-rw-r--r--CREDITS5
-rw-r--r--Documentation/ABI/stable/sysfs-driver-ib_srp13
-rw-r--r--Documentation/ABI/stable/sysfs-transport-srp39
-rw-r--r--Documentation/ABI/testing/sysfs-class-mtd2
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-batman-adv4
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-mesh34
-rw-r--r--Documentation/ABI/testing/sysfs-class-powercap152
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-ryos178
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-wiimote18
-rw-r--r--Documentation/DMA-API-HOWTO.txt37
-rw-r--r--Documentation/DMA-API.txt8
-rw-r--r--Documentation/DMA-attributes.txt6
-rw-r--r--Documentation/DocBook/device-drivers.tmpl5
-rw-r--r--Documentation/DocBook/filesystems.tmpl1
-rw-r--r--Documentation/DocBook/genericirq.tmpl64
-rw-r--r--Documentation/DocBook/mtdnand.tmpl2
-rw-r--r--Documentation/PCI/pci.txt8
-rw-r--r--Documentation/RCU/checklist.txt4
-rw-r--r--Documentation/RCU/stallwarn.txt22
-rw-r--r--Documentation/arm/Marvell/README1
-rw-r--r--Documentation/arm/sunxi/README26
-rw-r--r--Documentation/arm64/booting.txt45
-rw-r--r--Documentation/arm64/memory.txt2
-rw-r--r--Documentation/assoc_array.txt574
-rw-r--r--Documentation/block/biodoc.txt7
-rw-r--r--Documentation/block/biovecs.txt111
-rw-r--r--Documentation/connector/ucon.c2
-rw-r--r--Documentation/cpu-freq/cpu-drivers.txt27
-rw-r--r--Documentation/cpu-freq/governors.txt4
-rw-r--r--Documentation/cpuidle/governor.txt1
-rw-r--r--Documentation/device-mapper/cache-policies.txt6
-rw-r--r--Documentation/device-mapper/dm-crypt.txt11
-rw-r--r--Documentation/devices.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/arm-boards50
-rw-r--r--Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/cci.txt60
-rw-r--r--Documentation/devicetree/bindings/arm/omap/omap.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/vic.txt12
-rw-r--r--Documentation/devicetree/bindings/clock/emev2-clock.txt98
-rw-r--r--Documentation/devicetree/bindings/clock/imx6q-clock.txt5
-rw-r--r--Documentation/devicetree/bindings/clock/mvebu-corediv-clock.txt19
-rw-r--r--Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt14
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi.txt4
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi/sun4i-a10-gates.txt93
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi/sun5i-a10s-gates.txt75
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi/sun5i-a13-gates.txt58
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi/sun6i-a31-gates.txt83
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi/sun7i-a20-gates.txt98
-rw-r--r--Documentation/devicetree/bindings/crypto/omap-aes.txt31
-rw-r--r--Documentation/devicetree/bindings/crypto/omap-sham.txt28
-rw-r--r--Documentation/devicetree/bindings/dma/atmel-dma.txt2
-rw-r--r--Documentation/devicetree/bindings/hwrng/omap_rng.txt22
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-exynos5.txt44
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-rcar.txt23
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt3
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-ic.txt3
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/interrupts.txt29
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/sunxi/sun4i-a10.txt89
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/sunxi/sun5i-a13.txt55
-rw-r--r--Documentation/devicetree/bindings/leds/leds-lp55xx.txt11
-rw-r--r--Documentation/devicetree/bindings/media/st-rc.txt29
-rw-r--r--Documentation/devicetree/bindings/mfd/as3722.txt194
-rw-r--r--Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt5
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt9
-rw-r--r--Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt26
-rw-r--r--Documentation/devicetree/bindings/mtd/gpmc-nand.txt16
-rw-r--r--Documentation/devicetree/bindings/net/cpsw-phy-sel.txt28
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt7
-rw-r--r--Documentation/devicetree/bindings/pci/mvebu-pci.txt10
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,mxs-pinctrl.txt859
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt11
-rw-r--r--Documentation/devicetree/bindings/power_supply/ti,bq24735.txt32
-rw-r--r--Documentation/devicetree/bindings/regulator/as3722-regulator.txt91
-rw-r--r--Documentation/devicetree/bindings/regulator/da9210.txt21
-rw-r--r--Documentation/devicetree/bindings/regulator/palmas-pmic.txt12
-rw-r--r--Documentation/devicetree/bindings/regulator/regulator.txt5
-rw-r--r--Documentation/devicetree/bindings/rng/qcom,prng.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/cs42l73.txt22
-rw-r--r--Documentation/devicetree/bindings/sound/davinci-evm-audio.txt42
-rw-r--r--Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt41
-rw-r--r--Documentation/devicetree/bindings/sound/tlv320aic3x.txt26
-rw-r--r--Documentation/devicetree/bindings/sound/tpa6130a2.txt27
-rw-r--r--Documentation/devicetree/bindings/timer/efm32,timer.txt23
-rw-r--r--Documentation/devicetree/bindings/tty/serial/renesas,sci-serial.txt53
-rw-r--r--Documentation/devicetree/bindings/usb/ux500-usb.txt2
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt8
-rw-r--r--Documentation/devicetree/bindings/video/atmel,lcdc.txt75
-rw-r--r--Documentation/devicetree/bindings/video/exynos_hdmi.txt14
-rw-r--r--Documentation/devicetree/bindings/video/exynos_mixer.txt4
-rw-r--r--Documentation/devicetree/bindings/watchdog/atmel-wdt.txt30
-rw-r--r--Documentation/devicetree/bindings/watchdog/dw_wdt.txt21
-rw-r--r--Documentation/devicetree/bindings/watchdog/men-a021-wdt.txt (renamed from Documentation/devicetree/bindings/gpio/men-a021-wdt.txt)0
-rw-r--r--Documentation/devicetree/bindings/watchdog/moxa,moxart-watchdog.txt15
-rw-r--r--Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt19
-rw-r--r--Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt14
-rw-r--r--Documentation/driver-model/devres.txt4
-rw-r--r--Documentation/efi-stub.txt (renamed from Documentation/x86/efi-stub.txt)0
-rw-r--r--Documentation/filesystems/Locking6
-rw-r--r--Documentation/filesystems/caching/netfs-api.txt73
-rw-r--r--Documentation/filesystems/f2fs.txt7
-rw-r--r--Documentation/filesystems/vfs.txt12
-rw-r--r--Documentation/hwmon/lm2506620
-rw-r--r--Documentation/hwmon/lm906
-rw-r--r--Documentation/hwmon/ltc297844
-rw-r--r--Documentation/input/gamepad.txt3
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/kbuild/kconfig.txt11
-rw-r--r--Documentation/kernel-parameters.txt124
-rw-r--r--Documentation/kernel-per-CPU-kthreads.txt17
-rw-r--r--Documentation/laptops/thinkpad-acpi.txt7
-rw-r--r--Documentation/lockstat.txt123
-rw-r--r--Documentation/networking/batman-adv.txt54
-rw-r--r--Documentation/networking/bonding.txt66
-rw-r--r--Documentation/networking/dccp.txt4
-rw-r--r--Documentation/networking/e100.txt2
-rw-r--r--Documentation/networking/ieee802154.txt4
-rw-r--r--Documentation/networking/l2tp.txt2
-rw-r--r--Documentation/networking/netdev-FAQ.txt24
-rw-r--r--Documentation/networking/netdevices.txt10
-rw-r--r--Documentation/networking/netlink_mmap.txt6
-rw-r--r--Documentation/networking/operstates.txt4
-rw-r--r--Documentation/networking/rxrpc.txt2
-rw-r--r--Documentation/networking/stmmac.txt8
-rw-r--r--Documentation/networking/vortex.txt4
-rw-r--r--Documentation/networking/x25-iface.txt2
-rw-r--r--Documentation/power/opp.txt108
-rw-r--r--Documentation/power/power_supply_class.txt8
-rw-r--r--Documentation/power/powercap/powercap.txt236
-rw-r--r--Documentation/power/runtime_pm.txt14
-rw-r--r--Documentation/ptp/testptp.c65
-rw-r--r--Documentation/s390/s390dbf.txt10
-rw-r--r--Documentation/scheduler/sched-arch.txt5
-rw-r--r--Documentation/security/00-INDEX2
-rw-r--r--Documentation/security/IMA-templates.txt87
-rw-r--r--Documentation/security/keys.txt20
-rw-r--r--Documentation/serial/driver4
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt2
-rw-r--r--Documentation/sound/alsa/Audiophile-Usb.txt2
-rw-r--r--Documentation/sound/alsa/CMIPCI.txt2
-rw-r--r--Documentation/sound/alsa/compress_offload.txt6
-rw-r--r--Documentation/sound/alsa/soc/DPCM.txt380
-rw-r--r--Documentation/sound/alsa/soc/codec.txt46
-rw-r--r--Documentation/sound/alsa/soc/dapm.txt73
-rw-r--r--Documentation/sound/alsa/soc/machine.txt6
-rw-r--r--Documentation/sound/alsa/soc/platform.txt19
-rw-r--r--Documentation/sysctl/kernel.txt76
-rw-r--r--Documentation/sysrq.txt28
-rw-r--r--Documentation/timers/00-INDEX4
-rw-r--r--Documentation/usb/gadget_configfs.txt6
-rw-r--r--Documentation/virtual/kvm/00-INDEX24
-rw-r--r--Documentation/virtual/kvm/api.txt152
-rw-r--r--Documentation/virtual/kvm/cpuid.txt7
-rw-r--r--Documentation/virtual/kvm/devices/vfio.txt22
-rw-r--r--Documentation/virtual/kvm/locking.txt19
-rw-r--r--Documentation/vm/00-INDEX20
-rw-r--r--MAINTAINERS181
-rw-r--r--Makefile8
-rw-r--r--arch/Kconfig22
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/alpha/include/asm/Kbuild1
-rw-r--r--arch/alpha/include/uapi/asm/socket.h4
-rw-r--r--arch/arc/Kconfig10
-rw-r--r--arch/arc/boot/dts/abilis_tb100.dtsi172
-rw-r--r--arch/arc/boot/dts/abilis_tb100_dvk.dts24
-rw-r--r--arch/arc/boot/dts/abilis_tb101.dtsi178
-rw-r--r--arch/arc/boot/dts/abilis_tb101_dvk.dts24
-rw-r--r--arch/arc/boot/dts/abilis_tb10x.dtsi3
-rw-r--r--arch/arc/configs/fpga_defconfig3
-rw-r--r--arch/arc/include/asm/Kbuild1
-rw-r--r--arch/arc/include/asm/cache.h8
-rw-r--r--arch/arc/include/asm/irq.h4
-rw-r--r--arch/arc/include/asm/irqflags.h22
-rw-r--r--arch/arc/include/asm/mach_desc.h17
-rw-r--r--arch/arc/include/asm/mmu.h2
-rw-r--r--arch/arc/include/asm/mmu_context.h61
-rw-r--r--arch/arc/include/asm/prom.h14
-rw-r--r--arch/arc/include/asm/setup.h2
-rw-r--r--arch/arc/include/asm/smp.h2
-rw-r--r--arch/arc/include/asm/tlbflush.h11
-rw-r--r--arch/arc/include/asm/unaligned.h3
-rw-r--r--arch/arc/kernel/ctx_sw.c13
-rw-r--r--arch/arc/kernel/ctx_sw_asm.S11
-rw-r--r--arch/arc/kernel/devtree.c97
-rw-r--r--arch/arc/kernel/entry.S24
-rw-r--r--arch/arc/kernel/head.S2
-rw-r--r--arch/arc/kernel/irq.c12
-rw-r--r--arch/arc/kernel/kgdb.c12
-rw-r--r--arch/arc/kernel/kprobes.c8
-rw-r--r--arch/arc/kernel/reset.c1
-rw-r--r--arch/arc/kernel/setup.c17
-rw-r--r--arch/arc/kernel/smp.c10
-rw-r--r--arch/arc/kernel/stacktrace.c5
-rw-r--r--arch/arc/kernel/time.c4
-rw-r--r--arch/arc/kernel/traps.c3
-rw-r--r--arch/arc/mm/cache_arc700.c155
-rw-r--r--arch/arc/mm/fault.c8
-rw-r--r--arch/arc/mm/init.c7
-rw-r--r--arch/arc/mm/tlb.c91
-rw-r--r--arch/arc/mm/tlbex.S4
-rw-r--r--arch/arc/plat-tb10x/Kconfig2
-rw-r--r--arch/arm/Kconfig118
-rw-r--r--arch/arm/Kconfig.debug58
-rw-r--r--arch/arm/Makefile3
-rw-r--r--arch/arm/arm-soc-for-next-contents.txt173
-rw-r--r--arch/arm/boot/Makefile13
-rw-r--r--arch/arm/boot/compressed/Makefile4
-rw-r--r--arch/arm/boot/compressed/head-shark.S140
-rw-r--r--arch/arm/boot/compressed/head.S9
-rw-r--r--arch/arm/boot/compressed/ofw-shark.c260
-rw-r--r--arch/arm/boot/dts/Makefile65
-rw-r--r--arch/arm/boot/dts/am335x-base0033.dts16
-rw-r--r--arch/arm/boot/dts/am335x-bone-common.dtsi311
-rw-r--r--arch/arm/boot/dts/am335x-bone.dts18
-rw-r--r--arch/arm/boot/dts/am335x-boneblack.dts61
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts771
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts474
-rw-r--r--arch/arm/boot/dts/am335x-igep0033.dtsi278
-rw-r--r--arch/arm/boot/dts/am335x-nano.dts431
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi161
-rw-r--r--arch/arm/boot/dts/am4372.dtsi599
-rw-r--r--arch/arm/boot/dts/am43x-epos-evm.dts168
-rw-r--r--arch/arm/boot/dts/armada-370-netgear-rn104.dts193
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi20
-rw-r--r--arch/arm/boot/dts/armada-370.dtsi9
-rw-r--r--arch/arm/boot/dts/armada-xp-matrix.dts75
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78230.dtsi1
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78260.dtsi1
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78460.dtsi1
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi10
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi5
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi5
-rw-r--r--arch/arm/boot/dts/at91sam9g20ek_common.dtsi1
-rw-r--r--arch/arm/boot/dts/at91sam9g25.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9g35.dtsi1
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi5
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi8
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts34
-rw-r--r--arch/arm/boot/dts/at91sam9x25.dtsi24
-rw-r--r--arch/arm/boot/dts/at91sam9x35.dtsi1
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi72
-rw-r--r--arch/arm/boot/dts/at91sam9x5_macb0.dtsi56
-rw-r--r--arch/arm/boot/dts/at91sam9x5_macb1.dtsi44
-rw-r--r--arch/arm/boot/dts/at91sam9x5_usart3.dtsi51
-rw-r--r--arch/arm/boot/dts/atlas6.dtsi33
-rw-r--r--arch/arm/boot/dts/bcm11351-brt.dts1
-rw-r--r--arch/arm/boot/dts/bcm11351.dtsi54
-rw-r--r--arch/arm/boot/dts/bcm28155-ap.dts1
-rw-r--r--arch/arm/boot/dts/cros5250-common.dtsi8
-rw-r--r--arch/arm/boot/dts/dove-cm-a510.dts2
-rw-r--r--arch/arm/boot/dts/dove-cubox.dts20
-rw-r--r--arch/arm/boot/dts/dove-d2plug.dts2
-rw-r--r--arch/arm/boot/dts/dove-d3plug.dts103
-rw-r--r--arch/arm/boot/dts/dove-dove-db.dts2
-rw-r--r--arch/arm/boot/dts/dove.dtsi1037
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts275
-rw-r--r--arch/arm/boot/dts/dra7.dtsi586
-rw-r--r--arch/arm/boot/dts/ecx-2000.dts6
-rw-r--r--arch/arm/boot/dts/ecx-common.dtsi14
-rw-r--r--arch/arm/boot/dts/emev2-kzm9d-reference.dts57
-rw-r--r--arch/arm/boot/dts/emev2-kzm9d.dts33
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi10
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts28
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts7
-rw-r--r--arch/arm/boot/dts/exynos4210-universal_c210.dts4
-rw-r--r--arch/arm/boot/dts/exynos4412-origen.dts21
-rw-r--r--arch/arm/boot/dts/exynos5.dtsi21
-rw-r--r--arch/arm/boot/dts/exynos5250-arndale.dts45
-rw-r--r--arch/arm/boot/dts/exynos5250-pinctrl.dtsi44
-rw-r--r--arch/arm/boot/dts/exynos5250-smdk5250.dts24
-rw-r--r--arch/arm/boot/dts/exynos5250-snow.dts2
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi47
-rw-r--r--arch/arm/boot/dts/exynos5420-smdk5420.dts59
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi134
-rw-r--r--arch/arm/boot/dts/exynos5440-sd5v1.dts9
-rw-r--r--arch/arm/boot/dts/exynos5440-ssdk5440.dts4
-rw-r--r--arch/arm/boot/dts/exynos5440.dtsi2
-rw-r--r--arch/arm/boot/dts/highbank.dts6
-rw-r--r--arch/arm/boot/dts/imx23-evk.dts16
-rw-r--r--arch/arm/boot/dts/imx23-olinuxino.dts18
-rw-r--r--arch/arm/boot/dts/imx23-pinfunc.h333
-rw-r--r--arch/arm/boot/dts/imx23-stmp378x_devb.dts12
-rw-r--r--arch/arm/boot/dts/imx23.dtsi221
-rw-r--r--arch/arm/boot/dts/imx25.dtsi15
-rw-r--r--arch/arm/boot/dts/imx27-apf27dev.dts26
-rw-r--r--arch/arm/boot/dts/imx27.dtsi1
-rw-r--r--arch/arm/boot/dts/imx28-apf28.dts2
-rw-r--r--arch/arm/boot/dts/imx28-apf28dev.dts36
-rw-r--r--arch/arm/boot/dts/imx28-apx4devkit.dts60
-rw-r--r--arch/arm/boot/dts/imx28-cfa10036.dts26
-rw-r--r--arch/arm/boot/dts/imx28-cfa10037.dts18
-rw-r--r--arch/arm/boot/dts/imx28-cfa10049.dts156
-rw-r--r--arch/arm/boot/dts/imx28-cfa10055.dts80
-rw-r--r--arch/arm/boot/dts/imx28-cfa10056.dts38
-rw-r--r--arch/arm/boot/dts/imx28-cfa10057.dts66
-rw-r--r--arch/arm/boot/dts/imx28-cfa10058.dts24
-rw-r--r--arch/arm/boot/dts/imx28-evk.dts59
-rw-r--r--arch/arm/boot/dts/imx28-m28cu3.dts266
-rw-r--r--arch/arm/boot/dts/imx28-m28evk.dts28
-rw-r--r--arch/arm/boot/dts/imx28-pinfunc.h506
-rw-r--r--arch/arm/boot/dts/imx28-sps1.dts14
-rw-r--r--arch/arm/boot/dts/imx28-tx28.dts703
-rw-r--r--arch/arm/boot/dts/imx28.dtsi633
-rw-r--r--arch/arm/boot/dts/imx50-evk.dts85
-rw-r--r--arch/arm/boot/dts/imx50-pinfunc.h923
-rw-r--r--arch/arm/boot/dts/imx50.dtsi670
-rw-r--r--arch/arm/boot/dts/imx51-apf51dev.dts27
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts37
-rw-r--r--arch/arm/boot/dts/imx51-eukrea-cpuimx51.dtsi63
-rw-r--r--arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts132
-rw-r--r--arch/arm/boot/dts/imx51.dtsi39
-rw-r--r--arch/arm/boot/dts/imx53-qsb.dts9
-rw-r--r--arch/arm/boot/dts/imx53-voipac-bsb.dts135
-rw-r--r--arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi193
-rw-r--r--arch/arm/boot/dts/imx53.dtsi15
-rw-r--r--arch/arm/boot/dts/imx6dl-gw51xx.dts19
-rw-r--r--arch/arm/boot/dts/imx6dl-gw52xx.dts19
-rw-r--r--arch/arm/boot/dts/imx6dl-gw53xx.dts19
-rw-r--r--arch/arm/boot/dts/imx6dl-gw54xx.dts19
-rw-r--r--arch/arm/boot/dts/imx6q-arm2.dts17
-rw-r--r--arch/arm/boot/dts/imx6q-cm-fx6.dts53
-rw-r--r--arch/arm/boot/dts/imx6q-gw51xx.dts19
-rw-r--r--arch/arm/boot/dts/imx6q-gw52xx.dts23
-rw-r--r--arch/arm/boot/dts/imx6q-gw53xx.dts23
-rw-r--r--arch/arm/boot/dts/imx6q-gw5400-a.dts443
-rw-r--r--arch/arm/boot/dts/imx6q-gw54xx.dts23
-rw-r--r--arch/arm/boot/dts/imx6q-pinfunc.h4
-rw-r--r--arch/arm/boot/dts/imx6q-sabrelite.dts104
-rw-r--r--arch/arm/boot/dts/imx6q-udoo.dts39
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw51xx.dtsi272
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw52xx.dtsi373
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw53xx.dtsi429
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw54xx.dtsi457
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabreauto.dtsi5
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabresd.dtsi18
-rw-r--r--arch/arm/boot/dts/imx6qdl-wandboard.dtsi23
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi129
-rw-r--r--arch/arm/boot/dts/imx6sl-evk.dts67
-rw-r--r--arch/arm/boot/dts/imx6sl.dtsi176
-rw-r--r--arch/arm/boot/dts/integrator.dtsi5
-rw-r--r--arch/arm/boot/dts/integratorap.dts5
-rw-r--r--arch/arm/boot/dts/integratorcp.dts13
-rw-r--r--arch/arm/boot/dts/keystone-clocks.dtsi821
-rw-r--r--arch/arm/boot/dts/keystone.dts63
-rw-r--r--arch/arm/boot/dts/kirkwood-db-88f6281.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-db-88f6282.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-db.dtsi44
-rw-r--r--arch/arm/boot/dts/kirkwood-dnskw.dtsi76
-rw-r--r--arch/arm/boot/dts/kirkwood-dockstar.dts40
-rw-r--r--arch/arm/boot/dts/kirkwood-goflexnet.dts51
-rw-r--r--arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts40
-rw-r--r--arch/arm/boot/dts/kirkwood-ib62x0.dts53
-rw-r--r--arch/arm/boot/dts/kirkwood-iconnect.dts59
-rw-r--r--arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts51
-rw-r--r--arch/arm/boot/dts/kirkwood-km_kirkwood.dts14
-rw-r--r--arch/arm/boot/dts/kirkwood-mplcec4.dts63
-rw-r--r--arch/arm/boot/dts/kirkwood-netgear_readynas_duo_v2.dts61
-rw-r--r--arch/arm/boot/dts/kirkwood-nsa310-common.dtsi86
-rw-r--r--arch/arm/boot/dts/kirkwood-nsa310.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-openblocks_a6.dts74
-rw-r--r--arch/arm/boot/dts/kirkwood-openblocks_a7.dts223
-rw-r--r--arch/arm/boot/dts/kirkwood-sheevaplug-common.dtsi42
-rw-r--r--arch/arm/boot/dts/kirkwood-topkick.dts62
-rw-r--r--arch/arm/boot/dts/kirkwood-ts219-6282.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood.dtsi57
-rw-r--r--arch/arm/boot/dts/kizbox.dts6
-rw-r--r--arch/arm/boot/dts/mxs-pinfunc.h31
-rw-r--r--arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi52
-rw-r--r--arch/arm/boot/dts/omap-zoom-common.dtsi33
-rw-r--r--arch/arm/boot/dts/omap2420-h4.dts6
-rw-r--r--arch/arm/boot/dts/omap3-beagle-xm.dts65
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts46
-rw-r--r--arch/arm/boot/dts/omap3-devkit8000.dts2
-rw-r--r--arch/arm/boot/dts/omap3-evm-37xx.dts151
-rw-r--r--arch/arm/boot/dts/omap3-evm-common.dtsi94
-rw-r--r--arch/arm/boot/dts/omap3-evm.dts60
-rw-r--r--arch/arm/boot/dts/omap3-gta04.dts170
-rw-r--r--arch/arm/boot/dts/omap3-igep.dtsi11
-rw-r--r--arch/arm/boot/dts/omap3-igep0020.dts107
-rw-r--r--arch/arm/boot/dts/omap3-igep0030.dts17
-rw-r--r--arch/arm/boot/dts/omap3-n9.dts18
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts484
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi174
-rw-r--r--arch/arm/boot/dts/omap3-n950.dts18
-rw-r--r--arch/arm/boot/dts/omap3-zoom3.dts217
-rw-r--r--arch/arm/boot/dts/omap3.dtsi58
-rw-r--r--arch/arm/boot/dts/omap3430-sdp.dts22
-rw-r--r--arch/arm/boot/dts/omap36xx.dtsi4
-rw-r--r--arch/arm/boot/dts/omap4-panda-common.dtsi82
-rw-r--r--arch/arm/boot/dts/omap4-panda-es.dts4
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts21
-rw-r--r--arch/arm/boot/dts/omap4.dtsi37
-rw-r--r--arch/arm/boot/dts/omap5-uevm.dts84
-rw-r--r--arch/arm/boot/dts/omap5.dtsi30
-rw-r--r--arch/arm/boot/dts/prima2.dtsi30
-rw-r--r--arch/arm/boot/dts/qcom-msm8660-surf.dts (renamed from arch/arm/boot/dts/msm8660-surf.dts)0
-rw-r--r--arch/arm/boot/dts/qcom-msm8960-cdp.dts (renamed from arch/arm/boot/dts/msm8960-cdp.dts)0
-rw-r--r--arch/arm/boot/dts/r7s72100-genmai.dts31
-rw-r--r--arch/arm/boot/dts/r7s72100.dtsi36
-rw-r--r--arch/arm/boot/dts/r8a73a4-ape6evm-reference.dts73
-rw-r--r--arch/arm/boot/dts/r8a73a4-ape6evm.dts1
-rw-r--r--arch/arm/boot/dts/r8a73a4.dtsi52
-rw-r--r--arch/arm/boot/dts/r8a7740-armadillo800eva-reference.dts78
-rw-r--r--arch/arm/boot/dts/r8a7740.dtsi35
-rw-r--r--arch/arm/boot/dts/r8a7778-bockw-reference.dts27
-rw-r--r--arch/arm/boot/dts/r8a7778.dtsi19
-rw-r--r--arch/arm/boot/dts/r8a7779-marzen-reference.dts8
-rw-r--r--arch/arm/boot/dts/r8a7779.dtsi5
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi89
-rw-r--r--arch/arm/boot/dts/r8a7791-koelsch.dts32
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi74
-rw-r--r--arch/arm/boot/dts/rk3066a-bqcurie2.dts109
-rw-r--r--arch/arm/boot/dts/rk3066a.dtsi120
-rw-r--r--arch/arm/boot/dts/rk3188-clocks.dtsi289
-rw-r--r--arch/arm/boot/dts/rk3188-radxarock.dts80
-rw-r--r--arch/arm/boot/dts/rk3188.dtsi253
-rw-r--r--arch/arm/boot/dts/rk3xxx.dtsi124
-rw-r--r--arch/arm/boot/dts/s3c6400.dtsi41
-rw-r--r--arch/arm/boot/dts/s3c6410-mini6410.dts228
-rw-r--r--arch/arm/boot/dts/s3c6410-smdk6410.dts103
-rw-r--r--arch/arm/boot/dts/s3c6410.dtsi57
-rw-r--r--arch/arm/boot/dts/s3c64xx-pinctrl.dtsi687
-rw-r--r--arch/arm/boot/dts/s3c64xx.dtsi199
-rw-r--r--arch/arm/boot/dts/sama5d3.dtsi208
-rw-r--r--arch/arm/boot/dts/sama5d31.dtsi16
-rw-r--r--arch/arm/boot/dts/sama5d31ek.dts3
-rw-r--r--arch/arm/boot/dts/sama5d33.dtsi14
-rw-r--r--arch/arm/boot/dts/sama5d33ek.dts3
-rw-r--r--arch/arm/boot/dts/sama5d34.dtsi16
-rw-r--r--arch/arm/boot/dts/sama5d34ek.dts3
-rw-r--r--arch/arm/boot/dts/sama5d35.dtsi18
-rw-r--r--arch/arm/boot/dts/sama5d35ek.dts3
-rw-r--r--arch/arm/boot/dts/sama5d3_can.dtsi54
-rw-r--r--arch/arm/boot/dts/sama5d3_emac.dtsi44
-rw-r--r--arch/arm/boot/dts/sama5d3_gmac.dtsi77
-rw-r--r--arch/arm/boot/dts/sama5d3_lcd.dtsi55
-rw-r--r--arch/arm/boot/dts/sama5d3_mci2.dtsi47
-rw-r--r--arch/arm/boot/dts/sama5d3_tcb1.dtsi27
-rw-r--r--arch/arm/boot/dts/sama5d3_uart.dtsi53
-rw-r--r--arch/arm/boot/dts/sama5d3xcm.dtsi1
-rw-r--r--arch/arm/boot/dts/sh73a0-kzm9g-reference.dts2
-rw-r--r--arch/arm/boot/dts/sh73a0.dtsi5
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi297
-rw-r--r--arch/arm/boot/dts/socfpga_arria5.dtsi58
-rw-r--r--arch/arm/boot/dts/socfpga_arria5_socdk.dts (renamed from arch/mips/include/asm/mach-powertv/powertv-clock.h)37
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5.dtsi (renamed from arch/arm/boot/dts/socfpga_cyclone5.dts)20
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_socdk.dts40
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_sockit.dts (renamed from arch/mips/include/asm/mach-powertv/irq.h)30
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi216
-rw-r--r--arch/arm/boot/dts/ste-href-stuib.dtsi (renamed from arch/arm/boot/dts/ste-stuib.dtsi)2
-rw-r--r--arch/arm/boot/dts/ste-href-tvk1281618.dtsi41
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi109
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60-stuib.dts34
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60-tvk.dts19
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60.dtsi (renamed from arch/arm/boot/dts/ste-hrefprev60.dts)37
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus-stuib.dts36
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus-tvk.dts21
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus.dts210
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus.dtsi70
-rw-r--r--arch/arm/boot/dts/ste-nomadik-stn8815.dtsi12
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts85
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi5
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi5
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi5
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi2
-rw-r--r--arch/arm/boot/dts/sun7i-a20-cubieboard2.dts12
-rw-r--r--arch/arm/boot/dts/sun7i-a20-cubietruck.dts63
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts18
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi71
-rw-r--r--arch/arm/boot/dts/tegra114-dalmore.dts32
-rw-r--r--arch/arm/boot/dts/tegra114.dtsi6
-rw-r--r--arch/arm/boot/dts/tegra124-venice2.dts27
-rw-r--r--arch/arm/boot/dts/tegra124.dtsi149
-rw-r--r--arch/arm/boot/dts/tegra30-cardhu.dtsi3
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi5
-rw-r--r--arch/arm/boot/dts/testcases/tests-interrupts.dtsi58
-rw-r--r--arch/arm/boot/dts/testcases/tests.dtsi1
-rw-r--r--arch/arm/boot/dts/twl4030.dtsi49
-rw-r--r--arch/arm/boot/dts/twl6030_omap4.dtsi38
-rw-r--r--arch/arm/boot/dts/versatile-ab.dts2
-rw-r--r--arch/arm/boot/dts/versatile-pb.dts2
-rw-r--r--arch/arm/boot/dts/vf610-cosmic.dts47
-rw-r--r--arch/arm/boot/dts/vf610-twr.dts17
-rw-r--r--arch/arm/boot/dts/vf610.dtsi12
-rw-r--r--arch/arm/boot/dts/zynq-7000.dtsi8
-rw-r--r--arch/arm/common/Makefile3
-rw-r--r--arch/arm/common/bL_switcher.c822
-rw-r--r--arch/arm/common/bL_switcher_dummy_if.c71
-rw-r--r--arch/arm/common/edma.c4
-rw-r--r--arch/arm/common/mcpm_entry.c27
-rw-r--r--arch/arm/common/mcpm_head.S18
-rw-r--r--arch/arm/common/mcpm_platsmp.c27
-rw-r--r--arch/arm/common/timer-sp.c2
-rw-r--r--arch/arm/common/via82c505.c83
-rw-r--r--arch/arm/configs/bcm_defconfig10
-rw-r--r--arch/arm/configs/bockw_defconfig4
-rw-r--r--arch/arm/configs/efm32_defconfig102
-rw-r--r--arch/arm/configs/genmai_defconfig116
-rw-r--r--arch/arm/configs/h3600_defconfig22
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig2
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig17
-rw-r--r--arch/arm/configs/integrator_defconfig25
-rw-r--r--arch/arm/configs/keystone_defconfig2
-rw-r--r--arch/arm/configs/koelsch_defconfig54
-rw-r--r--arch/arm/configs/lager_defconfig2
-rw-r--r--arch/arm/configs/marzen_defconfig2
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/configs/mxs_defconfig7
-rw-r--r--arch/arm/configs/omap2plus_defconfig109
-rw-r--r--arch/arm/configs/prima2_defconfig1
-rw-r--r--arch/arm/configs/shark_defconfig80
-rw-r--r--arch/arm/configs/sunxi_defconfig61
-rw-r--r--arch/arm/configs/tegra_defconfig5
-rw-r--r--arch/arm/configs/u8500_defconfig12
-rw-r--r--arch/arm/configs/vexpress_defconfig73
-rw-r--r--arch/arm/crypto/.gitignore1
-rw-r--r--arch/arm/crypto/Makefile14
-rw-r--r--arch/arm/crypto/aes_glue.c22
-rw-r--r--arch/arm/crypto/aes_glue.h19
-rw-r--r--arch/arm/crypto/aesbs-core.S_shipped2544
-rw-r--r--arch/arm/crypto/aesbs-glue.c434
-rw-r--r--arch/arm/crypto/bsaes-armv7.pl2467
-rw-r--r--arch/arm/include/asm/Kbuild2
-rw-r--r--arch/arm/include/asm/arch_timer.h36
-rw-r--r--arch/arm/include/asm/assembler.h7
-rw-r--r--arch/arm/include/asm/atomic.h33
-rw-r--r--arch/arm/include/asm/bL_switcher.h77
-rw-r--r--arch/arm/include/asm/bug.h10
-rw-r--r--arch/arm/include/asm/cacheflush.h46
-rw-r--r--arch/arm/include/asm/cmpxchg.h58
-rw-r--r--arch/arm/include/asm/cputype.h1
-rw-r--r--arch/arm/include/asm/dma-mapping.h58
-rw-r--r--arch/arm/include/asm/hardirq.h2
-rw-r--r--arch/arm/include/asm/hardware/coresight.h8
-rw-r--r--arch/arm/include/asm/io.h8
-rw-r--r--arch/arm/include/asm/kgdb.h3
-rw-r--r--arch/arm/include/asm/kvm_arm.h9
-rw-r--r--arch/arm/include/asm/kvm_asm.h2
-rw-r--r--arch/arm/include/asm/kvm_emulate.h5
-rw-r--r--arch/arm/include/asm/kvm_host.h6
-rw-r--r--arch/arm/include/asm/kvm_mmu.h17
-rw-r--r--arch/arm/include/asm/mach/arch.h1
-rw-r--r--arch/arm/include/asm/mach/pci.h4
-rw-r--r--arch/arm/include/asm/mcpm.h39
-rw-r--r--arch/arm/include/asm/memory.h76
-rw-r--r--arch/arm/include/asm/mmu.h2
-rw-r--r--arch/arm/include/asm/pgtable-2level.h7
-rw-r--r--arch/arm/include/asm/pgtable-3level.h5
-rw-r--r--arch/arm/include/asm/processor.h33
-rw-r--r--arch/arm/include/asm/prom.h2
-rw-r--r--arch/arm/include/asm/sched_clock.h4
-rw-r--r--arch/arm/include/asm/setup.h2
-rw-r--r--arch/arm/include/asm/smp.h2
-rw-r--r--arch/arm/include/asm/spinlock.h36
-rw-r--r--arch/arm/include/asm/spinlock_types.h2
-rw-r--r--arch/arm/include/asm/timex.h6
-rw-r--r--arch/arm/include/asm/tlbflush.h48
-rw-r--r--arch/arm/include/asm/unified.h4
-rw-r--r--arch/arm/include/asm/xen/hypervisor.h2
-rw-r--r--arch/arm/include/asm/xen/page-coherent.h50
-rw-r--r--arch/arm/include/asm/xen/page.h50
-rw-r--r--arch/arm/include/debug/efm32.S45
-rw-r--r--arch/arm/include/debug/imx-uart.h10
-rw-r--r--arch/arm/include/debug/msm.S5
-rw-r--r--arch/arm/include/debug/pl01x.S2
-rw-r--r--arch/arm/include/debug/vf.S26
-rw-r--r--arch/arm/include/uapi/asm/Kbuild1
-rw-r--r--arch/arm/include/uapi/asm/hwcap.h1
-rw-r--r--arch/arm/include/uapi/asm/kvm.h3
-rw-r--r--arch/arm/include/uapi/asm/perf_regs.h23
-rw-r--r--arch/arm/kernel/Makefile4
-rw-r--r--arch/arm/kernel/arch_timer.c14
-rw-r--r--arch/arm/kernel/armksyms.c1
-rw-r--r--arch/arm/kernel/devtree.c57
-rw-r--r--arch/arm/kernel/entry-armv.S5
-rw-r--r--arch/arm/kernel/entry-common.S4
-rw-r--r--arch/arm/kernel/entry-v7m.S2
-rw-r--r--arch/arm/kernel/head.S82
-rw-r--r--arch/arm/kernel/hw_breakpoint.c14
-rw-r--r--arch/arm/kernel/kprobes.c8
-rw-r--r--arch/arm/kernel/module.c57
-rw-r--r--arch/arm/kernel/perf_event.c3
-rw-r--r--arch/arm/kernel/perf_event_cpu.c2
-rw-r--r--arch/arm/kernel/perf_regs.c30
-rw-r--r--arch/arm/kernel/psci_smp.c1
-rw-r--r--arch/arm/kernel/setup.c29
-rw-r--r--arch/arm/kernel/signal.c24
-rw-r--r--arch/arm/kernel/sigreturn_codes.S80
-rw-r--r--arch/arm/kernel/sleep.S27
-rw-r--r--arch/arm/kernel/smp.c23
-rw-r--r--arch/arm/kernel/smp_scu.c14
-rw-r--r--arch/arm/kernel/smp_tlb.c36
-rw-r--r--arch/arm/kernel/smp_twd.c24
-rw-r--r--arch/arm/kernel/suspend.c8
-rw-r--r--arch/arm/kernel/time.c29
-rw-r--r--arch/arm/kernel/traps.c24
-rw-r--r--arch/arm/kvm/Kconfig1
-rw-r--r--arch/arm/kvm/Makefile2
-rw-r--r--arch/arm/kvm/arm.c24
-rw-r--r--arch/arm/kvm/coproc.c120
-rw-r--r--arch/arm/kvm/coproc_a15.c117
-rw-r--r--arch/arm/kvm/coproc_a7.c54
-rw-r--r--arch/arm/kvm/emulate.c2
-rw-r--r--arch/arm/kvm/guest.c24
-rw-r--r--arch/arm/kvm/handle_exit.c20
-rw-r--r--arch/arm/kvm/mmu.c223
-rw-r--r--arch/arm/kvm/psci.c17
-rw-r--r--arch/arm/kvm/reset.c15
-rw-r--r--arch/arm/lib/Makefile1
-rw-r--r--arch/arm/lib/bitops.h5
-rw-r--r--arch/arm/lib/io-shark.c13
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c41
-rw-r--r--arch/arm/mach-at91/Makefile1
-rw-r--r--arch/arm/mach-at91/at91rm9200.c2
-rw-r--r--arch/arm/mach-at91/at91sam9260.c2
-rw-r--r--arch/arm/mach-at91/at91sam9261.c2
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c6
-rw-r--r--arch/arm/mach-at91/at91sam9263.c2
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c6
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c2
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c6
-rw-r--r--arch/arm/mach-at91/at91sam9n12.c1
-rw-r--r--arch/arm/mach-at91/at91sam9rl.c2
-rw-r--r--arch/arm/mach-at91/at91sam9rl_devices.c6
-rw-r--r--arch/arm/mach-at91/board-cam60.c2
-rw-r--r--arch/arm/mach-at91/board-dt-rm9200.c7
-rw-r--r--arch/arm/mach-at91/board-dt-sam9.c7
-rw-r--r--arch/arm/mach-at91/board-sam9260ek.c2
-rw-r--r--arch/arm/mach-at91/board-sam9261ek.c10
-rw-r--r--arch/arm/mach-at91/board-sam9263ek.c8
-rw-r--r--arch/arm/mach-at91/board-sam9m10g45ek.c4
-rw-r--r--arch/arm/mach-at91/board-sam9rlek.c6
-rw-r--r--arch/arm/mach-at91/board.h4
-rw-r--r--arch/arm/mach-at91/pm.c27
-rw-r--r--arch/arm/mach-at91/pm.h59
-rw-r--r--arch/arm/mach-at91/setup.c14
-rw-r--r--arch/arm/mach-bcm/Kconfig22
-rw-r--r--arch/arm/mach-bcm/Makefile2
-rw-r--r--arch/arm/mach-bcm/board_bcm281xx.c3
-rw-r--r--arch/arm/mach-bcm2835/bcm2835.c2
-rw-r--r--arch/arm/mach-clps711x/common.c2
-rw-r--r--arch/arm/mach-clps711x/include/mach/timex.h2
-rw-r--r--arch/arm/mach-davinci/Kconfig1
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c18
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c8
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c15
-rw-r--r--arch/arm/mach-davinci/board-dm355-leopard.c14
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c8
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c7
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c15
-rw-r--r--arch/arm/mach-davinci/board-mityomapl138.c2
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c14
-rw-r--r--arch/arm/mach-davinci/board-omapl138-hawk.c8
-rw-r--r--arch/arm/mach-davinci/board-sffsdr.c2
-rw-r--r--arch/arm/mach-davinci/da830.c16
-rw-r--r--arch/arm/mach-davinci/da850.c16
-rw-r--r--arch/arm/mach-davinci/davinci.h7
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c26
-rw-r--r--arch/arm/mach-davinci/devices.c13
-rw-r--r--arch/arm/mach-davinci/dm355.c35
-rw-r--r--arch/arm/mach-davinci/dm365.c37
-rw-r--r--arch/arm/mach-davinci/dm644x.c30
-rw-r--r--arch/arm/mach-davinci/dm646x.c30
-rw-r--r--arch/arm/mach-davinci/include/mach/da8xx.h3
-rw-r--r--arch/arm/mach-davinci/include/mach/gpio-davinci.h91
-rw-r--r--arch/arm/mach-davinci/include/mach/gpio.h88
-rw-r--r--arch/arm/mach-davinci/include/mach/timex.h22
-rw-r--r--arch/arm/mach-davinci/time.c13
-rw-r--r--arch/arm/mach-dove/board-dt.c50
-rw-r--r--arch/arm/mach-dove/include/mach/timex.h9
-rw-r--r--arch/arm/mach-ebsa110/include/mach/timex.h19
-rw-r--r--arch/arm/mach-efm32/Makefile1
-rw-r--r--arch/arm/mach-efm32/dtmachine.c15
-rw-r--r--arch/arm/mach-ep93xx/Kconfig1
-rw-r--r--arch/arm/mach-ep93xx/core.c110
-rw-r--r--arch/arm/mach-ep93xx/include/mach/platform.h3
-rw-r--r--arch/arm/mach-ep93xx/include/mach/timex.h5
-rw-r--r--arch/arm/mach-exynos/Kconfig38
-rw-r--r--arch/arm/mach-exynos/Makefile4
-rw-r--r--arch/arm/mach-exynos/common.c19
-rw-r--r--arch/arm/mach-exynos/common.h2
-rw-r--r--arch/arm/mach-exynos/cpuidle.c18
-rw-r--r--arch/arm/mach-exynos/include/mach/timex.h29
-rw-r--r--arch/arm/mach-exynos/mach-exynos4-dt.c7
-rw-r--r--arch/arm/mach-exynos/mach-exynos5-dt.c7
-rw-r--r--arch/arm/mach-footbridge/include/mach/timex.h18
-rw-r--r--arch/arm/mach-gemini/include/mach/timex.h13
-rw-r--r--arch/arm/mach-gemini/time.c97
-rw-r--r--arch/arm/mach-highbank/Kconfig5
-rw-r--r--arch/arm/mach-highbank/Makefile2
-rw-r--r--arch/arm/mach-highbank/core.h4
-rw-r--r--arch/arm/mach-highbank/highbank.c47
-rw-r--r--arch/arm/mach-highbank/platsmp.c68
-rw-r--r--arch/arm/mach-highbank/pm.c27
-rw-r--r--arch/arm/mach-imx/Kconfig23
-rw-r--r--arch/arm/mach-imx/Makefile2
-rw-r--r--arch/arm/mach-imx/anatop.c33
-rw-r--r--arch/arm/mach-imx/clk-imx51-imx53.c100
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c192
-rw-r--r--arch/arm/mach-imx/clk-imx6sl.c3
-rw-r--r--arch/arm/mach-imx/common.h185
-rw-r--r--arch/arm/mach-imx/cpu.c95
-rw-r--r--arch/arm/mach-imx/epit.c2
-rw-r--r--arch/arm/mach-imx/gpc.c4
-rw-r--r--arch/arm/mach-imx/hotplug.c4
-rw-r--r--arch/arm/mach-imx/imx51-dt.c6
-rw-r--r--arch/arm/mach-imx/mach-armadillo5x0.c3
-rw-r--r--arch/arm/mach-imx/mach-imx53.c6
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c121
-rw-r--r--arch/arm/mach-imx/mach-imx6sl.c40
-rw-r--r--arch/arm/mach-imx/mach-mx31_3ds.c2
-rw-r--r--arch/arm/mach-imx/mach-pca100.c2
-rw-r--r--arch/arm/mach-imx/mach-pcm037.c5
-rw-r--r--arch/arm/mach-imx/mach-pcm038.c2
-rw-r--r--arch/arm/mach-imx/mach-pcm043.c2
-rw-r--r--arch/arm/mach-imx/mach-vf610.c9
-rw-r--r--arch/arm/mach-imx/mach-vpr200.c2
-rw-r--r--arch/arm/mach-imx/mm-imx5.c19
-rw-r--r--arch/arm/mach-imx/mx31lilly-db.c3
-rw-r--r--arch/arm/mach-imx/mxc.h6
-rw-r--r--arch/arm/mach-imx/pm-imx6q.c176
-rw-r--r--arch/arm/mach-imx/src.c16
-rw-r--r--arch/arm/mach-imx/system.c9
-rw-r--r--arch/arm/mach-imx/time.c2
-rw-r--r--arch/arm/mach-integrator/cm.h (renamed from arch/arm/mach-integrator/include/mach/cm.h)7
-rw-r--r--arch/arm/mach-integrator/core.c182
-rw-r--r--arch/arm/mach-integrator/include/mach/irqs.h81
-rw-r--r--arch/arm/mach-integrator/include/mach/timex.h26
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c152
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c185
-rw-r--r--arch/arm/mach-integrator/leds.c5
-rw-r--r--arch/arm/mach-integrator/pci_v3.c127
-rw-r--r--arch/arm/mach-iop13xx/include/mach/timex.h1
-rw-r--r--arch/arm/mach-iop32x/include/mach/timex.h6
-rw-r--r--arch/arm/mach-iop33x/include/mach/timex.h6
-rw-r--r--arch/arm/mach-ixp4xx/Kconfig4
-rw-r--r--arch/arm/mach-keystone/Kconfig2
-rw-r--r--arch/arm/mach-keystone/Makefile3
-rw-r--r--arch/arm/mach-keystone/platsmp.c1
-rw-r--r--arch/arm/mach-keystone/pm_domain.c82
-rw-r--r--arch/arm/mach-kirkwood/Makefile2
-rw-r--r--arch/arm/mach-kirkwood/board-dt.c103
-rw-r--r--arch/arm/mach-kirkwood/common.c1
-rw-r--r--arch/arm/mach-kirkwood/common.h6
-rw-r--r--arch/arm/mach-kirkwood/include/mach/bridge-regs.h2
-rw-r--r--arch/arm/mach-kirkwood/include/mach/timex.h10
-rw-r--r--arch/arm/mach-kirkwood/lacie_v2-common.c2
-rw-r--r--arch/arm/mach-kirkwood/pm.c73
-rw-r--r--arch/arm/mach-lpc32xx/include/mach/timex.h28
-rw-r--r--arch/arm/mach-mmp/include/mach/timex.h13
-rw-r--r--arch/arm/mach-mmp/ttc_dkb.c4
-rw-r--r--arch/arm/mach-msm/Kconfig13
-rw-r--r--arch/arm/mach-msm/Makefile3
-rw-r--r--arch/arm/mach-msm/board-dt-8660.c48
-rw-r--r--arch/arm/mach-msm/board-dt.c (renamed from arch/arm/mach-msm/board-dt-8960.c)17
-rw-r--r--arch/arm/mach-msm/include/mach/irqs-8960.h277
-rw-r--r--arch/arm/mach-msm/include/mach/irqs-8x60.h258
-rw-r--r--arch/arm/mach-msm/include/mach/irqs.h5
-rw-r--r--arch/arm/mach-msm/include/mach/timex.h21
-rw-r--r--arch/arm/mach-msm/timer.c1
-rw-r--r--arch/arm/mach-mv78xx0/include/mach/timex.h9
-rw-r--r--arch/arm/mach-mvebu/Kconfig1
-rw-r--r--arch/arm/mach-mvebu/coherency_ll.S3
-rw-r--r--arch/arm/mach-mvebu/headsmp.S4
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c20
-rw-r--r--arch/arm/mach-netx/include/mach/timex.h20
-rw-r--r--arch/arm/mach-nomadik/cpu-8815.c71
-rw-r--r--arch/arm/mach-nspire/nspire.c9
-rw-r--r--arch/arm/mach-omap1/board-osk.c2
-rw-r--r--arch/arm/mach-omap1/common.h3
-rw-r--r--arch/arm/mach-omap1/fpga.c3
-rw-r--r--arch/arm/mach-omap1/gpio15xx.c8
-rw-r--r--arch/arm/mach-omap1/gpio16xx.c22
-rw-r--r--arch/arm/mach-omap1/gpio7xx.c30
-rw-r--r--arch/arm/mach-omap1/include/mach/timex.h5
-rw-r--r--arch/arm/mach-omap1/pm.c1
-rw-r--r--arch/arm/mach-omap1/time.c2
-rw-r--r--arch/arm/mach-omap1/timer32k.c2
-rw-r--r--arch/arm/mach-omap2/Kconfig65
-rw-r--r--arch/arm/mach-omap2/Makefile29
-rw-r--r--arch/arm/mach-omap2/board-3630sdp.c225
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c2
-rw-r--r--arch/arm/mach-omap2/board-flash.c2
-rw-r--r--arch/arm/mach-omap2/board-generic.c40
-rw-r--r--arch/arm/mach-omap2/board-h4.c2
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c718
-rw-r--r--arch/arm/mach-omap2/board-ldp.c3
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c10
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c756
-rw-r--r--arch/arm/mach-omap2/board-omap3stalker.c2
-rw-r--r--arch/arm/mach-omap2/board-rm680.c167
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c39
-rw-r--r--arch/arm/mach-omap2/board-rx51.c12
-rw-r--r--arch/arm/mach-omap2/board-zoom-debugboard.c139
-rw-r--r--arch/arm/mach-omap2/board-zoom-display.c71
-rw-r--r--arch/arm/mach-omap2/board-zoom-peripherals.c360
-rw-r--r--arch/arm/mach-omap2/board-zoom.c159
-rw-r--r--arch/arm/mach-omap2/board-zoom.h10
-rw-r--r--arch/arm/mach-omap2/cclock3xxx_data.c1
-rw-r--r--arch/arm/mach-omap2/clkt2xxx_apll.c4
-rw-r--r--arch/arm/mach-omap2/clkt2xxx_dpllcore.c11
-rw-r--r--arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c24
-rw-r--r--arch/arm/mach-omap2/clock.c38
-rw-r--r--arch/arm/mach-omap2/clock.h2
-rw-r--r--arch/arm/mach-omap2/clockdomain.h4
-rw-r--r--arch/arm/mach-omap2/clockdomains43xx_data.c196
-rw-r--r--arch/arm/mach-omap2/cm2xxx.c67
-rw-r--r--arch/arm/mach-omap2/cm2xxx.h8
-rw-r--r--arch/arm/mach-omap2/cm33xx.c16
-rw-r--r--arch/arm/mach-omap2/cm33xx.h12
-rw-r--r--arch/arm/mach-omap2/cm3xxx.c22
-rw-r--r--arch/arm/mach-omap2/cm3xxx.h1
-rw-r--r--arch/arm/mach-omap2/cminst44xx.c29
-rw-r--r--arch/arm/mach-omap2/cminst44xx.h26
-rw-r--r--arch/arm/mach-omap2/common.h7
-rw-r--r--arch/arm/mach-omap2/control.c54
-rw-r--r--arch/arm/mach-omap2/control.h1
-rw-r--r--arch/arm/mach-omap2/devices.c46
-rw-r--r--arch/arm/mach-omap2/display.c28
-rw-r--r--arch/arm/mach-omap2/display.h4
-rw-r--r--arch/arm/mach-omap2/drm.c24
-rw-r--r--arch/arm/mach-omap2/dss-common.c44
-rw-r--r--arch/arm/mach-omap2/dss-common.h1
-rw-r--r--arch/arm/mach-omap2/fb.c14
-rw-r--r--arch/arm/mach-omap2/gpmc.c86
-rw-r--r--arch/arm/mach-omap2/id.c20
-rw-r--r--arch/arm/mach-omap2/include/mach/timex.h5
-rw-r--r--arch/arm/mach-omap2/io.c26
-rw-r--r--arch/arm/mach-omap2/irq.c2
-rw-r--r--arch/arm/mach-omap2/mcbsp.c16
-rw-r--r--arch/arm/mach-omap2/mux.c8
-rw-r--r--arch/arm/mach-omap2/omap-pm.h2
-rw-r--r--arch/arm/mach-omap2/omap-secure.c76
-rw-r--r--arch/arm/mach-omap2/omap-secure.h17
-rw-r--r--arch/arm/mach-omap2/omap-smc.S21
-rw-r--r--arch/arm/mach-omap2/omap-smp.c7
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c18
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c57
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h163
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c643
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c1469
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_data.c1990
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c48
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_43xx_data.c758
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_54xx_data.c230
-rw-r--r--arch/arm/mach-omap2/opp.c10
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c174
-rw-r--r--arch/arm/mach-omap2/pm.c20
-rw-r--r--arch/arm/mach-omap2/pm24xx.c24
-rw-r--r--arch/arm/mach-omap2/pm34xx.c3
-rw-r--r--arch/arm/mach-omap2/powerdomain.h1
-rw-r--r--arch/arm/mach-omap2/powerdomains43xx_data.c136
-rw-r--r--arch/arm/mach-omap2/prcm43xx.h146
-rw-r--r--arch/arm/mach-omap2/prm3xxx.h8
-rw-r--r--arch/arm/mach-omap2/prm44xx_54xx.h8
-rw-r--r--arch/arm/mach-omap2/prm_common.c11
-rw-r--r--arch/arm/mach-omap2/soc.h2
-rw-r--r--arch/arm/mach-omap2/timer.c19
-rw-r--r--arch/arm/mach-orion5x/include/mach/timex.h11
-rw-r--r--arch/arm/mach-prima2/common.c11
-rw-r--r--arch/arm/mach-prima2/common.h1
-rw-r--r--arch/arm/mach-pxa/Kconfig3
-rw-r--r--arch/arm/mach-pxa/include/mach/timex.h34
-rw-r--r--arch/arm/mach-pxa/stargate2.c2
-rw-r--r--arch/arm/mach-realview/include/mach/timex.h23
-rw-r--r--arch/arm/mach-rockchip/Kconfig5
-rw-r--r--arch/arm/mach-rockchip/rockchip.c9
-rw-r--r--arch/arm/mach-rpc/include/mach/timex.h17
-rw-r--r--arch/arm/mach-s3c24xx/Kconfig3
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2412.c8
-rw-r--r--arch/arm/mach-s3c24xx/common-s3c2443.c12
-rw-r--r--arch/arm/mach-s3c24xx/common.c206
-rw-r--r--arch/arm/mach-s3c24xx/common.h5
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/timex.h24
-rw-r--r--arch/arm/mach-s3c24xx/mach-jive.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-mini2440.c2
-rw-r--r--arch/arm/mach-s3c24xx/mach-smdk2413.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-smdk2416.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-smdk2443.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-vstms.c1
-rw-r--r--arch/arm/mach-s3c64xx/Kconfig29
-rw-r--r--arch/arm/mach-s3c64xx/Makefile3
-rw-r--r--arch/arm/mach-s3c64xx/clock.c1007
-rw-r--r--arch/arm/mach-s3c64xx/common.c33
-rw-r--r--arch/arm/mach-s3c64xx/common.h12
-rw-r--r--arch/arm/mach-s3c64xx/dma.c13
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/regs-clock.h132
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/timex.h24
-rw-r--r--arch/arm/mach-s3c64xx/irq-pm.c9
-rw-r--r--arch/arm/mach-s3c64xx/mach-anw6410.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410.c14
-rw-r--r--arch/arm/mach-s3c64xx/mach-hmt.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-mini6410.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-ncp.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c85
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq.c11
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6400.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c2
-rw-r--r--arch/arm/mach-s3c64xx/pm.c21
-rw-r--r--arch/arm/mach-s3c64xx/s3c6400.c15
-rw-r--r--arch/arm/mach-s3c64xx/s3c6410.c16
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/timex.h27
-rw-r--r--arch/arm/mach-s5pc100/include/mach/timex.h24
-rw-r--r--arch/arm/mach-s5pv210/include/mach/timex.h29
-rw-r--r--arch/arm/mach-sa1100/assabet.c3
-rw-r--r--arch/arm/mach-sa1100/generic.c81
-rw-r--r--arch/arm/mach-sa1100/generic.h7
-rw-r--r--arch/arm/mach-sa1100/include/mach/gpio.h55
-rw-r--r--arch/arm/mach-sa1100/include/mach/h3xxx.h2
-rw-r--r--arch/arm/mach-sa1100/include/mach/timex.h12
-rw-r--r--arch/arm/mach-sa1100/simpad.c1
-rw-r--r--arch/arm/mach-shark/Makefile10
-rw-r--r--arch/arm/mach-shark/Makefile.boot2
-rw-r--r--arch/arm/mach-shark/core.c146
-rw-r--r--arch/arm/mach-shark/dma.c23
-rw-r--r--arch/arm/mach-shark/include/mach/debug-macro.S34
-rw-r--r--arch/arm/mach-shark/include/mach/entry-macro.S36
-rw-r--r--arch/arm/mach-shark/include/mach/framebuffer.h16
-rw-r--r--arch/arm/mach-shark/include/mach/hardware.h16
-rw-r--r--arch/arm/mach-shark/include/mach/irqs.h13
-rw-r--r--arch/arm/mach-shark/include/mach/isa-dma.h13
-rw-r--r--arch/arm/mach-shark/include/mach/memory.h26
-rw-r--r--arch/arm/mach-shark/include/mach/timex.h7
-rw-r--r--arch/arm/mach-shark/include/mach/uncompress.h50
-rw-r--r--arch/arm/mach-shark/irq.c108
-rw-r--r--arch/arm/mach-shark/leds.c117
-rw-r--r--arch/arm/mach-shark/pci.c57
-rw-r--r--arch/arm/mach-shmobile/Kconfig46
-rw-r--r--arch/arm/mach-shmobile/Makefile14
-rw-r--r--arch/arm/mach-shmobile/Makefile.boot3
-rw-r--r--arch/arm/mach-shmobile/board-ape6evm-reference.c2
-rw-r--r--arch/arm/mach-shmobile/board-ape6evm.c61
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c1
-rw-r--r--arch/arm/mach-shmobile/board-bockw-reference.c20
-rw-r--r--arch/arm/mach-shmobile/board-bockw.c418
-rw-r--r--arch/arm/mach-shmobile/board-genmai.c43
-rw-r--r--arch/arm/mach-shmobile/board-koelsch.c47
-rw-r--r--arch/arm/mach-shmobile/board-kzm9d-reference.c1
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g.c1
-rw-r--r--arch/arm/mach-shmobile/board-lager-reference.c5
-rw-r--r--arch/arm/mach-shmobile/board-lager.c87
-rw-r--r--arch/arm/mach-shmobile/board-marzen-reference.c1
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c74
-rw-r--r--arch/arm/mach-shmobile/clock-r7s72100.c202
-rw-r--r--arch/arm/mach-shmobile/clock-r8a73a4.c5
-rw-r--r--arch/arm/mach-shmobile/clock-r8a7778.c44
-rw-r--r--arch/arm/mach-shmobile/clock-r8a7779.c2
-rw-r--r--arch/arm/mach-shmobile/clock-r8a7790.c24
-rw-r--r--arch/arm/mach-shmobile/clock-r8a7791.c237
-rw-r--r--arch/arm/mach-shmobile/headsmp.S3
-rw-r--r--arch/arm/mach-shmobile/include/mach/common.h12
-rw-r--r--arch/arm/mach-shmobile/include/mach/r7s72100.h8
-rw-r--r--arch/arm/mach-shmobile/include/mach/r8a73a4.h11
-rw-r--r--arch/arm/mach-shmobile/include/mach/r8a7778.h14
-rw-r--r--arch/arm/mach-shmobile/include/mach/r8a7779.h8
-rw-r--r--arch/arm/mach-shmobile/include/mach/r8a7790.h9
-rw-r--r--arch/arm/mach-shmobile/include/mach/r8a7791.h10
-rw-r--r--arch/arm/mach-shmobile/include/mach/rcar-gen2.h8
-rw-r--r--arch/arm/mach-shmobile/include/mach/timex.h6
-rw-r--r--arch/arm/mach-shmobile/platsmp-apmu.c195
-rw-r--r--arch/arm/mach-shmobile/platsmp-scu.c30
-rw-r--r--arch/arm/mach-shmobile/platsmp.c22
-rw-r--r--arch/arm/mach-shmobile/setup-r7s72100.c88
-rw-r--r--arch/arm/mach-shmobile/setup-r8a73a4.c95
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7778.c178
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7779.c160
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7790.c94
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7791.c184
-rw-r--r--arch/arm/mach-shmobile/setup-rcar-gen2.c91
-rw-r--r--arch/arm/mach-shmobile/smp-emev2.c6
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c4
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7790.c67
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7791.c62
-rw-r--r--arch/arm/mach-shmobile/smp-sh73a0.c14
-rw-r--r--arch/arm/mach-socfpga/Kconfig1
-rw-r--r--arch/arm/mach-socfpga/socfpga.c2
-rw-r--r--arch/arm/mach-spear/Kconfig2
-rw-r--r--arch/arm/mach-spear/include/mach/timex.h19
-rw-r--r--arch/arm/mach-sti/Kconfig4
-rw-r--r--arch/arm/mach-sti/board-dt.c10
-rw-r--r--arch/arm/mach-sunxi/Kconfig6
-rw-r--r--arch/arm/mach-sunxi/sunxi.c45
-rw-r--r--arch/arm/mach-tegra/Kconfig14
-rw-r--r--arch/arm/mach-tegra/Makefile6
-rw-r--r--arch/arm/mach-tegra/board-paz00.c5
-rw-r--r--arch/arm/mach-tegra/board.h12
-rw-r--r--arch/arm/mach-tegra/common.c115
-rw-r--r--arch/arm/mach-tegra/cpuidle.c4
-rw-r--r--arch/arm/mach-tegra/flowctrl.c2
-rw-r--r--arch/arm/mach-tegra/fuse.c66
-rw-r--r--arch/arm/mach-tegra/fuse.h1
-rw-r--r--arch/arm/mach-tegra/gpio-names.h247
-rw-r--r--arch/arm/mach-tegra/hotplug.c2
-rw-r--r--arch/arm/mach-tegra/iomap.h155
-rw-r--r--arch/arm/mach-tegra/irammap.h6
-rw-r--r--arch/arm/mach-tegra/platsmp.c2
-rw-r--r--arch/arm/mach-tegra/pm.c20
-rw-r--r--arch/arm/mach-tegra/pm.h3
-rw-r--r--arch/arm/mach-tegra/pmc.c58
-rw-r--r--arch/arm/mach-tegra/pmc.h5
-rw-r--r--arch/arm/mach-tegra/powergate.c48
-rw-r--r--arch/arm/mach-tegra/reset-handler.S13
-rw-r--r--arch/arm/mach-tegra/reset.c2
-rw-r--r--arch/arm/mach-tegra/sleep-tegra20.S5
-rw-r--r--arch/arm/mach-tegra/sleep-tegra30.S54
-rw-r--r--arch/arm/mach-tegra/tegra.c73
-rw-r--r--arch/arm/mach-u300/Kconfig1
-rw-r--r--arch/arm/mach-u300/timer.c9
-rw-r--r--arch/arm/mach-ux500/Kconfig30
-rw-r--r--arch/arm/mach-ux500/Makefile5
-rw-r--r--arch/arm/mach-ux500/board-mop500-audio.c50
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c51
-rw-r--r--arch/arm/mach-ux500/board-mop500-stuib.c120
-rw-r--r--arch/arm/mach-ux500/board-mop500-u8500uib.c92
-rw-r--r--arch/arm/mach-ux500/board-mop500-uib.c133
-rw-r--r--arch/arm/mach-ux500/board-mop500.c637
-rw-r--r--arch/arm/mach-ux500/board-mop500.h16
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c124
-rw-r--r--arch/arm/mach-ux500/cpu.c19
-rw-r--r--arch/arm/mach-ux500/devices-common.c60
-rw-r--r--arch/arm/mach-ux500/devices-common.h149
-rw-r--r--arch/arm/mach-ux500/devices-db8500.c94
-rw-r--r--arch/arm/mach-ux500/devices-db8500.h110
-rw-r--r--arch/arm/mach-ux500/devices.h8
-rw-r--r--arch/arm/mach-ux500/setup.h1
-rw-r--r--arch/arm/mach-ux500/timer.c4
-rw-r--r--arch/arm/mach-ux500/usb.c135
-rw-r--r--arch/arm/mach-vexpress/Kconfig15
-rw-r--r--arch/arm/mach-vexpress/Makefile3
-rw-r--r--arch/arm/mach-vexpress/dcscb.c56
-rw-r--r--arch/arm/mach-vexpress/spc.c366
-rw-r--r--arch/arm/mach-vexpress/spc.h2
-rw-r--r--arch/arm/mach-vexpress/tc2_pm.c55
-rw-r--r--arch/arm/mach-vexpress/v2m.c14
-rw-r--r--arch/arm/mach-vt8500/Kconfig1
-rw-r--r--arch/arm/mach-vt8500/common.h24
-rw-r--r--arch/arm/mach-vt8500/vt8500.c6
-rw-r--r--arch/arm/mach-w90x900/include/mach/timex.h25
-rw-r--r--arch/arm/mach-zynq/Kconfig1
-rw-r--r--arch/arm/mach-zynq/common.c6
-rw-r--r--arch/arm/mm/Kconfig6
-rw-r--r--arch/arm/mm/abort-ev6.S5
-rw-r--r--arch/arm/mm/alignment.c9
-rw-r--r--arch/arm/mm/dma-mapping.c55
-rw-r--r--arch/arm/mm/idmap.c8
-rw-r--r--arch/arm/mm/init.c25
-rw-r--r--arch/arm/mm/mm.h2
-rw-r--r--arch/arm/mm/mmap.c6
-rw-r--r--arch/arm/mm/mmu.c82
-rw-r--r--arch/arm/mm/proc-v6.S4
-rw-r--r--arch/arm/mm/proc-v7.S4
-rw-r--r--arch/arm/net/bpf_jit_32.c7
-rw-r--r--arch/arm/plat-omap/dma.c1
-rw-r--r--arch/arm/plat-omap/include/plat/timex.h33
-rw-r--r--arch/arm/plat-samsung/devs.c5
-rw-r--r--arch/arm/plat-samsung/include/plat/cpu.h4
-rw-r--r--arch/arm/plat-samsung/include/plat/uncompress.h2
-rw-r--r--arch/arm/plat-samsung/init.c12
-rw-r--r--arch/arm/plat-samsung/s5p-irq-eint.c4
-rw-r--r--arch/arm/plat-versatile/headsmp.S2
-rw-r--r--arch/arm/vfp/vfpmodule.c6
-rw-r--r--arch/arm/xen/Makefile2
-rw-r--r--arch/arm/xen/mm.c65
-rw-r--r--arch/arm/xen/p2m.c208
-rw-r--r--arch/arm64/Kconfig15
-rw-r--r--arch/arm64/Makefile6
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/arch_timer.h42
-rw-r--r--arch/arm64/include/asm/assembler.h31
-rw-r--r--arch/arm64/include/asm/cmpxchg.h2
-rw-r--r--arch/arm64/include/asm/compat.h14
-rw-r--r--arch/arm64/include/asm/cpu_ops.h59
-rw-r--r--arch/arm64/include/asm/dma-mapping.h14
-rw-r--r--arch/arm64/include/asm/elf.h18
-rw-r--r--arch/arm64/include/asm/hwcap.h11
-rw-r--r--arch/arm64/include/asm/io.h11
-rw-r--r--arch/arm64/include/asm/irq.h1
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h5
-rw-r--r--arch/arm64/include/asm/kvm_host.h6
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h12
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h2
-rw-r--r--arch/arm64/include/asm/processor.h5
-rw-r--r--arch/arm64/include/asm/prom.h1
-rw-r--r--arch/arm64/include/asm/psci.h19
-rw-r--r--arch/arm64/include/asm/ptrace.h1
-rw-r--r--arch/arm64/include/asm/smp.h15
-rw-r--r--arch/arm64/include/asm/spinlock.h83
-rw-r--r--arch/arm64/include/asm/spinlock_types.h15
-rw-r--r--arch/arm64/include/asm/syscall.h6
-rw-r--r--arch/arm64/include/asm/virt.h3
-rw-r--r--arch/arm64/include/asm/xen/page-coherent.h47
-rw-r--r--arch/arm64/include/uapi/asm/byteorder.h4
-rw-r--r--arch/arm64/include/uapi/asm/hwcap.h1
-rw-r--r--arch/arm64/kernel/Makefile4
-rw-r--r--arch/arm64/kernel/arm64ksyms.c1
-rw-r--r--arch/arm64/kernel/cpu_ops.c87
-rw-r--r--arch/arm64/kernel/cputable.c2
-rw-r--r--arch/arm64/kernel/debug-monitors.c13
-rw-r--r--arch/arm64/kernel/head.S61
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c22
-rw-r--r--arch/arm64/kernel/irq.c61
-rw-r--r--arch/arm64/kernel/kuser32.S57
-rw-r--r--arch/arm64/kernel/perf_event.c11
-rw-r--r--arch/arm64/kernel/process.c7
-rw-r--r--arch/arm64/kernel/psci.c87
-rw-r--r--arch/arm64/kernel/setup.c80
-rw-r--r--arch/arm64/kernel/signal32.c28
-rw-r--r--arch/arm64/kernel/smp.c212
-rw-r--r--arch/arm64/kernel/smp_psci.c53
-rw-r--r--arch/arm64/kernel/smp_spin_table.c97
-rw-r--r--arch/arm64/kernel/sys32.S22
-rw-r--r--arch/arm64/kernel/time.c10
-rw-r--r--arch/arm64/kernel/vdso.c5
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S32
-rw-r--r--arch/arm64/kvm/guest.c20
-rw-r--r--arch/arm64/mm/init.c25
-rw-r--r--arch/arm64/mm/ioremap.c20
-rw-r--r--arch/arm64/mm/proc.S4
-rw-r--r--arch/arm64/xen/Makefile2
-rw-r--r--arch/avr32/boards/atngw100/evklcd10x.c8
-rw-r--r--arch/avr32/boards/atngw100/mrmt.c4
-rw-r--r--arch/avr32/boards/atstk1000/atstk1000.h2
-rw-r--r--arch/avr32/boards/atstk1000/setup.c2
-rw-r--r--arch/avr32/boards/favr-32/setup.c2
-rw-r--r--arch/avr32/boards/hammerhead/setup.c2
-rw-r--r--arch/avr32/boards/merisc/display.c2
-rw-r--r--arch/avr32/boards/mimc200/setup.c4
-rw-r--r--arch/avr32/include/asm/Kbuild1
-rw-r--r--arch/avr32/include/uapi/asm/socket.h2
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c8
-rw-r--r--arch/avr32/mach-at32ap/include/mach/board.h4
-rw-r--r--arch/blackfin/Kconfig1
-rw-r--r--arch/blackfin/include/asm/Kbuild1
-rw-r--r--arch/c6x/Kconfig7
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/c6x/include/asm/prom.h1
-rw-r--r--arch/c6x/include/asm/setup.h2
-rw-r--r--arch/c6x/kernel/devicetree.c29
-rw-r--r--arch/c6x/kernel/setup.c13
-rw-r--r--arch/c6x/kernel/vmlinux.lds.S6
-rw-r--r--arch/cris/Kconfig2
-rw-r--r--arch/cris/include/asm/Kbuild1
-rw-r--r--arch/cris/include/asm/pci.h1
-rw-r--r--arch/cris/include/uapi/asm/socket.h2
-rw-r--r--arch/frv/include/asm/Kbuild1
-rw-r--r--arch/frv/include/uapi/asm/socket.h2
-rw-r--r--arch/frv/mb93090-mb00/pci-frv.h1
-rw-r--r--arch/frv/mb93090-mb00/pci-vdk.c36
-rw-r--r--arch/h8300/Kconfig108
-rw-r--r--arch/h8300/Kconfig.cpu171
-rw-r--r--arch/h8300/Kconfig.debug68
-rw-r--r--arch/h8300/Kconfig.ide44
-rw-r--r--arch/h8300/Makefile71
-rw-r--r--arch/h8300/README38
-rw-r--r--arch/h8300/boot/Makefile22
-rw-r--r--arch/h8300/boot/compressed/Makefile37
-rw-r--r--arch/h8300/boot/compressed/head.S47
-rw-r--r--arch/h8300/boot/compressed/misc.c180
-rw-r--r--arch/h8300/boot/compressed/vmlinux.lds32
-rw-r--r--arch/h8300/boot/compressed/vmlinux.scr9
-rw-r--r--arch/h8300/defconfig42
-rw-r--r--arch/h8300/include/asm/Kbuild8
-rw-r--r--arch/h8300/include/asm/asm-offsets.h1
-rw-r--r--arch/h8300/include/asm/atomic.h146
-rw-r--r--arch/h8300/include/asm/barrier.h29
-rw-r--r--arch/h8300/include/asm/bitops.h211
-rw-r--r--arch/h8300/include/asm/bootinfo.h2
-rw-r--r--arch/h8300/include/asm/bug.h12
-rw-r--r--arch/h8300/include/asm/bugs.h16
-rw-r--r--arch/h8300/include/asm/cache.h13
-rw-r--r--arch/h8300/include/asm/cachectl.h14
-rw-r--r--arch/h8300/include/asm/cacheflush.h40
-rw-r--r--arch/h8300/include/asm/checksum.h102
-rw-r--r--arch/h8300/include/asm/cmpxchg.h60
-rw-r--r--arch/h8300/include/asm/cputime.h6
-rw-r--r--arch/h8300/include/asm/current.h25
-rw-r--r--arch/h8300/include/asm/dbg.h2
-rw-r--r--arch/h8300/include/asm/delay.h38
-rw-r--r--arch/h8300/include/asm/device.h7
-rw-r--r--arch/h8300/include/asm/div64.h1
-rw-r--r--arch/h8300/include/asm/dma.h15
-rw-r--r--arch/h8300/include/asm/elf.h101
-rw-r--r--arch/h8300/include/asm/emergency-restart.h6
-rw-r--r--arch/h8300/include/asm/fb.h12
-rw-r--r--arch/h8300/include/asm/flat.h26
-rw-r--r--arch/h8300/include/asm/fpu.h1
-rw-r--r--arch/h8300/include/asm/ftrace.h1
-rw-r--r--arch/h8300/include/asm/futex.h6
-rw-r--r--arch/h8300/include/asm/gpio-internal.h52
-rw-r--r--arch/h8300/include/asm/hardirq.h19
-rw-r--r--arch/h8300/include/asm/hw_irq.h1
-rw-r--r--arch/h8300/include/asm/io.h358
-rw-r--r--arch/h8300/include/asm/irq.h49
-rw-r--r--arch/h8300/include/asm/irq_regs.h1
-rw-r--r--arch/h8300/include/asm/irqflags.h43
-rw-r--r--arch/h8300/include/asm/kdebug.h1
-rw-r--r--arch/h8300/include/asm/kmap_types.h6
-rw-r--r--arch/h8300/include/asm/local.h6
-rw-r--r--arch/h8300/include/asm/local64.h1
-rw-r--r--arch/h8300/include/asm/mc146818rtc.h9
-rw-r--r--arch/h8300/include/asm/mmu_context.h32
-rw-r--r--arch/h8300/include/asm/mutex.h9
-rw-r--r--arch/h8300/include/asm/page.h78
-rw-r--r--arch/h8300/include/asm/page_offset.h3
-rw-r--r--arch/h8300/include/asm/param.h9
-rw-r--r--arch/h8300/include/asm/pci.h19
-rw-r--r--arch/h8300/include/asm/percpu.h6
-rw-r--r--arch/h8300/include/asm/pgalloc.h8
-rw-r--r--arch/h8300/include/asm/pgtable.h73
-rw-r--r--arch/h8300/include/asm/processor.h139
-rw-r--r--arch/h8300/include/asm/ptrace.h33
-rw-r--r--arch/h8300/include/asm/regs267x.h336
-rw-r--r--arch/h8300/include/asm/regs306x.h212
-rw-r--r--arch/h8300/include/asm/scatterlist.h6
-rw-r--r--arch/h8300/include/asm/sections.h6
-rw-r--r--arch/h8300/include/asm/segment.h49
-rw-r--r--arch/h8300/include/asm/sh_bios.h29
-rw-r--r--arch/h8300/include/asm/shm.h31
-rw-r--r--arch/h8300/include/asm/shmparam.h6
-rw-r--r--arch/h8300/include/asm/signal.h24
-rw-r--r--arch/h8300/include/asm/smp.h1
-rw-r--r--arch/h8300/include/asm/spinlock.h6
-rw-r--r--arch/h8300/include/asm/string.h44
-rw-r--r--arch/h8300/include/asm/switch_to.h50
-rw-r--r--arch/h8300/include/asm/target_time.h4
-rw-r--r--arch/h8300/include/asm/termios.h50
-rw-r--r--arch/h8300/include/asm/thread_info.h103
-rw-r--r--arch/h8300/include/asm/timer.h25
-rw-r--r--arch/h8300/include/asm/timex.h19
-rw-r--r--arch/h8300/include/asm/tlb.h8
-rw-r--r--arch/h8300/include/asm/tlbflush.h55
-rw-r--r--arch/h8300/include/asm/topology.h6
-rw-r--r--arch/h8300/include/asm/traps.h37
-rw-r--r--arch/h8300/include/asm/types.h9
-rw-r--r--arch/h8300/include/asm/uaccess.h163
-rw-r--r--arch/h8300/include/asm/ucontext.h12
-rw-r--r--arch/h8300/include/asm/unaligned.h11
-rw-r--r--arch/h8300/include/asm/unistd.h36
-rw-r--r--arch/h8300/include/asm/user.h75
-rw-r--r--arch/h8300/include/asm/virtconvert.h20
-rw-r--r--arch/h8300/include/uapi/asm/Kbuild34
-rw-r--r--arch/h8300/include/uapi/asm/auxvec.h4
-rw-r--r--arch/h8300/include/uapi/asm/bitsperlong.h1
-rw-r--r--arch/h8300/include/uapi/asm/byteorder.h6
-rw-r--r--arch/h8300/include/uapi/asm/errno.h6
-rw-r--r--arch/h8300/include/uapi/asm/fcntl.h11
-rw-r--r--arch/h8300/include/uapi/asm/ioctl.h1
-rw-r--r--arch/h8300/include/uapi/asm/ioctls.h8
-rw-r--r--arch/h8300/include/uapi/asm/ipcbuf.h1
-rw-r--r--arch/h8300/include/uapi/asm/kvm_para.h1
-rw-r--r--arch/h8300/include/uapi/asm/mman.h1
-rw-r--r--arch/h8300/include/uapi/asm/msgbuf.h31
-rw-r--r--arch/h8300/include/uapi/asm/param.h16
-rw-r--r--arch/h8300/include/uapi/asm/poll.h11
-rw-r--r--arch/h8300/include/uapi/asm/posix_types.h26
-rw-r--r--arch/h8300/include/uapi/asm/ptrace.h44
-rw-r--r--arch/h8300/include/uapi/asm/resource.h6
-rw-r--r--arch/h8300/include/uapi/asm/sembuf.h25
-rw-r--r--arch/h8300/include/uapi/asm/setup.h6
-rw-r--r--arch/h8300/include/uapi/asm/shmbuf.h42
-rw-r--r--arch/h8300/include/uapi/asm/sigcontext.h18
-rw-r--r--arch/h8300/include/uapi/asm/siginfo.h6
-rw-r--r--arch/h8300/include/uapi/asm/signal.h115
-rw-r--r--arch/h8300/include/uapi/asm/socket.h79
-rw-r--r--arch/h8300/include/uapi/asm/sockios.h13
-rw-r--r--arch/h8300/include/uapi/asm/stat.h78
-rw-r--r--arch/h8300/include/uapi/asm/statfs.h6
-rw-r--r--arch/h8300/include/uapi/asm/swab.h10
-rw-r--r--arch/h8300/include/uapi/asm/termbits.h201
-rw-r--r--arch/h8300/include/uapi/asm/termios.h44
-rw-r--r--arch/h8300/include/uapi/asm/types.h1
-rw-r--r--arch/h8300/include/uapi/asm/unistd.h330
-rw-r--r--arch/h8300/kernel/Makefile12
-rw-r--r--arch/h8300/kernel/asm-offsets.c60
-rw-r--r--arch/h8300/kernel/entry.S402
-rw-r--r--arch/h8300/kernel/gpio.c178
-rw-r--r--arch/h8300/kernel/h8300_ksyms.c100
-rw-r--r--arch/h8300/kernel/irq.c165
-rw-r--r--arch/h8300/kernel/module.c75
-rw-r--r--arch/h8300/kernel/process.c154
-rw-r--r--arch/h8300/kernel/ptrace.c168
-rw-r--r--arch/h8300/kernel/setup.c242
-rw-r--r--arch/h8300/kernel/signal.c444
-rw-r--r--arch/h8300/kernel/sys_h8300.c48
-rw-r--r--arch/h8300/kernel/syscalls.S338
-rw-r--r--arch/h8300/kernel/time.c66
-rw-r--r--arch/h8300/kernel/timer/Makefile6
-rw-r--r--arch/h8300/kernel/timer/itu.c82
-rw-r--r--arch/h8300/kernel/timer/timer16.c77
-rw-r--r--arch/h8300/kernel/timer/timer8.c102
-rw-r--r--arch/h8300/kernel/timer/tpu.c100
-rw-r--r--arch/h8300/kernel/traps.c166
-rw-r--r--arch/h8300/kernel/vmlinux.lds.S157
-rw-r--r--arch/h8300/lib/Makefile5
-rw-r--r--arch/h8300/lib/abs.S21
-rw-r--r--arch/h8300/lib/ashrdi3.c63
-rw-r--r--arch/h8300/lib/checksum.c164
-rw-r--r--arch/h8300/lib/memcpy.S84
-rw-r--r--arch/h8300/lib/memset.S61
-rw-r--r--arch/h8300/lib/romfs.S57
-rw-r--r--arch/h8300/mm/Makefile5
-rw-r--r--arch/h8300/mm/fault.c56
-rw-r--r--arch/h8300/mm/init.c155
-rw-r--r--arch/h8300/mm/kmap.c58
-rw-r--r--arch/h8300/mm/memory.c54
-rw-r--r--arch/h8300/platform/h8300h/Makefile7
-rw-r--r--arch/h8300/platform/h8300h/aki3068net/Makefile5
-rw-r--r--arch/h8300/platform/h8300h/aki3068net/crt0_ram.S110
-rw-r--r--arch/h8300/platform/h8300h/generic/Makefile5
-rw-r--r--arch/h8300/platform/h8300h/generic/crt0_ram.S107
-rw-r--r--arch/h8300/platform/h8300h/generic/crt0_rom.S122
-rw-r--r--arch/h8300/platform/h8300h/h8max/Makefile5
-rw-r--r--arch/h8300/platform/h8300h/h8max/crt0_ram.S110
-rw-r--r--arch/h8300/platform/h8300h/irq.c82
-rw-r--r--arch/h8300/platform/h8300h/ptrace_h8300h.c284
-rw-r--r--arch/h8300/platform/h8s/Makefile7
-rw-r--r--arch/h8300/platform/h8s/edosk2674/Makefile5
-rw-r--r--arch/h8300/platform/h8s/edosk2674/crt0_ram.S130
-rw-r--r--arch/h8300/platform/h8s/edosk2674/crt0_rom.S186
-rw-r--r--arch/h8300/platform/h8s/generic/Makefile5
-rw-r--r--arch/h8300/platform/h8s/generic/crt0_ram.S127
-rw-r--r--arch/h8300/platform/h8s/generic/crt0_rom.S128
-rw-r--r--arch/h8300/platform/h8s/irq.c104
-rw-r--r--arch/h8300/platform/h8s/ptrace_h8s.c84
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/hexagon/kernel/setup.c3
-rw-r--r--arch/ia64/Kconfig1
-rw-r--r--arch/ia64/include/asm/Kbuild1
-rw-r--r--arch/ia64/include/asm/io.h1
-rw-r--r--arch/ia64/include/asm/kvm_host.h6
-rw-r--r--arch/ia64/include/asm/xen/page-coherent.h38
-rw-r--r--arch/ia64/include/uapi/asm/socket.h2
-rw-r--r--arch/ia64/kernel/acpi.c38
-rw-r--r--arch/ia64/kernel/efi.c54
-rw-r--r--arch/ia64/kernel/kprobes.c2
-rw-r--r--arch/ia64/kernel/setup.c1
-rw-r--r--arch/ia64/kvm/kvm-ia64.c5
-rw-r--r--arch/m32r/include/asm/Kbuild1
-rw-r--r--arch/m32r/include/asm/mmu_context.h2
-rw-r--r--arch/m32r/include/uapi/asm/socket.h2
-rw-r--r--arch/m68k/Kconfig1
-rw-r--r--arch/m68k/emu/nfblock.c13
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/floppy.h2
-rw-r--r--arch/m68k/include/asm/sun3xflop.h2
-rw-r--r--arch/m68k/include/asm/uaccess.h7
-rw-r--r--arch/m68k/platform/68000/timers.c2
-rw-r--r--arch/m68k/platform/68360/config.c2
-rw-r--r--arch/m68k/platform/coldfire/pit.c2
-rw-r--r--arch/m68k/platform/coldfire/sltimers.c4
-rw-r--r--arch/m68k/platform/coldfire/timers.c4
-rw-r--r--arch/metag/include/asm/Kbuild1
-rw-r--r--arch/metag/include/asm/mach/arch.h2
-rw-r--r--arch/metag/include/asm/prom.h23
-rw-r--r--arch/metag/include/asm/setup.h1
-rw-r--r--arch/metag/include/asm/topology.h2
-rw-r--r--arch/metag/kernel/devtree.c83
-rw-r--r--arch/metag/kernel/irq.c52
-rw-r--r--arch/metag/kernel/setup.c7
-rw-r--r--arch/metag/mm/init.c9
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/pci.h2
-rw-r--r--arch/microblaze/include/asm/prom.h39
-rw-r--r--arch/microblaze/kernel/prom.c32
-rw-r--r--arch/microblaze/kernel/setup.c3
-rw-r--r--arch/microblaze/kernel/timer.c1
-rw-r--r--arch/microblaze/pci/pci-common.c73
-rw-r--r--arch/mips/Kbuild.platforms1
-rw-r--r--arch/mips/Kconfig28
-rw-r--r--arch/mips/Kconfig.debug8
-rw-r--r--arch/mips/Makefile14
-rw-r--r--arch/mips/alchemy/devboards/db1235.c2
-rw-r--r--arch/mips/ath79/dev-common.c6
-rw-r--r--arch/mips/bcm47xx/Makefile1
-rw-r--r--arch/mips/bcm47xx/board.c309
-rw-r--r--arch/mips/bcm47xx/nvram.c20
-rw-r--r--arch/mips/bcm47xx/prom.c27
-rw-r--r--arch/mips/bcm47xx/setup.c2
-rw-r--r--arch/mips/bcm47xx/time.c23
-rw-r--r--arch/mips/boot/compressed/Makefile6
-rw-r--r--arch/mips/boot/compressed/decompress.c13
-rw-r--r--arch/mips/boot/compressed/ld.script5
-rw-r--r--arch/mips/cavium-octeon/setup.c4
-rw-r--r--arch/mips/cobalt/Makefile1
-rw-r--r--arch/mips/cobalt/console.c20
-rw-r--r--arch/mips/cobalt/setup.c3
-rw-r--r--arch/mips/configs/db1235_defconfig1
-rw-r--r--arch/mips/configs/powertv_defconfig136
-rw-r--r--arch/mips/dec/int-handler.S8
-rw-r--r--arch/mips/dec/ioasic-irq.c43
-rw-r--r--arch/mips/dec/prom/call_o32.S2
-rw-r--r--arch/mips/dec/prom/init.c2
-rw-r--r--arch/mips/dec/prom/memory.c2
-rw-r--r--arch/mips/dec/setup.c4
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/addrspace.h4
-rw-r--r--arch/mips/include/asm/atomic.h2
-rw-r--r--arch/mips/include/asm/barrier.h6
-rw-r--r--arch/mips/include/asm/cacheops.h93
-rw-r--r--arch/mips/include/asm/dec/ioasic.h2
-rw-r--r--arch/mips/include/asm/dec/ioasic_addrs.h2
-rw-r--r--arch/mips/include/asm/dec/kn01.h12
-rw-r--r--arch/mips/include/asm/dec/kn02ca.h2
-rw-r--r--arch/mips/include/asm/dec/prom.h2
-rw-r--r--arch/mips/include/asm/elf.h1
-rw-r--r--arch/mips/include/asm/kvm_host.h7
-rw-r--r--arch/mips/include/asm/mach-ath79/ar933x_uart_platform.h18
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h110
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h2
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h12
-rw-r--r--arch/mips/include/asm/mach-dec/cpu-feature-overrides.h87
-rw-r--r--arch/mips/include/asm/mach-generic/dma-coherence.h10
-rw-r--r--arch/mips/include/asm/mach-ip27/dma-coherence.h10
-rw-r--r--arch/mips/include/asm/mach-ip32/dma-coherence.h11
-rw-r--r--arch/mips/include/asm/mach-jazz/dma-coherence.h10
-rw-r--r--arch/mips/include/asm/mach-loongson/dma-coherence.h10
-rw-r--r--arch/mips/include/asm/mach-powertv/asic.h120
-rw-r--r--arch/mips/include/asm/mach-powertv/asic_reg_map.h90
-rw-r--r--arch/mips/include/asm/mach-powertv/asic_regs.h125
-rw-r--r--arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h60
-rw-r--r--arch/mips/include/asm/mach-powertv/dma-coherence.h117
-rw-r--r--arch/mips/include/asm/mach-powertv/interrupts.h253
-rw-r--r--arch/mips/include/asm/mach-powertv/ioremap.h167
-rw-r--r--arch/mips/include/asm/mach-powertv/war.h27
-rw-r--r--arch/mips/include/asm/mips-boards/piix4.h78
-rw-r--r--arch/mips/include/asm/mmu_context.h22
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pip.h4
-rw-r--r--arch/mips/include/asm/prom.h14
-rw-r--r--arch/mips/include/asm/ptrace.h14
-rw-r--r--arch/mips/include/asm/r4kcache.h41
-rw-r--r--arch/mips/include/asm/setup.h8
-rw-r--r--arch/mips/include/asm/stackframe.h24
-rw-r--r--arch/mips/include/asm/syscall.h116
-rw-r--r--arch/mips/include/asm/thread_info.h42
-rw-r--r--arch/mips/include/asm/time.h2
-rw-r--r--arch/mips/include/asm/unistd.h7
-rw-r--r--arch/mips/include/uapi/asm/siginfo.h9
-rw-r--r--arch/mips/include/uapi/asm/socket.h2
-rw-r--r--arch/mips/kernel/Makefile3
-rw-r--r--arch/mips/kernel/cpu-probe.c32
-rw-r--r--arch/mips/kernel/csrc-powertv.c151
-rw-r--r--arch/mips/kernel/early_printk_8250.c66
-rw-r--r--arch/mips/kernel/ftrace.c33
-rw-r--r--arch/mips/kernel/genex.S14
-rw-r--r--arch/mips/kernel/irq_cpu.c2
-rw-r--r--arch/mips/kernel/module.c3
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c4
-rw-r--r--arch/mips/kernel/prom.c60
-rw-r--r--arch/mips/kernel/ptrace.c199
-rw-r--r--arch/mips/kernel/rtlx.c19
-rw-r--r--arch/mips/kernel/scall32-o32.S846
-rw-r--r--arch/mips/kernel/scall64-64.S3
-rw-r--r--arch/mips/kernel/scall64-n32.S1
-rw-r--r--arch/mips/kernel/scall64-o32.S10
-rw-r--r--arch/mips/kernel/setup.c22
-rw-r--r--arch/mips/kernel/smp-bmips.c4
-rw-r--r--arch/mips/kernel/smp.c1
-rw-r--r--arch/mips/kernel/traps.c38
-rw-r--r--arch/mips/kvm/kvm_mips.c5
-rw-r--r--arch/mips/lantiq/irq.c2
-rw-r--r--arch/mips/lantiq/prom.c1
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c2
-rw-r--r--arch/mips/mm/c-r4k.c52
-rw-r--r--arch/mips/mm/dma-default.c4
-rw-r--r--arch/mips/mm/init.c5
-rw-r--r--arch/mips/mm/tlb-funcs.S2
-rw-r--r--arch/mips/mm/tlb-r4k.c37
-rw-r--r--arch/mips/mm/tlbex.c307
-rw-r--r--arch/mips/mti-malta/malta-int.c10
-rw-r--r--arch/mips/mti-sead3/sead3-setup.c2
-rw-r--r--arch/mips/netlogic/common/smp.c2
-rw-r--r--arch/mips/netlogic/xlp/setup.c3
-rw-r--r--arch/mips/pci/fixup-lantiq.c13
-rw-r--r--arch/mips/pci/fixup-malta.c36
-rw-r--r--arch/mips/pci/pci-ar71xx.c3
-rw-r--r--arch/mips/pci/pci-ar724x.c9
-rw-r--r--arch/mips/pci/pci-rt3883.c24
-rw-r--r--arch/mips/pci/pci.c50
-rw-r--r--arch/mips/powertv/Kconfig12
-rw-r--r--arch/mips/powertv/Makefile29
-rw-r--r--arch/mips/powertv/Platform7
-rw-r--r--arch/mips/powertv/asic/Makefile21
-rw-r--r--arch/mips/powertv/asic/asic-calliope.c101
-rw-r--r--arch/mips/powertv/asic/asic-cronus.c101
-rw-r--r--arch/mips/powertv/asic/asic-gaia.c96
-rw-r--r--arch/mips/powertv/asic/asic-zeus.c101
-rw-r--r--arch/mips/powertv/asic/asic_devices.c549
-rw-r--r--arch/mips/powertv/asic/asic_int.c125
-rw-r--r--arch/mips/powertv/asic/irq_asic.c115
-rw-r--r--arch/mips/powertv/asic/prealloc-calliope.c385
-rw-r--r--arch/mips/powertv/asic/prealloc-cronus.c340
-rw-r--r--arch/mips/powertv/asic/prealloc-cronuslite.c174
-rw-r--r--arch/mips/powertv/asic/prealloc-gaia.c589
-rw-r--r--arch/mips/powertv/asic/prealloc-zeus.c304
-rw-r--r--arch/mips/powertv/asic/prealloc.h70
-rw-r--r--arch/mips/powertv/init.c90
-rw-r--r--arch/mips/powertv/init.h28
-rw-r--r--arch/mips/powertv/ioremap.c136
-rw-r--r--arch/mips/powertv/memory.c353
-rw-r--r--arch/mips/powertv/pci/Makefile19
-rw-r--r--arch/mips/powertv/pci/fixup-powertv.c37
-rw-r--r--arch/mips/powertv/pci/powertv-pci.h31
-rw-r--r--arch/mips/powertv/powertv-clock.h26
-rw-r--r--arch/mips/powertv/powertv-usb.c404
-rw-r--r--arch/mips/powertv/powertv_setup.c319
-rw-r--r--arch/mips/powertv/reset.c35
-rw-r--r--arch/mips/powertv/reset.h26
-rw-r--r--arch/mips/powertv/time.c36
-rw-r--r--arch/mips/ralink/clk.c2
-rw-r--r--arch/mips/ralink/mt7620.c2
-rw-r--r--arch/mips/ralink/of.c3
-rw-r--r--arch/mips/ralink/rt305x.c2
-rw-r--r--arch/mips/ralink/timer.c2
-rw-r--r--arch/mn10300/include/asm/Kbuild1
-rw-r--r--arch/mn10300/include/asm/mmu_context.h2
-rw-r--r--arch/mn10300/include/asm/pci.h1
-rw-r--r--arch/mn10300/include/uapi/asm/socket.h2
-rw-r--r--arch/mn10300/kernel/setup.c3
-rw-r--r--arch/mn10300/unit-asb2305/pci-asb2305.h1
-rw-r--r--arch/mn10300/unit-asb2305/pci.c5
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/prom.h22
-rw-r--r--arch/openrisc/kernel/prom.c70
-rw-r--r--arch/openrisc/kernel/setup.c6
-rw-r--r--arch/parisc/Kconfig5
-rw-r--r--arch/parisc/Makefile22
-rw-r--r--arch/parisc/configs/712_defconfig2
-rw-r--r--arch/parisc/configs/a500_defconfig2
-rw-r--r--arch/parisc/configs/b180_defconfig3
-rw-r--r--arch/parisc/configs/c3000_defconfig3
-rw-r--r--arch/parisc/configs/c8000_defconfig2
-rw-r--r--arch/parisc/configs/default_defconfig2
-rw-r--r--arch/parisc/configs/generic-32bit_defconfig328
-rw-r--r--arch/parisc/configs/generic-64bit_defconfig345
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/include/asm/assembly.h12
-rw-r--r--arch/parisc/include/asm/ptrace.h4
-rw-r--r--arch/parisc/include/asm/socket.h10
-rw-r--r--arch/parisc/include/asm/thread_info.h4
-rw-r--r--arch/parisc/include/asm/uaccess.h7
-rw-r--r--arch/parisc/include/uapi/asm/socket.h11
-rw-r--r--arch/parisc/install.sh44
-rw-r--r--arch/parisc/kernel/Makefile1
-rw-r--r--arch/parisc/kernel/audit.c81
-rw-r--r--arch/parisc/kernel/cache.c1
-rw-r--r--arch/parisc/kernel/compat_audit.c40
-rw-r--r--arch/parisc/kernel/head.S4
-rw-r--r--arch/parisc/kernel/irq.c17
-rw-r--r--arch/parisc/kernel/ptrace.c26
-rw-r--r--arch/parisc/kernel/syscall.S6
-rw-r--r--arch/parisc/lib/lusercopy.S10
-rw-r--r--arch/parisc/mm/fault.c6
-rw-r--r--arch/powerpc/Kconfig14
-rw-r--r--arch/powerpc/Makefile37
-rw-r--r--arch/powerpc/boot/Makefile3
-rw-r--r--arch/powerpc/boot/dts/b4860emu.dts218
-rw-r--r--arch/powerpc/boot/dts/b4qds.dtsi51
-rw-r--r--arch/powerpc/boot/dts/c293pcie.dts1
-rw-r--r--arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/b4860si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/bsc9131si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/t4240emu.dts268
-rw-r--r--arch/powerpc/boot/dts/t4240qds.dts73
-rwxr-xr-xarch/powerpc/boot/wrapper4
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig7
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig5
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig1
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig27
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig12
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig2
-rw-r--r--arch/powerpc/configs/pseries_defconfig25
-rw-r--r--arch/powerpc/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/archrandom.h32
-rw-r--r--arch/powerpc/include/asm/checksum.h5
-rw-r--r--arch/powerpc/include/asm/disassemble.h4
-rw-r--r--arch/powerpc/include/asm/emulated_ops.h1
-rw-r--r--arch/powerpc/include/asm/exception-64s.h21
-rw-r--r--arch/powerpc/include/asm/fsl_ifc.h2
-rw-r--r--arch/powerpc/include/asm/hvsi.h16
-rw-r--r--arch/powerpc/include/asm/io.h69
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h4
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h232
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_32.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h8
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h9
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h7
-rw-r--r--arch/powerpc/include/asm/kvm_host.h57
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h107
-rw-r--r--arch/powerpc/include/asm/lppaca.h12
-rw-r--r--arch/powerpc/include/asm/machdep.h16
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h4
-rw-r--r--arch/powerpc/include/asm/opal.h109
-rw-r--r--arch/powerpc/include/asm/paca.h2
-rw-r--r--arch/powerpc/include/asm/page.h4
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h2
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h8
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h135
-rw-r--r--arch/powerpc/include/asm/processor.h91
-rw-r--r--arch/powerpc/include/asm/prom.h36
-rw-r--r--arch/powerpc/include/asm/pte-book3e.h2
-rw-r--r--arch/powerpc/include/asm/reg.h22
-rw-r--r--arch/powerpc/include/asm/reg_booke.h8
-rw-r--r--arch/powerpc/include/asm/scom.h23
-rw-r--r--arch/powerpc/include/asm/setup.h4
-rw-r--r--arch/powerpc/include/asm/sfp-machine.h2
-rw-r--r--arch/powerpc/include/asm/string.h4
-rw-r--r--arch/powerpc/include/asm/switch_to.h1
-rw-r--r--arch/powerpc/include/asm/word-at-a-time.h78
-rw-r--r--arch/powerpc/include/asm/xor.h67
-rw-r--r--arch/powerpc/include/uapi/asm/byteorder.h4
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h86
-rw-r--r--arch/powerpc/include/uapi/asm/socket.h2
-rw-r--r--arch/powerpc/kernel/align.c173
-rw-r--r--arch/powerpc/kernel/asm-offsets.c50
-rw-r--r--arch/powerpc/kernel/eeh.c9
-rw-r--r--arch/powerpc/kernel/entry_64.S40
-rw-r--r--arch/powerpc/kernel/epapr_paravirt.c1
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S6
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S30
-rw-r--r--arch/powerpc/kernel/fpu.S86
-rw-r--r--arch/powerpc/kernel/ftrace.c4
-rw-r--r--arch/powerpc/kernel/head_64.S3
-rw-r--r--arch/powerpc/kernel/head_8xx.S3
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S10
-rw-r--r--arch/powerpc/kernel/ibmebus.c14
-rw-r--r--arch/powerpc/kernel/idle_power7.S2
-rw-r--r--arch/powerpc/kernel/irq.c17
-rw-r--r--arch/powerpc/kernel/kgdb.c6
-rw-r--r--arch/powerpc/kernel/kprobes.c2
-rw-r--r--arch/powerpc/kernel/legacy_serial.c2
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c2
-rw-r--r--arch/powerpc/kernel/misc_32.S14
-rw-r--r--arch/powerpc/kernel/module.c3
-rw-r--r--arch/powerpc/kernel/module_32.c3
-rw-r--r--arch/powerpc/kernel/module_64.c19
-rw-r--r--arch/powerpc/kernel/nvram_64.c10
-rw-r--r--arch/powerpc/kernel/paca.c6
-rw-r--r--arch/powerpc/kernel/pci-common.c11
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c4
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c10
-rw-r--r--arch/powerpc/kernel/process.c67
-rw-r--r--arch/powerpc/kernel/prom.c40
-rw-r--r--arch/powerpc/kernel/prom_init.c28
-rw-r--r--arch/powerpc/kernel/ptrace.c209
-rw-r--r--arch/powerpc/kernel/ptrace32.c13
-rw-r--r--arch/powerpc/kernel/rtas_pci.c6
-rw-r--r--arch/powerpc/kernel/setup-common.c2
-rw-r--r--arch/powerpc/kernel/setup.h9
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/powerpc/kernel/signal_32.c81
-rw-r--r--arch/powerpc/kernel/signal_64.c32
-rw-r--r--arch/powerpc/kernel/smp.c12
-rw-r--r--arch/powerpc/kernel/swsusp_asm64.S4
-rw-r--r--arch/powerpc/kernel/tm.S49
-rw-r--r--arch/powerpc/kernel/traps.c56
-rw-r--r--arch/powerpc/kernel/vdso.c3
-rw-r--r--arch/powerpc/kernel/vdso32/vdso32.lds.S4
-rw-r--r--arch/powerpc/kernel/vdso64/vdso64.lds.S4
-rw-r--r--arch/powerpc/kernel/vecemu.c6
-rw-r--r--arch/powerpc/kernel/vector.S80
-rw-r--r--arch/powerpc/kernel/vio.c73
-rw-r--r--arch/powerpc/kvm/44x.c58
-rw-r--r--arch/powerpc/kvm/44x_emulate.c8
-rw-r--r--arch/powerpc/kvm/44x_tlb.c2
-rw-r--r--arch/powerpc/kvm/Kconfig29
-rw-r--r--arch/powerpc/kvm/Makefile29
-rw-r--r--arch/powerpc/kvm/book3s.c257
-rw-r--r--arch/powerpc/kvm/book3s.h34
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c73
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c16
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c181
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c106
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c24
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c1
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c18
-rw-r--r--arch/powerpc/kvm/book3s_exports.c5
-rw-r--r--arch/powerpc/kvm/book3s_hv.c389
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S3
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S618
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S32
-rw-r--r--arch/powerpc/kvm/book3s_mmu_hpte.c66
-rw-r--r--arch/powerpc/kvm/book3s_pr.c534
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c52
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S32
-rw-r--r--arch/powerpc/kvm/book3s_rtas.c1
-rw-r--r--arch/powerpc/kvm/book3s_segment.S4
-rw-r--r--arch/powerpc/kvm/book3s_xics.c7
-rw-r--r--arch/powerpc/kvm/booke.c356
-rw-r--r--arch/powerpc/kvm/booke.h29
-rw-r--r--arch/powerpc/kvm/e500.c59
-rw-r--r--arch/powerpc/kvm/e500.h2
-rw-r--r--arch/powerpc/kvm/e500_emulate.c34
-rw-r--r--arch/powerpc/kvm/e500_mmu.c4
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c6
-rw-r--r--arch/powerpc/kvm/e500mc.c58
-rw-r--r--arch/powerpc/kvm/emulate.c12
-rw-r--r--arch/powerpc/kvm/powerpc.c171
-rw-r--r--arch/powerpc/kvm/trace.h429
-rw-r--r--arch/powerpc/kvm/trace_booke.h177
-rw-r--r--arch/powerpc/kvm/trace_pr.h297
-rw-r--r--arch/powerpc/lib/Makefile21
-rw-r--r--arch/powerpc/lib/copyuser_power7.S54
-rw-r--r--arch/powerpc/lib/memcpy_power7.S55
-rw-r--r--arch/powerpc/lib/sstep.c97
-rw-r--r--arch/powerpc/lib/xor_vmx.c177
-rw-r--r--arch/powerpc/mm/hash_native_64.c46
-rw-r--r--arch/powerpc/mm/hash_utils_64.c38
-rw-r--r--arch/powerpc/mm/init_32.c5
-rw-r--r--arch/powerpc/mm/init_64.c51
-rw-r--r--arch/powerpc/mm/numa.c8
-rw-r--r--arch/powerpc/mm/pgtable.c19
-rw-r--r--arch/powerpc/net/bpf_jit.h11
-rw-r--r--arch/powerpc/net/bpf_jit_64.S9
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c38
-rw-r--r--arch/powerpc/platforms/512x/clock.c1
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c18
-rw-r--r--arch/powerpc/platforms/512x/pdm360ng.c2
-rw-r--r--arch/powerpc/platforms/52xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pic.c5
-rw-r--r--arch/powerpc/platforms/82xx/mpc8272_ads.c2
-rw-r--r--arch/powerpc/platforms/82xx/pq2fads.c2
-rw-r--r--arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c1
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c2
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig101
-rw-r--r--arch/powerpc/platforms/85xx/Makefile8
-rw-r--r--arch/powerpc/platforms/85xx/b4_qds.c102
-rw-r--r--arch/powerpc/platforms/85xx/c293pcie.c1
-rw-r--r--arch/powerpc/platforms/85xx/common.c2
-rw-r--r--arch/powerpc/platforms/85xx/corenet_ds.c96
-rw-r--r--arch/powerpc/platforms/85xx/corenet_ds.h19
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c182
-rw-r--r--arch/powerpc/platforms/85xx/p1010rdb.c2
-rw-r--r--arch/powerpc/platforms/85xx/p2041_rdb.c87
-rw-r--r--arch/powerpc/platforms/85xx/p3041_ds.c89
-rw-r--r--arch/powerpc/platforms/85xx/p4080_ds.c87
-rw-r--r--arch/powerpc/platforms/85xx/p5020_ds.c93
-rw-r--r--arch/powerpc/platforms/85xx/p5040_ds.c84
-rw-r--r--arch/powerpc/platforms/85xx/socrates_fpga_pic.c2
-rw-r--r--arch/powerpc/platforms/85xx/t4240_qds.c93
-rw-r--r--arch/powerpc/platforms/86xx/pic.c1
-rw-r--r--arch/powerpc/platforms/8xx/ep88xc.c2
-rw-r--r--arch/powerpc/platforms/8xx/mpc86xads_setup.c2
-rw-r--r--arch/powerpc/platforms/8xx/mpc885ads_setup.c2
-rw-r--r--arch/powerpc/platforms/8xx/tqm8xx_setup.c5
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype9
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_pciex.c6
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_sio.c7
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c7
-rw-r--r--arch/powerpc/platforms/cell/spu_manage.c13
-rw-r--r--arch/powerpc/platforms/chrp/nvram.c4
-rw-r--r--arch/powerpc/platforms/embedded6xx/flipper-pic.c1
-rw-r--r--arch/powerpc/platforms/embedded6xx/hlwd-pic.c3
-rw-r--r--arch/powerpc/platforms/fsl_uli1575.c12
-rw-r--r--arch/powerpc/platforms/pasemi/gpio_mdio.c1
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_base.c1
-rw-r--r--arch/powerpc/platforms/powermac/pic.c10
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig2
-rw-r--r--arch/powerpc/platforms/powernv/Makefile4
-rw-r--r--arch/powerpc/platforms/powernv/eeh-ioda.c153
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c5
-rw-r--r--arch/powerpc/platforms/powernv/opal-flash.c667
-rw-r--r--arch/powerpc/platforms/powernv/opal-lpc.c1
-rw-r--r--arch/powerpc/platforms/powernv/opal-nvram.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-rtc.c12
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S16
-rw-r--r--arch/powerpc/platforms/powernv/opal-xscom.c105
-rw-r--r--arch/powerpc/platforms/powernv/opal.c58
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c76
-rw-r--r--arch/powerpc/platforms/powernv/pci-p5ioc2.c4
-rw-r--r--arch/powerpc/platforms/powernv/pci.c68
-rw-r--r--arch/powerpc/platforms/powernv/pci.h5
-rw-r--r--arch/powerpc/platforms/powernv/rng.c125
-rw-r--r--arch/powerpc/platforms/powernv/setup.c1
-rw-r--r--arch/powerpc/platforms/pseries/Makefile2
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c43
-rw-r--r--arch/powerpc/platforms/pseries/event_sources.c8
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c2
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c59
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c12
-rw-r--r--arch/powerpc/platforms/pseries/rng.c44
-rw-r--r--arch/powerpc/platforms/wsp/scom_smp.c18
-rw-r--r--arch/powerpc/platforms/wsp/scom_wsp.c12
-rw-r--r--arch/powerpc/platforms/wsp/wsp.c13
-rw-r--r--arch/powerpc/sysdev/Kconfig2
-rw-r--r--arch/powerpc/sysdev/axonram.c21
-rw-r--r--arch/powerpc/sysdev/cpm_common.c1
-rw-r--r--arch/powerpc/sysdev/fsl_gtm.c11
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c7
-rw-r--r--arch/powerpc/sysdev/fsl_pmc.c1
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c2
-rw-r--r--arch/powerpc/sysdev/fsl_rmu.c1
-rw-r--r--arch/powerpc/sysdev/fsl_soc.h3
-rw-r--r--arch/powerpc/sysdev/mpic.c16
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c6
-rw-r--r--arch/powerpc/sysdev/mpic_msi.c8
-rw-r--r--arch/powerpc/sysdev/mpic_timer.c2
-rw-r--r--arch/powerpc/sysdev/mv64x60_dev.c2
-rw-r--r--arch/powerpc/sysdev/of_rtc.c1
-rw-r--r--arch/powerpc/sysdev/ppc4xx_soc.c1
-rw-r--r--arch/powerpc/sysdev/scom.c36
-rw-r--r--arch/powerpc/sysdev/xics/ics-opal.c17
-rw-r--r--arch/powerpc/sysdev/xilinx_intc.c1
-rw-r--r--arch/s390/Kconfig62
-rw-r--r--arch/s390/Makefile22
-rw-r--r--arch/s390/appldata/appldata_base.c18
-rw-r--r--arch/s390/configs/default_defconfig655
-rw-r--r--arch/s390/configs/gcov_defconfig618
-rw-r--r--arch/s390/configs/performance_defconfig610
-rw-r--r--arch/s390/configs/zfcpdump_defconfig86
-rw-r--r--arch/s390/crypto/aes_s390.c15
-rw-r--r--arch/s390/defconfig4
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/atomic.h190
-rw-r--r--arch/s390/include/asm/bitops.h1008
-rw-r--r--arch/s390/include/asm/compat.h5
-rw-r--r--arch/s390/include/asm/ctl_reg.h112
-rw-r--r--arch/s390/include/asm/debug.h5
-rw-r--r--arch/s390/include/asm/dis.h52
-rw-r--r--arch/s390/include/asm/fcx.h38
-rw-r--r--arch/s390/include/asm/ipl.h10
-rw-r--r--arch/s390/include/asm/kvm_host.h8
-rw-r--r--arch/s390/include/asm/mmu_context.h10
-rw-r--r--arch/s390/include/asm/page.h7
-rw-r--r--arch/s390/include/asm/pci_debug.h5
-rw-r--r--arch/s390/include/asm/pci_insn.h15
-rw-r--r--arch/s390/include/asm/percpu.h137
-rw-r--r--arch/s390/include/asm/pgtable.h4
-rw-r--r--arch/s390/include/asm/processor.h18
-rw-r--r--arch/s390/include/asm/ptrace.h7
-rw-r--r--arch/s390/include/asm/setup.h7
-rw-r--r--arch/s390/include/asm/smp.h1
-rw-r--r--arch/s390/include/asm/switch_to.h124
-rw-r--r--arch/s390/include/asm/timex.h32
-rw-r--r--arch/s390/include/asm/uaccess.h18
-rw-r--r--arch/s390/include/uapi/asm/ptrace.h4
-rw-r--r--arch/s390/include/uapi/asm/sigcontext.h1
-rw-r--r--arch/s390/include/uapi/asm/socket.h2
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/bitmap.c54
-rw-r--r--arch/s390/kernel/cache.c5
-rw-r--r--arch/s390/kernel/compat_linux.c4
-rw-r--r--arch/s390/kernel/compat_linux.h1
-rw-r--r--arch/s390/kernel/compat_signal.c93
-rw-r--r--arch/s390/kernel/crash_dump.c35
-rw-r--r--arch/s390/kernel/debug.c4
-rw-r--r--arch/s390/kernel/dis.c81
-rw-r--r--arch/s390/kernel/dumpstack.c1
-rw-r--r--arch/s390/kernel/early.c2
-rw-r--r--arch/s390/kernel/entry.h1
-rw-r--r--arch/s390/kernel/ftrace.c9
-rw-r--r--arch/s390/kernel/head.S2
-rw-r--r--arch/s390/kernel/ipl.c4
-rw-r--r--arch/s390/kernel/irq.c52
-rw-r--r--arch/s390/kernel/kprobes.c15
-rw-r--r--arch/s390/kernel/pgm_check.S2
-rw-r--r--arch/s390/kernel/process.c15
-rw-r--r--arch/s390/kernel/ptrace.c70
-rw-r--r--arch/s390/kernel/runtime_instr.c2
-rw-r--r--arch/s390/kernel/setup.c59
-rw-r--r--arch/s390/kernel/signal.c49
-rw-r--r--arch/s390/kernel/smp.c21
-rw-r--r--arch/s390/kernel/vdso.c9
-rw-r--r--arch/s390/kernel/vtime.c4
-rw-r--r--arch/s390/kvm/diag.c4
-rw-r--r--arch/s390/kvm/gaccess.h21
-rw-r--r--arch/s390/kvm/intercept.c6
-rw-r--r--arch/s390/kvm/interrupt.c9
-rw-r--r--arch/s390/kvm/kvm-s390.c119
-rw-r--r--arch/s390/kvm/kvm-s390.h9
-rw-r--r--arch/s390/kvm/priv.c61
-rw-r--r--arch/s390/kvm/trace.h1
-rw-r--r--arch/s390/lib/Makefile2
-rw-r--r--arch/s390/lib/delay.c14
-rw-r--r--arch/s390/lib/find.c77
-rw-r--r--arch/s390/lib/uaccess_mvcos.c30
-rw-r--r--arch/s390/lib/uaccess_pt.c2
-rw-r--r--arch/s390/lib/uaccess_std.c305
-rw-r--r--arch/s390/math-emu/math.c2
-rw-r--r--arch/s390/mm/cmm.c12
-rw-r--r--arch/s390/mm/fault.c46
-rw-r--r--arch/s390/mm/gup.c83
-rw-r--r--arch/s390/mm/mmap.c12
-rw-r--r--arch/s390/mm/pageattr.c4
-rw-r--r--arch/s390/mm/pgtable.c43
-rw-r--r--arch/s390/net/bpf_jit_comp.c10
-rw-r--r--arch/s390/pci/pci.c108
-rw-r--r--arch/s390/pci/pci_clp.c33
-rw-r--r--arch/s390/pci/pci_dma.c18
-rw-r--r--arch/s390/pci/pci_event.c35
-rw-r--r--arch/score/include/asm/Kbuild1
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c1
-rw-r--r--arch/sh/include/asm/Kbuild1
-rw-r--r--arch/sh/include/asm/hw_breakpoint.h12
-rw-r--r--arch/sh/include/asm/mmu_context.h2
-rw-r--r--arch/sh/include/cpu-common/cpu/ubc.h17
-rw-r--r--arch/sh/include/cpu-sh2a/cpu/ubc.h14
-rw-r--r--arch/sh/kernel/cpu/sh2a/Makefile1
-rw-r--r--arch/sh/kernel/cpu/sh2a/ubc.c154
-rw-r--r--arch/sh/kernel/hw_breakpoint.c8
-rw-r--r--arch/sh/kernel/irq.c57
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/prom.h12
-rw-r--r--arch/sparc/include/uapi/asm/socket.h2
-rw-r--r--arch/sparc/kernel/irq_64.c31
-rw-r--r--arch/sparc/kernel/kprobes.c2
-rw-r--r--arch/sparc/kernel/prom_64.c53
-rw-r--r--arch/sparc/net/bpf_jit_comp.c1
-rw-r--r--arch/tile/include/asm/Kbuild1
-rw-r--r--arch/tile/kernel/pci.c7
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/um/kernel/exitcode.c4
-rw-r--r--arch/unicore32/Kconfig1
-rw-r--r--arch/unicore32/include/asm/Kbuild1
-rw-r--r--arch/x86/Kconfig89
-rw-r--r--arch/x86/Kconfig.debug10
-rw-r--r--arch/x86/boot/Makefile5
-rw-r--r--arch/x86/boot/boot.h10
-rw-r--r--arch/x86/boot/compressed/Makefile2
-rw-r--r--arch/x86/boot/compressed/aslr.c267
-rw-r--r--arch/x86/boot/compressed/cmdline.c2
-rw-r--r--arch/x86/boot/compressed/cpuflags.c12
-rw-r--r--arch/x86/boot/compressed/eboot.c789
-rw-r--r--arch/x86/boot/compressed/eboot.h9
-rw-r--r--arch/x86/boot/compressed/head_32.S10
-rw-r--r--arch/x86/boot/compressed/head_64.S16
-rw-r--r--arch/x86/boot/compressed/misc.c18
-rw-r--r--arch/x86/boot/compressed/misc.h37
-rw-r--r--arch/x86/boot/compressed/mkpiggy.c16
-rw-r--r--arch/x86/boot/cpucheck.c100
-rw-r--r--arch/x86/boot/cpuflags.c104
-rw-r--r--arch/x86/boot/cpuflags.h19
-rw-r--r--arch/x86/boot/tools/build.c40
-rw-r--r--arch/x86/crypto/Makefile3
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c2
-rw-r--r--arch/x86/crypto/camellia_aesni_avx2_glue.c2
-rw-r--r--arch/x86/crypto/camellia_aesni_avx_glue.c2
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c2
-rw-r--r--arch/x86/crypto/cast6_avx_glue.c2
-rw-r--r--arch/x86/crypto/serpent_avx2_glue.c2
-rw-r--r--arch/x86/crypto/serpent_avx_glue.c2
-rw-r--r--arch/x86/crypto/serpent_sse2_glue.c2
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c4
-rw-r--r--arch/x86/crypto/twofish_avx_glue.c2
-rw-r--r--arch/x86/include/asm/acpi.h1
-rw-r--r--arch/x86/include/asm/archrandom.h21
-rw-r--r--arch/x86/include/asm/atomic.h29
-rw-r--r--arch/x86/include/asm/atomic64_64.h28
-rw-r--r--arch/x86/include/asm/bitops.h24
-rw-r--r--arch/x86/include/asm/calling.h50
-rw-r--r--arch/x86/include/asm/efi.h2
-rw-r--r--arch/x86/include/asm/intel-mid.h113
-rw-r--r--arch/x86/include/asm/intel_mid_vrtc.h (renamed from arch/x86/include/asm/mrst-vrtc.h)4
-rw-r--r--arch/x86/include/asm/kdebug.h2
-rw-r--r--arch/x86/include/asm/kvm_emulate.h10
-rw-r--r--arch/x86/include/asm/kvm_host.h20
-rw-r--r--arch/x86/include/asm/local.h28
-rw-r--r--arch/x86/include/asm/mce.h1
-rw-r--r--arch/x86/include/asm/misc.h6
-rw-r--r--arch/x86/include/asm/mpspec.h2
-rw-r--r--arch/x86/include/asm/mrst.h81
-rw-r--r--arch/x86/include/asm/msr.h22
-rw-r--r--arch/x86/include/asm/page_64_types.h15
-rw-r--r--arch/x86/include/asm/percpu.h11
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h2
-rw-r--r--arch/x86/include/asm/preempt.h100
-rw-r--r--arch/x86/include/asm/prom.h5
-rw-r--r--arch/x86/include/asm/rmwcc.h41
-rw-r--r--arch/x86/include/asm/setup.h4
-rw-r--r--arch/x86/include/asm/simd.h11
-rw-r--r--arch/x86/include/asm/thread_info.h5
-rw-r--r--arch/x86/include/asm/uaccess.h98
-rw-r--r--arch/x86/include/asm/uaccess_32.h29
-rw-r--r--arch/x86/include/asm/uaccess_64.h52
-rw-r--r--arch/x86/include/asm/uv/uv.h10
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h57
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h31
-rw-r--r--arch/x86/include/asm/xen/page-coherent.h38
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h2
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h19
-rw-r--r--arch/x86/include/uapi/asm/kvm.h6
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h3
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/acpi/boot.c90
-rw-r--r--arch/x86/kernel/acpi/sleep.c11
-rw-r--r--arch/x86/kernel/acpi/sleep.h2
-rw-r--r--arch/x86/kernel/acpi/wakeup_32.S2
-rw-r--r--arch/x86/kernel/acpi/wakeup_64.S2
-rw-r--r--arch/x86/kernel/alternative.c11
-rw-r--r--arch/x86/kernel/apb_timer.c10
-rw-r--r--arch/x86/kernel/apic/apic.c8
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c70
-rw-r--r--arch/x86/kernel/asm-offsets.c1
-rw-r--r--arch/x86/kernel/cpu/amd.c8
-rw-r--r--arch/x86/kernel/cpu/centaur.c8
-rw-r--r--arch/x86/kernel/cpu/common.c17
-rw-r--r--arch/x86/kernel/cpu/cpu.h20
-rw-r--r--arch/x86/kernel/cpu/intel.c12
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-apei.c3
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c27
-rw-r--r--arch/x86/kernel/cpu/perf_event.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event.h6
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c78
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c203
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_lbr.c29
-rw-r--r--arch/x86/kernel/cpu/rdrand.c14
-rw-r--r--arch/x86/kernel/cpu/scattered.c2
-rw-r--r--arch/x86/kernel/cpu/umc.c4
-rw-r--r--arch/x86/kernel/devicetree.c51
-rw-r--r--arch/x86/kernel/dumpstack.c11
-rw-r--r--arch/x86/kernel/early_printk.c9
-rw-r--r--arch/x86/kernel/entry_32.S7
-rw-r--r--arch/x86/kernel/entry_64.S8
-rw-r--r--arch/x86/kernel/head32.c4
-rw-r--r--arch/x86/kernel/i386_ksyms_32.c7
-rw-r--r--arch/x86/kernel/i8259.c3
-rw-r--r--arch/x86/kernel/irq_32.c34
-rw-r--r--arch/x86/kernel/irq_64.c21
-rw-r--r--arch/x86/kernel/jump_label.c25
-rw-r--r--arch/x86/kernel/kvm.c2
-rw-r--r--arch/x86/kernel/msr.c2
-rw-r--r--arch/x86/kernel/nmi.c4
-rw-r--r--arch/x86/kernel/preempt.S25
-rw-r--r--arch/x86/kernel/process.c6
-rw-r--r--arch/x86/kernel/process_32.c8
-rw-r--r--arch/x86/kernel/process_64.c10
-rw-r--r--arch/x86/kernel/reboot.c279
-rw-r--r--arch/x86/kernel/rtc.c12
-rw-r--r--arch/x86/kernel/setup.c27
-rw-r--r--arch/x86/kernel/smpboot.c58
-rw-r--r--arch/x86/kernel/topology.c11
-rw-r--r--arch/x86/kernel/traps.c4
-rw-r--r--arch/x86/kernel/vmlinux.lds.S9
-rw-r--r--arch/x86/kernel/x8664_ksyms_64.c7
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--arch/x86/kvm/Makefile2
-rw-r--r--arch/x86/kvm/cpuid.c111
-rw-r--r--arch/x86/kvm/cpuid.h5
-rw-r--r--arch/x86/kvm/emulate.c109
-rw-r--r--arch/x86/kvm/mmu.c115
-rw-r--r--arch/x86/kvm/mmu.h4
-rw-r--r--arch/x86/kvm/svm.c8
-rw-r--r--arch/x86/kvm/vmx.c155
-rw-r--r--arch/x86/kvm/x86.c101
-rw-r--r--arch/x86/kvm/x86.h1
-rw-r--r--arch/x86/lib/Makefile2
-rw-r--r--arch/x86/lib/misc.c21
-rw-r--r--arch/x86/lib/msr-smp.c62
-rw-r--r--arch/x86/lib/usercopy.c43
-rw-r--r--arch/x86/lib/usercopy_32.c8
-rw-r--r--arch/x86/mm/fault.c43
-rw-r--r--arch/x86/mm/init.c23
-rw-r--r--arch/x86/mm/init_32.c3
-rw-r--r--arch/x86/net/bpf_jit_comp.c18
-rw-r--r--arch/x86/pci/Makefile2
-rw-r--r--arch/x86/pci/acpi.c8
-rw-r--r--arch/x86/pci/fixup.c18
-rw-r--r--arch/x86/pci/intel_mid_pci.c (renamed from arch/x86/pci/mrst.c)20
-rw-r--r--arch/x86/platform/Makefile2
-rw-r--r--arch/x86/platform/efi/Makefile1
-rw-r--r--arch/x86/platform/efi/early_printk.c191
-rw-r--r--arch/x86/platform/efi/efi.c126
-rw-r--r--arch/x86/platform/geode/alix.c2
-rw-r--r--arch/x86/platform/geode/geos.c2
-rw-r--r--arch/x86/platform/geode/net5501.c2
-rw-r--r--arch/x86/platform/intel-mid/Makefile7
-rw-r--r--arch/x86/platform/intel-mid/device_libs/Makefile22
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_bma023.c20
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_emc1403.c41
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c83
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_ipc.c68
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_ipc.h17
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_lis331.c39
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_max3111.c35
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_max7315.c79
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c36
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic.c87
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic.h19
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c47
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c37
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c48
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c49
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c36
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c37
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c54
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c36
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_tca6416.c57
-rw-r--r--arch/x86/platform/intel-mid/early_printk_intel_mid.c (renamed from arch/x86/platform/mrst/early_printk_mrst.c)11
-rw-r--r--arch/x86/platform/intel-mid/intel-mid.c213
-rw-r--r--arch/x86/platform/intel-mid/intel_mid_vrtc.c (renamed from arch/x86/platform/mrst/vrtc.c)19
-rw-r--r--arch/x86/platform/intel-mid/sfi.c488
-rw-r--r--arch/x86/platform/mrst/Makefile3
-rw-r--r--arch/x86/platform/mrst/mrst.c1052
-rw-r--r--arch/x86/platform/olpc/olpc-xo15-sci.c9
-rw-r--r--arch/x86/platform/uv/Makefile2
-rw-r--r--arch/x86/platform/uv/uv_nmi.c711
-rw-r--r--arch/x86/tools/relocs.c20
-rw-r--r--arch/x86/xen/mmu.c15
-rw-r--r--arch/x86/xen/p2m.c6
-rw-r--r--arch/xtensa/include/asm/Kbuild1
-rw-r--r--arch/xtensa/include/asm/prom.h6
-rw-r--r--arch/xtensa/include/uapi/asm/socket.h2
-rw-r--r--arch/xtensa/kernel/entry.S49
-rw-r--r--arch/xtensa/kernel/setup.c55
-rw-r--r--arch/xtensa/kernel/signal.c2
-rw-r--r--arch/xtensa/platforms/iss/network.c3
-rw-r--r--block/Makefile5
-rw-r--r--block/blk-core.c207
-rw-r--r--block/blk-exec.c14
-rw-r--r--block/blk-flush.c156
-rw-r--r--block/blk-integrity.c40
-rw-r--r--block/blk-iopoll.c6
-rw-r--r--block/blk-lib.c22
-rw-r--r--block/blk-map.c6
-rw-r--r--block/blk-merge.c66
-rw-r--r--block/blk-mq-cpu.c93
-rw-r--r--block/blk-mq-cpumap.c108
-rw-r--r--block/blk-mq-sysfs.c384
-rw-r--r--block/blk-mq-tag.c204
-rw-r--r--block/blk-mq-tag.h27
-rw-r--r--block/blk-mq.c1500
-rw-r--r--block/blk-mq.h52
-rw-r--r--block/blk-settings.c9
-rw-r--r--block/blk-softirq.c8
-rw-r--r--block/blk-sysfs.c13
-rw-r--r--block/blk-throttle.c14
-rw-r--r--block/blk-timeout.c74
-rw-r--r--block/blk.h17
-rw-r--r--block/elevator.c24
-rw-r--r--block/scsi_ioctl.c39
-rw-r--r--crypto/Kconfig42
-rw-r--r--crypto/Makefile9
-rw-r--r--crypto/ablk_helper.c (renamed from arch/x86/crypto/ablk_helper.c)13
-rw-r--r--crypto/ablkcipher.c21
-rw-r--r--crypto/ansi_cprng.c4
-rw-r--r--crypto/asymmetric_keys/Kconfig3
-rw-r--r--crypto/asymmetric_keys/asymmetric_type.c1
-rw-r--r--crypto/asymmetric_keys/public_key.c66
-rw-r--r--crypto/asymmetric_keys/public_key.h6
-rw-r--r--crypto/asymmetric_keys/rsa.c19
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c35
-rw-r--r--crypto/asymmetric_keys/x509_parser.h18
-rw-r--r--crypto/asymmetric_keys/x509_public_key.c232
-rw-r--r--crypto/async_tx/async_tx.c4
-rw-r--r--crypto/authenc.c54
-rw-r--r--crypto/authencesn.c34
-rw-r--r--crypto/ccm.c4
-rw-r--r--crypto/gcm.c2
-rw-r--r--crypto/hash_info.c56
-rw-r--r--crypto/memneq.c138
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig41
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/ac.c256
-rw-r--r--drivers/acpi/acpi_extlog.c327
-rw-r--r--drivers/acpi/acpi_ipmi.c580
-rw-r--r--drivers/acpi/acpi_lpss.c12
-rw-r--r--drivers/acpi/acpi_memhotplug.c7
-rw-r--r--drivers/acpi/acpi_platform.c7
-rw-r--r--drivers/acpi/acpi_processor.c28
-rw-r--r--drivers/acpi/acpica/acdebug.h8
-rw-r--r--drivers/acpi/acpica/acevents.h9
-rw-r--r--drivers/acpi/acpica/acglobal.h20
-rw-r--r--drivers/acpi/acpica/aclocal.h11
-rw-r--r--drivers/acpi/acpica/acmacros.h31
-rw-r--r--drivers/acpi/acpica/acnamesp.h6
-rw-r--r--drivers/acpi/acpica/acutils.h17
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c5
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c10
-rw-r--r--drivers/acpi/acpica/dswexec.c6
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c3
-rw-r--r--drivers/acpi/acpica/evgpeblk.c6
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c4
-rw-r--r--drivers/acpi/acpica/evhandler.c4
-rw-r--r--drivers/acpi/acpica/evmisc.c14
-rw-r--r--drivers/acpi/acpica/evregion.c29
-rw-r--r--drivers/acpi/acpica/evsci.c79
-rw-r--r--drivers/acpi/acpica/evxface.c148
-rw-r--r--drivers/acpi/acpica/evxfevnt.c3
-rw-r--r--drivers/acpi/acpica/evxfgpe.c9
-rw-r--r--drivers/acpi/acpica/evxfregn.c7
-rw-r--r--drivers/acpi/acpica/excreate.c8
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c8
-rw-r--r--drivers/acpi/acpica/exmisc.c4
-rw-r--r--drivers/acpi/acpica/exoparg1.c8
-rw-r--r--drivers/acpi/acpica/exoparg2.c10
-rw-r--r--drivers/acpi/acpica/exoparg3.c4
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exregion.c1
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c3
-rw-r--r--drivers/acpi/acpica/hwxface.c43
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c7
-rw-r--r--drivers/acpi/acpica/nsaccess.c7
-rw-r--r--drivers/acpi/acpica/nsdump.c143
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c7
-rw-r--r--drivers/acpi/acpica/nseval.c4
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsprepkg.c4
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c3
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c23
-rw-r--r--drivers/acpi/acpica/nsxfname.c7
-rw-r--r--drivers/acpi/acpica/nsxfobj.c7
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psxface.c6
-rw-r--r--drivers/acpi/acpica/rsmisc.c4
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/rsxface.c3
-rw-r--r--drivers/acpi/acpica/tbinstal.c18
-rw-r--r--drivers/acpi/acpica/tbprint.c18
-rw-r--r--drivers/acpi/acpica/tbutils.c5
-rw-r--r--drivers/acpi/acpica/tbxface.c16
-rw-r--r--drivers/acpi/acpica/tbxfload.c11
-rw-r--r--drivers/acpi/acpica/tbxfroot.c5
-rw-r--r--drivers/acpi/acpica/utalloc.c117
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c4
-rw-r--r--drivers/acpi/acpica/utdebug.c5
-rw-r--r--drivers/acpi/acpica/utdecode.c1
-rw-r--r--drivers/acpi/acpica/utdelete.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utexcep.c3
-rw-r--r--drivers/acpi/acpica/utglobal.c20
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utobject.c28
-rw-r--r--drivers/acpi/acpica/utownerid.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c4
-rw-r--r--drivers/acpi/acpica/utstate.c1
-rw-r--r--drivers/acpi/acpica/utstring.c66
-rw-r--r--drivers/acpi/acpica/uttrack.c31
-rw-r--r--drivers/acpi/acpica/utxface.c45
-rw-r--r--drivers/acpi/acpica/utxferror.c3
-rw-r--r--drivers/acpi/acpica/utxfinit.c18
-rw-r--r--drivers/acpi/apei/Kconfig2
-rw-r--r--drivers/acpi/apei/Makefile2
-rw-r--r--drivers/acpi/apei/apei-base.c6
-rw-r--r--drivers/acpi/apei/apei-internal.h12
-rw-r--r--drivers/acpi/apei/ghes.c58
-rw-r--r--drivers/acpi/battery.c328
-rw-r--r--drivers/acpi/blacklist.c61
-rw-r--r--drivers/acpi/bus.c21
-rw-r--r--drivers/acpi/button.c9
-rw-r--r--drivers/acpi/cm_sbs.c105
-rw-r--r--drivers/acpi/device_pm.c8
-rw-r--r--drivers/acpi/dock.c6
-rw-r--r--drivers/acpi/ec.c49
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/numa.c4
-rw-r--r--drivers/acpi/osl.c48
-rw-r--r--drivers/acpi/pci_root.c246
-rw-r--r--drivers/acpi/proc.c305
-rw-r--r--drivers/acpi/processor_core.c26
-rw-r--r--drivers/acpi/processor_driver.c4
-rw-r--r--drivers/acpi/processor_idle.c61
-rw-r--r--drivers/acpi/processor_perflib.c22
-rw-r--r--drivers/acpi/sbs.c325
-rw-r--r--drivers/acpi/sysfs.c18
-rw-r--r--drivers/acpi/thermal.c53
-rw-r--r--drivers/acpi/utils.c21
-rw-r--r--drivers/acpi/video.c465
-rw-r--r--drivers/acpi/video_detect.c12
-rw-r--r--drivers/amba/bus.c6
-rw-r--r--drivers/ata/ahci.c6
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_imx.c101
-rw-r--r--drivers/ata/ahci_platform.c5
-rw-r--r--drivers/ata/ata_piix.c19
-rw-r--r--drivers/ata/libahci.c37
-rw-r--r--drivers/ata/libata-core.c1
-rw-r--r--drivers/ata/libata-eh.c19
-rw-r--r--drivers/ata/libata-transport.c16
-rw-r--r--drivers/ata/pata_isapnp.c2
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c5
-rw-r--r--drivers/ata/pata_octeon_cf.c5
-rw-r--r--drivers/ata/sata_fsl.c2
-rw-r--r--drivers/ata/sata_highbank.c8
-rw-r--r--drivers/ata/sata_rcar.c10
-rw-r--r--drivers/atm/firestream.h1
-rw-r--r--drivers/auxdisplay/cfag12864bfb.c3
-rw-r--r--drivers/base/bus.c82
-rw-r--r--drivers/base/class.c29
-rw-r--r--drivers/base/core.c88
-rw-r--r--drivers/base/cpu.c39
-rw-r--r--drivers/base/devres.c31
-rw-r--r--drivers/base/dma-contiguous.c2
-rw-r--r--drivers/base/firmware_class.c38
-rw-r--r--drivers/base/platform.c17
-rw-r--r--drivers/base/power/main.c73
-rw-r--r--drivers/base/power/opp.c115
-rw-r--r--drivers/base/power/runtime.c5
-rw-r--r--drivers/base/regmap/Kconfig5
-rw-r--r--drivers/base/regmap/Makefile1
-rw-r--r--drivers/base/regmap/internal.h8
-rw-r--r--drivers/base/regmap/regcache.c19
-rw-r--r--drivers/base/regmap/regmap-debugfs.c57
-rw-r--r--drivers/base/regmap/regmap-irq.c16
-rw-r--r--drivers/base/regmap/regmap-spi.c3
-rw-r--r--drivers/base/regmap/regmap-spmi.c90
-rw-r--r--drivers/base/regmap/regmap.c366
-rw-r--r--drivers/bcma/host_pci.c8
-rw-r--r--drivers/bcma/main.c23
-rw-r--r--drivers/block/Kconfig17
-rw-r--r--drivers/block/Makefile3
-rw-r--r--drivers/block/aoe/aoe.h10
-rw-r--r--drivers/block/aoe/aoecmd.c153
-rw-r--r--drivers/block/brd.c18
-rw-r--r--drivers/block/cciss.c2
-rw-r--r--drivers/block/drbd/drbd_actlog.c2
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/drbd/drbd_int.h3
-rw-r--r--drivers/block/drbd/drbd_main.c46
-rw-r--r--drivers/block/drbd/drbd_nl.c6
-rw-r--r--drivers/block/drbd/drbd_receiver.c64
-rw-r--r--drivers/block/drbd/drbd_req.c9
-rw-r--r--drivers/block/drbd/drbd_req.h2
-rw-r--r--drivers/block/drbd/drbd_worker.c8
-rw-r--r--drivers/block/floppy.c20
-rw-r--r--drivers/block/loop.c211
-rw-r--r--drivers/block/mg_disk.c2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c520
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h18
-rw-r--r--drivers/block/nbd.c14
-rw-r--r--drivers/block/null_blk.c635
-rw-r--r--drivers/block/nvme-core.c152
-rw-r--r--drivers/block/pktcdvd.c182
-rw-r--r--drivers/block/ps3disk.c17
-rw-r--r--drivers/block/ps3vram.c10
-rw-r--r--drivers/block/rbd.c91
-rw-r--r--drivers/block/rsxx/core.c8
-rw-r--r--drivers/block/rsxx/dev.c14
-rw-r--r--drivers/block/rsxx/dma.c134
-rw-r--r--drivers/block/rsxx/rsxx_priv.h11
-rw-r--r--drivers/block/skd_main.c5473
-rw-r--r--drivers/block/skd_s1120.h354
-rw-r--r--drivers/block/umem.c53
-rw-r--r--drivers/block/virtio_blk.c87
-rw-r--r--drivers/block/xen-blkback/blkback.c5
-rw-r--r--drivers/block/xen-blkfront.c99
-rw-r--r--drivers/bluetooth/Makefile2
-rw-r--r--drivers/bluetooth/ath3k.c4
-rw-r--r--drivers/bluetooth/bfusb.c31
-rw-r--r--drivers/bluetooth/bluecard_cs.c30
-rw-r--r--drivers/bluetooth/bpa10x.c11
-rw-r--r--drivers/bluetooth/bt3c_cs.c30
-rw-r--r--drivers/bluetooth/btmrvl_drv.h25
-rw-r--r--drivers/bluetooth/btmrvl_main.c249
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c14
-rw-r--r--drivers/bluetooth/btsdio.c6
-rw-r--r--drivers/bluetooth/btuart_cs.c30
-rw-r--r--drivers/bluetooth/btusb.c21
-rw-r--r--drivers/bluetooth/btwilink.c9
-rw-r--r--drivers/bluetooth/dtl1_cs.c30
-rw-r--r--drivers/bluetooth/hci_bcsp.c5
-rw-r--r--drivers/bluetooth/hci_h4.c24
-rw-r--r--drivers/bluetooth/hci_h5.c2
-rw-r--r--drivers/bluetooth/hci_ldisc.c12
-rw-r--r--drivers/bluetooth/hci_ll.c14
-rw-r--r--drivers/bluetooth/hci_vhci.c179
-rw-r--r--drivers/bus/arm-cci.c623
-rw-r--r--drivers/char/bsr.c1
-rw-r--r--drivers/char/hw_random/Kconfig38
-rw-r--r--drivers/char/hw_random/Makefile3
-rw-r--r--drivers/char/hw_random/msm-rng.c197
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c141
-rw-r--r--drivers/char/hw_random/pasemi-rng.c1
-rw-r--r--drivers/char/hw_random/powernv-rng.c81
-rw-r--r--drivers/char/hw_random/pseries-rng.c19
-rw-r--r--drivers/char/hw_random/via-rng.c2
-rw-r--r--drivers/char/hw_random/virtio-rng.c4
-rw-r--r--drivers/char/raw.c4
-rw-r--r--drivers/char/tpm/Kconfig37
-rw-r--r--drivers/char/tpm/Makefile11
-rw-r--r--drivers/char/tpm/tpm-interface.c (renamed from drivers/char/tpm/tpm.c)138
-rw-r--r--drivers/char/tpm/tpm.h3
-rw-r--r--drivers/char/tpm/tpm_atmel.c2
-rw-r--r--drivers/char/tpm/tpm_eventlog.c3
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c284
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c4
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c710
-rw-r--r--drivers/char/tpm/tpm_i2c_stm_st33.c12
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c6
-rw-r--r--drivers/char/tpm/tpm_ppi.c4
-rw-r--r--drivers/char/tpm/tpm_tis.c2
-rw-r--r--drivers/char/tpm/xen-tpmfront.c2
-rw-r--r--drivers/char/virtio_console.c25
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/clk-bcm2835.c8
-rw-r--r--drivers/clk/clk-fixed-factor.c2
-rw-r--r--drivers/clk/clk-highbank.c10
-rw-r--r--drivers/clk/clk-nomadik.c182
-rw-r--r--drivers/clk/clk-prima2.c29
-rw-r--r--drivers/clk/clk-vt8500.c34
-rw-r--r--drivers/clk/mvebu/armada-370.c4
-rw-r--r--drivers/clk/mxs/clk-imx23.c15
-rw-r--r--drivers/clk/mxs/clk-imx28.c16
-rw-r--r--drivers/clk/samsung/Makefile2
-rw-r--r--drivers/clk/shmobile/Makefile3
-rw-r--r--drivers/clk/shmobile/clk-emev2.c104
-rw-r--r--drivers/clk/socfpga/clk.c2
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c11
-rw-r--r--drivers/clk/ux500/Makefile1
-rw-r--r--drivers/clk/ux500/u8500_of_clk.c559
-rw-r--r--drivers/clk/ux500/u8540_clk.c2
-rw-r--r--drivers/clk/versatile/clk-icst.c2
-rw-r--r--drivers/clocksource/Kconfig24
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_arch_timer.c55
-rw-r--r--drivers/clocksource/arm_global_timer.c3
-rw-r--r--drivers/clocksource/bcm2835_timer.c4
-rw-r--r--drivers/clocksource/clksrc-dbx500-prcmu.c5
-rw-r--r--drivers/clocksource/clksrc-of.c1
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c16
-rw-r--r--drivers/clocksource/em_sti.c4
-rw-r--r--drivers/clocksource/mxs_timer.c4
-rw-r--r--drivers/clocksource/nomadik-mtu.c4
-rw-r--r--drivers/clocksource/samsung_pwm_timer.c4
-rw-r--r--drivers/clocksource/sun4i_timer.c12
-rw-r--r--drivers/clocksource/tcb_clksrc.c61
-rw-r--r--drivers/clocksource/tegra20_timer.c8
-rw-r--r--drivers/clocksource/time-armada-370-xp.c4
-rw-r--r--drivers/clocksource/time-efm32.c275
-rw-r--r--drivers/clocksource/timer-prima2.c6
-rw-r--r--drivers/clocksource/vf_pit_timer.c4
-rw-r--r--drivers/clocksource/vt8500_timer.c2
-rw-r--r--drivers/connector/cn_proc.c18
-rw-r--r--drivers/connector/connector.c9
-rw-r--r--drivers/cpufreq/Kconfig11
-rw-r--r--drivers/cpufreq/Kconfig.arm19
-rw-r--r--drivers/cpufreq/Kconfig.powerpc6
-rw-r--r--drivers/cpufreq/Kconfig.x8615
-rw-r--r--drivers/cpufreq/Makefile6
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c60
-rw-r--r--drivers/cpufreq/arm_big_little.c453
-rw-r--r--drivers/cpufreq/arm_big_little.h5
-rw-r--r--drivers/cpufreq/arm_big_little_dt.c2
-rw-r--r--drivers/cpufreq/at32ap-cpufreq.c106
-rw-r--r--drivers/cpufreq/blackfin-cpufreq.c54
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c119
-rw-r--r--drivers/cpufreq/cpufreq-nforce2.c5
-rw-r--r--drivers/cpufreq/cpufreq.c322
-rw-r--r--drivers/cpufreq/cpufreq_governor.h5
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c1
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c11
-rw-r--r--drivers/cpufreq/cris-artpec3-cpufreq.c64
-rw-r--r--drivers/cpufreq/cris-etraxfs-cpufreq.c61
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c77
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c78
-rw-r--r--drivers/cpufreq/e_powersaver.c59
-rw-r--r--drivers/cpufreq/elanfreq.c88
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c87
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c67
-rw-r--r--drivers/cpufreq/exynos4x12-cpufreq.c69
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c67
-rw-r--r--drivers/cpufreq/freq_table.c59
-rw-r--r--drivers/cpufreq/gx-suspmod.c5
-rw-r--r--drivers/cpufreq/highbank-cpufreq.c3
-rw-r--r--drivers/cpufreq/ia64-acpi-cpufreq.c71
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c117
-rw-r--r--drivers/cpufreq/integrator-cpufreq.c75
-rw-r--r--drivers/cpufreq/intel_pstate.c219
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c107
-rw-r--r--drivers/cpufreq/longhaul.c45
-rw-r--r--drivers/cpufreq/longrun.c4
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c57
-rw-r--r--drivers/cpufreq/maple-cpufreq.c56
-rw-r--r--drivers/cpufreq/omap-cpufreq.c143
-rw-r--r--drivers/cpufreq/p4-clockmod.c53
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c52
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c15
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c53
-rw-r--r--drivers/cpufreq/pmac64-cpufreq.c70
-rw-r--r--drivers/cpufreq/powernow-k6.c67
-rw-r--r--drivers/cpufreq/powernow-k7.c42
-rw-r--r--drivers/cpufreq/powernow-k8.c52
-rw-r--r--drivers/cpufreq/ppc-corenet-cpufreq.c54
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c50
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c70
-rw-r--r--drivers/cpufreq/pxa3xx-cpufreq.c46
-rw-r--r--drivers/cpufreq/s3c2416-cpufreq.c67
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c27
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c81
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c86
-rw-r--r--drivers/cpufreq/sa1100-cpufreq.c49
-rw-r--r--drivers/cpufreq/sa1110-cpufreq.c46
-rw-r--r--drivers/cpufreq/sc520_freq.c64
-rw-r--r--drivers/cpufreq/sh-cpufreq.c22
-rw-r--r--drivers/cpufreq/sparc-us2e-cpufreq.c42
-rw-r--r--drivers/cpufreq/sparc-us3-cpufreq.c44
-rw-r--r--drivers/cpufreq/spear-cpufreq.c64
-rw-r--r--drivers/cpufreq/speedstep-centrino.c84
-rw-r--r--drivers/cpufreq/speedstep-ich.c85
-rw-r--r--drivers/cpufreq/speedstep-smi.c76
-rw-r--r--drivers/cpufreq/tegra-cpufreq.c70
-rw-r--r--drivers/cpufreq/unicore2-cpufreq.c5
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c70
-rw-r--r--drivers/cpuidle/Kconfig.arm27
-rw-r--r--drivers/cpuidle/Makefile3
-rw-r--r--drivers/cpuidle/coupled.c2
-rw-r--r--drivers/cpuidle/cpuidle-at91.c (renamed from arch/arm/mach-at91/cpuidle.c)29
-rw-r--r--drivers/cpuidle/cpuidle-calxeda.c61
-rw-r--r--drivers/cpuidle/cpuidle-ux500.c2
-rw-r--r--drivers/cpuidle/cpuidle-zynq.c17
-rw-r--r--drivers/cpuidle/cpuidle.c78
-rw-r--r--drivers/cpuidle/driver.c67
-rw-r--r--drivers/cpuidle/governor.c43
-rw-r--r--drivers/cpuidle/sysfs.c7
-rw-r--r--drivers/crypto/caam/Kconfig25
-rw-r--r--drivers/crypto/caam/Makefile4
-rw-r--r--drivers/crypto/caam/caamalg.c83
-rw-r--r--drivers/crypto/caam/caamhash.c88
-rw-r--r--drivers/crypto/caam/caamrng.c29
-rw-r--r--drivers/crypto/caam/ctrl.c423
-rw-r--r--drivers/crypto/caam/desc.h17
-rw-r--r--drivers/crypto/caam/intern.h20
-rw-r--r--drivers/crypto/caam/jr.c341
-rw-r--r--drivers/crypto/caam/jr.h5
-rw-r--r--drivers/crypto/caam/regs.h14
-rw-r--r--drivers/crypto/caam/sg_sw_sec4.h34
-rw-r--r--drivers/crypto/dcp.c49
-rw-r--r--drivers/crypto/ixp4xx_crypto.c74
-rw-r--r--drivers/crypto/mv_cesa.c14
-rw-r--r--drivers/crypto/omap-aes.c6
-rw-r--r--drivers/crypto/omap-sham.c3
-rw-r--r--drivers/crypto/picoxcell_crypto.c32
-rw-r--r--drivers/crypto/sahara.c2
-rw-r--r--drivers/crypto/talitos.c37
-rw-r--r--drivers/crypto/tegra-aes.c26
-rw-r--r--drivers/devfreq/devfreq.c29
-rw-r--r--drivers/devfreq/exynos/exynos4_bus.c29
-rw-r--r--drivers/devfreq/exynos/exynos5_bus.c57
-rw-r--r--drivers/dma/Kconfig16
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/amba-pl08x.c12
-rw-r--r--drivers/dma/at_hdmac.c2
-rw-r--r--drivers/dma/bestcomm/sram.c1
-rw-r--r--drivers/dma/coh901318.c4
-rw-r--r--drivers/dma/cppi41.c96
-rw-r--r--drivers/dma/dma-jz4740.c2
-rw-r--r--drivers/dma/dmaengine.c2
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/dw/core.c4
-rw-r--r--drivers/dma/dw/platform.c8
-rw-r--r--drivers/dma/edma.c227
-rw-r--r--drivers/dma/fsldma.c2
-rw-r--r--drivers/dma/imx-dma.c42
-rw-r--r--drivers/dma/imx-sdma.c6
-rw-r--r--drivers/dma/intel_mid_dma.c4
-rw-r--r--drivers/dma/ioat/dma.c4
-rw-r--r--drivers/dma/ioat/dma_v3.c8
-rw-r--r--drivers/dma/iop-adma.c16
-rw-r--r--drivers/dma/k3dma.c4
-rw-r--r--drivers/dma/mmp_pdma.c7
-rw-r--r--drivers/dma/mmp_tdma.c40
-rw-r--r--drivers/dma/mpc512x_dma.c2
-rw-r--r--drivers/dma/mv_xor.c6
-rw-r--r--drivers/dma/mxs-dma.c6
-rw-r--r--drivers/dma/omap-dma.c2
-rw-r--r--drivers/dma/pl330.c32
-rw-r--r--drivers/dma/ppc4xx/adma.c2
-rw-r--r--drivers/dma/s3c24xx-dma.c1350
-rw-r--r--drivers/dma/sa11x0-dma.c2
-rw-r--r--drivers/dma/sh/shdma-base.c2
-rw-r--r--drivers/dma/sh/shdmac.c4
-rw-r--r--drivers/dma/ste_dma40.c7
-rw-r--r--drivers/dma/tegra20-apb-dma.c6
-rw-r--r--drivers/dma/txx9dmac.c4
-rw-r--r--drivers/edac/amd64_edac.c48
-rw-r--r--drivers/edac/amd64_edac.h8
-rw-r--r--drivers/edac/cell_edac.c1
-rw-r--r--drivers/edac/edac_device.c9
-rw-r--r--drivers/edac/edac_mc.c6
-rw-r--r--drivers/edac/edac_pci.c8
-rw-r--r--drivers/edac/ghes_edac.c16
-rw-r--r--drivers/edac/highbank_l2_edac.c33
-rw-r--r--drivers/edac/highbank_mc_edac.c175
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/firmware/dcdbas.c32
-rw-r--r--drivers/firmware/dmi_scan.c60
-rw-r--r--drivers/firmware/efi/Kconfig3
-rw-r--r--drivers/firmware/efi/Makefile1
-rw-r--r--drivers/firmware/efi/cper.c (renamed from drivers/acpi/apei/cper.c)132
-rw-r--r--drivers/firmware/efi/efi-stub-helper.c636
-rw-r--r--drivers/firmware/efi/efi.c140
-rw-r--r--drivers/firmware/efi/efivars.c2
-rw-r--r--drivers/firmware/google/gsmi.c13
-rw-r--r--drivers/fmc/Kconfig2
-rw-r--r--drivers/gpio/gpio-davinci.c132
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c1
-rw-r--r--drivers/gpio/gpio-sa1100.c2
-rw-r--r--drivers/gpio/gpio-samsung.c42
-rw-r--r--drivers/gpio/gpio-tnetv107x.c1
-rw-r--r--drivers/gpio/gpiolib-acpi.c9
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/Kconfig73
-rw-r--r--drivers/gpu/drm/Makefile5
-rw-r--r--drivers/gpu/drm/armada/Kconfig24
-rw-r--r--drivers/gpu/drm/armada/Makefile7
-rw-r--r--drivers/gpu/drm/armada/armada_510.c87
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c1098
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h83
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c183
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h113
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c421
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c170
-rw-r--r--drivers/gpu/drm/armada/armada_fb.h24
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c202
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c611
-rw-r--r--drivers/gpu/drm/armada/armada_gem.h52
-rw-r--r--drivers/gpu/drm/armada/armada_hw.h318
-rw-r--r--drivers/gpu/drm/armada/armada_ioctlP.h18
-rw-r--r--drivers/gpu/drm/armada/armada_output.c158
-rw-r--r--drivers/gpu/drm/armada/armada_output.h39
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c477
-rw-r--r--drivers/gpu/drm/armada/armada_slave.c139
-rw-r--r--drivers/gpu/drm/armada/armada_slave.h26
-rw-r--r--drivers/gpu/drm/ast/Kconfig1
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c1
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h1
-rw-r--r--drivers/gpu/drm/ast/ast_main.c6
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig1
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c1
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h1
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c6
-rw-r--r--drivers/gpu/drm/drm_context.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c104
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c32
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c16
-rw-r--r--drivers/gpu/drm/drm_drv.c85
-rw-r--r--drivers/gpu/drm/drm_edid.c277
-rw-r--r--drivers/gpu/drm/drm_edid_load.c108
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c8
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c17
-rw-r--r--drivers/gpu/drm/drm_fops.c68
-rw-r--r--drivers/gpu/drm/drm_gem.c29
-rw-r--r--drivers/gpu/drm/drm_global.c2
-rw-r--r--drivers/gpu/drm/drm_info.c6
-rw-r--r--drivers/gpu/drm/drm_ioctl.c21
-rw-r--r--drivers/gpu/drm/drm_irq.c156
-rw-r--r--drivers/gpu/drm/drm_lock.c3
-rw-r--r--drivers/gpu/drm/drm_modes.c43
-rw-r--r--drivers/gpu/drm/drm_pci.c65
-rw-r--r--drivers/gpu/drm/drm_platform.c59
-rw-r--r--drivers/gpu/drm/drm_prime.c3
-rw-r--r--drivers/gpu/drm/drm_stub.c301
-rw-r--r--drivers/gpu/drm/drm_sysfs.c92
-rw-r--r--drivers/gpu/drm/drm_usb.c57
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c16
-rw-r--r--drivers/gpu/drm/gma500/Kconfig1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/gem.c5
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.h2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h9
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c22
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c3
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c11
-rw-r--r--drivers/gpu/drm/i915/Kconfig67
-rw-r--r--drivers/gpu/drm/i915/Makefile6
-rw-r--r--drivers/gpu/drm/i915/dvo.h11
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c1092
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c120
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c156
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h404
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c555
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c61
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c50
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c366
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c113
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c44
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c634
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h679
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c15
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c152
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h62
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c6
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c195
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h87
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c46
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c148
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1733
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c693
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h560
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c620
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h102
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.c427
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.h109
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c317
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c28
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c (renamed from drivers/gpu/drm/i915/intel_fb.c)33
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c81
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c64
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c32
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c434
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c9
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c209
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1176
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c81
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h15
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c52
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c79
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c192
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c21
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c377
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c5
-rw-r--r--drivers/gpu/drm/mga/mga_irq.c2
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c6
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c10
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c17
-rw-r--r--drivers/gpu/drm/qxl/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c33
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c6
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c56
-rw-r--r--drivers/gpu/drm/radeon/cik.c4
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c19
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c4
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/ni.c1
-rw-r--r--drivers/gpu/drm/radeon/r600.c1
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c3
-rw-r--r--drivers/gpu/drm/radeon/si.c1
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c4
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig1
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/Kconfig (renamed from drivers/gpu/host1x/drm/Kconfig)13
-rw-r--r--drivers/gpu/drm/tegra/Makefile15
-rw-r--r--drivers/gpu/drm/tegra/bus.c76
-rw-r--r--drivers/gpu/drm/tegra/dc.c (renamed from drivers/gpu/host1x/drm/dc.c)108
-rw-r--r--drivers/gpu/drm/tegra/dc.h (renamed from drivers/gpu/host1x/drm/dc.h)5
-rw-r--r--drivers/gpu/drm/tegra/drm.c714
-rw-r--r--drivers/gpu/drm/tegra/drm.h (renamed from drivers/gpu/host1x/drm/drm.h)101
-rw-r--r--drivers/gpu/drm/tegra/fb.c (renamed from drivers/gpu/host1x/drm/fb.c)38
-rw-r--r--drivers/gpu/drm/tegra/gem.c (renamed from drivers/gpu/host1x/drm/gem.c)44
-rw-r--r--drivers/gpu/drm/tegra/gem.h (renamed from drivers/gpu/host1x/drm/gem.h)16
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c227
-rw-r--r--drivers/gpu/drm/tegra/gr2d.h28
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c338
-rw-r--r--drivers/gpu/drm/tegra/gr3d.h27
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c (renamed from drivers/gpu/host1x/drm/hdmi.c)257
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h (renamed from drivers/gpu/host1x/drm/hdmi.h)152
-rw-r--r--drivers/gpu/drm/tegra/output.c (renamed from drivers/gpu/host1x/drm/output.c)64
-rw-r--r--drivers/gpu/drm/tegra/rgb.c (renamed from drivers/gpu/host1x/drm/rgb.c)19
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/udl/Kconfig1
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c1
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h1
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c7
-rw-r--r--drivers/gpu/drm/via/via_mm.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2
-rw-r--r--drivers/gpu/host1x/Kconfig2
-rw-r--r--drivers/gpu/host1x/Makefile13
-rw-r--r--drivers/gpu/host1x/bus.c550
-rw-r--r--drivers/gpu/host1x/bus.h (renamed from drivers/gpu/host1x/host1x_client.h)24
-rw-r--r--drivers/gpu/host1x/cdma.c2
-rw-r--r--drivers/gpu/host1x/channel.h6
-rw-r--r--drivers/gpu/host1x/dev.c82
-rw-r--r--drivers/gpu/host1x/dev.h11
-rw-r--r--drivers/gpu/host1x/drm/drm.c647
-rw-r--r--drivers/gpu/host1x/drm/gr2d.c343
-rw-r--r--drivers/gpu/host1x/host1x.h30
-rw-r--r--drivers/gpu/host1x/host1x_bo.h87
-rw-r--r--drivers/gpu/host1x/hw/Makefile6
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c8
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c32
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c16
-rw-r--r--drivers/gpu/host1x/hw/host1x01.c16
-rw-r--r--drivers/gpu/host1x/hw/host1x02.c42
-rw-r--r--drivers/gpu/host1x/hw/host1x02.h (renamed from arch/arm/mach-highbank/hotplug.c)31
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x01_uclass.h6
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x02_channel.h121
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x02_sync.h243
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x02_uclass.h175
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c4
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c4
-rw-r--r--drivers/gpu/host1x/job.c73
-rw-r--r--drivers/gpu/host1x/job.h108
-rw-r--r--drivers/gpu/host1x/syncpt.c92
-rw-r--r--drivers/gpu/host1x/syncpt.h46
-rw-r--r--drivers/hid/Kconfig14
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-apple.c22
-rw-r--r--drivers/hid/hid-core.c23
-rw-r--r--drivers/hid/hid-elo.c35
-rw-r--r--drivers/hid/hid-holtek-mouse.c4
-rw-r--r--drivers/hid/hid-ids.h22
-rw-r--r--drivers/hid/hid-input.c13
-rw-r--r--drivers/hid/hid-lenovo-tpkbd.c50
-rw-r--r--drivers/hid/hid-lg.c138
-rw-r--r--drivers/hid/hid-lg2ff.c2
-rw-r--r--drivers/hid/hid-logitech-dj.c14
-rw-r--r--drivers/hid/hid-multitouch.c27
-rw-r--r--drivers/hid/hid-roccat-common.c65
-rw-r--r--drivers/hid/hid-roccat-common.h62
-rw-r--r--drivers/hid/hid-roccat-konepure.c158
-rw-r--r--drivers/hid/hid-roccat-konepure.h72
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c4
-rw-r--r--drivers/hid/hid-roccat-ryos.c241
-rw-r--r--drivers/hid/hid-roccat-savu.c123
-rw-r--r--drivers/hid/hid-roccat-savu.h32
-rw-r--r--drivers/hid/hid-sensor-hub.c13
-rw-r--r--drivers/hid/hid-sony.c11
-rw-r--r--drivers/hid/hid-wiimote-core.c5
-rw-r--r--drivers/hid/hid-wiimote-modules.c117
-rw-r--r--drivers/hid/hid-wiimote.h2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c22
-rw-r--r--drivers/hid/usbhid/hid-quirks.c5
-rw-r--r--drivers/hsi/hsi.c10
-rw-r--r--drivers/hwmon/abituguru.c6
-rw-r--r--drivers/hwmon/abituguru3.c2
-rw-r--r--drivers/hwmon/acpi_power_meter.c13
-rw-r--r--drivers/hwmon/adcxx.c2
-rw-r--r--drivers/hwmon/adm1026.c6
-rw-r--r--drivers/hwmon/adt7310.c7
-rw-r--r--drivers/hwmon/adt7462.c5
-rw-r--r--drivers/hwmon/asc7621.c12
-rw-r--r--drivers/hwmon/asus_atk0110.c2
-rw-r--r--drivers/hwmon/atxp1.c3
-rw-r--r--drivers/hwmon/ds1621.c63
-rw-r--r--drivers/hwmon/emc1403.c120
-rw-r--r--drivers/hwmon/f71882fg.c1
-rw-r--r--drivers/hwmon/f75375s.c4
-rw-r--r--drivers/hwmon/gpio-fan.c46
-rw-r--r--drivers/hwmon/hwmon.c185
-rw-r--r--drivers/hwmon/ina209.c46
-rw-r--r--drivers/hwmon/ina2xx.c64
-rw-r--r--drivers/hwmon/jc42.c62
-rw-r--r--drivers/hwmon/lm70.c2
-rw-r--r--drivers/hwmon/lm73.c70
-rw-r--r--drivers/hwmon/lm90.c478
-rw-r--r--drivers/hwmon/lm95234.c138
-rw-r--r--drivers/hwmon/ltc4245.c78
-rw-r--r--drivers/hwmon/ltc4261.c56
-rw-r--r--drivers/hwmon/max16065.c124
-rw-r--r--drivers/hwmon/max6642.c73
-rw-r--r--drivers/hwmon/max6650.c2
-rw-r--r--drivers/hwmon/max6697.c55
-rw-r--r--drivers/hwmon/mc13783-adc.c2
-rw-r--r--drivers/hwmon/nct6775.c143
-rw-r--r--drivers/hwmon/pmbus/lm25066.c91
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c16
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c24
-rw-r--r--drivers/hwmon/tmp401.c92
-rw-r--r--drivers/hwmon/w83791d.c2
-rw-r--r--drivers/hwmon/w83792d.c2
-rw-r--r--drivers/hwmon/w83793.c5
-rw-r--r--drivers/i2c/busses/Kconfig11
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c6
-rw-r--r--drivers/i2c/busses/i2c-cpm.c2
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c774
-rw-r--r--drivers/i2c/busses/i2c-gpio.c1
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c2
-rw-r--r--drivers/i2c/busses/i2c-mpc.c2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c2
-rw-r--r--drivers/i2c/busses/i2c-mxs.c340
-rw-r--r--drivers/i2c/busses/i2c-pnx.c1
-rw-r--r--drivers/i2c/busses/i2c-powermac.c1
-rw-r--r--drivers/i2c/busses/i2c-rcar.c65
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c1
-rw-r--r--drivers/i2c/busses/i2c-scmi.c6
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c8
-rw-r--r--drivers/i2c/busses/i2c-xiic.c3
-rw-r--r--drivers/i2c/i2c-core.c44
-rw-r--r--drivers/i2c/i2c-dev.c19
-rw-r--r--drivers/i2c/i2c-smbus.c10
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c10
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c1
-rw-r--r--drivers/ide/Kconfig11
-rw-r--r--drivers/ide/Makefile2
-rw-r--r--drivers/ide/ide-h8300.c109
-rw-r--r--drivers/ide/ide-sysfs.c35
-rw-r--r--drivers/ide/ide.c2
-rw-r--r--drivers/idle/intel_idle.c58
-rw-r--r--drivers/infiniband/Kconfig11
-rw-r--r--drivers/infiniband/core/cma.c67
-rw-r--r--drivers/infiniband/core/netlink.c2
-rw-r--r--drivers/infiniband/core/uverbs.h2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c7
-rw-r--r--drivers/infiniband/core/uverbs_main.c6
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_sdma.c7
-rw-r--r--drivers/infiniband/hw/mlx4/main.c10
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c23
-rw-r--r--drivers/infiniband/hw/mlx5/main.c3
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h6
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c173
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c21
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c8
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c7
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c11
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h14
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c126
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c14
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c24
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c29
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c10
-rw-r--r--drivers/infiniband/ulp/isert/Kconfig4
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c426
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h21
-rw-r--r--drivers/input/Kconfig2
-rw-r--r--drivers/input/evdev.c16
-rw-r--r--drivers/input/gameport/gameport.c17
-rw-r--r--drivers/input/input.c12
-rw-r--r--drivers/input/keyboard/Kconfig6
-rw-r--r--drivers/input/keyboard/gpio_keys.c1
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c1
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c2
-rw-r--r--drivers/input/keyboard/nspire-keypad.c6
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c12
-rw-r--r--drivers/input/keyboard/tegra-kbc.c2
-rw-r--r--drivers/input/misc/Kconfig4
-rw-r--r--drivers/input/misc/ad714x-spi.c1
-rw-r--r--drivers/input/misc/cm109.c14
-rw-r--r--drivers/input/misc/cobalt_btns.c2
-rw-r--r--drivers/input/misc/pwm-beeper.c1
-rw-r--r--drivers/input/misc/rb532_button.c1
-rw-r--r--drivers/input/misc/rotary_encoder.c1
-rw-r--r--drivers/input/misc/sirfsoc-onkey.c2
-rw-r--r--drivers/input/misc/uinput.c26
-rw-r--r--drivers/input/mouse/alps.c3
-rw-r--r--drivers/input/mouse/cypress_ps2.c29
-rw-r--r--drivers/input/serio/Kconfig18
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/hyperv-keyboard.c437
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h2
-rw-r--r--drivers/input/serio/i8042.c25
-rw-r--r--drivers/input/serio/serio.c70
-rw-r--r--drivers/input/serio/xilinx_ps2.c8
-rw-r--r--drivers/input/tablet/wacom_sys.c100
-rw-r--r--drivers/input/tablet/wacom_wac.c122
-rw-r--r--drivers/input/tablet/wacom_wac.h8
-rw-r--r--drivers/input/touchscreen/Kconfig13
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/ad7877.c2
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c1
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c2
-rw-r--r--drivers/input/touchscreen/egalax_ts.c2
-rw-r--r--drivers/input/touchscreen/htcpen.c2
-rw-r--r--drivers/input/touchscreen/st1232.c1
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c2
-rw-r--r--drivers/input/touchscreen/tsc2005.c2
-rw-r--r--drivers/input/touchscreen/zforce_ts.c836
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/dmar.c4
-rw-r--r--drivers/iommu/intel_irq_remapping.c21
-rw-r--r--drivers/iommu/iommu-traces.c27
-rw-r--r--drivers/iommu/iommu.c21
-rw-r--r--drivers/iommu/tegra-gart.c25
-rw-r--r--drivers/iommu/tegra-smmu.c2
-rw-r--r--drivers/ipack/ipack.c22
-rw-r--r--drivers/irqchip/exynos-combiner.c15
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c202
-rw-r--r--drivers/irqchip/irq-gic.c151
-rw-r--r--drivers/irqchip/irq-vic.c7
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c2
-rw-r--r--drivers/isdn/hardware/eicon/um_idi.c2
-rw-r--r--drivers/isdn/sc/init.c2
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-blinkm.c3
-rw-r--r--drivers/leds/leds-dac124s085.c3
-rw-r--r--drivers/leds/leds-gpio.c7
-rw-r--r--drivers/leds/leds-lp5523.c1
-rw-r--r--drivers/leds/leds-lp5562.c1
-rw-r--r--drivers/leds/leds-lp55xx-common.c28
-rw-r--r--drivers/leds/leds-lp8501.c1
-rw-r--r--drivers/leds/leds-ns2.c1
-rw-r--r--drivers/leds/leds-pca9685.c213
-rw-r--r--drivers/leds/leds-pwm.c2
-rw-r--r--drivers/lguest/lguest_device.c3
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/macintosh/macio_asic.c2
-rw-r--r--drivers/macintosh/rack-meter.c2
-rw-r--r--drivers/macintosh/smu.c1
-rw-r--r--drivers/macintosh/via-pmu.c2
-rw-r--r--drivers/md/Kconfig62
-rw-r--r--drivers/md/Makefile7
-rw-r--r--drivers/md/bcache/Kconfig11
-rw-r--r--drivers/md/bcache/alloc.c383
-rw-r--r--drivers/md/bcache/bcache.h329
-rw-r--r--drivers/md/bcache/bset.c289
-rw-r--r--drivers/md/bcache/bset.h93
-rw-r--r--drivers/md/bcache/btree.c1393
-rw-r--r--drivers/md/bcache/btree.h195
-rw-r--r--drivers/md/bcache/closure.c103
-rw-r--r--drivers/md/bcache/closure.h183
-rw-r--r--drivers/md/bcache/debug.c199
-rw-r--r--drivers/md/bcache/debug.h50
-rw-r--r--drivers/md/bcache/io.c196
-rw-r--r--drivers/md/bcache/journal.c305
-rw-r--r--drivers/md/bcache/journal.h52
-rw-r--r--drivers/md/bcache/movinggc.c89
-rw-r--r--drivers/md/bcache/request.c1172
-rw-r--r--drivers/md/bcache/request.h43
-rw-r--r--drivers/md/bcache/stats.c26
-rw-r--r--drivers/md/bcache/stats.h13
-rw-r--r--drivers/md/bcache/super.c210
-rw-r--r--drivers/md/bcache/sysfs.c42
-rw-r--r--drivers/md/bcache/trace.c1
-rw-r--r--drivers/md/bcache/util.c16
-rw-r--r--drivers/md/bcache/util.h15
-rw-r--r--drivers/md/bcache/writeback.c461
-rw-r--r--drivers/md/bcache/writeback.h46
-rw-r--r--drivers/md/bitmap.c4
-rw-r--r--drivers/md/dm-bio-record.h37
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-cache-metadata.c207
-rw-r--r--drivers/md/dm-cache-metadata.h28
-rw-r--r--drivers/md/dm-cache-policy-cleaner.c10
-rw-r--r--drivers/md/dm-cache-policy-era.c511
-rw-r--r--drivers/md/dm-cache-policy-hints.c774
-rw-r--r--drivers/md/dm-cache-policy-internal.h29
-rw-r--r--drivers/md/dm-cache-policy-mq.c352
-rw-r--r--drivers/md/dm-cache-policy.c70
-rw-r--r--drivers/md/dm-cache-policy.h77
-rw-r--r--drivers/md/dm-cache-shim-utils.c217
-rw-r--r--drivers/md/dm-cache-shim-utils.h73
-rw-r--r--drivers/md/dm-cache-stack-utils.c241
-rw-r--r--drivers/md/dm-cache-stack-utils.h34
-rw-r--r--drivers/md/dm-cache-target.c656
-rw-r--r--drivers/md/dm-crypt.c278
-rw-r--r--drivers/md/dm-delay.c7
-rw-r--r--drivers/md/dm-flakey.c7
-rw-r--r--drivers/md/dm-io.c37
-rw-r--r--drivers/md/dm-ioctl.c36
-rw-r--r--drivers/md/dm-linear.c3
-rw-r--r--drivers/md/dm-mpath.c31
-rw-r--r--drivers/md/dm-raid1.c20
-rw-r--r--drivers/md/dm-region-hash.c3
-rw-r--r--drivers/md/dm-snap.c19
-rw-r--r--drivers/md/dm-stripe.c13
-rw-r--r--drivers/md/dm-switch.c4
-rw-r--r--drivers/md/dm-table.c23
-rw-r--r--drivers/md/dm-thin.c30
-rw-r--r--drivers/md/dm-verity.c61
-rw-r--r--drivers/md/dm.c232
-rw-r--r--drivers/md/dm.h13
-rw-r--r--drivers/md/faulty.c19
-rw-r--r--drivers/md/linear.c96
-rw-r--r--drivers/md/md.c123
-rw-r--r--drivers/md/md.h3
-rw-r--r--drivers/md/multipath.c13
-rw-r--r--drivers/md/persistent-data/dm-array.c5
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c18
-rw-r--r--drivers/md/raid0.c79
-rw-r--r--drivers/md/raid1.c99
-rw-r--r--drivers/md/raid1.h1
-rw-r--r--drivers/md/raid10.c203
-rw-r--r--drivers/md/raid5.c430
-rw-r--r--drivers/md/raid5.h18
-rw-r--r--drivers/media/common/b2c2/flexcop-sram.c6
-rw-r--r--drivers/media/common/saa7146/saa7146_core.c4
-rw-r--r--drivers/media/common/siano/smscoreapi.c8
-rw-r--r--drivers/media/common/siano/smsdvb-main.c8
-rw-r--r--drivers/media/dvb-core/dvb_demux.c17
-rw-r--r--drivers/media/dvb-frontends/Kconfig7
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/cx24110.c2
-rw-r--r--drivers/media/dvb-frontends/cx24117.c1650
-rw-r--r--drivers/media/dvb-frontends/cx24117.h47
-rw-r--r--drivers/media/dvb-frontends/cx24123.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c4
-rw-r--r--drivers/media/dvb-frontends/dib9000.c4
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c12
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c1
-rw-r--r--drivers/media/dvb-frontends/rtl2832.h1
-rw-r--r--drivers/media/dvb-frontends/tda10071.c9
-rw-r--r--drivers/media/dvb-frontends/tda8083.c4
-rw-r--r--drivers/media/dvb-frontends/ts2020.c7
-rw-r--r--drivers/media/dvb-frontends/ts2020.h1
-rw-r--r--drivers/media/i2c/Kconfig11
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/ad9389b.c15
-rw-r--r--drivers/media/i2c/adv7183.c2
-rw-r--r--drivers/media/i2c/adv7343.c1
-rw-r--r--drivers/media/i2c/adv7511.c18
-rw-r--r--drivers/media/i2c/adv7842.c30
-rw-r--r--drivers/media/i2c/lm3560.c488
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c4
-rw-r--r--drivers/media/i2c/soc_camera/imx074.c4
-rw-r--r--drivers/media/i2c/soc_camera/ov9640.c2
-rw-r--r--drivers/media/i2c/ths8200.c13
-rw-r--r--drivers/media/i2c/tvp514x.c1
-rw-r--r--drivers/media/i2c/tvp7002.c1
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c2
-rw-r--r--drivers/media/pci/bt8xx/bt878.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c2
-rw-r--r--drivers/media/pci/cx18/Kconfig1
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c23
-rw-r--r--drivers/media/pci/cx23885/Kconfig1
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c108
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c24
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c12
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c3
-rw-r--r--drivers/media/pci/cx23885/cx23885.h3
-rw-r--r--drivers/media/pci/cx25821/cx25821-cards.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-medusa-video.c18
-rw-r--r--drivers/media/pci/cx25821/cx25821-medusa-video.h6
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.c8
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c29
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c17
-rw-r--r--drivers/media/pci/cx88/cx88-video.c18
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c2
-rw-r--r--drivers/media/pci/dm1105/dm1105.c5
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/mantis/mantis_pci.c2
-rw-r--r--drivers/media/pci/meye/meye.c2
-rw-r--r--drivers/media/pci/ngene/ngene-core.c4
-rw-r--r--drivers/media/pci/pluto2/pluto2.c2
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c1
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c3
-rw-r--r--drivers/media/pci/zoran/Kconfig1
-rw-r--r--drivers/media/pci/zoran/zoran_card.c2
-rw-r--r--drivers/media/platform/Kconfig23
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/coda.c278
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c2
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c4
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h1
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c29
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c6
-rw-r--r--drivers/media/platform/fsl-viu.c2
-rw-r--r--drivers/media/platform/m2m-deinterlace.c3
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c14
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c1
-rw-r--r--drivers/media/platform/mem2mem_testdev.c3
-rw-r--r--drivers/media/platform/omap3isp/isp.c6
-rw-r--r--drivers/media/platform/omap3isp/isp.h3
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c8
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_grp_layer.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_vp_layer.c2
-rw-r--r--drivers/media/platform/sh_vou.c2
-rw-r--r--drivers/media/platform/soc_camera/Kconfig1
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c5
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c5
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c2
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c46
-rw-r--r--drivers/media/platform/ti-vpe/Makefile5
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c846
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h203
-rw-r--r--drivers/media/platform/ti-vpe/vpdma_priv.h641
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2099
-rw-r--r--drivers/media/platform/ti-vpe/vpe_regs.h496
-rw-r--r--drivers/media/platform/timblogiw.c4
-rw-r--r--drivers/media/radio/radio-keene.c2
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c5
-rw-r--r--drivers/media/radio/radio-shark.c2
-rw-r--r--drivers/media/radio/radio-shark2.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c4
-rw-r--r--drivers/media/radio/si4713-i2c.c2
-rw-r--r--drivers/media/radio/tef6862.c20
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c2
-rw-r--r--drivers/media/rc/Kconfig10
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/fintek-cir.h4
-rw-r--r--drivers/media/rc/gpio-ir-recv.c1
-rw-r--r--drivers/media/rc/iguanair.c1
-rw-r--r--drivers/media/rc/ir-rx51.c3
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-nec.c2
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-rc5.c2
-rw-r--r--drivers/media/rc/nuvoton-cir.h4
-rw-r--r--drivers/media/rc/st_rc.c395
-rw-r--r--drivers/media/rc/winbond-cir.c2
-rw-r--r--drivers/media/tuners/e4000.c3
-rw-r--r--drivers/media/tuners/fc0012.c2
-rw-r--r--drivers/media/tuners/fc0013.c2
-rw-r--r--drivers/media/tuners/r820t.c22
-rw-r--r--drivers/media/tuners/tda9887.c4
-rw-r--r--drivers/media/tuners/tuner-xc2028.c4
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c6
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c110
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c42
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.h1
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c4
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c13
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c42
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c121
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c63
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c7
-rw-r--r--drivers/media/usb/em28xx/em28xx.h2
-rw-r--r--drivers/media/usb/gspca/conex.c3
-rw-r--r--drivers/media/usb/gspca/cpia1.c4
-rw-r--r--drivers/media/usb/gspca/gspca.c48
-rw-r--r--drivers/media/usb/gspca/gspca.h10
-rw-r--r--drivers/media/usb/gspca/jeilinj.c5
-rw-r--r--drivers/media/usb/gspca/jl2005bcd.c2
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_mt9m111.c2
-rw-r--r--drivers/media/usb/gspca/mars.c7
-rw-r--r--drivers/media/usb/gspca/mr97310a.c6
-rw-r--r--drivers/media/usb/gspca/nw80x.c11
-rw-r--r--drivers/media/usb/gspca/ov519.c52
-rw-r--r--drivers/media/usb/gspca/ov534.c5
-rw-r--r--drivers/media/usb/gspca/ov534_9.c334
-rw-r--r--drivers/media/usb/gspca/pac207.c4
-rw-r--r--drivers/media/usb/gspca/pac7311.c6
-rw-r--r--drivers/media/usb/gspca/se401.c6
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c6
-rw-r--r--drivers/media/usb/gspca/sonixb.c7
-rw-r--r--drivers/media/usb/gspca/sonixj.c3
-rw-r--r--drivers/media/usb/gspca/spca1528.c3
-rw-r--r--drivers/media/usb/gspca/spca500.c3
-rw-r--r--drivers/media/usb/gspca/sq905c.c2
-rw-r--r--drivers/media/usb/gspca/sq930x.c3
-rw-r--r--drivers/media/usb/gspca/stk014.c5
-rw-r--r--drivers/media/usb/gspca/stk1135.c76
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx.c2
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c2
-rw-r--r--drivers/media/usb/gspca/sunplus.c3
-rw-r--r--drivers/media/usb/gspca/topro.c13
-rw-r--r--drivers/media/usb/gspca/tv8532.c7
-rw-r--r--drivers/media/usb/gspca/vicam.c8
-rw-r--r--drivers/media/usb/gspca/w996Xcf.c28
-rw-r--r--drivers/media/usb/gspca/xirlink_cit.c46
-rw-r--r--drivers/media/usb/gspca/zc3xx.c3
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/usb/siano/smsusb.c43
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c7
-rw-r--r--drivers/media/usb/tlg2300/pd-main.c2
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c152
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c4
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c18
-rw-r--r--drivers/media/usb/uvc/uvc_video.c2
-rw-r--r--drivers/media/v4l2-core/tuner-core.c8
-rw-r--r--drivers/media/v4l2-core/v4l2-clk.c39
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c8
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c16
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c23
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c87
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c149
-rw-r--r--drivers/memstick/core/memstick.c18
-rw-r--r--drivers/message/fusion/mptsas.c8
-rw-r--r--drivers/message/i2o/core.h2
-rw-r--r--drivers/message/i2o/device.c32
-rw-r--r--drivers/message/i2o/driver.c2
-rw-r--r--drivers/mfd/88pm860x-core.c2
-rw-r--r--drivers/mfd/Kconfig26
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/aat2870-core.c2
-rw-r--r--drivers/mfd/arizona-core.c38
-rw-r--r--drivers/mfd/arizona-i2c.c1
-rw-r--r--drivers/mfd/arizona-spi.c1
-rw-r--r--drivers/mfd/as3711.c1
-rw-r--r--drivers/mfd/as3722.c449
-rw-r--r--drivers/mfd/da9052-i2c.c12
-rw-r--r--drivers/mfd/db8500-prcmu.c1
-rw-r--r--drivers/mfd/dbx500-prcmu-regs.h1
-rw-r--r--drivers/mfd/ezx-pcap.c8
-rw-r--r--drivers/mfd/lpc_ich.c15
-rw-r--r--drivers/mfd/lpc_sch.c3
-rw-r--r--drivers/mfd/max77686.c1
-rw-r--r--drivers/mfd/max77693-irq.c3
-rw-r--r--drivers/mfd/max77693.c19
-rw-r--r--drivers/mfd/max8907.c1
-rw-r--r--drivers/mfd/max8925-i2c.c2
-rw-r--r--drivers/mfd/max8997.c1
-rw-r--r--drivers/mfd/mc13xxx-core.c5
-rw-r--r--drivers/mfd/mc13xxx-i2c.c1
-rw-r--r--drivers/mfd/mc13xxx-spi.c5
-rw-r--r--drivers/mfd/mfd-core.c22
-rw-r--r--drivers/mfd/omap-usb-host.c18
-rw-r--r--drivers/mfd/omap-usb-tll.c6
-rw-r--r--drivers/mfd/palmas.c30
-rw-r--r--drivers/mfd/rts5249.c48
-rw-r--r--drivers/mfd/rtsx_pcr.c5
-rw-r--r--drivers/mfd/sec-core.c1
-rw-r--r--drivers/mfd/sm501.c4
-rw-r--r--drivers/mfd/stw481x.c250
-rw-r--r--drivers/mfd/tc3589x.c37
-rw-r--r--drivers/mfd/ti-ssp.c1
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c24
-rw-r--r--drivers/mfd/timberdale.c6
-rw-r--r--drivers/mfd/tps6507x.c1
-rw-r--r--drivers/mfd/tps65217.c2
-rw-r--r--drivers/mfd/tps6586x.c19
-rw-r--r--drivers/mfd/tps65910.c5
-rw-r--r--drivers/mfd/twl6040.c18
-rw-r--r--drivers/mfd/ucb1x00-core.c1
-rw-r--r--drivers/mfd/wm5102-tables.c1
-rw-r--r--drivers/mfd/wm5110-tables.c39
-rw-r--r--drivers/mfd/wm8994-core.c78
-rw-r--r--drivers/misc/carma/carma-fpga-program.c2
-rw-r--r--drivers/misc/carma/carma-fpga.c2
-rw-r--r--drivers/misc/eeprom/at24.c2
-rw-r--r--drivers/misc/tifm_core.c10
-rw-r--r--drivers/mmc/card/block.c2
-rw-r--r--drivers/mmc/card/queue.c3
-rw-r--r--drivers/mmc/core/bus.c14
-rw-r--r--drivers/mmc/core/core.c154
-rw-r--r--drivers/mmc/core/core.h6
-rw-r--r--drivers/mmc/core/mmc.c127
-rw-r--r--drivers/mmc/core/mmc_ops.c94
-rw-r--r--drivers/mmc/core/sd.c118
-rw-r--r--drivers/mmc/core/sdio.c82
-rw-r--r--drivers/mmc/core/sdio_bus.c21
-rw-r--r--drivers/mmc/host/atmel-mci.c82
-rw-r--r--drivers/mmc/host/au1xmmc.c7
-rw-r--r--drivers/mmc/host/bfin_sdh.c15
-rw-r--r--drivers/mmc/host/cb710-mmc.c10
-rw-r--r--drivers/mmc/host/davinci_mmc.c26
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c294
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c7
-rw-r--r--drivers/mmc/host/dw_mmc-socfpga.c34
-rw-r--r--drivers/mmc/host/dw_mmc.c604
-rw-r--r--drivers/mmc/host/dw_mmc.h55
-rw-r--r--drivers/mmc/host/jz4740_mmc.c4
-rw-r--r--drivers/mmc/host/mmci.c95
-rw-r--r--drivers/mmc/host/mmci.h4
-rw-r--r--drivers/mmc/host/msm_sdcc.c27
-rw-r--r--drivers/mmc/host/mvsdio.c46
-rw-r--r--drivers/mmc/host/mxcmmc.c12
-rw-r--r--drivers/mmc/host/mxs-mmc.c12
-rw-r--r--drivers/mmc/host/omap.c53
-rw-r--r--drivers/mmc/host/omap_hsmmc.c112
-rw-r--r--drivers/mmc/host/pxamci.c32
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c35
-rw-r--r--drivers/mmc/host/s3cmci.c29
-rw-r--r--drivers/mmc/host/sdhci-acpi.c5
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c14
-rw-r--r--drivers/mmc/host/sdhci-bcm2835.c8
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c550
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h37
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c35
-rw-r--r--drivers/mmc/host/sdhci-pci.c76
-rw-r--r--drivers/mmc/host/sdhci.c36
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/sdricoh_cs.c3
-rw-r--r--drivers/mmc/host/sh_mmcif.c10
-rw-r--r--drivers/mmc/host/tifm_sd.c4
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c9
-rw-r--r--drivers/mmc/host/via-sdmmc.c7
-rw-r--r--drivers/mmc/host/vub300.c18
-rw-r--r--drivers/mmc/host/wbsd.c33
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c57
-rw-r--r--drivers/mtd/bcm47xxpart.c23
-rw-r--r--drivers/mtd/devices/Kconfig7
-rw-r--r--drivers/mtd/devices/block2mtd.c1
-rw-r--r--drivers/mtd/devices/docg3.c2
-rw-r--r--drivers/mtd/devices/m25p80.c82
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c2
-rw-r--r--drivers/mtd/devices/phram.c66
-rw-r--r--drivers/mtd/devices/sst25l.c13
-rw-r--r--drivers/mtd/inftlcore.c2
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c2
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c1
-rw-r--r--drivers/mtd/maps/pci.c1
-rw-r--r--drivers/mtd/maps/plat-ram.c18
-rw-r--r--drivers/mtd/maps/scb2_flash.c1
-rw-r--r--drivers/mtd/mtdblock.c3
-rw-r--r--drivers/mtd/mtdblock_ro.c3
-rw-r--r--drivers/mtd/mtdchar.c1
-rw-r--r--drivers/mtd/mtdcore.c3
-rw-r--r--drivers/mtd/mtdsuper.c1
-rw-r--r--drivers/mtd/nand/Kconfig40
-rw-r--r--drivers/mtd/nand/atmel_nand.c79
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/main.c38
-rw-r--r--drivers/mtd/nand/denali.c4
-rw-r--r--drivers/mtd/nand/denali_pci.c1
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/mtd/nand/docg4.c18
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c3
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c135
-rw-r--r--drivers/mtd/nand/fsl_upm.c1
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c13
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c46
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-regs.h3
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c2
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c10
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c2
-rw-r--r--drivers/mtd/nand/mxc_nand.c1
-rw-r--r--drivers/mtd/nand/nand_base.c92
-rw-r--r--drivers/mtd/nand/nand_bbt.c38
-rw-r--r--drivers/mtd/nand/nandsim.c6
-rw-r--r--drivers/mtd/nand/ndfc.c1
-rw-r--r--drivers/mtd/nand/omap2.c641
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c60
-rw-r--r--drivers/mtd/nand/socrates_nand.c14
-rw-r--r--drivers/mtd/nftlcore.c2
-rw-r--r--drivers/mtd/onenand/omap2.c22
-rw-r--r--drivers/mtd/onenand/onenand_base.c17
-rw-r--r--drivers/mtd/ssfdc.c2
-rw-r--r--drivers/mtd/tests/nandbiterrs.c2
-rw-r--r--drivers/mtd/tests/oobtest.c2
-rw-r--r--drivers/mtd/tests/pagetest.c2
-rw-r--r--drivers/mtd/tests/subpagetest.c2
-rw-r--r--drivers/mtd/ubi/attach.c11
-rw-r--r--drivers/mtd/ubi/build.c1
-rw-r--r--drivers/mtd/ubi/fastmap.c41
-rw-r--r--drivers/mtd/ubi/wl.c4
-rw-r--r--drivers/net/Space.c3
-rw-r--r--drivers/net/bonding/Makefile2
-rw-r--r--drivers/net/bonding/bond_3ad.c199
-rw-r--r--drivers/net/bonding/bond_alb.c150
-rw-r--r--drivers/net/bonding/bond_alb.h4
-rw-r--r--drivers/net/bonding/bond_main.c660
-rw-r--r--drivers/net/bonding/bond_netlink.c131
-rw-r--r--drivers/net/bonding/bond_options.c142
-rw-r--r--drivers/net/bonding/bond_procfs.c21
-rw-r--r--drivers/net/bonding/bond_sysfs.c229
-rw-r--r--drivers/net/bonding/bonding.h110
-rw-r--r--drivers/net/caif/caif_virtio.c23
-rw-r--r--drivers/net/can/at91_can.c6
-rw-r--r--drivers/net/can/bfin_can.c2
-rw-r--r--drivers/net/can/c_can/c_can.c6
-rw-r--r--drivers/net/can/c_can/c_can_pci.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c2
-rw-r--r--drivers/net/can/cc770/cc770_platform.c4
-rw-r--r--drivers/net/can/dev.c10
-rw-r--r--drivers/net/can/flexcan.c16
-rw-r--r--drivers/net/can/grcan.c3
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/mcp251x.c2
-rw-r--r--drivers/net/can/mscan/mscan.h6
-rw-r--r--drivers/net/can/pch_can.c1
-rw-r--r--drivers/net/can/sja1000/ems_pci.c1
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c1
-rw-r--r--drivers/net/can/sja1000/peak_pci.c2
-rw-r--r--drivers/net/can/sja1000/plx_pci.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c2
-rw-r--r--drivers/net/can/softing/softing.h24
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb.c20
-rw-r--r--drivers/net/ethernet/3com/Kconfig4
-rw-r--r--drivers/net/ethernet/3com/typhoon.c1
-rw-r--r--drivers/net/ethernet/8390/8390.h40
-rw-r--r--drivers/net/ethernet/8390/Kconfig7
-rw-r--r--drivers/net/ethernet/8390/Makefile1
-rw-r--r--drivers/net/ethernet/8390/ax88796.c2
-rw-r--r--drivers/net/ethernet/8390/ne-h8300.c684
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c3
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.h2
-rw-r--r--drivers/net/ethernet/amd/7990.h12
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c2
-rw-r--r--drivers/net/ethernet/amd/atarilance.c4
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/amd/declance.c19
-rw-r--r--drivers/net/ethernet/amd/lance.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c3
-rw-r--r--drivers/net/ethernet/apple/bmac.c4
-rw-r--r--drivers/net/ethernet/arc/emac_main.c16
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h6
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e.h12
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c46
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.h2
-rw-r--r--drivers/net/ethernet/broadcom/b44.c15
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c137
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h26
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c29
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c62
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h38
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c476
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c75
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c151
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h7
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c4
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c142
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c14
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h43
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h46
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/regs.h35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c11
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c58
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c2
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c1
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h186
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c30
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h332
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c40
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c333
-rw-r--r--drivers/net/ethernet/fealnx.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c127
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h26
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c1
-rw-r--r--drivers/net/ethernet/fujitsu/Kconfig2
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/i825xx/82596.c4
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.h14
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c17
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.h18
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.h14
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.h18
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c4
-rw-r--r--drivers/net/ethernet/icplus/ipg.c1
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h32
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c12
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h45
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c338
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c69
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c484
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c433
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h16
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h38
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c8
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h74
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c96
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c79
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h22
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c26
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c4
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h22
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h25
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h238
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c109
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c99
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h40
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c234
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h178
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c270
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c4
-rw-r--r--drivers/net/ethernet/jme.c6
-rw-r--r--drivers/net/ethernet/korina.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c11
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c110
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/marvell/skge.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c510
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c106
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c195
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c4
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c4
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c23
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c9
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c2
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c7
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h56
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c1
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c2
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h9
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c30
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h181
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c153
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h18
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c78
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c47
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c20
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c184
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h109
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c258
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c67
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c447
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c41
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h60
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c127
-rw-r--r--drivers/net/ethernet/rdc/r6040.c2
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c1
-rw-r--r--drivers/net/ethernet/realtek/8139too.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c16
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c406
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h1
-rw-r--r--drivers/net/ethernet/sfc/efx.c12
-rw-r--r--drivers/net/ethernet/sfc/efx.h105
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c5
-rw-r--r--drivers/net/ethernet/sfc/io.h5
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c18
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h120
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h56
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.h26
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h10
-rw-r--r--drivers/net/ethernet/sfc/nic.c82
-rw-r--r--drivers/net/ethernet/sfc/nic.h268
-rw-r--r--drivers/net/ethernet/sfc/phy.h8
-rw-r--r--drivers/net/ethernet/sfc/rx.c90
-rw-r--r--drivers/net/ethernet/sfc/selftest.h15
-rw-r--r--drivers/net/ethernet/sfc/tx.c426
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/sis/sis190.c1
-rw-r--r--drivers/net/ethernet/smsc/epic100.c126
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c331
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h2
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c76
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c43
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c237
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h10
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c1
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c4
-rw-r--r--drivers/net/ethernet/sun/sunhme.c12
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c1
-rw-r--r--drivers/net/ethernet/ti/Kconfig8
-rw-r--r--drivers/net/ethernet/ti/Makefile1
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c161
-rw-r--r--drivers/net/ethernet/ti/cpsw.c178
-rw-r--r--drivers/net/ethernet/ti/cpsw.h2
-rw-r--r--drivers/net/ethernet/ti/cpts.h9
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c5
-rw-r--r--drivers/net/ethernet/ti/tlan.c1
-rw-r--r--drivers/net/ethernet/tile/tilegx.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h29
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.h6
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c1
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h4
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c1
-rw-r--r--drivers/net/ethernet/via/via-rhine.c1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c41
-rw-r--r--drivers/net/fddi/skfp/fplustm.c2
-rw-r--r--drivers/net/fddi/skfp/h/smc.h28
-rw-r--r--drivers/net/fddi/skfp/skfddi.c6
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c2
-rw-r--r--drivers/net/hamradio/scc.c2
-rw-r--r--drivers/net/hamradio/yam.c3
-rw-r--r--drivers/net/ieee802154/mrf24j40.c31
-rw-r--r--drivers/net/irda/ali-ircc.c2
-rw-r--r--drivers/net/irda/bfin_sir.c4
-rw-r--r--drivers/net/irda/donauboe.c4
-rw-r--r--drivers/net/irda/nsc-ircc.c2
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c2
-rw-r--r--drivers/net/irda/sir-dev.h29
-rw-r--r--drivers/net/macvlan.c11
-rw-r--r--drivers/net/netconsole.c75
-rw-r--r--drivers/net/phy/at803x.c57
-rw-r--r--drivers/net/phy/marvell.c4
-rw-r--r--drivers/net/phy/mdio_bus.c10
-rw-r--r--drivers/net/phy/micrel.c24
-rw-r--r--drivers/net/plip/plip.c2
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/ax88179_178a.c34
-rw-r--r--drivers/net/usb/catc.c8
-rw-r--r--drivers/net/usb/cdc-phonet.c2
-rw-r--r--drivers/net/usb/cdc_mbim.c104
-rw-r--r--drivers/net/usb/cdc_ncm.c490
-rw-r--r--drivers/net/usb/qmi_wwan.c70
-rw-r--r--drivers/net/usb/usbnet.c4
-rw-r--r--drivers/net/veth.c9
-rw-r--r--drivers/net/virtio_net.c221
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vxlan.c37
-rw-r--r--drivers/net/wan/farsync.c1
-rw-r--r--drivers/net/wan/hostess_sv11.c2
-rw-r--r--drivers/net/wan/sbni.c89
-rw-r--r--drivers/net/wan/sealevel.c2
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wan/x25_asy.h2
-rw-r--r--drivers/net/wan/z85230.h27
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h27
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h117
-rw-r--r--drivers/net/wireless/adm8211.c1
-rw-r--r--drivers/net/wireless/airo.c1
-rw-r--r--drivers/net/wireless/ath/Kconfig18
-rw-r--r--drivers/net/wireless/ath/Makefile5
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c42
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c397
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h126
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c355
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h80
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c157
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h27
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c241
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c19
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c314
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c287
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h79
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c732
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c467
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h76
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h24
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h32
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c67
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c1277
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h1037
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c15
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h3
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h9
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig20
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile4
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c92
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c34
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c240
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h218
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h73
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c91
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c564
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c456
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c128
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h112
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c137
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c350
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c195
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c197
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c79
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c (renamed from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c)23
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.h (renamed from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h)28
-rw-r--r--drivers/net/wireless/ath/dfs_pri_detector.c (renamed from drivers/net/wireless/ath/ath9k/dfs_pri_detector.c)10
-rw-r--r--drivers/net/wireless/ath/dfs_pri_detector.h (renamed from drivers/net/wireless/ath/ath9k/dfs_pri_detector.h)2
-rw-r--r--drivers/net/wireless/ath/regd.c140
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Kconfig16
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Makefile7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c181
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.h49
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c805
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h284
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h4657
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c1036
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c62
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.h33
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c2126
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h127
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c284
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.h160
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h238
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c1
-rw-r--r--drivers/net/wireless/atmel.c94
-rw-r--r--drivers/net/wireless/b43/dma.c9
-rw-r--r--drivers/net/wireless/b43/phy_n.c3
-rw-r--r--drivers/net/wireless/b43/xmit.c2
-rw-r--r--drivers/net/wireless/b43legacy/dma.c9
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c186
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c30
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h32
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h29
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c38
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c343
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c28
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h31
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h97
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h21
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h18
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.h22
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/antsel.h14
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.h20
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h38
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.h110
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h219
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h371
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h91
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h145
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/rate.h48
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/stf.h31
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h16
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_d11.h2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h44
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c6
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c4
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h87
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h82
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h2
-rw-r--r--drivers/net/wireless/iwlegacy/common.h66
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h8
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h26
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c632
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c515
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c206
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h149
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h69
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h11
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h29
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h21
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h34
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h55
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h16
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c23
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c75
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c242
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h88
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c101
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c60
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c75
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c42
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c793
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h163
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c21
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c474
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c206
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/testmode.h95
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c49
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c50
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c135
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c36
-rw-r--r--drivers/net/wireless/libertas/firmware.c5
-rw-r--r--drivers/net/wireless/libertas/if_cs.c8
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c8
-rw-r--r--drivers/net/wireless/libertas/if_spi.c6
-rw-r--r--drivers/net/wireless/libertas/if_usb.c17
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c2
-rw-r--r--drivers/net/wireless/mwifiex/join.c12
-rw-r--r--drivers/net/wireless/mwifiex/main.c10
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c6
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c3
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c2
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h24
-rw-r--r--drivers/net/wireless/mwl8k.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h31
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c1
-rw-r--r--drivers/net/wireless/p54/p54spi.c2
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c10
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c2
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c2
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig28
-rw-r--r--drivers/net/wireless/rt2x00/Makefile2
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h44
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c289
-rw-r--r--drivers/net/wireless/rt2x00/rt2800mmio.c873
-rw-r--r--drivers/net/wireless/rt2x00/rt2800mmio.h165
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c951
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h97
-rw-r--r--drivers/net/wireless/rt2x00/rt2800soc.c263
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c29
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h103
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c74
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c39
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c20
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c18
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c1
-rw-r--r--drivers/net/wireless/rtlwifi/base.c29
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.h10
-rw-r--r--drivers/net/wireless/rtlwifi/core.c10
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c18
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.h29
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.c28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.h52
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c25
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.h52
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c187
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.h49
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/rf.h18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/reg.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.c29
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.h62
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/rf.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c6
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h4
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c2
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c95
-rw-r--r--drivers/net/wireless/ti/wl18xx/reg.h33
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c58
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c158
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c27
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c27
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h11
-rw-r--r--drivers/net/xen-netback/common.h13
-rw-r--r--drivers/net/xen-netback/interface.c19
-rw-r--r--drivers/net/xen-netback/netback.c304
-rw-r--r--drivers/net/xen-netback/xenbus.c56
-rw-r--r--drivers/net/xen-netfront.c4
-rw-r--r--drivers/nfc/Kconfig10
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/mei_phy.c6
-rw-r--r--drivers/nfc/microread/i2c.c32
-rw-r--r--drivers/nfc/microread/mei.c4
-rw-r--r--drivers/nfc/microread/microread.c7
-rw-r--r--drivers/nfc/nfcsim.c38
-rw-r--r--drivers/nfc/nfcwilink.c97
-rw-r--r--drivers/nfc/pn533.c604
-rw-r--r--drivers/nfc/pn544/i2c.c42
-rw-r--r--drivers/nfc/pn544/pn544.c129
-rw-r--r--drivers/nfc/port100.c1529
-rw-r--r--drivers/of/address.c18
-rw-r--r--drivers/of/base.c96
-rw-r--r--drivers/of/fdt.c138
-rw-r--r--drivers/of/irq.c164
-rw-r--r--drivers/of/of_pci.c1
-rw-r--r--drivers/of/of_pci_irq.c41
-rw-r--r--drivers/of/pdt.c1
-rw-r--r--drivers/of/platform.c5
-rw-r--r--drivers/of/selftest.c161
-rw-r--r--drivers/parport/Kconfig10
-rw-r--r--drivers/parport/parport_pc.c8
-rw-r--r--drivers/pci/host/Kconfig16
-rw-r--r--drivers/pci/host/Makefile2
-rw-r--r--drivers/pci/host/pci-exynos.c132
-rw-r--r--drivers/pci/host/pci-imx6.c568
-rw-r--r--drivers/pci/host/pci-mvebu.c248
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c333
-rw-r--r--drivers/pci/host/pci-tegra.c4
-rw-r--r--drivers/pci/host/pcie-designware.c257
-rw-r--r--drivers/pci/host/pcie-designware.h26
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c9
-rw-r--r--drivers/pci/hotplug/acpiphp.h10
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c37
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c29
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c58
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c6
-rw-r--r--drivers/pci/hotplug/shpchp.h2
-rw-r--r--drivers/pci/msi.c6
-rw-r--r--drivers/pci/pci-acpi.c3
-rw-r--r--drivers/pci/pci-driver.c28
-rw-r--r--drivers/pci/pci-sysfs.c132
-rw-r--r--drivers/pci/pci.c11
-rw-r--r--drivers/pci/pci.h4
-rw-r--r--drivers/pci/probe.c4
-rw-r--r--drivers/pci/quirks.c23
-rw-r--r--drivers/pci/setup-bus.c4
-rw-r--r--drivers/pcmcia/at91_cf.c11
-rw-r--r--drivers/pcmcia/ds.c65
-rw-r--r--drivers/pcmcia/electra_cf.c2
-rw-r--r--drivers/pinctrl/pinctrl-single.c388
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/eeepc-laptop.c8
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c44
-rw-r--r--drivers/platform/x86/ideapad-laptop.c344
-rw-r--r--drivers/platform/x86/intel-rst.c48
-rw-r--r--drivers/platform/x86/intel-smartconnect.c27
-rw-r--r--drivers/platform/x86/intel_menlow.c8
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c4
-rw-r--r--drivers/platform/x86/sony-laptop.c54
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c123
-rw-r--r--drivers/platform/x86/topstar-laptop.c8
-rw-r--r--drivers/platform/x86/toshiba_acpi.c44
-rw-r--r--drivers/platform/x86/wmi.c30
-rw-r--r--drivers/pnp/base.h2
-rw-r--r--drivers/pnp/driver.c2
-rw-r--r--drivers/pnp/interface.c43
-rw-r--r--drivers/pnp/pnpacpi/core.c11
-rw-r--r--drivers/power/Kconfig6
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/ab8500_charger.c17
-rw-r--r--drivers/power/bq2415x_charger.c6
-rw-r--r--drivers/power/bq24735-charger.c419
-rw-r--r--drivers/power/charger-manager.c85
-rw-r--r--drivers/power/isp1704_charger.c91
-rw-r--r--drivers/power/max17042_battery.c373
-rw-r--r--drivers/power/pm2301_charger.c10
-rw-r--r--drivers/power/tps65090-charger.c30
-rw-r--r--drivers/powercap/Kconfig32
-rw-r--r--drivers/powercap/Makefile2
-rw-r--r--drivers/powercap/intel_rapl.c1395
-rw-r--r--drivers/powercap/powercap_sys.c685
-rw-r--r--drivers/rapidio/rio-driver.c4
-rw-r--r--drivers/rapidio/rio-sysfs.c54
-rw-r--r--drivers/rapidio/rio.h4
-rw-r--r--drivers/regulator/88pm800.c12
-rw-r--r--drivers/regulator/88pm8607.c12
-rw-r--r--drivers/regulator/Kconfig26
-rw-r--r--drivers/regulator/Makefile4
-rw-r--r--drivers/regulator/aat2870-regulator.c11
-rw-r--r--drivers/regulator/ab3100.c3
-rw-r--r--drivers/regulator/ab8500-ext.c26
-rw-r--r--drivers/regulator/ad5398.c19
-rw-r--r--drivers/regulator/anatop-regulator.c7
-rw-r--r--drivers/regulator/arizona-ldo1.c12
-rw-r--r--drivers/regulator/arizona-micsupp.c14
-rw-r--r--drivers/regulator/as3711-regulator.c43
-rw-r--r--drivers/regulator/as3722-regulator.c908
-rw-r--r--drivers/regulator/core.c455
-rw-r--r--drivers/regulator/da903x.c17
-rw-r--r--drivers/regulator/da9052-regulator.c55
-rw-r--r--drivers/regulator/da9055-regulator.c24
-rw-r--r--drivers/regulator/da9063-regulator.c23
-rw-r--r--drivers/regulator/da9210-regulator.c19
-rw-r--r--drivers/regulator/devres.c415
-rw-r--r--drivers/regulator/fan53555.c12
-rw-r--r--drivers/regulator/fixed.c38
-rw-r--r--drivers/regulator/gpio-regulator.c1
-rw-r--r--drivers/regulator/helpers.c6
-rw-r--r--drivers/regulator/internal.h38
-rw-r--r--drivers/regulator/isl6271a-regulator.c24
-rw-r--r--drivers/regulator/lp3971.c4
-rw-r--r--drivers/regulator/lp872x.c33
-rw-r--r--drivers/regulator/lp8788-buck.c12
-rw-r--r--drivers/regulator/lp8788-ldo.c24
-rw-r--r--drivers/regulator/max1586.c26
-rw-r--r--drivers/regulator/max77686.c23
-rw-r--r--drivers/regulator/max77693.c29
-rw-r--r--drivers/regulator/max8649.c14
-rw-r--r--drivers/regulator/max8660.c30
-rw-r--r--drivers/regulator/max8907-regulator.c23
-rw-r--r--drivers/regulator/max8925-regulator.c12
-rw-r--r--drivers/regulator/max8973-regulator.c11
-rw-r--r--drivers/regulator/max8997.c44
-rw-r--r--drivers/regulator/max8998.c35
-rw-r--r--drivers/regulator/mc13783-regulator.c53
-rw-r--r--drivers/regulator/mc13892-regulator.c22
-rw-r--r--drivers/regulator/of_regulator.c6
-rw-r--r--drivers/regulator/palmas-regulator.c208
-rw-r--r--drivers/regulator/pcap-regulator.c13
-rw-r--r--drivers/regulator/pcf50633-regulator.c13
-rw-r--r--drivers/regulator/rc5t583-regulator.c22
-rw-r--r--drivers/regulator/s2mps11.c23
-rw-r--r--drivers/regulator/s5m8767.c24
-rw-r--r--drivers/regulator/stw481x-vmmc.c111
-rw-r--r--drivers/regulator/ti-abb-regulator.c86
-rw-r--r--drivers/regulator/tps51632-regulator.c11
-rw-r--r--drivers/regulator/tps6105x-regulator.c15
-rw-r--r--drivers/regulator/tps62360-regulator.c17
-rw-r--r--drivers/regulator/tps65023-regulator.c25
-rw-r--r--drivers/regulator/tps6507x-regulator.c23
-rw-r--r--drivers/regulator/tps65090-regulator.c37
-rw-r--r--drivers/regulator/tps65217-regulator.c50
-rw-r--r--drivers/regulator/tps6524x-regulator.c32
-rw-r--r--drivers/regulator/tps6586x-regulator.c33
-rw-r--r--drivers/regulator/tps65910-regulator.c35
-rw-r--r--drivers/regulator/tps65912-regulator.c33
-rw-r--r--drivers/regulator/tps80031-regulator.c30
-rw-r--r--drivers/regulator/twl-regulator.c3
-rw-r--r--drivers/regulator/vexpress.c3
-rw-r--r--drivers/regulator/wm831x-dcdc.c118
-rw-r--r--drivers/regulator/wm831x-isink.c25
-rw-r--r--drivers/regulator/wm831x-ldo.c75
-rw-r--r--drivers/regulator/wm8350-regulator.c12
-rw-r--r--drivers/regulator/wm8400-regulator.c19
-rw-r--r--drivers/regulator/wm8994-regulator.c14
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c3
-rw-r--r--drivers/rtc/interface.c13
-rw-r--r--drivers/rtc/rtc-mpc5121.c2
-rw-r--r--drivers/rtc/rtc-mrst.c4
-rw-r--r--drivers/rtc/rtc-pl031.c3
-rw-r--r--drivers/s390/block/dasd.c11
-rw-r--r--drivers/s390/block/dasd_eckd.c98
-rw-r--r--drivers/s390/block/dcssblk.c19
-rw-r--r--drivers/s390/block/scm_blk.h2
-rw-r--r--drivers/s390/block/xpram.c19
-rw-r--r--drivers/s390/char/monwriter.c2
-rw-r--r--drivers/s390/char/raw3270.c4
-rw-r--r--drivers/s390/char/sclp.c4
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/char/zcore.c20
-rw-r--r--drivers/s390/cio/airq.c19
-rw-r--r--drivers/s390/cio/cio.c4
-rw-r--r--drivers/s390/cio/eadm_sch.c29
-rw-r--r--drivers/s390/cio/eadm_sch.h4
-rw-r--r--drivers/s390/cio/qdio_debug.h8
-rw-r--r--drivers/s390/cio/qdio_main.c12
-rw-r--r--drivers/s390/crypto/zcrypt_debug.h12
-rw-r--r--drivers/s390/kvm/kvm_virtio.c8
-rw-r--r--drivers/s390/kvm/virtio_ccw.c5
-rw-r--r--drivers/s390/net/claw.h8
-rw-r--r--drivers/s390/net/ctcm_dbug.c2
-rw-r--r--drivers/s390/net/lcs.h8
-rw-r--r--drivers/s390/net/netiucv.c8
-rw-r--r--drivers/s390/net/qeth_core_main.c2
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c6
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h4
-rw-r--r--drivers/scsi/BusLogic.c16
-rw-r--r--drivers/scsi/aacraid/linit.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c1
-rw-r--r--drivers/scsi/atp870u.c2
-rw-r--r--drivers/scsi/bfa/bfad.c54
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c16
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c14
-rw-r--r--drivers/scsi/csiostor/csio_hw.c22
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/dc395x.c1
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c14
-rw-r--r--drivers/scsi/fnic/fnic_main.c1
-rw-r--r--drivers/scsi/gdth.c2
-rw-r--r--drivers/scsi/hpsa.c1
-rw-r--r--drivers/scsi/libsas/sas_expander.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c3
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c5
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c41
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c39
-rw-r--r--drivers/scsi/mvsas/mv_init.c1
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2
-rw-r--r--drivers/scsi/mvumi.c2
-rw-r--r--drivers/scsi/ncr53c8xx.c2
-rw-r--r--drivers/scsi/osd/osd_initiator.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c1
-rw-r--r--drivers/scsi/pmcraid.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c15
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c1
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/scsi_transport_srp.c540
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/sd_dif.c30
-rw-r--r--drivers/scsi/sg.c176
-rw-r--r--drivers/scsi/stex.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.h2
-rw-r--r--drivers/scsi/tmscsim.c1
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c1
-rw-r--r--drivers/scsi/virtio_scsi.c16
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/spi/Kconfig5
-rw-r--r--drivers/spi/spi-altera.c2
-rw-r--r--drivers/spi/spi-ath79.c2
-rw-r--r--drivers/spi/spi-atmel.c50
-rw-r--r--drivers/spi/spi-au1550.c5
-rw-r--r--drivers/spi/spi-bcm2835.c4
-rw-r--r--drivers/spi/spi-bcm63xx.c6
-rw-r--r--drivers/spi/spi-bfin-sport.c29
-rw-r--r--drivers/spi/spi-bfin-v3.c3
-rw-r--r--drivers/spi/spi-bfin5xx.c48
-rw-r--r--drivers/spi/spi-bitbang.c25
-rw-r--r--drivers/spi/spi-butterfly.c15
-rw-r--r--drivers/spi/spi-clps711x.c7
-rw-r--r--drivers/spi/spi-davinci.c13
-rw-r--r--drivers/spi/spi-dw-mmio.c5
-rw-r--r--drivers/spi/spi-dw-pci.c3
-rw-r--r--drivers/spi/spi-dw.c4
-rw-r--r--drivers/spi/spi-efm32.c12
-rw-r--r--drivers/spi/spi-ep93xx.c7
-rw-r--r--drivers/spi/spi-fsl-cpm.c3
-rw-r--r--drivers/spi/spi-fsl-dspi.c10
-rw-r--r--drivers/spi/spi-fsl-espi.c12
-rw-r--r--drivers/spi/spi-gpio.c6
-rw-r--r--drivers/spi/spi-imx.c35
-rw-r--r--drivers/spi/spi-lm70llp.c2
-rw-r--r--drivers/spi/spi-mpc512x-psc.c5
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c4
-rw-r--r--drivers/spi/spi-mxs.c193
-rw-r--r--drivers/spi/spi-nuc900.c3
-rw-r--r--drivers/spi/spi-oc-tiny.c2
-rw-r--r--drivers/spi/spi-octeon.c4
-rw-r--r--drivers/spi/spi-omap-100k.c4
-rw-r--r--drivers/spi/spi-omap-uwire.c5
-rw-r--r--drivers/spi/spi-omap2-mcspi.c19
-rw-r--r--drivers/spi/spi-orion.c10
-rw-r--r--drivers/spi/spi-pl022.c10
-rw-r--r--drivers/spi/spi-ppc4xx.c3
-rw-r--r--drivers/spi/spi-pxa2xx.c41
-rw-r--r--drivers/spi/spi-rspi.c270
-rw-r--r--drivers/spi/spi-s3c24xx.c4
-rw-r--r--drivers/spi/spi-s3c64xx.c260
-rw-r--r--drivers/spi/spi-sh-hspi.c5
-rw-r--r--drivers/spi/spi-sh-sci.c2
-rw-r--r--drivers/spi/spi-sirf.c2
-rw-r--r--drivers/spi/spi-tegra114.c90
-rw-r--r--drivers/spi/spi-tegra20-sflash.c5
-rw-r--r--drivers/spi/spi-tegra20-slink.c144
-rw-r--r--drivers/spi/spi-ti-qspi.c46
-rw-r--r--drivers/spi/spi-topcliff-pch.c17
-rw-r--r--drivers/spi/spi-txx9.c11
-rw-r--r--drivers/spi/spi-xilinx.c2
-rw-r--r--drivers/spi/spi.c281
-rw-r--r--drivers/spi/spidev.c7
-rw-r--r--drivers/ssb/main.c25
-rw-r--r--drivers/staging/bcm/Bcmchar.c1
-rw-r--r--drivers/staging/dwc2/platform.c5
-rw-r--r--drivers/staging/et131x/et131x.c31
-rw-r--r--drivers/staging/iio/adc/ad7606.h2
-rw-r--r--drivers/staging/iio/meter/ade7753.c3
-rw-r--r--drivers/staging/iio/meter/ade7754.c3
-rw-r--r--drivers/staging/iio/meter/ade7759.c3
-rw-r--r--drivers/staging/imx-drm/Kconfig1
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c16
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c26
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c5
-rw-r--r--drivers/staging/media/lirc/TODO5
-rw-r--r--drivers/staging/media/lirc/lirc_bt829.c33
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c9
-rw-r--r--drivers/staging/media/msi3101/Kconfig2
-rw-r--r--drivers/staging/media/msi3101/sdr-msi3101.c10
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-disp.c25
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c170
-rw-r--r--drivers/staging/media/solo6x10/solo6x10.h1
-rw-r--r--drivers/staging/ozwpan/ozcdev.c3
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.c2
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.c9
-rw-r--r--drivers/staging/zram/zram_drv.c33
-rw-r--r--drivers/target/target_core_iblock.c2
-rw-r--r--drivers/target/target_core_pscsi.c8
-rw-r--r--drivers/target/target_core_sbc.c5
-rw-r--r--drivers/target/target_core_xcopy.c53
-rw-r--r--drivers/thermal/Kconfig8
-rw-r--r--drivers/thermal/intel_powerclamp.c29
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c12
-rw-r--r--drivers/thermal/samsung/exynos_tmu.h7
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.c30
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.h13
-rw-r--r--drivers/thermal/thermal_hwmon.c2
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c1
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c14
-rw-r--r--drivers/tty/bfin_jtag_comm.c2
-rw-r--r--drivers/tty/ehv_bytechan.c1
-rw-r--r--drivers/tty/hvc/hvc_dcc.c21
-rw-r--r--drivers/tty/hvc/hvc_iucv.c3
-rw-r--r--drivers/tty/hvc/hvc_opal.c4
-rw-r--r--drivers/tty/hvc/hvc_vio.c5
-rw-r--r--drivers/tty/hvc/hvc_xen.c19
-rw-r--r--drivers/tty/hvc/hvsi_lib.c25
-rw-r--r--drivers/tty/n_tty.c13
-rw-r--r--drivers/tty/nozomi.c6
-rw-r--r--drivers/tty/serial/8250/8250_core.c2
-rw-r--r--drivers/tty/serial/8250/8250_dw.c76
-rw-r--r--drivers/tty/serial/8250/8250_em.c6
-rw-r--r--drivers/tty/serial/8250/8250_pci.c347
-rw-r--r--drivers/tty/serial/Kconfig3
-rw-r--r--drivers/tty/serial/amba-pl010.c3
-rw-r--r--drivers/tty/serial/amba-pl011.c3
-rw-r--r--drivers/tty/serial/arc_uart.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c28
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c11
-rw-r--r--drivers/tty/serial/bfin_uart.c21
-rw-r--r--drivers/tty/serial/clps711x.c2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c4
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c1
-rw-r--r--drivers/tty/serial/ifx6x60.c2
-rw-r--r--drivers/tty/serial/imx.c82
-rw-r--r--drivers/tty/serial/ip22zilog.c2
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/mfd.c13
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c5
-rw-r--r--drivers/tty/serial/mpsc.c2
-rw-r--r--drivers/tty/serial/mrst_max3110.c45
-rw-r--r--drivers/tty/serial/mxs-auart.c9
-rw-r--r--drivers/tty/serial/omap-serial.c141
-rw-r--r--drivers/tty/serial/pch_uart.c3
-rw-r--r--drivers/tty/serial/pmac_zilog.c4
-rw-r--r--drivers/tty/serial/sa1100.c5
-rw-r--r--drivers/tty/serial/samsung.c22
-rw-r--r--drivers/tty/serial/samsung.h2
-rw-r--r--drivers/tty/serial/sccnxp.c1
-rw-r--r--drivers/tty/serial/serial-tegra.c2
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c129
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c11
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h12
-rw-r--r--drivers/tty/serial/sunsab.c2
-rw-r--r--drivers/tty/serial/sunsu.c2
-rw-r--r--drivers/tty/serial/sunzilog.c6
-rw-r--r--drivers/tty/serial/ucc_uart.c4
-rw-r--r--drivers/tty/serial/xilinx_uartps.c551
-rw-r--r--drivers/tty/sysrq.c2
-rw-r--r--drivers/tty/tty_port.c10
-rw-r--r--drivers/tty/vt/vt.c33
-rw-r--r--drivers/uio/uio.c17
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c7
-rw-r--r--drivers/usb/core/Kconfig2
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c7
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c1
-rw-r--r--drivers/usb/gadget/lpc32xx_udc.c4
-rw-r--r--drivers/usb/gadget/storage_common.c4
-rw-r--r--drivers/usb/host/bcma-hcd.c3
-rw-r--r--drivers/usb/host/ehci-atmel.c7
-rw-r--r--drivers/usb/host/ehci-exynos.c7
-rw-r--r--drivers/usb/host/ehci-octeon.c4
-rw-r--r--drivers/usb/host/ehci-omap.c10
-rw-r--r--drivers/usb/host/ehci-orion.c7
-rw-r--r--drivers/usb/host/ehci-platform.c10
-rw-r--r--drivers/usb/host/ehci-ppc-of.c2
-rw-r--r--drivers/usb/host/ehci-spear.c7
-rw-r--r--drivers/usb/host/ehci-tegra.c7
-rw-r--r--drivers/usb/host/fhci-hcd.c2
-rw-r--r--drivers/usb/host/ohci-at91.c9
-rw-r--r--drivers/usb/host/ohci-exynos.c7
-rw-r--r--drivers/usb/host/ohci-nxp.c5
-rw-r--r--drivers/usb/host/ohci-octeon.c5
-rw-r--r--drivers/usb/host/ohci-omap3.c10
-rw-r--r--drivers/usb/host/ohci-ppc-of.c2
-rw-r--r--drivers/usb/host/ohci-pxa27x.c8
-rw-r--r--drivers/usb/host/ohci-s3c2410.c8
-rw-r--r--drivers/usb/host/ohci-sa1111.c6
-rw-r--r--drivers/usb/host/ohci-spear.c7
-rw-r--r--drivers/usb/host/ssb-hcd.c3
-rw-r--r--drivers/usb/host/uhci-platform.c7
-rw-r--r--drivers/usb/musb/ux500.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/pl2303.c274
-rw-r--r--drivers/uwb/umc-bus.c13
-rw-r--r--drivers/vhost/scsi.c2
-rw-r--r--drivers/video/68328fb.c9
-rw-r--r--drivers/video/Kconfig6
-rw-r--r--drivers/video/amba-clcd.c9
-rw-r--r--drivers/video/amifb.c6
-rw-r--r--drivers/video/arcfb.c8
-rw-r--r--drivers/video/arkfb.c49
-rw-r--r--drivers/video/asiliantfb.c4
-rw-r--r--drivers/video/atafb.c7
-rw-r--r--drivers/video/atmel_lcdfb.c344
-rw-r--r--drivers/video/aty/aty128fb.c8
-rw-r--r--drivers/video/aty/atyfb_base.c1
-rw-r--r--drivers/video/aty/radeon_base.c5
-rw-r--r--drivers/video/aty/radeon_pm.c22
-rw-r--r--drivers/video/aty/radeonfb.h1
-rw-r--r--drivers/video/au1100fb.c42
-rw-r--r--drivers/video/au1200fb.c39
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c9
-rw-r--r--drivers/video/backlight/backlight.c31
-rw-r--r--drivers/video/backlight/l4f00242t03.c1
-rw-r--r--drivers/video/backlight/tosa_lcd.c6
-rw-r--r--drivers/video/bf54x-lq043fb.c14
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c14
-rw-r--r--drivers/video/broadsheetfb.c19
-rw-r--r--drivers/video/bw2.c2
-rw-r--r--drivers/video/carminefb.c4
-rw-r--r--drivers/video/cfbimgblt.c2
-rw-r--r--drivers/video/cg14.c6
-rw-r--r--drivers/video/cg3.c2
-rw-r--r--drivers/video/cg6.c4
-rw-r--r--drivers/video/cirrusfb.c6
-rw-r--r--drivers/video/cobalt_lcdfb.c17
-rw-r--r--drivers/video/controlfb.c4
-rw-r--r--drivers/video/cyber2000fb.c75
-rw-r--r--drivers/video/da8xx-fb.c21
-rw-r--r--drivers/video/efifb.c7
-rw-r--r--drivers/video/ep93xx-fb.c2
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_common.c3
-rw-r--r--drivers/video/fb-puv3.c5
-rw-r--r--drivers/video/ffb.c2
-rw-r--r--drivers/video/fm2fb.c2
-rw-r--r--drivers/video/fsl-diu-fb.c4
-rw-r--r--drivers/video/gbefb.c6
-rw-r--r--drivers/video/geode/gx1fb_core.c3
-rw-r--r--drivers/video/geode/gxfb_core.c3
-rw-r--r--drivers/video/geode/lxfb_core.c4
-rw-r--r--drivers/video/grvga.c16
-rw-r--r--drivers/video/gxt4500.c3
-rw-r--r--drivers/video/hecubafb.c19
-rw-r--r--drivers/video/hgafb.c3
-rw-r--r--drivers/video/hitfb.c3
-rw-r--r--drivers/video/hpfb.c3
-rw-r--r--drivers/video/hyperv_fb.c45
-rw-r--r--drivers/video/i740fb.c9
-rw-r--r--drivers/video/i810/i810_main.c1
-rw-r--r--drivers/video/igafb.c5
-rw-r--r--drivers/video/imsttfb.c4
-rw-r--r--drivers/video/imxfb.c6
-rw-r--r--drivers/video/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/jz4740_fb.c29
-rw-r--r--drivers/video/kyro/fbdev.c10
-rw-r--r--drivers/video/leo.c4
-rw-r--r--drivers/video/macfb.c3
-rw-r--r--drivers/video/matrox/matroxfb_DAC1064.c4
-rw-r--r--drivers/video/matrox/matroxfb_Ti3026.c2
-rw-r--r--drivers/video/matrox/matroxfb_base.c6
-rw-r--r--drivers/video/matrox/matroxfb_maven.c14
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c3
-rw-r--r--drivers/video/mbx/mbxfb.c4
-rw-r--r--drivers/video/metronomefb.c17
-rw-r--r--drivers/video/mmp/fb/mmpfb.c34
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.c71
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.h5
-rw-r--r--drivers/video/mx3fb.c4
-rw-r--r--drivers/video/neofb.c9
-rw-r--r--drivers/video/nuc900fb.c9
-rw-r--r--drivers/video/nvidia/nv_hw.c2
-rw-r--r--drivers/video/offb.c3
-rw-r--r--drivers/video/omap/hwa742.c2
-rw-r--r--drivers/video/omap/omapfb_main.c4
-rw-r--r--drivers/video/omap2/displays-new/Kconfig6
-rw-r--r--drivers/video/omap2/displays-new/Makefile1
-rw-r--r--drivers/video/omap2/displays-new/connector-dvi.c7
-rw-r--r--drivers/video/omap2/displays-new/panel-dsi-cm.c2
-rw-r--r--drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c480
-rw-r--r--drivers/video/omap2/dss/Makefile3
-rw-r--r--drivers/video/omap2/dss/core.c4
-rw-r--r--drivers/video/omap2/dss/dispc.c10
-rw-r--r--drivers/video/omap2/dss/display.c2
-rw-r--r--drivers/video/omap2/dss/dsi.c12
-rw-r--r--drivers/video/omap2/dss/dss.h4
-rw-r--r--drivers/video/omap2/dss/dss_features.c44
-rw-r--r--drivers/video/omap2/dss/dss_features.h8
-rw-r--r--drivers/video/omap2/dss/hdmi.c1184
-rw-r--r--drivers/video/omap2/dss/hdmi.h444
-rw-r--r--drivers/video/omap2/dss/hdmi4.c696
-rw-r--r--drivers/video/omap2/dss/hdmi4_core.c (renamed from drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c)771
-rw-r--r--drivers/video/omap2/dss/hdmi4_core.h (renamed from drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h)303
-rw-r--r--drivers/video/omap2/dss/hdmi_common.c423
-rw-r--r--drivers/video/omap2/dss/hdmi_phy.c160
-rw-r--r--drivers/video/omap2/dss/hdmi_pll.c230
-rw-r--r--drivers/video/omap2/dss/hdmi_wp.c271
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h187
-rw-r--r--drivers/video/p9100.c2
-rw-r--r--drivers/video/platinumfb.c3
-rw-r--r--drivers/video/pm2fb.c5
-rw-r--r--drivers/video/pm3fb.c4
-rw-r--r--drivers/video/pmag-ba-fb.c4
-rw-r--r--drivers/video/pmagb-b-fb.c9
-rw-r--r--drivers/video/pvr2fb.c25
-rw-r--r--drivers/video/pxa168fb.c6
-rw-r--r--drivers/video/pxafb.c16
-rw-r--r--drivers/video/q40fb.c3
-rw-r--r--drivers/video/riva/fbdev.c5
-rw-r--r--drivers/video/s1d13xxxfb.c15
-rw-r--r--drivers/video/s3c-fb.c2
-rw-r--r--drivers/video/s3c2410fb.c6
-rw-r--r--drivers/video/s3fb.c63
-rw-r--r--drivers/video/sa1100fb.c4
-rw-r--r--drivers/video/savage/savagefb_driver.c6
-rw-r--r--drivers/video/sbuslib.c2
-rw-r--r--drivers/video/sgivwfb.c4
-rw-r--r--drivers/video/sh_mobile_hdmi.c13
-rw-r--r--drivers/video/simplefb.c24
-rw-r--r--drivers/video/sis/init.c5
-rw-r--r--drivers/video/sis/sis_main.c8
-rw-r--r--drivers/video/skeletonfb.c3
-rw-r--r--drivers/video/smscufx.c2
-rw-r--r--drivers/video/ssd1307fb.c2
-rw-r--r--drivers/video/sstfb.c8
-rw-r--r--drivers/video/stifb.c4
-rw-r--r--drivers/video/sunxvr1000.c2
-rw-r--r--drivers/video/svgalib.c4
-rw-r--r--drivers/video/sysimgblt.c2
-rw-r--r--drivers/video/tcx.c6
-rw-r--r--drivers/video/tdfxfb.c1
-rw-r--r--drivers/video/tgafb.c4
-rw-r--r--drivers/video/tmiofb.c13
-rw-r--r--drivers/video/tridentfb.c1
-rw-r--r--drivers/video/udlfb.c2
-rw-r--r--drivers/video/uvesafb.c25
-rw-r--r--drivers/video/valkyriefb.c2
-rw-r--r--drivers/video/vesafb.c3
-rw-r--r--drivers/video/vfb.c10
-rw-r--r--drivers/video/vga16fb.c3
-rw-r--r--drivers/video/vt8500lcdfb.c2
-rw-r--r--drivers/video/vt8623fb.c41
-rw-r--r--drivers/video/w100fb.c7
-rw-r--r--drivers/video/wm8505fb.c14
-rw-r--r--drivers/video/wmt_ge_rops.c4
-rw-r--r--drivers/video/xilinxfb.c61
-rw-r--r--drivers/virt/fsl_hypervisor.c1
-rw-r--r--drivers/virtio/virtio.c27
-rw-r--r--drivers/virtio/virtio_balloon.c14
-rw-r--r--drivers/virtio/virtio_mmio.c3
-rw-r--r--drivers/virtio/virtio_pci.c3
-rw-r--r--drivers/virtio/virtio_ring.c34
-rw-r--r--drivers/watchdog/Kconfig28
-rw-r--r--drivers/watchdog/Makefile5
-rw-r--r--drivers/watchdog/acquirewdt.c4
-rw-r--r--drivers/watchdog/advantechwdt.c1
-rw-r--r--drivers/watchdog/alim1535_wdt.c1
-rw-r--r--drivers/watchdog/alim7101_wdt.c1
-rw-r--r--drivers/watchdog/ar7_wdt.c1
-rw-r--r--drivers/watchdog/at32ap700x_wdt.c1
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c3
-rw-r--r--drivers/watchdog/at91sam9_wdt.c309
-rw-r--r--drivers/watchdog/ath79_wdt.c1
-rw-r--r--drivers/watchdog/bcm2835_wdt.c1
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c1
-rw-r--r--drivers/watchdog/bfin_wdt.c1
-rw-r--r--drivers/watchdog/cpu5wdt.c1
-rw-r--r--drivers/watchdog/davinci_wdt.c1
-rw-r--r--drivers/watchdog/dw_wdt.c36
-rw-r--r--drivers/watchdog/ep93xx_wdt.c1
-rw-r--r--drivers/watchdog/eurotechwdt.c1
-rw-r--r--drivers/watchdog/gef_wdt.c2
-rw-r--r--drivers/watchdog/geodewdt.c1
-rw-r--r--drivers/watchdog/hpwdt.c1
-rw-r--r--drivers/watchdog/i6300esb.c1
-rw-r--r--drivers/watchdog/iTCO_wdt.c5
-rw-r--r--drivers/watchdog/ib700wdt.c1
-rw-r--r--drivers/watchdog/ibmasr.c1
-rw-r--r--drivers/watchdog/ie6xx_wdt.c1
-rw-r--r--drivers/watchdog/imx2_wdt.c2
-rw-r--r--drivers/watchdog/indydog.c1
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c5
-rw-r--r--drivers/watchdog/iop_wdt.c1
-rw-r--r--drivers/watchdog/it8712f_wdt.c1
-rw-r--r--drivers/watchdog/it87_wdt.c1
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c2
-rw-r--r--drivers/watchdog/jz4740_wdt.c1
-rw-r--r--drivers/watchdog/kempld_wdt.c5
-rw-r--r--drivers/watchdog/ks8695_wdt.c1
-rw-r--r--drivers/watchdog/lantiq_wdt.c1
-rw-r--r--drivers/watchdog/m54xx_wdt.c1
-rw-r--r--drivers/watchdog/machzwd.c1
-rw-r--r--drivers/watchdog/max63xx_wdt.c1
-rw-r--r--drivers/watchdog/mixcomwd.c1
-rw-r--r--drivers/watchdog/moxart_wdt.c165
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c2
-rw-r--r--drivers/watchdog/mtx-1_wdt.c1
-rw-r--r--drivers/watchdog/mv64x60_wdt.c3
-rw-r--r--drivers/watchdog/nuc900_wdt.c1
-rw-r--r--drivers/watchdog/nv_tco.c1
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c1
-rw-r--r--drivers/watchdog/omap_wdt.c2
-rw-r--r--drivers/watchdog/orion_wdt.c3
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/pcwd.c4
-rw-r--r--drivers/watchdog/pcwd_pci.c4
-rw-r--r--drivers/watchdog/pcwd_usb.c12
-rw-r--r--drivers/watchdog/pika_wdt.c2
-rw-r--r--drivers/watchdog/pnx4008_wdt.c1
-rw-r--r--drivers/watchdog/pnx833x_wdt.c1
-rw-r--r--drivers/watchdog/rc32434_wdt.c4
-rw-r--r--drivers/watchdog/rdc321x_wdt.c3
-rw-r--r--drivers/watchdog/rt2880_wdt.c207
-rw-r--r--drivers/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/watchdog/sa1100_wdt.c1
-rw-r--r--drivers/watchdog/sb_wdog.c1
-rw-r--r--drivers/watchdog/sbc60xxwdt.c1
-rw-r--r--drivers/watchdog/sbc7240_wdt.c2
-rw-r--r--drivers/watchdog/sbc8360.c1
-rw-r--r--drivers/watchdog/sbc_epx_c3.c1
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c2
-rw-r--r--drivers/watchdog/sc1200wdt.c1
-rw-r--r--drivers/watchdog/sc520_wdt.c1
-rw-r--r--drivers/watchdog/sch311x_wdt.c5
-rw-r--r--drivers/watchdog/scx200_wdt.c1
-rw-r--r--drivers/watchdog/shwdt.c1
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c224
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c2
-rw-r--r--drivers/watchdog/softdog.c1
-rw-r--r--drivers/watchdog/sp5100_tco.c1
-rw-r--r--drivers/watchdog/sp805_wdt.c1
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c5
-rw-r--r--drivers/watchdog/sunxi_wdt.c2
-rw-r--r--drivers/watchdog/ts72xx_wdt.c38
-rw-r--r--drivers/watchdog/txx9wdt.c1
-rw-r--r--drivers/watchdog/ux500_wdt.c3
-rw-r--r--drivers/watchdog/w83627hf_wdt.c1
-rw-r--r--drivers/watchdog/w83697hf_wdt.c1
-rw-r--r--drivers/watchdog/w83697ug_wdt.c1
-rw-r--r--drivers/watchdog/w83877f_wdt.c1
-rw-r--r--drivers/watchdog/w83977f_wdt.c1
-rw-r--r--drivers/watchdog/wafer5823wdt.c1
-rw-r--r--drivers/watchdog/watchdog_core.c2
-rw-r--r--drivers/watchdog/wdrtas.c2
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt285.c1
-rw-r--r--drivers/watchdog/wdt977.c1
-rw-r--r--drivers/watchdog/wdt_pci.c2
-rw-r--r--drivers/watchdog/wm831x_wdt.c8
-rw-r--r--drivers/watchdog/xen_wdt.c1
-rw-r--r--drivers/xen/Kconfig1
-rw-r--r--drivers/xen/grant-table.c19
-rw-r--r--drivers/xen/swiotlb-xen.c118
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c24
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h2
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c2
-rw-r--r--fs/9p/cache.c9
-rw-r--r--fs/9p/cache.h12
-rw-r--r--fs/9p/v9fs.c9
-rw-r--r--fs/9p/v9fs.h1
-rw-r--r--fs/9p/v9fs_vfs.h2
-rw-r--r--fs/9p/vfs_addr.c19
-rw-r--r--fs/9p/vfs_file.c154
-rw-r--r--fs/9p/vfs_inode.c32
-rw-r--r--fs/9p/vfs_inode_dotl.c21
-rw-r--r--fs/9p/vfs_super.c8
-rw-r--r--fs/9p/xattr.c10
-rw-r--r--fs/Makefile2
-rw-r--r--fs/adfs/file.c4
-rw-r--r--fs/affs/file.c4
-rw-r--r--fs/afs/cell.c2
-rw-r--r--fs/afs/file.c4
-rw-r--r--fs/afs/inode.c2
-rw-r--r--fs/afs/internal.h3
-rw-r--r--fs/afs/vlocation.c3
-rw-r--r--fs/afs/volume.c2
-rw-r--r--fs/afs/write.c9
-rw-r--r--fs/aio.c136
-rw-r--r--fs/bad_inode.c14
-rw-r--r--fs/befs/linuxvfs.c61
-rw-r--r--fs/bfs/file.c4
-rw-r--r--fs/bio-integrity.c191
-rw-r--r--fs/bio.c437
-rw-r--r--fs/block_dev.c27
-rw-r--r--fs/btrfs/Kconfig3
-rw-r--r--fs/btrfs/check-integrity.c8
-rw-r--r--fs/btrfs/compression.c17
-rw-r--r--fs/btrfs/ctree.h5
-rw-r--r--fs/btrfs/extent_io.c16
-rw-r--r--fs/btrfs/file-item.c19
-rw-r--r--fs/btrfs/file.c42
-rw-r--r--fs/btrfs/inode.c85
-rw-r--r--fs/btrfs/ioctl.c8
-rw-r--r--fs/btrfs/raid56.c22
-rw-r--r--fs/btrfs/scrub.c12
-rw-r--r--fs/btrfs/volumes.c12
-rw-r--r--fs/buffer.c12
-rw-r--r--fs/cachefiles/interface.c2
-rw-r--r--fs/ceph/addr.c3
-rw-r--r--fs/ceph/cache.c7
-rw-r--r--fs/ceph/caps.c27
-rw-r--r--fs/ceph/dir.c11
-rw-r--r--fs/ceph/file.c435
-rw-r--r--fs/ceph/inode.c59
-rw-r--r--fs/ceph/mds_client.c53
-rw-r--r--fs/ceph/mds_client.h1
-rw-r--r--fs/ceph/super.c1
-rw-r--r--fs/ceph/super.h9
-rw-r--r--fs/char_dev.c3
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h32
-rw-r--r--fs/cifs/cifspdu.h34
-rw-r--r--fs/cifs/cifsproto.h2
-rw-r--r--fs/cifs/cifssmb.c54
-rw-r--r--fs/cifs/connect.c25
-rw-r--r--fs/cifs/file.c4
-rw-r--r--fs/cifs/fscache.c8
-rw-r--r--fs/cifs/ioctl.c160
-rw-r--r--fs/cifs/link.c7
-rw-r--r--fs/cifs/misc.c22
-rw-r--r--fs/cifs/smb1ops.c12
-rw-r--r--fs/cifs/smb2ops.c196
-rw-r--r--fs/cifs/smb2pdu.c115
-rw-r--r--fs/cifs/smb2pdu.h43
-rw-r--r--fs/cifs/smb2proto.h4
-rw-r--r--fs/cifs/smb2transport.c12
-rw-r--r--fs/cifs/transport.c13
-rw-r--r--fs/dcache.c46
-rw-r--r--fs/direct-io.c227
-rw-r--r--fs/dlm/lockspace.c4
-rw-r--r--fs/ecryptfs/crypto.c2
-rw-r--r--fs/ecryptfs/dentry.c29
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h19
-rw-r--r--fs/ecryptfs/file.c15
-rw-r--r--fs/ecryptfs/inode.c16
-rw-r--r--fs/ecryptfs/keystore.c3
-rw-r--r--fs/ecryptfs/main.c3
-rw-r--r--fs/eventpoll.c4
-rw-r--r--fs/exec.c1
-rw-r--r--fs/exofs/file.c4
-rw-r--r--fs/ext2/file.c4
-rw-r--r--fs/ext2/inode.c8
-rw-r--r--fs/ext3/dir.c36
-rw-r--r--fs/ext3/file.c4
-rw-r--r--fs/ext3/inode.c15
-rw-r--r--fs/ext3/super.c4
-rw-r--r--fs/ext4/ext4.h9
-rw-r--r--fs/ext4/file.c34
-rw-r--r--fs/ext4/indirect.c16
-rw-r--r--fs/ext4/inode.c52
-rw-r--r--fs/ext4/page-io.c9
-rw-r--r--fs/ext4/super.c152
-rw-r--r--fs/f2fs/Kconfig8
-rw-r--r--fs/f2fs/acl.c36
-rw-r--r--fs/f2fs/acl.h9
-rw-r--r--fs/f2fs/checkpoint.c68
-rw-r--r--fs/f2fs/data.c35
-rw-r--r--fs/f2fs/dir.c4
-rw-r--r--fs/f2fs/f2fs.h117
-rw-r--r--fs/f2fs/file.c49
-rw-r--r--fs/f2fs/gc.c31
-rw-r--r--fs/f2fs/inode.c62
-rw-r--r--fs/f2fs/namei.c52
-rw-r--r--fs/f2fs/node.c142
-rw-r--r--fs/f2fs/recovery.c45
-rw-r--r--fs/f2fs/segment.c96
-rw-r--r--fs/f2fs/segment.h38
-rw-r--r--fs/f2fs/super.c132
-rw-r--r--fs/f2fs/xattr.c36
-rw-r--r--fs/fat/file.c4
-rw-r--r--fs/fat/inode.c10
-rw-r--r--fs/file_table.c4
-rw-r--r--fs/fscache/cookie.c193
-rw-r--r--fs/fscache/fsdef.c1
-rw-r--r--fs/fscache/netfs.c1
-rw-r--r--fs/fscache/object.c9
-rw-r--r--fs/fscache/page.c59
-rw-r--r--fs/fuse/cuse.c15
-rw-r--r--fs/fuse/dir.c42
-rw-r--r--fs/fuse/file.c451
-rw-r--r--fs/fuse/fuse_i.h9
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/gfs2/aops.c11
-rw-r--r--fs/gfs2/bmap.c7
-rw-r--r--fs/gfs2/file.c31
-rw-r--r--fs/gfs2/glock.c83
-rw-r--r--fs/gfs2/glock.h2
-rw-r--r--fs/gfs2/glops.c4
-rw-r--r--fs/gfs2/incore.h41
-rw-r--r--fs/gfs2/inode.c53
-rw-r--r--fs/gfs2/lops.c2
-rw-r--r--fs/gfs2/main.c19
-rw-r--r--fs/gfs2/ops_fstype.c4
-rw-r--r--fs/gfs2/quota.c321
-rw-r--r--fs/gfs2/quota.h9
-rw-r--r--fs/gfs2/rgrp.c208
-rw-r--r--fs/gfs2/rgrp.h4
-rw-r--r--fs/gfs2/super.c2
-rw-r--r--fs/gfs2/sys.c2
-rw-r--r--fs/gfs2/util.c20
-rw-r--r--fs/gfs2/util.h2
-rw-r--r--fs/gfs2/xattr.c3
-rw-r--r--fs/hfs/inode.c11
-rw-r--r--fs/hfsplus/inode.c10
-rw-r--r--fs/hfsplus/wrapper.c2
-rw-r--r--fs/hostfs/hostfs_kern.c4
-rw-r--r--fs/hpfs/file.c4
-rw-r--r--fs/internal.h4
-rw-r--r--fs/iov-iter.c411
-rw-r--r--fs/jbd/transaction.c8
-rw-r--r--fs/jffs2/file.c8
-rw-r--r--fs/jffs2/fs.c4
-rw-r--r--fs/jfs/file.c4
-rw-r--r--fs/jfs/inode.c7
-rw-r--r--fs/jfs/jfs_inode.c3
-rw-r--r--fs/jfs/jfs_logmgr.c12
-rw-r--r--fs/jfs/jfs_metapage.c9
-rw-r--r--fs/libfs.c79
-rw-r--r--fs/logfs/dev_bdev.c20
-rw-r--r--fs/logfs/dev_mtd.c2
-rw-r--r--fs/logfs/file.c4
-rw-r--r--fs/logfs/super.c6
-rw-r--r--fs/minix/Kconfig2
-rw-r--r--fs/minix/file.c4
-rw-r--r--fs/mpage.c2
-rw-r--r--fs/namei.c3
-rw-r--r--fs/namespace.c22
-rw-r--r--fs/ncpfs/dir.c55
-rw-r--r--fs/ncpfs/file.c12
-rw-r--r--fs/nfs/Kconfig11
-rw-r--r--fs/nfs/blocklayout/blocklayout.c9
-rw-r--r--fs/nfs/callback.c3
-rw-r--r--fs/nfs/client.c10
-rw-r--r--fs/nfs/dir.c129
-rw-r--r--fs/nfs/direct.c316
-rw-r--r--fs/nfs/file.c146
-rw-r--r--fs/nfs/fscache.c202
-rw-r--r--fs/nfs/fscache.h18
-rw-r--r--fs/nfs/inode.c8
-rw-r--r--fs/nfs/internal.h12
-rw-r--r--fs/nfs/namespace.c5
-rw-r--r--fs/nfs/nfs3proc.c8
-rw-r--r--fs/nfs/nfs4_fs.h17
-rw-r--r--fs/nfs/nfs4client.c138
-rw-r--r--fs/nfs/nfs4file.c10
-rw-r--r--fs/nfs/nfs4namespace.c125
-rw-r--r--fs/nfs/nfs4proc.c447
-rw-r--r--fs/nfs/nfs4state.c264
-rw-r--r--fs/nfs/nfs4super.c12
-rw-r--r--fs/nfs/nfs4xdr.c113
-rw-r--r--fs/nfs/proc.c8
-rw-r--r--fs/nfs/super.c198
-rw-r--r--fs/nfs/unlink.c12
-rw-r--r--fs/nfs/write.c6
-rw-r--r--fs/nfsd/Kconfig2
-rw-r--r--fs/nfsd/export.c24
-rw-r--r--fs/nfsd/nfs4recover.c12
-rw-r--r--fs/nfsd/nfs4state.c43
-rw-r--r--fs/nfsd/nfs4xdr.c101
-rw-r--r--fs/nfsd/nfsfh.c36
-rw-r--r--fs/nfsd/nfsfh.h4
-rw-r--r--fs/nfsd/vfs.c9
-rw-r--r--fs/nilfs2/file.c4
-rw-r--r--fs/nilfs2/inode.c8
-rw-r--r--fs/nilfs2/segbuf.c3
-rw-r--r--fs/ocfs2/aops.c8
-rw-r--r--fs/ocfs2/aops.h2
-rw-r--r--fs/ocfs2/cluster/heartbeat.c2
-rw-r--r--fs/ocfs2/file.c55
-rw-r--r--fs/ocfs2/ocfs2_trace.h6
-rw-r--r--fs/omfs/file.c4
-rw-r--r--fs/proc/array.c2
-rw-r--r--fs/proc/proc_devtree.c3
-rw-r--r--fs/proc/self.c10
-rw-r--r--fs/quota/quota.c1
-rw-r--r--fs/ramfs/file-mmu.c4
-rw-r--r--fs/ramfs/file-nommu.c4
-rw-r--r--fs/read_write.c78
-rw-r--r--fs/reiserfs/file.c4
-rw-r--r--fs/reiserfs/inode.c7
-rw-r--r--fs/romfs/mmap-nommu.c2
-rw-r--r--fs/select.c3
-rw-r--r--fs/seq_file.c2
-rw-r--r--fs/squashfs/Kconfig13
-rw-r--r--fs/squashfs/Makefile7
-rw-r--r--fs/squashfs/block.c11
-rw-r--r--fs/squashfs/decompressor.c47
-rw-r--r--fs/squashfs/decompressor.h21
-rw-r--r--fs/squashfs/decompressor_multi.c200
-rw-r--r--fs/squashfs/decompressor_single.c86
-rw-r--r--fs/squashfs/lzo_wrapper.c24
-rw-r--r--fs/squashfs/squashfs.h9
-rw-r--r--fs/squashfs/squashfs_fs_sb.h3
-rw-r--r--fs/squashfs/super.c10
-rw-r--r--fs/squashfs/xz_wrapper.c89
-rw-r--r--fs/squashfs/zlib_wrapper.c50
-rw-r--r--fs/sysfs/Makefile3
-rw-r--r--fs/sysfs/bin.c502
-rw-r--r--fs/sysfs/dir.c424
-rw-r--r--fs/sysfs/file.c872
-rw-r--r--fs/sysfs/group.c33
-rw-r--r--fs/sysfs/inode.c30
-rw-r--r--fs/sysfs/mount.c24
-rw-r--r--fs/sysfs/symlink.c71
-rw-r--r--fs/sysfs/sysfs.h96
-rw-r--r--fs/sysv/file.c4
-rw-r--r--fs/ubifs/debug.c6
-rw-r--r--fs/ubifs/dir.c41
-rw-r--r--fs/ubifs/file.c12
-rw-r--r--fs/ubifs/gc.c3
-rw-r--r--fs/ubifs/journal.c6
-rw-r--r--fs/ubifs/super.c8
-rw-r--r--fs/ubifs/xattr.c16
-rw-r--r--fs/udf/file.c13
-rw-r--r--fs/udf/inode.c10
-rw-r--r--fs/udf/super.c45
-rw-r--r--fs/ufs/file.c4
-rw-r--r--fs/xfs/Makefile8
-rw-r--r--fs/xfs/xfs_acl.c8
-rw-r--r--fs/xfs/xfs_ag.h4
-rw-r--r--fs/xfs/xfs_alloc.c14
-rw-r--r--fs/xfs/xfs_alloc.h3
-rw-r--r--fs/xfs/xfs_alloc_btree.c14
-rw-r--r--fs/xfs/xfs_alloc_btree.h35
-rw-r--r--fs/xfs/xfs_aops.c31
-rw-r--r--fs/xfs/xfs_attr.c12
-rw-r--r--fs/xfs/xfs_attr_inactive.c21
-rw-r--r--fs/xfs/xfs_attr_leaf.c29
-rw-r--r--fs/xfs/xfs_attr_leaf.h232
-rw-r--r--fs/xfs/xfs_attr_list.c32
-rw-r--r--fs/xfs/xfs_attr_remote.c14
-rw-r--r--fs/xfs/xfs_attr_remote.h29
-rw-r--r--fs/xfs/xfs_bit.c4
-rw-r--r--fs/xfs/xfs_bmap.c22
-rw-r--r--fs/xfs/xfs_bmap_btree.c13
-rw-r--r--fs/xfs/xfs_bmap_btree.h105
-rw-r--r--fs/xfs/xfs_bmap_util.c293
-rw-r--r--fs/xfs/xfs_bmap_util.h9
-rw-r--r--fs/xfs/xfs_btree.c12
-rw-r--r--fs/xfs/xfs_btree.h79
-rw-r--r--fs/xfs/xfs_buf.c15
-rw-r--r--fs/xfs/xfs_buf_item.c9
-rw-r--r--fs/xfs/xfs_buf_item.h4
-rw-r--r--fs/xfs/xfs_da_btree.c264
-rw-r--r--fs/xfs/xfs_da_btree.h143
-rw-r--r--fs/xfs/xfs_da_format.c907
-rw-r--r--fs/xfs/xfs_da_format.h (renamed from fs/xfs/xfs_dir2_format.h)681
-rw-r--r--fs/xfs/xfs_dir2.c20
-rw-r--r--fs/xfs/xfs_dir2.h106
-rw-r--r--fs/xfs/xfs_dir2_block.c109
-rw-r--r--fs/xfs/xfs_dir2_data.c161
-rw-r--r--fs/xfs/xfs_dir2_leaf.c243
-rw-r--r--fs/xfs/xfs_dir2_node.c351
-rw-r--r--fs/xfs/xfs_dir2_priv.h20
-rw-r--r--fs/xfs/xfs_dir2_readdir.c42
-rw-r--r--fs/xfs/xfs_dir2_sf.c216
-rw-r--r--fs/xfs/xfs_discard.c11
-rw-r--r--fs/xfs/xfs_dquot.c133
-rw-r--r--fs/xfs/xfs_dquot.h2
-rw-r--r--fs/xfs/xfs_dquot_buf.c288
-rw-r--r--fs/xfs/xfs_dquot_item.c14
-rw-r--r--fs/xfs/xfs_error.c11
-rw-r--r--fs/xfs/xfs_export.c12
-rw-r--r--fs/xfs/xfs_extent_busy.c11
-rw-r--r--fs/xfs/xfs_extent_busy.h4
-rw-r--r--fs/xfs/xfs_extfree_item.c8
-rw-r--r--fs/xfs/xfs_file.c143
-rw-r--r--fs/xfs/xfs_filestream.c12
-rw-r--r--fs/xfs/xfs_format.h263
-rw-r--r--fs/xfs/xfs_fs.h4
-rw-r--r--fs/xfs/xfs_fsops.c45
-rw-r--r--fs/xfs/xfs_ialloc.c14
-rw-r--r--fs/xfs/xfs_ialloc.h5
-rw-r--r--fs/xfs/xfs_ialloc_btree.c13
-rw-r--r--fs/xfs/xfs_ialloc_btree.h51
-rw-r--r--fs/xfs/xfs_icache.c20
-rw-r--r--fs/xfs/xfs_icreate_item.c7
-rw-r--r--fs/xfs/xfs_inode.c266
-rw-r--r--fs/xfs/xfs_inode.h6
-rw-r--r--fs/xfs/xfs_inode_buf.c10
-rw-r--r--fs/xfs/xfs_inode_buf.h3
-rw-r--r--fs/xfs/xfs_inode_fork.c31
-rw-r--r--fs/xfs/xfs_inode_fork.h1
-rw-r--r--fs/xfs/xfs_inode_item.c12
-rw-r--r--fs/xfs/xfs_ioctl.c146
-rw-r--r--fs/xfs/xfs_ioctl32.c7
-rw-r--r--fs/xfs/xfs_iomap.c23
-rw-r--r--fs/xfs/xfs_iomap.h8
-rw-r--r--fs/xfs/xfs_iops.c70
-rw-r--r--fs/xfs/xfs_iops.h8
-rw-r--r--fs/xfs/xfs_itable.c15
-rw-r--r--fs/xfs/xfs_log.c74
-rw-r--r--fs/xfs/xfs_log.h10
-rw-r--r--fs/xfs/xfs_log_cil.c26
-rw-r--r--fs/xfs/xfs_log_format.h177
-rw-r--r--fs/xfs/xfs_log_priv.h17
-rw-r--r--fs/xfs/xfs_log_recover.c171
-rw-r--r--fs/xfs/xfs_log_rlimit.c9
-rw-r--r--fs/xfs/xfs_message.c5
-rw-r--r--fs/xfs/xfs_mount.c21
-rw-r--r--fs/xfs/xfs_mount.h3
-rw-r--r--fs/xfs/xfs_qm.c39
-rw-r--r--fs/xfs/xfs_qm.h2
-rw-r--r--fs/xfs/xfs_qm_bhv.c12
-rw-r--r--fs/xfs/xfs_qm_syscalls.c28
-rw-r--r--fs/xfs/xfs_quota.h4
-rw-r--r--fs/xfs/xfs_quota_defs.h4
-rw-r--r--fs/xfs/xfs_quotaops.c5
-rw-r--r--fs/xfs/xfs_rtalloc.c1552
-rw-r--r--fs/xfs/xfs_rtalloc.h24
-rw-r--r--fs/xfs/xfs_rtbitmap.c974
-rw-r--r--fs/xfs/xfs_sb.c46
-rw-r--r--fs/xfs/xfs_sb.h3
-rw-r--r--fs/xfs/xfs_shared.h244
-rw-r--r--fs/xfs/xfs_super.c38
-rw-r--r--fs/xfs/xfs_symlink.c102
-rw-r--r--fs/xfs/xfs_symlink.h2
-rw-r--r--fs/xfs/xfs_symlink_remote.c6
-rw-r--r--fs/xfs/xfs_trace.c16
-rw-r--r--fs/xfs/xfs_trans.c23
-rw-r--r--fs/xfs/xfs_trans.h20
-rw-r--r--fs/xfs/xfs_trans_ail.c7
-rw-r--r--fs/xfs/xfs_trans_buf.c12
-rw-r--r--fs/xfs/xfs_trans_dquot.c15
-rw-r--r--fs/xfs/xfs_trans_extfree.c7
-rw-r--r--fs/xfs/xfs_trans_inode.c13
-rw-r--r--fs/xfs/xfs_trans_priv.h1
-rw-r--r--fs/xfs/xfs_trans_resv.c18
-rw-r--r--fs/xfs/xfs_vnode.h8
-rw-r--r--fs/xfs/xfs_xattr.c8
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acexcep.h8
-rw-r--r--include/acpi/acnames.h26
-rw-r--r--include/acpi/acpi_bus.h3
-rw-r--r--include/acpi/acpiosxf.h155
-rw-r--r--include/acpi/acpixf.h93
-rw-r--r--include/acpi/actbl.h19
-rw-r--r--include/acpi/actbl1.h14
-rw-r--r--include/acpi/actbl2.h4
-rw-r--r--include/acpi/actypes.h77
-rw-r--r--include/acpi/ghes.h2
-rw-r--r--include/acpi/platform/acenv.h8
-rw-r--r--include/acpi/platform/aclinux.h134
-rw-r--r--include/acpi/processor.h4
-rw-r--r--include/asm-generic/memory_model.h2
-rw-r--r--include/asm-generic/preempt.h105
-rw-r--r--include/asm-generic/simd.h14
-rw-r--r--include/asm-generic/vmlinux.lds.h1
-rw-r--r--include/clocksource/arm_arch_timer.h10
-rw-r--r--include/crypto/ablk_helper.h (renamed from arch/x86/include/asm/crypto/ablk_helper.h)0
-rw-r--r--include/crypto/algapi.h18
-rw-r--r--include/crypto/authenc.h12
-rw-r--r--include/crypto/hash_info.h40
-rw-r--r--include/crypto/public_key.h25
-rw-r--r--include/drm/drmP.h63
-rw-r--r--include/drm/drm_crtc.h37
-rw-r--r--include/drm/drm_crtc_helper.h2
-rw-r--r--include/drm/drm_dp_helper.h31
-rw-r--r--include/dt-bindings/mfd/as3722.h52
-rw-r--r--include/dt-bindings/mfd/dbx500-prcmu.h83
-rw-r--r--include/dt-bindings/pinctrl/am43xx.h31
-rw-r--r--include/dt-bindings/pinctrl/dra.h50
-rw-r--r--include/keys/big_key-type.h25
-rw-r--r--include/keys/keyring-type.h17
-rw-r--r--include/keys/system_keyring.h23
-rw-r--r--include/linux/acpi.h94
-rw-r--r--include/linux/aio.h25
-rw-r--r--include/linux/amba/bus.h2
-rw-r--r--include/linux/amba/serial.h2
-rw-r--r--include/linux/assoc_array.h92
-rw-r--r--include/linux/assoc_array_priv.h182
-rw-r--r--include/linux/ata.h7
-rw-r--r--include/linux/atmel_serial.h1
-rw-r--r--include/linux/backing-dev.h4
-rw-r--r--include/linux/backlight.h4
-rw-r--r--include/linux/bio.h290
-rw-r--r--include/linux/bitops.h11
-rw-r--r--include/linux/blk-mq.h183
-rw-r--r--include/linux/blk_types.h102
-rw-r--r--include/linux/blkdev.h69
-rw-r--r--include/linux/blktrace_api.h4
-rw-r--r--include/linux/ceph/messenger.h4
-rw-r--r--include/linux/cgroup.h37
-rw-r--r--include/linux/clk/mxs.h2
-rw-r--r--include/linux/clk/sunxi.h22
-rw-r--r--include/linux/clockchips.h1
-rw-r--r--include/linux/clocksource.h2
-rw-r--r--include/linux/cper.h13
-rw-r--r--include/linux/cpu.h16
-rw-r--r--include/linux/cpufreq.h83
-rw-r--r--include/linux/cpuidle.h8
-rw-r--r--include/linux/crc32.h40
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/debugfs.h12
-rw-r--r--include/linux/devfreq.h8
-rw-r--r--include/linux/device.h64
-rw-r--r--include/linux/dm-io.h4
-rw-r--r--include/linux/dma-mapping.h31
-rw-r--r--include/linux/dmaengine.h12
-rw-r--r--include/linux/dmi.h5
-rw-r--r--include/linux/edac.h2
-rw-r--r--include/linux/efi.h58
-rw-r--r--include/linux/etherdevice.h35
-rw-r--r--include/linux/fb.h12
-rw-r--r--include/linux/fcdevice.h2
-rw-r--r--include/linux/fddidevice.h7
-rw-r--r--include/linux/filter.h15
-rw-r--r--include/linux/fs.h171
-rw-r--r--include/linux/fscache-cache.h50
-rw-r--r--include/linux/fscache.h113
-rw-r--r--include/linux/ftrace.h1
-rw-r--r--include/linux/hardirq.h8
-rw-r--r--include/linux/hashtable.h15
-rw-r--r--include/linux/hippidevice.h10
-rw-r--r--include/linux/host1x.h284
-rw-r--r--include/linux/hwmon-vid.h2
-rw-r--r--include/linux/hwmon.h10
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/ide.h2
-rw-r--r--include/linux/ieee80211.h4
-rw-r--r--include/linux/inetdevice.h28
-rw-r--r--include/linux/interrupt.h11
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/ipc_namespace.h6
-rw-r--r--include/linux/ipv6.h76
-rw-r--r--include/linux/irqchip/arm-gic.h7
-rw-r--r--include/linux/jump_label.h10
-rw-r--r--include/linux/jump_label_ratelimit.h2
-rw-r--r--include/linux/kdb.h1
-rw-r--r--include/linux/key-type.h6
-rw-r--r--include/linux/key.h52
-rw-r--r--include/linux/kgdb.h1
-rw-r--r--include/linux/kobj_completion.h18
-rw-r--r--include/linux/kobject.h1
-rw-r--r--include/linux/kvm_host.h42
-rw-r--r--include/linux/lockref.h6
-rw-r--r--include/linux/mempolicy.h1
-rw-r--r--include/linux/mfd/arizona/registers.h2
-rw-r--r--include/linux/mfd/as3722.h423
-rw-r--r--include/linux/mfd/core.h6
-rw-r--r--include/linux/mfd/da9052/da9052.h20
-rw-r--r--include/linux/mfd/dbx500-prcmu.h70
-rw-r--r--include/linux/mfd/max77693-private.h1
-rw-r--r--include/linux/mfd/max77693.h2
-rw-r--r--include/linux/mfd/mc13xxx.h7
-rw-r--r--include/linux/mfd/rtsx_pci.h53
-rw-r--r--include/linux/mfd/si476x-core.h2
-rw-r--r--include/linux/mfd/stw481x.h56
-rw-r--r--include/linux/mfd/syscon.h25
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h13
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h20
-rw-r--r--include/linux/mfd/wm8994/core.h47
-rw-r--r--include/linux/migrate.h7
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mlx4/cmd.h6
-rw-r--r--include/linux/mlx4/device.h22
-rw-r--r--include/linux/mlx5/device.h13
-rw-r--r--include/linux/mlx5/driver.h18
-rw-r--r--include/linux/mm.h132
-rw-r--r--include/linux/mm_types.h41
-rw-r--r--include/linux/mmc/card.h7
-rw-r--r--include/linux/mmc/core.h4
-rw-r--r--include/linux/mmc/dw_mmc.h4
-rw-r--r--include/linux/mmc/host.h5
-rw-r--r--include/linux/module.h3
-rw-r--r--include/linux/mtd/bbm.h2
-rw-r--r--include/linux/mtd/map.h4
-rw-r--r--include/linux/mtd/mtd.h8
-rw-r--r--include/linux/mtd/nand.h16
-rw-r--r--include/linux/net.h109
-rw-r--r--include/linux/netdev_features.h4
-rw-r--r--include/linux/netdevice.h481
-rw-r--r--include/linux/netfilter.h24
-rw-r--r--include/linux/netfilter/ipset/ip_set.h151
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h57
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_h323.h14
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_sip.h162
-rw-r--r--include/linux/netfilter/nfnetlink.h29
-rw-r--r--include/linux/netfilter/nfnetlink_acct.h6
-rw-r--r--include/linux/netfilter/x_tables.h128
-rw-r--r--include/linux/netfilter_bridge.h4
-rw-r--r--include/linux/netfilter_ipv4.h6
-rw-r--r--include/linux/netfilter_ipv6.h10
-rw-r--r--include/linux/netpoll.h5
-rw-r--r--include/linux/nfs4.h4
-rw-r--r--include/linux/nfs_fs.h21
-rw-r--r--include/linux/nfs_fs_sb.h10
-rw-r--r--include/linux/nfs_xdr.h24
-rw-r--r--include/linux/of.h35
-rw-r--r--include/linux/of_address.h39
-rw-r--r--include/linux/of_fdt.h19
-rw-r--r--include/linux/of_irq.h37
-rw-r--r--include/linux/of_mtd.h21
-rw-r--r--include/linux/of_pci.h5
-rw-r--r--include/linux/opp.h134
-rw-r--r--include/linux/padata.h3
-rw-r--r--include/linux/page-flags-layout.h28
-rw-r--r--include/linux/page-flags.h4
-rw-r--r--include/linux/pci.h10
-rw-r--r--include/linux/percpu.h40
-rw-r--r--include/linux/percpu_ida.h23
-rw-r--r--include/linux/perf_event.h5
-rw-r--r--include/linux/platform_data/at24.h (renamed from include/linux/i2c/at24.h)2
-rw-r--r--include/linux/platform_data/clk-nomadik.h2
-rw-r--r--include/linux/platform_data/clk-ux500.h3
-rw-r--r--include/linux/platform_data/davinci_asp.h2
-rw-r--r--include/linux/platform_data/dma-s3c24xx.h46
-rw-r--r--include/linux/platform_data/edma.h8
-rw-r--r--include/linux/platform_data/gpio-davinci.h60
-rw-r--r--include/linux/platform_data/leds-lp55xx.h7
-rw-r--r--include/linux/platform_data/leds-pca9685.h35
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h5
-rw-r--r--include/linux/platform_data/mtd-nand-omap2.h18
-rw-r--r--include/linux/platform_data/pinctrl-single.h12
-rw-r--r--include/linux/platform_data/zforce_ts.h (renamed from arch/arm/mach-tegra/board-paz00.h)21
-rw-r--r--include/linux/platform_device.h1
-rw-r--r--include/linux/pm_opp.h139
-rw-r--r--include/linux/power/bq24735-charger.h (renamed from arch/arm/mach-versatile/include/mach/timex.h)28
-rw-r--r--include/linux/powercap.h325
-rw-r--r--include/linux/preempt.h112
-rw-r--r--include/linux/rculist.h23
-rw-r--r--include/linux/rcupdate.h24
-rw-r--r--include/linux/rcutiny.h17
-rw-r--r--include/linux/rcutree.h2
-rw-r--r--include/linux/regmap.h53
-rw-r--r--include/linux/regulator/consumer.h79
-rw-r--r--include/linux/regulator/driver.h18
-rw-r--r--include/linux/regulator/machine.h7
-rw-r--r--include/linux/rtnetlink.h2
-rw-r--r--include/linux/sched.h167
-rw-r--r--include/linux/sched/sysctl.h3
-rw-r--r--include/linux/sched_clock.h4
-rw-r--r--include/linux/security.h26
-rw-r--r--include/linux/serial_core.h1
-rw-r--r--include/linux/serial_sci.h6
-rw-r--r--include/linux/sfi.h3
-rw-r--r--include/linux/skbuff.h282
-rw-r--r--include/linux/slab.h9
-rw-r--r--include/linux/slab_def.h4
-rw-r--r--include/linux/spi/rspi.h2
-rw-r--r--include/linux/spi/spi.h61
-rw-r--r--include/linux/ssb/ssb_driver_gige.h14
-rw-r--r--include/linux/stop_machine.h1
-rw-r--r--include/linux/sunrpc/clnt.h6
-rw-r--r--include/linux/sunrpc/sched.h1
-rw-r--r--include/linux/sunrpc/xprt.h2
-rw-r--r--include/linux/sysfs.h88
-rw-r--r--include/linux/sysrq.h3
-rw-r--r--include/linux/tegra-powergate.h9
-rw-r--r--include/linux/thinkpad_acpi.h15
-rw-r--r--include/linux/thread_info.h17
-rw-r--r--include/linux/topology.h6
-rw-r--r--include/linux/tty.h29
-rw-r--r--include/linux/uaccess.h8
-rw-r--r--include/linux/uprobes.h6
-rw-r--r--include/linux/usb/cdc_ncm.h30
-rw-r--r--include/linux/user_namespace.h6
-rw-r--r--include/linux/virtio.h6
-rw-r--r--include/linux/virtio_config.h161
-rw-r--r--include/linux/virtio_ring.h2
-rw-r--r--include/linux/wait.h392
-rw-r--r--include/linux/yam.h2
-rw-r--r--include/media/lm3560.h97
-rw-r--r--include/media/soc_camera.h27
-rw-r--r--include/media/v4l2-clk.h17
-rw-r--r--include/media/v4l2-common.h4
-rw-r--r--include/media/v4l2-ctrls.h2
-rw-r--r--include/media/v4l2-fh.h2
-rw-r--r--include/media/v4l2-subdev.h19
-rw-r--r--include/media/videobuf2-core.h4
-rw-r--r--include/media/videobuf2-dma-sg.h10
-rw-r--r--include/net/bluetooth/bluetooth.h27
-rw-r--r--include/net/bluetooth/hci.h155
-rw-r--r--include/net/bluetooth/hci_core.h193
-rw-r--r--include/net/bluetooth/l2cap.h37
-rw-r--r--include/net/bluetooth/mgmt.h18
-rw-r--r--include/net/bluetooth/rfcomm.h6
-rw-r--r--include/net/bluetooth/sco.h5
-rw-r--r--include/net/caif/caif_hsi.h2
-rw-r--r--include/net/cfg80211.h18
-rw-r--r--include/net/checksum.h11
-rw-r--r--include/net/cipso_ipv4.h6
-rw-r--r--include/net/compat.h48
-rw-r--r--include/net/dcbevent.h6
-rw-r--r--include/net/dn.h20
-rw-r--r--include/net/dn_dev.h30
-rw-r--r--include/net/dn_fib.h47
-rw-r--r--include/net/dn_neigh.h12
-rw-r--r--include/net/dn_nsp.h49
-rw-r--r--include/net/dn_route.h13
-rw-r--r--include/net/dst.h37
-rw-r--r--include/net/esp.h12
-rw-r--r--include/net/fib_rules.h17
-rw-r--r--include/net/flow.h11
-rw-r--r--include/net/flow_keys.h3
-rw-r--r--include/net/garp.h27
-rw-r--r--include/net/gen_stats.h51
-rw-r--r--include/net/genetlink.h26
-rw-r--r--include/net/gre.h8
-rw-r--r--include/net/icmp.h10
-rw-r--r--include/net/if_inet6.h5
-rw-r--r--include/net/inet6_connection_sock.h32
-rw-r--r--include/net/inet6_hashtables.h67
-rw-r--r--include/net/inet_common.h48
-rw-r--r--include/net/inet_connection_sock.h79
-rw-r--r--include/net/inet_frag.h4
-rw-r--r--include/net/inet_hashtables.h99
-rw-r--r--include/net/inet_sock.h49
-rw-r--r--include/net/inet_timewait_sock.h69
-rw-r--r--include/net/inetpeer.h12
-rw-r--r--include/net/ip.h189
-rw-r--r--include/net/ip6_checksum.h4
-rw-r--r--include/net/ip6_fib.h52
-rw-r--r--include/net/ip6_route.h104
-rw-r--r--include/net/ip_fib.h61
-rw-r--r--include/net/ip_tunnels.h3
-rw-r--r--include/net/ip_vs.h245
-rw-r--r--include/net/ipv6.h270
-rw-r--r--include/net/ipx.h12
-rw-r--r--include/net/irda/ircomm_tty.h14
-rw-r--r--include/net/irda/irda.h21
-rw-r--r--include/net/irda/irda_device.h2
-rw-r--r--include/net/irda/irlap_event.h2
-rw-r--r--include/net/irda/irlap_frame.h4
-rw-r--r--include/net/iw_handler.h38
-rw-r--r--include/net/lapb.h52
-rw-r--r--include/net/llc.h50
-rw-r--r--include/net/llc_c_ac.h190
-rw-r--r--include/net/llc_c_ev.h207
-rw-r--r--include/net/llc_conn.h36
-rw-r--r--include/net/llc_if.h7
-rw-r--r--include/net/llc_pdu.h33
-rw-r--r--include/net/llc_s_ac.h20
-rw-r--r--include/net/llc_s_ev.h21
-rw-r--r--include/net/llc_sap.h22
-rw-r--r--include/net/mac80211.h42
-rw-r--r--include/net/mac802154.h2
-rw-r--r--include/net/mrp.h26
-rw-r--r--include/net/ndisc.h61
-rw-r--r--include/net/net_namespace.h32
-rw-r--r--include/net/netevent.h6
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h6
-rw-r--r--include/net/netfilter/ipv4/nf_defrag_ipv4.h2
-rw-r--r--include/net/netfilter/ipv6/nf_defrag_ipv6.h17
-rw-r--r--include/net/netfilter/nf_conntrack.h69
-rw-r--r--include/net/netfilter/nf_conntrack_acct.h12
-rw-r--r--include/net/netfilter/nf_conntrack_core.h69
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h22
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h2
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h40
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h16
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h32
-rw-r--r--include/net/netfilter/nf_conntrack_seqadj.h30
-rw-r--r--include/net/netfilter/nf_conntrack_synproxy.h28
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h4
-rw-r--r--include/net/netfilter/nf_conntrack_timestamp.h8
-rw-r--r--include/net/netfilter/nf_nat.h13
-rw-r--r--include/net/netfilter/nf_nat_core.h8
-rw-r--r--include/net/netfilter/nf_nat_helper.h29
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h23
-rw-r--r--include/net/netfilter/nf_nat_l4proto.h30
-rw-r--r--include/net/netfilter/nf_queue.h2
-rw-r--r--include/net/netfilter/nf_tables.h519
-rw-r--r--include/net/netfilter/nf_tables_core.h42
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h23
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h30
-rw-r--r--include/net/netfilter/xt_rateest.h4
-rw-r--r--include/net/netlink.h63
-rw-r--r--include/net/netns/ipv4.h7
-rw-r--r--include/net/netns/nftables.h19
-rw-r--r--include/net/netrom.h89
-rw-r--r--include/net/nfc/digital.h227
-rw-r--r--include/net/nfc/hci.h6
-rw-r--r--include/net/nfc/nci.h4
-rw-r--r--include/net/nfc/nci_core.h46
-rw-r--r--include/net/nfc/nfc.h25
-rw-r--r--include/net/p8022.h18
-rw-r--r--include/net/ping.h4
-rw-r--r--include/net/protocol.h24
-rw-r--r--include/net/psnap.h4
-rw-r--r--include/net/raw.h6
-rw-r--r--include/net/rawv6.h3
-rw-r--r--include/net/request_sock.h15
-rw-r--r--include/net/rose.h114
-rw-r--r--include/net/route.h62
-rw-r--r--include/net/rtnetlink.h40
-rw-r--r--include/net/sch_generic.h11
-rw-r--r--include/net/scm.h10
-rw-r--r--include/net/sctp/checksum.h56
-rw-r--r--include/net/sctp/sctp.h13
-rw-r--r--include/net/secure_seq.h26
-rw-r--r--include/net/sock.h282
-rw-r--r--include/net/stp.h4
-rw-r--r--include/net/tcp.h449
-rw-r--r--include/net/tcp_memcontrol.h12
-rw-r--r--include/net/udp.h95
-rw-r--r--include/net/udplite.h6
-rw-r--r--include/net/vxlan.h11
-rw-r--r--include/net/wext.h16
-rw-r--r--include/net/wimax.h33
-rw-r--r--include/net/x25.h141
-rw-r--r--include/net/xfrm.h382
-rw-r--r--include/scsi/scsi_device.h4
-rw-r--r--include/scsi/scsi_transport_srp.h83
-rw-r--r--include/sound/ak4114.h4
-rw-r--r--include/sound/compress_driver.h12
-rw-r--r--include/sound/cs42l52.h2
-rw-r--r--include/sound/cs42l73.h22
-rw-r--r--include/sound/dmaengine_pcm.h8
-rw-r--r--include/sound/memalloc.h5
-rw-r--r--include/sound/rcar_snd.h2
-rw-r--r--include/sound/soc-dai.h17
-rw-r--r--include/sound/soc-dapm.h4
-rw-r--r--include/sound/soc.h100
-rw-r--r--include/trace/events/asoc.h1
-rw-r--r--include/trace/events/bcache.h73
-rw-r--r--include/trace/events/block.h26
-rw-r--r--include/trace/events/f2fs.h55
-rw-r--r--include/trace/events/iommu.h162
-rw-r--r--include/trace/events/kvm.h10
-rw-r--r--include/trace/events/power_cpu_migrate.h67
-rw-r--r--include/trace/events/rcu.h80
-rw-r--r--include/trace/events/sched.h21
-rw-r--r--include/trace/events/spi.h156
-rw-r--r--include/trace/events/swiotlb.h46
-rw-r--r--include/trace/events/target.h4
-rw-r--r--include/uapi/asm-generic/socket.h2
-rw-r--r--include/uapi/drm/armada_drm.h45
-rw-r--r--include/uapi/drm/drm.h37
-rw-r--r--include/uapi/drm/drm_mode.h47
-rw-r--r--include/uapi/drm/i915_drm.h8
-rw-r--r--include/uapi/drm/tegra_drm.h29
-rw-r--r--include/uapi/linux/audit.h1
-rw-r--r--include/uapi/linux/bcache.h373
-rw-r--r--include/uapi/linux/can/bcm.h32
-rw-r--r--include/uapi/linux/can/error.h32
-rw-r--r--include/uapi/linux/can/gw.h32
-rw-r--r--include/uapi/linux/can/netlink.h8
-rw-r--r--include/uapi/linux/can/raw.h32
-rw-r--r--include/uapi/linux/dm-ioctl.h15
-rw-r--r--include/uapi/linux/elf-em.h1
-rw-r--r--include/uapi/linux/hash_info.h37
-rw-r--r--include/uapi/linux/hsr_netlink.h50
-rw-r--r--include/uapi/linux/if_bonding.h2
-rw-r--r--include/uapi/linux/if_ether.h1
-rw-r--r--include/uapi/linux/if_link.h24
-rw-r--r--include/uapi/linux/keyctl.h1
-rw-r--r--include/uapi/linux/kvm.h11
-rw-r--r--include/uapi/linux/loop.h1
-rw-r--r--include/uapi/linux/major.h2
-rw-r--r--include/uapi/linux/netfilter/Kbuild2
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h16
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h4
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h718
-rw-r--r--include/uapi/linux/netfilter/nf_tables_compat.h38
-rw-r--r--include/uapi/linux/netfilter/nfnetlink.h10
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_cttimeout.h2
-rw-r--r--include/uapi/linux/nfc.h4
-rw-r--r--include/uapi/linux/nfs_mount.h2
-rw-r--r--include/uapi/linux/openvswitch.h18
-rw-r--r--include/uapi/linux/pci_regs.h4
-rw-r--r--include/uapi/linux/perf_event.h37
-rw-r--r--include/uapi/linux/pkt_cls.h14
-rw-r--r--include/uapi/linux/pkt_sched.h2
-rw-r--r--include/uapi/linux/tc_act/Kbuild1
-rw-r--r--include/uapi/linux/tc_act/tc_defact.h (renamed from include/linux/tc_act/tc_defact.h)2
-rw-r--r--include/uapi/linux/v4l2-controls.h4
-rw-r--r--include/uapi/mtd/mtd-abi.h9
-rw-r--r--include/uapi/rdma/ib_user_verbs.h6
-rw-r--r--include/uapi/sound/Kbuild1
-rw-r--r--include/uapi/sound/asound.h3
-rw-r--r--include/uapi/sound/firewire.h51
-rw-r--r--include/video/atmel_lcdc.h25
-rw-r--r--include/video/mmp_disp.h6
-rw-r--r--include/video/omap-panel-data.h13
-rw-r--r--include/xen/interface/io/netif.h18
-rw-r--r--include/xen/swiotlb-xen.h2
-rw-r--r--include/xen/xen-ops.h7
-rw-r--r--init/Kconfig22
-rw-r--r--init/main.c9
-rw-r--r--ipc/ipc_sysctl.c20
-rw-r--r--kernel/Makefile61
-rw-r--r--kernel/bounds.c4
-rw-r--r--kernel/cgroup.c262
-rw-r--r--kernel/context_tracking.c2
-rw-r--r--kernel/cpu.c17
-rw-r--r--kernel/cpu/idle.c16
-rw-r--r--kernel/debug/debug_core.c32
-rw-r--r--kernel/debug/debug_core.h3
-rw-r--r--kernel/debug/kdb/kdb_debugger.c5
-rw-r--r--kernel/debug/kdb/kdb_main.c3
-rw-r--r--kernel/events/core.c149
-rw-r--r--kernel/events/ring_buffer.c31
-rw-r--r--kernel/events/uprobes.c156
-rw-r--r--kernel/fork.c7
-rw-r--r--kernel/hung_task.c6
-rw-r--r--kernel/irq/chip.c2
-rw-r--r--kernel/irq/irqdomain.c13
-rw-r--r--kernel/irq/manage.c4
-rw-r--r--kernel/jump_label.c5
-rw-r--r--kernel/kexec.c2
-rw-r--r--kernel/lockdep.c4
-rw-r--r--kernel/lockdep_proc.c15
-rw-r--r--kernel/modsign_certificate.S12
-rw-r--r--kernel/modsign_pubkey.c104
-rw-r--r--kernel/module-internal.h2
-rw-r--r--kernel/module.c66
-rw-r--r--kernel/module_signing.c11
-rw-r--r--kernel/mutex.c32
-rw-r--r--kernel/padata.c9
-rw-r--r--kernel/power/Kconfig16
-rw-r--r--kernel/power/block_io.c2
-rw-r--r--kernel/power/hibernate.c2
-rw-r--r--kernel/power/qos.c26
-rw-r--r--kernel/power/user.c20
-rw-r--r--kernel/rcu/Makefile6
-rw-r--r--kernel/rcu/rcu.h (renamed from kernel/rcu.h)7
-rw-r--r--kernel/rcu/srcu.c (renamed from kernel/srcu.c)0
-rw-r--r--kernel/rcu/tiny.c (renamed from kernel/rcutiny.c)37
-rw-r--r--kernel/rcu/tiny_plugin.h (renamed from kernel/rcutiny_plugin.h)0
-rw-r--r--kernel/rcu/torture.c (renamed from kernel/rcutorture.c)6
-rw-r--r--kernel/rcu/tree.c (renamed from kernel/rcutree.c)200
-rw-r--r--kernel/rcu/tree.h (renamed from kernel/rcutree.h)2
-rw-r--r--kernel/rcu/tree_plugin.h (renamed from kernel/rcutree_plugin.h)84
-rw-r--r--kernel/rcu/tree_trace.c (renamed from kernel/rcutree_trace.c)2
-rw-r--r--kernel/rcu/update.c (renamed from kernel/rcupdate.c)10
-rw-r--r--kernel/sched/core.c290
-rw-r--r--kernel/sched/debug.c68
-rw-r--r--kernel/sched/fair.c1359
-rw-r--r--kernel/sched/features.h19
-rw-r--r--kernel/sched/idle_task.c2
-rw-r--r--kernel/sched/rt.c22
-rw-r--r--kernel/sched/sched.h52
-rw-r--r--kernel/sched/stats.h46
-rw-r--r--kernel/sched/stop_task.c2
-rw-r--r--kernel/smp.c14
-rw-r--r--kernel/softirq.c53
-rw-r--r--kernel/stop_machine.c288
-rw-r--r--kernel/sysctl.c29
-rw-r--r--kernel/system_certificates.S10
-rw-r--r--kernel/system_keyring.c105
-rw-r--r--kernel/time/Kconfig2
-rw-r--r--kernel/time/alarmtimer.c4
-rw-r--r--kernel/time/clockevents.c67
-rw-r--r--kernel/time/clocksource.c52
-rw-r--r--kernel/time/ntp.c3
-rw-r--r--kernel/time/sched_clock.c114
-rw-r--r--kernel/time/tick-broadcast.c1
-rw-r--r--kernel/time/tick-internal.h2
-rw-r--r--kernel/time/timekeeping.c3
-rw-r--r--kernel/time/timer_stats.c8
-rw-r--r--kernel/timer.c8
-rw-r--r--kernel/trace/blktrace.c51
-rw-r--r--kernel/trace/ftrace.c140
-rw-r--r--kernel/trace/trace.c7
-rw-r--r--kernel/trace/trace.h25
-rw-r--r--kernel/trace/trace_functions_graph.c56
-rw-r--r--kernel/user.c4
-rw-r--r--kernel/user_namespace.c6
-rw-r--r--kernel/wait.c24
-rw-r--r--kernel/workqueue.c41
-rw-r--r--lib/Kconfig14
-rw-r--r--lib/Kconfig.debug11
-rw-r--r--lib/Makefile1
-rw-r--r--lib/assoc_array.c1746
-rw-r--r--lib/crc32.c456
-rw-r--r--lib/kobject.c93
-rw-r--r--lib/locking-selftest.c2
-rw-r--r--lib/lockref.c1
-rw-r--r--lib/mpi/mpiutil.c3
-rw-r--r--lib/percpu_counter.c15
-rw-r--r--lib/percpu_ida.c89
-rw-r--r--lib/scatterlist.c3
-rw-r--r--lib/smp_processor_id.c3
-rw-r--r--lib/swiotlb.c6
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/bounce.c45
-rw-r--r--mm/filemap.c433
-rw-r--r--mm/huge_memory.c119
-rw-r--r--mm/list_lru.c3
-rw-r--r--mm/memcontrol.c129
-rw-r--r--mm/memory.c158
-rw-r--r--mm/mempolicy.c82
-rw-r--r--mm/migrate.c49
-rw-r--r--mm/mm_init.c18
-rw-r--r--mm/mmzone.c14
-rw-r--r--mm/mprotect.c65
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/page_io.c25
-rw-r--r--mm/pagewalk.c2
-rw-r--r--mm/percpu.c5
-rw-r--r--mm/shmem.c61
-rw-r--r--mm/slab.c571
-rw-r--r--mm/slub.c37
-rw-r--r--mm/swap.c3
-rw-r--r--net/8021q/vlan.c18
-rw-r--r--net/8021q/vlan.h28
-rw-r--r--net/8021q/vlan_netlink.c2
-rw-r--r--net/9p/trans_virtio.c9
-rw-r--r--net/Kconfig1
-rw-r--r--net/Makefile1
-rw-r--r--net/ax25/af_ax25.c2
-rw-r--r--net/batman-adv/Makefile3
-rw-r--r--net/batman-adv/bat_iv_ogm.c510
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c64
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h10
-rw-r--r--net/batman-adv/debugfs.c9
-rw-r--r--net/batman-adv/distributed-arp-table.c231
-rw-r--r--net/batman-adv/distributed-arp-table.h5
-rw-r--r--net/batman-adv/fragmentation.c491
-rw-r--r--net/batman-adv/fragmentation.h50
-rw-r--r--net/batman-adv/gateway_client.c247
-rw-r--r--net/batman-adv/gateway_client.h2
-rw-r--r--net/batman-adv/gateway_common.c230
-rw-r--r--net/batman-adv/gateway_common.h14
-rw-r--r--net/batman-adv/hard-interface.c110
-rw-r--r--net/batman-adv/hard-interface.h2
-rw-r--r--net/batman-adv/icmp_socket.c128
-rw-r--r--net/batman-adv/icmp_socket.h2
-rw-r--r--net/batman-adv/main.c694
-rw-r--r--net/batman-adv/main.h68
-rw-r--r--net/batman-adv/network-coding.c99
-rw-r--r--net/batman-adv/network-coding.h19
-rw-r--r--net/batman-adv/originator.c364
-rw-r--r--net/batman-adv/originator.h13
-rw-r--r--net/batman-adv/packet.h356
-rw-r--r--net/batman-adv/routing.c557
-rw-r--r--net/batman-adv/routing.h13
-rw-r--r--net/batman-adv/send.c239
-rw-r--r--net/batman-adv/send.h53
-rw-r--r--net/batman-adv/soft-interface.c269
-rw-r--r--net/batman-adv/soft-interface.h4
-rw-r--r--net/batman-adv/sysfs.c274
-rw-r--r--net/batman-adv/sysfs.h10
-rw-r--r--net/batman-adv/translation-table.c2086
-rw-r--r--net/batman-adv/translation-table.h46
-rw-r--r--net/batman-adv/types.h436
-rw-r--r--net/batman-adv/unicast.c491
-rw-r--r--net/batman-adv/unicast.h92
-rw-r--r--net/batman-adv/vis.c938
-rw-r--r--net/batman-adv/vis.h36
-rw-r--r--net/bluetooth/Makefile2
-rw-r--r--net/bluetooth/a2mp.c72
-rw-r--r--net/bluetooth/a2mp.h (renamed from include/net/bluetooth/a2mp.h)0
-rw-r--r--net/bluetooth/af_bluetooth.c73
-rw-r--r--net/bluetooth/amp.c10
-rw-r--r--net/bluetooth/amp.h (renamed from include/net/bluetooth/amp.h)0
-rw-r--r--net/bluetooth/bnep/core.c18
-rw-r--r--net/bluetooth/cmtp/core.c6
-rw-r--r--net/bluetooth/hci_conn.c194
-rw-r--r--net/bluetooth/hci_core.c1130
-rw-r--r--net/bluetooth/hci_event.c189
-rw-r--r--net/bluetooth/hci_sock.c210
-rw-r--r--net/bluetooth/hci_sysfs.c373
-rw-r--r--net/bluetooth/hidp/core.c18
-rw-r--r--net/bluetooth/hidp/hidp.h4
-rw-r--r--net/bluetooth/l2cap_core.c538
-rw-r--r--net/bluetooth/l2cap_sock.c222
-rw-r--r--net/bluetooth/mgmt.c1624
-rw-r--r--net/bluetooth/rfcomm/core.c33
-rw-r--r--net/bluetooth/rfcomm/sock.c55
-rw-r--r--net/bluetooth/sco.c57
-rw-r--r--net/bluetooth/smp.c98
-rw-r--r--net/bluetooth/smp.h (renamed from include/net/bluetooth/smp.h)0
-rw-r--r--net/bridge/br_device.c2
-rw-r--r--net/bridge/br_fdb.c4
-rw-r--r--net/bridge/br_input.c2
-rw-r--r--net/bridge/br_mdb.c2
-rw-r--r--net/bridge/br_multicast.c86
-rw-r--r--net/bridge/br_netfilter.c22
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/bridge/br_private.h307
-rw-r--r--net/bridge/br_private_stp.h24
-rw-r--r--net/bridge/br_stp_if.c2
-rw-r--r--net/bridge/br_vlan.c125
-rw-r--r--net/bridge/netfilter/Kconfig3
-rw-r--r--net/bridge/netfilter/Makefile2
-rw-r--r--net/bridge/netfilter/ebt_among.c2
-rw-r--r--net/bridge/netfilter/ebt_ulog.c9
-rw-r--r--net/bridge/netfilter/ebtable_filter.c16
-rw-r--r--net/bridge/netfilter/ebtable_nat.c16
-rw-r--r--net/bridge/netfilter/nf_tables_bridge.c65
-rw-r--r--net/can/af_can.c2
-rw-r--r--net/can/af_can.h6
-rw-r--r--net/ceph/auth_none.h2
-rw-r--r--net/ceph/auth_x.h2
-rw-r--r--net/ceph/crypto.h48
-rw-r--r--net/ceph/messenger.c43
-rw-r--r--net/compat.c2
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/dev.c542
-rw-r--r--net/core/dev_addr_lists.c4
-rw-r--r--net/core/ethtool.c2
-rw-r--r--net/core/filter.c8
-rw-r--r--net/core/flow_dissector.c75
-rw-r--r--net/core/iovec.c2
-rw-r--r--net/core/neighbour.c2
-rw-r--r--net/core/net-sysfs.c16
-rw-r--r--net/core/netpoll.c31
-rw-r--r--net/core/netprio_cgroup.c3
-rw-r--r--net/core/rtnetlink.c12
-rw-r--r--net/core/secure_seq.c18
-rw-r--r--net/core/skbuff.c96
-rw-r--r--net/core/sock.c46
-rw-r--r--net/core/utils.c49
-rw-r--r--net/dccp/ackvec.h21
-rw-r--r--net/dccp/ccid.h18
-rw-r--r--net/dccp/ccids/lib/loss_interval.h8
-rw-r--r--net/dccp/ccids/lib/packet_history.h25
-rw-r--r--net/dccp/ccids/lib/tfrc.h22
-rw-r--r--net/dccp/dccp.h186
-rw-r--r--net/dccp/feat.h26
-rw-r--r--net/dccp/ipv4.c18
-rw-r--r--net/dccp/ipv6.c83
-rw-r--r--net/dccp/ipv6.h2
-rw-r--r--net/dccp/minisocks.c15
-rw-r--r--net/dccp/output.c4
-rw-r--r--net/dccp/proto.c4
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c2
-rw-r--r--net/ethernet/eth.c30
-rw-r--r--net/hsr/Kconfig27
-rw-r--r--net/hsr/Makefile7
-rw-r--r--net/hsr/hsr_device.c596
-rw-r--r--net/hsr/hsr_device.h29
-rw-r--r--net/hsr/hsr_framereg.c503
-rw-r--r--net/hsr/hsr_framereg.h53
-rw-r--r--net/hsr/hsr_main.c469
-rw-r--r--net/hsr/hsr_main.h166
-rw-r--r--net/hsr/hsr_netlink.c457
-rw-r--r--net/hsr/hsr_netlink.h30
-rw-r--r--net/ieee802154/6lowpan.c51
-rw-r--r--net/ipv4/af_inet.c92
-rw-r--r--net/ipv4/esp4.c49
-rw-r--r--net/ipv4/fib_frontend.c2
-rw-r--r--net/ipv4/fib_lookup.h26
-rw-r--r--net/ipv4/fib_semantics.c2
-rw-r--r--net/ipv4/fib_trie.c15
-rw-r--r--net/ipv4/gre_demux.c29
-rw-r--r--net/ipv4/gre_offload.c3
-rw-r--r--net/ipv4/icmp.c5
-rw-r--r--net/ipv4/inet_connection_sock.c54
-rw-r--r--net/ipv4/inet_diag.c120
-rw-r--r--net/ipv4/inet_fragment.c3
-rw-r--r--net/ipv4/inet_hashtables.c110
-rw-r--r--net/ipv4/inet_timewait_sock.c59
-rw-r--r--net/ipv4/ip_fragment.c1
-rw-r--r--net/ipv4/ip_output.c38
-rw-r--r--net/ipv4/ip_sockglue.c25
-rw-r--r--net/ipv4/ip_tunnel_core.c33
-rw-r--r--net/ipv4/ip_vti.c81
-rw-r--r--net/ipv4/ipip.c11
-rw-r--r--net/ipv4/netfilter/Kconfig21
-rw-r--r--net/ipv4/netfilter/Makefile6
-rw-r--r--net/ipv4/netfilter/arp_tables.c5
-rw-r--r--net/ipv4/netfilter/arptable_filter.c5
-rw-r--r--net/ipv4/netfilter/ip_tables.c5
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c2
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c2
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c7
-rw-r--r--net/ipv4/netfilter/iptable_filter.c7
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c10
-rw-r--r--net/ipv4/netfilter/iptable_nat.c26
-rw-r--r--net/ipv4/netfilter/iptable_raw.c6
-rw-r--r--net/ipv4/netfilter/iptable_security.c7
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c12
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c6
-rw-r--r--net/ipv4/netfilter/nf_tables_arp.c102
-rw-r--r--net/ipv4/netfilter/nf_tables_ipv4.c128
-rw-r--r--net/ipv4/netfilter/nft_chain_nat_ipv4.c205
-rw-r--r--net/ipv4/netfilter/nft_chain_route_ipv4.c90
-rw-r--r--net/ipv4/netfilter/nft_reject_ipv4.c123
-rw-r--r--net/ipv4/ping.c29
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/route.c18
-rw-r--r--net/ipv4/syncookies.c80
-rw-r--r--net/ipv4/sysctl_net_ipv4.c130
-rw-r--r--net/ipv4/tcp.c21
-rw-r--r--net/ipv4/tcp_fastopen.c27
-rw-r--r--net/ipv4/tcp_input.c174
-rw-r--r--net/ipv4/tcp_ipv4.c124
-rw-r--r--net/ipv4/tcp_memcontrol.c90
-rw-r--r--net/ipv4/tcp_metrics.c27
-rw-r--r--net/ipv4/tcp_minisocks.c7
-rw-r--r--net/ipv4/tcp_offload.c21
-rw-r--r--net/ipv4/tcp_output.c47
-rw-r--r--net/ipv4/tcp_probe.c29
-rw-r--r--net/ipv4/tcp_timer.c9
-rw-r--r--net/ipv4/tcp_vegas.h10
-rw-r--r--net/ipv4/udp.c235
-rw-r--r--net/ipv4/udp_impl.h36
-rw-r--r--net/ipv4/udp_offload.c1
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c16
-rw-r--r--net/ipv4/xfrm4_policy.c7
-rw-r--r--net/ipv6/Kconfig29
-rw-r--r--net/ipv6/Makefile1
-rw-r--r--net/ipv6/addrconf.c41
-rw-r--r--net/ipv6/af_inet6.c65
-rw-r--r--net/ipv6/ah6.c3
-rw-r--r--net/ipv6/datagram.c25
-rw-r--r--net/ipv6/esp6.c51
-rw-r--r--net/ipv6/inet6_connection_sock.c33
-rw-r--r--net/ipv6/inet6_hashtables.c122
-rw-r--r--net/ipv6/ip6_fib.c205
-rw-r--r--net/ipv6/ip6_gre.c6
-rw-r--r--net/ipv6/ip6_offload.c40
-rw-r--r--net/ipv6/ip6_output.c29
-rw-r--r--net/ipv6/ip6_tunnel.c12
-rw-r--r--net/ipv6/ip6_vti.c1056
-rw-r--r--net/ipv6/ipcomp6.c3
-rw-r--r--net/ipv6/ipv6_sockglue.c7
-rw-r--r--net/ipv6/netfilter/Kconfig13
-rw-r--r--net/ipv6/netfilter/Makefile5
-rw-r--r--net/ipv6/netfilter/ip6_tables.c5
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c2
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c5
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c10
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c27
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c5
-rw-r--r--net/ipv6/netfilter/ip6table_security.c5
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c18
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c16
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c6
-rw-r--r--net/ipv6/netfilter/nf_tables_ipv6.c127
-rw-r--r--net/ipv6/netfilter/nft_chain_nat_ipv6.c211
-rw-r--r--net/ipv6/netfilter/nft_chain_route_ipv6.c88
-rw-r--r--net/ipv6/ping.c2
-rw-r--r--net/ipv6/raw.c17
-rw-r--r--net/ipv6/reassembly.c12
-rw-r--r--net/ipv6/route.c106
-rw-r--r--net/ipv6/sit.c28
-rw-r--r--net/ipv6/syncookies.c75
-rw-r--r--net/ipv6/tcp_ipv6.c113
-rw-r--r--net/ipv6/tcpv6_offload.c2
-rw-r--r--net/ipv6/udp.c93
-rw-r--r--net/ipv6/udp_impl.h41
-rw-r--r--net/ipv6/udp_offload.c2
-rw-r--r--net/ipv6/xfrm6_mode_tunnel.c69
-rw-r--r--net/ipv6/xfrm6_policy.c7
-rw-r--r--net/irda/af_irda.c5
-rw-r--r--net/irda/irnet/irnet.h15
-rw-r--r--net/key/af_key.c3
-rw-r--r--net/l2tp/l2tp_core.c40
-rw-r--r--net/l2tp/l2tp_core.h60
-rw-r--r--net/l2tp/l2tp_debugfs.c5
-rw-r--r--net/l2tp/l2tp_ip6.c16
-rw-r--r--net/l2tp/l2tp_netlink.c4
-rw-r--r--net/l2tp/l2tp_ppp.c16
-rw-r--r--net/mac80211/cfg.c94
-rw-r--r--net/mac80211/chan.c5
-rw-r--r--net/mac80211/debugfs.c55
-rw-r--r--net/mac80211/driver-ops.h27
-rw-r--r--net/mac80211/ibss.c608
-rw-r--r--net/mac80211/ieee80211_i.h34
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/key.c2
-rw-r--r--net/mac80211/mlme.c429
-rw-r--r--net/mac80211/offchannel.c2
-rw-r--r--net/mac80211/rate.c15
-rw-r--r--net/mac80211/rate.h12
-rw-r--r--net/mac80211/rc80211_minstrel.c14
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c23
-rw-r--r--net/mac80211/rc80211_pid_debugfs.c26
-rw-r--r--net/mac80211/rx.c42
-rw-r--r--net/mac80211/scan.c22
-rw-r--r--net/mac80211/spectmgmt.c162
-rw-r--r--net/mac80211/status.c3
-rw-r--r--net/mac80211/trace.h39
-rw-r--r--net/mac80211/tx.c42
-rw-r--r--net/mac80211/util.c171
-rw-r--r--net/mac80211/vht.c4
-rw-r--r--net/mac802154/ieee802154_dev.c6
-rw-r--r--net/mac802154/wpan.c2
-rw-r--r--net/mpls/mpls_gso.c1
-rw-r--r--net/netfilter/Kconfig52
-rw-r--r--net/netfilter/Makefile18
-rw-r--r--net/netfilter/core.c2
-rw-r--r--net/netfilter/ipset/Kconfig20
-rw-r--r--net/netfilter/ipset/Makefile2
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h163
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c125
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c156
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c112
-rw-r--r--net/netfilter/ipset/ip_set_core.c361
-rw-r--r--net/netfilter/ipset/ip_set_getport.c18
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h526
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c58
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c80
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c86
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c108
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c85
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c98
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c483
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c92
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c588
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c263
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c42
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c7
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c4
-rw-r--r--net/netfilter/nf_conntrack_sip.c133
-rw-r--r--net/netfilter/nf_internals.h28
-rw-r--r--net/netfilter/nf_nat_core.c20
-rw-r--r--net/netfilter/nf_nat_sip.c35
-rw-r--r--net/netfilter/nf_tables_api.c3275
-rw-r--r--net/netfilter/nf_tables_core.c270
-rw-r--r--net/netfilter/nfnetlink.c175
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c161
-rw-r--r--net/netfilter/nfnetlink_log.c11
-rw-r--r--net/netfilter/nfnetlink_queue_core.c6
-rw-r--r--net/netfilter/nft_bitwise.c146
-rw-r--r--net/netfilter/nft_byteorder.c173
-rw-r--r--net/netfilter/nft_cmp.c223
-rw-r--r--net/netfilter/nft_compat.c768
-rw-r--r--net/netfilter/nft_counter.c113
-rw-r--r--net/netfilter/nft_ct.c258
-rw-r--r--net/netfilter/nft_expr_template.c94
-rw-r--r--net/netfilter/nft_exthdr.c133
-rw-r--r--net/netfilter/nft_hash.c231
-rw-r--r--net/netfilter/nft_immediate.c132
-rw-r--r--net/netfilter/nft_limit.c119
-rw-r--r--net/netfilter/nft_log.c146
-rw-r--r--net/netfilter/nft_lookup.c141
-rw-r--r--net/netfilter/nft_meta.c228
-rw-r--r--net/netfilter/nft_meta_target.c117
-rw-r--r--net/netfilter/nft_nat.c220
-rw-r--r--net/netfilter/nft_payload.c160
-rw-r--r--net/netfilter/nft_rbtree.c247
-rw-r--r--net/netfilter/x_tables.c7
-rw-r--r--net/netfilter/xt_NFQUEUE.c7
-rw-r--r--net/netfilter/xt_TCPMSS.c72
-rw-r--r--net/netfilter/xt_TPROXY.c2
-rw-r--r--net/netfilter/xt_set.c224
-rw-r--r--net/netfilter/xt_socket.c2
-rw-r--r--net/netlabel/netlabel_kapi.c2
-rw-r--r--net/nfc/Kconfig14
-rw-r--r--net/nfc/Makefile2
-rw-r--r--net/nfc/core.c22
-rw-r--r--net/nfc/digital.h170
-rw-r--r--net/nfc/digital_core.c737
-rw-r--r--net/nfc/digital_dep.c729
-rw-r--r--net/nfc/digital_technology.c770
-rw-r--r--net/nfc/nci/spi.c239
-rw-r--r--net/nfc/netlink.c91
-rw-r--r--net/nfc/rawsock.c7
-rw-r--r--net/openvswitch/Makefile2
-rw-r--r--net/openvswitch/datapath.c668
-rw-r--r--net/openvswitch/datapath.h9
-rw-r--r--net/openvswitch/dp_notify.c7
-rw-r--r--net/openvswitch/flow.c1605
-rw-r--r--net/openvswitch/flow.h132
-rw-r--r--net/openvswitch/flow_netlink.c1630
-rw-r--r--net/openvswitch/flow_netlink.h60
-rw-r--r--net/openvswitch/flow_table.c592
-rw-r--r--net/openvswitch/flow_table.h81
-rw-r--r--net/openvswitch/vport-gre.c2
-rw-r--r--net/openvswitch/vport-internal_dev.c2
-rw-r--r--net/openvswitch/vport-netdev.c16
-rw-r--r--net/openvswitch/vport-netdev.h1
-rw-r--r--net/openvswitch/vport-vxlan.c3
-rw-r--r--net/rds/connection.c12
-rw-r--r--net/rds/rds.h2
-rw-r--r--net/rxrpc/ar-internal.h150
-rw-r--r--net/sched/Kconfig10
-rw-r--r--net/sched/Makefile1
-rw-r--r--net/sched/act_police.c4
-rw-r--r--net/sched/cls_basic.c2
-rw-r--r--net/sched/cls_bpf.c385
-rw-r--r--net/sched/cls_cgroup.c4
-rw-r--r--net/sched/em_ipset.c7
-rw-r--r--net/sched/em_meta.c4
-rw-r--r--net/sched/sch_api.c3
-rw-r--r--net/sched/sch_fq.c23
-rw-r--r--net/sched/sch_generic.c11
-rw-r--r--net/sched/sch_htb.c17
-rw-r--r--net/sched/sch_netem.c18
-rw-r--r--net/sched/sch_tbf.c4
-rw-r--r--net/sctp/associola.c4
-rw-r--r--net/sctp/auth.c14
-rw-r--r--net/sctp/chunk.c2
-rw-r--r--net/sctp/ipv6.c26
-rw-r--r--net/sctp/output.c12
-rw-r--r--net/sctp/sm_make_chunk.c29
-rw-r--r--net/sctp/sm_sideeffect.c1
-rw-r--r--net/sctp/socket.c2
-rw-r--r--net/socket.c24
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c57
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_unseal.c8
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c10
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_upcall.c3
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c29
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c4
-rw-r--r--net/sunrpc/clnt.c148
-rw-r--r--net/sunrpc/rpc_pipe.c12
-rw-r--r--net/sunrpc/svcsock.c4
-rw-r--r--net/sunrpc/xprt.c63
-rw-r--r--net/sunrpc/xprtsock.c47
-rw-r--r--net/tipc/bearer.c18
-rw-r--r--net/tipc/bearer.h10
-rw-r--r--net/tipc/core.h28
-rw-r--r--net/tipc/eth_media.c68
-rw-r--r--net/tipc/ib_media.c58
-rw-r--r--net/tipc/link.c340
-rw-r--r--net/tipc/link.h4
-rw-r--r--net/tipc/msg.c27
-rw-r--r--net/tipc/msg.h3
-rw-r--r--net/tipc/port.c66
-rw-r--r--net/tipc/port.h16
-rw-r--r--net/tipc/socket.c12
-rw-r--r--net/unix/af_unix.c10
-rw-r--r--net/unix/diag.c1
-rw-r--r--net/vmw_vsock/Kconfig2
-rw-r--r--net/wimax/wimax-internal.h18
-rw-r--r--net/wireless/chan.c1
-rw-r--r--net/wireless/core.c23
-rw-r--r--net/wireless/core.h18
-rw-r--r--net/wireless/debugfs.c24
-rw-r--r--net/wireless/genregdb.awk6
-rw-r--r--net/wireless/ibss.c3
-rw-r--r--net/wireless/nl80211.c56
-rw-r--r--net/wireless/radiotap.c7
-rw-r--r--net/wireless/reg.c41
-rw-r--r--net/wireless/sysfs.h4
-rw-r--r--net/wireless/util.c9
-rw-r--r--net/x25/Kconfig4
-rw-r--r--net/xfrm/xfrm_hash.h4
-rw-r--r--net/xfrm/xfrm_ipcomp.c18
-rw-r--r--net/xfrm/xfrm_policy.c35
-rw-r--r--net/xfrm/xfrm_replay.c54
-rw-r--r--net/xfrm/xfrm_state.c6
-rw-r--r--net/xfrm/xfrm_user.c5
-rw-r--r--scripts/Makefile.modpost4
-rw-r--r--scripts/asn1_compiler.c2
-rw-r--r--scripts/coccinelle/api/devm_request_and_ioremap.cocci105
-rw-r--r--scripts/kallsyms.c12
-rw-r--r--scripts/kconfig/expr.h2
-rw-r--r--scripts/kconfig/mconf.c60
-rw-r--r--scripts/kconfig/menu.c11
-rw-r--r--scripts/kconfig/qconf.cc5
-rw-r--r--scripts/kconfig/qconf.h1
-rw-r--r--scripts/kconfig/symbol.c2
-rw-r--r--scripts/kconfig/zconf.l1
-rw-r--r--scripts/link-vmlinux.sh2
-rw-r--r--scripts/mod/modpost.c13
-rwxr-xr-xscripts/show_delta12
-rwxr-xr-xscripts/tags.sh5
-rw-r--r--security/Makefile1
-rw-r--r--security/apparmor/audit.c14
-rw-r--r--security/apparmor/capability.c15
-rw-r--r--security/apparmor/domain.c16
-rw-r--r--security/apparmor/include/audit.h1
-rw-r--r--security/apparmor/include/capability.h5
-rw-r--r--security/apparmor/include/ipc.h4
-rw-r--r--security/apparmor/ipc.c9
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--security/capability.c15
-rw-r--r--security/device_cgroup.c11
-rw-r--r--security/integrity/digsig.c37
-rw-r--r--security/integrity/digsig_asymmetric.c11
-rw-r--r--security/integrity/evm/evm_main.c4
-rw-r--r--security/integrity/evm/evm_posix_acl.c3
-rw-r--r--security/integrity/iint.c2
-rw-r--r--security/integrity/ima/Kconfig72
-rw-r--r--security/integrity/ima/Makefile2
-rw-r--r--security/integrity/ima/ima.h101
-rw-r--r--security/integrity/ima/ima_api.c136
-rw-r--r--security/integrity/ima/ima_appraise.c117
-rw-r--r--security/integrity/ima/ima_crypto.c134
-rw-r--r--security/integrity/ima/ima_fs.c67
-rw-r--r--security/integrity/ima/ima_init.c37
-rw-r--r--security/integrity/ima/ima_main.c63
-rw-r--r--security/integrity/ima/ima_policy.c1
-rw-r--r--security/integrity/ima/ima_queue.c10
-rw-r--r--security/integrity/ima/ima_template.c178
-rw-r--r--security/integrity/ima/ima_template_lib.c347
-rw-r--r--security/integrity/ima/ima_template_lib.h49
-rw-r--r--security/integrity/integrity.h47
-rw-r--r--security/keys/Kconfig29
-rw-r--r--security/keys/Makefile2
-rw-r--r--security/keys/big_key.c206
-rw-r--r--security/keys/compat.c3
-rw-r--r--security/keys/gc.c33
-rw-r--r--security/keys/internal.h74
-rw-r--r--security/keys/key.c102
-rw-r--r--security/keys/keyctl.c3
-rw-r--r--security/keys/keyring.c1505
-rw-r--r--security/keys/persistent.c169
-rw-r--r--security/keys/proc.c17
-rw-r--r--security/keys/process_keys.c141
-rw-r--r--security/keys/request_key.c60
-rw-r--r--security/keys/request_key_auth.c31
-rw-r--r--security/keys/sysctl.c11
-rw-r--r--security/keys/user_defined.c18
-rw-r--r--security/lsm_audit.c7
-rw-r--r--security/security.c13
-rw-r--r--security/selinux/hooks.c158
-rw-r--r--security/selinux/include/objsec.h4
-rw-r--r--security/selinux/include/security.h13
-rw-r--r--security/selinux/include/xfrm.h45
-rw-r--r--security/selinux/netlabel.c6
-rw-r--r--security/selinux/netnode.c2
-rw-r--r--security/selinux/selinuxfs.c4
-rw-r--r--security/selinux/ss/ebitmap.c20
-rw-r--r--security/selinux/ss/ebitmap.h10
-rw-r--r--security/selinux/ss/mls.c22
-rw-r--r--security/selinux/ss/mls_types.h2
-rw-r--r--security/selinux/ss/policydb.c3
-rw-r--r--security/selinux/ss/services.c66
-rw-r--r--security/selinux/xfrm.c453
-rw-r--r--security/smack/smack.h12
-rw-r--r--security/smack/smack_access.c10
-rw-r--r--security/smack/smack_lsm.c11
-rw-r--r--security/smack/smackfs.c10
-rw-r--r--sound/aoa/core/gpio-feature.c3
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c2
-rw-r--r--sound/arm/pxa2xx-ac97-lib.c27
-rw-r--r--sound/arm/pxa2xx-ac97.c2
-rw-r--r--sound/arm/pxa2xx-pcm.c10
-rw-r--r--sound/core/compress_offload.c41
-rw-r--r--sound/core/init.c4
-rw-r--r--sound/core/memalloc.c61
-rw-r--r--sound/core/pcm.c4
-rw-r--r--sound/core/pcm_dmaengine.c22
-rw-r--r--sound/core/pcm_native.c10
-rw-r--r--sound/drivers/opl3/opl3_midi.c5
-rw-r--r--sound/drivers/pcsp/pcsp.c3
-rw-r--r--sound/firewire/Kconfig15
-rw-r--r--sound/firewire/Makefile2
-rw-r--r--sound/firewire/amdtp.c213
-rw-r--r--sound/firewire/amdtp.h46
-rw-r--r--sound/firewire/cmp.c50
-rw-r--r--sound/firewire/dice-interface.h371
-rw-r--r--sound/firewire/dice.c1494
-rw-r--r--sound/firewire/fcp.c2
-rw-r--r--sound/firewire/isight.c43
-rw-r--r--sound/firewire/lib.c24
-rw-r--r--sound/firewire/lib.h7
-rw-r--r--sound/firewire/scs1x.c8
-rw-r--r--sound/firewire/speakers.c16
-rw-r--r--sound/i2c/other/ak4114.c8
-rw-r--r--sound/i2c/other/ak4xxx-adda.c2
-rw-r--r--sound/oss/sb_ess.c2
-rw-r--r--sound/pci/ad1889.c2
-rw-r--r--sound/pci/ali5451/ali5451.c2
-rw-r--r--sound/pci/asihpi/asihpi.c9
-rw-r--r--sound/pci/au88x0/au88x0_pcm.c2
-rw-r--r--sound/pci/au88x0/au88x0_synth.c29
-rw-r--r--sound/pci/azt3328.c14
-rw-r--r--sound/pci/cs5535audio/cs5535audio_olpc.c4
-rw-r--r--sound/pci/ctxfi/ctdaio.c4
-rw-r--r--sound/pci/emu10k1/emufx.c76
-rw-r--r--sound/pci/hda/hda_auto_parser.c2
-rw-r--r--sound/pci/hda/hda_beep.c5
-rw-r--r--sound/pci/hda/hda_codec.c28
-rw-r--r--sound/pci/hda/hda_codec.h1
-rw-r--r--sound/pci/hda/hda_eld.c199
-rw-r--r--sound/pci/hda/hda_generic.c4
-rw-r--r--sound/pci/hda/hda_intel.c7
-rw-r--r--sound/pci/hda/hda_local.h27
-rw-r--r--sound/pci/hda/patch_analog.c21
-rw-r--r--sound/pci/hda/patch_ca0132.c2
-rw-r--r--sound/pci/hda/patch_conexant.c92
-rw-r--r--sound/pci/hda/patch_hdmi.c994
-rw-r--r--sound/pci/hda/patch_realtek.c60
-rw-r--r--sound/pci/hda/patch_sigmatel.c4
-rw-r--r--sound/pci/ice1712/psc724.c4
-rw-r--r--sound/pci/ice1712/wm8766.c3
-rw-r--r--sound/pci/ice1712/wm8776.c5
-rw-r--r--sound/pci/lola/lola.c2
-rw-r--r--sound/pci/rme96.c10
-rw-r--r--sound/pci/rme9652/hdspm.c7
-rw-r--r--sound/ppc/keywest.c4
-rw-r--r--sound/ppc/pmac.c2
-rw-r--r--sound/ppc/tumbler.c1
-rw-r--r--sound/soc/Makefile2
-rw-r--r--sound/soc/atmel/atmel-pcm.c13
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c2
-rw-r--r--sound/soc/atmel/atmel_wm8904.c8
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c1
-rw-r--r--sound/soc/blackfin/bf5xx-ac97-pcm.c11
-rw-r--r--sound/soc/blackfin/bf5xx-i2s-pcm.c10
-rw-r--r--sound/soc/cirrus/Kconfig2
-rw-r--r--sound/soc/cirrus/ep93xx-pcm.c13
-rw-r--r--sound/soc/codecs/88pm860x-codec.c75
-rw-r--r--sound/soc/codecs/88pm860x-codec.h117
-rw-r--r--sound/soc/codecs/ab8500-codec.c92
-rw-r--r--sound/soc/codecs/adau1373.c298
-rw-r--r--sound/soc/codecs/adav80x.c147
-rw-r--r--sound/soc/codecs/ak4104.c11
-rw-r--r--sound/soc/codecs/ak4641.c2
-rw-r--r--sound/soc/codecs/ak4642.c4
-rw-r--r--sound/soc/codecs/alc5632.c2
-rw-r--r--sound/soc/codecs/arizona.c23
-rw-r--r--sound/soc/codecs/cq93vc.c46
-rw-r--r--sound/soc/codecs/cs4271.c1
-rw-r--r--sound/soc/codecs/cs42l52.c93
-rw-r--r--sound/soc/codecs/cs42l52.h2
-rw-r--r--sound/soc/codecs/cs42l73.c114
-rw-r--r--sound/soc/codecs/cs42l73.h105
-rw-r--r--sound/soc/codecs/max98088.c624
-rw-r--r--sound/soc/codecs/max98095.c466
-rw-r--r--sound/soc/codecs/max9850.c39
-rw-r--r--sound/soc/codecs/mc13783.c137
-rw-r--r--sound/soc/codecs/ml26124.c2
-rw-r--r--sound/soc/codecs/pcm1681.c3
-rw-r--r--sound/soc/codecs/pcm1792a.c3
-rw-r--r--sound/soc/codecs/rt5640.c31
-rw-r--r--sound/soc/codecs/si476x.c64
-rw-r--r--sound/soc/codecs/sn95031.c35
-rw-r--r--sound/soc/codecs/tas5086.c173
-rw-r--r--sound/soc/codecs/tlv320aic23.c84
-rw-r--r--sound/soc/codecs/tlv320aic26.c139
-rw-r--r--sound/soc/codecs/tlv320aic26.h5
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c101
-rw-r--r--sound/soc/codecs/tlv320aic3x.c238
-rw-r--r--sound/soc/codecs/tpa6130a2.c32
-rw-r--r--sound/soc/codecs/twl4030.c80
-rw-r--r--sound/soc/codecs/twl6040.c26
-rw-r--r--sound/soc/codecs/wm0010.c8
-rw-r--r--sound/soc/codecs/wm5110.c12
-rw-r--r--sound/soc/codecs/wm8400.c95
-rw-r--r--sound/soc/codecs/wm8962.c226
-rw-r--r--sound/soc/codecs/wm8996.c2
-rw-r--r--sound/soc/codecs/wm_adsp.c27
-rw-r--r--sound/soc/codecs/wm_hubs.c1
-rw-r--r--sound/soc/davinci/Kconfig18
-rw-r--r--sound/soc/davinci/Makefile1
-rw-r--r--sound/soc/davinci/davinci-evm.c188
-rw-r--r--sound/soc/davinci/davinci-mcasp.c169
-rw-r--r--sound/soc/davinci/davinci-mcasp.h12
-rw-r--r--sound/soc/davinci/davinci-pcm.c9
-rw-r--r--sound/soc/fsl/eukrea-tlv320.c15
-rw-r--r--sound/soc/fsl/fsl_dma.c11
-rw-r--r--sound/soc/fsl/fsl_spdif.c22
-rw-r--r--sound/soc/fsl/fsl_ssi.c22
-rw-r--r--sound/soc/fsl/imx-audmux.c9
-rw-r--r--sound/soc/fsl/imx-mc13783.c3
-rw-r--r--sound/soc/fsl/imx-pcm-dma.c4
-rw-r--r--sound/soc/fsl/imx-pcm-fiq.c12
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c4
-rw-r--r--sound/soc/fsl/imx-spdif.c4
-rw-r--r--sound/soc/fsl/imx-ssi.c26
-rw-r--r--sound/soc/fsl/imx-ssi.h2
-rw-r--r--sound/soc/fsl/imx-wm8962.c6
-rw-r--r--sound/soc/fsl/mpc5200_dma.c12
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.c2
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c1
-rw-r--r--sound/soc/fsl/p1022_ds.c1
-rw-r--r--sound/soc/fsl/p1022_rdk.c1
-rw-r--r--sound/soc/generic/simple-card.c5
-rw-r--r--sound/soc/jz4740/jz4740-pcm.c12
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c15
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c108
-rw-r--r--sound/soc/kirkwood/kirkwood-openrd.c2
-rw-r--r--sound/soc/kirkwood/kirkwood-t5325.c2
-rw-r--r--sound/soc/kirkwood/kirkwood.h4
-rw-r--r--sound/soc/mid-x86/mfld_machine.c10
-rw-r--r--sound/soc/mxs/mxs-saif.c42
-rw-r--r--sound/soc/mxs/mxs-saif.h5
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c20
-rw-r--r--sound/soc/nuc900/nuc900-pcm.c9
-rw-r--r--sound/soc/omap/Kconfig4
-rw-r--r--sound/soc/omap/omap-mcpdm.c12
-rw-r--r--sound/soc/omap/omap-pcm.c11
-rw-r--r--sound/soc/omap/omap-twl4030.c5
-rw-r--r--sound/soc/pxa/brownstone.c1
-rw-r--r--sound/soc/pxa/corgi.c1
-rw-r--r--sound/soc/pxa/e740_wm9705.c1
-rw-r--r--sound/soc/pxa/e750_wm9705.c1
-rw-r--r--sound/soc/pxa/e800_wm9712.c1
-rw-r--r--sound/soc/pxa/imote2.c1
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c1
-rw-r--r--sound/soc/pxa/mmp-sspa.c5
-rw-r--r--sound/soc/pxa/palm27x.c1
-rw-r--r--sound/soc/pxa/poodle.c1
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c56
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c11
-rw-r--r--sound/soc/pxa/tosa.c1
-rw-r--r--sound/soc/pxa/ttc-dkb.c1
-rw-r--r--sound/soc/s6000/s6000-pcm.c9
-rw-r--r--sound/soc/samsung/Kconfig2
-rw-r--r--sound/soc/samsung/bells.c1
-rw-r--r--sound/soc/samsung/dma.c11
-rw-r--r--sound/soc/samsung/i2s.c25
-rw-r--r--sound/soc/samsung/idma.c11
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.c6
-rw-r--r--sound/soc/samsung/smdk_wm8994.c14
-rw-r--r--sound/soc/sh/rcar/adg.c11
-rw-r--r--sound/soc/sh/rcar/core.c81
-rw-r--r--sound/soc/sh/rcar/gen.c261
-rw-r--r--sound/soc/sh/rcar/rsnd.h9
-rw-r--r--sound/soc/sh/rcar/scu.c12
-rw-r--r--sound/soc/sh/rcar/ssi.c52
-rw-r--r--sound/soc/soc-cache.c263
-rw-r--r--sound/soc/soc-core.c343
-rw-r--r--sound/soc/soc-dapm.c124
-rw-r--r--sound/soc/soc-devres.c86
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c103
-rw-r--r--sound/soc/soc-io.c26
-rw-r--r--sound/soc/soc-jack.c7
-rw-r--r--sound/soc/soc-pcm.c60
-rw-r--r--sound/soc/soc-utils.c6
-rw-r--r--sound/soc/spear/spdif_in.c12
-rw-r--r--sound/soc/spear/spdif_out.c14
-rw-r--r--sound/soc/tegra/tegra20_i2s.c6
-rw-r--r--sound/soc/tegra/tegra20_spdif.c6
-rw-r--r--sound/soc/tegra/tegra30_ahub.c119
-rw-r--r--sound/soc/tegra/tegra30_ahub.h38
-rw-r--r--sound/soc/tegra/tegra30_i2s.c55
-rw-r--r--sound/soc/tegra/tegra30_i2s.h7
-rw-r--r--sound/soc/tegra/tegra_asoc_utils.c2
-rw-r--r--sound/soc/tegra/tegra_asoc_utils.h1
-rw-r--r--sound/soc/tegra/tegra_pcm.c1
-rw-r--r--sound/usb/6fire/chip.c2
-rw-r--r--sound/usb/caiaq/control.c92
-rw-r--r--sound/usb/caiaq/device.c25
-rw-r--r--sound/usb/caiaq/device.h5
-rw-r--r--sound/usb/card.c22
-rw-r--r--sound/usb/card.h12
-rw-r--r--sound/usb/endpoint.c139
-rw-r--r--sound/usb/endpoint.h4
-rw-r--r--sound/usb/helper.c1
-rw-r--r--sound/usb/mixer.c4
-rw-r--r--sound/usb/pcm.c40
-rw-r--r--sound/usb/usbaudio.h1
-rw-r--r--tools/lib/traceevent/Makefile18
-rw-r--r--tools/perf/.gitignore1
-rw-r--r--tools/perf/Documentation/Makefile79
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt13
-rw-r--r--tools/perf/Documentation/perf-kvm.txt4
-rw-r--r--tools/perf/Documentation/perf-lock.txt2
-rw-r--r--tools/perf/Documentation/perf-record.txt25
-rw-r--r--tools/perf/Documentation/perf-report.txt16
-rw-r--r--tools/perf/Documentation/perf-stat.txt5
-rw-r--r--tools/perf/Documentation/perf-timechart.txt15
-rw-r--r--tools/perf/Documentation/perf-top.txt33
-rw-r--r--tools/perf/Documentation/perf-trace.txt31
-rw-r--r--tools/perf/Makefile848
-rw-r--r--tools/perf/Makefile.perf889
-rw-r--r--tools/perf/arch/arm/Makefile3
-rw-r--r--tools/perf/arch/arm/include/perf_regs.h54
-rw-r--r--tools/perf/arch/arm/util/unwind.c48
-rw-r--r--tools/perf/arch/x86/include/perf_regs.h6
-rw-r--r--tools/perf/arch/x86/util/unwind.c4
-rw-r--r--tools/perf/bash_completion106
-rw-r--r--tools/perf/bench/mem-memcpy-arch.h2
-rw-r--r--tools/perf/bench/mem-memcpy.c2
-rw-r--r--tools/perf/bench/mem-memset-arch.h2
-rw-r--r--tools/perf/bench/mem-memset.c2
-rw-r--r--tools/perf/bench/numa.c38
-rw-r--r--tools/perf/bench/sched-pipe.c115
-rw-r--r--tools/perf/builtin-annotate.c45
-rw-r--r--tools/perf/builtin-bench.c245
-rw-r--r--tools/perf/builtin-buildid-cache.c156
-rw-r--r--tools/perf/builtin-buildid-list.c11
-rw-r--r--tools/perf/builtin-diff.c30
-rw-r--r--tools/perf/builtin-evlist.c7
-rw-r--r--tools/perf/builtin-inject.c36
-rw-r--r--tools/perf/builtin-kmem.c7
-rw-r--r--tools/perf/builtin-kvm.c25
-rw-r--r--tools/perf/builtin-lock.c131
-rw-r--r--tools/perf/builtin-mem.c9
-rw-r--r--tools/perf/builtin-probe.c14
-rw-r--r--tools/perf/builtin-record.c184
-rw-r--r--tools/perf/builtin-report.c82
-rw-r--r--tools/perf/builtin-sched.c50
-rw-r--r--tools/perf/builtin-script.c59
-rw-r--r--tools/perf/builtin-stat.c169
-rw-r--r--tools/perf/builtin-timechart.c10
-rw-r--r--tools/perf/builtin-top.c66
-rw-r--r--tools/perf/builtin-trace.c1212
-rw-r--r--tools/perf/config/Makefile373
-rw-r--r--tools/perf/config/feature-checks/Makefile148
-rw-r--r--tools/perf/config/feature-checks/test-all.c110
-rw-r--r--tools/perf/config/feature-checks/test-backtrace.c13
-rw-r--r--tools/perf/config/feature-checks/test-bionic.c6
-rw-r--r--tools/perf/config/feature-checks/test-cplus-demangle.c14
-rw-r--r--tools/perf/config/feature-checks/test-dwarf.c10
-rw-r--r--tools/perf/config/feature-checks/test-fortify-source.c6
-rw-r--r--tools/perf/config/feature-checks/test-glibc.c8
-rw-r--r--tools/perf/config/feature-checks/test-gtk2-infobar.c11
-rw-r--r--tools/perf/config/feature-checks/test-gtk2.c10
-rw-r--r--tools/perf/config/feature-checks/test-hello.c6
-rw-r--r--tools/perf/config/feature-checks/test-libaudit.c10
-rw-r--r--tools/perf/config/feature-checks/test-libbfd.c15
-rw-r--r--tools/perf/config/feature-checks/test-libelf-getphdrnum.c8
-rw-r--r--tools/perf/config/feature-checks/test-libelf-mmap.c8
-rw-r--r--tools/perf/config/feature-checks/test-libelf.c8
-rw-r--r--tools/perf/config/feature-checks/test-libnuma.c9
-rw-r--r--tools/perf/config/feature-checks/test-libperl.c9
-rw-r--r--tools/perf/config/feature-checks/test-libpython-version.c10
-rw-r--r--tools/perf/config/feature-checks/test-libpython.c8
-rw-r--r--tools/perf/config/feature-checks/test-libslang.c6
-rw-r--r--tools/perf/config/feature-checks/test-libunwind-debug-frame.c16
-rw-r--r--tools/perf/config/feature-checks/test-libunwind.c27
-rw-r--r--tools/perf/config/feature-checks/test-on-exit.c16
-rw-r--r--tools/perf/config/feature-checks/test-stackprotector-all.c6
-rw-r--r--tools/perf/config/feature-checks/test-stackprotector.c6
-rw-r--r--tools/perf/config/feature-checks/test-volatile-register-var.c6
-rw-r--r--tools/perf/config/feature-tests.mak246
-rw-r--r--tools/perf/config/utilities.mak17
-rw-r--r--tools/perf/perf.c14
-rw-r--r--tools/perf/perf.h6
-rw-r--r--tools/perf/tests/code-reading.c1
-rw-r--r--tools/perf/tests/dso-data.c1
-rw-r--r--tools/perf/tests/hists_link.c8
-rw-r--r--tools/perf/tests/keep-tracking.c1
-rw-r--r--tools/perf/tests/mmap-basic.c1
-rw-r--r--tools/perf/tests/open-syscall-tp-fields.c4
-rw-r--r--tools/perf/tests/perf-record.c14
-rw-r--r--tools/perf/tests/perf-time-to-tsc.c4
-rw-r--r--tools/perf/tests/sample-parsing.c4
-rw-r--r--tools/perf/tests/sw-clock.c4
-rw-r--r--tools/perf/tests/task-exit.c20
-rw-r--r--tools/perf/ui/browsers/annotate.c24
-rw-r--r--tools/perf/ui/gtk/annotate.c13
-rw-r--r--tools/perf/ui/gtk/browser.c2
-rw-r--r--tools/perf/ui/gtk/gtk.h22
-rw-r--r--tools/perf/ui/gtk/progress.c20
-rw-r--r--tools/perf/ui/gtk/setup.c2
-rw-r--r--tools/perf/ui/gtk/util.c4
-rw-r--r--tools/perf/ui/progress.c32
-rw-r--r--tools/perf/ui/progress.h19
-rw-r--r--tools/perf/ui/setup.c61
-rw-r--r--tools/perf/ui/stdio/hist.c9
-rw-r--r--tools/perf/ui/tui/progress.c15
-rw-r--r--tools/perf/ui/tui/setup.c3
-rw-r--r--tools/perf/ui/tui/tui.h6
-rw-r--r--tools/perf/ui/ui.h14
-rwxr-xr-xtools/perf/util/PERF-VERSION-GEN2
-rw-r--r--tools/perf/util/annotate.c76
-rw-r--r--tools/perf/util/annotate.h26
-rw-r--r--tools/perf/util/build-id.c6
-rw-r--r--tools/perf/util/cache.h3
-rw-r--r--tools/perf/util/callchain.c147
-rw-r--r--tools/perf/util/callchain.h14
-rw-r--r--tools/perf/util/data.c120
-rw-r--r--tools/perf/util/data.h48
-rw-r--r--tools/perf/util/dso.c50
-rw-r--r--tools/perf/util/dso.h3
-rw-r--r--tools/perf/util/event.c32
-rw-r--r--tools/perf/util/event.h11
-rw-r--r--tools/perf/util/evlist.c258
-rw-r--r--tools/perf/util/evlist.h14
-rw-r--r--tools/perf/util/evsel.c15
-rw-r--r--tools/perf/util/evsel.h6
-rwxr-xr-xtools/perf/util/generate-cmdlist.sh4
-rw-r--r--tools/perf/util/header.c22
-rw-r--r--tools/perf/util/hist.c35
-rw-r--r--tools/perf/util/hist.h39
-rw-r--r--tools/perf/util/include/dwarf-regs.h2
-rw-r--r--tools/perf/util/include/linux/compiler.h19
-rw-r--r--tools/perf/util/intlist.c23
-rw-r--r--tools/perf/util/intlist.h2
-rw-r--r--tools/perf/util/machine.c127
-rw-r--r--tools/perf/util/machine.h8
-rw-r--r--tools/perf/util/map.c50
-rw-r--r--tools/perf/util/map.h7
-rw-r--r--tools/perf/util/parse-events.c4
-rw-r--r--tools/perf/util/parse-events.l63
-rw-r--r--tools/perf/util/path.c10
-rw-r--r--tools/perf/util/perf_regs.h4
-rw-r--r--tools/perf/util/pmu.c16
-rw-r--r--tools/perf/util/pmu.h1
-rw-r--r--tools/perf/util/probe-event.c5
-rw-r--r--tools/perf/util/probe-finder.c135
-rw-r--r--tools/perf/util/probe-finder.h5
-rw-r--r--tools/perf/util/python.c10
-rw-r--r--tools/perf/util/rblist.c27
-rw-r--r--tools/perf/util/rblist.h1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c37
-rw-r--r--tools/perf/util/session.c179
-rw-r--r--tools/perf/util/session.h11
-rw-r--r--tools/perf/util/sort.c302
-rw-r--r--tools/perf/util/sort.h4
-rw-r--r--tools/perf/util/srcline.c265
-rw-r--r--tools/perf/util/strfilter.c46
-rw-r--r--tools/perf/util/symbol-elf.c607
-rw-r--r--tools/perf/util/symbol-minimal.c15
-rw-r--r--tools/perf/util/symbol.c449
-rw-r--r--tools/perf/util/symbol.h29
-rw-r--r--tools/perf/util/thread.c72
-rw-r--r--tools/perf/util/top.h1
-rw-r--r--tools/perf/util/trace-event-parse.c36
-rw-r--r--tools/perf/util/trace-event.h9
-rw-r--r--tools/perf/util/unwind.c75
-rw-r--r--tools/perf/util/unwind.h4
-rw-r--r--tools/perf/util/util.c66
-rw-r--r--tools/perf/util/util.h26
-rw-r--r--tools/power/x86/turbostat/turbostat.c155
-rw-r--r--tools/scripts/Makefile.include23
-rw-r--r--tools/testing/ktest/examples/crosstests.conf6
-rw-r--r--tools/virtio/virtio_test.c6
-rw-r--r--tools/virtio/vringh_test.c13
-rw-r--r--virt/kvm/Kconfig3
-rw-r--r--virt/kvm/async_pf.c22
-rw-r--r--virt/kvm/iommu.c34
-rw-r--r--virt/kvm/kvm_main.c104
-rw-r--r--virt/kvm/vfio.c264
6663 files changed, 270345 insertions, 136597 deletions
diff --git a/CREDITS b/CREDITS
index 0640e1650483..b928516eea90 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3152,6 +3152,11 @@ N: Dipankar Sarma
E: dipankar@in.ibm.com
D: RCU
+N: Yoshinori Sato
+E: ysato@users.sourceforge.jp
+D: uClinux for Renesas H8/300 (H8300)
+D: http://uclinux-h8.sourceforge.jp/
+
N: Hannu Savolainen
E: hannu@opensound.com
D: Maintainer of the sound drivers until 2.1.x days.
diff --git a/Documentation/ABI/stable/sysfs-driver-ib_srp b/Documentation/ABI/stable/sysfs-driver-ib_srp
index 5c53d28f775c..b9688de8455b 100644
--- a/Documentation/ABI/stable/sysfs-driver-ib_srp
+++ b/Documentation/ABI/stable/sysfs-driver-ib_srp
@@ -61,6 +61,12 @@ Description: Interface for making ib_srp connect to a new target.
interrupt is handled by a different CPU then the comp_vector
parameter can be used to spread the SRP completion workload
over multiple CPU's.
+ * tl_retry_count, a number in the range 2..7 specifying the
+ IB RC retry count.
+ * queue_size, the maximum number of commands that the
+ initiator is allowed to queue per SCSI host. The default
+ value for this parameter is 62. The lowest supported value
+ is 2.
What: /sys/class/infiniband_srp/srp-<hca>-<port_number>/ibdev
Date: January 2, 2006
@@ -153,6 +159,13 @@ Contact: linux-rdma@vger.kernel.org
Description: InfiniBand service ID used for establishing communication with
the SRP target.
+What: /sys/class/scsi_host/host<n>/sgid
+Date: February 1, 2014
+KernelVersion: 3.13
+Contact: linux-rdma@vger.kernel.org
+Description: InfiniBand GID of the source port used for communication with
+ the SRP target.
+
What: /sys/class/scsi_host/host<n>/zero_req_lim
Date: September 20, 2006
KernelVersion: 2.6.18
diff --git a/Documentation/ABI/stable/sysfs-transport-srp b/Documentation/ABI/stable/sysfs-transport-srp
index b36fb0dc13c8..ec7af69fea0a 100644
--- a/Documentation/ABI/stable/sysfs-transport-srp
+++ b/Documentation/ABI/stable/sysfs-transport-srp
@@ -5,6 +5,24 @@ Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
Description: Instructs an SRP initiator to disconnect from a target and to
remove all LUNs imported from that target.
+What: /sys/class/srp_remote_ports/port-<h>:<n>/dev_loss_tmo
+Date: February 1, 2014
+KernelVersion: 3.13
+Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
+Description: Number of seconds the SCSI layer will wait after a transport
+ layer error has been observed before removing a target port.
+ Zero means immediate removal. Setting this attribute to "off"
+ will disable the dev_loss timer.
+
+What: /sys/class/srp_remote_ports/port-<h>:<n>/fast_io_fail_tmo
+Date: February 1, 2014
+KernelVersion: 3.13
+Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
+Description: Number of seconds the SCSI layer will wait after a transport
+ layer error has been observed before failing I/O. Zero means
+ failing I/O immediately. Setting this attribute to "off" will
+ disable the fast_io_fail timer.
+
What: /sys/class/srp_remote_ports/port-<h>:<n>/port_id
Date: June 27, 2007
KernelVersion: 2.6.24
@@ -12,8 +30,29 @@ Contact: linux-scsi@vger.kernel.org
Description: 16-byte local SRP port identifier in hexadecimal format. An
example: 4c:49:4e:55:58:20:56:49:4f:00:00:00:00:00:00:00.
+What: /sys/class/srp_remote_ports/port-<h>:<n>/reconnect_delay
+Date: February 1, 2014
+KernelVersion: 3.13
+Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
+Description: Number of seconds the SCSI layer will wait after a reconnect
+ attempt failed before retrying. Setting this attribute to
+ "off" will disable time-based reconnecting.
+
What: /sys/class/srp_remote_ports/port-<h>:<n>/roles
Date: June 27, 2007
KernelVersion: 2.6.24
Contact: linux-scsi@vger.kernel.org
Description: Role of the remote port. Either "SRP Initiator" or "SRP Target".
+
+What: /sys/class/srp_remote_ports/port-<h>:<n>/state
+Date: February 1, 2014
+KernelVersion: 3.13
+Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
+Description: State of the transport layer used for communication with the
+ remote port. "running" if the transport layer is operational;
+ "blocked" if a transport layer error has been encountered but
+ the fast_io_fail_tmo timer has not yet fired; "fail-fast"
+ after the fast_io_fail_tmo timer has fired and before the
+ "dev_loss_tmo" timer has fired; "lost" after the
+ "dev_loss_tmo" timer has fired and before the port is finally
+ removed.
diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd
index bfd119ace6ad..1399bb2da3eb 100644
--- a/Documentation/ABI/testing/sysfs-class-mtd
+++ b/Documentation/ABI/testing/sysfs-class-mtd
@@ -104,7 +104,7 @@ Description:
One of the following ASCII strings, representing the device
type:
- absent, ram, rom, nor, nand, dataflash, ubi, unknown
+ absent, ram, rom, nor, nand, mlc-nand, dataflash, ubi, unknown
What: /sys/class/mtd/mtdX/writesize
Date: April 2009
diff --git a/Documentation/ABI/testing/sysfs-class-net-batman-adv b/Documentation/ABI/testing/sysfs-class-net-batman-adv
index bdc00707c751..7f34a95bb963 100644
--- a/Documentation/ABI/testing/sysfs-class-net-batman-adv
+++ b/Documentation/ABI/testing/sysfs-class-net-batman-adv
@@ -1,13 +1,13 @@
What: /sys/class/net/<iface>/batman-adv/iface_status
Date: May 2010
-Contact: Marek Lindner <lindner_marek@yahoo.de>
+Contact: Marek Lindner <mareklindner@neomailbox.ch>
Description:
Indicates the status of <iface> as it is seen by batman.
What: /sys/class/net/<iface>/batman-adv/mesh_iface
Date: May 2010
-Contact: Marek Lindner <lindner_marek@yahoo.de>
+Contact: Marek Lindner <mareklindner@neomailbox.ch>
Description:
The /sys/class/net/<iface>/batman-adv/mesh_iface file
displays the batman mesh interface this <iface>
diff --git a/Documentation/ABI/testing/sysfs-class-net-mesh b/Documentation/ABI/testing/sysfs-class-net-mesh
index bdcd8b4e38f2..0baa657b18c4 100644
--- a/Documentation/ABI/testing/sysfs-class-net-mesh
+++ b/Documentation/ABI/testing/sysfs-class-net-mesh
@@ -1,22 +1,23 @@
What: /sys/class/net/<mesh_iface>/mesh/aggregated_ogms
Date: May 2010
-Contact: Marek Lindner <lindner_marek@yahoo.de>
+Contact: Marek Lindner <mareklindner@neomailbox.ch>
Description:
Indicates whether the batman protocol messages of the
mesh <mesh_iface> shall be aggregated or not.
-What: /sys/class/net/<mesh_iface>/mesh/ap_isolation
+What: /sys/class/net/<mesh_iface>/mesh/<vlan_subdir>/ap_isolation
Date: May 2011
-Contact: Antonio Quartulli <ordex@autistici.org>
+Contact: Antonio Quartulli <antonio@meshcoding.com>
Description:
Indicates whether the data traffic going from a
wireless client to another wireless client will be
- silently dropped.
+ silently dropped. <vlan_subdir> is empty when referring
+		to the untagged LAN.
What: /sys/class/net/<mesh_iface>/mesh/bonding
Date: June 2010
-Contact: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Contact: Simon Wunderlich <sw@simonwunderlich.de>
Description:
Indicates whether the data traffic going through the
mesh will be sent using multiple interfaces at the
@@ -24,7 +25,7 @@ Description:
What: /sys/class/net/<mesh_iface>/mesh/bridge_loop_avoidance
Date: November 2011
-Contact: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Contact: Simon Wunderlich <sw@simonwunderlich.de>
Description:
Indicates whether the bridge loop avoidance feature
is enabled. This feature detects and avoids loops
@@ -41,21 +42,21 @@ Description:
What: /sys/class/net/<mesh_iface>/mesh/gw_bandwidth
Date: October 2010
-Contact: Marek Lindner <lindner_marek@yahoo.de>
+Contact: Marek Lindner <mareklindner@neomailbox.ch>
Description:
Defines the bandwidth which is propagated by this
node if gw_mode was set to 'server'.
What: /sys/class/net/<mesh_iface>/mesh/gw_mode
Date: October 2010
-Contact: Marek Lindner <lindner_marek@yahoo.de>
+Contact: Marek Lindner <mareklindner@neomailbox.ch>
Description:
Defines the state of the gateway features. Can be
either 'off', 'client' or 'server'.
What: /sys/class/net/<mesh_iface>/mesh/gw_sel_class
Date: October 2010
-Contact: Marek Lindner <lindner_marek@yahoo.de>
+Contact: Marek Lindner <mareklindner@neomailbox.ch>
Description:
Defines the selection criteria this node will use
to choose a gateway if gw_mode was set to 'client'.
@@ -77,25 +78,14 @@ Description:
What: /sys/class/net/<mesh_iface>/mesh/orig_interval
Date: May 2010
-Contact: Marek Lindner <lindner_marek@yahoo.de>
+Contact: Marek Lindner <mareklindner@neomailbox.ch>
Description:
Defines the interval in milliseconds in which batman
sends its protocol messages.
What: /sys/class/net/<mesh_iface>/mesh/routing_algo
Date: Dec 2011
-Contact: Marek Lindner <lindner_marek@yahoo.de>
+Contact: Marek Lindner <mareklindner@neomailbox.ch>
Description:
Defines the routing procotol this mesh instance
uses to find the optimal paths through the mesh.
-
-What: /sys/class/net/<mesh_iface>/mesh/vis_mode
-Date: May 2010
-Contact: Marek Lindner <lindner_marek@yahoo.de>
-Description:
- Each batman node only maintains information about its
- own local neighborhood, therefore generating graphs
- showing the topology of the entire mesh is not easily
- feasible without having a central instance to collect
- the local topologies from all nodes. This file allows
- to activate the collecting (server) mode.
diff --git a/Documentation/ABI/testing/sysfs-class-powercap b/Documentation/ABI/testing/sysfs-class-powercap
new file mode 100644
index 000000000000..db3b3ff70d84
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-powercap
@@ -0,0 +1,152 @@
+What: /sys/class/powercap/
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ The powercap/ class sub directory belongs to the power cap
+ subsystem. Refer to
+ Documentation/power/powercap/powercap.txt for details.
+
+What: /sys/class/powercap/<control type>
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ A <control type> is a unique name under /sys/class/powercap.
+ Here <control type> determines how the power is going to be
+ controlled. A <control type> can contain multiple power zones.
+
+What: /sys/class/powercap/<control type>/enabled
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+		This allows one to enable/disable power capping for a "control type".
+		This status affects every power zone using this "control type".
+
+What: /sys/class/powercap/<control type>/<power zone>
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+		A power zone is a single device or a collection of devices, which can
+ be independently monitored and controlled. A power zone sysfs
+ entry is qualified with the name of the <control type>.
+ E.g. intel-rapl:0:1:1.
+
+What: /sys/class/powercap/<control type>/<power zone>/<child power zone>
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ Power zones may be organized in a hierarchy in which child
+ power zones provide monitoring and control for a subset of
+ devices under the parent. For example, if there is a parent
+ power zone for a whole CPU package, each CPU core in it can
+ be a child power zone.
+
+What: /sys/class/powercap/.../<power zone>/name
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ Specifies the name of this power zone.
+
+What: /sys/class/powercap/.../<power zone>/energy_uj
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ Current energy counter in micro-joules. Write "0" to reset.
+ If the counter can not be reset, then this attribute is
+ read-only.
+
+What: /sys/class/powercap/.../<power zone>/max_energy_range_uj
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ Range of the above energy counter in micro-joules.
+
+
+What: /sys/class/powercap/.../<power zone>/power_uw
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ Current power in micro-watts.
+
+What: /sys/class/powercap/.../<power zone>/max_power_range_uw
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ Range of the above power value in micro-watts.
+
+What: /sys/class/powercap/.../<power zone>/constraint_X_name
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ Each power zone can define one or more constraints. Each
+ constraint can have an optional name. Here "X" can have values
+ from 0 to max integer.
+
+What: /sys/class/powercap/.../<power zone>/constraint_X_power_limit_uw
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+		Power limit in micro-watts, applicable over the time window
+		specified by "constraint_X_time_window_us".
+ Here "X" can have values from 0 to max integer.
+
+What: /sys/class/powercap/.../<power zone>/constraint_X_time_window_us
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ Time window in micro seconds. This is used along with
+ constraint_X_power_limit_uw to define a power constraint.
+ Here "X" can have values from 0 to max integer.
+
+
+What: /sys/class/powercap/<control type>/.../constraint_X_max_power_uw
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ Maximum allowed power in micro watts for this constraint.
+ Here "X" can have values from 0 to max integer.
+
+What: /sys/class/powercap/<control type>/.../constraint_X_min_power_uw
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ Minimum allowed power in micro watts for this constraint.
+ Here "X" can have values from 0 to max integer.
+
+What: /sys/class/powercap/.../<power zone>/constraint_X_max_time_window_us
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ Maximum allowed time window in micro seconds for this
+ constraint. Here "X" can have values from 0 to max integer.
+
+What: /sys/class/powercap/.../<power zone>/constraint_X_min_time_window_us
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+ Minimum allowed time window in micro seconds for this
+ constraint. Here "X" can have values from 0 to max integer.
+
+What: /sys/class/powercap/.../<power zone>/enabled
+Date: September 2013
+KernelVersion: 3.13
+Contact: linux-pm@vger.kernel.org
+Description:
+		This allows one to enable/disable power capping at the power zone level.
+		This applies to the current power zone and its children.
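
As a rough userspace sketch of how the attributes above are consumed (the
intel-rapl control type and zone path are only an example, and error handling
is trimmed), each attribute is a plain text file:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long uj;
		FILE *f;

		/* Read the current energy counter of an example zone. */
		f = fopen("/sys/class/powercap/intel-rapl/intel-rapl:0/energy_uj", "r");
		if (!f || fscanf(f, "%llu", &uj) != 1)
			return 1;
		fclose(f);
		printf("energy: %llu uJ\n", uj);

		/* Set constraint 0 of the same zone to 15 W (needs privileges). */
		f = fopen("/sys/class/powercap/intel-rapl/intel-rapl:0/constraint_0_power_limit_uw", "w");
		if (f) {
			fprintf(f, "15000000");
			fclose(f);
		}
		return 0;
	}
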
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-ryos b/Documentation/ABI/testing/sysfs-driver-hid-roccat-ryos
new file mode 100644
index 000000000000..1d6a8cf9dc0a
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-ryos
@@ -0,0 +1,178 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/control
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one select which data from which
+ profile will be read next. The data has to be 3 bytes long.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/profile
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The keyboard can store 5 profiles which can be switched by the
+		press of a button. profile holds the index of the currently
+		active profile.
+ This value is persistent, so its value determines the profile
+ that's active when the device is powered on next time.
+ When written, the device activates the set profile immediately.
+ The data has to be 3 bytes long.
+ The device will reject invalid data.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/keys_primary
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the default of all keys for
+ a specific profile. Profile index is included in written data.
+ The data has to be 125 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/keys_function
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the function of the
+ function keys for a specific profile. Profile index is included
+ in written data. The data has to be 95 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/keys_macro
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the function of the macro
+ keys for a specific profile. Profile index is included in
+ written data. The data has to be 35 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/keys_thumbster
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the function of the
+ thumbster keys for a specific profile. Profile index is included
+ in written data. The data has to be 23 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/keys_extra
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the function of the
+ capslock and function keys for a specific profile. Profile index
+ is included in written data. The data has to be 8 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/keys_easyzone
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the function of the
+ easyzone keys for a specific profile. Profile index is included
+ in written data. The data has to be 294 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/key_mask
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one deactivate certain keys like
+		Windows and application keys, to prevent accidental presses.
+		Profile index for which these settings occur is included in
+ written data. The data has to be 6 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/light
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the backlight intensity for
+ a specific profile. Profile index is included in written data.
+ This attribute is only valid for the glow and pro variant.
+ The data has to be 16 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/macro
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one store macros with max 480
+ keystrokes for a specific button for a specific profile.
+ Button and profile indexes are included in written data.
+ The data has to be 2002 bytes long.
+ Before reading this file, control has to be written to select
+ which profile and key to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/info
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns general data like firmware version.
+ The data is 8 bytes long.
+ This file is readonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/reset
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one reset the device.
+ The data has to be 3 bytes long.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/talk
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one trigger easyshift functionality
+ from the host.
+ The data has to be 16 bytes long.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/light_control
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one switch between stored and custom
+ light settings.
+ This attribute is only valid for the pro variant.
+ The data has to be 8 bytes long.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/stored_lights
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set per-key lighting for different
+ layers.
+ This attribute is only valid for the pro variant.
+ The data has to be 1382 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/custom_lights
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the actual per-key lighting.
+ This attribute is only valid for the pro variant.
+ The data has to be 20 bytes long.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/ryos/roccatryos<minor>/light_macro
+Date: October 2013
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set a light macro that is looped
+		whenever the device enters dimness mode.
+ This attribute is only valid for the pro variant.
+ The data has to be 2002 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-wiimote b/Documentation/ABI/testing/sysfs-driver-hid-wiimote
index ed5dd567d397..39dfa5cb1cc5 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-wiimote
+++ b/Documentation/ABI/testing/sysfs-driver-hid-wiimote
@@ -57,3 +57,21 @@ Description: This attribute is only provided if the device was detected as a
Calibration data is already applied by the kernel to all input
values but may be used by user-space to perform other
transformations.
+
+What: /sys/bus/hid/drivers/wiimote/<dev>/pro_calib
+Date: October 2013
+KernelVersion: 3.13
+Contact: David Herrmann <dh.herrmann@gmail.com>
+Description: This attribute is only provided if the device was detected as a
+ pro-controller. It provides a single line with 4 calibration
+		values for the two analog sticks. Format is: "x1:y1 x2:y2". Data
+ is prefixed with a +/-. Each value is a signed 16bit number.
+ Data is encoded as decimal numbers and specifies the offsets of
+ the analog sticks of the pro-controller.
+ Calibration data is already applied by the kernel to all input
+ values but may be used by user-space to perform other
+ transformations.
+ Calibration data is detected by the kernel during device setup.
+ You can write "scan\n" into this file to re-trigger calibration.
+ You can also write data directly in the form "x1:y1 x2:y2" to
+ set the calibration values manually.
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 14129f149a75..5e983031cc11 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -101,14 +101,23 @@ style to do this even if your device holds the default setting,
because this shows that you did think about these issues wrt. your
device.
-The query is performed via a call to dma_set_mask():
+The query is performed via a call to dma_set_mask_and_coherent():
- int dma_set_mask(struct device *dev, u64 mask);
+ int dma_set_mask_and_coherent(struct device *dev, u64 mask);
-The query for consistent allocations is performed via a call to
-dma_set_coherent_mask():
+which will query the mask for both streaming and coherent APIs together.
+If you have some special requirements, then the following two separate
+queries can be used instead:
- int dma_set_coherent_mask(struct device *dev, u64 mask);
+ The query for streaming mappings is performed via a call to
+ dma_set_mask():
+
+ int dma_set_mask(struct device *dev, u64 mask);
+
+ The query for consistent allocations is performed via a call
+ to dma_set_coherent_mask():
+
+ int dma_set_coherent_mask(struct device *dev, u64 mask);
Here, dev is a pointer to the device struct of your device, and mask
is a bit mask describing which bits of an address your device
@@ -137,7 +146,7 @@ exactly why.
The standard 32-bit addressing device would do something like this:
- if (dma_set_mask(dev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING
"mydev: No suitable DMA available.\n");
goto ignore_this_device;
@@ -171,22 +180,20 @@ the case would look like this:
int using_dac, consistent_using_dac;
- if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
using_dac = 1;
consistent_using_dac = 1;
- dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
- } else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) {
+ } else if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
using_dac = 0;
consistent_using_dac = 0;
- dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
} else {
printk(KERN_WARNING
"mydev: No suitable DMA available.\n");
goto ignore_this_device;
}
-dma_set_coherent_mask() will always be able to set the same or a
-smaller mask as dma_set_mask(). However for the rare case that a
+The coherent mask can always be set to the same or a smaller mask
+than the streaming mask. However, for the rare case that a
device driver only uses consistent allocations, one would have to
check the return value from dma_set_coherent_mask().
@@ -199,9 +206,9 @@ address you might do something like:
goto ignore_this_device;
}
-When dma_set_mask() is successful, and returns zero, the kernel saves
-away this mask you have provided. The kernel will use this
-information later when you make DMA mappings.
+When dma_set_mask() or dma_set_mask_and_coherent() is successful, and
+returns zero, the kernel saves away this mask you have provided. The
+kernel will use this information later when you make DMA mappings.
There is a case which we are aware of at this time, which is worth
mentioning in this documentation. If your device supports multiple
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 78a6c569d204..e865279cec58 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -142,6 +142,14 @@ internal API for use by the platform than an external API for use by
driver writers.
int
+dma_set_mask_and_coherent(struct device *dev, u64 mask)
+
+Checks to see if the mask is possible and updates the device
+streaming and coherent DMA mask parameters if it is.
+
+Returns: 0 if successful and a negative error if not.
+
+int
dma_set_mask(struct device *dev, u64 mask)
Checks to see if the mask is possible and updates the device
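
A brief usage sketch of the combined call (dev is assumed to be the driver's
struct device; the 32-bit fallback mirrors the HOWTO example above):

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_warn(dev, "No suitable DMA available\n");
		return -EIO;
	}
	/* On success, both the streaming and the coherent mask are updated. */
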
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index e59480db9ee0..cc2450d80310 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -13,7 +13,7 @@ all pending DMA writes to complete, and thus provides a mechanism to
strictly order DMA from a device across all intervening busses and
bridges. This barrier is not specific to a particular type of
interconnect, it applies to the system as a whole, and so its
-implementation must account for the idiosyncracies of the system all
+implementation must account for the idiosyncrasies of the system all
the way from the DMA device to memory.
As an example of a situation where DMA_ATTR_WRITE_BARRIER would be
@@ -60,7 +60,7 @@ such mapping is non-trivial task and consumes very limited resources
Buffers allocated with this attribute can be only passed to user space
by calling dma_mmap_attrs(). By using this API, you are guaranteeing
that you won't dereference the pointer returned by dma_alloc_attr(). You
-can threat it as a cookie that must be passed to dma_mmap_attrs() and
+can treat it as a cookie that must be passed to dma_mmap_attrs() and
dma_free_attrs(). Make sure that both of these also get this attribute
set on each call.
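
A minimal sketch of that cookie usage, assuming the struct dma_attrs based
form of the attrs interface and a driver-supplied dev and size:

	dma_addr_t bus;
	void *cookie;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	cookie = dma_alloc_attrs(dev, size, &bus, GFP_KERNEL, &attrs);
	if (!cookie)
		return -ENOMEM;
	/* 'cookie' must not be dereferenced; it is only handed back to
	 * dma_mmap_attrs() and dma_free_attrs() with the same attrs set. */
	...
	dma_free_attrs(dev, size, cookie, bus, &attrs);
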
@@ -82,7 +82,7 @@ to 'device' domain, what synchronizes CPU caches for the given region
(usually it means that the cache has been flushed or invalidated
depending on the dma direction). However, next calls to
dma_map_{single,page,sg}() for other devices will perform exactly the
-same sychronization operation on the CPU cache. CPU cache sychronization
+same synchronization operation on the CPU cache. CPU cache synchronization
might be a time consuming operation, especially if the buffers are
large, so it is highly recommended to avoid it if possible.
DMA_ATTR_SKIP_CPU_SYNC allows platform code to skip synchronization of
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index fe397f90a34f..6c9d9d37c83a 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -87,7 +87,10 @@ X!Iinclude/linux/kobject.h
!Ekernel/printk/printk.c
!Ekernel/panic.c
!Ekernel/sys.c
-!Ekernel/rcupdate.c
+!Ekernel/rcu/srcu.c
+!Ekernel/rcu/tree.c
+!Ekernel/rcu/tree_plugin.h
+!Ekernel/rcu/update.c
</sect1>
<sect1><title>Device Resource Management</title>
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index 25b58efd955d..4f676838da06 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -91,7 +91,6 @@
<title>The Filesystem for Exporting Kernel Objects</title>
!Efs/sysfs/file.c
!Efs/sysfs/symlink.c
-!Efs/sysfs/bin.c
</chapter>
<chapter id="debugfs">
diff --git a/Documentation/DocBook/genericirq.tmpl b/Documentation/DocBook/genericirq.tmpl
index d16d21b7a3b7..46347f603353 100644
--- a/Documentation/DocBook/genericirq.tmpl
+++ b/Documentation/DocBook/genericirq.tmpl
@@ -87,7 +87,7 @@
<chapter id="rationale">
<title>Rationale</title>
<para>
- The original implementation of interrupt handling in Linux is using
+ The original implementation of interrupt handling in Linux uses
the __do_IRQ() super-handler, which is able to deal with every
type of interrupt logic.
</para>
@@ -111,19 +111,19 @@
</itemizedlist>
</para>
<para>
- This split implementation of highlevel IRQ handlers allows us to
+ This split implementation of high-level IRQ handlers allows us to
optimize the flow of the interrupt handling for each specific
- interrupt type. This reduces complexity in that particular codepath
+ interrupt type. This reduces complexity in that particular code path
and allows the optimized handling of a given type.
</para>
<para>
The original general IRQ implementation used hw_interrupt_type
structures and their ->ack(), ->end() [etc.] callbacks to
differentiate the flow control in the super-handler. This leads to
- a mix of flow logic and lowlevel hardware logic, and it also leads
- to unnecessary code duplication: for example in i386, there is a
- ioapic_level_irq and a ioapic_edge_irq irq-type which share many
- of the lowlevel details but have different flow handling.
+ a mix of flow logic and low-level hardware logic, and it also leads
+ to unnecessary code duplication: for example in i386, there is an
+ ioapic_level_irq and an ioapic_edge_irq IRQ-type which share many
+ of the low-level details but have different flow handling.
</para>
<para>
A more natural abstraction is the clean separation of the
@@ -132,23 +132,23 @@
<para>
Analysing a couple of architecture's IRQ subsystem implementations
reveals that most of them can use a generic set of 'irq flow'
- methods and only need to add the chip level specific code.
+ methods and only need to add the chip-level specific code.
The separation is also valuable for (sub)architectures
- which need specific quirks in the irq flow itself but not in the
- chip-details - and thus provides a more transparent IRQ subsystem
+ which need specific quirks in the IRQ flow itself but not in the
+ chip details - and thus provides a more transparent IRQ subsystem
design.
</para>
<para>
- Each interrupt descriptor is assigned its own highlevel flow
+ Each interrupt descriptor is assigned its own high-level flow
handler, which is normally one of the generic
- implementations. (This highlevel flow handler implementation also
+ implementations. (This high-level flow handler implementation also
makes it simple to provide demultiplexing handlers which can be
found in embedded platforms on various architectures.)
</para>
<para>
The separation makes the generic interrupt handling layer more
flexible and extensible. For example, an (sub)architecture can
- use a generic irq-flow implementation for 'level type' interrupts
+ use a generic IRQ-flow implementation for 'level type' interrupts
and add a (sub)architecture specific 'edge type' implementation.
</para>
<para>
@@ -172,9 +172,9 @@
<para>
There are three main levels of abstraction in the interrupt code:
<orderedlist>
- <listitem><para>Highlevel driver API</para></listitem>
- <listitem><para>Highlevel IRQ flow handlers</para></listitem>
- <listitem><para>Chiplevel hardware encapsulation</para></listitem>
+ <listitem><para>High-level driver API</para></listitem>
+ <listitem><para>High-level IRQ flow handlers</para></listitem>
+ <listitem><para>Chip-level hardware encapsulation</para></listitem>
</orderedlist>
</para>
<sect1 id="Interrupt_control_flow">
@@ -189,16 +189,16 @@
which are assigned to this interrupt.
</para>
<para>
- Whenever an interrupt triggers, the lowlevel arch code calls into
- the generic interrupt code by calling desc->handle_irq().
- This highlevel IRQ handling function only uses desc->irq_data.chip
+ Whenever an interrupt triggers, the low-level architecture code calls
+ into the generic interrupt code by calling desc->handle_irq().
+ This high-level IRQ handling function only uses desc->irq_data.chip
primitives referenced by the assigned chip descriptor structure.
</para>
</sect1>
<sect1 id="Highlevel_Driver_API">
- <title>Highlevel Driver API</title>
+ <title>High-level Driver API</title>
<para>
- The highlevel Driver API consists of following functions:
+	  The high-level Driver API consists of the following functions:
<itemizedlist>
<listitem><para>request_irq()</para></listitem>
<listitem><para>free_irq()</para></listitem>
@@ -216,7 +216,7 @@
</para>
</sect1>
<sect1 id="Highlevel_IRQ_flow_handlers">
- <title>Highlevel IRQ flow handlers</title>
+ <title>High-level IRQ flow handlers</title>
<para>
The generic layer provides a set of pre-defined irq-flow methods:
<itemizedlist>
@@ -228,7 +228,7 @@
<listitem><para>handle_edge_eoi_irq</para></listitem>
<listitem><para>handle_bad_irq</para></listitem>
</itemizedlist>
- The interrupt flow handlers (either predefined or architecture
+ The interrupt flow handlers (either pre-defined or architecture
specific) are assigned to specific interrupts by the architecture
either during bootup or during device initialization.
</para>
@@ -297,7 +297,7 @@ desc->irq_data.chip->irq_unmask();
<para>
handle_fasteoi_irq provides a generic implementation
for interrupts, which only need an EOI at the end of
- the handler
+ the handler.
</para>
<para>
The following control flow is implemented (simplified excerpt):
@@ -394,7 +394,7 @@ if (desc->irq_data.chip->irq_eoi)
The generic functions are intended for 'clean' architectures and chips,
which have no platform-specific IRQ handling quirks. If an architecture
needs to implement quirks on the 'flow' level then it can do so by
- overriding the highlevel irq-flow handler.
+ overriding the high-level irq-flow handler.
</para>
</sect2>
<sect2 id="Delayed_interrupt_disable">
@@ -419,9 +419,9 @@ if (desc->irq_data.chip->irq_eoi)
</sect2>
</sect1>
<sect1 id="Chiplevel_hardware_encapsulation">
- <title>Chiplevel hardware encapsulation</title>
+ <title>Chip-level hardware encapsulation</title>
<para>
- The chip level hardware descriptor structure irq_chip
+ The chip-level hardware descriptor structure irq_chip
contains all the direct chip relevant functions, which
can be utilized by the irq flow implementations.
<itemizedlist>
@@ -429,14 +429,14 @@ if (desc->irq_data.chip->irq_eoi)
<listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem>
<listitem><para>irq_mask()</para></listitem>
<listitem><para>irq_unmask()</para></listitem>
- <listitem><para>irq_eoi() - Optional, required for eoi flow handlers</para></listitem>
+ <listitem><para>irq_eoi() - Optional, required for EOI flow handlers</para></listitem>
<listitem><para>irq_retrigger() - Optional</para></listitem>
<listitem><para>irq_set_type() - Optional</para></listitem>
<listitem><para>irq_set_wake() - Optional</para></listitem>
</itemizedlist>
These primitives are strictly intended to mean what they say: ack means
ACK, masking means masking of an IRQ line, etc. It is up to the flow
- handler(s) to use these basic units of lowlevel functionality.
+ handler(s) to use these basic units of low-level functionality.
</para>
</sect1>
</chapter>
@@ -445,7 +445,7 @@ if (desc->irq_data.chip->irq_eoi)
<title>__do_IRQ entry point</title>
<para>
The original implementation __do_IRQ() was an alternative entry
- point for all types of interrupts. It not longer exists.
+ point for all types of interrupts. It no longer exists.
</para>
<para>
This handler turned out to be not suitable for all
@@ -468,11 +468,11 @@ if (desc->irq_data.chip->irq_eoi)
<chapter id="genericchip">
<title>Generic interrupt chip</title>
<para>
- To avoid copies of identical implementations of irq chips the
+ To avoid copies of identical implementations of IRQ chips the
core provides a configurable generic interrupt chip
implementation. Developers should check carefuly whether the
generic chip fits their needs before implementing the same
- functionality slightly different themself.
+ functionality slightly differently themselves.
</para>
!Ekernel/irq/generic-chip.c
</chapter>
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index a248f42a121e..cd11926e07c7 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -1222,8 +1222,6 @@ in this page</entry>
#define NAND_BBT_VERSION 0x00000100
/* Create a bbt if none axists */
#define NAND_BBT_CREATE 0x00000200
-/* Search good / bad pattern through all pages of a block */
-#define NAND_BBT_SCANALLPAGES 0x00000400
/* Write bbt if neccecary */
#define NAND_BBT_WRITE 0x00001000
/* Read and write back block contents when writing bbt */
diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt
index bccf602a87f5..6f458564d625 100644
--- a/Documentation/PCI/pci.txt
+++ b/Documentation/PCI/pci.txt
@@ -525,8 +525,9 @@ corresponding register block for you.
6. Other interesting functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-pci_find_slot() Find pci_dev corresponding to given bus and
- slot numbers.
+pci_get_domain_bus_and_slot() Find pci_dev corresponding to given domain,
+				bus and slot numbers. If the device is
+ found, its reference count is increased.
pci_set_power_state() Set PCI Power Management state (0=D0 ... 3=D3)
pci_find_capability() Find specified capability in device's capability
list.
@@ -582,7 +583,8 @@ having sane locking.
pci_find_device() Superseded by pci_get_device()
pci_find_subsys() Superseded by pci_get_subsys()
-pci_find_slot() Superseded by pci_get_slot()
+pci_find_slot() Superseded by pci_get_domain_bus_and_slot()
+pci_get_slot() Superseded by pci_get_domain_bus_and_slot()
The alternative is the traditional PCI device driver that walks PCI
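
As a short sketch of the replacement lookup (the domain, bus and devfn values
are purely illustrative), note that the reference it takes must be dropped:

	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 2));
	if (pdev) {
		/* ... use pdev ... */
		pci_dev_put(pdev);	/* drop the reference taken by the lookup */
	}
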
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 7703ec73a9bb..91266193b8f4 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -202,8 +202,8 @@ over a rather long period of time, but improvements are always welcome!
updater uses call_rcu_sched() or synchronize_sched(), then
the corresponding readers must disable preemption, possibly
by calling rcu_read_lock_sched() and rcu_read_unlock_sched().
- If the updater uses synchronize_srcu() or call_srcu(),
- the the corresponding readers must use srcu_read_lock() and
+ If the updater uses synchronize_srcu() or call_srcu(), then
+ the corresponding readers must use srcu_read_lock() and
srcu_read_unlock(), and with the same srcu_struct. The rules for
the expedited primitives are the same as for their non-expedited
counterparts. Mixing things up will result in confusion and
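
As a minimal sketch of the SRCU pairing described above (my_srcu, shared_ptr
and the freed object are illustrative; my_srcu is set up elsewhere with
init_srcu_struct()):

	static struct srcu_struct my_srcu;

	/* Reader: must use the same srcu_struct as the updater. */
	idx = srcu_read_lock(&my_srcu);
	p = srcu_dereference(shared_ptr, &my_srcu);
	/* ... read *p ... */
	srcu_read_unlock(&my_srcu, idx);

	/* Updater: */
	rcu_assign_pointer(shared_ptr, new_p);
	synchronize_srcu(&my_srcu);	/* waits only for my_srcu readers */
	kfree(old_p);
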
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 8e9359de1d28..6f3a0057548e 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -12,12 +12,12 @@ CONFIG_RCU_CPU_STALL_TIMEOUT
This kernel configuration parameter defines the period of time
that RCU will wait from the beginning of a grace period until it
issues an RCU CPU stall warning. This time period is normally
- sixty seconds.
+ 21 seconds.
This configuration parameter may be changed at runtime via the
/sys/module/rcutree/parameters/rcu_cpu_stall_timeout, however
this parameter is checked only at the beginning of a cycle.
- So if you are 30 seconds into a 70-second stall, setting this
+ So if you are 10 seconds into a 40-second stall, setting this
sysfs parameter to (say) five will shorten the timeout for the
-next- stall, or the following warning for the current stall
(assuming the stall lasts long enough). It will not affect the
@@ -32,7 +32,7 @@ CONFIG_RCU_CPU_STALL_VERBOSE
also dump the stacks of any tasks that are blocking the current
RCU-preempt grace period.
-RCU_CPU_STALL_INFO
+CONFIG_RCU_CPU_STALL_INFO
This kernel configuration parameter causes the stall warning to
print out additional per-CPU diagnostic information, including
@@ -43,7 +43,8 @@ RCU_STALL_DELAY_DELTA
Although the lockdep facility is extremely useful, it does add
some overhead. Therefore, under CONFIG_PROVE_RCU, the
RCU_STALL_DELAY_DELTA macro allows five extra seconds before
- giving an RCU CPU stall warning message.
+ giving an RCU CPU stall warning message. (This is a cpp
+ macro, not a kernel configuration parameter.)
RCU_STALL_RAT_DELAY
@@ -52,7 +53,8 @@ RCU_STALL_RAT_DELAY
However, if the offending CPU does not detect its own stall in
the number of jiffies specified by RCU_STALL_RAT_DELAY, then
some other CPU will complain. This delay is normally set to
- two jiffies.
+ two jiffies. (This is a cpp macro, not a kernel configuration
+ parameter.)
When a CPU detects that it is stalling, it will print a message similar
to the following:
@@ -86,7 +88,12 @@ printing, there will be a spurious stall-warning message:
INFO: rcu_bh_state detected stalls on CPUs/tasks: { } (detected by 4, 2502 jiffies)
-This is rare, but does happen from time to time in real life.
+This is rare, but does happen from time to time in real life. It is also
+possible for a zero-jiffy stall to be flagged in this case, depending
+on how the stall warning and the grace-period initialization happen to
+interact. Please note that it is not possible to entirely eliminate this
+sort of false positive without resorting to things like stop_machine(),
+which is overkill for this sort of problem.
If the CONFIG_RCU_CPU_STALL_INFO kernel configuration parameter is set,
more information is printed with the stall-warning message, for example:
@@ -216,4 +223,5 @@ that portion of the stack which remains the same from trace to trace.
If you can reliably trigger the stall, ftrace can be quite helpful.
RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE
-and with RCU's event tracing.
+and with RCU's event tracing. For information on RCU's event tracing,
+see include/trace/events/rcu.h.
diff --git a/Documentation/arm/Marvell/README b/Documentation/arm/Marvell/README
index 8f08a86e03b7..da0151db9964 100644
--- a/Documentation/arm/Marvell/README
+++ b/Documentation/arm/Marvell/README
@@ -88,6 +88,7 @@ EBU Armada family
MV78230
MV78260
MV78460
+ NOTE: not to be confused with the non-SMP 78xx0 SoCs
Product Brief: http://www.marvell.com/embedded-processors/armada-xp/assets/Marvell-ArmadaXP-SoC-product%20brief.pdf
No public datasheet available.
diff --git a/Documentation/arm/sunxi/README b/Documentation/arm/sunxi/README
index e3f93fb9224e..7945238453ed 100644
--- a/Documentation/arm/sunxi/README
+++ b/Documentation/arm/sunxi/README
@@ -10,6 +10,10 @@ SunXi family
Linux kernel mach directory: arch/arm/mach-sunxi
Flavors:
+ * ARM926 based SoCs
+ - Allwinner F20 (sun3i)
+ + Not Supported
+
* ARM Cortex-A8 based SoCs
- Allwinner A10 (sun4i)
+ Datasheet
@@ -25,4 +29,24 @@ SunXi family
+ Datasheet
http://dl.linux-sunxi.org/A13/A13%20Datasheet%20-%20v1.12%20%282012-03-29%29.pdf
+ User Manual
- http://dl.linux-sunxi.org/A13/A13%20User%20Manual%20-%20v1.2%20%282013-08-08%29.pdf
+ http://dl.linux-sunxi.org/A13/A13%20User%20Manual%20-%20v1.2%20%282013-01-08%29.pdf
+
+ * Dual ARM Cortex-A7 based SoCs
+ - Allwinner A20 (sun7i)
+ + User Manual
+ http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf
+
+ - Allwinner A23
+ + Not Supported
+
+ * Quad ARM Cortex-A7 based SoCs
+ - Allwinner A31 (sun6i)
+ + Datasheet
+ http://dl.linux-sunxi.org/A31/A31%20Datasheet%20-%20v1.00%20(2012-12-24).pdf
+
+ - Allwinner A31s (sun6i)
+ + Not Supported
+
+ * Quad ARM Cortex-A15, Quad ARM Cortex-A7 based SoCs
+ - Allwinner A80
+      + Not Supported
\ No newline at end of file
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 98df4a03807e..a9691cc48fe3 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -115,9 +115,10 @@ Before jumping into the kernel, the following conditions must be met:
External caches (if present) must be configured and disabled.
- Architected timers
- CNTFRQ must be programmed with the timer frequency.
- If entering the kernel at EL1, CNTHCTL_EL2 must have EL1PCTEN (bit 0)
- set where available.
+ CNTFRQ must be programmed with the timer frequency and CNTVOFF must
+ be programmed with a consistent value on all CPUs. If entering the
+ kernel at EL1, CNTHCTL_EL2 must have EL1PCTEN (bit 0) set where
+ available.
- Coherency
All CPUs to be booted by the kernel must be part of the same coherency
@@ -130,30 +131,46 @@ Before jumping into the kernel, the following conditions must be met:
the kernel image will be entered must be initialised by software at a
higher exception level to prevent execution in an UNKNOWN state.
+The requirements described above for CPU mode, caches, MMUs, architected
+timers, coherency and system registers apply to all CPUs. All CPUs must
+enter the kernel in the same exception level.
+
The boot loader is expected to enter the kernel on each CPU in the
following manner:
- The primary CPU must jump directly to the first instruction of the
kernel image. The device tree blob passed by this CPU must contain
- for each CPU node:
-
- 1. An 'enable-method' property. Currently, the only supported value
- for this field is the string "spin-table".
-
- 2. A 'cpu-release-addr' property identifying a 64-bit,
- zero-initialised memory location.
+ an 'enable-method' property for each cpu node. The supported
+ enable-methods are described below.
It is expected that the bootloader will generate these device tree
properties and insert them into the blob prior to kernel entry.
-- Any secondary CPUs must spin outside of the kernel in a reserved area
- of memory (communicated to the kernel by a /memreserve/ region in the
+- CPUs with a "spin-table" enable-method must have a 'cpu-release-addr'
+ property in their cpu node. This property identifies a
+  naturally-aligned 64-bit zero-initialised memory location.
+
+ These CPUs should spin outside of the kernel in a reserved area of
+ memory (communicated to the kernel by a /memreserve/ region in the
device tree) polling their cpu-release-addr location, which must be
contained in the reserved region. A wfe instruction may be inserted
to reduce the overhead of the busy-loop and a sev will be issued by
the primary CPU. When a read of the location pointed to by the
- cpu-release-addr returns a non-zero value, the CPU must jump directly
- to this value.
+ cpu-release-addr returns a non-zero value, the CPU must jump to this
+ value. The value will be written as a single 64-bit little-endian
+ value, so CPUs must convert the read value to their native endianness
+ before jumping to it.
+
+- CPUs with a "psci" enable method should remain outside of
+ the kernel (i.e. outside of the regions of memory described to the
+ kernel in the memory node, or in a reserved area of memory described
+ to the kernel by a /memreserve/ region in the device tree). The
+ kernel will issue CPU_ON calls as described in ARM document number ARM
+ DEN 0022A ("Power State Coordination Interface System Software on ARM
+ processors") to bring CPUs into the kernel.
+
+ The device tree should contain a 'psci' node, as described in
+ Documentation/devicetree/bindings/arm/psci.txt.
- Secondary CPU general-purpose register settings
x0 = 0 (reserved for future use)
diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
index 78a377124ef0..f28899d5c83e 100644
--- a/Documentation/arm64/memory.txt
+++ b/Documentation/arm64/memory.txt
@@ -39,7 +39,7 @@ ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk device
ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space
-ffffffbbffff0000 ffffffbcffffffff ~2MB [guard]
+ffffffbffbe10000 ffffffbcffffffff ~2MB [guard]
ffffffbffc000000 ffffffbfffffffff 64MB modules
diff --git a/Documentation/assoc_array.txt b/Documentation/assoc_array.txt
new file mode 100644
index 000000000000..f4faec0f66e4
--- /dev/null
+++ b/Documentation/assoc_array.txt
@@ -0,0 +1,574 @@
+ ========================================
+ GENERIC ASSOCIATIVE ARRAY IMPLEMENTATION
+ ========================================
+
+Contents:
+
+ - Overview.
+
+ - The public API.
+ - Edit script.
+ - Operations table.
+ - Manipulation functions.
+ - Access functions.
+ - Index key form.
+
+ - Internal workings.
+ - Basic internal tree layout.
+ - Shortcuts.
+ - Splitting and collapsing nodes.
+ - Non-recursive iteration.
+ - Simultaneous alteration and iteration.
+
+
+========
+OVERVIEW
+========
+
+This associative array implementation is an object container with the following
+properties:
+
+ (1) Objects are opaque pointers. The implementation does not care where they
+ point (if anywhere) or what they point to (if anything).
+
+ [!] NOTE: Pointers to objects _must_ be zero in the least significant bit.
+
+ (2) Objects do not need to contain linkage blocks for use by the array. This
+ permits an object to be located in multiple arrays simultaneously.
+ Rather, the array is made up of metadata blocks that point to objects.
+
+ (3) Objects require index keys to locate them within the array.
+
+ (4) Index keys must be unique. Inserting an object with the same key as one
+ already in the array will replace the old object.
+
+ (5) Index keys can be of any length and can be of different lengths.
+
+ (6) Index keys should encode the length early on, before any variation due to
+ length is seen.
+
+ (7) Index keys can include a hash to scatter objects throughout the array.
+
+ (8) The array can be iterated over. The objects will not necessarily come out in
+ key order.
+
+ (9) The array can be iterated over whilst it is being modified, provided the
+ RCU readlock is being held by the iterator. Note, however, under these
+ circumstances, some objects may be seen more than once. If this is a
+ problem, the iterator should lock against modification. Objects will not
+ be missed, however, unless deleted.
+
+(10) Objects in the array can be looked up by means of their index key.
+
+(11) Objects can be looked up whilst the array is being modified, provided the
+ RCU readlock is being held by the thread doing the look up.
+
+The implementation uses a tree of 16-pointer nodes internally that are indexed
+on each level by nibbles from the index key in the same manner as in a radix
+tree. To improve memory efficiency, shortcuts can be emplaced to skip over
+what would otherwise be a series of single-occupancy nodes. Further, nodes
+pack leaf object pointers into spare space in the node rather than making an
+extra branch until such time as an object needs to be added to a full node.
+
+
+==============
+THE PUBLIC API
+==============
+
+The public API can be found in <linux/assoc_array.h>. The associative array is
+rooted on the following structure:
+
+ struct assoc_array {
+ ...
+ };
+
+The code is selected by enabling CONFIG_ASSOCIATIVE_ARRAY.
+
+
+EDIT SCRIPT
+-----------
+
+The insertion and deletion functions produce an 'edit script' that can later be
+applied to effect the changes without risking ENOMEM. This retains the
+preallocated metadata blocks that will be installed in the internal tree and
+keeps track of the metadata blocks that will be removed from the tree when the
+script is applied.
+
+This is also used to keep track of dead blocks and dead objects after the
+script has been applied so that they can be freed later. The freeing is done
+after an RCU grace period has passed - thus allowing access functions to
+proceed under the RCU read lock.
+
+The script appears outside of the API as a pointer of the type:
+
+ struct assoc_array_edit;
+
+There are two functions for dealing with the script:
+
+ (1) Apply an edit script.
+
+ void assoc_array_apply_edit(struct assoc_array_edit *edit);
+
+ This will perform the edit functions, interpolating various write barriers
+ to permit accesses under the RCU read lock to continue. The edit script
+ will then be passed to call_rcu() to free it and any dead stuff it points
+ to.
+
+ (2) Cancel an edit script.
+
+ void assoc_array_cancel_edit(struct assoc_array_edit *edit);
+
+ This frees the edit script and all preallocated memory immediately. If
+ this was for insertion, the new object is _not_ released by this function,
+ but must rather be released by the caller.
+
+These functions are guaranteed not to fail.
+
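+As a rough illustration of the intended flow (not taken from any in-tree
+caller), an insertion built with assoc_array_insert() (described under
+MANIPULATION FUNCTIONS below) might be committed like this; foo_add() and the
+ops table it is handed are assumptions made purely for the example:
+
+    #include <linux/assoc_array.h>
+    #include <linux/err.h>
+
+    /* Sketch only: preallocate the edit, then commit it.  The caller is
+     * assumed to hold a lock against other modifiers of the array. */
+    static int foo_add(struct assoc_array *array,
+                       const struct assoc_array_ops *ops,
+                       const void *index_key, void *object)
+    {
+            struct assoc_array_edit *edit;
+
+            edit = assoc_array_insert(array, ops, index_key, object);
+            if (IS_ERR(edit))
+                    return PTR_ERR(edit);   /* typically -ENOMEM */
+
+            /* assoc_array_cancel_edit(edit) could be called here instead to
+             * back out; that frees the preallocations but not 'object'. */
+            assoc_array_apply_edit(edit);
+            return 0;
+    }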
+
+OPERATIONS TABLE
+----------------
+
+Various functions take a table of operations:
+
+ struct assoc_array_ops {
+ ...
+ };
+
+This points to a number of methods, all of which need to be provided:
+
+ (1) Get a chunk of index key from caller data:
+
+ unsigned long (*get_key_chunk)(const void *index_key, int level);
+
+ This should return a chunk of caller-supplied index key starting at the
+ *bit* position given by the level argument. The level argument will be a
+ multiple of ASSOC_ARRAY_KEY_CHUNK_SIZE and the function should return
+ ASSOC_ARRAY_KEY_CHUNK_SIZE bits. No error is possible.
+
+
+ (2) Get a chunk of an object's index key.
+
+ unsigned long (*get_object_key_chunk)(const void *object, int level);
+
+ As the previous function, but gets its data from an object in the array
+ rather than from a caller-supplied index key.
+
+
+ (3) See if this is the object we're looking for.
+
+ bool (*compare_object)(const void *object, const void *index_key);
+
+ Compare the object against an index key and return true if it matches and
+ false if it doesn't.
+
+
+ (4) Diff the index keys of two objects.
+
+ int (*diff_objects)(const void *a, const void *b);
+
+ Return the bit position at which the index keys of two objects differ or
+ -1 if they are the same.
+
+
+ (5) Free an object.
+
+ void (*free_object)(void *object);
+
+ Free the specified object. Note that this may be called an RCU grace
+ period after assoc_array_apply_edit() was called, so synchronize_rcu() may
+ be necessary on module unloading.
+
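+As an illustration only (this is not an in-tree user), an ops table for
+objects indexed by a single-word key might look like the following; struct
+foo and its 'key' member are assumptions made for the example:
+
+    #include <linux/assoc_array.h>
+    #include <linux/bitops.h>
+    #include <linux/slab.h>
+
+    struct foo {
+            unsigned long key;      /* the whole index key in one word */
+            /* ... payload ... */
+    };
+
+    static unsigned long foo_get_key_chunk(const void *index_key, int level)
+    {
+            /* Single-word keys: only the first chunk carries any bits. */
+            return level == 0 ? *(const unsigned long *)index_key : 0;
+    }
+
+    static unsigned long foo_get_object_key_chunk(const void *object, int level)
+    {
+            return level == 0 ? ((const struct foo *)object)->key : 0;
+    }
+
+    static bool foo_compare_object(const void *object, const void *index_key)
+    {
+            return ((const struct foo *)object)->key ==
+                    *(const unsigned long *)index_key;
+    }
+
+    static int foo_diff_objects(const void *a, const void *b)
+    {
+            unsigned long x = ((const struct foo *)a)->key ^
+                              ((const struct foo *)b)->key;
+
+            return x ? __ffs(x) : -1;       /* lowest differing bit, or -1 */
+    }
+
+    static void foo_free_object(void *object)
+    {
+            kfree(object);
+    }
+
+    static const struct assoc_array_ops foo_array_ops = {
+            .get_key_chunk          = foo_get_key_chunk,
+            .get_object_key_chunk   = foo_get_object_key_chunk,
+            .compare_object         = foo_compare_object,
+            .diff_objects           = foo_diff_objects,
+            .free_object            = foo_free_object,
+    };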
+
+MANIPULATION FUNCTIONS
+----------------------
+
+There are a number of functions for manipulating an associative array:
+
+ (1) Initialise an associative array.
+
+ void assoc_array_init(struct assoc_array *array);
+
+ This initialises the base structure for an associative array. It can't
+ fail.
+
+
+ (2) Insert/replace an object in an associative array.
+
+ struct assoc_array_edit *
+ assoc_array_insert(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ void *object);
+
+ This inserts the given object into the array. Note that the least
+ significant bit of the pointer must be zero as it's used to type-mark
+ pointers internally.
+
+ If an object already exists for that key then it will be replaced with the
+ new object and the old one will be freed automatically.
+
+ The index_key argument should hold index key information and is
+ passed to the methods in the ops table when they are called.
+
+ This function makes no alteration to the array itself, but rather returns
+ an edit script that must be applied. -ENOMEM is returned in the case of
+ an out-of-memory error.
+
+ The caller should lock exclusively against other modifiers of the array.
+
+
+ (3) Delete an object from an associative array.
+
+ struct assoc_array_edit *
+ assoc_array_delete(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+
+ This deletes an object that matches the specified data from the array.
+
+ The index_key argument should hold index key information and is
+ passed to the methods in the ops table when they are called.
+
+ This function makes no alteration to the array itself, but rather returns
+ an edit script that must be applied. -ENOMEM is returned in the case of
+ an out-of-memory error. NULL will be returned if the specified object is
+ not found within the array.
+
+ The caller should lock exclusively against other modifiers of the array.
+
+
+ (4) Delete all objects from an associative array.
+
+ struct assoc_array_edit *
+ assoc_array_clear(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+
+ This deletes all the objects from an associative array and leaves it
+ completely empty.
+
+ This function makes no alteration to the array itself, but rather returns
+ an edit script that must be applied. -ENOMEM is returned in the case of
+ an out-of-memory error.
+
+ The caller should lock exclusively against other modifiers of the array.
+
+
+ (5) Destroy an associative array, deleting all objects.
+
+ void assoc_array_destroy(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+
+ This destroys the contents of the associative array and leaves it
+ completely empty. It is not permitted for another thread to be traversing
+ the array under the RCU read lock at the same time as this function is
+ destroying it as no RCU deferral is performed on memory release -
+ something that would require memory to be allocated.
+
+ The caller should lock exclusively against other modifiers and accessors
+ of the array.
+
+
+ (6) Garbage collect an associative array.
+
+ int assoc_array_gc(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ bool (*iterator)(void *object, void *iterator_data),
+ void *iterator_data);
+
+ This iterates over the objects in an associative array and passes each one
+ to iterator(). If iterator() returns true, the object is kept. If it
+ returns false, the object will be freed. If the iterator() function
+ returns true, it must perform any appropriate refcount incrementing on the
+ object before returning.
+
+ The internal tree will be packed down if possible as part of the iteration
+ to reduce the number of nodes in it.
+
+ The iterator_data is passed directly to iterator() and is otherwise
+ ignored by the function.
+
+ The function will return 0 if successful and -ENOMEM if there wasn't
+ enough memory.
+
+ It is possible for other threads to iterate over or search the array under
+ the RCU read lock whilst this function is in progress. The caller should
+ lock exclusively against other modifiers of the array.
+
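+A garbage collection callback for the hypothetical struct foo used in the
+earlier sketches might, for instance, look like this ('dead' and 'usage' are
+assumed fields, not part of the API):
+
+    static bool foo_gc_iterator(void *object, void *iterator_data)
+    {
+            struct foo *f = object;
+
+            if (f->dead)
+                    return false;           /* discard; it will be freed */
+
+            atomic_inc(&f->usage);          /* keeper: take the ref here */
+            return true;
+    }
+
+    /* assoc_array_gc(&foo_array, &foo_array_ops, foo_gc_iterator, NULL); */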
+
+ACCESS FUNCTIONS
+----------------
+
+There are two functions for accessing an associative array:
+
+ (1) Iterate over all the objects in an associative array.
+
+ int assoc_array_iterate(const struct assoc_array *array,
+ int (*iterator)(const void *object,
+ void *iterator_data),
+ void *iterator_data);
+
+ This passes each object in the array to the iterator callback function.
+ iterator_data is private data for that function.
+
+ This may be used on an array at the same time as the array is being
+ modified, provided the RCU read lock is held. Under such circumstances,
+ it is possible for the iteration function to see some objects twice. If
+ this is a problem, then modification should be locked against. The
+ iteration algorithm should not, however, miss any objects.
+
+ The function will return 0 if no objects were in the array or else it will
+ return the result of the last iterator function called. Iteration stops
+ immediately if any call to the iteration function results in a non-zero
+ return.
+
+
+ (2) Find an object in an associative array.
+
+ void *assoc_array_find(const struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+
+ This walks through the array's internal tree directly to the object
+ specified by the index key.
+
+ This may be used on an array at the same time as the array is being
+ modified, provided the RCU read lock is held.
+
+ The function will return the object if found or NULL if the object was not
+ found.
+
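+For example, the hypothetical struct foo from the earlier sketches might be
+looked up like this (foo_get() is an assumed refcounting helper, not part of
+the API):
+
+    #include <linux/rcupdate.h>
+
+    static struct foo *foo_lookup(struct assoc_array *array, unsigned long key)
+    {
+            struct foo *f;
+
+            rcu_read_lock();
+            f = assoc_array_find(array, &foo_array_ops, &key);
+            if (f)
+                    foo_get(f);     /* pin it before dropping the read lock */
+            rcu_read_unlock();
+            return f;
+    }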
+
+INDEX KEY FORM
+--------------
+
+The index key can be of any form, but since the algorithms aren't told how long
+the key is, it is strongly recommended that the index key includes its length
+very early on before any variation due to the length would have an effect on
+comparisons.
+
+This will cause leaves with different length keys to scatter away from each
+other - and those with the same length keys to cluster together.
+
+It is also recommended that the index key begin with a hash of the rest of the
+key to maximise scattering throughout keyspace.
+
+The better the scattering, the wider and lower the internal tree will be.
+
+Poor scattering isn't too much of a problem as there are shortcuts and nodes
+can contain mixtures of leaves and metadata pointers.
+
+The index key is read in machine-word-sized chunks. Each chunk is subdivided into
+one nibble (4 bits) per level, so on a 32-bit CPU this is good for 8 levels and
+on a 64-bit CPU, 16 levels. Unless the scattering is really poor, it is
+unlikely that more than one word of any particular index key will have to be
+used.
+
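+Purely as an illustration of the subdivision described above (and assuming,
+for the sake of the example, that nibbles are consumed from the least
+significant end of each chunk), the slot used at a given tree level could be
+computed like so:
+
+    static unsigned int slot_for_level(const void *index_key, int level,
+                                       const struct assoc_array_ops *ops)
+    {
+            int bit = level * 4;    /* one nibble consumed per level */
+            unsigned long chunk;
+
+            chunk = ops->get_key_chunk(index_key,
+                                       bit & ~(ASSOC_ARRAY_KEY_CHUNK_SIZE - 1));
+            return (chunk >> (bit & (ASSOC_ARRAY_KEY_CHUNK_SIZE - 1))) & 0xf;
+    }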
+
+=================
+INTERNAL WORKINGS
+=================
+
+The associative array data structure has an internal tree. This tree is
+constructed of two types of metadata blocks: nodes and shortcuts.
+
+A node is an array of slots. Each slot can contain one of four things:
+
+ (*) A NULL pointer, indicating that the slot is empty.
+
+ (*) A pointer to an object (a leaf).
+
+ (*) A pointer to a node at the next level.
+
+ (*) A pointer to a shortcut.
+
+
+BASIC INTERNAL TREE LAYOUT
+--------------------------
+
+Ignoring shortcuts for the moment, the nodes form a multilevel tree. The index
+key space is strictly subdivided by the nodes in the tree and nodes occur on
+fixed levels. For example:
+
+ Level: 0 1 2 3
+ =============== =============== =============== ===============
+ NODE D
+ NODE B NODE C +------>+---+
+ +------>+---+ +------>+---+ | | 0 |
+ NODE A | | 0 | | | 0 | | +---+
+ +---+ | +---+ | +---+ | : :
+ | 0 | | : : | : : | +---+
+ +---+ | +---+ | +---+ | | f |
+ | 1 |---+ | 3 |---+ | 7 |---+ +---+
+ +---+ +---+ +---+
+ : : : : | 8 |---+
+ +---+ +---+ +---+ | NODE E
+ | e |---+ | f | : : +------>+---+
+ +---+ | +---+ +---+ | 0 |
+ | f | | | f | +---+
+ +---+ | +---+ : :
+ | NODE F +---+
+ +------>+---+ | f |
+ | 0 | NODE G +---+
+ +---+ +------>+---+
+ : : | | 0 |
+ +---+ | +---+
+ | 6 |---+ : :
+ +---+ +---+
+ : : | f |
+ +---+ +---+
+ | f |
+ +---+
+
+In the above example, there are 7 nodes (A-G), each with 16 slots (0-f).
+Assuming no other metadata nodes in the tree, the key space is divided as follows:
+
+ KEY PREFIX NODE
+ ========== ====
+ 137* D
+ 138* E
+ 13[0-69-f]* C
+ 1[0-24-f]* B
+ e6* G
+ e[0-57-f]* F
+ [02-df]* A
+
+So, for instance, keys with the following example index keys will be found in
+the appropriate nodes:
+
+ INDEX KEY PREFIX NODE
+ =============== ======= ====
+ 13694892892489 13 C
+ 13795289025897 137 D
+ 13889dde88793 138 E
+ 138bbb89003093 138 E
+ 1394879524789 13 C
+ 1458952489 1 B
+ 9431809de993ba - A
+ b4542910809cd - A
+ e5284310def98 e F
+ e68428974237 e6 G
+ e7fffcbd443 e F
+ f3842239082 - A
+
+To save memory, if a node can hold all the leaves in its portion of keyspace,
+then the node will have all those leaves in it and will not have any metadata
+pointers - even if some of those leaves would like to be in the same slot.
+
+A node can contain a heterogeneous mix of leaves and metadata pointers.
+Metadata pointers must be in the slots that match their subdivisions of key
+space. The leaves can be in any slot not occupied by a metadata pointer. It
+is guaranteed that none of the leaves in a node will match a slot occupied by a
+metadata pointer. If the metadata pointer is there, any leaf whose key matches
+the metadata key prefix must be in the subtree that the metadata pointer points
+to.
+
+In the above example list of index keys, node A will contain:
+
+ SLOT CONTENT INDEX KEY (PREFIX)
+ ==== =============== ==================
+ 1 PTR TO NODE B 1*
+ any LEAF 9431809de993ba
+ any LEAF b4542910809cd
+ e PTR TO NODE F e*
+ any LEAF f3842239082
+
+and node B:
+
+ 3 PTR TO NODE C 13*
+ any LEAF 1458952489
+
+
+SHORTCUTS
+---------
+
+Shortcuts are metadata records that jump over a piece of keyspace. A shortcut
+is a replacement for a series of single-occupancy nodes ascending through the
+levels. Shortcuts exist to save memory and to speed up traversal.
+
+It is possible for the root of the tree to be a shortcut - say, for example,
+the tree contains at least 17 objects, all with key prefix '1111'. The insertion
+algorithm will insert a shortcut to skip over the '1111' keyspace in a single
+bound and get to the fourth level where these actually become different.
+
+
+SPLITTING AND COLLAPSING NODES
+------------------------------
+
+Each node has a maximum capacity of 16 leaves and metadata pointers. If the
+insertion algorithm finds that it is trying to insert a 17th object into a
+node, that node will be split such that at least two leaves that have a common
+key segment at that level end up in a separate node rooted on that slot for
+that common key segment.
+
+If the leaves in a full node and the leaf that is being inserted are
+sufficiently similar, then a shortcut will be inserted into the tree.
+
+When the number of objects in the subtree rooted at a node falls to 16 or
+fewer, then the subtree will be collapsed down to a single node - and this will
+ripple towards the root if possible.
+
+
+NON-RECURSIVE ITERATION
+-----------------------
+
+Each node and shortcut contains a back pointer to its parent and the number
+of the slot in that parent that points to it. Non-recursive iteration uses
+these to proceed rootwards through the tree, going to slot N + 1 in the
+parent node, to make sure progress is made without the need for a stack.
+
+The backpointers, however, make simultaneous alteration and iteration tricky.
+
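+A toy version of that walk, with a made-up node layout (this is not the
+kernel's internal structure), might look like this:
+
+    #include <stdbool.h>
+
+    struct toy_node {
+            struct toy_node *parent;
+            int parent_slot;                /* slot in parent pointing here */
+            void *leaf[16];                 /* leaf objects, or NULL */
+            struct toy_node *child[16];     /* child metadata, or NULL */
+    };
+
+    static void toy_iterate(struct toy_node *root, void (*visit)(void *obj))
+    {
+            struct toy_node *node = root;
+            int slot = 0;
+            bool arrived = true;
+
+            while (node) {
+                    if (arrived) {
+                            /* All of a node's leaves are visited before any
+                             * of its children are followed. */
+                            int i;
+
+                            for (i = 0; i < 16; i++)
+                                    if (node->leaf[i])
+                                            visit(node->leaf[i]);
+                            slot = 0;
+                    }
+
+                    /* Find the next child subtree at or after 'slot'. */
+                    while (slot < 16 && !node->child[slot])
+                            slot++;
+
+                    if (slot < 16) {
+                            node = node->child[slot];       /* descend */
+                            arrived = true;
+                    } else {
+                            /* Exhausted: climb, resuming at slot N + 1. */
+                            slot = node->parent_slot + 1;
+                            node = node->parent;
+                            arrived = false;
+                    }
+            }
+    }
+
+The real implementation must additionally cope with shortcuts and with the
+concurrent modifications discussed in the next section.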
+
+SIMULTANEOUS ALTERATION AND ITERATION
+-------------------------------------
+
+There are a number of cases to consider:
+
+ (1) Simple insert/replace. This involves simply replacing a NULL or old
+ matching leaf pointer with the pointer to the new leaf after a barrier.
+ The metadata blocks don't change otherwise. An old leaf won't be freed
+ until after the RCU grace period.
+
+ (2) Simple delete. This involves just clearing an old matching leaf. The
+ metadata blocks don't change otherwise. The old leaf won't be freed until
+ after the RCU grace period.
+
+ (3) Insertion replacing part of a subtree that we haven't yet entered. This
+ may involve replacement of part of that subtree - but that won't affect
+ the iteration as we won't have reached the pointer to it yet and the
+ ancestry blocks are not replaced (the layout of those does not change).
+
+ (4) Insertion replacing nodes that we're actively processing. This isn't a
+ problem as we've passed the anchoring pointer and won't switch onto the
+ new layout until we follow the back pointers - at which point we've
+ already examined the leaves in the replaced node (we iterate over all the
+ leaves in a node before following any of its metadata pointers).
+
+ We might, however, re-see some leaves that have been split out into a new
+ branch that's in a slot further along than we were at.
+
+ (5) Insertion replacing nodes that we're processing a dependent branch of.
+ This won't affect us until we follow the back pointers. Similar to (4).
+
+ (6) Deletion collapsing a branch under us. This doesn't affect us because the
+ back pointers will get us back to the parent of the new node before we
+ could see the new node. The entire collapsed subtree is thrown away
+ unchanged - and will still be rooted on the same slot, so we shouldn't
+ process it a second time as we'll go back to slot + 1.
+
+Note:
+
+ (*) Under some circumstances, we need to simultaneously change the parent
+ pointer and the parent slot pointer on a node (say, for example, we
+ inserted another node before it and moved it up a level). We cannot do
+ this without locking against a read - so we have to replace that node too.
+
+ However, when we're changing a shortcut into a node this isn't a problem
+ as shortcuts only have one slot and so the parent slot number isn't used
+ when traversing backwards over one. This means that it's okay to change
+ the slot number first - provided suitable barriers are used to make sure
+ the parent slot number is read after the back pointer.
+
+Obsolete blocks and leaves are freed up after an RCU grace period has passed,
+so as long as anyone doing walking or iteration holds the RCU read lock, the
+old superstructure should not go away on them.
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 8df5e8e6dceb..2101e718670d 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -447,14 +447,13 @@ struct bio_vec {
* main unit of I/O for the block layer and lower layers (ie drivers)
*/
struct bio {
- sector_t bi_sector;
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev; /* target device */
unsigned long bi_flags; /* status, command, etc */
unsigned long bi_rw; /* low bits: r/w, high: priority */
unsigned int bi_vcnt; /* how may bio_vec's */
- unsigned int bi_idx; /* current index into bio_vec array */
+ struct bvec_iter bi_iter; /* current index into bio_vec array */
unsigned int bi_size; /* total size in bytes */
unsigned short bi_phys_segments; /* segments after physaddr coalesce*/
@@ -480,7 +479,7 @@ With this multipage bio design:
- Code that traverses the req list can find all the segments of a bio
by using rq_for_each_segment. This handles the fact that a request
has multiple bios, each of which can have multiple segments.
-- Drivers which can't process a large bio in one shot can use the bi_idx
+- Drivers which can't process a large bio in one shot can use the bi_iter
field to keep track of the next bio_vec entry to process.
(e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
[TBD: Should preferably also have a bi_voffset and bi_vlen to avoid modifying
@@ -589,7 +588,7 @@ driver should not modify these values. The block layer sets up the
nr_sectors and current_nr_sectors fields (based on the corresponding
hard_xxx values and the number of bytes transferred) and updates it on
every transfer that invokes end_that_request_first. It does the same for the
-buffer, bio, bio->bi_idx fields too.
+buffer, bio, bio->bi_iter fields too.
The buffer field is just a virtual address mapping of the current segment
of the i/o buffer in cases where the buffer resides in low-memory. For high
diff --git a/Documentation/block/biovecs.txt b/Documentation/block/biovecs.txt
new file mode 100644
index 000000000000..74a32ad52f53
--- /dev/null
+++ b/Documentation/block/biovecs.txt
@@ -0,0 +1,111 @@
+
+Immutable biovecs and biovec iterators:
+=======================================
+
+Kent Overstreet <kmo@daterainc.com>
+
+As of 3.13, biovecs should never be modified after a bio has been submitted.
+Instead, we have a new struct bvec_iter which represents a range of a biovec -
+the iterator will be modified as the bio is completed, not the biovec.
+
+More specifically, old code that needed to partially complete a bio would
+update bi_sector and bi_size, and advance bi_idx to the next biovec. If it
+ended up partway through a biovec, it would increment bv_offset and decrement
+bv_len by the number of bytes completed in that biovec.
+
+In the new scheme of things, everything that must be mutated in order to
+partially complete a bio is segregated into struct bvec_iter: bi_sector,
+bi_size and bi_idx have been moved there; and instead of modifying bv_offset
+and bv_len, struct bvec_iter has bi_bvec_done, which represents the number of
+bytes completed in the current bvec.
+
+There are a bunch of new helper macros for hiding the gory details - in
+particular, presenting the illusion of partially completed biovecs so that
+normal code doesn't have to deal with bi_bvec_done.
+
+ * Driver code should no longer refer to biovecs directly; we now have
+ bio_iovec() and bio_iter_iovec() macros that return literal struct biovecs,
+ constructed from the raw biovecs but taking into account bi_bvec_done and
+ bi_size.
+
+ bio_for_each_segment() has been updated to take a bvec_iter argument
+ instead of an integer (that corresponded to bi_idx); for a lot of code the
+ conversion just required changing the types of the arguments to
+ bio_for_each_segment().
+
+ * Advancing a bvec_iter is done with bio_advance_iter(); bio_advance() is a
+ wrapper around bio_advance_iter() that operates on bio->bi_iter, and also
+ advances the bio integrity's iter if present.
+
+ There is a lower level advance function - bvec_iter_advance() - which takes
+ a pointer to a biovec, not a bio; this is used by the bio integrity code.
+
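+As a minimal sketch of the iterator-based helpers (count_bio_bytes() is a
+made-up function, not an in-tree one):
+
+    #include <linux/bio.h>
+
+    static unsigned int count_bio_bytes(struct bio *bio)
+    {
+            struct bio_vec bv;
+            struct bvec_iter iter;
+            unsigned int bytes = 0;
+
+            /* bv is a literal bio_vec built on the fly from bi_iter, with
+             * bi_bvec_done and bi_size already taken into account. */
+            bio_for_each_segment(bv, bio, iter)
+                    bytes += bv.bv_len;
+
+            return bytes;   /* should equal bio->bi_iter.bi_size */
+    }
+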
+What does all this get us?
+==========================
+
+Having a real iterator, and making biovecs immutable, has a number of
+advantages:
+
+ * Before, iterating over bios was very awkward when you weren't processing
+ exactly one bvec at a time - for example, bio_copy_data() in fs/bio.c,
+ which copies the contents of one bio into another. Because the biovecs
+ wouldn't necessarily be the same size, the old code was tricky convoluted -
+ it had to walk two different bios at the same time, keeping both bi_idx and
+ and offset into the current biovec for each.
+
+ The new code is much more straightforward - have a look. This sort of
+ pattern comes up in a lot of places; a lot of drivers were essentially open
+ coding bvec iterators before, and having a common implementation considerably
+ simplifies a lot of code.
+
+ * Before, any code that might need to use the biovec after the bio had been
+ completed (perhaps to copy the data somewhere else, or perhaps to resubmit
+ it somewhere else if there was an error) had to save the entire bvec array
+ - again, this was being done in a fair number of places.
+
+ * Biovecs can be shared between multiple bios - a bvec iter can represent an
+ arbitrary range of an existing biovec, both starting and ending midway
+ through biovecs. This is what enables efficient splitting of arbitrary
+ bios. Note that this means we _only_ use bi_size to determine when we've
+ reached the end of a bio, not bi_vcnt - and the bio_iovec() macro takes
+ bi_size into account when constructing biovecs.
+
+ * Splitting bios is now much simpler. The old bio_split() didn't even work on
+ bios with more than a single bvec! Now, we can efficiently split arbitrary
+ size bios - because the new bio can share the old bio's biovec.
+
+ Care must be taken to ensure the biovec isn't freed while the split bio is
+ still using it, in case the original bio completes first, though. Using
+ bio_chain() when splitting bios helps with this.
+
+ * Submitting partially completed bios is now perfectly fine - this comes up
+ occasionally in stacking block drivers and various code (e.g. md and
+ bcache) had some ugly workarounds for this.
+
+ It used to be the case that submitting a partially completed bio would work
+ fine to _most_ devices, but since accessing the raw bvec array was the
+ norm, not all drivers would respect bi_idx and those would break. Now,
+ since all drivers _must_ go through the bvec iterator - and have been
+ audited to make sure they are - submitting partially completed bios is
+ perfectly fine.
+
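+For instance, the splitting pattern described above might be sketched like
+this (the helper name and the GFP_NOIO/bio_set choices are assumptions made
+for illustration):
+
+    static void foo_submit_split(struct bio *bio, int sectors,
+                                 struct bio_set *bs)
+    {
+            struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);
+
+            bio_chain(split, bio);  /* 'bio' won't complete until 'split' has */
+            generic_make_request(split);
+            generic_make_request(bio);
+    }
+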
+Other implications:
+===================
+
+ * Almost all usage of bi_idx is now incorrect and has been removed; instead,
+ where previously you would have used bi_idx you'd now use a bvec_iter,
+ probably passing it to one of the helper macros.
+
+ I.e. instead of using bio_iovec_idx() (or bio->bi_io_vec[bio->bi_idx]), you
+ now use bio_iter_iovec(), which takes a bvec_iter and returns a
+ literal struct bio_vec - constructed on the fly from the raw biovec but
+ taking into account bi_bvec_done (and bi_size).
+
+ * bi_vcnt can't be trusted or relied upon by driver code - i.e. anything that
+ doesn't actually own the bio. The reason is twofold: firstly, it's not
+ actually needed for iterating over the bio anymore - we only use bi_size.
+ Secondly, when cloning a bio and reusing (a portion of) the original bio's
+ biovec, in order to calculate bi_vcnt for the new bio we'd have to iterate
+ over all the biovecs in the new bio - which is silly as it's not needed.
+
+ So, don't use bi_vcnt anymore.
diff --git a/Documentation/connector/ucon.c b/Documentation/connector/ucon.c
index 4848db8c71ff..8a4da64e02a8 100644
--- a/Documentation/connector/ucon.c
+++ b/Documentation/connector/ucon.c
@@ -71,7 +71,7 @@ static int netlink_send(int s, struct cn_msg *msg)
nlh->nlmsg_seq = seq++;
nlh->nlmsg_pid = getpid();
nlh->nlmsg_type = NLMSG_DONE;
- nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
+ nlh->nlmsg_len = size;
nlh->nlmsg_flags = 0;
m = NLMSG_DATA(nlh);
diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt
index 40282e617913..8b1a4451422e 100644
--- a/Documentation/cpu-freq/cpu-drivers.txt
+++ b/Documentation/cpu-freq/cpu-drivers.txt
@@ -23,8 +23,8 @@ Contents:
1.1 Initialization
1.2 Per-CPU Initialization
1.3 verify
-1.4 target or setpolicy?
-1.5 target
+1.4 target/target_index or setpolicy?
+1.5 target/target_index
1.6 setpolicy
2. Frequency Table Helpers
@@ -56,7 +56,8 @@ cpufreq_driver.init - A pointer to the per-CPU initialization
cpufreq_driver.verify - A pointer to a "verification" function.
cpufreq_driver.setpolicy _or_
-cpufreq_driver.target - See below on the differences.
+cpufreq_driver.target/
+target_index - See below on the differences.
And optionally
@@ -66,7 +67,7 @@ cpufreq_driver.resume - A pointer to a per-CPU resume function
which is called with interrupts disabled
and _before_ the pre-suspend frequency
and/or policy is restored by a call to
- ->target or ->setpolicy.
+ ->target/target_index or ->setpolicy.
cpufreq_driver.attr - A pointer to a NULL-terminated list of
"struct freq_attr" which allow to
@@ -103,8 +104,8 @@ policy->governor must contain the "default policy" for
this CPU. A few moments later,
cpufreq_driver.verify and either
cpufreq_driver.setpolicy or
- cpufreq_driver.target is called with
- these values.
+ cpufreq_driver.target/target_index is called
+ with these values.
For setting some of these values (cpuinfo.min[max]_freq, policy->min[max]), the
frequency table helpers might be helpful. See the section 2 for more information
@@ -133,20 +134,28 @@ range) is within policy->min and policy->max. If necessary, increase
policy->max first, and only if this is no solution, decrease policy->min.
-1.4 target or setpolicy?
+1.4 target/target_index or setpolicy?
----------------------------
Most cpufreq drivers or even most cpu frequency scaling algorithms
only allow the CPU to be set to one frequency. For these, you use the
-->target call.
+->target/target_index call.
Some cpufreq-capable processors switch the frequency between certain
limits on their own. These shall use the ->setpolicy call
-1.4. target
+1.5. target/target_index
-------------
+The target_index call has two arguments: struct cpufreq_policy *policy,
+and unsigned int index (into the exposed frequency table).
+
+The CPUfreq driver must set the new frequency when called here. The
+actual frequency must be determined by freq_table[index].frequency.
+
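+A minimal sketch of such a callback for a hypothetical driver (the frequency
+table and the hardware programming step are placeholders, not a real driver):
+
+    static struct cpufreq_frequency_table *foo_freq_table;  /* from ->init() */
+
+    static int foo_target_index(struct cpufreq_policy *policy,
+                                unsigned int index)
+    {
+            unsigned int new_khz = foo_freq_table[index].frequency;
+
+            /* ... program the PLL/divider of policy->cpu to new_khz ... */
+
+            return 0;
+    }
+
+    /* in the cpufreq_driver definition:  .target_index = foo_target_index, */
+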
+Deprecated:
+----------
The target call has three arguments: struct cpufreq_policy *policy,
unsigned int target_frequency, unsigned int relation.
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index 219970ba54b7..77ec21574fb1 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -40,7 +40,7 @@ Most cpufreq drivers (in fact, all except one, longrun) or even most
cpu frequency scaling algorithms only offer the CPU to be set to one
frequency. In order to offer dynamic frequency scaling, the cpufreq
core must be able to tell these drivers of a "target frequency". So
-these specific drivers will be transformed to offer a "->target"
+these specific drivers will be transformed to offer a "->target/target_index"
call instead of the existing "->setpolicy" call. For "longrun", all
stays the same, though.
@@ -71,7 +71,7 @@ CPU can be set to switch independently | CPU can only be set
/ the limits of policy->{min,max}
/ \
/ \
- Using the ->setpolicy call, Using the ->target call,
+ Using the ->setpolicy call, Using the ->target/target_index call,
the limits and the the frequency closest
"policy" is set. to target_freq is set.
It is assured that it
diff --git a/Documentation/cpuidle/governor.txt b/Documentation/cpuidle/governor.txt
index 12c6bd50c9f6..d9020f5e847b 100644
--- a/Documentation/cpuidle/governor.txt
+++ b/Documentation/cpuidle/governor.txt
@@ -25,5 +25,4 @@ kernel configuration and platform will be selected by cpuidle.
Interfaces:
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-extern void cpuidle_unregister_governor(struct cpuidle_governor *gov);
struct cpuidle_governor
diff --git a/Documentation/device-mapper/cache-policies.txt b/Documentation/device-mapper/cache-policies.txt
index d7c440b444cc..df52a849957f 100644
--- a/Documentation/device-mapper/cache-policies.txt
+++ b/Documentation/device-mapper/cache-policies.txt
@@ -30,8 +30,10 @@ multiqueue
This policy is the default.
-The multiqueue policy has two sets of 16 queues: one set for entries
-waiting for the cache and another one for those in the cache.
+The multiqueue policy has three sets of 16 queues: one set for entries
+waiting for the cache and another two for those in the cache (a set for
+clean entries and a set for dirty entries).
+
Cache entries in the queues are aged based on logical time. Entry into
the cache is based on variable thresholds and queue selection is based
on hit count on entry. The policy aims to take different cache miss
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
index 2c656ae43ba7..c81839b52c4d 100644
--- a/Documentation/device-mapper/dm-crypt.txt
+++ b/Documentation/device-mapper/dm-crypt.txt
@@ -4,12 +4,15 @@ dm-crypt
Device-Mapper's "crypt" target provides transparent encryption of block devices
using the kernel crypto API.
+For a more detailed description of supported parameters see:
+http://code.google.com/p/cryptsetup/wiki/DMCrypt
+
Parameters: <cipher> <key> <iv_offset> <device path> \
<offset> [<#opt_params> <opt_params>]
<cipher>
Encryption cipher and an optional IV generation mode.
- (In format cipher[:keycount]-chainmode-ivopts:ivmode).
+ (In format cipher[:keycount]-chainmode-ivmode[:ivopts]).
Examples:
des
aes-cbc-essiv:sha256
@@ -19,7 +22,11 @@ Parameters: <cipher> <key> <iv_offset> <device path> \
<key>
Key used for encryption. It is encoded as a hexadecimal number.
- You can only use key sizes that are valid for the selected cipher.
+ You can only use key sizes that are valid for the selected cipher
+ in combination with the selected iv mode.
+ Note that for some iv modes the key string can contain additional
+ keys (for example IV seed) so the key contains more parts concatenated
+ into a single string.
<keycount>
Multi-key compatibility mode. You can define <keycount> keys and
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 23721d3be3e6..80b72419ffd8 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -414,6 +414,7 @@ Your cooperation is appreciated.
200 = /dev/net/tun TAP/TUN network device
201 = /dev/button/gulpb Transmeta GULP-B buttons
202 = /dev/emd/ctl Enhanced Metadisk RAID (EMD) control
+ 203 = /dev/cuse Cuse (character device in user-space)
204 = /dev/video/em8300 EM8300 DVD decoder control
205 = /dev/video/em8300_mv EM8300 DVD decoder video
206 = /dev/video/em8300_ma EM8300 DVD decoder audio
diff --git a/Documentation/devicetree/bindings/arm/arm-boards b/Documentation/devicetree/bindings/arm/arm-boards
index db5858e32d3f..5fac246a9530 100644
--- a/Documentation/devicetree/bindings/arm/arm-boards
+++ b/Documentation/devicetree/bindings/arm/arm-boards
@@ -9,9 +9,53 @@ Required properties (in root node):
FPGA type interrupt controllers, see the versatile-fpga-irq binding doc.
-In the root node the Integrator/CP must have a /cpcon node pointing
-to the CP control registers, and the Integrator/AP must have a
-/syscon node pointing to the Integrator/AP system controller.
+Required nodes:
+
+- core-module: the root node of the Integrator platforms must have
+ a core-module with reg and the compatible string
+ "arm,core-module-integrator"
+
+ Required properties for the core module:
+ - reg: the location and size of the core module registers, one
+ range of 0x200 bytes.
+
+- syscon: the root node of the Integrator platforms must have a
+ system controller node pointing to the control registers,
+ with the compatible string
+ "arm,integrator-ap-syscon"
+ "arm,integrator-cp-syscon"
+ respectively.
+
+ Required properties for the system controller:
+ - reg: the location and size of the system controller registers,
+ one range of 0x100 bytes.
+
+ Required properties for the AP system controller:
+ - interrupts: the AP syscon node must include the logical module
+ interrupts, stated in order of module instance <module 0>,
+ <module 1>, <module 2> ... for the CP system controller this
+ is neither required nor of any use.
+
+/dts-v1/;
+/include/ "integrator.dtsi"
+
+/ {
+ model = "ARM Integrator/AP";
+ compatible = "arm,integrator-ap";
+
+ core-module@10000000 {
+ compatible = "arm,core-module-integrator";
+ reg = <0x10000000 0x200>;
+ };
+
+ syscon {
+ compatible = "arm,integrator-ap-syscon";
+ reg = <0x11000000 0x100>;
+ interrupt-parent = <&pic>;
+ /* These are the logic module IRQs */
+ interrupts = <9>, <10>, <11>, <12>;
+ };
+};
ARM Versatile Application and Platform Baseboards
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt b/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
index 61df564c0d23..d74091a8a3bf 100644
--- a/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
+++ b/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
@@ -4,6 +4,8 @@ Marvell Armada 370 and Armada XP Interrupt Controller
Required properties:
- compatible: Should be "marvell,mpic"
- interrupt-controller: Identifies the node as an interrupt controller.
+- msi-controller: Identifies the node as an PCI Message Signaled
+ Interrupt controller.
- #interrupt-cells: The number of cells to define the interrupts. Should be 1.
The cell is the IRQ number
@@ -24,6 +26,7 @@ Example:
#address-cells = <1>;
#size-cells = <1>;
interrupt-controller;
+ msi-controller;
reg = <0xd0020a00 0x1d0>,
<0xd0021070 0x58>;
};
diff --git a/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt b/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt
index f770ac0893d4..049675944b78 100644
--- a/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt
+++ b/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt
@@ -1,7 +1,9 @@
Calxeda DDR memory controller
Properties:
-- compatible : Should be "calxeda,hb-ddr-ctrl"
+- compatible : Should be:
+ - "calxeda,hb-ddr-ctrl" for ECX-1000
+ - "calxeda,ecx-2000-ddr-ctrl" for ECX-2000
- reg : Address and size for DDR controller registers.
- interrupts : Interrupt for DDR controller.
diff --git a/Documentation/devicetree/bindings/arm/cci.txt b/Documentation/devicetree/bindings/arm/cci.txt
index 92d36e2aa877..f28d82bbbc56 100644
--- a/Documentation/devicetree/bindings/arm/cci.txt
+++ b/Documentation/devicetree/bindings/arm/cci.txt
@@ -36,14 +36,18 @@ specific to ARM.
- reg
Usage: required
- Value type: <prop-encoded-array>
+ Value type: Integer cells. A register entry, expressed as a pair
+ of cells, containing base and size.
Definition: A standard property. Specifies base physical
address of CCI control registers common to all
interfaces.
- ranges:
Usage: required
- Value type: <prop-encoded-array>
+ Value type: Integer cells. An array of range entries, expressed
+ as a tuple of cells, containing child address,
+ parent address and the size of the region in the
+ child address space.
Definition: A standard property. Follow rules in the ePAPR for
hierarchical bus addressing. CCI interfaces
addresses refer to the parent node addressing
@@ -74,11 +78,49 @@ specific to ARM.
- reg:
Usage: required
- Value type: <prop-encoded-array>
+ Value type: Integer cells. A register entry, expressed
+ as a pair of cells, containing base and
+ size.
Definition: the base address and size of the
corresponding interface programming
registers.
+ - CCI PMU node
+
+ Parent node must be CCI interconnect node.
+
+ A CCI pmu node must contain the following properties:
+
+ - compatible
+ Usage: required
+ Value type: <string>
+ Definition: must be "arm,cci-400-pmu"
+
+ - reg:
+ Usage: required
+ Value type: Integer cells. A register entry, expressed
+ as a pair of cells, containing base and
+ size.
+ Definition: the base address and size of the
+ corresponding interface programming
+ registers.
+
+ - interrupts:
+ Usage: required
+ Value type: Integer cells. Array of interrupt specifier
+ entries, as defined in
+ ../interrupt-controller/interrupts.txt.
+ Definition: list of counter overflow interrupts, one per
+ counter. The interrupts must be specified
+ starting with the cycle counter overflow
+ interrupt, followed by counter0 overflow
+ interrupt, counter1 overflow interrupt,...
+ ,counterN overflow interrupt.
+
+ The CCI PMU has an interrupt signal for each
+ counter. The number of interrupts must be
+ equal to the number of counters.
+
* CCI interconnect bus masters
Description: masters in the device tree connected to a CCI port
@@ -144,7 +186,7 @@ Example:
#address-cells = <1>;
#size-cells = <1>;
reg = <0x0 0x2c090000 0 0x1000>;
- ranges = <0x0 0x0 0x2c090000 0x6000>;
+ ranges = <0x0 0x0 0x2c090000 0x10000>;
cci_control0: slave-if@1000 {
compatible = "arm,cci-400-ctrl-if";
@@ -163,6 +205,16 @@ Example:
interface-type = "ace";
reg = <0x5000 0x1000>;
};
+
+ pmu@9000 {
+ compatible = "arm,cci-400-pmu";
+ reg = <0x9000 0x5000>;
+ interrupts = <0 101 4>,
+ <0 102 4>,
+ <0 103 4>,
+ <0 104 4>,
+ <0 105 4>;
+ };
};
This CCI node corresponds to a CCI component whose control registers sits
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index 91b7049affa1..808c1543b0f8 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -21,7 +21,8 @@ Required properties:
Optional properties:
- ti,no_idle_on_suspend: When present, it prevents the PM to idle the module
during suspend.
-
+- ti,no-reset-on-init: When present, the module should not be reset at init
+- ti,no-idle-on-init: When present, the module should not be idled at init
Example:
diff --git a/Documentation/devicetree/bindings/arm/vic.txt b/Documentation/devicetree/bindings/arm/vic.txt
index 266716b23437..dd527216c5fb 100644
--- a/Documentation/devicetree/bindings/arm/vic.txt
+++ b/Documentation/devicetree/bindings/arm/vic.txt
@@ -18,6 +18,15 @@ Required properties:
Optional properties:
- interrupts : Interrupt source for parent controllers if the VIC is nested.
+- valid-mask : A one cell big bit mask of valid interrupt sources. Each bit
+ represents a single interrupt source, starting from source 0 at LSb and ending
+ at source 31 at MSb. A bit that is set means that the source is wired and
+ clear means otherwise. If unspecified, defaults to all valid.
+- valid-wakeup-mask : A one cell big bit mask of interrupt sources that can be
+ configured as wake up source for the system. Order of bits is the same as for
+ valid-mask property. A set bit means that this interrupt source can be
+ configured as a wake up source for the system. If unspecified, defaults to all
+ interrupt sources configurable as wake up sources.
Example:
@@ -26,4 +35,7 @@ Example:
interrupt-controller;
#interrupt-cells = <1>;
reg = <0x60000 0x1000>;
+
+ valid-mask = <0xffffff7f>;
+ valid-wakeup-mask = <0x0000ff7f>;
};
diff --git a/Documentation/devicetree/bindings/clock/emev2-clock.txt b/Documentation/devicetree/bindings/clock/emev2-clock.txt
new file mode 100644
index 000000000000..60bbb1a8c69a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/emev2-clock.txt
@@ -0,0 +1,98 @@
+Device tree Clock bindings for Renesas EMMA Mobile EV2
+
+This binding uses the common clock binding.
+
+* SMU
+System Management Unit described in user's manual R19UH0037EJ1000_SMU.
+This is not a clock provider, but clocks under SMU depend on it.
+
+Required properties:
+- compatible: Should be "renesas,emev2-smu"
+- reg: Address and Size of SMU registers
+
+* SMU_CLKDIV
+Function block with an input mux and a divider, which corresponds to
+"Serial clock generator" in fig."Clock System Overview" of the manual,
+and "xxx frequency division setting register" (XXXCLKDIV) registers.
+This makes internal (neither input nor output) clock that is provided
+to input of xxxGCLK block.
+
+Required properties:
+- compatible: Should be "renesas,emev2-smu-clkdiv"
+- reg: Byte offset from SMU base and Bit position in the register
+- clocks: Parent clocks. Input clocks as described in clock-bindings.txt
+- #clock-cells: Should be <0>
+
+* SMU_GCLK
+Clock gating node shown as "Clock stop processing block" in the
+fig."Clock System Overview" of the manual.
+Registers are "xxx clock gate control register" (XXXGCLKCTRL).
+
+Required properties:
+- compatible: Should be "renesas,emev2-smu-gclk"
+- reg: Byte offset from SMU base and Bit position in the register
+- clocks: Input clock as described in clock-bindings.txt
+- #clock-cells: Should be <0>
+
+Example of provider:
+
+usia_u0_sclkdiv: usia_u0_sclkdiv {
+ compatible = "renesas,emev2-smu-clkdiv";
+ reg = <0x610 0>;
+ clocks = <&pll3_fo>, <&pll4_fo>, <&pll1_fo>, <&osc1_fo>;
+ #clock-cells = <0>;
+};
+
+usia_u0_sclk: usia_u0_sclk {
+ compatible = "renesas,emev2-smu-gclk";
+ reg = <0x4a0 1>;
+ clocks = <&usia_u0_sclkdiv>;
+ #clock-cells = <0>;
+};
+
+Example of consumer:
+
+uart@e1020000 {
+ compatible = "renesas,em-uart";
+ reg = <0xe1020000 0x38>;
+ interrupts = <0 8 0>;
+ clocks = <&usia_u0_sclk>;
+ clock-names = "sclk";
+};
+
+Example of clock-tree description:
+
+ This describes a clock path in the clock tree
+ c32ki -> pll3_fo -> usia_u0_sclkdiv -> usia_u0_sclk
+
+smu@e0110000 {
+ compatible = "renesas,emev2-smu";
+ reg = <0xe0110000 0x10000>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ c32ki: c32ki {
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ #clock-cells = <0>;
+ };
+ pll3_fo: pll3_fo {
+ compatible = "fixed-factor-clock";
+ clocks = <&c32ki>;
+ clock-div = <1>;
+ clock-mult = <7000>;
+ #clock-cells = <0>;
+ };
+ usia_u0_sclkdiv: usia_u0_sclkdiv {
+ compatible = "renesas,emev2-smu-clkdiv";
+ reg = <0x610 0>;
+ clocks = <&pll3_fo>;
+ #clock-cells = <0>;
+ };
+ usia_u0_sclk: usia_u0_sclk {
+ compatible = "renesas,emev2-smu-gclk";
+ reg = <0x4a0 1>;
+ clocks = <&usia_u0_sclkdiv>;
+ #clock-cells = <0>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/clock/imx6q-clock.txt b/Documentation/devicetree/bindings/clock/imx6q-clock.txt
index 5a90a724b520..6aab72bf67ea 100644
--- a/Documentation/devicetree/bindings/clock/imx6q-clock.txt
+++ b/Documentation/devicetree/bindings/clock/imx6q-clock.txt
@@ -215,6 +215,11 @@ clocks and IDs.
cko2 200
cko 201
vdoa 202
+ pll4_audio_div 203
+ lvds1_sel 204
+ lvds2_sel 205
+ lvds1_gate 206
+ lvds2_gate 207
Examples:
diff --git a/Documentation/devicetree/bindings/clock/mvebu-corediv-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-corediv-clock.txt
new file mode 100644
index 000000000000..c62391fc0e39
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/mvebu-corediv-clock.txt
@@ -0,0 +1,19 @@
+* Core Divider Clock bindings for Marvell MVEBU SoCs
+
+The following is a list of provided IDs and clock names on Armada 370/XP:
+ 0 = nand (NAND clock)
+
+Required properties:
+- compatible : must be "marvell,armada-370-corediv-clock"
+- reg : must be the register address of Core Divider control register
+- #clock-cells : from common clock binding; shall be set to 1
+- clocks : must be set to the parent's phandle
+
+Example:
+
+corediv_clk: corediv-clocks@18740 {
+ compatible = "marvell,armada-370-corediv-clock";
+ reg = <0x18740 0xc>;
+ #clock-cells = <1>;
+ clocks = <&pll>;
+};
diff --git a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
index cffc93d97f54..fc2910fa7e45 100644
--- a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
+++ b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
@@ -1,10 +1,10 @@
-* Gated Clock bindings for Marvell Orion SoCs
+* Gated Clock bindings for Marvell EBU SoCs
-Marvell Dove and Kirkwood allow some peripheral clocks to be gated to save
-some power. The clock consumer should specify the desired clock by having
-the clock ID in its "clocks" phandle cell. The clock ID is directly mapped to
-the corresponding clock gating control bit in HW to ease manual clock lookup
-in datasheet.
+Marvell Armada 370/XP, Dove and Kirkwood allow some peripheral clocks to be
+gated to save some power. The clock consumer should specify the desired clock
+by having the clock ID in its "clocks" phandle cell. The clock ID is directly
+mapped to the corresponding clock gating control bit in HW to ease manual clock
+lookup in datasheet.
The following is a list of provided IDs for Armada 370:
ID Clock Peripheral
@@ -94,6 +94,8 @@ ID Clock Peripheral
Required properties:
- compatible : shall be one of the following:
+ "marvell,armada-370-gating-clock" - for Armada 370 SoC clock gating
+ "marvell,armada-xp-gating-clock" - for Armada XP SoC clock gating
"marvell,dove-gating-clock" - for Dove SoC clock gating
"marvell,kirkwood-gating-clock" - for Kirkwood SoC clock gating
- reg : shall be the register address of the Clock Gating Control register
diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt
index 00a5c26454eb..91a748fed13d 100644
--- a/Documentation/devicetree/bindings/clock/sunxi.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi.txt
@@ -45,8 +45,8 @@ Additionally, "allwinner,*-gates-clk" clocks require:
Clock consumers should specify the desired clocks they use with a
"clocks" phandle cell. Consumers that are using a gated clock should
-provide an additional ID in their clock property. The values of this
-ID are documented in sunxi/<soc>-gates.txt.
+provide an additional ID in their clock property. This ID is the
+offset of the bit controlling this particular gate in the register.
For example:
diff --git a/Documentation/devicetree/bindings/clock/sunxi/sun4i-a10-gates.txt b/Documentation/devicetree/bindings/clock/sunxi/sun4i-a10-gates.txt
deleted file mode 100644
index 6a03475bbfe2..000000000000
--- a/Documentation/devicetree/bindings/clock/sunxi/sun4i-a10-gates.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-Gate clock outputs
-------------------
-
- * AXI gates ("allwinner,sun4i-axi-gates-clk")
-
- DRAM 0
-
- * AHB gates ("allwinner,sun4i-ahb-gates-clk")
-
- USB0 0
- EHCI0 1
- OHCI0 2*
- EHCI1 3
- OHCI1 4*
- SS 5
- DMA 6
- BIST 7
- MMC0 8
- MMC1 9
- MMC2 10
- MMC3 11
- MS 12**
- NAND 13
- SDRAM 14
-
- ACE 16
- EMAC 17
- TS 18
-
- SPI0 20
- SPI1 21
- SPI2 22
- SPI3 23
- PATA 24
- SATA 25**
- GPS 26*
-
- VE 32
- TVD 33
- TVE0 34
- TVE1 35
- LCD0 36
- LCD1 37
-
- CSI0 40
- CSI1 41
-
- HDMI 43
- DE_BE0 44
- DE_BE1 45
- DE_FE1 46
- DE_FE1 47
-
- MP 50
-
- MALI400 52
-
- * APB0 gates ("allwinner,sun4i-apb0-gates-clk")
-
- CODEC 0
- SPDIF 1*
- AC97 2
- IIS 3
-
- PIO 5
- IR0 6
- IR1 7
-
- KEYPAD 10
-
- * APB1 gates ("allwinner,sun4i-apb1-gates-clk")
-
- I2C0 0
- I2C1 1
- I2C2 2
-
- CAN 4
- SCR 5
- PS20 6
- PS21 7
-
- UART0 16
- UART1 17
- UART2 18
- UART3 19
- UART4 20
- UART5 21
- UART6 22
- UART7 23
-
-Notation:
- [*]: The datasheet didn't mention these, but they are present on AW code
- [**]: The datasheet had this marked as "NC" but they are used on AW code
diff --git a/Documentation/devicetree/bindings/clock/sunxi/sun5i-a10s-gates.txt b/Documentation/devicetree/bindings/clock/sunxi/sun5i-a10s-gates.txt
deleted file mode 100644
index d24279fe1429..000000000000
--- a/Documentation/devicetree/bindings/clock/sunxi/sun5i-a10s-gates.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-Gate clock outputs
-------------------
-
- * AXI gates ("allwinner,sun4i-axi-gates-clk")
-
- DRAM 0
-
- * AHB gates ("allwinner,sun5i-a10s-ahb-gates-clk")
-
- USB0 0
- EHCI0 1
- OHCI0 2
-
- SS 5
- DMA 6
- BIST 7
- MMC0 8
- MMC1 9
- MMC2 10
-
- NAND 13
- SDRAM 14
-
- EMAC 17
- TS 18
-
- SPI0 20
- SPI1 21
- SPI2 22
-
- GPS 26
-
- HSTIMER 28
-
- VE 32
-
- TVE 34
-
- LCD 36
-
- CSI 40
-
- HDMI 43
- DE_BE 44
-
- DE_FE 46
-
- IEP 51
- MALI400 52
-
- * APB0 gates ("allwinner,sun5i-a10s-apb0-gates-clk")
-
- CODEC 0
-
- IIS 3
-
- PIO 5
- IR 6
-
- KEYPAD 10
-
- * APB1 gates ("allwinner,sun5i-a10s-apb1-gates-clk")
-
- I2C0 0
- I2C1 1
- I2C2 2
-
- UART0 16
- UART1 17
- UART2 18
- UART3 19
-
-Notation:
- [*]: The datasheet didn't mention these, but they are present on AW code
- [**]: The datasheet had this marked as "NC" but they are used on AW code
diff --git a/Documentation/devicetree/bindings/clock/sunxi/sun5i-a13-gates.txt b/Documentation/devicetree/bindings/clock/sunxi/sun5i-a13-gates.txt
deleted file mode 100644
index 006b6dfc4703..000000000000
--- a/Documentation/devicetree/bindings/clock/sunxi/sun5i-a13-gates.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-Gate clock outputs
-------------------
-
- * AXI gates ("allwinner,sun4i-axi-gates-clk")
-
- DRAM 0
-
- * AHB gates ("allwinner,sun5i-a13-ahb-gates-clk")
-
- USBOTG 0
- EHCI 1
- OHCI 2
-
- SS 5
- DMA 6
- BIST 7
- MMC0 8
- MMC1 9
- MMC2 10
-
- NAND 13
- SDRAM 14
-
- SPI0 20
- SPI1 21
- SPI2 22
-
- STIMER 28
-
- VE 32
-
- LCD 36
-
- CSI 40
-
- DE_BE 44
-
- DE_FE 46
-
- IEP 51
- MALI400 52
-
- * APB0 gates ("allwinner,sun5i-a13-apb0-gates-clk")
-
- CODEC 0
-
- PIO 5
- IR 6
-
- * APB1 gates ("allwinner,sun5i-a13-apb1-gates-clk")
-
- I2C0 0
- I2C1 1
- I2C2 2
-
- UART1 17
-
- UART3 19
diff --git a/Documentation/devicetree/bindings/clock/sunxi/sun6i-a31-gates.txt b/Documentation/devicetree/bindings/clock/sunxi/sun6i-a31-gates.txt
deleted file mode 100644
index fe44932b5c6b..000000000000
--- a/Documentation/devicetree/bindings/clock/sunxi/sun6i-a31-gates.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-Gate clock outputs
-------------------
-
- * AHB1 gates ("allwinner,sun6i-a31-ahb1-gates-clk")
-
- MIPI DSI 1
-
- SS 5
- DMA 6
-
- MMC0 8
- MMC1 9
- MMC2 10
- MMC3 11
-
- NAND1 12
- NAND0 13
- SDRAM 14
-
- GMAC 17
- TS 18
- HSTIMER 19
- SPI0 20
- SPI1 21
- SPI2 22
- SPI3 23
- USB_OTG 24
-
- EHCI0 26
- EHCI1 27
-
- OHCI0 29
- OHCI1 30
- OHCI2 31
- VE 32
-
- LCD0 36
- LCD1 37
-
- CSI 40
-
- HDMI 43
- DE_BE0 44
- DE_BE1 45
- DE_FE1 46
- DE_FE1 47
-
- MP 50
-
- GPU 52
-
- DEU0 55
- DEU1 56
- DRC0 57
- DRC1 58
-
- * APB1 gates ("allwinner,sun6i-a31-apb1-gates-clk")
-
- CODEC 0
-
- DIGITAL MIC 4
- PIO 5
-
- DAUDIO0 12
- DAUDIO1 13
-
- * APB2 gates ("allwinner,sun6i-a31-apb2-gates-clk")
-
- I2C0 0
- I2C1 1
- I2C2 2
- I2C3 3
-
- UART0 16
- UART1 17
- UART2 18
- UART3 19
- UART4 20
- UART5 21
-
-Notation:
- [*]: The datasheet didn't mention these, but they are present on AW code
- [**]: The datasheet had this marked as "NC" but they are used on AW code
diff --git a/Documentation/devicetree/bindings/clock/sunxi/sun7i-a20-gates.txt b/Documentation/devicetree/bindings/clock/sunxi/sun7i-a20-gates.txt
deleted file mode 100644
index 357f4fdc02ef..000000000000
--- a/Documentation/devicetree/bindings/clock/sunxi/sun7i-a20-gates.txt
+++ /dev/null
@@ -1,98 +0,0 @@
-Gate clock outputs
-------------------
-
- * AXI gates ("allwinner,sun4i-axi-gates-clk")
-
- DRAM 0
-
- * AHB gates ("allwinner,sun7i-a20-ahb-gates-clk")
-
- USB0 0
- EHCI0 1
- OHCI0 2
- EHCI1 3
- OHCI1 4
- SS 5
- DMA 6
- BIST 7
- MMC0 8
- MMC1 9
- MMC2 10
- MMC3 11
- MS 12
- NAND 13
- SDRAM 14
-
- ACE 16
- EMAC 17
- TS 18
-
- SPI0 20
- SPI1 21
- SPI2 22
- SPI3 23
-
- SATA 25
-
- HSTIMER 28
-
- VE 32
- TVD 33
- TVE0 34
- TVE1 35
- LCD0 36
- LCD1 37
-
- CSI0 40
- CSI1 41
-
- HDMI1 42
- HDMI0 43
- DE_BE0 44
- DE_BE1 45
- DE_FE1 46
- DE_FE1 47
-
- GMAC 49
- MP 50
-
- MALI400 52
-
- * APB0 gates ("allwinner,sun7i-a20-apb0-gates-clk")
-
- CODEC 0
- SPDIF 1
- AC97 2
- IIS0 3
- IIS1 4
- PIO 5
- IR0 6
- IR1 7
- IIS2 8
-
- KEYPAD 10
-
- * APB1 gates ("allwinner,sun7i-a20-apb1-gates-clk")
-
- I2C0 0
- I2C1 1
- I2C2 2
- I2C3 3
- CAN 4
- SCR 5
- PS20 6
- PS21 7
-
- I2C4 15
- UART0 16
- UART1 17
- UART2 18
- UART3 19
- UART4 20
- UART5 21
- UART6 22
- UART7 23
-
-Notation:
- [*]: The datasheet didn't mention these, but they are present on AW code
- [**]: The datasheet had this marked as "NC" but they are used on AW code
diff --git a/Documentation/devicetree/bindings/crypto/omap-aes.txt b/Documentation/devicetree/bindings/crypto/omap-aes.txt
new file mode 100644
index 000000000000..fd9717653cbb
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/omap-aes.txt
@@ -0,0 +1,31 @@
+OMAP SoC AES crypto Module
+
+Required properties:
+
+- compatible : Should contain entries for this and backward compatible
+ AES versions:
+ - "ti,omap2-aes" for OMAP2.
+ - "ti,omap3-aes" for OMAP3.
+ - "ti,omap4-aes" for OMAP4 and AM33XX.
+ Note that the OMAP2 and 3 versions are compatible (OMAP3 supports
+ more algorithms) but they are incompatible with OMAP4.
+- ti,hwmods: Name of the hwmod associated with the AES module
+- reg : Offset and length of the register set for the module
+- interrupts : the interrupt-specifier for the AES module.
+
+Optional properties:
+- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
+ Documentation/devicetree/bindings/dma/dma.txt
+- dma-names: DMA request names should include "tx" and "rx" if present.
+
+Example:
+ /* AM335x */
+ aes: aes@53500000 {
+ compatible = "ti,omap4-aes";
+ ti,hwmods = "aes";
+ reg = <0x53500000 0xa0>;
+ interrupts = <102>;
+ dmas = <&edma 6>,
+ <&edma 5>;
+ dma-names = "tx", "rx";
+ };
diff --git a/Documentation/devicetree/bindings/crypto/omap-sham.txt b/Documentation/devicetree/bindings/crypto/omap-sham.txt
new file mode 100644
index 000000000000..f839acd6f0ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/omap-sham.txt
@@ -0,0 +1,28 @@
+OMAP SoC SHA crypto Module
+
+Required properties:
+
+- compatible : Should contain entries for this and backward compatible
+ SHAM versions:
+ - "ti,omap2-sham" for OMAP2 & OMAP3.
+ - "ti,omap4-sham" for OMAP4 and AM33XX.
+ Note that these two versions are incompatible.
+- ti,hwmods: Name of the hwmod associated with the SHAM module
+- reg : Offset and length of the register set for the module
+- interrupts : the interrupt-specifier for the SHAM module.
+
+Optional properties:
+- dmas: DMA specifiers for the rx dma. See the DMA client binding,
+ Documentation/devicetree/bindings/dma/dma.txt
+- dma-names: DMA request name. Should be "rx" if a dma is present.
+
+Example:
+ /* AM335x */
+ sham: sham@53100000 {
+ compatible = "ti,omap4-sham";
+ ti,hwmods = "sham";
+ reg = <0x53100000 0x200>;
+ interrupts = <109>;
+ dmas = <&edma 36>;
+ dma-names = "rx";
+ };
diff --git a/Documentation/devicetree/bindings/dma/atmel-dma.txt b/Documentation/devicetree/bindings/dma/atmel-dma.txt
index e1f343c7a34b..f69bcf5a6343 100644
--- a/Documentation/devicetree/bindings/dma/atmel-dma.txt
+++ b/Documentation/devicetree/bindings/dma/atmel-dma.txt
@@ -28,7 +28,7 @@ The three cells in order are:
dependent:
- bit 7-0: peripheral identifier for the hardware handshaking interface. The
identifier can be different for tx and rx.
- - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 1 for ASAP.
+ - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 2 for ASAP.
Example:
diff --git a/Documentation/devicetree/bindings/hwrng/omap_rng.txt b/Documentation/devicetree/bindings/hwrng/omap_rng.txt
new file mode 100644
index 000000000000..6a62acd86953
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwrng/omap_rng.txt
@@ -0,0 +1,22 @@
+OMAP SoC HWRNG Module
+
+Required properties:
+
+- compatible : Should contain entries for this and backward compatible
+ RNG versions:
+ - "ti,omap2-rng" for OMAP2.
+ - "ti,omap4-rng" for OMAP4, OMAP5 and AM33XX.
+ Note that these two versions are incompatible.
+- ti,hwmods: Name of the hwmod associated with the RNG module
+- reg : Offset and length of the register set for the module
+- interrupts : the interrupt number for the RNG module.
+ Only used for "ti,omap4-rng".
+
+Example:
+/* AM335x */
+rng: rng@48310000 {
+ compatible = "ti,omap4-rng";
+ ti,hwmods = "rng";
+ reg = <0x48310000 0x2000>;
+ interrupts = <111>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-exynos5.txt b/Documentation/devicetree/bindings/i2c/i2c-exynos5.txt
new file mode 100644
index 000000000000..056732cfdcee
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-exynos5.txt
@@ -0,0 +1,44 @@
+* Samsung's High Speed I2C controller
+
+Samsung's High Speed I2C controller is used to interface with I2C devices
+at various speeds ranging from 100 kHz to 3.4 MHz.
+
+Required properties:
+ - compatible: value should be:
+ -> "samsung,exynos5-hsi2c", for i2c compatible with exynos5 hsi2c.
+ - reg: physical base address of the controller and length of memory mapped
+ region.
+ - interrupts: interrupt number to the cpu.
+ - #address-cells: always 1 (for i2c addresses)
+ - #size-cells: always 0
+
+ - Pinctrl:
+ - pinctrl-0: Pin control group to be used for this controller.
+ - pinctrl-names: Should contain only one value - "default".
+
+Optional properties:
+ - clock-frequency: Desired operating frequency in Hz of the bus.
+   -> If not specified, the bus operates in fast-speed mode
+      at 100 kHz.
+   -> If specified, the bus operates in high-speed mode only if the
+      clock-frequency is >= 1 MHz.
+
+Example:
+
+hsi2c@12ca0000 {
+ compatible = "samsung,exynos5-hsi2c";
+ reg = <0x12ca0000 0x100>;
+ interrupts = <56>;
+ clock-frequency = <100000>;
+
+ pinctrl-0 = <&i2c4_bus>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ s2mps11_pmic@66 {
+ compatible = "samsung,s2mps11-pmic";
+ reg = <0x66>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
new file mode 100644
index 000000000000..897cfcd5ce92
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
@@ -0,0 +1,23 @@
+I2C for R-Car platforms
+
+Required properties:
+- compatible: Must be one of
+ "renesas,i2c-rcar"
+ "renesas,i2c-r8a7778"
+ "renesas,i2c-r8a7779"
+ "renesas,i2c-r8a7790"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: interrupt specifier.
+
+Optional properties:
+- clock-frequency: desired I2C bus clock frequency in Hz. The absence of this
+  property indicates the default frequency of 100 kHz.
+
+Examples :
+
+i2c0: i2c@e6500000 {
+	compatible = "renesas,i2c-r8a7790";
+ reg = <0 0xe6500000 0 0x428>;
+ interrupts = <0 174 0x4>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index ad6a73852f08..f1fb26eed0e9 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -15,6 +15,7 @@ adi,adt7461 +/-1C TDM Extended Temp Range I.C
adt7461 +/-1C TDM Extended Temp Range I.C
at,24c08 i2c serial eeprom (24cxx)
atmel,24c02 i2c serial eeprom (24cxx)
+atmel,at97sc3204t i2c trusted platform module (TPM)
catalyst,24c32 i2c serial eeprom
dallas,ds1307 64 x 8, Serial, I2C Real-Time Clock
dallas,ds1338 I2C RTC with 56-Byte NV RAM
@@ -44,6 +45,7 @@ mc,rv3029c2 Real Time Clock Module with I2C-Bus
national,lm75 I2C TEMP SENSOR
national,lm80 Serial Interface ACPI-Compatible Microprocessor System Hardware Monitor
national,lm92 ±0.33°C Accurate, 12-Bit + Sign Temperature Sensor and Thermal Window Comparator with Two-Wire Interface
+nuvoton,npct501 i2c trusted platform module (TPM)
nxp,pca9556 Octal SMBus and I2C registered interface
nxp,pca9557 8-bit I2C-bus and SMBus I/O port with reset
nxp,pcf8563 Real-time clock/calendar
@@ -61,3 +63,4 @@ taos,tsl2550 Ambient Light Sensor with SMBUS/Two Wire Serial Interface
ti,tsc2003 I2C Touch-Screen Controller
ti,tmp102 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface
ti,tmp275 Digital Temperature Sensor
+winbond,wpct301 i2c trusted platform module (TPM)
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-ic.txt
index 57edb30dbbca..3d3b2b91e333 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-ic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-ic.txt
@@ -8,9 +8,6 @@ Required properties:
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value shall be 1.
-For the valid interrupt sources for your SoC, see the documentation in
-sunxi/<soc>.txt
-
Example:
intc: interrupt-controller {
diff --git a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
index 72a06c0ab1db..1486497a24c1 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
@@ -4,16 +4,33 @@ Specifying interrupt information for devices
1) Interrupt client nodes
-------------------------
-Nodes that describe devices which generate interrupts must contain an
-"interrupts" property. This property must contain a list of interrupt
-specifiers, one per output interrupt. The format of the interrupt specifier is
-determined by the interrupt controller to which the interrupts are routed; see
-section 2 below for details.
+Nodes that describe devices which generate interrupts must contain either an
+"interrupts" property or an "interrupts-extended" property. These properties
+contain a list of interrupt specifiers, one per output interrupt. The format of
+the interrupt specifier is determined by the interrupt controller to which the
+interrupts are routed; see section 2 below for details.
+
+ Example:
+ interrupt-parent = <&intc1>;
+ interrupts = <5 0>, <6 0>;
The "interrupt-parent" property is used to specify the controller to which
interrupts are routed and contains a single phandle referring to the interrupt
controller node. This property is inherited, so it may be specified in an
-interrupt client node or in any of its parent nodes.
+interrupt client node or in any of its parent nodes. Interrupts listed in the
+"interrupts" property are always in reference to the node's interrupt parent.
+
+The "interrupts-extended" property is a special form for use when a node needs
+to reference multiple interrupt parents. Each entry in this property contains
+both the parent phandle and the interrupt specifier. "interrupts-extended"
+should only be used when a device has multiple interrupt parents.
+
+ Example:
+ interrupts-extended = <&intc1 5 1>, <&intc2 1 0>;
+
+A device node may contain either "interrupts" or "interrupts-extended", but not
+both. If both properties are present, then the operating system should log an
+error and use only the data in "interrupts".
2) Interrupt controller nodes
-----------------------------
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sunxi/sun4i-a10.txt b/Documentation/devicetree/bindings/interrupt-controller/sunxi/sun4i-a10.txt
deleted file mode 100644
index 76b98c834499..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/sunxi/sun4i-a10.txt
+++ /dev/null
@@ -1,89 +0,0 @@
-Allwinner A10 (sun4i) interrupt sources
----------------------------------------
-
-The interrupt sources available for the Allwinner A10 SoC are the
-following one:
-
-0: ENMI
-1: UART0
-2: UART1
-3: UART2
-4: UART3
-5: IR0
-6: IR1
-7: I2C0
-8: I2C1
-9: I2C2
-10: SPI0
-11: SPI1
-12: SPI2
-13: SPDIF
-14: AC97
-15: TS
-16: I2S
-17: UART4
-18: UART5
-19: UART6
-20: UART7
-21: KEYPAD
-22: TIMER0
-23: TIMER1
-24: TIMER2
-25: TIMER3
-26: CAN
-27: DMA
-28: PIO
-29: TOUCH_PANEL
-30: AUDIO_CODEC
-31: LRADC
-32: MMC0
-33: MMC1
-34: MMC2
-35: MMC3
-36: MEMSTICK
-37: NAND
-38: USB0
-39: USB1
-40: USB2
-41: SCR
-42: CSI0
-43: CSI1
-44: LCDCTRL0
-45: LCDCTRL1
-46: MP
-47: DEFEBE0
-48: DEFEBE1
-49: PMU
-50: SPI3
-51: TZASC
-52: PATA
-53: VE
-54: SS
-55: EMAC
-56: SATA
-57: GPS
-58: HDMI
-59: TVE
-60: ACE
-61: TVD
-62: PS2_0
-63: PS2_1
-64: USB3
-65: USB4
-66: PLE_PFM
-67: TIMER4
-68: TIMER5
-69: GPU_GP
-70: GPU_GPMMU
-71: GPU_PP0
-72: GPU_PPMMU0
-73: GPU_PMU
-74: GPU_RSV0
-75: GPU_RSV1
-76: GPU_RSV2
-77: GPU_RSV3
-78: GPU_RSV4
-79: GPU_RSV5
-80: GPU_RSV6
-82: SYNC_TIMER0
-83: SYNC_TIMER1
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sunxi/sun5i-a13.txt b/Documentation/devicetree/bindings/interrupt-controller/sunxi/sun5i-a13.txt
deleted file mode 100644
index 2ec3b5ce1a0b..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/sunxi/sun5i-a13.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-Allwinner A13 (sun5i) interrupt sources
----------------------------------------
-
-The interrupt sources available for the Allwinner A13 SoC are the
-following one:
-
-0: ENMI
-2: UART1
-4: UART3
-5: IR
-7: I2C0
-8: I2C1
-9: I2C2
-10: SPI0
-11: SPI1
-12: SPI2
-22: TIMER0
-23: TIMER1
-24: TIMER2
-25: TIMER3
-27: DMA
-28: PIO
-29: TOUCH_PANEL
-30: AUDIO_CODEC
-31: LRADC
-32: MMC0
-33: MMC1
-34: MMC2
-37: NAND
-38: USB OTG
-39: USB EHCI
-40: USB OHCI
-42: CSI
-44: LCDCTRL
-47: DEFEBE
-49: PMU
-53: VE
-54: SS
-66: PLE_PFM
-67: TIMER4
-68: TIMER5
-69: GPU_GP
-70: GPU_GPMMU
-71: GPU_PP0
-72: GPU_PPMMU0
-73: GPU_PMU
-74: GPU_RSV0
-75: GPU_RSV1
-76: GPU_RSV2
-77: GPU_RSV3
-78: GPU_RSV4
-79: GPU_RSV5
-80: GPU_RSV6
-82: SYNC_TIMER0
-83: SYNC_TIMER1
diff --git a/Documentation/devicetree/bindings/leds/leds-lp55xx.txt b/Documentation/devicetree/bindings/leds/leds-lp55xx.txt
index a61727f9a6d1..c55b8c016a9e 100644
--- a/Documentation/devicetree/bindings/leds/leds-lp55xx.txt
+++ b/Documentation/devicetree/bindings/leds/leds-lp55xx.txt
@@ -10,6 +10,7 @@ Each child has own specific current settings
- max-cur: Maximum current at each led channel.
Optional properties:
+- enable-gpio: GPIO attached to the chip's enable pin
- label: Used for naming LEDs
- pwr-sel: LP8501 specific property. Power selection for output channels.
0: D1~9 are connected to VDD
@@ -17,12 +18,15 @@ Optional properties:
2: D1~6 with VOUT, D7~9 with VDD
3: D1~9 are connected to VOUT
-Alternatively, each child can have specific channel name
-- chan-name: Name of each channel name
+Alternatively, each child can have a specific channel name and trigger:
+- chan-name (optional): name of channel
+- linux,default-trigger (optional): see
+ Documentation/devicetree/bindings/leds/common.txt
example 1) LP5521
3 LED channels, external clock used. Channel names are 'lp5521_pri:channel0',
-'lp5521_pri:channel1' and 'lp5521_pri:channel2'
+'lp5521_pri:channel1' and 'lp5521_pri:channel2', with a heartbeat trigger
+on channel 0.
lp5521@32 {
compatible = "national,lp5521";
@@ -33,6 +37,7 @@ lp5521@32 {
chan0 {
led-cur = /bits/ 8 <0x2f>;
max-cur = /bits/ 8 <0x5f>;
+ linux,default-trigger = "heartbeat";
};
chan1 {
diff --git a/Documentation/devicetree/bindings/media/st-rc.txt b/Documentation/devicetree/bindings/media/st-rc.txt
new file mode 100644
index 000000000000..05c432d08bca
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/st-rc.txt
@@ -0,0 +1,29 @@
+Device-Tree bindings for ST IRB IP
+
+Required properties:
+ - compatible: Should contain "st,comms-irb".
+ - reg: Base physical address of the controller and length of memory
+ mapped region.
+ - interrupts: interrupt-specifier for the sole interrupt generated by
+ the device. The interrupt specifier format depends on the interrupt
+ controller parent.
+ - rx-mode: can be "infrared" or "uhf". This property specifies the L1
+ protocol used for receiving remote control signals. rx-mode should
+ be present iff the rx pins are wired up.
+ - tx-mode: should be "infrared". This property specifies the L1
+ protocol used for transmitting remote control signals. tx-mode should
+ be present iff the tx pins are wired up.
+
+Optional properties:
+ - pinctrl-names, pinctrl-0: the pincontrol settings to configure muxing
+ properly for IRB pins.
+ - clocks : phandle with clock-specifier pair for IRB.
+
+Example node:
+
+ rc: rc@fe518000 {
+ compatible = "st,comms-irb";
+ reg = <0xfe518000 0x234>;
+ interrupts = <0 203 0>;
+ rx-mode = "infrared";
+ };
diff --git a/Documentation/devicetree/bindings/mfd/as3722.txt b/Documentation/devicetree/bindings/mfd/as3722.txt
new file mode 100644
index 000000000000..fc2191ecfd6b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/as3722.txt
@@ -0,0 +1,194 @@
+* ams AS3722 Power management IC.
+
+Required properties:
+-------------------
+- compatible: Must be "ams,as3722".
+- reg: I2C device address.
+- interrupt-controller: The AS3722 has an internal interrupt controller which
+  takes interrupt requests from internal sub-blocks such as the RTC, regulators
+  and GPIOs, as well as from an external input.
+- #interrupt-cells: Should be set to 2, for the IRQ number and flags.
+  The first cell is the IRQ number. IRQ numbers for the different interrupt
+  sources of the AS3722 are defined in dt-bindings/mfd/as3722.h.
+  The second cell is the flags, encoded as the trigger masks from the binding
+  document interrupts.txt, using dt-bindings/irq (see the client example below).
+
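+Example of a client node using the AS3722 as its interrupt parent (this is
+only an illustration: the "ams3722" label and the AS3722_IRQ_RTC_ALARM macro
+name are assumed here; see dt-bindings/mfd/as3722.h for the real IRQ macros,
+and the flag macro comes from the irq dt-bindings header):
+
+	some-device {
+		interrupt-parent = <&ams3722>;
+		interrupts = <AS3722_IRQ_RTC_ALARM IRQ_TYPE_LEVEL_HIGH>;
+	};
+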
+Optional submodules and their properties:
+=======================================
+
+Pinmux and GPIO:
+===============
+The device has 8 GPIO pins which can be configured as GPIOs as well as for
+special IO functions.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The following properties are needed if GPIO and pinmux functionality
+is required:
+ Required properties:
+ -------------------
+ - gpio-controller: Marks the device node as a GPIO controller.
+ - #gpio-cells: Number of GPIO cells. Refer to binding document
+ gpio/gpio.txt
+
+ Optional properties:
+ --------------------
+	The following properties are required if pin control settings need
+	to be applied at boot.
+	- pinctrl-names: A pinctrl state named "default" must be defined,
+		using the bindings in pinctrl/pinctrl-bindings.txt.
+	- pinctrl[0...n]: Properties containing the phandles that refer to
+		different nodes of pin control settings. These nodes represent
+		the pin control settings of state 0 to state n. Each of these
+		nodes contains different subnodes to represent the desired
+		configuration for a list of pins. This configuration can
+		include the mux function to select on those pin(s), and
+		various pin configuration parameters, such as pull-up and
+		open drain.
+
+	Each subnode has the following properties:
+	  Required properties:
+		- pins: List of pins. Valid values for the pins property are:
+ gpio0, gpio1, gpio2, gpio3, gpio4, gpio5,
+ gpio6, gpio7
+
+ Optional properties:
+ function, bias-disable, bias-pull-up, bias-pull-down,
+ bias-high-impedance, drive-open-drain.
+
+ Valid values for function properties are:
+ gpio, interrupt-out, gpio-in-interrupt,
+ vsup-vbat-low-undebounce-out,
+ vsup-vbat-low-debounce-out,
+ voltage-in-standby, oc-pg-sd0, oc-pg-sd6,
+ powergood-out, pwm-in, pwm-out, clk32k-out,
+ watchdog-in, soft-reset-in
+
+Regulators:
+===========
+The device has multiple DCDCs and LDOs. The node "regulators" is required if
+regulator functionality is needed.
+
+The following are the properties of the regulator subnode.
+
+ Optional properties:
+ -------------------
+	The input supplies of the regulators are optional properties on the
+	regulator node. They are provided through the following
+	properties:
+ vsup-sd2-supply: Input supply for SD2.
+ vsup-sd3-supply: Input supply for SD3.
+ vsup-sd4-supply: Input supply for SD4.
+ vsup-sd5-supply: Input supply for SD5.
+ vin-ldo0-supply: Input supply for LDO0.
+ vin-ldo1-6-supply: Input supply for LDO1 and LDO6.
+ vin-ldo2-5-7-supply: Input supply for LDO2, LDO5 and LDO7.
+ vin-ldo3-4-supply: Input supply for LDO3 and LDO4.
+ vin-ldo9-10-supply: Input supply for LDO9 and LDO10.
+ vin-ldo11-supply: Input supply for LDO11.
+
+ Optional sub nodes for regulators:
+ ---------------------------------
+	The subnode name is the name of the regulator and it must be one of:
+ sd[0-6], ldo[0-7], ldo[9-11]
+
+ Each sub-node should contain the constraints and initialization
+ information for that regulator. See regulator.txt for a description
+ of standard properties for these sub-nodes.
+ Additional optional custom properties are listed below.
+	ams,ext-control: External control of the rail. The value of this
+			property tells which external input controls this
+			rail. Valid values are 0, 1, 2 and 3.
+			0: There is no external control of this rail.
+			1: Rail is controlled by the ENABLE1 input pin.
+			2: Rail is controlled by the ENABLE2 input pin.
+			3: Rail is controlled by the ENABLE3 input pin.
+			If this property is missing from the DT, no external
+			control is assumed. The external control pin macros
+			are defined in dt-bindings/mfd/as3722.h.
+
+ ams,enable-tracking: Enable tracking with SD1, only supported
+ by LDO3.
+
+Example:
+--------
+#include <dt-bindings/mfd/as3722.h>
+...
+ams3722 {
+ compatible = "ams,as3722";
+ reg = <0x48>;
+
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&as3722_default>;
+
+ as3722_default: pinmux {
+ gpio0 {
+ pins = "gpio0";
+ function = "gpio";
+ bias-pull-down;
+ };
+
+ gpio1_2_4_7 {
+ pins = "gpio1", "gpio2", "gpio4", "gpio7";
+ function = "gpio";
+ bias-pull-up;
+ };
+
+ gpio5 {
+ pins = "gpio5";
+			function = "clk32k-out";
+		};
+	};
+
+ regulators {
+ vsup-sd2-supply = <...>;
+ ...
+
+ sd0 {
+ regulator-name = "vdd_cpu";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ ams,ext-control = <2>;
+ };
+
+ sd1 {
+ regulator-name = "vdd_core";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ ams,ext-control = <1>;
+ };
+
+ sd2 {
+ regulator-name = "vddio_ddr";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ };
+
+ sd4 {
+ regulator-name = "avdd-hdmi-pex";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-always-on;
+ };
+
+ sd5 {
+ regulator-name = "vdd-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ ....
+ };
+};
diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
index c67b975c8906..532b1d440abc 100644
--- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
@@ -16,6 +16,8 @@ Required Properties:
specific extensions.
- "samsung,exynos5250-dw-mshc": for controllers with Samsung Exynos5250
specific extensions.
+ - "samsung,exynos5420-dw-mshc": for controllers with Samsung Exynos5420
+ specific extensions.
* samsung,dw-mshc-ciu-div: Specifies the divider value for the card interface
unit (ciu) clock. This property is applicable only for Exynos5 SoC's and
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 1dd622546d06..9046ba06c47a 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -12,6 +12,11 @@ Required properties:
Optional properties:
- fsl,cd-controller : Indicate to use controller internal card detection
- fsl,wp-controller : Indicate to use controller internal write protection
+- fsl,delay-line : Specify the number of delay cells for override mode.
+  This is used to set the clock delay for the DLL (Delay Line) in override mode
+  to select a proper data sampling window in case the clock quality is not good
+  because the signal path is too long on the board. Please refer to the
+  eSDHC/uSDHC chapter, DLL (Delay Line) section in the RM for details.
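+  For example (the actual value is board specific; the number below is only
+  an illustration of the property format):
+	fsl,delay-line = <21>;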
Examples:
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index 066a78b034ca..8f3f13315358 100644
--- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -52,6 +52,9 @@ Optional properties:
is specified and the ciu clock is specified then we'll try to set the ciu
clock to this at probe time.
+* clock-freq-min-max: Minimum and maximum clock frequency for the card output
+  clock (cclk_out). If it is not specified, the maximum is 200 MHz and the
+  minimum is 400 kHz by default.
+
* num-slots: specifies the number of slots supported by the controller.
The number of physical slots actually used could be equal or less than the
value specified by num-slots. If this property is not specified, the value
@@ -66,6 +69,10 @@ Optional properties:
* supports-highspeed: Enables support for high speed cards (up to 50MHz)
+* caps2-mmc-hs200-1_8v: Supports mmc HS200 SDR 1.8V mode
+
+* caps2-mmc-hs200-1_2v: Supports mmc HS200 SDR 1.2V mode
+
* broken-cd: as documented in mmc core bindings.
* vmmc-supply: The phandle to the regulator to use for vmmc. If this is
@@ -93,8 +100,10 @@ board specific portions as listed below.
dwmmc0@12200000 {
clock-frequency = <400000000>;
+ clock-freq-min-max = <400000 200000000>;
num-slots = <1>;
supports-highspeed;
+ caps2-mmc-hs200-1_8v;
broken-cd;
fifo-depth = <0x80>;
card-detect-delay = <200>;
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
index ed271fc255b2..8c8908ab84ba 100644
--- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
+++ b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
@@ -20,8 +20,29 @@ ti,dual-volt: boolean, supports dual voltage cards
ti,non-removable: non-removable slot (like eMMC)
ti,needs-special-reset: Requires a special softreset sequence
ti,needs-special-hs-handling: HSMMC IP needs special setting for handling High Speed
+dmas: List of DMA specifiers with the controller-specific format
+as described in the generic DMA client binding. A tx and rx
+specifier is required.
+dma-names: List of DMA request names. These strings correspond
+1:1 with the DMA specifiers listed in dmas. The strings are to be
+"rx" and "tx" for the RX and TX DMA requests, respectively.
+
+Examples:
+
+[hwmod populated DMA resources]
+
+ mmc1: mmc@0x4809c000 {
+ compatible = "ti,omap4-hsmmc";
+ reg = <0x4809c000 0x400>;
+ ti,hwmods = "mmc1";
+ ti,dual-volt;
+ bus-width = <4>;
+ vmmc-supply = <&vmmc>; /* phandle to regulator node */
+ ti,non-removable;
+ };
+
+[generic DMA request binding]
-Example:
mmc1: mmc@0x4809c000 {
compatible = "ti,omap4-hsmmc";
reg = <0x4809c000 0x400>;
@@ -30,4 +51,7 @@ Example:
bus-width = <4>;
vmmc-supply = <&vmmc>; /* phandle to regulator node */
ti,non-removable;
+ dmas = <&edma 24
+ &edma 25>;
+ dma-names = "tx", "rx";
};
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
index df338cb5059c..5e1f31b5ff70 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
@@ -22,10 +22,10 @@ Optional properties:
width of 8 is assumed.
- ti,nand-ecc-opt: A string setting the ECC layout to use. One of:
-
- "sw" Software method (default)
- "hw" Hardware method
- "hw-romcode" gpmc hamming mode method & romcode layout
+ "sw" <deprecated> use "ham1" instead
+ "hw" <deprecated> use "ham1" instead
+ "hw-romcode" <deprecated> use "ham1" instead
+ "ham1" 1-bit Hamming ecc code
"bch4" 4-bit BCH ecc code
"bch8" 8-bit BCH ecc code
@@ -36,8 +36,12 @@ Optional properties:
"prefetch-dma" Prefetch enabled sDMA mode
"prefetch-irq" Prefetch enabled irq mode
- - elm_id: Specifies elm device node. This is required to support BCH
- error correction using ELM module.
+ - elm_id: <deprecated> use "ti,elm-id" instead
+ - ti,elm-id: Specifies the phandle of the ELM devicetree node.
+		ELM is an on-chip hardware engine on TI SoCs which is used for
+		locating ECC errors for BCHx algorithms. SoCs which have an ELM
+		hardware engine should specify this device node in the .dtsi.
+		Using ELM for ECC error correction frees some CPU cycles (see
+		the example below).
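+
+	Example (illustrative only; the "elm" label is assumed to refer to the
+	ELM node defined in the SoC .dtsi):
+
+		nand@0,0 {
+			...
+			ti,nand-ecc-opt = "bch8";
+			ti,elm-id = <&elm>;
+		};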
For inline partition table parsing (optional):
diff --git a/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt b/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
new file mode 100644
index 000000000000..7ff57a119f81
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
@@ -0,0 +1,28 @@
+TI CPSW Phy mode Selection Device Tree Bindings
+-----------------------------------------------
+
+Required properties:
+- compatible : Should be "ti,am3352-cpsw-phy-sel"
+- reg : physical base address and size of the cpsw
+ registers map
+- reg-names : names of the register map given in "reg" node
+
+Optional properties:
+- rmii-clock-ext	: If present, the driver will configure the RMII
+			  interface for external clock usage
+
+Examples:
+
+ phy_sel: cpsw-phy-sel@44e10650 {
+ compatible = "ti,am3352-cpsw-phy-sel";
+		reg = <0x44e10650 0x4>;
+ reg-names = "gmii-sel";
+ };
+
+(or)
+ phy_sel: cpsw-phy-sel@44e10650 {
+ compatible = "ti,am3352-cpsw-phy-sel";
+		reg = <0x44e10650 0x4>;
+ reg-names = "gmii-sel";
+ rmii-clock-ext;
+ };
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index e216af356847..d5d26d443693 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -3,7 +3,7 @@
Required properties:
- compatible: should contain "snps,dw-pcie" to identify the
core, plus an identifier for the specific instance, such
- as "samsung,exynos5440-pcie".
+ as "samsung,exynos5440-pcie" or "fsl,imx6q-pcie".
- reg: base addresses and lengths of the pcie controller,
the phy controller, additional register for the phy controller.
- interrupts: interrupt values for level interrupt,
@@ -21,6 +21,11 @@ Required properties:
- num-lanes: number of lanes to use
- reset-gpio: gpio pin number of power good signal
+Optional properties for fsl,imx6q-pcie
+- power-on-gpio: gpio pin number of power-enable signal
+- wake-up-gpio: gpio pin number of incoming wakeup signal
+- disable-gpio: gpio pin number of outgoing rfkill/endpoint disable signal
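+
+  Example (the GPIO specifiers below are board specific and purely
+  illustrative of the property format):
+	power-on-gpio = <&gpio3 19 0>;
+	wake-up-gpio = <&gpio4 14 0>;
+	disable-gpio = <&gpio4 10 0>;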
+
Example:
SoC specific DT Entry:
diff --git a/Documentation/devicetree/bindings/pci/mvebu-pci.txt b/Documentation/devicetree/bindings/pci/mvebu-pci.txt
index 9556e2fedf6d..08c716b2c6b6 100644
--- a/Documentation/devicetree/bindings/pci/mvebu-pci.txt
+++ b/Documentation/devicetree/bindings/pci/mvebu-pci.txt
@@ -5,6 +5,7 @@ Mandatory properties:
- compatible: one of the following values:
marvell,armada-370-pcie
marvell,armada-xp-pcie
+ marvell,dove-pcie
marvell,kirkwood-pcie
- #address-cells, set to <3>
- #size-cells, set to <2>
@@ -14,6 +15,8 @@ Mandatory properties:
- ranges: ranges describing the MMIO registers to control the PCIe
interfaces, and ranges describing the MBus windows needed to access
the memory and I/O regions of each PCIe interface.
+- msi-parent: Link to the hardware entity that serves as the Message
+ Signaled Interrupt controller for this PCI controller.
The ranges describing the MMIO registers have the following layout:
@@ -74,6 +77,8 @@ and the following optional properties:
- marvell,pcie-lane: the physical PCIe lane number, for ports having
multiple lanes. If this property is not found, we assume that the
value is 0.
+- reset-gpios: optional gpio to PERST#
+- reset-delay-us: delay in us to wait after reset de-assertion
Example:
@@ -86,6 +91,7 @@ pcie-controller {
#size-cells = <2>;
bus-range = <0x00 0xff>;
+ msi-parent = <&mpic>;
ranges =
<0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000 /* Port 0.0 registers */
@@ -135,6 +141,10 @@ pcie-controller {
interrupt-map = <0 0 0 0 &mpic 58>;
marvell,pcie-port = <0>;
marvell,pcie-lane = <0>;
+ /* low-active PERST# reset on GPIO 25 */
+ reset-gpios = <&gpio0 25 1>;
+ /* wait 20ms for device settle after reset deassertion */
+ reset-delay-us = <20000>;
clocks = <&gateclk 5>;
status = "disabled";
};
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,mxs-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,mxs-pinctrl.txt
index 3077370c89af..1e70a8aff260 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,mxs-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,mxs-pinctrl.txt
@@ -59,16 +59,16 @@ Required subnode-properties:
Optional subnode-properties:
- fsl,drive-strength: Integer.
- 0: 4 mA
- 1: 8 mA
- 2: 12 mA
- 3: 16 mA
+ 0: MXS_DRIVE_4mA
+ 1: MXS_DRIVE_8mA
+ 2: MXS_DRIVE_12mA
+ 3: MXS_DRIVE_16mA
- fsl,voltage: Integer.
- 0: 1.8 V
- 1: 3.3 V
+ 0: MXS_VOLTAGE_LOW - 1.8 V
+ 1: MXS_VOLTAGE_HIGH - 3.3 V
- fsl,pull-up: Integer.
- 0: Disable the internal pull-up
- 1: Enable the internal pull-up
+ 0: MXS_PULL_DISABLE - Disable the internal pull-up
+ 1: MXS_PULL_ENABLE - Enable the internal pull-up
Note that when enabling the pull-up, the internal pad keeper gets disabled.
Also, some pins doesn't have a pull up, in that case, setting the fsl,pull-up
@@ -85,23 +85,32 @@ pinctrl@80018000 {
mmc0_8bit_pins_a: mmc0-8bit@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2000 0x2010 0x2020 0x2030
- 0x2040 0x2050 0x2060 0x2070
- 0x2080 0x2090 0x20a0>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ MX28_PAD_SSP0_DATA0__SSP0_D0
+ MX28_PAD_SSP0_DATA1__SSP0_D1
+ MX28_PAD_SSP0_DATA2__SSP0_D2
+ MX28_PAD_SSP0_DATA3__SSP0_D3
+ MX28_PAD_SSP0_DATA4__SSP0_D4
+ MX28_PAD_SSP0_DATA5__SSP0_D5
+ MX28_PAD_SSP0_DATA6__SSP0_D6
+ MX28_PAD_SSP0_DATA7__SSP0_D7
+ MX28_PAD_SSP0_CMD__SSP0_CMD
+ MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT
+ MX28_PAD_SSP0_SCK__SSP0_SCK
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
mmc_cd_cfg: mmc-cd-cfg {
- fsl,pinmux-ids = <0x2090>;
- fsl,pull-up = <0>;
+ fsl,pinmux-ids = <MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
mmc_sck_cfg: mmc-sck-cfg {
- fsl,pinmux-ids = <0x20a0>;
- fsl,drive-strength = <2>;
- fsl,pull-up = <0>;
+ fsl,pinmux-ids = <MX28_PAD_SSP0_SCK__SSP0_SCK>;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
@@ -112,811 +121,7 @@ adjusting the configuration for pins card-detection and clock from what group
node mmc0-8bit defines. Only the configuration properties to be adjusted need
to be listed in the config nodes.
-Valid values for i.MX28 pinmux-id:
-
-pinmux id
------- --
-MX28_PAD_GPMI_D00__GPMI_D0 0x0000
-MX28_PAD_GPMI_D01__GPMI_D1 0x0010
-MX28_PAD_GPMI_D02__GPMI_D2 0x0020
-MX28_PAD_GPMI_D03__GPMI_D3 0x0030
-MX28_PAD_GPMI_D04__GPMI_D4 0x0040
-MX28_PAD_GPMI_D05__GPMI_D5 0x0050
-MX28_PAD_GPMI_D06__GPMI_D6 0x0060
-MX28_PAD_GPMI_D07__GPMI_D7 0x0070
-MX28_PAD_GPMI_CE0N__GPMI_CE0N 0x0100
-MX28_PAD_GPMI_CE1N__GPMI_CE1N 0x0110
-MX28_PAD_GPMI_CE2N__GPMI_CE2N 0x0120
-MX28_PAD_GPMI_CE3N__GPMI_CE3N 0x0130
-MX28_PAD_GPMI_RDY0__GPMI_READY0 0x0140
-MX28_PAD_GPMI_RDY1__GPMI_READY1 0x0150
-MX28_PAD_GPMI_RDY2__GPMI_READY2 0x0160
-MX28_PAD_GPMI_RDY3__GPMI_READY3 0x0170
-MX28_PAD_GPMI_RDN__GPMI_RDN 0x0180
-MX28_PAD_GPMI_WRN__GPMI_WRN 0x0190
-MX28_PAD_GPMI_ALE__GPMI_ALE 0x01a0
-MX28_PAD_GPMI_CLE__GPMI_CLE 0x01b0
-MX28_PAD_GPMI_RESETN__GPMI_RESETN 0x01c0
-MX28_PAD_LCD_D00__LCD_D0 0x1000
-MX28_PAD_LCD_D01__LCD_D1 0x1010
-MX28_PAD_LCD_D02__LCD_D2 0x1020
-MX28_PAD_LCD_D03__LCD_D3 0x1030
-MX28_PAD_LCD_D04__LCD_D4 0x1040
-MX28_PAD_LCD_D05__LCD_D5 0x1050
-MX28_PAD_LCD_D06__LCD_D6 0x1060
-MX28_PAD_LCD_D07__LCD_D7 0x1070
-MX28_PAD_LCD_D08__LCD_D8 0x1080
-MX28_PAD_LCD_D09__LCD_D9 0x1090
-MX28_PAD_LCD_D10__LCD_D10 0x10a0
-MX28_PAD_LCD_D11__LCD_D11 0x10b0
-MX28_PAD_LCD_D12__LCD_D12 0x10c0
-MX28_PAD_LCD_D13__LCD_D13 0x10d0
-MX28_PAD_LCD_D14__LCD_D14 0x10e0
-MX28_PAD_LCD_D15__LCD_D15 0x10f0
-MX28_PAD_LCD_D16__LCD_D16 0x1100
-MX28_PAD_LCD_D17__LCD_D17 0x1110
-MX28_PAD_LCD_D18__LCD_D18 0x1120
-MX28_PAD_LCD_D19__LCD_D19 0x1130
-MX28_PAD_LCD_D20__LCD_D20 0x1140
-MX28_PAD_LCD_D21__LCD_D21 0x1150
-MX28_PAD_LCD_D22__LCD_D22 0x1160
-MX28_PAD_LCD_D23__LCD_D23 0x1170
-MX28_PAD_LCD_RD_E__LCD_RD_E 0x1180
-MX28_PAD_LCD_WR_RWN__LCD_WR_RWN 0x1190
-MX28_PAD_LCD_RS__LCD_RS 0x11a0
-MX28_PAD_LCD_CS__LCD_CS 0x11b0
-MX28_PAD_LCD_VSYNC__LCD_VSYNC 0x11c0
-MX28_PAD_LCD_HSYNC__LCD_HSYNC 0x11d0
-MX28_PAD_LCD_DOTCLK__LCD_DOTCLK 0x11e0
-MX28_PAD_LCD_ENABLE__LCD_ENABLE 0x11f0
-MX28_PAD_SSP0_DATA0__SSP0_D0 0x2000
-MX28_PAD_SSP0_DATA1__SSP0_D1 0x2010
-MX28_PAD_SSP0_DATA2__SSP0_D2 0x2020
-MX28_PAD_SSP0_DATA3__SSP0_D3 0x2030
-MX28_PAD_SSP0_DATA4__SSP0_D4 0x2040
-MX28_PAD_SSP0_DATA5__SSP0_D5 0x2050
-MX28_PAD_SSP0_DATA6__SSP0_D6 0x2060
-MX28_PAD_SSP0_DATA7__SSP0_D7 0x2070
-MX28_PAD_SSP0_CMD__SSP0_CMD 0x2080
-MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT 0x2090
-MX28_PAD_SSP0_SCK__SSP0_SCK 0x20a0
-MX28_PAD_SSP1_SCK__SSP1_SCK 0x20c0
-MX28_PAD_SSP1_CMD__SSP1_CMD 0x20d0
-MX28_PAD_SSP1_DATA0__SSP1_D0 0x20e0
-MX28_PAD_SSP1_DATA3__SSP1_D3 0x20f0
-MX28_PAD_SSP2_SCK__SSP2_SCK 0x2100
-MX28_PAD_SSP2_MOSI__SSP2_CMD 0x2110
-MX28_PAD_SSP2_MISO__SSP2_D0 0x2120
-MX28_PAD_SSP2_SS0__SSP2_D3 0x2130
-MX28_PAD_SSP2_SS1__SSP2_D4 0x2140
-MX28_PAD_SSP2_SS2__SSP2_D5 0x2150
-MX28_PAD_SSP3_SCK__SSP3_SCK 0x2180
-MX28_PAD_SSP3_MOSI__SSP3_CMD 0x2190
-MX28_PAD_SSP3_MISO__SSP3_D0 0x21a0
-MX28_PAD_SSP3_SS0__SSP3_D3 0x21b0
-MX28_PAD_AUART0_RX__AUART0_RX 0x3000
-MX28_PAD_AUART0_TX__AUART0_TX 0x3010
-MX28_PAD_AUART0_CTS__AUART0_CTS 0x3020
-MX28_PAD_AUART0_RTS__AUART0_RTS 0x3030
-MX28_PAD_AUART1_RX__AUART1_RX 0x3040
-MX28_PAD_AUART1_TX__AUART1_TX 0x3050
-MX28_PAD_AUART1_CTS__AUART1_CTS 0x3060
-MX28_PAD_AUART1_RTS__AUART1_RTS 0x3070
-MX28_PAD_AUART2_RX__AUART2_RX 0x3080
-MX28_PAD_AUART2_TX__AUART2_TX 0x3090
-MX28_PAD_AUART2_CTS__AUART2_CTS 0x30a0
-MX28_PAD_AUART2_RTS__AUART2_RTS 0x30b0
-MX28_PAD_AUART3_RX__AUART3_RX 0x30c0
-MX28_PAD_AUART3_TX__AUART3_TX 0x30d0
-MX28_PAD_AUART3_CTS__AUART3_CTS 0x30e0
-MX28_PAD_AUART3_RTS__AUART3_RTS 0x30f0
-MX28_PAD_PWM0__PWM_0 0x3100
-MX28_PAD_PWM1__PWM_1 0x3110
-MX28_PAD_PWM2__PWM_2 0x3120
-MX28_PAD_SAIF0_MCLK__SAIF0_MCLK 0x3140
-MX28_PAD_SAIF0_LRCLK__SAIF0_LRCLK 0x3150
-MX28_PAD_SAIF0_BITCLK__SAIF0_BITCLK 0x3160
-MX28_PAD_SAIF0_SDATA0__SAIF0_SDATA0 0x3170
-MX28_PAD_I2C0_SCL__I2C0_SCL 0x3180
-MX28_PAD_I2C0_SDA__I2C0_SDA 0x3190
-MX28_PAD_SAIF1_SDATA0__SAIF1_SDATA0 0x31a0
-MX28_PAD_SPDIF__SPDIF_TX 0x31b0
-MX28_PAD_PWM3__PWM_3 0x31c0
-MX28_PAD_PWM4__PWM_4 0x31d0
-MX28_PAD_LCD_RESET__LCD_RESET 0x31e0
-MX28_PAD_ENET0_MDC__ENET0_MDC 0x4000
-MX28_PAD_ENET0_MDIO__ENET0_MDIO 0x4010
-MX28_PAD_ENET0_RX_EN__ENET0_RX_EN 0x4020
-MX28_PAD_ENET0_RXD0__ENET0_RXD0 0x4030
-MX28_PAD_ENET0_RXD1__ENET0_RXD1 0x4040
-MX28_PAD_ENET0_TX_CLK__ENET0_TX_CLK 0x4050
-MX28_PAD_ENET0_TX_EN__ENET0_TX_EN 0x4060
-MX28_PAD_ENET0_TXD0__ENET0_TXD0 0x4070
-MX28_PAD_ENET0_TXD1__ENET0_TXD1 0x4080
-MX28_PAD_ENET0_RXD2__ENET0_RXD2 0x4090
-MX28_PAD_ENET0_RXD3__ENET0_RXD3 0x40a0
-MX28_PAD_ENET0_TXD2__ENET0_TXD2 0x40b0
-MX28_PAD_ENET0_TXD3__ENET0_TXD3 0x40c0
-MX28_PAD_ENET0_RX_CLK__ENET0_RX_CLK 0x40d0
-MX28_PAD_ENET0_COL__ENET0_COL 0x40e0
-MX28_PAD_ENET0_CRS__ENET0_CRS 0x40f0
-MX28_PAD_ENET_CLK__CLKCTRL_ENET 0x4100
-MX28_PAD_JTAG_RTCK__JTAG_RTCK 0x4140
-MX28_PAD_EMI_D00__EMI_DATA0 0x5000
-MX28_PAD_EMI_D01__EMI_DATA1 0x5010
-MX28_PAD_EMI_D02__EMI_DATA2 0x5020
-MX28_PAD_EMI_D03__EMI_DATA3 0x5030
-MX28_PAD_EMI_D04__EMI_DATA4 0x5040
-MX28_PAD_EMI_D05__EMI_DATA5 0x5050
-MX28_PAD_EMI_D06__EMI_DATA6 0x5060
-MX28_PAD_EMI_D07__EMI_DATA7 0x5070
-MX28_PAD_EMI_D08__EMI_DATA8 0x5080
-MX28_PAD_EMI_D09__EMI_DATA9 0x5090
-MX28_PAD_EMI_D10__EMI_DATA10 0x50a0
-MX28_PAD_EMI_D11__EMI_DATA11 0x50b0
-MX28_PAD_EMI_D12__EMI_DATA12 0x50c0
-MX28_PAD_EMI_D13__EMI_DATA13 0x50d0
-MX28_PAD_EMI_D14__EMI_DATA14 0x50e0
-MX28_PAD_EMI_D15__EMI_DATA15 0x50f0
-MX28_PAD_EMI_ODT0__EMI_ODT0 0x5100
-MX28_PAD_EMI_DQM0__EMI_DQM0 0x5110
-MX28_PAD_EMI_ODT1__EMI_ODT1 0x5120
-MX28_PAD_EMI_DQM1__EMI_DQM1 0x5130
-MX28_PAD_EMI_DDR_OPEN_FB__EMI_DDR_OPEN_FEEDBACK 0x5140
-MX28_PAD_EMI_CLK__EMI_CLK 0x5150
-MX28_PAD_EMI_DQS0__EMI_DQS0 0x5160
-MX28_PAD_EMI_DQS1__EMI_DQS1 0x5170
-MX28_PAD_EMI_DDR_OPEN__EMI_DDR_OPEN 0x51a0
-MX28_PAD_EMI_A00__EMI_ADDR0 0x6000
-MX28_PAD_EMI_A01__EMI_ADDR1 0x6010
-MX28_PAD_EMI_A02__EMI_ADDR2 0x6020
-MX28_PAD_EMI_A03__EMI_ADDR3 0x6030
-MX28_PAD_EMI_A04__EMI_ADDR4 0x6040
-MX28_PAD_EMI_A05__EMI_ADDR5 0x6050
-MX28_PAD_EMI_A06__EMI_ADDR6 0x6060
-MX28_PAD_EMI_A07__EMI_ADDR7 0x6070
-MX28_PAD_EMI_A08__EMI_ADDR8 0x6080
-MX28_PAD_EMI_A09__EMI_ADDR9 0x6090
-MX28_PAD_EMI_A10__EMI_ADDR10 0x60a0
-MX28_PAD_EMI_A11__EMI_ADDR11 0x60b0
-MX28_PAD_EMI_A12__EMI_ADDR12 0x60c0
-MX28_PAD_EMI_A13__EMI_ADDR13 0x60d0
-MX28_PAD_EMI_A14__EMI_ADDR14 0x60e0
-MX28_PAD_EMI_BA0__EMI_BA0 0x6100
-MX28_PAD_EMI_BA1__EMI_BA1 0x6110
-MX28_PAD_EMI_BA2__EMI_BA2 0x6120
-MX28_PAD_EMI_CASN__EMI_CASN 0x6130
-MX28_PAD_EMI_RASN__EMI_RASN 0x6140
-MX28_PAD_EMI_WEN__EMI_WEN 0x6150
-MX28_PAD_EMI_CE0N__EMI_CE0N 0x6160
-MX28_PAD_EMI_CE1N__EMI_CE1N 0x6170
-MX28_PAD_EMI_CKE__EMI_CKE 0x6180
-MX28_PAD_GPMI_D00__SSP1_D0 0x0001
-MX28_PAD_GPMI_D01__SSP1_D1 0x0011
-MX28_PAD_GPMI_D02__SSP1_D2 0x0021
-MX28_PAD_GPMI_D03__SSP1_D3 0x0031
-MX28_PAD_GPMI_D04__SSP1_D4 0x0041
-MX28_PAD_GPMI_D05__SSP1_D5 0x0051
-MX28_PAD_GPMI_D06__SSP1_D6 0x0061
-MX28_PAD_GPMI_D07__SSP1_D7 0x0071
-MX28_PAD_GPMI_CE0N__SSP3_D0 0x0101
-MX28_PAD_GPMI_CE1N__SSP3_D3 0x0111
-MX28_PAD_GPMI_CE2N__CAN1_TX 0x0121
-MX28_PAD_GPMI_CE3N__CAN1_RX 0x0131
-MX28_PAD_GPMI_RDY0__SSP1_CARD_DETECT 0x0141
-MX28_PAD_GPMI_RDY1__SSP1_CMD 0x0151
-MX28_PAD_GPMI_RDY2__CAN0_TX 0x0161
-MX28_PAD_GPMI_RDY3__CAN0_RX 0x0171
-MX28_PAD_GPMI_RDN__SSP3_SCK 0x0181
-MX28_PAD_GPMI_WRN__SSP1_SCK 0x0191
-MX28_PAD_GPMI_ALE__SSP3_D1 0x01a1
-MX28_PAD_GPMI_CLE__SSP3_D2 0x01b1
-MX28_PAD_GPMI_RESETN__SSP3_CMD 0x01c1
-MX28_PAD_LCD_D03__ETM_DA8 0x1031
-MX28_PAD_LCD_D04__ETM_DA9 0x1041
-MX28_PAD_LCD_D08__ETM_DA3 0x1081
-MX28_PAD_LCD_D09__ETM_DA4 0x1091
-MX28_PAD_LCD_D20__ENET1_1588_EVENT2_OUT 0x1141
-MX28_PAD_LCD_D21__ENET1_1588_EVENT2_IN 0x1151
-MX28_PAD_LCD_D22__ENET1_1588_EVENT3_OUT 0x1161
-MX28_PAD_LCD_D23__ENET1_1588_EVENT3_IN 0x1171
-MX28_PAD_LCD_RD_E__LCD_VSYNC 0x1181
-MX28_PAD_LCD_WR_RWN__LCD_HSYNC 0x1191
-MX28_PAD_LCD_RS__LCD_DOTCLK 0x11a1
-MX28_PAD_LCD_CS__LCD_ENABLE 0x11b1
-MX28_PAD_LCD_VSYNC__SAIF1_SDATA0 0x11c1
-MX28_PAD_LCD_HSYNC__SAIF1_SDATA1 0x11d1
-MX28_PAD_LCD_DOTCLK__SAIF1_MCLK 0x11e1
-MX28_PAD_SSP0_DATA4__SSP2_D0 0x2041
-MX28_PAD_SSP0_DATA5__SSP2_D3 0x2051
-MX28_PAD_SSP0_DATA6__SSP2_CMD 0x2061
-MX28_PAD_SSP0_DATA7__SSP2_SCK 0x2071
-MX28_PAD_SSP1_SCK__SSP2_D1 0x20c1
-MX28_PAD_SSP1_CMD__SSP2_D2 0x20d1
-MX28_PAD_SSP1_DATA0__SSP2_D6 0x20e1
-MX28_PAD_SSP1_DATA3__SSP2_D7 0x20f1
-MX28_PAD_SSP2_SCK__AUART2_RX 0x2101
-MX28_PAD_SSP2_MOSI__AUART2_TX 0x2111
-MX28_PAD_SSP2_MISO__AUART3_RX 0x2121
-MX28_PAD_SSP2_SS0__AUART3_TX 0x2131
-MX28_PAD_SSP2_SS1__SSP2_D1 0x2141
-MX28_PAD_SSP2_SS2__SSP2_D2 0x2151
-MX28_PAD_SSP3_SCK__AUART4_TX 0x2181
-MX28_PAD_SSP3_MOSI__AUART4_RX 0x2191
-MX28_PAD_SSP3_MISO__AUART4_RTS 0x21a1
-MX28_PAD_SSP3_SS0__AUART4_CTS 0x21b1
-MX28_PAD_AUART0_RX__I2C0_SCL 0x3001
-MX28_PAD_AUART0_TX__I2C0_SDA 0x3011
-MX28_PAD_AUART0_CTS__AUART4_RX 0x3021
-MX28_PAD_AUART0_RTS__AUART4_TX 0x3031
-MX28_PAD_AUART1_RX__SSP2_CARD_DETECT 0x3041
-MX28_PAD_AUART1_TX__SSP3_CARD_DETECT 0x3051
-MX28_PAD_AUART1_CTS__USB0_OVERCURRENT 0x3061
-MX28_PAD_AUART1_RTS__USB0_ID 0x3071
-MX28_PAD_AUART2_RX__SSP3_D1 0x3081
-MX28_PAD_AUART2_TX__SSP3_D2 0x3091
-MX28_PAD_AUART2_CTS__I2C1_SCL 0x30a1
-MX28_PAD_AUART2_RTS__I2C1_SDA 0x30b1
-MX28_PAD_AUART3_RX__CAN0_TX 0x30c1
-MX28_PAD_AUART3_TX__CAN0_RX 0x30d1
-MX28_PAD_AUART3_CTS__CAN1_TX 0x30e1
-MX28_PAD_AUART3_RTS__CAN1_RX 0x30f1
-MX28_PAD_PWM0__I2C1_SCL 0x3101
-MX28_PAD_PWM1__I2C1_SDA 0x3111
-MX28_PAD_PWM2__USB0_ID 0x3121
-MX28_PAD_SAIF0_MCLK__PWM_3 0x3141
-MX28_PAD_SAIF0_LRCLK__PWM_4 0x3151
-MX28_PAD_SAIF0_BITCLK__PWM_5 0x3161
-MX28_PAD_SAIF0_SDATA0__PWM_6 0x3171
-MX28_PAD_I2C0_SCL__TIMROT_ROTARYA 0x3181
-MX28_PAD_I2C0_SDA__TIMROT_ROTARYB 0x3191
-MX28_PAD_SAIF1_SDATA0__PWM_7 0x31a1
-MX28_PAD_LCD_RESET__LCD_VSYNC 0x31e1
-MX28_PAD_ENET0_MDC__GPMI_CE4N 0x4001
-MX28_PAD_ENET0_MDIO__GPMI_CE5N 0x4011
-MX28_PAD_ENET0_RX_EN__GPMI_CE6N 0x4021
-MX28_PAD_ENET0_RXD0__GPMI_CE7N 0x4031
-MX28_PAD_ENET0_RXD1__GPMI_READY4 0x4041
-MX28_PAD_ENET0_TX_CLK__HSADC_TRIGGER 0x4051
-MX28_PAD_ENET0_TX_EN__GPMI_READY5 0x4061
-MX28_PAD_ENET0_TXD0__GPMI_READY6 0x4071
-MX28_PAD_ENET0_TXD1__GPMI_READY7 0x4081
-MX28_PAD_ENET0_RXD2__ENET1_RXD0 0x4091
-MX28_PAD_ENET0_RXD3__ENET1_RXD1 0x40a1
-MX28_PAD_ENET0_TXD2__ENET1_TXD0 0x40b1
-MX28_PAD_ENET0_TXD3__ENET1_TXD1 0x40c1
-MX28_PAD_ENET0_RX_CLK__ENET0_RX_ER 0x40d1
-MX28_PAD_ENET0_COL__ENET1_TX_EN 0x40e1
-MX28_PAD_ENET0_CRS__ENET1_RX_EN 0x40f1
-MX28_PAD_GPMI_CE2N__ENET0_RX_ER 0x0122
-MX28_PAD_GPMI_CE3N__SAIF1_MCLK 0x0132
-MX28_PAD_GPMI_RDY0__USB0_ID 0x0142
-MX28_PAD_GPMI_RDY2__ENET0_TX_ER 0x0162
-MX28_PAD_GPMI_RDY3__HSADC_TRIGGER 0x0172
-MX28_PAD_GPMI_ALE__SSP3_D4 0x01a2
-MX28_PAD_GPMI_CLE__SSP3_D5 0x01b2
-MX28_PAD_LCD_D00__ETM_DA0 0x1002
-MX28_PAD_LCD_D01__ETM_DA1 0x1012
-MX28_PAD_LCD_D02__ETM_DA2 0x1022
-MX28_PAD_LCD_D03__ETM_DA3 0x1032
-MX28_PAD_LCD_D04__ETM_DA4 0x1042
-MX28_PAD_LCD_D05__ETM_DA5 0x1052
-MX28_PAD_LCD_D06__ETM_DA6 0x1062
-MX28_PAD_LCD_D07__ETM_DA7 0x1072
-MX28_PAD_LCD_D08__ETM_DA8 0x1082
-MX28_PAD_LCD_D09__ETM_DA9 0x1092
-MX28_PAD_LCD_D10__ETM_DA10 0x10a2
-MX28_PAD_LCD_D11__ETM_DA11 0x10b2
-MX28_PAD_LCD_D12__ETM_DA12 0x10c2
-MX28_PAD_LCD_D13__ETM_DA13 0x10d2
-MX28_PAD_LCD_D14__ETM_DA14 0x10e2
-MX28_PAD_LCD_D15__ETM_DA15 0x10f2
-MX28_PAD_LCD_D16__ETM_DA7 0x1102
-MX28_PAD_LCD_D17__ETM_DA6 0x1112
-MX28_PAD_LCD_D18__ETM_DA5 0x1122
-MX28_PAD_LCD_D19__ETM_DA4 0x1132
-MX28_PAD_LCD_D20__ETM_DA3 0x1142
-MX28_PAD_LCD_D21__ETM_DA2 0x1152
-MX28_PAD_LCD_D22__ETM_DA1 0x1162
-MX28_PAD_LCD_D23__ETM_DA0 0x1172
-MX28_PAD_LCD_RD_E__ETM_TCTL 0x1182
-MX28_PAD_LCD_WR_RWN__ETM_TCLK 0x1192
-MX28_PAD_LCD_HSYNC__ETM_TCTL 0x11d2
-MX28_PAD_LCD_DOTCLK__ETM_TCLK 0x11e2
-MX28_PAD_SSP1_SCK__ENET0_1588_EVENT2_OUT 0x20c2
-MX28_PAD_SSP1_CMD__ENET0_1588_EVENT2_IN 0x20d2
-MX28_PAD_SSP1_DATA0__ENET0_1588_EVENT3_OUT 0x20e2
-MX28_PAD_SSP1_DATA3__ENET0_1588_EVENT3_IN 0x20f2
-MX28_PAD_SSP2_SCK__SAIF0_SDATA1 0x2102
-MX28_PAD_SSP2_MOSI__SAIF0_SDATA2 0x2112
-MX28_PAD_SSP2_MISO__SAIF1_SDATA1 0x2122
-MX28_PAD_SSP2_SS0__SAIF1_SDATA2 0x2132
-MX28_PAD_SSP2_SS1__USB1_OVERCURRENT 0x2142
-MX28_PAD_SSP2_SS2__USB0_OVERCURRENT 0x2152
-MX28_PAD_SSP3_SCK__ENET1_1588_EVENT0_OUT 0x2182
-MX28_PAD_SSP3_MOSI__ENET1_1588_EVENT0_IN 0x2192
-MX28_PAD_SSP3_MISO__ENET1_1588_EVENT1_OUT 0x21a2
-MX28_PAD_SSP3_SS0__ENET1_1588_EVENT1_IN 0x21b2
-MX28_PAD_AUART0_RX__DUART_CTS 0x3002
-MX28_PAD_AUART0_TX__DUART_RTS 0x3012
-MX28_PAD_AUART0_CTS__DUART_RX 0x3022
-MX28_PAD_AUART0_RTS__DUART_TX 0x3032
-MX28_PAD_AUART1_RX__PWM_0 0x3042
-MX28_PAD_AUART1_TX__PWM_1 0x3052
-MX28_PAD_AUART1_CTS__TIMROT_ROTARYA 0x3062
-MX28_PAD_AUART1_RTS__TIMROT_ROTARYB 0x3072
-MX28_PAD_AUART2_RX__SSP3_D4 0x3082
-MX28_PAD_AUART2_TX__SSP3_D5 0x3092
-MX28_PAD_AUART2_CTS__SAIF1_BITCLK 0x30a2
-MX28_PAD_AUART2_RTS__SAIF1_LRCLK 0x30b2
-MX28_PAD_AUART3_RX__ENET0_1588_EVENT0_OUT 0x30c2
-MX28_PAD_AUART3_TX__ENET0_1588_EVENT0_IN 0x30d2
-MX28_PAD_AUART3_CTS__ENET0_1588_EVENT1_OUT 0x30e2
-MX28_PAD_AUART3_RTS__ENET0_1588_EVENT1_IN 0x30f2
-MX28_PAD_PWM0__DUART_RX 0x3102
-MX28_PAD_PWM1__DUART_TX 0x3112
-MX28_PAD_PWM2__USB1_OVERCURRENT 0x3122
-MX28_PAD_SAIF0_MCLK__AUART4_CTS 0x3142
-MX28_PAD_SAIF0_LRCLK__AUART4_RTS 0x3152
-MX28_PAD_SAIF0_BITCLK__AUART4_RX 0x3162
-MX28_PAD_SAIF0_SDATA0__AUART4_TX 0x3172
-MX28_PAD_I2C0_SCL__DUART_RX 0x3182
-MX28_PAD_I2C0_SDA__DUART_TX 0x3192
-MX28_PAD_SAIF1_SDATA0__SAIF0_SDATA1 0x31a2
-MX28_PAD_SPDIF__ENET1_RX_ER 0x31b2
-MX28_PAD_ENET0_MDC__SAIF0_SDATA1 0x4002
-MX28_PAD_ENET0_MDIO__SAIF0_SDATA2 0x4012
-MX28_PAD_ENET0_RX_EN__SAIF1_SDATA1 0x4022
-MX28_PAD_ENET0_RXD0__SAIF1_SDATA2 0x4032
-MX28_PAD_ENET0_TX_CLK__ENET0_1588_EVENT2_OUT 0x4052
-MX28_PAD_ENET0_RXD2__ENET0_1588_EVENT0_OUT 0x4092
-MX28_PAD_ENET0_RXD3__ENET0_1588_EVENT0_IN 0x40a2
-MX28_PAD_ENET0_TXD2__ENET0_1588_EVENT1_OUT 0x40b2
-MX28_PAD_ENET0_TXD3__ENET0_1588_EVENT1_IN 0x40c2
-MX28_PAD_ENET0_RX_CLK__ENET0_1588_EVENT2_IN 0x40d2
-MX28_PAD_ENET0_COL__ENET0_1588_EVENT3_OUT 0x40e2
-MX28_PAD_ENET0_CRS__ENET0_1588_EVENT3_IN 0x40f2
-MX28_PAD_GPMI_D00__GPIO_0_0 0x0003
-MX28_PAD_GPMI_D01__GPIO_0_1 0x0013
-MX28_PAD_GPMI_D02__GPIO_0_2 0x0023
-MX28_PAD_GPMI_D03__GPIO_0_3 0x0033
-MX28_PAD_GPMI_D04__GPIO_0_4 0x0043
-MX28_PAD_GPMI_D05__GPIO_0_5 0x0053
-MX28_PAD_GPMI_D06__GPIO_0_6 0x0063
-MX28_PAD_GPMI_D07__GPIO_0_7 0x0073
-MX28_PAD_GPMI_CE0N__GPIO_0_16 0x0103
-MX28_PAD_GPMI_CE1N__GPIO_0_17 0x0113
-MX28_PAD_GPMI_CE2N__GPIO_0_18 0x0123
-MX28_PAD_GPMI_CE3N__GPIO_0_19 0x0133
-MX28_PAD_GPMI_RDY0__GPIO_0_20 0x0143
-MX28_PAD_GPMI_RDY1__GPIO_0_21 0x0153
-MX28_PAD_GPMI_RDY2__GPIO_0_22 0x0163
-MX28_PAD_GPMI_RDY3__GPIO_0_23 0x0173
-MX28_PAD_GPMI_RDN__GPIO_0_24 0x0183
-MX28_PAD_GPMI_WRN__GPIO_0_25 0x0193
-MX28_PAD_GPMI_ALE__GPIO_0_26 0x01a3
-MX28_PAD_GPMI_CLE__GPIO_0_27 0x01b3
-MX28_PAD_GPMI_RESETN__GPIO_0_28 0x01c3
-MX28_PAD_LCD_D00__GPIO_1_0 0x1003
-MX28_PAD_LCD_D01__GPIO_1_1 0x1013
-MX28_PAD_LCD_D02__GPIO_1_2 0x1023
-MX28_PAD_LCD_D03__GPIO_1_3 0x1033
-MX28_PAD_LCD_D04__GPIO_1_4 0x1043
-MX28_PAD_LCD_D05__GPIO_1_5 0x1053
-MX28_PAD_LCD_D06__GPIO_1_6 0x1063
-MX28_PAD_LCD_D07__GPIO_1_7 0x1073
-MX28_PAD_LCD_D08__GPIO_1_8 0x1083
-MX28_PAD_LCD_D09__GPIO_1_9 0x1093
-MX28_PAD_LCD_D10__GPIO_1_10 0x10a3
-MX28_PAD_LCD_D11__GPIO_1_11 0x10b3
-MX28_PAD_LCD_D12__GPIO_1_12 0x10c3
-MX28_PAD_LCD_D13__GPIO_1_13 0x10d3
-MX28_PAD_LCD_D14__GPIO_1_14 0x10e3
-MX28_PAD_LCD_D15__GPIO_1_15 0x10f3
-MX28_PAD_LCD_D16__GPIO_1_16 0x1103
-MX28_PAD_LCD_D17__GPIO_1_17 0x1113
-MX28_PAD_LCD_D18__GPIO_1_18 0x1123
-MX28_PAD_LCD_D19__GPIO_1_19 0x1133
-MX28_PAD_LCD_D20__GPIO_1_20 0x1143
-MX28_PAD_LCD_D21__GPIO_1_21 0x1153
-MX28_PAD_LCD_D22__GPIO_1_22 0x1163
-MX28_PAD_LCD_D23__GPIO_1_23 0x1173
-MX28_PAD_LCD_RD_E__GPIO_1_24 0x1183
-MX28_PAD_LCD_WR_RWN__GPIO_1_25 0x1193
-MX28_PAD_LCD_RS__GPIO_1_26 0x11a3
-MX28_PAD_LCD_CS__GPIO_1_27 0x11b3
-MX28_PAD_LCD_VSYNC__GPIO_1_28 0x11c3
-MX28_PAD_LCD_HSYNC__GPIO_1_29 0x11d3
-MX28_PAD_LCD_DOTCLK__GPIO_1_30 0x11e3
-MX28_PAD_LCD_ENABLE__GPIO_1_31 0x11f3
-MX28_PAD_SSP0_DATA0__GPIO_2_0 0x2003
-MX28_PAD_SSP0_DATA1__GPIO_2_1 0x2013
-MX28_PAD_SSP0_DATA2__GPIO_2_2 0x2023
-MX28_PAD_SSP0_DATA3__GPIO_2_3 0x2033
-MX28_PAD_SSP0_DATA4__GPIO_2_4 0x2043
-MX28_PAD_SSP0_DATA5__GPIO_2_5 0x2053
-MX28_PAD_SSP0_DATA6__GPIO_2_6 0x2063
-MX28_PAD_SSP0_DATA7__GPIO_2_7 0x2073
-MX28_PAD_SSP0_CMD__GPIO_2_8 0x2083
-MX28_PAD_SSP0_DETECT__GPIO_2_9 0x2093
-MX28_PAD_SSP0_SCK__GPIO_2_10 0x20a3
-MX28_PAD_SSP1_SCK__GPIO_2_12 0x20c3
-MX28_PAD_SSP1_CMD__GPIO_2_13 0x20d3
-MX28_PAD_SSP1_DATA0__GPIO_2_14 0x20e3
-MX28_PAD_SSP1_DATA3__GPIO_2_15 0x20f3
-MX28_PAD_SSP2_SCK__GPIO_2_16 0x2103
-MX28_PAD_SSP2_MOSI__GPIO_2_17 0x2113
-MX28_PAD_SSP2_MISO__GPIO_2_18 0x2123
-MX28_PAD_SSP2_SS0__GPIO_2_19 0x2133
-MX28_PAD_SSP2_SS1__GPIO_2_20 0x2143
-MX28_PAD_SSP2_SS2__GPIO_2_21 0x2153
-MX28_PAD_SSP3_SCK__GPIO_2_24 0x2183
-MX28_PAD_SSP3_MOSI__GPIO_2_25 0x2193
-MX28_PAD_SSP3_MISO__GPIO_2_26 0x21a3
-MX28_PAD_SSP3_SS0__GPIO_2_27 0x21b3
-MX28_PAD_AUART0_RX__GPIO_3_0 0x3003
-MX28_PAD_AUART0_TX__GPIO_3_1 0x3013
-MX28_PAD_AUART0_CTS__GPIO_3_2 0x3023
-MX28_PAD_AUART0_RTS__GPIO_3_3 0x3033
-MX28_PAD_AUART1_RX__GPIO_3_4 0x3043
-MX28_PAD_AUART1_TX__GPIO_3_5 0x3053
-MX28_PAD_AUART1_CTS__GPIO_3_6 0x3063
-MX28_PAD_AUART1_RTS__GPIO_3_7 0x3073
-MX28_PAD_AUART2_RX__GPIO_3_8 0x3083
-MX28_PAD_AUART2_TX__GPIO_3_9 0x3093
-MX28_PAD_AUART2_CTS__GPIO_3_10 0x30a3
-MX28_PAD_AUART2_RTS__GPIO_3_11 0x30b3
-MX28_PAD_AUART3_RX__GPIO_3_12 0x30c3
-MX28_PAD_AUART3_TX__GPIO_3_13 0x30d3
-MX28_PAD_AUART3_CTS__GPIO_3_14 0x30e3
-MX28_PAD_AUART3_RTS__GPIO_3_15 0x30f3
-MX28_PAD_PWM0__GPIO_3_16 0x3103
-MX28_PAD_PWM1__GPIO_3_17 0x3113
-MX28_PAD_PWM2__GPIO_3_18 0x3123
-MX28_PAD_SAIF0_MCLK__GPIO_3_20 0x3143
-MX28_PAD_SAIF0_LRCLK__GPIO_3_21 0x3153
-MX28_PAD_SAIF0_BITCLK__GPIO_3_22 0x3163
-MX28_PAD_SAIF0_SDATA0__GPIO_3_23 0x3173
-MX28_PAD_I2C0_SCL__GPIO_3_24 0x3183
-MX28_PAD_I2C0_SDA__GPIO_3_25 0x3193
-MX28_PAD_SAIF1_SDATA0__GPIO_3_26 0x31a3
-MX28_PAD_SPDIF__GPIO_3_27 0x31b3
-MX28_PAD_PWM3__GPIO_3_28 0x31c3
-MX28_PAD_PWM4__GPIO_3_29 0x31d3
-MX28_PAD_LCD_RESET__GPIO_3_30 0x31e3
-MX28_PAD_ENET0_MDC__GPIO_4_0 0x4003
-MX28_PAD_ENET0_MDIO__GPIO_4_1 0x4013
-MX28_PAD_ENET0_RX_EN__GPIO_4_2 0x4023
-MX28_PAD_ENET0_RXD0__GPIO_4_3 0x4033
-MX28_PAD_ENET0_RXD1__GPIO_4_4 0x4043
-MX28_PAD_ENET0_TX_CLK__GPIO_4_5 0x4053
-MX28_PAD_ENET0_TX_EN__GPIO_4_6 0x4063
-MX28_PAD_ENET0_TXD0__GPIO_4_7 0x4073
-MX28_PAD_ENET0_TXD1__GPIO_4_8 0x4083
-MX28_PAD_ENET0_RXD2__GPIO_4_9 0x4093
-MX28_PAD_ENET0_RXD3__GPIO_4_10 0x40a3
-MX28_PAD_ENET0_TXD2__GPIO_4_11 0x40b3
-MX28_PAD_ENET0_TXD3__GPIO_4_12 0x40c3
-MX28_PAD_ENET0_RX_CLK__GPIO_4_13 0x40d3
-MX28_PAD_ENET0_COL__GPIO_4_14 0x40e3
-MX28_PAD_ENET0_CRS__GPIO_4_15 0x40f3
-MX28_PAD_ENET_CLK__GPIO_4_16 0x4103
-MX28_PAD_JTAG_RTCK__GPIO_4_20 0x4143
-
-Valid values for i.MX23 pinmux-id:
-
-pinmux id
------- --
-MX23_PAD_GPMI_D00__GPMI_D00 0x0000
-MX23_PAD_GPMI_D01__GPMI_D01 0x0010
-MX23_PAD_GPMI_D02__GPMI_D02 0x0020
-MX23_PAD_GPMI_D03__GPMI_D03 0x0030
-MX23_PAD_GPMI_D04__GPMI_D04 0x0040
-MX23_PAD_GPMI_D05__GPMI_D05 0x0050
-MX23_PAD_GPMI_D06__GPMI_D06 0x0060
-MX23_PAD_GPMI_D07__GPMI_D07 0x0070
-MX23_PAD_GPMI_D08__GPMI_D08 0x0080
-MX23_PAD_GPMI_D09__GPMI_D09 0x0090
-MX23_PAD_GPMI_D10__GPMI_D10 0x00a0
-MX23_PAD_GPMI_D11__GPMI_D11 0x00b0
-MX23_PAD_GPMI_D12__GPMI_D12 0x00c0
-MX23_PAD_GPMI_D13__GPMI_D13 0x00d0
-MX23_PAD_GPMI_D14__GPMI_D14 0x00e0
-MX23_PAD_GPMI_D15__GPMI_D15 0x00f0
-MX23_PAD_GPMI_CLE__GPMI_CLE 0x0100
-MX23_PAD_GPMI_ALE__GPMI_ALE 0x0110
-MX23_PAD_GPMI_CE2N__GPMI_CE2N 0x0120
-MX23_PAD_GPMI_RDY0__GPMI_RDY0 0x0130
-MX23_PAD_GPMI_RDY1__GPMI_RDY1 0x0140
-MX23_PAD_GPMI_RDY2__GPMI_RDY2 0x0150
-MX23_PAD_GPMI_RDY3__GPMI_RDY3 0x0160
-MX23_PAD_GPMI_WPN__GPMI_WPN 0x0170
-MX23_PAD_GPMI_WRN__GPMI_WRN 0x0180
-MX23_PAD_GPMI_RDN__GPMI_RDN 0x0190
-MX23_PAD_AUART1_CTS__AUART1_CTS 0x01a0
-MX23_PAD_AUART1_RTS__AUART1_RTS 0x01b0
-MX23_PAD_AUART1_RX__AUART1_RX 0x01c0
-MX23_PAD_AUART1_TX__AUART1_TX 0x01d0
-MX23_PAD_I2C_SCL__I2C_SCL 0x01e0
-MX23_PAD_I2C_SDA__I2C_SDA 0x01f0
-MX23_PAD_LCD_D00__LCD_D00 0x1000
-MX23_PAD_LCD_D01__LCD_D01 0x1010
-MX23_PAD_LCD_D02__LCD_D02 0x1020
-MX23_PAD_LCD_D03__LCD_D03 0x1030
-MX23_PAD_LCD_D04__LCD_D04 0x1040
-MX23_PAD_LCD_D05__LCD_D05 0x1050
-MX23_PAD_LCD_D06__LCD_D06 0x1060
-MX23_PAD_LCD_D07__LCD_D07 0x1070
-MX23_PAD_LCD_D08__LCD_D08 0x1080
-MX23_PAD_LCD_D09__LCD_D09 0x1090
-MX23_PAD_LCD_D10__LCD_D10 0x10a0
-MX23_PAD_LCD_D11__LCD_D11 0x10b0
-MX23_PAD_LCD_D12__LCD_D12 0x10c0
-MX23_PAD_LCD_D13__LCD_D13 0x10d0
-MX23_PAD_LCD_D14__LCD_D14 0x10e0
-MX23_PAD_LCD_D15__LCD_D15 0x10f0
-MX23_PAD_LCD_D16__LCD_D16 0x1100
-MX23_PAD_LCD_D17__LCD_D17 0x1110
-MX23_PAD_LCD_RESET__LCD_RESET 0x1120
-MX23_PAD_LCD_RS__LCD_RS 0x1130
-MX23_PAD_LCD_WR__LCD_WR 0x1140
-MX23_PAD_LCD_CS__LCD_CS 0x1150
-MX23_PAD_LCD_DOTCK__LCD_DOTCK 0x1160
-MX23_PAD_LCD_ENABLE__LCD_ENABLE 0x1170
-MX23_PAD_LCD_HSYNC__LCD_HSYNC 0x1180
-MX23_PAD_LCD_VSYNC__LCD_VSYNC 0x1190
-MX23_PAD_PWM0__PWM0 0x11a0
-MX23_PAD_PWM1__PWM1 0x11b0
-MX23_PAD_PWM2__PWM2 0x11c0
-MX23_PAD_PWM3__PWM3 0x11d0
-MX23_PAD_PWM4__PWM4 0x11e0
-MX23_PAD_SSP1_CMD__SSP1_CMD 0x2000
-MX23_PAD_SSP1_DETECT__SSP1_DETECT 0x2010
-MX23_PAD_SSP1_DATA0__SSP1_DATA0 0x2020
-MX23_PAD_SSP1_DATA1__SSP1_DATA1 0x2030
-MX23_PAD_SSP1_DATA2__SSP1_DATA2 0x2040
-MX23_PAD_SSP1_DATA3__SSP1_DATA3 0x2050
-MX23_PAD_SSP1_SCK__SSP1_SCK 0x2060
-MX23_PAD_ROTARYA__ROTARYA 0x2070
-MX23_PAD_ROTARYB__ROTARYB 0x2080
-MX23_PAD_EMI_A00__EMI_A00 0x2090
-MX23_PAD_EMI_A01__EMI_A01 0x20a0
-MX23_PAD_EMI_A02__EMI_A02 0x20b0
-MX23_PAD_EMI_A03__EMI_A03 0x20c0
-MX23_PAD_EMI_A04__EMI_A04 0x20d0
-MX23_PAD_EMI_A05__EMI_A05 0x20e0
-MX23_PAD_EMI_A06__EMI_A06 0x20f0
-MX23_PAD_EMI_A07__EMI_A07 0x2100
-MX23_PAD_EMI_A08__EMI_A08 0x2110
-MX23_PAD_EMI_A09__EMI_A09 0x2120
-MX23_PAD_EMI_A10__EMI_A10 0x2130
-MX23_PAD_EMI_A11__EMI_A11 0x2140
-MX23_PAD_EMI_A12__EMI_A12 0x2150
-MX23_PAD_EMI_BA0__EMI_BA0 0x2160
-MX23_PAD_EMI_BA1__EMI_BA1 0x2170
-MX23_PAD_EMI_CASN__EMI_CASN 0x2180
-MX23_PAD_EMI_CE0N__EMI_CE0N 0x2190
-MX23_PAD_EMI_CE1N__EMI_CE1N 0x21a0
-MX23_PAD_GPMI_CE1N__GPMI_CE1N 0x21b0
-MX23_PAD_GPMI_CE0N__GPMI_CE0N 0x21c0
-MX23_PAD_EMI_CKE__EMI_CKE 0x21d0
-MX23_PAD_EMI_RASN__EMI_RASN 0x21e0
-MX23_PAD_EMI_WEN__EMI_WEN 0x21f0
-MX23_PAD_EMI_D00__EMI_D00 0x3000
-MX23_PAD_EMI_D01__EMI_D01 0x3010
-MX23_PAD_EMI_D02__EMI_D02 0x3020
-MX23_PAD_EMI_D03__EMI_D03 0x3030
-MX23_PAD_EMI_D04__EMI_D04 0x3040
-MX23_PAD_EMI_D05__EMI_D05 0x3050
-MX23_PAD_EMI_D06__EMI_D06 0x3060
-MX23_PAD_EMI_D07__EMI_D07 0x3070
-MX23_PAD_EMI_D08__EMI_D08 0x3080
-MX23_PAD_EMI_D09__EMI_D09 0x3090
-MX23_PAD_EMI_D10__EMI_D10 0x30a0
-MX23_PAD_EMI_D11__EMI_D11 0x30b0
-MX23_PAD_EMI_D12__EMI_D12 0x30c0
-MX23_PAD_EMI_D13__EMI_D13 0x30d0
-MX23_PAD_EMI_D14__EMI_D14 0x30e0
-MX23_PAD_EMI_D15__EMI_D15 0x30f0
-MX23_PAD_EMI_DQM0__EMI_DQM0 0x3100
-MX23_PAD_EMI_DQM1__EMI_DQM1 0x3110
-MX23_PAD_EMI_DQS0__EMI_DQS0 0x3120
-MX23_PAD_EMI_DQS1__EMI_DQS1 0x3130
-MX23_PAD_EMI_CLK__EMI_CLK 0x3140
-MX23_PAD_EMI_CLKN__EMI_CLKN 0x3150
-MX23_PAD_GPMI_D00__LCD_D8 0x0001
-MX23_PAD_GPMI_D01__LCD_D9 0x0011
-MX23_PAD_GPMI_D02__LCD_D10 0x0021
-MX23_PAD_GPMI_D03__LCD_D11 0x0031
-MX23_PAD_GPMI_D04__LCD_D12 0x0041
-MX23_PAD_GPMI_D05__LCD_D13 0x0051
-MX23_PAD_GPMI_D06__LCD_D14 0x0061
-MX23_PAD_GPMI_D07__LCD_D15 0x0071
-MX23_PAD_GPMI_D08__LCD_D18 0x0081
-MX23_PAD_GPMI_D09__LCD_D19 0x0091
-MX23_PAD_GPMI_D10__LCD_D20 0x00a1
-MX23_PAD_GPMI_D11__LCD_D21 0x00b1
-MX23_PAD_GPMI_D12__LCD_D22 0x00c1
-MX23_PAD_GPMI_D13__LCD_D23 0x00d1
-MX23_PAD_GPMI_D14__AUART2_RX 0x00e1
-MX23_PAD_GPMI_D15__AUART2_TX 0x00f1
-MX23_PAD_GPMI_CLE__LCD_D16 0x0101
-MX23_PAD_GPMI_ALE__LCD_D17 0x0111
-MX23_PAD_GPMI_CE2N__ATA_A2 0x0121
-MX23_PAD_AUART1_RTS__IR_CLK 0x01b1
-MX23_PAD_AUART1_RX__IR_RX 0x01c1
-MX23_PAD_AUART1_TX__IR_TX 0x01d1
-MX23_PAD_I2C_SCL__GPMI_RDY2 0x01e1
-MX23_PAD_I2C_SDA__GPMI_CE2N 0x01f1
-MX23_PAD_LCD_D00__ETM_DA8 0x1001
-MX23_PAD_LCD_D01__ETM_DA9 0x1011
-MX23_PAD_LCD_D02__ETM_DA10 0x1021
-MX23_PAD_LCD_D03__ETM_DA11 0x1031
-MX23_PAD_LCD_D04__ETM_DA12 0x1041
-MX23_PAD_LCD_D05__ETM_DA13 0x1051
-MX23_PAD_LCD_D06__ETM_DA14 0x1061
-MX23_PAD_LCD_D07__ETM_DA15 0x1071
-MX23_PAD_LCD_D08__ETM_DA0 0x1081
-MX23_PAD_LCD_D09__ETM_DA1 0x1091
-MX23_PAD_LCD_D10__ETM_DA2 0x10a1
-MX23_PAD_LCD_D11__ETM_DA3 0x10b1
-MX23_PAD_LCD_D12__ETM_DA4 0x10c1
-MX23_PAD_LCD_D13__ETM_DA5 0x10d1
-MX23_PAD_LCD_D14__ETM_DA6 0x10e1
-MX23_PAD_LCD_D15__ETM_DA7 0x10f1
-MX23_PAD_LCD_RESET__ETM_TCTL 0x1121
-MX23_PAD_LCD_RS__ETM_TCLK 0x1131
-MX23_PAD_LCD_DOTCK__GPMI_RDY3 0x1161
-MX23_PAD_LCD_ENABLE__I2C_SCL 0x1171
-MX23_PAD_LCD_HSYNC__I2C_SDA 0x1181
-MX23_PAD_LCD_VSYNC__LCD_BUSY 0x1191
-MX23_PAD_PWM0__ROTARYA 0x11a1
-MX23_PAD_PWM1__ROTARYB 0x11b1
-MX23_PAD_PWM2__GPMI_RDY3 0x11c1
-MX23_PAD_PWM3__ETM_TCTL 0x11d1
-MX23_PAD_PWM4__ETM_TCLK 0x11e1
-MX23_PAD_SSP1_DETECT__GPMI_CE3N 0x2011
-MX23_PAD_SSP1_DATA1__I2C_SCL 0x2031
-MX23_PAD_SSP1_DATA2__I2C_SDA 0x2041
-MX23_PAD_ROTARYA__AUART2_RTS 0x2071
-MX23_PAD_ROTARYB__AUART2_CTS 0x2081
-MX23_PAD_GPMI_D00__SSP2_DATA0 0x0002
-MX23_PAD_GPMI_D01__SSP2_DATA1 0x0012
-MX23_PAD_GPMI_D02__SSP2_DATA2 0x0022
-MX23_PAD_GPMI_D03__SSP2_DATA3 0x0032
-MX23_PAD_GPMI_D04__SSP2_DATA4 0x0042
-MX23_PAD_GPMI_D05__SSP2_DATA5 0x0052
-MX23_PAD_GPMI_D06__SSP2_DATA6 0x0062
-MX23_PAD_GPMI_D07__SSP2_DATA7 0x0072
-MX23_PAD_GPMI_D08__SSP1_DATA4 0x0082
-MX23_PAD_GPMI_D09__SSP1_DATA5 0x0092
-MX23_PAD_GPMI_D10__SSP1_DATA6 0x00a2
-MX23_PAD_GPMI_D11__SSP1_DATA7 0x00b2
-MX23_PAD_GPMI_D15__GPMI_CE3N 0x00f2
-MX23_PAD_GPMI_RDY0__SSP2_DETECT 0x0132
-MX23_PAD_GPMI_RDY1__SSP2_CMD 0x0142
-MX23_PAD_GPMI_WRN__SSP2_SCK 0x0182
-MX23_PAD_AUART1_CTS__SSP1_DATA4 0x01a2
-MX23_PAD_AUART1_RTS__SSP1_DATA5 0x01b2
-MX23_PAD_AUART1_RX__SSP1_DATA6 0x01c2
-MX23_PAD_AUART1_TX__SSP1_DATA7 0x01d2
-MX23_PAD_I2C_SCL__AUART1_TX 0x01e2
-MX23_PAD_I2C_SDA__AUART1_RX 0x01f2
-MX23_PAD_LCD_D08__SAIF2_SDATA0 0x1082
-MX23_PAD_LCD_D09__SAIF1_SDATA0 0x1092
-MX23_PAD_LCD_D10__SAIF_MCLK_BITCLK 0x10a2
-MX23_PAD_LCD_D11__SAIF_LRCLK 0x10b2
-MX23_PAD_LCD_D12__SAIF2_SDATA1 0x10c2
-MX23_PAD_LCD_D13__SAIF2_SDATA2 0x10d2
-MX23_PAD_LCD_D14__SAIF1_SDATA2 0x10e2
-MX23_PAD_LCD_D15__SAIF1_SDATA1 0x10f2
-MX23_PAD_LCD_D16__SAIF_ALT_BITCLK 0x1102
-MX23_PAD_LCD_RESET__GPMI_CE3N 0x1122
-MX23_PAD_PWM0__DUART_RX 0x11a2
-MX23_PAD_PWM1__DUART_TX 0x11b2
-MX23_PAD_PWM3__AUART1_CTS 0x11d2
-MX23_PAD_PWM4__AUART1_RTS 0x11e2
-MX23_PAD_SSP1_CMD__JTAG_TDO 0x2002
-MX23_PAD_SSP1_DETECT__USB_OTG_ID 0x2012
-MX23_PAD_SSP1_DATA0__JTAG_TDI 0x2022
-MX23_PAD_SSP1_DATA1__JTAG_TCLK 0x2032
-MX23_PAD_SSP1_DATA2__JTAG_RTCK 0x2042
-MX23_PAD_SSP1_DATA3__JTAG_TMS 0x2052
-MX23_PAD_SSP1_SCK__JTAG_TRST 0x2062
-MX23_PAD_ROTARYA__SPDIF 0x2072
-MX23_PAD_ROTARYB__GPMI_CE3N 0x2082
-MX23_PAD_GPMI_D00__GPIO_0_0 0x0003
-MX23_PAD_GPMI_D01__GPIO_0_1 0x0013
-MX23_PAD_GPMI_D02__GPIO_0_2 0x0023
-MX23_PAD_GPMI_D03__GPIO_0_3 0x0033
-MX23_PAD_GPMI_D04__GPIO_0_4 0x0043
-MX23_PAD_GPMI_D05__GPIO_0_5 0x0053
-MX23_PAD_GPMI_D06__GPIO_0_6 0x0063
-MX23_PAD_GPMI_D07__GPIO_0_7 0x0073
-MX23_PAD_GPMI_D08__GPIO_0_8 0x0083
-MX23_PAD_GPMI_D09__GPIO_0_9 0x0093
-MX23_PAD_GPMI_D10__GPIO_0_10 0x00a3
-MX23_PAD_GPMI_D11__GPIO_0_11 0x00b3
-MX23_PAD_GPMI_D12__GPIO_0_12 0x00c3
-MX23_PAD_GPMI_D13__GPIO_0_13 0x00d3
-MX23_PAD_GPMI_D14__GPIO_0_14 0x00e3
-MX23_PAD_GPMI_D15__GPIO_0_15 0x00f3
-MX23_PAD_GPMI_CLE__GPIO_0_16 0x0103
-MX23_PAD_GPMI_ALE__GPIO_0_17 0x0113
-MX23_PAD_GPMI_CE2N__GPIO_0_18 0x0123
-MX23_PAD_GPMI_RDY0__GPIO_0_19 0x0133
-MX23_PAD_GPMI_RDY1__GPIO_0_20 0x0143
-MX23_PAD_GPMI_RDY2__GPIO_0_21 0x0153
-MX23_PAD_GPMI_RDY3__GPIO_0_22 0x0163
-MX23_PAD_GPMI_WPN__GPIO_0_23 0x0173
-MX23_PAD_GPMI_WRN__GPIO_0_24 0x0183
-MX23_PAD_GPMI_RDN__GPIO_0_25 0x0193
-MX23_PAD_AUART1_CTS__GPIO_0_26 0x01a3
-MX23_PAD_AUART1_RTS__GPIO_0_27 0x01b3
-MX23_PAD_AUART1_RX__GPIO_0_28 0x01c3
-MX23_PAD_AUART1_TX__GPIO_0_29 0x01d3
-MX23_PAD_I2C_SCL__GPIO_0_30 0x01e3
-MX23_PAD_I2C_SDA__GPIO_0_31 0x01f3
-MX23_PAD_LCD_D00__GPIO_1_0 0x1003
-MX23_PAD_LCD_D01__GPIO_1_1 0x1013
-MX23_PAD_LCD_D02__GPIO_1_2 0x1023
-MX23_PAD_LCD_D03__GPIO_1_3 0x1033
-MX23_PAD_LCD_D04__GPIO_1_4 0x1043
-MX23_PAD_LCD_D05__GPIO_1_5 0x1053
-MX23_PAD_LCD_D06__GPIO_1_6 0x1063
-MX23_PAD_LCD_D07__GPIO_1_7 0x1073
-MX23_PAD_LCD_D08__GPIO_1_8 0x1083
-MX23_PAD_LCD_D09__GPIO_1_9 0x1093
-MX23_PAD_LCD_D10__GPIO_1_10 0x10a3
-MX23_PAD_LCD_D11__GPIO_1_11 0x10b3
-MX23_PAD_LCD_D12__GPIO_1_12 0x10c3
-MX23_PAD_LCD_D13__GPIO_1_13 0x10d3
-MX23_PAD_LCD_D14__GPIO_1_14 0x10e3
-MX23_PAD_LCD_D15__GPIO_1_15 0x10f3
-MX23_PAD_LCD_D16__GPIO_1_16 0x1103
-MX23_PAD_LCD_D17__GPIO_1_17 0x1113
-MX23_PAD_LCD_RESET__GPIO_1_18 0x1123
-MX23_PAD_LCD_RS__GPIO_1_19 0x1133
-MX23_PAD_LCD_WR__GPIO_1_20 0x1143
-MX23_PAD_LCD_CS__GPIO_1_21 0x1153
-MX23_PAD_LCD_DOTCK__GPIO_1_22 0x1163
-MX23_PAD_LCD_ENABLE__GPIO_1_23 0x1173
-MX23_PAD_LCD_HSYNC__GPIO_1_24 0x1183
-MX23_PAD_LCD_VSYNC__GPIO_1_25 0x1193
-MX23_PAD_PWM0__GPIO_1_26 0x11a3
-MX23_PAD_PWM1__GPIO_1_27 0x11b3
-MX23_PAD_PWM2__GPIO_1_28 0x11c3
-MX23_PAD_PWM3__GPIO_1_29 0x11d3
-MX23_PAD_PWM4__GPIO_1_30 0x11e3
-MX23_PAD_SSP1_CMD__GPIO_2_0 0x2003
-MX23_PAD_SSP1_DETECT__GPIO_2_1 0x2013
-MX23_PAD_SSP1_DATA0__GPIO_2_2 0x2023
-MX23_PAD_SSP1_DATA1__GPIO_2_3 0x2033
-MX23_PAD_SSP1_DATA2__GPIO_2_4 0x2043
-MX23_PAD_SSP1_DATA3__GPIO_2_5 0x2053
-MX23_PAD_SSP1_SCK__GPIO_2_6 0x2063
-MX23_PAD_ROTARYA__GPIO_2_7 0x2073
-MX23_PAD_ROTARYB__GPIO_2_8 0x2083
-MX23_PAD_EMI_A00__GPIO_2_9 0x2093
-MX23_PAD_EMI_A01__GPIO_2_10 0x20a3
-MX23_PAD_EMI_A02__GPIO_2_11 0x20b3
-MX23_PAD_EMI_A03__GPIO_2_12 0x20c3
-MX23_PAD_EMI_A04__GPIO_2_13 0x20d3
-MX23_PAD_EMI_A05__GPIO_2_14 0x20e3
-MX23_PAD_EMI_A06__GPIO_2_15 0x20f3
-MX23_PAD_EMI_A07__GPIO_2_16 0x2103
-MX23_PAD_EMI_A08__GPIO_2_17 0x2113
-MX23_PAD_EMI_A09__GPIO_2_18 0x2123
-MX23_PAD_EMI_A10__GPIO_2_19 0x2133
-MX23_PAD_EMI_A11__GPIO_2_20 0x2143
-MX23_PAD_EMI_A12__GPIO_2_21 0x2153
-MX23_PAD_EMI_BA0__GPIO_2_22 0x2163
-MX23_PAD_EMI_BA1__GPIO_2_23 0x2173
-MX23_PAD_EMI_CASN__GPIO_2_24 0x2183
-MX23_PAD_EMI_CE0N__GPIO_2_25 0x2193
-MX23_PAD_EMI_CE1N__GPIO_2_26 0x21a3
-MX23_PAD_GPMI_CE1N__GPIO_2_27 0x21b3
-MX23_PAD_GPMI_CE0N__GPIO_2_28 0x21c3
-MX23_PAD_EMI_CKE__GPIO_2_29 0x21d3
-MX23_PAD_EMI_RASN__GPIO_2_30 0x21e3
-MX23_PAD_EMI_WEN__GPIO_2_31 0x21f3
+Valid values for i.MX28/i.MX23 pinmux-id are defined in
+arch/arm/boot/dts/imx28-pinfunc.h and arch/arm/boot/dts/imx23-pinfunc.h.
+The definitions for the padconfig properties can be found in
+arch/arm/boot/dts/mxs-pinfunc.h.
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
index 5a02e30dd262..7069a0b84e3a 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
@@ -72,6 +72,13 @@ Optional properties:
/* pin base, nr pins & gpio function */
pinctrl-single,gpio-range = <&range 0 3 0 &range 3 9 1>;
+- interrupt-controller : standard interrupt controller binding, used when
+  the pins are used as interrupt sources, for example for wake-up events.
+  In this case pinctrl-single is set up as a chained interrupt controller
+  and the wake-up interrupts can be requested by drivers using
+  request_irq(); a sketch follows the examples below.
+
+- #interrupt-cells : standard interrupt binding if using interrupts
+
This driver assumes that there is only one register for each pin (unless the
pinctrl-single,bit-per-mux is set), and uses the common pinctrl bindings as
specified in the pinctrl-bindings.txt document in this directory.
@@ -121,6 +128,8 @@ pmx_core: pinmux@4a100040 {
reg = <0x4a100040 0x0196>;
#address-cells = <1>;
#size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
pinctrl-single,register-width = <16>;
pinctrl-single,function-mask = <0xffff>;
};
@@ -131,6 +140,8 @@ pmx_wkup: pinmux@4a31e040 {
reg = <0x4a31e040 0x0038>;
#address-cells = <1>;
#size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
pinctrl-single,register-width = <16>;
pinctrl-single,function-mask = <0xffff>;
};
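
For illustration, a minimal sketch of how a client driver might request one of
these wake-up interrupts once pinctrl-single is set up as a chained interrupt
controller. The driver and handler names are hypothetical; only the standard
platform_get_irq() and request_irq() kernel APIs are assumed.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_wakeup_handler(int irq, void *dev_id)
{
	/* Handle the wake-up event signalled through the pad wake-up bit. */
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq, ret;

	/* The interrupt comes from the device node's "interrupts" property,
	 * with the pinctrl-single node as the interrupt parent. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, example_wakeup_handler, 0,
			  "example-wakeup", pdev);
	if (ret)
		return ret;

	return 0;
}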
diff --git a/Documentation/devicetree/bindings/power_supply/ti,bq24735.txt b/Documentation/devicetree/bindings/power_supply/ti,bq24735.txt
new file mode 100644
index 000000000000..4f6a550184d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/ti,bq24735.txt
@@ -0,0 +1,32 @@
+TI BQ24735 Charge Controller
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Required properties :
+ - compatible : "ti,bq24735"
+
+Optional properties :
+ - interrupts : Specify the interrupt to be used to trigger when the AC
+ adapter is either plugged in or removed.
+ - ti,ac-detect-gpios : This GPIO is optionally used to read the AC adapter
+ presence. This is a Host GPIO that is configured as an input and
+ connected to the bq24735.
+ - ti,charge-current : Used to control and set the charging current. This value
+ must be between 128mA and 8.128A with a 64mA step resolution. The POR value
+ is 0x0000h. This number is in mA (e.g. 8192), see spec for more information
+ about the ChargeCurrent (0x14h) register.
+ - ti,charge-voltage : Used to control and set the charging voltage. This value
+ must be between 1.024V and 19.2V with a 16mV step resolution. The POR value
+ is 0x0000h. This number is in mV (e.g. 19200), see spec for more information
+ about the ChargeVoltage (0x15h) register.
+ - ti,input-current : Used to control and set the charger input current. This
+ value must be between 128mA and 8.064A with a 128mA step resolution. The
+ POR value is 0x1000h. This number is in mA (e.g. 8064), see the spec for
+ more information about the InputCurrent (0x3fh) register.
+
+Example:
+
+ bq24735@9 {
+ compatible = "ti,bq24735";
+ reg = <0x9>;
+ ti,ac-detect-gpios = <&gpio 72 0x1>;
+ }
diff --git a/Documentation/devicetree/bindings/regulator/as3722-regulator.txt b/Documentation/devicetree/bindings/regulator/as3722-regulator.txt
new file mode 100644
index 000000000000..caad0c8a258d
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/as3722-regulator.txt
@@ -0,0 +1,91 @@
+Regulator of AMS AS3722 PMIC.
+Name of the regulator subnode must be "regulators".
+
+Optional properties:
+--------------------
+The input supplies of the regulators are optional properties on the
+regulator node. The AS3722 has 7 DCDC step-down regulators, sd[0-6], and
+11 LDOs, ldo[0-7] and ldo[9-11]. The input supplies of these regulators
+are provided through the following properties:
+vsup-sd2-supply: Input supply for SD2.
+vsup-sd3-supply: Input supply for SD3.
+vsup-sd4-supply: Input supply for SD4.
+vsup-sd5-supply: Input supply for SD5.
+vin-ldo0-supply: Input supply for LDO0.
+vin-ldo1-6-supply: Input supply for LDO1 and LDO6.
+vin-ldo2-5-7-supply: Input supply for LDO2, LDO5 and LDO7.
+vin-ldo3-4-supply: Input supply for LDO3 and LDO4.
+vin-ldo9-10-supply: Input supply for LDO9 and LDO10.
+vin-ldo11-supply: Input supply for LDO11.
+
+Optional nodes:
+--------------
+- regulators : Must contain a sub-node per regulator from the list below.
+ Each sub-node should contain the constraints and initialization
+ information for that regulator. See regulator.txt for a
+ description of standard properties for these sub-nodes.
+ Additional custom properties are listed below.
+ sd[0-6], ldo[0-7], ldo[9-11].
+
+ Optional sub-node properties:
+ ----------------------------
+ ams,ext-control: External control of the rail. The value of
+ this property tells which external input controls
+ this rail. Valid values are 0, 1, 2 and 3.
+ 0: There is no external control of this rail.
+ 1: Rail is controlled by ENABLE1 input pin.
+ 2: Rail is controlled by ENABLE2 input pin.
+ 3: Rail is controlled by ENABLE3 input pin.
+ ams,enable-tracking: Enable tracking with SD1, only supported
+ by LDO3.
+
+Example:
+-------
+ ams3722: ams3722 {
+ compatible = "ams,as3722";
+ reg = <0x40>;
+ ...
+
+ regulators {
+ vsup-sd2-supply = <...>;
+ ...
+
+ sd0 {
+ regulator-name = "vdd_cpu";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ ams,ext-control = <2>;
+ };
+
+ sd1 {
+ regulator-name = "vdd_core";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ ams,ext-control = <1>;
+ };
+
+ sd2 {
+ regulator-name = "vddio_ddr";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ };
+
+ sd4 {
+ regulator-name = "avdd-hdmi-pex";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-always-on;
+ };
+
+ sd5 {
+ regulator-name = "vdd-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ ....
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/da9210.txt b/Documentation/devicetree/bindings/regulator/da9210.txt
new file mode 100644
index 000000000000..f120f229d67d
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/da9210.txt
@@ -0,0 +1,21 @@
+* Dialog Semiconductor DA9210 Voltage Regulator
+
+Required properties:
+
+- compatible: must be "diasemi,da9210"
+- reg: the i2c slave address of the regulator. It should be 0x68.
+
+Any standard regulator properties can be used to configure the single da9210
+DCDC.
+
+Example:
+
+ da9210@68 {
+ compatible = "diasemi,da9210";
+ reg = <0x68>;
+
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
diff --git a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
index 875639ae0606..42e6b6bc48ff 100644
--- a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
+++ b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
@@ -26,11 +26,17 @@ Optional nodes:
For ti,palmas-pmic - smps12, smps123, smps3 depending on OTP,
smps45, smps457, smps7 depending on variant, smps6, smps[8-9],
- smps10_out2, smps10_out1, do[1-9], ldoln, ldousb.
+ smps10_out2, smps10_out1, ldo[1-9], ldoln, ldousb.
Optional sub-node properties:
ti,warm-reset - maintain voltage during warm reset(boolean)
- ti,roof-floor - control voltage selection by pin(boolean)
+ ti,roof-floor - Optional argument, on platforms supporting it, selecting
+ the external pin that controls the rail. If no argument is given,
+ control by the NSLEEP pin is assumed.
+ The valid values for the external pins are:
+ 1 for ENABLE1,
+ 2 for ENABLE2 or
+ 3 for NSLEEP.
ti,mode-sleep - mode to adopt in pmic sleep 0 - off, 1 - auto,
2 - eco, 3 - forced pwm
ti,smps-range - OTP has the wrong range set for the hardware so override
@@ -61,7 +67,7 @@ pmic {
regulator-always-on;
regulator-boot-on;
ti,warm-reset;
- ti,roof-floor;
+ ti,roof-floor = <1>; /* ENABLE1 control */
ti,mode-sleep = <0>;
ti,smps-range = <1>;
};
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index 2bd8f0978765..e2c7f1e7251a 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -14,6 +14,11 @@ Optional properties:
- regulator-ramp-delay: ramp delay for regulator(in uV/uS)
For hardwares which support disabling ramp rate, it should be explicitly
intialised to zero (regulator-ramp-delay = <0>) for disabling ramp delay.
+- regulator-enable-ramp-delay: The time taken, in microseconds, for the supply
+ rail to reach the target voltage, plus/minus whatever tolerance the board
+ design requires. This property describes the total system ramp time
+ required due to the combination of internal ramping of the regulator itself,
+ and board design issues such as trace capacitance and load on the supply.
Deprecated properties:
- regulator-compatible: If a regulator chip contains multiple
diff --git a/Documentation/devicetree/bindings/rng/qcom,prng.txt b/Documentation/devicetree/bindings/rng/qcom,prng.txt
new file mode 100644
index 000000000000..8e5853c2879b
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/qcom,prng.txt
@@ -0,0 +1,17 @@
+Qualcomm MSM pseudo random number generator.
+
+Required properties:
+
+- compatible : should be "qcom,prng"
+- reg : specifies base physical address and size of the registers map
+- clocks : phandle to clock-controller plus clock-specifier pair
+- clock-names : "core" clocks all registers, FIFO and circuits in PRNG IP block
+
+Example:
+
+ rng@f9bff000 {
+ compatible = "qcom,prng";
+ reg = <0xf9bff000 0x200>;
+ clocks = <&clock GCC_PRNG_AHB_CLK>;
+ clock-names = "core";
+ };
diff --git a/Documentation/devicetree/bindings/sound/cs42l73.txt b/Documentation/devicetree/bindings/sound/cs42l73.txt
new file mode 100644
index 000000000000..80ae910dbf6c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs42l73.txt
@@ -0,0 +1,22 @@
+CS42L73 audio CODEC
+
+Required properties:
+
+ - compatible : "cirrus,cs42l73"
+
+ - reg : the I2C address of the device for I2C
+
+Optional properties:
+
+ - reset_gpio : a GPIO spec for the reset pin.
+ - chgfreq : Charge Pump Frequency values 0x00-0x0F
+
+
+Example:
+
+codec: cs42l73@4a {
+ compatible = "cirrus,cs42l73";
+ reg = <0x4a>;
+ reset_gpio = <&gpio 10 0>;
+ chgfreq = <0x05>;
+};
\ No newline at end of file
diff --git a/Documentation/devicetree/bindings/sound/davinci-evm-audio.txt b/Documentation/devicetree/bindings/sound/davinci-evm-audio.txt
new file mode 100644
index 000000000000..865178d5cdf3
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/davinci-evm-audio.txt
@@ -0,0 +1,42 @@
+* Texas Instruments SoC audio setups with TLV320AIC3X Codec
+
+Required properties:
+- compatible : "ti,da830-evm-audio" : for DM365/DA8xx/OMAPL1x/AM33xx
+- ti,model : The user-visible name of this sound complex.
+- ti,audio-codec : The phandle of the TLV320AIC3x audio codec
+- ti,mcasp-controller : The phandle of the McASP controller
+- ti,codec-clock-rate : The Codec Clock rate (in Hz) applied to the Codec
+- ti,audio-routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources and
+ sinks are the codec's pins, and the jacks on the board:
+
+ Board connectors:
+
+ * Headphone Jack
+ * Line Out
+ * Mic Jack
+ * Line In
+
+
+Example:
+
+sound {
+ compatible = "ti,da830-evm-audio";
+ ti,model = "DA830 EVM";
+ ti,audio-codec = <&tlv320aic3x>;
+ ti,mcasp-controller = <&mcasp1>;
+ ti,codec-clock-rate = <12000000>;
+ ti,audio-routing =
+ "Headphone Jack", "HPLOUT",
+ "Headphone Jack", "HPROUT",
+ "Line Out", "LLOUT",
+ "Line Out", "RLOUT",
+ "MIC3L", "Mic Bias 2V",
+ "MIC3R", "Mic Bias 2V",
+ "Mic Bias 2V", "Mic Jack",
+ "LINE1L", "Line In",
+ "LINE2L", "Line In",
+ "LINE1R", "Line In",
+ "LINE2R", "Line In";
+};
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
index 374e145c2ef1..ed785b3f67be 100644
--- a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
+++ b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
@@ -4,17 +4,25 @@ Required properties:
- compatible :
"ti,dm646x-mcasp-audio" : for DM646x platforms
"ti,da830-mcasp-audio" : for both DA830 & DA850 platforms
- "ti,omap2-mcasp-audio" : for OMAP2 platforms (TI81xx, AM33xx)
-
-- reg : Should contain McASP registers offset and length
-- interrupts : Interrupt number for McASP
-- op-mode : I2S/DIT ops mode.
-- tdm-slots : Slots for TDM operation.
-- num-serializer : Serializers used by McASP.
-- serial-dir : A list of serializer pin mode. The list number should be equal
- to "num-serializer" parameter. Each entry is a number indication
- serializer pin direction. (0 - INACTIVE, 1 - TX, 2 - RX)
+ "ti,am33xx-mcasp-audio" : for AM33xx platforms (AM33xx, TI81xx)
+- reg : Should contain reg specifiers for the entries in the reg-names property.
+- reg-names : Should contain:
+ * "mpu" for the main registers (required). For compatibility with
+ existing software, it is recommended this is the first entry.
+ * "dat" for separate data port register access (optional).
+- op-mode : I2S/DIT ops mode. 0 for I2S mode. 1 for DIT mode used for S/PDIF,
+ IEC60958-1, and AES-3 formats.
+- tdm-slots : Slots for TDM operation. Indicates number of channels transmitted
+ or received over one serializer.
+- serial-dir : A list of serializer configurations. Each entry is a number
+ indicating the serializer pin direction.
+ (0 - INACTIVE, 1 - TX, 2 - RX)
+- dmas: two element list of DMA controller phandles and DMA request line
+ ordered pairs.
+- dma-names: identifier string for each DMA request line in the dmas property.
+ These strings correspond 1:1 with the ordered pairs in dmas. The dma
+ identifiers must be "rx" and "tx".
Optional properties:
@@ -23,18 +31,23 @@ Optional properties:
- rx-num-evt : FIFO levels.
- sram-size-playback : size of sram to be allocated during playback
- sram-size-capture : size of sram to be allocated during capture
+- interrupts : Interrupt numbers for McASP, currently not used by the driver
+- interrupt-names : Known interrupt names are "tx" and "rx"
+- pinctrl-0: Should specify pin control group used for this controller.
+- pinctrl-names: Should contain only one value - "default", for more details
+ please refer to pinctrl-bindings.txt
+
Example:
mcasp0: mcasp0@1d00000 {
compatible = "ti,da830-mcasp-audio";
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0x100000 0x3000>;
- interrupts = <82 83>;
+ reg-names = "mpu";
+ interrupts = <82>, <83>;
+ interrupt-names = "tx", "rx";
op-mode = <0>; /* MCASP_IIS_MODE */
tdm-slots = <2>;
- num-serializer = <16>;
serial-dir = <
0 0 0 0 /* 0: INACTIVE, 1: TX, 2: RX */
0 0 0 0
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
index 705a6b156c6c..5e6040c2c2e9 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
@@ -24,10 +24,36 @@ Optional properties:
3 - MICBIAS output is connected to AVDD,
If this node is not mentioned or if the value is incorrect, then MicBias
is powered down.
+- AVDD-supply, IOVDD-supply, DRVDD-supply, DVDD-supply : power supplies for the
+ device as covered in Documentation/devicetree/bindings/regulator/regulator.txt
+
+CODEC output pins:
+ * LLOUT
+ * RLOUT
+ * MONO_LOUT
+ * HPLOUT
+ * HPROUT
+ * HPLCOM
+ * HPRCOM
+
+CODEC input pins:
+ * MIC3L
+ * MIC3R
+ * LINE1L
+ * LINE2L
+ * LINE1R
+ * LINE2R
+
+These pins can be referenced in the sound node's audio-routing property.
Example:
tlv320aic3x: tlv320aic3x@1b {
compatible = "ti,tlv320aic3x";
reg = <0x1b>;
+
+ AVDD-supply = <&regulator>;
+ IOVDD-supply = <&regulator>;
+ DRVDD-supply = <&regulator>;
+ DVDD-supply = <&regulator>;
};
diff --git a/Documentation/devicetree/bindings/sound/tpa6130a2.txt b/Documentation/devicetree/bindings/sound/tpa6130a2.txt
new file mode 100644
index 000000000000..6dfa740e4b2d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tpa6130a2.txt
@@ -0,0 +1,27 @@
+Texas Instruments - tpa6130a2 Codec module
+
+The tpa6130a2 serial control bus communicates through the I2C protocol
+
+Required properties:
+
+- compatible - "string" - One of:
+ "ti,tpa6130a2" - TPA6130A2
+ "ti,tpa6140a2" - TPA6140A2
+
+
+- reg - <int> - I2C slave address
+
+- Vdd-supply - <phandle> - power supply regulator
+
+Optional properties:
+
+- power-gpio - gpio pin to power the device
+
+Example:
+
+tpa6130a2: tpa6130a2@60 {
+ compatible = "ti,tpa6130a2";
+ reg = <0x60>;
+ Vdd-supply = <&vmmc2>;
+ power-gpio = <&gpio4 2 GPIO_ACTIVE_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/timer/efm32,timer.txt b/Documentation/devicetree/bindings/timer/efm32,timer.txt
new file mode 100644
index 000000000000..97a568f696c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/efm32,timer.txt
@@ -0,0 +1,23 @@
+* EFM32 timer hardware
+
+The efm32 Giant Gecko SoCs come with four 16 bit timers. Two counters can be
+connected to form a 32 bit counter. Each timer has three Compare/Capture
+channels and can be used as PWM or Quadrature Decoder. Available clock sources
+are the cpu's HFPERCLK (with a 10-bit prescaler) or an external pin.
+
+Required properties:
+- compatible : Should be efm32,timer
+- reg : Address and length of the register set
+- clocks : Should contain a reference to the HFPERCLK
+
+Optional properties:
+- interrupts : Reference to the timer interrupt
+
+Example:
+
+timer@40010c00 {
+ compatible = "efm32,timer";
+ reg = <0x40010c00 0x400>;
+ interrupts = <14>;
+ clocks = <&cmu clk_HFPERCLKTIMER3>;
+};
diff --git a/Documentation/devicetree/bindings/tty/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/tty/serial/renesas,sci-serial.txt
new file mode 100644
index 000000000000..6ad1adfd4577
--- /dev/null
+++ b/Documentation/devicetree/bindings/tty/serial/renesas,sci-serial.txt
@@ -0,0 +1,53 @@
+* Renesas SH-Mobile Serial Communication Interface
+
+Required properties:
+- compatible : Should be "renesas,sci-<port type>-uart", where <port type> may be
+ SCI, SCIF, IRDA, SCIFA or SCIFB.
+- reg : Address and length of the register set for the device
+- interrupts : Should contain the following IRQs: ERI, RXI, TXI and BRI.
+- cell-index : The device id.
+- renesas,scscr : Should contain a bitfield used by the Serial Control Register.
+ b7 = SCSCR_TIE
+ b6 = SCSCR_RIE
+ b5 = SCSCR_TE
+ b4 = SCSCR_RE
+ b3 = SCSCR_REIE
+ b2 = SCSCR_TOIE
+ b1 = SCSCR_CKE1
+ b0 = SCSCR_CKE0
+- renesas,scbrr-algo-id : Algorithm ID for the Bit Rate Register
+ 1 = SCBRR_ALGO_1 ((clk + 16 * bps) / (16 * bps) - 1)
+ 2 = SCBRR_ALGO_2 ((clk + 16 * bps) / (32 * bps) - 1)
+ 3 = SCBRR_ALGO_3 (((clk * 2) + 16 * bps) / (16 * bps) - 1)
+ 4 = SCBRR_ALGO_4 (((clk * 2) + 16 * bps) / (32 * bps) - 1)
+ 5 = SCBRR_ALGO_5 (((clk * 1000 / 32) / bps) - 1)
+
+Optional properties:
+- renesas,autoconf : Set if device is capable of auto configuration
+- renesas,regtype : Override the register layout. In most cases you can rely
+ on auto-probing (omit this property or set to 0) but some legacy devices
+ use a non-default register layout. Possible layouts are
+ 0 = SCIx_PROBE_REGTYPE (default)
+ 1 = SCIx_SCI_REGTYPE
+ 2 = SCIx_IRDA_REGTYPE
+ 3 = SCIx_SCIFA_REGTYPE
+ 4 = SCIx_SCIFB_REGTYPE
+ 5 = SCIx_SH2_SCIF_FIFODATA_REGTYPE
+ 6 = SCIx_SH3_SCIF_REGTYPE
+ 7 = SCIx_SH4_SCIF_REGTYPE
+ 8 = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE
+ 9 = SCIx_SH4_SCIF_FIFODATA_REGTYPE
+ 10 = SCIx_SH7705_SCIF_REGTYPE
+
+
+Example:
+ sci@0xe6c50000 {
+ compatible = "renesas,sci-SCIFA-uart";
+ interrupt-parent = <&intca>;
+ reg = <0xe6c50000 0x100>;
+ interrupts = <0x0c20>, <0x0c20>, <0x0c20>, <0x0c20>;
+ cell-index = <1>;
+ renesas,scscr = <0x30>;
+ renesas,scbrr-algo-id = <4>;
+ renesas,autoconf;
+ };
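
As a worked example of the SCBRR_ALGO_4 formula quoted above (the 48 MHz
clock and 115200 bps figures are illustrative assumptions, not taken from any
particular board):

#include <stdio.h>

int main(void)
{
	unsigned long clk = 48000000;	/* peripheral clock in Hz (assumed) */
	unsigned long bps = 115200;	/* target bit rate (assumed) */

	/* SCBRR_ALGO_4: ((clk * 2) + 16 * bps) / (32 * bps) - 1 */
	unsigned long scbrr = ((clk * 2) + 16 * bps) / (32 * bps) - 1;

	printf("SCBRR = %lu\n", scbrr);	/* prints 25 for these values */
	return 0;
}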
diff --git a/Documentation/devicetree/bindings/usb/ux500-usb.txt b/Documentation/devicetree/bindings/usb/ux500-usb.txt
index 330d6ec15401..439a41c79afa 100644
--- a/Documentation/devicetree/bindings/usb/ux500-usb.txt
+++ b/Documentation/devicetree/bindings/usb/ux500-usb.txt
@@ -15,7 +15,7 @@ Optional properties:
Example:
usb_per5@a03e0000 {
- compatible = "stericsson,db8500-musb", "mentor,musb";
+ compatible = "stericsson,db8500-musb";
reg = <0xa03e0000 0x10000>;
interrupts = <0 23 0x4>;
interrupt-names = "mc";
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 2956800f0240..d504cb6a3067 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -16,6 +16,7 @@ avago Avago Technologies
bosch Bosch Sensortec GmbH
brcm Broadcom Corporation
cavium Cavium, Inc.
+cdns Cadence Design Systems Inc.
chrp Common Hardware Reference Platform
cirrus Cirrus Logic, Inc.
cortina Cortina Systems, Inc.
@@ -25,6 +26,7 @@ denx Denx Software Engineering
emmicro EM Microelectronic
epson Seiko Epson Corp.
est ESTeem Wireless Modems
+eukrea Eukréa Electromatique
fsl Freescale Semiconductor
GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc.
gef GE Fanuc Intelligent Platforms Embedded Systems, Inc.
@@ -45,6 +47,7 @@ nintendo Nintendo
nvidia NVIDIA
nxp NXP Semiconductors
onnn ON Semiconductor Corp.
+phytec PHYTEC Messtechnik GmbH
picochip Picochip Ltd
powervr PowerVR (deprecated, use img)
qca Qualcomm Atheros, Inc.
@@ -64,12 +67,13 @@ snps Synopsys, Inc.
st STMicroelectronics
ste ST-Ericsson
stericsson ST-Ericsson
-toumaz Toumaz
ti Texas Instruments
toshiba Toshiba Corporation
+toumaz Toumaz
v3 V3 Semiconductor
via VIA Technologies, Inc.
+voipac Voipac Technologies s.r.o.
+winbond Winbond Electronics corp.
wlf Wolfson Microelectronics
wm Wondermedia Technologies, Inc.
-winbond Winbond Electronics corp.
xlnx Xilinx
diff --git a/Documentation/devicetree/bindings/video/atmel,lcdc.txt b/Documentation/devicetree/bindings/video/atmel,lcdc.txt
new file mode 100644
index 000000000000..1ec175eddca8
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/atmel,lcdc.txt
@@ -0,0 +1,75 @@
+Atmel LCDC Framebuffer
+-----------------------------------------------------
+
+Required properties:
+- compatible :
+ "atmel,at91sam9261-lcdc" ,
+ "atmel,at91sam9263-lcdc" ,
+ "atmel,at91sam9g10-lcdc" ,
+ "atmel,at91sam9g45-lcdc" ,
+ "atmel,at91sam9g45es-lcdc" ,
+ "atmel,at91sam9rl-lcdc" ,
+ "atmel,at32ap-lcdc"
+- reg : Should contain 1 register range (address and length)
+- interrupts : framebuffer controller interrupt
+- display: a phandle pointing to the display node
+
+Required nodes:
+- display: a display node is required to initialize the lcd panel
+ This should be in the board dts.
+- default-mode: a videomode within the display with timing parameters
+ as specified below.
+
+Example:
+
+ fb0: fb@0x00500000 {
+ compatible = "atmel,at91sam9g45-lcdc";
+ reg = <0x00500000 0x1000>;
+ interrupts = <23 3 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fb>;
+ display = <&display0>;
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ };
+
+Atmel LCDC Display
+-----------------------------------------------------
+Required properties (as per of_videomode_helper):
+
+ - atmel,dmacon: dma controller configuration
+ - atmel,lcdcon2: lcd controller configuration
+ - atmel,guard-time: lcd guard time (Delay in frame periods)
+ - bits-per-pixel: lcd panel bit-depth.
+
+Optional properties (as per of_videomode_helper):
+ - atmel,lcdcon-backlight: enable backlight
+ - atmel,lcd-wiring-mode: lcd wiring mode "RGB" or "BRG"
+ - atmel,power-control-gpio: gpio to power on or off the LCD (as many as needed)
+
+Example:
+ display0: display {
+ bits-per-pixel = <32>;
+ atmel,lcdcon-backlight;
+ atmel,dmacon = <0x1>;
+ atmel,lcdcon2 = <0x80008002>;
+ atmel,guard-time = <9>;
+ atmel,lcd-wiring-mode = <1>;
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: timing0 {
+ clock-frequency = <9000000>;
+ hactive = <480>;
+ vactive = <272>;
+ hback-porch = <1>;
+ hfront-porch = <1>;
+ vback-porch = <40>;
+ vfront-porch = <1>;
+ hsync-len = <45>;
+ vsync-len = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/video/exynos_hdmi.txt b/Documentation/devicetree/bindings/video/exynos_hdmi.txt
index 323983be3c30..50decf8e1b90 100644
--- a/Documentation/devicetree/bindings/video/exynos_hdmi.txt
+++ b/Documentation/devicetree/bindings/video/exynos_hdmi.txt
@@ -12,7 +12,19 @@ Required properties:
a) phandle of the gpio controller node.
b) pin number within the gpio controller.
c) optional flags and pull up/down.
-
+- clocks: list of clock IDs from SoC clock driver.
+ a) hdmi: Gate of HDMI IP bus clock.
+ b) sclk_hdmi: Gate of HDMI special clock.
+ c) sclk_pixel: Pixel special clock, one of the two possible inputs of
+ HDMI clock mux.
+ d) sclk_hdmiphy: HDMI PHY clock output, one of two possible inputs of
+ HDMI clock mux.
+ e) mout_hdmi: Required by the driver to switch between its two
+ parents, i.e. sclk_pixel and sclk_hdmiphy. If the hdmiphy is
+ stable after configuration, the parent is set to sclk_hdmiphy,
+ otherwise to sclk_pixel.
+- clock-names: aliases as per driver requirements for above clock IDs:
+ "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy" and "mout_hdmi".
Example:
hdmi {
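
To make the mout_hdmi description above concrete, a rough sketch of the parent
switch using the common clock API (the function and variable names are
hypothetical; only devm_clk_get() and clk_set_parent() are assumed):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_hdmi_select_clock(struct device *dev, bool phy_stable)
{
	struct clk *mout_hdmi = devm_clk_get(dev, "mout_hdmi");
	struct clk *sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
	struct clk *sclk_pixel = devm_clk_get(dev, "sclk_pixel");

	if (IS_ERR(mout_hdmi) || IS_ERR(sclk_hdmiphy) || IS_ERR(sclk_pixel))
		return -ENODEV;

	/* Parent is sclk_hdmiphy once the PHY is stable, else sclk_pixel. */
	return clk_set_parent(mout_hdmi,
			      phy_stable ? sclk_hdmiphy : sclk_pixel);
}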
diff --git a/Documentation/devicetree/bindings/video/exynos_mixer.txt b/Documentation/devicetree/bindings/video/exynos_mixer.txt
index 3334b0a8e343..7bfde9c9d658 100644
--- a/Documentation/devicetree/bindings/video/exynos_mixer.txt
+++ b/Documentation/devicetree/bindings/video/exynos_mixer.txt
@@ -10,6 +10,10 @@ Required properties:
- reg: physical base address of the mixer and length of memory mapped
region.
- interrupts: interrupt number to the cpu.
+- clocks: list of clock IDs from SoC clock driver.
+ a) mixer: Gate of Mixer IP bus clock.
+ b) sclk_hdmi: HDMI Special clock, one of the two possible inputs of
+ mixer mux.
Example:
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
index fcdd48f7dcff..f90e294d7631 100644
--- a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
@@ -9,11 +9,37 @@ Required properties:
Optional properties:
- timeout-sec: contains the watchdog timeout in seconds.
+- interrupts : Should contain WDT interrupt.
+- atmel,max-heartbeat-sec : Should contain the maximum heartbeat value in
+ seconds. This value should be less than or equal to 16. It is used to
+ compute the WDV field.
+- atmel,min-heartbeat-sec : Should contain the minimum heartbeat value in
+ seconds. This value must be smaller than the max-heartbeat-sec value.
+ It is used to compute the WDD field.
+- atmel,watchdog-type : Should be "hardware" or "software". A hardware
+ watchdog uses the at91 watchdog reset. A software watchdog uses the
+ watchdog interrupt to trigger a software reset.
+- atmel,reset-type : Should be "proc" or "all".
+ "all" : assert peripherals and processor reset signals
+ "proc" : assert the processor reset signal
+ This is valid only when using "hardware" watchdog.
+- atmel,disable : Should be present if you want to disable the watchdog.
+- atmel,idle-halt : Should be present if you want to stop the watchdog when
+ entering idle state.
+- atmel,dbg-halt : Should be present if you want to stop the watchdog when
+ entering debug state.
Example:
-
watchdog@fffffd40 {
compatible = "atmel,at91sam9260-wdt";
reg = <0xfffffd40 0x10>;
- timeout-sec = <10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ timeout-sec = <15>;
+ atmel,watchdog-type = "hardware";
+ atmel,reset-type = "all";
+ atmel,dbg-halt;
+ atmel,idle-halt;
+ atmel,max-heartbeat-sec = <16>;
+ atmel,min-heartbeat-sec = <0>;
+ status = "okay";
};
diff --git a/Documentation/devicetree/bindings/watchdog/dw_wdt.txt b/Documentation/devicetree/bindings/watchdog/dw_wdt.txt
new file mode 100644
index 000000000000..08e16f684f2d
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/dw_wdt.txt
@@ -0,0 +1,21 @@
+Synopsys Designware Watchdog Timer
+
+Required Properties:
+
+- compatible : Should contain "snps,dw-wdt"
+- reg : Base address and size of the watchdog timer registers.
+- clocks : phandle + clock-specifier for the clock that drives the
+ watchdog timer.
+
+Optional Properties:
+
+- interrupts : The interrupt used for the watchdog timeout warning.
+
+Example:
+
+ watchdog0: wd@ffd02000 {
+ compatible = "snps,dw-wdt";
+ reg = <0xffd02000 0x1000>;
+ interrupts = <0 171 4>;
+ clocks = <&per_base_clk>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/men-a021-wdt.txt b/Documentation/devicetree/bindings/watchdog/men-a021-wdt.txt
index 370dee3226d9..370dee3226d9 100644
--- a/Documentation/devicetree/bindings/gpio/men-a021-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/men-a021-wdt.txt
diff --git a/Documentation/devicetree/bindings/watchdog/moxa,moxart-watchdog.txt b/Documentation/devicetree/bindings/watchdog/moxa,moxart-watchdog.txt
new file mode 100644
index 000000000000..1169857d1d12
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/moxa,moxart-watchdog.txt
@@ -0,0 +1,15 @@
+MOXA ART Watchdog timer
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-watchdog"
+- reg : Should contain registers location and length
+- clocks : Should contain phandle for the clock that drives the counter
+
+Example:
+
+ watchdog: watchdog@98500000 {
+ compatible = "moxa,moxart-watchdog";
+ reg = <0x98500000 0x10>;
+ clocks = <&coreclk>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt b/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt
new file mode 100644
index 000000000000..d7bab3db9d1f
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt
@@ -0,0 +1,19 @@
+Ralink Watchdog Timers
+
+Required properties:
+- compatible: must be "ralink,rt2880-wdt"
+- reg: physical base address of the controller and length of the register range
+
+Optional properties:
+- interrupt-parent: phandle to the INTC device node
+- interrupts: Specify the INTC interrupt number
+
+Example:
+
+ watchdog@120 {
+ compatible = "ralink,rt2880-wdt";
+ reg = <0x120 0x10>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt b/Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt
new file mode 100644
index 000000000000..9cbc76c89b2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt
@@ -0,0 +1,14 @@
+SiRFSoC Timer and Watchdog Timer(WDT) Controller
+
+Required properties:
+- compatible: "sirf,prima2-tick"
+- reg: Address range of tick timer/WDT register set
+- interrupts: interrupt number to the cpu
+
+Example:
+
+timer@b0020000 {
+ compatible = "sirf,prima2-tick";
+ reg = <0xb0020000 0x1000>;
+ interrupts = <0>;
+};
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index fcb34a5697ea..5bdc8cb5fc28 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -283,6 +283,7 @@ REGULATOR
devm_regulator_get()
devm_regulator_put()
devm_regulator_bulk_get()
+ devm_regulator_register()
CLOCK
devm_clk_get()
@@ -302,3 +303,6 @@ PHY
SLAVE DMA ENGINE
devm_acpi_dma_controller_register()
+
+SPI
+ devm_spi_register_master()
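
A brief sketch of the new SPI devres helper in a probe path (driver specifics
are omitted and hypothetical; only spi_alloc_master(), spi_master_put() and
devm_spi_register_master() are assumed):

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	/* ... set up master->num_chipselect, bus_num, transfer hooks ... */

	/*
	 * Managed registration: the master is unregistered automatically
	 * when the device is unbound, so no spi_unregister_master() call
	 * is needed in the remove path.
	 */
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		spi_master_put(master);
	return ret;
}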
diff --git a/Documentation/x86/efi-stub.txt b/Documentation/efi-stub.txt
index 44e6bb6ead10..44e6bb6ead10 100644
--- a/Documentation/x86/efi-stub.txt
+++ b/Documentation/efi-stub.txt
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index fe7afe225381..21ef48f0778f 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -192,8 +192,8 @@ prototypes:
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
- int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs);
+ int (*direct_IO)(int, struct kiocb *, struct iov_iter *iter,
+ loff_t offset);
int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
unsigned long *);
int (*migratepage)(struct address_space *, struct page *, struct page *);
@@ -426,7 +426,9 @@ prototypes:
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ ssize_t (*read_iter) (struct kiocb *, struct iov_iter *, loff_t);
ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ ssize_t (*write_iter) (struct kiocb *, struct iov_iter *, loff_t);
int (*iterate) (struct file *, struct dir_context *);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
diff --git a/Documentation/filesystems/caching/netfs-api.txt b/Documentation/filesystems/caching/netfs-api.txt
index 11a0a40ce445..aed6b94160b1 100644
--- a/Documentation/filesystems/caching/netfs-api.txt
+++ b/Documentation/filesystems/caching/netfs-api.txt
@@ -29,15 +29,16 @@ This document contains the following sections:
(6) Index registration
(7) Data file registration
(8) Miscellaneous object registration
- (9) Setting the data file size
+ (9) Setting the data file size
(10) Page alloc/read/write
(11) Page uncaching
(12) Index and data file consistency
- (13) Miscellaneous cookie operations
- (14) Cookie unregistration
- (15) Index invalidation
- (16) Data file invalidation
- (17) FS-Cache specific page flags.
+ (13) Cookie enablement
+ (14) Miscellaneous cookie operations
+ (15) Cookie unregistration
+ (16) Index invalidation
+ (17) Data file invalidation
+ (18) FS-Cache specific page flags.
=============================
@@ -334,7 +335,8 @@ the path to the file:
struct fscache_cookie *
fscache_acquire_cookie(struct fscache_cookie *parent,
const struct fscache_object_def *def,
- void *netfs_data);
+ void *netfs_data,
+ bool enable);
This function creates an index entry in the index represented by parent,
filling in the index entry by calling the operations pointed to by def.
@@ -350,6 +352,10 @@ object needs to be created somewhere down the hierarchy. Furthermore, an index
may be created in several different caches independently at different times.
This is all handled transparently, and the netfs doesn't see any of it.
+A cookie will be created in the disabled state if enable is false. A cookie
+must be enabled to do anything with it. A disabled cookie can be enabled by
+calling fscache_enable_cookie() (see below).
+
For example, with AFS, a cell would be added to the primary index. This index
entry would have a dependent inode containing a volume location index for the
volume mappings within this cell:
@@ -357,7 +363,7 @@ volume mappings within this cell:
cell->cache =
fscache_acquire_cookie(afs_cache_netfs.primary_index,
&afs_cell_cache_index_def,
- cell);
+ cell, true);
Then when a volume location was accessed, it would be entered into the cell's
index and an inode would be allocated that acts as a volume type and hash chain
@@ -366,7 +372,7 @@ combination:
vlocation->cache =
fscache_acquire_cookie(cell->cache,
&afs_vlocation_cache_index_def,
- vlocation);
+ vlocation, true);
And then a particular flavour of volume (R/O for example) could be added to
that index, creating another index for vnodes (AFS inode equivalents):
@@ -374,7 +380,7 @@ that index, creating another index for vnodes (AFS inode equivalents):
volume->cache =
fscache_acquire_cookie(vlocation->cache,
&afs_volume_cache_index_def,
- volume);
+ volume, true);
======================
@@ -388,7 +394,7 @@ the object definition should be something other than index type.
vnode->cache =
fscache_acquire_cookie(volume->cache,
&afs_vnode_cache_object_def,
- vnode);
+ vnode, true);
=================================
@@ -404,7 +410,7 @@ it would be some other type of object such as a data file.
xattr->cache =
fscache_acquire_cookie(vnode->cache,
&afs_xattr_cache_object_def,
- xattr);
+ xattr, true);
Miscellaneous objects might be used to store extended attributes or directory
entries for example.
@@ -733,6 +739,47 @@ Note that partial updates may happen automatically at other times, such as when
data blocks are added to a data file object.
+=================
+COOKIE ENABLEMENT
+=================
+
+Cookies exist in one of two states: enabled and disabled. If a cookie is
+disabled, it ignores all attempts to acquire child cookies; check, update or
+invalidate its state; allocate, read or write backing pages - though it is
+still possible to uncache pages and relinquish the cookie.
+
+The initial enablement state is set by fscache_acquire_cookie(), but the cookie
+can be enabled or disabled later. To disable a cookie, call:
+
+ void fscache_disable_cookie(struct fscache_cookie *cookie,
+ bool invalidate);
+
+If the cookie is not already disabled, this locks the cookie against other
+enable and disable ops, marks the cookie as being disabled, discards or
+invalidates any backing objects and waits for cessation of activity on any
+associated object before unlocking the cookie.
+
+All possible failures are handled internally. The caller should consider
+calling fscache_uncache_all_inode_pages() afterwards to make sure all page
+markings are cleared up.
+
+Cookies can be enabled or reenabled with:
+
+ void fscache_enable_cookie(struct fscache_cookie *cookie,
+ bool (*can_enable)(void *data),
+ void *data)
+
+If the cookie is not already enabled, this locks the cookie against other
+enable and disable ops, invokes can_enable() and, if the cookie is not an index
+cookie, will begin the procedure of acquiring backing objects.
+
+The optional can_enable() function is passed the data argument and returns a
+ruling as to whether or not enablement should actually be permitted to begin.
+
+All possible failures are handled internally. The cookie will only be marked
+as enabled if provisional backing objects are allocated.
+
+
===============================
MISCELLANEOUS COOKIE OPERATIONS
===============================
@@ -778,7 +825,7 @@ COOKIE UNREGISTRATION
To get rid of a cookie, this function should be called.
void fscache_relinquish_cookie(struct fscache_cookie *cookie,
- int retire);
+ bool retire);
If retire is non-zero, then the object will be marked for recycling, and all
copies of it will be removed from all active caches in which it is present.
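
To tie the enablement calls above together, a minimal netfs-side sketch
(struct my_object and its field are hypothetical; only the fscache prototypes
quoted in this section are assumed):

#include <linux/fscache.h>

/* Hypothetical netfs object whose cookie was acquired elsewhere with
 * fscache_acquire_cookie(..., false), i.e. in the disabled state. */
struct my_object {
	struct fscache_cookie *cookie;
	bool being_torn_down;
};

static bool my_can_enable(void *data)
{
	struct my_object *obj = data;

	/* Veto enablement while the object is being torn down. */
	return !obj->being_torn_down;
}

static void my_start_caching(struct my_object *obj)
{
	/* Enable the cookie, subject to the netfs-side check above. */
	fscache_enable_cookie(obj->cookie, my_can_enable, obj);
}

static void my_stop_caching(struct my_object *obj, bool invalidate)
{
	/* Disable first, optionally invalidating backing objects... */
	fscache_disable_cookie(obj->cookie, invalidate);

	/* ...then drop the cookie without retiring the cached data. */
	fscache_relinquish_cookie(obj->cookie, false);
}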
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index 3cd27bed6349..a3fe811bbdbc 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -119,6 +119,7 @@ active_logs=%u Support configuring the number of active logs. In the
Default number is 6.
disable_ext_identify Disable the extension list configured by mkfs, so f2fs
does not aware of cold files such as media files.
+inline_xattr Enable the inline xattrs feature.
================================================================================
DEBUGFS ENTRIES
@@ -164,6 +165,12 @@ Files in /sys/fs/f2fs/<devname>
gc_idle = 1 will select the Cost Benefit approach
& setting gc_idle = 2 will select the greedy aproach.
+ reclaim_segments This parameter controls the number of prefree
+ segments to be reclaimed. If the number of prefree
+ segments is larger than this number, f2fs triggers a
+ checkpoint to reclaim the prefree segments and turn
+ them into free segments. The default is 100 segments
+ (200 MB).
+
================================================================================
USAGE
================================================================================
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index deb48b5fd883..47fa5a3e9185 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -573,8 +573,8 @@ struct address_space_operations {
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs);
+ ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter,
+ loff_t offset);
struct page* (*get_xip_page)(struct address_space *, sector_t,
int);
/* migrate the contents of a page to the specified target */
@@ -790,7 +790,9 @@ struct file_operations {
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ ssize_t (*read_iter) (struct kiocb *, struct iov_iter *, loff_t);
ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ ssize_t (*write_iter) (struct kiocb *, struct iov_iter *, loff_t);
int (*iterate) (struct file *, struct dir_context *);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
@@ -825,10 +827,16 @@ otherwise noted.
aio_read: called by io_submit(2) and other asynchronous I/O operations
+ read_iter: aio_read replacement, called by io_submit(2) and other
+ asynchronous I/O operations
+
write: called by write(2) and related system calls
aio_write: called by io_submit(2) and other asynchronous I/O operations
+ write_iter: aio_write replacement, called by io_submit(2) and other
+ asynchronous I/O operations
+
iterate: called when the VFS needs to read the directory contents
poll: called by the VFS when a process wants to check if there is
diff --git a/Documentation/hwmon/lm25066 b/Documentation/hwmon/lm25066
index c1b57d72efc3..b34c3de5c1bc 100644
--- a/Documentation/hwmon/lm25066
+++ b/Documentation/hwmon/lm25066
@@ -8,6 +8,11 @@ Supported chips:
Datasheets:
http://www.ti.com/lit/gpn/lm25056
http://www.ti.com/lit/gpn/lm25056a
+ * TI LM25063
+ Prefix: 'lm25063'
+ Addresses scanned: -
+ Datasheet:
+ To be announced
* National Semiconductor LM25066
Prefix: 'lm25066'
Addresses scanned: -
@@ -32,7 +37,7 @@ Description
-----------
This driver supports hardware montoring for National Semiconductor / TI LM25056,
-LM25066, LM5064, and LM5064 Power Management, Monitoring, Control, and
+LM25063, LM25066, LM5064, and LM5066 Power Management, Monitoring, Control, and
Protection ICs.
The driver is a client driver to the core PMBus driver. Please see
@@ -64,8 +69,12 @@ in1_input Measured input voltage.
in1_average Average measured input voltage.
in1_min Minimum input voltage.
in1_max Maximum input voltage.
+in1_crit Critical high input voltage (LM25063 only).
+in1_lcrit Critical low input voltage (LM25063 only).
in1_min_alarm Input voltage low alarm.
in1_max_alarm Input voltage high alarm.
+in1_lcrit_alarm Input voltage critical low alarm (LM25063 only).
+in1_crit_alarm Input voltage critical high alarm (LM25063 only).
in2_label "vmon"
in2_input Measured voltage on VAUX pin
@@ -80,12 +89,16 @@ in3_input Measured output voltage.
in3_average Average measured output voltage.
in3_min Minimum output voltage.
in3_min_alarm Output voltage low alarm.
+in3_highest Historical maximum output voltage (LM25063 only).
+in3_lowest Historical minimum output voltage (LM25063 only).
curr1_label "iin"
curr1_input Measured input current.
curr1_average Average measured input current.
curr1_max Maximum input current.
+curr1_crit Critical input current (LM25063 only).
curr1_max_alarm Input current high alarm.
+curr1_crit_alarm Input current critical high alarm (LM25063 only).
power1_label "pin"
power1_input Measured input power.
@@ -95,6 +108,11 @@ power1_alarm Input power alarm
power1_input_highest Historical maximum power.
power1_reset_history Write any value to reset maximum power history.
+power2_label "pout". LM25063 only.
+power2_input Measured output power.
+power2_max Maximum output power limit.
+power2_crit Critical output power limit.
+
temp1_input Measured temperature.
temp1_max Maximum temperature.
temp1_crit Critical high temperature.
diff --git a/Documentation/hwmon/lm90 b/Documentation/hwmon/lm90
index b466974e142f..ab81013cc390 100644
--- a/Documentation/hwmon/lm90
+++ b/Documentation/hwmon/lm90
@@ -122,6 +122,12 @@ Supported chips:
Prefix: 'g781'
Addresses scanned: I2C 0x4c, 0x4d
Datasheet: Not publicly available from GMT
+ * Texas Instruments TMP451
+ Prefix: 'tmp451'
+ Addresses scanned: I2C 0x4c
+ Datasheet: Publicly available at TI website
+ http://www.ti.com/litv/pdf/sbos686
+
Author: Jean Delvare <khali@linux-fr.org>
diff --git a/Documentation/hwmon/ltc2978 b/Documentation/hwmon/ltc2978
index dc0d08c61305..a0546fc42273 100644
--- a/Documentation/hwmon/ltc2978
+++ b/Documentation/hwmon/ltc2978
@@ -6,10 +6,15 @@ Supported chips:
Prefix: 'ltc2974'
Addresses scanned: -
Datasheet: http://www.linear.com/product/ltc2974
- * Linear Technology LTC2978
+ * Linear Technology LTC2977
+ Prefix: 'ltc2977'
+ Addresses scanned: -
+ Datasheet: http://www.linear.com/product/ltc2977
+ * Linear Technology LTC2978, LTC2978A
Prefix: 'ltc2978'
Addresses scanned: -
Datasheet: http://www.linear.com/product/ltc2978
+ http://www.linear.com/product/ltc2978a
* Linear Technology LTC3880
Prefix: 'ltc3880'
Addresses scanned: -
@@ -26,8 +31,9 @@ Description
-----------
LTC2974 is a quad digital power supply manager. LTC2978 is an octal power supply
-monitor. LTC3880 is a dual output poly-phase step-down DC/DC controller. LTC3883
-is a single phase step-down DC/DC controller.
+monitor. LTC2977 is a pin compatible replacement for LTC2978. LTC3880 is a dual
+output poly-phase step-down DC/DC controller. LTC3883 is a single phase
+step-down DC/DC controller.
Usage Notes
@@ -49,21 +55,25 @@ Sysfs attributes
in1_label "vin"
in1_input Measured input voltage.
in1_min Minimum input voltage.
-in1_max Maximum input voltage. LTC2974 and LTC2978 only.
-in1_lcrit Critical minimum input voltage. LTC2974 and LTC2978
- only.
+in1_max Maximum input voltage.
+ LTC2974, LTC2977, and LTC2978 only.
+in1_lcrit Critical minimum input voltage.
+ LTC2974, LTC2977, and LTC2978 only.
in1_crit Critical maximum input voltage.
in1_min_alarm Input voltage low alarm.
-in1_max_alarm Input voltage high alarm. LTC2974 and LTC2978 only.
-in1_lcrit_alarm Input voltage critical low alarm. LTC2974 and LTC2978
- only.
+in1_max_alarm Input voltage high alarm.
+ LTC2974, LTC2977, and LTC2978 only.
+in1_lcrit_alarm Input voltage critical low alarm.
+ LTC2974, LTC2977, and LTC2978 only.
in1_crit_alarm Input voltage critical high alarm.
-in1_lowest Lowest input voltage. LTC2974 and LTC2978 only.
+in1_lowest Lowest input voltage.
+ LTC2974, LTC2977, and LTC2978 only.
in1_highest Highest input voltage.
in1_reset_history Reset input voltage history.
in[N]_label "vout[1-8]".
LTC2974: N=2-5
+ LTC2977: N=2-9
LTC2978: N=2-9
LTC3880: N=2-3
LTC3883: N=2
@@ -83,21 +93,23 @@ in[N]_reset_history Reset output voltage history.
temp[N]_input Measured temperature.
On LTC2974, temp[1-4] report external temperatures,
and temp5 reports the chip temperature.
- On LTC2978, only one temperature measurement is
- supported and reports the chip temperature.
+ On LTC2977 and LTC2978, only one temperature measurement
+ is supported and reports the chip temperature.
On LTC3880, temp1 and temp2 report external
temperatures, and temp3 reports the chip temperature.
On LTC3883, temp1 reports an external temperature,
and temp2 reports the chip temperature.
-temp[N]_min Mimimum temperature. LTC2974 and LTC2978 only.
+temp[N]_min Minimum temperature. LTC2974, LTC2977, and LTC2978 only.
temp[N]_max Maximum temperature.
temp[N]_lcrit Critical low temperature.
temp[N]_crit Critical high temperature.
-temp[N]_min_alarm Temperature low alarm. LTC2974 and LTC2978 only.
+temp[N]_min_alarm Temperature low alarm.
+ LTC2974, LTC2977, and LTC2978 only.
temp[N]_max_alarm Temperature high alarm.
temp[N]_lcrit_alarm Temperature critical low alarm.
temp[N]_crit_alarm Temperature critical high alarm.
-temp[N]_lowest Lowest measured temperature. LTC2974 and LTC2978 only.
+temp[N]_lowest Lowest measured temperature.
+ LTC2974, LTC2977, and LTC2978 only.
Not supported for chip temperature sensor on LTC2974.
temp[N]_highest Highest measured temperature. Not supported for chip
temperature sensor on LTC2974.
@@ -109,6 +121,7 @@ power1_input Measured input power.
power[N]_label "pout[1-4]".
LTC2974: N=1-4
+ LTC2977: Not supported
LTC2978: Not supported
LTC3880: N=1-2
LTC3883: N=2
@@ -123,6 +136,7 @@ curr1_reset_history Reset input current history. LTC3883 only.
curr[N]_label "iout[1-4]".
LTC2974: N=1-4
+ LTC2977: not supported
LTC2978: not supported
LTC3880: N=2-3
LTC3883: N=2
diff --git a/Documentation/input/gamepad.txt b/Documentation/input/gamepad.txt
index 8002c894c6b0..31bb6a4029ef 100644
--- a/Documentation/input/gamepad.txt
+++ b/Documentation/input/gamepad.txt
@@ -122,12 +122,14 @@ D-Pad:
BTN_DPAD_*
Analog buttons are reported as:
ABS_HAT0X and ABS_HAT0Y
+ (for ABS values negative is left/up, positive is right/down)
Analog-Sticks:
The left analog-stick is reported as ABS_X, ABS_Y. The right analog stick is
reported as ABS_RX, ABS_RY. Zero, one or two sticks may be present.
If analog-sticks provide digital buttons, they are mapped accordingly as
BTN_THUMBL (first/left) and BTN_THUMBR (second/right).
+ (for ABS values negative is left/up, positive is right/down)
Triggers:
Trigger buttons can be available as digital or analog buttons or both. User-
@@ -138,6 +140,7 @@ Triggers:
ABS_HAT2X (right/ZR) and BTN_TL2 or ABS_HAT2Y (left/ZL).
If only one trigger-button combination is present (upper+lower), they are
reported as "right" triggers (BTN_TR/ABS_HAT1X).
+ (ABS trigger values start at 0, pressure is reported as positive values)
Menu-Pad:
Menu buttons are always digital and are mapped according to their location
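
For reference, a small userspace sketch that reads D-pad events and applies
the sign convention noted above (the event device path is a placeholder):

#include <fcntl.h>
#include <linux/input.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event0", O_RDONLY);	/* placeholder path */

	if (fd < 0)
		return 1;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type != EV_ABS)
			continue;
		/* Negative values mean left/up, positive mean right/down. */
		if (ev.code == ABS_HAT0X)
			printf("dpad x: %s\n", ev.value < 0 ? "left" :
			       ev.value > 0 ? "right" : "centre");
		if (ev.code == ABS_HAT0Y)
			printf("dpad y: %s\n", ev.value < 0 ? "up" :
			       ev.value > 0 ? "down" : "centre");
	}
	close(fd);
	return 0;
}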
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 2a5f0e14efa3..7cbfa3c4fc3d 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -138,6 +138,7 @@ Code Seq#(hex) Include File Comments
'H' C0-DF net/bluetooth/cmtp/cmtp.h conflict!
'H' C0-DF net/bluetooth/bnep/bnep.h conflict!
'H' F1 linux/hid-roccat.h <mailto:erazor_de@users.sourceforge.net>
+'H' F8-FA sound/firewire.h
'I' all linux/isdn.h conflict!
'I' 00-0F drivers/isdn/divert/isdn_divert.h conflict!
'I' 40-4F linux/mISDNif.h conflict!
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index 8ef6dbb6a462..bbc99c0c1094 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -20,16 +20,9 @@ symbols have been introduced.
To see a list of new config symbols when using "make oldconfig", use
cp user/some/old.config .config
- yes "" | make oldconfig >conf.new
+ make listnewconfig
-and the config program will list as (NEW) any new symbols that have
-unknown values. Of course, the .config file is also updated with
-new (default) values, so you can use:
-
- grep "(NEW)" conf.new
-
-to see the new config symbols or you can use diffconfig to see the
-differences between the previous and new .config files:
+and the config program will list any new symbols, one per line.
scripts/diffconfig .config.old .config | less
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fcbb736d55fe..dcab4b64fb3b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -847,6 +847,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
earlyprintk= [X86,SH,BLACKFIN,ARM]
earlyprintk=vga
+ earlyprintk=efi
earlyprintk=xen
earlyprintk=serial[,ttySn[,baudrate]]
earlyprintk=serial[,0x...[,baudrate]]
@@ -860,7 +861,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Append ",keep" to not disable it when the real console
takes over.
- Only vga or serial or usb debug port at a time.
+ Only one of vga, efi, serial, or usb debug port can
+ be used at a time.
Currently only ttyS0 and ttyS1 may be specified by
name. Other I/O ports may be explicitly specified
@@ -874,8 +876,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Interaction with the standard serial driver is not
very good.
- The VGA output is eventually overwritten by the real
- console.
+ The VGA and EFI output is eventually overwritten by
+ the real console.
The xen output can only be used by Xen PV guests.
@@ -1185,15 +1187,24 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
owned by uid=0.
ima_hash= [IMA]
- Format: { "sha1" | "md5" }
+ Format: { md5 | sha1 | rmd160 | sha256 | sha384
+ | sha512 | ... }
default: "sha1"
+ The list of supported hash algorithms is defined
+ in crypto/hash_info.h.
+
ima_tcb [IMA]
Load a policy which meets the needs of the Trusted
Computing Base. This means IMA will measure all
programs exec'd, files mmap'd for exec, and all files
opened for read by uid=0.
+ ima_template= [IMA]
+			Select one of the defined IMA measurement template formats.
+ Formats: { "ima" | "ima-ng" }
+ Default: "ima-ng"
+
init= [KNL]
Format: <full_path>
Run specified binary instead of /sbin/init as init
@@ -1975,6 +1986,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noapic [SMP,APIC] Tells the kernel to not make use of any
IOAPICs that may be present in the system.
+ nokaslr [X86]
+ Disable kernel base offset ASLR (Address Space
+ Layout Randomization) if built into the kernel.
+
noautogroup Disable scheduler automatic task group creation.
nobats [PPC] Do not use BATs for mapping kernel lowmem
@@ -2599,7 +2614,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
ramdisk_size= [RAM] Sizes of RAM disks in kilobytes
See Documentation/blockdev/ramdisk.txt.
- rcu_nocbs= [KNL,BOOT]
+ rcu_nocbs= [KNL]
In kernels built with CONFIG_RCU_NOCB_CPU=y, set
the specified list of CPUs to be no-callback CPUs.
Invocation of these CPUs' RCU callbacks will
@@ -2612,7 +2627,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
real-time workloads. It can also improve energy
efficiency for asymmetric multiprocessors.
- rcu_nocb_poll [KNL,BOOT]
+ rcu_nocb_poll [KNL]
Rather than requiring that offloaded CPUs
(specified by rcu_nocbs= above) explicitly
awaken the corresponding "rcuoN" kthreads,
@@ -2623,126 +2638,145 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
energy efficiency by requiring that the kthreads
periodically wake up to do the polling.
- rcutree.blimit= [KNL,BOOT]
+ rcutree.blimit= [KNL]
Set maximum number of finished RCU callbacks to process
in one batch.
- rcutree.fanout_leaf= [KNL,BOOT]
+ rcutree.rcu_fanout_leaf= [KNL]
Increase the number of CPUs assigned to each
leaf rcu_node structure. Useful for very large
systems.
- rcutree.jiffies_till_first_fqs= [KNL,BOOT]
+ rcutree.jiffies_till_first_fqs= [KNL]
Set delay from grace-period initialization to
first attempt to force quiescent states.
Units are jiffies, minimum value is zero,
and maximum value is HZ.
- rcutree.jiffies_till_next_fqs= [KNL,BOOT]
+ rcutree.jiffies_till_next_fqs= [KNL]
Set delay between subsequent attempts to force
quiescent states. Units are jiffies, minimum
value is one, and maximum value is HZ.
- rcutree.qhimark= [KNL,BOOT]
+ rcutree.qhimark= [KNL]
Set threshold of queued
RCU callbacks over which batch limiting is disabled.
- rcutree.qlowmark= [KNL,BOOT]
+ rcutree.qlowmark= [KNL]
Set threshold of queued RCU callbacks below which
batch limiting is re-enabled.
- rcutree.rcu_cpu_stall_suppress= [KNL,BOOT]
- Suppress RCU CPU stall warning messages.
-
- rcutree.rcu_cpu_stall_timeout= [KNL,BOOT]
- Set timeout for RCU CPU stall warning messages.
-
- rcutree.rcu_idle_gp_delay= [KNL,BOOT]
+ rcutree.rcu_idle_gp_delay= [KNL]
Set wakeup interval for idle CPUs that have
RCU callbacks (RCU_FAST_NO_HZ=y).
- rcutree.rcu_idle_lazy_gp_delay= [KNL,BOOT]
+ rcutree.rcu_idle_lazy_gp_delay= [KNL]
Set wakeup interval for idle CPUs that have
only "lazy" RCU callbacks (RCU_FAST_NO_HZ=y).
Lazy RCU callbacks are those which RCU can
prove do nothing more than free memory.
- rcutorture.fqs_duration= [KNL,BOOT]
+ rcutorture.fqs_duration= [KNL]
Set duration of force_quiescent_state bursts.
- rcutorture.fqs_holdoff= [KNL,BOOT]
+ rcutorture.fqs_holdoff= [KNL]
Set holdoff time within force_quiescent_state bursts.
- rcutorture.fqs_stutter= [KNL,BOOT]
+ rcutorture.fqs_stutter= [KNL]
Set wait time between force_quiescent_state bursts.
- rcutorture.irqreader= [KNL,BOOT]
- Test RCU readers from irq handlers.
+ rcutorture.gp_exp= [KNL]
+ Use expedited update-side primitives.
- rcutorture.n_barrier_cbs= [KNL,BOOT]
+ rcutorture.gp_normal= [KNL]
+ Use normal (non-expedited) update-side primitives.
+ If both gp_exp and gp_normal are set, do both.
+ If neither gp_exp nor gp_normal are set, still
+ do both.
+
+ rcutorture.n_barrier_cbs= [KNL]
Set callbacks/threads for rcu_barrier() testing.
- rcutorture.nfakewriters= [KNL,BOOT]
+ rcutorture.nfakewriters= [KNL]
Set number of concurrent RCU writers. These just
stress RCU, they don't participate in the actual
test, hence the "fake".
- rcutorture.nreaders= [KNL,BOOT]
+ rcutorture.nreaders= [KNL]
Set number of RCU readers.
- rcutorture.onoff_holdoff= [KNL,BOOT]
+ rcutorture.object_debug= [KNL]
+ Enable debug-object double-call_rcu() testing.
+
+ rcutorture.onoff_holdoff= [KNL]
Set time (s) after boot for CPU-hotplug testing.
- rcutorture.onoff_interval= [KNL,BOOT]
+ rcutorture.onoff_interval= [KNL]
Set time (s) between CPU-hotplug operations, or
zero to disable CPU-hotplug testing.
- rcutorture.shuffle_interval= [KNL,BOOT]
+ rcutorture.rcutorture_runnable= [BOOT]
+ Start rcutorture running at boot time.
+
+ rcutorture.shuffle_interval= [KNL]
Set task-shuffle interval (s). Shuffling tasks
allows some CPUs to go into dyntick-idle mode
during the rcutorture test.
- rcutorture.shutdown_secs= [KNL,BOOT]
+ rcutorture.shutdown_secs= [KNL]
Set time (s) after boot system shutdown. This
is useful for hands-off automated testing.
- rcutorture.stall_cpu= [KNL,BOOT]
+ rcutorture.stall_cpu= [KNL]
Duration of CPU stall (s) to test RCU CPU stall
warnings, zero to disable.
- rcutorture.stall_cpu_holdoff= [KNL,BOOT]
+ rcutorture.stall_cpu_holdoff= [KNL]
Time to wait (s) after boot before inducing stall.
- rcutorture.stat_interval= [KNL,BOOT]
+ rcutorture.stat_interval= [KNL]
Time (s) between statistics printk()s.
- rcutorture.stutter= [KNL,BOOT]
+ rcutorture.stutter= [KNL]
Time (s) to stutter testing, for example, specifying
five seconds causes the test to run for five seconds,
wait for five seconds, and so on. This tests RCU's
ability to transition abruptly to and from idle.
- rcutorture.test_boost= [KNL,BOOT]
+ rcutorture.test_boost= [KNL]
Test RCU priority boosting? 0=no, 1=maybe, 2=yes.
"Maybe" means test if the RCU implementation
under test supports RCU priority boosting.
- rcutorture.test_boost_duration= [KNL,BOOT]
+ rcutorture.test_boost_duration= [KNL]
Duration (s) of each individual boost test.
- rcutorture.test_boost_interval= [KNL,BOOT]
+ rcutorture.test_boost_interval= [KNL]
Interval (s) between each boost test.
- rcutorture.test_no_idle_hz= [KNL,BOOT]
+ rcutorture.test_no_idle_hz= [KNL]
Test RCU's dyntick-idle handling. See also the
rcutorture.shuffle_interval parameter.
- rcutorture.torture_type= [KNL,BOOT]
+ rcutorture.torture_type= [KNL]
Specify the RCU implementation to test.
- rcutorture.verbose= [KNL,BOOT]
+ rcutorture.verbose= [KNL]
Enable additional printk() statements.
+ rcupdate.rcu_expedited= [KNL]
+ Use expedited grace-period primitives, for
+ example, synchronize_rcu_expedited() instead
+ of synchronize_rcu(). This reduces latency,
+ but can increase CPU utilization, degrade
+ real-time latency, and degrade energy efficiency.
+
+ rcupdate.rcu_cpu_stall_suppress= [KNL]
+ Suppress RCU CPU stall warning messages.
+
+ rcupdate.rcu_cpu_stall_timeout= [KNL]
+ Set timeout for RCU CPU stall warning messages.
+
rdinit= [KNL]
Format: <full_path>
Run specified binary instead of /init from the ramdisk,
@@ -3471,11 +3505,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
default x2apic cluster mode on platforms
supporting x2apic.
- x86_mrst_timer= [X86-32,APBT]
- Choose timer option for x86 Moorestown MID platform.
+ x86_intel_mid_timer= [X86-32,APBT]
+ Choose timer option for x86 Intel MID platform.
Two valid options are apbt timer only and lapic timer
plus one apbt timer for broadcast timer.
- x86_mrst_timer=apbt_only | lapic_and_apbt
+ x86_intel_mid_timer=apbt_only | lapic_and_apbt
xen_emul_unplug= [HW,X86,XEN]
Unplug Xen emulated devices
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt
index 32351bfabf20..827104fb9364 100644
--- a/Documentation/kernel-per-CPU-kthreads.txt
+++ b/Documentation/kernel-per-CPU-kthreads.txt
@@ -181,12 +181,17 @@ To reduce its OS jitter, do any of the following:
make sure that this is safe on your particular system.
d. It is not possible to entirely get rid of OS jitter
from vmstat_update() on CONFIG_SMP=y systems, but you
- can decrease its frequency by writing a large value to
- /proc/sys/vm/stat_interval. The default value is HZ,
- for an interval of one second. Of course, larger values
- will make your virtual-memory statistics update more
- slowly. Of course, you can also run your workload at
- a real-time priority, thus preempting vmstat_update().
+ can decrease its frequency by writing a large value
+ to /proc/sys/vm/stat_interval. The default value is
+ HZ, for an interval of one second. Of course, larger
+ values will make your virtual-memory statistics update
+ more slowly. Of course, you can also run your workload
+ at a real-time priority, thus preempting vmstat_update(),
+ but if your workload is CPU-bound, this is a bad idea.
+ However, there is an RFC patch from Christoph Lameter
+ (based on an earlier one from Gilad Ben-Yossef) that
+ reduces or even eliminates vmstat overhead for some
+ workloads at https://lkml.org/lkml/2013/9/4/379.
e. If running on high-end powerpc servers, build with
CONFIG_PPC_RTAS_DAEMON=n. This prevents the RTAS
daemon from running on each CPU every second or so.
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 86c52360ffe7..fc04c14de4bb 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -1,7 +1,7 @@
ThinkPad ACPI Extras Driver
- Version 0.24
- December 11th, 2009
+ Version 0.25
+ October 16th, 2013
Borislav Deianov <borislav@users.sf.net>
Henrique de Moraes Holschuh <hmh@hmh.eng.br>
@@ -741,6 +741,9 @@ compiled with the CONFIG_THINKPAD_ACPI_UNSAFE_LEDS option enabled.
Distributions must never enable this option. Individual users that
are aware of the consequences are welcome to enable it.
+Audio mute and microphone mute LEDs are supported, but currently not
+visible to userspace. They are used by the snd-hda-intel audio driver.
+
procfs notes:
The available commands are:
diff --git a/Documentation/lockstat.txt b/Documentation/lockstat.txt
index dd2f7b26ca30..72d010689751 100644
--- a/Documentation/lockstat.txt
+++ b/Documentation/lockstat.txt
@@ -46,16 +46,14 @@ With these hooks we provide the following statistics:
contentions - number of lock acquisitions that had to wait
wait time min - shortest (non-0) time we ever had to wait for a lock
max - longest time we ever had to wait for a lock
- total - total time we spend waiting on this lock
+ total - total time we spend waiting on this lock
+ avg - average time spent waiting on this lock
acq-bounces - number of lock acquisitions that involved x-cpu data
acquisitions - number of times we took the lock
hold time min - shortest (non-0) time we ever held the lock
- max - longest time we ever held the lock
- total - total time this lock was held
-
-From these number various other statistics can be derived, such as:
-
- hold time average = hold time total / acquisitions
+ max - longest time we ever held the lock
+ total - total time this lock was held
+ avg - average time this lock was held
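
The avg columns are simply the corresponding total divided by the event count
(the same relation the removed "hold time average" note expressed). For
example, taking the unix_table_lock row shown further below, with times in
microseconds:

  waittime-avg = waittime-total / contentions  = 163.91   / 112    ~= 1.46
  holdtime-avg = holdtime-total / acquisitions = 31589.81 / 66312  ~= 0.48
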
These numbers are gathered per lock class, per read/write state (when
applicable).
@@ -84,37 +82,38 @@ Look at the current lock statistics:
# less /proc/lock_stat
-01 lock_stat version 0.3
-02 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-03 class name con-bounces contentions waittime-min waittime-max waittime-total acq-bounces acquisitions holdtime-min holdtime-max holdtime-total
-04 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+01 lock_stat version 0.4
+02-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+03 class name con-bounces contentions waittime-min waittime-max waittime-total waittime-avg acq-bounces acquisitions holdtime-min holdtime-max holdtime-total holdtime-avg
+04-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
05
-06 &mm->mmap_sem-W: 233 538 18446744073708 22924.27 607243.51 1342 45806 1.71 8595.89 1180582.34
-07 &mm->mmap_sem-R: 205 587 18446744073708 28403.36 731975.00 1940 412426 0.58 187825.45 6307502.88
-08 ---------------
-09 &mm->mmap_sem 487 [<ffffffff8053491f>] do_page_fault+0x466/0x928
-10 &mm->mmap_sem 179 [<ffffffff802a6200>] sys_mprotect+0xcd/0x21d
-11 &mm->mmap_sem 279 [<ffffffff80210a57>] sys_mmap+0x75/0xce
-12 &mm->mmap_sem 76 [<ffffffff802a490b>] sys_munmap+0x32/0x59
-13 ---------------
-14 &mm->mmap_sem 270 [<ffffffff80210a57>] sys_mmap+0x75/0xce
-15 &mm->mmap_sem 431 [<ffffffff8053491f>] do_page_fault+0x466/0x928
-16 &mm->mmap_sem 138 [<ffffffff802a490b>] sys_munmap+0x32/0x59
-17 &mm->mmap_sem 145 [<ffffffff802a6200>] sys_mprotect+0xcd/0x21d
+06 &mm->mmap_sem-W: 46 84 0.26 939.10 16371.53 194.90 47291 2922365 0.16 2220301.69 17464026916.32 5975.99
+07 &mm->mmap_sem-R: 37 100 1.31 299502.61 325629.52 3256.30 212344 34316685 0.10 7744.91 95016910.20 2.77
+08 ---------------
+09 &mm->mmap_sem 1 [<ffffffff811502a7>] khugepaged_scan_mm_slot+0x57/0x280
+10 &mm->mmap_sem 96 [<ffffffff815351c4>] __do_page_fault+0x1d4/0x510
+11 &mm->mmap_sem 34 [<ffffffff81113d77>] vm_mmap_pgoff+0x87/0xd0
+12 &mm->mmap_sem 17 [<ffffffff81127e71>] vm_munmap+0x41/0x80
+13 ---------------
+14 &mm->mmap_sem 1 [<ffffffff81046fda>] dup_mmap+0x2a/0x3f0
+15 &mm->mmap_sem 60 [<ffffffff81129e29>] SyS_mprotect+0xe9/0x250
+16 &mm->mmap_sem 41 [<ffffffff815351c4>] __do_page_fault+0x1d4/0x510
+17 &mm->mmap_sem 68 [<ffffffff81113d77>] vm_mmap_pgoff+0x87/0xd0
18
-19 ...............................................................................................................................................................................................
+19.............................................................................................................................................................................................................................
20
-21 dcache_lock: 621 623 0.52 118.26 1053.02 6745 91930 0.29 316.29 118423.41
-22 -----------
-23 dcache_lock 179 [<ffffffff80378274>] _atomic_dec_and_lock+0x34/0x54
-24 dcache_lock 113 [<ffffffff802cc17b>] d_alloc+0x19a/0x1eb
-25 dcache_lock 99 [<ffffffff802ca0dc>] d_rehash+0x1b/0x44
-26 dcache_lock 104 [<ffffffff802cbca0>] d_instantiate+0x36/0x8a
-27 -----------
-28 dcache_lock 192 [<ffffffff80378274>] _atomic_dec_and_lock+0x34/0x54
-29 dcache_lock 98 [<ffffffff802ca0dc>] d_rehash+0x1b/0x44
-30 dcache_lock 72 [<ffffffff802cc17b>] d_alloc+0x19a/0x1eb
-31 dcache_lock 112 [<ffffffff802cbca0>] d_instantiate+0x36/0x8a
+21 unix_table_lock: 110 112 0.21 49.24 163.91 1.46 21094 66312 0.12 624.42 31589.81 0.48
+22 ---------------
+23 unix_table_lock 45 [<ffffffff8150ad8e>] unix_create1+0x16e/0x1b0
+24 unix_table_lock 47 [<ffffffff8150b111>] unix_release_sock+0x31/0x250
+25 unix_table_lock 15 [<ffffffff8150ca37>] unix_find_other+0x117/0x230
+26 unix_table_lock 5 [<ffffffff8150a09f>] unix_autobind+0x11f/0x1b0
+27 ---------------
+28 unix_table_lock 39 [<ffffffff8150b111>] unix_release_sock+0x31/0x250
+29 unix_table_lock 49 [<ffffffff8150ad8e>] unix_create1+0x16e/0x1b0
+30 unix_table_lock 20 [<ffffffff8150ca37>] unix_find_other+0x117/0x230
+31 unix_table_lock 4 [<ffffffff8150a09f>] unix_autobind+0x11f/0x1b0
+
This excerpt shows the first two lock class statistics. Line 01 shows the
output version - each time the format changes this will be updated. Line 02-04
@@ -131,30 +130,30 @@ The integer part of the time values is in us.
Dealing with nested locks, subclasses may appear:
-32...............................................................................................................................................................................................
+32...........................................................................................................................................................................................................................
33
-34 &rq->lock: 13128 13128 0.43 190.53 103881.26 97454 3453404 0.00 401.11 13224683.11
+34 &rq->lock: 13128 13128 0.43 190.53 103881.26 7.91 97454 3453404 0.00 401.11 13224683.11 3.82
35 ---------
-36 &rq->lock 645 [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
-37 &rq->lock 297 [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
-38 &rq->lock 360 [<ffffffff8103c4c5>] select_task_rq_fair+0x1f0/0x74a
-39 &rq->lock 428 [<ffffffff81045f98>] scheduler_tick+0x46/0x1fb
+36 &rq->lock 645 [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
+37 &rq->lock 297 [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+38 &rq->lock 360 [<ffffffff8103c4c5>] select_task_rq_fair+0x1f0/0x74a
+39 &rq->lock 428 [<ffffffff81045f98>] scheduler_tick+0x46/0x1fb
40 ---------
-41 &rq->lock 77 [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
-42 &rq->lock 174 [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
-43 &rq->lock 4715 [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
-44 &rq->lock 893 [<ffffffff81340524>] schedule+0x157/0x7b8
+41 &rq->lock 77 [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
+42 &rq->lock 174 [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+43 &rq->lock 4715 [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
+44 &rq->lock 893 [<ffffffff81340524>] schedule+0x157/0x7b8
45
-46...............................................................................................................................................................................................
+46...........................................................................................................................................................................................................................
47
-48 &rq->lock/1: 11526 11488 0.33 388.73 136294.31 21461 38404 0.00 37.93 109388.53
+48 &rq->lock/1: 11526 11488 0.33 388.73 136294.31 11.86 21461 38404 0.00 37.93 109388.53 2.84
49 -----------
-50 &rq->lock/1 11526 [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
+50 &rq->lock/1 11526 [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
51 -----------
-52 &rq->lock/1 5645 [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
-53 &rq->lock/1 1224 [<ffffffff81340524>] schedule+0x157/0x7b8
-54 &rq->lock/1 4336 [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
-55 &rq->lock/1 181 [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+52 &rq->lock/1 5645 [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
+53 &rq->lock/1 1224 [<ffffffff81340524>] schedule+0x157/0x7b8
+54 &rq->lock/1 4336 [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
+55 &rq->lock/1 181 [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
Line 48 shows statistics for the second subclass (/1) of &rq->lock class
(subclass starts from 0), since in this case, as line 50 suggests,
@@ -163,16 +162,16 @@ double_rq_lock actually acquires a nested lock of two spinlocks.
View the top contending locks:
# grep : /proc/lock_stat | head
- &inode->i_data.tree_lock-W: 15 21657 0.18 1093295.30 11547131054.85 58 10415 0.16 87.51 6387.60
- &inode->i_data.tree_lock-R: 0 0 0.00 0.00 0.00 23302 231198 0.25 8.45 98023.38
- dcache_lock: 1037 1161 0.38 45.32 774.51 6611 243371 0.15 306.48 77387.24
- &inode->i_mutex: 161 286 18446744073709 62882.54 1244614.55 3653 20598 18446744073709 62318.60 1693822.74
- &zone->lru_lock: 94 94 0.53 7.33 92.10 4366 32690 0.29 59.81 16350.06
- &inode->i_data.i_mmap_mutex: 79 79 0.40 3.77 53.03 11779 87755 0.28 116.93 29898.44
- &q->__queue_lock: 48 50 0.52 31.62 86.31 774 13131 0.17 113.08 12277.52
- &rq->rq_lock_key: 43 47 0.74 68.50 170.63 3706 33929 0.22 107.99 17460.62
- &rq->rq_lock_key#2: 39 46 0.75 6.68 49.03 2979 32292 0.17 125.17 17137.63
- tasklist_lock-W: 15 15 1.45 10.87 32.70 1201 7390 0.58 62.55 13648.47
+ clockevents_lock: 2926159 2947636 0.15 46882.81 1784540466.34 605.41 3381345 3879161 0.00 2260.97 53178395.68 13.71
+ tick_broadcast_lock: 346460 346717 0.18 2257.43 39364622.71 113.54 3642919 4242696 0.00 2263.79 49173646.60 11.59
+ &mapping->i_mmap_mutex: 203896 203899 3.36 645530.05 31767507988.39 155800.21 3361776 8893984 0.17 2254.15 14110121.02 1.59
+ &rq->lock: 135014 136909 0.18 606.09 842160.68 6.15 1540728 10436146 0.00 728.72 17606683.41 1.69
+ &(&zone->lru_lock)->rlock: 93000 94934 0.16 59.18 188253.78 1.98 1199912 3809894 0.15 391.40 3559518.81 0.93
+ tasklist_lock-W: 40667 41130 0.23 1189.42 428980.51 10.43 270278 510106 0.16 653.51 3939674.91 7.72
+ tasklist_lock-R: 21298 21305 0.20 1310.05 215511.12 10.12 186204 241258 0.14 1162.33 1179779.23 4.89
+ rcu_node_1: 47656 49022 0.16 635.41 193616.41 3.95 844888 1865423 0.00 764.26 1656226.96 0.89
+ &(&dentry->d_lockref.lock)->rlock: 39791 40179 0.15 1302.08 88851.96 2.21 2790851 12527025 0.10 1910.75 3379714.27 0.27
+ rcu_node_0: 29203 30064 0.16 786.55 1555573.00 51.74 88963 244254 0.00 398.87 428872.51 1.76
Clear the statistics:
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index c1d82047a4b1..89490beb3c0b 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -69,8 +69,7 @@ folder:
# aggregated_ogms gw_bandwidth log_level
# ap_isolation gw_mode orig_interval
# bonding gw_sel_class routing_algo
-# bridge_loop_avoidance hop_penalty vis_mode
-# fragmentation
+# bridge_loop_avoidance hop_penalty fragmentation
There is a special folder for debugging information:
@@ -78,7 +77,7 @@ There is a special folder for debugging information:
# ls /sys/kernel/debug/batman_adv/bat0/
# bla_backbone_table log transtable_global
# bla_claim_table originators transtable_local
-# gateways socket vis_data
+# gateways socket
Some of the files contain all sort of status information regard-
ing the mesh network. For example, you can view the table of
@@ -127,51 +126,6 @@ ously assigned to interfaces now used by batman advanced, e.g.
# ifconfig eth0 0.0.0.0
-VISUALIZATION
--------------
-
-If you want topology visualization, at least one mesh node must
-be configured as VIS-server:
-
-# echo "server" > /sys/class/net/bat0/mesh/vis_mode
-
-Each node is either configured as "server" or as "client" (de-
-fault: "client"). Clients send their topology data to the server
-next to them, and server synchronize with other servers. If there
-is no server configured (default) within the mesh, no topology
-information will be transmitted. With these "synchronizing
-servers", there can be 1 or more vis servers sharing the same (or
-at least very similar) data.
-
-When configured as server, you can get a topology snapshot of
-your mesh:
-
-# cat /sys/kernel/debug/batman_adv/bat0/vis_data
-
-This raw output is intended to be easily parsable and convertable
-with other tools. Have a look at the batctl README if you want a
-vis output in dot or json format for instance and how those out-
-puts could then be visualised in an image.
-
-The raw format consists of comma separated values per entry where
-each entry is giving information about a certain source inter-
-face. Each entry can/has to have the following values:
--> "mac" - mac address of an originator's source interface
- (each line begins with it)
--> "TQ mac value" - src mac's link quality towards mac address
- of a neighbor originator's interface which
- is being used for routing
--> "TT mac" - TT announced by source mac
--> "PRIMARY" - this is a primary interface
--> "SEC mac" - secondary mac address of source
- (requires preceding PRIMARY)
-
-The TQ value has a range from 4 to 255 with 255 being the best.
-The TT entries are showing which hosts are connected to the mesh
-via bat0 or being bridged into the mesh network. The PRIMARY/SEC
-values are only applied on primary interfaces
-
-
LOGGING/DEBUGGING
-----------------
@@ -245,5 +199,5 @@ Mailing-list: b.a.t.m.a.n@open-mesh.org (optional subscription
You can also contact the Authors:
-Marek Lindner <lindner_marek@yahoo.de>
-Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Marek Lindner <mareklindner@neomailbox.ch>
+Simon Wunderlich <sw@simonwunderlich.de>
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 9b28e714831a..3856ed2c45a9 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -743,21 +743,16 @@ xmit_hash_policy
protocol information to generate the hash.
Uses XOR of hardware MAC addresses and IP addresses to
- generate the hash. The IPv4 formula is
+ generate the hash. The formula is
- (((source IP XOR dest IP) AND 0xffff) XOR
- ( source MAC XOR destination MAC ))
- modulo slave count
+ hash = source MAC XOR destination MAC
+ hash = hash XOR source IP XOR destination IP
+ hash = hash XOR (hash RSHIFT 16)
+ hash = hash XOR (hash RSHIFT 8)
+ And then hash is reduced modulo slave count.
- The IPv6 formula is
-
- hash = (source ip quad 2 XOR dest IP quad 2) XOR
- (source ip quad 3 XOR dest IP quad 3) XOR
- (source ip quad 4 XOR dest IP quad 4)
-
- (((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
- XOR (source MAC XOR destination MAC))
- modulo slave count
+ If the protocol is IPv6 then the source and destination
+ addresses are first hashed using ipv6_addr_hash.
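
As a hedged sketch of the arithmetic described above (for IPv4 only; the
helper name, types and the byte-wise folding of the MAC addresses are
assumptions for illustration, not the bonding driver's actual code):

  #include <stdint.h>

  static uint32_t l23_hash(const uint8_t *src_mac, const uint8_t *dst_mac,
                           uint32_t src_ip, uint32_t dst_ip, int nslaves)
  {
          uint32_t hash = 0;
          int i;

          /* "source MAC XOR destination MAC"; folding the six bytes
           * together is an assumption made only for this sketch */
          for (i = 0; i < 6; i++)
                  hash ^= src_mac[i] ^ dst_mac[i];
          hash ^= src_ip ^ dst_ip;        /* XOR in the IP addresses */
          hash ^= hash >> 16;             /* fold upper bits down */
          hash ^= hash >> 8;
          return hash % nslaves;          /* reduce modulo slave count */
  }

The layer3+4 policy described below uses the same folding, with the source
and destination ports XORed in as well; for IPv6, the addresses would first
be mixed via ipv6_addr_hash().
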
This algorithm will place all traffic to a particular
network peer on the same slave. For non-IP traffic,
@@ -779,21 +774,16 @@ xmit_hash_policy
slaves, although a single connection will not span
multiple slaves.
- The formula for unfragmented IPv4 TCP and UDP packets is
-
- ((source port XOR dest port) XOR
- ((source IP XOR dest IP) AND 0xffff)
- modulo slave count
+ The formula for unfragmented TCP and UDP packets is
- The formula for unfragmented IPv6 TCP and UDP packets is
+ hash = source port, destination port (as in the header)
+ hash = hash XOR source IP XOR destination IP
+ hash = hash XOR (hash RSHIFT 16)
+ hash = hash XOR (hash RSHIFT 8)
+ And then hash is reduced modulo slave count.
- hash = (source port XOR dest port) XOR
- ((source ip quad 2 XOR dest IP quad 2) XOR
- (source ip quad 3 XOR dest IP quad 3) XOR
- (source ip quad 4 XOR dest IP quad 4))
-
- ((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
- modulo slave count
+ If the protocol is IPv6 then the source and destination
+ addresses are first hashed using ipv6_addr_hash.
For fragmented TCP or UDP packets and all other IPv4 and
IPv6 protocol traffic, the source and destination port
@@ -801,10 +791,6 @@ xmit_hash_policy
formula is the same as for the layer2 transmit hash
policy.
- The IPv4 policy is intended to mimic the behavior of
- certain switches, notably Cisco switches with PFC2 as
- well as some Foundry and IBM products.
-
This algorithm is not fully 802.3ad compliant. A
single TCP or UDP conversation containing both
fragmented and unfragmented packets will see packets
@@ -815,6 +801,26 @@ xmit_hash_policy
conversations. Other implementations of 802.3ad may
or may not tolerate this noncompliance.
+ encap2+3
+
+ This policy uses the same formula as layer2+3 but it
+ relies on skb_flow_dissect to obtain the header fields
+ which might result in the use of inner headers if an
+ encapsulation protocol is used. For example this will
+ improve the performance for tunnel users because the
+ packets will be distributed according to the encapsulated
+ flows.
+
+ encap3+4
+
+ This policy uses the same formula as layer3+4 but it
+ relies on skb_flow_dissect to obtain the header fields
+ which might result in the use of inner headers if an
+ encapsulation protocol is used. For example this will
+ improve the performance for tunnel users because the
+ packets will be distributed according to the encapsulated
+ flows.
+
The default value is layer2. This option was added in bonding
version 2.6.3. In earlier versions of bonding, this parameter
does not exist, and the layer2 policy is the only policy. The
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index d718bc2ff1cf..bf5dbe3ab8c5 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -18,8 +18,8 @@ Introduction
Datagram Congestion Control Protocol (DCCP) is an unreliable, connection
oriented protocol designed to solve issues present in UDP and TCP, particularly
for real-time and multimedia (streaming) traffic.
-It divides into a base protocol (RFC 4340) and plugable congestion control
-modules called CCIDs. Like plugable TCP congestion control, at least one CCID
+It divides into a base protocol (RFC 4340) and pluggable congestion control
+modules called CCIDs. Like pluggable TCP congestion control, at least one CCID
needs to be enabled in order for the protocol to function properly. In the Linux
implementation, this is the TCP-like CCID2 (RFC 4341). Additional CCIDs, such as
the TCP-friendly CCID3 (RFC 4342), are optional.
diff --git a/Documentation/networking/e100.txt b/Documentation/networking/e100.txt
index 13a32124bca0..f862cf3aff34 100644
--- a/Documentation/networking/e100.txt
+++ b/Documentation/networking/e100.txt
@@ -103,7 +103,7 @@ Additional Configurations
PRO/100 Family of Adapters is e100.
As an example, if you install the e100 driver for two PRO/100 adapters
- (eth0 and eth1), add the following to a configuraton file in /etc/modprobe.d/
+ (eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/
alias eth0 e100
alias eth1 e100
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index 09eb57329f11..22bbc7225f8e 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -4,7 +4,7 @@
Introduction
============
-The IEEE 802.15.4 working group focuses on standartization of bottom
+The IEEE 802.15.4 working group focuses on standardization of bottom
two layers: Medium Access Control (MAC) and Physical (PHY). And there
are mainly two options available for upper layers:
- ZigBee - proprietary protocol from ZigBee Alliance
@@ -66,7 +66,7 @@ net_device, with .type = ARPHRD_IEEE802154. Data is exchanged with socket family
code via plain sk_buffs. On skb reception skb->cb must contain additional
info as described in the struct ieee802154_mac_cb. During packet transmission
the skb->cb is used to provide additional data to device's header_ops->create
-function. Be aware, that this data can be overriden later (when socket code
+function. Be aware that this data can be overridden later (when socket code
submits skb to qdisc), so if you need something from that cb later, you should
store info in the skb->data on your own.
diff --git a/Documentation/networking/l2tp.txt b/Documentation/networking/l2tp.txt
index e63fc1f7bf87..c74434de2fa5 100644
--- a/Documentation/networking/l2tp.txt
+++ b/Documentation/networking/l2tp.txt
@@ -197,7 +197,7 @@ state information because the file format is subject to change. It is
implemented to provide extra debug information to help diagnose
problems.) Users should use the netlink API.
-/proc/net/pppol2tp is also provided for backwards compaibility with
+/proc/net/pppol2tp is also provided for backwards compatibility with
the original pppol2tp driver. It lists information about L2TPv2
tunnels and sessions only. Its use is discouraged.
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
index d9112f01c44a..0fe1c6e0dbcd 100644
--- a/Documentation/networking/netdev-FAQ.txt
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -4,23 +4,23 @@ Information you need to know about netdev
Q: What is netdev?
-A: It is a mailing list for all network related linux stuff. This includes
+A: It is a mailing list for all network-related Linux stuff. This includes
anything found under net/ (i.e. core code like IPv6) and drivers/net
- (i.e. hardware specific drivers) in the linux source tree.
+ (i.e. hardware specific drivers) in the Linux source tree.
Note that some subsystems (e.g. wireless drivers) which have a high volume
of traffic have their own specific mailing lists.
- The netdev list is managed (like many other linux mailing lists) through
+ The netdev list is managed (like many other Linux mailing lists) through
VGER ( http://vger.kernel.org/ ) and archives can be found below:
http://marc.info/?l=linux-netdev
http://www.spinics.net/lists/netdev/
- Aside from subsystems like that mentioned above, all network related linux
- development (i.e. RFC, review, comments, etc) takes place on netdev.
+ Aside from subsystems like that mentioned above, all network-related Linux
+ development (i.e. RFC, review, comments, etc.) takes place on netdev.
-Q: How do the changes posted to netdev make their way into linux?
+Q: How do the changes posted to netdev make their way into Linux?
A: There are always two trees (git repositories) in play. Both are driven
by David Miller, the main network maintainer. There is the "net" tree,
@@ -35,7 +35,7 @@ A: There are always two trees (git repositories) in play. Both are driven
Q: How often do changes from these trees make it to the mainline Linus tree?
A: To understand this, you need to know a bit of background information
- on the cadence of linux development. Each new release starts off with
+ on the cadence of Linux development. Each new release starts off with
a two week "merge window" where the main maintainers feed their new
stuff to Linus for merging into the mainline tree. After the two weeks,
the merge window is closed, and it is called/tagged "-rc1". No new
@@ -46,7 +46,7 @@ A: To understand this, you need to know a bit of background information
things are in a state of churn), and a week after the last vX.Y-rcN
was done, the official "vX.Y" is released.
- Relating that to netdev: At the beginning of the 2 week merge window,
+ Relating that to netdev: At the beginning of the 2-week merge window,
the net-next tree will be closed - no new changes/features. The
accumulated new content of the past ~10 weeks will be passed onto
mainline/Linus via a pull request for vX.Y -- at the same time,
@@ -59,16 +59,16 @@ A: To understand this, you need to know a bit of background information
IMPORTANT: Do not send new net-next content to netdev during the
period during which net-next tree is closed.
- Shortly after the two weeks have passed, (and vX.Y-rc1 is released) the
+ Shortly after the two weeks have passed (and vX.Y-rc1 is released), the
tree for net-next reopens to collect content for the next (vX.Y+1) release.
If you aren't subscribed to netdev and/or are simply unsure if net-next
has re-opened yet, simply check the net-next git repository link above for
- any new networking related commits.
+ any new networking-related commits.
The "net" tree continues to collect fixes for the vX.Y content, and
is fed back to Linus at regular (~weekly) intervals. Meaning that the
- focus for "net" is on stablilization and bugfixes.
+ focus for "net" is on stabilization and bugfixes.
Finally, the vX.Y gets released, and the whole cycle starts over.
@@ -217,7 +217,7 @@ A: Attention to detail. Re-read your own work as if you were the
to why it happens, and then if necessary, explain why the fix proposed
is the best way to get things done. Don't mangle whitespace, and as
is common, don't mis-indent function arguments that span multiple lines.
- If it is your 1st patch, mail it to yourself so you can test apply
+ If it is your first patch, mail it to yourself so you can test apply
it to an unpatched tree to confirm infrastructure didn't mangle it.
Finally, go back and read Documentation/SubmittingPatches to be
diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
index c7ecc7080494..0b1cf6b2a592 100644
--- a/Documentation/networking/netdevices.txt
+++ b/Documentation/networking/netdevices.txt
@@ -10,12 +10,12 @@ network devices.
struct net_device allocation rules
==================================
Network device structures need to persist even after module is unloaded and
-must be allocated with kmalloc. If device has registered successfully,
-it will be freed on last use by free_netdev. This is required to handle the
-pathologic case cleanly (example: rmmod mydriver </sys/class/net/myeth/mtu )
+must be allocated with alloc_netdev_mqs() and friends.
+If device has registered successfully, it will be freed on last use
+by free_netdev(). This is required to handle the pathological case cleanly
+(example: rmmod mydriver </sys/class/net/myeth/mtu )
-There are routines in net_init.c to handle the common cases of
-alloc_etherdev, alloc_netdev. These reserve extra space for driver
+alloc_netdev_mqs()/alloc_netdev() reserve extra space for driver
private data which gets freed when the network device is freed. If
separately allocated data is attached to the network device
(netdev_priv(dev)) then it is up to the module exit handler to free that.
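
A minimal sketch of the allocation pattern described above ("mydrv" and its
private struct are made-up names; setup of netdev_ops and friends is omitted
for brevity):

  #include <linux/module.h>
  #include <linux/netdevice.h>
  #include <linux/etherdevice.h>

  struct mydrv_priv {
          int some_state;
  };

  static struct net_device *mydrv_dev;

  static int __init mydrv_init(void)
  {
          struct mydrv_priv *priv;

          /* alloc_etherdev() wraps alloc_netdev_mqs(); the extra space
           * holds the driver-private data returned by netdev_priv() */
          mydrv_dev = alloc_etherdev(sizeof(*priv));
          if (!mydrv_dev)
                  return -ENOMEM;
          priv = netdev_priv(mydrv_dev);
          priv->some_state = 0;

          if (register_netdev(mydrv_dev)) {
                  free_netdev(mydrv_dev);  /* never registered: free directly */
                  return -ENODEV;
          }
          return 0;
  }

  static void __exit mydrv_exit(void)
  {
          unregister_netdev(mydrv_dev);
          free_netdev(mydrv_dev);          /* freed on last use; priv goes with it */
  }

  module_init(mydrv_init);
  module_exit(mydrv_exit);
  MODULE_LICENSE("GPL");
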
diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt
index 533378839546..b26122973525 100644
--- a/Documentation/networking/netlink_mmap.txt
+++ b/Documentation/networking/netlink_mmap.txt
@@ -45,7 +45,7 @@ processing.
Conversion of the reception path involves calling poll() on the file
descriptor, once the socket is readable the frames from the ring are
-processsed in order until no more messages are available, as indicated by
+processed in order until no more messages are available, as indicated by
a status word in the frame header.
On kernel side, in order to make use of memory mapped I/O on receive, the
@@ -56,7 +56,7 @@ Dumps of kernel databases automatically support memory mapped I/O.
Conversion of the transmit path involves changing message construction to
use memory from the TX ring instead of (usually) a buffer declared on the
-stack and setting up the frame header approriately. Optionally poll() can
+stack and setting up the frame header appropriately. Optionally poll() can
be used to wait for free frames in the TX ring.
Structures and definitions for using memory mapped I/O are contained in
@@ -231,7 +231,7 @@ Ring setup:
if (setsockopt(fd, NETLINK_TX_RING, &req, sizeof(req)) < 0)
exit(1)
- /* Calculate size of each invididual ring */
+ /* Calculate size of each individual ring */
ring_size = req.nm_block_nr * req.nm_block_size;
/* Map RX/TX rings. The TX ring is located after the RX ring */
diff --git a/Documentation/networking/operstates.txt b/Documentation/networking/operstates.txt
index 97694572338b..355c6d8ef8ad 100644
--- a/Documentation/networking/operstates.txt
+++ b/Documentation/networking/operstates.txt
@@ -89,8 +89,8 @@ packets. The name 'carrier' and the inversion are historical, think of
it as lower layer.
Note that for certain kind of soft-devices, which are not managing any
-real hardware, there is possible to set this bit from userpsace.
-One should use TVL IFLA_CARRIER to do so.
+real hardware, it is possible to set this bit from userspace. One
+should use TLV IFLA_CARRIER to do so.
netif_carrier_ok() can be used to query that bit.
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 60d05eb77c64..b89bc82eed46 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -144,7 +144,7 @@ An overview of the RxRPC protocol:
(*) Calls use ACK packets to handle reliability. Data packets are also
explicitly sequenced per call.
- (*) There are two types of positive acknowledgement: hard-ACKs and soft-ACKs.
+ (*) There are two types of positive acknowledgment: hard-ACKs and soft-ACKs.
A hard-ACK indicates to the far side that all the data received to a point
has been received and processed; a soft-ACK indicates that the data has
been received but may yet be discarded and re-requested. The sender may
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 457b8bbafb08..cdd916da838d 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -160,7 +160,7 @@ Where:
o pmt: core has the embedded power module (optional).
o force_sf_dma_mode: force DMA to use the Store and Forward mode
instead of the Threshold.
- o force_thresh_dma_mode: force DMA to use the Shreshold mode other than
+ o force_thresh_dma_mode: force DMA to use the Threshold mode other than
the Store and Forward mode.
o riwt_off: force to disable the RX watchdog feature and switch to NAPI mode.
o fix_mac_speed: this callback is used for modifying some syscfg registers
@@ -175,7 +175,7 @@ Where:
registers.
o custom_cfg/custom_data: this is a custom configuration that can be passed
while initializing the resources.
- o bsp_priv: another private poiter.
+ o bsp_priv: another private pointer.
For MDIO bus The we have:
@@ -271,7 +271,7 @@ reset procedure etc).
o dwmac1000_dma.c: dma functions for the GMAC chip;
o dwmac1000.h: specific header file for the GMAC;
o dwmac100_core: MAC 100 core and dma code;
- o dwmac100_dma.c: dma funtions for the MAC chip;
+ o dwmac100_dma.c: dma functions for the MAC chip;
o dwmac1000.h: specific header file for the MAC;
o dwmac_lib.c: generic DMA functions shared among chips;
o enh_desc.c: functions for handling enhanced descriptors;
@@ -364,4 +364,4 @@ Auto-negotiated Link Parter Ability.
10) TODO:
o XGMAC is not supported.
o Complete the TBI & RTBI support.
- o extened VLAN support for 3.70a SYNP GMAC.
+ o extend VLAN support for 3.70a SYNP GMAC.
diff --git a/Documentation/networking/vortex.txt b/Documentation/networking/vortex.txt
index 9a8041dcbb53..97282da82b75 100644
--- a/Documentation/networking/vortex.txt
+++ b/Documentation/networking/vortex.txt
@@ -68,7 +68,7 @@ Module parameters
There are several parameters which may be provided to the driver when
its module is loaded. These are usually placed in /etc/modprobe.d/*.conf
-configuretion files. Example:
+configuration files. Example:
options 3c59x debug=3 rx_copybreak=300
@@ -178,7 +178,7 @@ max_interrupt_work=N
The driver's interrupt service routine can handle many receive and
transmit packets in a single invocation. It does this in a loop.
- The value of max_interrupt_work governs how mnay times the interrupt
+ The value of max_interrupt_work governs how many times the interrupt
service routine will loop. The default value is 32 loops. If this
is exceeded the interrupt service routine gives up and generates a
warning message "eth0: Too much work in interrupt".
diff --git a/Documentation/networking/x25-iface.txt b/Documentation/networking/x25-iface.txt
index 78f662ee0622..7f213b556e85 100644
--- a/Documentation/networking/x25-iface.txt
+++ b/Documentation/networking/x25-iface.txt
@@ -105,7 +105,7 @@ reduced by the following measures or a combination thereof:
later.
The lapb module interface was modified to support this. Its
data_indication() method should now transparently pass the
- netif_rx() return value to the (lapb mopdule) caller.
+ netif_rx() return value to the (lapb module) caller.
(2) Drivers for kernel versions 2.2.x should always check the global
variable netdev_dropping when a new frame is received. The driver
should only call netif_rx() if netdev_dropping is zero. Otherwise
diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt
index 425c51d56aef..b8a907dc0169 100644
--- a/Documentation/power/opp.txt
+++ b/Documentation/power/opp.txt
@@ -42,7 +42,7 @@ We can represent these as three OPPs as the following {Hz, uV} tuples:
OPP library provides a set of helper functions to organize and query the OPP
information. The library is located in drivers/base/power/opp.c and the header
-is located in include/linux/opp.h. OPP library can be enabled by enabling
+is located in include/linux/pm_opp.h. OPP library can be enabled by enabling
CONFIG_PM_OPP from power management menuconfig menu. OPP library depends on
CONFIG_PM as certain SoCs such as Texas Instrument's OMAP framework allows to
optionally boot at a certain OPP without needing cpufreq.
@@ -71,14 +71,14 @@ operations until that OPP could be re-enabled if possible.
OPP library facilitates this concept in it's implementation. The following
operational functions operate only on available opps:
-opp_find_freq_{ceil, floor}, opp_get_voltage, opp_get_freq, opp_get_opp_count
-and opp_init_cpufreq_table
+opp_find_freq_{ceil, floor}, dev_pm_opp_get_voltage, dev_pm_opp_get_freq, dev_pm_opp_get_opp_count
+and dev_pm_opp_init_cpufreq_table
-opp_find_freq_exact is meant to be used to find the opp pointer which can then
-be used for opp_enable/disable functions to make an opp available as required.
+dev_pm_opp_find_freq_exact is meant to be used to find the opp pointer which can then
+be used for dev_pm_opp_enable/disable functions to make an opp available as required.
WARNING: Users of OPP library should refresh their availability count using
-get_opp_count if opp_enable/disable functions are invoked for a device, the
+get_opp_count if dev_pm_opp_enable/disable functions are invoked for a device, the
exact mechanism to trigger these or the notification mechanism to other
dependent subsystems such as cpufreq are left to the discretion of the SoC
specific framework which uses the OPP library. Similar care needs to be taken
@@ -96,24 +96,24 @@ using RCU read locks. The opp_find_freq_{exact,ceil,floor},
opp_get_{voltage, freq, opp_count} fall into this category.
opp_{add,enable,disable} are updaters which use mutex and implement it's own
-RCU locking mechanisms. opp_init_cpufreq_table acts as an updater and uses
+RCU locking mechanisms. dev_pm_opp_init_cpufreq_table acts as an updater and uses
mutex to implment RCU updater strategy. These functions should *NOT* be called
under RCU locks and other contexts that prevent blocking functions in RCU or
mutex operations from working.
2. Initial OPP List Registration
================================
-The SoC implementation calls opp_add function iteratively to add OPPs per
+The SoC implementation calls dev_pm_opp_add function iteratively to add OPPs per
device. It is expected that the SoC framework will register the OPP entries
optimally- typical numbers range to be less than 5. The list generated by
registering the OPPs is maintained by OPP library throughout the device
operation. The SoC framework can subsequently control the availability of the
-OPPs dynamically using the opp_enable / disable functions.
+OPPs dynamically using the dev_pm_opp_enable / disable functions.
-opp_add - Add a new OPP for a specific domain represented by the device pointer.
+dev_pm_opp_add - Add a new OPP for a specific domain represented by the device pointer.
The OPP is defined using the frequency and voltage. Once added, the OPP
is assumed to be available and control of it's availability can be done
- with the opp_enable/disable functions. OPP library internally stores
+ with the dev_pm_opp_enable/disable functions. OPP library internally stores
and manages this information in the opp struct. This function may be
used by SoC framework to define a optimal list as per the demands of
SoC usage environment.
@@ -124,7 +124,7 @@ opp_add - Add a new OPP for a specific domain represented by the device pointer.
soc_pm_init()
{
/* Do things */
- r = opp_add(mpu_dev, 1000000, 900000);
+ r = dev_pm_opp_add(mpu_dev, 1000000, 900000);
if (!r) {
pr_err("%s: unable to register mpu opp(%d)\n", r);
goto no_cpufreq;
@@ -143,44 +143,44 @@ functions return the matching pointer representing the opp if a match is
found, else returns error. These errors are expected to be handled by standard
error checks such as IS_ERR() and appropriate actions taken by the caller.
-opp_find_freq_exact - Search for an OPP based on an *exact* frequency and
+dev_pm_opp_find_freq_exact - Search for an OPP based on an *exact* frequency and
availability. This function is especially useful to enable an OPP which
is not available by default.
Example: In a case when SoC framework detects a situation where a
higher frequency could be made available, it can use this function to
- find the OPP prior to call the opp_enable to actually make it available.
+ find the OPP prior to call the dev_pm_opp_enable to actually make it available.
rcu_read_lock();
- opp = opp_find_freq_exact(dev, 1000000000, false);
+ opp = dev_pm_opp_find_freq_exact(dev, 1000000000, false);
rcu_read_unlock();
/* dont operate on the pointer.. just do a sanity check.. */
if (IS_ERR(opp)) {
pr_err("frequency not disabled!\n");
/* trigger appropriate actions.. */
} else {
- opp_enable(dev,1000000000);
+ dev_pm_opp_enable(dev,1000000000);
}
NOTE: This is the only search function that operates on OPPs which are
not available.
-opp_find_freq_floor - Search for an available OPP which is *at most* the
+dev_pm_opp_find_freq_floor - Search for an available OPP which is *at most* the
provided frequency. This function is useful while searching for a lesser
match OR operating on OPP information in the order of decreasing
frequency.
Example: To find the highest opp for a device:
freq = ULONG_MAX;
rcu_read_lock();
- opp_find_freq_floor(dev, &freq);
+ dev_pm_opp_find_freq_floor(dev, &freq);
rcu_read_unlock();
-opp_find_freq_ceil - Search for an available OPP which is *at least* the
+dev_pm_opp_find_freq_ceil - Search for an available OPP which is *at least* the
provided frequency. This function is useful while searching for a
higher match OR operating on OPP information in the order of increasing
frequency.
Example 1: To find the lowest opp for a device:
freq = 0;
rcu_read_lock();
- opp_find_freq_ceil(dev, &freq);
+ dev_pm_opp_find_freq_ceil(dev, &freq);
rcu_read_unlock();
Example 2: A simplified implementation of a SoC cpufreq_driver->target:
soc_cpufreq_target(..)
@@ -188,7 +188,7 @@ opp_find_freq_ceil - Search for an available OPP which is *at least* the
/* Do stuff like policy checks etc. */
/* Find the best frequency match for the req */
rcu_read_lock();
- opp = opp_find_freq_ceil(dev, &freq);
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
rcu_read_unlock();
if (!IS_ERR(opp))
soc_switch_to_freq_voltage(freq);
@@ -208,34 +208,34 @@ as thermal considerations (e.g. don't use OPPx until the temperature drops).
WARNING: Do not use these functions in interrupt context.
-opp_enable - Make a OPP available for operation.
+dev_pm_opp_enable - Make a OPP available for operation.
Example: Lets say that 1GHz OPP is to be made available only if the
SoC temperature is lower than a certain threshold. The SoC framework
implementation might choose to do something as follows:
if (cur_temp < temp_low_thresh) {
/* Enable 1GHz if it was disabled */
rcu_read_lock();
- opp = opp_find_freq_exact(dev, 1000000000, false);
+ opp = dev_pm_opp_find_freq_exact(dev, 1000000000, false);
rcu_read_unlock();
/* just error check */
if (!IS_ERR(opp))
- ret = opp_enable(dev, 1000000000);
+ ret = dev_pm_opp_enable(dev, 1000000000);
else
goto try_something_else;
}
-opp_disable - Make an OPP to be not available for operation
+dev_pm_opp_disable - Make an OPP to be not available for operation
Example: Lets say that 1GHz OPP is to be disabled if the temperature
exceeds a threshold value. The SoC framework implementation might
choose to do something as follows:
if (cur_temp > temp_high_thresh) {
/* Disable 1GHz if it was enabled */
rcu_read_lock();
- opp = opp_find_freq_exact(dev, 1000000000, true);
+ opp = dev_pm_opp_find_freq_exact(dev, 1000000000, true);
rcu_read_unlock();
/* just error check */
if (!IS_ERR(opp))
- ret = opp_disable(dev, 1000000000);
+ ret = dev_pm_opp_disable(dev, 1000000000);
else
goto try_something_else;
}
@@ -247,7 +247,7 @@ information from the OPP structure is necessary. Once an OPP pointer is
retrieved using the search functions, the following functions can be used by SoC
framework to retrieve the information represented inside the OPP layer.
-opp_get_voltage - Retrieve the voltage represented by the opp pointer.
+dev_pm_opp_get_voltage - Retrieve the voltage represented by the opp pointer.
Example: At a cpufreq transition to a different frequency, SoC
framework requires to set the voltage represented by the OPP using
the regulator framework to the Power Management chip providing the
@@ -256,15 +256,15 @@ opp_get_voltage - Retrieve the voltage represented by the opp pointer.
{
/* do things */
rcu_read_lock();
- opp = opp_find_freq_ceil(dev, &freq);
- v = opp_get_voltage(opp);
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ v = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
if (v)
regulator_set_voltage(.., v);
/* do other things */
}
-opp_get_freq - Retrieve the freq represented by the opp pointer.
+dev_pm_opp_get_freq - Retrieve the freq represented by the opp pointer.
Example: Lets say the SoC framework uses a couple of helper functions
we could pass opp pointers instead of doing additional parameters to
handle quiet a bit of data parameters.
@@ -273,8 +273,8 @@ opp_get_freq - Retrieve the freq represented by the opp pointer.
/* do things.. */
max_freq = ULONG_MAX;
rcu_read_lock();
- max_opp = opp_find_freq_floor(dev,&max_freq);
- requested_opp = opp_find_freq_ceil(dev,&freq);
+ max_opp = dev_pm_opp_find_freq_floor(dev,&max_freq);
+ requested_opp = dev_pm_opp_find_freq_ceil(dev,&freq);
if (!IS_ERR(max_opp) && !IS_ERR(requested_opp))
r = soc_test_validity(max_opp, requested_opp);
rcu_read_unlock();
@@ -282,25 +282,25 @@ opp_get_freq - Retrieve the freq represented by the opp pointer.
}
soc_test_validity(..)
{
- if(opp_get_voltage(max_opp) < opp_get_voltage(requested_opp))
+ if(dev_pm_opp_get_voltage(max_opp) < dev_pm_opp_get_voltage(requested_opp))
return -EINVAL;
- if(opp_get_freq(max_opp) < opp_get_freq(requested_opp))
+ if(dev_pm_opp_get_freq(max_opp) < dev_pm_opp_get_freq(requested_opp))
return -EINVAL;
/* do things.. */
}
-opp_get_opp_count - Retrieve the number of available opps for a device
+dev_pm_opp_get_opp_count - Retrieve the number of available opps for a device
Example: Lets say a co-processor in the SoC needs to know the available
frequencies in a table, the main processor can notify as following:
soc_notify_coproc_available_frequencies()
{
/* Do things */
rcu_read_lock();
- num_available = opp_get_opp_count(dev);
+ num_available = dev_pm_opp_get_opp_count(dev);
speeds = kzalloc(sizeof(u32) * num_available, GFP_KERNEL);
/* populate the table in increasing order */
freq = 0;
- while (!IS_ERR(opp = opp_find_freq_ceil(dev, &freq))) {
+ while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
speeds[i] = freq;
freq++;
i++;
@@ -313,7 +313,7 @@ opp_get_opp_count - Retrieve the number of available opps for a device
6. Cpufreq Table Generation
===========================
-opp_init_cpufreq_table - cpufreq framework typically is initialized with
+dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
cpufreq_frequency_table_cpuinfo which is provided with the list of
frequencies that are available for operation. This function provides
a ready to use conversion routine to translate the OPP layer's internal
@@ -326,7 +326,7 @@ opp_init_cpufreq_table - cpufreq framework typically is initialized with
soc_pm_init()
{
/* Do things */
- r = opp_init_cpufreq_table(dev, &freq_table);
+ r = dev_pm_opp_init_cpufreq_table(dev, &freq_table);
if (!r)
cpufreq_frequency_table_cpuinfo(policy, freq_table);
/* Do other things */
@@ -336,7 +336,7 @@ opp_init_cpufreq_table - cpufreq framework typically is initialized with
addition to CONFIG_PM as power management feature is required to
dynamically scale voltage and frequency in a system.
-opp_free_cpufreq_table - Free up the table allocated by opp_init_cpufreq_table
+dev_pm_opp_free_cpufreq_table - Free up the table allocated by dev_pm_opp_init_cpufreq_table
7. Data Structures
==================
@@ -358,16 +358,16 @@ accessed by various functions as described above. However, the structures
representing the actual OPPs and domains are internal to the OPP library itself
to allow for suitable abstraction reusable across systems.
-struct opp - The internal data structure of OPP library which is used to
+struct dev_pm_opp - The internal data structure of OPP library which is used to
represent an OPP. In addition to the freq, voltage, availability
information, it also contains internal book keeping information required
for the OPP library to operate on. Pointer to this structure is
provided back to the users such as SoC framework to be used as a
identifier for OPP in the interactions with OPP layer.
- WARNING: The struct opp pointer should not be parsed or modified by the
- users. The defaults of for an instance is populated by opp_add, but the
- availability of the OPP can be modified by opp_enable/disable functions.
+ WARNING: The struct dev_pm_opp pointer should not be parsed or modified by the
+ users. The defaults for an instance are populated by dev_pm_opp_add, but the
+ availability of the OPP can be modified by the dev_pm_opp_enable/disable functions.
struct device - This is used to identify a domain to the OPP layer. The
nature of the device and it's implementation is left to the user of
@@ -377,19 +377,19 @@ Overall, in a simplistic view, the data structure operations is represented as
following:
Initialization / modification:
- +-----+ /- opp_enable
-opp_add --> | opp | <-------
- | +-----+ \- opp_disable
+ +-----+ /- dev_pm_opp_enable
+dev_pm_opp_add --> | opp | <-------
+ | +-----+ \- dev_pm_opp_disable
\-------> domain_info(device)
Search functions:
- /-- opp_find_freq_ceil ---\ +-----+
-domain_info<---- opp_find_freq_exact -----> | opp |
- \-- opp_find_freq_floor ---/ +-----+
+ /-- dev_pm_opp_find_freq_ceil ---\ +-----+
+domain_info<---- dev_pm_opp_find_freq_exact -----> | opp |
+ \-- dev_pm_opp_find_freq_floor ---/ +-----+
Retrieval functions:
-+-----+ /- opp_get_voltage
++-----+ /- dev_pm_opp_get_voltage
| opp | <---
-+-----+ \- opp_get_freq
++-----+ \- dev_pm_opp_get_freq
-domain_info <- opp_get_opp_count
+domain_info <- dev_pm_opp_get_opp_count
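
The following is a minimal sketch that ties the renamed calls together in one
helper (the soc_pick_opp() name and the freq/volt output parameters are
hypothetical; the RCU locking pattern follows the examples above):

static int soc_pick_opp(struct device *dev, unsigned long *freq,
			unsigned long *volt)
{
	struct dev_pm_opp *opp;

	rcu_read_lock();
	/* round the requested frequency up to the nearest available OPP */
	opp = dev_pm_opp_find_freq_ceil(dev, freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	/* the opp pointer is only valid inside the RCU read-side section */
	*freq = dev_pm_opp_get_freq(opp);
	*volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	return 0;
}
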
diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt
index 3f10b39b0346..89a8816990ff 100644
--- a/Documentation/power/power_supply_class.txt
+++ b/Documentation/power/power_supply_class.txt
@@ -135,11 +135,11 @@ CAPACITY_LEVEL - capacity level. This corresponds to
POWER_SUPPLY_CAPACITY_LEVEL_*.
TEMP - temperature of the power supply.
-TEMP_ALERT_MIN - minimum battery temperature alert value in milli centigrade.
-TEMP_ALERT_MAX - maximum battery temperature alert value in milli centigrade.
+TEMP_ALERT_MIN - minimum battery temperature alert.
+TEMP_ALERT_MAX - maximum battery temperature alert.
TEMP_AMBIENT - ambient temperature.
-TEMP_AMBIENT_ALERT_MIN - minimum ambient temperature alert value in milli centigrade.
-TEMP_AMBIENT_ALERT_MAX - maximum ambient temperature alert value in milli centigrade.
+TEMP_AMBIENT_ALERT_MIN - minimum ambient temperature alert.
+TEMP_AMBIENT_ALERT_MAX - maximum ambient temperature alert.
TIME_TO_EMPTY - seconds left for battery to be considered empty (i.e.
while battery powers a load)
diff --git a/Documentation/power/powercap/powercap.txt b/Documentation/power/powercap/powercap.txt
new file mode 100644
index 000000000000..1e6ef164e07a
--- /dev/null
+++ b/Documentation/power/powercap/powercap.txt
@@ -0,0 +1,236 @@
+Power Capping Framework
+==================================
+
+The power capping framework provides a consistent interface between the kernel
+and the user space that allows power capping drivers to expose the settings to
+user space in a uniform way.
+
+Terminology
+=========================
+The framework exposes power capping devices to user space via sysfs in the
+form of a tree of objects. The objects at the root level of the tree represent
+'control types', which correspond to different methods of power capping. For
+example, the intel-rapl control type represents the Intel "Running Average
+Power Limit" (RAPL) technology, whereas the 'idle-injection' control type
+corresponds to the use of idle injection for controlling power.
+
+Power zones represent different parts of the system, which can be controlled and
+monitored using the power capping method determined by the control type the
+given zone belongs to. They each contain attributes for monitoring power, as
+well as controls represented in the form of power constraints. If the parts of
+the system represented by different power zones are hierarchical (that is, one
+bigger part consists of multiple smaller parts that each have their own power
+controls), those power zones may also be organized in a hierarchy with one
+parent power zone containing multiple subzones and so on to reflect the power
+control topology of the system. In that case, it is possible to apply power
+capping to a set of devices together using the parent power zone and if more
+fine grained control is required, it can be applied through the subzones.
+
+
+Example sysfs interface tree:
+
+/sys/devices/virtual/powercap
+└── intel-rapl
+ ├── intel-rapl:0
+ │   ├── constraint_0_name
+ │   ├── constraint_0_power_limit_uw
+ │   ├── constraint_0_time_window_us
+ │   ├── constraint_1_name
+ │   ├── constraint_1_power_limit_uw
+ │   ├── constraint_1_time_window_us
+ │   ├── device -> ../../intel-rapl
+ │   ├── energy_uj
+ │   ├── intel-rapl:0:0
+ │   │   ├── constraint_0_name
+ │   │   ├── constraint_0_power_limit_uw
+ │   │   ├── constraint_0_time_window_us
+ │   │   ├── constraint_1_name
+ │   │   ├── constraint_1_power_limit_uw
+ │   │   ├── constraint_1_time_window_us
+ │   │   ├── device -> ../../intel-rapl:0
+ │   │   ├── energy_uj
+ │   │   ├── max_energy_range_uj
+ │   │   ├── name
+ │   │   ├── enabled
+ │   │   ├── power
+ │   │   │   ├── async
+ │   │   │   []
+ │   │   ├── subsystem -> ../../../../../../class/power_cap
+ │   │   └── uevent
+ │   ├── intel-rapl:0:1
+ │   │   ├── constraint_0_name
+ │   │   ├── constraint_0_power_limit_uw
+ │   │   ├── constraint_0_time_window_us
+ │   │   ├── constraint_1_name
+ │   │   ├── constraint_1_power_limit_uw
+ │   │   ├── constraint_1_time_window_us
+ │   │   ├── device -> ../../intel-rapl:0
+ │   │   ├── energy_uj
+ │   │   ├── max_energy_range_uj
+ │   │   ├── name
+ │   │   ├── enabled
+ │   │   ├── power
+ │   │   │   ├── async
+ │   │   │   []
+ │   │   ├── subsystem -> ../../../../../../class/power_cap
+ │   │   └── uevent
+ │   ├── max_energy_range_uj
+ │   ├── max_power_range_uw
+ │   ├── name
+ │   ├── enabled
+ │   ├── power
+ │   │   ├── async
+ │   │   []
+ │   ├── subsystem -> ../../../../../class/power_cap
+ │   ├── enabled
+ │   └── uevent
+ ├── intel-rapl:1
+ │   ├── constraint_0_name
+ │   ├── constraint_0_power_limit_uw
+ │   ├── constraint_0_time_window_us
+ │   ├── constraint_1_name
+ │   ├── constraint_1_power_limit_uw
+ │   ├── constraint_1_time_window_us
+ │   ├── device -> ../../intel-rapl
+ │   ├── energy_uj
+ │   ├── intel-rapl:1:0
+ │   │   ├── constraint_0_name
+ │   │   ├── constraint_0_power_limit_uw
+ │   │   ├── constraint_0_time_window_us
+ │   │   ├── constraint_1_name
+ │   │   ├── constraint_1_power_limit_uw
+ │   │   ├── constraint_1_time_window_us
+ │   │   ├── device -> ../../intel-rapl:1
+ │   │   ├── energy_uj
+ │   │   ├── max_energy_range_uj
+ │   │   ├── name
+ │   │   ├── enabled
+ │   │   ├── power
+ │   │   │   ├── async
+ │   │   │   []
+ │   │   ├── subsystem -> ../../../../../../class/power_cap
+ │   │   └── uevent
+ │   ├── intel-rapl:1:1
+ │   │   ├── constraint_0_name
+ │   │   ├── constraint_0_power_limit_uw
+ │   │   ├── constraint_0_time_window_us
+ │   │   ├── constraint_1_name
+ │   │   ├── constraint_1_power_limit_uw
+ │   │   ├── constraint_1_time_window_us
+ │   │   ├── device -> ../../intel-rapl:1
+ │   │   ├── energy_uj
+ │   │   ├── max_energy_range_uj
+ │   │   ├── name
+ │   │   ├── enabled
+ │   │   ├── power
+ │   │   │   ├── async
+ │   │   │   []
+ │   │   ├── subsystem -> ../../../../../../class/power_cap
+ │   │   └── uevent
+ │   ├── max_energy_range_uj
+ │   ├── max_power_range_uw
+ │   ├── name
+ │   ├── enabled
+ │   ├── power
+ │   │   ├── async
+ │   │   []
+ │   ├── subsystem -> ../../../../../class/power_cap
+ │   └── uevent
+ ├── power
+ │   ├── async
+ │   []
+ ├── subsystem -> ../../../../class/power_cap
+ ├── enabled
+ └── uevent
+
+The above example illustrates a case in which the Intel RAPL technology,
+available in Intel® 64 and IA-32 Processor Architectures, is used. There is one
+control type called intel-rapl which contains two power zones, intel-rapl:0 and
+intel-rapl:1, representing CPU packages. Each of these power zones contains
+two subzones, intel-rapl:j:0 and intel-rapl:j:1 (j = 0, 1), representing the
+"core" and the "uncore" parts of the given CPU package, respectively. All of
+the zones and subzones contain energy monitoring attributes (energy_uj,
+max_energy_range_uj) and constraint attributes (constraint_*) allowing controls
+to be applied (the constraints in the 'package' power zones apply to the whole
+CPU packages and the subzone constraints only apply to the respective parts of
+the given package individually). Since Intel RAPL doesn't provide an
+instantaneous power value, there is no power_uw attribute.
+
+In addition to that, each power zone contains a name attribute, allowing the
+part of the system represented by that zone to be identified.
+For example:
+
+cat /sys/class/power_cap/intel-rapl/intel-rapl:0/name
+package-0
+
+The Intel RAPL technology allows two constraints, short term and long term,
+with two different time windows to be applied to each power zone. Thus for
+each zone there are 2 attributes representing the constraint names, 2 power
+limits and 2 attributes representing the sizes of the time windows. The
+constraint_j_* attributes correspond to the jth constraint (j = 0, 1).
+
+For example:
+ constraint_0_name
+ constraint_0_power_limit_uw
+ constraint_0_time_window_us
+ constraint_1_name
+ constraint_1_power_limit_uw
+ constraint_1_time_window_us
+
+Power Zone Attributes
+=================================
+Monitoring attributes
+----------------------
+
+energy_uj (rw): Current energy counter in micro joules. Write "0" to reset.
+If the counter cannot be reset, then this attribute is read-only.
+
+max_energy_range_uj (ro): Range of the above energy counter in micro-joules.
+
+power_uw (ro): Current power in micro watts.
+
+max_power_range_uw (ro): Range of the above power value in micro-watts.
+
+name (ro): Name of this power zone.
+
+It is possible that some domains have both power ranges and energy counter ranges;
+however, only one is mandatory.
+
+Constraints
+----------------
+constraint_X_power_limit_uw (rw): Power limit in micro watts, which should be
+applicable for the time window specified by "constraint_X_time_window_us".
+
+constraint_X_time_window_us (rw): Time window in micro seconds.
+
+constraint_X_name (ro): An optional name of the constraint.
+
+constraint_X_max_power_uw(ro): Maximum allowed power in micro watts.
+
+constraint_X_min_power_uw(ro): Minimum allowed power in micro watts.
+
+constraint_X_max_time_window_us(ro): Maximum allowed time window in micro seconds.
+
+constraint_X_min_time_window_us(ro): Minimum allowed time window in micro seconds.
+
+All fields other than power_limit_uw and time_window_us are optional.
+
+Common zone and control type attributes
+----------------------------------------
+enabled (rw): Enable/Disable controls at zone level or for all zones using
+a control type.
+
+Power Cap Client Driver Interface
+==================================
+The API summary:
+
+Call powercap_register_control_type() to register a control type object.
+Call powercap_register_zone() to register a power zone (under a given
+control type), either as a top-level power zone or as a subzone of another
+power zone registered earlier.
+The number of constraints in a power zone and the corresponding callbacks have
+to be defined prior to calling powercap_register_zone() to register that zone.
+
+To free a power zone, call powercap_unregister_zone().
+To free a control type object, call powercap_unregister_control_type().
+Detailed API can be generated using kernel-doc on include/linux/powercap.h.
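
As a user-space illustration of the attributes described above, the sketch
below reads the energy counter of one zone and programs its first constraint
(the sysfs path is the one from the example tree and is system specific; the
30 W / 10 ms values are arbitrary):

#include <stdio.h>

#define ZONE "/sys/class/power_cap/intel-rapl/intel-rapl:0/"

int main(void)
{
	unsigned long long energy;
	FILE *f;

	/* monitoring: current energy counter in micro-joules */
	f = fopen(ZONE "energy_uj", "r");
	if (!f || fscanf(f, "%llu", &energy) != 1)
		return 1;
	fclose(f);
	printf("energy: %llu uJ\n", energy);

	/* control: 30 W limit over a 10 ms window on constraint 0 */
	f = fopen(ZONE "constraint_0_power_limit_uw", "w");
	if (!f)
		return 1;
	fprintf(f, "%d\n", 30000000);
	fclose(f);

	f = fopen(ZONE "constraint_0_time_window_us", "w");
	if (!f)
		return 1;
	fprintf(f, "%d\n", 10000);
	fclose(f);
	return 0;
}
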
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 71d8fe4e75d3..0f54333b0ff2 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -145,11 +145,13 @@ The action performed by the idle callback is totally dependent on the subsystem
if the device can be suspended (i.e. if all of the conditions necessary for
suspending the device are satisfied) and to queue up a suspend request for the
device in that case. If there is no idle callback, or if the callback returns
-0, then the PM core will attempt to carry out a runtime suspend of the device;
-in essence, it will call pm_runtime_suspend() directly. To prevent this (for
-example, if the callback routine has started a delayed suspend), the routine
-should return a non-zero value. Negative error return codes are ignored by the
-PM core.
+0, then the PM core will attempt to carry out a runtime suspend of the device,
+also respecting devices configured for autosuspend. In essence this means a
+call to pm_runtime_autosuspend() (do note that drivers need to update the
+device last busy mark, pm_runtime_mark_last_busy(), to control the delay under
+this circumstance). To prevent this (for example, if the callback routine has
+started a delayed suspend), the routine must return a non-zero value. Negative
+error return codes are ignored by the PM core.
The helper functions provided by the PM core, described in Section 4, guarantee
that the following constraints are met with respect to runtime PM callbacks for
@@ -308,7 +310,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
- execute the subsystem-level idle callback for the device; returns an
error code on failure, where -EINPROGRESS means that ->runtime_idle() is
already being executed; if there is no callback or the callback returns 0
- then run pm_runtime_suspend(dev) and return its result
+ then run pm_runtime_autosuspend(dev) and return its result
int pm_runtime_suspend(struct device *dev);
- execute the subsystem-level suspend callback for the device; returns 0 on
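
A minimal sketch of a driver ->runtime_idle() callback written against the
behaviour described above (the foo_ prefix is hypothetical; the two
pm_runtime_* helpers are the ones this document already refers to):

static int foo_runtime_idle(struct device *dev)
{
	/* device-specific checks could veto the suspend here by
	 * returning a non-zero value instead */

	/* restart the autosuspend delay from "now" */
	pm_runtime_mark_last_busy(dev);

	/* returning 0 lets the PM core call pm_runtime_autosuspend(dev) */
	return 0;
}
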
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index f59ded066108..a74d0a84d329 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -100,6 +100,11 @@ static long ppb_to_scaled_ppm(int ppb)
return (long) (ppb * 65.536);
}
+static int64_t pctns(struct ptp_clock_time *t)
+{
+ return t->sec * 1000000000LL + t->nsec;
+}
+
static void usage(char *progname)
{
fprintf(stderr,
@@ -112,6 +117,8 @@ static void usage(char *progname)
" -f val adjust the ptp clock frequency by 'val' ppb\n"
" -g get the ptp clock time\n"
" -h prints this message\n"
+ " -k val measure the time offset between system and phc clock\n"
+ " for 'val' times (Maximum 25)\n"
" -p val enable output with a period of 'val' nanoseconds\n"
" -P val enable or disable (val=1|0) the system clock PPS\n"
" -s set the ptp clock time from the system time\n"
@@ -133,8 +140,12 @@ int main(int argc, char *argv[])
struct itimerspec timeout;
struct sigevent sigevent;
+ struct ptp_clock_time *pct;
+ struct ptp_sys_offset *sysoff;
+
+
char *progname;
- int c, cnt, fd;
+ int i, c, cnt, fd;
char *device = DEVICE;
clockid_t clkid;
@@ -144,14 +155,19 @@ int main(int argc, char *argv[])
int extts = 0;
int gettime = 0;
int oneshot = 0;
+ int pct_offset = 0;
+ int n_samples = 0;
int periodic = 0;
int perout = -1;
int pps = -1;
int settime = 0;
+ int64_t t1, t2, tp;
+ int64_t interval, offset;
+
progname = strrchr(argv[0], '/');
progname = progname ? 1+progname : argv[0];
- while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghp:P:sSt:v"))) {
+ while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghk:p:P:sSt:v"))) {
switch (c) {
case 'a':
oneshot = atoi(optarg);
@@ -174,6 +190,10 @@ int main(int argc, char *argv[])
case 'g':
gettime = 1;
break;
+ case 'k':
+ pct_offset = 1;
+ n_samples = atoi(optarg);
+ break;
case 'p':
perout = atoi(optarg);
break;
@@ -376,6 +396,47 @@ int main(int argc, char *argv[])
}
}
+ if (pct_offset) {
+ if (n_samples <= 0 || n_samples > 25) {
+ puts("n_samples should be between 1 and 25");
+ usage(progname);
+ return -1;
+ }
+
+ sysoff = calloc(1, sizeof(*sysoff));
+ if (!sysoff) {
+ perror("calloc");
+ return -1;
+ }
+ sysoff->n_samples = n_samples;
+
+ if (ioctl(fd, PTP_SYS_OFFSET, sysoff))
+ perror("PTP_SYS_OFFSET");
+ else
+ puts("system and phc clock time offset request okay");
+
+ pct = &sysoff->ts[0];
+ for (i = 0; i < sysoff->n_samples; i++) {
+ t1 = pctns(pct+2*i);
+ tp = pctns(pct+2*i+1);
+ t2 = pctns(pct+2*i+2);
+ interval = t2 - t1;
+ offset = (t2 + t1) / 2 - tp;
+
+ printf("system time: %ld.%ld\n",
+ (pct+2*i)->sec, (pct+2*i)->nsec);
+ printf("phc time: %ld.%ld\n",
+ (pct+2*i+1)->sec, (pct+2*i+1)->nsec);
+ printf("system time: %ld.%ld\n",
+ (pct+2*i+2)->sec, (pct+2*i+2)->nsec);
+ printf("system/phc clock time offset is %ld ns\n"
+ "system clock time delay is %ld ns\n",
+ offset, interval);
+ }
+
+ free(sysoff);
+ }
+
close(fd);
return 0;
}
diff --git a/Documentation/s390/s390dbf.txt b/Documentation/s390/s390dbf.txt
index fcaf0b4efba2..3da163383c93 100644
--- a/Documentation/s390/s390dbf.txt
+++ b/Documentation/s390/s390dbf.txt
@@ -158,6 +158,16 @@ Return Value: none
Description: Sets new actual debug level if new_level is valid.
---------------------------------------------------------------------------
+bool debug_level_enabled (debug_info_t * id, int level);
+
+Parameter: id: handle for debug log
+ level: debug level
+
+Return Value: True if level is less than or equal to the current debug level.
+
+Description: Returns true if debug events for the specified level would be
+ logged. Otherwise returns false.
+---------------------------------------------------------------------------
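
For instance, an expensive debug event can be guarded with the new helper (a
sketch; debug_sprintf_event() is the sprintf-style event function of this
debug facility, and expensive_state_dump() is hypothetical):

	if (debug_level_enabled(debug_info, 4))
		debug_sprintf_event(debug_info, 4, "state: %s\n",
				    expensive_state_dump());
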
void debug_stop_all(void);
Parameter: none
diff --git a/Documentation/scheduler/sched-arch.txt b/Documentation/scheduler/sched-arch.txt
index b1b8587b86f0..9290de703450 100644
--- a/Documentation/scheduler/sched-arch.txt
+++ b/Documentation/scheduler/sched-arch.txt
@@ -65,11 +65,6 @@ Possible arch/ problems
Possible arch problems I found (and either tried to fix or didn't):
-h8300 - Is such sleeping racy vs interrupts? (See #4a).
- The H8/300 manual I found indicates yes, however disabling IRQs
- over the sleep mean only NMIs can wake it up, so can't fix easily
- without doing spin waiting.
-
ia64 - is safe_halt call racy vs interrupts? (does it sleep?) (See #4a)
sh64 - Is sleeping racy vs interrupts? (See #4a)
diff --git a/Documentation/security/00-INDEX b/Documentation/security/00-INDEX
index 414235c1fcfc..45c82fd3e9d3 100644
--- a/Documentation/security/00-INDEX
+++ b/Documentation/security/00-INDEX
@@ -22,3 +22,5 @@ keys.txt
- description of the kernel key retention service.
tomoyo.txt
- documentation on the TOMOYO Linux Security Module.
+IMA-templates.txt
+ - documentation on the template management mechanism for IMA.
diff --git a/Documentation/security/IMA-templates.txt b/Documentation/security/IMA-templates.txt
new file mode 100644
index 000000000000..a777e5f1df5b
--- /dev/null
+++ b/Documentation/security/IMA-templates.txt
@@ -0,0 +1,87 @@
+ IMA Template Management Mechanism
+
+
+==== INTRODUCTION ====
+
+The original 'ima' template is fixed length, containing the filedata hash
+and pathname. The filedata hash is limited to 20 bytes (md5/sha1).
+The pathname is a null terminated string, limited to 255 characters.
+To overcome these limitations and to add additional file metadata, it is
+necessary to extend the current version of IMA by defining additional
+templates. For example, information that could possibly be reported includes
+the inode UID/GID or the LSM labels of both the inode and the process
+that is accessing it.
+
+However, the main problem with introducing this feature is that, each time
+a new template is defined, the functions that generate and display
+the measurement list would have to include code for handling the new format
+and, thus, would grow significantly over time.
+
+The proposed solution solves this problem by separating the template
+management from the remaining IMA code. The core of this solution is the
+definition of two new data structures: a template descriptor, to determine
+which information should be included in the measurement list; a template
+field, to generate and display data of a given type.
+
+Managing templates with these structures is very simple. To support
+a new data type, developers define the field identifier and implement
+two functions, init() and show(), respectively to generate and display
+measurement entries. Defining a new template descriptor requires
+specifying the template format, a string of field identifiers separated
+by the '|' character. While in the current implementation it is possible
+to define new template descriptors only by adding their definition in the
+template specific code (ima_template.c), in a future version it will be
+possible to register a new template on a running kernel by supplying to IMA
+the desired format string. In this version, IMA initializes at boot time
+all defined template descriptors by translating the format into an array
+of template fields structures taken from the set of the supported ones.
+
+After the initialization step, IMA will call ima_alloc_init_template()
+(new function defined within the patches for the new template management
+mechanism) to generate a new measurement entry by using the template
+descriptor chosen through the kernel configuration or through the newly
+introduced 'ima_template=' kernel command line parameter. It is during this
+phase that the advantages of the new architecture are clearly shown:
+the latter function will not contain specific code to handle a given template
+but, instead, simply calls the init() method of the template fields
+associated with the chosen template descriptor and stores the result (pointer
+to allocated data and data length) in the measurement entry structure.
+
+The same mechanism is employed to display measurement entries.
+The functions ima[_ascii]_measurements_show() retrieve, for each entry,
+the template descriptor used to produce that entry and call the show()
+method for each item of the array of template fields structures.
+
+
+
+==== SUPPORTED TEMPLATE FIELDS AND DESCRIPTORS ====
+
+The following is the list of supported template fields
+('<identifier>': description) that can be used to define new template
+descriptors by adding their identifier to the format string
+(support for more data types will be added later):
+
+ - 'd': the digest of the event (i.e. the digest of a measured file),
+ calculated with the SHA1 or MD5 hash algorithm;
+ - 'n': the name of the event (i.e. the file name), with size up to 255 bytes;
+ - 'd-ng': the digest of the event, calculated with an arbitrary hash
+ algorithm (field format: [<hash algo>:]digest, where the digest
+ prefix is shown only if the hash algorithm is not SHA1 or MD5);
+ - 'n-ng': the name of the event, without size limitations.
+
+
+Below is the list of defined template descriptors:
+ - "ima": its format is 'd|n';
+ - "ima-ng" (default): its format is 'd-ng|n-ng'.
+
+
+
+==== USE ====
+
+To specify the template descriptor to be used to generate measurement entries,
+currently the following methods are supported:
+
+ - select a template descriptor among those supported in the kernel
+ configuration ('ima-ng' is the default choice);
+ - specify a template descriptor name from the kernel command line through
+ the 'ima_template=' parameter.
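
For example, booting with the kernel command line parameter

	ima_template=ima

selects the original fixed-length 'd|n' template instead of the default
'ima-ng'.
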
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index 7b4145d00452..a4c33f1a7c6d 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys.txt
@@ -865,15 +865,14 @@ encountered:
calling processes has a searchable link to the key from one of its
keyrings. There are three functions for dealing with these:
- key_ref_t make_key_ref(const struct key *key,
- unsigned long possession);
+ key_ref_t make_key_ref(const struct key *key, bool possession);
struct key *key_ref_to_ptr(const key_ref_t key_ref);
- unsigned long is_key_possessed(const key_ref_t key_ref);
+ bool is_key_possessed(const key_ref_t key_ref);
The first function constructs a key reference from a key pointer and
- possession information (which must be 0 or 1 and not any other value).
+ possession information (which must be true or false).
The second function retrieves the key pointer from a reference and the
third retrieves the possession flag.
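
     For illustration, a fragment using the three functions exactly as
     declared above (the key pointer is assumed to have been obtained
     elsewhere):

	key_ref_t ref = make_key_ref(key, true);	/* possessed reference */
	struct key *k = key_ref_to_ptr(ref);		/* back to the key */
	bool possessed = is_key_possessed(ref);		/* true in this case */
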
@@ -961,14 +960,17 @@ payload contents" for more information.
the argument will not be parsed.
-(*) Extra references can be made to a key by calling the following function:
+(*) Extra references can be made to a key by calling one of the following
+ functions:
+ struct key *__key_get(struct key *key);
struct key *key_get(struct key *key);
- These need to be disposed of by calling key_put() when they've been
- finished with. The key pointer passed in will be returned. If the pointer
- is NULL or CONFIG_KEYS is not set then the key will not be dereferenced and
- no increment will take place.
+ Keys so referenced will need to be disposed of by calling key_put() when
+ they've been finished with. The key pointer passed in will be returned.
+
+ In the case of key_get(), if the pointer is NULL or CONFIG_KEYS is not set
+ then the key will not be dereferenced and no increment will take place.
(*) A key's serial number can be obtained by calling:
diff --git a/Documentation/serial/driver b/Documentation/serial/driver
index 067c47d46917..c3a7689a90e6 100644
--- a/Documentation/serial/driver
+++ b/Documentation/serial/driver
@@ -264,10 +264,6 @@ hardware.
Locking: none.
Interrupts: caller dependent.
- set_wake(port,state)
- Enable/disable power management wakeup on serial activity. Not
- currently implemented.
-
type(port)
Return a pointer to a string constant describing the specified
port, or return NULL, in which case the string 'unknown' is
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 95731a08f257..b8dd0df76952 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -616,7 +616,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
As default, snd-dummy drivers doesn't allocate the real buffers
but either ignores read/write or mmap a single dummy page to all
- buffer pages, in order to save the resouces. If your apps need
+ buffer pages, in order to save the resources. If your apps need
the read/ written buffer data to be consistent, pass fake_buffer=0
option.
diff --git a/Documentation/sound/alsa/Audiophile-Usb.txt b/Documentation/sound/alsa/Audiophile-Usb.txt
index 654dd3b694a8..e7a5ed4dcae8 100644
--- a/Documentation/sound/alsa/Audiophile-Usb.txt
+++ b/Documentation/sound/alsa/Audiophile-Usb.txt
@@ -232,7 +232,7 @@ The parameter can be given:
# modprobe snd-usb-audio index=1 device_setup=0x09
* Or while configuring the modules options in your modules configuration file
- (tipically a .conf file in /etc/modprobe.d/ directory:
+ (typically a .conf file in /etc/modprobe.d/ directory:
alias snd-card-1 snd-usb-audio
options snd-usb-audio index=1 device_setup=0x09
diff --git a/Documentation/sound/alsa/CMIPCI.txt b/Documentation/sound/alsa/CMIPCI.txt
index 16935c8561f7..4e36e6e809ca 100644
--- a/Documentation/sound/alsa/CMIPCI.txt
+++ b/Documentation/sound/alsa/CMIPCI.txt
@@ -87,7 +87,7 @@ with 4 channels,
and use the interleaved 4 channel data.
-There are some control switchs affecting to the speaker connections:
+There are some control switches affecting to the speaker connections:
"Line-In Mode" - an enum control to change the behavior of line-in
jack. Either "Line-In", "Rear Output" or "Bass Output" can
diff --git a/Documentation/sound/alsa/compress_offload.txt b/Documentation/sound/alsa/compress_offload.txt
index fd74ff26376e..630c492c3dc2 100644
--- a/Documentation/sound/alsa/compress_offload.txt
+++ b/Documentation/sound/alsa/compress_offload.txt
@@ -217,12 +217,12 @@ Not supported:
would be enabled with ALSA kcontrols.
- Audio policy/resource management. This API does not provide any
- hooks to query the utilization of the audio DSP, nor any premption
+ hooks to query the utilization of the audio DSP, nor any preemption
mechanisms.
-- No notion of underun/overrun. Since the bytes written are compressed
+- No notion of underrun/overrun. Since the bytes written are compressed
in nature and data written/read doesn't translate directly to
- rendered output in time, this does not deal with underrun/overun and
+ rendered output in time, this does not deal with underrun/overrun and
maybe dealt in user-library
Credits:
diff --git a/Documentation/sound/alsa/soc/DPCM.txt b/Documentation/sound/alsa/soc/DPCM.txt
new file mode 100644
index 000000000000..0110180b7ac6
--- /dev/null
+++ b/Documentation/sound/alsa/soc/DPCM.txt
@@ -0,0 +1,380 @@
+Dynamic PCM
+===========
+
+1. Description
+==============
+
+Dynamic PCM allows an ALSA PCM device to digitally route its PCM audio to
+various digital endpoints during the PCM stream runtime. e.g. PCM0 can route
+digital audio to I2S DAI0, I2S DAI1 or PDM DAI2. This is useful for on-SoC DSP
+drivers that expose several ALSA PCMs and can route to multiple DAIs.
+
+The DPCM runtime routing is determined by the ALSA mixer settings in the same
+way as the analog signal is routed in an ASoC codec driver. DPCM uses a DAPM
+graph representing the DSP internal audio paths and uses the mixer settings to
+determine the path used by each ALSA PCM.
+
+DPCM re-uses all the existing component codec, platform and DAI drivers without
+any modifications.
+
+
+Phone Audio System with SoC based DSP
+-------------------------------------
+
+Consider the following phone audio subsystem. This will be used in this
+document for all examples :-
+
+| Front End PCMs | SoC DSP | Back End DAIs | Audio devices |
+
+ *************
+PCM0 <------------> * * <----DAI0-----> Codec Headset
+ * *
+PCM1 <------------> * * <----DAI1-----> Codec Speakers
+ * DSP *
+PCM2 <------------> * * <----DAI2-----> MODEM
+ * *
+PCM3 <------------> * * <----DAI3-----> BT
+ * *
+ * * <----DAI4-----> DMIC
+ * *
+ * * <----DAI5-----> FM
+ *************
+
+This diagram shows a simple smart phone audio subsystem. It supports Bluetooth,
+FM digital radio, Speakers, Headset Jack, digital microphones and cellular
+modem. This sound card exposes 4 DSP front end (FE) ALSA PCM devices and
+supports 6 back end (BE) DAIs. Each FE PCM can digitally route audio data to any
+of the BE DAIs. The FE PCM devices can also route audio to more than 1 BE DAI.
+
+
+
+Example - DPCM Switching playback from DAI0 to DAI1
+---------------------------------------------------
+
+Audio is being played to the Headset. After a while the user removes the headset
+and audio continues playing on the speakers.
+
+Playback on PCM0 to Headset would look like :-
+
+ *************
+PCM0 <============> * * <====DAI0=====> Codec Headset
+ * *
+PCM1 <------------> * * <----DAI1-----> Codec Speakers
+ * DSP *
+PCM2 <------------> * * <----DAI2-----> MODEM
+ * *
+PCM3 <------------> * * <----DAI3-----> BT
+ * *
+ * * <----DAI4-----> DMIC
+ * *
+ * * <----DAI5-----> FM
+ *************
+
+The headset is removed from the jack by the user so the speakers must now be used :-
+
+ *************
+PCM0 <============> * * <----DAI0-----> Codec Headset
+ * *
+PCM1 <------------> * * <====DAI1=====> Codec Speakers
+ * DSP *
+PCM2 <------------> * * <----DAI2-----> MODEM
+ * *
+PCM3 <------------> * * <----DAI3-----> BT
+ * *
+ * * <----DAI4-----> DMIC
+ * *
+ * * <----DAI5-----> FM
+ *************
+
+The audio driver processes this as follows :-
+
+ 1) Machine driver receives Jack removal event.
+
+ 2) Machine driver OR audio HAL disables the Headset path.
+
+ 3) DPCM runs the PCM trigger(stop), hw_free(), shutdown() operations on DAI0
+ for headset since the path is now disabled.
+
+ 4) Machine driver or audio HAL enables the speaker path.
+
+ 5) DPCM runs the PCM ops for startup(), hw_params(), prepare() and
+ trigger(start) for DAI1 Speakers since the path is enabled.
+
+In this example, the machine driver or userspace audio HAL can alter the routing
+and then DPCM will take care of managing the DAI PCM operations to either bring
+the link up or down. Audio playback does not stop during this transition.
+
+
+
+DPCM machine driver
+===================
+
+The DPCM enabled ASoC machine driver is similar to normal machine drivers
+except that we also have to :-
+
+ 1) Define the FE and BE DAI links.
+
+ 2) Define any FE/BE PCM operations.
+
+ 3) Define widget graph connections.
+
+
+1 FE and BE DAI links
+---------------------
+
+| Front End PCMs | SoC DSP | Back End DAIs | Audio devices |
+
+ *************
+PCM0 <------------> * * <----DAI0-----> Codec Headset
+ * *
+PCM1 <------------> * * <----DAI1-----> Codec Speakers
+ * DSP *
+PCM2 <------------> * * <----DAI2-----> MODEM
+ * *
+PCM3 <------------> * * <----DAI3-----> BT
+ * *
+ * * <----DAI4-----> DMIC
+ * *
+ * * <----DAI5-----> FM
+ *************
+
+For the example above we have to define 4 FE DAI links and 6 BE DAI links. The
+FE DAI links are defined as follows :-
+
+static struct snd_soc_dai_link machine_dais[] = {
+ {
+ .name = "PCM0 System",
+ .stream_name = "System Playback",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "dsp-audio",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ },
+ .....< other FE and BE DAI links here >
+};
+
+This FE DAI link is pretty similar to a regular DAI link except that we also
+set the DAI link to a DPCM FE with the "dynamic = 1". The supported FE stream
+directions should also be set with the "dpcm_playback" and "dpcm_capture"
+flags. There is also an option to specify the ordering of the trigger call for
+each FE. This allows the ASoC core to trigger the DSP before or after the other
+components (as some DSPs have strong requirements for the ordering DAI/DSP
+start and stop sequences).
+
+The FE DAI above sets the codec and codec DAIs to dummy devices since the BE is
+dynamic and will change depending on runtime config.
+
+The BE DAIs are configured as follows :-
+
+static struct snd_soc_dai_link machine_dais[] = {
+ .....< FE DAI links here >
+ {
+ .name = "Codec Headset",
+ .cpu_dai_name = "ssp-dai.0",
+ .platform_name = "snd-soc-dummy",
+ .no_pcm = 1,
+ .codec_name = "rt5640.0-001c",
+ .codec_dai_name = "rt5640-aif1",
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = hswult_ssp0_fixup,
+ .ops = &haswell_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ .....< other BE DAI links here >
+};
+
+This BE DAI link connects DAI0 to the codec (in this case RT5640 AIF1). It sets
+the "no_pcm" flag to mark it as a BE and sets flags for supported stream
+directions using "dpcm_playback" and "dpcm_capture" above.
+
+The BE has also flags set for ignoring suspend and PM down time. This allows
+the BE to work in a hostless mode where the host CPU is not transferring data
+like a BT phone call :-
+
+ *************
+PCM0 <------------> * * <----DAI0-----> Codec Headset
+ * *
+PCM1 <------------> * * <----DAI1-----> Codec Speakers
+ * DSP *
+PCM2 <------------> * * <====DAI2=====> MODEM
+ * *
+PCM3 <------------> * * <====DAI3=====> BT
+ * *
+ * * <----DAI4-----> DMIC
+ * *
+ * * <----DAI5-----> FM
+ *************
+
+This allows the host CPU to sleep whilst the DSP, MODEM DAI and the BT DAI are
+still in operation.
+
+A BE DAI link can also set the codec to a dummy device if the codec is a device
+that is managed externally.
+
+Likewise a BE DAI can also set a dummy cpu DAI if the CPU DAI is managed by the
+DSP firmware.
+
+
+2 FE/BE PCM operations
+----------------------
+
+The BE above also exports some PCM operations and a "fixup" callback. The fixup
+callback is used by the machine driver to (re)configure the DAI based upon the
+FE hw params. i.e. the DSP may perform SRC or ASRC from the FE to BE.
+
+e.g. DSP converts all FE hw params to run at fixed rate of 48k, 16bit, stereo for
+DAI0. This means all FE hw_params have to be fixed in the machine driver for
+DAI0 so that the DAI is running at desired configuration regardless of the FE
+configuration.
+
+static int dai0_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ /* The DSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set DAI0 to 16 bit */
+ snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+ SNDRV_PCM_HW_PARAM_FIRST_MASK],
+ SNDRV_PCM_FORMAT_S16_LE);
+ return 0;
+}
+
+The other PCM operations are the same as for regular DAI links. Use as necessary.
+
+
+3 Widget graph connections
+--------------------------
+
+The BE DAI links will normally be connected to the graph at initialisation time
+by the ASoC DAPM core. However, if the BE codec or BE DAI is a dummy then this
+has to be set explicitly in the driver :-
+
+/* BE for codec Headset - DAI0 is dummy and managed by DSP FW */
+{"DAI0 CODEC IN", NULL, "AIF1 Capture"},
+{"AIF1 Playback", NULL, "DAI0 CODEC OUT"},
+
+
+Writing a DPCM DSP driver
+=========================
+
+The DPCM DSP driver looks much like a standard platform class ASoC driver
+combined with elements from a codec class driver. A DSP platform driver must
+implement :-
+
+ 1) Front End PCM DAIs - i.e. struct snd_soc_dai_driver.
+
+ 2) DAPM graph showing DSP audio routing from FE DAIs to BEs.
+
+ 3) DAPM widgets from DSP graph.
+
+ 4) Mixers for gains, routing, etc.
+
+ 5) DMA configuration.
+
+ 6) BE AIF widgets.
+
+Item 6 is important for routing the audio outside of the DSP. AIFs need to be
+defined for each BE and each stream direction. e.g. for BE DAI0 above we would
+have :-
+
+SND_SOC_DAPM_AIF_IN("DAI0 RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_OUT("DAI0 TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+
+The BE AIF are used to connect the DSP graph to the graphs for the other
+component drivers (e.g. codec graph).
+
+
+Hostless PCM streams
+====================
+
+A hostless PCM stream is a stream that is not routed through the host CPU. An
+example of this would be a phone call from handset to modem.
+
+
+ *************
+PCM0 <------------> * * <----DAI0-----> Codec Headset
+ * *
+PCM1 <------------> * * <====DAI1=====> Codec Speakers/Mic
+ * DSP *
+PCM2 <------------> * * <====DAI2=====> MODEM
+ * *
+PCM3 <------------> * * <----DAI3-----> BT
+ * *
+ * * <----DAI4-----> DMIC
+ * *
+ * * <----DAI5-----> FM
+ *************
+
+In this case the PCM data is routed via the DSP. The host CPU in this use case
+is only used for control and can sleep during the runtime of the stream.
+
+The host can control the hostless link either by :-
+
+ 1) Configuring the link as a CODEC <-> CODEC style link. In this case the link
+ is enabled or disabled by the state of the DAPM graph. This usually means
+ there is a mixer control that can be used to connect or disconnect the path
+ between both DAIs.
+
+ 2) Hostless FE. This FE has a virtual connection to the BE DAI links on the DAPM
+ graph. Control is then carried out by the FE as regular PCM operations.
+ This method gives more control over the DAI links, but requires much more
+ userspace code to control the link. It's recommended to use CODEC<->CODEC
+ unless your HW needs more fine-grained sequencing of the PCM ops.
+
+
+CODEC <-> CODEC link
+--------------------
+
+This DAI link is enabled when DAPM detects a valid path within the DAPM graph.
+The machine driver sets some additional parameters to the DAI link i.e.
+
+static const struct snd_soc_pcm_stream dai_params = {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .rate_min = 8000,
+ .rate_max = 8000,
+ .channels_min = 2,
+ .channels_max = 2,
+};
+
+static struct snd_soc_dai_link dais[] = {
+ < ... more DAI links above ... >
+ {
+ .name = "MODEM",
+ .stream_name = "MODEM",
+ .cpu_dai_name = "dai2",
+ .codec_dai_name = "modem-aif1",
+ .codec_name = "modem",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ .params = &dai_params,
+ }
+ < ... more DAI links here ... >
+
+These parameters are used to configure the DAI hw_params() when DAPM detects a
+valid path and then calls the PCM operations to start the link. DAPM will also
+call the appropriate PCM operations to disable the DAI when the path is no
+longer valid.
+
+
+Hostless FE
+-----------
+
+The DAI link(s) are enabled by a FE that does not read or write any PCM data.
+This means creating a new FE that is connected with a virtual path to both
+DAI links. The DAI links will be started when the FE PCM is started and stopped
+when the FE PCM is stopped. Note that the FE PCM cannot read or write data in
+this configuration.
+
+
diff --git a/Documentation/sound/alsa/soc/codec.txt b/Documentation/sound/alsa/soc/codec.txt
index bce23a4a7875..db5f9c9ae149 100644
--- a/Documentation/sound/alsa/soc/codec.txt
+++ b/Documentation/sound/alsa/soc/codec.txt
@@ -1,22 +1,23 @@
-ASoC Codec Driver
-=================
+ASoC Codec Class Driver
+=======================
-The codec driver is generic and hardware independent code that configures the
-codec to provide audio capture and playback. It should contain no code that is
-specific to the target platform or machine. All platform and machine specific
-code should be added to the platform and machine drivers respectively.
+The codec class driver is generic and hardware independent code that configures
+the codec, FM, MODEM, BT or external DSP to provide audio capture and playback.
+It should contain no code that is specific to the target platform or machine.
+All platform and machine specific code should be added to the platform and
+machine drivers respectively.
-Each codec driver *must* provide the following features:-
+Each codec class driver *must* provide the following features:-
1) Codec DAI and PCM configuration
- 2) Codec control IO - using I2C, 3 Wire(SPI) or both APIs
+ 2) Codec control IO - using RegMap API
3) Mixers and audio controls
4) Codec audio operations
+ 5) DAPM description.
+ 6) DAPM event handler.
Optionally, codec drivers can also provide:-
- 5) DAPM description.
- 6) DAPM event handler.
7) DAC Digital mute control.
Its probably best to use this guide in conjunction with the existing codec
@@ -64,26 +65,9 @@ struct snd_soc_dai_driver wm8731_dai = {
2 - Codec control IO
--------------------
The codec can usually be controlled via an I2C or SPI style interface
-(AC97 combines control with data in the DAI). The codec drivers provide
-functions to read and write the codec registers along with supplying a
-register cache:-
-
- /* IO control data and register cache */
- void *control_data; /* codec control (i2c/3wire) data */
- void *reg_cache;
-
-Codec read/write should do any data formatting and call the hardware
-read write below to perform the IO. These functions are called by the
-core and ALSA when performing DAPM or changing the mixer:-
-
- unsigned int (*read)(struct snd_soc_codec *, unsigned int);
- int (*write)(struct snd_soc_codec *, unsigned int, unsigned int);
-
-Codec hardware IO functions - usually points to either the I2C, SPI or AC97
-read/write:-
-
- hw_write_t hw_write;
- hw_read_t hw_read;
+(AC97 combines control with data in the DAI). The codec driver should use the
+Regmap API for all codec IO. Please see include/linux/regmap.h and existing
+codec drivers for example regmap usage.
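
A minimal sketch of regmap-based codec IO (the register widths and the
register/value numbers are hypothetical; see include/linux/regmap.h for the
full API):

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,		/* 8-bit register addresses */
	.val_bits = 16,		/* 16-bit register values */
};

	/* in the I2C probe() handler */
	struct regmap *map = devm_regmap_init_i2c(i2c, &foo_regmap_config);

	if (IS_ERR(map))
		return PTR_ERR(map);

	/* all subsequent codec IO goes through the regmap */
	regmap_write(map, 0x02, 0x0179);
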
3 - Mixers and audio controls
@@ -127,7 +111,7 @@ Defines a stereo enumerated control
4 - Codec Audio Operations
--------------------------
-The codec driver also supports the following ALSA operations:-
+The codec driver also supports the following ALSA PCM operations:-
/* SoC audio ops */
struct snd_soc_ops {
diff --git a/Documentation/sound/alsa/soc/dapm.txt b/Documentation/sound/alsa/soc/dapm.txt
index 05bf5a0eee41..6faab4880006 100644
--- a/Documentation/sound/alsa/soc/dapm.txt
+++ b/Documentation/sound/alsa/soc/dapm.txt
@@ -21,7 +21,7 @@ level power systems.
There are 4 power domains within DAPM
- 1. Codec domain - VREF, VMID (core codec and audio power)
+ 1. Codec bias domain - VREF, VMID (core codec and audio power)
Usually controlled at codec probe/remove and suspend/resume, although
can be set at stream time if power is not needed for sidetone, etc.
@@ -30,7 +30,7 @@ There are 4 power domains within DAPM
machine driver and responds to asynchronous events e.g when HP
are inserted
- 3. Path domain - audio susbsystem signal paths
+ 3. Path domain - audio subsystem signal paths
Automatically set when mixer and mux settings are changed by the user.
e.g. alsamixer, amixer.
@@ -63,14 +63,22 @@ Audio DAPM widgets fall into a number of types:-
o Line - Line Input/Output (and optional Jack)
o Speaker - Speaker
o Supply - Power or clock supply widget used by other widgets.
+ o Regulator - External regulator that supplies power to audio components.
+ o Clock - External clock that supplies clock to audio components.
+ o AIF IN - Audio Interface Input (with TDM slot mask).
+ o AIF OUT - Audio Interface Output (with TDM slot mask).
+ o Siggen - Signal Generator.
+ o DAI IN - Digital Audio Interface Input.
+ o DAI OUT - Digital Audio Interface Output.
+ o DAI Link - DAI Link between two DAI structures
o Pre - Special PRE widget (exec before all others)
o Post - Special POST widget (exec after all others)
(Widgets are defined in include/sound/soc-dapm.h)
-Widgets are usually added in the codec driver and the machine driver. There are
-convenience macros defined in soc-dapm.h that can be used to quickly build a
-list of widgets of the codecs and machines DAPM widgets.
+Widgets can be added to the sound card by any of the component driver types.
+There are convenience macros defined in soc-dapm.h that can be used to quickly
+build a list of widgets of the codecs and machines DAPM widgets.
Most widgets have a name, register, shift and invert. Some widgets have extra
parameters for stream name and kcontrols.
@@ -80,11 +88,13 @@ parameters for stream name and kcontrols.
-------------------------
Stream Widgets relate to the stream power domain and only consist of ADCs
-(analog to digital converters) and DACs (digital to analog converters).
+(analog to digital converters), DACs (digital to analog converters),
+AIF IN and AIF OUT.
Stream widgets have the following format:-
SND_SOC_DAPM_DAC(name, stream name, reg, shift, invert),
+SND_SOC_DAPM_AIF_IN(name, stream, slot, reg, shift, invert)
NOTE: the stream name must match the corresponding stream name in your codec
snd_soc_codec_dai.
@@ -94,6 +104,11 @@ e.g. stream widgets for HiFi playback and capture
SND_SOC_DAPM_DAC("HiFi DAC", "HiFi Playback", REG, 3, 1),
SND_SOC_DAPM_ADC("HiFi ADC", "HiFi Capture", REG, 2, 1),
+e.g. stream widgets for AIF
+
+SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+
2.2 Path Domain Widgets
-----------------------
@@ -121,12 +136,14 @@ If you dont want the mixer elements prefixed with the name of the mixer widget,
you can use SND_SOC_DAPM_MIXER_NAMED_CTL instead. the parameters are the same
as for SND_SOC_DAPM_MIXER.
-2.3 Platform/Machine domain Widgets
------------------------------------
+
+2.3 Machine domain Widgets
+--------------------------
Machine widgets are different from codec widgets in that they don't have a
codec register bit associated with them. A machine widget is assigned to each
-machine audio component (non codec) that can be independently powered. e.g.
+machine audio component (non codec or DSP) that can be independently
+powered. e.g.
o Speaker Amp
o Microphone Bias
@@ -146,12 +163,12 @@ static int spitz_mic_bias(struct snd_soc_dapm_widget* w, int event)
SND_SOC_DAPM_MIC("Mic Jack", spitz_mic_bias),
-2.4 Codec Domain
-----------------
+2.4 Codec (BIAS) Domain
+-----------------------
-The codec power domain has no widgets and is handled by the codecs DAPM event
-handler. This handler is called when the codec powerstate is changed wrt to any
-stream event or by kernel PM events.
+The codec bias power domain has no widgets and is handled by the codecs DAPM
+event handler. This handler is called when the codec powerstate is changed wrt
+any stream event or by kernel PM events.
2.5 Virtual Widgets
@@ -169,15 +186,16 @@ After all the widgets have been defined, they can then be added to the DAPM
subsystem individually with a call to snd_soc_dapm_new_control().
-3. Codec Widget Interconnections
-================================
+3. Codec/DSP Widget Interconnections
+====================================
-Widgets are connected to each other within the codec and machine by audio paths
-(called interconnections). Each interconnection must be defined in order to
-create a map of all audio paths between widgets.
+Widgets are connected to each other within the codec, platform and machine by
+audio paths (called interconnections). Each interconnection must be defined in
+order to create a map of all audio paths between widgets.
-This is easiest with a diagram of the codec (and schematic of the machine audio
-system), as it requires joining widgets together via their audio signal paths.
+This is easiest with a diagram of the codec or DSP (and schematic of the machine
+audio system), as it requires joining widgets together via their audio signal
+paths.
e.g., from the WM8731 output mixer (wm8731.c)
@@ -247,16 +265,9 @@ machine and includes the codec. e.g.
o Mic Jack
o Codec Pins
-When a codec pin is NC it can be marked as not used with a call to
-
-snd_soc_dapm_set_endpoint(codec, "Widget Name", 0);
-
-The last argument is 0 for inactive and 1 for active. This way the pin and its
-input widget will never be powered up and consume power.
-
-This also applies to machine widgets. e.g. if a headphone is connected to a
-jack then the jack can be marked active. If the headphone is removed, then
-the headphone jack can be marked inactive.
+Endpoints are added to the DAPM graph so that their usage can be determined in
+order to save power. e.g. NC codec pins will be switched OFF, unconnected
+jacks can also be switched OFF.
5 DAPM Widget Events
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt
index d50c14df3411..74056dba52be 100644
--- a/Documentation/sound/alsa/soc/machine.txt
+++ b/Documentation/sound/alsa/soc/machine.txt
@@ -1,8 +1,10 @@
ASoC Machine Driver
===================
-The ASoC machine (or board) driver is the code that glues together the platform
-and codec drivers.
+The ASoC machine (or board) driver is the code that glues together all the
+component drivers (e.g. codecs, platforms and DAIs). It also describes the
+relationships between each component, which include audio paths, GPIOs,
+interrupts, clocking, jacks and voltage regulators.
The machine driver can contain codec and platform specific code. It registers
the audio subsystem with the kernel as a platform device and is represented by
diff --git a/Documentation/sound/alsa/soc/platform.txt b/Documentation/sound/alsa/soc/platform.txt
index d57efad37e0a..3a08a2c9150c 100644
--- a/Documentation/sound/alsa/soc/platform.txt
+++ b/Documentation/sound/alsa/soc/platform.txt
@@ -1,9 +1,9 @@
ASoC Platform Driver
====================
-An ASoC platform driver can be divided into audio DMA and SoC DAI configuration
-and control. The platform drivers only target the SoC CPU and must have no board
-specific code.
+An ASoC platform driver class can be divided into audio DMA drivers, SoC DAI
+drivers and DSP drivers. The platform drivers only target the SoC CPU and must
+have no board specific code.
Audio DMA
=========
@@ -64,3 +64,16 @@ Each SoC DAI driver must provide the following features:-
5) Suspend and resume (optional)
Please see codec.txt for a description of items 1 - 4.
+
+
+SoC DSP Drivers
+===============
+
+Each SoC DSP driver usually supplies the following features :-
+
+ 1) DAPM graph
+ 2) Mixer controls
+ 3) DMA IO to/from DSP buffers (if applicable)
+ 4) Definition of DSP front end (FE) PCM devices.
+
+Please see DPCM.txt for a description of item 4.
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 9d4c1d18ad44..4273b2d71a27 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -355,6 +355,82 @@ utilize.
==============================================================
+numa_balancing
+
+Enables/disables automatic page fault based NUMA memory balancing.
+Memory is moved automatically to nodes that access it often.
+
+On NUMA machines, there
+is a performance penalty if remote memory is accessed by a CPU. When this
+feature is enabled the kernel samples what task thread is accessing memory
+by periodically unmapping pages and later trapping a page fault. At the
+time of the page fault, it is determined if the data being accessed should
+be migrated to a local memory node.
+
+The unmapping of pages and trapping faults incur additional overhead that
+ideally is offset by improved memory locality but there is no universal
+guarantee. If the target workload is already bound to NUMA nodes then this
+feature should be disabled. Otherwise, if the system overhead from the
+feature is too high then the rate the kernel samples for NUMA hinting
+faults may be controlled by the numa_balancing_scan_period_min_ms,
+numa_balancing_scan_delay_ms, numa_balancing_scan_period_max_ms,
+numa_balancing_scan_size_mb, numa_balancing_settle_count sysctls and
+numa_balancing_migrate_deferred.
+
+==============================================================
+
+numa_balancing_scan_period_min_ms, numa_balancing_scan_delay_ms,
+numa_balancing_scan_period_max_ms, numa_balancing_scan_size_mb
+
+Automatic NUMA balancing scans a task's address space and unmaps pages to
+detect if pages are properly placed or if the data should be migrated to a
+memory node local to where the task is running. Every "scan delay" the task
+scans the next "scan size" number of pages in its address space. When the
+end of the address space is reached the scanner restarts from the beginning.
+
+In combination, the "scan delay" and "scan size" determine the scan rate.
+When "scan delay" decreases, the scan rate increases. The scan delay and
+hence the scan rate of every task is adaptive and depends on historical
+behaviour. If pages are properly placed then the scan delay increases,
+otherwise the scan delay decreases. The "scan size" is not adaptive but
+the higher the "scan size", the higher the scan rate.
+
+Higher scan rates incur higher system overhead as page faults must be
+trapped and potentially data must be migrated. However, the higher the scan
+rate, the more quickly a task's memory is migrated to a local node if the
+workload pattern changes and minimises performance impact due to remote
+memory accesses. These sysctls control the thresholds for scan delays and
+the number of pages scanned.
+
+numa_balancing_scan_period_min_ms is the minimum time in milliseconds to
+scan a task's virtual memory. It effectively controls the maximum scanning
+rate for each task.
+
+numa_balancing_scan_delay_ms is the starting "scan delay" used for a task
+when it initially forks.
+
+numa_balancing_scan_period_max_ms is the maximum time in milliseconds to
+scan a task's virtual memory. It effectively controls the minimum scanning
+rate for each task.
+
+numa_balancing_scan_size_mb is how many megabytes worth of pages are
+scanned for a given scan.
+
+numa_balancing_settle_count is how many scan periods must complete before
+the schedule balancer stops pushing the task towards a preferred node. This
+gives the scheduler a chance to place the task on an alternative node if the
+preferred node is overloaded.
+
+numa_balancing_migrate_deferred is how many page migrations get skipped
+unconditionally, after a page migration is skipped because a page is shared
+with other tasks. This reduces page migration overhead, and determines
+how much stronger the "move task near its memory" policy scheduler becomes,
+versus the "move memory near its task" memory management policy, for workloads
+with shared memory.
+
+==============================================================
+
osrelease, ostype & version:
# cat osrelease
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 8cb4d7842a5f..0e307c94809a 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -11,27 +11,29 @@ regardless of whatever else it is doing, unless it is completely locked up.
You need to say "yes" to 'Magic SysRq key (CONFIG_MAGIC_SYSRQ)' when
configuring the kernel. When running a kernel with SysRq compiled in,
/proc/sys/kernel/sysrq controls the functions allowed to be invoked via
-the SysRq key. By default the file contains 1 which means that every
-possible SysRq request is allowed (in older versions SysRq was disabled
-by default, and you were required to specifically enable it at run-time
-but this is not the case any more). Here is the list of possible values
-in /proc/sys/kernel/sysrq:
+the SysRq key. The default value in this file is set by the
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE config symbol, which itself defaults
+to 1. Here is the list of possible values in /proc/sys/kernel/sysrq:
0 - disable sysrq completely
1 - enable all functions of sysrq
>1 - bitmask of allowed sysrq functions (see below for detailed function
description):
- 2 - enable control of console logging level
- 4 - enable control of keyboard (SAK, unraw)
- 8 - enable debugging dumps of processes etc.
- 16 - enable sync command
- 32 - enable remount read-only
- 64 - enable signalling of processes (term, kill, oom-kill)
- 128 - allow reboot/poweroff
- 256 - allow nicing of all RT tasks
+ 2 = 0x2 - enable control of console logging level
+ 4 = 0x4 - enable control of keyboard (SAK, unraw)
+ 8 = 0x8 - enable debugging dumps of processes etc.
+ 16 = 0x10 - enable sync command
+ 32 = 0x20 - enable remount read-only
+ 64 = 0x40 - enable signalling of processes (term, kill, oom-kill)
+ 128 = 0x80 - allow reboot/poweroff
+ 256 = 0x100 - allow nicing of all RT tasks
You can set the value in the file by the following command:
echo "number" >/proc/sys/kernel/sysrq
+The number may be written here either as decimal or as hexadecimal
+with the 0x prefix. CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE must always be
+written in hexadecimal.
+
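As a worked example, to allow only the sync command (0x10), remount
read-only (0x20) and reboot/poweroff (0x80), the bitmask is
0x10 + 0x20 + 0x80 = 0xb0 (176 decimal), so either of the following
is equivalent:

	echo 176 >/proc/sys/kernel/sysrq
	echo 0xb0 >/proc/sys/kernel/sysrq
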
Note that the value of /proc/sys/kernel/sysrq influences only the invocation
via a keyboard. Invocation of any operation via /proc/sysrq-trigger is always
allowed (by a user with admin privileges).
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
index a9248da5cdbc..ef2ccbf77fa2 100644
--- a/Documentation/timers/00-INDEX
+++ b/Documentation/timers/00-INDEX
@@ -8,5 +8,9 @@ hpet_example.c
- sample hpet timer test program
hrtimers.txt
- subsystem for high-resolution kernel timers
+NO_HZ.txt
+ - Summary of the different methods of managing scheduler clock interrupts.
+timers-howto.txt
+ - how to insert delays in the kernel the right (tm) way.
timer_stats.txt
- timer usage statistics
diff --git a/Documentation/usb/gadget_configfs.txt b/Documentation/usb/gadget_configfs.txt
index 8ec2a67c39b7..4cf53e406613 100644
--- a/Documentation/usb/gadget_configfs.txt
+++ b/Documentation/usb/gadget_configfs.txt
@@ -26,7 +26,7 @@ Linux provides a number of functions for gadgets to use.
Creating a gadget means deciding what configurations there will be
and which functions each configuration will provide.
-Configfs (please see Documentation/filesystems/configfs/*) lends itslef nicely
+Configfs (please see Documentation/filesystems/configfs/*) lends itself nicely
for the purpose of telling the kernel about the above mentioned decision.
This document is about how to do it.
@@ -99,7 +99,7 @@ directories must be created:
$ mkdir configs/<name>.<number>
where <name> can be any string which is legal in a filesystem and the
-<numebr> is the configuration's number, e.g.:
+<number> is the configuration's number, e.g.:
$ mkdir configs/c.1
@@ -327,7 +327,7 @@ from the buffer to the cs), but it is up to the implementer of the
two functions to decide what they actually do.
typedef struct configured_structure cs;
-typedef struc specific_attribute sa;
+typedef struct specific_attribute sa;
sa
+----------------------------------+
diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX
new file mode 100644
index 000000000000..641ec9220179
--- /dev/null
+++ b/Documentation/virtual/kvm/00-INDEX
@@ -0,0 +1,24 @@
+00-INDEX
+ - this file.
+api.txt
+ - KVM userspace API.
+cpuid.txt
+ - KVM-specific cpuid leaves (x86).
+devices/
+ - KVM_CAP_DEVICE_CTRL userspace API.
+hypercalls.txt
+ - KVM hypercalls.
+locking.txt
+ - notes on KVM locks.
+mmu.txt
+ - the x86 kvm shadow mmu.
+msr.txt
+ - KVM-specific MSRs (x86).
+nested-vmx.txt
+ - notes on nested virtualization for Intel x86 processors.
+ppc-pv.txt
+ - the paravirtualization interface on PowerPC.
+review-checklist.txt
+ - review checklist for KVM patches.
+timekeeping.txt
+ - timekeeping virtualization for x86-based architectures.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 858aecf21db2..a30035dd4c26 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1122,9 +1122,9 @@ struct kvm_cpuid2 {
struct kvm_cpuid_entry2 entries[0];
};
-#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1
-#define KVM_CPUID_FLAG_STATEFUL_FUNC 2
-#define KVM_CPUID_FLAG_STATE_READ_NEXT 4
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX BIT(0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2)
struct kvm_cpuid_entry2 {
__u32 function;
@@ -1810,6 +1810,50 @@ registers, find a list below:
PPC | KVM_REG_PPC_TLB3PS | 32
PPC | KVM_REG_PPC_EPTCFG | 32
PPC | KVM_REG_PPC_ICP_STATE | 64
+ PPC | KVM_REG_PPC_TB_OFFSET | 64
+ PPC | KVM_REG_PPC_SPMC1 | 32
+ PPC | KVM_REG_PPC_SPMC2 | 32
+ PPC | KVM_REG_PPC_IAMR | 64
+ PPC | KVM_REG_PPC_TFHAR | 64
+ PPC | KVM_REG_PPC_TFIAR | 64
+ PPC | KVM_REG_PPC_TEXASR | 64
+ PPC | KVM_REG_PPC_FSCR | 64
+ PPC | KVM_REG_PPC_PSPB | 32
+ PPC | KVM_REG_PPC_EBBHR | 64
+ PPC | KVM_REG_PPC_EBBRR | 64
+ PPC | KVM_REG_PPC_BESCR | 64
+ PPC | KVM_REG_PPC_TAR | 64
+ PPC | KVM_REG_PPC_DPDES | 64
+ PPC | KVM_REG_PPC_DAWR | 64
+ PPC | KVM_REG_PPC_DAWRX | 64
+ PPC | KVM_REG_PPC_CIABR | 64
+ PPC | KVM_REG_PPC_IC | 64
+ PPC | KVM_REG_PPC_VTB | 64
+ PPC | KVM_REG_PPC_CSIGR | 64
+ PPC | KVM_REG_PPC_TACR | 64
+ PPC | KVM_REG_PPC_TCSCR | 64
+ PPC | KVM_REG_PPC_PID | 64
+ PPC | KVM_REG_PPC_ACOP | 64
+ PPC | KVM_REG_PPC_VRSAVE | 32
+ PPC | KVM_REG_PPC_LPCR | 64
+ PPC | KVM_REG_PPC_PPR | 64
+ PPC | KVM_REG_PPC_ARCH_COMPAT | 32
+ PPC | KVM_REG_PPC_TM_GPR0 | 64
+ ...
+ PPC | KVM_REG_PPC_TM_GPR31 | 64
+ PPC | KVM_REG_PPC_TM_VSR0 | 128
+ ...
+ PPC | KVM_REG_PPC_TM_VSR63 | 128
+ PPC | KVM_REG_PPC_TM_CR | 64
+ PPC | KVM_REG_PPC_TM_LR | 64
+ PPC | KVM_REG_PPC_TM_CTR | 64
+ PPC | KVM_REG_PPC_TM_FPSCR | 64
+ PPC | KVM_REG_PPC_TM_AMR | 64
+ PPC | KVM_REG_PPC_TM_PPR | 64
+ PPC | KVM_REG_PPC_TM_VRSAVE | 64
+ PPC | KVM_REG_PPC_TM_VSCR | 32
+ PPC | KVM_REG_PPC_TM_DSCR | 64
+ PPC | KVM_REG_PPC_TM_TAR | 64
ARM registers are mapped using the lower 32 bits. The upper 16 of that
is the register group type, or coprocessor number:
@@ -2304,7 +2348,31 @@ Possible features:
Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
-4.83 KVM_GET_REG_LIST
+4.83 KVM_ARM_PREFERRED_TARGET
+
+Capability: basic
+Architectures: arm, arm64
+Type: vm ioctl
+Parameters: struct kvm_vcpu_init (out)
+Returns: 0 on success; -1 on error
+Errors:
+ ENODEV: no preferred target available for the host
+
+This queries KVM for the preferred CPU target type which can be emulated
+by KVM on the underlying host.
+
+The ioctl returns a struct kvm_vcpu_init instance containing information
+about the preferred CPU target type and the recommended features for it.
+The returned kvm_vcpu_init->features bitmap will have feature bits set if
+the preferred target recommends setting these features, but this is
+not mandatory.
+
+The information returned by this ioctl can be used to prepare an instance
+of struct kvm_vcpu_init for the KVM_ARM_VCPU_INIT ioctl, which will result
+in a VCPU matching the underlying host.
+
+
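For illustration, a minimal userspace sketch of this sequence, assuming
vm_fd and vcpu_fd were obtained beforehand via KVM_CREATE_VM and
KVM_CREATE_VCPU (error handling trimmed):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int init_vcpu_with_preferred_target(int vm_fd, int vcpu_fd)
    {
            struct kvm_vcpu_init init;

            /* Ask KVM which CPU target it prefers to emulate on this host. */
            if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init))
                    return -1;      /* e.g. ENODEV: no preferred target */

            /*
             * init.features carries recommended (not mandatory) feature
             * bits; userspace may adjust them before initialising the VCPU.
             */
            return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
    }
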
+4.84 KVM_GET_REG_LIST
Capability: basic
Architectures: arm, arm64
@@ -2323,8 +2391,7 @@ struct kvm_reg_list {
This ioctl returns the guest registers that are supported for the
KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
-
-4.84 KVM_ARM_SET_DEVICE_ADDR
+4.85 KVM_ARM_SET_DEVICE_ADDR
Capability: KVM_CAP_ARM_SET_DEVICE_ADDR
Architectures: arm, arm64
@@ -2362,7 +2429,7 @@ must be called after calling KVM_CREATE_IRQCHIP, but before calling
KVM_RUN on any of the VCPUs. Calling this ioctl twice for any of the
base addresses will return -EEXIST.
-4.85 KVM_PPC_RTAS_DEFINE_TOKEN
+4.86 KVM_PPC_RTAS_DEFINE_TOKEN
Capability: KVM_CAP_PPC_RTAS
Architectures: ppc
@@ -2661,6 +2728,77 @@ and usually define the validity of a groups of registers. (e.g. one bit
};
+4.81 KVM_GET_EMULATED_CPUID
+
+Capability: KVM_CAP_EXT_EMUL_CPUID
+Architectures: x86
+Type: system ioctl
+Parameters: struct kvm_cpuid2 (in/out)
+Returns: 0 on success, -1 on error
+
+struct kvm_cpuid2 {
+ __u32 nent;
+ __u32 flags;
+ struct kvm_cpuid_entry2 entries[0];
+};
+
+The member 'flags' is used for passing flags from userspace.
+
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX BIT(0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2)
+
+struct kvm_cpuid_entry2 {
+ __u32 function;
+ __u32 index;
+ __u32 flags;
+ __u32 eax;
+ __u32 ebx;
+ __u32 ecx;
+ __u32 edx;
+ __u32 padding[3];
+};
+
+This ioctl returns the x86 cpuid features which are emulated by
+kvm. Userspace can use the information returned by this ioctl to query
+which features are emulated by kvm instead of being present natively.
+
+Userspace invokes KVM_GET_EMULATED_CPUID by passing a kvm_cpuid2
+structure with the 'nent' field indicating the number of entries in
+the variable-size array 'entries'. If the number of entries is too low
+to describe the cpu capabilities, an error (E2BIG) is returned. If the
+number is too high, the 'nent' field is adjusted and an error (ENOMEM)
+is returned. If the number is just right, the 'nent' field is adjusted
+to the number of valid entries in the 'entries' array, which is then
+filled.
+
+The entries returned are the set CPUID bits of the respective features
+which kvm emulates, as returned by the CPUID instruction, with unknown
+or unsupported feature bits cleared.
+
+Features like x2apic, for example, may not be present in the host cpu
+but are exposed by kvm in KVM_GET_SUPPORTED_CPUID because they can be
+emulated efficiently; they are therefore not included here.
+
+The fields in each entry are defined as follows:
+
+ function: the eax value used to obtain the entry
+ index: the ecx value used to obtain the entry (for entries that are
+ affected by ecx)
+ flags: an OR of zero or more of the following:
+ KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
+ if the index field is valid
+ KVM_CPUID_FLAG_STATEFUL_FUNC:
+ if cpuid for this function returns different values for successive
+ invocations; there will be several entries with the same function,
+ all with this flag set
+ KVM_CPUID_FLAG_STATE_READ_NEXT:
+ for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
+ the first entry to be read by a cpu
+ eax, ebx, ecx, edx: the values returned by the cpuid instruction for
+ this function/index combination
+
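For illustration, a minimal sketch of the sizing handshake described above,
assuming kvm_fd is an open /dev/kvm descriptor and the headers define
KVM_GET_EMULATED_CPUID (the starting nent of 32 is arbitrary):

    #include <errno.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static struct kvm_cpuid2 *get_emulated_cpuid(int kvm_fd)
    {
            int nent = 32;
            struct kvm_cpuid2 *cpuid;

            for (;;) {
                    cpuid = calloc(1, sizeof(*cpuid) +
                                      nent * sizeof(struct kvm_cpuid_entry2));
                    if (!cpuid)
                            return NULL;
                    cpuid->nent = nent;

                    /* On success, nent holds the number of valid entries. */
                    if (!ioctl(kvm_fd, KVM_GET_EMULATED_CPUID, cpuid))
                            return cpuid;

                    free(cpuid);
                    if (errno != E2BIG)
                            return NULL;
                    nent *= 2;      /* array was too small, retry larger */
            }
    }
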
+
6. Capabilities that can be enabled
-----------------------------------
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index 22ff659bc0fb..3c65feb83010 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -43,6 +43,13 @@ KVM_FEATURE_CLOCKSOURCE2 || 3 || kvmclock available at msrs
KVM_FEATURE_ASYNC_PF || 4 || async pf can be enabled by
|| || writing to msr 0x4b564d02
------------------------------------------------------------------------------
+KVM_FEATURE_STEAL_TIME || 5 || steal time can be enabled by
+ || || writing to msr 0x4b564d03.
+------------------------------------------------------------------------------
+KVM_FEATURE_PV_EOI || 6 || paravirtualized end of interrupt
+ || || handler can be enabled by writing
+ || || to msr 0x4b564d04.
+------------------------------------------------------------------------------
KVM_FEATURE_PV_UNHALT || 7 || guest checks this feature bit
|| || before enabling paravirtualized
|| || spinlock support.
diff --git a/Documentation/virtual/kvm/devices/vfio.txt b/Documentation/virtual/kvm/devices/vfio.txt
new file mode 100644
index 000000000000..ef51740c67ca
--- /dev/null
+++ b/Documentation/virtual/kvm/devices/vfio.txt
@@ -0,0 +1,22 @@
+VFIO virtual device
+===================
+
+Device types supported:
+ KVM_DEV_TYPE_VFIO
+
+Only one VFIO instance may be created per VM. The created device
+tracks VFIO groups in use by the VM and features of those groups
+important to the correctness and acceleration of the VM. As groups
+are enabled and disabled for use by the VM, KVM should be updated
+about their presence. When registered with KVM, a reference to the
+VFIO group is held by KVM.
+
+Groups:
+ KVM_DEV_VFIO_GROUP
+
+KVM_DEV_VFIO_GROUP attributes:
+ KVM_DEV_VFIO_GROUP_ADD: Add a VFIO group to VFIO-KVM device tracking
+ KVM_DEV_VFIO_GROUP_DEL: Remove a VFIO group from VFIO-KVM device tracking
+
+For each, kvm_device_attr.addr points to an int32_t file descriptor
+for the VFIO group.
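
For illustration, a userspace sketch of adding a group, assuming vm_fd is a
VM file descriptor and group_fd an already-open VFIO group file descriptor
(error handling omitted):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int kvm_vfio_add_group(int vm_fd, int32_t group_fd)
    {
            struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
            struct kvm_device_attr attr = {
                    .group = KVM_DEV_VFIO_GROUP,
                    .attr  = KVM_DEV_VFIO_GROUP_ADD,
                    .addr  = (__u64)(unsigned long)&group_fd,
            };

            /* Only one VFIO device may be created per VM. */
            if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
                    return -1;

            /* KVM now holds a reference to the group until GROUP_DEL. */
            return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
    }
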
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index 41b7ac9884b5..f8869410d40c 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -132,10 +132,14 @@ See the comments in spte_has_volatile_bits() and mmu_spte_update().
------------
Name: kvm_lock
-Type: raw_spinlock
+Type: spinlock_t
Arch: any
Protects: - vm_list
- - hardware virtualization enable/disable
+
+Name: kvm_count_lock
+Type: raw_spinlock_t
+Arch: any
+Protects: - hardware virtualization enable/disable
Comment: 'raw' because hardware enabling/disabling must be atomic /wrt
migration.
@@ -151,3 +155,14 @@ Type: spinlock_t
Arch: any
Protects: -shadow page/shadow tlb entry
Comment: it is a spinlock since it is used in mmu notifier.
+
+Name: kvm->srcu
+Type: srcu lock
+Arch: any
+Protects: - kvm->memslots
+ - kvm->buses
+Comment: The srcu read lock must be held while accessing memslots (e.g.
+ when using gfn_to_* functions) and while accessing in-kernel
+ MMIO/PIO address->device structure mapping (kvm->buses).
+ The srcu index can be stored in kvm_vcpu->srcu_idx per vcpu
+ if it is needed by multiple functions.
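
For illustration, the pattern used by in-kernel callers looks roughly like
this (the index may instead be kept in vcpu->srcu_idx when the critical
section spans several functions):

    int idx;

    idx = srcu_read_lock(&kvm->srcu);
    /*
     * kvm->memslots and kvm->buses may be dereferenced in here,
     * e.g. via gfn_to_memslot() or the kvm_io_bus_*() helpers.
     */
    ...
    srcu_read_unlock(&kvm->srcu, idx);
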
diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
index 5481c8ba3412..a39d06680e1c 100644
--- a/Documentation/vm/00-INDEX
+++ b/Documentation/vm/00-INDEX
@@ -4,10 +4,12 @@ active_mm.txt
- An explanation from Linus about tsk->active_mm vs tsk->mm.
balance
- various information on memory balancing.
-hugepage-mmap.c
- - Example app using huge page memory with the mmap system call.
-hugepage-shm.c
- - Example app using huge page memory with Sys V shared memory system calls.
+cleancache.txt
+ - Intro to cleancache and page-granularity victim cache.
+frontswap.txt
+ - Outline frontswap, part of the transcendent memory frontend.
+highmem.txt
+ - Outline of highmem and common issues.
hugetlbpage.txt
- a brief summary of hugetlbpage support in the Linux kernel.
hwpoison.txt
@@ -16,21 +18,23 @@ ksm.txt
- how to use the Kernel Samepage Merging feature.
locking
- info on how locking and synchronization is done in the Linux vm code.
-map_hugetlb.c
- - an example program that uses the MAP_HUGETLB mmap flag.
numa
- information about NUMA specific code in the Linux vm.
numa_memory_policy.txt
- documentation of concepts and APIs of the 2.6 memory policy support.
overcommit-accounting
- description of the Linux kernels overcommit handling modes.
-page-types.c
- - Tool for querying page flags
page_migration
- description of page migration in NUMA systems.
pagemap.txt
- pagemap, from the userspace perspective
slub.txt
- a short users guide for SLUB.
+soft-dirty.txt
+ - short explanation for soft-dirty PTEs
+transhuge.txt
+ - Transparent Hugepage Support, alternative way of using hugepages.
unevictable-lru.txt
- Unevictable LRU infrastructure
+zswap.txt
+ - Intro to compressed cache for swap pages
diff --git a/MAINTAINERS b/MAINTAINERS
index c75199cdd052..b527cca00259 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -253,6 +253,20 @@ F: drivers/pci/*acpi*
F: drivers/pci/*/*acpi*
F: drivers/pci/*/*/*acpi*
+ACPI COMPONENT ARCHITECTURE (ACPICA)
+M: Robert Moore <robert.moore@intel.com>
+M: Lv Zheng <lv.zheng@intel.com>
+M: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+L: linux-acpi@vger.kernel.org
+L: devel@acpica.org
+W: https://acpica.org/
+W: https://github.com/acpica/acpica/
+Q: https://patchwork.kernel.org/project/linux-acpi/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+S: Supported
+F: drivers/acpi/acpica/
+F: include/acpi/
+
ACPI FAN DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
@@ -763,6 +777,10 @@ W: http://maxim.org.za/at91_26.html
W: http://www.linux4sam.org
S: Supported
F: arch/arm/mach-at91/
+F: arch/arm/boot/dts/at91*.dts
+F: arch/arm/boot/dts/at91*.dtsi
+F: arch/arm/boot/dts/sama*.dts
+F: arch/arm/boot/dts/sama*.dtsi
ARM/CALXEDA HIGHBANK ARCHITECTURE
M: Rob Herring <rob.herring@calxeda.com>
@@ -929,7 +947,7 @@ M: Javier Martinez Canillas <javier@dowhile0.org>
L: linux-omap@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: arch/arm/mach-omap2/board-igep0020.c
+F: arch/arm/boot/dts/omap3-igep*
ARM/INCOME PXA270 SUPPORT
M: Marek Vasut <marek.vasut@gmail.com>
@@ -1009,6 +1027,7 @@ ARM/Marvell Armada 370 and Armada XP SOC support
M: Jason Cooper <jason@lakedaemon.net>
M: Andrew Lunn <andrew@lunn.ch>
M: Gregory Clement <gregory.clement@free-electrons.com>
+M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-mvebu/
@@ -1016,6 +1035,7 @@ F: arch/arm/mach-mvebu/
ARM/Marvell Dove/Kirkwood/MV78xx0/Orion SOC support
M: Jason Cooper <jason@lakedaemon.net>
M: Andrew Lunn <andrew@lunn.ch>
+M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-dove/
@@ -1148,10 +1168,12 @@ F: drivers/net/ethernet/i825xx/ether1*
F: drivers/net/ethernet/seeq/ether3*
F: drivers/scsi/arm/
-ARM/SHARK MACHINE SUPPORT
-M: Alexander Schulz <alex@shark-linux.de>
-W: http://www.shark-linux.de/shark.html
+ARM/Rockchip SoC support
+M: Heiko Stuebner <heiko@sntech.de>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: arch/arm/mach-rockchip/
+F: drivers/*/*rockchip*
ARM/SAMSUNG ARM ARCHITECTURES
M: Ben Dooks <ben-linux@fluff.org>
@@ -1160,6 +1182,8 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
+F: arch/arm/boot/dts/s3c*
+F: arch/arm/boot/dts/exynos*
F: arch/arm/plat-samsung/
F: arch/arm/mach-s3c24*/
F: arch/arm/mach-s3c64xx/
@@ -1402,7 +1426,7 @@ M: Wolfram Sang <wsa@the-dreams.de>
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/misc/eeprom/at24.c
-F: include/linux/i2c/at24.h
+F: include/linux/platform_data/at24.h
ATA OVER ETHERNET (AOE) DRIVER
M: "Ed L. Cashin" <ecashin@coraid.com>
@@ -1658,9 +1682,9 @@ F: drivers/video/backlight/
F: include/linux/backlight.h
BATMAN ADVANCED
-M: Marek Lindner <lindner_marek@yahoo.de>
-M: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
-M: Antonio Quartulli <ordex@autistici.org>
+M: Marek Lindner <mareklindner@neomailbox.ch>
+M: Simon Wunderlich <sw@simonwunderlich.de>
+M: Antonio Quartulli <antonio@meshcoding.com>
L: b.a.t.m.a.n@lists.open-mesh.org
W: http://www.open-mesh.org/
S: Maintained
@@ -1791,6 +1815,7 @@ F: include/net/bluetooth/
BONDING DRIVER
M: Jay Vosburgh <fubar@us.ibm.com>
+M: Veaceslav Falico <vfalico@redhat.com>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/bonding/
@@ -2718,6 +2743,8 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vinod.koul@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
+L: dmaengine@vger.kernel.org
+Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
S: Supported
F: drivers/dma/
F: include/linux/dma*
@@ -2821,8 +2848,10 @@ M: Terje Bergström <tbergstrom@nvidia.com>
L: dri-devel@lists.freedesktop.org
L: linux-tegra@vger.kernel.org
T: git git://anongit.freedesktop.org/tegra/linux.git
-S: Maintained
+S: Supported
+F: drivers/gpu/drm/tegra/
F: drivers/gpu/host1x/
+F: include/linux/host1x.h
F: include/uapi/drm/tegra_drm.h
F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -3035,6 +3064,14 @@ W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/amd64_edac*
+EDAC-CALXEDA
+M: Doug Thompson <dougthompson@xmission.com>
+M: Robert Richter <rric@kernel.org>
+L: linux-edac@vger.kernel.org
+W: bluesmoke.sourceforge.net
+S: Maintained
+F: drivers/edac/highbank*
+
EDAC-CAVIUM
M: Ralf Baechle <ralf@linux-mips.org>
M: David Daney <david.daney@cavium.com>
@@ -4365,7 +4402,10 @@ F: arch/x86/kernel/microcode_intel.c
INTEL I/OAT DMA DRIVER
M: Dan Williams <dan.j.williams@intel.com>
-S: Maintained
+M: Dave Jiang <dave.jiang@intel.com>
+L: dmaengine@vger.kernel.org
+Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
+S: Supported
F: drivers/dma/ioat*
INTEL IOMMU (VT-d)
@@ -4763,9 +4803,10 @@ F: Documentation/hwmon/k8temp
F: drivers/hwmon/k8temp.c
KCONFIG
-M: Michal Marek <mmarek@suse.cz>
+M: "Yann E. MORIN" <yann.morin.1998@free.fr>
L: linux-kbuild@vger.kernel.org
-S: Odd Fixes
+T: git://gitorious.org/linux-kconfig/linux-kconfig
+S: Maintained
F: Documentation/kbuild/kconfig-language.txt
F: scripts/kconfig/
@@ -4828,7 +4869,8 @@ KERNEL VIRTUAL MACHINE (KVM)
M: Gleb Natapov <gleb@redhat.com>
M: Paolo Bonzini <pbonzini@redhat.com>
L: kvm@vger.kernel.org
-W: http://linux-kvm.org
+W: http://www.linux-kvm.org
+T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
S: Supported
F: Documentation/*/kvm*.txt
F: Documentation/virtual/kvm/
@@ -6102,6 +6144,12 @@ L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-omap.c
+OMAP/NEWFLOW NANOBONE MACHINE SUPPORT
+M: Mark Jackson <mpfj@newflow.co.uk>
+L: linux-omap@vger.kernel.org
+S: Maintained
+F: arch/arm/boot/dts/am335x-nano.dts
+
OMFS FILESYSTEM
M: Bob Copeland <me@bobcopeland.com>
L: linux-karma-devel@lists.sourceforge.net
@@ -6378,6 +6426,7 @@ S: Supported
F: Documentation/PCI/
F: drivers/pci/
F: include/linux/pci*
+F: arch/x86/pci/
PCI DRIVER FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
@@ -6386,6 +6435,12 @@ S: Supported
F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
F: drivers/pci/host/pci-tegra.c
+PCI DRIVER FOR SAMSUNG EXYNOS
+M: Jingoo Han <jg1.han@samsung.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: drivers/pci/host/pci-exynos.c
+
PCMCIA SUBSYSTEM
P: Linux PCMCIA Team
L: linux-pcmcia@lists.infradead.org
@@ -6856,6 +6911,14 @@ L: linux-hexagon@vger.kernel.org
S: Supported
F: arch/hexagon/
+QUALCOMM WCN36XX WIRELESS DRIVER
+M: Eugene Krasnikov <k.eugene.e@gmail.com>
+L: wcn36xx@lists.infradead.org
+W: http://wireless.kernel.org/en/users/Drivers/wcn36xx
+T: git git://github.com/KrasnikovEugene/wcn36xx.git
+S: Supported
+F: drivers/net/wireless/ath/wcn36xx/
+
QUICKCAM PARALLEL PORT WEBCAMS
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -6943,7 +7006,7 @@ M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: Documentation/RCU/torture.txt
-F: kernel/rcutorture.c
+F: kernel/rcu/torture.c
RDC R-321X SoC
M: Florian Fainelli <florian@openwrt.org>
@@ -6970,8 +7033,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: Documentation/RCU/
X: Documentation/RCU/torture.txt
F: include/linux/rcu*
-F: kernel/rcu*
-X: kernel/rcutorture.c
+X: include/linux/srcu.h
+F: kernel/rcu/
+X: kernel/rcu/torture.c
REAL TIME CLOCK (RTC) SUBSYSTEM
M: Alessandro Zummo <a.zummo@towertech.it>
@@ -7296,6 +7360,8 @@ S: Maintained
F: kernel/sched/
F: include/linux/sched.h
F: include/uapi/linux/sched.h
+F: kernel/wait.c
+F: include/linux/wait.h
SCORE ARCHITECTURE
M: Chen Liqin <liqin.linux@gmail.com>
@@ -7430,9 +7496,10 @@ SELINUX SECURITY MODULE
M: Stephen Smalley <sds@tycho.nsa.gov>
M: James Morris <james.l.morris@oracle.com>
M: Eric Paris <eparis@parisplace.org>
+M: Paul Moore <paul@paul-moore.com>
L: selinux@tycho.nsa.gov (subscribers-only, general discussion)
W: http://selinuxproject.org
-T: git git://git.infradead.org/users/eparis/selinux.git
+T: git git://git.infradead.org/users/pcmoore/selinux
S: Supported
F: include/linux/selinux*
F: security/selinux/
@@ -7658,8 +7725,8 @@ M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
W: http://www.rdrop.com/users/paulmck/RCU/
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
-F: include/linux/srcu*
-F: kernel/srcu*
+F: include/linux/srcu.h
+F: kernel/rcu/srcu.c
SMACK SECURITY MODULE
M: Casey Schaufler <casey@schaufler-ca.com>
@@ -7830,6 +7897,13 @@ F: Documentation/sound/alsa/soc/
F: sound/soc/
F: include/sound/soc*
+SOUND - DMAENGINE HELPERS
+M: Lars-Peter Clausen <lars@metafoo.de>
+S: Supported
+F: include/sound/dmaengine_pcm.h
+F: sound/core/pcm_dmaengine.c
+F: sound/soc/soc-generic-dmaengine-pcm.c
+
SPARC + UltraSPARC (sparc/sparc64)
M: "David S. Miller" <davem@davemloft.net>
L: sparclinux@vger.kernel.org
@@ -8302,14 +8376,72 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/rc/ttusbir.c
-TEGRA SUPPORT
+TEGRA ARCHITECTURE SUPPORT
M: Stephen Warren <swarren@wwwdotorg.org>
+M: Thierry Reding <thierry.reding@gmail.com>
L: linux-tegra@vger.kernel.org
Q: http://patchwork.ozlabs.org/project/linux-tegra/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
S: Supported
N: [^a-z]tegra
+TEGRA ASOC DRIVER
+M: Stephen Warren <swarren@wwwdotorg.org>
+S: Supported
+F: sound/soc/tegra/
+
+TEGRA CLOCK DRIVER
+M: Peter De Schrijver <pdeschrijver@nvidia.com>
+M: Prashant Gaikwad <pgaikwad@nvidia.com>
+S: Supported
+F: drivers/clk/tegra/
+
+TEGRA DMA DRIVER
+M: Laxman Dewangan <ldewangan@nvidia.com>
+S: Supported
+F: drivers/dma/tegra20-apb-dma.c
+
+TEGRA GPIO DRIVER
+M: Stephen Warren <swarren@wwwdotorg.org>
+S: Supported
+F: drivers/gpio/gpio-tegra.c
+
+TEGRA I2C DRIVER
+M: Laxman Dewangan <ldewangan@nvidia.com>
+S: Supported
+F: drivers/i2c/busses/i2c-tegra.c
+
+TEGRA IOMMU DRIVERS
+M: Hiroshi Doyu <hdoyu@nvidia.com>
+S: Supported
+F: drivers/iommu/tegra*
+
+TEGRA KBC DRIVER
+M: Rakesh Iyer <riyer@nvidia.com>
+M: Laxman Dewangan <ldewangan@nvidia.com>
+S: Supported
+F: drivers/input/keyboard/tegra-kbc.c
+
+TEGRA PINCTRL DRIVER
+M: Stephen Warren <swarren@wwwdotorg.org>
+S: Supported
+F: drivers/pinctrl/pinctrl-tegra*
+
+TEGRA PWM DRIVER
+M: Thierry Reding <thierry.reding@gmail.com>
+S: Supported
+F: drivers/pwm/pwm-tegra.c
+
+TEGRA SERIAL DRIVER
+M: Laxman Dewangan <ldewangan@nvidia.com>
+S: Supported
+F: drivers/tty/serial/serial-tegra.c
+
+TEGRA SPI DRIVER
+M: Laxman Dewangan <ldewangan@nvidia.com>
+S: Supported
+F: drivers/spi/spi-tegra*
+
TEHUTI ETHERNET DRIVER
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
@@ -8514,6 +8646,7 @@ F: drivers/media/usb/tm6000/
TPM DEVICE DRIVER
M: Leonidas Da Silva Barbosa <leosilva@linux.vnet.ibm.com>
M: Ashley Lai <ashley@ashleylai.com>
+M: Peter Huewe <peterhuewe@gmx.de>
M: Rajiv Andrade <mail@srajiv.net>
W: http://tpmdd.sourceforge.net
M: Marcel Selhorst <tpmdd@selhorst.net>
@@ -8610,14 +8743,6 @@ S: Maintained
F: arch/m68k/*/*_no.*
F: arch/m68k/include/asm/*_no.*
-UCLINUX FOR RENESAS H8/300 (H8300)
-M: Yoshinori Sato <ysato@users.sourceforge.jp>
-W: http://uclinux-h8.sourceforge.jp/
-S: Supported
-F: arch/h8300/
-F: drivers/ide/ide-h8300.c
-F: drivers/net/ethernet/8390/ne-h8300.c
-
UDF FILESYSTEM
M: Jan Kara <jack@suse.cz>
S: Maintained
diff --git a/Makefile b/Makefile
index 126321d2e6ad..5679e7995862 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 12
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
NAME = One Giant Leap for Frogkind
# *DOCUMENTATION*
@@ -659,6 +659,12 @@ KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
# conserve stack if available
KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
+# disallow errors like 'EXPORT_GPL(foo);' with missing header
+KBUILD_CFLAGS += $(call cc-option,-Werror=implicit-int)
+
+# require functions to have arguments in prototypes, not empty 'int foo()'
+KBUILD_CFLAGS += $(call cc-option,-Werror=strict-prototypes)
+
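For illustration, these are the kinds of constructs the two options above
turn from warnings into hard errors (hypothetical snippets, not from the
tree):

    foo(void)          /* no return type: implicit 'int' */
    {
            return 0;
    }

    int bar();         /* empty parameter list is not a prototype */
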
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)
diff --git a/arch/Kconfig b/arch/Kconfig
index af2cc6eabcc7..ded747c7b74c 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -353,6 +353,18 @@ config HAVE_CONTEXT_TRACKING
config HAVE_VIRT_CPU_ACCOUNTING
bool
+config HAVE_VIRT_CPU_ACCOUNTING_GEN
+ bool
+ default y if 64BIT
+ help
+ With VIRT_CPU_ACCOUNTING_GEN, cputime_t becomes 64-bit.
+ Before enabling this option, arch code must be audited
+ to ensure there are no races in concurrent read/write of
+ cputime_t. For example, reading/writing 64-bit cputime_t on
+ some 32-bit arches may require multiple accesses, so proper
+ locking is needed to protect against concurrent accesses.
+
+
config HAVE_IRQ_TIME_ACCOUNTING
bool
help
@@ -390,6 +402,16 @@ config HAVE_UNDERSCORE_SYMBOL_PREFIX
Some architectures generate an _ in front of C symbols; things like
module loading and assembly files need to know about this.
+config HAVE_IRQ_EXIT_ON_IRQ_STACK
+ bool
+ help
+ The architecture executes not only the irq handler but also irq_exit()
+ on the irq stack. This way softirqs can be processed on this irq
+ stack instead of switching to a new one when __do_softirq() is called
+ at the end of a hardirq.
+ This spares a stack switch and improves cache usage during softirq
+ processing.
+
#
# ABI hall of shame
#
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 35a300d4a9fb..84803f88a169 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -1,6 +1,7 @@
config ALPHA
bool
default y
+ select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_AOUT
select HAVE_IDE
select HAVE_OPROFILE
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index a6e85f448c1c..f01fb505ad52 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -3,3 +3,4 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
+generic-y += preempt.h
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 467de010ea7e..e3a1491d5073 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -81,6 +81,8 @@
#define SO_SELECT_ERR_QUEUE 45
-#define SO_BUSY_POLL 46
+#define SO_BUSY_POLL 46
+
+#define SO_MAX_PACING_RATE 47
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 91dbb2757afd..fb4177e48260 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -35,6 +35,12 @@ config ARC
select PERF_USE_VMALLOC
select HAVE_DEBUG_STACKOVERFLOW
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+
+config LOCKDEP_SUPPORT
+ def_bool y
+
config SCHED_OMIT_FRAME_POINTER
def_bool y
@@ -139,8 +145,8 @@ config ARC_HAS_REENTRANT_IRQ_LV2
endif
config NR_CPUS
- int "Maximum number of CPUs (2-32)"
- range 2 32
+ int "Maximum number of CPUs (2-4096)"
+ range 2 4096
depends on SMP
default "2"
diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi
index d9f8249aa66e..3942634f805a 100644
--- a/arch/arc/boot/dts/abilis_tb100.dtsi
+++ b/arch/arc/boot/dts/abilis_tb100.dtsi
@@ -43,124 +43,124 @@
iomux: iomux@FF10601c {
/* Port 1 */
pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */
- pingrp = "mis0_pins";
+ abilis,function = "mis0";
};
pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */
- pingrp = "mis1_pins";
+ abilis,function = "mis1";
};
pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */
- pingrp = "gpioa_pins";
+ abilis,function = "gpioa";
};
pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */
- pingrp = "mip1_pins";
+ abilis,function = "mip1";
};
/* Port 2 */
pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */
- pingrp = "mis2_pins";
+ abilis,function = "mis2";
};
pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */
- pingrp = "mis3_pins";
+ abilis,function = "mis3";
};
pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */
- pingrp = "gpioc_pins";
+ abilis,function = "gpioc";
};
pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */
- pingrp = "mip3_pins";
+ abilis,function = "mip3";
};
/* Port 3 */
pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */
- pingrp = "mis4_pins";
+ abilis,function = "mis4";
};
pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */
- pingrp = "mis5_pins";
+ abilis,function = "mis5";
};
pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */
- pingrp = "gpioe_pins";
+ abilis,function = "gpioe";
};
pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */
- pingrp = "mip5_pins";
+ abilis,function = "mip5";
};
/* Port 4 */
pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */
- pingrp = "mis6_pins";
+ abilis,function = "mis6";
};
pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */
- pingrp = "mis7_pins";
+ abilis,function = "mis7";
};
pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */
- pingrp = "gpiog_pins";
+ abilis,function = "gpiog";
};
pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */
- pingrp = "mip7_pins";
+ abilis,function = "mip7";
};
/* Port 5 */
pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */
- pingrp = "gpioj_pins";
+ abilis,function = "gpioj";
};
pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */
- pingrp = "gpiok_pins";
+ abilis,function = "gpiok";
};
pctl_ciplus: pctl-ciplus { /* CI+ interface */
- pingrp = "ciplus_pins";
+ abilis,function = "ciplus";
};
pctl_mcard: pctl-mcard { /* M-Card interface */
- pingrp = "mcard_pins";
+ abilis,function = "mcard";
};
/* Port 6 */
pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */
- pingrp = "mop_pins";
+ abilis,function = "mop";
};
pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */
- pingrp = "mos0_pins";
+ abilis,function = "mos0";
};
pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */
- pingrp = "mos1_pins";
+ abilis,function = "mos1";
};
pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */
- pingrp = "mos2_pins";
+ abilis,function = "mos2";
};
pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */
- pingrp = "mos3_pins";
+ abilis,function = "mos3";
};
/* Port 7 */
pctl_uart0: pctl-uart0 { /* UART 0 */
- pingrp = "uart0_pins";
+ abilis,function = "uart0";
};
pctl_uart1: pctl-uart1 { /* UART 1 */
- pingrp = "uart1_pins";
+ abilis,function = "uart1";
};
pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */
- pingrp = "gpiol_pins";
+ abilis,function = "gpiol";
};
pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */
- pingrp = "gpiom_pins";
+ abilis,function = "gpiom";
};
/* Port 8 */
pctl_spi3: pctl-spi3 {
- pingrp = "spi3_pins";
+ abilis,function = "spi3";
};
/* Port 9 */
pctl_spi1: pctl-spi1 {
- pingrp = "spi1_pins";
+ abilis,function = "spi1";
};
pctl_gpio_n: pctl-gpio-n {
- pingrp = "gpion_pins";
+ abilis,function = "gpion";
};
/* Unmuxed GPIOs */
pctl_gpio_b: pctl-gpio-b {
- pingrp = "gpiob_pins";
+ abilis,function = "gpiob";
};
pctl_gpio_d: pctl-gpio-d {
- pingrp = "gpiod_pins";
+ abilis,function = "gpiod";
};
pctl_gpio_f: pctl-gpio-f {
- pingrp = "gpiof_pins";
+ abilis,function = "gpiof";
};
pctl_gpio_h: pctl-gpio-h {
- pingrp = "gpioh_pins";
+ abilis,function = "gpioh";
};
pctl_gpio_i: pctl-gpio-i {
- pingrp = "gpioi_pins";
+ abilis,function = "gpioi";
};
};
@@ -172,9 +172,10 @@
interrupts = <27 2>;
reg = <0xFF140000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <0>;
- gpio-pins = <&pctl_gpio_a>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioa";
};
gpiob: gpio@FF141000 {
compatible = "abilis,tb10x-gpio";
@@ -184,9 +185,10 @@
interrupts = <27 2>;
reg = <0xFF141000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <3>;
- gpio-pins = <&pctl_gpio_b>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiob";
};
gpioc: gpio@FF142000 {
compatible = "abilis,tb10x-gpio";
@@ -196,9 +198,10 @@
interrupts = <27 2>;
reg = <0xFF142000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <5>;
- gpio-pins = <&pctl_gpio_c>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioc";
};
gpiod: gpio@FF143000 {
compatible = "abilis,tb10x-gpio";
@@ -208,9 +211,10 @@
interrupts = <27 2>;
reg = <0xFF143000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <8>;
- gpio-pins = <&pctl_gpio_d>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiod";
};
gpioe: gpio@FF144000 {
compatible = "abilis,tb10x-gpio";
@@ -220,9 +224,10 @@
interrupts = <27 2>;
reg = <0xFF144000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <10>;
- gpio-pins = <&pctl_gpio_e>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioe";
};
gpiof: gpio@FF145000 {
compatible = "abilis,tb10x-gpio";
@@ -232,9 +237,10 @@
interrupts = <27 2>;
reg = <0xFF145000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <13>;
- gpio-pins = <&pctl_gpio_f>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiof";
};
gpiog: gpio@FF146000 {
compatible = "abilis,tb10x-gpio";
@@ -244,9 +250,10 @@
interrupts = <27 2>;
reg = <0xFF146000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <15>;
- gpio-pins = <&pctl_gpio_g>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiog";
};
gpioh: gpio@FF147000 {
compatible = "abilis,tb10x-gpio";
@@ -256,9 +263,10 @@
interrupts = <27 2>;
reg = <0xFF147000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <18>;
- gpio-pins = <&pctl_gpio_h>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioh";
};
gpioi: gpio@FF148000 {
compatible = "abilis,tb10x-gpio";
@@ -268,9 +276,10 @@
interrupts = <27 2>;
reg = <0xFF148000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <20>;
- gpio-pins = <&pctl_gpio_i>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <12>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioi";
};
gpioj: gpio@FF149000 {
compatible = "abilis,tb10x-gpio";
@@ -280,9 +289,10 @@
interrupts = <27 2>;
reg = <0xFF149000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <32>;
- gpio-pins = <&pctl_gpio_j>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <32>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioj";
};
gpiok: gpio@FF14a000 {
compatible = "abilis,tb10x-gpio";
@@ -292,9 +302,10 @@
interrupts = <27 2>;
reg = <0xFF14A000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <64>;
- gpio-pins = <&pctl_gpio_k>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <22>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiok";
};
gpiol: gpio@FF14b000 {
compatible = "abilis,tb10x-gpio";
@@ -304,9 +315,10 @@
interrupts = <27 2>;
reg = <0xFF14B000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <86>;
- gpio-pins = <&pctl_gpio_l>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <4>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiol";
};
gpiom: gpio@FF14c000 {
compatible = "abilis,tb10x-gpio";
@@ -316,9 +328,10 @@
interrupts = <27 2>;
reg = <0xFF14C000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <90>;
- gpio-pins = <&pctl_gpio_m>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <4>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiom";
};
gpion: gpio@FF14d000 {
compatible = "abilis,tb10x-gpio";
@@ -328,9 +341,10 @@
interrupts = <27 2>;
reg = <0xFF14D000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <94>;
- gpio-pins = <&pctl_gpio_n>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <5>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpion";
};
};
};
diff --git a/arch/arc/boot/dts/abilis_tb100_dvk.dts b/arch/arc/boot/dts/abilis_tb100_dvk.dts
index ebc313a9f5b2..3dd6ed941464 100644
--- a/arch/arc/boot/dts/abilis_tb100_dvk.dts
+++ b/arch/arc/boot/dts/abilis_tb100_dvk.dts
@@ -64,62 +64,62 @@
compatible = "gpio-leds";
power {
label = "Power";
- gpios = <&gpioi 0>;
+ gpios = <&gpioi 0 0>;
linux,default-trigger = "default-on";
};
heartbeat {
label = "Heartbeat";
- gpios = <&gpioi 1>;
+ gpios = <&gpioi 1 0>;
linux,default-trigger = "heartbeat";
};
led2 {
label = "LED2";
- gpios = <&gpioi 2>;
+ gpios = <&gpioi 2 0>;
default-state = "off";
};
led3 {
label = "LED3";
- gpios = <&gpioi 3>;
+ gpios = <&gpioi 3 0>;
default-state = "off";
};
led4 {
label = "LED4";
- gpios = <&gpioi 4>;
+ gpios = <&gpioi 4 0>;
default-state = "off";
};
led5 {
label = "LED5";
- gpios = <&gpioi 5>;
+ gpios = <&gpioi 5 0>;
default-state = "off";
};
led6 {
label = "LED6";
- gpios = <&gpioi 6>;
+ gpios = <&gpioi 6 0>;
default-state = "off";
};
led7 {
label = "LED7";
- gpios = <&gpioi 7>;
+ gpios = <&gpioi 7 0>;
default-state = "off";
};
led8 {
label = "LED8";
- gpios = <&gpioi 8>;
+ gpios = <&gpioi 8 0>;
default-state = "off";
};
led9 {
label = "LED9";
- gpios = <&gpioi 9>;
+ gpios = <&gpioi 9 0>;
default-state = "off";
};
led10 {
label = "LED10";
- gpios = <&gpioi 10>;
+ gpios = <&gpioi 10 0>;
default-state = "off";
};
led11 {
label = "LED11";
- gpios = <&gpioi 11>;
+ gpios = <&gpioi 11 0>;
default-state = "off";
};
};
diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi
index da8ca7941e67..b0467229a5c4 100644
--- a/arch/arc/boot/dts/abilis_tb101.dtsi
+++ b/arch/arc/boot/dts/abilis_tb101.dtsi
@@ -43,133 +43,133 @@
iomux: iomux@FF10601c {
/* Port 1 */
pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */
- pingrp = "mis0_pins";
+ abilis,function = "mis0";
};
pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */
- pingrp = "mis1_pins";
+ abilis,function = "mis1";
};
pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */
- pingrp = "gpioa_pins";
+ abilis,function = "gpioa";
};
pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */
- pingrp = "mip1_pins";
+ abilis,function = "mip1";
};
/* Port 2 */
pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */
- pingrp = "mis2_pins";
+ abilis,function = "mis2";
};
pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */
- pingrp = "mis3_pins";
+ abilis,function = "mis3";
};
pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */
- pingrp = "gpioc_pins";
+ abilis,function = "gpioc";
};
pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */
- pingrp = "mip3_pins";
+ abilis,function = "mip3";
};
/* Port 3 */
pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */
- pingrp = "mis4_pins";
+ abilis,function = "mis4";
};
pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */
- pingrp = "mis5_pins";
+ abilis,function = "mis5";
};
pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */
- pingrp = "gpioe_pins";
+ abilis,function = "gpioe";
};
pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */
- pingrp = "mip5_pins";
+ abilis,function = "mip5";
};
/* Port 4 */
pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */
- pingrp = "mis6_pins";
+ abilis,function = "mis6";
};
pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */
- pingrp = "mis7_pins";
+ abilis,function = "mis7";
};
pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */
- pingrp = "gpiog_pins";
+ abilis,function = "gpiog";
};
pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */
- pingrp = "mip7_pins";
+ abilis,function = "mip7";
};
/* Port 5 */
pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */
- pingrp = "gpioj_pins";
+ abilis,function = "gpioj";
};
pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */
- pingrp = "gpiok_pins";
+ abilis,function = "gpiok";
};
pctl_ciplus: pctl-ciplus { /* CI+ interface */
- pingrp = "ciplus_pins";
+ abilis,function = "ciplus";
};
pctl_mcard: pctl-mcard { /* M-Card interface */
- pingrp = "mcard_pins";
+ abilis,function = "mcard";
};
pctl_stc0: pctl-stc0 { /* Smart card I/F 0 */
- pingrp = "stc0_pins";
+ abilis,function = "stc0";
};
pctl_stc1: pctl-stc1 { /* Smart card I/F 1 */
- pingrp = "stc1_pins";
+ abilis,function = "stc1";
};
/* Port 6 */
pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */
- pingrp = "mop_pins";
+ abilis,function = "mop";
};
pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */
- pingrp = "mos0_pins";
+ abilis,function = "mos0";
};
pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */
- pingrp = "mos1_pins";
+ abilis,function = "mos1";
};
pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */
- pingrp = "mos2_pins";
+ abilis,function = "mos2";
};
pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */
- pingrp = "mos3_pins";
+ abilis,function = "mos3";
};
/* Port 7 */
pctl_uart0: pctl-uart0 { /* UART 0 */
- pingrp = "uart0_pins";
+ abilis,function = "uart0";
};
pctl_uart1: pctl-uart1 { /* UART 1 */
- pingrp = "uart1_pins";
+ abilis,function = "uart1";
};
pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */
- pingrp = "gpiol_pins";
+ abilis,function = "gpiol";
};
pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */
- pingrp = "gpiom_pins";
+ abilis,function = "gpiom";
};
/* Port 8 */
pctl_spi3: pctl-spi3 {
- pingrp = "spi3_pins";
+ abilis,function = "spi3";
};
pctl_jtag: pctl-jtag {
- pingrp = "jtag_pins";
+ abilis,function = "jtag";
};
/* Port 9 */
pctl_spi1: pctl-spi1 {
- pingrp = "spi1_pins";
+ abilis,function = "spi1";
};
pctl_gpio_n: pctl-gpio-n {
- pingrp = "gpion_pins";
+ abilis,function = "gpion";
};
/* Unmuxed GPIOs */
pctl_gpio_b: pctl-gpio-b {
- pingrp = "gpiob_pins";
+ abilis,function = "gpiob";
};
pctl_gpio_d: pctl-gpio-d {
- pingrp = "gpiod_pins";
+ abilis,function = "gpiod";
};
pctl_gpio_f: pctl-gpio-f {
- pingrp = "gpiof_pins";
+ abilis,function = "gpiof";
};
pctl_gpio_h: pctl-gpio-h {
- pingrp = "gpioh_pins";
+ abilis,function = "gpioh";
};
pctl_gpio_i: pctl-gpio-i {
- pingrp = "gpioi_pins";
+ abilis,function = "gpioi";
};
};
@@ -181,9 +181,10 @@
interrupts = <27 2>;
reg = <0xFF140000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <0>;
- gpio-pins = <&pctl_gpio_a>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioa";
};
gpiob: gpio@FF141000 {
compatible = "abilis,tb10x-gpio";
@@ -193,9 +194,10 @@
interrupts = <27 2>;
reg = <0xFF141000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <3>;
- gpio-pins = <&pctl_gpio_b>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiob";
};
gpioc: gpio@FF142000 {
compatible = "abilis,tb10x-gpio";
@@ -205,9 +207,10 @@
interrupts = <27 2>;
reg = <0xFF142000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <5>;
- gpio-pins = <&pctl_gpio_c>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioc";
};
gpiod: gpio@FF143000 {
compatible = "abilis,tb10x-gpio";
@@ -217,9 +220,10 @@
interrupts = <27 2>;
reg = <0xFF143000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <8>;
- gpio-pins = <&pctl_gpio_d>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiod";
};
gpioe: gpio@FF144000 {
compatible = "abilis,tb10x-gpio";
@@ -229,9 +233,10 @@
interrupts = <27 2>;
reg = <0xFF144000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <10>;
- gpio-pins = <&pctl_gpio_e>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioe";
};
gpiof: gpio@FF145000 {
compatible = "abilis,tb10x-gpio";
@@ -241,9 +246,10 @@
interrupts = <27 2>;
reg = <0xFF145000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <13>;
- gpio-pins = <&pctl_gpio_f>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiof";
};
gpiog: gpio@FF146000 {
compatible = "abilis,tb10x-gpio";
@@ -253,9 +259,10 @@
interrupts = <27 2>;
reg = <0xFF146000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <15>;
- gpio-pins = <&pctl_gpio_g>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiog";
};
gpioh: gpio@FF147000 {
compatible = "abilis,tb10x-gpio";
@@ -265,9 +272,10 @@
interrupts = <27 2>;
reg = <0xFF147000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <18>;
- gpio-pins = <&pctl_gpio_h>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioh";
};
gpioi: gpio@FF148000 {
compatible = "abilis,tb10x-gpio";
@@ -277,9 +285,10 @@
interrupts = <27 2>;
reg = <0xFF148000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <20>;
- gpio-pins = <&pctl_gpio_i>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <12>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioi";
};
gpioj: gpio@FF149000 {
compatible = "abilis,tb10x-gpio";
@@ -289,9 +298,10 @@
interrupts = <27 2>;
reg = <0xFF149000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <32>;
- gpio-pins = <&pctl_gpio_j>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <32>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioj";
};
gpiok: gpio@FF14a000 {
compatible = "abilis,tb10x-gpio";
@@ -301,9 +311,10 @@
interrupts = <27 2>;
reg = <0xFF14A000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <64>;
- gpio-pins = <&pctl_gpio_k>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <22>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiok";
};
gpiol: gpio@FF14b000 {
compatible = "abilis,tb10x-gpio";
@@ -313,9 +324,10 @@
interrupts = <27 2>;
reg = <0xFF14B000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <86>;
- gpio-pins = <&pctl_gpio_l>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <4>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiol";
};
gpiom: gpio@FF14c000 {
compatible = "abilis,tb10x-gpio";
@@ -325,9 +337,10 @@
interrupts = <27 2>;
reg = <0xFF14C000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <90>;
- gpio-pins = <&pctl_gpio_m>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <4>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiom";
};
gpion: gpio@FF14d000 {
compatible = "abilis,tb10x-gpio";
@@ -337,9 +350,10 @@
interrupts = <27 2>;
reg = <0xFF14D000 0x1000>;
gpio-controller;
- #gpio-cells = <1>;
- gpio-base = <94>;
- gpio-pins = <&pctl_gpio_n>;
+ #gpio-cells = <2>;
+ abilis,ngpio = <5>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpion";
};
};
};
diff --git a/arch/arc/boot/dts/abilis_tb101_dvk.dts b/arch/arc/boot/dts/abilis_tb101_dvk.dts
index b204657993aa..1cf51c280f28 100644
--- a/arch/arc/boot/dts/abilis_tb101_dvk.dts
+++ b/arch/arc/boot/dts/abilis_tb101_dvk.dts
@@ -64,62 +64,62 @@
compatible = "gpio-leds";
power {
label = "Power";
- gpios = <&gpioi 0>;
+ gpios = <&gpioi 0 0>;
linux,default-trigger = "default-on";
};
heartbeat {
label = "Heartbeat";
- gpios = <&gpioi 1>;
+ gpios = <&gpioi 1 0>;
linux,default-trigger = "heartbeat";
};
led2 {
label = "LED2";
- gpios = <&gpioi 2>;
+ gpios = <&gpioi 2 0>;
default-state = "off";
};
led3 {
label = "LED3";
- gpios = <&gpioi 3>;
+ gpios = <&gpioi 3 0>;
default-state = "off";
};
led4 {
label = "LED4";
- gpios = <&gpioi 4>;
+ gpios = <&gpioi 4 0>;
default-state = "off";
};
led5 {
label = "LED5";
- gpios = <&gpioi 5>;
+ gpios = <&gpioi 5 0>;
default-state = "off";
};
led6 {
label = "LED6";
- gpios = <&gpioi 6>;
+ gpios = <&gpioi 6 0>;
default-state = "off";
};
led7 {
label = "LED7";
- gpios = <&gpioi 7>;
+ gpios = <&gpioi 7 0>;
default-state = "off";
};
led8 {
label = "LED8";
- gpios = <&gpioi 8>;
+ gpios = <&gpioi 8 0>;
default-state = "off";
};
led9 {
label = "LED9";
- gpios = <&gpioi 9>;
+ gpios = <&gpioi 9 0>;
default-state = "off";
};
led10 {
label = "LED10";
- gpios = <&gpioi 10>;
+ gpios = <&gpioi 10 0>;
default-state = "off";
};
led11 {
label = "LED11";
- gpios = <&gpioi 11>;
+ gpios = <&gpioi 11 0>;
default-state = "off";
};
};
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
index edf56f4749e1..a098d7c05e96 100644
--- a/arch/arc/boot/dts/abilis_tb10x.dtsi
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -62,9 +62,8 @@
};
iomux: iomux@FF10601c {
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "abilis,tb10x-iomux";
+ #gpio-range-cells = <3>;
reg = <0xFF10601c 0x4>;
};
diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/fpga_defconfig
index 4ca50f1f8d05..e283aa586934 100644
--- a/arch/arc/configs/fpga_defconfig
+++ b/arch/arc/configs/fpga_defconfig
@@ -2,6 +2,8 @@ CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@@ -62,4 +64,5 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_DEBUG_PREEMPT is not set
CONFIG_XZ_DEC=y
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index d8dd660898b9..5943f7f9d325 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -46,3 +46,4 @@ generic-y += ucontext.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index e4abdaac6f9f..2fd3162ec4df 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -17,13 +17,7 @@
#endif
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-/* For a rare case where customers have differently config I/D */
-#define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES
-#define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES
-
-#define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1))
-#define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1))
+#define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1))
/*
* ARC700 doesn't cache any access in top 256M.
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index c0a72105ee0b..291a70db68b8 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -18,8 +18,8 @@
#include <asm-generic/irq.h>
-extern void __init arc_init_IRQ(void);
-extern int __init get_hw_config_num_irq(void);
+extern void arc_init_IRQ(void);
+extern int get_hw_config_num_irq(void);
void arc_local_timer_setup(unsigned int cpu);
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index b68b53f458d1..cb7efc29f16f 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -151,16 +151,38 @@ static inline void arch_unmask_irq(unsigned int irq)
#else
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+ bl trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+ bl trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
+
.macro IRQ_DISABLE scratch
lr \scratch, [status32]
bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
+ TRACE_ASM_IRQ_DISABLE
.endm
.macro IRQ_ENABLE scratch
lr \scratch, [status32]
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
+ TRACE_ASM_IRQ_ENABLE
.endm
#endif /* __ASSEMBLY__ */
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index 9998dc846ebb..e8993a2be6c2 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -51,22 +51,12 @@ struct machine_desc {
/*
* Current machine - only accessible during boot.
*/
-extern struct machine_desc *machine_desc;
+extern const struct machine_desc *machine_desc;
/*
* Machine type table - also only accessible during boot
*/
-extern struct machine_desc __arch_info_begin[], __arch_info_end[];
-#define for_each_machine_desc(p) \
- for (p = __arch_info_begin; p < __arch_info_end; p++)
-
-static inline struct machine_desc *default_machine_desc(void)
-{
- /* the default machine is the last one linked in */
- if (__arch_info_end - 1 < __arch_info_begin)
- return NULL;
- return __arch_info_end - 1;
-}
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
/*
* Set of macros to define architecture features.
@@ -81,7 +71,6 @@ __attribute__((__section__(".arch.info.init"))) = { \
#define MACHINE_END \
};
-extern struct machine_desc *setup_machine_fdt(void *dt);
-extern void __init copy_devtree(void);
+extern const struct machine_desc *setup_machine_fdt(void *dt);
#endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index c2663b32866b..8c84ae98c337 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -48,7 +48,7 @@
#ifndef __ASSEMBLY__
typedef struct {
- unsigned long asid; /* 8 bit MMU PID + Generation cycle */
+ unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */
} mm_context_t;
#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 43a1b51bb8cc..1fd467ef658f 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -30,13 +30,13 @@
* "Fast Context Switch" i.e. no TLB flush on ctxt-switch
*
* Linux assigns each task a unique ASID. A simple round-robin allocation
- * of H/w ASID is done using software tracker @asid_cache.
+ * of H/w ASID is done using software tracker @asid_cpu.
* When it reaches max 255, the allocation cycle starts afresh by flushing
* the entire TLB and wrapping ASID back to zero.
*
* A new allocation cycle, post rollover, could potentially reassign an ASID
* to a different task. Thus the rule is to refresh the ASID in a new cycle.
- * The 32 bit @asid_cache (and mm->asid) have 8 bits MMU PID and rest 24 bits
+ * The 32 bit @asid_cpu (and mm->asid) have 8 bits MMU PID and rest 24 bits
* serve as cycle/generation indicator and natural 32 bit unsigned math
* automagically increments the generation when lower 8 bits rollover.
*/
@@ -47,9 +47,11 @@
#define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID 0UL
-#define hw_pid(mm) (mm->context.asid & MM_CTXT_ASID_MASK)
+#define asid_mm(mm, cpu) mm->context.asid[cpu]
+#define hw_pid(mm, cpu) (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
-extern unsigned int asid_cache;
+DECLARE_PER_CPU(unsigned int, asid_cache);
+#define asid_cpu(cpu) per_cpu(asid_cache, cpu)
/*
* Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
@@ -57,6 +59,7 @@ extern unsigned int asid_cache;
*/
static inline void get_new_mmu_context(struct mm_struct *mm)
{
+ const unsigned int cpu = smp_processor_id();
unsigned long flags;
local_irq_save(flags);
@@ -71,28 +74,28 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
* first need to destroy the context, setting it to invalid
* value.
*/
- if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
+ if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
goto set_hw;
/* move to new ASID and handle rollover */
- if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) {
+ if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
- flush_tlb_all();
+ local_flush_tlb_all();
/*
* Above check is for rollover of the 8 bit ASID in the 32 bit container.
* If the container itself wrapped around, set it to a non zero
* "generation" to distinguish from no context
*/
- if (!asid_cache)
- asid_cache = MM_CTXT_FIRST_CYCLE;
+ if (!asid_cpu(cpu))
+ asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
}
/* Assign new ASID to tsk */
- mm->context.asid = asid_cache;
+ asid_mm(mm, cpu) = asid_cpu(cpu);
set_hw:
- write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE);
+ write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);
local_irq_restore(flags);
}
@@ -104,16 +107,45 @@ set_hw:
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
- mm->context.asid = MM_CTXT_NO_ASID;
+ int i;
+
+ for_each_possible_cpu(i)
+ asid_mm(mm, i) = MM_CTXT_NO_ASID;
+
return 0;
}
+static inline void destroy_context(struct mm_struct *mm)
+{
+ unsigned long flags;
+
+ /* Needed to elide CONFIG_DEBUG_PREEMPT warning */
+ local_irq_save(flags);
+ asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
+ local_irq_restore(flags);
+}
+
/* Prepare the MMU for task: setup PID reg with allocated ASID
If task doesn't have an ASID (never allocated or stolen), get a new ASID
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
+ const int cpu = smp_processor_id();
+
+ /*
+ * Note that the mm_cpumask is "aggregating" only, we don't clear it
+ * for the switched-out task, unlike some other arches.
+ * It is used to enlist cpus for sending TLB flush IPIs: not sending the
+ * IPI to a CPU where the task once ran could cause stale TLB entry
+ * re-use, especially for a multi-threaded task.
+ * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
+ *      For a non-aggregating mm_cpumask, the IPI is not sent to C1, and
+ *      if T1 were to re-migrate to C1, it could access the unmapped
+ *      region via any existing stale TLB entries.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
#ifndef CONFIG_SMP
/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
@@ -131,11 +163,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
#define activate_mm(prev, next) switch_mm(prev, next, NULL)
-static inline void destroy_context(struct mm_struct *mm)
-{
- mm->context.asid = MM_CTXT_NO_ASID;
-}
-
/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
* for retiring-mm. However destroy_context( ) still needs to do that because
* between mm_release( ) => deactive_mm( ) and
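The ASID scheme in the hunks above packs an 8 bit hardware PID and a 24 bit generation into one 32 bit word, so plain unsigned increments handle both allocation and rollover. A minimal user-space sketch of that arithmetic (constants mirror the MM_CTXT_* macros above; not kernel code):

#include <stdio.h>

#define MM_CTXT_ASID_MASK	0xffUL			/* low 8 bits: H/w PID */
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)	/* upper 24 bits: generation */

int main(void)
{
	unsigned long asid_cache = 0x1ff;	/* generation 1, last ASID 0xff */

	/* next allocation wraps the 8 bit ASID, bumping the generation */
	if (!(++asid_cache & MM_CTXT_ASID_MASK))
		printf("rollover: flush TLB, generation now %#lx\n",
		       asid_cache & MM_CTXT_CYCLE_MASK);	/* prints 0x200 */
	return 0;
}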
diff --git a/arch/arc/include/asm/prom.h b/arch/arc/include/asm/prom.h
deleted file mode 100644
index 692d0d0789a7..000000000000
--- a/arch/arc/include/asm/prom.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARC_PROM_H_
-#define _ASM_ARC_PROM_H_
-
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
-#endif
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 229e50681497..e10f8cef56a8 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -31,7 +31,7 @@ struct cpuinfo_data {
extern int root_mountflags, end_mem;
extern int running_on_hw;
-void __init setup_processor(void);
+void setup_processor(void);
void __init setup_arch_memory(void);
#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index c4fb211dcd25..eefc29f08cdb 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -30,7 +30,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
* APIs provided by arch SMP code to rest of arch code
*/
extern void __init smp_init_cpus(void);
-extern void __init first_lines_of_secondary(void);
+extern void first_lines_of_secondary(void);
extern const char *arc_platform_smp_cpuinfo(void);
/*
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
index b2f9bc7f68c8..71c7b2e4b874 100644
--- a/arch/arc/include/asm/tlbflush.h
+++ b/arch/arc/include/asm/tlbflush.h
@@ -18,11 +18,18 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
-/* XXX: Revisit for SMP */
+#ifndef CONFIG_SMP
#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
-
+#else
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+#endif /* CONFIG_SMP */
#endif
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
index 60702f3751d2..3e5f071bc00c 100644
--- a/arch/arc/include/asm/unaligned.h
+++ b/arch/arc/include/asm/unaligned.h
@@ -22,7 +22,8 @@ static inline int
misaligned_fixup(unsigned long address, struct pt_regs *regs,
struct callee_regs *cregs)
{
- return 0;
+ /* Not fixed */
+ return 1;
}
#endif
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index 34410eb1a308..c14a5bea0c76 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -17,6 +17,8 @@
#include <asm/asm-offsets.h>
#include <linux/sched.h>
+#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
+
struct task_struct *__sched
__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
{
@@ -45,7 +47,16 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
#endif
/* set ksp of outgoing task in tsk->thread.ksp */
+#if KSP_WORD_OFF <= 255
"st.as sp, [%3, %1] \n\t"
+#else
+ /*
+ * Workaround for NR_CPUS=4k
+ * %1 is bigger than 255 (S9 offset for st.as)
+ */
+ "add2 r24, %3, %1 \n\t"
+ "st sp, [r24] \n\t"
+#endif
"sync \n\t"
@@ -97,7 +108,7 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
/* FP/BLINK restore generated by gcc (standard func epilogue) */
: "=r"(tmp)
- : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev)
+ : "n"(KSP_WORD_OFF), "r"(next), "r"(prev)
: "blink"
);
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
index d8972345e4c2..65690e7fcc8c 100644
--- a/arch/arc/kernel/ctx_sw_asm.S
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -14,6 +14,8 @@
#include <asm/asm-offsets.h>
#include <asm/linkage.h>
+#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
+
;################### Low Level Context Switch ##########################
.section .sched.text,"ax",@progbits
@@ -28,8 +30,13 @@ __switch_to:
SAVE_CALLEE_SAVED_KERNEL
/* Save the now KSP in task->thread.ksp */
- st.as sp, [r0, (TASK_THREAD + THREAD_KSP)/4]
-
+#if KSP_WORD_OFF <= 255
+ st.as sp, [r0, KSP_WORD_OFF]
+#else
+ /* Workaround for NR_CPUS=4k as ST.as can only take s9 offset */
+ add2 r24, r0, KSP_WORD_OFF
+ st sp, [r24]
+#endif
/*
* Return last task in r0 (return reg)
* On ARC, Return reg = First Arg reg = r0.
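Both ctx_sw hunks above guard the st.as store on KSP_WORD_OFF fitting the s9 (signed 9 bit) scaled-offset field that ST.as accepts. A hedged sketch of the same check; the byte offset here is made up purely for illustration, the real value comes from asm-offsets (TASK_THREAD + THREAD_KSP):

#include <stdio.h>

int main(void)
{
	unsigned long ksp_byte_off = 1408;		/* hypothetical thread.ksp offset */
	unsigned long ksp_word_off = ksp_byte_off / 4;	/* analogue of KSP_WORD_OFF */

	if (ksp_word_off <= 255)			/* fits the s9 scaled offset */
		printf("st.as sp, [r0, %lu]\n", ksp_word_off);
	else						/* NR_CPUS=4k style workaround */
		printf("add2 r24, r0, %lu\nst sp, [r24]\n", ksp_word_off);
	return 0;
}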
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index 2340af0e1d6f..b6dc4e21fd32 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -14,10 +14,22 @@
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
-#include <asm/prom.h>
#include <asm/clk.h>
#include <asm/mach_desc.h>
+static const void * __init arch_get_next_mach(const char *const **match)
+{
+ static const struct machine_desc *mdesc = __arch_info_begin;
+ const struct machine_desc *m = mdesc;
+
+ if (m >= __arch_info_end)
+ return NULL;
+
+ mdesc++;
+ *match = m->dt_compat;
+ return m;
+}
+
/**
* setup_machine_fdt - Machine setup when a dtb was passed to the kernel
* @dt: virtual address pointer to dt blob
@@ -25,93 +37,24 @@
* If a dtb was passed to the kernel, then use it to choose the correct
* machine_desc and to setup the system.
*/
-struct machine_desc * __init setup_machine_fdt(void *dt)
+const struct machine_desc * __init setup_machine_fdt(void *dt)
{
- struct boot_param_header *devtree = dt;
- struct machine_desc *mdesc = NULL, *mdesc_best = NULL;
- unsigned int score, mdesc_score = ~1;
+ const struct machine_desc *mdesc;
unsigned long dt_root;
- const char *model, *compat;
void *clk;
- char manufacturer[16];
unsigned long len;
- /* check device tree validity */
- if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
+ if (!early_init_dt_scan(dt))
return NULL;
- initial_boot_params = devtree;
- dt_root = of_get_flat_dt_root();
-
- /*
- * The kernel could be multi-platform enabled, thus could have many
- * "baked-in" machine descriptors. Search thru all for the best
- * "compatible" string match.
- */
- for_each_machine_desc(mdesc) {
- score = of_flat_dt_match(dt_root, mdesc->dt_compat);
- if (score > 0 && score < mdesc_score) {
- mdesc_best = mdesc;
- mdesc_score = score;
- }
- }
- if (!mdesc_best) {
- const char *prop;
- long size;
-
- pr_err("\n unrecognized device tree list:\n[ ");
-
- prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
- if (prop) {
- while (size > 0) {
- printk("'%s' ", prop);
- size -= strlen(prop) + 1;
- prop += strlen(prop) + 1;
- }
- }
- printk("]\n\n");
-
+ mdesc = of_flat_dt_match_machine(NULL, arch_get_next_mach);
+ if (!mdesc)
machine_halt();
- }
-
- /* compat = "<manufacturer>,<model>" */
- compat = mdesc_best->dt_compat[0];
-
- model = strchr(compat, ',');
- if (model)
- model++;
-
- strlcpy(manufacturer, compat, model ? model - compat : strlen(compat));
-
- pr_info("Board \"%s\" from %s (Manufacturer)\n", model, manufacturer);
-
- /* Retrieve various information from the /chosen node */
- of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
-
- /* Initialize {size,address}-cells info */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
-
- /* Setup memory, calling early_init_dt_add_memory_arch */
- of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+ dt_root = of_get_flat_dt_root();
clk = of_get_flat_dt_prop(dt_root, "clock-frequency", &len);
if (clk)
arc_set_core_freq(of_read_ulong(clk, len/4));
- return mdesc_best;
-}
-
-/*
- * Copy the flattened DT out of .init since unflattening doesn't copy strings
- * and the normal DT APIs refs them from orig flat DT
- */
-void __init copy_devtree(void)
-{
- void *alloc = early_init_dt_alloc_memory_arch(
- be32_to_cpu(initial_boot_params->totalsize), 64);
- if (alloc) {
- memcpy(alloc, initial_boot_params,
- be32_to_cpu(initial_boot_params->totalsize));
- initial_boot_params = alloc;
- }
+ return mdesc;
}
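For reference, of_flat_dt_match_machine() walks the descriptors handed back by arch_get_next_mach() above and compares each dt_compat list against the DTB root's "compatible" property. A hedged sketch of how a platform would register such a descriptor with the MACHINE_START/MACHINE_END macros from mach_desc.h (board name and compatible string are made up):

static const char *my_board_compat[] = {
	"vendor,my-board",	/* matched against the DTB root "compatible" */
	NULL,
};

MACHINE_START(MY_BOARD, "my_board")
	.dt_compat	= my_board_compat,
MACHINE_END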
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index b908dde8a331..47d09d07f093 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -250,6 +250,14 @@ ARC_ENTRY handle_interrupt_level1
lr r0, [icause1]
and r0, r0, 0x1f
+#ifdef CONFIG_TRACE_IRQFLAGS
+ ; icause1 needs to be read early, before calling tracing, which
+ ; can clobber scratch regs, hence use of stack to stash it
+ push r0
+ TRACE_ASM_IRQ_DISABLE
+ pop r0
+#endif
+
bl.d @arch_do_IRQ
mov r1, sp
@@ -337,9 +345,9 @@ ARC_ENTRY EV_TLBProtV
; vineetg: Mar 6th: Random Seg Fault issue #1
; ecr and efa were not saved in case an Intr sneaks in
; after fake rtie
- ;
+
lr r2, [ecr]
- lr r1, [efa] ; Faulting Data address
+ lr r0, [efa] ; Faulting Data address
; --------(4) Return from CPU Exception Mode ---------
; Fake a rtie, but rtie to next label
@@ -348,6 +356,8 @@ ARC_ENTRY EV_TLBProtV
FAKE_RET_FROM_EXCPN r9
+ mov r1, sp
+
;------ (5) Type of Protection Violation? ----------
;
; ProtV Hardware Exception is triggered for Access Faults of 2 types
@@ -358,16 +368,12 @@ ARC_ENTRY EV_TLBProtV
bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
;========= (6a) Access Violation Processing ========
- mov r0, sp ; pt_regs
bl do_page_fault
b ret_from_exception
;========== (6b) Non aligned access ============
4:
- mov r0, r1
- mov r1, sp ; pt_regs
-#ifdef CONFIG_ARC_MISALIGN_ACCESS
SAVE_CALLEE_SAVED_USER
mov r2, sp ; callee_regs
@@ -376,9 +382,6 @@ ARC_ENTRY EV_TLBProtV
; TBD: optimize - do this only if a callee reg was involved
; either a dst of emulated LD/ST or src with address-writeback
RESTORE_CALLEE_SAVED_USER
-#else
- bl do_misaligned_error
-#endif
b ret_from_exception
@@ -575,6 +578,7 @@ resume_user_mode_begin:
; --- (Slow Path #2) pending signal ---
mov r0, sp ; pt_regs for arg to do_signal()/do_notify_resume()
+ GET_CURR_THR_INFO_FLAGS r9
bbit0 r9, TIF_SIGPENDING, .Lchk_notify_resume
; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
@@ -640,6 +644,8 @@ resume_kernel_mode:
restore_regs :
+ TRACE_ASM_IRQ_ENABLE
+
lr r10, [status32]
; Restore REG File. In case multiple Events outstanding,
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 0f944f024513..2c878e964a64 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -95,7 +95,7 @@ stext:
;----------------------------------------------------------------
; First lines of code run by secondary before jumping to 'C'
;----------------------------------------------------------------
- .section .init.text, "ax",@progbits
+ .section .text, "ax",@progbits
.type first_lines_of_secondary, @function
.globl first_lines_of_secondary
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 5fc92455da36..a4b141ee9a6a 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -39,10 +39,14 @@ void arc_init_IRQ(void)
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
- if (level_mask) {
+ /*
+ * Write to the register even if no LV2 IRQs are configured, to reset it
+ * in case the bootloader had mucked with it
+ */
+ write_aux_reg(AUX_IRQ_LEV, level_mask);
+
+ if (level_mask)
pr_info("Level-2 interrupts bitset %x\n", level_mask);
- write_aux_reg(AUX_IRQ_LEV, level_mask);
- }
}
/*
@@ -146,7 +150,7 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
set_irq_regs(old_regs);
}
-int __init get_hw_config_num_irq(void)
+int get_hw_config_num_irq(void)
{
uint32_t val = read_aux_reg(ARC_REG_VECBASE_BCR);
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
index a7698fb14818..a2ff5c5d1450 100644
--- a/arch/arc/kernel/kgdb.c
+++ b/arch/arc/kernel/kgdb.c
@@ -196,6 +196,18 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
instruction_pointer(regs) = ip;
}
+static void kgdb_call_nmi_hook(void *ignored)
+{
+ kgdb_nmicallback(raw_smp_processor_id(), NULL);
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+ local_irq_enable();
+ smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+ local_irq_disable();
+}
+
struct kgdb_arch arch_kgdb_ops = {
/* breakpoint instruction: TRAP_S 0x3 */
#ifdef CONFIG_CPU_BIG_ENDIAN
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index 72f97822784a..42b05046fad9 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -87,13 +87,13 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
- __get_cpu_var(current_kprobe) = p;
+ __this_cpu_write(current_kprobe, p);
}
static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
@@ -237,7 +237,7 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
return 1;
} else if (kprobe_running()) {
- p = __get_cpu_var(current_kprobe);
+ p = __this_cpu_read(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
setup_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
@@ -327,7 +327,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
*/
/* We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accouting
+ * we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
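The accessor conversion in this hunk follows the generic pattern of replacing __get_cpu_var() lvalue accesses with the this_cpu_*() helpers; a minimal sketch of the pattern (demo_kprobe and the demo_* functions are made-up names):

#include <linux/percpu.h>
#include <linux/kprobes.h>

static DEFINE_PER_CPU(struct kprobe *, demo_kprobe);

static void demo_set(struct kprobe *p)
{
	/* was: __get_cpu_var(demo_kprobe) = p; */
	__this_cpu_write(demo_kprobe, p);
}

static struct kprobe *demo_get(void)
{
	/* was: return __get_cpu_var(demo_kprobe); */
	return __this_cpu_read(demo_kprobe);
}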
diff --git a/arch/arc/kernel/reset.c b/arch/arc/kernel/reset.c
index e227a2b1c943..2768fa1e39b9 100644
--- a/arch/arc/kernel/reset.c
+++ b/arch/arc/kernel/reset.c
@@ -31,3 +31,4 @@ void machine_power_off(void)
}
void (*pm_power_off) (void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 2c68bc7e6a78..643eae4436e0 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -21,7 +21,6 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/unwind.h>
#include <asm/clk.h>
#include <asm/mach_desc.h>
@@ -31,14 +30,13 @@
int running_on_hw = 1; /* vs. on ISS */
char __initdata command_line[COMMAND_LINE_SIZE];
-struct machine_desc *machine_desc;
+const struct machine_desc *machine_desc;
struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
-
-void read_arc_build_cfg_regs(void)
+static void read_arc_build_cfg_regs(void)
{
struct bcr_perip uncached_space;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
@@ -106,7 +104,7 @@ static const struct cpuinfo_data arc_cpu_tbl[] = {
{ {0x00, NULL } }
};
-char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
+static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
int n = 0;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
@@ -171,7 +169,7 @@ static const struct id_to_str mac_mul_nm[] = {
{0x6, "Dual 16x16 and 32x16"}
};
-char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
+static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
{
int n = 0;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
@@ -234,7 +232,7 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
return buf;
}
-void arc_chk_ccms(void)
+static void arc_chk_ccms(void)
{
#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
@@ -269,7 +267,7 @@ void arc_chk_ccms(void)
* hardware has dedicated regs which need to be saved/restored on ctx-sw
* (Single Precision uses core regs), thus kernel is kind of oblivious to it
*/
-void arc_chk_fpu(void)
+static void arc_chk_fpu(void)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
@@ -346,8 +344,7 @@ void __init setup_arch(char **cmdline_p)
setup_arch_memory();
/* copy flat DT out of .init and then unflatten it */
- copy_devtree();
- unflatten_device_tree();
+ unflatten_and_copy_device_tree();
/* Can be an issue if someone passes cmd line arg "ro"
* But that is unlikely so keeping it as it is
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index bca3052c956d..c2f9ebbc38f6 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -95,7 +95,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
* If it turns out to be elaborate, it's better to code it in assembly
*
*/
-void __attribute__((weak)) arc_platform_smp_wait_to_boot(int cpu)
+void __weak arc_platform_smp_wait_to_boot(int cpu)
{
/*
* As a hack for debugging - since debugger will single-step over the
@@ -128,6 +128,7 @@ void start_kernel_secondary(void)
atomic_inc(&mm->mm_users);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
@@ -210,7 +211,6 @@ enum ipi_msg_type {
IPI_NOP = 0,
IPI_RESCHEDULE = 1,
IPI_CALL_FUNC,
- IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP
};
@@ -254,7 +254,7 @@ void smp_send_stop(void)
void arch_send_call_function_single_ipi(int cpu)
{
- ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+ ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -286,10 +286,6 @@ static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu)
generic_smp_call_function_interrupt();
break;
- case IPI_CALL_FUNC_SINGLE:
- generic_smp_call_function_single_interrupt();
- break;
-
case IPI_CPU_STOP:
ipi_cpu_stop(cpu);
break;
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index f8b7d880304d..9ce47cfe2303 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -237,11 +237,14 @@ unsigned int get_wchan(struct task_struct *tsk)
*/
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
+ /* Assumes @tsk is sleeping so unwinds from __switch_to */
arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}
void save_stack_trace(struct stack_trace *trace)
{
- arc_unwind_core(current, NULL, __collect_all, trace);
+ /* Pass NULL for task so it unwinds the current call frame */
+ arc_unwind_core(NULL, NULL, __collect_all, trace);
}
+EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 3fde7de3ea67..0a9b6b289c4f 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -206,7 +206,7 @@ static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
- struct clock_event_device *clk = &__get_cpu_var(arc_clockevent_device);
+ struct clock_event_device *clk = this_cpu_ptr(&arc_clockevent_device);
arc_timer_event_ack(clk->mode == CLOCK_EVT_MODE_PERIODIC);
clk->event_handler(clk);
@@ -223,7 +223,7 @@ static struct irqaction arc_timer_irq = {
* Setup the local event timer for @cpu
* N.B. weak so that some exotic ARC SoCs can completely override it
*/
-void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
+void __weak arc_local_timer_setup(unsigned int cpu)
{
struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index e21692d2fdab..3eadfdabc322 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -84,19 +84,18 @@ DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR)
DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
-#ifdef CONFIG_ARC_MISALIGN_ACCESS
/*
* Entry Point for Misaligned Data access Exception, for emulating in software
*/
int do_misaligned_access(unsigned long address, struct pt_regs *regs,
struct callee_regs *cregs)
{
+ /* If emulation not enabled, or failed, kill the task */
if (misaligned_fixup(address, regs, cregs) != 0)
return do_misaligned_error(address, regs);
return 0;
}
-#endif
/*
* Entry point for miscellaneous errors such as Nested Exceptions
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 5a1259cd948c..6b58c1de7577 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -182,7 +182,7 @@ void arc_cache_init(void)
#ifdef CONFIG_ARC_HAS_ICACHE
/* 1. Confirm some of I-cache params which Linux assumes */
- if (ic->line_len != ARC_ICACHE_LINE_LEN)
+ if (ic->line_len != L1_CACHE_BYTES)
panic("Cache H/W doesn't match kernel Config");
if (ic->ver != CONFIG_ARC_MMU_VER)
@@ -205,7 +205,7 @@ chk_dc:
return;
#ifdef CONFIG_ARC_HAS_DCACHE
- if (dc->line_len != ARC_DCACHE_LINE_LEN)
+ if (dc->line_len != L1_CACHE_BYTES)
panic("Cache H/W doesn't match kernel Config");
/* check for D-Cache aliasing */
@@ -240,6 +240,67 @@ chk_dc:
#define OP_INV 0x1
#define OP_FLUSH 0x2
#define OP_FLUSH_N_INV 0x3
+#define OP_INV_IC 0x4
+
+/*
+ * Common Helper for Line Operations on {I,D}-Cache
+ */
+static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
+ unsigned long sz, const int cacheop)
+{
+ unsigned int aux_cmd, aux_tag;
+ int num_lines;
+ const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+
+ if (cacheop == OP_INV_IC) {
+ aux_cmd = ARC_REG_IC_IVIL;
+ aux_tag = ARC_REG_IC_PTAG;
+ } else {
+ /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
+ aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+ aux_tag = ARC_REG_DC_PTAG;
+ }
+
+ /* Ensure we properly floor/ceil the non-line aligned/sized requests
+ * so that @paddr is aligned to a cache line and @num_lines is integral.
+ * This however can be avoided for full page sized requests since:
+ * -@paddr will be cache-line aligned already (being page aligned)
+ * -@sz will be an integral multiple of line size (being page sized).
+ */
+ if (!full_page_op) {
+ sz += paddr & ~CACHE_LINE_MASK;
+ paddr &= CACHE_LINE_MASK;
+ vaddr &= CACHE_LINE_MASK;
+ }
+
+ num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+#if (CONFIG_ARC_MMU_VER <= 2)
+ /* MMUv2 and before: paddr contains stuffed vaddrs bits */
+ paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
+#else
+ /* if V-P const for loop, PTAG can be written once outside loop */
+ if (full_page_op)
+ write_aux_reg(ARC_REG_DC_PTAG, paddr);
+#endif
+
+ while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+ /* MMUv3, cache ops require paddr separately */
+ if (!full_page_op) {
+ write_aux_reg(aux_tag, paddr);
+ paddr += L1_CACHE_BYTES;
+ }
+
+ write_aux_reg(aux_cmd, vaddr);
+ vaddr += L1_CACHE_BYTES;
+#else
+ write_aux_reg(aux_cmd, paddr);
+ paddr += L1_CACHE_BYTES;
+#endif
+ }
+}
#ifdef CONFIG_ARC_HAS_DCACHE
@@ -289,53 +350,6 @@ static inline void __dc_entire_op(const int cacheop)
write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
}
-/*
- * Per Line Operation on D-Cache
- * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
- * It's sole purpose is to help gcc generate ZOL
- * (aliasing VIPT dcache flushing needs both vaddr and paddr)
- */
-static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
- unsigned long sz, const int aux_reg)
-{
- int num_lines;
-
- /* Ensure we properly floor/ceil the non-line aligned/sized requests
- * and have @paddr - aligned to cache line and integral @num_lines.
- * This however can be avoided for page sized since:
- * -@paddr will be cache-line aligned already (being page aligned)
- * -@sz will be integral multiple of line size (being page sized).
- */
- if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
- sz += paddr & ~DCACHE_LINE_MASK;
- paddr &= DCACHE_LINE_MASK;
- vaddr &= DCACHE_LINE_MASK;
- }
-
- num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
-
-#if (CONFIG_ARC_MMU_VER <= 2)
- paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
-#endif
-
- while (num_lines-- > 0) {
-#if (CONFIG_ARC_MMU_VER > 2)
- /*
- * Just as for I$, in MMU v3, D$ ops also require
- * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
- */
- write_aux_reg(ARC_REG_DC_PTAG, paddr);
-
- write_aux_reg(aux_reg, vaddr);
- vaddr += ARC_DCACHE_LINE_LEN;
-#else
- /* paddr contains stuffed vaddrs bits */
- write_aux_reg(aux_reg, paddr);
-#endif
- paddr += ARC_DCACHE_LINE_LEN;
- }
-}
-
/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
@@ -346,7 +360,6 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
unsigned long sz, const int cacheop)
{
unsigned long flags, tmp = tmp;
- int aux;
local_irq_save(flags);
@@ -361,12 +374,7 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
}
- if (cacheop & OP_INV) /* Inv / flush-n-inv use same cmd reg */
- aux = ARC_REG_DC_IVDL;
- else
- aux = ARC_REG_DC_FLDL;
-
- __dc_line_loop(paddr, vaddr, sz, aux);
+ __cache_line_loop(paddr, vaddr, sz, cacheop);
if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
wait_for_flush();
@@ -438,42 +446,9 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
unsigned long sz)
{
unsigned long flags;
- int num_lines;
-
- /*
- * Ensure we properly floor/ceil the non-line aligned/sized requests:
- * However page sized flushes can be compile time optimised.
- * -@paddr will be cache-line aligned already (being page aligned)
- * -@sz will be integral multiple of line size (being page sized).
- */
- if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
- sz += paddr & ~ICACHE_LINE_MASK;
- paddr &= ICACHE_LINE_MASK;
- vaddr &= ICACHE_LINE_MASK;
- }
-
- num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
-
-#if (CONFIG_ARC_MMU_VER <= 2)
- /* bits 17:13 of vaddr go as bits 4:0 of paddr */
- paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
-#endif
local_irq_save(flags);
- while (num_lines-- > 0) {
-#if (CONFIG_ARC_MMU_VER > 2)
- /* tag comes from phy addr */
- write_aux_reg(ARC_REG_IC_PTAG, paddr);
-
- /* index bits come from vaddr */
- write_aux_reg(ARC_REG_IC_IVIL, vaddr);
- vaddr += ARC_ICACHE_LINE_LEN;
-#else
- /* paddr contains stuffed vaddrs bits */
- write_aux_reg(ARC_REG_IC_IVIL, paddr);
-#endif
- paddr += ARC_ICACHE_LINE_LEN;
- }
+ __cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
local_irq_restore(flags);
}
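The floor/ceil logic that __cache_line_loop() applies to non-aligned requests, modelled as a standalone user-space sketch (a 64 byte line is assumed and the addresses are arbitrary):

#include <stdio.h>

#define L1_CACHE_BYTES	64
#define CACHE_LINE_MASK	(~(L1_CACHE_BYTES - 1UL))

int main(void)
{
	unsigned long paddr = 0x1004;	/* not line aligned */
	unsigned long sz = 100;		/* not a multiple of the line size */

	/* grow @sz by the misalignment, then round @paddr down to a line */
	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	/* num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES) */
	unsigned long num_lines = (sz + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES;

	printf("start %#lx, %lu lines\n", paddr, num_lines);	/* 0x1000, 2 */
	return 0;
}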
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index d63f3de0cd5b..9c69552350c4 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -17,7 +17,7 @@
#include <asm/pgalloc.h>
#include <asm/mmu.h>
-static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
+static int handle_vmalloc_fault(unsigned long address)
{
/*
* Synchronize this task's top level page-table
@@ -27,7 +27,7 @@ static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
- pgd = pgd_offset_fast(mm, address);
+ pgd = pgd_offset_fast(current->active_mm, address);
pgd_k = pgd_offset_k(address);
if (!pgd_present(*pgd_k))
@@ -52,7 +52,7 @@ bad_area:
return 1;
}
-void do_page_fault(struct pt_regs *regs, unsigned long address)
+void do_page_fault(unsigned long address, struct pt_regs *regs)
{
struct vm_area_struct *vma = NULL;
struct task_struct *tsk = current;
@@ -72,7 +72,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address)
* nothing more.
*/
if (address >= VMALLOC_START && address <= VMALLOC_END) {
- ret = handle_vmalloc_fault(mm, address);
+ ret = handle_vmalloc_fault(address);
if (unlikely(ret))
goto bad_area_nosemaphore;
else
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 81279ec73a6a..55e0a85bea78 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -125,10 +125,3 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
-
-#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
- pr_err("%s(%llx, %llx)\n", __func__, start, end);
-}
-#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 71cb26df4255..e1acf0ce5647 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -100,7 +100,7 @@
/* A copy of the ASID from the PID reg is kept in asid_cache */
-unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;
+DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
/*
* Utility Routine to erase a J-TLB entry
@@ -274,6 +274,7 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
+ const unsigned int cpu = smp_processor_id();
unsigned long flags;
/* If range @start to @end is more than 32 TLB entries deep,
@@ -297,9 +298,9 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
local_irq_save(flags);
- if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
+ if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
while (start < end) {
- tlb_entry_erase(start | hw_pid(vma->vm_mm));
+ tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
start += PAGE_SIZE;
}
}
@@ -346,6 +347,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
+ const unsigned int cpu = smp_processor_id();
unsigned long flags;
/* Note that it is critical that interrupts are DISABLED between
@@ -353,14 +355,87 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
*/
local_irq_save(flags);
- if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
- tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm));
+ if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
+ tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
utlb_invalidate();
}
local_irq_restore(flags);
}
+#ifdef CONFIG_SMP
+
+struct tlb_args {
+ struct vm_area_struct *ta_vma;
+ unsigned long ta_start;
+ unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+ struct tlb_args *ta = arg;
+
+ local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+ struct tlb_args *ta = arg;
+
+ local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+ struct tlb_args *ta = (struct tlb_args *)arg;
+
+ local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
+ mm, 1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ struct tlb_args ta = {
+ .ta_vma = vma,
+ .ta_start = uaddr
+ };
+
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct tlb_args ta = {
+ .ta_vma = vma,
+ .ta_start = start,
+ .ta_end = end
+ };
+
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct tlb_args ta = {
+ .ta_start = start,
+ .ta_end = end
+ };
+
+ on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+}
+#endif
+
/*
* Routine to create a TLB entry
*/
@@ -400,7 +475,7 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
local_irq_save(flags);
- tlb_paranoid_check(vma->vm_mm->context.asid, address);
+ tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);
address &= PAGE_MASK;
@@ -610,9 +685,9 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
int set, way, n;
- unsigned int pd0[4], pd1[4]; /* assume max 4 ways */
unsigned long flags, is_valid;
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+ unsigned int pd0[mmu->ways], pd1[mmu->ways];
local_irq_save(flags);
@@ -637,7 +712,7 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
continue;
/* Scan the set for duplicate ways: needs a nested loop */
- for (way = 0; way < mmu->ways; way++) {
+ for (way = 0; way < mmu->ways - 1; way++) {
if (!pd0[way])
continue;
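The new SMP flush_tlb_*() wrappers above all share the same cross-call shape: pack the arguments into a struct and run the local_* flavour on every CPU in the relevant cpumask. A generic sketch of that shape (demo_* names are illustrative, not kernel API):

#include <linux/cpumask.h>
#include <linux/smp.h>

struct demo_args {
	unsigned long start;
	unsigned long end;
};

static void demo_local_op(void *info)
{
	struct demo_args *a = info;

	/* act on a->start .. a->end on this CPU only */
	(void)a;
}

static void demo_global_op(const struct cpumask *mask,
			   unsigned long start, unsigned long end)
{
	struct demo_args a = { .start = start, .end = end };

	/* final argument 1: wait until every CPU has run the handler */
	on_each_cpu_mask(mask, demo_local_op, &a, 1);
}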
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index cf7d7d9ad695..3fcfdb38d242 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -369,8 +369,8 @@ do_slow_path_pf:
EXCEPTION_PROLOGUE
; ------- setup args for Linux Page fault Handler ---------
- mov_s r0, sp
- lr r1, [efa]
+ mov_s r1, sp
+ lr r0, [efa]
; We don't want exceptions to be disabled while the fault is handled.
; Now that we have saved the context we return from exception hence
diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig
index 1ab386bb5da8..6994c188dc88 100644
--- a/arch/arc/plat-tb10x/Kconfig
+++ b/arch/arc/plat-tb10x/Kconfig
@@ -20,8 +20,10 @@ menuconfig ARC_PLAT_TB10X
bool "Abilis TB10x"
select COMMON_CLK
select PINCTRL
+ select PINCTRL_TB10X
select PINMUX
select ARCH_REQUIRE_GPIOLIB
+ select GPIO_TB10X
select TB10X_IRQC
help
Support for platforms based on the TB10x home media gateway SOC by
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1ad6fb6c094d..c3067f620800 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -5,6 +5,8 @@ config ARM
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT if MMU
select CLONE_BACKWARDS
@@ -51,9 +53,12 @@ config ARM
select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN
select IRQ_FORCED_THREADING
select KTIME_SCALAR
select MODULES_USE_ELF_REL
@@ -260,6 +265,12 @@ config NEED_MACH_MEMORY_H
definitions for this platform. The need for mach/memory.h should
be avoided when possible.
+config NEED_MACH_TIMEX_H
+ bool
+ help
+ Select this when mach/timex.h is required to provide special
+ definitions for this platform. This should be avoided when possible.
+
config PHYS_OFFSET
hex "Physical address of main memory" if MMU
depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
@@ -317,6 +328,7 @@ config ARCH_INTEGRATOR
select NEED_MACH_MEMORY_H
select PLAT_VERSATILE
select SPARSE_IRQ
+ select USE_OF
select VERSATILE_FPGA_IRQ
help
Support for ARM's Integrator platform.
@@ -358,10 +370,10 @@ config ARCH_AT91
bool "Atmel AT91"
select ARCH_REQUIRE_GPIOLIB
select CLKDEV_LOOKUP
- select HAVE_CLK
select IRQ_DOMAIN
select NEED_MACH_GPIO_H
select NEED_MACH_IO_H if PCCARD
+ select NEED_MACH_TIMEX_H
select PINCTRL
select PINCTRL_AT91 if USE_OF
help
@@ -372,7 +384,6 @@ config ARCH_CLPS711X
bool "Cirrus Logic CLPS711x/EP721x/EP731x-based"
select ARCH_REQUIRE_GPIOLIB
select AUTO_ZRELADDR
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
select COMMON_CLK
select CPU_ARM720T
@@ -386,8 +397,9 @@ config ARCH_CLPS711X
config ARCH_GEMINI
bool "Cortina Systems Gemini"
select ARCH_REQUIRE_GPIOLIB
- select ARCH_USES_GETTIMEOFFSET
+ select CLKSRC_MMIO
select CPU_FA526
+ select GENERIC_CLOCKEVENTS
select NEED_MACH_GPIO_H
help
Support for the Cortina Systems Gemini family SoCs
@@ -406,6 +418,23 @@ config ARCH_EBSA110
Ethernet interface, two PCMCIA sockets, two serial ports and a
parallel port.
+config ARCH_EFM32
+ bool "Energy Micro efm32"
+ depends on !MMU
+ select ARCH_REQUIRE_GPIOLIB
+ select ARM_NVIC
+ select CLKSRC_MMIO
+ select CLKSRC_OF
+ select COMMON_CLK
+ select CPU_V7M
+ select GENERIC_CLOCKEVENTS
+ select NO_DMA
+ select NO_IOPORT
+ select SPARSE_IRQ
+ select USE_OF
+ help
+ Support for Energy Micro's (now Silicon Labs) efm32 processors.
+
config ARCH_EP93XX
bool "EP93xx-based"
select ARCH_HAS_HOLES_MEMORYMODEL
@@ -481,6 +510,7 @@ config ARCH_IXP4XX
bool "IXP4xx-based"
depends on MMU
select ARCH_HAS_DMA_SET_COHERENT_MASK
+ select ARCH_SUPPORTS_BIG_ENDIAN
select ARCH_REQUIRE_GPIOLIB
select CLKSRC_MMIO
select CPU_XSCALE
@@ -488,6 +518,7 @@ config ARCH_IXP4XX
select GENERIC_CLOCKEVENTS
select MIGHT_HAVE_PCI
select NEED_MACH_IO_H
+ select NEED_MACH_TIMEX_H
select USB_EHCI_BIG_ENDIAN_DESC
select USB_EHCI_BIG_ENDIAN_MMIO
help
@@ -573,6 +604,7 @@ config ARCH_KS8695
select CPU_ARM922T
select GENERIC_CLOCKEVENTS
select NEED_MACH_MEMORY_H
+ select NEED_MACH_TIMEX_H
help
Support for Micrel/Kendin KS8695 "Centaur" (ARM922T) based
System-on-Chip devices.
@@ -631,7 +663,6 @@ config ARCH_PXA
config ARCH_MSM
bool "Qualcomm MSM"
select ARCH_REQUIRE_GPIOLIB
- select CLKDEV_LOOKUP
select CLKSRC_OF if OF
select COMMON_CLK
select GENERIC_CLOCKEVENTS
@@ -649,7 +680,6 @@ config ARCH_SHMOBILE
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
- select HAVE_CLK
select HAVE_MACH_CLKDEV
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
@@ -692,7 +722,6 @@ config ARCH_SA1100
select GENERIC_CLOCKEVENTS
select HAVE_IDE
select ISA
- select NEED_MACH_GPIO_H
select NEED_MACH_MEMORY_H
select SPARSE_IRQ
help
@@ -706,7 +735,6 @@ config ARCH_S3C24XX
select CLKSRC_SAMSUNG_PWM
select GENERIC_CLOCKEVENTS
select GPIO_SAMSUNG
- select HAVE_CLK
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_S3C_RTC if RTC_CLASS
@@ -727,21 +755,22 @@ config ARCH_S3C64XX
select ARM_VIC
select CLKDEV_LOOKUP
select CLKSRC_SAMSUNG_PWM
- select CPU_V6
+ select COMMON_CLK
+ select CPU_V6K
select GENERIC_CLOCKEVENTS
select GPIO_SAMSUNG
- select HAVE_CLK
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_TCM
select NEED_MACH_GPIO_H
select NO_IOPORT
select PLAT_SAMSUNG
+ select PM_GENERIC_DOMAINS
select S3C_DEV_NAND
select S3C_GPIO_TRACK
select SAMSUNG_ATAGS
- select SAMSUNG_CLKSRC
select SAMSUNG_GPIOLIB_4BIT
+ select SAMSUNG_WAKEMASK
select SAMSUNG_WDT_RESET
select USB_ARCH_HAS_OHCI
help
@@ -754,7 +783,6 @@ config ARCH_S5P64X0
select CPU_V6
select GENERIC_CLOCKEVENTS
select GPIO_SAMSUNG
- select HAVE_CLK
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_S3C_RTC if RTC_CLASS
@@ -773,7 +801,6 @@ config ARCH_S5PC100
select CPU_V7
select GENERIC_CLOCKEVENTS
select GPIO_SAMSUNG
- select HAVE_CLK
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_S3C_RTC if RTC_CLASS
@@ -793,7 +820,6 @@ config ARCH_S5PV210
select CPU_V7
select GENERIC_CLOCKEVENTS
select GPIO_SAMSUNG
- select HAVE_CLK
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_S3C_RTC if RTC_CLASS
@@ -810,11 +836,9 @@ config ARCH_EXYNOS
select ARCH_REQUIRE_GPIOLIB
select ARCH_SPARSEMEM_ENABLE
select ARM_GIC
- select CLKDEV_LOOKUP
select COMMON_CLK
select CPU_V7
select GENERIC_CLOCKEVENTS
- select HAVE_CLK
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_S3C_RTC if RTC_CLASS
@@ -824,20 +848,6 @@ config ARCH_EXYNOS
help
Support for SAMSUNG's EXYNOS SoCs (EXYNOS4/5)
-config ARCH_SHARK
- bool "Shark"
- select ARCH_USES_GETTIMEOFFSET
- select CPU_SA110
- select ISA
- select ISA_DMA
- select NEED_MACH_MEMORY_H
- select PCI
- select VIRT_TO_BUS
- select ZONE_DMA
- help
- Support for the StrongARM based Digital DNARD machine, also known
- as "Shark" (<http://www.shark-linux.de/shark.html>).
-
config ARCH_DAVINCI
bool "TI DaVinci"
select ARCH_HAS_HOLES_MEMORYMODEL
@@ -847,7 +857,6 @@ config ARCH_DAVINCI
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
select HAVE_IDE
- select NEED_MACH_GPIO_H
select TI_PRIV_EDMA
select USE_OF
select ZONE_DMA
@@ -865,7 +874,6 @@ config ARCH_OMAP1
select CLKSRC_MMIO
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
- select HAVE_CLK
select HAVE_IDE
select IRQ_DOMAIN
select NEED_MACH_IO_H if PCCARD
@@ -1009,9 +1017,7 @@ source "arch/arm/mach-sti/Kconfig"
source "arch/arm/mach-s3c24xx/Kconfig"
-if ARCH_S3C64XX
source "arch/arm/mach-s3c64xx/Kconfig"
-endif
source "arch/arm/mach-s5p64x0/Kconfig"
@@ -1091,11 +1097,6 @@ config IWMMXT
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.
-config XSCALE_PMU
- bool
- depends on CPU_XSCALE
- default y
-
config MULTI_IRQ_HANDLER
bool
help
@@ -1431,12 +1432,6 @@ config PCI_NANOENGINE
config PCI_SYSCALL
def_bool PCI
-# Select the host bridge type
-config PCI_HOST_VIA82C505
- bool
- depends on PCI && ARCH_SHARK
- default y
-
config PCI_HOST_ITE8152
bool
depends on PCI && MACH_ARMCORE
@@ -1549,6 +1544,32 @@ config MCPM
for (multi-)cluster based systems, such as big.LITTLE based
systems.
+config BIG_LITTLE
+ bool "big.LITTLE support (Experimental)"
+ depends on CPU_V7 && SMP
+ select MCPM
+ help
+ This option enables support selections for the big.LITTLE
+ system architecture.
+
+config BL_SWITCHER
+ bool "big.LITTLE switcher support"
+ depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
+ select CPU_PM
+ select ARM_CPU_SUSPEND
+ help
+ The big.LITTLE "switcher" provides the core functionality to
+ transparently handle transition between a cluster of A15's
+ and a cluster of A7's in a big.LITTLE system.
+
+config BL_SWITCHER_DUMMY_IF
+ tristate "Simple big.LITTLE switcher user interface"
+ depends on BL_SWITCHER && DEBUG_KERNEL
+ help
+ This is a simple and dummy char dev interface to control
+ the big.LITTLE switcher core code. It is meant for
+ debugging purposes only.
+
choice
prompt "Memory split"
default VMSPLIT_3G
@@ -1803,7 +1824,7 @@ config FORCE_MAX_ZONEORDER
int "Maximum zone order" if ARCH_SHMOBILE
range 11 64 if ARCH_SHMOBILE
default "12" if SOC_AM33XX
- default "9" if SA1111
+ default "9" if SA1111 || ARCH_EFM32
default "11"
help
The kernel memory allocator divides physically contiguous memory
@@ -1872,6 +1893,12 @@ config CC_STACKPROTECTOR
neutralized via a kernel panic.
This feature requires gcc version 4.2 or above.
+config SWIOTLB
+ def_bool y
+
+config IOMMU_HELPER
+ def_bool SWIOTLB
+
config XEN_DOM0
def_bool y
depends on XEN
@@ -1882,6 +1909,7 @@ config XEN
depends on CPU_V7 && !CPU_V6
depends on !GENERIC_ATOMIC64
select ARM_PSCI
+ select SWIOTLB_XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 9762c84b4198..df56c7e65a6d 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -255,6 +255,13 @@ choice
Say Y here if you want kernel low-level debugging support
on i.MX35.
+ config DEBUG_IMX50_UART
+ bool "i.MX50 Debug UART"
+ depends on SOC_IMX50
+ help
+ Say Y here if you want kernel low-level debugging support
+ on i.MX50.
+
config DEBUG_IMX51_UART
bool "i.MX51 Debug UART"
depends on SOC_IMX51
@@ -318,6 +325,7 @@ choice
config DEBUG_MSM_UART1
bool "Kernel low-level debugging messages via MSM UART1"
depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the first serial port on MSM devices.
@@ -325,6 +333,7 @@ choice
config DEBUG_MSM_UART2
bool "Kernel low-level debugging messages via MSM UART2"
depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the second serial port on MSM devices.
@@ -332,6 +341,7 @@ choice
config DEBUG_MSM_UART3
bool "Kernel low-level debugging messages via MSM UART3"
depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the third serial port on MSM devices.
@@ -340,6 +350,7 @@ choice
bool "Kernel low-level debugging messages via MSM 8660 UART"
depends on ARCH_MSM8X60
select MSM_HAS_DEBUG_UART_HS
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the serial port on MSM 8660 devices.
@@ -348,10 +359,20 @@ choice
bool "Kernel low-level debugging messages via MSM 8960 UART"
depends on ARCH_MSM8960
select MSM_HAS_DEBUG_UART_HS
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the serial port on MSM 8960 devices.
+ config DEBUG_MSM8974_UART
+ bool "Kernel low-level debugging messages via MSM 8974 UART"
+ depends on ARCH_MSM8974
+ select MSM_HAS_DEBUG_UART_HS
+ select DEBUG_MSM_UART
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the serial port on MSM 8974 devices.
+
config DEBUG_MVEBU_UART
bool "Kernel low-level debugging messages via MVEBU UART (old bootloaders)"
depends on ARCH_MVEBU
@@ -386,6 +407,13 @@ choice
when u-boot hands over to the kernel, the system
silently crashes, with no serial output at all.
+ config DEBUG_VF_UART
+ bool "Vybrid UART"
+ depends on SOC_VF610
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Vybrid based platforms.
+
config DEBUG_NOMADIK_UART
bool "Kernel low-level debugging messages via NOMADIK UART"
depends on ARCH_NOMADIK
@@ -834,6 +862,20 @@ choice
options; the platform specific options are deprecated
and will be soon removed.
+ config DEBUG_LL_UART_EFM32
+ bool "Kernel low-level debugging via efm32 UART"
+ depends on ARCH_EFM32
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to a UART or USART port on efm32 based
+ machines. Use the following addresses for DEBUG_UART_PHYS:
+
+ 0x4000c000 | USART0
+ 0x4000c400 | USART1
+ 0x4000c800 | USART2
+ 0x4000e000 | UART0
+ 0x4000e400 | UART1
+
config DEBUG_LL_UART_PL01X
bool "Kernel low-level debugging via ARM Ltd PL01x Primecell UART"
help
@@ -862,6 +904,7 @@ config DEBUG_IMX_UART_PORT
DEBUG_IMX21_IMX27_UART || \
DEBUG_IMX31_UART || \
DEBUG_IMX35_UART || \
+ DEBUG_IMX50_UART || \
DEBUG_IMX51_UART || \
DEBUG_IMX53_UART || \
DEBUG_IMX6Q_UART || \
@@ -880,32 +923,35 @@ config DEBUG_STI_UART
bool
depends on ARCH_STI
+config DEBUG_MSM_UART
+ bool
+ depends on ARCH_MSM
+
config DEBUG_LL_INCLUDE
string
default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250
default "debug/pl01x.S" if DEBUG_LL_UART_PL01X || DEBUG_UART_PL01X
default "debug/exynos.S" if DEBUG_EXYNOS_UART
+ default "debug/efm32.S" if DEBUG_LL_UART_EFM32
default "debug/icedcc.S" if DEBUG_ICEDCC
default "debug/imx.S" if DEBUG_IMX1_UART || \
DEBUG_IMX25_UART || \
DEBUG_IMX21_IMX27_UART || \
DEBUG_IMX31_UART || \
DEBUG_IMX35_UART || \
+ DEBUG_IMX50_UART || \
DEBUG_IMX51_UART || \
DEBUG_IMX53_UART ||\
DEBUG_IMX6Q_UART || \
DEBUG_IMX6SL_UART
- default "debug/msm.S" if DEBUG_MSM_UART1 || \
- DEBUG_MSM_UART2 || \
- DEBUG_MSM_UART3 || \
- DEBUG_MSM8660_UART || \
- DEBUG_MSM8960_UART
+ default "debug/msm.S" if DEBUG_MSM_UART
default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
default "debug/sirf.S" if DEBUG_SIRFPRIMA2_UART1 || DEBUG_SIRFMARCO_UART1
default "debug/sti.S" if DEBUG_STI_UART
default "debug/tegra.S" if DEBUG_TEGRA_UART
default "debug/ux500.S" if DEBUG_UX500_UART
default "debug/vexpress.S" if DEBUG_VEXPRESS_UART0_DETECT
+ default "debug/vf.S" if DEBUG_VF_UART
default "debug/vt8500.S" if DEBUG_VT8500_UART0
default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
default "mach/debug-macro.S"
@@ -951,6 +997,7 @@ config DEBUG_UART_PHYS
default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
default 0x20201000 if DEBUG_BCM2835
+ default 0x4000e400 if DEBUG_LL_UART_EFM32
default 0x40090000 if ARCH_LPC32XX
default 0x40100000 if DEBUG_PXA_UART1
default 0x42000000 if ARCH_GEMINI
@@ -981,6 +1028,7 @@ config DEBUG_UART_PHYS
default 0xfff36000 if DEBUG_HIGHBANK_UART
default 0xfffff700 if ARCH_IOP33X
depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+ DEBUG_LL_UART_EFM32 || \
DEBUG_UART_8250 || DEBUG_UART_PL01X
config DEBUG_UART_VIRT
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index db50b626be98..6da9abb30460 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -16,6 +16,7 @@ LDFLAGS :=
LDFLAGS_vmlinux :=-p --no-undefined -X
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
+LDFLAGS_MODULE += --be8
endif
OBJCOPYFLAGS :=-O binary -R .comment -S
@@ -152,6 +153,7 @@ machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx
machine-$(CONFIG_ARCH_DAVINCI) += davinci
machine-$(CONFIG_ARCH_DOVE) += dove
machine-$(CONFIG_ARCH_EBSA110) += ebsa110
+machine-$(CONFIG_ARCH_EFM32) += efm32
machine-$(CONFIG_ARCH_EP93XX) += ep93xx
machine-$(CONFIG_ARCH_EXYNOS) += exynos
machine-$(CONFIG_ARCH_GEMINI) += gemini
@@ -188,7 +190,6 @@ machine-$(CONFIG_ARCH_S5P64X0) += s5p64x0
machine-$(CONFIG_ARCH_S5PC100) += s5pc100
machine-$(CONFIG_ARCH_S5PV210) += s5pv210
machine-$(CONFIG_ARCH_SA1100) += sa1100
-machine-$(CONFIG_ARCH_SHARK) += shark
machine-$(CONFIG_ARCH_SHMOBILE) += shmobile
machine-$(CONFIG_ARCH_SHMOBILE_MULTI) += shmobile
machine-$(CONFIG_ARCH_SIRF) += prima2
diff --git a/arch/arm/arm-soc-for-next-contents.txt b/arch/arm/arm-soc-for-next-contents.txt
new file mode 100644
index 000000000000..482edc16ae9a
--- /dev/null
+++ b/arch/arm/arm-soc-for-next-contents.txt
@@ -0,0 +1,173 @@
+
+fixes-non-critical
+ omap/fixes-non-critical2
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v3.13/fixes-not-urgent-part2
+
+
+next/cleanup
+ shark/removal
+ git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git tags/del-shark-for-v3.13
+ cleanup/dt-clock
+ https://github.com/shesselba/linux-dove.git clk-of-init-v2_for-3.13
+ patch
+ ARM: drop explicit selection of HAVE_CLK and CLKDEV_LOOKUP
+ reneasas/initdata-cleanup
+ git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git tags/renesas-fixes5-for-v3.12
+ renesas/cleanup
+ git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git tags/renesas-cleanup-for-v3.13
+ patch
+ clk: nomadik: fix missing __init on nomadik_src_init
+ renesas/cleanup2
+ git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git tags/renesas-cleanup2-for-v3.13
+ patch
+ ARM: clps711x: Use linux/sched_clock.h
+ ARM: Remove temporary sched_clock.h header
+ qcom/cleanup
+ git://git.kernel.org/pub/scm/linux/kernel/git/davidb/linux-msm.git tags/msm-cleanup-for-3.13
+ tegra/cleanup
+ git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux.git tegra-for-3.13-cleanup
+ at91/cleanup
+ git://github.com/at91linux/linux-at91.git tags/at91-cleanup
+ samsung/cleanup
+ git://git.kernel.org/pub/scm/linux/kernel/git/kgene/linux-samsung.git tags/samsung-cleanup
+ omap/cleanup-cm
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v3.13/cm-scm-cleanup-signed
+ patch
+ ARM: gemini: convert to GENERIC_CLOCKEVENTS
+
+next/soc
+ samsung/s3c64xx-clk
+ http://git.kernel.org/pub/scm/linux/kernel/git/kgene/linux-samsung.git tags/samsung-clk-s3c64xx
+ patch
+ ARM: davinci: remove deprecated IRQF_DISABLED
+ renesas/soc
+ git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git tags/renesas-soc-for-v3.13
+ renesas/smp
+ git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git tags/renesas-smp-for-v3.13
+ renesas/soc2
+ git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git tags/renesas-soc2-for-v3.13
+ keystone/soc
+ git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git tags/keystone-soc-for-arm-soc
+ patch
+ ARM: keystone: fix PM domain initcall to be keystone only
+ omap/hwmod
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v3.13/hwmod-signed
+ omap/soc
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v3.13/soc-take2
+ davinci/soc
+ git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git tags/davinci-for-v3.13/soc-2 # rebased from -v3.12-rc5 to -rc3
+ integrator/soc
+ git fetch git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-integrator.git tags/integrator-for-v3.13-2
+ mvebu/soc
+ git fetch git://git.infradead.org/linux-mvebu.git tags/soc-3.13-2
+ highbank/soc
+ git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git tags/highbank-for-3.13
+ omap/hwmod2
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v3.13/am43xx-hwmod-signed
+ tegra/soc
+ git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git tegra-for-3.13-soc-v2
+ cleanup/dt-clock
+ (brought in as a dependency, part of next/cleanup)
+ sunxi/core
+ https://github.com/mripard/linux.git tags/sunxi-core-for-3.13
+ sunxi/defconfig
+ https://github.com/mripard/linux.git tags/sunxi-defconfig-for-3.13
+ imx/soc
+ git://git.linaro.org/people/shawnguo/linux-2.6.git tags/imx-soc-3.13
+ patches
+ ARM: vexpress: Enable platform-specific options in defconfig
+ ARM: vexpress: Make defconfig work again
+ bcm/soc
+ git://github.com/broadcom/bcm11351.git tags/bcm-for-3.13-soc2
+ patch
+ ARM: bcm281xx: Add ARCH_BCM_MOBILE to bcm config
+
+next/drivers
+ davinci/gpio
+ git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git tags/davinci-for-v3.13/gpio
+ mvebu/drivers
+ git://git.infradead.org/linux-mvebu.git tags/drivers-3.13
+ arm/drivers
+ git://git.kernel.org/pub/scm/linux/kernel/git/will/linux.git tags/arm-perf-3.13
+ mvebu/drivers2
+ git://git.infradead.org/linux-mvebu.git tags/drivers-3.13-2
+ samsung/s3c24xx-dma
+ git://git.kernel.org/pub/scm/linux/kernel/git/kgene/linux-samsung.git tags/s3c24xx-dma
+
+next/boards
+ renesas/boards
+ git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git renesas-boards-for-v3.13
+ renesas/defconfig
+ git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git tags/renesas-defconfig-for-v3.13
+ renesas/boards2
+ git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git tags/renesas-boards2-for-v3.13
+ omap/boards
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v3.13/board-signed
+ tegra/defconfig
+ git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux.git tegra-for-3.13-defconfig
+ rockchip/boards
+ git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip.git tags/v3.13-rockchip-boards
+ # but moved MAINTAINERS patch to fixes
+
+next/dt
+ samsung/s3c64xx-dt
+ http://git.kernel.org/pub/scm/linux/kernel/git/kgene/linux-samsung.git tags/samsung-dt-s3c64xx
+ ux500/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git tags/ux500-dt-for-v3.13-2
+ patch "ARM: ux500: enable appended dtb in u8500_defconfig"
+ ux500/dt2
+ git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git tags/ux500-devicetree-2
+ renesas/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git tags/renesas-dt-for-v3.13
+ mvebu/dt
+ git://git.infradead.org/linux-mvebu.git tags/dt-3.13
+ mvebu/dt2
+ git://git.infradead.org/linux-mvebu.git tags/dt-3.13-2
+ renesas/dt2
+ git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git tags/renesas-dt2-for-v3.13
+ socfpga/dt
+ git://git.rocketboards.org/linux-socfpga-next.git tags/socfpga-dts-updates-for-v3.13
+ omap/pdata-quirks
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v3.13/quirk-signed
+ omap/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v3.13/dt-signed
+ mvebu/dt3
+ git://git.infradead.org/linux-mvebu.git tags/dt-3.13-3
+ tegra/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux.git tegra-for-3.13-dt
+ at91/dt
+ git://github.com/at91linux/linux-at91.git tags/at91-dt
+ omap/dt2
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v3.13/board-removal-signed-take2
+ omap/pinctrl-fix
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v3.13/pinctrl-fix
+ sunxi/dt
+ https://github.com/mripard/linux.git tags/sunxi-dt-for-3.13
+ sunxi/fixes
+ https://github.com/mripard/linux.git tags/sunxi-fixes-for-3.13
+ ux500/dt3
+ git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git tags/ux500-dt-for-v3.13-3
+ samsung/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/kgene/linux-samsung.git tags/exynos-dt (all but last patch)
+ NOTE: Pulled the branch but dropped the topmost patch.
+ imx/dt
+ git://git.linaro.org/people/shawnguo/linux-2.6.git tags/imx-dt-3.13
+ mvebu/dt4
+ git://git.infradead.org/linux-mvebu.git tags/dt-3.13-4
+ omap/cpufreq
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v3.13/cpufreq-late
+ omap/dt3
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v3.13/dt-late
+ sirf/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git tags/sirf-dts-for-3.13
+ (Above branch cherry-picked back to 3.12-rc4 base)
+ patch
+ ARM: dts: use 'status' property for PCIe nodes
+ omap/dt-fixes
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v3.13/dt-fixes-for-merge-window
+ patch
+ ARM: tegra: fix Tegra114 IOMMU register address
+ bcm/dt
+ git://github.com/broadcom/bcm11351.git tags/bcm-for-3.13-dt (rebased locally for renaming / adding s-o-b)
+ mvebu/dt5
+ git://git.infradead.org/linux-mvebu.git tags/dt-3.13-5
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index ec2f8065f955..37e87d417d99 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -12,7 +12,7 @@
#
ifneq ($(MACHINE),)
-include $(srctree)/$(MACHINE)/Makefile.boot
+sinclude $(srctree)/$(MACHINE)/Makefile.boot
endif
# Note: the following conditions must always be true:
@@ -51,10 +51,19 @@ $(obj)/Image: vmlinux FORCE
$(obj)/compressed/vmlinux: $(obj)/Image FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
+ifneq ($(CONFIG_AUTO_ZRELADDR)$(ZRELADDR),)
+
$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
@$(kecho) ' Kernel: $@ is ready'
+else
+
+$(obj)/zImage: FORCE
+ @echo 'Either enable CONFIG_AUTO_ZRELADDR or provide zreladdr-y in Makefile.boot'
+ @false
+
+endif
endif
ifneq ($(LOADADDR),)
@@ -81,6 +90,8 @@ $(obj)/uImage: $(obj)/zImage FORCE
@$(kecho) ' Image $@ is ready'
$(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
+ @test -n "$(PARAMS_PHYS)" || \
+ (echo This machine does not support BOOTP; exit -1)
$(Q)$(MAKE) $(build)=$(obj)/bootp $@
@:
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 7ac1610252ba..e7190bb5998e 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -44,10 +44,6 @@ ifeq ($(CONFIG_ARCH_ACORN),y)
OBJS += ll_char_wr.o font.o
endif
-ifeq ($(CONFIG_ARCH_SHARK),y)
-OBJS += head-shark.o ofw-shark.o
-endif
-
ifeq ($(CONFIG_ARCH_SA1100),y)
OBJS += head-sa1100.o
endif
diff --git a/arch/arm/boot/compressed/head-shark.S b/arch/arm/boot/compressed/head-shark.S
deleted file mode 100644
index 92b56897ed64..000000000000
--- a/arch/arm/boot/compressed/head-shark.S
+++ /dev/null
@@ -1,140 +0,0 @@
-/* The head-file for the Shark
- * by Alexander Schulz
- *
- * Does the following:
- * - get the memory layout from firmware. This can only be done as long as the mmu
- * is still on.
- * - switch the mmu off, so we have physical addresses
- * - copy the kernel to 0x08508000. This is done to have a fixed address where the
- * C-parts (misc.c) are executed. This address must be known at compile-time,
- * but the load-address of the kernel depends on how much memory is installed.
- * - Jump to this location.
- * - Set r8 with 0, r7 with the architecture ID for head.S
- */
-
-#include <linux/linkage.h>
-
-#include <asm/assembler.h>
-
- .section ".start", "ax"
-
- .arch armv4
- b __beginning
-
-__ofw_data: .long 0 @ the number of memory blocks
- .space 128 @ (startaddr,size) ...
- .space 128 @ bootargs
- .align
-
-__beginning: mov r4, r0 @ save the entry to the firmware
-
- mov r0, #0xC0 @ disable irq and fiq
- mov r1, r0
- mrs r3, cpsr
- bic r2, r3, r0
- eor r2, r2, r1
- msr cpsr_c, r2
-
- mov r0, r4 @ get the Memory layout from firmware
- adr r1, __ofw_data
- add r2, r1, #4
- mov lr, pc
- b ofw_init
- mov r1, #0
-
- adr r2, __mmu_off @ calculate physical address
- sub r2, r2, #0xf0000000 @ openprom maps us at f000 virt, 0e50 phys
- adr r0, __ofw_data
- ldr r0, [r0, #4]
- add r2, r2, r0
- add r2, r2, #0x00500000
-
- mrc p15, 0, r3, c1, c0
- bic r3, r3, #0xC @ Write Buffer and DCache
- bic r3, r3, #0x1000 @ ICache
- mcr p15, 0, r3, c1, c0 @ disabled
-
- mov r0, #0
- mcr p15, 0, r0, c7, c7 @ flush I,D caches on v4
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
- mcr p15, 0, r0, c8, c7 @ flush I,D TLBs on v4
-
- bic r3, r3, #0x1 @ MMU
- mcr p15, 0, r3, c1, c0 @ disabled
-
- mov pc, r2
-
-__copy_target: .long 0x08507FFC
-__copy_end: .long 0x08607FFC
-
- .word _start
- .word __bss_start
-
- .align
-__temp_stack: .space 128
-
-__mmu_off:
- adr r0, __ofw_data @ read the 1. entry of the memory map
- ldr r0, [r0, #4]
- orr r0, r0, #0x00600000
- sub r0, r0, #4
-
- ldr r1, __copy_end
- ldr r3, __copy_target
-
-/* r0 = 0x0e600000 (current end of kernelcode)
- * r3 = 0x08508000 (where it should begin)
- * r1 = 0x08608000 (end of copying area, 1MB)
- * The kernel is compressed, so 1 MB should be enough.
- * copy the kernel to the beginning of physical memory
- * We start from the highest address, so we can copy
- * from 0x08500000 to 0x08508000 if we have only 8MB
- */
-
-/* As we get more 2.6-kernels it gets more and more
- * uncomfortable to be bound to kernel images of 1MB only.
- * So we add a loop here, to be able to copy some more.
- * Alexander Schulz 2005-07-17
- */
-
- mov r4, #3 @ How many megabytes to copy
-
-
-__MoveCode: sub r4, r4, #1
-
-__Copy: ldr r2, [r0], #-4
- str r2, [r1], #-4
- teq r1, r3
- bne __Copy
-
- /* The firmware maps us in blocks of 1 MB, the next block is
- _below_ the last one. So our decrementing source pointer
- ist right here, but the destination pointer must be increased
- by 2 MB */
- add r1, r1, #0x00200000
- add r3, r3, #0x00100000
-
- teq r4, #0
- bne __MoveCode
-
-
- /* and jump to it */
- adr r2, __go_on @ where we want to jump
- adr r0, __ofw_data @ read the 1. entry of the memory map
- ldr r0, [r0, #4]
- sub r2, r2, r0 @ we are mapped add 0e50 now, sub that (-0e00)
- sub r2, r2, #0x00500000 @ -0050
- ldr r0, __copy_target @ and add 0850 8000 instead
- add r0, r0, #4
- add r2, r2, r0
- mov pc, r2 @ and jump there
-
-__go_on:
- adr sp, __temp_stack
- add sp, sp, #128
- adr r0, __ofw_data
- mov lr, pc
- b create_params
-
- mov r8, #0
- mov r7, #15
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 75189f13cf54..066b03480b63 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -135,6 +135,7 @@ start:
.word _edata @ zImage end address
THUMB( .thumb )
1:
+ ARM_BE8( setend be ) @ go BE8 if compiled for BE8
mrs r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install @ get into SVC mode, reversibly
@@ -699,9 +700,7 @@ __armv4_mmu_cache_on:
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x0030
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r0, r0, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
bl __common_mmu_cache_on
mov r0, #0
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
@@ -728,9 +727,7 @@ __armv7_mmu_cache_on:
orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
@ (needed for ARM1176)
#ifdef CONFIG_MMU
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r0, r0, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
orrne r0, r0, #1 @ MMU enabled
movne r1, #0xfffffffd @ domain 0 = client
diff --git a/arch/arm/boot/compressed/ofw-shark.c b/arch/arm/boot/compressed/ofw-shark.c
deleted file mode 100644
index 465c54b6b128..000000000000
--- a/arch/arm/boot/compressed/ofw-shark.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * linux/arch/arm/boot/compressed/ofw-shark.c
- *
- * by Alexander Schulz
- *
- * This file is used to get some basic information
- * about the memory layout of the shark we are running
- * on. Memory is usually divided in blocks a 8 MB.
- * And bootargs are copied from OpenFirmware.
- */
-
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <asm/setup.h>
-#include <asm/page.h>
-
-
-asmlinkage void
-create_params (unsigned long *buffer)
-{
- /* Is there a better address? Also change in mach-shark/core.c */
- struct tag *tag = (struct tag *) 0x08003000;
- int j,i,m,k,nr_banks,size;
- unsigned char *c;
-
- k = 0;
-
- /* Head of the taglist */
- tag->hdr.tag = ATAG_CORE;
- tag->hdr.size = tag_size(tag_core);
- tag->u.core.flags = 1;
- tag->u.core.pagesize = PAGE_SIZE;
- tag->u.core.rootdev = 0;
-
- /* Build up one tagged block for each memory region */
- size=0;
- nr_banks=(unsigned int) buffer[0];
- for (j=0;j<nr_banks;j++){
- /* search the lowest address and put it into the next entry */
- /* not a fast sort algorithm, but there are at most 8 entries */
- /* and this is used only once anyway */
- m=0xffffffff;
- for (i=0;i<(unsigned int) buffer[0];i++){
- if (buffer[2*i+1]<m) {
- m=buffer[2*i+1];
- k=i;
- }
- }
-
- tag = tag_next(tag);
- tag->hdr.tag = ATAG_MEM;
- tag->hdr.size = tag_size(tag_mem32);
- tag->u.mem.size = buffer[2*k+2];
- tag->u.mem.start = buffer[2*k+1];
-
- size += buffer[2*k+2];
-
- buffer[2*k+1]=0xffffffff; /* mark as copied */
- }
-
- /* The command line */
- tag = tag_next(tag);
- tag->hdr.tag = ATAG_CMDLINE;
-
- c=(unsigned char *)(&buffer[34]);
- j=0;
- while (*c) tag->u.cmdline.cmdline[j++]=*c++;
-
- tag->u.cmdline.cmdline[j]=0;
- tag->hdr.size = (j + 7 + sizeof(struct tag_header)) >> 2;
-
- /* Hardware revision */
- tag = tag_next(tag);
- tag->hdr.tag = ATAG_REVISION;
- tag->hdr.size = tag_size(tag_revision);
- tag->u.revision.rev = ((unsigned char) buffer[33])-'0';
-
- /* End of the taglist */
- tag = tag_next(tag);
- tag->hdr.tag = 0;
- tag->hdr.size = 0;
-}
-
-
-typedef int (*ofw_handle_t)(void *);
-
-/* Everything below is called with a wrong MMU setting.
- * This means: no string constants, no initialization of
- * arrays, no global variables! This is ugly but I didn't
- * want to write this in assembler :-)
- */
-
-int
-of_decode_int(const unsigned char *p)
-{
- unsigned int i = *p++ << 8;
- i = (i + *p++) << 8;
- i = (i + *p++) << 8;
- return (i + *p);
-}
-
-int
-OF_finddevice(ofw_handle_t openfirmware, char *name)
-{
- unsigned int args[8];
- char service[12];
-
- service[0]='f';
- service[1]='i';
- service[2]='n';
- service[3]='d';
- service[4]='d';
- service[5]='e';
- service[6]='v';
- service[7]='i';
- service[8]='c';
- service[9]='e';
- service[10]='\0';
-
- args[0]=(unsigned int)service;
- args[1]=1;
- args[2]=1;
- args[3]=(unsigned int)name;
-
- if (openfirmware(args) == -1)
- return -1;
- return args[4];
-}
-
-int
-OF_getproplen(ofw_handle_t openfirmware, int handle, char *prop)
-{
- unsigned int args[8];
- char service[12];
-
- service[0]='g';
- service[1]='e';
- service[2]='t';
- service[3]='p';
- service[4]='r';
- service[5]='o';
- service[6]='p';
- service[7]='l';
- service[8]='e';
- service[9]='n';
- service[10]='\0';
-
- args[0] = (unsigned int)service;
- args[1] = 2;
- args[2] = 1;
- args[3] = (unsigned int)handle;
- args[4] = (unsigned int)prop;
-
- if (openfirmware(args) == -1)
- return -1;
- return args[5];
-}
-
-int
-OF_getprop(ofw_handle_t openfirmware, int handle, char *prop, void *buf, unsigned int buflen)
-{
- unsigned int args[8];
- char service[8];
-
- service[0]='g';
- service[1]='e';
- service[2]='t';
- service[3]='p';
- service[4]='r';
- service[5]='o';
- service[6]='p';
- service[7]='\0';
-
- args[0] = (unsigned int)service;
- args[1] = 4;
- args[2] = 1;
- args[3] = (unsigned int)handle;
- args[4] = (unsigned int)prop;
- args[5] = (unsigned int)buf;
- args[6] = buflen;
-
- if (openfirmware(args) == -1)
- return -1;
- return args[7];
-}
-
-asmlinkage void ofw_init(ofw_handle_t o, int *nomr, int *pointer)
-{
- int phandle,i,mem_len,buffer[32];
- char temp[15];
-
- temp[0]='/';
- temp[1]='m';
- temp[2]='e';
- temp[3]='m';
- temp[4]='o';
- temp[5]='r';
- temp[6]='y';
- temp[7]='\0';
-
- phandle=OF_finddevice(o,temp);
-
- temp[0]='r';
- temp[1]='e';
- temp[2]='g';
- temp[3]='\0';
-
- mem_len = OF_getproplen(o,phandle, temp);
- OF_getprop(o,phandle, temp, buffer, mem_len);
- *nomr=mem_len >> 3;
-
- for (i=0; i<=mem_len/4; i++) pointer[i]=of_decode_int((const unsigned char *)&buffer[i]);
-
- temp[0]='/';
- temp[1]='c';
- temp[2]='h';
- temp[3]='o';
- temp[4]='s';
- temp[5]='e';
- temp[6]='n';
- temp[7]='\0';
-
- phandle=OF_finddevice(o,temp);
-
- temp[0]='b';
- temp[1]='o';
- temp[2]='o';
- temp[3]='t';
- temp[4]='a';
- temp[5]='r';
- temp[6]='g';
- temp[7]='s';
- temp[8]='\0';
-
- mem_len = OF_getproplen(o,phandle, temp);
- OF_getprop(o,phandle, temp, buffer, mem_len);
- if (mem_len > 128) mem_len=128;
- for (i=0; i<=mem_len/4; i++) pointer[i+33]=buffer[i];
- pointer[i+33]=0;
-
- temp[0]='/';
- temp[1]='\0';
- phandle=OF_finddevice(o,temp);
- temp[0]='b';
- temp[1]='a';
- temp[2]='n';
- temp[3]='n';
- temp[4]='e';
- temp[5]='r';
- temp[6]='-';
- temp[7]='n';
- temp[8]='a';
- temp[9]='m';
- temp[10]='e';
- temp[11]='\0';
- mem_len = OF_getproplen(o,phandle, temp);
- OF_getprop(o,phandle, temp, buffer, mem_len);
- * ((unsigned char *) &pointer[32]) = ((unsigned char *) buffer)[mem_len-2];
-}
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 802720e3e8fd..1c1fcf8dd296 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -40,17 +40,17 @@ dtb-$(CONFIG_ARCH_AT91) += sama5d31ek.dtb
dtb-$(CONFIG_ARCH_AT91) += sama5d33ek.dtb
dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb
dtb-$(CONFIG_ARCH_AT91) += sama5d35ek.dtb
-
dtb-$(CONFIG_ARCH_ATLAS6) += atlas6-evb.dtb
-
dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb
-dtb-$(CONFIG_ARCH_BCM) += bcm11351-brt.dtb \
+dtb-$(CONFIG_ARCH_BCM_MOBILE) += bcm11351-brt.dtb \
bcm28155-ap.dtb
+dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb
dtb-$(CONFIG_ARCH_DAVINCI) += da850-enbw-cmc.dtb \
da850-evm.dtb
dtb-$(CONFIG_ARCH_DOVE) += dove-cm-a510.dtb \
dove-cubox.dtb \
dove-d2plug.dtb \
+ dove-d3plug.dtb \
dove-dove-db.dtb
dtb-$(CONFIG_ARCH_EXYNOS) += exynos4210-origen.dtb \
exynos4210-smdkv310.dtb \
@@ -96,22 +96,25 @@ dtb-$(CONFIG_ARCH_KIRKWOOD) += kirkwood-cloudbox.dtb \
kirkwood-ns2mini.dtb \
kirkwood-nsa310.dtb \
kirkwood-nsa310a.dtb \
+ kirkwood-openblocks_a6.dtb \
+ kirkwood-openblocks_a7.dtb \
kirkwood-sheevaplug.dtb \
kirkwood-sheevaplug-esata.dtb \
kirkwood-topkick.dtb \
kirkwood-ts219-6281.dtb \
- kirkwood-ts219-6282.dtb \
- kirkwood-openblocks_a6.dtb
+ kirkwood-ts219-6282.dtb
dtb-$(CONFIG_ARCH_MARCO) += marco-evb.dtb
-dtb-$(CONFIG_ARCH_MSM) += msm8660-surf.dtb \
- msm8960-cdp.dtb
+dtb-$(CONFIG_ARCH_MSM) += qcom-msm8660-surf.dtb \
+ qcom-msm8960-cdp.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-370-db.dtb \
armada-370-mirabox.dtb \
armada-370-netgear-rn102.dtb \
+ armada-370-netgear-rn104.dtb \
armada-370-rd.dtb \
armada-xp-axpwifiap.dtb \
armada-xp-db.dtb \
armada-xp-gp.dtb \
+ armada-xp-matrix.dtb \
armada-xp-openblocks-ax3-4.dtb
dtb-$(CONFIG_ARCH_MXC) += \
imx25-karo-tx25.dtb \
@@ -124,26 +127,41 @@ dtb-$(CONFIG_ARCH_MXC) += \
imx27-phytec-phycard-s-som.dtb \
imx27-phytec-phycard-s-rdk.dtb \
imx31-bug.dtb \
+ imx50-evk.dtb \
imx51-apf51.dtb \
imx51-apf51dev.dtb \
imx51-babbage.dtb \
+ imx51-eukrea-mbimxsd51-baseboard.dtb \
imx53-ard.dtb \
imx53-evk.dtb \
imx53-m53evk.dtb \
imx53-mba53.dtb \
imx53-qsb.dtb \
imx53-smd.dtb \
+ imx53-voipac-bsb.dtb \
+ imx6dl-gw51xx.dtb \
+ imx6dl-gw52xx.dtb \
+ imx6dl-gw53xx.dtb \
+ imx6dl-gw54xx.dtb \
imx6dl-sabreauto.dtb \
imx6dl-sabresd.dtb \
imx6dl-wandboard.dtb \
imx6q-arm2.dtb \
+ imx6q-cm-fx6.dtb \
+ imx6q-gw51xx.dtb \
+ imx6q-gw52xx.dtb \
+ imx6q-gw53xx.dtb \
+ imx6q-gw5400-a.dtb \
+ imx6q-gw54xx.dtb \
imx6q-phytec-pbab01.dtb \
imx6q-sabreauto.dtb \
imx6q-sabrelite.dtb \
imx6q-sabresd.dtb \
imx6q-sbc6x.dtb \
+ imx6q-udoo.dtb \
imx6q-wandboard.dtb \
imx6sl-evk.dtb \
+ vf610-cosmic.dtb \
vf610-twr.dtb
dtb-$(CONFIG_ARCH_MXS) += imx23-evk.dtb \
imx23-olinuxino.dtb \
@@ -159,6 +177,7 @@ dtb-$(CONFIG_ARCH_MXS) += imx23-evk.dtb \
imx28-cfa10057.dtb \
imx28-cfa10058.dtb \
imx28-evk.dtb \
+ imx28-m28cu3.dtb \
imx28-m28evk.dtb \
imx28-sps1.dtb \
imx28-tx28.dtb
@@ -172,9 +191,15 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
omap3-devkit8000.dtb \
omap3-beagle-xm.dtb \
omap3-evm.dtb \
+ omap3-evm-37xx.dtb \
+ omap3-n900.dtb \
+ omap3-n9.dtb \
+ omap3-n950.dtb \
omap3-tobi.dtb \
+ omap3-gta04.dtb \
omap3-igep0020.dtb \
omap3-igep0030.dtb \
+ omap3-zoom3.dtb \
omap4-panda.dtb \
omap4-panda-a4.dtb \
omap4-panda-es.dtb \
@@ -186,25 +211,33 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
am335x-evmsk.dtb \
am335x-bone.dtb \
am335x-boneblack.dtb \
+ am335x-nano.dtb \
+ am335x-base0033.dtb \
am3517-evm.dtb \
am3517_mt_ventoux.dtb \
- am43x-epos-evm.dtb
+ am43x-epos-evm.dtb \
+ dra7-evm.dtb
dtb-$(CONFIG_ARCH_ORION5X) += orion5x-lacie-ethernet-disk-mini-v2.dtb
dtb-$(CONFIG_ARCH_PRIMA2) += prima2-evb.dtb
dtb-$(CONFIG_ARCH_U8500) += ste-snowball.dtb \
- ste-hrefprev60.dtb \
- ste-hrefv60plus.dtb \
+ ste-hrefprev60-stuib.dtb \
+ ste-hrefprev60-tvk.dtb \
+ ste-hrefv60plus-stuib.dtb \
+ ste-hrefv60plus-tvk.dtb \
ste-ccu8540.dtb \
ste-ccu9540.dtb
dtb-$(CONFIG_ARCH_S3C24XX) += s3c2416-smdk2416.dtb
+dtb-$(CONFIG_ARCH_S3C64XX) += s3c6410-mini6410.dtb \
+ s3c6410-smdk6410.dtb
dtb-$(CONFIG_ARCH_SHMOBILE) += emev2-kzm9d.dtb \
- emev2-kzm9d-reference.dtb \
+ r7s72100-genmai.dtb \
r8a7740-armadillo800eva.dtb \
r8a7778-bockw.dtb \
r8a7778-bockw-reference.dtb \
r8a7740-armadillo800eva-reference.dtb \
r8a7779-marzen.dtb \
r8a7779-marzen-reference.dtb \
+ r8a7791-koelsch.dtb \
r8a7790-lager.dtb \
r8a7790-lager-reference.dtb \
sh73a0-kzm9g.dtb \
@@ -212,8 +245,10 @@ dtb-$(CONFIG_ARCH_SHMOBILE) += emev2-kzm9d.dtb \
r8a73a4-ape6evm.dtb \
r8a73a4-ape6evm-reference.dtb \
sh7372-mackerel.dtb
-dtb-$(CONFIG_ARCH_SHMOBILE_MULTI) += emev2-kzm9d-reference.dtb
-dtb-$(CONFIG_ARCH_SOCFPGA) += socfpga_cyclone5.dtb \
+dtb-$(CONFIG_ARCH_SHMOBILE_MULTI) += emev2-kzm9d.dtb
+dtb-$(CONFIG_ARCH_SOCFPGA) += socfpga_arria5_socdk.dtb \
+ socfpga_cyclone5_socdk.dtb \
+ socfpga_cyclone5_sockit.dtb \
socfpga_vt.dtb
dtb-$(CONFIG_ARCH_SPEAR13XX) += spear1310-evb.dtb \
spear1340-evb.dtb
@@ -235,6 +270,7 @@ dtb-$(CONFIG_ARCH_SUNXI) += \
sun5i-a13-olinuxino.dtb \
sun6i-a31-colombus.dtb \
sun7i-a20-cubieboard2.dtb \
+ sun7i-a20-cubietruck.dtb \
sun7i-a20-olinuxino-micro.dtb
dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
tegra20-iris-512.dtb \
@@ -249,7 +285,8 @@ dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
tegra30-beaver.dtb \
tegra30-cardhu-a02.dtb \
tegra30-cardhu-a04.dtb \
- tegra114-dalmore.dtb
+ tegra114-dalmore.dtb \
+ tegra124-venice2.dtb
dtb-$(CONFIG_ARCH_VERSATILE) += versatile-ab.dtb \
versatile-pb.dtb
dtb-$(CONFIG_ARCH_U300) += ste-u300.dtb
diff --git a/arch/arm/boot/dts/am335x-base0033.dts b/arch/arm/boot/dts/am335x-base0033.dts
new file mode 100644
index 000000000000..b4f95c2bbf74
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-base0033.dts
@@ -0,0 +1,16 @@
+/*
+ * am335x-base0033.dts - Device Tree file for IGEP AQUILA EXPANSION
+ *
+ * Copyright (C) 2013 ISEE 2007 SL - http://www.isee.biz
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "am335x-igep0033.dtsi"
+
+/ {
+ model = "IGEP COM AM335x on AQUILA Expansion";
+ compatible = "isee,am335x-base0033", "isee,am335x-igep0033", "ti,am33xx";
+};
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index 2f66deda9f5c..e3f27ec31718 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -21,177 +21,205 @@
reg = <0x80000000 0x10000000>; /* 256 MB */
};
- am33xx_pinmux: pinmux@44e10800 {
+ leds {
pinctrl-names = "default";
- pinctrl-0 = <&clkout2_pin>;
-
- user_leds_s0: user_leds_s0 {
- pinctrl-single,pins = <
- 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
- 0x58 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a6.gpio1_22 */
- 0x5c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a7.gpio1_23 */
- 0x60 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a8.gpio1_24 */
- >;
- };
+ pinctrl-0 = <&user_leds_s0>;
- i2c0_pins: pinmux_i2c0_pins {
- pinctrl-single,pins = <
- 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
- >;
- };
+ compatible = "gpio-leds";
- uart0_pins: pinmux_uart0_pins {
- pinctrl-single,pins = <
- 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
- >;
+ led@2 {
+ label = "beaglebone:green:heartbeat";
+ gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
};
- clkout2_pin: pinmux_clkout2_pin {
- pinctrl-single,pins = <
- 0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
- >;
+ led@3 {
+ label = "beaglebone:green:mmc0";
+ gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ default-state = "off";
};
- cpsw_default: cpsw_default {
- pinctrl-single,pins = <
- /* Slave 1 */
- 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxerr.mii1_rxerr */
- 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txen.mii1_txen */
- 0x118 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxdv.mii1_rxdv */
- 0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd3.mii1_txd3 */
- 0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd2.mii1_txd2 */
- 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd1.mii1_txd1 */
- 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd0.mii1_txd0 */
- 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_txclk.mii1_txclk */
- 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxclk.mii1_rxclk */
- 0x134 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd3.mii1_rxd3 */
- 0x138 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd2.mii1_rxd2 */
- 0x13c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd1.mii1_rxd1 */
- 0x140 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd0.mii1_rxd0 */
- >;
+ led@4 {
+ label = "beaglebone:green:usr2";
+ gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "cpu0";
+ default-state = "off";
};
- cpsw_sleep: cpsw_sleep {
- pinctrl-single,pins = <
- /* Slave 1 reset value */
- 0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- >;
+ led@5 {
+ label = "beaglebone:green:usr3";
+ gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc1";
+ default-state = "off";
};
+ };
- davinci_mdio_default: davinci_mdio_default {
- pinctrl-single,pins = <
- /* MDIO */
- 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
- >;
- };
+ vmmcsd_fixed: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vmmcsd_fixed";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
- davinci_mdio_sleep: davinci_mdio_sleep {
- pinctrl-single,pins = <
- /* MDIO reset value */
- 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- >;
- };
+&am33xx_pinmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&clkout2_pin>;
+
+ user_leds_s0: user_leds_s0 {
+ pinctrl-single,pins = <
+ 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
+ 0x58 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a6.gpio1_22 */
+ 0x5c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a7.gpio1_23 */
+ 0x60 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a8.gpio1_24 */
+ >;
};
- ocp {
- uart0: serial@44e09000 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins>;
+ i2c0_pins: pinmux_i2c0_pins {
+ pinctrl-single,pins = <
+ 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
+ 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ >;
+ };
- status = "okay";
- };
+ uart0_pins: pinmux_uart0_pins {
+ pinctrl-single,pins = <
+ 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
+ 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ >;
+ };
- musb: usb@47400000 {
- status = "okay";
+ clkout2_pin: pinmux_clkout2_pin {
+ pinctrl-single,pins = <
+ 0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
+ >;
+ };
- control@44e10000 {
- status = "okay";
- };
+ cpsw_default: cpsw_default {
+ pinctrl-single,pins = <
+ /* Slave 1 */
+ 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxerr.mii1_rxerr */
+ 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txen.mii1_txen */
+ 0x118 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxdv.mii1_rxdv */
+ 0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd3.mii1_txd3 */
+ 0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd2.mii1_txd2 */
+ 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd1.mii1_txd1 */
+ 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd0.mii1_txd0 */
+ 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_txclk.mii1_txclk */
+ 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxclk.mii1_rxclk */
+ 0x134 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd3.mii1_rxd3 */
+ 0x138 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd2.mii1_rxd2 */
+ 0x13c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd1.mii1_rxd1 */
+ 0x140 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd0.mii1_rxd0 */
+ >;
+ };
- usb-phy@47401300 {
- status = "okay";
- };
+ cpsw_sleep: cpsw_sleep {
+ pinctrl-single,pins = <
+ /* Slave 1 reset value */
+ 0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
- usb-phy@47401b00 {
- status = "okay";
- };
+ davinci_mdio_default: davinci_mdio_default {
+ pinctrl-single,pins = <
+ /* MDIO */
+ 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
+ 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ >;
+ };
- usb@47401000 {
- status = "okay";
- };
+ davinci_mdio_sleep: davinci_mdio_sleep {
+ pinctrl-single,pins = <
+ /* MDIO reset value */
+ 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
- usb@47401800 {
- status = "okay";
- dr_mode = "host";
- };
+ mmc1_pins: pinmux_mmc1_pins {
+ pinctrl-single,pins = <
+ 0x160 (PIN_INPUT | MUX_MODE7) /* GPIO0_6 */
+ >;
+ };
- dma-controller@07402000 {
- status = "okay";
- };
- };
+ emmc_pins: pinmux_emmc_pins {
+ pinctrl-single,pins = <
+ 0x80 (PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn1.mmc1_clk */
+ 0x84 (PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
+ 0x00 (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
+ 0x04 (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
+ 0x08 (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
+ 0x0c (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
+ 0x10 (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
+ 0x14 (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
+ 0x18 (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
+ 0x1c (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
+ >;
+ };
+};
- i2c0: i2c@44e0b000 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins>;
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
- status = "okay";
- clock-frequency = <400000>;
+ status = "okay";
+};
- tps: tps@24 {
- reg = <0x24>;
- };
+&usb {
+ status = "okay";
- };
+ control@44e10000 {
+ status = "okay";
};
- leds {
- pinctrl-names = "default";
- pinctrl-0 = <&user_leds_s0>;
+ usb-phy@47401300 {
+ status = "okay";
+ };
- compatible = "gpio-leds";
+ usb-phy@47401b00 {
+ status = "okay";
+ };
- led@2 {
- label = "beaglebone:green:heartbeat";
- gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "heartbeat";
- default-state = "off";
- };
+ usb@47401000 {
+ status = "okay";
+ };
- led@3 {
- label = "beaglebone:green:mmc0";
- gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "mmc0";
- default-state = "off";
- };
+ usb@47401800 {
+ status = "okay";
+ dr_mode = "host";
+ };
- led@4 {
- label = "beaglebone:green:usr2";
- gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
- default-state = "off";
- };
+ dma-controller@07402000 {
+ status = "okay";
+ };
+};
- led@5 {
- label = "beaglebone:green:usr3";
- gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
- default-state = "off";
- };
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+
+ status = "okay";
+ clock-frequency = <400000>;
+
+ tps: tps@24 {
+ reg = <0x24>;
};
+
};
/include/ "tps65217.dtsi"
@@ -260,3 +288,12 @@
pinctrl-0 = <&davinci_mdio_default>;
pinctrl-1 = <&davinci_mdio_sleep>;
};
+
+&mmc1 {
+ status = "okay";
+ bus-width = <0x4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+ cd-inverted;
+};
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts
index 7993c489982c..94ee427a6db1 100644
--- a/arch/arm/boot/dts/am335x-bone.dts
+++ b/arch/arm/boot/dts/am335x-bone.dts
@@ -9,3 +9,21 @@
#include "am33xx.dtsi"
#include "am335x-bone-common.dtsi"
+
+&ldo3_reg {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+};
+
+&mmc1 {
+ vmmc-supply = <&ldo3_reg>;
+};
+
+&sham {
+ status = "okay";
+};
+
+&aes {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index 197cadf72d2c..6b71ad95a5cf 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -15,3 +15,64 @@
regulator-max-microvolt = <1800000>;
regulator-always-on;
};
+
+&mmc1 {
+ vmmc-supply = <&vmmcsd_fixed>;
+};
+
+&mmc2 {
+ vmmc-supply = <&vmmcsd_fixed>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_pins>;
+ bus-width = <8>;
+ status = "okay";
+ ti,vcc-aux-disable-is-sleep;
+};
+
+&am33xx_pinmux {
+ nxp_hdmi_bonelt_pins: nxp_hdmi_bonelt_pins {
+ pinctrl-single,pins = <
+ 0x1b0 0x03 /* xdma_event_intr0, OMAP_MUX_MODE3 | AM33XX_PIN_OUTPUT */
+ 0xa0 0x08 /* lcd_data0.lcd_data0, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xa4 0x08 /* lcd_data1.lcd_data1, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xa8 0x08 /* lcd_data2.lcd_data2, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xac 0x08 /* lcd_data3.lcd_data3, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xb0 0x08 /* lcd_data4.lcd_data4, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xb4 0x08 /* lcd_data5.lcd_data5, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xb8 0x08 /* lcd_data6.lcd_data6, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xbc 0x08 /* lcd_data7.lcd_data7, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xc0 0x08 /* lcd_data8.lcd_data8, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xc4 0x08 /* lcd_data9.lcd_data9, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xc8 0x08 /* lcd_data10.lcd_data10, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xcc 0x08 /* lcd_data11.lcd_data11, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xd0 0x08 /* lcd_data12.lcd_data12, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xd4 0x08 /* lcd_data13.lcd_data13, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xd8 0x08 /* lcd_data14.lcd_data14, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xdc 0x08 /* lcd_data15.lcd_data15, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT | AM33XX_PULL_DISA */
+ 0xe0 0x00 /* lcd_vsync.lcd_vsync, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT */
+ 0xe4 0x00 /* lcd_hsync.lcd_hsync, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT */
+ 0xe8 0x00 /* lcd_pclk.lcd_pclk, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT */
+ 0xec 0x00 /* lcd_ac_bias_en.lcd_ac_bias_en, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT */
+ >;
+ };
+ nxp_hdmi_bonelt_off_pins: nxp_hdmi_bonelt_off_pins {
+ pinctrl-single,pins = <
+ 0x1b0 0x03 /* xdma_event_intr0, OMAP_MUX_MODE3 | AM33XX_PIN_OUTPUT */
+ >;
+ };
+};
+
+&lcdc {
+ status = "okay";
+};
+
+/ {
+ hdmi {
+ compatible = "ti,tilcdc,slave";
+ i2c = <&i2c0>;
+ pinctrl-names = "default", "off";
+ pinctrl-0 = <&nxp_hdmi_bonelt_pins>;
+ pinctrl-1 = <&nxp_hdmi_bonelt_off_pins>;
+ status = "okay";
+ };
+};
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index e8ec8756e498..987429436171 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -24,324 +24,6 @@
reg = <0x80000000 0x10000000>; /* 256 MB */
};
- am33xx_pinmux: pinmux@44e10800 {
- pinctrl-names = "default";
- pinctrl-0 = <&matrix_keypad_s0 &volume_keys_s0 &clkout2_pin>;
-
- matrix_keypad_s0: matrix_keypad_s0 {
- pinctrl-single,pins = <
- 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
- 0x58 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a6.gpio1_22 */
- 0x64 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_a9.gpio1_25 */
- 0x68 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_a10.gpio1_26 */
- 0x6c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_a11.gpio1_27 */
- >;
- };
-
- volume_keys_s0: volume_keys_s0 {
- pinctrl-single,pins = <
- 0x150 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_sclk.gpio0_2 */
- 0x154 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_d0.gpio0_3 */
- >;
- };
-
- i2c0_pins: pinmux_i2c0_pins {
- pinctrl-single,pins = <
- 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
- >;
- };
-
- i2c1_pins: pinmux_i2c1_pins {
- pinctrl-single,pins = <
- 0x158 (PIN_INPUT_PULLUP | MUX_MODE2) /* spi0_d1.i2c1_sda */
- 0x15c (PIN_INPUT_PULLUP | MUX_MODE2) /* spi0_cs0.i2c1_scl */
- >;
- };
-
- uart0_pins: pinmux_uart0_pins {
- pinctrl-single,pins = <
- 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
- >;
- };
-
- clkout2_pin: pinmux_clkout2_pin {
- pinctrl-single,pins = <
- 0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
- >;
- };
-
- nandflash_pins_s0: nandflash_pins_s0 {
- pinctrl-single,pins = <
- 0x0 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad0.gpmc_ad0 */
- 0x4 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad1.gpmc_ad1 */
- 0x8 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad2.gpmc_ad2 */
- 0xc (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad3.gpmc_ad3 */
- 0x10 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad4.gpmc_ad4 */
- 0x14 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad5.gpmc_ad5 */
- 0x18 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad6.gpmc_ad6 */
- 0x1c (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad7.gpmc_ad7 */
- 0x70 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_wait0.gpmc_wait0 */
- 0x74 (PIN_INPUT_PULLUP | MUX_MODE7) /* gpmc_wpn.gpio0_30 */
- 0x7c (PIN_OUTPUT | MUX_MODE0) /* gpmc_csn0.gpmc_csn0 */
- 0x90 (PIN_OUTPUT | MUX_MODE0) /* gpmc_advn_ale.gpmc_advn_ale */
- 0x94 (PIN_OUTPUT | MUX_MODE0) /* gpmc_oen_ren.gpmc_oen_ren */
- 0x98 (PIN_OUTPUT | MUX_MODE0) /* gpmc_wen.gpmc_wen */
- 0x9c (PIN_OUTPUT | MUX_MODE0) /* gpmc_be0n_cle.gpmc_be0n_cle */
- >;
- };
-
- ecap0_pins: backlight_pins {
- pinctrl-single,pins = <
- 0x164 0x0 /* eCAP0_in_PWM0_out.eCAP0_in_PWM0_out MODE0 */
- >;
- };
-
- cpsw_default: cpsw_default {
- pinctrl-single,pins = <
- /* Slave 1 */
- 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txen.rgmii1_tctl */
- 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
- 0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd3.rgmii1_td3 */
- 0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd2.rgmii1_td2 */
- 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd1.rgmii1_td1 */
- 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd0.rgmii1_td0 */
- 0x12c (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txclk.rgmii1_tclk */
- 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxclk.rgmii1_rclk */
- 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd3.rgmii1_rd3 */
- 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd2.rgmii1_rd2 */
- 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd1.rgmii1_rd1 */
- 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd0.rgmii1_rd0 */
- >;
- };
-
- cpsw_sleep: cpsw_sleep {
- pinctrl-single,pins = <
- /* Slave 1 reset value */
- 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- >;
- };
-
- davinci_mdio_default: davinci_mdio_default {
- pinctrl-single,pins = <
- /* MDIO */
- 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
- >;
- };
-
- davinci_mdio_sleep: davinci_mdio_sleep {
- pinctrl-single,pins = <
- /* MDIO reset value */
- 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- >;
- };
- };
-
- ocp {
- uart0: serial@44e09000 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins>;
-
- status = "okay";
- };
-
- i2c0: i2c@44e0b000 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins>;
-
- status = "okay";
- clock-frequency = <400000>;
-
- tps: tps@2d {
- reg = <0x2d>;
- };
- };
-
- musb: usb@47400000 {
- status = "okay";
-
- control@44e10000 {
- status = "okay";
- };
-
- usb-phy@47401300 {
- status = "okay";
- };
-
- usb-phy@47401b00 {
- status = "okay";
- };
-
- usb@47401000 {
- status = "okay";
- };
-
- usb@47401800 {
- status = "okay";
- dr_mode = "host";
- };
-
- dma-controller@07402000 {
- status = "okay";
- };
- };
-
- i2c1: i2c@4802a000 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_pins>;
-
- status = "okay";
- clock-frequency = <100000>;
-
- lis331dlh: lis331dlh@18 {
- compatible = "st,lis331dlh", "st,lis3lv02d";
- reg = <0x18>;
- Vdd-supply = <&lis3_reg>;
- Vdd_IO-supply = <&lis3_reg>;
-
- st,click-single-x;
- st,click-single-y;
- st,click-single-z;
- st,click-thresh-x = <10>;
- st,click-thresh-y = <10>;
- st,click-thresh-z = <10>;
- st,irq1-click;
- st,irq2-click;
- st,wakeup-x-lo;
- st,wakeup-x-hi;
- st,wakeup-y-lo;
- st,wakeup-y-hi;
- st,wakeup-z-lo;
- st,wakeup-z-hi;
- st,min-limit-x = <120>;
- st,min-limit-y = <120>;
- st,min-limit-z = <140>;
- st,max-limit-x = <550>;
- st,max-limit-y = <550>;
- st,max-limit-z = <750>;
- };
-
- tsl2550: tsl2550@39 {
- compatible = "taos,tsl2550";
- reg = <0x39>;
- };
-
- tmp275: tmp275@48 {
- compatible = "ti,tmp275";
- reg = <0x48>;
- };
- };
-
- elm: elm@48080000 {
- status = "okay";
- };
-
- epwmss0: epwmss@48300000 {
- status = "okay";
-
- ecap0: ecap@48300100 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&ecap0_pins>;
- };
- };
-
- gpmc: gpmc@50000000 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&nandflash_pins_s0>;
- ranges = <0 0 0x08000000 0x10000000>; /* CS0: NAND */
- nand@0,0 {
- reg = <0 0 0>; /* CS0, offset 0 */
- nand-bus-width = <8>;
- ti,nand-ecc-opt = "bch8";
- gpmc,device-nand = "true";
- gpmc,device-width = <1>;
- gpmc,sync-clk-ps = <0>;
- gpmc,cs-on-ns = <0>;
- gpmc,cs-rd-off-ns = <44>;
- gpmc,cs-wr-off-ns = <44>;
- gpmc,adv-on-ns = <6>;
- gpmc,adv-rd-off-ns = <34>;
- gpmc,adv-wr-off-ns = <44>;
- gpmc,we-on-ns = <0>;
- gpmc,we-off-ns = <40>;
- gpmc,oe-on-ns = <0>;
- gpmc,oe-off-ns = <54>;
- gpmc,access-ns = <64>;
- gpmc,rd-cycle-ns = <82>;
- gpmc,wr-cycle-ns = <82>;
- gpmc,wait-on-read = "true";
- gpmc,wait-on-write = "true";
- gpmc,bus-turnaround-ns = <0>;
- gpmc,cycle2cycle-delay-ns = <0>;
- gpmc,clk-activation-ns = <0>;
- gpmc,wait-monitoring-ns = <0>;
- gpmc,wr-access-ns = <40>;
- gpmc,wr-data-mux-bus-ns = <0>;
-
- #address-cells = <1>;
- #size-cells = <1>;
- elm_id = <&elm>;
-
- /* MTD partition table */
- partition@0 {
- label = "SPL1";
- reg = <0x00000000 0x000020000>;
- };
-
- partition@1 {
- label = "SPL2";
- reg = <0x00020000 0x00020000>;
- };
-
- partition@2 {
- label = "SPL3";
- reg = <0x00040000 0x00020000>;
- };
-
- partition@3 {
- label = "SPL4";
- reg = <0x00060000 0x00020000>;
- };
-
- partition@4 {
- label = "U-boot";
- reg = <0x00080000 0x001e0000>;
- };
-
- partition@5 {
- label = "environment";
- reg = <0x00260000 0x00020000>;
- };
-
- partition@6 {
- label = "Kernel";
- reg = <0x00280000 0x00500000>;
- };
-
- partition@7 {
- label = "File-System";
- reg = <0x00780000 0x0F880000>;
- };
- };
- };
- };
-
vbat: fixedregulator@0 {
compatible = "regulator-fixed";
regulator-name = "vbat";
@@ -403,10 +85,447 @@
brightness-levels = <0 51 53 56 62 75 101 152 255>;
default-brightness-level = <8>;
};
+
+ panel {
+ compatible = "ti,tilcdc,panel";
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcd_pins_s0>;
+ panel-info {
+ ac-bias = <255>;
+ ac-bias-intrpt = <0>;
+ dma-burst-sz = <16>;
+ bpp = <32>;
+ fdd = <0x80>;
+ sync-edge = <0>;
+ sync-ctrl = <1>;
+ raster-order = <0>;
+ fifo-th = <0>;
+ };
+
+ display-timings {
+ 800x480p62 {
+ clock-frequency = <30000000>;
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <39>;
+ hback-porch = <39>;
+ hsync-len = <47>;
+ vback-porch = <29>;
+ vfront-porch = <13>;
+ vsync-len = <2>;
+ hsync-active = <1>;
+ vsync-active = <1>;
+ };
+ };
+ };
+
+ sound {
+ compatible = "ti,da830-evm-audio";
+ ti,model = "AM335x-EVM";
+ ti,audio-codec = <&tlv320aic3106>;
+ ti,mcasp-controller = <&mcasp1>;
+ ti,codec-clock-rate = <12000000>;
+ ti,audio-routing =
+ "Headphone Jack", "HPLOUT",
+ "Headphone Jack", "HPROUT",
+ "LINE1L", "Line In",
+ "LINE1R", "Line In";
+ };
+};
+
+&am33xx_pinmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&matrix_keypad_s0 &volume_keys_s0 &clkout2_pin>;
+
+ matrix_keypad_s0: matrix_keypad_s0 {
+ pinctrl-single,pins = <
+ 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
+ 0x58 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a6.gpio1_22 */
+ 0x64 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_a9.gpio1_25 */
+ 0x68 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_a10.gpio1_26 */
+ 0x6c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_a11.gpio1_27 */
+ >;
+ };
+
+ volume_keys_s0: volume_keys_s0 {
+ pinctrl-single,pins = <
+ 0x150 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_sclk.gpio0_2 */
+ 0x154 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_d0.gpio0_3 */
+ >;
+ };
+
+ i2c0_pins: pinmux_i2c0_pins {
+ pinctrl-single,pins = <
+ 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
+ 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ >;
+ };
+
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ 0x158 (PIN_INPUT_PULLUP | MUX_MODE2) /* spi0_d1.i2c1_sda */
+ 0x15c (PIN_INPUT_PULLUP | MUX_MODE2) /* spi0_cs0.i2c1_scl */
+ >;
+ };
+
+ uart0_pins: pinmux_uart0_pins {
+ pinctrl-single,pins = <
+ 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
+ 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ >;
+ };
+
+ clkout2_pin: pinmux_clkout2_pin {
+ pinctrl-single,pins = <
+ 0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
+ >;
+ };
+
+ nandflash_pins_s0: nandflash_pins_s0 {
+ pinctrl-single,pins = <
+ 0x0 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad0.gpmc_ad0 */
+ 0x4 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad1.gpmc_ad1 */
+ 0x8 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad2.gpmc_ad2 */
+ 0xc (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad3.gpmc_ad3 */
+ 0x10 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad4.gpmc_ad4 */
+ 0x14 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad5.gpmc_ad5 */
+ 0x18 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad6.gpmc_ad6 */
+ 0x1c (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad7.gpmc_ad7 */
+ 0x70 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_wait0.gpmc_wait0 */
+ 0x74 (PIN_INPUT_PULLUP | MUX_MODE7) /* gpmc_wpn.gpio0_30 */
+ 0x7c (PIN_OUTPUT | MUX_MODE0) /* gpmc_csn0.gpmc_csn0 */
+ 0x90 (PIN_OUTPUT | MUX_MODE0) /* gpmc_advn_ale.gpmc_advn_ale */
+ 0x94 (PIN_OUTPUT | MUX_MODE0) /* gpmc_oen_ren.gpmc_oen_ren */
+ 0x98 (PIN_OUTPUT | MUX_MODE0) /* gpmc_wen.gpmc_wen */
+ 0x9c (PIN_OUTPUT | MUX_MODE0) /* gpmc_be0n_cle.gpmc_be0n_cle */
+ >;
+ };
+
+ ecap0_pins: backlight_pins {
+ pinctrl-single,pins = <
+ 0x164 0x0 /* eCAP0_in_PWM0_out.eCAP0_in_PWM0_out MODE0 */
+ >;
+ };
+
+ cpsw_default: cpsw_default {
+ pinctrl-single,pins = <
+ /* Slave 1 */
+ 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txen.rgmii1_tctl */
+ 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
+ 0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd3.rgmii1_td3 */
+ 0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd2.rgmii1_td2 */
+ 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd1.rgmii1_td1 */
+ 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd0.rgmii1_td0 */
+ 0x12c (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txclk.rgmii1_tclk */
+ 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxclk.rgmii1_rclk */
+ 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd3.rgmii1_rd3 */
+ 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd2.rgmii1_rd2 */
+ 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd1.rgmii1_rd1 */
+ 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd0.rgmii1_rd0 */
+ >;
+ };
+
+ cpsw_sleep: cpsw_sleep {
+ pinctrl-single,pins = <
+ /* Slave 1 reset value */
+ 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
+
+ davinci_mdio_default: davinci_mdio_default {
+ pinctrl-single,pins = <
+ /* MDIO */
+ 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
+ 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ >;
+ };
+
+ davinci_mdio_sleep: davinci_mdio_sleep {
+ pinctrl-single,pins = <
+ /* MDIO reset value */
+ 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
+
+ lcd_pins_s0: lcd_pins_s0 {
+ pinctrl-single,pins = <
+ 0x20 0x01 /* gpmc_ad8.lcd_data16, OUTPUT | MODE1 */
+ 0x24 0x01 /* gpmc_ad9.lcd_data17, OUTPUT | MODE1 */
+ 0x28 0x01 /* gpmc_ad10.lcd_data18, OUTPUT | MODE1 */
+ 0x2c 0x01 /* gpmc_ad11.lcd_data19, OUTPUT | MODE1 */
+ 0x30 0x01 /* gpmc_ad12.lcd_data20, OUTPUT | MODE1 */
+ 0x34 0x01 /* gpmc_ad13.lcd_data21, OUTPUT | MODE1 */
+ 0x38 0x01 /* gpmc_ad14.lcd_data22, OUTPUT | MODE1 */
+ 0x3c 0x01 /* gpmc_ad15.lcd_data23, OUTPUT | MODE1 */
+ 0xa0 0x00 /* lcd_data0.lcd_data0, OUTPUT | MODE0 */
+ 0xa4 0x00 /* lcd_data1.lcd_data1, OUTPUT | MODE0 */
+ 0xa8 0x00 /* lcd_data2.lcd_data2, OUTPUT | MODE0 */
+ 0xac 0x00 /* lcd_data3.lcd_data3, OUTPUT | MODE0 */
+ 0xb0 0x00 /* lcd_data4.lcd_data4, OUTPUT | MODE0 */
+ 0xb4 0x00 /* lcd_data5.lcd_data5, OUTPUT | MODE0 */
+ 0xb8 0x00 /* lcd_data6.lcd_data6, OUTPUT | MODE0 */
+ 0xbc 0x00 /* lcd_data7.lcd_data7, OUTPUT | MODE0 */
+ 0xc0 0x00 /* lcd_data8.lcd_data8, OUTPUT | MODE0 */
+ 0xc4 0x00 /* lcd_data9.lcd_data9, OUTPUT | MODE0 */
+ 0xc8 0x00 /* lcd_data10.lcd_data10, OUTPUT | MODE0 */
+ 0xcc 0x00 /* lcd_data11.lcd_data11, OUTPUT | MODE0 */
+ 0xd0 0x00 /* lcd_data12.lcd_data12, OUTPUT | MODE0 */
+ 0xd4 0x00 /* lcd_data13.lcd_data13, OUTPUT | MODE0 */
+ 0xd8 0x00 /* lcd_data14.lcd_data14, OUTPUT | MODE0 */
+ 0xdc 0x00 /* lcd_data15.lcd_data15, OUTPUT | MODE0 */
+ 0xe0 0x00 /* lcd_vsync.lcd_vsync, OUTPUT | MODE0 */
+ 0xe4 0x00 /* lcd_hsync.lcd_hsync, OUTPUT | MODE0 */
+ 0xe8 0x00 /* lcd_pclk.lcd_pclk, OUTPUT | MODE0 */
+ 0xec 0x00 /* lcd_ac_bias_en.lcd_ac_bias_en, OUTPUT | MODE0 */
+ >;
+ };
+
+ am335x_evm_audio_pins: am335x_evm_audio_pins {
+ pinctrl-single,pins = <
+ 0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_rx_dv.mcasp1_aclkx */
+ 0x110 (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_txd3.mcasp1_fsx */
+ 0x108 (PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* mii1_col.mcasp1_axr2 */
+ 0x144 (PIN_INPUT_PULLDOWN | MUX_MODE4) /* rmii1_ref_clk.mcasp1_axr3 */
+ >;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+
+ status = "okay";
+ clock-frequency = <400000>;
+
+ tps: tps@2d {
+ reg = <0x2d>;
+ };
+};
+
+&usb {
+ status = "okay";
+
+ control@44e10000 {
+ status = "okay";
+ };
+
+ usb-phy@47401300 {
+ status = "okay";
+ };
+
+ usb-phy@47401b00 {
+ status = "okay";
+ };
+
+ usb@47401000 {
+ status = "okay";
+ };
+
+ usb@47401800 {
+ status = "okay";
+ dr_mode = "host";
+ };
+
+ dma-controller@07402000 {
+ status = "okay";
+ };
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+
+ status = "okay";
+ clock-frequency = <100000>;
+
+ lis331dlh: lis331dlh@18 {
+ compatible = "st,lis331dlh", "st,lis3lv02d";
+ reg = <0x18>;
+ Vdd-supply = <&lis3_reg>;
+ Vdd_IO-supply = <&lis3_reg>;
+
+ st,click-single-x;
+ st,click-single-y;
+ st,click-single-z;
+ st,click-thresh-x = <10>;
+ st,click-thresh-y = <10>;
+ st,click-thresh-z = <10>;
+ st,irq1-click;
+ st,irq2-click;
+ st,wakeup-x-lo;
+ st,wakeup-x-hi;
+ st,wakeup-y-lo;
+ st,wakeup-y-hi;
+ st,wakeup-z-lo;
+ st,wakeup-z-hi;
+ st,min-limit-x = <120>;
+ st,min-limit-y = <120>;
+ st,min-limit-z = <140>;
+ st,max-limit-x = <550>;
+ st,max-limit-y = <550>;
+ st,max-limit-z = <750>;
+ };
+
+ tsl2550: tsl2550@39 {
+ compatible = "taos,tsl2550";
+ reg = <0x39>;
+ };
+
+ tmp275: tmp275@48 {
+ compatible = "ti,tmp275";
+ reg = <0x48>;
+ };
+
+ tlv320aic3106: tlv320aic3106@1b {
+ compatible = "ti,tlv320aic3106";
+ reg = <0x1b>;
+ status = "okay";
+
+ /* Regulators */
+ AVDD-supply = <&vaux2_reg>;
+ IOVDD-supply = <&vaux2_reg>;
+ DRVDD-supply = <&vaux2_reg>;
+ DVDD-supply = <&vbat>;
+ };
+};
+
+&lcdc {
+ status = "okay";
+};
+
+&elm {
+ status = "okay";
+};
+
+&epwmss0 {
+ status = "okay";
+
+ ecap0: ecap@48300100 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ecap0_pins>;
+ };
+};
+
+&gpmc {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&nandflash_pins_s0>;
+ ranges = <0 0 0x08000000 0x10000000>; /* CS0: NAND */
+ nand@0,0 {
+ reg = <0 0 0>; /* CS0, offset 0 */
+ nand-bus-width = <8>;
+ ti,nand-ecc-opt = "bch8";
+ gpmc,device-nand = "true";
+ gpmc,device-width = <1>;
+ gpmc,sync-clk-ps = <0>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <44>;
+ gpmc,cs-wr-off-ns = <44>;
+ gpmc,adv-on-ns = <6>;
+ gpmc,adv-rd-off-ns = <34>;
+ gpmc,adv-wr-off-ns = <44>;
+ gpmc,we-on-ns = <0>;
+ gpmc,we-off-ns = <40>;
+ gpmc,oe-on-ns = <0>;
+ gpmc,oe-off-ns = <54>;
+ gpmc,access-ns = <64>;
+ gpmc,rd-cycle-ns = <82>;
+ gpmc,wr-cycle-ns = <82>;
+ gpmc,wait-on-read = "true";
+ gpmc,wait-on-write = "true";
+ gpmc,bus-turnaround-ns = <0>;
+ gpmc,cycle2cycle-delay-ns = <0>;
+ gpmc,clk-activation-ns = <0>;
+ gpmc,wait-monitoring-ns = <0>;
+ gpmc,wr-access-ns = <40>;
+ gpmc,wr-data-mux-bus-ns = <0>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ elm_id = <&elm>;
+
+ /* MTD partition table */
+ partition@0 {
+ label = "SPL1";
+ reg = <0x00000000 0x000020000>;
+ };
+
+ partition@1 {
+ label = "SPL2";
+ reg = <0x00020000 0x00020000>;
+ };
+
+ partition@2 {
+ label = "SPL3";
+ reg = <0x00040000 0x00020000>;
+ };
+
+ partition@3 {
+ label = "SPL4";
+ reg = <0x00060000 0x00020000>;
+ };
+
+ partition@4 {
+ label = "U-boot";
+ reg = <0x00080000 0x001e0000>;
+ };
+
+ partition@5 {
+ label = "environment";
+ reg = <0x00260000 0x00020000>;
+ };
+
+ partition@6 {
+ label = "Kernel";
+ reg = <0x00280000 0x00500000>;
+ };
+
+ partition@7 {
+ label = "File-System";
+ reg = <0x00780000 0x0F880000>;
+ };
+ };
};
#include "tps65910.dtsi"
+&mcasp1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&am335x_evm_audio_pins>;
+
+ status = "okay";
+
+ op-mode = <0>; /* MCASP_IIS_MODE */
+ tdm-slots = <2>;
+ /* 4 serializers */
+ serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
+ 0 0 1 2
+ >;
+ tx-num-evt = <1>;
+ rx-num-evt = <1>;
+};
+
&tps {
vcc1-supply = <&vbat>;
vcc2-supply = <&vbat>;
@@ -477,6 +596,8 @@
};
vmmc_reg: regulator@12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
};
};
@@ -517,3 +638,17 @@
ti,adc-channels = <4 5 6 7>;
};
};
+
+&mmc1 {
+ status = "okay";
+ vmmc-supply = <&vmmc_reg>;
+ bus-width = <4>;
+};
+
+&sham {
+ status = "okay";
+};
+
+&aes {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 4f339fa91c57..03febf85fd2f 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -31,210 +31,6 @@
reg = <0x80000000 0x10000000>; /* 256 MB */
};
- am33xx_pinmux: pinmux@44e10800 {
- pinctrl-names = "default";
- pinctrl-0 = <&gpio_keys_s0 &clkout2_pin>;
-
- user_leds_s0: user_leds_s0 {
- pinctrl-single,pins = <
- 0x10 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad4.gpio1_4 */
- 0x14 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad5.gpio1_5 */
- 0x18 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad6.gpio1_6 */
- 0x1c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad7.gpio1_7 */
- >;
- };
-
- gpio_keys_s0: gpio_keys_s0 {
- pinctrl-single,pins = <
- 0x94 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_oen_ren.gpio2_3 */
- 0x90 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_advn_ale.gpio2_2 */
- 0x70 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_wait0.gpio0_30 */
- 0x9c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ben0_cle.gpio2_5 */
- >;
- };
-
- i2c0_pins: pinmux_i2c0_pins {
- pinctrl-single,pins = <
- 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
- >;
- };
-
- uart0_pins: pinmux_uart0_pins {
- pinctrl-single,pins = <
- 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
- >;
- };
-
- clkout2_pin: pinmux_clkout2_pin {
- pinctrl-single,pins = <
- 0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
- >;
- };
-
- ecap2_pins: backlight_pins {
- pinctrl-single,pins = <
- 0x19c 0x4 /* mcasp0_ahclkr.ecap2_in_pwm2_out MODE4 */
- >;
- };
-
- cpsw_default: cpsw_default {
- pinctrl-single,pins = <
- /* Slave 1 */
- 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txen.rgmii1_tctl */
- 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
- 0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd3.rgmii1_td3 */
- 0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd2.rgmii1_td2 */
- 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd1.rgmii1_td1 */
- 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd0.rgmii1_td0 */
- 0x12c (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txclk.rgmii1_tclk */
- 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxclk.rgmii1_rclk */
- 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd3.rgmii1_rd3 */
- 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd2.rgmii1_rd2 */
- 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd1.rgmii1_rd1 */
- 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd0.rgmii1_rd0 */
-
- /* Slave 2 */
- 0x40 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a0.rgmii2_tctl */
- 0x44 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a1.rgmii2_rctl */
- 0x48 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a2.rgmii2_td3 */
- 0x4c (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a3.rgmii2_td2 */
- 0x50 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a4.rgmii2_td1 */
- 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a5.rgmii2_td0 */
- 0x58 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a6.rgmii2_tclk */
- 0x5c (PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a7.rgmii2_rclk */
- 0x60 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a8.rgmii2_rd3 */
- 0x64 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a9.rgmii2_rd2 */
- 0x68 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a10.rgmii2_rd1 */
- 0x6c (PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a11.rgmii2_rd0 */
- >;
- };
-
- cpsw_sleep: cpsw_sleep {
- pinctrl-single,pins = <
- /* Slave 1 reset value */
- 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
-
- /* Slave 2 reset value*/
- 0x40 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x44 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x48 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x4c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x50 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x54 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x58 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x5c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x60 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x64 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x68 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x6c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- >;
- };
-
- davinci_mdio_default: davinci_mdio_default {
- pinctrl-single,pins = <
- /* MDIO */
- 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
- >;
- };
-
- davinci_mdio_sleep: davinci_mdio_sleep {
- pinctrl-single,pins = <
- /* MDIO reset value */
- 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- >;
- };
- };
-
- ocp {
- uart0: serial@44e09000 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins>;
-
- status = "okay";
- };
-
- i2c0: i2c@44e0b000 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins>;
-
- status = "okay";
- clock-frequency = <400000>;
-
- tps: tps@2d {
- reg = <0x2d>;
- };
-
- lis331dlh: lis331dlh@18 {
- compatible = "st,lis331dlh", "st,lis3lv02d";
- reg = <0x18>;
- Vdd-supply = <&lis3_reg>;
- Vdd_IO-supply = <&lis3_reg>;
-
- st,click-single-x;
- st,click-single-y;
- st,click-single-z;
- st,click-thresh-x = <10>;
- st,click-thresh-y = <10>;
- st,click-thresh-z = <10>;
- st,irq1-click;
- st,irq2-click;
- st,wakeup-x-lo;
- st,wakeup-x-hi;
- st,wakeup-y-lo;
- st,wakeup-y-hi;
- st,wakeup-z-lo;
- st,wakeup-z-hi;
- st,min-limit-x = <120>;
- st,min-limit-y = <120>;
- st,min-limit-z = <140>;
- st,max-limit-x = <550>;
- st,max-limit-y = <550>;
- st,max-limit-z = <750>;
- };
- };
-
- musb: usb@47400000 {
- status = "okay";
-
- control@44e10000 {
- status = "okay";
- };
-
- usb-phy@47401300 {
- status = "okay";
- };
-
- usb@47401000 {
- status = "okay";
- };
- };
-
- epwmss2: epwmss@48304000 {
- status = "okay";
-
- ecap2: ecap@48304100 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&ecap2_pins>;
- };
- };
- };
-
vbat: fixedregulator@0 {
compatible = "regulator-fixed";
regulator-name = "vbat";
@@ -319,6 +115,240 @@
brightness-levels = <0 58 61 66 75 90 125 170 255>;
default-brightness-level = <8>;
};
+
+ sound {
+ compatible = "ti,da830-evm-audio";
+ ti,model = "AM335x-EVMSK";
+ ti,audio-codec = <&tlv320aic3106>;
+ ti,mcasp-controller = <&mcasp1>;
+ ti,codec-clock-rate = <24576000>;
+ ti,audio-routing =
+ "Headphone Jack", "HPLOUT",
+ "Headphone Jack", "HPROUT";
+ };
+};
+
+&am33xx_pinmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_s0 &clkout2_pin>;
+
+ user_leds_s0: user_leds_s0 {
+ pinctrl-single,pins = <
+ 0x10 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad4.gpio1_4 */
+ 0x14 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad5.gpio1_5 */
+ 0x18 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad6.gpio1_6 */
+ 0x1c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad7.gpio1_7 */
+ >;
+ };
+
+ gpio_keys_s0: gpio_keys_s0 {
+ pinctrl-single,pins = <
+ 0x94 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_oen_ren.gpio2_3 */
+ 0x90 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_advn_ale.gpio2_2 */
+ 0x70 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_wait0.gpio0_30 */
+ 0x9c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ben0_cle.gpio2_5 */
+ >;
+ };
+
+ i2c0_pins: pinmux_i2c0_pins {
+ pinctrl-single,pins = <
+ 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
+ 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ >;
+ };
+
+ uart0_pins: pinmux_uart0_pins {
+ pinctrl-single,pins = <
+ 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
+ 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ >;
+ };
+
+ clkout2_pin: pinmux_clkout2_pin {
+ pinctrl-single,pins = <
+ 0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
+ >;
+ };
+
+ ecap2_pins: backlight_pins {
+ pinctrl-single,pins = <
+ 0x19c 0x4 /* mcasp0_ahclkr.ecap2_in_pwm2_out MODE4 */
+ >;
+ };
+
+ cpsw_default: cpsw_default {
+ pinctrl-single,pins = <
+ /* Slave 1 */
+ 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txen.rgmii1_tctl */
+ 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
+ 0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd3.rgmii1_td3 */
+ 0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd2.rgmii1_td2 */
+ 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd1.rgmii1_td1 */
+ 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd0.rgmii1_td0 */
+ 0x12c (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txclk.rgmii1_tclk */
+ 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxclk.rgmii1_rclk */
+ 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd3.rgmii1_rd3 */
+ 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd2.rgmii1_rd2 */
+ 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd1.rgmii1_rd1 */
+ 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd0.rgmii1_rd0 */
+
+ /* Slave 2 */
+ 0x40 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a0.rgmii2_tctl */
+ 0x44 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a1.rgmii2_rctl */
+ 0x48 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a2.rgmii2_td3 */
+ 0x4c (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a3.rgmii2_td2 */
+ 0x50 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a4.rgmii2_td1 */
+ 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a5.rgmii2_td0 */
+ 0x58 (PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a6.rgmii2_tclk */
+ 0x5c (PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a7.rgmii2_rclk */
+ 0x60 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a8.rgmii2_rd3 */
+ 0x64 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a9.rgmii2_rd2 */
+ 0x68 (PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a10.rgmii2_rd1 */
+ 0x6c (PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a11.rgmii2_rd0 */
+ >;
+ };
+
+ cpsw_sleep: cpsw_sleep {
+ pinctrl-single,pins = <
+ /* Slave 1 reset value */
+ 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+
+ /* Slave 2 reset value*/
+ 0x40 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x44 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x48 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x4c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x50 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x54 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x58 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x5c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x60 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x64 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x68 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x6c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
+
+ davinci_mdio_default: davinci_mdio_default {
+ pinctrl-single,pins = <
+ /* MDIO */
+ 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
+ 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ >;
+ };
+
+ davinci_mdio_sleep: davinci_mdio_sleep {
+ pinctrl-single,pins = <
+ /* MDIO reset value */
+ 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
+
+ mcasp1_pins: mcasp1_pins {
+ pinctrl-single,pins = <
+ 0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_crs.mcasp1_aclkx */
+ 0x110 (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_rxerr.mcasp1_fsx */
+ 0x108 (PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* mii1_col.mcasp1_axr2 */
+ 0x144 (PIN_INPUT_PULLDOWN | MUX_MODE4) /* rmii1_ref_clk.mcasp1_axr3 */
+ >;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+
+ status = "okay";
+ clock-frequency = <400000>;
+
+ tps: tps@2d {
+ reg = <0x2d>;
+ };
+
+ lis331dlh: lis331dlh@18 {
+ compatible = "st,lis331dlh", "st,lis3lv02d";
+ reg = <0x18>;
+ Vdd-supply = <&lis3_reg>;
+ Vdd_IO-supply = <&lis3_reg>;
+
+ st,click-single-x;
+ st,click-single-y;
+ st,click-single-z;
+ st,click-thresh-x = <10>;
+ st,click-thresh-y = <10>;
+ st,click-thresh-z = <10>;
+ st,irq1-click;
+ st,irq2-click;
+ st,wakeup-x-lo;
+ st,wakeup-x-hi;
+ st,wakeup-y-lo;
+ st,wakeup-y-hi;
+ st,wakeup-z-lo;
+ st,wakeup-z-hi;
+ st,min-limit-x = <120>;
+ st,min-limit-y = <120>;
+ st,min-limit-z = <140>;
+ st,max-limit-x = <550>;
+ st,max-limit-y = <550>;
+ st,max-limit-z = <750>;
+ };
+
+ tlv320aic3106: tlv320aic3106@1b {
+ compatible = "ti,tlv320aic3106";
+ reg = <0x1b>;
+ status = "okay";
+
+ /* Regulators */
+ AVDD-supply = <&vaux2_reg>;
+ IOVDD-supply = <&vaux2_reg>;
+ DRVDD-supply = <&vaux2_reg>;
+ DVDD-supply = <&vbat>;
+ };
+};
+
+&usb {
+ status = "okay";
+
+ control@44e10000 {
+ status = "okay";
+ };
+
+ usb-phy@47401300 {
+ status = "okay";
+ };
+
+ usb@47401000 {
+ status = "okay";
+ };
+};
+
+&epwmss2 {
+ status = "okay";
+
+ ecap2: ecap@48304100 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ecap2_pins>;
+ };
};
#include "tps65910.dtsi"
@@ -393,6 +423,8 @@
};
vmmc_reg: regulator@12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
};
};
@@ -419,3 +451,37 @@
phy_id = <&davinci_mdio>, <1>;
phy-mode = "rgmii-txid";
};
+
+&mmc1 {
+ status = "okay";
+ vmmc-supply = <&vmmc_reg>;
+ bus-width = <4>;
+};
+
+&sham {
+ status = "okay";
+};
+
+&aes {
+ status = "okay";
+};
+
+&gpio0 {
+ ti,no-reset-on-init;
+};
+
+&mcasp1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcasp1_pins>;
+
+ status = "okay";
+
+ op-mode = <0>; /* MCASP_IIS_MODE */
+ tdm-slots = <2>;
+ /* 4 serializers */
+ serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
+ 0 0 1 2
+ >;
+ tx-num-evt = <1>;
+ rx-num-evt = <1>;
+};
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
new file mode 100644
index 000000000000..619624479311
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
@@ -0,0 +1,278 @@
+/*
+ * am335x-igep0033.dtsi - Device Tree file for IGEP COM AQUILA AM335x
+ *
+ * Copyright (C) 2013 ISEE 2007 SL - http://www.isee.biz
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/dts-v1/;
+
+#include "am33xx.dtsi"
+
+/ {
+ cpus {
+ cpu@0 {
+ cpu0-supply = <&vdd1_reg>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256 MB */
+ };
+
+ leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&leds_pins>;
+
+ compatible = "gpio-leds";
+
+ led@0 {
+ label = "com:green:user";
+ gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ vbat: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vbat";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ };
+
+ vmmc: fixedregulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vmmc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&am33xx_pinmux {
+ i2c0_pins: pinmux_i2c0_pins {
+ pinctrl-single,pins = <
+ 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
+ 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ >;
+ };
+
+ nandflash_pins: pinmux_nandflash_pins {
+ pinctrl-single,pins = <
+ 0x0 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad0.gpmc_ad0 */
+ 0x4 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad1.gpmc_ad1 */
+ 0x8 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad2.gpmc_ad2 */
+ 0xc (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad3.gpmc_ad3 */
+ 0x10 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad4.gpmc_ad4 */
+ 0x14 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad5.gpmc_ad5 */
+ 0x18 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad6.gpmc_ad6 */
+ 0x1c (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad7.gpmc_ad7 */
+ 0x70 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_wait0.gpmc_wait0 */
+ 0x74 (PIN_INPUT_PULLUP | MUX_MODE7) /* gpmc_wpn.gpio0_30 */
+ 0x7c (PIN_OUTPUT | MUX_MODE0) /* gpmc_csn0.gpmc_csn0 */
+ 0x90 (PIN_OUTPUT | MUX_MODE0) /* gpmc_advn_ale.gpmc_advn_ale */
+ 0x94 (PIN_OUTPUT | MUX_MODE0) /* gpmc_oen_ren.gpmc_oen_ren */
+ 0x98 (PIN_OUTPUT | MUX_MODE0) /* gpmc_wen.gpmc_wen */
+ 0x9c (PIN_OUTPUT | MUX_MODE0) /* gpmc_be0n_cle.gpmc_be0n_cle */
+ >;
+ };
+
+ uart0_pins: pinmux_uart0_pins {
+ pinctrl-single,pins = <
+ 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
+ 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ >;
+ };
+
+ leds_pins: pinmux_leds_pins {
+ pinctrl-single,pins = <
+ 0x5c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a7.gpio1_23 */
+ >;
+ };
+};
+
+&cpsw_emac0 {
+ phy_id = <&davinci_mdio>, <0>;
+};
+
+&cpsw_emac1 {
+ phy_id = <&davinci_mdio>, <1>;
+};
+
+&elm {
+ status = "okay";
+};
+
+&gpmc {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&nandflash_pins>;
+
+ ranges = <0 0 0x08000000 0x10000000>; /* CS0: NAND */
+
+ nand@0,0 {
+ reg = <0 0 0>; /* CS0, offset 0 */
+ nand-bus-width = <8>;
+ ti,nand-ecc-opt = "bch8";
+ gpmc,device-nand = "true";
+ gpmc,device-width = <1>;
+ gpmc,sync-clk-ps = <0>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <44>;
+ gpmc,cs-wr-off-ns = <44>;
+ gpmc,adv-on-ns = <6>;
+ gpmc,adv-rd-off-ns = <34>;
+ gpmc,adv-wr-off-ns = <44>;
+ gpmc,we-on-ns = <0>;
+ gpmc,we-off-ns = <40>;
+ gpmc,oe-on-ns = <0>;
+ gpmc,oe-off-ns = <54>;
+ gpmc,access-ns = <64>;
+ gpmc,rd-cycle-ns = <82>;
+ gpmc,wr-cycle-ns = <82>;
+ gpmc,wait-on-read = "true";
+ gpmc,wait-on-write = "true";
+ gpmc,bus-turnaround-ns = <0>;
+ gpmc,cycle2cycle-delay-ns = <0>;
+ gpmc,clk-activation-ns = <0>;
+ gpmc,wait-monitoring-ns = <0>;
+ gpmc,wr-access-ns = <40>;
+ gpmc,wr-data-mux-bus-ns = <0>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ elm_id = <&elm>;
+
+ /* MTD partition table */
+ partition@0 {
+ label = "SPL";
+ reg = <0x00000000 0x000080000>;
+ };
+
+ partition@1 {
+ label = "U-boot";
+ reg = <0x00080000 0x001e0000>;
+ };
+
+ partition@2 {
+ label = "U-Boot Env";
+ reg = <0x00260000 0x00020000>;
+ };
+
+ partition@3 {
+ label = "Kernel";
+ reg = <0x00280000 0x00500000>;
+ };
+
+ partition@4 {
+ label = "File System";
+ reg = <0x00780000 0x007880000>;
+ };
+ };
+};
+
+&i2c0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+
+ clock-frequency = <400000>;
+
+ tps: tps@2d {
+ reg = <0x2d>;
+ };
+};
+
+&mmc1 {
+ status = "okay";
+ vmmc-supply = <&vmmc>;
+ bus-width = <4>;
+};
+
+&uart0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+};
+
+#include "tps65910.dtsi"
+
+&tps {
+ vcc1-supply = <&vbat>;
+ vcc2-supply = <&vbat>;
+ vcc3-supply = <&vbat>;
+ vcc4-supply = <&vbat>;
+ vcc5-supply = <&vbat>;
+ vcc6-supply = <&vbat>;
+ vcc7-supply = <&vbat>;
+ vccio-supply = <&vbat>;
+
+ regulators {
+ vrtc_reg: regulator@0 {
+ regulator-always-on;
+ };
+
+ vio_reg: regulator@1 {
+ regulator-always-on;
+ };
+
+ vdd1_reg: regulator@2 {
+ /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
+ regulator-name = "vdd_mpu";
+ regulator-min-microvolt = <912500>;
+ regulator-max-microvolt = <1312500>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd2_reg: regulator@3 {
+ /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
+ regulator-name = "vdd_core";
+ regulator-min-microvolt = <912500>;
+ regulator-max-microvolt = <1150000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd3_reg: regulator@4 {
+ regulator-always-on;
+ };
+
+ vdig1_reg: regulator@5 {
+ regulator-always-on;
+ };
+
+ vdig2_reg: regulator@6 {
+ regulator-always-on;
+ };
+
+ vpll_reg: regulator@7 {
+ regulator-always-on;
+ };
+
+ vdac_reg: regulator@8 {
+ regulator-always-on;
+ };
+
+ vaux1_reg: regulator@9 {
+ regulator-always-on;
+ };
+
+ vaux2_reg: regulator@10 {
+ regulator-always-on;
+ };
+
+ vaux33_reg: regulator@11 {
+ regulator-always-on;
+ };
+
+ vmmc_reg: regulator@12 {
+ regulator-always-on;
+ };
+ };
+};
+
diff --git a/arch/arm/boot/dts/am335x-nano.dts b/arch/arm/boot/dts/am335x-nano.dts
new file mode 100644
index 000000000000..9907b494b99c
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-nano.dts
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) 2013 Newflow Ltd - http://www.newflow.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "am33xx.dtsi"
+
+/ {
+ model = "Newflow AM335x NanoBone";
+ compatible = "ti,am33xx";
+
+ cpus {
+ cpu@0 {
+ cpu0-supply = <&dcdc2_reg>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256 MB */
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led@0 {
+ label = "nanobone:green:usr1";
+ gpios = <&gpio1 5 0>;
+ default-state = "off";
+ };
+ };
+};
+
+&am33xx_pinmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&misc_pins>;
+
+ misc_pins: misc_pins {
+ pinctrl-single,pins = <
+ 0x15c (PIN_OUTPUT | MUX_MODE7) /* spi0_cs0.gpio0_5 */
+ >;
+ };
+
+ gpmc_pins: gpmc_pins {
+ pinctrl-single,pins = <
+ 0x0 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad0.gpmc_ad0 */
+ 0x4 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad1.gpmc_ad1 */
+ 0x8 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad2.gpmc_ad2 */
+ 0xc (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad3.gpmc_ad3 */
+ 0x10 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad4.gpmc_ad4 */
+ 0x14 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad5.gpmc_ad5 */
+ 0x18 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad6.gpmc_ad6 */
+ 0x1c (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad7.gpmc_ad7 */
+ 0x20 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad8.gpmc_ad8 */
+ 0x24 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad9.gpmc_ad9 */
+ 0x28 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad10.gpmc_ad10 */
+ 0x2c (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad11.gpmc_ad11 */
+ 0x30 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad12.gpmc_ad12 */
+ 0x34 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad13.gpmc_ad13 */
+ 0x38 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad14.gpmc_ad14 */
+ 0x3c (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad15.gpmc_ad15 */
+
+ 0x70 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_wait0.gpmc_wait0 */
+ 0x7c (PIN_OUTPUT | MUX_MODE0) /* gpmc_csn0.gpmc_csn0 */
+ 0x80 (PIN_OUTPUT | MUX_MODE0) /* gpmc_csn1.gpmc_csn1 */
+ 0x84 (PIN_OUTPUT | MUX_MODE0) /* gpmc_csn2.gpmc_csn2 */
+ 0x88 (PIN_OUTPUT | MUX_MODE0) /* gpmc_csn3.gpmc_csn3 */
+
+ 0x90 (PIN_OUTPUT | MUX_MODE0) /* gpmc_advn_ale.gpmc_advn_ale */
+ 0x94 (PIN_OUTPUT | MUX_MODE0) /* gpmc_oen_ren.gpmc_oen_ren */
+ 0x98 (PIN_OUTPUT | MUX_MODE0) /* gpmc_wen.gpmc_wen */
+ 0x9c (PIN_OUTPUT | MUX_MODE0) /* gpmc_ben0_cle.gpmc_ben0_cle */
+
+ 0xa4 (PIN_OUTPUT | MUX_MODE1) /* lcd_data1.gpmc_a1 */
+ 0xa8 (PIN_OUTPUT | MUX_MODE1) /* lcd_data2.gpmc_a2 */
+ 0xac (PIN_OUTPUT | MUX_MODE1) /* lcd_data3.gpmc_a3 */
+ 0xb0 (PIN_OUTPUT | MUX_MODE1) /* lcd_data4.gpmc_a4 */
+ 0xb4 (PIN_OUTPUT | MUX_MODE1) /* lcd_data5.gpmc_a5 */
+ 0xb8 (PIN_OUTPUT | MUX_MODE1) /* lcd_data6.gpmc_a6 */
+ 0xbc (PIN_OUTPUT | MUX_MODE1) /* lcd_data7.gpmc_a7 */
+
+ 0xe0 (PIN_OUTPUT | MUX_MODE1) /* lcd_vsync.gpmc_a8 */
+ 0xe4 (PIN_OUTPUT | MUX_MODE1) /* lcd_hsync.gpmc_a9 */
+ 0xe8 (PIN_OUTPUT | MUX_MODE1) /* lcd_pclk.gpmc_a10 */
+ >;
+ };
+
+ i2c0_pins: i2c0_pins {
+ pinctrl-single,pins = <
+ 0x188 (PIN_INPUT_PULLDOWN | MUX_MODE0) /* i2c0_sda.i2c0_sda */
+ 0x18c (PIN_INPUT_PULLDOWN | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ >;
+ };
+
+ uart0_pins: uart0_pins {
+ pinctrl-single,pins = <
+ 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
+ 0x174 (PIN_OUTPUT | MUX_MODE0) /* uart0_txd.uart0_txd */
+ >;
+ };
+
+ uart1_pins: uart1_pins {
+ pinctrl-single,pins = <
+ 0x178 (PIN_OUTPUT | MUX_MODE7) /* uart1_ctsn.uart1_ctsn */
+ 0x17c (PIN_OUTPUT | MUX_MODE7) /* uart1_rtsn.uart1_rtsn */
+ 0x180 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart1_rxd.uart1_rxd */
+ 0x184 (PIN_OUTPUT | MUX_MODE0) /* uart1_txd.uart1_txd */
+ >;
+ };
+
+ uart2_pins: uart2_pins {
+ pinctrl-single,pins = <
+ 0xc0 (PIN_INPUT_PULLUP | MUX_MODE7) /* lcd_data8.gpio2[14] */
+ 0xc4 (PIN_OUTPUT | MUX_MODE7) /* lcd_data9.gpio2[15] */
+ 0x150 (PIN_INPUT | MUX_MODE1) /* spi0_sclk.uart2_rxd */
+ 0x154 (PIN_OUTPUT | MUX_MODE1) /* spi0_d0.uart2_txd */
+ >;
+ };
+
+ uart3_pins: uart3_pins {
+ pinctrl-single,pins = <
+ 0xc8 (PIN_INPUT_PULLUP | MUX_MODE6) /* lcd_data10.uart3_ctsn */
+ 0xcc (PIN_OUTPUT | MUX_MODE6) /* lcd_data11.uart3_rtsn */
+ 0x160 (PIN_INPUT | MUX_MODE1) /* spi0_cs1.uart3_rxd */
+ 0x164 (PIN_OUTPUT | MUX_MODE1) /* ecap0_in_pwm0_out.uart3_txd */
+ >;
+ };
+
+ uart4_pins: uart4_pins {
+ pinctrl-single,pins = <
+ 0xd0 (PIN_INPUT_PULLUP | MUX_MODE6) /* lcd_data12.uart4_ctsn */
+ 0xd4 (PIN_OUTPUT | MUX_MODE6) /* lcd_data13.uart4_rtsn */
+ 0x168 (PIN_INPUT | MUX_MODE1) /* uart0_ctsn.uart4_rxd */
+ 0x16c (PIN_OUTPUT | MUX_MODE1) /* uart0_rtsn.uart4_txd */
+ >;
+ };
+
+ uart5_pins: uart5_pins {
+ pinctrl-single,pins = <
+ 0xd8 (PIN_INPUT | MUX_MODE4) /* lcd_data14.uart5_rxd */
+ 0x144 (PIN_OUTPUT | MUX_MODE3) /* rmii1_refclk.uart5_txd */
+ >;
+ };
+
+ mmc1_pins: mmc1_pins {
+ pinctrl-single,pins = <
+ 0xf0 (PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat0.mmc0_dat0 */
+ 0xf4 (PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat1.mmc0_dat1 */
+ 0xf8 (PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2.mmc0_dat2 */
+ 0xfc (PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
+ 0x100 (PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */
+ 0x104 (PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
+ 0x1e8 (PIN_INPUT_PULLUP | MUX_MODE7) /* emu1.gpio3[8] */
+ 0x1a0 (PIN_INPUT_PULLUP | MUX_MODE7) /* mcasp0_aclkr.gpio3[18] */
+ >;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>;
+ status = "okay";
+ rts-gpio = <&gpio0 13 GPIO_ACTIVE_HIGH>;
+ rs485-rts-active-high;
+ rs485-rx-during-tx;
+ rs485-rts-delay = <1 1>;
+ linux,rs485-enabled-at-boot-time;
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+ status = "okay";
+ rts-gpio = <&gpio2 15 GPIO_ACTIVE_HIGH>;
+ rs485-rts-active-high;
+ rs485-rts-delay = <1 1>;
+ linux,rs485-enabled-at-boot-time;
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pins>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_pins>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart5_pins>;
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ clock-frequency = <400000>;
+
+ gpio@20 {
+ compatible = "mcp,mcp23017";
+ reg = <0x20>;
+ };
+
+ tps: tps@24 {
+ reg = <0x24>;
+ };
+
+ eeprom@53 {
+ compatible = "mcp,24c02";
+ reg = <0x53>;
+ pagesize = <8>;
+ };
+
+ rtc@68 {
+ compatible = "dallas,ds1307";
+ reg = <0x68>;
+ };
+};
+
+&elm {
+ status = "okay";
+};
+
+&gpmc {
+ compatible = "ti,am3352-gpmc";
+ ti,hwmods = "gpmc";
+ status = "okay";
+ gpmc,num-waitpins = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpmc_pins>;
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x08000000 0x08000000>; /* CS0: NOR 128M */
+
+ nor@0,0 {
+ reg = <0 0x00000000 0x08000000>;
+ compatible = "cfi-flash";
+ linux,mtd-name = "spansion,s29gl010p11t";
+ bank-width = <2>;
+
+ gpmc,mux-add-data = <2>;
+
+ gpmc,sync-clk-ps = <0>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <160>;
+ gpmc,cs-wr-off-ns = <160>;
+ gpmc,adv-on-ns = <10>;
+ gpmc,adv-rd-off-ns = <30>;
+ gpmc,adv-wr-off-ns = <30>;
+ gpmc,oe-on-ns = <40>;
+ gpmc,oe-off-ns = <160>;
+ gpmc,we-on-ns = <40>;
+ gpmc,we-off-ns = <160>;
+ gpmc,rd-cycle-ns = <160>;
+ gpmc,wr-cycle-ns = <160>;
+ gpmc,access-ns = <150>;
+ gpmc,page-burst-access-ns = <10>;
+ gpmc,cycle2cycle-samecsen;
+ gpmc,cycle2cycle-delay-ns = <20>;
+ gpmc,wr-data-mux-bus-ns = <70>;
+ gpmc,wr-access-ns = <80>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /*
+ MTD partition table
+ ===================
+ +------------+-->0x00000000-> U-Boot start
+ | |
+ | |-->0x000BFFFF-> U-Boot end
+ | |-->0x000C0000-> ENV1 start
+ | |
+ | |-->0x000DFFFF-> ENV1 end
+ | |-->0x000E0000-> ENV2 start
+ | |
+ | |-->0x000FFFFF-> ENV2 end
+ | |-->0x00100000-> Kernel start
+ | |
+ | |-->0x004FFFFF-> Kernel end
+ | |-->0x00500000-> File system start
+ | |
+ | |-->0x014FFFFF-> File system end
+ | |-->0x01500000-> User data start
+ | |
+ | |-->0x03FFFFFF-> User data end
+ | |-->0x04000000-> Data storage start
+ | |
+ +------------+-->0x08000000-> NOR end (Free end)
+ */
+ partition@0 {
+ label = "boot";
+ reg = <0x00000000 0x000c0000>; /* 768KB */
+ };
+
+ partition@1 {
+ label = "env1";
+ reg = <0x000c0000 0x00020000>; /* 128KB */
+ };
+
+ partition@2 {
+ label = "env2";
+ reg = <0x000e0000 0x00020000>; /* 128KB */
+ };
+
+ partition@3 {
+ label = "kernel";
+ reg = <0x00100000 0x00400000>; /* 4MB */
+ };
+
+ partition@4 {
+ label = "rootfs";
+ reg = <0x00500000 0x01000000>; /* 16MB */
+ };
+
+ partition@5 {
+ label = "user";
+ reg = <0x01500000 0x02b00000>; /* 43MB */
+ };
+
+ partition@6 {
+ label = "data";
+ reg = <0x04000000 0x04000000>; /* 64MB */
+ };
+ };
+};
+
+&mac {
+ dual_emac = <1>;
+};
+
+&cpsw_emac0 {
+ phy_id = <&davinci_mdio>, <0>;
+ dual_emac_res_vlan = <1>;
+};
+
+&cpsw_emac1 {
+ phy_id = <&davinci_mdio>, <1>;
+ dual_emac_res_vlan = <2>;
+};
+
+&mmc1 {
+ status = "okay";
+ vmmc-supply = <&ldo4_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ bus-width = <4>;
+ cd-gpios = <&gpio3 8 0>;
+ wp-gpios = <&gpio3 18 0>;
+};
+
+#include "tps65217.dtsi"
+
+&tps {
+ regulators {
+ dcdc1_reg: regulator@0 {
+ /* +1.5V voltage with ±4% tolerance */
+ regulator-min-microvolt = <1450000>;
+ regulator-max-microvolt = <1550000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ dcdc2_reg: regulator@1 {
+ /* VDD_MPU voltage limits 0.95V - 1.1V with ±4% tolerance */
+ regulator-name = "vdd_mpu";
+ regulator-min-microvolt = <915000>;
+ regulator-max-microvolt = <1140000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ dcdc3_reg: regulator@2 {
+ /* VDD_CORE voltage limits 0.95V - 1.1V with ±4% tolerance */
+ regulator-name = "vdd_core";
+ regulator-min-microvolt = <915000>;
+ regulator-max-microvolt = <1140000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1_reg: regulator@3 {
+ /* +1.8V voltage with ±4% tolerance */
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <1870000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: regulator@4 {
+ /* +3.3V voltage with ±4% tolerance */
+ regulator-min-microvolt = <3175000>;
+ regulator-max-microvolt = <3430000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3_reg: regulator@5 {
+ /* +1.8V voltage with ±4% tolerance */
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <1870000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4_reg: regulator@6 {
+ /* +3.3V voltage with ±4% tolerance */
+ regulator-min-microvolt = <3175000>;
+ regulator-max-microvolt = <3430000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index f9c5da9c7fe1..f6d8ffe98d0b 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -18,6 +18,9 @@
interrupt-parent = <&intc>;
aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -30,6 +33,8 @@
usb1 = &usb1;
phy0 = &usb0_phy;
phy1 = &usb1_phy;
+ ethernet0 = &cpsw_emac0;
+ ethernet1 = &cpsw_emac1;
};
cpus {
@@ -57,6 +62,11 @@
};
};
+ pmu {
+ compatible = "arm,cortex-a8-pmu";
+ interrupts = <3>;
+ };
+
/*
* The soc node represents the soc top level view. It is used for IPs
* that are not memory mapped in the MPU view or for the MPU itself.
@@ -100,13 +110,25 @@
reg = <0x48200000 0x1000>;
};
+ edma: edma@49000000 {
+ compatible = "ti,edma3";
+ ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
+ reg = <0x49000000 0x10000>,
+ <0x44e10f90 0x10>;
+ interrupts = <12 13 14>;
+ #dma-cells = <1>;
+ dma-channels = <64>;
+ ti,edma-regions = <4>;
+ ti,edma-slots = <256>;
+ };
+
gpio0: gpio@44e07000 {
compatible = "ti,omap4-gpio";
ti,hwmods = "gpio1";
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
reg = <0x44e07000 0x1000>;
interrupts = <96>;
};
@@ -117,7 +139,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
reg = <0x4804c000 0x1000>;
interrupts = <98>;
};
@@ -128,7 +150,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
reg = <0x481ac000 0x1000>;
interrupts = <32>;
};
@@ -139,7 +161,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
reg = <0x481ae000 0x1000>;
interrupts = <62>;
};
@@ -228,6 +250,50 @@
status = "disabled";
};
+ mmc1: mmc@48060000 {
+ compatible = "ti,omap4-hsmmc";
+ ti,hwmods = "mmc1";
+ ti,dual-volt;
+ ti,needs-special-reset;
+ ti,needs-special-hs-handling;
+ dmas = <&edma 24
+ &edma 25>;
+ dma-names = "tx", "rx";
+ interrupts = <64>;
+ interrupt-parent = <&intc>;
+ reg = <0x48060000 0x1000>;
+ status = "disabled";
+ };
+
+ mmc2: mmc@481d8000 {
+ compatible = "ti,omap4-hsmmc";
+ ti,hwmods = "mmc2";
+ ti,needs-special-reset;
+ dmas = <&edma 2
+ &edma 3>;
+ dma-names = "tx", "rx";
+ interrupts = <28>;
+ interrupt-parent = <&intc>;
+ reg = <0x481d8000 0x1000>;
+ status = "disabled";
+ };
+
+ mmc3: mmc@47810000 {
+ compatible = "ti,omap4-hsmmc";
+ ti,hwmods = "mmc3";
+ ti,needs-special-reset;
+ interrupts = <29>;
+ interrupt-parent = <&intc>;
+ reg = <0x47810000 0x1000>;
+ status = "disabled";
+ };
+
+ hwspinlock: spinlock@480ca000 {
+ compatible = "ti,omap4-hwspinlock";
+ reg = <0x480ca000 0x1000>;
+ ti,hwmods = "spinlock";
+ };
+
wdt2: wdt@44e35000 {
compatible = "ti,omap3-wdt";
ti,hwmods = "wd_timer2";
@@ -323,6 +389,11 @@
interrupts = <65>;
ti,spi-num-cs = <2>;
ti,hwmods = "spi0";
+ dmas = <&edma 16
+ &edma 17
+ &edma 18
+ &edma 19>;
+ dma-names = "tx0", "rx0", "tx1", "rx1";
status = "disabled";
};
@@ -334,6 +405,11 @@
interrupts = <125>;
ti,spi-num-cs = <2>;
ti,hwmods = "spi1";
+ dmas = <&edma 42
+ &edma 43
+ &edma 44
+ &edma 45>;
+ dma-names = "tx0", "rx0", "tx1", "rx1";
status = "disabled";
};
@@ -346,7 +422,7 @@
ti,hwmods = "usb_otg_hs";
status = "disabled";
- ctrl_mod: control@44e10000 {
+ usb_ctrl_mod: control@44e10000 {
compatible = "ti,am335x-usb-ctrl-module";
reg = <0x44e10620 0x10
0x44e10648 0x4>;
@@ -359,7 +435,7 @@
reg = <0x47401300 0x100>;
reg-names = "phy";
status = "disabled";
- ti,ctrl_mod = <&ctrl_mod>;
+ ti,ctrl_mod = <&usb_ctrl_mod>;
};
usb0: usb@47401000 {
@@ -407,7 +483,7 @@
reg = <0x47401b00 0x100>;
reg-names = "phy";
status = "disabled";
- ti,ctrl_mod = <&ctrl_mod>;
+ ti,ctrl_mod = <&usb_ctrl_mod>;
};
usb1: usb@47401800 {
@@ -594,6 +670,12 @@
/* Filled in by U-Boot */
mac-address = [ 00 00 00 00 00 00 ];
};
+
+ phy_sel: cpsw-phy-sel@44e10650 {
+ compatible = "ti,am3352-cpsw-phy-sel";
+ reg = <0x44e10650 0x4>;
+ reg-names = "gmii-sel";
+ };
};
ocmcram: ocmcram@40300000 {
@@ -607,6 +689,7 @@
reg = <0x44d00000 0x4000 /* M3 UMEM */
0x44d80000 0x2000>; /* M3 DMEM */
ti,hwmods = "wkup_m3";
+ ti,no-reset-on-init;
};
elm: elm@48080000 {
@@ -617,6 +700,15 @@
status = "disabled";
};
+ lcdc: lcdc@4830e000 {
+ compatible = "ti,am33xx-tilcdc";
+ reg = <0x4830e000 0x1000>;
+ interrupt-parent = <&intc>;
+ interrupts = <36>;
+ ti,hwmods = "lcdc";
+ status = "disabled";
+ };
+
tscadc: tscadc@44e0d000 {
compatible = "ti,am3359-tscadc";
reg = <0x44e0d000 0x1000>;
@@ -637,6 +729,7 @@
gpmc: gpmc@50000000 {
compatible = "ti,am3352-gpmc";
ti,hwmods = "gpmc";
+ ti,no-idle-on-init;
reg = <0x50000000 0x2000>;
interrupts = <100>;
gpmc,num-cs = <7>;
@@ -645,5 +738,59 @@
#size-cells = <1>;
status = "disabled";
};
+
+ sham: sham@53100000 {
+ compatible = "ti,omap4-sham";
+ ti,hwmods = "sham";
+ reg = <0x53100000 0x200>;
+ interrupts = <109>;
+ dmas = <&edma 36>;
+ dma-names = "rx";
+ };
+
+ aes: aes@53500000 {
+ compatible = "ti,omap4-aes";
+ ti,hwmods = "aes";
+ reg = <0x53500000 0xa0>;
+ interrupts = <103>;
+ dmas = <&edma 6>,
+ <&edma 5>;
+ dma-names = "tx", "rx";
+ };
+
+ mcasp0: mcasp@48038000 {
+ compatible = "ti,am33xx-mcasp-audio";
+ ti,hwmods = "mcasp0";
+ reg = <0x48038000 0x2000>,
+ <0x46000000 0x400000>;
+ reg-names = "mpu", "dat";
+ interrupts = <80>, <81>;
+ interrupt-names = "tx", "rx";
+ status = "disabled";
+ dmas = <&edma 8>,
+ <&edma 9>;
+ dma-names = "tx", "rx";
+ };
+
+ mcasp1: mcasp@4803C000 {
+ compatible = "ti,am33xx-mcasp-audio";
+ ti,hwmods = "mcasp1";
+ reg = <0x4803C000 0x2000>,
+ <0x46400000 0x400000>;
+ reg-names = "mpu", "dat";
+ interrupts = <82>, <83>;
+ interrupt-names = "tx", "rx";
+ status = "disabled";
+ dmas = <&edma 10>,
+ <&edma 11>;
+ dma-names = "tx", "rx";
+ };
+
+ rng: rng@48310000 {
+ compatible = "ti,omap4-rng";
+ ti,hwmods = "rng";
+ reg = <0x48310000 0x2000>;
+ interrupts = <111>;
+ };
};
};
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index ddc1df77ac52..974d103ab3b1 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -18,12 +18,21 @@
aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
serial0 = &uart0;
+ ethernet0 = &cpsw_emac0;
+ ethernet1 = &cpsw_emac1;
};
cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
cpu@0 {
compatible = "arm,cortex-a9";
+ device_type = "cpu";
+ reg = <0>;
};
};
@@ -35,16 +44,100 @@
<0x48240100 0x0100>;
};
+ l2-cache-controller@48242000 {
+ compatible = "arm,pl310-cache";
+ reg = <0x48242000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ am43xx_pinmux: pinmux@44e10800 {
+ compatible = "pinctrl-single";
+ reg = <0x44e10800 0x31c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0xffffffff>;
+ };
+
ocp {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges;
+ ti,hwmods = "l3_main";
+
+ edma: edma@49000000 {
+ compatible = "ti,edma3";
+ ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
+ reg = <0x49000000 0x10000>,
+ <0x44e10f90 0x10>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ dma-channels = <64>;
+ ti,edma-regions = <4>;
+ ti,edma-slots = <256>;
+ };
uart0: serial@44e09000 {
compatible = "ti,am4372-uart","ti,omap2-uart";
reg = <0x44e09000 0x2000>;
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "uart1";
+ };
+
+ uart1: serial@48022000 {
+ compatible = "ti,am4372-uart","ti,omap2-uart";
+ reg = <0x48022000 0x2000>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "uart2";
+ status = "disabled";
+ };
+
+ uart2: serial@48024000 {
+ compatible = "ti,am4372-uart","ti,omap2-uart";
+ reg = <0x48024000 0x2000>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "uart3";
+ status = "disabled";
+ };
+
+ uart3: serial@481a6000 {
+ compatible = "ti,am4372-uart","ti,omap2-uart";
+ reg = <0x481a6000 0x2000>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "uart4";
+ status = "disabled";
+ };
+
+ uart4: serial@481a8000 {
+ compatible = "ti,am4372-uart","ti,omap2-uart";
+ reg = <0x481a8000 0x2000>;
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "uart5";
+ status = "disabled";
+ };
+
+ uart5: serial@481aa000 {
+ compatible = "ti,am4372-uart","ti,omap2-uart";
+ reg = <0x481aa000 0x2000>;
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "uart6";
+ status = "disabled";
+ };
+
+ mailbox: mailbox@480C8000 {
+ compatible = "ti,omap4-mailbox";
+ reg = <0x480C8000 0x200>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "mailbox";
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <8>;
+ ti,mbox-names = "wkup_m3";
+ ti,mbox-data = <0 0 0 0>;
+ status = "disabled";
};
timer1: timer@44e31000 {
@@ -52,17 +145,523 @@
reg = <0x44e31000 0x400>;
interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
ti,timer-alwon;
+ ti,hwmods = "timer1";
};
timer2: timer@48040000 {
compatible = "ti,am4372-timer","ti,am335x-timer";
reg = <0x48040000 0x400>;
interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer2";
+ };
+
+ timer3: timer@48042000 {
+ compatible = "ti,am4372-timer","ti,am335x-timer";
+ reg = <0x48042000 0x400>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer3";
+ status = "disabled";
+ };
+
+ timer4: timer@48044000 {
+ compatible = "ti,am4372-timer","ti,am335x-timer";
+ reg = <0x48044000 0x400>;
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-pwm;
+ ti,hwmods = "timer4";
+ status = "disabled";
+ };
+
+ timer5: timer@48046000 {
+ compatible = "ti,am4372-timer","ti,am335x-timer";
+ reg = <0x48046000 0x400>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-pwm;
+ ti,hwmods = "timer5";
+ status = "disabled";
+ };
+
+ timer6: timer@48048000 {
+ compatible = "ti,am4372-timer","ti,am335x-timer";
+ reg = <0x48048000 0x400>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-pwm;
+ ti,hwmods = "timer6";
+ status = "disabled";
+ };
+
+ timer7: timer@4804a000 {
+ compatible = "ti,am4372-timer","ti,am335x-timer";
+ reg = <0x4804a000 0x400>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-pwm;
+ ti,hwmods = "timer7";
+ status = "disabled";
+ };
+
+ timer8: timer@481c1000 {
+ compatible = "ti,am4372-timer","ti,am335x-timer";
+ reg = <0x481c1000 0x400>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer8";
+ status = "disabled";
+ };
+
+ timer9: timer@4833d000 {
+ compatible = "ti,am4372-timer","ti,am335x-timer";
+ reg = <0x4833d000 0x400>;
+ interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer9";
+ status = "disabled";
+ };
+
+ timer10: timer@4833f000 {
+ compatible = "ti,am4372-timer","ti,am335x-timer";
+ reg = <0x4833f000 0x400>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer10";
+ status = "disabled";
+ };
+
+ timer11: timer@48341000 {
+ compatible = "ti,am4372-timer","ti,am335x-timer";
+ reg = <0x48341000 0x400>;
+ interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer11";
+ status = "disabled";
};
counter32k: counter@44e86000 {
compatible = "ti,am4372-counter32k","ti,omap-counter32k";
reg = <0x44e86000 0x40>;
+ ti,hwmods = "counter_32k";
+ };
+
+ rtc@44e3e000 {
+ compatible = "ti,am4372-rtc","ti,da830-rtc";
+ reg = <0x44e3e000 0x1000>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "rtc";
+ status = "disabled";
+ };
+
+ wdt@44e35000 {
+ compatible = "ti,am4372-wdt","ti,omap3-wdt";
+ reg = <0x44e35000 0x1000>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "wd_timer2";
+ };
+
+ gpio0: gpio@44e07000 {
+ compatible = "ti,am4372-gpio","ti,omap4-gpio";
+ reg = <0x44e07000 0x1000>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,hwmods = "gpio1";
+ status = "disabled";
+ };
+
+ gpio1: gpio@4804c000 {
+ compatible = "ti,am4372-gpio","ti,omap4-gpio";
+ reg = <0x4804c000 0x1000>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,hwmods = "gpio2";
+ status = "disabled";
+ };
+
+ gpio2: gpio@481ac000 {
+ compatible = "ti,am4372-gpio","ti,omap4-gpio";
+ reg = <0x481ac000 0x1000>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,hwmods = "gpio3";
+ status = "disabled";
+ };
+
+ gpio3: gpio@481ae000 {
+ compatible = "ti,am4372-gpio","ti,omap4-gpio";
+ reg = <0x481ae000 0x1000>;
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,hwmods = "gpio4";
+ status = "disabled";
+ };
+
+ gpio4: gpio@48320000 {
+ compatible = "ti,am4372-gpio","ti,omap4-gpio";
+ reg = <0x48320000 0x1000>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,hwmods = "gpio5";
+ status = "disabled";
+ };
+
+ gpio5: gpio@48322000 {
+ compatible = "ti,am4372-gpio","ti,omap4-gpio";
+ reg = <0x48322000 0x1000>;
+ interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,hwmods = "gpio6";
+ status = "disabled";
+ };
+
+ i2c0: i2c@44e0b000 {
+ compatible = "ti,am4372-i2c","ti,omap4-i2c";
+ reg = <0x44e0b000 0x1000>;
+ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "i2c1";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@4802a000 {
+ compatible = "ti,am4372-i2c","ti,omap4-i2c";
+ reg = <0x4802a000 0x1000>;
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "i2c2";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@4819c000 {
+ compatible = "ti,am4372-i2c","ti,omap4-i2c";
+ reg = <0x4819c000 0x1000>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "i2c3";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi0: spi@48030000 {
+ compatible = "ti,am4372-mcspi","ti,omap4-mcspi";
+ reg = <0x48030000 0x400>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "spi0";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ mmc1: mmc@48060000 {
+ compatible = "ti,omap4-hsmmc";
+ reg = <0x48060000 0x1000>;
+ ti,hwmods = "mmc1";
+ ti,dual-volt;
+ ti,needs-special-reset;
+ dmas = <&edma 24
+ &edma 25>;
+ dma-names = "tx", "rx";
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ mmc2: mmc@481d8000 {
+ compatible = "ti,omap4-hsmmc";
+ reg = <0x481d8000 0x1000>;
+ ti,hwmods = "mmc2";
+ ti,needs-special-reset;
+ dmas = <&edma 2
+ &edma 3>;
+ dma-names = "tx", "rx";
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ mmc3: mmc@47810000 {
+ compatible = "ti,omap4-hsmmc";
+ reg = <0x47810000 0x1000>;
+ ti,hwmods = "mmc3";
+ ti,needs-special-reset;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ spi1: spi@481a0000 {
+ compatible = "ti,am4372-mcspi","ti,omap4-mcspi";
+ reg = <0x481a0000 0x400>;
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "spi1";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi2: spi@481a2000 {
+ compatible = "ti,am4372-mcspi","ti,omap4-mcspi";
+ reg = <0x481a2000 0x400>;
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "spi2";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi3: spi@481a4000 {
+ compatible = "ti,am4372-mcspi","ti,omap4-mcspi";
+ reg = <0x481a4000 0x400>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "spi3";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi4: spi@48345000 {
+ compatible = "ti,am4372-mcspi","ti,omap4-mcspi";
+ reg = <0x48345000 0x400>;
+ interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "spi4";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ mac: ethernet@4a100000 {
+ compatible = "ti,am4372-cpsw","ti,cpsw";
+ reg = <0x4a100000 0x800
+ 0x4a101200 0x100>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ti,hwmods = "cpgmac0";
+ status = "disabled";
+ cpdma_channels = <8>;
+ ale_entries = <1024>;
+ bd_ram_size = <0x2000>;
+ no_bd_ram = <0>;
+ rx_descs = <64>;
+ mac_control = <0x20>;
+ slaves = <2>;
+ active_slave = <0>;
+ cpts_clock_mult = <0x80000000>;
+ cpts_clock_shift = <29>;
+ ranges;
+
+ davinci_mdio: mdio@4a101000 {
+ compatible = "ti,am4372-mdio","ti,davinci_mdio";
+ reg = <0x4a101000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "davinci_mdio";
+ bus_freq = <1000000>;
+ status = "disabled";
+ };
+
+ cpsw_emac0: slave@4a100200 {
+ /* Filled in by U-Boot */
+ mac-address = [ 00 00 00 00 00 00 ];
+ };
+
+ cpsw_emac1: slave@4a100300 {
+ /* Filled in by U-Boot */
+ mac-address = [ 00 00 00 00 00 00 ];
+ };
+ };
+
+ epwmss0: epwmss@48300000 {
+ compatible = "ti,am4372-pwmss","ti,am33xx-pwmss";
+ reg = <0x48300000 0x10>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ ti,hwmods = "epwmss0";
+ status = "disabled";
+
+ ecap0: ecap@48300100 {
+ compatible = "ti,am4372-ecap","ti,am33xx-ecap";
+ reg = <0x48300100 0x80>;
+ ti,hwmods = "ecap0";
+ status = "disabled";
+ };
+
+ ehrpwm0: ehrpwm@48300200 {
+ compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
+ reg = <0x48300200 0x80>;
+ ti,hwmods = "ehrpwm0";
+ status = "disabled";
+ };
+ };
+
+ epwmss1: epwmss@48302000 {
+ compatible = "ti,am4372-pwmss","ti,am33xx-pwmss";
+ reg = <0x48302000 0x10>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ ti,hwmods = "epwmss1";
+ status = "disabled";
+
+ ecap1: ecap@48302100 {
+ compatible = "ti,am4372-ecap","ti,am33xx-ecap";
+ reg = <0x48302100 0x80>;
+ ti,hwmods = "ecap1";
+ status = "disabled";
+ };
+
+ ehrpwm1: ehrpwm@48302200 {
+ compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
+ reg = <0x48302200 0x80>;
+ ti,hwmods = "ehrpwm1";
+ status = "disabled";
+ };
+ };
+
+ epwmss2: epwmss@48304000 {
+ compatible = "ti,am4372-pwmss","ti,am33xx-pwmss";
+ reg = <0x48304000 0x10>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ ti,hwmods = "epwmss2";
+ status = "disabled";
+
+ ecap2: ecap@48304100 {
+ compatible = "ti,am4372-ecap","ti,am33xx-ecap";
+ reg = <0x48304100 0x80>;
+ ti,hwmods = "ecap2";
+ status = "disabled";
+ };
+
+ ehrpwm2: ehrpwm@48304200 {
+ compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
+ reg = <0x48304200 0x80>;
+ ti,hwmods = "ehrpwm2";
+ status = "disabled";
+ };
+ };
+
+ epwmss3: epwmss@48306000 {
+ compatible = "ti,am4372-pwmss","ti,am33xx-pwmss";
+ reg = <0x48306000 0x10>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ ti,hwmods = "epwmss3";
+ status = "disabled";
+
+ ehrpwm3: ehrpwm@48306200 {
+ compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
+ reg = <0x48306200 0x80>;
+ ti,hwmods = "ehrpwm3";
+ status = "disabled";
+ };
+ };
+
+ epwmss4: epwmss@48308000 {
+ compatible = "ti,am4372-pwmss","ti,am33xx-pwmss";
+ reg = <0x48308000 0x10>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ ti,hwmods = "epwmss4";
+ status = "disabled";
+
+ ehrpwm4: ehrpwm@48308200 {
+ compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
+ reg = <0x48308200 0x80>;
+ ti,hwmods = "ehrpwm4";
+ status = "disabled";
+ };
+ };
+
+ epwmss5: epwmss@4830a000 {
+ compatible = "ti,am4372-pwmss","ti,am33xx-pwmss";
+ reg = <0x4830a000 0x10>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ ti,hwmods = "epwmss5";
+ status = "disabled";
+
+ ehrpwm5: ehrpwm@4830a200 {
+ compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
+ reg = <0x4830a200 0x80>;
+ ti,hwmods = "ehrpwm5";
+ status = "disabled";
+ };
+ };
+
+ sham: sham@53100000 {
+ compatible = "ti,omap5-sham";
+ ti,hwmods = "sham";
+ reg = <0x53100000 0x300>;
+ dmas = <&edma 36>;
+ dma-names = "rx";
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ aes: aes@53501000 {
+ compatible = "ti,omap4-aes";
+ ti,hwmods = "aes";
+ reg = <0x53501000 0xa0>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&edma 6
+ &edma 5>;
+ dma-names = "tx", "rx";
+ };
+
+ des: des@53701000 {
+ compatible = "ti,omap4-des";
+ ti,hwmods = "des";
+ reg = <0x53701000 0xa0>;
+ interrupts = <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&edma 34
+ &edma 33>;
+ dma-names = "tx", "rx";
+ };
+
+ mcasp0: mcasp@48038000 {
+ compatible = "ti,am33xx-mcasp-audio";
+ ti,hwmods = "mcasp0";
+ reg = <0x48038000 0x2000>,
+ <0x46000000 0x400000>;
+ reg-names = "mpu", "dat";
+ interrupts = <80>, <81>;
+ interrupt-names = "tx", "rx";
+ status = "disabled";
+ dmas = <&edma 8>,
+ <&edma 9>;
+ dma-names = "tx", "rx";
+ };
+
+ mcasp1: mcasp@4803C000 {
+ compatible = "ti,am33xx-mcasp-audio";
+ ti,hwmods = "mcasp1";
+ reg = <0x4803C000 0x2000>,
+ <0x46400000 0x400000>;
+ reg-names = "mpu", "dat";
+ interrupts = <82>, <83>;
+ interrupt-names = "tx", "rx";
+ status = "disabled";
+ dmas = <&edma 10>,
+ <&edma 11>;
+ dma-names = "tx", "rx";
};
};
};
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 74174d48f476..fbf9c4c7a94f 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -11,8 +11,176 @@
/dts-v1/;
#include "am4372.dtsi"
+#include <dt-bindings/pinctrl/am43xx.h>
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "TI AM43x EPOS EVM";
compatible = "ti,am43x-epos-evm","ti,am4372","ti,am43";
+
+ vmmcsd_fixed: fixedregulator-sd {
+ compatible = "regulator-fixed";
+ regulator-name = "vmmcsd_fixed";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-active-high;
+ };
+
+ am43xx_pinmux: pinmux@44e10800 {
+ cpsw_default: cpsw_default {
+ pinctrl-single,pins = <
+ /* Slave 1 */
+ 0x10c (PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_crs.rmii1_crs */
+ 0x110 (PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_rxerr.rmii1_rxerr */
+ 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txen.rmii1_txen */
+ 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_rxdv.rmii1_rxdv */
+ 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd1.rmii1_txd1 */
+ 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd0.rmii1_txd0 */
+ 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_rxd1.rmii1_rxd1 */
+ 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_rxd0.rmii1_rxd0 */
+ 0x144 (PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii1_refclk.rmii1_refclk */
+ >;
+ };
+
+ cpsw_sleep: cpsw_sleep {
+ pinctrl-single,pins = <
+ /* Slave 1 reset value */
+ 0x10c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x144 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
+
+ davinci_mdio_default: davinci_mdio_default {
+ pinctrl-single,pins = <
+ /* MDIO */
+ 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
+ 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ >;
+ };
+
+ davinci_mdio_sleep: davinci_mdio_sleep {
+ pinctrl-single,pins = <
+ /* MDIO reset value */
+ 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
+
+ i2c0_pins: pinmux_i2c0_pins {
+ pinctrl-single,pins = <
+ 0x188 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* i2c0_sda.i2c0_sda */
+ 0x18c (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ >;
+ };
+ };
+
+ matrix_keypad: matrix_keypad@0 {
+ compatible = "gpio-matrix-keypad";
+ debounce-delay-ms = <5>;
+ col-scan-delay-us = <2>;
+
+ row-gpios = <&gpio0 12 GPIO_ACTIVE_HIGH /* Bank0, pin12 */
+ &gpio0 13 GPIO_ACTIVE_HIGH /* Bank0, pin13 */
+ &gpio0 14 GPIO_ACTIVE_HIGH /* Bank0, pin14 */
+ &gpio0 15 GPIO_ACTIVE_HIGH>; /* Bank0, pin15 */
+
+ col-gpios = <&gpio3 9 GPIO_ACTIVE_HIGH /* Bank3, pin9 */
+ &gpio3 10 GPIO_ACTIVE_HIGH /* Bank3, pin10 */
+ &gpio2 18 GPIO_ACTIVE_HIGH /* Bank2, pin18 */
+ &gpio2 19 GPIO_ACTIVE_HIGH>; /* Bank2, pin19 */
+
+ linux,keymap = <0x00000201 /* P1 */
+ 0x01000204 /* P4 */
+ 0x02000207 /* P7 */
+ 0x0300020a /* NUMERIC_STAR */
+ 0x00010202 /* P2 */
+ 0x01010205 /* P5 */
+ 0x02010208 /* P8 */
+ 0x03010200 /* P0 */
+ 0x00020203 /* P3 */
+ 0x01020206 /* P6 */
+ 0x02020209 /* P9 */
+ 0x0302020b /* NUMERIC_POUND */
+ 0x00030067 /* UP */
+ 0x0103006a /* RIGHT */
+ 0x0203006c /* DOWN */
+ 0x03030069>; /* LEFT */
+ };
+};
+
+&mmc1 {
+ status = "okay";
+ vmmc-supply = <&vmmcsd_fixed>;
+ bus-width = <4>;
+};
+
+&mac {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&cpsw_default>;
+ pinctrl-1 = <&cpsw_sleep>;
+ status = "okay";
+};
+
+&davinci_mdio {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&davinci_mdio_default>;
+ pinctrl-1 = <&davinci_mdio_sleep>;
+ status = "okay";
+};
+
+&cpsw_emac0 {
+ phy_id = <&davinci_mdio>, <16>;
+ phy-mode = "rmii";
+};
+
+&cpsw_emac1 {
+ phy_id = <&davinci_mdio>, <1>;
+ phy-mode = "rmii";
+};
+
+&i2c0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+
+ at24@50 {
+ compatible = "at24,24c256";
+ pagesize = <64>;
+ reg = <0x50>;
+ };
+
+ pixcir_ts@5c {
+ compatible = "pixcir,pixcir_ts";
+ reg = <0x5c>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <17 0>;
+
+ attb-gpio = <&gpio1 17 GPIO_ACTIVE_HIGH>;
+
+ x-size = <1024>;
+ y-size = <768>;
+ };
+};
+
+&gpio0 {
+ status = "okay";
+};
+
+&gpio1 {
+ status = "okay";
+};
+
+&gpio2 {
+ status = "okay";
+};
+
+&gpio3 {
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/armada-370-netgear-rn104.dts b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
new file mode 100644
index 000000000000..b0b32f5fbeb4
--- /dev/null
+++ b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
@@ -0,0 +1,193 @@
+/*
+ * Device Tree file for NETGEAR ReadyNAS 104
+ *
+ * Copyright (C) 2013, Arnaud EBALARD <arno@natisbad.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/dts-v1/;
+
+#include "armada-370.dtsi"
+
+/ {
+ model = "NETGEAR ReadyNAS 104";
+ compatible = "netgear,readynas-104", "marvell,armada370", "marvell,armada-370-xp";
+
+ chosen {
+ bootargs = "console=ttyS0,115200 earlyprintk";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x20000000>; /* 512 MB */
+ };
+
+ soc {
+ ranges = <MBUS_ID(0xf0, 0x01) 0 0xd0000000 0x100000
+ MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000>;
+
+ pcie-controller {
+ status = "okay";
+
+ /* Connected to FL1009 USB 3.0 controller */
+ pcie@1,0 {
+ /* Port 0, Lane 0 */
+ status = "okay";
+ };
+
+ /* Connected to Marvell 88SE9215 SATA controller */
+ pcie@2,0 {
+ /* Port 1, Lane 0 */
+ status = "okay";
+ };
+ };
+
+ internal-regs {
+ serial@12000 {
+ clock-frequency = <200000000>;
+ status = "okay";
+ };
+
+ pinctrl {
+ poweroff: poweroff {
+ marvell,pins = "mpp60";
+ marvell,function = "gpio";
+ };
+
+ backup_key_pin: backup-key-pin {
+ marvell,pins = "mpp52";
+ marvell,function = "gpio";
+ };
+
+ power_key_pin: power-key-pin {
+ marvell,pins = "mpp62";
+ marvell,function = "gpio";
+ };
+
+ backup_led_pin: backup-led-pin {
+ marvell,pins = "mpp63";
+ marvell,function = "gpo";
+ };
+
+ power_led_pin: power-led-pin {
+ marvell,pins = "mpp64";
+ marvell,function = "gpio";
+ };
+
+ reset_key_pin: reset-key-pin {
+ marvell,pins = "mpp65";
+ marvell,function = "gpio";
+ };
+ };
+
+ mdio {
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+
+ ethernet@70000 {
+ status = "okay";
+ phy = <&phy0>;
+ phy-mode = "rgmii-id";
+ };
+
+ ethernet@74000 {
+ status = "okay";
+ phy = <&phy1>;
+ phy-mode = "rgmii-id";
+ };
+
+ usb@50000 {
+ status = "okay";
+ };
+
+ i2c@11000 {
+ compatible = "marvell,mv64xxx-i2c";
+ clock-frequency = <100000>;
+ status = "okay";
+
+ g762: g762@3e {
+ compatible = "gmt,g762";
+ reg = <0x3e>;
+ clocks = <&g762_clk>; /* input clock */
+ fan_gear_mode = <0>;
+ fan_startv = <1>;
+ pwm_polarity = <0>;
+ };
+ };
+ };
+ };
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ g762_clk: fixedclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <8192>;
+ };
+ };
+
+ gpio_leds {
+ compatible = "gpio-leds";
+ pinctrl-0 = <&backup_led_pin &power_led_pin>;
+ pinctrl-names = "default";
+
+ blue_backup_led {
+ label = "rn104:blue:backup";
+ gpios = <&gpio1 31 0>; /* GPIO 63 Active High */
+ default-state = "off";
+ };
+
+ blue_power_led {
+ label = "rn104:blue:pwr";
+ gpios = <&gpio2 0 1>; /* GPIO 64 Active Low */
+ linux,default-trigger = "keep";
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&backup_key_pin
+ &power_key_pin
+ &reset_key_pin>;
+ pinctrl-names = "default";
+
+ button@1 {
+ label = "Backup Button";
+ linux,code = <133>; /* KEY_COPY */
+ gpios = <&gpio1 20 1>;
+ };
+
+ button@2 {
+ label = "Power Button";
+ linux,code = <116>; /* KEY_POWER */
+ gpios = <&gpio1 30 0>;
+ };
+
+ button@3 {
+ label = "Reset Button";
+ linux,code = <0x198>; /* KEY_RESTART */
+ gpios = <&gpio2 1 1>;
+ };
+ };
+
+ gpio_poweroff {
+ compatible = "gpio-poweroff";
+ pinctrl-0 = <&poweroff>;
+ pinctrl-names = "default";
+ gpios = <&gpio1 28 1>;
+ };
+};
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 1de2dae0fdae..00d6a798c705 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -113,6 +113,7 @@
#interrupt-cells = <1>;
#size-cells = <1>;
interrupt-controller;
+ msi-controller;
};
coherency-fabric@20200 {
@@ -137,6 +138,14 @@
status = "disabled";
};
+ coredivclk: corediv-clock@18740 {
+ compatible = "marvell,armada-370-corediv-clock";
+ reg = <0x18740 0xc>;
+ #clock-cells = <1>;
+ clocks = <&mainpll>;
+ clock-output-names = "nand";
+ };
+
timer@20300 {
reg = <0x20300 0x30>, <0x21040 0x30>;
interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
@@ -176,7 +185,6 @@
i2c0: i2c@11000 {
compatible = "marvell,mv64xxx-i2c";
- reg = <0x11000 0x20>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <31>;
@@ -187,7 +195,6 @@
i2c1: i2c@11100 {
compatible = "marvell,mv64xxx-i2c";
- reg = <0x11100 0x20>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <32>;
@@ -252,4 +259,13 @@
};
};
+
+ clocks {
+ /* 2 GHz fixed main PLL */
+ mainpll: mainpll {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <2000000000>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index e134d7a90c9a..7a4b82e71aaf 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -44,6 +44,7 @@
#address-cells = <3>;
#size-cells = <2>;
+ msi-parent = <&mpic>;
bus-range = <0x00 0xff>;
ranges =
@@ -218,6 +219,14 @@
};
};
+ i2c0: i2c@11000 {
+ reg = <0x11000 0x20>;
+ };
+
+ i2c1: i2c@11100 {
+ reg = <0x11100 0x20>;
+ };
+
usb@50000 {
clocks = <&coreclk 0>;
};
diff --git a/arch/arm/boot/dts/armada-xp-matrix.dts b/arch/arm/boot/dts/armada-xp-matrix.dts
new file mode 100644
index 000000000000..e47c49ecd55c
--- /dev/null
+++ b/arch/arm/boot/dts/armada-xp-matrix.dts
@@ -0,0 +1,75 @@
+/*
+ * Device Tree file for Marvell Armada XP Matrix board
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-xp-mv78460.dtsi"
+
+/ {
+ model = "Marvell Armada XP Matrix Board";
+ compatible = "marvell,axp-matrix", "marvell,armadaxp-mv78460", "marvell,armadaxp", "marvell,armada-370-xp";
+
+ chosen {
+ bootargs = "console=ttyS0,115200 earlyprintk";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0 0x00000000 0 0x80000000>; /* 2 GB */
+ };
+
+ soc {
+ ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
+ MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000>;
+
+ internal-regs {
+ serial@12000 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+ serial@12100 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+ serial@12200 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+ serial@12300 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+
+ sata@a0000 {
+ nr-ports = <2>;
+ status = "okay";
+ };
+
+ ethernet@30000 {
+ status = "okay";
+ phy-mode = "sgmii";
+ };
+
+ pcie-controller {
+ status = "okay";
+
+ pcie@1,0 {
+ /* Port 0, Lane 0 */
+ status = "okay";
+ };
+ };
+
+ usb@50000 {
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
index 0358a33cba48..3f5e6121c730 100644
--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
@@ -57,6 +57,7 @@
#address-cells = <3>;
#size-cells = <2>;
+ msi-parent = <&mpic>;
bus-range = <0x00 0xff>;
ranges =
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 0e82c5062243..3e9fd1353f89 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -58,6 +58,7 @@
#address-cells = <3>;
#size-cells = <2>;
+ msi-parent = <&mpic>;
bus-range = <0x00 0xff>;
ranges =
diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
index e82c1b80af17..31ba6d8fbadf 100644
--- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
@@ -74,6 +74,7 @@
#address-cells = <3>;
#size-cells = <2>;
+ msi-parent = <&mpic>;
bus-range = <0x00 0xff>;
ranges =
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index 3058522f5aad..281c6447e872 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -147,6 +147,16 @@
};
};
+ i2c0: i2c@11000 {
+ compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
+ reg = <0x11000 0x100>;
+ };
+
+ i2c1: i2c@11100 {
+ compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
+ reg = <0x11100 0x100>;
+ };
+
usb@50000 {
clocks = <&gateclk 18>;
};
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 56ee8282a7a8..997901f7ed73 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -648,6 +648,11 @@
watchdog@fffffd40 {
compatible = "atmel,at91sam9260-wdt";
reg = <0xfffffd40 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ atmel,watchdog-type = "hardware";
+ atmel,reset-type = "all";
+ atmel,dbg-halt;
+ atmel,idle-halt;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index d5bd65f74602..45fb0a46d398 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -523,6 +523,11 @@
watchdog@fffffd40 {
compatible = "atmel,at91sam9260-wdt";
reg = <0xfffffd40 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ atmel,watchdog-type = "hardware";
+ atmel,reset-type = "all";
+ atmel,dbg-halt;
+ atmel,idle-halt;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index 137354689ad0..cb2c010e08e2 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -96,7 +96,6 @@
};
spi0: spi@fffc8000 {
- status = "okay";
cs-gpios = <0>, <&pioC 11 0>, <0>, <0>;
mtd_dataflash@0 {
compatible = "atmel,at45", "atmel,dataflash";
diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi
index b4ec6fe53fc7..17b879990914 100644
--- a/arch/arm/boot/dts/at91sam9g25.dtsi
+++ b/arch/arm/boot/dts/at91sam9g25.dtsi
@@ -7,6 +7,8 @@
*/
#include "at91sam9x5.dtsi"
+#include "at91sam9x5_usart3.dtsi"
+#include "at91sam9x5_macb0.dtsi"
/ {
model = "Atmel AT91SAM9G25 SoC";
diff --git a/arch/arm/boot/dts/at91sam9g35.dtsi b/arch/arm/boot/dts/at91sam9g35.dtsi
index bebf9f55614b..e35c2fcf8298 100644
--- a/arch/arm/boot/dts/at91sam9g35.dtsi
+++ b/arch/arm/boot/dts/at91sam9g35.dtsi
@@ -7,6 +7,7 @@
*/
#include "at91sam9x5.dtsi"
+#include "at91sam9x5_macb0.dtsi"
/ {
model = "Atmel AT91SAM9G35 SoC";
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index c3e514837074..16534c70012d 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -639,6 +639,11 @@
watchdog@fffffd40 {
compatible = "atmel,at91sam9260-wdt";
reg = <0xfffffd40 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ atmel,watchdog-type = "hardware";
+ atmel,reset-type = "all";
+ atmel,dbg-halt;
+ atmel,idle-halt;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 9fb7ffd32af2..b30a6e08d027 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -437,6 +437,9 @@
compatible = "atmel,at91sam9g45-ssc";
reg = <0xf0010000 0x4000>;
interrupts = <28 IRQ_TYPE_LEVEL_HIGH 5>;
+ dmas = <&dma 0 AT91_DMA_CFG_PER_ID(21)>,
+ <&dma 0 AT91_DMA_CFG_PER_ID(22)>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
status = "disabled";
@@ -537,6 +540,11 @@
watchdog@fffffe40 {
compatible = "atmel,at91sam9260-wdt";
reg = <0xfffffe40 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ atmel,watchdog-type = "hardware";
+ atmel,reset-type = "all";
+ atmel,dbg-halt;
+ atmel,idle-halt;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index 27a9352b9d7a..e9487f6f0166 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -38,9 +38,18 @@
status = "okay";
};
+ ssc0: ssc@f0010000 {
+ status = "okay";
+ };
+
i2c0: i2c@f8010000 {
status = "okay";
+ wm8904: codec@1a {
+ compatible = "wm8904";
+ reg = <0x1a>;
+ };
+
qt1070: keyboard@1b {
compatible = "qt1070";
reg = <0x1b>;
@@ -82,6 +91,13 @@
<AT91_PIOA 2 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
};
};
+
+ sound {
+ pinctrl_pck0_as_audio_mck: pck0_as_audio_mck {
+ atmel,pins =
+ <AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE>;
+ };
+ };
};
spi0: spi@f0000000 {
@@ -142,4 +158,22 @@
gpio-key,wakeup;
};
};
+
+ sound {
+ compatible = "atmel,asoc-wm8904";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pck0_as_audio_mck>;
+
+ atmel,model = "wm8904 @ AT91SAM9N12";
+ atmel,audio-routing =
+ "Headphone Jack", "HPOUTL",
+ "Headphone Jack", "HPOUTR",
+ "IN2L", "Line In Jack",
+ "IN2R", "Line In Jack",
+ "Mic", "MICBIAS",
+ "IN1L", "Mic";
+
+ atmel,ssc-controller = <&ssc0>;
+ atmel,audio-codec = <&wm8904>;
+ };
};
diff --git a/arch/arm/boot/dts/at91sam9x25.dtsi b/arch/arm/boot/dts/at91sam9x25.dtsi
index 49e94aba938f..c2554219f7a4 100644
--- a/arch/arm/boot/dts/at91sam9x25.dtsi
+++ b/arch/arm/boot/dts/at91sam9x25.dtsi
@@ -7,6 +7,9 @@
*/
#include "at91sam9x5.dtsi"
+#include "at91sam9x5_usart3.dtsi"
+#include "at91sam9x5_macb0.dtsi"
+#include "at91sam9x5_macb1.dtsi"
/ {
model = "Atmel AT91SAM9X25 SoC";
@@ -22,27 +25,6 @@
0x80000000 0xfffd0000 0xb83fffff /* pioC */
0x003fffff 0x003f8000 0x00000000 /* pioD */
>;
-
- macb1 {
- pinctrl_macb1_rmii: macb1_rmii-0 {
- atmel,pins =
- <AT91_PIOC 16 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC16 periph B */
- AT91_PIOC 18 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC18 periph B */
- AT91_PIOC 19 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC19 periph B */
- AT91_PIOC 20 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC20 periph B */
- AT91_PIOC 21 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC21 periph B */
- AT91_PIOC 27 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC27 periph B */
- AT91_PIOC 28 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC28 periph B */
- AT91_PIOC 29 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC29 periph B */
- AT91_PIOC 30 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC30 periph B */
- AT91_PIOC 31 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC31 periph B */
- };
- };
- };
-
- macb1: ethernet@f8030000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_macb1_rmii>;
};
};
};
diff --git a/arch/arm/boot/dts/at91sam9x35.dtsi b/arch/arm/boot/dts/at91sam9x35.dtsi
index 1a3d525a1f5d..8eac66ce0ab7 100644
--- a/arch/arm/boot/dts/at91sam9x35.dtsi
+++ b/arch/arm/boot/dts/at91sam9x35.dtsi
@@ -7,6 +7,7 @@
*/
#include "at91sam9x5.dtsi"
+#include "at91sam9x5_macb0.dtsi"
/ {
model = "Atmel AT91SAM9X35 SoC";
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index e74dc15efa9d..289bcfc16ede 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -206,29 +206,6 @@
};
};
- usart3 {
- pinctrl_usart3: usart3-0 {
- atmel,pins =
- <AT91_PIOC 22 AT91_PERIPH_B AT91_PINCTRL_PULL_UP /* PC22 periph B with pullup */
- AT91_PIOC 23 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC23 periph B */
- };
-
- pinctrl_usart3_rts: usart3_rts-0 {
- atmel,pins =
- <AT91_PIOC 24 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC24 periph B */
- };
-
- pinctrl_usart3_cts: usart3_cts-0 {
- atmel,pins =
- <AT91_PIOC 25 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC25 periph B */
- };
-
- pinctrl_usart3_sck: usart3_sck-0 {
- atmel,pins =
- <AT91_PIOC 26 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC26 periph B */
- };
- };
-
uart0 {
pinctrl_uart0: uart0-0 {
atmel,pins =
@@ -277,34 +254,6 @@
};
};
- macb0 {
- pinctrl_macb0_rmii: macb0_rmii-0 {
- atmel,pins =
- <AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB0 periph A */
- AT91_PIOB 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB1 periph A */
- AT91_PIOB 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB2 periph A */
- AT91_PIOB 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB3 periph A */
- AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB4 periph A */
- AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB5 periph A */
- AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB6 periph A */
- AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB7 periph A */
- AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB9 periph A */
- AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PB10 periph A */
- };
-
- pinctrl_macb0_rmii_mii: macb0_rmii_mii-0 {
- atmel,pins =
- <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB8 periph A */
- AT91_PIOB 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB11 periph A */
- AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB12 periph A */
- AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB13 periph A */
- AT91_PIOB 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB14 periph A */
- AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB15 periph A */
- AT91_PIOB 16 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB16 periph A */
- AT91_PIOB 17 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PB17 periph A */
- };
- };
-
mmc0 {
pinctrl_mmc0_slot0_clk_cmd_dat0: mmc0_slot0_clk_cmd_dat0-0 {
atmel,pins =
@@ -610,22 +559,6 @@
status = "disabled";
};
- macb0: ethernet@f802c000 {
- compatible = "cdns,at32ap7000-macb", "cdns,macb";
- reg = <0xf802c000 0x100>;
- interrupts = <24 IRQ_TYPE_LEVEL_HIGH 3>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_macb0_rmii>;
- status = "disabled";
- };
-
- macb1: ethernet@f8030000 {
- compatible = "cdns,at32ap7000-macb", "cdns,macb";
- reg = <0xf8030000 0x100>;
- interrupts = <27 IRQ_TYPE_LEVEL_HIGH 3>;
- status = "disabled";
- };
-
i2c0: i2c@f8010000 {
compatible = "atmel,at91sam9x5-i2c";
reg = <0xf8010000 0x100>;
@@ -820,6 +753,11 @@
watchdog@fffffe40 {
compatible = "atmel,at91sam9260-wdt";
reg = <0xfffffe40 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ atmel,watchdog-type = "hardware";
+ atmel,reset-type = "all";
+ atmel,dbg-halt;
+ atmel,idle-halt;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91sam9x5_macb0.dtsi b/arch/arm/boot/dts/at91sam9x5_macb0.dtsi
new file mode 100644
index 000000000000..55731ffba764
--- /dev/null
+++ b/arch/arm/boot/dts/at91sam9x5_macb0.dtsi
@@ -0,0 +1,56 @@
+/*
+ * at91sam9x5_macb0.dtsi - Device Tree Include file for AT91SAM9x5 SoC with 1
+ * Ethernet interface.
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <dt-bindings/pinctrl/at91.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ ahb {
+ apb {
+ pinctrl@fffff400 {
+ macb0 {
+ pinctrl_macb0_rmii: macb0_rmii-0 {
+ atmel,pins =
+ <AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB0 periph A */
+ AT91_PIOB 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB1 periph A */
+ AT91_PIOB 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB2 periph A */
+ AT91_PIOB 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB3 periph A */
+ AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB4 periph A */
+ AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB5 periph A */
+ AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB6 periph A */
+ AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB7 periph A */
+ AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB9 periph A */
+ AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PB10 periph A */
+ };
+
+ pinctrl_macb0_rmii_mii: macb0_rmii_mii-0 {
+ atmel,pins =
+ <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB8 periph A */
+ AT91_PIOB 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB11 periph A */
+ AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB12 periph A */
+ AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB13 periph A */
+ AT91_PIOB 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB14 periph A */
+ AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB15 periph A */
+ AT91_PIOB 16 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB16 periph A */
+ AT91_PIOB 17 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PB17 periph A */
+ };
+ };
+ };
+
+ macb0: ethernet@f802c000 {
+ compatible = "cdns,at32ap7000-macb", "cdns,macb";
+ reg = <0xf802c000 0x100>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_macb0_rmii>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/at91sam9x5_macb1.dtsi b/arch/arm/boot/dts/at91sam9x5_macb1.dtsi
new file mode 100644
index 000000000000..77425a627a94
--- /dev/null
+++ b/arch/arm/boot/dts/at91sam9x5_macb1.dtsi
@@ -0,0 +1,44 @@
+/*
+ * at91sam9x5_macb1.dtsi - Device Tree Include file for AT91SAM9x5 SoC with 2
+ * Ethernet interfaces.
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <dt-bindings/pinctrl/at91.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ ahb {
+ apb {
+ pinctrl@fffff400 {
+ macb1 {
+ pinctrl_macb1_rmii: macb1_rmii-0 {
+ atmel,pins =
+ <AT91_PIOC 16 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC16 periph B */
+ AT91_PIOC 18 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC18 periph B */
+ AT91_PIOC 19 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC19 periph B */
+ AT91_PIOC 20 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC20 periph B */
+ AT91_PIOC 21 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC21 periph B */
+ AT91_PIOC 27 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC27 periph B */
+ AT91_PIOC 28 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC28 periph B */
+ AT91_PIOC 29 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC29 periph B */
+ AT91_PIOC 30 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC30 periph B */
+ AT91_PIOC 31 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC31 periph B */
+ };
+ };
+ };
+
+ macb1: ethernet@f8030000 {
+ compatible = "cdns,at32ap7000-macb", "cdns,macb";
+ reg = <0xf8030000 0x100>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_macb1_rmii>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/at91sam9x5_usart3.dtsi b/arch/arm/boot/dts/at91sam9x5_usart3.dtsi
new file mode 100644
index 000000000000..2347e9563cef
--- /dev/null
+++ b/arch/arm/boot/dts/at91sam9x5_usart3.dtsi
@@ -0,0 +1,51 @@
+/*
+ * at91sam9x5_usart3.dtsi - Device Tree Include file for AT91SAM9x5 SoC with
+ * 4 USARTs.
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <dt-bindings/pinctrl/at91.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ ahb {
+ apb {
+ pinctrl@fffff400 {
+ usart3 {
+ pinctrl_usart3: usart3-0 {
+ atmel,pins =
+ <AT91_PIOC 22 AT91_PERIPH_B AT91_PINCTRL_PULL_UP /* PC22 periph B with pullup */
+ AT91_PIOC 23 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC23 periph B */
+ };
+
+ pinctrl_usart3_rts: usart3_rts-0 {
+ atmel,pins =
+ <AT91_PIOC 24 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC24 periph B */
+ };
+
+ pinctrl_usart3_cts: usart3_cts-0 {
+ atmel,pins =
+ <AT91_PIOC 25 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC25 periph B */
+ };
+
+ pinctrl_usart3_sck: usart3_sck-0 {
+ atmel,pins =
+ <AT91_PIOC 26 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC26 periph B */
+ };
+ };
+ };
+
+ usart3: serial@f8028000 {
+ compatible = "atmel,at91sam9260-usart";
+ reg = <0xf8028000 0x200>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usart3>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index 6db4f81d4795..a49032c6e199 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -65,6 +65,11 @@
compatible = "sirf,prima2-rsc";
reg = <0x88020000 0x1000>;
};
+
+ cphifbg@88030000 {
+ compatible = "sirf,prima2-cphifbg";
+ reg = <0x88030000 0x1000>;
+ };
};
mem-iobg {
@@ -75,10 +80,17 @@
memory-controller@90000000 {
compatible = "sirf,prima2-memc";
- reg = <0x90000000 0x10000>;
+ reg = <0x90000000 0x2000>;
interrupts = <27>;
clocks = <&clks 5>;
};
+
+ memc-monitor {
+ compatible = "sirf,prima2-memcmon";
+ reg = <0x90002000 0x200>;
+ interrupts = <4>;
+ clocks = <&clks 32>;
+ };
};
disp-iobg {
@@ -120,6 +132,20 @@
};
};
+ graphics2d-iobg {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0xa0000000 0xa0000000 0x8000000>;
+
+ ble@a0000000 {
+ compatible = "sirf,atlas6-ble";
+ reg = <0xa0000000 0x2000>;
+ interrupts = <5>;
+ clocks = <&clks 33>;
+ };
+ };
+
dsp-iobg {
compatible = "simple-bus";
#address-cells = <1>;
@@ -271,6 +297,11 @@
compatible = "sirf,prima2-spi";
reg = <0xb0170000 0x10000>;
interrupts = <16>;
+ sirf,spi-num-chipselects = <1>;
+ sirf,spi-dma-rx-channel = <12>;
+ sirf,spi-dma-tx-channel = <13>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&clks 20>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/bcm11351-brt.dts b/arch/arm/boot/dts/bcm11351-brt.dts
index 9d36eb4e3c41..23cd16d736bf 100644
--- a/arch/arm/boot/dts/bcm11351-brt.dts
+++ b/arch/arm/boot/dts/bcm11351-brt.dts
@@ -40,6 +40,7 @@
sdio4: sdio@3f1b0000 {
max-frequency = <48000000>;
+ cd-gpios = <&gpio 14 0>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi
index 05a5aabe3b2c..b0c0610d1395 100644
--- a/arch/arm/boot/dts/bcm11351.dtsi
+++ b/arch/arm/boot/dts/bcm11351.dtsi
@@ -49,6 +49,36 @@
reg-io-width = <4>;
};
+ uart@3e001000 {
+ compatible = "brcm,bcm11351-dw-apb-uart", "snps,dw-apb-uart";
+ status = "disabled";
+ reg = <0x3e001000 0x1000>;
+ clock-frequency = <13000000>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ uart@3e002000 {
+ compatible = "brcm,bcm11351-dw-apb-uart", "snps,dw-apb-uart";
+ status = "disabled";
+ reg = <0x3e002000 0x1000>;
+ clock-frequency = <13000000>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ uart@3e003000 {
+ compatible = "brcm,bcm11351-dw-apb-uart", "snps,dw-apb-uart";
+ status = "disabled";
+ reg = <0x3e003000 0x1000>;
+ clock-frequency = <13000000>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
L2: l2-cache {
compatible = "brcm,bcm11351-a2-pl310-cache";
reg = <0x3ff20000 0x1000>;
@@ -68,31 +98,47 @@
clock-frequency = <32768>;
};
+ gpio: gpio@35003000 {
+ compatible = "brcm,bcm11351-gpio", "brcm,kona-gpio";
+ reg = <0x35003000 0x800>;
+ interrupts =
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ };
+
sdio1: sdio@3f180000 {
compatible = "brcm,kona-sdhci";
reg = <0x3f180000 0x10000>;
- interrupts = <0x0 77 0x4>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
sdio2: sdio@3f190000 {
compatible = "brcm,kona-sdhci";
reg = <0x3f190000 0x10000>;
- interrupts = <0x0 76 0x4>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
sdio3: sdio@3f1a0000 {
compatible = "brcm,kona-sdhci";
reg = <0x3f1a0000 0x10000>;
- interrupts = <0x0 74 0x4>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
sdio4: sdio@3f1b0000 {
compatible = "brcm,kona-sdhci";
reg = <0x3f1b0000 0x10000>;
- interrupts = <0x0 73 0x4>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/bcm28155-ap.dts b/arch/arm/boot/dts/bcm28155-ap.dts
index 96ae67a2f0d3..08e47c285227 100644
--- a/arch/arm/boot/dts/bcm28155-ap.dts
+++ b/arch/arm/boot/dts/bcm28155-ap.dts
@@ -40,6 +40,7 @@
sdio4: sdio@3f1b0000 {
max-frequency = <48000000>;
+ cd-gpios = <&gpio 14 0>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi
index dc259e8b8a73..6470536a68c4 100644
--- a/arch/arm/boot/dts/cros5250-common.dtsi
+++ b/arch/arm/boot/dts/cros5250-common.dtsi
@@ -229,7 +229,7 @@
};
};
- dwmmc0@12200000 {
+ mmc@12200000 {
num-slots = <1>;
supports-highspeed;
broken-cd;
@@ -247,11 +247,11 @@
};
};
- dwmmc1@12210000 {
+ mmc@12210000 {
status = "disabled";
};
- dwmmc2@12220000 {
+ mmc@12220000 {
num-slots = <1>;
supports-highspeed;
fifo-depth = <0x80>;
@@ -269,7 +269,7 @@
};
};
- dwmmc3@12230000 {
+ mmc@12230000 {
num-slots = <1>;
supports-highspeed;
broken-cd;
diff --git a/arch/arm/boot/dts/dove-cm-a510.dts b/arch/arm/boot/dts/dove-cm-a510.dts
index 61a8062e56de..50c0d6904497 100644
--- a/arch/arm/boot/dts/dove-cm-a510.dts
+++ b/arch/arm/boot/dts/dove-cm-a510.dts
@@ -1,6 +1,6 @@
/dts-v1/;
-/include/ "dove.dtsi"
+#include "dove.dtsi"
/ {
model = "Compulab CM-A510";
diff --git a/arch/arm/boot/dts/dove-cubox.dts b/arch/arm/boot/dts/dove-cubox.dts
index 022646ef4b38..8349a248ecea 100644
--- a/arch/arm/boot/dts/dove-cubox.dts
+++ b/arch/arm/boot/dts/dove-cubox.dts
@@ -1,6 +1,6 @@
/dts-v1/;
-/include/ "dove.dtsi"
+#include "dove.dtsi"
/ {
model = "SolidRun CuBox";
@@ -99,18 +99,12 @@
silabs,pll-master;
};
- clkout1 {
- reg = <1>;
- silabs,drive-strength = <8>;
- silabs,multisynth-source = <1>;
- silabs,clock-source = <0>;
- silabs,pll-master;
- };
-
clkout2 {
reg = <2>;
+ silabs,drive-strength = <8>;
silabs,multisynth-source = <1>;
silabs,clock-source = <0>;
+ silabs,pll-master;
};
};
};
@@ -132,3 +126,11 @@
reg = <0>;
};
};
+
+&audio1 {
+ status = "okay";
+ clocks = <&gate_clk 13>, <&si5351 2>;
+ clock-names = "internal", "extclk";
+ pinctrl-0 = <&pmx_audio1_i2s1_spdifo &pmx_audio1_extclk>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm/boot/dts/dove-d2plug.dts b/arch/arm/boot/dts/dove-d2plug.dts
index e2222ce94f2f..c11d3636c8e5 100644
--- a/arch/arm/boot/dts/dove-d2plug.dts
+++ b/arch/arm/boot/dts/dove-d2plug.dts
@@ -1,6 +1,6 @@
/dts-v1/;
-/include/ "dove.dtsi"
+#include "dove.dtsi"
/ {
model = "Globalscale D2Plug";
diff --git a/arch/arm/boot/dts/dove-d3plug.dts b/arch/arm/boot/dts/dove-d3plug.dts
new file mode 100644
index 000000000000..f5f59bb5a534
--- /dev/null
+++ b/arch/arm/boot/dts/dove-d3plug.dts
@@ -0,0 +1,103 @@
+/dts-v1/;
+
+#include "dove.dtsi"
+
+/ {
+ model = "Globalscale D3Plug";
+ compatible = "globalscale,d3plug", "marvell,dove";
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x40000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8 earlyprintk root=/dev/mmcblk0p2 rw rootwait";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-0 = <&pmx_gpio_0 &pmx_gpio_1 &pmx_gpio_2>;
+ pinctrl-names = "default";
+
+ wlan-act {
+ label = "wlan-act";
+ gpios = <&gpio0 0 1>;
+ };
+
+ wlan-ap {
+ label = "wlan-ap";
+ gpios = <&gpio0 1 1>;
+ };
+
+ status {
+ label = "status";
+ gpios = <&gpio0 2 1>;
+ };
+ };
+
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usb_power: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "USB Power";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&gpio0 8 0>;
+ pinctrl-0 = <&pmx_gpio_8>;
+ pinctrl-names = "default";
+ };
+ };
+};
+
+&uart0 { status = "okay"; };
+&sata0 { status = "okay"; };
+&i2c0 { status = "okay"; };
+
+/* Samsung M8G2F eMMC */
+&sdio0 {
+ status = "okay";
+ non-removable;
+ bus-width = <4>;
+};
+
+/* Marvell SD8787 WLAN/BT */
+&sdio1 {
+ status = "okay";
+ non-removable;
+};
+
+&spi0 {
+ status = "okay";
+
+ /* spi0.0: 2M Flash Macronix MX25L1605D */
+ spi-flash@0 {
+ compatible = "st,m25l1605d";
+ spi-max-frequency = <86000000>;
+ reg = <0>;
+ };
+};
+
+&pcie {
+ status = "okay";
+ /* Fresco Logic USB3.0 xHCI controller */
+ pcie-port@0 {
+ status = "okay";
+ reset-gpios = <&gpio0 26 1>;
+ reset-delay-us = <20000>;
+ pinctrl-0 = <&pmx_camera_gpio>;
+ pinctrl-names = "default";
+ };
+ /* Mini-PCIe slot */
+ pcie-port@1 {
+ status = "okay";
+ reset-gpios = <&gpio0 25 1>;
+ };
+};
diff --git a/arch/arm/boot/dts/dove-dove-db.dts b/arch/arm/boot/dts/dove-dove-db.dts
index e5a920beab45..bb725dca3a10 100644
--- a/arch/arm/boot/dts/dove-dove-db.dts
+++ b/arch/arm/boot/dts/dove-dove-db.dts
@@ -1,6 +1,6 @@
/dts-v1/;
-/include/ "dove.dtsi"
+#include "dove.dtsi"
/ {
model = "Marvell DB-MV88AP510-BP Development Board";
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index cc279166646f..113a8bc7bee7 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -1,8 +1,11 @@
/include/ "skeleton.dtsi"
+#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
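+/* MBUS_ID(target, attributes) packs the MBUS target ID (bits 31:24) and attributes (bits 23:16) for use in the mbus ranges entries below */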
+
/ {
compatible = "marvell,dove";
model = "Marvell Armada 88AP510 SoC";
+ interrupt-parent = <&intc>;
aliases {
gpio0 = &gpio0;
@@ -27,482 +30,576 @@
marvell,tauros2-cache-features = <0>;
};
- soc@f1000000 {
- compatible = "simple-bus";
- #address-cells = <1>;
+ mbus {
+ compatible = "marvell,dove-mbus", "marvell,mbus", "simple-bus";
+ #address-cells = <2>;
#size-cells = <1>;
- interrupt-parent = <&intc>;
-
- ranges = <0xc8000000 0xc8000000 0x0100000 /* CESA SRAM 1M */
- 0xe0000000 0xe0000000 0x8000000 /* PCIe0 Mem 128M */
- 0xe8000000 0xe8000000 0x8000000 /* PCIe1 Mem 128M */
- 0xf0000000 0xf0000000 0x0100000 /* ScratchPad 1M */
- 0x00000000 0xf1000000 0x1000000 /* SB/NB regs 16M */
- 0xf2000000 0xf2000000 0x0100000 /* PCIe0 I/O 1M */
- 0xf2100000 0xf2100000 0x0100000 /* PCIe0 I/O 1M */
- 0xf8000000 0xf8000000 0x8000000>; /* BootROM 128M */
-
- timer: timer@20300 {
- compatible = "marvell,orion-timer";
- reg = <0x20300 0x20>;
- interrupt-parent = <&bridge_intc>;
- interrupts = <1>, <2>;
- clocks = <&core_clk 0>;
- };
-
- intc: main-interrupt-ctrl@20200 {
- compatible = "marvell,orion-intc";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0x20200 0x10>, <0x20210 0x10>;
- };
-
- bridge_intc: bridge-interrupt-ctrl@20110 {
- compatible = "marvell,orion-bridge-intc";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0x20110 0x8>;
- interrupts = <0>;
- marvell,#interrupts = <5>;
- };
-
- core_clk: core-clocks@d0214 {
- compatible = "marvell,dove-core-clock";
- reg = <0xd0214 0x4>;
- #clock-cells = <1>;
- };
-
- gate_clk: clock-gating-ctrl@d0038 {
- compatible = "marvell,dove-gating-clock";
- reg = <0xd0038 0x4>;
- clocks = <&core_clk 0>;
- #clock-cells = <1>;
- };
-
- thermal: thermal-diode@d001c {
- compatible = "marvell,dove-thermal";
- reg = <0xd001c 0x0c>, <0xd005c 0x08>;
- };
-
- uart0: serial@12000 {
- compatible = "ns16550a";
- reg = <0x12000 0x100>;
- reg-shift = <2>;
- interrupts = <7>;
- clocks = <&core_clk 0>;
- status = "disabled";
- };
-
- uart1: serial@12100 {
- compatible = "ns16550a";
- reg = <0x12100 0x100>;
- reg-shift = <2>;
- interrupts = <8>;
- clocks = <&core_clk 0>;
- pinctrl-0 = <&pmx_uart1>;
- pinctrl-names = "default";
- status = "disabled";
- };
-
- uart2: serial@12200 {
- compatible = "ns16550a";
- reg = <0x12000 0x100>;
- reg-shift = <2>;
- interrupts = <9>;
- clocks = <&core_clk 0>;
+ controller = <&mbusc>;
+ pcie-mem-aperture = <0xe0000000 0x10000000>; /* 256M MEM space */
+ pcie-io-aperture = <0xf2000000 0x00200000>; /* 2M I/O space */
+
+ ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x0100000 /* MBUS regs 1M */
+ MBUS_ID(0xf0, 0x02) 0 0xf1800000 0x1000000 /* AXI regs 16M */
+ MBUS_ID(0x01, 0xfd) 0 0xf8000000 0x8000000 /* BootROM 128M */
+ MBUS_ID(0x03, 0x01) 0 0xc8000000 0x0100000 /* CESA SRAM 1M */
+ MBUS_ID(0x0d, 0x00) 0 0xf0000000 0x0100000>; /* PMU SRAM 1M */
+
+ pcie: pcie-controller {
+ compatible = "marvell,dove-pcie";
status = "disabled";
- };
-
- uart3: serial@12300 {
- compatible = "ns16550a";
- reg = <0x12100 0x100>;
- reg-shift = <2>;
- interrupts = <10>;
- clocks = <&core_clk 0>;
- status = "disabled";
- };
-
- gpio0: gpio-ctrl@d0400 {
- compatible = "marvell,orion-gpio";
- #gpio-cells = <2>;
- gpio-controller;
- reg = <0xd0400 0x20>;
- ngpios = <32>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <12>, <13>, <14>, <60>;
- };
-
- gpio1: gpio-ctrl@d0420 {
- compatible = "marvell,orion-gpio";
- #gpio-cells = <2>;
- gpio-controller;
- reg = <0xd0420 0x20>;
- ngpios = <32>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <61>;
- };
-
- gpio2: gpio-ctrl@e8400 {
- compatible = "marvell,orion-gpio";
- #gpio-cells = <2>;
- gpio-controller;
- reg = <0xe8400 0x0c>;
- ngpios = <8>;
- };
-
- pinctrl: pin-ctrl@d0200 {
- compatible = "marvell,dove-pinctrl";
- reg = <0xd0200 0x10>;
- clocks = <&gate_clk 22>;
-
- pmx_gpio_0: pmx-gpio-0 {
- marvell,pins = "mpp0";
- marvell,function = "gpio";
- };
-
- pmx_gpio_1: pmx-gpio-1 {
- marvell,pins = "mpp1";
- marvell,function = "gpio";
- };
-
- pmx_gpio_2: pmx-gpio-2 {
- marvell,pins = "mpp2";
- marvell,function = "gpio";
- };
-
- pmx_gpio_3: pmx-gpio-3 {
- marvell,pins = "mpp3";
- marvell,function = "gpio";
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ msi-parent = <&intc>;
+ bus-range = <0x00 0xff>;
+
+ ranges = <0x82000000 0x0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x2000
+ 0x82000000 0x0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x2000
+ 0x82000000 0x1 0x0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 Mem */
+ 0x81000000 0x1 0x0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 I/O */
+ 0x82000000 0x2 0x0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 Mem */
+ 0x81000000 0x2 0x0 MBUS_ID(0x08, 0xe0) 0 1 0>; /* Port 1.0 I/O */
+
+ pcie-port@0 {
+ device_type = "pci";
+ status = "disabled";
+ assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+ reg = <0x0800 0 0 0 0>;
+ clocks = <&gate_clk 4>;
+ marvell,pcie-port = <0>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
+ 0x81000000 0 0 0x81000000 0x1 0 1 0>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &intc 16>;
+ };
+
+ pcie-port@1 {
+ device_type = "pci";
+ status = "disabled";
+ assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ clocks = <&gate_clk 5>;
+ marvell,pcie-port = <1>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+ 0x81000000 0 0 0x81000000 0x2 0 1 0>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &intc 18>;
};
-
- pmx_gpio_4: pmx-gpio-4 {
- marvell,pins = "mpp4";
- marvell,function = "gpio";
- };
-
- pmx_gpio_5: pmx-gpio-5 {
- marvell,pins = "mpp5";
- marvell,function = "gpio";
- };
-
- pmx_gpio_6: pmx-gpio-6 {
- marvell,pins = "mpp6";
- marvell,function = "gpio";
- };
-
- pmx_gpio_7: pmx-gpio-7 {
- marvell,pins = "mpp7";
- marvell,function = "gpio";
- };
-
- pmx_gpio_8: pmx-gpio-8 {
- marvell,pins = "mpp8";
- marvell,function = "gpio";
- };
-
- pmx_gpio_9: pmx-gpio-9 {
- marvell,pins = "mpp9";
- marvell,function = "gpio";
- };
-
- pmx_gpio_10: pmx-gpio-10 {
- marvell,pins = "mpp10";
- marvell,function = "gpio";
- };
-
- pmx_gpio_11: pmx-gpio-11 {
- marvell,pins = "mpp11";
- marvell,function = "gpio";
- };
-
- pmx_gpio_12: pmx-gpio-12 {
- marvell,pins = "mpp12";
- marvell,function = "gpio";
- };
-
- pmx_gpio_13: pmx-gpio-13 {
- marvell,pins = "mpp13";
- marvell,function = "gpio";
- };
-
- pmx_gpio_14: pmx-gpio-14 {
- marvell,pins = "mpp14";
- marvell,function = "gpio";
- };
-
- pmx_gpio_15: pmx-gpio-15 {
- marvell,pins = "mpp15";
- marvell,function = "gpio";
- };
-
- pmx_gpio_16: pmx-gpio-16 {
- marvell,pins = "mpp16";
- marvell,function = "gpio";
- };
-
- pmx_gpio_17: pmx-gpio-17 {
- marvell,pins = "mpp17";
- marvell,function = "gpio";
- };
-
- pmx_gpio_18: pmx-gpio-18 {
- marvell,pins = "mpp18";
- marvell,function = "gpio";
- };
-
- pmx_gpio_19: pmx-gpio-19 {
- marvell,pins = "mpp19";
- marvell,function = "gpio";
- };
-
- pmx_gpio_20: pmx-gpio-20 {
- marvell,pins = "mpp20";
- marvell,function = "gpio";
- };
-
- pmx_gpio_21: pmx-gpio-21 {
- marvell,pins = "mpp21";
- marvell,function = "gpio";
- };
-
- pmx_camera: pmx-camera {
- marvell,pins = "mpp_camera";
- marvell,function = "camera";
- };
-
- pmx_camera_gpio: pmx-camera-gpio {
- marvell,pins = "mpp_camera";
- marvell,function = "gpio";
- };
-
- pmx_sdio0: pmx-sdio0 {
- marvell,pins = "mpp_sdio0";
- marvell,function = "sdio0";
- };
-
- pmx_sdio0_gpio: pmx-sdio0-gpio {
- marvell,pins = "mpp_sdio0";
- marvell,function = "gpio";
- };
-
- pmx_sdio1: pmx-sdio1 {
- marvell,pins = "mpp_sdio1";
- marvell,function = "sdio1";
- };
-
- pmx_sdio1_gpio: pmx-sdio1-gpio {
- marvell,pins = "mpp_sdio1";
- marvell,function = "gpio";
- };
-
- pmx_audio1_gpio: pmx-audio1-gpio {
- marvell,pins = "mpp_audio1";
- marvell,function = "gpio";
- };
-
- pmx_spi0: pmx-spi0 {
- marvell,pins = "mpp_spi0";
- marvell,function = "spi0";
- };
-
- pmx_spi0_gpio: pmx-spi0-gpio {
- marvell,pins = "mpp_spi0";
- marvell,function = "gpio";
- };
-
- pmx_uart1: pmx-uart1 {
- marvell,pins = "mpp_uart1";
- marvell,function = "uart1";
- };
-
- pmx_uart1_gpio: pmx-uart1-gpio {
- marvell,pins = "mpp_uart1";
- marvell,function = "gpio";
- };
-
- pmx_nand: pmx-nand {
- marvell,pins = "mpp_nand";
- marvell,function = "nand";
- };
-
- pmx_nand_gpo: pmx-nand-gpo {
- marvell,pins = "mpp_nand";
- marvell,function = "gpo";
- };
- };
-
- spi0: spi-ctrl@10600 {
- compatible = "marvell,orion-spi";
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- interrupts = <6>;
- reg = <0x10600 0x28>;
- clocks = <&core_clk 0>;
- pinctrl-0 = <&pmx_spi0>;
- pinctrl-names = "default";
- status = "disabled";
- };
-
- spi1: spi-ctrl@14600 {
- compatible = "marvell,orion-spi";
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- interrupts = <5>;
- reg = <0x14600 0x28>;
- clocks = <&core_clk 0>;
- status = "disabled";
- };
-
- i2c0: i2c-ctrl@11000 {
- compatible = "marvell,mv64xxx-i2c";
- reg = <0x11000 0x20>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <11>;
- clock-frequency = <400000>;
- timeout-ms = <1000>;
- clocks = <&core_clk 0>;
- status = "disabled";
};
- ehci0: usb-host@50000 {
- compatible = "marvell,orion-ehci";
- reg = <0x50000 0x1000>;
- interrupts = <24>;
- clocks = <&gate_clk 0>;
- status = "okay";
- };
-
- ehci1: usb-host@51000 {
- compatible = "marvell,orion-ehci";
- reg = <0x51000 0x1000>;
- interrupts = <25>;
- clocks = <&gate_clk 1>;
- status = "okay";
- };
-
- sdio0: sdio-host@92000 {
- compatible = "marvell,dove-sdhci";
- reg = <0x92000 0x100>;
- interrupts = <35>, <37>;
- clocks = <&gate_clk 8>;
- pinctrl-0 = <&pmx_sdio0>;
- pinctrl-names = "default";
- status = "disabled";
- };
-
- sdio1: sdio-host@90000 {
- compatible = "marvell,dove-sdhci";
- reg = <0x90000 0x100>;
- interrupts = <36>, <38>;
- clocks = <&gate_clk 9>;
- pinctrl-0 = <&pmx_sdio1>;
- pinctrl-names = "default";
- status = "disabled";
- };
-
- sata0: sata-host@a0000 {
- compatible = "marvell,orion-sata";
- reg = <0xa0000 0x2400>;
- interrupts = <62>;
- clocks = <&gate_clk 3>;
- nr-ports = <1>;
- status = "disabled";
- };
-
- rtc: real-time-clock@d8500 {
- compatible = "marvell,orion-rtc";
- reg = <0xd8500 0x20>;
- };
-
- crypto: crypto-engine@30000 {
- compatible = "marvell,orion-crypto";
- reg = <0x30000 0x10000>,
- <0xc8000000 0x800>;
- reg-names = "regs", "sram";
- interrupts = <31>;
- clocks = <&gate_clk 15>;
- status = "okay";
- };
-
- xor0: dma-engine@60800 {
- compatible = "marvell,orion-xor";
- reg = <0x60800 0x100
- 0x60a00 0x100>;
- clocks = <&gate_clk 23>;
- status = "okay";
-
- channel0 {
- interrupts = <39>;
- dmacap,memcpy;
- dmacap,xor;
- };
-
- channel1 {
- interrupts = <40>;
- dmacap,memset;
- dmacap,memcpy;
- dmacap,xor;
- };
- };
-
- xor1: dma-engine@60900 {
- compatible = "marvell,orion-xor";
- reg = <0x60900 0x100
- 0x60b00 0x100>;
- clocks = <&gate_clk 24>;
- status = "okay";
-
- channel0 {
- interrupts = <42>;
- dmacap,memcpy;
- dmacap,xor;
- };
-
- channel1 {
- interrupts = <43>;
- dmacap,memset;
- dmacap,memcpy;
- dmacap,xor;
- };
- };
-
- mdio: mdio-bus@72004 {
- compatible = "marvell,orion-mdio";
+ internal-regs {
+ compatible = "simple-bus";
#address-cells = <1>;
- #size-cells = <0>;
- reg = <0x72004 0x84>;
- interrupts = <30>;
- clocks = <&gate_clk 2>;
- status = "disabled";
-
- ethphy: ethernet-phy {
- device-type = "ethernet-phy";
- /* set phy address in board file */
- };
- };
-
- eth: ethernet-controller@72000 {
- compatible = "marvell,orion-eth";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x72000 0x4000>;
- clocks = <&gate_clk 2>;
- marvell,tx-checksum-limit = <1600>;
- status = "disabled";
-
- ethernet-port@0 {
- device_type = "network";
- compatible = "marvell,orion-eth-port";
- reg = <0>;
- interrupts = <29>;
- /* overwrite MAC address in bootloader */
- local-mac-address = [00 00 00 00 00 00];
- phy-handle = <&ethphy>;
+ #size-cells = <1>;
+ ranges = <0x00000000 MBUS_ID(0xf0, 0x01) 0 0x0100000 /* MBUS regs 1M */
+ 0x00800000 MBUS_ID(0xf0, 0x02) 0 0x1000000 /* AXI regs 16M */
+ 0xffffe000 MBUS_ID(0x03, 0x01) 0 0x0000800 /* CESA SRAM 2k */
+ 0xfffff000 MBUS_ID(0x0d, 0x00) 0 0x0000800>; /* PMU SRAM 2k */
+
+ mbusc: mbus-ctrl@20000 {
+ compatible = "marvell,mbus-controller";
+ reg = <0x20000 0x80>, <0x800100 0x8>;
+ };
+
+ timer: timer@20300 {
+ compatible = "marvell,orion-timer";
+ reg = <0x20300 0x20>;
+ interrupt-parent = <&bridge_intc>;
+ interrupts = <1>, <2>;
+ clocks = <&core_clk 0>;
+ };
+
+ intc: main-interrupt-ctrl@20200 {
+ compatible = "marvell,orion-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x20200 0x10>, <0x20210 0x10>;
+ };
+
+ bridge_intc: bridge-interrupt-ctrl@20110 {
+ compatible = "marvell,orion-bridge-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x20110 0x8>;
+ interrupts = <0>;
+ marvell,#interrupts = <5>;
+ };
+
+ core_clk: core-clocks@d0214 {
+ compatible = "marvell,dove-core-clock";
+ reg = <0xd0214 0x4>;
+ #clock-cells = <1>;
+ };
+
+ gate_clk: clock-gating-ctrl@d0038 {
+ compatible = "marvell,dove-gating-clock";
+ reg = <0xd0038 0x4>;
+ clocks = <&core_clk 0>;
+ #clock-cells = <1>;
+ };
+
+ thermal: thermal-diode@d001c {
+ compatible = "marvell,dove-thermal";
+ reg = <0xd001c 0x0c>, <0xd005c 0x08>;
+ };
+
+ uart0: serial@12000 {
+ compatible = "ns16550a";
+ reg = <0x12000 0x100>;
+ reg-shift = <2>;
+ interrupts = <7>;
+ clocks = <&core_clk 0>;
+ status = "disabled";
+ };
+
+ uart1: serial@12100 {
+ compatible = "ns16550a";
+ reg = <0x12100 0x100>;
+ reg-shift = <2>;
+ interrupts = <8>;
+ clocks = <&core_clk 0>;
+ pinctrl-0 = <&pmx_uart1>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ uart2: serial@12200 {
+ compatible = "ns16550a";
+ reg = <0x12200 0x100>;
+ reg-shift = <2>;
+ interrupts = <9>;
+ clocks = <&core_clk 0>;
+ status = "disabled";
+ };
+
+ uart3: serial@12300 {
+ compatible = "ns16550a";
+ reg = <0x12300 0x100>;
+ reg-shift = <2>;
+ interrupts = <10>;
+ clocks = <&core_clk 0>;
+ status = "disabled";
+ };
+
+ gpio0: gpio-ctrl@d0400 {
+ compatible = "marvell,orion-gpio";
+ #gpio-cells = <2>;
+ gpio-controller;
+ reg = <0xd0400 0x20>;
+ ngpios = <32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <12>, <13>, <14>, <60>;
+ };
+
+ gpio1: gpio-ctrl@d0420 {
+ compatible = "marvell,orion-gpio";
+ #gpio-cells = <2>;
+ gpio-controller;
+ reg = <0xd0420 0x20>;
+ ngpios = <32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <61>;
+ };
+
+ gpio2: gpio-ctrl@e8400 {
+ compatible = "marvell,orion-gpio";
+ #gpio-cells = <2>;
+ gpio-controller;
+ reg = <0xe8400 0x0c>;
+ ngpios = <8>;
+ };
+
+ pinctrl: pin-ctrl@d0200 {
+ compatible = "marvell,dove-pinctrl";
+ reg = <0xd0200 0x10>;
+ clocks = <&gate_clk 22>;
+
+ pmx_gpio_0: pmx-gpio-0 {
+ marvell,pins = "mpp0";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_1: pmx-gpio-1 {
+ marvell,pins = "mpp1";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_2: pmx-gpio-2 {
+ marvell,pins = "mpp2";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_3: pmx-gpio-3 {
+ marvell,pins = "mpp3";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_4: pmx-gpio-4 {
+ marvell,pins = "mpp4";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_5: pmx-gpio-5 {
+ marvell,pins = "mpp5";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_6: pmx-gpio-6 {
+ marvell,pins = "mpp6";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_7: pmx-gpio-7 {
+ marvell,pins = "mpp7";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_8: pmx-gpio-8 {
+ marvell,pins = "mpp8";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_9: pmx-gpio-9 {
+ marvell,pins = "mpp9";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_10: pmx-gpio-10 {
+ marvell,pins = "mpp10";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_11: pmx-gpio-11 {
+ marvell,pins = "mpp11";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_12: pmx-gpio-12 {
+ marvell,pins = "mpp12";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_13: pmx-gpio-13 {
+ marvell,pins = "mpp13";
+ marvell,function = "gpio";
+ };
+
+ pmx_audio1_extclk: pmx-audio1-extclk {
+ marvell,pins = "mpp13";
+ marvell,function = "audio1";
+ };
+
+ pmx_gpio_14: pmx-gpio-14 {
+ marvell,pins = "mpp14";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_15: pmx-gpio-15 {
+ marvell,pins = "mpp15";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_16: pmx-gpio-16 {
+ marvell,pins = "mpp16";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_17: pmx-gpio-17 {
+ marvell,pins = "mpp17";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_18: pmx-gpio-18 {
+ marvell,pins = "mpp18";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_19: pmx-gpio-19 {
+ marvell,pins = "mpp19";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_20: pmx-gpio-20 {
+ marvell,pins = "mpp20";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_21: pmx-gpio-21 {
+ marvell,pins = "mpp21";
+ marvell,function = "gpio";
+ };
+
+ pmx_camera: pmx-camera {
+ marvell,pins = "mpp_camera";
+ marvell,function = "camera";
+ };
+
+ pmx_camera_gpio: pmx-camera-gpio {
+ marvell,pins = "mpp_camera";
+ marvell,function = "gpio";
+ };
+
+ pmx_sdio0: pmx-sdio0 {
+ marvell,pins = "mpp_sdio0";
+ marvell,function = "sdio0";
+ };
+
+ pmx_sdio0_gpio: pmx-sdio0-gpio {
+ marvell,pins = "mpp_sdio0";
+ marvell,function = "gpio";
+ };
+
+ pmx_sdio1: pmx-sdio1 {
+ marvell,pins = "mpp_sdio1";
+ marvell,function = "sdio1";
+ };
+
+ pmx_sdio1_gpio: pmx-sdio1-gpio {
+ marvell,pins = "mpp_sdio1";
+ marvell,function = "gpio";
+ };
+
+ pmx_audio1_gpio: pmx-audio1-gpio {
+ marvell,pins = "mpp_audio1";
+ marvell,function = "gpio";
+ };
+
+ pmx_audio1_i2s1_spdifo: pmx-audio1-i2s1-spdifo {
+ marvell,pins = "mpp_audio1";
+ marvell,function = "i2s1/spdifo";
+ };
+
+ pmx_spi0: pmx-spi0 {
+ marvell,pins = "mpp_spi0";
+ marvell,function = "spi0";
+ };
+
+ pmx_spi0_gpio: pmx-spi0-gpio {
+ marvell,pins = "mpp_spi0";
+ marvell,function = "gpio";
+ };
+
+ pmx_uart1: pmx-uart1 {
+ marvell,pins = "mpp_uart1";
+ marvell,function = "uart1";
+ };
+
+ pmx_uart1_gpio: pmx-uart1-gpio {
+ marvell,pins = "mpp_uart1";
+ marvell,function = "gpio";
+ };
+
+ pmx_nand: pmx-nand {
+ marvell,pins = "mpp_nand";
+ marvell,function = "nand";
+ };
+
+ pmx_nand_gpo: pmx-nand-gpo {
+ marvell,pins = "mpp_nand";
+ marvell,function = "gpo";
+ };
+ };
+
+ spi0: spi-ctrl@10600 {
+ compatible = "marvell,orion-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ interrupts = <6>;
+ reg = <0x10600 0x28>;
+ clocks = <&core_clk 0>;
+ pinctrl-0 = <&pmx_spi0>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ spi1: spi-ctrl@14600 {
+ compatible = "marvell,orion-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ interrupts = <5>;
+ reg = <0x14600 0x28>;
+ clocks = <&core_clk 0>;
+ status = "disabled";
+ };
+
+ i2c0: i2c-ctrl@11000 {
+ compatible = "marvell,mv64xxx-i2c";
+ reg = <0x11000 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <11>;
+ clock-frequency = <400000>;
+ timeout-ms = <1000>;
+ clocks = <&core_clk 0>;
+ status = "disabled";
+ };
+
+ ehci0: usb-host@50000 {
+ compatible = "marvell,orion-ehci";
+ reg = <0x50000 0x1000>;
+ interrupts = <24>;
+ clocks = <&gate_clk 0>;
+ status = "okay";
+ };
+
+ ehci1: usb-host@51000 {
+ compatible = "marvell,orion-ehci";
+ reg = <0x51000 0x1000>;
+ interrupts = <25>;
+ clocks = <&gate_clk 1>;
+ status = "okay";
+ };
+
+ sdio0: sdio-host@92000 {
+ compatible = "marvell,dove-sdhci";
+ reg = <0x92000 0x100>;
+ interrupts = <35>, <37>;
+ clocks = <&gate_clk 8>;
+ pinctrl-0 = <&pmx_sdio0>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ sdio1: sdio-host@90000 {
+ compatible = "marvell,dove-sdhci";
+ reg = <0x90000 0x100>;
+ interrupts = <36>, <38>;
+ clocks = <&gate_clk 9>;
+ pinctrl-0 = <&pmx_sdio1>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ sata0: sata-host@a0000 {
+ compatible = "marvell,orion-sata";
+ reg = <0xa0000 0x2400>;
+ interrupts = <62>;
+ clocks = <&gate_clk 3>;
+ nr-ports = <1>;
+ status = "disabled";
+ };
+
+ rtc: real-time-clock@d8500 {
+ compatible = "marvell,orion-rtc";
+ reg = <0xd8500 0x20>;
+ };
+
+ crypto: crypto-engine@30000 {
+ compatible = "marvell,orion-crypto";
+ reg = <0x30000 0x10000>,
+ <0xffffe000 0x800>;
+ reg-names = "regs", "sram";
+ interrupts = <31>;
+ clocks = <&gate_clk 15>;
+ status = "okay";
+ };
+
+ xor0: dma-engine@60800 {
+ compatible = "marvell,orion-xor";
+ reg = <0x60800 0x100
+ 0x60a00 0x100>;
+ clocks = <&gate_clk 23>;
+ status = "okay";
+
+ channel0 {
+ interrupts = <39>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+
+ channel1 {
+ interrupts = <40>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+ };
+
+ xor1: dma-engine@60900 {
+ compatible = "marvell,orion-xor";
+ reg = <0x60900 0x100
+ 0x60b00 0x100>;
+ clocks = <&gate_clk 24>;
+ status = "okay";
+
+ channel0 {
+ interrupts = <42>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+
+ channel1 {
+ interrupts = <43>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+ };
+
+ mdio: mdio-bus@72004 {
+ compatible = "marvell,orion-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x72004 0x84>;
+ interrupts = <30>;
+ clocks = <&gate_clk 2>;
+ status = "disabled";
+
+ ethphy: ethernet-phy {
+ device-type = "ethernet-phy";
+ /* set phy address in board file */
+ };
+ };
+
+ eth: ethernet-ctrl@72000 {
+ compatible = "marvell,orion-eth";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x72000 0x4000>;
+ clocks = <&gate_clk 2>;
+ marvell,tx-checksum-limit = <1600>;
+ status = "disabled";
+
+ ethernet-port@0 {
+ device_type = "network";
+ compatible = "marvell,orion-eth-port";
+ reg = <0>;
+ interrupts = <29>;
+ /* overwrite MAC address in bootloader */
+ local-mac-address = [00 00 00 00 00 00];
+ phy-handle = <&ethphy>;
+ };
+ };
+
+ audio0: audio-controller@b0000 {
+ compatible = "marvell,dove-audio";
+ reg = <0xb0000 0x2210>;
+ interrupts = <19>, <20>;
+ clocks = <&gate_clk 12>;
+ clock-names = "internal";
+ status = "disabled";
+ };
+
+ audio1: audio-controller@b4000 {
+ compatible = "marvell,dove-audio";
+ reg = <0xb4000 0x2210>;
+ interrupts = <21>, <22>;
+ clocks = <&gate_clk 13>;
+ clock-names = "internal";
+ status = "disabled";
};
};
};
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
new file mode 100644
index 000000000000..5babba0a3a75
--- /dev/null
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "dra7.dtsi"
+
+/ {
+ model = "TI DRA7";
+ compatible = "ti,dra7-evm", "ti,dra752", "ti,dra7";
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x60000000>; /* 1536 MB */
+ };
+
+ mmc2_3v3: fixedregulator-mmc2 {
+ compatible = "regulator-fixed";
+ regulator-name = "mmc2_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&dra7_pmx_core {
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ 0x400 (PIN_INPUT | MUX_MODE0) /* i2c1_sda */
+ 0x404 (PIN_INPUT | MUX_MODE0) /* i2c1_scl */
+ >;
+ };
+
+ i2c2_pins: pinmux_i2c2_pins {
+ pinctrl-single,pins = <
+ 0x408 (PIN_INPUT | MUX_MODE0) /* i2c2_sda */
+ 0x40c (PIN_INPUT | MUX_MODE0) /* i2c2_scl */
+ >;
+ };
+
+ i2c3_pins: pinmux_i2c3_pins {
+ pinctrl-single,pins = <
+ 0x410 (PIN_INPUT | MUX_MODE0) /* i2c3_sda */
+ 0x414 (PIN_INPUT | MUX_MODE0) /* i2c3_scl */
+ >;
+ };
+
+ mcspi1_pins: pinmux_mcspi1_pins {
+ pinctrl-single,pins = <
+ 0x3a4 (PIN_INPUT | MUX_MODE0) /* spi2_clk */
+ 0x3a8 (PIN_INPUT | MUX_MODE0) /* spi2_d1 */
+ 0x3ac (PIN_INPUT | MUX_MODE0) /* spi2_d0 */
+ 0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs0 */
+ 0x3b4 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs1 */
+ 0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs2 */
+ 0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs3 */
+ >;
+ };
+
+ mcspi2_pins: pinmux_mcspi2_pins {
+ pinctrl-single,pins = <
+ 0x3c0 (PIN_INPUT | MUX_MODE0) /* spi2_sclk */
+ 0x3c4 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_d1 */
+ 0x3c8 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_d0 */
+ 0x3cc (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs0 */
+ >;
+ };
+
+ uart1_pins: pinmux_uart1_pins {
+ pinctrl-single,pins = <
+ 0x3e0 (PIN_INPUT_SLEW | MUX_MODE0) /* uart1_rxd */
+ 0x3e4 (PIN_INPUT_SLEW | MUX_MODE0) /* uart1_txd */
+ 0x3e8 (PIN_INPUT | MUX_MODE3) /* uart1_ctsn */
+ 0x3ec (PIN_INPUT | MUX_MODE3) /* uart1_rtsn */
+ >;
+ };
+
+ uart2_pins: pinmux_uart2_pins {
+ pinctrl-single,pins = <
+ 0x3f0 (PIN_INPUT | MUX_MODE0) /* uart2_rxd */
+ 0x3f4 (PIN_INPUT | MUX_MODE0) /* uart2_txd */
+ 0x3f8 (PIN_INPUT | MUX_MODE0) /* uart2_ctsn */
+ 0x3fc (PIN_INPUT | MUX_MODE0) /* uart2_rtsn */
+ >;
+ };
+
+ uart3_pins: pinmux_uart3_pins {
+ pinctrl-single,pins = <
+ 0x248 (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_rxd */
+ 0x24c (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_txd */
+ >;
+ };
+};
+
+&i2c1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+ clock-frequency = <400000>;
+
+ tps659038: tps659038@58 {
+ compatible = "ti,tps659038";
+ reg = <0x58>;
+
+ tps659038_pmic {
+ compatible = "ti,tps659038-pmic";
+
+ regulators {
+ smps123_reg: smps123 {
+ /* VDD_MPU */
+ regulator-name = "smps123";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ smps45_reg: smps45 {
+ /* VDD_DSPEVE */
+ regulator-name = "smps45";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-boot-on;
+ };
+
+ smps6_reg: smps6 {
+ /* VDD_GPU - over VDD_SMPS6 */
+ regulator-name = "smps6";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-boot-on;
+ };
+
+ smps7_reg: smps7 {
+ /* CORE_VDD */
+ regulator-name = "smps7";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1030000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ smps8_reg: smps8 {
+ /* VDD_IVAHD */
+ regulator-name = "smps8";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-boot-on;
+ };
+
+ smps9_reg: smps9 {
+ /* VDDS1V8 */
+ regulator-name = "smps9";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo1_reg: ldo1 {
+ /* LDO1_OUT --> SDIO */
+ regulator-name = "ldo1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
+
+ ldo2_reg: ldo2 {
+ /* VDD_RTCIO */
+ /* LDO2 -> VDDSHV5, LDO2 also goes to CAN_PHY_3V3 */
+ regulator-name = "ldo2";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
+
+ ldo3_reg: ldo3 {
+ /* VDDA_1V8_PHY */
+ regulator-name = "ldo3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ };
+
+ ldo9_reg: ldo9 {
+ /* VDD_RTC */
+ regulator-name = "ldo9";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-boot-on;
+ };
+
+ ldoln_reg: ldoln {
+ /* VDDA_1V8_PLL */
+ regulator-name = "ldoln";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldousb_reg: ldousb {
+ /* VDDA_3V_USB: VDDA_USBHS33 */
+ regulator-name = "ldousb";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
+ };
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+ clock-frequency = <400000>;
+};
+
+&i2c3 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins>;
+ clock-frequency = <3400000>;
+};
+
+&mcspi1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcspi1_pins>;
+};
+
+&mcspi2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcspi2_pins>;
+};
+
+&uart1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>;
+};
+
+&uart2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+};
+
+&uart3 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pins>;
+};
+
+&mmc1 {
+ status = "okay";
+ vmmc-supply = <&ldo1_reg>;
+ bus-width = <4>;
+};
+
+&mmc2 {
+ status = "okay";
+ vmmc-supply = <&mmc2_3v3>;
+ bus-width = <8>;
+};
+
+&cpu0 {
+ cpu0-supply = <&smps123_reg>;
+};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
new file mode 100644
index 000000000000..d0df4c4e8b0a
--- /dev/null
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -0,0 +1,586 @@
+/*
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ * Based on "omap4.dtsi"
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/pinctrl/dra.h>
+
+#include "skeleton.dtsi"
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "ti,dra7xx";
+ interrupt-parent = <&gic>;
+
+ aliases {
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
+ i2c3 = &i2c4;
+ i2c4 = &i2c5;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
+ serial5 = &uart6;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+
+ operating-points = <
+ /* kHz uV */
+ 1000000 1060000
+ 1176000 1160000
+ >;
+ };
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ gic: interrupt-controller@48211000 {
+ compatible = "arm,cortex-a15-gic";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x48211000 0x1000>,
+ <0x48212000 0x1000>,
+ <0x48214000 0x2000>,
+ <0x48216000 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ /*
+ * The soc node represents the soc top level view. It is used for IPs
+ * that are not memory mapped in the MPU view or for the MPU itself.
+ */
+ soc {
+ compatible = "ti,omap-infra";
+ mpu {
+ compatible = "ti,omap5-mpu";
+ ti,hwmods = "mpu";
+ };
+ };
+
+ /*
+ * XXX: Use a flat representation of the SOC interconnect.
+ * The real OMAP interconnect network is quite complex.
+ * Since representing it fully in DT would bring no real advantage for
+ * the moment, just use a fake OCP bus entry to represent the whole bus
+ * hierarchy.
+ */
+ ocp {
+ compatible = "ti,omap4-l3-noc", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ ti,hwmods = "l3_main_1", "l3_main_2";
+ reg = <0x44000000 0x2000>,
+ <0x44800000 0x3000>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+
+ counter32k: counter@4ae04000 {
+ compatible = "ti,omap-counter32k";
+ reg = <0x4ae04000 0x40>;
+ ti,hwmods = "counter_32k";
+ };
+
+ dra7_pmx_core: pinmux@4a003400 {
+ compatible = "pinctrl-single";
+ reg = <0x4a003400 0x0464>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0x3fffffff>;
+ };
+
+ sdma: dma-controller@4a056000 {
+ compatible = "ti,omap4430-sdma";
+ reg = <0x4a056000 0x1000>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ #dma-channels = <32>;
+ #dma-requests = <127>;
+ };
+
+ gpio1: gpio@4ae10000 {
+ compatible = "ti,omap4-gpio";
+ reg = <0x4ae10000 0x200>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "gpio1";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio2: gpio@48055000 {
+ compatible = "ti,omap4-gpio";
+ reg = <0x48055000 0x200>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "gpio2";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio3: gpio@48057000 {
+ compatible = "ti,omap4-gpio";
+ reg = <0x48057000 0x200>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "gpio3";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio4: gpio@48059000 {
+ compatible = "ti,omap4-gpio";
+ reg = <0x48059000 0x200>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "gpio4";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio5: gpio@4805b000 {
+ compatible = "ti,omap4-gpio";
+ reg = <0x4805b000 0x200>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "gpio5";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio6: gpio@4805d000 {
+ compatible = "ti,omap4-gpio";
+ reg = <0x4805d000 0x200>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "gpio6";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio7: gpio@48051000 {
+ compatible = "ti,omap4-gpio";
+ reg = <0x48051000 0x200>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "gpio7";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio8: gpio@48053000 {
+ compatible = "ti,omap4-gpio";
+ reg = <0x48053000 0x200>;
+ interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "gpio8";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ uart1: serial@4806a000 {
+ compatible = "ti,omap4-uart";
+ reg = <0x4806a000 0x100>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "uart1";
+ clock-frequency = <48000000>;
+ status = "disabled";
+ };
+
+ uart2: serial@4806c000 {
+ compatible = "ti,omap4-uart";
+ reg = <0x4806c000 0x100>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "uart2";
+ clock-frequency = <48000000>;
+ status = "disabled";
+ };
+
+ uart3: serial@48020000 {
+ compatible = "ti,omap4-uart";
+ reg = <0x48020000 0x100>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "uart3";
+ clock-frequency = <48000000>;
+ status = "disabled";
+ };
+
+ uart4: serial@4806e000 {
+ compatible = "ti,omap4-uart";
+ reg = <0x4806e000 0x100>;
+ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "uart4";
+ clock-frequency = <48000000>;
+ status = "disabled";
+ };
+
+ uart5: serial@48066000 {
+ compatible = "ti,omap4-uart";
+ reg = <0x48066000 0x100>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "uart5";
+ clock-frequency = <48000000>;
+ status = "disabled";
+ };
+
+ uart6: serial@48068000 {
+ compatible = "ti,omap4-uart";
+ reg = <0x48068000 0x100>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "uart6";
+ clock-frequency = <48000000>;
+ status = "disabled";
+ };
+
+ uart7: serial@48420000 {
+ compatible = "ti,omap4-uart";
+ reg = <0x48420000 0x100>;
+ ti,hwmods = "uart7";
+ clock-frequency = <48000000>;
+ status = "disabled";
+ };
+
+ uart8: serial@48422000 {
+ compatible = "ti,omap4-uart";
+ reg = <0x48422000 0x100>;
+ ti,hwmods = "uart8";
+ clock-frequency = <48000000>;
+ status = "disabled";
+ };
+
+ uart9: serial@48424000 {
+ compatible = "ti,omap4-uart";
+ reg = <0x48424000 0x100>;
+ ti,hwmods = "uart9";
+ clock-frequency = <48000000>;
+ status = "disabled";
+ };
+
+ uart10: serial@4ae2b000 {
+ compatible = "ti,omap4-uart";
+ reg = <0x4ae2b000 0x100>;
+ ti,hwmods = "uart10";
+ clock-frequency = <48000000>;
+ status = "disabled";
+ };
+
+ timer1: timer@4ae18000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x4ae18000 0x80>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer1";
+ ti,timer-alwon;
+ };
+
+ timer2: timer@48032000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x48032000 0x80>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer2";
+ };
+
+ timer3: timer@48034000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x48034000 0x80>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer3";
+ };
+
+ timer4: timer@48036000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x48036000 0x80>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer4";
+ };
+
+ timer5: timer@48820000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x48820000 0x80>;
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer5";
+ ti,timer-dsp;
+ };
+
+ timer6: timer@48822000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x48822000 0x80>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer6";
+ ti,timer-dsp;
+ ti,timer-pwm;
+ };
+
+ timer7: timer@48824000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x48824000 0x80>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer7";
+ ti,timer-dsp;
+ };
+
+ timer8: timer@48826000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x48826000 0x80>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer8";
+ ti,timer-dsp;
+ ti,timer-pwm;
+ };
+
+ timer9: timer@4803e000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x4803e000 0x80>;
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer9";
+ };
+
+ timer10: timer@48086000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x48086000 0x80>;
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer10";
+ };
+
+ timer11: timer@48088000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x48088000 0x80>;
+ interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "timer11";
+ ti,timer-pwm;
+ };
+
+ timer13: timer@48828000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x48828000 0x80>;
+ ti,hwmods = "timer13";
+ status = "disabled";
+ };
+
+ timer14: timer@4882a000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x4882a000 0x80>;
+ ti,hwmods = "timer14";
+ status = "disabled";
+ };
+
+ timer15: timer@4882c000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x4882c000 0x80>;
+ ti,hwmods = "timer15";
+ status = "disabled";
+ };
+
+ timer16: timer@4882e000 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x4882e000 0x80>;
+ ti,hwmods = "timer16";
+ status = "disabled";
+ };
+
+ wdt2: wdt@4ae14000 {
+ compatible = "ti,omap4-wdt";
+ reg = <0x4ae14000 0x80>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "wd_timer2";
+ };
+
+ i2c1: i2c@48070000 {
+ compatible = "ti,omap4-i2c";
+ reg = <0x48070000 0x100>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "i2c1";
+ status = "disabled";
+ };
+
+ i2c2: i2c@48072000 {
+ compatible = "ti,omap4-i2c";
+ reg = <0x48072000 0x100>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "i2c2";
+ status = "disabled";
+ };
+
+ i2c3: i2c@48060000 {
+ compatible = "ti,omap4-i2c";
+ reg = <0x48060000 0x100>;
+ interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "i2c3";
+ status = "disabled";
+ };
+
+ i2c4: i2c@4807a000 {
+ compatible = "ti,omap4-i2c";
+ reg = <0x4807a000 0x100>;
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "i2c4";
+ status = "disabled";
+ };
+
+ i2c5: i2c@4807c000 {
+ compatible = "ti,omap4-i2c";
+ reg = <0x4807c000 0x100>;
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "i2c5";
+ status = "disabled";
+ };
+
+ mmc1: mmc@4809c000 {
+ compatible = "ti,omap4-hsmmc";
+ reg = <0x4809c000 0x400>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "mmc1";
+ ti,dual-volt;
+ ti,needs-special-reset;
+ dmas = <&sdma 61>, <&sdma 62>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ mmc2: mmc@480b4000 {
+ compatible = "ti,omap4-hsmmc";
+ reg = <0x480b4000 0x400>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "mmc2";
+ ti,needs-special-reset;
+ dmas = <&sdma 47>, <&sdma 48>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ mmc3: mmc@480ad000 {
+ compatible = "ti,omap4-hsmmc";
+ reg = <0x480ad000 0x400>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "mmc3";
+ ti,needs-special-reset;
+ dmas = <&sdma 77>, <&sdma 78>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ mmc4: mmc@480d1000 {
+ compatible = "ti,omap4-hsmmc";
+ reg = <0x480d1000 0x400>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "mmc4";
+ ti,needs-special-reset;
+ dmas = <&sdma 57>, <&sdma 58>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ mcspi1: spi@48098000 {
+ compatible = "ti,omap4-mcspi";
+ reg = <0x48098000 0x200>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "mcspi1";
+ ti,spi-num-cs = <4>;
+ dmas = <&sdma 35>,
+ <&sdma 36>,
+ <&sdma 37>,
+ <&sdma 38>,
+ <&sdma 39>,
+ <&sdma 40>,
+ <&sdma 41>,
+ <&sdma 42>;
+ dma-names = "tx0", "rx0", "tx1", "rx1",
+ "tx2", "rx2", "tx3", "rx3";
+ status = "disabled";
+ };
+
+ mcspi2: spi@4809a000 {
+ compatible = "ti,omap4-mcspi";
+ reg = <0x4809a000 0x200>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "mcspi2";
+ ti,spi-num-cs = <2>;
+ dmas = <&sdma 43>,
+ <&sdma 44>,
+ <&sdma 45>,
+ <&sdma 46>;
+ dma-names = "tx0", "rx0", "tx1", "rx1";
+ status = "disabled";
+ };
+
+ mcspi3: spi@480b8000 {
+ compatible = "ti,omap4-mcspi";
+ reg = <0x480b8000 0x200>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "mcspi3";
+ ti,spi-num-cs = <2>;
+ dmas = <&sdma 15>, <&sdma 16>;
+ dma-names = "tx0", "rx0";
+ status = "disabled";
+ };
+
+ mcspi4: spi@480ba000 {
+ compatible = "ti,omap4-mcspi";
+ reg = <0x480ba000 0x200>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "mcspi4";
+ ti,spi-num-cs = <1>;
+ dmas = <&sdma 70>, <&sdma 71>;
+ dma-names = "tx0", "rx0";
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/ecx-2000.dts b/arch/arm/boot/dts/ecx-2000.dts
index 139b40cc3a23..2ccbb57fbfa8 100644
--- a/arch/arm/boot/dts/ecx-2000.dts
+++ b/arch/arm/boot/dts/ecx-2000.dts
@@ -85,6 +85,12 @@
<1 10 0xf08>;
};
+ memory-controller@fff00000 {
+ compatible = "calxeda,ecx-2000-ddr-ctrl";
+ reg = <0xfff00000 0x1000>;
+ interrupts = <0 91 4>;
+ };
+
intc: interrupt-controller@fff11000 {
compatible = "arm,cortex-a15-gic";
#interrupt-cells = <3>;
diff --git a/arch/arm/boot/dts/ecx-common.dtsi b/arch/arm/boot/dts/ecx-common.dtsi
index e8559b753c9d..b90045a8f8e3 100644
--- a/arch/arm/boot/dts/ecx-common.dtsi
+++ b/arch/arm/boot/dts/ecx-common.dtsi
@@ -19,6 +19,14 @@
bootargs = "console=ttyAMA0";
};
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ cpu_suspend = <0x84000002>;
+ cpu_off = <0x84000004>;
+ cpu_on = <0x84000006>;
+ };
+
soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -45,12 +53,6 @@
status = "disabled";
};
- memory-controller@fff00000 {
- compatible = "calxeda,hb-ddr-ctrl";
- reg = <0xfff00000 0x1000>;
- interrupts = <0 91 4>;
- };
-
ipc@fff20000 {
compatible = "arm,pl320", "arm,primecell";
reg = <0xfff20000 0x1000>;
diff --git a/arch/arm/boot/dts/emev2-kzm9d-reference.dts b/arch/arm/boot/dts/emev2-kzm9d-reference.dts
deleted file mode 100644
index cceefda268b6..000000000000
--- a/arch/arm/boot/dts/emev2-kzm9d-reference.dts
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Device Tree Source for the KZM9D board
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-/dts-v1/;
-
-/include/ "emev2.dtsi"
-
-/ {
- model = "EMEV2 KZM9D Board";
- compatible = "renesas,kzm9d-reference", "renesas,emev2";
-
- memory {
- device_type = "memory";
- reg = <0x40000000 0x8000000>;
- };
-
- chosen {
- bootargs = "console=ttyS1,115200n81 ignore_loglevel root=/dev/nfs ip=dhcp";
- };
-
- reg_1p8v: regulator@0 {
- compatible = "regulator-fixed";
- regulator-name = "fixed-1.8V";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- reg_3p3v: regulator@1 {
- compatible = "regulator-fixed";
- regulator-name = "fixed-3.3V";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- lan9220@20000000 {
- compatible = "smsc,lan9220", "smsc,lan9115";
- reg = <0x20000000 0x10000>;
- phy-mode = "mii";
- interrupt-parent = <&gpio0>;
- interrupts = <1 1>; /* active high */
- reg-io-width = <4>;
- smsc,irq-active-high;
- smsc,irq-push-pull;
- vddvario-supply = <&reg_1p8v>;
- vdd33a-supply = <&reg_3p3v>;
- };
-};
diff --git a/arch/arm/boot/dts/emev2-kzm9d.dts b/arch/arm/boot/dts/emev2-kzm9d.dts
index f92e812fdd9f..861aa7d6fc7d 100644
--- a/arch/arm/boot/dts/emev2-kzm9d.dts
+++ b/arch/arm/boot/dts/emev2-kzm9d.dts
@@ -1,7 +1,7 @@
/*
* Device Tree Source for the KZM9D board
*
- * Copyright (C) 2012 Renesas Solutions Corp.
+ * Copyright (C) 2013 Renesas Solutions Corp.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
@@ -23,4 +23,35 @@
chosen {
bootargs = "console=ttyS1,115200n81 ignore_loglevel root=/dev/nfs ip=dhcp";
};
+
+ reg_1p8v: regulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_3p3v: regulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ lan9220@20000000 {
+ compatible = "smsc,lan9220", "smsc,lan9115";
+ reg = <0x20000000 0x10000>;
+ phy-mode = "mii";
+ interrupt-parent = <&gpio0>;
+ interrupts = <1 1>; /* active high */
+ reg-io-width = <4>;
+ smsc,irq-active-high;
+ smsc,irq-push-pull;
+ vddvario-supply = <&reg_1p8v>;
+ vdd33a-supply = <&reg_3p3v>;
+ };
};
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index caadc0257342..a73eeb5f258f 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -49,6 +49,12 @@
reg = <0x10000000 0x100>;
};
+ mipi_phy: video-phy@10020710 {
+ compatible = "samsung,s5pv210-mipi-video-phy";
+ reg = <0x10020710 8>;
+ #phy-cells = <1>;
+ };
+
pd_mfc: mfc-power-domain@10023C40 {
compatible = "samsung,exynos4210-pd";
reg = <0x10023C40 0x20>;
@@ -161,6 +167,8 @@
clock-names = "csis", "sclk_csis";
bus-width = <4>;
samsung,power-domain = <&pd_cam>;
+ phys = <&mipi_phy 0>;
+ phy-names = "csis";
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -174,6 +182,8 @@
clock-names = "csis", "sclk_csis";
bus-width = <2>;
samsung,power-domain = <&pd_cam>;
+ phys = <&mipi_phy 2>;
+ phy-names = "csis";
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index 382d8c7e2906..1a12fb23767c 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -32,13 +32,20 @@
bootargs ="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC2,115200 init=/linuxrc";
};
- mmc_reg: voltage-regulator {
- compatible = "regulator-fixed";
- regulator-name = "VMEM_VDD_2.8V";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- gpio = <&gpx1 1 0>;
- enable-active-high;
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mmc_reg: regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0>;
+ regulator-name = "VMEM_VDD_2.8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ gpio = <&gpx1 1 0>;
+ enable-active-high;
+ };
};
tmu@100C0000 {
@@ -192,7 +199,12 @@
};
buck1_reg: BUCK1 {
- regulator-name = "VDD_ARM_1.2V";
+ /*
+ * HACK: The real name is VDD_ARM_1.2V,
+ * but exynos-cpufreq does not support
+ * DT-based regulator lookup yet.
+ */
+ regulator-name = "vdd_arm";
regulator-min-microvolt = <950000>;
regulator-max-microvolt = <1350000>;
regulator-always-on;
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 1c164f234bcc..63cc571ca307 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -290,7 +290,12 @@
};
varm_breg: BUCK1 {
- regulator-name = "VARM_1.2V_C210";
+ /*
+ * HACK: The real name is VARM_1.2V_C210,
+ * but exynos-cpufreq does not support
+ * DT-based regulator lookup yet.
+ */
+ regulator-name = "vdd_arm";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1350000>;
regulator-always-on;
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index 889cdada1ce9..d2e3f5f5916d 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -350,3 +350,7 @@
status = "okay";
};
};
+
+&mdma1 {
+ reg = <0x12840000 0x1000>;
+};
diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts
index 8768b03702e5..d65984c440f6 100644
--- a/arch/arm/boot/dts/exynos4412-origen.dts
+++ b/arch/arm/boot/dts/exynos4412-origen.dts
@@ -32,13 +32,20 @@
reg = <0x0203F000 0x1000>;
};
- mmc_reg: voltage-regulator {
- compatible = "regulator-fixed";
- regulator-name = "VMEM_VDD_2.8V";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- gpio = <&gpx1 1 0>;
- enable-active-high;
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mmc_reg: regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0>;
+ regulator-name = "VMEM_VDD_2.8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ gpio = <&gpx1 1 0>;
+ enable-active-high;
+ };
};
pinctrl@11000000 {
diff --git a/arch/arm/boot/dts/exynos5.dtsi b/arch/arm/boot/dts/exynos5.dtsi
index 074739d39e2d..e52b038a7a11 100644
--- a/arch/arm/boot/dts/exynos5.dtsi
+++ b/arch/arm/boot/dts/exynos5.dtsi
@@ -50,27 +50,6 @@
interrupts = <1 9 0xf04>;
};
- dwmmc_0: dwmmc0@12200000 {
- compatible = "samsung,exynos5250-dw-mshc";
- interrupts = <0 75 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- dwmmc_1: dwmmc1@12210000 {
- compatible = "samsung,exynos5250-dw-mshc";
- interrupts = <0 76 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- dwmmc_2: dwmmc2@12220000 {
- compatible = "samsung,exynos5250-dw-mshc";
- interrupts = <0 77 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
serial@12C00000 {
compatible = "samsung,exynos4210-uart";
reg = <0x12C00000 0x100>;
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index cee55fa33731..b77a37ec81c2 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -324,7 +324,14 @@
};
i2c@12C80000 {
- status = "disabled";
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <66000>;
+ samsung,i2c-slave-addr = <0x50>;
+
+ hdmiddc@50 {
+ compatible = "samsung,exynos4210-hdmiddc";
+ reg = <0x50>;
+ };
};
i2c@12C90000 {
@@ -362,15 +369,26 @@
status = "disabled";
};
+ i2c@12CE0000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <66000>;
+ samsung,i2c-slave-addr = <0x38>;
+
+ hdmiphy@38 {
+ compatible = "samsung,exynos4212-hdmiphy";
+ reg = <0x38>;
+ };
+ };
+
i2c@121D0000 {
status = "disabled";
};
- dwmmc_0: dwmmc0@12200000 {
+ mmc_0: mmc@12200000 {
+ status = "okay";
num-slots = <1>;
supports-highspeed;
broken-cd;
- fifo-depth = <0x80>;
card-detect-delay = <200>;
samsung,dw-mshc-ciu-div = <3>;
samsung,dw-mshc-sdr-timing = <2 3>;
@@ -385,14 +403,10 @@
};
};
- dwmmc_1: dwmmc1@12210000 {
- status = "disabled";
- };
-
- dwmmc_2: dwmmc2@12220000 {
+ mmc_2: mmc@12220000 {
+ status = "okay";
num-slots = <1>;
supports-highspeed;
- fifo-depth = <0x80>;
card-detect-delay = <200>;
samsung,dw-mshc-ciu-div = <3>;
samsung,dw-mshc-sdr-timing = <2 3>;
@@ -408,8 +422,8 @@
};
};
- dwmmc_3: dwmmc3@12230000 {
- status = "disabled";
+ i2s0: i2s@03830000 {
+ status = "okay";
};
spi_0: spi@12d20000 {
@@ -482,13 +496,15 @@
#address-cells = <1>;
#size-cells = <0>;
- main_dc_reg: fixedregulator@1 {
+ main_dc_reg: regulator@0 {
compatible = "regulator-fixed";
+ reg = <0>;
regulator-name = "MAIN_DC";
};
- mmc_reg: voltage-regulator {
+ mmc_reg: regulator@1 {
compatible = "regulator-fixed";
+ reg = <1>;
regulator-name = "VDD_33ON_2.8V";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
@@ -496,8 +512,9 @@
enable-active-high;
};
- reg_hdmi_en: fixedregulator@0 {
+ reg_hdmi_en: regulator@2 {
compatible = "regulator-fixed";
+ reg = <2>;
regulator-name = "hdmi-en";
};
};
diff --git a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
index 724a22f9b1c8..9a49e6804ae1 100644
--- a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
@@ -210,21 +210,21 @@
samsung,pins = "gpa0-2", "gpa0-3";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
i2c2_bus: i2c2-bus {
samsung,pins = "gpa0-6", "gpa0-7";
samsung,pin-function = <3>;
samsung,pin-pud = <3>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
i2c2_hs_bus: i2c2-hs-bus {
samsung,pins = "gpa0-6", "gpa0-7";
samsung,pin-function = <4>;
samsung,pin-pud = <3>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
uart2_data: uart2-data {
@@ -238,21 +238,21 @@
samsung,pins = "gpa1-2", "gpa1-3";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
i2c3_bus: i2c3-bus {
samsung,pins = "gpa1-2", "gpa1-3";
samsung,pin-function = <3>;
samsung,pin-pud = <3>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
i2c3_hs_bus: i2c3-hs-bus {
samsung,pins = "gpa1-2", "gpa1-3";
samsung,pin-function = <4>;
samsung,pin-pud = <3>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
uart3_data: uart3-data {
@@ -273,14 +273,14 @@
samsung,pins = "gpa2-0", "gpa2-1";
samsung,pin-function = <3>;
samsung,pin-pud = <3>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
i2c5_bus: i2c5-bus {
samsung,pins = "gpa2-2", "gpa2-3";
samsung,pin-function = <3>;
samsung,pin-pud = <3>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
spi1_bus: spi1-bus {
@@ -376,14 +376,14 @@
samsung,pins = "gpb3-0", "gpb3-1";
samsung,pin-function = <4>;
samsung,pin-pud = <3>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
i2c1_hs_bus: i2c1-hs-bus {
samsung,pins = "gpb3-2", "gpb3-3";
samsung,pin-function = <4>;
samsung,pin-pud = <3>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
sd0_clk: sd0-clk {
@@ -551,14 +551,14 @@
samsung,pins = "gpd0-2", "gpd0-3";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
dp_hpd: dp_hpd {
samsung,pins = "gpx0-7";
samsung,pin-function = <3>;
samsung,pin-pud = <0>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
};
@@ -649,42 +649,42 @@
"gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
samsung,pin-function = <3>;
samsung,pin-pud = <0>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
cam_i2c2_bus: cam-i2c2-bus {
samsung,pins = "gpe0-6", "gpe1-0";
samsung,pin-function = <4>;
samsung,pin-pud = <3>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
cam_spi1_bus: cam-spi1-bus {
samsung,pins = "gpe0-4", "gpe0-5", "gpf0-2", "gpf0-3";
samsung,pin-function = <4>;
samsung,pin-pud = <0>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
cam_i2c1_bus: cam-i2c1-bus {
samsung,pins = "gpf0-2", "gpf0-3";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
cam_i2c0_bus: cam-i2c0-bus {
samsung,pins = "gpf0-0", "gpf0-1";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
cam_spi0_bus: cam-spi0-bus {
samsung,pins = "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
cam_bayrgb_bus: cam-bayrgb-bus {
@@ -695,7 +695,7 @@
"gpg2-0", "gpg2-1";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
cam_port_a: cam-port-a {
@@ -704,7 +704,7 @@
"gph1-4", "gph1-5", "gph1-6", "gph1-7";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
};
@@ -756,7 +756,7 @@
"gpv1-4", "gpv1-5", "gpv1-6", "gpv1-7";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
c2c_txd: c2c-txd {
@@ -766,7 +766,7 @@
"gpv3-4", "gpv3-5", "gpv3-6", "gpv3-7";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samaung,pin-drv = <0>;
+ samsung,pin-drv = <0>;
};
};
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 2538b329f2ce..13746dfb20aa 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -140,11 +140,11 @@
};
};
- dwmmc0@12200000 {
+ mmc@12200000 {
+ status = "okay";
num-slots = <1>;
supports-highspeed;
broken-cd;
- fifo-depth = <0x80>;
card-detect-delay = <200>;
samsung,dw-mshc-ciu-div = <3>;
samsung,dw-mshc-sdr-timing = <2 3>;
@@ -158,14 +158,10 @@
};
};
- dwmmc1@12210000 {
- status = "disabled";
- };
-
- dwmmc2@12220000 {
+ mmc@12220000 {
+ status = "okay";
num-slots = <1>;
supports-highspeed;
- fifo-depth = <0x80>;
card-detect-delay = <200>;
samsung,dw-mshc-ciu-div = <3>;
samsung,dw-mshc-sdr-timing = <2 3>;
@@ -180,10 +176,6 @@
};
};
- dwmmc3@12230000 {
- status = "disabled";
- };
-
spi_0: spi@12d20000 {
status = "disabled";
};
@@ -231,14 +223,6 @@
status = "okay";
};
- i2s1: i2s@12D60000 {
- status = "disabled";
- };
-
- i2s2: i2s@12D70000 {
- status = "disabled";
- };
-
sound {
compatible = "samsung,smdk-wm8994";
diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts
index fd711e245e8d..a9395c426db4 100644
--- a/arch/arm/boot/dts/exynos5250-snow.dts
+++ b/arch/arm/boot/dts/exynos5250-snow.dts
@@ -175,7 +175,7 @@
* On Snow we've got SIP WiFi and so can keep drive strengths low to
* reduce EMI.
*/
- dwmmc3@12230000 {
+ mmc@12230000 {
slot@0 {
pinctrl-names = "default";
pinctrl-0 = <&sd3_clk &sd3_cmd &sd3_bus4>;
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index bbac42a78ce5..b98ffc3a5fe2 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -33,10 +33,10 @@
gsc1 = &gsc_1;
gsc2 = &gsc_2;
gsc3 = &gsc_3;
- mshc0 = &dwmmc_0;
- mshc1 = &dwmmc_1;
- mshc2 = &dwmmc_2;
- mshc3 = &dwmmc_3;
+ mshc0 = &mmc_0;
+ mshc1 = &mmc_1;
+ mshc2 = &mmc_2;
+ mshc3 = &mmc_3;
i2c0 = &i2c_0;
i2c1 = &i2c_1;
i2c2 = &i2c_2;
@@ -392,25 +392,43 @@
pinctrl-0 = <&spi2_bus>;
};
- dwmmc_0: dwmmc0@12200000 {
+ mmc_0: mmc@12200000 {
+ compatible = "samsung,exynos5250-dw-mshc";
+ interrupts = <0 75 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0x12200000 0x1000>;
clocks = <&clock 280>, <&clock 139>;
clock-names = "biu", "ciu";
+ fifo-depth = <0x80>;
+ status = "disabled";
};
- dwmmc_1: dwmmc1@12210000 {
+ mmc_1: mmc@12210000 {
+ compatible = "samsung,exynos5250-dw-mshc";
+ interrupts = <0 76 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0x12210000 0x1000>;
clocks = <&clock 281>, <&clock 140>;
clock-names = "biu", "ciu";
+ fifo-depth = <0x80>;
+ status = "disabled";
};
- dwmmc_2: dwmmc2@12220000 {
+ mmc_2: mmc@12220000 {
+ compatible = "samsung,exynos5250-dw-mshc";
+ interrupts = <0 77 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0x12220000 0x1000>;
clocks = <&clock 282>, <&clock 141>;
clock-names = "biu", "ciu";
+ fifo-depth = <0x80>;
+ status = "disabled";
};
- dwmmc_3: dwmmc3@12230000 {
+ mmc_3: mmc@12230000 {
compatible = "samsung,exynos5250-dw-mshc";
reg = <0x12230000 0x1000>;
interrupts = <0 78 0>;
@@ -418,10 +436,13 @@
#size-cells = <0>;
clocks = <&clock 283>, <&clock 142>;
clock-names = "biu", "ciu";
+ fifo-depth = <0x80>;
+ status = "disabled";
};
i2s0: i2s@03830000 {
compatible = "samsung,s5pv210-i2s";
+ status = "disabled";
reg = <0x03830000 0x100>;
dmas = <&pdma0 10
&pdma0 9
@@ -438,6 +459,7 @@
i2s1: i2s@12D60000 {
compatible = "samsung,s3c6410-i2s";
+ status = "disabled";
reg = <0x12D60000 0x100>;
dmas = <&pdma1 12
&pdma1 11>;
@@ -450,6 +472,7 @@
i2s2: i2s@12D70000 {
compatible = "samsung,s3c6410-i2s";
+ status = "disabled";
reg = <0x12D70000 0x100>;
dmas = <&pdma0 12
&pdma0 11>;
@@ -615,16 +638,18 @@
compatible = "samsung,exynos4212-hdmi";
reg = <0x14530000 0x70000>;
interrupts = <0 95 0>;
- clocks = <&clock 333>, <&clock 136>, <&clock 137>,
- <&clock 333>, <&clock 333>;
+ clocks = <&clock 344>, <&clock 136>, <&clock 137>,
+ <&clock 159>, <&clock 1024>;
clock-names = "hdmi", "sclk_hdmi", "sclk_pixel",
- "sclk_hdmiphy", "hdmiphy";
+ "sclk_hdmiphy", "mout_hdmi";
};
mixer {
compatible = "samsung,exynos5250-mixer";
reg = <0x14450000 0x10000>;
interrupts = <0 94 0>;
+ clocks = <&clock 343>, <&clock 136>;
+ clock-names = "mixer", "sclk_hdmi";
};
dp_phy: video-phy@10040720 {
diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts
index bafba25ba7c2..fb5a1e25c632 100644
--- a/arch/arm/boot/dts/exynos5420-smdk5420.dts
+++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts
@@ -31,6 +31,39 @@
};
};
+ mmc@12200000 {
+ status = "okay";
+ broken-cd;
+ supports-highspeed;
+ card-detect-delay = <200>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 4>;
+ samsung,dw-mshc-ddr-timing = <0 2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4 &sd0_bus8>;
+
+ slot@0 {
+ reg = <0>;
+ bus-width = <8>;
+ };
+ };
+
+ mmc@12220000 {
+ status = "okay";
+ supports-highspeed;
+ card-detect-delay = <200>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <2 3>;
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus4>;
+
+ slot@0 {
+ reg = <0>;
+ bus-width = <4>;
+ };
+ };
+
dp-controller@145B0000 {
pinctrl-names = "default";
pinctrl-0 = <&dp_hpd>;
@@ -61,4 +94,30 @@
};
};
+ pinctrl@13400000 {
+ hdmi_hpd_irq: hdmi-hpd-irq {
+ samsung,pins = "gpx3-7";
+ samsung,pin-function = <0>;
+ samsung,pin-pud = <1>;
+ samsung,pin-drv = <0>;
+ };
+ };
+
+ hdmi@14530000 {
+ status = "okay";
+ hpd-gpio = <&gpx3 7 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_hpd_irq>;
+ };
+
+ i2c_2: i2c@12C80000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <66000>;
+ status = "okay";
+
+ hdmiddc@50 {
+ compatible = "samsung,exynos4210-hdmiddc";
+ reg = <0x50>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index d537cd704e19..6ffefd163fa0 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -22,11 +22,20 @@
compatible = "samsung,exynos5420";
aliases {
+ mshc0 = &mmc_0;
+ mshc1 = &mmc_1;
+ mshc2 = &mmc_2;
pinctrl0 = &pinctrl_0;
pinctrl1 = &pinctrl_1;
pinctrl2 = &pinctrl_2;
pinctrl3 = &pinctrl_3;
pinctrl4 = &pinctrl_4;
+ i2c0 = &i2c_0;
+ i2c1 = &i2c_1;
+ i2c2 = &i2c_2;
+ i2c3 = &i2c_3;
+ gsc0 = &gsc_0;
+ gsc1 = &gsc_1;
};
cpus {
@@ -84,6 +93,42 @@
clock-names = "mfc";
};
+ mmc_0: mmc@12200000 {
+ compatible = "samsung,exynos5420-dw-mshc-smu";
+ interrupts = <0 75 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x12200000 0x2000>;
+ clocks = <&clock 351>, <&clock 132>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x40>;
+ status = "disabled";
+ };
+
+ mmc_1: mmc@12210000 {
+ compatible = "samsung,exynos5420-dw-mshc-smu";
+ interrupts = <0 76 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x12210000 0x2000>;
+ clocks = <&clock 352>, <&clock 133>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x40>;
+ status = "disabled";
+ };
+
+ mmc_2: mmc@12220000 {
+ compatible = "samsung,exynos5420-dw-mshc";
+ interrupts = <0 77 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x12220000 0x1000>;
+ clocks = <&clock 353>, <&clock 134>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x40>;
+ status = "disabled";
+ };
+
mct@101C0000 {
compatible = "samsung,exynos4210-mct";
reg = <0x101C0000 0x800>;
@@ -235,4 +280,93 @@
io-channel-ranges;
status = "disabled";
};
+
+ i2c_0: i2c@12C60000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x12C60000 0x100>;
+ interrupts = <0 56 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock 261>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_bus>;
+ status = "disabled";
+ };
+
+ i2c_1: i2c@12C70000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x12C70000 0x100>;
+ interrupts = <0 57 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock 262>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_bus>;
+ status = "disabled";
+ };
+
+ i2c_2: i2c@12C80000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x12C80000 0x100>;
+ interrupts = <0 58 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock 263>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_bus>;
+ status = "disabled";
+ };
+
+ i2c_3: i2c@12C90000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x12C90000 0x100>;
+ interrupts = <0 59 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock 264>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_bus>;
+ status = "disabled";
+ };
+
+ hdmi@14530000 {
+ compatible = "samsung,exynos4212-hdmi";
+ reg = <0x14530000 0x70000>;
+ interrupts = <0 95 0>;
+ clocks = <&clock 413>, <&clock 143>, <&clock 768>,
+ <&clock 158>, <&clock 640>;
+ clock-names = "hdmi", "sclk_hdmi", "sclk_pixel",
+ "sclk_hdmiphy", "mout_hdmi";
+ status = "disabled";
+ };
+
+ mixer@14450000 {
+ compatible = "samsung,exynos5420-mixer";
+ reg = <0x14450000 0x10000>;
+ interrupts = <0 94 0>;
+ clocks = <&clock 431>, <&clock 143>;
+ clock-names = "mixer", "sclk_hdmi";
+ };
+
+ gsc_0: video-scaler@13e00000 {
+ compatible = "samsung,exynos5-gsc";
+ reg = <0x13e00000 0x1000>;
+ interrupts = <0 85 0>;
+ clocks = <&clock 465>;
+ clock-names = "gscl";
+ samsung,power-domain = <&gsc_pd>;
+ };
+
+ gsc_1: video-scaler@13e10000 {
+ compatible = "samsung,exynos5-gsc";
+ reg = <0x13e10000 0x1000>;
+ interrupts = <0 86 0>;
+ clocks = <&clock 466>;
+ clock-names = "gscl";
+ samsung,power-domain = <&gsc_pd>;
+ };
};
diff --git a/arch/arm/boot/dts/exynos5440-sd5v1.dts b/arch/arm/boot/dts/exynos5440-sd5v1.dts
index 5b22508050da..f7c3d9ea4f9b 100644
--- a/arch/arm/boot/dts/exynos5440-sd5v1.dts
+++ b/arch/arm/boot/dts/exynos5440-sd5v1.dts
@@ -17,7 +17,7 @@
compatible = "samsung,sd5v1", "samsung,exynos5440";
chosen {
- bootargs = "root=/dev/sda2 rw rootwait ignore_loglevel early_printk no_console_suspend mem=2048M@0x80000000 mem=6144M@0x100000000 console=ttySAC0,115200";
+ bootargs = "root=/dev/sda2 rw rootwait ignore_loglevel earlyprintk no_console_suspend mem=2048M@0x80000000 mem=6144M@0x100000000 console=ttySAC0,115200";
};
fixed-rate-clocks {
@@ -36,4 +36,11 @@
status = "disabled";
};
+ pcie@290000 {
+ status = "disabled";
+ };
+
+ pcie@2a0000 {
+ status = "disabled";
+ };
};
diff --git a/arch/arm/boot/dts/exynos5440-ssdk5440.dts b/arch/arm/boot/dts/exynos5440-ssdk5440.dts
index ede772741f81..d58cb787061a 100644
--- a/arch/arm/boot/dts/exynos5440-ssdk5440.dts
+++ b/arch/arm/boot/dts/exynos5440-ssdk5440.dts
@@ -17,7 +17,7 @@
compatible = "samsung,ssdk5440", "samsung,exynos5440";
chosen {
- bootargs = "root=/dev/sda2 rw rootwait ignore_loglevel early_printk no_console_suspend mem=2048M@0x80000000 mem=6144M@0x100000000 console=ttySAC0,115200";
+ bootargs = "root=/dev/sda2 rw rootwait ignore_loglevel earlyprintk no_console_suspend mem=2048M@0x80000000 mem=6144M@0x100000000 console=ttySAC0,115200";
};
spi_0: spi@D0000 {
@@ -68,9 +68,11 @@
pcie@290000 {
reset-gpio = <&pin_ctrl 5 0>;
+ status = "okay";
};
pcie@2a0000 {
reset-gpio = <&pin_ctrl 22 0>;
+ status = "okay";
};
};
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index 5d6cf4965d6e..8da107088ce4 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -276,6 +276,7 @@
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0x0 0 &gic 53>;
num-lanes = <4>;
+ status = "disabled";
};
pcie@2a0000 {
@@ -296,5 +297,6 @@
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0x0 0 &gic 56>;
num-lanes = <4>;
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts
index 6aad34ad9517..ed14aeac0566 100644
--- a/arch/arm/boot/dts/highbank.dts
+++ b/arch/arm/boot/dts/highbank.dts
@@ -86,6 +86,12 @@
soc {
ranges = <0x00000000 0x00000000 0xffffffff>;
+ memory-controller@fff00000 {
+ compatible = "calxeda,hb-ddr-ctrl";
+ reg = <0xfff00000 0x1000>;
+ interrupts = <0 91 4>;
+ };
+
timer@fff10600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0xfff10600 0x20>;
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts
index 185c7c01102a..1f026adefd45 100644
--- a/arch/arm/boot/dts/imx23-evk.dts
+++ b/arch/arm/boot/dts/imx23-evk.dts
@@ -10,7 +10,7 @@
*/
/dts-v1/;
-/include/ "imx23.dtsi"
+#include "imx23.dtsi"
/ {
model = "Freescale i.MX23 Evaluation Kit";
@@ -45,14 +45,14 @@
hog_pins_a: hog@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1123 /* MX23_PAD_LCD_RESET__GPIO_1_18 */
- 0x11d3 /* MX23_PAD_PWM3__GPIO_1_29 */
- 0x11e3 /* MX23_PAD_PWM4__GPIO_1_30 */
- 0x2010 /* MX23_PAD_SSP1_DETECT__SSP1_DETECT */
+ MX23_PAD_LCD_RESET__GPIO_1_18
+ MX23_PAD_PWM3__GPIO_1_29
+ MX23_PAD_PWM4__GPIO_1_30
+ MX23_PAD_SSP1_DETECT__SSP1_DETECT
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
index fc766ae12e24..526bfdbd87f9 100644
--- a/arch/arm/boot/dts/imx23-olinuxino.dts
+++ b/arch/arm/boot/dts/imx23-olinuxino.dts
@@ -12,7 +12,7 @@
*/
/dts-v1/;
-/include/ "imx23.dtsi"
+#include "imx23.dtsi"
/ {
model = "i.MX23 Olinuxino Low Cost Board";
@@ -40,21 +40,21 @@
hog_pins_a: hog@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0113 /* MX23_PAD_GPMI_ALE__GPIO_0_17 */
+ MX23_PAD_GPMI_ALE__GPIO_0_17
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
led_pin_gpio2_1: led_gpio2_1@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2013 /* MX23_PAD_SSP1_DETECT__GPIO_2_1 */
+ MX23_PAD_SSP1_DETECT__GPIO_2_1
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
diff --git a/arch/arm/boot/dts/imx23-pinfunc.h b/arch/arm/boot/dts/imx23-pinfunc.h
new file mode 100644
index 000000000000..5c0f32ca3a93
--- /dev/null
+++ b/arch/arm/boot/dts/imx23-pinfunc.h
@@ -0,0 +1,333 @@
+/*
+ * Header providing constants for i.MX23 pinctrl bindings.
+ *
+ * Copyright (C) 2013 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __DT_BINDINGS_MX23_PINCTRL_H__
+#define __DT_BINDINGS_MX23_PINCTRL_H__
+
+#include "mxs-pinfunc.h"
+
+#define MX23_PAD_GPMI_D00__GPMI_D00 0x0000
+#define MX23_PAD_GPMI_D01__GPMI_D01 0x0010
+#define MX23_PAD_GPMI_D02__GPMI_D02 0x0020
+#define MX23_PAD_GPMI_D03__GPMI_D03 0x0030
+#define MX23_PAD_GPMI_D04__GPMI_D04 0x0040
+#define MX23_PAD_GPMI_D05__GPMI_D05 0x0050
+#define MX23_PAD_GPMI_D06__GPMI_D06 0x0060
+#define MX23_PAD_GPMI_D07__GPMI_D07 0x0070
+#define MX23_PAD_GPMI_D08__GPMI_D08 0x0080
+#define MX23_PAD_GPMI_D09__GPMI_D09 0x0090
+#define MX23_PAD_GPMI_D10__GPMI_D10 0x00a0
+#define MX23_PAD_GPMI_D11__GPMI_D11 0x00b0
+#define MX23_PAD_GPMI_D12__GPMI_D12 0x00c0
+#define MX23_PAD_GPMI_D13__GPMI_D13 0x00d0
+#define MX23_PAD_GPMI_D14__GPMI_D14 0x00e0
+#define MX23_PAD_GPMI_D15__GPMI_D15 0x00f0
+#define MX23_PAD_GPMI_CLE__GPMI_CLE 0x0100
+#define MX23_PAD_GPMI_ALE__GPMI_ALE 0x0110
+#define MX23_PAD_GPMI_CE2N__GPMI_CE2N 0x0120
+#define MX23_PAD_GPMI_RDY0__GPMI_RDY0 0x0130
+#define MX23_PAD_GPMI_RDY1__GPMI_RDY1 0x0140
+#define MX23_PAD_GPMI_RDY2__GPMI_RDY2 0x0150
+#define MX23_PAD_GPMI_RDY3__GPMI_RDY3 0x0160
+#define MX23_PAD_GPMI_WPN__GPMI_WPN 0x0170
+#define MX23_PAD_GPMI_WRN__GPMI_WRN 0x0180
+#define MX23_PAD_GPMI_RDN__GPMI_RDN 0x0190
+#define MX23_PAD_AUART1_CTS__AUART1_CTS 0x01a0
+#define MX23_PAD_AUART1_RTS__AUART1_RTS 0x01b0
+#define MX23_PAD_AUART1_RX__AUART1_RX 0x01c0
+#define MX23_PAD_AUART1_TX__AUART1_TX 0x01d0
+#define MX23_PAD_I2C_SCL__I2C_SCL 0x01e0
+#define MX23_PAD_I2C_SDA__I2C_SDA 0x01f0
+#define MX23_PAD_LCD_D00__LCD_D00 0x1000
+#define MX23_PAD_LCD_D01__LCD_D01 0x1010
+#define MX23_PAD_LCD_D02__LCD_D02 0x1020
+#define MX23_PAD_LCD_D03__LCD_D03 0x1030
+#define MX23_PAD_LCD_D04__LCD_D04 0x1040
+#define MX23_PAD_LCD_D05__LCD_D05 0x1050
+#define MX23_PAD_LCD_D06__LCD_D06 0x1060
+#define MX23_PAD_LCD_D07__LCD_D07 0x1070
+#define MX23_PAD_LCD_D08__LCD_D08 0x1080
+#define MX23_PAD_LCD_D09__LCD_D09 0x1090
+#define MX23_PAD_LCD_D10__LCD_D10 0x10a0
+#define MX23_PAD_LCD_D11__LCD_D11 0x10b0
+#define MX23_PAD_LCD_D12__LCD_D12 0x10c0
+#define MX23_PAD_LCD_D13__LCD_D13 0x10d0
+#define MX23_PAD_LCD_D14__LCD_D14 0x10e0
+#define MX23_PAD_LCD_D15__LCD_D15 0x10f0
+#define MX23_PAD_LCD_D16__LCD_D16 0x1100
+#define MX23_PAD_LCD_D17__LCD_D17 0x1110
+#define MX23_PAD_LCD_RESET__LCD_RESET 0x1120
+#define MX23_PAD_LCD_RS__LCD_RS 0x1130
+#define MX23_PAD_LCD_WR__LCD_WR 0x1140
+#define MX23_PAD_LCD_CS__LCD_CS 0x1150
+#define MX23_PAD_LCD_DOTCK__LCD_DOTCK 0x1160
+#define MX23_PAD_LCD_ENABLE__LCD_ENABLE 0x1170
+#define MX23_PAD_LCD_HSYNC__LCD_HSYNC 0x1180
+#define MX23_PAD_LCD_VSYNC__LCD_VSYNC 0x1190
+#define MX23_PAD_PWM0__PWM0 0x11a0
+#define MX23_PAD_PWM1__PWM1 0x11b0
+#define MX23_PAD_PWM2__PWM2 0x11c0
+#define MX23_PAD_PWM3__PWM3 0x11d0
+#define MX23_PAD_PWM4__PWM4 0x11e0
+#define MX23_PAD_SSP1_CMD__SSP1_CMD 0x2000
+#define MX23_PAD_SSP1_DETECT__SSP1_DETECT 0x2010
+#define MX23_PAD_SSP1_DATA0__SSP1_DATA0 0x2020
+#define MX23_PAD_SSP1_DATA1__SSP1_DATA1 0x2030
+#define MX23_PAD_SSP1_DATA2__SSP1_DATA2 0x2040
+#define MX23_PAD_SSP1_DATA3__SSP1_DATA3 0x2050
+#define MX23_PAD_SSP1_SCK__SSP1_SCK 0x2060
+#define MX23_PAD_ROTARYA__ROTARYA 0x2070
+#define MX23_PAD_ROTARYB__ROTARYB 0x2080
+#define MX23_PAD_EMI_A00__EMI_A00 0x2090
+#define MX23_PAD_EMI_A01__EMI_A01 0x20a0
+#define MX23_PAD_EMI_A02__EMI_A02 0x20b0
+#define MX23_PAD_EMI_A03__EMI_A03 0x20c0
+#define MX23_PAD_EMI_A04__EMI_A04 0x20d0
+#define MX23_PAD_EMI_A05__EMI_A05 0x20e0
+#define MX23_PAD_EMI_A06__EMI_A06 0x20f0
+#define MX23_PAD_EMI_A07__EMI_A07 0x2100
+#define MX23_PAD_EMI_A08__EMI_A08 0x2110
+#define MX23_PAD_EMI_A09__EMI_A09 0x2120
+#define MX23_PAD_EMI_A10__EMI_A10 0x2130
+#define MX23_PAD_EMI_A11__EMI_A11 0x2140
+#define MX23_PAD_EMI_A12__EMI_A12 0x2150
+#define MX23_PAD_EMI_BA0__EMI_BA0 0x2160
+#define MX23_PAD_EMI_BA1__EMI_BA1 0x2170
+#define MX23_PAD_EMI_CASN__EMI_CASN 0x2180
+#define MX23_PAD_EMI_CE0N__EMI_CE0N 0x2190
+#define MX23_PAD_EMI_CE1N__EMI_CE1N 0x21a0
+#define MX23_PAD_GPMI_CE1N__GPMI_CE1N 0x21b0
+#define MX23_PAD_GPMI_CE0N__GPMI_CE0N 0x21c0
+#define MX23_PAD_EMI_CKE__EMI_CKE 0x21d0
+#define MX23_PAD_EMI_RASN__EMI_RASN 0x21e0
+#define MX23_PAD_EMI_WEN__EMI_WEN 0x21f0
+#define MX23_PAD_EMI_D00__EMI_D00 0x3000
+#define MX23_PAD_EMI_D01__EMI_D01 0x3010
+#define MX23_PAD_EMI_D02__EMI_D02 0x3020
+#define MX23_PAD_EMI_D03__EMI_D03 0x3030
+#define MX23_PAD_EMI_D04__EMI_D04 0x3040
+#define MX23_PAD_EMI_D05__EMI_D05 0x3050
+#define MX23_PAD_EMI_D06__EMI_D06 0x3060
+#define MX23_PAD_EMI_D07__EMI_D07 0x3070
+#define MX23_PAD_EMI_D08__EMI_D08 0x3080
+#define MX23_PAD_EMI_D09__EMI_D09 0x3090
+#define MX23_PAD_EMI_D10__EMI_D10 0x30a0
+#define MX23_PAD_EMI_D11__EMI_D11 0x30b0
+#define MX23_PAD_EMI_D12__EMI_D12 0x30c0
+#define MX23_PAD_EMI_D13__EMI_D13 0x30d0
+#define MX23_PAD_EMI_D14__EMI_D14 0x30e0
+#define MX23_PAD_EMI_D15__EMI_D15 0x30f0
+#define MX23_PAD_EMI_DQM0__EMI_DQM0 0x3100
+#define MX23_PAD_EMI_DQM1__EMI_DQM1 0x3110
+#define MX23_PAD_EMI_DQS0__EMI_DQS0 0x3120
+#define MX23_PAD_EMI_DQS1__EMI_DQS1 0x3130
+#define MX23_PAD_EMI_CLK__EMI_CLK 0x3140
+#define MX23_PAD_EMI_CLKN__EMI_CLKN 0x3150
+#define MX23_PAD_GPMI_D00__LCD_D8 0x0001
+#define MX23_PAD_GPMI_D01__LCD_D9 0x0011
+#define MX23_PAD_GPMI_D02__LCD_D10 0x0021
+#define MX23_PAD_GPMI_D03__LCD_D11 0x0031
+#define MX23_PAD_GPMI_D04__LCD_D12 0x0041
+#define MX23_PAD_GPMI_D05__LCD_D13 0x0051
+#define MX23_PAD_GPMI_D06__LCD_D14 0x0061
+#define MX23_PAD_GPMI_D07__LCD_D15 0x0071
+#define MX23_PAD_GPMI_D08__LCD_D18 0x0081
+#define MX23_PAD_GPMI_D09__LCD_D19 0x0091
+#define MX23_PAD_GPMI_D10__LCD_D20 0x00a1
+#define MX23_PAD_GPMI_D11__LCD_D21 0x00b1
+#define MX23_PAD_GPMI_D12__LCD_D22 0x00c1
+#define MX23_PAD_GPMI_D13__LCD_D23 0x00d1
+#define MX23_PAD_GPMI_D14__AUART2_RX 0x00e1
+#define MX23_PAD_GPMI_D15__AUART2_TX 0x00f1
+#define MX23_PAD_GPMI_CLE__LCD_D16 0x0101
+#define MX23_PAD_GPMI_ALE__LCD_D17 0x0111
+#define MX23_PAD_GPMI_CE2N__ATA_A2 0x0121
+#define MX23_PAD_AUART1_RTS__IR_CLK 0x01b1
+#define MX23_PAD_AUART1_RX__IR_RX 0x01c1
+#define MX23_PAD_AUART1_TX__IR_TX 0x01d1
+#define MX23_PAD_I2C_SCL__GPMI_RDY2 0x01e1
+#define MX23_PAD_I2C_SDA__GPMI_CE2N 0x01f1
+#define MX23_PAD_LCD_D00__ETM_DA8 0x1001
+#define MX23_PAD_LCD_D01__ETM_DA9 0x1011
+#define MX23_PAD_LCD_D02__ETM_DA10 0x1021
+#define MX23_PAD_LCD_D03__ETM_DA11 0x1031
+#define MX23_PAD_LCD_D04__ETM_DA12 0x1041
+#define MX23_PAD_LCD_D05__ETM_DA13 0x1051
+#define MX23_PAD_LCD_D06__ETM_DA14 0x1061
+#define MX23_PAD_LCD_D07__ETM_DA15 0x1071
+#define MX23_PAD_LCD_D08__ETM_DA0 0x1081
+#define MX23_PAD_LCD_D09__ETM_DA1 0x1091
+#define MX23_PAD_LCD_D10__ETM_DA2 0x10a1
+#define MX23_PAD_LCD_D11__ETM_DA3 0x10b1
+#define MX23_PAD_LCD_D12__ETM_DA4 0x10c1
+#define MX23_PAD_LCD_D13__ETM_DA5 0x10d1
+#define MX23_PAD_LCD_D14__ETM_DA6 0x10e1
+#define MX23_PAD_LCD_D15__ETM_DA7 0x10f1
+#define MX23_PAD_LCD_RESET__ETM_TCTL 0x1121
+#define MX23_PAD_LCD_RS__ETM_TCLK 0x1131
+#define MX23_PAD_LCD_DOTCK__GPMI_RDY3 0x1161
+#define MX23_PAD_LCD_ENABLE__I2C_SCL 0x1171
+#define MX23_PAD_LCD_HSYNC__I2C_SDA 0x1181
+#define MX23_PAD_LCD_VSYNC__LCD_BUSY 0x1191
+#define MX23_PAD_PWM0__ROTARYA 0x11a1
+#define MX23_PAD_PWM1__ROTARYB 0x11b1
+#define MX23_PAD_PWM2__GPMI_RDY3 0x11c1
+#define MX23_PAD_PWM3__ETM_TCTL 0x11d1
+#define MX23_PAD_PWM4__ETM_TCLK 0x11e1
+#define MX23_PAD_SSP1_DETECT__GPMI_CE3N 0x2011
+#define MX23_PAD_SSP1_DATA1__I2C_SCL 0x2031
+#define MX23_PAD_SSP1_DATA2__I2C_SDA 0x2041
+#define MX23_PAD_ROTARYA__AUART2_RTS 0x2071
+#define MX23_PAD_ROTARYB__AUART2_CTS 0x2081
+#define MX23_PAD_GPMI_D00__SSP2_DATA0 0x0002
+#define MX23_PAD_GPMI_D01__SSP2_DATA1 0x0012
+#define MX23_PAD_GPMI_D02__SSP2_DATA2 0x0022
+#define MX23_PAD_GPMI_D03__SSP2_DATA3 0x0032
+#define MX23_PAD_GPMI_D04__SSP2_DATA4 0x0042
+#define MX23_PAD_GPMI_D05__SSP2_DATA5 0x0052
+#define MX23_PAD_GPMI_D06__SSP2_DATA6 0x0062
+#define MX23_PAD_GPMI_D07__SSP2_DATA7 0x0072
+#define MX23_PAD_GPMI_D08__SSP1_DATA4 0x0082
+#define MX23_PAD_GPMI_D09__SSP1_DATA5 0x0092
+#define MX23_PAD_GPMI_D10__SSP1_DATA6 0x00a2
+#define MX23_PAD_GPMI_D11__SSP1_DATA7 0x00b2
+#define MX23_PAD_GPMI_D15__GPMI_CE3N 0x00f2
+#define MX23_PAD_GPMI_RDY0__SSP2_DETECT 0x0132
+#define MX23_PAD_GPMI_RDY1__SSP2_CMD 0x0142
+#define MX23_PAD_GPMI_WRN__SSP2_SCK 0x0182
+#define MX23_PAD_AUART1_CTS__SSP1_DATA4 0x01a2
+#define MX23_PAD_AUART1_RTS__SSP1_DATA5 0x01b2
+#define MX23_PAD_AUART1_RX__SSP1_DATA6 0x01c2
+#define MX23_PAD_AUART1_TX__SSP1_DATA7 0x01d2
+#define MX23_PAD_I2C_SCL__AUART1_TX 0x01e2
+#define MX23_PAD_I2C_SDA__AUART1_RX 0x01f2
+#define MX23_PAD_LCD_D08__SAIF2_SDATA0 0x1082
+#define MX23_PAD_LCD_D09__SAIF1_SDATA0 0x1092
+#define MX23_PAD_LCD_D10__SAIF_MCLK_BITCLK 0x10a2
+#define MX23_PAD_LCD_D11__SAIF_LRCLK 0x10b2
+#define MX23_PAD_LCD_D12__SAIF2_SDATA1 0x10c2
+#define MX23_PAD_LCD_D13__SAIF2_SDATA2 0x10d2
+#define MX23_PAD_LCD_D14__SAIF1_SDATA2 0x10e2
+#define MX23_PAD_LCD_D15__SAIF1_SDATA1 0x10f2
+#define MX23_PAD_LCD_D16__SAIF_ALT_BITCLK 0x1102
+#define MX23_PAD_LCD_RESET__GPMI_CE3N 0x1122
+#define MX23_PAD_PWM0__DUART_RX 0x11a2
+#define MX23_PAD_PWM1__DUART_TX 0x11b2
+#define MX23_PAD_PWM3__AUART1_CTS 0x11d2
+#define MX23_PAD_PWM4__AUART1_RTS 0x11e2
+#define MX23_PAD_SSP1_CMD__JTAG_TDO 0x2002
+#define MX23_PAD_SSP1_DETECT__USB_OTG_ID 0x2012
+#define MX23_PAD_SSP1_DATA0__JTAG_TDI 0x2022
+#define MX23_PAD_SSP1_DATA1__JTAG_TCLK 0x2032
+#define MX23_PAD_SSP1_DATA2__JTAG_RTCK 0x2042
+#define MX23_PAD_SSP1_DATA3__JTAG_TMS 0x2052
+#define MX23_PAD_SSP1_SCK__JTAG_TRST 0x2062
+#define MX23_PAD_ROTARYA__SPDIF 0x2072
+#define MX23_PAD_ROTARYB__GPMI_CE3N 0x2082
+#define MX23_PAD_GPMI_D00__GPIO_0_0 0x0003
+#define MX23_PAD_GPMI_D01__GPIO_0_1 0x0013
+#define MX23_PAD_GPMI_D02__GPIO_0_2 0x0023
+#define MX23_PAD_GPMI_D03__GPIO_0_3 0x0033
+#define MX23_PAD_GPMI_D04__GPIO_0_4 0x0043
+#define MX23_PAD_GPMI_D05__GPIO_0_5 0x0053
+#define MX23_PAD_GPMI_D06__GPIO_0_6 0x0063
+#define MX23_PAD_GPMI_D07__GPIO_0_7 0x0073
+#define MX23_PAD_GPMI_D08__GPIO_0_8 0x0083
+#define MX23_PAD_GPMI_D09__GPIO_0_9 0x0093
+#define MX23_PAD_GPMI_D10__GPIO_0_10 0x00a3
+#define MX23_PAD_GPMI_D11__GPIO_0_11 0x00b3
+#define MX23_PAD_GPMI_D12__GPIO_0_12 0x00c3
+#define MX23_PAD_GPMI_D13__GPIO_0_13 0x00d3
+#define MX23_PAD_GPMI_D14__GPIO_0_14 0x00e3
+#define MX23_PAD_GPMI_D15__GPIO_0_15 0x00f3
+#define MX23_PAD_GPMI_CLE__GPIO_0_16 0x0103
+#define MX23_PAD_GPMI_ALE__GPIO_0_17 0x0113
+#define MX23_PAD_GPMI_CE2N__GPIO_0_18 0x0123
+#define MX23_PAD_GPMI_RDY0__GPIO_0_19 0x0133
+#define MX23_PAD_GPMI_RDY1__GPIO_0_20 0x0143
+#define MX23_PAD_GPMI_RDY2__GPIO_0_21 0x0153
+#define MX23_PAD_GPMI_RDY3__GPIO_0_22 0x0163
+#define MX23_PAD_GPMI_WPN__GPIO_0_23 0x0173
+#define MX23_PAD_GPMI_WRN__GPIO_0_24 0x0183
+#define MX23_PAD_GPMI_RDN__GPIO_0_25 0x0193
+#define MX23_PAD_AUART1_CTS__GPIO_0_26 0x01a3
+#define MX23_PAD_AUART1_RTS__GPIO_0_27 0x01b3
+#define MX23_PAD_AUART1_RX__GPIO_0_28 0x01c3
+#define MX23_PAD_AUART1_TX__GPIO_0_29 0x01d3
+#define MX23_PAD_I2C_SCL__GPIO_0_30 0x01e3
+#define MX23_PAD_I2C_SDA__GPIO_0_31 0x01f3
+#define MX23_PAD_LCD_D00__GPIO_1_0 0x1003
+#define MX23_PAD_LCD_D01__GPIO_1_1 0x1013
+#define MX23_PAD_LCD_D02__GPIO_1_2 0x1023
+#define MX23_PAD_LCD_D03__GPIO_1_3 0x1033
+#define MX23_PAD_LCD_D04__GPIO_1_4 0x1043
+#define MX23_PAD_LCD_D05__GPIO_1_5 0x1053
+#define MX23_PAD_LCD_D06__GPIO_1_6 0x1063
+#define MX23_PAD_LCD_D07__GPIO_1_7 0x1073
+#define MX23_PAD_LCD_D08__GPIO_1_8 0x1083
+#define MX23_PAD_LCD_D09__GPIO_1_9 0x1093
+#define MX23_PAD_LCD_D10__GPIO_1_10 0x10a3
+#define MX23_PAD_LCD_D11__GPIO_1_11 0x10b3
+#define MX23_PAD_LCD_D12__GPIO_1_12 0x10c3
+#define MX23_PAD_LCD_D13__GPIO_1_13 0x10d3
+#define MX23_PAD_LCD_D14__GPIO_1_14 0x10e3
+#define MX23_PAD_LCD_D15__GPIO_1_15 0x10f3
+#define MX23_PAD_LCD_D16__GPIO_1_16 0x1103
+#define MX23_PAD_LCD_D17__GPIO_1_17 0x1113
+#define MX23_PAD_LCD_RESET__GPIO_1_18 0x1123
+#define MX23_PAD_LCD_RS__GPIO_1_19 0x1133
+#define MX23_PAD_LCD_WR__GPIO_1_20 0x1143
+#define MX23_PAD_LCD_CS__GPIO_1_21 0x1153
+#define MX23_PAD_LCD_DOTCK__GPIO_1_22 0x1163
+#define MX23_PAD_LCD_ENABLE__GPIO_1_23 0x1173
+#define MX23_PAD_LCD_HSYNC__GPIO_1_24 0x1183
+#define MX23_PAD_LCD_VSYNC__GPIO_1_25 0x1193
+#define MX23_PAD_PWM0__GPIO_1_26 0x11a3
+#define MX23_PAD_PWM1__GPIO_1_27 0x11b3
+#define MX23_PAD_PWM2__GPIO_1_28 0x11c3
+#define MX23_PAD_PWM3__GPIO_1_29 0x11d3
+#define MX23_PAD_PWM4__GPIO_1_30 0x11e3
+#define MX23_PAD_SSP1_CMD__GPIO_2_0 0x2003
+#define MX23_PAD_SSP1_DETECT__GPIO_2_1 0x2013
+#define MX23_PAD_SSP1_DATA0__GPIO_2_2 0x2023
+#define MX23_PAD_SSP1_DATA1__GPIO_2_3 0x2033
+#define MX23_PAD_SSP1_DATA2__GPIO_2_4 0x2043
+#define MX23_PAD_SSP1_DATA3__GPIO_2_5 0x2053
+#define MX23_PAD_SSP1_SCK__GPIO_2_6 0x2063
+#define MX23_PAD_ROTARYA__GPIO_2_7 0x2073
+#define MX23_PAD_ROTARYB__GPIO_2_8 0x2083
+#define MX23_PAD_EMI_A00__GPIO_2_9 0x2093
+#define MX23_PAD_EMI_A01__GPIO_2_10 0x20a3
+#define MX23_PAD_EMI_A02__GPIO_2_11 0x20b3
+#define MX23_PAD_EMI_A03__GPIO_2_12 0x20c3
+#define MX23_PAD_EMI_A04__GPIO_2_13 0x20d3
+#define MX23_PAD_EMI_A05__GPIO_2_14 0x20e3
+#define MX23_PAD_EMI_A06__GPIO_2_15 0x20f3
+#define MX23_PAD_EMI_A07__GPIO_2_16 0x2103
+#define MX23_PAD_EMI_A08__GPIO_2_17 0x2113
+#define MX23_PAD_EMI_A09__GPIO_2_18 0x2123
+#define MX23_PAD_EMI_A10__GPIO_2_19 0x2133
+#define MX23_PAD_EMI_A11__GPIO_2_20 0x2143
+#define MX23_PAD_EMI_A12__GPIO_2_21 0x2153
+#define MX23_PAD_EMI_BA0__GPIO_2_22 0x2163
+#define MX23_PAD_EMI_BA1__GPIO_2_23 0x2173
+#define MX23_PAD_EMI_CASN__GPIO_2_24 0x2183
+#define MX23_PAD_EMI_CE0N__GPIO_2_25 0x2193
+#define MX23_PAD_EMI_CE1N__GPIO_2_26 0x21a3
+#define MX23_PAD_GPMI_CE1N__GPIO_2_27 0x21b3
+#define MX23_PAD_GPMI_CE0N__GPIO_2_28 0x21c3
+#define MX23_PAD_EMI_CKE__GPIO_2_29 0x21d3
+#define MX23_PAD_EMI_RASN__GPIO_2_30 0x21e3
+#define MX23_PAD_EMI_WEN__GPIO_2_31 0x21f3
+
+#endif /* __DT_BINDINGS_MX23_PINCTRL_H__ */
diff --git a/arch/arm/boot/dts/imx23-stmp378x_devb.dts b/arch/arm/boot/dts/imx23-stmp378x_devb.dts
index 85c3864b6a56..cb64e2b191ea 100644
--- a/arch/arm/boot/dts/imx23-stmp378x_devb.dts
+++ b/arch/arm/boot/dts/imx23-stmp378x_devb.dts
@@ -10,7 +10,7 @@
*/
/dts-v1/;
-/include/ "imx23.dtsi"
+#include "imx23.dtsi"
/ {
model = "Freescale STMP378x Development Board";
@@ -39,12 +39,12 @@
hog_pins_a: hog@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x11d3 /* MX23_PAD_PWM3__GPIO_1_29 */
- 0x11e3 /* MX23_PAD_PWM4__GPIO_1_30 */
+ MX23_PAD_PWM3__GPIO_1_29
+ MX23_PAD_PWM4__GPIO_1_30
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
};
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index 28b5ce289662..87faa6e8b6e7 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -9,7 +9,8 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-/include/ "skeleton.dtsi"
+#include "skeleton.dtsi"
+#include "imx23-pinfunc.h"
/ {
interrupt-parent = <&icoll>;
@@ -137,174 +138,174 @@
duart_pins_a: duart@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x11a2 /* MX23_PAD_PWM0__DUART_RX */
- 0x11b2 /* MX23_PAD_PWM1__DUART_TX */
+ MX23_PAD_PWM0__DUART_RX
+ MX23_PAD_PWM1__DUART_TX
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
auart0_pins_a: auart0@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x01c0 /* MX23_PAD_AUART1_RX__AUART1_RX */
- 0x01d0 /* MX23_PAD_AUART1_TX__AUART1_TX */
- 0x01a0 /* MX23_PAD_AUART1_CTS__AUART1_CTS */
- 0x01b0 /* MX23_PAD_AUART1_RTS__AUART1_RTS */
+ MX23_PAD_AUART1_RX__AUART1_RX
+ MX23_PAD_AUART1_TX__AUART1_TX
+ MX23_PAD_AUART1_CTS__AUART1_CTS
+ MX23_PAD_AUART1_RTS__AUART1_RTS
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
auart0_2pins_a: auart0-2pins@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x01e2 /* MX23_PAD_I2C_SCL__AUART1_TX */
- 0x01f2 /* MX23_PAD_I2C_SDA__AUART1_RX */
+ MX23_PAD_I2C_SCL__AUART1_TX
+ MX23_PAD_I2C_SDA__AUART1_RX
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
gpmi_pins_a: gpmi-nand@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0000 /* MX23_PAD_GPMI_D00__GPMI_D00 */
- 0x0010 /* MX23_PAD_GPMI_D01__GPMI_D01 */
- 0x0020 /* MX23_PAD_GPMI_D02__GPMI_D02 */
- 0x0030 /* MX23_PAD_GPMI_D03__GPMI_D03 */
- 0x0040 /* MX23_PAD_GPMI_D04__GPMI_D04 */
- 0x0050 /* MX23_PAD_GPMI_D05__GPMI_D05 */
- 0x0060 /* MX23_PAD_GPMI_D06__GPMI_D06 */
- 0x0070 /* MX23_PAD_GPMI_D07__GPMI_D07 */
- 0x0100 /* MX23_PAD_GPMI_CLE__GPMI_CLE */
- 0x0110 /* MX23_PAD_GPMI_ALE__GPMI_ALE */
- 0x0130 /* MX23_PAD_GPMI_RDY0__GPMI_RDY0 */
- 0x0140 /* MX23_PAD_GPMI_RDY1__GPMI_RDY1 */
- 0x0170 /* MX23_PAD_GPMI_WPN__GPMI_WPN */
- 0x0180 /* MX23_PAD_GPMI_WRN__GPMI_WRN */
- 0x0190 /* MX23_PAD_GPMI_RDN__GPMI_RDN */
- 0x21b0 /* MX23_PAD_GPMI_CE1N__GPMI_CE1N */
- 0x21c0 /* MX23_PAD_GPMI_CE0N__GPMI_CE0N */
+ MX23_PAD_GPMI_D00__GPMI_D00
+ MX23_PAD_GPMI_D01__GPMI_D01
+ MX23_PAD_GPMI_D02__GPMI_D02
+ MX23_PAD_GPMI_D03__GPMI_D03
+ MX23_PAD_GPMI_D04__GPMI_D04
+ MX23_PAD_GPMI_D05__GPMI_D05
+ MX23_PAD_GPMI_D06__GPMI_D06
+ MX23_PAD_GPMI_D07__GPMI_D07
+ MX23_PAD_GPMI_CLE__GPMI_CLE
+ MX23_PAD_GPMI_ALE__GPMI_ALE
+ MX23_PAD_GPMI_RDY0__GPMI_RDY0
+ MX23_PAD_GPMI_RDY1__GPMI_RDY1
+ MX23_PAD_GPMI_WPN__GPMI_WPN
+ MX23_PAD_GPMI_WRN__GPMI_WRN
+ MX23_PAD_GPMI_RDN__GPMI_RDN
+ MX23_PAD_GPMI_CE1N__GPMI_CE1N
+ MX23_PAD_GPMI_CE0N__GPMI_CE0N
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
gpmi_pins_fixup: gpmi-pins-fixup {
fsl,pinmux-ids = <
- 0x0170 /* MX23_PAD_GPMI_WPN__GPMI_WPN */
- 0x0180 /* MX23_PAD_GPMI_WRN__GPMI_WRN */
- 0x0190 /* MX23_PAD_GPMI_RDN__GPMI_RDN */
+ MX23_PAD_GPMI_WPN__GPMI_WPN
+ MX23_PAD_GPMI_WRN__GPMI_WRN
+ MX23_PAD_GPMI_RDN__GPMI_RDN
>;
- fsl,drive-strength = <2>;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
};
mmc0_4bit_pins_a: mmc0-4bit@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2020 /* MX23_PAD_SSP1_DATA0__SSP1_DATA0 */
- 0x2030 /* MX23_PAD_SSP1_DATA1__SSP1_DATA1 */
- 0x2040 /* MX23_PAD_SSP1_DATA2__SSP1_DATA2 */
- 0x2050 /* MX23_PAD_SSP1_DATA3__SSP1_DATA3 */
- 0x2000 /* MX23_PAD_SSP1_CMD__SSP1_CMD */
- 0x2060 /* MX23_PAD_SSP1_SCK__SSP1_SCK */
+ MX23_PAD_SSP1_DATA0__SSP1_DATA0
+ MX23_PAD_SSP1_DATA1__SSP1_DATA1
+ MX23_PAD_SSP1_DATA2__SSP1_DATA2
+ MX23_PAD_SSP1_DATA3__SSP1_DATA3
+ MX23_PAD_SSP1_CMD__SSP1_CMD
+ MX23_PAD_SSP1_SCK__SSP1_SCK
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
mmc0_8bit_pins_a: mmc0-8bit@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2020 /* MX23_PAD_SSP1_DATA0__SSP1_DATA0 */
- 0x2030 /* MX23_PAD_SSP1_DATA1__SSP1_DATA1 */
- 0x2040 /* MX23_PAD_SSP1_DATA2__SSP1_DATA2 */
- 0x2050 /* MX23_PAD_SSP1_DATA3__SSP1_DATA3 */
- 0x0082 /* MX23_PAD_GPMI_D08__SSP1_DATA4 */
- 0x0092 /* MX23_PAD_GPMI_D09__SSP1_DATA5 */
- 0x00a2 /* MX23_PAD_GPMI_D10__SSP1_DATA6 */
- 0x00b2 /* MX23_PAD_GPMI_D11__SSP1_DATA7 */
- 0x2000 /* MX23_PAD_SSP1_CMD__SSP1_CMD */
- 0x2010 /* MX23_PAD_SSP1_DETECT__SSP1_DETECT */
- 0x2060 /* MX23_PAD_SSP1_SCK__SSP1_SCK */
+ MX23_PAD_SSP1_DATA0__SSP1_DATA0
+ MX23_PAD_SSP1_DATA1__SSP1_DATA1
+ MX23_PAD_SSP1_DATA2__SSP1_DATA2
+ MX23_PAD_SSP1_DATA3__SSP1_DATA3
+ MX23_PAD_GPMI_D08__SSP1_DATA4
+ MX23_PAD_GPMI_D09__SSP1_DATA5
+ MX23_PAD_GPMI_D10__SSP1_DATA6
+ MX23_PAD_GPMI_D11__SSP1_DATA7
+ MX23_PAD_SSP1_CMD__SSP1_CMD
+ MX23_PAD_SSP1_DETECT__SSP1_DETECT
+ MX23_PAD_SSP1_SCK__SSP1_SCK
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
mmc0_pins_fixup: mmc0-pins-fixup {
fsl,pinmux-ids = <
- 0x2010 /* MX23_PAD_SSP1_DETECT__SSP1_DETECT */
- 0x2060 /* MX23_PAD_SSP1_SCK__SSP1_SCK */
+ MX23_PAD_SSP1_DETECT__SSP1_DETECT
+ MX23_PAD_SSP1_SCK__SSP1_SCK
>;
- fsl,pull-up = <0>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
pwm2_pins_a: pwm2@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x11c0 /* MX23_PAD_PWM2__PWM2 */
+ MX23_PAD_PWM2__PWM2
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_24bit_pins_a: lcdif-24bit@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1000 /* MX23_PAD_LCD_D00__LCD_D0 */
- 0x1010 /* MX23_PAD_LCD_D01__LCD_D1 */
- 0x1020 /* MX23_PAD_LCD_D02__LCD_D2 */
- 0x1030 /* MX23_PAD_LCD_D03__LCD_D3 */
- 0x1040 /* MX23_PAD_LCD_D04__LCD_D4 */
- 0x1050 /* MX23_PAD_LCD_D05__LCD_D5 */
- 0x1060 /* MX23_PAD_LCD_D06__LCD_D6 */
- 0x1070 /* MX23_PAD_LCD_D07__LCD_D7 */
- 0x1080 /* MX23_PAD_LCD_D08__LCD_D8 */
- 0x1090 /* MX23_PAD_LCD_D09__LCD_D9 */
- 0x10a0 /* MX23_PAD_LCD_D10__LCD_D10 */
- 0x10b0 /* MX23_PAD_LCD_D11__LCD_D11 */
- 0x10c0 /* MX23_PAD_LCD_D12__LCD_D12 */
- 0x10d0 /* MX23_PAD_LCD_D13__LCD_D13 */
- 0x10e0 /* MX23_PAD_LCD_D14__LCD_D14 */
- 0x10f0 /* MX23_PAD_LCD_D15__LCD_D15 */
- 0x1100 /* MX23_PAD_LCD_D16__LCD_D16 */
- 0x1110 /* MX23_PAD_LCD_D17__LCD_D17 */
- 0x0081 /* MX23_PAD_GPMI_D08__LCD_D18 */
- 0x0091 /* MX23_PAD_GPMI_D09__LCD_D19 */
- 0x00a1 /* MX23_PAD_GPMI_D10__LCD_D20 */
- 0x00b1 /* MX23_PAD_GPMI_D11__LCD_D21 */
- 0x00c1 /* MX23_PAD_GPMI_D12__LCD_D22 */
- 0x00d1 /* MX23_PAD_GPMI_D13__LCD_D23 */
- 0x1160 /* MX23_PAD_LCD_DOTCK__LCD_DOTCK */
- 0x1170 /* MX23_PAD_LCD_ENABLE__LCD_ENABLE */
- 0x1180 /* MX23_PAD_LCD_HSYNC__LCD_HSYNC */
- 0x1190 /* MX23_PAD_LCD_VSYNC__LCD_VSYNC */
+ MX23_PAD_LCD_D00__LCD_D00
+ MX23_PAD_LCD_D01__LCD_D01
+ MX23_PAD_LCD_D02__LCD_D02
+ MX23_PAD_LCD_D03__LCD_D03
+ MX23_PAD_LCD_D04__LCD_D04
+ MX23_PAD_LCD_D05__LCD_D05
+ MX23_PAD_LCD_D06__LCD_D06
+ MX23_PAD_LCD_D07__LCD_D07
+ MX23_PAD_LCD_D08__LCD_D08
+ MX23_PAD_LCD_D09__LCD_D09
+ MX23_PAD_LCD_D10__LCD_D10
+ MX23_PAD_LCD_D11__LCD_D11
+ MX23_PAD_LCD_D12__LCD_D12
+ MX23_PAD_LCD_D13__LCD_D13
+ MX23_PAD_LCD_D14__LCD_D14
+ MX23_PAD_LCD_D15__LCD_D15
+ MX23_PAD_LCD_D16__LCD_D16
+ MX23_PAD_LCD_D17__LCD_D17
+ MX23_PAD_GPMI_D08__LCD_D18
+ MX23_PAD_GPMI_D09__LCD_D19
+ MX23_PAD_GPMI_D10__LCD_D20
+ MX23_PAD_GPMI_D11__LCD_D21
+ MX23_PAD_GPMI_D12__LCD_D22
+ MX23_PAD_GPMI_D13__LCD_D23
+ MX23_PAD_LCD_DOTCK__LCD_DOTCK
+ MX23_PAD_LCD_ENABLE__LCD_ENABLE
+ MX23_PAD_LCD_HSYNC__LCD_HSYNC
+ MX23_PAD_LCD_VSYNC__LCD_VSYNC
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
spi2_pins_a: spi2@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0182 /* MX23_PAD_GPMI_WRN__SSP2_SCK */
- 0x0142 /* MX23_PAD_GPMI_RDY1__SSP2_CMD */
- 0x0002 /* MX23_PAD_GPMI_D00__SSP2_DATA0 */
- 0x0032 /* MX23_PAD_GPMI_D03__SSP2_DATA3 */
+ MX23_PAD_GPMI_WRN__SSP2_SCK
+ MX23_PAD_GPMI_RDY1__SSP2_CMD
+ MX23_PAD_GPMI_D00__SSP2_DATA0
+ MX23_PAD_GPMI_D03__SSP2_DATA3
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
};
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 737ed5da8f71..623ed553b090 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -178,7 +178,7 @@
reg = <0x43fac000 0x4000>;
};
- audmux@43fb0000 {
+ audmux: audmux@43fb0000 {
compatible = "fsl,imx25-audmux", "fsl,imx31-audmux";
reg = <0x43fb0000 0x4000>;
status = "disabled";
@@ -236,6 +236,11 @@
compatible = "fsl,imx25-ssi", "fsl,imx21-ssi";
reg = <0x50014000 0x4000>;
interrupts = <11>;
+ clocks = <&clks 118>;
+ clock-names = "ipg";
+ dmas = <&sdma 24 1 0>,
+ <&sdma 25 1 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -266,6 +271,11 @@
compatible = "fsl,imx25-ssi", "fsl,imx21-ssi";
reg = <0x50034000 0x4000>;
interrupts = <12>;
+ clocks = <&clks 117>;
+ clock-names = "ipg";
+ dmas = <&sdma 28 1 0>,
+ <&sdma 29 1 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -436,13 +446,14 @@
#interrupt-cells = <2>;
};
- sdma@53fd4000 {
+ sdma: sdma@53fd4000 {
compatible = "fsl,imx25-sdma", "fsl,imx35-sdma";
reg = <0x53fd4000 0x4000>;
clocks = <&clks 112>, <&clks 68>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
interrupts = <34>;
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx25.bin";
};
wdog@53fdc000 {
diff --git a/arch/arm/boot/dts/imx27-apf27dev.dts b/arch/arm/boot/dts/imx27-apf27dev.dts
index 2a377ca1881a..47c8c26012e4 100644
--- a/arch/arm/boot/dts/imx27-apf27dev.dts
+++ b/arch/arm/boot/dts/imx27-apf27dev.dts
@@ -16,6 +16,26 @@
model = "Armadeus Systems APF27Dev docking/development board";
compatible = "armadeus,imx27-apf27dev", "armadeus,imx27-apf27", "fsl,imx27";
+ display: display {
+ model = "Chimei-LW700AT9003";
+ native-mode = <&timing0>;
+ bits-per-pixel = <16>; /* non-standard but required */
+ fsl,pcr = <0xfae80083>; /* non-standard but required */
+ display-timings {
+ timing0: 640x480 {
+ clock-frequency = <33000033>;
+ hactive = <800>;
+ vactive = <640>;
+ hback-porch = <96>;
+ hfront-porch = <96>;
+ vback-porch = <20>;
+ vfront-porch = <21>;
+ hsync-len = <64>;
+ vsync-len = <4>;
+ };
+ };
+ };
+
gpio-keys {
compatible = "gpio-keys";
@@ -50,6 +70,12 @@
status = "okay";
};
+&fb {
+ display = <&display>;
+ fsl,dmacr = <0x00020010>;
+ status = "okay";
+};
+
&i2c1 {
clock-frequency = <400000>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index b7a1c6d950b9..826231eb4446 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -123,6 +123,7 @@
};
pwm: pwm@10006000 {
+ #pwm-cells = <2>;
compatible = "fsl,imx27-pwm";
reg = <0x10006000 0x1000>;
interrupts = <23>;
diff --git a/arch/arm/boot/dts/imx28-apf28.dts b/arch/arm/boot/dts/imx28-apf28.dts
index 7eb075876c4c..7198fe3798c6 100644
--- a/arch/arm/boot/dts/imx28-apf28.dts
+++ b/arch/arm/boot/dts/imx28-apf28.dts
@@ -10,7 +10,7 @@
*/
/dts-v1/;
-/include/ "imx28.dtsi"
+#include "imx28.dtsi"
/ {
model = "Armadeus Systems APF28 module";
diff --git a/arch/arm/boot/dts/imx28-apf28dev.dts b/arch/arm/boot/dts/imx28-apf28dev.dts
index b602494c152b..e2efd8d89c4f 100644
--- a/arch/arm/boot/dts/imx28-apf28dev.dts
+++ b/arch/arm/boot/dts/imx28-apf28dev.dts
@@ -10,7 +10,7 @@
*/
/* APF28Dev is a docking board for the APF28 SOM */
-/include/ "imx28-apf28.dts"
+#include "imx28-apf28.dts"
/ {
model = "Armadeus Systems APF28Dev docking/development board";
@@ -41,30 +41,30 @@
hog_pins_apf28dev: hog@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1103 /* MX28_PAD_LCD_D16__GPIO_1_16 */
- 0x1113 /* MX28_PAD_LCD_D17__GPIO_1_17 */
- 0x1123 /* MX28_PAD_LCD_D18__GPIO_1_18 */
- 0x1133 /* MX28_PAD_LCD_D19__GPIO_1_19 */
- 0x1143 /* MX28_PAD_LCD_D20__GPIO_1_20 */
- 0x1153 /* MX28_PAD_LCD_D21__GPIO_1_21 */
- 0x1163 /* MX28_PAD_LCD_D22__GPIO_1_22 */
+ MX28_PAD_LCD_D16__GPIO_1_16
+ MX28_PAD_LCD_D17__GPIO_1_17
+ MX28_PAD_LCD_D18__GPIO_1_18
+ MX28_PAD_LCD_D19__GPIO_1_19
+ MX28_PAD_LCD_D20__GPIO_1_20
+ MX28_PAD_LCD_D21__GPIO_1_21
+ MX28_PAD_LCD_D22__GPIO_1_22
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_pins_apf28dev: lcdif-apf28dev@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1181 /* MX28_PAD_LCD_RD_E__LCD_VSYNC */
- 0x1191 /* MX28_PAD_LCD_WR_RWN__LCD_HSYNC */
- 0x11a1 /* MX28_PAD_LCD_RS__LCD_DOTCLK */
- 0x11b1 /* MX28_PAD_LCD_CS__LCD_ENABLE */
+ MX28_PAD_LCD_RD_E__LCD_VSYNC
+ MX28_PAD_LCD_WR_RWN__LCD_HSYNC
+ MX28_PAD_LCD_RS__LCD_DOTCLK
+ MX28_PAD_LCD_CS__LCD_ENABLE
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
diff --git a/arch/arm/boot/dts/imx28-apx4devkit.dts b/arch/arm/boot/dts/imx28-apx4devkit.dts
index 0e7fed47bd8d..6f254ca816cb 100644
--- a/arch/arm/boot/dts/imx28-apx4devkit.dts
+++ b/arch/arm/boot/dts/imx28-apx4devkit.dts
@@ -1,5 +1,5 @@
/dts-v1/;
-/include/ "imx28.dtsi"
+#include "imx28.dtsi"
/ {
model = "Bluegiga APX4 Development Kit";
@@ -40,53 +40,53 @@
hog_pins_a: hog@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0113 /* MX28_PAD_GPMI_CE1N__GPIO_0_17 */
- 0x0153 /* MX28_PAD_GPMI_RDY1__GPIO_0_21 */
- 0x2123 /* MX28_PAD_SSP2_MISO__GPIO_2_18 */
- 0x2131 /* MX28_PAD_SSP2_SS0__GPIO_2_19 */
- 0x31c3 /* MX28_PAD_PWM3__GPIO_3_28 */
- 0x31e3 /* MX28_PAD_LCD_RESET__GPIO_3_30 */
- 0x4143 /* MX28_PAD_JTAG_RTCK__GPIO_4_20 */
+ MX28_PAD_GPMI_CE1N__GPIO_0_17
+ MX28_PAD_GPMI_RDY1__GPIO_0_21
+ MX28_PAD_SSP2_MISO__GPIO_2_18
+ MX28_PAD_SSP2_SS0__AUART3_TX /* was: 0x2131 - MX28_PAD_SSP2_SS0__GPIO_2_19 */
+ MX28_PAD_PWM3__GPIO_3_28
+ MX28_PAD_LCD_RESET__GPIO_3_30
+ MX28_PAD_JTAG_RTCK__GPIO_4_20
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_pins_apx4: lcdif-apx4@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1181 /* MX28_PAD_LCD_RD_E__LCD_VSYNC */
- 0x1191 /* MX28_PAD_LCD_WR_RWN__LCD_HSYNC */
- 0x11a1 /* MX28_PAD_LCD_RS__LCD_DOTCLK */
- 0x11b1 /* MX28_PAD_LCD_CS__LCD_ENABLE */
+ MX28_PAD_LCD_RD_E__LCD_VSYNC
+ MX28_PAD_LCD_WR_RWN__LCD_HSYNC
+ MX28_PAD_LCD_RS__LCD_DOTCLK
+ MX28_PAD_LCD_CS__LCD_ENABLE
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
mmc2_4bit_pins_apx4: mmc2-4bit-apx4@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2041 /* MX28_PAD_SSP0_DATA4__SSP2_D0 */
- 0x2051 /* MX28_PAD_SSP0_DATA5__SSP2_D3 */
- 0x2061 /* MX28_PAD_SSP0_DATA6__SSP2_CMD */
- 0x2071 /* MX28_PAD_SSP0_DATA7__SSP2_SCK */
- 0x2141 /* MX28_PAD_SSP2_SS1__SSP2_D1 */
- 0x2151 /* MX28_PAD_SSP2_SS2__SSP2_D2 */
+ MX28_PAD_SSP0_DATA4__SSP2_D0
+ MX28_PAD_SSP0_DATA5__SSP2_D3
+ MX28_PAD_SSP0_DATA6__SSP2_CMD
+ MX28_PAD_SSP0_DATA7__SSP2_SCK
+ MX28_PAD_SSP2_SS1__SSP2_D1
+ MX28_PAD_SSP2_SS2__SSP2_D2
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
mmc2_sck_cfg_apx4: mmc2-sck-cfg-apx4 {
fsl,pinmux-ids = <
- 0x2071 /* MX28_PAD_SSP0_DATA7__SSP2_SCK */
+ MX28_PAD_SSP0_DATA7__SSP2_SCK
>;
- fsl,drive-strength = <2>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
diff --git a/arch/arm/boot/dts/imx28-cfa10036.dts b/arch/arm/boot/dts/imx28-cfa10036.dts
index 1ec8c94bbac9..cabb6171a19d 100644
--- a/arch/arm/boot/dts/imx28-cfa10036.dts
+++ b/arch/arm/boot/dts/imx28-cfa10036.dts
@@ -10,7 +10,7 @@
*/
/dts-v1/;
-/include/ "imx28.dtsi"
+#include "imx28.dtsi"
/ {
model = "Crystalfontz CFA-10036 Board";
@@ -26,31 +26,31 @@
ssd1306_cfa10036: ssd1306-10036@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2073 /* MX28_PAD_SSP0_D7__GPIO_2_7 */
+ MX28_PAD_SSP0_DATA7__GPIO_2_7
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
led_pins_cfa10036: leds-10036@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3043 /* MX28_PAD_AUART1_RX__GPIO_3_4 */
+ MX28_PAD_AUART1_RX__GPIO_3_4
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
usb0_otg_cfa10036: otg-10036@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0142 /* MX28_PAD_GPMI_READY0__USB0_ID */
+ MX28_PAD_GPMI_RDY0__USB0_ID
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
diff --git a/arch/arm/boot/dts/imx28-cfa10037.dts b/arch/arm/boot/dts/imx28-cfa10037.dts
index 182b99fe35f3..f93e9a700e52 100644
--- a/arch/arm/boot/dts/imx28-cfa10037.dts
+++ b/arch/arm/boot/dts/imx28-cfa10037.dts
@@ -13,7 +13,7 @@
* The CFA-10049 is an expansion board for the CFA-10036 module, thus we
* need to include the CFA-10036 DTS.
*/
-/include/ "imx28-cfa10036.dts"
+#include "imx28-cfa10036.dts"
/ {
model = "Crystalfontz CFA-10037 Board";
@@ -25,21 +25,21 @@
usb_pins_cfa10037: usb-10037@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0073 /* MX28_PAD_GPMI_D7__GPIO_0_7 */
+ MX28_PAD_GPMI_D07__GPIO_0_7
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
mac0_pins_cfa10037: mac0-10037@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2153 /* MX28_PAD_SSP2_D5__GPIO_2_21 */
+ MX28_PAD_SSP2_SS2__GPIO_2_21
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
};
diff --git a/arch/arm/boot/dts/imx28-cfa10049.dts b/arch/arm/boot/dts/imx28-cfa10049.dts
index 06e4cfaf7dd2..7087b4bf6a8f 100644
--- a/arch/arm/boot/dts/imx28-cfa10049.dts
+++ b/arch/arm/boot/dts/imx28-cfa10049.dts
@@ -13,7 +13,7 @@
* The CFA-10049 is an expansion board for the CFA-10036 module, thus we
* need to include the CFA-10036 DTS.
*/
-/include/ "imx28-cfa10036.dts"
+#include "imx28-cfa10036.dts"
/ {
model = "Crystalfontz CFA-10049 Board";
@@ -25,150 +25,150 @@
usb_pins_cfa10049: usb-10049@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0073 /* MX28_PAD_GPMI_D7__GPIO_0_7 */
+ MX28_PAD_GPMI_D07__GPIO_0_7
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
i2cmux_pins_cfa10049: i2cmux-10049@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1163 /* MX28_PAD_LCD_D22__GPIO_1_22 */
- 0x1173 /* MX28_PAD_LCD_D22__GPIO_1_23 */
+ MX28_PAD_LCD_D22__GPIO_1_22
+ MX28_PAD_LCD_D23__GPIO_1_23
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
mac0_pins_cfa10049: mac0-10049@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2153 /* MX28_PAD_SSP2_D5__GPIO_2_21 */
+ MX28_PAD_SSP2_SS2__GPIO_2_21
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
pca_pins_cfa10049: pca-10049@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2133 /* MX28_PAD_SSP2_D3__GPIO_2_19 */
+ MX28_PAD_SSP2_SS0__GPIO_2_19
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
rotary_pins_cfa10049: rotary-10049@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3183 /* MX28_PAD_I2C0_SCL__GPIO_3_24 */
- 0x3193 /* MX28_PAD_I2C0_SDA__GPIO_3_25 */
+ MX28_PAD_I2C0_SCL__GPIO_3_24
+ MX28_PAD_I2C0_SDA__GPIO_3_25
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
rotary_btn_pins_cfa10049: rotary-btn-10049@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x31a3 /* MX28_PAD_SAIF_SDATA0__GPIO_3_26 */
+ MX28_PAD_SAIF1_SDATA0__GPIO_3_26
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
spi2_pins_cfa10049: spi2-cfa10049@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2103 /* MX28_PAD_SSP2_SCK__GPIO_2_16 */
- 0x2113 /* MX28_PAD_SSP2_CMD__GPIO_2_17 */
- 0x2123 /* MX28_PAD_SSP2_D0__GPIO_2_18 */
- 0x3053 /* MX28_PAD_AUART1_TX__GPIO_3_5 */
+ MX28_PAD_SSP2_SCK__GPIO_2_16
+ MX28_PAD_SSP2_MOSI__GPIO_2_17
+ MX28_PAD_SSP2_MISO__GPIO_2_18
+ MX28_PAD_AUART1_TX__GPIO_3_5
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
spi3_pins_cfa10049: spi3-cfa10049@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0183 /* MX28_PAD_GPMI_RDN__GPIO_0_24 */
- 0x01c3 /* MX28_PAD_GPMI_RESETN__GPIO_0_28 */
- 0x0113 /* MX28_PAD_GPMI_CE1N__GPIO_0_17 */
- 0x01a3 /* MX28_PAD_GPMI_ALE__GPIO_0_26 */
- 0x01b3 /* MX28_PAD_GPMI_CLE__GPIO_0_27 */
+ MX28_PAD_GPMI_RDN__GPIO_0_24
+ MX28_PAD_GPMI_RESETN__GPIO_0_28
+ MX28_PAD_GPMI_CE1N__GPIO_0_17
+ MX28_PAD_GPMI_ALE__GPIO_0_26
+ MX28_PAD_GPMI_CLE__GPIO_0_27
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
lcdif_18bit_pins_cfa10049: lcdif-18bit@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1000 /* MX28_PAD_LCD_D00__LCD_D0 */
- 0x1010 /* MX28_PAD_LCD_D01__LCD_D1 */
- 0x1020 /* MX28_PAD_LCD_D02__LCD_D2 */
- 0x1030 /* MX28_PAD_LCD_D03__LCD_D3 */
- 0x1040 /* MX28_PAD_LCD_D04__LCD_D4 */
- 0x1050 /* MX28_PAD_LCD_D05__LCD_D5 */
- 0x1060 /* MX28_PAD_LCD_D06__LCD_D6 */
- 0x1070 /* MX28_PAD_LCD_D07__LCD_D7 */
- 0x1080 /* MX28_PAD_LCD_D08__LCD_D8 */
- 0x1090 /* MX28_PAD_LCD_D09__LCD_D9 */
- 0x10a0 /* MX28_PAD_LCD_D10__LCD_D10 */
- 0x10b0 /* MX28_PAD_LCD_D11__LCD_D11 */
- 0x10c0 /* MX28_PAD_LCD_D12__LCD_D12 */
- 0x10d0 /* MX28_PAD_LCD_D13__LCD_D13 */
- 0x10e0 /* MX28_PAD_LCD_D14__LCD_D14 */
- 0x10f0 /* MX28_PAD_LCD_D15__LCD_D15 */
- 0x1100 /* MX28_PAD_LCD_D16__LCD_D16 */
- 0x1110 /* MX28_PAD_LCD_D17__LCD_D17 */
+ MX28_PAD_LCD_D00__LCD_D0
+ MX28_PAD_LCD_D01__LCD_D1
+ MX28_PAD_LCD_D02__LCD_D2
+ MX28_PAD_LCD_D03__LCD_D3
+ MX28_PAD_LCD_D04__LCD_D4
+ MX28_PAD_LCD_D05__LCD_D5
+ MX28_PAD_LCD_D06__LCD_D6
+ MX28_PAD_LCD_D07__LCD_D7
+ MX28_PAD_LCD_D08__LCD_D8
+ MX28_PAD_LCD_D09__LCD_D9
+ MX28_PAD_LCD_D10__LCD_D10
+ MX28_PAD_LCD_D11__LCD_D11
+ MX28_PAD_LCD_D12__LCD_D12
+ MX28_PAD_LCD_D13__LCD_D13
+ MX28_PAD_LCD_D14__LCD_D14
+ MX28_PAD_LCD_D15__LCD_D15
+ MX28_PAD_LCD_D16__LCD_D16
+ MX28_PAD_LCD_D17__LCD_D17
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_pins_cfa10049: lcdif-evk@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1181 /* MX28_PAD_LCD_RD_E__LCD_VSYNC */
- 0x1191 /* MX28_PAD_LCD_WR_RWN__LCD_HSYNC */
- 0x11a1 /* MX28_PAD_LCD_RS__LCD_DOTCLK */
- 0x11b1 /* MX28_PAD_LCD_CS__LCD_ENABLE */
+ MX28_PAD_LCD_RD_E__LCD_VSYNC
+ MX28_PAD_LCD_WR_RWN__LCD_HSYNC
+ MX28_PAD_LCD_RS__LCD_DOTCLK
+ MX28_PAD_LCD_CS__LCD_ENABLE
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_pins_cfa10049_pullup: lcdif-10049-pullup@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x31e3 /* MX28_PAD_LCD_RESET__GPIO_3_30 */
+ MX28_PAD_LCD_RESET__GPIO_3_30
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
w1_gpio_pins: w1-gpio@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1153 /* MX28_PAD_LCD_D21__GPIO_1_21 */
+ MX28_PAD_LCD_D21__GPIO_1_21
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>; /* 0 will enable the keeper */
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>; /* 0 will enable the keeper */
};
};
diff --git a/arch/arm/boot/dts/imx28-cfa10055.dts b/arch/arm/boot/dts/imx28-cfa10055.dts
index 171bcbe1ec4b..c3900e7ba331 100644
--- a/arch/arm/boot/dts/imx28-cfa10055.dts
+++ b/arch/arm/boot/dts/imx28-cfa10055.dts
@@ -14,7 +14,7 @@
* The CFA-10055 is an expansion board for the CFA-10036 module and
* CFA-10037, thus we need to include the CFA-10037 DTS.
*/
-/include/ "imx28-cfa10037.dts"
+#include "imx28-cfa10037.dts"
/ {
model = "Crystalfontz CFA-10055 Board";
@@ -26,64 +26,64 @@
spi2_pins_cfa10055: spi2-cfa10055@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2103 /* MX28_PAD_SSP2_SCK__GPIO_2_16 */
- 0x2113 /* MX28_PAD_SSP2_CMD__GPIO_2_17 */
- 0x2123 /* MX28_PAD_SSP2_D0__GPIO_2_18 */
- 0x3053 /* MX28_PAD_AUART1_TX__GPIO_3_5 */
+ MX28_PAD_SSP2_SCK__GPIO_2_16
+ MX28_PAD_SSP2_MOSI__GPIO_2_17
+ MX28_PAD_SSP2_MISO__GPIO_2_18
+ MX28_PAD_AUART1_TX__GPIO_3_5
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
lcdif_18bit_pins_cfa10055: lcdif-18bit@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1000 /* MX28_PAD_LCD_D00__LCD_D0 */
- 0x1010 /* MX28_PAD_LCD_D01__LCD_D1 */
- 0x1020 /* MX28_PAD_LCD_D02__LCD_D2 */
- 0x1030 /* MX28_PAD_LCD_D03__LCD_D3 */
- 0x1040 /* MX28_PAD_LCD_D04__LCD_D4 */
- 0x1050 /* MX28_PAD_LCD_D05__LCD_D5 */
- 0x1060 /* MX28_PAD_LCD_D06__LCD_D6 */
- 0x1070 /* MX28_PAD_LCD_D07__LCD_D7 */
- 0x1080 /* MX28_PAD_LCD_D08__LCD_D8 */
- 0x1090 /* MX28_PAD_LCD_D09__LCD_D9 */
- 0x10a0 /* MX28_PAD_LCD_D10__LCD_D10 */
- 0x10b0 /* MX28_PAD_LCD_D11__LCD_D11 */
- 0x10c0 /* MX28_PAD_LCD_D12__LCD_D12 */
- 0x10d0 /* MX28_PAD_LCD_D13__LCD_D13 */
- 0x10e0 /* MX28_PAD_LCD_D14__LCD_D14 */
- 0x10f0 /* MX28_PAD_LCD_D15__LCD_D15 */
- 0x1100 /* MX28_PAD_LCD_D16__LCD_D16 */
- 0x1110 /* MX28_PAD_LCD_D17__LCD_D17 */
+ MX28_PAD_LCD_D00__LCD_D0
+ MX28_PAD_LCD_D01__LCD_D1
+ MX28_PAD_LCD_D02__LCD_D2
+ MX28_PAD_LCD_D03__LCD_D3
+ MX28_PAD_LCD_D04__LCD_D4
+ MX28_PAD_LCD_D05__LCD_D5
+ MX28_PAD_LCD_D06__LCD_D6
+ MX28_PAD_LCD_D07__LCD_D7
+ MX28_PAD_LCD_D08__LCD_D8
+ MX28_PAD_LCD_D09__LCD_D9
+ MX28_PAD_LCD_D10__LCD_D10
+ MX28_PAD_LCD_D11__LCD_D11
+ MX28_PAD_LCD_D12__LCD_D12
+ MX28_PAD_LCD_D13__LCD_D13
+ MX28_PAD_LCD_D14__LCD_D14
+ MX28_PAD_LCD_D15__LCD_D15
+ MX28_PAD_LCD_D16__LCD_D16
+ MX28_PAD_LCD_D17__LCD_D17
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_pins_cfa10055: lcdif-evk@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1181 /* MX28_PAD_LCD_RD_E__LCD_VSYNC */
- 0x1191 /* MX28_PAD_LCD_WR_RWN__LCD_HSYNC */
- 0x11a1 /* MX28_PAD_LCD_RS__LCD_DOTCLK */
- 0x11b1 /* MX28_PAD_LCD_CS__LCD_ENABLE */
+ MX28_PAD_LCD_RD_E__LCD_VSYNC
+ MX28_PAD_LCD_WR_RWN__LCD_HSYNC
+ MX28_PAD_LCD_RS__LCD_DOTCLK
+ MX28_PAD_LCD_CS__LCD_ENABLE
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_pins_cfa10055_pullup: lcdif-10055-pullup@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x31e3 /* MX28_PAD_LCD_RESET__GPIO_3_30 */
+ MX28_PAD_LCD_RESET__GPIO_3_30
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
};
diff --git a/arch/arm/boot/dts/imx28-cfa10056.dts b/arch/arm/boot/dts/imx28-cfa10056.dts
index b45dd0e4ee57..cef959a97219 100644
--- a/arch/arm/boot/dts/imx28-cfa10056.dts
+++ b/arch/arm/boot/dts/imx28-cfa10056.dts
@@ -13,7 +13,7 @@
* The CFA-10055 is an expansion board for the CFA-10036 module and
* CFA-10037, thus we need to include the CFA-10037 DTS.
*/
-/include/ "imx28-cfa10037.dts"
+#include "imx28-cfa10037.dts"
/ {
model = "Crystalfontz CFA-10056 Board";
@@ -25,37 +25,37 @@
spi2_pins_cfa10056: spi2-cfa10056@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2103 /* MX28_PAD_SSP2_SCK__GPIO_2_16 */
- 0x2113 /* MX28_PAD_SSP2_CMD__GPIO_2_17 */
- 0x2123 /* MX28_PAD_SSP2_D0__GPIO_2_18 */
- 0x3053 /* MX28_PAD_AUART1_TX__GPIO_3_5 */
+ MX28_PAD_SSP2_SCK__GPIO_2_16
+ MX28_PAD_SSP2_MOSI__GPIO_2_17
+ MX28_PAD_SSP2_MISO__GPIO_2_18
+ MX28_PAD_AUART1_TX__GPIO_3_5
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
lcdif_pins_cfa10056: lcdif-10056@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1181 /* MX28_PAD_LCD_RD_E__LCD_VSYNC */
- 0x1191 /* MX28_PAD_LCD_WR_RWN__LCD_HSYNC */
- 0x11a1 /* MX28_PAD_LCD_RS__LCD_DOTCLK */
- 0x11b1 /* MX28_PAD_LCD_CS__LCD_ENABLE */
+ MX28_PAD_LCD_RD_E__LCD_VSYNC
+ MX28_PAD_LCD_WR_RWN__LCD_HSYNC
+ MX28_PAD_LCD_RS__LCD_DOTCLK
+ MX28_PAD_LCD_CS__LCD_ENABLE
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_pins_cfa10056_pullup: lcdif-10056-pullup@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x31e3 /* MX28_PAD_LCD_RESET__GPIO_3_30 */
+ MX28_PAD_LCD_RESET__GPIO_3_30
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
};
diff --git a/arch/arm/boot/dts/imx28-cfa10057.dts b/arch/arm/boot/dts/imx28-cfa10057.dts
index 0333c0532f28..3c1312885ae0 100644
--- a/arch/arm/boot/dts/imx28-cfa10057.dts
+++ b/arch/arm/boot/dts/imx28-cfa10057.dts
@@ -14,7 +14,7 @@
* The CFA-10057 is an expansion board for the CFA-10036 module, thus we
* need to include the CFA-10036 DTS.
*/
-/include/ "imx28-cfa10036.dts"
+#include "imx28-cfa10036.dts"
/ {
model = "Crystalfontz CFA-10057 Board";
@@ -26,51 +26,51 @@
usb_pins_cfa10057: usb-10057@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0073 /* MX28_PAD_GPMI_D7__GPIO_0_7 */
+ MX28_PAD_GPMI_D07__GPIO_0_7
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_18bit_pins_cfa10057: lcdif-18bit@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1000 /* MX28_PAD_LCD_D00__LCD_D0 */
- 0x1010 /* MX28_PAD_LCD_D01__LCD_D1 */
- 0x1020 /* MX28_PAD_LCD_D02__LCD_D2 */
- 0x1030 /* MX28_PAD_LCD_D03__LCD_D3 */
- 0x1040 /* MX28_PAD_LCD_D04__LCD_D4 */
- 0x1050 /* MX28_PAD_LCD_D05__LCD_D5 */
- 0x1060 /* MX28_PAD_LCD_D06__LCD_D6 */
- 0x1070 /* MX28_PAD_LCD_D07__LCD_D7 */
- 0x1080 /* MX28_PAD_LCD_D08__LCD_D8 */
- 0x1090 /* MX28_PAD_LCD_D09__LCD_D9 */
- 0x10a0 /* MX28_PAD_LCD_D10__LCD_D10 */
- 0x10b0 /* MX28_PAD_LCD_D11__LCD_D11 */
- 0x10c0 /* MX28_PAD_LCD_D12__LCD_D12 */
- 0x10d0 /* MX28_PAD_LCD_D13__LCD_D13 */
- 0x10e0 /* MX28_PAD_LCD_D14__LCD_D14 */
- 0x10f0 /* MX28_PAD_LCD_D15__LCD_D15 */
- 0x1100 /* MX28_PAD_LCD_D16__LCD_D16 */
- 0x1110 /* MX28_PAD_LCD_D17__LCD_D17 */
+ MX28_PAD_LCD_D00__LCD_D0
+ MX28_PAD_LCD_D01__LCD_D1
+ MX28_PAD_LCD_D02__LCD_D2
+ MX28_PAD_LCD_D03__LCD_D3
+ MX28_PAD_LCD_D04__LCD_D4
+ MX28_PAD_LCD_D05__LCD_D5
+ MX28_PAD_LCD_D06__LCD_D6
+ MX28_PAD_LCD_D07__LCD_D7
+ MX28_PAD_LCD_D08__LCD_D8
+ MX28_PAD_LCD_D09__LCD_D9
+ MX28_PAD_LCD_D10__LCD_D10
+ MX28_PAD_LCD_D11__LCD_D11
+ MX28_PAD_LCD_D12__LCD_D12
+ MX28_PAD_LCD_D13__LCD_D13
+ MX28_PAD_LCD_D14__LCD_D14
+ MX28_PAD_LCD_D15__LCD_D15
+ MX28_PAD_LCD_D16__LCD_D16
+ MX28_PAD_LCD_D17__LCD_D17
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_pins_cfa10057: lcdif-evk@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1181 /* MX28_PAD_LCD_RD_E__LCD_VSYNC */
- 0x1191 /* MX28_PAD_LCD_WR_RWN__LCD_HSYNC */
- 0x11a1 /* MX28_PAD_LCD_RS__LCD_DOTCLK */
- 0x11b1 /* MX28_PAD_LCD_CS__LCD_ENABLE */
+ MX28_PAD_LCD_RD_E__LCD_VSYNC
+ MX28_PAD_LCD_WR_RWN__LCD_HSYNC
+ MX28_PAD_LCD_RS__LCD_DOTCLK
+ MX28_PAD_LCD_CS__LCD_ENABLE
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
diff --git a/arch/arm/boot/dts/imx28-cfa10058.dts b/arch/arm/boot/dts/imx28-cfa10058.dts
index 64c64c55a82a..2469d34df0ae 100644
--- a/arch/arm/boot/dts/imx28-cfa10058.dts
+++ b/arch/arm/boot/dts/imx28-cfa10058.dts
@@ -14,7 +14,7 @@
* The CFA-10058 is an expansion board for the CFA-10036 module, thus we
* need to include the CFA-10036 DTS.
*/
-/include/ "imx28-cfa10036.dts"
+#include "imx28-cfa10036.dts"
/ {
model = "Crystalfontz CFA-10058 Board";
@@ -26,24 +26,24 @@
usb_pins_cfa10058: usb-10058@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0073 /* MX28_PAD_GPMI_D7__GPIO_0_7 */
+ MX28_PAD_GPMI_D07__GPIO_0_7
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_pins_cfa10058: lcdif-10058@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1181 /* MX28_PAD_LCD_RD_E__LCD_VSYNC */
- 0x1191 /* MX28_PAD_LCD_WR_RWN__LCD_HSYNC */
- 0x11a1 /* MX28_PAD_LCD_RS__LCD_DOTCLK */
- 0x11b1 /* MX28_PAD_LCD_CS__LCD_ENABLE */
+ MX28_PAD_LCD_RD_E__LCD_VSYNC
+ MX28_PAD_LCD_WR_RWN__LCD_HSYNC
+ MX28_PAD_LCD_RS__LCD_DOTCLK
+ MX28_PAD_LCD_CS__LCD_ENABLE
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index 15715d921d14..1f63845b8ce0 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -10,7 +10,7 @@
*/
/dts-v1/;
-/include/ "imx28.dtsi"
+#include "imx28.dtsi"
/ {
model = "Freescale i.MX28 Evaluation Kit";
@@ -70,52 +70,52 @@
hog_pins_a: hog@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x20d3 /* MX28_PAD_SSP1_CMD__GPIO_2_13 */
- 0x20f3 /* MX28_PAD_SSP1_DATA3__GPIO_2_15 */
- 0x40d3 /* MX28_PAD_ENET0_RX_CLK__GPIO_4_13 */
- 0x20c3 /* MX28_PAD_SSP1_SCK__GPIO_2_12 */
- 0x31c3 /* MX28_PAD_PWM3__GPIO_3_28 */
- 0x31e3 /* MX28_PAD_LCD_RESET__GPIO_3_30 */
- 0x3083 /* MX28_PAD_AUART2_RX__GPIO_3_8 */
- 0x3093 /* MX28_PAD_AUART2_TX__GPIO_3_9 */
+ MX28_PAD_SSP1_CMD__GPIO_2_13
+ MX28_PAD_SSP1_DATA3__GPIO_2_15
+ MX28_PAD_ENET0_RX_CLK__GPIO_4_13
+ MX28_PAD_SSP1_SCK__GPIO_2_12
+ MX28_PAD_PWM3__GPIO_3_28
+ MX28_PAD_LCD_RESET__GPIO_3_30
+ MX28_PAD_AUART2_RX__GPIO_3_8
+ MX28_PAD_AUART2_TX__GPIO_3_9
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
led_pin_gpio3_5: led_gpio3_5@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3053 /* MX28_PAD_AUART1_TX__GPIO_3_5 */
+ MX28_PAD_AUART1_TX__GPIO_3_5
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
gpmi_pins_evk: gpmi-nand-evk@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0110 /* MX28_PAD_GPMI_CE1N__GPMI_CE1N */
- 0x0150 /* MX28_PAD_GPMI_RDY1__GPMI_READY1 */
+ MX28_PAD_GPMI_CE1N__GPMI_CE1N
+ MX28_PAD_GPMI_RDY1__GPMI_READY1
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_pins_evk: lcdif-evk@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1181 /* MX28_PAD_LCD_RD_E__LCD_VSYNC */
- 0x1191 /* MX28_PAD_LCD_WR_RWN__LCD_HSYNC */
- 0x11a1 /* MX28_PAD_LCD_RS__LCD_DOTCLK */
- 0x11b1 /* MX28_PAD_LCD_CS__LCD_ENABLE */
+ MX28_PAD_LCD_RD_E__LCD_VSYNC
+ MX28_PAD_LCD_WR_RWN__LCD_HSYNC
+ MX28_PAD_LCD_RS__LCD_DOTCLK
+ MX28_PAD_LCD_CS__LCD_ENABLE
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
@@ -182,6 +182,7 @@
};
lradc@80050000 {
+ fsl,lradc-touchscreen-wires = <4>;
status = "okay";
};
@@ -242,6 +243,8 @@
ahb@80080000 {
usb0: usb@80080000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_id_pins_a>;
vbus-supply = <&reg_usb0_vbus>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx28-m28cu3.dts b/arch/arm/boot/dts/imx28-m28cu3.dts
new file mode 100644
index 000000000000..d3958da60bd7
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-m28cu3.dts
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2013 Marek Vasut <marex@denx.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "imx28.dtsi"
+
+/ {
+ model = "MSR M28CU3";
+ compatible = "msr,m28cu3", "fsl,imx28";
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ gpmi-nand@8000c000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpmi_pins_a &gpmi_status_cfg>;
+ status = "okay";
+
+ partition@0 {
+ label = "gpmi-nfc-0-boot";
+ reg = <0x00000000 0x01400000>;
+ read-only;
+ };
+
+ partition@1 {
+ label = "gpmi-nfc-general-use";
+ reg = <0x01400000 0x0ec00000>;
+ };
+ };
+
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_4bit_pins_a
+ &mmc0_cd_cfg
+ &mmc0_sck_cfg>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_vddio_sd0>;
+ status = "okay";
+ };
+
+ ssp2: ssp@80014000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_4bit_pins_a
+ &mmc2_cd_cfg
+ &mmc2_sck_cfg>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_vddio_sd1>;
+ status = "okay";
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_SSP2_SS0__GPIO_2_19
+ MX28_PAD_PWM4__GPIO_3_29
+ MX28_PAD_AUART2_RX__GPIO_3_8
+ MX28_PAD_ENET0_RX_CLK__GPIO_4_13
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ lcdif_pins_m28: lcdif-m28@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_LCD_VSYNC__LCD_VSYNC
+ MX28_PAD_LCD_HSYNC__LCD_HSYNC
+ MX28_PAD_LCD_DOTCLK__LCD_DOTCLK
+ MX28_PAD_LCD_RESET__LCD_RESET
+ MX28_PAD_LCD_CS__LCD_ENABLE
+ MX28_PAD_AUART1_TX__GPIO_3_5
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ led_pins_gpio: leds-m28@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_SSP3_MISO__GPIO_2_26
+ MX28_PAD_SSP3_SCK__GPIO_2_24
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+ };
+
+ ocotp@8002c000 {
+ status = "okay";
+ };
+
+ lcdif@80030000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcdif_24bit_pins_a
+ &lcdif_pins_m28>;
+ display = <&display>;
+ reset-active-high;
+ status = "okay";
+
+ display: display0 {
+ bits-per-pixel = <32>;
+ bus-width = <24>;
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: timing0 {
+ clock-frequency = <6410256>;
+ hactive = <320>;
+ vactive = <240>;
+ hback-porch = <38>;
+ hfront-porch = <20>;
+ vback-porch = <15>;
+ vfront-porch = <5>;
+ hsync-len = <30>;
+ vsync-len = <3>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <1>;
+ };
+ };
+ };
+ };
+ };
+
+ apbx@80040000 {
+ duart: serial@80074000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_b>;
+ status = "okay";
+ };
+
+ usbphy1: usbphy@8007e000 {
+ status = "okay";
+ };
+
+ auart0: serial@8006a000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_2pins_a>;
+ status = "okay";
+ };
+
+ auart3: serial@80070000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart3_2pins_b>;
+ status = "okay";
+ };
+
+ pwm: pwm@80064000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm3_pins_a>;
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ usb1: usb@80090000 {
+ vbus-supply = <&reg_usb1_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usbphy1_pins_a>;
+ disable-over-current;
+ status = "okay";
+ };
+
+ mac0: ethernet@800f0000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_pins_a>;
+ phy-reset-gpios = <&gpio4 13 0>;
+ phy-reset-duration = <100>;
+ status = "okay";
+ };
+
+ mac1: ethernet@800f4000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac1_pins_a>;
+ status = "okay";
+ };
+ };
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 3 5000000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins_gpio>;
+
+ user1 {
+ label = "sd0-led";
+ gpios = <&gpio2 26 0>;
+ linux,default-trigger = "mmc0";
+ };
+
+ user2 {
+ label = "sd1-led";
+ gpios = <&gpio2 24 0>;
+ linux,default-trigger = "mmc2";
+ };
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_vddio_sd0: vddio-sd0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio-sd0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 29 0>;
+ };
+
+ reg_vddio_sd1: vddio-sd1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio-sd1";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 19 0>;
+ };
+
+ reg_usb1_vbus: usb1_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 8 0>;
+ enable-active-high;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts
index 0d322a2bebaf..8e2477fbe1d7 100644
--- a/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/arch/arm/boot/dts/imx28-m28evk.dts
@@ -10,7 +10,7 @@
*/
/dts-v1/;
-/include/ "imx28.dtsi"
+#include "imx28.dtsi"
/ {
model = "DENX M28EVK";
@@ -92,26 +92,26 @@
hog_pins_a: hog@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x31c3 /* MX28_PAD_PWM3__GPIO_3_28 */
- 0x30a3 /* MX28_PAD_AUART2_CTS__GPIO_3_10 */
- 0x30b3 /* MX28_PAD_AUART2_RTS__GPIO_3_11 */
- 0x30c3 /* MX28_PAD_AUART3_RX__GPIO_3_12 */
- 0x30d3 /* MX28_PAD_AUART3_TX__GPIO_3_13 */
+ MX28_PAD_PWM3__GPIO_3_28
+ MX28_PAD_AUART2_CTS__GPIO_3_10
+ MX28_PAD_AUART2_RTS__GPIO_3_11
+ MX28_PAD_AUART3_RX__GPIO_3_12
+ MX28_PAD_AUART3_TX__GPIO_3_13
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_pins_m28: lcdif-m28@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x11e0 /* MX28_PAD_LCD_DOTCLK__LCD_DOTCLK */
- 0x11f0 /* MX28_PAD_LCD_ENABLE__LCD_ENABLE */
+ MX28_PAD_LCD_DOTCLK__LCD_DOTCLK
+ MX28_PAD_LCD_ENABLE__LCD_ENABLE
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
diff --git a/arch/arm/boot/dts/imx28-pinfunc.h b/arch/arm/boot/dts/imx28-pinfunc.h
new file mode 100644
index 000000000000..e11f69ba0fe4
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-pinfunc.h
@@ -0,0 +1,506 @@
+/*
+ * Header providing constants for i.MX28 pinctrl bindings.
+ *
+ * Copyright (C) 2013 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __DT_BINDINGS_MX28_PINCTRL_H__
+#define __DT_BINDINGS_MX28_PINCTRL_H__
+
+#include "mxs-pinfunc.h"
+
+#define MX28_PAD_GPMI_D00__GPMI_D0 0x0000
+#define MX28_PAD_GPMI_D01__GPMI_D1 0x0010
+#define MX28_PAD_GPMI_D02__GPMI_D2 0x0020
+#define MX28_PAD_GPMI_D03__GPMI_D3 0x0030
+#define MX28_PAD_GPMI_D04__GPMI_D4 0x0040
+#define MX28_PAD_GPMI_D05__GPMI_D5 0x0050
+#define MX28_PAD_GPMI_D06__GPMI_D6 0x0060
+#define MX28_PAD_GPMI_D07__GPMI_D7 0x0070
+#define MX28_PAD_GPMI_CE0N__GPMI_CE0N 0x0100
+#define MX28_PAD_GPMI_CE1N__GPMI_CE1N 0x0110
+#define MX28_PAD_GPMI_CE2N__GPMI_CE2N 0x0120
+#define MX28_PAD_GPMI_CE3N__GPMI_CE3N 0x0130
+#define MX28_PAD_GPMI_RDY0__GPMI_READY0 0x0140
+#define MX28_PAD_GPMI_RDY1__GPMI_READY1 0x0150
+#define MX28_PAD_GPMI_RDY2__GPMI_READY2 0x0160
+#define MX28_PAD_GPMI_RDY3__GPMI_READY3 0x0170
+#define MX28_PAD_GPMI_RDN__GPMI_RDN 0x0180
+#define MX28_PAD_GPMI_WRN__GPMI_WRN 0x0190
+#define MX28_PAD_GPMI_ALE__GPMI_ALE 0x01a0
+#define MX28_PAD_GPMI_CLE__GPMI_CLE 0x01b0
+#define MX28_PAD_GPMI_RESETN__GPMI_RESETN 0x01c0
+#define MX28_PAD_LCD_D00__LCD_D0 0x1000
+#define MX28_PAD_LCD_D01__LCD_D1 0x1010
+#define MX28_PAD_LCD_D02__LCD_D2 0x1020
+#define MX28_PAD_LCD_D03__LCD_D3 0x1030
+#define MX28_PAD_LCD_D04__LCD_D4 0x1040
+#define MX28_PAD_LCD_D05__LCD_D5 0x1050
+#define MX28_PAD_LCD_D06__LCD_D6 0x1060
+#define MX28_PAD_LCD_D07__LCD_D7 0x1070
+#define MX28_PAD_LCD_D08__LCD_D8 0x1080
+#define MX28_PAD_LCD_D09__LCD_D9 0x1090
+#define MX28_PAD_LCD_D10__LCD_D10 0x10a0
+#define MX28_PAD_LCD_D11__LCD_D11 0x10b0
+#define MX28_PAD_LCD_D12__LCD_D12 0x10c0
+#define MX28_PAD_LCD_D13__LCD_D13 0x10d0
+#define MX28_PAD_LCD_D14__LCD_D14 0x10e0
+#define MX28_PAD_LCD_D15__LCD_D15 0x10f0
+#define MX28_PAD_LCD_D16__LCD_D16 0x1100
+#define MX28_PAD_LCD_D17__LCD_D17 0x1110
+#define MX28_PAD_LCD_D18__LCD_D18 0x1120
+#define MX28_PAD_LCD_D19__LCD_D19 0x1130
+#define MX28_PAD_LCD_D20__LCD_D20 0x1140
+#define MX28_PAD_LCD_D21__LCD_D21 0x1150
+#define MX28_PAD_LCD_D22__LCD_D22 0x1160
+#define MX28_PAD_LCD_D23__LCD_D23 0x1170
+#define MX28_PAD_LCD_RD_E__LCD_RD_E 0x1180
+#define MX28_PAD_LCD_WR_RWN__LCD_WR_RWN 0x1190
+#define MX28_PAD_LCD_RS__LCD_RS 0x11a0
+#define MX28_PAD_LCD_CS__LCD_CS 0x11b0
+#define MX28_PAD_LCD_VSYNC__LCD_VSYNC 0x11c0
+#define MX28_PAD_LCD_HSYNC__LCD_HSYNC 0x11d0
+#define MX28_PAD_LCD_DOTCLK__LCD_DOTCLK 0x11e0
+#define MX28_PAD_LCD_ENABLE__LCD_ENABLE 0x11f0
+#define MX28_PAD_SSP0_DATA0__SSP0_D0 0x2000
+#define MX28_PAD_SSP0_DATA1__SSP0_D1 0x2010
+#define MX28_PAD_SSP0_DATA2__SSP0_D2 0x2020
+#define MX28_PAD_SSP0_DATA3__SSP0_D3 0x2030
+#define MX28_PAD_SSP0_DATA4__SSP0_D4 0x2040
+#define MX28_PAD_SSP0_DATA5__SSP0_D5 0x2050
+#define MX28_PAD_SSP0_DATA6__SSP0_D6 0x2060
+#define MX28_PAD_SSP0_DATA7__SSP0_D7 0x2070
+#define MX28_PAD_SSP0_CMD__SSP0_CMD 0x2080
+#define MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT 0x2090
+#define MX28_PAD_SSP0_SCK__SSP0_SCK 0x20a0
+#define MX28_PAD_SSP1_SCK__SSP1_SCK 0x20c0
+#define MX28_PAD_SSP1_CMD__SSP1_CMD 0x20d0
+#define MX28_PAD_SSP1_DATA0__SSP1_D0 0x20e0
+#define MX28_PAD_SSP1_DATA3__SSP1_D3 0x20f0
+#define MX28_PAD_SSP2_SCK__SSP2_SCK 0x2100
+#define MX28_PAD_SSP2_MOSI__SSP2_CMD 0x2110
+#define MX28_PAD_SSP2_MISO__SSP2_D0 0x2120
+#define MX28_PAD_SSP2_SS0__SSP2_D3 0x2130
+#define MX28_PAD_SSP2_SS1__SSP2_D4 0x2140
+#define MX28_PAD_SSP2_SS2__SSP2_D5 0x2150
+#define MX28_PAD_SSP3_SCK__SSP3_SCK 0x2180
+#define MX28_PAD_SSP3_MOSI__SSP3_CMD 0x2190
+#define MX28_PAD_SSP3_MISO__SSP3_D0 0x21a0
+#define MX28_PAD_SSP3_SS0__SSP3_D3 0x21b0
+#define MX28_PAD_AUART0_RX__AUART0_RX 0x3000
+#define MX28_PAD_AUART0_TX__AUART0_TX 0x3010
+#define MX28_PAD_AUART0_CTS__AUART0_CTS 0x3020
+#define MX28_PAD_AUART0_RTS__AUART0_RTS 0x3030
+#define MX28_PAD_AUART1_RX__AUART1_RX 0x3040
+#define MX28_PAD_AUART1_TX__AUART1_TX 0x3050
+#define MX28_PAD_AUART1_CTS__AUART1_CTS 0x3060
+#define MX28_PAD_AUART1_RTS__AUART1_RTS 0x3070
+#define MX28_PAD_AUART2_RX__AUART2_RX 0x3080
+#define MX28_PAD_AUART2_TX__AUART2_TX 0x3090
+#define MX28_PAD_AUART2_CTS__AUART2_CTS 0x30a0
+#define MX28_PAD_AUART2_RTS__AUART2_RTS 0x30b0
+#define MX28_PAD_AUART3_RX__AUART3_RX 0x30c0
+#define MX28_PAD_AUART3_TX__AUART3_TX 0x30d0
+#define MX28_PAD_AUART3_CTS__AUART3_CTS 0x30e0
+#define MX28_PAD_AUART3_RTS__AUART3_RTS 0x30f0
+#define MX28_PAD_PWM0__PWM_0 0x3100
+#define MX28_PAD_PWM1__PWM_1 0x3110
+#define MX28_PAD_PWM2__PWM_2 0x3120
+#define MX28_PAD_SAIF0_MCLK__SAIF0_MCLK 0x3140
+#define MX28_PAD_SAIF0_LRCLK__SAIF0_LRCLK 0x3150
+#define MX28_PAD_SAIF0_BITCLK__SAIF0_BITCLK 0x3160
+#define MX28_PAD_SAIF0_SDATA0__SAIF0_SDATA0 0x3170
+#define MX28_PAD_I2C0_SCL__I2C0_SCL 0x3180
+#define MX28_PAD_I2C0_SDA__I2C0_SDA 0x3190
+#define MX28_PAD_SAIF1_SDATA0__SAIF1_SDATA0 0x31a0
+#define MX28_PAD_SPDIF__SPDIF_TX 0x31b0
+#define MX28_PAD_PWM3__PWM_3 0x31c0
+#define MX28_PAD_PWM4__PWM_4 0x31d0
+#define MX28_PAD_LCD_RESET__LCD_RESET 0x31e0
+#define MX28_PAD_ENET0_MDC__ENET0_MDC 0x4000
+#define MX28_PAD_ENET0_MDIO__ENET0_MDIO 0x4010
+#define MX28_PAD_ENET0_RX_EN__ENET0_RX_EN 0x4020
+#define MX28_PAD_ENET0_RXD0__ENET0_RXD0 0x4030
+#define MX28_PAD_ENET0_RXD1__ENET0_RXD1 0x4040
+#define MX28_PAD_ENET0_TX_CLK__ENET0_TX_CLK 0x4050
+#define MX28_PAD_ENET0_TX_EN__ENET0_TX_EN 0x4060
+#define MX28_PAD_ENET0_TXD0__ENET0_TXD0 0x4070
+#define MX28_PAD_ENET0_TXD1__ENET0_TXD1 0x4080
+#define MX28_PAD_ENET0_RXD2__ENET0_RXD2 0x4090
+#define MX28_PAD_ENET0_RXD3__ENET0_RXD3 0x40a0
+#define MX28_PAD_ENET0_TXD2__ENET0_TXD2 0x40b0
+#define MX28_PAD_ENET0_TXD3__ENET0_TXD3 0x40c0
+#define MX28_PAD_ENET0_RX_CLK__ENET0_RX_CLK 0x40d0
+#define MX28_PAD_ENET0_COL__ENET0_COL 0x40e0
+#define MX28_PAD_ENET0_CRS__ENET0_CRS 0x40f0
+#define MX28_PAD_ENET_CLK__CLKCTRL_ENET 0x4100
+#define MX28_PAD_JTAG_RTCK__JTAG_RTCK 0x4140
+#define MX28_PAD_EMI_D00__EMI_DATA0 0x5000
+#define MX28_PAD_EMI_D01__EMI_DATA1 0x5010
+#define MX28_PAD_EMI_D02__EMI_DATA2 0x5020
+#define MX28_PAD_EMI_D03__EMI_DATA3 0x5030
+#define MX28_PAD_EMI_D04__EMI_DATA4 0x5040
+#define MX28_PAD_EMI_D05__EMI_DATA5 0x5050
+#define MX28_PAD_EMI_D06__EMI_DATA6 0x5060
+#define MX28_PAD_EMI_D07__EMI_DATA7 0x5070
+#define MX28_PAD_EMI_D08__EMI_DATA8 0x5080
+#define MX28_PAD_EMI_D09__EMI_DATA9 0x5090
+#define MX28_PAD_EMI_D10__EMI_DATA10 0x50a0
+#define MX28_PAD_EMI_D11__EMI_DATA11 0x50b0
+#define MX28_PAD_EMI_D12__EMI_DATA12 0x50c0
+#define MX28_PAD_EMI_D13__EMI_DATA13 0x50d0
+#define MX28_PAD_EMI_D14__EMI_DATA14 0x50e0
+#define MX28_PAD_EMI_D15__EMI_DATA15 0x50f0
+#define MX28_PAD_EMI_ODT0__EMI_ODT0 0x5100
+#define MX28_PAD_EMI_DQM0__EMI_DQM0 0x5110
+#define MX28_PAD_EMI_ODT1__EMI_ODT1 0x5120
+#define MX28_PAD_EMI_DQM1__EMI_DQM1 0x5130
+#define MX28_PAD_EMI_DDR_OPEN_FB__EMI_DDR_OPEN_FEEDBACK 0x5140
+#define MX28_PAD_EMI_CLK__EMI_CLK 0x5150
+#define MX28_PAD_EMI_DQS0__EMI_DQS0 0x5160
+#define MX28_PAD_EMI_DQS1__EMI_DQS1 0x5170
+#define MX28_PAD_EMI_DDR_OPEN__EMI_DDR_OPEN 0x51a0
+#define MX28_PAD_EMI_A00__EMI_ADDR0 0x6000
+#define MX28_PAD_EMI_A01__EMI_ADDR1 0x6010
+#define MX28_PAD_EMI_A02__EMI_ADDR2 0x6020
+#define MX28_PAD_EMI_A03__EMI_ADDR3 0x6030
+#define MX28_PAD_EMI_A04__EMI_ADDR4 0x6040
+#define MX28_PAD_EMI_A05__EMI_ADDR5 0x6050
+#define MX28_PAD_EMI_A06__EMI_ADDR6 0x6060
+#define MX28_PAD_EMI_A07__EMI_ADDR7 0x6070
+#define MX28_PAD_EMI_A08__EMI_ADDR8 0x6080
+#define MX28_PAD_EMI_A09__EMI_ADDR9 0x6090
+#define MX28_PAD_EMI_A10__EMI_ADDR10 0x60a0
+#define MX28_PAD_EMI_A11__EMI_ADDR11 0x60b0
+#define MX28_PAD_EMI_A12__EMI_ADDR12 0x60c0
+#define MX28_PAD_EMI_A13__EMI_ADDR13 0x60d0
+#define MX28_PAD_EMI_A14__EMI_ADDR14 0x60e0
+#define MX28_PAD_EMI_BA0__EMI_BA0 0x6100
+#define MX28_PAD_EMI_BA1__EMI_BA1 0x6110
+#define MX28_PAD_EMI_BA2__EMI_BA2 0x6120
+#define MX28_PAD_EMI_CASN__EMI_CASN 0x6130
+#define MX28_PAD_EMI_RASN__EMI_RASN 0x6140
+#define MX28_PAD_EMI_WEN__EMI_WEN 0x6150
+#define MX28_PAD_EMI_CE0N__EMI_CE0N 0x6160
+#define MX28_PAD_EMI_CE1N__EMI_CE1N 0x6170
+#define MX28_PAD_EMI_CKE__EMI_CKE 0x6180
+#define MX28_PAD_GPMI_D00__SSP1_D0 0x0001
+#define MX28_PAD_GPMI_D01__SSP1_D1 0x0011
+#define MX28_PAD_GPMI_D02__SSP1_D2 0x0021
+#define MX28_PAD_GPMI_D03__SSP1_D3 0x0031
+#define MX28_PAD_GPMI_D04__SSP1_D4 0x0041
+#define MX28_PAD_GPMI_D05__SSP1_D5 0x0051
+#define MX28_PAD_GPMI_D06__SSP1_D6 0x0061
+#define MX28_PAD_GPMI_D07__SSP1_D7 0x0071
+#define MX28_PAD_GPMI_CE0N__SSP3_D0 0x0101
+#define MX28_PAD_GPMI_CE1N__SSP3_D3 0x0111
+#define MX28_PAD_GPMI_CE2N__CAN1_TX 0x0121
+#define MX28_PAD_GPMI_CE3N__CAN1_RX 0x0131
+#define MX28_PAD_GPMI_RDY0__SSP1_CARD_DETECT 0x0141
+#define MX28_PAD_GPMI_RDY1__SSP1_CMD 0x0151
+#define MX28_PAD_GPMI_RDY2__CAN0_TX 0x0161
+#define MX28_PAD_GPMI_RDY3__CAN0_RX 0x0171
+#define MX28_PAD_GPMI_RDN__SSP3_SCK 0x0181
+#define MX28_PAD_GPMI_WRN__SSP1_SCK 0x0191
+#define MX28_PAD_GPMI_ALE__SSP3_D1 0x01a1
+#define MX28_PAD_GPMI_CLE__SSP3_D2 0x01b1
+#define MX28_PAD_GPMI_RESETN__SSP3_CMD 0x01c1
+#define MX28_PAD_LCD_D03__ETM_DA8 0x1031
+#define MX28_PAD_LCD_D04__ETM_DA9 0x1041
+#define MX28_PAD_LCD_D08__ETM_DA3 0x1081
+#define MX28_PAD_LCD_D09__ETM_DA4 0x1091
+#define MX28_PAD_LCD_D20__ENET1_1588_EVENT2_OUT 0x1141
+#define MX28_PAD_LCD_D21__ENET1_1588_EVENT2_IN 0x1151
+#define MX28_PAD_LCD_D22__ENET1_1588_EVENT3_OUT 0x1161
+#define MX28_PAD_LCD_D23__ENET1_1588_EVENT3_IN 0x1171
+#define MX28_PAD_LCD_RD_E__LCD_VSYNC 0x1181
+#define MX28_PAD_LCD_WR_RWN__LCD_HSYNC 0x1191
+#define MX28_PAD_LCD_RS__LCD_DOTCLK 0x11a1
+#define MX28_PAD_LCD_CS__LCD_ENABLE 0x11b1
+#define MX28_PAD_LCD_VSYNC__SAIF1_SDATA0 0x11c1
+#define MX28_PAD_LCD_HSYNC__SAIF1_SDATA1 0x11d1
+#define MX28_PAD_LCD_DOTCLK__SAIF1_MCLK 0x11e1
+#define MX28_PAD_SSP0_DATA4__SSP2_D0 0x2041
+#define MX28_PAD_SSP0_DATA5__SSP2_D3 0x2051
+#define MX28_PAD_SSP0_DATA6__SSP2_CMD 0x2061
+#define MX28_PAD_SSP0_DATA7__SSP2_SCK 0x2071
+#define MX28_PAD_SSP1_SCK__SSP2_D1 0x20c1
+#define MX28_PAD_SSP1_CMD__SSP2_D2 0x20d1
+#define MX28_PAD_SSP1_DATA0__SSP2_D6 0x20e1
+#define MX28_PAD_SSP1_DATA3__SSP2_D7 0x20f1
+#define MX28_PAD_SSP2_SCK__AUART2_RX 0x2101
+#define MX28_PAD_SSP2_MOSI__AUART2_TX 0x2111
+#define MX28_PAD_SSP2_MISO__AUART3_RX 0x2121
+#define MX28_PAD_SSP2_SS0__AUART3_TX 0x2131
+#define MX28_PAD_SSP2_SS1__SSP2_D1 0x2141
+#define MX28_PAD_SSP2_SS2__SSP2_D2 0x2151
+#define MX28_PAD_SSP3_SCK__AUART4_TX 0x2181
+#define MX28_PAD_SSP3_MOSI__AUART4_RX 0x2191
+#define MX28_PAD_SSP3_MISO__AUART4_RTS 0x21a1
+#define MX28_PAD_SSP3_SS0__AUART4_CTS 0x21b1
+#define MX28_PAD_AUART0_RX__I2C0_SCL 0x3001
+#define MX28_PAD_AUART0_TX__I2C0_SDA 0x3011
+#define MX28_PAD_AUART0_CTS__AUART4_RX 0x3021
+#define MX28_PAD_AUART0_RTS__AUART4_TX 0x3031
+#define MX28_PAD_AUART1_RX__SSP2_CARD_DETECT 0x3041
+#define MX28_PAD_AUART1_TX__SSP3_CARD_DETECT 0x3051
+#define MX28_PAD_AUART1_CTS__USB0_OVERCURRENT 0x3061
+#define MX28_PAD_AUART1_RTS__USB0_ID 0x3071
+#define MX28_PAD_AUART2_RX__SSP3_D1 0x3081
+#define MX28_PAD_AUART2_TX__SSP3_D2 0x3091
+#define MX28_PAD_AUART2_CTS__I2C1_SCL 0x30a1
+#define MX28_PAD_AUART2_RTS__I2C1_SDA 0x30b1
+#define MX28_PAD_AUART3_RX__CAN0_TX 0x30c1
+#define MX28_PAD_AUART3_TX__CAN0_RX 0x30d1
+#define MX28_PAD_AUART3_CTS__CAN1_TX 0x30e1
+#define MX28_PAD_AUART3_RTS__CAN1_RX 0x30f1
+#define MX28_PAD_PWM0__I2C1_SCL 0x3101
+#define MX28_PAD_PWM1__I2C1_SDA 0x3111
+#define MX28_PAD_PWM2__USB0_ID 0x3121
+#define MX28_PAD_SAIF0_MCLK__PWM_3 0x3141
+#define MX28_PAD_SAIF0_LRCLK__PWM_4 0x3151
+#define MX28_PAD_SAIF0_BITCLK__PWM_5 0x3161
+#define MX28_PAD_SAIF0_SDATA0__PWM_6 0x3171
+#define MX28_PAD_I2C0_SCL__TIMROT_ROTARYA 0x3181
+#define MX28_PAD_I2C0_SDA__TIMROT_ROTARYB 0x3191
+#define MX28_PAD_SAIF1_SDATA0__PWM_7 0x31a1
+#define MX28_PAD_LCD_RESET__LCD_VSYNC 0x31e1
+#define MX28_PAD_ENET0_MDC__GPMI_CE4N 0x4001
+#define MX28_PAD_ENET0_MDIO__GPMI_CE5N 0x4011
+#define MX28_PAD_ENET0_RX_EN__GPMI_CE6N 0x4021
+#define MX28_PAD_ENET0_RXD0__GPMI_CE7N 0x4031
+#define MX28_PAD_ENET0_RXD1__GPMI_READY4 0x4041
+#define MX28_PAD_ENET0_TX_CLK__HSADC_TRIGGER 0x4051
+#define MX28_PAD_ENET0_TX_EN__GPMI_READY5 0x4061
+#define MX28_PAD_ENET0_TXD0__GPMI_READY6 0x4071
+#define MX28_PAD_ENET0_TXD1__GPMI_READY7 0x4081
+#define MX28_PAD_ENET0_RXD2__ENET1_RXD0 0x4091
+#define MX28_PAD_ENET0_RXD3__ENET1_RXD1 0x40a1
+#define MX28_PAD_ENET0_TXD2__ENET1_TXD0 0x40b1
+#define MX28_PAD_ENET0_TXD3__ENET1_TXD1 0x40c1
+#define MX28_PAD_ENET0_RX_CLK__ENET0_RX_ER 0x40d1
+#define MX28_PAD_ENET0_COL__ENET1_TX_EN 0x40e1
+#define MX28_PAD_ENET0_CRS__ENET1_RX_EN 0x40f1
+#define MX28_PAD_GPMI_CE2N__ENET0_RX_ER 0x0122
+#define MX28_PAD_GPMI_CE3N__SAIF1_MCLK 0x0132
+#define MX28_PAD_GPMI_RDY0__USB0_ID 0x0142
+#define MX28_PAD_GPMI_RDY2__ENET0_TX_ER 0x0162
+#define MX28_PAD_GPMI_RDY3__HSADC_TRIGGER 0x0172
+#define MX28_PAD_GPMI_ALE__SSP3_D4 0x01a2
+#define MX28_PAD_GPMI_CLE__SSP3_D5 0x01b2
+#define MX28_PAD_LCD_D00__ETM_DA0 0x1002
+#define MX28_PAD_LCD_D01__ETM_DA1 0x1012
+#define MX28_PAD_LCD_D02__ETM_DA2 0x1022
+#define MX28_PAD_LCD_D03__ETM_DA3 0x1032
+#define MX28_PAD_LCD_D04__ETM_DA4 0x1042
+#define MX28_PAD_LCD_D05__ETM_DA5 0x1052
+#define MX28_PAD_LCD_D06__ETM_DA6 0x1062
+#define MX28_PAD_LCD_D07__ETM_DA7 0x1072
+#define MX28_PAD_LCD_D08__ETM_DA8 0x1082
+#define MX28_PAD_LCD_D09__ETM_DA9 0x1092
+#define MX28_PAD_LCD_D10__ETM_DA10 0x10a2
+#define MX28_PAD_LCD_D11__ETM_DA11 0x10b2
+#define MX28_PAD_LCD_D12__ETM_DA12 0x10c2
+#define MX28_PAD_LCD_D13__ETM_DA13 0x10d2
+#define MX28_PAD_LCD_D14__ETM_DA14 0x10e2
+#define MX28_PAD_LCD_D15__ETM_DA15 0x10f2
+#define MX28_PAD_LCD_D16__ETM_DA7 0x1102
+#define MX28_PAD_LCD_D17__ETM_DA6 0x1112
+#define MX28_PAD_LCD_D18__ETM_DA5 0x1122
+#define MX28_PAD_LCD_D19__ETM_DA4 0x1132
+#define MX28_PAD_LCD_D20__ETM_DA3 0x1142
+#define MX28_PAD_LCD_D21__ETM_DA2 0x1152
+#define MX28_PAD_LCD_D22__ETM_DA1 0x1162
+#define MX28_PAD_LCD_D23__ETM_DA0 0x1172
+#define MX28_PAD_LCD_RD_E__ETM_TCTL 0x1182
+#define MX28_PAD_LCD_WR_RWN__ETM_TCLK 0x1192
+#define MX28_PAD_LCD_HSYNC__ETM_TCTL 0x11d2
+#define MX28_PAD_LCD_DOTCLK__ETM_TCLK 0x11e2
+#define MX28_PAD_SSP1_SCK__ENET0_1588_EVENT2_OUT 0x20c2
+#define MX28_PAD_SSP1_CMD__ENET0_1588_EVENT2_IN 0x20d2
+#define MX28_PAD_SSP1_DATA0__ENET0_1588_EVENT3_OUT 0x20e2
+#define MX28_PAD_SSP1_DATA3__ENET0_1588_EVENT3_IN 0x20f2
+#define MX28_PAD_SSP2_SCK__SAIF0_SDATA1 0x2102
+#define MX28_PAD_SSP2_MOSI__SAIF0_SDATA2 0x2112
+#define MX28_PAD_SSP2_MISO__SAIF1_SDATA1 0x2122
+#define MX28_PAD_SSP2_SS0__SAIF1_SDATA2 0x2132
+#define MX28_PAD_SSP2_SS1__USB1_OVERCURRENT 0x2142
+#define MX28_PAD_SSP2_SS2__USB0_OVERCURRENT 0x2152
+#define MX28_PAD_SSP3_SCK__ENET1_1588_EVENT0_OUT 0x2182
+#define MX28_PAD_SSP3_MOSI__ENET1_1588_EVENT0_IN 0x2192
+#define MX28_PAD_SSP3_MISO__ENET1_1588_EVENT1_OUT 0x21a2
+#define MX28_PAD_SSP3_SS0__ENET1_1588_EVENT1_IN 0x21b2
+#define MX28_PAD_AUART0_RX__DUART_CTS 0x3002
+#define MX28_PAD_AUART0_TX__DUART_RTS 0x3012
+#define MX28_PAD_AUART0_CTS__DUART_RX 0x3022
+#define MX28_PAD_AUART0_RTS__DUART_TX 0x3032
+#define MX28_PAD_AUART1_RX__PWM_0 0x3042
+#define MX28_PAD_AUART1_TX__PWM_1 0x3052
+#define MX28_PAD_AUART1_CTS__TIMROT_ROTARYA 0x3062
+#define MX28_PAD_AUART1_RTS__TIMROT_ROTARYB 0x3072
+#define MX28_PAD_AUART2_RX__SSP3_D4 0x3082
+#define MX28_PAD_AUART2_TX__SSP3_D5 0x3092
+#define MX28_PAD_AUART2_CTS__SAIF1_BITCLK 0x30a2
+#define MX28_PAD_AUART2_RTS__SAIF1_LRCLK 0x30b2
+#define MX28_PAD_AUART3_RX__ENET0_1588_EVENT0_OUT 0x30c2
+#define MX28_PAD_AUART3_TX__ENET0_1588_EVENT0_IN 0x30d2
+#define MX28_PAD_AUART3_CTS__ENET0_1588_EVENT1_OUT 0x30e2
+#define MX28_PAD_AUART3_RTS__ENET0_1588_EVENT1_IN 0x30f2
+#define MX28_PAD_PWM0__DUART_RX 0x3102
+#define MX28_PAD_PWM1__DUART_TX 0x3112
+#define MX28_PAD_PWM2__USB1_OVERCURRENT 0x3122
+#define MX28_PAD_SAIF0_MCLK__AUART4_CTS 0x3142
+#define MX28_PAD_SAIF0_LRCLK__AUART4_RTS 0x3152
+#define MX28_PAD_SAIF0_BITCLK__AUART4_RX 0x3162
+#define MX28_PAD_SAIF0_SDATA0__AUART4_TX 0x3172
+#define MX28_PAD_I2C0_SCL__DUART_RX 0x3182
+#define MX28_PAD_I2C0_SDA__DUART_TX 0x3192
+#define MX28_PAD_SAIF1_SDATA0__SAIF0_SDATA1 0x31a2
+#define MX28_PAD_SPDIF__ENET1_RX_ER 0x31b2
+#define MX28_PAD_ENET0_MDC__SAIF0_SDATA1 0x4002
+#define MX28_PAD_ENET0_MDIO__SAIF0_SDATA2 0x4012
+#define MX28_PAD_ENET0_RX_EN__SAIF1_SDATA1 0x4022
+#define MX28_PAD_ENET0_RXD0__SAIF1_SDATA2 0x4032
+#define MX28_PAD_ENET0_TX_CLK__ENET0_1588_EVENT2_OUT 0x4052
+#define MX28_PAD_ENET0_RXD2__ENET0_1588_EVENT0_OUT 0x4092
+#define MX28_PAD_ENET0_RXD3__ENET0_1588_EVENT0_IN 0x40a2
+#define MX28_PAD_ENET0_TXD2__ENET0_1588_EVENT1_OUT 0x40b2
+#define MX28_PAD_ENET0_TXD3__ENET0_1588_EVENT1_IN 0x40c2
+#define MX28_PAD_ENET0_RX_CLK__ENET0_1588_EVENT2_IN 0x40d2
+#define MX28_PAD_ENET0_COL__ENET0_1588_EVENT3_OUT 0x40e2
+#define MX28_PAD_ENET0_CRS__ENET0_1588_EVENT3_IN 0x40f2
+#define MX28_PAD_GPMI_D00__GPIO_0_0 0x0003
+#define MX28_PAD_GPMI_D01__GPIO_0_1 0x0013
+#define MX28_PAD_GPMI_D02__GPIO_0_2 0x0023
+#define MX28_PAD_GPMI_D03__GPIO_0_3 0x0033
+#define MX28_PAD_GPMI_D04__GPIO_0_4 0x0043
+#define MX28_PAD_GPMI_D05__GPIO_0_5 0x0053
+#define MX28_PAD_GPMI_D06__GPIO_0_6 0x0063
+#define MX28_PAD_GPMI_D07__GPIO_0_7 0x0073
+#define MX28_PAD_GPMI_CE0N__GPIO_0_16 0x0103
+#define MX28_PAD_GPMI_CE1N__GPIO_0_17 0x0113
+#define MX28_PAD_GPMI_CE2N__GPIO_0_18 0x0123
+#define MX28_PAD_GPMI_CE3N__GPIO_0_19 0x0133
+#define MX28_PAD_GPMI_RDY0__GPIO_0_20 0x0143
+#define MX28_PAD_GPMI_RDY1__GPIO_0_21 0x0153
+#define MX28_PAD_GPMI_RDY2__GPIO_0_22 0x0163
+#define MX28_PAD_GPMI_RDY3__GPIO_0_23 0x0173
+#define MX28_PAD_GPMI_RDN__GPIO_0_24 0x0183
+#define MX28_PAD_GPMI_WRN__GPIO_0_25 0x0193
+#define MX28_PAD_GPMI_ALE__GPIO_0_26 0x01a3
+#define MX28_PAD_GPMI_CLE__GPIO_0_27 0x01b3
+#define MX28_PAD_GPMI_RESETN__GPIO_0_28 0x01c3
+#define MX28_PAD_LCD_D00__GPIO_1_0 0x1003
+#define MX28_PAD_LCD_D01__GPIO_1_1 0x1013
+#define MX28_PAD_LCD_D02__GPIO_1_2 0x1023
+#define MX28_PAD_LCD_D03__GPIO_1_3 0x1033
+#define MX28_PAD_LCD_D04__GPIO_1_4 0x1043
+#define MX28_PAD_LCD_D05__GPIO_1_5 0x1053
+#define MX28_PAD_LCD_D06__GPIO_1_6 0x1063
+#define MX28_PAD_LCD_D07__GPIO_1_7 0x1073
+#define MX28_PAD_LCD_D08__GPIO_1_8 0x1083
+#define MX28_PAD_LCD_D09__GPIO_1_9 0x1093
+#define MX28_PAD_LCD_D10__GPIO_1_10 0x10a3
+#define MX28_PAD_LCD_D11__GPIO_1_11 0x10b3
+#define MX28_PAD_LCD_D12__GPIO_1_12 0x10c3
+#define MX28_PAD_LCD_D13__GPIO_1_13 0x10d3
+#define MX28_PAD_LCD_D14__GPIO_1_14 0x10e3
+#define MX28_PAD_LCD_D15__GPIO_1_15 0x10f3
+#define MX28_PAD_LCD_D16__GPIO_1_16 0x1103
+#define MX28_PAD_LCD_D17__GPIO_1_17 0x1113
+#define MX28_PAD_LCD_D18__GPIO_1_18 0x1123
+#define MX28_PAD_LCD_D19__GPIO_1_19 0x1133
+#define MX28_PAD_LCD_D20__GPIO_1_20 0x1143
+#define MX28_PAD_LCD_D21__GPIO_1_21 0x1153
+#define MX28_PAD_LCD_D22__GPIO_1_22 0x1163
+#define MX28_PAD_LCD_D23__GPIO_1_23 0x1173
+#define MX28_PAD_LCD_RD_E__GPIO_1_24 0x1183
+#define MX28_PAD_LCD_WR_RWN__GPIO_1_25 0x1193
+#define MX28_PAD_LCD_RS__GPIO_1_26 0x11a3
+#define MX28_PAD_LCD_CS__GPIO_1_27 0x11b3
+#define MX28_PAD_LCD_VSYNC__GPIO_1_28 0x11c3
+#define MX28_PAD_LCD_HSYNC__GPIO_1_29 0x11d3
+#define MX28_PAD_LCD_DOTCLK__GPIO_1_30 0x11e3
+#define MX28_PAD_LCD_ENABLE__GPIO_1_31 0x11f3
+#define MX28_PAD_SSP0_DATA0__GPIO_2_0 0x2003
+#define MX28_PAD_SSP0_DATA1__GPIO_2_1 0x2013
+#define MX28_PAD_SSP0_DATA2__GPIO_2_2 0x2023
+#define MX28_PAD_SSP0_DATA3__GPIO_2_3 0x2033
+#define MX28_PAD_SSP0_DATA4__GPIO_2_4 0x2043
+#define MX28_PAD_SSP0_DATA5__GPIO_2_5 0x2053
+#define MX28_PAD_SSP0_DATA6__GPIO_2_6 0x2063
+#define MX28_PAD_SSP0_DATA7__GPIO_2_7 0x2073
+#define MX28_PAD_SSP0_CMD__GPIO_2_8 0x2083
+#define MX28_PAD_SSP0_DETECT__GPIO_2_9 0x2093
+#define MX28_PAD_SSP0_SCK__GPIO_2_10 0x20a3
+#define MX28_PAD_SSP1_SCK__GPIO_2_12 0x20c3
+#define MX28_PAD_SSP1_CMD__GPIO_2_13 0x20d3
+#define MX28_PAD_SSP1_DATA0__GPIO_2_14 0x20e3
+#define MX28_PAD_SSP1_DATA3__GPIO_2_15 0x20f3
+#define MX28_PAD_SSP2_SCK__GPIO_2_16 0x2103
+#define MX28_PAD_SSP2_MOSI__GPIO_2_17 0x2113
+#define MX28_PAD_SSP2_MISO__GPIO_2_18 0x2123
+#define MX28_PAD_SSP2_SS0__GPIO_2_19 0x2133
+#define MX28_PAD_SSP2_SS1__GPIO_2_20 0x2143
+#define MX28_PAD_SSP2_SS2__GPIO_2_21 0x2153
+#define MX28_PAD_SSP3_SCK__GPIO_2_24 0x2183
+#define MX28_PAD_SSP3_MOSI__GPIO_2_25 0x2193
+#define MX28_PAD_SSP3_MISO__GPIO_2_26 0x21a3
+#define MX28_PAD_SSP3_SS0__GPIO_2_27 0x21b3
+#define MX28_PAD_AUART0_RX__GPIO_3_0 0x3003
+#define MX28_PAD_AUART0_TX__GPIO_3_1 0x3013
+#define MX28_PAD_AUART0_CTS__GPIO_3_2 0x3023
+#define MX28_PAD_AUART0_RTS__GPIO_3_3 0x3033
+#define MX28_PAD_AUART1_RX__GPIO_3_4 0x3043
+#define MX28_PAD_AUART1_TX__GPIO_3_5 0x3053
+#define MX28_PAD_AUART1_CTS__GPIO_3_6 0x3063
+#define MX28_PAD_AUART1_RTS__GPIO_3_7 0x3073
+#define MX28_PAD_AUART2_RX__GPIO_3_8 0x3083
+#define MX28_PAD_AUART2_TX__GPIO_3_9 0x3093
+#define MX28_PAD_AUART2_CTS__GPIO_3_10 0x30a3
+#define MX28_PAD_AUART2_RTS__GPIO_3_11 0x30b3
+#define MX28_PAD_AUART3_RX__GPIO_3_12 0x30c3
+#define MX28_PAD_AUART3_TX__GPIO_3_13 0x30d3
+#define MX28_PAD_AUART3_CTS__GPIO_3_14 0x30e3
+#define MX28_PAD_AUART3_RTS__GPIO_3_15 0x30f3
+#define MX28_PAD_PWM0__GPIO_3_16 0x3103
+#define MX28_PAD_PWM1__GPIO_3_17 0x3113
+#define MX28_PAD_PWM2__GPIO_3_18 0x3123
+#define MX28_PAD_SAIF0_MCLK__GPIO_3_20 0x3143
+#define MX28_PAD_SAIF0_LRCLK__GPIO_3_21 0x3153
+#define MX28_PAD_SAIF0_BITCLK__GPIO_3_22 0x3163
+#define MX28_PAD_SAIF0_SDATA0__GPIO_3_23 0x3173
+#define MX28_PAD_I2C0_SCL__GPIO_3_24 0x3183
+#define MX28_PAD_I2C0_SDA__GPIO_3_25 0x3193
+#define MX28_PAD_SAIF1_SDATA0__GPIO_3_26 0x31a3
+#define MX28_PAD_SPDIF__GPIO_3_27 0x31b3
+#define MX28_PAD_PWM3__GPIO_3_28 0x31c3
+#define MX28_PAD_PWM4__GPIO_3_29 0x31d3
+#define MX28_PAD_LCD_RESET__GPIO_3_30 0x31e3
+#define MX28_PAD_ENET0_MDC__GPIO_4_0 0x4003
+#define MX28_PAD_ENET0_MDIO__GPIO_4_1 0x4013
+#define MX28_PAD_ENET0_RX_EN__GPIO_4_2 0x4023
+#define MX28_PAD_ENET0_RXD0__GPIO_4_3 0x4033
+#define MX28_PAD_ENET0_RXD1__GPIO_4_4 0x4043
+#define MX28_PAD_ENET0_TX_CLK__GPIO_4_5 0x4053
+#define MX28_PAD_ENET0_TX_EN__GPIO_4_6 0x4063
+#define MX28_PAD_ENET0_TXD0__GPIO_4_7 0x4073
+#define MX28_PAD_ENET0_TXD1__GPIO_4_8 0x4083
+#define MX28_PAD_ENET0_RXD2__GPIO_4_9 0x4093
+#define MX28_PAD_ENET0_RXD3__GPIO_4_10 0x40a3
+#define MX28_PAD_ENET0_TXD2__GPIO_4_11 0x40b3
+#define MX28_PAD_ENET0_TXD3__GPIO_4_12 0x40c3
+#define MX28_PAD_ENET0_RX_CLK__GPIO_4_13 0x40d3
+#define MX28_PAD_ENET0_COL__GPIO_4_14 0x40e3
+#define MX28_PAD_ENET0_CRS__GPIO_4_15 0x40f3
+#define MX28_PAD_ENET_CLK__GPIO_4_16 0x4103
+#define MX28_PAD_JTAG_RTCK__GPIO_4_20 0x4143
+
+#endif /* __DT_BINDINGS_MX28_PINCTRL_H__ */
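The pad IDs above follow a fixed bit layout that can be read straight off the table: bits 15..12 hold the pad bank, bits 11..4 the pad number within that bank, and bits 3..0 the mux selector (selector 3 is the GPIO function, which is why every GPIO_x_y entry ends in 3). A minimal C sketch, assuming only that layout and using values copied from the header above, decodes a few of the IDs:

#include <assert.h>
#include <stdio.h>

/* Apparent MX28 pinmux ID layout: bank[15:12] | pin[11:4] | muxsel[3:0]. */
#define MUXID_BANK(m)   (((m) >> 12) & 0xf)
#define MUXID_PIN(m)    (((m) >> 4) & 0xff)
#define MUXID_MUXSEL(m) ((m) & 0xf)

int main(void)
{
        /* Values copied from the header above. */
        const unsigned int ids[] = { 0x3100, 0x40a3, 0x21b1 };
        const char *names[] = {
                "MX28_PAD_PWM0__PWM_0",
                "MX28_PAD_ENET0_RXD3__GPIO_4_10",
                "MX28_PAD_SSP3_SS0__AUART4_CTS",
        };

        for (unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
                printf("%-32s bank %u pin %2u muxsel %u\n", names[i],
                       MUXID_BANK(ids[i]), MUXID_PIN(ids[i]),
                       MUXID_MUXSEL(ids[i]));

        /* MX28_PAD_ENET0_RXD3__GPIO_4_10: bank 4, pin 10, muxsel 3 (GPIO). */
        assert(MUXID_BANK(0x40a3) == 4 && MUXID_PIN(0x40a3) == 10 &&
               MUXID_MUXSEL(0x40a3) == 3);
        return 0;
}

The same decoding explains why, for example, MX28_PAD_ENET0_RXD3__ENET0_RXD3 (0x40a0) and MX28_PAD_ENET0_RXD3__GPIO_4_10 (0x40a3) differ only in the low nibble: same pad, different mux selector.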
diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts
index 6c6a5442800a..4870f07bf56a 100644
--- a/arch/arm/boot/dts/imx28-sps1.dts
+++ b/arch/arm/boot/dts/imx28-sps1.dts
@@ -10,7 +10,7 @@
*/
/dts-v1/;
-/include/ "imx28.dtsi"
+#include "imx28.dtsi"
/ {
model = "SchulerControl GmbH, SC SPS 1";
@@ -29,13 +29,13 @@
hog_pins_a: hog-gpios@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0003 /* MX28_PAD_GPMI_D00__GPIO_0_0 */
- 0x0033 /* MX28_PAD_GPMI_D03__GPIO_0_3 */
- 0x0063 /* MX28_PAD_GPMI_D06__GPIO_0_6 */
+ MX28_PAD_GPMI_D00__GPIO_0_0
+ MX28_PAD_GPMI_D03__GPIO_0_3
+ MX28_PAD_GPMI_D06__GPIO_0_6
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
};
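The board conversions in this patch also replace the bare fsl,drive-strength, fsl,voltage and fsl,pull-up numbers with named constants. Matching the removed and added lines of this hunk (and of the imx28.dtsi hunk later in the patch) gives the numeric values behind those names; the sketch below simply restates that mapping in C, on the assumption that the real definitions live in a dt-bindings header pulled in by these DTS files but not shown in this diff:

/* Pad-configuration names as implied by the literals they replace in this
 * patch (e.g. fsl,drive-strength = <0> becomes <MXS_DRIVE_4mA>). */
#define MXS_DRIVE_4mA           0       /* replaces <0> */
#define MXS_DRIVE_8mA           1       /* replaces <1> */
#define MXS_DRIVE_12mA          2       /* replaces <2> */

#define MXS_VOLTAGE_HIGH        1       /* replaces <1> */

#define MXS_PULL_DISABLE        0       /* replaces <0> */
#define MXS_PULL_ENABLE         1       /* replaces <1> */

Because these names are C macros, the board files also switch from dtc's /include/ directive to cpp's #include, as the first hunk of imx28-sps1.dts above shows: the .dts sources are now run through the C preprocessor before dtc compiles them.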
diff --git a/arch/arm/boot/dts/imx28-tx28.dts b/arch/arm/boot/dts/imx28-tx28.dts
index 37be532f0055..be5a0550d58c 100644
--- a/arch/arm/boot/dts/imx28-tx28.dts
+++ b/arch/arm/boot/dts/imx28-tx28.dts
@@ -1,106 +1,139 @@
+/*
+ * Copyright 2012 Shawn Guo <shawn.guo@linaro.org>
+ * Copyright 2013 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
/dts-v1/;
-/include/ "imx28.dtsi"
+#include "imx28.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "Ka-Ro electronics TX28 module";
compatible = "karo,tx28", "fsl,imx28";
+ aliases {
+ can0 = &can0;
+ can1 = &can1;
+ display = &display;
+ ds1339 = &ds1339;
+ gpio5 = &gpio5;
+ lcdif = &lcdif;
+ lcdif_23bit_pins = &tx28_lcdif_23bit_pins;
+ lcdif_24bit_pins = &lcdif_24bit_pins_a;
+ stk5led = &user_led;
+ usbotg = &usb0;
+ };
+
memory {
- reg = <0x40000000 0x08000000>;
- };
-
- apb@80000000 {
- apbh@80000000 {
- ssp0: ssp@80010000 {
- compatible = "fsl,imx28-mmc";
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_4bit_pins_a
- &mmc0_cd_cfg
- &mmc0_sck_cfg>;
- bus-width = <4>;
- status = "okay";
- };
+ reg = <0 0>; /* will be filled in by U-Boot */
+ };
- pinctrl@80018000 {
- pinctrl-names = "default";
- pinctrl-0 = <&hog_pins_a>;
-
- hog_pins_a: hog@0 {
- reg = <0>;
- fsl,pinmux-ids = <
- 0x40a3 /* MX28_PAD_ENET0_RXD3__GPIO_4_10 */
- >;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
- };
-
- mac0_pins_gpio: mac0-gpio-mode@0 {
- reg = <0>;
- fsl,pinmux-ids = <
- 0x4003 /* MX28_PAD_ENET0_MDC__GPIO_4_0 */
- 0x4013 /* MX28_PAD_ENET0_MDIO__GPIO_4_1 */
- 0x4023 /* MX28_PAD_ENET0_RX_EN__GPIO_4_2 */
- 0x4033 /* MX28_PAD_ENET0_RXD0__GPIO_4_3 */
- 0x4043 /* MX28_PAD_ENET0_RXD1__GPIO_4_4 */
- 0x4063 /* MX28_PAD_ENET0_TX_EN__GPIO_4_6 */
- 0x4073 /* MX28_PAD_ENET0_TXD0__GPIO_4_7 */
- 0x4083 /* MX28_PAD_ENET0_TXD1__GPIO_4_8 */
- 0x4103 /* MX28_PAD_ENET_CLK__GPIO_4_16 */
- >;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
- };
- };
+ onewire {
+ compatible = "w1-gpio";
+ gpios = <&gpio2 7 0>;
+ status = "disabled";
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_usb0_vbus: usb0_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb0_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio0 18 0>;
+ enable-active-high;
};
- apbx@80040000 {
- i2c0: i2c@80058000 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins_a>;
- status = "okay";
+ reg_usb1_vbus: usb1_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 27 0>;
+ enable-active-high;
+ };
- ds1339: rtc@68 {
- compatible = "mxim,ds1339";
- reg = <0x68>;
- };
- };
+ reg_2p5v: 2p5v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P5V";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
- pwm: pwm@80064000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pwm0_pins_a>;
- status = "okay";
- };
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
- duart: serial@80074000 {
- pinctrl-names = "default";
- pinctrl-0 = <&duart_4pins_a>;
- status = "okay";
- };
+ reg_can_xcvr: can-xcvr {
+ compatible = "regulator-fixed";
+ regulator-name = "CAN XCVR";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 0 0>;
+ enable-active-low;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tx28_flexcan_xcvr_pins>;
+ };
- auart1: serial@8006c000 {
- pinctrl-names = "default";
- pinctrl-0 = <&auart1_pins_a>;
- status = "okay";
- };
+ reg_lcd: lcd-power {
+ compatible = "regulator-fixed";
+ regulator-name = "LCD POWER";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 31 0>;
+ enable-active-high;
+ };
+
+ reg_lcd_reset: lcd-reset {
+ compatible = "regulator-fixed";
+ regulator-name = "LCD RESET";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 30 0>;
+ startup-delay-us = <300000>;
+ enable-active-high;
+ regulator-always-on;
+ regulator-boot-on;
};
};
- ahb@80080000 {
- mac0: ethernet@800f0000 {
- phy-mode = "rmii";
- pinctrl-names = "default", "gpio_mode";
- pinctrl-0 = <&mac0_pins_a>;
- pinctrl-1 = <&mac0_pins_gpio>;
- status = "okay";
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ mclk: clock@0 {
+ compatible = "fixed-clock";
+ reg = <0>;
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
};
};
+ sound {
+ compatible = "fsl,imx28-tx28-sgtl5000",
+ "fsl,mxs-audio-sgtl5000";
+ model = "imx28-tx28-sgtl5000";
+ saif-controllers = <&saif0 &saif1>;
+ audio-codec = <&sgtl5000>;
+ };
+
leds {
compatible = "gpio-leds";
- user {
+ user_led: user {
label = "Heartbeat";
gpios = <&gpio4 10 0>;
linux,default-trigger = "heartbeat";
@@ -109,8 +142,512 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm 0 5000000>;
- brightness-levels = <0 4 8 16 32 64 128 255>;
- default-brightness-level = <6>;
+ pwms = <&pwm 0 500000>;
+ /*
+ * a silly way to create a 1:1 relationship between the
+ * PWM value and the actual duty cycle
+ */
+ brightness-levels = < 0 1 2 3 4 5 6 7 8 9
+ 10 11 12 13 14 15 16 17 18 19
+ 20 21 22 23 24 25 26 27 28 29
+ 30 31 32 33 34 35 36 37 38 39
+ 40 41 42 43 44 45 46 47 48 49
+ 50 51 52 53 54 55 56 57 58 59
+ 60 61 62 63 64 65 66 67 68 69
+ 70 71 72 73 74 75 76 77 78 79
+ 80 81 82 83 84 85 86 87 88 89
+ 90 91 92 93 94 95 96 97 98 99
+ 100>;
+ default-brightness-level = <50>;
+ };
+
+ matrix_keypad: matrix-keypad@0 {
+ compatible = "gpio-matrix-keypad";
+ col-gpios = <
+ &gpio5 0 0
+ &gpio5 1 0
+ &gpio5 2 0
+ &gpio5 3 0
+ >;
+ row-gpios = <
+ &gpio5 4 0
+ &gpio5 5 0
+ &gpio5 6 0
+ &gpio5 7 0
+ >;
+ /* sample keymap */
+ linux,keymap = <
+ 0x00000074 /* row 0, col 0, KEY_POWER */
+ 0x00010052 /* row 0, col 1, KEY_KP0 */
+ 0x0002004f /* row 0, col 2, KEY_KP1 */
+ 0x00030050 /* row 0, col 3, KEY_KP2 */
+ 0x01000051 /* row 1, col 0, KEY_KP3 */
+ 0x0101004b /* row 1, col 1, KEY_KP4 */
+ 0x0102004c /* row 1, col 2, KEY_KP5 */
+ 0x0103004d /* row 1, col 3, KEY_KP6 */
+ 0x02000047 /* row 2, col 0, KEY_KP7 */
+ 0x02010048 /* row 2, col 1, KEY_KP8 */
+ 0x02020049 /* row 2, col 2, KEY_KP9 */
+ >;
+ gpio-activelow;
+ linux,wakeup;
+ debounce-delay-ms = <100>;
+ col-scan-delay-us = <5000>;
+ linux,no-autorepeat;
+ };
+};
+
+/* 2nd TX-Std UART - (A)UART1 */
+&auart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart1_pins_a>;
+ status = "okay";
+};
+
+/* 3rd TX-Std UART - (A)UART3 */
+&auart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart3_pins_a>;
+ status = "okay";
+};
+
+&can0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can0_pins_a>;
+ xceiver-supply = <&reg_can_xcvr>;
+ status = "okay";
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can1_pins_a>;
+ xceiver-supply = <&reg_can_xcvr>;
+ status = "okay";
+};
+
+&digctl {
+ status = "okay";
+};
+
+/* 1st TX-Std UART - (D)UART */
+&duart {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_4pins_a>;
+ status = "okay";
+};
+
+&gpmi {
+ pinctrl-0 = <&gpmi_pins_a &gpmi_status_cfg>;
+ nand-on-flash-bbt;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ sgtl5000: sgtl5000@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ VDDA-supply = <&reg_2p5v>;
+ VDDIO-supply = <&reg_3p3v>;
+ clocks = <&mclk>;
+ };
+
+ gpio5: pca953x@20 {
+ compatible = "nxp,pca9554";
+ reg = <0x20>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tx28_pca9554_pins>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <28 0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ polytouch: edt-ft5x06@38 {
+ compatible = "edt,edt-ft5x06";
+ reg = <0x38>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tx28_edt_ft5x06_pins>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <5 0>;
+ reset-gpios = <&gpio2 6 1>;
+ wake-gpios = <&gpio4 9 0>;
+ };
+
+ touchscreen: tsc2007@48 {
+ compatible = "ti,tsc2007";
+ reg = <0x48>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tx28_tsc2007_pins>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <20 0>;
+ pendown-gpio = <&gpio3 20 1>;
+ ti,x-plate-ohms = /bits/ 16 <660>;
+ };
+
+ ds1339: rtc@68 {
+ compatible = "mxim,ds1339";
+ reg = <0x68>;
+ };
+};
+
+&lcdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcdif_24bit_pins_a &lcdif_sync_pins_a &tx28_lcdif_ctrl_pins>;
+ lcd-supply = <&reg_lcd>;
+ display = <&display>;
+ status = "okay";
+
+ display: display@0 {
+ bits-per-pixel = <32>;
+ bus-width = <24>;
+ display-timings {
+ native-mode = <&timing5>;
+ timing0: timing0 {
+ panel-name = "VGA";
+ clock-frequency = <25175000>;
+ hactive = <640>;
+ vactive = <480>;
+ hback-porch = <48>;
+ hsync-len = <96>;
+ hfront-porch = <16>;
+ vback-porch = <33>;
+ vsync-len = <2>;
+ vfront-porch = <10>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <1>;
+ };
+
+ timing1: timing1 {
+ panel-name = "ETV570";
+ clock-frequency = <25175000>;
+ hactive = <640>;
+ vactive = <480>;
+ hback-porch = <114>;
+ hsync-len = <30>;
+ hfront-porch = <16>;
+ vback-porch = <32>;
+ vsync-len = <3>;
+ vfront-porch = <10>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <1>;
+ };
+
+ timing2: timing2 {
+ panel-name = "ET0350";
+ clock-frequency = <6500000>;
+ hactive = <320>;
+ vactive = <240>;
+ hback-porch = <34>;
+ hsync-len = <34>;
+ hfront-porch = <20>;
+ vback-porch = <15>;
+ vsync-len = <3>;
+ vfront-porch = <4>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <1>;
+ };
+
+ timing3: timing3 {
+ panel-name = "ET0430";
+ clock-frequency = <9000000>;
+ hactive = <480>;
+ vactive = <272>;
+ hback-porch = <2>;
+ hsync-len = <41>;
+ hfront-porch = <2>;
+ vback-porch = <2>;
+ vsync-len = <10>;
+ vfront-porch = <2>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <1>;
+ };
+
+ timing4: timing4 {
+ panel-name = "ET0500", "ET0700";
+ clock-frequency = <33260000>;
+ hactive = <800>;
+ vactive = <480>;
+ hback-porch = <88>;
+ hsync-len = <128>;
+ hfront-porch = <40>;
+ vback-porch = <33>;
+ vsync-len = <2>;
+ vfront-porch = <10>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <1>;
+ };
+
+ timing5: timing5 {
+ panel-name = "ETQ570";
+ clock-frequency = <6400000>;
+ hactive = <320>;
+ vactive = <240>;
+ hback-porch = <38>;
+ hsync-len = <30>;
+ hfront-porch = <30>;
+ vback-porch = <16>;
+ vsync-len = <3>;
+ vfront-porch = <4>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <1>;
+ };
+ };
+ };
+};
+
+&lradc {
+ fsl,lradc-touchscreen-wires = <4>;
+ status = "okay";
+};
+
+&mac0 {
+ phy-mode = "rmii";
+ pinctrl-names = "default", "gpio_mode";
+ pinctrl-0 = <&mac0_pins_a>;
+ pinctrl-1 = <&tx28_mac0_pins_gpio>;
+ status = "okay";
+};
+
+&mac1 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac1_pins_a>;
+ /* not enabled by default */
+};
+
+&mxs_rtc {
+ status = "okay";
+};
+
+&ocotp {
+ status = "okay";
+};
+
+&pwm {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_pins_a>;
+ status = "okay";
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_ENET0_RXD3__GPIO_4_10 /* module LED */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ tx28_edt_ft5x06_pins: tx28-edt-ft5x06-pins {
+ fsl,pinmux-ids = <
+ MX28_PAD_SSP0_DATA6__GPIO_2_6 /* RESET */
+ MX28_PAD_SSP0_DATA5__GPIO_2_5 /* IRQ */
+ MX28_PAD_ENET0_RXD2__GPIO_4_9 /* WAKE */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ tx28_flexcan_xcvr_pins: tx28-flexcan-xcvr-pins {
+ fsl,pinmux-ids = <
+ MX28_PAD_LCD_D00__GPIO_1_0
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ tx28_lcdif_23bit_pins: tx28-lcdif-23bit {
+ fsl,pinmux-ids = <
+ /* LCD_D00 may be used as Flexcan Transceiver Enable on STK5-V5 */
+ MX28_PAD_LCD_D01__LCD_D1
+ MX28_PAD_LCD_D02__LCD_D2
+ MX28_PAD_LCD_D03__LCD_D3
+ MX28_PAD_LCD_D04__LCD_D4
+ MX28_PAD_LCD_D05__LCD_D5
+ MX28_PAD_LCD_D06__LCD_D6
+ MX28_PAD_LCD_D07__LCD_D7
+ MX28_PAD_LCD_D08__LCD_D8
+ MX28_PAD_LCD_D09__LCD_D9
+ MX28_PAD_LCD_D10__LCD_D10
+ MX28_PAD_LCD_D11__LCD_D11
+ MX28_PAD_LCD_D12__LCD_D12
+ MX28_PAD_LCD_D13__LCD_D13
+ MX28_PAD_LCD_D14__LCD_D14
+ MX28_PAD_LCD_D15__LCD_D15
+ MX28_PAD_LCD_D16__LCD_D16
+ MX28_PAD_LCD_D17__LCD_D17
+ MX28_PAD_LCD_D18__LCD_D18
+ MX28_PAD_LCD_D19__LCD_D19
+ MX28_PAD_LCD_D20__LCD_D20
+ MX28_PAD_LCD_D21__LCD_D21
+ MX28_PAD_LCD_D22__LCD_D22
+ MX28_PAD_LCD_D23__LCD_D23
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ tx28_lcdif_ctrl_pins: tx28-lcdif-ctrl {
+ fsl,pinmux-ids = <
+ MX28_PAD_LCD_ENABLE__GPIO_1_31 /* Enable */
+ MX28_PAD_LCD_RESET__GPIO_3_30 /* Reset */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ tx28_mac0_pins_gpio: tx28-mac0-gpio-pins {
+ fsl,pinmux-ids = <
+ MX28_PAD_ENET0_MDC__GPIO_4_0
+ MX28_PAD_ENET0_MDIO__GPIO_4_1
+ MX28_PAD_ENET0_RX_EN__GPIO_4_2
+ MX28_PAD_ENET0_RXD0__GPIO_4_3
+ MX28_PAD_ENET0_RXD1__GPIO_4_4
+ MX28_PAD_ENET0_TX_EN__GPIO_4_6
+ MX28_PAD_ENET0_TXD0__GPIO_4_7
+ MX28_PAD_ENET0_TXD1__GPIO_4_8
+ MX28_PAD_ENET_CLK__GPIO_4_16
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ tx28_pca9554_pins: tx28-pca9554-pins {
+ fsl,pinmux-ids = <
+ MX28_PAD_PWM3__GPIO_3_28
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ tx28_tsc2007_pins: tx28-tsc2007-pins {
+ fsl,pinmux-ids = <
+ MX28_PAD_SAIF0_MCLK__GPIO_3_20 /* TSC2007 IRQ */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+
+ tx28_usbphy0_pins: tx28-usbphy0-pins {
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_CE2N__GPIO_0_18 /* USBOTG_VBUSEN */
+ MX28_PAD_GPMI_CE3N__GPIO_0_19 /* USBOTG_OC */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ tx28_usbphy1_pins: tx28-usbphy1-pins {
+ fsl,pinmux-ids = <
+ MX28_PAD_SPDIF__GPIO_3_27 /* USBH_VBUSEN */
+ MX28_PAD_JTAG_RTCK__GPIO_4_20 /* USBH_OC */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+};
+
+&saif0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif0_pins_b>;
+ fsl,saif-master;
+ status = "okay";
+};
+
+&saif1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif1_pins_a>;
+ status = "okay";
+};
+
+&ssp0 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default", "special";
+ pinctrl-0 = <&mmc0_4bit_pins_a
+ &mmc0_cd_cfg
+ &mmc0_sck_cfg>;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&ssp3 {
+ compatible = "fsl,imx28-spi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi3_pins_a>;
+ clock-frequency = <57600000>;
+ status = "okay";
+
+ spidev0: spi@0 {
+ compatible = "spidev";
+ reg = <0>;
+ spi-max-frequency = <57600000>;
+ };
+
+ spidev1: spi@1 {
+ compatible = "spidev";
+ reg = <1>;
+ spi-max-frequency = <57600000>;
};
};
+
+&usb0 {
+ vbus-supply = <&reg_usb0_vbus>;
+ disable-over-current;
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usb1 {
+ vbus-supply = <&reg_usb1_vbus>;
+ disable-over-current;
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usbphy0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&tx28_usbphy0_pins>;
+ phy_type = "utmi";
+ status = "okay";
+};
+
+&usbphy1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&tx28_usbphy1_pins>;
+ phy_type = "utmi";
+ status = "okay";
+};
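The linux,keymap cells in the matrix-keypad node above pack the row, the column and the Linux keycode into a single 32-bit value, as the per-entry comments indicate. A short sketch, assuming the row<<24 | column<<16 | keycode packing used by the gpio-matrix-keypad binding, decodes a few of those entries:

#include <stdio.h>

/* row in bits 31:24, column in bits 23:16, Linux keycode in bits 15:0 */
static void decode(unsigned int cell)
{
        printf("row %u col %u keycode %u\n",
               (cell >> 24) & 0xff, (cell >> 16) & 0xff, cell & 0xffff);
}

int main(void)
{
        decode(0x00000074);     /* row 0, col 0, KEY_POWER (116) */
        decode(0x00010052);     /* row 0, col 1, KEY_KP0   (82)  */
        decode(0x02020049);     /* row 2, col 2, KEY_KP9   (73)  */
        return 0;
}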
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 7363fded95ee..4be520a93385 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -9,7 +9,8 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-/include/ "skeleton.dtsi"
+#include "skeleton.dtsi"
+#include "imx28-pinfunc.h"
/ {
interrupt-parent = <&icoll>;
@@ -207,538 +208,592 @@
duart_pins_a: duart@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3102 /* MX28_PAD_PWM0__DUART_RX */
- 0x3112 /* MX28_PAD_PWM1__DUART_TX */
+ MX28_PAD_PWM0__DUART_RX
+ MX28_PAD_PWM1__DUART_TX
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
duart_pins_b: duart@1 {
reg = <1>;
fsl,pinmux-ids = <
- 0x3022 /* MX28_PAD_AUART0_CTS__DUART_RX */
- 0x3032 /* MX28_PAD_AUART0_RTS__DUART_TX */
+ MX28_PAD_AUART0_CTS__DUART_RX
+ MX28_PAD_AUART0_RTS__DUART_TX
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
duart_4pins_a: duart-4pins@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3022 /* MX28_PAD_AUART0_CTS__DUART_RX */
- 0x3032 /* MX28_PAD_AUART0_RTS__DUART_TX */
- 0x3002 /* MX28_PAD_AUART0_RX__DUART_CTS */
- 0x3012 /* MX28_PAD_AUART0_TX__DUART_RTS */
+ MX28_PAD_AUART0_CTS__DUART_RX
+ MX28_PAD_AUART0_RTS__DUART_TX
+ MX28_PAD_AUART0_RX__DUART_CTS
+ MX28_PAD_AUART0_TX__DUART_RTS
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
gpmi_pins_a: gpmi-nand@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0000 /* MX28_PAD_GPMI_D00__GPMI_D0 */
- 0x0010 /* MX28_PAD_GPMI_D01__GPMI_D1 */
- 0x0020 /* MX28_PAD_GPMI_D02__GPMI_D2 */
- 0x0030 /* MX28_PAD_GPMI_D03__GPMI_D3 */
- 0x0040 /* MX28_PAD_GPMI_D04__GPMI_D4 */
- 0x0050 /* MX28_PAD_GPMI_D05__GPMI_D5 */
- 0x0060 /* MX28_PAD_GPMI_D06__GPMI_D6 */
- 0x0070 /* MX28_PAD_GPMI_D07__GPMI_D7 */
- 0x0100 /* MX28_PAD_GPMI_CE0N__GPMI_CE0N */
- 0x0140 /* MX28_PAD_GPMI_RDY0__GPMI_READY0 */
- 0x0180 /* MX28_PAD_GPMI_RDN__GPMI_RDN */
- 0x0190 /* MX28_PAD_GPMI_WRN__GPMI_WRN */
- 0x01a0 /* MX28_PAD_GPMI_ALE__GPMI_ALE */
- 0x01b0 /* MX28_PAD_GPMI_CLE__GPMI_CLE */
- 0x01c0 /* MX28_PAD_GPMI_RESETN__GPMI_RESETN */
+ MX28_PAD_GPMI_D00__GPMI_D0
+ MX28_PAD_GPMI_D01__GPMI_D1
+ MX28_PAD_GPMI_D02__GPMI_D2
+ MX28_PAD_GPMI_D03__GPMI_D3
+ MX28_PAD_GPMI_D04__GPMI_D4
+ MX28_PAD_GPMI_D05__GPMI_D5
+ MX28_PAD_GPMI_D06__GPMI_D6
+ MX28_PAD_GPMI_D07__GPMI_D7
+ MX28_PAD_GPMI_CE0N__GPMI_CE0N
+ MX28_PAD_GPMI_RDY0__GPMI_READY0
+ MX28_PAD_GPMI_RDN__GPMI_RDN
+ MX28_PAD_GPMI_WRN__GPMI_WRN
+ MX28_PAD_GPMI_ALE__GPMI_ALE
+ MX28_PAD_GPMI_CLE__GPMI_CLE
+ MX28_PAD_GPMI_RESETN__GPMI_RESETN
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
gpmi_status_cfg: gpmi-status-cfg {
fsl,pinmux-ids = <
- 0x0180 /* MX28_PAD_GPMI_RDN__GPMI_RDN */
- 0x0190 /* MX28_PAD_GPMI_WRN__GPMI_WRN */
- 0x01c0 /* MX28_PAD_GPMI_RESETN__GPMI_RESETN */
+ MX28_PAD_GPMI_RDN__GPMI_RDN
+ MX28_PAD_GPMI_WRN__GPMI_WRN
+ MX28_PAD_GPMI_RESETN__GPMI_RESETN
>;
- fsl,drive-strength = <2>;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
};
auart0_pins_a: auart0@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3000 /* MX28_PAD_AUART0_RX__AUART0_RX */
- 0x3010 /* MX28_PAD_AUART0_TX__AUART0_TX */
- 0x3020 /* MX28_PAD_AUART0_CTS__AUART0_CTS */
- 0x3030 /* MX28_PAD_AUART0_RTS__AUART0_RTS */
+ MX28_PAD_AUART0_RX__AUART0_RX
+ MX28_PAD_AUART0_TX__AUART0_TX
+ MX28_PAD_AUART0_CTS__AUART0_CTS
+ MX28_PAD_AUART0_RTS__AUART0_RTS
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
auart0_2pins_a: auart0-2pins@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3000 /* MX28_PAD_AUART0_RX__AUART0_RX */
- 0x3010 /* MX28_PAD_AUART0_TX__AUART0_TX */
+ MX28_PAD_AUART0_RX__AUART0_RX
+ MX28_PAD_AUART0_TX__AUART0_TX
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
auart1_pins_a: auart1@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3040 /* MX28_PAD_AUART1_RX__AUART1_RX */
- 0x3050 /* MX28_PAD_AUART1_TX__AUART1_TX */
- 0x3060 /* MX28_PAD_AUART1_CTS__AUART1_CTS */
- 0x3070 /* MX28_PAD_AUART1_RTS__AUART1_RTS */
+ MX28_PAD_AUART1_RX__AUART1_RX
+ MX28_PAD_AUART1_TX__AUART1_TX
+ MX28_PAD_AUART1_CTS__AUART1_CTS
+ MX28_PAD_AUART1_RTS__AUART1_RTS
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
auart1_2pins_a: auart1-2pins@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3040 /* MX28_PAD_AUART1_RX__AUART1_RX */
- 0x3050 /* MX28_PAD_AUART1_TX__AUART1_TX */
+ MX28_PAD_AUART1_RX__AUART1_RX
+ MX28_PAD_AUART1_TX__AUART1_TX
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
auart2_2pins_a: auart2-2pins@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2101 /* MX28_PAD_SSP2_SCK__AUART2_RX */
- 0x2111 /* MX28_PAD_SSP2_MOSI__AUART2_TX */
+ MX28_PAD_SSP2_SCK__AUART2_RX
+ MX28_PAD_SSP2_MOSI__AUART2_TX
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
auart2_2pins_b: auart2-2pins@1 {
reg = <1>;
fsl,pinmux-ids = <
- 0x3080 /* MX28_PAD_AUART2_RX__AUART2_RX */
- 0x3090 /* MX28_PAD_AUART2_TX__AUART2_TX */
+ MX28_PAD_AUART2_RX__AUART2_RX
+ MX28_PAD_AUART2_TX__AUART2_TX
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ auart2_pins_a: auart2-pins@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_AUART2_RX__AUART2_RX
+ MX28_PAD_AUART2_TX__AUART2_TX
+ MX28_PAD_AUART2_CTS__AUART2_CTS
+ MX28_PAD_AUART2_RTS__AUART2_RTS
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
auart3_pins_a: auart3@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x30c0 /* MX28_PAD_AUART3_RX__AUART3_RX */
- 0x30d0 /* MX28_PAD_AUART3_TX__AUART3_TX */
- 0x30e0 /* MX28_PAD_AUART3_CTS__AUART3_CTS */
- 0x30f0 /* MX28_PAD_AUART3_RTS__AUART3_RTS */
+ MX28_PAD_AUART3_RX__AUART3_RX
+ MX28_PAD_AUART3_TX__AUART3_TX
+ MX28_PAD_AUART3_CTS__AUART3_CTS
+ MX28_PAD_AUART3_RTS__AUART3_RTS
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
auart3_2pins_a: auart3-2pins@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2121 /* MX28_PAD_SSP2_MISO__AUART3_RX */
- 0x2131 /* MX28_PAD_SSP2_SS0__AUART3_TX */
+ MX28_PAD_SSP2_MISO__AUART3_RX
+ MX28_PAD_SSP2_SS0__AUART3_TX
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
auart3_2pins_b: auart3-2pins@1 {
reg = <1>;
fsl,pinmux-ids = <
- 0x30c0 /* MX28_PAD_AUART3_RX__AUART3_RX */
- 0x30d0 /* MX28_PAD_AUART3_TX__AUART3_TX */
+ MX28_PAD_AUART3_RX__AUART3_RX
+ MX28_PAD_AUART3_TX__AUART3_TX
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
auart4_2pins_a: auart4@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2181 /* MX28_PAD_SSP3_SCK__AUART4_TX */
- 0x2191 /* MX28_PAD_SSP3_MOSI__AUART4_RX */
+ MX28_PAD_SSP3_SCK__AUART4_TX
+ MX28_PAD_SSP3_MOSI__AUART4_RX
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
mac0_pins_a: mac0@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x4000 /* MX28_PAD_ENET0_MDC__ENET0_MDC */
- 0x4010 /* MX28_PAD_ENET0_MDIO__ENET0_MDIO */
- 0x4020 /* MX28_PAD_ENET0_RX_EN__ENET0_RX_EN */
- 0x4030 /* MX28_PAD_ENET0_RXD0__ENET0_RXD0 */
- 0x4040 /* MX28_PAD_ENET0_RXD1__ENET0_RXD1 */
- 0x4060 /* MX28_PAD_ENET0_TX_EN__ENET0_TX_EN */
- 0x4070 /* MX28_PAD_ENET0_TXD0__ENET0_TXD0 */
- 0x4080 /* MX28_PAD_ENET0_TXD1__ENET0_TXD1 */
- 0x4100 /* MX28_PAD_ENET_CLK__CLKCTRL_ENET */
+ MX28_PAD_ENET0_MDC__ENET0_MDC
+ MX28_PAD_ENET0_MDIO__ENET0_MDIO
+ MX28_PAD_ENET0_RX_EN__ENET0_RX_EN
+ MX28_PAD_ENET0_RXD0__ENET0_RXD0
+ MX28_PAD_ENET0_RXD1__ENET0_RXD1
+ MX28_PAD_ENET0_TX_EN__ENET0_TX_EN
+ MX28_PAD_ENET0_TXD0__ENET0_TXD0
+ MX28_PAD_ENET0_TXD1__ENET0_TXD1
+ MX28_PAD_ENET_CLK__CLKCTRL_ENET
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
mac1_pins_a: mac1@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x40f1 /* MX28_PAD_ENET0_CRS__ENET1_RX_EN */
- 0x4091 /* MX28_PAD_ENET0_RXD2__ENET1_RXD0 */
- 0x40a1 /* MX28_PAD_ENET0_RXD3__ENET1_RXD1 */
- 0x40e1 /* MX28_PAD_ENET0_COL__ENET1_TX_EN */
- 0x40b1 /* MX28_PAD_ENET0_TXD2__ENET1_TXD0 */
- 0x40c1 /* MX28_PAD_ENET0_TXD3__ENET1_TXD1 */
+ MX28_PAD_ENET0_CRS__ENET1_RX_EN
+ MX28_PAD_ENET0_RXD2__ENET1_RXD0
+ MX28_PAD_ENET0_RXD3__ENET1_RXD1
+ MX28_PAD_ENET0_COL__ENET1_TX_EN
+ MX28_PAD_ENET0_TXD2__ENET1_TXD0
+ MX28_PAD_ENET0_TXD3__ENET1_TXD1
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
mmc0_8bit_pins_a: mmc0-8bit@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2000 /* MX28_PAD_SSP0_DATA0__SSP0_D0 */
- 0x2010 /* MX28_PAD_SSP0_DATA1__SSP0_D1 */
- 0x2020 /* MX28_PAD_SSP0_DATA2__SSP0_D2 */
- 0x2030 /* MX28_PAD_SSP0_DATA3__SSP0_D3 */
- 0x2040 /* MX28_PAD_SSP0_DATA4__SSP0_D4 */
- 0x2050 /* MX28_PAD_SSP0_DATA5__SSP0_D5 */
- 0x2060 /* MX28_PAD_SSP0_DATA6__SSP0_D6 */
- 0x2070 /* MX28_PAD_SSP0_DATA7__SSP0_D7 */
- 0x2080 /* MX28_PAD_SSP0_CMD__SSP0_CMD */
- 0x2090 /* MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT */
- 0x20a0 /* MX28_PAD_SSP0_SCK__SSP0_SCK */
+ MX28_PAD_SSP0_DATA0__SSP0_D0
+ MX28_PAD_SSP0_DATA1__SSP0_D1
+ MX28_PAD_SSP0_DATA2__SSP0_D2
+ MX28_PAD_SSP0_DATA3__SSP0_D3
+ MX28_PAD_SSP0_DATA4__SSP0_D4
+ MX28_PAD_SSP0_DATA5__SSP0_D5
+ MX28_PAD_SSP0_DATA6__SSP0_D6
+ MX28_PAD_SSP0_DATA7__SSP0_D7
+ MX28_PAD_SSP0_CMD__SSP0_CMD
+ MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT
+ MX28_PAD_SSP0_SCK__SSP0_SCK
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
mmc0_4bit_pins_a: mmc0-4bit@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2000 /* MX28_PAD_SSP0_DATA0__SSP0_D0 */
- 0x2010 /* MX28_PAD_SSP0_DATA1__SSP0_D1 */
- 0x2020 /* MX28_PAD_SSP0_DATA2__SSP0_D2 */
- 0x2030 /* MX28_PAD_SSP0_DATA3__SSP0_D3 */
- 0x2080 /* MX28_PAD_SSP0_CMD__SSP0_CMD */
- 0x2090 /* MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT */
- 0x20a0 /* MX28_PAD_SSP0_SCK__SSP0_SCK */
+ MX28_PAD_SSP0_DATA0__SSP0_D0
+ MX28_PAD_SSP0_DATA1__SSP0_D1
+ MX28_PAD_SSP0_DATA2__SSP0_D2
+ MX28_PAD_SSP0_DATA3__SSP0_D3
+ MX28_PAD_SSP0_CMD__SSP0_CMD
+ MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT
+ MX28_PAD_SSP0_SCK__SSP0_SCK
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
mmc0_cd_cfg: mmc0-cd-cfg {
fsl,pinmux-ids = <
- 0x2090 /* MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT */
+ MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT
>;
- fsl,pull-up = <0>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
mmc0_sck_cfg: mmc0-sck-cfg {
fsl,pinmux-ids = <
- 0x20a0 /* MX28_PAD_SSP0_SCK__SSP0_SCK */
+ MX28_PAD_SSP0_SCK__SSP0_SCK
+ >;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ mmc2_4bit_pins_a: mmc2-4bit@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_SSP0_DATA4__SSP2_D0
+ MX28_PAD_SSP1_SCK__SSP2_D1
+ MX28_PAD_SSP1_CMD__SSP2_D2
+ MX28_PAD_SSP0_DATA5__SSP2_D3
+ MX28_PAD_SSP0_DATA6__SSP2_CMD
+ MX28_PAD_AUART1_RX__SSP2_CARD_DETECT
+ MX28_PAD_SSP0_DATA7__SSP2_SCK
>;
- fsl,drive-strength = <2>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
+ };
+
+ mmc2_cd_cfg: mmc2-cd-cfg {
+ fsl,pinmux-ids = <
+ MX28_PAD_AUART1_RX__SSP2_CARD_DETECT
+ >;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ mmc2_sck_cfg: mmc2-sck-cfg {
+ fsl,pinmux-ids = <
+ MX28_PAD_SSP0_DATA7__SSP2_SCK
+ >;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
i2c0_pins_a: i2c0@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3180 /* MX28_PAD_I2C0_SCL__I2C0_SCL */
- 0x3190 /* MX28_PAD_I2C0_SDA__I2C0_SDA */
+ MX28_PAD_I2C0_SCL__I2C0_SCL
+ MX28_PAD_I2C0_SDA__I2C0_SDA
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
i2c0_pins_b: i2c0@1 {
reg = <1>;
fsl,pinmux-ids = <
- 0x3001 /* MX28_PAD_AUART0_RX__I2C0_SCL */
- 0x3011 /* MX28_PAD_AUART0_TX__I2C0_SDA */
+ MX28_PAD_AUART0_RX__I2C0_SCL
+ MX28_PAD_AUART0_TX__I2C0_SDA
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
i2c1_pins_a: i2c1@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3101 /* MX28_PAD_PWM0__I2C1_SCL */
- 0x3111 /* MX28_PAD_PWM1__I2C1_SDA */
+ MX28_PAD_PWM0__I2C1_SCL
+ MX28_PAD_PWM1__I2C1_SDA
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
saif0_pins_a: saif0@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3140 /* MX28_PAD_SAIF0_MCLK__SAIF0_MCLK */
- 0x3150 /* MX28_PAD_SAIF0_LRCLK__SAIF0_LRCLK */
- 0x3160 /* MX28_PAD_SAIF0_BITCLK__SAIF0_BITCLK */
- 0x3170 /* MX28_PAD_SAIF0_SDATA0__SAIF0_SDATA0 */
+ MX28_PAD_SAIF0_MCLK__SAIF0_MCLK
+ MX28_PAD_SAIF0_LRCLK__SAIF0_LRCLK
+ MX28_PAD_SAIF0_BITCLK__SAIF0_BITCLK
+ MX28_PAD_SAIF0_SDATA0__SAIF0_SDATA0
>;
- fsl,drive-strength = <2>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
saif0_pins_b: saif0@1 {
reg = <1>;
fsl,pinmux-ids = <
- 0x3150 /* MX28_PAD_SAIF0_LRCLK__SAIF0_LRCLK */
- 0x3160 /* MX28_PAD_SAIF0_BITCLK__SAIF0_BITCLK */
- 0x3170 /* MX28_PAD_SAIF0_SDATA0__SAIF0_SDATA0 */
+ MX28_PAD_SAIF0_LRCLK__SAIF0_LRCLK
+ MX28_PAD_SAIF0_BITCLK__SAIF0_BITCLK
+ MX28_PAD_SAIF0_SDATA0__SAIF0_SDATA0
>;
- fsl,drive-strength = <2>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
saif1_pins_a: saif1@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x31a0 /* MX28_PAD_SAIF1_SDATA0__SAIF1_SDATA0 */
+ MX28_PAD_SAIF1_SDATA0__SAIF1_SDATA0
>;
- fsl,drive-strength = <2>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
pwm0_pins_a: pwm0@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3100 /* MX28_PAD_PWM0__PWM_0 */
+ MX28_PAD_PWM0__PWM_0
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
pwm2_pins_a: pwm2@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3120 /* MX28_PAD_PWM2__PWM_2 */
+ MX28_PAD_PWM2__PWM_2
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
pwm3_pins_a: pwm3@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x31c0 /* MX28_PAD_PWM3__PWM_3 */
+ MX28_PAD_PWM3__PWM_3
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
pwm3_pins_b: pwm3@1 {
reg = <1>;
fsl,pinmux-ids = <
- 0x3141 /* MX28_PAD_SAIF0_MCLK__PWM3 */
+ MX28_PAD_SAIF0_MCLK__PWM_3
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
pwm4_pins_a: pwm4@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x31d0 /* MX28_PAD_PWM4__PWM_4 */
+ MX28_PAD_PWM4__PWM_4
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_24bit_pins_a: lcdif-24bit@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1000 /* MX28_PAD_LCD_D00__LCD_D0 */
- 0x1010 /* MX28_PAD_LCD_D01__LCD_D1 */
- 0x1020 /* MX28_PAD_LCD_D02__LCD_D2 */
- 0x1030 /* MX28_PAD_LCD_D03__LCD_D3 */
- 0x1040 /* MX28_PAD_LCD_D04__LCD_D4 */
- 0x1050 /* MX28_PAD_LCD_D05__LCD_D5 */
- 0x1060 /* MX28_PAD_LCD_D06__LCD_D6 */
- 0x1070 /* MX28_PAD_LCD_D07__LCD_D7 */
- 0x1080 /* MX28_PAD_LCD_D08__LCD_D8 */
- 0x1090 /* MX28_PAD_LCD_D09__LCD_D9 */
- 0x10a0 /* MX28_PAD_LCD_D10__LCD_D10 */
- 0x10b0 /* MX28_PAD_LCD_D11__LCD_D11 */
- 0x10c0 /* MX28_PAD_LCD_D12__LCD_D12 */
- 0x10d0 /* MX28_PAD_LCD_D13__LCD_D13 */
- 0x10e0 /* MX28_PAD_LCD_D14__LCD_D14 */
- 0x10f0 /* MX28_PAD_LCD_D15__LCD_D15 */
- 0x1100 /* MX28_PAD_LCD_D16__LCD_D16 */
- 0x1110 /* MX28_PAD_LCD_D17__LCD_D17 */
- 0x1120 /* MX28_PAD_LCD_D18__LCD_D18 */
- 0x1130 /* MX28_PAD_LCD_D19__LCD_D19 */
- 0x1140 /* MX28_PAD_LCD_D20__LCD_D20 */
- 0x1150 /* MX28_PAD_LCD_D21__LCD_D21 */
- 0x1160 /* MX28_PAD_LCD_D22__LCD_D22 */
- 0x1170 /* MX28_PAD_LCD_D23__LCD_D23 */
+ MX28_PAD_LCD_D00__LCD_D0
+ MX28_PAD_LCD_D01__LCD_D1
+ MX28_PAD_LCD_D02__LCD_D2
+ MX28_PAD_LCD_D03__LCD_D3
+ MX28_PAD_LCD_D04__LCD_D4
+ MX28_PAD_LCD_D05__LCD_D5
+ MX28_PAD_LCD_D06__LCD_D6
+ MX28_PAD_LCD_D07__LCD_D7
+ MX28_PAD_LCD_D08__LCD_D8
+ MX28_PAD_LCD_D09__LCD_D9
+ MX28_PAD_LCD_D10__LCD_D10
+ MX28_PAD_LCD_D11__LCD_D11
+ MX28_PAD_LCD_D12__LCD_D12
+ MX28_PAD_LCD_D13__LCD_D13
+ MX28_PAD_LCD_D14__LCD_D14
+ MX28_PAD_LCD_D15__LCD_D15
+ MX28_PAD_LCD_D16__LCD_D16
+ MX28_PAD_LCD_D17__LCD_D17
+ MX28_PAD_LCD_D18__LCD_D18
+ MX28_PAD_LCD_D19__LCD_D19
+ MX28_PAD_LCD_D20__LCD_D20
+ MX28_PAD_LCD_D21__LCD_D21
+ MX28_PAD_LCD_D22__LCD_D22
+ MX28_PAD_LCD_D23__LCD_D23
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_16bit_pins_a: lcdif-16bit@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x1000 /* MX28_PAD_LCD_D00__LCD_D0 */
- 0x1010 /* MX28_PAD_LCD_D01__LCD_D1 */
- 0x1020 /* MX28_PAD_LCD_D02__LCD_D2 */
- 0x1030 /* MX28_PAD_LCD_D03__LCD_D3 */
- 0x1040 /* MX28_PAD_LCD_D04__LCD_D4 */
- 0x1050 /* MX28_PAD_LCD_D05__LCD_D5 */
- 0x1060 /* MX28_PAD_LCD_D06__LCD_D6 */
- 0x1070 /* MX28_PAD_LCD_D07__LCD_D7 */
- 0x1080 /* MX28_PAD_LCD_D08__LCD_D8 */
- 0x1090 /* MX28_PAD_LCD_D09__LCD_D9 */
- 0x10a0 /* MX28_PAD_LCD_D10__LCD_D10 */
- 0x10b0 /* MX28_PAD_LCD_D11__LCD_D11 */
- 0x10c0 /* MX28_PAD_LCD_D12__LCD_D12 */
- 0x10d0 /* MX28_PAD_LCD_D13__LCD_D13 */
- 0x10e0 /* MX28_PAD_LCD_D14__LCD_D14 */
- 0x10f0 /* MX28_PAD_LCD_D15__LCD_D15 */
+ MX28_PAD_LCD_D00__LCD_D0
+ MX28_PAD_LCD_D01__LCD_D1
+ MX28_PAD_LCD_D02__LCD_D2
+ MX28_PAD_LCD_D03__LCD_D3
+ MX28_PAD_LCD_D04__LCD_D4
+ MX28_PAD_LCD_D05__LCD_D5
+ MX28_PAD_LCD_D06__LCD_D6
+ MX28_PAD_LCD_D07__LCD_D7
+ MX28_PAD_LCD_D08__LCD_D8
+ MX28_PAD_LCD_D09__LCD_D9
+ MX28_PAD_LCD_D10__LCD_D10
+ MX28_PAD_LCD_D11__LCD_D11
+ MX28_PAD_LCD_D12__LCD_D12
+ MX28_PAD_LCD_D13__LCD_D13
+ MX28_PAD_LCD_D14__LCD_D14
+ MX28_PAD_LCD_D15__LCD_D15
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
lcdif_sync_pins_a: lcdif-sync@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x11a1 /* MX28_PAD_LCD_RS__LCD_DOTCLK */
- 0x11b1 /* MX28_PAD_LCD_CS__LCD_ENABLE */
- 0x1181 /* MX28_PAD_LCD_RD_E__LCD_VSYNC */
- 0x1191 /* MX28_PAD_LCD_WR_RWN__LCD_HSYNC */
+ MX28_PAD_LCD_RS__LCD_DOTCLK
+ MX28_PAD_LCD_CS__LCD_ENABLE
+ MX28_PAD_LCD_RD_E__LCD_VSYNC
+ MX28_PAD_LCD_WR_RWN__LCD_HSYNC
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
can0_pins_a: can0@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0161 /* MX28_PAD_GPMI_RDY2__CAN0_TX */
- 0x0171 /* MX28_PAD_GPMI_RDY3__CAN0_RX */
+ MX28_PAD_GPMI_RDY2__CAN0_TX
+ MX28_PAD_GPMI_RDY3__CAN0_RX
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
can1_pins_a: can1@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x0121 /* MX28_PAD_GPMI_CE2N__CAN1_TX */
- 0x0131 /* MX28_PAD_GPMI_CE3N__CAN1_RX */
+ MX28_PAD_GPMI_CE2N__CAN1_TX
+ MX28_PAD_GPMI_CE3N__CAN1_RX
>;
- fsl,drive-strength = <0>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
spi2_pins_a: spi2@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2100 /* MX28_PAD_SSP2_SCK__SSP2_SCK */
- 0x2110 /* MX28_PAD_SSP2_MOSI__SSP2_CMD */
- 0x2120 /* MX28_PAD_SSP2_MISO__SSP2_D0 */
- 0x2130 /* MX28_PAD_SSP2_SS0__SSP2_D3 */
+ MX28_PAD_SSP2_SCK__SSP2_SCK
+ MX28_PAD_SSP2_MOSI__SSP2_CMD
+ MX28_PAD_SSP2_MISO__SSP2_D0
+ MX28_PAD_SSP2_SS0__SSP2_D3
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <1>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
spi3_pins_a: spi3@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x3082 /* MX28_PAD_AUART2_RX__SSP3_D4 */
- 0x3092 /* MX28_PAD_AUART2_TX__SSP3_D5 */
- 0x2180 /* MX28_PAD_SSP3_SCK__SSP3_SCK */
- 0x2190 /* MX28_PAD_SSP3_MOSI__SSP3_CMD */
- 0x21A0 /* MX28_PAD_SSP3_MISO__SSP3_D0 */
- 0x21B0 /* MX28_PAD_SSP3_SS0__SSP3_D3 */
+ MX28_PAD_AUART2_RX__SSP3_D4
+ MX28_PAD_AUART2_TX__SSP3_D5
+ MX28_PAD_SSP3_SCK__SSP3_SCK
+ MX28_PAD_SSP3_MOSI__SSP3_CMD
+ MX28_PAD_SSP3_MISO__SSP3_D0
+ MX28_PAD_SSP3_SS0__SSP3_D3
>;
- fsl,drive-strength = <1>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
usbphy0_pins_a: usbphy0@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2152 /* MX28_PAD_SSP2_SS2__USB0_OVERCURRENT */
+ MX28_PAD_SSP2_SS2__USB0_OVERCURRENT
>;
- fsl,drive-strength = <2>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
usbphy0_pins_b: usbphy0@1 {
reg = <1>;
fsl,pinmux-ids = <
- 0x3061 /* MX28_PAD_AUART1_CTS__USB0_OVERCURRENT */
+ MX28_PAD_AUART1_CTS__USB0_OVERCURRENT
>;
- fsl,drive-strength = <2>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
};
usbphy1_pins_a: usbphy1@0 {
reg = <0>;
fsl,pinmux-ids = <
- 0x2142 /* MX28_PAD_SSP2_SS1__USB1_OVERCURRENT */
+ MX28_PAD_SSP2_SS1__USB1_OVERCURRENT
+ >;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ usb0_id_pins_a: usb0id@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_AUART1_RTS__USB0_ID
>;
- fsl,drive-strength = <2>;
- fsl,voltage = <1>;
- fsl,pull-up = <0>;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
};
};

diff --git a/arch/arm/boot/dts/imx50-evk.dts b/arch/arm/boot/dts/imx50-evk.dts
new file mode 100644
index 000000000000..60d9baf47730
--- /dev/null
+++ b/arch/arm/boot/dts/imx50-evk.dts
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2013 Greg Ungerer <gerg@uclinux.org>
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "imx50.dtsi"
+
+/ {
+ model = "Freescale i.MX50 Evaluation Kit";
+ compatible = "fsl,imx50-evk", "fsl,imx50";
+
+ memory {
+ reg = <0x70000000 0x80000000>;
+ };
+};
+
+&cspi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cspi_1>;
+ fsl,spi-num-chipselects = <2>;
+ cs-gpios = <&gpio4 11 0>, <&gpio4 13 0>;
+ status = "okay";
+
+ flash: m25p32@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "m25p32", "m25p80";
+ spi-max-frequency = <25000000>;
+ reg = <1>;
+
+ partition@0 {
+ label = "bootloader";
+ reg = <0x0 0x100000>;
+ read-only;
+ };
+
+ partition@100000 {
+ label = "kernel";
+ reg = <0x100000 0x300000>;
+ };
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec_1>;
+ phy-mode = "rmii";
+ phy-reset-gpios = <&gpio4 12 0>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_1>;
+ status = "okay";
+};
+
+&usbh1 {
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&usbh2 {
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&usbh3 {
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&usbotg {
+ pinctrl-names = "default";
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx50-pinfunc.h b/arch/arm/boot/dts/imx50-pinfunc.h
new file mode 100644
index 000000000000..97e6e7f4ebdd
--- /dev/null
+++ b/arch/arm/boot/dts/imx50-pinfunc.h
@@ -0,0 +1,923 @@
+/*
+ * Copyright 2013 Greg Ungerer <gerg@uclinux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DTS_IMX50_PINFUNC_H
+#define __DTS_IMX50_PINFUNC_H
+
+/*
+ * The pin function ID is a tuple of
+ * <mux_reg conf_reg input_reg mux_mode input_val>
+ */
+#define MX50_PAD_KEY_COL0__KPP_COL_0 0x020 0x2cc 0x000 0x0 0x0
+#define MX50_PAD_KEY_COL0__GPIO4_0 0x020 0x2cc 0x000 0x1 0x0
+#define MX50_PAD_KEY_COL0__EIM_NANDF_CLE 0x020 0x2cc 0x000 0x2 0x0
+#define MX50_PAD_KEY_COL0__CTI_TRIGIN7 0x020 0x2cc 0x000 0x6 0x0
+#define MX50_PAD_KEY_COL0__USBPHY1_TXREADY 0x020 0x2cc 0x000 0x7 0x0
+#define MX50_PAD_KEY_ROW0__KPP_ROW_0 0x024 0x2d0 0x000 0x0 0x0
+#define MX50_PAD_KEY_ROW0__GPIO4_1 0x024 0x2d0 0x000 0x1 0x0
+#define MX50_PAD_KEY_ROW0__EIM_NANDF_ALE 0x024 0x2d0 0x000 0x2 0x0
+#define MX50_PAD_KEY_ROW0__CTI_TRIGIN_ACK7 0x024 0x2d0 0x000 0x6 0x0
+#define MX50_PAD_KEY_ROW0__USBPHY1_RXVALID 0x024 0x2d0 0x000 0x7 0x0
+#define MX50_PAD_KEY_COL1__KPP_COL_1 0x028 0x2d4 0x000 0x0 0x0
+#define MX50_PAD_KEY_COL1__GPIO4_2 0x028 0x2d4 0x000 0x1 0x0
+#define MX50_PAD_KEY_COL1__EIM_NANDF_CEN_0 0x028 0x2d4 0x000 0x2 0x0
+#define MX50_PAD_KEY_COL1__CTI_TRIGOUT_ACK6 0x028 0x2d4 0x000 0x6 0x0
+#define MX50_PAD_KEY_COL1__USBPHY1_RXACTIVE 0x028 0x2d4 0x000 0x7 0x0
+#define MX50_PAD_KEY_ROW1__KPP_ROW_1 0x02c 0x2d8 0x000 0x0 0x0
+#define MX50_PAD_KEY_ROW1__GPIO4_3 0x02c 0x2d8 0x000 0x1 0x0
+#define MX50_PAD_KEY_ROW1__EIM_NANDF_CEN_1 0x02c 0x2d8 0x000 0x2 0x0
+#define MX50_PAD_KEY_ROW1__CTI_TRIGOUT_ACK7 0x02c 0x2d8 0x000 0x6 0x0
+#define MX50_PAD_KEY_ROW1__USBPHY1_RXERROR 0x02c 0x2d8 0x000 0x7 0x0
+#define MX50_PAD_KEY_COL2__KPP_COL_2 0x030 0x2dc 0x000 0x0 0x0
+#define MX50_PAD_KEY_COL2__GPIO4_4 0x030 0x2dc 0x000 0x1 0x0
+#define MX50_PAD_KEY_COL2__EIM_NANDF_CEN_2 0x030 0x2dc 0x000 0x2 0x0
+#define MX50_PAD_KEY_COL2__CTI_TRIGOUT6 0x030 0x2dc 0x000 0x6 0x0
+#define MX50_PAD_KEY_COL2__USBPHY1_SIECLOCK 0x030 0x2dc 0x000 0x7 0x0
+#define MX50_PAD_KEY_ROW2__KPP_ROW_2 0x034 0x2e0 0x000 0x0 0x0
+#define MX50_PAD_KEY_ROW2__GPIO4_5 0x034 0x2e0 0x000 0x1 0x0
+#define MX50_PAD_KEY_ROW2__EIM_NANDF_CEN_3 0x034 0x2e0 0x000 0x2 0x0
+#define MX50_PAD_KEY_ROW2__CTI_TRIGOUT7 0x034 0x2e0 0x000 0x6 0x0
+#define MX50_PAD_KEY_ROW2__USBPHY1_LINESTATE_0 0x034 0x2e0 0x000 0x7 0x0
+#define MX50_PAD_KEY_COL3__KPP_COL_3 0x038 0x2e4 0x000 0x0 0x0
+#define MX50_PAD_KEY_COL3__GPIO4_6 0x038 0x2e4 0x000 0x1 0x0
+#define MX50_PAD_KEY_COL3__EIM_NANDF_READY0 0x038 0x2e4 0x7b4 0x2 0x0
+#define MX50_PAD_KEY_COL3__SDMA_EXT_EVENT_0 0x038 0x2e4 0x7b8 0x6 0x0
+#define MX50_PAD_KEY_COL3__USBPHY1_LINESTATE_1 0x038 0x2e4 0x000 0x7 0x0
+#define MX50_PAD_KEY_ROW3__KPP_ROW_3 0x03c 0x2e8 0x000 0x0 0x0
+#define MX50_PAD_KEY_ROW3__GPIO4_7 0x03c 0x2e8 0x000 0x1 0x0
+#define MX50_PAD_KEY_ROW3__EIM_NANDF_DQS 0x03c 0x2e8 0x7b0 0x2 0x0
+#define MX50_PAD_KEY_ROW3__SDMA_EXT_EVENT_1 0x03c 0x2e8 0x7bc 0x6 0x0
+#define MX50_PAD_KEY_ROW3__USBPHY1_VBUSVALID 0x03c 0x2e8 0x000 0x7 0x0
+#define MX50_PAD_I2C1_SCL__I2C1_SCL 0x040 0x2ec 0x000 0x0 0x0
+#define MX50_PAD_I2C1_SCL__GPIO6_18 0x040 0x2ec 0x000 0x1 0x0
+#define MX50_PAD_I2C1_SCL__UART2_TXD_MUX 0x040 0x2ec 0x7cc 0x2 0x0
+#define MX50_PAD_I2C1_SDA__I2C1_SDA 0x044 0x2f0 0x000 0x0 0x0
+#define MX50_PAD_I2C1_SDA__GPIO6_19 0x044 0x2f0 0x000 0x1 0x0
+#define MX50_PAD_I2C1_SDA__UART2_RXD_MUX 0x044 0x2f0 0x7cc 0x2 0x1
+#define MX50_PAD_I2C2_SCL__I2C2_SCL 0x048 0x2f4 0x000 0x0 0x0
+#define MX50_PAD_I2C2_SCL__GPIO6_20 0x048 0x2f4 0x000 0x1 0x0
+#define MX50_PAD_I2C2_SCL__UART2_CTS 0x048 0x2f4 0x000 0x2 0x0
+#define MX50_PAD_I2C2_SDA__I2C2_SDA 0x04c 0x2f8 0x000 0x0 0x0
+#define MX50_PAD_I2C2_SDA__GPIO6_21 0x04c 0x2f8 0x000 0x1 0x0
+#define MX50_PAD_I2C2_SDA__UART2_RTS 0x04c 0x2f8 0x7c8 0x2 0x1
+#define MX50_PAD_I2C3_SCL__I2C3_SCL 0x050 0x2fc 0x000 0x0 0x0
+#define MX50_PAD_I2C3_SCL__GPIO6_22 0x050 0x2fc 0x000 0x1 0x0
+#define MX50_PAD_I2C3_SCL__FEC_MDC 0x050 0x2fc 0x000 0x2 0x0
+#define MX50_PAD_I2C3_SCL__GPC_PMIC_RDY 0x050 0x2fc 0x000 0x3 0x0
+#define MX50_PAD_I2C3_SCL__GPT_CAPIN1 0x050 0x2fc 0x000 0x5 0x0
+#define MX50_PAD_I2C3_SCL__OBSERVE_MUX_OBSRV_INT_OUT0 0x050 0x2fc 0x000 0x6 0x0
+#define MX50_PAD_I2C3_SCL__USBOH1_USBOTG_OC 0x050 0x2fc 0x7e8 0x7 0x0
+#define MX50_PAD_I2C3_SDA__I2C3_SDA 0x054 0x300 0x000 0x0 0x0
+#define MX50_PAD_I2C3_SDA__GPIO6_23 0x054 0x300 0x000 0x1 0x0
+#define MX50_PAD_I2C3_SDA__FEC_MDIO 0x054 0x300 0x774 0x2 0x0
+#define MX50_PAD_I2C3_SDA__TZIC_PWRFAIL_INT 0x054 0x300 0x000 0x3 0x0
+#define MX50_PAD_I2C3_SDA__SRTC_ALARM_DEB 0x054 0x300 0x000 0x4 0x0
+#define MX50_PAD_I2C3_SDA__GPT_CAPIN2 0x054 0x300 0x000 0x5 0x0
+#define MX50_PAD_I2C3_SDA__OBSERVE_MUX_OBSRV_INT_OUT1 0x054 0x300 0x000 0x6 0x0
+#define MX50_PAD_I2C3_SDA__USBOH1_USBOTG_PWR 0x054 0x300 0x000 0x7 0x0
+#define MX50_PAD_PWM1__PWM1_PWMO 0x058 0x304 0x000 0x0 0x0
+#define MX50_PAD_PWM1__GPIO6_24 0x058 0x304 0x000 0x1 0x0
+#define MX50_PAD_PWM1__USBOH1_USBOTG_OC 0x058 0x304 0x7e8 0x2 0x1
+#define MX50_PAD_PWM1__GPT_CMPOUT1 0x058 0x304 0x000 0x5 0x0
+#define MX50_PAD_PWM1__OBSERVE_MUX_OBSRV_INT_OUT2 0x058 0x304 0x000 0x6 0x0
+#define MX50_PAD_PWM1__SJC_FAIL 0x058 0x304 0x000 0x7 0x0
+#define MX50_PAD_PWM2__PWM2_PWMO 0x05c 0x308 0x000 0x0 0x0
+#define MX50_PAD_PWM2__GPIO6_25 0x05c 0x308 0x000 0x1 0x0
+#define MX50_PAD_PWM2__USBOH1_USBOTG_PWR 0x05c 0x308 0x000 0x2 0x0
+#define MX50_PAD_PWM2__GPT_CMPOUT2 0x05c 0x308 0x000 0x5 0x0
+#define MX50_PAD_PWM2__OBSERVE_MUX_OBSRV_INT_OUT3 0x05c 0x308 0x000 0x6 0x0
+#define MX50_PAD_PWM2__SRC_ANY_PU_RST 0x05c 0x308 0x000 0x7 0x0
+#define MX50_PAD_OWIRE__OWIRE_LINE 0x060 0x30c 0x000 0x0 0x0
+#define MX50_PAD_OWIRE__GPIO6_26 0x060 0x30c 0x000 0x1 0x0
+#define MX50_PAD_OWIRE__USBOH1_USBH1_OC 0x060 0x30c 0x000 0x2 0x0
+#define MX50_PAD_OWIRE__CCM_SSI_EXT1_CLK 0x060 0x30c 0x000 0x3 0x0
+#define MX50_PAD_OWIRE__EPDC_PWRIRQ 0x060 0x30c 0x000 0x4 0x0
+#define MX50_PAD_OWIRE__GPT_CMPOUT3 0x060 0x30c 0x000 0x5 0x0
+#define MX50_PAD_OWIRE__OBSERVE_MUX_OBSRV_INT_OUT4 0x060 0x30c 0x000 0x6 0x0
+#define MX50_PAD_OWIRE__SJC_JTAG_ACT 0x060 0x30c 0x000 0x7 0x0
+#define MX50_PAD_EPITO__EPIT1_EPITO 0x064 0x310 0x000 0x0 0x0
+#define MX50_PAD_EPITO__GPIO6_27 0x064 0x310 0x000 0x1 0x0
+#define MX50_PAD_EPITO__USBOH1_USBH1_PWR 0x064 0x310 0x000 0x2 0x0
+#define MX50_PAD_EPITO__CCM_SSI_EXT2_CLK 0x064 0x310 0x000 0x3 0x0
+#define MX50_PAD_EPITO__DPLLIP1_TOG_EN 0x064 0x310 0x000 0x4 0x0
+#define MX50_PAD_EPITO__GPT_CLK_IN 0x064 0x310 0x000 0x5 0x0
+#define MX50_PAD_EPITO__PMU_IRQ_B 0x064 0x310 0x000 0x6 0x0
+#define MX50_PAD_EPITO__SJC_DE_B 0x064 0x310 0x000 0x7 0x0
+#define MX50_PAD_WDOG__WDOG1_WDOG_B 0x068 0x314 0x000 0x0 0x0
+#define MX50_PAD_WDOG__GPIO6_28 0x068 0x314 0x000 0x1 0x0
+#define MX50_PAD_WDOG__WDOG1_WDOG_RST_B_DEB 0x068 0x314 0x000 0x2 0x0
+#define MX50_PAD_WDOG__CCM_XTAL32K 0x068 0x314 0x000 0x6 0x0
+#define MX50_PAD_WDOG__SJC_DONE 0x068 0x314 0x000 0x7 0x0
+#define MX50_PAD_SSI_TXFS__AUDMUX_AUD3_TXFS 0x06c 0x318 0x000 0x0 0x0
+#define MX50_PAD_SSI_TXFS__GPIO6_0 0x06c 0x318 0x000 0x1 0x0
+#define MX50_PAD_SSI_TXFS__SRC_BT_FUSE_RSV_1 0x06c 0x318 0x000 0x6 0x0
+#define MX50_PAD_SSI_TXFS__USBPHY1_DATAOUT_8 0x06c 0x318 0x000 0x7 0x0
+#define MX50_PAD_SSI_TXC__AUDMUX_AUD3_TXC 0x070 0x31c 0x000 0x0 0x0
+#define MX50_PAD_SSI_TXC__GPIO6_1 0x070 0x31c 0x000 0x1 0x0
+#define MX50_PAD_SSI_TXC__SRC_BT_FUSE_RSV_0 0x070 0x31c 0x000 0x6 0x0
+#define MX50_PAD_SSI_TXC__USBPHY1_DATAOUT_9 0x070 0x31c 0x000 0x7 0x0
+#define MX50_PAD_SSI_TXD__AUDMUX_AUD3_TXD 0x074 0x320 0x000 0x0 0x0
+#define MX50_PAD_SSI_TXD__GPIO6_2 0x074 0x320 0x000 0x1 0x0
+#define MX50_PAD_SSI_TXD__CSPI_RDY 0x074 0x320 0x6e8 0x4 0x0
+#define MX50_PAD_SSI_TXD__USBPHY1_DATAOUT_10 0x074 0x320 0x000 0x7 0x0
+#define MX50_PAD_SSI_RXD__AUDMUX_AUD3_RXD 0x078 0x324 0x000 0x0 0x0
+#define MX50_PAD_SSI_RXD__GPIO6_3 0x078 0x324 0x000 0x1 0x0
+#define MX50_PAD_SSI_RXD__CSPI_SS3 0x078 0x324 0x6f4 0x4 0x0
+#define MX50_PAD_SSI_RXD__USBPHY1_DATAOUT_11 0x078 0x324 0x000 0x7 0x0
+#define MX50_PAD_SSI_RXFS__AUDMUX_AUD3_RXFS 0x07c 0x328 0x000 0x0 0x0
+#define MX50_PAD_SSI_RXFS__GPIO6_4 0x07c 0x328 0x000 0x1 0x0
+#define MX50_PAD_SSI_RXFS__UART5_TXD_MUX 0x07c 0x328 0x7e4 0x2 0x0
+#define MX50_PAD_SSI_RXFS__EIM_WEIM_D_6 0x07c 0x328 0x804 0x3 0x0
+#define MX50_PAD_SSI_RXFS__CSPI_SS2 0x07c 0x328 0x6f0 0x4 0x0
+#define MX50_PAD_SSI_RXFS__FEC_COL 0x07c 0x328 0x770 0x5 0x0
+#define MX50_PAD_SSI_RXFS__FEC_MDC 0x07c 0x328 0x000 0x6 0x0
+#define MX50_PAD_SSI_RXFS__USBPHY1_DATAOUT_12 0x07c 0x328 0x000 0x7 0x0
+#define MX50_PAD_SSI_RXC__AUDMUX_AUD3_RXC 0x080 0x32c 0x000 0x0 0x0
+#define MX50_PAD_SSI_RXC__GPIO6_5 0x080 0x32c 0x000 0x1 0x0
+#define MX50_PAD_SSI_RXC__UART5_RXD_MUX 0x080 0x32c 0x7e4 0x2 0x1
+#define MX50_PAD_SSI_RXC__EIM_WEIM_D_7 0x080 0x32c 0x808 0x3 0x0
+#define MX50_PAD_SSI_RXC__CSPI_SS1 0x080 0x32c 0x6ec 0x4 0x0
+#define MX50_PAD_SSI_RXC__FEC_RX_CLK 0x080 0x32c 0x780 0x5 0x0
+#define MX50_PAD_SSI_RXC__FEC_MDIO 0x080 0x32c 0x774 0x6 0x1
+#define MX50_PAD_SSI_RXC__USBPHY1_DATAOUT_13 0x080 0x32c 0x000 0x7 0x0
+#define MX50_PAD_UART1_TXD__UART1_TXD_MUX 0x084 0x330 0x7c4 0x0 0x0
+#define MX50_PAD_UART1_TXD__GPIO6_6 0x084 0x330 0x000 0x1 0x0
+#define MX50_PAD_UART1_TXD__USBPHY1_DATAOUT_14 0x084 0x330 0x000 0x7 0x0
+#define MX50_PAD_UART1_RXD__UART1_RXD_MUX 0x088 0x334 0x7c4 0x0 0x1
+#define MX50_PAD_UART1_RXD__GPIO6_7 0x088 0x334 0x000 0x1 0x0
+#define MX50_PAD_UART1_RXD__USBPHY1_DATAOUT_15 0x088 0x334 0x000 0x7 0x0
+#define MX50_PAD_UART1_CTS__UART1_CTS 0x08c 0x338 0x000 0x0 0x0
+#define MX50_PAD_UART1_CTS__GPIO6_8 0x08c 0x338 0x000 0x1 0x0
+#define MX50_PAD_UART1_CTS__UART5_TXD_MUX 0x08c 0x338 0x7e4 0x2 0x2
+#define MX50_PAD_UART1_CTS__ESDHC4_DAT4 0x08c 0x338 0x760 0x4 0x0
+#define MX50_PAD_UART1_CTS__ESDHC4_CMD 0x08c 0x338 0x74c 0x5 0x0
+#define MX50_PAD_UART1_CTS__USBPHY2_DATAOUT_8 0x08c 0x338 0x000 0x7 0x0
+#define MX50_PAD_UART1_RTS__UART1_RTS 0x090 0x33c 0x7c0 0x0 0x3
+#define MX50_PAD_UART1_RTS__GPIO6_9 0x090 0x33c 0x000 0x1 0x0
+#define MX50_PAD_UART1_RTS__UART5_RXD_MUX 0x090 0x33c 0x7e4 0x2 0x3
+#define MX50_PAD_UART1_RTS__ESDHC4_DAT5 0x090 0x33c 0x764 0x4 0x0
+#define MX50_PAD_UART1_RTS__ESDHC4_CLK 0x090 0x33c 0x748 0x5 0x0
+#define MX50_PAD_UART1_RTS__USBPHY2_DATAOUT_9 0x090 0x33c 0x000 0x7 0x0
+#define MX50_PAD_UART2_TXD__UART2_TXD_MUX 0x094 0x340 0x7cc 0x0 0x2
+#define MX50_PAD_UART2_TXD__GPIO6_10 0x094 0x340 0x000 0x1 0x0
+#define MX50_PAD_UART2_TXD__ESDHC4_DAT6 0x094 0x340 0x768 0x4 0x0
+#define MX50_PAD_UART2_TXD__ESDHC4_DAT4 0x094 0x340 0x760 0x5 0x1
+#define MX50_PAD_UART2_TXD__USBPHY2_DATAOUT_10 0x094 0x340 0x000 0x7 0x0
+#define MX50_PAD_UART2_RXD__UART2_RXD_MUX 0x098 0x344 0x7cc 0x0 0x3
+#define MX50_PAD_UART2_RXD__GPIO6_11 0x098 0x344 0x000 0x1 0x0
+#define MX50_PAD_UART2_RXD__ESDHC4_DAT7 0x098 0x344 0x76c 0x4 0x0
+#define MX50_PAD_UART2_RXD__ESDHC4_DAT5 0x098 0x344 0x764 0x5 0x1
+#define MX50_PAD_UART2_RXD__USBPHY2_DATAOUT_11 0x098 0x344 0x000 0x7 0x0
+#define MX50_PAD_UART2_CTS__UART2_CTS 0x09c 0x348 0x000 0x0 0x0
+#define MX50_PAD_UART2_CTS__GPIO6_12 0x09c 0x348 0x000 0x1 0x0
+#define MX50_PAD_UART2_CTS__ESDHC4_CMD 0x09c 0x348 0x74c 0x4 0x1
+#define MX50_PAD_UART2_CTS__ESDHC4_DAT6 0x09c 0x348 0x768 0x5 0x1
+#define MX50_PAD_UART2_CTS__USBPHY2_DATAOUT_12 0x09c 0x348 0x000 0x7 0x0
+#define MX50_PAD_UART2_RTS__UART2_RTS 0x0a0 0x34c 0x7c8 0x0 0x2
+#define MX50_PAD_UART2_RTS__GPIO6_13 0x0a0 0x34c 0x000 0x1 0x0
+#define MX50_PAD_UART2_RTS__ESDHC4_CLK 0x0a0 0x34c 0x748 0x4 0x1
+#define MX50_PAD_UART2_RTS__ESDHC4_DAT7 0x0a0 0x34c 0x76c 0x5 0x1
+#define MX50_PAD_UART2_RTS__USBPHY2_DATAOUT_13 0x0a0 0x34c 0x000 0x7 0x0
+#define MX50_PAD_UART3_TXD__UART3_TXD_MUX 0x0a4 0x350 0x7d4 0x0 0x0
+#define MX50_PAD_UART3_TXD__GPIO6_14 0x0a4 0x350 0x000 0x1 0x0
+#define MX50_PAD_UART3_TXD__ESDHC1_DAT4 0x0a4 0x350 0x000 0x3 0x0
+#define MX50_PAD_UART3_TXD__ESDHC4_DAT0 0x0a4 0x350 0x000 0x4 0x0
+#define MX50_PAD_UART3_TXD__ESDHC2_WP 0x0a4 0x350 0x744 0x5 0x0
+#define MX50_PAD_UART3_TXD__EIM_WEIM_D_12 0x0a4 0x350 0x81c 0x6 0x0
+#define MX50_PAD_UART3_TXD__USBPHY2_DATAOUT_14 0x0a4 0x350 0x000 0x7 0x0
+#define MX50_PAD_UART3_RXD__UART3_RXD_MUX 0x0a8 0x354 0x7d4 0x0 0x1
+#define MX50_PAD_UART3_RXD__GPIO6_15 0x0a8 0x354 0x000 0x1 0x0
+#define MX50_PAD_UART3_RXD__ESDHC1_DAT5 0x0a8 0x354 0x000 0x3 0x0
+#define MX50_PAD_UART3_RXD__ESDHC4_DAT1 0x0a8 0x354 0x754 0x4 0x0
+#define MX50_PAD_UART3_RXD__ESDHC2_CD 0x0a8 0x354 0x740 0x5 0x0
+#define MX50_PAD_UART3_RXD__EIM_WEIM_D_13 0x0a8 0x354 0x820 0x6 0x0
+#define MX50_PAD_UART3_RXD__USBPHY2_DATAOUT_15 0x0a8 0x354 0x000 0x7 0x0
+#define MX50_PAD_UART4_TXD__UART4_TXD_MUX 0x0ac 0x358 0x7dc 0x0 0x0
+#define MX50_PAD_UART4_TXD__GPIO6_16 0x0ac 0x358 0x000 0x1 0x0
+#define MX50_PAD_UART4_TXD__UART3_CTS 0x0ac 0x358 0x7d0 0x2 0x0
+#define MX50_PAD_UART4_TXD__ESDHC1_DAT6 0x0ac 0x358 0x000 0x3 0x0
+#define MX50_PAD_UART4_TXD__ESDHC4_DAT2 0x0ac 0x358 0x758 0x4 0x0
+#define MX50_PAD_UART4_TXD__ESDHC2_LCTL 0x0ac 0x358 0x000 0x5 0x0
+#define MX50_PAD_UART4_TXD__EIM_WEIM_D_14 0x0ac 0x358 0x824 0x6 0x0
+#define MX50_PAD_UART4_RXD__UART4_RXD_MUX 0x0b0 0x35c 0x7dc 0x0 0x1
+#define MX50_PAD_UART4_RXD__GPIO6_17 0x0b0 0x35c 0x000 0x1 0x0
+#define MX50_PAD_UART4_RXD__UART3_RTS 0x0b0 0x35c 0x7d0 0x2 0x1
+#define MX50_PAD_UART4_RXD__ESDHC1_DAT7 0x0b0 0x35c 0x000 0x3 0x0
+#define MX50_PAD_UART4_RXD__ESDHC4_DAT3 0x0b0 0x35c 0x75c 0x4 0x0
+#define MX50_PAD_UART4_RXD__ESDHC1_LCTL 0x0b0 0x35c 0x000 0x5 0x0
+#define MX50_PAD_UART4_RXD__EIM_WEIM_D_15 0x0b0 0x35c 0x828 0x6 0x0
+#define MX50_PAD_CSPI_SCLK__CSPI_SCLK 0x0b4 0x360 0x000 0x0 0x0
+#define MX50_PAD_CSPI_SCLK__GPIO4_8 0x0b4 0x360 0x000 0x1 0x0
+#define MX50_PAD_CSPI_MOSI__CSPI_MOSI 0x0b8 0x364 0x000 0x0 0x0
+#define MX50_PAD_CSPI_MOSI__GPIO4_9 0x0b8 0x364 0x000 0x1 0x0
+#define MX50_PAD_CSPI_MISO__CSPI_MISO 0x0bc 0x368 0x000 0x0 0x0
+#define MX50_PAD_CSPI_MISO__GPIO4_10 0x0bc 0x368 0x000 0x1 0x0
+#define MX50_PAD_CSPI_SS0__CSPI_SS0 0x0c0 0x36c 0x000 0x0 0x0
+#define MX50_PAD_CSPI_SS0__GPIO4_11 0x0c0 0x36c 0x000 0x1 0x0
+#define MX50_PAD_ECSPI1_SCLK__ECSPI1_SCLK 0x0c4 0x370 0x000 0x0 0x0
+#define MX50_PAD_ECSPI1_SCLK__GPIO4_12 0x0c4 0x370 0x000 0x1 0x0
+#define MX50_PAD_ECSPI1_SCLK__CSPI_RDY 0x0c4 0x370 0x6e8 0x2 0x1
+#define MX50_PAD_ECSPI1_SCLK__ECSPI2_RDY 0x0c4 0x370 0x000 0x3 0x0
+#define MX50_PAD_ECSPI1_SCLK__UART3_RTS 0x0c4 0x370 0x7d0 0x4 0x2
+#define MX50_PAD_ECSPI1_SCLK__EPDC_SDCE_6 0x0c4 0x370 0x000 0x5 0x0
+#define MX50_PAD_ECSPI1_SCLK__EIM_WEIM_D_8 0x0c4 0x370 0x80c 0x7 0x0
+#define MX50_PAD_ECSPI1_MOSI__ECSPI1_MOSI 0x0c8 0x374 0x000 0x0 0x0
+#define MX50_PAD_ECSPI1_MOSI__GPIO4_13 0x0c8 0x374 0x000 0x1 0x0
+#define MX50_PAD_ECSPI1_MOSI__CSPI_SS1 0x0c8 0x374 0x6ec 0x2 0x1
+#define MX50_PAD_ECSPI1_MOSI__ECSPI2_SS1 0x0c8 0x374 0x000 0x3 0x0
+#define MX50_PAD_ECSPI1_MOSI__UART3_CTS 0x0c8 0x374 0x000 0x4 0x0
+#define MX50_PAD_ECSPI1_MOSI__EPDC_SDCE_7 0x0c8 0x374 0x000 0x5 0x0
+#define MX50_PAD_ECSPI1_MOSI__EIM_WEIM_D_9 0x0c8 0x374 0x810 0x7 0x0
+#define MX50_PAD_ECSPI1_MISO__ECSPI1_MISO 0x0cc 0x378 0x000 0x0 0x0
+#define MX50_PAD_ECSPI1_MISO__GPIO4_14 0x0cc 0x378 0x000 0x1 0x0
+#define MX50_PAD_ECSPI1_MISO__CSPI_SS2 0x0cc 0x378 0x6f0 0x2 0x1
+#define MX50_PAD_ECSPI1_MISO__ECSPI2_SS2 0x0cc 0x378 0x000 0x3 0x0
+#define MX50_PAD_ECSPI1_MISO__UART4_RTS 0x0cc 0x378 0x7d8 0x4 0x0
+#define MX50_PAD_ECSPI1_MISO__EPDC_SDCE_8 0x0cc 0x378 0x000 0x5 0x0
+#define MX50_PAD_ECSPI1_MISO__EIM_WEIM_D_10 0x0cc 0x378 0x814 0x7 0x0
+#define MX50_PAD_ECSPI1_SS0__ECSPI1_SS0 0x0d0 0x37c 0x000 0x0 0x0
+#define MX50_PAD_ECSPI1_SS0__GPIO4_15 0x0d0 0x37c 0x000 0x1 0x0
+#define MX50_PAD_ECSPI1_SS0__CSPI_SS3 0x0d0 0x37c 0x6f4 0x2 0x1
+#define MX50_PAD_ECSPI1_SS0__ECSPI2_SS3 0x0d0 0x37c 0x000 0x3 0x0
+#define MX50_PAD_ECSPI1_SS0__UART4_CTS 0x0d0 0x37c 0x000 0x4 0x0
+#define MX50_PAD_ECSPI1_SS0__EPDC_SDCE_9 0x0d0 0x37c 0x000 0x5 0x0
+#define MX50_PAD_ECSPI1_SS0__EIM_WEIM_D_11 0x0d0 0x37c 0x818 0x7 0x0
+#define MX50_PAD_ECSPI2_SCLK__ECSPI2_SCLK 0x0d4 0x380 0x000 0x0 0x0
+#define MX50_PAD_ECSPI2_SCLK__GPIO4_16 0x0d4 0x380 0x000 0x1 0x0
+#define MX50_PAD_ECSPI2_SCLK__ELCDIF_WR_RWN 0x0d4 0x380 0x000 0x2 0x0
+#define MX50_PAD_ECSPI2_SCLK__ECSPI1_RDY 0x0d4 0x380 0x000 0x3 0x0
+#define MX50_PAD_ECSPI2_SCLK__UART5_RTS 0x0d4 0x380 0x7e0 0x4 0x0
+#define MX50_PAD_ECSPI2_SCLK__ELCDIF_DOTCLK 0x0d4 0x380 0x000 0x5 0x0
+#define MX50_PAD_ECSPI2_SCLK__EIM_NANDF_CEN_4 0x0d4 0x380 0x000 0x6 0x0
+#define MX50_PAD_ECSPI2_SCLK__EIM_WEIM_D_8 0x0d4 0x380 0x80c 0x7 0x1
+#define MX50_PAD_ECSPI2_MOSI__ECSPI2_MOSI 0x0d8 0x384 0x000 0x0 0x0
+#define MX50_PAD_ECSPI2_MOSI__GPIO4_17 0x0d8 0x384 0x000 0x1 0x0
+#define MX50_PAD_ECSPI2_MOSI__ELCDIF_RE_E 0x0d8 0x384 0x000 0x2 0x0
+#define MX50_PAD_ECSPI2_MOSI__ECSPI1_SS1 0x0d8 0x384 0x000 0x3 0x0
+#define MX50_PAD_ECSPI2_MOSI__UART5_CTS 0x0d8 0x384 0x7e0 0x4 0x1
+#define MX50_PAD_ECSPI2_MOSI__ELCDIF_ENABLE 0x0d8 0x384 0x000 0x5 0x0
+#define MX50_PAD_ECSPI2_MOSI__EIM_NANDF_CEN_5 0x0d8 0x384 0x000 0x6 0x0
+#define MX50_PAD_ECSPI2_MOSI__EIM_WEIM_D_9 0x0d8 0x384 0x810 0x7 0x1
+#define MX50_PAD_ECSPI2_MISO__ECSPI2_MISO 0x0dc 0x388 0x000 0x0 0x0
+#define MX50_PAD_ECSPI2_MISO__GPIO4_18 0x0dc 0x388 0x000 0x1 0x0
+#define MX50_PAD_ECSPI2_MISO__ELCDIF_RS 0x0dc 0x388 0x000 0x2 0x0
+#define MX50_PAD_ECSPI2_MISO__ECSPI1_SS2 0x0dc 0x388 0x000 0x3 0x0
+#define MX50_PAD_ECSPI2_MISO__UART5_TXD_MUX 0x0dc 0x388 0x7e4 0x4 0x4
+#define MX50_PAD_ECSPI2_MISO__ELCDIF_VSYNC 0x0dc 0x388 0x73c 0x5 0x0
+#define MX50_PAD_ECSPI2_MISO__EIM_NANDF_CEN_6 0x0dc 0x388 0x000 0x6 0x0
+#define MX50_PAD_ECSPI2_MISO__EIM_WEIM_D_10 0x0dc 0x388 0x814 0x7 0x1
+#define MX50_PAD_ECSPI2_SS0__ECSPI2_SS0 0x0e0 0x38c 0x000 0x0 0x0
+#define MX50_PAD_ECSPI2_SS0__GPIO4_19 0x0e0 0x38c 0x000 0x1 0x0
+#define MX50_PAD_ECSPI2_SS0__ELCDIF_CS 0x0e0 0x38c 0x000 0x2 0x0
+#define MX50_PAD_ECSPI2_SS0__ECSPI2_SS3 0x0e0 0x38c 0x000 0x3 0x0
+#define MX50_PAD_ECSPI2_SS0__UART5_RXD_MUX 0x0e0 0x38c 0x7e4 0x4 0x5
+#define MX50_PAD_ECSPI2_SS0__ELCDIF_HSYNC 0x0e0 0x38c 0x6f8 0x5 0x0
+#define MX50_PAD_ECSPI2_SS0__EIM_NANDF_CEN_7 0x0e0 0x38c 0x000 0x6 0x0
+#define MX50_PAD_ECSPI2_SS0__EIM_WEIM_D_11 0x0e0 0x38c 0x818 0x7 0x1
+#define MX50_PAD_SD1_CLK__ESDHC1_CLK 0x0e4 0x390 0x000 0x0 0x0
+#define MX50_PAD_SD1_CLK__GPIO5_0 0x0e4 0x390 0x000 0x1 0x0
+#define MX50_PAD_SD1_CLK__CCM_CLKO 0x0e4 0x390 0x000 0x7 0x0
+#define MX50_PAD_SD1_CMD__ESDHC1_CMD 0x0e8 0x394 0x000 0x0 0x0
+#define MX50_PAD_SD1_CMD__GPIO5_1 0x0e8 0x394 0x000 0x1 0x0
+#define MX50_PAD_SD1_CMD__CCM_CLKO2 0x0e8 0x394 0x000 0x7 0x0
+#define MX50_PAD_SD1_D0__ESDHC1_DAT0 0x0ec 0x398 0x000 0x0 0x0
+#define MX50_PAD_SD1_D0__GPIO5_2 0x0ec 0x398 0x000 0x1 0x0
+#define MX50_PAD_SD1_D0__CCM_PLL1_BYP 0x0ec 0x398 0x6dc 0x7 0x0
+#define MX50_PAD_SD1_D1__ESDHC1_DAT1 0x0f0 0x39c 0x000 0x0 0x0
+#define MX50_PAD_SD1_D1__GPIO5_3 0x0f0 0x39c 0x000 0x1 0x0
+#define MX50_PAD_SD1_D1__CCM_PLL2_BYP 0x0f0 0x39c 0x000 0x7 0x0
+#define MX50_PAD_SD1_D2__ESDHC1_DAT2 0x0f4 0x3a0 0x000 0x0 0x0
+#define MX50_PAD_SD1_D2__GPIO5_4 0x0f4 0x3a0 0x000 0x1 0x0
+#define MX50_PAD_SD1_D2__CCM_PLL3_BYP 0x0f4 0x3a0 0x6e4 0x7 0x0
+#define MX50_PAD_SD1_D3__ESDHC1_DAT3 0x0f8 0x3a4 0x000 0x0 0x0
+#define MX50_PAD_SD1_D3__GPIO5_5 0x0f8 0x3a4 0x000 0x1 0x0
+#define MX50_PAD_SD2_CLK__ESDHC2_CLK 0x0fc 0x3a8 0x000 0x0 0x0
+#define MX50_PAD_SD2_CLK__GPIO5_6 0x0fc 0x3a8 0x000 0x1 0x0
+#define MX50_PAD_SD2_CLK__MSHC_SCLK 0x0fc 0x3a8 0x000 0x2 0x0
+#define MX50_PAD_SD2_CMD__ESDHC2_CMD 0x100 0x3ac 0x000 0x0 0x0
+#define MX50_PAD_SD2_CMD__GPIO5_7 0x100 0x3ac 0x000 0x1 0x0
+#define MX50_PAD_SD2_CMD__MSHC_BS 0x100 0x3ac 0x000 0x2 0x0
+#define MX50_PAD_SD2_D0__ESDHC2_DAT0 0x104 0x3b0 0x000 0x0 0x0
+#define MX50_PAD_SD2_D0__GPIO5_8 0x104 0x3b0 0x000 0x1 0x0
+#define MX50_PAD_SD2_D0__MSHC_DATA_0 0x104 0x3b0 0x000 0x2 0x0
+#define MX50_PAD_SD2_D0__KPP_COL_4 0x104 0x3b0 0x790 0x3 0x0
+#define MX50_PAD_SD2_D1__ESDHC2_DAT1 0x108 0x3b4 0x000 0x0 0x0
+#define MX50_PAD_SD2_D1__GPIO5_9 0x108 0x3b4 0x000 0x1 0x0
+#define MX50_PAD_SD2_D1__MSHC_DATA_1 0x108 0x3b4 0x000 0x2 0x0
+#define MX50_PAD_SD2_D1__KPP_ROW_4 0x108 0x3b4 0x7a0 0x3 0x0
+#define MX50_PAD_SD2_D2__ESDHC2_DAT2 0x10c 0x3b8 0x000 0x0 0x0
+#define MX50_PAD_SD2_D2__GPIO5_10 0x10c 0x3b8 0x000 0x1 0x0
+#define MX50_PAD_SD2_D2__MSHC_DATA_2 0x10c 0x3b8 0x000 0x2 0x0
+#define MX50_PAD_SD2_D2__KPP_COL_5 0x10c 0x3b8 0x794 0x3 0x0
+#define MX50_PAD_SD2_D3__ESDHC2_DAT3 0x110 0x3bc 0x000 0x0 0x0
+#define MX50_PAD_SD2_D3__GPIO5_11 0x110 0x3bc 0x000 0x1 0x0
+#define MX50_PAD_SD2_D3__MSHC_DATA_3 0x110 0x3bc 0x000 0x2 0x0
+#define MX50_PAD_SD2_D3__KPP_ROW_5 0x110 0x3bc 0x7a4 0x3 0x0
+#define MX50_PAD_SD2_D4__ESDHC2_DAT4 0x114 0x3c0 0x000 0x0 0x0
+#define MX50_PAD_SD2_D4__GPIO5_12 0x114 0x3c0 0x000 0x1 0x0
+#define MX50_PAD_SD2_D4__AUDMUX_AUD4_RXFS 0x114 0x3c0 0x6d0 0x2 0x0
+#define MX50_PAD_SD2_D4__KPP_COL_6 0x114 0x3c0 0x798 0x3 0x0
+#define MX50_PAD_SD2_D4__EIM_WEIM_D_0 0x114 0x3c0 0x7ec 0x4 0x0
+#define MX50_PAD_SD2_D4__CCM_CCM_OUT_0 0x114 0x3c0 0x000 0x7 0x0
+#define MX50_PAD_SD2_D5__ESDHC2_DAT5 0x118 0x3c4 0x000 0x0 0x0
+#define MX50_PAD_SD2_D5__GPIO5_13 0x118 0x3c4 0x000 0x1 0x0
+#define MX50_PAD_SD2_D5__AUDMUX_AUD4_RXC 0x118 0x3c4 0x6cc 0x2 0x0
+#define MX50_PAD_SD2_D5__KPP_ROW_6 0x118 0x3c4 0x7a8 0x3 0x0
+#define MX50_PAD_SD2_D5__EIM_WEIM_D_1 0x118 0x3c4 0x7f0 0x4 0x0
+#define MX50_PAD_SD2_D5__CCM_CCM_OUT_1 0x118 0x3c4 0x000 0x7 0x0
+#define MX50_PAD_SD2_D6__ESDHC2_DAT6 0x11c 0x3c8 0x000 0x0 0x0
+#define MX50_PAD_SD2_D6__GPIO5_14 0x11c 0x3c8 0x000 0x1 0x0
+#define MX50_PAD_SD2_D6__AUDMUX_AUD4_RXD 0x11c 0x3c8 0x6c4 0x2 0x0
+#define MX50_PAD_SD2_D6__KPP_COL_7 0x11c 0x3c8 0x79c 0x3 0x0
+#define MX50_PAD_SD2_D6__EIM_WEIM_D_2 0x11c 0x3c8 0x7f4 0x4 0x0
+#define MX50_PAD_SD2_D6__CCM_CCM_OUT_2 0x11c 0x3c8 0x000 0x7 0x0
+#define MX50_PAD_SD2_D7__ESDHC2_DAT7 0x120 0x3cc 0x000 0x0 0x0
+#define MX50_PAD_SD2_D7__GPIO5_15 0x120 0x3cc 0x000 0x1 0x0
+#define MX50_PAD_SD2_D7__AUDMUX_AUD4_TXFS 0x120 0x3cc 0x6d8 0x2 0x0
+#define MX50_PAD_SD2_D7__KPP_ROW_7 0x120 0x3cc 0x7ac 0x3 0x0
+#define MX50_PAD_SD2_D7__EIM_WEIM_D_3 0x120 0x3cc 0x7f8 0x4 0x0
+#define MX50_PAD_SD2_D7__CCM_STOP 0x120 0x3cc 0x000 0x7 0x0
+#define MX50_PAD_SD2_WP__ESDHC2_WP 0x124 0x3d0 0x744 0x0 0x1
+#define MX50_PAD_SD2_WP__GPIO5_16 0x124 0x3d0 0x000 0x1 0x0
+#define MX50_PAD_SD2_WP__AUDMUX_AUD4_TXD 0x124 0x3d0 0x6c8 0x2 0x0
+#define MX50_PAD_SD2_WP__EIM_WEIM_D_4 0x124 0x3d0 0x7fc 0x4 0x0
+#define MX50_PAD_SD2_WP__CCM_WAIT 0x124 0x3d0 0x000 0x7 0x0
+#define MX50_PAD_SD2_CD__ESDHC2_CD 0x128 0x3d4 0x740 0x0 0x1
+#define MX50_PAD_SD2_CD__GPIO5_17 0x128 0x3d4 0x000 0x1 0x0
+#define MX50_PAD_SD2_CD__AUDMUX_AUD4_TXC 0x128 0x3d4 0x6d4 0x2 0x0
+#define MX50_PAD_SD2_CD__EIM_WEIM_D_5 0x128 0x3d4 0x800 0x4 0x0
+#define MX50_PAD_SD2_CD__CCM_REF_EN_B 0x128 0x3d4 0x000 0x7 0x0
+#define MX50_PAD_DISP_D0__ELCDIF_DAT_0 0x12c 0x40c 0x6fc 0x0 0x0
+#define MX50_PAD_DISP_D0__GPIO2_0 0x12c 0x40c 0x000 0x1 0x0
+#define MX50_PAD_DISP_D0__FEC_TX_CLK 0x12c 0x40c 0x78c 0x2 0x0
+#define MX50_PAD_DISP_D0__EIM_WEIM_A_16 0x12c 0x40c 0x000 0x3 0x0
+#define MX50_PAD_DISP_D0__SDMA_DEBUG_PC_0 0x12c 0x40c 0x000 0x6 0x0
+#define MX50_PAD_DISP_D0__USBPHY1_VSTATUS_0 0x12c 0x40c 0x000 0x7 0x0
+#define MX50_PAD_DISP_D1__ELCDIF_DAT_1 0x130 0x410 0x700 0x0 0x0
+#define MX50_PAD_DISP_D1__GPIO2_1 0x130 0x410 0x000 0x1 0x0
+#define MX50_PAD_DISP_D1__FEC_RX_ERR 0x130 0x410 0x788 0x2 0x0
+#define MX50_PAD_DISP_D1__EIM_WEIM_A_17 0x130 0x410 0x000 0x3 0x0
+#define MX50_PAD_DISP_D1__SDMA_DEBUG_PC_1 0x130 0x410 0x000 0x6 0x0
+#define MX50_PAD_DISP_D1__USBPHY1_VSTATUS_1 0x130 0x410 0x000 0x7 0x0
+#define MX50_PAD_DISP_D2__ELCDIF_DAT_2 0x134 0x414 0x704 0x0 0x0
+#define MX50_PAD_DISP_D2__GPIO2_2 0x134 0x414 0x000 0x1 0x0
+#define MX50_PAD_DISP_D2__FEC_RX_DV 0x134 0x414 0x784 0x2 0x0
+#define MX50_PAD_DISP_D2__EIM_WEIM_A_18 0x134 0x414 0x000 0x3 0x0
+#define MX50_PAD_DISP_D2__SDMA_DEBUG_PC_2 0x134 0x414 0x000 0x6 0x0
+#define MX50_PAD_DISP_D2__USBPHY1_VSTATUS_2 0x134 0x414 0x000 0x7 0x0
+#define MX50_PAD_DISP_D3__ELCDIF_DAT_3 0x138 0x418 0x708 0x0 0x0
+#define MX50_PAD_DISP_D3__GPIO2_3 0x138 0x418 0x000 0x1 0x0
+#define MX50_PAD_DISP_D3__FEC_RDATA_1 0x138 0x418 0x77c 0x2 0x0
+#define MX50_PAD_DISP_D3__EIM_WEIM_A_19 0x138 0x418 0x000 0x3 0x0
+#define MX50_PAD_DISP_D3__FEC_COL 0x138 0x418 0x770 0x4 0x1
+#define MX50_PAD_DISP_D3__SDMA_DEBUG_PC_3 0x138 0x418 0x000 0x6 0x0
+#define MX50_PAD_DISP_D3__USBPHY1_VSTATUS_3 0x138 0x418 0x000 0x7 0x0
+#define MX50_PAD_DISP_D4__ELCDIF_DAT_4 0x13c 0x41c 0x70c 0x0 0x0
+#define MX50_PAD_DISP_D4__GPIO2_4 0x13c 0x41c 0x000 0x1 0x0
+#define MX50_PAD_DISP_D4__FEC_RDATA_0 0x13c 0x41c 0x778 0x2 0x0
+#define MX50_PAD_DISP_D4__EIM_WEIM_A_20 0x13c 0x41c 0x000 0x3 0x0
+#define MX50_PAD_DISP_D4__SDMA_DEBUG_PC_4 0x13c 0x41c 0x000 0x6 0x0
+#define MX50_PAD_DISP_D4__USBPHY1_VSTATUS_4 0x13c 0x41c 0x000 0x7 0x0
+#define MX50_PAD_DISP_D5__ELCDIF_DAT_5 0x140 0x420 0x710 0x0 0x0
+#define MX50_PAD_DISP_D5__GPIO2_5 0x140 0x420 0x000 0x1 0x0
+#define MX50_PAD_DISP_D5__FEC_TX_EN 0x140 0x420 0x000 0x2 0x0
+#define MX50_PAD_DISP_D5__EIM_WEIM_A_21 0x140 0x420 0x000 0x3 0x0
+#define MX50_PAD_DISP_D5__SDMA_DEBUG_PC_5 0x140 0x420 0x000 0x6 0x0
+#define MX50_PAD_DISP_D5__USBPHY1_VSTATUS_5 0x140 0x420 0x000 0x7 0x0
+#define MX50_PAD_DISP_D6__ELCDIF_DAT_6 0x144 0x424 0x714 0x0 0x0
+#define MX50_PAD_DISP_D6__GPIO2_6 0x144 0x424 0x000 0x1 0x0
+#define MX50_PAD_DISP_D6__FEC_TDATA_1 0x144 0x424 0x000 0x2 0x0
+#define MX50_PAD_DISP_D6__EIM_WEIM_A_22 0x144 0x424 0x000 0x3 0x0
+#define MX50_PAD_DISP_D6__FEC_RX_CLK 0x144 0x424 0x780 0x4 0x1
+#define MX50_PAD_DISP_D6__SDMA_DEBUG_PC_6 0x144 0x424 0x000 0x6 0x0
+#define MX50_PAD_DISP_D6__USBPHY1_VSTATUS_6 0x144 0x424 0x000 0x7 0x0
+#define MX50_PAD_DISP_D7__ELCDIF_DAT_7 0x148 0x428 0x718 0x0 0x0
+#define MX50_PAD_DISP_D7__GPIO2_7 0x148 0x428 0x000 0x1 0x0
+#define MX50_PAD_DISP_D7__FEC_TDATA_0 0x148 0x428 0x000 0x2 0x0
+#define MX50_PAD_DISP_D7__EIM_WEIM_A_23 0x148 0x428 0x000 0x3 0x0
+#define MX50_PAD_DISP_D7__SDMA_DEBUG_PC_7 0x148 0x428 0x000 0x6 0x0
+#define MX50_PAD_DISP_D7__USBPHY1_VSTATUS_7 0x148 0x428 0x000 0x7 0x0
+#define MX50_PAD_DISP_WR__ELCDIF_WR_RWN 0x14c 0x42c 0x000 0x0 0x0
+#define MX50_PAD_DISP_WR__GPIO2_16 0x14c 0x42c 0x000 0x1 0x0
+#define MX50_PAD_DISP_WR__ELCDIF_DOTCLK 0x14c 0x42c 0x000 0x2 0x0
+#define MX50_PAD_DISP_WR__EIM_WEIM_A_24 0x14c 0x42c 0x000 0x3 0x0
+#define MX50_PAD_DISP_WR__SDMA_DEBUG_PC_8 0x14c 0x42c 0x000 0x6 0x0
+#define MX50_PAD_DISP_WR__USBPHY1_AVALID 0x14c 0x42c 0x000 0x7 0x0
+#define MX50_PAD_DISP_RD__ELCDIF_RD_E 0x150 0x430 0x000 0x0 0x0
+#define MX50_PAD_DISP_RD__GPIO2_19 0x150 0x430 0x000 0x1 0x0
+#define MX50_PAD_DISP_RD__ELCDIF_ENABLE 0x150 0x430 0x000 0x2 0x0
+#define MX50_PAD_DISP_RD__EIM_WEIM_A_25 0x150 0x430 0x000 0x3 0x0
+#define MX50_PAD_DISP_RD__SDMA_DEBUG_PC_9 0x150 0x430 0x000 0x6 0x0
+#define MX50_PAD_DISP_RD__USBPHY1_BVALID 0x150 0x430 0x000 0x7 0x0
+#define MX50_PAD_DISP_RS__ELCDIF_RS 0x154 0x434 0x000 0x0 0x0
+#define MX50_PAD_DISP_RS__GPIO2_17 0x154 0x434 0x000 0x1 0x0
+#define MX50_PAD_DISP_RS__ELCDIF_VSYNC 0x154 0x434 0x73c 0x2 0x1
+#define MX50_PAD_DISP_RS__EIM_WEIM_A_26 0x154 0x434 0x000 0x3 0x0
+#define MX50_PAD_DISP_RS__SDMA_DEBUG_PC_10 0x154 0x434 0x000 0x6 0x0
+#define MX50_PAD_DISP_RS__USBPHY1_ENDSESSION 0x154 0x434 0x000 0x7 0x0
+#define MX50_PAD_DISP_CS__ELCDIF_CS 0x158 0x438 0x000 0x0 0x0
+#define MX50_PAD_DISP_CS__GPIO2_21 0x158 0x438 0x000 0x1 0x0
+#define MX50_PAD_DISP_CS__ELCDIF_HSYNC 0x158 0x438 0x6f8 0x2 0x1
+#define MX50_PAD_DISP_CS__EIM_WEIM_A_27 0x158 0x438 0x000 0x3 0x0
+#define MX50_PAD_DISP_CS__EIM_WEIM_CS_3 0x158 0x438 0x000 0x4 0x0
+#define MX50_PAD_DISP_CS__SDMA_DEBUG_PC_11 0x158 0x438 0x000 0x6 0x0
+#define MX50_PAD_DISP_CS__USBPHY1_IDDIG 0x158 0x438 0x000 0x7 0x0
+#define MX50_PAD_DISP_BUSY__ELCDIF_BUSY 0x15c 0x43c 0x6f8 0x0 0x2
+#define MX50_PAD_DISP_BUSY__GPIO2_18 0x15c 0x43c 0x000 0x1 0x0
+#define MX50_PAD_DISP_BUSY__EIM_WEIM_CS_3 0x15c 0x43c 0x000 0x4 0x0
+#define MX50_PAD_DISP_BUSY__SDMA_DEBUG_PC_12 0x15c 0x43c 0x000 0x6 0x0
+#define MX50_PAD_DISP_BUSY__USBPHY2_HOSTDISCONNECT 0x15c 0x43c 0x000 0x7 0x0
+#define MX50_PAD_DISP_RESET__ELCDIF_RESET 0x160 0x440 0x000 0x0 0x0
+#define MX50_PAD_DISP_RESET__GPIO2_20 0x160 0x440 0x000 0x1 0x0
+#define MX50_PAD_DISP_RESET__EIM_WEIM_CS_3 0x160 0x440 0x000 0x4 0x0
+#define MX50_PAD_DISP_RESET__SDMA_DEBUG_PC_13 0x160 0x440 0x000 0x6 0x0
+#define MX50_PAD_DISP_RESET__USBPHY2_BISTOK 0x160 0x440 0x000 0x7 0x0
+#define MX50_PAD_SD3_CMD__ESDHC3_CMD 0x164 0x444 0x000 0x0 0x0
+#define MX50_PAD_SD3_CMD__GPIO5_18 0x164 0x444 0x000 0x1 0x0
+#define MX50_PAD_SD3_CMD__EIM_NANDF_WRN 0x164 0x444 0x000 0x2 0x0
+#define MX50_PAD_SD3_CMD__SSP_CMD 0x164 0x444 0x000 0x3 0x0
+#define MX50_PAD_SD3_CLK__ESDHC3_CLK 0x168 0x448 0x000 0x0 0x0
+#define MX50_PAD_SD3_CLK__GPIO5_19 0x168 0x448 0x000 0x1 0x0
+#define MX50_PAD_SD3_CLK__EIM_NANDF_RDN 0x168 0x448 0x000 0x2 0x0
+#define MX50_PAD_SD3_CLK__SSP_CLK 0x168 0x448 0x000 0x3 0x0
+#define MX50_PAD_SD3_D0__ESDHC3_DAT0 0x16c 0x44c 0x000 0x0 0x0
+#define MX50_PAD_SD3_D0__GPIO5_20 0x16c 0x44c 0x000 0x1 0x0
+#define MX50_PAD_SD3_D0__EIM_NANDF_D_4 0x16c 0x44c 0x000 0x2 0x0
+#define MX50_PAD_SD3_D0__SSP_D0 0x16c 0x44c 0x000 0x3 0x0
+#define MX50_PAD_SD3_D0__CCM_PLL1_BYP 0x16c 0x44c 0x6dc 0x7 0x1
+#define MX50_PAD_SD3_D1__ESDHC3_DAT1 0x170 0x450 0x000 0x0 0x0
+#define MX50_PAD_SD3_D1__GPIO5_21 0x170 0x450 0x000 0x1 0x0
+#define MX50_PAD_SD3_D1__EIM_NANDF_D_5 0x170 0x450 0x000 0x2 0x0
+#define MX50_PAD_SD3_D1__SSP_D1 0x170 0x450 0x000 0x3 0x0
+#define MX50_PAD_SD3_D1__CCM_PLL2_BYP 0x170 0x450 0x000 0x7 0x0
+#define MX50_PAD_SD3_D2__ESDHC3_DAT2 0x174 0x454 0x000 0x0 0x0
+#define MX50_PAD_SD3_D2__GPIO5_22 0x174 0x454 0x000 0x1 0x0
+#define MX50_PAD_SD3_D2__EIM_NANDF_D_6 0x174 0x454 0x000 0x2 0x0
+#define MX50_PAD_SD3_D2__SSP_D2 0x174 0x454 0x000 0x3 0x0
+#define MX50_PAD_SD3_D2__CCM_PLL3_BYP 0x174 0x454 0x6e4 0x7 0x1
+#define MX50_PAD_SD3_D3__ESDHC3_DAT3 0x178 0x458 0x000 0x0 0x0
+#define MX50_PAD_SD3_D3__GPIO5_23 0x178 0x458 0x000 0x1 0x0
+#define MX50_PAD_SD3_D3__EIM_NANDF_D_7 0x178 0x458 0x000 0x2 0x0
+#define MX50_PAD_SD3_D3__SSP_D3 0x178 0x458 0x000 0x3 0x0
+#define MX50_PAD_SD3_D4__ESDHC3_DAT4 0x17c 0x45c 0x000 0x0 0x0
+#define MX50_PAD_SD3_D4__GPIO5_24 0x17c 0x45c 0x000 0x1 0x0
+#define MX50_PAD_SD3_D4__EIM_NANDF_D_0 0x17c 0x45c 0x000 0x2 0x0
+#define MX50_PAD_SD3_D4__SSP_D4 0x17c 0x45c 0x000 0x3 0x0
+#define MX50_PAD_SD3_D5__ESDHC3_DAT5 0x180 0x460 0x000 0x0 0x0
+#define MX50_PAD_SD3_D5__GPIO5_25 0x180 0x460 0x000 0x1 0x0
+#define MX50_PAD_SD3_D5__EIM_NANDF_D_1 0x180 0x460 0x000 0x2 0x0
+#define MX50_PAD_SD3_D5__SSP_D5 0x180 0x460 0x000 0x3 0x0
+#define MX50_PAD_SD3_D6__ESDHC3_DAT6 0x184 0x464 0x000 0x0 0x0
+#define MX50_PAD_SD3_D6__GPIO5_26 0x184 0x464 0x000 0x1 0x0
+#define MX50_PAD_SD3_D6__EIM_NANDF_D_2 0x184 0x464 0x000 0x2 0x0
+#define MX50_PAD_SD3_D6__SSP_D6 0x184 0x464 0x000 0x3 0x0
+#define MX50_PAD_SD3_D7__ESDHC3_DAT7 0x188 0x468 0x000 0x0 0x0
+#define MX50_PAD_SD3_D7__GPIO5_27 0x188 0x468 0x000 0x1 0x0
+#define MX50_PAD_SD3_D7__EIM_NANDF_D_3 0x188 0x468 0x000 0x2 0x0
+#define MX50_PAD_SD3_D7__SSP_D7 0x188 0x468 0x000 0x3 0x0
+#define MX50_PAD_SD3_WP__ESDHC3_WP 0x18c 0x46c 0x000 0x0 0x0
+#define MX50_PAD_SD3_WP__GPIO5_28 0x18c 0x46c 0x000 0x1 0x0
+#define MX50_PAD_SD3_WP__EIM_NANDF_RESETN 0x18c 0x46c 0x000 0x2 0x0
+#define MX50_PAD_SD3_WP__SSP_CD 0x18c 0x46c 0x000 0x3 0x0
+#define MX50_PAD_SD3_WP__ESDHC4_LCTL 0x18c 0x46c 0x000 0x4 0x0
+#define MX50_PAD_SD3_WP__EIM_WEIM_CS_3 0x18c 0x46c 0x000 0x5 0x0
+#define MX50_PAD_DISP_D8__ELCDIF_DAT_8 0x190 0x470 0x71c 0x0 0x0
+#define MX50_PAD_DISP_D8__GPIO2_8 0x190 0x470 0x000 0x1 0x0
+#define MX50_PAD_DISP_D8__EIM_NANDF_CLE 0x190 0x470 0x000 0x2 0x0
+#define MX50_PAD_DISP_D8__ESDHC1_LCTL 0x190 0x470 0x000 0x3 0x0
+#define MX50_PAD_DISP_D8__ESDHC4_CMD 0x190 0x470 0x74c 0x4 0x2
+#define MX50_PAD_DISP_D8__KPP_COL_4 0x190 0x470 0x790 0x5 0x1
+#define MX50_PAD_DISP_D8__FEC_TX_CLK 0x190 0x470 0x78c 0x6 0x1
+#define MX50_PAD_DISP_D8__USBPHY1_DATAOUT_0 0x190 0x470 0x000 0x7 0x0
+#define MX50_PAD_DISP_D9__ELCDIF_DAT_9 0x194 0x474 0x720 0x0 0x0
+#define MX50_PAD_DISP_D9__GPIO2_9 0x194 0x474 0x000 0x1 0x0
+#define MX50_PAD_DISP_D9__EIM_NANDF_ALE 0x194 0x474 0x000 0x2 0x0
+#define MX50_PAD_DISP_D9__ESDHC2_LCTL 0x194 0x474 0x000 0x3 0x0
+#define MX50_PAD_DISP_D9__ESDHC4_CLK 0x194 0x474 0x748 0x4 0x2
+#define MX50_PAD_DISP_D9__KPP_ROW_4 0x194 0x474 0x7a0 0x5 0x1
+#define MX50_PAD_DISP_D9__FEC_RX_ER 0x194 0x474 0x788 0x6 0x1
+#define MX50_PAD_DISP_D9__USBPHY1_DATAOUT_1 0x194 0x474 0x000 0x7 0x0
+#define MX50_PAD_DISP_D10__ELCDIF_DAT_10 0x198 0x478 0x724 0x0 0x0
+#define MX50_PAD_DISP_D10__GPIO2_10 0x198 0x478 0x000 0x1 0x0
+#define MX50_PAD_DISP_D10__EIM_NANDF_CEN_0 0x198 0x478 0x000 0x2 0x0
+#define MX50_PAD_DISP_D10__ESDHC3_LCTL 0x198 0x478 0x000 0x3 0x0
+#define MX50_PAD_DISP_D10__ESDHC4_DAT0 0x198 0x478 0x000 0x4 0x0
+#define MX50_PAD_DISP_D10__KPP_COL_5 0x198 0x478 0x794 0x5 0x1
+#define MX50_PAD_DISP_D10__FEC_RX_DV 0x198 0x478 0x784 0x6 0x1
+#define MX50_PAD_DISP_D10__USBPHY1_DATAOUT_2 0x198 0x478 0x000 0x7 0x0
+#define MX50_PAD_DISP_D11__ELCDIF_DAT_11 0x19c 0x47c 0x728 0x0 0x0
+#define MX50_PAD_DISP_D11__GPIO2_11 0x19c 0x47c 0x000 0x1 0x0
+#define MX50_PAD_DISP_D11__EIM_NANDF_CEN_1 0x19c 0x47c 0x000 0x2 0x0
+#define MX50_PAD_DISP_D11__ESDHC4_DAT1 0x19c 0x47c 0x754 0x4 0x1
+#define MX50_PAD_DISP_D11__KPP_ROW_5 0x19c 0x47c 0x7a4 0x5 0x1
+#define MX50_PAD_DISP_D11__FEC_RDATA_1 0x19c 0x47c 0x77c 0x6 0x1
+#define MX50_PAD_DISP_D11__USBPHY1_DATAOUT_3 0x19c 0x47c 0x000 0x7 0x0
+#define MX50_PAD_DISP_D12__ELCDIF_DAT_12 0x1a0 0x480 0x72c 0x0 0x0
+#define MX50_PAD_DISP_D12__GPIO2_12 0x1a0 0x480 0x000 0x1 0x0
+#define MX50_PAD_DISP_D12__EIM_NANDF_CEN_2 0x1a0 0x480 0x000 0x2 0x0
+#define MX50_PAD_DISP_D12__ESDHC1_CD 0x1a0 0x480 0x000 0x3 0x0
+#define MX50_PAD_DISP_D12__ESDHC4_DAT2 0x1a0 0x480 0x758 0x4 0x1
+#define MX50_PAD_DISP_D12__KPP_COL_6 0x1a0 0x480 0x798 0x5 0x1
+#define MX50_PAD_DISP_D12__FEC_RDATA_0 0x1a0 0x480 0x778 0x6 0x1
+#define MX50_PAD_DISP_D12__USBPHY1_DATAOUT_4 0x1a0 0x480 0x000 0x7 0x0
+#define MX50_PAD_DISP_D13__ELCDIF_DAT_13 0x1a4 0x484 0x730 0x0 0x0
+#define MX50_PAD_DISP_D13__GPIO2_13 0x1a4 0x484 0x000 0x1 0x0
+#define MX50_PAD_DISP_D13__EIM_NANDF_CEN_3 0x1a4 0x484 0x000 0x2 0x0
+#define MX50_PAD_DISP_D13__ESDHC3_CD 0x1a4 0x484 0x000 0x3 0x0
+#define MX50_PAD_DISP_D13__ESDHC4_DAT3 0x1a4 0x484 0x75c 0x4 0x1
+#define MX50_PAD_DISP_D13__KPP_ROW_6 0x1a4 0x484 0x7a8 0x5 0x1
+#define MX50_PAD_DISP_D13__FEC_TX_EN 0x1a4 0x484 0x000 0x6 0x0
+#define MX50_PAD_DISP_D13__USBPHY1_DATAOUT_5 0x1a4 0x484 0x000 0x7 0x0
+#define MX50_PAD_DISP_D14__ELCDIF_DAT_14 0x1a8 0x488 0x734 0x0 0x0
+#define MX50_PAD_DISP_D14__GPIO2_14 0x1a8 0x488 0x000 0x1 0x0
+#define MX50_PAD_DISP_D14__EIM_NANDF_READY0 0x1a8 0x488 0x7b4 0x2 0x1
+#define MX50_PAD_DISP_D14__ESDHC1_WP 0x1a8 0x488 0x000 0x3 0x0
+#define MX50_PAD_DISP_D14__ESDHC4_WP 0x1a8 0x488 0x000 0x4 0x0
+#define MX50_PAD_DISP_D14__KPP_COL_7 0x1a8 0x488 0x79c 0x5 0x1
+#define MX50_PAD_DISP_D14__FEC_TDATA_1 0x1a8 0x488 0x000 0x6 0x0
+#define MX50_PAD_DISP_D14__USBPHY1_DATAOUT_6 0x1a8 0x488 0x000 0x7 0x0
+#define MX50_PAD_DISP_D15__ELCDIF_DAT_15 0x1ac 0x48c 0x738 0x0 0x0
+#define MX50_PAD_DISP_D15__GPIO2_15 0x1ac 0x48c 0x000 0x1 0x0
+#define MX50_PAD_DISP_D15__EIM_NANDF_DQS 0x1ac 0x48c 0x7b0 0x2 0x1
+#define MX50_PAD_DISP_D15__ESDHC3_RST 0x1ac 0x48c 0x000 0x3 0x0
+#define MX50_PAD_DISP_D15__ESDHC4_CD 0x1ac 0x48c 0x000 0x4 0x0
+#define MX50_PAD_DISP_D15__KPP_ROW_7 0x1ac 0x48c 0x7ac 0x5 0x1
+#define MX50_PAD_DISP_D15__FEC_TDATA_0 0x1ac 0x48c 0x000 0x6 0x0
+#define MX50_PAD_DISP_D15__USBPHY1_DATAOUT_7 0x1ac 0x48c 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D0__EPDC_SDDO_0 0x1b0 0x54c 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D0__GPIO3_0 0x1b0 0x54c 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D0__EIM_WEIM_D_0 0x1b0 0x54c 0x7ec 0x2 0x1
+#define MX50_PAD_EPDC_D0__ELCDIF_RS 0x1b0 0x54c 0x000 0x3 0x0
+#define MX50_PAD_EPDC_D0__ELCDIF_DOTCLK 0x1b0 0x54c 0x000 0x4 0x0
+#define MX50_PAD_EPDC_D0__SDMA_DEBUG_EVT_CHN_LINES_0 0x1b0 0x54c 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D0__USBPHY2_DATAOUT_0 0x1b0 0x54c 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D1__EPDC_SDDO_1 0x1b4 0x550 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D1__GPIO3_1 0x1b4 0x550 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D1__EIM_WEIM_D_1 0x1b4 0x550 0x7f0 0x2 0x1
+#define MX50_PAD_EPDC_D1__ELCDIF_CS 0x1b4 0x550 0x000 0x3 0x0
+#define MX50_PAD_EPDC_D1__ELCDIF_ENABLE 0x1b4 0x550 0x000 0x4 0x0
+#define MX50_PAD_EPDC_D1__SDMA_DEBUG_EVT_CHN_LINES_1 0x1b4 0x550 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D1__USBPHY2_DATAOUT_1 0x1b4 0x550 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D2__EPDC_SDDO_2 0x1b8 0x554 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D2__GPIO3_2 0x1b8 0x554 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D2__EIM_WEIM_D_2 0x1b8 0x554 0x7f4 0x2 0x1
+#define MX50_PAD_EPDC_D2__ELCDIF_WR_RWN 0x1b8 0x554 0x000 0x3 0x0
+#define MX50_PAD_EPDC_D2__ELCDIF_VSYNC 0x1b8 0x554 0x73c 0x4 0x2
+#define MX50_PAD_EPDC_D2__SDMA_DEBUG_EVT_CHN_LINES_2 0x1b8 0x554 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D2__USBPHY2_DATAOUT_2 0x1b8 0x554 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D3__EPDC_SDDO_3 0x1bc 0x558 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D3__GPIO3_3 0x1bc 0x558 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D3__EIM_WEIM_D_3 0x1bc 0x558 0x7f8 0x2 0x1
+#define MX50_PAD_EPDC_D3__ELCDIF_RD_E 0x1bc 0x558 0x000 0x3 0x0
+#define MX50_PAD_EPDC_D3__ELCDIF_HSYNC 0x1bc 0x558 0x6f8 0x4 0x3
+#define MX50_PAD_EPDC_D3__SDMA_DEBUG_EVT_CHN_LINES_3 0x1bc 0x558 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D3__USBPHY2_DATAOUT_3 0x1bc 0x558 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D4__EPDC_SDDO_4 0x1c0 0x55c 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D4__GPIO3_4 0x1c0 0x55c 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D4__EIM_WEIM_D_4 0x1c0 0x55c 0x7fc 0x2 0x1
+#define MX50_PAD_EPDC_D4__SDMA_DEBUG_EVT_CHN_LINES_4 0x1c0 0x55c 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D4__USBPHY2_DATAOUT_4 0x1c0 0x55c 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D5__EPDC_SDDO_5 0x1c4 0x560 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D5__GPIO3_5 0x1c4 0x560 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D5__EIM_WEIM_D_5 0x1c4 0x560 0x800 0x2 0x1
+#define MX50_PAD_EPDC_D5__SDMA_DEBUG_EVT_CHN_LINES_5 0x1c4 0x560 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D5__USBPHY2_DATAOUT_5 0x1c4 0x560 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D6__EPDC_SDDO_6 0x1c8 0x564 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D6__GPIO3_6 0x1c8 0x564 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D6__EIM_WEIM_D_6 0x1c8 0x564 0x804 0x2 0x1
+#define MX50_PAD_EPDC_D6__SDMA_DEBUG_EVT_CHN_LINES_6 0x1c8 0x564 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D6__USBPHY2_DATAOUT_6 0x1c8 0x564 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D7__EPDC_SDDO_7 0x1cc 0x568 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D7__GPIO3_7 0x1cc 0x568 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D7__EIM_WEIM_D_7 0x1cc 0x568 0x808 0x2 0x1
+#define MX50_PAD_EPDC_D7__SDMA_DEBUG_EVT_CHN_LINES_7 0x1cc 0x568 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D7__USBPHY2_DATAOUT_7 0x1cc 0x568 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D8__EPDC_SDDO_8 0x1d0 0x56c 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D8__GPIO3_8 0x1d0 0x56c 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D8__EIM_WEIM_D_8 0x1d0 0x56c 0x80c 0x2 0x2
+#define MX50_PAD_EPDC_D8__ELCDIF_DAT_24 0x1d0 0x56c 0x000 0x3 0x0
+#define MX50_PAD_EPDC_D8__SDMA_DEBUG_MATCHED_DMBUS 0x1d0 0x56c 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D8__USBPHY2_VSTATUS_0 0x1d0 0x56c 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D9__EPDC_SDDO_9 0x1d4 0x570 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D9__GPIO3_9 0x1d4 0x570 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D9__EIM_WEIM_D_9 0x1d4 0x570 0x810 0x2 0x2
+#define MX50_PAD_EPDC_D9__ELCDIF_DAT_25 0x1d4 0x570 0x000 0x3 0x0
+#define MX50_PAD_EPDC_D9__SDMA_DEBUG_EVENT_CHANNEL_SEL 0x1d4 0x570 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D9__USBPHY2_VSTATUS_1 0x1d4 0x570 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D10__EPDC_SDDO_10 0x1d8 0x574 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D10__GPIO3_10 0x1d8 0x574 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D10__EIM_WEIM_D_10 0x1d8 0x574 0x814 0x2 0x2
+#define MX50_PAD_EPDC_D10__ELCDIF_DAT_26 0x1d8 0x574 0x000 0x3 0x0
+#define MX50_PAD_EPDC_D10__SDMA_DEBUG_EVENT_CHANNEL_0 0x1d8 0x574 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D10__USBPHY2_VSTATUS_2 0x1d8 0x574 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D11__EPDC_SDDO_11 0x1dc 0x578 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D11__GPIO3_11 0x1dc 0x578 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D11__EIM_WEIM_D_11 0x1dc 0x578 0x818 0x2 0x2
+#define MX50_PAD_EPDC_D11__ELCDIF_DAT_27 0x1dc 0x578 0x000 0x3 0x0
+#define MX50_PAD_EPDC_D11__SDMA_DEBUG_EVENT_CHANNEL_1 0x1dc 0x578 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D11__USBPHY2_VSTATUS_3 0x1dc 0x578 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D12__EPDC_SDDO_12 0x1e0 0x57c 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D12__GPIO3_12 0x1e0 0x57c 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D12__EIM_WEIM_D_12 0x1e0 0x57c 0x81c 0x2 0x1
+#define MX50_PAD_EPDC_D12__ELCDIF_DAT_28 0x1e0 0x57c 0x000 0x3 0x0
+#define MX50_PAD_EPDC_D12__SDMA_DEBUG_EVENT_CHANNEL_2 0x1e0 0x57c 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D12__USBPHY2_VSTATUS_4 0x1e0 0x57c 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D13__EPDC_SDDO_13 0x1e4 0x580 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D13__GPIO3_13 0x1e4 0x580 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D13__EIM_WEIM_D_13 0x1e4 0x580 0x820 0x2 0x1
+#define MX50_PAD_EPDC_D13__ELCDIF_DAT_29 0x1e4 0x580 0x000 0x3 0x0
+#define MX50_PAD_EPDC_D13__SDMA_DEBUG_EVENT_CHANNEL_3 0x1e4 0x580 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D13__USBPHY2_VSTATUS_5 0x1e4 0x580 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D14__EPDC_SDDO_14 0x1e8 0x584 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D14__GPIO3_14 0x1e8 0x584 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D14__EIM_WEIM_D_14 0x1e8 0x584 0x824 0x2 0x1
+#define MX50_PAD_EPDC_D14__ELCDIF_DAT_30 0x1e8 0x584 0x000 0x3 0x0
+#define MX50_PAD_EPDC_D14__AUDMUX_AUD6_TXD 0x1e8 0x584 0x000 0x4 0x0
+#define MX50_PAD_EPDC_D14__SDMA_DEBUG_EVENT_CHANNEL_4 0x1e8 0x584 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D14__USBPHY2_VSTATUS_6 0x1e8 0x584 0x000 0x7 0x0
+#define MX50_PAD_EPDC_D15__EPDC_SDDO_15 0x1ec 0x588 0x000 0x0 0x0
+#define MX50_PAD_EPDC_D15__GPIO3_15 0x1ec 0x588 0x000 0x1 0x0
+#define MX50_PAD_EPDC_D15__EIM_WEIM_D_15 0x1ec 0x588 0x828 0x2 0x1
+#define MX50_PAD_EPDC_D15__ELCDIF_DAT_31 0x1ec 0x588 0x000 0x3 0x0
+#define MX50_PAD_EPDC_D15__AUDMUX_AUD6_TXC 0x1ec 0x588 0x000 0x4 0x0
+#define MX50_PAD_EPDC_D15__SDMA_DEBUG_EVENT_CHANNEL_5 0x1ec 0x588 0x000 0x6 0x0
+#define MX50_PAD_EPDC_D15__USBPHY2_VSTATUS_7 0x1ec 0x588 0x000 0x7 0x0
+#define MX50_PAD_EPDC_GDCLK__EPDC_GDCLK 0x1f0 0x58c 0x000 0x0 0x0
+#define MX50_PAD_EPDC_GDCLK__GPIO3_16 0x1f0 0x58c 0x000 0x1 0x0
+#define MX50_PAD_EPDC_GDCLK__EIM_WEIM_D_16 0x1f0 0x58c 0x000 0x2 0x0
+#define MX50_PAD_EPDC_GDCLK__ELCDIF_DAT_16 0x1f0 0x58c 0x000 0x3 0x0
+#define MX50_PAD_EPDC_GDCLK__AUDMUX_AUD6_TXFS 0x1f0 0x58c 0x000 0x4 0x0
+#define MX50_PAD_EPDC_GDCLK__SDMA_DEBUG_CORE_STATE_0 0x1f0 0x58c 0x000 0x6 0x0
+#define MX50_PAD_EPDC_GDCLK__USBPHY2_BISTOK 0x1f0 0x58c 0x000 0x7 0x0
+#define MX50_PAD_EPDC_GDSP__EPCD_GDSP 0x1f4 0x590 0x000 0x0 0x0
+#define MX50_PAD_EPDC_GDSP__GPIO3_17 0x1f4 0x590 0x000 0x1 0x0
+#define MX50_PAD_EPDC_GDSP__EIM_WEIM_D_17 0x1f4 0x590 0x000 0x2 0x0
+#define MX50_PAD_EPDC_GDSP__ELCDIF_DAT_17 0x1f4 0x590 0x000 0x3 0x0
+#define MX50_PAD_EPDC_GDSP__AUDMUX_AUD6_RXD 0x1f4 0x590 0x000 0x4 0x0
+#define MX50_PAD_EPDC_GDSP__SDMA_DEBUG_CORE_STATE_1 0x1f4 0x590 0x000 0x6 0x0
+#define MX50_PAD_EPDC_GDSP__USBPHY2_BVALID 0x1f4 0x590 0x000 0x7 0x0
+#define MX50_PAD_EPDC_GDOE__EPCD_GDOE 0x1f8 0x594 0x000 0x0 0x0
+#define MX50_PAD_EPDC_GDOE__GPIO3_18 0x1f8 0x594 0x000 0x1 0x0
+#define MX50_PAD_EPDC_GDOE__EIM_WEIM_D_18 0x1f8 0x594 0x000 0x2 0x0
+#define MX50_PAD_EPDC_GDOE__ELCDIF_DAT_18 0x1f8 0x594 0x000 0x3 0x0
+#define MX50_PAD_EPDC_GDOE__AUDMUX_AUD6_RXC 0x1f8 0x594 0x000 0x4 0x0
+#define MX50_PAD_EPDC_GDOE__SDMA_DEBUG_CORE_STATE_2 0x1f8 0x594 0x000 0x6 0x0
+#define MX50_PAD_EPDC_GDOE__USBPHY2_ENDSESSION 0x1f8 0x594 0x000 0x7 0x0
+#define MX50_PAD_EPDC_GDRL__EPCD_GDRL 0x1fc 0x598 0x000 0x0 0x0
+#define MX50_PAD_EPDC_GDRL__GPIO3_19 0x1fc 0x598 0x000 0x1 0x0
+#define MX50_PAD_EPDC_GDRL__EIM_WEIM_D_19 0x1fc 0x598 0x000 0x2 0x0
+#define MX50_PAD_EPDC_GDRL__ELCDIF_DAT_19 0x1fc 0x598 0x000 0x3 0x0
+#define MX50_PAD_EPDC_GDRL__AUDMUX_AUD6_RXFS 0x1fc 0x598 0x000 0x4 0x0
+#define MX50_PAD_EPDC_GDRL__SDMA_DEBUG_CORE_STATE_3 0x1fc 0x598 0x000 0x6 0x0
+#define MX50_PAD_EPDC_GDRL__USBPHY2_IDDIG 0x1fc 0x598 0x000 0x7 0x0
+#define MX50_PAD_EPDC_SDCLK__EPCD_SDCLK 0x200 0x59c 0x000 0x0 0x0
+#define MX50_PAD_EPDC_SDCLK__GPIO3_20 0x200 0x59c 0x000 0x1 0x0
+#define MX50_PAD_EPDC_SDCLK__EIM_WEIM_D_20 0x200 0x59c 0x000 0x2 0x0
+#define MX50_PAD_EPDC_SDCLK__ELCDIF_DAT_20 0x200 0x59c 0x000 0x3 0x0
+#define MX50_PAD_EPDC_SDCLK__AUDMUX_AUD5_TXD 0x200 0x59c 0x000 0x4 0x0
+#define MX50_PAD_EPDC_SDCLK__SDMA_DEBUG_BUS_DEVICE_0 0x200 0x59c 0x000 0x6 0x0
+#define MX50_PAD_EPDC_SDCLK__USBPHY2_HOSTDISCONNECT 0x200 0x59c 0x000 0x7 0x0
+#define MX50_PAD_EPDC_SDOEZ__EPCD_SDOEZ 0x204 0x5a0 0x000 0x0 0x0
+#define MX50_PAD_EPDC_SDOEZ__GPIO3_21 0x204 0x5a0 0x000 0x1 0x0
+#define MX50_PAD_EPDC_SDOEZ__EIM_WEIM_D_21 0x204 0x5a0 0x000 0x2 0x0
+#define MX50_PAD_EPDC_SDOEZ__ELCDIF_DAT_21 0x204 0x5a0 0x000 0x3 0x0
+#define MX50_PAD_EPDC_SDOEZ__AUDMUX_AUD5_TXC 0x204 0x5a0 0x000 0x4 0x0
+#define MX50_PAD_EPDC_SDOEZ__SDMA_DEBUG_BUS_DEVICE_1 0x204 0x5a0 0x000 0x6 0x0
+#define MX50_PAD_EPDC_SDOEZ__USBPHY2_TXREADY 0x204 0x5a0 0x000 0x7 0x0
+#define MX50_PAD_EPDC_SDOED__EPCD_SDOED 0x208 0x5a4 0x000 0x0 0x0
+#define MX50_PAD_EPDC_SDOED__GPIO3_22 0x208 0x5a4 0x000 0x1 0x0
+#define MX50_PAD_EPDC_SDOED__EIM_WEIM_D_22 0x208 0x5a4 0x000 0x2 0x0
+#define MX50_PAD_EPDC_SDOED__ELCDIF_DAT_22 0x208 0x5a4 0x000 0x3 0x0
+#define MX50_PAD_EPDC_SDOED__AUDMUX_AUD5_TXFS 0x208 0x5a4 0x000 0x4 0x0
+#define MX50_PAD_EPDC_SDOED__SDMA_DEBUG_BUS_DEVICE_2 0x208 0x5a4 0x000 0x6 0x0
+#define MX50_PAD_EPDC_SDOED__USBPHY2_RXVALID 0x208 0x5a4 0x000 0x7 0x0
+#define MX50_PAD_EPDC_SDOE__EPCD_SDOE 0x20c 0x5a8 0x000 0x0 0x0
+#define MX50_PAD_EPDC_SDOE__GPIO3_23 0x20c 0x5a8 0x000 0x1 0x0
+#define MX50_PAD_EPDC_SDOE__EIM_WEIM_D_23 0x20c 0x5a8 0x000 0x2 0x0
+#define MX50_PAD_EPDC_SDOE__ELCDIF_DAT_23 0x20c 0x5a8 0x000 0x3 0x0
+#define MX50_PAD_EPDC_SDOE__AUDMUX_AUD5_RXD 0x20c 0x5a8 0x000 0x4 0x0
+#define MX50_PAD_EPDC_SDOE__SDMA_DEBUG_BUS_DEVICE_3 0x20c 0x5a8 0x000 0x6 0x0
+#define MX50_PAD_EPDC_SDOE__USBPHY2_RXACTIVE 0x20c 0x5a8 0x000 0x7 0x0
+#define MX50_PAD_EPDC_SDLE__EPCD_SDLE 0x210 0x5ac 0x000 0x0 0x0
+#define MX50_PAD_EPDC_SDLE__GPIO3_24 0x210 0x5ac 0x000 0x1 0x0
+#define MX50_PAD_EPDC_SDLE__EIM_WEIM_D_24 0x210 0x5ac 0x000 0x2 0x0
+#define MX50_PAD_EPDC_SDLE__ELCDIF_DAT_8 0x210 0x5ac 0x71c 0x3 0x1
+#define MX50_PAD_EPDC_SDLE__AUDMUX_AUD5_RXC 0x210 0x5ac 0x000 0x4 0x0
+#define MX50_PAD_EPDC_SDLE__SDMA_DEBUG_BUS_DEVICE_4 0x210 0x5ac 0x000 0x6 0x0
+#define MX50_PAD_EPDC_SDLE__USBPHY2_RXERROR 0x210 0x5ac 0x000 0x7 0x0
+#define MX50_PAD_EPDC_SDCLKN__EPCD_SDCLKN 0x214 0x5b0 0x000 0x0 0x0
+#define MX50_PAD_EPDC_SDCLKN__GPIO3_25 0x214 0x5b0 0x000 0x1 0x0
+#define MX50_PAD_EPDC_SDCLKN__EIM_WEIM_D_25 0x214 0x5b0 0x000 0x2 0x0
+#define MX50_PAD_EPDC_SDCLKN__ELCDIF_DAT_9 0x214 0x5b0 0x720 0x3 0x1
+#define MX50_PAD_EPDC_SDCLKN__AUDMUX_AUD5_RXFS 0x214 0x5b0 0x000 0x4 0x0
+#define MX50_PAD_EPDC_SDCLKN__SDMA_DEBUG_BUS_ERROR 0x214 0x5b0 0x000 0x6 0x0
+#define MX50_PAD_EPDC_SDCLKN__USBPHY2_SIECLOCK 0x214 0x5b0 0x000 0x7 0x0
+#define MX50_PAD_EPDC_SDSHR__EPCD_SDSHR 0x218 0x5b4 0x000 0x0 0x0
+#define MX50_PAD_EPDC_SDSHR__GPIO3_26 0x218 0x5b4 0x000 0x1 0x0
+#define MX50_PAD_EPDC_SDSHR__EIM_WEIM_D_26 0x218 0x5b4 0x000 0x2 0x0
+#define MX50_PAD_EPDC_SDSHR__ELCDIF_DAT_10 0x218 0x5b4 0x724 0x3 0x1
+#define MX50_PAD_EPDC_SDSHR__AUDMUX_AUD4_TXD 0x218 0x5b4 0x6c8 0x4 0x1
+#define MX50_PAD_EPDC_SDSHR__SDMA_DEBUG_BUS_RWB 0x218 0x5b4 0x000 0x6 0x0
+#define MX50_PAD_EPDC_SDSHR__USBPHY2_LINESTATE_0 0x218 0x5b4 0x000 0x7 0x0
+#define MX50_PAD_EPDC_PWRCOM__EPCD_PWRCOM 0x21c 0x5b8 0x000 0x0 0x0
+#define MX50_PAD_EPDC_PWRCOM__GPIO3_27 0x21c 0x5b8 0x000 0x1 0x0
+#define MX50_PAD_EPDC_PWRCOM__EIM_WEIM_D_27 0x21c 0x5b8 0x000 0x2 0x0
+#define MX50_PAD_EPDC_PWRCOM__ELCDIF_DAT_11 0x21c 0x5b8 0x728 0x3 0x1
+#define MX50_PAD_EPDC_PWRCOM__AUDMUX_AUD4_TXC 0x21c 0x5b8 0x6d4 0x4 0x1
+#define MX50_PAD_EPDC_PWRCOM__SDMA_DEBUG_CORE_RUN 0x21c 0x5b8 0x000 0x6 0x0
+#define MX50_PAD_EPDC_PWRCOM__USBPHY2_LINESTATE_1 0x21c 0x5b8 0x000 0x7 0x0
+#define MX50_PAD_EPDC_PWRSTAT__EPCD_PWRSTAT 0x220 0x5bc 0x000 0x0 0x0
+#define MX50_PAD_EPDC_PWRSTAT__GPIO3_28 0x220 0x5bc 0x000 0x1 0x0
+#define MX50_PAD_EPDC_PWRSTAT__EIM_WEIM_D_28 0x220 0x5bc 0x000 0x2 0x0
+#define MX50_PAD_EPDC_PWRSTAT__ELCDIF_DAT_12 0x220 0x5bc 0x72c 0x3 0x1
+#define MX50_PAD_EPDC_PWRSTAT__AUDMUX_AUD4_TXFS 0x220 0x5bc 0x6d8 0x4 0x1
+#define MX50_PAD_EPDC_PWRSTAT__SDMA_DEBUG_MODE 0x220 0x5bc 0x000 0x6 0x0
+#define MX50_PAD_EPDC_PWRSTAT__USBPHY2_VBUSVALID 0x220 0x5bc 0x000 0x7 0x0
+#define MX50_PAD_EPDC_PWRCTRL0__EPCD_PWRCTRL0 0x224 0x5c0 0x000 0x0 0x0
+#define MX50_PAD_EPDC_PWRCTRL0__GPIO3_29 0x224 0x5c0 0x000 0x1 0x0
+#define MX50_PAD_EPDC_PWRCTRL0__EIM_WEIM_D_29 0x224 0x5c0 0x000 0x2 0x0
+#define MX50_PAD_EPDC_PWRCTRL0__ELCDIF_DAT_13 0x224 0x5c0 0x730 0x3 0x1
+#define MX50_PAD_EPDC_PWRCTRL0__AUDMUX_AUD4_RXD 0x224 0x5c0 0x6c4 0x4 0x1
+#define MX50_PAD_EPDC_PWRCTRL0__SDMA_DEBUG_RTBUFFER_WRITE 0x224 0x5c0 0x000 0x6 0x0
+#define MX50_PAD_EPDC_PWRCTRL0__USBPHY2_AVALID 0x224 0x5c0 0x000 0x7 0x0
+#define MX50_PAD_EPDC_PWRCTRL1__EPCD_PWRCTRL1 0x228 0x5c4 0x000 0x0 0x0
+#define MX50_PAD_EPDC_PWRCTRL1__GPIO3_30 0x228 0x5c4 0x000 0x1 0x0
+#define MX50_PAD_EPDC_PWRCTRL1__EIM_WEIM_D_30 0x228 0x5c4 0x000 0x2 0x0
+#define MX50_PAD_EPDC_PWRCTRL1__ELCDIF_DAT_14 0x228 0x5c4 0x734 0x3 0x1
+#define MX50_PAD_EPDC_PWRCTRL1__AUDMUX_AUD4_RXC 0x228 0x5c4 0x6cc 0x4 0x1
+#define MX50_PAD_EPDC_PWRCTRL1__SDMA_DEBUG_YIELD 0x228 0x5c4 0x000 0x6 0x0
+#define MX50_PAD_EPDC_PWRCTRL1__USBPHY1_ONBIST 0x228 0x5c4 0x000 0x7 0x0
+#define MX50_PAD_EPDC_PWRCTRL2__EPCD_PWRCTRL2 0x22c 0x5c8 0x000 0x0 0x0
+#define MX50_PAD_EPDC_PWRCTRL2__GPIO3_31 0x22c 0x5c8 0x000 0x1 0x0
+#define MX50_PAD_EPDC_PWRCTRL2__EIM_WEIM_D_31 0x22c 0x5c8 0x000 0x2 0x0
+#define MX50_PAD_EPDC_PWRCTRL2__ELCDIF_DAT_15 0x22c 0x5c8 0x738 0x3 0x1
+#define MX50_PAD_EPDC_PWRCTRL2__AUDMUX_AUD4_RXFS 0x22c 0x5c8 0x6d0 0x4 0x1
+#define MX50_PAD_EPDC_PWRCTRL2__SDMA_EXT_EVENT_0 0x22c 0x5c8 0x7b8 0x6 0x1
+#define MX50_PAD_EPDC_PWRCTRL2__USBPHY2_ONBIST 0x22c 0x5c8 0x000 0x7 0x0
+#define MX50_PAD_EPDC_PWRCTRL3__EPCD_PWRCTRL3 0x230 0x5cc 0x000 0x0 0x0
+#define MX50_PAD_EPDC_PWRCTRL3__GPIO4_20 0x230 0x5cc 0x000 0x1 0x0
+#define MX50_PAD_EPDC_PWRCTRL3__EIM_WEIM_EB_2 0x230 0x5cc 0x000 0x2 0x0
+#define MX50_PAD_EPDC_PWRCTRL3__SDMA_EXT_EVENT_1 0x230 0x5cc 0x7bc 0x6 0x1
+#define MX50_PAD_EPDC_PWRCTRL3__USBPHY1_BISTOK 0x230 0x5cc 0x000 0x7 0x0
+#define MX50_PAD_EPDC_VCOM0__EPCD_VCOM_0 0x234 0x5d0 0x000 0x0 0x0
+#define MX50_PAD_EPDC_VCOM0__GPIO4_21 0x234 0x5d0 0x000 0x1 0x0
+#define MX50_PAD_EPDC_VCOM0__EIM_WEIM_EB_3 0x234 0x5d0 0x000 0x2 0x0
+#define MX50_PAD_EPDC_VCOM0__USBPHY2_BISTOK 0x234 0x5d0 0x000 0x7 0x0
+#define MX50_PAD_EPDC_VCOM1__EPCD_VCOM_1 0x238 0x5d4 0x000 0x0 0x0
+#define MX50_PAD_EPDC_VCOM1__GPIO4_22 0x238 0x5d4 0x000 0x1 0x0
+#define MX50_PAD_EPDC_VCOM1__EIM_WEIM_CS_3 0x238 0x5d4 0x000 0x2 0x0
+#define MX50_PAD_EPDC_BDR0__EPCD_BDR_0 0x23c 0x5d8 0x000 0x0 0x0
+#define MX50_PAD_EPDC_BDR0__GPIO4_23 0x23c 0x5d8 0x000 0x1 0x0
+#define MX50_PAD_EPDC_BDR0__ELCDIF_DAT_7 0x23c 0x5d8 0x718 0x3 0x1
+#define MX50_PAD_EPDC_BDR1__EPCD_BDR_1 0x240 0x5dc 0x000 0x0 0x0
+#define MX50_PAD_EPDC_BDR1__GPIO4_24 0x240 0x5dc 0x000 0x1 0x0
+#define MX50_PAD_EPDC_BDR1__ELCDIF_DAT_6 0x240 0x5dc 0x714 0x3 0x1
+#define MX50_PAD_EPDC_SDCE0__EPCD_SDCE_0 0x244 0x5e0 0x000 0x0 0x0
+#define MX50_PAD_EPDC_SDCE0__GPIO4_25 0x244 0x5e0 0x000 0x1 0x0
+#define MX50_PAD_EPDC_SDCE0__ELCDIF_DAT_5 0x244 0x5e0 0x710 0x3 0x1
+#define MX50_PAD_EPDC_SDCE1__EPCD_SDCE_1 0x248 0x5e4 0x000 0x0 0x0
+#define MX50_PAD_EPDC_SDCE1__GPIO4_26 0x248 0x5e4 0x000 0x1 0x0
+#define MX50_PAD_EPDC_SDCE1__ELCDIF_DAT_4 0x248 0x5e4 0x70c 0x3 0x0
+#define MX50_PAD_EPDC_SDCE2__EPCD_SDCE_2 0x24c 0x5e8 0x000 0x0 0x0
+#define MX50_PAD_EPDC_SDCE2__GPIO4_27 0x24c 0x5e8 0x000 0x1 0x0
+#define MX50_PAD_EPDC_SDCE2__ELCDIF_DAT_3 0x24c 0x5e8 0x708 0x3 0x1
+#define MX50_PAD_EPDC_SDCE3__EPCD_SDCE_3 0x250 0x5ec 0x000 0x0 0x0
+#define MX50_PAD_EPDC_SDCE3__GPIO4_28 0x250 0x5ec 0x000 0x1 0x0
+#define MX50_PAD_EPDC_SDCE3__ELCDIF_DAT_2 0x250 0x5ec 0x704 0x3 0x1
+#define MX50_PAD_EPDC_SDCE4__EPCD_SDCE_4 0x254 0x5f0 0x000 0x0 0x0
+#define MX50_PAD_EPDC_SDCE4__GPIO4_29 0x254 0x5f0 0x000 0x1 0x0
+#define MX50_PAD_EPDC_SDCE4__ELCDIF_DAT_1 0x254 0x5f0 0x700 0x3 0x1
+#define MX50_PAD_EPDC_SDCE5__EPCD_SDCE_5 0x258 0x5f4 0x000 0x0 0x0
+#define MX50_PAD_EPDC_SDCE5__GPIO4_30 0x258 0x5f4 0x000 0x1 0x0
+#define MX50_PAD_EPDC_SDCE5__ELCDIF_DAT_0 0x258 0x5f4 0x6fc 0x3 0x1
+#define MX50_PAD_EIM_DA0__EIM_WEIM_A_0 0x25c 0x5f8 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA0__GPIO1_0 0x25c 0x5f8 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA0__KPP_COL_4 0x25c 0x5f8 0x790 0x3 0x2
+#define MX50_PAD_EIM_DA0__TPIU_TRACE_0 0x25c 0x5f8 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA0__SRC_BT_CFG1_0 0x25c 0x5f8 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA1__EIM_WEIM_A_1 0x260 0x5fc 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA1__GPIO1_1 0x260 0x5fc 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA1__KPP_ROW_4 0x260 0x5fc 0x7a0 0x3 0x2
+#define MX50_PAD_EIM_DA1__TPIU_TRACE_1 0x260 0x5fc 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA1__SRC_BT_CFG1_1 0x260 0x5fc 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA2__EIM_WEIM_A_2 0x264 0x600 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA2__GPIO1_2 0x264 0x600 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA2__KPP_COL_5 0x264 0x600 0x794 0x3 0x2
+#define MX50_PAD_EIM_DA2__TPIU_TRACE_2 0x264 0x600 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA2__SRC_BT_CFG1_2 0x264 0x600 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA3__EIM_WEIM_A_3 0x268 0x604 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA3__GPIO1_3 0x268 0x604 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA3__KPP_ROW_5 0x268 0x604 0x7a4 0x3 0x2
+#define MX50_PAD_EIM_DA3__TPIU_TRACE_3 0x268 0x604 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA3__SRC_BT_CFG1_3 0x268 0x604 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA4__EIM_WEIM_A_4 0x26c 0x608 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA4__GPIO1_4 0x26c 0x608 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA4__KPP_COL_6 0x26c 0x608 0x798 0x3 0x2
+#define MX50_PAD_EIM_DA4__TPIU_TRACE_4 0x26c 0x608 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA4__SRC_BT_CFG1_4 0x26c 0x608 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA5__EIM_WEIM_A_5 0x270 0x60c 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA5__GPIO1_5 0x270 0x60c 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA5__KPP_ROW_6 0x270 0x60c 0x7a8 0x3 0x2
+#define MX50_PAD_EIM_DA5__TPIU_TRACE_5 0x270 0x60c 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA5__SRC_BT_CFG1_5 0x270 0x60c 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA6__EIM_WEIM_A_6 0x274 0x610 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA6__GPIO1_6 0x274 0x610 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA6__KPP_COL_7 0x274 0x610 0x79c 0x3 0x2
+#define MX50_PAD_EIM_DA6__TPIU_TRACE_6 0x274 0x610 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA6__SRC_BT_CFG1_6 0x274 0x610 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA7__EIM_WEIM_A_7 0x278 0x614 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA7__GPIO1_7 0x278 0x614 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA7__KPP_ROW_7 0x278 0x614 0x7ac 0x3 0x2
+#define MX50_PAD_EIM_DA7__TPIU_TRACE_7 0x278 0x614 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA7__SRC_BT_CFG1_7 0x278 0x614 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA8__EIM_WEIM_A_8 0x27c 0x618 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA8__GPIO1_8 0x27c 0x618 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA8__EIM_NANDF_CLE 0x27c 0x618 0x000 0x2 0x0
+#define MX50_PAD_EIM_DA8__TPIU_TRACE_8 0x27c 0x618 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA8__SRC_BT_CFG2_0 0x27c 0x618 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA9__EIM_WEIM_A_9 0x280 0x61c 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA9__GPIO1_9 0x280 0x61c 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA9__EIM_NANDF_ALE 0x280 0x61c 0x000 0x2 0x0
+#define MX50_PAD_EIM_DA9__TPIU_TRACE_9 0x280 0x61c 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA9__SRC_BT_CFG2_1 0x280 0x61c 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA10__EIM_WEIM_A_10 0x284 0x620 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA10__GPIO1_10 0x284 0x620 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA10__EIM_NANDF_CEN_0 0x284 0x620 0x000 0x2 0x0
+#define MX50_PAD_EIM_DA10__TPIU_TRACE_10 0x284 0x620 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA10__SRC_BT_CFG2_2 0x284 0x620 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA11__EIM_WEIM_A_11 0x288 0x624 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA11__GPIO1_11 0x288 0x624 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA11__EIM_NANDF_CEN_1 0x288 0x624 0x000 0x2 0x0
+#define MX50_PAD_EIM_DA11__TPIU_TRACE_11 0x288 0x624 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA11__SRC_BT_CFG2_3 0x288 0x624 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA12__EIM_WEIM_A_12 0x28c 0x628 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA12__GPIO1_12 0x28c 0x628 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA12__EIM_NANDF_CEN_2 0x28c 0x628 0x000 0x2 0x0
+#define MX50_PAD_EIM_DA12__EPDC_SDCE_6 0x28c 0x628 0x000 0x3 0x0
+#define MX50_PAD_EIM_DA12__TPIU_TRACE_12 0x28c 0x628 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA12__SRC_BT_CFG2_4 0x28c 0x628 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA13__EIM_WEIM_A_13 0x290 0x62c 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA13__GPIO1_13 0x290 0x62c 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA13__EIM_NANDF_CEN_3 0x290 0x62c 0x000 0x2 0x0
+#define MX50_PAD_EIM_DA13__EPDC_SDCE_7 0x290 0x62c 0x000 0x3 0x0
+#define MX50_PAD_EIM_DA13__TPIU_TRACE_13 0x290 0x62c 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA13__SRC_BT_CFG2_5 0x290 0x62c 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA14__EIM_WEIM_A_14 0x294 0x630 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA14__GPIO1_14 0x294 0x630 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA14__EIM_NANDF_READY0 0x294 0x630 0x7b4 0x2 0x2
+#define MX50_PAD_EIM_DA14__EPDC_SDCE_8 0x294 0x630 0x000 0x3 0x0
+#define MX50_PAD_EIM_DA14__TPIU_TRACE_14 0x294 0x630 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA14__SRC_BT_CFG2_6 0x294 0x630 0x000 0x7 0x0
+#define MX50_PAD_EIM_DA15__EIM_WEIM_A_15 0x298 0x634 0x000 0x0 0x0
+#define MX50_PAD_EIM_DA15__GPIO1_15 0x298 0x634 0x000 0x1 0x0
+#define MX50_PAD_EIM_DA15__EIM_NANDF_DQS 0x298 0x634 0x7b0 0x2 0x2
+#define MX50_PAD_EIM_DA15__EPDC_SDCE_9 0x298 0x634 0x000 0x3 0x0
+#define MX50_PAD_EIM_DA15__TPIU_TRACE_15 0x298 0x634 0x000 0x6 0x0
+#define MX50_PAD_EIM_DA15__SRC_BT_CFG2_7 0x298 0x634 0x000 0x7 0x0
+#define MX50_PAD_EIM_CS2__EIM_WEIM_CS_2 0x29c 0x638 0x000 0x0 0x0
+#define MX50_PAD_EIM_CS2__GPIO1_16 0x29c 0x638 0x000 0x1 0x0
+#define MX50_PAD_EIM_CS2__EIM_WEIM_A_27 0x29c 0x638 0x000 0x2 0x0
+#define MX50_PAD_EIM_CS2__TPIU_TRCLK 0x29c 0x638 0x000 0x6 0x0
+#define MX50_PAD_EIM_CS2__SRC_BT_CFG3_0 0x29c 0x638 0x000 0x7 0x0
+#define MX50_PAD_EIM_CS1__EIM_WEIM_CS_1 0x2a0 0x63c 0x000 0x0 0x0
+#define MX50_PAD_EIM_CS1__GPIO1_17 0x2a0 0x63c 0x000 0x1 0x0
+#define MX50_PAD_EIM_CS1__TPIU_TRCTL 0x2a0 0x63c 0x000 0x6 0x0
+#define MX50_PAD_EIM_CS1__SRC_BT_CFG3_1 0x2a0 0x63c 0x000 0x7 0x0
+#define MX50_PAD_EIM_CS0__EIM_WEIM_CS_0 0x2a4 0x640 0x000 0x0 0x0
+#define MX50_PAD_EIM_CS0__GPIO1_18 0x2a4 0x640 0x000 0x1 0x0
+#define MX50_PAD_EIM_CS0__SRC_BT_CFG3_2 0x2a4 0x640 0x000 0x7 0x0
+#define MX50_PAD_EIM_EB0__EIM_WEIM_EB_0 0x2a8 0x644 0x000 0x0 0x0
+#define MX50_PAD_EIM_EB0__GPIO1_19 0x2a8 0x644 0x000 0x1 0x0
+#define MX50_PAD_EIM_EB0__SRC_BT_CFG3_3 0x2a8 0x644 0x000 0x7 0x0
+#define MX50_PAD_EIM_EB1__EIM_WEIM_EB_1 0x2ac 0x648 0x000 0x0 0x0
+#define MX50_PAD_EIM_EB1__GPIO1_20 0x2ac 0x648 0x000 0x1 0x0
+#define MX50_PAD_EIM_EB1__SRC_BT_CFG3_4 0x2ac 0x648 0x000 0x7 0x0
+#define MX50_PAD_EIM_WAIT__EIM_WEIM_WAIT 0x2b0 0x64c 0x000 0x0 0x0
+#define MX50_PAD_EIM_WAIT__GPIO1_21 0x2b0 0x64c 0x000 0x1 0x0
+#define MX50_PAD_EIM_WAIT__EIM_WEIM_DTACK_B 0x2b0 0x64c 0x000 0x2 0x0
+#define MX50_PAD_EIM_WAIT__SRC_BT_CFG3_5 0x2b0 0x64c 0x000 0x7 0x0
+#define MX50_PAD_EIM_BCLK__EIM_WEIM_BCLK 0x2b4 0x650 0x000 0x0 0x0
+#define MX50_PAD_EIM_BCLK__GPIO1_22 0x2b4 0x650 0x000 0x1 0x0
+#define MX50_PAD_EIM_BCLK__SRC_BT_CFG3_6 0x2b4 0x650 0x000 0x7 0x0
+#define MX50_PAD_EIM_RDY__EIM_WEIM_RDY 0x2b8 0x654 0x000 0x0 0x0
+#define MX50_PAD_EIM_RDY__GPIO1_23 0x2b8 0x654 0x000 0x1 0x0
+#define MX50_PAD_EIM_RDY__SRC_BT_CFG3_7 0x2b8 0x654 0x000 0x7 0x0
+#define MX50_PAD_EIM_OE__EIM_WEIM_OE 0x2bc 0x658 0x000 0x0 0x0
+#define MX50_PAD_EIM_OE__GPIO1_24 0x2bc 0x658 0x000 0x1 0x0
+#define MX50_PAD_EIM_OE__INT_BOOT 0x2bc 0x658 0x000 0x7 0x0
+#define MX50_PAD_EIM_RW__EIM_WEIM_RW 0x2c0 0x65c 0x000 0x0 0x0
+#define MX50_PAD_EIM_RW__GPIO1_25 0x2c0 0x65c 0x000 0x1 0x0
+#define MX50_PAD_EIM_RW__SYSTEM_RST 0x2c0 0x65c 0x000 0x7 0x0
+#define MX50_PAD_EIM_LBA__EIM_WEIM_LBA 0x2c4 0x660 0x000 0x0 0x0
+#define MX50_PAD_EIM_LBA__GPIO1_26 0x2c4 0x660 0x000 0x1 0x0
+#define MX50_PAD_EIM_LBA__TESTER_ACK 0x2c4 0x660 0x000 0x7 0x0
+#define MX50_PAD_EIM_CRE__EIM_WEIM_CRE 0x2c8 0x664 0x000 0x0 0x0
+#define MX50_PAD_EIM_CRE__GPIO1_27 0x2c8 0x664 0x000 0x1 0x0
+
+#endif /* __DTS_IMX50_PINFUNC_H */
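
The five numbers after each pad name above follow the usual Freescale IOMUX pinctrl macro layout (mux register offset, pad-configuration register offset, select-input register offset, mux mode, select-input value); a board or SoC device tree then pairs such a macro with a pad-configuration value inside an fsl,pins property. A minimal sketch of that pairing, assuming a hypothetical group name (the pad macros come from the header above, and the 0x1e4 setting mirrors the UART groups defined in imx50.dtsi further down):

    &iomuxc {
        uart1 {
            /* hypothetical group; each entry is <pad macro> <pad-config value> */
            pinctrl_uart1_sketch: uart1grp-sketch {
                fsl,pins = <
                    MX50_PAD_UART1_TXD__UART1_TXD_MUX 0x1e4
                    MX50_PAD_UART1_RXD__UART1_RXD_MUX 0x1e4
                >;
            };
        };
    };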
diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
new file mode 100644
index 000000000000..970b6e4a0d14
--- /dev/null
+++ b/arch/arm/boot/dts/imx50.dtsi
@@ -0,0 +1,670 @@
+/*
+ * Copyright 2013 Greg Ungerer <gerg@uclinux.org>
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include "skeleton.dtsi"
+#include "imx50-pinfunc.h"
+
+/ {
+ aliases {
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ gpio5 = &gpio6;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a8";
+ reg = <0x0>;
+ };
+ };
+
+ tzic: tz-interrupt-controller@0fffc000 {
+ compatible = "fsl,imx50-tzic", "fsl,imx53-tzic", "fsl,tzic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x0fffc000 0x4000>;
+ };
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ckil {
+ compatible = "fsl,imx-ckil", "fixed-clock";
+ clock-frequency = <32768>;
+ };
+
+ ckih1 {
+ compatible = "fsl,imx-ckih1", "fixed-clock";
+ clock-frequency = <22579200>;
+ };
+
+ ckih2 {
+ compatible = "fsl,imx-ckih2", "fixed-clock";
+ clock-frequency = <0>;
+ };
+
+ osc {
+ compatible = "fsl,imx-osc", "fixed-clock";
+ clock-frequency = <24000000>;
+ };
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ interrupt-parent = <&tzic>;
+ ranges;
+
+ aips@50000000 { /* AIPS1 */
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x50000000 0x10000000>;
+ ranges;
+
+ spba@50000000 {
+ compatible = "fsl,spba-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x50000000 0x40000>;
+ ranges;
+
+ esdhc1: esdhc@50004000 {
+ compatible = "fsl,imx50-esdhc";
+ reg = <0x50004000 0x4000>;
+ interrupts = <1>;
+ clocks = <&clks 44>, <&clks 0>, <&clks 71>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ esdhc2: esdhc@50008000 {
+ compatible = "fsl,imx50-esdhc";
+ reg = <0x50008000 0x4000>;
+ interrupts = <2>;
+ clocks = <&clks 45>, <&clks 0>, <&clks 72>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ uart3: serial@5000c000 {
+ compatible = "fsl,imx50-uart", "fsl,imx21-uart";
+ reg = <0x5000c000 0x4000>;
+ interrupts = <33>;
+ clocks = <&clks 32>, <&clks 33>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ ecspi1: ecspi@50010000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx50-ecspi", "fsl,imx51-ecspi";
+ reg = <0x50010000 0x4000>;
+ interrupts = <36>;
+ clocks = <&clks 51>, <&clks 52>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ ssi2: ssi@50014000 {
+ compatible = "fsl,imx50-ssi", "fsl,imx21-ssi";
+ reg = <0x50014000 0x4000>;
+ interrupts = <30>;
+ clocks = <&clks 49>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
+ esdhc3: esdhc@50020000 {
+ compatible = "fsl,imx50-esdhc";
+ reg = <0x50020000 0x4000>;
+ interrupts = <3>;
+ clocks = <&clks 46>, <&clks 0>, <&clks 73>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ esdhc4: esdhc@50024000 {
+ compatible = "fsl,imx50-esdhc";
+ reg = <0x50024000 0x4000>;
+ interrupts = <4>;
+ clocks = <&clks 47>, <&clks 0>, <&clks 74>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+ status = "disabled";
+ };
+ };
+
+ usbotg: usb@53f80000 {
+ compatible = "fsl,imx50-usb", "fsl,imx27-usb";
+ reg = <0x53f80000 0x0200>;
+ interrupts = <18>;
+ clocks = <&clks 124>;
+ status = "disabled";
+ };
+
+ usbh1: usb@53f80200 {
+ compatible = "fsl,imx50-usb", "fsl,imx27-usb";
+ reg = <0x53f80200 0x0200>;
+ interrupts = <14>;
+ clocks = <&clks 125>;
+ status = "disabled";
+ };
+
+ usbh2: usb@53f80400 {
+ compatible = "fsl,imx50-usb", "fsl,imx27-usb";
+ reg = <0x53f80400 0x0200>;
+ interrupts = <16>;
+ clocks = <&clks 108>;
+ status = "disabled";
+ };
+
+ usbh3: usb@53f80600 {
+ compatible = "fsl,imx50-usb", "fsl,imx27-usb";
+ reg = <0x53f80600 0x0200>;
+ interrupts = <17>;
+ clocks = <&clks 108>;
+ status = "disabled";
+ };
+
+ gpio1: gpio@53f84000 {
+ compatible = "fsl,imx50-gpio", "fsl,imx35-gpio";
+ reg = <0x53f84000 0x4000>;
+ interrupts = <50 51>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@53f88000 {
+ compatible = "fsl,imx50-gpio", "fsl,imx35-gpio";
+ reg = <0x53f88000 0x4000>;
+ interrupts = <52 53>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@53f8c000 {
+ compatible = "fsl,imx50-gpio", "fsl,imx35-gpio";
+ reg = <0x53f8c000 0x4000>;
+ interrupts = <54 55>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@53f90000 {
+ compatible = "fsl,imx50-gpio", "fsl,imx35-gpio";
+ reg = <0x53f90000 0x4000>;
+ interrupts = <56 57>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ wdog1: wdog@53f98000 {
+ compatible = "fsl,imx50-wdt", "fsl,imx21-wdt";
+ reg = <0x53f98000 0x4000>;
+ interrupts = <58>;
+ clocks = <&clks 0>;
+ };
+
+ gpt: timer@53fa0000 {
+ compatible = "fsl,imx50-gpt", "fsl,imx31-gpt";
+ reg = <0x53fa0000 0x4000>;
+ interrupts = <39>;
+ clocks = <&clks 36>, <&clks 41>;
+ clock-names = "ipg", "per";
+ };
+
+ iomuxc: iomuxc@53fa8000 {
+ compatible = "fsl,imx50-iomuxc", "fsl,imx53-iomuxc";
+ reg = <0x53fa8000 0x4000>;
+ };
+
+ gpr: iomuxc-gpr@53fa8000 {
+ compatible = "fsl,imx50-iomuxc-gpr", "syscon";
+ reg = <0x53fa8000 0xc>;
+ };
+
+ pwm1: pwm@53fb4000 {
+ #pwm-cells = <2>;
+ compatible = "fsl,imx50-pwm", "fsl,imx27-pwm";
+ reg = <0x53fb4000 0x4000>;
+ clocks = <&clks 37>, <&clks 38>;
+ clock-names = "ipg", "per";
+ interrupts = <61>;
+ };
+
+ pwm2: pwm@53fb8000 {
+ #pwm-cells = <2>;
+ compatible = "fsl,imx50-pwm", "fsl,imx27-pwm";
+ reg = <0x53fb8000 0x4000>;
+ clocks = <&clks 39>, <&clks 40>;
+ clock-names = "ipg", "per";
+ interrupts = <94>;
+ };
+
+ uart1: serial@53fbc000 {
+ compatible = "fsl,imx50-uart", "fsl,imx21-uart";
+ reg = <0x53fbc000 0x4000>;
+ interrupts = <31>;
+ clocks = <&clks 28>, <&clks 29>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart2: serial@53fc0000 {
+ compatible = "fsl,imx50-uart", "fsl,imx21-uart";
+ reg = <0x53fc0000 0x4000>;
+ interrupts = <32>;
+ clocks = <&clks 30>, <&clks 31>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ src: src@53fd0000 {
+ compatible = "fsl,imx50-src", "fsl,imx51-src";
+ reg = <0x53fd0000 0x4000>;
+ #reset-cells = <1>;
+ };
+
+ clks: ccm@53fd4000{
+ compatible = "fsl,imx50-ccm";
+ reg = <0x53fd4000 0x4000>;
+ interrupts = <0 71 0x04 0 72 0x04>;
+ #clock-cells = <1>;
+ };
+
+ gpio5: gpio@53fdc000 {
+ compatible = "fsl,imx50-gpio", "fsl,imx35-gpio";
+ reg = <0x53fdc000 0x4000>;
+ interrupts = <103 104>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio6: gpio@53fe0000 {
+ compatible = "fsl,imx50-gpio", "fsl,imx35-gpio";
+ reg = <0x53fe0000 0x4000>;
+ interrupts = <105 106>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ i2c3: i2c@53fec000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx50-i2c", "fsl,imx21-i2c";
+ reg = <0x53fec000 0x4000>;
+ interrupts = <64>;
+ clocks = <&clks 88>;
+ status = "disabled";
+ };
+
+ uart4: serial@53ff0000 {
+ compatible = "fsl,imx50-uart", "fsl,imx21-uart";
+ reg = <0x53ff0000 0x4000>;
+ interrupts = <13>;
+ clocks = <&clks 65>, <&clks 66>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+ };
+
+ aips@60000000 { /* AIPS2 */
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x60000000 0x10000000>;
+ ranges;
+
+ uart5: serial@63f90000 {
+ compatible = "fsl,imx50-uart", "fsl,imx21-uart";
+ reg = <0x63f90000 0x4000>;
+ interrupts = <86>;
+ clocks = <&clks 67>, <&clks 68>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ owire: owire@63fa4000 {
+ compatible = "fsl,imx50-owire", "fsl,imx21-owire";
+ reg = <0x63fa4000 0x4000>;
+ clocks = <&clks 159>;
+ status = "disabled";
+ };
+
+ ecspi2: ecspi@63fac000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx50-ecspi", "fsl,imx51-ecspi";
+ reg = <0x63fac000 0x4000>;
+ interrupts = <37>;
+ clocks = <&clks 53>, <&clks 54>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ sdma: sdma@63fb0000 {
+ compatible = "fsl,imx50-sdma", "fsl,imx35-sdma";
+ reg = <0x63fb0000 0x4000>;
+ interrupts = <6>;
+ clocks = <&clks 56>, <&clks 56>;
+ clock-names = "ipg", "ahb";
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx50.bin";
+ };
+
+ cspi: cspi@63fc0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx50-cspi", "fsl,imx35-cspi";
+ reg = <0x63fc0000 0x4000>;
+ interrupts = <38>;
+ clocks = <&clks 55>, <&clks 55>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ i2c2: i2c@63fc4000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx50-i2c", "fsl,imx21-i2c";
+ reg = <0x63fc4000 0x4000>;
+ interrupts = <63>;
+ clocks = <&clks 35>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@63fc8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx50-i2c", "fsl,imx21-i2c";
+ reg = <0x63fc8000 0x4000>;
+ interrupts = <62>;
+ clocks = <&clks 34>;
+ status = "disabled";
+ };
+
+ ssi1: ssi@63fcc000 {
+ compatible = "fsl,imx50-ssi", "fsl,imx21-ssi";
+ reg = <0x63fcc000 0x4000>;
+ interrupts = <29>;
+ clocks = <&clks 48>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <29 28 27 26>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
+ audmux: audmux@63fd0000 {
+ compatible = "fsl,imx50-audmux", "fsl,imx31-audmux";
+ reg = <0x63fd0000 0x4000>;
+ status = "disabled";
+ };
+
+ fec: ethernet@63fec000 {
+ compatible = "fsl,imx53-fec", "fsl,imx25-fec";
+ reg = <0x63fec000 0x4000>;
+ interrupts = <87>;
+ clocks = <&clks 42>, <&clks 42>, <&clks 42>;
+ clock-names = "ipg", "ahb", "ptp";
+ status = "disabled";
+ };
+ };
+ };
+};
+
+&iomuxc {
+ cspi {
+ pinctrl_cspi_1: cspigrp-1 {
+ fsl,pins = <
+ MX50_PAD_CSPI_SCLK__CSPI_SCLK 0x00
+ MX50_PAD_CSPI_MISO__CSPI_MISO 0x00
+ MX50_PAD_CSPI_MOSI__CSPI_MOSI 0x00
+ MX50_PAD_CSPI_SS0__GPIO4_11 0xc4
+ MX50_PAD_ECSPI1_MOSI__CSPI_SS1 0xf4
+ >;
+ };
+ };
+
+ ecspi1 {
+ pinctrl_ecspi1_1: ecspi1grp-1 {
+ fsl,pins = <
+ MX50_PAD_ECSPI1_SCLK__ECSPI1_SCLK 0x00
+ MX50_PAD_ECSPI1_SS0__ECSPI1_SS0 0x00
+ MX50_PAD_ECSPI1_MISO__ECSPI1_MISO 0x00
+ MX50_PAD_ECSPI1_MOSI__ECSPI1_MOSI 0x00
+ >;
+ };
+ };
+
+ esdhc1 {
+ pinctrl_esdhc1_1: esdhc1grp-1 {
+ fsl,pins = <
+ MX50_PAD_SD1_D0__ESDHC1_DAT0 0x1d4
+ MX50_PAD_SD1_D1__ESDHC1_DAT1 0x1d4
+ MX50_PAD_SD1_D2__ESDHC1_DAT2 0x1d4
+ MX50_PAD_SD1_D3__ESDHC1_DAT3 0x1d4
+ MX50_PAD_SD1_CMD__ESDHC1_CMD 0x1e4
+ MX50_PAD_SD1_CLK__ESDHC1_CLK 0xd4
+ >;
+ };
+
+ pinctrl_esdhc1_2: esdhc1grp-2 {
+ fsl,pins = <
+ MX50_PAD_SD1_D0__ESDHC1_DAT0 0x1d4
+ MX50_PAD_SD1_D1__ESDHC1_DAT1 0x1d4
+ MX50_PAD_SD1_D2__ESDHC1_DAT2 0x1d4
+ MX50_PAD_SD1_D3__ESDHC1_DAT3 0x1d4
+ MX50_PAD_UART3_TXD__ESDHC1_DAT4 0x1d4
+ MX50_PAD_UART3_RXD__ESDHC1_DAT5 0x1d4
+ MX50_PAD_UART4_TXD__ESDHC1_DAT6 0x1d4
+ MX50_PAD_UART4_RXD__ESDHC1_DAT7 0x1d4
+ MX50_PAD_SD1_CMD__ESDHC1_CMD 0x14
+ MX50_PAD_SD1_CLK__ESDHC1_CLK 0xd4
+ >;
+ };
+ };
+
+ esdhc2 {
+ pinctrl_esdhc2_1: esdhc2grp-1 {
+ fsl,pins = <
+ MX50_PAD_SD2_CMD__ESDHC2_CMD 0x1e4
+ MX50_PAD_SD2_CLK__ESDHC2_CLK 0xd4
+ MX50_PAD_SD2_D0__ESDHC2_DAT0 0x1d4
+ MX50_PAD_SD2_D1__ESDHC2_DAT1 0x1d4
+ MX50_PAD_SD2_D2__ESDHC2_DAT2 0x1d4
+ MX50_PAD_SD2_D3__ESDHC2_DAT3 0x1d4
+ MX50_PAD_SD2_D4__ESDHC2_DAT4 0x1d4
+ MX50_PAD_SD2_D5__ESDHC2_DAT5 0x1d4
+ MX50_PAD_SD2_D6__ESDHC2_DAT6 0x1d4
+ MX50_PAD_SD2_D7__ESDHC2_DAT7 0x1d4
+ >;
+ };
+ };
+
+ esdhc3 {
+ pinctrl_esdhc3_1: esdhc3grp-1 {
+ fsl,pins = <
+ MX50_PAD_SD3_D0__ESDHC3_DAT0 0x1d4
+ MX50_PAD_SD3_D1__ESDHC3_DAT1 0x1d4
+ MX50_PAD_SD3_D2__ESDHC3_DAT2 0x1d4
+ MX50_PAD_SD3_D3__ESDHC3_DAT3 0x1d4
+ MX50_PAD_SD3_D4__ESDHC3_DAT4 0x1d4
+ MX50_PAD_SD3_D5__ESDHC3_DAT5 0x1d4
+ MX50_PAD_SD3_D6__ESDHC3_DAT6 0x1d4
+ MX50_PAD_SD3_D7__ESDHC3_DAT7 0x1d4
+ MX50_PAD_SD3_CMD__ESDHC3_CMD 0x1e4
+ MX50_PAD_SD3_CLK__ESDHC3_CLK 0xd4
+ >;
+ };
+ };
+
+ fec {
+ pinctrl_fec_1: fecgrp-1 {
+ fsl,pins = <
+ MX50_PAD_SSI_RXFS__FEC_MDC 0x80
+ MX50_PAD_SSI_RXC__FEC_MDIO 0x80
+ MX50_PAD_DISP_D0__FEC_TX_CLK 0x80
+ MX50_PAD_DISP_D1__FEC_RX_ERR 0x80
+ MX50_PAD_DISP_D2__FEC_RX_DV 0x80
+ MX50_PAD_DISP_D3__FEC_RDATA_1 0x80
+ MX50_PAD_DISP_D4__FEC_RDATA_0 0x80
+ MX50_PAD_DISP_D5__FEC_TX_EN 0x80
+ MX50_PAD_DISP_D6__FEC_TDATA_1 0x80
+ MX50_PAD_DISP_D7__FEC_TDATA_0 0x80
+ >;
+ };
+
+ pinctrl_fec_2: fecgrp-2 {
+ fsl,pins = <
+ MX50_PAD_I2C3_SCL__FEC_MDC 0x80
+ MX50_PAD_I2C3_SDA__FEC_MDIO 0x80
+ MX50_PAD_DISP_D0__FEC_TX_CLK 0x80
+ MX50_PAD_DISP_D10__FEC_RX_DV 0x80
+ MX50_PAD_DISP_D11__FEC_RDATA_1 0x80
+ MX50_PAD_DISP_D12__FEC_RDATA_0 0x80
+ MX50_PAD_DISP_D13__FEC_TX_EN 0x80
+ MX50_PAD_DISP_D14__FEC_TDATA_1 0x80
+ MX50_PAD_DISP_D15__FEC_TDATA_0 0x80
+ >;
+ };
+
+ };
+
+ i2c1 {
+ pinctrl_i2c1_1: i2c1grp-1 {
+ fsl,pins = <
+ MX50_PAD_I2C1_SDA__I2C1_SDA 0x12c
+ MX50_PAD_I2C1_SCL__I2C1_SCL 0x12c
+ >;
+ };
+ };
+
+ i2c2 {
+ pinctrl_i2c2_1: i2c2grp-1 {
+ fsl,pins = <
+ MX50_PAD_I2C2_SDA__I2C2_SDA 0x12c
+ MX50_PAD_I2C2_SCL__I2C2_SCL 0x12c
+ >;
+ };
+ };
+
+ i2c3 {
+ pinctrl_i2c3_1: i2c3grp-1 {
+ fsl,pins = <
+ MX50_PAD_I2C3_SDA__I2C3_SDA 0x12c
+ MX50_PAD_I2C3_SCL__I2C3_SCL 0x12c
+ >;
+ };
+ };
+
+ owire {
+ pinctrl_owire_1: owiregrp-1 {
+ fsl,pins = <
+ MX50_PAD_OWIRE__OWIRE_LINE 0x84
+ >;
+ };
+ };
+
+ uart1 {
+ pinctrl_uart1_1: uart1grp-1 {
+ fsl,pins = <
+ MX50_PAD_UART1_TXD__UART1_TXD_MUX 0x1e4
+ MX50_PAD_UART1_RXD__UART1_RXD_MUX 0x1e4
+ MX50_PAD_UART1_RTS__UART1_RTS 0x1e4
+ MX50_PAD_UART1_CTS__UART1_CTS 0x1e4
+ >;
+ };
+ };
+
+ uart2 {
+ pinctrl_uart2_1: uart2grp-1 {
+ fsl,pins = <
+ MX50_PAD_UART2_TXD__UART2_TXD_MUX 0x1e4
+ MX50_PAD_UART2_RXD__UART2_RXD_MUX 0x1e4
+ MX50_PAD_UART2_RTS__UART2_RTS 0x1e4
+ MX50_PAD_UART2_CTS__UART2_CTS 0x1e4
+ >;
+ };
+
+ pinctrl_uart2_2: uart2grp-2 {
+ fsl,pins = <
+ MX50_PAD_I2C1_SCL__UART2_TXD_MUX 0x1e4
+ MX50_PAD_I2C1_SDA__UART2_RXD_MUX 0x1e4
+ MX50_PAD_I2C2_SDA__UART2_RTS 0x1e4
+ MX50_PAD_I2C2_SCL__UART2_CTS 0x1e4
+ >;
+ };
+ };
+
+ uart3 {
+ pinctrl_uart3_1: uart3grp-1 {
+ fsl,pins = <
+ MX50_PAD_UART3_TXD__UART3_TXD_MUX 0x1e4
+ MX50_PAD_UART3_RXD__UART3_RXD_MUX 0x1e4
+ MX50_PAD_ECSPI1_SCLK__UART3_RTS 0x1e4
+ MX50_PAD_ECSPI1_MOSI__UART3_CTS 0x1e4
+ >;
+ };
+ };
+
+ uart4 {
+ pinctrl_uart4_1: uart4grp-1 {
+ fsl,pins = <
+ MX50_PAD_UART4_TXD__UART4_TXD_MUX 0x1e4
+ MX50_PAD_UART4_RXD__UART4_RXD_MUX 0x1e4
+ MX50_PAD_ECSPI1_MISO__UART4_RTS 0x1e4
+ MX50_PAD_ECSPI1_SS0__UART4_CTS 0x1e4
+ >;
+ };
+ };
+
+ uart5 {
+ pinctrl_uart5_1: uart5grp-1 {
+ fsl,pins = <
+ MX50_PAD_ECSPI2_MISO__UART5_TXD_MUX 0x1e4
+ MX50_PAD_ECSPI2_SS0__UART5_RXD_MUX 0x1e4
+ MX50_PAD_ECSPI2_SCLK__UART5_RTS 0x1e4
+ MX50_PAD_ECSPI2_MOSI__UART5_CTS 0x1e4
+ >;
+ };
+ };
+};
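
A board .dts built on this imx50.dtsi would include it, reference one of the pinctrl groups above and flip the matching node from "disabled" to "okay", the same pattern the i.MX51 and i.MX53 board files later in this patch follow. A minimal sketch, assuming a hypothetical board (the model, compatible string and memory size are placeholders; only uart1 and pinctrl_uart1_1 come from the file above):

    /dts-v1/;
    #include "imx50.dtsi"

    / {
        model = "Example i.MX50 board";         /* placeholder */
        compatible = "fsl,imx50";               /* placeholder, board compatible omitted */

        memory {
            reg = <0x70000000 0x20000000>;      /* placeholder: 512 MiB */
        };
    };

    &uart1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_uart1_1>;
        status = "okay";
    };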
diff --git a/arch/arm/boot/dts/imx51-apf51dev.dts b/arch/arm/boot/dts/imx51-apf51dev.dts
index 123fe84e0e8c..5a7f552786a1 100644
--- a/arch/arm/boot/dts/imx51-apf51dev.dts
+++ b/arch/arm/boot/dts/imx51-apf51dev.dts
@@ -16,6 +16,33 @@
model = "Armadeus Systems APF51Dev docking/development board";
compatible = "armadeus,imx51-apf51dev", "armadeus,imx51-apf51", "fsl,imx51";
+ display@di1 {
+ compatible = "fsl,imx-parallel-display";
+ crtcs = <&ipu 0>;
+ interface-pix-fmt = "bgr666";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ipu_disp1_1>;
+
+ display-timings {
+ lw700 {
+ native-mode;
+ clock-frequency = <33000033>;
+ hactive = <800>;
+ vactive = <480>;
+ hback-porch = <96>;
+ hfront-porch = <96>;
+ vback-porch = <20>;
+ vfront-porch = <21>;
+ hsync-len = <64>;
+ vsync-len = <4>;
+ hsync-active = <1>;
+ vsync-active = <1>;
+ de-active = <1>;
+ pixelclk-active = <0>;
+ };
+ };
+ };
+
gpio-keys {
compatible = "gpio-keys";
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 1d337d99ecd5..be1407cf5abd 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -27,6 +27,20 @@
interface-pix-fmt = "rgb24";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ipu_disp1_1>;
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: dvi {
+ clock-frequency = <65000000>;
+ hactive = <1024>;
+ vactive = <768>;
+ hback-porch = <220>;
+ hfront-porch = <40>;
+ vback-porch = <21>;
+ vfront-porch = <7>;
+ hsync-len = <60>;
+ vsync-len = <10>;
+ };
+ };
};
display@di1 {
@@ -35,6 +49,25 @@
interface-pix-fmt = "rgb565";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ipu_disp2_1>;
+ status = "disabled";
+ display-timings {
+ native-mode = <&timing1>;
+ timing1: claawvga {
+ clock-frequency = <27000000>;
+ hactive = <800>;
+ vactive = <480>;
+ hback-porch = <40>;
+ hfront-porch = <60>;
+ vback-porch = <10>;
+ vfront-porch = <10>;
+ hsync-len = <20>;
+ vsync-len = <10>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <0>;
+ };
+ };
};
gpio-keys {
@@ -95,7 +128,7 @@
&uart3 {
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart3_1>;
+ pinctrl-0 = <&pinctrl_uart3_1 &pinctrl_uart3_rtscts_1>;
fsl,uart-has-rtscts;
status = "okay";
};
@@ -252,7 +285,7 @@
&uart1 {
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart1_1>;
+ pinctrl-0 = <&pinctrl_uart1_1 &pinctrl_uart1_rtscts_1>;
fsl,uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx51-eukrea-cpuimx51.dtsi b/arch/arm/boot/dts/imx51-eukrea-cpuimx51.dtsi
new file mode 100644
index 000000000000..8638656f708c
--- /dev/null
+++ b/arch/arm/boot/dts/imx51-eukrea-cpuimx51.dtsi
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Eukréa Electromatique <denis@eukrea.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include "imx51.dtsi"
+
+/ {
+ model = "Eukrea CPUIMX51";
+ compatible = "eukrea,cpuimx51", "fsl,imx51";
+
+ memory {
+ reg = <0x90000000 0x10000000>; /* 256M */
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec_2>;
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_1>;
+ status = "okay";
+
+ pcf8563@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+};
+
+&iomuxc {
+ tsc2007 {
+ pinctrl_tsc2007_1: tsc2007grp-1 {
+ fsl,pins = <
+ MX51_PAD_GPIO_NAND__GPIO_NAND 0x1f5
+ MX51_PAD_NANDF_D8__GPIO4_0 0x1f5
+ >;
+ };
+ };
+};
+
+&nfc {
+ nand-bus-width = <8>;
+ nand-ecc-mode = "hw";
+ nand-on-flash-bbt;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts b/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
new file mode 100644
index 000000000000..64d8082761cc
--- /dev/null
+++ b/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2013 Eukréa Electromatique <denis@eukrea.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+/dts-v1/;
+#include "imx51-eukrea-cpuimx51.dtsi"
+
+/ {
+ model = "Eukrea CPUIMX51";
+ compatible = "eukrea,mbimxsd51","eukrea,cpuimx51", "fsl,imx51";
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpiokeys_1>;
+
+ button-1 {
+ label = "BP1";
+ gpios = <&gpio3 31 1>;
+ linux,code = <256>;
+ gpio-key,wakeup;
+ linux,input-type = <1>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpioled>;
+
+ led1 {
+ label = "led1";
+ gpios = <&gpio3 30 1>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux_1>;
+ status = "okay";
+};
+
+&esdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc1_1 &pinctrl_esdhc1_cd>;
+ fsl,cd-controller;
+ status = "okay";
+};
+
+&i2c1 {
+ tlv320aic23: codec@1a {
+ compatible = "ti,tlv320aic23";
+ reg = <0x1a>;
+ };
+};
+
+&iomuxc {
+ backlight {
+ pinctrl_backlight_1: backlightgrp-1 {
+ fsl,pins = <
+ MX51_PAD_DI1_D1_CS__GPIO3_4 0x1f5
+ >;
+ };
+ };
+
+ esdhc1 {
+ pinctrl_esdhc1_cd: esdhc1_cd {
+ fsl,pins = <
+ MX51_PAD_GPIO1_0__SD1_CD 0x20d5
+ >;
+ };
+ };
+
+ gpio-keys {
+ pinctrl_gpiokeys_1: gpiokeysgrp-1 {
+ fsl,pins = <
+ MX51_PAD_NANDF_D9__GPIO3_31 0x1f5
+ >;
+ };
+ };
+
+ leds {
+ pinctrl_gpioled: gpioledgrp-1 {
+ fsl,pins = <
+ MX51_PAD_NANDF_D10__GPIO3_30 0x80000000
+ >;
+ };
+ };
+
+ reg_lcd_3v3 {
+ pinctrl_reg_lcd_3v3: reg_lcd_3v3 {
+ fsl,pins = <
+ MX51_PAD_CSI1_D9__GPIO3_13 0x1f5
+ >;
+ };
+ };
+};
+
+&ssi2 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_1>;
+ fsl,uart-has-rtscts;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3_2 &pinctrl_uart3_rtscts_2>;
+ fsl,uart-has-rtscts;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 54cee6517902..48fa41b837e2 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -86,6 +86,11 @@
interrupt-parent = <&tzic>;
ranges;
+ iram: iram@1ffe0000 {
+ compatible = "mmio-sram";
+ reg = <0x1ffe0000 0x20000>;
+ };
+
ipu: ipu@40000000 {
#crtc-cells = <1>;
compatible = "fsl,imx51-ipu";
@@ -374,6 +379,14 @@
clocks = <&clks 107>;
};
+ owire: owire@83fa4000 {
+ compatible = "fsl,imx51-owire", "fsl,imx21-owire";
+ reg = <0x83fa4000 0x4000>;
+ interrupts = <88>;
+ clocks = <&clks 159>;
+ status = "disabled";
+ };
+
ecspi2: ecspi@83fac000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -608,6 +621,15 @@
};
};
+ i2c1 {
+ pinctrl_i2c1_1: i2c1grp-1 {
+ fsl,pins = <
+ MX51_PAD_SD2_CMD__I2C1_SCL 0x400001ed
+ MX51_PAD_SD2_CLK__I2C1_SDA 0x400001ed
+ >;
+ };
+ };
+
i2c2 {
pinctrl_i2c2_1: i2c2grp-1 {
fsl,pins = <
@@ -747,6 +769,11 @@
fsl,pins = <
MX51_PAD_UART1_RXD__UART1_RXD 0x1c5
MX51_PAD_UART1_TXD__UART1_TXD 0x1c5
+ >;
+ };
+
+ pinctrl_uart1_rtscts_1: uart1rtscts-1 {
+ fsl,pins = <
MX51_PAD_UART1_RTS__UART1_RTS 0x1c5
MX51_PAD_UART1_CTS__UART1_CTS 0x1c5
>;
@@ -767,6 +794,11 @@
fsl,pins = <
MX51_PAD_EIM_D25__UART3_RXD 0x1c5
MX51_PAD_EIM_D26__UART3_TXD 0x1c5
+ >;
+ };
+
+ pinctrl_uart3_rtscts_1: uart3rtscts-1 {
+ fsl,pins = <
MX51_PAD_EIM_D27__UART3_RTS 0x1c5
MX51_PAD_EIM_D24__UART3_CTS 0x1c5
>;
@@ -778,6 +810,13 @@
MX51_PAD_UART3_TXD__UART3_TXD 0x1c5
>;
};
+
+ pinctrl_uart3_rtscts_2: uart3rtscts-2 {
+ fsl,pins = <
+ MX51_PAD_KEY_COL4__UART3_RTS 0x1c5
+ MX51_PAD_KEY_COL5__UART3_CTS 0x1c5
+ >;
+ };
};
usbh1 {
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
index e97ddae09d74..91a5935a4aac 100644
--- a/arch/arm/boot/dts/imx53-qsb.dts
+++ b/arch/arm/boot/dts/imx53-qsb.dts
@@ -55,19 +55,20 @@
label = "Power Button";
gpios = <&gpio1 8 0>;
linux,code = <116>; /* KEY_POWER */
- gpio-key,wakeup;
};
volume-up {
label = "Volume Up";
gpios = <&gpio2 14 0>;
linux,code = <115>; /* KEY_VOLUMEUP */
+ gpio-key,wakeup;
};
volume-down {
label = "Volume Down";
gpios = <&gpio2 15 0>;
linux,code = <114>; /* KEY_VOLUMEDOWN */
+ gpio-key,wakeup;
};
};
@@ -122,7 +123,6 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1_1>;
- cd-gpios = <&gpio3 13 0>;
status = "okay";
};
@@ -136,6 +136,7 @@
pinctrl-0 = <&pinctrl_esdhc3_1>;
cd-gpios = <&gpio3 11 0>;
wp-gpios = <&gpio3 12 0>;
+ bus-width = <8>;
status = "okay";
};
@@ -152,7 +153,6 @@
MX53_PAD_PATA_DATA15__GPIO2_15 0x80000000
MX53_PAD_EIM_DA11__GPIO3_11 0x80000000
MX53_PAD_EIM_DA12__GPIO3_12 0x80000000
- MX53_PAD_EIM_DA13__GPIO3_13 0x80000000
MX53_PAD_PATA_DA_0__GPIO7_6 0x80000000
MX53_PAD_PATA_DA_2__GPIO7_8 0x80000000
MX53_PAD_GPIO_16__GPIO7_11 0x80000000
@@ -318,5 +318,6 @@
};
&usbotg {
- status = "okay";
+ dr_mode = "peripheral";
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-voipac-bsb.dts b/arch/arm/boot/dts/imx53-voipac-bsb.dts
new file mode 100644
index 000000000000..5c88c0e77fd3
--- /dev/null
+++ b/arch/arm/boot/dts/imx53-voipac-bsb.dts
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2013 Rostislav Lisovy <lisovy@gmail.com>, PiKRON s.r.o.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "imx53-voipac-dmm-668.dtsi"
+
+/ {
+ sound {
+ compatible = "fsl,imx53-voipac-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+ model = "imx53-voipac-sgtl5000";
+ ssi-controller = <&ssi2>;
+ audio-codec = <&sgtl5000>;
+ audio-routing =
+ "Headphone Jack", "HP_OUT";
+ mux-int-port = <2>;
+ mux-ext-port = <5>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pin_gpio>;
+
+ led1 {
+ label = "led-red";
+ gpios = <&gpio3 29 0>;
+ default-state = "off";
+ };
+
+ led2 {
+ label = "led-orange";
+ gpios = <&gpio2 31 0>;
+ default-state = "off";
+ };
+ };
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ /* SD2_CD */
+ MX53_PAD_EIM_D25__GPIO3_25 0x80000000
+ /* SD2_WP */
+ MX53_PAD_EIM_A19__GPIO2_19 0x80000000
+ >;
+ };
+
+ led_pin_gpio: led_gpio {
+ fsl,pins = <
+ MX53_PAD_EIM_D29__GPIO3_29 0x80000000
+ MX53_PAD_EIM_EB3__GPIO2_31 0x80000000
+ >;
+ };
+ };
+
+ /* Keyboard controller */
+ kpp {
+ pinctrl_kpp_1: kppgrp-1 {
+ fsl,pins = <
+ MX53_PAD_GPIO_9__KPP_COL_6 0xe8
+ MX53_PAD_GPIO_4__KPP_COL_7 0xe8
+ MX53_PAD_KEY_COL2__KPP_COL_2 0xe8
+ MX53_PAD_KEY_COL3__KPP_COL_3 0xe8
+ MX53_PAD_KEY_COL4__KPP_COL_4 0xe8
+
+ MX53_PAD_GPIO_2__KPP_ROW_6 0xe0
+ MX53_PAD_GPIO_5__KPP_ROW_7 0xe0
+ MX53_PAD_KEY_ROW2__KPP_ROW_2 0xe0
+ MX53_PAD_KEY_ROW3__KPP_ROW_3 0xe0
+ MX53_PAD_KEY_ROW4__KPP_ROW_4 0xe0
+ >;
+ };
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux_1>; /* SSI1 */
+ status = "okay";
+};
+
+&esdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc2_1>;
+ cd-gpios = <&gpio3 25 0>;
+ wp-gpios = <&gpio2 19 0>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
+
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3_2>;
+ status = "okay";
+
+ sgtl5000: codec@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ VDDA-supply = <&reg_3p3v>;
+ VDDIO-supply = <&reg_3p3v>;
+ clocks = <&clks 150>;
+ };
+};
+
+&kpp {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_kpp_1>;
+ linux,keymap = <
+ 0x0203003b /* KEY_F1 */
+ 0x0603003c /* KEY_F2 */
+ 0x0207003d /* KEY_F3 */
+ 0x0607003e /* KEY_F4 */
+ >;
+ keypad,num-rows = <8>;
+ keypad,num-columns = <1>;
+ status = "okay";
+};
+
+&ssi2 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi b/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi
new file mode 100644
index 000000000000..a7d03ada6514
--- /dev/null
+++ b/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2013 Rostislav Lisovy <lisovy@gmail.com>, PiKRON s.r.o.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include "imx53.dtsi"
+
+/ {
+ model = "Voipac i.MX53 X53-DMM-668";
+ compatible = "voipac,imx53-dmm-668", "fsl,imx53";
+
+ memory@70000000 {
+ device_type = "memory";
+ reg = <0x70000000 0x20000000>;
+ };
+
+ memory@b0000000 {
+ device_type = "memory";
+ reg = <0xb0000000 0x20000000>;
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ /* Make DA9053 regulator functional */
+ MX53_PAD_GPIO_16__GPIO7_11 0x80000000
+ /* FEC Power enable */
+ MX53_PAD_GPIO_11__GPIO4_1 0x80000000
+ /* FEC RST */
+ MX53_PAD_GPIO_12__GPIO4_2 0x80000000
+ >;
+ };
+ };
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1_1>;
+ fsl,spi-num-chipselects = <4>;
+ cs-gpios = <&gpio2 30 0>, <&gpio3 19 0>, <&gpio2 16 0>, <&gpio2 17 0>;
+ status = "okay";
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec_1>;
+ phy-mode = "rmii";
+ phy-reset-gpios = <&gpio4 2 0>;
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_2>;
+ status = "okay";
+
+ pmic: dialog@48 {
+ compatible = "dlg,da9053-aa", "dlg,da9052";
+ reg = <0x48>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <11 0x8>; /* low-level active IRQ at GPIO7_11 */
+
+ regulators {
+ buck1_reg: buck1 {
+ regulator-name = "BUCKCORE";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ };
+
+ buck2_reg: buck2 {
+ regulator-name = "BUCKPRO";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ };
+
+ buck3_reg: buck3 {
+ regulator-name = "BUCKMEM";
+ regulator-min-microvolt = <1420000>;
+ regulator-max-microvolt = <1580000>;
+ regulator-always-on;
+ };
+
+ buck4_reg: buck4 {
+ regulator-name = "BUCKPERI";
+ regulator-min-microvolt = <2370000>;
+ regulator-max-microvolt = <2630000>;
+ regulator-always-on;
+ };
+
+ ldo1_reg: ldo1 {
+ regulator-name = "ldo1_1v3";
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: ldo2 {
+ regulator-name = "ldo2_1v3";
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ };
+
+ ldo3_reg: ldo3 {
+ regulator-name = "ldo3_3v3";
+ regulator-min-microvolt = <3250000>;
+ regulator-max-microvolt = <3350000>;
+ regulator-always-on;
+ };
+
+ ldo4_reg: ldo4 {
+ regulator-name = "ldo4_2v775";
+ regulator-min-microvolt = <2770000>;
+ regulator-max-microvolt = <2780000>;
+ regulator-always-on;
+ };
+
+ ldo5_reg: ldo5 {
+ regulator-name = "ldo5_3v3";
+ regulator-min-microvolt = <3250000>;
+ regulator-max-microvolt = <3350000>;
+ regulator-always-on;
+ };
+
+ ldo6_reg: ldo6 {
+ regulator-name = "ldo6_1v3";
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ };
+
+ ldo7_reg: ldo7 {
+ regulator-name = "ldo7_2v75";
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ };
+
+ ldo8_reg: ldo8 {
+ regulator-name = "ldo8_1v8";
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <1850000>;
+ regulator-always-on;
+ };
+
+ ldo9_reg: ldo9 {
+ regulator-name = "ldo9_1v5";
+ regulator-min-microvolt = <1450000>;
+ regulator-max-microvolt = <1550000>;
+ regulator-always-on;
+ };
+
+ ldo10_reg: ldo10 {
+ regulator-name = "ldo10_1v3";
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_2>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 4307e80b2d2e..362eca0c9270 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -281,6 +281,14 @@
#interrupt-cells = <2>;
};
+ kpp: kpp@53f94000 {
+ compatible = "fsl,imx53-kpp", "fsl,imx21-kpp";
+ reg = <0x53f94000 0x4000>;
+ interrupts = <60>;
+ clocks = <&clks 0>;
+ status = "disabled";
+ };
+
wdog1: wdog@53f98000 {
compatible = "fsl,imx53-wdt", "fsl,imx21-wdt";
reg = <0x53f98000 0x4000>;
@@ -599,6 +607,13 @@
MX53_PAD_GPIO_5__I2C3_SCL 0xc0000000
>;
};
+
+ pinctrl_i2c3_2: i2c3grp-2 {
+ fsl,pins = <
+ MX53_PAD_GPIO_3__I2C3_SCL 0xc0000000
+ MX53_PAD_GPIO_6__I2C3_SDA 0xc0000000
+ >;
+ };
};
ipu_disp0 {
diff --git a/arch/arm/boot/dts/imx6dl-gw51xx.dts b/arch/arm/boot/dts/imx6dl-gw51xx.dts
new file mode 100644
index 000000000000..4bd055f4c930
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-gw51xx.dts
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2013 Gateworks Corporation
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-gw51xx.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 DualLite GW51XX";
+ compatible = "gw,imx6dl-gw51xx", "gw,ventana", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-gw52xx.dts b/arch/arm/boot/dts/imx6dl-gw52xx.dts
new file mode 100644
index 000000000000..c9136058f15e
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-gw52xx.dts
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2013 Gateworks Corporation
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-gw52xx.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 DualLite GW52XX";
+ compatible = "gw,imx6dl-gw52xx", "gw,ventana", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-gw53xx.dts b/arch/arm/boot/dts/imx6dl-gw53xx.dts
new file mode 100644
index 000000000000..61818a14fde6
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-gw53xx.dts
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2013 Gateworks Corporation
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-gw53xx.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 DualLite GW53XX";
+ compatible = "gw,imx6dl-gw53xx", "gw,ventana", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-gw54xx.dts b/arch/arm/boot/dts/imx6dl-gw54xx.dts
new file mode 100644
index 000000000000..ab38b6770a06
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-gw54xx.dts
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2013 Gateworks Corporation
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-gw54xx.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 DualLite GW54XX";
+ compatible = "gw,imx6dl-gw54xx", "gw,ventana", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index edf1bd967164..fb7a1fc1a510 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -31,6 +31,15 @@
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
+
+ reg_usb_otg_vbus: usb_otg_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 0>;
+ enable-active-high;
+ };
};
leds {
@@ -79,6 +88,14 @@
status = "okay";
};
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg_1>;
+ disable-over-current;
+ status = "okay";
+};
+
&usdhc3 {
cd-gpios = <&gpio6 11 0>;
wp-gpios = <&gpio6 14 0>;
diff --git a/arch/arm/boot/dts/imx6q-cm-fx6.dts b/arch/arm/boot/dts/imx6q-cm-fx6.dts
new file mode 100644
index 000000000000..2419751a7fb8
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-cm-fx6.dts
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2013 CompuLab Ltd.
+ *
+ * Author: Valentin Raevsky <valentin@compulab.co.il>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+
+/ {
+ model = "CompuLab CM-FX6";
+ compatible = "compulab,cm-fx6", "fsl,imx6q";
+
+ memory {
+ reg = <0x10000000 0x80000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ heartbeat-led {
+ label = "Heartbeat";
+ gpios = <&gpio2 31 0>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_1>;
+ phy-mode = "rgmii";
+ status = "okay";
+};
+
+&gpmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpmi_nand_1>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4_1>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-gw51xx.dts b/arch/arm/boot/dts/imx6q-gw51xx.dts
new file mode 100644
index 000000000000..af4929aee075
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-gw51xx.dts
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2013 Gateworks Corporation
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include "imx6qdl-gw54xx.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 Quad GW51XX";
+ compatible = "gw,imx6q-gw51xx", "gw,ventana", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/imx6q-gw52xx.dts b/arch/arm/boot/dts/imx6q-gw52xx.dts
new file mode 100644
index 000000000000..5f71ddbc7f05
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-gw52xx.dts
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2013 Gateworks Corporation
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include "imx6qdl-gw52xx.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 Quad GW52XX";
+ compatible = "gw,imx6q-gw52xx", "gw,ventana", "fsl,imx6q";
+};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-gw53xx.dts b/arch/arm/boot/dts/imx6q-gw53xx.dts
new file mode 100644
index 000000000000..360c316b4740
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-gw53xx.dts
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2013 Gateworks Corporation
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include "imx6qdl-gw53xx.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 Quad GW53XX";
+ compatible = "gw,imx6q-gw53xx", "gw,ventana", "fsl,imx6q";
+};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-gw5400-a.dts b/arch/arm/boot/dts/imx6q-gw5400-a.dts
new file mode 100644
index 000000000000..66662f958eb8
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-gw5400-a.dts
@@ -0,0 +1,443 @@
+/*
+ * Copyright 2013 Gateworks Corporation
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+
+/ {
+ model = "Gateworks Ventana GW5400-A";
+ compatible = "gw,imx6q-gw5400-a", "gw,ventana", "fsl,imx6q";
+
+ /* these are used by bootloader for disabling nodes */
+ aliases {
+ ethernet0 = &fec;
+ ethernet1 = &eth1;
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
+ led0 = &led0;
+ led1 = &led1;
+ led2 = &led2;
+ sky2 = &eth1;
+ ssi0 = &ssi1;
+ spi0 = &ecspi1;
+ usb0 = &usbh1;
+ usb1 = &usbotg;
+ usdhc2 = &usdhc3;
+ };
+
+ chosen {
+ bootargs = "console=ttymxc1,115200";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led0: user1 {
+ label = "user1";
+ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+ led1: user2 {
+ label = "user2";
+ gpios = <&gpio4 10 0>; /* 106 -> MX6_PANLEDR */
+ default-state = "off";
+ };
+
+ led2: user3 {
+ label = "user3";
+ gpios = <&gpio4 15 1>; /* 111 -> MX6_LOCLED# */
+ default-state = "off";
+ };
+ };
+
+ memory {
+ reg = <0x10000000 0x40000000>;
+ };
+
+ pps {
+ compatible = "pps-gpio";
+ gpios = <&gpio1 5 0>;
+ status = "okay";
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_1p0v: 1p0v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P0V";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_usb_h1_vbus: usb_h1_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_h1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_usb_otg_vbus: usb_otg_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 0>;
+ enable-active-high;
+ };
+ };
+
+ sound {
+ compatible = "fsl,imx6q-sabrelite-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+ model = "imx6q-sabrelite-sgtl5000";
+ ssi-controller = <&ssi1>;
+ audio-codec = <&codec>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ mux-int-port = <1>;
+ mux-ext-port = <4>;
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux_1>;
+ status = "okay";
+};
+
+&ecspi1 {
+ fsl,spi-num-chipselects = <1>;
+ cs-gpios = <&gpio3 19 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1_1>;
+ status = "okay";
+
+ flash: m25p80@0 {
+ compatible = "sst,w25q256";
+ spi-max-frequency = <30000000>;
+ reg = <0>;
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_1>;
+ phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio1 30 0>;
+ status = "okay";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_1>;
+ status = "okay";
+
+ eeprom1: eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+
+ eeprom2: eeprom@51 {
+ compatible = "atmel,24c02";
+ reg = <0x51>;
+ pagesize = <16>;
+ };
+
+ eeprom3: eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ pagesize = <16>;
+ };
+
+ eeprom4: eeprom@53 {
+ compatible = "atmel,24c02";
+ reg = <0x53>;
+ pagesize = <16>;
+ };
+
+ gpio: pca9555@23 {
+ compatible = "nxp,pca9555";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ hwmon: gsc@29 {
+ compatible = "gw,gsp";
+ reg = <0x29>;
+ };
+
+ rtc: ds1672@68 {
+ compatible = "dallas,ds1672";
+ reg = <0x68>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2_2>;
+ status = "okay";
+
+ pmic: pfuze100@08 {
+ compatible = "fsl,pfuze100";
+ reg = <0x08>;
+
+ regulators {
+ sw1a_reg: sw1ab {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw1c_reg: sw1c {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3950000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3a_reg: sw3a {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3b_reg: sw3b {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw4_reg: sw4 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ swbst_reg: swbst {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vgen1_reg: vgen1 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen2_reg: vgen2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen3_reg: vgen3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vgen4_reg: vgen4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vgen5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vgen6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+ };
+
+ pciswitch: pex8609@3f {
+ compatible = "plx,pex8609";
+ reg = <0x3f>;
+ };
+
+ pciclkgen: si52147@6b {
+ compatible = "sil,si52147";
+ reg = <0x6b>;
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3_2>;
+ status = "okay";
+
+ accelerometer: mma8450@1c {
+ compatible = "fsl,mma8450";
+ reg = <0x1c>;
+ };
+
+ codec: sgtl5000@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ clocks = <&clks 201>;
+ VDDA-supply = <&sw4_reg>;
+ VDDIO-supply = <&reg_3p3v>;
+ };
+
+ hdmiin: adv7611@4c {
+ compatible = "adi,adv7611";
+ reg = <0x4c>;
+ };
+
+ touchscreen: egalax_ts@04 {
+ compatible = "eeti,egalax_ts";
+ reg = <0x04>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <12 2>; /* gpio7_12 active low */
+ wakeup-gpios = <&gpio7 12 0>;
+ };
+
+ videoout: adv7393@2a {
+ compatible = "adi,adv7393";
+ reg = <0x2a>;
+ };
+
+ videoin: adv7180@20 {
+ compatible = "adi,adv7180";
+ reg = <0x20>;
+ };
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x80000000 /* SPINOR_CS0# */
+ MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x80000000 /* PCIE IRQ */
+ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000 /* PCIE RST */
+ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x000130b0 /* AUD4_MCK */
+ MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x80000000 /* GPS_PPS */
+ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000 /* TOUCH_IRQ# */
+ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
+ MX6QDL_PAD_KEY_COL2__GPIO4_IO10 0x80000000 /* user2 led */
+ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000 /* user3 led */
+ MX6QDL_PAD_SD1_DAT0__GPIO1_IO16 0x80000000 /* USBHUB_RST# */
+ MX6QDL_PAD_SD1_DAT3__GPIO1_IO21 0x80000000 /* MIPI_DIO */
+ >;
+ };
+ };
+};
+
+&ldb {
+ status = "okay";
+ lvds-channel@0 {
+ crtcs = <&ipu1 0>, <&ipu1 1>, <&ipu2 0>, <&ipu2 1>;
+ };
+};
+
+&pcie {
+ reset-gpio = <&gpio1 29 0>;
+ status = "okay";
+
+ eth1: sky2@8 { /* MAC/PHY on bus 8 */
+ compatible = "marvell,sky2";
+ };
+};
+
+&ssi1 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_2>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2_3>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5_1>;
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg_1>;
+ disable-over-current;
+ status = "okay";
+};
+
+&usbh1 {
+ vbus-supply = <&reg_usb_h1_vbus>;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3_2>;
+ cd-gpios = <&gpio7 0 0>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-gw54xx.dts b/arch/arm/boot/dts/imx6q-gw54xx.dts
new file mode 100644
index 000000000000..ab518d66a75e
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-gw54xx.dts
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2013 Gateworks Corporation
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include "imx6qdl-gw54xx.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 Quad GW54XX";
+ compatible = "gw,imx6q-gw54xx", "gw,ventana", "fsl,imx6q";
+};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-pinfunc.h b/arch/arm/boot/dts/imx6q-pinfunc.h
index 9bbe82bdee41..97ed0816a6e0 100644
--- a/arch/arm/boot/dts/imx6q-pinfunc.h
+++ b/arch/arm/boot/dts/imx6q-pinfunc.h
@@ -536,7 +536,7 @@
#define MX6QDL_PAD_ENET_REF_CLK__ESAI_RX_FS 0x1d4 0x4e8 0x85c 0x2 0x0
#define MX6QDL_PAD_ENET_REF_CLK__GPIO1_IO23 0x1d4 0x4e8 0x000 0x5 0x0
#define MX6QDL_PAD_ENET_REF_CLK__SPDIF_SR_CLK 0x1d4 0x4e8 0x000 0x6 0x0
-#define MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x1d8 0x4ec 0x000 0x0 0x0
+#define MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x1d8 0x4ec 0x004 0x0 0xff0d0100
#define MX6QDL_PAD_ENET_RX_ER__ENET_RX_ER 0x1d8 0x4ec 0x000 0x1 0x0
#define MX6QDL_PAD_ENET_RX_ER__ESAI_RX_HF_CLK 0x1d8 0x4ec 0x864 0x2 0x0
#define MX6QDL_PAD_ENET_RX_ER__SPDIF_IN 0x1d8 0x4ec 0x914 0x3 0x1
@@ -654,7 +654,7 @@
#define MX6QDL_PAD_GPIO_1__ESAI_RX_CLK 0x224 0x5f4 0x86c 0x0 0x1
#define MX6QDL_PAD_GPIO_1__WDOG2_B 0x224 0x5f4 0x000 0x1 0x0
#define MX6QDL_PAD_GPIO_1__KEY_ROW5 0x224 0x5f4 0x8f4 0x2 0x0
-#define MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x224 0x5f4 0x000 0x3 0x0
+#define MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x224 0x5f4 0x004 0x3 0xff0d0101
#define MX6QDL_PAD_GPIO_1__PWM2_OUT 0x224 0x5f4 0x000 0x4 0x0
#define MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x224 0x5f4 0x000 0x5 0x0
#define MX6QDL_PAD_GPIO_1__SD1_CD_B 0x224 0x5f4 0x000 0x6 0x0
diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts
index 3530280f5150..f004913f7d80 100644
--- a/arch/arm/boot/dts/imx6q-sabrelite.dts
+++ b/arch/arm/boot/dts/imx6q-sabrelite.dts
@@ -65,8 +65,10 @@
};
};
-&sata {
+&audmux {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux_1>;
};
&ecspi1 {
@@ -83,11 +85,29 @@
};
};
-&ssi1 {
- fsl,mode = "i2s-slave";
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_1>;
+ phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio3 23 0>;
status = "okay";
};
+&i2c1 {
+ status = "okay";
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_1>;
+
+ codec: sgtl5000@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ clocks = <&clks 201>;
+ VDDA-supply = <&reg_2p5v>;
+ VDDIO-supply = <&reg_3p3v>;
+ };
+};
+
&iomuxc {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hog>;
@@ -103,28 +123,61 @@
MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x80000000
MX6QDL_PAD_SD3_DAT4__GPIO7_IO01 0x1f0b0
MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x80000000
+ MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000
>;
};
};
};
-&usbotg {
- vbus-supply = <&reg_usb_otg_vbus>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbotg_1>;
- disable-over-current;
+&ldb {
+ status = "okay";
+
+ lvds-channel@0 {
+ fsl,data-mapping = "spwg";
+ fsl,data-width = <18>;
+ status = "okay";
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: hsd100pxn1 {
+ clock-frequency = <65000000>;
+ hactive = <1024>;
+ vactive = <768>;
+ hback-porch = <220>;
+ hfront-porch = <40>;
+ vback-porch = <21>;
+ vfront-porch = <7>;
+ hsync-len = <60>;
+ vsync-len = <10>;
+ };
+ };
+ };
+};
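+
+/*
+ * The display-timings above describe a 1024x768 LVDS panel (the timing
+ * name suggests a HannStar HSD100PXN1); native-mode selects the entry
+ * used by default, and fsl,data-mapping/fsl,data-width pick the 18-bit
+ * SPWG LVDS pixel format.
+ */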
+
+&sata {
+ status = "okay";
+};
+
+&ssi1 {
+ fsl,mode = "i2s-slave";
status = "okay";
};
+&uart2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2_1>;
+};
+
&usbh1 {
status = "okay";
};
-&fec {
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_enet_1>;
- phy-mode = "rgmii";
- phy-reset-gpios = <&gpio3 23 0>;
+ pinctrl-0 = <&pinctrl_usbotg_1>;
+ disable-over-current;
status = "okay";
};
@@ -145,30 +198,3 @@
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
-
-&audmux {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_audmux_1>;
-};
-
-&uart2 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart2_1>;
-};
-
-&i2c1 {
- status = "okay";
- clock-frequency = <100000>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c1_1>;
-
- codec: sgtl5000@0a {
- compatible = "fsl,sgtl5000";
- reg = <0x0a>;
- clocks = <&clks 201>;
- VDDA-supply = <&reg_2p5v>;
- VDDIO-supply = <&reg_3p3v>;
- };
-};
diff --git a/arch/arm/boot/dts/imx6q-udoo.dts b/arch/arm/boot/dts/imx6q-udoo.dts
new file mode 100644
index 000000000000..6e1ccdc019a7
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-udoo.dts
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Fabio Estevam <fabio.estevam@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+
+/ {
+ model = "Udoo i.MX6 Quad Board";
+ compatible = "udoo,imx6q-udoo", "fsl,imx6q";
+
+ memory {
+ reg = <0x10000000 0x40000000>;
+ };
+};
+
+&sata {
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2_1>;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3_2>;
+ non-removable;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
new file mode 100644
index 000000000000..e9ccfa4dce9d
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2013 Gateworks Corporation
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/ {
+ /* these are used by bootloader for disabling nodes */
+ aliases {
+ can0 = &can1;
+ ethernet0 = &fec;
+ led0 = &led0;
+ led1 = &led1;
+ nand = &gpmi;
+ usb0 = &usbh1;
+ usb1 = &usbotg;
+ };
+
+ chosen {
+ bootargs = "console=ttymxc1,115200";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led0: user1 {
+ label = "user1";
+ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+ led1: user2 {
+ label = "user2";
+ gpios = <&gpio4 7 0>; /* 103 -> MX6_PANLEDR */
+ default-state = "off";
+ };
+ };
+
+ memory {
+ reg = <0x10000000 0x20000000>;
+ };
+
+ pps {
+ compatible = "pps-gpio";
+ gpios = <&gpio1 26 0>;
+ status = "okay";
+ };
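+
+/*
+ * pps-gpio registers a kernel PPS source on gpio1_26, which the iomuxc
+ * hog group below routes to the GPS pulse-per-second signal (GPS_PPS).
+ */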
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_5p0v: 5p0v {
+ compatible = "regulator-fixed";
+ regulator-name = "5P0V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_usb_otg_vbus: usb_otg_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 0>;
+ enable-active-high;
+ };
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_1>;
+ phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio1 30 0>;
+ status = "okay";
+};
+
+&gpmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpmi_nand_2>;
+ status = "okay";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_1>;
+ status = "okay";
+
+ eeprom1: eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+
+ eeprom2: eeprom@51 {
+ compatible = "atmel,24c02";
+ reg = <0x51>;
+ pagesize = <16>;
+ };
+
+ eeprom3: eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ pagesize = <16>;
+ };
+
+ eeprom4: eeprom@53 {
+ compatible = "atmel,24c02";
+ reg = <0x53>;
+ pagesize = <16>;
+ };
+
+ gpio: pca9555@23 {
+ compatible = "nxp,pca9555";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ hwmon: gsc@29 {
+ compatible = "gw,gsp";
+ reg = <0x29>;
+ };
+
+ rtc: ds1672@68 {
+ compatible = "dallas,ds1672";
+ reg = <0x68>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2_2>;
+ status = "okay";
+
+ pmic: ltc3676@3c {
+ compatible = "ltc,ltc3676";
+ reg = <0x3c>;
+
+ regulators {
+ sw1_reg: ltc3676__sw1 {
+ regulator-min-microvolt = <1175000>;
+ regulator-max-microvolt = <1175000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw2_reg: ltc3676__sw2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3_reg: ltc3676__sw3 {
+ regulator-min-microvolt = <1175000>;
+ regulator-max-microvolt = <1175000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw4_reg: ltc3676__sw4 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: ltc3676__ldo2 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4_reg: ltc3676__ldo4 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3_2>;
+ status = "okay";
+
+ videoin: adv7180@20 {
+ compatible = "adi,adv7180";
+ reg = <0x20>;
+ };
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x80000000 /* MEZZ_DIO0 */
+ MX6QDL_PAD_EIM_A20__GPIO2_IO18 0x80000000 /* MEZZ_DIO1 */
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
+ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x80000000 /* GPS_PPS */
+ MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x80000000 /* PHY Reset */
+ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x80000000 /* PCIE_RST# */
+ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
+ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x80000000 /* user2 led */
+ >;
+ };
+ };
+};
+
+&pcie {
+ reset-gpio = <&gpio1 0 0>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_2>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2_3>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3_3>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5_1>;
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg_1>;
+ disable-over-current;
+ status = "okay";
+};
+
+&usbh1 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
new file mode 100644
index 000000000000..164a9448753c
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -0,0 +1,373 @@
+/*
+ * Copyright 2013 Gateworks Corporation
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/ {
+ /* these are used by bootloader for disabling nodes */
+ aliases {
+ ethernet0 = &fec;
+ led0 = &led0;
+ led1 = &led1;
+ led2 = &led2;
+ nand = &gpmi;
+ ssi0 = &ssi1;
+ usb0 = &usbh1;
+ usb1 = &usbotg;
+ usdhc2 = &usdhc3;
+ };
+
+ chosen {
+ bootargs = "console=ttymxc1,115200";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led0: user1 {
+ label = "user1";
+ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+ led1: user2 {
+ label = "user2";
+ gpios = <&gpio4 7 0>; /* 103 -> MX6_PANLEDR */
+ default-state = "off";
+ };
+
+ led2: user3 {
+ label = "user3";
+ gpios = <&gpio4 15 1>; /* 111 -> MX6_LOCLED# */
+ default-state = "off";
+ };
+ };
+
+ memory {
+ reg = <0x10000000 0x20000000>;
+ };
+
+ pps {
+ compatible = "pps-gpio";
+ gpios = <&gpio1 26 0>;
+ status = "okay";
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_1p0v: 1p0v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P0V";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ };
+
+ /* remove this fixed regulator once ltc3676__sw2 driver available */
+ reg_1p8v: 1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_5p0v: 5p0v {
+ compatible = "regulator-fixed";
+ regulator-name = "5P0V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_usb_otg_vbus: usb_otg_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 0>;
+ enable-active-high;
+ };
+ };
+
+ sound {
+ compatible = "fsl,imx6q-sabrelite-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+ model = "imx6q-sabrelite-sgtl5000";
+ ssi-controller = <&ssi1>;
+ audio-codec = <&codec>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ mux-int-port = <1>;
+ mux-ext-port = <4>;
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux_1>;
+ status = "okay";
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_1>;
+ phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio1 30 0>;
+ status = "okay";
+};
+
+&gpmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpmi_nand_2>;
+ status = "okay";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_1>;
+ status = "okay";
+
+ eeprom1: eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+
+ eeprom2: eeprom@51 {
+ compatible = "atmel,24c02";
+ reg = <0x51>;
+ pagesize = <16>;
+ };
+
+ eeprom3: eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ pagesize = <16>;
+ };
+
+ eeprom4: eeprom@53 {
+ compatible = "atmel,24c02";
+ reg = <0x53>;
+ pagesize = <16>;
+ };
+
+ gpio: pca9555@23 {
+ compatible = "nxp,pca9555";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ hwmon: gsc@29 {
+ compatible = "gw,gsp";
+ reg = <0x29>;
+ };
+
+ rtc: ds1672@68 {
+ compatible = "dallas,ds1672";
+ reg = <0x68>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2_2>;
+ status = "okay";
+
+ pciswitch: pex8609@3f {
+ compatible = "plx,pex8609";
+ reg = <0x3f>;
+ };
+
+ pmic: ltc3676@3c {
+ compatible = "ltc,ltc3676";
+ reg = <0x3c>;
+
+ regulators {
+ sw1_reg: ltc3676__sw1 {
+ regulator-min-microvolt = <1175000>;
+ regulator-max-microvolt = <1175000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw2_reg: ltc3676__sw2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3_reg: ltc3676__sw3 {
+ regulator-min-microvolt = <1175000>;
+ regulator-max-microvolt = <1175000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw4_reg: ltc3676__sw4 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: ltc3676__ldo2 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3_reg: ltc3676__ldo3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4_reg: ltc3676__ldo4 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3_2>;
+ status = "okay";
+
+ accelerometer: fxos8700@1e {
+ compatible = "fsl,fxos8700";
+ reg = <0x1e>;
+ };
+
+ codec: sgtl5000@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ clocks = <&clks 169>;
+ VDDA-supply = <&reg_1p8v>;
+ VDDIO-supply = <&reg_3p3v>;
+ };
+
+ touchscreen: egalax_ts@04 {
+ compatible = "eeti,egalax_ts";
+ reg = <0x04>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <12 2>; /* gpio7_12 active low */
+ wakeup-gpios = <&gpio7 12 0>;
+ };
+
+ videoin: adv7180@20 {
+ compatible = "adi,adv7180";
+ reg = <0x20>;
+ };
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x80000000 /* MEZZ_DIO0 */
+ MX6QDL_PAD_EIM_A20__GPIO2_IO18 0x80000000 /* MEZZ_DIO1 */
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
+ MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x80000000 /* VIDDEC_PDN# */
+ MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x80000000 /* PHY Reset */
+ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000 /* PCIE_RST# */
+ MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x80000000 /* GPS_PWDN */
+ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x80000000 /* GPS_PPS */
+ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x000130b0 /* AUD4_MCK */
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000 /* USB_SEL_PCI */
+ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000 /* TOUCH_IRQ# */
+ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
+ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x80000000 /* user2 led */
+ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000 /* user3 led */
+ MX6QDL_PAD_SD2_CMD__GPIO1_IO11 0x80000000 /* LVDS_TCH# */
+ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x80000000 /* SD3_CD# */
+ MX6QDL_PAD_SD4_DAT3__GPIO2_IO11 0x80000000 /* UART2_EN# */
+ >;
+ };
+ };
+};
+
+&ldb {
+ status = "okay";
+ lvds-channel@0 {
+ crtcs = <&ipu1 0>, <&ipu1 1>;
+ };
+};
+
+&pcie {
+ reset-gpio = <&gpio1 29 0>;
+ status = "okay";
+};
+
+&ssi1 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_2>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2_3>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5_1>;
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg_1>;
+ disable-over-current;
+ status = "okay";
+};
+
+&usbh1 {
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3_2>;
+ cd-gpios = <&gpio7 0 0>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
new file mode 100644
index 000000000000..506338dcea11
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -0,0 +1,429 @@
+/*
+ * Copyright 2013 Gateworks Corporation
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/ {
+ /* these are used by bootloader for disabling nodes */
+ aliases {
+ can0 = &can1;
+ ethernet0 = &fec;
+ ethernet1 = &eth1;
+ led0 = &led0;
+ led1 = &led1;
+ led2 = &led2;
+ nand = &gpmi;
+ sky2 = &eth1;
+ ssi0 = &ssi1;
+ usb0 = &usbh1;
+ usb1 = &usbotg;
+ usdhc2 = &usdhc3;
+ };
+
+ chosen {
+ bootargs = "console=ttymxc1,115200";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led0: user1 {
+ label = "user1";
+ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+ led1: user2 {
+ label = "user2";
+ gpios = <&gpio4 7 0>; /* 103 -> MX6_PANLEDR */
+ default-state = "off";
+ };
+
+ led2: user3 {
+ label = "user3";
+ gpios = <&gpio4 15 1>; /* 111 -> MX6_LOCLED# */
+ default-state = "off";
+ };
+ };
+
+ memory {
+ reg = <0x10000000 0x40000000>;
+ };
+
+ pps {
+ compatible = "pps-gpio";
+ gpios = <&gpio1 26 0>;
+ status = "okay";
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_1p0v: 1p0v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P0V";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ };
+
+ /* remove when pmic 1p8 regulator available */
+ reg_1p8v: 1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_usb_h1_vbus: usb_h1_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_h1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_usb_otg_vbus: usb_otg_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 0>;
+ enable-active-high;
+ };
+ };
+
+ sound {
+ compatible = "fsl,imx6q-sabrelite-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+ model = "imx6q-sabrelite-sgtl5000";
+ ssi-controller = <&ssi1>;
+ audio-codec = <&codec>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ mux-int-port = <1>;
+ mux-ext-port = <4>;
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux_1>;
+ status = "okay";
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1_1>;
+ status = "okay";
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_1>;
+ phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio1 30 0>;
+ status = "okay";
+};
+
+&gpmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpmi_nand_2>;
+ status = "okay";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_1>;
+ status = "okay";
+
+ eeprom1: eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+
+ eeprom2: eeprom@51 {
+ compatible = "atmel,24c02";
+ reg = <0x51>;
+ pagesize = <16>;
+ };
+
+ eeprom3: eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ pagesize = <16>;
+ };
+
+ eeprom4: eeprom@53 {
+ compatible = "atmel,24c02";
+ reg = <0x53>;
+ pagesize = <16>;
+ };
+
+ gpio: pca9555@23 {
+ compatible = "nxp,pca9555";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ hwmon: gsc@29 {
+ compatible = "gw,gsp";
+ reg = <0x29>;
+ };
+
+ rtc: ds1672@68 {
+ compatible = "dallas,ds1672";
+ reg = <0x68>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2_2>;
+ status = "okay";
+
+ pciclkgen: si53156@6b {
+ compatible = "sil,si53156";
+ reg = <0x6b>;
+ };
+
+ pciswitch: pex8606@3f {
+ compatible = "plx,pex8606";
+ reg = <0x3f>;
+ };
+
+ pmic: ltc3676@3c {
+ compatible = "ltc,ltc3676";
+ reg = <0x3c>;
+
+ regulators {
+ /* VDD_SOC */
+ sw1_reg: ltc3676__sw1 {
+ regulator-min-microvolt = <1175000>;
+ regulator-max-microvolt = <1175000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_1P8 */
+ sw2_reg: ltc3676__sw2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_ARM */
+ sw3_reg: ltc3676__sw3 {
+ regulator-min-microvolt = <1175000>;
+ regulator-max-microvolt = <1175000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_DDR */
+ sw4_reg: ltc3676__sw4 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_2P5 */
+ ldo2_reg: ltc3676__ldo2 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_1P8 */
+ ldo3_reg: ltc3676__ldo3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_HIGH */
+ ldo4_reg: ltc3676__ldo4 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3_2>;
+ status = "okay";
+
+ accelerometer: fxos8700@1e {
+ compatible = "fsl,fxos8700";
+ reg = <0x1e>;
+ };
+
+ codec: sgtl5000@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ clocks = <&clks 201>;
+ VDDA-supply = <&reg_1p8v>;
+ VDDIO-supply = <&reg_3p3v>;
+ };
+
+ hdmiin: adv7611@4c {
+ compatible = "adi,adv7611";
+ reg = <0x4c>;
+ };
+
+ touchscreen: egalax_ts@04 {
+ compatible = "eeti,egalax_ts";
+ reg = <0x04>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <11 2>; /* gpio1_11 active low */
+ wakeup-gpios = <&gpio1 11 0>;
+ };
+
+ videoout: adv7393@2a {
+ compatible = "adi,adv7393";
+ reg = <0x2a>;
+ };
+
+ videoin: adv7180@20 {
+ compatible = "adi,adv7180";
+ reg = <0x20>;
+ };
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x80000000 /* PCIE6EXP_DIO0 */
+ MX6QDL_PAD_EIM_A20__GPIO2_IO18 0x80000000 /* PCIE6EXP_DIO1 */
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
+ MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x80000000 /* GPS_SHDN */
+ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x80000000 /* GPS_PPS */
+ MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x80000000 /* PCIE IRQ */
+ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000 /* PCIE RST */
+ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x000130b0 /* AUD4_MCK */
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000 /* CAN_STBY */
+ MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x80000000 /* PMIC_IRQ# */
+ MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x80000000 /* HUB_RST# */
+ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000 /* PCIE_WDIS# */
+ MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x80000000 /* ACCEL_IRQ# */
+ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
+ MX6QDL_PAD_KEY_COL4__GPIO4_IO14 0x80000000 /* USBOTG_OC# */
+ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x80000000 /* user2 led */
+ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000 /* user3 led */
+ MX6QDL_PAD_SD2_CMD__GPIO1_IO11 0x80000000 /* TOUCH_IRQ# */
+ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x80000000 /* SD3_DET# */
+ >;
+ };
+ };
+};
+
+&ldb {
+ status = "okay";
+
+ lvds-channel@1 {
+ fsl,data-mapping = "spwg";
+ fsl,data-width = <18>;
+ status = "okay";
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: hsd100pxn1 {
+ clock-frequency = <65000000>;
+ hactive = <1024>;
+ vactive = <768>;
+ hback-porch = <220>;
+ hfront-porch = <40>;
+ vback-porch = <21>;
+ vfront-porch = <7>;
+ hsync-len = <60>;
+ vsync-len = <10>;
+ };
+ };
+ };
+};
+
+&pcie {
+ reset-gpio = <&gpio1 29 0>;
+ status = "okay";
+
+ eth1: sky2@8 { /* MAC/PHY on bus 8 */
+ compatible = "marvell,sky2";
+ };
+};
+
+&ssi1 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_2>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2_3>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5_1>;
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg_1>;
+ disable-over-current;
+ status = "okay";
+};
+
+&usbh1 {
+ vbus-supply = <&reg_usb_h1_vbus>;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3_2>;
+ cd-gpios = <&gpio7 0 0>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
new file mode 100644
index 000000000000..2a67aa05ba00
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -0,0 +1,457 @@
+/*
+ * Copyright 2013 Gateworks Corporation
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/ {
+ /* these are used by bootloader for disabling nodes */
+ aliases {
+ can0 = &can1;
+ ethernet0 = &fec;
+ ethernet1 = &eth1;
+ led0 = &led0;
+ led1 = &led1;
+ led2 = &led2;
+ nand = &gpmi;
+ sky2 = &eth1;
+ ssi0 = &ssi1;
+ usb0 = &usbh1;
+ usb1 = &usbotg;
+ usdhc2 = &usdhc3;
+ };
+
+ chosen {
+ bootargs = "console=ttymxc1,115200";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led0: user1 {
+ label = "user1";
+ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+ led1: user2 {
+ label = "user2";
+ gpios = <&gpio4 7 0>; /* 103 -> MX6_PANLEDR */
+ default-state = "off";
+ };
+
+ led2: user3 {
+ label = "user3";
+ gpios = <&gpio4 15 1>; /* 111 -> MX6_LOCLED# */
+ default-state = "off";
+ };
+ };
+
+ memory {
+ reg = <0x10000000 0x40000000>;
+ };
+
+ pps {
+ compatible = "pps-gpio";
+ gpios = <&gpio1 26 0>;
+ status = "okay";
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_1p0v: 1p0v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P0V";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_usb_h1_vbus: usb_h1_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_h1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_usb_otg_vbus: usb_otg_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 0>;
+ enable-active-high;
+ };
+ };
+
+ sound {
+ compatible = "fsl,imx6q-sabrelite-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+ model = "imx6q-sabrelite-sgtl5000";
+ ssi-controller = <&ssi1>;
+ audio-codec = <&codec>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ mux-int-port = <1>;
+ mux-ext-port = <4>;
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux_1>; /* AUD4<->sgtl5000 */
+ status = "okay";
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1_1>;
+ status = "okay";
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_1>;
+ phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio1 30 0>;
+ status = "okay";
+};
+
+&gpmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpmi_nand_2>;
+ status = "okay";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_1>;
+ status = "okay";
+
+ eeprom1: eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+
+ eeprom2: eeprom@51 {
+ compatible = "atmel,24c02";
+ reg = <0x51>;
+ pagesize = <16>;
+ };
+
+ eeprom3: eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ pagesize = <16>;
+ };
+
+ eeprom4: eeprom@53 {
+ compatible = "atmel,24c02";
+ reg = <0x53>;
+ pagesize = <16>;
+ };
+
+ gpio: pca9555@23 {
+ compatible = "nxp,pca9555";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ hwmon: gsc@29 {
+ compatible = "gw,gsp";
+ reg = <0x29>;
+ };
+
+ rtc: ds1672@68 {
+ compatible = "dallas,ds1672";
+ reg = <0x68>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2_2>;
+ status = "okay";
+
+ pmic: pfuze100@08 {
+ compatible = "fsl,pfuze100";
+ reg = <0x08>;
+
+ regulators {
+ sw1a_reg: sw1ab {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw1c_reg: sw1c {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3950000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3a_reg: sw3a {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3b_reg: sw3b {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw4_reg: sw4 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ swbst_reg: swbst {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vgen1_reg: vgen1 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen2_reg: vgen2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen3_reg: vgen3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vgen4_reg: vgen4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vgen5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vgen6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+ };
+
+ pciswitch: pex8609@3f {
+ compatible = "plx,pex8609";
+ reg = <0x3f>;
+ };
+
+ pciclkgen: si52147@6b {
+ compatible = "sil,si52147";
+ reg = <0x6b>;
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3_2>;
+ status = "okay";
+
+ accelerometer: fxos8700@1e {
+ compatible = "fsl,fxos8700";
+ reg = <0x1e>;
+ };
+
+ codec: sgtl5000@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ clocks = <&clks 201>;
+ VDDA-supply = <&sw4_reg>;
+ VDDIO-supply = <&reg_3p3v>;
+ };
+
+ hdmiin: adv7611@4c {
+ compatible = "adi,adv7611";
+ reg = <0x4c>;
+ };
+
+ touchscreen: egalax_ts@04 {
+ compatible = "eeti,egalax_ts";
+ reg = <0x04>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <12 2>; /* gpio7_12 active low */
+ wakeup-gpios = <&gpio7 12 0>;
+ };
+
+ videoout: adv7393@2a {
+ compatible = "adi,adv7393";
+ reg = <0x2a>;
+ };
+
+ videoin: adv7180@20 {
+ compatible = "adi,adv7180";
+ reg = <0x20>;
+ };
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x80000000 /* SPINOR_CS0# */
+ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x80000000 /* GPS_PPS */
+ MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x80000000 /* PCIE IRQ */
+ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000 /* PCIE RST */
+ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x000130b0 /* AUD4_MCK */
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000 /* CAN_STBY */
+ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000 /* TOUCH_IRQ# */
+ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
+ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x80000000 /* user2 led */
+ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000 /* user3 led */
+ MX6QDL_PAD_SD1_DAT0__GPIO1_IO16 0x80000000 /* USBHUB_RST# */
+ MX6QDL_PAD_SD1_DAT3__GPIO1_IO21 0x80000000 /* MIPI_DIO */
+ >;
+ };
+ };
+};
+
+&ldb {
+ status = "okay";
+
+ lvds-channel@1 {
+ fsl,data-mapping = "spwg";
+ fsl,data-width = <18>;
+ status = "okay";
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: hsd100pxn1 {
+ clock-frequency = <65000000>;
+ hactive = <1024>;
+ vactive = <768>;
+ hback-porch = <220>;
+ hfront-porch = <40>;
+ vback-porch = <21>;
+ vfront-porch = <7>;
+ hsync-len = <60>;
+ vsync-len = <10>;
+ };
+ };
+ };
+};
+
+&pcie {
+ reset-gpio = <&gpio1 29 0>;
+ status = "okay";
+
+ eth1: sky2@8 { /* MAC/PHY on bus 8 */
+ compatible = "marvell,sky2";
+ };
+};
+
+&ssi1 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+};
+
+&ssi2 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_2>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2_3>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5_1>;
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg_1>;
+ disable-over-current;
+ status = "okay";
+};
+
+&usbh1 {
+ vbus-supply = <&reg_usb_h1_vbus>;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3_2>;
+ cd-gpios = <&gpio7 0 0>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 1cbbc5160d27..ff6f1e8f2dd9 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -54,6 +54,7 @@
fsl,pins = <
MX6QDL_PAD_NANDF_CS2__GPIO6_IO15 0x80000000
MX6QDL_PAD_SD2_DAT2__GPIO1_IO13 0x80000000
+ MX6QDL_PAD_GPIO_18__SD3_VSELECT 0x17059
>;
};
};
@@ -74,8 +75,10 @@
};
&usdhc3 {
- pinctrl-names = "default";
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc3_1>;
+ pinctrl-1 = <&pinctrl_usdhc3_1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_1_200mhz>;
cd-gpios = <&gpio6 15 0>;
wp-gpios = <&gpio1 13 0>;
status = "okay";
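+ /*
+ * The extra "state_100mhz"/"state_200mhz" pinctrl states let the esdhc
+ * driver switch to the higher drive-strength pad settings defined by
+ * pinctrl_usdhc3_1_100mhz/_200mhz when the card is clocked at UHS speeds.
+ */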
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index 39eafc222a2e..e75e11b36dff 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -80,6 +80,14 @@
mux-int-port = <2>;
mux-ext-port = <3>;
};
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm1 0 5000000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <7>;
+ status = "okay";
+ };
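+
+ /*
+ * pwm-backlight: the third cell of the pwms specifier is the PWM period
+ * in nanoseconds (5000000 ns = 200 Hz), and default-brightness-level is
+ * an index into brightness-levels (7 -> 255, full brightness).
+ */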
};
&audmux {
@@ -108,6 +116,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet_1>;
phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio1 25 0>;
status = "okay";
};
@@ -172,6 +181,7 @@
MX6QDL_PAD_NANDF_CLE__GPIO6_IO07 0x80000000
MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000
MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000
+ MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x80000000
>;
};
};
@@ -202,6 +212,12 @@
};
};
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm0_1>;
+ status = "okay";
+};
+
&ssi2 {
fsl,mode = "i2s-slave";
status = "okay";
@@ -229,6 +245,7 @@
&usdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2_1>;
+ bus-width = <8>;
cd-gpios = <&gpio2 2 0>;
wp-gpios = <&gpio2 3 0>;
status = "okay";
@@ -237,6 +254,7 @@
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3_1>;
+ bus-width = <8>;
cd-gpios = <&gpio2 0 0>;
wp-gpios = <&gpio2 1 0>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index a55113e65bcb..35f547929167 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -43,6 +43,13 @@
mux-int-port = <1>;
mux-ext-port = <3>;
};
+
+ sound-spdif {
+ compatible = "fsl,imx-audio-spdif";
+ model = "imx-spdif";
+ spdif-controller = <&spdif>;
+ spdif-out;
+ };
};
&audmux {
@@ -81,6 +88,7 @@
MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x80000000 /* WL_REG_ON */
MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000 /* WL_HOST_WAKE */
MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x80000000 /* WL_WAKE */
+ MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x80000000
>;
};
};
@@ -90,6 +98,13 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet_1>;
phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio3 29 0>;
+ status = "okay";
+};
+
+&spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spdif_3>;
status = "okay";
};
@@ -115,6 +130,14 @@
status = "okay";
};
+&usbotg {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg_1>;
+ disable-over-current;
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
&usdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1_2>;
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index ccd55c2fdb67..226ce75c87a8 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -116,6 +116,22 @@
arm,data-latency = <4 2 3>;
};
+ pcie: pcie@0x01000000 {
+ compatible = "fsl,imx6q-pcie", "snps,dw-pcie";
+ reg = <0x01ffc000 0x4000>; /* DBI */
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ ranges = <0x00000800 0 0x01f00000 0x01f00000 0 0x00080000 /* configuration space */
+ 0x81000000 0 0 0x01f80000 0 0x00010000 /* downstream I/O */
+ 0x82000000 0 0x01000000 0x01000000 0 0x00f00000>; /* non-prefetchable memory */
+ num-lanes = <1>;
+ interrupts = <0 123 0x04>;
+ clocks = <&clks 189>, <&clks 187>, <&clks 206>, <&clks 144>;
+ clock-names = "pcie_ref_125m", "sata_ref_100m", "lvds_gate", "pcie_axi";
+ status = "disabled";
+ };
+
pmu {
compatible = "arm,cortex-a9-pmu";
interrupts = <0 94 0x04>;
@@ -136,8 +152,23 @@
ranges;
spdif: spdif@02004000 {
+ compatible = "fsl,imx35-spdif";
reg = <0x02004000 0x4000>;
interrupts = <0 52 0x04>;
+ dmas = <&sdma 14 18 0>,
+ <&sdma 15 18 0>;
+ dma-names = "rx", "tx";
+ clocks = <&clks 197>, <&clks 3>,
+ <&clks 197>, <&clks 107>,
+ <&clks 0>, <&clks 118>,
+ <&clks 62>, <&clks 139>,
+ <&clks 0>;
+ clock-names = "core", "rxtx0",
+ "rxtx1", "rxtx2",
+ "rxtx3", "rxtx4",
+ "rxtx5", "rxtx6",
+ "rxtx7";
+ status = "disabled";
};
ecspi1: ecspi@02008000 {
@@ -300,6 +331,7 @@
interrupts = <0 110 0x04>;
clocks = <&clks 108>, <&clks 109>;
clock-names = "ipg", "per";
+ status = "disabled";
};
can2: flexcan@02094000 {
@@ -308,6 +340,7 @@
interrupts = <0 111 0x04>;
clocks = <&clks 110>, <&clks 111>;
clock-names = "ipg", "per";
+ status = "disabled";
};
gpt: gpt@02098000 {
@@ -606,6 +639,14 @@
MX6QDL_PAD_DISP0_DAT19__AUD5_RXD 0x80000000
>;
};
+
+ pinctrl_audmux_4: audmux-4 {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D24__AUD5_RXFS 0x80000000
+ MX6QDL_PAD_EIM_D25__AUD5_RXC 0x80000000
+ MX6QDL_PAD_DISP0_DAT19__AUD5_RXD 0x80000000
+ >;
+ };
};
ecspi1 {
@@ -778,6 +819,28 @@
MX6QDL_PAD_SD4_DAT0__NAND_DQS 0x00b1
>;
};
+
+ /* No Strobe */
+ pinctrl_gpmi_nand_2: gpmi-nand-2 {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
+ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
+ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
+ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
+ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
+ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
+ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
+ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
+ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
+ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
+ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
+ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
+ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
+ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
+ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
+ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
+ >;
+ };
};
hdmi_hdcp {
@@ -1010,6 +1073,12 @@
MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0
>;
};
+
+ pinctrl_spdif_3: spdifgrp-3 {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_RXD0__SPDIF_OUT 0x1b0b0
+ >;
+ };
};
uart1 {
@@ -1019,6 +1088,13 @@
MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
>;
};
+
+ pinctrl_uart1_2: uart1grp-2 {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
+ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
+ >;
+ };
};
uart2 {
@@ -1037,6 +1113,13 @@
MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B 0x1b0b1
>;
};
+
+ pinctrl_uart2_3: uart2grp-3 {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
+ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
+ >;
+ };
};
uart3 {
@@ -1057,6 +1140,13 @@
MX6QDL_PAD_EIM_EB3__UART3_RTS_B 0x1b0b1
>;
};
+
+ pinctrl_uart3_3: uart3grp-3 {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+ >;
+ };
};
uart4 {
@@ -1068,6 +1158,15 @@
};
};
+ uart5 {
+ pinctrl_uart5_1: uart5grp-1 {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
+ >;
+ };
+ };
+
usbotg {
pinctrl_usbotg_1: usbotggrp-1 {
fsl,pins = <
@@ -1184,6 +1283,36 @@
>;
};
+ pinctrl_usdhc3_1_100mhz: usdhc3grp-1-100mhz { /* 100MHz */
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170b9
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170b9
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170b9
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170b9
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170b9
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170b9
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170b9
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc3_1_200mhz: usdhc3grp-1-200mhz { /* 200MHz */
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170f9
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170f9
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170f9
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170f9
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170f9
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170f9
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170f9
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170f9
+ >;
+ };
+
pinctrl_usdhc3_2: usdhc3grp-2 {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
index 2886a590823d..cc68e19c5163 100644
--- a/arch/arm/boot/dts/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/imx6sl-evk.dts
@@ -17,6 +17,44 @@
memory {
reg = <0x80000000 0x40000000>;
};
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_usb_otg1_vbus: usb_otg1_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio4 0 0>;
+ enable-active-high;
+ };
+
+ reg_usb_otg2_vbus: usb_otg2_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg2_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio4 2 0>;
+ enable-active-high;
+ };
+ };
+};
+
+&ecspi1 {
+ fsl,spi-num-chipselects = <1>;
+ cs-gpios = <&gpio4 11 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1_1>;
+ status = "okay";
+
+ flash: m25p80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,m25p32";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
};
&fec {
@@ -38,6 +76,8 @@
MX6SL_PAD_SD2_DAT7__GPIO5_IO00 0x17059
MX6SL_PAD_SD2_DAT6__GPIO4_IO29 0x17059
MX6SL_PAD_REF_CLK_32K__GPIO3_IO22 0x17059
+ MX6SL_PAD_KEY_COL4__GPIO4_IO00 0x80000000
+ MX6SL_PAD_KEY_COL5__GPIO4_IO02 0x80000000
>;
};
};
@@ -49,9 +89,26 @@
status = "okay";
};
-&usdhc1 {
+&usbotg1 {
+ vbus-supply = <&reg_usb_otg1_vbus>;
pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg1_1>;
+ disable-over-current;
+ status = "okay";
+};
+
+&usbotg2 {
+ vbus-supply = <&reg_usb_otg2_vbus>;
+ dr_mode = "host";
+ disable-over-current;
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc1_1>;
+ pinctrl-1 = <&pinctrl_usdhc1_1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_1_200mhz>;
bus-width = <8>;
cd-gpios = <&gpio4 7 0>;
wp-gpios = <&gpio4 6 0>;
@@ -59,16 +116,20 @@
};
&usdhc2 {
- pinctrl-names = "default";
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc2_1>;
+ pinctrl-1 = <&pinctrl_usdhc2_1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc2_1_200mhz>;
cd-gpios = <&gpio5 0 0>;
wp-gpios = <&gpio4 29 0>;
status = "okay";
};
&usdhc3 {
- pinctrl-names = "default";
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc3_1>;
+ pinctrl-1 = <&pinctrl_usdhc3_1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_1_200mhz>;
cd-gpios = <&gpio3 22 0>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index c46651e4d966..28558f1aaf2d 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -13,16 +13,20 @@
/ {
aliases {
- serial0 = &uart1;
- serial1 = &uart2;
- serial2 = &uart3;
- serial3 = &uart4;
- serial4 = &uart5;
gpio0 = &gpio1;
gpio1 = &gpio2;
gpio2 = &gpio3;
gpio3 = &gpio4;
gpio4 = &gpio5;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
+ spi0 = &ecspi1;
+ spi1 = &ecspi2;
+ spi2 = &ecspi3;
+ spi3 = &ecspi4;
};
cpus {
@@ -380,7 +384,9 @@
};
anatop: anatop@020c8000 {
- compatible = "fsl,imx6sl-anatop", "syscon", "simple-bus";
+ compatible = "fsl,imx6sl-anatop",
+ "fsl,imx6q-anatop",
+ "syscon", "simple-bus";
reg = <0x020c8000 0x1000>;
interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
@@ -528,10 +534,26 @@
interrupts = <0 89 0x04>;
};
+ gpr: iomuxc-gpr@020e0000 {
+ compatible = "fsl,imx6sl-iomuxc-gpr",
+ "fsl,imx6q-iomuxc-gpr", "syscon";
+ reg = <0x020e0000 0x38>;
+ };
+
iomuxc: iomuxc@020e0000 {
compatible = "fsl,imx6sl-iomuxc";
reg = <0x020e0000 0x4000>;
+ ecspi1 {
+ pinctrl_ecspi1_1: ecspi1grp-1 {
+ fsl,pins = <
+ MX6SL_PAD_ECSPI1_MISO__ECSPI1_MISO 0x100b1
+ MX6SL_PAD_ECSPI1_MOSI__ECSPI1_MOSI 0x100b1
+ MX6SL_PAD_ECSPI1_SCLK__ECSPI1_SCLK 0x100b1
+ >;
+ };
+ };
+
fec {
pinctrl_fec_1: fecgrp-1 {
fsl,pins = <
@@ -557,6 +579,64 @@
};
};
+ usbotg1 {
+ pinctrl_usbotg1_1: usbotg1grp-1 {
+ fsl,pins = <
+ MX6SL_PAD_EPDC_PWRCOM__USB_OTG1_ID 0x17059
+ >;
+ };
+
+ pinctrl_usbotg1_2: usbotg1grp-2 {
+ fsl,pins = <
+ MX6SL_PAD_FEC_RXD0__USB_OTG1_ID 0x17059
+ >;
+ };
+
+ pinctrl_usbotg1_3: usbotg1grp-3 {
+ fsl,pins = <
+ MX6SL_PAD_LCD_DAT1__USB_OTG1_ID 0x17059
+ >;
+ };
+
+ pinctrl_usbotg1_4: usbotg1grp-4 {
+ fsl,pins = <
+ MX6SL_PAD_REF_CLK_32K__USB_OTG1_ID 0x17059
+ >;
+ };
+
+ pinctrl_usbotg1_5: usbotg1grp-5 {
+ fsl,pins = <
+ MX6SL_PAD_SD3_DAT0__USB_OTG1_ID 0x17059
+ >;
+ };
+ };
+
+ usbotg2 {
+ pinctrl_usbotg2_1: usbotg2grp-1 {
+ fsl,pins = <
+ MX6SL_PAD_ECSPI1_SCLK__USB_OTG2_OC 0x17059
+ >;
+ };
+
+ pinctrl_usbotg2_2: usbotg2grp-2 {
+ fsl,pins = <
+ MX6SL_PAD_ECSPI2_SCLK__USB_OTG2_OC 0x17059
+ >;
+ };
+
+ pinctrl_usbotg2_3: usbotg2grp-3 {
+ fsl,pins = <
+ MX6SL_PAD_KEY_ROW5__USB_OTG2_OC 0x17059
+ >;
+ };
+
+ pinctrl_usbotg2_4: usbotg2grp-4 {
+ fsl,pins = <
+ MX6SL_PAD_SD3_DAT2__USB_OTG2_OC 0x17059
+ >;
+ };
+ };
+
usdhc1 {
pinctrl_usdhc1_1: usdhc1grp-1 {
fsl,pins = <
@@ -572,6 +652,38 @@
MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x17059
>;
};
+
+ pinctrl_usdhc1_1_100mhz: usdhc1grp-1-100mhz {
+ fsl,pins = <
+ MX6SL_PAD_SD1_CMD__SD1_CMD 0x170b9
+ MX6SL_PAD_SD1_CLK__SD1_CLK 0x100b9
+ MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x170b9
+ MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x170b9
+ MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x170b9
+ MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x170b9
+ MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x170b9
+ MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x170b9
+ MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x170b9
+ MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc1_1_200mhz: usdhc1grp-1-200mhz {
+ fsl,pins = <
+ MX6SL_PAD_SD1_CMD__SD1_CMD 0x170f9
+ MX6SL_PAD_SD1_CLK__SD1_CLK 0x100f9
+ MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x170f9
+ MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x170f9
+ MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x170f9
+ MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x170f9
+ MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x170f9
+ MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x170f9
+ MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x170f9
+ MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x170f9
+ >;
+ };
+
};
usdhc2 {
@@ -585,6 +697,29 @@
MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x17059
>;
};
+
+ pinctrl_usdhc2_1_100mhz: usdhc2grp-1-100mhz {
+ fsl,pins = <
+ MX6SL_PAD_SD2_CMD__SD2_CMD 0x170b9
+ MX6SL_PAD_SD2_CLK__SD2_CLK 0x100b9
+ MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
+ MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
+ MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
+ MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc2_1_200mhz: usdhc2grp-1-200mhz {
+ fsl,pins = <
+ MX6SL_PAD_SD2_CMD__SD2_CMD 0x170f9
+ MX6SL_PAD_SD2_CLK__SD2_CLK 0x100f9
+ MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
+ MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
+ MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
+ MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x170f9
+ >;
+ };
+
};
usdhc3 {
@@ -598,6 +733,28 @@
MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x17059
>;
};
+
+ pinctrl_usdhc3_1_100mhz: usdhc3grp-1-100mhz {
+ fsl,pins = <
+ MX6SL_PAD_SD3_CMD__SD3_CMD 0x170b9
+ MX6SL_PAD_SD3_CLK__SD3_CLK 0x100b9
+ MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x170b9
+ MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x170b9
+ MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x170b9
+ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc3_1_200mhz: usdhc3grp-1-200mhz {
+ fsl,pins = <
+ MX6SL_PAD_SD3_CMD__SD3_CMD 0x170f9
+ MX6SL_PAD_SD3_CLK__SD3_CLK 0x100f9
+ MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x170f9
+ MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x170f9
+ MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x170f9
+ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x170f9
+ >;
+ };
};
};
@@ -619,7 +776,8 @@
<&clks IMX6SL_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
- fsl,sdma-ram-script-name = "imx/sdma/sdma-imx6sl.bin";
+ /* imx6sl reuses imx6q sdma firmware */
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx6q.bin";
};
pxp: pxp@020f0000 {
@@ -663,7 +821,7 @@
usbotg2: usb@02184200 {
compatible = "fsl,imx6sl-usb", "fsl,imx27-usb";
reg = <0x02184200 0x200>;
- interrupts = <0 40 0x04>;
+ interrupts = <0 42 0x04>;
clocks = <&clks IMX6SL_CLK_USBOH3>;
fsl,usbphy = <&usbphy2>;
fsl,usbmisc = <&usbmisc 1>;
@@ -673,7 +831,7 @@
usbh: usb@02184400 {
compatible = "fsl,imx6sl-usb", "fsl,imx27-usb";
reg = <0x02184400 0x200>;
- interrupts = <0 42 0x04>;
+ interrupts = <0 40 0x04>;
clocks = <&clks IMX6SL_CLK_USBOH3>;
fsl,usbmisc = <&usbmisc 2>;
status = "disabled";
diff --git a/arch/arm/boot/dts/integrator.dtsi b/arch/arm/boot/dts/integrator.dtsi
index 813b91d7bea2..0f06f8687b0b 100644
--- a/arch/arm/boot/dts/integrator.dtsi
+++ b/arch/arm/boot/dts/integrator.dtsi
@@ -5,6 +5,11 @@
/include/ "skeleton.dtsi"
/ {
+ core-module@10000000 {
+ compatible = "arm,core-module-integrator";
+ reg = <0x10000000 0x200>;
+ };
+
timer@13000000 {
reg = <0x13000000 0x100>;
interrupt-parent = <&pic>;
diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts
index b6b82eca8d1e..e6be9315ff0a 100644
--- a/arch/arm/boot/dts/integratorap.dts
+++ b/arch/arm/boot/dts/integratorap.dts
@@ -19,8 +19,11 @@
};
syscon {
- /* AP system controller registers */
+ compatible = "arm,integrator-ap-syscon";
reg = <0x11000000 0x100>;
+ interrupt-parent = <&pic>;
+ /* These are the logical module IRQs */
+ interrupts = <9>, <10>, <11>, <12>;
};
timer0: timer@13000000 {
diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts
index ff1aea0ee043..7deb3a3182b4 100644
--- a/arch/arm/boot/dts/integratorcp.dts
+++ b/arch/arm/boot/dts/integratorcp.dts
@@ -9,29 +9,28 @@
model = "ARM Integrator/CP";
compatible = "arm,integrator-cp";
- aliases {
- arm,timer-primary = &timer2;
- arm,timer-secondary = &timer1;
- };
-
chosen {
bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk";
};
- cpcon {
- /* CP controller registers */
+ syscon {
+ compatible = "arm,integrator-cp-syscon";
reg = <0xcb000000 0x100>;
};
timer0: timer@13000000 {
+ /* TIMER0 runs @ 25MHz */
compatible = "arm,integrator-cp-timer";
+ status = "disabled";
};
timer1: timer@13000100 {
+ /* TIMER1 runs @ 1MHz */
compatible = "arm,integrator-cp-timer";
};
timer2: timer@13000200 {
+ /* TIMER2 runs @ 1MHz */
compatible = "arm,integrator-cp-timer";
};
diff --git a/arch/arm/boot/dts/keystone-clocks.dtsi b/arch/arm/boot/dts/keystone-clocks.dtsi
new file mode 100644
index 000000000000..d6713b113258
--- /dev/null
+++ b/arch/arm/boot/dts/keystone-clocks.dtsi
@@ -0,0 +1,821 @@
+/*
+ * Device Tree Source for Keystone 2 clock tree
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+clocks {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ refclkmain: refclkmain {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <122880000>;
+ clock-output-names = "refclk-main";
+ };
+
+ mainpllclk: mainpllclk@2310110 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,main-pll-clock";
+ clocks = <&refclkmain>;
+ reg = <0x02620350 4>, <0x02310110 4>;
+ reg-names = "control", "multiplier";
+ fixed-postdiv = <2>;
+ };
+
+ papllclk: papllclk@2620358 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,pll-clock";
+ clocks = <&refclkmain>;
+ clock-output-names = "pa-pll-clk";
+ reg = <0x02620358 4>;
+ reg-names = "control";
+ fixed-postdiv = <6>;
+ };
+
+ ddr3allclk: ddr3apllclk@2620360 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,pll-clock";
+ clocks = <&refclkmain>;
+ clock-output-names = "ddr-3a-pll-clk";
+ reg = <0x02620360 4>;
+ reg-names = "control";
+ fixed-postdiv = <6>;
+ };
+
+ ddr3bllclk: ddr3bpllclk@2620368 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,pll-clock";
+ clocks = <&refclkmain>;
+ clock-output-names = "ddr-3b-pll-clk";
+ reg = <0x02620368 4>;
+ reg-names = "control";
+ fixed-postdiv = <6>;
+ };
+
+ armpllclk: armpllclk@2620370 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,pll-clock";
+ clocks = <&refclkmain>;
+ clock-output-names = "arm-pll-clk";
+ reg = <0x02620370 4>;
+ reg-names = "control";
+ fixed-postdiv = <6>;
+ };
+
+ mainmuxclk: mainmuxclk@2310108 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,pll-mux-clock";
+ clocks = <&mainpllclk>, <&refclkmain>;
+ reg = <0x02310108 4>;
+ bit-shift = <23>;
+ bit-mask = <1>;
+ clock-output-names = "mainmuxclk";
+ };
+
+ chipclk1: chipclk1 {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&mainmuxclk>;
+ clock-div = <1>;
+ clock-mult = <1>;
+ clock-output-names = "chipclk1";
+ };
+
+ chipclk1rstiso: chipclk1rstiso {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&mainmuxclk>;
+ clock-div = <1>;
+ clock-mult = <1>;
+ clock-output-names = "chipclk1rstiso";
+ };
+
+ gemtraceclk: gemtraceclk@2310120 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,pll-divider-clock";
+ clocks = <&mainmuxclk>;
+ reg = <0x02310120 4>;
+ bit-shift = <0>;
+ bit-mask = <8>;
+ clock-output-names = "gemtraceclk";
+ };
+
+ chipstmxptclk: chipstmxptclk {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,pll-divider-clock";
+ clocks = <&mainmuxclk>;
+ reg = <0x02310164 4>;
+ bit-shift = <0>;
+ bit-mask = <8>;
+ clock-output-names = "chipstmxptclk";
+ };
+
+ chipclk12: chipclk12 {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&chipclk1>;
+ clock-div = <2>;
+ clock-mult = <1>;
+ clock-output-names = "chipclk12";
+ };
+
+ chipclk13: chipclk13 {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&chipclk1>;
+ clock-div = <3>;
+ clock-mult = <1>;
+ clock-output-names = "chipclk13";
+ };
+
+ chipclk14: chipclk14 {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&chipclk1>;
+ clock-div = <4>;
+ clock-mult = <1>;
+ clock-output-names = "chipclk14";
+ };
+
+ chipclk16: chipclk16 {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&chipclk1>;
+ clock-div = <6>;
+ clock-mult = <1>;
+ clock-output-names = "chipclk16";
+ };
+
+ chipclk112: chipclk112 {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&chipclk1>;
+ clock-div = <12>;
+ clock-mult = <1>;
+ clock-output-names = "chipclk112";
+ };
+
+ chipclk124: chipclk124 {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&chipclk1>;
+ clock-div = <24>;
+ clock-mult = <1>;
+ clock-output-names = "chipclk114";
+ };
+
+ chipclk1rstiso13: chipclk1rstiso13 {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&chipclk1rstiso>;
+ clock-div = <3>;
+ clock-mult = <1>;
+ clock-output-names = "chipclk1rstiso13";
+ };
+
+ chipclk1rstiso14: chipclk1rstiso14 {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&chipclk1rstiso>;
+ clock-div = <4>;
+ clock-mult = <1>;
+ clock-output-names = "chipclk1rstiso14";
+ };
+
+ chipclk1rstiso16: chipclk1rstiso16 {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&chipclk1rstiso>;
+ clock-div = <6>;
+ clock-mult = <1>;
+ clock-output-names = "chipclk1rstiso16";
+ };
+
+ chipclk1rstiso112: chipclk1rstiso112 {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&chipclk1rstiso>;
+ clock-div = <12>;
+ clock-mult = <1>;
+ clock-output-names = "chipclk1rstiso112";
+ };
+
+ clkmodrst0: clkmodrst0 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk16>;
+ clock-output-names = "modrst0";
+ reg = <0x02350000 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+
+ clkusb: clkusb {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk16>;
+ clock-output-names = "usb";
+ reg = <0x02350008 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+ clkaemifspi: clkaemifspi {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk16>;
+ clock-output-names = "aemif-spi";
+ reg = <0x0235000c 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+
+ clkdebugsstrc: clkdebugsstrc {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "debugss-trc";
+ reg = <0x02350014 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+ clktetbtrc: clktetbtrc {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "tetb-trc";
+ reg = <0x02350018 0xb00>, <0x02350004 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <1>;
+ };
+
+ clkpa: clkpa {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk16>;
+ clock-output-names = "pa";
+ reg = <0x0235001c 0xb00>, <0x02350008 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <2>;
+ };
+
+ clkcpgmac: clkcpgmac {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkpa>;
+ clock-output-names = "cpgmac";
+ reg = <0x02350020 0xb00>, <0x02350008 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <2>;
+ };
+
+ clksa: clksa {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkpa>;
+ clock-output-names = "sa";
+ reg = <0x02350024 0xb00>, <0x02350008 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <2>;
+ };
+
+ clkpcie: clkpcie {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk12>;
+ clock-output-names = "pcie";
+ reg = <0x02350028 0xb00>, <0x0235000c 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <3>;
+ };
+
+ clksrio: clksrio {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk1rstiso13>;
+ clock-output-names = "srio";
+ reg = <0x0235002c 0xb00>, <0x02350010 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <4>;
+ };
+
+ clkhyperlink0: clkhyperlink0 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk12>;
+ clock-output-names = "hyperlink-0";
+ reg = <0x02350030 0xb00>, <0x02350014 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <5>;
+ };
+
+ clksr: clksr {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk1rstiso112>;
+ clock-output-names = "sr";
+ reg = <0x02350034 0xb00>, <0x02350018 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <6>;
+ };
+
+ clkmsmcsram: clkmsmcsram {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk1>;
+ clock-output-names = "msmcsram";
+ reg = <0x02350038 0xb00>, <0x0235001c 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <7>;
+ };
+
+ clkgem0: clkgem0 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk1>;
+ clock-output-names = "gem0";
+ reg = <0x0235003c 0xb00>, <0x02350020 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <8>;
+ };
+
+ clkgem1: clkgem1 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk1>;
+ clock-output-names = "gem1";
+ reg = <0x02350040 0xb00>, <0x02350024 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <9>;
+ };
+
+ clkgem2: clkgem2 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk1>;
+ clock-output-names = "gem2";
+ reg = <0x02350044 0xb00>, <0x02350028 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <10>;
+ };
+
+ clkgem3: clkgem3 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk1>;
+ clock-output-names = "gem3";
+ reg = <0x02350048 0xb00>, <0x0235002c 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <11>;
+ };
+
+ clkgem4: clkgem4 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk1>;
+ clock-output-names = "gem4";
+ reg = <0x0235004c 0xb00>, <0x02350030 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <12>;
+ };
+
+ clkgem5: clkgem5 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk1>;
+ clock-output-names = "gem5";
+ reg = <0x02350050 0xb00>, <0x02350034 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <13>;
+ };
+
+ clkgem6: clkgem6 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk1>;
+ clock-output-names = "gem6";
+ reg = <0x02350054 0xb00>, <0x02350038 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <14>;
+ };
+
+ clkgem7: clkgem7 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk1>;
+ clock-output-names = "gem7";
+ reg = <0x02350058 0xb00>, <0x0235003c 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <15>;
+ };
+
+ clkddr30: clkddr30 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk12>;
+ clock-output-names = "ddr3-0";
+ reg = <0x0235005c 0xb00>, <0x02350040 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <16>;
+ };
+
+ clkddr31: clkddr31 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "ddr3-1";
+ reg = <0x02350060 0xb00>, <0x02350040 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <16>;
+ };
+
+ clktac: clktac {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "tac";
+ reg = <0x02350064 0xb00>, <0x02350044 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <17>;
+ };
+
+ clkrac01: clkrac01 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "rac-01";
+ reg = <0x02350068 0xb00>, <0x02350044 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <17>;
+ };
+
+ clkrac23: clkrac23 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "rac-23";
+ reg = <0x0235006c 0xb00>, <0x02350048 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <18>;
+ };
+
+ clkfftc0: clkfftc0 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "fftc-0";
+ reg = <0x02350070 0xb00>, <0x0235004c 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <19>;
+ };
+
+ clkfftc1: clkfftc1 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "fftc-1";
+ reg = <0x02350074 0xb00>, <0x0235004c 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <19>;
+ };
+
+ clkfftc2: clkfftc2 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "fftc-2";
+ reg = <0x02350078 0xb00>, <0x02350050 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <20>;
+ };
+
+ clkfftc3: clkfftc3 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "fftc-3";
+ reg = <0x0235007c 0xb00>, <0x02350050 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <20>;
+ };
+
+ clkfftc4: clkfftc4 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "fftc-4";
+ reg = <0x02350080 0xb00>, <0x02350050 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <20>;
+ };
+
+ clkfftc5: clkfftc5 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "fftc-5";
+ reg = <0x02350084 0xb00>, <0x02350050 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <20>;
+ };
+
+ clkaif: clkaif {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "aif";
+ reg = <0x02350088 0xb00>, <0x02350054 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <21>;
+ };
+
+ clktcp3d0: clktcp3d0 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "tcp3d-0";
+ reg = <0x0235008c 0xb00>, <0x02350058 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <22>;
+ };
+
+ clktcp3d1: clktcp3d1 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "tcp3d-1";
+ reg = <0x02350090 0xb00>, <0x02350058 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <22>;
+ };
+
+ clktcp3d2: clktcp3d2 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "tcp3d-2";
+ reg = <0x02350094 0xb00>, <0x0235005c 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <23>;
+ };
+
+ clktcp3d3: clktcp3d3 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "tcp3d-3";
+ reg = <0x02350098 0xb00>, <0x0235005c 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <23>;
+ };
+
+ clkvcp0: clkvcp0 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "vcp-0";
+ reg = <0x0235009c 0xb00>, <0x02350060 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <24>;
+ };
+
+ clkvcp1: clkvcp1 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "vcp-1";
+ reg = <0x023500a0 0xb00>, <0x02350060 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <24>;
+ };
+
+ clkvcp2: clkvcp2 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "vcp-2";
+ reg = <0x023500a4 0xb00>, <0x02350060 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <24>;
+ };
+
+ clkvcp3: clkvcp3 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "vcp-3";
+ reg = <0x023500a8 0xb00>, <0x02350060 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <24>;
+ };
+
+ clkvcp4: clkvcp4 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "vcp-4";
+ reg = <0x023500ac 0xb00>, <0x02350064 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <25>;
+ };
+
+ clkvcp5: clkvcp5 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "vcp-5";
+ reg = <0x023500b0 0xb00>, <0x02350064 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <25>;
+ };
+
+ clkvcp6: clkvcp6 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "vcp-6";
+ reg = <0x023500b4 0xb00>, <0x02350064 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <25>;
+ };
+
+ clkvcp7: clkvcp7 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "vcp-7";
+ reg = <0x023500b8 0xb00>, <0x02350064 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <25>;
+ };
+
+ clkbcp: clkbcp {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "bcp";
+ reg = <0x023500bc 0xb00>, <0x02350068 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <26>;
+ };
+
+ clkdxb: clkdxb {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "dxb";
+ reg = <0x023500c0 0xb00>, <0x0235006c 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <27>;
+ };
+
+ clkhyperlink1: clkhyperlink1 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk12>;
+ clock-output-names = "hyperlink-1";
+ reg = <0x023500c4 0xb00>, <0x02350070 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <28>;
+ };
+
+ clkxge: clkxge {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&chipclk13>;
+ clock-output-names = "xge";
+ reg = <0x023500c8 0xb00>, <0x02350074 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <29>;
+ };
+
+ clkwdtimer0: clkwdtimer0 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkmodrst0>;
+ clock-output-names = "timer0";
+ reg = <0x02350000 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+ clkwdtimer1: clkwdtimer1 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkmodrst0>;
+ clock-output-names = "timer1";
+ reg = <0x02350000 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+ clkwdtimer2: clkwdtimer2 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkmodrst0>;
+ clock-output-names = "timer2";
+ reg = <0x02350000 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+ clkwdtimer3: clkwdtimer3 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkmodrst0>;
+ clock-output-names = "timer3";
+ reg = <0x02350000 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+ clkuart0: clkuart0 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkmodrst0>;
+ clock-output-names = "uart0";
+ reg = <0x02350000 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+ clkuart1: clkuart1 {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkmodrst0>;
+ clock-output-names = "uart1";
+ reg = <0x02350000 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+ clkaemif: clkaemif {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkaemifspi>;
+ clock-output-names = "aemif";
+ reg = <0x02350000 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+ clkusim: clkusim {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkmodrst0>;
+ clock-output-names = "usim";
+ reg = <0x02350000 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+ clki2c: clki2c {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkmodrst0>;
+ clock-output-names = "i2c";
+ reg = <0x02350000 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+ clkspi: clkspi {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkaemifspi>;
+ clock-output-names = "spi";
+ reg = <0x02350000 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+ clkgpio: clkgpio {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkmodrst0>;
+ clock-output-names = "gpio";
+ reg = <0x02350000 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+
+ clkkeymgr: clkkeymgr {
+ #clock-cells = <0>;
+ compatible = "ti,keystone,psc-clock";
+ clocks = <&clkmodrst0>;
+ clock-output-names = "keymgr";
+ reg = <0x02350000 0xb00>, <0x02350000 0x400>;
+ reg-names = "control", "domain";
+ domain-id = <0>;
+ };
+};
diff --git a/arch/arm/boot/dts/keystone.dts b/arch/arm/boot/dts/keystone.dts
index a68e34bbecb2..100bdf52b847 100644
--- a/arch/arm/boot/dts/keystone.dts
+++ b/arch/arm/boot/dts/keystone.dts
@@ -100,13 +100,15 @@
reg = <0x023100e8 4>; /* pll reset control reg */
};
+ /include/ "keystone-clocks.dtsi"
+
uart0: serial@02530c00 {
compatible = "ns16550a";
current-speed = <115200>;
reg-shift = <2>;
reg-io-width = <4>;
reg = <0x02530c00 0x100>;
- clock-frequency = <133120000>;
+ clocks = <&clkuart0>;
interrupts = <GIC_SPI 277 IRQ_TYPE_EDGE_RISING>;
};
@@ -116,9 +118,66 @@
reg-shift = <2>;
reg-io-width = <4>;
reg = <0x02531000 0x100>;
- clock-frequency = <133120000>;
+ clocks = <&clkuart1>;
interrupts = <GIC_SPI 280 IRQ_TYPE_EDGE_RISING>;
};
+ i2c0: i2c@2530000 {
+ compatible = "ti,davinci-i2c";
+ reg = <0x02530000 0x400>;
+ clock-frequency = <100000>;
+ clocks = <&clki2c>;
+ interrupts = <GIC_SPI 283 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dtt@50 {
+ compatible = "at,24c1024";
+ reg = <0x50>;
+ };
+ };
+
+ i2c1: i2c@2530400 {
+ compatible = "ti,davinci-i2c";
+ reg = <0x02530400 0x400>;
+ clock-frequency = <100000>;
+ clocks = <&clki2c>;
+ interrupts = <GIC_SPI 286 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ i2c2: i2c@2530800 {
+ compatible = "ti,davinci-i2c";
+ reg = <0x02530800 0x400>;
+ clock-frequency = <100000>;
+ clocks = <&clki2c>;
+ interrupts = <GIC_SPI 289 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ spi0: spi@21000400 {
+ compatible = "ti,dm6441-spi";
+ reg = <0x21000400 0x200>;
+ num-cs = <4>;
+ ti,davinci-spi-intr-line = <0>;
+ interrupts = <GIC_SPI 292 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkspi>;
+ };
+
+ spi1: spi@21000600 {
+ compatible = "ti,dm6441-spi";
+ reg = <0x21000600 0x200>;
+ num-cs = <4>;
+ ti,davinci-spi-intr-line = <0>;
+ interrupts = <GIC_SPI 296 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkspi>;
+ };
+
+ spi2: spi@21000800 {
+ compatible = "ti,dm6441-spi";
+ reg = <0x21000800 0x200>;
+ num-cs = <4>;
+ ti,davinci-spi-intr-line = <0>;
+ interrupts = <GIC_SPI 300 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkspi>;
+ };
};
};
diff --git a/arch/arm/boot/dts/kirkwood-db-88f6281.dts b/arch/arm/boot/dts/kirkwood-db-88f6281.dts
index 72c4b0a0366f..c39dd766c75a 100644
--- a/arch/arm/boot/dts/kirkwood-db-88f6281.dts
+++ b/arch/arm/boot/dts/kirkwood-db-88f6281.dts
@@ -19,7 +19,6 @@
compatible = "marvell,db-88f6281-bp", "marvell,kirkwood-88f6281", "marvell,kirkwood";
mbus {
- ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>;
pcie-controller {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-db-88f6282.dts b/arch/arm/boot/dts/kirkwood-db-88f6282.dts
index 36c411d34926..701c6b6cdaa2 100644
--- a/arch/arm/boot/dts/kirkwood-db-88f6282.dts
+++ b/arch/arm/boot/dts/kirkwood-db-88f6282.dts
@@ -19,7 +19,6 @@
compatible = "marvell,db-88f6282-bp", "marvell,kirkwood-88f6282", "marvell,kirkwood";
mbus {
- ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>;
pcie-controller {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-db.dtsi b/arch/arm/boot/dts/kirkwood-db.dtsi
index c0e2a5879174..053aa20fb30f 100644
--- a/arch/arm/boot/dts/kirkwood-db.dtsi
+++ b/arch/arm/boot/dts/kirkwood-db.dtsi
@@ -39,28 +39,6 @@
status = "ok";
};
- nand@3000000 {
- pinctrl-0 = <&pmx_nand>;
- pinctrl-names = "default";
- chip-delay = <25>;
- status = "okay";
-
- partition@0 {
- label = "uboot";
- reg = <0x0 0x100000>;
- };
-
- partition@100000 {
- label = "uImage";
- reg = <0x100000 0x400000>;
- };
-
- partition@500000 {
- label = "root";
- reg = <0x500000 0x1fb00000>;
- };
- };
-
sata@80000 {
nr-ports = <2>;
status = "okay";
@@ -80,6 +58,28 @@
};
};
+&nand {
+ pinctrl-0 = <&pmx_nand>;
+ pinctrl-names = "default";
+ chip-delay = <25>;
+ status = "okay";
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0 0x100000>;
+ };
+
+ partition@100000 {
+ label = "uImage";
+ reg = <0x100000 0x400000>;
+ };
+
+ partition@500000 {
+ label = "root";
+ reg = <0x500000 0x1fb00000>;
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
index d544f77a4ca4..aefa375a550d 100644
--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
@@ -148,44 +148,6 @@
status = "okay";
nr-ports = <2>;
};
-
- nand@3000000 {
- pinctrl-0 = <&pmx_nand>;
- pinctrl-names = "default";
- status = "okay";
- chip-delay = <35>;
-
- partition@0 {
- label = "u-boot";
- reg = <0x0000000 0x100000>;
- read-only;
- };
-
- partition@100000 {
- label = "uImage";
- reg = <0x0100000 0x500000>;
- };
-
- partition@600000 {
- label = "ramdisk";
- reg = <0x0600000 0x500000>;
- };
-
- partition@b00000 {
- label = "image";
- reg = <0x0b00000 0x6600000>;
- };
-
- partition@7100000 {
- label = "mini firmware";
- reg = <0x7100000 0xa00000>;
- };
-
- partition@7b00000 {
- label = "config";
- reg = <0x7b00000 0x500000>;
- };
- };
};
regulators {
@@ -220,6 +182,44 @@
};
};
+&nand {
+ pinctrl-0 = <&pmx_nand>;
+ pinctrl-names = "default";
+ status = "okay";
+ chip-delay = <35>;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x100000>;
+ read-only;
+ };
+
+ partition@100000 {
+ label = "uImage";
+ reg = <0x0100000 0x500000>;
+ };
+
+ partition@600000 {
+ label = "ramdisk";
+ reg = <0x0600000 0x500000>;
+ };
+
+ partition@b00000 {
+ label = "image";
+ reg = <0x0b00000 0x6600000>;
+ };
+
+ partition@7100000 {
+ label = "mini firmware";
+ reg = <0x7100000 0xa00000>;
+ };
+
+ partition@7b00000 {
+ label = "config";
+ reg = <0x7b00000 0x500000>;
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-dockstar.dts b/arch/arm/boot/dts/kirkwood-dockstar.dts
index 59a2117c35a7..33ff368fbfa5 100644
--- a/arch/arm/boot/dts/kirkwood-dockstar.dts
+++ b/arch/arm/boot/dts/kirkwood-dockstar.dts
@@ -34,26 +34,6 @@
serial@12000 {
status = "ok";
};
-
- nand@3000000 {
- status = "okay";
-
- partition@0 {
- label = "u-boot";
- reg = <0x0000000 0x100000>;
- read-only;
- };
-
- partition@100000 {
- label = "uImage";
- reg = <0x0100000 0x400000>;
- };
-
- partition@500000 {
- label = "data";
- reg = <0x0500000 0xfb00000>;
- };
- };
};
gpio-leds {
compatible = "gpio-leds";
@@ -91,6 +71,26 @@
};
};
+&nand {
+ status = "okay";
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x100000>;
+ read-only;
+ };
+
+ partition@100000 {
+ label = "uImage";
+ reg = <0x0100000 0x400000>;
+ };
+
+ partition@500000 {
+ label = "data";
+ reg = <0x0500000 0xfb00000>;
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts
index 6f7c7d7ecf2a..a43bebb25110 100644
--- a/arch/arm/boot/dts/kirkwood-goflexnet.dts
+++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts
@@ -67,31 +67,6 @@
status = "ok";
};
- nand@3000000 {
- chip-delay = <40>;
- status = "okay";
-
- partition@0 {
- label = "u-boot";
- reg = <0x0000000 0x100000>;
- read-only;
- };
-
- partition@100000 {
- label = "uImage";
- reg = <0x0100000 0x400000>;
- };
-
- partition@500000 {
- label = "pogoplug";
- reg = <0x0500000 0x2000000>;
- };
-
- partition@2500000 {
- label = "root";
- reg = <0x02500000 0xd800000>;
- };
- };
sata@80000 {
status = "okay";
nr-ports = <2>;
@@ -171,6 +146,32 @@
};
};
+&nand {
+ chip-delay = <40>;
+ status = "okay";
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x100000>;
+ read-only;
+ };
+
+ partition@100000 {
+ label = "uImage";
+ reg = <0x0100000 0x400000>;
+ };
+
+ partition@500000 {
+ label = "pogoplug";
+ reg = <0x0500000 0x2000000>;
+ };
+
+ partition@2500000 {
+ label = "root";
+ reg = <0x02500000 0xd800000>;
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts b/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
index 6548b9dc6855..d30a91a5047d 100644
--- a/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
+++ b/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
@@ -40,26 +40,6 @@
status = "ok";
};
- nand@3000000 {
- status = "okay";
-
- partition@0 {
- label = "u-boot";
- reg = <0x00000000 0x00100000>;
- read-only;
- };
-
- partition@100000 {
- label = "uImage";
- reg = <0x00100000 0x00400000>;
- };
-
- partition@500000 {
- label = "data";
- reg = <0x00500000 0x1fb00000>;
- };
- };
-
sata@80000 {
status = "okay";
nr-ports = <1>;
@@ -97,6 +77,26 @@
};
};
+&nand {
+ status = "okay";
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x00000000 0x00100000>;
+ read-only;
+ };
+
+ partition@100000 {
+ label = "uImage";
+ reg = <0x00100000 0x00400000>;
+ };
+
+ partition@500000 {
+ label = "data";
+ reg = <0x00500000 0x1fb00000>;
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts
index cb711a3bd983..c5fb02f7ebc3 100644
--- a/arch/arm/boot/dts/kirkwood-ib62x0.dts
+++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts
@@ -5,7 +5,7 @@
/ {
model = "RaidSonic ICY BOX IB-NAS62x0 (Rev B)";
- compatible = "raidsonic,ib-nas6210-b", "raidsonic,ib-nas6220-b", "raidsonic,ib-nas6210", "raidsonic,ib-nas6220", "raidsonic,ib-nas62x0", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+ compatible = "raidsonic,ib-nas6210-b", "raidsonic,ib-nas6220-b", "raidsonic,ib-nas6210", "raidsonic,ib-nas6220", "raidsonic,ib-nas62x0", "marvell,kirkwood-88f6281", "marvell,kirkwood";
memory {
device_type = "memory";
@@ -43,6 +43,7 @@
marvell,function = "gpio";
};
};
+
serial@12000 {
status = "okay";
};
@@ -51,28 +52,6 @@
status = "okay";
nr-ports = <2>;
};
-
- nand@3000000 {
- status = "okay";
- pinctrl-0 = <&pmx_nand>;
- pinctrl-names = "default";
-
- partition@0 {
- label = "u-boot";
- reg = <0x0000000 0x100000>;
- };
-
- partition@100000 {
- label = "uImage";
- reg = <0x0100000 0x600000>;
- };
-
- partition@700000 {
- label = "root";
- reg = <0x0700000 0xf900000>;
- };
-
- };
};
gpio_keys {
@@ -93,6 +72,7 @@
gpios = <&gpio0 28 1>;
};
};
+
gpio-leds {
compatible = "gpio-leds";
pinctrl-0 = <&pmx_led_os_red &pmx_led_os_green
@@ -113,13 +93,39 @@
gpios = <&gpio0 27 0>;
};
};
+
gpio_poweroff {
compatible = "gpio-poweroff";
pinctrl-0 = <&pmx_power_off>;
pinctrl-names = "default";
gpios = <&gpio0 24 0>;
};
+};
+
+&nand {
+ status = "okay";
+ pinctrl-0 = <&pmx_nand>;
+ pinctrl-names = "default";
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0xe0000>;
+ };
+ partition@e0000 {
+ label = "u-boot environment";
+ reg = <0xe0000 0x20000>;
+ };
+
+ partition@100000 {
+ label = "uImage";
+ reg = <0x0100000 0x600000>;
+ };
+
+ partition@700000 {
+ label = "root";
+ reg = <0x0700000 0xf900000>;
+ };
};
@@ -134,6 +140,7 @@
&eth0 {
status = "okay";
+
ethernet0-port@0 {
phy-handle = <&ethphy0>;
};
diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts
index 0323f017eeed..4a62b206f680 100644
--- a/arch/arm/boot/dts/kirkwood-iconnect.dts
+++ b/arch/arm/boot/dts/kirkwood-iconnect.dts
@@ -19,7 +19,6 @@
};
mbus {
- ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>;
pcie-controller {
status = "okay";
@@ -83,35 +82,6 @@
serial@12000 {
status = "ok";
};
-
- nand@3000000 {
- status = "okay";
-
- partition@0 {
- label = "uboot";
- reg = <0x0000000 0xc0000>;
- };
-
- partition@a0000 {
- label = "env";
- reg = <0xa0000 0x20000>;
- };
-
- partition@100000 {
- label = "zImage";
- reg = <0x100000 0x300000>;
- };
-
- partition@540000 {
- label = "initrd";
- reg = <0x540000 0x300000>;
- };
-
- partition@980000 {
- label = "boot";
- reg = <0x980000 0x1f400000>;
- };
- };
};
gpio-leds {
@@ -180,6 +150,35 @@
};
};
+&nand {
+ status = "okay";
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0000000 0xc0000>;
+ };
+
+ partition@a0000 {
+ label = "env";
+ reg = <0xa0000 0x20000>;
+ };
+
+ partition@100000 {
+ label = "zImage";
+ reg = <0x100000 0x300000>;
+ };
+
+ partition@540000 {
+ label = "initrd";
+ reg = <0x540000 0x300000>;
+ };
+
+ partition@980000 {
+ label = "boot";
+ reg = <0x980000 0x1f400000>;
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
index df8447442b37..d15395d671ed 100644
--- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
@@ -113,31 +113,6 @@
status = "ok";
};
- nand@3000000 {
- status = "okay";
-
- partition@0 {
- label = "u-boot";
- reg = <0x0000000 0x100000>;
- read-only;
- };
-
- partition@a0000 {
- label = "env";
- reg = <0xa0000 0x20000>;
- read-only;
- };
-
- partition@100000 {
- label = "uImage";
- reg = <0x100000 0x300000>;
- };
-
- partition@400000 {
- label = "uInitrd";
- reg = <0x540000 0x1000000>;
- };
- };
sata@80000 {
status = "okay";
nr-ports = <2>;
@@ -195,6 +170,32 @@
};
};
+&nand {
+ status = "okay";
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x100000>;
+ read-only;
+ };
+
+ partition@a0000 {
+ label = "env";
+ reg = <0xa0000 0x20000>;
+ read-only;
+ };
+
+ partition@100000 {
+ label = "uImage";
+ reg = <0x100000 0x300000>;
+ };
+
+ partition@400000 {
+ label = "uInitrd";
+ reg = <0x540000 0x1000000>;
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
index 6899408482d2..cd44f37e54b5 100644
--- a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
+++ b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
@@ -34,13 +34,6 @@
serial@12000 {
status = "ok";
};
-
- nand@3000000 {
- pinctrl-0 = <&pmx_nand>;
- pinctrl-names = "default";
- status = "ok";
- chip-delay = <25>;
- };
};
i2c@0 {
@@ -51,6 +44,13 @@
};
};
+&nand {
+ pinctrl-0 = <&pmx_nand>;
+ pinctrl-names = "default";
+ status = "ok";
+ chip-delay = <25>;
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-mplcec4.dts b/arch/arm/boot/dts/kirkwood-mplcec4.dts
index ce2b94b513db..6c1ec2786e6e 100644
--- a/arch/arm/boot/dts/kirkwood-mplcec4.dts
+++ b/arch/arm/boot/dts/kirkwood-mplcec4.dts
@@ -17,7 +17,6 @@
};
mbus {
- ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>;
pcie-controller {
status = "okay";
@@ -96,37 +95,6 @@
pinctrl-names = "default";
};
- nand@3000000 {
- pinctrl-0 = <&pmx_nand>;
- pinctrl-names = "default";
- status = "okay";
-
- partition@0 {
- label = "uboot";
- reg = <0x0000000 0x100000>;
- };
-
- partition@100000 {
- label = "env";
- reg = <0x100000 0x80000>;
- };
-
- partition@180000 {
- label = "fdt";
- reg = <0x180000 0x80000>;
- };
-
- partition@200000 {
- label = "kernel";
- reg = <0x200000 0x400000>;
- };
-
- partition@600000 {
- label = "rootfs";
- reg = <0x600000 0x1fa00000>;
- };
- };
-
rtc@10300 {
status = "disabled";
};
@@ -194,6 +162,37 @@
};
};
+&nand {
+ pinctrl-0 = <&pmx_nand>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0000000 0x100000>;
+ };
+
+ partition@100000 {
+ label = "env";
+ reg = <0x100000 0x80000>;
+ };
+
+ partition@180000 {
+ label = "fdt";
+ reg = <0x180000 0x80000>;
+ };
+
+ partition@200000 {
+ label = "kernel";
+ reg = <0x200000 0x400000>;
+ };
+
+ partition@600000 {
+ label = "rootfs";
+ reg = <0x600000 0x1fa00000>;
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-netgear_readynas_duo_v2.dts b/arch/arm/boot/dts/kirkwood-netgear_readynas_duo_v2.dts
index 874857ea9cb8..e6a102cf424c 100644
--- a/arch/arm/boot/dts/kirkwood-netgear_readynas_duo_v2.dts
+++ b/arch/arm/boot/dts/kirkwood-netgear_readynas_duo_v2.dts
@@ -17,7 +17,6 @@
};
mbus {
- ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>;
pcie-controller {
status = "okay";
@@ -98,36 +97,6 @@
status = "okay";
};
- nand@3000000 {
- status = "okay";
-
- partition@0 {
- label = "u-boot";
- reg = <0x0000000 0x180000>;
- read-only;
- };
-
- partition@180000 {
- label = "u-boot-env";
- reg = <0x180000 0x20000>;
- };
-
- partition@200000 {
- label = "uImage";
- reg = <0x0200000 0x600000>;
- };
-
- partition@800000 {
- label = "minirootfs";
- reg = <0x0800000 0x1000000>;
- };
-
- partition@1800000 {
- label = "jffs2";
- reg = <0x1800000 0x6800000>;
- };
- };
-
sata@80000 {
status = "okay";
nr-ports = <2>;
@@ -208,6 +177,36 @@
};
};
+&nand {
+ status = "okay";
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x180000>;
+ read-only;
+ };
+
+ partition@180000 {
+ label = "u-boot-env";
+ reg = <0x180000 0x20000>;
+ };
+
+ partition@200000 {
+ label = "uImage";
+ reg = <0x0200000 0x600000>;
+ };
+
+ partition@800000 {
+ label = "minirootfs";
+ reg = <0x0800000 0x1000000>;
+ };
+
+ partition@1800000 {
+ label = "jffs2";
+ reg = <0x1800000 0x6800000>;
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi b/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
index 06267a91de38..e3f915defd3d 100644
--- a/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
+++ b/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
@@ -27,49 +27,6 @@
nr-ports = <2>;
};
- nand@3000000 {
- status = "okay";
- chip-delay = <35>;
-
- partition@0 {
- label = "uboot";
- reg = <0x0000000 0x0100000>;
- read-only;
- };
- partition@100000 {
- label = "uboot_env";
- reg = <0x0100000 0x0080000>;
- };
- partition@180000 {
- label = "key_store";
- reg = <0x0180000 0x0080000>;
- };
- partition@200000 {
- label = "info";
- reg = <0x0200000 0x0080000>;
- };
- partition@280000 {
- label = "etc";
- reg = <0x0280000 0x0a00000>;
- };
- partition@c80000 {
- label = "kernel_1";
- reg = <0x0c80000 0x0a00000>;
- };
- partition@1680000 {
- label = "rootfs1";
- reg = <0x1680000 0x2fc0000>;
- };
- partition@4640000 {
- label = "kernel_2";
- reg = <0x4640000 0x0a00000>;
- };
- partition@5040000 {
- label = "rootfs2";
- reg = <0x5040000 0x2fc0000>;
- };
- };
-
pcie-controller {
status = "okay";
@@ -105,3 +62,46 @@
};
};
};
+
+&nand {
+ status = "okay";
+ chip-delay = <35>;
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0000000 0x0100000>;
+ read-only;
+ };
+ partition@100000 {
+ label = "uboot_env";
+ reg = <0x0100000 0x0080000>;
+ };
+ partition@180000 {
+ label = "key_store";
+ reg = <0x0180000 0x0080000>;
+ };
+ partition@200000 {
+ label = "info";
+ reg = <0x0200000 0x0080000>;
+ };
+ partition@280000 {
+ label = "etc";
+ reg = <0x0280000 0x0a00000>;
+ };
+ partition@c80000 {
+ label = "kernel_1";
+ reg = <0x0c80000 0x0a00000>;
+ };
+ partition@1680000 {
+ label = "rootfs1";
+ reg = <0x1680000 0x2fc0000>;
+ };
+ partition@4640000 {
+ label = "kernel_2";
+ reg = <0x4640000 0x0a00000>;
+ };
+ partition@5040000 {
+ label = "rootfs2";
+ reg = <0x5040000 0x2fc0000>;
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-nsa310.dts b/arch/arm/boot/dts/kirkwood-nsa310.dts
index 7aeae0c2c1f4..b5418bcaecce 100644
--- a/arch/arm/boot/dts/kirkwood-nsa310.dts
+++ b/arch/arm/boot/dts/kirkwood-nsa310.dts
@@ -15,7 +15,6 @@
};
mbus {
- ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>;
pcie-controller {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
index 85ccf8d8abb1..f0e3d213604c 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
@@ -29,43 +29,6 @@
pinctrl-names = "default";
};
- nand@3000000 {
- chip-delay = <25>;
- status = "okay";
- pinctrl-0 = <&pmx_nand>;
- pinctrl-names = "default";
-
- partition@0 {
- label = "uboot";
- reg = <0x0 0x90000>;
- };
-
- partition@90000 {
- label = "env";
- reg = <0x90000 0x44000>;
- };
-
- partition@d4000 {
- label = "test";
- reg = <0xd4000 0x24000>;
- };
-
- partition@f4000 {
- label = "conf";
- reg = <0xf4000 0x400000>;
- };
-
- partition@4f4000 {
- label = "linux";
- reg = <0x4f4000 0x1d20000>;
- };
-
- partition@2214000 {
- label = "user";
- reg = <0x2214000 0x1dec000>;
- };
- };
-
sata@80000 {
nr-ports = <1>;
status = "okay";
@@ -167,6 +130,43 @@
};
};
+&nand {
+ chip-delay = <25>;
+ status = "okay";
+ pinctrl-0 = <&pmx_nand>;
+ pinctrl-names = "default";
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0 0x90000>;
+ };
+
+ partition@90000 {
+ label = "env";
+ reg = <0x90000 0x44000>;
+ };
+
+ partition@d4000 {
+ label = "test";
+ reg = <0xd4000 0x24000>;
+ };
+
+ partition@f4000 {
+ label = "conf";
+ reg = <0xf4000 0x400000>;
+ };
+
+ partition@4f4000 {
+ label = "linux";
+ reg = <0x4f4000 0x1d20000>;
+ };
+
+ partition@2214000 {
+ label = "user";
+ reg = <0x2214000 0x1dec000>;
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
new file mode 100644
index 000000000000..851fb2a60f20
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
@@ -0,0 +1,223 @@
+/*
+ * Device Tree file for OpenBlocks A7 board
+ *
+ * Copyright (C) 2013 Free Electrons
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+#include "kirkwood.dtsi"
+#include "kirkwood-6282.dtsi"
+
+/ {
+ model = "Plat'Home OpenBlocksA7";
+ compatible = "plathome,openblocks-a7", "marvell,kirkwood-88f6283", "marvell,kirkwood";
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x40000000>; /* 1 GB */
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8 earlyprintk";
+ };
+
+ ocp@f1000000 {
+ serial@12000 {
+ status = "ok";
+ pinctrl-0 = <&pmx_uart0>;
+ pinctrl-names = "default";
+ };
+
+ serial@12100 {
+ status = "ok";
+ pinctrl-0 = <&pmx_uart1>;
+ pinctrl-names = "default";
+ };
+
+ sata@80000 {
+ nr-ports = <1>;
+ status = "okay";
+ };
+
+ i2c@11100 {
+ status = "okay";
+ pinctrl-0 = <&pmx_twsi1>;
+ pinctrl-names = "default";
+
+ s24c02: s24c02@50 {
+ compatible = "24c02";
+ reg = <0x50>;
+ };
+ };
+
+ pinctrl: pinctrl@10000 {
+ pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>;
+ pinctrl-names = "default";
+
+ pmx_uart0: pmx-uart0 {
+ marvell,pins = "mpp10", "mpp11", "mpp15",
+ "mpp16";
+ marvell,function = "uart0";
+ };
+
+ pmx_uart1: pmx-uart1 {
+ marvell,pins = "mpp13", "mpp14", "mpp8",
+ "mpp9";
+ marvell,function = "uart1";
+ };
+
+ pmx_sysrst: pmx-sysrst {
+ marvell,pins = "mpp6";
+ marvell,function = "sysrst";
+ };
+
+ pmx_dip_switches: pmx-dip-switches {
+ marvell,pins = "mpp44", "mpp45", "mpp46", "mpp47";
+ marvell,function = "gpio";
+ };
+
+ /*
+ * Accessible on connector J202. The MPP
+ * listed below are pin 1-7, pin 8 is unused,
+ * pin 9 is external reset input and pin 10 is
+ * ground.
+ */
+ pmx_gpio_header: pmx-gpio-header {
+ marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28",
+ "mpp35", "mpp34", "mpp40";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_init: pmx-init {
+ marvell,pins = "mpp38";
+ marvell,function = "gpio";
+ };
+
+ pmx_usb_oc: pmx-usb-oc {
+ marvell,pins = "mpp39";
+ marvell,function = "gpio";
+ };
+
+ pmx_leds: pmx-leds {
+ marvell,pins = "mpp41", "mpp42", "mpp43";
+ marvell,function = "gpio";
+ };
+
+ pmx_ge1: pmx-ge1 {
+ marvell,pins = "mpp20", "mpp21", "mpp22", "mpp23",
+ "mpp24", "mpp25", "mpp26", "mpp27",
+ "mpp30", "mpp31", "mpp32", "mpp33";
+ marvell,function = "ge1";
+ };
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-0 = <&pmx_leds>;
+ pinctrl-names = "default";
+
+ led-red {
+ label = "obsa7:red:stat";
+ gpios = <&gpio1 9 1>;
+ };
+
+ led-green {
+ label = "obsa7:green:stat";
+ gpios = <&gpio1 10 1>;
+ };
+
+ led-yellow {
+ label = "obsa7:yellow:stat";
+ gpios = <&gpio1 11 1>;
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&pmx_gpio_init>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@1 {
+ label = "Init Button";
+ linux,code = <116>;
+ gpios = <&gpio1 6 0>;
+ };
+ };
+};
+
+&nand {
+ chip-delay = <25>;
+ status = "okay";
+ pinctrl-0 = <&pmx_nand>;
+ pinctrl-names = "default";
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0 0x1c0000>;
+ };
+
+ partition@1c0000 {
+ label = "env";
+ reg = <0x1c0000 0x2c0000>;
+ };
+
+ partition@480000 {
+ label = "test";
+ reg = <0x480000 0x160000>;
+ };
+
+ partition@5e0000 {
+ label = "conf";
+ reg = <0x5e0000 0x540000>;
+ };
+
+ partition@b20000 {
+ label = "linux";
+ reg = <0xb20000 0x3d40000>;
+ };
+
+ partition@4860000 {
+ label = "user";
+ reg = <0x4860000 0xb7a0000>;
+ };
+};
+
+&mdio {
+ status = "okay";
+
+ ethphy0: ethernet-phy@0 {
+ device_type = "ethernet-phy";
+ reg = <0>;
+ };
+
+ ethphy1: ethernet-phy@1 {
+ device_type = "ethernet-phy";
+ reg = <1>;
+ };
+};
+
+&eth0 {
+ status = "okay";
+ ethernet0-port@0 {
+ phy-handle = <&ethphy0>;
+ };
+};
+
+&eth1 {
+ status = "okay";
+ pinctrl-0 = <&pmx_ge1>;
+ pinctrl-names = "default";
+ ethernet1-port@0 {
+ phy-handle = <&ethphy1>;
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-sheevaplug-common.dtsi b/arch/arm/boot/dts/kirkwood-sheevaplug-common.dtsi
index 5696b630b70b..1173d7fb31b2 100644
--- a/arch/arm/boot/dts/kirkwood-sheevaplug-common.dtsi
+++ b/arch/arm/boot/dts/kirkwood-sheevaplug-common.dtsi
@@ -48,27 +48,6 @@
pinctrl-names = "default";
status = "okay";
};
-
- nand@3000000 {
- pinctrl-0 = <&pmx_nand>;
- pinctrl-names = "default";
- status = "okay";
-
- partition@0 {
- label = "u-boot";
- reg = <0x0000000 0x100000>;
- };
-
- partition@100000 {
- label = "uImage";
- reg = <0x0100000 0x400000>;
- };
-
- partition@500000 {
- label = "root";
- reg = <0x0500000 0x1fb00000>;
- };
- };
};
regulators {
@@ -92,6 +71,27 @@
};
};
+&nand {
+ pinctrl-0 = <&pmx_nand>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x100000>;
+ };
+
+ partition@100000 {
+ label = "uImage";
+ reg = <0x0100000 0x400000>;
+ };
+
+ partition@500000 {
+ label = "root";
+ reg = <0x0500000 0x1fb00000>;
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-topkick.dts b/arch/arm/boot/dts/kirkwood-topkick.dts
index 30842b4ff293..320da677b984 100644
--- a/arch/arm/boot/dts/kirkwood-topkick.dts
+++ b/arch/arm/boot/dts/kirkwood-topkick.dts
@@ -90,37 +90,6 @@
pinctrl-names = "default";
};
- nand@3000000 {
- status = "okay";
- pinctrl-0 = <&pmx_nand>;
- pinctrl-names = "default";
-
- partition@0 {
- label = "u-boot";
- reg = <0x0000000 0x180000>;
- };
-
- partition@180000 {
- label = "u-boot env";
- reg = <0x0180000 0x20000>;
- };
-
- partition@200000 {
- label = "uImage";
- reg = <0x0200000 0x600000>;
- };
-
- partition@800000 {
- label = "uInitrd";
- reg = <0x0800000 0x1000000>;
- };
-
- partition@1800000 {
- label = "rootfs";
- reg = <0x1800000 0xe800000>;
- };
- };
-
sata@80000 {
status = "okay";
nr-ports = <1>;
@@ -204,6 +173,37 @@
};
};
+&nand {
+ status = "okay";
+ pinctrl-0 = <&pmx_nand>;
+ pinctrl-names = "default";
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x180000>;
+ };
+
+ partition@180000 {
+ label = "u-boot env";
+ reg = <0x0180000 0x20000>;
+ };
+
+ partition@200000 {
+ label = "uImage";
+ reg = <0x0200000 0x600000>;
+ };
+
+ partition@800000 {
+ label = "uInitrd";
+ reg = <0x0800000 0x1000000>;
+ };
+
+ partition@1800000 {
+ label = "rootfs";
+ reg = <0x1800000 0xe800000>;
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-ts219-6282.dts b/arch/arm/boot/dts/kirkwood-ts219-6282.dts
index 9efcd2dc79d3..345562f75891 100644
--- a/arch/arm/boot/dts/kirkwood-ts219-6282.dts
+++ b/arch/arm/boot/dts/kirkwood-ts219-6282.dts
@@ -6,7 +6,6 @@
/ {
mbus {
- ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>;
pcie-controller {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index 1335b2e1bed4..8b73c80f1dad 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -28,16 +28,43 @@
compatible = "marvell,kirkwood-mbus", "simple-bus";
#address-cells = <2>;
#size-cells = <1>;
+ /* If a board file needs to change this ranges property, it must replace it completely */
+ ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000 /* internal-regs */
+ MBUS_ID(0x01, 0x2f) 0 0xf4000000 0x10000 /* nand flash */
+ MBUS_ID(0x03, 0x01) 0 0xf5000000 0x10000 /* crypto sram */
+ >;
controller = <&mbusc>;
pcie-mem-aperture = <0xe0000000 0x10000000>; /* 256 MiB memory space */
pcie-io-aperture = <0xf2000000 0x100000>; /* 1 MiB I/O space */
+
+ crypto@0301 {
+ compatible = "marvell,orion-crypto";
+ reg = <MBUS_ID(0xf0, 0x01) 0x30000 0x10000>,
+ <MBUS_ID(0x03, 0x01) 0 0x800>;
+ reg-names = "regs", "sram";
+ interrupts = <22>;
+ clocks = <&gate_clk 17>;
+ status = "okay";
+ };
+
+ nand: nand@012f {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cle = <0>;
+ ale = <1>;
+ bank-width = <1>;
+ compatible = "marvell,orion-nand";
+ reg = <MBUS_ID(0x01, 0x2f) 0 0x400>;
+ chip-delay = <25>;
+ /* set partition map and/or chip-delay in board dts */
+ clocks = <&gate_clk 7>;
+ status = "disabled";
+ };
};
ocp@f1000000 {
compatible = "simple-bus";
- ranges = <0x00000000 0xf1000000 0x0100000
- 0xf4000000 0xf4000000 0x0000400
- 0xf5000000 0xf5000000 0x0000400>;
+ ranges = <0x00000000 0xf1000000 0x0100000>;
#address-cells = <1>;
#size-cells = <1>;
@@ -193,20 +220,6 @@
status = "okay";
};
- nand@3000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cle = <0>;
- ale = <1>;
- bank-width = <1>;
- compatible = "marvell,orion-nand";
- reg = <0xf4000000 0x400>;
- chip-delay = <25>;
- /* set partition map and/or chip-delay in board dts */
- clocks = <&gate_clk 7>;
- status = "disabled";
- };
-
i2c@11000 {
compatible = "marvell,mv64xxx-i2c";
reg = <0x11000 0x20>;
@@ -218,16 +231,6 @@
status = "disabled";
};
- crypto@30000 {
- compatible = "marvell,orion-crypto";
- reg = <0x30000 0x10000>,
- <0xf5000000 0x800>;
- reg-names = "regs", "sram";
- interrupts = <22>;
- clocks = <&gate_clk 17>;
- status = "okay";
- };
-
mdio: mdio-bus@72004 {
compatible = "marvell,orion-mdio";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/kizbox.dts b/arch/arm/boot/dts/kizbox.dts
index 02df1914a47c..928f6eef2d59 100644
--- a/arch/arm/boot/dts/kizbox.dts
+++ b/arch/arm/boot/dts/kizbox.dts
@@ -53,6 +53,12 @@
status = "okay";
};
+ watchdog@fffffd40 {
+ timeout-sec = <15>;
+ atmel,max-heartbeat-sec = <16>;
+ atmel,min-heartbeat-sec = <0>;
+ status = "okay";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/mxs-pinfunc.h b/arch/arm/boot/dts/mxs-pinfunc.h
new file mode 100644
index 000000000000..c6da987b20cb
--- /dev/null
+++ b/arch/arm/boot/dts/mxs-pinfunc.h
@@ -0,0 +1,31 @@
+/*
+ * Header providing constants for i.MX28 pinctrl bindings.
+ *
+ * Copyright (C) 2013 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __DT_BINDINGS_MXS_PINCTRL_H__
+#define __DT_BINDINGS_MXS_PINCTRL_H__
+
+/* fsl,drive-strength property */
+#define MXS_DRIVE_4mA 0
+#define MXS_DRIVE_8mA 1
+#define MXS_DRIVE_12mA 2
+#define MXS_DRIVE_16mA 3
+
+/* fsl,voltage property */
+#define MXS_VOLTAGE_LOW 0
+#define MXS_VOLTAGE_HIGH 1
+
+/* fsl,pull-up property */
+#define MXS_PULL_DISABLE 0
+#define MXS_PULL_ENABLE 1
+
+#endif /* __DT_BINDINGS_MXS_PINCTRL_H__ */
diff --git a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
new file mode 100644
index 000000000000..9c18adf788f7
--- /dev/null
+++ b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
@@ -0,0 +1,52 @@
+/*
+ * Common file for GPMC connected smsc911x on omaps
+ *
+ * Note that the board specific DTS file needs to specify
+ * ranges, pinctrl, reg, interrupt parent and interrupts.
+ */
+
+/ {
+ vddvario: regulator-vddvario {
+ compatible = "regulator-fixed";
+ regulator-name = "vddvario";
+ regulator-always-on;
+ };
+
+ vdd33a: regulator-vdd33a {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd33a";
+ regulator-always-on;
+ };
+};
+
+&gpmc {
+ ethernet@gpmc {
+ compatible = "smsc,lan9221", "smsc,lan9115";
+ bank-width = <2>;
+ gpmc,mux-add-data;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <186>;
+ gpmc,cs-wr-off-ns = <186>;
+ gpmc,adv-on-ns = <12>;
+ gpmc,adv-rd-off-ns = <48>;
+ gpmc,adv-wr-off-ns = <48>;
+ gpmc,oe-on-ns = <54>;
+ gpmc,oe-off-ns = <168>;
+ gpmc,we-on-ns = <54>;
+ gpmc,we-off-ns = <168>;
+ gpmc,rd-cycle-ns = <186>;
+ gpmc,wr-cycle-ns = <186>;
+ gpmc,access-ns = <114>;
+ gpmc,page-burst-access-ns = <6>;
+ gpmc,bus-turnaround-ns = <12>;
+ gpmc,cycle2cycle-delay-ns = <18>;
+ gpmc,wr-data-mux-bus-ns = <90>;
+ gpmc,wr-access-ns = <186>;
+ gpmc,cycle2cycle-samecsen;
+ gpmc,cycle2cycle-diffcsen;
+ vmmc-supply = <&vddvario>;
+ vmmc_aux-supply = <&vdd33a>;
+ reg-io-width = <4>;
+ smsc,save-mac-address;
+ };
+};
diff --git a/arch/arm/boot/dts/omap-zoom-common.dtsi b/arch/arm/boot/dts/omap-zoom-common.dtsi
new file mode 100644
index 000000000000..b0ee342598f0
--- /dev/null
+++ b/arch/arm/boot/dts/omap-zoom-common.dtsi
@@ -0,0 +1,33 @@
+/*
+ * Common features on the Zoom debug board
+ */
+
+#include "omap-gpmc-smsc911x.dtsi"
+
+&gpmc {
+ ranges = <3 0 0x10000000 0x00000400>,
+ <7 0 0x2c000000 0x01000000>;
+
+ /*
+ * Four port TL16CP754C serial port on GPMC,
+ * they probably share the same GPIO IRQ
+ * REVISIT: Add timing support from slls644g.pdf
+ */
+ 8250@3,0 {
+ compatible = "ns16550a";
+ reg = <3 0 0x100>;
+ bank-width = <2>;
+ reg-shift = <1>;
+ reg-io-width = <1>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <6 IRQ_TYPE_EDGE_RISING>; /* gpio102 */
+ clock-frequency = <1843200>;
+ current-speed = <115200>;
+ };
+
+ ethernet@gpmc {
+ reg = <7 0 0xff>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <30 IRQ_TYPE_LEVEL_LOW>; /* gpio158 */
+ };
+};
diff --git a/arch/arm/boot/dts/omap2420-h4.dts b/arch/arm/boot/dts/omap2420-h4.dts
index 224c08f472f4..34cdecb4fdda 100644
--- a/arch/arm/boot/dts/omap2420-h4.dts
+++ b/arch/arm/boot/dts/omap2420-h4.dts
@@ -50,15 +50,15 @@
label = "bootloader";
reg = <0 0x20000>;
};
- partition@0x20000 {
+ partition@20000 {
label = "params";
reg = <0x20000 0x20000>;
};
- partition@0x40000 {
+ partition@40000 {
label = "kernel";
reg = <0x40000 0x200000>;
};
- partition@0x240000 {
+ partition@240000 {
label = "file-system";
reg = <0x240000 0x3dc0000>;
};
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index ba4dcfc6b721..31a632f7effb 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -69,6 +69,23 @@
};
};
+
+ /* HS USB Port 2 Power */
+ hsusb2_power: hsusb2_power_reg {
+ compatible = "regulator-fixed";
+ regulator-name = "hsusb2_vbus";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&twl_gpio 18 0>; /* GPIO LEDA */
+ startup-delay-us = <70000>;
+ };
+
+ /* HS USB Host PHY on PORT 2 */
+ hsusb2_phy: hsusb2_phy {
+ compatible = "usb-nop-xceiv";
+ reset-gpios = <&gpio5 19 GPIO_ACTIVE_LOW>; /* gpio_147 */
+ vcc-supply = <&hsusb2_power>;
+ };
};
&omap3_pmx_wkup {
@@ -79,6 +96,37 @@
};
};
+&omap3_pmx_core {
+ pinctrl-names = "default";
+ pinctrl-0 = <
+ &hsusbb2_pins
+ >;
+
+ uart3_pins: pinmux_uart3_pins {
+ pinctrl-single,pins = <
+ 0x16e (PIN_INPUT | PIN_OFF_WAKEUPENABLE | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
+ 0x170 (PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx OUTPUT | MODE0 */
+ >;
+ };
+
+ hsusbb2_pins: pinmux_hsusbb2_pins {
+ pinctrl-single,pins = <
+ 0x5c0 (PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */
+ 0x5c2 (PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */
+ 0x5c4 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */
+ 0x5c6 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */
+ 0x5c8 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */
+ 0x5cA (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */
+ 0x1a4 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcspi1_cs3.hsusb2_data2 */
+ 0x1a6 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcspi2_clk.hsusb2_data7 */
+ 0x1a8 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcspi2_simo.hsusb2_data4 */
+ 0x1aa (PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcspi2_somi.hsusb2_data5 */
+ 0x1ac (PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcspi2_cs0.hsusb2_data6 */
+ 0x1ae (PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcspi2_cs1.hsusb2_data3 */
+ >;
+ };
+};
+
&i2c1 {
clock-frequency = <2600000>;
@@ -150,15 +198,6 @@
power = <50>;
};
-&omap3_pmx_core {
- uart3_pins: pinmux_uart3_pins {
- pinctrl-single,pins = <
- 0x16e (PIN_INPUT | PIN_OFF_WAKEUPENABLE | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
- 0x170 (PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx OUTPUT | MODE0 */
- >;
- };
-};
-
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&uart3_pins>;
@@ -168,3 +207,11 @@
pinctrl-names = "default";
pinctrl-0 = <&gpio1_pins>;
};
+
+&usbhshost {
+ port2-mode = "ehci-phy";
+};
+
+&usbhsehci {
+ phys = <0 &hsusb2_phy>;
+};
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index dfd83103657a..fa532aaacc68 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -44,17 +44,6 @@
};
};
- /* HS USB Port 2 RESET */
- hsusb2_reset: hsusb2_reset_reg {
- compatible = "regulator-fixed";
- regulator-name = "hsusb2_reset";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio5 19 0>; /* gpio_147 */
- startup-delay-us = <70000>;
- enable-active-high;
- };
-
/* HS USB Port 2 Power */
hsusb2_power: hsusb2_power_reg {
compatible = "regulator-fixed";
@@ -68,7 +57,7 @@
/* HS USB Host PHY on PORT 2 */
hsusb2_phy: hsusb2_phy {
compatible = "usb-nop-xceiv";
- reset-supply = <&hsusb2_reset>;
+ reset-gpios = <&gpio5 19 GPIO_ACTIVE_LOW>; /* gpio_147 */
vcc-supply = <&hsusb2_power>;
};
@@ -101,18 +90,18 @@
hsusbb2_pins: pinmux_hsusbb2_pins {
pinctrl-single,pins = <
- 0x5c0 (PIN_OUTPUT | MUX_MODE3) /* usbb2_ulpitll_clk.usbb1_ulpiphy_clk */
- 0x5c2 (PIN_OUTPUT | MUX_MODE3) /* usbb2_ulpitll_clk.usbb1_ulpiphy_stp */
- 0x5c4 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* usbb2_ulpitll_clk.usbb1_ulpiphy_dir */
- 0x5c6 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* usbb2_ulpitll_clk.usbb1_ulpiphy_nxt */
- 0x5c8 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* usbb2_ulpitll_clk.usbb1_ulpiphy_dat0 */
- 0x5cA (PIN_INPUT_PULLDOWN | MUX_MODE3) /* usbb2_ulpitll_clk.usbb1_ulpiphy_dat1 */
- 0x1a4 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* usbb2_ulpitll_clk.usbb1_ulpiphy_dat2 */
- 0x1a6 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* usbb2_ulpitll_clk.usbb1_ulpiphy_dat3 */
- 0x1a8 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* usbb2_ulpitll_clk.usbb1_ulpiphy_dat4 */
- 0x1aa (PIN_INPUT_PULLDOWN | MUX_MODE3) /* usbb2_ulpitll_clk.usbb1_ulpiphy_dat5 */
- 0x1ac (PIN_INPUT_PULLDOWN | MUX_MODE3) /* usbb2_ulpitll_clk.usbb1_ulpiphy_dat6 */
- 0x1ae (PIN_INPUT_PULLDOWN | MUX_MODE3) /* usbb2_ulpitll_clk.usbb1_ulpiphy_dat7 */
+ 0x5c0 (PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */
+ 0x5c2 (PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */
+ 0x5c4 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */
+ 0x5c6 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */
+ 0x5c8 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */
+ 0x5cA (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */
+ 0x1a4 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcspi1_cs3.hsusb2_data2 */
+ 0x1a6 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcspi2_clk.hsusb2_data7 */
+ 0x1a8 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcspi2_simo.hsusb2_data4 */
+ 0x1aa (PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcspi2_somi.hsusb2_data5 */
+ 0x1ac (PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcspi2_cs0.hsusb2_data6 */
+ 0x1ae (PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcspi2_cs1.hsusb2_data3 */
>;
};
@@ -180,3 +169,12 @@
pinctrl-names = "default";
pinctrl-0 = <&gpio1_pins>;
};
+
+&usb_otg_hs {
+ interface-type = <0>;
+ usb-phy = <&usb2_phy>;
+ phys = <&usb2_phy>;
+ phy-names = "usb2-phy";
+ mode = <3>;
+ power = <50>;
+};
diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts
index 7ef282795dd4..4665421bb7bc 100644
--- a/arch/arm/boot/dts/omap3-devkit8000.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000.dts
@@ -125,7 +125,7 @@
nand-bus-width = <16>;
gpmc,device-nand;
- gpmc,sync-clki-ps = <0>;
+ gpmc,sync-clk-ps = <0>;
gpmc,cs-on-ns = <0>;
gpmc,cs-rd-off-ns = <44>;
gpmc,cs-wr-off-ns = <44>;
diff --git a/arch/arm/boot/dts/omap3-evm-37xx.dts b/arch/arm/boot/dts/omap3-evm-37xx.dts
new file mode 100644
index 000000000000..4df68ad3736a
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-evm-37xx.dts
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "omap36xx.dtsi"
+#include "omap3-evm-common.dtsi"
+
+
+/ {
+ model = "TI OMAP37XX EVM (TMDSEVM3730)";
+ compatible = "ti,omap3-evm-37xx", "ti,omap36xx";
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256 MB */
+ };
+
+ wl12xx_vmmc: wl12xx_vmmc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wl12xx_gpio>;
+ };
+};
+
+&omap3_pmx_core {
+ mmc1_pins: pinmux_mmc1_pins {
+ pinctrl-single,pins = <
+ 0x114 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk.sdmmc1_clk */
+ 0x116 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_cmd.sdmmc1_cmd */
+ 0x118 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat0.sdmmc1_dat0 */
+ 0x11a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
+ 0x11c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
+ 0x11e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
+ 0x120 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat4.sdmmc1_dat4 */
+ 0x122 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat5.sdmmc1_dat5 */
+ 0x124 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat6.sdmmc1_dat6 */
+ 0x126 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat7.sdmmc1_dat7 */
+ >;
+ };
+
+ /* NOTE: Clocked externally, needs INPUT also for sdmmc2_clk.sdmmc2_clk */
+ mmc2_pins: pinmux_mmc2_pins {
+ pinctrl-single,pins = <
+ 0x128 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
+ 0x12a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
+ 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
+ 0x12e (WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
+ 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
+ 0x132 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
+ >;
+ };
+
+ uart3_pins: pinmux_uart3_pins {
+ pinctrl-single,pins = <
+ 0x16e (WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
+ 0x170 (PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */
+ >;
+ };
+
+ wl12xx_gpio: pinmux_wl12xx_gpio {
+ pinctrl-single,pins = <
+ 0x150 (PIN_OUTPUT | MUX_MODE4) /* uart1_cts.gpio_150 */
+ 0x14e (PIN_INPUT | MUX_MODE4) /* uart1_rts.gpio_149 */
+ >;
+ };
+
+ smsc911x_pins: pinmux_smsc911x_pins {
+ pinctrl-single,pins = <
+ 0x1a2 (PIN_INPUT | MUX_MODE4) /* mcspi1_cs2.gpio_176 */
+ >;
+ };
+};
+
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins>;
+};
+
+&mmc3 {
+ status = "disabled";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pins>;
+};
+
+&gpmc {
+ ranges = <0 0 0x00000000 0x20000000>,
+ <5 0 0x2c000000 0x01000000>;
+
+ nand@0,0 {
+ linux,mtd-name= "hynix,h8kds0un0mer-4em";
+ reg = <0 0 0>;
+ nand-bus-width = <16>;
+ ti,nand-ecc-opt = "bch8";
+
+ gpmc,sync-clk-ps = <0>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <44>;
+ gpmc,cs-wr-off-ns = <44>;
+ gpmc,adv-on-ns = <6>;
+ gpmc,adv-rd-off-ns = <34>;
+ gpmc,adv-wr-off-ns = <44>;
+ gpmc,we-off-ns = <40>;
+ gpmc,oe-off-ns = <54>;
+ gpmc,access-ns = <64>;
+ gpmc,rd-cycle-ns = <82>;
+ gpmc,wr-cycle-ns = <82>;
+ gpmc,wr-access-ns = <40>;
+ gpmc,wr-data-mux-bus-ns = <0>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "X-Loader";
+ reg = <0 0x80000>;
+ };
+ partition@0x80000 {
+ label = "U-Boot";
+ reg = <0x80000 0x1c0000>;
+ };
+ partition@0x1c0000 {
+ label = "Environment";
+ reg = <0x240000 0x40000>;
+ };
+ partition@0x280000 {
+ label = "Kernel";
+ reg = <0x280000 0x500000>;
+ };
+ partition@0x780000 {
+ label = "Filesystem";
+ reg = <0x780000 0x1f880000>;
+ };
+ };
+
+ ethernet@gpmc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&smsc911x_pins>;
+ };
+};
diff --git a/arch/arm/boot/dts/omap3-evm-common.dtsi b/arch/arm/boot/dts/omap3-evm-common.dtsi
new file mode 100644
index 000000000000..b5493296d103
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-evm-common.dtsi
@@ -0,0 +1,94 @@
+/*
+ * Common support for omap3 EVM boards
+ */
+
+#include "omap-gpmc-smsc911x.dtsi"
+
+/ {
+ cpus {
+ cpu@0 {
+ cpu0-supply = <&vcc>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ ledb {
+ label = "omap3evm::ledb";
+ gpios = <&twl_gpio 19 GPIO_ACTIVE_HIGH>; /* LEDB */
+ linux,default-trigger = "default-on";
+ };
+ };
+
+ wl12xx_vmmc: wl12xx_vmmc {
+ compatible = "regulator-fixed";
+ regulator-name = "vwl1271";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&gpio5 22 0>; /* gpio150 */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ vin-supply = <&vmmc2>;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <2600000>;
+
+ twl: twl@48 {
+ reg = <0x48>;
+ interrupts = <7>; /* SYS_NIRQ cascaded to intc */
+ interrupt-parent = <&intc>;
+ };
+};
+
+#include "twl4030.dtsi"
+#include "twl4030_omap3.dtsi"
+
+&i2c2 {
+ clock-frequency = <400000>;
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+
+ /*
+ * TVP5146 Video decoder-in for analog input support.
+ */
+ tvp5146@5c {
+ compatible = "ti,tvp5146m2";
+ reg = <0x5c>;
+ };
+};
+
+&mmc1 {
+ vmmc-supply = <&vmmc1>;
+ vmmc_aux-supply = <&vsim>;
+ bus-width = <8>;
+};
+
+&mmc2 {
+ vmmc-supply = <&wl12xx_vmmc>;
+ non-removable;
+ bus-width = <4>;
+ cap-power-off-card;
+};
+
+&twl_gpio {
+ ti,use-leds;
+};
+
+&usb_otg_hs {
+ interface-type = <0>;
+ usb-phy = <&usb2_phy>;
+ mode = <3>;
+ power = <50>;
+};
+
+&gpmc {
+ ethernet@gpmc {
+ interrupt-parent = <&gpio6>;
+ interrupts = <16 8>;
+ reg = <5 0 0xff>;
+ };
+};
diff --git a/arch/arm/boot/dts/omap3-evm.dts b/arch/arm/boot/dts/omap3-evm.dts
index 4134dd05c3a4..e10dcd0fa539 100644
--- a/arch/arm/boot/dts/omap3-evm.dts
+++ b/arch/arm/boot/dts/omap3-evm.dts
@@ -8,70 +8,14 @@
/dts-v1/;
#include "omap34xx.dtsi"
+#include "omap3-evm-common.dtsi"
/ {
- model = "TI OMAP3 EVM (OMAP3530, AM/DM37x)";
+ model = "TI OMAP35XX EVM (TMDSEVM3530)";
compatible = "ti,omap3-evm", "ti,omap3";
- cpus {
- cpu@0 {
- cpu0-supply = <&vcc>;
- };
- };
-
memory {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
-
- leds {
- compatible = "gpio-leds";
- ledb {
- label = "omap3evm::ledb";
- gpios = <&twl_gpio 19 GPIO_ACTIVE_HIGH>; /* LEDB */
- linux,default-trigger = "default-on";
- };
- };
-};
-
-&i2c1 {
- clock-frequency = <2600000>;
-
- twl: twl@48 {
- reg = <0x48>;
- interrupts = <7>; /* SYS_NIRQ cascaded to intc */
- interrupt-parent = <&intc>;
- };
-};
-
-#include "twl4030.dtsi"
-#include "twl4030_omap3.dtsi"
-
-&i2c2 {
- clock-frequency = <400000>;
-};
-
-&i2c3 {
- clock-frequency = <400000>;
-
- /*
- * TVP5146 Video decoder-in for analog input support.
- */
- tvp5146@5c {
- compatible = "ti,tvp5146m2";
- reg = <0x5c>;
- };
-};
-
-&twl_gpio {
- ti,use-leds;
-};
-
-&usb_otg_hs {
- interface-type = <0>;
- usb-phy = <&usb2_phy>;
- phys = <&usb2_phy>;
- phy-names = "usb2-phy";
- mode = <3>;
- power = <50>;
};
diff --git a/arch/arm/boot/dts/omap3-gta04.dts b/arch/arm/boot/dts/omap3-gta04.dts
new file mode 100644
index 000000000000..b9b55c95a566
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-gta04.dts
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2013 Marek Belisko <marek@goldelico.com>
+ *
+ * Based on omap3-beagle-xm.dts
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "omap36xx.dtsi"
+
+/ {
+ model = "OMAP3 GTA04";
+ compatible = "ti,omap3-gta04", "ti,omap3";
+
+ cpus {
+ cpu@0 {
+ cpu0-supply = <&vcc>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>; /* 512 MB */
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ aux-button {
+ label = "aux";
+ linux,code = <169>;
+ gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
+ gpio-key,wakeup;
+ };
+ };
+};
+
+&omap3_pmx_core {
+ uart1_pins: pinmux_uart1_pins {
+ pinctrl-single,pins = <
+ 0x152 (PIN_INPUT | MUX_MODE0) /* uart1_rx.uart1_rx */
+ 0x14c (PIN_OUTPUT | MUX_MODE0) /* uart1_tx.uart1_tx */
+ >;
+ };
+
+ uart2_pins: pinmux_uart2_pins {
+ pinctrl-single,pins = <
+ 0x14a (PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
+ 0x148 (PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */
+ >;
+ };
+
+ uart3_pins: pinmux_uart3_pins {
+ pinctrl-single,pins = <
+ 0x16e (PIN_INPUT | MUX_MODE0) /* uart3_rx.uart3_rx */
+ 0x170 (PIN_OUTPUT | MUX_MODE0) /* uart3_tx.uart3_tx */
+ >;
+ };
+
+ mmc1_pins: pinmux_mmc1_pins {
+ pinctrl-single,pins = <
+ 0x114 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk.sdmmc1_clk */
+ 0x116 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_cmd.sdmmc1_cmd */
+ 0x118 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat0.sdmmc1_dat0 */
+ 0x11a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
+ 0x11c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
+ 0x11e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
+ >;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <2600000>;
+
+ twl: twl@48 {
+ reg = <0x48>;
+ interrupts = <7>; /* SYS_NIRQ cascaded to intc */
+ interrupt-parent = <&intc>;
+ };
+};
+
+#include "twl4030.dtsi"
+#include "twl4030_omap3.dtsi"
+
+&i2c2 {
+ clock-frequency = <400000>;
+
+ /* pressure sensor */
+ bmp085@77 {
+ compatible = "bosch,bmp085";
+ reg = <0x77>;
+ };
+
+ /* leds */
+ tca6507@45 {
+ compatible = "ti,tca6507";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x45>;
+
+ gta04_led0: red_aux@0 {
+ label = "gta04:red:aux";
+ reg = <0x0>;
+ };
+
+ gta04_led1: green_aux@1 {
+ label = "gta04:green:aux";
+ reg = <0x1>;
+ };
+
+ gta04_led3: red_power@3 {
+ label = "gta04:red:power";
+ reg = <0x3>;
+ linux,default-trigger = "default-on";
+ };
+
+ gta04_led4: green_power@4 {
+ label = "gta04:green:power";
+ reg = <0x4>;
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+};
+
+&usb_otg_hs {
+ interface-type = <0>;
+ usb-phy = <&usb2_phy>;
+ phys = <&usb2_phy>;
+ phy-names = "usb2-phy";
+ mode = <3>;
+ power = <50>;
+};
+
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ vmmc-supply = <&vmmc1>;
+ vmmc_aux-supply = <&vsim>;
+ bus-width = <4>;
+};
+
+&mmc2 {
+ status = "disabled";
+};
+
+&mmc3 {
+ status = "disabled";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>;
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pins>;
+};
+
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index 2326d11462a5..ba1e58b7b7e3 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -77,6 +77,8 @@
0x1a2 (PIN_INPUT | MUX_MODE4) /* mcspi1_cs2.gpio_176 */
>;
};
+
+ leds_pins: pinmux_leds_pins { };
};
&i2c1 {
@@ -141,3 +143,12 @@
&twl_gpio {
ti,use-leds;
};
+
+&usb_otg_hs {
+ interface-type = <0>;
+ usb-phy = <&usb2_phy>;
+ phys = <&usb2_phy>;
+ phy-names = "usb2-phy";
+ mode = <3>;
+ power = <50>;
+};
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts
index e8c48284587c..d5cc79267250 100644
--- a/arch/arm/boot/dts/omap3-igep0020.dts
+++ b/arch/arm/boot/dts/omap3-igep0020.dts
@@ -10,13 +10,17 @@
*/
#include "omap3-igep.dtsi"
+#include "omap-gpmc-smsc911x.dtsi"
/ {
model = "IGEPv2";
compatible = "isee,omap3-igep0020", "ti,omap3";
leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&leds_pins>;
compatible = "gpio-leds";
+
boot {
label = "omap3:green:boot";
gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>;
@@ -41,19 +45,56 @@
};
};
- vddvario: regulator-vddvario {
- compatible = "regulator-fixed";
- regulator-name = "vddvario";
- regulator-always-on;
+ /* HS USB Port 1 Power */
+ hsusb1_power: hsusb1_power_reg {
+ compatible = "regulator-fixed";
+ regulator-name = "hsusb1_vbus";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&twl_gpio 18 GPIO_ACTIVE_LOW>; /* GPIO LEDA */
+ startup-delay-us = <70000>;
+ };
+
+ /* HS USB Host PHY on PORT 1 */
+ hsusb1_phy: hsusb1_phy {
+ compatible = "usb-nop-xceiv";
+ reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>; /* gpio_24 */
+ vcc-supply = <&hsusb1_power>;
};
+};
- vdd33a: regulator-vdd33a {
- compatible = "regulator-fixed";
- regulator-name = "vdd33a";
- regulator-always-on;
+&omap3_pmx_core {
+ pinctrl-names = "default";
+ pinctrl-0 = <
+ &hsusbb1_pins
+ >;
+
+ hsusbb1_pins: pinmux_hsusbb1_pins {
+ pinctrl-single,pins = <
+ 0x5aa (PIN_OUTPUT | MUX_MODE3) /* etk_ctl.hsusb1_clk */
+ 0x5a8 (PIN_OUTPUT | MUX_MODE3) /* etk_clk.hsusb1_stp */
+ 0x5bc (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d8.hsusb1_dir */
+ 0x5be (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d9.hsusb1_nxt */
+ 0x5ac (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d0.hsusb1_data0 */
+ 0x5ae (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d1.hsusb1_data1 */
+ 0x5b0 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d2.hsusb1_data2 */
+ 0x5b2 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d3.hsusb1_data7 */
+ 0x5b4 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d4.hsusb1_data4 */
+ 0x5b6 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d5.hsusb1_data5 */
+ 0x5b8 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d6.hsusb1_data6 */
+ 0x5ba (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d7.hsusb1_data3 */
+ >;
};
};
+&leds_pins {
+ pinctrl-single,pins = <
+ 0x5c4 (PIN_OUTPUT | MUX_MODE4) /* etk_d12.gpio_26 */
+ 0x5c6 (PIN_OUTPUT | MUX_MODE4) /* etk_d13.gpio_27 */
+ 0x5c8 (PIN_OUTPUT | MUX_MODE4) /* etk_d14.gpio_28 */
+ >;
+};
+
&i2c3 {
clock-frequency = <100000>;
@@ -99,59 +140,37 @@
label = "SPL";
reg = <0 0x100000>;
};
- partition@0x80000 {
+ partition@80000 {
label = "U-Boot";
reg = <0x100000 0x180000>;
};
- partition@0x1c0000 {
+ partition@1c0000 {
label = "Environment";
reg = <0x280000 0x100000>;
};
- partition@0x280000 {
+ partition@280000 {
label = "Kernel";
reg = <0x380000 0x300000>;
};
- partition@0x780000 {
+ partition@780000 {
label = "Filesystem";
reg = <0x680000 0x1f980000>;
};
};
- ethernet@5,0 {
+ ethernet@gpmc {
pinctrl-names = "default";
pinctrl-0 = <&smsc911x_pins>;
- compatible = "smsc,lan9221", "smsc,lan9115";
reg = <5 0 0xff>;
- bank-width = <2>;
-
- gpmc,mux-add-data;
- gpmc,cs-on-ns = <0>;
- gpmc,cs-rd-off-ns = <186>;
- gpmc,cs-wr-off-ns = <186>;
- gpmc,adv-on-ns = <12>;
- gpmc,adv-rd-off-ns = <48>;
- gpmc,adv-wr-off-ns = <48>;
- gpmc,oe-on-ns = <54>;
- gpmc,oe-off-ns = <168>;
- gpmc,we-on-ns = <54>;
- gpmc,we-off-ns = <168>;
- gpmc,rd-cycle-ns = <186>;
- gpmc,wr-cycle-ns = <186>;
- gpmc,access-ns = <114>;
- gpmc,page-burst-access-ns = <6>;
- gpmc,bus-turnaround-ns = <12>;
- gpmc,cycle2cycle-delay-ns = <18>;
- gpmc,wr-data-mux-bus-ns = <90>;
- gpmc,wr-access-ns = <186>;
- gpmc,cycle2cycle-samecsen;
- gpmc,cycle2cycle-diffcsen;
-
interrupt-parent = <&gpio6>;
- interrupts = <16 8>;
- vmmc-supply = <&vddvario>;
- vmmc_aux-supply = <&vdd33a>;
- reg-io-width = <4>;
-
- smsc,save-mac-address;
+ interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
};
};
+
+&usbhshost {
+ port1-mode = "ehci-phy";
+};
+
+&usbhsehci {
+ phys = <&hsusb1_phy>;
+};
diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts
index 644d05383836..525e6d9b0978 100644
--- a/arch/arm/boot/dts/omap3-igep0030.dts
+++ b/arch/arm/boot/dts/omap3-igep0030.dts
@@ -16,7 +16,10 @@
compatible = "isee,omap3-igep0030", "ti,omap3";
leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&leds_pins>;
compatible = "gpio-leds";
+
boot {
label = "omap3:green:boot";
gpios = <&twl_gpio 13 GPIO_ACTIVE_LOW>;
@@ -43,6 +46,12 @@
};
};
+&leds_pins {
+ pinctrl-single,pins = <
+ 0x5b0 (PIN_OUTPUT | MUX_MODE4) /* etk_d2.gpio_16 */
+ >;
+};
+
&gpmc {
ranges = <0 0 0x00000000 0x20000000>;
@@ -74,19 +83,19 @@
label = "SPL";
reg = <0 0x100000>;
};
- partition@0x80000 {
+ partition@80000 {
label = "U-Boot";
reg = <0x100000 0x180000>;
};
- partition@0x1c0000 {
+ partition@1c0000 {
label = "Environment";
reg = <0x280000 0x100000>;
};
- partition@0x280000 {
+ partition@280000 {
label = "Kernel";
reg = <0x380000 0x300000>;
};
- partition@0x780000 {
+ partition@780000 {
label = "Filesystem";
reg = <0x680000 0x1f980000>;
};
diff --git a/arch/arm/boot/dts/omap3-n9.dts b/arch/arm/boot/dts/omap3-n9.dts
new file mode 100644
index 000000000000..39828ce464ee
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-n9.dts
@@ -0,0 +1,18 @@
+/*
+ * omap3-n9.dts - Device Tree file for Nokia N9
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/dts-v1/;
+
+#include "omap3-n950-n9.dtsi"
+
+/ {
+ model = "Nokia N9";
+ compatible = "nokia,omap3-n9", "ti,omap3";
+};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
new file mode 100644
index 000000000000..c4f20bfe4cce
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2013 Pavel Machek <pavel@ucw.cz>
+ * Copyright 2013 Aaro Koskinen <aaro.koskinen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 (or later) as
+ * published by the Free Software Foundation.
+ */
+
+/dts-v1/;
+
+#include "omap34xx.dtsi"
+
+/ {
+ model = "Nokia N900";
+ compatible = "nokia,omap3-n900", "ti,omap3";
+
+ cpus {
+ cpu@0 {
+ cpu0-supply = <&vcc>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256 MB */
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+
+ camera_lens_cover {
+ label = "Camera Lens Cover";
+ gpios = <&gpio4 14 GPIO_ACTIVE_LOW>; /* 110 */
+ linux,input-type = <5>; /* EV_SW */
+ linux,code = <0x09>; /* SW_CAMERA_LENS_COVER */
+ gpio-key,wakeup;
+ };
+
+ camera_focus {
+ label = "Camera Focus";
+ gpios = <&gpio3 4 GPIO_ACTIVE_LOW>; /* 68 */
+ linux,code = <0x210>; /* KEY_CAMERA_FOCUS */
+ gpio-key,wakeup;
+ };
+
+ camera_capture {
+ label = "Camera Capture";
+ gpios = <&gpio3 5 GPIO_ACTIVE_LOW>; /* 69 */
+ linux,code = <0xd4>; /* KEY_CAMERA */
+ gpio-key,wakeup;
+ };
+
+ lock_button {
+ label = "Lock Button";
+ gpios = <&gpio4 17 GPIO_ACTIVE_LOW>; /* 113 */
+ linux,code = <0x98>; /* KEY_SCREENLOCK */
+ gpio-key,wakeup;
+ };
+
+ keypad_slide {
+ label = "Keypad Slide";
+ gpios = <&gpio3 7 GPIO_ACTIVE_LOW>; /* 71 */
+ linux,input-type = <5>; /* EV_SW */
+ linux,code = <0x0a>; /* SW_KEYPAD_SLIDE */
+ gpio-key,wakeup;
+ };
+
+ proximity_sensor {
+ label = "Proximity Sensor";
+ gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>; /* 89 */
+ linux,input-type = <5>; /* EV_SW */
+ linux,code = <0x0b>; /* SW_FRONT_PROXIMITY */
+ };
+ };
+
+};
+
+&omap3_pmx_core {
+ pinctrl-names = "default";
+
+ uart2_pins: pinmux_uart2_pins {
+ pinctrl-single,pins = <
+ 0x14a (PIN_INPUT | MUX_MODE0) /* uart2_rx */
+ 0x148 (PIN_OUTPUT | MUX_MODE0) /* uart2_tx */
+ >;
+ };
+
+ uart3_pins: pinmux_uart3_pins {
+ pinctrl-single,pins = <
+ 0x16e (PIN_INPUT | MUX_MODE0) /* uart3_rx */
+ 0x170 (PIN_OUTPUT | MUX_MODE0) /* uart3_tx */
+ >;
+ };
+
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ 0x18a (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_scl */
+ 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_sda */
+ >;
+ };
+
+ i2c2_pins: pinmux_i2c2_pins {
+ pinctrl-single,pins = <
+ 0x18e (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c2_scl */
+ 0x190 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c2_sda */
+ >;
+ };
+
+ i2c3_pins: pinmux_i2c3_pins {
+ pinctrl-single,pins = <
+ 0x192 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_scl */
+ 0x194 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_sda */
+ >;
+ };
+
+ mmc1_pins: pinmux_mmc1_pins {
+ pinctrl-single,pins = <
+ 0x114 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk */
+ 0x116 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_cmd */
+ 0x118 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat0 */
+ 0x11a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat1 */
+ 0x11c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat2 */
+ 0x11e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat3 */
+ >;
+ };
+
+ display_pins: pinmux_display_pins {
+ pinctrl-single,pins = <
+ 0x0d4 (PIN_OUTPUT | MUX_MODE4) /* RX51_LCD_RESET_GPIO */
+ >;
+ };
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+
+ clock-frequency = <2200000>;
+
+ twl: twl@48 {
+ reg = <0x48>;
+ interrupts = <7>; /* SYS_NIRQ cascaded to intc */
+ interrupt-parent = <&intc>;
+ };
+};
+
+#include "twl4030.dtsi"
+#include "twl4030_omap3.dtsi"
+
+&vaux1 {
+ regulator-name = "V28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on; /* due to battery cover sensor */
+};
+
+&vaux2 {
+ regulator-name = "VCSI";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+};
+
+&vaux3 {
+ regulator-name = "VMMC2_30";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3000000>;
+};
+
+&vaux4 {
+ regulator-name = "VCAM_ANA_28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+};
+
+&vmmc1 {
+ regulator-name = "VMMC1";
+ regulator-min-microvolt = <1850000>;
+ regulator-max-microvolt = <3150000>;
+};
+
+&vmmc2 {
+ regulator-name = "V28_A";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on; /* due to VIO leak to AIC34 VDDs */
+};
+
+&vpll1 {
+ regulator-name = "VPLL";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+};
+
+&vpll2 {
+ regulator-name = "VSDI_CSI";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+};
+
+&vsim {
+ regulator-name = "VMMC2_IO_18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+};
+
+&vio {
+ regulator-name = "VIO";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+};
+
+&vintana1 {
+ regulator-name = "VINTANA1";
+ /* fixed to 1500000 */
+ regulator-always-on;
+};
+
+&vintana2 {
+ regulator-name = "VINTANA2";
+ regulator-min-microvolt = <2750000>;
+ regulator-max-microvolt = <2750000>;
+ regulator-always-on;
+};
+
+&vintdig {
+ regulator-name = "VINTDIG";
+ /* fixed to 1500000 */
+ regulator-always-on;
+};
+
+&twl {
+ twl_audio: audio {
+ compatible = "ti,twl4030-audio";
+ ti,enable-vibra = <1>;
+ };
+};
+
+&twl_gpio {
+ ti,pullups = <0x0>;
+ ti,pulldowns = <0x03ff3f>; /* BIT(0..5) | BIT(8..17) */
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+
+ clock-frequency = <100000>;
+
+ tlv320aic3x: tlv320aic3x@18 {
+ compatible = "ti,tlv320aic3x";
+ reg = <0x18>;
+ gpio-reset = <&gpio2 28 GPIO_ACTIVE_HIGH>; /* 60 */
+ ai3x-gpio-func = <
+ 0 /* AIC3X_GPIO1_FUNC_DISABLED */
+ 5 /* AIC3X_GPIO2_FUNC_DIGITAL_MIC_INPUT */
+ >;
+
+ AVDD-supply = <&vmmc2>;
+ DRVDD-supply = <&vmmc2>;
+ IOVDD-supply = <&vio>;
+ DVDD-supply = <&vio>;
+ };
+
+ tlv320aic3x_aux: tlv320aic3x@19 {
+ compatible = "ti,tlv320aic3x";
+ reg = <0x19>;
+ gpio-reset = <&gpio2 28 GPIO_ACTIVE_HIGH>; /* 60 */
+
+ AVDD-supply = <&vmmc2>;
+ DRVDD-supply = <&vmmc2>;
+ IOVDD-supply = <&vio>;
+ DVDD-supply = <&vio>;
+ };
+
+ lp5523: lp5523@32 {
+ compatible = "national,lp5523";
+ reg = <0x32>;
+ clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */
+ enable-gpio = <&gpio2 9 GPIO_ACTIVE_HIGH>; /* 41 */
+
+ chan0 {
+ chan-name = "lp5523:kb1";
+ led-cur = /bits/ 8 <50>;
+ max-cur = /bits/ 8 <100>;
+ };
+
+ chan1 {
+ chan-name = "lp5523:kb2";
+ led-cur = /bits/ 8 <50>;
+ max-cur = /bits/ 8 <100>;
+ };
+
+ chan2 {
+ chan-name = "lp5523:kb3";
+ led-cur = /bits/ 8 <50>;
+ max-cur = /bits/ 8 <100>;
+ };
+
+ chan3 {
+ chan-name = "lp5523:kb4";
+ led-cur = /bits/ 8 <50>;
+ max-cur = /bits/ 8 <100>;
+ };
+
+ chan4 {
+ chan-name = "lp5523:b";
+ led-cur = /bits/ 8 <50>;
+ max-cur = /bits/ 8 <100>;
+ };
+
+ chan5 {
+ chan-name = "lp5523:g";
+ led-cur = /bits/ 8 <50>;
+ max-cur = /bits/ 8 <100>;
+ };
+
+ chan6 {
+ chan-name = "lp5523:r";
+ led-cur = /bits/ 8 <50>;
+ max-cur = /bits/ 8 <100>;
+ };
+
+ chan7 {
+ chan-name = "lp5523:kb5";
+ led-cur = /bits/ 8 <50>;
+ max-cur = /bits/ 8 <100>;
+ };
+
+ chan8 {
+ chan-name = "lp5523:kb6";
+ led-cur = /bits/ 8 <50>;
+ max-cur = /bits/ 8 <100>;
+ };
+ };
+
+ bq27200: bq27200@55 {
+ compatible = "ti,bq27200";
+ reg = <0x55>;
+ };
+};
+
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins>;
+
+ clock-frequency = <400000>;
+};
+
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ vmmc-supply = <&vmmc1>;
+ bus-width = <4>;
+ cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */
+};
+
+&mmc2 {
+ status = "disabled";
+};
+
+&mmc3 {
+ status = "disabled";
+};
+
+&gpmc {
+ ranges = <0 0 0x04000000 0x10000000>; /* 256MB */
+
+ /* gpio-irq for dma: 65 */
+
+ onenand@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0 0 0x10000000>;
+
+ gpmc,sync-read;
+ gpmc,sync-write;
+ gpmc,burst-length = <16>;
+ gpmc,burst-read;
+ gpmc,burst-wrap;
+ gpmc,burst-write;
+ gpmc,device-width = <2>; /* GPMC_DEVWIDTH_16BIT */
+ gpmc,mux-add-data = <2>; /* GPMC_MUX_AD */
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <87>;
+ gpmc,cs-wr-off-ns = <87>;
+ gpmc,adv-on-ns = <0>;
+ gpmc,adv-rd-off-ns = <10>;
+ gpmc,adv-wr-off-ns = <10>;
+ gpmc,oe-on-ns = <15>;
+ gpmc,oe-off-ns = <87>;
+ gpmc,we-on-ns = <0>;
+ gpmc,we-off-ns = <87>;
+ gpmc,rd-cycle-ns = <112>;
+ gpmc,wr-cycle-ns = <112>;
+ gpmc,access-ns = <81>;
+ gpmc,page-burst-access-ns = <15>;
+ gpmc,bus-turnaround-ns = <0>;
+ gpmc,cycle2cycle-delay-ns = <0>;
+ gpmc,wait-monitoring-ns = <0>;
+ gpmc,clk-activation-ns = <5>;
+ gpmc,wr-data-mux-bus-ns = <30>;
+ gpmc,wr-access-ns = <81>;
+ gpmc,sync-clk-ps = <15000>;
+
+ /*
+ * MTD partition table corresponding to Nokia's
+ * Maemo 5 (Fremantle) release.
+ */
+ partition@0 {
+ label = "bootloader";
+ reg = <0x00000000 0x00020000>;
+ read-only;
+ };
+ partition@1 {
+ label = "config";
+ reg = <0x00020000 0x00060000>;
+ };
+ partition@2 {
+ label = "log";
+ reg = <0x00080000 0x00040000>;
+ };
+ partition@3 {
+ label = "kernel";
+ reg = <0x000c0000 0x00200000>;
+ };
+ partition@4 {
+ label = "initfs";
+ reg = <0x002c0000 0x00200000>;
+ };
+ partition@5 {
+ label = "rootfs";
+ reg = <0x004c0000 0x0fb40000>;
+ };
+ };
+};
+
+&mcspi1 {
+ /*
+ * For some reason, the touchscreen is necessary for the screen to
+ * work at all on real hw. It works well without it on the emulator.
+ *
+ * Also... the order in the device tree actually matters here.
+ */
+ tsc2005@0 {
+ compatible = "tsc2005";
+ spi-max-frequency = <6000000>;
+ reg = <0>;
+ };
+ mipid@2 {
+ compatible = "acx565akm";
+ spi-max-frequency = <6000000>;
+ reg = <2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&display_pins>;
+ };
+};
+
+&usb_otg_hs {
+ interface-type = <0>;
+ usb-phy = <&usb2_phy>;
+ phys = <&usb2_phy>;
+ phy-names = "usb2-phy";
+ mode = <2>;
+ power = <50>;
+};
+
+&uart1 {
+ status = "disabled";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pins>;
+};
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
new file mode 100644
index 000000000000..94eb77d3b9dd
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -0,0 +1,174 @@
+/*
+ * omap3-n950-n9.dtsi - Device Tree file for Nokia N950 & N9 (common stuff)
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "omap36xx.dtsi"
+
+/ {
+ cpus {
+ cpu@0 {
+ cpu0-supply = <&vcc>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>; /* 1 GB */
+ };
+
+ vemmc: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "VEMMC";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ gpio = <&gpio5 29 0>; /* gpio line 157 */
+ startup-delay-us = <150>;
+ enable-active-high;
+ };
+};
+
+&omap3_pmx_core {
+ mmc2_pins: pinmux_mmc2_pins {
+ pinctrl-single,pins = <
+ 0x128 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk */
+ 0x12a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd */
+ 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0 */
+ 0x12e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1 */
+ 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2 */
+ 0x132 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3 */
+ >;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <2900000>;
+
+ twl: twl@48 {
+ reg = <0x48>;
+ interrupts = <7>; /* SYS_NIRQ cascaded to intc */
+ interrupt-parent = <&intc>;
+ };
+};
+
+/include/ "twl4030.dtsi"
+
+&twl {
+ compatible = "ti,twl5031";
+};
+
+&twl_gpio {
+ ti,pullups = <0x000001>; /* BIT(0) */
+ ti,pulldowns = <0x008106>; /* BIT(1) | BIT(2) | BIT(8) | BIT(15) */
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+};
+
+&mmc1 {
+ status = "disabled";
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins>;
+ vmmc-supply = <&vemmc>;
+ bus-width = <4>;
+ ti,non-removable;
+};
+
+&mmc3 {
+ status = "disabled";
+};
+
+&usb_otg_hs {
+ interface-type = <0>;
+ usb-phy = <&usb2_phy>;
+ phys = <&usb2_phy>;
+ phy-names = "usb2-phy";
+ mode = <3>;
+ power = <50>;
+};
+
+&gpmc {
+ ranges = <0 0 0x04000000 0x20000000>;
+
+ onenand@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0 0 0x20000000>;
+
+ gpmc,sync-read;
+ gpmc,sync-write;
+ gpmc,burst-length = <16>;
+ gpmc,burst-read;
+ gpmc,burst-wrap;
+ gpmc,burst-write;
+ gpmc,device-width = <2>;
+ gpmc,mux-add-data = <2>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <87>;
+ gpmc,cs-wr-off-ns = <87>;
+ gpmc,adv-on-ns = <0>;
+ gpmc,adv-rd-off-ns = <10>;
+ gpmc,adv-wr-off-ns = <10>;
+ gpmc,oe-on-ns = <15>;
+ gpmc,oe-off-ns = <87>;
+ gpmc,we-on-ns = <0>;
+ gpmc,we-off-ns = <87>;
+ gpmc,rd-cycle-ns = <112>;
+ gpmc,wr-cycle-ns = <112>;
+ gpmc,access-ns = <81>;
+ gpmc,page-burst-access-ns = <15>;
+ gpmc,bus-turnaround-ns = <0>;
+ gpmc,cycle2cycle-delay-ns = <0>;
+ gpmc,wait-monitoring-ns = <0>;
+ gpmc,clk-activation-ns = <5>;
+ gpmc,wr-data-mux-bus-ns = <30>;
+ gpmc,wr-access-ns = <81>;
+ gpmc,sync-clk-ps = <15000>;
+
+ /*
+ * MTD partition table corresponding to Nokia's MeeGo 1.2
+ * Harmattan release.
+ */
+ partition@0 {
+ label = "bootloader";
+ reg = <0x00000000 0x00100000>;
+ };
+ partition@1 {
+ label = "config";
+ reg = <0x00100000 0x002c0000>;
+ };
+ partition@2 {
+ label = "kernel";
+ reg = <0x003c0000 0x01000000>;
+ };
+ partition@3 {
+ label = "log";
+ reg = <0x013c0000 0x00200000>;
+ };
+ partition@4 {
+ label = "var";
+ reg = <0x015c0000 0x1ca40000>;
+ };
+ partition@5 {
+ label = "moslo";
+ reg = <0x1e000000 0x02000000>;
+ };
+ partition@6 {
+ label = "omap2-onenand";
+ reg = <0x00000000 0x20000000>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/omap3-n950.dts b/arch/arm/boot/dts/omap3-n950.dts
new file mode 100644
index 000000000000..b076a526b999
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-n950.dts
@@ -0,0 +1,18 @@
+/*
+ * omap3-n950.dts - Device Tree file for Nokia N950
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/dts-v1/;
+
+#include "omap3-n950-n9.dtsi"
+
+/ {
+ model = "Nokia N950";
+ compatible = "nokia,omap3-n950", "ti,omap3";
+};
diff --git a/arch/arm/boot/dts/omap3-zoom3.dts b/arch/arm/boot/dts/omap3-zoom3.dts
new file mode 100644
index 000000000000..15eb9fe5169c
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-zoom3.dts
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "omap36xx.dtsi"
+#include "omap-zoom-common.dtsi"
+
+/ {
+ model = "TI Zoom3";
+ compatible = "ti,omap3-zoom3", "ti,omap36xx", "ti,omap3";
+
+ cpus {
+ cpu@0 {
+ cpu0-supply = <&vcc>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>; /* 512 MB */
+ };
+
+ vddvario: regulator-vddvario {
+ compatible = "regulator-fixed";
+ regulator-name = "vddvario";
+ regulator-always-on;
+ };
+
+ vdd33a: regulator-vdd33a {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd33a";
+ regulator-always-on;
+ };
+
+ wl12xx_vmmc: wl12xx_vmmc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wl12xx_gpio>;
+ compatible = "regulator-fixed";
+ regulator-name = "vwl1271";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&gpio4 5 0>; /* gpio101 */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+};
+
+&omap3_pmx_core {
+ /* REVISIT: twl gpio0 is mmc0_cd */
+ mmc1_pins: pinmux_mmc1_pins {
+ pinctrl-single,pins = <
+ 0x114 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk.sdmmc1_clk */
+ 0x116 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* sdmmc1_cmd.sdmmc1_cmd */
+ 0x118 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat0.sdmmc1_dat0 */
+ 0x11a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
+ 0x11c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
+ 0x11e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
+ >;
+ };
+
+ mmc2_pins: pinmux_mmc2_pins {
+ pinctrl-single,pins = <
+ 0x128 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
+ 0x12a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
+ 0x12c (PIN_INPUT | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
+ 0x12e (PIN_INPUT | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
+ 0x130 (PIN_INPUT | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
+ 0x132 (PIN_INPUT | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
+ 0x134 (PIN_INPUT | MUX_MODE0) /* sdmmc2_dat4.sdmmc2_dat4 */
+ 0x136 (PIN_INPUT | MUX_MODE0) /* sdmmc2_dat5.sdmmc2_dat5 */
+ 0x138 (PIN_INPUT | MUX_MODE0) /* sdmmc2_dat6.sdmmc2_dat6 */
+ 0x13a (PIN_INPUT | MUX_MODE0) /* sdmmc2_dat7.sdmmc2_dat7 */
+ >;
+ };
+
+ mmc3_pins: pinmux_mmc3_pins {
+ pinctrl-single,pins = <
+ 0x168 (PIN_INPUT | MUX_MODE4) /* mcbsp1_clkx.gpio_162 WLAN IRQ */
+ 0x1a0 (PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.sdmmc3_cmd */
+ 0x5a8 (PIN_INPUT_PULLUP | MUX_MODE2) /* etk_clk.sdmmc3_clk */
+ 0x5b4 (PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d4.sdmmc3_dat0 */
+ 0x5b6 (WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
+ 0x5b8 (PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d6.sdmmc3_dat2 */
+ 0x5b2 (PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d3.sdmmc3_dat3 */
+ >;
+ };
+
+ uart1_pins: pinmux_uart1_pins {
+ pinctrl-single,pins = <
+ 0x150 (PIN_INPUT | MUX_MODE0) /* uart1_cts.uart1_cts */
+ 0x14e (PIN_OUTPUT | MUX_MODE0) /* uart1_rts.uart1_rts */
+ 0x152 (WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart1_rx.uart1_rx */
+ 0x14c (PIN_OUTPUT | MUX_MODE0) /* uart1_tx.uart1_tx */
+ >;
+ };
+
+ uart2_pins: pinmux_uart2_pins {
+ pinctrl-single,pins = <
+ 0x144 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts */
+ 0x146 (PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */
+ 0x14a (WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
+ 0x148 (PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */
+ >;
+ };
+
+ uart3_pins: pinmux_uart3_pins {
+ pinctrl-single,pins = <
+ 0x16a (PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart3_cts_rctx.uart3_cts_rctx */
+ 0x16c (PIN_OUTPUT | MUX_MODE0) /* uart3_rts_sd.uart3_rts_sd */
+ 0x16e (WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
+ 0x170 (PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */
+ >;
+ };
+
+ /* wl12xx GPIO output for WLAN_EN */
+ wl12xx_gpio: pinmux_wl12xx_gpio {
+ pinctrl-single,pins = <
+ 0xea (PIN_OUTPUT | MUX_MODE4) /* cam_d2.gpio_101 */
+ >;
+ };
+};
+
+&omap3_pmx_wkup {
+ wlan_host_wkup: pinmux_wlan_host_wkup_pins {
+ pinctrl-single,pins = <
+ 0x1a (PIN_INPUT_PULLUP | MUX_MODE4) /* sys_clkout1.gpio_10 WLAN_HOST_WKUP */
+ >;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <2600000>;
+
+ twl: twl@48 {
+ reg = <0x48>;
+ interrupts = <7>; /* SYS_NIRQ cascaded to intc */
+ interrupt-parent = <&intc>;
+ };
+};
+
+#include "twl4030.dtsi"
+
+&i2c2 {
+ clock-frequency = <400000>;
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+
+ /*
+ * TVP5146 Video decoder-in for analog input support.
+ */
+ tvp5146@5c {
+ compatible = "ti,tvp5146m2";
+ reg = <0x5c>;
+ };
+};
+
+&twl_gpio {
+ ti,use-leds;
+};
+
+&mmc1 {
+ vmmc-supply = <&vmmc1>;
+ vmmc_aux-supply = <&vsim>;
+ bus-width = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+};
+/*
+&mmc2 {
+ vmmc-supply = <&vmmc2>;
+ ti,non-removable;
+ bus-width = <8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins>;
+};
+*/
+&mmc3 {
+ vmmc-supply = <&wl12xx_vmmc>;
+ non-removable;
+ bus-width = <4>;
+ cap-power-off-card;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc3_pins>;
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>;
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pins>;
+};
+
+&uart4 {
+ status = "disabled";
+};
+
+&usb_otg_hs {
+ interface-type = <0>;
+ usb-phy = <&usb2_phy>;
+ mode = <3>;
+ power = <50>;
+};
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index b41bd57f4328..f3a0c26ed0c2 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -19,6 +19,9 @@
interrupt-parent = <&intc>;
aliases {
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
@@ -37,6 +40,7 @@
pmu {
compatible = "arm,cortex-a8-pmu";
+ reg = <0x54000000 0x800000>;
interrupts = <3>;
ti,hwmods = "debugss";
};
@@ -71,6 +75,8 @@
*/
ocp {
compatible = "simple-bus";
+ reg = <0x68000000 0x10000>;
+ interrupts = <9 10>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -107,15 +113,19 @@
reg = <0x48002030 0x05cc>;
#address-cells = <1>;
#size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
pinctrl-single,register-width = <16>;
pinctrl-single,function-mask = <0xff1f>;
};
- omap3_pmx_wkup: pinmux@0x48002a00 {
+ omap3_pmx_wkup: pinmux@48002a00 {
compatible = "ti,omap3-padconf", "pinctrl-single";
reg = <0x48002a00 0x5c>;
#address-cells = <1>;
#size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
pinctrl-single,register-width = <16>;
pinctrl-single,function-mask = <0xff1f>;
};
@@ -189,24 +199,40 @@
uart1: serial@4806a000 {
compatible = "ti,omap3-uart";
+ reg = <0x4806a000 0x2000>;
+ interrupts = <72>;
+ dmas = <&sdma 49 &sdma 50>;
+ dma-names = "tx", "rx";
ti,hwmods = "uart1";
clock-frequency = <48000000>;
};
uart2: serial@4806c000 {
compatible = "ti,omap3-uart";
+ reg = <0x4806c000 0x400>;
+ interrupts = <73>;
+ dmas = <&sdma 51 &sdma 52>;
+ dma-names = "tx", "rx";
ti,hwmods = "uart2";
clock-frequency = <48000000>;
};
uart3: serial@49020000 {
compatible = "ti,omap3-uart";
+ reg = <0x49020000 0x400>;
+ interrupts = <74>;
+ dmas = <&sdma 53 &sdma 54>;
+ dma-names = "tx", "rx";
ti,hwmods = "uart3";
clock-frequency = <48000000>;
};
i2c1: i2c@48070000 {
compatible = "ti,omap3-i2c";
+ reg = <0x48070000 0x80>;
+ interrupts = <56>;
+ dmas = <&sdma 27 &sdma 28>;
+ dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
ti,hwmods = "i2c1";
@@ -214,6 +240,10 @@
i2c2: i2c@48072000 {
compatible = "ti,omap3-i2c";
+ reg = <0x48072000 0x80>;
+ interrupts = <57>;
+ dmas = <&sdma 29 &sdma 30>;
+ dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
ti,hwmods = "i2c2";
@@ -221,6 +251,10 @@
i2c3: i2c@48060000 {
compatible = "ti,omap3-i2c";
+ reg = <0x48060000 0x80>;
+ interrupts = <61>;
+ dmas = <&sdma 25 &sdma 26>;
+ dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
ti,hwmods = "i2c3";
@@ -228,6 +262,8 @@
mcspi1: spi@48098000 {
compatible = "ti,omap2-mcspi";
+ reg = <0x48098000 0x100>;
+ interrupts = <65>;
#address-cells = <1>;
#size-cells = <0>;
ti,hwmods = "mcspi1";
@@ -246,6 +282,8 @@
mcspi2: spi@4809a000 {
compatible = "ti,omap2-mcspi";
+ reg = <0x4809a000 0x100>;
+ interrupts = <66>;
#address-cells = <1>;
#size-cells = <0>;
ti,hwmods = "mcspi2";
@@ -259,6 +297,8 @@
mcspi3: spi@480b8000 {
compatible = "ti,omap2-mcspi";
+ reg = <0x480b8000 0x100>;
+ interrupts = <91>;
#address-cells = <1>;
#size-cells = <0>;
ti,hwmods = "mcspi3";
@@ -272,6 +312,8 @@
mcspi4: spi@480ba000 {
compatible = "ti,omap2-mcspi";
+ reg = <0x480ba000 0x100>;
+ interrupts = <48>;
#address-cells = <1>;
#size-cells = <0>;
ti,hwmods = "mcspi4";
@@ -280,8 +322,17 @@
dma-names = "tx0", "rx0";
};
+ hdqw1w: 1w@480b2000 {
+ compatible = "ti,omap3-1w";
+ reg = <0x480b2000 0x1000>;
+ interrupts = <58>;
+ ti,hwmods = "hdq1w";
+ };
+
mmc1: mmc@4809c000 {
compatible = "ti,omap3-hsmmc";
+ reg = <0x4809c000 0x200>;
+ interrupts = <83>;
ti,hwmods = "mmc1";
ti,dual-volt;
dmas = <&sdma 61>, <&sdma 62>;
@@ -290,6 +341,8 @@
mmc2: mmc@480b4000 {
compatible = "ti,omap3-hsmmc";
+ reg = <0x480b4000 0x200>;
+ interrupts = <86>;
ti,hwmods = "mmc2";
dmas = <&sdma 47>, <&sdma 48>;
dma-names = "tx", "rx";
@@ -297,6 +350,8 @@
mmc3: mmc@480ad000 {
compatible = "ti,omap3-hsmmc";
+ reg = <0x480ad000 0x200>;
+ interrupts = <94>;
ti,hwmods = "mmc3";
dmas = <&sdma 77>, <&sdma 78>;
dma-names = "tx", "rx";
@@ -304,6 +359,7 @@
wdt2: wdt@48314000 {
compatible = "ti,omap3-wdt";
+ reg = <0x48314000 0x80>;
ti,hwmods = "wd_timer2";
};
diff --git a/arch/arm/boot/dts/omap3430-sdp.dts b/arch/arm/boot/dts/omap3430-sdp.dts
index e2249bcc3e63..281914ed0151 100644
--- a/arch/arm/boot/dts/omap3430-sdp.dts
+++ b/arch/arm/boot/dts/omap3430-sdp.dts
@@ -84,15 +84,15 @@
label = "bootloader-nor";
reg = <0 0x40000>;
};
- partition@0x40000 {
+ partition@40000 {
label = "params-nor";
reg = <0x40000 0x40000>;
};
- partition@0x80000 {
+ partition@80000 {
label = "kernel-nor";
reg = <0x80000 0x200000>;
};
- partition@0x280000 {
+ partition@280000 {
label = "filesystem-nor";
reg = <0x240000 0x7d80000>;
};
@@ -125,19 +125,19 @@
label = "xloader-nand";
reg = <0 0x80000>;
};
- partition@0x80000 {
+ partition@80000 {
label = "bootloader-nand";
reg = <0x80000 0x140000>;
};
- partition@0x1c0000 {
+ partition@1c0000 {
label = "params-nand";
reg = <0x1c0000 0xc0000>;
};
- partition@0x280000 {
+ partition@280000 {
label = "kernel-nand";
reg = <0x280000 0x500000>;
};
- partition@0x780000 {
+ partition@780000 {
label = "filesystem-nand";
reg = <0x780000 0x7880000>;
};
@@ -170,19 +170,19 @@
label = "xloader-onenand";
reg = <0 0x80000>;
};
- partition@0x80000 {
+ partition@80000 {
label = "bootloader-onenand";
reg = <0x80000 0x40000>;
};
- partition@0xc0000 {
+ partition@c0000 {
label = "params-onenand";
reg = <0xc0000 0x20000>;
};
- partition@0xe0000 {
+ partition@e0000 {
label = "kernel-onenand";
reg = <0xe0000 0x200000>;
};
- partition@0x2e0000 {
+ partition@2e0000 {
label = "filesystem-onenand";
reg = <0x2e0000 0xfd20000>;
};
diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi
index f8b3765eb9be..380c22eb468e 100644
--- a/arch/arm/boot/dts/omap36xx.dtsi
+++ b/arch/arm/boot/dts/omap36xx.dtsi
@@ -31,6 +31,10 @@
ocp {
uart4: serial@49042000 {
compatible = "ti,omap3-uart";
+ reg = <0x49042000 0x400>;
+ interrupts = <80>;
+ dmas = <&sdma 81 &sdma 82>;
+ dma-names = "tx", "rx";
ti,hwmods = "uart4";
clock-frequency = <48000000>;
};
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 814ab67c8c29..298e85020e1b 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -60,22 +60,6 @@
"AFMR", "Line In";
};
- /*
- * Temp hack: Need to be replaced with the proper gpio-controlled
- * reset driver as soon it will be merged.
- * http://thread.gmane.org/gmane.linux.drivers.devicetree/36830
- */
- /* HS USB Port 1 RESET */
- hsusb1_reset: hsusb1_reset_reg {
- compatible = "regulator-fixed";
- regulator-name = "hsusb1_reset";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio2 30 0>; /* gpio_62 */
- startup-delay-us = <70000>;
- enable-active-high;
- };
-
/* HS USB Port 1 Power */
hsusb1_power: hsusb1_power_reg {
compatible = "regulator-fixed";
@@ -97,7 +81,7 @@
/* HS USB Host PHY on PORT 1 */
hsusb1_phy: hsusb1_phy {
compatible = "usb-nop-xceiv";
- reset-supply = <&hsusb1_reset>;
+ reset-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>; /* gpio_62 */
vcc-supply = <&hsusb1_power>;
/**
* FIXME:
@@ -122,37 +106,19 @@
};
};
-&omap4_pmx_wkup {
- pinctrl-names = "default";
- pinctrl-0 = <
- &twl6030_wkup_pins
- >;
-
- twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
- pinctrl-single,pins = <
- 0x14 (PIN_OUTPUT | MUX_MODE2) /* fref_clk0_out.sys_drm_msecure */
- >;
- };
-};
-
&omap4_pmx_core {
pinctrl-names = "default";
pinctrl-0 = <
- &twl6030_pins
&twl6040_pins
&mcpdm_pins
&mcbsp1_pins
+ &dss_dpi_pins
+ &tfp410_pins
&dss_hdmi_pins
&tpd12s015_pins
&hsusbb1_pins
>;
- twl6030_pins: pinmux_twl6030_pins {
- pinctrl-single,pins = <
- 0x15e (WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1.sys_nirq1 */
- >;
- };
-
twl6040_pins: pinmux_twl6040_pins {
pinctrl-single,pins = <
0xe0 (PIN_OUTPUT | MUX_MODE3) /* hdq_sio.gpio_127 */
@@ -179,6 +145,47 @@
>;
};
+ dss_dpi_pins: pinmux_dss_dpi_pins {
+ pinctrl-single,pins = <
+ 0x122 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data23 */
+ 0x124 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data22 */
+ 0x126 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data21 */
+ 0x128 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data20 */
+ 0x12a (PIN_OUTPUT | MUX_MODE5) /* dispc2_data19 */
+ 0x12c (PIN_OUTPUT | MUX_MODE5) /* dispc2_data18 */
+ 0x12e (PIN_OUTPUT | MUX_MODE5) /* dispc2_data15 */
+ 0x130 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data14 */
+ 0x132 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data13 */
+ 0x134 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data12 */
+ 0x136 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data11 */
+
+ 0x174 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data10 */
+ 0x176 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data9 */
+ 0x178 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data16 */
+ 0x17a (PIN_OUTPUT | MUX_MODE5) /* dispc2_data17 */
+ 0x17c (PIN_OUTPUT | MUX_MODE5) /* dispc2_hsync */
+ 0x17e (PIN_OUTPUT | MUX_MODE5) /* dispc2_pclk */
+ 0x180 (PIN_OUTPUT | MUX_MODE5) /* dispc2_vsync */
+ 0x182 (PIN_OUTPUT | MUX_MODE5) /* dispc2_de */
+ 0x184 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data8 */
+ 0x186 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data7 */
+ 0x188 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data6 */
+ 0x18a (PIN_OUTPUT | MUX_MODE5) /* dispc2_data5 */
+ 0x18c (PIN_OUTPUT | MUX_MODE5) /* dispc2_data4 */
+ 0x18e (PIN_OUTPUT | MUX_MODE5) /* dispc2_data3 */
+
+ 0x190 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data2 */
+ 0x192 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data1 */
+ 0x194 (PIN_OUTPUT | MUX_MODE5) /* dispc2_data0 */
+ >;
+ };
+
+ tfp410_pins: pinmux_tfp410_pins {
+ pinctrl-single,pins = <
+ 0x144 (PIN_OUTPUT | MUX_MODE3) /* gpio_0 */
+ >;
+ };
+
dss_hdmi_pins: pinmux_dss_hdmi_pins {
pinctrl-single,pins = <
0x5a (PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec.hdmi_cec */
@@ -305,6 +312,7 @@
};
#include "twl6030.dtsi"
+#include "twl6030_omap4.dtsi"
&i2c2 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/omap4-panda-es.dts b/arch/arm/boot/dts/omap4-panda-es.dts
index 56c435468e94..816d1c95b592 100644
--- a/arch/arm/boot/dts/omap4-panda-es.dts
+++ b/arch/arm/boot/dts/omap4-panda-es.dts
@@ -62,3 +62,7 @@
gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
};
};
+
+&gpio1 {
+ ti,no-reset-on-init;
+};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index 4f78380ecdb8..5fc3f43c5a81 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -155,23 +155,9 @@
};
};
-&omap4_pmx_wkup {
- pinctrl-names = "default";
- pinctrl-0 = <
- &twl6030_wkup_pins
- >;
-
- twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
- pinctrl-single,pins = <
- 0x14 (PIN_OUTPUT | MUX_MODE2) /* fref_clk0_out.sys_drm_msecure */
- >;
- };
-};
-
&omap4_pmx_core {
pinctrl-names = "default";
pinctrl-0 = <
- &twl6030_pins
&twl6040_pins
&mcpdm_pins
&dmic_pins
@@ -206,12 +192,6 @@
>;
};
- twl6030_pins: pinmux_twl6030_pins {
- pinctrl-single,pins = <
- 0x15e (WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1.sys_nirq1 */
- >;
- };
-
twl6040_pins: pinmux_twl6040_pins {
pinctrl-single,pins = <
0xe0 (PIN_OUTPUT | MUX_MODE3) /* hdq_sio.gpio_127 */
@@ -370,6 +350,7 @@
};
#include "twl6030.dtsi"
+#include "twl6030_omap4.dtsi"
&i2c2 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index ea4054bfdfd4..a1e05853afcd 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -17,6 +17,10 @@
interrupt-parent = <&gic>;
aliases {
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
+ i2c3 = &i2c4;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
@@ -56,7 +60,7 @@
cache-level = <2>;
};
- local-timer@0x48240600 {
+ local-timer@48240600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x48240600 0x20>;
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_HIGH)>;
@@ -114,6 +118,8 @@
reg = <0x4a100040 0x0196>;
#address-cells = <1>;
#size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
pinctrl-single,register-width = <16>;
pinctrl-single,function-mask = <0x7fff>;
};
@@ -122,6 +128,8 @@
reg = <0x4a31e040 0x0038>;
#address-cells = <1>;
#size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
pinctrl-single,register-width = <16>;
pinctrl-single,function-mask = <0x7fff>;
};
@@ -214,6 +222,7 @@
gpmc,num-cs = <8>;
gpmc,num-waitpins = <4>;
ti,hwmods = "gpmc";
+ ti,no-idle-on-init;
};
uart1: serial@4806a000 {
@@ -248,6 +257,12 @@
clock-frequency = <48000000>;
};
+ hwspinlock: spinlock@4a0f6000 {
+ compatible = "ti,omap4-hwspinlock";
+ reg = <0x4a0f6000 0x1000>;
+ ti,hwmods = "spinlock";
+ };
+
i2c1: i2c@48070000 {
compatible = "ti,omap4-i2c";
reg = <0x48070000 0x100>;
@@ -492,6 +507,7 @@
reg = <0x4c000000 0x100>;
interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "emif1";
+ ti,no-idle-on-init;
phy-type = <1>;
hw-caps-read-idle-ctrl;
hw-caps-ll-interface;
@@ -503,6 +519,7 @@
reg = <0x4d000000 0x100>;
interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "emif2";
+ ti,no-idle-on-init;
phy-type = <1>;
hw-caps-read-idle-ctrl;
hw-caps-ll-interface;
@@ -670,5 +687,23 @@
ram-bits = <12>;
ctrl-module = <&omap_control_usbotg>;
};
+
+ aes: aes@4b501000 {
+ compatible = "ti,omap4-aes";
+ ti,hwmods = "aes";
+ reg = <0x4b501000 0xa0>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma 111>, <&sdma 110>;
+ dma-names = "tx", "rx";
+ };
+
+ des: des@480a5000 {
+ compatible = "ti,omap4-des";
+ ti,hwmods = "des";
+ reg = <0x480a5000 0xa0>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma 117>, <&sdma 116>;
+ dma-names = "tx", "rx";
+ };
};
};
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 65d7b601651c..002fa70180a5 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -27,21 +27,10 @@
regulator-max-microvolt = <3000000>;
};
- /* HS USB Port 2 RESET */
- hsusb2_reset: hsusb2_reset_reg {
- compatible = "regulator-fixed";
- regulator-name = "hsusb2_reset";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>; /* gpio3_80 HUB_NRESET */
- startup-delay-us = <70000>;
- enable-active-high;
- };
-
/* HS USB Host PHY on PORT 2 */
hsusb2_phy: hsusb2_phy {
compatible = "usb-nop-xceiv";
- reset-supply = <&hsusb2_reset>;
+ reset-gpios = <&gpio3 16 GPIO_ACTIVE_LOW>; /* gpio3_80 HUB_NRESET */
/**
* FIXME
* Put the right clock phandle here when available
@@ -51,21 +40,10 @@
clock-frequency = <19200000>;
};
- /* HS USB Port 3 RESET */
- hsusb3_reset: hsusb3_reset_reg {
- compatible = "regulator-fixed";
- regulator-name = "hsusb3_reset";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio3 15 GPIO_ACTIVE_HIGH>; /* gpio3_79 ETH_NRESET */
- startup-delay-us = <70000>;
- enable-active-high;
- };
-
/* HS USB Host PHY on PORT 3 */
hsusb3_phy: hsusb3_phy {
compatible = "usb-nop-xceiv";
- reset-supply = <&hsusb3_reset>;
+ reset-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>; /* gpio3_79 ETH_NRESET */
};
leds {
@@ -84,7 +62,6 @@
pinctrl-0 = <
&twl6040_pins
&mcpdm_pins
- &dmic_pins
&mcbsp1_pins
&mcbsp2_pins
&usbhost_pins
@@ -93,7 +70,7 @@
twl6040_pins: pinmux_twl6040_pins {
pinctrl-single,pins = <
- 0x18a (PIN_OUTPUT | MUX_MODE6) /* perslimbus2_clock.gpio5_145 */
+ 0x17e (PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_141 */
>;
};
@@ -107,15 +84,6 @@
>;
};
- dmic_pins: pinmux_dmic_pins {
- pinctrl-single,pins = <
- 0x144 (PIN_INPUT | MUX_MODE0) /* abedmic_din1.abedmic_din1 */
- 0x146 (PIN_INPUT | MUX_MODE0) /* abedmic_din2.abedmic_din2 */
- 0x148 (PIN_INPUT | MUX_MODE0) /* abedmic_din3.abedmic_din3 */
- 0x14a (PIN_OUTPUT | MUX_MODE0) /* abedmic_clk1.abedmic_clk1 */
- >;
- };
-
mcbsp1_pins: pinmux_mcbsp1_pins {
pinctrl-single,pins = <
0x14c (PIN_INPUT | MUX_MODE1) /* abedmic_clk2.abemcbsp1_fsx */
@@ -153,25 +121,25 @@
0xbc (PIN_INPUT | MUX_MODE0) /* mcspi2_clk */
0xbe (PIN_INPUT | MUX_MODE0) /* mcspi2_simo */
0xc0 (PIN_INPUT_PULLUP | MUX_MODE0) /* mcspi2_somi */
- 0xc2 (PIN_OUTPUT | MUX_MODE0) /* mcspi2_cs */
+ 0xc2 (PIN_OUTPUT | MUX_MODE0) /* mcspi2_cs0 */
>;
};
mcspi3_pins: pinmux_mcspi3_pins {
pinctrl-single,pins = <
- 0x78 (PIN_INPUT | MUX_MODE1) /* mcspi2_somi */
- 0x7a (PIN_INPUT | MUX_MODE1) /* mcspi2_cs */
- 0x7c (PIN_INPUT | MUX_MODE1) /* mcspi2_simo */
- 0x7e (PIN_INPUT | MUX_MODE1) /* mcspi2_clk */
+ 0x78 (PIN_INPUT | MUX_MODE1) /* mcspi3_somi */
+ 0x7a (PIN_INPUT | MUX_MODE1) /* mcspi3_cs0 */
+ 0x7c (PIN_INPUT | MUX_MODE1) /* mcspi3_simo */
+ 0x7e (PIN_INPUT | MUX_MODE1) /* mcspi3_clk */
>;
};
mcspi4_pins: pinmux_mcspi4_pins {
pinctrl-single,pins = <
- 0x164 (PIN_INPUT | MUX_MODE1) /* mcspi2_clk */
- 0x168 (PIN_INPUT | MUX_MODE1) /* mcspi2_simo */
- 0x16a (PIN_INPUT | MUX_MODE1) /* mcspi2_somi */
- 0x16c (PIN_INPUT | MUX_MODE1) /* mcspi2_cs */
+ 0x164 (PIN_INPUT | MUX_MODE1) /* mcspi4_clk */
+ 0x168 (PIN_INPUT | MUX_MODE1) /* mcspi4_simo */
+ 0x16a (PIN_INPUT | MUX_MODE1) /* mcspi4_somi */
+ 0x16c (PIN_INPUT | MUX_MODE1) /* mcspi4_cs0 */
>;
};
@@ -271,6 +239,14 @@
reg = <0x48>;
interrupt-controller;
#interrupt-cells = <2>;
+ ti,system-power-controller;
+
+ extcon_usb3: palmas_usb {
+ compatible = "ti,palmas-usb-vid";
+ ti,enable-vbus-detection;
+ ti,enable-id-detection;
+ ti,wakeup;
+ };
palmas_pmic {
compatible = "ti,palmas-pmic";
@@ -334,15 +310,22 @@
ti,smps-range = <0x80>;
};
- smps10_reg: smps10 {
+ smps10_out2_reg: smps10_out2 {
/* VBUS_5V_OTG */
- regulator-name = "smps10";
+ regulator-name = "smps10_out2";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
regulator-always-on;
regulator-boot-on;
};
+ smps10_out1_reg: smps10_out1 {
+ /* VBUS_5V_OTG */
+ regulator-name = "smps10_out1";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
ldo1_reg: ldo1 {
/* VDDAPHY_CAM: vdda_csiport */
regulator-name = "ldo1";
@@ -470,6 +453,11 @@
phys = <0 &hsusb2_phy &hsusb3_phy>;
};
+&usb3 {
+ extcon = <&extcon_usb3>;
+ vbus-supply = <&smps10_out1_reg>;
+};
+
&mcspi1 {
};
@@ -503,3 +491,7 @@
pinctrl-names = "default";
pinctrl-0 = <&uart5_pins>;
};
+
+&cpu0 {
+ cpu0-supply = <&smps123_reg>;
+};
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index c0ec6dce30fe..fc3fad563861 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -21,6 +21,11 @@
interrupt-parent = <&gic>;
aliases {
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
+ i2c3 = &i2c4;
+ i2c4 = &i2c5;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
@@ -33,10 +38,17 @@
#address-cells = <1>;
#size-cells = <0>;
- cpu@0 {
+ cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <0x0>;
+
+ operating-points = <
+ /* kHz uV */
+ 500000 880000
+ 1000000 1060000
+ 1500000 1250000
+ >;
};
cpu@1 {
device_type = "cpu";
@@ -52,7 +64,6 @@
<GIC_PPI 14 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_LOW)>;
- clock-frequency = <6144000>;
};
gic: interrupt-controller@48211000 {
@@ -276,6 +287,12 @@
ti,hwmods = "i2c5";
};
+ hwspinlock: spinlock@4a0f6000 {
+ compatible = "ti,omap4-hwspinlock";
+ reg = <0x4a0f6000 0x1000>;
+ ti,hwmods = "spinlock";
+ };
+
mcspi1: spi@48098000 {
compatible = "ti,omap4-mcspi";
reg = <0x48098000 0x200>;
@@ -604,9 +621,10 @@
ti,hwmods = "wd_timer2";
};
- emif1: emif@0x4c000000 {
+ emif1: emif@4c000000 {
compatible = "ti,emif-4d5";
ti,hwmods = "emif1";
+ ti,no-idle-on-init;
phy-type = <2>; /* DDR PHY type: Intelli PHY */
reg = <0x4c000000 0x400>;
interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
@@ -615,9 +633,10 @@
hw-caps-temp-alert;
};
- emif2: emif@0x4d000000 {
+ emif2: emif@4d000000 {
compatible = "ti,emif-4d5";
ti,hwmods = "emif2";
+ ti,no-idle-on-init;
phy-type = <2>; /* DDR PHY type: Intelli PHY */
reg = <0x4d000000 0x400>;
interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
@@ -638,7 +657,7 @@
reg-names = "power";
};
- omap_dwc3@4a020000 {
+ usb3: omap_dwc3@4a020000 {
compatible = "ti,dwc3";
ti,hwmods = "usb_otg_ss";
reg = <0x4a020000 0x10000>;
@@ -652,6 +671,7 @@
reg = <0x4a030000 0x10000>;
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
usb-phy = <&usb2_phy>, <&usb3_phy>;
+ dr_mode = "peripheral";
tx-fifo-resize;
};
};
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 27ed9f5144bc..7cf78afee7b1 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -76,6 +76,11 @@
compatible = "sirf,prima2-rsc";
reg = <0x88020000 0x1000>;
};
+
+ cphifbg@88030000 {
+ compatible = "sirf,prima2-cphifbg";
+ reg = <0x88030000 0x1000>;
+ };
};
mem-iobg {
@@ -86,10 +91,17 @@
memory-controller@90000000 {
compatible = "sirf,prima2-memc";
- reg = <0x90000000 0x10000>;
+ reg = <0x90000000 0x2000>;
interrupts = <27>;
clocks = <&clks 5>;
};
+
+ memc-monitor {
+ compatible = "sirf,prima2-memcmon";
+ reg = <0x90002000 0x200>;
+ interrupts = <4>;
+ clocks = <&clks 32>;
+ };
};
disp-iobg {
@@ -287,7 +299,13 @@
compatible = "sirf,prima2-spi";
reg = <0xb00d0000 0x10000>;
interrupts = <15>;
+ sirf,spi-num-chipselects = <1>;
+ sirf,spi-dma-rx-channel = <25>;
+ sirf,spi-dma-tx-channel = <20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&clks 19>;
+ status = "disabled";
};
spi1: spi@b0170000 {
@@ -295,7 +313,13 @@
compatible = "sirf,prima2-spi";
reg = <0xb0170000 0x10000>;
interrupts = <16>;
+ sirf,spi-num-chipselects = <1>;
+ sirf,spi-dma-rx-channel = <12>;
+ sirf,spi-dma-tx-channel = <13>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&clks 20>;
+ status = "disabled";
};
i2c0: i2c@b00e0000 {
@@ -304,6 +328,8 @@
reg = <0xb00e0000 0x10000>;
interrupts = <24>;
clocks = <&clks 17>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c1: i2c@b00f0000 {
@@ -312,6 +338,8 @@
reg = <0xb00f0000 0x10000>;
interrupts = <25>;
clocks = <&clks 18>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
tsc@b0110000 {
diff --git a/arch/arm/boot/dts/msm8660-surf.dts b/arch/arm/boot/dts/qcom-msm8660-surf.dts
index 386d42870215..386d42870215 100644
--- a/arch/arm/boot/dts/msm8660-surf.dts
+++ b/arch/arm/boot/dts/qcom-msm8660-surf.dts
diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/qcom-msm8960-cdp.dts
index 93e9f7e0b7ad..93e9f7e0b7ad 100644
--- a/arch/arm/boot/dts/msm8960-cdp.dts
+++ b/arch/arm/boot/dts/qcom-msm8960-cdp.dts
diff --git a/arch/arm/boot/dts/r7s72100-genmai.dts b/arch/arm/boot/dts/r7s72100-genmai.dts
new file mode 100644
index 000000000000..1fb20f2333cc
--- /dev/null
+++ b/arch/arm/boot/dts/r7s72100-genmai.dts
@@ -0,0 +1,31 @@
+/*
+ * Device Tree Source for the Genmai board
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "r7s72100.dtsi"
+
+/ {
+ model = "Genmai";
+ compatible = "renesas,genmai", "renesas,r7s72100";
+
+ chosen {
+ bootargs = "console=ttySC2,115200 ignore_loglevel rw root=/dev/nfs ip=dhcp";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x08000000 0x08000000>;
+ };
+
+ lbsc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
diff --git a/arch/arm/boot/dts/r7s72100.dtsi b/arch/arm/boot/dts/r7s72100.dtsi
new file mode 100644
index 000000000000..46b82aa7dc4e
--- /dev/null
+++ b/arch/arm/boot/dts/r7s72100.dtsi
@@ -0,0 +1,36 @@
+/*
+ * Device Tree Source for the r7s72100 SoC
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/ {
+ compatible = "renesas,r7s72100";
+ interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ };
+ };
+
+ gic: interrupt-controller@e8201000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0xe8201000 0x1000>,
+ <0xe8202000 0x1000>;
+ };
+};
diff --git a/arch/arm/boot/dts/r8a73a4-ape6evm-reference.dts b/arch/arm/boot/dts/r8a73a4-ape6evm-reference.dts
index f444624eb097..9443e93d3cac 100644
--- a/arch/arm/boot/dts/r8a73a4-ape6evm-reference.dts
+++ b/arch/arm/boot/dts/r8a73a4-ape6evm-reference.dts
@@ -10,6 +10,7 @@
/dts-v1/;
/include/ "r8a73a4.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "APE6EVM";
@@ -24,6 +25,34 @@
reg = <0 0x40000000 0 0x40000000>;
};
+ vcc_mmc0: regulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "MMC0 Vcc";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ };
+
+ vcc_sdhi0: regulator@1 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "SDHI0 Vcc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pfc 76 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ /* Common 3.3V rail, used by several devices on APE6EVM */
+ ape6evm_fixed_3v3: regulator@2 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
lbsc {
compatible = "simple-bus";
#address-cells = <1>;
@@ -33,6 +62,7 @@
};
&i2c5 {
+ status = "okay";
vdd_dvfs: max8973@1b {
compatible = "maxim,max8973";
reg = <0x1b>;
@@ -62,4 +92,47 @@
renesas,groups = "scifa0_data";
renesas,function = "scifa0";
};
+
+ mmc0_pins: mmcif {
+ renesas,groups = "mmc0_data8", "mmc0_ctrl";
+ renesas,function = "mmc0";
+ };
+
+ sdhi0_pins: sdhi0 {
+ renesas,groups = "sdhi0_data4", "sdhi0_ctrl", "sdhi0_cd";
+ renesas,function = "sdhi0";
+ };
+
+ sdhi1_pins: sdhi1 {
+ renesas,groups = "sdhi1_data4", "sdhi1_ctrl";
+ renesas,function = "sdhi1";
+ };
+};
+
+&mmcif0 {
+ vmmc-supply = <&vcc_mmc0>;
+ bus-width = <8>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins>;
+ status = "okay";
+};
+
+&sdhi0 {
+ vmmc-supply = <&vcc_sdhi0>;
+ bus-width = <4>;
+ toshiba,mmc-wrprotect-disable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdhi0_pins>;
+ status = "okay";
+};
+
+&sdhi1 {
+ vmmc-supply = <&ape6evm_fixed_3v3>;
+ bus-width = <4>;
+ broken-cd;
+ toshiba,mmc-wrprotect-disable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdhi1_pins>;
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/r8a73a4-ape6evm.dts b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
index 72f867e65791..91436b58016f 100644
--- a/arch/arm/boot/dts/r8a73a4-ape6evm.dts
+++ b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
@@ -52,6 +52,7 @@
};
&i2c5 {
+ status = "okay";
vdd_dvfs: max8973@1b {
compatible = "maxim,max8973";
reg = <0x1b>;
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index 658fcc537576..287e047592a0 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -78,6 +78,49 @@
<0 56 4>, <0 57 4>;
};
+ dmac: dma-multiplexer@0 {
+ compatible = "renesas,shdma-mux";
+ #dma-cells = <1>;
+ dma-channels = <20>;
+ dma-requests = <256>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ dma0: dma-controller@e6700020 {
+ compatible = "renesas,shdma-r8a73a4";
+ reg = <0 0xe6700020 0 0x89e0>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 220 4
+ 0 200 4
+ 0 201 4
+ 0 202 4
+ 0 203 4
+ 0 204 4
+ 0 205 4
+ 0 206 4
+ 0 207 4
+ 0 208 4
+ 0 209 4
+ 0 210 4
+ 0 211 4
+ 0 212 4
+ 0 213 4
+ 0 214 4
+ 0 215 4
+ 0 216 4
+ 0 217 4
+ 0 218 4
+ 0 219 4>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15",
+ "ch16", "ch17", "ch18", "ch19";
+ };
+ };
+
thermal@e61f0000 {
compatible = "renesas,rcar-thermal";
reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>,
@@ -93,6 +136,7 @@
reg = <0 0xe6500000 0 0x428>;
interrupt-parent = <&gic>;
interrupts = <0 174 0x4>;
+ status = "disabled";
};
i2c1: i2c@e6510000 {
@@ -102,6 +146,7 @@
reg = <0 0xe6510000 0 0x428>;
interrupt-parent = <&gic>;
interrupts = <0 175 0x4>;
+ status = "disabled";
};
i2c2: i2c@e6520000 {
@@ -111,6 +156,7 @@
reg = <0 0xe6520000 0 0x428>;
interrupt-parent = <&gic>;
interrupts = <0 176 0x4>;
+ status = "disabled";
};
i2c3: i2c@e6530000 {
@@ -120,6 +166,7 @@
reg = <0 0xe6530000 0 0x428>;
interrupt-parent = <&gic>;
interrupts = <0 177 0x4>;
+ status = "disabled";
};
i2c4: i2c@e6540000 {
@@ -129,6 +176,7 @@
reg = <0 0xe6540000 0 0x428>;
interrupt-parent = <&gic>;
interrupts = <0 178 0x4>;
+ status = "disabled";
};
i2c5: i2c@e60b0000 {
@@ -138,6 +186,7 @@
reg = <0 0xe60b0000 0 0x428>;
interrupt-parent = <&gic>;
interrupts = <0 179 0x4>;
+ status = "disabled";
};
i2c6: i2c@e6550000 {
@@ -147,6 +196,7 @@
reg = <0 0xe6550000 0 0x428>;
interrupt-parent = <&gic>;
interrupts = <0 184 0x4>;
+ status = "disabled";
};
i2c7: i2c@e6560000 {
@@ -156,6 +206,7 @@
reg = <0 0xe6560000 0 0x428>;
interrupt-parent = <&gic>;
interrupts = <0 185 0x4>;
+ status = "disabled";
};
i2c8: i2c@e6570000 {
@@ -165,6 +216,7 @@
reg = <0 0xe6570000 0 0x428>;
interrupt-parent = <&gic>;
interrupts = <0 173 0x4>;
+ status = "disabled";
};
mmcif0: mmcif@ee200000 {
diff --git a/arch/arm/boot/dts/r8a7740-armadillo800eva-reference.dts b/arch/arm/boot/dts/r8a7740-armadillo800eva-reference.dts
index c638e4ab91b8..1c56c5e56950 100644
--- a/arch/arm/boot/dts/r8a7740-armadillo800eva-reference.dts
+++ b/arch/arm/boot/dts/r8a7740-armadillo800eva-reference.dts
@@ -11,6 +11,7 @@
/dts-v1/;
/include/ "r8a7740.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pwm/pwm.h>
/ {
model = "armadillo 800 eva reference";
@@ -34,6 +35,33 @@
regulator-boot-on;
};
+ vcc_sdhi0: regulator@1 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "SDHI0 Vcc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pfc 75 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vccq_sdhi0: regulator@2 {
+ compatible = "regulator-gpio";
+
+ regulator-name = "SDHI0 VccQ";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_sdhi0>;
+
+ enable-gpio = <&pfc 74 GPIO_ACTIVE_HIGH>;
+ gpios = <&pfc 17 GPIO_ACTIVE_HIGH>;
+ states = <3300000 0
+ 1800000 1>;
+
+ enable-active-high;
+ };
+
leds {
compatible = "gpio-leds";
led1 {
@@ -49,9 +77,19 @@
gpios = <&pfc 177 GPIO_ACTIVE_HIGH>;
};
};
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&tpu 2 33333 PWM_POLARITY_INVERTED>;
+ brightness-levels = <0 1 2 4 8 16 32 64 128 255>;
+ default-brightness-level = <9>;
+ pinctrl-0 = <&backlight_pins>;
+ pinctrl-names = "default";
+ };
};
&i2c0 {
+ status = "okay";
touchscreen: st1232@55 {
compatible = "sitronix,st1232";
reg = <0x55>;
@@ -76,4 +114,44 @@
renesas,groups = "intc_irq10";
renesas,function = "intc";
};
+
+ backlight_pins: backlight {
+ renesas,groups = "tpu0_to2_1";
+ renesas,function = "tpu0";
+ };
+
+ mmc0_pins: mmc0 {
+ renesas,groups = "mmc0_data8_1", "mmc0_ctrl_1";
+ renesas,function = "mmc0";
+ };
+
+ sdhi0_pins: sdhi0 {
+ renesas,groups = "sdhi0_data4", "sdhi0_ctrl", "sdhi0_wp";
+ renesas,function = "sdhi0";
+ };
+};
+
+&tpu {
+ status = "okay";
+};
+
+&mmcif0 {
+ pinctrl-0 = <&mmc0_pins>;
+ pinctrl-names = "default";
+
+ vmmc-supply = <&reg_3p3v>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&sdhi0 {
+ pinctrl-0 = <&sdhi0_pins>;
+ pinctrl-names = "default";
+
+ vmmc-supply = <&vcc_sdhi0>;
+ vqmmc-supply = <&vccq_sdhi0>;
+ bus-width = <4>;
+ cd-gpios = <&pfc 167 GPIO_ACTIVE_LOW>;
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index 44d3d520e01f..ae1e230f711d 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -131,6 +131,7 @@
0 202 0x4
0 203 0x4
0 204 0x4>;
+ status = "disabled";
};
i2c1: i2c@e6c20000 {
@@ -143,6 +144,7 @@
0 71 0x4
0 72 0x4
0 73 0x4>;
+ status = "disabled";
};
pfc: pfc@e6050000 {
@@ -159,4 +161,37 @@
status = "disabled";
#pwm-cells = <3>;
};
+
+ mmcif0: mmcif@e6bd0000 {
+ compatible = "renesas,sh-mmcif";
+ reg = <0xe6bd0000 0x100>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 56 4
+ 0 57 4>;
+ status = "disabled";
+ };
+
+ sdhi0: sdhi@e6850000 {
+ compatible = "renesas,sdhi-r8a7740";
+ reg = <0xe6850000 0x100>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 117 4
+ 0 118 4
+ 0 119 4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ status = "disabled";
+ };
+
+ sdhi1: sdhi@e6860000 {
+ compatible = "renesas,sdhi-r8a7740";
+ reg = <0xe6860000 0x100>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 121 4
+ 0 122 4
+ 0 123 4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ status = "disabled";
+ };
};
diff --git a/arch/arm/boot/dts/r8a7778-bockw-reference.dts b/arch/arm/boot/dts/r8a7778-bockw-reference.dts
index 9bb903a3230d..969e386e852c 100644
--- a/arch/arm/boot/dts/r8a7778-bockw-reference.dts
+++ b/arch/arm/boot/dts/r8a7778-bockw-reference.dts
@@ -22,11 +22,36 @@
compatible = "renesas,bockw-reference", "renesas,r8a7778";
chosen {
- bootargs = "console=ttySC0,115200 ignore_loglevel rw";
+ bootargs = "console=ttySC0,115200 ignore_loglevel root=/dev/nfs ip=dhcp rw";
};
memory {
device_type = "memory";
reg = <0x60000000 0x10000000>;
};
+
+ fixedregulator3v3: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ethernet@18300000 {
+ compatible = "smsc,lan9220", "smsc,lan9115";
+ reg = <0x18300000 0x1000>;
+
+ phy-mode = "mii";
+ interrupt-parent = <&irqpin>;
+ interrupts = <0 0>; /* IRQ0: hwirq 0 on irqpin */
+ reg-io-width = <4>;
+ vddvario-supply = <&fixedregulator3v3>;
+ vdd33a-supply = <&fixedregulator3v3>;
+ };
+};
+
+&irqpin {
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index 3577aba82583..a6308a399e2d 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -33,6 +33,25 @@
<0xfe430000 0x100>;
};
+ /* irqpin: IRQ0 - IRQ3 */
+ irqpin: irqpin@fe78001c {
+ compatible = "renesas,intc-irqpin";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ status = "disabled"; /* default off */
+ reg = <0xfe78001c 4>,
+ <0xfe780010 4>,
+ <0xfe780024 4>,
+ <0xfe780044 4>,
+ <0xfe780064 4>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 27 0x4
+ 0 28 0x4
+ 0 29 0x4
+ 0 30 0x4>;
+ sense-bitfield-width = <2>;
+ };
+
gpio0: gpio@ffc40000 {
compatible = "renesas,gpio-r8a7778", "renesas,gpio-rcar";
reg = <0xffc40000 0x2c>;
diff --git a/arch/arm/boot/dts/r8a7779-marzen-reference.dts b/arch/arm/boot/dts/r8a7779-marzen-reference.dts
index 6d5508392252..ab4110aa3c3b 100644
--- a/arch/arm/boot/dts/r8a7779-marzen-reference.dts
+++ b/arch/arm/boot/dts/r8a7779-marzen-reference.dts
@@ -42,8 +42,8 @@
pinctrl-names = "default";
phy-mode = "mii";
- interrupt-parent = <&gic>;
- interrupts = <0 28 0x4>;
+ interrupt-parent = <&irqpin0>;
+ interrupts = <1 0>; /* IRQ1: hwirq 1 on irqpin0 */
reg-io-width = <4>;
vddvario-supply = <&fixedregulator3v3>;
vdd33a-supply = <&fixedregulator3v3>;
@@ -63,6 +63,10 @@
};
};
+&irqpin0 {
+ status = "okay";
+};
+
&pfc {
pinctrl-0 = <&scif2_pins &scif4_pins &sdhi0_pins>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index ebbe507fcbfa..19faeac3fd2e 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -135,6 +135,7 @@
irqpin0: irqpin@fe780010 {
compatible = "renesas,intc-irqpin";
#interrupt-cells = <2>;
+ status = "disabled";
interrupt-controller;
reg = <0xfe78001c 4>,
<0xfe780010 4>,
@@ -156,6 +157,7 @@
reg = <0xffc70000 0x1000>;
interrupt-parent = <&gic>;
interrupts = <0 79 0x4>;
+ status = "disabled";
};
i2c1: i2c@ffc71000 {
@@ -165,6 +167,7 @@
reg = <0xffc71000 0x1000>;
interrupt-parent = <&gic>;
interrupts = <0 82 0x4>;
+ status = "disabled";
};
i2c2: i2c@ffc72000 {
@@ -174,6 +177,7 @@
reg = <0xffc72000 0x1000>;
interrupt-parent = <&gic>;
interrupts = <0 80 0x4>;
+ status = "disabled";
};
i2c3: i2c@ffc73000 {
@@ -183,6 +187,7 @@
reg = <0xffc73000 0x1000>;
interrupt-parent = <&gic>;
interrupts = <0 81 0x4>;
+ status = "disabled";
};
pfc: pfc@fffc0000 {
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 413b4c29e782..ee845fad939b 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -24,6 +24,55 @@
reg = <0>;
clock-frequency = <1300000000>;
};
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ clock-frequency = <1300000000>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <2>;
+ clock-frequency = <1300000000>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <3>;
+ clock-frequency = <1300000000>;
+ };
+
+ cpu4: cpu@4 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x100>;
+ clock-frequency = <780000000>;
+ };
+
+ cpu5: cpu@5 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x101>;
+ clock-frequency = <780000000>;
+ };
+
+ cpu6: cpu@6 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x102>;
+ clock-frequency = <780000000>;
+ };
+
+ cpu7: cpu@7 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x103>;
+ clock-frequency = <780000000>;
+ };
};
gic: interrupt-controller@f1001000 {
@@ -127,6 +176,46 @@
interrupts = <0 0 4>, <0 1 4>, <0 2 4>, <0 3 4>;
};
+ i2c0: i2c@e6508000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,i2c-r8a7790";
+ reg = <0 0xe6508000 0 0x40>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 287 0x4>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@e6518000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,i2c-r8a7790";
+ reg = <0 0xe6518000 0 0x40>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 288 0x4>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@e6530000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,i2c-r8a7790";
+ reg = <0 0xe6530000 0 0x40>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 286 0x4>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@e6540000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,i2c-r8a7790";
+ reg = <0 0xe6540000 0 0x40>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 290 0x4>;
+ status = "disabled";
+ };
+
mmcif0: mmcif@ee200000 {
compatible = "renesas,sh-mmcif";
reg = <0 0xee200000 0 0x80>;
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
new file mode 100644
index 000000000000..1ce5250ec278
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -0,0 +1,32 @@
+/*
+ * Device Tree Source for the Koelsch board
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "r8a7791.dtsi"
+
+/ {
+ model = "Koelsch";
+ compatible = "renesas,koelsch", "renesas,r8a7791";
+
+ chosen {
+ bootargs = "console=ttySC6,115200 ignore_loglevel rw root=/dev/nfs ip=dhcp";
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0 0x80000000>;
+ };
+
+ lbsc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
new file mode 100644
index 000000000000..fea5cfef4691
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -0,0 +1,74 @@
+/*
+ * Device Tree Source for the r8a7791 SoC
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/ {
+ compatible = "renesas,r8a7791";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ clock-frequency = <1300000000>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ clock-frequency = <1300000000>;
+ };
+ };
+
+ gic: interrupt-controller@f1001000 {
+ compatible = "arm,cortex-a15-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0xf1001000 0 0x1000>,
+ <0 0xf1002000 0 0x1000>,
+ <0 0xf1004000 0 0x2000>,
+ <0 0xf1006000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ irqc0: interrupt-controller@e61c0000 {
+ compatible = "renesas,irqc";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ reg = <0 0xe61c0000 0 0x200>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 0 4>,
+ <0 1 4>,
+ <0 2 4>,
+ <0 3 4>,
+ <0 12 4>,
+ <0 13 4>,
+ <0 14 4>,
+ <0 15 4>,
+ <0 16 4>,
+ <0 17 4>;
+ };
+};
diff --git a/arch/arm/boot/dts/rk3066a-bqcurie2.dts b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
new file mode 100644
index 000000000000..035df4053c21
--- /dev/null
+++ b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+#include "rk3066a.dtsi"
+
+/ {
+ model = "bq Curie 2";
+
+ memory {
+ reg = <0x60000000 0x40000000>;
+ };
+
+ soc {
+ uart0: serial@10124000 {
+ status = "okay";
+ };
+
+ uart1: serial@10126000 {
+ status = "okay";
+ };
+
+ uart2: serial@20064000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_xfer>;
+ status = "okay";
+ };
+
+ uart3: serial@20068000 {
+ status = "okay";
+ };
+
+ vcc_sd0: fixed-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "sdmmc-supply";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ gpio = <&gpio3 7 GPIO_ACTIVE_LOW>;
+ startup-delay-us = <100000>;
+ };
+
+ dwmmc@10214000 { /* sdmmc */
+ num-slots = <1>;
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_cd &sd0_bus4>;
+ vmmc-supply = <&vcc_sd0>;
+
+ slot@0 {
+ reg = <0>;
+ bus-width = <4>;
+ disable-wp;
+ };
+ };
+
+ dwmmc@10218000 { /* wifi */
+ num-slots = <1>;
+ status = "okay";
+ non-removable;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_bus4>;
+
+ slot@0 {
+ reg = <0>;
+ bus-width = <4>;
+ disable-wp;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ button@0 {
+ gpios = <&gpio6 2 GPIO_ACTIVE_LOW>; /* GPIO6_A2 */
+ linux,code = <116>;
+ label = "GPIO Key Power";
+ linux,input-type = <1>;
+ gpio-key,wakeup = <1>;
+ debounce-interval = <100>;
+ };
+ button@1 {
+ gpios = <&gpio4 21 GPIO_ACTIVE_LOW>; /* GPIO4_C5 */
+ linux,code = <104>;
+ label = "GPIO Key Vol-";
+ linux,input-type = <1>;
+ gpio-key,wakeup = <0>;
+ debounce-interval = <100>;
+ };
+ /* VOL+ comes somehow through the ADC */
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi
index 56bfac93d3f6..be5d2b09a363 100644
--- a/arch/arm/boot/dts/rk3066a.dtsi
+++ b/arch/arm/boot/dts/rk3066a.dtsi
@@ -14,15 +14,12 @@
*/
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/pinctrl/rockchip.h>
-#include "skeleton.dtsi"
+#include "rk3xxx.dtsi"
#include "rk3066a-clocks.dtsi"
/ {
compatible = "rockchip,rk3066a";
- interrupt-parent = <&gic>;
cpus {
#address-cells = <1>;
@@ -43,33 +40,6 @@
};
soc {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- ranges;
-
- gic: interrupt-controller@1013d000 {
- compatible = "arm,cortex-a9-gic";
- interrupt-controller;
- #interrupt-cells = <3>;
- reg = <0x1013d000 0x1000>,
- <0x1013c100 0x0100>;
- };
-
- L2: l2-cache-controller@10138000 {
- compatible = "arm,pl310-cache";
- reg = <0x10138000 0x1000>;
- cache-unified;
- cache-level = <2>;
- };
-
- local-timer@1013c600 {
- compatible = "arm,cortex-a9-twd-timer";
- reg = <0x1013c600 0x20>;
- interrupts = <GIC_PPI 13 0x304>;
- clocks = <&dummy150m>;
- };
-
timer@20038000 {
compatible = "snps,dw-apb-timer-osc";
reg = <0x20038000 0x100>;
@@ -191,17 +161,14 @@
uart0_xfer: uart0-xfer {
rockchip,pins = <RK_GPIO1 0 RK_FUNC_1 &pcfg_pull_default>,
<RK_GPIO1 1 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
uart0_cts: uart0-cts {
rockchip,pins = <RK_GPIO1 2 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
uart0_rts: uart0-rts {
rockchip,pins = <RK_GPIO1 3 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
};
@@ -209,17 +176,14 @@
uart1_xfer: uart1-xfer {
rockchip,pins = <RK_GPIO1 4 RK_FUNC_1 &pcfg_pull_default>,
<RK_GPIO1 5 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
uart1_cts: uart1-cts {
rockchip,pins = <RK_GPIO1 6 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
uart1_rts: uart1-rts {
rockchip,pins = <RK_GPIO1 7 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
};
@@ -227,7 +191,6 @@
uart2_xfer: uart2-xfer {
rockchip,pins = <RK_GPIO1 8 RK_FUNC_1 &pcfg_pull_default>,
<RK_GPIO1 9 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
/* no rts / cts for uart2 */
};
@@ -236,44 +199,36 @@
uart3_xfer: uart3-xfer {
rockchip,pins = <RK_GPIO3 27 RK_FUNC_1 &pcfg_pull_default>,
<RK_GPIO3 28 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
uart3_cts: uart3-cts {
rockchip,pins = <RK_GPIO3 29 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
uart3_rts: uart3-rts {
rockchip,pins = <RK_GPIO3 30 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
};
sd0 {
sd0_clk: sd0-clk {
rockchip,pins = <RK_GPIO3 8 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
sd0_cmd: sd0-cmd {
rockchip,pins = <RK_GPIO3 9 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
sd0_cd: sd0-cd {
rockchip,pins = <RK_GPIO3 14 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
sd0_wp: sd0-wp {
rockchip,pins = <RK_GPIO3 15 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
sd0_bus1: sd0-bus-width1 {
rockchip,pins = <RK_GPIO3 10 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
sd0_bus4: sd0-bus-width4 {
@@ -281,34 +236,28 @@
<RK_GPIO3 11 RK_FUNC_1 &pcfg_pull_default>,
<RK_GPIO3 12 RK_FUNC_1 &pcfg_pull_default>,
<RK_GPIO3 13 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
};
sd1 {
sd1_clk: sd1-clk {
rockchip,pins = <RK_GPIO3 21 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
sd1_cmd: sd1-cmd {
rockchip,pins = <RK_GPIO3 16 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
sd1_cd: sd1-cd {
rockchip,pins = <RK_GPIO3 22 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
sd1_wp: sd1-wp {
rockchip,pins = <RK_GPIO3 23 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
sd1_bus1: sd1-bus-width1 {
rockchip,pins = <RK_GPIO3 17 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
sd1_bus4: sd1-bus-width4 {
@@ -316,75 +265,8 @@
<RK_GPIO3 18 RK_FUNC_1 &pcfg_pull_default>,
<RK_GPIO3 19 RK_FUNC_1 &pcfg_pull_default>,
<RK_GPIO3 20 RK_FUNC_1 &pcfg_pull_default>;
- rockchip,config = <&pcfg_pull_default>;
};
};
};
-
- uart0: serial@10124000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x10124000 0x400>;
- interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <1>;
- clocks = <&clk_gates1 8>;
- status = "disabled";
- };
-
- uart1: serial@10126000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x10126000 0x400>;
- interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <1>;
- clocks = <&clk_gates1 10>;
- status = "disabled";
- };
-
- uart2: serial@20064000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x20064000 0x400>;
- interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <1>;
- clocks = <&clk_gates1 12>;
- status = "disabled";
- };
-
- uart3: serial@20068000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x20068000 0x400>;
- interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <1>;
- clocks = <&clk_gates1 14>;
- status = "disabled";
- };
-
- dwmmc@10214000 {
- compatible = "rockchip,rk2928-dw-mshc";
- reg = <0x10214000 0x1000>;
- interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- clocks = <&clk_gates5 10>, <&clk_gates2 11>;
- clock-names = "biu", "ciu";
-
- status = "disabled";
- };
-
- dwmmc@10218000 {
- compatible = "rockchip,rk2928-dw-mshc";
- reg = <0x10218000 0x1000>;
- interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- clocks = <&clk_gates5 11>, <&clk_gates2 13>;
- clock-names = "biu", "ciu";
-
- status = "disabled";
- };
};
};
diff --git a/arch/arm/boot/dts/rk3188-clocks.dtsi b/arch/arm/boot/dts/rk3188-clocks.dtsi
new file mode 100644
index 000000000000..b1b92dc245ce
--- /dev/null
+++ b/arch/arm/boot/dts/rk3188-clocks.dtsi
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /*
+ * This is a dummy clock, to be used as placeholder on
+ * other mux clocks when a specific parent clock is not
+ * yet implemented. It should be dropped when the driver
+ * is complete.
+ */
+ dummy: dummy {
+ compatible = "fixed-clock";
+ clock-frequency = <0>;
+ #clock-cells = <0>;
+ };
+
+ xin24m: xin24m {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ #clock-cells = <0>;
+ };
+
+ dummy48m: dummy48m {
+ compatible = "fixed-clock";
+ clock-frequency = <48000000>;
+ #clock-cells = <0>;
+ };
+
+ dummy150m: dummy150m {
+ compatible = "fixed-clock";
+ clock-frequency = <150000000>;
+ #clock-cells = <0>;
+ };
+
+ clk_gates0: gate-clk@200000d0 {
+ compatible = "rockchip,rk2928-gate-clk";
+ reg = <0x200000d0 0x4>;
+ clocks = <&dummy150m>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&dummy>, <&dummy>;
+
+ clock-output-names =
+ "gate_core_periph", "gate_cpu_gpll",
+ "gate_ddrphy", "gate_aclk_cpu",
+ "gate_hclk_cpu", "gate_pclk_cpu",
+ "gate_atclk_cpu", "gate_aclk_core",
+ "reserved", "gate_i2s0",
+ "gate_i2s0_frac", "reserved",
+ "reserved", "gate_spdif",
+ "gate_spdif_frac", "gate_testclk";
+
+ #clock-cells = <1>;
+ };
+
+ clk_gates1: gate-clk@200000d4 {
+ compatible = "rockchip,rk2928-gate-clk";
+ reg = <0x200000d4 0x4>;
+ clocks = <&xin24m>, <&xin24m>,
+ <&xin24m>, <&dummy>,
+ <&dummy>, <&xin24m>,
+ <&xin24m>, <&dummy>,
+ <&xin24m>, <&dummy>,
+ <&xin24m>, <&dummy>,
+ <&xin24m>, <&dummy>,
+ <&xin24m>, <&dummy>;
+
+ clock-output-names =
+ "gate_timer0", "gate_timer1",
+ "gate_timer3", "gate_jtag",
+ "gate_aclk_lcdc1_src", "gate_otgphy0",
+ "gate_otgphy1", "gate_ddr_gpll",
+ "gate_uart0", "gate_frac_uart0",
+ "gate_uart1", "gate_frac_uart1",
+ "gate_uart2", "gate_frac_uart2",
+ "gate_uart3", "gate_frac_uart3";
+
+ #clock-cells = <1>;
+ };
+
+ clk_gates2: gate-clk@200000d8 {
+ compatible = "rockchip,rk2928-gate-clk";
+ reg = <0x200000d8 0x4>;
+ clocks = <&clk_gates2 1>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&clk_gates2 3>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&dummy>, <&dummy48m>,
+ <&dummy>, <&dummy48m>,
+ <&dummy>, <&dummy>;
+
+ clock-output-names =
+ "gate_periph_src", "gate_aclk_periph",
+ "gate_hclk_periph", "gate_pclk_periph",
+ "gate_smc", "gate_mac",
+ "gate_hsadc", "gate_hsadc_frac",
+ "gate_saradc", "gate_spi0",
+ "gate_spi1", "gate_mmc0",
+ "gate_mac_lbtest", "gate_mmc1",
+ "gate_emmc", "reserved";
+
+ #clock-cells = <1>;
+ };
+
+ clk_gates3: gate-clk@200000dc {
+ compatible = "rockchip,rk2928-gate-clk";
+ reg = <0x200000dc 0x4>;
+ clocks = <&dummy>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&xin24m>, <&xin24m>,
+ <&dummy>, <&dummy>,
+ <&xin24m>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&xin24m>, <&dummy>;
+
+ clock-output-names =
+ "gate_aclk_lcdc0_src", "gate_dclk_lcdc0",
+ "gate_dclk_lcdc1", "gate_pclkin_cif0",
+ "gate_timer2", "gate_timer4",
+ "gate_hsicphy", "gate_cif0_out",
+ "gate_timer5", "gate_aclk_vepu",
+ "gate_hclk_vepu", "gate_aclk_vdpu",
+ "gate_hclk_vdpu", "reserved",
+ "gate_timer6", "gate_aclk_gpu_src";
+
+ #clock-cells = <1>;
+ };
+
+ clk_gates4: gate-clk@200000e0 {
+ compatible = "rockchip,rk2928-gate-clk";
+ reg = <0x200000e0 0x4>;
+ clocks = <&clk_gates2 2>, <&clk_gates2 3>,
+ <&clk_gates2 1>, <&clk_gates2 1>,
+ <&clk_gates2 1>, <&clk_gates2 2>,
+ <&clk_gates2 2>, <&clk_gates2 2>,
+ <&clk_gates0 4>, <&clk_gates0 4>,
+ <&clk_gates0 3>, <&dummy>,
+ <&clk_gates0 3>, <&dummy>,
+ <&dummy>, <&dummy>;
+
+ clock-output-names =
+ "gate_hclk_peri_axi_matrix", "gate_pclk_peri_axi_matrix",
+ "gate_aclk_cpu_peri", "gate_aclk_peri_axi_matrix",
+ "gate_aclk_pei_niu", "gate_hclk_usb_peri",
+ "gate_hclk_peri_ahb_arbi", "gate_hclk_emem_peri",
+ "gate_hclk_cpubus", "gate_hclk_ahb2apb",
+ "gate_aclk_strc_sys", "reserved",
+ "gate_aclk_intmem", "reserved",
+ "gate_hclk_imem1", "gate_hclk_imem0";
+
+ #clock-cells = <1>;
+ };
+
+ clk_gates5: gate-clk@200000e4 {
+ compatible = "rockchip,rk2928-gate-clk";
+ reg = <0x200000e4 0x4>;
+ clocks = <&clk_gates0 3>, <&clk_gates2 1>,
+ <&clk_gates0 5>, <&clk_gates0 5>,
+ <&clk_gates0 5>, <&clk_gates0 5>,
+ <&clk_gates0 4>, <&clk_gates0 5>,
+ <&clk_gates2 1>, <&clk_gates2 2>,
+ <&clk_gates2 2>, <&clk_gates2 2>,
+ <&clk_gates2 2>, <&clk_gates4 5>;
+
+ clock-output-names =
+ "gate_aclk_dmac1", "gate_aclk_dmac2",
+ "gate_pclk_efuse", "gate_pclk_tzpc",
+ "gate_pclk_grf", "gate_pclk_pmu",
+ "gate_hclk_rom", "gate_pclk_ddrupctl",
+ "gate_aclk_smc", "gate_hclk_nandc",
+ "gate_hclk_mmc0", "gate_hclk_mmc1",
+ "gate_hclk_emmc", "gate_hclk_otg0";
+
+ #clock-cells = <1>;
+ };
+
+ clk_gates6: gate-clk@200000e8 {
+ compatible = "rockchip,rk2928-gate-clk";
+ reg = <0x200000e8 0x4>;
+ clocks = <&clk_gates3 0>, <&clk_gates0 4>,
+ <&clk_gates0 4>, <&clk_gates1 4>,
+ <&clk_gates0 4>, <&clk_gates3 0>,
+ <&dummy>, <&dummy>,
+ <&clk_gates3 0>, <&clk_gates0 4>,
+ <&clk_gates0 4>, <&clk_gates1 4>,
+ <&clk_gates0 4>, <&clk_gates3 0>;
+
+ clock-output-names =
+ "gate_aclk_lcdc0", "gate_hclk_lcdc0",
+ "gate_hclk_lcdc1", "gate_aclk_lcdc1",
+ "gate_hclk_cif0", "gate_aclk_cif0",
+ "reserved", "reserved",
+ "gate_aclk_ipp", "gate_hclk_ipp",
+ "gate_hclk_rga", "gate_aclk_rga",
+ "gate_hclk_vio_bus", "gate_aclk_vio0";
+
+ #clock-cells = <1>;
+ };
+
+ clk_gates7: gate-clk@200000ec {
+ compatible = "rockchip,rk2928-gate-clk";
+ reg = <0x200000ec 0x4>;
+ clocks = <&clk_gates2 2>, <&clk_gates0 4>,
+ <&clk_gates0 4>, <&dummy>,
+ <&dummy>, <&clk_gates2 2>,
+ <&clk_gates2 2>, <&clk_gates0 5>,
+ <&dummy>, <&clk_gates0 5>,
+ <&clk_gates0 5>, <&clk_gates2 3>,
+ <&clk_gates2 3>, <&clk_gates2 3>,
+ <&clk_gates2 3>, <&clk_gates2 3>;
+
+ clock-output-names =
+ "gate_hclk_emac", "gate_hclk_spdif",
+ "gate_hclk_i2s0_2ch", "gate_hclk_otg1",
+ "gate_hclk_hsic", "gate_hclk_hsadc",
+ "gate_hclk_pidf", "gate_pclk_timer0",
+ "reserved", "gate_pclk_timer2",
+ "gate_pclk_pwm01", "gate_pclk_pwm23",
+ "gate_pclk_spi0", "gate_pclk_spi1",
+ "gate_pclk_saradc", "gate_pclk_wdt";
+
+ #clock-cells = <1>;
+ };
+
+ clk_gates8: gate-clk@200000f0 {
+ compatible = "rockchip,rk2928-gate-clk";
+ reg = <0x200000f0 0x4>;
+ clocks = <&clk_gates0 5>, <&clk_gates0 5>,
+ <&clk_gates2 3>, <&clk_gates2 3>,
+ <&clk_gates0 5>, <&clk_gates0 5>,
+ <&clk_gates2 3>, <&clk_gates2 3>,
+ <&clk_gates2 3>, <&clk_gates0 5>,
+ <&clk_gates0 5>, <&clk_gates0 5>,
+ <&clk_gates2 3>, <&dummy>;
+
+ clock-output-names =
+ "gate_pclk_uart0", "gate_pclk_uart1",
+ "gate_pclk_uart2", "gate_pclk_uart3",
+ "gate_pclk_i2c0", "gate_pclk_i2c1",
+ "gate_pclk_i2c2", "gate_pclk_i2c3",
+ "gate_pclk_i2c4", "gate_pclk_gpio0",
+ "gate_pclk_gpio1", "gate_pclk_gpio2",
+ "gate_pclk_gpio3", "gate_aclk_gps";
+
+ #clock-cells = <1>;
+ };
+
+ clk_gates9: gate-clk@200000f4 {
+ compatible = "rockchip,rk2928-gate-clk";
+ reg = <0x200000f4 0x4>;
+ clocks = <&dummy>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&dummy>, <&dummy>,
+ <&dummy>, <&dummy>;
+
+ clock-output-names =
+ "gate_clk_core_dbg", "gate_pclk_dbg",
+ "gate_clk_trace", "gate_atclk",
+ "gate_clk_l2c", "gate_aclk_vio1",
+ "gate_pclk_publ", "gate_aclk_gpu";
+
+ #clock-cells = <1>;
+ };
+ };
+
+};
diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts
new file mode 100644
index 000000000000..3ba1968a70ab
--- /dev/null
+++ b/arch/arm/boot/dts/rk3188-radxarock.dts
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+#include "rk3188.dtsi"
+
+/ {
+ model = "Radxa Rock";
+
+ memory {
+ reg = <0x60000000 0x80000000>;
+ };
+
+ soc {
+ uart0: serial@10124000 {
+ status = "okay";
+ };
+
+ uart1: serial@10126000 {
+ status = "okay";
+ };
+
+ uart2: serial@20064000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_xfer>;
+ status = "okay";
+ };
+
+ uart3: serial@20068000 {
+ status = "okay";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ button@0 {
+ gpios = <&gpio0 4 GPIO_ACTIVE_LOW>;
+ linux,code = <116>;
+ label = "GPIO Key Power";
+ linux,input-type = <1>;
+ gpio-key,wakeup = <1>;
+ debounce-interval = <100>;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ green {
+ gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ yellow {
+ gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ sleep {
+ gpios = <&gpio0 15 0>;
+ default-state = "off";
+ };
+ };
+
+ };
+};
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
new file mode 100644
index 000000000000..1a26b03b3649
--- /dev/null
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "rk3xxx.dtsi"
+#include "rk3188-clocks.dtsi"
+
+/ {
+ compatible = "rockchip,rk3188";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ next-level-cache = <&L2>;
+ reg = <0x0>;
+ };
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ next-level-cache = <&L2>;
+ reg = <0x1>;
+ };
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ next-level-cache = <&L2>;
+ reg = <0x2>;
+ };
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ next-level-cache = <&L2>;
+ reg = <0x3>;
+ };
+ };
+
+ soc {
+ global-timer@1013c200 {
+ interrupts = <GIC_PPI 11 0xf04>;
+ };
+
+ local-timer@1013c600 {
+ interrupts = <GIC_PPI 13 0xf04>;
+ };
+
+ pinctrl@20008000 {
+ compatible = "rockchip,rk3188-pinctrl";
+ reg = <0x20008000 0xa0>,
+ <0x20008164 0x1a0>;
+ reg-names = "base", "pull";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ gpio0: gpio0@0x2000a000 {
+ compatible = "rockchip,rk3188-gpio-bank0";
+ reg = <0x2000a000 0x100>,
+ <0x20004064 0x8>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_gates8 9>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio1@0x2003c000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x2003c000 0x100>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_gates8 10>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio2@2003e000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x2003e000 0x100>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_gates8 11>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio3@20080000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x20080000 0x100>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_gates8 12>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ pcfg_pull_up: pcfg_pull_up {
+ bias-pull-up;
+ };
+
+ pcfg_pull_down: pcfg_pull_down {
+ bias-pull-down;
+ };
+
+ pcfg_pull_none: pcfg_pull_none {
+ bias-disable;
+ };
+
+ uart0 {
+ uart0_xfer: uart0-xfer {
+ rockchip,pins = <RK_GPIO1 0 RK_FUNC_1 &pcfg_pull_none>,
+ <RK_GPIO1 1 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ uart0_cts: uart0-cts {
+ rockchip,pins = <RK_GPIO1 2 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ uart0_rts: uart0-rts {
+ rockchip,pins = <RK_GPIO1 3 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
+ uart1 {
+ uart1_xfer: uart1-xfer {
+ rockchip,pins = <RK_GPIO1 4 RK_FUNC_1 &pcfg_pull_none>,
+ <RK_GPIO1 5 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ uart1_cts: uart1-cts {
+ rockchip,pins = <RK_GPIO1 6 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ uart1_rts: uart1-rts {
+ rockchip,pins = <RK_GPIO1 7 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
+ uart2 {
+ uart2_xfer: uart2-xfer {
+ rockchip,pins = <RK_GPIO1 8 RK_FUNC_1 &pcfg_pull_none>,
+ <RK_GPIO1 9 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ /* no rts / cts for uart2 */
+ };
+
+ uart3 {
+ uart3_xfer: uart3-xfer {
+ rockchip,pins = <RK_GPIO1 10 RK_FUNC_1 &pcfg_pull_none>,
+ <RK_GPIO1 11 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ uart3_cts: uart3-cts {
+ rockchip,pins = <RK_GPIO1 12 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ uart3_rts: uart3-rts {
+ rockchip,pins = <RK_GPIO1 13 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
+ sd0 {
+ sd0_clk: sd0-clk {
+ rockchip,pins = <RK_GPIO3 2 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ sd0_cmd: sd0-cmd {
+ rockchip,pins = <RK_GPIO3 3 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ sd0_cd: sd0-cd {
+ rockchip,pins = <RK_GPIO3 8 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ sd0_wp: sd0-wp {
+ rockchip,pins = <RK_GPIO3 9 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ sd0_pwr: sd0-pwr {
+ rockchip,pins = <RK_GPIO3 1 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ sd0_bus1: sd0-bus-width1 {
+ rockchip,pins = <RK_GPIO3 4 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ sd0_bus4: sd0-bus-width4 {
+ rockchip,pins = <RK_GPIO3 4 RK_FUNC_1 &pcfg_pull_none>,
+ <RK_GPIO3 5 RK_FUNC_1 &pcfg_pull_none>,
+ <RK_GPIO3 6 RK_FUNC_1 &pcfg_pull_none>,
+ <RK_GPIO3 7 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
+ sd1 {
+ sd1_clk: sd1-clk {
+ rockchip,pins = <RK_GPIO3 21 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ sd1_cmd: sd1-cmd {
+ rockchip,pins = <RK_GPIO3 16 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ sd1_cd: sd1-cd {
+ rockchip,pins = <RK_GPIO3 22 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ sd1_wp: sd1-wp {
+ rockchip,pins = <RK_GPIO3 23 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ sd1_bus1: sd1-bus-width1 {
+ rockchip,pins = <RK_GPIO3 17 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ sd1_bus4: sd1-bus-width4 {
+ rockchip,pins = <RK_GPIO3 17 RK_FUNC_1 &pcfg_pull_none>,
+ <RK_GPIO3 18 RK_FUNC_1 &pcfg_pull_none>,
+ <RK_GPIO3 19 RK_FUNC_1 &pcfg_pull_none>,
+ <RK_GPIO3 20 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
new file mode 100644
index 000000000000..0fcbcfd67de2
--- /dev/null
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "skeleton.dtsi"
+
+/ {
+ interrupt-parent = <&gic>;
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges;
+
+ gic: interrupt-controller@1013d000 {
+ compatible = "arm,cortex-a9-gic";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x1013d000 0x1000>,
+ <0x1013c100 0x0100>;
+ };
+
+ L2: l2-cache-controller@10138000 {
+ compatible = "arm,pl310-cache";
+ reg = <0x10138000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ global-timer@1013c200 {
+ compatible = "arm,cortex-a9-global-timer";
+ reg = <0x1013c200 0x20>;
+ interrupts = <GIC_PPI 11 0x304>;
+ clocks = <&dummy150m>;
+ };
+
+ local-timer@1013c600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0x1013c600 0x20>;
+ interrupts = <GIC_PPI 13 0x304>;
+ clocks = <&dummy150m>;
+ };
+
+ uart0: serial@10124000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x10124000 0x400>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <1>;
+ clocks = <&clk_gates1 8>;
+ status = "disabled";
+ };
+
+ uart1: serial@10126000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x10126000 0x400>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <1>;
+ clocks = <&clk_gates1 10>;
+ status = "disabled";
+ };
+
+ uart2: serial@20064000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x20064000 0x400>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <1>;
+ clocks = <&clk_gates1 12>;
+ status = "disabled";
+ };
+
+ uart3: serial@20068000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x20068000 0x400>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <1>;
+ clocks = <&clk_gates1 14>;
+ status = "disabled";
+ };
+
+ dwmmc@10214000 {
+ compatible = "rockchip,rk2928-dw-mshc";
+ reg = <0x10214000 0x1000>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clocks = <&clk_gates5 10>, <&clk_gates2 11>;
+ clock-names = "biu", "ciu";
+
+ status = "disabled";
+ };
+
+ dwmmc@10218000 {
+ compatible = "rockchip,rk2928-dw-mshc";
+ reg = <0x10218000 0x1000>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clocks = <&clk_gates5 11>, <&clk_gates2 13>;
+ clock-names = "biu", "ciu";
+
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/s3c6400.dtsi b/arch/arm/boot/dts/s3c6400.dtsi
new file mode 100644
index 000000000000..a7d1c8ec150d
--- /dev/null
+++ b/arch/arm/boot/dts/s3c6400.dtsi
@@ -0,0 +1,41 @@
+/*
+ * Samsung's S3C6400 SoC device tree source
+ *
+ * Copyright (c) 2013 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * Samsung's S3C6400 SoC device nodes are listed in this file. S3C6400
+ * based board files can include this file and provide values for board specific
+ * bindings.
+ *
+ * Note: This file does not include device nodes for all the controllers in
+ * S3C6400 SoC. As device tree coverage for S3C6400 increases, additional
+ * nodes can be added to this file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include "s3c64xx.dtsi"
+
+/ {
+ compatible = "samsung,s3c6400";
+};
+
+&vic0 {
+ valid-mask = <0xfffffe1f>;
+ valid-wakeup-mask = <0x00200004>;
+};
+
+&vic1 {
+ valid-mask = <0xffffffff>;
+ valid-wakeup-mask = <0x53020000>;
+};
+
+&soc {
+ clocks: clock-controller@7e00f000 {
+ compatible = "samsung,s3c6400-clock";
+ reg = <0x7e00f000 0x1000>;
+ #clock-cells = <1>;
+ };
+};
diff --git a/arch/arm/boot/dts/s3c6410-mini6410.dts b/arch/arm/boot/dts/s3c6410-mini6410.dts
new file mode 100644
index 000000000000..57e00f9bce99
--- /dev/null
+++ b/arch/arm/boot/dts/s3c6410-mini6410.dts
@@ -0,0 +1,228 @@
+/*
+ * Samsung's S3C6410 based Mini6410 board device tree source
+ *
+ * Copyright (c) 2013 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * Device tree source file for FriendlyARM Mini6410 board which is based on
+ * Samsung's S3C6410 SoC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#include "s3c6410.dtsi"
+
+/ {
+ model = "FriendlyARM Mini6410 board based on S3C6410";
+ compatible = "friendlyarm,mini6410", "samsung,s3c6410";
+
+ memory {
+ reg = <0x50000000 0x10000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttySAC0,115200n8 earlyprintk rootwait root=/dev/mmcblk0p1";
+ };
+
+ clocks {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fin_pll: oscillator@0 {
+ compatible = "fixed-clock";
+ reg = <0>;
+ clock-frequency = <12000000>;
+ clock-output-names = "fin_pll";
+ #clock-cells = <0>;
+ };
+
+ xusbxti: oscillator@1 {
+ compatible = "fixed-clock";
+ reg = <1>;
+ clock-output-names = "xusbxti";
+ clock-frequency = <48000000>;
+ #clock-cells = <0>;
+ };
+ };
+
+ srom-cs1@18000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x18000000 0x8000000>;
+ ranges;
+
+ ethernet@18000000 {
+ compatible = "davicom,dm9000";
+ reg = <0x18000000 0x2 0x18000004 0x2>;
+ interrupt-parent = <&gpn>;
+ interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+ davicom,no-eeprom;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys>;
+ autorepeat;
+
+ button-k1 {
+ label = "K1";
+ gpios = <&gpn 0 GPIO_ACTIVE_LOW>;
+ linux,code = <2>;
+ debounce-interval = <20>;
+ };
+
+ button-k2 {
+ label = "K2";
+ gpios = <&gpn 1 GPIO_ACTIVE_LOW>;
+ linux,code = <3>;
+ debounce-interval = <20>;
+ };
+
+ button-k3 {
+ label = "K3";
+ gpios = <&gpn 2 GPIO_ACTIVE_LOW>;
+ linux,code = <4>;
+ debounce-interval = <20>;
+ };
+
+ button-k4 {
+ label = "K4";
+ gpios = <&gpn 3 GPIO_ACTIVE_LOW>;
+ linux,code = <5>;
+ debounce-interval = <20>;
+ };
+
+ button-k5 {
+ label = "K5";
+ gpios = <&gpn 4 GPIO_ACTIVE_LOW>;
+ linux,code = <6>;
+ debounce-interval = <20>;
+ };
+
+ button-k6 {
+ label = "K6";
+ gpios = <&gpn 5 GPIO_ACTIVE_LOW>;
+ linux,code = <7>;
+ debounce-interval = <20>;
+ };
+
+ button-k7 {
+ label = "K7";
+ gpios = <&gpl 11 GPIO_ACTIVE_LOW>;
+ linux,code = <8>;
+ debounce-interval = <20>;
+ };
+
+ button-k8 {
+ label = "K8";
+ gpios = <&gpl 12 GPIO_ACTIVE_LOW>;
+ linux,code = <9>;
+ debounce-interval = <20>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_leds>;
+
+ led-1 {
+ label = "LED1";
+ gpios = <&gpk 4 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-2 {
+ label = "LED2";
+ gpios = <&gpk 5 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "mmc0";
+ };
+
+ led-3 {
+ label = "LED3";
+ gpios = <&gpk 6 GPIO_ACTIVE_LOW>;
+ };
+
+ led-4 {
+ label = "LED4";
+ gpios = <&gpk 7 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ buzzer {
+ compatible = "pwm-beeper";
+ pwms = <&pwm 0 1000000 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_out>;
+ };
+};
+
+&sdhci0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_data>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_data>, <&uart1_fctl>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_data>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_data>;
+ status = "okay";
+};
+
+&pwm {
+ status = "okay";
+};
+
+&pinctrl0 {
+ gpio_leds: gpio-leds {
+ samsung,pins = "gpk-4", "gpk-5", "gpk-6", "gpk-7";
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ gpio_keys: gpio-keys {
+ samsung,pins = "gpn-0", "gpn-1", "gpn-2", "gpn-3",
+ "gpn-4", "gpn-5", "gpl-11", "gpl-12";
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_bus>;
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c08";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+};
diff --git a/arch/arm/boot/dts/s3c6410-smdk6410.dts b/arch/arm/boot/dts/s3c6410-smdk6410.dts
new file mode 100644
index 000000000000..ecf35ec466f7
--- /dev/null
+++ b/arch/arm/boot/dts/s3c6410-smdk6410.dts
@@ -0,0 +1,103 @@
+/*
+ * Samsung S3C6410 based SMDK6410 board device tree source.
+ *
+ * Copyright (c) 2013 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * Device tree source file for SAMSUNG SMDK6410 board which is based on
+ * Samsung's S3C6410 SoC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#include "s3c6410.dtsi"
+
+/ {
+ model = "SAMSUNG SMDK6410 board based on S3C6410";
+ compatible = "samsung,mini6410", "samsung,s3c6410";
+
+ memory {
+ reg = <0x50000000 0x8000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttySAC0,115200n8 earlyprintk rootwait root=/dev/mmcblk0p1";
+ };
+
+ clocks {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fin_pll: oscillator@0 {
+ compatible = "fixed-clock";
+ reg = <0>;
+ clock-frequency = <12000000>;
+ clock-output-names = "fin_pll";
+ #clock-cells = <0>;
+ };
+
+ xusbxti: oscillator@1 {
+ compatible = "fixed-clock";
+ reg = <1>;
+ clock-output-names = "xusbxti";
+ clock-frequency = <48000000>;
+ #clock-cells = <0>;
+ };
+ };
+
+ srom-cs1@18000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x18000000 0x8000000>;
+ ranges;
+
+ ethernet@18000000 {
+ compatible = "smsc,lan9115";
+ reg = <0x18000000 0x10000>;
+ interrupt-parent = <&gpn>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
+ phy-mode = "mii";
+ reg-io-width = <4>;
+ smsc,force-internal-phy;
+ };
+ };
+};
+
+&sdhci0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_data>, <&uart0_fctl>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_data>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_data>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_data>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/s3c6410.dtsi b/arch/arm/boot/dts/s3c6410.dtsi
new file mode 100644
index 000000000000..eb4226b3407c
--- /dev/null
+++ b/arch/arm/boot/dts/s3c6410.dtsi
@@ -0,0 +1,57 @@
+/*
+ * Samsung's S3C6410 SoC device tree source
+ *
+ * Copyright (c) 2013 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * Samsung's S3C6410 SoC device nodes are listed in this file. S3C6410
+ * based board files can include this file and provide values for board specific
+ * bindings.
+ *
+ * Note: This file does not include device nodes for all the controllers in
+ * S3C6410 SoC. As device tree coverage for S3C6410 increases, additional
+ * nodes can be added to this file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include "s3c64xx.dtsi"
+
+/ {
+ compatible = "samsung,s3c6410";
+
+ aliases {
+ i2c1 = &i2c1;
+ };
+};
+
+&vic0 {
+ valid-mask = <0xffffff7f>;
+ valid-wakeup-mask = <0x00200004>;
+};
+
+&vic1 {
+ valid-mask = <0xffffffff>;
+ valid-wakeup-mask = <0x53020000>;
+};
+
+&soc {
+ clocks: clock-controller@7e00f000 {
+ compatible = "samsung,s3c6410-clock";
+ reg = <0x7e00f000 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ i2c1: i2c@7f00f000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x7f00f000 0x1000>;
+ interrupt-parent = <&vic0>;
+ interrupts = <5>;
+ clock-names = "i2c";
+ clocks = <&clocks PCLK_IIC1>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+};
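As the header comment says, this dtsi only adds the S3C6410-specific nodes and leaves them disabled, so a board file is expected to include it and enable what it uses. A minimal sketch of such a board file, assuming a hypothetical board with an RTC on the i2c1 controller added above (the board name and the ds1307 device are placeholders, not taken from this patch):

    /dts-v1/;

    #include "s3c6410.dtsi"

    / {
        model = "Example S3C6410 based board";
        compatible = "vendor,example-board", "samsung,s3c6410";
    };

    &i2c1 {
        pinctrl-names = "default";
        pinctrl-0 = <&i2c1_bus>;
        status = "okay";

        rtc@68 {
            compatible = "dallas,ds1307";    /* placeholder slave device */
            reg = <0x68>;
        };
    };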
diff --git a/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi b/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi
new file mode 100644
index 000000000000..b1197d8b04de
--- /dev/null
+++ b/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi
@@ -0,0 +1,687 @@
+/*
+ * Samsung's S3C64xx SoC series common device tree source
+ * - pin control-related definitions
+ *
+ * Copyright (c) 2013 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * Samsung's S3C64xx SoCs pin banks, pin-mux and pin-config options are
+ * listed as device tree nodes in this file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define PIN_PULL_NONE 0
+#define PIN_PULL_DOWN 1
+#define PIN_PULL_UP 2
+
+&pinctrl0 {
+ /*
+ * Pin banks
+ */
+
+ gpa: gpa {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpb: gpb {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc: gpc {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd: gpd {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpe: gpe {
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpf: gpf {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpg: gpg {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gph: gph {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpi: gpi {
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpj: gpj {
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpk: gpk {
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpl: gpl {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpm: gpm {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpn: gpn {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpo: gpo {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpp: gpp {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpq: gpq {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /*
+ * Pin groups
+ */
+
+ uart0_data: uart0-data {
+ samsung,pins = "gpa-0", "gpa-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ uart0_fctl: uart0-fctl {
+ samsung,pins = "gpa-2", "gpa-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ uart1_data: uart1-data {
+ samsung,pins = "gpa-4", "gpa-5";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ uart1_fctl: uart1-fctl {
+ samsung,pins = "gpa-6", "gpa-7";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ uart2_data: uart2-data {
+ samsung,pins = "gpb-0", "gpb-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ uart3_data: uart3-data {
+ samsung,pins = "gpb-2", "gpb-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ ext_dma_0: ext-dma-0 {
+ samsung,pins = "gpb-0", "gpb-1";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ ext_dma_1: ext-dma-1 {
+ samsung,pins = "gpb-2", "gpb-3";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ irda_data_0: irda-data-0 {
+ samsung,pins = "gpb-0", "gpb-1";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ irda_data_1: irda-data-1 {
+ samsung,pins = "gpb-2", "gpb-3";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ irda_sdbw: irda-sdbw {
+ samsung,pins = "gpb-4";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ i2c0_bus: i2c0-bus {
+ samsung,pins = "gpb-5", "gpb-6";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_UP>;
+ };
+
+ i2c1_bus: i2c1-bus {
+ /* S3C6410-only */
+ samsung,pins = "gpb-2", "gpb-3";
+ samsung,pin-function = <6>;
+ samsung,pin-pud = <PIN_PULL_UP>;
+ };
+
+ spi0_bus: spi0-bus {
+ samsung,pins = "gpc-0", "gpc-1", "gpc-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_UP>;
+ };
+
+ spi0_cs: spi0-cs {
+ samsung,pins = "gpc-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ spi1_bus: spi1-bus {
+ samsung,pins = "gpc-4", "gpc-5", "gpc-6";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_UP>;
+ };
+
+ spi1_cs: spi1-cs {
+ samsung,pins = "gpc-7";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ sd0_cmd: sd0-cmd {
+ samsung,pins = "gpg-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ sd0_clk: sd0-clk {
+ samsung,pins = "gpg-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ sd0_bus1: sd0-bus1 {
+ samsung,pins = "gpg-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ sd0_bus4: sd0-bus4 {
+ samsung,pins = "gpg-2", "gpg-3", "gpg-4", "gpg-5";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ sd0_cd: sd0-cd {
+ samsung,pins = "gpg-6";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_UP>;
+ };
+
+ sd1_cmd: sd1-cmd {
+ samsung,pins = "gph-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ sd1_clk: sd1-clk {
+ samsung,pins = "gph-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ sd1_bus1: sd1-bus1 {
+ samsung,pins = "gph-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ sd1_bus4: sd1-bus4 {
+ samsung,pins = "gph-2", "gph-3", "gph-4", "gph-5";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ sd1_bus8: sd1-bus8 {
+ samsung,pins = "gph-2", "gph-3", "gph-4", "gph-5",
+ "gph-6", "gph-7", "gph-8", "gph-9";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ sd1_cd: sd1-cd {
+ samsung,pins = "gpg-6";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_UP>;
+ };
+
+ sd2_cmd: sd2-cmd {
+ samsung,pins = "gpc-4";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ sd2_clk: sd2-clk {
+ samsung,pins = "gpc-5";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ sd2_bus1: sd2-bus1 {
+ samsung,pins = "gph-6";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ sd2_bus4: sd2-bus4 {
+ samsung,pins = "gph-6", "gph-7", "gph-8", "gph-9";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ i2s0_bus: i2s0-bus {
+ samsung,pins = "gpd-0", "gpd-2", "gpd-3", "gpd-4";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ i2s0_cdclk: i2s0-cdclk {
+ samsung,pins = "gpd-1";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ i2s1_bus: i2s1-bus {
+ samsung,pins = "gpe-0", "gpe-2", "gpe-3", "gpe-4";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ i2s1_cdclk: i2s1-cdclk {
+ samsung,pins = "gpe-1";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ i2s2_bus: i2s2-bus {
+ /* S3C6410-only */
+ samsung,pins = "gpc-4", "gpc-5", "gpc-6", "gph-6",
+ "gph-8", "gph-9";
+ samsung,pin-function = <5>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ i2s2_cdclk: i2s2-cdclk {
+ /* S3C6410-only */
+ samsung,pins = "gph-7";
+ samsung,pin-function = <5>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ pcm0_bus: pcm0-bus {
+ samsung,pins = "gpd-0", "gpd-2", "gpd-3", "gpd-4";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ pcm0_extclk: pcm0-extclk {
+ samsung,pins = "gpd-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ pcm1_bus: pcm1-bus {
+ samsung,pins = "gpe-0", "gpe-2", "gpe-3", "gpe-4";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ pcm1_extclk: pcm1-extclk {
+ samsung,pins = "gpe-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ ac97_bus_0: ac97-bus-0 {
+ samsung,pins = "gpd-0", "gpd-1", "gpd-2", "gpd-3", "gpd-4";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ ac97_bus_1: ac97-bus-1 {
+ samsung,pins = "gpe-0", "gpe-1", "gpe-2", "gpe-3", "gpe-4";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ cam_port: cam-port {
+ samsung,pins = "gpf-0", "gpf-1", "gpf-2", "gpf-4",
+ "gpf-5", "gpf-6", "gpf-7", "gpf-8",
+ "gpf-9", "gpf-10", "gpf-11", "gpf-12";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ cam_rst: cam-rst {
+ samsung,pins = "gpf-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ cam_field: cam-field {
+ /* S3C6410-only */
+ samsung,pins = "gpb-4";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ pwm_extclk: pwm-extclk {
+ samsung,pins = "gpf-13";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ pwm0_out: pwm0-out {
+ samsung,pins = "gpf-14";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ pwm1_out: pwm1-out {
+ samsung,pins = "gpf-15";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ clkout0: clkout-0 {
+ samsung,pins = "gpf-14";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col0_0: keypad-col0-0 {
+ samsung,pins = "gph-0";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col1_0: keypad-col1-0 {
+ samsung,pins = "gph-1";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col2_0: keypad-col2-0 {
+ samsung,pins = "gph-2";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col3_0: keypad-col3-0 {
+ samsung,pins = "gph-3";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col4_0: keypad-col4-0 {
+ samsung,pins = "gph-4";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col5_0: keypad-col5-0 {
+ samsung,pins = "gph-5";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col6_0: keypad-col6-0 {
+ samsung,pins = "gph-6";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col7_0: keypad-col7-0 {
+ samsung,pins = "gph-7";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col0_1: keypad-col0-1 {
+ samsung,pins = "gpl-0";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col1_1: keypad-col1-1 {
+ samsung,pins = "gpl-1";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col2_1: keypad-col2-1 {
+ samsung,pins = "gpl-2";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col3_1: keypad-col3-1 {
+ samsung,pins = "gpl-3";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col4_1: keypad-col4-1 {
+ samsung,pins = "gpl-4";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col5_1: keypad-col5-1 {
+ samsung,pins = "gpl-5";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col6_1: keypad-col6-1 {
+ samsung,pins = "gpl-6";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_col7_1: keypad-col7-1 {
+ samsung,pins = "gpl-7";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row0_0: keypad-row0-0 {
+ samsung,pins = "gpk-8";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row1_0: keypad-row1-0 {
+ samsung,pins = "gpk-9";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row2_0: keypad-row2-0 {
+ samsung,pins = "gpk-10";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row3_0: keypad-row3-0 {
+ samsung,pins = "gpk-11";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row4_0: keypad-row4-0 {
+ samsung,pins = "gpk-12";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row5_0: keypad-row5-0 {
+ samsung,pins = "gpk-13";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row6_0: keypad-row6-0 {
+ samsung,pins = "gpk-14";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row7_0: keypad-row7-0 {
+ samsung,pins = "gpk-15";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row0_1: keypad-row0-1 {
+ samsung,pins = "gpn-0";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row1_1: keypad-row1-1 {
+ samsung,pins = "gpn-1";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row2_1: keypad-row2-1 {
+ samsung,pins = "gpn-2";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row3_1: keypad-row3-1 {
+ samsung,pins = "gpn-3";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row4_1: keypad-row4-1 {
+ samsung,pins = "gpn-4";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row5_1: keypad-row5-1 {
+ samsung,pins = "gpn-5";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row6_1: keypad-row6-1 {
+ samsung,pins = "gpn-6";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ keypad_row7_1: keypad-row7-1 {
+ samsung,pins = "gpn-7";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ lcd_ctrl: lcd-ctrl {
+ samsung,pins = "gpj-8", "gpj-9", "gpj-10", "gpj-11";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ lcd_data16: lcd-data-width16 {
+ samsung,pins = "gpi-3", "gpi-4", "gpi-5", "gpi-6",
+ "gpi-7", "gpi-10", "gpi-11", "gpi-12",
+ "gpi-13", "gpi-14", "gpi-15", "gpj-3",
+ "gpj-4", "gpj-5", "gpj-6", "gpj-7";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ lcd_data18: lcd-data-width18 {
+ samsung,pins = "gpi-2", "gpi-3", "gpi-4", "gpi-5",
+ "gpi-6", "gpi-7", "gpi-10", "gpi-11",
+ "gpi-12", "gpi-13", "gpi-14", "gpi-15",
+ "gpj-2", "gpj-3", "gpj-4", "gpj-5",
+ "gpj-6", "gpj-7";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ lcd_data24: lcd-data-width24 {
+ samsung,pins = "gpi-0", "gpi-1", "gpi-2", "gpi-3",
+ "gpi-4", "gpi-5", "gpi-6", "gpi-7",
+ "gpi-8", "gpi-9", "gpi-10", "gpi-11",
+ "gpi-12", "gpi-13", "gpi-14", "gpi-15",
+ "gpj-0", "gpj-1", "gpj-2", "gpj-3",
+ "gpj-4", "gpj-5", "gpj-6", "gpj-7";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+
+ hsi_bus: hsi-bus {
+ samsung,pins = "gpk-0", "gpk-1", "gpk-2", "gpk-3",
+ "gpk-4", "gpk-5", "gpk-6", "gpk-7";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <PIN_PULL_NONE>;
+ };
+};
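Board files consume these groups through pinctrl-names/pinctrl-0, and they can also append their own groups to &pinctrl0 with the same PIN_PULL_* macros, as the Mini6410 file earlier in this patch does for its LEDs and keys. A short sketch with a made-up group and consumer, purely for illustration:

    #include <dt-bindings/gpio/gpio.h>

    &pinctrl0 {
        gpio_ir: gpio-ir {
            /* hypothetical board-specific group */
            samsung,pins = "gpf-13";
            samsung,pin-pud = <PIN_PULL_UP>;
        };
    };

    / {
        ir-receiver {
            compatible = "gpio-ir-receiver";    /* assumed consumer, not part of this patch */
            gpios = <&gpf 13 GPIO_ACTIVE_LOW>;
            pinctrl-names = "default";
            pinctrl-0 = <&gpio_ir>;
        };
    };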
diff --git a/arch/arm/boot/dts/s3c64xx.dtsi b/arch/arm/boot/dts/s3c64xx.dtsi
new file mode 100644
index 000000000000..4e3be4d3493d
--- /dev/null
+++ b/arch/arm/boot/dts/s3c64xx.dtsi
@@ -0,0 +1,199 @@
+/*
+ * Samsung's S3C64xx SoC series common device tree source
+ *
+ * Copyright (c) 2013 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * Samsung's S3C64xx SoC series device nodes are listed in this file.
+ * Particular SoCs from S3C64xx series can include this file and provide
+ * values for SoC specific bindings.
+ *
+ * Note: This file does not include device nodes for all the controllers in
+ * S3C64xx SoCs. As device tree coverage for S3C64xx increases, additional
+ * nodes can be added to this file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "skeleton.dtsi"
+#include <dt-bindings/clock/samsung,s3c64xx-clock.h>
+
+/ {
+ aliases {
+ i2c0 = &i2c0;
+ pinctrl0 = &pinctrl0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,arm1176jzf-s", "arm,arm1176";
+ reg = <0x0>;
+ };
+ };
+
+ soc: soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ vic0: interrupt-controller@71200000 {
+ compatible = "arm,pl192-vic";
+ interrupt-controller;
+ reg = <0x71200000 0x1000>;
+ #interrupt-cells = <1>;
+ };
+
+ vic1: interrupt-controller@71300000 {
+ compatible = "arm,pl192-vic";
+ interrupt-controller;
+ reg = <0x71300000 0x1000>;
+ #interrupt-cells = <1>;
+ };
+
+ sdhci0: sdhci@7c200000 {
+ compatible = "samsung,s3c6410-sdhci";
+ reg = <0x7c200000 0x100>;
+ interrupt-parent = <&vic1>;
+ interrupts = <24>;
+ clock-names = "hsmmc", "mmc_busclk.0", "mmc_busclk.2";
+ clocks = <&clocks HCLK_HSMMC0>, <&clocks HCLK_HSMMC0>,
+ <&clocks SCLK_MMC0>;
+ status = "disabled";
+ };
+
+ sdhci1: sdhci@7c300000 {
+ compatible = "samsung,s3c6410-sdhci";
+ reg = <0x7c300000 0x100>;
+ interrupt-parent = <&vic1>;
+ interrupts = <25>;
+ clock-names = "hsmmc", "mmc_busclk.0", "mmc_busclk.2";
+ clocks = <&clocks HCLK_HSMMC1>, <&clocks HCLK_HSMMC1>,
+ <&clocks SCLK_MMC1>;
+ status = "disabled";
+ };
+
+ sdhci2: sdhci@7c400000 {
+ compatible = "samsung,s3c6410-sdhci";
+ reg = <0x7c400000 0x100>;
+ interrupt-parent = <&vic1>;
+ interrupts = <17>;
+ clock-names = "hsmmc", "mmc_busclk.0", "mmc_busclk.2";
+ clocks = <&clocks HCLK_HSMMC2>, <&clocks HCLK_HSMMC2>,
+ <&clocks SCLK_MMC2>;
+ status = "disabled";
+ };
+
+ watchdog: watchdog@7e004000 {
+ compatible = "samsung,s3c2410-wdt";
+ reg = <0x7e004000 0x1000>;
+ interrupt-parent = <&vic0>;
+ interrupts = <26>;
+ clock-names = "watchdog";
+ clocks = <&clocks PCLK_WDT>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@7f004000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x7f004000 0x1000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <18>;
+ clock-names = "i2c";
+ clocks = <&clocks PCLK_IIC0>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ uart0: serial@7f005000 {
+ compatible = "samsung,s3c6400-uart";
+ reg = <0x7f005000 0x100>;
+ interrupt-parent = <&vic1>;
+ interrupts = <5>;
+ clock-names = "uart", "clk_uart_baud2",
+ "clk_uart_baud3";
+ clocks = <&clocks PCLK_UART0>, <&clocks PCLK_UART0>,
+ <&clocks SCLK_UART>;
+ status = "disabled";
+ };
+
+ uart1: serial@7f005400 {
+ compatible = "samsung,s3c6400-uart";
+ reg = <0x7f005400 0x100>;
+ interrupt-parent = <&vic1>;
+ interrupts = <6>;
+ clock-names = "uart", "clk_uart_baud2",
+ "clk_uart_baud3";
+ clocks = <&clocks PCLK_UART1>, <&clocks PCLK_UART1>,
+ <&clocks SCLK_UART>;
+ status = "disabled";
+ };
+
+ uart2: serial@7f005800 {
+ compatible = "samsung,s3c6400-uart";
+ reg = <0x7f005800 0x100>;
+ interrupt-parent = <&vic1>;
+ interrupts = <7>;
+ clock-names = "uart", "clk_uart_baud2",
+ "clk_uart_baud3";
+ clocks = <&clocks PCLK_UART2>, <&clocks PCLK_UART2>,
+ <&clocks SCLK_UART>;
+ status = "disabled";
+ };
+
+ uart3: serial@7f005c00 {
+ compatible = "samsung,s3c6400-uart";
+ reg = <0x7f005c00 0x100>;
+ interrupt-parent = <&vic1>;
+ interrupts = <8>;
+ clock-names = "uart", "clk_uart_baud2",
+ "clk_uart_baud3";
+ clocks = <&clocks PCLK_UART3>, <&clocks PCLK_UART3>,
+ <&clocks SCLK_UART>;
+ status = "disabled";
+ };
+
+ pwm: pwm@7f006000 {
+ compatible = "samsung,s3c6400-pwm";
+ reg = <0x7f006000 0x1000>;
+ interrupt-parent = <&vic0>;
+ interrupts = <23>, <24>, <25>, <27>, <28>;
+ clock-names = "timers";
+ clocks = <&clocks PCLK_PWM>;
+ samsung,pwm-outputs = <0>, <1>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pinctrl0: pinctrl@7f008000 {
+ compatible = "samsung,s3c64xx-pinctrl";
+ reg = <0x7f008000 0x1000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <21>;
+
+ pctrl_int_map: pinctrl-interrupt-map {
+ interrupt-map = <0 &vic0 0>,
+ <1 &vic0 1>,
+ <2 &vic1 0>,
+ <3 &vic1 1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+
+ wakeup-interrupt-controller {
+ compatible = "samsung,s3c64xx-wakeup-eint";
+ interrupts = <0>, <1>, <2>, <3>;
+ interrupt-parent = <&pctrl_int_map>;
+ };
+ };
+ };
+};
+
+#include "s3c64xx-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index b7f49615120d..cbc7c8ecdeaa 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -31,7 +31,6 @@
gpio3 = &pioD;
gpio4 = &pioE;
tcb0 = &tcb0;
- tcb1 = &tcb1;
i2c0 = &i2c0;
i2c1 = &i2c1;
i2c2 = &i2c2;
@@ -105,15 +104,6 @@
status = "disabled";
};
- can0: can@f000c000 {
- compatible = "atmel,at91sam9x5-can";
- reg = <0xf000c000 0x300>;
- interrupts = <40 IRQ_TYPE_LEVEL_HIGH 3>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_can0_rx_tx>;
- status = "disabled";
- };
-
tcb0: timer@f0010000 {
compatible = "atmel,at91sam9x5-tcb";
reg = <0xf0010000 0x100>;
@@ -166,15 +156,6 @@
status = "disabled";
};
- macb0: ethernet@f0028000 {
- compatible = "cdns,pc302-gem", "cdns,gem";
- reg = <0xf0028000 0x100>;
- interrupts = <34 IRQ_TYPE_LEVEL_HIGH 3>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_macb0_data_rgmii &pinctrl_macb0_signal_rgmii>;
- status = "disabled";
- };
-
isi: isi@f0034000 {
compatible = "atmel,at91sam9g45-isi";
reg = <0xf0034000 0x4000>;
@@ -195,19 +176,6 @@
#size-cells = <0>;
};
- mmc2: mmc@f8004000 {
- compatible = "atmel,hsmci";
- reg = <0xf8004000 0x600>;
- interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
- dmas = <&dma1 2 AT91_DMA_CFG_PER_ID(1)>;
- dma-names = "rxtx";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_mmc2_clk_cmd_dat0 &pinctrl_mmc2_dat1_3>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
spi1: spi@f8008000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -231,20 +199,6 @@
status = "disabled";
};
- can1: can@f8010000 {
- compatible = "atmel,at91sam9x5-can";
- reg = <0xf8010000 0x300>;
- interrupts = <41 IRQ_TYPE_LEVEL_HIGH 3>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_can1_rx_tx>;
- };
-
- tcb1: timer@f8014000 {
- compatible = "atmel,at91sam9x5-tcb";
- reg = <0xf8014000 0x100>;
- interrupts = <27 IRQ_TYPE_LEVEL_HIGH 0>;
- };
-
adc0: adc@f8018000 {
compatible = "atmel,at91sam9260-adc";
reg = <0xf8018000 0x100>;
@@ -341,15 +295,6 @@
status = "disabled";
};
- macb1: ethernet@f802c000 {
- compatible = "cdns,at32ap7000-macb", "cdns,macb";
- reg = <0xf802c000 0x100>;
- interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_macb1_rmii>;
- status = "disabled";
- };
-
sha@f8034000 {
compatible = "atmel,sam9g46-sha";
reg = <0xf8034000 0x100>;
@@ -474,22 +419,6 @@
};
};
- can0 {
- pinctrl_can0_rx_tx: can0_rx_tx {
- atmel,pins =
- <AT91_PIOD 14 AT91_PERIPH_C AT91_PINCTRL_NONE /* PD14 periph C RX, conflicts with SCK0, SPI0_NPCS1 */
- AT91_PIOD 15 AT91_PERIPH_C AT91_PINCTRL_NONE>; /* PD15 periph C TX, conflicts with CTS0, SPI0_NPCS2 */
- };
- };
-
- can1 {
- pinctrl_can1_rx_tx: can1_rx_tx {
- atmel,pins =
- <AT91_PIOB 14 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB14 periph B RX, conflicts with GCRS */
- AT91_PIOB 15 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB15 periph B TX, conflicts with GCOL */
- };
- };
-
dbgu {
pinctrl_dbgu: dbgu-0 {
atmel,pins =
@@ -537,107 +466,6 @@
};
};
- lcd {
- pinctrl_lcd: lcd-0 {
- atmel,pins =
- <AT91_PIOA 24 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA24 periph A LCDPWM */
- AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA26 periph A LCDVSYNC */
- AT91_PIOA 27 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA27 periph A LCDHSYNC */
- AT91_PIOA 25 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA25 periph A LCDDISP */
- AT91_PIOA 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA29 periph A LCDDEN */
- AT91_PIOA 28 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA28 periph A LCDPCK */
- AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA0 periph A LCDD0 pin */
- AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA1 periph A LCDD1 pin */
- AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA2 periph A LCDD2 pin */
- AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA3 periph A LCDD3 pin */
- AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA4 periph A LCDD4 pin */
- AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA5 periph A LCDD5 pin */
- AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA6 periph A LCDD6 pin */
- AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA7 periph A LCDD7 pin */
- AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA8 periph A LCDD8 pin */
- AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA9 periph A LCDD9 pin */
- AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA10 periph A LCDD10 pin */
- AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA11 periph A LCDD11 pin */
- AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA12 periph A LCDD12 pin */
- AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA13 periph A LCDD13 pin */
- AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA14 periph A LCDD14 pin */
- AT91_PIOA 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA15 periph A LCDD15 pin */
- AT91_PIOC 14 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC14 periph C LCDD16 pin */
- AT91_PIOC 13 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC13 periph C LCDD17 pin */
- AT91_PIOC 12 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC12 periph C LCDD18 pin */
- AT91_PIOC 11 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC11 periph C LCDD19 pin */
- AT91_PIOC 10 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC10 periph C LCDD20 pin */
- AT91_PIOC 15 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC15 periph C LCDD21 pin */
- AT91_PIOE 27 AT91_PERIPH_C AT91_PINCTRL_NONE /* PE27 periph C LCDD22 pin */
- AT91_PIOE 28 AT91_PERIPH_C AT91_PINCTRL_NONE>; /* PE28 periph C LCDD23 pin */
- };
- };
-
- macb0 {
- pinctrl_macb0_data_rgmii: macb0_data_rgmii {
- atmel,pins =
- <AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB0 periph A GTX0, conflicts with PWMH0 */
- AT91_PIOB 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB1 periph A GTX1, conflicts with PWML0 */
- AT91_PIOB 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB2 periph A GTX2, conflicts with TK1 */
- AT91_PIOB 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB3 periph A GTX3, conflicts with TF1 */
- AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB4 periph A GRX0, conflicts with PWMH1 */
- AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB5 periph A GRX1, conflicts with PWML1 */
- AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB6 periph A GRX2, conflicts with TD1 */
- AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PB7 periph A GRX3, conflicts with RK1 */
- };
- pinctrl_macb0_data_gmii: macb0_data_gmii {
- atmel,pins =
- <AT91_PIOB 19 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB19 periph B GTX4, conflicts with MCI1_CDA */
- AT91_PIOB 20 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB20 periph B GTX5, conflicts with MCI1_DA0 */
- AT91_PIOB 21 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB21 periph B GTX6, conflicts with MCI1_DA1 */
- AT91_PIOB 22 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB22 periph B GTX7, conflicts with MCI1_DA2 */
- AT91_PIOB 23 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB23 periph B GRX4, conflicts with MCI1_DA3 */
- AT91_PIOB 24 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB24 periph B GRX5, conflicts with MCI1_CK */
- AT91_PIOB 25 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB25 periph B GRX6, conflicts with SCK1 */
- AT91_PIOB 26 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB26 periph B GRX7, conflicts with CTS1 */
- };
- pinctrl_macb0_signal_rgmii: macb0_signal_rgmii {
- atmel,pins =
- <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB8 periph A GTXCK, conflicts with PWMH2 */
- AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB9 periph A GTXEN, conflicts with PWML2 */
- AT91_PIOB 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB11 periph A GRXCK, conflicts with RD1 */
- AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB13 periph A GRXER, conflicts with PWML3 */
- AT91_PIOB 16 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB16 periph A GMDC */
- AT91_PIOB 17 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB17 periph A GMDIO */
- AT91_PIOB 18 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PB18 periph A G125CK */
- };
- pinctrl_macb0_signal_gmii: macb0_signal_gmii {
- atmel,pins =
- <AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB9 periph A GTXEN, conflicts with PWML2 */
- AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB10 periph A GTXER, conflicts with RF1 */
- AT91_PIOB 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB11 periph A GRXCK, conflicts with RD1 */
- AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB12 periph A GRXDV, conflicts with PWMH3 */
- AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB13 periph A GRXER, conflicts with PWML3 */
- AT91_PIOB 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB14 periph A GCRS, conflicts with CANRX1 */
- AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB15 periph A GCOL, conflicts with CANTX1 */
- AT91_PIOB 16 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB16 periph A GMDC */
- AT91_PIOB 17 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB17 periph A GMDIO */
- AT91_PIOB 27 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB27 periph B G125CKO */
- };
-
- };
-
- macb1 {
- pinctrl_macb1_rmii: macb1_rmii-0 {
- atmel,pins =
- <AT91_PIOC 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC0 periph A ETX0, conflicts with TIOA3 */
- AT91_PIOC 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC1 periph A ETX1, conflicts with TIOB3 */
- AT91_PIOC 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC2 periph A ERX0, conflicts with TCLK3 */
- AT91_PIOC 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC3 periph A ERX1, conflicts with TIOA4 */
- AT91_PIOC 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC4 periph A ETXEN, conflicts with TIOB4 */
- AT91_PIOC 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC5 periph A ECRSDV,conflicts with TCLK4 */
- AT91_PIOC 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC6 periph A ERXER, conflicts with TIOA5 */
- AT91_PIOC 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC7 periph A EREFCK, conflicts with TIOB5 */
- AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC8 periph A EMDC, conflicts with TCLK5 */
- AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PC9 periph A EMDIO */
- };
- };
-
mmc0 {
pinctrl_mmc0_clk_cmd_dat0: mmc0_clk_cmd_dat0 {
atmel,pins =
@@ -675,21 +503,6 @@
};
};
- mmc2 {
- pinctrl_mmc2_clk_cmd_dat0: mmc2_clk_cmd_dat0 {
- atmel,pins =
- <AT91_PIOC 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC15 periph A MCI2_CK, conflicts with PCK2 */
- AT91_PIOC 10 AT91_PERIPH_A AT91_PINCTRL_PULL_UP /* PC10 periph A MCI2_CDA with pullup */
- AT91_PIOC 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>; /* PC11 periph A MCI2_DA0 with pullup */
- };
- pinctrl_mmc2_dat1_3: mmc2_dat1_3 {
- atmel,pins =
- <AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC12 periph A MCI2_DA1 with pullup, conflicts with TIOA1 */
- AT91_PIOC 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC13 periph A MCI2_DA2 with pullup, conflicts with TIOB1 */
- AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PC14 periph A MCI2_DA3 with pullup, conflicts with TCLK1 */
- };
- };
-
nand0 {
pinctrl_nand0_ale_cle: nand0_ale_cle-0 {
atmel,pins =
@@ -748,22 +561,6 @@
};
};
- uart0 {
- pinctrl_uart0: uart0-0 {
- atmel,pins =
- <AT91_PIOC 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC29 periph A, conflicts with PWMFI2, ISI_D8 */
- AT91_PIOC 30 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>; /* PC30 periph A with pullup, conflicts with ISI_PCK */
- };
- };
-
- uart1 {
- pinctrl_uart1: uart1-0 {
- atmel,pins =
- <AT91_PIOA 30 AT91_PERIPH_B AT91_PINCTRL_NONE /* PA30 periph B, conflicts with TWD0, ISI_VSYNC */
- AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_PULL_UP>; /* PA31 periph B with pullup, conflicts with TWCK0, ISI_HSYNC */
- };
- };
-
usart0 {
pinctrl_usart0: usart0-0 {
atmel,pins =
@@ -891,6 +688,11 @@
watchdog@fffffe40 {
compatible = "atmel,at91sam9260-wdt";
reg = <0xfffffe40 0x10>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH 7>;
+ atmel,watchdog-type = "hardware";
+ atmel,reset-type = "all";
+ atmel,dbg-halt;
+ atmel,idle-halt;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sama5d31.dtsi b/arch/arm/boot/dts/sama5d31.dtsi
new file mode 100644
index 000000000000..7997dc9863ed
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d31.dtsi
@@ -0,0 +1,16 @@
+/*
+ * sama5d31.dtsi - Device Tree Include file for SAMA5D31 SoC
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+#include "sama5d3.dtsi"
+#include "sama5d3_lcd.dtsi"
+#include "sama5d3_emac.dtsi"
+#include "sama5d3_mci2.dtsi"
+#include "sama5d3_uart.dtsi"
+
+/ {
+ compatible = "atmel,samad31", "atmel,sama5d3", "atmel,sama5";
+};
diff --git a/arch/arm/boot/dts/sama5d31ek.dts b/arch/arm/boot/dts/sama5d31ek.dts
index 027bac7510b6..04eec0dfcf7d 100644
--- a/arch/arm/boot/dts/sama5d31ek.dts
+++ b/arch/arm/boot/dts/sama5d31ek.dts
@@ -7,12 +7,13 @@
* Licensed under GPLv2 or later.
*/
/dts-v1/;
+#include "sama5d31.dtsi"
#include "sama5d3xmb.dtsi"
#include "sama5d3xdm.dtsi"
/ {
model = "Atmel SAMA5D31-EK";
- compatible = "atmel,sama5d31ek", "atmel,sama5d3xmb", "atmel,sama5d3xcm", "atmel,sama5d3", "atmel,sama5";
+ compatible = "atmel,sama5d31ek", "atmel,sama5d3xmb", "atmel,sama5d3xcm", "atmel,sama5d31", "atmel,sama5d3", "atmel,sama5";
ahb {
apb {
diff --git a/arch/arm/boot/dts/sama5d33.dtsi b/arch/arm/boot/dts/sama5d33.dtsi
new file mode 100644
index 000000000000..39f832253caf
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d33.dtsi
@@ -0,0 +1,14 @@
+/*
+ * sama5d33.dtsi - Device Tree Include file for SAMA5D33 SoC
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+#include "sama5d3.dtsi"
+#include "sama5d3_lcd.dtsi"
+#include "sama5d3_gmac.dtsi"
+
+/ {
+ compatible = "atmel,samad33", "atmel,sama5d3", "atmel,sama5";
+};
diff --git a/arch/arm/boot/dts/sama5d33ek.dts b/arch/arm/boot/dts/sama5d33ek.dts
index 99bd0c8e0471..cbd6a3ff1545 100644
--- a/arch/arm/boot/dts/sama5d33ek.dts
+++ b/arch/arm/boot/dts/sama5d33ek.dts
@@ -7,12 +7,13 @@
* Licensed under GPLv2 or later.
*/
/dts-v1/;
+#include "sama5d33.dtsi"
#include "sama5d3xmb.dtsi"
#include "sama5d3xdm.dtsi"
/ {
model = "Atmel SAMA5D33-EK";
- compatible = "atmel,sama5d33ek", "atmel,sama5d3xmb", "atmel,sama5d3xcm", "atmel,sama5d3", "atmel,sama5";
+ compatible = "atmel,sama5d33ek", "atmel,sama5d3xmb", "atmel,sama5d3xcm", "atmel,sama5d33", "atmel,sama5d3", "atmel,sama5";
ahb {
apb {
diff --git a/arch/arm/boot/dts/sama5d34.dtsi b/arch/arm/boot/dts/sama5d34.dtsi
new file mode 100644
index 000000000000..89cda2c0da39
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d34.dtsi
@@ -0,0 +1,16 @@
+/*
+ * sama5d34.dtsi - Device Tree Include file for SAMA5D34 SoC
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+#include "sama5d3.dtsi"
+#include "sama5d3_lcd.dtsi"
+#include "sama5d3_gmac.dtsi"
+#include "sama5d3_can.dtsi"
+#include "sama5d3_mci2.dtsi"
+
+/ {
+ compatible = "atmel,samad34", "atmel,sama5d3", "atmel,sama5";
+};
diff --git a/arch/arm/boot/dts/sama5d34ek.dts b/arch/arm/boot/dts/sama5d34ek.dts
index fb8ee11cf282..878aa164275a 100644
--- a/arch/arm/boot/dts/sama5d34ek.dts
+++ b/arch/arm/boot/dts/sama5d34ek.dts
@@ -7,12 +7,13 @@
* Licensed under GPLv2 or later.
*/
/dts-v1/;
+#include "sama5d34.dtsi"
#include "sama5d3xmb.dtsi"
#include "sama5d3xdm.dtsi"
/ {
model = "Atmel SAMA5D34-EK";
- compatible = "atmel,sama5d34ek", "atmel,sama5d3xmb", "atmel,sama5d3xcm", "atmel,sama5d3", "atmel,sama5";
+ compatible = "atmel,sama5d34ek", "atmel,sama5d3xmb", "atmel,sama5d3xcm", "atmel,sama5d34", "atmel,sama5d3", "atmel,sama5";
ahb {
apb {
diff --git a/arch/arm/boot/dts/sama5d35.dtsi b/arch/arm/boot/dts/sama5d35.dtsi
new file mode 100644
index 000000000000..d20cd71b5f0e
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d35.dtsi
@@ -0,0 +1,18 @@
+/*
+ * sama5d35.dtsi - Device Tree Include file for SAMA5D35 SoC
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+#include "sama5d3.dtsi"
+#include "sama5d3_gmac.dtsi"
+#include "sama5d3_emac.dtsi"
+#include "sama5d3_can.dtsi"
+#include "sama5d3_mci2.dtsi"
+#include "sama5d3_uart.dtsi"
+#include "sama5d3_tcb1.dtsi"
+
+/ {
+ compatible = "atmel,samad35", "atmel,sama5d3", "atmel,sama5";
+};
diff --git a/arch/arm/boot/dts/sama5d35ek.dts b/arch/arm/boot/dts/sama5d35ek.dts
index 509a53d9cc7b..9089c7c6cea8 100644
--- a/arch/arm/boot/dts/sama5d35ek.dts
+++ b/arch/arm/boot/dts/sama5d35ek.dts
@@ -7,11 +7,12 @@
* Licensed under GPLv2 or later.
*/
/dts-v1/;
+#include "sama5d35.dtsi"
#include "sama5d3xmb.dtsi"
/ {
model = "Atmel SAMA5D35-EK";
- compatible = "atmel,sama5d35ek", "atmel,sama5d3xmb", "atmel,sama5d3xcm", "atmel,sama5d3", "atmel,sama5";
+ compatible = "atmel,sama5d35ek", "atmel,sama5d3xmb", "atmel,sama5d3xcm", "atmel,sama5d35", "atmel,sama5d3", "atmel,sama5";
ahb {
apb {
diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi
new file mode 100644
index 000000000000..8ed3260cef66
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d3_can.dtsi
@@ -0,0 +1,54 @@
+/*
+ * sama5d3_can.dtsi - Device Tree Include file for SAMA5D3 SoC with
+ * CAN support
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <dt-bindings/pinctrl/at91.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ ahb {
+ apb {
+ pinctrl@fffff200 {
+ can0 {
+ pinctrl_can0_rx_tx: can0_rx_tx {
+ atmel,pins =
+ <AT91_PIOD 14 AT91_PERIPH_C AT91_PINCTRL_NONE /* PD14 periph C RX, conflicts with SCK0, SPI0_NPCS1 */
+ AT91_PIOD 15 AT91_PERIPH_C AT91_PINCTRL_NONE>; /* PD15 periph C TX, conflicts with CTS0, SPI0_NPCS2 */
+ };
+ };
+
+ can1 {
+ pinctrl_can1_rx_tx: can1_rx_tx {
+ atmel,pins =
+ <AT91_PIOB 14 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB14 periph B RX, conflicts with GCRS */
+ AT91_PIOB 15 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB15 periph B TX, conflicts with GCOL */
+ };
+ };
+
+ };
+
+ can0: can@f000c000 {
+ compatible = "atmel,at91sam9x5-can";
+ reg = <0xf000c000 0x300>;
+ interrupts = <40 IRQ_TYPE_LEVEL_HIGH 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can0_rx_tx>;
+ status = "disabled";
+ };
+
+ can1: can@f8010000 {
+ compatible = "atmel,at91sam9x5-can";
+ reg = <0xf8010000 0x300>;
+ interrupts = <41 IRQ_TYPE_LEVEL_HIGH 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_rx_tx>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
new file mode 100644
index 000000000000..4d4f351f1f9f
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
@@ -0,0 +1,44 @@
+/*
+ * sama5d3_emac.dtsi - Device Tree Include file for SAMA5D3 SoC with
+ * Ethernet.
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <dt-bindings/pinctrl/at91.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ ahb {
+ apb {
+ pinctrl@fffff200 {
+ macb1 {
+ pinctrl_macb1_rmii: macb1_rmii-0 {
+ atmel,pins =
+ <AT91_PIOC 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC0 periph A ETX0, conflicts with TIOA3 */
+ AT91_PIOC 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC1 periph A ETX1, conflicts with TIOB3 */
+ AT91_PIOC 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC2 periph A ERX0, conflicts with TCLK3 */
+ AT91_PIOC 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC3 periph A ERX1, conflicts with TIOA4 */
+ AT91_PIOC 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC4 periph A ETXEN, conflicts with TIOB4 */
+ AT91_PIOC 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC5 periph A ECRSDV,conflicts with TCLK4 */
+ AT91_PIOC 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC6 periph A ERXER, conflicts with TIOA5 */
+ AT91_PIOC 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC7 periph A EREFCK, conflicts with TIOB5 */
+ AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC8 periph A EMDC, conflicts with TCLK5 */
+ AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PC9 periph A EMDIO */
+ };
+ };
+ };
+
+ macb1: ethernet@f802c000 {
+ compatible = "cdns,at32ap7000-macb", "cdns,macb";
+ reg = <0xf802c000 0x100>;
+ interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_macb1_rmii>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/sama5d3_gmac.dtsi b/arch/arm/boot/dts/sama5d3_gmac.dtsi
new file mode 100644
index 000000000000..0ba8be30ccd8
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d3_gmac.dtsi
@@ -0,0 +1,77 @@
+/*
+ * sama5d3_gmac.dtsi - Device Tree Include file for SAMA5D3 SoC with
+ * Gigabit Ethernet.
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <dt-bindings/pinctrl/at91.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ ahb {
+ apb {
+ pinctrl@fffff200 {
+ macb0 {
+ pinctrl_macb0_data_rgmii: macb0_data_rgmii {
+ atmel,pins =
+ <AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB0 periph A GTX0, conflicts with PWMH0 */
+ AT91_PIOB 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB1 periph A GTX1, conflicts with PWML0 */
+ AT91_PIOB 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB2 periph A GTX2, conflicts with TK1 */
+ AT91_PIOB 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB3 periph A GTX3, conflicts with TF1 */
+ AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB4 periph A GRX0, conflicts with PWMH1 */
+ AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB5 periph A GRX1, conflicts with PWML1 */
+ AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB6 periph A GRX2, conflicts with TD1 */
+ AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PB7 periph A GRX3, conflicts with RK1 */
+ };
+ pinctrl_macb0_data_gmii: macb0_data_gmii {
+ atmel,pins =
+ <AT91_PIOB 19 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB19 periph B GTX4, conflicts with MCI1_CDA */
+ AT91_PIOB 20 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB20 periph B GTX5, conflicts with MCI1_DA0 */
+ AT91_PIOB 21 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB21 periph B GTX6, conflicts with MCI1_DA1 */
+ AT91_PIOB 22 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB22 periph B GTX7, conflicts with MCI1_DA2 */
+ AT91_PIOB 23 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB23 periph B GRX4, conflicts with MCI1_DA3 */
+ AT91_PIOB 24 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB24 periph B GRX5, conflicts with MCI1_CK */
+ AT91_PIOB 25 AT91_PERIPH_B AT91_PINCTRL_NONE /* PB25 periph B GRX6, conflicts with SCK1 */
+ AT91_PIOB 26 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB26 periph B GRX7, conflicts with CTS1 */
+ };
+ pinctrl_macb0_signal_rgmii: macb0_signal_rgmii {
+ atmel,pins =
+ <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB8 periph A GTXCK, conflicts with PWMH2 */
+ AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB9 periph A GTXEN, conflicts with PWML2 */
+ AT91_PIOB 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB11 periph A GRXCK, conflicts with RD1 */
+ AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB13 periph A GRXER, conflicts with PWML3 */
+ AT91_PIOB 16 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB16 periph A GMDC */
+ AT91_PIOB 17 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB17 periph A GMDIO */
+ AT91_PIOB 18 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PB18 periph A G125CK */
+ };
+ pinctrl_macb0_signal_gmii: macb0_signal_gmii {
+ atmel,pins =
+ <AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB9 periph A GTXEN, conflicts with PWML2 */
+ AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB10 periph A GTXER, conflicts with RF1 */
+ AT91_PIOB 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB11 periph A GRXCK, conflicts with RD1 */
+ AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB12 periph A GRXDV, conflicts with PWMH3 */
+ AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB13 periph A GRXER, conflicts with PWML3 */
+ AT91_PIOB 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB14 periph A GCRS, conflicts with CANRX1 */
+ AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB15 periph A GCOL, conflicts with CANTX1 */
+ AT91_PIOB 16 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB16 periph A GMDC */
+ AT91_PIOB 17 AT91_PERIPH_A AT91_PINCTRL_NONE /* PB17 periph A GMDIO */
+ AT91_PIOB 27 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB27 periph B G125CKO */
+ };
+
+ };
+ };
+
+ macb0: ethernet@f0028000 {
+ compatible = "cdns,pc302-gem", "cdns,gem";
+ reg = <0xf0028000 0x100>;
+ interrupts = <34 IRQ_TYPE_LEVEL_HIGH 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_macb0_data_rgmii &pinctrl_macb0_signal_rgmii>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/sama5d3_lcd.dtsi b/arch/arm/boot/dts/sama5d3_lcd.dtsi
new file mode 100644
index 000000000000..01f52a79f8ba
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d3_lcd.dtsi
@@ -0,0 +1,55 @@
+/*
+ * sama5d3_lcd.dtsi - Device Tree Include file for SAMA5D3 SoC with
+ * LCD support
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <dt-bindings/pinctrl/at91.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ ahb {
+ apb {
+ pinctrl@fffff200 {
+ lcd {
+ pinctrl_lcd: lcd-0 {
+ atmel,pins =
+ <AT91_PIOA 24 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA24 periph A LCDPWM */
+ AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA26 periph A LCDVSYNC */
+ AT91_PIOA 27 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA27 periph A LCDHSYNC */
+ AT91_PIOA 25 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA25 periph A LCDDISP */
+ AT91_PIOA 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA29 periph A LCDDEN */
+ AT91_PIOA 28 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA28 periph A LCDPCK */
+ AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA0 periph A LCDD0 pin */
+ AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA1 periph A LCDD1 pin */
+ AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA2 periph A LCDD2 pin */
+ AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA3 periph A LCDD3 pin */
+ AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA4 periph A LCDD4 pin */
+ AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA5 periph A LCDD5 pin */
+ AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA6 periph A LCDD6 pin */
+ AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA7 periph A LCDD7 pin */
+ AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA8 periph A LCDD8 pin */
+ AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA9 periph A LCDD9 pin */
+ AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA10 periph A LCDD10 pin */
+ AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA11 periph A LCDD11 pin */
+ AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA12 periph A LCDD12 pin */
+ AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA13 periph A LCDD13 pin */
+ AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA14 periph A LCDD14 pin */
+ AT91_PIOA 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA15 periph A LCDD15 pin */
+ AT91_PIOC 14 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC14 periph C LCDD16 pin */
+ AT91_PIOC 13 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC13 periph C LCDD17 pin */
+ AT91_PIOC 12 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC12 periph C LCDD18 pin */
+ AT91_PIOC 11 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC11 periph C LCDD19 pin */
+ AT91_PIOC 10 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC10 periph C LCDD20 pin */
+ AT91_PIOC 15 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC15 periph C LCDD21 pin */
+ AT91_PIOE 27 AT91_PERIPH_C AT91_PINCTRL_NONE /* PE27 periph C LCDD22 pin */
+ AT91_PIOE 28 AT91_PERIPH_C AT91_PINCTRL_NONE>; /* PE28 periph C LCDD23 pin */
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/sama5d3_mci2.dtsi b/arch/arm/boot/dts/sama5d3_mci2.dtsi
new file mode 100644
index 000000000000..38e88e39e551
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d3_mci2.dtsi
@@ -0,0 +1,47 @@
+/*
+ * sama5d3_mci2.dtsi - Device Tree Include file for SAMA5D3 SoC with
+ * 3 MMC ports
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <dt-bindings/pinctrl/at91.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ ahb {
+ apb {
+ pinctrl@fffff200 {
+ mmc2 {
+ pinctrl_mmc2_clk_cmd_dat0: mmc2_clk_cmd_dat0 {
+ atmel,pins =
+ <AT91_PIOC 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC15 periph A MCI2_CK, conflicts with PCK2 */
+ AT91_PIOC 10 AT91_PERIPH_A AT91_PINCTRL_PULL_UP /* PC10 periph A MCI2_CDA with pullup */
+ AT91_PIOC 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>; /* PC11 periph A MCI2_DA0 with pullup */
+ };
+ pinctrl_mmc2_dat1_3: mmc2_dat1_3 {
+ atmel,pins =
+ <AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC12 periph A MCI2_DA1 with pullup, conflicts with TIOA1 */
+ AT91_PIOC 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC13 periph A MCI2_DA2 with pullup, conflicts with TIOB1 */
+ AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PC14 periph A MCI2_DA3 with pullup, conflicts with TCLK1 */
+ };
+ };
+ };
+
+ mmc2: mmc@f8004000 {
+ compatible = "atmel,hsmci";
+ reg = <0xf8004000 0x600>;
+ interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dma1 2 AT91_DMA_CFG_PER_ID(1)>;
+ dma-names = "rxtx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mmc2_clk_cmd_dat0 &pinctrl_mmc2_dat1_3>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
new file mode 100644
index 000000000000..5264bb4a6998
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
@@ -0,0 +1,27 @@
+/*
+ * sama5d3_tcb1.dtsi - Device Tree Include file for SAMA5D3 SoC with
+ * 2 TC blocks.
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <dt-bindings/pinctrl/at91.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ aliases {
+ tcb1 = &tcb1;
+ };
+
+ ahb {
+ apb {
+ tcb1: timer@f8014000 {
+ compatible = "atmel,at91sam9x5-tcb";
+ reg = <0xf8014000 0x100>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH 0>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/sama5d3_uart.dtsi b/arch/arm/boot/dts/sama5d3_uart.dtsi
new file mode 100644
index 000000000000..98fcb2d57446
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d3_uart.dtsi
@@ -0,0 +1,53 @@
+/*
+ * sama5d3_uart.dtsi - Device Tree Include file for SAMA5D3 SoC with
+ * UART support
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <dt-bindings/pinctrl/at91.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ ahb {
+ apb {
+ pinctrl@fffff200 {
+ uart0 {
+ pinctrl_uart0: uart0-0 {
+ atmel,pins =
+ <AT91_PIOC 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* PC29 periph A, conflicts with PWMFI2, ISI_D8 */
+ AT91_PIOC 30 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>; /* PC30 periph A with pullup, conflicts with ISI_PCK */
+ };
+ };
+
+ uart1 {
+ pinctrl_uart1: uart1-0 {
+ atmel,pins =
+ <AT91_PIOA 30 AT91_PERIPH_B AT91_PINCTRL_NONE /* PA30 periph B, conflicts with TWD0, ISI_VSYNC */
+ AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_PULL_UP>; /* PA31 periph B with pullup, conflicts with TWCK0, ISI_HSYNC */
+ };
+ };
+ };
+
+ uart0: serial@f0024000 {
+ compatible = "atmel,at91sam9260-usart";
+ reg = <0xf0024000 0x200>;
+ interrupts = <16 IRQ_TYPE_LEVEL_HIGH 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0>;
+ status = "disabled";
+ };
+
+ uart1: serial@f8028000 {
+ compatible = "atmel,at91sam9260-usart";
+ reg = <0xf8028000 0x200>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "disabled";
+ };
+ };
+ };
+};
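Like the other optional sama5d3_*.dtsi fragments, this one only declares the extra controllers and keeps them disabled, so a board using one of the SoC variants that pulls it in (sama5d31 or sama5d35) still has to enable them. A minimal sketch of the board-side override, assuming the board actually routes these UARTs out:

    &uart0 {
        status = "okay";
    };

    &uart1 {
        status = "okay";
    };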
diff --git a/arch/arm/boot/dts/sama5d3xcm.dtsi b/arch/arm/boot/dts/sama5d3xcm.dtsi
index 31ed9e3bb649..726a0f35100c 100644
--- a/arch/arm/boot/dts/sama5d3xcm.dtsi
+++ b/arch/arm/boot/dts/sama5d3xcm.dtsi
@@ -6,7 +6,6 @@
*
* Licensed under GPLv2 or later.
*/
-#include "sama5d3.dtsi"
/ {
compatible = "atmel,samad3xcm", "atmel,sama5d3", "atmel,sama5";
diff --git a/arch/arm/boot/dts/sh73a0-kzm9g-reference.dts b/arch/arm/boot/dts/sh73a0-kzm9g-reference.dts
index 212230629f27..8ee06dd81799 100644
--- a/arch/arm/boot/dts/sh73a0-kzm9g-reference.dts
+++ b/arch/arm/boot/dts/sh73a0-kzm9g-reference.dts
@@ -108,6 +108,7 @@
};
&i2c0 {
+ status = "okay";
as3711@40 {
compatible = "ams,as3711";
reg = <0x40>;
@@ -183,6 +184,7 @@
&i2c3 {
pinctrl-0 = <&i2c3_pins>;
pinctrl-names = "default";
+ status = "okay";
};
&mmcif {
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index 3955c7606a6f..fcf26889a8a0 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -135,6 +135,7 @@
0 168 0x4
0 169 0x4
0 170 0x4>;
+ status = "disabled";
};
i2c1: i2c@e6822000 {
@@ -147,6 +148,7 @@
0 52 0x4
0 53 0x4
0 54 0x4>;
+ status = "disabled";
};
i2c2: i2c@e6824000 {
@@ -159,6 +161,7 @@
0 172 0x4
0 173 0x4
0 174 0x4>;
+ status = "disabled";
};
i2c3: i2c@e6826000 {
@@ -171,6 +174,7 @@
0 184 0x4
0 185 0x4
0 186 0x4>;
+ status = "disabled";
};
i2c4: i2c@e6828000 {
@@ -183,6 +187,7 @@
0 188 0x4
0 189 0x4
0 190 0x4>;
+ status = "disabled";
};
mmcif: mmcif@e6bd0000 {
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index e273fa993b8c..6d09b8d42fdd 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -147,7 +147,7 @@
reg = <0x58>;
};
- cfg_s2f_usr0_clk: cfg_s2f_usr0_clk {
+ cfg_h2f_usr0_clk: cfg_h2f_usr0_clk {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&main_pll>;
@@ -198,7 +198,7 @@
reg = <0x98>;
};
- s2f_usr1_clk: s2f_usr1_clk {
+ h2f_usr1_clk: h2f_usr1_clk {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&periph_pll>;
@@ -235,7 +235,7 @@
reg = <0xD0>;
};
- s2f_usr2_clk: s2f_usr2_clk {
+ h2f_usr2_clk: h2f_usr2_clk {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&sdram_pll>;
@@ -243,197 +243,197 @@
};
};
- mpu_periph_clk: mpu_periph_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&mpuclk>;
- fixed-divider = <4>;
+ mpu_periph_clk: mpu_periph_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&mpuclk>;
+ fixed-divider = <4>;
};
- mpu_l2_ram_clk: mpu_l2_ram_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&mpuclk>;
- fixed-divider = <2>;
+ mpu_l2_ram_clk: mpu_l2_ram_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&mpuclk>;
+ fixed-divider = <2>;
};
- l4_main_clk: l4_main_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&mainclk>;
- clk-gate = <0x60 0>;
+ l4_main_clk: l4_main_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&mainclk>;
+ clk-gate = <0x60 0>;
};
- l3_main_clk: l3_main_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&mainclk>;
+ l3_main_clk: l3_main_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&mainclk>;
};
- l3_mp_clk: l3_mp_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&mainclk>;
- div-reg = <0x64 0 2>;
- clk-gate = <0x60 1>;
+ l3_mp_clk: l3_mp_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&mainclk>;
+ div-reg = <0x64 0 2>;
+ clk-gate = <0x60 1>;
};
- l3_sp_clk: l3_sp_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&mainclk>;
- div-reg = <0x64 2 2>;
- };
+ l3_sp_clk: l3_sp_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&mainclk>;
+ div-reg = <0x64 2 2>;
+ };
- l4_mp_clk: l4_mp_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&mainclk>, <&per_base_clk>;
- div-reg = <0x64 4 3>;
- clk-gate = <0x60 2>;
+ l4_mp_clk: l4_mp_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&mainclk>, <&per_base_clk>;
+ div-reg = <0x64 4 3>;
+ clk-gate = <0x60 2>;
};
- l4_sp_clk: l4_sp_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&mainclk>, <&per_base_clk>;
- div-reg = <0x64 7 3>;
- clk-gate = <0x60 3>;
+ l4_sp_clk: l4_sp_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&mainclk>, <&per_base_clk>;
+ div-reg = <0x64 7 3>;
+ clk-gate = <0x60 3>;
};
- dbg_at_clk: dbg_at_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&dbg_base_clk>;
- div-reg = <0x68 0 2>;
- clk-gate = <0x60 4>;
+ dbg_at_clk: dbg_at_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&dbg_base_clk>;
+ div-reg = <0x68 0 2>;
+ clk-gate = <0x60 4>;
};
- dbg_clk: dbg_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&dbg_base_clk>;
- div-reg = <0x68 2 2>;
- clk-gate = <0x60 5>;
+ dbg_clk: dbg_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&dbg_base_clk>;
+ div-reg = <0x68 2 2>;
+ clk-gate = <0x60 5>;
};
- dbg_trace_clk: dbg_trace_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&dbg_base_clk>;
- div-reg = <0x6C 0 3>;
- clk-gate = <0x60 6>;
+ dbg_trace_clk: dbg_trace_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&dbg_base_clk>;
+ div-reg = <0x6C 0 3>;
+ clk-gate = <0x60 6>;
};
- dbg_timer_clk: dbg_timer_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&dbg_base_clk>;
- clk-gate = <0x60 7>;
+ dbg_timer_clk: dbg_timer_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&dbg_base_clk>;
+ clk-gate = <0x60 7>;
};
- cfg_clk: cfg_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&cfg_s2f_usr0_clk>;
- clk-gate = <0x60 8>;
+ cfg_clk: cfg_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&cfg_h2f_usr0_clk>;
+ clk-gate = <0x60 8>;
};
- s2f_user0_clk: s2f_user0_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&cfg_s2f_usr0_clk>;
- clk-gate = <0x60 9>;
+ h2f_user0_clk: h2f_user0_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&cfg_h2f_usr0_clk>;
+ clk-gate = <0x60 9>;
};
- emac_0_clk: emac_0_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&emac0_clk>;
- clk-gate = <0xa0 0>;
+ emac_0_clk: emac_0_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&emac0_clk>;
+ clk-gate = <0xa0 0>;
};
- emac_1_clk: emac_1_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&emac1_clk>;
- clk-gate = <0xa0 1>;
+ emac_1_clk: emac_1_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&emac1_clk>;
+ clk-gate = <0xa0 1>;
};
- usb_mp_clk: usb_mp_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&per_base_clk>;
- clk-gate = <0xa0 2>;
- div-reg = <0xa4 0 3>;
+ usb_mp_clk: usb_mp_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&per_base_clk>;
+ clk-gate = <0xa0 2>;
+ div-reg = <0xa4 0 3>;
};
- spi_m_clk: spi_m_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&per_base_clk>;
- clk-gate = <0xa0 3>;
- div-reg = <0xa4 3 3>;
+ spi_m_clk: spi_m_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&per_base_clk>;
+ clk-gate = <0xa0 3>;
+ div-reg = <0xa4 3 3>;
};
- can0_clk: can0_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&per_base_clk>;
- clk-gate = <0xa0 4>;
- div-reg = <0xa4 6 3>;
+ can0_clk: can0_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&per_base_clk>;
+ clk-gate = <0xa0 4>;
+ div-reg = <0xa4 6 3>;
};
- can1_clk: can1_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&per_base_clk>;
- clk-gate = <0xa0 5>;
- div-reg = <0xa4 9 3>;
+ can1_clk: can1_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&per_base_clk>;
+ clk-gate = <0xa0 5>;
+ div-reg = <0xa4 9 3>;
};
- gpio_db_clk: gpio_db_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&per_base_clk>;
- clk-gate = <0xa0 6>;
- div-reg = <0xa8 0 24>;
+ gpio_db_clk: gpio_db_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&per_base_clk>;
+ clk-gate = <0xa0 6>;
+ div-reg = <0xa8 0 24>;
};
- s2f_user1_clk: s2f_user1_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&s2f_usr1_clk>;
- clk-gate = <0xa0 7>;
+ h2f_user1_clk: h2f_user1_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&h2f_usr1_clk>;
+ clk-gate = <0xa0 7>;
};
- sdmmc_clk: sdmmc_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&f2s_periph_ref_clk>, <&main_nand_sdmmc_clk>, <&per_nand_mmc_clk>;
- clk-gate = <0xa0 8>;
+ sdmmc_clk: sdmmc_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&f2s_periph_ref_clk>, <&main_nand_sdmmc_clk>, <&per_nand_mmc_clk>;
+ clk-gate = <0xa0 8>;
};
- nand_x_clk: nand_x_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&f2s_periph_ref_clk>, <&main_nand_sdmmc_clk>, <&per_nand_mmc_clk>;
- clk-gate = <0xa0 9>;
+ nand_x_clk: nand_x_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&f2s_periph_ref_clk>, <&main_nand_sdmmc_clk>, <&per_nand_mmc_clk>;
+ clk-gate = <0xa0 9>;
};
- nand_clk: nand_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&f2s_periph_ref_clk>, <&main_nand_sdmmc_clk>, <&per_nand_mmc_clk>;
- clk-gate = <0xa0 10>;
- fixed-divider = <4>;
+ nand_clk: nand_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&f2s_periph_ref_clk>, <&main_nand_sdmmc_clk>, <&per_nand_mmc_clk>;
+ clk-gate = <0xa0 10>;
+ fixed-divider = <4>;
};
- qspi_clk: qspi_clk {
- #clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
- clocks = <&f2s_periph_ref_clk>, <&main_qspi_clk>, <&per_qspi_clk>;
- clk-gate = <0xa0 11>;
+ qspi_clk: qspi_clk {
+ #clock-cells = <0>;
+ compatible = "altr,socfpga-gate-clk";
+ clocks = <&f2s_periph_ref_clk>, <&main_qspi_clk>, <&per_qspi_clk>;
+ clk-gate = <0xa0 11>;
};
};
};
@@ -473,6 +473,7 @@
compatible = "arm,cortex-a9-twd-timer";
reg = <0xfffec600 0x100>;
interrupts = <1 13 0xf04>;
+ clocks = <&mpu_periph_clk>;
};
timer0: timer0@ffc08000 {
@@ -516,9 +517,9 @@
};
rstmgr@ffd05000 {
- compatible = "altr,rst-mgr";
- reg = <0xffd05000 0x1000>;
- };
+ compatible = "altr,rst-mgr";
+ reg = <0xffd05000 0x1000>;
+ };
sysmgr@ffd08000 {
compatible = "altr,sys-mgr";
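The re-indented gate-clock nodes in socfpga.dtsi all share the same property shape. As read here, clk-gate appears to encode <clock-manager register offset, enable bit>, div-reg <register offset, bit shift, field width>, and fixed-divider a constant post-divider; treat the decode below as an annotated sketch of one node from the hunk, not as binding documentation:

	l3_mp_clk: l3_mp_clk {
		#clock-cells = <0>;
		compatible = "altr,socfpga-gate-clk";
		clocks = <&mainclk>;
		div-reg = <0x64 0 2>;	/* divider field: register 0x64, bits [1:0] */
		clk-gate = <0x60 1>;	/* enable gate: register 0x60, bit 1 */
	};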
diff --git a/arch/arm/boot/dts/socfpga_arria5.dtsi b/arch/arm/boot/dts/socfpga_arria5.dtsi
new file mode 100644
index 000000000000..a85b4043f888
--- /dev/null
+++ b/arch/arm/boot/dts/socfpga_arria5.dtsi
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2013 Altera Corporation <www.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+/include/ "socfpga.dtsi"
+
+/ {
+ soc {
+ clkmgr@ffd04000 {
+ clocks {
+ osc1 {
+ clock-frequency = <25000000>;
+ };
+ };
+ };
+
+ serial0@ffc02000 {
+ clock-frequency = <100000000>;
+ };
+
+ serial1@ffc03000 {
+ clock-frequency = <100000000>;
+ };
+
+ sysmgr@ffd08000 {
+ cpu1-start-addr = <0xffd080c4>;
+ };
+
+ timer0@ffc08000 {
+ clock-frequency = <100000000>;
+ };
+
+ timer1@ffc09000 {
+ clock-frequency = <100000000>;
+ };
+
+ timer2@ffd00000 {
+ clock-frequency = <25000000>;
+ };
+
+ timer3@ffd01000 {
+ clock-frequency = <25000000>;
+ };
+ };
+};
diff --git a/arch/mips/include/asm/mach-powertv/powertv-clock.h b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
index 6f3e9a0fcf8c..5beffb2265f4 100644
--- a/arch/mips/include/asm/mach-powertv/powertv-clock.h
+++ b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 Cisco Systems, Inc.
+ * Copyright (C) 2013 Altera Corporation <www.altera.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,18 +12,29 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-/*
- * Local definitions for the powertv PCI code
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _POWERTV_PCI_POWERTV_PCI_H_
-#define _POWERTV_PCI_POWERTV_PCI_H_
-extern int asic_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
-extern int asic_pcie_init(void);
-extern int asic_pcie_init(void);
+/include/ "socfpga_arria5.dtsi"
+
+/ {
+ model = "Altera SOCFPGA Arria V SoC Development Kit";
+ compatible = "altr,socfpga-arria5", "altr,socfpga";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ };
+
+ memory {
+ name = "memory";
+ device_type = "memory";
+ reg = <0x0 0x40000000>; /* 1GB */
+ };
-extern int log_level;
-#endif
+ aliases {
+ /* this allows the ethaddr uboot environment variable contents
+ * to be added to the gmac1 device tree blob.
+ */
+ ethernet0 = &gmac1;
+ };
+};
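The aliases comment in socfpga_arria5_socdk.dts is terse; what it relies on is the bootloader's FDT fix-up, which (in U-Boot at least) resolves the ethernetN aliases and writes the MAC address from the ethaddr environment variable into the node the alias points at. A hedged sketch of what gmac1 is expected to look like after that fix-up runs (the property value here is a placeholder, not part of the patch):

	aliases {
		ethernet0 = &gmac1;	/* "ethaddr" lands in this node */
	};

	&gmac1 {
		local-mac-address = [00 00 00 00 00 00];	/* filled in by the bootloader */
	};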
diff --git a/arch/arm/boot/dts/socfpga_cyclone5.dts b/arch/arm/boot/dts/socfpga_cyclone5.dtsi
index 973999d2c697..a8716f6dbe2e 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5.dtsi
@@ -19,26 +19,6 @@
/include/ "socfpga.dtsi"
/ {
- model = "Altera SOCFPGA Cyclone V";
- compatible = "altr,socfpga-cyclone5", "altr,socfpga";
-
- chosen {
- bootargs = "console=ttyS0,57600";
- };
-
- memory {
- name = "memory";
- device_type = "memory";
- reg = <0x0 0x40000000>; /* 1GB */
- };
-
- aliases {
- /* this allow the ethaddr uboot environmnet variable contents
- * to be added to the gmac1 device tree blob.
- */
- ethernet0 = &gmac1;
- };
-
soc {
clkmgr@ffd04000 {
clocks {
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
new file mode 100644
index 000000000000..2ee52ab8cabb
--- /dev/null
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2012 Altera Corporation <www.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/include/ "socfpga_cyclone5.dtsi"
+
+/ {
+ model = "Altera SOCFPGA Cyclone V SoC Development Kit";
+ compatible = "altr,socfpga-cyclone5", "altr,socfpga";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ };
+
+ memory {
+ name = "memory";
+ device_type = "memory";
+ reg = <0x0 0x40000000>; /* 1GB */
+ };
+
+ aliases {
+ /* this allows the ethaddr uboot environment variable contents
+ * to be added to the gmac1 device tree blob.
+ */
+ ethernet0 = &gmac1;
+ };
+};
diff --git a/arch/mips/include/asm/mach-powertv/irq.h b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
index 4bd5d0c61a91..50b99a2c12ae 100644
--- a/arch/mips/include/asm/mach-powertv/irq.h
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 Cisco Systems, Inc.
+ * Copyright (C) 2013 Steffen Trumtrar <s.trumtrar@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,14 +12,26 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _ASM_MACH_POWERTV_IRQ_H
-#define _ASM_MACH_POWERTV_IRQ_H
-#include <asm/mach-powertv/interrupts.h>
+/include/ "socfpga_cyclone5.dtsi"
-#define MIPS_CPU_IRQ_BASE ibase
-#define NR_IRQS 127
-#endif
+/ {
+ model = "Terasic SoCkit";
+ compatible = "altr,socfpga-cyclone5", "altr,socfpga";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ };
+
+ memory {
+ name = "memory";
+ device_type = "memory";
+ reg = <0x0 0x40000000>; /* 1GB */
+ };
+};
+
+&gmac1 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 1c1091eedade..7da99fe497e1 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -10,6 +10,7 @@
*/
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/mfd/dbx500-prcmu.h>
#include "skeleton.dtsi"
/ {
@@ -42,16 +43,56 @@
interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
};
+
+ clocks {
+ compatible = "stericsson,u8500-clks";
+
+ prcmu_clk: prcmu-clock {
+ #clock-cells = <1>;
+ };
+
+ prcc_pclk: prcc-periph-clock {
+ #clock-cells = <2>;
+ };
+
+ prcc_kclk: prcc-kernel-clock {
+ #clock-cells = <2>;
+ };
+
+ rtc_clk: rtc32k-clock {
+ #clock-cells = <0>;
+ };
+
+ smp_twd_clk: smp-twd-clock {
+ #clock-cells = <0>;
+ };
+ };
+
+ mtu@a03c6000 {
+ /* Nomadik System Timer */
+ compatible = "st,nomadik-mtu";
+ reg = <0xa03c6000 0x1000>;
+ interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&prcmu_clk PRCMU_TIMCLK>, <&prcc_pclk 6 6>;
+ clock-names = "timclk", "apb_pclk";
+ };
+
timer@a0410600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0xa0410600 0x20>;
interrupts = <1 13 0x304>; /* IRQ level high per-CPU */
+
+ clocks = <&smp_twd_clk>;
};
rtc@80154000 {
compatible = "arm,rtc-pl031", "arm,primecell";
reg = <0x80154000 0x1000>;
interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&rtc_clk>;
+ clock-names = "apb_pclk";
};
gpio0: gpio@8012e000 {
@@ -65,6 +106,8 @@
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <0>;
+
+ clocks = <&prcc_pclk 1 9>;
};
gpio1: gpio@8012e080 {
@@ -78,6 +121,8 @@
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <1>;
+
+ clocks = <&prcc_pclk 1 9>;
};
gpio2: gpio@8000e000 {
@@ -91,6 +136,8 @@
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <2>;
+
+ clocks = <&prcc_pclk 3 8>;
};
gpio3: gpio@8000e080 {
@@ -104,6 +151,8 @@
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <3>;
+
+ clocks = <&prcc_pclk 3 8>;
};
gpio4: gpio@8000e100 {
@@ -117,6 +166,8 @@
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <4>;
+
+ clocks = <&prcc_pclk 3 8>;
};
gpio5: gpio@8000e180 {
@@ -130,6 +181,8 @@
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <5>;
+
+ clocks = <&prcc_pclk 3 8>;
};
gpio6: gpio@8011e000 {
@@ -143,6 +196,8 @@
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <6>;
+
+ clocks = <&prcc_pclk 2 11>;
};
gpio7: gpio@8011e080 {
@@ -156,6 +211,8 @@
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <7>;
+
+ clocks = <&prcc_pclk 2 11>;
};
gpio8: gpio@a03fe000 {
@@ -169,6 +226,8 @@
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <8>;
+
+ clocks = <&prcc_pclk 5 1>;
};
pinctrl {
@@ -177,8 +236,7 @@
};
usb_per5@a03e0000 {
- compatible = "stericsson,db8500-musb",
- "mentor,musb";
+ compatible = "stericsson,db8500-musb";
reg = <0xa03e0000 0x10000>;
interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "mc";
@@ -210,6 +268,8 @@
"iep_6_14", "oep_6_14",
"iep_7_15", "oep_7_15",
"iep_8", "oep_8";
+
+ clocks = <&prcc_pclk 5 0>;
};
dma: dma-controller@801C0000 {
@@ -220,6 +280,8 @@
#dma-cells = <3>;
memcpy-channels = <56 57 58 59 60>;
+
+ clocks = <&prcmu_clk PRCMU_DMACLK>;
};
prcmu: prcmu@80157000 {
@@ -238,6 +300,13 @@
reg = <0x80157450 0xC>;
};
+ cpufreq {
+ compatible = "stericsson,cpufreq-ux500";
+ clocks = <&prcmu_clk PRCMU_ARMSS>;
+ clock-names = "armss";
+ status = "disabled";
+ };
+
thermal@801573c0 {
compatible = "stericsson,db8500-thermal";
reg = <0x801573c0 0x40>;
@@ -559,65 +628,74 @@
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x80004000 0x1000>;
interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
- arm,primecell-periphid = <0x180024>;
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
clock-frequency = <400000>;
+ clocks = <&prcc_kclk 3 3>, <&prcc_pclk 3 3>;
+ clock-names = "i2cclk", "apb_pclk";
};
i2c@80122000 {
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x80122000 0x1000>;
interrupts = <0 22 IRQ_TYPE_LEVEL_HIGH>;
- arm,primecell-periphid = <0x180024>;
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
clock-frequency = <400000>;
+
+ clocks = <&prcc_kclk 1 2>, <&prcc_pclk 1 2>;
+ clock-names = "i2cclk", "apb_pclk";
};
i2c@80128000 {
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x80128000 0x1000>;
interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>;
- arm,primecell-periphid = <0x180024>;
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
clock-frequency = <400000>;
+
+ clocks = <&prcc_kclk 1 6>, <&prcc_pclk 1 6>;
+ clock-names = "i2cclk", "apb_pclk";
};
i2c@80110000 {
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x80110000 0x1000>;
interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>;
- arm,primecell-periphid = <0x180024>;
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
clock-frequency = <400000>;
+
+ clocks = <&prcc_kclk 2 0>, <&prcc_pclk 2 0>;
+ clock-names = "i2cclk", "apb_pclk";
};
i2c@8012a000 {
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x8012a000 0x1000>;
interrupts = <0 51 IRQ_TYPE_LEVEL_HIGH>;
- arm,primecell-periphid = <0x180024>;
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
clock-frequency = <400000>;
+
+ clocks = <&prcc_kclk 1 9>, <&prcc_pclk 1 10>;
+ clock-names = "i2cclk", "apb_pclk";
};
ssp@80002000 {
@@ -626,7 +704,80 @@
interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- status = "disabled";
+ clocks = <&prcc_kclk 3 1>, <&prcc_pclk 3 1>;
+ clock-names = "ssp0clk", "apb_pclk";
+ dmas = <&dma 8 0 0x2>, /* Logical - DevToMem */
+ <&dma 8 0 0x0>; /* Logical - MemToDev */
+ dma-names = "rx", "tx";
+ };
+
+ ssp@80003000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x80003000 0x1000>;
+ interrupts = <0 52 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&prcc_kclk 3 2>, <&prcc_pclk 3 2>;
+ clock-names = "ssp1clk", "apb_pclk";
+ dmas = <&dma 9 0 0x2>, /* Logical - DevToMem */
+ <&dma 9 0 0x0>; /* Logical - MemToDev */
+ dma-names = "rx", "tx";
+ };
+
+ spi@8011a000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x8011a000 0x1000>;
+ interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* Same clock wired to kernel and pclk */
+ clocks = <&prcc_pclk 2 8>, <&prcc_pclk 2 8>;
+ clock-names = "spi0clk", "apb_pclk";
+ dmas = <&dma 0 0 0x2>, /* Logical - DevToMem */
+ <&dma 0 0 0x0>; /* Logical - MemToDev */
+ dma-names = "rx", "tx";
+ };
+
+ spi@80112000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x80112000 0x1000>;
+ interrupts = <0 96 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* Same clock wired to kernel and pclk */
+ clocks = <&prcc_pclk 2 2>, <&prcc_pclk 2 2>;
+ clock-names = "spi1clk", "apb_pclk";
+ dmas = <&dma 35 0 0x2>, /* Logical - DevToMem */
+ <&dma 35 0 0x0>; /* Logical - MemToDev */
+ dma-names = "rx", "tx";
+ };
+
+ spi@80111000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x80111000 0x1000>;
+ interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* Same clock wired to kernel and pclk */
+ clocks = <&prcc_pclk 2 1>, <&prcc_pclk 2 1>;
+ clock-names = "spi2clk", "apb_pclk";
+ dmas = <&dma 33 0 0x2>, /* Logical - DevToMem */
+ <&dma 33 0 0x0>; /* Logical - MemToDev */
+ dma-names = "rx", "tx";
+ };
+
+ spi@80129000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x80129000 0x1000>;
+ interrupts = <0 49 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* Same clock wired to kernel and pclk */
+ clocks = <&prcc_pclk 1 7>, <&prcc_pclk 1 7>;
+ clock-names = "spi3clk", "apb_pclk";
+ dmas = <&dma 40 0 0x2>, /* Logical - DevToMem */
+ <&dma 40 0 0x0>; /* Logical - MemToDev */
+ dma-names = "rx", "tx";
};
uart@80120000 {
@@ -638,6 +789,9 @@
<&dma 13 0 0x0>; /* Logical - MemToDev */
dma-names = "rx", "tx";
+ clocks = <&prcc_kclk 1 0>, <&prcc_pclk 1 0>;
+ clock-names = "uart", "apb_pclk";
+
status = "disabled";
};
@@ -650,6 +804,9 @@
<&dma 12 0 0x0>; /* Logical - MemToDev */
dma-names = "rx", "tx";
+ clocks = <&prcc_kclk 1 1>, <&prcc_pclk 1 1>;
+ clock-names = "uart", "apb_pclk";
+
status = "disabled";
};
@@ -662,6 +819,9 @@
<&dma 11 0 0x0>; /* Logical - MemToDev */
dma-names = "rx", "tx";
+ clocks = <&prcc_kclk 3 6>, <&prcc_pclk 3 6>;
+ clock-names = "uart", "apb_pclk";
+
status = "disabled";
};
@@ -674,6 +834,9 @@
<&dma 29 0 0x0>; /* Logical - MemToDev */
dma-names = "rx", "tx";
+ clocks = <&prcc_kclk 1 5>, <&prcc_pclk 1 5>;
+ clock-names = "sdi", "apb_pclk";
+
status = "disabled";
};
@@ -686,6 +849,9 @@
<&dma 32 0 0x0>; /* Logical - MemToDev */
dma-names = "rx", "tx";
+ clocks = <&prcc_kclk 2 4>, <&prcc_pclk 2 6>;
+ clock-names = "sdi", "apb_pclk";
+
status = "disabled";
};
@@ -698,6 +864,9 @@
<&dma 28 0 0x0>; /* Logical - MemToDev */
dma-names = "rx", "tx";
+ clocks = <&prcc_kclk 3 4>, <&prcc_pclk 3 4>;
+ clock-names = "sdi", "apb_pclk";
+
status = "disabled";
};
@@ -705,6 +874,10 @@
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80119000 0x1000>;
interrupts = <0 59 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&prcc_kclk 2 5>, <&prcc_pclk 2 7>;
+ clock-names = "sdi", "apb_pclk";
+
status = "disabled";
};
@@ -717,6 +890,9 @@
<&dma 42 0 0x0>; /* Logical - MemToDev */
dma-names = "rx", "tx";
+ clocks = <&prcc_kclk 2 2>, <&prcc_pclk 2 4>;
+ clock-names = "sdi", "apb_pclk";
+
status = "disabled";
};
@@ -724,6 +900,10 @@
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80008000 0x1000>;
interrupts = <0 100 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&prcc_kclk 3 7>, <&prcc_pclk 3 7>;
+ clock-names = "sdi", "apb_pclk";
+
status = "disabled";
};
@@ -732,6 +912,10 @@
reg = <0x80123000 0x1000>;
interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
v-ape-supply = <&db8500_vape_reg>;
+
+ clocks = <&prcc_kclk 1 3>, <&prcc_pclk 1 3>;
+ clock-names = "msp", "apb_pclk";
+
status = "disabled";
};
@@ -740,6 +924,10 @@
reg = <0x80124000 0x1000>;
interrupts = <0 62 IRQ_TYPE_LEVEL_HIGH>;
v-ape-supply = <&db8500_vape_reg>;
+
+ clocks = <&prcc_kclk 1 4>, <&prcc_pclk 1 4>;
+ clock-names = "msp", "apb_pclk";
+
status = "disabled";
};
@@ -749,6 +937,10 @@
reg = <0x80117000 0x1000>;
interrupts = <0 98 IRQ_TYPE_LEVEL_HIGH>;
v-ape-supply = <&db8500_vape_reg>;
+
+ clocks = <&prcc_kclk 2 3>, <&prcc_pclk 2 5>;
+ clock-names = "msp", "apb_pclk";
+
status = "disabled";
};
@@ -757,6 +949,10 @@
reg = <0x80125000 0x1000>;
interrupts = <0 62 IRQ_TYPE_LEVEL_HIGH>;
v-ape-supply = <&db8500_vape_reg>;
+
+ clocks = <&prcc_kclk 1 10>, <&prcc_pclk 1 11>;
+ clock-names = "msp", "apb_pclk";
+
status = "disabled";
};
@@ -772,7 +968,7 @@
cpufreq-cooling {
compatible = "stericsson,db8500-cpufreq-cooling";
status = "disabled";
- };
+ };
vmmci: regulator-gpio {
compatible = "regulator-gpio";
@@ -797,6 +993,7 @@
interrupts = <0 15 IRQ_TYPE_LEVEL_HIGH>;
v-ape-supply = <&db8500_vape_reg>;
+ clocks = <&prcc_pclk 6 1>;
};
hash@a03c2000 {
@@ -804,6 +1001,7 @@
reg = <0xa03c2000 0x1000>;
v-ape-supply = <&db8500_vape_reg>;
+ clocks = <&prcc_pclk 6 2>;
};
};
};
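Every consumer added to ste-dbx5x0.dtsi passes the PRCC clocks a two-cell specifier. Judging from the values used in this file (an inference from the hunks, not the binding text), the first cell selects the PRCC peripheral block (1, 2, 3, 5 or 6) and the second the clock line within it, while prcmu_clk takes a single index from dt-bindings/mfd/dbx500-prcmu.h. A representative consumer, mirroring the first i2c hunk:

	i2c@80004000 {
		/* ... */
		clocks = <&prcc_kclk 3 3>, <&prcc_pclk 3 3>;	/* kernel clock and bus clock: PRCC block 3, line 3 */
		clock-names = "i2cclk", "apb_pclk";
	};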
diff --git a/arch/arm/boot/dts/ste-stuib.dtsi b/arch/arm/boot/dts/ste-href-stuib.dtsi
index 524e33240ad4..76704ec0ffcc 100644
--- a/arch/arm/boot/dts/ste-stuib.dtsi
+++ b/arch/arm/boot/dts/ste-href-stuib.dtsi
@@ -57,7 +57,6 @@
bu21013_tp@5c {
compatible = "rohm,bu21013_tp";
reg = <0x5c>;
- touch-gpio = <&gpio2 20 0x4>;
avdd-supply = <&ab8500_ldo_aux1_reg>;
rohm,touch-max-x = <384>;
@@ -68,7 +67,6 @@
bu21013_tp@5d {
compatible = "rohm,bu21013_tp";
reg = <0x5d>;
- touch-gpio = <&gpio2 20 0x4>;
avdd-supply = <&ab8500_ldo_aux1_reg>;
rohm,touch-max-x = <384>;
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
new file mode 100644
index 000000000000..76d3ef13175f
--- /dev/null
+++ b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2012 ST-Ericsson AB
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * Device Tree for the TVK1281618 UIB
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ soc {
+ /* Add Synaptics touch screen, TC35892 keypad etc here */
+ i2c@80004000 {
+ tc3589x@44 {
+ compatible = "tc3589x";
+ reg = <0x44>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <26 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ tc3589x_gpio {
+ compatible = "tc3589x-gpio";
+ interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index 370e03f5e7b2..aa3f02060fdd 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -41,28 +41,6 @@
status = "okay";
};
- i2c@80004000 {
- tc3589x@42 {
- compatible = "tc3589x";
- reg = <0x42>;
- interrupt-parent = <&gpio6>;
- interrupts = <25 IRQ_TYPE_EDGE_RISING>;
-
- interrupt-controller;
- #interrupt-cells = <2>;
-
- tc3589x_gpio: tc3589x_gpio {
- compatible = "tc3589x-gpio";
- interrupts = <0 IRQ_TYPE_EDGE_RISING>;
-
- interrupt-controller;
- #interrupt-cells = <2>;
- gpio-controller;
- #gpio-cells = <2>;
- };
- };
- };
-
i2c@80128000 {
lp5521@33 {
compatible = "national,lp5521";
@@ -72,6 +50,7 @@
chan0 {
led-cur = /bits/ 8 <0x2f>;
max-cur = /bits/ 8 <0x5f>;
+ linux,default-trigger = "heartbeat";
};
chan1 {
led-cur = /bits/ 8 <0x2f>;
@@ -102,7 +81,7 @@
};
bh1780@29 {
compatible = "rohm,bh1780gli";
- reg = <0x33>;
+ reg = <0x29>;
};
};
@@ -167,89 +146,11 @@
};
prcmu@80157000 {
- db8500-prcmu-regulators {
- db8500_vape_reg: db8500_vape {
- regulator-name = "db8500-vape";
- };
-
- db8500_varm_reg: db8500_varm {
- regulator-name = "db8500-varm";
- };
-
- db8500_vmodem_reg: db8500_vmodem {
- regulator-name = "db8500-vmodem";
- };
-
- db8500_vpll_reg: db8500_vpll {
- regulator-name = "db8500-vpll";
- };
-
- db8500_vsmps1_reg: db8500_vsmps1 {
- regulator-name = "db8500-vsmps1";
- };
-
- db8500_vsmps2_reg: db8500_vsmps2 {
- regulator-name = "db8500-vsmps2";
- };
-
- db8500_vsmps3_reg: db8500_vsmps3 {
- regulator-name = "db8500-vsmps3";
- };
-
- db8500_vrf1_reg: db8500_vrf1 {
- regulator-name = "db8500-vrf1";
- };
-
- db8500_sva_mmdsp_reg: db8500_sva_mmdsp {
- regulator-name = "db8500-sva-mmdsp";
- };
-
- db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret {
- regulator-name = "db8500-sva-mmdsp-ret";
- };
-
- db8500_sva_pipe_reg: db8500_sva_pipe {
- regulator-name = "db8500_sva_pipe";
- };
-
- db8500_sia_mmdsp_reg: db8500_sia_mmdsp {
- regulator-name = "db8500_sia_mmdsp";
- };
-
- db8500_sia_mmdsp_ret_reg: db8500_sia_mmdsp_ret {
- regulator-name = "db8500-sia-mmdsp-ret";
- };
-
- db8500_sia_pipe_reg: db8500_sia_pipe {
- regulator-name = "db8500-sia-pipe";
- };
-
- db8500_sga_reg: db8500_sga {
- regulator-name = "db8500-sga";
- };
-
- db8500_b2r2_mcde_reg: db8500_b2r2_mcde {
- regulator-name = "db8500-b2r2-mcde";
- };
-
- db8500_esram12_reg: db8500_esram12 {
- regulator-name = "db8500-esram12";
- };
-
- db8500_esram12_ret_reg: db8500_esram12_ret {
- regulator-name = "db8500-esram12-ret";
- };
-
- db8500_esram34_reg: db8500_esram34 {
- regulator-name = "db8500-esram34";
+ ab8500 {
+ ab8500-gpio {
+ compatible = "stericsson,ab8500-gpio";
};
- db8500_esram34_ret_reg: db8500_esram34_ret {
- regulator-name = "db8500-esram34-ret";
- };
- };
-
- ab8500 {
ab8500-regulators {
ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
regulator-name = "V-DISPLAY";
diff --git a/arch/arm/boot/dts/ste-hrefprev60-stuib.dts b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
new file mode 100644
index 000000000000..2b1cb5b584b6
--- /dev/null
+++ b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012 ST-Ericsson AB
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "ste-hrefprev60.dtsi"
+#include "ste-href-stuib.dtsi"
+
+/ {
+ model = "ST-Ericsson HREF (pre-v60) and ST UIB";
+ compatible = "st-ericsson,mop500", "st-ericsson,u8500";
+
+ soc {
+ /* Reset line for the BU21013 touchscreen */
+ i2c@80110000 {
+ /* Only one of these will be used */
+ bu21013_tp@5c {
+ touch-gpio = <&gpio2 12 0x4>;
+ reset-gpio = <&tc3589x_gpio 13 0x4>;
+ };
+ bu21013_tp@5d {
+ touch-gpio = <&gpio2 12 0x4>;
+ reset-gpio = <&tc3589x_gpio 13 0x4>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
new file mode 100644
index 000000000000..59523f866812
--- /dev/null
+++ b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2012 ST-Ericsson AB
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "ste-hrefprev60.dtsi"
+#include "ste-href-tvk1281618.dtsi"
+
+/ {
+ model = "ST-Ericsson HREF (pre-v60) and TVK1281618 UIB";
+ compatible = "st-ericsson,mop500", "st-ericsson,u8500";
+};
diff --git a/arch/arm/boot/dts/ste-hrefprev60.dts b/arch/arm/boot/dts/ste-hrefprev60.dtsi
index d8d3b99ab007..b2cd7bc2752f 100644
--- a/arch/arm/boot/dts/ste-hrefprev60.dts
+++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi
@@ -7,17 +7,14 @@
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
+ *
+ * Device Tree for the HREF+ prior to the v60 variant.
*/
-/dts-v1/;
#include "ste-dbx5x0.dtsi"
#include "ste-href.dtsi"
-#include "ste-stuib.dtsi"
/ {
- model = "ST-Ericsson HREF (pre-v60) platform with Device Tree";
- compatible = "st-ericsson,mop500", "st-ericsson,u8500";
-
gpio_keys {
button@1 {
gpios = <&tc3589x_gpio 7 0x4>;
@@ -25,24 +22,30 @@
};
soc {
- prcmu@80157000 {
- ab8500@5 {
- ab8500-gpio {
- compatible = "stericsson,ab8500-gpio";
- };
- };
- };
-
i2c@80004000 {
tps61052@33 {
compatible = "tps61052";
reg = <0x33>;
};
- };
- i2c@80110000 {
- bu21013_tp@5c {
- reset-gpio = <&tc3589x_gpio 13 0x4>;
+ tc3589x@42 {
+ compatible = "tc3589x";
+ reg = <0x42>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <25 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ tc3589x_gpio: tc3589x_gpio {
+ compatible = "tc3589x-gpio";
+ interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
};
};
diff --git a/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
new file mode 100644
index 000000000000..8c6a2de56cf1
--- /dev/null
+++ b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 ST-Ericsson AB
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * Device Tree for the HREF version 60 or later with the ST UIB
+ */
+
+/dts-v1/;
+#include "ste-hrefv60plus.dtsi"
+#include "ste-href-stuib.dtsi"
+
+/ {
+ model = "ST-Ericsson HREF (v60+) and ST UIB";
+ compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
+
+ soc {
+ /* Reset line for the BU21013 touchscreen */
+ i2c@80110000 {
+ /* Only one of these will be used */
+ bu21013_tp@5c {
+ touch-gpio = <&gpio2 20 0x4>;
+ reset-gpio = <&gpio4 17 0x4>;
+ };
+ bu21013_tp@5d {
+ touch-gpio = <&gpio2 20 0x4>;
+ reset-gpio = <&gpio4 17 0x4>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
new file mode 100644
index 000000000000..d53cccdce776
--- /dev/null
+++ b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2012 ST-Ericsson AB
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * Device Tree for the HREF version 60 or later with the TVK1281618 UIB
+ */
+
+/dts-v1/;
+#include "ste-hrefv60plus.dtsi"
+#include "ste-href-tvk1281618.dtsi"
+
+/ {
+ model = "ST-Ericsson HREF (v60+) and TVK1281618 UIB";
+ compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
+};
diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dts b/arch/arm/boot/dts/ste-hrefv60plus.dts
deleted file mode 100644
index 6e52ebbf113f..000000000000
--- a/arch/arm/boot/dts/ste-hrefv60plus.dts
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright 2012 ST-Ericsson AB
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-/dts-v1/;
-#include "ste-dbx5x0.dtsi"
-#include "ste-href.dtsi"
-#include "ste-stuib.dtsi"
-
-/ {
- model = "ST-Ericsson HREF (v60+) platform with Device Tree";
- compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
-
- gpio_keys {
- button@1 {
- gpios = <&gpio6 25 0x4>;
- };
- };
-
- soc {
- i2c@80110000 {
- bu21013_tp@0x5c {
- reset-gpio = <&gpio4 15 0x4>;
- };
- };
-
- // External Micro SD slot
- sdi0_per1@80126000 {
- arm,primecell-periphid = <0x10480180>;
- max-frequency = <100000000>;
- bus-width = <4>;
- mmc-cap-sd-highspeed;
- mmc-cap-mmc-highspeed;
- vmmc-supply = <&ab8500_ldo_aux3_reg>;
-
- cd-gpios = <&tc3589x_gpio 3 0x4>;
-
- status = "okay";
- };
-
- // WLAN SDIO channel
- sdi1_per2@80118000 {
- arm,primecell-periphid = <0x10480180>;
- max-frequency = <100000000>;
- bus-width = <4>;
-
- status = "okay";
- };
-
- // PoP:ed eMMC
- sdi2_per3@80005000 {
- arm,primecell-periphid = <0x10480180>;
- max-frequency = <100000000>;
- bus-width = <8>;
- mmc-cap-mmc-highspeed;
-
- status = "okay";
- };
-
- // On-board eMMC
- sdi4_per2@80114000 {
- arm,primecell-periphid = <0x10480180>;
- max-frequency = <100000000>;
- bus-width = <8>;
- mmc-cap-mmc-highspeed;
- vmmc-supply = <&ab8500_ldo_aux2_reg>;
-
- status = "okay";
- };
-
- prcmu@80157000 {
- db8500-prcmu-regulators {
- db8500_vape_reg: db8500_vape {
- regulator-name = "db8500-vape";
- };
-
- db8500_varm_reg: db8500_varm {
- regulator-name = "db8500-varm";
- };
-
- db8500_vmodem_reg: db8500_vmodem {
- regulator-name = "db8500-vmodem";
- };
-
- db8500_vpll_reg: db8500_vpll {
- regulator-name = "db8500-vpll";
- };
-
- db8500_vsmps1_reg: db8500_vsmps1 {
- regulator-name = "db8500-vsmps1";
- };
-
- db8500_vsmps2_reg: db8500_vsmps2 {
- regulator-name = "db8500-vsmps2";
- };
-
- db8500_vsmps3_reg: db8500_vsmps3 {
- regulator-name = "db8500-vsmps3";
- };
-
- db8500_vrf1_reg: db8500_vrf1 {
- regulator-name = "db8500-vrf1";
- };
-
- db8500_sva_mmdsp_reg: db8500_sva_mmdsp {
- regulator-name = "db8500-sva-mmdsp";
- };
-
- db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret {
- regulator-name = "db8500-sva-mmdsp-ret";
- };
-
- db8500_sva_pipe_reg: db8500_sva_pipe {
- regulator-name = "db8500_sva_pipe";
- };
-
- db8500_sia_mmdsp_reg: db8500_sia_mmdsp {
- regulator-name = "db8500_sia_mmdsp";
- };
-
- db8500_sia_mmdsp_ret_reg: db8500_sia_mmdsp_ret {
- regulator-name = "db8500-sia-mmdsp-ret";
- };
-
- db8500_sia_pipe_reg: db8500_sia_pipe {
- regulator-name = "db8500-sia-pipe";
- };
-
- db8500_sga_reg: db8500_sga {
- regulator-name = "db8500-sga";
- };
-
- db8500_b2r2_mcde_reg: db8500_b2r2_mcde {
- regulator-name = "db8500-b2r2-mcde";
- };
-
- db8500_esram12_reg: db8500_esram12 {
- regulator-name = "db8500-esram12";
- };
-
- db8500_esram12_ret_reg: db8500_esram12_ret {
- regulator-name = "db8500-esram12-ret";
- };
-
- db8500_esram34_reg: db8500_esram34 {
- regulator-name = "db8500-esram34";
- };
-
- db8500_esram34_ret_reg: db8500_esram34_ret {
- regulator-name = "db8500-esram34-ret";
- };
- };
-
- ab8500 {
- ab8500-regulators {
- ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
- regulator-name = "V-DISPLAY";
- };
-
- ab8500_ldo_aux2_reg: ab8500_ldo_aux2 {
- regulator-name = "V-eMMC1";
- };
-
- ab8500_ldo_aux3_reg: ab8500_ldo_aux3 {
- regulator-name = "V-MMC-SD";
- };
-
- ab8500_ldo_intcore_reg: ab8500_ldo_intcore {
- regulator-name = "V-INTCORE";
- };
-
- ab8500_ldo_tvout_reg: ab8500_ldo_tvout {
- regulator-name = "V-TVOUT";
- };
-
- ab8500_ldo_usb_reg: ab8500_ldo_usb {
- regulator-name = "dummy";
- };
-
- ab8500_ldo_audio_reg: ab8500_ldo_audio {
- regulator-name = "V-AUD";
- };
-
- ab8500_ldo_anamic1_reg: ab8500_ldo_anamic1 {
- regulator-name = "V-AMIC1";
- };
-
- ab8500_ldo_anamic2_reg: ab8500_ldo_anamic2 {
- regulator-name = "V-AMIC2";
- };
-
- ab8500_ldo_dmic_reg: ab8500_ldo_dmic {
- regulator-name = "V-DMIC";
- };
-
- ab8500_ldo_ana_reg: ab8500_ldo_ana {
- regulator-name = "V-CSI/DSI";
- };
- };
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
new file mode 100644
index 000000000000..aed511b47a9e
--- /dev/null
+++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2012 ST-Ericsson AB
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include "ste-dbx5x0.dtsi"
+#include "ste-href.dtsi"
+
+/ {
+ model = "ST-Ericsson HREF (v60+) platform with Device Tree";
+ compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
+
+ gpio_keys {
+ button@1 {
+ gpios = <&gpio5 25 0x4>;
+ };
+ };
+
+ soc {
+ // External Micro SD slot
+ sdi0_per1@80126000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <100000000>;
+ bus-width = <4>;
+ mmc-cap-sd-highspeed;
+ mmc-cap-mmc-highspeed;
+ vmmc-supply = <&ab8500_ldo_aux3_reg>;
+
+ cd-gpios = <&gpio2 31 0x4>; // 95
+
+ status = "okay";
+ };
+
+ // WLAN SDIO channel
+ sdi1_per2@80118000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <100000000>;
+ bus-width = <4>;
+
+ status = "okay";
+ };
+
+ // PoP:ed eMMC
+ sdi2_per3@80005000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <100000000>;
+ bus-width = <8>;
+ mmc-cap-mmc-highspeed;
+
+ status = "okay";
+ };
+
+ // On-board eMMC
+ sdi4_per2@80114000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <100000000>;
+ bus-width = <8>;
+ mmc-cap-mmc-highspeed;
+ vmmc-supply = <&ab8500_ldo_aux2_reg>;
+
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index 9169d3025f39..79425e3836ce 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -653,6 +653,7 @@
reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
clocks = <&hclksmc>;
status = "okay";
+ timings = /bits/ 8 <0 0 0 0x10 0x0a 0>;
partition@0 {
label = "X-Loader(NAND)";
@@ -707,8 +708,14 @@
pinctrl-0 = <&i2c0_default_mux>, <&i2c0_default_mode>;
stw4811@2d {
- compatible = "st,stw4811";
- reg = <0x2d>;
+ compatible = "st,stw4811";
+ reg = <0x2d>;
+ vmmc_regulator: vmmc {
+ compatible = "st,stw481x-vmmc";
+ regulator-name = "VMMC";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
};
};
@@ -839,6 +846,7 @@
cd-inverted;
pinctrl-names = "default";
pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
+ vmmc-supply = <&vmmc_regulator>;
};
};
};
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index f1fc128e249d..f0b39f835914 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -111,12 +111,13 @@
vdd33a-supply = <&en_3v3_reg>;
vddvario-supply = <&db8500_vape_reg>;
-
reg-shift = <1>;
reg-io-width = <2>;
smsc,force-internal-phy;
smsc,irq-active-high;
smsc,irq-push-pull;
+
+ clocks = <&prcc_pclk 3 0>;
};
};
@@ -170,86 +171,8 @@
};
prcmu@80157000 {
- db8500-prcmu-regulators {
- db8500_vape_reg: db8500_vape {
- regulator-name = "db8500-vape";
- };
-
- db8500_varm_reg: db8500_varm {
- regulator-name = "db8500-varm";
- };
-
- db8500_vmodem_reg: db8500_vmodem {
- regulator-name = "db8500-vmodem";
- };
-
- db8500_vpll_reg: db8500_vpll {
- regulator-name = "db8500-vpll";
- };
-
- db8500_vsmps1_reg: db8500_vsmps1 {
- regulator-name = "db8500-vsmps1";
- };
-
- db8500_vsmps2_reg: db8500_vsmps2 {
- regulator-name = "db8500-vsmps2";
- };
-
- db8500_vsmps3_reg: db8500_vsmps3 {
- regulator-name = "db8500-vsmps3";
- };
-
- db8500_vrf1_reg: db8500_vrf1 {
- regulator-name = "db8500-vrf1";
- };
-
- db8500_sva_mmdsp_reg: db8500_sva_mmdsp {
- regulator-name = "db8500-sva-mmdsp";
- };
-
- db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret {
- regulator-name = "db8500-sva-mmdsp-ret";
- };
-
- db8500_sva_pipe_reg: db8500_sva_pipe {
- regulator-name = "db8500_sva_pipe";
- };
-
- db8500_sia_mmdsp_reg: db8500_sia_mmdsp {
- regulator-name = "db8500_sia_mmdsp";
- };
-
- db8500_sia_mmdsp_ret_reg: db8500_sia_mmdsp_ret {
- regulator-name = "db8500-sia-mmdsp-ret";
- };
-
- db8500_sia_pipe_reg: db8500_sia_pipe {
- regulator-name = "db8500-sia-pipe";
- };
-
- db8500_sga_reg: db8500_sga {
- regulator-name = "db8500-sga";
- };
-
- db8500_b2r2_mcde_reg: db8500_b2r2_mcde {
- regulator-name = "db8500-b2r2-mcde";
- };
-
- db8500_esram12_reg: db8500_esram12 {
- regulator-name = "db8500-esram12";
- };
-
- db8500_esram12_ret_reg: db8500_esram12_ret {
- regulator-name = "db8500-esram12-ret";
- };
-
- db8500_esram34_reg: db8500_esram34 {
- regulator-name = "db8500-esram34";
- };
-
- db8500_esram34_ret_reg: db8500_esram34_ret {
- regulator-name = "db8500-esram34-ret";
- };
+ cpufreq {
+ status = "okay";
};
thermal@801573c0 {
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index c32770a28acf..319cc6b509da 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -266,6 +266,11 @@
reg = <0x01c20c90 0x10>;
};
+ sid: eeprom@01c23800 {
+ compatible = "allwinner,sun4i-sid";
+ reg = <0x01c23800 0x10>;
+ };
+
uart0: serial@01c28000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28000 0x400>;
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 3b4a0574f068..52476742a104 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -255,6 +255,11 @@
reg = <0x01c20c90 0x10>;
};
+ sid: eeprom@01c23800 {
+ compatible = "allwinner,sun4i-sid";
+ reg = <0x01c23800 0x10>;
+ };
+
uart0: serial@01c28000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28000 0x400>;
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index f6091dc0936c..ce8ef2a45be0 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -222,6 +222,11 @@
reg = <0x01c20c90 0x10>;
};
+ sid: eeprom@01c23800 {
+ compatible = "allwinner,sun4i-sid";
+ reg = <0x01c23800 0x10>;
+ };
+
uart1: serial@01c28400 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28400 0x400>;
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index f244f5f02365..c1751a64889a 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -175,7 +175,7 @@
apb2_gates: apb2_gates@01c2006c {
#clock-cells = <1>;
compatible = "allwinner,sun6i-a31-apb2-gates-clk";
- reg = <0x01c2006c 0x8>;
+ reg = <0x01c2006c 0x4>;
clocks = <&apb2>;
clock-output-names = "apb2_i2c0", "apb2_i2c1",
"apb2_i2c2", "apb2_i2c3", "apb2_uart0",
diff --git a/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts b/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
index 15e625eca312..5c51cb8a98b0 100644
--- a/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
+++ b/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
@@ -48,6 +48,18 @@
pinctrl-0 = <&uart0_pins_a>;
status = "okay";
};
+
+ i2c0: i2c@01c2ac00 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+ };
+
+ i2c1: i2c@01c2b000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins_a>;
+ status = "okay";
+ };
};
leds {
diff --git a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
new file mode 100644
index 000000000000..8a1009d6c829
--- /dev/null
+++ b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Oliver Schinagl
+ *
+ * Oliver Schinagl <oliver@schinagl.nl>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "sun7i-a20.dtsi"
+
+/ {
+ model = "Cubietech Cubietruck";
+ compatible = "cubietech,cubietruck", "allwinner,sun7i-a20";
+
+ soc@01c00000 {
+ pinctrl@01c20800 {
+ led_pins_cubietruck: led_pins@0 {
+ allwinner,pins = "PH7", "PH11", "PH20", "PH21";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <0>;
+ allwinner,pull = <0>;
+ };
+ };
+
+ uart0: serial@01c28000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins_a>;
+ status = "okay";
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins_cubietruck>;
+
+ blue {
+ label = "cubietruck:blue:usr";
+ gpios = <&pio 7 21 0>;
+ };
+
+ orange {
+ label = "cubietruck:orange:usr";
+ gpios = <&pio 7 20 0>;
+ };
+
+ white {
+ label = "cubietruck:white:usr";
+ gpios = <&pio 7 11 0>;
+ };
+
+ green {
+ label = "cubietruck:green:usr";
+ gpios = <&pio 7 7 0>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
index 9e778557fadb..ead3013f9aca 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
@@ -60,6 +60,24 @@
pinctrl-0 = <&uart7_pins_a>;
status = "okay";
};
+
+ i2c0: i2c@01c2ac00 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+ };
+
+ i2c1: i2c@01c2b000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins_a>;
+ status = "okay";
+ };
+
+ i2c2: i2c@01c2b400 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins_a>;
+ status = "okay";
+ };
};
leds {
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 80559cbdbc87..e46cfedde74c 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -215,6 +215,27 @@
allwinner,pull = <0>;
};
+ i2c0_pins_a: i2c0@0 {
+ allwinner,pins = "PB0", "PB1";
+ allwinner,function = "i2c0";
+ allwinner,drive = <0>;
+ allwinner,pull = <0>;
+ };
+
+ i2c1_pins_a: i2c1@0 {
+ allwinner,pins = "PB18", "PB19";
+ allwinner,function = "i2c1";
+ allwinner,drive = <0>;
+ allwinner,pull = <0>;
+ };
+
+ i2c2_pins_a: i2c2@0 {
+ allwinner,pins = "PB20", "PB21";
+ allwinner,function = "i2c2";
+ allwinner,drive = <0>;
+ allwinner,pull = <0>;
+ };
+
emac_pins_a: emac0@0 {
allwinner,pins = "PA0", "PA1", "PA2",
"PA3", "PA4", "PA5", "PA6",
@@ -244,6 +265,11 @@
reg = <0x01c20c90 0x10>;
};
+ sid: eeprom@01c23800 {
+ compatible = "allwinner,sun7i-a20-sid";
+ reg = <0x01c23800 0x200>;
+ };
+
uart0: serial@01c28000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28000 0x400>;
@@ -324,6 +350,51 @@
status = "disabled";
};
+ i2c0: i2c@01c2ac00 {
+ compatible = "allwinner,sun4i-i2c";
+ reg = <0x01c2ac00 0x400>;
+ interrupts = <0 7 1>;
+ clocks = <&apb1_gates 0>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@01c2b000 {
+ compatible = "allwinner,sun4i-i2c";
+ reg = <0x01c2b000 0x400>;
+ interrupts = <0 8 1>;
+ clocks = <&apb1_gates 1>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@01c2b400 {
+ compatible = "allwinner,sun4i-i2c";
+ reg = <0x01c2b400 0x400>;
+ interrupts = <0 9 1>;
+ clocks = <&apb1_gates 2>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@01c2b800 {
+ compatible = "allwinner,sun4i-i2c";
+ reg = <0x01c2b800 0x400>;
+ interrupts = <0 88 1>;
+ clocks = <&apb1_gates 3>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@01c2bc00 {
+ compatible = "allwinner,sun4i-i2c";
+ reg = <0x01c2bc00 0x400>;
+ interrupts = <0 89 1>;
+ clocks = <&apb1_gates 15>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@01c81000 {
compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
reg = <0x01c81000 0x1000>,
diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts
index 60230288884b..cb5ec23b03a7 100644
--- a/arch/arm/boot/dts/tegra114-dalmore.dts
+++ b/arch/arm/boot/dts/tegra114-dalmore.dts
@@ -1,5 +1,6 @@
/dts-v1/;
+#include <dt-bindings/input/input.h>
#include "tegra114.dtsi"
/ {
@@ -738,6 +739,14 @@
realtek,ldo1-en-gpios =
<&gpio TEGRA_GPIO(V, 3) GPIO_ACTIVE_HIGH>;
};
+
+ temperature-sensor@4c {
+ compatible = "onnn,nct1008";
+ reg = <0x4c>;
+ vcc-supply = <&palmas_ldo6_reg>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(O, 4) IRQ_TYPE_LEVEL_LOW>;
+ };
};
i2c@7000d000 {
@@ -947,7 +956,7 @@
regulator-max-microvolt = <1800000>;
};
- ldo6 {
+ palmas_ldo6_reg: ldo6 {
regulator-name = "vdd-sensor-2v85";
regulator-min-microvolt = <2850000>;
regulator-max-microvolt = <2850000>;
@@ -1011,6 +1020,19 @@
interrupt-parent = <&palmas>;
interrupts = <8 0>;
};
+
+ pinmux {
+ compatible = "ti,tps65913-pinctrl";
+ pinctrl-names = "default";
+ pinctrl-0 = <&palmas_default>;
+
+ palmas_default: pinmux {
+ pin_gpio6 {
+ pins = "gpio6";
+ function = "gpio";
+ };
+ };
+ };
};
};
@@ -1081,26 +1103,26 @@
home {
label = "Home";
gpios = <&gpio TEGRA_GPIO(I, 5) GPIO_ACTIVE_LOW>;
- linux,code = <102>; /* KEY_HOME */
+ linux,code = <KEY_HOME>;
};
power {
label = "Power";
gpios = <&gpio TEGRA_GPIO(Q, 0) GPIO_ACTIVE_LOW>;
- linux,code = <116>; /* KEY_POWER */
+ linux,code = <KEY_POWER>;
gpio-key,wakeup;
};
volume_down {
label = "Volume Down";
gpios = <&gpio TEGRA_GPIO(R, 1) GPIO_ACTIVE_LOW>;
- linux,code = <114>; /* KEY_VOLUMEDOWN */
+ linux,code = <KEY_VOLUMEDOWN>;
};
volume_up {
label = "Volume Up";
gpios = <&gpio TEGRA_GPIO(R, 2) GPIO_ACTIVE_LOW>;
- linux,code = <115>; /* KEY_VOLUMEUP */
+ linux,code = <KEY_VOLUMEUP>;
};
};
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index 2905145d8e59..8d42787c8ff1 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -318,9 +318,9 @@
iommu {
compatible = "nvidia,tegra114-smmu", "nvidia,tegra30-smmu";
- reg = <0x7000f010 0x02c
- 0x7000f1f0 0x010
- 0x7000f228 0x074>;
+ reg = <0x70019010 0x02c
+ 0x700191f0 0x010
+ 0x70019228 0x074>;
nvidia,#asids = <4>;
dma-window = <0 0x40000000>;
nvidia,swgroups = <0x18659fe>;
diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts
new file mode 100644
index 000000000000..431d67a2b413
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-venice2.dts
@@ -0,0 +1,27 @@
+/dts-v1/;
+
+#include "tegra124.dtsi"
+
+/ {
+ model = "NVIDIA Tegra124 Venice2";
+ compatible = "nvidia,venice2", "nvidia,tegra124";
+
+ memory {
+ reg = <0x80000000 0x80000000>;
+ };
+
+ serial@70006000 {
+ status = "okay";
+ };
+
+ pmc@7000e400 {
+ nvidia,invert-interrupt;
+ nvidia,suspend-mode = <1>;
+ nvidia,cpu-pwr-good-time = <500>;
+ nvidia,cpu-pwr-off-time = <300>;
+ nvidia,core-pwr-good-time = <641 3845>;
+ nvidia,core-pwr-off-time = <61036>;
+ nvidia,core-power-req-active-high;
+ nvidia,sys-clock-req-active-high;
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
new file mode 100644
index 000000000000..b7413004ee77
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -0,0 +1,149 @@
+#include <dt-bindings/gpio/tegra-gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "skeleton.dtsi"
+
+/ {
+ compatible = "nvidia,tegra124";
+ interrupt-parent = <&gic>;
+
+ gic: interrupt-controller@50041000 {
+ compatible = "arm,cortex-a15-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x50041000 0x1000>,
+ <0x50042000 0x1000>,
+ <0x50044000 0x2000>,
+ <0x50046000 0x2000>;
+ interrupts = <GIC_PPI 9
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ timer@60005000 {
+ compatible = "nvidia,tegra124-timer", "nvidia,tegra20-timer";
+ reg = <0x60005000 0x400>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpio: gpio@6000d000 {
+ compatible = "nvidia,tegra124-gpio", "nvidia,tegra30-gpio";
+ reg = <0x6000d000 0x1000>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+
+ /*
+ * There are two serial drivers: the 8250-based simple serial
+ * driver, and the APB DMA based serial driver for higher baud rates
+ * and performance. To enable the 8250-based driver, the compatible
+ * is "nvidia,tegra124-uart", "nvidia,tegra20-uart"; to enable
+ * the APB DMA based serial driver, the compatible is
+ * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
+ */
+ serial@70006000 {
+ compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
+ reg = <0x70006000 0x40>;
+ reg-shift = <2>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ serial@70006040 {
+ compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
+ reg = <0x70006040 0x40>;
+ reg-shift = <2>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ serial@70006200 {
+ compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
+ reg = <0x70006200 0x40>;
+ reg-shift = <2>;
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ serial@70006300 {
+ compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
+ reg = <0x70006300 0x40>;
+ reg-shift = <2>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ serial@70006400 {
+ compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
+ reg = <0x70006400 0x40>;
+ reg-shift = <2>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ rtc@7000e000 {
+ compatible = "nvidia,tegra124-rtc", "nvidia,tegra20-rtc";
+ reg = <0x7000e000 0x100>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pmc@7000e400 {
+ compatible = "nvidia,tegra124-pmc";
+ reg = <0x7000e400 0x400>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ };
+
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <2>;
+ };
+
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <3>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+};
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index e19dbf238e5c..5ea7dfa4d9fa 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -294,9 +294,10 @@
};
};
- nct1008 {
+ temperature-sensor@4c {
compatible = "onnn,nct1008";
reg = <0x4c>;
+ vcc-supply = <&sys_3v3_reg>;
interrupt-parent = <&gpio>;
interrupts = <TEGRA_GPIO(CC, 2) IRQ_TYPE_LEVEL_LOW>;
};
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 0022c127e1d9..2bd55cfd88ad 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -136,12 +136,13 @@
gr3d {
compatible = "nvidia,tegra30-gr3d";
reg = <0x54180000 0x00040000>;
- clocks = <&tegra_car 24 &tegra_car 98>;
+ clocks = <&tegra_car TEGRA30_CLK_GR3D
+ &tegra_car TEGRA30_CLK_GR3D2>;
clock-names = "3d", "3d2";
};
dc@54200000 {
- compatible = "nvidia,tegra30-dc";
+ compatible = "nvidia,tegra30-dc", "nvidia,tegra20-dc";
reg = <0x54200000 0x00040000>;
interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&tegra_car TEGRA30_CLK_DISP1>,
diff --git a/arch/arm/boot/dts/testcases/tests-interrupts.dtsi b/arch/arm/boot/dts/testcases/tests-interrupts.dtsi
new file mode 100644
index 000000000000..c843720bd3e5
--- /dev/null
+++ b/arch/arm/boot/dts/testcases/tests-interrupts.dtsi
@@ -0,0 +1,58 @@
+
+/ {
+ testcase-data {
+ interrupts {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ test_intc0: intc0 {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ test_intc1: intc1 {
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
+
+ test_intc2: intc2 {
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ test_intmap0: intmap0 {
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ interrupt-map = <1 &test_intc0 9>,
+ <2 &test_intc1 10 11 12>,
+ <3 &test_intc2 13 14>,
+ <4 &test_intc2 15 16>;
+ };
+
+ test_intmap1: intmap1 {
+ #interrupt-cells = <2>;
+ interrupt-map = <0x5000 1 2 &test_intc0 15>;
+ };
+
+ interrupts0 {
+ interrupt-parent = <&test_intc0>;
+ interrupts = <1>, <2>, <3>, <4>;
+ };
+
+ interrupts1 {
+ interrupt-parent = <&test_intmap0>;
+ interrupts = <1>, <2>, <3>, <4>;
+ };
+
+ interrupts-extended0 {
+ reg = <0x5000 0x100>;
+ interrupts-extended = <&test_intc0 1>,
+ <&test_intc1 2 3 4>,
+ <&test_intc2 5 6>,
+ <&test_intmap0 1>,
+ <&test_intmap0 2>,
+ <&test_intmap0 3>,
+ <&test_intmap1 1 2>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/testcases/tests.dtsi b/arch/arm/boot/dts/testcases/tests.dtsi
index a7c5067622e8..3f123ecc9dd7 100644
--- a/arch/arm/boot/dts/testcases/tests.dtsi
+++ b/arch/arm/boot/dts/testcases/tests.dtsi
@@ -1 +1,2 @@
/include/ "tests-phandle.dtsi"
+/include/ "tests-interrupts.dtsi"
diff --git a/arch/arm/boot/dts/twl4030.dtsi b/arch/arm/boot/dts/twl4030.dtsi
index 5aba238d1f1e..fb1b2ec8eaa9 100644
--- a/arch/arm/boot/dts/twl4030.dtsi
+++ b/arch/arm/boot/dts/twl4030.dtsi
@@ -23,6 +23,22 @@
compatible = "ti,twl4030-wdt";
};
+ vaux1: regulator-vaux1 {
+ compatible = "ti,twl4030-vaux1";
+ };
+
+ vaux2: regulator-vaux2 {
+ compatible = "ti,twl4030-vaux2";
+ };
+
+ vaux3: regulator-vaux3 {
+ compatible = "ti,twl4030-vaux3";
+ };
+
+ vaux4: regulator-vaux4 {
+ compatible = "ti,twl4030-vaux4";
+ };
+
vcc: regulator-vdd1 {
compatible = "ti,twl4030-vdd1";
regulator-min-microvolt = <600000>;
@@ -35,10 +51,20 @@
regulator-max-microvolt = <1800000>;
};
- vpll2: regulator-vpll2 {
- compatible = "ti,twl4030-vpll2";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+ vio: regulator-vio {
+ compatible = "ti,twl4030-vio";
+ };
+
+ vintana1: regulator-vintana1 {
+ compatible = "ti,twl4030-vintana1";
+ };
+
+ vintana2: regulator-vintana2 {
+ compatible = "ti,twl4030-vintana2";
+ };
+
+ vintdig: regulator-vintdig {
+ compatible = "ti,twl4030-vintdig";
};
vmmc1: regulator-vmmc1 {
@@ -65,6 +91,16 @@
compatible = "ti,twl4030-vusb3v1";
};
+ vpll1: regulator-vpll1 {
+ compatible = "ti,twl4030-vpll1";
+ };
+
+ vpll2: regulator-vpll2 {
+ compatible = "ti,twl4030-vpll2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
vsim: regulator-vsim {
compatible = "ti,twl4030-vsim";
regulator-min-microvolt = <1800000>;
@@ -98,4 +134,9 @@
compatible = "ti,twl4030-pwmled";
#pwm-cells = <2>;
};
+
+ twl_pwrbutton: pwrbutton {
+ compatible = "ti,twl4030-pwrbutton";
+ interrupts = <8>;
+ };
};
diff --git a/arch/arm/boot/dts/twl6030_omap4.dtsi b/arch/arm/boot/dts/twl6030_omap4.dtsi
new file mode 100644
index 000000000000..a4fa5703c42b
--- /dev/null
+++ b/arch/arm/boot/dts/twl6030_omap4.dtsi
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+&twl {
+ /*
+ * On most OMAP4 platforms, the twl6030 IRQ line is connected
+ * to the SYS_NIRQ1 line on OMAP and the twl6030 MSECURE line is
+ * connected to the fref_clk0_out.sys_drm_msecure line.
+ * Therefore, configure the defaults for the SYS_NIRQ1 and
+ * fref_clk0_out.sys_drm_msecure pins here.
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <
+ &twl6030_pins
+ &twl6030_wkup_pins
+ >;
+};
+
+&omap4_pmx_wkup {
+ twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
+ pinctrl-single,pins = <
+ 0x14 (PIN_OUTPUT | MUX_MODE2) /* fref_clk0_out.sys_drm_msecure */
+ >;
+ };
+};
+
+&omap4_pmx_core {
+ twl6030_pins: pinmux_twl6030_pins {
+ pinctrl-single,pins = <
+ 0x15e (WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1.sys_nirq1 */
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index dde75ae8b4b1..e01e5a081def 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -185,7 +185,7 @@
mmc@5000 {
compatible = "arm,primecell";
reg = < 0x5000 0x1000>;
- interrupts = <22 34>;
+ interrupts-extended = <&vic 22 &sic 2>;
};
kmi@6000 {
compatible = "arm,pl050", "arm,primecell";
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 7e8175269064..f43907c40c93 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -41,7 +41,7 @@
mmc@b000 {
compatible = "arm,primecell";
reg = <0xb000 0x1000>;
- interrupts = <23 34>;
+ interrupts-extended = <&vic 23 &sic 2>;
};
};
};
diff --git a/arch/arm/boot/dts/vf610-cosmic.dts b/arch/arm/boot/dts/vf610-cosmic.dts
new file mode 100644
index 000000000000..c42e4f938dcd
--- /dev/null
+++ b/arch/arm/boot/dts/vf610-cosmic.dts
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/dts-v1/;
+#include "vf610.dtsi"
+
+/ {
+ model = "PHYTEC Cosmic/Cosmic+ Board";
+ compatible = "phytec,vf610-cosmic", "fsl,vf610";
+
+ chosen {
+ bootargs = "console=ttyLP1,115200";
+ };
+
+ memory {
+ reg = <0x80000000 0x10000000>;
+ };
+
+ clocks {
+ enet_ext {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+};
+
+&fec1 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1_1>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_1>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/vf610-twr.dts b/arch/arm/boot/dts/vf610-twr.dts
index 1a58678b93fa..c8047ca16501 100644
--- a/arch/arm/boot/dts/vf610-twr.dts
+++ b/arch/arm/boot/dts/vf610-twr.dts
@@ -36,6 +36,23 @@
};
+&dspi0 {
+ bus-num = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_dspi0_1>;
+ status = "okay";
+
+ sflash: at26df081a@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "atmel,at26df081a";
+ spi-max-frequency = <16000000>;
+ spi-cpol;
+ spi-cpha;
+ reg = <0>;
+ };
+};
+
&fec0 {
phy-mode = "rmii";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/vf610.dtsi b/arch/arm/boot/dts/vf610.dtsi
index 67d929cf9804..d31ce1b4a7b0 100644
--- a/arch/arm/boot/dts/vf610.dtsi
+++ b/arch/arm/boot/dts/vf610.dtsi
@@ -123,6 +123,18 @@
status = "disabled";
};
+ dspi0: dspi0@4002c000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,vf610-dspi";
+ reg = <0x4002c000 0x1000>;
+ interrupts = <0 67 0x04>;
+ clocks = <&clks VF610_CLK_DSPI0>;
+ clock-names = "dspi";
+ spi-num-chipselects = <5>;
+ status = "disabled";
+ };
+
sai2: sai@40031000 {
compatible = "fsl,vf610-sai";
reg = <0x40031000 0x1000>;
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
index e32b92b949d2..e7f73b2e4550 100644
--- a/arch/arm/boot/dts/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -92,6 +92,14 @@
};
};
+ global_timer: timer@f8f00200 {
+ compatible = "arm,cortex-a9-global-timer";
+ reg = <0xf8f00200 0x20>;
+ interrupts = <1 11 0x301>;
+ interrupt-parent = <&intc>;
+ clocks = <&clkc 4>;
+ };
+
ttc0: ttc0@f8001000 {
interrupt-parent = <&intc>;
interrupts = < 0 10 4 0 11 4 0 12 4 >;
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 8c60f473e976..4bdc41622c36 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -6,7 +6,6 @@ obj-y += firmware.o
obj-$(CONFIG_ICST) += icst.o
obj-$(CONFIG_SA1111) += sa1111.o
-obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
obj-$(CONFIG_DMABOUNCE) += dmabounce.o
obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
@@ -17,3 +16,5 @@ obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
AFLAGS_mcpm_head.o := -march=armv7-a
AFLAGS_vlock.o := -march=armv7-a
obj-$(CONFIG_TI_PRIV_EDMA) += edma.o
+obj-$(CONFIG_BL_SWITCHER) += bL_switcher.o
+obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
new file mode 100644
index 000000000000..63bbc4f70564
--- /dev/null
+++ b/arch/arm/common/bL_switcher.c
@@ -0,0 +1,822 @@
+/*
+ * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
+ *
+ * Created by: Nicolas Pitre, March 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/clockchips.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/notifier.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/moduleparam.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cputype.h>
+#include <asm/suspend.h>
+#include <asm/mcpm.h>
+#include <asm/bL_switcher.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/power_cpu_migrate.h>
+
+
+/*
+ * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
+ * __attribute_const__ and we don't want the compiler to assume any
+ * constness here as the value _does_ change along some code paths.
+ */
+
+static int read_mpidr(void)
+{
+ unsigned int id;
+ asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
+ return id & MPIDR_HWID_BITMASK;
+}
+
+/*
+ * Get a global nanosecond time stamp for tracing.
+ */
+static s64 get_ns(void)
+{
+ struct timespec ts;
+ getnstimeofday(&ts);
+ return timespec_to_ns(&ts);
+}
+
+/*
+ * bL switcher core code.
+ */
+
+static void bL_do_switch(void *_arg)
+{
+ unsigned ib_mpidr, ib_cpu, ib_cluster;
+ long volatile handshake, **handshake_ptr = _arg;
+
+ pr_debug("%s\n", __func__);
+
+ ib_mpidr = cpu_logical_map(smp_processor_id());
+ ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+ ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+ /* Advertise our handshake location */
+ if (handshake_ptr) {
+ handshake = 0;
+ *handshake_ptr = &handshake;
+ } else
+ handshake = -1;
+
+ /*
+ * Our state has been saved at this point. Let's release our
+ * inbound CPU.
+ */
+ mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
+ sev();
+
+ /*
+ * From this point, we must assume that our counterpart CPU might
+ * have taken over in its parallel world already, as if execution
+ * just returned from cpu_suspend(). It is therefore important to
+	 * be very careful not to make any change that the other CPU is
+	 * not expecting. This is why we need stack isolation.
+	 *
+	 * Fancy undercover tasks could be performed here. For now
+ * we have none.
+ */
+
+ /*
+ * Let's wait until our inbound is alive.
+ */
+ while (!handshake) {
+ wfe();
+ smp_mb();
+ }
+
+	/* Let's put ourselves down. */
+ mcpm_cpu_power_down();
+
+ /* should never get here */
+ BUG();
+}
+
+/*
+ * Stack isolation. To ensure 'current' remains valid, we just use another
+ * piece of our thread's stack space which should be fairly lightly used.
+ * The selected area starts just above the thread_info structure located
+ * at the very bottom of the stack, aligned to a cache line, and indexed
+ * with the cluster number.
+ */
+#define STACK_SIZE 512
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+static int bL_switchpoint(unsigned long _arg)
+{
+ unsigned int mpidr = read_mpidr();
+ unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ void *stack = current_thread_info() + 1;
+ stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
+ stack += clusterid * STACK_SIZE + STACK_SIZE;
+ call_with_stack(bL_do_switch, (void *)_arg, stack);
+ BUG();
+}
+
+/*
+ * Generic switcher interface
+ */
+
+static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
+static int bL_switcher_cpu_pairing[NR_CPUS];
+
+/*
+ * bL_switch_to - Switch to a specific cluster for the current CPU
+ * @new_cluster_id: the ID of the cluster to switch to.
+ *
+ * This function must be called on the CPU to be switched.
+ * Returns 0 on success, else a negative status code.
+ */
+static int bL_switch_to(unsigned int new_cluster_id)
+{
+ unsigned int mpidr, this_cpu, that_cpu;
+ unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
+ struct completion inbound_alive;
+ struct tick_device *tdev;
+ enum clock_event_mode tdev_mode;
+ long volatile *handshake_ptr;
+ int ipi_nr, ret;
+
+ this_cpu = smp_processor_id();
+ ob_mpidr = read_mpidr();
+ ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
+ ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
+ BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);
+
+ if (new_cluster_id == ob_cluster)
+ return 0;
+
+ that_cpu = bL_switcher_cpu_pairing[this_cpu];
+ ib_mpidr = cpu_logical_map(that_cpu);
+ ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+ ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+ pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
+ this_cpu, ob_mpidr, ib_mpidr);
+
+ this_cpu = smp_processor_id();
+
+ /* Close the gate for our entry vectors */
+ mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
+ mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);
+
+ /* Install our "inbound alive" notifier. */
+ init_completion(&inbound_alive);
+ ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
+ ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
+ mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);
+
+ /*
+ * Let's wake up the inbound CPU now in case it requires some delay
+ * to come online, but leave it gated in our entry vector code.
+ */
+ ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
+ if (ret) {
+ pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
+ return ret;
+ }
+
+ /*
+	 * Raise an SGI on the inbound CPU to make sure it doesn't stall
+ * in a possible WFI, such as in bL_power_down().
+ */
+ gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);
+
+ /*
+	 * Wait for the inbound CPU to come up. This allows other
+	 * tasks to be scheduled in the meantime.
+ */
+ wait_for_completion(&inbound_alive);
+ mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);
+
+ /*
+ * From this point we are entering the switch critical zone
+ * and can't take any interrupts anymore.
+ */
+ local_irq_disable();
+ local_fiq_disable();
+ trace_cpu_migrate_begin(get_ns(), ob_mpidr);
+
+ /* redirect GIC's SGIs to our counterpart */
+ gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
+
+ tdev = tick_get_device(this_cpu);
+ if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
+ tdev = NULL;
+ if (tdev) {
+ tdev_mode = tdev->evtdev->mode;
+ clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+ }
+
+ ret = cpu_pm_enter();
+
+	/* we cannot tolerate errors at this point */
+ if (ret)
+ panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);
+
+ /* Swap the physical CPUs in the logical map for this logical CPU. */
+ cpu_logical_map(this_cpu) = ib_mpidr;
+ cpu_logical_map(that_cpu) = ob_mpidr;
+
+ /* Let's do the actual CPU switch. */
+ ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
+ if (ret > 0)
+ panic("%s: cpu_suspend() returned %d\n", __func__, ret);
+
+ /* We are executing on the inbound CPU at this point */
+ mpidr = read_mpidr();
+ pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
+ BUG_ON(mpidr != ib_mpidr);
+
+ mcpm_cpu_powered_up();
+
+ ret = cpu_pm_exit();
+
+ if (tdev) {
+ clockevents_set_mode(tdev->evtdev, tdev_mode);
+ clockevents_program_event(tdev->evtdev,
+ tdev->evtdev->next_event, 1);
+ }
+
+ trace_cpu_migrate_finish(get_ns(), ib_mpidr);
+ local_fiq_enable();
+ local_irq_enable();
+
+ *handshake_ptr = 1;
+ dsb_sev();
+
+ if (ret)
+ pr_err("%s exiting with error %d\n", __func__, ret);
+ return ret;
+}
+
+struct bL_thread {
+ spinlock_t lock;
+ struct task_struct *task;
+ wait_queue_head_t wq;
+ int wanted_cluster;
+ struct completion started;
+ bL_switch_completion_handler completer;
+ void *completer_cookie;
+};
+
+static struct bL_thread bL_threads[NR_CPUS];
+
+static int bL_switcher_thread(void *arg)
+{
+ struct bL_thread *t = arg;
+ struct sched_param param = { .sched_priority = 1 };
+ int cluster;
+ bL_switch_completion_handler completer;
+ void *completer_cookie;
+
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+ complete(&t->started);
+
+ do {
+ if (signal_pending(current))
+ flush_signals(current);
+ wait_event_interruptible(t->wq,
+ t->wanted_cluster != -1 ||
+ kthread_should_stop());
+
+ spin_lock(&t->lock);
+ cluster = t->wanted_cluster;
+ completer = t->completer;
+ completer_cookie = t->completer_cookie;
+ t->wanted_cluster = -1;
+ t->completer = NULL;
+ spin_unlock(&t->lock);
+
+ if (cluster != -1) {
+ bL_switch_to(cluster);
+
+ if (completer)
+ completer(completer_cookie);
+ }
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
+{
+ struct task_struct *task;
+
+ task = kthread_create_on_node(bL_switcher_thread, arg,
+ cpu_to_node(cpu), "kswitcher_%d", cpu);
+ if (!IS_ERR(task)) {
+ kthread_bind(task, cpu);
+ wake_up_process(task);
+ } else
+ pr_err("%s failed for CPU %d\n", __func__, cpu);
+ return task;
+}
+
+/*
+ * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
+ * with completion notification via a callback
+ *
+ * @cpu: the CPU to switch
+ * @new_cluster_id: the ID of the cluster to switch to.
+ * @completer: switch completion callback. If non-NULL,
+ * @completer(@completer_cookie) will be called on completion of
+ * the switch, in non-atomic context.
+ * @completer_cookie: opaque context argument for @completer.
+ *
+ * This function causes a cluster switch on the given CPU by waking up
+ * the appropriate switcher thread. This function may or may not return
+ * before the switch has occurred.
+ *
+ * If a @completer callback function is supplied, it will be called when
+ * the switch is complete. This can be used to determine asynchronously
+ * when the switch is complete, regardless of when bL_switch_request()
+ * returns. When @completer is supplied, no new switch request is permitted
+ * for the affected CPU until after the switch is complete, and @completer
+ * has returned.
+ */
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+ bL_switch_completion_handler completer,
+ void *completer_cookie)
+{
+ struct bL_thread *t;
+
+ if (cpu >= ARRAY_SIZE(bL_threads)) {
+ pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
+ return -EINVAL;
+ }
+
+ t = &bL_threads[cpu];
+
+ if (IS_ERR(t->task))
+ return PTR_ERR(t->task);
+ if (!t->task)
+ return -ESRCH;
+
+ spin_lock(&t->lock);
+ if (t->completer) {
+ spin_unlock(&t->lock);
+ return -EBUSY;
+ }
+ t->completer = completer;
+ t->completer_cookie = completer_cookie;
+ t->wanted_cluster = new_cluster_id;
+ spin_unlock(&t->lock);
+ wake_up(&t->wq);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bL_switch_request_cb);
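As a minimal sketch of how a client might drive this interface: the helper below requests a switch and blocks until the completer has run. The function name my_request_switch and the on-stack completion object are illustrative assumptions, not part of the switcher API.

	/* hypothetical caller, for illustration only */
	#include <linux/completion.h>
	#include <asm/bL_switcher.h>

	static void my_switch_done(void *cookie)
	{
		complete(cookie);
	}

	static int my_request_switch(unsigned int cpu, unsigned int cluster)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		int ret;

		ret = bL_switch_request_cb(cpu, cluster, my_switch_done, &done);
		if (ret)
			return ret;	/* e.g. -EBUSY while another switch is pending */

		wait_for_completion(&done);
		return 0;
	}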
+
+/*
+ * Activation and configuration code.
+ */
+
+static DEFINE_MUTEX(bL_switcher_activation_lock);
+static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
+static unsigned int bL_switcher_active;
+static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
+static cpumask_t bL_switcher_removed_logical_cpus;
+
+int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);
+
+int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);
+
+static int bL_activation_notify(unsigned long val)
+{
+ int ret;
+
+ ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
+ if (ret & NOTIFY_STOP_MASK)
+ pr_err("%s: notifier chain failed with status 0x%x\n",
+ __func__, ret);
+ return notifier_to_errno(ret);
+}
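A short sketch of a client of the notifier API above, for code that must quiesce around switcher activation changes. The callback and notifier block names are hypothetical; the BL_NOTIFY_* values are the events that bL_switcher_enable()/bL_switcher_disable() below pass through bL_activation_notify().

	/* hypothetical notifier client, for illustration only */
	#include <linux/init.h>
	#include <linux/notifier.h>
	#include <asm/bL_switcher.h>

	static int my_bL_notifier(struct notifier_block *nb,
				  unsigned long event, void *data)
	{
		switch (event) {
		case BL_NOTIFY_PRE_ENABLE:
		case BL_NOTIFY_PRE_DISABLE:
			/* pause anything that assumes a stable CPU/cluster layout */
			break;
		case BL_NOTIFY_POST_ENABLE:
		case BL_NOTIFY_POST_DISABLE:
			/* safe to resume */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_bL_nb = {
		.notifier_call = my_bL_notifier,
	};

	static int __init my_client_init(void)
	{
		return bL_switcher_register_notifier(&my_bL_nb);
	}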
+
+static void bL_switcher_restore_cpus(void)
+{
+ int i;
+
+ for_each_cpu(i, &bL_switcher_removed_logical_cpus)
+ cpu_up(i);
+}
+
+static int bL_switcher_halve_cpus(void)
+{
+ int i, j, cluster_0, gic_id, ret;
+ unsigned int cpu, cluster, mask;
+ cpumask_t available_cpus;
+
+ /* First pass to validate what we have */
+ mask = 0;
+ for_each_online_cpu(i) {
+ cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+ if (cluster >= 2) {
+ pr_err("%s: only dual cluster systems are supported\n", __func__);
+ return -EINVAL;
+ }
+ if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
+ return -EINVAL;
+ mask |= (1 << cluster);
+ }
+ if (mask != 3) {
+ pr_err("%s: no CPU pairing possible\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Now let's do the pairing. We match each CPU with another CPU
+ * from a different cluster. To get a uniform scheduling behavior
+ * without fiddling with CPU topology and compute capacity data,
+ * we'll use logical CPUs initially belonging to the same cluster.
+ */
+ memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
+ cpumask_copy(&available_cpus, cpu_online_mask);
+ cluster_0 = -1;
+ for_each_cpu(i, &available_cpus) {
+ int match = -1;
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+ if (cluster_0 == -1)
+ cluster_0 = cluster;
+ if (cluster != cluster_0)
+ continue;
+ cpumask_clear_cpu(i, &available_cpus);
+ for_each_cpu(j, &available_cpus) {
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
+ /*
+ * Let's remember the last match to create "odd"
+ * pairings on purpose in order for other code not
+ * to assume any relation between physical and
+ * logical CPU numbers.
+ */
+ if (cluster != cluster_0)
+ match = j;
+ }
+ if (match != -1) {
+ bL_switcher_cpu_pairing[i] = match;
+ cpumask_clear_cpu(match, &available_cpus);
+ pr_info("CPU%d paired with CPU%d\n", i, match);
+ }
+ }
+
+ /*
+ * Now we disable the unwanted CPUs i.e. everything that has no
+ * pairing information (that includes the pairing counterparts).
+ */
+ cpumask_clear(&bL_switcher_removed_logical_cpus);
+ for_each_online_cpu(i) {
+ cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+
+ /* Let's take note of the GIC ID for this CPU */
+ gic_id = gic_get_cpu_id(i);
+ if (gic_id < 0) {
+ pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
+ bL_switcher_restore_cpus();
+ return -EINVAL;
+ }
+ bL_gic_id[cpu][cluster] = gic_id;
+ pr_info("GIC ID for CPU %u cluster %u is %u\n",
+ cpu, cluster, gic_id);
+
+ if (bL_switcher_cpu_pairing[i] != -1) {
+ bL_switcher_cpu_original_cluster[i] = cluster;
+ continue;
+ }
+
+ ret = cpu_down(i);
+ if (ret) {
+ bL_switcher_restore_cpus();
+ return ret;
+ }
+ cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
+ }
+
+ return 0;
+}
+
+/* Determine the logical CPU a given physical CPU is grouped on. */
+int bL_switcher_get_logical_index(u32 mpidr)
+{
+ int cpu;
+
+ if (!bL_switcher_active)
+ return -EUNATCH;
+
+ mpidr &= MPIDR_HWID_BITMASK;
+ for_each_online_cpu(cpu) {
+ int pairing = bL_switcher_cpu_pairing[cpu];
+ if (pairing == -1)
+ continue;
+ if ((mpidr == cpu_logical_map(cpu)) ||
+ (mpidr == cpu_logical_map(pairing)))
+ return cpu;
+ }
+ return -EINVAL;
+}
+
+static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
+{
+ trace_cpu_migrate_current(get_ns(), read_mpidr());
+}
+
+int bL_switcher_trace_trigger(void)
+{
+ int ret;
+
+ preempt_disable();
+
+ bL_switcher_trace_trigger_cpu(NULL);
+ ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
+
+ preempt_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
+
+static int bL_switcher_enable(void)
+{
+ int cpu, ret;
+
+ mutex_lock(&bL_switcher_activation_lock);
+ cpu_hotplug_driver_lock();
+ if (bL_switcher_active) {
+ cpu_hotplug_driver_unlock();
+ mutex_unlock(&bL_switcher_activation_lock);
+ return 0;
+ }
+
+ pr_info("big.LITTLE switcher initializing\n");
+
+ ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
+ if (ret)
+ goto error;
+
+ ret = bL_switcher_halve_cpus();
+ if (ret)
+ goto error;
+
+ bL_switcher_trace_trigger();
+
+ for_each_online_cpu(cpu) {
+ struct bL_thread *t = &bL_threads[cpu];
+ spin_lock_init(&t->lock);
+ init_waitqueue_head(&t->wq);
+ init_completion(&t->started);
+ t->wanted_cluster = -1;
+ t->task = bL_switcher_thread_create(cpu, t);
+ }
+
+ bL_switcher_active = 1;
+ bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+ pr_info("big.LITTLE switcher initialized\n");
+ goto out;
+
+error:
+ pr_warn("big.LITTLE switcher initialization failed\n");
+ bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+ cpu_hotplug_driver_unlock();
+ mutex_unlock(&bL_switcher_activation_lock);
+ return ret;
+}
+
+#ifdef CONFIG_SYSFS
+
+static void bL_switcher_disable(void)
+{
+ unsigned int cpu, cluster;
+ struct bL_thread *t;
+ struct task_struct *task;
+
+ mutex_lock(&bL_switcher_activation_lock);
+ cpu_hotplug_driver_lock();
+
+ if (!bL_switcher_active)
+ goto out;
+
+ if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
+ bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+ goto out;
+ }
+
+ bL_switcher_active = 0;
+
+ /*
+ * To deactivate the switcher, we must shut down the switcher
+ * threads to prevent any other requests from being accepted.
+	 * Then, if the final cluster for a given logical CPU is not the
+ * same as the original one, we'll recreate a switcher thread
+ * just for the purpose of switching the CPU back without any
+ * possibility for interference from external requests.
+ */
+ for_each_online_cpu(cpu) {
+ t = &bL_threads[cpu];
+ task = t->task;
+ t->task = NULL;
+ if (!task || IS_ERR(task))
+ continue;
+ kthread_stop(task);
+		/* no more switches may happen on this CPU at this point */
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+ if (cluster == bL_switcher_cpu_original_cluster[cpu])
+ continue;
+ init_completion(&t->started);
+ t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
+ task = bL_switcher_thread_create(cpu, t);
+ if (!IS_ERR(task)) {
+ wait_for_completion(&t->started);
+ kthread_stop(task);
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+ if (cluster == bL_switcher_cpu_original_cluster[cpu])
+ continue;
+ }
+ /* If execution gets here, we're in trouble. */
+ pr_crit("%s: unable to restore original cluster for CPU %d\n",
+ __func__, cpu);
+ pr_crit("%s: CPU %d can't be restored\n",
+ __func__, bL_switcher_cpu_pairing[cpu]);
+ cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
+ &bL_switcher_removed_logical_cpus);
+ }
+
+ bL_switcher_restore_cpus();
+ bL_switcher_trace_trigger();
+
+ bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+ cpu_hotplug_driver_unlock();
+ mutex_unlock(&bL_switcher_activation_lock);
+}
+
+static ssize_t bL_switcher_active_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", bL_switcher_active);
+}
+
+static ssize_t bL_switcher_active_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+
+ switch (buf[0]) {
+ case '0':
+ bL_switcher_disable();
+ ret = 0;
+ break;
+ case '1':
+ ret = bL_switcher_enable();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return (ret >= 0) ? count : ret;
+}
+
+static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = bL_switcher_trace_trigger();
+
+ return ret ? ret : count;
+}
+
+static struct kobj_attribute bL_switcher_active_attr =
+ __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);
+
+static struct kobj_attribute bL_switcher_trace_trigger_attr =
+ __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);
+
+static struct attribute *bL_switcher_attrs[] = {
+ &bL_switcher_active_attr.attr,
+ &bL_switcher_trace_trigger_attr.attr,
+ NULL,
+};
+
+static struct attribute_group bL_switcher_attr_group = {
+ .attrs = bL_switcher_attrs,
+};
+
+static struct kobject *bL_switcher_kobj;
+
+static int __init bL_switcher_sysfs_init(void)
+{
+ int ret;
+
+ bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
+ if (!bL_switcher_kobj)
+ return -ENOMEM;
+ ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
+ if (ret)
+ kobject_put(bL_switcher_kobj);
+ return ret;
+}
+
+#endif /* CONFIG_SYSFS */
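The two attributes above land under /sys/kernel/bL_switcher (kernel_kobj corresponds to /sys/kernel): "active" is readable and writable, "trace_trigger" is write-only. A small user-space sketch, assuming that path, which disables and re-enables the switcher:

	/* user-space sketch, path assumed from the kobject setup above */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int write_attr(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, val, 1);
		close(fd);
		return n == 1 ? 0 : -1;
	}

	int main(void)
	{
		const char *active = "/sys/kernel/bL_switcher/active";

		if (write_attr(active, "0") || write_attr(active, "1")) {
			perror("bL_switcher/active");
			return 1;
		}
		return 0;
	}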
+
+bool bL_switcher_get_enabled(void)
+{
+ mutex_lock(&bL_switcher_activation_lock);
+
+ return bL_switcher_active;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);
+
+void bL_switcher_put_enabled(void)
+{
+ mutex_unlock(&bL_switcher_activation_lock);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
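The get/put pair above behaves like a read-side lock on the switcher state: get returns whether the switcher is active and holds the activation mutex until the matching put. A minimal sketch of the intended usage pattern; the function name is illustrative.

	/* hypothetical user of the get/put pair, for illustration only */
	#include <linux/printk.h>
	#include <linux/types.h>
	#include <asm/bL_switcher.h>

	static void my_report_switcher_state(void)
	{
		bool active = bL_switcher_get_enabled();

		/* the switcher cannot change state until the put below */
		pr_info("bL switcher is %s\n", active ? "active" : "inactive");

		bL_switcher_put_enabled();
	}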
+
+/*
+ * Veto any CPU hotplug operation on those CPUs we've removed
+ * while the switcher is active.
+ * We're just not ready to deal with that given the trickery involved.
+ */
+static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ if (bL_switcher_active) {
+ int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
+ switch (action & 0xf) {
+ case CPU_UP_PREPARE:
+ case CPU_DOWN_PREPARE:
+ if (pairing == -1)
+ return NOTIFY_BAD;
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static bool no_bL_switcher;
+core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
+
+static int __init bL_switcher_init(void)
+{
+ int ret;
+
+ if (MAX_NR_CLUSTERS != 2) {
+ pr_err("%s: only dual cluster systems are supported\n", __func__);
+ return -EINVAL;
+ }
+
+ cpu_notifier(bL_switcher_hotplug_callback, 0);
+
+ if (!no_bL_switcher) {
+ ret = bL_switcher_enable();
+ if (ret)
+ return ret;
+ }
+
+#ifdef CONFIG_SYSFS
+ ret = bL_switcher_sysfs_init();
+ if (ret)
+ pr_err("%s: unable to create sysfs entry\n", __func__);
+#endif
+
+ return 0;
+}
+
+late_initcall(bL_switcher_init);
diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c
new file mode 100644
index 000000000000..3f47f1203c6b
--- /dev/null
+++ b/arch/arm/common/bL_switcher_dummy_if.c
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/common/bL_switcher_dummy_if.c -- b.L switcher dummy interface
+ *
+ * Created by: Nicolas Pitre, November 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * Dummy interface to user space, for debugging purposes only.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <asm/uaccess.h>
+#include <asm/bL_switcher.h>
+
+static ssize_t bL_switcher_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ unsigned char val[3];
+ unsigned int cpu, cluster;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+
+ if (len < 3)
+ return -EINVAL;
+
+ if (copy_from_user(val, buf, 3))
+ return -EFAULT;
+
+ /* format: <cpu#>,<cluster#> */
+ if (val[0] < '0' || val[0] > '9' ||
+ val[1] != ',' ||
+ val[2] < '0' || val[2] > '1')
+ return -EINVAL;
+
+ cpu = val[0] - '0';
+ cluster = val[2] - '0';
+ ret = bL_switch_request(cpu, cluster);
+
+ return ret ? : len;
+}
+
+static const struct file_operations bL_switcher_fops = {
+ .write = bL_switcher_write,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice bL_switcher_device = {
+ MISC_DYNAMIC_MINOR,
+ "b.L_switcher",
+ &bL_switcher_fops
+};
+
+static int __init bL_switcher_dummy_if_init(void)
+{
+ return misc_register(&bL_switcher_device);
+}
+
+static void __exit bL_switcher_dummy_if_exit(void)
+{
+ misc_deregister(&bL_switcher_device);
+}
+
+module_init(bL_switcher_dummy_if_init);
+module_exit(bL_switcher_dummy_if_exit);
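With devtmpfs or udev, the misc device above would typically appear as /dev/b.L_switcher (the path is an assumption derived from the device name). A user-space sketch matching the "<cpu#>,<cluster#>" format parsed by bL_switcher_write():

	/* user-space sketch, device path assumed */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/b.L_switcher", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* ask logical CPU 0 to switch to cluster 1 */
		if (write(fd, "0,1", 3) != 3)
			perror("write");
		close(fd);
		return 0;
	}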
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 8e1a0245907f..41bca32409fc 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -404,7 +404,7 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
BIT(slot));
if (edma_cc[ctlr]->intr_data[channel].callback)
edma_cc[ctlr]->intr_data[channel].callback(
- channel, DMA_COMPLETE,
+ channel, EDMA_DMA_COMPLETE,
edma_cc[ctlr]->intr_data[channel].data);
}
} while (sh_ipr);
@@ -459,7 +459,7 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
callback) {
edma_cc[ctlr]->intr_data[k].
callback(k,
- DMA_CC_ERROR,
+ EDMA_DMA_CC_ERROR,
edma_cc[ctlr]->intr_data
[k].data);
}
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 990250965f2c..26020a03f659 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -27,6 +27,18 @@ void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
+extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];
+
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+ unsigned long poke_phys_addr, unsigned long poke_val)
+{
+ unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
+ poke[0] = poke_phys_addr;
+ poke[1] = poke_val;
+ __cpuc_flush_dcache_area((void *)poke, 8);
+ outer_clean_range(__pa(poke), __pa(poke + 2));
+}
+
static const struct mcpm_platform_ops *platform_ops;
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
@@ -90,6 +102,21 @@ void mcpm_cpu_power_down(void)
BUG();
}
+int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster)
+{
+ int ret;
+
+ if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish))
+ return -EUNATCH;
+
+ ret = platform_ops->power_down_finish(cpu, cluster);
+ if (ret)
+ pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
+ __func__, cpu, cluster, ret);
+
+ return ret;
+}
+
void mcpm_cpu_suspend(u64 expected_residency)
{
phys_reset_t phys_reset;
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
index 39c96df3477a..e02db4b81a66 100644
--- a/arch/arm/common/mcpm_head.S
+++ b/arch/arm/common/mcpm_head.S
@@ -15,6 +15,7 @@
#include <linux/linkage.h>
#include <asm/mcpm.h>
+#include <asm/assembler.h>
#include "vlock.h"
@@ -47,6 +48,7 @@
ENTRY(mcpm_entry_point)
+ ARM_BE8(setend be)
THUMB( adr r12, BSYM(1f) )
THUMB( bx r12 )
THUMB( .thumb )
@@ -71,12 +73,19 @@ ENTRY(mcpm_entry_point)
* position independent way.
*/
adr r5, 3f
- ldmia r5, {r6, r7, r8, r11}
+ ldmia r5, {r0, r6, r7, r8, r11}
+ add r0, r5, r0 @ r0 = mcpm_entry_early_pokes
add r6, r5, r6 @ r6 = mcpm_entry_vectors
ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys
add r8, r5, r8 @ r8 = mcpm_sync
add r11, r5, r11 @ r11 = first_man_locks
+ @ Perform an early poke, if any
+ add r0, r0, r4, lsl #3
+ ldmia r0, {r0, r1}
+ teq r0, #0
+ strne r1, [r0]
+
mov r0, #MCPM_SYNC_CLUSTER_SIZE
mla r8, r0, r10, r8 @ r8 = sync cluster base
@@ -195,7 +204,8 @@ mcpm_entry_gated:
.align 2
-3: .word mcpm_entry_vectors - .
+3: .word mcpm_entry_early_pokes - .
+ .word mcpm_entry_vectors - 3b
.word mcpm_power_up_setup_phys - 3b
.word mcpm_sync - 3b
.word first_man_locks - 3b
@@ -214,6 +224,10 @@ first_man_locks:
ENTRY(mcpm_entry_vectors)
.space 4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+ .type mcpm_entry_early_pokes, #object
+ENTRY(mcpm_entry_early_pokes)
+ .space 8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+
.type mcpm_power_up_setup_phys, #object
ENTRY(mcpm_power_up_setup_phys)
.space 4 @ set by mcpm_sync_init()
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 1bc34c7567fd..177251a4dd9a 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -19,14 +19,23 @@
#include <asm/smp.h>
#include <asm/smp_plat.h>
+static void cpu_to_pcpu(unsigned int cpu,
+ unsigned int *pcpu, unsigned int *pcluster)
+{
+ unsigned int mpidr;
+
+ mpidr = cpu_logical_map(cpu);
+ *pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ *pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+}
+
static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- unsigned int mpidr, pcpu, pcluster, ret;
+ unsigned int pcpu, pcluster, ret;
extern void secondary_startup(void);
- mpidr = cpu_logical_map(cpu);
- pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ cpu_to_pcpu(cpu, &pcpu, &pcluster);
+
pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
__func__, cpu, pcpu, pcluster);
@@ -47,6 +56,15 @@ static void mcpm_secondary_init(unsigned int cpu)
#ifdef CONFIG_HOTPLUG_CPU
+static int mcpm_cpu_kill(unsigned int cpu)
+{
+ unsigned int pcpu, pcluster;
+
+ cpu_to_pcpu(cpu, &pcpu, &pcluster);
+
+ return !mcpm_cpu_power_down_finish(pcpu, pcluster);
+}
+
static int mcpm_cpu_disable(unsigned int cpu)
{
/*
@@ -73,6 +91,7 @@ static struct smp_operations __initdata mcpm_smp_ops = {
.smp_boot_secondary = mcpm_boot_secondary,
.smp_secondary_init = mcpm_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_kill = mcpm_cpu_kill,
.cpu_disable = mcpm_cpu_disable,
.cpu_die = mcpm_cpu_die,
#endif
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
index e901d0f3e0bb..ce922d0ea7aa 100644
--- a/arch/arm/common/timer-sp.c
+++ b/arch/arm/common/timer-sp.c
@@ -175,7 +175,7 @@ static struct clock_event_device sp804_clockevent = {
static struct irqaction sp804_timer_irq = {
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = sp804_timer_interrupt,
.dev_id = &sp804_clockevent,
};
diff --git a/arch/arm/common/via82c505.c b/arch/arm/common/via82c505.c
deleted file mode 100644
index 6cb362e56d29..000000000000
--- a/arch/arm/common/via82c505.c
+++ /dev/null
@@ -1,83 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-
-
-#include <asm/mach/pci.h>
-
-#define MAX_SLOTS 7
-
-#define CONFIG_CMD(bus, devfn, where) (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3))
-
-static int
-via82c505_read_config(struct pci_bus *bus, unsigned int devfn, int where,
- int size, u32 *value)
-{
- outl(CONFIG_CMD(bus,devfn,where),0xCF8);
- switch (size) {
- case 1:
- *value=inb(0xCFC + (where&3));
- break;
- case 2:
- *value=inw(0xCFC + (where&2));
- break;
- case 4:
- *value=inl(0xCFC);
- break;
- }
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int
-via82c505_write_config(struct pci_bus *bus, unsigned int devfn, int where,
- int size, u32 value)
-{
- outl(CONFIG_CMD(bus,devfn,where),0xCF8);
- switch (size) {
- case 1:
- outb(value, 0xCFC + (where&3));
- break;
- case 2:
- outw(value, 0xCFC + (where&2));
- break;
- case 4:
- outl(value, 0xCFC);
- break;
- }
- return PCIBIOS_SUCCESSFUL;
-}
-
-struct pci_ops via82c505_ops = {
- .read = via82c505_read_config,
- .write = via82c505_write_config,
-};
-
-void __init via82c505_preinit(void)
-{
- printk(KERN_DEBUG "PCI: VIA 82c505\n");
- if (!request_region(0xA8,2,"via config")) {
- printk(KERN_WARNING"VIA 82c505: Unable to request region 0xA8\n");
- return;
- }
- if (!request_region(0xCF8,8,"pci config")) {
- printk(KERN_WARNING"VIA 82c505: Unable to request region 0xCF8\n");
- release_region(0xA8, 2);
- return;
- }
-
- /* Enable compatible Mode */
- outb(0x96,0xA8);
- outb(0x18,0xA9);
- outb(0x93,0xA8);
- outb(0xd0,0xA9);
-
-}
-
-int __init via82c505_setup(int nr, struct pci_sys_data *sys)
-{
- return (nr == 0);
-}
diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig
index 6e4931097dd4..287ac1d7aac7 100644
--- a/arch/arm/configs/bcm_defconfig
+++ b/arch/arm/configs/bcm_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
@@ -25,10 +24,9 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
CONFIG_ARCH_BCM=y
+CONFIG_ARCH_BCM_MOBILE=y
CONFIG_ARM_THUMBEE=y
-CONFIG_ARM_ERRATA_743622=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
# CONFIG_OABI_COMPAT is not set
@@ -50,7 +48,6 @@ CONFIG_UNIX_DIAG=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
CONFIG_SYN_COOKIES=y
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6=y
@@ -95,7 +92,6 @@ CONFIG_MMC_UNSAFE_RESUME=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_TEST=y
CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_BCM_KONA=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@@ -117,12 +113,12 @@ CONFIG_CONFIGFS_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
-CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=110
CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
-CONFIG_DEBUG_INFO=y
# CONFIG_FTRACE is not set
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=y
diff --git a/arch/arm/configs/bockw_defconfig b/arch/arm/configs/bockw_defconfig
index e7e94948d194..b38cd107f82d 100644
--- a/arch/arm/configs/bockw_defconfig
+++ b/arch/arm/configs/bockw_defconfig
@@ -91,6 +91,10 @@ CONFIG_VIDEO_RCAR_VIN=y
CONFIG_VIDEO_ML86V7667=y
CONFIG_SPI=y
CONFIG_SPI_SH_HSPI=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_RCAR=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_EHCI_HCD=y
diff --git a/arch/arm/configs/efm32_defconfig b/arch/arm/configs/efm32_defconfig
new file mode 100644
index 000000000000..f59fffb3d0c6
--- /dev/null
+++ b/arch/arm/configs/efm32_defconfig
@@ -0,0 +1,102 @@
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=12
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_UID16 is not set
+# CONFIG_BASE_FULL is not set
+# CONFIG_FUTEX is not set
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_MMU is not set
+CONFIG_ARCH_EFM32=y
+# CONFIG_KUSER_HELPERS is not set
+CONFIG_SET_MEM_PARAM=y
+CONFIG_DRAM_BASE=0x88000000
+CONFIG_DRAM_SIZE=0x00400000
+CONFIG_FLASH_MEM_BASE=0x8c000000
+CONFIG_FLASH_SIZE=0x01000000
+CONFIG_PREEMPT=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_XIP_KERNEL=y
+CONFIG_XIP_PHYS_ADDR=0x8c000000
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_SHARED_FLAT=y
+# CONFIG_COREDUMP is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK_RO=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_UCLINUX=y
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_KS8851=y
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_EFM32_UART=y
+CONFIG_SERIAL_EFM32_UART_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_SPI=y
+CONFIG_SPI_EFM32=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+# CONFIG_FILE_LOCKING is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_ROMFS_FS=y
+CONFIG_ROMFS_BACKED_BY_MTD=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arm/configs/genmai_defconfig b/arch/arm/configs/genmai_defconfig
new file mode 100644
index 000000000000..69b1531a4c80
--- /dev/null
+++ b/arch/arm/configs/genmai_defconfig
@@ -0,0 +1,116 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_SHMOBILE=y
+CONFIG_ARCH_R7S72100=y
+CONFIG_MACH_GENMAI=y
+# CONFIG_SH_TIMER_CMT is not set
+# CONFIG_SH_TIMER_MTU2 is not set
+# CONFIG_SH_TIMER_TMU is not set
+# CONFIG_EM_TIMER_STI is not set
+CONFIG_ARM_ERRATA_430973=y
+CONFIG_ARM_ERRATA_458693=y
+CONFIG_ARM_ERRATA_460075=y
+CONFIG_ARM_ERRATA_743622=y
+CONFIG_ARM_ERRATA_754322=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_FORCE_MAX_ZONEORDER=13
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_KEXEC=y
+CONFIG_AUTO_ZRELADDR=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_NETDEVICES=y
+# CONFIG_NET_CORE is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_SH_ETH=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=10
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_SH_MOBILE=y
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_RCAR_THERMAL=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_DRM=y
+CONFIG_DRM_RCAR_DU=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_SH_MMCIF=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_SH_DMAE=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_ARM_UNWIND is not set
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/configs/h3600_defconfig b/arch/arm/configs/h3600_defconfig
index 317960f12488..0142ec37e0be 100644
--- a/arch/arm/configs/h3600_defconfig
+++ b/arch/arm/configs/h3600_defconfig
@@ -1,5 +1,6 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
@@ -11,11 +12,11 @@ CONFIG_ARCH_SA1100=y
CONFIG_SA1100_H3600=y
CONFIG_PCCARD=y
CONFIG_PCMCIA_SA1100=y
+CONFIG_PREEMPT=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_INET=y
@@ -24,13 +25,10 @@ CONFIG_IRDA=m
CONFIG_IRLAN=m
CONFIG_IRNET=m
CONFIG_IRCOMM=m
-CONFIG_SA1100_FIR=m
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_REDBOOT_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -41,19 +39,15 @@ CONFIG_MTD_SA1100=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-# CONFIG_MISC_DEVICES is not set
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECS=y
CONFIG_NETDEVICES=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_WLAN is not set
-CONFIG_NET_PCMCIA=y
CONFIG_PCMCIA_PCNET=y
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_ASYNC=m
+# CONFIG_WLAN is not set
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
@@ -64,8 +58,6 @@ CONFIG_SERIAL_SA1100_CONSOLE=y
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_SA1100=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_MSDOS_FS=m
@@ -74,6 +66,4 @@ CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=m
CONFIG_NFS_FS=y
CONFIG_NFSD=m
-CONFIG_SMB_FS=m
CONFIG_NLS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index e958ebe79779..6309ee52ccfc 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -91,6 +91,7 @@ CONFIG_SMSC911X=y
CONFIG_SMSC_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
CONFIG_KEYBOARD_IMX=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
@@ -118,6 +119,7 @@ CONFIG_IMX2_WDT=y
CONFIG_MFD_MC13XXX_SPI=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
CONFIG_REGULATOR_MC13783=y
CONFIG_REGULATOR_MC13892=y
CONFIG_MEDIA_SUPPORT=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 5d488c24b132..c814e0e96034 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -129,10 +129,10 @@ CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_EGALAX=y
CONFIG_TOUCHSCREEN_MC13783=y
+CONFIG_TOUCHSCREEN_TSC2007=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_MMA8450=y
CONFIG_SERIO_SERPORT=m
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_IMX=y
@@ -188,22 +188,33 @@ CONFIG_SND_SOC_PHYCORE_AC97=y
CONFIG_SND_SOC_EUKREA_TLV320=y
CONFIG_SND_SOC_IMX_WM8962=y
CONFIG_SND_SOC_IMX_SGTL5000=y
+CONFIG_SND_SOC_IMX_SPDIF=y
CONFIG_SND_SOC_IMX_MC13783=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_MXC=y
CONFIG_USB_STORAGE=y
CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
-CONFIG_USB_PHY=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_MXS_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_ETH=m
+CONFIG_USB_MASS_STORAGE=m
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_ESDHC_IMX=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_GPIO=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_MC13XXX=y
@@ -246,7 +257,6 @@ CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=m
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
@@ -261,6 +271,7 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
+CONFIG_PROVE_LOCKING=y
# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_FTRACE is not set
# CONFIG_ARM_UNWIND is not set
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index a8314c3ee84d..5bae19557591 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -1,15 +1,17 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
-CONFIG_TINY_RCU=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_INTEGRATOR=y
CONFIG_ARCH_INTEGRATOR_AP=y
CONFIG_ARCH_INTEGRATOR_CP=y
+CONFIG_INTEGRATOR_IMPD1=y
CONFIG_CPU_ARM720T=y
CONFIG_CPU_ARM920T=y
CONFIG_CPU_ARM922T=y
@@ -18,12 +20,9 @@ CONFIG_CPU_ARM1020=y
CONFIG_CPU_ARM1022=y
CONFIG_CPU_ARM1026=y
CONFIG_PCI=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
+# CONFIG_ATAGS is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="console=ttyAM0,38400n8 root=/dev/nfs ip=bootp"
@@ -44,24 +43,20 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_AFS_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP=y
+CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
CONFIG_E100=y
CONFIG_SMC91X=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIAL_AMBA_PL010=y
-CONFIG_SERIAL_AMBA_PL010_CONSOLE=y
CONFIG_FB=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_ARMCLCD=y
@@ -71,19 +66,23 @@ CONFIG_FB_MATROX_MYSTIQUE=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PL030=y
+CONFIG_COMMON_CLK_DEBUG=y
CONFIG_EXT2_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 1f36b823905f..9943e5da74f1 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -123,7 +123,9 @@ CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_I2C=y
# CONFIG_I2C_COMPAT is not set
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DAVINCI=y
CONFIG_SPI=y
+CONFIG_SPI_DAVINCI=y
CONFIG_SPI_SPIDEV=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
diff --git a/arch/arm/configs/koelsch_defconfig b/arch/arm/configs/koelsch_defconfig
new file mode 100644
index 000000000000..825c16dee8a0
--- /dev/null
+++ b/arch/arm/configs/koelsch_defconfig
@@ -0,0 +1,54 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+# CONFIG_BLOCK is not set
+CONFIG_ARCH_SHMOBILE=y
+CONFIG_ARCH_R8A7791=y
+CONFIG_MACH_KOELSCH=y
+# CONFIG_SWP_EMULATE is not set
+CONFIG_CPU_BPREDICT_DISABLE=y
+CONFIG_PL310_ERRATA_588369=y
+CONFIG_ARM_ERRATA_754322=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_AEABI=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_KEXEC=y
+CONFIG_AUTO_ZRELADDR=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_RUNTIME=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=20
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_RCAR_THERMAL=y
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_ARM_UNWIND is not set
diff --git a/arch/arm/configs/lager_defconfig b/arch/arm/configs/lager_defconfig
index e777ef22b801..35bff5e0d57a 100644
--- a/arch/arm/configs/lager_defconfig
+++ b/arch/arm/configs/lager_defconfig
@@ -89,6 +89,8 @@ CONFIG_THERMAL=y
CONFIG_RCAR_THERMAL=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_DRM=y
+CONFIG_DRM_RCAR_DU=y
# CONFIG_USB_SUPPORT is not set
CONFIG_MMC=y
CONFIG_MMC_SDHI=y
diff --git a/arch/arm/configs/marzen_defconfig b/arch/arm/configs/marzen_defconfig
index 000e9205b2b9..5cc6360340b1 100644
--- a/arch/arm/configs/marzen_defconfig
+++ b/arch/arm/configs/marzen_defconfig
@@ -92,6 +92,8 @@ CONFIG_SOC_CAMERA=y
CONFIG_VIDEO_RCAR_VIN=y
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_VIDEO_ADV7180=y
+CONFIG_DRM=y
+CONFIG_DRM_RCAR_DU=y
CONFIG_USB=y
CONFIG_USB_RCAR_PHY=y
CONFIG_MMC=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 119fc378fc52..4a5903e04827 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -6,6 +6,7 @@ CONFIG_ARCH_MVEBU=y
CONFIG_MACH_ARMADA_370=y
CONFIG_MACH_ARMADA_XP=y
CONFIG_ARCH_BCM=y
+CONFIG_ARCH_BCM_MOBILE=y
CONFIG_GPIO_PCA953X=y
CONFIG_ARCH_HIGHBANK=y
CONFIG_ARCH_KEYSTONE=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 4555c025629a..6150108e15de 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -76,7 +76,6 @@ CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_TSC2007=m
# CONFIG_SERIO is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
@@ -91,7 +90,6 @@ CONFIG_I2C_MXS=y
CONFIG_SPI=y
CONFIG_SPI_GPIO=m
CONFIG_SPI_MXS=y
-CONFIG_DEBUG_GPIO=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
@@ -115,9 +113,12 @@ CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
-CONFIG_USB_PHY=y
CONFIG_USB_MXS_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_ETH=m
+CONFIG_USB_MASS_STORAGE=m
CONFIG_MMC=y
CONFIG_MMC_UNSAFE_RESUME=y
CONFIG_MMC_MXS=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 254cf0539439..98a50c309b90 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -1,14 +1,13 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
@@ -20,22 +19,21 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_MULTI_V6=y
-CONFIG_ARCH_OMAP2PLUS=y
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP_MUX_DEBUG=y
CONFIG_ARCH_OMAP2=y
CONFIG_ARCH_OMAP3=y
CONFIG_ARCH_OMAP4=y
+CONFIG_SOC_OMAP5=y
CONFIG_SOC_AM33XX=y
-CONFIG_OMAP_RESET_CLOCKS=y
-CONFIG_OMAP_MUX_DEBUG=y
-CONFIG_ARCH_VEXPRESS_CA9X4=y
+CONFIG_SOC_DRA7XX=y
CONFIG_ARM_THUMBEE=y
CONFIG_ARM_ERRATA_411920=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_LEDS=y
+CONFIG_CMA=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
@@ -61,8 +59,6 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_CAN=m
-CONFIG_CAN_RAW=m
-CONFIG_CAN_BCM=m
CONFIG_CAN_C_CAN=m
CONFIG_CAN_C_CAN_PLATFORM=m
CONFIG_BT=m
@@ -77,14 +73,13 @@ CONFIG_MAC80211=m
CONFIG_MAC80211_RC_PID=y
CONFIG_MAC80211_RC_DEFAULT_PID=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_CMA=y
-CONFIG_DMA_CMA=y
-CONFIG_CONNECTOR=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_OMAP_OCP2SCP=y
+CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_OOPS=y
CONFIG_MTD_CFI=y
@@ -98,32 +93,40 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_SENSORS_LIS3LV02D=m
CONFIG_SENSORS_TSL2550=m
-CONFIG_SENSORS_LIS3_I2C=m
CONFIG_BMP085_I2C=m
+CONFIG_SENSORS_LIS3_I2C=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_MD=y
CONFIG_NETDEVICES=y
-CONFIG_SMSC_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
-CONFIG_SMSC911X=y
CONFIG_KS8851=y
CONFIG_KS8851_MLL=y
-CONFIG_LIBERTAS=m
-CONFIG_LIBERTAS_USB=m
-CONFIG_LIBERTAS_SDIO=m
-CONFIG_LIBERTAS_DEBUG=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_TI_CPSW=y
+CONFIG_AT803X_PHY=y
+CONFIG_SMSC_PHY=y
CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC95XX=y
CONFIG_USB_ALI_M5632=y
CONFIG_USB_AN2720=y
CONFIG_USB_EPSON2888=y
CONFIG_USB_KC2190=y
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+CONFIG_LIBERTAS_SDIO=m
+CONFIG_LIBERTAS_DEBUG=y
+CONFIG_WL_TI=y
+CONFIG_WL12XX=m
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SPI=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+CONFIG_MWIFIEX_USB=m
CONFIG_INPUT_JOYDEV=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
@@ -133,7 +136,6 @@ CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ADS7846=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_TWL4030_PWRBUTTON=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
@@ -143,8 +145,7 @@ CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_AMBA_PL011=y
-CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_OMAP=y
CONFIG_SERIAL_OMAP_CONSOLE=y
CONFIG_HW_RANDOM=y
@@ -158,31 +159,31 @@ CONFIG_GPIO_TWL4030=y
CONFIG_W1=y
CONFIG_POWER_SUPPLY=y
CONFIG_SENSORS_LM75=m
-CONFIG_WATCHDOG=y
CONFIG_THERMAL=y
-CONFIG_THERMAL_HWMON=y
-CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
CONFIG_THERMAL_GOV_FAIR_SHARE=y
-CONFIG_THERMAL_GOV_STEP_WISE=y
CONFIG_THERMAL_GOV_USER_SPACE=y
-CONFIG_CPU_THERMAL=y
+CONFIG_TI_SOC_THERMAL=y
+CONFIG_OMAP4_THERMAL=y
+CONFIG_OMAP5_THERMAL=y
+CONFIG_DRA752_THERMAL=y
+CONFIG_WATCHDOG=y
CONFIG_OMAP_WATCHDOG=y
CONFIG_TWL4030_WATCHDOG=y
+CONFIG_MFD_PALMAS=y
CONFIG_MFD_TPS65217=y
CONFIG_MFD_TPS65910=y
CONFIG_TWL6040_CORE=y
-CONFIG_REGULATOR_TWL4030=y
+CONFIG_REGULATOR_PALMAS=y
CONFIG_REGULATOR_TPS65023=y
CONFIG_REGULATOR_TPS6507X=y
CONFIG_REGULATOR_TPS65217=y
CONFIG_REGULATOR_TPS65910=y
+CONFIG_REGULATOR_TWL4030=y
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
-CONFIG_FB_OMAP_LCD_VGA=y
CONFIG_OMAP2_DSS=m
-CONFIG_OMAP2_DSS_RFBI=y
CONFIG_OMAP2_DSS_SDI=y
CONFIG_OMAP2_DSS_DSI=y
CONFIG_FB_OMAP2=m
@@ -194,12 +195,8 @@ CONFIG_DISPLAY_PANEL_DPI=m
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=y
-CONFIG_DISPLAY_SUPPORT=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
CONFIG_LOGO=y
CONFIG_SOUND=m
CONFIG_SND=m
@@ -216,14 +213,14 @@ CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
CONFIG_USB=y
CONFIG_USB_DEBUG=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_WDM=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_LIBUSUAL=y
+CONFIG_USB_DWC3=m
CONFIG_USB_TEST=y
-CONFIG_USB_PHY=y
CONFIG_NOP_USB_XCEIV=y
+CONFIG_OMAP_USB2=y
+CONFIG_OMAP_USB3=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG=y
CONFIG_USB_GADGET_DEBUG_FILES=y
@@ -232,7 +229,6 @@ CONFIG_USB_ZERO=m
CONFIG_MMC=y
CONFIG_MMC_UNSAFE_RESUME=y
CONFIG_SDIO_UART=y
-CONFIG_MMC_ARMMMCI=y
CONFIG_MMC_OMAP=y
CONFIG_MMC_OMAP_HS=y
CONFIG_NEW_LEDS=y
@@ -252,11 +248,8 @@ CONFIG_RTC_DRV_OMAP=y
CONFIG_DMADEVICES=y
CONFIG_TI_EDMA=y
CONFIG_DMA_OMAP=y
-CONFIG_TI_SOC_THERMAL=y
-CONFIG_TI_THERMAL=y
-CONFIG_OMAP4_THERMAL=y
-CONFIG_OMAP5_THERMAL=y
-CONFIG_DRA752_THERMAL=y
+CONFIG_EXTCON=y
+CONFIG_EXTCON_PALMAS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set
@@ -275,23 +268,18 @@ CONFIG_JFFS2_RUBIN=y
CONFIG_UBIFS_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_PROVE_LOCKING=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SECURITY=y
CONFIG_CRYPTO_MICHAEL_MIC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
@@ -300,9 +288,6 @@ CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=y
CONFIG_CRC7=y
CONFIG_LIBCRC32C=y
-CONFIG_SOC_OMAP5=y
-CONFIG_TI_DAVINCI_MDIO=y
-CONFIG_TI_DAVINCI_CPDMA=y
-CONFIG_TI_CPSW=y
-CONFIG_AT803X_PHY=y
-CONFIG_SOC_DRA7XX=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig
index 002a1ceadceb..23591dba47a0 100644
--- a/arch/arm/configs/prima2_defconfig
+++ b/arch/arm/configs/prima2_defconfig
@@ -39,6 +39,7 @@ CONFIG_SPI=y
CONFIG_SPI_SIRF=y
CONFIG_SPI_SPIDEV=y
# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
CONFIG_USB_GADGET=y
CONFIG_USB_MASS_STORAGE=m
CONFIG_MMC=y
diff --git a/arch/arm/configs/shark_defconfig b/arch/arm/configs/shark_defconfig
deleted file mode 100644
index e319b2c56f11..000000000000
--- a/arch/arm/configs/shark_defconfig
+++ /dev/null
@@ -1,80 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_SHARK=y
-CONFIG_LEDS=y
-CONFIG_LEDS_TIMER=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_STANDALONE is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_PARPORT=m
-CONFIG_PARPORT_PC=m
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_SCSI=m
-CONFIG_BLK_DEV_SD=m
-CONFIG_CHR_DEV_ST=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_CHR_DEV_SG=m
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_CS89x0=y
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_PRINTER=m
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_CYBER2000=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_SOUND=m
-CONFIG_SOUND_PRIME=m
-CONFIG_SOUND_OSS=m
-CONFIG_SOUND_SB=m
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_CMOS=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFSD=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_ISO8859_1=m
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_DEBUG_USER=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
new file mode 100644
index 000000000000..d57a85badb5e
--- /dev/null
+++ b/arch/arm/configs/sunxi_defconfig
@@ -0,0 +1,61 @@
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_SMP=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_HIGHPTE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_NETDEVICES=y
+CONFIG_SUN4I_EMAC=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_RUNTIME_UARTS=8
+CONFIG_SERIAL_8250_DW=y
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_SUNXI_WATCHDOG=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_COMMON_CLK_DEBUG=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_NLS=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index ea042e80e54d..4934295bb4f0 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -27,6 +27,7 @@ CONFIG_ARCH_TEGRA=y
CONFIG_ARCH_TEGRA_2x_SOC=y
CONFIG_ARCH_TEGRA_3x_SOC=y
CONFIG_ARCH_TEGRA_114_SOC=y
+CONFIG_ARCH_TEGRA_124_SOC=y
CONFIG_TEGRA_EMC_SCALING_ENABLE=y
CONFIG_PCI=y
CONFIG_PCI_MSI=y
@@ -41,9 +42,11 @@ CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_KEXEC=y
CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
+CONFIG_NEON=y
CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -129,6 +132,7 @@ CONFIG_SPI=y
CONFIG_SPI_TEGRA114=y
CONFIG_SPI_TEGRA20_SFLASH=y
CONFIG_SPI_TEGRA20_SLINK=y
+CONFIG_PINCTRL_PALMAS=y
CONFIG_GPIO_PCA953X_IRQ=y
CONFIG_GPIO_PALMAS=y
CONFIG_GPIO_TPS6586X=y
@@ -223,6 +227,7 @@ CONFIG_KEYBOARD_NVEC=y
CONFIG_SERIO_NVEC_PS2=y
CONFIG_NVEC_POWER=y
CONFIG_NVEC_PAZ00=y
+CONFIG_COMMON_CLK_DEBUG=y
CONFIG_TEGRA_IOMMU_GART=y
CONFIG_TEGRA_IOMMU_SMMU=y
CONFIG_MEMORY=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index a0025dc13021..ac632cc38f24 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -1,4 +1,3 @@
-CONFIG_HIGHMEM=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
@@ -16,6 +15,9 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_PREEMPT=y
CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8"
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
@@ -68,8 +70,8 @@ CONFIG_CPU_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_MFD_STMPE=y
CONFIG_MFD_TC3589X=y
-CONFIG_REGULATOR_GPIO=y
CONFIG_REGULATOR_AB8500=y
+CONFIG_REGULATOR_GPIO=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
@@ -78,10 +80,8 @@ CONFIG_SND_SOC_UX500_MACH_MOP500=y
CONFIG_USB=y
CONFIG_USB_MUSB_HDRC=y
CONFIG_USB_MUSB_UX500=y
-CONFIG_USB_PHY=y
CONFIG_AB8500_USB=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_MUSB_HDRC=y
CONFIG_USB_ETH=m
CONFIG_MMC=y
CONFIG_MMC_UNSAFE_RESUME=y
@@ -116,12 +116,12 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
-CONFIG_DEBUG_INFO=y
# CONFIG_FTRACE is not set
CONFIG_DEBUG_USER=y
CONFIG_CRYPTO_DEV_UX500=y
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
index f2de51f0bd18..f489fdaa19b8 100644
--- a/arch/arm/configs/vexpress_defconfig
+++ b/arch/arm/configs/vexpress_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
@@ -8,11 +7,9 @@ CONFIG_CGROUPS=y
CONFIG_CPUSETS=y
# CONFIG_UTS_NS is not set
# CONFIG_IPC_NS is not set
-# CONFIG_USER_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_MODULES=y
@@ -23,14 +20,22 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_VEXPRESS_CA9X4=y
+CONFIG_ARCH_VEXPRESS_DCSCB=y
+CONFIG_ARCH_VEXPRESS_TC2_PM=y
# CONFIG_SWP_EMULATE is not set
CONFIG_SMP=y
+CONFIG_HAVE_ARM_ARCH_TIMER=y
+CONFIG_MCPM=y
CONFIG_VMSPLIT_2G=y
-CONFIG_HOTPLUG_CPU=y
+CONFIG_NR_CPUS=8
+CONFIG_ARM_PSCI=y
CONFIG_AEABI=y
+CONFIG_CMA=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/nfs nfsroot=10.1.69.3:/work/nfsroot ip=dhcp console=ttyAMA0 mem=128M"
+CONFIG_CMDLINE="console=ttyAMA0"
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y
CONFIG_VFP=y
CONFIG_NEON=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -44,37 +49,46 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_ARM_INTEGRATOR=y
-CONFIG_MISC_DEVICES=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_UBI=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_VIRTIO_BLK=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
-# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_SCSI_VIRTIO=y
CONFIG_ATA=y
# CONFIG_SATA_PMP is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
+CONFIG_VIRTIO_NET=y
+CONFIG_SMC91X=y
CONFIG_SMSC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_I2C=y
+CONFIG_I2C_VERSATILE=y
+CONFIG_SENSORS_VEXPRESS=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_VEXPRESS=y
CONFIG_FB=y
CONFIG_FB_ARMCLCD=y
CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -103,38 +117,45 @@ CONFIG_HID_THRUSTMASTER=y
CONFIG_HID_ZEROPLUS=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_ISP1760_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PL031=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
+CONFIG_UBIFS_FS=y
CONFIG_CRAMFS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_LZO=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
+CONFIG_9P_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
-CONFIG_DEBUG_LL=y
-CONFIG_EARLY_PRINTK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore
new file mode 100644
index 000000000000..6231d36b3635
--- /dev/null
+++ b/arch/arm/crypto/.gitignore
@@ -0,0 +1 @@
+aesbs-core.S
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index a2c83851bc90..81cda39860c5 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -3,7 +3,17 @@
#
obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
+obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
-aes-arm-y := aes-armv4.o aes_glue.o
-sha1-arm-y := sha1-armv4-large.o sha1_glue.o
+aes-arm-y := aes-armv4.o aes_glue.o
+aes-arm-bs-y := aesbs-core.o aesbs-glue.o
+sha1-arm-y := sha1-armv4-large.o sha1_glue.o
+
+quiet_cmd_perl = PERL $@
+ cmd_perl = $(PERL) $(<) > $(@)
+
+$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
+ $(call cmd,perl)
+
+.PRECIOUS: $(obj)/aesbs-core.S
diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c
index 59f7877ead6a..3003fa1f6fb4 100644
--- a/arch/arm/crypto/aes_glue.c
+++ b/arch/arm/crypto/aes_glue.c
@@ -6,22 +6,12 @@
#include <linux/crypto.h>
#include <crypto/aes.h>
-#define AES_MAXNR 14
+#include "aes_glue.h"
-typedef struct {
- unsigned int rd_key[4 *(AES_MAXNR + 1)];
- int rounds;
-} AES_KEY;
-
-struct AES_CTX {
- AES_KEY enc_key;
- AES_KEY dec_key;
-};
-
-asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
-asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
+EXPORT_SYMBOL(AES_encrypt);
+EXPORT_SYMBOL(AES_decrypt);
+EXPORT_SYMBOL(private_AES_set_encrypt_key);
+EXPORT_SYMBOL(private_AES_set_decrypt_key);
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
@@ -81,7 +71,7 @@ static struct crypto_alg aes_alg = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = aes_set_key,
+ .cia_setkey = aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt
}
diff --git a/arch/arm/crypto/aes_glue.h b/arch/arm/crypto/aes_glue.h
new file mode 100644
index 000000000000..cca3e51eb606
--- /dev/null
+++ b/arch/arm/crypto/aes_glue.h
@@ -0,0 +1,19 @@
+
+#define AES_MAXNR 14
+
+struct AES_KEY {
+ unsigned int rd_key[4 * (AES_MAXNR + 1)];
+ int rounds;
+};
+
+struct AES_CTX {
+ struct AES_KEY enc_key;
+ struct AES_KEY dec_key;
+};
+
+asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey,
+ const int bits, struct AES_KEY *key);
+asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey,
+ const int bits, struct AES_KEY *key);
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
new file mode 100644
index 000000000000..64205d453260
--- /dev/null
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -0,0 +1,2544 @@
+
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+@ <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+@ granted.
+@ ====================================================================
+
+@ Bit-sliced AES for ARM NEON
+@
+@ February 2012.
+@
+@ This implementation is direct adaptation of bsaes-x86_64 module for
+@ ARM NEON. Except that this module is endian-neutral [in sense that
+@ it can be compiled for either endianness] by courtesy of vld1.8's
+@ neutrality. Initial version doesn't implement interface to OpenSSL,
+@ only low-level primitives and unsupported entry points, just enough
+@ to collect performance results, which for Cortex-A8 core are:
+@
+@ encrypt 19.5 cycles per byte processed with 128-bit key
+@ decrypt 22.1 cycles per byte processed with 128-bit key
+@ key conv. 440 cycles per 128-bit key/0.18 of 8x block
+@
+@ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7,
+@ which is [much] worse than anticipated (for further details see
+@ http://www.openssl.org/~appro/Snapdragon-S4.html).
+@
+@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+@ manages in 20.0 cycles].
+@
+@ When comparing to x86_64 results keep in mind that NEON unit is
+@ [mostly] single-issue and thus can't [fully] benefit from
+@ instruction-level parallelism. And when comparing to aes-armv4
+@ results keep in mind key schedule conversion overhead (see
+@ bsaes-x86_64.pl for further details)...
+@
+@ <appro@openssl.org>
+
+@ April-August 2013
+@
+@ Add CBC, CTR and XTS subroutines, adapt for kernel use.
+@
+@ <ard.biesheuvel@linaro.org>
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
+# define VFP_ABI_POP vldmia sp!,{d8-d15}
+# define VFP_ABI_FRAME 0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME 0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax unified @ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code 32
+#endif
+
+.fpu neon
+
+.type _bsaes_decrypt8,%function
+.align 4
+_bsaes_decrypt8:
+ adr r6,_bsaes_decrypt8
+ vldmia r4!, {q9} @ round 0 key
+ add r6,r6,#.LM0ISR-_bsaes_decrypt8
+
+ vldmia r6!, {q8} @ .LM0ISR
+ veor q10, q0, q9 @ xor with round0 key
+ veor q11, q1, q9
+ vtbl.8 d0, {q10}, d16
+ vtbl.8 d1, {q10}, d17
+ veor q12, q2, q9
+ vtbl.8 d2, {q11}, d16
+ vtbl.8 d3, {q11}, d17
+ veor q13, q3, q9
+ vtbl.8 d4, {q12}, d16
+ vtbl.8 d5, {q12}, d17
+ veor q14, q4, q9
+ vtbl.8 d6, {q13}, d16
+ vtbl.8 d7, {q13}, d17
+ veor q15, q5, q9
+ vtbl.8 d8, {q14}, d16
+ vtbl.8 d9, {q14}, d17
+ veor q10, q6, q9
+ vtbl.8 d10, {q15}, d16
+ vtbl.8 d11, {q15}, d17
+ veor q11, q7, q9
+ vtbl.8 d12, {q10}, d16
+ vtbl.8 d13, {q10}, d17
+ vtbl.8 d14, {q11}, d16
+ vtbl.8 d15, {q11}, d17
+ vmov.i8 q8,#0x55 @ compose .LBS0
+ vmov.i8 q9,#0x33 @ compose .LBS1
+ vshr.u64 q10, q6, #1
+ vshr.u64 q11, q4, #1
+ veor q10, q10, q7
+ veor q11, q11, q5
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #1
+ veor q5, q5, q11
+ vshl.u64 q11, q11, #1
+ veor q6, q6, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q2, #1
+ vshr.u64 q11, q0, #1
+ veor q10, q10, q3
+ veor q11, q11, q1
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q3, q3, q10
+ vshl.u64 q10, q10, #1
+ veor q1, q1, q11
+ vshl.u64 q11, q11, #1
+ veor q2, q2, q10
+ veor q0, q0, q11
+ vmov.i8 q8,#0x0f @ compose .LBS2
+ vshr.u64 q10, q5, #2
+ vshr.u64 q11, q4, #2
+ veor q10, q10, q7
+ veor q11, q11, q6
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #2
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #2
+ veor q5, q5, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q1, #2
+ vshr.u64 q11, q0, #2
+ veor q10, q10, q3
+ veor q11, q11, q2
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q3, q3, q10
+ vshl.u64 q10, q10, #2
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #2
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vshr.u64 q10, q3, #4
+ vshr.u64 q11, q2, #4
+ veor q10, q10, q7
+ veor q11, q11, q6
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #4
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #4
+ veor q3, q3, q10
+ veor q2, q2, q11
+ vshr.u64 q10, q1, #4
+ vshr.u64 q11, q0, #4
+ veor q10, q10, q5
+ veor q11, q11, q4
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #4
+ veor q4, q4, q11
+ vshl.u64 q11, q11, #4
+ veor q1, q1, q10
+ veor q0, q0, q11
+ sub r5,r5,#1
+ b .Ldec_sbox
+.align 4
+.Ldec_loop:
+ vldmia r4!, {q8-q11}
+ veor q8, q8, q0
+ veor q9, q9, q1
+ vtbl.8 d0, {q8}, d24
+ vtbl.8 d1, {q8}, d25
+ vldmia r4!, {q8}
+ veor q10, q10, q2
+ vtbl.8 d2, {q9}, d24
+ vtbl.8 d3, {q9}, d25
+ vldmia r4!, {q9}
+ veor q11, q11, q3
+ vtbl.8 d4, {q10}, d24
+ vtbl.8 d5, {q10}, d25
+ vldmia r4!, {q10}
+ vtbl.8 d6, {q11}, d24
+ vtbl.8 d7, {q11}, d25
+ vldmia r4!, {q11}
+ veor q8, q8, q4
+ veor q9, q9, q5
+ vtbl.8 d8, {q8}, d24
+ vtbl.8 d9, {q8}, d25
+ veor q10, q10, q6
+ vtbl.8 d10, {q9}, d24
+ vtbl.8 d11, {q9}, d25
+ veor q11, q11, q7
+ vtbl.8 d12, {q10}, d24
+ vtbl.8 d13, {q10}, d25
+ vtbl.8 d14, {q11}, d24
+ vtbl.8 d15, {q11}, d25
+.Ldec_sbox:
+ veor q1, q1, q4
+ veor q3, q3, q4
+
+ veor q4, q4, q7
+ veor q1, q1, q6
+ veor q2, q2, q7
+ veor q6, q6, q4
+
+ veor q0, q0, q1
+ veor q2, q2, q5
+ veor q7, q7, q6
+ veor q3, q3, q0
+ veor q5, q5, q0
+ veor q1, q1, q3
+ veor q11, q3, q0
+ veor q10, q7, q4
+ veor q9, q1, q6
+ veor q13, q4, q0
+ vmov q8, q10
+ veor q12, q5, q2
+
+ vorr q10, q10, q9
+ veor q15, q11, q8
+ vand q14, q11, q12
+ vorr q11, q11, q12
+ veor q12, q12, q9
+ vand q8, q8, q9
+ veor q9, q6, q2
+ vand q15, q15, q12
+ vand q13, q13, q9
+ veor q9, q3, q7
+ veor q12, q1, q5
+ veor q11, q11, q13
+ veor q10, q10, q13
+ vand q13, q9, q12
+ vorr q9, q9, q12
+ veor q11, q11, q15
+ veor q8, q8, q13
+ veor q10, q10, q14
+ veor q9, q9, q15
+ veor q8, q8, q14
+ vand q12, q4, q6
+ veor q9, q9, q14
+ vand q13, q0, q2
+ vand q14, q7, q1
+ vorr q15, q3, q5
+ veor q11, q11, q12
+ veor q9, q9, q14
+ veor q8, q8, q15
+ veor q10, q10, q13
+
+ @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
+
+ @ new smaller inversion
+
+ vand q14, q11, q9
+ vmov q12, q8
+
+ veor q13, q10, q14
+ veor q15, q8, q14
+ veor q14, q8, q14 @ q14=q15
+
+ vbsl q13, q9, q8
+ vbsl q15, q11, q10
+ veor q11, q11, q10
+
+ vbsl q12, q13, q14
+ vbsl q8, q14, q13
+
+ vand q14, q12, q15
+ veor q9, q9, q8
+
+ veor q14, q14, q11
+ veor q12, q5, q2
+ veor q8, q1, q6
+ veor q10, q15, q14
+ vand q10, q10, q5
+ veor q5, q5, q1
+ vand q11, q1, q15
+ vand q5, q5, q14
+ veor q1, q11, q10
+ veor q5, q5, q11
+ veor q15, q15, q13
+ veor q14, q14, q9
+ veor q11, q15, q14
+ veor q10, q13, q9
+ vand q11, q11, q12
+ vand q10, q10, q2
+ veor q12, q12, q8
+ veor q2, q2, q6
+ vand q8, q8, q15
+ vand q6, q6, q13
+ vand q12, q12, q14
+ vand q2, q2, q9
+ veor q8, q8, q12
+ veor q2, q2, q6
+ veor q12, q12, q11
+ veor q6, q6, q10
+ veor q5, q5, q12
+ veor q2, q2, q12
+ veor q1, q1, q8
+ veor q6, q6, q8
+
+ veor q12, q3, q0
+ veor q8, q7, q4
+ veor q11, q15, q14
+ veor q10, q13, q9
+ vand q11, q11, q12
+ vand q10, q10, q0
+ veor q12, q12, q8
+ veor q0, q0, q4
+ vand q8, q8, q15
+ vand q4, q4, q13
+ vand q12, q12, q14
+ vand q0, q0, q9
+ veor q8, q8, q12
+ veor q0, q0, q4
+ veor q12, q12, q11
+ veor q4, q4, q10
+ veor q15, q15, q13
+ veor q14, q14, q9
+ veor q10, q15, q14
+ vand q10, q10, q3
+ veor q3, q3, q7
+ vand q11, q7, q15
+ vand q3, q3, q14
+ veor q7, q11, q10
+ veor q3, q3, q11
+ veor q3, q3, q12
+ veor q0, q0, q12
+ veor q7, q7, q8
+ veor q4, q4, q8
+ veor q1, q1, q7
+ veor q6, q6, q5
+
+ veor q4, q4, q1
+ veor q2, q2, q7
+ veor q5, q5, q7
+ veor q4, q4, q2
+ veor q7, q7, q0
+ veor q4, q4, q5
+ veor q3, q3, q6
+ veor q6, q6, q1
+ veor q3, q3, q4
+
+ veor q4, q4, q0
+ veor q7, q7, q3
+ subs r5,r5,#1
+ bcc .Ldec_done
+ @ multiplication by 0x05-0x00-0x04-0x00
+ vext.8 q8, q0, q0, #8
+ vext.8 q14, q3, q3, #8
+ vext.8 q15, q5, q5, #8
+ veor q8, q8, q0
+ vext.8 q9, q1, q1, #8
+ veor q14, q14, q3
+ vext.8 q10, q6, q6, #8
+ veor q15, q15, q5
+ vext.8 q11, q4, q4, #8
+ veor q9, q9, q1
+ vext.8 q12, q2, q2, #8
+ veor q10, q10, q6
+ vext.8 q13, q7, q7, #8
+ veor q11, q11, q4
+ veor q12, q12, q2
+ veor q13, q13, q7
+
+ veor q0, q0, q14
+ veor q1, q1, q14
+ veor q6, q6, q8
+ veor q2, q2, q10
+ veor q4, q4, q9
+ veor q1, q1, q15
+ veor q6, q6, q15
+ veor q2, q2, q14
+ veor q7, q7, q11
+ veor q4, q4, q14
+ veor q3, q3, q12
+ veor q2, q2, q15
+ veor q7, q7, q15
+ veor q5, q5, q13
+ vext.8 q8, q0, q0, #12 @ x0 <<< 32
+ vext.8 q9, q1, q1, #12
+ veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
+ vext.8 q10, q6, q6, #12
+ veor q1, q1, q9
+ vext.8 q11, q4, q4, #12
+ veor q6, q6, q10
+ vext.8 q12, q2, q2, #12
+ veor q4, q4, q11
+ vext.8 q13, q7, q7, #12
+ veor q2, q2, q12
+ vext.8 q14, q3, q3, #12
+ veor q7, q7, q13
+ vext.8 q15, q5, q5, #12
+ veor q3, q3, q14
+
+ veor q9, q9, q0
+ veor q5, q5, q15
+	 vext.8	q0, q0, q0, #8		@ (x0 ^ (x0 <<< 32)) <<< 64
+ veor q10, q10, q1
+ veor q8, q8, q5
+ veor q9, q9, q5
+ vext.8 q1, q1, q1, #8
+ veor q13, q13, q2
+ veor q0, q0, q8
+ veor q14, q14, q7
+ veor q1, q1, q9
+ vext.8 q8, q2, q2, #8
+ veor q12, q12, q4
+ vext.8 q9, q7, q7, #8
+ veor q15, q15, q3
+ vext.8 q2, q4, q4, #8
+ veor q11, q11, q6
+ vext.8 q7, q5, q5, #8
+ veor q12, q12, q5
+ vext.8 q4, q3, q3, #8
+ veor q11, q11, q5
+ vext.8 q3, q6, q6, #8
+ veor q5, q9, q13
+ veor q11, q11, q2
+ veor q7, q7, q15
+ veor q6, q4, q14
+ veor q4, q8, q12
+ veor q2, q3, q10
+ vmov q3, q11
+ @ vmov q5, q9
+ vldmia r6, {q12} @ .LISR
+ ite eq @ Thumb2 thing, sanity check in ARM
+ addeq r6,r6,#0x10
+ bne .Ldec_loop
+ vldmia r6, {q12} @ .LISRM0
+ b .Ldec_loop
+.align 4
+.Ldec_done:
+ vmov.i8 q8,#0x55 @ compose .LBS0
+ vmov.i8 q9,#0x33 @ compose .LBS1
+ vshr.u64 q10, q3, #1
+ vshr.u64 q11, q2, #1
+ veor q10, q10, q5
+ veor q11, q11, q7
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #1
+ veor q7, q7, q11
+ vshl.u64 q11, q11, #1
+ veor q3, q3, q10
+ veor q2, q2, q11
+ vshr.u64 q10, q6, #1
+ vshr.u64 q11, q0, #1
+ veor q10, q10, q4
+ veor q11, q11, q1
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q4, q4, q10
+ vshl.u64 q10, q10, #1
+ veor q1, q1, q11
+ vshl.u64 q11, q11, #1
+ veor q6, q6, q10
+ veor q0, q0, q11
+ vmov.i8 q8,#0x0f @ compose .LBS2
+ vshr.u64 q10, q7, #2
+ vshr.u64 q11, q2, #2
+ veor q10, q10, q5
+ veor q11, q11, q3
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #2
+ veor q3, q3, q11
+ vshl.u64 q11, q11, #2
+ veor q7, q7, q10
+ veor q2, q2, q11
+ vshr.u64 q10, q1, #2
+ vshr.u64 q11, q0, #2
+ veor q10, q10, q4
+ veor q11, q11, q6
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q4, q4, q10
+ vshl.u64 q10, q10, #2
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #2
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vshr.u64 q10, q4, #4
+ vshr.u64 q11, q6, #4
+ veor q10, q10, q5
+ veor q11, q11, q3
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #4
+ veor q3, q3, q11
+ vshl.u64 q11, q11, #4
+ veor q4, q4, q10
+ veor q6, q6, q11
+ vshr.u64 q10, q1, #4
+ vshr.u64 q11, q0, #4
+ veor q10, q10, q7
+ veor q11, q11, q2
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #4
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #4
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vldmia r4, {q8} @ last round key
+ veor q6, q6, q8
+ veor q4, q4, q8
+ veor q2, q2, q8
+ veor q7, q7, q8
+ veor q3, q3, q8
+ veor q5, q5, q8
+ veor q0, q0, q8
+ veor q1, q1, q8
+ bx lr
+.size _bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type _bsaes_const,%object
+.align 6
+_bsaes_const:
+.LM0ISR: @ InvShiftRows constants
+ .quad 0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+ .quad 0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+ .quad 0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR: @ ShiftRows constants
+ .quad 0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+ .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+ .quad 0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+ .quad 0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+ .quad 0x090d01050c000408, 0x03070b0f060a0e02
+.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align 6
+.size _bsaes_const,.-_bsaes_const
+
+.type _bsaes_encrypt8,%function
+.align 4
+_bsaes_encrypt8:
+ adr r6,_bsaes_encrypt8
+ vldmia r4!, {q9} @ round 0 key
+ sub r6,r6,#_bsaes_encrypt8-.LM0SR
+
+ vldmia r6!, {q8} @ .LM0SR
+_bsaes_encrypt8_alt:
+ veor q10, q0, q9 @ xor with round0 key
+ veor q11, q1, q9
+ vtbl.8 d0, {q10}, d16
+ vtbl.8 d1, {q10}, d17
+ veor q12, q2, q9
+ vtbl.8 d2, {q11}, d16
+ vtbl.8 d3, {q11}, d17
+ veor q13, q3, q9
+ vtbl.8 d4, {q12}, d16
+ vtbl.8 d5, {q12}, d17
+ veor q14, q4, q9
+ vtbl.8 d6, {q13}, d16
+ vtbl.8 d7, {q13}, d17
+ veor q15, q5, q9
+ vtbl.8 d8, {q14}, d16
+ vtbl.8 d9, {q14}, d17
+ veor q10, q6, q9
+ vtbl.8 d10, {q15}, d16
+ vtbl.8 d11, {q15}, d17
+ veor q11, q7, q9
+ vtbl.8 d12, {q10}, d16
+ vtbl.8 d13, {q10}, d17
+ vtbl.8 d14, {q11}, d16
+ vtbl.8 d15, {q11}, d17
+_bsaes_encrypt8_bitslice:
+ vmov.i8 q8,#0x55 @ compose .LBS0
+ vmov.i8 q9,#0x33 @ compose .LBS1
+ vshr.u64 q10, q6, #1
+ vshr.u64 q11, q4, #1
+ veor q10, q10, q7
+ veor q11, q11, q5
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #1
+ veor q5, q5, q11
+ vshl.u64 q11, q11, #1
+ veor q6, q6, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q2, #1
+ vshr.u64 q11, q0, #1
+ veor q10, q10, q3
+ veor q11, q11, q1
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q3, q3, q10
+ vshl.u64 q10, q10, #1
+ veor q1, q1, q11
+ vshl.u64 q11, q11, #1
+ veor q2, q2, q10
+ veor q0, q0, q11
+ vmov.i8 q8,#0x0f @ compose .LBS2
+ vshr.u64 q10, q5, #2
+ vshr.u64 q11, q4, #2
+ veor q10, q10, q7
+ veor q11, q11, q6
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #2
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #2
+ veor q5, q5, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q1, #2
+ vshr.u64 q11, q0, #2
+ veor q10, q10, q3
+ veor q11, q11, q2
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q3, q3, q10
+ vshl.u64 q10, q10, #2
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #2
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vshr.u64 q10, q3, #4
+ vshr.u64 q11, q2, #4
+ veor q10, q10, q7
+ veor q11, q11, q6
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #4
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #4
+ veor q3, q3, q10
+ veor q2, q2, q11
+ vshr.u64 q10, q1, #4
+ vshr.u64 q11, q0, #4
+ veor q10, q10, q5
+ veor q11, q11, q4
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #4
+ veor q4, q4, q11
+ vshl.u64 q11, q11, #4
+ veor q1, q1, q10
+ veor q0, q0, q11
+ sub r5,r5,#1
+ b .Lenc_sbox
+.align 4
+.Lenc_loop:
+ vldmia r4!, {q8-q11}
+ veor q8, q8, q0
+ veor q9, q9, q1
+ vtbl.8 d0, {q8}, d24
+ vtbl.8 d1, {q8}, d25
+ vldmia r4!, {q8}
+ veor q10, q10, q2
+ vtbl.8 d2, {q9}, d24
+ vtbl.8 d3, {q9}, d25
+ vldmia r4!, {q9}
+ veor q11, q11, q3
+ vtbl.8 d4, {q10}, d24
+ vtbl.8 d5, {q10}, d25
+ vldmia r4!, {q10}
+ vtbl.8 d6, {q11}, d24
+ vtbl.8 d7, {q11}, d25
+ vldmia r4!, {q11}
+ veor q8, q8, q4
+ veor q9, q9, q5
+ vtbl.8 d8, {q8}, d24
+ vtbl.8 d9, {q8}, d25
+ veor q10, q10, q6
+ vtbl.8 d10, {q9}, d24
+ vtbl.8 d11, {q9}, d25
+ veor q11, q11, q7
+ vtbl.8 d12, {q10}, d24
+ vtbl.8 d13, {q10}, d25
+ vtbl.8 d14, {q11}, d24
+ vtbl.8 d15, {q11}, d25
+.Lenc_sbox:
+ veor q2, q2, q1
+ veor q5, q5, q6
+ veor q3, q3, q0
+ veor q6, q6, q2
+ veor q5, q5, q0
+
+ veor q6, q6, q3
+ veor q3, q3, q7
+ veor q7, q7, q5
+ veor q3, q3, q4
+ veor q4, q4, q5
+
+ veor q2, q2, q7
+ veor q3, q3, q1
+ veor q1, q1, q5
+ veor q11, q7, q4
+ veor q10, q1, q2
+ veor q9, q5, q3
+ veor q13, q2, q4
+ vmov q8, q10
+ veor q12, q6, q0
+
+ vorr q10, q10, q9
+ veor q15, q11, q8
+ vand q14, q11, q12
+ vorr q11, q11, q12
+ veor q12, q12, q9
+ vand q8, q8, q9
+ veor q9, q3, q0
+ vand q15, q15, q12
+ vand q13, q13, q9
+ veor q9, q7, q1
+ veor q12, q5, q6
+ veor q11, q11, q13
+ veor q10, q10, q13
+ vand q13, q9, q12
+ vorr q9, q9, q12
+ veor q11, q11, q15
+ veor q8, q8, q13
+ veor q10, q10, q14
+ veor q9, q9, q15
+ veor q8, q8, q14
+ vand q12, q2, q3
+ veor q9, q9, q14
+ vand q13, q4, q0
+ vand q14, q1, q5
+ vorr q15, q7, q6
+ veor q11, q11, q12
+ veor q9, q9, q14
+ veor q8, q8, q15
+ veor q10, q10, q13
+
+ @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
+
+ @ new smaller inversion
+
+ vand q14, q11, q9
+ vmov q12, q8
+
+ veor q13, q10, q14
+ veor q15, q8, q14
+ veor q14, q8, q14 @ q14=q15
+
+ vbsl q13, q9, q8
+ vbsl q15, q11, q10
+ veor q11, q11, q10
+
+ vbsl q12, q13, q14
+ vbsl q8, q14, q13
+
+ vand q14, q12, q15
+ veor q9, q9, q8
+
+ veor q14, q14, q11
+ veor q12, q6, q0
+ veor q8, q5, q3
+ veor q10, q15, q14
+ vand q10, q10, q6
+ veor q6, q6, q5
+ vand q11, q5, q15
+ vand q6, q6, q14
+ veor q5, q11, q10
+ veor q6, q6, q11
+ veor q15, q15, q13
+ veor q14, q14, q9
+ veor q11, q15, q14
+ veor q10, q13, q9
+ vand q11, q11, q12
+ vand q10, q10, q0
+ veor q12, q12, q8
+ veor q0, q0, q3
+ vand q8, q8, q15
+ vand q3, q3, q13
+ vand q12, q12, q14
+ vand q0, q0, q9
+ veor q8, q8, q12
+ veor q0, q0, q3
+ veor q12, q12, q11
+ veor q3, q3, q10
+ veor q6, q6, q12
+ veor q0, q0, q12
+ veor q5, q5, q8
+ veor q3, q3, q8
+
+ veor q12, q7, q4
+ veor q8, q1, q2
+ veor q11, q15, q14
+ veor q10, q13, q9
+ vand q11, q11, q12
+ vand q10, q10, q4
+ veor q12, q12, q8
+ veor q4, q4, q2
+ vand q8, q8, q15
+ vand q2, q2, q13
+ vand q12, q12, q14
+ vand q4, q4, q9
+ veor q8, q8, q12
+ veor q4, q4, q2
+ veor q12, q12, q11
+ veor q2, q2, q10
+ veor q15, q15, q13
+ veor q14, q14, q9
+ veor q10, q15, q14
+ vand q10, q10, q7
+ veor q7, q7, q1
+ vand q11, q1, q15
+ vand q7, q7, q14
+ veor q1, q11, q10
+ veor q7, q7, q11
+ veor q7, q7, q12
+ veor q4, q4, q12
+ veor q1, q1, q8
+ veor q2, q2, q8
+ veor q7, q7, q0
+ veor q1, q1, q6
+ veor q6, q6, q0
+ veor q4, q4, q7
+ veor q0, q0, q1
+
+ veor q1, q1, q5
+ veor q5, q5, q2
+ veor q2, q2, q3
+ veor q3, q3, q5
+ veor q4, q4, q5
+
+ veor q6, q6, q3
+ subs r5,r5,#1
+ bcc .Lenc_done
+ vext.8 q8, q0, q0, #12 @ x0 <<< 32
+ vext.8 q9, q1, q1, #12
+ veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
+ vext.8 q10, q4, q4, #12
+ veor q1, q1, q9
+ vext.8 q11, q6, q6, #12
+ veor q4, q4, q10
+ vext.8 q12, q3, q3, #12
+ veor q6, q6, q11
+ vext.8 q13, q7, q7, #12
+ veor q3, q3, q12
+ vext.8 q14, q2, q2, #12
+ veor q7, q7, q13
+ vext.8 q15, q5, q5, #12
+ veor q2, q2, q14
+
+ veor q9, q9, q0
+ veor q5, q5, q15
+	 vext.8	q0, q0, q0, #8		@ (x0 ^ (x0 <<< 32)) <<< 64
+ veor q10, q10, q1
+ veor q8, q8, q5
+ veor q9, q9, q5
+ vext.8 q1, q1, q1, #8
+ veor q13, q13, q3
+ veor q0, q0, q8
+ veor q14, q14, q7
+ veor q1, q1, q9
+ vext.8 q8, q3, q3, #8
+ veor q12, q12, q6
+ vext.8 q9, q7, q7, #8
+ veor q15, q15, q2
+ vext.8 q3, q6, q6, #8
+ veor q11, q11, q4
+ vext.8 q7, q5, q5, #8
+ veor q12, q12, q5
+ vext.8 q6, q2, q2, #8
+ veor q11, q11, q5
+ vext.8 q2, q4, q4, #8
+ veor q5, q9, q13
+ veor q4, q8, q12
+ veor q3, q3, q11
+ veor q7, q7, q15
+ veor q6, q6, q14
+ @ vmov q4, q8
+ veor q2, q2, q10
+ @ vmov q5, q9
+ vldmia r6, {q12} @ .LSR
+	ite	eq				@ Thumb2 thing, sanity check in ARM
+ addeq r6,r6,#0x10
+ bne .Lenc_loop
+ vldmia r6, {q12} @ .LSRM0
+ b .Lenc_loop
+.align 4
+.Lenc_done:
+ vmov.i8 q8,#0x55 @ compose .LBS0
+ vmov.i8 q9,#0x33 @ compose .LBS1
+ vshr.u64 q10, q2, #1
+ vshr.u64 q11, q3, #1
+ veor q10, q10, q5
+ veor q11, q11, q7
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #1
+ veor q7, q7, q11
+ vshl.u64 q11, q11, #1
+ veor q2, q2, q10
+ veor q3, q3, q11
+ vshr.u64 q10, q4, #1
+ vshr.u64 q11, q0, #1
+ veor q10, q10, q6
+ veor q11, q11, q1
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q6, q6, q10
+ vshl.u64 q10, q10, #1
+ veor q1, q1, q11
+ vshl.u64 q11, q11, #1
+ veor q4, q4, q10
+ veor q0, q0, q11
+ vmov.i8 q8,#0x0f @ compose .LBS2
+ vshr.u64 q10, q7, #2
+ vshr.u64 q11, q3, #2
+ veor q10, q10, q5
+ veor q11, q11, q2
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #2
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #2
+ veor q7, q7, q10
+ veor q3, q3, q11
+ vshr.u64 q10, q1, #2
+ vshr.u64 q11, q0, #2
+ veor q10, q10, q6
+ veor q11, q11, q4
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q6, q6, q10
+ vshl.u64 q10, q10, #2
+ veor q4, q4, q11
+ vshl.u64 q11, q11, #2
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vshr.u64 q10, q6, #4
+ vshr.u64 q11, q4, #4
+ veor q10, q10, q5
+ veor q11, q11, q2
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #4
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #4
+ veor q6, q6, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q1, #4
+ vshr.u64 q11, q0, #4
+ veor q10, q10, q7
+ veor q11, q11, q3
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #4
+ veor q3, q3, q11
+ vshl.u64 q11, q11, #4
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vldmia r4, {q8} @ last round key
+ veor q4, q4, q8
+ veor q6, q6, q8
+ veor q3, q3, q8
+ veor q7, q7, q8
+ veor q2, q2, q8
+ veor q5, q5, q8
+ veor q0, q0, q8
+ veor q1, q1, q8
+ bx lr
+.size _bsaes_encrypt8,.-_bsaes_encrypt8
+.type _bsaes_key_convert,%function
+.align 4
+_bsaes_key_convert:
+ adr r6,_bsaes_key_convert
+ vld1.8 {q7}, [r4]! @ load round 0 key
+ sub r6,r6,#_bsaes_key_convert-.LM0
+ vld1.8 {q15}, [r4]! @ load round 1 key
+
+ vmov.i8 q8, #0x01 @ bit masks
+ vmov.i8 q9, #0x02
+ vmov.i8 q10, #0x04
+ vmov.i8 q11, #0x08
+ vmov.i8 q12, #0x10
+ vmov.i8 q13, #0x20
+ vldmia r6, {q14} @ .LM0
+
+#ifdef __ARMEL__
+ vrev32.8 q7, q7
+ vrev32.8 q15, q15
+#endif
+ sub r5,r5,#1
+ vstmia r12!, {q7} @ save round 0 key
+ b .Lkey_loop
+
+.align 4
+.Lkey_loop:
+ vtbl.8 d14,{q15},d28
+ vtbl.8 d15,{q15},d29
+ vmov.i8 q6, #0x40
+ vmov.i8 q15, #0x80
+
+ vtst.8 q0, q7, q8
+ vtst.8 q1, q7, q9
+ vtst.8 q2, q7, q10
+ vtst.8 q3, q7, q11
+ vtst.8 q4, q7, q12
+ vtst.8 q5, q7, q13
+ vtst.8 q6, q7, q6
+ vtst.8 q7, q7, q15
+ vld1.8 {q15}, [r4]! @ load next round key
+ vmvn q0, q0 @ "pnot"
+ vmvn q1, q1
+ vmvn q5, q5
+ vmvn q6, q6
+#ifdef __ARMEL__
+ vrev32.8 q15, q15
+#endif
+ subs r5,r5,#1
+ vstmia r12!,{q0-q7} @ write bit-sliced round key
+ bne .Lkey_loop
+
+ vmov.i8 q7,#0x63 @ compose .L63
+ @ don't save last round key
+ bx lr
+.size _bsaes_key_convert,.-_bsaes_key_convert
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global bsaes_cbc_encrypt
+.type bsaes_cbc_encrypt,%function
+.align 5
+bsaes_cbc_encrypt:
+#ifndef __KERNEL__
+ cmp r2, #128
+#ifndef __thumb__
+ blo AES_cbc_encrypt
+#else
+ bhs 1f
+ b AES_cbc_encrypt
+1:
+#endif
+#endif
+
+ @ it is up to the caller to make sure we are called with enc == 0
+
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr}
+ VFP_ABI_PUSH
+ ldr r8, [ip] @ IV is 1st arg on the stack
+ mov r2, r2, lsr#4 @ len in 16 byte blocks
+ sub sp, #0x10 @ scratch space to carry over the IV
+ mov r9, sp @ save sp
+
+ ldr r10, [r3, #240] @ get # of rounds
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
+	add	r12, #96			@ size of bit-sliced key schedule
+
+ @ populate the key schedule
+ mov r4, r3 @ pass key
+ mov r5, r10 @ pass # of rounds
+ mov sp, r12 @ sp is sp
+ bl _bsaes_key_convert
+ vldmia sp, {q6}
+ vstmia r12, {q15} @ save last round key
+ veor q7, q7, q6 @ fix up round 0 key
+ vstmia sp, {q7}
+#else
+ ldr r12, [r3, #244]
+ eors r12, #1
+ beq 0f
+
+ @ populate the key schedule
+ str r12, [r3, #244]
+ mov r4, r3 @ pass key
+ mov r5, r10 @ pass # of rounds
+ add r12, r3, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, r3, #248
+ vldmia r4, {q6}
+ vstmia r12, {q15} @ save last round key
+ veor q7, q7, q6 @ fix up round 0 key
+ vstmia r4, {q7}
+
+.align 2
+0:
+#endif
+
+ vld1.8 {q15}, [r8] @ load IV
+ b .Lcbc_dec_loop
+
+.align 4
+.Lcbc_dec_loop:
+ subs r2, r2, #0x8
+ bmi .Lcbc_dec_loop_finish
+
+ vld1.8 {q0-q1}, [r0]! @ load input
+ vld1.8 {q2-q3}, [r0]!
+#ifndef BSAES_ASM_EXTENDED_KEY
+ mov r4, sp @ pass the key
+#else
+ add r4, r3, #248
+#endif
+ vld1.8 {q4-q5}, [r0]!
+ mov r5, r10
+ vld1.8 {q6-q7}, [r0]
+ sub r0, r0, #0x60
+ vstmia r9, {q15} @ put aside IV
+
+ bl _bsaes_decrypt8
+
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10-q11}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q12-q13}, [r0]!
+ veor q4, q4, q10
+ veor q2, q2, q11
+ vld1.8 {q14-q15}, [r0]!
+ veor q7, q7, q12
+ vst1.8 {q0-q1}, [r1]! @ write output
+ veor q3, q3, q13
+ vst1.8 {q6}, [r1]!
+ veor q5, q5, q14
+ vst1.8 {q4}, [r1]!
+ vst1.8 {q2}, [r1]!
+ vst1.8 {q7}, [r1]!
+ vst1.8 {q3}, [r1]!
+ vst1.8 {q5}, [r1]!
+
+ b .Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+ adds r2, r2, #8
+ beq .Lcbc_dec_done
+
+ vld1.8 {q0}, [r0]! @ load input
+ cmp r2, #2
+ blo .Lcbc_dec_one
+ vld1.8 {q1}, [r0]!
+#ifndef BSAES_ASM_EXTENDED_KEY
+ mov r4, sp @ pass the key
+#else
+ add r4, r3, #248
+#endif
+ mov r5, r10
+ vstmia r9, {q15} @ put aside IV
+ beq .Lcbc_dec_two
+ vld1.8 {q2}, [r0]!
+ cmp r2, #4
+ blo .Lcbc_dec_three
+ vld1.8 {q3}, [r0]!
+ beq .Lcbc_dec_four
+ vld1.8 {q4}, [r0]!
+ cmp r2, #6
+ blo .Lcbc_dec_five
+ vld1.8 {q5}, [r0]!
+ beq .Lcbc_dec_six
+ vld1.8 {q6}, [r0]!
+ sub r0, r0, #0x70
+
+ bl _bsaes_decrypt8
+
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10-q11}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q12-q13}, [r0]!
+ veor q4, q4, q10
+ veor q2, q2, q11
+ vld1.8 {q15}, [r0]!
+ veor q7, q7, q12
+ vst1.8 {q0-q1}, [r1]! @ write output
+ veor q3, q3, q13
+ vst1.8 {q6}, [r1]!
+ vst1.8 {q4}, [r1]!
+ vst1.8 {q2}, [r1]!
+ vst1.8 {q7}, [r1]!
+ vst1.8 {q3}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_six:
+ sub r0, r0, #0x60
+ bl _bsaes_decrypt8
+ vldmia r9,{q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10-q11}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q12}, [r0]!
+ veor q4, q4, q10
+ veor q2, q2, q11
+ vld1.8 {q15}, [r0]!
+ veor q7, q7, q12
+ vst1.8 {q0-q1}, [r1]! @ write output
+ vst1.8 {q6}, [r1]!
+ vst1.8 {q4}, [r1]!
+ vst1.8 {q2}, [r1]!
+ vst1.8 {q7}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_five:
+ sub r0, r0, #0x50
+ bl _bsaes_decrypt8
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10-q11}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q15}, [r0]!
+ veor q4, q4, q10
+ vst1.8 {q0-q1}, [r1]! @ write output
+ veor q2, q2, q11
+ vst1.8 {q6}, [r1]!
+ vst1.8 {q4}, [r1]!
+ vst1.8 {q2}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_four:
+ sub r0, r0, #0x40
+ bl _bsaes_decrypt8
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q15}, [r0]!
+ veor q4, q4, q10
+ vst1.8 {q0-q1}, [r1]! @ write output
+ vst1.8 {q6}, [r1]!
+ vst1.8 {q4}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_three:
+ sub r0, r0, #0x30
+ bl _bsaes_decrypt8
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q15}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vst1.8 {q0-q1}, [r1]! @ write output
+ vst1.8 {q6}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_two:
+ sub r0, r0, #0x20
+ bl _bsaes_decrypt8
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q15}, [r0]! @ reload input
+ veor q1, q1, q8
+ vst1.8 {q0-q1}, [r1]! @ write output
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_one:
+ sub r0, r0, #0x10
+ mov r10, r1 @ save original out pointer
+ mov r1, r9 @ use the iv scratch space as out buffer
+ mov r2, r3
+ vmov q4,q15 @ just in case ensure that IV
+ vmov q5,q0 @ and input are preserved
+ bl AES_decrypt
+ vld1.8 {q0}, [r9,:64] @ load result
+ veor q0, q0, q4 @ ^= IV
+ vmov q15, q5 @ q5 holds input
+ vst1.8 {q0}, [r10] @ write output
+
+.Lcbc_dec_done:
+#ifndef BSAES_ASM_EXTENDED_KEY
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+.Lcbc_dec_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r9
+ bne .Lcbc_dec_bzero
+#endif
+
+ mov sp, r9
+ add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
+ vst1.8 {q15}, [r8] @ return IV
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc}
+.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+.extern AES_encrypt
+.global bsaes_ctr32_encrypt_blocks
+.type bsaes_ctr32_encrypt_blocks,%function
+.align 5
+bsaes_ctr32_encrypt_blocks:
+ cmp r2, #8 @ use plain AES for
+ blo .Lctr_enc_short @ small sizes
+
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr}
+ VFP_ABI_PUSH
+ ldr r8, [ip] @ ctr is 1st arg on the stack
+ sub sp, sp, #0x10 @ scratch space to carry over the ctr
+ mov r9, sp @ save sp
+
+ ldr r10, [r3, #240] @ get # of rounds
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
+ add r12, #96 @ size of bit-sliced key schedule
+
+ @ populate the key schedule
+ mov r4, r3 @ pass key
+ mov r5, r10 @ pass # of rounds
+ mov sp, r12 @ sp is sp
+ bl _bsaes_key_convert
+ veor q7,q7,q15 @ fix up last round key
+ vstmia r12, {q7} @ save last round key
+
+ vld1.8 {q0}, [r8] @ load counter
+ add r8, r6, #.LREVM0SR-.LM0 @ borrow r8
+ vldmia sp, {q4} @ load round0 key
+#else
+ ldr r12, [r3, #244]
+ eors r12, #1
+ beq 0f
+
+ @ populate the key schedule
+ str r12, [r3, #244]
+ mov r4, r3 @ pass key
+ mov r5, r10 @ pass # of rounds
+ add r12, r3, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ veor q7,q7,q15 @ fix up last round key
+ vstmia r12, {q7} @ save last round key
+
+.align 2
+0: add r12, r3, #248
+ vld1.8 {q0}, [r8] @ load counter
+ adrl r8, .LREVM0SR @ borrow r8
+ vldmia r12, {q4} @ load round0 key
+ sub sp, #0x10 @ place for adjusted round0 key
+#endif
+
+ vmov.i32 q8,#1 @ compose 1<<96
+ veor q9,q9,q9
+ vrev32.8 q0,q0
+ vext.8 q8,q9,q8,#4
+ vrev32.8 q4,q4
+ vadd.u32 q9,q8,q8 @ compose 2<<96
+ vstmia sp, {q4} @ save adjusted round0 key
+ b .Lctr_enc_loop
+
+.align 4
+.Lctr_enc_loop:
+ vadd.u32 q10, q8, q9 @ compose 3<<96
+ vadd.u32 q1, q0, q8 @ +1
+ vadd.u32 q2, q0, q9 @ +2
+ vadd.u32 q3, q0, q10 @ +3
+ vadd.u32 q4, q1, q10
+ vadd.u32 q5, q2, q10
+ vadd.u32 q6, q3, q10
+ vadd.u32 q7, q4, q10
+ vadd.u32 q10, q5, q10 @ next counter
+
+ @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+ @ to flip byte order in 32-bit counter
+
+ vldmia sp, {q9} @ load round0 key
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x10 @ pass next round key
+#else
+ add r4, r3, #264
+#endif
+ vldmia r8, {q8} @ .LREVM0SR
+ mov r5, r10 @ pass rounds
+ vstmia r9, {q10} @ save next counter
+ sub r6, r8, #.LREVM0SR-.LSR @ pass constants
+
+ bl _bsaes_encrypt8_alt
+
+ subs r2, r2, #8
+ blo .Lctr_enc_loop_done
+
+ vld1.8 {q8-q9}, [r0]! @ load input
+ vld1.8 {q10-q11}, [r0]!
+ veor q0, q8
+ veor q1, q9
+ vld1.8 {q12-q13}, [r0]!
+ veor q4, q10
+ veor q6, q11
+ vld1.8 {q14-q15}, [r0]!
+ veor q3, q12
+ vst1.8 {q0-q1}, [r1]! @ write output
+ veor q7, q13
+ veor q2, q14
+ vst1.8 {q4}, [r1]!
+ veor q5, q15
+ vst1.8 {q6}, [r1]!
+ vmov.i32 q8, #1 @ compose 1<<96
+ vst1.8 {q3}, [r1]!
+ veor q9, q9, q9
+ vst1.8 {q7}, [r1]!
+ vext.8 q8, q9, q8, #4
+ vst1.8 {q2}, [r1]!
+ vadd.u32 q9,q8,q8 @ compose 2<<96
+ vst1.8 {q5}, [r1]!
+ vldmia r9, {q0} @ load counter
+
+ bne .Lctr_enc_loop
+ b .Lctr_enc_done
+
+.align 4
+.Lctr_enc_loop_done:
+ add r2, r2, #8
+ vld1.8 {q8}, [r0]! @ load input
+ veor q0, q8
+ vst1.8 {q0}, [r1]! @ write output
+ cmp r2, #2
+ blo .Lctr_enc_done
+ vld1.8 {q9}, [r0]!
+ veor q1, q9
+ vst1.8 {q1}, [r1]!
+ beq .Lctr_enc_done
+ vld1.8 {q10}, [r0]!
+ veor q4, q10
+ vst1.8 {q4}, [r1]!
+ cmp r2, #4
+ blo .Lctr_enc_done
+ vld1.8 {q11}, [r0]!
+ veor q6, q11
+ vst1.8 {q6}, [r1]!
+ beq .Lctr_enc_done
+ vld1.8 {q12}, [r0]!
+ veor q3, q12
+ vst1.8 {q3}, [r1]!
+ cmp r2, #6
+ blo .Lctr_enc_done
+ vld1.8 {q13}, [r0]!
+ veor q7, q13
+ vst1.8 {q7}, [r1]!
+ beq .Lctr_enc_done
+ vld1.8 {q14}, [r0]
+ veor q2, q14
+ vst1.8 {q2}, [r1]!
+
+.Lctr_enc_done:
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifndef BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r9
+ bne .Lctr_enc_bzero
+#else
+ vstmia sp, {q0-q1}
+#endif
+
+ mov sp, r9
+ add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.align 4
+.Lctr_enc_short:
+ ldr ip, [sp] @ ctr pointer is passed on stack
+ stmdb sp!, {r4-r8, lr}
+
+ mov r4, r0 @ copy arguments
+ mov r5, r1
+ mov r6, r2
+ mov r7, r3
+ ldr r8, [ip, #12] @ load counter LSW
+ vld1.8 {q1}, [ip] @ load whole counter value
+#ifdef __ARMEL__
+ rev r8, r8
+#endif
+ sub sp, sp, #0x10
+ vst1.8 {q1}, [sp,:64] @ copy counter value
+ sub sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+ add r0, sp, #0x10 @ input counter value
+ mov r1, sp @ output on the stack
+ mov r2, r7 @ key
+
+ bl AES_encrypt
+
+ vld1.8 {q0}, [r4]! @ load input
+ vld1.8 {q1}, [sp,:64] @ load encrypted counter
+ add r8, r8, #1
+#ifdef __ARMEL__
+ rev r0, r8
+ str r0, [sp, #0x1c] @ next counter value
+#else
+ str r8, [sp, #0x1c] @ next counter value
+#endif
+ veor q0,q0,q1
+ vst1.8 {q0}, [r5]! @ store output
+ subs r6, r6, #1
+ bne .Lctr_enc_short_loop
+
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+ vstmia sp!, {q0-q1}
+
+ ldmia sp!, {r4-r8, pc}
+.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
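For reference, the contract of the routine above can be modelled in C roughly as below. This is an illustrative sketch only (ctr32_ref() and aes_encrypt_block() are hypothetical stand-ins, not part of this patch): each block is XORed with an encrypted counter, and only the low, big-endian 32-bit word of that counter is incremented, without carrying into the upper 96 bits; the caller's counter is left untouched, so the glue code further down re-advances it separately.

    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/unaligned.h>

    /* illustrative sketch only; aes_encrypt_block() is a hypothetical
     * one-block AES primitive standing in for the bit-sliced code */
    static void ctr32_ref(const u8 *in, u8 *out, u32 blocks,
                          const void *key, const u8 ctr[16])
    {
            u8 iv[16], ks[16];
            int i;

            memcpy(iv, ctr, 16);
            while (blocks--) {
                    aes_encrypt_block(key, ks, iv);  /* keystream block */
                    for (i = 0; i < 16; i++)
                            out[i] = in[i] ^ ks[i];
                    in += 16;
                    out += 16;
                    /* bump only the low, big-endian 32-bit word; nothing
                     * propagates into bytes 0-11 of the counter */
                    put_unaligned_be32(get_unaligned_be32(iv + 12) + 1,
                                       iv + 12);
            }
    }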
+.globl bsaes_xts_encrypt
+.type bsaes_xts_encrypt,%function
+.align 4
+bsaes_xts_encrypt:
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr} @ 0x20
+ VFP_ABI_PUSH
+ mov r6, sp @ future r3
+
+ mov r7, r0
+ mov r8, r1
+ mov r9, r2
+ mov r10, r3
+
+ sub r0, sp, #0x10 @ 0x10
+ bic r0, #0xf @ align at 16 bytes
+ mov sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+ ldr r0, [ip] @ pointer to input tweak
+#else
+ @ generate initial tweak
+ ldr r0, [ip, #4] @ iv[]
+ mov r1, sp
+ ldr r2, [ip, #0] @ key2
+ bl AES_encrypt
+ mov r0,sp @ pointer to initial tweak
+#endif
+
+ ldr r1, [r10, #240] @ get # of rounds
+ mov r3, r6
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, r1, lsl#7 @ 128 bytes per inner round key
+ @ add r12, #96 @ size of bit-sliced key schedule
+ sub r12, #48 @ place for tweak[9]
+
+ @ populate the key schedule
+ mov r4, r10 @ pass key
+ mov r5, r1 @ pass # of rounds
+ mov sp, r12
+ add r12, #0x90 @ pass key schedule
+ bl _bsaes_key_convert
+ veor q7, q7, q15 @ fix up last round key
+ vstmia r12, {q7} @ save last round key
+#else
+ ldr r12, [r10, #244]
+ eors r12, #1
+ beq 0f
+
+ str r12, [r10, #244]
+ mov r4, r10 @ pass key
+ mov r5, r1 @ pass # of rounds
+ add r12, r10, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ veor q7, q7, q15 @ fix up last round key
+ vstmia r12, {q7}
+
+.align 2
+0: sub sp, #0x90 @ place for tweak[9]
+#endif
+
+ vld1.8 {q8}, [r0] @ initial tweak
+ adr r2, .Lxts_magic
+
+ subs r9, #0x80
+ blo .Lxts_enc_short
+ b .Lxts_enc_loop
+
+.align 4
+.Lxts_enc_loop:
+ vldmia r2, {q5} @ load XTS magic
+ vshr.s64 q6, q8, #63
+ mov r0, sp
+ vand q6, q6, q5
+ vadd.u64 q9, q8, q8
+ vst1.64 {q8}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q9, #63
+ veor q9, q9, q6
+ vand q7, q7, q5
+ vadd.u64 q10, q9, q9
+ vst1.64 {q9}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q10, #63
+ veor q10, q10, q7
+ vand q6, q6, q5
+ vld1.8 {q0}, [r7]!
+ vadd.u64 q11, q10, q10
+ vst1.64 {q10}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q11, #63
+ veor q11, q11, q6
+ vand q7, q7, q5
+ vld1.8 {q1}, [r7]!
+ veor q0, q0, q8
+ vadd.u64 q12, q11, q11
+ vst1.64 {q11}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q12, #63
+ veor q12, q12, q7
+ vand q6, q6, q5
+ vld1.8 {q2}, [r7]!
+ veor q1, q1, q9
+ vadd.u64 q13, q12, q12
+ vst1.64 {q12}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q13, #63
+ veor q13, q13, q6
+ vand q7, q7, q5
+ vld1.8 {q3}, [r7]!
+ veor q2, q2, q10
+ vadd.u64 q14, q13, q13
+ vst1.64 {q13}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q14, #63
+ veor q14, q14, q7
+ vand q6, q6, q5
+ vld1.8 {q4}, [r7]!
+ veor q3, q3, q11
+ vadd.u64 q15, q14, q14
+ vst1.64 {q14}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q15, #63
+ veor q15, q15, q6
+ vand q7, q7, q5
+ vld1.8 {q5}, [r7]!
+ veor q4, q4, q12
+ vadd.u64 q8, q15, q15
+ vst1.64 {q15}, [r0,:128]!
+ vswp d15,d14
+ veor q8, q8, q7
+ vst1.64 {q8}, [r0,:128] @ next round tweak
+
+ vld1.8 {q6-q7}, [r7]!
+ veor q5, q5, q13
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q6, q6, q14
+ mov r5, r1 @ pass rounds
+ veor q7, q7, q15
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ vld1.64 {q14-q15}, [r0,:128]!
+ veor q10, q3, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ veor q12, q2, q14
+ vst1.8 {q10-q11}, [r8]!
+ veor q13, q5, q15
+ vst1.8 {q12-q13}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+
+ subs r9, #0x80
+ bpl .Lxts_enc_loop
+
+.Lxts_enc_short:
+ adds r9, #0x70
+ bmi .Lxts_enc_done
+
+ vldmia r2, {q5} @ load XTS magic
+ vshr.s64 q7, q8, #63
+ mov r0, sp
+ vand q7, q7, q5
+ vadd.u64 q9, q8, q8
+ vst1.64 {q8}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q9, #63
+ veor q9, q9, q7
+ vand q6, q6, q5
+ vadd.u64 q10, q9, q9
+ vst1.64 {q9}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q10, #63
+ veor q10, q10, q6
+ vand q7, q7, q5
+ vld1.8 {q0}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_1
+ vadd.u64 q11, q10, q10
+ vst1.64 {q10}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q11, #63
+ veor q11, q11, q7
+ vand q6, q6, q5
+ vld1.8 {q1}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_2
+ veor q0, q0, q8
+ vadd.u64 q12, q11, q11
+ vst1.64 {q11}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q12, #63
+ veor q12, q12, q6
+ vand q7, q7, q5
+ vld1.8 {q2}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_3
+ veor q1, q1, q9
+ vadd.u64 q13, q12, q12
+ vst1.64 {q12}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q13, #63
+ veor q13, q13, q7
+ vand q6, q6, q5
+ vld1.8 {q3}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_4
+ veor q2, q2, q10
+ vadd.u64 q14, q13, q13
+ vst1.64 {q13}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q14, #63
+ veor q14, q14, q6
+ vand q7, q7, q5
+ vld1.8 {q4}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_5
+ veor q3, q3, q11
+ vadd.u64 q15, q14, q14
+ vst1.64 {q14}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q15, #63
+ veor q15, q15, q7
+ vand q6, q6, q5
+ vld1.8 {q5}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_6
+ veor q4, q4, q12
+ sub r9, #0x10
+ vst1.64 {q15}, [r0,:128] @ next round tweak
+
+ vld1.8 {q6}, [r7]!
+ veor q5, q5, q13
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q6, q6, q14
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ vld1.64 {q14}, [r0,:128]!
+ veor q10, q3, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ veor q12, q2, q14
+ vst1.8 {q10-q11}, [r8]!
+ vst1.8 {q12}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_6:
+ vst1.64 {q14}, [r0,:128] @ next round tweak
+
+ veor q4, q4, q12
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q5, q5, q13
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ veor q10, q3, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ vst1.8 {q10-q11}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align 5
+.Lxts_magic:
+ .quad 1, 0x87
+
+.align 5
+.Lxts_enc_5:
+ vst1.64 {q13}, [r0,:128] @ next round tweak
+
+ veor q3, q3, q11
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q4, q4, q12
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ veor q10, q3, q12
+ vst1.8 {q8-q9}, [r8]!
+ vst1.8 {q10}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_4:
+ vst1.64 {q12}, [r0,:128] @ next round tweak
+
+ veor q2, q2, q10
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q3, q3, q11
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ vst1.8 {q8-q9}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_3:
+ vst1.64 {q11}, [r0,:128] @ next round tweak
+
+ veor q1, q1, q9
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q2, q2, q10
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ vst1.8 {q8}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_2:
+ vst1.64 {q10}, [r0,:128] @ next round tweak
+
+ veor q0, q0, q8
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q1, q1, q9
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ vst1.8 {q0-q1}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_1:
+ mov r0, sp
+ veor q0, q8
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+ mov r4, r3 @ preserve fp
+
+ bl AES_encrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q8
+ vst1.8 {q0}, [r8]!
+ mov r3, r4
+
+ vmov q8, q9 @ next round tweak
+
+.Lxts_enc_done:
+#ifndef XTS_CHAIN_TWEAK
+ adds r9, #0x10
+ beq .Lxts_enc_ret
+ sub r6, r8, #0x10
+
+.Lxts_enc_steal:
+ ldrb r0, [r7], #1
+ ldrb r1, [r8, #-0x10]
+ strb r0, [r8, #-0x10]
+ strb r1, [r8], #1
+
+ subs r9, #1
+ bhi .Lxts_enc_steal
+
+ vld1.8 {q0}, [r6]
+ mov r0, sp
+ veor q0, q0, q8
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+ mov r4, r3 @ preserve fp
+
+ bl AES_encrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q8
+ vst1.8 {q0}, [r6]
+ mov r3, r4
+#endif
+
+.Lxts_enc_ret:
+ bic r0, r3, #0xf
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifdef XTS_CHAIN_TWEAK
+ ldr r1, [r3, #0x20+VFP_ABI_FRAME] @ chain tweak
+#endif
+.Lxts_enc_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r0
+ bne .Lxts_enc_bzero
+
+ mov sp, r3
+#ifdef XTS_CHAIN_TWEAK
+ vst1.8 {q8}, [r1]
+#endif
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.size bsaes_xts_encrypt,.-bsaes_xts_encrypt
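The tweak schedule in the routine above is repeated doubling in GF(2^128) using the .Lxts_magic constants {1, 0x87}. As a plain C illustration of what each vshr.s64/vand/vadd.u64/vswp/veor group computes per tweak (a sketch; the function name is ours, not part of the patch):

    #include <stdint.h>

    /* one XTS tweak doubling: multiply by x modulo x^128 + x^7 + x^2 + x + 1,
     * with the tweak held as two little-endian 64-bit halves, t[0] being the
     * low half (the first eight bytes of the tweak block) */
    static void xts_tweak_double(uint64_t t[2])
    {
            uint64_t carry_lo = (uint64_t)((int64_t)t[0] >> 63); /* all-ones if bit 63 set */
            uint64_t carry_hi = (uint64_t)((int64_t)t[1] >> 63); /* all-ones if bit 127 set */

            t[0] = (t[0] << 1) ^ (carry_hi & 0x87); /* fold the dropped bit 127 back in */
            t[1] = (t[1] << 1) ^ (carry_lo & 1);    /* carry bit 63 into bit 64 */
    }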
+
+.globl bsaes_xts_decrypt
+.type bsaes_xts_decrypt,%function
+.align 4
+bsaes_xts_decrypt:
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr} @ 0x20
+ VFP_ABI_PUSH
+ mov r6, sp @ future r3
+
+ mov r7, r0
+ mov r8, r1
+ mov r9, r2
+ mov r10, r3
+
+ sub r0, sp, #0x10 @ 0x10
+ bic r0, #0xf @ align at 16 bytes
+ mov sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+ ldr r0, [ip] @ pointer to input tweak
+#else
+ @ generate initial tweak
+ ldr r0, [ip, #4] @ iv[]
+ mov r1, sp
+ ldr r2, [ip, #0] @ key2
+ bl AES_encrypt
+ mov r0, sp @ pointer to initial tweak
+#endif
+
+ ldr r1, [r10, #240] @ get # of rounds
+ mov r3, r6
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, r1, lsl#7 @ 128 bytes per inner round key
+ @ add r12, #96 @ size of bit-sliced key schedule
+ sub r12, #48 @ place for tweak[9]
+
+ @ populate the key schedule
+ mov r4, r10 @ pass key
+ mov r5, r1 @ pass # of rounds
+ mov sp, r12
+ add r12, #0x90 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, sp, #0x90
+ vldmia r4, {q6}
+ vstmia r12, {q15} @ save last round key
+ veor q7, q7, q6 @ fix up round 0 key
+ vstmia r4, {q7}
+#else
+ ldr r12, [r10, #244]
+ eors r12, #1
+ beq 0f
+
+ str r12, [r10, #244]
+ mov r4, r10 @ pass key
+ mov r5, r1 @ pass # of rounds
+ add r12, r10, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, r10, #248
+ vldmia r4, {q6}
+ vstmia r12, {q15} @ save last round key
+ veor q7, q7, q6 @ fix up round 0 key
+ vstmia r4, {q7}
+
+.align 2
+0: sub sp, #0x90 @ place for tweak[9]
+#endif
+ vld1.8 {q8}, [r0] @ initial tweak
+ adr r2, .Lxts_magic
+
+ tst r9, #0xf @ if not multiple of 16
+ it ne @ Thumb2 thing, sanity check in ARM
+ subne r9, #0x10 @ subtract another 16 bytes
+ subs r9, #0x80
+
+ blo .Lxts_dec_short
+ b .Lxts_dec_loop
+
+.align 4
+.Lxts_dec_loop:
+ vldmia r2, {q5} @ load XTS magic
+ vshr.s64 q6, q8, #63
+ mov r0, sp
+ vand q6, q6, q5
+ vadd.u64 q9, q8, q8
+ vst1.64 {q8}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q9, #63
+ veor q9, q9, q6
+ vand q7, q7, q5
+ vadd.u64 q10, q9, q9
+ vst1.64 {q9}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q10, #63
+ veor q10, q10, q7
+ vand q6, q6, q5
+ vld1.8 {q0}, [r7]!
+ vadd.u64 q11, q10, q10
+ vst1.64 {q10}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q11, #63
+ veor q11, q11, q6
+ vand q7, q7, q5
+ vld1.8 {q1}, [r7]!
+ veor q0, q0, q8
+ vadd.u64 q12, q11, q11
+ vst1.64 {q11}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q12, #63
+ veor q12, q12, q7
+ vand q6, q6, q5
+ vld1.8 {q2}, [r7]!
+ veor q1, q1, q9
+ vadd.u64 q13, q12, q12
+ vst1.64 {q12}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q13, #63
+ veor q13, q13, q6
+ vand q7, q7, q5
+ vld1.8 {q3}, [r7]!
+ veor q2, q2, q10
+ vadd.u64 q14, q13, q13
+ vst1.64 {q13}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q14, #63
+ veor q14, q14, q7
+ vand q6, q6, q5
+ vld1.8 {q4}, [r7]!
+ veor q3, q3, q11
+ vadd.u64 q15, q14, q14
+ vst1.64 {q14}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q15, #63
+ veor q15, q15, q6
+ vand q7, q7, q5
+ vld1.8 {q5}, [r7]!
+ veor q4, q4, q12
+ vadd.u64 q8, q15, q15
+ vst1.64 {q15}, [r0,:128]!
+ vswp d15,d14
+ veor q8, q8, q7
+ vst1.64 {q8}, [r0,:128] @ next round tweak
+
+ vld1.8 {q6-q7}, [r7]!
+ veor q5, q5, q13
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q6, q6, q14
+ mov r5, r1 @ pass rounds
+ veor q7, q7, q15
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ vld1.64 {q14-q15}, [r0,:128]!
+ veor q10, q2, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ veor q12, q3, q14
+ vst1.8 {q10-q11}, [r8]!
+ veor q13, q5, q15
+ vst1.8 {q12-q13}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+
+ subs r9, #0x80
+ bpl .Lxts_dec_loop
+
+.Lxts_dec_short:
+ adds r9, #0x70
+ bmi .Lxts_dec_done
+
+ vldmia r2, {q5} @ load XTS magic
+ vshr.s64 q7, q8, #63
+ mov r0, sp
+ vand q7, q7, q5
+ vadd.u64 q9, q8, q8
+ vst1.64 {q8}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q9, #63
+ veor q9, q9, q7
+ vand q6, q6, q5
+ vadd.u64 q10, q9, q9
+ vst1.64 {q9}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q10, #63
+ veor q10, q10, q6
+ vand q7, q7, q5
+ vld1.8 {q0}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_1
+ vadd.u64 q11, q10, q10
+ vst1.64 {q10}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q11, #63
+ veor q11, q11, q7
+ vand q6, q6, q5
+ vld1.8 {q1}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_2
+ veor q0, q0, q8
+ vadd.u64 q12, q11, q11
+ vst1.64 {q11}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q12, #63
+ veor q12, q12, q6
+ vand q7, q7, q5
+ vld1.8 {q2}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_3
+ veor q1, q1, q9
+ vadd.u64 q13, q12, q12
+ vst1.64 {q12}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q13, #63
+ veor q13, q13, q7
+ vand q6, q6, q5
+ vld1.8 {q3}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_4
+ veor q2, q2, q10
+ vadd.u64 q14, q13, q13
+ vst1.64 {q13}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q14, #63
+ veor q14, q14, q6
+ vand q7, q7, q5
+ vld1.8 {q4}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_5
+ veor q3, q3, q11
+ vadd.u64 q15, q14, q14
+ vst1.64 {q14}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q15, #63
+ veor q15, q15, q7
+ vand q6, q6, q5
+ vld1.8 {q5}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_6
+ veor q4, q4, q12
+ sub r9, #0x10
+ vst1.64 {q15}, [r0,:128] @ next round tweak
+
+ vld1.8 {q6}, [r7]!
+ veor q5, q5, q13
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q6, q6, q14
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ vld1.64 {q14}, [r0,:128]!
+ veor q10, q2, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ veor q12, q3, q14
+ vst1.8 {q10-q11}, [r8]!
+ vst1.8 {q12}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_6:
+ vst1.64 {q14}, [r0,:128] @ next round tweak
+
+ veor q4, q4, q12
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q5, q5, q13
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ veor q10, q2, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ vst1.8 {q10-q11}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_5:
+ vst1.64 {q13}, [r0,:128] @ next round tweak
+
+ veor q3, q3, q11
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q4, q4, q12
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ veor q10, q2, q12
+ vst1.8 {q8-q9}, [r8]!
+ vst1.8 {q10}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_4:
+ vst1.64 {q12}, [r0,:128] @ next round tweak
+
+ veor q2, q2, q10
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q3, q3, q11
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ vst1.8 {q8-q9}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_3:
+ vst1.64 {q11}, [r0,:128] @ next round tweak
+
+ veor q1, q1, q9
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q2, q2, q10
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ vst1.8 {q8}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_2:
+ vst1.64 {q10}, [r0,:128] @ next round tweak
+
+ veor q0, q0, q8
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q1, q1, q9
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ vst1.8 {q0-q1}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_1:
+ mov r0, sp
+ veor q0, q8
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+ mov r4, r3 @ preserve fp
+ mov r5, r2 @ preserve magic
+
+ bl AES_decrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q8
+ vst1.8 {q0}, [r8]!
+ mov r3, r4
+ mov r2, r5
+
+ vmov q8, q9 @ next round tweak
+
+.Lxts_dec_done:
+#ifndef XTS_CHAIN_TWEAK
+ adds r9, #0x10
+ beq .Lxts_dec_ret
+
+ @ calculate one round of extra tweak for the stolen ciphertext
+ vldmia r2, {q5}
+ vshr.s64 q6, q8, #63
+ vand q6, q6, q5
+ vadd.u64 q9, q8, q8
+ vswp d13,d12
+ veor q9, q9, q6
+
+ @ perform the final decryption with the last tweak value
+ vld1.8 {q0}, [r7]!
+ mov r0, sp
+ veor q0, q0, q9
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+ mov r4, r3 @ preserve fp
+
+ bl AES_decrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q9
+ vst1.8 {q0}, [r8]
+
+ mov r6, r8
+.Lxts_dec_steal:
+ ldrb r1, [r8]
+ ldrb r0, [r7], #1
+ strb r1, [r8, #0x10]
+ strb r0, [r8], #1
+
+ subs r9, #1
+ bhi .Lxts_dec_steal
+
+ vld1.8 {q0}, [r6]
+ mov r0, sp
+ veor q0, q8
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+
+ bl AES_decrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q8
+ vst1.8 {q0}, [r6]
+ mov r3, r4
+#endif
+
+.Lxts_dec_ret:
+ bic r0, r3, #0xf
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifdef XTS_CHAIN_TWEAK
+ ldr r1, [r3, #0x20+VFP_ABI_FRAME] @ chain tweak
+#endif
+.Lxts_dec_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r0
+ bne .Lxts_dec_bzero
+
+ mov sp, r3
+#ifdef XTS_CHAIN_TWEAK
+ vst1.8 {q8}, [r1]
+#endif
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
+#endif
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
new file mode 100644
index 000000000000..4522366da759
--- /dev/null
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -0,0 +1,434 @@
+/*
+ * linux/arch/arm/crypto/aesbs-glue.c - glue code for NEON bit sliced AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <crypto/aes.h>
+#include <crypto/ablk_helper.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+
+#include "aes_glue.h"
+
+#define BIT_SLICED_KEY_MAXSIZE (128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE)
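+/* i.e. 128 * 13 + 2 * 16 == 1696 bytes for the largest, 14-round key schedule */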
+
+struct BS_KEY {
+ struct AES_KEY rk;
+ int converted;
+ u8 __aligned(8) bs[BIT_SLICED_KEY_MAXSIZE];
+} __aligned(8);
+
+asmlinkage void bsaes_enc_key_convert(u8 out[], struct AES_KEY const *in);
+asmlinkage void bsaes_dec_key_convert(u8 out[], struct AES_KEY const *in);
+
+asmlinkage void bsaes_cbc_encrypt(u8 const in[], u8 out[], u32 bytes,
+ struct BS_KEY *key, u8 iv[]);
+
+asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
+ struct BS_KEY *key, u8 const iv[]);
+
+asmlinkage void bsaes_xts_encrypt(u8 const in[], u8 out[], u32 bytes,
+ struct BS_KEY *key, u8 tweak[]);
+
+asmlinkage void bsaes_xts_decrypt(u8 const in[], u8 out[], u32 bytes,
+ struct BS_KEY *key, u8 tweak[]);
+
+struct aesbs_cbc_ctx {
+ struct AES_KEY enc;
+ struct BS_KEY dec;
+};
+
+struct aesbs_ctr_ctx {
+ struct BS_KEY enc;
+};
+
+struct aesbs_xts_ctx {
+ struct BS_KEY enc;
+ struct BS_KEY dec;
+ struct AES_KEY twkey;
+};
+
+static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ int bits = key_len * 8;
+
+ if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ ctx->dec.rk = ctx->enc;
+ private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+ ctx->dec.converted = 0;
+ return 0;
+}
+
+static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+ int bits = key_len * 8;
+
+ if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ ctx->enc.converted = 0;
+ return 0;
+}
+
+static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ int bits = key_len * 4;
+
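+ /* the supplied key is two AES keys back to back, so each half
+ * is key_len / 2 bytes, i.e. key_len * 4 bits */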
+ if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ ctx->dec.rk = ctx->enc.rk;
+ private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+ private_AES_set_encrypt_key(in_key + key_len / 2, bits, &ctx->twkey);
+ ctx->enc.converted = ctx->dec.converted = 0;
+ return 0;
+}
+
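+/*
+ * CBC encryption is inherently sequential, so it sticks with the scalar
+ * AES_encrypt() path; only CBC decryption further down is handed to the
+ * 8-way bit-sliced NEON code.
+ */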
+static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while (walk.nbytes) {
+ u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+ u8 *src = walk.src.virt.addr;
+
+ if (walk.dst.virt.addr == walk.src.virt.addr) {
+ u8 *iv = walk.iv;
+
+ do {
+ crypto_xor(src, iv, AES_BLOCK_SIZE);
+ AES_encrypt(src, src, &ctx->enc);
+ iv = src;
+ src += AES_BLOCK_SIZE;
+ } while (--blocks);
+ memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+ } else {
+ u8 *dst = walk.dst.virt.addr;
+
+ do {
+ crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
+ AES_encrypt(walk.iv, dst, &ctx->enc);
+ memcpy(walk.iv, dst, AES_BLOCK_SIZE);
+ src += AES_BLOCK_SIZE;
+ dst += AES_BLOCK_SIZE;
+ } while (--blocks);
+ }
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
+static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+ while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
+ kernel_neon_begin();
+ bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes, &ctx->dec, walk.iv);
+ kernel_neon_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ while (walk.nbytes) {
+ u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+ u8 *dst = walk.dst.virt.addr;
+ u8 *src = walk.src.virt.addr;
+ u8 bk[2][AES_BLOCK_SIZE];
+ u8 *iv = walk.iv;
+
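+ /* when decrypting in place, stash the ciphertext first:
+ * the next block still needs it as its chaining value */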
+ do {
+ if (walk.dst.virt.addr == walk.src.virt.addr)
+ memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
+
+ AES_decrypt(src, dst, &ctx->dec.rk);
+ crypto_xor(dst, iv, AES_BLOCK_SIZE);
+
+ if (walk.dst.virt.addr == walk.src.virt.addr)
+ iv = bk[blocks & 1];
+ else
+ iv = src;
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ } while (--blocks);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
+static void inc_be128_ctr(__be32 ctr[], u32 addend)
+{
+ int i;
+
+ for (i = 3; i >= 0; i--, addend = 1) {
+ u32 n = be32_to_cpu(ctr[i]) + addend;
+
+ ctr[i] = cpu_to_be32(n);
+ if (n >= addend)
+ break;
+ }
+}
+
+static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ u32 blocks;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+ while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
+ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+ __be32 *ctr = (__be32 *)walk.iv;
+ u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);
+
+ /* avoid 32 bit counter overflow in the NEON code */
+ if (unlikely(headroom < blocks)) {
+ blocks = headroom + 1;
+ tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
+ }
+ kernel_neon_begin();
+ bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
+ walk.dst.virt.addr, blocks,
+ &ctx->enc, walk.iv);
+ kernel_neon_end();
+ inc_be128_ctr(ctr, blocks);
+
+ nbytes -= blocks * AES_BLOCK_SIZE;
+ if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
+ break;
+
+ err = blkcipher_walk_done(desc, &walk, tail);
+ }
+ if (walk.nbytes) {
+ u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 ks[AES_BLOCK_SIZE];
+
+ AES_encrypt(walk.iv, ks, &ctx->enc.rk);
+ if (tdst != tsrc)
+ memcpy(tdst, tsrc, nbytes);
+ crypto_xor(tdst, ks, nbytes);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
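Since the NEON routine never modifies the caller's counter and only adds to its low 32-bit word internally, inc_be128_ctr() above re-advances the full 128-bit value, carries included. A quick illustration of the carry ripple (not part of the driver):

    __be32 ctr[4] = { 0, 0, 0, cpu_to_be32(0xffffffff) };

    inc_be128_ctr(ctr, 1);
    /* ctr[3] wraps to 0 and the carry ripples into ctr[2] == cpu_to_be32(1) */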
+
+static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+ /* generate the initial tweak */
+ AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+ while (walk.nbytes) {
+ kernel_neon_begin();
+ bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes, &ctx->enc, walk.iv);
+ kernel_neon_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
+static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+ /* generate the initial tweak */
+ AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+ while (walk.nbytes) {
+ kernel_neon_begin();
+ bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes, &ctx->dec, walk.iv);
+ kernel_neon_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
+static struct crypto_alg aesbs_algs[] = { {
+ .cra_name = "__cbc-aes-neonbs",
+ .cra_driver_name = "__driver-cbc-aes-neonbs",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aesbs_cbc_set_key,
+ .encrypt = aesbs_cbc_encrypt,
+ .decrypt = aesbs_cbc_decrypt,
+ },
+}, {
+ .cra_name = "__ctr-aes-neonbs",
+ .cra_driver_name = "__driver-ctr-aes-neonbs",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aesbs_ctr_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aesbs_ctr_set_key,
+ .encrypt = aesbs_ctr_encrypt,
+ .decrypt = aesbs_ctr_encrypt,
+ },
+}, {
+ .cra_name = "__xts-aes-neonbs",
+ .cra_driver_name = "__driver-xts-aes-neonbs",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aesbs_xts_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aesbs_xts_set_key,
+ .encrypt = aesbs_xts_encrypt,
+ .decrypt = aesbs_xts_decrypt,
+ },
+}, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-neonbs",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = __ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-neonbs",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-neonbs",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+} };
+
+static int __init aesbs_mod_init(void)
+{
+ if (!cpu_has_neon())
+ return -ENODEV;
+
+ return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+static void __exit aesbs_mod_exit(void)
+{
+ crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+module_init(aesbs_mod_init);
+module_exit(aesbs_mod_exit);
+
+MODULE_DESCRIPTION("Bit sliced AES in CBC/CTR/XTS modes using NEON");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
new file mode 100644
index 000000000000..f3d96d932573
--- /dev/null
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -0,0 +1,2467 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+# <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+# granted.
+# ====================================================================
+
+# Bit-sliced AES for ARM NEON
+#
+# February 2012.
+#
+# This implementation is a direct adaptation of the bsaes-x86_64 module
+# for ARM NEON, except that this module is endian-neutral [in the sense
+# that it can be compiled for either endianness] courtesy of vld1.8's
+# neutrality. The initial version doesn't implement an interface to
+# OpenSSL, only low-level primitives and unsupported entry points, just
+# enough to collect performance results, which for the Cortex-A8 core are:
+#
+# encrypt 19.5 cycles per byte processed with 128-bit key
+# decrypt 22.1 cycles per byte processed with 128-bit key
+# key conv. 440 cycles per 128-bit key/0.18 of 8x block
+#
+# Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts one in 19.7,
+# which is [much] worse than anticipated (for further details see
+# http://www.openssl.org/~appro/Snapdragon-S4.html).
+#
+# Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+# manages in 20.0 cycles].
+#
+# When comparing to x86_64 results keep in mind that the NEON unit is
+# [mostly] single-issue and thus can't [fully] benefit from
+# instruction-level parallelism. And when comparing to aes-armv4
+# results keep in mind the key schedule conversion overhead (see
+# bsaes-x86_64.pl for further details)...
+#
+# <appro@openssl.org>
+
+# April-August 2013
+#
+# Add CBC, CTR and XTS subroutines, adapt for kernel use.
+#
+# <ard.biesheuvel@linaro.org>
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+my ($inp,$out,$len,$key)=("r0","r1","r2","r3");
+my @XMM=map("q$_",(0..15));
+
+{
+my ($key,$rounds,$const)=("r4","r5","r6");
+
+sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
+sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
+
+sub Sbox {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+ &InBasisChange (@b);
+ &Inv_GF256 (@b[6,5,0,3,7,1,4,2],@t,@s);
+ &OutBasisChange (@b[7,1,4,2,6,5,0,3]);
+}
+
+sub InBasisChange {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb
+my @b=@_[0..7];
+$code.=<<___;
+ veor @b[2], @b[2], @b[1]
+ veor @b[5], @b[5], @b[6]
+ veor @b[3], @b[3], @b[0]
+ veor @b[6], @b[6], @b[2]
+ veor @b[5], @b[5], @b[0]
+
+ veor @b[6], @b[6], @b[3]
+ veor @b[3], @b[3], @b[7]
+ veor @b[7], @b[7], @b[5]
+ veor @b[3], @b[3], @b[4]
+ veor @b[4], @b[4], @b[5]
+
+ veor @b[2], @b[2], @b[7]
+ veor @b[3], @b[3], @b[1]
+ veor @b[1], @b[1], @b[5]
+___
+}
+
+sub OutBasisChange {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
+my @b=@_[0..7];
+$code.=<<___;
+ veor @b[0], @b[0], @b[6]
+ veor @b[1], @b[1], @b[4]
+ veor @b[4], @b[4], @b[6]
+ veor @b[2], @b[2], @b[0]
+ veor @b[6], @b[6], @b[1]
+
+ veor @b[1], @b[1], @b[5]
+ veor @b[5], @b[5], @b[3]
+ veor @b[3], @b[3], @b[7]
+ veor @b[7], @b[7], @b[5]
+ veor @b[2], @b[2], @b[5]
+
+ veor @b[4], @b[4], @b[7]
+___
+}
+
+sub InvSbox {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b6, b4, b2, b7, b3, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+ &InvInBasisChange (@b);
+ &Inv_GF256 (@b[5,1,2,6,3,7,0,4],@t,@s);
+ &InvOutBasisChange (@b[3,7,0,4,5,1,2,6]);
+}
+
+sub InvInBasisChange { # OutBasisChange in reverse (with twist)
+my @b=@_[5,1,2,6,3,7,0,4];
+$code.=<<___
+ veor @b[1], @b[1], @b[7]
+ veor @b[4], @b[4], @b[7]
+
+ veor @b[7], @b[7], @b[5]
+ veor @b[1], @b[1], @b[3]
+ veor @b[2], @b[2], @b[5]
+ veor @b[3], @b[3], @b[7]
+
+ veor @b[6], @b[6], @b[1]
+ veor @b[2], @b[2], @b[0]
+ veor @b[5], @b[5], @b[3]
+ veor @b[4], @b[4], @b[6]
+ veor @b[0], @b[0], @b[6]
+ veor @b[1], @b[1], @b[4]
+___
+}
+
+sub InvOutBasisChange { # InBasisChange in reverse
+my @b=@_[2,5,7,3,6,1,0,4];
+$code.=<<___;
+ veor @b[1], @b[1], @b[5]
+ veor @b[2], @b[2], @b[7]
+
+ veor @b[3], @b[3], @b[1]
+ veor @b[4], @b[4], @b[5]
+ veor @b[7], @b[7], @b[5]
+ veor @b[3], @b[3], @b[4]
+ veor @b[5], @b[5], @b[0]
+ veor @b[3], @b[3], @b[7]
+ veor @b[6], @b[6], @b[2]
+ veor @b[2], @b[2], @b[1]
+ veor @b[6], @b[6], @b[3]
+
+ veor @b[3], @b[3], @b[0]
+ veor @b[5], @b[5], @b[6]
+___
+}
+
+sub Mul_GF4 {
+#;*************************************************************
+#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
+#;*************************************************************
+my ($x0,$x1,$y0,$y1,$t0,$t1)=@_;
+$code.=<<___;
+ veor $t0, $y0, $y1
+ vand $t0, $t0, $x0
+ veor $x0, $x0, $x1
+ vand $t1, $x1, $y0
+ vand $x0, $x0, $y1
+ veor $x1, $t1, $t0
+ veor $x0, $x0, $t1
+___
+}
+
+sub Mul_GF4_N { # not used, see next subroutine
+# multiply and scale by N
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+ veor $t0, $y0, $y1
+ vand $t0, $t0, $x0
+ veor $x0, $x0, $x1
+ vand $x1, $x1, $y0
+ vand $x0, $x0, $y1
+ veor $x1, $x1, $x0
+ veor $x0, $x0, $t0
+___
+}
+
+sub Mul_GF4_N_GF4 {
+# interleaved Mul_GF4_N and Mul_GF4
+my ($x0,$x1,$y0,$y1,$t0,
+ $x2,$x3,$y2,$y3,$t1)=@_;
+$code.=<<___;
+ veor $t0, $y0, $y1
+ veor $t1, $y2, $y3
+ vand $t0, $t0, $x0
+ vand $t1, $t1, $x2
+ veor $x0, $x0, $x1
+ veor $x2, $x2, $x3
+ vand $x1, $x1, $y0
+ vand $x3, $x3, $y2
+ vand $x0, $x0, $y1
+ vand $x2, $x2, $y3
+ veor $x1, $x1, $x0
+ veor $x2, $x2, $x3
+ veor $x0, $x0, $t0
+ veor $x3, $x3, $t1
+___
+}
+sub Mul_GF16_2 {
+my @x=@_[0..7];
+my @y=@_[8..11];
+my @t=@_[12..15];
+$code.=<<___;
+ veor @t[0], @x[0], @x[2]
+ veor @t[1], @x[1], @x[3]
+___
+ &Mul_GF4 (@x[0], @x[1], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+ veor @y[0], @y[0], @y[2]
+ veor @y[1], @y[1], @y[3]
+___
+ Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
+ @x[2], @x[3], @y[2], @y[3], @t[2]);
+$code.=<<___;
+ veor @x[0], @x[0], @t[0]
+ veor @x[2], @x[2], @t[0]
+ veor @x[1], @x[1], @t[1]
+ veor @x[3], @x[3], @t[1]
+
+ veor @t[0], @x[4], @x[6]
+ veor @t[1], @x[5], @x[7]
+___
+ &Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
+ @x[6], @x[7], @y[2], @y[3], @t[2]);
+$code.=<<___;
+ veor @y[0], @y[0], @y[2]
+ veor @y[1], @y[1], @y[3]
+___
+ &Mul_GF4 (@x[4], @x[5], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+ veor @x[4], @x[4], @t[0]
+ veor @x[6], @x[6], @t[0]
+ veor @x[5], @x[5], @t[1]
+ veor @x[7], @x[7], @t[1]
+___
+}
+sub Inv_GF256 {
+#;********************************************************************
+#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144) *
+#;********************************************************************
+my @x=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+# direct optimizations from hardware
+$code.=<<___;
+ veor @t[3], @x[4], @x[6]
+ veor @t[2], @x[5], @x[7]
+ veor @t[1], @x[1], @x[3]
+ veor @s[1], @x[7], @x[6]
+ vmov @t[0], @t[2]
+ veor @s[0], @x[0], @x[2]
+
+ vorr @t[2], @t[2], @t[1]
+ veor @s[3], @t[3], @t[0]
+ vand @s[2], @t[3], @s[0]
+ vorr @t[3], @t[3], @s[0]
+ veor @s[0], @s[0], @t[1]
+ vand @t[0], @t[0], @t[1]
+ veor @t[1], @x[3], @x[2]
+ vand @s[3], @s[3], @s[0]
+ vand @s[1], @s[1], @t[1]
+ veor @t[1], @x[4], @x[5]
+ veor @s[0], @x[1], @x[0]
+ veor @t[3], @t[3], @s[1]
+ veor @t[2], @t[2], @s[1]
+ vand @s[1], @t[1], @s[0]
+ vorr @t[1], @t[1], @s[0]
+ veor @t[3], @t[3], @s[3]
+ veor @t[0], @t[0], @s[1]
+ veor @t[2], @t[2], @s[2]
+ veor @t[1], @t[1], @s[3]
+ veor @t[0], @t[0], @s[2]
+ vand @s[0], @x[7], @x[3]
+ veor @t[1], @t[1], @s[2]
+ vand @s[1], @x[6], @x[2]
+ vand @s[2], @x[5], @x[1]
+ vorr @s[3], @x[4], @x[0]
+ veor @t[3], @t[3], @s[0]
+ veor @t[1], @t[1], @s[2]
+ veor @t[0], @t[0], @s[3]
+ veor @t[2], @t[2], @s[1]
+
+ @ Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
+
+ @ new smaller inversion
+
+ vand @s[2], @t[3], @t[1]
+ vmov @s[0], @t[0]
+
+ veor @s[1], @t[2], @s[2]
+ veor @s[3], @t[0], @s[2]
+ veor @s[2], @t[0], @s[2] @ @s[2]=@s[3]
+
+ vbsl @s[1], @t[1], @t[0]
+ vbsl @s[3], @t[3], @t[2]
+ veor @t[3], @t[3], @t[2]
+
+ vbsl @s[0], @s[1], @s[2]
+ vbsl @t[0], @s[2], @s[1]
+
+ vand @s[2], @s[0], @s[3]
+ veor @t[1], @t[1], @t[0]
+
+ veor @s[2], @s[2], @t[3]
+___
+# output in s3, s2, s1, t1
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
+ &Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
+
+### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
+}
+
+# AES linear components
+
+sub ShiftRows {
+my @x=@_[0..7];
+my @t=@_[8..11];
+my $mask=pop;
+$code.=<<___;
+ vldmia $key!, {@t[0]-@t[3]}
+ veor @t[0], @t[0], @x[0]
+ veor @t[1], @t[1], @x[1]
+ vtbl.8 `&Dlo(@x[0])`, {@t[0]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[0])`, {@t[0]}, `&Dhi($mask)`
+ vldmia $key!, {@t[0]}
+ veor @t[2], @t[2], @x[2]
+ vtbl.8 `&Dlo(@x[1])`, {@t[1]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[1])`, {@t[1]}, `&Dhi($mask)`
+ vldmia $key!, {@t[1]}
+ veor @t[3], @t[3], @x[3]
+ vtbl.8 `&Dlo(@x[2])`, {@t[2]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[2])`, {@t[2]}, `&Dhi($mask)`
+ vldmia $key!, {@t[2]}
+ vtbl.8 `&Dlo(@x[3])`, {@t[3]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[3])`, {@t[3]}, `&Dhi($mask)`
+ vldmia $key!, {@t[3]}
+ veor @t[0], @t[0], @x[4]
+ veor @t[1], @t[1], @x[5]
+ vtbl.8 `&Dlo(@x[4])`, {@t[0]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[4])`, {@t[0]}, `&Dhi($mask)`
+ veor @t[2], @t[2], @x[6]
+ vtbl.8 `&Dlo(@x[5])`, {@t[1]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[5])`, {@t[1]}, `&Dhi($mask)`
+ veor @t[3], @t[3], @x[7]
+ vtbl.8 `&Dlo(@x[6])`, {@t[2]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[6])`, {@t[2]}, `&Dhi($mask)`
+ vtbl.8 `&Dlo(@x[7])`, {@t[3]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[7])`, {@t[3]}, `&Dhi($mask)`
+___
+}
+
+sub MixColumns {
+# modified to emit output in an order suitable for feeding back to aesenc[last]
+my @x=@_[0..7];
+my @t=@_[8..15];
+my $inv=@_[16]; # optional
+$code.=<<___;
+ vext.8 @t[0], @x[0], @x[0], #12 @ x0 <<< 32
+ vext.8 @t[1], @x[1], @x[1], #12
+ veor @x[0], @x[0], @t[0] @ x0 ^ (x0 <<< 32)
+ vext.8 @t[2], @x[2], @x[2], #12
+ veor @x[1], @x[1], @t[1]
+ vext.8 @t[3], @x[3], @x[3], #12
+ veor @x[2], @x[2], @t[2]
+ vext.8 @t[4], @x[4], @x[4], #12
+ veor @x[3], @x[3], @t[3]
+ vext.8 @t[5], @x[5], @x[5], #12
+ veor @x[4], @x[4], @t[4]
+ vext.8 @t[6], @x[6], @x[6], #12
+ veor @x[5], @x[5], @t[5]
+ vext.8 @t[7], @x[7], @x[7], #12
+ veor @x[6], @x[6], @t[6]
+
+ veor @t[1], @t[1], @x[0]
+ veor @x[7], @x[7], @t[7]
+ vext.8 @x[0], @x[0], @x[0], #8 @ (x0 ^ (x0 <<< 32)) <<< 64)
+ veor @t[2], @t[2], @x[1]
+ veor @t[0], @t[0], @x[7]
+ veor @t[1], @t[1], @x[7]
+ vext.8 @x[1], @x[1], @x[1], #8
+ veor @t[5], @t[5], @x[4]
+ veor @x[0], @x[0], @t[0]
+ veor @t[6], @t[6], @x[5]
+ veor @x[1], @x[1], @t[1]
+ vext.8 @t[0], @x[4], @x[4], #8
+ veor @t[4], @t[4], @x[3]
+ vext.8 @t[1], @x[5], @x[5], #8
+ veor @t[7], @t[7], @x[6]
+ vext.8 @x[4], @x[3], @x[3], #8
+ veor @t[3], @t[3], @x[2]
+ vext.8 @x[5], @x[7], @x[7], #8
+ veor @t[4], @t[4], @x[7]
+ vext.8 @x[3], @x[6], @x[6], #8
+ veor @t[3], @t[3], @x[7]
+ vext.8 @x[6], @x[2], @x[2], #8
+ veor @x[7], @t[1], @t[5]
+___
+$code.=<<___ if (!$inv);
+ veor @x[2], @t[0], @t[4]
+ veor @x[4], @x[4], @t[3]
+ veor @x[5], @x[5], @t[7]
+ veor @x[3], @x[3], @t[6]
+ @ vmov @x[2], @t[0]
+ veor @x[6], @x[6], @t[2]
+ @ vmov @x[7], @t[1]
+___
+$code.=<<___ if ($inv);
+ veor @t[3], @t[3], @x[4]
+ veor @x[5], @x[5], @t[7]
+ veor @x[2], @x[3], @t[6]
+ veor @x[3], @t[0], @t[4]
+ veor @x[4], @x[6], @t[2]
+ vmov @x[6], @t[3]
+ @ vmov @x[7], @t[1]
+___
+}
+
+sub InvMixColumns_orig {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+$code.=<<___;
+ @ multiplication by 0x0e
+ vext.8 @t[7], @x[7], @x[7], #12
+ vmov @t[2], @x[2]
+ veor @x[2], @x[2], @x[5] @ 2 5
+ veor @x[7], @x[7], @x[5] @ 7 5
+ vext.8 @t[0], @x[0], @x[0], #12
+ vmov @t[5], @x[5]
+ veor @x[5], @x[5], @x[0] @ 5 0 [1]
+ veor @x[0], @x[0], @x[1] @ 0 1
+ vext.8 @t[1], @x[1], @x[1], #12
+ veor @x[1], @x[1], @x[2] @ 1 25
+ veor @x[0], @x[0], @x[6] @ 01 6 [2]
+ vext.8 @t[3], @x[3], @x[3], #12
+ veor @x[1], @x[1], @x[3] @ 125 3 [4]
+ veor @x[2], @x[2], @x[0] @ 25 016 [3]
+ veor @x[3], @x[3], @x[7] @ 3 75
+ veor @x[7], @x[7], @x[6] @ 75 6 [0]
+ vext.8 @t[6], @x[6], @x[6], #12
+ vmov @t[4], @x[4]
+ veor @x[6], @x[6], @x[4] @ 6 4
+ veor @x[4], @x[4], @x[3] @ 4 375 [6]
+ veor @x[3], @x[3], @x[7] @ 375 756=36
+ veor @x[6], @x[6], @t[5] @ 64 5 [7]
+ veor @x[3], @x[3], @t[2] @ 36 2
+ vext.8 @t[5], @t[5], @t[5], #12
+ veor @x[3], @x[3], @t[4] @ 362 4 [5]
+___
+ my @y = @x[7,5,0,2,1,3,4,6];
+$code.=<<___;
+ @ multiplication by 0x0b
+ veor @y[1], @y[1], @y[0]
+ veor @y[0], @y[0], @t[0]
+ vext.8 @t[2], @t[2], @t[2], #12
+ veor @y[1], @y[1], @t[1]
+ veor @y[0], @y[0], @t[5]
+ vext.8 @t[4], @t[4], @t[4], #12
+ veor @y[1], @y[1], @t[6]
+ veor @y[0], @y[0], @t[7]
+ veor @t[7], @t[7], @t[6] @ clobber t[7]
+
+ veor @y[3], @y[3], @t[0]
+ veor @y[1], @y[1], @y[0]
+ vext.8 @t[0], @t[0], @t[0], #12
+ veor @y[2], @y[2], @t[1]
+ veor @y[4], @y[4], @t[1]
+ vext.8 @t[1], @t[1], @t[1], #12
+ veor @y[2], @y[2], @t[2]
+ veor @y[3], @y[3], @t[2]
+ veor @y[5], @y[5], @t[2]
+ veor @y[2], @y[2], @t[7]
+ vext.8 @t[2], @t[2], @t[2], #12
+ veor @y[3], @y[3], @t[3]
+ veor @y[6], @y[6], @t[3]
+ veor @y[4], @y[4], @t[3]
+ veor @y[7], @y[7], @t[4]
+ vext.8 @t[3], @t[3], @t[3], #12
+ veor @y[5], @y[5], @t[4]
+ veor @y[7], @y[7], @t[7]
+ veor @t[7], @t[7], @t[5] @ clobber t[7] even more
+ veor @y[3], @y[3], @t[5]
+ veor @y[4], @y[4], @t[4]
+
+ veor @y[5], @y[5], @t[7]
+ vext.8 @t[4], @t[4], @t[4], #12
+ veor @y[6], @y[6], @t[7]
+ veor @y[4], @y[4], @t[7]
+
+ veor @t[7], @t[7], @t[5]
+ vext.8 @t[5], @t[5], @t[5], #12
+
+ @ multiplication by 0x0d
+ veor @y[4], @y[4], @y[7]
+ veor @t[7], @t[7], @t[6] @ restore t[7]
+ veor @y[7], @y[7], @t[4]
+ vext.8 @t[6], @t[6], @t[6], #12
+ veor @y[2], @y[2], @t[0]
+ veor @y[7], @y[7], @t[5]
+ vext.8 @t[7], @t[7], @t[7], #12
+ veor @y[2], @y[2], @t[2]
+
+ veor @y[3], @y[3], @y[1]
+ veor @y[1], @y[1], @t[1]
+ veor @y[0], @y[0], @t[0]
+ veor @y[3], @y[3], @t[0]
+ veor @y[1], @y[1], @t[5]
+ veor @y[0], @y[0], @t[5]
+ vext.8 @t[0], @t[0], @t[0], #12
+ veor @y[1], @y[1], @t[7]
+ veor @y[0], @y[0], @t[6]
+ veor @y[3], @y[3], @y[1]
+ veor @y[4], @y[4], @t[1]
+ vext.8 @t[1], @t[1], @t[1], #12
+
+ veor @y[7], @y[7], @t[7]
+ veor @y[4], @y[4], @t[2]
+ veor @y[5], @y[5], @t[2]
+ veor @y[2], @y[2], @t[6]
+ veor @t[6], @t[6], @t[3] @ clobber t[6]
+ vext.8 @t[2], @t[2], @t[2], #12
+ veor @y[4], @y[4], @y[7]
+ veor @y[3], @y[3], @t[6]
+
+ veor @y[6], @y[6], @t[6]
+ veor @y[5], @y[5], @t[5]
+ vext.8 @t[5], @t[5], @t[5], #12
+ veor @y[6], @y[6], @t[4]
+ vext.8 @t[4], @t[4], @t[4], #12
+ veor @y[5], @y[5], @t[6]
+ veor @y[6], @y[6], @t[7]
+ vext.8 @t[7], @t[7], @t[7], #12
+ veor @t[6], @t[6], @t[3] @ restore t[6]
+ vext.8 @t[3], @t[3], @t[3], #12
+
+ @ multiplication by 0x09
+ veor @y[4], @y[4], @y[1]
+ veor @t[1], @t[1], @y[1] @ t[1]=y[1]
+ veor @t[0], @t[0], @t[5] @ clobber t[0]
+ vext.8 @t[6], @t[6], @t[6], #12
+ veor @t[1], @t[1], @t[5]
+ veor @y[3], @y[3], @t[0]
+ veor @t[0], @t[0], @y[0] @ t[0]=y[0]
+ veor @t[1], @t[1], @t[6]
+ veor @t[6], @t[6], @t[7] @ clobber t[6]
+ veor @y[4], @y[4], @t[1]
+ veor @y[7], @y[7], @t[4]
+ veor @y[6], @y[6], @t[3]
+ veor @y[5], @y[5], @t[2]
+ veor @t[4], @t[4], @y[4] @ t[4]=y[4]
+ veor @t[3], @t[3], @y[3] @ t[3]=y[3]
+ veor @t[5], @t[5], @y[5] @ t[5]=y[5]
+ veor @t[2], @t[2], @y[2] @ t[2]=y[2]
+ veor @t[3], @t[3], @t[7]
+ veor @XMM[5], @t[5], @t[6]
+ veor @XMM[6], @t[6], @y[6] @ t[6]=y[6]
+ veor @XMM[2], @t[2], @t[6]
+ veor @XMM[7], @t[7], @y[7] @ t[7]=y[7]
+
+ vmov @XMM[0], @t[0]
+ vmov @XMM[1], @t[1]
+ @ vmov @XMM[2], @t[2]
+ vmov @XMM[3], @t[3]
+ vmov @XMM[4], @t[4]
+ @ vmov @XMM[5], @t[5]
+ @ vmov @XMM[6], @t[6]
+ @ vmov @XMM[7], @t[7]
+___
+}
+
+sub InvMixColumns {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+# Thanks to Jussi Kivilinna for providing a pointer to
+#
+# | 0e 0b 0d 09 | | 02 03 01 01 | | 05 00 04 00 |
+# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
+# | 0d 09 0e 0b | | 01 01 02 03 | | 04 00 05 00 |
+# | 0b 0d 09 0e | | 03 01 01 02 | | 00 04 00 05 |
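+#
+# A quick check of the top-left entry of that product over GF(2^8) with
+# the AES polynomial x^8+x^4+x^3+x+1: 02*05 ^ 03*00 ^ 01*04 ^ 01*00
+# = 0a ^ 04 = 0e, as required.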
+
+$code.=<<___;
+ @ multiplication by 0x05-0x00-0x04-0x00
+ vext.8 @t[0], @x[0], @x[0], #8
+ vext.8 @t[6], @x[6], @x[6], #8
+ vext.8 @t[7], @x[7], @x[7], #8
+ veor @t[0], @t[0], @x[0]
+ vext.8 @t[1], @x[1], @x[1], #8
+ veor @t[6], @t[6], @x[6]
+ vext.8 @t[2], @x[2], @x[2], #8
+ veor @t[7], @t[7], @x[7]
+ vext.8 @t[3], @x[3], @x[3], #8
+ veor @t[1], @t[1], @x[1]
+ vext.8 @t[4], @x[4], @x[4], #8
+ veor @t[2], @t[2], @x[2]
+ vext.8 @t[5], @x[5], @x[5], #8
+ veor @t[3], @t[3], @x[3]
+ veor @t[4], @t[4], @x[4]
+ veor @t[5], @t[5], @x[5]
+
+ veor @x[0], @x[0], @t[6]
+ veor @x[1], @x[1], @t[6]
+ veor @x[2], @x[2], @t[0]
+ veor @x[4], @x[4], @t[2]
+ veor @x[3], @x[3], @t[1]
+ veor @x[1], @x[1], @t[7]
+ veor @x[2], @x[2], @t[7]
+ veor @x[4], @x[4], @t[6]
+ veor @x[5], @x[5], @t[3]
+ veor @x[3], @x[3], @t[6]
+ veor @x[6], @x[6], @t[4]
+ veor @x[4], @x[4], @t[7]
+ veor @x[5], @x[5], @t[7]
+ veor @x[7], @x[7], @t[5]
+___
+ &MixColumns (@x,@t,1); # flipped 2<->3 and 4<->6
+}
+
+sub swapmove {
+my ($a,$b,$n,$mask,$t)=@_;
+$code.=<<___;
+ vshr.u64 $t, $b, #$n
+ veor $t, $t, $a
+ vand $t, $t, $mask
+ veor $a, $a, $t
+ vshl.u64 $t, $t, #$n
+ veor $b, $b, $t
+___
+}
+sub swapmove2x {
+my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
+$code.=<<___;
+ vshr.u64 $t0, $b0, #$n
+ vshr.u64 $t1, $b1, #$n
+ veor $t0, $t0, $a0
+ veor $t1, $t1, $a1
+ vand $t0, $t0, $mask
+ vand $t1, $t1, $mask
+ veor $a0, $a0, $t0
+ vshl.u64 $t0, $t0, #$n
+ veor $a1, $a1, $t1
+ vshl.u64 $t1, $t1, #$n
+ veor $b0, $b0, $t0
+ veor $b1, $b1, $t1
+___
+}
+
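+# Each swapmove(a, b, n, mask, t) computes t = ((b >> n) ^ a) & mask;
+# a ^= t; b ^= t << n, i.e. it swaps bit i of a with bit i+n of b wherever
+# bit i of mask is set. The three passes below (n = 1, 2, 4 with the
+# 0x55/0x33/0x0f masks) in effect transpose the eight state registers so
+# that each one ends up holding a single bit position of every byte - the
+# bit-sliced representation the Sbox code operates on.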
+sub bitslice {
+my @x=reverse(@_[0..7]);
+my ($t0,$t1,$t2,$t3)=@_[8..11];
+$code.=<<___;
+ vmov.i8 $t0,#0x55 @ compose .LBS0
+ vmov.i8 $t1,#0x33 @ compose .LBS1
+___
+ &swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
+ &swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+$code.=<<___;
+ vmov.i8 $t0,#0x0f @ compose .LBS2
+___
+ &swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
+ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+
+ &swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
+ &swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
+}
+
+$code.=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
+# define VFP_ABI_POP vldmia sp!,{d8-d15}
+# define VFP_ABI_FRAME 0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME 0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax unified @ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code 32
+#endif
+
+.fpu neon
+
+.type _bsaes_decrypt8,%function
+.align 4
+_bsaes_decrypt8:
+ adr $const,_bsaes_decrypt8
+ vldmia $key!, {@XMM[9]} @ round 0 key
+ add $const,$const,#.LM0ISR-_bsaes_decrypt8
+
+ vldmia $const!, {@XMM[8]} @ .LM0ISR
+ veor @XMM[10], @XMM[0], @XMM[9] @ xor with round0 key
+ veor @XMM[11], @XMM[1], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+ veor @XMM[12], @XMM[2], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+ veor @XMM[13], @XMM[3], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+ veor @XMM[14], @XMM[4], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+ veor @XMM[15], @XMM[5], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+ veor @XMM[10], @XMM[6], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+ veor @XMM[11], @XMM[7], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+ vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+___
+ &bitslice (@XMM[0..7, 8..11]);
+$code.=<<___;
+ sub $rounds,$rounds,#1
+ b .Ldec_sbox
+.align 4
+.Ldec_loop:
+___
+ &ShiftRows (@XMM[0..7, 8..12]);
+$code.=".Ldec_sbox:\n";
+ &InvSbox (@XMM[0..7, 8..15]);
+$code.=<<___;
+ subs $rounds,$rounds,#1
+ bcc .Ldec_done
+___
+ &InvMixColumns (@XMM[0,1,6,4,2,7,3,5, 8..15]);
+$code.=<<___;
+ vldmia $const, {@XMM[12]} @ .LISR
+ ite eq @ Thumb2 thing, sanity check in ARM
+ addeq $const,$const,#0x10
+ bne .Ldec_loop
+ vldmia $const, {@XMM[12]} @ .LISRM0
+ b .Ldec_loop
+.align 4
+.Ldec_done:
+___
+ &bitslice (@XMM[0,1,6,4,2,7,3,5, 8..11]);
+$code.=<<___;
+ vldmia $key, {@XMM[8]} @ last round key
+ veor @XMM[6], @XMM[6], @XMM[8]
+ veor @XMM[4], @XMM[4], @XMM[8]
+ veor @XMM[2], @XMM[2], @XMM[8]
+ veor @XMM[7], @XMM[7], @XMM[8]
+ veor @XMM[3], @XMM[3], @XMM[8]
+ veor @XMM[5], @XMM[5], @XMM[8]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ veor @XMM[1], @XMM[1], @XMM[8]
+ bx lr
+.size _bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type _bsaes_const,%object
+.align 6
+_bsaes_const:
+.LM0ISR: @ InvShiftRows constants
+ .quad 0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+ .quad 0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+ .quad 0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR: @ ShiftRows constants
+ .quad 0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+ .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+ .quad 0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+ .quad 0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+ .quad 0x090d01050c000408, 0x03070b0f060a0e02
+.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align 6
+.size _bsaes_const,.-_bsaes_const
+
+.type _bsaes_encrypt8,%function
+.align 4
+_bsaes_encrypt8:
+ adr $const,_bsaes_encrypt8
+ vldmia $key!, {@XMM[9]} @ round 0 key
+ sub $const,$const,#_bsaes_encrypt8-.LM0SR
+
+ vldmia $const!, {@XMM[8]} @ .LM0SR
+_bsaes_encrypt8_alt:
+ veor @XMM[10], @XMM[0], @XMM[9] @ xor with round0 key
+ veor @XMM[11], @XMM[1], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+ veor @XMM[12], @XMM[2], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+ veor @XMM[13], @XMM[3], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+ veor @XMM[14], @XMM[4], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+ veor @XMM[15], @XMM[5], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+ veor @XMM[10], @XMM[6], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+ veor @XMM[11], @XMM[7], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+ vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+_bsaes_encrypt8_bitslice:
+___
+ &bitslice (@XMM[0..7, 8..11]);
+$code.=<<___;
+ sub $rounds,$rounds,#1
+ b .Lenc_sbox
+.align 4
+.Lenc_loop:
+___
+ &ShiftRows (@XMM[0..7, 8..12]);
+$code.=".Lenc_sbox:\n";
+ &Sbox (@XMM[0..7, 8..15]);
+$code.=<<___;
+ subs $rounds,$rounds,#1
+ bcc .Lenc_done
+___
+ &MixColumns (@XMM[0,1,4,6,3,7,2,5, 8..15]);
+$code.=<<___;
+ vldmia $const, {@XMM[12]} @ .LSR
+	ite	eq			@ Thumb2 thing, sanity check in ARM
+ addeq $const,$const,#0x10
+ bne .Lenc_loop
+ vldmia $const, {@XMM[12]} @ .LSRM0
+ b .Lenc_loop
+.align 4
+.Lenc_done:
+___
+ # output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
+ &bitslice (@XMM[0,1,4,6,3,7,2,5, 8..11]);
+$code.=<<___;
+ vldmia $key, {@XMM[8]} @ last round key
+ veor @XMM[4], @XMM[4], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[8]
+ veor @XMM[3], @XMM[3], @XMM[8]
+ veor @XMM[7], @XMM[7], @XMM[8]
+ veor @XMM[2], @XMM[2], @XMM[8]
+ veor @XMM[5], @XMM[5], @XMM[8]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ veor @XMM[1], @XMM[1], @XMM[8]
+ bx lr
+.size _bsaes_encrypt8,.-_bsaes_encrypt8
+___
+}
+{
+my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6");
+
+sub bitslice_key {
+my @x=reverse(@_[0..7]);
+my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
+
+ &swapmove (@x[0,1],1,$bs0,$t2,$t3);
+$code.=<<___;
+ @ &swapmove(@x[2,3],1,$t0,$t2,$t3);
+ vmov @x[2], @x[0]
+ vmov @x[3], @x[1]
+___
+ #&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+
+ &swapmove2x (@x[0,2,1,3],2,$bs1,$t2,$t3);
+$code.=<<___;
+ @ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+ vmov @x[4], @x[0]
+ vmov @x[6], @x[2]
+ vmov @x[5], @x[1]
+ vmov @x[7], @x[3]
+___
+ &swapmove2x (@x[0,4,1,5],4,$bs2,$t2,$t3);
+ &swapmove2x (@x[2,6,3,7],4,$bs2,$t2,$t3);
+}
+
+$code.=<<___;
+.type _bsaes_key_convert,%function
+.align 4
+_bsaes_key_convert:
+ adr $const,_bsaes_key_convert
+ vld1.8 {@XMM[7]}, [$inp]! @ load round 0 key
+ sub $const,$const,#_bsaes_key_convert-.LM0
+ vld1.8 {@XMM[15]}, [$inp]! @ load round 1 key
+
+ vmov.i8 @XMM[8], #0x01 @ bit masks
+ vmov.i8 @XMM[9], #0x02
+ vmov.i8 @XMM[10], #0x04
+ vmov.i8 @XMM[11], #0x08
+ vmov.i8 @XMM[12], #0x10
+ vmov.i8 @XMM[13], #0x20
+ vldmia $const, {@XMM[14]} @ .LM0
+
+#ifdef __ARMEL__
+ vrev32.8 @XMM[7], @XMM[7]
+ vrev32.8 @XMM[15], @XMM[15]
+#endif
+ sub $rounds,$rounds,#1
+ vstmia $out!, {@XMM[7]} @ save round 0 key
+ b .Lkey_loop
+
+.align 4
+.Lkey_loop:
+ vtbl.8 `&Dlo(@XMM[7])`,{@XMM[15]},`&Dlo(@XMM[14])`
+ vtbl.8 `&Dhi(@XMM[7])`,{@XMM[15]},`&Dhi(@XMM[14])`
+ vmov.i8 @XMM[6], #0x40
+ vmov.i8 @XMM[15], #0x80
+
+ vtst.8 @XMM[0], @XMM[7], @XMM[8]
+ vtst.8 @XMM[1], @XMM[7], @XMM[9]
+ vtst.8 @XMM[2], @XMM[7], @XMM[10]
+ vtst.8 @XMM[3], @XMM[7], @XMM[11]
+ vtst.8 @XMM[4], @XMM[7], @XMM[12]
+ vtst.8 @XMM[5], @XMM[7], @XMM[13]
+ vtst.8 @XMM[6], @XMM[7], @XMM[6]
+ vtst.8 @XMM[7], @XMM[7], @XMM[15]
+ vld1.8 {@XMM[15]}, [$inp]! @ load next round key
+ vmvn @XMM[0], @XMM[0] @ "pnot"
+ vmvn @XMM[1], @XMM[1]
+ vmvn @XMM[5], @XMM[5]
+ vmvn @XMM[6], @XMM[6]
+#ifdef __ARMEL__
+ vrev32.8 @XMM[15], @XMM[15]
+#endif
+ subs $rounds,$rounds,#1
+ vstmia $out!,{@XMM[0]-@XMM[7]} @ write bit-sliced round key
+ bne .Lkey_loop
+
+ vmov.i8 @XMM[7],#0x63 @ compose .L63
+ @ don't save last round key
+ bx lr
+.size _bsaes_key_convert,.-_bsaes_key_convert
+___
+}
+
+if (0) {	# the following four functions are an unsupported interface
+ # used for benchmarking...
+$code.=<<___;
+.globl bsaes_enc_key_convert
+.type bsaes_enc_key_convert,%function
+.align 4
+bsaes_enc_key_convert:
+ stmdb sp!,{r4-r6,lr}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+
+ ldr r5,[$inp,#240] @ pass rounds
+ mov r4,$inp @ pass key
+ mov r12,$out @ pass key schedule
+ bl _bsaes_key_convert
+ veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]} @ save last round key
+
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r6,pc}
+.size bsaes_enc_key_convert,.-bsaes_enc_key_convert
+
+.globl bsaes_encrypt_128
+.type bsaes_encrypt_128,%function
+.align 4
+bsaes_encrypt_128:
+ stmdb sp!,{r4-r6,lr}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+.Lenc128_loop:
+ vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
+ vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
+ mov r4,$key @ pass the key
+ vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
+ mov r5,#10 @ pass rounds
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
+
+ bl _bsaes_encrypt8
+
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[3]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ subs $len,$len,#0x80
+ vst1.8 {@XMM[5]}, [$out]!
+ bhi .Lenc128_loop
+
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r6,pc}
+.size bsaes_encrypt_128,.-bsaes_encrypt_128
+
+.globl bsaes_dec_key_convert
+.type bsaes_dec_key_convert,%function
+.align 4
+bsaes_dec_key_convert:
+ stmdb sp!,{r4-r6,lr}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+
+ ldr r5,[$inp,#240] @ pass rounds
+ mov r4,$inp @ pass key
+ mov r12,$out @ pass key schedule
+ bl _bsaes_key_convert
+ vldmia $out, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia $out, {@XMM[7]}
+
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r6,pc}
+.size bsaes_dec_key_convert,.-bsaes_dec_key_convert
+
+.globl bsaes_decrypt_128
+.type bsaes_decrypt_128,%function
+.align 4
+bsaes_decrypt_128:
+ stmdb sp!,{r4-r6,lr}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+.Ldec128_loop:
+ vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
+ vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
+ mov r4,$key @ pass the key
+ vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
+ mov r5,#10 @ pass rounds
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
+
+ bl _bsaes_decrypt8
+
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ vst1.8 {@XMM[3]}, [$out]!
+ subs $len,$len,#0x80
+ vst1.8 {@XMM[5]}, [$out]!
+ bhi .Ldec128_loop
+
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r6,pc}
+.size bsaes_decrypt_128,.-bsaes_decrypt_128
+___
+}
+{
+my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10));
+my ($keysched)=("sp");
+
+$code.=<<___;
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global bsaes_cbc_encrypt
+.type bsaes_cbc_encrypt,%function
+.align 5
+bsaes_cbc_encrypt:
+#ifndef __KERNEL__
+ cmp $len, #128
+#ifndef __thumb__
+ blo AES_cbc_encrypt
+#else
+ bhs 1f
+ b AES_cbc_encrypt
+1:
+#endif
+#endif
+
+ @ it is up to the caller to make sure we are called with enc == 0
+
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr}
+ VFP_ABI_PUSH
+ ldr $ivp, [ip] @ IV is 1st arg on the stack
+ mov $len, $len, lsr#4 @ len in 16 byte blocks
+ sub sp, #0x10 @ scratch space to carry over the IV
+ mov $fp, sp @ save sp
+
+ ldr $rounds, [$key, #240] @ get # of rounds
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
+	add	r12, #`128-32`		@ size of bit-sliced key schedule
+
+ @ populate the key schedule
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ mov sp, r12 @ sp is $keysched
+ bl _bsaes_key_convert
+ vldmia $keysched, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia $keysched, {@XMM[7]}
+#else
+ ldr r12, [$key, #244]
+ eors r12, #1
+ beq 0f
+
+ @ populate the key schedule
+ str r12, [$key, #244]
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ add r12, $key, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, $key, #248
+ vldmia r4, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia r4, {@XMM[7]}
+
+.align 2
+0:
+#endif
+
+ vld1.8 {@XMM[15]}, [$ivp] @ load IV
+ b .Lcbc_dec_loop
+
+.align 4
+.Lcbc_dec_loop:
+ subs $len, $len, #0x8
+ bmi .Lcbc_dec_loop_finish
+
+ vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
+ vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
+#ifndef BSAES_ASM_EXTENDED_KEY
+ mov r4, $keysched @ pass the key
+#else
+ add r4, $key, #248
+#endif
+ vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
+ mov r5, $rounds
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]
+ sub $inp, $inp, #0x60
+ vstmia $fp, {@XMM[15]} @ put aside IV
+
+ bl _bsaes_decrypt8
+
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ veor @XMM[2], @XMM[2], @XMM[11]
+ vld1.8 {@XMM[14]-@XMM[15]}, [$inp]!
+ veor @XMM[7], @XMM[7], @XMM[12]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ veor @XMM[3], @XMM[3], @XMM[13]
+ vst1.8 {@XMM[6]}, [$out]!
+ veor @XMM[5], @XMM[5], @XMM[14]
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ vst1.8 {@XMM[3]}, [$out]!
+ vst1.8 {@XMM[5]}, [$out]!
+
+ b .Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+ adds $len, $len, #8
+ beq .Lcbc_dec_done
+
+ vld1.8 {@XMM[0]}, [$inp]! @ load input
+ cmp $len, #2
+ blo .Lcbc_dec_one
+ vld1.8 {@XMM[1]}, [$inp]!
+#ifndef BSAES_ASM_EXTENDED_KEY
+ mov r4, $keysched @ pass the key
+#else
+ add r4, $key, #248
+#endif
+ mov r5, $rounds
+ vstmia $fp, {@XMM[15]} @ put aside IV
+ beq .Lcbc_dec_two
+ vld1.8 {@XMM[2]}, [$inp]!
+ cmp $len, #4
+ blo .Lcbc_dec_three
+ vld1.8 {@XMM[3]}, [$inp]!
+ beq .Lcbc_dec_four
+ vld1.8 {@XMM[4]}, [$inp]!
+ cmp $len, #6
+ blo .Lcbc_dec_five
+ vld1.8 {@XMM[5]}, [$inp]!
+ beq .Lcbc_dec_six
+ vld1.8 {@XMM[6]}, [$inp]!
+ sub $inp, $inp, #0x70
+
+ bl _bsaes_decrypt8
+
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ veor @XMM[2], @XMM[2], @XMM[11]
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[7], @XMM[7], @XMM[12]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ veor @XMM[3], @XMM[3], @XMM[13]
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ vst1.8 {@XMM[3]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_six:
+ sub $inp, $inp, #0x60
+ bl _bsaes_decrypt8
+ vldmia $fp,{@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[12]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ veor @XMM[2], @XMM[2], @XMM[11]
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[7], @XMM[7], @XMM[12]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_five:
+ sub $inp, $inp, #0x50
+ bl _bsaes_decrypt8
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ veor @XMM[2], @XMM[2], @XMM[11]
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_four:
+ sub $inp, $inp, #0x40
+ bl _bsaes_decrypt8
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_three:
+ sub $inp, $inp, #0x30
+ bl _bsaes_decrypt8
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[6]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_two:
+ sub $inp, $inp, #0x20
+ bl _bsaes_decrypt8
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[15]}, [$inp]! @ reload input
+ veor @XMM[1], @XMM[1], @XMM[8]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_one:
+ sub $inp, $inp, #0x10
+ mov $rounds, $out @ save original out pointer
+ mov $out, $fp @ use the iv scratch space as out buffer
+ mov r2, $key
+ vmov @XMM[4],@XMM[15] @ just in case ensure that IV
+ vmov @XMM[5],@XMM[0] @ and input are preserved
+ bl AES_decrypt
+ vld1.8 {@XMM[0]}, [$fp,:64] @ load result
+ veor @XMM[0], @XMM[0], @XMM[4] @ ^= IV
+ vmov @XMM[15], @XMM[5] @ @XMM[5] holds input
+ vst1.8 {@XMM[0]}, [$rounds] @ write output
+
+.Lcbc_dec_done:
+#ifndef BSAES_ASM_EXTENDED_KEY
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+.Lcbc_dec_bzero: @ wipe key schedule [if any]
+ vstmia $keysched!, {q0-q1}
+ cmp $keysched, $fp
+ bne .Lcbc_dec_bzero
+#endif
+
+ mov sp, $fp
+ add sp, #0x10 @ add sp,$fp,#0x10 is no good for thumb
+ vst1.8 {@XMM[15]}, [$ivp] @ return IV
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc}
+.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+___
+}
+{
+my ($inp,$out,$len,$key, $ctr,$fp,$rounds)=(map("r$_",(0..3,8..10)));
+my $const = "r6"; # shared with _bsaes_encrypt8_alt
+my $keysched = "sp";
+
+$code.=<<___;
+.extern AES_encrypt
+.global bsaes_ctr32_encrypt_blocks
+.type bsaes_ctr32_encrypt_blocks,%function
+.align 5
+bsaes_ctr32_encrypt_blocks:
+ cmp $len, #8 @ use plain AES for
+ blo .Lctr_enc_short @ small sizes
+
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr}
+ VFP_ABI_PUSH
+ ldr $ctr, [ip] @ ctr is 1st arg on the stack
+ sub sp, sp, #0x10 @ scratch space to carry over the ctr
+ mov $fp, sp @ save sp
+
+ ldr $rounds, [$key, #240] @ get # of rounds
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
+ add r12, #`128-32` @ size of bit-sliced key schedule
+
+ @ populate the key schedule
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ mov sp, r12 @ sp is $keysched
+ bl _bsaes_key_convert
+ veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]} @ save last round key
+
+ vld1.8 {@XMM[0]}, [$ctr] @ load counter
+ add $ctr, $const, #.LREVM0SR-.LM0 @ borrow $ctr
+ vldmia $keysched, {@XMM[4]} @ load round0 key
+#else
+ ldr r12, [$key, #244]
+ eors r12, #1
+ beq 0f
+
+ @ populate the key schedule
+ str r12, [$key, #244]
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ add r12, $key, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]} @ save last round key
+
+.align 2
+0: add r12, $key, #248
+ vld1.8 {@XMM[0]}, [$ctr] @ load counter
+ adrl $ctr, .LREVM0SR @ borrow $ctr
+ vldmia r12, {@XMM[4]} @ load round0 key
+ sub sp, #0x10 @ place for adjusted round0 key
+#endif
+
+ vmov.i32 @XMM[8],#1 @ compose 1<<96
+ veor @XMM[9],@XMM[9],@XMM[9]
+ vrev32.8 @XMM[0],@XMM[0]
+ vext.8 @XMM[8],@XMM[9],@XMM[8],#4
+ vrev32.8 @XMM[4],@XMM[4]
+ vadd.u32 @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96
+ vstmia $keysched, {@XMM[4]} @ save adjusted round0 key
+ b .Lctr_enc_loop
+
+.align 4
+.Lctr_enc_loop:
+ vadd.u32 @XMM[10], @XMM[8], @XMM[9] @ compose 3<<96
+ vadd.u32 @XMM[1], @XMM[0], @XMM[8] @ +1
+ vadd.u32 @XMM[2], @XMM[0], @XMM[9] @ +2
+ vadd.u32 @XMM[3], @XMM[0], @XMM[10] @ +3
+ vadd.u32 @XMM[4], @XMM[1], @XMM[10]
+ vadd.u32 @XMM[5], @XMM[2], @XMM[10]
+ vadd.u32 @XMM[6], @XMM[3], @XMM[10]
+ vadd.u32 @XMM[7], @XMM[4], @XMM[10]
+ vadd.u32 @XMM[10], @XMM[5], @XMM[10] @ next counter
+
+ @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+ @ to flip byte order in 32-bit counter
+
+ vldmia $keysched, {@XMM[9]} @ load round0 key
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, $keysched, #0x10 @ pass next round key
+#else
+ add r4, $key, #`248+16`
+#endif
+ vldmia $ctr, {@XMM[8]} @ .LREVM0SR
+ mov r5, $rounds @ pass rounds
+ vstmia $fp, {@XMM[10]} @ save next counter
+ sub $const, $ctr, #.LREVM0SR-.LSR @ pass constants
+
+ bl _bsaes_encrypt8_alt
+
+ subs $len, $len, #8
+ blo .Lctr_enc_loop_done
+
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ load input
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[0], @XMM[8]
+ veor @XMM[1], @XMM[9]
+ vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
+ veor @XMM[4], @XMM[10]
+ veor @XMM[6], @XMM[11]
+ vld1.8 {@XMM[14]-@XMM[15]}, [$inp]!
+ veor @XMM[3], @XMM[12]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ veor @XMM[7], @XMM[13]
+ veor @XMM[2], @XMM[14]
+ vst1.8 {@XMM[4]}, [$out]!
+ veor @XMM[5], @XMM[15]
+ vst1.8 {@XMM[6]}, [$out]!
+ vmov.i32 @XMM[8], #1 @ compose 1<<96
+ vst1.8 {@XMM[3]}, [$out]!
+ veor @XMM[9], @XMM[9], @XMM[9]
+ vst1.8 {@XMM[7]}, [$out]!
+ vext.8 @XMM[8], @XMM[9], @XMM[8], #4
+ vst1.8 {@XMM[2]}, [$out]!
+ vadd.u32 @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96
+ vst1.8 {@XMM[5]}, [$out]!
+ vldmia $fp, {@XMM[0]} @ load counter
+
+ bne .Lctr_enc_loop
+ b .Lctr_enc_done
+
+.align 4
+.Lctr_enc_loop_done:
+ add $len, $len, #8
+ vld1.8 {@XMM[8]}, [$inp]! @ load input
+ veor @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [$out]! @ write output
+ cmp $len, #2
+ blo .Lctr_enc_done
+ vld1.8 {@XMM[9]}, [$inp]!
+ veor @XMM[1], @XMM[9]
+ vst1.8 {@XMM[1]}, [$out]!
+ beq .Lctr_enc_done
+ vld1.8 {@XMM[10]}, [$inp]!
+ veor @XMM[4], @XMM[10]
+ vst1.8 {@XMM[4]}, [$out]!
+ cmp $len, #4
+ blo .Lctr_enc_done
+ vld1.8 {@XMM[11]}, [$inp]!
+ veor @XMM[6], @XMM[11]
+ vst1.8 {@XMM[6]}, [$out]!
+ beq .Lctr_enc_done
+ vld1.8 {@XMM[12]}, [$inp]!
+ veor @XMM[3], @XMM[12]
+ vst1.8 {@XMM[3]}, [$out]!
+ cmp $len, #6
+ blo .Lctr_enc_done
+ vld1.8 {@XMM[13]}, [$inp]!
+ veor @XMM[7], @XMM[13]
+ vst1.8 {@XMM[7]}, [$out]!
+ beq .Lctr_enc_done
+ vld1.8 {@XMM[14]}, [$inp]
+ veor @XMM[2], @XMM[14]
+ vst1.8 {@XMM[2]}, [$out]!
+
+.Lctr_enc_done:
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifndef BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero: @ wipe key schedule [if any]
+ vstmia $keysched!, {q0-q1}
+ cmp $keysched, $fp
+ bne .Lctr_enc_bzero
+#else
+ vstmia $keysched, {q0-q1}
+#endif
+
+ mov sp, $fp
+ add sp, #0x10 @ add sp,$fp,#0x10 is no good for thumb
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.align 4
+.Lctr_enc_short:
+ ldr ip, [sp] @ ctr pointer is passed on stack
+ stmdb sp!, {r4-r8, lr}
+
+ mov r4, $inp @ copy arguments
+ mov r5, $out
+ mov r6, $len
+ mov r7, $key
+ ldr r8, [ip, #12] @ load counter LSW
+ vld1.8 {@XMM[1]}, [ip] @ load whole counter value
+#ifdef __ARMEL__
+ rev r8, r8
+#endif
+ sub sp, sp, #0x10
+ vst1.8 {@XMM[1]}, [sp,:64] @ copy counter value
+ sub sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+ add r0, sp, #0x10 @ input counter value
+ mov r1, sp @ output on the stack
+ mov r2, r7 @ key
+
+ bl AES_encrypt
+
+ vld1.8 {@XMM[0]}, [r4]! @ load input
+ vld1.8 {@XMM[1]}, [sp,:64] @ load encrypted counter
+ add r8, r8, #1
+#ifdef __ARMEL__
+ rev r0, r8
+ str r0, [sp, #0x1c] @ next counter value
+#else
+ str r8, [sp, #0x1c] @ next counter value
+#endif
+ veor @XMM[0],@XMM[0],@XMM[1]
+ vst1.8 {@XMM[0]}, [r5]! @ store output
+ subs r6, r6, #1
+ bne .Lctr_enc_short_loop
+
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+ vstmia sp!, {q0-q1}
+
+ ldmia sp!, {r4-r8, pc}
+.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+___
+}
+{
+######################################################################
+# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+# const AES_KEY *key1, const AES_KEY *key2,
+# const unsigned char iv[16]);
+#
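+# A hypothetical caller (illustrative only, not part of this file; k1, k2,
+# in, out, len and iv are made-up names) would build two AES key schedules
+# with OpenSSL's AES_set_encrypt_key() and pass the per-data-unit IV:
+#
+#	AES_set_encrypt_key(k1, 128, &key1);
+#	AES_set_encrypt_key(k2, 128, &key2);
+#	bsaes_xts_encrypt(in, out, len, &key1, &key2, iv);
+#
+# key2 is only used to encrypt the initial tweak; key1 drives the bulk cipher.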
+my ($inp,$out,$len,$key,$rounds,$magic,$fp)=(map("r$_",(7..10,1..3)));
+my $const="r6"; # returned by _bsaes_key_convert
+my $twmask=@XMM[5];
+my @T=@XMM[6..7];
+
+$code.=<<___;
+.globl bsaes_xts_encrypt
+.type bsaes_xts_encrypt,%function
+.align 4
+bsaes_xts_encrypt:
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr} @ 0x20
+ VFP_ABI_PUSH
+ mov r6, sp @ future $fp
+
+ mov $inp, r0
+ mov $out, r1
+ mov $len, r2
+ mov $key, r3
+
+ sub r0, sp, #0x10 @ 0x10
+ bic r0, #0xf @ align at 16 bytes
+ mov sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+ ldr r0, [ip] @ pointer to input tweak
+#else
+ @ generate initial tweak
+ ldr r0, [ip, #4] @ iv[]
+ mov r1, sp
+ ldr r2, [ip, #0] @ key2
+ bl AES_encrypt
+ mov r0,sp @ pointer to initial tweak
+#endif
+
+ ldr $rounds, [$key, #240] @ get # of rounds
+ mov $fp, r6
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
+ @ add r12, #`128-32` @ size of bit-sliced key schedule
+ sub r12, #`32+16` @ place for tweak[9]
+
+ @ populate the key schedule
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ mov sp, r12
+ add r12, #0x90 @ pass key schedule
+ bl _bsaes_key_convert
+ veor @XMM[7], @XMM[7], @XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]} @ save last round key
+#else
+ ldr r12, [$key, #244]
+ eors r12, #1
+ beq 0f
+
+ str r12, [$key, #244]
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ add r12, $key, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ veor @XMM[7], @XMM[7], @XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]}
+
+.align 2
+0: sub sp, #0x90 @ place for tweak[9]
+#endif
+
+ vld1.8 {@XMM[8]}, [r0] @ initial tweak
+ adr $magic, .Lxts_magic
+
+ subs $len, #0x80
+ blo .Lxts_enc_short
+ b .Lxts_enc_loop
+
+.align 4
+.Lxts_enc_loop:
+ vldmia $magic, {$twmask} @ load XTS magic
+ vshr.s64 @T[0], @XMM[8], #63
+ mov r0, sp
+ vand @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+ vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+ vst1.64 {@XMM[$i-1]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ vshr.s64 @T[1], @XMM[$i], #63
+ veor @XMM[$i], @XMM[$i], @T[0]
+ vand @T[1], @T[1], $twmask
+___
+ @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+ vld1.8 {@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+ veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+ vadd.u64 @XMM[8], @XMM[15], @XMM[15]
+ vst1.64 {@XMM[15]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ veor @XMM[8], @XMM[8], @T[0]
+ vst1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
+ veor @XMM[5], @XMM[5], @XMM[13]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[6], @XMM[6], @XMM[14]
+ mov r5, $rounds @ pass rounds
+ veor @XMM[7], @XMM[7], @XMM[15]
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ vld1.64 {@XMM[14]-@XMM[15]}, [r0,:128]!
+ veor @XMM[10], @XMM[3], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ veor @XMM[12], @XMM[2], @XMM[14]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+ veor @XMM[13], @XMM[5], @XMM[15]
+ vst1.8 {@XMM[12]-@XMM[13]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+
+ subs $len, #0x80
+ bpl .Lxts_enc_loop
+
+.Lxts_enc_short:
+ adds $len, #0x70
+ bmi .Lxts_enc_done
+
+ vldmia $magic, {$twmask} @ load XTS magic
+ vshr.s64 @T[0], @XMM[8], #63
+ mov r0, sp
+ vand @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+ vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+ vst1.64 {@XMM[$i-1]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ vshr.s64 @T[1], @XMM[$i], #63
+ veor @XMM[$i], @XMM[$i], @T[0]
+ vand @T[1], @T[1], $twmask
+___
+ @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+ vld1.8 {@XMM[$i-10]}, [$inp]!
+ subs $len, #0x10
+ bmi .Lxts_enc_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+ veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+ sub $len, #0x10
+ vst1.64 {@XMM[15]}, [r0,:128] @ next round tweak
+
+ vld1.8 {@XMM[6]}, [$inp]!
+ veor @XMM[5], @XMM[5], @XMM[13]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[6], @XMM[6], @XMM[14]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ vld1.64 {@XMM[14]}, [r0,:128]!
+ veor @XMM[10], @XMM[3], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ veor @XMM[12], @XMM[2], @XMM[14]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+ vst1.8 {@XMM[12]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_6:
+ vst1.64 {@XMM[14]}, [r0,:128] @ next round tweak
+
+ veor @XMM[4], @XMM[4], @XMM[12]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[5], @XMM[5], @XMM[13]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ veor @XMM[10], @XMM[3], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align 5
+.Lxts_magic:
+ .quad 1, 0x87
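+	@ tweak-doubling constants: 0x87 is the GF(2^128) reduction term
+	@ (x^128 = x^7 + x^2 + x + 1) folded into the low half when the top
+	@ bit shifts out; the 1 carries the low half's top bit into the high half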
+
+.align 5
+.Lxts_enc_5:
+ vst1.64 {@XMM[13]}, [r0,:128] @ next round tweak
+
+ veor @XMM[3], @XMM[3], @XMM[11]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[4], @XMM[4], @XMM[12]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ veor @XMM[10], @XMM[3], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ vst1.8 {@XMM[10]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_4:
+ vst1.64 {@XMM[12]}, [r0,:128] @ next round tweak
+
+ veor @XMM[2], @XMM[2], @XMM[10]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[3], @XMM[3], @XMM[11]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_3:
+ vst1.64 {@XMM[11]}, [r0,:128] @ next round tweak
+
+ veor @XMM[1], @XMM[1], @XMM[9]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[2], @XMM[2], @XMM[10]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
+ vld1.64 {@XMM[10]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ vst1.8 {@XMM[8]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_2:
+ vst1.64 {@XMM[10]}, [r0,:128] @ next round tweak
+
+ veor @XMM[0], @XMM[0], @XMM[8]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[1], @XMM[1], @XMM[9]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_1:
+ mov r0, sp
+ veor @XMM[0], @XMM[8]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+ mov r4, $fp @ preserve fp
+
+ bl AES_encrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [$out]!
+ mov $fp, r4
+
+ vmov @XMM[8], @XMM[9] @ next round tweak
+
+.Lxts_enc_done:
+#ifndef XTS_CHAIN_TWEAK
+ adds $len, #0x10
+ beq .Lxts_enc_ret
+ sub r6, $out, #0x10
+
+.Lxts_enc_steal:
+ ldrb r0, [$inp], #1
+ ldrb r1, [$out, #-0x10]
+ strb r0, [$out, #-0x10]
+ strb r1, [$out], #1
+
+ subs $len, #1
+ bhi .Lxts_enc_steal
+
+ vld1.8 {@XMM[0]}, [r6]
+ mov r0, sp
+ veor @XMM[0], @XMM[0], @XMM[8]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+ mov r4, $fp @ preserve fp
+
+ bl AES_encrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [r6]
+ mov $fp, r4
+#endif
+
+.Lxts_enc_ret:
+ bic r0, $fp, #0xf
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifdef XTS_CHAIN_TWEAK
+ ldr r1, [$fp, #0x20+VFP_ABI_FRAME] @ chain tweak
+#endif
+.Lxts_enc_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r0
+ bne .Lxts_enc_bzero
+
+ mov sp, $fp
+#ifdef XTS_CHAIN_TWEAK
+ vst1.8 {@XMM[8]}, [r1]
+#endif
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.size bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl bsaes_xts_decrypt
+.type bsaes_xts_decrypt,%function
+.align 4
+bsaes_xts_decrypt:
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr} @ 0x20
+ VFP_ABI_PUSH
+ mov r6, sp @ future $fp
+
+ mov $inp, r0
+ mov $out, r1
+ mov $len, r2
+ mov $key, r3
+
+ sub r0, sp, #0x10 @ 0x10
+ bic r0, #0xf @ align at 16 bytes
+ mov sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+ ldr r0, [ip] @ pointer to input tweak
+#else
+ @ generate initial tweak
+ ldr r0, [ip, #4] @ iv[]
+ mov r1, sp
+ ldr r2, [ip, #0] @ key2
+ bl AES_encrypt
+ mov r0, sp @ pointer to initial tweak
+#endif
+
+ ldr $rounds, [$key, #240] @ get # of rounds
+ mov $fp, r6
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
+ @ add r12, #`128-32` @ size of bit-sliced key schedule
+ sub r12, #`32+16` @ place for tweak[9]
+
+ @ populate the key schedule
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ mov sp, r12
+ add r12, #0x90 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, sp, #0x90
+ vldmia r4, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia r4, {@XMM[7]}
+#else
+ ldr r12, [$key, #244]
+ eors r12, #1
+ beq 0f
+
+ str r12, [$key, #244]
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ add r12, $key, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, $key, #248
+ vldmia r4, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia r4, {@XMM[7]}
+
+.align 2
+0: sub sp, #0x90 @ place for tweak[9]
+#endif
+ vld1.8 {@XMM[8]}, [r0] @ initial tweak
+ adr $magic, .Lxts_magic
+
+ tst $len, #0xf @ if not multiple of 16
+ it ne @ Thumb2 thing, sanity check in ARM
+ subne $len, #0x10 @ subtract another 16 bytes
+ subs $len, #0x80
+
+ blo .Lxts_dec_short
+ b .Lxts_dec_loop
+
+.align 4
+.Lxts_dec_loop:
+ vldmia $magic, {$twmask} @ load XTS magic
+ vshr.s64 @T[0], @XMM[8], #63
+ mov r0, sp
+ vand @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+ vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+ vst1.64 {@XMM[$i-1]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ vshr.s64 @T[1], @XMM[$i], #63
+ veor @XMM[$i], @XMM[$i], @T[0]
+ vand @T[1], @T[1], $twmask
+___
+ @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+ vld1.8 {@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+ veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+ vadd.u64 @XMM[8], @XMM[15], @XMM[15]
+ vst1.64 {@XMM[15]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ veor @XMM[8], @XMM[8], @T[0]
+ vst1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
+ veor @XMM[5], @XMM[5], @XMM[13]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[6], @XMM[6], @XMM[14]
+ mov r5, $rounds @ pass rounds
+ veor @XMM[7], @XMM[7], @XMM[15]
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ vld1.64 {@XMM[14]-@XMM[15]}, [r0,:128]!
+ veor @XMM[10], @XMM[2], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ veor @XMM[12], @XMM[3], @XMM[14]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+ veor @XMM[13], @XMM[5], @XMM[15]
+ vst1.8 {@XMM[12]-@XMM[13]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+
+ subs $len, #0x80
+ bpl .Lxts_dec_loop
+
+.Lxts_dec_short:
+ adds $len, #0x70
+ bmi .Lxts_dec_done
+
+ vldmia $magic, {$twmask} @ load XTS magic
+ vshr.s64 @T[0], @XMM[8], #63
+ mov r0, sp
+ vand @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+ vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+ vst1.64 {@XMM[$i-1]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ vshr.s64 @T[1], @XMM[$i], #63
+ veor @XMM[$i], @XMM[$i], @T[0]
+ vand @T[1], @T[1], $twmask
+___
+ @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+ vld1.8 {@XMM[$i-10]}, [$inp]!
+ subs $len, #0x10
+ bmi .Lxts_dec_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+ veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+ sub $len, #0x10
+ vst1.64 {@XMM[15]}, [r0,:128] @ next round tweak
+
+ vld1.8 {@XMM[6]}, [$inp]!
+ veor @XMM[5], @XMM[5], @XMM[13]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[6], @XMM[6], @XMM[14]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ vld1.64 {@XMM[14]}, [r0,:128]!
+ veor @XMM[10], @XMM[2], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ veor @XMM[12], @XMM[3], @XMM[14]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+ vst1.8 {@XMM[12]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_6:
+ vst1.64 {@XMM[14]}, [r0,:128] @ next round tweak
+
+ veor @XMM[4], @XMM[4], @XMM[12]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[5], @XMM[5], @XMM[13]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ veor @XMM[10], @XMM[2], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_5:
+ vst1.64 {@XMM[13]}, [r0,:128] @ next round tweak
+
+ veor @XMM[3], @XMM[3], @XMM[11]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[4], @XMM[4], @XMM[12]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ veor @XMM[10], @XMM[2], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ vst1.8 {@XMM[10]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_4:
+ vst1.64 {@XMM[12]}, [r0,:128] @ next round tweak
+
+ veor @XMM[2], @XMM[2], @XMM[10]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[3], @XMM[3], @XMM[11]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_3:
+ vst1.64 {@XMM[11]}, [r0,:128] @ next round tweak
+
+ veor @XMM[1], @XMM[1], @XMM[9]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[2], @XMM[2], @XMM[10]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
+ vld1.64 {@XMM[10]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ vst1.8 {@XMM[8]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_2:
+ vst1.64 {@XMM[10]}, [r0,:128] @ next round tweak
+
+ veor @XMM[0], @XMM[0], @XMM[8]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[1], @XMM[1], @XMM[9]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_1:
+ mov r0, sp
+ veor @XMM[0], @XMM[8]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+ mov r4, $fp @ preserve fp
+ mov r5, $magic @ preserve magic
+
+ bl AES_decrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [$out]!
+ mov $fp, r4
+ mov $magic, r5
+
+ vmov @XMM[8], @XMM[9] @ next round tweak
+
+.Lxts_dec_done:
+#ifndef XTS_CHAIN_TWEAK
+ adds $len, #0x10
+ beq .Lxts_dec_ret
+
+ @ calculate one round of extra tweak for the stolen ciphertext
+ vldmia $magic, {$twmask}
+ vshr.s64 @XMM[6], @XMM[8], #63
+ vand @XMM[6], @XMM[6], $twmask
+ vadd.u64 @XMM[9], @XMM[8], @XMM[8]
+ vswp `&Dhi("@XMM[6]")`,`&Dlo("@XMM[6]")`
+ veor @XMM[9], @XMM[9], @XMM[6]
+
+ @ perform the final decryption with the last tweak value
+ vld1.8 {@XMM[0]}, [$inp]!
+ mov r0, sp
+ veor @XMM[0], @XMM[0], @XMM[9]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+ mov r4, $fp @ preserve fp
+
+ bl AES_decrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[9]
+ vst1.8 {@XMM[0]}, [$out]
+
+ mov r6, $out
+.Lxts_dec_steal:
+ ldrb r1, [$out]
+ ldrb r0, [$inp], #1
+ strb r1, [$out, #0x10]
+ strb r0, [$out], #1
+
+ subs $len, #1
+ bhi .Lxts_dec_steal
+
+ vld1.8 {@XMM[0]}, [r6]
+ mov r0, sp
+ veor @XMM[0], @XMM[8]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+
+ bl AES_decrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [r6]
+ mov $fp, r4
+#endif
+
+.Lxts_dec_ret:
+ bic r0, $fp, #0xf
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifdef XTS_CHAIN_TWEAK
+ ldr r1, [$fp, #0x20+VFP_ABI_FRAME] @ chain tweak
+#endif
+.Lxts_dec_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r0
+ bne .Lxts_dec_bzero
+
+ mov sp, $fp
+#ifdef XTS_CHAIN_TWEAK
+ vst1.8 {@XMM[8]}, [r1]
+#endif
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
+___
+}
+$code.=<<___;
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
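+# Copy this script's own leading comment block into the output, turning the
+# '#' comment leader into the assembler's '@' (the shebang line is skipped).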
+open SELF,$0;
+while(<SELF>) {
+ next if (/^#!/);
+ last if (!s/^#/@/ and !/^$/);
+ print;
+}
+close SELF;
+
+print $code;
+
+close STDOUT;
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 59ceae8f3c95..c38b58c80202 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -24,6 +24,7 @@ generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
generic-y += siginfo.h
+generic-y += simd.h
generic-y += sizes.h
generic-y += socket.h
generic-y += sockios.h
@@ -32,3 +33,4 @@ generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
generic-y += unaligned.h
+generic-y += preempt.h
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 5665134bfa3e..0704e0cf5571 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -87,17 +87,43 @@ static inline u64 arch_counter_get_cntvct(void)
return cval;
}
-static inline void arch_counter_set_user_access(void)
+static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;
-
asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
+ return cntkctl;
+}
- /* disable user access to everything */
- cntkctl &= ~((3 << 8) | (7 << 0));
-
+static inline void arch_timer_set_cntkctl(u32 cntkctl)
+{
asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}
+
+static inline void arch_counter_set_user_access(void)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+
+ /* Disable user access to both physical/virtual counters/timers */
+ /* Also disable virtual event stream */
+ cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+ | ARCH_TIMER_USR_VT_ACCESS_EN
+ | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_VCT_ACCESS_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
+ arch_timer_set_cntkctl(cntkctl);
+}
+
+static inline void arch_timer_evtstrm_enable(int divider)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+ cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
+ /* Set the divider and enable virtual event stream */
+ cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+ | ARCH_TIMER_VIRT_EVT_EN;
+ arch_timer_set_cntkctl(cntkctl);
+ elf_hwcap |= HWCAP_EVTSTRM;
+}
+
#endif
#endif
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index fcc1b5bf6979..5c2285160575 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -53,6 +53,13 @@
#define put_byte_3 lsl #0
#endif
+/* Select code for any configuration running in BE8 mode */
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define ARM_BE8(code...) code
+#else
+#define ARM_BE8(code...)
+#endif
+
/*
* Data preload for architectures that support it
*/
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d39327..134aa28b6f64 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -12,6 +12,7 @@
#define __ASM_ARM_ATOMIC_H
#include <linux/compiler.h>
+#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
@@ -41,6 +42,7 @@ static inline void atomic_add(int i, atomic_t *v)
unsigned long tmp;
int result;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
@@ -79,6 +81,7 @@ static inline void atomic_sub(int i, atomic_t *v)
unsigned long tmp;
int result;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_sub\n"
"1: ldrex %0, [%3]\n"
" sub %0, %0, %4\n"
@@ -138,6 +141,7 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
unsigned long tmp, tmp2;
+ prefetchw(addr);
__asm__ __volatile__("@ atomic_clear_mask\n"
"1: ldrex %0, [%3]\n"
" bic %0, %0, %4\n"
@@ -283,6 +287,7 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
{
u64 tmp;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %0, %3, %H3, [%2]\n"
@@ -299,10 +304,11 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
u64 result;
unsigned long tmp;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add\n"
"1: ldrexd %0, %H0, [%3]\n"
-" adds %0, %0, %4\n"
-" adc %H0, %H0, %H4\n"
+" adds %Q0, %Q0, %Q4\n"
+" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -320,8 +326,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_add_return\n"
"1: ldrexd %0, %H0, [%3]\n"
-" adds %0, %0, %4\n"
-" adc %H0, %H0, %H4\n"
+" adds %Q0, %Q0, %Q4\n"
+" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -339,10 +345,11 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
u64 result;
unsigned long tmp;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_sub\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
+" subs %Q0, %Q0, %Q4\n"
+" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -360,8 +367,8 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
+" subs %Q0, %Q0, %Q4\n"
+" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -428,9 +435,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, #1\n"
-" sbc %H0, %H0, #0\n"
-" teq %H0, #0\n"
+" subs %Q0, %Q0, #1\n"
+" sbc %R0, %R0, #0\n"
+" teq %R0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
@@ -459,8 +466,8 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
" beq 2f\n"
-" adds %0, %0, %6\n"
-" adc %H0, %H0, %H6\n"
+" adds %Q0, %Q0, %Q6\n"
+" adc %R0, %R0, %R6\n"
" strexd %2, %0, %H0, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
new file mode 100644
index 000000000000..1714800fa113
--- /dev/null
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/include/asm/bL_switcher.h
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+typedef void (*bL_switch_completion_handler)(void *cookie);
+
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+ bL_switch_completion_handler completer,
+ void *completer_cookie);
+static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
+{
+ return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
+}
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE 0
+#define BL_NOTIFY_POST_ENABLE 1
+#define BL_NOTIFY_PRE_DISABLE 2
+#define BL_NOTIFY_POST_DISABLE 3
+
+#ifdef CONFIG_BL_SWITCHER
+
+int bL_switcher_register_notifier(struct notifier_block *nb);
+int bL_switcher_unregister_notifier(struct notifier_block *nb);
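+
+/*
+ * Illustrative sketch (not part of this patch; names are hypothetical).
+ * A client hooks the chain with a standard notifier_block whose callback
+ * receives one of the BL_NOTIFY_* actions:
+ *
+ *	static int my_bl_notify(struct notifier_block *nb,
+ *				unsigned long action, void *unused)
+ *	{
+ *		return (action == BL_NOTIFY_PRE_DISABLE) ? NOTIFY_OK : NOTIFY_DONE;
+ *	}
+ *	static struct notifier_block my_bl_nb = { .notifier_call = my_bl_notify };
+ *	...
+ *	bL_switcher_register_notifier(&my_bl_nb);
+ */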
+
+/*
+ * Use these functions to temporarily prevent enabling/disabling of
+ * the switcher.
+ * bL_switcher_get_enabled() returns true if the switcher is currently
+ * enabled. Each call to bL_switcher_get_enabled() must be followed
+ * by a call to bL_switcher_put_enabled(). These functions are not
+ * recursive.
+ */
+bool bL_switcher_get_enabled(void);
+void bL_switcher_put_enabled(void);
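+
+/*
+ * Illustrative usage (not part of this patch):
+ *
+ *	bool enabled = bL_switcher_get_enabled();
+ *	... the switcher cannot be enabled or disabled here ...
+ *	bL_switcher_put_enabled();
+ */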
+
+int bL_switcher_trace_trigger(void);
+int bL_switcher_get_logical_index(u32 mpidr);
+
+#else
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+#endif /* CONFIG_BL_SWITCHER */
+
+#endif
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 7af5c6c3653a..b274bde24905 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -2,6 +2,8 @@
#define _ASMARM_BUG_H
#include <linux/linkage.h>
+#include <linux/types.h>
+#include <asm/opcodes.h>
#ifdef CONFIG_BUG
@@ -12,10 +14,10 @@
*/
#ifdef CONFIG_THUMB2_KERNEL
#define BUG_INSTR_VALUE 0xde02
-#define BUG_INSTR_TYPE ".hword "
+#define BUG_INSTR(__value) __inst_thumb16(__value)
#else
#define BUG_INSTR_VALUE 0xe7f001f2
-#define BUG_INSTR_TYPE ".word "
+#define BUG_INSTR(__value) __inst_arm(__value)
#endif
@@ -33,7 +35,7 @@
#define __BUG(__file, __line, __value) \
do { \
- asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \
+ asm volatile("1:\t" BUG_INSTR(__value) "\n" \
".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
@@ -48,7 +50,7 @@ do { \
#define __BUG(__file, __line, __value) \
do { \
- asm volatile(BUG_INSTR_TYPE #__value); \
+ asm volatile(BUG_INSTR(__value) "\n"); \
unreachable(); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 15f2d5bf8875..ee753f1749cd 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -435,4 +435,50 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and we must do so without any intervening memory access between those
+ * steps, not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - ldrex/strex (and similar) instructions must not be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ * however some exceptions may exist. Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ *   fp is preserved to the stack explicitly prior to disabling the cache
+ * since adding it to the clobber list is incompatible with having
+ * CONFIG_FRAME_POINTER=y. ip is saved as well if ever r12-clobbering
+ *   trampolines are inserted by the linker and to keep sp 64-bit aligned.
+ */
+#define v7_exit_coherency_flush(level) \
+ asm volatile( \
+ "stmfd sp!, {fp, ip} \n\t" \
+ "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
+ "bic r0, r0, #"__stringify(CR_C)" \n\t" \
+ "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
+ "isb \n\t" \
+ "bl v7_flush_dcache_"__stringify(level)" \n\t" \
+ "clrex \n\t" \
+ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
+ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
+ "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
+ "isb \n\t" \
+ "dsb \n\t" \
+ "ldmfd sp!, {fp, ip}" \
+ : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+ "r9","r10","lr","memory" )
+
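+/*
+ * Typical invocation (illustrative): v7_exit_coherency_flush(louis) flushes
+ * only to the Level of Unification Inner Shareable, while
+ * v7_exit_coherency_flush(all) flushes the entire hierarchy; the macro pastes
+ * "level" into the v7_flush_dcache_<level> call above.
+ */
+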
#endif
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4f009c10540d..df2fbba7efc8 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -223,6 +223,42 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret;
}
+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long oldval;
+ unsigned long res;
+
+ __asm__ __volatile__(
+"1: ldrexd %1, %H1, [%3]\n"
+" teq %1, %4\n"
+" teqeq %H1, %H4\n"
+" bne 2f\n"
+" strexd %0, %5, %H5, [%3]\n"
+" teq %0, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+ : "r" (ptr), "r" (old), "r" (new)
+ : "cc");
+
+ return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long ret;
+
+ smp_mb();
+ ret = __cmpxchg64(ptr, old, new);
+ smp_mb();
+
+ return ret;
+}
+
#define cmpxchg_local(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
(unsigned long)(o), \
@@ -230,18 +266,16 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n) \
- ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
- atomic64_t, \
- counter), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr, o, n) \
- ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
- local64_t, \
- a), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
+ ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+
+#define cmpxchg64_relaxed(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg64((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 9672e978d50d..acdde76b39bb 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -10,6 +10,7 @@
#define CPUID_TLBTYPE 3
#define CPUID_MPUIR 4
#define CPUID_MPIDR 5
+#define CPUID_REVIDR 6
#ifdef CONFIG_CPU_V7M
#define CPUID_EXT_PFR0 0x40
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 5b579b951503..9f8401667182 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -10,18 +10,30 @@
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
+#include <asm/cacheflush.h>
+
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
#define DMA_ERROR_CODE (~0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
return &arm_dma_ops;
}
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (xen_initial_domain())
+ return xen_dma_ops;
+ else
+ return __generic_dma_ops(dev);
+}
+
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
BUG_ON(!dev);
@@ -64,6 +76,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
+
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
@@ -86,6 +99,49 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
}
#endif
+/* The ARM override for dma_max_pfn() */
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+ return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
+}
+#define dma_max_pfn(dev) dma_max_pfn(dev)
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ unsigned int offset = paddr & ~PAGE_MASK;
+ return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+ unsigned int offset = dev_addr & ~PAGE_MASK;
+ return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ u64 limit, mask;
+
+ if (dev->dma_mask)
+ mask = *dev->dma_mask;
+ else
+ mask = dev->coherent_dma_mask;
+
+ if (mask == 0)
+ return 0;
+
+ limit = (mask + 1) & ~mask;
+ if (limit && size > limit)
+ return 0;
+
+ if ((addr | (addr + size - 1)) & ~mask)
+ return 0;
+
+ return 1;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) { }
+
/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a2df63..3d7351c844aa 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 7
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 0cf7a6b842ff..ad774f37c47c 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -24,8 +24,8 @@
#define TRACER_TIMEOUT 10000
#define etm_writel(t, v, x) \
- (__raw_writel((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+ (writel_relaxed((v), (t)->etm_regs + (x)))
+#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
/* CoreSight Management Registers */
#define CSMR_LOCKACCESS 0xfb0
@@ -142,8 +142,8 @@
#define ETBFF_TRIGFL BIT(10)
#define etb_writel(t, v, x) \
- (__raw_writel((v), (t)->etb_regs + (x)))
-#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
+ (writel_relaxed((v), (t)->etb_regs + (x)))
+#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etm_unlock(t) \
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d070741b2b37..c45effc18312 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -24,9 +24,11 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
+#include <xen/xen.h>
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -372,6 +374,12 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
#define BIOVEC_MERGEABLE(vec1, vec2) \
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
+ (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 48066ce9ea34..0a9d5dd93294 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -11,6 +11,7 @@
#define __ARM_KGDB_H__
#include <linux/ptrace.h>
+#include <asm/opcodes.h>
/*
* GDB assumes that we're a user process being debugged, so
@@ -41,7 +42,7 @@
static inline void arch_kgdb_breakpoint(void)
{
- asm(".word 0xe7ffdeff");
+ asm(__inst_arm(0xe7ffdeff));
}
extern void kgdb_handle_bus_error(void);
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 64e96960de29..1d3153c7eb41 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -57,6 +57,7 @@
* TSC: Trap SMC
* TSW: Trap cache operations by set/way
* TWI: Trap WFI
+ * TWE: Trap WFE
* TIDCP: Trap L2CTLR/L2ECTLR
* BSU_IS: Upgrade barriers to the inner shareable domain
* FB: Force broadcast of all maintenance operations
@@ -67,7 +68,7 @@
*/
#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
- HCR_SWIO | HCR_TIDCP)
+ HCR_TWE | HCR_SWIO | HCR_TIDCP)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
/* System Control Register (SCTLR) bits */
@@ -95,12 +96,12 @@
#define TTBCR_IRGN1 (3 << 24)
#define TTBCR_EPD1 (1 << 23)
#define TTBCR_A1 (1 << 22)
-#define TTBCR_T1SZ (3 << 16)
+#define TTBCR_T1SZ (7 << 16)
#define TTBCR_SH0 (3 << 12)
#define TTBCR_ORGN0 (3 << 10)
#define TTBCR_IRGN0 (3 << 8)
#define TTBCR_EPD0 (1 << 7)
-#define TTBCR_T0SZ 3
+#define TTBCR_T0SZ (7 << 0)
#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
/* Hyp System Trap Register */
@@ -208,6 +209,8 @@
#define HSR_EC_DABT (0x24)
#define HSR_EC_DABT_HYP (0x25)
+#define HSR_WFI_IS_WFE (1U << 0)
+
#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
#define HSR_DABT_S1PTW (1U << 7)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index a2f43ddcc300..661da11f76f4 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -39,7 +39,7 @@
#define c6_IFAR 17 /* Instruction Fault Address Register */
#define c7_PAR 18 /* Physical Address Register */
#define c7_PAR_high 19 /* PAR top 32 bits */
-#define c9_L2CTLR 20 /* Cortex A15 L2 Control Register */
+#define c9_L2CTLR 20 /* Cortex A15/A7 L2 Control Register */
#define c10_PRRR 21 /* Primary Region Remap Register */
#define c10_NMRR 22 /* Normal Memory Remap Register */
#define c12_VBAR 23 /* Vector Base Address Register */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index a464e8d7b6c5..708e4d8a647f 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -157,4 +157,9 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.cp15[c0_MPIDR];
+}
+
#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 7d22517d8071..8a6f6db14ee4 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -38,11 +38,6 @@
#define KVM_VCPU_MAX_FEATURES 1
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x) 0
-#define KVM_NR_PAGE_SIZES 1
-#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
-
#include <kvm/arm_vgic.h>
struct kvm_vcpu;
@@ -154,6 +149,7 @@ struct kvm_vcpu_stat {
struct kvm_vcpu_init;
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init);
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
struct kvm_one_reg;
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 9b28c41f4ba9..77de4a41cc50 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -62,6 +62,12 @@ phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
+static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
+{
+ *pmd = new_pmd;
+ flush_pmd_entry(pmd);
+}
+
static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
*pte = new_pte;
@@ -103,9 +109,15 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
pte_val(*pte) |= L_PTE_S2_RDWR;
}
+static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+{
+ pmd_val(*pmd) |= L_PMD_S2_RDWR;
+}
+
struct kvm;
-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
+ unsigned long size)
{
/*
* If we are going to insert an instruction page and the icache is
@@ -120,8 +132,7 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
*/
if (icache_is_pipt()) {
- unsigned long hva = gfn_to_hva(kvm, gfn);
- __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+ __cpuc_coherent_user_range(hva, hva + size);
} else if (!icache_is_vivt_asid_tagged()) {
/* any kind of VIPT cache */
__flush_icache_all();
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 402a2bc6aa68..17a3fa2979e8 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -49,6 +49,7 @@ struct machine_desc {
bool (*smp_init)(void);
void (*fixup)(struct tag *, char **,
struct meminfo *);
+ void (*init_meminfo)(void);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */
void (*init_early)(void);
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 454d642a4070..7fc42784becb 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -106,8 +106,4 @@ extern int dc21285_setup(int nr, struct pci_sys_data *);
extern void dc21285_preinit(void);
extern void dc21285_postinit(void);
-extern struct pci_ops via82c505_ops;
-extern int via82c505_setup(int nr, struct pci_sys_data *);
-extern void via82c505_init(void *sysdata);
-
#endif /* __ASM_MACH_PCI_H */
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index fc82a88f5b69..608516ebabfe 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -42,6 +42,14 @@ extern void mcpm_entry_point(void);
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
/*
+ * This sets an early poke, i.e. a value to be poked into some address
+ * from very early assembly code before the CPU is ungated. The
+ * address must be physical; if it is 0, nothing will happen.
+ */
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+ unsigned long poke_phys_addr, unsigned long poke_val);
+
+/*
* CPU/cluster power operations API for higher subsystems to use.
*/
@@ -81,10 +89,40 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
*
* This will return if mcpm_platform_register() has not been called
* previously in which case the caller should take appropriate action.
+ *
+ * On success, the CPU is not guaranteed to be truly halted until
+ * a subsequent call to mcpm_cpu_power_down_finish() for the specified
+ * cpu returns zero. Until then, other CPUs should make sure they do not
+ * trash memory the target CPU might be executing/accessing.
*/
void mcpm_cpu_power_down(void);
/**
+ * mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and
+ * make sure it is powered off
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * Call this function to ensure that a pending powerdown has taken
+ * effect and the CPU is safely parked before performing non-mcpm
+ * operations that may affect the CPU (such as kexec trashing the
+ * kernel text).
+ *
+ * It is *not* necessary to call this function if you only need to
+ * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
+ * event.
+ *
+ * Do not call this function unless the specified CPU has already
+ * called mcpm_cpu_power_down() or has committed to doing so.
+ *
+ * @return:
+ * - zero if the CPU is in a safely parked state
+ * - nonzero otherwise (e.g., timeout)
+ */
+int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster);
+
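Together with mcpm_cpu_power_down() above, this forms a two-step shutdown handshake. A hedged sketch of how SMP hot-unplug glue might pair the two calls (function names below are illustrative only):

	/* Runs on the CPU being unplugged; does not return once the
	 * powerdown request has been accepted. */
	static void example_cpu_die(unsigned int cpu)
	{
		mcpm_cpu_power_down();
	}

	/* Runs on a surviving CPU; returns 1 once the victim is known to
	 * be safely parked, 0 if the wait timed out. */
	static int example_cpu_kill(unsigned int cpu, unsigned int cluster)
	{
		return !mcpm_cpu_power_down_finish(cpu, cluster);
	}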
+/**
* mcpm_cpu_suspend - bring the calling CPU in a suspended state
*
* @expected_residency: duration in microseconds the CPU is expected
@@ -126,6 +164,7 @@ int mcpm_cpu_powered_up(void);
struct mcpm_platform_ops {
int (*power_up)(unsigned int cpu, unsigned int cluster);
void (*power_down)(void);
+ int (*power_down_finish)(unsigned int cpu, unsigned int cluster);
void (*suspend)(u64);
void (*powered_up)(void);
};
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e750a938fd3c..4dd21457ef9d 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -172,8 +172,13 @@
* so that all we need to do is modify the 8-bit constant field.
*/
#define __PV_BITS_31_24 0x81000000
+#define __PV_BITS_7_0 0x81
+
+extern u64 __pv_phys_offset;
+extern u64 __pv_offset;
+extern void fixup_pv_table(const void *, unsigned long);
+extern const void *__pv_table_begin, *__pv_table_end;
-extern unsigned long __pv_phys_offset;
#define PHYS_OFFSET __pv_phys_offset
#define __pv_stub(from,to,instr,type) \
@@ -185,22 +190,58 @@ extern unsigned long __pv_phys_offset;
: "=r" (to) \
: "r" (from), "I" (type))
-static inline unsigned long __virt_to_phys(unsigned long x)
+#define __pv_stub_mov_hi(t) \
+ __asm__ volatile("@ __pv_stub_mov\n" \
+ "1: mov %R0, %1\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 1b\n" \
+ " .popsection\n" \
+ : "=r" (t) \
+ : "I" (__PV_BITS_7_0))
+
+#define __pv_add_carry_stub(x, y) \
+ __asm__ volatile("@ __pv_add_carry_stub\n" \
+ "1: adds %Q0, %1, %2\n" \
+ " adc %R0, %R0, #0\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 1b\n" \
+ " .popsection\n" \
+ : "+r" (y) \
+ : "r" (x), "I" (__PV_BITS_31_24) \
+ : "cc")
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
{
- unsigned long t;
- __pv_stub(x, t, "add", __PV_BITS_31_24);
+ phys_addr_t t;
+
+ if (sizeof(phys_addr_t) == 4) {
+ __pv_stub(x, t, "add", __PV_BITS_31_24);
+ } else {
+ __pv_stub_mov_hi(t);
+ __pv_add_carry_stub(x, t);
+ }
return t;
}
-static inline unsigned long __phys_to_virt(unsigned long x)
+static inline unsigned long __phys_to_virt(phys_addr_t x)
{
unsigned long t;
__pv_stub(x, t, "sub", __PV_BITS_31_24);
return t;
}
+
#else
-#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
+{
+ return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
+}
+
+static inline unsigned long __phys_to_virt(phys_addr_t x)
+{
+ return x - PHYS_OFFSET + PAGE_OFFSET;
+}
+
#endif
#endif
#endif /* __ASSEMBLY__ */
@@ -238,16 +279,33 @@ static inline phys_addr_t virt_to_phys(const volatile void *x)
static inline void *phys_to_virt(phys_addr_t x)
{
- return (void *)(__phys_to_virt((unsigned long)(x)));
+ return (void *)__phys_to_virt(x);
}
/*
* Drivers should NOT use these either.
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
-#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
+
+/*
+ * These are for systems that have a hardware-interconnect-supported alias of
+ * physical memory for idmap purposes. Most cases should leave these
+ * untouched.
+ */
+static inline phys_addr_t __virt_to_idmap(unsigned long x)
+{
+ if (arch_virt_to_idmap)
+ return arch_virt_to_idmap(x);
+ else
+ return __virt_to_phys(x);
+}
+
+#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
+
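A platform with such an aliased view of DRAM would typically install its own translation very early, for instance from the machine_desc ->init_meminfo() callback added earlier in this series. A hedged sketch, where EXAMPLE_IDMAP_PHYS_START stands in for a SoC-specific base address:

	/* Hypothetical platform hook: map kernel virtual addresses to the
	 * interconnect alias used while the MMU is off. */
	static phys_addr_t example_virt_to_idmap(unsigned long x)
	{
		return (phys_addr_t)x - PAGE_OFFSET + EXAMPLE_IDMAP_PHYS_START;
	}

	static void __init example_init_meminfo(void)
	{
		arch_virt_to_idmap = example_virt_to_idmap;
	}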
/*
* Virtual <-> DMA view memory address translations
* Again, these are *only* valid on the kernel direct mapped RAM
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 6f18da09668b..64fd15159b7d 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -16,7 +16,7 @@ typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS 8
#define ASID_MASK ((~0ULL) << ASID_BITS)
-#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)
+#define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
#else
#define ASID(mm) (0)
#endif
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02386ee..86a659a19526 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -181,6 +181,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+/*
+ * We don't have huge page support for short descriptors, for the moment
+ * define empty stubs for use by pin_page_for_write.
+ */
+#define pmd_hugewillfault(pmd) (0)
+#define pmd_thp_or_huge(pmd) (0)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 5689c18c85f5..4f9503908dca 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -126,6 +126,8 @@
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
+
/*
* Hyp-mode PL2 PTE definitions for LPAE.
*/
@@ -206,6 +208,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
+#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 413f3876341c..c3d5fc124a05 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -22,6 +22,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
+#include <asm/unified.h>
#ifdef __KERNEL__
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -87,6 +88,17 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp
+#ifdef CONFIG_SMP
+#define __ALT_SMP_ASM(smp, up) \
+ "9998: " smp "\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n" \
+ " .long 9998b\n" \
+ " " up "\n" \
+ " .popsection\n"
+#else
+#define __ALT_SMP_ASM(smp, up) up
+#endif
+
/*
* Prefetching support - only ARMv5.
*/
@@ -97,17 +109,22 @@ static inline void prefetch(const void *ptr)
{
__asm__ __volatile__(
"pld\t%a0"
- :
- : "p" (ptr)
- : "cc");
+ :: "p" (ptr));
}
+#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
-#define prefetchw(ptr) prefetch(ptr)
-
-#define ARCH_HAS_SPINLOCK_PREFETCH
-#define spin_lock_prefetch(x) do { } while (0)
-
+static inline void prefetchw(const void *ptr)
+{
+ __asm__ __volatile__(
+ ".arch_extension mp\n"
+ __ALT_SMP_ASM(
+ WASM(pldw) "\t%a0",
+ WASM(pld) "\t%a0"
+ )
+ :: "p" (ptr));
+}
+#endif
#endif
#define HAVE_ARCH_PICK_MMAP_LAYOUT
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index 4a2985e21969..b681575ad3de 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -11,8 +11,6 @@
#ifndef __ASMARM_PROM_H
#define __ASMARM_PROM_H
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
#ifdef CONFIG_OF
extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
deleted file mode 100644
index 2389b71a8e7c..000000000000
--- a/arch/arm/include/asm/sched_clock.h
+++ /dev/null
@@ -1,4 +0,0 @@
-/* You shouldn't include this file. Use linux/sched_clock.h instead.
- * Temporary file until all asm/sched_clock.h users are gone
- */
-#include <linux/sched_clock.h>
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index c50f05609501..8d6a089dfb76 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -49,7 +49,7 @@ extern struct meminfo meminfo;
#define bank_phys_end(bank) ((bank)->start + (bank)->size)
#define bank_phys_size(bank) (bank)->size
-extern int arm_add_memory(phys_addr_t start, phys_addr_t size);
+extern int arm_add_memory(u64 start, u64 size);
extern void early_print(const char *str, ...);
extern void dump_machine_table(void);
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a8cae71caceb..22a3b9b5d4a1 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -84,6 +84,8 @@ extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+extern int register_ipi_completion(struct completion *completion, int cpu);
+
struct smp_operations {
#ifdef CONFIG_SMP
/*
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4f2c28060c9a..ef3c6072aa45 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,21 +5,13 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif
-#include <asm/processor.h>
+#include <linux/prefetch.h>
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
*/
-#define ALT_SMP(smp, up) \
- "9998: " smp "\n" \
- " .pushsection \".alt.smp.init\", \"a\"\n" \
- " .long 9998b\n" \
- " " up "\n" \
- " .popsection\n"
-
#ifdef CONFIG_THUMB2_KERNEL
-#define SEV ALT_SMP("sev.w", "nop.w")
/*
* For Thumb-2, special care is needed to ensure that the conditional WFE
* instruction really does assemble to exactly 4 bytes (as required by
@@ -31,17 +23,18 @@
* the assembler won't change IT instructions which are explicitly present
* in the input.
*/
-#define WFE(cond) ALT_SMP( \
+#define WFE(cond) __ALT_SMP_ASM( \
"it " cond "\n\t" \
"wfe" cond ".n", \
\
"nop.w" \
)
#else
-#define SEV ALT_SMP("sev", "nop")
-#define WFE(cond) ALT_SMP("wfe" cond, "nop")
+#define WFE(cond) __ALT_SMP_ASM("wfe" cond, "nop")
#endif
+#define SEV __ALT_SMP_ASM(WASM(sev), WASM(nop))
+
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
@@ -77,6 +70,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
u32 newval;
arch_spinlock_t lockval;
+ prefetchw(&lock->slock);
__asm__ __volatile__(
"1: ldrex %0, [%3]\n"
" add %1, %0, %4\n"
@@ -100,6 +94,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
unsigned long contended, res;
u32 slock;
+ prefetchw(&lock->slock);
do {
__asm__ __volatile__(
" ldrex %0, [%3]\n"
@@ -127,10 +122,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
dsb_sev();
}
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.tickets.owner == lock.tickets.next;
+}
+
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
- return tickets.owner != tickets.next;
+ return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
@@ -152,6 +151,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
@@ -170,6 +170,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long contended, res;
+ prefetchw(&rw->lock);
do {
__asm__ __volatile__(
" ldrex %0, [%2]\n"
@@ -203,7 +204,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
}
/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0)
/*
* Read locks are a bit more hairy:
@@ -221,6 +222,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
@@ -241,6 +243,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
smp_mb();
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
@@ -259,6 +262,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long contended, res;
+ prefetchw(&rw->lock);
do {
__asm__ __volatile__(
" ldrex %0, [%2]\n"
@@ -280,7 +284,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
}
/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index b262d2f8b478..47663fcb10ad 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -25,7 +25,7 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
typedef struct {
- volatile unsigned int lock;
+ u32 lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
index 83f2aa83899c..5e656626bb00 100644
--- a/arch/arm/include/asm/timex.h
+++ b/arch/arm/include/asm/timex.h
@@ -12,10 +12,10 @@
#ifndef _ASMARM_TIMEX_H
#define _ASMARM_TIMEX_H
-#ifdef CONFIG_ARCH_MULTIPLATFORM
-#define CLOCK_TICK_RATE 1000000
-#else
+#ifdef CONFIG_NEED_MACH_TIMEX_H
#include <mach/timex.h>
+#else
+#define CLOCK_TICK_RATE 1000000
#endif
typedef unsigned long cycles_t;
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 38960264040c..def9e570199f 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -560,37 +560,6 @@ static inline void __flush_bp_all(void)
asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
}
-#include <asm/cputype.h>
-#ifdef CONFIG_ARM_ERRATA_798181
-static inline int erratum_a15_798181(void)
-{
- unsigned int midr = read_cpuid_id();
-
- /* Cortex-A15 r0p0..r3p2 affected */
- if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
- return 0;
- return 1;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
- /*
- * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
- */
- asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
- dsb(ish);
-}
-#else
-static inline int erratum_a15_798181(void)
-{
- return 0;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-}
-#endif
-
/*
* flush_pmd_entry
*
@@ -697,4 +666,21 @@ extern void flush_bp_all(void);
#endif
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARM_ERRATA_798181
+extern void erratum_a15_798181_init(void);
+#else
+static inline void erratum_a15_798181_init(void) {}
+#endif
+extern bool (*erratum_a15_798181_handler)(void);
+
+static inline bool erratum_a15_798181(void)
+{
+ if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
+ erratum_a15_798181_handler))
+ return erratum_a15_798181_handler();
+ return false;
+}
+#endif
+
#endif
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index f5989f46b4d2..b88beaba6b4a 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -38,6 +38,8 @@
#ifdef __ASSEMBLY__
#define W(instr) instr.w
#define BSYM(sym) sym + 1
+#else
+#define WASM(instr) #instr ".w"
#endif
#else /* !CONFIG_THUMB2_KERNEL */
@@ -50,6 +52,8 @@
#ifdef __ASSEMBLY__
#define W(instr) instr
#define BSYM(sym) sym
+#else
+#define WASM(instr) #instr
#endif
#endif /* CONFIG_THUMB2_KERNEL */
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index d7ab99a0c9eb..1317ee40f4df 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -16,4 +16,6 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return PARAVIRT_LAZY_NONE;
}
+extern struct dma_map_ops *xen_dma_ops;
+
#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..1109017499e5
--- /dev/null
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (__generic_dma_ops(hwdev)->unmap_page)
+ __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+ __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (__generic_dma_ops(hwdev)->sync_single_for_device)
+ __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 359a7b50b158..5d0e4c5dc711 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -6,12 +6,12 @@
#include <linux/pfn.h>
#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <xen/xen.h>
#include <xen/interface/grant_table.h>
-#define pfn_to_mfn(pfn) (pfn)
#define phys_to_machine_mapping_valid(pfn) (1)
-#define mfn_to_pfn(mfn) (mfn)
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
#define pte_mfn pte_pfn
@@ -32,6 +32,44 @@ typedef struct xpaddr {
#define INVALID_P2M_ENTRY (~0UL)
+unsigned long __pfn_to_mfn(unsigned long pfn);
+unsigned long __mfn_to_pfn(unsigned long mfn);
+extern struct rb_root phys_to_mach;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+ unsigned long mfn;
+
+ if (phys_to_mach.rb_node != NULL) {
+ mfn = __pfn_to_mfn(pfn);
+ if (mfn != INVALID_P2M_ENTRY)
+ return mfn;
+ }
+
+ if (xen_initial_domain())
+ return pfn;
+ else
+ return INVALID_P2M_ENTRY;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+ unsigned long pfn;
+
+ if (phys_to_mach.rb_node != NULL) {
+ pfn = __mfn_to_pfn(mfn);
+ if (pfn != INVALID_P2M_ENTRY)
+ return pfn;
+ }
+
+ if (xen_initial_domain())
+ return mfn;
+ else
+ return INVALID_P2M_ENTRY;
+}
+
+#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+
static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
unsigned offset = phys.paddr & ~PAGE_MASK;
@@ -76,11 +114,9 @@ static inline int m2p_remove_override(struct page *page, bool clear_pte)
return 0;
}
-static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
- BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
- return true;
-}
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+ unsigned long nr_pages);
static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S
new file mode 100644
index 000000000000..2265a199280c
--- /dev/null
+++ b/arch/arm/include/debug/efm32.S
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define UARTn_CMD 0x000c
+#define UARTn_CMD_TXEN 0x0004
+
+#define UARTn_STATUS 0x0010
+#define UARTn_STATUS_TXC 0x0020
+#define UARTn_STATUS_TXBL 0x0040
+
+#define UARTn_TXDATA 0x0034
+
+ .macro addruart, rx, tmp
+ ldr \rx, =(CONFIG_DEBUG_UART_PHYS)
+
+ /*
+ * Enable TX. The driver might disable it to save energy. We don't
+ * bother disabling it at the end, since power consumption isn't
+ * that important while debugging.
+ */
+ ldr \tmp, =(UARTn_CMD_TXEN)
+ str \tmp, [\rx, #UARTn_CMD]
+ .endm
+
+ .macro senduart,rd,rx
+ strb \rd, [\rx, #UARTn_TXDATA]
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldr \rd, [\rx, #UARTn_STATUS]
+ tst \rd, #UARTn_STATUS_TXBL
+ beq 1001b
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #UARTn_STATUS]
+ tst \rd, #UARTn_STATUS_TXC
+ bne 1001b
+ .endm
diff --git a/arch/arm/include/debug/imx-uart.h b/arch/arm/include/debug/imx-uart.h
index 29da84e183f4..42b823cd2d22 100644
--- a/arch/arm/include/debug/imx-uart.h
+++ b/arch/arm/include/debug/imx-uart.h
@@ -43,6 +43,14 @@
#define IMX35_UART_BASE_ADDR(n) IMX35_UART##n##_BASE_ADDR
#define IMX35_UART_BASE(n) IMX35_UART_BASE_ADDR(n)
+#define IMX50_UART1_BASE_ADDR 0x53fbc000
+#define IMX50_UART2_BASE_ADDR 0x53fc0000
+#define IMX50_UART3_BASE_ADDR 0x5000c000
+#define IMX50_UART4_BASE_ADDR 0x53ff0000
+#define IMX50_UART5_BASE_ADDR 0x63f90000
+#define IMX50_UART_BASE_ADDR(n) IMX50_UART##n##_BASE_ADDR
+#define IMX50_UART_BASE(n) IMX50_UART_BASE_ADDR(n)
+
#define IMX51_UART1_BASE_ADDR 0x73fbc000
#define IMX51_UART2_BASE_ADDR 0x73fc0000
#define IMX51_UART3_BASE_ADDR 0x7000c000
@@ -85,6 +93,8 @@
#define UART_PADDR IMX_DEBUG_UART_BASE(IMX31)
#elif defined(CONFIG_DEBUG_IMX35_UART)
#define UART_PADDR IMX_DEBUG_UART_BASE(IMX35)
+#elif defined(CONFIG_DEBUG_IMX50_UART)
+#define UART_PADDR IMX_DEBUG_UART_BASE(IMX50)
#elif defined(CONFIG_DEBUG_IMX51_UART)
#define UART_PADDR IMX_DEBUG_UART_BASE(IMX51)
#elif defined(CONFIG_DEBUG_IMX53_UART)
diff --git a/arch/arm/include/debug/msm.S b/arch/arm/include/debug/msm.S
index 9166e1bc470e..9d653d475903 100644
--- a/arch/arm/include/debug/msm.S
+++ b/arch/arm/include/debug/msm.S
@@ -46,6 +46,11 @@
#define MSM_DEBUG_UART_PHYS 0x16440000
#endif
+#ifdef CONFIG_DEBUG_MSM8974_UART
+#define MSM_DEBUG_UART_BASE 0xFA71E000
+#define MSM_DEBUG_UART_PHYS 0xF991E000
+#endif
+
.macro addruart, rp, rv, tmp
#ifdef MSM_DEBUG_UART_PHYS
ldr \rp, =MSM_DEBUG_UART_PHYS
diff --git a/arch/arm/include/debug/pl01x.S b/arch/arm/include/debug/pl01x.S
index 37c6895b87e6..92ef808a2337 100644
--- a/arch/arm/include/debug/pl01x.S
+++ b/arch/arm/include/debug/pl01x.S
@@ -25,12 +25,14 @@
.macro waituart,rd,rx
1001: ldr \rd, [\rx, #UART01x_FR]
+ ARM_BE8( rev \rd, \rd )
tst \rd, #UART01x_FR_TXFF
bne 1001b
.endm
.macro busyuart,rd,rx
1001: ldr \rd, [\rx, #UART01x_FR]
+ ARM_BE8( rev \rd, \rd )
tst \rd, #UART01x_FR_BUSY
bne 1001b
.endm
diff --git a/arch/arm/include/debug/vf.S b/arch/arm/include/debug/vf.S
new file mode 100644
index 000000000000..ba12cc44b2cb
--- /dev/null
+++ b/arch/arm/include/debug/vf.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+ .macro addruart, rp, rv, tmp
+ ldr \rp, =0x40028000 @ physical
+ ldr \rv, =0xfe028000 @ virtual
+ .endm
+
+ .macro senduart, rd, rx
+ strb \rd, [\rx, #0x7] @ Data Register
+ .endm
+
+ .macro busyuart, rd, rx
+1001: ldrb \rd, [\rx, #0x4] @ Status Register 1
+ tst \rd, #1 << 6 @ TC
+ beq 1001b @ wait until transmit done
+ .endm
+
+ .macro waituart,rd,rx
+ .endm
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 18d76fd5a2af..70a1c9da30ca 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -7,6 +7,7 @@ header-y += hwcap.h
header-y += ioctls.h
header-y += kvm_para.h
header-y += mman.h
+header-y += perf_regs.h
header-y += posix_types.h
header-y += ptrace.h
header-y += setup.h
diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h
index 6d34d080372a..7dcc10d67253 100644
--- a/arch/arm/include/uapi/asm/hwcap.h
+++ b/arch/arm/include/uapi/asm/hwcap.h
@@ -26,5 +26,6 @@
#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
#define HWCAP_LPAE (1 << 20)
+#define HWCAP_EVTSTRM (1 << 21)
#endif /* _UAPI__ASMARM_HWCAP_H */
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index c1ee007523d7..c498b60c0505 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -63,7 +63,8 @@ struct kvm_regs {
/* Supported Processor Types */
#define KVM_ARM_TARGET_CORTEX_A15 0
-#define KVM_ARM_NUM_TARGETS 1
+#define KVM_ARM_TARGET_CORTEX_A7 1
+#define KVM_ARM_NUM_TARGETS 2
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
diff --git a/arch/arm/include/uapi/asm/perf_regs.h b/arch/arm/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..ce59448458b2
--- /dev/null
+++ b/arch/arm/include/uapi/asm/perf_regs.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_ARM_PERF_REGS_H
+#define _ASM_ARM_PERF_REGS_H
+
+enum perf_event_arm_regs {
+ PERF_REG_ARM_R0,
+ PERF_REG_ARM_R1,
+ PERF_REG_ARM_R2,
+ PERF_REG_ARM_R3,
+ PERF_REG_ARM_R4,
+ PERF_REG_ARM_R5,
+ PERF_REG_ARM_R6,
+ PERF_REG_ARM_R7,
+ PERF_REG_ARM_R8,
+ PERF_REG_ARM_R9,
+ PERF_REG_ARM_R10,
+ PERF_REG_ARM_FP,
+ PERF_REG_ARM_IP,
+ PERF_REG_ARM_SP,
+ PERF_REG_ARM_LR,
+ PERF_REG_ARM_PC,
+ PERF_REG_ARM_MAX,
+};
+#endif /* _ASM_ARM_PERF_REGS_H */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5140df5f23aa..a30fc9be9e9e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,7 +17,8 @@ CFLAGS_REMOVE_return_address.o = -pg
obj-y := elf.o entry-common.o irq.o opcodes.o \
process.o ptrace.o return_address.o \
- setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
+ setup.o signal.o sigreturn_codes.o \
+ stacktrace.o sys_arm.o time.o traps.o
obj-$(CONFIG_ATAGS) += atags_parse.o
obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
@@ -78,6 +79,7 @@ obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
+obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 221f07b11ccb..1791f12c180b 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/sched_clock.h>
#include <asm/delay.h>
@@ -22,13 +21,6 @@ static unsigned long arch_timer_read_counter_long(void)
return arch_timer_read_counter();
}
-static u32 sched_clock_mult __read_mostly;
-
-static unsigned long long notrace arch_timer_sched_clock(void)
-{
- return arch_timer_read_counter() * sched_clock_mult;
-}
-
static struct delay_timer arch_delay_timer;
static void __init arch_timer_delay_timer_register(void)
@@ -48,11 +40,5 @@ int __init arch_timer_arch_init(void)
arch_timer_delay_timer_register();
- /* Cache the sched_clock multiplier to save a divide in the hot path. */
- sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
- sched_clock_func = arch_timer_sched_clock;
- pr_info("sched_clock: ARM arch timer >56 bits at %ukHz, resolution %uns\n",
- arch_timer_rate / 1000, sched_clock_mult);
-
return 0;
}
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 60d3b738d420..1f031ddd0667 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -155,4 +155,5 @@ EXPORT_SYMBOL(__gnu_mcount_nc);
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
EXPORT_SYMBOL(__pv_phys_offset);
+EXPORT_SYMBOL(__pv_offset);
#endif
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index f35906b3d8c9..739c3dfc1da2 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -174,6 +174,19 @@ bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
}
+static const void * __init arch_get_next_mach(const char *const **match)
+{
+ static const struct machine_desc *mdesc = __arch_info_begin;
+ const struct machine_desc *m = mdesc;
+
+ if (m >= __arch_info_end)
+ return NULL;
+
+ mdesc++;
+ *match = m->dt_compat;
+ return m;
+}
+
/**
* setup_machine_fdt - Machine setup when a dtb was passed to the kernel
* @dt_phys: physical address of dt blob
@@ -183,11 +196,7 @@ bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
*/
const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
{
- struct boot_param_header *devtree;
const struct machine_desc *mdesc, *mdesc_best = NULL;
- unsigned int score, mdesc_score = ~1;
- unsigned long dt_root;
- const char *model;
#ifdef CONFIG_ARCH_MULTIPLATFORM
DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
@@ -196,32 +205,20 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
mdesc_best = &__mach_desc_GENERIC_DT;
#endif
- if (!dt_phys)
+ if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys)))
return NULL;
- devtree = phys_to_virt(dt_phys);
+ mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
- /* check device tree validity */
- if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
- return NULL;
-
- /* Search the mdescs for the 'best' compatible value match */
- initial_boot_params = devtree;
- dt_root = of_get_flat_dt_root();
- for_each_machine_desc(mdesc) {
- score = of_flat_dt_match(dt_root, mdesc->dt_compat);
- if (score > 0 && score < mdesc_score) {
- mdesc_best = mdesc;
- mdesc_score = score;
- }
- }
- if (!mdesc_best) {
+ if (!mdesc) {
const char *prop;
long size;
+ unsigned long dt_root;
early_print("\nError: unrecognized/unsupported "
"device tree compatible list:\n[ ");
+ dt_root = of_get_flat_dt_root();
prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
while (size > 0) {
early_print("'%s' ", prop);
@@ -233,22 +230,8 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
dump_machine_table(); /* does not return */
}
- model = of_get_flat_dt_prop(dt_root, "model", NULL);
- if (!model)
- model = of_get_flat_dt_prop(dt_root, "compatible", NULL);
- if (!model)
- model = "<unknown>";
- pr_info("Machine: %s, model: %s\n", mdesc_best->name, model);
-
- /* Retrieve various information from the /chosen node */
- of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
- /* Initialize {size,address}-cells info */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
- /* Setup memory, calling early_init_dt_add_memory_arch */
- of_scan_flat_dt(early_init_dt_scan_memory, NULL);
-
/* Change machine number to match the mdesc we're using */
- __machine_arch_type = mdesc_best->nr;
+ __machine_arch_type = mdesc->nr;
- return mdesc_best;
+ return mdesc;
}
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 9cbe70c8b0ef..55090fbb81a2 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -416,9 +416,8 @@ __und_usr:
bne __und_usr_thumb
sub r4, r2, #4 @ ARM instr at LR - 4
1: ldrt r0, [r4]
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r0, r0 @ little endian instruction
-#endif
+ ARM_BE8(rev r0, r0) @ little endian instruction
+
@ r0 = 32-bit ARM instruction which caused the exception
@ r2 = PC value for the following instruction (:= regs->ARM_pc)
@ r4 = PC value for the faulting instruction
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index bc6bd9683ba4..a2dcafdf1bc8 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -393,9 +393,7 @@ ENTRY(vector_swi)
#else
USER( ldr r10, [lr, #-4] ) @ get SWI instruction
#endif
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r10, r10 @ little endian instruction
-#endif
+ ARM_BE8(rev r10, r10) @ little endian instruction
#elif defined(CONFIG_AEABI)
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index 52b26432c9a9..2260f1855820 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -14,8 +14,6 @@
#include <asm/thread_notify.h>
#include <asm/v7m.h>
-#include <mach/entry-macro.S>
-
#include "entry-header.S"
#ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 476de57dcef2..7801866e626a 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -77,6 +77,7 @@
__HEAD
ENTRY(stext)
+ ARM_BE8(setend be ) @ ensure we are in BE8 mode
THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
@@ -352,6 +353,9 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
+
+ ARM_BE8(setend be) @ ensure we are in BE8 mode
+
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install_secondary
#endif
@@ -555,6 +559,14 @@ ENTRY(fixup_smp)
ldmfd sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
+#ifdef __ARMEB__
+#define LOW_OFFSET 0x4
+#define HIGH_OFFSET 0x0
+#else
+#define LOW_OFFSET 0x0
+#define HIGH_OFFSET 0x4
+#endif
+
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
/* __fixup_pv_table - patch the stub instructions with the delta between
@@ -565,17 +577,20 @@ ENDPROC(fixup_smp)
__HEAD
__fixup_pv_table:
adr r0, 1f
- ldmia r0, {r3-r5, r7}
- sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
+ ldmia r0, {r3-r7}
+ mvn ip, #0
+ subs r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
add r4, r4, r3 @ adjust table start address
add r5, r5, r3 @ adjust table end address
- add r7, r7, r3 @ adjust __pv_phys_offset address
- str r8, [r7] @ save computed PHYS_OFFSET to __pv_phys_offset
+ add r6, r6, r3 @ adjust __pv_phys_offset address
+ add r7, r7, r3 @ adjust __pv_offset address
+ str r8, [r6, #LOW_OFFSET] @ save computed PHYS_OFFSET to __pv_phys_offset
+ strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits
mov r6, r3, lsr #24 @ constant for add/sub instructions
teq r3, r6, lsl #24 @ must be 16MiB aligned
THUMB( it ne @ cross section branch )
bne __error
- str r6, [r7, #4] @ save to __pv_offset
+ str r3, [r7, #LOW_OFFSET] @ save to __pv_offset low bits
b __fixup_a_pv_table
ENDPROC(__fixup_pv_table)
@@ -584,10 +599,19 @@ ENDPROC(__fixup_pv_table)
.long __pv_table_begin
.long __pv_table_end
2: .long __pv_phys_offset
+ .long __pv_offset
.text
__fixup_a_pv_table:
+ adr r0, 3f
+ ldr r6, [r0]
+ add r6, r6, r3
+ ldr r0, [r6, #HIGH_OFFSET] @ pv_offset high word
+ ldr r6, [r6, #LOW_OFFSET] @ pv_offset low word
+ mov r6, r6, lsr #24
+ cmn r0, #1
#ifdef CONFIG_THUMB2_KERNEL
+ moveq r0, #0x200000 @ set bit 21, mov to mvn instruction
lsls r6, #24
beq 2f
clz r7, r6
@@ -601,18 +625,42 @@ __fixup_a_pv_table:
b 2f
1: add r7, r3
ldrh ip, [r7, #2]
- and ip, 0x8f00
- orr ip, r6 @ mask in offset bits 31-24
+ARM_BE8(rev16 ip, ip)
+ tst ip, #0x4000
+ and ip, #0x8f00
+ orrne ip, r6 @ mask in offset bits 31-24
+ orreq ip, r0 @ mask in offset bits 7-0
+ARM_BE8(rev16 ip, ip)
strh ip, [r7, #2]
+ bne 2f
+ ldrh ip, [r7]
+ARM_BE8(rev16 ip, ip)
+ bic ip, #0x20
+ orr ip, ip, r0, lsr #16
+ARM_BE8(rev16 ip, ip)
+ strh ip, [r7]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
bcc 1b
bx lr
#else
+ moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
b 2f
1: ldr ip, [r7, r3]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+ @ in BE8, we load data in BE, but instructions still in LE
+ bic ip, ip, #0xff000000
+ tst ip, #0x000f0000 @ check the rotation field
+ orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
+ biceq ip, ip, #0x00004000 @ clear bit 22
+ orreq ip, ip, r0, lsl #24 @ mask in offset bits 7-0
+#else
bic ip, ip, #0x000000ff
- orr ip, ip, r6 @ mask in offset bits 31-24
+ tst ip, #0xf00 @ check the rotation field
+ orrne ip, ip, r6 @ mask in offset bits 31-24
+ biceq ip, ip, #0x400000 @ clear bit 22
+ orreq ip, ip, r0 @ mask in offset bits 7-0
+#endif
str ip, [r7, r3]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
@@ -621,28 +669,30 @@ __fixup_a_pv_table:
#endif
ENDPROC(__fixup_a_pv_table)
+ .align
+3: .long __pv_offset
+
ENTRY(fixup_pv_table)
stmfd sp!, {r4 - r7, lr}
- ldr r2, 2f @ get address of __pv_phys_offset
mov r3, #0 @ no offset
mov r4, r0 @ r0 = table start
add r5, r0, r1 @ r1 = table size
- ldr r6, [r2, #4] @ get __pv_offset
bl __fixup_a_pv_table
ldmfd sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)
- .align
-2: .long __pv_phys_offset
-
.data
.globl __pv_phys_offset
.type __pv_phys_offset, %object
__pv_phys_offset:
- .long 0
- .size __pv_phys_offset, . - __pv_phys_offset
+ .quad 0
+ .size __pv_phys_offset, . -__pv_phys_offset
+
+ .globl __pv_offset
+ .type __pv_offset, %object
__pv_offset:
- .long 0
+ .quad 0
+ .size __pv_offset, . -__pv_offset
#endif
#include "head-common.S"
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 7b95de601357..3d446605cbf8 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -344,13 +344,13 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
/* Breakpoint */
ctrl_base = ARM_BASE_BCR;
val_base = ARM_BASE_BVR;
- slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
} else {
/* Watchpoint */
ctrl_base = ARM_BASE_WCR;
val_base = ARM_BASE_WVR;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
}
@@ -396,12 +396,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
/* Breakpoint */
base = ARM_BASE_BCR;
- slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
} else {
/* Watchpoint */
base = ARM_BASE_WCR;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
}
@@ -697,7 +697,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
for (i = 0; i < core_num_wrps; ++i) {
rcu_read_lock();
@@ -768,7 +768,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
struct perf_event *wp, **slots;
struct arch_hw_breakpoint *info;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
for (i = 0; i < core_num_wrps; ++i) {
rcu_read_lock();
@@ -802,7 +802,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
/* The exception entry code places the amended lr in the PC. */
addr = regs->ARM_pc;
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 170e9f34003f..a7b621ece23d 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -171,13 +171,13 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static void __kprobes set_current_kprobe(struct kprobe *p)
{
- __get_cpu_var(current_kprobe) = p;
+ __this_cpu_write(current_kprobe, p);
}
static void __kprobes
@@ -421,10 +421,10 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
continue;
if (ri->rp && ri->rp->handler) {
- __get_cpu_var(current_kprobe) = &ri->rp->kp;
+ __this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->rp->handler(ri, regs);
- __get_cpu_var(current_kprobe) = NULL;
+ __this_cpu_write(current_kprobe, NULL);
}
orig_ret_address = (unsigned long)ri->ret_addr;
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 084dc8896986..5fdb4038f969 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -24,6 +24,7 @@
#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/unwind.h>
+#include <asm/opcodes.h>
#ifdef CONFIG_XIP_KERNEL
/*
@@ -60,6 +61,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
Elf32_Sym *sym;
const char *symname;
s32 offset;
+ u32 tmp;
#ifdef CONFIG_THUMB2_KERNEL
u32 upper, lower, sign, j1, j2;
#endif
@@ -95,7 +97,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
case R_ARM_PC24:
case R_ARM_CALL:
case R_ARM_JUMP24:
- offset = (*(u32 *)loc & 0x00ffffff) << 2;
+ offset = __mem_to_opcode_arm(*(u32 *)loc);
+ offset = (offset & 0x00ffffff) << 2;
if (offset & 0x02000000)
offset -= 0x04000000;
@@ -111,9 +114,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
}
offset >>= 2;
+ offset &= 0x00ffffff;
- *(u32 *)loc &= 0xff000000;
- *(u32 *)loc |= offset & 0x00ffffff;
+ *(u32 *)loc &= __opcode_to_mem_arm(0xff000000);
+ *(u32 *)loc |= __opcode_to_mem_arm(offset);
break;
case R_ARM_V4BX:
@@ -121,8 +125,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
* other bits to re-code instruction as
* MOV PC,Rm.
*/
- *(u32 *)loc &= 0xf000000f;
- *(u32 *)loc |= 0x01a0f000;
+ *(u32 *)loc &= __opcode_to_mem_arm(0xf000000f);
+ *(u32 *)loc |= __opcode_to_mem_arm(0x01a0f000);
break;
case R_ARM_PREL31:
@@ -132,7 +136,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
case R_ARM_MOVW_ABS_NC:
case R_ARM_MOVT_ABS:
- offset = *(u32 *)loc;
+ offset = tmp = __mem_to_opcode_arm(*(u32 *)loc);
offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
offset = (offset ^ 0x8000) - 0x8000;
@@ -140,16 +144,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
offset >>= 16;
- *(u32 *)loc &= 0xfff0f000;
- *(u32 *)loc |= ((offset & 0xf000) << 4) |
- (offset & 0x0fff);
+ tmp &= 0xfff0f000;
+ tmp |= ((offset & 0xf000) << 4) |
+ (offset & 0x0fff);
+
+ *(u32 *)loc = __opcode_to_mem_arm(tmp);
break;
#ifdef CONFIG_THUMB2_KERNEL
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
- upper = *(u16 *)loc;
- lower = *(u16 *)(loc + 2);
+ upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+ lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
/*
* 25 bit signed address range (Thumb-2 BL and B.W
@@ -198,17 +204,20 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
sign = (offset >> 24) & 1;
j1 = sign ^ (~(offset >> 23) & 1);
j2 = sign ^ (~(offset >> 22) & 1);
- *(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
+ upper = (u16)((upper & 0xf800) | (sign << 10) |
((offset >> 12) & 0x03ff));
- *(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
- (j1 << 13) | (j2 << 11) |
- ((offset >> 1) & 0x07ff));
+ lower = (u16)((lower & 0xd000) |
+ (j1 << 13) | (j2 << 11) |
+ ((offset >> 1) & 0x07ff));
+
+ *(u16 *)loc = __opcode_to_mem_thumb16(upper);
+ *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
break;
case R_ARM_THM_MOVW_ABS_NC:
case R_ARM_THM_MOVT_ABS:
- upper = *(u16 *)loc;
- lower = *(u16 *)(loc + 2);
+ upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+ lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
/*
* MOVT/MOVW instructions encoding in Thumb-2:
@@ -229,12 +238,14 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
offset >>= 16;
- *(u16 *)loc = (u16)((upper & 0xfbf0) |
- ((offset & 0xf000) >> 12) |
- ((offset & 0x0800) >> 1));
- *(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
- ((offset & 0x0700) << 4) |
- (offset & 0x00ff));
+ upper = (u16)((upper & 0xfbf0) |
+ ((offset & 0xf000) >> 12) |
+ ((offset & 0x0800) >> 1));
+ lower = (u16)((lower & 0x8f00) |
+ ((offset & 0x0700) << 4) |
+ (offset & 0x00ff));
+ *(u16 *)loc = __opcode_to_mem_thumb16(upper);
+ *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
break;
#endif
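
Editorial note: the __mem_to_opcode_arm()/__opcode_to_mem_arm() conversions above exist because a BE8 kernel (big-endian data, little-endian instructions) must not do its relocation arithmetic on the raw memory image. Below is a minimal stand-alone C sketch of the R_ARM_PC24/CALL/JUMP24 fix-up, with byte-swap helpers standing in for the <asm/opcodes.h> macros; the helper names and the BE8 assumption are illustrative only, not part of the patch.

#include <stdint.h>

/* Stand-ins for __mem_to_opcode_arm()/__opcode_to_mem_arm(): on a BE8
 * build these swap the bytes of the 32-bit word; on an LE build they
 * would be no-ops. */
static uint32_t mem_to_opcode_arm(uint32_t x)  { return __builtin_bswap32(x); }
static uint32_t opcode_to_mem_arm(uint32_t x)  { return __builtin_bswap32(x); }

/* Patch the B/BL at *loc so it branches to 'sym', mirroring the
 * R_ARM_PC24/CALL/JUMP24 case above (sketch only, no range checking). */
static void fixup_call24(uint32_t *loc, uint32_t sym)
{
        uint32_t insn = mem_to_opcode_arm(*loc);
        int32_t offset = (insn & 0x00ffffff) << 2;

        if (offset & 0x02000000)                /* sign-extend the 26-bit field */
                offset -= 0x04000000;
        offset += sym - (uint32_t)(uintptr_t)loc;

        offset >>= 2;
        offset &= 0x00ffffff;

        insn = (insn & 0xff000000) | offset;    /* keep condition/opcode bits */
        *loc = opcode_to_mem_arm(insn);
}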
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index e186ee1e63f6..bc3f2efa0d86 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -256,12 +256,11 @@ validate_event(struct pmu_hw_events *hw_events,
struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- struct pmu *leader_pmu = event->group_leader->pmu;
if (is_software_event(event))
return 1;
- if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+ if (event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 8d6147b2001f..d85055cd24ba 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
{
- return &__get_cpu_var(cpu_hw_events);
+ return this_cpu_ptr(&cpu_hw_events);
}
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c
new file mode 100644
index 000000000000..6e4379c67cbc
--- /dev/null
+++ b/arch/arm/kernel/perf_regs.c
@@ -0,0 +1,30 @@
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM_MAX))
+ return 0;
+
+ return regs->uregs[idx];
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_ARM_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+ return PERF_SAMPLE_REGS_ABI_32;
+}
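
Editorial note: REG_RESERVED keeps exactly the bits at or above PERF_REG_ARM_MAX, so perf_reg_validate() accepts any non-empty mask that names only the ARM sample registers. A stand-alone sketch of the same check, assuming PERF_REG_ARM_MAX is 16:

#include <stdint.h>

#define PERF_REG_ARM_MAX_SKETCH 16                       /* assumed value */
#define REG_RESERVED_SKETCH (~((1ULL << PERF_REG_ARM_MAX_SKETCH) - 1))

static int perf_reg_validate_sketch(uint64_t mask)
{
        if (!mask || (mask & REG_RESERVED_SKETCH))
                return -22;                              /* -EINVAL */
        return 0;
}

/* perf_reg_validate_sketch(0x0000ffff) == 0    (r0..r15 requested)
 * perf_reg_validate_sketch(1ULL << 16) == -22  (reserved bit set -> rejected)
 * perf_reg_validate_sketch(0)          == -22  (empty mask       -> rejected) */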
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 70ded3fb42d9..570a48cc3d64 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -14,7 +14,6 @@
*/
#include <linux/init.h>
-#include <linux/irqchip/arm-gic.h>
#include <linux/smp.h>
#include <linux/of.h>
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 0e1e2b3afa45..6a1b8a81b1ae 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -73,6 +73,8 @@ __setup("fpe=", fpe_setup);
#endif
extern void paging_init(const struct machine_desc *desc);
+extern void early_paging_init(const struct machine_desc *,
+ struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);
@@ -599,6 +601,8 @@ static void __init setup_processor(void)
elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
+ erratum_a15_798181_init();
+
feat_v6_fixup();
cacheid_init();
@@ -619,9 +623,10 @@ void __init dump_machine_table(void)
/* can't use cpu_relax() here as it may require MMU setup */;
}
-int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
+int __init arm_add_memory(u64 start, u64 size)
{
struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+ u64 aligned_start;
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
@@ -634,10 +639,16 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
* Size is appropriately rounded down, start is rounded up.
*/
size -= start & ~PAGE_MASK;
- bank->start = PAGE_ALIGN(start);
+ aligned_start = PAGE_ALIGN(start);
-#ifndef CONFIG_ARM_LPAE
- if (bank->start + size < bank->start) {
+#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
+ if (aligned_start > ULONG_MAX) {
+ printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
+ "32-bit physical address space\n", (long long)start);
+ return -EINVAL;
+ }
+
+ if (aligned_start + size > ULONG_MAX) {
printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
"32-bit physical address space\n", (long long)start);
/*
@@ -645,10 +656,11 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
* 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
* This means we lose a page after masking.
*/
- size = ULONG_MAX - bank->start;
+ size = ULONG_MAX - aligned_start;
}
#endif
+ bank->start = aligned_start;
bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
/*
@@ -669,8 +681,8 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
static int __init early_mem(char *p)
{
static int usermem __initdata = 0;
- phys_addr_t size;
- phys_addr_t start;
+ u64 size;
+ u64 start;
char *endp;
/*
@@ -878,6 +890,8 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
+ early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
sanity_check_meminfo();
arm_memblock_init(&meminfo, mdesc);
@@ -975,6 +989,7 @@ static const char *hwcap_str[] = {
"idivt",
"vfpd32",
"lpae",
+ "evtstrm",
NULL
};
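
Editorial note: to make the arm_add_memory() truncation above concrete, here is a worked example for a !CONFIG_ARCH_PHYS_ADDR_T_64BIT kernel, assuming 4K pages:

  arm_add_memory(0xf0000000, 0x20000000):
    aligned_start        = 0xf0000000
    aligned_start + size = 0x110000000  > ULONG_MAX (0xffffffff)  -> truncate
    size                 = ULONG_MAX - aligned_start     = 0x0fffffff
    bank->size           = 0x0fffffff & ~(PAGE_SIZE - 1) = 0x0ffff000

which is the "lose a page after masking" case the in-code comment mentions.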
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index ab3304225272..64845fc4152a 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -21,29 +21,7 @@
#include <asm/unistd.h>
#include <asm/vfp.h>
-/*
- * For ARM syscalls, we encode the syscall number into the instruction.
- */
-#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-
-/*
- * With EABI, the syscall number has to be loaded into r7.
- */
-#define MOV_R7_NR_SIGRETURN (0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-/*
- * For Thumb syscalls, we pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
-#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-static const unsigned long sigreturn_codes[7] = {
- MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
- MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
-};
+extern const unsigned long sigreturn_codes[7];
static unsigned long signal_return_offset;
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
new file mode 100644
index 000000000000..3c5d0f2170fd
--- /dev/null
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -0,0 +1,80 @@
+/*
+ * sigreturn_codes.S - code snippets for sigreturn syscalls
+ *
+ * Created by: Victor Kamensky, 2013-08-13
+ * Copyright: (C) 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/unistd.h>
+
+/*
+ * For ARM syscalls, we encode the syscall number into the instruction.
+ * With EABI, the syscall number has to be loaded into r7, so the ARM
+ * syscall sequence snippet consists of a move and an svc in .arm encoding.
+ *
+ * For Thumb syscalls, we pass the syscall number via r7. We therefore
+ * need two 16-bit instructions in .thumb encoding.
+ *
+ * Please note that the sigreturn_codes snippets are not executed in
+ * place; the kernel copies them to the appropriate location. The code
+ * in arch/arm/kernel/signal.c is very sensitive to the layout of these
+ * snippets.
+ */
+
+#if __LINUX_ARM_ARCH__ <= 4
+ /*
+	 * Note that we manually set the minimal architecture version that
+	 * supports the required Thumb opcodes when building for earlier
+	 * architectures. It is fine for this file to be combined with
+	 * objects built for lower architecture variants, since these code
+	 * snippets are only used as input data.
+ */
+ .arch armv4t
+#endif
+
+ .section .rodata
+ .global sigreturn_codes
+ .type sigreturn_codes, #object
+
+ .arm
+
+sigreturn_codes:
+
+ /* ARM sigreturn syscall code snippet */
+ mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+ swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+ /* Thumb sigreturn syscall code snippet */
+ .thumb
+ movs r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+ swi #0
+
+ /* ARM sigreturn_rt syscall code snippet */
+ .arm
+ mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+ swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+ /* Thumb sigreturn_rt syscall code snippet */
+ .thumb
+ movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+ swi #0
+
+ /*
+	 * Note on additional space: the setup_return code in signal.c
+	 * always copies two words regardless of whether it is the Thumb
+	 * case or not, so we need an additional word after the real
+	 * last entry.
+ */
+ .arm
+ .space 4
+
+ .size sigreturn_codes, . - sigreturn_codes
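
Editorial note: for reference, the two ARM-mode instructions of the first snippet assemble to the same words the removed signal.c constants hard-coded. Worked example with assumed values __NR_sigreturn = 119, __NR_SYSCALL_BASE = 0 and __NR_OABI_SYSCALL_BASE = 0x900000:

  mov r7, #(119 - 0)       ->  0xe3a07000 | 119        = 0xe3a07077   (was MOV_R7_NR_SIGRETURN)
  swi #(119 | 0x900000)    ->  0xef000000 | 0x00900077 = 0xef900077   (was SWI_SYS_SIGRETURN)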
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index db1536b8b30b..b907d9b790ab 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -55,6 +55,7 @@
* specific registers and some other data for resume.
* r0 = suspend function arg0
* r1 = suspend function
+ * r2 = MPIDR value the resuming CPU will use
*/
ENTRY(__cpu_suspend)
stmfd sp!, {r4 - r11, lr}
@@ -67,23 +68,18 @@ ENTRY(__cpu_suspend)
mov r5, sp @ current virtual SP
add r4, r4, #12 @ Space for pgd, virt sp, phys resume fn
sub sp, sp, r4 @ allocate CPU state on stack
- stmfd sp!, {r0, r1} @ save suspend func arg and pointer
- add r0, sp, #8 @ save pointer to save block
- mov r1, r4 @ size of save block
- mov r2, r5 @ virtual SP
ldr r3, =sleep_save_sp
+ stmfd sp!, {r0, r1} @ save suspend func arg and pointer
ldr r3, [r3, #SLEEP_SAVE_SP_VIRT]
- ALT_SMP(mrc p15, 0, r9, c0, c0, 5)
- ALT_UP_B(1f)
- ldr r8, =mpidr_hash
- /*
- * This ldmia relies on the memory layout of the mpidr_hash
- * struct mpidr_hash.
- */
- ldmia r8, {r4-r7} @ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts
- compute_mpidr_hash lr, r5, r6, r7, r9, r4
- add r3, r3, lr, lsl #2
-1:
+ ALT_SMP(ldr r0, =mpidr_hash)
+ ALT_UP_B(1f)
+ /* This ldmia relies on the memory layout of the mpidr_hash struct */
+ ldmia r0, {r1, r6-r8} @ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts
+ compute_mpidr_hash r0, r6, r7, r8, r2, r1
+ add r3, r3, r0, lsl #2
+1: mov r2, r5 @ virtual SP
+ mov r1, r4 @ size of save block
+ add r0, sp, #8 @ pointer to save block
bl __cpu_suspend_save
adr lr, BSYM(cpu_suspend_abort)
ldmfd sp!, {r0, pc} @ call suspend fn
@@ -130,6 +126,7 @@ ENDPROC(cpu_resume_after_mmu)
.data
.align
ENTRY(cpu_resume)
+ARM_BE8(setend be) @ ensure we are in BE mode
mov r1, #0
ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
ALT_UP_B(1f)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 72024ea8a3a6..5c820cbcf918 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -66,6 +66,7 @@ enum ipi_msg_type {
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_COMPLETION,
};
static DECLARE_COMPLETION(cpu_running);
@@ -80,7 +81,7 @@ void __init smp_set_ops(struct smp_operations *ops)
static unsigned long get_arch_pgd(pgd_t *pgd)
{
- phys_addr_t pgdir = virt_to_phys(pgd);
+ phys_addr_t pgdir = virt_to_idmap(pgd);
BUG_ON(pgdir & ARCH_PGD_MASK);
return pgdir >> ARCH_PGD_SHIFT;
}
@@ -456,6 +457,7 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_COMPLETION, "completion interrupts"),
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -515,6 +517,19 @@ static void ipi_cpu_stop(unsigned int cpu)
cpu_relax();
}
+static DEFINE_PER_CPU(struct completion *, cpu_completion);
+
+int register_ipi_completion(struct completion *completion, int cpu)
+{
+ per_cpu(cpu_completion, cpu) = completion;
+ return IPI_COMPLETION;
+}
+
+static void ipi_complete(unsigned int cpu)
+{
+ complete(per_cpu(cpu_completion, cpu));
+}
+
/*
* Main handler for inter-processor interrupts
*/
@@ -565,6 +580,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
+ case IPI_COMPLETION:
+ irq_enter();
+ ipi_complete(cpu);
+ irq_exit();
+ break;
+
default:
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
cpu, ipinr);
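
Editorial note: register_ipi_completion() stores the waiter's completion in the target CPU's per-cpu slot and hands back the IPI number to raise. A hedged usage sketch of the waiting side (not part of this patch; kick_remote is a made-up callback, and <linux/completion.h> is assumed):

/* The waiter registers its completion against the CPU that will receive
 * IPI_COMPLETION, hands the returned IPI number to the other side, and
 * sleeps until ipi_complete() runs in handle_IPI() on that CPU. */
static void wait_for_remote_work(int this_cpu,
                                 void (*kick_remote)(int target_cpu, int ipinr))
{
        struct completion done;
        int ipinr;

        init_completion(&done);
        ipinr = register_ipi_completion(&done, this_cpu);
        kick_remote(this_cpu, ipinr);   /* remote raises IPI_COMPLETION at us */
        wait_for_completion(&done);     /* woken by ipi_complete() */
}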
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 5bc1a63284e3..1aafa0d785eb 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -28,7 +28,7 @@
*/
unsigned int __init scu_get_core_count(void __iomem *scu_base)
{
- unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG);
+ unsigned int ncores = readl_relaxed(scu_base + SCU_CONFIG);
return (ncores & 0x03) + 1;
}
@@ -42,19 +42,19 @@ void scu_enable(void __iomem *scu_base)
#ifdef CONFIG_ARM_ERRATA_764369
/* Cortex-A9 only */
if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
- scu_ctrl = __raw_readl(scu_base + 0x30);
+ scu_ctrl = readl_relaxed(scu_base + 0x30);
if (!(scu_ctrl & 1))
- __raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
+ writel_relaxed(scu_ctrl | 0x1, scu_base + 0x30);
}
#endif
- scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
+ scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
/* already enabled? */
if (scu_ctrl & 1)
return;
scu_ctrl |= 1;
- __raw_writel(scu_ctrl, scu_base + SCU_CTRL);
+ writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);
/*
* Ensure that the data accessed by CPU0 before the SCU was
@@ -80,9 +80,9 @@ int scu_power_mode(void __iomem *scu_base, unsigned int mode)
if (mode > 3 || mode == 1 || cpu > 3)
return -EINVAL;
- val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+ val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
val |= mode;
- __raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+ writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu);
return 0;
}
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 83ccca303df8..95d063620b76 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -70,6 +70,40 @@ static inline void ipi_flush_bp_all(void *ignored)
local_flush_bp_all();
}
+#ifdef CONFIG_ARM_ERRATA_798181
+bool (*erratum_a15_798181_handler)(void);
+
+static bool erratum_a15_798181_partial(void)
+{
+ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+ dsb(ish);
+ return false;
+}
+
+static bool erratum_a15_798181_broadcast(void)
+{
+ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+ dsb(ish);
+ return true;
+}
+
+void erratum_a15_798181_init(void)
+{
+ unsigned int midr = read_cpuid_id();
+ unsigned int revidr = read_cpuid(CPUID_REVIDR);
+
+ /* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
+ if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 ||
+ (revidr & 0x210) == 0x210) {
+ return;
+ }
+ if (revidr & 0x10)
+ erratum_a15_798181_handler = erratum_a15_798181_partial;
+ else
+ erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+}
+#endif
+
static void ipi_flush_tlb_a15_erratum(void *arg)
{
dmb();
@@ -80,7 +114,6 @@ static void broadcast_tlb_a15_erratum(void)
if (!erratum_a15_798181())
return;
- dummy_flush_tlb_a15_erratum();
smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
}
@@ -92,7 +125,6 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
if (!erratum_a15_798181())
return;
- dummy_flush_tlb_a15_erratum();
this_cpu = get_cpu();
a15_erratum_get_cpumask(this_cpu, mm, &mask);
smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
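
Editorial note: erratum_a15_798181_init() turns the per-call MIDR/REVIDR check into a one-time boot decision: the handler pointer stays NULL on unaffected parts, points at the 'partial' variant when the REVIDR ECO bit says a local operation is enough, and at the 'broadcast' variant otherwise. A sketch of how the erratum_a15_798181() predicate used above is presumably wired up (shape assumed; the real declaration lives in a header, not in this hunk):

static inline bool erratum_a15_798181(void)
{
#ifdef CONFIG_ARM_ERRATA_798181
	return erratum_a15_798181_handler && erratum_a15_798181_handler();
#else
	return false;
#endif
}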
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 2985c9f0905d..6591e26fc13f 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -45,7 +45,7 @@ static void twd_set_mode(enum clock_event_mode mode,
case CLOCK_EVT_MODE_PERIODIC:
ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
| TWD_TIMER_CONTROL_PERIODIC;
- __raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
+ writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
twd_base + TWD_TIMER_LOAD);
break;
case CLOCK_EVT_MODE_ONESHOT:
@@ -58,18 +58,18 @@ static void twd_set_mode(enum clock_event_mode mode,
ctrl = 0;
}
- __raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
}
static int twd_set_next_event(unsigned long evt,
struct clock_event_device *unused)
{
- unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+ unsigned long ctrl = readl_relaxed(twd_base + TWD_TIMER_CONTROL);
ctrl |= TWD_TIMER_CONTROL_ENABLE;
- __raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
- __raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(evt, twd_base + TWD_TIMER_COUNTER);
+ writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
return 0;
}
@@ -82,8 +82,8 @@ static int twd_set_next_event(unsigned long evt,
*/
static int twd_timer_ack(void)
{
- if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
- __raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
+ if (readl_relaxed(twd_base + TWD_TIMER_INTSTAT)) {
+ writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT);
return 1;
}
@@ -211,15 +211,15 @@ static void twd_calibrate_rate(void)
waitjiffies += 5;
/* enable, no interrupt or reload */
- __raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(0x1, twd_base + TWD_TIMER_CONTROL);
/* maximum value */
- __raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
+ writel_relaxed(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
while (get_jiffies_64() < waitjiffies)
udelay(10);
- count = __raw_readl(twd_base + TWD_TIMER_COUNTER);
+ count = readl_relaxed(twd_base + TWD_TIMER_COUNTER);
twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
@@ -277,7 +277,7 @@ static void twd_timer_setup(void)
* bother with the below.
*/
if (per_cpu(percpu_setup_called, cpu)) {
- __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
clockevents_register_device(clk);
enable_percpu_irq(clk->irq, 0);
return;
@@ -290,7 +290,7 @@ static void twd_timer_setup(void)
* The following is done once per CPU the first time .setup() is
* called.
*/
- __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
clk->name = "local_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 41cf3cbf756d..2835d35234ca 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -10,7 +10,7 @@
#include <asm/suspend.h>
#include <asm/tlbflush.h>
-extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
+extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
extern void cpu_resume_mmu(void);
#ifdef CONFIG_MMU
@@ -21,6 +21,7 @@ extern void cpu_resume_mmu(void);
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
struct mm_struct *mm = current->active_mm;
+ u32 __mpidr = cpu_logical_map(smp_processor_id());
int ret;
if (!idmap_pgd)
@@ -32,7 +33,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* resume (indicated by a zero return code), we need to switch
* back to the correct page tables.
*/
- ret = __cpu_suspend(arg, fn);
+ ret = __cpu_suspend(arg, fn, __mpidr);
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
local_flush_bp_all();
@@ -44,7 +45,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
#else
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
- return __cpu_suspend(arg, fn);
+ u32 __mpidr = cpu_logical_map(smp_processor_id());
+ return __cpu_suspend(arg, fn, __mpidr);
}
#define idmap_pgd NULL
#endif
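
Editorial note: the extra __cpu_suspend() argument only changes the internal calling convention; C callers keep using the two-argument cpu_suspend() and the MPIDR is captured for them. A hedged sketch of the caller-side contract (the finisher name below is made up):

static int my_platform_finisher(unsigned long arg)
{
	/* enter the low-power state here; if this function returns,
	 * the suspend did not happen */
	return 1;
}

/* ret = cpu_suspend(0, my_platform_finisher);
 *   ret == 0  -> we came back through cpu_resume with state restored
 *   ret != 0  -> suspend was not entered (finisher returned, or setup failed) */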
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 98aee3258398..829a96d4a179 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -11,25 +11,26 @@
* This file contains the ARM-specific time handling details:
* reading the RTC at bootup, etc...
*/
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/errno.h>
#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/time.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/profile.h>
#include <linux/sched.h>
+#include <linux/sched_clock.h>
#include <linux/smp.h>
+#include <linux/time.h>
#include <linux/timex.h>
-#include <linux/errno.h>
-#include <linux/profile.h>
#include <linux/timer.h>
-#include <linux/clocksource.h>
-#include <linux/irq.h>
-#include <linux/sched_clock.h>
-#include <asm/thread_info.h>
-#include <asm/stacktrace.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
+#include <asm/stacktrace.h>
+#include <asm/thread_info.h>
#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || \
defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE)
@@ -116,8 +117,12 @@ int __init register_persistent_clock(clock_access_fn read_boot,
void __init time_init(void)
{
- if (machine_desc->init_time)
+ if (machine_desc->init_time) {
machine_desc->init_time();
- else
+ } else {
+#ifdef CONFIG_COMMON_CLK
+ of_clk_init(NULL);
+#endif
clocksource_of_init();
+ }
}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 8fcda140358d..6125f259b7b5 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -34,6 +34,7 @@
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
+#include <asm/opcodes.h>
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
@@ -341,15 +342,17 @@ void arm_notify_die(const char *str, struct pt_regs *regs,
int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
- unsigned short bkpt;
+ u16 bkpt;
+ u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
- unsigned long bkpt;
+ u32 bkpt;
+ u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif
if (probe_kernel_address((unsigned *)pc, bkpt))
return 0;
- return bkpt == BUG_INSTR_VALUE;
+ return bkpt == insn;
}
#endif
@@ -402,25 +405,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
if (thumb_mode(regs)) {
- instr = ((u16 *)pc)[0];
+ instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
if (is_wide_instruction(instr)) {
- instr <<= 16;
- instr |= ((u16 *)pc)[1];
+ u16 inst2;
+ inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
+ instr = __opcode_thumb32_compose(instr, inst2);
}
} else
#endif
- instr = *(u32 *) pc;
+ instr = __mem_to_opcode_arm(*(u32 *) pc);
} else if (thumb_mode(regs)) {
if (get_user(instr, (u16 __user *)pc))
goto die_sig;
+ instr = __mem_to_opcode_thumb16(instr);
if (is_wide_instruction(instr)) {
unsigned int instr2;
if (get_user(instr2, (u16 __user *)pc+1))
goto die_sig;
- instr <<= 16;
- instr |= instr2;
+ instr2 = __mem_to_opcode_thumb16(instr2);
+ instr = __opcode_thumb32_compose(instr, instr2);
}
} else if (get_user(instr, (u32 __user *)pc)) {
+ instr = __mem_to_opcode_arm(instr);
goto die_sig;
}
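
Editorial note: the traps.c hunk replaces the open-coded 'instr <<= 16; instr |= second' sequence with __opcode_thumb32_compose(), keeping the composition in one well-defined place. A one-liner showing the assumed semantics:

/* Assumed semantics of __opcode_thumb32_compose(first, second): the first
 * halfword of a 32-bit Thumb-2 instruction becomes the upper 16 bits and
 * the second halfword the lower 16 bits. */
#define thumb32_compose_sketch(first, second) \
	((((unsigned int)(first)) << 16) | ((second) & 0xffffu))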
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index ebf5015508b5..466bd299b1a8 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
bool "Kernel-based Virtual Machine (KVM) support"
select PREEMPT_NOTIFIERS
select ANON_INODES
+ select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_MMIO
select KVM_ARM_HOST
depends on ARM_VIRT_EXT && ARM_LPAE
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index d99bee4950e5..789bca9e64a7 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -19,6 +19,6 @@ kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
-obj-y += coproc.o coproc_a15.o mmio.o psci.o perf.o
+obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9c697db2787e..2a700e00528d 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -65,7 +65,7 @@ static bool vgic_present;
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
BUG_ON(preemptible());
- __get_cpu_var(kvm_arm_running_vcpu) = vcpu;
+ __this_cpu_write(kvm_arm_running_vcpu, vcpu);
}
/**
@@ -75,7 +75,7 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
BUG_ON(preemptible());
- return __get_cpu_var(kvm_arm_running_vcpu);
+ return __this_cpu_read(kvm_arm_running_vcpu);
}
/**
@@ -152,12 +152,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages)
{
return 0;
}
@@ -797,6 +798,19 @@ long kvm_arch_vm_ioctl(struct file *filp,
return -EFAULT;
return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
}
+ case KVM_ARM_PREFERRED_TARGET: {
+ int err;
+ struct kvm_vcpu_init init;
+
+ err = kvm_vcpu_preferred_target(&init);
+ if (err)
+ return err;
+
+ if (copy_to_user(argp, &init, sizeof(init)))
+ return -EFAULT;
+
+ return 0;
+ }
default:
return -EINVAL;
}
@@ -815,7 +829,7 @@ static void cpu_init_hyp_mode(void *dummy)
boot_pgd_ptr = kvm_mmu_get_boot_httbr();
pgd_ptr = kvm_mmu_get_httbr();
- stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
+ stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
vector_ptr = (unsigned long)__kvm_hyp_vector;
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index db9cf692d4dd..78c0885d6501 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -71,6 +71,98 @@ int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+ /*
+ * Compute guest MPIDR. We build a virtual cluster out of the
+ * vcpu_id, but we read the 'U' bit from the underlying
+ * hardware directly.
+ */
+ vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
+ ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
+ (vcpu->vcpu_id & 3));
+}
+
+/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
+static bool access_actlr(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+
+ *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
+ return true;
+}
+
+/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
+static bool access_cbar(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p);
+ return read_zero(vcpu, p);
+}
+
+/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
+static bool access_l2ctlr(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+
+ *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
+ return true;
+}
+
+static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+ u32 l2ctlr, ncores;
+
+ asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
+ l2ctlr &= ~(3 << 24);
+ ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
+ /* How many cores in the current cluster and the next ones */
+ ncores -= (vcpu->vcpu_id & ~3);
+ /* Cap it to the maximum number of cores in a single cluster */
+ ncores = min(ncores, 3U);
+ l2ctlr |= (ncores & 3) << 24;
+
+ vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
+}
+
+static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+ u32 actlr;
+
+ /* ACTLR contains SMP bit: make sure you create all cpus first! */
+ asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
+ /* Make the SMP bit consistent with the guest configuration */
+ if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
+ actlr |= 1U << 6;
+ else
+ actlr &= ~(1U << 6);
+
+ vcpu->arch.cp15[c1_ACTLR] = actlr;
+}
+
+/*
+ * TRM entries: A7:4.3.50, A15:4.3.49
+ * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
+ */
+static bool access_l2ectlr(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+
+ *vcpu_reg(vcpu, p->Rt1) = 0;
+ return true;
+}
+
/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
@@ -153,10 +245,22 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
* registers preceding 32-bit ones.
*/
static const struct coproc_reg cp15_regs[] = {
+ /* MPIDR: we use VMPIDR for guest access. */
+ { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
+ NULL, reset_mpidr, c0_MPIDR },
+
/* CSSELR: swapped by interrupt.S. */
{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
NULL, reset_unknown, c0_CSSELR },
+ /* ACTLR: trapped by HCR.TAC bit. */
+ { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
+ access_actlr, reset_actlr, c1_ACTLR },
+
+ /* CPACR: swapped by interrupt.S. */
+ { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
+ NULL, reset_val, c1_CPACR, 0x00000000 },
+
/* TTBR0/TTBR1: swapped by interrupt.S. */
{ CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
{ CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
@@ -195,6 +299,13 @@ static const struct coproc_reg cp15_regs[] = {
{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
/*
+ * L2CTLR access (guest wants to know #CPUs).
+ */
+ { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
+ access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
+ { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
+
+ /*
* Dummy performance monitor implementation.
*/
{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
@@ -234,6 +345,9 @@ static const struct coproc_reg cp15_regs[] = {
/* CNTKCTL: swapped by interrupt.S. */
{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
NULL, reset_val, c14_CNTKCTL, 0x00000000 },
+
+ /* The Configuration Base Address Register. */
+ { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};
/* Target specific emulation tables */
@@ -241,6 +355,12 @@ static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
+ unsigned int i;
+
+ for (i = 1; i < table->num; i++)
+ BUG_ON(cmp_reg(&table->table[i-1],
+ &table->table[i]) >= 0);
+
target_tables[table->target] = table;
}
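
Editorial note: reset_mpidr() above packs guest vcpus into virtual clusters of four: Aff0 is vcpu_id & 3, Aff1 is vcpu_id >> 2, and the U bit is inherited from the host. A worked example assuming MPIDR_LEVEL_BITS is 8:

  vcpu_id = 5:
    Aff0 = 5 & 3   = 1
    Aff1 = 5 >> 2  = 1
    guest MPIDR = (host MPIDR & MPIDR_SMP_BITMASK) | (1 << 8) | 1
                -> the guest sees cluster 1, cpu 1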
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index cf93472b9dd6..bb0cac1410cc 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -17,101 +17,12 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kvm_host.h>
-#include <asm/cputype.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_host.h>
-#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
#include <linux/init.h>
-static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
- /*
- * Compute guest MPIDR:
- * (Even if we present only one VCPU to the guest on an SMP
- * host we don't set the U bit in the MPIDR, or vice versa, as
- * revealing the underlying hardware properties is likely to
- * be the best choice).
- */
- vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK)
- | (vcpu->vcpu_id & MPIDR_LEVEL_MASK);
-}
-
#include "coproc.h"
-/* A15 TRM 4.3.28: RO WI */
-static bool access_actlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return ignore_write(vcpu, p);
-
- *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
- return true;
-}
-
-/* A15 TRM 4.3.60: R/O. */
-static bool access_cbar(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return write_to_read_only(vcpu, p);
- return read_zero(vcpu, p);
-}
-
-/* A15 TRM 4.3.48: R/O WI. */
-static bool access_l2ctlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return ignore_write(vcpu, p);
-
- *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
- return true;
-}
-
-static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
- u32 l2ctlr, ncores;
-
- asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
- l2ctlr &= ~(3 << 24);
- ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
- l2ctlr |= (ncores & 3) << 24;
-
- vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
-}
-
-static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
- u32 actlr;
-
- /* ACTLR contains SMP bit: make sure you create all cpus first! */
- asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
- /* Make the SMP bit consistent with the guest configuration */
- if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
- actlr |= 1U << 6;
- else
- actlr &= ~(1U << 6);
-
- vcpu->arch.cp15[c1_ACTLR] = actlr;
-}
-
-/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */
-static bool access_l2ectlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return ignore_write(vcpu, p);
-
- *vcpu_reg(vcpu, p->Rt1) = 0;
- return true;
-}
-
/*
* A15-specific CP15 registers.
* CRn denotes the primary register number, but is copied to the CRm in the
@@ -121,29 +32,9 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
* registers preceding 32-bit ones.
*/
static const struct coproc_reg a15_regs[] = {
- /* MPIDR: we use VMPIDR for guest access. */
- { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
- NULL, reset_mpidr, c0_MPIDR },
-
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
NULL, reset_val, c1_SCTLR, 0x00C50078 },
- /* ACTLR: trapped by HCR.TAC bit. */
- { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
- access_actlr, reset_actlr, c1_ACTLR },
- /* CPACR: swapped by interrupt.S. */
- { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
- NULL, reset_val, c1_CPACR, 0x00000000 },
-
- /*
- * L2CTLR access (guest wants to know #CPUs).
- */
- { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
- access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
- { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
-
- /* The Configuration Base Address Register. */
- { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};
static struct kvm_coproc_target_table a15_target_table = {
@@ -154,12 +45,6 @@ static struct kvm_coproc_target_table a15_target_table = {
static int __init coproc_a15_init(void)
{
- unsigned int i;
-
- for (i = 1; i < ARRAY_SIZE(a15_regs); i++)
- BUG_ON(cmp_reg(&a15_regs[i-1],
- &a15_regs[i]) >= 0);
-
kvm_register_target_coproc_table(&a15_target_table);
return 0;
}
diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c
new file mode 100644
index 000000000000..1df767331588
--- /dev/null
+++ b/arch/arm/kvm/coproc_a7.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Copyright (C) 2013 - ARM Ltd
+ *
+ * Authors: Rusty Russell <rusty@rustcorp.au>
+ * Christoffer Dall <c.dall@virtualopensystems.com>
+ * Jonathan Austin <jonathan.austin@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/kvm_host.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
+#include <linux/init.h>
+
+#include "coproc.h"
+
+/*
+ * Cortex-A7 specific CP15 registers.
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
+ */
+static const struct coproc_reg a7_regs[] = {
+ /* SCTLR: swapped by interrupt.S. */
+ { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
+ NULL, reset_val, c1_SCTLR, 0x00C50878 },
+};
+
+static struct kvm_coproc_target_table a7_target_table = {
+ .target = KVM_ARM_TARGET_CORTEX_A7,
+ .table = a7_regs,
+ .num = ARRAY_SIZE(a7_regs),
+};
+
+static int __init coproc_a7_init(void)
+{
+ kvm_register_target_coproc_table(&a7_target_table);
+ return 0;
+}
+late_initcall(coproc_a7_init);
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index bdede9e7da51..d6c005283678 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -354,7 +354,7 @@ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
if (is_pabt) {
- /* Set DFAR and DFSR */
+ /* Set IFAR and IFSR */
vcpu->arch.cp15[c6_IFAR] = addr;
is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
/* Always give debug fault for now - should give guest a clue */
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 152d03612181..20f8d97904af 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -190,6 +190,8 @@ int __attribute_const__ kvm_target_cpu(void)
return -EINVAL;
switch (part_number) {
+ case ARM_CPU_PART_CORTEX_A7:
+ return KVM_ARM_TARGET_CORTEX_A7;
case ARM_CPU_PART_CORTEX_A15:
return KVM_ARM_TARGET_CORTEX_A15;
default:
@@ -202,7 +204,7 @@ int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
{
unsigned int i;
- /* We can only do a cortex A15 for now. */
+ /* We can only cope with guest==host and only on A15/A7 (for now). */
if (init->target != kvm_target_cpu())
return -EINVAL;
@@ -222,6 +224,26 @@ int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
return kvm_reset_vcpu(vcpu);
}
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
+{
+ int target = kvm_target_cpu();
+
+ if (target < 0)
+ return -ENODEV;
+
+ memset(init, 0, sizeof(*init));
+
+ /*
+ * For now, we don't return any features.
+ * In future, we might use features to return target
+ * specific features available for the preferred
+ * target type.
+ */
+ init->target = (__u32)target;
+
+ return 0;
+}
+
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index df4c82d47ad7..a92079011a83 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -73,23 +73,29 @@ static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
/**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * kvm_handle_wfx - handle a WFI or WFE instructions trapped in guests
* @vcpu: the vcpu pointer
* @run: the kvm_run structure pointer
*
- * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
- * halt execution of world-switches and schedule other host processes until
- * there is an incoming IRQ or FIQ to the VM.
+ * WFE: Yield the CPU and come back to this vcpu when the scheduler
+ * decides to.
+ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
+ * world-switches and schedule other host processes until there is an
+ * incoming IRQ or FIQ to the VM.
*/
-static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
trace_kvm_wfi(*vcpu_pc(vcpu));
- kvm_vcpu_block(vcpu);
+ if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE)
+ kvm_vcpu_on_spin(vcpu);
+ else
+ kvm_vcpu_block(vcpu);
+
return 1;
}
static exit_handle_fn arm_exit_handlers[] = {
- [HSR_EC_WFI] = kvm_handle_wfi,
+ [HSR_EC_WFI] = kvm_handle_wfx,
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
[HSR_EC_CP15_64] = kvm_handle_cp15_64,
[HSR_EC_CP14_MR] = kvm_handle_cp14_access,
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index b0de86b56c13..371958370de4 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -19,6 +19,7 @@
#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
+#include <linux/hugetlb.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
@@ -41,6 +42,8 @@ static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;
+#define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x))
+
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
/*
@@ -93,19 +96,29 @@ static bool page_empty(void *ptr)
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
- pmd_t *pmd_table = pmd_offset(pud, 0);
- pud_clear(pud);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- pmd_free(NULL, pmd_table);
+ if (pud_huge(*pud)) {
+ pud_clear(pud);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ } else {
+ pmd_t *pmd_table = pmd_offset(pud, 0);
+ pud_clear(pud);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ pmd_free(NULL, pmd_table);
+ }
put_page(virt_to_page(pud));
}
static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
- pte_t *pte_table = pte_offset_kernel(pmd, 0);
- pmd_clear(pmd);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- pte_free_kernel(NULL, pte_table);
+ if (kvm_pmd_huge(*pmd)) {
+ pmd_clear(pmd);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ } else {
+ pte_t *pte_table = pte_offset_kernel(pmd, 0);
+ pmd_clear(pmd);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ pte_free_kernel(NULL, pte_table);
+ }
put_page(virt_to_page(pmd));
}
@@ -136,18 +149,32 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
continue;
}
+ if (pud_huge(*pud)) {
+ /*
+ * If we are dealing with a huge pud, just clear it and
+ * move on.
+ */
+ clear_pud_entry(kvm, pud, addr);
+ addr = pud_addr_end(addr, end);
+ continue;
+ }
+
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
addr = pmd_addr_end(addr, end);
continue;
}
- pte = pte_offset_kernel(pmd, addr);
- clear_pte_entry(kvm, pte, addr);
- next = addr + PAGE_SIZE;
+ if (!kvm_pmd_huge(*pmd)) {
+ pte = pte_offset_kernel(pmd, addr);
+ clear_pte_entry(kvm, pte, addr);
+ next = addr + PAGE_SIZE;
+ }
- /* If we emptied the pte, walk back up the ladder */
- if (page_empty(pte)) {
+ /*
+ * If the pmd entry is to be cleared, walk back up the ladder
+ */
+ if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
clear_pmd_entry(kvm, pmd, addr);
next = pmd_addr_end(addr, end);
if (page_empty(pmd) && !page_empty(pud)) {
@@ -420,29 +447,71 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
kvm->arch.pgd = NULL;
}
-
-static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
- phys_addr_t addr, const pte_t *new_pte, bool iomap)
+static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ phys_addr_t addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
- pte_t *pte, old_pte;
- /* Create 2nd stage page table mapping - Level 1 */
pgd = kvm->arch.pgd + pgd_index(addr);
pud = pud_offset(pgd, addr);
if (pud_none(*pud)) {
if (!cache)
- return 0; /* ignore calls from kvm_set_spte_hva */
+ return NULL;
pmd = mmu_memory_cache_alloc(cache);
pud_populate(NULL, pud, pmd);
get_page(virt_to_page(pud));
}
- pmd = pmd_offset(pud, addr);
+ return pmd_offset(pud, addr);
+}
+
+static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
+ *cache, phys_addr_t addr, const pmd_t *new_pmd)
+{
+ pmd_t *pmd, old_pmd;
+
+ pmd = stage2_get_pmd(kvm, cache, addr);
+ VM_BUG_ON(!pmd);
+
+ /*
+ * Mapping in huge pages should only happen through a fault. If a
+ * page is merged into a transparent huge page, the individual
+ * subpages of that huge page should be unmapped through MMU
+ * notifiers before we get here.
+ *
+	 * Merging of CompoundPages is not supported; they should be split
+	 * first, unmapped, merged, and mapped back in on demand.
+ */
+ VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
+
+ old_pmd = *pmd;
+ kvm_set_pmd(pmd, *new_pmd);
+ if (pmd_present(old_pmd))
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ else
+ get_page(virt_to_page(pmd));
+ return 0;
+}
+
+static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ phys_addr_t addr, const pte_t *new_pte, bool iomap)
+{
+ pmd_t *pmd;
+ pte_t *pte, old_pte;
- /* Create 2nd stage page table mapping - Level 2 */
+ /* Create stage-2 page table mapping - Level 1 */
+ pmd = stage2_get_pmd(kvm, cache, addr);
+ if (!pmd) {
+ /*
+ * Ignore calls from kvm_set_spte_hva for unallocated
+ * address ranges.
+ */
+ return 0;
+ }
+
+ /* Create stage-2 page mappings - Level 2 */
if (pmd_none(*pmd)) {
if (!cache)
return 0; /* ignore calls from kvm_set_spte_hva */
@@ -507,16 +576,60 @@ out:
return ret;
}
+static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
+{
+ pfn_t pfn = *pfnp;
+ gfn_t gfn = *ipap >> PAGE_SHIFT;
+
+ if (PageTransCompound(pfn_to_page(pfn))) {
+ unsigned long mask;
+ /*
+ * The address we faulted on is backed by a transparent huge
+ * page. However, because we map the compound huge page and
+ * not the individual tail page, we need to transfer the
+ * refcount to the head page. We have to be careful that the
+ * THP doesn't start to split while we are adjusting the
+ * refcounts.
+ *
+ * We are sure this doesn't happen, because mmu_notifier_retry
+ * was successful and we are holding the mmu_lock, so if this
+ * THP is trying to split, it will be blocked in the mmu
+ * notifier before touching any of the pages, specifically
+ * before being able to call __split_huge_page_refcount().
+ *
+ * We can therefore safely transfer the refcount from PG_tail
+ * to PG_head and switch the pfn from a tail page to the head
+ * page accordingly.
+ */
+ mask = PTRS_PER_PMD - 1;
+ VM_BUG_ON((gfn & mask) != (pfn & mask));
+ if (pfn & mask) {
+ *ipap &= PMD_MASK;
+ kvm_release_pfn_clean(pfn);
+ pfn &= ~mask;
+ kvm_get_pfn(pfn);
+ *pfnp = pfn;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- gfn_t gfn, struct kvm_memory_slot *memslot,
+ struct kvm_memory_slot *memslot,
unsigned long fault_status)
{
- pte_t new_pte;
- pfn_t pfn;
int ret;
- bool write_fault, writable;
+ bool write_fault, writable, hugetlb = false, force_pte = false;
unsigned long mmu_seq;
+ gfn_t gfn = fault_ipa >> PAGE_SHIFT;
+ unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
+ struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+ struct vm_area_struct *vma;
+ pfn_t pfn;
write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
if (fault_status == FSC_PERM && !write_fault) {
@@ -524,6 +637,26 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
+ /* Let's check if we will get back a huge page backed by hugetlbfs */
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma_intersection(current->mm, hva, hva + 1);
+ if (is_vm_hugetlb_page(vma)) {
+ hugetlb = true;
+ gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
+ } else {
+ /*
+ * Pages belonging to VMAs not aligned to the PMD mapping
+ * granularity cannot be mapped using block descriptors even
+ * if the pages belong to a THP for the process, because the
+ * stage-2 block descriptor will cover more than a single THP
+	 * and we lose atomicity for unmapping, updates, and splits
+ * of the THP or other pages in the stage-2 block range.
+ */
+ if (vma->vm_start & ~PMD_MASK)
+ force_pte = true;
+ }
+ up_read(&current->mm->mmap_sem);
+
/* We need minimum second+third level pages */
ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
if (ret)
@@ -541,26 +674,40 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
*/
smp_rmb();
- pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
+ pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
if (is_error_pfn(pfn))
return -EFAULT;
- new_pte = pfn_pte(pfn, PAGE_S2);
- coherent_icache_guest_page(vcpu->kvm, gfn);
-
- spin_lock(&vcpu->kvm->mmu_lock);
- if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
+ spin_lock(&kvm->mmu_lock);
+ if (mmu_notifier_retry(kvm, mmu_seq))
goto out_unlock;
- if (writable) {
- kvm_set_s2pte_writable(&new_pte);
- kvm_set_pfn_dirty(pfn);
+ if (!hugetlb && !force_pte)
+ hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
+
+ if (hugetlb) {
+ pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
+ new_pmd = pmd_mkhuge(new_pmd);
+ if (writable) {
+ kvm_set_s2pmd_writable(&new_pmd);
+ kvm_set_pfn_dirty(pfn);
+ }
+ coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
+ ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
+ } else {
+ pte_t new_pte = pfn_pte(pfn, PAGE_S2);
+ if (writable) {
+ kvm_set_s2pte_writable(&new_pte);
+ kvm_set_pfn_dirty(pfn);
+ }
+ coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
+ ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
}
- stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
+
out_unlock:
- spin_unlock(&vcpu->kvm->mmu_lock);
+ spin_unlock(&kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
- return 0;
+ return ret;
}
/**
@@ -629,7 +776,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
memslot = gfn_to_memslot(vcpu->kvm, gfn);
- ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
+ ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status);
if (ret == 0)
ret = 1;
out_unlock:
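
Editorial note: transparent_hugepage_adjust() only has work to do when the faulting pfn is a THP tail page; the mask arithmetic retargets both the IPA and the pfn at the head of the block and moves the reference there. A worked example with 4K pages (PTRS_PER_PMD assumed to be 512):

  fault_ipa = 0x40253000  ->  gfn = 0x40253;  pfn = 0x88253 (tail page of a THP)
    mask        = PTRS_PER_PMD - 1 = 0x1ff
    gfn & mask  = 0x053 == pfn & mask         (the VM_BUG_ON checks this)
    *ipap       = 0x40253000 & PMD_MASK  = 0x40200000
    pfn         = 0x88253 & ~0x1ff       = 0x88200  (head page, mapped as one 2MB block)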
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 86a693a02ba3..311263124acf 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -18,6 +18,7 @@
#include <linux/kvm_host.h>
#include <linux/wait.h>
+#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>
@@ -34,22 +35,30 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
struct kvm *kvm = source_vcpu->kvm;
- struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu = NULL, *tmp;
wait_queue_head_t *wq;
unsigned long cpu_id;
+ unsigned long mpidr;
phys_addr_t target_pc;
+ int i;
cpu_id = *vcpu_reg(source_vcpu, 1);
if (vcpu_mode_is_32bit(source_vcpu))
cpu_id &= ~((u32) 0);
- if (cpu_id >= atomic_read(&kvm->online_vcpus))
+ kvm_for_each_vcpu(i, tmp, kvm) {
+ mpidr = kvm_vcpu_get_mpidr(tmp);
+ if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
+ vcpu = tmp;
+ break;
+ }
+ }
+
+ if (!vcpu)
return KVM_PSCI_RET_INVAL;
target_pc = *vcpu_reg(source_vcpu, 2);
- vcpu = kvm_get_vcpu(kvm, cpu_id);
-
wq = kvm_arch_vcpu_wq(vcpu);
if (!waitqueue_active(wq))
return KVM_PSCI_RET_INVAL;
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index c02ba4af599f..f558c073c023 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -30,16 +30,14 @@
#include <kvm/arm_arch_timer.h>
/******************************************************************************
- * Cortex-A15 Reset Values
+ * Cortex-A15 and Cortex-A7 Reset Values
*/
-static const int a15_max_cpu_idx = 3;
-
-static struct kvm_regs a15_regs_reset = {
+static struct kvm_regs cortexa_regs_reset = {
.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
};
-static const struct kvm_irq_level a15_vtimer_irq = {
+static const struct kvm_irq_level cortexa_vtimer_irq = {
{ .irq = 27 },
.level = 1,
};
@@ -62,12 +60,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
const struct kvm_irq_level *cpu_vtimer_irq;
switch (vcpu->arch.target) {
+ case KVM_ARM_TARGET_CORTEX_A7:
case KVM_ARM_TARGET_CORTEX_A15:
- if (vcpu->vcpu_id > a15_max_cpu_idx)
- return -EINVAL;
- reset_regs = &a15_regs_reset;
+ reset_regs = &cortexa_regs_reset;
vcpu->arch.midr = read_cpuid_id();
- cpu_vtimer_irq = &a15_vtimer_irq;
+ cpu_vtimer_irq = &cortexa_vtimer_irq;
break;
default:
return -ENODEV;
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index bd454b09133e..47d7338561de 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -41,7 +41,6 @@ else
endif
lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o
-lib-$(CONFIG_ARCH_SHARK) += io-shark.o
$(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
$(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index d6408d1ee543..e0c68d5bb7dc 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -10,6 +10,11 @@ UNWIND( .fnstart )
and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5
add r1, r1, r0, lsl #2 @ Get word offset
+#if __LINUX_ARM_ARCH__ >= 7
+ .arch_extension mp
+ ALT_SMP(W(pldw) [r1])
+ ALT_UP(W(nop))
+#endif
mov r3, r2, lsl r3
1: ldrex r2, [r1]
\instr r2, r2, r3
diff --git a/arch/arm/lib/io-shark.c b/arch/arm/lib/io-shark.c
deleted file mode 100644
index 824253948f51..000000000000
--- a/arch/arm/lib/io-shark.c
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * linux/arch/arm/lib/io-shark.c
- *
- * by Alexander Schulz
- *
- * derived from:
- * linux/arch/arm/lib/io-ebsa.S
- * Copyright (C) 1995, 1996 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 025f742dd4df..3e58d710013c 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -18,6 +18,7 @@
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
+#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>
@@ -40,7 +41,35 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
return 0;
pmd = pmd_offset(pud, addr);
- if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
+ if (unlikely(pmd_none(*pmd)))
+ return 0;
+
+ /*
+ * A pmd can be bad if it refers to a HugeTLB or THP page.
+ *
+ * Both THP and HugeTLB pages have the same pmd layout
+ * and should not be manipulated by the pte functions.
+ *
+ * Lock the page table for the destination and check
+ * to see that it's still huge and whether or not we will
+ * need to fault on write, or if we have a splitting THP.
+ */
+ if (unlikely(pmd_thp_or_huge(*pmd))) {
+ ptl = &current->mm->page_table_lock;
+ spin_lock(ptl);
+ if (unlikely(!pmd_thp_or_huge(*pmd)
+ || pmd_hugewillfault(*pmd)
+ || pmd_trans_splitting(*pmd))) {
+ spin_unlock(ptl);
+ return 0;
+ }
+
+ *ptep = NULL;
+ *ptlp = ptl;
+ return 1;
+ }
+
+ if (unlikely(pmd_bad(*pmd)))
return 0;
pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
@@ -94,7 +123,10 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
from += tocopy;
n -= tocopy;
- pte_unmap_unlock(pte, ptl);
+ if (pte)
+ pte_unmap_unlock(pte, ptl);
+ else
+ spin_unlock(ptl);
}
if (!atomic)
up_read(&current->mm->mmap_sem);
@@ -147,7 +179,10 @@ __clear_user_memset(void __user *addr, unsigned long n)
addr += tocopy;
n -= tocopy;
- pte_unmap_unlock(pte, ptl);
+ if (pte)
+ pte_unmap_unlock(pte, ptl);
+ else
+ spin_unlock(ptl);
}
up_read(&current->mm->mmap_sem);
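
Editorial note: after this change pin_page_for_write() has two success flavours, which is why both callers now check the returned pte before unlocking. A hedged summary of the contract, written as a comment rather than taken from the patch text:

/* pin_page_for_write(addr, &pte, &ptl) results:
 *   1, pte != NULL -> a normal pte was mapped and locked:
 *                     finish with pte_unmap_unlock(pte, ptl)
 *   1, pte == NULL -> addr is covered by a huge/THP pmd and only the
 *                     page_table_lock is held: finish with spin_unlock(ptl)
 *   0              -> nothing pinned; the caller falls back to the
 *                     regular user-access copy path */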
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 3b0a9538093c..c1b737097c95 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -98,7 +98,6 @@ obj-y += leds.o
# Power Management
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_AT91_SLOW_CLOCK) += pm_slowclock.o
-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
ifeq ($(CONFIG_PM_DEBUG),y)
CFLAGS_pm.o += -DDEBUG
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 4aad93d54d6f..25805f2f6010 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -27,6 +27,7 @@
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
+#include "pm.h"
/* --------------------------------------------------------------------
* Clocks
@@ -327,6 +328,7 @@ static void __init at91rm9200_ioremap_registers(void)
{
at91rm9200_ioremap_st(AT91RM9200_BASE_ST);
at91_ioremap_ramc(0, AT91RM9200_BASE_MC, 256);
+ at91_pm_set_standby(at91rm9200_standby);
}
static void __init at91rm9200_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index 5de6074b4f4f..f8629a3fa245 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -28,6 +28,7 @@
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
+#include "pm.h"
/* --------------------------------------------------------------------
* Clocks
@@ -342,6 +343,7 @@ static void __init at91sam9260_ioremap_registers(void)
at91sam926x_ioremap_pit(AT91SAM9260_BASE_PIT);
at91sam9_ioremap_smc(0, AT91SAM9260_BASE_SMC);
at91_ioremap_matrix(AT91SAM9260_BASE_MATRIX);
+ at91_pm_set_standby(at91sam9_sdram_standby);
}
static void __init at91sam9260_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index 0e0793241ab7..1f3867a17a28 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -27,6 +27,7 @@
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
+#include "pm.h"
/* --------------------------------------------------------------------
* Clocks
@@ -284,6 +285,7 @@ static void __init at91sam9261_ioremap_registers(void)
at91sam926x_ioremap_pit(AT91SAM9261_BASE_PIT);
at91sam9_ioremap_smc(0, AT91SAM9261_BASE_SMC);
at91_ioremap_matrix(AT91SAM9261_BASE_MATRIX);
+ at91_pm_set_standby(at91sam9_sdram_standby);
}
static void __init at91sam9261_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 629ea5fc95cf..b2a34740146a 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -465,7 +465,7 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
#if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
static struct resource lcdc_resources[] = {
[0] = {
@@ -498,7 +498,7 @@ static struct platform_device at91_lcdc_device = {
.num_resources = ARRAY_SIZE(lcdc_resources),
};
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
{
if (!data) {
return;
@@ -559,7 +559,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
platform_device_register(&at91_lcdc_device);
}
#else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
#endif
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index 6ce7d1850893..90d455d294a1 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -26,6 +26,7 @@
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
+#include "pm.h"
/* --------------------------------------------------------------------
* Clocks
@@ -321,6 +322,7 @@ static void __init at91sam9263_ioremap_registers(void)
at91sam9_ioremap_smc(0, AT91SAM9263_BASE_SMC0);
at91sam9_ioremap_smc(1, AT91SAM9263_BASE_SMC1);
at91_ioremap_matrix(AT91SAM9263_BASE_MATRIX);
+ at91_pm_set_standby(at91sam9_sdram_standby);
}
static void __init at91sam9263_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index 858c8aac2daf..4aeadddbc181 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -832,7 +832,7 @@ void __init at91_add_device_can(struct at91_can_data *data) {}
#if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
static struct resource lcdc_resources[] = {
[0] = {
@@ -859,7 +859,7 @@ static struct platform_device at91_lcdc_device = {
.num_resources = ARRAY_SIZE(lcdc_resources),
};
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
{
if (!data)
return;
@@ -891,7 +891,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
platform_device_register(&at91_lcdc_device);
}
#else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
#endif
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 474ee04d24b9..e9bf0b8f40eb 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -26,6 +26,7 @@
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
+#include "pm.h"
/* --------------------------------------------------------------------
* Clocks
@@ -370,6 +371,7 @@ static void __init at91sam9g45_ioremap_registers(void)
at91sam926x_ioremap_pit(AT91SAM9G45_BASE_PIT);
at91sam9_ioremap_smc(0, AT91SAM9G45_BASE_SMC);
at91_ioremap_matrix(AT91SAM9G45_BASE_MATRIX);
+ at91_pm_set_standby(at91_ddr_standby);
}
static void __init at91sam9g45_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index acb703e13331..cb36fa872d30 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -965,7 +965,7 @@ void __init at91_add_device_isi(struct isi_platform_data *data,
#if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
static struct resource lcdc_resources[] = {
[0] = {
@@ -991,7 +991,7 @@ static struct platform_device at91_lcdc_device = {
.num_resources = ARRAY_SIZE(lcdc_resources),
};
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
{
if (!data)
return;
@@ -1037,7 +1037,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
platform_device_register(&at91_lcdc_device);
}
#else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
#endif
diff --git a/arch/arm/mach-at91/at91sam9n12.c b/arch/arm/mach-at91/at91sam9n12.c
index c7d670d11802..2d895a297739 100644
--- a/arch/arm/mach-at91/at91sam9n12.c
+++ b/arch/arm/mach-at91/at91sam9n12.c
@@ -169,6 +169,7 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb_clk),
CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb_clk),
CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc_clk),
+ CLKDEV_CON_DEV_ID(NULL, "f0010000.ssc", &ssc_clk),
CLKDEV_CON_DEV_ID("dma_clk", "ffffec00.dma-controller", &dma_clk),
CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk),
CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk),
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index d4ec0d9a9872..88995af09c04 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -27,6 +27,7 @@
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
+#include "pm.h"
/* --------------------------------------------------------------------
* Clocks
@@ -287,6 +288,7 @@ static void __init at91sam9rl_ioremap_registers(void)
at91sam926x_ioremap_pit(AT91SAM9RL_BASE_PIT);
at91sam9_ioremap_smc(0, AT91SAM9RL_BASE_SMC);
at91_ioremap_matrix(AT91SAM9RL_BASE_MATRIX);
+ at91_pm_set_standby(at91sam9_sdram_standby);
}
static void __init at91sam9rl_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index 352468f265a9..a698bdab2cce 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -498,7 +498,7 @@ void __init at91_add_device_ac97(struct ac97c_platform_data *data) {}
#if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
static struct resource lcdc_resources[] = {
[0] = {
@@ -525,7 +525,7 @@ static struct platform_device at91_lcdc_device = {
.num_resources = ARRAY_SIZE(lcdc_resources),
};
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
{
if (!data) {
return;
@@ -557,7 +557,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
platform_device_register(&at91_lcdc_device);
}
#else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
#endif
diff --git a/arch/arm/mach-at91/board-cam60.c b/arch/arm/mach-at91/board-cam60.c
index ade948b82662..112e867c4abe 100644
--- a/arch/arm/mach-at91/board-cam60.c
+++ b/arch/arm/mach-at91/board-cam60.c
@@ -112,7 +112,7 @@ static struct spi_board_info cam60_spi_devices[] __initdata = {
/*
* MACB Ethernet device
*/
-static struct __initdata macb_platform_data cam60_macb_data = {
+static struct macb_platform_data cam60_macb_data __initdata = {
.phy_irq_pin = AT91_PIN_PB5,
.is_rmii = 0,
};
diff --git a/arch/arm/mach-at91/board-dt-rm9200.c b/arch/arm/mach-at91/board-dt-rm9200.c
index 3fcb6623a33e..3a185faee795 100644
--- a/arch/arm/mach-at91/board-dt-rm9200.c
+++ b/arch/arm/mach-at91/board-dt-rm9200.c
@@ -14,7 +14,6 @@
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <asm/setup.h>
#include <asm/irq.h>
@@ -36,11 +35,6 @@ static void __init at91rm9200_dt_init_irq(void)
of_irq_init(irq_of_match);
}
-static void __init at91rm9200_dt_device_init(void)
-{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-
static const char *at91rm9200_dt_board_compat[] __initdata = {
"atmel,at91rm9200",
NULL
@@ -52,6 +46,5 @@ DT_MACHINE_START(at91rm9200_dt, "Atmel AT91RM9200 (Device Tree)")
.handle_irq = at91_aic_handle_irq,
.init_early = at91rm9200_dt_initialize,
.init_irq = at91rm9200_dt_init_irq,
- .init_machine = at91rm9200_dt_device_init,
.dt_compat = at91rm9200_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-dt-sam9.c b/arch/arm/mach-at91/board-dt-sam9.c
index 8db30132abed..3dab868b02fa 100644
--- a/arch/arm/mach-at91/board-dt-sam9.c
+++ b/arch/arm/mach-at91/board-dt-sam9.c
@@ -13,7 +13,6 @@
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <asm/setup.h>
#include <asm/irq.h>
@@ -37,11 +36,6 @@ static void __init at91_dt_init_irq(void)
of_irq_init(irq_of_match);
}
-static void __init at91_dt_device_init(void)
-{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-
static const char *at91_dt_board_compat[] __initdata = {
"atmel,at91sam9",
NULL
@@ -54,6 +48,5 @@ DT_MACHINE_START(at91sam_dt, "Atmel AT91SAM (Device Tree)")
.handle_irq = at91_aic_handle_irq,
.init_early = at91_dt_initialize,
.init_irq = at91_dt_init_irq,
- .init_machine = at91_dt_device_init,
.dt_compat = at91_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c
index 0b153c87521d..f4f8735315da 100644
--- a/arch/arm/mach-at91/board-sam9260ek.c
+++ b/arch/arm/mach-at91/board-sam9260ek.c
@@ -28,7 +28,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/at73c213.h>
#include <linux/clk.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index d3437624ca4e..473546b9408b 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -389,7 +389,7 @@ static struct fb_monspecs at91fb_default_stn_monspecs = {
| ATMEL_LCDC_IFWIDTH_4 \
| ATMEL_LCDC_SCANMOD_SINGLE)
-static void at91_lcdc_stn_power_control(int on)
+static void at91_lcdc_stn_power_control(struct atmel_lcdfb_pdata *pdata, int on)
{
/* backlight */
if (on) { /* power up */
@@ -401,7 +401,7 @@ static void at91_lcdc_stn_power_control(int on)
}
}
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
.default_bpp = 1,
.default_dmacon = ATMEL_LCDC_DMAEN,
.default_lcdcon2 = AT91SAM9261_DEFAULT_STN_LCDCON2,
@@ -445,7 +445,7 @@ static struct fb_monspecs at91fb_default_tft_monspecs = {
| ATMEL_LCDC_DISTYPE_TFT \
| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
-static void at91_lcdc_tft_power_control(int on)
+static void at91_lcdc_tft_power_control(struct atmel_lcdfb_pdata *pdata, int on)
{
if (on)
at91_set_gpio_value(AT91_PIN_PA12, 0); /* power up */
@@ -453,7 +453,7 @@ static void at91_lcdc_tft_power_control(int on)
at91_set_gpio_value(AT91_PIN_PA12, 1); /* power down */
}
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
.lcdcon_is_backlight = true,
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN,
@@ -465,7 +465,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
#endif
#else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
#endif
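With atmel_lcdfb_info replaced by atmel_lcdfb_pdata, the board power-control hooks above now receive the platform data as context. A generic illustration of adding a context argument to such a callback follows; the struct and function names are made up (only lcdcon_is_backlight mirrors a real field), so treat it as a sketch rather than the real atmel_lcdfb interface.

/* Illustrative only: a callback that takes its platform data as context. */
#include <stdio.h>

struct lcdc_pdata {				/* loosely modelled on atmel_lcdfb_pdata */
	int lcdcon_is_backlight;
	void (*power_control)(struct lcdc_pdata *pdata, int on);
};

static void board_power_control(struct lcdc_pdata *pdata, int on)
{
	/* the board hook can now inspect the pdata it was registered with */
	printf("LCD power %s (lcdcon_is_backlight=%d)\n",
	       on ? "up" : "down", pdata->lcdcon_is_backlight);
}

int main(void)
{
	struct lcdc_pdata pdata = {
		.lcdcon_is_backlight = 1,
		.power_control = board_power_control,
	};

	pdata.power_control(&pdata, 1);
	pdata.power_control(&pdata, 0);
	return 0;
}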
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index 3284df05df14..2f931915c80c 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -27,7 +27,7 @@
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/fb.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
@@ -275,13 +275,13 @@ static struct fb_monspecs at91fb_default_monspecs = {
| ATMEL_LCDC_DISTYPE_TFT \
| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
-static void at91_lcdc_power_control(int on)
+static void at91_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on)
{
at91_set_gpio_value(AT91_PIN_PA30, on);
}
/* Driver datas */
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
.lcdcon_is_backlight = true,
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN,
@@ -292,7 +292,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
};
#else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
#endif
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 2a94896a1375..ef39078c8ce2 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -284,7 +284,7 @@ static struct fb_monspecs at91fb_default_monspecs = {
| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
/* Driver datas */
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
.lcdcon_is_backlight = true,
.default_bpp = 32,
.default_dmacon = ATMEL_LCDC_DMAEN,
@@ -295,7 +295,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
};
#else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
#endif
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index aa265dcf2128..604eecf6cd70 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -170,7 +170,7 @@ static struct fb_monspecs at91fb_default_monspecs = {
| ATMEL_LCDC_DISTYPE_TFT \
| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
-static void at91_lcdc_power_control(int on)
+static void at91_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on)
{
if (on)
at91_set_gpio_value(AT91_PIN_PC1, 0); /* power up */
@@ -179,7 +179,7 @@ static void at91_lcdc_power_control(int on)
}
/* Driver datas */
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
.lcdcon_is_backlight = true,
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN,
@@ -191,7 +191,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
};
#else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
#endif
diff --git a/arch/arm/mach-at91/board.h b/arch/arm/mach-at91/board.h
index 4a234fb2ab3b..6c08b341167d 100644
--- a/arch/arm/mach-at91/board.h
+++ b/arch/arm/mach-at91/board.h
@@ -107,8 +107,8 @@ extern void __init at91_add_device_pwm(u32 mask);
extern void __init at91_add_device_ssc(unsigned id, unsigned pins);
/* LCD Controller */
-struct atmel_lcdfb_info;
-extern void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data);
+struct atmel_lcdfb_pdata;
+extern void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data);
/* AC97 */
extern void __init at91_add_device_ac97(struct ac97c_platform_data *data);
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 15afb5d9271f..9986542e8060 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -39,6 +39,8 @@
#include "at91_rstc.h"
#include "at91_shdwc.h"
+static void (*at91_pm_standby)(void);
+
static void __init show_reset_status(void)
{
static char reset[] __initdata = "reset";
@@ -266,14 +268,8 @@ static int at91_pm_enter(suspend_state_t state)
* For ARM 926 based chips, this requirement is weaker
* as at91sam9 can access a RAM in self-refresh mode.
*/
- if (cpu_is_at91rm9200())
- at91rm9200_standby();
- else if (cpu_is_at91sam9g45())
- at91sam9g45_standby();
- else if (cpu_is_at91sam9263())
- at91sam9263_standby();
- else
- at91sam9_standby();
+ if (at91_pm_standby)
+ at91_pm_standby();
break;
case PM_SUSPEND_ON:
@@ -314,6 +310,18 @@ static const struct platform_suspend_ops at91_pm_ops = {
.end = at91_pm_end,
};
+static struct platform_device at91_cpuidle_device = {
+ .name = "cpuidle-at91",
+};
+
+void at91_pm_set_standby(void (*at91_standby)(void))
+{
+ if (at91_standby) {
+ at91_cpuidle_device.dev.platform_data = at91_standby;
+ at91_pm_standby = at91_standby;
+ }
+}
+
static int __init at91_pm_init(void)
{
#ifdef CONFIG_AT91_SLOW_CLOCK
@@ -325,6 +333,9 @@ static int __init at91_pm_init(void)
/* AT91RM9200 SDRAM low-power mode cannot be used with self-refresh. */
if (cpu_is_at91rm9200())
at91_ramc_write(0, AT91RM9200_SDRAMC_LPR, 0);
+
+ if (at91_cpuidle_device.dev.platform_data)
+ platform_device_register(&at91_cpuidle_device);
suspend_set_ops(&at91_pm_ops);
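The new at91_pm_set_standby() replaces the cpu_is_*() chain in at91_pm_enter() with a callback installed once by the SoC setup code, and the same pointer is handed to the "cpuidle-at91" platform device as platform_data. A stripped-down user-space model of the pattern, with hypothetical names:

/* Sketch of the callback-registration pattern; not kernel code. */
#include <stdio.h>

static void (*pm_standby)(void);		/* plays the role of at91_pm_standby */

static void sdram_standby(void)			/* e.g. at91sam9_sdram_standby */
{
	puts("enter self-refresh, cpu_do_idle(), restore");
}

static void pm_set_standby(void (*fn)(void))
{
	if (fn)
		pm_standby = fn;		/* would also become cpuidle platform_data */
}

static void pm_enter_standby(void)
{
	if (pm_standby)				/* no per-SoC cpu_is_*() tests */
		pm_standby();
}

int main(void)
{
	pm_set_standby(sdram_standby);		/* called once from SoC init */
	pm_enter_standby();
	return 0;
}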
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index 2f5908f0b8c5..3ed190ce062b 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -11,9 +11,13 @@
#ifndef __ARCH_ARM_MACH_AT91_PM
#define __ARCH_ARM_MACH_AT91_PM
+#include <asm/proc-fns.h>
+
#include <mach/at91_ramc.h>
#include <mach/at91rm9200_sdramc.h>
+extern void at91_pm_set_standby(void (*at91_standby)(void));
+
/*
* The AT91RM9200 goes into self-refresh mode with this command, and will
* terminate self-refresh automatically on the next SDRAM access.
@@ -45,16 +49,18 @@ static inline void at91rm9200_standby(void)
/* We manage both DDRAM/SDRAM controllers, we need more than one value to
* remember.
*/
-static inline void at91sam9g45_standby(void)
+static inline void at91_ddr_standby(void)
{
/* Those two values allow us to delay self-refresh activation
* to the maximum. */
- u32 lpr0, lpr1;
- u32 saved_lpr0, saved_lpr1;
+ u32 lpr0, lpr1 = 0;
+ u32 saved_lpr0, saved_lpr1 = 0;
- saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
- lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
- lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
+ if (at91_ramc_base[1]) {
+ saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
+ lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
+ lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
+ }
saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
@@ -62,25 +68,29 @@ static inline void at91sam9g45_standby(void)
/* self-refresh mode now */
at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
- at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
+ if (at91_ramc_base[1])
+ at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
cpu_do_idle();
at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
- at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
+ if (at91_ramc_base[1])
+ at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
}
/* We manage both DDRAM/SDRAM controllers, we need more than one value to
* remember.
*/
-static inline void at91sam9263_standby(void)
+static inline void at91sam9_sdram_standby(void)
{
- u32 lpr0, lpr1;
- u32 saved_lpr0, saved_lpr1;
+ u32 lpr0, lpr1 = 0;
+ u32 saved_lpr0, saved_lpr1 = 0;
- saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
- lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
- lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
+ if (at91_ramc_base[1]) {
+ saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
+ lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
+ lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
+ }
saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
@@ -88,27 +98,14 @@ static inline void at91sam9263_standby(void)
/* self-refresh mode now */
at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
- at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
+ if (at91_ramc_base[1])
+ at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
cpu_do_idle();
at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
- at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
-}
-
-static inline void at91sam9_standby(void)
-{
- u32 saved_lpr, lpr;
-
- saved_lpr = at91_ramc_read(0, AT91_SDRAMC_LPR);
-
- lpr = saved_lpr & ~AT91_SDRAMC_LPCB;
- at91_ramc_write(0, AT91_SDRAMC_LPR, lpr |
- AT91_SDRAMC_LPCB_SELF_REFRESH);
-
- cpu_do_idle();
-
- at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr);
+ if (at91_ramc_base[1])
+ at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
}
#endif
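at91_ddr_standby() and at91sam9_sdram_standby() now serve both single- and dual-controller chips by checking at91_ramc_base[1] before touching the second bank. The save/force-self-refresh/idle/restore sequence with an optional second controller can be modelled in plain C as below; plain variables stand in for the RAMC LPR registers and all names are illustrative.

/* Sketch: save LPR, force self-refresh, idle, restore; bank 1 is optional. */
#include <stdio.h>

#define SELF_REFRESH 0x1u

static unsigned int lpr[2];			/* stand-ins for the LPR registers */
static int have_bank1;				/* like at91_ramc_base[1] != NULL */

static void do_idle(void) { puts("cpu_do_idle()"); }

static void ram_standby(void)
{
	unsigned int saved0 = lpr[0], saved1 = 0;

	if (have_bank1)
		saved1 = lpr[1];

	lpr[0] = SELF_REFRESH;			/* self-refresh mode now */
	if (have_bank1)
		lpr[1] = SELF_REFRESH;

	do_idle();

	lpr[0] = saved0;			/* restore previous mode on wakeup */
	if (have_bank1)
		lpr[1] = saved1;
}

int main(void)
{
	have_bank1 = 1;				/* pretend a second controller exists */
	ram_standby();
	return 0;
}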
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
index b17fbcf4d9e8..094b3459c288 100644
--- a/arch/arm/mach-at91/setup.c
+++ b/arch/arm/mach-at91/setup.c
@@ -23,6 +23,7 @@
#include "at91_shdwc.h"
#include "soc.h"
#include "generic.h"
+#include "pm.h"
struct at91_init_soc __initdata at91_boot_soc;
@@ -376,15 +377,16 @@ static void at91_dt_rstc(void)
}
static struct of_device_id ramc_ids[] = {
- { .compatible = "atmel,at91rm9200-sdramc" },
- { .compatible = "atmel,at91sam9260-sdramc" },
- { .compatible = "atmel,at91sam9g45-ddramc" },
+ { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
+ { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
+ { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
{ /*sentinel*/ }
};
static void at91_dt_ramc(void)
{
struct device_node *np;
+ const struct of_device_id *of_id;
np = of_find_matching_node(NULL, ramc_ids);
if (!np)
@@ -396,6 +398,12 @@ static void at91_dt_ramc(void)
/* the controller may have 2 banks */
at91_ramc_base[1] = of_iomap(np, 1);
+ of_id = of_match_node(ramc_ids, np);
+ if (!of_id)
+ pr_warn("AT91: ramc no standby function available\n");
+ else
+ at91_pm_set_standby(of_id->data);
+
of_node_put(np);
}
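In the setup.c hunk above, the ramc_ids table now carries the standby handler in its .data field and at91_dt_ramc() fetches it with of_match_node() before calling at91_pm_set_standby(). The table-driven lookup can be illustrated in plain C as follows; the helper names are invented and only the compatible strings come from the diff.

/* Sketch: a match table whose .data-like field carries the handler. */
#include <stdio.h>
#include <string.h>

static void sdram_standby(void) { puts("sdram standby"); }
static void ddr_standby(void)   { puts("ddr standby"); }

struct ramc_match {
	const char *compatible;
	void (*standby)(void);			/* plays the role of .data */
};

static const struct ramc_match ramc_table[] = {
	{ "atmel,at91sam9260-sdramc", sdram_standby },
	{ "atmel,at91sam9g45-ddramc", ddr_standby },
	{ NULL, NULL }				/* sentinel */
};

static void (*lookup_standby(const char *compat))(void)
{
	const struct ramc_match *m;

	for (m = ramc_table; m->compatible; m++)
		if (!strcmp(m->compatible, compat))
			return m->standby;
	return NULL;
}

int main(void)
{
	void (*standby)(void) = lookup_standby("atmel,at91sam9g45-ddramc");

	if (standby)
		standby();			/* cf. at91_pm_set_standby(of_id->data) */
	else
		puts("AT91: ramc no standby function available");
	return 0;
}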
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 69d67f714a2f..9fe6d88737ed 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -1,5 +1,16 @@
config ARCH_BCM
- bool "Broadcom SoC" if ARCH_MULTI_V7
+ bool "Broadcom SoC Support"
+ depends on ARCH_MULTIPLATFORM
+ help
+ This enables support for Broadcom ARM based SoC
+ chips
+
+if ARCH_BCM
+
+menu "Broadcom SoC Selection"
+
+config ARCH_BCM_MOBILE
+ bool "Broadcom Mobile SoC" if ARCH_MULTI_V7
depends on MMU
select ARCH_REQUIRE_GPIOLIB
select ARM_ERRATA_754322
@@ -9,12 +20,17 @@ config ARCH_BCM
select CLKSRC_OF
select GENERIC_CLOCKEVENTS
select GENERIC_TIME
- select GPIO_BCM
+ select GPIO_BCM_KONA
select SPARSE_IRQ
select TICK_ONESHOT
select CACHE_L2X0
+ select HAVE_ARM_ARCH_TIMER
help
- This enables support for system based on Broadcom SoCs.
+ This enables support for systems based on Broadcom mobile SoCs.
It currently supports the 'BCM281XX' family, which includes
BCM11130, BCM11140, BCM11351, BCM28145 and
BCM28155 variants.
+
+endmenu
+
+endif
diff --git a/arch/arm/mach-bcm/Makefile b/arch/arm/mach-bcm/Makefile
index e3d03033a7e2..c2ccd5a0f772 100644
--- a/arch/arm/mach-bcm/Makefile
+++ b/arch/arm/mach-bcm/Makefile
@@ -10,6 +10,6 @@
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-obj-$(CONFIG_ARCH_BCM) := board_bcm281xx.o bcm_kona_smc.o bcm_kona_smc_asm.o kona.o
+obj-$(CONFIG_ARCH_BCM_MOBILE) := board_bcm281xx.o bcm_kona_smc.o bcm_kona_smc_asm.o kona.o
plus_sec := $(call as-instr,.arch_extension sec,+sec)
AFLAGS_bcm_kona_smc_asm.o :=-Wa,-march=armv7-a$(plus_sec)
diff --git a/arch/arm/mach-bcm/board_bcm281xx.c b/arch/arm/mach-bcm/board_bcm281xx.c
index 8d9f931164bb..cb3dc364405c 100644
--- a/arch/arm/mach-bcm/board_bcm281xx.c
+++ b/arch/arm/mach-bcm/board_bcm281xx.c
@@ -67,8 +67,7 @@ static void __init board_init(void)
static const char * const bcm11351_dt_compat[] = { "brcm,bcm11351", NULL, };
-DT_MACHINE_START(BCM11351_DT, "Broadcom Application Processor")
- .init_time = clocksource_of_init,
+DT_MACHINE_START(BCM11351_DT, "BCM281xx Broadcom Application Processor")
.init_machine = board_init,
.restart = bcm_kona_restart,
.dt_compat = bcm11351_dt_compat,
diff --git a/arch/arm/mach-bcm2835/bcm2835.c b/arch/arm/mach-bcm2835/bcm2835.c
index 40686d7ef500..d50135be0c20 100644
--- a/arch/arm/mach-bcm2835/bcm2835.c
+++ b/arch/arm/mach-bcm2835/bcm2835.c
@@ -18,7 +18,6 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk/bcm2835.h>
-#include <linux/clocksource.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -134,7 +133,6 @@ DT_MACHINE_START(BCM2835, "BCM2835")
.init_irq = bcm2835_init_irq,
.handle_irq = bcm2835_handle_irq,
.init_machine = bcm2835_init,
- .init_time = clocksource_of_init,
.restart = bcm2835_restart,
.dt_compat = bcm2835_compat
MACHINE_END
diff --git a/arch/arm/mach-clps711x/common.c b/arch/arm/mach-clps711x/common.c
index 4ca2f3ca2de4..134641d688bb 100644
--- a/arch/arm/mach-clps711x/common.c
+++ b/arch/arm/mach-clps711x/common.c
@@ -29,12 +29,12 @@
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/clk-provider.h>
+#include <linux/sched_clock.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
-#include <asm/sched_clock.h>
#include <asm/system_misc.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-clps711x/include/mach/timex.h b/arch/arm/mach-clps711x/include/mach/timex.h
deleted file mode 100644
index de6fd192d1c3..000000000000
--- a/arch/arm/mach-clps711x/include/mach/timex.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* Bogus value */
-#define CLOCK_TICK_RATE 512000
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index e026b19b23ea..a075b3e0c5c7 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -40,7 +40,6 @@ config ARCH_DAVINCI_DA850
bool "DA850/OMAP-L138/AM18x based system"
select ARCH_DAVINCI_DA8XX
select ARCH_HAS_CPUFREQ
- select CPU_FREQ_TABLE
select CP_INTC
config ARCH_DAVINCI_DA8XX
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index c4bdc0a1c36e..d1f45af7a530 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -17,22 +17,24 @@
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
+#include <linux/platform_data/gpio-davinci.h>
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/mtd-davinci-aemif.h>
+#include <linux/platform_data/spi-davinci.h>
+#include <linux/platform_data/usb-davinci.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+#include <mach/common.h>
#include <mach/cp_intc.h>
#include <mach/mux.h>
-#include <linux/platform_data/mtd-davinci.h>
#include <mach/da8xx.h>
-#include <linux/platform_data/usb-davinci.h>
-#include <linux/platform_data/mtd-davinci-aemif.h>
-#include <linux/platform_data/spi-davinci.h>
#define DA830_EVM_PHY_ID ""
/*
@@ -74,7 +76,7 @@ static int da830_evm_usb_ocic_notify(da8xx_ocic_handler_t handler)
if (handler != NULL) {
da830_evm_usb_ocic_handler = handler;
- error = request_irq(irq, da830_evm_usb_ocic_irq, IRQF_DISABLED |
+ error = request_irq(irq, da830_evm_usb_ocic_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"OHCI over-current indicator", NULL);
if (error)
@@ -591,6 +593,10 @@ static __init void da830_evm_init(void)
struct davinci_soc_info *soc_info = &davinci_soc_info;
int ret;
+ ret = da830_register_gpio();
+ if (ret)
+ pr_warn("da830_evm_init: GPIO init failed: %d\n", ret);
+
ret = da830_register_edma(da830_edma_rsv);
if (ret)
pr_warning("da830_evm_init: edma registration failed: %d\n",
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index dd1fb24521aa..e0af0eccde8f 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/platform_data/pca953x.h>
#include <linux/input.h>
#include <linux/input/tps6507x-ts.h>
@@ -28,6 +28,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/gpio-davinci.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
#include <linux/platform_data/spi-davinci.h>
@@ -38,6 +39,7 @@
#include <linux/spi/flash.h>
#include <linux/wl12xx.h>
+#include <mach/common.h>
#include <mach/cp_intc.h>
#include <mach/da8xx.h>
#include <mach/mux.h>
@@ -1437,6 +1439,10 @@ static __init void da850_evm_init(void)
{
int ret;
+ ret = da850_register_gpio();
+ if (ret)
+ pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
+
ret = pmic_tps65070_init();
if (ret)
pr_warn("%s: TPS65070 PMIC init failed: %d\n", __func__, ret);
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 42b23a3194a0..ecdc7d44fa70 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -22,15 +22,17 @@
#include <media/tvp514x.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
+#include <linux/platform_data/gpio-davinci.h>
+#include <linux/platform_data/i2c-davinci.h>
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/mmc-davinci.h>
+#include <linux/platform_data/usb-davinci.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <linux/platform_data/i2c-davinci.h>
#include <mach/serial.h>
-#include <linux/platform_data/mtd-davinci.h>
-#include <linux/platform_data/mmc-davinci.h>
-#include <linux/platform_data/usb-davinci.h>
+#include <mach/common.h>
#include "davinci.h"
@@ -375,6 +377,11 @@ static struct spi_board_info dm355_evm_spi_info[] __initconst = {
static __init void dm355_evm_init(void)
{
struct clk *aemif;
+ int ret;
+
+ ret = dm355_gpio_register();
+ if (ret)
+ pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
gpio_request(1, "dm9000");
gpio_direction_input(1);
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index 65a984c52df6..43bacbf15314 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -19,15 +19,16 @@
#include <linux/clk.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
+#include <linux/platform_data/i2c-davinci.h>
+#include <linux/platform_data/mmc-davinci.h>
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/usb-davinci.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <linux/platform_data/i2c-davinci.h>
+#include <mach/common.h>
#include <mach/serial.h>
-#include <linux/platform_data/mtd-davinci.h>
-#include <linux/platform_data/mmc-davinci.h>
-#include <linux/platform_data/usb-davinci.h>
#include "davinci.h"
@@ -234,6 +235,11 @@ static struct spi_board_info dm355_leopard_spi_info[] __initconst = {
static __init void dm355_leopard_init(void)
{
struct clk *aemif;
+ int ret;
+
+ ret = dm355_gpio_register();
+ if (ret)
+ pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
gpio_request(9, "dm9000");
gpio_direction_input(9);
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 4078ba93776b..e08a8684ead2 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -18,7 +18,7 @@
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/leds.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -743,6 +743,12 @@ static struct spi_board_info dm365_evm_spi_info[] __initconst = {
static __init void dm365_evm_init(void)
{
+ int ret;
+
+ ret = dm365_gpio_register();
+ if (ret)
+ pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
+
evm_init_i2c();
davinci_serial_init(dm365_serial_device);
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 40bb9b5b87e8..987605b78556 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -15,7 +15,7 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
@@ -754,9 +754,14 @@ static int davinci_phy_fixup(struct phy_device *phydev)
static __init void davinci_evm_init(void)
{
+ int ret;
struct clk *aemif_clk;
struct davinci_soc_info *soc_info = &davinci_soc_info;
+ ret = dm644x_gpio_register();
+ if (ret)
+ pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
+
aemif_clk = clk_get(NULL, "aemif");
clk_prepare_enable(aemif_clk);
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 2bc3651d56cc..13d0801fd6b1 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -22,7 +22,7 @@
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/i2c/pcf857x.h>
#include <media/tvp514x.h>
@@ -33,17 +33,19 @@
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/export.h>
+#include <linux/platform_data/gpio-davinci.h>
+#include <linux/platform_data/i2c-davinci.h>
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/mtd-davinci-aemif.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/common.h>
+#include <mach/irqs.h>
#include <mach/serial.h>
-#include <linux/platform_data/i2c-davinci.h>
-#include <linux/platform_data/mtd-davinci.h>
#include <mach/clock.h>
#include <mach/cdce949.h>
-#include <linux/platform_data/mtd-davinci-aemif.h>
#include "davinci.h"
#include "clock.h"
@@ -786,8 +788,13 @@ static struct edma_rsv_info dm646x_edma_rsv[] = {
static __init void evm_init(void)
{
+ int ret;
struct davinci_soc_info *soc_info = &davinci_soc_info;
+ ret = dm646x_gpio_register();
+ if (ret)
+ pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
+
evm_init_i2c();
davinci_serial_init(dm646x_serial_device);
dm646x_init_mcasp0(&dm646x_evm_snd_data[0]);
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index cd0f58730c2b..7aa105b1fd0f 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -15,7 +15,7 @@
#include <linux/mtd/partitions.h>
#include <linux/regulator/machine.h>
#include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/etherdevice.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 46f336fca803..bb680af98374 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -26,17 +26,18 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/mtd/partitions.h>
+#include <linux/platform_data/gpio-davinci.h>
+#include <linux/platform_data/i2c-davinci.h>
+#include <linux/platform_data/mmc-davinci.h>
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/usb-davinci.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/common.h>
-#include <linux/platform_data/i2c-davinci.h>
#include <mach/serial.h>
#include <mach/mux.h>
-#include <linux/platform_data/mtd-davinci.h>
-#include <linux/platform_data/mmc-davinci.h>
-#include <linux/platform_data/usb-davinci.h>
#include "davinci.h"
@@ -169,9 +170,14 @@ static struct davinci_mmc_config davinci_ntosd2_mmc_config = {
static __init void davinci_ntosd2_init(void)
{
+ int ret;
struct clk *aemif_clk;
struct davinci_soc_info *soc_info = &davinci_soc_info;
+ ret = dm644x_gpio_register();
+ if (ret)
+ pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
+
aemif_clk = clk_get(NULL, "aemif");
clk_prepare_enable(aemif_clk);
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index ab98c75cabb4..2aac51d0e853 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -13,10 +13,12 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/gpio.h>
+#include <linux/platform_data/gpio-davinci.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+#include <mach/common.h>
#include <mach/cp_intc.h>
#include <mach/da8xx.h>
#include <mach/mux.h>
@@ -211,7 +213,7 @@ static int hawk_usb_ocic_notify(da8xx_ocic_handler_t handler)
hawk_usb_ocic_handler = handler;
error = request_irq(irq, omapl138_hawk_usb_ocic_irq,
- IRQF_DISABLED | IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING,
"OHCI over-current indicator", NULL);
if (error)
@@ -290,6 +292,10 @@ static __init void omapl138_hawk_init(void)
{
int ret;
+ ret = da850_register_gpio();
+ if (ret)
+ pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
+
davinci_serial_init(da8xx_serial_device);
omapl138_hawk_config_emac();
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index d84360148100..41c7c9615791 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -26,7 +26,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index d6c746e35ad9..0813b5167e05 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -11,6 +11,7 @@
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/clk.h>
+#include <linux/platform_data/gpio-davinci.h>
#include <asm/mach/map.h>
@@ -20,7 +21,6 @@
#include <mach/common.h>
#include <mach/time.h>
#include <mach/da8xx.h>
-#include <mach/gpio-davinci.h>
#include "clock.h"
#include "mux.h"
@@ -1151,6 +1151,16 @@ static struct davinci_id da830_ids[] = {
},
};
+static struct davinci_gpio_platform_data da830_gpio_platform_data = {
+ .ngpio = 128,
+ .intc_irq_num = DA830_N_CP_INTC_IRQ,
+};
+
+int __init da830_register_gpio(void)
+{
+ return da8xx_register_gpio(&da830_gpio_platform_data);
+}
+
static struct davinci_timer_instance da830_timer_instance[2] = {
{
.base = DA8XX_TIMER64P0_BASE,
@@ -1196,10 +1206,6 @@ static struct davinci_soc_info davinci_soc_info_da830 = {
.intc_irq_prios = da830_default_priorities,
.intc_irq_num = DA830_N_CP_INTC_IRQ,
.timer_info = &da830_timer_info,
- .gpio_type = GPIO_TYPE_DAVINCI,
- .gpio_base = DA8XX_GPIO_BASE,
- .gpio_num = 128,
- .gpio_irq = IRQ_DA8XX_GPIO0,
.emac_pdata = &da8xx_emac_pdata,
};
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index f56e5fbfa2fd..352984e1528a 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/regulator/consumer.h>
+#include <linux/platform_data/gpio-davinci.h>
#include <asm/mach/map.h>
@@ -28,7 +29,6 @@
#include <mach/da8xx.h>
#include <mach/cpufreq.h>
#include <mach/pm.h>
-#include <mach/gpio-davinci.h>
#include "clock.h"
#include "mux.h"
@@ -1281,6 +1281,16 @@ int __init da850_register_vpif_capture(struct vpif_capture_config
return platform_device_register(&da850_vpif_capture_dev);
}
+static struct davinci_gpio_platform_data da850_gpio_platform_data = {
+ .ngpio = 144,
+ .intc_irq_num = DA850_N_CP_INTC_IRQ,
+};
+
+int __init da850_register_gpio(void)
+{
+ return da8xx_register_gpio(&da850_gpio_platform_data);
+}
+
static struct davinci_soc_info davinci_soc_info_da850 = {
.io_desc = da850_io_desc,
.io_desc_num = ARRAY_SIZE(da850_io_desc),
@@ -1298,10 +1308,6 @@ static struct davinci_soc_info davinci_soc_info_da850 = {
.intc_irq_prios = da850_default_priorities,
.intc_irq_num = DA850_N_CP_INTC_IRQ,
.timer_info = &da850_timer_info,
- .gpio_type = GPIO_TYPE_DAVINCI,
- .gpio_base = DA8XX_GPIO_BASE,
- .gpio_num = 144,
- .gpio_irq = IRQ_DA8XX_GPIO0,
.emac_pdata = &da8xx_emac_pdata,
.sram_dma = DA8XX_SHARED_RAM_BASE,
.sram_len = SZ_128K,
diff --git a/arch/arm/mach-davinci/davinci.h b/arch/arm/mach-davinci/davinci.h
index 2ab5d577186f..2eebc4338802 100644
--- a/arch/arm/mach-davinci/davinci.h
+++ b/arch/arm/mach-davinci/davinci.h
@@ -53,6 +53,9 @@ extern void __iomem *davinci_sysmod_base;
#define DAVINCI_SYSMOD_VIRT(x) (davinci_sysmod_base + (x))
void davinci_map_sysmod(void);
+#define DAVINCI_GPIO_BASE 0x01C67000
+int davinci_gpio_register(struct resource *res, int size, void *pdata);
+
/* DM355 base addresses */
#define DM355_ASYNC_EMIF_CONTROL_BASE 0x01e10000
#define DM355_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
@@ -82,6 +85,7 @@ void dm355_init_spi0(unsigned chipselect_mask,
const struct spi_board_info *info, unsigned len);
void dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata);
int dm355_init_video(struct vpfe_config *, struct vpbe_config *);
+int dm355_gpio_register(void);
/* DM365 function declarations */
void dm365_init(void);
@@ -92,11 +96,13 @@ void dm365_init_rtc(void);
void dm365_init_spi0(unsigned chipselect_mask,
const struct spi_board_info *info, unsigned len);
int dm365_init_video(struct vpfe_config *, struct vpbe_config *);
+int dm365_gpio_register(void);
/* DM644x function declarations */
void dm644x_init(void);
void dm644x_init_asp(struct snd_platform_data *pdata);
int dm644x_init_video(struct vpfe_config *, struct vpbe_config *);
+int dm644x_gpio_register(void);
/* DM646x function declarations */
void dm646x_init(void);
@@ -106,6 +112,7 @@ int dm646x_init_edma(struct edma_rsv_info *rsv);
void dm646x_video_init(void);
void dm646x_setup_vpif(struct vpif_display_config *,
struct vpif_capture_config *);
+int dm646x_gpio_register(void);
extern struct platform_device dm365_serial_device[];
extern struct platform_device dm355_serial_device[];
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 2e473fefd71e..c46eccbbd512 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -665,6 +665,32 @@ int __init da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata)
return platform_device_register(&da8xx_lcdc_device);
}
+static struct resource da8xx_gpio_resources[] = {
+ { /* registers */
+ .start = DA8XX_GPIO_BASE,
+ .end = DA8XX_GPIO_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ { /* interrupt */
+ .start = IRQ_DA8XX_GPIO0,
+ .end = IRQ_DA8XX_GPIO8,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device da8xx_gpio_device = {
+ .name = "davinci_gpio",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(da8xx_gpio_resources),
+ .resource = da8xx_gpio_resources,
+};
+
+int __init da8xx_register_gpio(void *pdata)
+{
+ da8xx_gpio_device.dev.platform_data = pdata;
+ return platform_device_register(&da8xx_gpio_device);
+}
+
static struct resource da8xx_mmcsd0_resources[] = {
{ /* registers */
.start = DA8XX_MMCSD0_BASE,
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 111573c0aad1..3996e98f52fb 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -318,6 +318,19 @@ static void davinci_init_wdt(void)
platform_device_register(&davinci_wdt_device);
}
+static struct platform_device davinci_gpio_device = {
+ .name = "davinci_gpio",
+ .id = -1,
+};
+
+int davinci_gpio_register(struct resource *res, int size, void *pdata)
+{
+ davinci_gpio_device.resource = res;
+ davinci_gpio_device.num_resources = size;
+ davinci_gpio_device.dev.platform_data = pdata;
+ return platform_device_register(&davinci_gpio_device);
+}
+
/*-------------------------------------------------------------------------*/
/*-------------------------------------------------------------------------*/
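davinci_gpio_register() above (like da8xx_register_gpio() in devices-da8xx.c) turns the old soc_info gpio fields into a proper "davinci_gpio" platform device: each SoC supplies its MMIO/IRQ resources plus a davinci_gpio_platform_data, and the helper attaches them before registering. A rough user-space model of that hand-off, using made-up types rather than the driver-core API:

/* Sketch: stash resources and platform data in a device record, then "register". */
#include <stdio.h>

struct res { unsigned long start, end; };

struct pseudo_dev {
	const char *name;
	const struct res *resource;
	int num_resources;
	const void *platform_data;
};

static struct pseudo_dev gpio_dev = { .name = "davinci_gpio" };

static int pseudo_dev_register(const struct pseudo_dev *dev)
{
	printf("registered %s with %d resource(s)\n",
	       dev->name, dev->num_resources);
	return 0;
}

static int gpio_register(const struct res *resource, int num, const void *pdata)
{
	gpio_dev.resource = resource;
	gpio_dev.num_resources = num;
	gpio_dev.platform_data = pdata;
	return pseudo_dev_register(&gpio_dev);
}

int main(void)
{
	static const struct res example_res[] = {
		{ 0x01c67000, 0x01c67fff },	/* e.g. DAVINCI_GPIO_BASE .. +4K-1 */
		{ 1, 7 },			/* an IRQ range, values illustrative */
	};
	int ngpio = 104;			/* stands in for davinci_gpio_platform_data */

	return gpio_register(example_res, 2, &ngpio);
}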
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 3eaa5f6b2160..ef9ff1fb6f52 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -13,8 +13,10 @@
#include <linux/serial_8250.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-
#include <linux/spi/spi.h>
+#include <linux/platform_data/edma.h>
+#include <linux/platform_data/gpio-davinci.h>
+#include <linux/platform_data/spi-davinci.h>
#include <asm/mach/map.h>
@@ -25,9 +27,6 @@
#include <mach/time.h>
#include <mach/serial.h>
#include <mach/common.h>
-#include <linux/platform_data/spi-davinci.h>
-#include <mach/gpio-davinci.h>
-#include <linux/platform_data/edma.h>
#include "davinci.h"
#include "clock.h"
@@ -886,6 +885,30 @@ static struct platform_device dm355_vpbe_dev = {
},
};
+static struct resource dm355_gpio_resources[] = {
+ { /* registers */
+ .start = DAVINCI_GPIO_BASE,
+ .end = DAVINCI_GPIO_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ { /* interrupt */
+ .start = IRQ_DM355_GPIOBNK0,
+ .end = IRQ_DM355_GPIOBNK6,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct davinci_gpio_platform_data dm355_gpio_platform_data = {
+ .ngpio = 104,
+ .intc_irq_num = DAVINCI_N_AINTC_IRQ,
+};
+
+int __init dm355_gpio_register(void)
+{
+ return davinci_gpio_register(dm355_gpio_resources,
+ sizeof(dm355_gpio_resources),
+ &dm355_gpio_platform_data);
+}
/*----------------------------------------------------------------------*/
static struct map_desc dm355_io_desc[] = {
@@ -1005,10 +1028,6 @@ static struct davinci_soc_info davinci_soc_info_dm355 = {
.intc_irq_prios = dm355_default_priorities,
.intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm355_timer_info,
- .gpio_type = GPIO_TYPE_DAVINCI,
- .gpio_base = DAVINCI_GPIO_BASE,
- .gpio_num = 104,
- .gpio_irq = IRQ_DM355_GPIOBNK0,
.sram_dma = 0x00010000,
.sram_len = SZ_32K,
};
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index c29e324eb0bb..1511a0680f9a 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -19,6 +19,9 @@
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/edma.h>
+#include <linux/platform_data/gpio-davinci.h>
+#include <linux/platform_data/keyscan-davinci.h>
+#include <linux/platform_data/spi-davinci.h>
#include <asm/mach/map.h>
@@ -29,9 +32,6 @@
#include <mach/time.h>
#include <mach/serial.h>
#include <mach/common.h>
-#include <linux/platform_data/keyscan-davinci.h>
-#include <linux/platform_data/spi-davinci.h>
-#include <mach/gpio-davinci.h>
#include "davinci.h"
#include "clock.h"
@@ -698,6 +698,32 @@ void __init dm365_init_spi0(unsigned chipselect_mask,
platform_device_register(&dm365_spi0_device);
}
+static struct resource dm365_gpio_resources[] = {
+ { /* registers */
+ .start = DAVINCI_GPIO_BASE,
+ .end = DAVINCI_GPIO_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ { /* interrupt */
+ .start = IRQ_DM365_GPIO0,
+ .end = IRQ_DM365_GPIO7,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct davinci_gpio_platform_data dm365_gpio_platform_data = {
+ .ngpio = 104,
+ .intc_irq_num = DAVINCI_N_AINTC_IRQ,
+ .gpio_unbanked = 8,
+};
+
+int __init dm365_gpio_register(void)
+{
+ return davinci_gpio_register(dm365_gpio_resources,
+ sizeof(dm365_gpio_resources),
+ &dm365_gpio_platform_data);
+}
+
static struct emac_platform_data dm365_emac_pdata = {
.ctrl_reg_offset = DM365_EMAC_CNTRL_OFFSET,
.ctrl_mod_reg_offset = DM365_EMAC_CNTRL_MOD_OFFSET,
@@ -1105,11 +1131,6 @@ static struct davinci_soc_info davinci_soc_info_dm365 = {
.intc_irq_prios = dm365_default_priorities,
.intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm365_timer_info,
- .gpio_type = GPIO_TYPE_DAVINCI,
- .gpio_base = DAVINCI_GPIO_BASE,
- .gpio_num = 104,
- .gpio_irq = IRQ_DM365_GPIO0,
- .gpio_unbanked = 8, /* really 16 ... skip muxed GPIOs */
.emac_pdata = &dm365_emac_pdata,
.sram_dma = 0x00010000,
.sram_len = SZ_32K,
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 4f74682293d6..143a3217e8ef 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -13,6 +13,7 @@
#include <linux/serial_8250.h>
#include <linux/platform_device.h>
#include <linux/platform_data/edma.h>
+#include <linux/platform_data/gpio-davinci.h>
#include <asm/mach/map.h>
@@ -23,7 +24,6 @@
#include <mach/time.h>
#include <mach/serial.h>
#include <mach/common.h>
-#include <mach/gpio-davinci.h>
#include "davinci.h"
#include "clock.h"
@@ -771,6 +771,30 @@ static struct platform_device dm644x_vpbe_dev = {
},
};
+static struct resource dm644_gpio_resources[] = {
+ { /* registers */
+ .start = DAVINCI_GPIO_BASE,
+ .end = DAVINCI_GPIO_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ { /* interrupt */
+ .start = IRQ_GPIOBNK0,
+ .end = IRQ_GPIOBNK4,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct davinci_gpio_platform_data dm644_gpio_platform_data = {
+ .ngpio = 71,
+ .intc_irq_num = DAVINCI_N_AINTC_IRQ,
+};
+
+int __init dm644x_gpio_register(void)
+{
+ return davinci_gpio_register(dm644_gpio_resources,
+ sizeof(dm644_gpio_resources),
+ &dm644_gpio_platform_data);
+}
/*----------------------------------------------------------------------*/
static struct map_desc dm644x_io_desc[] = {
@@ -897,10 +921,6 @@ static struct davinci_soc_info davinci_soc_info_dm644x = {
.intc_irq_prios = dm644x_default_priorities,
.intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm644x_timer_info,
- .gpio_type = GPIO_TYPE_DAVINCI,
- .gpio_base = DAVINCI_GPIO_BASE,
- .gpio_num = 71,
- .gpio_irq = IRQ_GPIOBNK0,
.emac_pdata = &dm644x_emac_pdata,
.sram_dma = 0x00008000,
.sram_len = SZ_16K,
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 68f8d1f1aca1..2a73f299c1d0 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -14,6 +14,7 @@
#include <linux/serial_8250.h>
#include <linux/platform_device.h>
#include <linux/platform_data/edma.h>
+#include <linux/platform_data/gpio-davinci.h>
#include <asm/mach/map.h>
@@ -24,7 +25,6 @@
#include <mach/time.h>
#include <mach/serial.h>
#include <mach/common.h>
-#include <mach/gpio-davinci.h>
#include "davinci.h"
#include "clock.h"
@@ -748,6 +748,30 @@ static struct platform_device vpif_capture_dev = {
.num_resources = ARRAY_SIZE(vpif_capture_resource),
};
+static struct resource dm646x_gpio_resources[] = {
+ { /* registers */
+ .start = DAVINCI_GPIO_BASE,
+ .end = DAVINCI_GPIO_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ { /* interrupt */
+ .start = IRQ_DM646X_GPIOBNK0,
+ .end = IRQ_DM646X_GPIOBNK2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct davinci_gpio_platform_data dm646x_gpio_platform_data = {
+ .ngpio = 43,
+ .intc_irq_num = DAVINCI_N_AINTC_IRQ,
+};
+
+int __init dm646x_gpio_register(void)
+{
+ return davinci_gpio_register(dm646x_gpio_resources,
+ sizeof(dm646x_gpio_resources),
+ &dm646x_gpio_platform_data);
+}
/*----------------------------------------------------------------------*/
static struct map_desc dm646x_io_desc[] = {
@@ -874,10 +898,6 @@ static struct davinci_soc_info davinci_soc_info_dm646x = {
.intc_irq_prios = dm646x_default_priorities,
.intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm646x_timer_info,
- .gpio_type = GPIO_TYPE_DAVINCI,
- .gpio_base = DAVINCI_GPIO_BASE,
- .gpio_num = 43, /* Only 33 usable */
- .gpio_irq = IRQ_DM646X_GPIOBNK0,
.emac_pdata = &dm646x_emac_pdata,
.sram_dma = 0x10010000,
.sram_len = SZ_32K,
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index aae53072c0eb..39e58b48e826 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -97,6 +97,7 @@ int da8xx_register_mmcsd0(struct davinci_mmc_config *config);
int da850_register_mmcsd1(struct davinci_mmc_config *config);
void da8xx_register_mcasp(int id, struct snd_platform_data *pdata);
int da8xx_register_rtc(void);
+int da8xx_register_gpio(void *pdata);
int da850_register_cpufreq(char *async_clk);
int da8xx_register_cpuidle(void);
void __iomem *da8xx_get_mem_ctlr(void);
@@ -110,6 +111,8 @@ int da850_register_vpif_capture
void da8xx_restart(enum reboot_mode mode, const char *cmd);
void da8xx_rproc_reserve_cma(void);
int da8xx_register_rproc(void);
+int da850_register_gpio(void);
+int da830_register_gpio(void);
extern struct platform_device da8xx_serial_device[];
extern struct emac_platform_data da8xx_emac_pdata;
diff --git a/arch/arm/mach-davinci/include/mach/gpio-davinci.h b/arch/arm/mach-davinci/include/mach/gpio-davinci.h
deleted file mode 100644
index 1fdd1fd35448..000000000000
--- a/arch/arm/mach-davinci/include/mach/gpio-davinci.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * TI DaVinci GPIO Support
- *
- * Copyright (c) 2006 David Brownell
- * Copyright (c) 2007, MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __DAVINCI_DAVINCI_GPIO_H
-#define __DAVINCI_DAVINCI_GPIO_H
-
-#include <linux/io.h>
-#include <linux/spinlock.h>
-
-#include <asm-generic/gpio.h>
-
-#include <mach/irqs.h>
-#include <mach/common.h>
-
-#define DAVINCI_GPIO_BASE 0x01C67000
-
-enum davinci_gpio_type {
- GPIO_TYPE_DAVINCI = 0,
- GPIO_TYPE_TNETV107X,
-};
-
-/*
- * basic gpio routines
- *
- * board-specific init should be done by arch/.../.../board-XXX.c (maybe
- * initializing banks together) rather than boot loaders; kexec() won't
- * go through boot loaders.
- *
- * the gpio clock will be turned on when gpios are used, and you may also
- * need to pay attention to PINMUX registers to be sure those pins are
- * used as gpios, not with other peripherals.
- *
- * On-chip GPIOs are numbered 0..(DAVINCI_N_GPIO-1). For documentation,
- * and maybe for later updates, code may write GPIO(N). These may be
- * all 1.8V signals, all 3.3V ones, or a mix of the two. A given chip
- * may not support all the GPIOs in that range.
- *
- * GPIOs can also be on external chips, numbered after the ones built-in
- * to the DaVinci chip. For now, they won't be usable as IRQ sources.
- */
-#define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */
-
-/* Convert GPIO signal to GPIO pin number */
-#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
-
-struct davinci_gpio_controller {
- struct gpio_chip chip;
- int irq_base;
- spinlock_t lock;
- void __iomem *regs;
- void __iomem *set_data;
- void __iomem *clr_data;
- void __iomem *in_data;
-};
-
-/* The __gpio_to_controller() and __gpio_mask() functions inline to constants
- * with constant parameters; or in outlined code they execute at runtime.
- *
- * You'd access the controller directly when reading or writing more than
- * one gpio value at a time, and to support wired logic where the value
- * being driven by the cpu need not match the value read back.
- *
- * These are NOT part of the cross-platform GPIO interface
- */
-static inline struct davinci_gpio_controller *
-__gpio_to_controller(unsigned gpio)
-{
- struct davinci_gpio_controller *ctlrs = davinci_soc_info.gpio_ctlrs;
- int index = gpio / 32;
-
- if (!ctlrs || index >= davinci_soc_info.gpio_ctlrs_num)
- return NULL;
-
- return ctlrs + index;
-}
-
-static inline u32 __gpio_mask(unsigned gpio)
-{
- return 1 << (gpio % 32);
-}
-
-#endif /* __DAVINCI_DAVINCI_GPIO_H */
diff --git a/arch/arm/mach-davinci/include/mach/gpio.h b/arch/arm/mach-davinci/include/mach/gpio.h
deleted file mode 100644
index 960e9de47e1e..000000000000
--- a/arch/arm/mach-davinci/include/mach/gpio.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * TI DaVinci GPIO Support
- *
- * Copyright (c) 2006 David Brownell
- * Copyright (c) 2007, MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __DAVINCI_GPIO_H
-#define __DAVINCI_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#define __ARM_GPIOLIB_COMPLEX
-
-/* The inline versions use the static inlines in the driver header */
-#include "gpio-davinci.h"
-
-/*
- * The get/set/clear functions will inline when called with constant
- * parameters referencing built-in GPIOs, for low-overhead bitbanging.
- *
- * gpio_set_value() will inline only on traditional Davinci style controllers
- * with distinct set/clear registers.
- *
- * Otherwise, calls with variable parameters or referencing external
- * GPIOs (e.g. on GPIO expander chips) use outlined functions.
- */
-static inline void gpio_set_value(unsigned gpio, int value)
-{
- if (__builtin_constant_p(value) && gpio < davinci_soc_info.gpio_num) {
- struct davinci_gpio_controller *ctlr;
- u32 mask;
-
- ctlr = __gpio_to_controller(gpio);
-
- if (ctlr->set_data != ctlr->clr_data) {
- mask = __gpio_mask(gpio);
- if (value)
- __raw_writel(mask, ctlr->set_data);
- else
- __raw_writel(mask, ctlr->clr_data);
- return;
- }
- }
-
- __gpio_set_value(gpio, value);
-}
-
-/* Returns zero or nonzero; works for gpios configured as inputs OR
- * as outputs, at least for built-in GPIOs.
- *
- * NOTE: for built-in GPIOs, changes in reported values are synchronized
- * to the GPIO clock. This is easily seen after calling gpio_set_value()
- * and then immediately gpio_get_value(), where the gpio_get_value() will
- * return the old value until the GPIO clock ticks and the new value gets
- * latched.
- */
-static inline int gpio_get_value(unsigned gpio)
-{
- struct davinci_gpio_controller *ctlr;
-
- if (!__builtin_constant_p(gpio) || gpio >= davinci_soc_info.gpio_num)
- return __gpio_get_value(gpio);
-
- ctlr = __gpio_to_controller(gpio);
- return __gpio_mask(gpio) & __raw_readl(ctlr->in_data);
-}
-
-static inline int gpio_cansleep(unsigned gpio)
-{
- if (__builtin_constant_p(gpio) && gpio < davinci_soc_info.gpio_num)
- return 0;
- else
- return __gpio_cansleep(gpio);
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
- /* don't support the reverse mapping */
- return -ENOSYS;
-}
-
-#endif /* __DAVINCI_GPIO_H */
diff --git a/arch/arm/mach-davinci/include/mach/timex.h b/arch/arm/mach-davinci/include/mach/timex.h
deleted file mode 100644
index 9b885298f106..000000000000
--- a/arch/arm/mach-davinci/include/mach/timex.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * DaVinci timer defines
- *
- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#ifndef __ASM_ARCH_TIMEX_H
-#define __ASM_ARCH_TIMEX_H
-
-/*
- * Alert: Not all timers of the DaVinci family run at a frequency of 27MHz,
- * but we should be fine as long as CLOCK_TICK_RATE or LATCH (see include/
- * linux/jiffies.h) are not used directly in code. Currently none of the
- * code relevant to DaVinci platform depends on these values directly.
- */
-#define CLOCK_TICK_RATE 27000000
-
-#endif /* __ASM_ARCH_TIMEX_H__ */
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 7a55b5c95971..56c6eb5266ad 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -181,7 +181,7 @@ static struct timer_s timers[] = {
.name = "clockevent",
.opts = TIMER_OPTS_DISABLED,
.irqaction = {
- .flags = IRQF_DISABLED | IRQF_TIMER,
+ .flags = IRQF_TIMER,
.handler = timer_interrupt,
}
},
@@ -190,7 +190,7 @@ static struct timer_s timers[] = {
.period = ~0,
.opts = TIMER_OPTS_PERIODIC,
.irqaction = {
- .flags = IRQF_DISABLED | IRQF_TIMER,
+ .flags = IRQF_TIMER,
.handler = freerun_interrupt,
}
},
@@ -331,7 +331,6 @@ static void davinci_set_mode(enum clock_event_mode mode,
static struct clock_event_device clockevent_davinci = {
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .shift = 32,
.set_next_event = davinci_set_next_event,
.set_mode = davinci_set_mode,
};
@@ -397,14 +396,10 @@ void __init davinci_timer_init(void)
/* setup clockevent */
clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];
- clockevent_davinci.mult = div_sc(davinci_clock_tick_rate, NSEC_PER_SEC,
- clockevent_davinci.shift);
- clockevent_davinci.max_delta_ns =
- clockevent_delta2ns(0xfffffffe, &clockevent_davinci);
- clockevent_davinci.min_delta_ns = 50000; /* 50 usec */
clockevent_davinci.cpumask = cpumask_of(0);
- clockevents_register_device(&clockevent_davinci);
+ clockevents_config_and_register(&clockevent_davinci,
+ davinci_clock_tick_rate, 1, 0xfffffffe);
for (i=0; i< ARRAY_SIZE(timers); i++)
timer32_config(&timers[i]);
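The hunk above replaces the open-coded div_sc()/clockevent_delta2ns() setup with clockevents_config_and_register(), which derives mult, shift and the min/max delta limits from the tick rate. A minimal sketch of that registration pattern follows; my_clockevent, the callbacks and tick_rate_hz are hypothetical names, not part of this patch:

	static struct clock_event_device my_clockevent = {
		.name		= "my-timer",
		.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
		.set_next_event	= my_set_next_event,	/* hypothetical callbacks */
		.set_mode	= my_set_mode,
	};

	/* arguments: device, clock frequency in Hz, then the smallest and
	 * largest programmable delta in timer cycles */
	my_clockevent.cpumask = cpumask_of(0);
	clockevents_config_and_register(&my_clockevent, tick_rate_hz, 1, 0xfffffffe);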
diff --git a/arch/arm/mach-dove/board-dt.c b/arch/arm/mach-dove/board-dt.c
index 49f72a848423..49fa9abd09da 100644
--- a/arch/arm/mach-dove/board-dt.c
+++ b/arch/arm/mach-dove/board-dt.c
@@ -10,54 +10,15 @@
#include <linux/init.h>
#include <linux/clk-provider.h>
-#include <linux/clocksource.h>
-#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/platform_data/usb-ehci-orion.h>
#include <asm/hardware/cache-tauros2.h>
#include <asm/mach/arch.h>
#include <mach/dove.h>
#include <mach/pm.h>
#include <plat/common.h>
-#include <plat/irq.h>
#include "common.h"
-/*
- * There are still devices that doesn't even know about DT,
- * get clock gates here and add a clock lookup.
- */
-static void __init dove_legacy_clk_init(void)
-{
- struct device_node *np = of_find_compatible_node(NULL, NULL,
- "marvell,dove-gating-clock");
- struct of_phandle_args clkspec;
-
- clkspec.np = np;
- clkspec.args_count = 1;
-
- clkspec.args[0] = CLOCK_GATING_BIT_PCIE0;
- orion_clkdev_add("0", "pcie",
- of_clk_get_from_provider(&clkspec));
-
- clkspec.args[0] = CLOCK_GATING_BIT_PCIE1;
- orion_clkdev_add("1", "pcie",
- of_clk_get_from_provider(&clkspec));
-}
-
-static void __init dove_dt_time_init(void)
-{
- of_clk_init(NULL);
- clocksource_of_init();
-}
-
-static void __init dove_dt_init_early(void)
-{
- mvebu_mbus_init("marvell,dove-mbus",
- BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
- DOVE_MC_WINS_BASE, DOVE_MC_WINS_SZ);
-}
-
static void __init dove_dt_init(void)
{
pr_info("Dove 88AP510 SoC\n");
@@ -65,14 +26,7 @@ static void __init dove_dt_init(void)
#ifdef CONFIG_CACHE_TAUROS2
tauros2_init(0);
#endif
- dove_setup_cpu_wins();
-
- /* Setup clocks for legacy devices */
- dove_legacy_clk_init();
-
- /* Internal devices not ported to DT yet */
- dove_pcie_init(1, 1);
-
+ BUG_ON(mvebu_mbus_dt_init());
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
@@ -83,8 +37,6 @@ static const char * const dove_dt_board_compat[] = {
DT_MACHINE_START(DOVE_DT, "Marvell Dove (Flattened Device Tree)")
.map_io = dove_map_io,
- .init_early = dove_dt_init_early,
- .init_time = dove_dt_time_init,
.init_machine = dove_dt_init,
.restart = dove_restart,
.dt_compat = dove_dt_board_compat,
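Dropping .init_early/.init_time here works because the generic ARM time_init() fallback now handles clock and clocksource probing from the device tree. Roughly paraphrased from arch/arm/kernel/time.c of this era (treat the sketch as approximate, not authoritative):

	void __init time_init(void)
	{
		if (machine_desc->init_time) {
			machine_desc->init_time();
		} else {
			/* default DT path: run CLK_OF_DECLARE providers,
			 * then probe CLOCKSOURCE_OF_DECLARE timers */
			of_clk_init(NULL);
			clocksource_of_init();
		}
	}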
diff --git a/arch/arm/mach-dove/include/mach/timex.h b/arch/arm/mach-dove/include/mach/timex.h
deleted file mode 100644
index 251d538541db..000000000000
--- a/arch/arm/mach-dove/include/mach/timex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * arch/arm/mach-dove/include/mach/timex.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#define CLOCK_TICK_RATE (100 * HZ)
diff --git a/arch/arm/mach-ebsa110/include/mach/timex.h b/arch/arm/mach-ebsa110/include/mach/timex.h
deleted file mode 100644
index 4fb43b22a102..000000000000
--- a/arch/arm/mach-ebsa110/include/mach/timex.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-ebsa110/include/mach/timex.h
- *
- * Copyright (C) 1997, 1998 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * EBSA110 architecture timex specifications
- */
-
-/*
- * On the EBSA, the clock ticks at weird rates.
- * This is therefore not used to calculate the
- * divisor.
- */
-#define CLOCK_TICK_RATE 47894000
-
diff --git a/arch/arm/mach-efm32/Makefile b/arch/arm/mach-efm32/Makefile
new file mode 100644
index 000000000000..3a74af7413e8
--- /dev/null
+++ b/arch/arm/mach-efm32/Makefile
@@ -0,0 +1 @@
+obj-y += dtmachine.o
diff --git a/arch/arm/mach-efm32/dtmachine.c b/arch/arm/mach-efm32/dtmachine.c
new file mode 100644
index 000000000000..2367495193c1
--- /dev/null
+++ b/arch/arm/mach-efm32/dtmachine.c
@@ -0,0 +1,15 @@
+#include <linux/kernel.h>
+
+#include <asm/v7m.h>
+
+#include <asm/mach/arch.h>
+
+static const char *const efm32gg_compat[] __initconst = {
+ "efm32,dk3750",
+ NULL
+};
+
+DT_MACHINE_START(EFM32DT, "EFM32 (Device Tree Support)")
+ .dt_compat = efm32gg_compat,
+ .restart = armv7m_restart,
+MACHINE_END
diff --git a/arch/arm/mach-ep93xx/Kconfig b/arch/arm/mach-ep93xx/Kconfig
index 93e54fd4e3d5..bec570ae6494 100644
--- a/arch/arm/mach-ep93xx/Kconfig
+++ b/arch/arm/mach-ep93xx/Kconfig
@@ -5,6 +5,7 @@ menu "Cirrus EP93xx Implementation Options"
config EP93XX_SOC_COMMON
bool
default y
+ select SOC_BUS
select LEDS_GPIO_REGISTER
config CRUNCH
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index d95ee28a616a..157ba88433c9 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/sys_soc.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/io.h>
@@ -44,6 +45,7 @@
#include <linux/platform_data/spi-ep93xx.h>
#include <mach/gpio-ep93xx.h>
+#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
@@ -137,7 +139,7 @@ static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id)
static struct irqaction ep93xx_timer_irq = {
.name = "ep93xx timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = ep93xx_timer_interrupt,
};
@@ -925,8 +927,108 @@ void ep93xx_ide_release_gpio(struct platform_device *pdev)
}
EXPORT_SYMBOL(ep93xx_ide_release_gpio);
-void __init ep93xx_init_devices(void)
+/*************************************************************************
+ * EP93xx Security peripheral
+ *************************************************************************/
+
+/*
+ * The Maverick Key is 256 bits of micro fuses blown at the factory during
+ * manufacturing to uniquely identify a part.
+ *
+ * See: http://arm.cirrus.com/forum/viewtopic.php?t=486&highlight=maverick+key
+ */
+#define EP93XX_SECURITY_REG(x) (EP93XX_SECURITY_BASE + (x))
+#define EP93XX_SECURITY_SECFLG EP93XX_SECURITY_REG(0x2400)
+#define EP93XX_SECURITY_FUSEFLG EP93XX_SECURITY_REG(0x2410)
+#define EP93XX_SECURITY_UNIQID EP93XX_SECURITY_REG(0x2440)
+#define EP93XX_SECURITY_UNIQCHK EP93XX_SECURITY_REG(0x2450)
+#define EP93XX_SECURITY_UNIQVAL EP93XX_SECURITY_REG(0x2460)
+#define EP93XX_SECURITY_SECID1 EP93XX_SECURITY_REG(0x2500)
+#define EP93XX_SECURITY_SECID2 EP93XX_SECURITY_REG(0x2504)
+#define EP93XX_SECURITY_SECCHK1 EP93XX_SECURITY_REG(0x2520)
+#define EP93XX_SECURITY_SECCHK2 EP93XX_SECURITY_REG(0x2524)
+#define EP93XX_SECURITY_UNIQID2 EP93XX_SECURITY_REG(0x2700)
+#define EP93XX_SECURITY_UNIQID3 EP93XX_SECURITY_REG(0x2704)
+#define EP93XX_SECURITY_UNIQID4 EP93XX_SECURITY_REG(0x2708)
+#define EP93XX_SECURITY_UNIQID5 EP93XX_SECURITY_REG(0x270c)
+
+static char ep93xx_soc_id[33];
+
+static const char __init *ep93xx_get_soc_id(void)
{
+ unsigned int id, id2, id3, id4, id5;
+
+ if (__raw_readl(EP93XX_SECURITY_UNIQVAL) != 1)
+ return "bad Hamming code";
+
+ id = __raw_readl(EP93XX_SECURITY_UNIQID);
+ id2 = __raw_readl(EP93XX_SECURITY_UNIQID2);
+ id3 = __raw_readl(EP93XX_SECURITY_UNIQID3);
+ id4 = __raw_readl(EP93XX_SECURITY_UNIQID4);
+ id5 = __raw_readl(EP93XX_SECURITY_UNIQID5);
+
+ if (id != id2)
+ return "invalid";
+
+ snprintf(ep93xx_soc_id, sizeof(ep93xx_soc_id),
+ "%08x%08x%08x%08x", id2, id3, id4, id5);
+
+ return ep93xx_soc_id;
+}
+
+static const char __init *ep93xx_get_soc_rev(void)
+{
+ int rev = ep93xx_chip_revision();
+
+ switch (rev) {
+ case EP93XX_CHIP_REV_D0:
+ return "D0";
+ case EP93XX_CHIP_REV_D1:
+ return "D1";
+ case EP93XX_CHIP_REV_E0:
+ return "E0";
+ case EP93XX_CHIP_REV_E1:
+ return "E1";
+ case EP93XX_CHIP_REV_E2:
+ return "E2";
+ default:
+ return "unknown";
+ }
+}
+
+static const char __init *ep93xx_get_machine_name(void)
+{
+ return kasprintf(GFP_KERNEL, "%s", machine_desc->name);
+}
+
+static struct device __init *ep93xx_init_soc(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return NULL;
+
+ soc_dev_attr->machine = ep93xx_get_machine_name();
+ soc_dev_attr->family = "Cirrus Logic EP93xx";
+ soc_dev_attr->revision = ep93xx_get_soc_rev();
+ soc_dev_attr->soc_id = ep93xx_get_soc_id();
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->machine);
+ kfree(soc_dev_attr);
+ return NULL;
+ }
+
+ return soc_device_to_device(soc_dev);
+}
+
+struct device __init *ep93xx_init_devices(void)
+{
+ struct device *parent;
+
/* Disallow access to MaverickCrunch initially */
ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_CPENA);
@@ -937,6 +1039,8 @@ void __init ep93xx_init_devices(void)
EP93XX_SYSCON_DEVCFG_GONIDE |
EP93XX_SYSCON_DEVCFG_HONIDE);
+ parent = ep93xx_init_soc();
+
/* Get the GPIO working early, other devices need it */
platform_device_register(&ep93xx_gpio_device);
@@ -949,6 +1053,8 @@ void __init ep93xx_init_devices(void)
platform_device_register(&ep93xx_wdt_device);
gpio_led_register_device(-1, &ep93xx_led_data);
+
+ return parent;
}
void ep93xx_restart(enum reboot_mode mode, const char *cmd)
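Because ep93xx_init_devices() now returns the SoC device, a board file can parent further platform devices to it so they appear under the SoC node in sysfs. A hedged sketch of that use; the board hook and device name are invented for illustration:

	static void __init my_board_init_machine(void)	/* hypothetical board hook */
	{
		struct device *parent = ep93xx_init_devices();
		struct platform_device_info pdevinfo = {
			.parent	= parent,
			.name	= "my-board-device",	/* hypothetical device */
			.id	= -1,
		};

		platform_device_register_full(&pdevinfo);
	}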
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h
index e256e0baec2e..4c0bbd97f741 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/include/mach/platform.h
@@ -6,6 +6,7 @@
#include <linux/reboot.h>
+struct device;
struct i2c_gpio_platform_data;
struct i2c_board_info;
struct spi_board_info;
@@ -54,7 +55,7 @@ void ep93xx_register_ide(void);
int ep93xx_ide_acquire_gpio(struct platform_device *pdev);
void ep93xx_ide_release_gpio(struct platform_device *pdev);
-void ep93xx_init_devices(void);
+struct device *ep93xx_init_devices(void);
extern void ep93xx_timer_init(void);
void ep93xx_restart(enum reboot_mode, const char *);
diff --git a/arch/arm/mach-ep93xx/include/mach/timex.h b/arch/arm/mach-ep93xx/include/mach/timex.h
deleted file mode 100644
index 6b3503b01fa6..000000000000
--- a/arch/arm/mach-ep93xx/include/mach/timex.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/timex.h
- */
-
-#define CLOCK_TICK_RATE 983040
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 56fe819ee10b..f9d67a0acb2a 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -14,19 +14,28 @@ menu "SAMSUNG EXYNOS SoCs Support"
config ARCH_EXYNOS4
bool "SAMSUNG EXYNOS4"
default y
+ select ARM_AMBA
+ select CLKSRC_OF
+ select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
+ select CPU_EXYNOS4210
select GIC_NON_BANKED
+ select KEYBOARD_SAMSUNG if INPUT_KEYBOARD
select HAVE_ARM_SCU if SMP
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
select PINCTRL
+ select S5P_DEV_MFC
help
Samsung EXYNOS4 SoCs based systems
config ARCH_EXYNOS5
bool "SAMSUNG EXYNOS5"
+ select ARM_AMBA
+ select CLKSRC_OF
select HAVE_ARM_SCU if SMP
select HAVE_SMP
select PINCTRL
+ select USB_ARCH_HAS_XHCI
help
Samsung EXYNOS5 (Cortex-A15) SoC based systems
@@ -110,35 +119,6 @@ config SOC_EXYNOS5440
help
Enable EXYNOS5440 SoC support
-comment "Flattened Device Tree based board for EXYNOS SoCs"
-
-config MACH_EXYNOS4_DT
- bool "Samsung Exynos4 Machine using device tree"
- default y
- depends on ARCH_EXYNOS4
- select ARM_AMBA
- select CLKSRC_OF
- select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
- select CPU_EXYNOS4210
- select KEYBOARD_SAMSUNG if INPUT_KEYBOARD
- select S5P_DEV_MFC
- help
- Machine support for Samsung Exynos4 machine with device tree enabled.
- Select this if a fdt blob is available for the Exynos4 SoC based board.
- Note: This is under development and not all peripherals can be supported
- with this machine file.
-
-config MACH_EXYNOS5_DT
- bool "SAMSUNG EXYNOS5 Machine using device tree"
- default y
- depends on ARCH_EXYNOS5
- select ARM_AMBA
- select CLKSRC_OF
- select USB_ARCH_HAS_XHCI
- help
- Machine support for Samsung EXYNOS5 machine with device tree enabled.
- Select this if a fdt blob is available for the EXYNOS5 SoC based board.
-
endmenu
endif
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index 53696154aead..8930b66b4abd 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -32,5 +32,5 @@ AFLAGS_exynos-smc.o :=-Wa,-march=armv7-a$(plus_sec)
# machine support
-obj-$(CONFIG_MACH_EXYNOS4_DT) += mach-exynos4-dt.o
-obj-$(CONFIG_MACH_EXYNOS5_DT) += mach-exynos5-dt.o
+obj-$(CONFIG_ARCH_EXYNOS4) += mach-exynos4-dt.o
+obj-$(CONFIG_ARCH_EXYNOS5) += mach-exynos5-dt.o
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index ba95e5db2501..61d2906ccefb 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -26,10 +26,9 @@
#include <linux/export.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
-#include <linux/clocksource.h>
-#include <linux/clk-provider.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/platform_device.h>
#include <asm/proc-fns.h>
#include <asm/exception.h>
@@ -294,6 +293,16 @@ void exynos5_restart(enum reboot_mode mode, const char *cmd)
__raw_writel(val, addr);
}
+static struct platform_device exynos_cpuidle = {
+ .name = "exynos_cpuidle",
+ .id = -1,
+};
+
+void __init exynos_cpuidle_init(void)
+{
+ platform_device_register(&exynos_cpuidle);
+}
+
void __init exynos_init_late(void)
{
if (of_machine_is_compatible("samsung,exynos5440"))
@@ -367,12 +376,6 @@ static void __init exynos5_map_io(void)
iotable_init(exynos5250_iodesc, ARRAY_SIZE(exynos5250_iodesc));
}
-void __init exynos_init_time(void)
-{
- of_clk_init(NULL);
- clocksource_of_init();
-}
-
struct bus_type exynos_subsys = {
.name = "exynos-core",
.dev_name = "exynos-core",
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 8646a141ae46..ff9b6a9419b0 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -16,12 +16,12 @@
#include <linux/of.h>
void mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1);
-void exynos_init_time(void);
struct map_desc;
void exynos_init_io(void);
void exynos4_restart(enum reboot_mode mode, const char *cmd);
void exynos5_restart(enum reboot_mode mode, const char *cmd);
+void exynos_cpuidle_init(void);
void exynos_init_late(void);
void exynos_firmware_init(void);
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index ac139226d63c..ddbfe8709fe7 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/export.h>
#include <linux/time.h>
+#include <linux/platform_device.h>
#include <asm/proc-fns.h>
#include <asm/smp_scu.h>
@@ -192,7 +193,7 @@ static void __init exynos5_core_down_clk(void)
__raw_writel(tmp, EXYNOS5_PWR_CTRL2);
}
-static int __init exynos4_init_cpuidle(void)
+static int exynos_cpuidle_probe(struct platform_device *pdev)
{
int cpu_id, ret;
struct cpuidle_device *device;
@@ -205,7 +206,7 @@ static int __init exynos4_init_cpuidle(void)
ret = cpuidle_register_driver(&exynos4_idle_driver);
if (ret) {
- printk(KERN_ERR "CPUidle failed to register driver\n");
+ dev_err(&pdev->dev, "failed to register cpuidle driver\n");
return ret;
}
@@ -219,11 +220,20 @@ static int __init exynos4_init_cpuidle(void)
ret = cpuidle_register_device(device);
if (ret) {
- printk(KERN_ERR "CPUidle register device failed\n");
+ dev_err(&pdev->dev, "failed to register cpuidle device\n");
return ret;
}
}
return 0;
}
-device_initcall(exynos4_init_cpuidle);
+
+static struct platform_driver exynos_cpuidle_driver = {
+ .probe = exynos_cpuidle_probe,
+ .driver = {
+ .name = "exynos_cpuidle",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(exynos_cpuidle_driver);
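The probe above is reached through plain platform-bus name matching: the "exynos_cpuidle" device registered in common.c earlier in this patch binds to this driver because the device name and driver.name are identical. Condensed from the two hunks for clarity:

	/* device side, registered from machine init via exynos_cpuidle_init() */
	static struct platform_device exynos_cpuidle = {
		.name	= "exynos_cpuidle",	/* must match ... */
		.id	= -1,
	};

	/* driver side, this file */
	static struct platform_driver exynos_cpuidle_driver = {
		.probe	= exynos_cpuidle_probe,
		.driver	= { .name = "exynos_cpuidle" },	/* ... this name */
	};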
diff --git a/arch/arm/mach-exynos/include/mach/timex.h b/arch/arm/mach-exynos/include/mach/timex.h
deleted file mode 100644
index 6d138750a708..000000000000
--- a/arch/arm/mach-exynos/include/mach/timex.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* linux/arch/arm/mach-exynos4/include/mach/timex.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Copyright (c) 2003-2010 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Based on arch/arm/mach-s5p6442/include/mach/timex.h
- *
- * EXYNOS4 - time parameters
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_TIMEX_H
-#define __ASM_ARCH_TIMEX_H __FILE__
-
-/* CLOCK_TICK_RATE needs to be evaluatable by the cpp, so making it
- * a variable is useless. It seems as long as we make our timers an
- * exact multiple of HZ, any value that makes a 1->1 correspondence
- * for the time conversion functions to/from jiffies is acceptable.
-*/
-
-#define CLOCK_TICK_RATE 12000000
-
-#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c
index 0099c6c13bba..4603e6bd424b 100644
--- a/arch/arm/mach-exynos/mach-exynos4-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos4-dt.c
@@ -11,12 +11,8 @@
* published by the Free Software Foundation.
*/
-#include <linux/kernel.h>
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
-#include <linux/serial_core.h>
-#include <linux/memblock.h>
-#include <linux/clocksource.h>
#include <asm/mach/arch.h>
#include <plat/mfc.h>
@@ -25,6 +21,8 @@
static void __init exynos4_dt_machine_init(void)
{
+ exynos_cpuidle_init();
+
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
@@ -54,7 +52,6 @@ DT_MACHINE_START(EXYNOS4210_DT, "Samsung Exynos4 (Flattened Device Tree)")
.init_early = exynos_firmware_init,
.init_machine = exynos4_dt_machine_init,
.init_late = exynos_init_late,
- .init_time = exynos_init_time,
.dt_compat = exynos4_dt_compat,
.restart = exynos4_restart,
.reserve = exynos4_reserve,
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index f874b773ca13..1fe075a70c1e 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -11,14 +11,10 @@
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
-#include <linux/memblock.h>
#include <linux/io.h>
-#include <linux/clocksource.h>
#include <asm/mach/arch.h>
#include <mach/regs-pmu.h>
-
-#include <plat/cpu.h>
#include <plat/mfc.h>
#include "common.h"
@@ -47,6 +43,8 @@ static void __init exynos5_dt_machine_init(void)
}
}
+ exynos_cpuidle_init();
+
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
@@ -76,7 +74,6 @@ DT_MACHINE_START(EXYNOS5_DT, "SAMSUNG EXYNOS5 (Flattened Device Tree)")
.map_io = exynos_init_io,
.init_machine = exynos5_dt_machine_init,
.init_late = exynos_init_late,
- .init_time = exynos_init_time,
.dt_compat = exynos5_dt_compat,
.restart = exynos5_restart,
.reserve = exynos5_reserve,
diff --git a/arch/arm/mach-footbridge/include/mach/timex.h b/arch/arm/mach-footbridge/include/mach/timex.h
deleted file mode 100644
index d0fea9d6d4ab..000000000000
--- a/arch/arm/mach-footbridge/include/mach/timex.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * arch/arm/mach-footbridge/include/mach/timex.h
- *
- * Copyright (C) 1998 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * EBSA285 architecture timex specifications
- */
-
-/*
- * We assume a constant here; this satisfies the maths in linux/timex.h
- * and linux/time.h. CLOCK_TICK_RATE is actually system dependent, but
- * this must be a constant.
- */
-#define CLOCK_TICK_RATE (50000000/16)
diff --git a/arch/arm/mach-gemini/include/mach/timex.h b/arch/arm/mach-gemini/include/mach/timex.h
deleted file mode 100644
index dc5690ba975c..000000000000
--- a/arch/arm/mach-gemini/include/mach/timex.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Gemini timex specifications
- *
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-/* When AHB bus frequency is 150MHz */
-#define CLOCK_TICK_RATE 38000000
diff --git a/arch/arm/mach-gemini/time.c b/arch/arm/mach-gemini/time.c
index 21dc5a89d1c4..0a63c4d25b64 100644
--- a/arch/arm/mach-gemini/time.c
+++ b/arch/arm/mach-gemini/time.c
@@ -13,6 +13,8 @@
#include <mach/hardware.h>
#include <mach/global_reg.h>
#include <asm/mach/time.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
/*
* Register definitions for the timers
@@ -33,19 +35,89 @@
#define TIMER_3_CR_CLOCK (1 << 7)
#define TIMER_3_CR_INT (1 << 8)
+static unsigned int tick_rate;
+
+static int gemini_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ u32 cr;
+
+ cr = readl(TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
+
+ /* This may be overdoing it, feel free to test without this */
+ cr &= ~TIMER_2_CR_ENABLE;
+ cr &= ~TIMER_2_CR_INT;
+ writel(cr, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
+
+ /* Set next event */
+ writel(cycles, TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER2_BASE)));
+ writel(cycles, TIMER_LOAD(IO_ADDRESS(GEMINI_TIMER2_BASE)));
+ cr |= TIMER_2_CR_ENABLE;
+ cr |= TIMER_2_CR_INT;
+ writel(cr, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
+
+ return 0;
+}
+
+static void gemini_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ u32 period = DIV_ROUND_CLOSEST(tick_rate, HZ);
+ u32 cr;
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* Start the timer */
+ writel(period,
+ TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER2_BASE)));
+ writel(period,
+ TIMER_LOAD(IO_ADDRESS(GEMINI_TIMER2_BASE)));
+ cr = readl(TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
+ cr |= TIMER_2_CR_ENABLE;
+ cr |= TIMER_2_CR_INT;
+ writel(cr, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_RESUME:
+ /*
+ * Disable also for oneshot: the set_next() call will
+ * arm the timer instead.
+ */
+ cr = readl(TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
+ cr &= ~TIMER_2_CR_ENABLE;
+ cr &= ~TIMER_2_CR_INT;
+ writel(cr, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
+ break;
+ default:
+ break;
+ }
+}
+
+/* Use TIMER2 as clock event */
+static struct clock_event_device gemini_clockevent = {
+ .name = "TIMER2",
+ .rating = 300, /* Reasonably fast and accurate clock event */
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = gemini_timer_set_next_event,
+ .set_mode = gemini_timer_set_mode,
+};
+
/*
* IRQ handler for the timer
*/
static irqreturn_t gemini_timer_interrupt(int irq, void *dev_id)
{
- timer_tick();
+ struct clock_event_device *evt = &gemini_clockevent;
+ evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct irqaction gemini_timer_irq = {
.name = "Gemini Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER,
+ .flags = IRQF_TIMER,
.handler = gemini_timer_interrupt,
};
@@ -54,9 +126,9 @@ static struct irqaction gemini_timer_irq = {
*/
void __init gemini_timer_init(void)
{
- unsigned int tick_rate, reg_v;
+ u32 reg_v;
- reg_v = __raw_readl(IO_ADDRESS(GEMINI_GLOBAL_BASE + GLOBAL_STATUS));
+ reg_v = readl(IO_ADDRESS(GEMINI_GLOBAL_BASE + GLOBAL_STATUS));
tick_rate = REG_TO_AHB_SPEED(reg_v) * 1000000;
printk(KERN_INFO "Bus: %dMHz", tick_rate / 1000000);
@@ -82,8 +154,17 @@ void __init gemini_timer_init(void)
* Make irqs happen for the system timer
*/
setup_irq(IRQ_TIMER2, &gemini_timer_irq);
- /* Start the timer */
- __raw_writel(tick_rate / HZ, TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER2_BASE)));
- __raw_writel(tick_rate / HZ, TIMER_LOAD(IO_ADDRESS(GEMINI_TIMER2_BASE)));
- __raw_writel(TIMER_2_CR_ENABLE | TIMER_2_CR_INT, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
+
+ /* Enable and use TIMER1 as clock source */
+ writel(0xffffffff, TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER1_BASE)));
+ writel(0xffffffff, TIMER_LOAD(IO_ADDRESS(GEMINI_TIMER1_BASE)));
+ writel(TIMER_1_CR_ENABLE, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
+ if (clocksource_mmio_init(TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER1_BASE)),
+ "TIMER1", tick_rate, 300, 32,
+ clocksource_mmio_readl_up))
+ pr_err("timer: failed to initialize gemini clock source\n");
+
+ /* Configure and register the clockevent */
+ clockevents_config_and_register(&gemini_clockevent, tick_rate,
+ 1, 0xffffffff);
}
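The clocksource half of this conversion uses the generic MMIO helper: a free-running 32-bit up-counter only needs its base address, rate, rating and width. A minimal sketch with placeholder names (my_base, MY_COUNT and rate_hz are not from this patch):

	/* expose a 32-bit up-counter at my_base + MY_COUNT as a clocksource */
	if (clocksource_mmio_init(my_base + MY_COUNT, "my-timer",
				  rate_hz, 300, 32, clocksource_mmio_readl_up))
		pr_err("my-timer: failed to register clocksource\n");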
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index 8e8437dea3ce..08332d841440 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -4,15 +4,16 @@ config ARCH_HIGHBANK
select ARCH_HAS_CPUFREQ
select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_HAS_OPP
+ select ARCH_SUPPORTS_BIG_ENDIAN
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_AMBA
select ARM_ERRATA_764369
select ARM_ERRATA_775420
- select ARM_ERRATA_798181
+ select ARM_ERRATA_798181 if SMP
select ARM_GIC
+ select ARM_PSCI
select ARM_TIMER_SP804
select CACHE_L2X0
- select CLKDEV_LOOKUP
select COMMON_CLK
select CPU_V7
select GENERIC_CLOCKEVENTS
diff --git a/arch/arm/mach-highbank/Makefile b/arch/arm/mach-highbank/Makefile
index 8a1ef576d79f..55840f414d3e 100644
--- a/arch/arm/mach-highbank/Makefile
+++ b/arch/arm/mach-highbank/Makefile
@@ -3,6 +3,4 @@ obj-y := highbank.o system.o smc.o
plus_sec := $(call as-instr,.arch_extension sec,+sec)
AFLAGS_smc.o :=-Wa,-march=armv7-a$(plus_sec)
-obj-$(CONFIG_SMP) += platsmp.o
-obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h
index aea1ec5ab6f8..7ec5edcd1336 100644
--- a/arch/arm/mach-highbank/core.h
+++ b/arch/arm/mach-highbank/core.h
@@ -3,7 +3,6 @@
#include <linux/reboot.h>
-extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
extern void highbank_restart(enum reboot_mode, const char *);
extern void __iomem *scu_base_addr;
@@ -14,8 +13,5 @@ static inline void highbank_pm_init(void) {}
#endif
extern void highbank_smc1(int fn, int arg);
-extern void highbank_cpu_die(unsigned int cpu);
-
-extern struct smp_operations highbank_smp_ops;
#endif
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 8e63ccdb0de3..b3d7e5634b83 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -24,11 +24,9 @@
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/amba/bus.h>
-#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
-#include <asm/cacheflush.h>
-#include <asm/cputype.h>
-#include <asm/smp_plat.h>
+#include <asm/psci.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -49,17 +47,6 @@ static void __init highbank_scu_map_io(void)
scu_base_addr = ioremap(base, SZ_4K);
}
-#define HB_JUMP_TABLE_PHYS(cpu) (0x40 + (0x10 * (cpu)))
-#define HB_JUMP_TABLE_VIRT(cpu) phys_to_virt(HB_JUMP_TABLE_PHYS(cpu))
-
-void highbank_set_cpu_jump(int cpu, void *jump_addr)
-{
- cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
- writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));
- __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
- outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
- HB_JUMP_TABLE_PHYS(cpu) + 15);
-}
static void highbank_l2x0_disable(void)
{
@@ -83,20 +70,6 @@ static void __init highbank_init_irq(void)
}
}
-static void __init highbank_timer_init(void)
-{
- struct device_node *np;
-
- /* Map system registers */
- np = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
- sregs_base = of_iomap(np, 0);
- WARN_ON(!sregs_base);
-
- of_clk_init(NULL);
-
- clocksource_of_init();
-}
-
static void highbank_power_off(void)
{
highbank_set_pwr_shutdown();
@@ -153,8 +126,19 @@ static struct notifier_block highbank_platform_nb = {
.notifier_call = highbank_platform_notifier,
};
+static struct platform_device highbank_cpuidle_device = {
+ .name = "cpuidle-calxeda",
+};
+
static void __init highbank_init(void)
{
+ struct device_node *np;
+
+ /* Map system registers */
+ np = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
+ sregs_base = of_iomap(np, 0);
+ WARN_ON(!sregs_base);
+
pm_power_off = highbank_power_off;
highbank_pm_init();
@@ -162,6 +146,9 @@ static void __init highbank_init(void)
bus_register_notifier(&amba_bustype, &highbank_amba_nb);
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+
+ if (psci_ops.cpu_suspend)
+ platform_device_register(&highbank_cpuidle_device);
}
static const char *highbank_match[] __initconst = {
@@ -174,9 +161,7 @@ DT_MACHINE_START(HIGHBANK, "Highbank")
#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
.dma_zone_size = (4ULL * SZ_1G),
#endif
- .smp = smp_ops(highbank_smp_ops),
.init_irq = highbank_init_irq,
- .init_time = highbank_timer_init,
.init_machine = highbank_init,
.dt_compat = highbank_match,
.restart = highbank_restart,
diff --git a/arch/arm/mach-highbank/platsmp.c b/arch/arm/mach-highbank/platsmp.c
deleted file mode 100644
index 32d75cf55cbc..000000000000
--- a/arch/arm/mach-highbank/platsmp.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright 2010-2011 Calxeda, Inc.
- * Based on platsmp.c, Copyright (C) 2002 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/io.h>
-
-#include <asm/smp_scu.h>
-
-#include "core.h"
-
-extern void secondary_startup(void);
-
-static int highbank_boot_secondary(unsigned int cpu, struct task_struct *idle)
-{
- highbank_set_cpu_jump(cpu, secondary_startup);
- arch_send_wakeup_ipi_mask(cpumask_of(cpu));
- return 0;
-}
-
-/*
- * Initialise the CPU possible map early - this describes the CPUs
- * which may be present or become present in the system.
- */
-static void __init highbank_smp_init_cpus(void)
-{
- unsigned int i, ncores = 4;
-
- /* sanity check */
- if (ncores > NR_CPUS) {
- printk(KERN_WARNING
- "highbank: no. of cores (%d) greater than configured "
- "maximum of %d - clipping\n",
- ncores, NR_CPUS);
- ncores = NR_CPUS;
- }
-
- for (i = 0; i < ncores; i++)
- set_cpu_possible(i, true);
-}
-
-static void __init highbank_smp_prepare_cpus(unsigned int max_cpus)
-{
- if (scu_base_addr)
- scu_enable(scu_base_addr);
-}
-
-struct smp_operations highbank_smp_ops __initdata = {
- .smp_init_cpus = highbank_smp_init_cpus,
- .smp_prepare_cpus = highbank_smp_prepare_cpus,
- .smp_boot_secondary = highbank_boot_secondary,
-#ifdef CONFIG_HOTPLUG_CPU
- .cpu_die = highbank_cpu_die,
-#endif
-};
diff --git a/arch/arm/mach-highbank/pm.c b/arch/arm/mach-highbank/pm.c
index 04eddb4f4380..7f2bd85eb935 100644
--- a/arch/arm/mach-highbank/pm.c
+++ b/arch/arm/mach-highbank/pm.c
@@ -16,27 +16,19 @@
#include <linux/cpu_pm.h>
#include <linux/init.h>
-#include <linux/io.h>
#include <linux/suspend.h>
-#include <asm/cacheflush.h>
-#include <asm/proc-fns.h>
#include <asm/suspend.h>
-
-#include "core.h"
-#include "sysregs.h"
+#include <asm/psci.h>
static int highbank_suspend_finish(unsigned long val)
{
- outer_flush_all();
- outer_disable();
-
- highbank_set_pwr_suspend();
-
- cpu_do_idle();
+ const struct psci_power_state ps = {
+ .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+ .affinity_level = 1,
+ };
- highbank_clear_pwr_request();
- return 0;
+ return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
}
static int highbank_pm_enter(suspend_state_t state)
@@ -44,15 +36,11 @@ static int highbank_pm_enter(suspend_state_t state)
cpu_pm_enter();
cpu_cluster_pm_enter();
- highbank_set_cpu_jump(0, cpu_resume);
cpu_suspend(0, highbank_suspend_finish);
cpu_cluster_pm_exit();
cpu_pm_exit();
- highbank_smc1(0x102, 0x1);
- if (scu_base_addr)
- scu_enable(scu_base_addr);
return 0;
}
@@ -63,5 +51,8 @@ static const struct platform_suspend_ops highbank_pm_ops = {
void __init highbank_pm_init(void)
{
+ if (!psci_ops.cpu_suspend)
+ return;
+
suspend_set_ops(&highbank_pm_ops);
}
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 29a8af6922a8..15c9ae368b58 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -4,13 +4,14 @@ config ARCH_MXC
select ARM_CPU_SUSPEND if PM
select ARM_PATCH_PHYS_VIRT
select AUTO_ZRELADDR if !ZBOOT_ROM
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
+ select COMMON_CLK
select GENERIC_ALLOCATOR
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
select MIGHT_HAVE_CACHE_L2X0 if ARCH_MULTI_V6_V7
select MULTI_IRQ_HANDLER
+ select SOC_BUS
select SPARSE_IRQ
select USE_OF
help
@@ -24,7 +25,7 @@ config MXC_IRQ_PRIOR
help
Select this if you want to use prioritized IRQ handling.
This feature prevents higher priority ISR to be interrupted
- by lower priority IRQ even IRQF_DISABLED flag is not set.
+ by lower priority IRQ.
This may be useful in embedded applications, where are strong
requirements for timing.
Say N here, unless you have a specialized requirement.
@@ -92,14 +93,12 @@ config MACH_MX27
config SOC_IMX1
bool
select ARCH_MX1
- select COMMON_CLK
select CPU_ARM920T
select IMX_HAVE_IOMUX_V1
select MXC_AVIC
config SOC_IMX21
bool
- select COMMON_CLK
select CPU_ARM926T
select IMX_HAVE_IOMUX_V1
select MXC_AVIC
@@ -108,7 +107,6 @@ config SOC_IMX25
bool
select ARCH_MX25
select ARCH_MXC_IOMUX_V3
- select COMMON_CLK
select CPU_ARM926T
select MXC_AVIC
@@ -116,7 +114,6 @@ config SOC_IMX27
bool
select ARCH_HAS_CPUFREQ
select ARCH_HAS_OPP
- select COMMON_CLK
select CPU_ARM926T
select IMX_HAVE_IOMUX_V1
select MACH_MX27
@@ -124,7 +121,6 @@ config SOC_IMX27
config SOC_IMX31
bool
- select COMMON_CLK
select CPU_V6
select IMX_HAVE_PLATFORM_MXC_RNGA
select MXC_AVIC
@@ -133,7 +129,6 @@ config SOC_IMX31
config SOC_IMX35
bool
select ARCH_MXC_IOMUX_V3
- select COMMON_CLK
select CPU_V6K
select HAVE_EPIT
select MXC_AVIC
@@ -144,7 +139,6 @@ config SOC_IMX5
select ARCH_HAS_CPUFREQ
select ARCH_HAS_OPP
select ARCH_MXC_IOMUX_V3
- select COMMON_CLK
select CPU_V7
select MXC_TZIC
@@ -772,6 +766,14 @@ endchoice
comment "Device tree only"
+config SOC_IMX50
+ bool "i.MX50 support"
+ select HAVE_IMX_SRC
+ select SOC_IMX5
+
+ help
+ This enables support for Freescale i.MX50 processor.
+
config SOC_IMX53
bool "i.MX53 support"
select HAVE_IMX_SRC
@@ -791,7 +793,6 @@ config SOC_IMX6Q
select ARM_ERRATA_764369 if SMP
select ARM_ERRATA_775420
select ARM_GIC
- select COMMON_CLK
select CPU_V7
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
@@ -801,6 +802,8 @@ config SOC_IMX6Q
select HAVE_IMX_SRC
select HAVE_SMP
select MFD_SYSCON
+ select MIGHT_HAVE_PCI
+ select PCI_DOMAINS if PCI
select PINCTRL
select PINCTRL_IMX6Q
select PL310_ERRATA_588369 if CACHE_PL310
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 5383c589ad71..1789e2b31903 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -102,6 +102,8 @@ obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o
+# i.MX6SL reuses i.MX6Q code
+obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o headsmp.o
endif
# i.MX5 based machines
diff --git a/arch/arm/mach-imx/anatop.c b/arch/arm/mach-imx/anatop.c
index ad3b755abb78..4a40bbb46183 100644
--- a/arch/arm/mach-imx/anatop.c
+++ b/arch/arm/mach-imx/anatop.c
@@ -16,6 +16,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include "common.h"
+#include "hardware.h"
#define REG_SET 0x4
#define REG_CLR 0x8
@@ -26,6 +27,7 @@
#define ANADIG_USB1_CHRG_DETECT 0x1b0
#define ANADIG_USB2_CHRG_DETECT 0x210
#define ANADIG_DIGPROG 0x260
+#define ANADIG_DIGPROG_IMX6SL 0x280
#define BM_ANADIG_REG_2P5_ENABLE_WEAK_LINREG 0x40000
#define BM_ANADIG_REG_CORE_FET_ODRIVE 0x20000000
@@ -76,21 +78,38 @@ static void imx_anatop_usb_chrg_detect_disable(void)
BM_ANADIG_USB_CHRG_DETECT_CHK_CHRG_B);
}
-u32 imx_anatop_get_digprog(void)
+void __init imx_init_revision_from_anatop(void)
{
struct device_node *np;
void __iomem *anatop_base;
- static u32 digprog;
-
- if (digprog)
- return digprog;
+ unsigned int revision;
+ u32 digprog;
+ u16 offset = ANADIG_DIGPROG;
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
anatop_base = of_iomap(np, 0);
WARN_ON(!anatop_base);
- digprog = readl_relaxed(anatop_base + ANADIG_DIGPROG);
+ if (of_device_is_compatible(np, "fsl,imx6sl-anatop"))
+ offset = ANADIG_DIGPROG_IMX6SL;
+ digprog = readl_relaxed(anatop_base + offset);
+ iounmap(anatop_base);
+
+ switch (digprog & 0xff) {
+ case 0:
+ revision = IMX_CHIP_REVISION_1_0;
+ break;
+ case 1:
+ revision = IMX_CHIP_REVISION_1_1;
+ break;
+ case 2:
+ revision = IMX_CHIP_REVISION_1_2;
+ break;
+ default:
+ revision = IMX_CHIP_REVISION_UNKNOWN;
+ }
- return digprog;
+ mxc_set_cpu_type(digprog >> 16 & 0xff);
+ imx_set_soc_revision(revision);
}
void __init imx_anatop_init(void)
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index 7c0dc4540aa4..219c65e3f6cc 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -11,8 +11,12 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include "crm-regs-imx5.h"
#include "clk.h"
@@ -131,8 +135,6 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
{
int i;
- of_clk_init(NULL);
-
clk[dummy] = imx_clk_fixed("dummy", 0);
clk[ckil] = imx_obtain_fixed_clock("ckil", rate_ckil);
clk[osc] = imx_obtain_fixed_clock("osc", rate_osc);
@@ -363,6 +365,64 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */
}
+static void __init mx50_clocks_init(struct device_node *np)
+{
+ void __iomem *base;
+ unsigned long r;
+ int i, irq;
+
+ clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX53_DPLL1_BASE);
+ clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX53_DPLL2_BASE);
+ clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX53_DPLL3_BASE);
+
+ clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
+ clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
+ clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
+ clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
+ clk[usb_phy1_gate] = imx_clk_gate2("usb_phy1_gate", "usb_phy_sel", MXC_CCM_CCGR4, 10);
+ clk[usb_phy2_gate] = imx_clk_gate2("usb_phy2_gate", "usb_phy_sel", MXC_CCM_CCGR4, 12);
+ clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22);
+
+ clk[cko1_sel] = imx_clk_mux("cko1_sel", MXC_CCM_CCOSR, 0, 4,
+ mx53_cko1_sel, ARRAY_SIZE(mx53_cko1_sel));
+ clk[cko1_podf] = imx_clk_divider("cko1_podf", "cko1_sel", MXC_CCM_CCOSR, 4, 3);
+ clk[cko1] = imx_clk_gate2("cko1", "cko1_podf", MXC_CCM_CCOSR, 7);
+
+ clk[cko2_sel] = imx_clk_mux("cko2_sel", MXC_CCM_CCOSR, 16, 5,
+ mx53_cko2_sel, ARRAY_SIZE(mx53_cko2_sel));
+ clk[cko2_podf] = imx_clk_divider("cko2_podf", "cko2_sel", MXC_CCM_CCOSR, 21, 3);
+ clk[cko2] = imx_clk_gate2("cko2", "cko2_podf", MXC_CCM_CCOSR, 24);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX50 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ clk_data.clks = clk;
+ clk_data.clk_num = ARRAY_SIZE(clk);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ mx5_clocks_common_init(0, 0, 0, 0);
+
+ /* set SDHC root clock to 200MHz */
+ clk_set_rate(clk[esdhc_a_podf], 200000000);
+ clk_set_rate(clk[esdhc_b_podf], 200000000);
+
+ clk_prepare_enable(clk[iim_gate]);
+ imx_print_silicon_rev("i.MX50", IMX_CHIP_REVISION_1_1);
+ clk_disable_unprepare(clk[iim_gate]);
+
+ r = clk_round_rate(clk[usboh3_per_gate], 54000000);
+ clk_set_rate(clk[usboh3_per_gate], r);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx50-gpt");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+ irq = irq_of_parse_and_map(np, 0);
+ mxc_timer_init(base, irq);
+}
+CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);
+
int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
unsigned long rate_ckih1, unsigned long rate_ckih2)
{
@@ -465,12 +525,17 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
return 0;
}
-int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
- unsigned long rate_ckih1, unsigned long rate_ckih2)
+static void __init mx51_clocks_init_dt(struct device_node *np)
{
- int i;
+ mx51_clocks_init(0, 0, 0, 0);
+}
+CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init_dt);
+
+static void __init mx53_clocks_init(struct device_node *np)
+{
+ int i, irq;
unsigned long r;
- struct device_node *np;
+ void __iomem *base;
clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX53_DPLL1_BASE);
clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX53_DPLL2_BASE);
@@ -529,12 +594,11 @@ int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
pr_err("i.MX53 clk %d: register failed with %ld\n",
i, PTR_ERR(clk[i]));
- np = of_find_compatible_node(NULL, NULL, "fsl,imx53-ccm");
clk_data.clks = clk;
clk_data.clk_num = ARRAY_SIZE(clk);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);
+ mx5_clocks_common_init(0, 0, 0, 0);
clk_register_clkdev(clk[vpu_gate], NULL, "imx53-vpu.0");
clk_register_clkdev(clk[i2c3_gate], NULL, "imx21-i2c.2");
@@ -557,9 +621,6 @@ int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
clk_set_rate(clk[esdhc_a_podf], 200000000);
clk_set_rate(clk[esdhc_b_podf], 200000000);
- /* System timer */
- mxc_timer_init(MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), MX53_INT_GPT);
-
clk_prepare_enable(clk[iim_gate]);
imx_print_silicon_rev("i.MX53", mx53_revision());
clk_disable_unprepare(clk[iim_gate]);
@@ -567,15 +628,10 @@ int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
r = clk_round_rate(clk[usboh3_per_gate], 54000000);
clk_set_rate(clk[usboh3_per_gate], r);
- return 0;
-}
-
-int __init mx51_clocks_init_dt(void)
-{
- return mx51_clocks_init(0, 0, 0, 0);
-}
-
-int __init mx53_clocks_init_dt(void)
-{
- return mx53_clocks_init(0, 0, 0, 0);
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx53-gpt");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+ irq = irq_of_parse_and_map(np, 0);
+ mxc_timer_init(base, irq);
}
+CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);
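With CLK_OF_DECLARE the i.MX50/51/53 clock code is no longer called from board files; of_clk_init(NULL) walks the __clk_of_table section and invokes the init function for every matching node. The general pattern, with a made-up compatible string and assuming a clk_data onecell table like the one in this file:

	static void __init myvendor_ccm_init(struct device_node *np)
	{
		/* register clocks here, then expose them to DT consumers */
		of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
	}
	CLK_OF_DECLARE(myvendor_ccm, "myvendor,mysoc-ccm", myvendor_ccm_init);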
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 9181a241d3a8..edd522e8c4af 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
-#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -25,155 +24,6 @@
#include "common.h"
#include "hardware.h"
-#define CCR 0x0
-#define BM_CCR_WB_COUNT (0x7 << 16)
-#define BM_CCR_RBC_BYPASS_COUNT (0x3f << 21)
-#define BM_CCR_RBC_EN (0x1 << 27)
-
-#define CCGR0 0x68
-#define CCGR1 0x6c
-#define CCGR2 0x70
-#define CCGR3 0x74
-#define CCGR4 0x78
-#define CCGR5 0x7c
-#define CCGR6 0x80
-#define CCGR7 0x84
-
-#define CLPCR 0x54
-#define BP_CLPCR_LPM 0
-#define BM_CLPCR_LPM (0x3 << 0)
-#define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2)
-#define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5)
-#define BM_CLPCR_SBYOS (0x1 << 6)
-#define BM_CLPCR_DIS_REF_OSC (0x1 << 7)
-#define BM_CLPCR_VSTBY (0x1 << 8)
-#define BP_CLPCR_STBY_COUNT 9
-#define BM_CLPCR_STBY_COUNT (0x3 << 9)
-#define BM_CLPCR_COSC_PWRDOWN (0x1 << 11)
-#define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16)
-#define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17)
-#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19)
-#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21)
-#define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22)
-#define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23)
-#define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24)
-#define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25)
-#define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26)
-#define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27)
-
-#define CGPR 0x64
-#define BM_CGPR_CHICKEN_BIT (0x1 << 17)
-
-static void __iomem *ccm_base;
-
-void imx6q_set_chicken_bit(void)
-{
- u32 val = readl_relaxed(ccm_base + CGPR);
-
- val |= BM_CGPR_CHICKEN_BIT;
- writel_relaxed(val, ccm_base + CGPR);
-}
-
-static void imx6q_enable_rbc(bool enable)
-{
- u32 val;
- static bool last_rbc_mode;
-
- if (last_rbc_mode == enable)
- return;
- /*
- * need to mask all interrupts in GPC before
- * operating RBC configurations
- */
- imx_gpc_mask_all();
-
- /* configure RBC enable bit */
- val = readl_relaxed(ccm_base + CCR);
- val &= ~BM_CCR_RBC_EN;
- val |= enable ? BM_CCR_RBC_EN : 0;
- writel_relaxed(val, ccm_base + CCR);
-
- /* configure RBC count */
- val = readl_relaxed(ccm_base + CCR);
- val &= ~BM_CCR_RBC_BYPASS_COUNT;
- val |= enable ? BM_CCR_RBC_BYPASS_COUNT : 0;
- writel(val, ccm_base + CCR);
-
- /*
- * need to delay at least 2 cycles of CKIL(32K)
- * due to hardware design requirement, which is
- * ~61us, here we use 65us for safe
- */
- udelay(65);
-
- /* restore GPC interrupt mask settings */
- imx_gpc_restore_all();
-
- last_rbc_mode = enable;
-}
-
-static void imx6q_enable_wb(bool enable)
-{
- u32 val;
- static bool last_wb_mode;
-
- if (last_wb_mode == enable)
- return;
-
- /* configure well bias enable bit */
- val = readl_relaxed(ccm_base + CLPCR);
- val &= ~BM_CLPCR_WB_PER_AT_LPM;
- val |= enable ? BM_CLPCR_WB_PER_AT_LPM : 0;
- writel_relaxed(val, ccm_base + CLPCR);
-
- /* configure well bias count */
- val = readl_relaxed(ccm_base + CCR);
- val &= ~BM_CCR_WB_COUNT;
- val |= enable ? BM_CCR_WB_COUNT : 0;
- writel_relaxed(val, ccm_base + CCR);
-
- last_wb_mode = enable;
-}
-
-int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
-{
- u32 val = readl_relaxed(ccm_base + CLPCR);
-
- val &= ~BM_CLPCR_LPM;
- switch (mode) {
- case WAIT_CLOCKED:
- imx6q_enable_wb(false);
- imx6q_enable_rbc(false);
- break;
- case WAIT_UNCLOCKED:
- val |= 0x1 << BP_CLPCR_LPM;
- val |= BM_CLPCR_ARM_CLK_DIS_ON_LPM;
- break;
- case STOP_POWER_ON:
- val |= 0x2 << BP_CLPCR_LPM;
- break;
- case WAIT_UNCLOCKED_POWER_OFF:
- val |= 0x1 << BP_CLPCR_LPM;
- val &= ~BM_CLPCR_VSTBY;
- val &= ~BM_CLPCR_SBYOS;
- break;
- case STOP_POWER_OFF:
- val |= 0x2 << BP_CLPCR_LPM;
- val |= 0x3 << BP_CLPCR_STBY_COUNT;
- val |= BM_CLPCR_VSTBY;
- val |= BM_CLPCR_SBYOS;
- imx6q_enable_wb(true);
- imx6q_enable_rbc(true);
- break;
- default:
- return -EINVAL;
- }
-
- writel_relaxed(val, ccm_base + CLPCR);
-
- return 0;
-}
-
static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
@@ -182,7 +32,7 @@ static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", };
static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "periph", "pll3_pfd1_540m", };
-static const char *audio_sels[] = { "pll4_post_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
+static const char *audio_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
static const char *gpu_axi_sels[] = { "axi", "ahb", };
static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
@@ -196,7 +46,7 @@ static const char *ipu2_di0_sels[] = { "ipu2_di0_pre", "dummy", "dummy", "ldb_di
static const char *ipu2_di1_sels[] = { "ipu2_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *hsi_tx_sels[] = { "pll3_120m", "pll2_pfd2_396m", };
static const char *pcie_axi_sels[] = { "axi", "ahb", };
-static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_post_div", };
+static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio_div", };
static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
static const char *emi_sels[] = { "pll2_pfd2_396m", "pll3_usb_otg", "axi", "pll2_pfd0_352m", };
@@ -205,7 +55,7 @@ static const char *vdo_axi_sels[] = { "axi", "ahb", };
static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
"dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
- "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_post_div", };
+ "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio_div", };
static const char *cko2_sels[] = {
"mmdc_ch0_axi", "mmdc_ch1_axi", "usdhc4", "usdhc1",
"gpu2d_axi", "dummy", "ecspi_root", "gpu3d_axi",
@@ -217,6 +67,11 @@ static const char *cko2_sels[] = {
"uart_serial", "spdif", "asrc", "hsi_tx",
};
static const char *cko_sels[] = { "cko1", "cko2", };
+static const char *lvds_sels[] = {
+ "dummy", "dummy", "dummy", "dummy", "dummy", "dummy",
+ "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
+ "pcie_ref", "sata_ref",
+};
enum mx6q_clks {
dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
@@ -251,7 +106,8 @@ enum mx6q_clks {
ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5,
sata_ref, sata_ref_100m, pcie_ref, pcie_ref_125m, enet_ref, usbphy1_gate,
usbphy2_gate, pll4_post_div, pll5_post_div, pll5_video_div, eim_slow,
- spdif, cko2_sel, cko2_podf, cko2, cko, vdoa, clk_max
+ spdif, cko2_sel, cko2_podf, cko2, cko, vdoa, pll4_audio_div,
+ lvds1_sel, lvds2_sel, lvds1_gate, lvds2_gate, clk_max
};
static struct clk *clk[clk_max];
@@ -300,7 +156,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
WARN_ON(!base);
/* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */
- if (cpu_is_imx6q() && imx6q_revision() == IMX_CHIP_REVISION_1_0) {
+ if (cpu_is_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) {
post_div_table[1].div = 1;
post_div_table[2].div = 1;
video_div_table[1].div = 1;
@@ -342,6 +198,18 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
base + 0xe0, 0, 2, 0, clk_enet_ref_table,
&imx_ccm_lock);
+ clk[lvds1_sel] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
+ clk[lvds2_sel] = imx_clk_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
+
+ /*
+ * lvds1_gate and lvds2_gate are pseudo-gates. Both can be
+ * independently configured as clock inputs or outputs. We treat
+ * the "output_enable" bit as a gate, even though it's really just
+ * enabling clock output.
+ */
+ clk[lvds1_gate] = imx_clk_gate("lvds1_gate", "dummy", base + 0x160, 10);
+ clk[lvds2_gate] = imx_clk_gate("lvds2_gate", "dummy", base + 0x160, 11);
+
/* name parent_name reg idx */
clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0);
clk[pll2_pfd1_594m] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1);
@@ -359,13 +227,15 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[twd] = imx_clk_fixed_factor("twd", "arm", 1, 2);
clk[pll4_post_div] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
+ clk[pll4_audio_div] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
clk[pll5_post_div] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock);
clk[pll5_video_div] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
np = ccm_node;
base = of_iomap(np, 0);
WARN_ON(!base);
- ccm_base = base;
+
+ imx6q_pm_set_ccm_base(base);
/* name reg shift width parent_names num_parents */
clk[step] = imx_clk_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels));
@@ -428,7 +298,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[asrc_podf] = imx_clk_divider("asrc_podf", "asrc_pred", base + 0x30, 9, 3);
clk[spdif_pred] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
clk[spdif_podf] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
- clk[can_root] = imx_clk_divider("can_root", "pll3_usb_otg", base + 0x20, 2, 6);
+ clk[can_root] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
clk[ecspi_root] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
clk[gpu2d_core_podf] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3);
clk[gpu3d_core_podf] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3);
@@ -573,7 +443,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk_register_clkdev(clk[pll4_post_div], "pll4_post_div", NULL);
clk_register_clkdev(clk[pll4_audio], "pll4_audio", NULL);
- if ((imx6q_revision() != IMX_CHIP_REVISION_1_0) || cpu_is_imx6dl()) {
+ if ((imx_get_soc_revision() != IMX_CHIP_REVISION_1_0) ||
+ cpu_is_imx6dl()) {
clk_set_parent(clk[ldb_di0_sel], clk[pll5_video_div]);
clk_set_parent(clk[ldb_di1_sel], clk[pll5_video_div]);
}
@@ -603,8 +474,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
if (ret)
pr_warn("failed to set up CLKO: %d\n", ret);
- /* Set initial power mode */
- imx6q_set_lpm(WAIT_CLOCKED);
+ /* All existing boards with PCIe use LVDS1 */
+ if (IS_ENABLED(CONFIG_PCI_IMX6))
+ clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
base = of_iomap(np, 0);
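The hunk above re-parents lvds1_sel to sata_ref when CONFIG_PCI_IMX6 is enabled, so boards can source the PCIe reference clock from the LVDS1 clock pin. A minimal consumer-side sketch of the same re-parenting pattern is below; the clock lookup names passed to devm_clk_get() are illustrative assumptions, not taken from this patch.

/* Illustrative sketch only: the clock names are assumed, not from this patch. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int route_refclk_to_lvds1(struct device *dev)
{
	struct clk *mux = devm_clk_get(dev, "lvds1_sel");	/* hypothetical name */
	struct clk *src = devm_clk_get(dev, "sata_ref");	/* hypothetical name */

	if (IS_ERR(mux) || IS_ERR(src))
		return -ENODEV;

	/* same call the patch makes in imx6q_clocks_init() */
	return clk_set_parent(mux, src);
}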
diff --git a/arch/arm/mach-imx/clk-imx6sl.c b/arch/arm/mach-imx/clk-imx6sl.c
index a5c3c5d21aee..c0c4ef55e35b 100644
--- a/arch/arm/mach-imx/clk-imx6sl.c
+++ b/arch/arm/mach-imx/clk-imx6sl.c
@@ -127,6 +127,9 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
base = of_iomap(np, 0);
WARN_ON(!base);
+ /* Reuse imx6q pm code */
+ imx6q_pm_set_ccm_base(base);
+
/* name reg shift width parent_names num_parents */
clks[IMX6SL_CLK_STEP] = imx_clk_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels));
clks[IMX6SL_CLK_PLL1_SW] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels));
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 4517fd760bfc..24a7899e36a8 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -13,74 +13,73 @@
#include <linux/reboot.h>
+struct irq_data;
struct platform_device;
struct pt_regs;
struct clk;
enum mxc_cpu_pwr_mode;
-extern void mx1_map_io(void);
-extern void mx21_map_io(void);
-extern void mx25_map_io(void);
-extern void mx27_map_io(void);
-extern void mx31_map_io(void);
-extern void mx35_map_io(void);
-extern void mx51_map_io(void);
-extern void mx53_map_io(void);
-extern void imx1_init_early(void);
-extern void imx21_init_early(void);
-extern void imx25_init_early(void);
-extern void imx27_init_early(void);
-extern void imx31_init_early(void);
-extern void imx35_init_early(void);
-extern void imx51_init_early(void);
-extern void imx53_init_early(void);
-extern void mxc_init_irq(void __iomem *);
-extern void tzic_init_irq(void __iomem *);
-extern void mx1_init_irq(void);
-extern void mx21_init_irq(void);
-extern void mx25_init_irq(void);
-extern void mx27_init_irq(void);
-extern void mx31_init_irq(void);
-extern void mx35_init_irq(void);
-extern void mx51_init_irq(void);
-extern void mx53_init_irq(void);
-extern void imx1_soc_init(void);
-extern void imx21_soc_init(void);
-extern void imx25_soc_init(void);
-extern void imx27_soc_init(void);
-extern void imx31_soc_init(void);
-extern void imx35_soc_init(void);
-extern void imx51_soc_init(void);
-extern void imx51_init_late(void);
-extern void imx53_init_late(void);
-extern void epit_timer_init(void __iomem *base, int irq);
-extern void mxc_timer_init(void __iomem *, int);
-extern int mx1_clocks_init(unsigned long fref);
-extern int mx21_clocks_init(unsigned long lref, unsigned long fref);
-extern int mx25_clocks_init(void);
-extern int mx27_clocks_init(unsigned long fref);
-extern int mx31_clocks_init(unsigned long fref);
-extern int mx35_clocks_init(void);
-extern int mx51_clocks_init(unsigned long ckil, unsigned long osc,
+void mx1_map_io(void);
+void mx21_map_io(void);
+void mx25_map_io(void);
+void mx27_map_io(void);
+void mx31_map_io(void);
+void mx35_map_io(void);
+void mx51_map_io(void);
+void mx53_map_io(void);
+void imx1_init_early(void);
+void imx21_init_early(void);
+void imx25_init_early(void);
+void imx27_init_early(void);
+void imx31_init_early(void);
+void imx35_init_early(void);
+void imx51_init_early(void);
+void imx53_init_early(void);
+void mxc_init_irq(void __iomem *);
+void tzic_init_irq(void __iomem *);
+void mx1_init_irq(void);
+void mx21_init_irq(void);
+void mx25_init_irq(void);
+void mx27_init_irq(void);
+void mx31_init_irq(void);
+void mx35_init_irq(void);
+void mx51_init_irq(void);
+void mx53_init_irq(void);
+void imx1_soc_init(void);
+void imx21_soc_init(void);
+void imx25_soc_init(void);
+void imx27_soc_init(void);
+void imx31_soc_init(void);
+void imx35_soc_init(void);
+void imx51_soc_init(void);
+void imx51_init_late(void);
+void imx53_init_late(void);
+void epit_timer_init(void __iomem *base, int irq);
+void mxc_timer_init(void __iomem *, int);
+int mx1_clocks_init(unsigned long fref);
+int mx21_clocks_init(unsigned long lref, unsigned long fref);
+int mx25_clocks_init(void);
+int mx27_clocks_init(unsigned long fref);
+int mx31_clocks_init(unsigned long fref);
+int mx35_clocks_init(void);
+int mx51_clocks_init(unsigned long ckil, unsigned long osc,
unsigned long ckih1, unsigned long ckih2);
-extern int mx53_clocks_init(unsigned long ckil, unsigned long osc,
- unsigned long ckih1, unsigned long ckih2);
-extern int mx25_clocks_init_dt(void);
-extern int mx27_clocks_init_dt(void);
-extern int mx31_clocks_init_dt(void);
-extern int mx51_clocks_init_dt(void);
-extern int mx53_clocks_init_dt(void);
-extern struct platform_device *mxc_register_gpio(char *name, int id,
+int mx25_clocks_init_dt(void);
+int mx27_clocks_init_dt(void);
+int mx31_clocks_init_dt(void);
+struct platform_device *mxc_register_gpio(char *name, int id,
resource_size_t iobase, resource_size_t iosize, int irq, int irq_high);
-extern void mxc_set_cpu_type(unsigned int type);
-extern void mxc_restart(enum reboot_mode, const char *);
-extern void mxc_arch_reset_init(void __iomem *);
-extern void mxc_arch_reset_init_dt(void);
-extern int mx53_revision(void);
-extern int imx6q_revision(void);
-extern int mx53_display_revision(void);
-extern void imx_set_aips(void __iomem *);
-extern int mxc_device_init(void);
+void mxc_set_cpu_type(unsigned int type);
+void mxc_restart(enum reboot_mode, const char *);
+void mxc_arch_reset_init(void __iomem *);
+void mxc_arch_reset_init_dt(void);
+int mx53_revision(void);
+void imx_set_aips(void __iomem *);
+int mxc_device_init(void);
+void imx_set_soc_revision(unsigned int rev);
+unsigned int imx_get_soc_revision(void);
+void imx_init_revision_from_anatop(void);
+struct device *imx_soc_device_init(void);
enum mxc_cpu_pwr_mode {
WAIT_CLOCKED, /* wfi only */
@@ -97,8 +96,8 @@ enum mx3_cpu_pwr_mode {
MX3_SLEEP,
};
-extern void mx3_cpu_lp_set(enum mx3_cpu_pwr_mode mode);
-extern void imx_print_silicon_rev(const char *cpu, int srev);
+void mx3_cpu_lp_set(enum mx3_cpu_pwr_mode mode);
+void imx_print_silicon_rev(const char *cpu, int srev);
void avic_handle_irq(struct pt_regs *);
void tzic_handle_irq(struct pt_regs *);
@@ -112,54 +111,56 @@ void tzic_handle_irq(struct pt_regs *);
#define imx51_handle_irq tzic_handle_irq
#define imx53_handle_irq tzic_handle_irq
-extern void imx_enable_cpu(int cpu, bool enable);
-extern void imx_set_cpu_jump(int cpu, void *jump_addr);
-extern u32 imx_get_cpu_arg(int cpu);
-extern void imx_set_cpu_arg(int cpu, u32 arg);
-extern void v7_cpu_resume(void);
+void imx_enable_cpu(int cpu, bool enable);
+void imx_set_cpu_jump(int cpu, void *jump_addr);
+u32 imx_get_cpu_arg(int cpu);
+void imx_set_cpu_arg(int cpu, u32 arg);
+void v7_cpu_resume(void);
#ifdef CONFIG_SMP
-extern void v7_secondary_startup(void);
-extern void imx_scu_map_io(void);
-extern void imx_smp_prepare(void);
-extern void imx_scu_standby_enable(void);
+void v7_secondary_startup(void);
+void imx_scu_map_io(void);
+void imx_smp_prepare(void);
+void imx_scu_standby_enable(void);
#else
static inline void imx_scu_map_io(void) {}
static inline void imx_smp_prepare(void) {}
static inline void imx_scu_standby_enable(void) {}
#endif
-extern void imx_src_init(void);
-extern void imx_src_prepare_restart(void);
-extern void imx_gpc_init(void);
-extern void imx_gpc_pre_suspend(void);
-extern void imx_gpc_post_resume(void);
-extern void imx_gpc_mask_all(void);
-extern void imx_gpc_restore_all(void);
-extern void imx_anatop_init(void);
-extern void imx_anatop_pre_suspend(void);
-extern void imx_anatop_post_resume(void);
-extern u32 imx_anatop_get_digprog(void);
-extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
-extern void imx6q_set_chicken_bit(void);
-
-extern void imx_cpu_die(unsigned int cpu);
-extern int imx_cpu_kill(unsigned int cpu);
+void imx_src_init(void);
+void imx_gpc_init(void);
+void imx_gpc_pre_suspend(void);
+void imx_gpc_post_resume(void);
+void imx_gpc_mask_all(void);
+void imx_gpc_restore_all(void);
+void imx_gpc_irq_mask(struct irq_data *d);
+void imx_gpc_irq_unmask(struct irq_data *d);
+void imx_anatop_init(void);
+void imx_anatop_pre_suspend(void);
+void imx_anatop_post_resume(void);
+int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
+void imx6q_set_chicken_bit(void);
+
+void imx_cpu_die(unsigned int cpu);
+int imx_cpu_kill(unsigned int cpu);
#ifdef CONFIG_PM
-extern void imx6q_pm_init(void);
-extern void imx5_pm_init(void);
+void imx6q_pm_init(void);
+void imx6q_pm_set_ccm_base(void __iomem *base);
+void imx5_pm_init(void);
#else
static inline void imx6q_pm_init(void) {}
+static inline void imx6q_pm_set_ccm_base(void __iomem *base) {}
static inline void imx5_pm_init(void) {}
#endif
#ifdef CONFIG_NEON
-extern int mx51_neon_fixup(void);
+int mx51_neon_fixup(void);
#else
static inline int mx51_neon_fixup(void) { return 0; }
#endif
#ifdef CONFIG_CACHE_L2X0
-extern void imx_init_l2cache(void);
+void imx_init_l2cache(void);
#else
static inline void imx_init_l2cache(void) {}
#endif
diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c
index e70e3acbf9bd..ba3b498a67ec 100644
--- a/arch/arm/mach-imx/cpu.c
+++ b/arch/arm/mach-imx/cpu.c
@@ -1,6 +1,9 @@
-
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
#include "hardware.h"
#include "common.h"
@@ -8,11 +11,23 @@
unsigned int __mxc_cpu_type;
EXPORT_SYMBOL(__mxc_cpu_type);
+static unsigned int imx_soc_revision;
+
void mxc_set_cpu_type(unsigned int type)
{
__mxc_cpu_type = type;
}
+void imx_set_soc_revision(unsigned int rev)
+{
+ imx_soc_revision = rev;
+}
+
+unsigned int imx_get_soc_revision(void)
+{
+ return imx_soc_revision;
+}
+
void imx_print_silicon_rev(const char *cpu, int srev)
{
if (srev == IMX_CHIP_REVISION_UNKNOWN)
@@ -44,3 +59,81 @@ void __init imx_set_aips(void __iomem *base)
reg = __raw_readl(base + 0x50) & 0x00FFFFFF;
__raw_writel(reg, base + 0x50);
}
+
+struct device * __init imx_soc_device_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ struct device_node *root;
+ const char *soc_id;
+ int ret;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return NULL;
+
+ soc_dev_attr->family = "Freescale i.MX";
+
+ root = of_find_node_by_path("/");
+ ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
+ of_node_put(root);
+ if (ret)
+ goto free_soc;
+
+ switch (__mxc_cpu_type) {
+ case MXC_CPU_MX1:
+ soc_id = "i.MX1";
+ break;
+ case MXC_CPU_MX21:
+ soc_id = "i.MX21";
+ break;
+ case MXC_CPU_MX25:
+ soc_id = "i.MX25";
+ break;
+ case MXC_CPU_MX27:
+ soc_id = "i.MX27";
+ break;
+ case MXC_CPU_MX31:
+ soc_id = "i.MX31";
+ break;
+ case MXC_CPU_MX35:
+ soc_id = "i.MX35";
+ break;
+ case MXC_CPU_MX51:
+ soc_id = "i.MX51";
+ break;
+ case MXC_CPU_MX53:
+ soc_id = "i.MX53";
+ break;
+ case MXC_CPU_IMX6SL:
+ soc_id = "i.MX6SL";
+ break;
+ case MXC_CPU_IMX6DL:
+ soc_id = "i.MX6DL";
+ break;
+ case MXC_CPU_IMX6Q:
+ soc_id = "i.MX6Q";
+ break;
+ default:
+ soc_id = "Unknown";
+ }
+ soc_dev_attr->soc_id = soc_id;
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d",
+ (imx_soc_revision >> 4) & 0xf,
+ imx_soc_revision & 0xf);
+ if (!soc_dev_attr->revision)
+ goto free_soc;
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev))
+ goto free_rev;
+
+ return soc_device_to_device(soc_dev);
+
+free_rev:
+ kfree(soc_dev_attr->revision);
+free_soc:
+ kfree(soc_dev_attr);
+ return NULL;
+}
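For reference, the "%d.%d" split in imx_soc_device_init() assumes the usual i.MX encoding where IMX_CHIP_REVISION_M_N is 0xMN (e.g. 0x12 for silicon rev 1.2); that encoding is an assumption here, not spelled out in this patch. A tiny standalone sketch of the formatting:

/* Standalone illustration of the nibble split used by kasprintf() above. */
#include <stdio.h>

int main(void)
{
	unsigned int rev = 0x12;	/* stand-in for imx_get_soc_revision() */

	printf("revision %d.%d\n", (rev >> 4) & 0xf, rev & 0xf);
	return 0;			/* prints "revision 1.2" */
}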
diff --git a/arch/arm/mach-imx/epit.c b/arch/arm/mach-imx/epit.c
index e02de188ae83..074b1a81ba76 100644
--- a/arch/arm/mach-imx/epit.c
+++ b/arch/arm/mach-imx/epit.c
@@ -171,7 +171,7 @@ static irqreturn_t epit_timer_interrupt(int irq, void *dev_id)
static struct irqaction epit_timer_irq = {
.name = "i.MX EPIT Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = epit_timer_interrupt,
};
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 44a65e9ff1fc..586e0171a652 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -90,7 +90,7 @@ void imx_gpc_restore_all(void)
writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
}
-static void imx_gpc_irq_unmask(struct irq_data *d)
+void imx_gpc_irq_unmask(struct irq_data *d)
{
void __iomem *reg;
u32 val;
@@ -105,7 +105,7 @@ static void imx_gpc_irq_unmask(struct irq_data *d)
writel_relaxed(val, reg);
}
-static void imx_gpc_irq_mask(struct irq_data *d)
+void imx_gpc_irq_mask(struct irq_data *d)
{
void __iomem *reg;
u32 val;
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 3daf1ed90579..b35e99cc5e5b 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -52,7 +52,9 @@ void imx_cpu_die(unsigned int cpu)
* the register being cleared to kill the cpu.
*/
imx_set_cpu_arg(cpu, ~0);
- cpu_do_idle();
+
+ while (1)
+ cpu_do_idle();
}
int imx_cpu_kill(unsigned int cpu)
diff --git a/arch/arm/mach-imx/imx51-dt.c b/arch/arm/mach-imx/imx51-dt.c
index 53e43e579dd7..bece8a65e6f0 100644
--- a/arch/arm/mach-imx/imx51-dt.c
+++ b/arch/arm/mach-imx/imx51-dt.c
@@ -34,17 +34,11 @@ static const char *imx51_dt_board_compat[] __initdata = {
NULL
};
-static void __init imx51_timer_init(void)
-{
- mx51_clocks_init_dt();
-}
-
DT_MACHINE_START(IMX51_DT, "Freescale i.MX51 (Device Tree Support)")
.map_io = mx51_map_io,
.init_early = imx51_init_early,
.init_irq = mx51_init_irq,
.handle_irq = imx51_handle_irq,
- .init_time = imx51_timer_init,
.init_machine = imx51_dt_init,
.init_late = imx51_init_late,
.dt_compat = imx51_dt_board_compat,
diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c
index 368a6e3f5926..58b864a3fc20 100644
--- a/arch/arm/mach-imx/mach-armadillo5x0.c
+++ b/arch/arm/mach-imx/mach-armadillo5x0.c
@@ -404,8 +404,7 @@ static int armadillo5x0_sdhc1_init(struct device *dev,
/* When supported the trigger type have to be BOTH */
ret = request_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_ATA_DMACK)),
- detect_irq,
- IRQF_DISABLED | IRQF_TRIGGER_FALLING,
+ detect_irq, IRQF_TRIGGER_FALLING,
"sdhc-detect", data);
if (ret)
diff --git a/arch/arm/mach-imx/mach-imx53.c b/arch/arm/mach-imx/mach-imx53.c
index 98c58944015a..c9c4d8d96931 100644
--- a/arch/arm/mach-imx/mach-imx53.c
+++ b/arch/arm/mach-imx/mach-imx53.c
@@ -36,17 +36,11 @@ static const char *imx53_dt_board_compat[] __initdata = {
NULL
};
-static void __init imx53_timer_init(void)
-{
- mx53_clocks_init_dt();
-}
-
DT_MACHINE_START(IMX53_DT, "Freescale i.MX53 (Device Tree Support)")
.map_io = mx53_map_io,
.init_early = imx53_init_early,
.init_irq = mx53_init_irq,
.handle_irq = imx53_handle_irq,
- .init_time = imx53_timer_init,
.init_machine = imx53_dt_init,
.init_late = imx53_init_late,
.dt_compat = imx53_dt_board_compat,
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 90372a21087f..170f13f72157 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -11,9 +11,7 @@
*/
#include <linux/clk.h>
-#include <linux/clk-provider.h>
#include <linux/clkdev.h>
-#include <linux/clocksource.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/export.h>
@@ -25,8 +23,9 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/opp.h>
+#include <linux/pci.h>
#include <linux/phy.h>
+#include <linux/pm_opp.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/micrel_phy.h>
@@ -40,64 +39,6 @@
#include "cpuidle.h"
#include "hardware.h"
-static u32 chip_revision;
-
-int imx6q_revision(void)
-{
- return chip_revision;
-}
-
-static void __init imx6q_init_revision(void)
-{
- u32 rev = imx_anatop_get_digprog();
-
- switch (rev & 0xff) {
- case 0:
- chip_revision = IMX_CHIP_REVISION_1_0;
- break;
- case 1:
- chip_revision = IMX_CHIP_REVISION_1_1;
- break;
- case 2:
- chip_revision = IMX_CHIP_REVISION_1_2;
- break;
- default:
- chip_revision = IMX_CHIP_REVISION_UNKNOWN;
- }
-
- mxc_set_cpu_type(rev >> 16 & 0xff);
-}
-
-static void imx6q_restart(enum reboot_mode mode, const char *cmd)
-{
- struct device_node *np;
- void __iomem *wdog_base;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-wdt");
- wdog_base = of_iomap(np, 0);
- if (!wdog_base)
- goto soft;
-
- imx_src_prepare_restart();
-
- /* enable wdog */
- writew_relaxed(1 << 2, wdog_base);
- /* write twice to ensure the request will not get ignored */
- writew_relaxed(1 << 2, wdog_base);
-
- /* wait for reset to assert ... */
- mdelay(500);
-
- pr_err("Watchdog reset failed to assert reset\n");
-
- /* delay to allow the serial port to show the message */
- mdelay(50);
-
-soft:
- /* we'll take a jump through zero as a poor second */
- soft_restart(0);
-}
-
/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
static int ksz9021rn_phy_fixup(struct phy_device *phydev)
{
@@ -139,6 +80,34 @@ static int ksz9031rn_phy_fixup(struct phy_device *dev)
return 0;
}
+/*
+ * Fixup for the PLX PEX8909 bridge: configure GPIO1-7 as outputs driven
+ * high, as they are used for the slot 1-7 PERST# signals
+ */
+static void ventana_pciesw_early_fixup(struct pci_dev *dev)
+{
+ u32 dw;
+
+ if (!of_machine_is_compatible("gw,ventana"))
+ return;
+
+ if (dev->devfn != 0)
+ return;
+
+ pci_read_config_dword(dev, 0x62c, &dw);
+ dw |= 0xaaa8; /* GPIO1-7 outputs */
+ pci_write_config_dword(dev, 0x62c, dw);
+
+ pci_read_config_dword(dev, 0x644, &dw);
+ dw |= 0xfe; /* GPIO1-7 output high */
+ pci_write_config_dword(dev, 0x644, dw);
+
+ msleep(100);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8609, ventana_pciesw_early_fixup);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8606, ventana_pciesw_early_fixup);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8604, ventana_pciesw_early_fixup);
+
static int ar8031_phy_fixup(struct phy_device *dev)
{
u16 val;
@@ -192,9 +161,20 @@ static void __init imx6q_1588_init(void)
static void __init imx6q_init_machine(void)
{
+ struct device *parent;
+
+ imx_print_silicon_rev(cpu_is_imx6dl() ? "i.MX6DL" : "i.MX6Q",
+ imx_get_soc_revision());
+
+ mxc_arch_reset_init_dt();
+
+ parent = imx_soc_device_init();
+ if (parent == NULL)
+ pr_warn("failed to initialize soc device\n");
+
imx6q_enet_phy_init();
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
imx_anatop_init();
imx6q_pm_init();
@@ -226,7 +206,7 @@ static void __init imx6q_opp_check_1p2ghz(struct device *cpu_dev)
val = readl_relaxed(base + OCOTP_CFG3);
val >>= OCOTP_CFG3_SPEED_SHIFT;
if ((val & 0x3) != OCOTP_CFG3_SPEED_1P2GHZ)
- if (opp_disable(cpu_dev, 1200000000))
+ if (dev_pm_opp_disable(cpu_dev, 1200000000))
pr_warn("failed to disable 1.2 GHz OPP\n");
put_node:
@@ -269,7 +249,7 @@ static void __init imx6q_init_late(void)
* WAIT mode is broken on TO 1.0 and 1.1, so there is no point
* to run cpuidle on them.
*/
- if (imx6q_revision() > IMX_CHIP_REVISION_1_1)
+ if (imx_get_soc_revision() > IMX_CHIP_REVISION_1_1)
imx6q_cpuidle_init();
if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) {
@@ -286,21 +266,13 @@ static void __init imx6q_map_io(void)
static void __init imx6q_init_irq(void)
{
- imx6q_init_revision();
+ imx_init_revision_from_anatop();
imx_init_l2cache();
imx_src_init();
imx_gpc_init();
irqchip_init();
}
-static void __init imx6q_timer_init(void)
-{
- of_clk_init(NULL);
- clocksource_of_init();
- imx_print_silicon_rev(cpu_is_imx6dl() ? "i.MX6DL" : "i.MX6Q",
- imx6q_revision());
-}
-
static const char *imx6q_dt_compat[] __initdata = {
"fsl,imx6dl",
"fsl,imx6q",
@@ -311,9 +283,8 @@ DT_MACHINE_START(IMX6Q, "Freescale i.MX6 Quad/DualLite (Device Tree)")
.smp = smp_ops(imx_smp_ops),
.map_io = imx6q_map_io,
.init_irq = imx6q_init_irq,
- .init_time = imx6q_timer_init,
.init_machine = imx6q_init_machine,
.init_late = imx6q_init_late,
.dt_compat = imx6q_dt_compat,
- .restart = imx6q_restart,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx6sl.c b/arch/arm/mach-imx/mach-imx6sl.c
index 0d75dc54f715..2f952e3fcf89 100644
--- a/arch/arm/mach-imx/mach-imx6sl.c
+++ b/arch/arm/mach-imx/mach-imx6sl.c
@@ -7,35 +7,60 @@
*
*/
-#include <linux/clk-provider.h>
#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/regmap.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include "common.h"
+static void __init imx6sl_fec_init(void)
+{
+ struct regmap *gpr;
+
+ /* set FEC clock from internal PLL clock source */
+ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6sl-iomuxc-gpr");
+ if (!IS_ERR(gpr)) {
+ regmap_update_bits(gpr, IOMUXC_GPR1,
+ IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK, 0);
+ regmap_update_bits(gpr, IOMUXC_GPR1,
+ IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK, 0);
+ } else {
+ pr_err("failed to find fsl,imx6sl-iomux-gpr regmap\n");
+ }
+}
+
static void __init imx6sl_init_machine(void)
{
+ struct device *parent;
+
mxc_arch_reset_init_dt();
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ parent = imx_soc_device_init();
+ if (parent == NULL)
+ pr_warn("failed to initialize soc device\n");
+
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
+
+ imx6sl_fec_init();
+ imx_anatop_init();
+ /* Reuse imx6q pm code */
+ imx6q_pm_init();
}
static void __init imx6sl_init_irq(void)
{
+ imx_init_revision_from_anatop();
imx_init_l2cache();
imx_src_init();
imx_gpc_init();
irqchip_init();
}
-static void __init imx6sl_timer_init(void)
-{
- of_clk_init(NULL);
-}
-
static const char *imx6sl_dt_compat[] __initdata = {
"fsl,imx6sl",
NULL,
@@ -44,7 +69,6 @@ static const char *imx6sl_dt_compat[] __initdata = {
DT_MACHINE_START(IMX6SL, "Freescale i.MX6 SoloLite (Device Tree)")
.map_io = debug_ll_io_init,
.init_irq = imx6sl_init_irq,
- .init_time = imx6sl_timer_init,
.init_machine = imx6sl_init_machine,
.dt_compat = imx6sl_dt_compat,
.restart = mxc_restart,
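The imx6sl_fec_init() hunk above is a compact example of the syscon/regmap pattern. A minimal sketch of that pattern, reusing the compatible string and one of the masks from the hunk (the function name is illustrative):

/* Sketch of the syscon lookup + regmap_update_bits() pattern used above. */
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/regmap.h>

static void example_select_fec_clock(void)
{
	struct regmap *gpr;

	gpr = syscon_regmap_lookup_by_compatible("fsl,imx6sl-iomuxc-gpr");
	if (IS_ERR(gpr))
		return;

	/* clear the mux bit to select the internal PLL as the FEC clock source */
	regmap_update_bits(gpr, IOMUXC_GPR1,
			   IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK, 0);
}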
diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c
index 1ed916175d41..50044a21b388 100644
--- a/arch/arm/mach-imx/mach-mx31_3ds.c
+++ b/arch/arm/mach-imx/mach-mx31_3ds.c
@@ -311,7 +311,7 @@ static int mx31_3ds_sdhc1_init(struct device *dev,
}
ret = request_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1)),
- detect_irq, IRQF_DISABLED |
+ detect_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"sdhc1-detect", data);
if (ret) {
diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c
index 19bb6441a7d4..c5f95674e9b7 100644
--- a/arch/arm/mach-imx/mach-pca100.c
+++ b/arch/arm/mach-imx/mach-pca100.c
@@ -20,7 +20,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c
index bc0261e99d39..639a3dfb0092 100644
--- a/arch/arm/mach-imx/mach-pcm037.c
+++ b/arch/arm/mach-imx/mach-pcm037.c
@@ -23,7 +23,7 @@
#include <linux/smsc911x.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/irq.h>
@@ -371,8 +371,7 @@ static int pcm970_sdhc1_init(struct device *dev, irq_handler_t detect_irq,
#endif
ret = request_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_SCK6)), detect_irq,
- IRQF_DISABLED | IRQF_TRIGGER_FALLING,
- "sdhc-detect", data);
+ IRQF_TRIGGER_FALLING, "sdhc-detect", data);
if (ret)
goto err_gpio_free_2;
diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c
index e805ac273e9c..592ddbe031ac 100644
--- a/arch/arm/mach-imx/mach-pcm038.c
+++ b/arch/arm/mach-imx/mach-pcm038.c
@@ -18,7 +18,7 @@
*/
#include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/io.h>
#include <linux/mtd/plat-ram.h>
#include <linux/mtd/physmap.h>
diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c
index b726cb1c5fdd..ac504b67326b 100644
--- a/arch/arm/mach-imx/mach-pcm043.c
+++ b/arch/arm/mach-imx/mach-pcm043.c
@@ -24,7 +24,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
diff --git a/arch/arm/mach-imx/mach-vf610.c b/arch/arm/mach-imx/mach-vf610.c
index 816991deb9b8..af0cb8a9dc48 100644
--- a/arch/arm/mach-imx/mach-vf610.c
+++ b/arch/arm/mach-imx/mach-vf610.c
@@ -8,9 +8,7 @@
*/
#include <linux/of_platform.h>
-#include <linux/clocksource.h>
#include <linux/irqchip.h>
-#include <linux/clk-provider.h>
#include <asm/mach/arch.h>
#include <asm/hardware/cache-l2x0.h>
@@ -28,12 +26,6 @@ static void __init vf610_init_irq(void)
irqchip_init();
}
-static void __init vf610_init_time(void)
-{
- of_clk_init(NULL);
- clocksource_of_init();
-}
-
static const char *vf610_dt_compat[] __initdata = {
"fsl,vf610",
NULL,
@@ -41,7 +33,6 @@ static const char *vf610_dt_compat[] __initdata = {
DT_MACHINE_START(VYBRID_VF610, "Freescale Vybrid VF610 (Device Tree)")
.init_irq = vf610_init_irq,
- .init_time = vf610_init_time,
.init_machine = vf610_init_machine,
.dt_compat = vf610_dt_compat,
.restart = mxc_restart,
diff --git a/arch/arm/mach-imx/mach-vpr200.c b/arch/arm/mach-imx/mach-vpr200.c
index 0910761e8280..8825d1217d18 100644
--- a/arch/arm/mach-imx/mach-vpr200.c
+++ b/arch/arm/mach-imx/mach-vpr200.c
@@ -29,7 +29,7 @@
#include <asm/mach/time.h>
#include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/mfd/mc13xxx.h>
#include "common.h"
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index eb3cce38c70d..d1d52600f458 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/pinctrl/machine.h>
+#include <linux/of_address.h>
#include <asm/mach/map.h>
@@ -88,8 +89,15 @@ void __init imx51_init_early(void)
void __init imx53_init_early(void)
{
+ struct device_node *np;
+ void __iomem *base;
+
mxc_set_cpu_type(MXC_CPU_MX53);
- mxc_iomux_v3_init(MX53_IO_ADDRESS(MX53_IOMUXC_BASE_ADDR));
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx53-iomuxc");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+ mxc_iomux_v3_init(base);
imx_src_init();
}
@@ -100,7 +108,14 @@ void __init mx51_init_irq(void)
void __init mx53_init_irq(void)
{
- tzic_init_irq(MX53_IO_ADDRESS(MX53_TZIC_BASE_ADDR));
+ struct device_node *np;
+ void __iomem *base;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx53-tzic");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ tzic_init_irq(base);
}
static struct sdma_platform_data imx51_sdma_pdata __initdata = {
diff --git a/arch/arm/mach-imx/mx31lilly-db.c b/arch/arm/mach-imx/mx31lilly-db.c
index d4361b80c5fb..649fe49ce85e 100644
--- a/arch/arm/mach-imx/mx31lilly-db.c
+++ b/arch/arm/mach-imx/mx31lilly-db.c
@@ -130,8 +130,7 @@ static int mxc_mmc1_init(struct device *dev,
gpio_direction_input(gpio_wp);
ret = request_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_1)),
- detect_irq,
- IRQF_DISABLED | IRQF_TRIGGER_FALLING,
+ detect_irq, IRQF_TRIGGER_FALLING,
"MMC detect", data);
if (ret)
goto exit_free_wp;
diff --git a/arch/arm/mach-imx/mxc.h b/arch/arm/mach-imx/mxc.h
index 8629e5be7ecd..b08ab3ad4a6d 100644
--- a/arch/arm/mach-imx/mxc.h
+++ b/arch/arm/mach-imx/mxc.h
@@ -34,6 +34,7 @@
#define MXC_CPU_MX35 35
#define MXC_CPU_MX51 51
#define MXC_CPU_MX53 53
+#define MXC_CPU_IMX6SL 0x60
#define MXC_CPU_IMX6DL 0x61
#define MXC_CPU_IMX6Q 0x63
@@ -152,6 +153,11 @@ extern unsigned int __mxc_cpu_type;
#endif
#ifndef __ASSEMBLY__
+static inline bool cpu_is_imx6sl(void)
+{
+ return __mxc_cpu_type == MXC_CPU_IMX6SL;
+}
+
static inline bool cpu_is_imx6dl(void)
{
return __mxc_cpu_type == MXC_CPU_IMX6DL;
diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c
index 204942749e21..aecd9f8037e0 100644
--- a/arch/arm/mach-imx/pm-imx6q.c
+++ b/arch/arm/mach-imx/pm-imx6q.c
@@ -10,9 +10,15 @@
* http://www.gnu.org/copyleft/gpl.html
*/
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
#include <linux/suspend.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
@@ -22,6 +28,147 @@
#include "common.h"
#include "hardware.h"
+#define CCR 0x0
+#define BM_CCR_WB_COUNT (0x7 << 16)
+#define BM_CCR_RBC_BYPASS_COUNT (0x3f << 21)
+#define BM_CCR_RBC_EN (0x1 << 27)
+
+#define CLPCR 0x54
+#define BP_CLPCR_LPM 0
+#define BM_CLPCR_LPM (0x3 << 0)
+#define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2)
+#define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5)
+#define BM_CLPCR_SBYOS (0x1 << 6)
+#define BM_CLPCR_DIS_REF_OSC (0x1 << 7)
+#define BM_CLPCR_VSTBY (0x1 << 8)
+#define BP_CLPCR_STBY_COUNT 9
+#define BM_CLPCR_STBY_COUNT (0x3 << 9)
+#define BM_CLPCR_COSC_PWRDOWN (0x1 << 11)
+#define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16)
+#define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17)
+#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19)
+#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21)
+#define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22)
+#define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23)
+#define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24)
+#define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25)
+#define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26)
+#define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27)
+
+#define CGPR 0x64
+#define BM_CGPR_CHICKEN_BIT (0x1 << 17)
+
+static void __iomem *ccm_base;
+
+void imx6q_set_chicken_bit(void)
+{
+ u32 val = readl_relaxed(ccm_base + CGPR);
+
+ val |= BM_CGPR_CHICKEN_BIT;
+ writel_relaxed(val, ccm_base + CGPR);
+}
+
+static void imx6q_enable_rbc(bool enable)
+{
+ u32 val;
+
+ /*
+ * need to mask all interrupts in the GPC before
+ * changing the RBC configuration
+ */
+ imx_gpc_mask_all();
+
+ /* configure RBC enable bit */
+ val = readl_relaxed(ccm_base + CCR);
+ val &= ~BM_CCR_RBC_EN;
+ val |= enable ? BM_CCR_RBC_EN : 0;
+ writel_relaxed(val, ccm_base + CCR);
+
+ /* configure RBC count */
+ val = readl_relaxed(ccm_base + CCR);
+ val &= ~BM_CCR_RBC_BYPASS_COUNT;
+ val |= enable ? BM_CCR_RBC_BYPASS_COUNT : 0;
+ writel(val, ccm_base + CCR);
+
+ /*
+ * need to delay at least 2 cycles of CKIL(32K) due to
+ * a hardware design requirement; 2 cycles is ~61us,
+ * so we use 65us to be safe
+ */
+ udelay(65);
+
+ /* restore GPC interrupt mask settings */
+ imx_gpc_restore_all();
+}
+
+static void imx6q_enable_wb(bool enable)
+{
+ u32 val;
+
+ /* configure well bias enable bit */
+ val = readl_relaxed(ccm_base + CLPCR);
+ val &= ~BM_CLPCR_WB_PER_AT_LPM;
+ val |= enable ? BM_CLPCR_WB_PER_AT_LPM : 0;
+ writel_relaxed(val, ccm_base + CLPCR);
+
+ /* configure well bias count */
+ val = readl_relaxed(ccm_base + CCR);
+ val &= ~BM_CCR_WB_COUNT;
+ val |= enable ? BM_CCR_WB_COUNT : 0;
+ writel_relaxed(val, ccm_base + CCR);
+}
+
+int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
+{
+ struct irq_desc *iomuxc_irq_desc;
+ u32 val = readl_relaxed(ccm_base + CLPCR);
+
+ val &= ~BM_CLPCR_LPM;
+ switch (mode) {
+ case WAIT_CLOCKED:
+ break;
+ case WAIT_UNCLOCKED:
+ val |= 0x1 << BP_CLPCR_LPM;
+ val |= BM_CLPCR_ARM_CLK_DIS_ON_LPM;
+ break;
+ case STOP_POWER_ON:
+ val |= 0x2 << BP_CLPCR_LPM;
+ break;
+ case WAIT_UNCLOCKED_POWER_OFF:
+ val |= 0x1 << BP_CLPCR_LPM;
+ val &= ~BM_CLPCR_VSTBY;
+ val &= ~BM_CLPCR_SBYOS;
+ break;
+ case STOP_POWER_OFF:
+ val |= 0x2 << BP_CLPCR_LPM;
+ val |= 0x3 << BP_CLPCR_STBY_COUNT;
+ val |= BM_CLPCR_VSTBY;
+ val |= BM_CLPCR_SBYOS;
+ if (cpu_is_imx6sl()) {
+ val |= BM_CLPCR_BYPASS_PMIC_READY;
+ val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
+ } else {
+ val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Unmask the always-pending IOMUXC interrupt #32 as a wakeup source so
+ * that the dsm_request signal is deasserted, ensuring dsm_request is
+ * not asserted while we write the CLPCR register to set the LPM bits.
+ * Once the LPM bits are set up, mask this wakeup source again.
+ */
+ iomuxc_irq_desc = irq_to_desc(32);
+ imx_gpc_irq_unmask(&iomuxc_irq_desc->irq_data);
+ writel_relaxed(val, ccm_base + CLPCR);
+ imx_gpc_irq_mask(&iomuxc_irq_desc->irq_data);
+
+ return 0;
+}
+
static int imx6q_suspend_finish(unsigned long val)
{
cpu_do_idle();
@@ -33,14 +180,19 @@ static int imx6q_pm_enter(suspend_state_t state)
switch (state) {
case PM_SUSPEND_MEM:
imx6q_set_lpm(STOP_POWER_OFF);
+ imx6q_enable_wb(true);
+ imx6q_enable_rbc(true);
imx_gpc_pre_suspend();
imx_anatop_pre_suspend();
imx_set_cpu_jump(0, v7_cpu_resume);
/* Zzz ... */
cpu_suspend(0, imx6q_suspend_finish);
- imx_smp_prepare();
+ if (cpu_is_imx6q() || cpu_is_imx6dl())
+ imx_smp_prepare();
imx_anatop_post_resume();
imx_gpc_post_resume();
+ imx6q_enable_rbc(false);
+ imx6q_enable_wb(false);
imx6q_set_lpm(WAIT_CLOCKED);
break;
default:
@@ -55,7 +207,29 @@ static const struct platform_suspend_ops imx6q_pm_ops = {
.valid = suspend_valid_only_mem,
};
+void __init imx6q_pm_set_ccm_base(void __iomem *base)
+{
+ ccm_base = base;
+}
+
void __init imx6q_pm_init(void)
{
+ struct regmap *gpr;
+
+ WARN_ON(!ccm_base);
+
+ /*
+ * Force the IOMUXC irq pending, so that the interrupt to the GPC can
+ * be used to deassert the dsm_request signal when that signal gets
+ * asserted unexpectedly.
+ */
+ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (!IS_ERR(gpr))
+ regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT,
+ IMX6Q_GPR1_GINT);
+
+ /* Set initial power mode */
+ imx6q_set_lpm(WAIT_CLOCKED);
+
suspend_set_ops(&imx6q_pm_ops);
}
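The new imx6q_pm_init() warns if ccm_base was never set, so the expected call order (condensed from the clk-imx6q.c/clk-imx6sl.c and mach-imx6q.c/mach-imx6sl.c hunks in this patch) is: hand the CCM iomem base to the PM code from clock init first, then run the PM init from the machine init path. A sketch of that ordering, with illustrative example_* function names:

/* Call-order sketch only; the example_* names are placeholders. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include "common.h"

static void __init example_clocks_init(struct device_node *ccm_node)
{
	void __iomem *base = of_iomap(ccm_node, 0);

	WARN_ON(!base);
	imx6q_pm_set_ccm_base(base);	/* must come first */
}

static void __init example_init_machine(void)
{
	imx_anatop_init();
	imx6q_pm_init();		/* warns if the base was never set */
}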
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index 10a6b1a8c5ac..45f7f4e0a447 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -91,6 +91,7 @@ void imx_enable_cpu(int cpu, bool enable)
spin_lock(&scr_lock);
val = readl_relaxed(src_base + SRC_SCR);
val = enable ? val | mask : val & ~mask;
+ val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1);
writel_relaxed(val, src_base + SRC_SCR);
spin_unlock(&scr_lock);
}
@@ -114,21 +115,6 @@ void imx_set_cpu_arg(int cpu, u32 arg)
writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
}
-void imx_src_prepare_restart(void)
-{
- u32 val;
-
- /* clear enable bits of secondary cores */
- spin_lock(&scr_lock);
- val = readl_relaxed(src_base + SRC_SCR);
- val &= ~(0x7 << BP_SRC_SCR_CORE1_ENABLE);
- writel_relaxed(val, src_base + SRC_SCR);
- spin_unlock(&scr_lock);
-
- /* clear persistent entry register of primary core */
- writel_relaxed(0, src_base + SRC_GPR1);
-}
-
void __init imx_src_init(void)
{
struct device_node *np;
diff --git a/arch/arm/mach-imx/system.c b/arch/arm/mach-imx/system.c
index 80c177c36c5f..5e3027d3692f 100644
--- a/arch/arm/mach-imx/system.c
+++ b/arch/arm/mach-imx/system.c
@@ -52,6 +52,15 @@ void mxc_restart(enum reboot_mode mode, const char *cmd)
/* Assert SRS signal */
__raw_writew(wcr_enable, wdog_base);
+ /*
+ * Due to imx6q errata ERR004346 (WDOG: the WDOG SRS bit needs to be
+ * written twice), we add two more writes to ensure that at least two
+ * writes land within the same 32kHz clock period.  We skip the SoC
+ * check here, since the extra writes are not a burden on other
+ * platforms.
+ */
+ __raw_writew(wcr_enable, wdog_base);
+ __raw_writew(wcr_enable, wdog_base);
/* wait for reset to assert... */
mdelay(500);
diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c
index cd46529e9eaa..9b6638aadeaa 100644
--- a/arch/arm/mach-imx/time.c
+++ b/arch/arm/mach-imx/time.c
@@ -250,7 +250,7 @@ static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
static struct irqaction mxc_timer_irq = {
.name = "i.MX Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = mxc_timer_interrupt,
};
diff --git a/arch/arm/mach-integrator/include/mach/cm.h b/arch/arm/mach-integrator/cm.h
index 202e6a57f100..4ecff7bff482 100644
--- a/arch/arm/mach-integrator/include/mach/cm.h
+++ b/arch/arm/mach-integrator/cm.h
@@ -1,9 +1,12 @@
/*
- * update the core module control register.
+ * access the core module control register.
*/
+u32 cm_get(void);
void cm_control(u32, u32);
-#define CM_CTRL __io_address(INTEGRATOR_HDR_CTRL)
+struct device_node;
+void cm_init(void);
+void cm_clear_irqs(void);
#define CM_CTRL_LED (1 << 0)
#define CM_CTRL_nMBDET (1 << 1)
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 4cdfd7365925..00ddf20ed91b 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -22,77 +22,30 @@
#include <linux/amba/serial.h>
#include <linux/io.h>
#include <linux/stat.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <mach/hardware.h>
#include <mach/platform.h>
-#include <mach/cm.h>
-#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/time.h>
#include <asm/pgtable.h>
+#include "cm.h"
#include "common.h"
-#ifdef CONFIG_ATAGS
-
-#define INTEGRATOR_RTC_IRQ { IRQ_RTCINT }
-#define INTEGRATOR_UART0_IRQ { IRQ_UARTINT0 }
-#define INTEGRATOR_UART1_IRQ { IRQ_UARTINT1 }
-#define KMI0_IRQ { IRQ_KMIINT0 }
-#define KMI1_IRQ { IRQ_KMIINT1 }
-
-static AMBA_APB_DEVICE(rtc, "rtc", 0,
- INTEGRATOR_RTC_BASE, INTEGRATOR_RTC_IRQ, NULL);
-
-static AMBA_APB_DEVICE(uart0, "uart0", 0,
- INTEGRATOR_UART0_BASE, INTEGRATOR_UART0_IRQ, NULL);
-
-static AMBA_APB_DEVICE(uart1, "uart1", 0,
- INTEGRATOR_UART1_BASE, INTEGRATOR_UART1_IRQ, NULL);
-
-static AMBA_APB_DEVICE(kmi0, "kmi0", 0, KMI0_BASE, KMI0_IRQ, NULL);
-static AMBA_APB_DEVICE(kmi1, "kmi1", 0, KMI1_BASE, KMI1_IRQ, NULL);
-
-static struct amba_device *amba_devs[] __initdata = {
- &rtc_device,
- &uart0_device,
- &uart1_device,
- &kmi0_device,
- &kmi1_device,
-};
+static DEFINE_RAW_SPINLOCK(cm_lock);
+static void __iomem *cm_base;
-int __init integrator_init(bool is_cp)
+/**
+ * cm_get - get the value from the CM_CTRL register
+ */
+u32 cm_get(void)
{
- int i;
-
- /*
- * The Integrator/AP lacks necessary AMBA PrimeCell IDs, so we need to
- * hard-code them. The Integator/CP and forward have proper cell IDs.
- * Else we leave them undefined to the bus driver can autoprobe them.
- */
- if (!is_cp && IS_ENABLED(CONFIG_ARCH_INTEGRATOR_AP)) {
- rtc_device.periphid = 0x00041030;
- uart0_device.periphid = 0x00041010;
- uart1_device.periphid = 0x00041010;
- kmi0_device.periphid = 0x00041050;
- kmi1_device.periphid = 0x00041050;
- uart0_device.dev.platform_data = &ap_uart_data;
- uart1_device.dev.platform_data = &ap_uart_data;
- }
-
- for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
- struct amba_device *d = amba_devs[i];
- amba_device_register(d, &iomem_resource);
- }
-
- return 0;
+ return readl(cm_base + INTEGRATOR_HDR_CTRL_OFFSET);
}
-#endif
-
-static DEFINE_RAW_SPINLOCK(cm_lock);
-
/**
* cm_control - update the CM_CTRL register.
* @mask: bits to change
@@ -104,12 +57,80 @@ void cm_control(u32 mask, u32 set)
u32 val;
raw_spin_lock_irqsave(&cm_lock, flags);
- val = readl(CM_CTRL) & ~mask;
- writel(val | set, CM_CTRL);
+ val = readl(cm_base + INTEGRATOR_HDR_CTRL_OFFSET) & ~mask;
+ writel(val | set, cm_base + INTEGRATOR_HDR_CTRL_OFFSET);
raw_spin_unlock_irqrestore(&cm_lock, flags);
}
-EXPORT_SYMBOL(cm_control);
+static const char *integrator_arch_str(u32 id)
+{
+ switch ((id >> 16) & 0xff) {
+ case 0x00:
+ return "ASB little-endian";
+ case 0x01:
+ return "AHB little-endian";
+ case 0x03:
+ return "AHB-Lite system bus, bi-endian";
+ case 0x04:
+ return "AHB";
+ case 0x08:
+ return "AHB system bus, ASB processor bus";
+ default:
+ return "Unknown";
+ }
+}
+
+static const char *integrator_fpga_str(u32 id)
+{
+ switch ((id >> 12) & 0xf) {
+ case 0x01:
+ return "XC4062";
+ case 0x02:
+ return "XC4085";
+ case 0x03:
+ return "XVC600";
+ case 0x04:
+ return "EPM7256AE (Altera PLD)";
+ default:
+ return "Unknown";
+ }
+}
+
+void cm_clear_irqs(void)
+{
+ /* disable core module IRQs */
+ writel(0xffffffffU, cm_base + INTEGRATOR_HDR_IC_OFFSET +
+ IRQ_ENABLE_CLEAR);
+}
+
+static const struct of_device_id cm_match[] = {
+ { .compatible = "arm,core-module-integrator"},
+ { },
+};
+
+void cm_init(void)
+{
+ struct device_node *cm = of_find_matching_node(NULL, cm_match);
+ u32 val;
+
+ if (!cm) {
+ pr_crit("no core module node found in device tree\n");
+ return;
+ }
+ cm_base = of_iomap(cm, 0);
+ if (!cm_base) {
+ pr_crit("could not remap core module\n");
+ return;
+ }
+ cm_clear_irqs();
+ val = readl(cm_base + INTEGRATOR_HDR_ID_OFFSET);
+ pr_info("Detected ARM core module:\n");
+ pr_info(" Manufacturer: %02x\n", (val >> 24));
+ pr_info(" Architecture: %s\n", integrator_arch_str(val));
+ pr_info(" FPGA: %s\n", integrator_fpga_str(val));
+ pr_info(" Build: %02x\n", (val >> 4) & 0xFF);
+ pr_info(" Rev: %c\n", ('A' + (val & 0x03)));
+}
/*
* We need to stop things allocating the low memory; ideally we need a
@@ -145,27 +166,7 @@ static ssize_t intcp_get_arch(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- const char *arch;
-
- switch ((integrator_id >> 16) & 0xff) {
- case 0x00:
- arch = "ASB little-endian";
- break;
- case 0x01:
- arch = "AHB little-endian";
- break;
- case 0x03:
- arch = "AHB-Lite system bus, bi-endian";
- break;
- case 0x04:
- arch = "AHB";
- break;
- default:
- arch = "Unknown";
- break;
- }
-
- return sprintf(buf, "%s\n", arch);
+ return sprintf(buf, "%s\n", integrator_arch_str(integrator_id));
}
static struct device_attribute intcp_arch_attr =
@@ -175,24 +176,7 @@ static ssize_t intcp_get_fpga(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- const char *fpga;
-
- switch ((integrator_id >> 12) & 0xf) {
- case 0x01:
- fpga = "XC4062";
- break;
- case 0x02:
- fpga = "XC4085";
- break;
- case 0x04:
- fpga = "EPM7256AE (Altera PLD)";
- break;
- default:
- fpga = "Unknown";
- break;
- }
-
- return sprintf(buf, "%s\n", fpga);
+ return sprintf(buf, "%s\n", integrator_fpga_str(integrator_id));
}
static struct device_attribute intcp_fpga_attr =
diff --git a/arch/arm/mach-integrator/include/mach/irqs.h b/arch/arm/mach-integrator/include/mach/irqs.h
deleted file mode 100644
index eff0adad9ae3..000000000000
--- a/arch/arm/mach-integrator/include/mach/irqs.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * arch/arm/mach-integrator/include/mach/irqs.h
- *
- * Copyright (C) 1999 ARM Limited
- * Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/*
- * Interrupt numbers, all of the above are just static reservations
- * used so they can be encoded into device resources. They will finally
- * be done away with when switching to device tree.
- */
-#define IRQ_PIC_START 64
-#define IRQ_SOFTINT (IRQ_PIC_START+0)
-#define IRQ_UARTINT0 (IRQ_PIC_START+1)
-#define IRQ_UARTINT1 (IRQ_PIC_START+2)
-#define IRQ_KMIINT0 (IRQ_PIC_START+3)
-#define IRQ_KMIINT1 (IRQ_PIC_START+4)
-#define IRQ_TIMERINT0 (IRQ_PIC_START+5)
-#define IRQ_TIMERINT1 (IRQ_PIC_START+6)
-#define IRQ_TIMERINT2 (IRQ_PIC_START+7)
-#define IRQ_RTCINT (IRQ_PIC_START+8)
-#define IRQ_AP_EXPINT0 (IRQ_PIC_START+9)
-#define IRQ_AP_EXPINT1 (IRQ_PIC_START+10)
-#define IRQ_AP_EXPINT2 (IRQ_PIC_START+11)
-#define IRQ_AP_EXPINT3 (IRQ_PIC_START+12)
-#define IRQ_AP_PCIINT0 (IRQ_PIC_START+13)
-#define IRQ_AP_PCIINT1 (IRQ_PIC_START+14)
-#define IRQ_AP_PCIINT2 (IRQ_PIC_START+15)
-#define IRQ_AP_PCIINT3 (IRQ_PIC_START+16)
-#define IRQ_AP_V3INT (IRQ_PIC_START+17)
-#define IRQ_AP_CPINT0 (IRQ_PIC_START+18)
-#define IRQ_AP_CPINT1 (IRQ_PIC_START+19)
-#define IRQ_AP_LBUSTIMEOUT (IRQ_PIC_START+20)
-#define IRQ_AP_APCINT (IRQ_PIC_START+21)
-#define IRQ_CP_CLCDCINT (IRQ_PIC_START+22)
-#define IRQ_CP_MMCIINT0 (IRQ_PIC_START+23)
-#define IRQ_CP_MMCIINT1 (IRQ_PIC_START+24)
-#define IRQ_CP_AACIINT (IRQ_PIC_START+25)
-#define IRQ_CP_CPPLDINT (IRQ_PIC_START+26)
-#define IRQ_CP_ETHINT (IRQ_PIC_START+27)
-#define IRQ_CP_TSPENINT (IRQ_PIC_START+28)
-#define IRQ_PIC_END (IRQ_PIC_START+28)
-
-#define IRQ_CIC_START (IRQ_PIC_END+1)
-#define IRQ_CM_SOFTINT (IRQ_CIC_START+0)
-#define IRQ_CM_COMMRX (IRQ_CIC_START+1)
-#define IRQ_CM_COMMTX (IRQ_CIC_START+2)
-#define IRQ_CIC_END (IRQ_CIC_START+2)
-
-/*
- * IntegratorCP only
- */
-#define IRQ_SIC_START (IRQ_CIC_END+1)
-#define IRQ_SIC_CP_SOFTINT (IRQ_SIC_START+0)
-#define IRQ_SIC_CP_RI0 (IRQ_SIC_START+1)
-#define IRQ_SIC_CP_RI1 (IRQ_SIC_START+2)
-#define IRQ_SIC_CP_CARDIN (IRQ_SIC_START+3)
-#define IRQ_SIC_CP_LMINT0 (IRQ_SIC_START+4)
-#define IRQ_SIC_CP_LMINT1 (IRQ_SIC_START+5)
-#define IRQ_SIC_CP_LMINT2 (IRQ_SIC_START+6)
-#define IRQ_SIC_CP_LMINT3 (IRQ_SIC_START+7)
-#define IRQ_SIC_CP_LMINT4 (IRQ_SIC_START+8)
-#define IRQ_SIC_CP_LMINT5 (IRQ_SIC_START+9)
-#define IRQ_SIC_CP_LMINT6 (IRQ_SIC_START+10)
-#define IRQ_SIC_CP_LMINT7 (IRQ_SIC_START+11)
-#define IRQ_SIC_END (IRQ_SIC_START+11)
diff --git a/arch/arm/mach-integrator/include/mach/timex.h b/arch/arm/mach-integrator/include/mach/timex.h
deleted file mode 100644
index 1dcb42028c82..000000000000
--- a/arch/arm/mach-integrator/include/mach/timex.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * arch/arm/mach-integrator/include/mach/timex.h
- *
- * Integrator architecture timex specifications
- *
- * Copyright (C) 1999 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/*
- * ??
- */
-#define CLOCK_TICK_RATE (50000000 / 16)
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index d9e95e612fcb..d50dc2dbfd89 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -51,13 +51,13 @@
#include <asm/mach-types.h>
#include <mach/lm.h>
-#include <mach/irqs.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
+#include "cm.h"
#include "common.h"
#include "pci_v3.h"
@@ -146,7 +146,7 @@ static int irq_suspend(void)
static void irq_resume(void)
{
/* disable all irq sources */
- writel(-1, VA_CMIC_BASE + IRQ_ENABLE_CLEAR);
+ cm_clear_irqs();
writel(-1, VA_IC_BASE + IRQ_ENABLE_CLEAR);
writel(-1, VA_IC_BASE + FIQ_ENABLE_CLEAR);
@@ -402,8 +402,6 @@ void __init ap_init_early(void)
{
}
-#ifdef CONFIG_OF
-
static void __init ap_of_timer_init(void)
{
struct device_node *node;
@@ -450,8 +448,7 @@ static const struct of_device_id fpga_irq_of_match[] __initconst = {
static void __init ap_init_irq_of(void)
{
- /* disable core module IRQs */
- writel(0xffffffffU, VA_CMIC_BASE + IRQ_ENABLE_CLEAR);
+ cm_init();
of_irq_init(fpga_irq_of_match);
integrator_clk_init(false);
}
@@ -473,6 +470,11 @@ static struct of_dev_auxdata ap_auxdata_lookup[] __initdata = {
{ /* sentinel */ },
};
+static const struct of_device_id ap_syscon_match[] = {
+ { .compatible = "arm,integrator-ap-syscon"},
+ { },
+};
+
static void __init ap_init_of(void)
{
unsigned long sc_dec;
@@ -489,7 +491,8 @@ static void __init ap_init_of(void)
root = of_find_node_by_path("/");
if (!root)
return;
- syscon = of_find_node_by_path("/syscon");
+
+ syscon = of_find_matching_node(root, ap_syscon_match);
if (!syscon)
return;
@@ -541,7 +544,7 @@ static void __init ap_init_of(void)
lmdev->resource.start = 0xc0000000 + 0x10000000 * i;
lmdev->resource.end = lmdev->resource.start + 0x0fffffff;
lmdev->resource.flags = IORESOURCE_MEM;
- lmdev->irq = IRQ_AP_EXPINT0 + i;
+ lmdev->irq = irq_of_parse_and_map(syscon, i);
lmdev->id = i;
lm_device_register(lmdev);
@@ -564,136 +567,3 @@ DT_MACHINE_START(INTEGRATOR_AP_DT, "ARM Integrator/AP (Device Tree)")
.restart = integrator_restart,
.dt_compat = ap_dt_board_compat,
MACHINE_END
-
-#endif
-
-#ifdef CONFIG_ATAGS
-
-/*
- * For the ATAG boot some static mappings are needed. This will
- * go away with the ATAG support down the road.
- */
-
-static struct map_desc ap_io_desc_atag[] __initdata = {
- {
- .virtual = IO_ADDRESS(INTEGRATOR_SC_BASE),
- .pfn = __phys_to_pfn(INTEGRATOR_SC_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE
- },
-};
-
-static void __init ap_map_io_atag(void)
-{
- iotable_init(ap_io_desc_atag, ARRAY_SIZE(ap_io_desc_atag));
- ap_map_io();
-}
-
-/*
- * This is where non-devicetree initialization code is collected and stashed
- * for eventual deletion.
- */
-
-static struct platform_device pci_v3_device = {
- .name = "pci-v3",
- .id = 0,
-};
-
-static struct resource cfi_flash_resource = {
- .start = INTEGRATOR_FLASH_BASE,
- .end = INTEGRATOR_FLASH_BASE + INTEGRATOR_FLASH_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device cfi_flash_device = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &ap_flash_data,
- },
- .num_resources = 1,
- .resource = &cfi_flash_resource,
-};
-
-static void __init ap_timer_init(void)
-{
- struct clk *clk;
- unsigned long rate;
-
- clk = clk_get_sys("ap_timer", NULL);
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
- rate = clk_get_rate(clk);
-
- writel(0, TIMER0_VA_BASE + TIMER_CTRL);
- writel(0, TIMER1_VA_BASE + TIMER_CTRL);
- writel(0, TIMER2_VA_BASE + TIMER_CTRL);
-
- integrator_clocksource_init(rate, (void __iomem *)TIMER2_VA_BASE);
- integrator_clockevent_init(rate, (void __iomem *)TIMER1_VA_BASE,
- IRQ_TIMERINT1);
-}
-
-#define INTEGRATOR_SC_VALID_INT 0x003fffff
-
-static void __init ap_init_irq(void)
-{
- /* Disable all interrupts initially. */
- /* Do the core module ones */
- writel(-1, VA_CMIC_BASE + IRQ_ENABLE_CLEAR);
-
- /* do the header card stuff next */
- writel(-1, VA_IC_BASE + IRQ_ENABLE_CLEAR);
- writel(-1, VA_IC_BASE + FIQ_ENABLE_CLEAR);
-
- fpga_irq_init(VA_IC_BASE, "SC", IRQ_PIC_START,
- -1, INTEGRATOR_SC_VALID_INT, NULL);
- integrator_clk_init(false);
-}
-
-static void __init ap_init(void)
-{
- unsigned long sc_dec;
- int i;
-
- platform_device_register(&pci_v3_device);
- platform_device_register(&cfi_flash_device);
-
- ap_syscon_base = __io_address(INTEGRATOR_SC_BASE);
- sc_dec = readl(ap_syscon_base + INTEGRATOR_SC_DEC_OFFSET);
- for (i = 0; i < 4; i++) {
- struct lm_device *lmdev;
-
- if ((sc_dec & (16 << i)) == 0)
- continue;
-
- lmdev = kzalloc(sizeof(struct lm_device), GFP_KERNEL);
- if (!lmdev)
- continue;
-
- lmdev->resource.start = 0xc0000000 + 0x10000000 * i;
- lmdev->resource.end = lmdev->resource.start + 0x0fffffff;
- lmdev->resource.flags = IORESOURCE_MEM;
- lmdev->irq = IRQ_AP_EXPINT0 + i;
- lmdev->id = i;
-
- lm_device_register(lmdev);
- }
-
- integrator_init(false);
-}
-
-MACHINE_START(INTEGRATOR, "ARM-Integrator")
- /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .atag_offset = 0x100,
- .reserve = integrator_reserve,
- .map_io = ap_map_io_atag,
- .init_early = ap_init_early,
- .init_irq = ap_init_irq,
- .handle_irq = fpga_handle_irq,
- .init_time = ap_timer_init,
- .init_machine = ap_init,
- .restart = integrator_restart,
-MACHINE_END
-
-#endif
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 8c60fcb08a98..1df6e7602cad 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -36,9 +36,7 @@
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/icst.h>
-#include <mach/cm.h>
#include <mach/lm.h>
-#include <mach/irqs.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
@@ -50,6 +48,7 @@
#include <plat/clcd.h>
#include <plat/sched_clock.h>
+#include "cm.h"
#include "common.h"
/* Base address to the CP controller */
@@ -249,7 +248,6 @@ static void __init intcp_init_early(void)
#endif
}
-#ifdef CONFIG_OF
static const struct of_device_id fpga_irq_of_match[] __initconst = {
{ .compatible = "arm,versatile-fpga-irq", .data = fpga_irq_of_init, },
{ /* Sentinel */ }
@@ -257,6 +255,7 @@ static const struct of_device_id fpga_irq_of_match[] __initconst = {
static void __init intcp_init_irq_of(void)
{
+ cm_init();
of_irq_init(fpga_irq_of_match);
integrator_clk_init(true);
}
@@ -287,6 +286,11 @@ static struct of_dev_auxdata intcp_auxdata_lookup[] __initdata = {
{ /* sentinel */ },
};
+static const struct of_device_id intcp_syscon_match[] = {
+ { .compatible = "arm,integrator-cp-syscon"},
+ { },
+};
+
static void __init intcp_init_of(void)
{
struct device_node *root;
@@ -301,7 +305,8 @@ static void __init intcp_init_of(void)
root = of_find_node_by_path("/");
if (!root)
return;
- cpcon = of_find_node_by_path("/cpcon");
+
+ cpcon = of_find_matching_node(root, intcp_syscon_match);
if (!cpcon)
return;
@@ -354,175 +359,3 @@ DT_MACHINE_START(INTEGRATOR_CP_DT, "ARM Integrator/CP (Device Tree)")
.restart = integrator_restart,
.dt_compat = intcp_dt_board_compat,
MACHINE_END
-
-#endif
-
-#ifdef CONFIG_ATAGS
-
-/*
- * For the ATAG boot some static mappings are needed. This will
- * go away with the ATAG support down the road.
- */
-
-static struct map_desc intcp_io_desc_atag[] __initdata = {
- {
- .virtual = IO_ADDRESS(INTEGRATOR_CP_CTL_BASE),
- .pfn = __phys_to_pfn(INTEGRATOR_CP_CTL_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE
- },
-};
-
-static void __init intcp_map_io_atag(void)
-{
- iotable_init(intcp_io_desc_atag, ARRAY_SIZE(intcp_io_desc_atag));
- intcp_con_base = __io_address(INTEGRATOR_CP_CTL_BASE);
- intcp_map_io();
-}
-
-
-/*
- * This is where non-devicetree initialization code is collected and stashed
- * for eventual deletion.
- */
-
-#define INTCP_FLASH_SIZE SZ_32M
-
-static struct resource intcp_flash_resource = {
- .start = INTCP_PA_FLASH_BASE,
- .end = INTCP_PA_FLASH_BASE + INTCP_FLASH_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device intcp_flash_device = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &intcp_flash_data,
- },
- .num_resources = 1,
- .resource = &intcp_flash_resource,
-};
-
-#define INTCP_ETH_SIZE 0x10
-
-static struct resource smc91x_resources[] = {
- [0] = {
- .start = INTEGRATOR_CP_ETH_BASE,
- .end = INTEGRATOR_CP_ETH_BASE + INTCP_ETH_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_CP_ETHINT,
- .end = IRQ_CP_ETHINT,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device smc91x_device = {
- .name = "smc91x",
- .id = 0,
- .num_resources = ARRAY_SIZE(smc91x_resources),
- .resource = smc91x_resources,
-};
-
-static struct platform_device *intcp_devs[] __initdata = {
- &intcp_flash_device,
- &smc91x_device,
-};
-
-#define INTCP_VA_CIC_BASE __io_address(INTEGRATOR_HDR_BASE + 0x40)
-#define INTCP_VA_PIC_BASE __io_address(INTEGRATOR_IC_BASE)
-#define INTCP_VA_SIC_BASE __io_address(INTEGRATOR_CP_SIC_BASE)
-
-static void __init intcp_init_irq(void)
-{
- u32 pic_mask, cic_mask, sic_mask;
-
- /* These masks are for the HW IRQ registers */
- pic_mask = ~((~0u) << (11 - 0));
- pic_mask |= (~((~0u) << (29 - 22))) << 22;
- cic_mask = ~((~0u) << (1 + IRQ_CIC_END - IRQ_CIC_START));
- sic_mask = ~((~0u) << (1 + IRQ_SIC_END - IRQ_SIC_START));
-
- /*
- * Disable all interrupt sources
- */
- writel(0xffffffff, INTCP_VA_PIC_BASE + IRQ_ENABLE_CLEAR);
- writel(0xffffffff, INTCP_VA_PIC_BASE + FIQ_ENABLE_CLEAR);
- writel(0xffffffff, INTCP_VA_CIC_BASE + IRQ_ENABLE_CLEAR);
- writel(0xffffffff, INTCP_VA_CIC_BASE + FIQ_ENABLE_CLEAR);
- writel(sic_mask, INTCP_VA_SIC_BASE + IRQ_ENABLE_CLEAR);
- writel(sic_mask, INTCP_VA_SIC_BASE + FIQ_ENABLE_CLEAR);
-
- fpga_irq_init(INTCP_VA_PIC_BASE, "PIC", IRQ_PIC_START,
- -1, pic_mask, NULL);
-
- fpga_irq_init(INTCP_VA_CIC_BASE, "CIC", IRQ_CIC_START,
- -1, cic_mask, NULL);
-
- fpga_irq_init(INTCP_VA_SIC_BASE, "SIC", IRQ_SIC_START,
- IRQ_CP_CPPLDINT, sic_mask, NULL);
-
- integrator_clk_init(true);
-}
-
-#define TIMER0_VA_BASE __io_address(INTEGRATOR_TIMER0_BASE)
-#define TIMER1_VA_BASE __io_address(INTEGRATOR_TIMER1_BASE)
-#define TIMER2_VA_BASE __io_address(INTEGRATOR_TIMER2_BASE)
-
-static void __init cp_timer_init(void)
-{
- writel(0, TIMER0_VA_BASE + TIMER_CTRL);
- writel(0, TIMER1_VA_BASE + TIMER_CTRL);
- writel(0, TIMER2_VA_BASE + TIMER_CTRL);
-
- sp804_clocksource_init(TIMER2_VA_BASE, "timer2");
- sp804_clockevents_init(TIMER1_VA_BASE, IRQ_TIMERINT1, "timer1");
-}
-
-#define INTEGRATOR_CP_MMC_IRQS { IRQ_CP_MMCIINT0, IRQ_CP_MMCIINT1 }
-#define INTEGRATOR_CP_AACI_IRQS { IRQ_CP_AACIINT }
-
-static AMBA_APB_DEVICE(mmc, "mmci", 0, INTEGRATOR_CP_MMC_BASE,
- INTEGRATOR_CP_MMC_IRQS, &mmc_data);
-
-static AMBA_APB_DEVICE(aaci, "aaci", 0, INTEGRATOR_CP_AACI_BASE,
- INTEGRATOR_CP_AACI_IRQS, NULL);
-
-static AMBA_AHB_DEVICE(clcd, "clcd", 0, INTCP_PA_CLCD_BASE,
- { IRQ_CP_CLCDCINT }, &clcd_data);
-
-static struct amba_device *amba_devs[] __initdata = {
- &mmc_device,
- &aaci_device,
- &clcd_device,
-};
-
-static void __init intcp_init(void)
-{
- int i;
-
- platform_add_devices(intcp_devs, ARRAY_SIZE(intcp_devs));
-
- for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
- struct amba_device *d = amba_devs[i];
- amba_device_register(d, &iomem_resource);
- }
- integrator_init(true);
-}
-
-MACHINE_START(CINTEGRATOR, "ARM-IntegratorCP")
- /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .atag_offset = 0x100,
- .reserve = integrator_reserve,
- .map_io = intcp_map_io_atag,
- .init_early = intcp_init_early,
- .init_irq = intcp_init_irq,
- .handle_irq = fpga_handle_irq,
- .init_time = cp_timer_init,
- .init_machine = intcp_init,
- .restart = integrator_restart,
-MACHINE_END
-
-#endif
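
The integrator_cp.c hunk above replaces the hard-coded "/cpcon" path lookup with a match on the "arm,integrator-cp-syscon" compatible string. A minimal sketch of that lookup pattern, assuming only the compatible string from the hunk; the helper name is hypothetical:

#include <linux/of.h>

static const struct of_device_id example_syscon_match[] = {
	{ .compatible = "arm,integrator-cp-syscon" },
	{ /* sentinel */ },
};

static struct device_node *example_find_syscon(void)
{
	/* NULL start node: search the whole tree; the returned node holds
	 * a reference that must be dropped with of_node_put() later. */
	return of_find_matching_node(NULL, example_syscon_match);
}
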
diff --git a/arch/arm/mach-integrator/leds.c b/arch/arm/mach-integrator/leds.c
index 7a7f6d3273bf..cb6ac58f5e07 100644
--- a/arch/arm/mach-integrator/leds.c
+++ b/arch/arm/mach-integrator/leds.c
@@ -11,10 +11,11 @@
#include <linux/slab.h>
#include <linux/leds.h>
-#include <mach/cm.h>
#include <mach/hardware.h>
#include <mach/platform.h>
+#include "cm.h"
+
#if defined(CONFIG_NEW_LEDS) && defined(CONFIG_LEDS_CLASS)
#define ALPHA_REG __io_address(INTEGRATOR_DBG_BASE)
@@ -78,7 +79,7 @@ static void cm_led_set(struct led_classdev *cdev,
static enum led_brightness cm_led_get(struct led_classdev *cdev)
{
- u32 reg = readl(CM_CTRL);
+ u32 reg = cm_get();
return (reg & CM_CTRL_LED) ? LED_FULL : LED_OFF;
}
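
The leds.c hunk above reads the core-module control register through the new cm_get() accessor instead of dereferencing CM_CTRL directly. A sketch of what such an accessor can look like; the base pointer and the register offset are assumptions, the real implementation lives in the new cm.h/core-module code:

#include <linux/io.h>

static void __iomem *cm_base;	/* mapped core-module registers (assumed) */

u32 cm_get(void)
{
	/* 0x0c as the CM_CTRL offset is an assumption for illustration */
	return readl(cm_base + 0x0c);
}
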
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index bef100527c42..c5e01b24d9fb 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -36,7 +36,6 @@
#include <mach/hardware.h>
#include <mach/platform.h>
-#include <mach/irqs.h>
#include <asm/mach/map.h>
#include <asm/signal.h>
@@ -605,7 +604,7 @@ v3_pci_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
return 1;
}
-static irqreturn_t v3_irq(int dummy, void *devid)
+static irqreturn_t v3_irq(int irq, void *devid)
{
#ifdef CONFIG_DEBUG_LL
struct pt_regs *regs = get_irq_regs();
@@ -615,7 +614,7 @@ static irqreturn_t v3_irq(int dummy, void *devid)
extern void printascii(const char *);
sprintf(buf, "V3 int %d: pc=0x%08lx [%08lx] LBFADDR=%08x LBFCODE=%02x "
- "ISTAT=%02x\n", IRQ_AP_V3INT, pc, instr,
+ "ISTAT=%02x\n", irq, pc, instr,
__raw_readl(ap_syscon_base + INTEGRATOR_SC_LBFADDR_OFFSET),
__raw_readl(ap_syscon_base + INTEGRATOR_SC_LBFCODE_OFFSET) & 255,
v3_readb(V3_LB_ISTAT));
@@ -809,21 +808,6 @@ static u8 __init pci_v3_swizzle(struct pci_dev *dev, u8 *pinp)
return pci_common_swizzle(dev, pinp);
}
-static int irq_tab[4] __initdata = {
- IRQ_AP_PCIINT0, IRQ_AP_PCIINT1, IRQ_AP_PCIINT2, IRQ_AP_PCIINT3
-};
-
-/*
- * map the specified device/slot/pin to an IRQ. This works out such
- * that slot 9 pin 1 is INT0, pin 2 is INT1, and slot 10 pin 1 is INT1.
- */
-static int __init pci_v3_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- int intnr = ((slot - 9) + (pin - 1)) & 3;
-
- return irq_tab[intnr];
-}
-
static struct hw_pci pci_v3 __initdata = {
.swizzle = pci_v3_swizzle,
.setup = pci_v3_setup,
@@ -833,32 +817,27 @@ static struct hw_pci pci_v3 __initdata = {
.postinit = pci_v3_postinit,
};
-#ifdef CONFIG_OF
-
-static int __init pci_v3_map_irq_dt(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- struct of_irq oirq;
- int ret;
-
- ret = of_irq_map_pci(dev, &oirq);
- if (ret) {
- dev_err(&dev->dev, "of_irq_map_pci() %d\n", ret);
- /* Proper return code 0 == NO_IRQ */
- return 0;
- }
-
- return irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
-}
-
-static int __init pci_v3_dtprobe(struct platform_device *pdev,
- struct device_node *np)
+static int __init pci_v3_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct of_pci_range_parser parser;
struct of_pci_range range;
struct resource *res;
int irq, ret;
+ /* Remap the Integrator system controller */
+ ap_syscon_base = devm_ioremap(&pdev->dev, INTEGRATOR_SC_BASE, 0x100);
+ if (!ap_syscon_base) {
+ dev_err(&pdev->dev, "unable to remap the AP syscon for PCIv3\n");
+ return -ENODEV;
+ }
+
+ /* Device tree probe path */
+ if (!np) {
+ dev_err(&pdev->dev, "no device tree node for PCIv3\n");
+ return -ENODEV;
+ }
+
if (of_pci_range_parser_init(&parser, np))
return -EINVAL;
@@ -919,77 +898,7 @@ static int __init pci_v3_dtprobe(struct platform_device *pdev,
return -EINVAL;
}
- pci_v3.map_irq = pci_v3_map_irq_dt;
- pci_common_init_dev(&pdev->dev, &pci_v3);
-
- return 0;
-}
-
-#else
-
-static inline int pci_v3_dtprobe(struct platform_device *pdev,
- struct device_node *np)
-{
- return -EINVAL;
-}
-
-#endif
-
-static int __init pci_v3_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- int ret;
-
- /* Remap the Integrator system controller */
- ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100);
- if (!ap_syscon_base) {
- dev_err(&pdev->dev, "unable to remap the AP syscon for PCIv3\n");
- return -ENODEV;
- }
-
- /* Device tree probe path */
- if (np)
- return pci_v3_dtprobe(pdev, np);
-
- pci_v3_base = devm_ioremap(&pdev->dev, PHYS_PCI_V3_BASE, SZ_64K);
- if (!pci_v3_base) {
- dev_err(&pdev->dev, "unable to remap PCIv3 base\n");
- return -ENODEV;
- }
-
- ret = devm_request_irq(&pdev->dev, IRQ_AP_V3INT, v3_irq, 0, "V3", NULL);
- if (ret) {
- dev_err(&pdev->dev, "unable to grab PCI error interrupt: %d\n",
- ret);
- return -ENODEV;
- }
-
- conf_mem.name = "PCIv3 config";
- conf_mem.start = PHYS_PCI_CONFIG_BASE;
- conf_mem.end = PHYS_PCI_CONFIG_BASE + SZ_16M - 1;
- conf_mem.flags = IORESOURCE_MEM;
-
- io_mem.name = "PCIv3 I/O";
- io_mem.start = PHYS_PCI_IO_BASE;
- io_mem.end = PHYS_PCI_IO_BASE + SZ_16M - 1;
- io_mem.flags = IORESOURCE_MEM;
-
- non_mem_pci = 0x00000000;
- non_mem_pci_sz = SZ_256M;
- non_mem.name = "PCIv3 non-prefetched mem";
- non_mem.start = PHYS_PCI_MEM_BASE;
- non_mem.end = PHYS_PCI_MEM_BASE + SZ_256M - 1;
- non_mem.flags = IORESOURCE_MEM;
-
- pre_mem_pci = 0x10000000;
- pre_mem_pci_sz = SZ_256M;
- pre_mem.name = "PCIv3 prefetched mem";
- pre_mem.start = PHYS_PCI_PRE_BASE + SZ_256M;
- pre_mem.end = PHYS_PCI_PRE_BASE + SZ_256M - 1;
- pre_mem.flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
-
- pci_v3.map_irq = pci_v3_map_irq;
-
+ pci_v3.map_irq = of_irq_parse_and_map_pci;
pci_common_init_dev(&pdev->dev, &pci_v3);
return 0;
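
With the legacy IRQ table gone, the pci_v3.c probe above maps PCI interrupts through the generic of_irq_parse_and_map_pci() helper, whose signature matches the hw_pci .map_irq hook. A minimal sketch of that wiring; apart from the helper and struct hw_pci, the names are hypothetical:

#include <linux/init.h>
#include <linux/of_pci.h>
#include <asm/mach/pci.h>

static struct hw_pci example_pci __initdata = {
	.nr_controllers	= 1,
	/* resolve the interrupt-map from the device tree, no static table */
	.map_irq	= of_irq_parse_and_map_pci,
};
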
diff --git a/arch/arm/mach-iop13xx/include/mach/timex.h b/arch/arm/mach-iop13xx/include/mach/timex.h
deleted file mode 100644
index 45fb2745bb54..000000000000
--- a/arch/arm/mach-iop13xx/include/mach/timex.h
+++ /dev/null
@@ -1 +0,0 @@
-#define CLOCK_TICK_RATE (100 * HZ)
diff --git a/arch/arm/mach-iop32x/include/mach/timex.h b/arch/arm/mach-iop32x/include/mach/timex.h
deleted file mode 100644
index 7262ab81419d..000000000000
--- a/arch/arm/mach-iop32x/include/mach/timex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/*
- * arch/arm/mach-iop32x/include/mach/timex.h
- *
- * IOP32x architecture timex specifications
- */
-#define CLOCK_TICK_RATE (100 * HZ)
diff --git a/arch/arm/mach-iop33x/include/mach/timex.h b/arch/arm/mach-iop33x/include/mach/timex.h
deleted file mode 100644
index 54c589091d6e..000000000000
--- a/arch/arm/mach-iop33x/include/mach/timex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/*
- * arch/arm/mach-iop33x/include/mach/timex.h
- *
- * IOP3xx architecture timex specifications
- */
-#define CLOCK_TICK_RATE (100 * HZ)
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 30e1ebe3a891..c342dc4e8a45 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -1,9 +1,5 @@
if ARCH_IXP4XX
-config ARCH_SUPPORTS_BIG_ENDIAN
- bool
- default y
-
menu "Intel IXP4xx Implementation Options"
comment "IXP4xx Platforms"
diff --git a/arch/arm/mach-keystone/Kconfig b/arch/arm/mach-keystone/Kconfig
index 366d1a3b418d..f20c53e75ed9 100644
--- a/arch/arm/mach-keystone/Kconfig
+++ b/arch/arm/mach-keystone/Kconfig
@@ -9,6 +9,8 @@ config ARCH_KEYSTONE
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_ERRATA_798181 if SMP
+ select COMMON_CLK_KEYSTONE
+ select TI_EDMA
help
Support for boards based on the Texas Instruments Keystone family of
SoCs.
diff --git a/arch/arm/mach-keystone/Makefile b/arch/arm/mach-keystone/Makefile
index ddc52b05dc84..25d92396fbfa 100644
--- a/arch/arm/mach-keystone/Makefile
+++ b/arch/arm/mach-keystone/Makefile
@@ -4,3 +4,6 @@ plus_sec := $(call as-instr,.arch_extension sec,+sec)
AFLAGS_smc.o :=-Wa,-march=armv7-a$(plus_sec)
obj-$(CONFIG_SMP) += platsmp.o
+
+# PM domain driver for Keystone SoCs
+obj-$(CONFIG_ARCH_KEYSTONE) += pm_domain.o
diff --git a/arch/arm/mach-keystone/platsmp.c b/arch/arm/mach-keystone/platsmp.c
index c12296157d4a..5cf0683577ea 100644
--- a/arch/arm/mach-keystone/platsmp.c
+++ b/arch/arm/mach-keystone/platsmp.c
@@ -17,7 +17,6 @@
#include <linux/io.h>
#include <asm/smp_plat.h>
-#include <asm/prom.h>
#include "keystone.h"
diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c
new file mode 100644
index 000000000000..29625232e954
--- /dev/null
+++ b/arch/arm/mach-keystone/pm_domain.c
@@ -0,0 +1,82 @@
+/*
+ * PM domain driver for Keystone2 devices
+ *
+ * Copyright 2013 Texas Instruments, Inc.
+ * Santosh Shilimkar <santosh.shillimkar@ti.com>
+ *
+ * Based on Kevin's work on DaVinci SoCs
+ * Kevin Hilman <khilman@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+
+#ifdef CONFIG_PM_RUNTIME
+static int keystone_pm_runtime_suspend(struct device *dev)
+{
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ ret = pm_generic_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_clk_suspend(dev);
+ if (ret) {
+ pm_generic_runtime_resume(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int keystone_pm_runtime_resume(struct device *dev)
+{
+ dev_dbg(dev, "%s\n", __func__);
+
+ pm_clk_resume(dev);
+
+ return pm_generic_runtime_resume(dev);
+}
+#endif
+
+static struct dev_pm_domain keystone_pm_domain = {
+ .ops = {
+ SET_RUNTIME_PM_OPS(keystone_pm_runtime_suspend,
+ keystone_pm_runtime_resume, NULL)
+ USE_PLATFORM_PM_SLEEP_OPS
+ },
+};
+
+static struct pm_clk_notifier_block platform_domain_notifier = {
+ .pm_domain = &keystone_pm_domain,
+};
+
+static struct of_device_id of_keystone_table[] = {
+ {.compatible = "ti,keystone"},
+ { /* end of list */ },
+};
+
+int __init keystone_pm_runtime_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, of_keystone_table);
+ if (!np)
+ return 0;
+
+ of_clk_init(NULL);
+ pm_clk_add_notifier(&platform_bus_type, &platform_domain_notifier);
+
+ return 0;
+}
+subsys_initcall(keystone_pm_runtime_init);
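
The Keystone PM domain above gates and ungates functional clocks through pm_clk whenever a bound device goes through runtime PM. A minimal sketch of a consumer driver that benefits from it; the driver and function names are hypothetical:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);

	/* resumes via keystone_pm_runtime_resume(), which ungates the clocks */
	pm_runtime_get_sync(&pdev->dev);

	/* ... hardware setup ... */

	/* lets keystone_pm_runtime_suspend() gate the clocks when idle */
	pm_runtime_put(&pdev->dev);
	return 0;
}
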
diff --git a/arch/arm/mach-kirkwood/Makefile b/arch/arm/mach-kirkwood/Makefile
index d1f8e3d0793b..144b51102939 100644
--- a/arch/arm/mach-kirkwood/Makefile
+++ b/arch/arm/mach-kirkwood/Makefile
@@ -1,5 +1,7 @@
obj-y += common.o pcie.o
obj-$(CONFIG_KIRKWOOD_LEGACY) += irq.o mpp.o
+obj-$(CONFIG_PM) += pm.o
+
obj-$(CONFIG_MACH_D2NET_V2) += d2net_v2-setup.o lacie_v2-common.o
obj-$(CONFIG_MACH_NET2BIG_V2) += netxbig_v2-setup.o lacie_v2-common.o
obj-$(CONFIG_MACH_NET5BIG_V2) += netxbig_v2-setup.o lacie_v2-common.o
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index 82d3ad8e87cf..9caa4fe95913 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -13,9 +13,10 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/clk-provider.h>
-#include <linux/clocksource.h>
#include <linux/dma-mapping.h>
#include <linux/irqchip.h>
#include <linux/kexec.h>
@@ -44,14 +45,6 @@ static void __init kirkwood_legacy_clk_init(void)
clkspec.np = np;
clkspec.args_count = 1;
- clkspec.args[0] = CGC_BIT_PEX0;
- orion_clkdev_add("0", "pcie",
- of_clk_get_from_provider(&clkspec));
-
- clkspec.args[0] = CGC_BIT_PEX1;
- orion_clkdev_add("1", "pcie",
- of_clk_get_from_provider(&clkspec));
-
/*
* The ethernet interfaces forget the MAC address assigned by
* u-boot if the clocks are turned off. Until proper DT support
@@ -66,17 +59,83 @@ static void __init kirkwood_legacy_clk_init(void)
clk_prepare_enable(clk);
}
-static void __init kirkwood_dt_time_init(void)
-{
- of_clk_init(NULL);
- clocksource_of_init();
-}
+#define MV643XX_ETH_MAC_ADDR_LOW 0x0414
+#define MV643XX_ETH_MAC_ADDR_HIGH 0x0418
-static void __init kirkwood_dt_init_early(void)
+static void __init kirkwood_dt_eth_fixup(void)
{
- mvebu_mbus_init("marvell,kirkwood-mbus",
- BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
- DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ);
+ struct device_node *np;
+
+ /*
+ * The ethernet interfaces forget the MAC address assigned by u-boot
+ * if the clocks are turned off. Usually, u-boot on kirkwood boards
+ * has no DT support to properly set the local-mac-address property.
+ * As a workaround, we get the MAC address from mv643xx_eth registers
+ * and update the port device node if no valid MAC address is set.
+ */
+ for_each_compatible_node(np, NULL, "marvell,kirkwood-eth-port") {
+ struct device_node *pnp = of_get_parent(np);
+ struct clk *clk;
+ struct property *pmac;
+ void __iomem *io;
+ u8 *macaddr;
+ u32 reg;
+
+ if (!pnp)
+ continue;
+
+ /* skip disabled nodes or nodes with a valid MAC address */
+ if (!of_device_is_available(pnp) || of_get_mac_address(np))
+ goto eth_fixup_skip;
+
+ clk = of_clk_get(pnp, 0);
+ if (IS_ERR(clk))
+ goto eth_fixup_skip;
+
+ io = of_iomap(pnp, 0);
+ if (!io)
+ goto eth_fixup_no_map;
+
+ /* ensure the port clock is ungated so register access does not hang the CPU */
+ clk_prepare_enable(clk);
+
+ /* store MAC address register contents in local-mac-address */
+ pr_err(FW_INFO "%s: local-mac-address is not set\n",
+ np->full_name);
+
+ pmac = kzalloc(sizeof(*pmac) + 6, GFP_KERNEL);
+ if (!pmac)
+ goto eth_fixup_no_mem;
+
+ pmac->value = pmac + 1;
+ pmac->length = 6;
+ pmac->name = kstrdup("local-mac-address", GFP_KERNEL);
+ if (!pmac->name) {
+ kfree(pmac);
+ goto eth_fixup_no_mem;
+ }
+
+ macaddr = pmac->value;
+ reg = readl(io + MV643XX_ETH_MAC_ADDR_HIGH);
+ macaddr[0] = (reg >> 24) & 0xff;
+ macaddr[1] = (reg >> 16) & 0xff;
+ macaddr[2] = (reg >> 8) & 0xff;
+ macaddr[3] = reg & 0xff;
+
+ reg = readl(io + MV643XX_ETH_MAC_ADDR_LOW);
+ macaddr[4] = (reg >> 8) & 0xff;
+ macaddr[5] = reg & 0xff;
+
+ of_update_property(np, pmac);
+
+eth_fixup_no_mem:
+ iounmap(io);
+ clk_disable_unprepare(clk);
+eth_fixup_no_map:
+ clk_put(clk);
+eth_fixup_skip:
+ of_node_put(pnp);
+ }
}
static void __init kirkwood_dt_init(void)
@@ -92,16 +151,16 @@ static void __init kirkwood_dt_init(void)
writel(readl(CPU_CONFIG) & ~CPU_CONFIG_ERROR_PROP, CPU_CONFIG);
BUG_ON(mvebu_mbus_dt_init());
- kirkwood_setup_wins();
kirkwood_l2_init();
kirkwood_cpufreq_init();
-
+ kirkwood_cpuidle_init();
/* Setup clocks for legacy devices */
kirkwood_legacy_clk_init();
- kirkwood_cpuidle_init();
+ kirkwood_pm_init();
+ kirkwood_dt_eth_fixup();
#ifdef CONFIG_KEXEC
kexec_reinit = kirkwood_enable_pcie;
@@ -121,8 +180,6 @@ static const char * const kirkwood_dt_board_compat[] = {
DT_MACHINE_START(KIRKWOOD_DT, "Marvell Kirkwood (Flattened Device Tree)")
/* Maintainer: Jason Cooper <jason@lakedaemon.net> */
.map_io = kirkwood_map_io,
- .init_early = kirkwood_dt_init_early,
- .init_time = kirkwood_dt_time_init,
.init_machine = kirkwood_dt_init,
.restart = kirkwood_restart,
.dt_compat = kirkwood_dt_board_compat,
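
The board-dt.c fixup above recovers the MAC address that u-boot programmed into the mv643xx_eth registers. A minimal sketch of how the two registers map onto the six address bytes, using the offsets from the hunk; the helper name is hypothetical:

#include <linux/io.h>

static void example_read_mac(void __iomem *io, u8 mac[6])
{
	u32 hi = readl(io + 0x0418);	/* MV643XX_ETH_MAC_ADDR_HIGH */
	u32 lo = readl(io + 0x0414);	/* MV643XX_ETH_MAC_ADDR_LOW */

	mac[0] = hi >> 24;		/* most significant byte first */
	mac[1] = hi >> 16;
	mac[2] = hi >> 8;
	mac[3] = hi;
	mac[4] = lo >> 8;
	mac[5] = lo;
}
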
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 176761134a66..f3407a5db216 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -721,6 +721,7 @@ void __init kirkwood_init(void)
kirkwood_xor1_init();
kirkwood_crypto_init();
+ kirkwood_pm_init();
kirkwood_cpuidle_init();
#ifdef CONFIG_KEXEC
kexec_reinit = kirkwood_enable_pcie;
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h
index 1296de94febf..05fd648df543 100644
--- a/arch/arm/mach-kirkwood/common.h
+++ b/arch/arm/mach-kirkwood/common.h
@@ -58,6 +58,12 @@ void kirkwood_cpufreq_init(void);
void kirkwood_restart(enum reboot_mode, const char *);
void kirkwood_clk_init(void);
+#ifdef CONFIG_PM
+void kirkwood_pm_init(void);
+#else
+static inline void kirkwood_pm_init(void) {};
+#endif
+
/* board init functions for boards not fully converted to fdt */
#ifdef CONFIG_MACH_MV88F6281GTW_GE_DT
void mv88f6281gtw_ge_init(void);
diff --git a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
index 91242c944d7a..8b9d1c9ff199 100644
--- a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
+++ b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
@@ -78,4 +78,6 @@
#define CGC_TDM (1 << 20)
#define CGC_RESERVED (0x6 << 21)
+#define MEMORY_PM_CTRL (BRIDGE_VIRT_BASE + 0x118)
+
#endif
diff --git a/arch/arm/mach-kirkwood/include/mach/timex.h b/arch/arm/mach-kirkwood/include/mach/timex.h
deleted file mode 100644
index c923cd169b9c..000000000000
--- a/arch/arm/mach-kirkwood/include/mach/timex.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * arch/arm/mach-kirkwood/include/mach/timex.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#define CLOCK_TICK_RATE (100 * HZ)
-
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.c b/arch/arm/mach-kirkwood/lacie_v2-common.c
index 489495976fcd..8e3e4331c380 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.c
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.c
@@ -12,7 +12,7 @@
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
#include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/gpio.h>
#include <asm/mach/time.h>
#include <mach/kirkwood.h>
diff --git a/arch/arm/mach-kirkwood/pm.c b/arch/arm/mach-kirkwood/pm.c
new file mode 100644
index 000000000000..8783a7184e73
--- /dev/null
+++ b/arch/arm/mach-kirkwood/pm.c
@@ -0,0 +1,73 @@
+/*
+ * Power Management driver for Marvell Kirkwood SoCs
+ *
+ * Copyright (C) 2013 Ezequiel Garcia <ezequiel@free-electrons.com>
+ * Copyright (C) 2010 Simon Guinot <sguinot@lacie.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License,
+ * version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+#include <linux/io.h>
+#include <mach/bridge-regs.h>
+
+static void __iomem *ddr_operation_base;
+
+static void kirkwood_low_power(void)
+{
+ u32 mem_pm_ctrl;
+
+ mem_pm_ctrl = readl(MEMORY_PM_CTRL);
+
+ /* Set peripherals to low-power mode */
+ writel_relaxed(~0, MEMORY_PM_CTRL);
+
+ /* Set DDR in self-refresh */
+ writel_relaxed(0x7, ddr_operation_base);
+
+ /*
+ * Set CPU in wait-for-interrupt state.
+ * This disables the CPU core clocks,
+ * the array clocks, and also the L2 controller.
+ */
+ cpu_do_idle();
+
+ writel_relaxed(mem_pm_ctrl, MEMORY_PM_CTRL);
+}
+
+static int kirkwood_suspend_enter(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ kirkwood_low_power();
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int kirkwood_pm_valid_standby(suspend_state_t state)
+{
+ return state == PM_SUSPEND_STANDBY;
+}
+
+static const struct platform_suspend_ops kirkwood_suspend_ops = {
+ .enter = kirkwood_suspend_enter,
+ .valid = kirkwood_pm_valid_standby,
+};
+
+int __init kirkwood_pm_init(void)
+{
+ ddr_operation_base = ioremap(DDR_OPERATION_BASE, 4);
+ suspend_set_ops(&kirkwood_suspend_ops);
+ return 0;
+}
diff --git a/arch/arm/mach-lpc32xx/include/mach/timex.h b/arch/arm/mach-lpc32xx/include/mach/timex.h
deleted file mode 100644
index 8d4066b16b3f..000000000000
--- a/arch/arm/mach-lpc32xx/include/mach/timex.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * arch/arm/mach-lpc32xx/include/mach/timex.h
- *
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_TIMEX_H
-#define __ASM_ARCH_TIMEX_H
-
-/*
- * Rate in Hz of the main system oscillator. This value should match
- * the value 'MAIN_OSC_FREQ' in platform.h
- */
-#define CLOCK_TICK_RATE 13000000
-
-#endif
diff --git a/arch/arm/mach-mmp/include/mach/timex.h b/arch/arm/mach-mmp/include/mach/timex.h
deleted file mode 100644
index 70c9f1d88c02..000000000000
--- a/arch/arm/mach-mmp/include/mach/timex.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * linux/arch/arm/mach-mmp/include/mach/timex.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifdef CONFIG_CPU_MMP2
-#define CLOCK_TICK_RATE 6500000
-#else
-#define CLOCK_TICK_RATE 3250000
-#endif
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index 702232996c8c..cfadd974f5ce 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -191,7 +191,6 @@ static struct pxa3xx_nand_platform_data dkb_nand_info = {
#define SCLK_SOURCE_SELECT(x) (x << 30) /* 0x0 ~ 0x3 */
/* link config */
#define CFG_DUMBMODE(mode) (mode << 28) /* 0x0 ~ 0x6*/
-#define CFG_GRA_SWAPRB(x) (x << 0) /* 1: rbswap enabled */
static struct mmp_mach_path_config dkb_disp_config[] = {
[0] = {
.name = "mmp-parallel",
@@ -199,8 +198,7 @@ static struct mmp_mach_path_config dkb_disp_config[] = {
.output_type = PATH_OUT_PARALLEL,
.path_config = CFG_IOPADMODE(0x1)
| SCLK_SOURCE_SELECT(0x1),
- .link_config = CFG_DUMBMODE(0x2)
- | CFG_GRA_SWAPRB(0x1),
+ .link_config = CFG_DUMBMODE(0x2),
},
};
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 905efc8cac79..2586c2865874 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -1,12 +1,12 @@
if ARCH_MSM
comment "Qualcomm MSM SoC Type"
- depends on (ARCH_MSM8X60 || ARCH_MSM8960)
+ depends on ARCH_MSM_DT
choice
prompt "Qualcomm MSM SoC Type"
default ARCH_MSM7X00A
- depends on !(ARCH_MSM8X60 || ARCH_MSM8960)
+ depends on !ARCH_MSM_DT
config ARCH_MSM7X00A
bool "MSM7x00A / MSM7x01A"
@@ -49,7 +49,6 @@ config ARCH_MSM8X60
select GPIO_MSM_V2
select HAVE_SMP
select MSM_SCM if SMP
- select USE_OF
config ARCH_MSM8960
bool "MSM8960"
@@ -58,6 +57,11 @@ config ARCH_MSM8960
select HAVE_SMP
select GPIO_MSM_V2
select MSM_SCM if SMP
+
+config ARCH_MSM_DT
+ def_bool y
+ depends on (ARCH_MSM8X60 || ARCH_MSM8960)
+ select SPARSE_IRQ
select USE_OF
config MSM_HAS_DEBUG_UART_HS
@@ -68,6 +72,7 @@ config MSM_SOC_REV_A
config ARCH_MSM_ARM11
bool
+
config ARCH_MSM_SCORPION
bool
@@ -75,6 +80,7 @@ config MSM_VIC
bool
menu "Qualcomm MSM Board Type"
+ depends on !ARCH_MSM_DT
config MACH_HALIBUT
depends on ARCH_MSM
@@ -122,6 +128,7 @@ config MSM_SMD
config MSM_GPIOMUX
bool
+ depends on !ARCH_MSM_DT
help
Support for MSM V1 TLMM GPIOMUX architecture.
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index d872634c2f85..7ed4c1b2bdd2 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_MACH_TROUT) += board-trout.o board-trout-gpio.o board-trout-mmc.o b
obj-$(CONFIG_MACH_HALIBUT) += board-halibut.o devices-msm7x00.o
obj-$(CONFIG_ARCH_MSM7X30) += board-msm7x30.o devices-msm7x30.o
obj-$(CONFIG_ARCH_QSD8X50) += board-qsd8x50.o devices-qsd8x50.o
-obj-$(CONFIG_ARCH_MSM8X60) += board-dt-8660.o
-obj-$(CONFIG_ARCH_MSM8960) += board-dt-8960.o
+obj-$(CONFIG_ARCH_MSM_DT) += board-dt.o
obj-$(CONFIG_MSM_GPIOMUX) += gpiomux.o
obj-$(CONFIG_ARCH_QSD8X50) += gpiomux-8x50.o
diff --git a/arch/arm/mach-msm/board-dt-8660.c b/arch/arm/mach-msm/board-dt-8660.c
deleted file mode 100644
index c2946892f5e3..000000000000
--- a/arch/arm/mach-msm/board-dt-8660.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-
-static void __init msm8x60_init_late(void)
-{
- smd_debugfs_init();
-}
-
-static struct of_dev_auxdata msm_auxdata_lookup[] __initdata = {
- {}
-};
-
-static void __init msm8x60_dt_init(void)
-{
- of_platform_populate(NULL, of_default_bus_match_table,
- msm_auxdata_lookup, NULL);
-}
-
-static const char *msm8x60_fluid_match[] __initdata = {
- "qcom,msm8660-fluid",
- "qcom,msm8660-surf",
- NULL
-};
-
-DT_MACHINE_START(MSM_DT, "Qualcomm MSM (Flattened Device Tree)")
- .smp = smp_ops(msm_smp_ops),
- .init_machine = msm8x60_dt_init,
- .init_late = msm8x60_init_late,
- .dt_compat = msm8x60_fluid_match,
-MACHINE_END
diff --git a/arch/arm/mach-msm/board-dt-8960.c b/arch/arm/mach-msm/board-dt.c
index d4ca52c45111..16e6183ac9f1 100644
--- a/arch/arm/mach-msm/board-dt-8960.c
+++ b/arch/arm/mach-msm/board-dt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2012,2013 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,7 @@
*/
#include <linux/init.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
@@ -18,18 +19,14 @@
#include "common.h"
-static void __init msm_dt_init(void)
-{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-
-static const char * const msm8960_dt_match[] __initconst = {
+static const char * const msm_dt_match[] __initconst = {
+ "qcom,msm8660-fluid",
+ "qcom,msm8660-surf",
"qcom,msm8960-cdp",
NULL
};
-DT_MACHINE_START(MSM8960_DT, "Qualcomm MSM (Flattened Device Tree)")
+DT_MACHINE_START(MSM_DT, "Qualcomm MSM (Flattened Device Tree)")
.smp = smp_ops(msm_smp_ops),
- .init_machine = msm_dt_init,
- .dt_compat = msm8960_dt_match,
+ .dt_compat = msm_dt_match,
MACHINE_END
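
The consolidated MSM board file above drops its .init_machine hook entirely. That relies on the ARM core's default behavior for DT machines, which, for this kernel generation, falls back to of_platform_populate() (and to of_clk_init()/clocksource_of_init() for time) when the hooks are absent. A minimal sketch of such a hook-less descriptor, with hypothetical names:

#include <linux/init.h>
#include <asm/mach/arch.h>

static const char * const example_dt_match[] __initconst = {
	"vendor,example-board",
	NULL
};

/* no .init_machine/.init_time: the generic DT paths do the work */
DT_MACHINE_START(EXAMPLE_DT, "Example board (Flattened Device Tree)")
	.dt_compat = example_dt_match,
MACHINE_END
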
diff --git a/arch/arm/mach-msm/include/mach/irqs-8960.h b/arch/arm/mach-msm/include/mach/irqs-8960.h
deleted file mode 100644
index 81ab2a6792bd..000000000000
--- a/arch/arm/mach-msm/include/mach/irqs-8960.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/* Copyright (c) 2011 Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_MSM_IRQS_8960_H
-#define __ASM_ARCH_MSM_IRQS_8960_H
-
-/* MSM ACPU Interrupt Numbers */
-
-/* 0-15: STI/SGI (software triggered/generated interrupts)
- 16-31: PPI (private peripheral interrupts)
- 32+: SPI (shared peripheral interrupts) */
-
-#define GIC_PPI_START 16
-#define GIC_SPI_START 32
-
-#define INT_VGIC (GIC_PPI_START + 0)
-#define INT_DEBUG_TIMER_EXP (GIC_PPI_START + 1)
-#define INT_GP_TIMER_EXP (GIC_PPI_START + 2)
-#define INT_GP_TIMER2_EXP (GIC_PPI_START + 3)
-#define WDT0_ACCSCSSNBARK_INT (GIC_PPI_START + 4)
-#define WDT1_ACCSCSSNBARK_INT (GIC_PPI_START + 5)
-#define AVS_SVICINT (GIC_PPI_START + 6)
-#define AVS_SVICINTSWDONE (GIC_PPI_START + 7)
-#define CPU_DBGCPUXCOMMRXFULL (GIC_PPI_START + 8)
-#define CPU_DBGCPUXCOMMTXEMPTY (GIC_PPI_START + 9)
-#define CPU_SICCPUXPERFMONIRPTREQ (GIC_PPI_START + 10)
-#define SC_AVSCPUXDOWN (GIC_PPI_START + 11)
-#define SC_AVSCPUXUP (GIC_PPI_START + 12)
-#define SC_SICCPUXACGIRPTREQ (GIC_PPI_START + 13)
-#define SC_SICCPUXEXTFAULTIRPTREQ (GIC_PPI_START + 14)
-/* PPI 15 is unused */
-
-#define SC_SICMPUIRPTREQ (GIC_SPI_START + 0)
-#define SC_SICL2IRPTREQ (GIC_SPI_START + 1)
-#define SC_SICL2PERFMONIRPTREQ (GIC_SPI_START + 2)
-#define SC_SICAGCIRPTREQ (GIC_SPI_START + 3)
-#define TLMM_APCC_DIR_CONN_IRQ_0 (GIC_SPI_START + 4)
-#define TLMM_APCC_DIR_CONN_IRQ_1 (GIC_SPI_START + 5)
-#define TLMM_APCC_DIR_CONN_IRQ_2 (GIC_SPI_START + 6)
-#define TLMM_APCC_DIR_CONN_IRQ_3 (GIC_SPI_START + 7)
-#define TLMM_APCC_DIR_CONN_IRQ_4 (GIC_SPI_START + 8)
-#define TLMM_APCC_DIR_CONN_IRQ_5 (GIC_SPI_START + 9)
-#define TLMM_APCC_DIR_CONN_IRQ_6 (GIC_SPI_START + 10)
-#define TLMM_APCC_DIR_CONN_IRQ_7 (GIC_SPI_START + 11)
-#define TLMM_APCC_DIR_CONN_IRQ_8 (GIC_SPI_START + 12)
-#define TLMM_APCC_DIR_CONN_IRQ_9 (GIC_SPI_START + 13)
-#define PM8921_SEC_IRQ_103 (GIC_SPI_START + 14)
-#define PM8018_SEC_IRQ_106 (GIC_SPI_START + 15)
-#define TLMM_APCC_SUMMARY_IRQ (GIC_SPI_START + 16)
-#define SPDM_RT_1_IRQ (GIC_SPI_START + 17)
-#define SPDM_DIAG_IRQ (GIC_SPI_START + 18)
-#define RPM_APCC_CPU0_GP_HIGH_IRQ (GIC_SPI_START + 19)
-#define RPM_APCC_CPU0_GP_MEDIUM_IRQ (GIC_SPI_START + 20)
-#define RPM_APCC_CPU0_GP_LOW_IRQ (GIC_SPI_START + 21)
-#define RPM_APCC_CPU0_WAKE_UP_IRQ (GIC_SPI_START + 22)
-#define RPM_APCC_CPU1_GP_HIGH_IRQ (GIC_SPI_START + 23)
-#define RPM_APCC_CPU1_GP_MEDIUM_IRQ (GIC_SPI_START + 24)
-#define RPM_APCC_CPU1_GP_LOW_IRQ (GIC_SPI_START + 25)
-#define RPM_APCC_CPU1_WAKE_UP_IRQ (GIC_SPI_START + 26)
-#define SSBI2_2_SC_CPU0_SECURE_IRQ (GIC_SPI_START + 27)
-#define SSBI2_2_SC_CPU0_NON_SECURE_IRQ (GIC_SPI_START + 28)
-#define SSBI2_1_SC_CPU0_SECURE_IRQ (GIC_SPI_START + 29)
-#define SSBI2_1_SC_CPU0_NON_SECURE_IRQ (GIC_SPI_START + 30)
-#define MSMC_SC_SEC_CE_IRQ (GIC_SPI_START + 31)
-#define MSMC_SC_PRI_CE_IRQ (GIC_SPI_START + 32)
-#define SLIMBUS0_CORE_EE1_IRQ (GIC_SPI_START + 33)
-#define SLIMBUS0_BAM_EE1_IRQ (GIC_SPI_START + 34)
-#define Q6FW_WDOG_EXPIRED_IRQ (GIC_SPI_START + 35)
-#define Q6SW_WDOG_EXPIRED_IRQ (GIC_SPI_START + 36)
-#define MSS_TO_APPS_IRQ_0 (GIC_SPI_START + 37)
-#define MSS_TO_APPS_IRQ_1 (GIC_SPI_START + 38)
-#define MSS_TO_APPS_IRQ_2 (GIC_SPI_START + 39)
-#define MSS_TO_APPS_IRQ_3 (GIC_SPI_START + 40)
-#define MSS_TO_APPS_IRQ_4 (GIC_SPI_START + 41)
-#define MSS_TO_APPS_IRQ_5 (GIC_SPI_START + 42)
-#define MSS_TO_APPS_IRQ_6 (GIC_SPI_START + 43)
-#define MSS_TO_APPS_IRQ_7 (GIC_SPI_START + 44)
-#define MSS_TO_APPS_IRQ_8 (GIC_SPI_START + 45)
-#define MSS_TO_APPS_IRQ_9 (GIC_SPI_START + 46)
-#define VPE_IRQ (GIC_SPI_START + 47)
-#define VFE_IRQ (GIC_SPI_START + 48)
-#define VCODEC_IRQ (GIC_SPI_START + 49)
-#define TV_ENC_IRQ (GIC_SPI_START + 50)
-#define SMMU_VPE_CB_SC_SECURE_IRQ (GIC_SPI_START + 51)
-#define SMMU_VPE_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 52)
-#define SMMU_VFE_CB_SC_SECURE_IRQ (GIC_SPI_START + 53)
-#define SMMU_VFE_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 54)
-#define SMMU_VCODEC_B_CB_SC_SECURE_IRQ (GIC_SPI_START + 55)
-#define SMMU_VCODEC_B_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 56)
-#define SMMU_VCODEC_A_CB_SC_SECURE_IRQ (GIC_SPI_START + 57)
-#define SMMU_VCODEC_A_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 58)
-#define SMMU_ROT_CB_SC_SECURE_IRQ (GIC_SPI_START + 59)
-#define SMMU_ROT_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 60)
-#define SMMU_MDP1_CB_SC_SECURE_IRQ (GIC_SPI_START + 61)
-#define SMMU_MDP1_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 62)
-#define SMMU_MDP0_CB_SC_SECURE_IRQ (GIC_SPI_START + 63)
-#define SMMU_MDP0_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 64)
-#define SMMU_JPEGD_CB_SC_SECURE_IRQ (GIC_SPI_START + 65)
-#define SMMU_JPEGD_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 66)
-#define SMMU_IJPEG_CB_SC_SECURE_IRQ (GIC_SPI_START + 67)
-#define SMMU_IJPEG_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 68)
-#define SMMU_GFX3D_CB_SC_SECURE_IRQ (GIC_SPI_START + 69)
-#define SMMU_GFX3D_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 70)
-#define SMMU_GFX2D0_CB_SC_SECURE_IRQ (GIC_SPI_START + 71)
-#define SMMU_GFX2D0_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 72)
-#define ROT_IRQ (GIC_SPI_START + 73)
-#define MMSS_FABRIC_IRQ (GIC_SPI_START + 74)
-#define MDP_IRQ (GIC_SPI_START + 75)
-#define JPEGD_IRQ (GIC_SPI_START + 76)
-#define JPEG_IRQ (GIC_SPI_START + 77)
-#define MMSS_IMEM_IRQ (GIC_SPI_START + 78)
-#define HDMI_IRQ (GIC_SPI_START + 79)
-#define GFX3D_IRQ (GIC_SPI_START + 80)
-#define GFX2D0_IRQ (GIC_SPI_START + 81)
-#define DSI1_IRQ (GIC_SPI_START + 82)
-#define CSI_1_IRQ (GIC_SPI_START + 83)
-#define CSI_0_IRQ (GIC_SPI_START + 84)
-#define LPASS_SCSS_AUDIO_IF_OUT0_IRQ (GIC_SPI_START + 85)
-#define LPASS_SCSS_MIDI_IRQ (GIC_SPI_START + 86)
-#define LPASS_Q6SS_WDOG_EXPIRED (GIC_SPI_START + 87)
-#define LPASS_SCSS_GP_LOW_IRQ (GIC_SPI_START + 88)
-#define LPASS_SCSS_GP_MEDIUM_IRQ (GIC_SPI_START + 89)
-#define LPASS_SCSS_GP_HIGH_IRQ (GIC_SPI_START + 90)
-#define TOP_IMEM_IRQ (GIC_SPI_START + 91)
-#define FABRIC_SYS_IRQ (GIC_SPI_START + 92)
-#define FABRIC_APPS_IRQ (GIC_SPI_START + 93)
-#define USB1_HS_BAM_IRQ (GIC_SPI_START + 94)
-#define SDC4_BAM_IRQ (GIC_SPI_START + 95)
-#define SDC3_BAM_IRQ (GIC_SPI_START + 96)
-#define SDC2_BAM_IRQ (GIC_SPI_START + 97)
-#define SDC1_BAM_IRQ (GIC_SPI_START + 98)
-#define FABRIC_SPS_IRQ (GIC_SPI_START + 99)
-#define USB1_HS_IRQ (GIC_SPI_START + 100)
-#define SDC4_IRQ_0 (GIC_SPI_START + 101)
-#define SDC3_IRQ_0 (GIC_SPI_START + 102)
-#define SDC2_IRQ_0 (GIC_SPI_START + 103)
-#define SDC1_IRQ_0 (GIC_SPI_START + 104)
-#define SPS_BAM_DMA_IRQ (GIC_SPI_START + 105)
-#define SPS_SEC_VIOL_IRQ (GIC_SPI_START + 106)
-#define SPS_MTI_0 (GIC_SPI_START + 107)
-#define SPS_MTI_1 (GIC_SPI_START + 108)
-#define SPS_MTI_2 (GIC_SPI_START + 109)
-#define SPS_MTI_3 (GIC_SPI_START + 110)
-#define SPS_MTI_4 (GIC_SPI_START + 111)
-#define SPS_MTI_5 (GIC_SPI_START + 112)
-#define SPS_MTI_6 (GIC_SPI_START + 113)
-#define SPS_MTI_7 (GIC_SPI_START + 114)
-#define SPS_MTI_8 (GIC_SPI_START + 115)
-#define SPS_MTI_9 (GIC_SPI_START + 116)
-#define SPS_MTI_10 (GIC_SPI_START + 117)
-#define SPS_MTI_11 (GIC_SPI_START + 118)
-#define SPS_MTI_12 (GIC_SPI_START + 119)
-#define SPS_MTI_13 (GIC_SPI_START + 120)
-#define SPS_MTI_14 (GIC_SPI_START + 121)
-#define SPS_MTI_15 (GIC_SPI_START + 122)
-#define SPS_MTI_16 (GIC_SPI_START + 123)
-#define SPS_MTI_17 (GIC_SPI_START + 124)
-#define SPS_MTI_18 (GIC_SPI_START + 125)
-#define SPS_MTI_19 (GIC_SPI_START + 126)
-#define SPS_MTI_20 (GIC_SPI_START + 127)
-#define SPS_MTI_21 (GIC_SPI_START + 128)
-#define SPS_MTI_22 (GIC_SPI_START + 129)
-#define SPS_MTI_23 (GIC_SPI_START + 130)
-#define SPS_MTI_24 (GIC_SPI_START + 131)
-#define SPS_MTI_25 (GIC_SPI_START + 132)
-#define SPS_MTI_26 (GIC_SPI_START + 133)
-#define SPS_MTI_27 (GIC_SPI_START + 134)
-#define SPS_MTI_28 (GIC_SPI_START + 135)
-#define SPS_MTI_29 (GIC_SPI_START + 136)
-#define SPS_MTI_30 (GIC_SPI_START + 137)
-#define SPS_MTI_31 (GIC_SPI_START + 138)
-#define CSIPHY_4LN_IRQ (GIC_SPI_START + 139)
-#define CSIPHY_2LN_IRQ (GIC_SPI_START + 140)
-#define USB2_IRQ (GIC_SPI_START + 141)
-#define USB1_IRQ (GIC_SPI_START + 142)
-#define TSSC_SSBI_IRQ (GIC_SPI_START + 143)
-#define TSSC_SAMPLE_IRQ (GIC_SPI_START + 144)
-#define TSSC_PENUP_IRQ (GIC_SPI_START + 145)
-#define GSBI1_UARTDM_IRQ (GIC_SPI_START + 146)
-#define GSBI1_QUP_IRQ (GIC_SPI_START + 147)
-#define GSBI2_UARTDM_IRQ (GIC_SPI_START + 148)
-#define GSBI2_QUP_IRQ (GIC_SPI_START + 149)
-#define GSBI3_UARTDM_IRQ (GIC_SPI_START + 150)
-#define GSBI3_QUP_IRQ (GIC_SPI_START + 151)
-#define GSBI4_UARTDM_IRQ (GIC_SPI_START + 152)
-#define GSBI4_QUP_IRQ (GIC_SPI_START + 153)
-#define GSBI5_UARTDM_IRQ (GIC_SPI_START + 154)
-#define GSBI5_QUP_IRQ (GIC_SPI_START + 155)
-#define GSBI6_UARTDM_IRQ (GIC_SPI_START + 156)
-#define GSBI6_QUP_IRQ (GIC_SPI_START + 157)
-#define GSBI7_UARTDM_IRQ (GIC_SPI_START + 158)
-#define GSBI7_QUP_IRQ (GIC_SPI_START + 159)
-#define GSBI8_UARTDM_IRQ (GIC_SPI_START + 160)
-#define GSBI8_QUP_IRQ (GIC_SPI_START + 161)
-#define TSIF_TSPP_IRQ (GIC_SPI_START + 162)
-#define TSIF_BAM_IRQ (GIC_SPI_START + 163)
-#define TSIF2_IRQ (GIC_SPI_START + 164)
-#define TSIF1_IRQ (GIC_SPI_START + 165)
-#define DSI2_IRQ (GIC_SPI_START + 166)
-#define ISPIF_IRQ (GIC_SPI_START + 167)
-#define MSMC_SC_SEC_TMR_IRQ (GIC_SPI_START + 168)
-#define MSMC_SC_SEC_WDOG_BARK_IRQ (GIC_SPI_START + 169)
-#define INT_ADM0_SCSS_0_IRQ (GIC_SPI_START + 170)
-#define INT_ADM0_SCSS_1_IRQ (GIC_SPI_START + 171)
-#define INT_ADM0_SCSS_2_IRQ (GIC_SPI_START + 172)
-#define INT_ADM0_SCSS_3_IRQ (GIC_SPI_START + 173)
-#define CC_SCSS_WDT1CPU1BITEEXPIRED (GIC_SPI_START + 174)
-#define CC_SCSS_WDT1CPU0BITEEXPIRED (GIC_SPI_START + 175)
-#define CC_SCSS_WDT0CPU1BITEEXPIRED (GIC_SPI_START + 176)
-#define CC_SCSS_WDT0CPU0BITEEXPIRED (GIC_SPI_START + 177)
-#define TSENS_UPPER_LOWER_INT (GIC_SPI_START + 178)
-#define SSBI2_2_SC_CPU1_SECURE_INT (GIC_SPI_START + 179)
-#define SSBI2_2_SC_CPU1_NON_SECURE_INT (GIC_SPI_START + 180)
-#define SSBI2_1_SC_CPU1_SECURE_INT (GIC_SPI_START + 181)
-#define SSBI2_1_SC_CPU1_NON_SECURE_INT (GIC_SPI_START + 182)
-#define XPU_SUMMARY_IRQ (GIC_SPI_START + 183)
-#define BUS_EXCEPTION_SUMMARY_IRQ (GIC_SPI_START + 184)
-#define HSDDRX_EBI1CH0_IRQ (GIC_SPI_START + 185)
-#define HSDDRX_EBI1CH1_IRQ (GIC_SPI_START + 186)
-#define SDC5_BAM_IRQ (GIC_SPI_START + 187)
-#define SDC5_IRQ_0 (GIC_SPI_START + 188)
-#define GSBI9_UARTDM_IRQ (GIC_SPI_START + 189)
-#define GSBI9_QUP_IRQ (GIC_SPI_START + 190)
-#define GSBI10_UARTDM_IRQ (GIC_SPI_START + 191)
-#define GSBI10_QUP_IRQ (GIC_SPI_START + 192)
-#define GSBI11_UARTDM_IRQ (GIC_SPI_START + 193)
-#define GSBI11_QUP_IRQ (GIC_SPI_START + 194)
-#define GSBI12_UARTDM_IRQ (GIC_SPI_START + 195)
-#define GSBI12_QUP_IRQ (GIC_SPI_START + 196)
-#define RIVA_APSS_LTECOEX_IRQ (GIC_SPI_START + 197)
-#define RIVA_APSS_SPARE_IRQ (GIC_SPI_START + 198)
-#define RIVA_APSS_WDOG_BITE_RESET_RDY_IRQ (GIC_SPI_START + 199)
-#define RIVA_ASS_RESET_DONE_IRQ (GIC_SPI_START + 200)
-#define RIVA_APSS_ASIC_IRQ (GIC_SPI_START + 201)
-#define RIVA_APPS_WLAN_RX_DATA_AVAIL_IRQ (GIC_SPI_START + 202)
-#define RIVA_APPS_WLAN_DATA_XFER_DONE_IRQ (GIC_SPI_START + 203)
-#define RIVA_APPS_WLAM_SMSM_IRQ (GIC_SPI_START + 204)
-#define RIVA_APPS_LOG_CTRL_IRQ (GIC_SPI_START + 205)
-#define RIVA_APPS_FM_CTRL_IRQ (GIC_SPI_START + 206)
-#define RIVA_APPS_HCI_IRQ (GIC_SPI_START + 207)
-#define RIVA_APPS_WLAN_CTRL_IRQ (GIC_SPI_START + 208)
-#define A2_BAM_IRQ (GIC_SPI_START + 209)
-#define SMMU_GFX2D1_CB_SC_SECURE_IRQ (GIC_SPI_START + 210)
-#define SMMU_GFX2D1_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 211)
-#define GFX2D1_IRQ (GIC_SPI_START + 212)
-#define PPSS_WDOG_TIMER_IRQ (GIC_SPI_START + 213)
-#define SPS_SLIMBUS_CORE_EE0_IRQ (GIC_SPI_START + 214)
-#define SPS_SLIMBUS_BAM_EE0_IRQ (GIC_SPI_START + 215)
-#define QDSS_ETB_IRQ (GIC_SPI_START + 216)
-#define QDSS_CTI2KPSS_CPU1_IRQ (GIC_SPI_START + 217)
-#define QDSS_CTI2KPSS_CPU0_IRQ (GIC_SPI_START + 218)
-#define TLMM_APCC_DIR_CONN_IRQ_16 (GIC_SPI_START + 219)
-#define TLMM_APCC_DIR_CONN_IRQ_17 (GIC_SPI_START + 220)
-#define TLMM_APCC_DIR_CONN_IRQ_18 (GIC_SPI_START + 221)
-#define TLMM_APCC_DIR_CONN_IRQ_19 (GIC_SPI_START + 222)
-#define TLMM_APCC_DIR_CONN_IRQ_20 (GIC_SPI_START + 223)
-#define TLMM_APCC_DIR_CONN_IRQ_21 (GIC_SPI_START + 224)
-#define PM8921_SEC_IRQ_104 (GIC_SPI_START + 225)
-#define PM8018_SEC_IRQ_107 (GIC_SPI_START + 226)
-
-/* For now, use the maximum number of interrupts until a pending GIC issue
- * is sorted out */
-#define NR_MSM_IRQS 1020
-#define NR_BOARD_IRQS 0
-#define NR_GPIO_IRQS 0
-
-#endif
-
diff --git a/arch/arm/mach-msm/include/mach/irqs-8x60.h b/arch/arm/mach-msm/include/mach/irqs-8x60.h
deleted file mode 100644
index f65841c74c0b..000000000000
--- a/arch/arm/mach-msm/include/mach/irqs-8x60.h
+++ /dev/null
@@ -1,258 +0,0 @@
-/* Copyright (c) 2010 Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ASM_ARCH_MSM_IRQS_8X60_H
-#define __ASM_ARCH_MSM_IRQS_8X60_H
-
-/* MSM ACPU Interrupt Numbers */
-
-/* 0-15: STI/SGI (software triggered/generated interrupts)
- * 16-31: PPI (private peripheral interrupts)
- * 32+: SPI (shared peripheral interrupts)
- */
-
-#define GIC_PPI_START 16
-#define GIC_SPI_START 32
-
-#define INT_DEBUG_TIMER_EXP (GIC_PPI_START + 0)
-#define INT_GP_TIMER_EXP (GIC_PPI_START + 1)
-#define INT_GP_TIMER2_EXP (GIC_PPI_START + 2)
-#define WDT0_ACCSCSSNBARK_INT (GIC_PPI_START + 3)
-#define WDT1_ACCSCSSNBARK_INT (GIC_PPI_START + 4)
-#define AVS_SVICINT (GIC_PPI_START + 5)
-#define AVS_SVICINTSWDONE (GIC_PPI_START + 6)
-#define CPU_DBGCPUXCOMMRXFULL (GIC_PPI_START + 7)
-#define CPU_DBGCPUXCOMMTXEMPTY (GIC_PPI_START + 8)
-#define CPU_SICCPUXPERFMONIRPTREQ (GIC_PPI_START + 9)
-#define SC_AVSCPUXDOWN (GIC_PPI_START + 10)
-#define SC_AVSCPUXUP (GIC_PPI_START + 11)
-#define SC_SICCPUXACGIRPTREQ (GIC_PPI_START + 12)
-/* PPI 13 to 15 are unused */
-
-
-#define SC_SICMPUIRPTREQ (GIC_SPI_START + 0)
-#define SC_SICL2IRPTREQ (GIC_SPI_START + 1)
-#define SC_SICL2ACGIRPTREQ (GIC_SPI_START + 2)
-#define NC (GIC_SPI_START + 3)
-#define TLMM_SCSS_DIR_CONN_IRQ_0 (GIC_SPI_START + 4)
-#define TLMM_SCSS_DIR_CONN_IRQ_1 (GIC_SPI_START + 5)
-#define TLMM_SCSS_DIR_CONN_IRQ_2 (GIC_SPI_START + 6)
-#define TLMM_SCSS_DIR_CONN_IRQ_3 (GIC_SPI_START + 7)
-#define TLMM_SCSS_DIR_CONN_IRQ_4 (GIC_SPI_START + 8)
-#define TLMM_SCSS_DIR_CONN_IRQ_5 (GIC_SPI_START + 9)
-#define TLMM_SCSS_DIR_CONN_IRQ_6 (GIC_SPI_START + 10)
-#define TLMM_SCSS_DIR_CONN_IRQ_7 (GIC_SPI_START + 11)
-#define TLMM_SCSS_DIR_CONN_IRQ_8 (GIC_SPI_START + 12)
-#define TLMM_SCSS_DIR_CONN_IRQ_9 (GIC_SPI_START + 13)
-#define PM8058_SEC_IRQ_N (GIC_SPI_START + 14)
-#define PM8901_SEC_IRQ_N (GIC_SPI_START + 15)
-#define TLMM_SCSS_SUMMARY_IRQ (GIC_SPI_START + 16)
-#define SPDM_RT_1_IRQ (GIC_SPI_START + 17)
-#define SPDM_DIAG_IRQ (GIC_SPI_START + 18)
-#define RPM_SCSS_CPU0_GP_HIGH_IRQ (GIC_SPI_START + 19)
-#define RPM_SCSS_CPU0_GP_MEDIUM_IRQ (GIC_SPI_START + 20)
-#define RPM_SCSS_CPU0_GP_LOW_IRQ (GIC_SPI_START + 21)
-#define RPM_SCSS_CPU0_WAKE_UP_IRQ (GIC_SPI_START + 22)
-#define RPM_SCSS_CPU1_GP_HIGH_IRQ (GIC_SPI_START + 23)
-#define RPM_SCSS_CPU1_GP_MEDIUM_IRQ (GIC_SPI_START + 24)
-#define RPM_SCSS_CPU1_GP_LOW_IRQ (GIC_SPI_START + 25)
-#define RPM_SCSS_CPU1_WAKE_UP_IRQ (GIC_SPI_START + 26)
-#define SSBI2_2_SC_CPU0_SECURE_INT (GIC_SPI_START + 27)
-#define SSBI2_2_SC_CPU0_NON_SECURE_INT (GIC_SPI_START + 28)
-#define SSBI2_1_SC_CPU0_SECURE_INT (GIC_SPI_START + 29)
-#define SSBI2_1_SC_CPU0_NON_SECURE_INT (GIC_SPI_START + 30)
-#define MSMC_SC_SEC_CE_IRQ (GIC_SPI_START + 31)
-#define MSMC_SC_PRI_CE_IRQ (GIC_SPI_START + 32)
-#define MARM_FIQ (GIC_SPI_START + 33)
-#define MARM_IRQ (GIC_SPI_START + 34)
-#define MARM_L2CC_IRQ (GIC_SPI_START + 35)
-#define MARM_WDOG_EXPIRED (GIC_SPI_START + 36)
-#define MARM_SCSS_GP_IRQ_0 (GIC_SPI_START + 37)
-#define MARM_SCSS_GP_IRQ_1 (GIC_SPI_START + 38)
-#define MARM_SCSS_GP_IRQ_2 (GIC_SPI_START + 39)
-#define MARM_SCSS_GP_IRQ_3 (GIC_SPI_START + 40)
-#define MARM_SCSS_GP_IRQ_4 (GIC_SPI_START + 41)
-#define MARM_SCSS_GP_IRQ_5 (GIC_SPI_START + 42)
-#define MARM_SCSS_GP_IRQ_6 (GIC_SPI_START + 43)
-#define MARM_SCSS_GP_IRQ_7 (GIC_SPI_START + 44)
-#define MARM_SCSS_GP_IRQ_8 (GIC_SPI_START + 45)
-#define MARM_SCSS_GP_IRQ_9 (GIC_SPI_START + 46)
-#define VPE_IRQ (GIC_SPI_START + 47)
-#define VFE_IRQ (GIC_SPI_START + 48)
-#define VCODEC_IRQ (GIC_SPI_START + 49)
-#define TV_ENC_IRQ (GIC_SPI_START + 50)
-#define SMMU_VPE_CB_SC_SECURE_IRQ (GIC_SPI_START + 51)
-#define SMMU_VPE_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 52)
-#define SMMU_VFE_CB_SC_SECURE_IRQ (GIC_SPI_START + 53)
-#define SMMU_VFE_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 54)
-#define SMMU_VCODEC_B_CB_SC_SECURE_IRQ (GIC_SPI_START + 55)
-#define SMMU_VCODEC_B_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 56)
-#define SMMU_VCODEC_A_CB_SC_SECURE_IRQ (GIC_SPI_START + 57)
-#define SMMU_VCODEC_A_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 58)
-#define SMMU_ROT_CB_SC_SECURE_IRQ (GIC_SPI_START + 59)
-#define SMMU_ROT_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 60)
-#define SMMU_MDP1_CB_SC_SECURE_IRQ (GIC_SPI_START + 61)
-#define SMMU_MDP1_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 62)
-#define SMMU_MDP0_CB_SC_SECURE_IRQ (GIC_SPI_START + 63)
-#define SMMU_MDP0_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 64)
-#define SMMU_JPEGD_CB_SC_SECURE_IRQ (GIC_SPI_START + 65)
-#define SMMU_JPEGD_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 66)
-#define SMMU_IJPEG_CB_SC_SECURE_IRQ (GIC_SPI_START + 67)
-#define SMMU_IJPEG_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 68)
-#define SMMU_GFX3D_CB_SC_SECURE_IRQ (GIC_SPI_START + 69)
-#define SMMU_GFX3D_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 70)
-#define SMMU_GFX2D0_CB_SC_SECURE_IRQ (GIC_SPI_START + 71)
-#define SMMU_GFX2D0_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 72)
-#define ROT_IRQ (GIC_SPI_START + 73)
-#define MMSS_FABRIC_IRQ (GIC_SPI_START + 74)
-#define MDP_IRQ (GIC_SPI_START + 75)
-#define JPEGD_IRQ (GIC_SPI_START + 76)
-#define JPEG_IRQ (GIC_SPI_START + 77)
-#define MMSS_IMEM_IRQ (GIC_SPI_START + 78)
-#define HDMI_IRQ (GIC_SPI_START + 79)
-#define GFX3D_IRQ (GIC_SPI_START + 80)
-#define GFX2D0_IRQ (GIC_SPI_START + 81)
-#define DSI_IRQ (GIC_SPI_START + 82)
-#define CSI_1_IRQ (GIC_SPI_START + 83)
-#define CSI_0_IRQ (GIC_SPI_START + 84)
-#define LPASS_SCSS_AUDIO_IF_OUT0_IRQ (GIC_SPI_START + 85)
-#define LPASS_SCSS_MIDI_IRQ (GIC_SPI_START + 86)
-#define LPASS_Q6SS_WDOG_EXPIRED (GIC_SPI_START + 87)
-#define LPASS_SCSS_GP_LOW_IRQ (GIC_SPI_START + 88)
-#define LPASS_SCSS_GP_MEDIUM_IRQ (GIC_SPI_START + 89)
-#define LPASS_SCSS_GP_HIGH_IRQ (GIC_SPI_START + 90)
-#define TOP_IMEM_IRQ (GIC_SPI_START + 91)
-#define FABRIC_SYS_IRQ (GIC_SPI_START + 92)
-#define FABRIC_APPS_IRQ (GIC_SPI_START + 93)
-#define USB1_HS_BAM_IRQ (GIC_SPI_START + 94)
-#define SDC4_BAM_IRQ (GIC_SPI_START + 95)
-#define SDC3_BAM_IRQ (GIC_SPI_START + 96)
-#define SDC2_BAM_IRQ (GIC_SPI_START + 97)
-#define SDC1_BAM_IRQ (GIC_SPI_START + 98)
-#define FABRIC_SPS_IRQ (GIC_SPI_START + 99)
-#define USB1_HS_IRQ (GIC_SPI_START + 100)
-#define SDC4_IRQ_0 (GIC_SPI_START + 101)
-#define SDC3_IRQ_0 (GIC_SPI_START + 102)
-#define SDC2_IRQ_0 (GIC_SPI_START + 103)
-#define SDC1_IRQ_0 (GIC_SPI_START + 104)
-#define SPS_BAM_DMA_IRQ (GIC_SPI_START + 105)
-#define SPS_SEC_VIOL_IRQ (GIC_SPI_START + 106)
-#define SPS_MTI_0 (GIC_SPI_START + 107)
-#define SPS_MTI_1 (GIC_SPI_START + 108)
-#define SPS_MTI_2 (GIC_SPI_START + 109)
-#define SPS_MTI_3 (GIC_SPI_START + 110)
-#define SPS_MTI_4 (GIC_SPI_START + 111)
-#define SPS_MTI_5 (GIC_SPI_START + 112)
-#define SPS_MTI_6 (GIC_SPI_START + 113)
-#define SPS_MTI_7 (GIC_SPI_START + 114)
-#define SPS_MTI_8 (GIC_SPI_START + 115)
-#define SPS_MTI_9 (GIC_SPI_START + 116)
-#define SPS_MTI_10 (GIC_SPI_START + 117)
-#define SPS_MTI_11 (GIC_SPI_START + 118)
-#define SPS_MTI_12 (GIC_SPI_START + 119)
-#define SPS_MTI_13 (GIC_SPI_START + 120)
-#define SPS_MTI_14 (GIC_SPI_START + 121)
-#define SPS_MTI_15 (GIC_SPI_START + 122)
-#define SPS_MTI_16 (GIC_SPI_START + 123)
-#define SPS_MTI_17 (GIC_SPI_START + 124)
-#define SPS_MTI_18 (GIC_SPI_START + 125)
-#define SPS_MTI_19 (GIC_SPI_START + 126)
-#define SPS_MTI_20 (GIC_SPI_START + 127)
-#define SPS_MTI_21 (GIC_SPI_START + 128)
-#define SPS_MTI_22 (GIC_SPI_START + 129)
-#define SPS_MTI_23 (GIC_SPI_START + 130)
-#define SPS_MTI_24 (GIC_SPI_START + 131)
-#define SPS_MTI_25 (GIC_SPI_START + 132)
-#define SPS_MTI_26 (GIC_SPI_START + 133)
-#define SPS_MTI_27 (GIC_SPI_START + 134)
-#define SPS_MTI_28 (GIC_SPI_START + 135)
-#define SPS_MTI_29 (GIC_SPI_START + 136)
-#define SPS_MTI_30 (GIC_SPI_START + 137)
-#define SPS_MTI_31 (GIC_SPI_START + 138)
-#define UXMC_EBI2_WR_ER_DONE_IRQ (GIC_SPI_START + 139)
-#define UXMC_EBI2_OP_DONE_IRQ (GIC_SPI_START + 140)
-#define USB2_IRQ (GIC_SPI_START + 141)
-#define USB1_IRQ (GIC_SPI_START + 142)
-#define TSSC_SSBI_IRQ (GIC_SPI_START + 143)
-#define TSSC_SAMPLE_IRQ (GIC_SPI_START + 144)
-#define TSSC_PENUP_IRQ (GIC_SPI_START + 145)
-#define INT_UART1DM_IRQ (GIC_SPI_START + 146)
-#define GSBI1_QUP_IRQ (GIC_SPI_START + 147)
-#define INT_UART2DM_IRQ (GIC_SPI_START + 148)
-#define GSBI2_QUP_IRQ (GIC_SPI_START + 149)
-#define INT_UART3DM_IRQ (GIC_SPI_START + 150)
-#define GSBI3_QUP_IRQ (GIC_SPI_START + 151)
-#define INT_UART4DM_IRQ (GIC_SPI_START + 152)
-#define GSBI4_QUP_IRQ (GIC_SPI_START + 153)
-#define INT_UART5DM_IRQ (GIC_SPI_START + 154)
-#define GSBI5_QUP_IRQ (GIC_SPI_START + 155)
-#define INT_UART6DM_IRQ (GIC_SPI_START + 156)
-#define GSBI6_QUP_IRQ (GIC_SPI_START + 157)
-#define INT_UART7DM_IRQ (GIC_SPI_START + 158)
-#define GSBI7_QUP_IRQ (GIC_SPI_START + 159)
-#define INT_UART8DM_IRQ (GIC_SPI_START + 160)
-#define GSBI8_QUP_IRQ (GIC_SPI_START + 161)
-#define TSIF_TSPP_IRQ (GIC_SPI_START + 162)
-#define TSIF_BAM_IRQ (GIC_SPI_START + 163)
-#define TSIF2_IRQ (GIC_SPI_START + 164)
-#define TSIF1_IRQ (GIC_SPI_START + 165)
-#define INT_ADM1_MASTER (GIC_SPI_START + 166)
-#define INT_ADM1_AARM (GIC_SPI_START + 167)
-#define INT_ADM1_SD2 (GIC_SPI_START + 168)
-#define INT_ADM1_SD3 (GIC_SPI_START + 169)
-#define INT_ADM0_MASTER (GIC_SPI_START + 170)
-#define INT_ADM0_AARM (GIC_SPI_START + 171)
-#define INT_ADM0_SD2 (GIC_SPI_START + 172)
-#define INT_ADM0_SD3 (GIC_SPI_START + 173)
-#define CC_SCSS_WDT1CPU1BITEEXPIRED (GIC_SPI_START + 174)
-#define CC_SCSS_WDT1CPU0BITEEXPIRED (GIC_SPI_START + 175)
-#define CC_SCSS_WDT0CPU1BITEEXPIRED (GIC_SPI_START + 176)
-#define CC_SCSS_WDT0CPU0BITEEXPIRED (GIC_SPI_START + 177)
-#define TSENS_UPPER_LOWER_INT (GIC_SPI_START + 178)
-#define SSBI2_2_SC_CPU1_SECURE_INT (GIC_SPI_START + 179)
-#define SSBI2_2_SC_CPU1_NON_SECURE_INT (GIC_SPI_START + 180)
-#define SSBI2_1_SC_CPU1_SECURE_INT (GIC_SPI_START + 181)
-#define SSBI2_1_SC_CPU1_NON_SECURE_INT (GIC_SPI_START + 182)
-#define XPU_SUMMARY_IRQ (GIC_SPI_START + 183)
-#define BUS_EXCEPTION_SUMMARY_IRQ (GIC_SPI_START + 184)
-#define HSDDRX_SMICH0_IRQ (GIC_SPI_START + 185)
-#define HSDDRX_EBI1_IRQ (GIC_SPI_START + 186)
-#define SDC5_BAM_IRQ (GIC_SPI_START + 187)
-#define SDC5_IRQ_0 (GIC_SPI_START + 188)
-#define INT_UART9DM_IRQ (GIC_SPI_START + 189)
-#define GSBI9_QUP_IRQ (GIC_SPI_START + 190)
-#define INT_UART10DM_IRQ (GIC_SPI_START + 191)
-#define GSBI10_QUP_IRQ (GIC_SPI_START + 192)
-#define INT_UART11DM_IRQ (GIC_SPI_START + 193)
-#define GSBI11_QUP_IRQ (GIC_SPI_START + 194)
-#define INT_UART12DM_IRQ (GIC_SPI_START + 195)
-#define GSBI12_QUP_IRQ (GIC_SPI_START + 196)
-
-/*SPI 197 to 209 arent used in 8x60*/
-#define SMMU_GFX2D1_CB_SC_SECURE_IRQ (GIC_SPI_START + 210)
-#define SMMU_GFX2D1_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 211)
-
-/*SPI 212 to 216 arent used in 8x60*/
-#define SMPSS_SPARE_1 (GIC_SPI_START + 217)
-#define SMPSS_SPARE_2 (GIC_SPI_START + 218)
-#define SMPSS_SPARE_3 (GIC_SPI_START + 219)
-#define SMPSS_SPARE_4 (GIC_SPI_START + 220)
-#define SMPSS_SPARE_5 (GIC_SPI_START + 221)
-#define SMPSS_SPARE_6 (GIC_SPI_START + 222)
-#define SMPSS_SPARE_7 (GIC_SPI_START + 223)
-
-#define NR_GPIO_IRQS 173
-#define NR_MSM_IRQS 256
-#define NR_BOARD_IRQS 0
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs.h b/arch/arm/mach-msm/include/mach/irqs.h
index 3cd78b165abb..164d355c96ea 100644
--- a/arch/arm/mach-msm/include/mach/irqs.h
+++ b/arch/arm/mach-msm/include/mach/irqs.h
@@ -24,11 +24,6 @@
#elif defined(CONFIG_ARCH_QSD8X50)
#include "irqs-8x50.h"
#include "sirc.h"
-#elif defined(CONFIG_ARCH_MSM8X60)
-#include "irqs-8x60.h"
-#elif defined(CONFIG_ARCH_MSM8960)
-/* TODO: Make these not generic. */
-#include "irqs-8960.h"
#elif defined(CONFIG_ARCH_MSM_ARM11)
#include "irqs-7x00.h"
#else
diff --git a/arch/arm/mach-msm/include/mach/timex.h b/arch/arm/mach-msm/include/mach/timex.h
deleted file mode 100644
index a62e6b215aec..000000000000
--- a/arch/arm/mach-msm/include/mach/timex.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* arch/arm/mach-msm/include/mach/timex.h
- *
- * Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ASM_ARCH_MSM_TIMEX_H
-#define __ASM_ARCH_MSM_TIMEX_H
-
-#define CLOCK_TICK_RATE 1000000
-
-#endif
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 696fb73296d0..1e9c3383daba 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -274,7 +274,6 @@ static void __init msm_dt_timer_init(struct device_node *np)
pr_err("Unknown frequency\n");
return;
}
- of_node_put(np);
event_base = base + 0x4;
sts_base = base + 0x88;
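
The timer.c hunk above removes an of_node_put() on a node the init function never obtained a reference to; the node is handed in by the clocksource DT machinery and is not the callee's to release. A short sketch of that ownership rule, with hypothetical names:

#include <linux/of.h>

static void example_node_refs(struct device_node *passed_in)
{
	struct device_node *child;

	/* of_get_next_child() takes a reference, so we must drop it */
	child = of_get_next_child(passed_in, NULL);
	of_node_put(child);	/* of_node_put(NULL) is a no-op */

	/* no of_node_put(passed_in): we never took that reference */
}
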
diff --git a/arch/arm/mach-mv78xx0/include/mach/timex.h b/arch/arm/mach-mv78xx0/include/mach/timex.h
deleted file mode 100644
index 0e8c443c723a..000000000000
--- a/arch/arm/mach-mv78xx0/include/mach/timex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * arch/arm/mach-mv78xx0/include/mach/timex.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#define CLOCK_TICK_RATE (100 * HZ)
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 9eb63d724602..5e269d7263ce 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -1,5 +1,6 @@
config ARCH_MVEBU
bool "Marvell SOCs with Device Tree support" if ARCH_MULTI_V7
+ select ARCH_SUPPORTS_BIG_ENDIAN
select CLKSRC_MMIO
select COMMON_CLK
select GENERIC_CLOCKEVENTS
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
index 5476669ba905..ee7598fe75db 100644
--- a/arch/arm/mach-mvebu/coherency_ll.S
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -20,6 +20,8 @@
#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4
+#include <asm/assembler.h>
+
.text
/*
* r0: Coherency fabric base register address
@@ -29,6 +31,7 @@ ENTRY(ll_set_cpu_coherent)
/* Create bit by cpu index */
mov r3, #(1 << 24)
lsl r1, r3, r1
+ARM_BE8(rev r1, r1)
/* Add CPU to SMP group - Atomic */
add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET
diff --git a/arch/arm/mach-mvebu/headsmp.S b/arch/arm/mach-mvebu/headsmp.S
index 8a1b0c96e9ec..3dd80df428f7 100644
--- a/arch/arm/mach-mvebu/headsmp.S
+++ b/arch/arm/mach-mvebu/headsmp.S
@@ -21,12 +21,16 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <asm/assembler.h>
+
/*
* Armada XP specific entry point for secondary CPUs.
* We add the CPU to the coherency fabric and then jump to secondary
* startup
*/
ENTRY(armada_xp_secondary_startup)
+ ARM_BE8(setend be ) @ go BE8 if entered LE
+
/* Get coherency fabric base physical address */
adr r0, 1f
ldr r1, [r0]
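
The two mvebu assembly hunks above pair with the new ARCH_SUPPORTS_BIG_ENDIAN select: secondary CPUs still enter from the boot firmware in little-endian, so the entry point switches to BE8 with setend, and the value stored into the coherency-fabric register is byte-swapped first so the raw store still lands in the layout the hardware expects. ARM_BE8() comes from <asm/assembler.h> and, roughly, expands to its argument only on CONFIG_CPU_ENDIAN_BE8 builds (paraphrased sketch, not part of this patch):

    #ifdef CONFIG_CPU_ENDIAN_BE8
    #define ARM_BE8(code...)	code	/* emitted only when building a BE8 kernel */
    #else
    #define ARM_BE8(code...)	/* compiles away on little-endian builds */
    #endif

On a little-endian build both wrappers therefore generate no code at all.
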
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 98f6e2adb53e..1dc5acd4fc99 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -13,8 +13,6 @@
#include <linux/clk.h>
#include <linux/clk/mxs.h>
#include <linux/clkdev.h>
-#include <linux/clocksource.h>
-#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
@@ -332,6 +330,11 @@ static void __init crystalfontz_init(void)
update_fec_mac_prop(OUI_CRYSTALFONTZ);
}
+static void __init m28cu3_init(void)
+{
+ update_fec_mac_prop(OUI_DENX);
+}
+
static const char __init *mxs_get_soc_id(void)
{
struct device_node *np;
@@ -459,6 +462,8 @@ static void __init mxs_machine_init(void)
apx4devkit_init();
else if (of_machine_is_compatible("crystalfontz,cfa10036"))
crystalfontz_init();
+ else if (of_machine_is_compatible("msr,m28cu3"))
+ m28cu3_init();
of_platform_populate(NULL, of_default_bus_match_table,
NULL, parent);
@@ -490,16 +495,6 @@ static void mxs_restart(enum reboot_mode mode, const char *cmd)
soft_restart(0);
}
-static void __init mxs_timer_init(void)
-{
- if (of_machine_is_compatible("fsl,imx23"))
- mx23_clocks_init();
- else
- mx28_clocks_init();
- of_clk_init(NULL);
- clocksource_of_init();
-}
-
static const char *mxs_dt_compat[] __initdata = {
"fsl,imx28",
"fsl,imx23",
@@ -508,7 +503,6 @@ static const char *mxs_dt_compat[] __initdata = {
DT_MACHINE_START(MXS, "Freescale MXS (Device Tree)")
.handle_irq = icoll_handle_irq,
- .init_time = mxs_timer_init,
.init_machine = mxs_machine_init,
.init_late = mxs_pm_init,
.dt_compat = mxs_dt_compat,
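
Dropping .init_time here relies on the generic ARM fallback: when a machine descriptor leaves init_time unset, time_init() performs the of_clk_init() + clocksource_of_init() sequence itself, which covers what mxs_timer_init() open-coded (the SoC-specific mx23/mx28 clock setup presumably moving into the clock driver's own OF hooks). Roughly, the fallback looks like this (a paraphrase of arch/arm/kernel/time.c at this point in the series, not part of this hunk):

    void __init time_init(void)
    {
    	if (machine_desc->init_time) {
    		machine_desc->init_time();
    	} else {
    #ifdef CONFIG_COMMON_CLK
    		of_clk_init(NULL);
    #endif
    		clocksource_of_init();
    	}
    }

The same reasoning applies to the nomadik and nspire init_time removals further down.
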
diff --git a/arch/arm/mach-netx/include/mach/timex.h b/arch/arm/mach-netx/include/mach/timex.h
deleted file mode 100644
index 1120dd0ba393..000000000000
--- a/arch/arm/mach-netx/include/mach/timex.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-netx/include/mach/timex.h
- *
- * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#define CLOCK_TICK_RATE 100000000
diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c
index 13e0df9c11ce..cce2c9dfb5d1 100644
--- a/arch/arm/mach-nomadik/cpu-8815.c
+++ b/arch/arm/mach-nomadik/cpu-8815.c
@@ -25,15 +25,11 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/dma-mapping.h>
-#include <linux/platform_data/clk-nomadik.h>
-#include <linux/clocksource.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <linux/mtd/fsmc.h>
#include <linux/gpio.h>
-#include <linux/amba/mmci.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -113,50 +109,6 @@ static void cpu8815_restart(enum reboot_mode mode, const char *cmd)
writel(1, srcbase + 0x18);
}
-/* Initial value for SRC control register: all timers use MXTAL/8 source */
-#define SRC_CR_INIT_MASK 0x00007fff
-#define SRC_CR_INIT_VAL 0x2aaa8000
-
-static void __init cpu8815_timer_init_of(void)
-{
- struct device_node *mtu;
- void __iomem *base;
- int irq;
- u32 src_cr;
-
- /* We need this to be up now */
- nomadik_clk_init();
-
- mtu = of_find_node_by_path("/mtu@101e2000");
- if (!mtu)
- return;
- base = of_iomap(mtu, 0);
- if (WARN_ON(!base))
- return;
- irq = irq_of_parse_and_map(mtu, 0);
-
- pr_info("Remapped MTU @ %p, irq: %d\n", base, irq);
-
- /* Configure timer sources in "system reset controller" ctrl reg */
- src_cr = readl(base);
- src_cr &= SRC_CR_INIT_MASK;
- src_cr |= SRC_CR_INIT_VAL;
- writel(src_cr, base);
-
- clocksource_of_init();
-}
-
-static struct fsmc_nand_timings cpu8815_nand_timings = {
- .thiz = 0,
- .thold = 0x10,
- .twait = 0x0A,
- .tset = 0,
-};
-
-static struct fsmc_nand_platform_data cpu8815_nand_data = {
- .nand_timings = &cpu8815_nand_timings,
-};
-
/*
* The SMSC911x IRQ is connected to a GPIO pin, but the driver expects
* to simply request an IRQ passed as a resource. So the GPIO pin needs
@@ -190,15 +142,6 @@ static int __init cpu8815_eth_init(void)
device_initcall(cpu8815_eth_init);
/*
- * TODO:
- * cannot be set from device tree, convert to a proper DT
- * binding.
- */
-static struct mmci_platform_data mmcsd_plat_data = {
- .ocr_mask = MMC_VDD_29_30,
-};
-
-/*
* This GPIO pin turns on a line that is used to detect card insertion
* on this board.
*/
@@ -232,24 +175,13 @@ static int __init cpu8815_mmcsd_init(void)
}
device_initcall(cpu8815_mmcsd_init);
-
-/* These are mostly to get the right device names for the clock lookups */
-static struct of_dev_auxdata cpu8815_auxdata_lookup[] __initdata = {
- OF_DEV_AUXDATA("stericsson,fsmc-nand", NOMADIK_FSMC_BASE,
- NULL, &cpu8815_nand_data),
- OF_DEV_AUXDATA("arm,primecell", NOMADIK_SDI_BASE,
- NULL, &mmcsd_plat_data),
- { /* sentinel */ },
-};
-
static void __init cpu8815_init_of(void)
{
#ifdef CONFIG_CACHE_L2X0
/* At full speed latency must be >=2, so 0x249 in low bits */
l2x0_of_init(0x00730249, 0xfe000fff);
#endif
- of_platform_populate(NULL, of_default_bus_match_table,
- cpu8815_auxdata_lookup, NULL);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static const char * cpu8815_board_compat[] = {
@@ -259,7 +191,6 @@ static const char * cpu8815_board_compat[] = {
DT_MACHINE_START(NOMADIK_DT, "Nomadik STn8815")
.map_io = cpu8815_map_io,
- .init_time = cpu8815_timer_init_of,
.init_machine = cpu8815_init_of,
.restart = cpu8815_restart,
.dt_compat = cpu8815_board_compat,
diff --git a/arch/arm/mach-nspire/nspire.c b/arch/arm/mach-nspire/nspire.c
index 99e26092a9f7..4b2ed2e8352f 100644
--- a/arch/arm/mach-nspire/nspire.c
+++ b/arch/arm/mach-nspire/nspire.c
@@ -14,11 +14,9 @@
#include <linux/of_platform.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-vic.h>
-#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
-#include <linux/clocksource.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
@@ -65,12 +63,6 @@ static void __init nspire_init(void)
nspire_auxdata, NULL);
}
-static void __init nspire_init_time(void)
-{
- of_clk_init(NULL);
- clocksource_of_init();
-}
-
static void nspire_restart(char mode, const char *cmd)
{
void __iomem *base = ioremap(NSPIRE_MISC_PHYS_BASE, SZ_4K);
@@ -83,7 +75,6 @@ static void nspire_restart(char mode, const char *cmd)
DT_MACHINE_START(NSPIRE, "TI-NSPIRE")
.dt_compat = nspire_dt_match,
.map_io = nspire_map_io,
- .init_time = nspire_init_time,
.init_machine = nspire_init,
.restart = nspire_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index a7ce69286688..d68909b095f1 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -300,7 +300,7 @@ static struct omap_lcd_config osk_lcd_config __initdata = {
#ifdef CONFIG_OMAP_OSK_MISTRAL
#include <linux/input.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h
index abec019a5281..732f8ee2fcd2 100644
--- a/arch/arm/mach-omap1/common.h
+++ b/arch/arm/mach-omap1/common.h
@@ -46,6 +46,9 @@ static inline void omap7xx_map_io(void)
void omap1510_fpga_init_irq(void);
void omap15xx_map_io(void);
#else
+static inline void omap1510_fpga_init_irq(void)
+{
+}
static inline void omap15xx_map_io(void)
{
}
diff --git a/arch/arm/mach-omap1/fpga.c b/arch/arm/mach-omap1/fpga.c
index 8bd71b2d0967..3c0e42219200 100644
--- a/arch/arm/mach-omap1/fpga.c
+++ b/arch/arm/mach-omap1/fpga.c
@@ -135,8 +135,7 @@ static struct irq_chip omap_fpga_irq = {
* mask_ack routine for all of the FPGA interrupts has been changed from
* fpga_mask_ack_irq() to fpga_ack_irq() so that the specific FPGA interrupt
* being serviced is left unmasked. We can do this because the FPGA cascade
- * interrupt is installed with the IRQF_DISABLED flag, which leaves all
- * interrupts masked at the CPU while an FPGA interrupt handler executes.
+ * interrupt is run with all interrupts masked.
*
* Limited testing indicates that this workaround appears to be effective
* for the smc9194 Ethernet driver used on the Innovator. It should work
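
The comment rewrite above, like the IRQF_DISABLED removals in the omap1 pm and timer hunks below, reflects that IRQF_DISABLED has long been a no-op: genirq runs every handler with local interrupts disabled regardless, so deleting the flag changes no behaviour. In driver terms the before/after is simply (hypothetical example; my_handler, "my_dev" and dev are placeholders, not from this patch):

    /* before */ err = request_irq(irq, my_handler, IRQF_DISABLED, "my_dev", dev);
    /* after  */ err = request_irq(irq, my_handler, 0, "my_dev", dev);

Only the documentation value of the flag is lost, and the fpga.c comment is updated accordingly.
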
diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c
index 02b3eb2e201c..312a0924d786 100644
--- a/arch/arm/mach-omap1/gpio15xx.c
+++ b/arch/arm/mach-omap1/gpio15xx.c
@@ -25,7 +25,7 @@
#define OMAP1510_GPIO_BASE 0xFFFCE000
/* gpio1 */
-static struct __initdata resource omap15xx_mpu_gpio_resources[] = {
+static struct resource omap15xx_mpu_gpio_resources[] = {
{
.start = OMAP1_MPUIO_VBASE,
.end = OMAP1_MPUIO_VBASE + SZ_2K - 1,
@@ -48,7 +48,7 @@ static struct omap_gpio_reg_offs omap15xx_mpuio_regs = {
.irqctrl = OMAP_MPUIO_GPIO_INT_EDGE,
};
-static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = {
+static struct omap_gpio_platform_data omap15xx_mpu_gpio_config = {
.is_mpuio = true,
.bank_width = 16,
.bank_stride = 1,
@@ -66,7 +66,7 @@ static struct platform_device omap15xx_mpu_gpio = {
};
/* gpio2 */
-static struct __initdata resource omap15xx_gpio_resources[] = {
+static struct resource omap15xx_gpio_resources[] = {
{
.start = OMAP1510_GPIO_BASE,
.end = OMAP1510_GPIO_BASE + SZ_2K - 1,
@@ -90,7 +90,7 @@ static struct omap_gpio_reg_offs omap15xx_gpio_regs = {
.pinctrl = OMAP1510_GPIO_PIN_CONTROL,
};
-static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = {
+static struct omap_gpio_platform_data omap15xx_gpio_config = {
.bank_width = 16,
.regs = &omap15xx_gpio_regs,
};
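
This hunk, together with the matching gpio16xx.c and gpio7xx.c hunks below, fixes a doubly wrong annotation: __initdata was written between "struct" and the type name, and, more importantly, these resource/platform-data structures are referenced by the registered platform devices long after .init.data has been freed, so they must not be init data at all. Only the pointer arrays that are merely walked during init keep the annotation, now in its conventional spot after the declarator. In sketch form (foo_* are placeholder names, not from this patch):

    static struct resource foo_resources[] = { /* ... */ };               /* must outlive init */
    static struct platform_device *foo_devs[] __initdata = { /* ... */ }; /* only walked at init time */
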
diff --git a/arch/arm/mach-omap1/gpio16xx.c b/arch/arm/mach-omap1/gpio16xx.c
index b9952a258d82..6e6ec93dcbb3 100644
--- a/arch/arm/mach-omap1/gpio16xx.c
+++ b/arch/arm/mach-omap1/gpio16xx.c
@@ -31,7 +31,7 @@
#define SYSCONFIG_WORD 0x14
/* mpu gpio */
-static struct __initdata resource omap16xx_mpu_gpio_resources[] = {
+static struct resource omap16xx_mpu_gpio_resources[] = {
{
.start = OMAP1_MPUIO_VBASE,
.end = OMAP1_MPUIO_VBASE + SZ_2K - 1,
@@ -54,7 +54,7 @@ static struct omap_gpio_reg_offs omap16xx_mpuio_regs = {
.irqctrl = OMAP_MPUIO_GPIO_INT_EDGE,
};
-static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = {
+static struct omap_gpio_platform_data omap16xx_mpu_gpio_config = {
.is_mpuio = true,
.bank_width = 16,
.bank_stride = 1,
@@ -72,7 +72,7 @@ static struct platform_device omap16xx_mpu_gpio = {
};
/* gpio1 */
-static struct __initdata resource omap16xx_gpio1_resources[] = {
+static struct resource omap16xx_gpio1_resources[] = {
{
.start = OMAP1610_GPIO1_BASE,
.end = OMAP1610_GPIO1_BASE + SZ_2K - 1,
@@ -100,7 +100,7 @@ static struct omap_gpio_reg_offs omap16xx_gpio_regs = {
.edgectrl2 = OMAP1610_GPIO_EDGE_CTRL2,
};
-static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = {
+static struct omap_gpio_platform_data omap16xx_gpio1_config = {
.bank_width = 16,
.regs = &omap16xx_gpio_regs,
};
@@ -116,7 +116,7 @@ static struct platform_device omap16xx_gpio1 = {
};
/* gpio2 */
-static struct __initdata resource omap16xx_gpio2_resources[] = {
+static struct resource omap16xx_gpio2_resources[] = {
{
.start = OMAP1610_GPIO2_BASE,
.end = OMAP1610_GPIO2_BASE + SZ_2K - 1,
@@ -128,7 +128,7 @@ static struct __initdata resource omap16xx_gpio2_resources[] = {
},
};
-static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = {
+static struct omap_gpio_platform_data omap16xx_gpio2_config = {
.bank_width = 16,
.regs = &omap16xx_gpio_regs,
};
@@ -144,7 +144,7 @@ static struct platform_device omap16xx_gpio2 = {
};
/* gpio3 */
-static struct __initdata resource omap16xx_gpio3_resources[] = {
+static struct resource omap16xx_gpio3_resources[] = {
{
.start = OMAP1610_GPIO3_BASE,
.end = OMAP1610_GPIO3_BASE + SZ_2K - 1,
@@ -156,7 +156,7 @@ static struct __initdata resource omap16xx_gpio3_resources[] = {
},
};
-static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = {
+static struct omap_gpio_platform_data omap16xx_gpio3_config = {
.bank_width = 16,
.regs = &omap16xx_gpio_regs,
};
@@ -172,7 +172,7 @@ static struct platform_device omap16xx_gpio3 = {
};
/* gpio4 */
-static struct __initdata resource omap16xx_gpio4_resources[] = {
+static struct resource omap16xx_gpio4_resources[] = {
{
.start = OMAP1610_GPIO4_BASE,
.end = OMAP1610_GPIO4_BASE + SZ_2K - 1,
@@ -184,7 +184,7 @@ static struct __initdata resource omap16xx_gpio4_resources[] = {
},
};
-static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = {
+static struct omap_gpio_platform_data omap16xx_gpio4_config = {
.bank_width = 16,
.regs = &omap16xx_gpio_regs,
};
@@ -199,7 +199,7 @@ static struct platform_device omap16xx_gpio4 = {
.resource = omap16xx_gpio4_resources,
};
-static struct __initdata platform_device * omap16xx_gpio_dev[] = {
+static struct platform_device *omap16xx_gpio_dev[] __initdata = {
&omap16xx_mpu_gpio,
&omap16xx_gpio1,
&omap16xx_gpio2,
diff --git a/arch/arm/mach-omap1/gpio7xx.c b/arch/arm/mach-omap1/gpio7xx.c
index f5819b2b7cbe..4612d2506a2d 100644
--- a/arch/arm/mach-omap1/gpio7xx.c
+++ b/arch/arm/mach-omap1/gpio7xx.c
@@ -30,7 +30,7 @@
#define OMAP1_MPUIO_VBASE OMAP1_MPUIO_BASE
/* mpu gpio */
-static struct __initdata resource omap7xx_mpu_gpio_resources[] = {
+static struct resource omap7xx_mpu_gpio_resources[] = {
{
.start = OMAP1_MPUIO_VBASE,
.end = OMAP1_MPUIO_VBASE + SZ_2K - 1,
@@ -53,7 +53,7 @@ static struct omap_gpio_reg_offs omap7xx_mpuio_regs = {
.irqctrl = OMAP_MPUIO_GPIO_INT_EDGE >> 1,
};
-static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = {
+static struct omap_gpio_platform_data omap7xx_mpu_gpio_config = {
.is_mpuio = true,
.bank_width = 16,
.bank_stride = 2,
@@ -71,7 +71,7 @@ static struct platform_device omap7xx_mpu_gpio = {
};
/* gpio1 */
-static struct __initdata resource omap7xx_gpio1_resources[] = {
+static struct resource omap7xx_gpio1_resources[] = {
{
.start = OMAP7XX_GPIO1_BASE,
.end = OMAP7XX_GPIO1_BASE + SZ_2K - 1,
@@ -94,7 +94,7 @@ static struct omap_gpio_reg_offs omap7xx_gpio_regs = {
.irqctrl = OMAP7XX_GPIO_INT_CONTROL,
};
-static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = {
+static struct omap_gpio_platform_data omap7xx_gpio1_config = {
.bank_width = 32,
.regs = &omap7xx_gpio_regs,
};
@@ -110,7 +110,7 @@ static struct platform_device omap7xx_gpio1 = {
};
/* gpio2 */
-static struct __initdata resource omap7xx_gpio2_resources[] = {
+static struct resource omap7xx_gpio2_resources[] = {
{
.start = OMAP7XX_GPIO2_BASE,
.end = OMAP7XX_GPIO2_BASE + SZ_2K - 1,
@@ -122,7 +122,7 @@ static struct __initdata resource omap7xx_gpio2_resources[] = {
},
};
-static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = {
+static struct omap_gpio_platform_data omap7xx_gpio2_config = {
.bank_width = 32,
.regs = &omap7xx_gpio_regs,
};
@@ -138,7 +138,7 @@ static struct platform_device omap7xx_gpio2 = {
};
/* gpio3 */
-static struct __initdata resource omap7xx_gpio3_resources[] = {
+static struct resource omap7xx_gpio3_resources[] = {
{
.start = OMAP7XX_GPIO3_BASE,
.end = OMAP7XX_GPIO3_BASE + SZ_2K - 1,
@@ -150,7 +150,7 @@ static struct __initdata resource omap7xx_gpio3_resources[] = {
},
};
-static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = {
+static struct omap_gpio_platform_data omap7xx_gpio3_config = {
.bank_width = 32,
.regs = &omap7xx_gpio_regs,
};
@@ -166,7 +166,7 @@ static struct platform_device omap7xx_gpio3 = {
};
/* gpio4 */
-static struct __initdata resource omap7xx_gpio4_resources[] = {
+static struct resource omap7xx_gpio4_resources[] = {
{
.start = OMAP7XX_GPIO4_BASE,
.end = OMAP7XX_GPIO4_BASE + SZ_2K - 1,
@@ -178,7 +178,7 @@ static struct __initdata resource omap7xx_gpio4_resources[] = {
},
};
-static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = {
+static struct omap_gpio_platform_data omap7xx_gpio4_config = {
.bank_width = 32,
.regs = &omap7xx_gpio_regs,
};
@@ -194,7 +194,7 @@ static struct platform_device omap7xx_gpio4 = {
};
/* gpio5 */
-static struct __initdata resource omap7xx_gpio5_resources[] = {
+static struct resource omap7xx_gpio5_resources[] = {
{
.start = OMAP7XX_GPIO5_BASE,
.end = OMAP7XX_GPIO5_BASE + SZ_2K - 1,
@@ -206,7 +206,7 @@ static struct __initdata resource omap7xx_gpio5_resources[] = {
},
};
-static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = {
+static struct omap_gpio_platform_data omap7xx_gpio5_config = {
.bank_width = 32,
.regs = &omap7xx_gpio_regs,
};
@@ -222,7 +222,7 @@ static struct platform_device omap7xx_gpio5 = {
};
/* gpio6 */
-static struct __initdata resource omap7xx_gpio6_resources[] = {
+static struct resource omap7xx_gpio6_resources[] = {
{
.start = OMAP7XX_GPIO6_BASE,
.end = OMAP7XX_GPIO6_BASE + SZ_2K - 1,
@@ -234,7 +234,7 @@ static struct __initdata resource omap7xx_gpio6_resources[] = {
},
};
-static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = {
+static struct omap_gpio_platform_data omap7xx_gpio6_config = {
.bank_width = 32,
.regs = &omap7xx_gpio_regs,
};
@@ -249,7 +249,7 @@ static struct platform_device omap7xx_gpio6 = {
.resource = omap7xx_gpio6_resources,
};
-static struct __initdata platform_device * omap7xx_gpio_dev[] = {
+static struct platform_device *omap7xx_gpio_dev[] __initdata = {
&omap7xx_mpu_gpio,
&omap7xx_gpio1,
&omap7xx_gpio2,
diff --git a/arch/arm/mach-omap1/include/mach/timex.h b/arch/arm/mach-omap1/include/mach/timex.h
deleted file mode 100644
index 4793790d53cc..000000000000
--- a/arch/arm/mach-omap1/include/mach/timex.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-omap1/include/mach/timex.h
- */
-
-#include <plat/timex.h>
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 358b82cb9f78..40a1ae319610 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -628,7 +628,6 @@ static irqreturn_t omap_wakeup_interrupt(int irq, void *dev)
static struct irqaction omap_wakeup_irq = {
.name = "peripheral wakeup",
- .flags = IRQF_DISABLED,
.handler = omap_wakeup_interrupt
};
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 80603d2fef77..6b5f298d6638 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -160,7 +160,7 @@ static irqreturn_t omap_mpu_timer1_interrupt(int irq, void *dev_id)
static struct irqaction omap_mpu_timer1_irq = {
.name = "mpu_timer1",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = omap_mpu_timer1_interrupt,
};
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 0b74246ba62c..107e7ab3edba 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -156,7 +156,7 @@ static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id)
static struct irqaction omap_32k_timer_irq = {
.name = "32KHz timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = omap_32k_timer_interrupt,
};
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index b5fb5f7992df..dc21df166161 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -8,7 +8,6 @@ config ARCH_OMAP2
select CPU_V6
select MULTI_IRQ_HANDLER
select SOC_HAS_OMAP2_SDRC
- select COMMON_CLK
config ARCH_OMAP3
bool "TI OMAP3"
@@ -22,7 +21,6 @@ config ARCH_OMAP3
select PM_OPP if PM
select PM_RUNTIME if CPU_IDLE
select SOC_HAS_OMAP2_SDRC
- select COMMON_CLK
select USB_ARCH_HAS_EHCI if USB_SUPPORT
config ARCH_OMAP4
@@ -45,7 +43,6 @@ config ARCH_OMAP4
select PM_OPP if PM
select PM_RUNTIME if CPU_IDLE
select USB_ARCH_HAS_EHCI if USB_SUPPORT
- select COMMON_CLK
select ARM_ERRATA_754322
select ARM_ERRATA_775420
@@ -59,7 +56,6 @@ config SOC_OMAP5
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if LOCAL_TIMERS
select HAVE_SMP
- select COMMON_CLK
select HAVE_ARM_ARCH_TIMER
select ARM_ERRATA_798181 if SMP
@@ -70,7 +66,6 @@ config SOC_AM33XX
select ARM_CPU_SUSPEND if PM
select CPU_V7
select MULTI_IRQ_HANDLER
- select COMMON_CLK
config SOC_AM43XX
bool "TI AM43x"
@@ -79,7 +74,6 @@ config SOC_AM43XX
select ARCH_OMAP2PLUS
select MULTI_IRQ_HANDLER
select ARM_GIC
- select COMMON_CLK
select MACH_OMAP_GENERIC
config ARCH_OMAP2PLUS
@@ -89,11 +83,11 @@ config ARCH_OMAP2PLUS
select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_OMAP
select ARCH_REQUIRE_GPIOLIB
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
+ select COMMON_CLK
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
- select HAVE_CLK
+ select MACH_OMAP_GENERIC
select OMAP_DM_TIMER
select PINCTRL
select PROC_DEVICETREE if PROC_FS
@@ -187,16 +181,11 @@ config OMAP_PACKAGE_CUS
config OMAP_PACKAGE_CBP
bool
-comment "OMAP Board Type"
+comment "OMAP Legacy Platform Data Board Type"
depends on ARCH_OMAP2PLUS
config MACH_OMAP_GENERIC
- bool "Generic OMAP2+ board"
- depends on ARCH_OMAP2PLUS
- default y
- help
- Support for generic TI OMAP2+ boards using Flattened Device Tree.
- More information at Documentation/devicetree
+ bool
config MACH_OMAP2_TUSB6010
bool
@@ -260,12 +249,6 @@ config MACH_OVERO
default y
select OMAP_PACKAGE_CBB
-config MACH_OMAP3EVM
- bool "OMAP 3530 EVM board"
- depends on ARCH_OMAP3
- default y
- select OMAP_PACKAGE_CBB
-
config MACH_OMAP3517EVM
bool "OMAP3517/ AM3517 EVM board"
depends on ARCH_OMAP3
@@ -314,33 +297,12 @@ config MACH_NOKIA_N8X0
select MACH_NOKIA_N810_WIMAX
select OMAP_PACKAGE_ZAC
-config MACH_NOKIA_RM680
- bool "Nokia N950 (RM-680) / N9 (RM-696) phones"
- depends on ARCH_OMAP3
- default y
- select MACH_NOKIA_RM696
- select OMAP_PACKAGE_CBB
-
config MACH_NOKIA_RX51
bool "Nokia N900 (RX-51) phone"
depends on ARCH_OMAP3
default y
select OMAP_PACKAGE_CBB
-config MACH_OMAP_ZOOM2
- bool "OMAP3 Zoom2 board"
- depends on ARCH_OMAP3
- default y
- select OMAP_PACKAGE_CBB
- select REGULATOR_FIXED_VOLTAGE if REGULATOR
-
-config MACH_OMAP_ZOOM3
- bool "OMAP3630 Zoom3 board"
- depends on ARCH_OMAP3
- default y
- select OMAP_PACKAGE_CBP
- select REGULATOR_FIXED_VOLTAGE if REGULATOR
-
config MACH_CM_T35
bool "CompuLab CM-T35/CM-T3730 modules"
depends on ARCH_OMAP3
@@ -357,31 +319,12 @@ config MACH_CM_T3517
config MACH_CM_T3730
bool
-config MACH_IGEP0020
- bool "IGEP v2 board"
- depends on ARCH_OMAP3
- default y
- select OMAP_PACKAGE_CBB
-
-config MACH_IGEP0030
- bool "IGEP OMAP3 module"
- depends on ARCH_OMAP3
- default y
- select MACH_IGEP0020
- select OMAP_PACKAGE_CBB
-
config MACH_SBC3530
bool "OMAP3 SBC STALKER board"
depends on ARCH_OMAP3
default y
select OMAP_PACKAGE_CUS
-config MACH_OMAP_3630SDP
- bool "OMAP3630 SDP board"
- depends on ARCH_OMAP3
- default y
- select OMAP_PACKAGE_CBP
-
config MACH_TI8168EVM
bool "TI8168 Evaluation Module"
depends on SOC_TI81XX
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index afb457c3135b..e15ac005ef17 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -8,7 +8,7 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
# Common support
obj-y := id.o io.o control.o mux.o devices.o fb.o serial.o gpmc.o timer.o pm.o \
common.o gpio.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
- omap_device.o sram.o
+ omap_device.o sram.o drm.o
omap-2-3-common = irq.o
hwmod-common = omap_hwmod.o omap_hwmod_reset.o \
@@ -112,13 +112,13 @@ obj-$(CONFIG_ARCH_OMAP2) += prm2xxx_3xxx.o prm2xxx.o cm2xxx.o
obj-$(CONFIG_ARCH_OMAP3) += prm2xxx_3xxx.o prm3xxx.o cm3xxx.o
obj-$(CONFIG_ARCH_OMAP3) += vc3xxx_data.o vp3xxx_data.o
obj-$(CONFIG_SOC_AM33XX) += prm33xx.o cm33xx.o
-obj-$(CONFIG_SOC_AM43XX) += prm33xx.o cm33xx.o
omap-prcm-4-5-common = cminst44xx.o cm44xx.o prm44xx.o \
prcm_mpu44xx.o prminst44xx.o \
vc44xx_data.o vp44xx_data.o
obj-$(CONFIG_ARCH_OMAP4) += $(omap-prcm-4-5-common)
obj-$(CONFIG_SOC_OMAP5) += $(omap-prcm-4-5-common)
obj-$(CONFIG_SOC_DRA7XX) += $(omap-prcm-4-5-common)
+obj-$(CONFIG_SOC_AM43XX) += $(omap-prcm-4-5-common)
# OMAP voltage domains
voltagedomain-common := voltage.o vc.o vp.o
@@ -146,6 +146,7 @@ obj-$(CONFIG_ARCH_OMAP4) += powerdomains44xx_data.o
obj-$(CONFIG_SOC_AM33XX) += $(powerdomain-common)
obj-$(CONFIG_SOC_AM33XX) += powerdomains33xx_data.o
obj-$(CONFIG_SOC_AM43XX) += $(powerdomain-common)
+obj-$(CONFIG_SOC_AM43XX) += powerdomains43xx_data.o
obj-$(CONFIG_SOC_OMAP5) += $(powerdomain-common)
obj-$(CONFIG_SOC_OMAP5) += powerdomains54xx_data.o
obj-$(CONFIG_SOC_DRA7XX) += $(powerdomain-common)
@@ -165,6 +166,7 @@ obj-$(CONFIG_ARCH_OMAP4) += clockdomains44xx_data.o
obj-$(CONFIG_SOC_AM33XX) += $(clockdomain-common)
obj-$(CONFIG_SOC_AM33XX) += clockdomains33xx_data.o
obj-$(CONFIG_SOC_AM43XX) += $(clockdomain-common)
+obj-$(CONFIG_SOC_AM43XX) += clockdomains43xx_data.o
obj-$(CONFIG_SOC_OMAP5) += $(clockdomain-common)
obj-$(CONFIG_SOC_OMAP5) += clockdomains54xx_data.o
obj-$(CONFIG_SOC_DRA7XX) += $(clockdomain-common)
@@ -210,6 +212,11 @@ obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_ipblock_data.o
obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_interconnect_data.o
obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_3xxx_data.o
obj-$(CONFIG_SOC_AM33XX) += omap_hwmod_33xx_data.o
+obj-$(CONFIG_SOC_AM33XX) += omap_hwmod_33xx_43xx_interconnect_data.o
+obj-$(CONFIG_SOC_AM33XX) += omap_hwmod_33xx_43xx_ipblock_data.o
+obj-$(CONFIG_SOC_AM43XX) += omap_hwmod_43xx_data.o
+obj-$(CONFIG_SOC_AM43XX) += omap_hwmod_33xx_43xx_interconnect_data.o
+obj-$(CONFIG_SOC_AM43XX) += omap_hwmod_33xx_43xx_ipblock_data.o
obj-$(CONFIG_ARCH_OMAP4) += omap_hwmod_44xx_data.o
obj-$(CONFIG_SOC_OMAP5) += omap_hwmod_54xx_data.o
obj-$(CONFIG_SOC_DRA7XX) += omap_hwmod_7xx_data.o
@@ -228,12 +235,8 @@ endif
# OMAP2420 MSDI controller integration support ("MMC")
obj-$(CONFIG_SOC_OMAP2420) += msdi.o
-ifneq ($(CONFIG_DRM_OMAP),)
-obj-y += drm.o
-endif
-
# Specific board support
-obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o
+obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o pdata-quirks.o
obj-$(CONFIG_MACH_OMAP_H4) += board-h4.o
obj-$(CONFIG_MACH_OMAP_2430SDP) += board-2430sdp.o
obj-$(CONFIG_MACH_OMAP3_BEAGLE) += board-omap3beagle.o
@@ -242,26 +245,14 @@ obj-$(CONFIG_MACH_OMAP_LDP) += board-ldp.o
obj-$(CONFIG_MACH_OMAP3530_LV_SOM) += board-omap3logic.o
obj-$(CONFIG_MACH_OMAP3_TORPEDO) += board-omap3logic.o
obj-$(CONFIG_MACH_OVERO) += board-overo.o
-obj-$(CONFIG_MACH_OMAP3EVM) += board-omap3evm.o
obj-$(CONFIG_MACH_OMAP3_PANDORA) += board-omap3pandora.o
obj-$(CONFIG_MACH_OMAP_3430SDP) += board-3430sdp.o
obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o
-obj-$(CONFIG_MACH_NOKIA_RM680) += board-rm680.o sdram-nokia.o
obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o sdram-nokia.o
obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-peripherals.o
obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-video.o
-obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom.o board-zoom-peripherals.o
-obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom-display.o
-obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom-debugboard.o
-obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom.o board-zoom-peripherals.o
-obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom-display.o
-obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom-debugboard.o
-obj-$(CONFIG_MACH_OMAP_3630SDP) += board-3630sdp.o
-obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-peripherals.o
-obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-display.o
obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o
obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o
-obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o
obj-$(CONFIG_MACH_TOUCHBOOK) += board-omap3touchbook.o
obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
deleted file mode 100644
index 20d6d8189240..000000000000
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright (C) 2009 Texas Instruments Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/gpio.h>
-#include <linux/mtd/nand.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include "common.h"
-#include "gpmc-smc91x.h"
-
-#include "board-zoom.h"
-
-#include "board-flash.h"
-#include "mux.h"
-#include "sdram-hynix-h8mbx00u0mer-0em.h"
-
-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
-
-static struct omap_smc91x_platform_data board_smc91x_data = {
- .cs = 3,
- .flags = GPMC_MUX_ADD_DATA | IORESOURCE_IRQ_LOWLEVEL,
-};
-
-static void __init board_smc91x_init(void)
-{
- board_smc91x_data.gpio_irq = 158;
- gpmc_smc91x_init(&board_smc91x_data);
-}
-
-#else
-
-static inline void board_smc91x_init(void)
-{
-}
-
-#endif /* defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) */
-
-static void enable_board_wakeup_source(void)
-{
- /* T2 interrupt line (keypad) */
- omap_mux_init_signal("sys_nirq",
- OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP);
-}
-
-static struct usbhs_phy_data phy_data[] __initdata = {
- {
- .port = 1,
- .reset_gpio = 126,
- .vcc_gpio = -EINVAL,
- },
- {
- .port = 2,
- .reset_gpio = 61,
- .vcc_gpio = -EINVAL,
- },
-};
-
-static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
-
- .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
- .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
-};
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-/*
- * SDP3630 CS organization
- * See also the Switch S8 settings in the comments.
- */
-static char chip_sel_sdp[][GPMC_CS_NUM] = {
- {PDC_NOR, PDC_NAND, PDC_ONENAND, DBG_MPDB, 0, 0, 0, 0}, /* S8:1111 */
- {PDC_ONENAND, PDC_NAND, PDC_NOR, DBG_MPDB, 0, 0, 0, 0}, /* S8:1110 */
- {PDC_NAND, PDC_ONENAND, PDC_NOR, DBG_MPDB, 0, 0, 0, 0}, /* S8:1101 */
-};
-
-static struct mtd_partition sdp_nor_partitions[] = {
- /* bootloader (U-Boot, etc) in first sector */
- {
- .name = "Bootloader-NOR",
- .offset = 0,
- .size = SZ_256K,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- /* bootloader params in the next sector */
- {
- .name = "Params-NOR",
- .offset = MTDPART_OFS_APPEND,
- .size = SZ_256K,
- .mask_flags = 0,
- },
- /* kernel */
- {
- .name = "Kernel-NOR",
- .offset = MTDPART_OFS_APPEND,
- .size = SZ_2M,
- .mask_flags = 0
- },
- /* file system */
- {
- .name = "Filesystem-NOR",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- .mask_flags = 0
- }
-};
-
-static struct mtd_partition sdp_onenand_partitions[] = {
- {
- .name = "X-Loader-OneNAND",
- .offset = 0,
- .size = 4 * (64 * 2048),
- .mask_flags = MTD_WRITEABLE /* force read-only */
- },
- {
- .name = "U-Boot-OneNAND",
- .offset = MTDPART_OFS_APPEND,
- .size = 2 * (64 * 2048),
- .mask_flags = MTD_WRITEABLE /* force read-only */
- },
- {
- .name = "U-Boot Environment-OneNAND",
- .offset = MTDPART_OFS_APPEND,
- .size = 1 * (64 * 2048),
- },
- {
- .name = "Kernel-OneNAND",
- .offset = MTDPART_OFS_APPEND,
- .size = 16 * (64 * 2048),
- },
- {
- .name = "File System-OneNAND",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct mtd_partition sdp_nand_partitions[] = {
- /* All the partition sizes are listed in terms of NAND block size */
- {
- .name = "X-Loader-NAND",
- .offset = 0,
- .size = 4 * (64 * 2048),
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "U-Boot-NAND",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */
- .size = 10 * (64 * 2048),
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "Boot Env-NAND",
-
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x1c0000 */
- .size = 6 * (64 * 2048),
- },
- {
- .name = "Kernel-NAND",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x280000 */
- .size = 40 * (64 * 2048),
- },
- {
- .name = "File System - NAND",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x780000 */
- },
-};
-
-static struct flash_partitions sdp_flash_partitions[] = {
- {
- .parts = sdp_nor_partitions,
- .nr_parts = ARRAY_SIZE(sdp_nor_partitions),
- },
- {
- .parts = sdp_onenand_partitions,
- .nr_parts = ARRAY_SIZE(sdp_onenand_partitions),
- },
- {
- .parts = sdp_nand_partitions,
- .nr_parts = ARRAY_SIZE(sdp_nand_partitions),
- },
-};
-
-static void __init omap_sdp_init(void)
-{
- omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
- zoom_peripherals_init();
- omap_sdrc_init(h8mbx00u0mer0em_sdrc_params,
- h8mbx00u0mer0em_sdrc_params);
- zoom_display_init();
- board_smc91x_init();
- board_flash_init(sdp_flash_partitions, chip_sel_sdp, NAND_BUSWIDTH_16);
- enable_board_wakeup_source();
-
- usbhs_init_phys(phy_data, ARRAY_SIZE(phy_data));
- usbhs_init(&usbhs_bdata);
-}
-
-MACHINE_START(OMAP_3630SDP, "OMAP 3630SDP board")
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = omap3630_init_early,
- .init_irq = omap3_init_irq,
- .handle_irq = omap3_intc_handle_irq,
- .init_machine = omap_sdp_init,
- .init_late = omap3630_init_late,
- .init_time = omap3_sync32k_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index 33d159e2386e..8dd0ec858cf1 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -25,7 +25,7 @@
#include <linux/gpio.h>
#include <linux/platform_data/gpio-omap.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/i2c/twl.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index fc20a61f6b2a..ac82512b9c8c 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -142,7 +142,7 @@ __init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs,
board_nand_data.nr_parts = nr_parts;
board_nand_data.devsize = nand_type;
- board_nand_data.ecc_opt = OMAP_ECC_HAMMING_CODE_DEFAULT;
+ board_nand_data.ecc_opt = OMAP_ECC_BCH8_CODE_HW;
gpmc_nand_init(&board_nand_data, gpmc_t);
}
#endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 87162e1b94a5..19f1652e94cf 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -15,13 +15,10 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/irqdomain.h>
-#include <linux/clk.h>
#include <asm/mach/arch.h>
#include "common.h"
-#include "common-board-devices.h"
-#include "dss-common.h"
#if !(defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3))
#define intc_of_init NULL
@@ -36,40 +33,9 @@ static struct of_device_id omap_dt_match_table[] __initdata = {
{ }
};
-/*
- * Create alias for USB host PHY clock.
- * Remove this when clock phandle can be provided via DT
- */
-static void __init legacy_init_ehci_clk(char *clkname)
-{
- int ret;
-
- ret = clk_add_alias("main_clk", NULL, clkname, NULL);
- if (ret) {
- pr_err("%s:Failed to add main_clk alias to %s :%d\n",
- __func__, clkname, ret);
- }
-}
-
static void __init omap_generic_init(void)
{
- omap_sdrc_init(NULL, NULL);
-
- of_platform_populate(NULL, omap_dt_match_table, NULL, NULL);
-
- /*
- * HACK: call display setup code for selected boards to enable omapdss.
- * This will be removed when omapdss supports DT.
- */
- if (of_machine_is_compatible("ti,omap4-panda")) {
- omap4_panda_display_init_of();
- legacy_init_ehci_clk("auxclk3_ck");
-
- }
- else if (of_machine_is_compatible("ti,omap4-sdp"))
- omap_4430sdp_display_init_of();
- else if (of_machine_is_compatible("ti,omap5-uevm"))
- legacy_init_ehci_clk("auxclk1_ck");
+ pdata_quirks_init(omap_dt_match_table);
}
#ifdef CONFIG_SOC_OMAP2420
@@ -180,6 +146,7 @@ DT_MACHINE_START(AM33XX_DT, "Generic AM33XX (Flattened Device Tree)")
.init_irq = omap_intc_of_init,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_generic_init,
+ .init_late = am33xx_init_late,
.init_time = omap3_gptimer_timer_init,
.dt_compat = am33xx_boards_compat,
.restart = am33xx_restart,
@@ -219,6 +186,7 @@ DT_MACHINE_START(OMAP5_DT, "Generic OMAP5 (Flattened Device Tree)")
.init_early = omap5_init_early,
.init_irq = omap_gic_of_init,
.init_machine = omap_generic_init,
+ .init_late = omap5_init_late,
.init_time = omap5_realtime_timer_init,
.dt_compat = omap5_boards_compat,
.restart = omap44xx_restart,
@@ -234,6 +202,7 @@ static const char *am43_boards_compat[] __initdata = {
DT_MACHINE_START(AM43_DT, "Generic AM43 (Flattened Device Tree)")
.map_io = am33xx_map_io,
.init_early = am43xx_init_early,
+ .init_late = am43xx_init_late,
.init_irq = omap_gic_of_init,
.init_machine = omap_generic_init,
.init_time = omap3_sync32k_timer_init,
@@ -252,6 +221,7 @@ DT_MACHINE_START(DRA7XX_DT, "Generic DRA7XX (Flattened Device Tree)")
.smp = smp_ops(omap4_smp_ops),
.map_io = omap5_map_io,
.init_early = dra7xx_init_early,
+ .init_late = dra7xx_init_late,
.init_irq = omap_gic_of_init,
.init_machine = omap_generic_init,
.init_time = omap5_realtime_timer_init,
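
The display and clock hacks that used to live in omap_generic_init() move into the new pdata-quirks.c (added to the Makefile earlier in this diff). That file keeps per-board legacy fixups behind a compatible-string table along these lines (paraphrased sketch of the pattern, not the exact file contents; the panda entry is illustrative):

    struct pdata_init {
    	const char *compatible;
    	void (*fn)(void);
    };

    static struct pdata_init pdata_quirks[] __initdata = {
    	{ "ti,omap4-panda", omap4_panda_legacy_init, },
    	{ /* sentinel */ },
    };

pdata_quirks_init() takes over the of_platform_populate() call and then runs the fn of every entry whose compatible matches the running board, so board-generic.c no longer needs to know about individual machines.
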
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 87e41a8b8d46..f7808349a734 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -20,7 +20,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/input.h>
#include <linux/err.h>
#include <linux/clk.h>
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
deleted file mode 100644
index 06dbb2d3d38b..000000000000
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ /dev/null
@@ -1,718 +0,0 @@
-/*
- * Copyright (C) 2009 Integration Software and Electronic Engineering.
- *
- * Modified from mach-omap2/board-generic.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/usb/phy.h>
-
-#include <linux/regulator/machine.h>
-#include <linux/regulator/fixed.h>
-#include <linux/i2c/twl.h>
-#include <linux/mmc/host.h>
-
-#include <linux/mtd/nand.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
-
-#include "common.h"
-#include "gpmc.h"
-#include "mux.h"
-#include "hsmmc.h"
-#include "sdram-numonyx-m65kxxxxam.h"
-#include "common-board-devices.h"
-#include "board-flash.h"
-#include "control.h"
-#include "gpmc-onenand.h"
-
-#define IGEP2_SMSC911X_CS 5
-#define IGEP2_SMSC911X_GPIO 176
-#define IGEP2_GPIO_USBH_NRESET 24
-#define IGEP2_GPIO_LED0_GREEN 26
-#define IGEP2_GPIO_LED0_RED 27
-#define IGEP2_GPIO_LED1_RED 28
-#define IGEP2_GPIO_DVI_PUP 170
-
-#define IGEP2_RB_GPIO_WIFI_NPD 94
-#define IGEP2_RB_GPIO_WIFI_NRESET 95
-#define IGEP2_RB_GPIO_BT_NRESET 137
-#define IGEP2_RC_GPIO_WIFI_NPD 138
-#define IGEP2_RC_GPIO_WIFI_NRESET 139
-#define IGEP2_RC_GPIO_BT_NRESET 137
-
-#define IGEP3_GPIO_LED0_GREEN 54
-#define IGEP3_GPIO_LED0_RED 53
-#define IGEP3_GPIO_LED1_RED 16
-#define IGEP3_GPIO_USBH_NRESET 183
-
-#define IGEP_SYSBOOT_MASK 0x1f
-#define IGEP_SYSBOOT_NAND 0x0f
-#define IGEP_SYSBOOT_ONENAND 0x10
-
-/*
- * IGEP2 Hardware Revision Table
- *
- * --------------------------------------------------------------------------
- * | Id. | Hw Rev. | HW0 (28) | WIFI_NPD | WIFI_NRESET | BT_NRESET |
- * --------------------------------------------------------------------------
- * | 0 | B | high | gpio94 | gpio95 | - |
- * | 0 | B/C (B-compatible) | high | gpio94 | gpio95 | gpio137 |
- * | 1 | C | low | gpio138 | gpio139 | gpio137 |
- * --------------------------------------------------------------------------
- */
-
-#define IGEP2_BOARD_HWREV_B 0
-#define IGEP2_BOARD_HWREV_C 1
-#define IGEP3_BOARD_HWREV 2
-
-static u8 hwrev;
-
-static void __init igep2_get_revision(void)
-{
- u8 ret;
-
- if (machine_is_igep0030()) {
- hwrev = IGEP3_BOARD_HWREV;
- return;
- }
-
- omap_mux_init_gpio(IGEP2_GPIO_LED1_RED, OMAP_PIN_INPUT);
-
- if (gpio_request_one(IGEP2_GPIO_LED1_RED, GPIOF_IN, "GPIO_HW0_REV")) {
- pr_warning("IGEP2: Could not obtain gpio GPIO_HW0_REV\n");
- pr_err("IGEP2: Unknown Hardware Revision\n");
- return;
- }
-
- ret = gpio_get_value(IGEP2_GPIO_LED1_RED);
- if (ret == 0) {
- pr_info("IGEP2: Hardware Revision C (B-NON compatible)\n");
- hwrev = IGEP2_BOARD_HWREV_C;
- } else if (ret == 1) {
- pr_info("IGEP2: Hardware Revision B/C (B compatible)\n");
- hwrev = IGEP2_BOARD_HWREV_B;
- } else {
- pr_err("IGEP2: Unknown Hardware Revision\n");
- hwrev = -1;
- }
-
- gpio_free(IGEP2_GPIO_LED1_RED);
-}
-
-#if defined(CONFIG_MTD_ONENAND_OMAP2) || \
- defined(CONFIG_MTD_ONENAND_OMAP2_MODULE) || \
- defined(CONFIG_MTD_NAND_OMAP2) || \
- defined(CONFIG_MTD_NAND_OMAP2_MODULE)
-
-#define ONENAND_MAP 0x20000000
-
-/* NAND04GR4E1A ( x2 Flash built-in COMBO POP MEMORY )
- * Since the device is equipped with two DataRAMs, and two-plane NAND
- * Flash memory array, these two component enables simultaneous program
- * of 4KiB. Plane1 has only even blocks such as block0, block2, block4
- * while Plane2 has only odd blocks such as block1, block3, block5.
- * So MTD regards it as 4KiB page size and 256KiB block size 64*(2*2048)
- */
-
-static struct mtd_partition igep_flash_partitions[] = {
- {
- .name = "X-Loader",
- .offset = 0,
- .size = 2 * (64*(2*2048))
- },
- {
- .name = "U-Boot",
- .offset = MTDPART_OFS_APPEND,
- .size = 6 * (64*(2*2048)),
- },
- {
- .name = "Environment",
- .offset = MTDPART_OFS_APPEND,
- .size = 2 * (64*(2*2048)),
- },
- {
- .name = "Kernel",
- .offset = MTDPART_OFS_APPEND,
- .size = 12 * (64*(2*2048)),
- },
- {
- .name = "File System",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static inline u32 igep_get_sysboot_value(void)
-{
- return omap_ctrl_readl(OMAP343X_CONTROL_STATUS) & IGEP_SYSBOOT_MASK;
-}
-
-static void __init igep_flash_init(void)
-{
- u32 mux;
- mux = igep_get_sysboot_value();
-
- if (mux == IGEP_SYSBOOT_NAND) {
- pr_info("IGEP: initializing NAND memory device\n");
- board_nand_init(igep_flash_partitions,
- ARRAY_SIZE(igep_flash_partitions),
- 0, NAND_BUSWIDTH_16, nand_default_timings);
- } else if (mux == IGEP_SYSBOOT_ONENAND) {
- pr_info("IGEP: initializing OneNAND memory device\n");
- board_onenand_init(igep_flash_partitions,
- ARRAY_SIZE(igep_flash_partitions), 0);
- } else {
- pr_err("IGEP: Flash: unsupported sysboot sequence found\n");
- }
-}
-
-#else
-static void __init igep_flash_init(void) {}
-#endif
-
-#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
-
-#include <linux/smsc911x.h>
-#include "gpmc-smsc911x.h"
-
-static struct omap_smsc911x_platform_data smsc911x_cfg = {
- .cs = IGEP2_SMSC911X_CS,
- .gpio_irq = IGEP2_SMSC911X_GPIO,
- .gpio_reset = -EINVAL,
- .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
-};
-
-static inline void __init igep2_init_smsc911x(void)
-{
- gpmc_smsc911x_init(&smsc911x_cfg);
-}
-
-#else
-static inline void __init igep2_init_smsc911x(void) { }
-#endif
-
-static struct regulator_consumer_supply igep_vmmc1_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
-};
-
-/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
-static struct regulator_init_data igep_vmmc1 = {
- .constraints = {
- .min_uV = 1850000,
- .max_uV = 3150000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(igep_vmmc1_supply),
- .consumer_supplies = igep_vmmc1_supply,
-};
-
-static struct regulator_consumer_supply igep_vio_supply[] = {
- REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1"),
-};
-
-static struct regulator_init_data igep_vio = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = 1,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(igep_vio_supply),
- .consumer_supplies = igep_vio_supply,
-};
-
-static struct regulator_consumer_supply igep_vmmc2_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
-};
-
-static struct regulator_init_data igep_vmmc2 = {
- .constraints = {
- .valid_modes_mask = REGULATOR_MODE_NORMAL,
- .always_on = 1,
- },
- .num_consumer_supplies = ARRAY_SIZE(igep_vmmc2_supply),
- .consumer_supplies = igep_vmmc2_supply,
-};
-
-static struct fixed_voltage_config igep_vwlan = {
- .supply_name = "vwlan",
- .microvolts = 3300000,
- .gpio = -EINVAL,
- .enabled_at_boot = 1,
- .init_data = &igep_vmmc2,
-};
-
-static struct platform_device igep_vwlan_device = {
- .name = "reg-fixed-voltage",
- .id = 0,
- .dev = {
- .platform_data = &igep_vwlan,
- },
-};
-
-static struct omap2_hsmmc_info mmc[] = {
- {
- .mmc = 1,
- .caps = MMC_CAP_4_BIT_DATA,
- .gpio_cd = -EINVAL,
- .gpio_wp = -EINVAL,
- .deferred = true,
- },
-#if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE)
- {
- .mmc = 2,
- .caps = MMC_CAP_4_BIT_DATA,
- .gpio_cd = -EINVAL,
- .gpio_wp = -EINVAL,
- },
-#endif
- {} /* Terminator */
-};
-
-#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
-#include <linux/leds.h>
-
-static struct gpio_led igep_gpio_leds[] = {
- [0] = {
- .name = "omap3:red:user0",
- .default_state = 0,
- },
- [1] = {
- .name = "omap3:green:boot",
- .default_state = 1,
- },
- [2] = {
- .name = "omap3:red:user1",
- .default_state = 0,
- },
- [3] = {
- .name = "omap3:green:user1",
- .default_state = 0,
- .gpio = -EINVAL, /* gets replaced */
- .active_low = 1,
- },
-};
-
-static struct gpio_led_platform_data igep_led_pdata = {
- .leds = igep_gpio_leds,
- .num_leds = ARRAY_SIZE(igep_gpio_leds),
-};
-
-static struct platform_device igep_led_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &igep_led_pdata,
- },
-};
-
-static void __init igep_leds_init(void)
-{
- if (machine_is_igep0020()) {
- igep_gpio_leds[0].gpio = IGEP2_GPIO_LED0_RED;
- igep_gpio_leds[1].gpio = IGEP2_GPIO_LED0_GREEN;
- igep_gpio_leds[2].gpio = IGEP2_GPIO_LED1_RED;
- } else {
- igep_gpio_leds[0].gpio = IGEP3_GPIO_LED0_RED;
- igep_gpio_leds[1].gpio = IGEP3_GPIO_LED0_GREEN;
- igep_gpio_leds[2].gpio = IGEP3_GPIO_LED1_RED;
- }
-
- platform_device_register(&igep_led_device);
-}
-
-#else
-static struct gpio igep_gpio_leds[] __initdata = {
- { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:red:d0" },
- { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:green:d0" },
- { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:red:d1" },
-};
-
-static inline void igep_leds_init(void)
-{
- int i;
-
- if (machine_is_igep0020()) {
- igep_gpio_leds[0].gpio = IGEP2_GPIO_LED0_RED;
- igep_gpio_leds[1].gpio = IGEP2_GPIO_LED0_GREEN;
- igep_gpio_leds[2].gpio = IGEP2_GPIO_LED1_RED;
- } else {
- igep_gpio_leds[0].gpio = IGEP3_GPIO_LED0_RED;
- igep_gpio_leds[1].gpio = IGEP3_GPIO_LED0_GREEN;
- igep_gpio_leds[2].gpio = IGEP3_GPIO_LED1_RED;
- }
-
- if (gpio_request_array(igep_gpio_leds, ARRAY_SIZE(igep_gpio_leds))) {
- pr_warning("IGEP v2: Could not obtain leds gpios\n");
- return;
- }
-
- for (i = 0; i < ARRAY_SIZE(igep_gpio_leds); i++)
- gpio_export(igep_gpio_leds[i].gpio, 0);
-}
-#endif
-
-static struct gpio igep2_twl_gpios[] = {
- { -EINVAL, GPIOF_IN, "GPIO_EHCI_NOC" },
- { -EINVAL, GPIOF_OUT_INIT_LOW, "GPIO_USBH_CPEN" },
-};
-
-static int igep_twl_gpio_setup(struct device *dev,
- unsigned gpio, unsigned ngpio)
-{
- int ret;
-
- /* gpio + 0 is "mmc0_cd" (input/IRQ) */
- mmc[0].gpio_cd = gpio + 0;
- omap_hsmmc_late_init(mmc);
-
- /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
-#if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE)
- ret = gpio_request_one(gpio + TWL4030_GPIO_MAX + 1, GPIOF_OUT_INIT_HIGH,
- "gpio-led:green:d1");
- if (ret == 0)
- gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0);
- else
- pr_warning("IGEP: Could not obtain gpio GPIO_LED1_GREEN\n");
-#else
- igep_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1;
-#endif
-
- if (machine_is_igep0030())
- return 0;
-
- /*
- * REVISIT: need ehci-omap hooks for external VBUS
- * power switch and overcurrent detect
- */
- igep2_twl_gpios[0].gpio = gpio + 1;
-
- /* TWL4030_GPIO_MAX + 0 == ledA, GPIO_USBH_CPEN (out, active low) */
- igep2_twl_gpios[1].gpio = gpio + TWL4030_GPIO_MAX;
-
- ret = gpio_request_array(igep2_twl_gpios, ARRAY_SIZE(igep2_twl_gpios));
- if (ret < 0)
- pr_err("IGEP2: Could not obtain gpio for USBH_CPEN");
-
- return 0;
-};
-
-static struct twl4030_gpio_platform_data igep_twl4030_gpio_pdata = {
- .use_leds = true,
- .setup = igep_twl_gpio_setup,
-};
-
-static struct connector_dvi_platform_data omap3stalker_dvi_connector_pdata = {
- .name = "dvi",
- .source = "tfp410.0",
- .i2c_bus_num = 3,
-};
-
-static struct platform_device omap3stalker_dvi_connector_device = {
- .name = "connector-dvi",
- .id = 0,
- .dev.platform_data = &omap3stalker_dvi_connector_pdata,
-};
-
-static struct encoder_tfp410_platform_data omap3stalker_tfp410_pdata = {
- .name = "tfp410.0",
- .source = "dpi.0",
- .data_lines = 24,
- .power_down_gpio = IGEP2_GPIO_DVI_PUP,
-};
-
-static struct platform_device omap3stalker_tfp410_device = {
- .name = "tfp410",
- .id = 0,
- .dev.platform_data = &omap3stalker_tfp410_pdata,
-};
-
-static struct omap_dss_board_info igep2_dss_data = {
- .default_display_name = "dvi",
-};
-
-static struct platform_device *igep_devices[] __initdata = {
- &igep_vwlan_device,
- &omap3stalker_tfp410_device,
- &omap3stalker_dvi_connector_device,
-};
-
-static int igep2_keymap[] = {
- KEY(0, 0, KEY_LEFT),
- KEY(0, 1, KEY_RIGHT),
- KEY(0, 2, KEY_A),
- KEY(0, 3, KEY_B),
- KEY(1, 0, KEY_DOWN),
- KEY(1, 1, KEY_UP),
- KEY(1, 2, KEY_E),
- KEY(1, 3, KEY_F),
- KEY(2, 0, KEY_ENTER),
- KEY(2, 1, KEY_I),
- KEY(2, 2, KEY_J),
- KEY(2, 3, KEY_K),
- KEY(3, 0, KEY_M),
- KEY(3, 1, KEY_N),
- KEY(3, 2, KEY_O),
- KEY(3, 3, KEY_P)
-};
-
-static struct matrix_keymap_data igep2_keymap_data = {
- .keymap = igep2_keymap,
- .keymap_size = ARRAY_SIZE(igep2_keymap),
-};
-
-static struct twl4030_keypad_data igep2_keypad_pdata = {
- .keymap_data = &igep2_keymap_data,
- .rows = 4,
- .cols = 4,
- .rep = 1,
-};
-
-static struct twl4030_platform_data igep_twldata = {
- /* platform_data for children goes here */
- .gpio = &igep_twl4030_gpio_pdata,
- .vmmc1 = &igep_vmmc1,
- .vio = &igep_vio,
-};
-
-static struct i2c_board_info __initdata igep2_i2c3_boardinfo[] = {
- {
- I2C_BOARD_INFO("eeprom", 0x50),
- },
-};
-
-static void __init igep_i2c_init(void)
-{
- int ret;
-
- omap3_pmic_get_config(&igep_twldata, TWL_COMMON_PDATA_USB,
- TWL_COMMON_REGULATOR_VPLL2);
- igep_twldata.vpll2->constraints.apply_uV = true;
- igep_twldata.vpll2->constraints.name = "VDVI";
-
- if (machine_is_igep0020()) {
- /*
- * Bus 3 is attached to the DVI port where devices like the
- * pico DLP projector don't work reliably with 400kHz
- */
- ret = omap_register_i2c_bus(3, 100, igep2_i2c3_boardinfo,
- ARRAY_SIZE(igep2_i2c3_boardinfo));
- if (ret)
- pr_warning("IGEP2: Could not register I2C3 bus (%d)\n", ret);
-
- igep_twldata.keypad = &igep2_keypad_pdata;
- /* Get common pmic data */
- omap3_pmic_get_config(&igep_twldata, TWL_COMMON_PDATA_AUDIO, 0);
- }
-
- omap3_pmic_init("twl4030", &igep_twldata);
-}
-
-static struct usbhs_phy_data igep2_phy_data[] __initdata = {
- {
- .port = 1,
- .reset_gpio = IGEP2_GPIO_USBH_NRESET,
- .vcc_gpio = -EINVAL,
- },
-};
-
-static struct usbhs_phy_data igep3_phy_data[] __initdata = {
- {
- .port = 2,
- .reset_gpio = IGEP3_GPIO_USBH_NRESET,
- .vcc_gpio = -EINVAL,
- },
-};
-
-static struct usbhs_omap_platform_data igep2_usbhs_bdata __initdata = {
- .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
-};
-
-static struct usbhs_omap_platform_data igep3_usbhs_bdata __initdata = {
- .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
-};
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
- /* Display Sub System */
- OMAP3_MUX(DSS_PCLK, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_HSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_VSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_ACBIAS, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA8, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA9, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA10, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA11, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA12, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA13, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA14, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA15, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA16, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA17, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA20, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- /* TFP410 PanelBus DVI Transmitte (GPIO_170) */
- OMAP3_MUX(HDQ_SIO, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
- /* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */
- OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-#if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE)
-static struct gpio igep_wlan_bt_gpios[] __initdata = {
- { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_WIFI_NPD" },
- { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_WIFI_NRESET" },
- { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_BT_NRESET" },
-};
-
-static void __init igep_wlan_bt_init(void)
-{
- int err;
-
- /* GPIO's for WLAN-BT combo depends on hardware revision */
- if (hwrev == IGEP2_BOARD_HWREV_B) {
- igep_wlan_bt_gpios[0].gpio = IGEP2_RB_GPIO_WIFI_NPD;
- igep_wlan_bt_gpios[1].gpio = IGEP2_RB_GPIO_WIFI_NRESET;
- igep_wlan_bt_gpios[2].gpio = IGEP2_RB_GPIO_BT_NRESET;
- } else if (hwrev == IGEP2_BOARD_HWREV_C || machine_is_igep0030()) {
- igep_wlan_bt_gpios[0].gpio = IGEP2_RC_GPIO_WIFI_NPD;
- igep_wlan_bt_gpios[1].gpio = IGEP2_RC_GPIO_WIFI_NRESET;
- igep_wlan_bt_gpios[2].gpio = IGEP2_RC_GPIO_BT_NRESET;
- } else
- return;
-
- /* Make sure that the GPIO pins are muxed correctly */
- omap_mux_init_gpio(igep_wlan_bt_gpios[0].gpio, OMAP_PIN_OUTPUT);
- omap_mux_init_gpio(igep_wlan_bt_gpios[1].gpio, OMAP_PIN_OUTPUT);
- omap_mux_init_gpio(igep_wlan_bt_gpios[2].gpio, OMAP_PIN_OUTPUT);
-
- err = gpio_request_array(igep_wlan_bt_gpios,
- ARRAY_SIZE(igep_wlan_bt_gpios));
- if (err) {
- pr_warning("IGEP2: Could not obtain WIFI/BT gpios\n");
- return;
- }
-
- gpio_export(igep_wlan_bt_gpios[0].gpio, 0);
- gpio_export(igep_wlan_bt_gpios[1].gpio, 0);
- gpio_export(igep_wlan_bt_gpios[2].gpio, 0);
-
- gpio_set_value(igep_wlan_bt_gpios[1].gpio, 0);
- udelay(10);
- gpio_set_value(igep_wlan_bt_gpios[1].gpio, 1);
-
-}
-#else
-static inline void __init igep_wlan_bt_init(void) { }
-#endif
-
-static struct regulator_consumer_supply dummy_supplies[] = {
- REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
- REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
-};
-
-static void __init igep_init(void)
-{
- regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies));
- omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
-
- /* Get IGEP2 hardware revision */
- igep2_get_revision();
-
- omap_hsmmc_init(mmc);
-
- /* Register I2C busses and drivers */
- igep_i2c_init();
- platform_add_devices(igep_devices, ARRAY_SIZE(igep_devices));
- omap_serial_init();
- omap_sdrc_init(m65kxxxxam_sdrc_params,
- m65kxxxxam_sdrc_params);
- usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
- usb_musb_init(NULL);
-
- igep_flash_init();
- igep_leds_init();
- omap_twl4030_audio_init("igep2", NULL);
-
- /*
- * WLAN-BT combo module from MuRata which has a Marvell WLAN
- * (88W8686) + CSR Bluetooth chipset. Uses SDIO interface.
- */
- igep_wlan_bt_init();
-
- if (machine_is_igep0020()) {
- omap_display_init(&igep2_dss_data);
- igep2_init_smsc911x();
- usbhs_init_phys(igep2_phy_data, ARRAY_SIZE(igep2_phy_data));
- usbhs_init(&igep2_usbhs_bdata);
- } else {
- usbhs_init_phys(igep3_phy_data, ARRAY_SIZE(igep3_phy_data));
- usbhs_init(&igep3_usbhs_bdata);
- }
-}
-
-MACHINE_START(IGEP0020, "IGEP v2 board")
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = omap35xx_init_early,
- .init_irq = omap3_init_irq,
- .handle_irq = omap3_intc_handle_irq,
- .init_machine = igep_init,
- .init_late = omap35xx_init_late,
- .init_time = omap3_sync32k_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
-
-MACHINE_START(IGEP0030, "IGEP OMAP3 module")
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = omap35xx_init_early,
- .init_irq = omap3_init_irq,
- .handle_irq = omap3_intc_handle_irq,
- .init_machine = igep_init,
- .init_late = omap35xx_init_late,
- .init_time = omap3_sync32k_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index dd8da2c5399f..4ec8d82b0492 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -36,7 +36,6 @@
#include <asm/mach/map.h>
#include "common.h"
-#include "board-zoom.h"
#include "gpmc.h"
#include "gpmc-smsc911x.h"
@@ -406,7 +405,7 @@ static void __init omap_ldp_init(void)
usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
board_nand_init(ldp_nand_partitions, ARRAY_SIZE(ldp_nand_partitions),
- ZOOM_NAND_CS, 0, nand_default_timings);
+ 0, 0, nand_default_timings);
omap_hsmmc_init(mmc);
ldp_display_init();
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 8b9cd0690ce7..a516c1bda141 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -25,7 +25,7 @@
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/cpu.h>
#include <linux/mtd/mtd.h>
@@ -516,11 +516,11 @@ static int __init beagle_opp_init(void)
return -ENODEV;
}
/* Enable MPU 1GHz and lower opps */
- r = opp_enable(mpu_dev, 800000000);
+ r = dev_pm_opp_enable(mpu_dev, 800000000);
/* TODO: MPU 1GHz needs SR and ABB */
/* Enable IVA 800MHz and lower opps */
- r |= opp_enable(iva_dev, 660000000);
+ r |= dev_pm_opp_enable(iva_dev, 660000000);
/* TODO: DSP 800MHz needs SR and ABB */
if (r) {
pr_err("%s: failed to enable higher opp %d\n",
@@ -529,8 +529,8 @@ static int __init beagle_opp_init(void)
* Cleanup - disable the higher freqs - we dont care
* about the results
*/
- opp_disable(mpu_dev, 800000000);
- opp_disable(iva_dev, 660000000);
+ dev_pm_opp_disable(mpu_dev, 800000000);
+ dev_pm_opp_disable(iva_dev, 660000000);
}
}
return 0;
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
deleted file mode 100644
index 18143873346c..000000000000
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ /dev/null
@@ -1,756 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/board-omap3evm.c
- *
- * Copyright (C) 2008 Texas Instruments
- *
- * Modified from mach-omap2/board-3430sdp.c
- *
- * Initial code: Syed Mohammed Khasim
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/input.h>
-#include <linux/input/matrix_keypad.h>
-#include <linux/leds.h>
-#include <linux/interrupt.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/nand.h>
-
-#include <linux/spi/spi.h>
-#include <linux/spi/ads7846.h>
-#include <linux/i2c/twl.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/musb.h>
-#include <linux/usb/usb_phy_gen_xceiv.h>
-#include <linux/smsc911x.h>
-
-#include <linux/wl12xx.h>
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/machine.h>
-#include <linux/mmc/host.h>
-#include <linux/export.h>
-#include <linux/usb/phy.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include <linux/platform_data/mtd-nand-omap2.h>
-#include "common.h"
-#include <linux/platform_data/spi-omap2-mcspi.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-
-#include "soc.h"
-#include "mux.h"
-#include "sdram-micron-mt46h32m32lf-6.h"
-#include "hsmmc.h"
-#include "common-board-devices.h"
-#include "board-flash.h"
-
-#define NAND_CS 0
-
-#define OMAP3_EVM_TS_GPIO 175
-#define OMAP3_EVM_EHCI_VBUS 22
-#define OMAP3_EVM_EHCI_SELECT 61
-
-#define OMAP3EVM_ETHR_START 0x2c000000
-#define OMAP3EVM_ETHR_SIZE 1024
-#define OMAP3EVM_ETHR_ID_REV 0x50
-#define OMAP3EVM_ETHR_GPIO_IRQ 176
-#define OMAP3EVM_SMSC911X_CS 5
-/*
- * Eth Reset signal
- * 64 = Generation 1 (<=RevD)
- * 7 = Generation 2 (>=RevE)
- */
-#define OMAP3EVM_GEN1_ETHR_GPIO_RST 64
-#define OMAP3EVM_GEN2_ETHR_GPIO_RST 7
-
-/*
- * OMAP35x EVM revision
- * Run time detection of EVM revision is done by reading Ethernet
- * PHY ID -
- * GEN_1 = 0x01150000
- * GEN_2 = 0x92200000
- */
-enum {
- OMAP3EVM_BOARD_GEN_1 = 0, /* EVM Rev between A - D */
- OMAP3EVM_BOARD_GEN_2, /* EVM Rev >= Rev E */
-};
-
-static u8 omap3_evm_version;
-
-static u8 get_omap3_evm_rev(void)
-{
- return omap3_evm_version;
-}
-
-static void __init omap3_evm_get_revision(void)
-{
- void __iomem *ioaddr;
- unsigned int smsc_id;
-
- /* Ethernet PHY ID is stored at ID_REV register */
- ioaddr = ioremap_nocache(OMAP3EVM_ETHR_START, SZ_1K);
- if (!ioaddr)
- return;
- smsc_id = readl(ioaddr + OMAP3EVM_ETHR_ID_REV) & 0xFFFF0000;
- iounmap(ioaddr);
-
- switch (smsc_id) {
- /*SMSC9115 chipset*/
- case 0x01150000:
- omap3_evm_version = OMAP3EVM_BOARD_GEN_1;
- break;
- /*SMSC 9220 chipset*/
- case 0x92200000:
- default:
- omap3_evm_version = OMAP3EVM_BOARD_GEN_2;
- }
-}
-
-#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
-#include "gpmc-smsc911x.h"
-
-static struct omap_smsc911x_platform_data smsc911x_cfg = {
- .cs = OMAP3EVM_SMSC911X_CS,
- .gpio_irq = OMAP3EVM_ETHR_GPIO_IRQ,
- .gpio_reset = -EINVAL,
- .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
-};
-
-static inline void __init omap3evm_init_smsc911x(void)
-{
- /* Configure ethernet controller reset gpio */
- if (cpu_is_omap3430()) {
- if (get_omap3_evm_rev() == OMAP3EVM_BOARD_GEN_1)
- smsc911x_cfg.gpio_reset = OMAP3EVM_GEN1_ETHR_GPIO_RST;
- else
- smsc911x_cfg.gpio_reset = OMAP3EVM_GEN2_ETHR_GPIO_RST;
- }
-
- gpmc_smsc911x_init(&smsc911x_cfg);
-}
-
-#else
-static inline void __init omap3evm_init_smsc911x(void) { return; }
-#endif
-
-/*
- * OMAP3EVM LCD Panel control signals
- */
-#define OMAP3EVM_LCD_PANEL_LR 2
-#define OMAP3EVM_LCD_PANEL_UD 3
-#define OMAP3EVM_LCD_PANEL_INI 152
-#define OMAP3EVM_LCD_PANEL_QVGA 154
-#define OMAP3EVM_LCD_PANEL_RESB 155
-
-#define OMAP3EVM_LCD_PANEL_ENVDD 153
-#define OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO 210
-
-/*
- * OMAP3EVM DVI control signals
- */
-#define OMAP3EVM_DVI_PANEL_EN_GPIO 199
-
-#ifdef CONFIG_BROKEN
-static void __init omap3_evm_display_init(void)
-{
- int r;
-
- r = gpio_request_one(OMAP3EVM_LCD_PANEL_ENVDD, GPIOF_OUT_INIT_LOW,
- "lcd_panel_envdd");
- if (r)
- pr_err("failed to get lcd_panel_envdd GPIO\n");
-
- r = gpio_request_one(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO,
- GPIOF_OUT_INIT_LOW, "lcd_panel_bklight");
- if (r)
- pr_err("failed to get lcd_panel_bklight GPIO\n");
-
- if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2)
- gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0);
- else
- gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1);
-}
-#endif
-
-static struct panel_sharp_ls037v7dw01_platform_data omap3_evm_lcd_pdata = {
- .name = "lcd",
- .source = "dpi.0",
-
- .data_lines = 18,
-
- .resb_gpio = OMAP3EVM_LCD_PANEL_RESB,
- .ini_gpio = OMAP3EVM_LCD_PANEL_INI,
- .mo_gpio = OMAP3EVM_LCD_PANEL_QVGA,
- .lr_gpio = OMAP3EVM_LCD_PANEL_LR,
- .ud_gpio = OMAP3EVM_LCD_PANEL_UD,
-};
-
-static struct platform_device omap3_evm_lcd_device = {
- .name = "panel-sharp-ls037v7dw01",
- .id = 0,
- .dev.platform_data = &omap3_evm_lcd_pdata,
-};
-
-static struct connector_dvi_platform_data omap3_evm_dvi_connector_pdata = {
- .name = "dvi",
- .source = "tfp410.0",
- .i2c_bus_num = -1,
-};
-
-static struct platform_device omap3_evm_dvi_connector_device = {
- .name = "connector-dvi",
- .id = 0,
- .dev.platform_data = &omap3_evm_dvi_connector_pdata,
-};
-
-static struct encoder_tfp410_platform_data omap3_evm_tfp410_pdata = {
- .name = "tfp410.0",
- .source = "dpi.0",
- .data_lines = 24,
- .power_down_gpio = OMAP3EVM_DVI_PANEL_EN_GPIO,
-};
-
-static struct platform_device omap3_evm_tfp410_device = {
- .name = "tfp410",
- .id = 0,
- .dev.platform_data = &omap3_evm_tfp410_pdata,
-};
-
-static struct connector_atv_platform_data omap3_evm_tv_pdata = {
- .name = "tv",
- .source = "venc.0",
- .connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
- .invert_polarity = false,
-};
-
-static struct platform_device omap3_evm_tv_connector_device = {
- .name = "connector-analog-tv",
- .id = 0,
- .dev.platform_data = &omap3_evm_tv_pdata,
-};
-
-static struct omap_dss_board_info omap3_evm_dss_data = {
- .default_display_name = "lcd",
-};
-
-static struct regulator_consumer_supply omap3evm_vmmc1_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
-};
-
-static struct regulator_consumer_supply omap3evm_vsim_supply[] = {
- REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
-};
-
-/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
-static struct regulator_init_data omap3evm_vmmc1 = {
- .constraints = {
- .min_uV = 1850000,
- .max_uV = 3150000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(omap3evm_vmmc1_supply),
- .consumer_supplies = omap3evm_vmmc1_supply,
-};
-
-/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
-static struct regulator_init_data omap3evm_vsim = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 3000000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(omap3evm_vsim_supply),
- .consumer_supplies = omap3evm_vsim_supply,
-};
-
-static struct omap2_hsmmc_info mmc[] = {
- {
- .mmc = 1,
- .caps = MMC_CAP_4_BIT_DATA,
- .gpio_cd = -EINVAL,
- .gpio_wp = 63,
- .deferred = true,
- },
-#ifdef CONFIG_WILINK_PLATFORM_DATA
- {
- .name = "wl1271",
- .mmc = 2,
- .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
- .gpio_wp = -EINVAL,
- .gpio_cd = -EINVAL,
- .nonremovable = true,
- },
-#endif
- {} /* Terminator */
-};
-
-static struct gpio_led gpio_leds[] = {
- {
- .name = "omap3evm::ledb",
- /* normally not visible (board underside) */
- .default_trigger = "default-on",
- .gpio = -EINVAL, /* gets replaced */
- .active_low = true,
- },
-};
-
-static struct gpio_led_platform_data gpio_led_info = {
- .leds = gpio_leds,
- .num_leds = ARRAY_SIZE(gpio_leds),
-};
-
-static struct platform_device leds_gpio = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &gpio_led_info,
- },
-};
-
-
-static int omap3evm_twl_gpio_setup(struct device *dev,
- unsigned gpio, unsigned ngpio)
-{
- int r, lcd_bl_en;
-
- /* gpio + 0 is "mmc0_cd" (input/IRQ) */
- mmc[0].gpio_cd = gpio + 0;
- omap_hsmmc_late_init(mmc);
-
- /*
- * Most GPIOs are for USB OTG. Some are mostly sent to
- * the P2 connector; notably LEDA for the LCD backlight.
- */
-
- /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */
- lcd_bl_en = get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2 ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
- r = gpio_request_one(gpio + TWL4030_GPIO_MAX, lcd_bl_en, "EN_LCD_BKL");
- if (r)
- printk(KERN_ERR "failed to get/set lcd_bkl gpio\n");
-
- /* gpio + 7 == DVI Enable */
- gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "EN_DVI");
-
- /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
- gpio_leds[0].gpio = gpio + TWL4030_GPIO_MAX + 1;
-
- platform_device_register(&leds_gpio);
-
- /* Enable VBUS switch by setting TWL4030.GPIO2DIR as output
- * for starting USB transceiver
- */
-#ifdef CONFIG_TWL4030_CORE
- if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) {
- u8 val;
-
- twl_i2c_read_u8(TWL4030_MODULE_GPIO, &val, REG_GPIODATADIR1);
- val |= 0x04; /* TWL4030.GPIO2DIR BIT at GPIODATADIR1(0x9B) */
- twl_i2c_write_u8(TWL4030_MODULE_GPIO, val, REG_GPIODATADIR1);
- }
-#endif
-
- return 0;
-}
-
-static struct twl4030_gpio_platform_data omap3evm_gpio_data = {
- .use_leds = true,
- .setup = omap3evm_twl_gpio_setup,
-};
-
-static uint32_t board_keymap[] = {
- KEY(0, 0, KEY_LEFT),
- KEY(0, 1, KEY_DOWN),
- KEY(0, 2, KEY_ENTER),
- KEY(0, 3, KEY_M),
-
- KEY(1, 0, KEY_RIGHT),
- KEY(1, 1, KEY_UP),
- KEY(1, 2, KEY_I),
- KEY(1, 3, KEY_N),
-
- KEY(2, 0, KEY_A),
- KEY(2, 1, KEY_E),
- KEY(2, 2, KEY_J),
- KEY(2, 3, KEY_O),
-
- KEY(3, 0, KEY_B),
- KEY(3, 1, KEY_F),
- KEY(3, 2, KEY_K),
- KEY(3, 3, KEY_P)
-};
-
-static struct matrix_keymap_data board_map_data = {
- .keymap = board_keymap,
- .keymap_size = ARRAY_SIZE(board_keymap),
-};
-
-static struct twl4030_keypad_data omap3evm_kp_data = {
- .keymap_data = &board_map_data,
- .rows = 4,
- .cols = 4,
- .rep = 1,
-};
-
-/* ads7846 on SPI */
-static struct regulator_consumer_supply omap3evm_vio_supply[] = {
- REGULATOR_SUPPLY("vcc", "spi1.0"),
-};
-
-/* VIO for ads7846 */
-static struct regulator_init_data omap3evm_vio = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(omap3evm_vio_supply),
- .consumer_supplies = omap3evm_vio_supply,
-};
-
-#ifdef CONFIG_WILINK_PLATFORM_DATA
-
-#define OMAP3EVM_WLAN_PMENA_GPIO (150)
-#define OMAP3EVM_WLAN_IRQ_GPIO (149)
-
-static struct regulator_consumer_supply omap3evm_vmmc2_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
-};
-
-/* VMMC2 for driving the WL12xx module */
-static struct regulator_init_data omap3evm_vmmc2 = {
- .constraints = {
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(omap3evm_vmmc2_supply),
- .consumer_supplies = omap3evm_vmmc2_supply,
-};
-
-static struct fixed_voltage_config omap3evm_vwlan = {
- .supply_name = "vwl1271",
- .microvolts = 1800000, /* 1.80V */
- .gpio = OMAP3EVM_WLAN_PMENA_GPIO,
- .startup_delay = 70000, /* 70ms */
- .enable_high = 1,
- .enabled_at_boot = 0,
- .init_data = &omap3evm_vmmc2,
-};
-
-static struct platform_device omap3evm_wlan_regulator = {
- .name = "reg-fixed-voltage",
- .id = 1,
- .dev = {
- .platform_data = &omap3evm_vwlan,
- },
-};
-
-struct wl12xx_platform_data omap3evm_wlan_data __initdata = {
- .board_ref_clock = WL12XX_REFCLOCK_38, /* 38.4 MHz */
-};
-#endif
-
-/* VAUX2 for USB */
-static struct regulator_consumer_supply omap3evm_vaux2_supplies[] = {
- REGULATOR_SUPPLY("VDD_CSIPHY1", "omap3isp"), /* OMAP ISP */
- REGULATOR_SUPPLY("VDD_CSIPHY2", "omap3isp"), /* OMAP ISP */
- REGULATOR_SUPPLY("vcc", "usb_phy_gen_xceiv.2"), /* hsusb port 2 */
- REGULATOR_SUPPLY("vaux2", NULL),
-};
-
-static struct regulator_init_data omap3evm_vaux2 = {
- .constraints = {
- .min_uV = 2800000,
- .max_uV = 2800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(omap3evm_vaux2_supplies),
- .consumer_supplies = omap3evm_vaux2_supplies,
-};
-
-static struct twl4030_platform_data omap3evm_twldata = {
- /* platform_data for children goes here */
- .keypad = &omap3evm_kp_data,
- .gpio = &omap3evm_gpio_data,
- .vio = &omap3evm_vio,
- .vmmc1 = &omap3evm_vmmc1,
- .vsim = &omap3evm_vsim,
-};
-
-static int __init omap3_evm_i2c_init(void)
-{
- omap3_pmic_get_config(&omap3evm_twldata,
- TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC |
- TWL_COMMON_PDATA_AUDIO,
- TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
-
- omap3evm_twldata.vdac->constraints.apply_uV = true;
- omap3evm_twldata.vpll2->constraints.apply_uV = true;
-
- omap3_pmic_init("twl4030", &omap3evm_twldata);
- omap_register_i2c_bus(2, 400, NULL, 0);
- omap_register_i2c_bus(3, 400, NULL, 0);
- return 0;
-}
-
-static struct usbhs_phy_data phy_data[] __initdata = {
- {
- .port = 2,
- .reset_gpio = -1, /* set at runtime */
- .vcc_gpio = -EINVAL,
- },
-};
-
-static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
- .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
-};
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux omap35x_board_mux[] __initdata = {
- OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
- OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
- OMAP_PIN_OFF_WAKEUPENABLE),
- OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
- OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
- OMAP_PIN_OFF_WAKEUPENABLE),
- OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
- OMAP_PIN_OFF_NONE),
- OMAP3_MUX(GPMC_WAIT2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
- OMAP_PIN_OFF_NONE),
-#ifdef CONFIG_WILINK_PLATFORM_DATA
- /* WLAN IRQ - GPIO 149 */
- OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
-
- /* WLAN POWER ENABLE - GPIO 150 */
- OMAP3_MUX(UART1_CTS, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
-
- /* MMC2 SDIO pin muxes for WL12xx */
- OMAP3_MUX(SDMMC2_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(SDMMC2_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(SDMMC2_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(SDMMC2_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(SDMMC2_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(SDMMC2_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
-#endif
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-
-static struct omap_board_mux omap36x_board_mux[] __initdata = {
- OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
- OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
- OMAP_PIN_OFF_WAKEUPENABLE),
- OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
- OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
- OMAP_PIN_OFF_WAKEUPENABLE),
- /* AM/DM37x EVM: DSS data bus muxed with sys_boot */
- OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
- OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
- OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
- OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
- OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
- OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
- OMAP3_MUX(SYS_BOOT0, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
- OMAP3_MUX(SYS_BOOT1, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
- OMAP3_MUX(SYS_BOOT3, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
- OMAP3_MUX(SYS_BOOT4, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
- OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
- OMAP3_MUX(SYS_BOOT6, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
-#ifdef CONFIG_WILINK_PLATFORM_DATA
- /* WLAN IRQ - GPIO 149 */
- OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
-
- /* WLAN POWER ENABLE - GPIO 150 */
- OMAP3_MUX(UART1_CTS, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
-
- /* MMC2 SDIO pin muxes for WL12xx */
- OMAP3_MUX(SDMMC2_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(SDMMC2_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(SDMMC2_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(SDMMC2_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(SDMMC2_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(SDMMC2_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
-#endif
-
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#else
-#define omap35x_board_mux NULL
-#define omap36x_board_mux NULL
-#endif
-
-static struct omap_musb_board_data musb_board_data = {
- .interface_type = MUSB_INTERFACE_ULPI,
- .mode = MUSB_OTG,
- .power = 100,
-};
-
-static struct gpio omap3_evm_ehci_gpios[] __initdata = {
- { OMAP3_EVM_EHCI_VBUS, GPIOF_OUT_INIT_HIGH, "enable EHCI VBUS" },
- { OMAP3_EVM_EHCI_SELECT, GPIOF_OUT_INIT_LOW, "select EHCI port" },
-};
-
-static void __init omap3_evm_wl12xx_init(void)
-{
-#ifdef CONFIG_WILINK_PLATFORM_DATA
- int ret;
-
- /* WL12xx WLAN Init */
- omap3evm_wlan_data.irq = gpio_to_irq(OMAP3EVM_WLAN_IRQ_GPIO);
- ret = wl12xx_set_platform_data(&omap3evm_wlan_data);
- if (ret)
- pr_err("error setting wl12xx data: %d\n", ret);
- ret = platform_device_register(&omap3evm_wlan_regulator);
- if (ret)
- pr_err("error registering wl12xx device: %d\n", ret);
-#endif
-}
-
-static struct regulator_consumer_supply dummy_supplies[] = {
- REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
- REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
-};
-
-static struct mtd_partition omap3evm_nand_partitions[] = {
- /* All the partition sizes are listed in terms of NAND block size */
- {
- .name = "X-Loader",
- .offset = 0,
- .size = 4*(SZ_128K),
- .mask_flags = MTD_WRITEABLE
- },
- {
- .name = "U-Boot",
- .offset = MTDPART_OFS_APPEND,
- .size = 14*(SZ_128K),
- .mask_flags = MTD_WRITEABLE
- },
- {
- .name = "U-Boot Env",
- .offset = MTDPART_OFS_APPEND,
- .size = 2*(SZ_128K)
- },
- {
- .name = "Kernel",
- .offset = MTDPART_OFS_APPEND,
- .size = 40*(SZ_128K)
- },
- {
- .name = "File system",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND,
- },
-};
-
-static void __init omap3_evm_init(void)
-{
- struct omap_board_mux *obm;
-
- omap3_evm_get_revision();
- regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
-
- obm = (cpu_is_omap3630()) ? omap36x_board_mux : omap35x_board_mux;
- omap3_mux_init(obm, OMAP_PACKAGE_CBB);
-
- omap_mux_init_gpio(63, OMAP_PIN_INPUT);
- omap_hsmmc_init(mmc);
-
- if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2)
- omap3evm_twldata.vaux2 = &omap3evm_vaux2;
-
- omap3_evm_i2c_init();
-
- omap_display_init(&omap3_evm_dss_data);
- platform_device_register(&omap3_evm_lcd_device);
- platform_device_register(&omap3_evm_tfp410_device);
- platform_device_register(&omap3_evm_dvi_connector_device);
- platform_device_register(&omap3_evm_tv_connector_device);
-
- omap_serial_init();
- omap_sdrc_init(mt46h32m32lf6_sdrc_params, NULL);
-
- /* OMAP3EVM uses ISP1504 phy and so register nop transceiver */
- usb_nop_xceiv_register();
-
- if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) {
- /* enable EHCI VBUS using GPIO22 */
- omap_mux_init_gpio(OMAP3_EVM_EHCI_VBUS, OMAP_PIN_INPUT_PULLUP);
- /* Select EHCI port on main board */
- omap_mux_init_gpio(OMAP3_EVM_EHCI_SELECT,
- OMAP_PIN_INPUT_PULLUP);
- gpio_request_array(omap3_evm_ehci_gpios,
- ARRAY_SIZE(omap3_evm_ehci_gpios));
-
- /* setup EHCI phy reset config */
- omap_mux_init_gpio(21, OMAP_PIN_INPUT_PULLUP);
- phy_data[0].reset_gpio = 21;
-
- /* EVM REV >= E can supply 500mA with EXTVBUS programming */
- musb_board_data.power = 500;
- musb_board_data.extvbus = 1;
- } else {
- /* setup EHCI phy reset on MDC */
- omap_mux_init_gpio(135, OMAP_PIN_OUTPUT);
- phy_data[0].reset_gpio = 135;
- }
- usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
- usb_musb_init(&musb_board_data);
-
- usbhs_init_phys(phy_data, ARRAY_SIZE(phy_data));
- usbhs_init(&usbhs_bdata);
- board_nand_init(omap3evm_nand_partitions,
- ARRAY_SIZE(omap3evm_nand_partitions), NAND_CS,
- NAND_BUSWIDTH_16, NULL);
-
- omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL);
- omap3evm_init_smsc911x();
-#ifdef CONFIG_BROKEN
- omap3_evm_display_init();
-#endif
- omap3_evm_wl12xx_init();
- omap_twl4030_audio_init("omap3evm", NULL);
-}
-
-MACHINE_START(OMAP3EVM, "OMAP3 EVM")
- /* Maintainer: Syed Mohammed Khasim - Texas Instruments */
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = omap35xx_init_early,
- .init_irq = omap3_init_irq,
- .handle_irq = omap3_intc_handle_irq,
- .init_machine = omap3_evm_init,
- .init_late = omap35xx_init_late,
- .init_time = omap3_sync32k_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index ba8342fef799..119efaf5808a 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -32,7 +32,7 @@
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/smsc911x.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/usb/phy.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
deleted file mode 100644
index 345e8c4b8731..000000000000
--- a/arch/arm/mach-omap2/board-rm680.c
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Board support file for Nokia N950 (RM-680) / N9 (RM-696).
- *
- * Copyright (C) 2010 Nokia
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/io.h>
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c/twl.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/consumer.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
-#include <linux/usb/phy.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach-types.h>
-
-#include "common.h"
-#include "mux.h"
-#include "gpmc.h"
-#include "mmc.h"
-#include "hsmmc.h"
-#include "sdram-nokia.h"
-#include "common-board-devices.h"
-#include "gpmc-onenand.h"
-
-static struct regulator_consumer_supply rm680_vemmc_consumers[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
-};
-
-/* Fixed regulator for internal eMMC */
-static struct regulator_init_data rm680_vemmc = {
- .constraints = {
- .name = "rm680_vemmc",
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_STATUS
- | REGULATOR_CHANGE_MODE,
- },
- .num_consumer_supplies = ARRAY_SIZE(rm680_vemmc_consumers),
- .consumer_supplies = rm680_vemmc_consumers,
-};
-
-static struct fixed_voltage_config rm680_vemmc_config = {
- .supply_name = "VEMMC",
- .microvolts = 2900000,
- .gpio = 157,
- .startup_delay = 150,
- .enable_high = 1,
- .init_data = &rm680_vemmc,
-};
-
-static struct platform_device rm680_vemmc_device = {
- .name = "reg-fixed-voltage",
- .dev = {
- .platform_data = &rm680_vemmc_config,
- },
-};
-
-static struct platform_device *rm680_peripherals_devices[] __initdata = {
- &rm680_vemmc_device,
-};
-
-/* TWL */
-static struct twl4030_gpio_platform_data rm680_gpio_data = {
- .pullups = BIT(0),
- .pulldowns = BIT(1) | BIT(2) | BIT(8) | BIT(15),
-};
-
-static struct twl4030_platform_data rm680_twl_data = {
- .gpio = &rm680_gpio_data,
- /* add rest of the children here */
-};
-
-static void __init rm680_i2c_init(void)
-{
- omap3_pmic_get_config(&rm680_twl_data, TWL_COMMON_PDATA_USB, 0);
- omap_pmic_init(1, 2900, "twl5031", 7 + OMAP_INTC_START, &rm680_twl_data);
- omap_register_i2c_bus(2, 400, NULL, 0);
- omap_register_i2c_bus(3, 400, NULL, 0);
-}
-
-#if defined(CONFIG_MTD_ONENAND_OMAP2) || \
- defined(CONFIG_MTD_ONENAND_OMAP2_MODULE)
-static struct omap_onenand_platform_data board_onenand_data[] = {
- {
- .gpio_irq = 65,
- .flags = ONENAND_SYNC_READWRITE,
- }
-};
-#endif
-
-/* eMMC */
-static struct omap2_hsmmc_info mmc[] __initdata = {
- {
- .name = "internal",
- .mmc = 2,
- .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED,
- .gpio_cd = -EINVAL,
- .gpio_wp = -EINVAL,
- },
- { /* Terminator */ }
-};
-
-static void __init rm680_peripherals_init(void)
-{
- platform_add_devices(rm680_peripherals_devices,
- ARRAY_SIZE(rm680_peripherals_devices));
- rm680_i2c_init();
- gpmc_onenand_init(board_onenand_data);
- omap_hsmmc_init(mmc);
-}
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-static void __init rm680_init(void)
-{
- struct omap_sdrc_params *sdrc_params;
-
- omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
- omap_serial_init();
-
- sdrc_params = nokia_get_sdram_timings();
- omap_sdrc_init(sdrc_params, sdrc_params);
-
- usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
- usb_musb_init(NULL);
- rm680_peripherals_init();
-}
-
-MACHINE_START(NOKIA_RM680, "Nokia RM-680 board")
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = omap3630_init_early,
- .init_irq = omap3_init_irq,
- .handle_irq = omap3_intc_handle_irq,
- .init_machine = rm680_init,
- .init_late = omap3630_init_late,
- .init_time = omap3_sync32k_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
-
-MACHINE_START(NOKIA_RM696, "Nokia RM-696 board")
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = omap3630_init_early,
- .init_irq = omap3_init_irq,
- .handle_irq = omap3_intc_handle_irq,
- .init_machine = rm680_init,
- .init_late = omap3630_init_late,
- .init_time = omap3_sync32k_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index f6fe388af989..f093af17f5e6 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -57,6 +57,8 @@
#include "common-board-devices.h"
#include "gpmc.h"
#include "gpmc-onenand.h"
+#include "soc.h"
+#include "omap-secure.h"
#define SYSTEM_REV_B_USES_VAUX3 0x1699
#define SYSTEM_REV_S_USES_VAUX3 0x8
@@ -211,29 +213,11 @@ static struct lp55xx_led_config rx51_lp5523_led_config[] = {
}
};
-static int rx51_lp5523_setup(void)
-{
- return gpio_request_one(RX51_LP5523_CHIP_EN_GPIO, GPIOF_DIR_OUT,
- "lp5523_enable");
-}
-
-static void rx51_lp5523_release(void)
-{
- gpio_free(RX51_LP5523_CHIP_EN_GPIO);
-}
-
-static void rx51_lp5523_enable(bool state)
-{
- gpio_set_value(RX51_LP5523_CHIP_EN_GPIO, !!state);
-}
-
static struct lp55xx_platform_data rx51_lp5523_platform_data = {
.led_config = rx51_lp5523_led_config,
.num_channels = ARRAY_SIZE(rx51_lp5523_led_config),
.clock_mode = LP55XX_CLOCK_AUTO,
- .setup_resources = rx51_lp5523_setup,
- .release_resources = rx51_lp5523_release,
- .enable = rx51_lp5523_enable,
+ .enable_gpio = RX51_LP5523_CHIP_EN_GPIO,
};
#endif
@@ -1298,6 +1282,22 @@ static void __init rx51_init_twl4030_hwmon(void)
platform_device_register(&madc_hwmon);
}
+static struct platform_device omap3_rom_rng_device = {
+ .name = "omap3-rom-rng",
+ .id = -1,
+ .dev = {
+ .platform_data = rx51_secure_rng_call,
+ },
+};
+
+static void __init rx51_init_omap3_rom_rng(void)
+{
+ if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
+ pr_info("RX-51: Registring OMAP3 HWRNG device\n");
+ platform_device_register(&omap3_rom_rng_device);
+ }
+}
+
void __init rx51_peripherals_init(void)
{
rx51_i2c_init();
@@ -1318,5 +1318,6 @@ void __init rx51_peripherals_init(void)
rx51_charger_init();
rx51_init_twl4030_hwmon();
+ rx51_init_omap3_rom_rng();
}
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index 7735105561d8..db168c9627a1 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -2,6 +2,8 @@
* Board support file for Nokia N900 (aka RX-51).
*
* Copyright (C) 2007, 2008 Nokia
+ * Copyright (C) 2012 Ivaylo Dimitrov <freemangordon@abv.bg>
+ * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -31,7 +33,9 @@
#include "mux.h"
#include "gpmc.h"
#include "pm.h"
+#include "soc.h"
#include "sdram-nokia.h"
+#include "omap-secure.h"
#define RX51_GPIO_SLEEP_IND 162
@@ -103,6 +107,14 @@ static void __init rx51_init(void)
usb_musb_init(&musb_board_data);
rx51_peripherals_init();
+ if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
+#ifdef CONFIG_ARM_ERRATA_430973
+ pr_info("RX-51: Enabling ARM errata 430973 workaround\n");
+ /* set IBE to 1 */
+ rx51_secure_update_aux_cr(BIT(6), 0);
+#endif
+ }
+
/* Ensure SDRC pins are mux'd for self-refresh */
omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
diff --git a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c
deleted file mode 100644
index 42e5f231a799..000000000000
--- a/arch/arm/mach-omap2/board-zoom-debugboard.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (C) 2009 Texas Instruments Inc.
- * Mikkel Christensen <mlc@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/serial_8250.h>
-#include <linux/smsc911x.h>
-#include <linux/interrupt.h>
-
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/machine.h>
-
-#include "gpmc.h"
-#include "gpmc-smsc911x.h"
-
-#include "board-zoom.h"
-
-#include "soc.h"
-#include "common.h"
-
-#define ZOOM_SMSC911X_CS 7
-#define ZOOM_SMSC911X_GPIO 158
-#define ZOOM_QUADUART_CS 3
-#define ZOOM_QUADUART_GPIO 102
-#define ZOOM_QUADUART_RST_GPIO 152
-#define QUART_CLK 1843200
-#define DEBUG_BASE 0x08000000
-#define ZOOM_ETHR_START DEBUG_BASE
-
-static struct omap_smsc911x_platform_data zoom_smsc911x_cfg = {
- .cs = ZOOM_SMSC911X_CS,
- .gpio_irq = ZOOM_SMSC911X_GPIO,
- .gpio_reset = -EINVAL,
- .flags = SMSC911X_USE_32BIT,
-};
-
-static inline void __init zoom_init_smsc911x(void)
-{
- gpmc_smsc911x_init(&zoom_smsc911x_cfg);
-}
-
-static struct plat_serial8250_port serial_platform_data[] = {
- {
- .mapbase = ZOOM_UART_BASE,
- .flags = UPF_BOOT_AUTOCONF|UPF_IOREMAP|UPF_SHARE_IRQ,
- .irqflags = IRQF_SHARED | IRQF_TRIGGER_RISING,
- .iotype = UPIO_MEM,
- .regshift = 1,
- .uartclk = QUART_CLK,
- }, {
- .flags = 0
- }
-};
-
-static struct platform_device zoom_debugboard_serial_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = serial_platform_data,
- },
-};
-
-static inline void __init zoom_init_quaduart(void)
-{
- int quart_cs;
- unsigned long cs_mem_base;
- int quart_gpio = 0;
-
- if (gpio_request_one(ZOOM_QUADUART_RST_GPIO,
- GPIOF_OUT_INIT_LOW,
- "TL16CP754C GPIO") < 0) {
- pr_err("Failed to request GPIO%d for TL16CP754C\n",
- ZOOM_QUADUART_RST_GPIO);
- return;
- }
-
- quart_cs = ZOOM_QUADUART_CS;
-
- if (gpmc_cs_request(quart_cs, SZ_1M, &cs_mem_base) < 0) {
- pr_err("Failed to request GPMC mem for Quad UART(TL16CP754C)\n");
- return;
- }
-
- quart_gpio = ZOOM_QUADUART_GPIO;
-
- if (gpio_request_one(quart_gpio, GPIOF_IN, "TL16CP754C GPIO") < 0)
- printk(KERN_ERR "Failed to request GPIO%d for TL16CP754C\n",
- quart_gpio);
-
- serial_platform_data[0].irq = gpio_to_irq(102);
-}
-
-static inline int omap_zoom_debugboard_detect(void)
-{
- int debug_board_detect = 0;
- int ret = 1;
-
- debug_board_detect = ZOOM_SMSC911X_GPIO;
-
- if (gpio_request_one(debug_board_detect, GPIOF_IN,
- "Zoom debug board detect") < 0) {
- pr_err("Failed to request GPIO%d for Zoom debug board detect\n",
- debug_board_detect);
- return 0;
- }
-
- if (!gpio_get_value(debug_board_detect)) {
- ret = 0;
- }
- gpio_free(debug_board_detect);
- return ret;
-}
-
-static struct platform_device *zoom_devices[] __initdata = {
- &zoom_debugboard_serial_device,
-};
-
-static struct regulator_consumer_supply dummy_supplies[] = {
- REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
- REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
-};
-
-int __init zoom_debugboard_init(void)
-{
- if (!omap_zoom_debugboard_detect())
- return 0;
-
- regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
- zoom_init_smsc911x();
- zoom_init_quaduart();
- return platform_add_devices(zoom_devices, ARRAY_SIZE(zoom_devices));
-}
diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c
deleted file mode 100644
index 3d8ecc1e05bd..000000000000
--- a/arch/arm/mach-omap2/board-zoom-display.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2010 Texas Instruments Inc.
- *
- * Modified from mach-omap2/board-zoom-peripherals.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/platform_data/spi-omap2-mcspi.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-
-#include "board-zoom.h"
-#include "soc.h"
-#include "common.h"
-
-#define LCD_PANEL_RESET_GPIO_PROD 96
-#define LCD_PANEL_RESET_GPIO_PILOT 55
-#define LCD_PANEL_QVGA_GPIO 56
-
-static struct panel_nec_nl8048hl11_platform_data zoom_lcd_pdata = {
- .name = "lcd",
- .source = "dpi.0",
-
- .data_lines = 24,
-
- .res_gpio = -1, /* filled in code */
- .qvga_gpio = LCD_PANEL_QVGA_GPIO,
-};
-
-static struct omap_dss_board_info zoom_dss_data = {
- .default_display_name = "lcd",
-};
-
-static void __init zoom_lcd_panel_init(void)
-{
- zoom_lcd_pdata.res_gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
- LCD_PANEL_RESET_GPIO_PROD :
- LCD_PANEL_RESET_GPIO_PILOT;
-}
-
-static struct omap2_mcspi_device_config dss_lcd_mcspi_config = {
- .turbo_mode = 1,
-};
-
-static struct spi_board_info nec_8048_spi_board_info[] __initdata = {
- [0] = {
- .modalias = "panel-nec-nl8048hl11",
- .bus_num = 1,
- .chip_select = 2,
- .max_speed_hz = 375000,
- .controller_data = &dss_lcd_mcspi_config,
- .platform_data = &zoom_lcd_pdata,
- },
-};
-
-void __init zoom_display_init(void)
-{
- omap_display_init(&zoom_dss_data);
- zoom_lcd_panel_init();
- spi_register_board_info(nec_8048_spi_board_info,
- ARRAY_SIZE(nec_8048_spi_board_info));
-}
-
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
deleted file mode 100644
index a90375d5b2b6..000000000000
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * Copyright (C) 2009 Texas Instruments Inc.
- *
- * Modified from mach-omap2/board-zoom2.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/input/matrix_keypad.h>
-#include <linux/gpio.h>
-#include <linux/i2c/twl.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/fixed.h>
-#include <linux/wl12xx.h>
-#include <linux/mmc/host.h>
-#include <linux/platform_data/gpio-omap.h>
-#include <linux/platform_data/omap-twl4030.h>
-#include <linux/usb/phy.h>
-#include <linux/pwm.h>
-#include <linux/leds_pwm.h>
-#include <linux/pwm_backlight.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-
-#include "board-zoom.h"
-
-#include "mux.h"
-#include "hsmmc.h"
-#include "common-board-devices.h"
-
-#define OMAP_ZOOM_WLAN_PMENA_GPIO (101)
-#define OMAP_ZOOM_TSC2004_IRQ_GPIO (153)
-#define OMAP_ZOOM_WLAN_IRQ_GPIO (162)
-
-/* Zoom2 has Qwerty keyboard*/
-static uint32_t board_keymap[] = {
- KEY(0, 0, KEY_E),
- KEY(0, 1, KEY_R),
- KEY(0, 2, KEY_T),
- KEY(0, 3, KEY_HOME),
- KEY(0, 6, KEY_I),
- KEY(0, 7, KEY_LEFTSHIFT),
- KEY(1, 0, KEY_D),
- KEY(1, 1, KEY_F),
- KEY(1, 2, KEY_G),
- KEY(1, 3, KEY_SEND),
- KEY(1, 6, KEY_K),
- KEY(1, 7, KEY_ENTER),
- KEY(2, 0, KEY_X),
- KEY(2, 1, KEY_C),
- KEY(2, 2, KEY_V),
- KEY(2, 3, KEY_END),
- KEY(2, 6, KEY_DOT),
- KEY(2, 7, KEY_CAPSLOCK),
- KEY(3, 0, KEY_Z),
- KEY(3, 1, KEY_KPPLUS),
- KEY(3, 2, KEY_B),
- KEY(3, 3, KEY_F1),
- KEY(3, 6, KEY_O),
- KEY(3, 7, KEY_SPACE),
- KEY(4, 0, KEY_W),
- KEY(4, 1, KEY_Y),
- KEY(4, 2, KEY_U),
- KEY(4, 3, KEY_F2),
- KEY(4, 4, KEY_VOLUMEUP),
- KEY(4, 6, KEY_L),
- KEY(4, 7, KEY_LEFT),
- KEY(5, 0, KEY_S),
- KEY(5, 1, KEY_H),
- KEY(5, 2, KEY_J),
- KEY(5, 3, KEY_F3),
- KEY(5, 4, KEY_UNKNOWN),
- KEY(5, 5, KEY_VOLUMEDOWN),
- KEY(5, 6, KEY_M),
- KEY(5, 7, KEY_RIGHT),
- KEY(6, 0, KEY_Q),
- KEY(6, 1, KEY_A),
- KEY(6, 2, KEY_N),
- KEY(6, 3, KEY_BACKSPACE),
- KEY(6, 6, KEY_P),
- KEY(6, 7, KEY_UP),
- KEY(7, 0, KEY_PROG1), /*MACRO 1 <User defined> */
- KEY(7, 1, KEY_PROG2), /*MACRO 2 <User defined> */
- KEY(7, 2, KEY_PROG3), /*MACRO 3 <User defined> */
- KEY(7, 3, KEY_PROG4), /*MACRO 4 <User defined> */
- KEY(7, 6, KEY_SELECT),
- KEY(7, 7, KEY_DOWN)
-};
-
-static struct matrix_keymap_data board_map_data = {
- .keymap = board_keymap,
- .keymap_size = ARRAY_SIZE(board_keymap),
-};
-
-static struct twl4030_keypad_data zoom_kp_twl4030_data = {
- .keymap_data = &board_map_data,
- .rows = 8,
- .cols = 8,
- .rep = 1,
-};
-
-static struct regulator_consumer_supply zoom_vmmc1_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
-};
-
-static struct regulator_consumer_supply zoom_vsim_supply[] = {
- REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
-};
-
-static struct regulator_consumer_supply zoom_vmmc2_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
-};
-
-static struct regulator_consumer_supply zoom_vmmc3_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2"),
-};
-
-/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
-static struct regulator_init_data zoom_vmmc1 = {
- .constraints = {
- .min_uV = 1850000,
- .max_uV = 3150000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(zoom_vmmc1_supply),
- .consumer_supplies = zoom_vmmc1_supply,
-};
-
-/* VMMC2 for MMC2 card */
-static struct regulator_init_data zoom_vmmc2 = {
- .constraints = {
- .min_uV = 1850000,
- .max_uV = 1850000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(zoom_vmmc2_supply),
- .consumer_supplies = zoom_vmmc2_supply,
-};
-
-/* VSIM for OMAP VDD_MMC1A (i/o for DAT4..DAT7) */
-static struct regulator_init_data zoom_vsim = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 3000000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(zoom_vsim_supply),
- .consumer_supplies = zoom_vsim_supply,
-};
-
-static struct regulator_init_data zoom_vmmc3 = {
- .constraints = {
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(zoom_vmmc3_supply),
- .consumer_supplies = zoom_vmmc3_supply,
-};
-
-static struct fixed_voltage_config zoom_vwlan = {
- .supply_name = "vwl1271",
- .microvolts = 1800000, /* 1.8V */
- .gpio = OMAP_ZOOM_WLAN_PMENA_GPIO,
- .startup_delay = 70000, /* 70msec */
- .enable_high = 1,
- .enabled_at_boot = 0,
- .init_data = &zoom_vmmc3,
-};
-
-static struct platform_device omap_vwlan_device = {
- .name = "reg-fixed-voltage",
- .id = 1,
- .dev = {
- .platform_data = &zoom_vwlan,
- },
-};
-
-static struct pwm_lookup zoom_pwm_lookup[] = {
- PWM_LOOKUP("twl-pwm", 0, "leds_pwm", "zoom::keypad"),
- PWM_LOOKUP("twl-pwm", 1, "pwm-backlight", "backlight"),
-};
-
-static struct led_pwm zoom_pwm_leds[] = {
- {
- .name = "zoom::keypad",
- .max_brightness = 127,
- .pwm_period_ns = 7812500,
- },
-};
-
-static struct led_pwm_platform_data zoom_pwm_data = {
- .num_leds = ARRAY_SIZE(zoom_pwm_leds),
- .leds = zoom_pwm_leds,
-};
-
-static struct platform_device zoom_leds_pwm = {
- .name = "leds_pwm",
- .id = -1,
- .dev = {
- .platform_data = &zoom_pwm_data,
- },
-};
-
-static struct platform_pwm_backlight_data zoom_backlight_data = {
- .pwm_id = 1,
- .max_brightness = 127,
- .dft_brightness = 127,
- .pwm_period_ns = 7812500,
-};
-
-static struct platform_device zoom_backlight_pwm = {
- .name = "pwm-backlight",
- .id = -1,
- .dev = {
- .platform_data = &zoom_backlight_data,
- },
-};
-
-static struct platform_device *zoom_devices[] __initdata = {
- &omap_vwlan_device,
- &zoom_leds_pwm,
- &zoom_backlight_pwm,
-};
-
-static struct wl12xx_platform_data omap_zoom_wlan_data __initdata = {
- .board_ref_clock = WL12XX_REFCLOCK_26, /* 26 MHz */
-};
-
-static struct omap2_hsmmc_info mmc[] = {
- {
- .name = "external",
- .mmc = 1,
- .caps = MMC_CAP_4_BIT_DATA,
- .gpio_wp = -EINVAL,
- .power_saving = true,
- .deferred = true,
- },
- {
- .name = "internal",
- .mmc = 2,
- .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
- .gpio_cd = -EINVAL,
- .gpio_wp = -EINVAL,
- .nonremovable = true,
- .power_saving = true,
- },
- {
- .name = "wl1271",
- .mmc = 3,
- .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
- .gpio_wp = -EINVAL,
- .gpio_cd = -EINVAL,
- .nonremovable = true,
- },
- {} /* Terminator */
-};
-
-static struct omap_tw4030_pdata omap_twl4030_audio_data = {
- .voice_connected = true,
- .custom_routing = true,
-
- .has_hs = OMAP_TWL4030_LEFT | OMAP_TWL4030_RIGHT,
- .has_hf = OMAP_TWL4030_LEFT | OMAP_TWL4030_RIGHT,
-
- .has_mainmic = true,
- .has_submic = true,
- .has_hsmic = true,
- .has_linein = OMAP_TWL4030_LEFT | OMAP_TWL4030_RIGHT,
-};
-
-static int zoom_twl_gpio_setup(struct device *dev,
- unsigned gpio, unsigned ngpio)
-{
- /* gpio + 0 is "mmc0_cd" (input/IRQ) */
- mmc[0].gpio_cd = gpio + 0;
- omap_hsmmc_late_init(mmc);
-
- /* Audio setup */
- omap_twl4030_audio_data.jack_detect = gpio + 2;
- omap_twl4030_audio_init("Zoom2", &omap_twl4030_audio_data);
-
- return 0;
-}
-
-static struct twl4030_gpio_platform_data zoom_gpio_data = {
- .setup = zoom_twl_gpio_setup,
-};
-
-static struct twl4030_platform_data zoom_twldata = {
- /* platform_data for children goes here */
- .gpio = &zoom_gpio_data,
- .keypad = &zoom_kp_twl4030_data,
- .vmmc1 = &zoom_vmmc1,
- .vmmc2 = &zoom_vmmc2,
- .vsim = &zoom_vsim,
-};
-
-static int __init omap_i2c_init(void)
-{
- omap3_pmic_get_config(&zoom_twldata,
- TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_BCI |
- TWL_COMMON_PDATA_MADC | TWL_COMMON_PDATA_AUDIO,
- TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
-
- if (machine_is_omap_zoom2())
- zoom_twldata.audio->codec->ramp_delay_value = 3; /* 161 ms */
-
- omap_pmic_init(1, 2400, "twl5030", 7 + OMAP_INTC_START, &zoom_twldata);
- omap_register_i2c_bus(2, 400, NULL, 0);
- omap_register_i2c_bus(3, 400, NULL, 0);
- return 0;
-}
-
-static void enable_board_wakeup_source(void)
-{
- /* T2 interrupt line (keypad) */
- omap_mux_init_signal("sys_nirq",
- OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP);
-}
-
-void __init zoom_peripherals_init(void)
-{
- int ret;
-
- omap_zoom_wlan_data.irq = gpio_to_irq(OMAP_ZOOM_WLAN_IRQ_GPIO);
- ret = wl12xx_set_platform_data(&omap_zoom_wlan_data);
-
- if (ret)
- pr_err("error setting wl12xx data: %d\n", ret);
-
- omap_hsmmc_init(mmc);
- omap_i2c_init();
- pwm_add_table(zoom_pwm_lookup, ARRAY_SIZE(zoom_pwm_lookup));
- platform_add_devices(zoom_devices, ARRAY_SIZE(zoom_devices));
- usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
- usb_musb_init(NULL);
- enable_board_wakeup_source();
- omap_serial_init();
-}
diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
deleted file mode 100644
index 1a3dd865d8eb..000000000000
--- a/arch/arm/mach-omap2/board-zoom.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright (C) 2009-2010 Texas Instruments Inc.
- * Mikkel Christensen <mlc@ti.com>
- * Felipe Balbi <balbi@ti.com>
- *
- * Modified from mach-omap2/board-ldp.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/gpio.h>
-#include <linux/i2c/twl.h>
-#include <linux/mtd/nand.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include "common.h"
-
-#include "board-zoom.h"
-
-#include "board-flash.h"
-#include "mux.h"
-#include "sdram-micron-mt46h32m32lf-6.h"
-#include "sdram-hynix-h8mbx00u0mer-0em.h"
-
-#define ZOOM3_EHCI_RESET_GPIO 64
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
- /* WLAN IRQ - GPIO 162 */
- OMAP3_MUX(MCBSP1_CLKX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
- /* WLAN POWER ENABLE - GPIO 101 */
- OMAP3_MUX(CAM_D2, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
- /* WLAN SDIO: MMC3 CMD */
- OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE3 | OMAP_PIN_INPUT_PULLUP),
- /* WLAN SDIO: MMC3 CLK */
- OMAP3_MUX(ETK_CLK, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
- /* WLAN SDIO: MMC3 DAT[0-3] */
- OMAP3_MUX(ETK_D3, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(ETK_D4, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(ETK_D5, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
- OMAP3_MUX(ETK_D6, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-static struct mtd_partition zoom_nand_partitions[] = {
- /* All the partition sizes are listed in terms of NAND block size */
- {
- .name = "X-Loader-NAND",
- .offset = 0,
- .size = 4 * (64 * 2048), /* 512KB, 0x80000 */
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "U-Boot-NAND",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */
- .size = 10 * (64 * 2048), /* 1.25MB, 0x140000 */
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "Boot Env-NAND",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x1c0000 */
- .size = 2 * (64 * 2048), /* 256KB, 0x40000 */
- },
- {
- .name = "Kernel-NAND",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x0200000*/
- .size = 240 * (64 * 2048), /* 30M, 0x1E00000 */
- },
- {
- .name = "system",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x2000000 */
- .size = 3328 * (64 * 2048), /* 416M, 0x1A000000 */
- },
- {
- .name = "userdata",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x1C000000*/
- .size = 256 * (64 * 2048), /* 32M, 0x2000000 */
- },
- {
- .name = "cache",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x1E000000*/
- .size = 256 * (64 * 2048), /* 32M, 0x2000000 */
- },
-};
-
-static struct usbhs_phy_data phy_data[] __initdata = {
- {
- .port = 2,
- .reset_gpio = ZOOM3_EHCI_RESET_GPIO,
- .vcc_gpio = -EINVAL,
- },
-};
-
-static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
- .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
-};
-
-static void __init omap_zoom_init(void)
-{
- if (machine_is_omap_zoom2()) {
- omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
- } else if (machine_is_omap_zoom3()) {
- omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
- omap_mux_init_gpio(ZOOM3_EHCI_RESET_GPIO, OMAP_PIN_OUTPUT);
-
- usbhs_init_phys(phy_data, ARRAY_SIZE(phy_data));
- usbhs_init(&usbhs_bdata);
- }
-
- board_nand_init(zoom_nand_partitions,
- ARRAY_SIZE(zoom_nand_partitions), ZOOM_NAND_CS,
- NAND_BUSWIDTH_16, nand_default_timings);
- zoom_debugboard_init();
- zoom_peripherals_init();
-
- if (machine_is_omap_zoom2())
- omap_sdrc_init(mt46h32m32lf6_sdrc_params,
- mt46h32m32lf6_sdrc_params);
- else if (machine_is_omap_zoom3())
- omap_sdrc_init(h8mbx00u0mer0em_sdrc_params,
- h8mbx00u0mer0em_sdrc_params);
-
- zoom_display_init();
-}
-
-MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = omap3430_init_early,
- .init_irq = omap3_init_irq,
- .handle_irq = omap3_intc_handle_irq,
- .init_machine = omap_zoom_init,
- .init_late = omap3430_init_late,
- .init_time = omap3_sync32k_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
-
-MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = omap3630_init_early,
- .init_irq = omap3_init_irq,
- .handle_irq = omap3_intc_handle_irq,
- .init_machine = omap_zoom_init,
- .init_late = omap3630_init_late,
- .init_time = omap3_sync32k_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-zoom.h b/arch/arm/mach-omap2/board-zoom.h
deleted file mode 100644
index 2e9486940ead..000000000000
--- a/arch/arm/mach-omap2/board-zoom.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Defines for zoom boards
- */
-#include <video/omapdss.h>
-
-#define ZOOM_NAND_CS 0
-
-extern int __init zoom_debugboard_init(void);
-extern void __init zoom_peripherals_init(void);
-extern void __init zoom_display_init(void);
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
index 334b76745900..03a2829beb8e 100644
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -3275,6 +3275,7 @@ static struct omap_clk omap36xx_clks[] = {
static struct omap_clk omap34xx_omap36xx_clks[] = {
CLK(NULL, "aes1_ick", &aes1_ick),
CLK("omap_rng", "ick", &rng_ick),
+ CLK("omap3-rom-rng", "ick", &rng_ick),
CLK(NULL, "sha11_ick", &sha11_ick),
CLK(NULL, "des1_ick", &des1_ick),
CLK(NULL, "cam_mclk", &cam_mclk),
diff --git a/arch/arm/mach-omap2/clkt2xxx_apll.c b/arch/arm/mach-omap2/clkt2xxx_apll.c
index 25b1feed480d..c78e893eba7d 100644
--- a/arch/arm/mach-omap2/clkt2xxx_apll.c
+++ b/arch/arm/mach-omap2/clkt2xxx_apll.c
@@ -52,7 +52,7 @@ static bool omap2xxx_clk_apll_locked(struct clk_hw *hw)
apll_mask = EN_APLL_LOCKED << clk->enable_bit;
- r = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN);
+ r = omap2xxx_cm_get_pll_status();
return ((r & apll_mask) == apll_mask) ? true : false;
}
@@ -126,7 +126,7 @@ u32 omap2xxx_get_apll_clkin(void)
{
u32 aplls, srate = 0;
- aplls = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL1);
+ aplls = omap2xxx_cm_get_pll_config();
aplls &= OMAP24XX_APLLS_CLKIN_MASK;
aplls >>= OMAP24XX_APLLS_CLKIN_SHIFT;
diff --git a/arch/arm/mach-omap2/clkt2xxx_dpllcore.c b/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
index d8620105c42a..3ff32543493c 100644
--- a/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
+++ b/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
@@ -60,8 +60,7 @@ unsigned long omap2xxx_clk_get_core_rate(void)
core_clk = omap2_get_dpll_rate(dpll_core_ck);
- v = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
- v &= OMAP24XX_CORE_CLK_SRC_MASK;
+ v = omap2xxx_cm_get_core_clk_src();
if (v == CORE_CLK_SRC_32K)
core_clk = 32768;
@@ -79,8 +78,7 @@ static long omap2_dpllcore_round_rate(unsigned long target_rate)
{
u32 high, low, core_clk_src;
- core_clk_src = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
- core_clk_src &= OMAP24XX_CORE_CLK_SRC_MASK;
+ core_clk_src = omap2xxx_cm_get_core_clk_src();
if (core_clk_src == CORE_CLK_SRC_DPLL) { /* DPLL clockout */
high = curr_prcm_set->dpll_speed * 2;
@@ -120,8 +118,7 @@ int omap2_reprogram_dpllcore(struct clk_hw *hw, unsigned long rate,
const struct dpll_data *dd;
cur_rate = omap2xxx_clk_get_core_rate();
- mult = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
- mult &= OMAP24XX_CORE_CLK_SRC_MASK;
+ mult = omap2xxx_cm_get_core_clk_src();
if ((rate == (cur_rate / 2)) && (mult == 2)) {
omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL, 1);
@@ -145,7 +142,7 @@ int omap2_reprogram_dpllcore(struct clk_hw *hw, unsigned long rate,
tmpset.cm_clksel1_pll &= ~(dd->mult_mask |
dd->div1_mask);
div = ((curr_prcm_set->xtal_speed / 1000000) - 1);
- tmpset.cm_clksel2_pll = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
+ tmpset.cm_clksel2_pll = omap2xxx_cm_get_core_pll_config();
tmpset.cm_clksel2_pll &= ~OMAP24XX_CORE_CLK_SRC_MASK;
if (rate > low) {
tmpset.cm_clksel2_pll |= CORE_CLK_SRC_DPLL_X2;
diff --git a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
index ae2b35e76dc8..b935ed2922d8 100644
--- a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
+++ b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
@@ -98,7 +98,7 @@ long omap2_round_to_table_rate(struct clk_hw *hw, unsigned long rate,
int omap2_select_table_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- u32 cur_rate, done_rate, bypass = 0, tmp;
+ u32 cur_rate, done_rate, bypass = 0;
const struct prcm_config *prcm;
unsigned long found_speed = 0;
unsigned long flags;
@@ -141,23 +141,11 @@ int omap2_select_table_rate(struct clk_hw *hw, unsigned long rate,
else
done_rate = CORE_CLK_SRC_DPLL;
- /* MPU divider */
- omap2_cm_write_mod_reg(prcm->cm_clksel_mpu, MPU_MOD, CM_CLKSEL);
-
- /* dsp + iva1 div(2420), iva2.1(2430) */
- omap2_cm_write_mod_reg(prcm->cm_clksel_dsp,
- OMAP24XX_DSP_MOD, CM_CLKSEL);
-
- omap2_cm_write_mod_reg(prcm->cm_clksel_gfx, GFX_MOD, CM_CLKSEL);
-
- /* Major subsystem dividers */
- tmp = omap2_cm_read_mod_reg(CORE_MOD, CM_CLKSEL1) & OMAP24XX_CLKSEL_DSS2_MASK;
- omap2_cm_write_mod_reg(prcm->cm_clksel1_core | tmp, CORE_MOD,
- CM_CLKSEL1);
-
- if (cpu_is_omap2430())
- omap2_cm_write_mod_reg(prcm->cm_clksel_mdm,
- OMAP2430_MDM_MOD, CM_CLKSEL);
+ omap2xxx_cm_set_mod_dividers(prcm->cm_clksel_mpu,
+ prcm->cm_clksel_dsp,
+ prcm->cm_clksel_gfx,
+ prcm->cm_clksel1_core,
+ prcm->cm_clksel_mdm);
/* x2 to enter omap2xxx_sdrc_init_params() */
omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 0c38ca96c840..c7c5d31e9082 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -543,6 +543,44 @@ int omap2_clk_disable_autoidle_all(void)
}
/**
+ * omap2_clk_deny_idle - disable autoidle on an OMAP clock
+ * @clk: struct clk * to disable autoidle for
+ *
+ * Disable autoidle on an OMAP clock.
+ */
+int omap2_clk_deny_idle(struct clk *clk)
+{
+ struct clk_hw_omap *c;
+
+ if (__clk_get_flags(clk) & CLK_IS_BASIC)
+ return -EINVAL;
+
+ c = to_clk_hw_omap(__clk_get_hw(clk));
+ if (c->ops && c->ops->deny_idle)
+ c->ops->deny_idle(c);
+ return 0;
+}
+
+/**
+ * omap2_clk_allow_idle - enable autoidle on an OMAP clock
+ * @clk: struct clk * to enable autoidle for
+ *
+ * Enable autoidle on an OMAP clock.
+ */
+int omap2_clk_allow_idle(struct clk *clk)
+{
+ struct clk_hw_omap *c;
+
+ if (__clk_get_flags(clk) & CLK_IS_BASIC)
+ return -EINVAL;
+
+ c = to_clk_hw_omap(__clk_get_hw(clk));
+ if (c->ops && c->ops->allow_idle)
+ c->ops->allow_idle(c);
+ return 0;
+}
+
+/**
* omap2_clk_enable_init_clocks - prepare & enable a list of clocks
* @clk_names: ptr to an array of strings of clock names to enable
* @num_clocks: number of clock names in @clk_names
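The new omap2_clk_deny_idle()/omap2_clk_allow_idle() helpers let SoC code toggle clock autoidle without poking CM_AUTOIDLE registers directly; the mcbsp.c hunk further down in this diff is the in-tree user. A minimal calling sketch, with a hypothetical helper name and illustrative error handling (not part of the patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/types.h>

#include "clock.h"	/* omap2_clk_deny_idle(), omap2_clk_allow_idle() */

/* Hypothetical helper: keep an interface clock out of autoidle while busy. */
static int example_pin_iclk(struct clk *iclk, bool busy)
{
	if (IS_ERR_OR_NULL(iclk))
		return -EINVAL;

	/* Both helpers return -EINVAL for basic (non-OMAP) clocks. */
	return busy ? omap2_clk_deny_idle(iclk) : omap2_clk_allow_idle(iclk);
}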
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index 7aa32cd292f9..82916cc82c92 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -411,6 +411,8 @@ void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
void omap2_init_clk_hw_omap_clocks(struct clk *clk);
int omap2_clk_enable_autoidle_all(void);
int omap2_clk_disable_autoidle_all(void);
+int omap2_clk_allow_idle(struct clk *clk);
+int omap2_clk_deny_idle(struct clk *clk);
void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
int omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name);
void omap2_clk_print_new_rates(const char *hfclkin_ck_name,
diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
index 4b03394fa0c5..f17f00697cc0 100644
--- a/arch/arm/mach-omap2/clockdomain.h
+++ b/arch/arm/mach-omap2/clockdomain.h
@@ -132,7 +132,7 @@ struct clockdomain {
u8 _flags;
const u8 dep_bit;
const u8 prcm_partition;
- const s16 cm_inst;
+ const u16 cm_inst;
const u16 clkdm_offs;
struct clkdm_dep *wkdep_srcs;
struct clkdm_dep *sleepdep_srcs;
@@ -218,6 +218,7 @@ extern void __init am33xx_clockdomains_init(void);
extern void __init omap44xx_clockdomains_init(void);
extern void __init omap54xx_clockdomains_init(void);
extern void __init dra7xx_clockdomains_init(void);
+void am43xx_clockdomains_init(void);
extern void clkdm_add_autodeps(struct clockdomain *clkdm);
extern void clkdm_del_autodeps(struct clockdomain *clkdm);
@@ -226,6 +227,7 @@ extern struct clkdm_ops omap2_clkdm_operations;
extern struct clkdm_ops omap3_clkdm_operations;
extern struct clkdm_ops omap4_clkdm_operations;
extern struct clkdm_ops am33xx_clkdm_operations;
+extern struct clkdm_ops am43xx_clkdm_operations;
extern struct clkdm_dep gfx_24xx_wkdeps[];
extern struct clkdm_dep dsp_24xx_wkdeps[];
diff --git a/arch/arm/mach-omap2/clockdomains43xx_data.c b/arch/arm/mach-omap2/clockdomains43xx_data.c
new file mode 100644
index 000000000000..6d71c6082a24
--- /dev/null
+++ b/arch/arm/mach-omap2/clockdomains43xx_data.c
@@ -0,0 +1,196 @@
+/*
+ * AM43xx Clock domains framework
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include "clockdomain.h"
+#include "prcm44xx.h"
+#include "prcm43xx.h"
+
+static struct clockdomain l4_cefuse_43xx_clkdm = {
+ .name = "l4_cefuse_clkdm",
+ .pwrdm = { .name = "cefuse_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_CEFUSE_INST,
+ .clkdm_offs = AM43XX_CM_CEFUSE_CEFUSE_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain mpu_43xx_clkdm = {
+ .name = "mpu_clkdm",
+ .pwrdm = { .name = "mpu_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_MPU_INST,
+ .clkdm_offs = AM43XX_CM_MPU_MPU_CDOFFS,
+ .flags = CLKDM_CAN_HWSUP_SWSUP,
+};
+
+static struct clockdomain l4ls_43xx_clkdm = {
+ .name = "l4ls_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_PER_INST,
+ .clkdm_offs = AM43XX_CM_PER_L4LS_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain tamper_43xx_clkdm = {
+ .name = "tamper_clkdm",
+ .pwrdm = { .name = "tamper_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_TAMPER_INST,
+ .clkdm_offs = AM43XX_CM_TAMPER_TAMPER_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l4_rtc_43xx_clkdm = {
+ .name = "l4_rtc_clkdm",
+ .pwrdm = { .name = "rtc_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_RTC_INST,
+ .clkdm_offs = AM43XX_CM_RTC_RTC_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain pruss_ocp_43xx_clkdm = {
+ .name = "pruss_ocp_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_PER_INST,
+ .clkdm_offs = AM43XX_CM_PER_ICSS_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain ocpwp_l3_43xx_clkdm = {
+ .name = "ocpwp_l3_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_PER_INST,
+ .clkdm_offs = AM43XX_CM_PER_OCPWP_L3_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l3s_tsc_43xx_clkdm = {
+ .name = "l3s_tsc_clkdm",
+ .pwrdm = { .name = "wkup_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_WKUP_INST,
+ .clkdm_offs = AM43XX_CM_WKUP_L3S_TSC_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain dss_43xx_clkdm = {
+ .name = "dss_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_PER_INST,
+ .clkdm_offs = AM43XX_CM_PER_DSS_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l3_aon_43xx_clkdm = {
+ .name = "l3_aon_clkdm",
+ .pwrdm = { .name = "wkup_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_WKUP_INST,
+ .clkdm_offs = AM43XX_CM_WKUP_L3_AON_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain emif_43xx_clkdm = {
+ .name = "emif_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_PER_INST,
+ .clkdm_offs = AM43XX_CM_PER_EMIF_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l4_wkup_aon_43xx_clkdm = {
+ .name = "l4_wkup_aon_clkdm",
+ .pwrdm = { .name = "wkup_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_WKUP_INST,
+ .clkdm_offs = AM43XX_CM_WKUP_L4_WKUP_AON_CDOFFS,
+};
+
+static struct clockdomain l3_43xx_clkdm = {
+ .name = "l3_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_PER_INST,
+ .clkdm_offs = AM43XX_CM_PER_L3_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l4_wkup_43xx_clkdm = {
+ .name = "l4_wkup_clkdm",
+ .pwrdm = { .name = "wkup_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_WKUP_INST,
+ .clkdm_offs = AM43XX_CM_WKUP_WKUP_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain cpsw_125mhz_43xx_clkdm = {
+ .name = "cpsw_125mhz_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_PER_INST,
+ .clkdm_offs = AM43XX_CM_PER_CPSW_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain gfx_l3_43xx_clkdm = {
+ .name = "gfx_l3_clkdm",
+ .pwrdm = { .name = "gfx_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_GFX_INST,
+ .clkdm_offs = AM43XX_CM_GFX_GFX_L3_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l3s_43xx_clkdm = {
+ .name = "l3s_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .prcm_partition = AM43XX_CM_PARTITION,
+ .cm_inst = AM43XX_CM_PER_INST,
+ .clkdm_offs = AM43XX_CM_PER_L3S_CDOFFS,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain *clockdomains_am43xx[] __initdata = {
+ &l4_cefuse_43xx_clkdm,
+ &mpu_43xx_clkdm,
+ &l4ls_43xx_clkdm,
+ &tamper_43xx_clkdm,
+ &l4_rtc_43xx_clkdm,
+ &pruss_ocp_43xx_clkdm,
+ &ocpwp_l3_43xx_clkdm,
+ &l3s_tsc_43xx_clkdm,
+ &dss_43xx_clkdm,
+ &l3_aon_43xx_clkdm,
+ &emif_43xx_clkdm,
+ &l4_wkup_aon_43xx_clkdm,
+ &l3_43xx_clkdm,
+ &l4_wkup_43xx_clkdm,
+ &cpsw_125mhz_43xx_clkdm,
+ &gfx_l3_43xx_clkdm,
+ &l3s_43xx_clkdm,
+ NULL
+};
+
+void __init am43xx_clockdomains_init(void)
+{
+ clkdm_register_platform_funcs(&am43xx_clkdm_operations);
+ clkdm_register_clkdms(clockdomains_am43xx);
+ clkdm_complete_init();
+}
diff --git a/arch/arm/mach-omap2/cm2xxx.c b/arch/arm/mach-omap2/cm2xxx.c
index 6774a53a3874..ce25abbcffae 100644
--- a/arch/arm/mach-omap2/cm2xxx.c
+++ b/arch/arm/mach-omap2/cm2xxx.c
@@ -327,6 +327,73 @@ struct clkdm_ops omap2_clkdm_operations = {
.clkdm_clk_disable = omap2xxx_clkdm_clk_disable,
};
+int omap2xxx_cm_fclks_active(void)
+{
+ u32 f1, f2;
+
+ f1 = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
+ f2 = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
+
+ return (f1 | f2) ? 1 : 0;
+}
+
+int omap2xxx_cm_mpu_retention_allowed(void)
+{
+ u32 l;
+
+ /* Check for MMC, UART2, UART1, McSPI2, McSPI1 and DSS1. */
+ l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
+ if (l & (OMAP2420_EN_MMC_MASK | OMAP24XX_EN_UART2_MASK |
+ OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_MCSPI2_MASK |
+ OMAP24XX_EN_MCSPI1_MASK | OMAP24XX_EN_DSS1_MASK))
+ return 0;
+ /* Check for UART3. */
+ l = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
+ if (l & OMAP24XX_EN_UART3_MASK)
+ return 0;
+
+ return 1;
+}
+
+u32 omap2xxx_cm_get_core_clk_src(void)
+{
+ u32 v;
+
+ v = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
+ v &= OMAP24XX_CORE_CLK_SRC_MASK;
+
+ return v;
+}
+
+u32 omap2xxx_cm_get_core_pll_config(void)
+{
+ return omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
+}
+
+u32 omap2xxx_cm_get_pll_config(void)
+{
+ return omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL1);
+}
+
+u32 omap2xxx_cm_get_pll_status(void)
+{
+ return omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN);
+}
+
+void omap2xxx_cm_set_mod_dividers(u32 mpu, u32 dsp, u32 gfx, u32 core, u32 mdm)
+{
+ u32 tmp;
+
+ omap2_cm_write_mod_reg(mpu, MPU_MOD, CM_CLKSEL);
+ omap2_cm_write_mod_reg(dsp, OMAP24XX_DSP_MOD, CM_CLKSEL);
+ omap2_cm_write_mod_reg(gfx, GFX_MOD, CM_CLKSEL);
+ tmp = omap2_cm_read_mod_reg(CORE_MOD, CM_CLKSEL1) &
+ OMAP24XX_CLKSEL_DSS2_MASK;
+ omap2_cm_write_mod_reg(core | tmp, CORE_MOD, CM_CLKSEL1);
+ if (cpu_is_omap2430())
+ omap2_cm_write_mod_reg(mdm, OMAP2430_MDM_MOD, CM_CLKSEL);
+}
+
/*
*
*/
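These accessors keep the PLL_MOD/CORE_MOD register layout inside cm2xxx.c; the clkt2xxx_* hunks above switch over to them. A sketch of the kind of idle-readiness check the OMAP2 PM code can now express without reading CM registers itself (the helper name is hypothetical):

#include "cm2xxx.h"

/* Illustrative only: decide whether MPU retention is worth attempting. */
static int example_omap2_mpu_can_retain(void)
{
	/* Any active functional clock keeps the core domain busy. */
	if (omap2xxx_cm_fclks_active())
		return 0;

	/* MMC/UART/McSPI/DSS activity also blocks MPU retention. */
	return omap2xxx_cm_mpu_retention_allowed();
}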
diff --git a/arch/arm/mach-omap2/cm2xxx.h b/arch/arm/mach-omap2/cm2xxx.h
index 4cbb39b051d2..891d81c3c8f4 100644
--- a/arch/arm/mach-omap2/cm2xxx.h
+++ b/arch/arm/mach-omap2/cm2xxx.h
@@ -62,6 +62,14 @@ extern int omap2xxx_cm_wait_module_ready(s16 prcm_mod, u8 idlest_id,
u8 idlest_shift);
extern int omap2xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
s16 *prcm_inst, u8 *idlest_reg_id);
+extern int omap2xxx_cm_fclks_active(void);
+extern int omap2xxx_cm_mpu_retention_allowed(void);
+extern u32 omap2xxx_cm_get_core_clk_src(void);
+extern u32 omap2xxx_cm_get_core_pll_config(void);
+extern u32 omap2xxx_cm_get_pll_config(void);
+extern u32 omap2xxx_cm_get_pll_status(void);
+extern void omap2xxx_cm_set_mod_dividers(u32 mpu, u32 dsp, u32 gfx, u32 core,
+ u32 mdm);
extern int __init omap2xxx_cm_init(void);
diff --git a/arch/arm/mach-omap2/cm33xx.c b/arch/arm/mach-omap2/cm33xx.c
index 325a51576576..40a22e5649ae 100644
--- a/arch/arm/mach-omap2/cm33xx.c
+++ b/arch/arm/mach-omap2/cm33xx.c
@@ -48,13 +48,13 @@
/* Private functions */
/* Read a register in a CM instance */
-static inline u32 am33xx_cm_read_reg(s16 inst, u16 idx)
+static inline u32 am33xx_cm_read_reg(u16 inst, u16 idx)
{
return __raw_readl(cm_base + inst + idx);
}
/* Write into a register in a CM */
-static inline void am33xx_cm_write_reg(u32 val, s16 inst, u16 idx)
+static inline void am33xx_cm_write_reg(u32 val, u16 inst, u16 idx)
{
__raw_writel(val, cm_base + inst + idx);
}
@@ -138,7 +138,7 @@ static bool _is_module_ready(u16 inst, s16 cdoffs, u16 clkctrl_offs)
* @c must be the unshifted value for CLKTRCTRL - i.e., this function
* will handle the shift itself.
*/
-static void _clktrctrl_write(u8 c, s16 inst, u16 cdoffs)
+static void _clktrctrl_write(u8 c, u16 inst, u16 cdoffs)
{
u32 v;
@@ -158,7 +158,7 @@ static void _clktrctrl_write(u8 c, s16 inst, u16 cdoffs)
* Returns true if the clockdomain referred to by (@inst, @cdoffs)
* is in hardware-supervised idle mode, or 0 otherwise.
*/
-bool am33xx_cm_is_clkdm_in_hwsup(s16 inst, u16 cdoffs)
+bool am33xx_cm_is_clkdm_in_hwsup(u16 inst, u16 cdoffs)
{
u32 v;
@@ -177,7 +177,7 @@ bool am33xx_cm_is_clkdm_in_hwsup(s16 inst, u16 cdoffs)
* Put a clockdomain referred to by (@inst, @cdoffs) into
* hardware-supervised idle mode. No return value.
*/
-void am33xx_cm_clkdm_enable_hwsup(s16 inst, u16 cdoffs)
+void am33xx_cm_clkdm_enable_hwsup(u16 inst, u16 cdoffs)
{
_clktrctrl_write(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, inst, cdoffs);
}
@@ -191,7 +191,7 @@ void am33xx_cm_clkdm_enable_hwsup(s16 inst, u16 cdoffs)
* software-supervised idle mode, i.e., controlled manually by the
* Linux OMAP clockdomain code. No return value.
*/
-void am33xx_cm_clkdm_disable_hwsup(s16 inst, u16 cdoffs)
+void am33xx_cm_clkdm_disable_hwsup(u16 inst, u16 cdoffs)
{
_clktrctrl_write(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, inst, cdoffs);
}
@@ -204,7 +204,7 @@ void am33xx_cm_clkdm_disable_hwsup(s16 inst, u16 cdoffs)
* Put a clockdomain referred to by (@inst, @cdoffs) into idle
* No return value.
*/
-void am33xx_cm_clkdm_force_sleep(s16 inst, u16 cdoffs)
+void am33xx_cm_clkdm_force_sleep(u16 inst, u16 cdoffs)
{
_clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, inst, cdoffs);
}
@@ -217,7 +217,7 @@ void am33xx_cm_clkdm_force_sleep(s16 inst, u16 cdoffs)
* Take a clockdomain referred to by (@inst, @cdoffs) out of idle,
* waking it up. No return value.
*/
-void am33xx_cm_clkdm_force_wakeup(s16 inst, u16 cdoffs)
+void am33xx_cm_clkdm_force_wakeup(u16 inst, u16 cdoffs)
{
_clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, inst, cdoffs);
}
diff --git a/arch/arm/mach-omap2/cm33xx.h b/arch/arm/mach-omap2/cm33xx.h
index 9d1f4fcdebbb..cfb8891b0c0e 100644
--- a/arch/arm/mach-omap2/cm33xx.h
+++ b/arch/arm/mach-omap2/cm33xx.h
@@ -377,13 +377,13 @@
#ifndef __ASSEMBLER__
-extern bool am33xx_cm_is_clkdm_in_hwsup(s16 inst, u16 cdoffs);
-extern void am33xx_cm_clkdm_enable_hwsup(s16 inst, u16 cdoffs);
-extern void am33xx_cm_clkdm_disable_hwsup(s16 inst, u16 cdoffs);
-extern void am33xx_cm_clkdm_force_sleep(s16 inst, u16 cdoffs);
-extern void am33xx_cm_clkdm_force_wakeup(s16 inst, u16 cdoffs);
+bool am33xx_cm_is_clkdm_in_hwsup(u16 inst, u16 cdoffs);
+void am33xx_cm_clkdm_enable_hwsup(u16 inst, u16 cdoffs);
+void am33xx_cm_clkdm_disable_hwsup(u16 inst, u16 cdoffs);
+void am33xx_cm_clkdm_force_sleep(u16 inst, u16 cdoffs);
+void am33xx_cm_clkdm_force_wakeup(u16 inst, u16 cdoffs);
-#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
+#ifdef CONFIG_SOC_AM33XX
extern int am33xx_cm_wait_module_idle(u16 inst, s16 cdoffs,
u16 clkctrl_offs);
extern void am33xx_cm_module_enable(u8 mode, u16 inst, s16 cdoffs,
diff --git a/arch/arm/mach-omap2/cm3xxx.c b/arch/arm/mach-omap2/cm3xxx.c
index 9061c307d915..f6f028867bfe 100644
--- a/arch/arm/mach-omap2/cm3xxx.c
+++ b/arch/arm/mach-omap2/cm3xxx.c
@@ -636,6 +636,28 @@ void omap3_cm_restore_context(void)
OMAP3_CM_CLKOUT_CTRL_OFFSET);
}
+void omap3_cm_save_scratchpad_contents(u32 *ptr)
+{
+ *ptr++ = omap2_cm_read_mod_reg(CORE_MOD, CM_CLKSEL);
+ *ptr++ = omap2_cm_read_mod_reg(WKUP_MOD, CM_CLKSEL);
+ *ptr++ = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN);
+
+ /*
+ * As per erratum i671, the ROM code does not respect the PER DPLL
+ * programming scheme if CM_AUTOIDLE_PLL..AUTO_PERIPH_DPLL == 1.
+ * In any case, clear these bits to avoid extra latencies.
+ */
+ *ptr++ = omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE) &
+ ~OMAP3430_AUTO_PERIPH_DPLL_MASK;
+ *ptr++ = omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL1_PLL);
+ *ptr++ = omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL2_PLL);
+ *ptr++ = omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL3);
+ *ptr++ = omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKEN_PLL);
+ *ptr++ = omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL);
+ *ptr++ = omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL);
+ *ptr++ = omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL);
+}
+
/*
*
*/
diff --git a/arch/arm/mach-omap2/cm3xxx.h b/arch/arm/mach-omap2/cm3xxx.h
index e8e146f4a43f..8224c91b4d7a 100644
--- a/arch/arm/mach-omap2/cm3xxx.h
+++ b/arch/arm/mach-omap2/cm3xxx.h
@@ -83,6 +83,7 @@ extern int omap3xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
extern void omap3_cm_save_context(void);
extern void omap3_cm_restore_context(void);
+extern void omap3_cm_save_scratchpad_contents(u32 *ptr);
extern int __init omap3xxx_cm_init(void);
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c
index f0290f5566fe..731ca134348c 100644
--- a/arch/arm/mach-omap2/cminst44xx.c
+++ b/arch/arm/mach-omap2/cminst44xx.c
@@ -111,7 +111,7 @@ static bool _is_module_ready(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs)
/* Public functions */
/* Read a register in a CM instance */
-u32 omap4_cminst_read_inst_reg(u8 part, s16 inst, u16 idx)
+u32 omap4_cminst_read_inst_reg(u8 part, u16 inst, u16 idx)
{
BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
part == OMAP4430_INVALID_PRCM_PARTITION ||
@@ -120,7 +120,7 @@ u32 omap4_cminst_read_inst_reg(u8 part, s16 inst, u16 idx)
}
/* Write into a register in a CM instance */
-void omap4_cminst_write_inst_reg(u32 val, u8 part, s16 inst, u16 idx)
+void omap4_cminst_write_inst_reg(u32 val, u8 part, u16 inst, u16 idx)
{
BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
part == OMAP4430_INVALID_PRCM_PARTITION ||
@@ -129,7 +129,7 @@ void omap4_cminst_write_inst_reg(u32 val, u8 part, s16 inst, u16 idx)
}
/* Read-modify-write a register in CM1. Caller must lock */
-u32 omap4_cminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part, s16 inst,
+u32 omap4_cminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part, u16 inst,
s16 idx)
{
u32 v;
@@ -142,12 +142,12 @@ u32 omap4_cminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part, s16 inst,
return v;
}
-u32 omap4_cminst_set_inst_reg_bits(u32 bits, u8 part, s16 inst, s16 idx)
+u32 omap4_cminst_set_inst_reg_bits(u32 bits, u8 part, u16 inst, s16 idx)
{
return omap4_cminst_rmw_inst_reg_bits(bits, bits, part, inst, idx);
}
-u32 omap4_cminst_clear_inst_reg_bits(u32 bits, u8 part, s16 inst, s16 idx)
+u32 omap4_cminst_clear_inst_reg_bits(u32 bits, u8 part, u16 inst, s16 idx)
{
return omap4_cminst_rmw_inst_reg_bits(bits, 0x0, part, inst, idx);
}
@@ -177,7 +177,7 @@ u32 omap4_cminst_read_inst_reg_bits(u8 part, u16 inst, s16 idx, u32 mask)
* @c must be the unshifted value for CLKTRCTRL - i.e., this function
* will handle the shift itself.
*/
-static void _clktrctrl_write(u8 c, u8 part, s16 inst, u16 cdoffs)
+static void _clktrctrl_write(u8 c, u8 part, u16 inst, u16 cdoffs)
{
u32 v;
@@ -196,7 +196,7 @@ static void _clktrctrl_write(u8 c, u8 part, s16 inst, u16 cdoffs)
* Returns true if the clockdomain referred to by (@part, @inst, @cdoffs)
* is in hardware-supervised idle mode, or 0 otherwise.
*/
-bool omap4_cminst_is_clkdm_in_hwsup(u8 part, s16 inst, u16 cdoffs)
+bool omap4_cminst_is_clkdm_in_hwsup(u8 part, u16 inst, u16 cdoffs)
{
u32 v;
@@ -216,7 +216,7 @@ bool omap4_cminst_is_clkdm_in_hwsup(u8 part, s16 inst, u16 cdoffs)
* Put a clockdomain referred to by (@part, @inst, @cdoffs) into
* hardware-supervised idle mode. No return value.
*/
-void omap4_cminst_clkdm_enable_hwsup(u8 part, s16 inst, u16 cdoffs)
+void omap4_cminst_clkdm_enable_hwsup(u8 part, u16 inst, u16 cdoffs)
{
_clktrctrl_write(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, part, inst, cdoffs);
}
@@ -231,7 +231,7 @@ void omap4_cminst_clkdm_enable_hwsup(u8 part, s16 inst, u16 cdoffs)
* software-supervised idle mode, i.e., controlled manually by the
* Linux OMAP clockdomain code. No return value.
*/
-void omap4_cminst_clkdm_disable_hwsup(u8 part, s16 inst, u16 cdoffs)
+void omap4_cminst_clkdm_disable_hwsup(u8 part, u16 inst, u16 cdoffs)
{
_clktrctrl_write(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, part, inst, cdoffs);
}
@@ -245,7 +245,7 @@ void omap4_cminst_clkdm_disable_hwsup(u8 part, s16 inst, u16 cdoffs)
* Take a clockdomain referred to by (@part, @inst, @cdoffs) out of idle,
* waking it up. No return value.
*/
-void omap4_cminst_clkdm_force_wakeup(u8 part, s16 inst, u16 cdoffs)
+void omap4_cminst_clkdm_force_wakeup(u8 part, u16 inst, u16 cdoffs)
{
_clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, part, inst, cdoffs);
}
@@ -483,3 +483,12 @@ struct clkdm_ops omap4_clkdm_operations = {
.clkdm_clk_enable = omap4_clkdm_clk_enable,
.clkdm_clk_disable = omap4_clkdm_clk_disable,
};
+
+struct clkdm_ops am43xx_clkdm_operations = {
+ .clkdm_sleep = omap4_clkdm_sleep,
+ .clkdm_wakeup = omap4_clkdm_wakeup,
+ .clkdm_allow_idle = omap4_clkdm_allow_idle,
+ .clkdm_deny_idle = omap4_clkdm_deny_idle,
+ .clkdm_clk_enable = omap4_clkdm_clk_enable,
+ .clkdm_clk_disable = omap4_clkdm_clk_disable,
+};
diff --git a/arch/arm/mach-omap2/cminst44xx.h b/arch/arm/mach-omap2/cminst44xx.h
index bd7bab889745..7f56ea444bc4 100644
--- a/arch/arm/mach-omap2/cminst44xx.h
+++ b/arch/arm/mach-omap2/cminst44xx.h
@@ -11,11 +11,11 @@
#ifndef __ARCH_ASM_MACH_OMAP2_CMINST44XX_H
#define __ARCH_ASM_MACH_OMAP2_CMINST44XX_H
-extern bool omap4_cminst_is_clkdm_in_hwsup(u8 part, s16 inst, u16 cdoffs);
-extern void omap4_cminst_clkdm_enable_hwsup(u8 part, s16 inst, u16 cdoffs);
-extern void omap4_cminst_clkdm_disable_hwsup(u8 part, s16 inst, u16 cdoffs);
-extern void omap4_cminst_clkdm_force_sleep(u8 part, s16 inst, u16 cdoffs);
-extern void omap4_cminst_clkdm_force_wakeup(u8 part, s16 inst, u16 cdoffs);
+bool omap4_cminst_is_clkdm_in_hwsup(u8 part, u16 inst, u16 cdoffs);
+void omap4_cminst_clkdm_enable_hwsup(u8 part, u16 inst, u16 cdoffs);
+void omap4_cminst_clkdm_disable_hwsup(u8 part, u16 inst, u16 cdoffs);
+void omap4_cminst_clkdm_force_sleep(u8 part, u16 inst, u16 cdoffs);
+void omap4_cminst_clkdm_force_wakeup(u8 part, u16 inst, u16 cdoffs);
extern int omap4_cminst_wait_module_ready(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs);
extern int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs);
@@ -27,14 +27,14 @@ extern void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs,
* In an ideal world, we would not export these low-level functions,
* but this will probably take some time to fix properly
*/
-extern u32 omap4_cminst_read_inst_reg(u8 part, s16 inst, u16 idx);
-extern void omap4_cminst_write_inst_reg(u32 val, u8 part, s16 inst, u16 idx);
-extern u32 omap4_cminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part,
- s16 inst, s16 idx);
-extern u32 omap4_cminst_set_inst_reg_bits(u32 bits, u8 part, s16 inst,
- s16 idx);
-extern u32 omap4_cminst_clear_inst_reg_bits(u32 bits, u8 part, s16 inst,
- s16 idx);
+u32 omap4_cminst_read_inst_reg(u8 part, u16 inst, u16 idx);
+void omap4_cminst_write_inst_reg(u32 val, u8 part, u16 inst, u16 idx);
+u32 omap4_cminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part,
+ u16 inst, s16 idx);
+u32 omap4_cminst_set_inst_reg_bits(u32 bits, u8 part, u16 inst,
+ s16 idx);
+u32 omap4_cminst_clear_inst_reg_bits(u32 bits, u8 part, u16 inst,
+ s16 idx);
extern u32 omap4_cminst_read_inst_reg_bits(u8 part, u16 inst, s16 idx,
u32 mask);
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 4a5684b96492..f7644febee81 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -98,6 +98,7 @@ void am35xx_init_early(void);
void ti81xx_init_early(void);
void am33xx_init_early(void);
void am43xx_init_early(void);
+void am43xx_init_late(void);
void omap4430_init_early(void);
void omap5_init_early(void);
void omap3_init_late(void); /* Do not use this one */
@@ -109,8 +110,11 @@ void omap35xx_init_late(void);
void omap3630_init_late(void);
void am35xx_init_late(void);
void ti81xx_init_late(void);
+void am33xx_init_late(void);
+void omap5_init_late(void);
int omap2_common_pm_late_init(void);
void dra7xx_init_early(void);
+void dra7xx_init_late(void);
#ifdef CONFIG_SOC_BUS
void omap_soc_device_init(void);
@@ -288,6 +292,9 @@ static inline void omap4_cpu_resume(void)
#endif
+void pdata_quirks_init(struct of_device_id *);
+void omap_pcs_legacy_init(int irq, void (*rearm)(void));
+
struct omap_sdrc_params;
extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
struct omap_sdrc_params *sdrc_cs1);
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index 31e0dfe4a4ea..44bb4d544dcf 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -46,17 +46,7 @@ struct omap3_scratchpad {
struct omap3_scratchpad_prcm_block {
u32 prm_clksrc_ctrl;
u32 prm_clksel;
- u32 cm_clksel_core;
- u32 cm_clksel_wkup;
- u32 cm_clken_pll;
- u32 cm_autoidle_pll;
- u32 cm_clksel1_pll;
- u32 cm_clksel2_pll;
- u32 cm_clksel3_pll;
- u32 cm_clken_pll_mpu;
- u32 cm_autoidle_pll_mpu;
- u32 cm_clksel1_pll_mpu;
- u32 cm_clksel2_pll_mpu;
+ u32 cm_contents[11];
u32 prcm_block_size;
};
@@ -347,34 +337,9 @@ void omap3_save_scratchpad_contents(void)
prcm_block_contents.prm_clksel =
omap2_prm_read_mod_reg(OMAP3430_CCR_MOD,
OMAP3_PRM_CLKSEL_OFFSET);
- prcm_block_contents.cm_clksel_core =
- omap2_cm_read_mod_reg(CORE_MOD, CM_CLKSEL);
- prcm_block_contents.cm_clksel_wkup =
- omap2_cm_read_mod_reg(WKUP_MOD, CM_CLKSEL);
- prcm_block_contents.cm_clken_pll =
- omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN);
- /*
- * As per erratum i671, ROM code does not respect the PER DPLL
- * programming scheme if CM_AUTOIDLE_PLL..AUTO_PERIPH_DPLL == 1.
- * Then, in anycase, clear these bits to avoid extra latencies.
- */
- prcm_block_contents.cm_autoidle_pll =
- omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE) &
- ~OMAP3430_AUTO_PERIPH_DPLL_MASK;
- prcm_block_contents.cm_clksel1_pll =
- omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL1_PLL);
- prcm_block_contents.cm_clksel2_pll =
- omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL2_PLL);
- prcm_block_contents.cm_clksel3_pll =
- omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL3);
- prcm_block_contents.cm_clken_pll_mpu =
- omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKEN_PLL);
- prcm_block_contents.cm_autoidle_pll_mpu =
- omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL);
- prcm_block_contents.cm_clksel1_pll_mpu =
- omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL);
- prcm_block_contents.cm_clksel2_pll_mpu =
- omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL);
+
+ omap3_cm_save_scratchpad_contents(prcm_block_contents.cm_contents);
+
prcm_block_contents.prcm_block_size = 0x0;
/* Populate the SDRC block contents */
@@ -604,4 +569,15 @@ int omap3_ctrl_save_padconf(void)
return 0;
}
+/**
+ * omap3_ctrl_set_iva_bootmode_idle - sets the IVA2 bootmode to idle
+ *
+ * Sets the IVA2 boot mode to idle. This is needed by the PM code to
+ * force-disable IVA2 so that it does not block low-power states.
+ */
+void omap3_ctrl_set_iva_bootmode_idle(void)
+{
+ omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
+ OMAP343X_CONTROL_IVA2_BOOTMOD);
+}
#endif /* CONFIG_ARCH_OMAP3 && CONFIG_PM */
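omap3_ctrl_set_iva_bootmode_idle() hides the OMAP343X_CONTROL_IVA2_BOOTMOD write behind a helper, so PM code no longer needs the control-module register offsets. A sketch of the intended kind of call site, assuming a hypothetical IVA2 shutdown step (the surrounding PM sequencing is not part of this patch):

#include "control.h"

/* Hypothetical PM step: park IVA2 so it cannot block low-power states. */
static void example_omap3_iva2_park(void)
{
	/* IVA2 clock/reset handling would go here (omitted). */
	omap3_ctrl_set_iva_bootmode_idle();
}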
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index f7d7c2ef1b40..da054801b114 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -427,6 +427,7 @@ extern void omap_ctrl_write_dsp_boot_addr(u32 bootaddr);
extern void omap_ctrl_write_dsp_boot_mode(u8 bootmode);
extern void omap3630_ctrl_disable_rta(void);
extern int omap3_ctrl_save_padconf(void);
+extern void omap3_ctrl_set_iva_bootmode_idle(void);
extern void omap2_set_globals_control(void __iomem *ctrl,
void __iomem *ctrl_pad);
#else
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 5c5315ba129b..0dd6398bade4 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -19,7 +19,6 @@
#include <linux/of.h>
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/omap4-keypad.h>
-#include <linux/wl12xx.h>
#include <linux/platform_data/mailbox-omap.h>
#include <asm/mach-types.h>
@@ -37,6 +36,7 @@
#include "mux.h"
#include "control.h"
#include "devices.h"
+#include "display.h"
#define L3_MODULES_MAX_LEN 12
#define L3_MODULES 3
@@ -466,47 +466,13 @@ static struct platform_device omap_vout_device = {
.resource = &omap_vout_resource[0],
.id = -1,
};
-static void omap_init_vout(void)
-{
- if (platform_device_register(&omap_vout_device) < 0)
- printk(KERN_ERR "Unable to register OMAP-VOUT device\n");
-}
-#else
-static inline void omap_init_vout(void) {}
-#endif
-
-#if IS_ENABLED(CONFIG_WL12XX)
-static struct wl12xx_platform_data wl12xx __initdata;
-
-void __init omap_init_wl12xx_of(void)
+int __init omap_init_vout(void)
{
- int ret;
-
- if (!of_have_populated_dt())
- return;
-
- if (of_machine_is_compatible("ti,omap4-sdp")) {
- wl12xx.board_ref_clock = WL12XX_REFCLOCK_26;
- wl12xx.board_tcxo_clock = WL12XX_TCXOCLOCK_26;
- wl12xx.irq = gpio_to_irq(53);
- } else if (of_machine_is_compatible("ti,omap4-panda")) {
- wl12xx.board_ref_clock = WL12XX_REFCLOCK_38;
- wl12xx.irq = gpio_to_irq(53);
- } else {
- return;
- }
-
- ret = wl12xx_set_platform_data(&wl12xx);
- if (ret) {
- pr_err("error setting wl12xx data: %d\n", ret);
- return;
- }
+ return platform_device_register(&omap_vout_device);
}
#else
-static inline void omap_init_wl12xx_of(void)
-{
-}
+int __init omap_init_vout(void) { return 0; }
#endif
/*-------------------------------------------------------------------------*/
@@ -531,12 +497,8 @@ static int __init omap2_init_devices(void)
omap_init_sham();
omap_init_aes();
omap_init_rng();
- } else {
- /* These can be removed when bindings are done */
- omap_init_wl12xx_of();
}
omap_init_sti();
- omap_init_vout();
return 0;
}
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 03a0516c7f67..a4e536b11ec9 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -416,6 +416,34 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
}
}
+ /* create DRM device */
+ r = omap_init_drm();
+ if (r < 0) {
+ pr_err("Unable to register omapdrm device\n");
+ return r;
+ }
+
+ /* create vrfb device */
+ r = omap_init_vrfb();
+ if (r < 0) {
+ pr_err("Unable to register omapvrfb device\n");
+ return r;
+ }
+
+ /* create FB device */
+ r = omap_init_fb();
+ if (r < 0) {
+ pr_err("Unable to register omapfb device\n");
+ return r;
+ }
+
+ /* create V4L2 display device */
+ r = omap_init_vout();
+ if (r < 0) {
+ pr_err("Unable to register omap_vout device\n");
+ return r;
+ }
+
return 0;
}
diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h
index b871b017b352..f3d2ce4bc262 100644
--- a/arch/arm/mach-omap2/display.h
+++ b/arch/arm/mach-omap2/display.h
@@ -26,4 +26,8 @@ struct omap_dss_dispc_dev_attr {
bool has_framedonetv_irq;
};
+int omap_init_drm(void);
+int omap_init_vrfb(void);
+int omap_init_fb(void);
+int omap_init_vout(void);
#endif
diff --git a/arch/arm/mach-omap2/drm.c b/arch/arm/mach-omap2/drm.c
index 59a4af779f42..facd7406a03d 100644
--- a/arch/arm/mach-omap2/drm.c
+++ b/arch/arm/mach-omap2/drm.c
@@ -26,10 +26,9 @@
#include <linux/platform_data/omap_drm.h>
#include "soc.h"
-#include "omap_device.h"
-#include "omap_hwmod.h"
+#include "display.h"
-#if defined(CONFIG_DRM_OMAP) || (CONFIG_DRM_OMAP_MODULE)
+#if defined(CONFIG_DRM_OMAP) || defined(CONFIG_DRM_OMAP_MODULE)
static struct omap_drm_platform_data platform_data;
@@ -42,26 +41,13 @@ static struct platform_device omap_drm_device = {
.id = 0,
};
-static int __init omap_init_drm(void)
+int __init omap_init_drm(void)
{
- struct omap_hwmod *oh = NULL;
- struct platform_device *pdev;
-
- /* lookup and populate the DMM information, if present - OMAP4+ */
- oh = omap_hwmod_lookup("dmm");
-
- if (oh) {
- pdev = omap_device_build(oh->name, -1, oh, NULL, 0);
- WARN(IS_ERR(pdev), "Could not build omap_device for %s\n",
- oh->name);
- }
-
platform_data.omaprev = GET_OMAP_TYPE;
return platform_device_register(&omap_drm_device);
}
-
-omap_arch_initcall(omap_init_drm);
-
+#else
+int __init omap_init_drm(void) { return 0; }
#endif
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index bf89effa4c99..365bfd3d9c68 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -213,3 +213,47 @@ void __init omap_4430sdp_display_init_of(void)
platform_device_register(&sdp4430_tpd_device);
platform_device_register(&sdp4430_hdmi_connector_device);
}
+
+
+/* OMAP3 IGEPv2 data */
+
+#define IGEP2_DVI_TFP410_POWER_DOWN_GPIO 170
+
+/* DVI Connector */
+static struct connector_dvi_platform_data omap3_igep2_dvi_connector_pdata = {
+ .name = "dvi",
+ .source = "tfp410.0",
+ .i2c_bus_num = 3,
+};
+
+static struct platform_device omap3_igep2_dvi_connector_device = {
+ .name = "connector-dvi",
+ .id = 0,
+ .dev.platform_data = &omap3_igep2_dvi_connector_pdata,
+};
+
+/* TFP410 DPI-to-DVI chip */
+static struct encoder_tfp410_platform_data omap3_igep2_tfp410_pdata = {
+ .name = "tfp410.0",
+ .source = "dpi.0",
+ .data_lines = 24,
+ .power_down_gpio = IGEP2_DVI_TFP410_POWER_DOWN_GPIO,
+};
+
+static struct platform_device omap3_igep2_tfp410_device = {
+ .name = "tfp410",
+ .id = 0,
+ .dev.platform_data = &omap3_igep2_tfp410_pdata,
+};
+
+static struct omap_dss_board_info igep2_dss_data = {
+ .default_display_name = "dvi",
+};
+
+void __init omap3_igep2_display_init_of(void)
+{
+ omap_display_init(&igep2_dss_data);
+
+ platform_device_register(&omap3_igep2_tfp410_device);
+ platform_device_register(&omap3_igep2_dvi_connector_device);
+}
diff --git a/arch/arm/mach-omap2/dss-common.h b/arch/arm/mach-omap2/dss-common.h
index c28fe3c03588..a9becf0d5be8 100644
--- a/arch/arm/mach-omap2/dss-common.h
+++ b/arch/arm/mach-omap2/dss-common.h
@@ -8,5 +8,6 @@
void __init omap4_panda_display_init_of(void);
void __init omap_4430sdp_display_init_of(void);
+void __init omap3_igep2_display_init_of(void);
#endif
diff --git a/arch/arm/mach-omap2/fb.c b/arch/arm/mach-omap2/fb.c
index 2ca33cc0c484..26e28e94f625 100644
--- a/arch/arm/mach-omap2/fb.c
+++ b/arch/arm/mach-omap2/fb.c
@@ -32,6 +32,7 @@
#include <asm/mach/map.h>
#include "soc.h"
+#include "display.h"
#ifdef CONFIG_OMAP2_VRFB
@@ -64,7 +65,7 @@ static const struct resource omap3_vrfb_resources[] = {
DEFINE_RES_MEM_NAMED(0xfc000000u, 0x4000000, "vrfb-area-11"),
};
-static int __init omap_init_vrfb(void)
+int __init omap_init_vrfb(void)
{
struct platform_device *pdev;
const struct resource *res;
@@ -85,8 +86,8 @@ static int __init omap_init_vrfb(void)
return PTR_RET(pdev);
}
-
-omap_arch_initcall(omap_init_vrfb);
+#else
+int __init omap_init_vrfb(void) { return 0; }
#endif
#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
@@ -105,11 +106,10 @@ static struct platform_device omap_fb_device = {
.num_resources = 0,
};
-static int __init omap_init_fb(void)
+int __init omap_init_fb(void)
{
return platform_device_register(&omap_fb_device);
}
-
-omap_arch_initcall(omap_init_fb);
-
+#else
+int __init omap_init_fb(void) { return 0; }
#endif
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 579697adaae7..81de56251955 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1341,14 +1341,6 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
#ifdef CONFIG_MTD_NAND
-static const char * const nand_ecc_opts[] = {
- [OMAP_ECC_HAMMING_CODE_DEFAULT] = "sw",
- [OMAP_ECC_HAMMING_CODE_HW] = "hw",
- [OMAP_ECC_HAMMING_CODE_HW_ROMCODE] = "hw-romcode",
- [OMAP_ECC_BCH4_CODE_HW] = "bch4",
- [OMAP_ECC_BCH8_CODE_HW] = "bch8",
-};
-
static const char * const nand_xfer_types[] = {
[NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
[NAND_OMAP_POLLED] = "polled",
@@ -1378,13 +1370,41 @@ static int gpmc_probe_nand_child(struct platform_device *pdev,
gpmc_nand_data->cs = val;
gpmc_nand_data->of_node = child;
- if (!of_property_read_string(child, "ti,nand-ecc-opt", &s))
- for (val = 0; val < ARRAY_SIZE(nand_ecc_opts); val++)
- if (!strcasecmp(s, nand_ecc_opts[val])) {
- gpmc_nand_data->ecc_opt = val;
- break;
- }
+ /* Detect availability of ELM module */
+ gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
+ if (gpmc_nand_data->elm_of_node == NULL)
+ gpmc_nand_data->elm_of_node =
+ of_parse_phandle(child, "elm_id", 0);
+ if (gpmc_nand_data->elm_of_node == NULL)
+ pr_warn("%s: ti,elm-id property not found\n", __func__);
+
+ /* select ecc-scheme for NAND */
+ if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
+ pr_err("%s: ti,nand-ecc-opt not found\n", __func__);
+ return -ENODEV;
+ }
+ if (!strcmp(s, "ham1") || !strcmp(s, "sw") ||
+ !strcmp(s, "hw") || !strcmp(s, "hw-romcode"))
+ gpmc_nand_data->ecc_opt =
+ OMAP_ECC_HAM1_CODE_HW;
+ else if (!strcmp(s, "bch4"))
+ if (gpmc_nand_data->elm_of_node)
+ gpmc_nand_data->ecc_opt =
+ OMAP_ECC_BCH4_CODE_HW;
+ else
+ gpmc_nand_data->ecc_opt =
+ OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
+ else if (!strcmp(s, "bch8"))
+ if (gpmc_nand_data->elm_of_node)
+ gpmc_nand_data->ecc_opt =
+ OMAP_ECC_BCH8_CODE_HW;
+ else
+ gpmc_nand_data->ecc_opt =
+ OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
+ else
+ pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__);
+ /* select data transfer mode for NAND controller */
if (!of_property_read_string(child, "ti,nand-xfer-type", &s))
for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++)
if (!strcasecmp(s, nand_xfer_types[val])) {
@@ -1521,6 +1541,42 @@ err:
return ret;
}
+/*
+ * REVISIT: Add timing support from slls644g.pdf
+ */
+static int gpmc_probe_8250(struct platform_device *pdev,
+ struct device_node *child)
+{
+ struct resource res;
+ unsigned long base;
+ int ret, cs;
+
+ if (of_property_read_u32(child, "reg", &cs) < 0) {
+ dev_err(&pdev->dev, "%s has no 'reg' property\n",
+ child->full_name);
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(child, 0, &res) < 0) {
+ dev_err(&pdev->dev, "%s has malformed 'reg' property\n",
+ child->full_name);
+ return -ENODEV;
+ }
+
+ ret = gpmc_cs_request(cs, resource_size(&res), &base);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs);
+ return ret;
+ }
+
+ if (of_platform_device_create(child, NULL, &pdev->dev))
+ return 0;
+
+ dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name);
+
+ return -ENODEV;
+}
+
static int gpmc_probe_dt(struct platform_device *pdev)
{
int ret;
@@ -1564,6 +1620,8 @@ static int gpmc_probe_dt(struct platform_device *pdev)
else if (of_node_cmp(child->name, "ethernet") == 0 ||
of_node_cmp(child->name, "nor") == 0)
ret = gpmc_probe_generic_child(pdev, child);
+ else if (of_node_cmp(child->name, "8250") == 0)
+ ret = gpmc_probe_8250(pdev, child);
if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
__func__, child->full_name))
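The brace-less if/else chain that picks the ECC scheme above is easy to misread in diff form; here is the same mapping written flat as a self-contained sketch (the platform-data header location and the -EINVAL fallback are assumptions, not taken from this patch):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/platform_data/mtd-nand-omap2.h>	/* OMAP_ECC_* (assumed location) */

/* Illustrative mapping of the "ti,nand-ecc-opt" string to an ECC scheme. */
static int example_map_nand_ecc_opt(const char *s, bool have_elm)
{
	if (!strcmp(s, "ham1") || !strcmp(s, "sw") ||
	    !strcmp(s, "hw") || !strcmp(s, "hw-romcode"))
		return OMAP_ECC_HAM1_CODE_HW;
	if (!strcmp(s, "bch4"))
		return have_elm ? OMAP_ECC_BCH4_CODE_HW
				: OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
	if (!strcmp(s, "bch8"))
		return have_elm ? OMAP_ECC_BCH8_CODE_HW
				: OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
	return -EINVAL;	/* unknown scheme */
}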
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 0289adcb6efb..9428c5f9d4f2 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/random.h>
#include <linux/slab.h>
#ifdef CONFIG_SOC_BUS
@@ -130,6 +131,17 @@ void omap_get_die_id(struct omap_die_id *odi)
odi->id_3 = read_tap_reg(OMAP_TAP_DIE_ID_3);
}
+static int __init omap_feed_randpool(void)
+{
+ struct omap_die_id odi;
+
+ /* Throw the die ID into the entropy pool at boot */
+ omap_get_die_id(&odi);
+ add_device_randomness(&odi, sizeof(odi));
+ return 0;
+}
+omap_device_initcall(omap_feed_randpool);
+
void __init omap2xxx_check_revision(void)
{
int i, j;
@@ -576,8 +588,8 @@ void __init omap5xxx_check_revision(void)
case 0xb942:
switch (rev) {
case 0:
- omap_revision = OMAP5430_REV_ES1_0;
- break;
+ /* No support for ES1.0 Test chip */
+ BUG();
case 1:
default:
omap_revision = OMAP5430_REV_ES2_0;
@@ -587,8 +599,8 @@ void __init omap5xxx_check_revision(void)
case 0xb998:
switch (rev) {
case 0:
- omap_revision = OMAP5432_REV_ES1_0;
- break;
+ /* No support for ES1.0 Test chip */
+ BUG();
case 1:
default:
omap_revision = OMAP5432_REV_ES2_0;
diff --git a/arch/arm/mach-omap2/include/mach/timex.h b/arch/arm/mach-omap2/include/mach/timex.h
deleted file mode 100644
index de9f8fc40e7c..000000000000
--- a/arch/arm/mach-omap2/include/mach/timex.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-omap2/include/mach/timex.h
- */
-
-#include <plat/timex.h>
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index ff2113ce4014..cd22262a2cc0 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -583,6 +583,11 @@ void __init am33xx_init_early(void)
omap_hwmod_init_postsetup();
omap_clk_init = am33xx_clk_init;
}
+
+void __init am33xx_init_late(void)
+{
+ omap_common_late_init();
+}
#endif
#ifdef CONFIG_SOC_AM43XX
@@ -594,7 +599,18 @@ void __init am43xx_init_early(void)
NULL);
omap2_set_globals_prm(AM33XX_L4_WK_IO_ADDRESS(AM43XX_PRCM_BASE));
omap2_set_globals_cm(AM33XX_L4_WK_IO_ADDRESS(AM43XX_PRCM_BASE), NULL);
+ omap_prm_base_init();
+ omap_cm_base_init();
omap3xxx_check_revision();
+ am43xx_powerdomains_init();
+ am43xx_clockdomains_init();
+ am43xx_hwmod_init();
+ omap_hwmod_init_postsetup();
+}
+
+void __init am43xx_init_late(void)
+{
+ omap_common_late_init();
}
#endif
@@ -651,6 +667,11 @@ void __init omap5_init_early(void)
omap54xx_hwmod_init();
omap_hwmod_init_postsetup();
}
+
+void __init omap5_init_late(void)
+{
+ omap_common_late_init();
+}
#endif
#ifdef CONFIG_SOC_DRA7XX
@@ -671,6 +692,11 @@ void __init dra7xx_init_early(void)
dra7xx_hwmod_init();
omap_hwmod_init_postsetup();
}
+
+void __init dra7xx_init_late(void)
+{
+ omap_common_late_init();
+}
#endif
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 3926f370448f..e022a869bff2 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -233,7 +233,7 @@ static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs
goto out;
irqnr = readl_relaxed(base_addr + 0xd8);
-#ifdef CONFIG_SOC_TI81XX
+#if IS_ENABLED(CONFIG_SOC_TI81XX) || IS_ENABLED(CONFIG_SOC_AM33XX)
if (irqnr)
goto out;
irqnr = readl_relaxed(base_addr + 0xf8);
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 5d8768075dd9..b4ac3af1160c 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -25,6 +25,7 @@
#include "soc.h"
#include "omap_device.h"
+#include "clock.h"
/*
* FIXME: Find a mechanism to enable/disable runtime the McBSP ICLK autoidle.
@@ -33,22 +34,18 @@
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
+static struct clk *mcbsp_iclks[5];
+
static int omap3_enable_st_clock(unsigned int id, bool enable)
{
- unsigned int w;
-
/*
* Sidetone uses McBSP ICLK - which must not idle when sidetones
* are enabled or sidetones start sounding ugly.
*/
- w = omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE);
if (enable)
- w &= ~(1 << (id - 2));
+ return omap2_clk_deny_idle(mcbsp_iclks[id]);
else
- w |= 1 << (id - 2);
- omap2_cm_write_mod_reg(w, OMAP3430_PER_MOD, CM_AUTOIDLE);
-
- return 0;
+ return omap2_clk_allow_idle(mcbsp_iclks[id]);
}
static int __init omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
@@ -58,6 +55,7 @@ static int __init omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
struct omap_hwmod *oh_device[2];
struct omap_mcbsp_platform_data *pdata = NULL;
struct platform_device *pdev;
+ char clk_name[11];
sscanf(oh->name, "mcbsp%d", &id);
@@ -99,6 +97,8 @@ static int __init omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
oh_device[1] = omap_hwmod_lookup((
(struct omap_mcbsp_dev_attr *)(oh->dev_attr))->sidetone);
pdata->enable_st_clock = omap3_enable_st_clock;
+ sprintf(clk_name, "mcbsp%d_ick", id);
+ mcbsp_iclks[id] = clk_get(NULL, clk_name);
count++;
}
pdev = omap_device_build_ss(name, id, oh_device, count, pdata,
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index f82cf878d6af..48094b58c88f 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -811,6 +811,12 @@ int __init omap_mux_late_init(void)
}
}
+ omap_mux_dbg_init();
+
+ /* see pinctrl-single-omap for the wake-up interrupt handling */
+ if (of_have_populated_dt())
+ return 0;
+
ret = request_irq(omap_prcm_event_to_irq("io"),
omap_hwmod_mux_handle_irq, IRQF_SHARED | IRQF_NO_SUSPEND,
"hwmod_io", omap_mux_late_init);
@@ -818,8 +824,6 @@ int __init omap_mux_late_init(void)
if (ret)
pr_warning("mux: Failed to setup hwmod io irq %d\n", ret);
- omap_mux_dbg_init();
-
return 0;
}
diff --git a/arch/arm/mach-omap2/omap-pm.h b/arch/arm/mach-omap2/omap-pm.h
index 67faa7b8fe92..1d777e63e05c 100644
--- a/arch/arm/mach-omap2/omap-pm.h
+++ b/arch/arm/mach-omap2/omap-pm.h
@@ -17,7 +17,7 @@
#include <linux/device.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
/*
* agent_id values for use with omap_pm_set_min_bus_tput():
diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
index b970440cffca..5ac122e88f67 100644
--- a/arch/arm/mach-omap2/omap-secure.c
+++ b/arch/arm/mach-omap2/omap-secure.c
@@ -3,6 +3,8 @@
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Copyright (C) 2012 Ivaylo Dimitrov <freemangordon@abv.bg>
+ * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
*
*
* This program is free software,you can redistribute it and/or modify
@@ -70,3 +72,77 @@ phys_addr_t omap_secure_ram_mempool_base(void)
{
return omap_secure_memblock_base;
}
+
+/**
+ * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
+ * @idx: The PPA API index
+ * @process: Process ID
+ * @flag: The flag indicating criticality of operation
+ * @nargs: Number of valid arguments out of four.
+ * @arg1, @arg2, @arg3, @arg4: Parameters passed to the secure API
+ *
+ * Return the non-zero error value on failure.
+ *
+ * NOTE: rx51_secure_dispatcher differs from omap_secure_dispatcher because
+ * it calls omap_smc3() instead of omap_smc2(), and param[0] is nargs+1.
+ */
+u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 ret;
+ u32 param[5];
+
+ param[0] = nargs+1; /* RX-51 needs number of arguments + 1 */
+ param[1] = arg1;
+ param[2] = arg2;
+ param[3] = arg3;
+ param[4] = arg4;
+
+ /*
+ * The secure API needs a physical address pointer
+ * for the parameters.
+ */
+ local_irq_disable();
+ local_fiq_disable();
+ flush_cache_all();
+ outer_clean_range(__pa(param), __pa(param + 5));
+ ret = omap_smc3(idx, process, flag, __pa(param));
+ flush_cache_all();
+ local_fiq_enable();
+ local_irq_enable();
+
+ return ret;
+}
+
+/**
+ * rx51_secure_update_aux_cr: Routine to modify the contents of Auxiliary Control Register
+ * @set_bits: bits to set in ACR
+ * @clr_bits: bits to clear in ACR
+ *
+ * Return the non-zero error value on failure.
+*/
+u32 rx51_secure_update_aux_cr(u32 set_bits, u32 clear_bits)
+{
+ u32 acr;
+
+ /* Read ACR */
+ asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+ acr &= ~clear_bits;
+ acr |= set_bits;
+
+ return rx51_secure_dispatcher(RX51_PPA_WRITE_ACR,
+ 0,
+ FLAG_START_CRITICAL,
+ 1, acr, 0, 0, 0);
+}
+
+/**
+ * rx51_secure_rng_call: Routine for the HW random number generator
+ */
+u32 rx51_secure_rng_call(u32 ptr, u32 count, u32 flag)
+{
+ return rx51_secure_dispatcher(RX51_PPA_HWRNG,
+ 0,
+ NO_FLAG,
+ 3, ptr, count, flag, 0);
+}
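rx51_secure_rng_call() is the hook a hardware RNG driver is expected to use on RX-51 class boards. A minimal consumer sketch (the function name, error mapping and use of NO_FLAG are assumptions; only the three-argument secure call itself comes from this patch):

#include <linux/errno.h>
#include <linux/types.h>

#include "omap-secure.h"

/*
 * Illustrative consumer: ask the secure ROM for @count bytes of entropy,
 * written to the physical address @buf_phys.
 */
static int example_rx51_get_random(u32 buf_phys, u32 count)
{
	u32 r = rx51_secure_rng_call(buf_phys, count, NO_FLAG);

	return r ? -EIO : 0;
}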
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index 0e729170c46b..8cc7d331437d 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -3,6 +3,8 @@
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Copyright (C) 2012 Ivaylo Dimitrov <freemangordon@abv.bg>
+ * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -42,23 +44,38 @@
#define OMAP4_MON_L2X0_AUXCTRL_INDEX 0x109
#define OMAP4_MON_L2X0_PREFETCH_INDEX 0x113
+#define OMAP5_DRA7_MON_SET_CNTFRQ_INDEX 0x109
+
/* Secure PPA(Primary Protected Application) APIs */
#define OMAP4_PPA_L2_POR_INDEX 0x23
#define OMAP4_PPA_CPU_ACTRL_SMP_INDEX 0x25
+/* Secure RX-51 PPA (Primary Protected Application) APIs */
+#define RX51_PPA_HWRNG 29
+#define RX51_PPA_L2_INVAL 40
+#define RX51_PPA_WRITE_ACR 42
+
#ifndef __ASSEMBLER__
extern u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs,
u32 arg1, u32 arg2, u32 arg3, u32 arg4);
extern u32 omap_smc2(u32 id, u32 falg, u32 pargs);
+extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs);
extern phys_addr_t omap_secure_ram_mempool_base(void);
extern int omap_secure_ram_reserve_memblock(void);
+extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+extern u32 rx51_secure_update_aux_cr(u32 set_bits, u32 clear_bits);
+extern u32 rx51_secure_rng_call(u32 ptr, u32 count, u32 flag);
+
#ifdef CONFIG_OMAP4_ERRATA_I688
extern int omap_barrier_reserve_memblock(void);
#else
static inline void omap_barrier_reserve_memblock(void)
{ }
#endif
+
+void set_cntfreq(void);
#endif /* __ASSEMBLER__ */
#endif /* OMAP_ARCH_OMAP_SECURE_H */
diff --git a/arch/arm/mach-omap2/omap-smc.S b/arch/arm/mach-omap2/omap-smc.S
index f6441c13cd8c..fd90125bffc7 100644
--- a/arch/arm/mach-omap2/omap-smc.S
+++ b/arch/arm/mach-omap2/omap-smc.S
@@ -1,9 +1,11 @@
/*
- * OMAP44xx secure APIs file.
+ * OMAP34xx and OMAP44xx secure APIs file.
*
* Copyright (C) 2010 Texas Instruments, Inc.
* Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
*
+ * Copyright (C) 2012 Ivaylo Dimitrov <freemangordon@abv.bg>
+ * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
*
* This program is free software,you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -54,6 +56,23 @@ ENTRY(omap_smc2)
ldmfd sp!, {r4-r12, pc}
ENDPROC(omap_smc2)
+/**
+ * u32 omap_smc3(u32 service_id, u32 process_id, u32 flag, u32 pargs)
+ * Low level common routine for secure HAL and PPA APIs via smc #1
+ * r0 - @service_id: Secure Service ID
+ * r1 - @process_id: Process ID
+ * r2 - @flag: Flag to indicate the criticality of operation
+ * r3 - @pargs: Physical address of parameter list
+ */
+ENTRY(omap_smc3)
+ stmfd sp!, {r4-r11, lr}
+ mov r12, r0 @ Copy the secure service ID
+ mov r6, #0xff @ Indicate new Task call
+ dsb @ Memory Barrier (not sure if needed, copied from omap_smc2)
+ smc #1 @ Call PPA service
+ ldmfd sp!, {r4-r11, pc}
+ENDPROC(omap_smc3)
+
ENTRY(omap_modify_auxcoreboot0)
stmfd sp!, {r1-r12, lr}
ldr r12, =0x104
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 891211093295..75e95d4fb448 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -66,6 +66,13 @@ static void omap4_secondary_init(unsigned int cpu)
4, 0, 0, 0, 0, 0);
/*
+ * Configure the CNTFRQ register for the secondary CPUs, which
+ * indicates the frequency of the CPU local timers.
+ */
+ if (soc_is_omap54xx() || soc_is_dra7xx())
+ set_cntfreq();
+
+ /*
* Synchronise with the boot thread.
*/
spin_lock(&boot_lock);
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 813c61558a5f..3664562f9148 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -33,8 +33,12 @@
#include "omap4-sar-layout.h"
#include "common.h"
-#define MAX_NR_REG_BANKS 5
-#define MAX_IRQS 160
+#define AM43XX_NR_REG_BANKS 7
+#define AM43XX_IRQS 224
+#define MAX_NR_REG_BANKS AM43XX_NR_REG_BANKS
+#define MAX_IRQS AM43XX_IRQS
+#define DEFAULT_NR_REG_BANKS 5
+#define DEFAULT_IRQS 160
#define WKG_MASK_ALL 0x00000000
#define WKG_UNMASK_ALL 0xffffffff
#define CPU_ENA_OFFSET 0x400
@@ -47,8 +51,8 @@ static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[MAX_IRQS];
-static unsigned int irq_banks = MAX_NR_REG_BANKS;
-static unsigned int max_irqs = MAX_IRQS;
+static unsigned int irq_banks = DEFAULT_NR_REG_BANKS;
+static unsigned int max_irqs = DEFAULT_IRQS;
static unsigned int omap_secure_apis;
/*
@@ -418,12 +422,16 @@ int __init omap_wakeupgen_init(void)
irq_banks = OMAP4_NR_BANKS;
max_irqs = OMAP4_NR_IRQS;
omap_secure_apis = 1;
+ } else if (soc_is_am43xx()) {
+ irq_banks = AM43XX_NR_REG_BANKS;
+ max_irqs = AM43XX_IRQS;
}
/* Clear all IRQ bitmasks at wakeupGen level */
for (i = 0; i < irq_banks; i++) {
wakeupgen_writel(0, i, CPU0_ID);
- wakeupgen_writel(0, i, CPU1_ID);
+ if (!soc_is_am43xx())
+ wakeupgen_writel(0, i, CPU1_ID);
}
/*
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d9ee0ff094d4..e3f0ecaf87dd 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2357,25 +2357,29 @@ static struct device_node *of_dev_hwmod_lookup(struct device_node *np,
/**
* _init_mpu_rt_base - populate the virtual address for a hwmod
* @oh: struct omap_hwmod * to locate the virtual address
+ * @data: (unused, caller should pass NULL)
+ * @np: struct device_node * of the IP block's device node in the DT data
*
* Cache the virtual address used by the MPU to access this IP block's
* registers. This address is needed early so the OCP registers that
* are part of the device's address space can be ioremapped properly.
- * No return value.
+ *
+ * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
+ * -ENXIO on absent or invalid register target address space.
*/
-static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
+static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
+ struct device_node *np)
{
struct omap_hwmod_addr_space *mem;
void __iomem *va_start = NULL;
- struct device_node *np;
if (!oh)
- return;
+ return -EINVAL;
_save_mpu_port_index(oh);
if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
- return;
+ return -ENXIO;
mem = _find_mpu_rt_addr_space(oh);
if (!mem) {
@@ -2383,25 +2387,24 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
oh->name);
/* Extract the IO space from device tree blob */
- if (!of_have_populated_dt())
- return;
+ if (!np)
+ return -ENXIO;
- np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
- if (np)
- va_start = of_iomap(np, oh->mpu_rt_idx);
+ va_start = of_iomap(np, oh->mpu_rt_idx);
} else {
va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
}
if (!va_start) {
pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name);
- return;
+ return -ENXIO;
}
pr_debug("omap_hwmod: %s: MPU register target at va %p\n",
oh->name, va_start);
oh->_mpu_rt_va = va_start;
+ return 0;
}
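The new return convention documented above boils down to three cases. A standalone stand-in (demo_init_mpu_rt_base() is invented for illustration; the real _init() caller below simply warns and skips the hwmod when it sees a negative result):

#include <errno.h>
#include <stdio.h>

/* 0 on success, -EINVAL for a missing hwmod, -ENXIO when there is no
 * usable MPU register target to ioremap. */
static int demo_init_mpu_rt_base(int have_hwmod, int have_target)
{
	if (!have_hwmod)
		return -EINVAL;
	if (!have_target)
		return -ENXIO;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", demo_init_mpu_rt_base(1, 1),
	       demo_init_mpu_rt_base(0, 1), demo_init_mpu_rt_base(1, 0));
	return 0;
}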
/**
@@ -2414,18 +2417,28 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
* registered at this point. This is the first of two phases for
* hwmod initialization. Code called here does not touch any hardware
* registers, it simply prepares internal data structures. Returns 0
- * upon success or if the hwmod isn't registered, or -EINVAL upon
- * failure.
+ * upon success or if the hwmod isn't registered or if the hwmod's
+ * address space is not defined, or -EINVAL upon failure.
*/
static int __init _init(struct omap_hwmod *oh, void *data)
{
int r;
+ struct device_node *np = NULL;
if (oh->_state != _HWMOD_STATE_REGISTERED)
return 0;
- if (oh->class->sysc)
- _init_mpu_rt_base(oh, NULL);
+ if (of_have_populated_dt())
+ np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
+
+ if (oh->class->sysc) {
+ r = _init_mpu_rt_base(oh, NULL, np);
+ if (r < 0) {
+ WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
+ oh->name);
+ return 0;
+ }
+ }
r = _init_clocks(oh, NULL);
if (r < 0) {
@@ -2433,6 +2446,12 @@ static int __init _init(struct omap_hwmod *oh, void *data)
return -EINVAL;
}
+ if (np) {
+ if (of_find_property(np, "ti,no-reset-on-init", NULL))
+ oh->flags |= HWMOD_INIT_NO_RESET;
+ if (of_find_property(np, "ti,no-idle-on-init", NULL))
+ oh->flags |= HWMOD_INIT_NO_IDLE;
+ }
+
oh->_state = _HWMOD_STATE_INITIALIZED;
return 0;
@@ -4125,6 +4144,14 @@ void __init omap_hwmod_init(void)
soc_ops.init_clkdm = _init_clkdm;
soc_ops.update_context_lost = _omap4_update_context_lost;
soc_ops.get_context_lost = _omap4_get_context_lost;
+ } else if (soc_is_am43xx()) {
+ soc_ops.enable_module = _omap4_enable_module;
+ soc_ops.disable_module = _omap4_disable_module;
+ soc_ops.wait_target_ready = _omap4_wait_target_ready;
+ soc_ops.assert_hardreset = _omap4_assert_hardreset;
+ soc_ops.deassert_hardreset = _omap4_deassert_hardreset;
+ soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
+ soc_ops.init_clkdm = _init_clkdm;
} else if (soc_is_am33xx()) {
soc_ops.enable_module = _am33xx_enable_module;
soc_ops.disable_module = _am33xx_disable_module;
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index d02acf9308d3..0f97d635ff90 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -752,6 +752,7 @@ extern int omap44xx_hwmod_init(void);
extern int omap54xx_hwmod_init(void);
extern int am33xx_hwmod_init(void);
extern int dra7xx_hwmod_init(void);
+int am43xx_hwmod_init(void);
extern int __init omap_hwmod_register_links(struct omap_hwmod_ocp_if **ois);
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
new file mode 100644
index 000000000000..130332c0534d
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
@@ -0,0 +1,163 @@
+/*
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ *
+ * Data common for AM335x and AM43x
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_OMAP_HWMOD_33XX_43XX_COMMON_DATA_H
+#define __ARCH_ARM_MACH_OMAP2_OMAP_HWMOD_33XX_43XX_COMMON_DATA_H
+
+extern struct omap_hwmod_ocp_if am33xx_mpu__l3_main;
+extern struct omap_hwmod_ocp_if am33xx_l3_main__l3_s;
+extern struct omap_hwmod_ocp_if am33xx_l3_s__l4_ls;
+extern struct omap_hwmod_ocp_if am33xx_l3_s__l4_wkup;
+extern struct omap_hwmod_ocp_if am33xx_l3_main__l3_instr;
+extern struct omap_hwmod_ocp_if am33xx_mpu__prcm;
+extern struct omap_hwmod_ocp_if am33xx_l3_s__l3_main;
+extern struct omap_hwmod_ocp_if am33xx_pruss__l3_main;
+extern struct omap_hwmod_ocp_if am33xx_gfx__l3_main;
+extern struct omap_hwmod_ocp_if am33xx_l3_main__gfx;
+extern struct omap_hwmod_ocp_if am33xx_l4_wkup__rtc;
+extern struct omap_hwmod_ocp_if am33xx_l4_per__dcan0;
+extern struct omap_hwmod_ocp_if am33xx_l4_per__dcan1;
+extern struct omap_hwmod_ocp_if am33xx_l4_per__gpio1;
+extern struct omap_hwmod_ocp_if am33xx_l4_per__gpio2;
+extern struct omap_hwmod_ocp_if am33xx_l4_per__gpio3;
+extern struct omap_hwmod_ocp_if am33xx_cpgmac0__mdio;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__elm;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss0;
+extern struct omap_hwmod_ocp_if am33xx_epwmss0__ecap0;
+extern struct omap_hwmod_ocp_if am33xx_epwmss0__eqep0;
+extern struct omap_hwmod_ocp_if am33xx_epwmss0__ehrpwm0;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss1;
+extern struct omap_hwmod_ocp_if am33xx_epwmss1__ecap1;
+extern struct omap_hwmod_ocp_if am33xx_epwmss1__eqep1;
+extern struct omap_hwmod_ocp_if am33xx_epwmss1__ehrpwm1;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss2;
+extern struct omap_hwmod_ocp_if am33xx_epwmss2__ecap2;
+extern struct omap_hwmod_ocp_if am33xx_epwmss2__eqep2;
+extern struct omap_hwmod_ocp_if am33xx_epwmss2__ehrpwm2;
+extern struct omap_hwmod_ocp_if am33xx_l3_s__gpmc;
+extern struct omap_hwmod_ocp_if am33xx_l4_per__i2c2;
+extern struct omap_hwmod_ocp_if am33xx_l4_per__i2c3;
+extern struct omap_hwmod_ocp_if am33xx_l4_per__mailbox;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__spinlock;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__mcasp0;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__mcasp1;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__mmc0;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__mmc1;
+extern struct omap_hwmod_ocp_if am33xx_l3_s__mmc2;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__mcspi0;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__mcspi1;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer2;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer3;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer4;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer5;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer6;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer7;
+extern struct omap_hwmod_ocp_if am33xx_l3_main__tpcc;
+extern struct omap_hwmod_ocp_if am33xx_l3_main__tptc0;
+extern struct omap_hwmod_ocp_if am33xx_l3_main__tptc1;
+extern struct omap_hwmod_ocp_if am33xx_l3_main__tptc2;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__uart2;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__uart3;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__uart4;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__uart5;
+extern struct omap_hwmod_ocp_if am33xx_l4_ls__uart6;
+extern struct omap_hwmod_ocp_if am33xx_l3_main__ocmc;
+extern struct omap_hwmod_ocp_if am33xx_l3_main__sha0;
+extern struct omap_hwmod_ocp_if am33xx_l3_main__aes0;
+
+extern struct omap_hwmod am33xx_l3_main_hwmod;
+extern struct omap_hwmod am33xx_l3_s_hwmod;
+extern struct omap_hwmod am33xx_l3_instr_hwmod;
+extern struct omap_hwmod am33xx_l4_ls_hwmod;
+extern struct omap_hwmod am33xx_l4_wkup_hwmod;
+extern struct omap_hwmod am33xx_mpu_hwmod;
+extern struct omap_hwmod am33xx_pruss_hwmod;
+extern struct omap_hwmod am33xx_gfx_hwmod;
+extern struct omap_hwmod am33xx_prcm_hwmod;
+extern struct omap_hwmod am33xx_aes0_hwmod;
+extern struct omap_hwmod am33xx_sha0_hwmod;
+extern struct omap_hwmod am33xx_ocmcram_hwmod;
+extern struct omap_hwmod am33xx_smartreflex0_hwmod;
+extern struct omap_hwmod am33xx_smartreflex1_hwmod;
+extern struct omap_hwmod am33xx_cpgmac0_hwmod;
+extern struct omap_hwmod am33xx_mdio_hwmod;
+extern struct omap_hwmod am33xx_dcan0_hwmod;
+extern struct omap_hwmod am33xx_dcan1_hwmod;
+extern struct omap_hwmod am33xx_elm_hwmod;
+extern struct omap_hwmod am33xx_epwmss0_hwmod;
+extern struct omap_hwmod am33xx_ecap0_hwmod;
+extern struct omap_hwmod am33xx_eqep0_hwmod;
+extern struct omap_hwmod am33xx_ehrpwm0_hwmod;
+extern struct omap_hwmod am33xx_epwmss1_hwmod;
+extern struct omap_hwmod am33xx_ecap1_hwmod;
+extern struct omap_hwmod am33xx_eqep1_hwmod;
+extern struct omap_hwmod am33xx_ehrpwm1_hwmod;
+extern struct omap_hwmod am33xx_epwmss2_hwmod;
+extern struct omap_hwmod am33xx_ecap2_hwmod;
+extern struct omap_hwmod am33xx_eqep2_hwmod;
+extern struct omap_hwmod am33xx_ehrpwm2_hwmod;
+extern struct omap_hwmod am33xx_gpio1_hwmod;
+extern struct omap_hwmod am33xx_gpio2_hwmod;
+extern struct omap_hwmod am33xx_gpio3_hwmod;
+extern struct omap_hwmod am33xx_gpmc_hwmod;
+extern struct omap_hwmod am33xx_i2c1_hwmod;
+extern struct omap_hwmod am33xx_i2c2_hwmod;
+extern struct omap_hwmod am33xx_i2c3_hwmod;
+extern struct omap_hwmod am33xx_mailbox_hwmod;
+extern struct omap_hwmod am33xx_mcasp0_hwmod;
+extern struct omap_hwmod am33xx_mcasp1_hwmod;
+extern struct omap_hwmod am33xx_mmc0_hwmod;
+extern struct omap_hwmod am33xx_mmc1_hwmod;
+extern struct omap_hwmod am33xx_mmc2_hwmod;
+extern struct omap_hwmod am33xx_rtc_hwmod;
+extern struct omap_hwmod am33xx_spi0_hwmod;
+extern struct omap_hwmod am33xx_spi1_hwmod;
+extern struct omap_hwmod am33xx_spinlock_hwmod;
+extern struct omap_hwmod am33xx_timer1_hwmod;
+extern struct omap_hwmod am33xx_timer2_hwmod;
+extern struct omap_hwmod am33xx_timer3_hwmod;
+extern struct omap_hwmod am33xx_timer4_hwmod;
+extern struct omap_hwmod am33xx_timer5_hwmod;
+extern struct omap_hwmod am33xx_timer6_hwmod;
+extern struct omap_hwmod am33xx_timer7_hwmod;
+extern struct omap_hwmod am33xx_tpcc_hwmod;
+extern struct omap_hwmod am33xx_tptc0_hwmod;
+extern struct omap_hwmod am33xx_tptc1_hwmod;
+extern struct omap_hwmod am33xx_tptc2_hwmod;
+extern struct omap_hwmod am33xx_uart1_hwmod;
+extern struct omap_hwmod am33xx_uart2_hwmod;
+extern struct omap_hwmod am33xx_uart3_hwmod;
+extern struct omap_hwmod am33xx_uart4_hwmod;
+extern struct omap_hwmod am33xx_uart5_hwmod;
+extern struct omap_hwmod am33xx_uart6_hwmod;
+extern struct omap_hwmod am33xx_wd_timer1_hwmod;
+
+extern struct omap_hwmod_class am33xx_l4_hwmod_class;
+extern struct omap_hwmod_class am33xx_wkup_m3_hwmod_class;
+extern struct omap_hwmod_class am33xx_control_hwmod_class;
+extern struct omap_hwmod_class am33xx_gpio_hwmod_class;
+extern struct omap_hwmod_class am33xx_timer_hwmod_class;
+extern struct omap_hwmod_class am33xx_epwmss_hwmod_class;
+extern struct omap_hwmod_class am33xx_ehrpwm_hwmod_class;
+extern struct omap_hwmod_class am33xx_spi_hwmod_class;
+
+extern struct omap_gpio_dev_attr gpio_dev_attr;
+extern struct omap2_mcspi_dev_attr mcspi_attrib;
+
+void omap_hwmod_am33xx_reg(void);
+void omap_hwmod_am43xx_reg(void);
+
+#endif
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
new file mode 100644
index 000000000000..e2db378b849e
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
@@ -0,0 +1,643 @@
+/*
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ *
+ * Interconnects common for AM335x and AM43x
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sizes.h>
+#include "omap_hwmod.h"
+#include "omap_hwmod_33xx_43xx_common_data.h"
+
+/* mpu -> l3 main */
+struct omap_hwmod_ocp_if am33xx_mpu__l3_main = {
+ .master = &am33xx_mpu_hwmod,
+ .slave = &am33xx_l3_main_hwmod,
+ .clk = "dpll_mpu_m2_ck",
+ .user = OCP_USER_MPU,
+};
+
+/* l3 main -> l3 s */
+struct omap_hwmod_ocp_if am33xx_l3_main__l3_s = {
+ .master = &am33xx_l3_main_hwmod,
+ .slave = &am33xx_l3_s_hwmod,
+ .clk = "l3s_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3 s -> l4 per/ls */
+struct omap_hwmod_ocp_if am33xx_l3_s__l4_ls = {
+ .master = &am33xx_l3_s_hwmod,
+ .slave = &am33xx_l4_ls_hwmod,
+ .clk = "l3s_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3 s -> l4 wkup */
+struct omap_hwmod_ocp_if am33xx_l3_s__l4_wkup = {
+ .master = &am33xx_l3_s_hwmod,
+ .slave = &am33xx_l4_wkup_hwmod,
+ .clk = "l3s_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3 main -> l3 instr */
+struct omap_hwmod_ocp_if am33xx_l3_main__l3_instr = {
+ .master = &am33xx_l3_main_hwmod,
+ .slave = &am33xx_l3_instr_hwmod,
+ .clk = "l3s_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* mpu -> prcm */
+struct omap_hwmod_ocp_if am33xx_mpu__prcm = {
+ .master = &am33xx_mpu_hwmod,
+ .slave = &am33xx_prcm_hwmod,
+ .clk = "dpll_mpu_m2_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3 s -> l3 main */
+struct omap_hwmod_ocp_if am33xx_l3_s__l3_main = {
+ .master = &am33xx_l3_s_hwmod,
+ .slave = &am33xx_l3_main_hwmod,
+ .clk = "l3s_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* pru-icss -> l3 main */
+struct omap_hwmod_ocp_if am33xx_pruss__l3_main = {
+ .master = &am33xx_pruss_hwmod,
+ .slave = &am33xx_l3_main_hwmod,
+ .clk = "l3_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* gfx -> l3 main */
+struct omap_hwmod_ocp_if am33xx_gfx__l3_main = {
+ .master = &am33xx_gfx_hwmod,
+ .slave = &am33xx_l3_main_hwmod,
+ .clk = "dpll_core_m4_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3 main -> gfx */
+struct omap_hwmod_ocp_if am33xx_l3_main__gfx = {
+ .master = &am33xx_l3_main_hwmod,
+ .slave = &am33xx_gfx_hwmod,
+ .clk = "dpll_core_m4_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4 wkup -> rtc */
+struct omap_hwmod_ocp_if am33xx_l4_wkup__rtc = {
+ .master = &am33xx_l4_wkup_hwmod,
+ .slave = &am33xx_rtc_hwmod,
+ .clk = "clkdiv32k_ick",
+ .user = OCP_USER_MPU,
+};
+
+/* l4 per/ls -> DCAN0 */
+struct omap_hwmod_ocp_if am33xx_l4_per__dcan0 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_dcan0_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4 per/ls -> DCAN1 */
+struct omap_hwmod_ocp_if am33xx_l4_per__dcan1 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_dcan1_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4 per/ls -> gpio2 */
+struct omap_hwmod_ocp_if am33xx_l4_per__gpio1 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_gpio1_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4 per/ls -> gpio3 */
+struct omap_hwmod_ocp_if am33xx_l4_per__gpio2 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_gpio2_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4 per/ls -> gpio4 */
+struct omap_hwmod_ocp_if am33xx_l4_per__gpio3 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_gpio3_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+struct omap_hwmod_ocp_if am33xx_cpgmac0__mdio = {
+ .master = &am33xx_cpgmac0_hwmod,
+ .slave = &am33xx_mdio_hwmod,
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am33xx_elm_addr_space[] = {
+ {
+ .pa_start = 0x48080000,
+ .pa_end = 0x48080000 + SZ_8K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l4_ls__elm = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_elm_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_elm_addr_space,
+ .user = OCP_USER_MPU,
+};
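Every omap_hwmod_addr_space table in this file uses the same arithmetic: pa_end is the base plus the region size minus one, so the ELM register target above spans 0x48080000-0x48081fff. A standalone check (SZ_8K is redefined locally so the snippet compiles outside the kernel tree):

#include <stdio.h>

#define SZ_8K	0x00002000UL	/* mirrors the linux/sizes.h value */

int main(void)
{
	unsigned long pa_start = 0x48080000UL;
	unsigned long pa_end = pa_start + SZ_8K - 1;

	/* Prints 0x48080000-0x48081fff, matching am33xx_elm_addr_space[] */
	printf("ELM MPU register target: 0x%08lx-0x%08lx\n", pa_start, pa_end);
	return 0;
}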
+
+static struct omap_hwmod_addr_space am33xx_epwmss0_addr_space[] = {
+ {
+ .pa_start = 0x48300000,
+ .pa_end = 0x48300000 + SZ_16 - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss0 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_epwmss0_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_epwmss0_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+struct omap_hwmod_ocp_if am33xx_epwmss0__ecap0 = {
+ .master = &am33xx_epwmss0_hwmod,
+ .slave = &am33xx_ecap0_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+struct omap_hwmod_ocp_if am33xx_epwmss0__eqep0 = {
+ .master = &am33xx_epwmss0_hwmod,
+ .slave = &am33xx_eqep0_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+struct omap_hwmod_ocp_if am33xx_epwmss0__ehrpwm0 = {
+ .master = &am33xx_epwmss0_hwmod,
+ .slave = &am33xx_ehrpwm0_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+
+static struct omap_hwmod_addr_space am33xx_epwmss1_addr_space[] = {
+ {
+ .pa_start = 0x48302000,
+ .pa_end = 0x48302000 + SZ_16 - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss1 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_epwmss1_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_epwmss1_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+struct omap_hwmod_ocp_if am33xx_epwmss1__ecap1 = {
+ .master = &am33xx_epwmss1_hwmod,
+ .slave = &am33xx_ecap1_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+struct omap_hwmod_ocp_if am33xx_epwmss1__eqep1 = {
+ .master = &am33xx_epwmss1_hwmod,
+ .slave = &am33xx_eqep1_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+struct omap_hwmod_ocp_if am33xx_epwmss1__ehrpwm1 = {
+ .master = &am33xx_epwmss1_hwmod,
+ .slave = &am33xx_ehrpwm1_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am33xx_epwmss2_addr_space[] = {
+ {
+ .pa_start = 0x48304000,
+ .pa_end = 0x48304000 + SZ_16 - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss2 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_epwmss2_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_epwmss2_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+struct omap_hwmod_ocp_if am33xx_epwmss2__ecap2 = {
+ .master = &am33xx_epwmss2_hwmod,
+ .slave = &am33xx_ecap2_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+struct omap_hwmod_ocp_if am33xx_epwmss2__eqep2 = {
+ .master = &am33xx_epwmss2_hwmod,
+ .slave = &am33xx_eqep2_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+struct omap_hwmod_ocp_if am33xx_epwmss2__ehrpwm2 = {
+ .master = &am33xx_epwmss2_hwmod,
+ .slave = &am33xx_ehrpwm2_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l3s cfg -> gpmc */
+static struct omap_hwmod_addr_space am33xx_gpmc_addr_space[] = {
+ {
+ .pa_start = 0x50000000,
+ .pa_end = 0x50000000 + SZ_8K - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l3_s__gpmc = {
+ .master = &am33xx_l3_s_hwmod,
+ .slave = &am33xx_gpmc_hwmod,
+ .clk = "l3s_gclk",
+ .addr = am33xx_gpmc_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+/* i2c2 */
+struct omap_hwmod_ocp_if am33xx_l4_per__i2c2 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_i2c2_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+struct omap_hwmod_ocp_if am33xx_l4_per__i2c3 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_i2c3_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am33xx_mailbox_addrs[] = {
+ {
+ .pa_start = 0x480C8000,
+ .pa_end = 0x480C8000 + (SZ_4K - 1),
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+/* l4 ls -> mailbox */
+struct omap_hwmod_ocp_if am33xx_l4_per__mailbox = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_mailbox_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_mailbox_addrs,
+ .user = OCP_USER_MPU,
+};
+
+/* l4 ls -> spinlock */
+struct omap_hwmod_ocp_if am33xx_l4_ls__spinlock = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_spinlock_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l4 ls -> mcasp0 */
+static struct omap_hwmod_addr_space am33xx_mcasp0_addr_space[] = {
+ {
+ .pa_start = 0x48038000,
+ .pa_end = 0x48038000 + SZ_8K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l4_ls__mcasp0 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_mcasp0_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_mcasp0_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+/* l4 ls -> mcasp1 */
+static struct omap_hwmod_addr_space am33xx_mcasp1_addr_space[] = {
+ {
+ .pa_start = 0x4803C000,
+ .pa_end = 0x4803C000 + SZ_8K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l4_ls__mcasp1 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_mcasp1_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_mcasp1_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+/* l4 ls -> mmc0 */
+static struct omap_hwmod_addr_space am33xx_mmc0_addr_space[] = {
+ {
+ .pa_start = 0x48060100,
+ .pa_end = 0x48060100 + SZ_4K - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l4_ls__mmc0 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_mmc0_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_mmc0_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+/* l4 ls -> mmc1 */
+static struct omap_hwmod_addr_space am33xx_mmc1_addr_space[] = {
+ {
+ .pa_start = 0x481d8100,
+ .pa_end = 0x481d8100 + SZ_4K - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l4_ls__mmc1 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_mmc1_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_mmc1_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+/* l3 s -> mmc2 */
+static struct omap_hwmod_addr_space am33xx_mmc2_addr_space[] = {
+ {
+ .pa_start = 0x47810100,
+ .pa_end = 0x47810100 + SZ_64K - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l3_s__mmc2 = {
+ .master = &am33xx_l3_s_hwmod,
+ .slave = &am33xx_mmc2_hwmod,
+ .clk = "l3s_gclk",
+ .addr = am33xx_mmc2_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+/* l4 ls -> mcspi0 */
+struct omap_hwmod_ocp_if am33xx_l4_ls__mcspi0 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_spi0_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l4 ls -> mcspi1 */
+struct omap_hwmod_ocp_if am33xx_l4_ls__mcspi1 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_spi1_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l4 per -> timer2 */
+struct omap_hwmod_ocp_if am33xx_l4_ls__timer2 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_timer2_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l4 per -> timer3 */
+struct omap_hwmod_ocp_if am33xx_l4_ls__timer3 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_timer3_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l4 per -> timer4 */
+struct omap_hwmod_ocp_if am33xx_l4_ls__timer4 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_timer4_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l4 per -> timer5 */
+struct omap_hwmod_ocp_if am33xx_l4_ls__timer5 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_timer5_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l4 per -> timer6 */
+struct omap_hwmod_ocp_if am33xx_l4_ls__timer6 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_timer6_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l4 per -> timer7 */
+struct omap_hwmod_ocp_if am33xx_l4_ls__timer7 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_timer7_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l3 main -> tpcc */
+struct omap_hwmod_ocp_if am33xx_l3_main__tpcc = {
+ .master = &am33xx_l3_main_hwmod,
+ .slave = &am33xx_tpcc_hwmod,
+ .clk = "l3_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l3 main -> tptc0 */
+static struct omap_hwmod_addr_space am33xx_tptc0_addr_space[] = {
+ {
+ .pa_start = 0x49800000,
+ .pa_end = 0x49800000 + SZ_8K - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l3_main__tptc0 = {
+ .master = &am33xx_l3_main_hwmod,
+ .slave = &am33xx_tptc0_hwmod,
+ .clk = "l3_gclk",
+ .addr = am33xx_tptc0_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+/* l3 main -> tptc1 */
+static struct omap_hwmod_addr_space am33xx_tptc1_addr_space[] = {
+ {
+ .pa_start = 0x49900000,
+ .pa_end = 0x49900000 + SZ_8K - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l3_main__tptc1 = {
+ .master = &am33xx_l3_main_hwmod,
+ .slave = &am33xx_tptc1_hwmod,
+ .clk = "l3_gclk",
+ .addr = am33xx_tptc1_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+/* l3 main -> tptc2 */
+static struct omap_hwmod_addr_space am33xx_tptc2_addr_space[] = {
+ {
+ .pa_start = 0x49a00000,
+ .pa_end = 0x49a00000 + SZ_8K - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l3_main__tptc2 = {
+ .master = &am33xx_l3_main_hwmod,
+ .slave = &am33xx_tptc2_hwmod,
+ .clk = "l3_gclk",
+ .addr = am33xx_tptc2_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+/* l4 ls -> uart2 */
+struct omap_hwmod_ocp_if am33xx_l4_ls__uart2 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_uart2_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l4 ls -> uart3 */
+struct omap_hwmod_ocp_if am33xx_l4_ls__uart3 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_uart3_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l4 ls -> uart4 */
+struct omap_hwmod_ocp_if am33xx_l4_ls__uart4 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_uart4_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l4 ls -> uart5 */
+struct omap_hwmod_ocp_if am33xx_l4_ls__uart5 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_uart5_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l4 ls -> uart6 */
+struct omap_hwmod_ocp_if am33xx_l4_ls__uart6 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_uart6_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+/* l3 main -> ocmc */
+struct omap_hwmod_ocp_if am33xx_l3_main__ocmc = {
+ .master = &am33xx_l3_main_hwmod,
+ .slave = &am33xx_ocmcram_hwmod,
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3 main -> sha0 HIB2 */
+static struct omap_hwmod_addr_space am33xx_sha0_addrs[] = {
+ {
+ .pa_start = 0x53100000,
+ .pa_end = 0x53100000 + SZ_512 - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l3_main__sha0 = {
+ .master = &am33xx_l3_main_hwmod,
+ .slave = &am33xx_sha0_hwmod,
+ .clk = "sha0_fck",
+ .addr = am33xx_sha0_addrs,
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3 main -> AES0 HIB2 */
+static struct omap_hwmod_addr_space am33xx_aes0_addrs[] = {
+ {
+ .pa_start = 0x53500000,
+ .pa_end = 0x53500000 + SZ_1M - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_ocp_if am33xx_l3_main__aes0 = {
+ .master = &am33xx_l3_main_hwmod,
+ .slave = &am33xx_aes0_hwmod,
+ .clk = "aes0_fck",
+ .addr = am33xx_aes0_addrs,
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
new file mode 100644
index 000000000000..0f178623e7da
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -0,0 +1,1469 @@
+/*
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ *
+ * Hwmod common for AM335x and AM43x
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_data/gpio-omap.h>
+#include <linux/platform_data/spi-omap2-mcspi.h>
+#include "omap_hwmod.h"
+#include "i2c.h"
+#include "mmc.h"
+#include "wd_timer.h"
+#include "cm33xx.h"
+#include "prm33xx.h"
+#include "omap_hwmod_33xx_43xx_common_data.h"
+#include "prcm43xx.h"
+
+#define CLKCTRL(oh, clkctrl) ((oh).prcm.omap4.clkctrl_offs = (clkctrl))
+#define RSTCTRL(oh, rstctrl) ((oh).prcm.omap4.rstctrl_offs = (rstctrl))
+#define RSTST(oh, rstst) ((oh).prcm.omap4.rstst_offs = (rstst))
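CLKCTRL(), RSTCTRL() and RSTST() are assignment shorthands used at the bottom of this file (omap_hwmod_am33xx_clkctrl() and friends) to fill in per-module PRCM register offsets without spelling out the .prcm.omap4.* field path each time. A cut-down expansion with stand-in types (the demo_* names are invented; the real fields live in the omap_hwmod structures):

/* Stand-in structures, just to show what the macro expands to. */
struct demo_omap4_prcm { unsigned short clkctrl_offs, rstctrl_offs, rstst_offs; };
struct demo_hwmod { struct { struct demo_omap4_prcm omap4; } prcm; };

#define DEMO_CLKCTRL(oh, clkctrl) ((oh).prcm.omap4.clkctrl_offs = (clkctrl))

static struct demo_hwmod demo_uart2;

int main(void)
{
	/* Expands to: demo_uart2.prcm.omap4.clkctrl_offs = 0x6c; */
	DEMO_CLKCTRL(demo_uart2, 0x6c);
	return demo_uart2.prcm.omap4.clkctrl_offs == 0x6c ? 0 : 1;
}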
+
+/*
+ * 'l3' class
+ * instance(s): l3_main, l3_s, l3_instr
+ */
+static struct omap_hwmod_class am33xx_l3_hwmod_class = {
+ .name = "l3",
+};
+
+struct omap_hwmod am33xx_l3_main_hwmod = {
+ .name = "l3_main",
+ .class = &am33xx_l3_hwmod_class,
+ .clkdm_name = "l3_clkdm",
+ .flags = HWMOD_INIT_NO_IDLE,
+ .main_clk = "l3_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* l3_s */
+struct omap_hwmod am33xx_l3_s_hwmod = {
+ .name = "l3_s",
+ .class = &am33xx_l3_hwmod_class,
+ .clkdm_name = "l3s_clkdm",
+};
+
+/* l3_instr */
+struct omap_hwmod am33xx_l3_instr_hwmod = {
+ .name = "l3_instr",
+ .class = &am33xx_l3_hwmod_class,
+ .clkdm_name = "l3_clkdm",
+ .flags = HWMOD_INIT_NO_IDLE,
+ .main_clk = "l3_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/*
+ * 'l4' class
+ * instance(s): l4_ls, l4_hs, l4_wkup, l4_fw
+ */
+struct omap_hwmod_class am33xx_l4_hwmod_class = {
+ .name = "l4",
+};
+
+/* l4_ls */
+struct omap_hwmod am33xx_l4_ls_hwmod = {
+ .name = "l4_ls",
+ .class = &am33xx_l4_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_INIT_NO_IDLE,
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* l4_wkup */
+struct omap_hwmod am33xx_l4_wkup_hwmod = {
+ .name = "l4_wkup",
+ .class = &am33xx_l4_hwmod_class,
+ .clkdm_name = "l4_wkup_clkdm",
+ .flags = HWMOD_INIT_NO_IDLE,
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/*
+ * 'mpu' class
+ */
+static struct omap_hwmod_class am33xx_mpu_hwmod_class = {
+ .name = "mpu",
+};
+
+struct omap_hwmod am33xx_mpu_hwmod = {
+ .name = "mpu",
+ .class = &am33xx_mpu_hwmod_class,
+ .clkdm_name = "mpu_clkdm",
+ .flags = HWMOD_INIT_NO_IDLE,
+ .main_clk = "dpll_mpu_m2_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/*
+ * 'wakeup m3' class
+ * Wakeup controller sub-system under wakeup domain
+ */
+struct omap_hwmod_class am33xx_wkup_m3_hwmod_class = {
+ .name = "wkup_m3",
+};
+
+/*
+ * 'pru-icss' class
+ * Programmable Real-Time Unit and Industrial Communication Subsystem
+ */
+static struct omap_hwmod_class am33xx_pruss_hwmod_class = {
+ .name = "pruss",
+};
+
+static struct omap_hwmod_rst_info am33xx_pruss_resets[] = {
+ { .name = "pruss", .rst_shift = 1 },
+};
+
+/* pru-icss */
+/* Pseudo hwmod for reset control purposes only */
+struct omap_hwmod am33xx_pruss_hwmod = {
+ .name = "pruss",
+ .class = &am33xx_pruss_hwmod_class,
+ .clkdm_name = "pruss_ocp_clkdm",
+ .main_clk = "pruss_ocp_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .rst_lines = am33xx_pruss_resets,
+ .rst_lines_cnt = ARRAY_SIZE(am33xx_pruss_resets),
+};
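The pruss and gfx entries are reset-control-only pseudo hwmods: what matters is the rst_lines table, where rst_shift is (by assumption here, matching how the PRM code builds its masks) the bit position of that line in the domain's RM_*_RSTCTRL register. A minimal sketch of the mask derivation (rst_line_mask() is an illustrative name):

#include <assert.h>

/* Assumption: a reset line's RSTCTRL/RSTST mask is 1 << rst_shift. */
static unsigned int rst_line_mask(unsigned int rst_shift)
{
	return 1u << rst_shift;
}

int main(void)
{
	assert(rst_line_mask(1) == 0x2);	/* "pruss" line, rst_shift = 1 */
	assert(rst_line_mask(0) == 0x1);	/* "gfx" line, rst_shift = 0 */
	return 0;
}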
+
+/* gfx */
+/* Pseudo hwmod for reset control purposes only */
+static struct omap_hwmod_class am33xx_gfx_hwmod_class = {
+ .name = "gfx",
+};
+
+static struct omap_hwmod_rst_info am33xx_gfx_resets[] = {
+ { .name = "gfx", .rst_shift = 0, .st_shift = 0},
+};
+
+struct omap_hwmod am33xx_gfx_hwmod = {
+ .name = "gfx",
+ .class = &am33xx_gfx_hwmod_class,
+ .clkdm_name = "gfx_l3_clkdm",
+ .main_clk = "gfx_fck_div_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .rst_lines = am33xx_gfx_resets,
+ .rst_lines_cnt = ARRAY_SIZE(am33xx_gfx_resets),
+};
+
+/*
+ * 'prcm' class
+ * power and reset manager (whole prcm infrastructure)
+ */
+static struct omap_hwmod_class am33xx_prcm_hwmod_class = {
+ .name = "prcm",
+};
+
+/* prcm */
+struct omap_hwmod am33xx_prcm_hwmod = {
+ .name = "prcm",
+ .class = &am33xx_prcm_hwmod_class,
+ .clkdm_name = "l4_wkup_clkdm",
+};
+
+/*
+ * 'aes0' class
+ */
+static struct omap_hwmod_class_sysconfig am33xx_aes0_sysc = {
+ .rev_offs = 0x80,
+ .sysc_offs = 0x84,
+ .syss_offs = 0x88,
+ .sysc_flags = SYSS_HAS_RESET_STATUS,
+};
+
+static struct omap_hwmod_class am33xx_aes0_hwmod_class = {
+ .name = "aes0",
+ .sysc = &am33xx_aes0_sysc,
+};
+
+struct omap_hwmod am33xx_aes0_hwmod = {
+ .name = "aes",
+ .class = &am33xx_aes0_hwmod_class,
+ .clkdm_name = "l3_clkdm",
+ .main_clk = "aes0_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* sha0 HIB2 (the 'P' (public) device) */
+static struct omap_hwmod_class_sysconfig am33xx_sha0_sysc = {
+ .rev_offs = 0x100,
+ .sysc_offs = 0x110,
+ .syss_offs = 0x114,
+ .sysc_flags = SYSS_HAS_RESET_STATUS,
+};
+
+static struct omap_hwmod_class am33xx_sha0_hwmod_class = {
+ .name = "sha0",
+ .sysc = &am33xx_sha0_sysc,
+};
+
+struct omap_hwmod am33xx_sha0_hwmod = {
+ .name = "sham",
+ .class = &am33xx_sha0_hwmod_class,
+ .clkdm_name = "l3_clkdm",
+ .main_clk = "l3_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* ocmcram */
+static struct omap_hwmod_class am33xx_ocmcram_hwmod_class = {
+ .name = "ocmcram",
+};
+
+struct omap_hwmod am33xx_ocmcram_hwmod = {
+ .name = "ocmcram",
+ .class = &am33xx_ocmcram_hwmod_class,
+ .clkdm_name = "l3_clkdm",
+ .flags = HWMOD_INIT_NO_IDLE,
+ .main_clk = "l3_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* 'smartreflex' class */
+static struct omap_hwmod_class am33xx_smartreflex_hwmod_class = {
+ .name = "smartreflex",
+};
+
+/* smartreflex0 */
+struct omap_hwmod am33xx_smartreflex0_hwmod = {
+ .name = "smartreflex0",
+ .class = &am33xx_smartreflex_hwmod_class,
+ .clkdm_name = "l4_wkup_clkdm",
+ .main_clk = "smartreflex0_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* smartreflex1 */
+struct omap_hwmod am33xx_smartreflex1_hwmod = {
+ .name = "smartreflex1",
+ .class = &am33xx_smartreflex_hwmod_class,
+ .clkdm_name = "l4_wkup_clkdm",
+ .main_clk = "smartreflex1_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/*
+ * 'control' module class
+ */
+struct omap_hwmod_class am33xx_control_hwmod_class = {
+ .name = "control",
+};
+
+/*
+ * 'cpgmac' class
+ * cpsw/cpgmac sub system
+ */
+static struct omap_hwmod_class_sysconfig am33xx_cpgmac_sysc = {
+ .rev_offs = 0x0,
+ .sysc_offs = 0x8,
+ .syss_offs = 0x4,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
+ SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | MSTANDBY_FORCE |
+ MSTANDBY_NO),
+ .sysc_fields = &omap_hwmod_sysc_type3,
+};
+
+static struct omap_hwmod_class am33xx_cpgmac0_hwmod_class = {
+ .name = "cpgmac0",
+ .sysc = &am33xx_cpgmac_sysc,
+};
+
+struct omap_hwmod am33xx_cpgmac0_hwmod = {
+ .name = "cpgmac0",
+ .class = &am33xx_cpgmac0_hwmod_class,
+ .clkdm_name = "cpsw_125mhz_clkdm",
+ .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
+ .main_clk = "cpsw_125mhz_gclk",
+ .mpu_rt_idx = 1,
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/*
+ * mdio class
+ */
+static struct omap_hwmod_class am33xx_mdio_hwmod_class = {
+ .name = "davinci_mdio",
+};
+
+struct omap_hwmod am33xx_mdio_hwmod = {
+ .name = "davinci_mdio",
+ .class = &am33xx_mdio_hwmod_class,
+ .clkdm_name = "cpsw_125mhz_clkdm",
+ .main_clk = "cpsw_125mhz_gclk",
+};
+
+/*
+ * dcan class
+ */
+static struct omap_hwmod_class am33xx_dcan_hwmod_class = {
+ .name = "d_can",
+};
+
+/* dcan0 */
+struct omap_hwmod am33xx_dcan0_hwmod = {
+ .name = "d_can0",
+ .class = &am33xx_dcan_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "dcan0_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* dcan1 */
+struct omap_hwmod am33xx_dcan1_hwmod = {
+ .name = "d_can1",
+ .class = &am33xx_dcan_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "dcan1_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* elm */
+static struct omap_hwmod_class_sysconfig am33xx_elm_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+ SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class am33xx_elm_hwmod_class = {
+ .name = "elm",
+ .sysc = &am33xx_elm_sysc,
+};
+
+struct omap_hwmod am33xx_elm_hwmod = {
+ .name = "elm",
+ .class = &am33xx_elm_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* pwmss */
+static struct omap_hwmod_class_sysconfig am33xx_epwmss_sysc = {
+ .rev_offs = 0x0,
+ .sysc_offs = 0x4,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+ MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+struct omap_hwmod_class am33xx_epwmss_hwmod_class = {
+ .name = "epwmss",
+ .sysc = &am33xx_epwmss_sysc,
+};
+
+static struct omap_hwmod_class am33xx_ecap_hwmod_class = {
+ .name = "ecap",
+};
+
+static struct omap_hwmod_class am33xx_eqep_hwmod_class = {
+ .name = "eqep",
+};
+
+struct omap_hwmod_class am33xx_ehrpwm_hwmod_class = {
+ .name = "ehrpwm",
+};
+
+/* epwmss0 */
+struct omap_hwmod am33xx_epwmss0_hwmod = {
+ .name = "epwmss0",
+ .class = &am33xx_epwmss_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* ecap0 */
+struct omap_hwmod am33xx_ecap0_hwmod = {
+ .name = "ecap0",
+ .class = &am33xx_ecap_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+};
+
+/* eqep0 */
+struct omap_hwmod am33xx_eqep0_hwmod = {
+ .name = "eqep0",
+ .class = &am33xx_eqep_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+};
+
+/* ehrpwm0 */
+struct omap_hwmod am33xx_ehrpwm0_hwmod = {
+ .name = "ehrpwm0",
+ .class = &am33xx_ehrpwm_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+};
+
+/* epwmss1 */
+struct omap_hwmod am33xx_epwmss1_hwmod = {
+ .name = "epwmss1",
+ .class = &am33xx_epwmss_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* ecap1 */
+struct omap_hwmod am33xx_ecap1_hwmod = {
+ .name = "ecap1",
+ .class = &am33xx_ecap_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+};
+
+/* eqep1 */
+struct omap_hwmod am33xx_eqep1_hwmod = {
+ .name = "eqep1",
+ .class = &am33xx_eqep_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+};
+
+/* ehrpwm1 */
+struct omap_hwmod am33xx_ehrpwm1_hwmod = {
+ .name = "ehrpwm1",
+ .class = &am33xx_ehrpwm_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+};
+
+/* epwmss2 */
+struct omap_hwmod am33xx_epwmss2_hwmod = {
+ .name = "epwmss2",
+ .class = &am33xx_epwmss_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* ecap2 */
+struct omap_hwmod am33xx_ecap2_hwmod = {
+ .name = "ecap2",
+ .class = &am33xx_ecap_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+};
+
+/* eqep2 */
+struct omap_hwmod am33xx_eqep2_hwmod = {
+ .name = "eqep2",
+ .class = &am33xx_eqep_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+};
+
+/* ehrpwm2 */
+struct omap_hwmod am33xx_ehrpwm2_hwmod = {
+ .name = "ehrpwm2",
+ .class = &am33xx_ehrpwm_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+};
+
+/*
+ * 'gpio' class: for gpio 0,1,2,3
+ */
+static struct omap_hwmod_class_sysconfig am33xx_gpio_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0114,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class am33xx_gpio_hwmod_class = {
+ .name = "gpio",
+ .sysc = &am33xx_gpio_sysc,
+ .rev = 2,
+};
+
+struct omap_gpio_dev_attr gpio_dev_attr = {
+ .bank_width = 32,
+ .dbck_flag = true,
+};
+
+/* gpio1 */
+static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
+ { .role = "dbclk", .clk = "gpio1_dbclk" },
+};
+
+struct omap_hwmod am33xx_gpio1_hwmod = {
+ .name = "gpio2",
+ .class = &am33xx_gpio_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .opt_clks = gpio1_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(gpio1_opt_clks),
+ .dev_attr = &gpio_dev_attr,
+};
+
+/* gpio2 */
+static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
+ { .role = "dbclk", .clk = "gpio2_dbclk" },
+};
+
+struct omap_hwmod am33xx_gpio2_hwmod = {
+ .name = "gpio3",
+ .class = &am33xx_gpio_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .opt_clks = gpio2_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(gpio2_opt_clks),
+ .dev_attr = &gpio_dev_attr,
+};
+
+/* gpio3 */
+static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
+ { .role = "dbclk", .clk = "gpio3_dbclk" },
+};
+
+struct omap_hwmod am33xx_gpio3_hwmod = {
+ .name = "gpio4",
+ .class = &am33xx_gpio_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .opt_clks = gpio3_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks),
+ .dev_attr = &gpio_dev_attr,
+};
+
+/* gpmc */
+static struct omap_hwmod_class_sysconfig gpmc_sysc = {
+ .rev_offs = 0x0,
+ .sysc_offs = 0x10,
+ .syss_offs = 0x14,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class am33xx_gpmc_hwmod_class = {
+ .name = "gpmc",
+ .sysc = &gpmc_sysc,
+};
+
+struct omap_hwmod am33xx_gpmc_hwmod = {
+ .name = "gpmc",
+ .class = &am33xx_gpmc_hwmod_class,
+ .clkdm_name = "l3s_clkdm",
+ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
+ .main_clk = "l3s_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* 'i2c' class */
+static struct omap_hwmod_class_sysconfig am33xx_i2c_sysc = {
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0090,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class i2c_class = {
+ .name = "i2c",
+ .sysc = &am33xx_i2c_sysc,
+ .rev = OMAP_I2C_IP_VERSION_2,
+ .reset = &omap_i2c_reset,
+};
+
+static struct omap_i2c_dev_attr i2c_dev_attr = {
+ .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE,
+};
+
+/* i2c1 */
+struct omap_hwmod am33xx_i2c1_hwmod = {
+ .name = "i2c1",
+ .class = &i2c_class,
+ .clkdm_name = "l4_wkup_clkdm",
+ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+ .main_clk = "dpll_per_m2_div4_wkupdm_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .dev_attr = &i2c_dev_attr,
+};
+
+/* i2c2 */
+struct omap_hwmod am33xx_i2c2_hwmod = {
+ .name = "i2c2",
+ .class = &i2c_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+ .main_clk = "dpll_per_m2_div4_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .dev_attr = &i2c_dev_attr,
+};
+
+/* i2c3 */
+struct omap_hwmod am33xx_i2c3_hwmod = {
+ .name = "i2c3",
+ .class = &i2c_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+ .main_clk = "dpll_per_m2_div4_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .dev_attr = &i2c_dev_attr,
+};
+
+/*
+ * 'mailbox' class
+ * mailbox module allowing communication between the on-chip processors using a
+ * queued mailbox-interrupt mechanism.
+ */
+static struct omap_hwmod_class_sysconfig am33xx_mailbox_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class am33xx_mailbox_hwmod_class = {
+ .name = "mailbox",
+ .sysc = &am33xx_mailbox_sysc,
+};
+
+struct omap_hwmod am33xx_mailbox_hwmod = {
+ .name = "mailbox",
+ .class = &am33xx_mailbox_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/*
+ * 'mcasp' class
+ */
+static struct omap_hwmod_class_sysconfig am33xx_mcasp_sysc = {
+ .rev_offs = 0x0,
+ .sysc_offs = 0x4,
+ .sysc_flags = SYSC_HAS_SIDLEMODE,
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type3,
+};
+
+static struct omap_hwmod_class am33xx_mcasp_hwmod_class = {
+ .name = "mcasp",
+ .sysc = &am33xx_mcasp_sysc,
+};
+
+/* mcasp0 */
+struct omap_hwmod am33xx_mcasp0_hwmod = {
+ .name = "mcasp0",
+ .class = &am33xx_mcasp_hwmod_class,
+ .clkdm_name = "l3s_clkdm",
+ .main_clk = "mcasp0_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* mcasp1 */
+struct omap_hwmod am33xx_mcasp1_hwmod = {
+ .name = "mcasp1",
+ .class = &am33xx_mcasp_hwmod_class,
+ .clkdm_name = "l3s_clkdm",
+ .main_clk = "mcasp1_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* 'mmc' class */
+static struct omap_hwmod_class_sysconfig am33xx_mmc_sysc = {
+ .rev_offs = 0x1fc,
+ .sysc_offs = 0x10,
+ .syss_offs = 0x14,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class am33xx_mmc_hwmod_class = {
+ .name = "mmc",
+ .sysc = &am33xx_mmc_sysc,
+};
+
+/* mmc0 */
+static struct omap_mmc_dev_attr am33xx_mmc0_dev_attr = {
+ .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
+};
+
+struct omap_hwmod am33xx_mmc0_hwmod = {
+ .name = "mmc1",
+ .class = &am33xx_mmc_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "mmc_clk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .dev_attr = &am33xx_mmc0_dev_attr,
+};
+
+/* mmc1 */
+static struct omap_mmc_dev_attr am33xx_mmc1_dev_attr = {
+ .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
+};
+
+struct omap_hwmod am33xx_mmc1_hwmod = {
+ .name = "mmc2",
+ .class = &am33xx_mmc_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "mmc_clk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .dev_attr = &am33xx_mmc1_dev_attr,
+};
+
+/* mmc2 */
+static struct omap_mmc_dev_attr am33xx_mmc2_dev_attr = {
+ .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
+};
+struct omap_hwmod am33xx_mmc2_hwmod = {
+ .name = "mmc3",
+ .class = &am33xx_mmc_hwmod_class,
+ .clkdm_name = "l3s_clkdm",
+ .main_clk = "mmc_clk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .dev_attr = &am33xx_mmc2_dev_attr,
+};
+
+/*
+ * 'rtc' class
+ * rtc subsystem
+ */
+static struct omap_hwmod_class_sysconfig am33xx_rtc_sysc = {
+ .rev_offs = 0x0074,
+ .sysc_offs = 0x0078,
+ .sysc_flags = SYSC_HAS_SIDLEMODE,
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO |
+ SIDLE_SMART | SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type3,
+};
+
+static struct omap_hwmod_class am33xx_rtc_hwmod_class = {
+ .name = "rtc",
+ .sysc = &am33xx_rtc_sysc,
+};
+
+struct omap_hwmod am33xx_rtc_hwmod = {
+ .name = "rtc",
+ .class = &am33xx_rtc_hwmod_class,
+ .clkdm_name = "l4_rtc_clkdm",
+ .main_clk = "clk_32768_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* 'spi' class */
+static struct omap_hwmod_class_sysconfig am33xx_mcspi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0110,
+ .syss_offs = 0x0114,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+ SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class am33xx_spi_hwmod_class = {
+ .name = "mcspi",
+ .sysc = &am33xx_mcspi_sysc,
+ .rev = OMAP4_MCSPI_REV,
+};
+
+/* spi0 */
+struct omap2_mcspi_dev_attr mcspi_attrib = {
+ .num_chipselect = 2,
+};
+struct omap_hwmod am33xx_spi0_hwmod = {
+ .name = "spi0",
+ .class = &am33xx_spi_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "dpll_per_m2_div4_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .dev_attr = &mcspi_attrib,
+};
+
+/* spi1 */
+struct omap_hwmod am33xx_spi1_hwmod = {
+ .name = "spi1",
+ .class = &am33xx_spi_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "dpll_per_m2_div4_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .dev_attr = &mcspi_attrib,
+};
+
+/*
+ * 'spinlock' class
+ * spinlock provides hardware assistance for synchronizing the
+ * processes running on multiple processors
+ */
+
+static struct omap_hwmod_class_sysconfig am33xx_spinlock_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class am33xx_spinlock_hwmod_class = {
+ .name = "spinlock",
+ .sysc = &am33xx_spinlock_sysc,
+};
+
+struct omap_hwmod am33xx_spinlock_hwmod = {
+ .name = "spinlock",
+ .class = &am33xx_spinlock_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* 'timer 2-7' class */
+static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+struct omap_hwmod_class am33xx_timer_hwmod_class = {
+ .name = "timer",
+ .sysc = &am33xx_timer_sysc,
+};
+
+/* timer1 1ms */
+static struct omap_hwmod_class_sysconfig am33xx_timer1ms_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+ SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class am33xx_timer1ms_hwmod_class = {
+ .name = "timer",
+ .sysc = &am33xx_timer1ms_sysc,
+};
+
+struct omap_hwmod am33xx_timer1_hwmod = {
+ .name = "timer1",
+ .class = &am33xx_timer1ms_hwmod_class,
+ .clkdm_name = "l4_wkup_clkdm",
+ .main_clk = "timer1_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+struct omap_hwmod am33xx_timer2_hwmod = {
+ .name = "timer2",
+ .class = &am33xx_timer_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "timer2_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+struct omap_hwmod am33xx_timer3_hwmod = {
+ .name = "timer3",
+ .class = &am33xx_timer_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "timer3_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+struct omap_hwmod am33xx_timer4_hwmod = {
+ .name = "timer4",
+ .class = &am33xx_timer_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "timer4_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+struct omap_hwmod am33xx_timer5_hwmod = {
+ .name = "timer5",
+ .class = &am33xx_timer_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "timer5_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+struct omap_hwmod am33xx_timer6_hwmod = {
+ .name = "timer6",
+ .class = &am33xx_timer_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "timer6_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+struct omap_hwmod am33xx_timer7_hwmod = {
+ .name = "timer7",
+ .class = &am33xx_timer_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "timer7_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* tpcc */
+static struct omap_hwmod_class am33xx_tpcc_hwmod_class = {
+ .name = "tpcc",
+};
+
+struct omap_hwmod am33xx_tpcc_hwmod = {
+ .name = "tpcc",
+ .class = &am33xx_tpcc_hwmod_class,
+ .clkdm_name = "l3_clkdm",
+ .main_clk = "l3_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod_class_sysconfig am33xx_tptc_sysc = {
+ .rev_offs = 0x0,
+ .sysc_offs = 0x10,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_MIDLEMODE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_SMART | MSTANDBY_FORCE),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+/* 'tptc' class */
+static struct omap_hwmod_class am33xx_tptc_hwmod_class = {
+ .name = "tptc",
+ .sysc = &am33xx_tptc_sysc,
+};
+
+/* tptc0 */
+struct omap_hwmod am33xx_tptc0_hwmod = {
+ .name = "tptc0",
+ .class = &am33xx_tptc_hwmod_class,
+ .clkdm_name = "l3_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
+ .main_clk = "l3_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* tptc1 */
+struct omap_hwmod am33xx_tptc1_hwmod = {
+ .name = "tptc1",
+ .class = &am33xx_tptc_hwmod_class,
+ .clkdm_name = "l3_clkdm",
+ .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
+ .main_clk = "l3_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* tptc2 */
+struct omap_hwmod am33xx_tptc2_hwmod = {
+ .name = "tptc2",
+ .class = &am33xx_tptc_hwmod_class,
+ .clkdm_name = "l3_clkdm",
+ .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
+ .main_clk = "l3_gclk",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* 'uart' class */
+static struct omap_hwmod_class_sysconfig uart_sysc = {
+ .rev_offs = 0x50,
+ .sysc_offs = 0x54,
+ .syss_offs = 0x58,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class uart_class = {
+ .name = "uart",
+ .sysc = &uart_sysc,
+};
+
+struct omap_hwmod am33xx_uart1_hwmod = {
+ .name = "uart1",
+ .class = &uart_class,
+ .clkdm_name = "l4_wkup_clkdm",
+ .flags = DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
+ .main_clk = "dpll_per_m2_div4_wkupdm_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+struct omap_hwmod am33xx_uart2_hwmod = {
+ .name = "uart2",
+ .class = &uart_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .main_clk = "dpll_per_m2_div4_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* uart3 */
+struct omap_hwmod am33xx_uart3_hwmod = {
+ .name = "uart3",
+ .class = &uart_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .main_clk = "dpll_per_m2_div4_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+struct omap_hwmod am33xx_uart4_hwmod = {
+ .name = "uart4",
+ .class = &uart_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .main_clk = "dpll_per_m2_div4_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+struct omap_hwmod am33xx_uart5_hwmod = {
+ .name = "uart5",
+ .class = &uart_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .main_clk = "dpll_per_m2_div4_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+struct omap_hwmod am33xx_uart6_hwmod = {
+ .name = "uart6",
+ .class = &uart_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .main_clk = "dpll_per_m2_div4_ck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* 'wd_timer' class */
+static struct omap_hwmod_class_sysconfig wdt_sysc = {
+ .rev_offs = 0x0,
+ .sysc_offs = 0x10,
+ .syss_offs = 0x14,
+ .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class am33xx_wd_timer_hwmod_class = {
+ .name = "wd_timer",
+ .sysc = &wdt_sysc,
+ .pre_shutdown = &omap2_wd_timer_disable,
+};
+
+/*
+ * XXX: device.c uses the hardcoded name "wd_timer2" for the watchdog
+ * timer driver, so we use the same name here for now.
+ */
+struct omap_hwmod am33xx_wd_timer1_hwmod = {
+ .name = "wd_timer2",
+ .class = &am33xx_wd_timer_hwmod_class,
+ .clkdm_name = "l4_wkup_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE,
+ .main_clk = "wdt1_fck",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
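+/*
+ * AM33xx and AM43xx share the same hwmod structures but place their
+ * PRCM CLKCTRL/RSTCTRL registers at different offsets.  The CLKCTRL(),
+ * RSTCTRL() and RSTST() helpers fill in the per-SoC offsets
+ * (.prcm.omap4.clkctrl_offs and friends) at init time, via the
+ * omap_hwmod_am33xx_reg()/omap_hwmod_am43xx_reg() entry points below.
+ */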
+static void omap_hwmod_am33xx_clkctrl(void)
+{
+ CLKCTRL(am33xx_uart2_hwmod, AM33XX_CM_PER_UART1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_uart3_hwmod, AM33XX_CM_PER_UART2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_uart4_hwmod, AM33XX_CM_PER_UART3_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_uart5_hwmod, AM33XX_CM_PER_UART4_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_uart6_hwmod, AM33XX_CM_PER_UART5_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_dcan0_hwmod, AM33XX_CM_PER_DCAN0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_dcan1_hwmod, AM33XX_CM_PER_DCAN1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_elm_hwmod, AM33XX_CM_PER_ELM_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_epwmss0_hwmod, AM33XX_CM_PER_EPWMSS0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_epwmss1_hwmod, AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_epwmss2_hwmod, AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_gpio1_hwmod, AM33XX_CM_PER_GPIO1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_gpio2_hwmod, AM33XX_CM_PER_GPIO2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_gpio3_hwmod, AM33XX_CM_PER_GPIO3_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_i2c2_hwmod, AM33XX_CM_PER_I2C1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_i2c3_hwmod, AM33XX_CM_PER_I2C2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mailbox_hwmod, AM33XX_CM_PER_MAILBOX0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mcasp0_hwmod, AM33XX_CM_PER_MCASP0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mcasp1_hwmod, AM33XX_CM_PER_MCASP1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mmc0_hwmod, AM33XX_CM_PER_MMC0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mmc1_hwmod, AM33XX_CM_PER_MMC1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_spi0_hwmod, AM33XX_CM_PER_SPI0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_spi1_hwmod, AM33XX_CM_PER_SPI1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_spinlock_hwmod, AM33XX_CM_PER_SPINLOCK_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer2_hwmod, AM33XX_CM_PER_TIMER2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer3_hwmod, AM33XX_CM_PER_TIMER3_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer4_hwmod, AM33XX_CM_PER_TIMER4_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer5_hwmod, AM33XX_CM_PER_TIMER5_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer6_hwmod, AM33XX_CM_PER_TIMER6_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer7_hwmod, AM33XX_CM_PER_TIMER7_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_smartreflex0_hwmod,
+ AM33XX_CM_WKUP_SMARTREFLEX0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_smartreflex1_hwmod,
+ AM33XX_CM_WKUP_SMARTREFLEX1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_uart1_hwmod, AM33XX_CM_WKUP_UART0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer1_hwmod, AM33XX_CM_WKUP_TIMER1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_i2c1_hwmod, AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_wd_timer1_hwmod, AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_rtc_hwmod, AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mmc2_hwmod, AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_gpmc_hwmod, AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_l4_ls_hwmod, AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_l4_wkup_hwmod, AM33XX_CM_WKUP_L4WKUP_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_l3_main_hwmod, AM33XX_CM_PER_L3_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_tpcc_hwmod, AM33XX_CM_PER_TPCC_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_tptc0_hwmod, AM33XX_CM_PER_TPTC0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_tptc1_hwmod, AM33XX_CM_PER_TPTC1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_tptc2_hwmod, AM33XX_CM_PER_TPTC2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_gfx_hwmod, AM33XX_CM_GFX_GFX_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_cpgmac0_hwmod, AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_pruss_hwmod, AM33XX_CM_PER_PRUSS_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mpu_hwmod, AM33XX_CM_MPU_MPU_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_l3_instr_hwmod, AM33XX_CM_PER_L3_INSTR_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_ocmcram_hwmod, AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_sha0_hwmod, AM33XX_CM_PER_SHA0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_aes0_hwmod, AM33XX_CM_PER_AES0_CLKCTRL_OFFSET);
+}
+
+static void omap_hwmod_am33xx_rst(void)
+{
+ RSTCTRL(am33xx_pruss_hwmod, AM33XX_RM_PER_RSTCTRL_OFFSET);
+ RSTCTRL(am33xx_gfx_hwmod, AM33XX_RM_GFX_RSTCTRL_OFFSET);
+ RSTST(am33xx_gfx_hwmod, AM33XX_RM_GFX_RSTST_OFFSET);
+}
+
+void omap_hwmod_am33xx_reg(void)
+{
+ omap_hwmod_am33xx_clkctrl();
+ omap_hwmod_am33xx_rst();
+}
+
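+/* Same IP blocks as above, registered with the AM43xx PRCM offsets. */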
+static void omap_hwmod_am43xx_clkctrl(void)
+{
+ CLKCTRL(am33xx_uart2_hwmod, AM43XX_CM_PER_UART1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_uart3_hwmod, AM43XX_CM_PER_UART2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_uart4_hwmod, AM43XX_CM_PER_UART3_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_uart5_hwmod, AM43XX_CM_PER_UART4_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_uart6_hwmod, AM43XX_CM_PER_UART5_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_dcan0_hwmod, AM43XX_CM_PER_DCAN0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_dcan1_hwmod, AM43XX_CM_PER_DCAN1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_elm_hwmod, AM43XX_CM_PER_ELM_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_epwmss0_hwmod, AM43XX_CM_PER_EPWMSS0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_epwmss1_hwmod, AM43XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_epwmss2_hwmod, AM43XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_gpio1_hwmod, AM43XX_CM_PER_GPIO1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_gpio2_hwmod, AM43XX_CM_PER_GPIO2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_gpio3_hwmod, AM43XX_CM_PER_GPIO3_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_i2c2_hwmod, AM43XX_CM_PER_I2C1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_i2c3_hwmod, AM43XX_CM_PER_I2C2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mailbox_hwmod, AM43XX_CM_PER_MAILBOX0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mcasp0_hwmod, AM43XX_CM_PER_MCASP0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mcasp1_hwmod, AM43XX_CM_PER_MCASP1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mmc0_hwmod, AM43XX_CM_PER_MMC0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mmc1_hwmod, AM43XX_CM_PER_MMC1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_spi0_hwmod, AM43XX_CM_PER_SPI0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_spi1_hwmod, AM43XX_CM_PER_SPI1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_spinlock_hwmod, AM43XX_CM_PER_SPINLOCK_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer2_hwmod, AM43XX_CM_PER_TIMER2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer3_hwmod, AM43XX_CM_PER_TIMER3_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer4_hwmod, AM43XX_CM_PER_TIMER4_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer5_hwmod, AM43XX_CM_PER_TIMER5_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer6_hwmod, AM43XX_CM_PER_TIMER6_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer7_hwmod, AM43XX_CM_PER_TIMER7_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_smartreflex0_hwmod,
+ AM43XX_CM_WKUP_SMARTREFLEX0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_smartreflex1_hwmod,
+ AM43XX_CM_WKUP_SMARTREFLEX1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_uart1_hwmod, AM43XX_CM_WKUP_UART0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_timer1_hwmod, AM43XX_CM_WKUP_TIMER1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_i2c1_hwmod, AM43XX_CM_WKUP_I2C0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_wd_timer1_hwmod, AM43XX_CM_WKUP_WDT1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_rtc_hwmod, AM43XX_CM_RTC_RTC_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mmc2_hwmod, AM43XX_CM_PER_MMC2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_gpmc_hwmod, AM43XX_CM_PER_GPMC_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_l4_ls_hwmod, AM43XX_CM_PER_L4LS_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_l4_wkup_hwmod, AM43XX_CM_WKUP_L4WKUP_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_l3_main_hwmod, AM43XX_CM_PER_L3_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_tpcc_hwmod, AM43XX_CM_PER_TPCC_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_tptc0_hwmod, AM43XX_CM_PER_TPTC0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_tptc1_hwmod, AM43XX_CM_PER_TPTC1_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_tptc2_hwmod, AM43XX_CM_PER_TPTC2_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_gfx_hwmod, AM43XX_CM_GFX_GFX_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_cpgmac0_hwmod, AM43XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_pruss_hwmod, AM43XX_CM_PER_PRUSS_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_mpu_hwmod, AM43XX_CM_MPU_MPU_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_l3_instr_hwmod, AM43XX_CM_PER_L3_INSTR_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_ocmcram_hwmod, AM43XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_sha0_hwmod, AM43XX_CM_PER_SHA0_CLKCTRL_OFFSET);
+ CLKCTRL(am33xx_aes0_hwmod, AM43XX_CM_PER_AES0_CLKCTRL_OFFSET);
+}
+
+static void omap_hwmod_am43xx_rst(void)
+{
+ RSTCTRL(am33xx_pruss_hwmod, AM43XX_RM_PER_RSTCTRL_OFFSET);
+ RSTCTRL(am33xx_gfx_hwmod, AM43XX_RM_GFX_RSTCTRL_OFFSET);
+ RSTST(am33xx_gfx_hwmod, AM43XX_RM_GFX_RSTST_OFFSET);
+}
+
+void omap_hwmod_am43xx_reg(void)
+{
+ omap_hwmod_am43xx_clkctrl();
+ omap_hwmod_am43xx_rst();
+}
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 215894f8910d..6b406ca4bd3b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -29,6 +29,7 @@
#include "i2c.h"
#include "mmc.h"
#include "wd_timer.h"
+#include "omap_hwmod_33xx_43xx_common_data.h"
/*
* IP blocks
@@ -52,7 +53,7 @@ static struct omap_hwmod am33xx_emif_hwmod = {
.name = "emif",
.class = &am33xx_emif_hwmod_class,
.clkdm_name = "l3_clkdm",
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
+ .flags = HWMOD_INIT_NO_IDLE,
.main_clk = "dpll_ddr_m2_div2_ck",
.prcm = {
.omap4 = {
@@ -62,79 +63,12 @@ static struct omap_hwmod am33xx_emif_hwmod = {
},
};
-/*
- * 'l3' class
- * instance(s): l3_main, l3_s, l3_instr
- */
-static struct omap_hwmod_class am33xx_l3_hwmod_class = {
- .name = "l3",
-};
-
-static struct omap_hwmod am33xx_l3_main_hwmod = {
- .name = "l3_main",
- .class = &am33xx_l3_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
- .main_clk = "l3_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_L3_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* l3_s */
-static struct omap_hwmod am33xx_l3_s_hwmod = {
- .name = "l3_s",
- .class = &am33xx_l3_hwmod_class,
- .clkdm_name = "l3s_clkdm",
-};
-
-/* l3_instr */
-static struct omap_hwmod am33xx_l3_instr_hwmod = {
- .name = "l3_instr",
- .class = &am33xx_l3_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
- .main_clk = "l3_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_L3_INSTR_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
- * 'l4' class
- * instance(s): l4_ls, l4_hs, l4_wkup, l4_fw
- */
-static struct omap_hwmod_class am33xx_l4_hwmod_class = {
- .name = "l4",
-};
-
-/* l4_ls */
-static struct omap_hwmod am33xx_l4_ls_hwmod = {
- .name = "l4_ls",
- .class = &am33xx_l4_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
- .main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
/* l4_hs */
static struct omap_hwmod am33xx_l4_hs_hwmod = {
.name = "l4_hs",
.class = &am33xx_l4_hwmod_class,
.clkdm_name = "l4hs_clkdm",
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
+ .flags = HWMOD_INIT_NO_IDLE,
.main_clk = "l4hs_gclk",
.prcm = {
.omap4 = {
@@ -144,50 +78,6 @@ static struct omap_hwmod am33xx_l4_hs_hwmod = {
},
};
-
-/* l4_wkup */
-static struct omap_hwmod am33xx_l4_wkup_hwmod = {
- .name = "l4_wkup",
- .class = &am33xx_l4_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_WKUP_L4WKUP_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
- * 'mpu' class
- */
-static struct omap_hwmod_class am33xx_mpu_hwmod_class = {
- .name = "mpu",
-};
-
-static struct omap_hwmod am33xx_mpu_hwmod = {
- .name = "mpu",
- .class = &am33xx_mpu_hwmod_class,
- .clkdm_name = "mpu_clkdm",
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
- .main_clk = "dpll_mpu_m2_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_MPU_MPU_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
- * 'wakeup m3' class
- * Wakeup controller sub-system under wakeup domain
- */
-static struct omap_hwmod_class am33xx_wkup_m3_hwmod_class = {
- .name = "wkup_m3",
-};
-
static struct omap_hwmod_rst_info am33xx_wkup_m3_resets[] = {
{ .name = "wkup_m3", .rst_shift = 3, .st_shift = 5 },
};
@@ -213,78 +103,6 @@ static struct omap_hwmod am33xx_wkup_m3_hwmod = {
};
/*
- * 'pru-icss' class
- * Programmable Real-Time Unit and Industrial Communication Subsystem
- */
-static struct omap_hwmod_class am33xx_pruss_hwmod_class = {
- .name = "pruss",
-};
-
-static struct omap_hwmod_rst_info am33xx_pruss_resets[] = {
- { .name = "pruss", .rst_shift = 1 },
-};
-
-/* pru-icss */
-/* Pseudo hwmod for reset control purpose only */
-static struct omap_hwmod am33xx_pruss_hwmod = {
- .name = "pruss",
- .class = &am33xx_pruss_hwmod_class,
- .clkdm_name = "pruss_ocp_clkdm",
- .main_clk = "pruss_ocp_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_PRUSS_CLKCTRL_OFFSET,
- .rstctrl_offs = AM33XX_RM_PER_RSTCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .rst_lines = am33xx_pruss_resets,
- .rst_lines_cnt = ARRAY_SIZE(am33xx_pruss_resets),
-};
-
-/* gfx */
-/* Pseudo hwmod for reset control purpose only */
-static struct omap_hwmod_class am33xx_gfx_hwmod_class = {
- .name = "gfx",
-};
-
-static struct omap_hwmod_rst_info am33xx_gfx_resets[] = {
- { .name = "gfx", .rst_shift = 0, .st_shift = 0},
-};
-
-static struct omap_hwmod am33xx_gfx_hwmod = {
- .name = "gfx",
- .class = &am33xx_gfx_hwmod_class,
- .clkdm_name = "gfx_l3_clkdm",
- .main_clk = "gfx_fck_div_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_GFX_GFX_CLKCTRL_OFFSET,
- .rstctrl_offs = AM33XX_RM_GFX_RSTCTRL_OFFSET,
- .rstst_offs = AM33XX_RM_GFX_RSTST_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .rst_lines = am33xx_gfx_resets,
- .rst_lines_cnt = ARRAY_SIZE(am33xx_gfx_resets),
-};
-
-/*
- * 'prcm' class
- * power and reset manager (whole prcm infrastructure)
- */
-static struct omap_hwmod_class am33xx_prcm_hwmod_class = {
- .name = "prcm",
-};
-
-/* prcm */
-static struct omap_hwmod am33xx_prcm_hwmod = {
- .name = "prcm",
- .class = &am33xx_prcm_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
-};
-
-/*
* 'adc/tsc' class
 * TouchScreen Controller (Analog-To-Digital Converter)
*/
@@ -388,79 +206,6 @@ static struct omap_hwmod am33xx_ocpwp_hwmod = {
#endif
/*
- * 'aes0' class
- */
-static struct omap_hwmod_class_sysconfig am33xx_aes0_sysc = {
- .rev_offs = 0x80,
- .sysc_offs = 0x84,
- .syss_offs = 0x88,
- .sysc_flags = SYSS_HAS_RESET_STATUS,
-};
-
-static struct omap_hwmod_class am33xx_aes0_hwmod_class = {
- .name = "aes0",
- .sysc = &am33xx_aes0_sysc,
-};
-
-static struct omap_hwmod am33xx_aes0_hwmod = {
- .name = "aes",
- .class = &am33xx_aes0_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .main_clk = "aes0_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_AES0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* sha0 HIB2 (the 'P' (public) device) */
-static struct omap_hwmod_class_sysconfig am33xx_sha0_sysc = {
- .rev_offs = 0x100,
- .sysc_offs = 0x110,
- .syss_offs = 0x114,
- .sysc_flags = SYSS_HAS_RESET_STATUS,
-};
-
-static struct omap_hwmod_class am33xx_sha0_hwmod_class = {
- .name = "sha0",
- .sysc = &am33xx_sha0_sysc,
-};
-
-static struct omap_hwmod am33xx_sha0_hwmod = {
- .name = "sham",
- .class = &am33xx_sha0_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .main_clk = "l3_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_SHA0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* ocmcram */
-static struct omap_hwmod_class am33xx_ocmcram_hwmod_class = {
- .name = "ocmcram",
-};
-
-static struct omap_hwmod am33xx_ocmcram_hwmod = {
- .name = "ocmcram",
- .class = &am33xx_ocmcram_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
- .main_clk = "l3_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
* 'debugss' class
* debug sub system
*/
@@ -488,51 +233,11 @@ static struct omap_hwmod am33xx_debugss_hwmod = {
.opt_clks_cnt = ARRAY_SIZE(debugss_opt_clks),
};
-/* 'smartreflex' class */
-static struct omap_hwmod_class am33xx_smartreflex_hwmod_class = {
- .name = "smartreflex",
-};
-
-/* smartreflex0 */
-static struct omap_hwmod am33xx_smartreflex0_hwmod = {
- .name = "smartreflex0",
- .class = &am33xx_smartreflex_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .main_clk = "smartreflex0_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_WKUP_SMARTREFLEX0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* smartreflex1 */
-static struct omap_hwmod am33xx_smartreflex1_hwmod = {
- .name = "smartreflex1",
- .class = &am33xx_smartreflex_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .main_clk = "smartreflex1_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_WKUP_SMARTREFLEX1_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
- * 'control' module class
- */
-static struct omap_hwmod_class am33xx_control_hwmod_class = {
- .name = "control",
-};
-
static struct omap_hwmod am33xx_control_hwmod = {
.name = "control",
.class = &am33xx_control_hwmod_class,
.clkdm_name = "l4_wkup_clkdm",
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
+ .flags = HWMOD_INIT_NO_IDLE,
.main_clk = "dpll_core_m4_div2_ck",
.prcm = {
.omap4 = {
@@ -542,288 +247,6 @@ static struct omap_hwmod am33xx_control_hwmod = {
},
};
-/*
- * 'cpgmac' class
- * cpsw/cpgmac sub system
- */
-static struct omap_hwmod_class_sysconfig am33xx_cpgmac_sysc = {
- .rev_offs = 0x0,
- .sysc_offs = 0x8,
- .syss_offs = 0x4,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | MSTANDBY_FORCE |
- MSTANDBY_NO),
- .sysc_fields = &omap_hwmod_sysc_type3,
-};
-
-static struct omap_hwmod_class am33xx_cpgmac0_hwmod_class = {
- .name = "cpgmac0",
- .sysc = &am33xx_cpgmac_sysc,
-};
-
-static struct omap_hwmod am33xx_cpgmac0_hwmod = {
- .name = "cpgmac0",
- .class = &am33xx_cpgmac0_hwmod_class,
- .clkdm_name = "cpsw_125mhz_clkdm",
- .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
- .main_clk = "cpsw_125mhz_gclk",
- .mpu_rt_idx = 1,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
- * mdio class
- */
-static struct omap_hwmod_class am33xx_mdio_hwmod_class = {
- .name = "davinci_mdio",
-};
-
-static struct omap_hwmod am33xx_mdio_hwmod = {
- .name = "davinci_mdio",
- .class = &am33xx_mdio_hwmod_class,
- .clkdm_name = "cpsw_125mhz_clkdm",
- .main_clk = "cpsw_125mhz_gclk",
-};
-
-/*
- * dcan class
- */
-static struct omap_hwmod_class am33xx_dcan_hwmod_class = {
- .name = "d_can",
-};
-
-/* dcan0 */
-static struct omap_hwmod am33xx_dcan0_hwmod = {
- .name = "d_can0",
- .class = &am33xx_dcan_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "dcan0_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_DCAN0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* dcan1 */
-static struct omap_hwmod am33xx_dcan1_hwmod = {
- .name = "d_can1",
- .class = &am33xx_dcan_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "dcan1_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_DCAN1_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* elm */
-static struct omap_hwmod_class_sysconfig am33xx_elm_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am33xx_elm_hwmod_class = {
- .name = "elm",
- .sysc = &am33xx_elm_sysc,
-};
-
-static struct omap_hwmod am33xx_elm_hwmod = {
- .name = "elm",
- .class = &am33xx_elm_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_ELM_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* pwmss */
-static struct omap_hwmod_class_sysconfig am33xx_epwmss_sysc = {
- .rev_offs = 0x0,
- .sysc_offs = 0x4,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
- MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class am33xx_epwmss_hwmod_class = {
- .name = "epwmss",
- .sysc = &am33xx_epwmss_sysc,
-};
-
-static struct omap_hwmod_class am33xx_ecap_hwmod_class = {
- .name = "ecap",
-};
-
-static struct omap_hwmod_class am33xx_eqep_hwmod_class = {
- .name = "eqep",
-};
-
-static struct omap_hwmod_class am33xx_ehrpwm_hwmod_class = {
- .name = "ehrpwm",
-};
-
-/* epwmss0 */
-static struct omap_hwmod am33xx_epwmss0_hwmod = {
- .name = "epwmss0",
- .class = &am33xx_epwmss_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_EPWMSS0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* ecap0 */
-static struct omap_hwmod am33xx_ecap0_hwmod = {
- .name = "ecap0",
- .class = &am33xx_ecap_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* eqep0 */
-static struct omap_hwmod am33xx_eqep0_hwmod = {
- .name = "eqep0",
- .class = &am33xx_eqep_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* ehrpwm0 */
-static struct omap_hwmod am33xx_ehrpwm0_hwmod = {
- .name = "ehrpwm0",
- .class = &am33xx_ehrpwm_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* epwmss1 */
-static struct omap_hwmod am33xx_epwmss1_hwmod = {
- .name = "epwmss1",
- .class = &am33xx_epwmss_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* ecap1 */
-static struct omap_hwmod am33xx_ecap1_hwmod = {
- .name = "ecap1",
- .class = &am33xx_ecap_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* eqep1 */
-static struct omap_hwmod am33xx_eqep1_hwmod = {
- .name = "eqep1",
- .class = &am33xx_eqep_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* ehrpwm1 */
-static struct omap_hwmod am33xx_ehrpwm1_hwmod = {
- .name = "ehrpwm1",
- .class = &am33xx_ehrpwm_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* epwmss2 */
-static struct omap_hwmod am33xx_epwmss2_hwmod = {
- .name = "epwmss2",
- .class = &am33xx_epwmss_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* ecap2 */
-static struct omap_hwmod am33xx_ecap2_hwmod = {
- .name = "ecap2",
- .class = &am33xx_ecap_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* eqep2 */
-static struct omap_hwmod am33xx_eqep2_hwmod = {
- .name = "eqep2",
- .class = &am33xx_eqep_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* ehrpwm2 */
-static struct omap_hwmod am33xx_ehrpwm2_hwmod = {
- .name = "ehrpwm2",
- .class = &am33xx_ehrpwm_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/*
- * 'gpio' class: for gpio 0,1,2,3
- */
-static struct omap_hwmod_class_sysconfig am33xx_gpio_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0114,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am33xx_gpio_hwmod_class = {
- .name = "gpio",
- .sysc = &am33xx_gpio_sysc,
- .rev = 2,
-};
-
-static struct omap_gpio_dev_attr gpio_dev_attr = {
- .bank_width = 32,
- .dbck_flag = true,
-};
-
/* gpio0 */
static struct omap_hwmod_opt_clk gpio0_opt_clks[] = {
{ .role = "dbclk", .clk = "gpio0_dbclk" },
@@ -846,174 +269,6 @@ static struct omap_hwmod am33xx_gpio0_hwmod = {
.dev_attr = &gpio_dev_attr,
};
-/* gpio1 */
-static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio1_dbclk" },
-};
-
-static struct omap_hwmod am33xx_gpio1_hwmod = {
- .name = "gpio2",
- .class = &am33xx_gpio_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_GPIO1_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = gpio1_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio1_opt_clks),
- .dev_attr = &gpio_dev_attr,
-};
-
-/* gpio2 */
-static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio2_dbclk" },
-};
-
-static struct omap_hwmod am33xx_gpio2_hwmod = {
- .name = "gpio3",
- .class = &am33xx_gpio_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_GPIO2_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = gpio2_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio2_opt_clks),
- .dev_attr = &gpio_dev_attr,
-};
-
-/* gpio3 */
-static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio3_dbclk" },
-};
-
-static struct omap_hwmod am33xx_gpio3_hwmod = {
- .name = "gpio4",
- .class = &am33xx_gpio_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_GPIO3_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = gpio3_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks),
- .dev_attr = &gpio_dev_attr,
-};
-
-/* gpmc */
-static struct omap_hwmod_class_sysconfig gpmc_sysc = {
- .rev_offs = 0x0,
- .sysc_offs = 0x10,
- .syss_offs = 0x14,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am33xx_gpmc_hwmod_class = {
- .name = "gpmc",
- .sysc = &gpmc_sysc,
-};
-
-static struct omap_hwmod am33xx_gpmc_hwmod = {
- .name = "gpmc",
- .class = &am33xx_gpmc_hwmod_class,
- .clkdm_name = "l3s_clkdm",
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
- .main_clk = "l3s_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* 'i2c' class */
-static struct omap_hwmod_class_sysconfig am33xx_i2c_sysc = {
- .sysc_offs = 0x0010,
- .syss_offs = 0x0090,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class i2c_class = {
- .name = "i2c",
- .sysc = &am33xx_i2c_sysc,
- .rev = OMAP_I2C_IP_VERSION_2,
- .reset = &omap_i2c_reset,
-};
-
-static struct omap_i2c_dev_attr i2c_dev_attr = {
- .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE,
-};
-
-/* i2c1 */
-static struct omap_hwmod am33xx_i2c1_hwmod = {
- .name = "i2c1",
- .class = &i2c_class,
- .clkdm_name = "l4_wkup_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "dpll_per_m2_div4_wkupdm_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .dev_attr = &i2c_dev_attr,
-};
-
-/* i2c1 */
-static struct omap_hwmod am33xx_i2c2_hwmod = {
- .name = "i2c2",
- .class = &i2c_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_I2C1_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .dev_attr = &i2c_dev_attr,
-};
-
-/* i2c3 */
-static struct omap_hwmod am33xx_i2c3_hwmod = {
- .name = "i2c3",
- .class = &i2c_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_I2C2_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .dev_attr = &i2c_dev_attr,
-};
-
-
/* lcdc */
static struct omap_hwmod_class_sysconfig lcdc_sysc = {
.rev_offs = 0x0,
@@ -1043,600 +298,6 @@ static struct omap_hwmod am33xx_lcdc_hwmod = {
};
/*
- * 'mailbox' class
- * mailbox module allowing communication between the on-chip processors using a
- * queued mailbox-interrupt mechanism.
- */
-static struct omap_hwmod_class_sysconfig am33xx_mailbox_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class am33xx_mailbox_hwmod_class = {
- .name = "mailbox",
- .sysc = &am33xx_mailbox_sysc,
-};
-
-static struct omap_hwmod am33xx_mailbox_hwmod = {
- .name = "mailbox",
- .class = &am33xx_mailbox_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_MAILBOX0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
- * 'mcasp' class
- */
-static struct omap_hwmod_class_sysconfig am33xx_mcasp_sysc = {
- .rev_offs = 0x0,
- .sysc_offs = 0x4,
- .sysc_flags = SYSC_HAS_SIDLEMODE,
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type3,
-};
-
-static struct omap_hwmod_class am33xx_mcasp_hwmod_class = {
- .name = "mcasp",
- .sysc = &am33xx_mcasp_sysc,
-};
-
-/* mcasp0 */
-static struct omap_hwmod am33xx_mcasp0_hwmod = {
- .name = "mcasp0",
- .class = &am33xx_mcasp_hwmod_class,
- .clkdm_name = "l3s_clkdm",
- .main_clk = "mcasp0_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_MCASP0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* mcasp1 */
-static struct omap_hwmod am33xx_mcasp1_hwmod = {
- .name = "mcasp1",
- .class = &am33xx_mcasp_hwmod_class,
- .clkdm_name = "l3s_clkdm",
- .main_clk = "mcasp1_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_MCASP1_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* 'mmc' class */
-static struct omap_hwmod_class_sysconfig am33xx_mmc_sysc = {
- .rev_offs = 0x1fc,
- .sysc_offs = 0x10,
- .syss_offs = 0x14,
- .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am33xx_mmc_hwmod_class = {
- .name = "mmc",
- .sysc = &am33xx_mmc_sysc,
-};
-
-/* mmc0 */
-static struct omap_mmc_dev_attr am33xx_mmc0_dev_attr = {
- .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
-};
-
-static struct omap_hwmod am33xx_mmc0_hwmod = {
- .name = "mmc1",
- .class = &am33xx_mmc_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "mmc_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_MMC0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .dev_attr = &am33xx_mmc0_dev_attr,
-};
-
-/* mmc1 */
-static struct omap_mmc_dev_attr am33xx_mmc1_dev_attr = {
- .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
-};
-
-static struct omap_hwmod am33xx_mmc1_hwmod = {
- .name = "mmc2",
- .class = &am33xx_mmc_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "mmc_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_MMC1_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .dev_attr = &am33xx_mmc1_dev_attr,
-};
-
-/* mmc2 */
-static struct omap_mmc_dev_attr am33xx_mmc2_dev_attr = {
- .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
-};
-static struct omap_hwmod am33xx_mmc2_hwmod = {
- .name = "mmc3",
- .class = &am33xx_mmc_hwmod_class,
- .clkdm_name = "l3s_clkdm",
- .main_clk = "mmc_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .dev_attr = &am33xx_mmc2_dev_attr,
-};
-
-/*
- * 'rtc' class
- * rtc subsystem
- */
-static struct omap_hwmod_class_sysconfig am33xx_rtc_sysc = {
- .rev_offs = 0x0074,
- .sysc_offs = 0x0078,
- .sysc_flags = SYSC_HAS_SIDLEMODE,
- .idlemodes = (SIDLE_FORCE | SIDLE_NO |
- SIDLE_SMART | SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type3,
-};
-
-static struct omap_hwmod_class am33xx_rtc_hwmod_class = {
- .name = "rtc",
- .sysc = &am33xx_rtc_sysc,
-};
-
-static struct omap_hwmod am33xx_rtc_hwmod = {
- .name = "rtc",
- .class = &am33xx_rtc_hwmod_class,
- .clkdm_name = "l4_rtc_clkdm",
- .main_clk = "clk_32768_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* 'spi' class */
-static struct omap_hwmod_class_sysconfig am33xx_mcspi_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0110,
- .syss_offs = 0x0114,
- .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am33xx_spi_hwmod_class = {
- .name = "mcspi",
- .sysc = &am33xx_mcspi_sysc,
- .rev = OMAP4_MCSPI_REV,
-};
-
-/* spi0 */
-static struct omap2_mcspi_dev_attr mcspi_attrib = {
- .num_chipselect = 2,
-};
-static struct omap_hwmod am33xx_spi0_hwmod = {
- .name = "spi0",
- .class = &am33xx_spi_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_SPI0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .dev_attr = &mcspi_attrib,
-};
-
-/* spi1 */
-static struct omap_hwmod am33xx_spi1_hwmod = {
- .name = "spi1",
- .class = &am33xx_spi_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_SPI1_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .dev_attr = &mcspi_attrib,
-};
-
-/*
- * 'spinlock' class
- * spinlock provides hardware assistance for synchronizing the
- * processes running on multiple processors
- */
-static struct omap_hwmod_class am33xx_spinlock_hwmod_class = {
- .name = "spinlock",
-};
-
-static struct omap_hwmod am33xx_spinlock_hwmod = {
- .name = "spinlock",
- .class = &am33xx_spinlock_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_SPINLOCK_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* 'timer 2-7' class */
-static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class am33xx_timer_hwmod_class = {
- .name = "timer",
- .sysc = &am33xx_timer_sysc,
-};
-
-/* timer1 1ms */
-static struct omap_hwmod_class_sysconfig am33xx_timer1ms_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am33xx_timer1ms_hwmod_class = {
- .name = "timer",
- .sysc = &am33xx_timer1ms_sysc,
-};
-
-static struct omap_hwmod am33xx_timer1_hwmod = {
- .name = "timer1",
- .class = &am33xx_timer1ms_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .main_clk = "timer1_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_WKUP_TIMER1_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod am33xx_timer2_hwmod = {
- .name = "timer2",
- .class = &am33xx_timer_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "timer2_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_TIMER2_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod am33xx_timer3_hwmod = {
- .name = "timer3",
- .class = &am33xx_timer_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "timer3_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_TIMER3_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod am33xx_timer4_hwmod = {
- .name = "timer4",
- .class = &am33xx_timer_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "timer4_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_TIMER4_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod am33xx_timer5_hwmod = {
- .name = "timer5",
- .class = &am33xx_timer_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "timer5_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_TIMER5_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod am33xx_timer6_hwmod = {
- .name = "timer6",
- .class = &am33xx_timer_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "timer6_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_TIMER6_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod am33xx_timer7_hwmod = {
- .name = "timer7",
- .class = &am33xx_timer_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "timer7_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_TIMER7_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* tpcc */
-static struct omap_hwmod_class am33xx_tpcc_hwmod_class = {
- .name = "tpcc",
-};
-
-static struct omap_hwmod am33xx_tpcc_hwmod = {
- .name = "tpcc",
- .class = &am33xx_tpcc_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .main_clk = "l3_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_TPCC_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod_class_sysconfig am33xx_tptc_sysc = {
- .rev_offs = 0x0,
- .sysc_offs = 0x10,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSC_HAS_MIDLEMODE),
- .idlemodes = (SIDLE_FORCE | SIDLE_SMART | MSTANDBY_FORCE),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-/* 'tptc' class */
-static struct omap_hwmod_class am33xx_tptc_hwmod_class = {
- .name = "tptc",
- .sysc = &am33xx_tptc_sysc,
-};
-
-/* tptc0 */
-static struct omap_hwmod am33xx_tptc0_hwmod = {
- .name = "tptc0",
- .class = &am33xx_tptc_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
- .main_clk = "l3_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_TPTC0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* tptc1 */
-static struct omap_hwmod am33xx_tptc1_hwmod = {
- .name = "tptc1",
- .class = &am33xx_tptc_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
- .main_clk = "l3_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_TPTC1_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* tptc2 */
-static struct omap_hwmod am33xx_tptc2_hwmod = {
- .name = "tptc2",
- .class = &am33xx_tptc_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
- .main_clk = "l3_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_TPTC2_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* 'uart' class */
-static struct omap_hwmod_class_sysconfig uart_sysc = {
- .rev_offs = 0x50,
- .sysc_offs = 0x54,
- .syss_offs = 0x58,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class uart_class = {
- .name = "uart",
- .sysc = &uart_sysc,
-};
-
-/* uart1 */
-static struct omap_hwmod am33xx_uart1_hwmod = {
- .name = "uart1",
- .class = &uart_class,
- .clkdm_name = "l4_wkup_clkdm",
- .flags = DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "dpll_per_m2_div4_wkupdm_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_WKUP_UART0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod am33xx_uart2_hwmod = {
- .name = "uart2",
- .class = &uart_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_UART1_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart3 */
-static struct omap_hwmod am33xx_uart3_hwmod = {
- .name = "uart3",
- .class = &uart_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_UART2_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod am33xx_uart4_hwmod = {
- .name = "uart4",
- .class = &uart_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_UART3_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod am33xx_uart5_hwmod = {
- .name = "uart5",
- .class = &uart_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_UART4_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod am33xx_uart6_hwmod = {
- .name = "uart6",
- .class = &uart_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_UART5_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* 'wd_timer' class */
-static struct omap_hwmod_class_sysconfig wdt_sysc = {
- .rev_offs = 0x0,
- .sysc_offs = 0x10,
- .syss_offs = 0x14,
- .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am33xx_wd_timer_hwmod_class = {
- .name = "wd_timer",
- .sysc = &wdt_sysc,
- .pre_shutdown = &omap2_wd_timer_disable,
-};
-
-/*
- * XXX: device.c file uses hardcoded name for watchdog timer
- * driver "wd_timer2, so we are also using same name as of now...
- */
-static struct omap_hwmod am33xx_wd_timer1_hwmod = {
- .name = "wd_timer2",
- .class = &am33xx_wd_timer_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .flags = HWMOD_SWSUP_SIDLE,
- .main_clk = "wdt1_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
* 'usb_otg' class
* high-speed on-the-go universal serial bus (usb_otg) controller
*/
@@ -1690,14 +351,6 @@ static struct omap_hwmod_ocp_if am33xx_l3_main__emif = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* mpu -> l3 main */
-static struct omap_hwmod_ocp_if am33xx_mpu__l3_main = {
- .master = &am33xx_mpu_hwmod,
- .slave = &am33xx_l3_main_hwmod,
- .clk = "dpll_mpu_m2_ck",
- .user = OCP_USER_MPU,
-};
-
/* l3 main -> l4 hs */
static struct omap_hwmod_ocp_if am33xx_l3_main__l4_hs = {
.master = &am33xx_l3_main_hwmod,
@@ -1706,62 +359,6 @@ static struct omap_hwmod_ocp_if am33xx_l3_main__l4_hs = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l3 main -> l3 s */
-static struct omap_hwmod_ocp_if am33xx_l3_main__l3_s = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_l3_s_hwmod,
- .clk = "l3s_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3 s -> l4 per/ls */
-static struct omap_hwmod_ocp_if am33xx_l3_s__l4_ls = {
- .master = &am33xx_l3_s_hwmod,
- .slave = &am33xx_l4_ls_hwmod,
- .clk = "l3s_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3 s -> l4 wkup */
-static struct omap_hwmod_ocp_if am33xx_l3_s__l4_wkup = {
- .master = &am33xx_l3_s_hwmod,
- .slave = &am33xx_l4_wkup_hwmod,
- .clk = "l3s_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3 main -> l3 instr */
-static struct omap_hwmod_ocp_if am33xx_l3_main__l3_instr = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_l3_instr_hwmod,
- .clk = "l3s_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* mpu -> prcm */
-static struct omap_hwmod_ocp_if am33xx_mpu__prcm = {
- .master = &am33xx_mpu_hwmod,
- .slave = &am33xx_prcm_hwmod,
- .clk = "dpll_mpu_m2_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3 s -> l3 main*/
-static struct omap_hwmod_ocp_if am33xx_l3_s__l3_main = {
- .master = &am33xx_l3_s_hwmod,
- .slave = &am33xx_l3_main_hwmod,
- .clk = "l3s_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* pru-icss -> l3 main */
-static struct omap_hwmod_ocp_if am33xx_pruss__l3_main = {
- .master = &am33xx_pruss_hwmod,
- .slave = &am33xx_l3_main_hwmod,
- .clk = "l3_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* wkup m3 -> l4 wkup */
static struct omap_hwmod_ocp_if am33xx_wkup_m3__l4_wkup = {
.master = &am33xx_wkup_m3_hwmod,
@@ -1770,14 +367,6 @@ static struct omap_hwmod_ocp_if am33xx_wkup_m3__l4_wkup = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* gfx -> l3 main */
-static struct omap_hwmod_ocp_if am33xx_gfx__l3_main = {
- .master = &am33xx_gfx_hwmod,
- .slave = &am33xx_l3_main_hwmod,
- .clk = "dpll_core_m4_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4 wkup -> wkup m3 */
static struct omap_hwmod_ocp_if am33xx_l4_wkup__wkup_m3 = {
.master = &am33xx_l4_wkup_hwmod,
@@ -1794,14 +383,6 @@ static struct omap_hwmod_ocp_if am33xx_l4_hs__pruss = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l3 main -> gfx */
-static struct omap_hwmod_ocp_if am33xx_l3_main__gfx = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_gfx_hwmod,
- .clk = "dpll_core_m4_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l3_main -> debugss */
static struct omap_hwmod_addr_space am33xx_debugss_addrs[] = {
{
@@ -1844,54 +425,6 @@ static struct omap_hwmod_ocp_if am33xx_l4_wkup__control = {
.user = OCP_USER_MPU,
};
-/* l4 wkup -> rtc */
-static struct omap_hwmod_ocp_if am33xx_l4_wkup__rtc = {
- .master = &am33xx_l4_wkup_hwmod,
- .slave = &am33xx_rtc_hwmod,
- .clk = "clkdiv32k_ick",
- .user = OCP_USER_MPU,
-};
-
-/* l4 per/ls -> DCAN0 */
-static struct omap_hwmod_ocp_if am33xx_l4_per__dcan0 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_dcan0_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4 per/ls -> DCAN1 */
-static struct omap_hwmod_ocp_if am33xx_l4_per__dcan1 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_dcan1_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4 per/ls -> GPIO2 */
-static struct omap_hwmod_ocp_if am33xx_l4_per__gpio1 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_gpio1_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4 per/ls -> gpio3 */
-static struct omap_hwmod_ocp_if am33xx_l4_per__gpio2 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_gpio2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4 per/ls -> gpio4 */
-static struct omap_hwmod_ocp_if am33xx_l4_per__gpio3 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_gpio3_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* L4 WKUP -> I2C1 */
static struct omap_hwmod_ocp_if am33xx_l4_wkup__i2c1 = {
.master = &am33xx_l4_wkup_hwmod,
@@ -1933,177 +466,6 @@ static struct omap_hwmod_ocp_if am33xx_l4_hs__cpgmac0 = {
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_ocp_if am33xx_cpgmac0__mdio = {
- .master = &am33xx_cpgmac0_hwmod,
- .slave = &am33xx_mdio_hwmod,
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_addr_space am33xx_elm_addr_space[] = {
- {
- .pa_start = 0x48080000,
- .pa_end = 0x48080000 + SZ_8K - 1,
- .flags = ADDR_TYPE_RT
- },
- { }
-};
-
-static struct omap_hwmod_ocp_if am33xx_l4_ls__elm = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_elm_hwmod,
- .clk = "l4ls_gclk",
- .addr = am33xx_elm_addr_space,
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_addr_space am33xx_epwmss0_addr_space[] = {
- {
- .pa_start = 0x48300000,
- .pa_end = 0x48300000 + SZ_16 - 1,
- .flags = ADDR_TYPE_RT
- },
- { }
-};
-
-static struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss0 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_epwmss0_hwmod,
- .clk = "l4ls_gclk",
- .addr = am33xx_epwmss0_addr_space,
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am33xx_epwmss0__ecap0 = {
- .master = &am33xx_epwmss0_hwmod,
- .slave = &am33xx_ecap0_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am33xx_epwmss0__eqep0 = {
- .master = &am33xx_epwmss0_hwmod,
- .slave = &am33xx_eqep0_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am33xx_epwmss0__ehrpwm0 = {
- .master = &am33xx_epwmss0_hwmod,
- .slave = &am33xx_ehrpwm0_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-
-static struct omap_hwmod_addr_space am33xx_epwmss1_addr_space[] = {
- {
- .pa_start = 0x48302000,
- .pa_end = 0x48302000 + SZ_16 - 1,
- .flags = ADDR_TYPE_RT
- },
- { }
-};
-
-static struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss1 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_epwmss1_hwmod,
- .clk = "l4ls_gclk",
- .addr = am33xx_epwmss1_addr_space,
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am33xx_epwmss1__ecap1 = {
- .master = &am33xx_epwmss1_hwmod,
- .slave = &am33xx_ecap1_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am33xx_epwmss1__eqep1 = {
- .master = &am33xx_epwmss1_hwmod,
- .slave = &am33xx_eqep1_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am33xx_epwmss1__ehrpwm1 = {
- .master = &am33xx_epwmss1_hwmod,
- .slave = &am33xx_ehrpwm1_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_addr_space am33xx_epwmss2_addr_space[] = {
- {
- .pa_start = 0x48304000,
- .pa_end = 0x48304000 + SZ_16 - 1,
- .flags = ADDR_TYPE_RT
- },
- { }
-};
-
-static struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss2 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_epwmss2_hwmod,
- .clk = "l4ls_gclk",
- .addr = am33xx_epwmss2_addr_space,
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am33xx_epwmss2__ecap2 = {
- .master = &am33xx_epwmss2_hwmod,
- .slave = &am33xx_ecap2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am33xx_epwmss2__eqep2 = {
- .master = &am33xx_epwmss2_hwmod,
- .slave = &am33xx_eqep2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am33xx_epwmss2__ehrpwm2 = {
- .master = &am33xx_epwmss2_hwmod,
- .slave = &am33xx_ehrpwm2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l3s cfg -> gpmc */
-static struct omap_hwmod_addr_space am33xx_gpmc_addr_space[] = {
- {
- .pa_start = 0x50000000,
- .pa_end = 0x50000000 + SZ_8K - 1,
- .flags = ADDR_TYPE_RT,
- },
- { }
-};
-
-static struct omap_hwmod_ocp_if am33xx_l3_s__gpmc = {
- .master = &am33xx_l3_s_hwmod,
- .slave = &am33xx_gpmc_hwmod,
- .clk = "l3s_gclk",
- .addr = am33xx_gpmc_addr_space,
- .user = OCP_USER_MPU,
-};
-
-/* i2c2 */
-static struct omap_hwmod_ocp_if am33xx_l4_per__i2c2 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_i2c2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am33xx_l4_per__i2c3 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_i2c3_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
static struct omap_hwmod_addr_space am33xx_lcdc_addr_space[] = {
{
.pa_start = 0x4830E000,
@@ -2121,138 +483,6 @@ static struct omap_hwmod_ocp_if am33xx_l3_main__lcdc = {
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_addr_space am33xx_mailbox_addrs[] = {
- {
- .pa_start = 0x480C8000,
- .pa_end = 0x480C8000 + (SZ_4K - 1),
- .flags = ADDR_TYPE_RT
- },
- { }
-};
-
-/* l4 ls -> mailbox */
-static struct omap_hwmod_ocp_if am33xx_l4_per__mailbox = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_mailbox_hwmod,
- .clk = "l4ls_gclk",
- .addr = am33xx_mailbox_addrs,
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> spinlock */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__spinlock = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_spinlock_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> mcasp0 */
-static struct omap_hwmod_addr_space am33xx_mcasp0_addr_space[] = {
- {
- .pa_start = 0x48038000,
- .pa_end = 0x48038000 + SZ_8K - 1,
- .flags = ADDR_TYPE_RT
- },
- { }
-};
-
-static struct omap_hwmod_ocp_if am33xx_l4_ls__mcasp0 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_mcasp0_hwmod,
- .clk = "l4ls_gclk",
- .addr = am33xx_mcasp0_addr_space,
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> mcasp1 */
-static struct omap_hwmod_addr_space am33xx_mcasp1_addr_space[] = {
- {
- .pa_start = 0x4803C000,
- .pa_end = 0x4803C000 + SZ_8K - 1,
- .flags = ADDR_TYPE_RT
- },
- { }
-};
-
-static struct omap_hwmod_ocp_if am33xx_l4_ls__mcasp1 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_mcasp1_hwmod,
- .clk = "l4ls_gclk",
- .addr = am33xx_mcasp1_addr_space,
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> mmc0 */
-static struct omap_hwmod_addr_space am33xx_mmc0_addr_space[] = {
- {
- .pa_start = 0x48060100,
- .pa_end = 0x48060100 + SZ_4K - 1,
- .flags = ADDR_TYPE_RT,
- },
- { }
-};
-
-static struct omap_hwmod_ocp_if am33xx_l4_ls__mmc0 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_mmc0_hwmod,
- .clk = "l4ls_gclk",
- .addr = am33xx_mmc0_addr_space,
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> mmc1 */
-static struct omap_hwmod_addr_space am33xx_mmc1_addr_space[] = {
- {
- .pa_start = 0x481d8100,
- .pa_end = 0x481d8100 + SZ_4K - 1,
- .flags = ADDR_TYPE_RT,
- },
- { }
-};
-
-static struct omap_hwmod_ocp_if am33xx_l4_ls__mmc1 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_mmc1_hwmod,
- .clk = "l4ls_gclk",
- .addr = am33xx_mmc1_addr_space,
- .user = OCP_USER_MPU,
-};
-
-/* l3 s -> mmc2 */
-static struct omap_hwmod_addr_space am33xx_mmc2_addr_space[] = {
- {
- .pa_start = 0x47810100,
- .pa_end = 0x47810100 + SZ_64K - 1,
- .flags = ADDR_TYPE_RT,
- },
- { }
-};
-
-static struct omap_hwmod_ocp_if am33xx_l3_s__mmc2 = {
- .master = &am33xx_l3_s_hwmod,
- .slave = &am33xx_mmc2_hwmod,
- .clk = "l3s_gclk",
- .addr = am33xx_mmc2_addr_space,
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> mcspi0 */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__mcspi0 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_spi0_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> mcspi1 */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__mcspi1 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_spi1_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
/* l4 wkup -> timer1 */
static struct omap_hwmod_ocp_if am33xx_l4_wkup__timer1 = {
.master = &am33xx_l4_wkup_hwmod,
@@ -2261,116 +491,6 @@ static struct omap_hwmod_ocp_if am33xx_l4_wkup__timer1 = {
.user = OCP_USER_MPU,
};
-/* l4 per -> timer2 */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__timer2 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_timer2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 per -> timer3 */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__timer3 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_timer3_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 per -> timer4 */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__timer4 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_timer4_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 per -> timer5 */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__timer5 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_timer5_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 per -> timer6 */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__timer6 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_timer6_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 per -> timer7 */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__timer7 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_timer7_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l3 main -> tpcc */
-static struct omap_hwmod_ocp_if am33xx_l3_main__tpcc = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_tpcc_hwmod,
- .clk = "l3_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l3 main -> tpcc0 */
-static struct omap_hwmod_addr_space am33xx_tptc0_addr_space[] = {
- {
- .pa_start = 0x49800000,
- .pa_end = 0x49800000 + SZ_8K - 1,
- .flags = ADDR_TYPE_RT,
- },
- { }
-};
-
-static struct omap_hwmod_ocp_if am33xx_l3_main__tptc0 = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_tptc0_hwmod,
- .clk = "l3_gclk",
- .addr = am33xx_tptc0_addr_space,
- .user = OCP_USER_MPU,
-};
-
-/* l3 main -> tpcc1 */
-static struct omap_hwmod_addr_space am33xx_tptc1_addr_space[] = {
- {
- .pa_start = 0x49900000,
- .pa_end = 0x49900000 + SZ_8K - 1,
- .flags = ADDR_TYPE_RT,
- },
- { }
-};
-
-static struct omap_hwmod_ocp_if am33xx_l3_main__tptc1 = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_tptc1_hwmod,
- .clk = "l3_gclk",
- .addr = am33xx_tptc1_addr_space,
- .user = OCP_USER_MPU,
-};
-
-/* l3 main -> tpcc2 */
-static struct omap_hwmod_addr_space am33xx_tptc2_addr_space[] = {
- {
- .pa_start = 0x49a00000,
- .pa_end = 0x49a00000 + SZ_8K - 1,
- .flags = ADDR_TYPE_RT,
- },
- { }
-};
-
-static struct omap_hwmod_ocp_if am33xx_l3_main__tptc2 = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_tptc2_hwmod,
- .clk = "l3_gclk",
- .addr = am33xx_tptc2_addr_space,
- .user = OCP_USER_MPU,
-};
-
/* l4 wkup -> uart1 */
static struct omap_hwmod_ocp_if am33xx_l4_wkup__uart1 = {
.master = &am33xx_l4_wkup_hwmod,
@@ -2379,46 +499,6 @@ static struct omap_hwmod_ocp_if am33xx_l4_wkup__uart1 = {
.user = OCP_USER_MPU,
};
-/* l4 ls -> uart2 */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__uart2 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_uart2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> uart3 */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__uart3 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_uart3_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> uart4 */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__uart4 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_uart4_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> uart5 */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__uart5 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_uart5_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> uart6 */
-static struct omap_hwmod_ocp_if am33xx_l4_ls__uart6 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_uart6_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
/* l4 wkup -> wd_timer1 */
static struct omap_hwmod_ocp_if am33xx_l4_wkup__wd_timer1 = {
.master = &am33xx_l4_wkup_hwmod,
@@ -2437,47 +517,39 @@ static struct omap_hwmod_ocp_if am33xx_l3_s__usbss = {
.flags = OCPIF_SWSUP_IDLE,
};
-/* l3 main -> ocmc */
-static struct omap_hwmod_ocp_if am33xx_l3_main__ocmc = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_ocmcram_hwmod,
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3 main -> sha0 HIB2 */
-static struct omap_hwmod_addr_space am33xx_sha0_addrs[] = {
- {
- .pa_start = 0x53100000,
- .pa_end = 0x53100000 + SZ_512 - 1,
- .flags = ADDR_TYPE_RT
- },
- { }
+/* rng */
+static struct omap_hwmod_class_sysconfig am33xx_rng_sysc = {
+ .rev_offs = 0x1fe0,
+ .sysc_offs = 0x1fe4,
+ .sysc_flags = SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE,
+ .idlemodes = SIDLE_FORCE | SIDLE_NO,
+ .sysc_fields = &omap_hwmod_sysc_type1,
};
-static struct omap_hwmod_ocp_if am33xx_l3_main__sha0 = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_sha0_hwmod,
- .clk = "sha0_fck",
- .addr = am33xx_sha0_addrs,
- .user = OCP_USER_MPU | OCP_USER_SDMA,
+static struct omap_hwmod_class am33xx_rng_hwmod_class = {
+ .name = "rng",
+ .sysc = &am33xx_rng_sysc,
};
-/* l3 main -> AES0 HIB2 */
-static struct omap_hwmod_addr_space am33xx_aes0_addrs[] = {
- {
- .pa_start = 0x53500000,
- .pa_end = 0x53500000 + SZ_1M - 1,
- .flags = ADDR_TYPE_RT
+static struct omap_hwmod am33xx_rng_hwmod = {
+ .name = "rng",
+ .class = &am33xx_rng_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE,
+ .main_clk = "rng_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM33XX_CM_PER_RNG_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
},
- { }
};
-static struct omap_hwmod_ocp_if am33xx_l3_main__aes0 = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_aes0_hwmod,
- .clk = "aes0_fck",
- .addr = am33xx_aes0_addrs,
- .user = OCP_USER_MPU | OCP_USER_SDMA,
+static struct omap_hwmod_ocp_if am33xx_l4_per__rng = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_rng_hwmod,
+ .clk = "rng_fck",
+ .user = OCP_USER_MPU,
};
static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
@@ -2559,11 +631,13 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_cpgmac0__mdio,
&am33xx_l3_main__sha0,
&am33xx_l3_main__aes0,
+ &am33xx_l4_per__rng,
NULL,
};
int __init am33xx_hwmod_init(void)
{
+ omap_hwmod_am33xx_reg();
omap_hwmod_init();
return omap_hwmod_register_links(am33xx_hwmod_ocp_ifs);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 0c3a427da544..9e56fabd7fa3 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -3693,6 +3693,53 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__aes = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/*
+ * 'ssi' class
+ * synchronous serial interface (multichannel and full-duplex serial if)
+ */
+
+static struct omap_hwmod_class_sysconfig omap34xx_ssi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_EMUFREE |
+ SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+ MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap34xx_ssi_hwmod_class = {
+ .name = "ssi",
+ .sysc = &omap34xx_ssi_sysc,
+};
+
+static struct omap_hwmod omap34xx_ssi_hwmod = {
+ .name = "ssi",
+ .class = &omap34xx_ssi_hwmod_class,
+ .clkdm_name = "core_l4_clkdm",
+ .main_clk = "ssi_ssr_fck",
+ .prcm = {
+ .omap2 = {
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_SSI_SHIFT,
+ .module_offs = CORE_MOD,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT,
+ },
+ },
+};
+
+/* L4 CORE -> SSI */
+static struct omap_hwmod_ocp_if omap34xx_l4_core__ssi = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap34xx_ssi_hwmod,
+ .clk = "ssi_ick",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
static struct omap_hwmod_ocp_if *omap3xxx_hwmod_ocp_ifs[] __initdata = {
&omap3xxx_l3_main__l4_core,
&omap3xxx_l3_main__l4_per,
@@ -3818,6 +3865,7 @@ static struct omap_hwmod_ocp_if *omap34xx_hwmod_ocp_ifs[] __initdata = {
#ifdef CONFIG_OMAP_IOMMU_IVA2
&omap3xxx_l3_main__mmu_iva,
#endif
+ &omap34xx_l4_core__ssi,
NULL
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
new file mode 100644
index 000000000000..9002fca76699
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
@@ -0,0 +1,758 @@
+/*
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ *
+ * Hwmods present only in AM43x, and those that differ from AM335x in
+ * more than just register offsets.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_data/gpio-omap.h>
+#include <linux/platform_data/spi-omap2-mcspi.h>
+#include "omap_hwmod.h"
+#include "omap_hwmod_33xx_43xx_common_data.h"
+#include "prcm43xx.h"
+
+/* IP blocks */
+static struct omap_hwmod am43xx_l4_hs_hwmod = {
+ .name = "l4_hs",
+ .class = &am33xx_l4_hwmod_class,
+ .clkdm_name = "l3_clkdm",
+ .flags = HWMOD_INIT_NO_IDLE,
+ .main_clk = "l4hs_gclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_L4HS_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod_rst_info am33xx_wkup_m3_resets[] = {
+ { .name = "wkup_m3", .rst_shift = 3, .st_shift = 5 },
+};
+
+static struct omap_hwmod am43xx_wkup_m3_hwmod = {
+ .name = "wkup_m3",
+ .class = &am33xx_wkup_m3_hwmod_class,
+ .clkdm_name = "l4_wkup_aon_clkdm",
+ /* Keep hardreset asserted */
+ .flags = HWMOD_INIT_NO_RESET | HWMOD_NO_IDLEST,
+ .main_clk = "sys_clkin_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_WKUP_WKUP_M3_CLKCTRL_OFFSET,
+ .rstctrl_offs = AM43XX_RM_WKUP_RSTCTRL_OFFSET,
+ .rstst_offs = AM43XX_RM_WKUP_RSTST_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .rst_lines = am33xx_wkup_m3_resets,
+ .rst_lines_cnt = ARRAY_SIZE(am33xx_wkup_m3_resets),
+};
+
+static struct omap_hwmod am43xx_control_hwmod = {
+ .name = "control",
+ .class = &am33xx_control_hwmod_class,
+ .clkdm_name = "l4_wkup_clkdm",
+ .flags = HWMOD_INIT_NO_IDLE,
+ .main_clk = "sys_clkin_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_WKUP_CONTROL_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod_opt_clk gpio0_opt_clks[] = {
+ { .role = "dbclk", .clk = "gpio0_dbclk" },
+};
+
+static struct omap_hwmod am43xx_gpio0_hwmod = {
+ .name = "gpio1",
+ .class = &am33xx_gpio_hwmod_class,
+ .clkdm_name = "l4_wkup_clkdm",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+ .main_clk = "sys_clkin_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_WKUP_GPIO0_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .opt_clks = gpio0_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(gpio0_opt_clks),
+ .dev_attr = &gpio_dev_attr,
+};
+
+static struct omap_hwmod_class_sysconfig am43xx_synctimer_sysc = {
+ .rev_offs = 0x0,
+ .sysc_offs = 0x4,
+ .sysc_flags = SYSC_HAS_SIDLEMODE,
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class am43xx_synctimer_hwmod_class = {
+ .name = "synctimer",
+ .sysc = &am43xx_synctimer_sysc,
+};
+
+static struct omap_hwmod am43xx_synctimer_hwmod = {
+ .name = "counter_32k",
+ .class = &am43xx_synctimer_hwmod_class,
+ .clkdm_name = "l4_wkup_aon_clkdm",
+ .flags = HWMOD_SWSUP_SIDLE,
+ .main_clk = "synctimer_32kclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_WKUP_SYNCTIMER_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod am43xx_timer8_hwmod = {
+ .name = "timer8",
+ .class = &am33xx_timer_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "timer8_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_TIMER8_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod am43xx_timer9_hwmod = {
+ .name = "timer9",
+ .class = &am33xx_timer_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "timer9_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_TIMER9_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod am43xx_timer10_hwmod = {
+ .name = "timer10",
+ .class = &am33xx_timer_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "timer10_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_TIMER10_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod am43xx_timer11_hwmod = {
+ .name = "timer11",
+ .class = &am33xx_timer_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "timer11_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_TIMER11_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod am43xx_epwmss3_hwmod = {
+ .name = "epwmss3",
+ .class = &am33xx_epwmss_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_EPWMSS3_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod am43xx_ehrpwm3_hwmod = {
+ .name = "ehrpwm3",
+ .class = &am33xx_ehrpwm_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+};
+
+static struct omap_hwmod am43xx_epwmss4_hwmod = {
+ .name = "epwmss4",
+ .class = &am33xx_epwmss_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_EPWMSS4_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod am43xx_ehrpwm4_hwmod = {
+ .name = "ehrpwm4",
+ .class = &am33xx_ehrpwm_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+};
+
+static struct omap_hwmod am43xx_epwmss5_hwmod = {
+ .name = "epwmss5",
+ .class = &am33xx_epwmss_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_EPWMSS5_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod am43xx_ehrpwm5_hwmod = {
+ .name = "ehrpwm5",
+ .class = &am33xx_ehrpwm_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+};
+
+static struct omap_hwmod am43xx_spi2_hwmod = {
+ .name = "spi2",
+ .class = &am33xx_spi_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "dpll_per_m2_div4_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_SPI2_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .dev_attr = &mcspi_attrib,
+};
+
+static struct omap_hwmod am43xx_spi3_hwmod = {
+ .name = "spi3",
+ .class = &am33xx_spi_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "dpll_per_m2_div4_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_SPI3_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .dev_attr = &mcspi_attrib,
+};
+
+static struct omap_hwmod am43xx_spi4_hwmod = {
+ .name = "spi4",
+ .class = &am33xx_spi_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "dpll_per_m2_div4_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_SPI4_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .dev_attr = &mcspi_attrib,
+};
+
+static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
+ { .role = "dbclk", .clk = "gpio4_dbclk" },
+};
+
+static struct omap_hwmod am43xx_gpio4_hwmod = {
+ .name = "gpio5",
+ .class = &am33xx_gpio_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_GPIO4_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .opt_clks = gpio4_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(gpio4_opt_clks),
+ .dev_attr = &gpio_dev_attr,
+};
+
+static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
+ { .role = "dbclk", .clk = "gpio5_dbclk" },
+};
+
+static struct omap_hwmod am43xx_gpio5_hwmod = {
+ .name = "gpio6",
+ .class = &am33xx_gpio_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_GPIO5_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .opt_clks = gpio5_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(gpio5_opt_clks),
+ .dev_attr = &gpio_dev_attr,
+};
+
+static struct omap_hwmod_class am43xx_ocp2scp_hwmod_class = {
+ .name = "ocp2scp",
+};
+
+static struct omap_hwmod am43xx_ocp2scp0_hwmod = {
+ .name = "ocp2scp0",
+ .class = &am43xx_ocp2scp_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_USBPHYOCP2SCP0_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod am43xx_ocp2scp1_hwmod = {
+ .name = "ocp2scp1",
+ .class = &am43xx_ocp2scp_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_USBPHYOCP2SCP1_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod_class_sysconfig am43xx_usb_otg_ss_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_DMADISABLE | SYSC_HAS_MIDLEMODE |
+ SYSC_HAS_SIDLEMODE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP | MSTANDBY_FORCE |
+ MSTANDBY_NO | MSTANDBY_SMART |
+ MSTANDBY_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class am43xx_usb_otg_ss_hwmod_class = {
+ .name = "usb_otg_ss",
+ .sysc = &am43xx_usb_otg_ss_sysc,
+};
+
+static struct omap_hwmod am43xx_usb_otg_ss0_hwmod = {
+ .name = "usb_otg_ss0",
+ .class = &am43xx_usb_otg_ss_hwmod_class,
+ .clkdm_name = "l3s_clkdm",
+ .main_clk = "l3s_gclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_USB_OTG_SS0_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod am43xx_usb_otg_ss1_hwmod = {
+ .name = "usb_otg_ss1",
+ .class = &am43xx_usb_otg_ss_hwmod_class,
+ .clkdm_name = "l3s_clkdm",
+ .main_clk = "l3s_gclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_USB_OTG_SS1_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod_class_sysconfig am43xx_qspi_sysc = {
+ .sysc_offs = 0x0010,
+ .sysc_flags = SYSC_HAS_SIDLEMODE,
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class am43xx_qspi_hwmod_class = {
+ .name = "qspi",
+ .sysc = &am43xx_qspi_sysc,
+};
+
+static struct omap_hwmod am43xx_qspi_hwmod = {
+ .name = "qspi",
+ .class = &am43xx_qspi_hwmod_class,
+ .clkdm_name = "l3s_clkdm",
+ .main_clk = "l3s_gclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_QSPI_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* Interfaces */
+static struct omap_hwmod_ocp_if am43xx_l3_main__l4_hs = {
+ .master = &am33xx_l3_main_hwmod,
+ .slave = &am43xx_l4_hs_hwmod,
+ .clk = "l3s_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_wkup_m3__l4_wkup = {
+ .master = &am43xx_wkup_m3_hwmod,
+ .slave = &am33xx_l4_wkup_hwmod,
+ .clk = "sys_clkin_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_wkup__wkup_m3 = {
+ .master = &am33xx_l4_wkup_hwmod,
+ .slave = &am43xx_wkup_m3_hwmod,
+ .clk = "sys_clkin_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l3_main__pruss = {
+ .master = &am33xx_l3_main_hwmod,
+ .slave = &am33xx_pruss_hwmod,
+ .clk = "dpll_core_m4_ck",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_wkup__smartreflex0 = {
+ .master = &am33xx_l4_wkup_hwmod,
+ .slave = &am33xx_smartreflex0_hwmod,
+ .clk = "sys_clkin_ck",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_wkup__smartreflex1 = {
+ .master = &am33xx_l4_wkup_hwmod,
+ .slave = &am33xx_smartreflex1_hwmod,
+ .clk = "sys_clkin_ck",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_wkup__control = {
+ .master = &am33xx_l4_wkup_hwmod,
+ .slave = &am43xx_control_hwmod,
+ .clk = "sys_clkin_ck",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_wkup__i2c1 = {
+ .master = &am33xx_l4_wkup_hwmod,
+ .slave = &am33xx_i2c1_hwmod,
+ .clk = "sys_clkin_ck",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_wkup__gpio0 = {
+ .master = &am33xx_l4_wkup_hwmod,
+ .slave = &am43xx_gpio0_hwmod,
+ .clk = "sys_clkin_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_hs__cpgmac0 = {
+ .master = &am43xx_l4_hs_hwmod,
+ .slave = &am33xx_cpgmac0_hwmod,
+ .clk = "cpsw_125mhz_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_wkup__timer1 = {
+ .master = &am33xx_l4_wkup_hwmod,
+ .slave = &am33xx_timer1_hwmod,
+ .clk = "sys_clkin_ck",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_wkup__uart1 = {
+ .master = &am33xx_l4_wkup_hwmod,
+ .slave = &am33xx_uart1_hwmod,
+ .clk = "sys_clkin_ck",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_wkup__wd_timer1 = {
+ .master = &am33xx_l4_wkup_hwmod,
+ .slave = &am33xx_wd_timer1_hwmod,
+ .clk = "sys_clkin_ck",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am33xx_l4_wkup__synctimer = {
+ .master = &am33xx_l4_wkup_hwmod,
+ .slave = &am43xx_synctimer_hwmod,
+ .clk = "sys_clkin_ck",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__timer8 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_timer8_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__timer9 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_timer9_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__timer10 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_timer10_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__timer11 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_timer11_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__epwmss3 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_epwmss3_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_epwmss3__ehrpwm3 = {
+ .master = &am43xx_epwmss3_hwmod,
+ .slave = &am43xx_ehrpwm3_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__epwmss4 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_epwmss4_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_epwmss4__ehrpwm4 = {
+ .master = &am43xx_epwmss4_hwmod,
+ .slave = &am43xx_ehrpwm4_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__epwmss5 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_epwmss5_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_epwmss5__ehrpwm5 = {
+ .master = &am43xx_epwmss5_hwmod,
+ .slave = &am43xx_ehrpwm5_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__mcspi2 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_spi2_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__mcspi3 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_spi3_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__mcspi4 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_spi4_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__gpio4 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_gpio4_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__gpio5 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_gpio5_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__ocp2scp0 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_ocp2scp0_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__ocp2scp1 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_ocp2scp1_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l3_s__usbotgss0 = {
+ .master = &am33xx_l3_s_hwmod,
+ .slave = &am43xx_usb_otg_ss0_hwmod,
+ .clk = "l3s_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l3_s__usbotgss1 = {
+ .master = &am33xx_l3_s_hwmod,
+ .slave = &am43xx_usb_otg_ss1_hwmod,
+ .clk = "l3s_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l3_s__qspi = {
+ .master = &am33xx_l3_s_hwmod,
+ .slave = &am43xx_qspi_hwmod,
+ .clk = "l3s_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
+ &am33xx_l4_wkup__synctimer,
+ &am43xx_l4_ls__timer8,
+ &am43xx_l4_ls__timer9,
+ &am43xx_l4_ls__timer10,
+ &am43xx_l4_ls__timer11,
+ &am43xx_l4_ls__epwmss3,
+ &am43xx_epwmss3__ehrpwm3,
+ &am43xx_l4_ls__epwmss4,
+ &am43xx_epwmss4__ehrpwm4,
+ &am43xx_l4_ls__epwmss5,
+ &am43xx_epwmss5__ehrpwm5,
+ &am43xx_l4_ls__mcspi2,
+ &am43xx_l4_ls__mcspi3,
+ &am43xx_l4_ls__mcspi4,
+ &am43xx_l4_ls__gpio4,
+ &am43xx_l4_ls__gpio5,
+ &am43xx_l3_main__pruss,
+ &am33xx_mpu__l3_main,
+ &am33xx_mpu__prcm,
+ &am33xx_l3_s__l4_ls,
+ &am33xx_l3_s__l4_wkup,
+ &am43xx_l3_main__l4_hs,
+ &am33xx_l3_main__l3_s,
+ &am33xx_l3_main__l3_instr,
+ &am33xx_l3_main__gfx,
+ &am33xx_l3_s__l3_main,
+ &am33xx_pruss__l3_main,
+ &am43xx_wkup_m3__l4_wkup,
+ &am33xx_gfx__l3_main,
+ &am43xx_l4_wkup__wkup_m3,
+ &am43xx_l4_wkup__control,
+ &am43xx_l4_wkup__smartreflex0,
+ &am43xx_l4_wkup__smartreflex1,
+ &am43xx_l4_wkup__uart1,
+ &am43xx_l4_wkup__timer1,
+ &am43xx_l4_wkup__i2c1,
+ &am43xx_l4_wkup__gpio0,
+ &am43xx_l4_wkup__wd_timer1,
+ &am43xx_l3_s__qspi,
+ &am33xx_l4_per__dcan0,
+ &am33xx_l4_per__dcan1,
+ &am33xx_l4_per__gpio1,
+ &am33xx_l4_per__gpio2,
+ &am33xx_l4_per__gpio3,
+ &am33xx_l4_per__i2c2,
+ &am33xx_l4_per__i2c3,
+ &am33xx_l4_per__mailbox,
+ &am33xx_l4_ls__mcasp0,
+ &am33xx_l4_ls__mcasp1,
+ &am33xx_l4_ls__mmc0,
+ &am33xx_l4_ls__mmc1,
+ &am33xx_l3_s__mmc2,
+ &am33xx_l4_ls__timer2,
+ &am33xx_l4_ls__timer3,
+ &am33xx_l4_ls__timer4,
+ &am33xx_l4_ls__timer5,
+ &am33xx_l4_ls__timer6,
+ &am33xx_l4_ls__timer7,
+ &am33xx_l3_main__tpcc,
+ &am33xx_l4_ls__uart2,
+ &am33xx_l4_ls__uart3,
+ &am33xx_l4_ls__uart4,
+ &am33xx_l4_ls__uart5,
+ &am33xx_l4_ls__uart6,
+ &am33xx_l4_ls__elm,
+ &am33xx_l4_ls__epwmss0,
+ &am33xx_epwmss0__ecap0,
+ &am33xx_epwmss0__eqep0,
+ &am33xx_epwmss0__ehrpwm0,
+ &am33xx_l4_ls__epwmss1,
+ &am33xx_epwmss1__ecap1,
+ &am33xx_epwmss1__eqep1,
+ &am33xx_epwmss1__ehrpwm1,
+ &am33xx_l4_ls__epwmss2,
+ &am33xx_epwmss2__ecap2,
+ &am33xx_epwmss2__eqep2,
+ &am33xx_epwmss2__ehrpwm2,
+ &am33xx_l3_s__gpmc,
+ &am33xx_l4_ls__mcspi0,
+ &am33xx_l4_ls__mcspi1,
+ &am33xx_l3_main__tptc0,
+ &am33xx_l3_main__tptc1,
+ &am33xx_l3_main__tptc2,
+ &am33xx_l3_main__ocmc,
+ &am43xx_l4_hs__cpgmac0,
+ &am33xx_cpgmac0__mdio,
+ &am33xx_l3_main__sha0,
+ &am33xx_l3_main__aes0,
+ &am43xx_l4_ls__ocp2scp0,
+ &am43xx_l4_ls__ocp2scp1,
+ &am43xx_l3_s__usbotgss0,
+ &am43xx_l3_s__usbotgss1,
+ NULL,
+};
+
+int __init am43xx_hwmod_init(void)
+{
+ omap_hwmod_am43xx_reg();
+ omap_hwmod_init();
+ return omap_hwmod_register_links(am43xx_hwmod_ocp_ifs);
+}
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 9c3b504477d7..1e5b12cb8246 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -914,7 +914,7 @@ static struct omap_hwmod omap44xx_emif1_hwmod = {
.name = "emif1",
.class = &omap44xx_emif_hwmod_class,
.clkdm_name = "l3_emif_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_INIT_NO_IDLE,
.main_clk = "ddrphy_ck",
.prcm = {
.omap4 = {
@@ -930,7 +930,7 @@ static struct omap_hwmod omap44xx_emif2_hwmod = {
.name = "emif2",
.class = &omap44xx_emif_hwmod_class,
.clkdm_name = "l3_emif_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_INIT_NO_IDLE,
.main_clk = "ddrphy_ck",
.prcm = {
.omap4 = {
@@ -2193,7 +2193,7 @@ static struct omap_hwmod omap44xx_mpu_hwmod = {
.name = "mpu",
.class = &omap44xx_mpu_hwmod_class,
.clkdm_name = "mpuss_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_INIT_NO_IDLE,
.main_clk = "dpll_mpu_m2_ck",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index cde415570e04..9e08d6994a0b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -352,7 +352,7 @@ static struct omap_hwmod omap54xx_emif1_hwmod = {
.name = "emif1",
.class = &omap54xx_emif_hwmod_class,
.clkdm_name = "emif_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_INIT_NO_IDLE,
.main_clk = "dpll_core_h11x2_ck",
.prcm = {
.omap4 = {
@@ -368,7 +368,7 @@ static struct omap_hwmod omap54xx_emif2_hwmod = {
.name = "emif2",
.class = &omap54xx_emif_hwmod_class,
.clkdm_name = "emif_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_INIT_NO_IDLE,
.main_clk = "dpll_core_h11x2_ck",
.prcm = {
.omap4 = {
@@ -1135,7 +1135,7 @@ static struct omap_hwmod omap54xx_mpu_hwmod = {
.name = "mpu",
.class = &omap54xx_mpu_hwmod_class,
.clkdm_name = "mpu_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_INIT_NO_IDLE,
.main_clk = "dpll_mpu_m2_ck",
.prcm = {
.omap4 = {
@@ -1146,6 +1146,77 @@ static struct omap_hwmod omap54xx_mpu_hwmod = {
};
/*
+ * 'spinlock' class
+ * spinlock provides hardware assistance for synchronizing the processes
+ * running on multiple processors
+ */
+
+static struct omap_hwmod_class_sysconfig omap54xx_spinlock_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap54xx_spinlock_hwmod_class = {
+ .name = "spinlock",
+ .sysc = &omap54xx_spinlock_sysc,
+};
+
+/* spinlock */
+static struct omap_hwmod omap54xx_spinlock_hwmod = {
+ .name = "spinlock",
+ .class = &omap54xx_spinlock_hwmod_class,
+ .clkdm_name = "l4cfg_clkdm",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = OMAP54XX_CM_L4CFG_SPINLOCK_CLKCTRL_OFFSET,
+ .context_offs = OMAP54XX_RM_L4CFG_SPINLOCK_CONTEXT_OFFSET,
+ },
+ },
+};
+
+/*
+ * 'ocp2scp' class
+ * bridge to transform ocp interface protocol to scp (serial control port)
+ * protocol
+ */
+
+static struct omap_hwmod_class_sysconfig omap54xx_ocp2scp_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap54xx_ocp2scp_hwmod_class = {
+ .name = "ocp2scp",
+ .sysc = &omap54xx_ocp2scp_sysc,
+};
+
+/* ocp2scp1 */
+static struct omap_hwmod omap54xx_ocp2scp1_hwmod = {
+ .name = "ocp2scp1",
+ .class = &omap54xx_ocp2scp_hwmod_class,
+ .clkdm_name = "l3init_clkdm",
+ .main_clk = "l4_root_clk_div",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = OMAP54XX_CM_L3INIT_OCP2SCP1_CLKCTRL_OFFSET,
+ .context_offs = OMAP54XX_RM_L3INIT_OCP2SCP1_CONTEXT_OFFSET,
+ .modulemode = MODULEMODE_HWCTRL,
+ },
+ },
+};
+
+/*
* 'timer' class
* general purpose timer module with accurate 1ms tick
* This class contains several variants: ['timer_1ms', 'timer']
@@ -1465,6 +1536,123 @@ static struct omap_hwmod omap54xx_uart6_hwmod = {
};
/*
+ * 'usb_host_hs' class
+ * high-speed multi-port usb host controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap54xx_usb_host_hs_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+ MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap54xx_usb_host_hs_hwmod_class = {
+ .name = "usb_host_hs",
+ .sysc = &omap54xx_usb_host_hs_sysc,
+};
+
+static struct omap_hwmod omap54xx_usb_host_hs_hwmod = {
+ .name = "usb_host_hs",
+ .class = &omap54xx_usb_host_hs_hwmod_class,
+ .clkdm_name = "l3init_clkdm",
+ /*
+ * Errata: USBHOST Configured In Smart-Idle Can Lead To a Deadlock
+ * id: i660
+ *
+ * Description:
+ * In the following configuration :
+ * - USBHOST module is set to smart-idle mode
+ * - PRCM asserts idle_req to the USBHOST module ( This typically
+ * happens when the system is going to a low power mode : all ports
+ * have been suspended, the master part of the USBHOST module has
+ * entered the standby state, and SW has cut the functional clocks)
+ * - an USBHOST interrupt occurs before the module is able to answer
+ * idle_ack, typically a remote wakeup IRQ.
+ * Then the USB HOST module will enter a deadlock situation where it
+ * is no more accessible nor functional.
+ *
+ * Workaround:
+ * Don't use smart idle; use only force idle, hence HWMOD_SWSUP_SIDLE
+ */
+
+ /*
+ * Errata: USB host EHCI may stall when entering smart-standby mode
+ * Id: i571
+ *
+ * Description:
+ * When the USBHOST module is set to smart-standby mode and is ready
+ * to enter the standby state (i.e. all ports are suspended and all
+ * attached devices are in suspend mode), it can wrongly assert the
+ * Mstandby signal too early while some residual OCP transactions are
+ * still ongoing. If this condition occurs, the internal state machine
+ * may go to an undefined state and the USB link may be stuck upon the
+ * next resume.
+ *
+ * Workaround:
+ * Don't use smart standby; use only force standby,
+ * hence HWMOD_SWSUP_MSTANDBY
+ */
+
+ /*
+ * During system boot, if the hwmod framework resets the module,
+ * the module comes up with smart-idle settings, which can lead to the
+ * deadlock described above (Errata id: i660); so don't reset the
+ * module during boot. Use HWMOD_INIT_NO_RESET.
+ */
+
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
+ HWMOD_INIT_NO_RESET,
+ .main_clk = "l3init_60m_fclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = OMAP54XX_CM_L3INIT_USB_HOST_HS_CLKCTRL_OFFSET,
+ .context_offs = OMAP54XX_RM_L3INIT_USB_HOST_HS_CONTEXT_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/*
+ * 'usb_tll_hs' class
+ * usb_tll_hs module is the adapter on the usb_host_hs ports
+ */
+
+static struct omap_hwmod_class_sysconfig omap54xx_usb_tll_hs_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap54xx_usb_tll_hs_hwmod_class = {
+ .name = "usb_tll_hs",
+ .sysc = &omap54xx_usb_tll_hs_sysc,
+};
+
+static struct omap_hwmod omap54xx_usb_tll_hs_hwmod = {
+ .name = "usb_tll_hs",
+ .class = &omap54xx_usb_tll_hs_hwmod_class,
+ .clkdm_name = "l3init_clkdm",
+ .main_clk = "l4_root_clk_div",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = OMAP54XX_CM_L3INIT_USB_TLL_HS_CLKCTRL_OFFSET,
+ .context_offs = OMAP54XX_RM_L3INIT_USB_TLL_HS_CONTEXT_OFFSET,
+ .modulemode = MODULEMODE_HWCTRL,
+ },
+ },
+};
+
+/*
* 'usb_otg_ss' class
* 2.0 super speed (usb_otg_ss) controller
*/
@@ -1960,6 +2148,22 @@ static struct omap_hwmod_ocp_if omap54xx_l4_cfg__mpu = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* l4_cfg -> spinlock */
+static struct omap_hwmod_ocp_if omap54xx_l4_cfg__spinlock = {
+ .master = &omap54xx_l4_cfg_hwmod,
+ .slave = &omap54xx_spinlock_hwmod,
+ .clk = "l4_root_clk_div",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_cfg -> ocp2scp1 */
+static struct omap_hwmod_ocp_if omap54xx_l4_cfg__ocp2scp1 = {
+ .master = &omap54xx_l4_cfg_hwmod,
+ .slave = &omap54xx_ocp2scp1_hwmod,
+ .clk = "l4_root_clk_div",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* l4_wkup -> timer1 */
static struct omap_hwmod_ocp_if omap54xx_l4_wkup__timer1 = {
.master = &omap54xx_l4_wkup_hwmod,
@@ -2096,6 +2300,22 @@ static struct omap_hwmod_ocp_if omap54xx_l4_per__uart6 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* l4_cfg -> usb_host_hs */
+static struct omap_hwmod_ocp_if omap54xx_l4_cfg__usb_host_hs = {
+ .master = &omap54xx_l4_cfg_hwmod,
+ .slave = &omap54xx_usb_host_hs_hwmod,
+ .clk = "l3_iclk_div",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_cfg -> usb_tll_hs */
+static struct omap_hwmod_ocp_if omap54xx_l4_cfg__usb_tll_hs = {
+ .master = &omap54xx_l4_cfg_hwmod,
+ .slave = &omap54xx_usb_tll_hs_hwmod,
+ .clk = "l4_root_clk_div",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* l4_cfg -> usb_otg_ss */
static struct omap_hwmod_ocp_if omap54xx_l4_cfg__usb_otg_ss = {
.master = &omap54xx_l4_cfg_hwmod,
@@ -2163,6 +2383,8 @@ static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
&omap54xx_l4_per__mmc4,
&omap54xx_l4_per__mmc5,
&omap54xx_l4_cfg__mpu,
+ &omap54xx_l4_cfg__spinlock,
+ &omap54xx_l4_cfg__ocp2scp1,
&omap54xx_l4_wkup__timer1,
&omap54xx_l4_per__timer2,
&omap54xx_l4_per__timer3,
@@ -2180,6 +2402,8 @@ static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
&omap54xx_l4_per__uart4,
&omap54xx_l4_per__uart5,
&omap54xx_l4_per__uart6,
+ &omap54xx_l4_cfg__usb_host_hs,
+ &omap54xx_l4_cfg__usb_tll_hs,
&omap54xx_l4_cfg__usb_otg_ss,
&omap54xx_l4_wkup__wd_timer2,
NULL,
diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c
index bd41d59a7cab..a358a07e18f2 100644
--- a/arch/arm/mach-omap2/opp.c
+++ b/arch/arm/mach-omap2/opp.c
@@ -17,7 +17,8 @@
* GNU General Public License for more details.
*/
#include <linux/module.h>
-#include <linux/opp.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
#include <linux/cpu.h>
#include "omap_device.h"
@@ -40,6 +41,9 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
{
int i, r;
+ if (of_have_populated_dt())
+ return -EINVAL;
+
if (!opp_def || !opp_def_size) {
pr_err("%s: invalid params!\n", __func__);
return -EINVAL;
@@ -81,14 +85,14 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
dev = &oh->od->pdev->dev;
}
- r = opp_add(dev, opp_def->freq, opp_def->u_volt);
+ r = dev_pm_opp_add(dev, opp_def->freq, opp_def->u_volt);
if (r) {
dev_err(dev, "%s: add OPP %ld failed for %s [%d] result=%d\n",
__func__, opp_def->freq,
opp_def->hwmod_name, i, r);
} else {
if (!opp_def->default_available)
- r = opp_disable(dev, opp_def->freq);
+ r = dev_pm_opp_disable(dev, opp_def->freq);
if (r)
dev_err(dev, "%s: disable %ld failed for %s [%d] result=%d\n",
__func__, opp_def->freq,
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
new file mode 100644
index 000000000000..10c71450cf63
--- /dev/null
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -0,0 +1,174 @@
+/*
+ * Legacy platform_data quirks
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/wl12xx.h>
+
+#include <linux/platform_data/pinctrl-single.h>
+
+#include "common.h"
+#include "common-board-devices.h"
+#include "dss-common.h"
+#include "control.h"
+
+struct pdata_init {
+ const char *compatible;
+ void (*fn)(void);
+};
+
+/*
+ * Create alias for USB host PHY clock.
+ * Remove this when clock phandle can be provided via DT
+ */
+static void __init __used legacy_init_ehci_clk(char *clkname)
+{
+ int ret;
+
+ ret = clk_add_alias("main_clk", NULL, clkname, NULL);
+ if (ret)
+ pr_err("%s:Failed to add main_clk alias to %s :%d\n",
+ __func__, clkname, ret);
+}
+
+#if IS_ENABLED(CONFIG_WL12XX)
+
+static struct wl12xx_platform_data wl12xx __initdata;
+
+static void __init __used legacy_init_wl12xx(unsigned ref_clock,
+ unsigned tcxo_clock,
+ int gpio)
+{
+ int res;
+
+ wl12xx.board_ref_clock = ref_clock;
+ wl12xx.board_tcxo_clock = tcxo_clock;
+ wl12xx.irq = gpio_to_irq(gpio);
+
+ res = wl12xx_set_platform_data(&wl12xx);
+ if (res) {
+ pr_err("error setting wl12xx data: %d\n", res);
+ return;
+ }
+}
+#else
+static inline void legacy_init_wl12xx(unsigned ref_clock,
+ unsigned tcxo_clock,
+ int gpio)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+static void __init hsmmc2_internal_input_clk(void)
+{
+ u32 reg;
+
+ reg = omap_ctrl_readl(OMAP343X_CONTROL_DEVCONF1);
+ reg |= OMAP2_MMCSDIO2ADPCLKISEL;
+ omap_ctrl_writel(reg, OMAP343X_CONTROL_DEVCONF1);
+}
+
+static void __init omap3_igep0020_legacy_init(void)
+{
+ omap3_igep2_display_init_of();
+}
+
+static void __init omap3_evm_legacy_init(void)
+{
+ legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149);
+}
+
+static void __init omap3_zoom_legacy_init(void)
+{
+ legacy_init_wl12xx(WL12XX_REFCLOCK_26, 0, 162);
+}
+#endif /* CONFIG_ARCH_OMAP3 */
+
+#ifdef CONFIG_ARCH_OMAP4
+static void __init omap4_sdp_legacy_init(void)
+{
+ omap_4430sdp_display_init_of();
+ legacy_init_wl12xx(WL12XX_REFCLOCK_26,
+ WL12XX_TCXOCLOCK_26, 53);
+}
+
+static void __init omap4_panda_legacy_init(void)
+{
+ omap4_panda_display_init_of();
+ legacy_init_ehci_clk("auxclk3_ck");
+ legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 53);
+}
+#endif
+
+#ifdef CONFIG_SOC_OMAP5
+static void __init omap5_uevm_legacy_init(void)
+{
+ legacy_init_ehci_clk("auxclk1_ck");
+}
+#endif
+
+static struct pcs_pdata pcs_pdata;
+
+void omap_pcs_legacy_init(int irq, void (*rearm)(void))
+{
+ pcs_pdata.irq = irq;
+ pcs_pdata.rearm = rearm;
+}
+
+struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
+#ifdef CONFIG_ARCH_OMAP3
+ OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata),
+ OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata),
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ OF_DEV_AUXDATA("ti,omap4-padconf", 0x4a100040, "4a100040.pinmux", &pcs_pdata),
+ OF_DEV_AUXDATA("ti,omap4-padconf", 0x4a31e040, "4a31e040.pinmux", &pcs_pdata),
+#endif
+ { /* sentinel */ },
+};
+
+static struct pdata_init pdata_quirks[] __initdata = {
+#ifdef CONFIG_ARCH_OMAP3
+ { "nokia,omap3-n9", hsmmc2_internal_input_clk, },
+ { "nokia,omap3-n950", hsmmc2_internal_input_clk, },
+ { "isee,omap3-igep0020", omap3_igep0020_legacy_init, },
+ { "ti,omap3-evm-37xx", omap3_evm_legacy_init, },
+ { "ti,omap3-zoom3", omap3_zoom_legacy_init, },
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ { "ti,omap4-sdp", omap4_sdp_legacy_init, },
+ { "ti,omap4-panda", omap4_panda_legacy_init, },
+#endif
+#ifdef CONFIG_SOC_OMAP5
+ { "ti,omap5-uevm", omap5_uevm_legacy_init, },
+#endif
+ { /* sentinel */ },
+};
+
+void __init pdata_quirks_init(struct of_device_id *omap_dt_match_table)
+{
+ struct pdata_init *quirks = pdata_quirks;
+
+ omap_sdrc_init(NULL, NULL);
+ of_platform_populate(NULL, omap_dt_match_table,
+ omap_auxdata_lookup, NULL);
+
+ while (quirks->compatible) {
+ if (of_machine_is_compatible(quirks->compatible)) {
+ if (quirks->fn)
+ quirks->fn();
+ break;
+ }
+ quirks++;
+ }
+}
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index e742118fcfd2..e1b41416fbf1 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/err.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/cpu.h>
@@ -131,7 +131,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
{
struct voltagedomain *voltdm;
struct clk *clk;
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long freq, bootup_volt;
struct device *dev;
@@ -172,7 +172,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
clk_put(clk);
rcu_read_lock();
- opp = opp_find_freq_ceil(dev, &freq);
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
if (IS_ERR(opp)) {
rcu_read_unlock();
pr_err("%s: unable to find boot up OPP for vdd_%s\n",
@@ -180,7 +180,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
goto exit;
}
- bootup_volt = opp_get_voltage(opp);
+ bootup_volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
if (!bootup_volt) {
pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n",
@@ -266,7 +266,12 @@ static void __init omap4_init_voltages(void)
static inline void omap_init_cpufreq(void)
{
- struct platform_device_info devinfo = { .name = "omap-cpufreq", };
+ struct platform_device_info devinfo = { };
+
+ if (!of_have_populated_dt())
+ devinfo.name = "omap-cpufreq";
+ else
+ devinfo.name = "cpufreq-cpu0";
platform_device_register_full(&devinfo);
}
@@ -300,10 +305,11 @@ int __init omap2_common_pm_late_init(void)
/* Smartreflex device init */
omap_devinit_smartreflex();
- /* cpufreq dummy device instantiation */
- omap_init_cpufreq();
}
+ /* cpufreq dummy device instantiation */
+ omap_init_cpufreq();
+
#ifdef CONFIG_SUSPEND
suspend_set_ops(&omap_pm_ops);
#endif
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index ce956b0a7ba4..8c0759496c8d 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -62,16 +62,6 @@ static struct clockdomain *dsp_clkdm, *mpu_clkdm, *wkup_clkdm, *gfx_clkdm;
static struct clk *osc_ck, *emul_ck;
-static int omap2_fclks_active(void)
-{
- u32 f1, f2;
-
- f1 = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
- f2 = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
-
- return (f1 | f2) ? 1 : 0;
-}
-
static int omap2_enter_full_retention(void)
{
u32 l;
@@ -142,17 +132,7 @@ static int sti_console_enabled;
static int omap2_allow_mpu_retention(void)
{
- u32 l;
-
- /* Check for MMC, UART2, UART1, McSPI2, McSPI1 and DSS1. */
- l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
- if (l & (OMAP2420_EN_MMC_MASK | OMAP24XX_EN_UART2_MASK |
- OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_MCSPI2_MASK |
- OMAP24XX_EN_MCSPI1_MASK | OMAP24XX_EN_DSS1_MASK))
- return 0;
- /* Check for UART3. */
- l = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
- if (l & OMAP24XX_EN_UART3_MASK)
+ if (!omap2xxx_cm_mpu_retention_allowed())
return 0;
if (sti_console_enabled)
return 0;
@@ -188,7 +168,7 @@ static void omap2_enter_mpu_retention(void)
static int omap2_can_sleep(void)
{
- if (omap2_fclks_active())
+ if (omap2xxx_cm_fclks_active())
return 0;
if (__clk_is_enabled(osc_ck))
return 0;
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 5a2d8034c8de..93b80e5da8d4 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -430,8 +430,7 @@ static void __init omap3_iva_idle(void)
OMAP3430_IVA2_MOD, CM_FCLKEN);
/* Set IVA2 boot mode to 'idle' */
- omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
- OMAP343X_CONTROL_IVA2_BOOTMOD);
+ omap3_ctrl_set_iva_bootmode_idle();
/* Un-reset IVA2 */
omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
diff --git a/arch/arm/mach-omap2/powerdomain.h b/arch/arm/mach-omap2/powerdomain.h
index baf3d8bf6bea..da5a59ae77b6 100644
--- a/arch/arm/mach-omap2/powerdomain.h
+++ b/arch/arm/mach-omap2/powerdomain.h
@@ -257,6 +257,7 @@ extern void am33xx_powerdomains_init(void);
extern void omap44xx_powerdomains_init(void);
extern void omap54xx_powerdomains_init(void);
extern void dra7xx_powerdomains_init(void);
+void am43xx_powerdomains_init(void);
extern struct pwrdm_ops omap2_pwrdm_operations;
extern struct pwrdm_ops omap3_pwrdm_operations;
diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
new file mode 100644
index 000000000000..95fee54c38ab
--- /dev/null
+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
@@ -0,0 +1,136 @@
+/*
+ * AM43xx Power domains framework
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "powerdomain.h"
+
+#include "prcm-common.h"
+#include "prcm44xx.h"
+#include "prcm43xx.h"
+
+static struct powerdomain gfx_43xx_pwrdm = {
+ .name = "gfx_pwrdm",
+ .voltdm = { .name = "core" },
+ .prcm_offs = AM43XX_PRM_GFX_INST,
+ .prcm_partition = AM43XX_PRM_PARTITION,
+ .pwrsts = PWRSTS_OFF_ON,
+ .banks = 1,
+ .pwrsts_mem_on = {
+ [0] = PWRSTS_ON, /* gfx_mem */
+ },
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+static struct powerdomain mpu_43xx_pwrdm = {
+ .name = "mpu_pwrdm",
+ .voltdm = { .name = "mpu" },
+ .prcm_offs = AM43XX_PRM_MPU_INST,
+ .prcm_partition = AM43XX_PRM_PARTITION,
+ .pwrsts = PWRSTS_OFF_RET_ON,
+ .pwrsts_logic_ret = PWRSTS_OFF_RET,
+ .banks = 3,
+ .pwrsts_mem_ret = {
+ [0] = PWRSTS_OFF_RET, /* mpu_l1 */
+ [1] = PWRSTS_OFF_RET, /* mpu_l2 */
+ [2] = PWRSTS_OFF_RET, /* mpu_ram */
+ },
+ .pwrsts_mem_on = {
+ [0] = PWRSTS_ON, /* mpu_l1 */
+ [1] = PWRSTS_ON, /* mpu_l2 */
+ [2] = PWRSTS_ON, /* mpu_ram */
+ },
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+static struct powerdomain rtc_43xx_pwrdm = {
+ .name = "rtc_pwrdm",
+ .voltdm = { .name = "rtc" },
+ .prcm_offs = AM43XX_PRM_RTC_INST,
+ .prcm_partition = AM43XX_PRM_PARTITION,
+ .pwrsts = PWRSTS_ON,
+};
+
+static struct powerdomain wkup_43xx_pwrdm = {
+ .name = "wkup_pwrdm",
+ .voltdm = { .name = "core" },
+ .prcm_offs = AM43XX_PRM_WKUP_INST,
+ .prcm_partition = AM43XX_PRM_PARTITION,
+ .pwrsts = PWRSTS_ON,
+ .banks = 1,
+ .pwrsts_mem_on = {
+ [0] = PWRSTS_ON, /* debugss_mem */
+ },
+};
+
+static struct powerdomain tamper_43xx_pwrdm = {
+ .name = "tamper_pwrdm",
+ .voltdm = { .name = "tamper" },
+ .prcm_offs = AM43XX_PRM_TAMPER_INST,
+ .prcm_partition = AM43XX_PRM_PARTITION,
+ .pwrsts = PWRSTS_ON,
+};
+
+static struct powerdomain cefuse_43xx_pwrdm = {
+ .name = "cefuse_pwrdm",
+ .voltdm = { .name = "core" },
+ .prcm_offs = AM43XX_PRM_CEFUSE_INST,
+ .prcm_partition = AM43XX_PRM_PARTITION,
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+static struct powerdomain per_43xx_pwrdm = {
+ .name = "per_pwrdm",
+ .voltdm = { .name = "core" },
+ .prcm_offs = AM43XX_PRM_PER_INST,
+ .prcm_partition = AM43XX_PRM_PARTITION,
+ .pwrsts = PWRSTS_OFF_RET_ON,
+ .pwrsts_logic_ret = PWRSTS_OFF_RET,
+ .banks = 4,
+ .pwrsts_mem_ret = {
+ [0] = PWRSTS_OFF_RET, /* icss_mem */
+ [1] = PWRSTS_OFF_RET, /* per_mem */
+ [2] = PWRSTS_OFF_RET, /* ram1_mem */
+ [3] = PWRSTS_OFF_RET, /* ram2_mem */
+ },
+ .pwrsts_mem_on = {
+ [0] = PWRSTS_ON, /* icss_mem */
+ [1] = PWRSTS_ON, /* per_mem */
+ [2] = PWRSTS_ON, /* ram1_mem */
+ [3] = PWRSTS_ON, /* ram2_mem */
+ },
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+static struct powerdomain *powerdomains_am43xx[] __initdata = {
+ &gfx_43xx_pwrdm,
+ &mpu_43xx_pwrdm,
+ &rtc_43xx_pwrdm,
+ &wkup_43xx_pwrdm,
+ &tamper_43xx_pwrdm,
+ &cefuse_43xx_pwrdm,
+ &per_43xx_pwrdm,
+ NULL
+};
+
+static int am43xx_check_vcvp(void)
+{
+ return 0;
+}
+
+void __init am43xx_powerdomains_init(void)
+{
+ omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
+ pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
+ pwrdm_register_pwrdms(powerdomains_am43xx);
+ pwrdm_complete_init();
+}
diff --git a/arch/arm/mach-omap2/prcm43xx.h b/arch/arm/mach-omap2/prcm43xx.h
new file mode 100644
index 000000000000..7785be984edd
--- /dev/null
+++ b/arch/arm/mach-omap2/prcm43xx.h
@@ -0,0 +1,146 @@
+/*
+ * AM43x PRCM defines
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_PRCM_43XX_H
+#define __ARCH_ARM_MACH_OMAP2_PRCM_43XX_H
+
+#define AM43XX_PRM_PARTITION 1
+#define AM43XX_CM_PARTITION 1
+
+/* PRM instances */
+#define AM43XX_PRM_OCP_SOCKET_INST 0x0000
+#define AM43XX_PRM_MPU_INST 0x0300
+#define AM43XX_PRM_GFX_INST 0x0400
+#define AM43XX_PRM_RTC_INST 0x0500
+#define AM43XX_PRM_TAMPER_INST 0x0600
+#define AM43XX_PRM_CEFUSE_INST 0x0700
+#define AM43XX_PRM_PER_INST 0x0800
+#define AM43XX_PRM_WKUP_INST 0x2000
+#define AM43XX_PRM_DEVICE_INST 0x4000
+
+/* RM RSTCTRL offsets */
+#define AM43XX_RM_PER_RSTCTRL_OFFSET 0x0010
+#define AM43XX_RM_GFX_RSTCTRL_OFFSET 0x0010
+#define AM43XX_RM_WKUP_RSTCTRL_OFFSET 0x0010
+
+/* RM RSTST offsets */
+#define AM43XX_RM_GFX_RSTST_OFFSET 0x0014
+#define AM43XX_RM_WKUP_RSTST_OFFSET 0x0014
+
+/* CM instances */
+#define AM43XX_CM_WKUP_INST 0x2800
+#define AM43XX_CM_DEVICE_INST 0x4100
+#define AM43XX_CM_DPLL_INST 0x4200
+#define AM43XX_CM_MPU_INST 0x8300
+#define AM43XX_CM_GFX_INST 0x8400
+#define AM43XX_CM_RTC_INST 0x8500
+#define AM43XX_CM_TAMPER_INST 0x8600
+#define AM43XX_CM_CEFUSE_INST 0x8700
+#define AM43XX_CM_PER_INST 0x8800
+
+/* CD offsets */
+#define AM43XX_CM_WKUP_L3_AON_CDOFFS 0x0000
+#define AM43XX_CM_WKUP_L3S_TSC_CDOFFS 0x0100
+#define AM43XX_CM_WKUP_L4_WKUP_AON_CDOFFS 0x0200
+#define AM43XX_CM_WKUP_WKUP_CDOFFS 0x0300
+#define AM43XX_CM_MPU_MPU_CDOFFS 0x0000
+#define AM43XX_CM_GFX_GFX_L3_CDOFFS 0x0000
+#define AM43XX_CM_RTC_RTC_CDOFFS 0x0000
+#define AM43XX_CM_TAMPER_TAMPER_CDOFFS 0x0000
+#define AM43XX_CM_CEFUSE_CEFUSE_CDOFFS 0x0000
+#define AM43XX_CM_PER_L3_CDOFFS 0x0000
+#define AM43XX_CM_PER_L3S_CDOFFS 0x0200
+#define AM43XX_CM_PER_ICSS_CDOFFS 0x0300
+#define AM43XX_CM_PER_L4LS_CDOFFS 0x0400
+#define AM43XX_CM_PER_EMIF_CDOFFS 0x0700
+#define AM43XX_CM_PER_DSS_CDOFFS 0x0a00
+#define AM43XX_CM_PER_CPSW_CDOFFS 0x0b00
+#define AM43XX_CM_PER_OCPWP_L3_CDOFFS 0x0c00
+
+/* CLK CTRL offsets */
+#define AM43XX_CM_PER_UART1_CLKCTRL_OFFSET 0x0580
+#define AM43XX_CM_PER_UART2_CLKCTRL_OFFSET 0x0588
+#define AM43XX_CM_PER_UART3_CLKCTRL_OFFSET 0x0590
+#define AM43XX_CM_PER_UART4_CLKCTRL_OFFSET 0x0598
+#define AM43XX_CM_PER_UART5_CLKCTRL_OFFSET 0x05a0
+#define AM43XX_CM_PER_DCAN0_CLKCTRL_OFFSET 0x0428
+#define AM43XX_CM_PER_DCAN1_CLKCTRL_OFFSET 0x0430
+#define AM43XX_CM_PER_ELM_CLKCTRL_OFFSET 0x0468
+#define AM43XX_CM_PER_EPWMSS0_CLKCTRL_OFFSET 0x0438
+#define AM43XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET 0x0440
+#define AM43XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET 0x0448
+#define AM43XX_CM_PER_GPIO1_CLKCTRL_OFFSET 0x0478
+#define AM43XX_CM_PER_GPIO2_CLKCTRL_OFFSET 0x0480
+#define AM43XX_CM_PER_GPIO3_CLKCTRL_OFFSET 0x0488
+#define AM43XX_CM_PER_I2C1_CLKCTRL_OFFSET 0x04a8
+#define AM43XX_CM_PER_I2C2_CLKCTRL_OFFSET 0x04b0
+#define AM43XX_CM_PER_MAILBOX0_CLKCTRL_OFFSET 0x04b8
+#define AM43XX_CM_PER_MMC0_CLKCTRL_OFFSET 0x04c0
+#define AM43XX_CM_PER_MMC1_CLKCTRL_OFFSET 0x04c8
+#define AM43XX_CM_PER_SPI0_CLKCTRL_OFFSET 0x0500
+#define AM43XX_CM_PER_SPI1_CLKCTRL_OFFSET 0x0508
+#define AM43XX_CM_PER_SPINLOCK_CLKCTRL_OFFSET 0x0528
+#define AM43XX_CM_PER_TIMER2_CLKCTRL_OFFSET 0x0530
+#define AM43XX_CM_PER_TIMER3_CLKCTRL_OFFSET 0x0538
+#define AM43XX_CM_PER_TIMER4_CLKCTRL_OFFSET 0x0540
+#define AM43XX_CM_PER_TIMER5_CLKCTRL_OFFSET 0x0548
+#define AM43XX_CM_PER_TIMER6_CLKCTRL_OFFSET 0x0550
+#define AM43XX_CM_PER_TIMER7_CLKCTRL_OFFSET 0x0558
+#define AM43XX_CM_WKUP_WKUP_M3_CLKCTRL_OFFSET 0x0228
+#define AM43XX_CM_WKUP_CONTROL_CLKCTRL_OFFSET 0x0360
+#define AM43XX_CM_WKUP_SMARTREFLEX0_CLKCTRL_OFFSET 0x0350
+#define AM43XX_CM_WKUP_SMARTREFLEX1_CLKCTRL_OFFSET 0x0358
+#define AM43XX_CM_WKUP_UART0_CLKCTRL_OFFSET 0x0348
+#define AM43XX_CM_WKUP_TIMER1_CLKCTRL_OFFSET 0x0328
+#define AM43XX_CM_WKUP_I2C0_CLKCTRL_OFFSET 0x0340
+#define AM43XX_CM_WKUP_GPIO0_CLKCTRL_OFFSET 0x0368
+#define AM43XX_CM_WKUP_ADC_TSC_CLKCTRL_OFFSET 0x0120
+#define AM43XX_CM_WKUP_WDT1_CLKCTRL_OFFSET 0x0338
+#define AM43XX_CM_WKUP_L4WKUP_CLKCTRL_OFFSET 0x0220
+#define AM43XX_CM_RTC_RTC_CLKCTRL_OFFSET 0x0020
+#define AM43XX_CM_PER_MMC2_CLKCTRL_OFFSET 0x0248
+#define AM43XX_CM_PER_QSPI_CLKCTRL_OFFSET 0x0258
+#define AM43XX_CM_PER_GPMC_CLKCTRL_OFFSET 0x0220
+#define AM43XX_CM_PER_MCASP0_CLKCTRL_OFFSET 0x0238
+#define AM43XX_CM_PER_MCASP1_CLKCTRL_OFFSET 0x0240
+#define AM43XX_CM_PER_L4LS_CLKCTRL_OFFSET 0x0420
+#define AM43XX_CM_PER_L3_CLKCTRL_OFFSET 0x0020
+#define AM43XX_CM_PER_TPCC_CLKCTRL_OFFSET 0x0078
+#define AM43XX_CM_PER_TPTC0_CLKCTRL_OFFSET 0x0080
+#define AM43XX_CM_PER_TPTC1_CLKCTRL_OFFSET 0x0088
+#define AM43XX_CM_PER_TPTC2_CLKCTRL_OFFSET 0x0090
+#define AM43XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET 0x0b20
+#define AM43XX_CM_PER_PRUSS_CLKCTRL_OFFSET 0x0320
+#define AM43XX_CM_GFX_GFX_CLKCTRL_OFFSET 0x0020
+#define AM43XX_CM_PER_L4HS_CLKCTRL_OFFSET 0x00a0
+#define AM43XX_CM_MPU_MPU_CLKCTRL_OFFSET 0x0020
+#define AM43XX_CM_PER_L3_INSTR_CLKCTRL_OFFSET 0x0040
+#define AM43XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET 0x0050
+#define AM43XX_CM_PER_SHA0_CLKCTRL_OFFSET 0x0058
+#define AM43XX_CM_PER_AES0_CLKCTRL_OFFSET 0x0028
+#define AM43XX_CM_PER_TIMER8_CLKCTRL_OFFSET 0x0560
+#define AM43XX_CM_PER_TIMER9_CLKCTRL_OFFSET 0x0568
+#define AM43XX_CM_PER_TIMER10_CLKCTRL_OFFSET 0x0570
+#define AM43XX_CM_PER_TIMER11_CLKCTRL_OFFSET 0x0578
+#define AM43XX_CM_WKUP_SYNCTIMER_CLKCTRL_OFFSET 0x0230
+#define AM43XX_CM_PER_EPWMSS3_CLKCTRL_OFFSET 0x0450
+#define AM43XX_CM_PER_EPWMSS4_CLKCTRL_OFFSET 0x0458
+#define AM43XX_CM_PER_EPWMSS5_CLKCTRL_OFFSET 0x0460
+#define AM43XX_CM_PER_SPI2_CLKCTRL_OFFSET 0x0510
+#define AM43XX_CM_PER_SPI3_CLKCTRL_OFFSET 0x0518
+#define AM43XX_CM_PER_SPI4_CLKCTRL_OFFSET 0x0520
+#define AM43XX_CM_PER_GPIO4_CLKCTRL_OFFSET 0x0490
+#define AM43XX_CM_PER_GPIO5_CLKCTRL_OFFSET 0x0498
+#define AM43XX_CM_PER_USB_OTG_SS0_CLKCTRL_OFFSET 0x0260
+#define AM43XX_CM_PER_USBPHYOCP2SCP0_CLKCTRL_OFFSET 0x05b8
+#define AM43XX_CM_PER_USB_OTG_SS1_CLKCTRL_OFFSET 0x0268
+#define AM43XX_CM_PER_USBPHYOCP2SCP1_CLKCTRL_OFFSET 0x05c0
+
+#endif
diff --git a/arch/arm/mach-omap2/prm3xxx.h b/arch/arm/mach-omap2/prm3xxx.h
index 277f71794e61..f8eb83323b1a 100644
--- a/arch/arm/mach-omap2/prm3xxx.h
+++ b/arch/arm/mach-omap2/prm3xxx.h
@@ -144,7 +144,13 @@ extern u32 omap3_prm_vcvp_read(u8 offset);
extern void omap3_prm_vcvp_write(u32 val, u8 offset);
extern u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
-extern void omap3xxx_prm_reconfigure_io_chain(void);
+#ifdef CONFIG_ARCH_OMAP3
+void omap3xxx_prm_reconfigure_io_chain(void);
+#else
+static inline void omap3xxx_prm_reconfigure_io_chain(void)
+{
+}
+#endif
/* PRM interrupt-related functions */
extern void omap3xxx_prm_read_pending_irqs(unsigned long *events);
diff --git a/arch/arm/mach-omap2/prm44xx_54xx.h b/arch/arm/mach-omap2/prm44xx_54xx.h
index 7cd22abb8f15..a085d9cc1f5d 100644
--- a/arch/arm/mach-omap2/prm44xx_54xx.h
+++ b/arch/arm/mach-omap2/prm44xx_54xx.h
@@ -42,7 +42,13 @@ extern u32 omap4_prm_vcvp_read(u8 offset);
extern void omap4_prm_vcvp_write(u32 val, u8 offset);
extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
-extern void omap44xx_prm_reconfigure_io_chain(void);
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
+void omap44xx_prm_reconfigure_io_chain(void);
+#else
+static inline void omap44xx_prm_reconfigure_io_chain(void)
+{
+}
+#endif
/* PRM interrupt-related functions */
extern void omap44xx_prm_read_pending_irqs(unsigned long *events);
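The two header hunks above use the same idiom: the real prototype is only declared when the corresponding SoC support is configured in, and an empty static inline stub takes its place otherwise, so common code (such as the prm_common.c change that follows) can call the function unconditionally on every build. A minimal sketch of the pattern, with purely illustrative names:

	/* hypothetical header -- illustrates the stub idiom, not taken from the tree */
	#ifdef CONFIG_ARCH_FOO
	void foo_reconfigure_io_chain(void);
	#else
	static inline void foo_reconfigure_io_chain(void)
	{
		/* SoC support not built in: the call compiles away to nothing */
	}
	#endif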
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 228b850e632f..a2e1174ad1b6 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include "soc.h"
#include "prm2xxx_3xxx.h"
#include "prm2xxx.h"
#include "prm3xxx.h"
@@ -322,6 +323,16 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
prcm_irq_chips[i] = gc;
}
+ if (of_have_populated_dt()) {
+ int irq = omap_prcm_event_to_irq("io");
+ if (cpu_is_omap34xx())
+ omap_pcs_legacy_init(irq,
+ omap3xxx_prm_reconfigure_io_chain);
+ else
+ omap_pcs_legacy_init(irq,
+ omap44xx_prm_reconfigure_io_chain);
+ }
+
return 0;
err:
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index 4588df1447ed..076bd90a6ce0 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -455,9 +455,7 @@ IS_OMAP_TYPE(3430, 0x3430)
#define OMAP4470_REV_ES1_0 (OMAP447X_CLASS | (0x10 << 8))
#define OMAP54XX_CLASS 0x54000054
-#define OMAP5430_REV_ES1_0 (OMAP54XX_CLASS | (0x30 << 16) | (0x10 << 8))
#define OMAP5430_REV_ES2_0 (OMAP54XX_CLASS | (0x30 << 16) | (0x20 << 8))
-#define OMAP5432_REV_ES1_0 (OMAP54XX_CLASS | (0x32 << 16) | (0x10 << 8))
#define OMAP5432_REV_ES2_0 (OMAP54XX_CLASS | (0x32 << 16) | (0x20 << 8))
void omap2xxx_check_revision(void);
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index ead48fa5715e..3ca81e0ada5e 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -55,6 +55,7 @@
#include "soc.h"
#include "common.h"
#include "powerdomain.h"
+#include "omap-secure.h"
#define REALTIME_COUNTER_BASE 0x48243200
#define INCREMENTER_NUMERATOR_OFFSET 0x10
@@ -66,6 +67,15 @@
static struct omap_dm_timer clkev;
static struct clock_event_device clockevent_gpt;
+#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
+static unsigned long arch_timer_freq;
+
+void set_cntfreq(void)
+{
+ omap_smc1(OMAP5_DRA7_MON_SET_CNTFRQ_INDEX, arch_timer_freq);
+}
+#endif
+
static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = &clockevent_gpt;
@@ -78,7 +88,7 @@ static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
static struct irqaction omap2_gp_timer_irq = {
.name = "gp_timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = omap2_gp_timer_interrupt,
};
@@ -515,6 +525,10 @@ static void __init realtime_counter_init(void)
num = 8;
den = 25;
break;
+ case 20000000:
+ num = 192;
+ den = 625;
+ break;
case 2600000:
num = 384;
den = 1625;
@@ -542,6 +556,9 @@ static void __init realtime_counter_init(void)
reg |= den;
__raw_writel(reg, base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET);
+ arch_timer_freq = (rate / den) * num;
+ set_cntfreq();
+
iounmap(base);
}
#else
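A quick check of the new 20 MHz entry (editorial arithmetic, not part of the patch): the incrementer reload registers scale the source clock by num/den, so

	arch_timer_freq = (rate / den) * num
	                = (20000000 / 625) * 192
	                = 32000 * 192
	                = 6144000 Hz

i.e. a 6.144 MHz counter rate, which the set_cntfreq() helper added at the top of this file then programs into CNTFRQ through the omap_smc1() secure monitor call.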
diff --git a/arch/arm/mach-orion5x/include/mach/timex.h b/arch/arm/mach-orion5x/include/mach/timex.h
deleted file mode 100644
index 4c69820e0810..000000000000
--- a/arch/arm/mach-orion5x/include/mach/timex.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * arch/arm/mach-orion5x/include/mach/timex.h
- *
- * Tzachi Perelstein <tzachi@marvell.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#define CLOCK_TICK_RATE (100 * HZ)
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c
index e110b6d4ae8c..d49aff74de98 100644
--- a/arch/arm/mach-prima2/common.c
+++ b/arch/arm/mach-prima2/common.c
@@ -6,7 +6,6 @@
* Licensed under GPLv2 or later.
*/
-#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/sizes.h>
@@ -21,13 +20,6 @@ void __init sirfsoc_init_late(void)
sirfsoc_pm_init();
}
-static __init void sirfsoc_init_time(void)
-{
- /* initialize clocking early, we want to set the OS timer */
- sirfsoc_of_clk_init();
- clocksource_of_init();
-}
-
static __init void sirfsoc_map_io(void)
{
sirfsoc_map_lluart();
@@ -43,7 +35,6 @@ static const char *atlas6_dt_match[] __initdata = {
DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)")
/* Maintainer: Barry Song <baohua.song@csr.com> */
.map_io = sirfsoc_map_io,
- .init_time = sirfsoc_init_time,
.init_late = sirfsoc_init_late,
.dt_compat = atlas6_dt_match,
.restart = sirfsoc_restart,
@@ -59,7 +50,6 @@ static const char *prima2_dt_match[] __initdata = {
DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)")
/* Maintainer: Barry Song <baohua.song@csr.com> */
.map_io = sirfsoc_map_io,
- .init_time = sirfsoc_init_time,
.dma_zone_size = SZ_256M,
.init_late = sirfsoc_init_late,
.dt_compat = prima2_dt_match,
@@ -77,7 +67,6 @@ DT_MACHINE_START(MARCO_DT, "Generic MARCO (Flattened Device Tree)")
/* Maintainer: Barry Song <baohua.song@csr.com> */
.smp = smp_ops(sirfsoc_smp_ops),
.map_io = sirfsoc_map_io,
- .init_time = sirfsoc_init_time,
.init_late = sirfsoc_init_late,
.dt_compat = marco_dt_match,
.restart = sirfsoc_restart,
diff --git a/arch/arm/mach-prima2/common.h b/arch/arm/mach-prima2/common.h
index a6304858474a..4b768060a858 100644
--- a/arch/arm/mach-prima2/common.h
+++ b/arch/arm/mach-prima2/common.h
@@ -23,7 +23,6 @@ extern void sirfsoc_secondary_startup(void);
extern void sirfsoc_cpu_die(unsigned int cpu);
extern void __init sirfsoc_of_irq_init(void);
-extern void __init sirfsoc_of_clk_init(void);
extern void sirfsoc_restart(enum reboot_mode, const char *);
extern asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs);
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index a8427115ee07..96100dbf5a2e 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -615,14 +615,12 @@ endmenu
config PXA25x
bool
select CPU_XSCALE
- select CPU_FREQ_TABLE if CPU_FREQ
help
Select code specific to PXA21x/25x/26x variants
config PXA27x
bool
select CPU_XSCALE
- select CPU_FREQ_TABLE if CPU_FREQ
help
Select code specific to PXA27x variants
@@ -635,7 +633,6 @@ config CPU_PXA26x
config PXA3xx
bool
select CPU_XSC3
- select CPU_FREQ_TABLE if CPU_FREQ
help
Select code specific to PXA3xx variants
diff --git a/arch/arm/mach-pxa/include/mach/timex.h b/arch/arm/mach-pxa/include/mach/timex.h
deleted file mode 100644
index af6760a50e1a..000000000000
--- a/arch/arm/mach-pxa/include/mach/timex.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * arch/arm/mach-pxa/include/mach/timex.h
- *
- * Author: Nicolas Pitre
- * Created: Jun 15, 2001
- * Copyright: MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/* Various drivers are still using the constant of CLOCK_TICK_RATE, for
- * those drivers to at least work, the definition is provided here.
- *
- * NOTE: this is no longer accurate when multiple processors and boards
- * are selected, newer drivers should not depend on this any more. Use
- * either the clocksource/clockevent or get this at run-time by calling
- * get_clock_tick_rate() (as defined in generic.c).
- */
-
-#if defined(CONFIG_PXA25x)
-/* PXA250/210 timer base */
-#define CLOCK_TICK_RATE 3686400
-#elif defined(CONFIG_PXA27x)
-/* PXA27x timer base */
-#ifdef CONFIG_MACH_MAINSTONE
-#define CLOCK_TICK_RATE 3249600
-#else
-#define CLOCK_TICK_RATE 3250000
-#endif
-#else
-#define CLOCK_TICK_RATE 3250000
-#endif
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 62aea3e835f3..01de542432a6 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -27,7 +27,7 @@
#include <linux/i2c/pxa-i2c.h>
#include <linux/i2c/pcf857x.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/smc91x.h>
#include <linux/gpio.h>
#include <linux/leds.h>
diff --git a/arch/arm/mach-realview/include/mach/timex.h b/arch/arm/mach-realview/include/mach/timex.h
deleted file mode 100644
index 4eeb069373c2..000000000000
--- a/arch/arm/mach-realview/include/mach/timex.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * arch/arm/mach-realview/include/mach/timex.h
- *
- * RealView architecture timex specifications
- *
- * Copyright (C) 2003 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#define CLOCK_TICK_RATE (50000000 / 16)
diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
index 25ee12b21f01..cf073dea5784 100644
--- a/arch/arm/mach-rockchip/Kconfig
+++ b/arch/arm/mach-rockchip/Kconfig
@@ -5,12 +5,13 @@ config ARCH_ROCKCHIP
select ARCH_REQUIRE_GPIOLIB
select ARM_GIC
select CACHE_L2X0
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
- select LOCAL_TIMERS if SMP
select COMMON_CLK
select GENERIC_CLOCKEVENTS
select DW_APB_TIMER_OF
+ select ARM_GLOBAL_TIMER
+ select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
help
Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs
containing the RK2928, RK30xx and RK31xx series.
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index 724d2d81f976..82c0b0709712 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -19,18 +19,10 @@
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/irqchip.h>
-#include <linux/dw_apb_timer.h>
-#include <linux/clk-provider.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/hardware/cache-l2x0.h>
-static void __init rockchip_timer_init(void)
-{
- of_clk_init(NULL);
- clocksource_of_init();
-}
-
static void __init rockchip_dt_init(void)
{
l2x0_of_init(0, ~0UL);
@@ -47,6 +39,5 @@ static const char * const rockchip_board_dt_compat[] = {
DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)")
.init_machine = rockchip_dt_init,
- .init_time = rockchip_timer_init,
.dt_compat = rockchip_board_dt_compat,
MACHINE_END
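Both the prima2 and Rockchip hunks drop an .init_time hook whose only job was to run of_clk_init() and clocksource_of_init(). This relies on the generic ARM time_init() falling back to exactly that sequence when the machine record provides no hook; roughly (simplified, as of this series):

	/* arch/arm/kernel/time.c, simplified -- shown for context only */
	void __init time_init(void)
	{
		if (machine_desc->init_time) {
			machine_desc->init_time();
		} else {
	#ifdef CONFIG_COMMON_CLK
			of_clk_init(NULL);	/* register clocks described in DT */
	#endif
			clocksource_of_init();	/* probe DT-declared clocksources */
		}
	}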
diff --git a/arch/arm/mach-rpc/include/mach/timex.h b/arch/arm/mach-rpc/include/mach/timex.h
deleted file mode 100644
index dd75e7387bbe..000000000000
--- a/arch/arm/mach-rpc/include/mach/timex.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * arch/arm/mach-rpc/include/mach/timex.h
- *
- * Copyright (C) 1997, 1998 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * RiscPC architecture timex specifications
- */
-
-/*
- * On the RiscPC, the clock ticks at 2MHz.
- */
-#define CLOCK_TICK_RATE 2000000
-
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index dba2173e70f3..8f1d327e0cd1 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -28,6 +28,7 @@ config CPU_S3C2410
select CPU_ARM920T
select CPU_LLSERIAL_S3C2410
select S3C2410_CLOCK
+ select S3C2410_DMA if S3C24XX_DMA
select ARM_S3C2410_CPUFREQ if ARM_S3C24XX_CPUFREQ
select S3C2410_PM if PM
select SAMSUNG_WDT_RESET
@@ -70,6 +71,7 @@ config CPU_S3C2442
select CPU_ARM920T
select CPU_LLSERIAL_S3C2440
select S3C2410_CLOCK
+ select S3C2410_DMA if S3C24XX_DMA
select S3C2410_PM if PM
help
Support for S3C2442 Samsung Mobile CPU based systems.
@@ -148,7 +150,6 @@ config S3C2410_DMA_DEBUG
config S3C2410_DMA
bool
depends on S3C24XX_DMA && (CPU_S3C2410 || CPU_S3C2442)
- default y if CPU_S3C2410 || CPU_S3C2442
help
DMA device selection for S3C2410 and compatible CPUs
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2412.c b/arch/arm/mach-s3c24xx/clock-s3c2412.c
index d8f253f2b486..11b3b28457bb 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2412.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2412.c
@@ -484,22 +484,22 @@ static struct clk init_clocks_disable[] = {
static struct clk init_clocks[] = {
{
- .name = "dma",
+ .name = "dma.0",
.parent = &clk_h,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_DMA0,
}, {
- .name = "dma",
+ .name = "dma.1",
.parent = &clk_h,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_DMA1,
}, {
- .name = "dma",
+ .name = "dma.2",
.parent = &clk_h,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_DMA2,
}, {
- .name = "dma",
+ .name = "dma.3",
.parent = &clk_h,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_DMA3,
diff --git a/arch/arm/mach-s3c24xx/common-s3c2443.c b/arch/arm/mach-s3c24xx/common-s3c2443.c
index f6b9f2ef01bd..65d3eef73090 100644
--- a/arch/arm/mach-s3c24xx/common-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/common-s3c2443.c
@@ -438,32 +438,32 @@ static struct clk init_clocks_off[] = {
static struct clk init_clocks[] = {
{
- .name = "dma",
+ .name = "dma.0",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA0,
}, {
- .name = "dma",
+ .name = "dma.1",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA1,
}, {
- .name = "dma",
+ .name = "dma.2",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA2,
}, {
- .name = "dma",
+ .name = "dma.3",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA3,
}, {
- .name = "dma",
+ .name = "dma.4",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA4,
}, {
- .name = "dma",
+ .name = "dma.5",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA5,
diff --git a/arch/arm/mach-s3c24xx/common.c b/arch/arm/mach-s3c24xx/common.c
index 457261c98433..4adaa4b43ffe 100644
--- a/arch/arm/mach-s3c24xx/common.c
+++ b/arch/arm/mach-s3c24xx/common.c
@@ -31,6 +31,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/platform_data/dma-s3c24xx.h>
#include <mach/hardware.h>
#include <mach/regs-clock.h>
@@ -44,6 +45,7 @@
#include <mach/regs-gpio.h>
#include <plat/regs-serial.h>
+#include <mach/dma.h>
#include <plat/cpu.h>
#include <plat/devs.h>
@@ -329,3 +331,207 @@ void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk,
clk_p.rate = pclk;
clk_f.rate = fclk;
}
+
+#if defined(CONFIG_CPU_S3C2410) || defined(CONFIG_CPU_S3C2412) || \
+ defined(CONFIG_CPU_S3C2440) || defined(CONFIG_CPU_S3C2442)
+static struct resource s3c2410_dma_resource[] = {
+ [0] = DEFINE_RES_MEM(S3C24XX_PA_DMA, S3C24XX_SZ_DMA),
+ [1] = DEFINE_RES_IRQ(IRQ_DMA0),
+ [2] = DEFINE_RES_IRQ(IRQ_DMA1),
+ [3] = DEFINE_RES_IRQ(IRQ_DMA2),
+ [4] = DEFINE_RES_IRQ(IRQ_DMA3),
+};
+#endif
+
+#if defined(CONFIG_CPU_S3C2410) || defined(CONFIG_CPU_S3C2442)
+static struct s3c24xx_dma_channel s3c2410_dma_channels[DMACH_MAX] = {
+ [DMACH_XD0] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 0), },
+ [DMACH_XD1] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 1), },
+ [DMACH_SDI] = { S3C24XX_DMA_APB, false, S3C24XX_DMA_CHANREQ(2, 0) |
+ S3C24XX_DMA_CHANREQ(2, 2) |
+ S3C24XX_DMA_CHANREQ(1, 3),
+ },
+ [DMACH_SPI0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 1), },
+ [DMACH_SPI1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 3), },
+ [DMACH_UART0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 0), },
+ [DMACH_UART1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 1), },
+ [DMACH_UART2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(0, 3), },
+ [DMACH_TIMER] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 0) |
+ S3C24XX_DMA_CHANREQ(3, 2) |
+ S3C24XX_DMA_CHANREQ(3, 3),
+ },
+ [DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 1) |
+ S3C24XX_DMA_CHANREQ(1, 2),
+ },
+ [DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(0, 2), },
+ [DMACH_USB_EP1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 0), },
+ [DMACH_USB_EP2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 1), },
+ [DMACH_USB_EP3] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 2), },
+ [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), },
+};
+
+static struct s3c24xx_dma_platdata s3c2410_dma_platdata = {
+ .num_phy_channels = 4,
+ .channels = s3c2410_dma_channels,
+ .num_channels = DMACH_MAX,
+};
+
+struct platform_device s3c2410_device_dma = {
+ .name = "s3c2410-dma",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s3c2410_dma_resource),
+ .resource = s3c2410_dma_resource,
+ .dev = {
+ .platform_data = &s3c2410_dma_platdata,
+ },
+};
+#endif
+
+#ifdef CONFIG_CPU_S3C2412
+static struct s3c24xx_dma_channel s3c2412_dma_channels[DMACH_MAX] = {
+ [DMACH_XD0] = { S3C24XX_DMA_AHB, true, 17 },
+ [DMACH_XD1] = { S3C24XX_DMA_AHB, true, 18 },
+ [DMACH_SDI] = { S3C24XX_DMA_APB, false, 10 },
+ [DMACH_SPI0_RX] = { S3C24XX_DMA_APB, true, 1 },
+ [DMACH_SPI0_TX] = { S3C24XX_DMA_APB, true, 0 },
+ [DMACH_SPI1_RX] = { S3C24XX_DMA_APB, true, 3 },
+ [DMACH_SPI1_TX] = { S3C24XX_DMA_APB, true, 2 },
+ [DMACH_UART0] = { S3C24XX_DMA_APB, true, 19 },
+ [DMACH_UART1] = { S3C24XX_DMA_APB, true, 21 },
+ [DMACH_UART2] = { S3C24XX_DMA_APB, true, 23 },
+ [DMACH_UART0_SRC2] = { S3C24XX_DMA_APB, true, 20 },
+ [DMACH_UART1_SRC2] = { S3C24XX_DMA_APB, true, 22 },
+ [DMACH_UART2_SRC2] = { S3C24XX_DMA_APB, true, 24 },
+ [DMACH_TIMER] = { S3C24XX_DMA_APB, true, 9 },
+ [DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, 5 },
+ [DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, 4 },
+ [DMACH_USB_EP1] = { S3C24XX_DMA_APB, true, 13 },
+ [DMACH_USB_EP2] = { S3C24XX_DMA_APB, true, 14 },
+ [DMACH_USB_EP3] = { S3C24XX_DMA_APB, true, 15 },
+ [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, 16 },
+};
+
+static struct s3c24xx_dma_platdata s3c2412_dma_platdata = {
+ .num_phy_channels = 4,
+ .channels = s3c2412_dma_channels,
+ .num_channels = DMACH_MAX,
+};
+
+struct platform_device s3c2412_device_dma = {
+ .name = "s3c2412-dma",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s3c2410_dma_resource),
+ .resource = s3c2410_dma_resource,
+ .dev = {
+ .platform_data = &s3c2412_dma_platdata,
+ },
+};
+#endif
+
+#if defined(CONFIG_CPU_S3C2440)
+static struct s3c24xx_dma_channel s3c2440_dma_channels[DMACH_MAX] = {
+ [DMACH_XD0] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 0), },
+ [DMACH_XD1] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 1), },
+ [DMACH_SDI] = { S3C24XX_DMA_APB, false, S3C24XX_DMA_CHANREQ(2, 0) |
+ S3C24XX_DMA_CHANREQ(6, 1) |
+ S3C24XX_DMA_CHANREQ(2, 2) |
+ S3C24XX_DMA_CHANREQ(1, 3),
+ },
+ [DMACH_SPI0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 1), },
+ [DMACH_SPI1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 3), },
+ [DMACH_UART0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 0), },
+ [DMACH_UART1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 1), },
+ [DMACH_UART2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(0, 3), },
+ [DMACH_TIMER] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 0) |
+ S3C24XX_DMA_CHANREQ(3, 2) |
+ S3C24XX_DMA_CHANREQ(3, 3),
+ },
+ [DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 1) |
+ S3C24XX_DMA_CHANREQ(1, 2),
+ },
+ [DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(5, 0) |
+ S3C24XX_DMA_CHANREQ(0, 2),
+ },
+ [DMACH_PCM_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(6, 0) |
+ S3C24XX_DMA_CHANREQ(5, 2),
+ },
+ [DMACH_PCM_OUT] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(5, 1) |
+ S3C24XX_DMA_CHANREQ(6, 3),
+ },
+ [DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(6, 2) |
+ S3C24XX_DMA_CHANREQ(5, 3),
+ },
+ [DMACH_USB_EP1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 0), },
+ [DMACH_USB_EP2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 1), },
+ [DMACH_USB_EP3] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 2), },
+ [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), },
+};
+
+static struct s3c24xx_dma_platdata s3c2440_dma_platdata = {
+ .num_phy_channels = 4,
+ .channels = s3c2440_dma_channels,
+ .num_channels = DMACH_MAX,
+};
+
+struct platform_device s3c2440_device_dma = {
+ .name = "s3c2410-dma",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s3c2410_dma_resource),
+ .resource = s3c2410_dma_resource,
+ .dev = {
+ .platform_data = &s3c2440_dma_platdata,
+ },
+};
+#endif
+
+#if defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416)
+static struct resource s3c2443_dma_resource[] = {
+ [0] = DEFINE_RES_MEM(S3C24XX_PA_DMA, S3C24XX_SZ_DMA),
+ [1] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA0),
+ [2] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA1),
+ [3] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA2),
+ [4] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA3),
+ [5] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA4),
+ [6] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA5),
+};
+
+static struct s3c24xx_dma_channel s3c2443_dma_channels[DMACH_MAX] = {
+ [DMACH_XD0] = { S3C24XX_DMA_AHB, true, 17 },
+ [DMACH_XD1] = { S3C24XX_DMA_AHB, true, 18 },
+ [DMACH_SDI] = { S3C24XX_DMA_APB, false, 10 },
+ [DMACH_SPI0_RX] = { S3C24XX_DMA_APB, true, 1 },
+ [DMACH_SPI0_TX] = { S3C24XX_DMA_APB, true, 0 },
+ [DMACH_SPI1_RX] = { S3C24XX_DMA_APB, true, 3 },
+ [DMACH_SPI1_TX] = { S3C24XX_DMA_APB, true, 2 },
+ [DMACH_UART0] = { S3C24XX_DMA_APB, true, 19 },
+ [DMACH_UART1] = { S3C24XX_DMA_APB, true, 21 },
+ [DMACH_UART2] = { S3C24XX_DMA_APB, true, 23 },
+ [DMACH_UART3] = { S3C24XX_DMA_APB, true, 25 },
+ [DMACH_UART0_SRC2] = { S3C24XX_DMA_APB, true, 20 },
+ [DMACH_UART1_SRC2] = { S3C24XX_DMA_APB, true, 22 },
+ [DMACH_UART2_SRC2] = { S3C24XX_DMA_APB, true, 24 },
+ [DMACH_UART3_SRC2] = { S3C24XX_DMA_APB, true, 26 },
+ [DMACH_TIMER] = { S3C24XX_DMA_APB, true, 9 },
+ [DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, 5 },
+ [DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, 4 },
+ [DMACH_PCM_IN] = { S3C24XX_DMA_APB, true, 28 },
+ [DMACH_PCM_OUT] = { S3C24XX_DMA_APB, true, 27 },
+ [DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, 29 },
+};
+
+static struct s3c24xx_dma_platdata s3c2443_dma_platdata = {
+ .num_phy_channels = 6,
+ .channels = s3c2443_dma_channels,
+ .num_channels = DMACH_MAX,
+};
+
+struct platform_device s3c2443_device_dma = {
+ .name = "s3c2443-dma",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s3c2443_dma_resource),
+ .resource = s3c2443_dma_resource,
+ .dev = {
+ .platform_data = &s3c2443_dma_platdata,
+ },
+};
+#endif
diff --git a/arch/arm/mach-s3c24xx/common.h b/arch/arm/mach-s3c24xx/common.h
index 84b280654f4c..e46c10417216 100644
--- a/arch/arm/mach-s3c24xx/common.h
+++ b/arch/arm/mach-s3c24xx/common.h
@@ -109,4 +109,9 @@ extern void s3c2443_init_irq(void);
extern struct syscore_ops s3c24xx_irq_syscore_ops;
+extern struct platform_device s3c2410_device_dma;
+extern struct platform_device s3c2412_device_dma;
+extern struct platform_device s3c2440_device_dma;
+extern struct platform_device s3c2443_device_dma;
+
#endif /* __ARCH_ARM_MACH_S3C24XX_COMMON_H */
diff --git a/arch/arm/mach-s3c24xx/include/mach/timex.h b/arch/arm/mach-s3c24xx/include/mach/timex.h
deleted file mode 100644
index fe9ca1ffd51b..000000000000
--- a/arch/arm/mach-s3c24xx/include/mach/timex.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/timex.h
- *
- * Copyright (c) 2003-2005 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 - time parameters
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_TIMEX_H
-#define __ASM_ARCH_TIMEX_H
-
-/* CLOCK_TICK_RATE needs to be evaluatable by the cpp, so making it
- * a variable is useless. It seems as long as we make our timers an
- * exact multiple of HZ, any value that makes a 1->1 correspondence
- * for the time conversion functions to/from jiffies is acceptable.
-*/
-
-#define CLOCK_TICK_RATE 12000000
-
-#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c
index a45fcd8ccf79..43c23e220f5b 100644
--- a/arch/arm/mach-s3c24xx/mach-jive.c
+++ b/arch/arm/mach-s3c24xx/mach-jive.c
@@ -466,6 +466,7 @@ static struct platform_device *jive_devices[] __initdata = {
&jive_device_wm8750,
&s3c_device_nand,
&s3c_device_usbgadget,
+ &s3c2412_device_dma,
};
static struct s3c2410_udc_mach_info jive_udc_cfg __initdata = {
diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c
index a83db46320bc..4a18d49a63e0 100644
--- a/arch/arm/mach-s3c24xx/mach-mini2440.c
+++ b/arch/arm/mach-s3c24xx/mach-mini2440.c
@@ -24,7 +24,7 @@
#include <linux/io.h>
#include <linux/serial_core.h>
#include <linux/dm9000.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
#include <linux/platform_device.h>
#include <linux/gpio_keys.h>
#include <linux/i2c.h>
diff --git a/arch/arm/mach-s3c24xx/mach-smdk2413.c b/arch/arm/mach-s3c24xx/mach-smdk2413.c
index 8146e920f10d..c9d31ef28dd1 100644
--- a/arch/arm/mach-s3c24xx/mach-smdk2413.c
+++ b/arch/arm/mach-s3c24xx/mach-smdk2413.c
@@ -89,6 +89,7 @@ static struct platform_device *smdk2413_devices[] __initdata = {
&s3c_device_i2c0,
&s3c_device_iis,
&s3c_device_usbgadget,
+ &s3c2412_device_dma,
};
static void __init smdk2413_fixup(struct tag *tags, char **cmdline,
diff --git a/arch/arm/mach-s3c24xx/mach-smdk2416.c b/arch/arm/mach-s3c24xx/mach-smdk2416.c
index cb46847c66b4..f88e672ad1e4 100644
--- a/arch/arm/mach-s3c24xx/mach-smdk2416.c
+++ b/arch/arm/mach-s3c24xx/mach-smdk2416.c
@@ -215,6 +215,7 @@ static struct platform_device *smdk2416_devices[] __initdata = {
&s3c_device_hsmmc0,
&s3c_device_hsmmc1,
&s3c_device_usb_hsudc,
+ &s3c2443_device_dma,
};
static void __init smdk2416_map_io(void)
diff --git a/arch/arm/mach-s3c24xx/mach-smdk2443.c b/arch/arm/mach-s3c24xx/mach-smdk2443.c
index 9435c3bef18a..d9933fcc6cc8 100644
--- a/arch/arm/mach-s3c24xx/mach-smdk2443.c
+++ b/arch/arm/mach-s3c24xx/mach-smdk2443.c
@@ -115,6 +115,7 @@ static struct platform_device *smdk2443_devices[] __initdata = {
#ifdef CONFIG_SND_SOC_SMDK2443_WM9710
&s3c_device_ac97,
#endif
+ &s3c2443_device_dma,
};
static void __init smdk2443_map_io(void)
diff --git a/arch/arm/mach-s3c24xx/mach-vstms.c b/arch/arm/mach-s3c24xx/mach-vstms.c
index b66588428ec9..f7ec9c550787 100644
--- a/arch/arm/mach-s3c24xx/mach-vstms.c
+++ b/arch/arm/mach-s3c24xx/mach-vstms.c
@@ -126,6 +126,7 @@ static struct platform_device *vstms_devices[] __initdata = {
&s3c_device_iis,
&s3c_device_rtc,
&s3c_device_nand,
+ &s3c2412_device_dma,
};
static void __init vstms_fixup(struct tag *tags, char **cmdline,
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index 041da5172423..2cb8dc55b50e 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -3,16 +3,7 @@
#
# Licensed under GPLv2
-# temporary until we can eliminate all drivers using it.
-config PLAT_S3C64XX
- bool
- depends on ARCH_S3C64XX
- default y
- select PM_GENERIC_DOMAINS
- select SAMSUNG_WAKEMASK
- help
- Base platform code for any Samsung S3C64XX device
-
+if ARCH_S3C64XX
# Configuration options for the S3C6410 CPU
@@ -306,3 +297,21 @@ config MACH_WLF_CRAGG_6410
select SAMSUNG_GPIO_EXTRA128
help
Machine support for the Wolfson Cragganmore S3C6410 variant.
+
+config MACH_S3C64XX_DT
+ bool "Samsung S3C6400/S3C6410 machine using Device Tree"
+ select CLKSRC_OF
+ select CPU_S3C6400
+ select CPU_S3C6410
+ select PINCTRL
+ select PINCTRL_S3C64XX
+ select USE_OF
+ help
+ Machine support for Samsung S3C6400/S3C6410 machines with Device Tree
+ enabled.
+ Select this if an FDT blob is available for your S3C64XX SoC-based
+ board.
+ Note: This is under development and not all peripherals can be
+ supported with this machine file.
+
+endif
diff --git a/arch/arm/mach-s3c64xx/Makefile b/arch/arm/mach-s3c64xx/Makefile
index 31d0c9101272..6faedcffce04 100644
--- a/arch/arm/mach-s3c64xx/Makefile
+++ b/arch/arm/mach-s3c64xx/Makefile
@@ -12,7 +12,7 @@ obj- :=
# Core
-obj-y += common.o clock.o
+obj-y += common.o
# Core support
@@ -57,3 +57,4 @@ obj-$(CONFIG_MACH_SMARTQ7) += mach-smartq7.o
obj-$(CONFIG_MACH_SMDK6400) += mach-smdk6400.o
obj-$(CONFIG_MACH_SMDK6410) += mach-smdk6410.o
obj-$(CONFIG_MACH_WLF_CRAGG_6410) += mach-crag6410.o mach-crag6410-module.o
+obj-$(CONFIG_MACH_S3C64XX_DT) += mach-s3c64xx-dt.o
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
deleted file mode 100644
index c1bcc4a6d3a8..000000000000
--- a/arch/arm/mach-s3c64xx/clock.c
+++ /dev/null
@@ -1,1007 +0,0 @@
-/* linux/arch/arm/plat-s3c64xx/clock.c
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * S3C64XX Base clock support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <mach/regs-clock.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/cpu-freq.h>
-#include <plat/clock.h>
-#include <plat/clock-clksrc.h>
-#include <plat/pll.h>
-
-#include "regs-sys.h"
-
-/* fin_apll, fin_mpll and fin_epll are all the same clock, which we call
- * ext_xtal_mux for want of an actual name from the manual.
-*/
-
-static struct clk clk_ext_xtal_mux = {
- .name = "ext_xtal",
-};
-
-#define clk_fin_apll clk_ext_xtal_mux
-#define clk_fin_mpll clk_ext_xtal_mux
-#define clk_fin_epll clk_ext_xtal_mux
-
-#define clk_fout_mpll clk_mpll
-#define clk_fout_epll clk_epll
-
-struct clk clk_h2 = {
- .name = "hclk2",
- .rate = 0,
-};
-
-struct clk clk_27m = {
- .name = "clk_27m",
- .rate = 27000000,
-};
-
-static int clk_48m_ctrl(struct clk *clk, int enable)
-{
- unsigned long flags;
- u32 val;
-
- /* can't rely on clock lock, this register has other usages */
- local_irq_save(flags);
-
- val = __raw_readl(S3C64XX_OTHERS);
- if (enable)
- val |= S3C64XX_OTHERS_USBMASK;
- else
- val &= ~S3C64XX_OTHERS_USBMASK;
-
- __raw_writel(val, S3C64XX_OTHERS);
- local_irq_restore(flags);
-
- return 0;
-}
-
-struct clk clk_48m = {
- .name = "clk_48m",
- .rate = 48000000,
- .enable = clk_48m_ctrl,
-};
-
-struct clk clk_xusbxti = {
- .name = "xusbxti",
- .rate = 48000000,
-};
-
-static int inline s3c64xx_gate(void __iomem *reg,
- struct clk *clk,
- int enable)
-{
- unsigned int ctrlbit = clk->ctrlbit;
- u32 con;
-
- con = __raw_readl(reg);
-
- if (enable)
- con |= ctrlbit;
- else
- con &= ~ctrlbit;
-
- __raw_writel(con, reg);
- return 0;
-}
-
-static int s3c64xx_pclk_ctrl(struct clk *clk, int enable)
-{
- return s3c64xx_gate(S3C_PCLK_GATE, clk, enable);
-}
-
-static int s3c64xx_hclk_ctrl(struct clk *clk, int enable)
-{
- return s3c64xx_gate(S3C_HCLK_GATE, clk, enable);
-}
-
-int s3c64xx_sclk_ctrl(struct clk *clk, int enable)
-{
- return s3c64xx_gate(S3C_SCLK_GATE, clk, enable);
-}
-
-static struct clk init_clocks_off[] = {
- {
- .name = "nand",
- .parent = &clk_h,
- }, {
- .name = "rtc",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_RTC,
- }, {
- .name = "adc",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_TSADC,
- }, {
- .name = "i2c",
- .devname = "s3c2440-i2c.0",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_IIC,
- }, {
- .name = "i2c",
- .devname = "s3c2440-i2c.1",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C6410_CLKCON_PCLK_I2C1,
- }, {
- .name = "keypad",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_KEYPAD,
- }, {
- .name = "spi",
- .devname = "s3c6410-spi.0",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_SPI0,
- }, {
- .name = "spi",
- .devname = "s3c6410-spi.1",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_SPI1,
- }, {
- .name = "48m",
- .devname = "s3c-sdhci.0",
- .parent = &clk_48m,
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_MMC0_48,
- }, {
- .name = "48m",
- .devname = "s3c-sdhci.1",
- .parent = &clk_48m,
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_MMC1_48,
- }, {
- .name = "48m",
- .devname = "s3c-sdhci.2",
- .parent = &clk_48m,
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_MMC2_48,
- }, {
- .name = "ac97",
- .parent = &clk_p,
- .ctrlbit = S3C_CLKCON_PCLK_AC97,
- }, {
- .name = "cfcon",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_IHOST,
- }, {
- .name = "dma0",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_DMA0,
- }, {
- .name = "dma1",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_DMA1,
- }, {
- .name = "3dse",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_3DSE,
- }, {
- .name = "hclk_secur",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_SECUR,
- }, {
- .name = "sdma1",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_SDMA1,
- }, {
- .name = "sdma0",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_SDMA0,
- }, {
- .name = "hclk_jpeg",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_JPEG,
- }, {
- .name = "camif",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_CAMIF,
- }, {
- .name = "hclk_scaler",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_SCALER,
- }, {
- .name = "2d",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_2D,
- }, {
- .name = "tv",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_TV,
- }, {
- .name = "post0",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_POST0,
- }, {
- .name = "rot",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_ROT,
- }, {
- .name = "hclk_mfc",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_MFC,
- }, {
- .name = "pclk_mfc",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_MFC,
- }, {
- .name = "dac27",
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_DAC27,
- }, {
- .name = "tv27",
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_TV27,
- }, {
- .name = "scaler27",
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_SCALER27,
- }, {
- .name = "sclk_scaler",
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_SCALER,
- }, {
- .name = "post0_27",
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_POST0_27,
- }, {
- .name = "secur",
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_SECUR,
- }, {
- .name = "sclk_mfc",
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_MFC,
- }, {
- .name = "sclk_jpeg",
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_JPEG,
- },
-};
-
-static struct clk clk_48m_spi0 = {
- .name = "spi_48m",
- .devname = "s3c6410-spi.0",
- .parent = &clk_48m,
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_SPI0_48,
-};
-
-static struct clk clk_48m_spi1 = {
- .name = "spi_48m",
- .devname = "s3c6410-spi.1",
- .parent = &clk_48m,
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_SPI1_48,
-};
-
-static struct clk clk_i2s0 = {
- .name = "iis",
- .devname = "samsung-i2s.0",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_IIS0,
-};
-
-static struct clk clk_i2s1 = {
- .name = "iis",
- .devname = "samsung-i2s.1",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_IIS1,
-};
-
-#ifdef CONFIG_CPU_S3C6410
-static struct clk clk_i2s2 = {
- .name = "iis",
- .devname = "samsung-i2s.2",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C6410_CLKCON_PCLK_IIS2,
-};
-#endif
-
-static struct clk init_clocks[] = {
- {
- .name = "lcd",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_LCD,
- }, {
- .name = "gpio",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_GPIO,
- }, {
- .name = "usb-host",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_UHOST,
- }, {
- .name = "otg",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_USB,
- }, {
- .name = "timers",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_PWM,
- }, {
- .name = "uart",
- .devname = "s3c6400-uart.0",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_UART0,
- }, {
- .name = "uart",
- .devname = "s3c6400-uart.1",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_UART1,
- }, {
- .name = "uart",
- .devname = "s3c6400-uart.2",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_UART2,
- }, {
- .name = "uart",
- .devname = "s3c6400-uart.3",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_UART3,
- }, {
- .name = "watchdog",
- .parent = &clk_p,
- .ctrlbit = S3C_CLKCON_PCLK_WDT,
- },
-};
-
-static struct clk clk_hsmmc0 = {
- .name = "hsmmc",
- .devname = "s3c-sdhci.0",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_HSMMC0,
-};
-
-static struct clk clk_hsmmc1 = {
- .name = "hsmmc",
- .devname = "s3c-sdhci.1",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_HSMMC1,
-};
-
-static struct clk clk_hsmmc2 = {
- .name = "hsmmc",
- .devname = "s3c-sdhci.2",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_HSMMC2,
-};
-
-static struct clk clk_fout_apll = {
- .name = "fout_apll",
-};
-
-static struct clk *clk_src_apll_list[] = {
- [0] = &clk_fin_apll,
- [1] = &clk_fout_apll,
-};
-
-static struct clksrc_sources clk_src_apll = {
- .sources = clk_src_apll_list,
- .nr_sources = ARRAY_SIZE(clk_src_apll_list),
-};
-
-static struct clksrc_clk clk_mout_apll = {
- .clk = {
- .name = "mout_apll",
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 0, .size = 1 },
- .sources = &clk_src_apll,
-};
-
-static struct clk *clk_src_epll_list[] = {
- [0] = &clk_fin_epll,
- [1] = &clk_fout_epll,
-};
-
-static struct clksrc_sources clk_src_epll = {
- .sources = clk_src_epll_list,
- .nr_sources = ARRAY_SIZE(clk_src_epll_list),
-};
-
-static struct clksrc_clk clk_mout_epll = {
- .clk = {
- .name = "mout_epll",
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 2, .size = 1 },
- .sources = &clk_src_epll,
-};
-
-static struct clk *clk_src_mpll_list[] = {
- [0] = &clk_fin_mpll,
- [1] = &clk_fout_mpll,
-};
-
-static struct clksrc_sources clk_src_mpll = {
- .sources = clk_src_mpll_list,
- .nr_sources = ARRAY_SIZE(clk_src_mpll_list),
-};
-
-static struct clksrc_clk clk_mout_mpll = {
- .clk = {
- .name = "mout_mpll",
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 1, .size = 1 },
- .sources = &clk_src_mpll,
-};
-
-static unsigned int armclk_mask;
-
-static unsigned long s3c64xx_clk_arm_get_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- u32 clkdiv;
-
- /* divisor mask starts at bit0, so no need to shift */
- clkdiv = __raw_readl(S3C_CLK_DIV0) & armclk_mask;
-
- return rate / (clkdiv + 1);
-}
-
-static unsigned long s3c64xx_clk_arm_round_rate(struct clk *clk,
- unsigned long rate)
-{
- unsigned long parent = clk_get_rate(clk->parent);
- u32 div;
-
- if (parent < rate)
- return parent;
-
- div = (parent / rate) - 1;
- if (div > armclk_mask)
- div = armclk_mask;
-
- return parent / (div + 1);
-}
-
-static int s3c64xx_clk_arm_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long parent = clk_get_rate(clk->parent);
- u32 div;
- u32 val;
-
- if (rate < parent / (armclk_mask + 1))
- return -EINVAL;
-
- rate = clk_round_rate(clk, rate);
- div = clk_get_rate(clk->parent) / rate;
-
- val = __raw_readl(S3C_CLK_DIV0);
- val &= ~armclk_mask;
- val |= (div - 1);
- __raw_writel(val, S3C_CLK_DIV0);
-
- return 0;
-
-}
-
-static struct clk clk_arm = {
- .name = "armclk",
- .parent = &clk_mout_apll.clk,
- .ops = &(struct clk_ops) {
- .get_rate = s3c64xx_clk_arm_get_rate,
- .set_rate = s3c64xx_clk_arm_set_rate,
- .round_rate = s3c64xx_clk_arm_round_rate,
- },
-};
-
-static unsigned long s3c64xx_clk_doutmpll_get_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
-
- printk(KERN_DEBUG "%s: parent is %ld\n", __func__, rate);
-
- if (__raw_readl(S3C_CLK_DIV0) & S3C6400_CLKDIV0_MPLL_MASK)
- rate /= 2;
-
- return rate;
-}
-
-static struct clk_ops clk_dout_ops = {
- .get_rate = s3c64xx_clk_doutmpll_get_rate,
-};
-
-static struct clk clk_dout_mpll = {
- .name = "dout_mpll",
- .parent = &clk_mout_mpll.clk,
- .ops = &clk_dout_ops,
-};
-
-static struct clk *clkset_spi_mmc_list[] = {
- &clk_mout_epll.clk,
- &clk_dout_mpll,
- &clk_fin_epll,
- &clk_27m,
-};
-
-static struct clksrc_sources clkset_spi_mmc = {
- .sources = clkset_spi_mmc_list,
- .nr_sources = ARRAY_SIZE(clkset_spi_mmc_list),
-};
-
-static struct clk *clkset_irda_list[] = {
- &clk_mout_epll.clk,
- &clk_dout_mpll,
- NULL,
- &clk_27m,
-};
-
-static struct clksrc_sources clkset_irda = {
- .sources = clkset_irda_list,
- .nr_sources = ARRAY_SIZE(clkset_irda_list),
-};
-
-static struct clk *clkset_uart_list[] = {
- &clk_mout_epll.clk,
- &clk_dout_mpll,
- NULL,
- NULL
-};
-
-static struct clksrc_sources clkset_uart = {
- .sources = clkset_uart_list,
- .nr_sources = ARRAY_SIZE(clkset_uart_list),
-};
-
-static struct clk *clkset_uhost_list[] = {
- &clk_48m,
- &clk_mout_epll.clk,
- &clk_dout_mpll,
- &clk_fin_epll,
-};
-
-static struct clksrc_sources clkset_uhost = {
- .sources = clkset_uhost_list,
- .nr_sources = ARRAY_SIZE(clkset_uhost_list),
-};
-
-/* The peripheral clocks are all controlled via clocksource followed
- * by an optional divider and gate stage. We currently roll this into
- * one clock which hides the intermediate clock from the mux.
- *
- * Note, the JPEG clock can only be an even divider...
- *
- * The scaler and LCD clocks depend on the S3C64XX version, and also
- * have a common parent divisor so are not included here.
- */
-
-/* clocks that feed other parts of the clock source tree */
-
-static struct clk clk_iis_cd0 = {
- .name = "iis_cdclk0",
-};
-
-static struct clk clk_iis_cd1 = {
- .name = "iis_cdclk1",
-};
-
-static struct clk clk_iisv4_cd = {
- .name = "iis_cdclk_v4",
-};
-
-static struct clk clk_pcm_cd = {
- .name = "pcm_cdclk",
-};
-
-static struct clk *clkset_audio0_list[] = {
- [0] = &clk_mout_epll.clk,
- [1] = &clk_dout_mpll,
- [2] = &clk_fin_epll,
- [3] = &clk_iis_cd0,
- [4] = &clk_pcm_cd,
-};
-
-static struct clksrc_sources clkset_audio0 = {
- .sources = clkset_audio0_list,
- .nr_sources = ARRAY_SIZE(clkset_audio0_list),
-};
-
-static struct clk *clkset_audio1_list[] = {
- [0] = &clk_mout_epll.clk,
- [1] = &clk_dout_mpll,
- [2] = &clk_fin_epll,
- [3] = &clk_iis_cd1,
- [4] = &clk_pcm_cd,
-};
-
-static struct clksrc_sources clkset_audio1 = {
- .sources = clkset_audio1_list,
- .nr_sources = ARRAY_SIZE(clkset_audio1_list),
-};
-
-#ifdef CONFIG_CPU_S3C6410
-static struct clk *clkset_audio2_list[] = {
- [0] = &clk_mout_epll.clk,
- [1] = &clk_dout_mpll,
- [2] = &clk_fin_epll,
- [3] = &clk_iisv4_cd,
- [4] = &clk_pcm_cd,
-};
-
-static struct clksrc_sources clkset_audio2 = {
- .sources = clkset_audio2_list,
- .nr_sources = ARRAY_SIZE(clkset_audio2_list),
-};
-#endif
-
-static struct clksrc_clk clksrcs[] = {
- {
- .clk = {
- .name = "usb-bus-host",
- .ctrlbit = S3C_CLKCON_SCLK_UHOST,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 5, .size = 2 },
- .reg_div = { .reg = S3C_CLK_DIV1, .shift = 20, .size = 4 },
- .sources = &clkset_uhost,
- }, {
- .clk = {
- .name = "irda-bus",
- .ctrlbit = S3C_CLKCON_SCLK_IRDA,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 24, .size = 2 },
- .reg_div = { .reg = S3C_CLK_DIV2, .shift = 20, .size = 4 },
- .sources = &clkset_irda,
- }, {
- .clk = {
- .name = "camera",
- .ctrlbit = S3C_CLKCON_SCLK_CAM,
- .enable = s3c64xx_sclk_ctrl,
- .parent = &clk_h2,
- },
- .reg_div = { .reg = S3C_CLK_DIV0, .shift = 20, .size = 4 },
- },
-};
-
-/* Where does UCLK0 come from? */
-static struct clksrc_clk clk_sclk_uclk = {
- .clk = {
- .name = "uclk1",
- .ctrlbit = S3C_CLKCON_SCLK_UART,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 13, .size = 1 },
- .reg_div = { .reg = S3C_CLK_DIV2, .shift = 16, .size = 4 },
- .sources = &clkset_uart,
-};
-
-static struct clksrc_clk clk_sclk_mmc0 = {
- .clk = {
- .name = "mmc_bus",
- .devname = "s3c-sdhci.0",
- .ctrlbit = S3C_CLKCON_SCLK_MMC0,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 18, .size = 2 },
- .reg_div = { .reg = S3C_CLK_DIV1, .shift = 0, .size = 4 },
- .sources = &clkset_spi_mmc,
-};
-
-static struct clksrc_clk clk_sclk_mmc1 = {
- .clk = {
- .name = "mmc_bus",
- .devname = "s3c-sdhci.1",
- .ctrlbit = S3C_CLKCON_SCLK_MMC1,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 20, .size = 2 },
- .reg_div = { .reg = S3C_CLK_DIV1, .shift = 4, .size = 4 },
- .sources = &clkset_spi_mmc,
-};
-
-static struct clksrc_clk clk_sclk_mmc2 = {
- .clk = {
- .name = "mmc_bus",
- .devname = "s3c-sdhci.2",
- .ctrlbit = S3C_CLKCON_SCLK_MMC2,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 22, .size = 2 },
- .reg_div = { .reg = S3C_CLK_DIV1, .shift = 8, .size = 4 },
- .sources = &clkset_spi_mmc,
-};
-
-static struct clksrc_clk clk_sclk_spi0 = {
- .clk = {
- .name = "spi-bus",
- .devname = "s3c6410-spi.0",
- .ctrlbit = S3C_CLKCON_SCLK_SPI0,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 14, .size = 2 },
- .reg_div = { .reg = S3C_CLK_DIV2, .shift = 0, .size = 4 },
- .sources = &clkset_spi_mmc,
-};
-
-static struct clksrc_clk clk_sclk_spi1 = {
- .clk = {
- .name = "spi-bus",
- .devname = "s3c6410-spi.1",
- .ctrlbit = S3C_CLKCON_SCLK_SPI1,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 16, .size = 2 },
- .reg_div = { .reg = S3C_CLK_DIV2, .shift = 4, .size = 4 },
- .sources = &clkset_spi_mmc,
-};
-
-static struct clksrc_clk clk_audio_bus0 = {
- .clk = {
- .name = "audio-bus",
- .devname = "samsung-i2s.0",
- .ctrlbit = S3C_CLKCON_SCLK_AUDIO0,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 7, .size = 3 },
- .reg_div = { .reg = S3C_CLK_DIV2, .shift = 8, .size = 4 },
- .sources = &clkset_audio0,
-};
-
-static struct clksrc_clk clk_audio_bus1 = {
- .clk = {
- .name = "audio-bus",
- .devname = "samsung-i2s.1",
- .ctrlbit = S3C_CLKCON_SCLK_AUDIO1,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 10, .size = 3 },
- .reg_div = { .reg = S3C_CLK_DIV2, .shift = 12, .size = 4 },
- .sources = &clkset_audio1,
-};
-
-#ifdef CONFIG_CPU_S3C6410
-static struct clksrc_clk clk_audio_bus2 = {
- .clk = {
- .name = "audio-bus",
- .devname = "samsung-i2s.2",
- .ctrlbit = S3C6410_CLKCON_SCLK_AUDIO2,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C6410_CLK_SRC2, .shift = 0, .size = 3 },
- .reg_div = { .reg = S3C_CLK_DIV2, .shift = 24, .size = 4 },
- .sources = &clkset_audio2,
-};
-#endif
-/* Clock initialisation code */
-
-static struct clksrc_clk *init_parents[] = {
- &clk_mout_apll,
- &clk_mout_epll,
- &clk_mout_mpll,
-};
-
-static struct clksrc_clk *clksrc_cdev[] = {
- &clk_sclk_uclk,
- &clk_sclk_mmc0,
- &clk_sclk_mmc1,
- &clk_sclk_mmc2,
- &clk_sclk_spi0,
- &clk_sclk_spi1,
- &clk_audio_bus0,
- &clk_audio_bus1,
-};
-
-static struct clk *clk_cdev[] = {
- &clk_hsmmc0,
- &clk_hsmmc1,
- &clk_hsmmc2,
- &clk_48m_spi0,
- &clk_48m_spi1,
- &clk_i2s0,
- &clk_i2s1,
-};
-
-static struct clk_lookup s3c64xx_clk_lookup[] = {
- CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
- CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_sclk_uclk.clk),
- CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &clk_hsmmc0),
- CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &clk_hsmmc1),
- CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.0", &clk_hsmmc2),
- CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk),
- CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
- CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
- CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
- CLKDEV_INIT("s3c6410-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
- CLKDEV_INIT("s3c6410-spi.0", "spi_busclk2", &clk_48m_spi0),
- CLKDEV_INIT("s3c6410-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
- CLKDEV_INIT("s3c6410-spi.1", "spi_busclk2", &clk_48m_spi1),
- CLKDEV_INIT("samsung-i2s.0", "i2s_opclk0", &clk_i2s0),
- CLKDEV_INIT("samsung-i2s.0", "i2s_opclk1", &clk_audio_bus0.clk),
- CLKDEV_INIT("samsung-i2s.1", "i2s_opclk0", &clk_i2s1),
- CLKDEV_INIT("samsung-i2s.1", "i2s_opclk1", &clk_audio_bus1.clk),
-#ifdef CONFIG_CPU_S3C6410
- CLKDEV_INIT("samsung-i2s.2", "i2s_opclk0", &clk_i2s2),
- CLKDEV_INIT("samsung-i2s.2", "i2s_opclk1", &clk_audio_bus2.clk),
-#endif
-};
-
-#define GET_DIV(clk, field) ((((clk) & field##_MASK) >> field##_SHIFT) + 1)
-
-void __init_or_cpufreq s3c64xx_setup_clocks(void)
-{
- struct clk *xtal_clk;
- unsigned long xtal;
- unsigned long fclk;
- unsigned long hclk;
- unsigned long hclk2;
- unsigned long pclk;
- unsigned long epll;
- unsigned long apll;
- unsigned long mpll;
- unsigned int ptr;
- u32 clkdiv0;
-
- printk(KERN_DEBUG "%s: registering clocks\n", __func__);
-
- clkdiv0 = __raw_readl(S3C_CLK_DIV0);
- printk(KERN_DEBUG "%s: clkdiv0 = %08x\n", __func__, clkdiv0);
-
- xtal_clk = clk_get(NULL, "xtal");
- BUG_ON(IS_ERR(xtal_clk));
-
- xtal = clk_get_rate(xtal_clk);
- clk_put(xtal_clk);
-
- printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal);
-
- /* For now assume the mux always selects the crystal */
- clk_ext_xtal_mux.parent = xtal_clk;
-
- epll = s3c_get_pll6553x(xtal, __raw_readl(S3C_EPLL_CON0),
- __raw_readl(S3C_EPLL_CON1));
- mpll = s3c6400_get_pll(xtal, __raw_readl(S3C_MPLL_CON));
- apll = s3c6400_get_pll(xtal, __raw_readl(S3C_APLL_CON));
-
- fclk = mpll;
-
- printk(KERN_INFO "S3C64XX: PLL settings, A=%ld, M=%ld, E=%ld\n",
- apll, mpll, epll);
-
- if(__raw_readl(S3C64XX_OTHERS) & S3C64XX_OTHERS_SYNCMUXSEL)
- /* Synchronous mode */
- hclk2 = apll / GET_DIV(clkdiv0, S3C6400_CLKDIV0_HCLK2);
- else
- /* Asynchronous mode */
- hclk2 = mpll / GET_DIV(clkdiv0, S3C6400_CLKDIV0_HCLK2);
-
- hclk = hclk2 / GET_DIV(clkdiv0, S3C6400_CLKDIV0_HCLK);
- pclk = hclk2 / GET_DIV(clkdiv0, S3C6400_CLKDIV0_PCLK);
-
- printk(KERN_INFO "S3C64XX: HCLK2=%ld, HCLK=%ld, PCLK=%ld\n",
- hclk2, hclk, pclk);
-
- clk_fout_mpll.rate = mpll;
- clk_fout_epll.rate = epll;
- clk_fout_apll.rate = apll;
-
- clk_h2.rate = hclk2;
- clk_h.rate = hclk;
- clk_p.rate = pclk;
- clk_f.rate = fclk;
-
- for (ptr = 0; ptr < ARRAY_SIZE(init_parents); ptr++)
- s3c_set_clksrc(init_parents[ptr], true);
-
- for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
- s3c_set_clksrc(&clksrcs[ptr], true);
-}
-
-static struct clk *clks1[] __initdata = {
- &clk_ext_xtal_mux,
- &clk_iis_cd0,
- &clk_iis_cd1,
- &clk_iisv4_cd,
- &clk_pcm_cd,
- &clk_mout_epll.clk,
- &clk_mout_mpll.clk,
- &clk_dout_mpll,
- &clk_arm,
-};
-
-static struct clk *clks[] __initdata = {
- &clk_ext,
- &clk_epll,
- &clk_27m,
- &clk_48m,
- &clk_h2,
- &clk_xusbxti,
-};
-
-/**
- * s3c64xx_register_clocks - register clocks for s3c6400 and s3c6410
- * @xtal: The rate for the clock crystal feeding the PLLs.
- * @armclk_divlimit: Divisor mask for ARMCLK.
- *
- * Register the clocks for the S3C6400 and S3C6410 SoC range, such
- * as ARMCLK as well as the necessary parent clocks.
- *
- * This call does not setup the clocks, which is left to the
- * s3c64xx_setup_clocks() call which may be needed by the cpufreq
- * or resume code to re-set the clocks if the bootloader has changed
- * them.
- */
-void __init s3c64xx_register_clocks(unsigned long xtal,
- unsigned armclk_divlimit)
-{
- unsigned int cnt;
-
- armclk_mask = armclk_divlimit;
-
- s3c24xx_register_baseclocks(xtal);
- s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
-
- s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
-
- s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
- s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
-
- s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev));
- for (cnt = 0; cnt < ARRAY_SIZE(clk_cdev); cnt++)
- s3c_disable_clocks(clk_cdev[cnt], 1);
-
- s3c24xx_register_clocks(clks1, ARRAY_SIZE(clks1));
- s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
- for (cnt = 0; cnt < ARRAY_SIZE(clksrc_cdev); cnt++)
- s3c_register_clksrc(clksrc_cdev[cnt], 1);
- clkdev_add_table(s3c64xx_clk_lookup, ARRAY_SIZE(s3c64xx_clk_lookup));
-}
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c
index 73d79cf5e141..7a3ce4c39e5f 100644
--- a/arch/arm/mach-s3c64xx/common.c
+++ b/arch/arm/mach-s3c64xx/common.c
@@ -14,9 +14,14 @@
* published by the Free Software Foundation.
*/
+/*
+ * NOTE: Code in this file is not used when booting with Device Tree support.
+ */
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/clk-provider.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/serial_core.h>
@@ -38,7 +43,6 @@
#include <mach/regs-gpio.h>
#include <plat/cpu.h>
-#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/pm.h>
#include <plat/gpio-cfg.h>
@@ -50,6 +54,19 @@
#include "common.h"
+/* External clock frequency */
+static unsigned long xtal_f = 12000000, xusbxti_f = 48000000;
+
+void __init s3c64xx_set_xtal_freq(unsigned long freq)
+{
+ xtal_f = freq;
+}
+
+void __init s3c64xx_set_xusbxti_freq(unsigned long freq)
+{
+ xusbxti_f = freq;
+}
+
/* uart registration process */
static void __init s3c64xx_init_uarts(struct s3c2410_uartcfg *cfg, int no)
@@ -67,7 +84,6 @@ static struct cpu_table cpu_ids[] __initdata = {
.idcode = S3C6400_CPU_ID,
.idmask = S3C64XX_CPU_MASK,
.map_io = s3c6400_map_io,
- .init_clocks = s3c6400_init_clocks,
.init_uarts = s3c64xx_init_uarts,
.init = s3c6400_init,
.name = name_s3c6400,
@@ -75,7 +91,6 @@ static struct cpu_table cpu_ids[] __initdata = {
.idcode = S3C6410_CPU_ID,
.idmask = S3C64XX_CPU_MASK,
.map_io = s3c6410_map_io,
- .init_clocks = s3c6410_init_clocks,
.init_uarts = s3c64xx_init_uarts,
.init = s3c6410_init,
.name = name_s3c6410,
@@ -192,6 +207,10 @@ void __init s3c64xx_init_io(struct map_desc *mach_desc, int size)
static __init int s3c64xx_dev_init(void)
{
+ /* Not applicable when using DT. */
+ if (of_have_populated_dt())
+ return 0;
+
subsys_system_register(&s3c64xx_subsys, NULL);
return device_register(&s3c64xx_dev);
}
@@ -213,8 +232,10 @@ void __init s3c64xx_init_irq(u32 vic0_valid, u32 vic1_valid)
{
/*
* FIXME: there is no better place to put this at the moment
- * (samsung_wdt_reset_init needs clocks)
+ * (s3c64xx_clk_init needs ioremap and must happen before init_time
+ * samsung_wdt_reset_init needs clocks)
*/
+ s3c64xx_clk_init(NULL, xtal_f, xusbxti_f, soc_is_s3c6400(), S3C_VA_SYS);
samsung_wdt_reset_init(S3C_VA_WATCHDOG);
printk(KERN_DEBUG "%s: initialising interrupts\n", __func__);
@@ -391,6 +412,10 @@ static int __init s3c64xx_init_irq_eint(void)
{
int irq;
+ /* On DT-enabled systems EINTs are handled by pinctrl-s3c64xx driver. */
+ if (of_have_populated_dt())
+ return -ENODEV;
+
for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) {
irq_set_chip_and_handler(irq, &s3c_irq_eint, handle_level_irq);
irq_set_chip_data(irq, (void *)eint_irq_to_bit(irq));
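/*
 * Illustrative sketch of the gating idiom used in common.c above (not part
 * of the patch): legacy initcalls bail out early once a device tree has
 * been populated, so the equivalent DT-based drivers can take over. The
 * function name below is a placeholder.
 */
#include <linux/init.h>
#include <linux/of.h>

static int __init example_legacy_dev_init(void)
{
	/* Skip board-file style setup when booting with DT. */
	if (of_have_populated_dt())
		return 0;

	/* ... legacy platform device / IRQ setup would go here ... */
	return 0;
}
arch_initcall(example_legacy_dev_init);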
diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h
index e8f990b37665..bd3bd562011e 100644
--- a/arch/arm/mach-s3c64xx/common.h
+++ b/arch/arm/mach-s3c64xx/common.h
@@ -22,21 +22,21 @@
void s3c64xx_init_irq(u32 vic0, u32 vic1);
void s3c64xx_init_io(struct map_desc *mach_desc, int size);
-void s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_limit);
-void s3c64xx_setup_clocks(void);
-
void s3c64xx_restart(enum reboot_mode mode, const char *cmd);
void s3c64xx_init_late(void);
+void s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
+ unsigned long xusbxti_f, bool is_s3c6400, void __iomem *reg_base);
+void s3c64xx_set_xtal_freq(unsigned long freq);
+void s3c64xx_set_xusbxti_freq(unsigned long freq);
+
#ifdef CONFIG_CPU_S3C6400
extern int s3c6400_init(void);
extern void s3c6400_init_irq(void);
extern void s3c6400_map_io(void);
-extern void s3c6400_init_clocks(int xtal);
#else
-#define s3c6400_init_clocks NULL
#define s3c6400_map_io NULL
#define s3c6400_init NULL
#endif
@@ -46,10 +46,8 @@ extern void s3c6400_init_clocks(int xtal);
extern int s3c6410_init(void);
extern void s3c6410_init_irq(void);
extern void s3c6410_map_io(void);
-extern void s3c6410_init_clocks(int xtal);
#else
-#define s3c6410_init_clocks NULL
#define s3c6410_map_io NULL
#define s3c6410_init NULL
#endif
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index 759846c28d12..7e22c2113816 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -12,6 +12,10 @@
* published by the Free Software Foundation.
*/
+/*
+ * NOTE: Code in this file is not used when booting with Device Tree support.
+ */
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -24,6 +28,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/amba/pl080.h>
+#include <linux/of.h>
#include <mach/dma.h>
#include <mach/map.h>
@@ -677,7 +682,7 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
goto err_map;
}
- clk_enable(dmac->clk);
+ clk_prepare_enable(dmac->clk);
dmac->regs = regs;
dmac->chanbase = chbase;
@@ -711,7 +716,7 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
return 0;
err_clk:
- clk_disable(dmac->clk);
+ clk_disable_unprepare(dmac->clk);
clk_put(dmac->clk);
err_map:
iounmap(regs);
@@ -726,6 +731,10 @@ static int __init s3c64xx_dma_init(void)
{
int ret;
+ /* This driver is not supported when booting with device tree. */
+ if (of_have_populated_dt())
+ return -ENODEV;
+
printk(KERN_INFO "%s: Registering DMA channels\n", __func__);
dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0);
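/*
 * Sketch of the clk API pairing behind the dma.c change above (not from
 * the patch): with the common clock framework a clock must be prepared
 * before it is enabled, so clk_prepare_enable()/clk_disable_unprepare()
 * replace bare clk_enable()/clk_disable(). The "dma" clock id and the
 * function name are placeholders.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_clock_on(struct device *dev)
{
	struct clk *clk = clk_get(dev, "dma");
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare + enable in one step */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* ... program the controller ... */

	clk_disable_unprepare(clk);	/* disable + unprepare */
	clk_put(clk);
	return 0;
}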
diff --git a/arch/arm/mach-s3c64xx/include/mach/regs-clock.h b/arch/arm/mach-s3c64xx/include/mach/regs-clock.h
index 05332b998ec0..4f44aac77092 100644
--- a/arch/arm/mach-s3c64xx/include/mach/regs-clock.h
+++ b/arch/arm/mach-s3c64xx/include/mach/regs-clock.h
@@ -15,145 +15,21 @@
#ifndef __PLAT_REGS_CLOCK_H
#define __PLAT_REGS_CLOCK_H __FILE__
+/*
+ * FIXME: Remove remaining definitions
+ */
+
#define S3C_CLKREG(x) (S3C_VA_SYS + (x))
-#define S3C_APLL_LOCK S3C_CLKREG(0x00)
-#define S3C_MPLL_LOCK S3C_CLKREG(0x04)
-#define S3C_EPLL_LOCK S3C_CLKREG(0x08)
-#define S3C_APLL_CON S3C_CLKREG(0x0C)
-#define S3C_MPLL_CON S3C_CLKREG(0x10)
-#define S3C_EPLL_CON0 S3C_CLKREG(0x14)
-#define S3C_EPLL_CON1 S3C_CLKREG(0x18)
-#define S3C_CLK_SRC S3C_CLKREG(0x1C)
-#define S3C_CLK_DIV0 S3C_CLKREG(0x20)
-#define S3C_CLK_DIV1 S3C_CLKREG(0x24)
-#define S3C_CLK_DIV2 S3C_CLKREG(0x28)
-#define S3C_CLK_OUT S3C_CLKREG(0x2C)
-#define S3C_HCLK_GATE S3C_CLKREG(0x30)
#define S3C_PCLK_GATE S3C_CLKREG(0x34)
-#define S3C_SCLK_GATE S3C_CLKREG(0x38)
-#define S3C_MEM0_GATE S3C_CLKREG(0x3C)
#define S3C6410_CLK_SRC2 S3C_CLKREG(0x10C)
#define S3C_MEM_SYS_CFG S3C_CLKREG(0x120)
-/* CLKDIV0 */
-#define S3C6400_CLKDIV0_PCLK_MASK (0xf << 12)
-#define S3C6400_CLKDIV0_PCLK_SHIFT (12)
-#define S3C6400_CLKDIV0_HCLK2_MASK (0x7 << 9)
-#define S3C6400_CLKDIV0_HCLK2_SHIFT (9)
-#define S3C6400_CLKDIV0_HCLK_MASK (0x1 << 8)
-#define S3C6400_CLKDIV0_HCLK_SHIFT (8)
-#define S3C6400_CLKDIV0_MPLL_MASK (0x1 << 4)
-#define S3C6400_CLKDIV0_MPLL_SHIFT (4)
-
-#define S3C6400_CLKDIV0_ARM_MASK (0x7 << 0)
-#define S3C6410_CLKDIV0_ARM_MASK (0xf << 0)
-#define S3C6400_CLKDIV0_ARM_SHIFT (0)
-
-/* HCLK GATE Registers */
-#define S3C_CLKCON_HCLK_3DSE (1<<31)
-#define S3C_CLKCON_HCLK_UHOST (1<<29)
-#define S3C_CLKCON_HCLK_SECUR (1<<28)
-#define S3C_CLKCON_HCLK_SDMA1 (1<<27)
-#define S3C_CLKCON_HCLK_SDMA0 (1<<26)
-#define S3C_CLKCON_HCLK_IROM (1<<25)
-#define S3C_CLKCON_HCLK_DDR1 (1<<24)
-#define S3C_CLKCON_HCLK_DDR0 (1<<23)
-#define S3C_CLKCON_HCLK_MEM1 (1<<22)
-#define S3C_CLKCON_HCLK_MEM0 (1<<21)
-#define S3C_CLKCON_HCLK_USB (1<<20)
-#define S3C_CLKCON_HCLK_HSMMC2 (1<<19)
-#define S3C_CLKCON_HCLK_HSMMC1 (1<<18)
-#define S3C_CLKCON_HCLK_HSMMC0 (1<<17)
-#define S3C_CLKCON_HCLK_MDP (1<<16)
-#define S3C_CLKCON_HCLK_DHOST (1<<15)
-#define S3C_CLKCON_HCLK_IHOST (1<<14)
-#define S3C_CLKCON_HCLK_DMA1 (1<<13)
-#define S3C_CLKCON_HCLK_DMA0 (1<<12)
-#define S3C_CLKCON_HCLK_JPEG (1<<11)
-#define S3C_CLKCON_HCLK_CAMIF (1<<10)
-#define S3C_CLKCON_HCLK_SCALER (1<<9)
-#define S3C_CLKCON_HCLK_2D (1<<8)
-#define S3C_CLKCON_HCLK_TV (1<<7)
-#define S3C_CLKCON_HCLK_POST0 (1<<5)
-#define S3C_CLKCON_HCLK_ROT (1<<4)
-#define S3C_CLKCON_HCLK_LCD (1<<3)
-#define S3C_CLKCON_HCLK_TZIC (1<<2)
-#define S3C_CLKCON_HCLK_INTC (1<<1)
-#define S3C_CLKCON_HCLK_MFC (1<<0)
-
/* PCLK GATE Registers */
-#define S3C6410_CLKCON_PCLK_I2C1 (1<<27)
-#define S3C6410_CLKCON_PCLK_IIS2 (1<<26)
-#define S3C_CLKCON_PCLK_SKEY (1<<24)
-#define S3C_CLKCON_PCLK_CHIPID (1<<23)
-#define S3C_CLKCON_PCLK_SPI1 (1<<22)
-#define S3C_CLKCON_PCLK_SPI0 (1<<21)
-#define S3C_CLKCON_PCLK_HSIRX (1<<20)
-#define S3C_CLKCON_PCLK_HSITX (1<<19)
-#define S3C_CLKCON_PCLK_GPIO (1<<18)
-#define S3C_CLKCON_PCLK_IIC (1<<17)
-#define S3C_CLKCON_PCLK_IIS1 (1<<16)
-#define S3C_CLKCON_PCLK_IIS0 (1<<15)
-#define S3C_CLKCON_PCLK_AC97 (1<<14)
-#define S3C_CLKCON_PCLK_TZPC (1<<13)
-#define S3C_CLKCON_PCLK_TSADC (1<<12)
-#define S3C_CLKCON_PCLK_KEYPAD (1<<11)
-#define S3C_CLKCON_PCLK_IRDA (1<<10)
-#define S3C_CLKCON_PCLK_PCM1 (1<<9)
-#define S3C_CLKCON_PCLK_PCM0 (1<<8)
-#define S3C_CLKCON_PCLK_PWM (1<<7)
-#define S3C_CLKCON_PCLK_RTC (1<<6)
-#define S3C_CLKCON_PCLK_WDT (1<<5)
#define S3C_CLKCON_PCLK_UART3 (1<<4)
#define S3C_CLKCON_PCLK_UART2 (1<<3)
#define S3C_CLKCON_PCLK_UART1 (1<<2)
#define S3C_CLKCON_PCLK_UART0 (1<<1)
-#define S3C_CLKCON_PCLK_MFC (1<<0)
-
-/* SCLK GATE Registers */
-#define S3C_CLKCON_SCLK_UHOST (1<<30)
-#define S3C_CLKCON_SCLK_MMC2_48 (1<<29)
-#define S3C_CLKCON_SCLK_MMC1_48 (1<<28)
-#define S3C_CLKCON_SCLK_MMC0_48 (1<<27)
-#define S3C_CLKCON_SCLK_MMC2 (1<<26)
-#define S3C_CLKCON_SCLK_MMC1 (1<<25)
-#define S3C_CLKCON_SCLK_MMC0 (1<<24)
-#define S3C_CLKCON_SCLK_SPI1_48 (1<<23)
-#define S3C_CLKCON_SCLK_SPI0_48 (1<<22)
-#define S3C_CLKCON_SCLK_SPI1 (1<<21)
-#define S3C_CLKCON_SCLK_SPI0 (1<<20)
-#define S3C_CLKCON_SCLK_DAC27 (1<<19)
-#define S3C_CLKCON_SCLK_TV27 (1<<18)
-#define S3C_CLKCON_SCLK_SCALER27 (1<<17)
-#define S3C_CLKCON_SCLK_SCALER (1<<16)
-#define S3C_CLKCON_SCLK_LCD27 (1<<15)
-#define S3C_CLKCON_SCLK_LCD (1<<14)
-#define S3C6400_CLKCON_SCLK_POST1_27 (1<<13)
-#define S3C6410_CLKCON_FIMC (1<<13)
-#define S3C_CLKCON_SCLK_POST0_27 (1<<12)
-#define S3C6400_CLKCON_SCLK_POST1 (1<<11)
-#define S3C6410_CLKCON_SCLK_AUDIO2 (1<<11)
-#define S3C_CLKCON_SCLK_POST0 (1<<10)
-#define S3C_CLKCON_SCLK_AUDIO1 (1<<9)
-#define S3C_CLKCON_SCLK_AUDIO0 (1<<8)
-#define S3C_CLKCON_SCLK_SECUR (1<<7)
-#define S3C_CLKCON_SCLK_IRDA (1<<6)
-#define S3C_CLKCON_SCLK_UART (1<<5)
-#define S3C_CLKCON_SCLK_ONENAND (1<<4)
-#define S3C_CLKCON_SCLK_MFC (1<<3)
-#define S3C_CLKCON_SCLK_CAM (1<<2)
-#define S3C_CLKCON_SCLK_JPEG (1<<1)
-
-/* CLKSRC */
-
-#define S3C6400_CLKSRC_APLL_MOUT (1 << 0)
-#define S3C6400_CLKSRC_MPLL_MOUT (1 << 1)
-#define S3C6400_CLKSRC_EPLL_MOUT (1 << 2)
-#define S3C6400_CLKSRC_APLL_MOUT_SHIFT (0)
-#define S3C6400_CLKSRC_MPLL_MOUT_SHIFT (1)
-#define S3C6400_CLKSRC_EPLL_MOUT_SHIFT (2)
-#define S3C6400_CLKSRC_MFC (1 << 4)
/* MEM_SYS_CFG */
#define MEM_SYS_CFG_INDEP_CF 0x4000
diff --git a/arch/arm/mach-s3c64xx/include/mach/timex.h b/arch/arm/mach-s3c64xx/include/mach/timex.h
deleted file mode 100644
index fb2e8cd40829..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/timex.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* arch/arm/mach-s3c64xx/include/mach/timex.h
- *
- * Copyright (c) 2003-2005 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C6400 - time parameters
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_TIMEX_H
-#define __ASM_ARCH_TIMEX_H
-
-/* CLOCK_TICK_RATE needs to be evaluatable by the cpp, so making it
- * a variable is useless. It seems as long as we make our timers an
- * exact multiple of HZ, any value that makes a 1->1 correspondence
- * for the time conversion functions to/from jiffies is acceptable.
-*/
-
-#define CLOCK_TICK_RATE 12000000
-
-#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-s3c64xx/irq-pm.c b/arch/arm/mach-s3c64xx/irq-pm.c
index c3da1b68d03e..1649c0d1c1b8 100644
--- a/arch/arm/mach-s3c64xx/irq-pm.c
+++ b/arch/arm/mach-s3c64xx/irq-pm.c
@@ -12,12 +12,17 @@
* published by the Free Software Foundation.
*/
+/*
+ * NOTE: Code in this file is not used when booting with Device Tree support.
+ */
+
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/serial_core.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <mach/map.h>
@@ -101,6 +106,10 @@ static struct syscore_ops s3c64xx_irq_syscore_ops = {
static __init int s3c64xx_syscore_init(void)
{
+ /* Appropriate drivers (pinctrl, uart) handle this when using DT. */
+ if (of_have_populated_dt())
+ return 0;
+
register_syscore_ops(&s3c64xx_irq_syscore_ops);
return 0;
diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c
index 35e3f54574ef..d266dd5f7060 100644
--- a/arch/arm/mach-s3c64xx/mach-anw6410.c
+++ b/arch/arm/mach-s3c64xx/mach-anw6410.c
@@ -207,7 +207,7 @@ static struct platform_device *anw6410_devices[] __initdata = {
static void __init anw6410_map_io(void)
{
s3c64xx_init_io(anw6410_iodesc, ARRAY_SIZE(anw6410_iodesc));
- s3c24xx_init_clocks(12000000);
+ s3c64xx_set_xtal_freq(12000000);
s3c24xx_init_uarts(anw6410_uartcfgs, ARRAY_SIZE(anw6410_uartcfgs));
samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index eb8e5a1aca42..aca7d16e195d 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -310,10 +310,6 @@ static struct regulator_consumer_supply wallvdd_consumers[] = {
REGULATOR_SUPPLY("SPKVDDL", "spi0.1"),
REGULATOR_SUPPLY("SPKVDDR", "spi0.1"),
- REGULATOR_SUPPLY("SPKVDDL", "wm5102-codec"),
- REGULATOR_SUPPLY("SPKVDDR", "wm5102-codec"),
- REGULATOR_SUPPLY("SPKVDDL", "wm5110-codec"),
- REGULATOR_SUPPLY("SPKVDDR", "wm5110-codec"),
REGULATOR_SUPPLY("DC1VDD", "0-0034"),
REGULATOR_SUPPLY("DC2VDD", "0-0034"),
@@ -653,14 +649,6 @@ static struct regulator_consumer_supply pvdd_1v8_consumers[] = {
REGULATOR_SUPPLY("DBVDD3", "spi0.1"),
REGULATOR_SUPPLY("LDOVDD", "spi0.1"),
REGULATOR_SUPPLY("CPVDD", "spi0.1"),
-
- REGULATOR_SUPPLY("DBVDD2", "wm5102-codec"),
- REGULATOR_SUPPLY("DBVDD3", "wm5102-codec"),
- REGULATOR_SUPPLY("CPVDD", "wm5102-codec"),
-
- REGULATOR_SUPPLY("DBVDD2", "wm5110-codec"),
- REGULATOR_SUPPLY("DBVDD3", "wm5110-codec"),
- REGULATOR_SUPPLY("CPVDD", "wm5110-codec"),
};
static struct regulator_init_data pvdd_1v8 = {
@@ -743,7 +731,7 @@ static struct s3c2410_platform_i2c i2c1_pdata = {
static void __init crag6410_map_io(void)
{
s3c64xx_init_io(NULL, 0);
- s3c24xx_init_clocks(12000000);
+ s3c64xx_set_xtal_freq(12000000);
s3c24xx_init_uarts(crag6410_uartcfgs, ARRAY_SIZE(crag6410_uartcfgs));
samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index f39569e0f2e6..e8064044ef79 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -247,7 +247,7 @@ static struct platform_device *hmt_devices[] __initdata = {
static void __init hmt_map_io(void)
{
s3c64xx_init_io(hmt_iodesc, ARRAY_SIZE(hmt_iodesc));
- s3c24xx_init_clocks(12000000);
+ s3c64xx_set_xtal_freq(12000000);
s3c24xx_init_uarts(hmt_uartcfgs, ARRAY_SIZE(hmt_uartcfgs));
samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
}
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index fc043e3ecdf8..58d46a3d7b78 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -231,7 +231,7 @@ static void __init mini6410_map_io(void)
u32 tmp;
s3c64xx_init_io(NULL, 0);
- s3c24xx_init_clocks(12000000);
+ s3c64xx_set_xtal_freq(12000000);
s3c24xx_init_uarts(mini6410_uartcfgs, ARRAY_SIZE(mini6410_uartcfgs));
samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
diff --git a/arch/arm/mach-s3c64xx/mach-ncp.c b/arch/arm/mach-s3c64xx/mach-ncp.c
index 7e2c3908f1f8..2067b0bf55b4 100644
--- a/arch/arm/mach-s3c64xx/mach-ncp.c
+++ b/arch/arm/mach-s3c64xx/mach-ncp.c
@@ -86,7 +86,7 @@ static struct map_desc ncp_iodesc[] __initdata = {};
static void __init ncp_map_io(void)
{
s3c64xx_init_io(ncp_iodesc, ARRAY_SIZE(ncp_iodesc));
- s3c24xx_init_clocks(12000000);
+ s3c64xx_set_xtal_freq(12000000);
s3c24xx_init_uarts(ncp_uartcfgs, ARRAY_SIZE(ncp_uartcfgs));
samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
}
diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
new file mode 100644
index 000000000000..7eb9a10fc1af
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
@@ -0,0 +1,85 @@
+/*
+ * Samsung's S3C64XX flattened device tree enabled machine
+ *
+ * Copyright (c) 2013 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/clk-provider.h>
+#include <linux/irqchip.h>
+#include <linux/of_platform.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/system_misc.h>
+
+#include <plat/cpu.h>
+#include <plat/watchdog-reset.h>
+
+#include <mach/map.h>
+
+#include "common.h"
+
+/*
+ * IO mapping for shared system controller IP.
+ *
+ * FIXME: Make remaining drivers use dynamic mapping.
+ */
+static struct map_desc s3c64xx_dt_iodesc[] __initdata = {
+ {
+ .virtual = (unsigned long)S3C_VA_SYS,
+ .pfn = __phys_to_pfn(S3C64XX_PA_SYSCON),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+};
+
+static void __init s3c64xx_dt_map_io(void)
+{
+ debug_ll_io_init();
+ iotable_init(s3c64xx_dt_iodesc, ARRAY_SIZE(s3c64xx_dt_iodesc));
+
+ s3c64xx_init_cpu();
+
+ if (!soc_is_s3c64xx())
+ panic("SoC is not S3C64xx!");
+}
+
+static void __init s3c64xx_dt_init_irq(void)
+{
+ of_clk_init(NULL);
+ samsung_wdt_reset_of_init();
+ irqchip_init();
+};
+
+static void __init s3c64xx_dt_init_machine(void)
+{
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
+
+static void s3c64xx_dt_restart(enum reboot_mode mode, const char *cmd)
+{
+ if (mode != REBOOT_SOFT)
+ samsung_wdt_reset();
+
+ /* if all else fails, or mode was for soft, jump to 0 */
+ soft_restart(0);
+}
+
+static char const *s3c64xx_dt_compat[] __initdata = {
+ "samsung,s3c6400",
+ "samsung,s3c6410",
+ NULL
+};
+
+DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)")
+ /* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */
+ .dt_compat = s3c64xx_dt_compat,
+ .map_io = s3c64xx_dt_map_io,
+ .init_irq = s3c64xx_dt_init_irq,
+ .init_machine = s3c64xx_dt_init_machine,
+ .restart = s3c64xx_dt_restart,
+MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq.c b/arch/arm/mach-s3c64xx/mach-smartq.c
index 86d980b448fd..0f47237be3b2 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq.c
@@ -337,13 +337,6 @@ err:
return ret;
}
-static int __init smartq_usb_otg_init(void)
-{
- clk_xusbxti.rate = 12000000;
-
- return 0;
-}
-
static int __init smartq_wifi_init(void)
{
int ret;
@@ -377,7 +370,8 @@ static struct map_desc smartq_iodesc[] __initdata = {};
void __init smartq_map_io(void)
{
s3c64xx_init_io(smartq_iodesc, ARRAY_SIZE(smartq_iodesc));
- s3c24xx_init_clocks(12000000);
+ s3c64xx_set_xtal_freq(12000000);
+ s3c64xx_set_xusbxti_freq(12000000);
s3c24xx_init_uarts(smartq_uartcfgs, ARRAY_SIZE(smartq_uartcfgs));
samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
@@ -399,7 +393,6 @@ void __init smartq_machine_init(void)
WARN_ON(smartq_lcd_setup_gpio());
WARN_ON(smartq_power_off_init());
WARN_ON(smartq_usb_host_init());
- WARN_ON(smartq_usb_otg_init());
WARN_ON(smartq_wifi_init());
platform_add_devices(smartq_devices, ARRAY_SIZE(smartq_devices));
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6400.c b/arch/arm/mach-s3c64xx/mach-smdk6400.c
index d70c0843aea2..27381cfcabbe 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6400.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6400.c
@@ -65,7 +65,7 @@ static struct map_desc smdk6400_iodesc[] = {};
static void __init smdk6400_map_io(void)
{
s3c64xx_init_io(smdk6400_iodesc, ARRAY_SIZE(smdk6400_iodesc));
- s3c24xx_init_clocks(12000000);
+ s3c64xx_set_xtal_freq(12000000);
s3c24xx_init_uarts(smdk6400_uartcfgs, ARRAY_SIZE(smdk6400_uartcfgs));
samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
}
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index d90b450c5645..2a7b32ca5c96 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -634,7 +634,7 @@ static void __init smdk6410_map_io(void)
u32 tmp;
s3c64xx_init_io(smdk6410_iodesc, ARRAY_SIZE(smdk6410_iodesc));
- s3c24xx_init_clocks(12000000);
+ s3c64xx_set_xtal_freq(12000000);
s3c24xx_init_uarts(smdk6410_uartcfgs, ARRAY_SIZE(smdk6410_uartcfgs));
samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index 6a1f91fea678..8cdb824a3b43 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -194,29 +194,8 @@ void s3c_pm_debug_smdkled(u32 set, u32 clear)
#endif
static struct sleep_save core_save[] = {
- SAVE_ITEM(S3C_APLL_LOCK),
- SAVE_ITEM(S3C_MPLL_LOCK),
- SAVE_ITEM(S3C_EPLL_LOCK),
- SAVE_ITEM(S3C_CLK_SRC),
- SAVE_ITEM(S3C_CLK_DIV0),
- SAVE_ITEM(S3C_CLK_DIV1),
- SAVE_ITEM(S3C_CLK_DIV2),
- SAVE_ITEM(S3C_CLK_OUT),
- SAVE_ITEM(S3C_HCLK_GATE),
- SAVE_ITEM(S3C_PCLK_GATE),
- SAVE_ITEM(S3C_SCLK_GATE),
- SAVE_ITEM(S3C_MEM0_GATE),
-
- SAVE_ITEM(S3C_EPLL_CON1),
- SAVE_ITEM(S3C_EPLL_CON0),
-
SAVE_ITEM(S3C64XX_MEM0DRVCON),
SAVE_ITEM(S3C64XX_MEM1DRVCON),
-
-#ifndef CONFIG_CPU_FREQ
- SAVE_ITEM(S3C_APLL_CON),
- SAVE_ITEM(S3C_MPLL_CON),
-#endif
};
static struct sleep_save misc_save[] = {
diff --git a/arch/arm/mach-s3c64xx/s3c6400.c b/arch/arm/mach-s3c64xx/s3c6400.c
index 4869714c6f1b..3db0c98222f7 100644
--- a/arch/arm/mach-s3c64xx/s3c6400.c
+++ b/arch/arm/mach-s3c64xx/s3c6400.c
@@ -9,6 +9,10 @@
* published by the Free Software Foundation.
*/
+/*
+ * NOTE: Code in this file is not used when booting with Device Tree support.
+ */
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
@@ -20,6 +24,7 @@
#include <linux/device.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -58,12 +63,6 @@ void __init s3c6400_map_io(void)
s3c64xx_onenand1_setname("s3c6400-onenand");
}
-void __init s3c6400_init_clocks(int xtal)
-{
- s3c64xx_register_clocks(xtal, S3C6400_CLKDIV0_ARM_MASK);
- s3c64xx_setup_clocks();
-}
-
void __init s3c6400_init_irq(void)
{
/* VIC0 does not have IRQS 5..7,
@@ -82,6 +81,10 @@ static struct device s3c6400_dev = {
static int __init s3c6400_core_init(void)
{
+ /* Not applicable when using DT. */
+ if (of_have_populated_dt())
+ return 0;
+
return subsys_system_register(&s3c6400_subsys, NULL);
}
diff --git a/arch/arm/mach-s3c64xx/s3c6410.c b/arch/arm/mach-s3c64xx/s3c6410.c
index 31c29fdf1800..72b2278953a8 100644
--- a/arch/arm/mach-s3c64xx/s3c6410.c
+++ b/arch/arm/mach-s3c64xx/s3c6410.c
@@ -10,6 +10,10 @@
* published by the Free Software Foundation.
*/
+/*
+ * NOTE: Code in this file is not used when booting with Device Tree support.
+ */
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
@@ -21,6 +25,7 @@
#include <linux/device.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -62,13 +67,6 @@ void __init s3c6410_map_io(void)
s3c_cfcon_setname("s3c64xx-pata");
}
-void __init s3c6410_init_clocks(int xtal)
-{
- printk(KERN_DEBUG "%s: initialising clocks\n", __func__);
- s3c64xx_register_clocks(xtal, S3C6410_CLKDIV0_ARM_MASK);
- s3c64xx_setup_clocks();
-}
-
void __init s3c6410_init_irq(void)
{
/* VIC0 is missing IRQ7, VIC1 is fully populated. */
@@ -86,6 +84,10 @@ static struct device s3c6410_dev = {
static int __init s3c6410_core_init(void)
{
+ /* Not applicable when using DT. */
+ if (of_have_populated_dt())
+ return 0;
+
return subsys_system_register(&s3c6410_subsys, NULL);
}
diff --git a/arch/arm/mach-s5p64x0/include/mach/timex.h b/arch/arm/mach-s5p64x0/include/mach/timex.h
deleted file mode 100644
index 4b91faa195a8..000000000000
--- a/arch/arm/mach-s5p64x0/include/mach/timex.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* linux/arch/arm/mach-s5p64x0/include/mach/timex.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Copyright (c) 2003-2005 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S5P64X0 - time parameters
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_TIMEX_H
-#define __ASM_ARCH_TIMEX_H
-
-/* CLOCK_TICK_RATE needs to be evaluatable by the cpp, so making it
- * a variable is useless. It seems as long as we make our timers an
- * exact multiple of HZ, any value that makes a 1->1 correspondence
- * for the time conversion functions to/from jiffies is acceptable.
-*/
-
-#define CLOCK_TICK_RATE 12000000
-
-#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/timex.h b/arch/arm/mach-s5pc100/include/mach/timex.h
deleted file mode 100644
index 47ffb17aff96..000000000000
--- a/arch/arm/mach-s5pc100/include/mach/timex.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* arch/arm/mach-s5pc100/include/mach/timex.h
- *
- * Copyright (c) 2003-2005 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C6400 - time parameters
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_TIMEX_H
-#define __ASM_ARCH_TIMEX_H
-
-/* CLOCK_TICK_RATE needs to be evaluatable by the cpp, so making it
- * a variable is useless. It seems as long as we make our timers an
- * exact multiple of HZ, any value that makes a 1->1 correspondence
- * for the time conversion functions to/from jiffies is acceptable.
-*/
-
-#define CLOCK_TICK_RATE 12000000
-
-#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/timex.h b/arch/arm/mach-s5pv210/include/mach/timex.h
deleted file mode 100644
index 73dc85496a83..000000000000
--- a/arch/arm/mach-s5pv210/include/mach/timex.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* linux/arch/arm/mach-s5pv210/include/mach/timex.h
- *
- * Copyright (c) 2003-2010 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on arch/arm/mach-s5p6442/include/mach/timex.h
- *
- * S5PV210 - time parameters
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_TIMEX_H
-#define __ASM_ARCH_TIMEX_H __FILE__
-
-/* CLOCK_TICK_RATE needs to be evaluatable by the cpp, so making it
- * a variable is useless. It seems as long as we make our timers an
- * exact multiple of HZ, any value that makes a 1->1 correspondence
- * for the time conversion functions to/from jiffies is acceptable.
-*/
-
-#define CLOCK_TICK_RATE 12000000
-
-#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index e838ba27e443..c9808c684152 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -512,6 +512,9 @@ static void __init assabet_map_io(void)
* Its called GPCLKR0 in my SA1110 manual.
*/
Ser1SDCR0 |= SDCR0_SUS;
+ MSC1 = (MSC1 & ~0xffff) |
+ MSC_NonBrst | MSC_32BitStMem |
+ MSC_RdAcc(2) | MSC_WrAcc(2) | MSC_Rec(0);
if (!machine_has_neponset())
sa1100_register_uart_fns(&assabet_port_fns);
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index f25b6119e028..d4ea142c4edd 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -42,74 +42,31 @@ EXPORT_SYMBOL(reset_status);
/*
* This table is setup for a 3.6864MHz Crystal.
*/
-static const unsigned short cclk_frequency_100khz[NR_FREQS] = {
- 590, /* 59.0 MHz */
- 737, /* 73.7 MHz */
- 885, /* 88.5 MHz */
- 1032, /* 103.2 MHz */
- 1180, /* 118.0 MHz */
- 1327, /* 132.7 MHz */
- 1475, /* 147.5 MHz */
- 1622, /* 162.2 MHz */
- 1769, /* 176.9 MHz */
- 1917, /* 191.7 MHz */
- 2064, /* 206.4 MHz */
- 2212, /* 221.2 MHz */
- 2359, /* 235.9 MHz */
- 2507, /* 250.7 MHz */
- 2654, /* 265.4 MHz */
- 2802 /* 280.2 MHz */
+struct cpufreq_frequency_table sa11x0_freq_table[NR_FREQS+1] = {
+ { .frequency = 59000, /* 59.0 MHz */},
+ { .frequency = 73700, /* 73.7 MHz */},
+ { .frequency = 88500, /* 88.5 MHz */},
+ { .frequency = 103200, /* 103.2 MHz */},
+ { .frequency = 118000, /* 118.0 MHz */},
+ { .frequency = 132700, /* 132.7 MHz */},
+ { .frequency = 147500, /* 147.5 MHz */},
+ { .frequency = 162200, /* 162.2 MHz */},
+ { .frequency = 176900, /* 176.9 MHz */},
+ { .frequency = 191700, /* 191.7 MHz */},
+ { .frequency = 206400, /* 206.4 MHz */},
+ { .frequency = 221200, /* 221.2 MHz */},
+ { .frequency = 235900, /* 235.9 MHz */},
+ { .frequency = 250700, /* 250.7 MHz */},
+ { .frequency = 265400, /* 265.4 MHz */},
+ { .frequency = 280200, /* 280.2 MHz */},
+ { .frequency = CPUFREQ_TABLE_END, },
};
-/* rounds up(!) */
-unsigned int sa11x0_freq_to_ppcr(unsigned int khz)
-{
- int i;
-
- khz /= 100;
-
- for (i = 0; i < NR_FREQS; i++)
- if (cclk_frequency_100khz[i] >= khz)
- break;
-
- return i;
-}
-
-unsigned int sa11x0_ppcr_to_freq(unsigned int idx)
-{
- unsigned int freq = 0;
- if (idx < NR_FREQS)
- freq = cclk_frequency_100khz[idx] * 100;
- return freq;
-}
-
-
-/* make sure that only the "userspace" governor is run -- anything else wouldn't make sense on
- * this platform, anyway.
- */
-int sa11x0_verify_speed(struct cpufreq_policy *policy)
-{
- unsigned int tmp;
- if (policy->cpu)
- return -EINVAL;
-
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
- /* make sure that at least one frequency is within the policy */
- tmp = cclk_frequency_100khz[sa11x0_freq_to_ppcr(policy->min)] * 100;
- if (tmp > policy->max)
- policy->max = tmp;
-
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
- return 0;
-}
-
unsigned int sa11x0_getspeed(unsigned int cpu)
{
if (cpu)
return 0;
- return cclk_frequency_100khz[PPCR & 0xf] * 100;
+ return sa11x0_freq_table[PPCR & 0xf].frequency;
}
/*
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index 9a33695c9492..0d92e119b36b 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -3,6 +3,7 @@
*
* Author: Nicolas Pitre
*/
+#include <linux/cpufreq.h>
#include <linux/reboot.h>
extern void sa1100_timer_init(void);
@@ -19,12 +20,8 @@ extern void sa11x0_init_late(void);
extern void sa1110_mb_enable(void);
extern void sa1110_mb_disable(void);
-struct cpufreq_policy;
-
-extern unsigned int sa11x0_freq_to_ppcr(unsigned int khz);
-extern int sa11x0_verify_speed(struct cpufreq_policy *policy);
+extern struct cpufreq_frequency_table sa11x0_freq_table[];
extern unsigned int sa11x0_getspeed(unsigned int cpu);
-extern unsigned int sa11x0_ppcr_to_freq(unsigned int idx);
struct flash_platform_data;
struct resource;
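/*
 * Sketch (not from the patch) of how a CPUFREQ_TABLE_END-terminated table
 * such as the sa11x0_freq_table exported above is typically walked; the
 * helper name is a placeholder.
 */
#include <linux/cpufreq.h>

static unsigned int example_highest_freq(const struct cpufreq_frequency_table *table)
{
	unsigned int max = 0;
	int i;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++)
		if (table[i].frequency > max)
			max = table[i].frequency;

	return max;	/* kHz, as in the .frequency fields above */
}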
diff --git a/arch/arm/mach-sa1100/include/mach/gpio.h b/arch/arm/mach-sa1100/include/mach/gpio.h
deleted file mode 100644
index 6a9eecf3137e..000000000000
--- a/arch/arm/mach-sa1100/include/mach/gpio.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * arch/arm/mach-sa1100/include/mach/gpio.h
- *
- * SA1100 GPIO wrappers for arch-neutral GPIO calls
- *
- * Written by Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_ARCH_SA1100_GPIO_H
-#define __ASM_ARCH_SA1100_GPIO_H
-
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm-generic/gpio.h>
-
-#define __ARM_GPIOLIB_COMPLEX
-
-static inline int gpio_get_value(unsigned gpio)
-{
- if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
- return GPLR & GPIO_GPIO(gpio);
- else
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
- if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
- if (value)
- GPSR = GPIO_GPIO(gpio);
- else
- GPCR = GPIO_GPIO(gpio);
- else
- __gpio_set_value(gpio, value);
-}
-
-#define gpio_cansleep __gpio_cansleep
-
-#endif
diff --git a/arch/arm/mach-sa1100/include/mach/h3xxx.h b/arch/arm/mach-sa1100/include/mach/h3xxx.h
index 7d9df16f04a2..c810620db53d 100644
--- a/arch/arm/mach-sa1100/include/mach/h3xxx.h
+++ b/arch/arm/mach-sa1100/include/mach/h3xxx.h
@@ -13,6 +13,8 @@
#ifndef _INCLUDE_H3XXX_H_
#define _INCLUDE_H3XXX_H_
+#include "hardware.h" /* Gives GPIO_MAX */
+
/* Physical memory regions corresponding to chip selects */
#define H3600_EGPIO_PHYS (SA1100_CS5_PHYS + 0x01000000)
#define H3600_BANK_2_PHYS SA1100_CS2_PHYS
diff --git a/arch/arm/mach-sa1100/include/mach/timex.h b/arch/arm/mach-sa1100/include/mach/timex.h
deleted file mode 100644
index 7a5d017b58b3..000000000000
--- a/arch/arm/mach-sa1100/include/mach/timex.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * arch/arm/mach-sa1100/include/mach/timex.h
- *
- * SA1100 architecture timex specifications
- *
- * Copyright (C) 1998
- */
-
-/*
- * SA1100 timer
- */
-#define CLOCK_TICK_RATE 3686400
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index bcbc94540e45..41e476e571d7 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -19,6 +19,7 @@
#include <mach/hardware.h>
#include <asm/setup.h>
+#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-shark/Makefile b/arch/arm/mach-shark/Makefile
deleted file mode 100644
index 29657183c452..000000000000
--- a/arch/arm/mach-shark/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-# Object file lists.
-
-obj-y := core.o dma.o irq.o pci.o leds.o
-obj-m :=
-obj-n :=
-obj- :=
diff --git a/arch/arm/mach-shark/Makefile.boot b/arch/arm/mach-shark/Makefile.boot
deleted file mode 100644
index e40e24e4ca34..000000000000
--- a/arch/arm/mach-shark/Makefile.boot
+++ /dev/null
@@ -1,2 +0,0 @@
- zreladdr-y += 0x08008000
-
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
deleted file mode 100644
index 1d32c5e8eab6..000000000000
--- a/arch/arm/mach-shark/core.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * linux/arch/arm/mach-shark/arch.c
- *
- * Architecture specific stuff.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/serial_8250.h>
-#include <linux/io.h>
-#include <linux/cpu.h>
-#include <linux/reboot.h>
-
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-#include <asm/param.h>
-#include <asm/system_misc.h>
-
-#include <asm/mach/map.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/time.h>
-
-#define ROMCARD_SIZE 0x08000000
-#define ROMCARD_START 0x10000000
-
-static void shark_restart(enum reboot_mode mode, const char *cmd)
-{
- short temp;
- /* Reset the Machine via pc[3] of the sequoia chipset */
- outw(0x09,0x24);
- temp=inw(0x26);
- temp = temp | (1<<3) | (1<<10);
- outw(0x09,0x24);
- outw(temp,0x26);
-}
-
-static struct plat_serial8250_port serial_platform_data[] = {
- {
- .iobase = 0x3f8,
- .irq = 4,
- .uartclk = 1843200,
- .regshift = 0,
- .iotype = UPIO_PORT,
- .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
- },
- {
- .iobase = 0x2f8,
- .irq = 3,
- .uartclk = 1843200,
- .regshift = 0,
- .iotype = UPIO_PORT,
- .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
- },
- { },
-};
-
-static struct platform_device serial_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = serial_platform_data,
- },
-};
-
-static struct resource rtc_resources[] = {
- [0] = {
- .start = 0x70,
- .end = 0x73,
- .flags = IORESOURCE_IO,
- },
- [1] = {
- .start = IRQ_ISA_RTC_ALARM,
- .end = IRQ_ISA_RTC_ALARM,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct platform_device rtc_device = {
- .name = "rtc_cmos",
- .id = -1,
- .resource = rtc_resources,
- .num_resources = ARRAY_SIZE(rtc_resources),
-};
-
-static int __init shark_init(void)
-{
- int ret;
-
- if (machine_is_shark())
- {
- ret = platform_device_register(&rtc_device);
- if (ret) printk(KERN_ERR "Unable to register RTC device: %d\n", ret);
- ret = platform_device_register(&serial_device);
- if (ret) printk(KERN_ERR "Unable to register Serial device: %d\n", ret);
- }
- return 0;
-}
-
-arch_initcall(shark_init);
-
-extern void shark_init_irq(void);
-
-#define IRQ_TIMER 0
-#define HZ_TIME ((1193180 + HZ/2) / HZ)
-
-static irqreturn_t
-shark_timer_interrupt(int irq, void *dev_id)
-{
- timer_tick();
- return IRQ_HANDLED;
-}
-
-static struct irqaction shark_timer_irq = {
- .name = "Shark Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = shark_timer_interrupt,
-};
-
-/*
- * Set up timer interrupt, and return the current time in seconds.
- */
-static void __init shark_timer_init(void)
-{
- outb(0x34, 0x43); /* binary, mode 0, LSB/MSB, Ch 0 */
- outb(HZ_TIME & 0xff, 0x40); /* LSB of count */
- outb(HZ_TIME >> 8, 0x40);
-
- setup_irq(IRQ_TIMER, &shark_timer_irq);
-}
-
-static void shark_init_early(void)
-{
- cpu_idle_poll_ctrl(true);
-}
-
-MACHINE_START(SHARK, "Shark")
- /* Maintainer: Alexander Schulz */
- .atag_offset = 0x3000,
- .init_early = shark_init_early,
- .init_irq = shark_init_irq,
- .init_time = shark_timer_init,
- .dma_zone_size = SZ_4M,
- .restart = shark_restart,
-MACHINE_END
diff --git a/arch/arm/mach-shark/dma.c b/arch/arm/mach-shark/dma.c
deleted file mode 100644
index 10b5b8b3272a..000000000000
--- a/arch/arm/mach-shark/dma.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * linux/arch/arm/mach-shark/dma.c
- *
- * by Alexander Schulz
- *
- * derived from:
- * arch/arm/kernel/dma-ebsa285.c
- * Copyright (C) 1998 Phil Blundell
- */
-
-#include <linux/init.h>
-
-#include <asm/dma.h>
-#include <asm/mach/dma.h>
-
-static int __init shark_dma_init(void)
-{
-#ifdef CONFIG_ISA_DMA
- isa_init_dma();
-#endif
- return 0;
-}
-core_initcall(shark_dma_init);
diff --git a/arch/arm/mach-shark/include/mach/debug-macro.S b/arch/arm/mach-shark/include/mach/debug-macro.S
deleted file mode 100644
index d129119a3f69..000000000000
--- a/arch/arm/mach-shark/include/mach/debug-macro.S
+++ /dev/null
@@ -1,34 +0,0 @@
-/* arch/arm/mach-shark/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
- .macro addruart, rp, rv, tmp
- mov \rp, #0x3f8
- orr \rv, \rp, #0xfe000000
- orr \rv, \rv, #0x00e00000
- orr \rp, \rp, #0x40000000
- .endm
-
- .macro senduart,rd,rx
- strb \rd, [\rx]
- .endm
-
- .macro waituart,rd,rx
- .endm
-
- .macro busyuart,rd,rx
- mov \rd, #0
-1001: add \rd, \rd, #1
- teq \rd, #0x10000
- bne 1001b
- .endm
-
diff --git a/arch/arm/mach-shark/include/mach/entry-macro.S b/arch/arm/mach-shark/include/mach/entry-macro.S
deleted file mode 100644
index c9e49f049532..000000000000
--- a/arch/arm/mach-shark/include/mach/entry-macro.S
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * arch/arm/mach-shark/include/mach/entry-macro.S
- *
- * Low-level IRQ helper macros for Shark platform
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
- .macro get_irqnr_preamble, base, tmp
- mov \base, #0xfe000000
- orr \base, \base, #0x00e00000
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-
- mov \irqstat, #0x0C
- strb \irqstat, [\base, #0x20] @outb(0x0C, 0x20) /* Poll command */
- ldrb \irqnr, [\base, #0x20] @irq = inb(0x20) & 7
- and \irqstat, \irqnr, #0x80
- teq \irqstat, #0
- beq 43f
- and \irqnr, \irqnr, #7
- teq \irqnr, #2
- bne 44f
-43: mov \irqstat, #0x0C
- strb \irqstat, [\base, #0xa0] @outb(0x0C, 0xA0) /* Poll command */
- ldrb \irqnr, [\base, #0xa0] @irq = (inb(0xA0) & 7) + 8
- and \irqstat, \irqnr, #0x80
- teq \irqstat, #0
- beq 44f
- and \irqnr, \irqnr, #7
- add \irqnr, \irqnr, #8
-44: teq \irqstat, #0
- .endm
-
diff --git a/arch/arm/mach-shark/include/mach/framebuffer.h b/arch/arm/mach-shark/include/mach/framebuffer.h
deleted file mode 100644
index 84a5bf6e5ba3..000000000000
--- a/arch/arm/mach-shark/include/mach/framebuffer.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * arch/arm/mach-shark/include/mach/framebuffer.h
- *
- * by Alexander Schulz
- *
- */
-
-#ifndef __ASM_ARCH_FRAMEBUFFER_H
-#define __ASM_ARCH_FRAMEBUFFER_H
-
-/* defines for the Framebuffer */
-#define FB_START 0x06000000
-#define FB_SIZE 0x01000000
-
-#endif
-
diff --git a/arch/arm/mach-shark/include/mach/hardware.h b/arch/arm/mach-shark/include/mach/hardware.h
deleted file mode 100644
index 663f952a8ab3..000000000000
--- a/arch/arm/mach-shark/include/mach/hardware.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * arch/arm/mach-shark/include/mach/hardware.h
- *
- * by Alexander Schulz
- *
- * derived from:
- * arch/arm/mach-ebsa110/include/mach/hardware.h
- * Copyright (C) 1996-1999 Russell King.
- */
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H
-
-#define UNCACHEABLE_ADDR 0xdf010000
-
-#endif
-
diff --git a/arch/arm/mach-shark/include/mach/irqs.h b/arch/arm/mach-shark/include/mach/irqs.h
deleted file mode 100644
index c8e8a4e1f61a..000000000000
--- a/arch/arm/mach-shark/include/mach/irqs.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * arch/arm/mach-shark/include/mach/irqs.h
- *
- * by Alexander Schulz
- */
-
-#define NR_IRQS 16
-
-#define IRQ_ISA_KEYBOARD 1
-#define IRQ_ISA_RTC_ALARM 8
-#define I8042_KBD_IRQ 1
-#define I8042_AUX_IRQ 12
-#define IRQ_HARDDISK 14
diff --git a/arch/arm/mach-shark/include/mach/isa-dma.h b/arch/arm/mach-shark/include/mach/isa-dma.h
deleted file mode 100644
index 96c43b8f8dda..000000000000
--- a/arch/arm/mach-shark/include/mach/isa-dma.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * arch/arm/mach-shark/include/mach/isa-dma.h
- *
- * by Alexander Schulz
- */
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#define MAX_DMA_CHANNELS 8
-#define DMA_ISA_CASCADE 4
-
-#endif /* _ASM_ARCH_DMA_H */
-
diff --git a/arch/arm/mach-shark/include/mach/memory.h b/arch/arm/mach-shark/include/mach/memory.h
deleted file mode 100644
index 1cf8d6962617..000000000000
--- a/arch/arm/mach-shark/include/mach/memory.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * arch/arm/mach-shark/include/mach/memory.h
- *
- * by Alexander Schulz
- *
- * derived from:
- * arch/arm/mach-ebsa110/include/mach/memory.h
- * Copyright (c) 1996-1999 Russell King.
- */
-#ifndef __ASM_ARCH_MEMORY_H
-#define __ASM_ARCH_MEMORY_H
-
-#include <asm/sizes.h>
-
-/*
- * Physical DRAM offset.
- */
-#define PLAT_PHYS_OFFSET UL(0x08000000)
-
-/*
- * Cache flushing area
- */
-#define FLUSH_BASE_PHYS 0x80000000
-#define FLUSH_BASE 0xdf000000
-
-#endif
diff --git a/arch/arm/mach-shark/include/mach/timex.h b/arch/arm/mach-shark/include/mach/timex.h
deleted file mode 100644
index bb6eeaebed86..000000000000
--- a/arch/arm/mach-shark/include/mach/timex.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * arch/arm/mach-shark/include/mach/timex.h
- *
- * by Alexander Schulz
- */
-
-#define CLOCK_TICK_RATE 1193180
diff --git a/arch/arm/mach-shark/include/mach/uncompress.h b/arch/arm/mach-shark/include/mach/uncompress.h
deleted file mode 100644
index a168435aecc9..000000000000
--- a/arch/arm/mach-shark/include/mach/uncompress.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * arch/arm/mach-shark/include/mach/uncompress.h
- * by Alexander Schulz
- *
- * derived from:
- * arch/arm/mach-footbridge/include/mach/uncompress.h
- * Copyright (C) 1996,1997,1998 Russell King
- */
-
-#define SERIAL_BASE ((volatile unsigned char *)0x400003f8)
-
-static inline void putc(int c)
-{
- volatile int t;
-
- SERIAL_BASE[0] = c;
- t=0x10000;
- while (t--);
-}
-
-static inline void flush(void)
-{
-}
-
-#ifdef DEBUG
-static void putn(unsigned long z)
-{
- int i;
- char x;
-
- putc('0');
- putc('x');
- for (i=0;i<8;i++) {
- x='0'+((z>>((7-i)*4))&0xf);
- if (x>'9') x=x-'0'+'A'-10;
- putc(x);
- }
-}
-
-static void putr()
-{
- putc('\n');
- putc('\r');
-}
-#endif
-
-/*
- * nothing to do
- */
-#define arch_decomp_setup()
diff --git a/arch/arm/mach-shark/irq.c b/arch/arm/mach-shark/irq.c
deleted file mode 100644
index 5dce13e429f3..000000000000
--- a/arch/arm/mach-shark/irq.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * linux/arch/arm/mach-shark/irq.c
- *
- * by Alexander Schulz
- *
- * derived from linux/arch/ppc/kernel/i8259.c and:
- * arch/arm/mach-ebsa110/include/mach/irq.h
- * Copyright (C) 1996-1998 Russell King
- */
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-
-/*
- * 8259A PIC functions to handle ISA devices:
- */
-
-/*
- * This contains the irq mask for both 8259A irq controllers,
- * Let through the cascade-interrupt no. 2 (ff-(1<<2)==fb)
- */
-static unsigned char cached_irq_mask[2] = { 0xfb, 0xff };
-
-/*
- * These have to be protected by the irq controller spinlock
- * before being called.
- */
-static void shark_disable_8259A_irq(struct irq_data *d)
-{
- unsigned int mask;
- if (d->irq<8) {
- mask = 1 << d->irq;
- cached_irq_mask[0] |= mask;
- outb(cached_irq_mask[1],0xA1);
- } else {
- mask = 1 << (d->irq-8);
- cached_irq_mask[1] |= mask;
- outb(cached_irq_mask[0],0x21);
- }
-}
-
-static void shark_enable_8259A_irq(struct irq_data *d)
-{
- unsigned int mask;
- if (d->irq<8) {
- mask = ~(1 << d->irq);
- cached_irq_mask[0] &= mask;
- outb(cached_irq_mask[0],0x21);
- } else {
- mask = ~(1 << (d->irq-8));
- cached_irq_mask[1] &= mask;
- outb(cached_irq_mask[1],0xA1);
- }
-}
-
-static void shark_ack_8259A_irq(struct irq_data *d){}
-
-static irqreturn_t bogus_int(int irq, void *dev_id)
-{
- printk("Got interrupt %i!\n",irq);
- return IRQ_NONE;
-}
-
-static struct irqaction cascade;
-
-static struct irq_chip fb_chip = {
- .name = "XT-PIC",
- .irq_ack = shark_ack_8259A_irq,
- .irq_mask = shark_disable_8259A_irq,
- .irq_unmask = shark_enable_8259A_irq,
-};
-
-void __init shark_init_irq(void)
-{
- int irq;
-
- for (irq = 0; irq < NR_IRQS; irq++) {
- irq_set_chip_and_handler(irq, &fb_chip, handle_edge_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- }
-
- /* init master interrupt controller */
- outb(0x11, 0x20); /* Start init sequence, edge triggered (level: 0x19)*/
- outb(0x00, 0x21); /* Vector base */
- outb(0x04, 0x21); /* Cascade (slave) on IRQ2 */
- outb(0x03, 0x21); /* Select 8086 mode , auto eoi*/
- outb(0x0A, 0x20);
- /* init slave interrupt controller */
- outb(0x11, 0xA0); /* Start init sequence, edge triggered */
- outb(0x08, 0xA1); /* Vector base */
- outb(0x02, 0xA1); /* Cascade (slave) on IRQ2 */
- outb(0x03, 0xA1); /* Select 8086 mode, auto eoi */
- outb(0x0A, 0xA0);
- outb(cached_irq_mask[1],0xA1);
- outb(cached_irq_mask[0],0x21);
- //request_region(0x20,0x2,"pic1");
- //request_region(0xA0,0x2,"pic2");
-
- cascade.handler = bogus_int;
- cascade.name = "cascade";
- setup_irq(2,&cascade);
-}
-
diff --git a/arch/arm/mach-shark/leds.c b/arch/arm/mach-shark/leds.c
deleted file mode 100644
index 081c778a10ac..000000000000
--- a/arch/arm/mach-shark/leds.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * DIGITAL Shark LED control routines.
- *
- * Driver for the 3 user LEDs found on the Shark
- * Based on Versatile and RealView machine LED code
- *
- * License terms: GNU General Public License (GPL) version 2
- * Author: Bryan Wu <bryan.wu@canonical.com>
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/leds.h>
-
-#include <asm/mach-types.h>
-
-#if defined(CONFIG_NEW_LEDS) && defined(CONFIG_LEDS_CLASS)
-struct shark_led {
- struct led_classdev cdev;
- u8 mask;
-};
-
-/*
- * The triggers lines up below will only be used if the
- * LED triggers are compiled in.
- */
-static const struct {
- const char *name;
- const char *trigger;
-} shark_leds[] = {
- { "shark:amber0", "default-on", }, /* Bit 5 */
- { "shark:green", "heartbeat", }, /* Bit 6 */
- { "shark:amber1", "cpu0" }, /* Bit 7 */
-};
-
-static u16 led_reg_read(void)
-{
- outw(0x09, 0x24);
- return inw(0x26);
-}
-
-static void led_reg_write(u16 value)
-{
- outw(0x09, 0x24);
- outw(value, 0x26);
-}
-
-static void shark_led_set(struct led_classdev *cdev,
- enum led_brightness b)
-{
- struct shark_led *led = container_of(cdev,
- struct shark_led, cdev);
- u16 reg = led_reg_read();
-
- if (b != LED_OFF)
- reg |= led->mask;
- else
- reg &= ~led->mask;
-
- led_reg_write(reg);
-}
-
-static enum led_brightness shark_led_get(struct led_classdev *cdev)
-{
- struct shark_led *led = container_of(cdev,
- struct shark_led, cdev);
- u16 reg = led_reg_read();
-
- return (reg & led->mask) ? LED_FULL : LED_OFF;
-}
-
-static int __init shark_leds_init(void)
-{
- int i;
- u16 reg;
-
- if (!machine_is_shark())
- return -ENODEV;
-
- for (i = 0; i < ARRAY_SIZE(shark_leds); i++) {
- struct shark_led *led;
-
- led = kzalloc(sizeof(*led), GFP_KERNEL);
- if (!led)
- break;
-
- led->cdev.name = shark_leds[i].name;
- led->cdev.brightness_set = shark_led_set;
- led->cdev.brightness_get = shark_led_get;
- led->cdev.default_trigger = shark_leds[i].trigger;
-
- /* Count in 5 bits offset */
- led->mask = BIT(i + 5);
-
- if (led_classdev_register(NULL, &led->cdev) < 0) {
- kfree(led);
- break;
- }
- }
-
- /* Make LEDs independent of power-state */
- request_region(0x24, 4, "led_reg");
- reg = led_reg_read();
- reg |= 1 << 10;
- led_reg_write(reg);
-
- return 0;
-}
-
-/*
- * Since we may have triggers on any subsystem, defer registration
- * until after subsystem_init.
- */
-fs_initcall(shark_leds_init);
-#endif
diff --git a/arch/arm/mach-shark/pci.c b/arch/arm/mach-shark/pci.c
deleted file mode 100644
index 6d91a914c1dd..000000000000
--- a/arch/arm/mach-shark/pci.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * linux/arch/arm/mach-shark/pci.c
- *
- * PCI bios-type initialisation for PCI machines
- *
- * Bits taken from various places.
- */
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <video/vga.h>
-
-#include <asm/irq.h>
-#include <asm/mach/pci.h>
-#include <asm/mach-types.h>
-
-#define IO_START 0x40000000
-
-static int __init shark_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- if (dev->bus->number == 0)
- if (dev->devfn == 0)
- return 255;
- else
- return 11;
- else
- return 255;
-}
-
-extern void __init via82c505_preinit(void);
-
-static struct hw_pci shark_pci __initdata = {
- .setup = via82c505_setup,
- .map_irq = shark_map_irq,
- .nr_controllers = 1,
- .ops = &via82c505_ops,
- .preinit = via82c505_preinit,
-};
-
-static int __init shark_pci_init(void)
-{
- if (!machine_is_shark())
- return -ENODEV;
-
- pcibios_min_io = 0x6000;
- pcibios_min_mem = 0x50000000;
- vga_base = 0xe8000000;
-
- pci_ioremap_io(0, IO_START);
-
- pci_common_init(&shark_pci);
-
- return 0;
-}
-
-subsys_initcall(shark_pci_init);
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 1f94c310c477..a4a4b75109b2 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -22,16 +22,10 @@ config ARCH_EMEV2
comment "SH-Mobile Board Type"
-config MACH_KZM9D_REFERENCE
- bool "KZM9D board - Reference Device Tree Implementation"
+config MACH_KZM9D
+ bool "KZM9D board"
depends on ARCH_EMEV2
select REGULATOR_FIXED_VOLTAGE if REGULATOR
- ---help---
- Use reference implementation of KZM9D board support
- which makes a greater use of device tree at the expense
- of not supporting a number of devices.
-
- This is intended to aid developers
comment "SH-Mobile System Configuration"
endif
@@ -101,12 +95,24 @@ config ARCH_R8A7790
select SH_CLK_CPG
select RENESAS_IRQC
+config ARCH_R8A7791
+ bool "R-Car M2 (R8A77910)"
+ select ARM_GIC
+ select CPU_V7
+ select SH_CLK_CPG
+
config ARCH_EMEV2
bool "Emma Mobile EV2"
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_GIC
select CPU_V7
+config ARCH_R7S72100
+ bool "RZ/A1H (R7S72100)"
+ select ARM_GIC
+ select CPU_V7
+ select SH_CLK_CPG
+
comment "SH-Mobile Board Type"
config MACH_APE6EVM
@@ -162,6 +168,8 @@ config MACH_BOCKW
select RENESAS_INTC_IRQPIN
select REGULATOR_FIXED_VOLTAGE if REGULATOR
select USE_OF
+ select SND_SOC_AK4554 if SND_SIMPLE_CARD
+ select SND_SOC_AK4642 if SND_SIMPLE_CARD
config MACH_BOCKW_REFERENCE
bool "BOCK-W - Reference Device Tree Implementation"
@@ -177,6 +185,11 @@ config MACH_BOCKW_REFERENCE
This is intended to aid developers
+config MACH_GENMAI
+ bool "Genmai board"
+ depends on ARCH_R7S72100
+ select USE_OF
+
config MACH_MARZEN
bool "MARZEN board"
depends on ARCH_R8A7779
@@ -213,23 +226,16 @@ config MACH_LAGER_REFERENCE
This is intended to aid developers
-config MACH_KZM9D
- bool "KZM9D board"
- depends on ARCH_EMEV2
- select REGULATOR_FIXED_VOLTAGE if REGULATOR
+config MACH_KOELSCH
+ bool "Koelsch board"
+ depends on ARCH_R8A7791
select USE_OF
-config MACH_KZM9D_REFERENCE
- bool "KZM9D board - Reference Device Tree Implementation"
+config MACH_KZM9D
+ bool "KZM9D board"
depends on ARCH_EMEV2
select REGULATOR_FIXED_VOLTAGE if REGULATOR
select USE_OF
- ---help---
- Use reference implementation of KZM9D board support
- which makes a greater use of device tree at the expense
- of not supporting a number of devices.
-
- This is intended to aid developers
config MACH_KZM9G
bool "KZM-A9-GT board"
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 2705bfa8c113..51db2bcafabf 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -15,7 +15,10 @@ obj-$(CONFIG_ARCH_R8A7740) += setup-r8a7740.o
obj-$(CONFIG_ARCH_R8A7778) += setup-r8a7778.o
obj-$(CONFIG_ARCH_R8A7779) += setup-r8a7779.o
obj-$(CONFIG_ARCH_R8A7790) += setup-r8a7790.o
+obj-$(CONFIG_ARCH_R8A7790) += setup-r8a7790.o setup-rcar-gen2.o
+obj-$(CONFIG_ARCH_R8A7791) += setup-r8a7791.o setup-rcar-gen2.o
obj-$(CONFIG_ARCH_EMEV2) += setup-emev2.o
+obj-$(CONFIG_ARCH_R7S72100) += setup-r7s72100.o
# Clock objects
ifndef CONFIG_COMMON_CLK
@@ -27,13 +30,17 @@ obj-$(CONFIG_ARCH_R8A7740) += clock-r8a7740.o
obj-$(CONFIG_ARCH_R8A7778) += clock-r8a7778.o
obj-$(CONFIG_ARCH_R8A7779) += clock-r8a7779.o
obj-$(CONFIG_ARCH_R8A7790) += clock-r8a7790.o
+obj-$(CONFIG_ARCH_R8A7791) += clock-r8a7791.o
obj-$(CONFIG_ARCH_EMEV2) += clock-emev2.o
+obj-$(CONFIG_ARCH_R7S72100) += clock-r7s72100.o
endif
# SMP objects
smp-y := platsmp.o headsmp.o
smp-$(CONFIG_ARCH_SH73A0) += smp-sh73a0.o headsmp-scu.o platsmp-scu.o
smp-$(CONFIG_ARCH_R8A7779) += smp-r8a7779.o headsmp-scu.o platsmp-scu.o
+smp-$(CONFIG_ARCH_R8A7790) += smp-r8a7790.o platsmp-apmu.o
+smp-$(CONFIG_ARCH_R8A7791) += smp-r8a7791.o platsmp-apmu.o
smp-$(CONFIG_ARCH_EMEV2) += smp-emev2.o headsmp-scu.o platsmp-scu.o
# IRQ objects
@@ -48,21 +55,26 @@ obj-$(CONFIG_ARCH_R8A7740) += pm-r8a7740.o pm-rmobile.o
obj-$(CONFIG_ARCH_R8A7779) += pm-r8a7779.o
# Board objects
+ifdef CONFIG_ARCH_SHMOBILE_MULTI
+obj-$(CONFIG_MACH_KZM9D) += board-kzm9d-reference.o
+else
obj-$(CONFIG_MACH_APE6EVM) += board-ape6evm.o
obj-$(CONFIG_MACH_APE6EVM_REFERENCE) += board-ape6evm-reference.o
obj-$(CONFIG_MACH_MACKEREL) += board-mackerel.o
obj-$(CONFIG_MACH_BOCKW) += board-bockw.o
obj-$(CONFIG_MACH_BOCKW_REFERENCE) += board-bockw-reference.o
+obj-$(CONFIG_MACH_GENMAI) += board-genmai.o
obj-$(CONFIG_MACH_MARZEN) += board-marzen.o
obj-$(CONFIG_MACH_MARZEN_REFERENCE) += board-marzen-reference.o
obj-$(CONFIG_MACH_LAGER) += board-lager.o
obj-$(CONFIG_MACH_LAGER_REFERENCE) += board-lager-reference.o
obj-$(CONFIG_MACH_ARMADILLO800EVA) += board-armadillo800eva.o
obj-$(CONFIG_MACH_ARMADILLO800EVA_REFERENCE) += board-armadillo800eva-reference.o
+obj-$(CONFIG_MACH_KOELSCH) += board-koelsch.o
obj-$(CONFIG_MACH_KZM9D) += board-kzm9d.o
-obj-$(CONFIG_MACH_KZM9D_REFERENCE) += board-kzm9d-reference.o
obj-$(CONFIG_MACH_KZM9G) += board-kzm9g.o
obj-$(CONFIG_MACH_KZM9G_REFERENCE) += board-kzm9g-reference.o
+endif
# Framework support
obj-$(CONFIG_SMP) += $(smp-y)
diff --git a/arch/arm/mach-shmobile/Makefile.boot b/arch/arm/mach-shmobile/Makefile.boot
index 6a504fe7d86c..391d72a5536c 100644
--- a/arch/arm/mach-shmobile/Makefile.boot
+++ b/arch/arm/mach-shmobile/Makefile.boot
@@ -6,8 +6,9 @@ loadaddr-$(CONFIG_MACH_ARMADILLO800EVA) += 0x40008000
loadaddr-$(CONFIG_MACH_ARMADILLO800EVA_REFERENCE) += 0x40008000
loadaddr-$(CONFIG_MACH_BOCKW) += 0x60008000
loadaddr-$(CONFIG_MACH_BOCKW_REFERENCE) += 0x60008000
+loadaddr-$(CONFIG_MACH_GENMAI) += 0x8008000
+loadaddr-$(CONFIG_MACH_KOELSCH) += 0x40008000
loadaddr-$(CONFIG_MACH_KZM9D) += 0x40008000
-loadaddr-$(CONFIG_MACH_KZM9D_REFERENCE) += 0x40008000
loadaddr-$(CONFIG_MACH_KZM9G) += 0x41008000
loadaddr-$(CONFIG_MACH_KZM9G_REFERENCE) += 0x41008000
loadaddr-$(CONFIG_MACH_LAGER) += 0x40008000
diff --git a/arch/arm/mach-shmobile/board-ape6evm-reference.c b/arch/arm/mach-shmobile/board-ape6evm-reference.c
index a23fa714f7ac..3276afcf3cc9 100644
--- a/arch/arm/mach-shmobile/board-ape6evm-reference.c
+++ b/arch/arm/mach-shmobile/board-ape6evm-reference.c
@@ -57,7 +57,7 @@ static const char *ape6evm_boards_compat_dt[] __initdata = {
};
DT_MACHINE_START(APE6EVM_DT, "ape6evm")
- .init_early = r8a73a4_init_delay,
+ .init_early = r8a73a4_init_early,
.init_machine = ape6evm_add_standard_devices,
.dt_compat = ape6evm_boards_compat_dt,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-ape6evm.c b/arch/arm/mach-shmobile/board-ape6evm.c
index 24b87eea9da3..0fa068e30a30 100644
--- a/arch/arm/mach-shmobile/board-ape6evm.c
+++ b/arch/arm/mach-shmobile/board-ape6evm.c
@@ -86,7 +86,7 @@ static struct gpio_keys_button gpio_buttons[] = {
GPIO_KEY(KEY_VOLUMEDOWN, 329, "S21"),
};
-static struct __initdata gpio_keys_platform_data ape6evm_keys_pdata = {
+static struct gpio_keys_platform_data ape6evm_keys_pdata __initdata = {
.buttons = gpio_buttons,
.nbuttons = ARRAY_SIZE(gpio_buttons),
};
@@ -113,22 +113,58 @@ static const struct smsc911x_platform_config lan9220_data __initconst = {
};
/*
- * On APE6EVM power is supplied to MMCIF by a tps80032 regulator. For now we
- * model a VDD supply to MMCIF, using a fixed 3.3V regulator. Also use the
- * static power supply for SDHI0 and SDHI1, whereas SDHI0's VccQ is also
- * supplied by the same tps80032 regulator and thus can also be adjusted
- * dynamically.
+ * MMC0 power supplies:
+ * Both Vcc and VccQ to eMMC on APE6EVM are supplied by a tps80032 voltage
+ * regulator. Until support for it is added to this file, we simulate the
+ * Vcc supply with a fixed always-on regulator.
*/
-static struct regulator_consumer_supply fixed3v3_power_consumers[] =
+static struct regulator_consumer_supply vcc_mmc0_consumers[] =
{
REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
+};
+
+/*
+ * SDHI0 power supplies:
+ * Vcc to SDHI0 on APE6EVM is supplied by a GPIO-switchable regulator. VccQ is
+ * provided by the same tps80032 regulator as both MMC0 voltages - see comment
+ * above.
+ */
+static struct regulator_consumer_supply vcc_sdhi0_consumers[] =
+{
REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+};
+
+static struct regulator_init_data vcc_sdhi0_init_data = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(vcc_sdhi0_consumers),
+ .consumer_supplies = vcc_sdhi0_consumers,
+};
+
+static const struct fixed_voltage_config vcc_sdhi0_info __initconst = {
+ .supply_name = "SDHI0 Vcc",
+ .microvolts = 3300000,
+ .gpio = 76,
+ .enable_high = 1,
+ .init_data = &vcc_sdhi0_init_data,
+};
+
+/*
+ * SDHI1 power supplies:
+ * Vcc and VccQ to SDHI1 on APE6EVM are both fixed at 3.3V.
+ */
+static struct regulator_consumer_supply vcc_sdhi1_consumers[] =
+{
REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
};
/* MMCIF */
static const struct sh_mmcif_plat_data mmcif0_pdata __initconst = {
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .slave_id_tx = SHDMA_SLAVE_MMCIF0_TX,
+ .slave_id_rx = SHDMA_SLAVE_MMCIF0_RX,
+ .ccs_unsupported = true,
};
static const struct resource mmcif0_resources[] __initconst = {
@@ -215,14 +251,19 @@ static void __init ape6evm_add_standard_devices(void)
platform_device_register_resndata(&platform_bus, "smsc911x", -1,
lan9220_res, ARRAY_SIZE(lan9220_res),
&lan9220_data, sizeof(lan9220_data));
- regulator_register_always_on(1, "fixed-3.3V", fixed3v3_power_consumers,
- ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+
+ regulator_register_always_on(1, "MMC0 Vcc", vcc_mmc0_consumers,
+ ARRAY_SIZE(vcc_mmc0_consumers), 2800000);
platform_device_register_resndata(&platform_bus, "sh_mmcif", 0,
mmcif0_resources, ARRAY_SIZE(mmcif0_resources),
&mmcif0_pdata, sizeof(mmcif0_pdata));
+ platform_device_register_data(&platform_bus, "reg-fixed-voltage", 2,
+ &vcc_sdhi0_info, sizeof(vcc_sdhi0_info));
platform_device_register_resndata(&platform_bus, "sh_mobile_sdhi", 0,
sdhi0_resources, ARRAY_SIZE(sdhi0_resources),
&sdhi0_pdata, sizeof(sdhi0_pdata));
+ regulator_register_always_on(3, "SDHI1 Vcc", vcc_sdhi1_consumers,
+ ARRAY_SIZE(vcc_sdhi1_consumers), 3300000);
platform_device_register_resndata(&platform_bus, "sh_mobile_sdhi", 1,
sdhi1_resources, ARRAY_SIZE(sdhi1_resources),
&sdhi1_pdata, sizeof(sdhi1_pdata));
@@ -240,7 +281,7 @@ static const char *ape6evm_boards_compat_dt[] __initdata = {
};
DT_MACHINE_START(APE6EVM_DT, "ape6evm")
- .init_early = r8a73a4_init_delay,
+ .init_early = r8a73a4_init_early,
.init_machine = ape6evm_add_standard_devices,
.dt_compat = ape6evm_boards_compat_dt,
MACHINE_END
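As a rough sketch of the consumer side of the "vmmc" supplies wired up above (not part of this patch; the real MMCIF/SDHI drivers obtain their supplies through the MMC core's regulator helpers), a platform driver bound to one of the listed devices could pick the supply up roughly like this; example_probe() is purely illustrative:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

/* hypothetical probe() fragment for a device listed above as a "vmmc" consumer */
static int example_probe(struct platform_device *pdev)
{
	struct regulator *vmmc = devm_regulator_get(&pdev->dev, "vmmc");

	if (IS_ERR(vmmc))
		return PTR_ERR(vmmc);

	/*
	 * Switches on the GPIO-gated SDHI0 Vcc; for the always-on
	 * MMC0/SDHI1 supplies this only bumps the use count.
	 */
	return regulator_enable(vmmc);
}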
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 7f8f6076d360..8bc8e4c58847 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -823,6 +823,7 @@ static struct sh_mmcif_plat_data sh_mmcif_plat = {
.caps = MMC_CAP_4_BIT_DATA |
MMC_CAP_8_BIT_DATA |
MMC_CAP_NONREMOVABLE,
+ .ccs_unsupported = true,
.slave_id_tx = SHDMA_SLAVE_MMCIF_TX,
.slave_id_rx = SHDMA_SLAVE_MMCIF_RX,
};
diff --git a/arch/arm/mach-shmobile/board-bockw-reference.c b/arch/arm/mach-shmobile/board-bockw-reference.c
index 1a7c893e1a52..ae88fdad4b3a 100644
--- a/arch/arm/mach-shmobile/board-bockw-reference.c
+++ b/arch/arm/mach-shmobile/board-bockw-reference.c
@@ -36,15 +36,35 @@ static const struct pinctrl_map bockw_pinctrl_map[] = {
"scif0_ctrl", "scif0"),
};
+#define FPGA 0x18200000
+#define IRQ0MR 0x30
+#define COMCTLR 0x101c
static void __init bockw_init(void)
{
+ static void __iomem *fpga;
+
r8a7778_clock_init();
+ r8a7778_init_irq_extpin_dt(1);
pinctrl_register_mappings(bockw_pinctrl_map,
ARRAY_SIZE(bockw_pinctrl_map));
r8a7778_pinmux_init();
r8a7778_add_dt_devices();
+ fpga = ioremap_nocache(FPGA, SZ_1M);
+ if (fpga) {
+ /*
+ * CAUTION
+ *
+ * IRQ0/1 are cascaded interrupts from the FPGA.
+ * This should be handled properly in the future;
+ * for now we assume IRQ0 is used only by the SMSC chip.
+ */
+ u16 val = ioread16(fpga + IRQ0MR);
+ val &= ~(1 << 4); /* enable SMSC911x */
+ iowrite16(val, fpga + IRQ0MR);
+ }
+
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index 6b9faf3908f7..6163fb1bde60 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -32,11 +32,19 @@
#include <linux/smsc911x.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
+#include <linux/usb/renesas_usbhs.h>
#include <media/soc_camera.h>
#include <mach/common.h>
#include <mach/irqs.h>
#include <mach/r8a7778.h>
#include <asm/mach/arch.h>
+#include <sound/rcar_snd.h>
+#include <sound/simple_card.h>
+
+#define FPGA 0x18200000
+#define IRQ0MR 0x30
+#define COMCTLR 0x101c
+static void __iomem *fpga;
/*
* CN9(Upper side) SCIF/RCAN selection
@@ -63,6 +71,45 @@
* SW19 (MMC) 1 pin
*/
+/*
+ * SSI settings
+ *
+ * SW45: 1-4 side (SSI5 out, ROUT/LOUT CN19 Mid)
+ * SW46: 1101 (SSI6 Record)
+ * SW47: 1110 (SSI5 Playback)
+ * SW48: 11 (Record power)
+ * SW49: 1 (SSI slave mode)
+ * SW50: 1111 (SSI7, SSI8)
+ * SW51: 1111 (SSI3, SSI4)
+ * SW54: 1pin (ak4554 FPGA control)
+ * SW55: 1 (CLKB is 24.5760MHz)
+ * SW60: 1pin (ak4554 FPGA control)
+ * SW61: 3pin (use X11 clock)
+ * SW78: 3-6 (ak4642 connects I2C0)
+ *
+ * The sound devices are available as:
+ *
+ * hw0: CN19: SSI56-AK4643
+ * hw1: CN21: SSI3-AK4554(playback)
+ * hw2: CN21: SSI4-AK4554(capture)
+ * hw3: CN20: SSI7-AK4554(playback)
+ * hw4: CN20: SSI8-AK4554(capture)
+ *
+ * This command is required for playback on hw0:
+ *
+ * # amixer set "LINEOUT Mixer DACL" on
+ */
+
+/*
+ * USB
+ *
+ * USB1 (CN29) can be Host/Function
+ *
+ * Host Func
+ * SW98 1 2
+ * SW99 1 3
+ */
+
/* Dummy supplies, where voltage doesn't matter */
static struct regulator_consumer_supply dummy_supplies[] = {
REGULATOR_SUPPLY("vddvario", "smsc911x"),
@@ -81,16 +128,76 @@ static struct resource smsc911x_resources[] __initdata = {
DEFINE_RES_IRQ(irq_pin(0)), /* IRQ 0 */
};
+#if IS_ENABLED(CONFIG_USB_RENESAS_USBHS_UDC)
+/*
+ * When USB1 is Func
+ */
+static int usbhsf_get_id(struct platform_device *pdev)
+{
+ return USBHS_GADGET;
+}
+
+#define SUSPMODE 0x102
+static int usbhsf_power_ctrl(struct platform_device *pdev,
+ void __iomem *base, int enable)
+{
+ enable = !!enable;
+
+ r8a7778_usb_phy_power(enable);
+
+ iowrite16(enable << 14, base + SUSPMODE);
+
+ return 0;
+}
+
+static struct resource usbhsf_resources[] __initdata = {
+ DEFINE_RES_MEM(0xffe60000, 0x110),
+ DEFINE_RES_IRQ(gic_iid(0x4f)),
+};
+
+static struct renesas_usbhs_platform_info usbhs_info __initdata = {
+ .platform_callback = {
+ .get_id = usbhsf_get_id,
+ .power_ctrl = usbhsf_power_ctrl,
+ },
+ .driver_param = {
+ .buswait_bwait = 4,
+ },
+};
+
+#define USB_PHY_SETTING {.port1_func = 1, .ovc_pin[1].active_high = 1,}
+#define USB1_DEVICE "renesas_usbhs"
+#define ADD_USB_FUNC_DEVICE_IF_POSSIBLE() \
+ platform_device_register_resndata( \
+ &platform_bus, "renesas_usbhs", -1, \
+ usbhsf_resources, \
+ ARRAY_SIZE(usbhsf_resources), \
+ &usbhs_info, sizeof(struct renesas_usbhs_platform_info))
+
+#else
+/*
+ * When USB1 is Host
+ */
+#define USB_PHY_SETTING { }
+#define USB1_DEVICE "ehci-platform"
+#define ADD_USB_FUNC_DEVICE_IF_POSSIBLE()
+
+#endif
+
/* USB */
static struct resource usb_phy_resources[] __initdata = {
DEFINE_RES_MEM(0xffe70800, 0x100),
DEFINE_RES_MEM(0xffe76000, 0x100),
};
-static struct rcar_phy_platform_data usb_phy_platform_data __initdata;
+static struct rcar_phy_platform_data usb_phy_platform_data __initdata =
+ USB_PHY_SETTING;
+
/* SDHI */
static struct sh_mobile_sdhi_info sdhi0_info __initdata = {
+ .dma_slave_tx = HPBDMA_SLAVE_SDHI0_TX,
+ .dma_slave_rx = HPBDMA_SLAVE_SDHI0_RX,
.tmio_caps = MMC_CAP_SD_HIGHSPEED,
.tmio_ocr_mask = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
@@ -101,6 +208,12 @@ static struct resource sdhi0_resources[] __initdata = {
DEFINE_RES_IRQ(gic_iid(0x77)),
};
+/* Ether */
+static struct resource ether_resources[] __initdata = {
+ DEFINE_RES_MEM(0xfde00000, 0x400),
+ DEFINE_RES_IRQ(gic_iid(0x89)),
+};
+
static struct sh_eth_plat_data ether_platform_data __initdata = {
.phy = 0x01,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
@@ -118,7 +231,9 @@ static struct sh_eth_plat_data ether_platform_data __initdata = {
static struct i2c_board_info i2c0_devices[] = {
{
I2C_BOARD_INFO("rx8581", 0x51),
- },
+ }, {
+ I2C_BOARD_INFO("ak4643", 0x12),
+ }
};
/* HSPI */
@@ -162,10 +277,6 @@ static struct sh_mmcif_plat_data sh_mmcif_plat __initdata = {
MMC_CAP_NEEDS_POLL,
};
-static struct rcar_vin_platform_data vin_platform_data __initdata = {
- .flags = RCAR_VIN_BT656,
-};
-
/* In the default configuration both decoders reside on I2C bus 0 */
#define BOCKW_CAMERA(idx) \
static struct i2c_board_info camera##idx##_info = { \
@@ -181,7 +292,237 @@ static struct soc_camera_link iclink##idx##_ml86v7667 __initdata = { \
BOCKW_CAMERA(0);
BOCKW_CAMERA(1);
+/* Sound */
+static struct resource rsnd_resources[] __initdata = {
+ [RSND_GEN1_SRU] = DEFINE_RES_MEM(0xffd90000, 0x1000),
+ [RSND_GEN1_SSI] = DEFINE_RES_MEM(0xffd91000, 0x1240),
+ [RSND_GEN1_ADG] = DEFINE_RES_MEM(0xfffe0000, 0x24),
+};
+
+static struct rsnd_ssi_platform_info rsnd_ssi[] = {
+ RSND_SSI_UNUSED, /* SSI 0 */
+ RSND_SSI_UNUSED, /* SSI 1 */
+ RSND_SSI_UNUSED, /* SSI 2 */
+ RSND_SSI_SET(1, 0, gic_iid(0x85), RSND_SSI_PLAY),
+ RSND_SSI_SET(2, 0, gic_iid(0x85), RSND_SSI_CLK_PIN_SHARE | RSND_SSI_CLK_FROM_ADG),
+ RSND_SSI_SET(0, 0, gic_iid(0x86), RSND_SSI_PLAY),
+ RSND_SSI_SET(0, 0, gic_iid(0x86), 0),
+ RSND_SSI_SET(3, 0, gic_iid(0x86), RSND_SSI_PLAY),
+ RSND_SSI_SET(4, 0, gic_iid(0x86), RSND_SSI_CLK_PIN_SHARE | RSND_SSI_CLK_FROM_ADG),
+};
+
+static struct rsnd_scu_platform_info rsnd_scu[9] = {
+ /* no member at this point */
+};
+
+enum {
+ AK4554_34 = 0,
+ AK4643_56,
+ AK4554_78,
+ SOUND_MAX,
+};
+
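+/*
+ * Codec power control, reference counted per codec: each codec sits behind
+ * one bit in the FPGA COMCTLR register and is shared by a playback/capture
+ * SSI pair (3/4, 5/6, 7/8), so the bit is only cleared on first enable and
+ * set again on the last disable.
+ */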
+static int rsnd_codec_power(int id, int enable)
+{
+ static int sound_user[SOUND_MAX] = {0, 0, 0};
+ int *usr = NULL;
+ u32 bit;
+
+ switch (id) {
+ case 3:
+ case 4:
+ usr = sound_user + AK4554_34;
+ bit = (1 << 10);
+ break;
+ case 5:
+ case 6:
+ usr = sound_user + AK4643_56;
+ bit = (1 << 6);
+ break;
+ case 7:
+ case 8:
+ usr = sound_user + AK4554_78;
+ bit = (1 << 7);
+ break;
+ }
+
+ if (!usr)
+ return -EIO;
+
+ if (enable) {
+ if (*usr == 0) {
+ u32 val = ioread16(fpga + COMCTLR);
+ val &= ~bit;
+ iowrite16(val, fpga + COMCTLR);
+ }
+
+ (*usr)++;
+ } else {
+ if (*usr == 0)
+ return 0;
+
+ (*usr)--;
+
+ if (*usr == 0) {
+ u32 val = ioread16(fpga + COMCTLR);
+ val |= bit;
+ iowrite16(val, fpga + COMCTLR);
+ }
+ }
+
+ return 0;
+}
+
+static int rsnd_start(int id)
+{
+ return rsnd_codec_power(id, 1);
+}
+
+static int rsnd_stop(int id)
+{
+ return rsnd_codec_power(id, 0);
+}
+
+static struct rcar_snd_info rsnd_info = {
+ .flags = RSND_GEN1,
+ .ssi_info = rsnd_ssi,
+ .ssi_info_nr = ARRAY_SIZE(rsnd_ssi),
+ .scu_info = rsnd_scu,
+ .scu_info_nr = ARRAY_SIZE(rsnd_scu),
+ .start = rsnd_start,
+ .stop = rsnd_stop,
+};
+
+static struct asoc_simple_card_info rsnd_card_info[] = {
+ /* SSI5, SSI6 */
+ {
+ .name = "AK4643",
+ .card = "SSI56-AK4643",
+ .codec = "ak4642-codec.0-0012",
+ .platform = "rcar_sound",
+ .daifmt = SND_SOC_DAIFMT_LEFT_J,
+ .cpu_dai = {
+ .name = "rsnd-dai.0",
+ .fmt = SND_SOC_DAIFMT_CBS_CFS,
+ },
+ .codec_dai = {
+ .name = "ak4642-hifi",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM,
+ .sysclk = 11289600,
+ },
+ },
+ /* SSI3 */
+ {
+ .name = "AK4554",
+ .card = "SSI3-AK4554(playback)",
+ .codec = "ak4554-adc-dac.0",
+ .platform = "rcar_sound",
+ .cpu_dai = {
+ .name = "rsnd-dai.1",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM |
+ SND_SOC_DAIFMT_RIGHT_J,
+ },
+ .codec_dai = {
+ .name = "ak4554-hifi",
+ },
+ },
+ /* SSI4 */
+ {
+ .name = "AK4554",
+ .card = "SSI4-AK4554(capture)",
+ .codec = "ak4554-adc-dac.0",
+ .platform = "rcar_sound",
+ .cpu_dai = {
+ .name = "rsnd-dai.2",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM |
+ SND_SOC_DAIFMT_LEFT_J,
+ },
+ .codec_dai = {
+ .name = "ak4554-hifi",
+ },
+ },
+ /* SSI7 */
+ {
+ .name = "AK4554",
+ .card = "SSI7-AK4554(playback)",
+ .codec = "ak4554-adc-dac.1",
+ .platform = "rcar_sound",
+ .cpu_dai = {
+ .name = "rsnd-dai.3",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM |
+ SND_SOC_DAIFMT_RIGHT_J,
+ },
+ .codec_dai = {
+ .name = "ak4554-hifi",
+ },
+ },
+ /* SSI8 */
+ {
+ .name = "AK4554",
+ .card = "SSI8-AK4554(capture)",
+ .codec = "ak4554-adc-dac.1",
+ .platform = "rcar_sound",
+ .cpu_dai = {
+ .name = "rsnd-dai.4",
+ .fmt = SND_SOC_DAIFMT_CBM_CFM |
+ SND_SOC_DAIFMT_LEFT_J,
+ },
+ .codec_dai = {
+ .name = "ak4554-hifi",
+ },
+ }
+};
+
+/* VIN */
+static struct rcar_vin_platform_data vin_platform_data __initdata = {
+ .flags = RCAR_VIN_BT656,
+};
+
+#define R8A7778_VIN(idx) \
+static struct resource vin##idx##_resources[] __initdata = { \
+ DEFINE_RES_MEM(0xffc50000 + 0x1000 * (idx), 0x1000), \
+ DEFINE_RES_IRQ(gic_iid(0x5a)), \
+}; \
+ \
+static struct platform_device_info vin##idx##_info __initdata = { \
+ .parent = &platform_bus, \
+ .name = "r8a7778-vin", \
+ .id = idx, \
+ .res = vin##idx##_resources, \
+ .num_res = ARRAY_SIZE(vin##idx##_resources), \
+ .dma_mask = DMA_BIT_MASK(32), \
+ .data = &vin_platform_data, \
+ .size_data = sizeof(vin_platform_data), \
+}
+R8A7778_VIN(0);
+R8A7778_VIN(1);
+
static const struct pinctrl_map bockw_pinctrl_map[] = {
+ /* AUDIO */
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7778",
+ "audio_clk_a", "audio_clk"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7778",
+ "audio_clk_b", "audio_clk"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7778",
+ "ssi34_ctrl", "ssi"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7778",
+ "ssi3_data", "ssi"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7778",
+ "ssi4_data", "ssi"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7778",
+ "ssi5_ctrl", "ssi"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7778",
+ "ssi5_data", "ssi"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7778",
+ "ssi6_ctrl", "ssi"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7778",
+ "ssi6_data", "ssi"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7778",
+ "ssi78_ctrl", "ssi"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7778",
+ "ssi7_data", "ssi"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar_sound", "pfc-r8a7778",
+ "ssi8_data", "ssi"),
/* Ether */
PIN_MAP_MUX_GROUP_DEFAULT("r8a777x-ether", "pfc-r8a7778",
"ether_rmii", "ether"),
@@ -201,7 +542,7 @@ static const struct pinctrl_map bockw_pinctrl_map[] = {
/* USB */
PIN_MAP_MUX_GROUP_DEFAULT("ehci-platform", "pfc-r8a7778",
"usb0", "usb0"),
- PIN_MAP_MUX_GROUP_DEFAULT("ehci-platform", "pfc-r8a7778",
+ PIN_MAP_MUX_GROUP_DEFAULT(USB1_DEVICE, "pfc-r8a7778",
"usb1", "usb1"),
/* SDHI0 */
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
@@ -224,22 +565,28 @@ static const struct pinctrl_map bockw_pinctrl_map[] = {
"vin1_data8", "vin1"),
};
-#define FPGA 0x18200000
-#define IRQ0MR 0x30
#define PFC 0xfffc0000
#define PUPR4 0x110
static void __init bockw_init(void)
{
void __iomem *base;
+ struct clk *clk;
+ int i;
r8a7778_clock_init();
r8a7778_init_irq_extpin(1);
r8a7778_add_standard_devices();
- r8a7778_add_ether_device(&ether_platform_data);
- r8a7778_add_vin_device(0, &vin_platform_data);
+
+ platform_device_register_resndata(&platform_bus, "r8a777x-ether", -1,
+ ether_resources,
+ ARRAY_SIZE(ether_resources),
+ &ether_platform_data,
+ sizeof(ether_platform_data));
+
+ platform_device_register_full(&vin0_info);
/* VIN1 has a pin conflict with Ether */
if (!IS_ENABLED(CONFIG_SH_ETH))
- r8a7778_add_vin_device(1, &vin_platform_data);
+ platform_device_register_full(&vin1_info);
platform_device_register_data(&platform_bus, "soc-camera-pdrv", 0,
&iclink0_ml86v7667,
sizeof(iclink0_ml86v7667));
@@ -269,8 +616,8 @@ static void __init bockw_init(void)
/* for SMSC */
- base = ioremap_nocache(FPGA, SZ_1M);
- if (base) {
+ fpga = ioremap_nocache(FPGA, SZ_1M);
+ if (fpga) {
/*
* CAUTION
*
@@ -278,10 +625,9 @@ static void __init bockw_init(void)
* This should be handled properly in the future;
* for now we assume IRQ0 is used only by the SMSC chip.
*/
- u16 val = ioread16(base + IRQ0MR);
+ u16 val = ioread16(fpga + IRQ0MR);
val &= ~(1 << 4); /* enable SMSC911x */
- iowrite16(val, base + IRQ0MR);
- iounmap(base);
+ iowrite16(val, fpga + IRQ0MR);
regulator_register_fixed(0, dummy_supplies,
ARRAY_SIZE(dummy_supplies));
@@ -308,6 +654,42 @@ static void __init bockw_init(void)
sdhi0_resources, ARRAY_SIZE(sdhi0_resources),
&sdhi0_info, sizeof(struct sh_mobile_sdhi_info));
}
+
+ /* for Audio */
+ clk = clk_get(NULL, "audio_clk_b");
+ clk_set_rate(clk, 24576000);
+ clk_put(clk);
+ rsnd_codec_power(5, 1); /* enable ak4642 */
+
+ platform_device_register_simple(
+ "ak4554-adc-dac", 0, NULL, 0);
+
+ platform_device_register_simple(
+ "ak4554-adc-dac", 1, NULL, 0);
+
+ platform_device_register_resndata(
+ &platform_bus, "rcar_sound", -1,
+ rsnd_resources, ARRAY_SIZE(rsnd_resources),
+ &rsnd_info, sizeof(rsnd_info));
+
+ for (i = 0; i < ARRAY_SIZE(rsnd_card_info); i++) {
+ struct platform_device_info cardinfo = {
+ .parent = &platform_bus,
+ .name = "asoc-simple-card",
+ .id = i,
+ .data = &rsnd_card_info[i],
+ .size_data = sizeof(struct asoc_simple_card_info),
+ .dma_mask = ~0,
+ };
+
+ platform_device_register_full(&cardinfo);
+ }
+}
+
+static void __init bockw_init_late(void)
+{
+ r8a7778_init_late();
+ ADD_USB_FUNC_DEVICE_IF_POSSIBLE();
}
static const char *bockw_boards_compat_dt[] __initdata = {
@@ -320,5 +702,5 @@ DT_MACHINE_START(BOCKW_DT, "bockw")
.init_irq = r8a7778_init_irq_dt,
.init_machine = bockw_init,
.dt_compat = bockw_boards_compat_dt,
- .init_late = r8a7778_init_late,
+ .init_late = bockw_init_late,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-genmai.c b/arch/arm/mach-shmobile/board-genmai.c
new file mode 100644
index 000000000000..3e92e3c62d4c
--- /dev/null
+++ b/arch/arm/mach-shmobile/board-genmai.c
@@ -0,0 +1,43 @@
+/*
+ * Genmai board support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <mach/common.h>
+#include <mach/r7s72100.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+static void __init genmai_add_standard_devices(void)
+{
+ r7s72100_clock_init();
+ r7s72100_add_dt_devices();
+}
+
+static const char * const genmai_boards_compat_dt[] __initconst = {
+ "renesas,genmai",
+ NULL,
+};
+
+DT_MACHINE_START(GENMAI_DT, "genmai")
+ .init_early = r7s72100_init_early,
+ .init_machine = genmai_add_standard_devices,
+ .dt_compat = genmai_boards_compat_dt,
+MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-koelsch.c b/arch/arm/mach-shmobile/board-koelsch.c
new file mode 100644
index 000000000000..ace1711a6cd8
--- /dev/null
+++ b/arch/arm/mach-shmobile/board-koelsch.c
@@ -0,0 +1,47 @@
+/*
+ * Koelsch board support
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <mach/common.h>
+#include <mach/r8a7791.h>
+#include <mach/rcar-gen2.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+static void __init koelsch_add_standard_devices(void)
+{
+ r8a7791_clock_init();
+ r8a7791_add_standard_devices();
+}
+
+static const char * const koelsch_boards_compat_dt[] __initconst = {
+ "renesas,koelsch",
+ NULL,
+};
+
+DT_MACHINE_START(KOELSCH_DT, "koelsch")
+ .smp = smp_ops(r8a7791_smp_ops),
+ .init_early = r8a7791_init_early,
+ .init_machine = koelsch_add_standard_devices,
+ .init_time = rcar_gen2_timer_init,
+ .dt_compat = koelsch_boards_compat_dt,
+MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kzm9d-reference.c b/arch/arm/mach-shmobile/board-kzm9d-reference.c
index 8f8bb2fab076..054d8d5c8fc1 100644
--- a/arch/arm/mach-shmobile/board-kzm9d-reference.c
+++ b/arch/arm/mach-shmobile/board-kzm9d-reference.c
@@ -33,6 +33,7 @@ static void __init kzm9d_add_standard_devices(void)
}
static const char *kzm9d_boards_compat_dt[] __initdata = {
+ "renesas,kzm9d",
"renesas,kzm9d-reference",
NULL,
};
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index f1994968d303..fe689b7fdc9e 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -366,6 +366,7 @@ static struct resource sh_mmcif_resources[] = {
static struct sh_mmcif_plat_data sh_mmcif_platdata = {
.ocr = MMC_VDD_165_195,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .ccs_unsupported = true,
.slave_id_tx = SHDMA_SLAVE_MMCIF_TX,
.slave_id_rx = SHDMA_SLAVE_MMCIF_RX,
};
diff --git a/arch/arm/mach-shmobile/board-lager-reference.c b/arch/arm/mach-shmobile/board-lager-reference.c
index 9c316a1b2e32..1a1a4a888632 100644
--- a/arch/arm/mach-shmobile/board-lager-reference.c
+++ b/arch/arm/mach-shmobile/board-lager-reference.c
@@ -38,8 +38,9 @@ static const char *lager_boards_compat_dt[] __initdata = {
};
DT_MACHINE_START(LAGER_DT, "lager")
- .init_early = r8a7790_init_delay,
+ .smp = smp_ops(r8a7790_smp_ops),
+ .init_early = r8a7790_init_early,
+ .init_time = rcar_gen2_timer_init,
.init_machine = lager_add_standard_devices,
- .init_time = r8a7790_timer_init,
.dt_compat = lager_boards_compat_dt,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index 5930af8d434f..a8d3ce646fb9 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -28,6 +28,7 @@
#include <linux/mmc/sh_mmcif.h>
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/gpio-rcar.h>
+#include <linux/platform_data/rcar-du.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/regulator/fixed.h>
@@ -39,6 +40,62 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+/* DU */
+static struct rcar_du_encoder_data lager_du_encoders[] = {
+ {
+ .type = RCAR_DU_ENCODER_VGA,
+ .output = RCAR_DU_OUTPUT_DPAD0,
+ }, {
+ .type = RCAR_DU_ENCODER_NONE,
+ .output = RCAR_DU_OUTPUT_LVDS1,
+ .connector.lvds.panel = {
+ .width_mm = 210,
+ .height_mm = 158,
+ .mode = {
+ .clock = 65000,
+ .hdisplay = 1024,
+ .hsync_start = 1048,
+ .hsync_end = 1184,
+ .htotal = 1344,
+ .vdisplay = 768,
+ .vsync_start = 771,
+ .vsync_end = 777,
+ .vtotal = 806,
+ .flags = 0,
+ },
+ },
+ },
+};
+
+static const struct rcar_du_platform_data lager_du_pdata __initconst = {
+ .encoders = lager_du_encoders,
+ .num_encoders = ARRAY_SIZE(lager_du_encoders),
+};
+
+static const struct resource du_resources[] __initconst = {
+ DEFINE_RES_MEM(0xfeb00000, 0x70000),
+ DEFINE_RES_MEM_NAMED(0xfeb90000, 0x1c, "lvds.0"),
+ DEFINE_RES_MEM_NAMED(0xfeb94000, 0x1c, "lvds.1"),
+ DEFINE_RES_IRQ(gic_spi(256)),
+ DEFINE_RES_IRQ(gic_spi(268)),
+ DEFINE_RES_IRQ(gic_spi(269)),
+};
+
+static void __init lager_add_du_device(void)
+{
+ struct platform_device_info info = {
+ .name = "rcar-du-r8a7790",
+ .id = -1,
+ .res = du_resources,
+ .num_res = ARRAY_SIZE(du_resources),
+ .data = &lager_du_pdata,
+ .size_data = sizeof(lager_du_pdata),
+ .dma_mask = DMA_BIT_MASK(32),
+ };
+
+ platform_device_register_full(&info);
+}
+
/* LEDS */
static struct gpio_led lager_leds[] = {
{
@@ -56,7 +113,7 @@ static struct gpio_led lager_leds[] = {
},
};
-static __initdata struct gpio_led_platform_data lager_leds_pdata = {
+static const struct gpio_led_platform_data lager_leds_pdata __initconst = {
.leds = lager_leds,
.num_leds = ARRAY_SIZE(lager_leds),
};
@@ -72,7 +129,7 @@ static struct gpio_keys_button gpio_buttons[] = {
GPIO_KEY(KEY_1, RCAR_GP_PIN(1, 14), "SW2-pin1"),
};
-static __initdata struct gpio_keys_platform_data lager_keys_pdata = {
+static const struct gpio_keys_platform_data lager_keys_pdata __initconst = {
.buttons = gpio_buttons,
.nbuttons = ARRAY_SIZE(gpio_buttons),
};
@@ -84,29 +141,38 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] =
};
/* MMCIF */
-static struct sh_mmcif_plat_data mmcif1_pdata __initdata = {
+static const struct sh_mmcif_plat_data mmcif1_pdata __initconst = {
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .clk_ctrl2_present = true,
+ .ccs_unsupported = true,
};
-static struct resource mmcif1_resources[] __initdata = {
+static const struct resource mmcif1_resources[] __initconst = {
DEFINE_RES_MEM_NAMED(0xee220000, 0x80, "MMCIF1"),
DEFINE_RES_IRQ(gic_spi(170)),
};
/* Ether */
-static struct sh_eth_plat_data ether_pdata __initdata = {
+static const struct sh_eth_plat_data ether_pdata __initconst = {
.phy = 0x1,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
.phy_interface = PHY_INTERFACE_MODE_RMII,
.ether_link_active_low = 1,
};
-static struct resource ether_resources[] __initdata = {
+static const struct resource ether_resources[] __initconst = {
DEFINE_RES_MEM(0xee700000, 0x400),
DEFINE_RES_IRQ(gic_spi(162)),
};
static const struct pinctrl_map lager_pinctrl_map[] = {
+ /* DU (CN10: ARGB0, CN13: LVDS) */
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7790", "pfc-r8a7790",
+ "du_rgb666", "du"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7790", "pfc-r8a7790",
+ "du_sync_1", "du"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7790", "pfc-r8a7790",
+ "du_clk_out_0", "du"),
/* SCIF0 (CN19: DEBUG SERIAL0) */
PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.6", "pfc-r8a7790",
"scif0_data", "scif0"),
@@ -154,6 +220,8 @@ static void __init lager_add_standard_devices(void)
ether_resources,
ARRAY_SIZE(ether_resources),
&ether_pdata, sizeof(ether_pdata));
+
+ lager_add_du_device();
}
/*
@@ -180,14 +248,15 @@ static void __init lager_init(void)
phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
}
-static const char *lager_boards_compat_dt[] __initdata = {
+static const char * const lager_boards_compat_dt[] __initconst = {
"renesas,lager",
NULL,
};
DT_MACHINE_START(LAGER_DT, "lager")
- .init_early = r8a7790_init_delay,
- .init_time = r8a7790_timer_init,
+ .smp = smp_ops(r8a7790_smp_ops),
+ .init_early = r8a7790_init_early,
+ .init_time = rcar_gen2_timer_init,
.init_machine = lager_init,
.dt_compat = lager_boards_compat_dt,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-marzen-reference.c b/arch/arm/mach-shmobile/board-marzen-reference.c
index 3f4250a2d4eb..2773936bf7dc 100644
--- a/arch/arm/mach-shmobile/board-marzen-reference.c
+++ b/arch/arm/mach-shmobile/board-marzen-reference.c
@@ -28,6 +28,7 @@
static void __init marzen_init(void)
{
r8a7779_add_standard_devices_dt();
+ r8a7779_init_irq_extpin_dt(1); /* IRQ1 as individual interrupt */
}
static const char *marzen_boards_compat_dt[] __initdata = {
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 3f5044fda4e3..da1352f5f71b 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -30,6 +30,7 @@
#include <linux/dma-mapping.h>
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/gpio-rcar.h>
+#include <linux/platform_data/rcar-du.h>
#include <linux/platform_data/usb-rcar-phy.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
@@ -124,6 +125,8 @@ static struct resource sdhi0_resources[] = {
};
static struct sh_mobile_sdhi_info sdhi0_platform_data = {
+ .dma_slave_tx = HPBDMA_SLAVE_SDHI0_TX,
+ .dma_slave_rx = HPBDMA_SLAVE_SDHI0_RX,
.tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
.tmio_caps = MMC_CAP_SD_HIGHSPEED,
};
@@ -169,6 +172,63 @@ static struct platform_device hspi_device = {
.num_resources = ARRAY_SIZE(hspi_resources),
};
+/*
+ * DU
+ *
+ * The panel only specifies the [hv]display and [hv]total values. The position
+ * and width of the sync pulses don't matter; they're copied from VESA timings.
+ */
+static struct rcar_du_encoder_data du_encoders[] = {
+ {
+ .type = RCAR_DU_ENCODER_VGA,
+ .output = RCAR_DU_OUTPUT_DPAD0,
+ }, {
+ .type = RCAR_DU_ENCODER_LVDS,
+ .output = RCAR_DU_OUTPUT_DPAD1,
+ .connector.lvds.panel = {
+ .width_mm = 210,
+ .height_mm = 158,
+ .mode = {
+ .clock = 65000,
+ .hdisplay = 1024,
+ .hsync_start = 1048,
+ .hsync_end = 1184,
+ .htotal = 1344,
+ .vdisplay = 768,
+ .vsync_start = 771,
+ .vsync_end = 777,
+ .vtotal = 806,
+ .flags = 0,
+ },
+ },
+ },
+};
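+
+/*
+ * Sanity check on the LVDS mode above: a 65 MHz pixel clock over the
+ * 1344 x 806 total raster gives 65000000 / (1344 * 806) ~= 60 Hz,
+ * i.e. a standard 1024x768@60 (XGA-like) timing.
+ */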
+
+static const struct rcar_du_platform_data du_pdata __initconst = {
+ .encoders = du_encoders,
+ .num_encoders = ARRAY_SIZE(du_encoders),
+};
+
+static const struct resource du_resources[] __initconst = {
+ DEFINE_RES_MEM(0xfff80000, 0x40000),
+ DEFINE_RES_IRQ(gic_iid(0x3f)),
+};
+
+static void __init marzen_add_du_device(void)
+{
+ struct platform_device_info info = {
+ .name = "rcar-du-r8a7779",
+ .id = -1,
+ .res = du_resources,
+ .num_res = ARRAY_SIZE(du_resources),
+ .data = &du_pdata,
+ .size_data = sizeof(du_pdata),
+ .dma_mask = DMA_BIT_MASK(32),
+ };
+
+ platform_device_register_full(&info);
+}
+
/* LEDS */
static struct gpio_led marzen_leds[] = {
{
@@ -237,6 +297,19 @@ static struct platform_device *marzen_devices[] __initdata = {
};
static const struct pinctrl_map marzen_pinctrl_map[] = {
+ /* DU (CN10: ARGB0, CN13: LVDS) */
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779",
+ "du0_rgb888", "du0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779",
+ "du0_sync_1", "du0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779",
+ "du0_clk_out_0", "du0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779",
+ "du1_rgb666", "du1"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779",
+ "du1_sync_1", "du1"),
+ PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779",
+ "du1_clk_out", "du1"),
/* HSPI0 */
PIN_MAP_MUX_GROUP_DEFAULT("sh-hspi.0", "pfc-r8a7779",
"hspi0", "hspi0"),
@@ -297,6 +370,7 @@ static void __init marzen_init(void)
r8a7779_add_vin_device(1, &vin_platform_data);
r8a7779_add_vin_device(3, &vin_platform_data);
platform_add_devices(marzen_devices, ARRAY_SIZE(marzen_devices));
+ marzen_add_du_device();
}
static const char *marzen_boards_compat_dt[] __initdata = {
diff --git a/arch/arm/mach-shmobile/clock-r7s72100.c b/arch/arm/mach-shmobile/clock-r7s72100.c
new file mode 100644
index 000000000000..4aba20ca127e
--- /dev/null
+++ b/arch/arm/mach-shmobile/clock-r7s72100.c
@@ -0,0 +1,202 @@
+/*
+ * r7s72100 clock framework support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2012 Phil Edworthy
+ * Copyright (C) 2011 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/sh_clk.h>
+#include <linux/clkdev.h>
+#include <mach/common.h>
+#include <mach/r7s72100.h>
+
+/* registers */
+#define FRQCR 0xfcfe0010
+#define FRQCR2 0xfcfe0014
+#define STBCR3 0xfcfe0420
+#define STBCR4 0xfcfe0424
+
+#define PLL_RATE 30
+
+static struct clk_mapping cpg_mapping = {
+ .phys = 0xfcfe0000,
+ .len = 0x1000,
+};
+
+/* Fixed 32.768 kHz root clock for RTC */
+static struct clk r_clk = {
+ .rate = 32768,
+};
+
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 13330000,
+ .mapping = &cpg_mapping,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ return clk->parent->rate * PLL_RATE;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static unsigned long bus_recalc(struct clk *clk)
+{
+ return clk->parent->rate * 2 / 3;
+}
+
+static struct sh_clk_ops bus_clk_ops = {
+ .recalc = bus_recalc,
+};
+
+static struct clk bus_clk = {
+ .ops = &bus_clk_ops,
+ .parent = &pll_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static unsigned long peripheral0_recalc(struct clk *clk)
+{
+ return clk->parent->rate / 12;
+}
+
+static struct sh_clk_ops peripheral0_clk_ops = {
+ .recalc = peripheral0_recalc,
+};
+
+static struct clk peripheral0_clk = {
+ .ops = &peripheral0_clk_ops,
+ .parent = &pll_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static unsigned long peripheral1_recalc(struct clk *clk)
+{
+ return clk->parent->rate / 6;
+}
+
+static struct sh_clk_ops peripheral1_clk_ops = {
+ .recalc = peripheral1_recalc,
+};
+
+static struct clk peripheral1_clk = {
+ .ops = &peripheral1_clk_ops,
+ .parent = &pll_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+struct clk *main_clks[] = {
+ &r_clk,
+ &extal_clk,
+ &pll_clk,
+ &bus_clk,
+ &peripheral0_clk,
+ &peripheral1_clk,
+};
+
+static int div2[] = { 1, 3, 0, 3 }; /* 1, 2/3, reserve, 1/3 */
+static int multipliers[] = { 1, 2, 1, 1 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
+ .multipliers = multipliers,
+ .nr_multipliers = ARRAY_SIZE(multipliers),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I,
+ DIV4_NR };
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
+
+/* The mask field specifies the div2 entries that are valid */
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCR, 8, 0xB, CLK_ENABLE_REG_16BIT
+ | CLK_ENABLE_ON_INIT),
+};
+
+enum { MSTP47, MSTP46, MSTP45, MSTP44, MSTP43, MSTP42, MSTP41, MSTP40,
+ MSTP33, MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ [MSTP47] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 7, 0), /* SCIF0 */
+ [MSTP46] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 6, 0), /* SCIF1 */
+ [MSTP45] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 5, 0), /* SCIF2 */
+ [MSTP44] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 4, 0), /* SCIF3 */
+ [MSTP43] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 3, 0), /* SCIF4 */
+ [MSTP42] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 2, 0), /* SCIF5 */
+ [MSTP41] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 1, 0), /* SCIF6 */
+ [MSTP40] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 0, 0), /* SCIF7 */
+ [MSTP33] = SH_CLK_MSTP8(&peripheral0_clk, STBCR3, 3, 0), /* MTU2 */
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+ CLKDEV_CON_ID("peripheral_clk", &peripheral1_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+
+ /* MSTP clocks */
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP47]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP46]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP45]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP44]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP43]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP42]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.6", &mstp_clks[MSTP41]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.7", &mstp_clks[MSTP40]),
+};
+
+void __init r7s72100_clock_init(void)
+{
+ int k, ret = 0;
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
+
+ if (!ret)
+ shmobile_clk_init();
+ else
+ panic("failed to setup rza1 clocks\n");
+}
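A minimal sketch of how these clkdev lookups are consumed, assuming a device bound as "sh-sci.0"; the real sh-sci driver does the equivalent in its own probe path, and example_enable_scif_clock() is only illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* hypothetical fragment: ungate SCIF0 via the "sci_fck" (MSTP47) lookup above */
static int example_enable_scif_clock(struct device *dev)
{
	struct clk *clk = clk_get(dev, "sci_fck");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* clears bit 7 of STBCR4; the driver keeps the clk reference
	 * and drops it with clk_put() on remove */
	return clk_enable(clk);
}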
diff --git a/arch/arm/mach-shmobile/clock-r8a73a4.c b/arch/arm/mach-shmobile/clock-r8a73a4.c
index 5bd2e851e3c7..571409b611d3 100644
--- a/arch/arm/mach-shmobile/clock-r8a73a4.c
+++ b/arch/arm/mach-shmobile/clock-r8a73a4.c
@@ -504,7 +504,7 @@ static struct clk div6_clks[DIV6_NR] = {
/* MSTP */
enum {
- MSTP217, MSTP216, MSTP207, MSTP206, MSTP204, MSTP203,
+ MSTP218, MSTP217, MSTP216, MSTP207, MSTP206, MSTP204, MSTP203,
MSTP329, MSTP323, MSTP318, MSTP317, MSTP316,
MSTP315, MSTP314, MSTP313, MSTP312, MSTP305, MSTP300,
MSTP411, MSTP410, MSTP409,
@@ -519,6 +519,7 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP207] = SH_CLK_MSTP32(&div6_clks[DIV6_MP], SMSTPCR2, 7, 0), /* SCIFB1 */
[MSTP216] = SH_CLK_MSTP32(&div6_clks[DIV6_MP], SMSTPCR2, 16, 0), /* SCIFB2 */
[MSTP217] = SH_CLK_MSTP32(&div6_clks[DIV6_MP], SMSTPCR2, 17, 0), /* SCIFB3 */
+ [MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC */
[MSTP300] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR3, 0, 0), /* IIC2 */
[MSTP305] = SH_CLK_MSTP32(&div6_clks[DIV6_MMC1],SMSTPCR3, 5, 0), /* MMCIF1 */
[MSTP312] = SH_CLK_MSTP32(&div6_clks[DIV6_SDHI2],SMSTPCR3, 12, 0), /* SDHI2 */
@@ -578,6 +579,8 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP207]),
CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP216]),
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP217]),
+ CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]),
+ CLKDEV_DEV_ID("e6700020.dma-controller", &mstp_clks[MSTP218]),
CLKDEV_DEV_ID("rcar_thermal", &mstp_clks[MSTP522]),
CLKDEV_DEV_ID("e6520000.i2c", &mstp_clks[MSTP300]),
CLKDEV_DEV_ID("sh_mmcif.1", &mstp_clks[MSTP305]),
diff --git a/arch/arm/mach-shmobile/clock-r8a7778.c b/arch/arm/mach-shmobile/clock-r8a7778.c
index c4bf2d8fb111..fb6af83858e3 100644
--- a/arch/arm/mach-shmobile/clock-r8a7778.c
+++ b/arch/arm/mach-shmobile/clock-r8a7778.c
@@ -69,6 +69,15 @@ static struct clk extal_clk = {
.mapping = &cpg_mapping,
};
+static struct clk audio_clk_a = {
+};
+
+static struct clk audio_clk_b = {
+};
+
+static struct clk audio_clk_c = {
+};
+
/*
* clock ratio of these clock will be updated
* on r8a7778_clock_init()
@@ -100,18 +109,23 @@ static struct clk *main_clks[] = {
&p_clk,
&g_clk,
&z_clk,
+ &audio_clk_a,
+ &audio_clk_b,
+ &audio_clk_c,
};
enum {
MSTP331,
MSTP323, MSTP322, MSTP321,
+ MSTP311, MSTP310,
+ MSTP309, MSTP308, MSTP307,
MSTP114,
MSTP110, MSTP109,
MSTP100,
MSTP030,
MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024, MSTP023, MSTP022, MSTP021,
- MSTP016, MSTP015,
- MSTP007,
+ MSTP016, MSTP015, MSTP012, MSTP011, MSTP010,
+ MSTP009, MSTP008, MSTP007,
MSTP_NR };
static struct clk mstp_clks[MSTP_NR] = {
@@ -119,6 +133,11 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP323] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 23, 0), /* SDHI0 */
[MSTP322] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 22, 0), /* SDHI1 */
[MSTP321] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 21, 0), /* SDHI2 */
+ [MSTP311] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 11, 0), /* SSI4 */
+ [MSTP310] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 10, 0), /* SSI5 */
+ [MSTP309] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 9, 0), /* SSI6 */
+ [MSTP308] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 8, 0), /* SSI7 */
+ [MSTP307] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 7, 0), /* SSI8 */
[MSTP114] = SH_CLK_MSTP32(&p_clk, MSTPCR1, 14, 0), /* Ether */
[MSTP110] = SH_CLK_MSTP32(&s_clk, MSTPCR1, 10, 0), /* VIN0 */
[MSTP109] = SH_CLK_MSTP32(&s_clk, MSTPCR1, 9, 0), /* VIN1 */
@@ -135,11 +154,20 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP021] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 21, 0), /* SCIF5 */
[MSTP016] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 16, 0), /* TMU0 */
[MSTP015] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 15, 0), /* TMU1 */
+ [MSTP012] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 12, 0), /* SSI0 */
+ [MSTP011] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 11, 0), /* SSI1 */
+ [MSTP010] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 10, 0), /* SSI2 */
+ [MSTP009] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 9, 0), /* SSI3 */
+ [MSTP008] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 8, 0), /* SRU */
[MSTP007] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 7, 0), /* HSPI */
};
static struct clk_lookup lookups[] = {
/* main */
+ CLKDEV_CON_ID("audio_clk_a", &audio_clk_a),
+ CLKDEV_CON_ID("audio_clk_b", &audio_clk_b),
+ CLKDEV_CON_ID("audio_clk_c", &audio_clk_c),
+ CLKDEV_CON_ID("audio_clk_internal", &s1_clk),
CLKDEV_CON_ID("shyway_clk", &s_clk),
CLKDEV_CON_ID("peripheral_clk", &p_clk),
@@ -153,6 +181,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("r8a7778-vin.1", &mstp_clks[MSTP109]), /* VIN1 */
CLKDEV_DEV_ID("ehci-platform", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */
CLKDEV_DEV_ID("ohci-platform", &mstp_clks[MSTP100]), /* USB OHCI port0/1 */
+ CLKDEV_DEV_ID("renesas_usbhs", &mstp_clks[MSTP100]), /* USB FUNC */
CLKDEV_DEV_ID("i2c-rcar.0", &mstp_clks[MSTP030]), /* I2C0 */
CLKDEV_DEV_ID("i2c-rcar.1", &mstp_clks[MSTP029]), /* I2C1 */
CLKDEV_DEV_ID("i2c-rcar.2", &mstp_clks[MSTP028]), /* I2C2 */
@@ -168,6 +197,17 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-hspi.0", &mstp_clks[MSTP007]), /* HSPI0 */
CLKDEV_DEV_ID("sh-hspi.1", &mstp_clks[MSTP007]), /* HSPI1 */
CLKDEV_DEV_ID("sh-hspi.2", &mstp_clks[MSTP007]), /* HSPI2 */
+ CLKDEV_DEV_ID("rcar_sound", &mstp_clks[MSTP008]), /* SRU */
+
+ CLKDEV_ICK_ID("ssi.0", "rcar_sound", &mstp_clks[MSTP012]),
+ CLKDEV_ICK_ID("ssi.1", "rcar_sound", &mstp_clks[MSTP011]),
+ CLKDEV_ICK_ID("ssi.2", "rcar_sound", &mstp_clks[MSTP010]),
+ CLKDEV_ICK_ID("ssi.3", "rcar_sound", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("ssi.4", "rcar_sound", &mstp_clks[MSTP311]),
+ CLKDEV_ICK_ID("ssi.5", "rcar_sound", &mstp_clks[MSTP310]),
+ CLKDEV_ICK_ID("ssi.6", "rcar_sound", &mstp_clks[MSTP309]),
+ CLKDEV_ICK_ID("ssi.7", "rcar_sound", &mstp_clks[MSTP308]),
+ CLKDEV_ICK_ID("ssi.8", "rcar_sound", &mstp_clks[MSTP307]),
};
void __init r8a7778_clock_init(void)
diff --git a/arch/arm/mach-shmobile/clock-r8a7779.c b/arch/arm/mach-shmobile/clock-r8a7779.c
index bd6ad922eb7e..1f7080fab0a5 100644
--- a/arch/arm/mach-shmobile/clock-r8a7779.c
+++ b/arch/arm/mach-shmobile/clock-r8a7779.c
@@ -200,7 +200,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP322]), /* SDHI1 */
CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP321]), /* SDHI2 */
CLKDEV_DEV_ID("sh_mobile_sdhi.3", &mstp_clks[MSTP320]), /* SDHI3 */
- CLKDEV_DEV_ID("rcar-du.0", &mstp_clks[MSTP103]), /* DU */
+ CLKDEV_DEV_ID("rcar-du-r8a7779", &mstp_clks[MSTP103]), /* DU */
};
void __init r8a7779_clock_init(void)
diff --git a/arch/arm/mach-shmobile/clock-r8a7790.c b/arch/arm/mach-shmobile/clock-r8a7790.c
index fc36d3db0b4d..a64f965c7da1 100644
--- a/arch/arm/mach-shmobile/clock-r8a7790.c
+++ b/arch/arm/mach-shmobile/clock-r8a7790.c
@@ -52,6 +52,7 @@
#define SMSTPCR5 0xe6150144
#define SMSTPCR7 0xe615014c
#define SMSTPCR8 0xe6150990
+#define SMSTPCR9 0xe6150994
#define SDCKCR 0xE6150074
#define SD2CKCR 0xE6150078
@@ -181,8 +182,9 @@ static struct clk div6_clks[DIV6_NR] = {
/* MSTP */
enum {
+ MSTP931, MSTP930, MSTP929, MSTP928,
MSTP813,
- MSTP721, MSTP720,
+ MSTP726, MSTP725, MSTP724, MSTP723, MSTP722, MSTP721, MSTP720,
MSTP717, MSTP716,
MSTP522,
MSTP315, MSTP314, MSTP313, MSTP312, MSTP311, MSTP305, MSTP304,
@@ -192,7 +194,16 @@ enum {
};
static struct clk mstp_clks[MSTP_NR] = {
+ [MSTP931] = SH_CLK_MSTP32(&hp_clk, SMSTPCR9, 31, 0), /* I2C0 */
+ [MSTP930] = SH_CLK_MSTP32(&hp_clk, SMSTPCR9, 30, 0), /* I2C1 */
+ [MSTP929] = SH_CLK_MSTP32(&hp_clk, SMSTPCR9, 29, 0), /* I2C2 */
+ [MSTP928] = SH_CLK_MSTP32(&hp_clk, SMSTPCR9, 28, 0), /* I2C3 */
[MSTP813] = SH_CLK_MSTP32(&p_clk, SMSTPCR8, 13, 0), /* Ether */
+ [MSTP726] = SH_CLK_MSTP32(&zx_clk, SMSTPCR7, 26, 0), /* LVDS0 */
+ [MSTP725] = SH_CLK_MSTP32(&zx_clk, SMSTPCR7, 25, 0), /* LVDS1 */
+ [MSTP724] = SH_CLK_MSTP32(&zx_clk, SMSTPCR7, 24, 0), /* DU0 */
+ [MSTP723] = SH_CLK_MSTP32(&zx_clk, SMSTPCR7, 23, 0), /* DU1 */
+ [MSTP722] = SH_CLK_MSTP32(&zx_clk, SMSTPCR7, 22, 0), /* DU2 */
[MSTP721] = SH_CLK_MSTP32(&p_clk, SMSTPCR7, 21, 0), /* SCIF0 */
[MSTP720] = SH_CLK_MSTP32(&p_clk, SMSTPCR7, 20, 0), /* SCIF1 */
[MSTP717] = SH_CLK_MSTP32(&zs_clk, SMSTPCR7, 17, 0), /* HSCIF0 */
@@ -251,6 +262,11 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("ssprs", &div6_clks[DIV6_SSPRS]),
/* MSTP */
+ CLKDEV_ICK_ID("lvds.0", "rcar-du-r8a7790", &mstp_clks[MSTP726]),
+ CLKDEV_ICK_ID("lvds.1", "rcar-du-r8a7790", &mstp_clks[MSTP725]),
+ CLKDEV_ICK_ID("du.0", "rcar-du-r8a7790", &mstp_clks[MSTP724]),
+ CLKDEV_ICK_ID("du.1", "rcar-du-r8a7790", &mstp_clks[MSTP723]),
+ CLKDEV_ICK_ID("du.2", "rcar-du-r8a7790", &mstp_clks[MSTP722]),
CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]),
CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]),
CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP206]),
@@ -261,6 +277,10 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP720]),
CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP717]),
CLKDEV_DEV_ID("sh-sci.9", &mstp_clks[MSTP716]),
+ CLKDEV_DEV_ID("e6508000.i2c", &mstp_clks[MSTP931]),
+ CLKDEV_DEV_ID("e6518000.i2c", &mstp_clks[MSTP930]),
+ CLKDEV_DEV_ID("e6530000.i2c", &mstp_clks[MSTP929]),
+ CLKDEV_DEV_ID("e6540000.i2c", &mstp_clks[MSTP928]),
CLKDEV_DEV_ID("r8a7790-ether", &mstp_clks[MSTP813]),
CLKDEV_DEV_ID("rcar_thermal", &mstp_clks[MSTP522]),
CLKDEV_DEV_ID("ee200000.mmcif", &mstp_clks[MSTP315]),
@@ -290,7 +310,7 @@ static struct clk_lookup lookups[] = {
void __init r8a7790_clock_init(void)
{
- u32 mode = r8a7790_read_mode_pins();
+ u32 mode = rcar_gen2_read_mode_pins();
int k, ret = 0;
switch (mode & (MD(14) | MD(13))) {
diff --git a/arch/arm/mach-shmobile/clock-r8a7791.c b/arch/arm/mach-shmobile/clock-r8a7791.c
new file mode 100644
index 000000000000..c9a26f16ce5b
--- /dev/null
+++ b/arch/arm/mach-shmobile/clock-r8a7791.c
@@ -0,0 +1,237 @@
+/*
+ * r8a7791 clock framework support
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/sh_clk.h>
+#include <linux/clkdev.h>
+#include <mach/clock.h>
+#include <mach/common.h>
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 19 (MHz) *1 *1
+ *---------------------------------------------------
+ * 0 0 0 15 x 1 x172/2 x208/2 x106
+ * 0 0 1 15 x 1 x172/2 x208/2 x88
+ * 0 1 0 20 x 1 x130/2 x156/2 x80
+ * 0 1 1 20 x 1 x130/2 x156/2 x66
+ * 1 0 0 26 / 2 x200/2 x240/2 x122
+ * 1 0 1 26 / 2 x200/2 x240/2 x102
+ * 1 1 0 30 / 2 x172/2 x208/2 x106
+ * 1 1 1 30 / 2 x172/2 x208/2 x88
+ *
+ * *1 : Table 7.6 indicates VCO output (PLLx = VCO/2)
+ * see "p1 / 2" on R8A7791_CLOCK_ROOT() below
+ */
+
+#define MD(nr) (1 << nr)
+
+#define CPG_BASE 0xe6150000
+#define CPG_LEN 0x1000
+
+#define SMSTPCR0 0xE6150130
+#define SMSTPCR1 0xE6150134
+#define SMSTPCR2 0xe6150138
+#define SMSTPCR3 0xE615013C
+#define SMSTPCR5 0xE6150144
+#define SMSTPCR7 0xe615014c
+#define SMSTPCR8 0xE6150990
+#define SMSTPCR9 0xE6150994
+#define SMSTPCR10 0xE6150998
+#define SMSTPCR11 0xE615099C
+
+#define MODEMR 0xE6160060
+#define SDCKCR 0xE6150074
+#define SD2CKCR 0xE6150078
+#define SD3CKCR 0xE615007C
+#define MMC0CKCR 0xE6150240
+#define MMC1CKCR 0xE6150244
+#define SSPCKCR 0xE6150248
+#define SSPRSCKCR 0xE615024C
+
+static struct clk_mapping cpg_mapping = {
+ .phys = CPG_BASE,
+ .len = CPG_LEN,
+};
+
+static struct clk extal_clk = {
+ /* .rate will be updated in r8a7791_clock_init() */
+ .mapping = &cpg_mapping,
+};
+
+static struct sh_clk_ops followparent_clk_ops = {
+ .recalc = followparent_recalc,
+};
+
+static struct clk main_clk = {
+ /* .parent will be set in r8a7791_clock_init() */
+ .ops = &followparent_clk_ops,
+};
+
+/*
+ * The ratios of these clocks are updated
+ * in r8a7791_clock_init()
+ */
+SH_FIXED_RATIO_CLK_SET(pll1_clk, main_clk, 1, 1);
+SH_FIXED_RATIO_CLK_SET(pll3_clk, main_clk, 1, 1);
+
+/* fixed ratio clock */
+SH_FIXED_RATIO_CLK_SET(extal_div2_clk, extal_clk, 1, 2);
+SH_FIXED_RATIO_CLK_SET(cp_clk, extal_clk, 1, 2);
+
+SH_FIXED_RATIO_CLK_SET(pll1_div2_clk, pll1_clk, 1, 2);
+SH_FIXED_RATIO_CLK_SET(hp_clk, pll1_clk, 1, 12);
+SH_FIXED_RATIO_CLK_SET(p_clk, pll1_clk, 1, 24);
+SH_FIXED_RATIO_CLK_SET(rclk_clk, pll1_clk, 1, (48 * 1024));
+SH_FIXED_RATIO_CLK_SET(mp_clk, pll1_div2_clk, 1, 15);
+
+static struct clk *main_clks[] = {
+ &extal_clk,
+ &extal_div2_clk,
+ &main_clk,
+ &pll1_clk,
+ &pll1_div2_clk,
+ &pll3_clk,
+ &hp_clk,
+ &p_clk,
+ &rclk_clk,
+ &mp_clk,
+ &cp_clk,
+};
+
+/* MSTP */
+enum {
+ MSTP721, MSTP720,
+ MSTP719, MSTP718, MSTP715, MSTP714,
+ MSTP216, MSTP207, MSTP206,
+ MSTP204, MSTP203, MSTP202, MSTP1105, MSTP1106, MSTP1107,
+ MSTP124,
+ MSTP_NR
+};
+
+static struct clk mstp_clks[MSTP_NR] = {
+ [MSTP721] = SH_CLK_MSTP32(&p_clk, SMSTPCR7, 21, 0), /* SCIF0 */
+ [MSTP720] = SH_CLK_MSTP32(&p_clk, SMSTPCR7, 20, 0), /* SCIF1 */
+ [MSTP719] = SH_CLK_MSTP32(&p_clk, SMSTPCR7, 19, 0), /* SCIF2 */
+ [MSTP718] = SH_CLK_MSTP32(&p_clk, SMSTPCR7, 18, 0), /* SCIF3 */
+ [MSTP715] = SH_CLK_MSTP32(&p_clk, SMSTPCR7, 15, 0), /* SCIF4 */
+ [MSTP714] = SH_CLK_MSTP32(&p_clk, SMSTPCR7, 14, 0), /* SCIF5 */
+ [MSTP216] = SH_CLK_MSTP32(&mp_clk, SMSTPCR2, 16, 0), /* SCIFB2 */
+ [MSTP207] = SH_CLK_MSTP32(&mp_clk, SMSTPCR2, 7, 0), /* SCIFB1 */
+ [MSTP206] = SH_CLK_MSTP32(&mp_clk, SMSTPCR2, 6, 0), /* SCIFB0 */
+ [MSTP204] = SH_CLK_MSTP32(&mp_clk, SMSTPCR2, 4, 0), /* SCIFA0 */
+ [MSTP203] = SH_CLK_MSTP32(&mp_clk, SMSTPCR2, 3, 0), /* SCIFA1 */
+ [MSTP202] = SH_CLK_MSTP32(&mp_clk, SMSTPCR2, 2, 0), /* SCIFA2 */
+ [MSTP1105] = SH_CLK_MSTP32(&mp_clk, SMSTPCR11, 5, 0), /* SCIFA3 */
+ [MSTP1106] = SH_CLK_MSTP32(&mp_clk, SMSTPCR11, 6, 0), /* SCIFA4 */
+ [MSTP1107] = SH_CLK_MSTP32(&mp_clk, SMSTPCR11, 7, 0), /* SCIFA5 */
+ [MSTP124] = SH_CLK_MSTP32(&rclk_clk, SMSTPCR1, 24, 0), /* CMT0 */
+};
+
+static struct clk_lookup lookups[] = {
+
+ /* main clocks */
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("extal_div2", &extal_div2_clk),
+ CLKDEV_CON_ID("main", &main_clk),
+ CLKDEV_CON_ID("pll1", &pll1_clk),
+ CLKDEV_CON_ID("pll1_div2", &pll1_div2_clk),
+ CLKDEV_CON_ID("pll3", &pll3_clk),
+ CLKDEV_CON_ID("hp", &hp_clk),
+ CLKDEV_CON_ID("p", &p_clk),
+ CLKDEV_CON_ID("rclk", &rclk_clk),
+ CLKDEV_CON_ID("mp", &mp_clk),
+ CLKDEV_CON_ID("cp", &cp_clk),
+ CLKDEV_CON_ID("peripheral_clk", &hp_clk),
+
+ /* MSTP */
+ CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
+ CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */
+ CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP206]), /* SCIFB0 */
+ CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP207]), /* SCIFB1 */
+ CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP216]), /* SCIFB2 */
+ CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP202]), /* SCIFA2 */
+ CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP721]), /* SCIF0 */
+ CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP720]), /* SCIF1 */
+ CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP719]), /* SCIF2 */
+ CLKDEV_DEV_ID("sh-sci.9", &mstp_clks[MSTP718]), /* SCIF3 */
+ CLKDEV_DEV_ID("sh-sci.10", &mstp_clks[MSTP715]), /* SCIF4 */
+ CLKDEV_DEV_ID("sh-sci.11", &mstp_clks[MSTP714]), /* SCIF5 */
+ CLKDEV_DEV_ID("sh-sci.12", &mstp_clks[MSTP1105]), /* SCIFA3 */
+ CLKDEV_DEV_ID("sh-sci.13", &mstp_clks[MSTP1106]), /* SCIFA4 */
+ CLKDEV_DEV_ID("sh-sci.14", &mstp_clks[MSTP1107]), /* SCIFA5 */
+ CLKDEV_DEV_ID("sh_cmt.0", &mstp_clks[MSTP124]),
+};
+
+#define R8A7791_CLOCK_ROOT(e, m, p0, p1, p30, p31) \
+ extal_clk.rate = e * 1000 * 1000; \
+ main_clk.parent = m; \
+ SH_CLK_SET_RATIO(&pll1_clk_ratio, p1 / 2, 1); \
+ if (mode & MD(19)) \
+ SH_CLK_SET_RATIO(&pll3_clk_ratio, p31, 1); \
+ else \
+ SH_CLK_SET_RATIO(&pll3_clk_ratio, p30, 1)
+
+
+void __init r8a7791_clock_init(void)
+{
+ void __iomem *modemr = ioremap_nocache(MODEMR, PAGE_SIZE);
+ u32 mode;
+ int k, ret = 0;
+
+ BUG_ON(!modemr);
+ mode = ioread32(modemr);
+ iounmap(modemr);
+
+ switch (mode & (MD(14) | MD(13))) {
+ case 0:
+ R8A7791_CLOCK_ROOT(15, &extal_clk, 172, 208, 106, 88);
+ break;
+ case MD(13):
+ R8A7791_CLOCK_ROOT(20, &extal_clk, 130, 156, 80, 66);
+ break;
+ case MD(14):
+ R8A7791_CLOCK_ROOT(26, &extal_div2_clk, 200, 240, 122, 102);
+ break;
+ case MD(13) | MD(14):
+ R8A7791_CLOCK_ROOT(30, &extal_div2_clk, 172, 208, 106, 88);
+ break;
+ }
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
+
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ shmobile_clk_init();
+ else
+ goto epanic;
+
+ return;
+
+epanic:
+ panic("failed to setup r8a7791 clocks\n");
+}
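Working through the mode-pin table above for one setting: with MD14 = MD13 = 0 and MD19 = 0, r8a7791_clock_init() selects EXTAL = 15 MHz as the main_clk parent, PLL1 = 15 MHz x 208/2 = 1560 MHz and PLL3 = 15 MHz x 106 = 1590 MHz; the fixed ratios defined earlier in the file then give hp = 1560/12 = 130 MHz, p = 1560/24 = 65 MHz and mp = (1560/2)/15 = 52 MHz.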
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index f93751caf5cb..e5be5c88644b 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -40,6 +40,9 @@ shmobile_boot_fn:
.globl shmobile_boot_arg
shmobile_boot_arg:
2: .space 4
+ .globl shmobile_boot_size
+shmobile_boot_size:
+ .long . - shmobile_boot_vector
/*
* Per-CPU SMP boot function/argument selection code based on MPIDR
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 7b938681e756..e31980590eb4 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -9,16 +9,23 @@ extern void shmobile_setup_console(void);
extern void shmobile_boot_vector(void);
extern unsigned long shmobile_boot_fn;
extern unsigned long shmobile_boot_arg;
+extern unsigned long shmobile_boot_size;
extern void shmobile_smp_boot(void);
extern void shmobile_smp_sleep(void);
extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
unsigned long arg);
+extern int shmobile_smp_cpu_disable(unsigned int cpu);
+extern void shmobile_invalidate_start(void);
extern void shmobile_boot_scu(void);
extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
-extern int shmobile_smp_scu_boot_secondary(unsigned int cpu,
- struct task_struct *idle);
extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
extern int shmobile_smp_scu_cpu_kill(unsigned int cpu);
+extern void shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus);
+extern int shmobile_smp_apmu_boot_secondary(unsigned int cpu,
+ struct task_struct *idle);
+extern void shmobile_smp_apmu_cpu_die(unsigned int cpu);
+extern int shmobile_smp_apmu_cpu_kill(unsigned int cpu);
+extern void shmobile_invalidate_start(void);
struct clk;
extern int shmobile_clk_init(void);
extern void shmobile_handle_irq_intc(struct pt_regs *);
@@ -39,7 +46,6 @@ static inline int shmobile_cpuidle_init(void) { return 0; }
#endif
extern void __iomem *shmobile_scu_base;
-extern void shmobile_smp_init_cpus(unsigned int ncores);
static inline void __init shmobile_init_late(void)
{
diff --git a/arch/arm/mach-shmobile/include/mach/r7s72100.h b/arch/arm/mach-shmobile/include/mach/r7s72100.h
new file mode 100644
index 000000000000..5f34b20ecd4a
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/r7s72100.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_R7S72100_H__
+#define __ASM_R7S72100_H__
+
+void r7s72100_add_dt_devices(void);
+void r7s72100_clock_init(void);
+void r7s72100_init_early(void);
+
+#endif /* __ASM_R7S72100_H__ */
diff --git a/arch/arm/mach-shmobile/include/mach/r8a73a4.h b/arch/arm/mach-shmobile/include/mach/r8a73a4.h
index f3a9b702da56..ce8bdd1d8a8a 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a73a4.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a73a4.h
@@ -1,10 +1,19 @@
#ifndef __ASM_R8A73A4_H__
#define __ASM_R8A73A4_H__
+/* DMA slave IDs */
+enum {
+ SHDMA_SLAVE_INVALID,
+ SHDMA_SLAVE_MMCIF0_TX,
+ SHDMA_SLAVE_MMCIF0_RX,
+ SHDMA_SLAVE_MMCIF1_TX,
+ SHDMA_SLAVE_MMCIF1_RX,
+};
+
void r8a73a4_add_standard_devices(void);
void r8a73a4_add_dt_devices(void);
void r8a73a4_clock_init(void);
void r8a73a4_pinmux_init(void);
-void r8a73a4_init_delay(void);
+void r8a73a4_init_early(void);
#endif /* __ASM_R8A73A4_H__ */
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7778.h b/arch/arm/mach-shmobile/include/mach/r8a7778.h
index adfcf51b163d..441886c9714b 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7778.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7778.h
@@ -1,6 +1,7 @@
/*
* Copyright (C) 2013 Renesas Solutions Corp.
* Copyright (C) 2013 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ * Copyright (C) 2013 Cogent Embedded, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,11 +22,15 @@
#include <linux/sh_eth.h>
#include <linux/platform_data/camera-rcar.h>
+/* HPB-DMA slave IDs */
+enum {
+ HPBDMA_SLAVE_DUMMY,
+ HPBDMA_SLAVE_SDHI0_TX,
+ HPBDMA_SLAVE_SDHI0_RX,
+};
+
extern void r8a7778_add_standard_devices(void);
extern void r8a7778_add_standard_devices_dt(void);
-extern void r8a7778_add_ether_device(struct sh_eth_plat_data *pdata);
-extern void r8a7778_add_vin_device(int id,
- struct rcar_vin_platform_data *pdata);
extern void r8a7778_add_dt_devices(void);
extern void r8a7778_init_late(void);
@@ -33,6 +38,9 @@ extern void r8a7778_init_delay(void);
extern void r8a7778_init_irq_dt(void);
extern void r8a7778_clock_init(void);
extern void r8a7778_init_irq_extpin(int irlm);
+extern void r8a7778_init_irq_extpin_dt(int irlm);
extern void r8a7778_pinmux_init(void);
+extern int r8a7778_usb_phy_power(bool enable);
+
#endif /* __ASM_R8A7778_H__ */
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7779.h b/arch/arm/mach-shmobile/include/mach/r8a7779.h
index 11c740047e14..17af34ed89c8 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7779.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7779.h
@@ -6,6 +6,13 @@
#include <linux/sh_eth.h>
#include <linux/platform_data/camera-rcar.h>
+/* HPB-DMA slave IDs */
+enum {
+ HPBDMA_SLAVE_DUMMY,
+ HPBDMA_SLAVE_SDHI0_TX,
+ HPBDMA_SLAVE_SDHI0_RX,
+};
+
struct platform_device;
struct r8a7779_pm_ch {
@@ -26,6 +33,7 @@ static inline struct r8a7779_pm_ch *to_r8a7779_ch(struct generic_pm_domain *d)
extern void r8a7779_init_delay(void);
extern void r8a7779_init_irq_extpin(int irlm);
+extern void r8a7779_init_irq_extpin_dt(int irlm);
extern void r8a7779_init_irq_dt(void);
extern void r8a7779_map_io(void);
extern void r8a7779_earlytimer_init(void);
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7790.h b/arch/arm/mach-shmobile/include/mach/r8a7790.h
index 788d55952091..5fbfa28b40b6 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7790.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7790.h
@@ -1,14 +1,13 @@
#ifndef __ASM_R8A7790_H__
#define __ASM_R8A7790_H__
+#include <mach/rcar-gen2.h>
+
void r8a7790_add_standard_devices(void);
void r8a7790_add_dt_devices(void);
void r8a7790_clock_init(void);
void r8a7790_pinmux_init(void);
-void r8a7790_init_delay(void);
-void r8a7790_timer_init(void);
-
-#define MD(nr) BIT(nr)
-u32 r8a7790_read_mode_pins(void);
+void r8a7790_init_early(void);
+extern struct smp_operations r8a7790_smp_ops;
#endif /* __ASM_R8A7790_H__ */
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7791.h b/arch/arm/mach-shmobile/include/mach/r8a7791.h
new file mode 100644
index 000000000000..051ead3c286e
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/r8a7791.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_R8A7791_H__
+#define __ASM_R8A7791_H__
+
+void r8a7791_add_standard_devices(void);
+void r8a7791_add_dt_devices(void);
+void r8a7791_clock_init(void);
+void r8a7791_init_early(void);
+extern struct smp_operations r8a7791_smp_ops;
+
+#endif /* __ASM_R8A7791_H__ */
diff --git a/arch/arm/mach-shmobile/include/mach/rcar-gen2.h b/arch/arm/mach-shmobile/include/mach/rcar-gen2.h
new file mode 100644
index 000000000000..43f606eb2d82
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/rcar-gen2.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_RCAR_GEN2_H__
+#define __ASM_RCAR_GEN2_H__
+
+void rcar_gen2_timer_init(void);
+#define MD(nr) BIT(nr)
+u32 rcar_gen2_read_mode_pins(void);
+
+#endif /* __ASM_RCAR_GEN2_H__ */
diff --git a/arch/arm/mach-shmobile/include/mach/timex.h b/arch/arm/mach-shmobile/include/mach/timex.h
deleted file mode 100644
index ae0d8d825c23..000000000000
--- a/arch/arm/mach-shmobile/include/mach/timex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_MACH_TIMEX_H
-#define __ASM_MACH_TIMEX_H
-
-#define CLOCK_TICK_RATE 1193180 /* unused i8253 PIT value */
-
-#endif /* __ASM_MACH_TIMEX_H */
diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
new file mode 100644
index 000000000000..1da5a72d9642
--- /dev/null
+++ b/arch/arm/mach-shmobile/platsmp-apmu.c
@@ -0,0 +1,195 @@
+/*
+ * SMP support for SoCs with APMU
+ *
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+#include <asm/smp_plat.h>
+#include <mach/common.h>
+
+static struct {
+ void __iomem *iomem;
+ int bit;
+} apmu_cpus[CONFIG_NR_CPUS];
+
+#define WUPCR_OFFS 0x10
+#define PSTR_OFFS 0x40
+#define CPUNCR_OFFS(n) (0x100 + (0x10 * (n)))
+
+static int apmu_power_on(void __iomem *p, int bit)
+{
+ /* request power on */
+ writel_relaxed(BIT(bit), p + WUPCR_OFFS);
+
+ /* wait for APMU to finish */
+ while (readl_relaxed(p + WUPCR_OFFS) != 0)
+ ;
+
+ return 0;
+}
+
+static int apmu_power_off(void __iomem *p, int bit)
+{
+ /* request Core Standby for next WFI */
+ writel_relaxed(3, p + CPUNCR_OFFS(bit));
+ return 0;
+}
+
+static int apmu_power_off_poll(void __iomem *p, int bit)
+{
+ int k;
+
+ for (k = 0; k < 1000; k++) {
+ if (((readl_relaxed(p + PSTR_OFFS) >> (bit * 4)) & 0x03) == 3)
+ return 1;
+
+ mdelay(1);
+ }
+
+ return 0;
+}
+
+static int apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu))
+{
+ void __iomem *p = apmu_cpus[cpu].iomem;
+
+ return p ? fn(p, apmu_cpus[cpu].bit) : -EINVAL;
+}
+
+static void apmu_init_cpu(struct resource *res, int cpu, int bit)
+{
+ if (apmu_cpus[cpu].iomem)
+ return;
+
+ apmu_cpus[cpu].iomem = ioremap_nocache(res->start, resource_size(res));
+ apmu_cpus[cpu].bit = bit;
+
+ pr_debug("apmu ioremap %d %d 0x%08x 0x%08x\n", cpu, bit,
+ res->start, resource_size(res));
+}
+
+static struct {
+ struct resource iomem;
+ int cpus[4];
+} apmu_config[] = {
+ {
+ .iomem = DEFINE_RES_MEM(0xe6152000, 0x88),
+ .cpus = { 0, 1, 2, 3 },
+ },
+ {
+ .iomem = DEFINE_RES_MEM(0xe6151000, 0x88),
+ .cpus = { 0x100, 0x101, 0x102, 0x103 },
+ }
+};
+
+static void apmu_parse_cfg(void (*fn)(struct resource *res, int cpu, int bit))
+{
+ int id; /* signed, so the id >= 0 checks below are meaningful */
+ int k;
+ int bit, index;
+ bool is_allowed;
+
+ for (k = 0; k < ARRAY_SIZE(apmu_config); k++) {
+ /* only enable the cluster that includes the boot CPU */
+ is_allowed = false;
+ for (bit = 0; bit < ARRAY_SIZE(apmu_config[k].cpus); bit++) {
+ id = apmu_config[k].cpus[bit];
+ if (id >= 0) {
+ if (id == cpu_logical_map(0))
+ is_allowed = true;
+ }
+ }
+ if (!is_allowed)
+ continue;
+
+ for (bit = 0; bit < ARRAY_SIZE(apmu_config[k].cpus); bit++) {
+ id = apmu_config[k].cpus[bit];
+ if (id >= 0) {
+ index = get_logical_index(id);
+ if (index >= 0)
+ fn(&apmu_config[k].iomem, index, bit);
+ }
+ }
+ }
+}
+
+void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus)
+{
+ /* install boot code shared by all CPUs */
+ shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
+ shmobile_boot_arg = MPIDR_HWID_BITMASK;
+
+ /* perform per-cpu setup */
+ apmu_parse_cfg(apmu_init_cpu);
+}
+
+int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ /* For this particular CPU register boot vector */
+ shmobile_smp_hook(cpu, virt_to_phys(shmobile_invalidate_start), 0);
+
+ return apmu_wrap(cpu, apmu_power_on);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* nicked from arch/arm/mach-exynos/hotplug.c */
+static inline void cpu_enter_lowpower_a15(void)
+{
+ unsigned int v;
+
+ asm volatile(
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "Ir" (CR_C)
+ : "cc");
+
+ flush_cache_louis();
+
+ asm volatile(
+ /*
+ * Turn off coherency
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (0x40)
+ : "cc");
+
+ isb();
+ dsb();
+}
+
+void shmobile_smp_apmu_cpu_die(unsigned int cpu)
+{
+ /* For this particular CPU deregister boot vector */
+ shmobile_smp_hook(cpu, 0, 0);
+
+ /* Select next sleep mode using the APMU */
+ apmu_wrap(cpu, apmu_power_off);
+
+ /* Do ARM specific CPU shutdown */
+ cpu_enter_lowpower_a15();
+
+ /* jump to shared mach-shmobile sleep / reset code */
+ shmobile_smp_sleep();
+}
+
+int shmobile_smp_apmu_cpu_kill(unsigned int cpu)
+{
+ return apmu_wrap(cpu, apmu_power_off_poll);
+}
+#endif
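
The PSTR polling in apmu_power_off_poll() above packs one CPU power state per 4-bit field and treats the value 3 as "powered off". A small standalone sketch of that shift/mask; the register snapshot is made up, and the "powered off" label for state 3 simply follows the check in the code above.

#include <stdio.h>

/* Same extraction as apmu_power_off_poll(): 4 bits per CPU, low 2 bits used */
static unsigned int apmu_cpu_state(unsigned int pstr, int bit)
{
	return (pstr >> (bit * 4)) & 0x03;
}

int main(void)
{
	unsigned int pstr = 0x00003330;	/* hypothetical snapshot: CPU1-3 off, CPU0 running */
	int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		printf("CPU%d state %u%s\n", cpu, apmu_cpu_state(pstr, cpu),
		       apmu_cpu_state(pstr, cpu) == 3 ? " (powered off)" : "");
	return 0;
}
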
diff --git a/arch/arm/mach-shmobile/platsmp-scu.c b/arch/arm/mach-shmobile/platsmp-scu.c
index c96f50160be6..673ad6e80869 100644
--- a/arch/arm/mach-shmobile/platsmp-scu.c
+++ b/arch/arm/mach-shmobile/platsmp-scu.c
@@ -7,6 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -16,6 +17,26 @@
#include <asm/smp_scu.h>
#include <mach/common.h>
+static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ /* For this particular CPU register SCU SMP boot vector */
+ shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
+ (unsigned long)shmobile_scu_base);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block shmobile_smp_scu_notifier = {
+ .notifier_call = shmobile_smp_scu_notifier_call,
+};
+
void __init shmobile_smp_scu_prepare_cpus(unsigned int max_cpus)
{
/* install boot code shared by all CPUs */
@@ -25,14 +46,9 @@ void __init shmobile_smp_scu_prepare_cpus(unsigned int max_cpus)
/* enable SCU and cache coherency on booting CPU */
scu_enable(shmobile_scu_base);
scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
-}
-int shmobile_smp_scu_boot_secondary(unsigned int cpu, struct task_struct *idle)
-{
- /* For this particular CPU register SCU boot vector */
- shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
- (unsigned long)shmobile_scu_base);
- return 0;
+ /* Use CPU notifier for reset vector control */
+ register_cpu_notifier(&shmobile_smp_scu_notifier);
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index d4ae616bcedb..9ebc246b8d7d 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -11,25 +11,10 @@
* published by the Free Software Foundation.
*/
#include <linux/init.h>
-#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <mach/common.h>
-void __init shmobile_smp_init_cpus(unsigned int ncores)
-{
- unsigned int i;
-
- if (ncores > nr_cpu_ids) {
- pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
- ncores, nr_cpu_ids);
- ncores = nr_cpu_ids;
- }
-
- for (i = 0; i < ncores; i++)
- set_cpu_possible(i, true);
-}
-
extern unsigned long shmobile_smp_fn[];
extern unsigned long shmobile_smp_arg[];
extern unsigned long shmobile_smp_mpidr[];
@@ -44,3 +29,10 @@ void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg)
shmobile_smp_arg[cpu] = arg;
flush_cache_all();
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+int shmobile_smp_cpu_disable(unsigned int cpu)
+{
+ return 0; /* Hotplug of any CPU is supported */
+}
+#endif
diff --git a/arch/arm/mach-shmobile/setup-r7s72100.c b/arch/arm/mach-shmobile/setup-r7s72100.c
new file mode 100644
index 000000000000..d4eb509a1c87
--- /dev/null
+++ b/arch/arm/mach-shmobile/setup-r7s72100.c
@@ -0,0 +1,88 @@
+/*
+ * r7s72100 processor support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/serial_sci.h>
+#include <mach/common.h>
+#include <mach/irqs.h>
+#include <mach/r7s72100.h>
+#include <asm/mach/arch.h>
+
+#define SCIF_DATA(index, baseaddr, irq) \
+[index] = { \
+ .type = PORT_SCIF, \
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, \
+ .scbrr_algo_id = SCBRR_ALGO_2, \
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE | \
+ SCSCR_REIE, \
+ .mapbase = baseaddr, \
+ .irqs = { irq + 1, irq + 2, irq + 3, irq }, \
+}
+
+enum { SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7 };
+
+static const struct plat_sci_port scif[] __initconst = {
+ SCIF_DATA(SCIF0, 0xe8007000, gic_iid(221)), /* SCIF0 */
+ SCIF_DATA(SCIF1, 0xe8007800, gic_iid(225)), /* SCIF1 */
+ SCIF_DATA(SCIF2, 0xe8008000, gic_iid(229)), /* SCIF2 */
+ SCIF_DATA(SCIF3, 0xe8008800, gic_iid(233)), /* SCIF3 */
+ SCIF_DATA(SCIF4, 0xe8009000, gic_iid(237)), /* SCIF4 */
+ SCIF_DATA(SCIF5, 0xe8009800, gic_iid(241)), /* SCIF5 */
+ SCIF_DATA(SCIF6, 0xe800a000, gic_iid(245)), /* SCIF6 */
+ SCIF_DATA(SCIF7, 0xe800a800, gic_iid(249)), /* SCIF7 */
+};
+
+static inline void r7s72100_register_scif(int idx)
+{
+ platform_device_register_data(&platform_bus, "sh-sci", idx, &scif[idx],
+ sizeof(struct plat_sci_port));
+}
+
+void __init r7s72100_add_dt_devices(void)
+{
+ r7s72100_register_scif(SCIF0);
+ r7s72100_register_scif(SCIF1);
+ r7s72100_register_scif(SCIF2);
+ r7s72100_register_scif(SCIF3);
+ r7s72100_register_scif(SCIF4);
+ r7s72100_register_scif(SCIF5);
+ r7s72100_register_scif(SCIF6);
+ r7s72100_register_scif(SCIF7);
+}
+
+void __init r7s72100_init_early(void)
+{
+ shmobile_setup_delay(400, 1, 3); /* Cortex-A9 @ 400MHz */
+}
+
+#ifdef CONFIG_USE_OF
+static const char *r7s72100_boards_compat_dt[] __initdata = {
+ "renesas,r7s72100",
+ NULL,
+};
+
+DT_MACHINE_START(R7S72100_DT, "Generic R7S72100 (Flattened Device Tree)")
+ .init_early = r7s72100_init_early,
+ .dt_compat = r7s72100_boards_compat_dt,
+MACHINE_END
+#endif /* CONFIG_USE_OF */
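
The SCIF_DATA() macro above relies on C99 designated initializers ([index] = { ... }) so that scif[] can be indexed by port name when registering each device, and it muxes the four per-port interrupts as irq + 1, irq + 2, irq + 3 and irq. A cut-down standalone model of the same pattern follows; the port_cfg struct and the numbers in main() are stand-ins, not the real plat_sci_port layout.

#include <stdio.h>

struct port_cfg {
	unsigned long mapbase;
	int irqs[4];
};

#define SCIF_DATA(index, baseaddr, irq)			\
[index] = {						\
	.mapbase = baseaddr,				\
	.irqs = { irq + 1, irq + 2, irq + 3, irq },	\
}

enum { SCIF0, SCIF1 };

static const struct port_cfg scif[] = {
	SCIF_DATA(SCIF0, 0xe8007000, 100),
	SCIF_DATA(SCIF1, 0xe8007800, 104),
};

int main(void)
{
	printf("SCIF1: base 0x%lx, irqs %d %d %d %d\n", scif[SCIF1].mapbase,
	       scif[SCIF1].irqs[0], scif[SCIF1].irqs[1],
	       scif[SCIF1].irqs[2], scif[SCIF1].irqs[3]);
	return 0;
}
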
diff --git a/arch/arm/mach-shmobile/setup-r8a73a4.c b/arch/arm/mach-shmobile/setup-r8a73a4.c
index 89491700afb7..b0f2749071be 100644
--- a/arch/arm/mach-shmobile/setup-r8a73a4.c
+++ b/arch/arm/mach-shmobile/setup-r8a73a4.c
@@ -22,8 +22,10 @@
#include <linux/of_platform.h>
#include <linux/platform_data/irq-renesas-irqc.h>
#include <linux/serial_sci.h>
+#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
#include <mach/common.h>
+#include <mach/dma-register.h>
#include <mach/irqs.h>
#include <mach/r8a73a4.h>
#include <asm/mach/arch.h>
@@ -199,15 +201,104 @@ void __init r8a73a4_add_dt_devices(void)
r8a7790_register_cmt(10);
}
+/* DMA */
+static const struct sh_dmae_slave_config dma_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_MMCIF0_TX,
+ .addr = 0xee200034,
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xd1,
+ }, {
+ .slave_id = SHDMA_SLAVE_MMCIF0_RX,
+ .addr = 0xee200034,
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0xd2,
+ }, {
+ .slave_id = SHDMA_SLAVE_MMCIF1_TX,
+ .addr = 0xee220034,
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xe1,
+ }, {
+ .slave_id = SHDMA_SLAVE_MMCIF1_RX,
+ .addr = 0xee220034,
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0xe2,
+ },
+};
+
+#define DMAE_CHANNEL(a, b) \
+ { \
+ .offset = (a) - 0x20, \
+ .dmars = (a) - 0x20 + 0x40, \
+ .chclr_bit = (b), \
+ .chclr_offset = 0x80 - 0x20, \
+ }
+
+static const struct sh_dmae_channel dma_channels[] = {
+ DMAE_CHANNEL(0x8000, 0),
+ DMAE_CHANNEL(0x8080, 1),
+ DMAE_CHANNEL(0x8100, 2),
+ DMAE_CHANNEL(0x8180, 3),
+ DMAE_CHANNEL(0x8200, 4),
+ DMAE_CHANNEL(0x8280, 5),
+ DMAE_CHANNEL(0x8300, 6),
+ DMAE_CHANNEL(0x8380, 7),
+ DMAE_CHANNEL(0x8400, 8),
+ DMAE_CHANNEL(0x8480, 9),
+ DMAE_CHANNEL(0x8500, 10),
+ DMAE_CHANNEL(0x8580, 11),
+ DMAE_CHANNEL(0x8600, 12),
+ DMAE_CHANNEL(0x8680, 13),
+ DMAE_CHANNEL(0x8700, 14),
+ DMAE_CHANNEL(0x8780, 15),
+ DMAE_CHANNEL(0x8800, 16),
+ DMAE_CHANNEL(0x8880, 17),
+ DMAE_CHANNEL(0x8900, 18),
+ DMAE_CHANNEL(0x8980, 19),
+};
+
+static const struct sh_dmae_pdata dma_pdata = {
+ .slave = dma_slaves,
+ .slave_num = ARRAY_SIZE(dma_slaves),
+ .channel = dma_channels,
+ .channel_num = ARRAY_SIZE(dma_channels),
+ .ts_low_shift = TS_LOW_SHIFT,
+ .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT,
+ .ts_high_shift = TS_HI_SHIFT,
+ .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT,
+ .ts_shift = dma_ts_shift,
+ .ts_shift_num = ARRAY_SIZE(dma_ts_shift),
+ .dmaor_init = DMAOR_DME,
+ .chclr_present = 1,
+ .chclr_bitwise = 1,
+};
+
+static struct resource dma_resources[] = {
+ DEFINE_RES_MEM(0xe6700020, 0x89e0),
+ DEFINE_RES_IRQ_NAMED(gic_spi(220), "error_irq"),
+ {
+ /* IRQ for channels 0-19 */
+ .start = gic_spi(200),
+ .end = gic_spi(219),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#define r8a73a4_register_dmac() \
+ platform_device_register_resndata(&platform_bus, "sh-dma-engine", 0, \
+ dma_resources, ARRAY_SIZE(dma_resources), \
+ &dma_pdata, sizeof(dma_pdata))
+
void __init r8a73a4_add_standard_devices(void)
{
r8a73a4_add_dt_devices();
r8a73a4_register_irqc(0);
r8a73a4_register_irqc(1);
r8a73a4_register_thermal();
+ r8a73a4_register_dmac();
}
-void __init r8a73a4_init_delay(void)
+void __init r8a73a4_init_early(void)
{
#ifndef CONFIG_ARM_ARCH_TIMER
shmobile_setup_delay(1500, 2, 4); /* Cortex-A15 @ 1500MHz */
@@ -222,7 +313,7 @@ static const char *r8a73a4_boards_compat_dt[] __initdata = {
};
DT_MACHINE_START(R8A73A4_DT, "Generic R8A73A4 (Flattened Device Tree)")
- .init_early = r8a73a4_init_delay,
+ .init_early = r8a73a4_init_early,
.dt_compat = r8a73a4_boards_compat_dt,
MACHINE_END
#endif /* CONFIG_USE_OF */
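
The DMAE_CHANNEL() entries above describe twenty channel register blocks starting at 0x8000 and spaced 0x80 apart; the "- 0x20" in the offsets appears to line up with the DMAC resource starting at 0xe6700020 rather than ...0000, with DMARS another 0x40 beyond each block. That interpretation of the 0x20 bias is an assumption, not stated in the patch; the arithmetic itself is reproduced in this standalone sketch.

#include <stdio.h>

int main(void)
{
	unsigned int chclr_offset = 0x80 - 0x20;	/* as in DMAE_CHANNEL() */
	unsigned int k;

	for (k = 0; k < 3; k++) {			/* first three of the 20 channels */
		unsigned int a = 0x8000 + 0x80 * k;	/* register block base */

		printf("ch%u: offset 0x%04x dmars 0x%04x chclr_bit %u chclr_offset 0x%02x\n",
		       k, a - 0x20, a - 0x20 + 0x40, k, chclr_offset);
	}
	return 0;
}
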
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index 6a2657ebd197..03fcc5974ef9 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -24,6 +24,7 @@
#include <linux/irqchip/arm-gic.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_data/dma-rcar-hpbdma.h>
#include <linux/platform_data/gpio-rcar.h>
#include <linux/platform_data/irq-renesas-intc-irqpin.h>
#include <linux/platform_device.h>
@@ -95,29 +96,46 @@ static struct sh_timer_config sh_tmu1_platform_data __initdata = {
&sh_tmu##idx##_platform_data, \
sizeof(sh_tmu##idx##_platform_data))
-/* USB */
-static struct usb_phy *phy;
+int r8a7778_usb_phy_power(bool enable)
+{
+ static struct usb_phy *phy;
+ int ret = 0;
+
+ if (!phy)
+ phy = usb_get_phy(USB_PHY_TYPE_USB2);
+
+ if (IS_ERR(phy)) {
+ pr_err("kernel doesn't have usb phy driver\n");
+ return PTR_ERR(phy);
+ }
+
+ if (enable)
+ ret = usb_phy_init(phy);
+ else
+ usb_phy_shutdown(phy);
+
+ return ret;
+}
+
+/* USB */
static int usb_power_on(struct platform_device *pdev)
{
- if (IS_ERR(phy))
- return PTR_ERR(phy);
+ int ret = r8a7778_usb_phy_power(true);
+
+ if (ret)
+ return ret;
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- usb_phy_init(phy);
-
return 0;
}
static void usb_power_off(struct platform_device *pdev)
{
- if (IS_ERR(phy))
+ if (r8a7778_usb_phy_power(false))
return;
- usb_phy_shutdown(phy);
-
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
@@ -174,20 +192,6 @@ static struct platform_device_info hci##_info __initdata = { \
USB_PLATFORM_INFO(ehci);
USB_PLATFORM_INFO(ohci);
-/* Ether */
-static struct resource ether_resources[] __initdata = {
- DEFINE_RES_MEM(0xfde00000, 0x400),
- DEFINE_RES_IRQ(gic_iid(0x89)),
-};
-
-void __init r8a7778_add_ether_device(struct sh_eth_plat_data *pdata)
-{
- platform_device_register_resndata(&platform_bus, "r8a777x-ether", -1,
- ether_resources,
- ARRAY_SIZE(ether_resources),
- pdata, sizeof(*pdata));
-}
-
/* PFC/GPIO */
static struct resource pfc_resources[] __initdata = {
DEFINE_RES_MEM(0xfffc0000, 0x118),
@@ -272,7 +276,7 @@ static struct resource hspi_resources[] __initdata = {
DEFINE_RES_IRQ(gic_iid(0x75)),
};
-void __init r8a7778_register_hspi(int id)
+static void __init r8a7778_register_hspi(int id)
{
BUG_ON(id < 0 || id > 2);
@@ -281,40 +285,6 @@ void __init r8a7778_register_hspi(int id)
hspi_resources + (2 * id), 2);
}
-/* VIN */
-#define R8A7778_VIN(idx) \
-static struct resource vin##idx##_resources[] __initdata = { \
- DEFINE_RES_MEM(0xffc50000 + 0x1000 * (idx), 0x1000), \
- DEFINE_RES_IRQ(gic_iid(0x5a)), \
-}; \
- \
-static struct platform_device_info vin##idx##_info __initdata = { \
- .parent = &platform_bus, \
- .name = "r8a7778-vin", \
- .id = idx, \
- .res = vin##idx##_resources, \
- .num_res = ARRAY_SIZE(vin##idx##_resources), \
- .dma_mask = DMA_BIT_MASK(32), \
-}
-
-R8A7778_VIN(0);
-R8A7778_VIN(1);
-
-static struct platform_device_info *vin_info_table[] __initdata = {
- &vin0_info,
- &vin1_info,
-};
-
-void __init r8a7778_add_vin_device(int id, struct rcar_vin_platform_data *pdata)
-{
- BUG_ON(id < 0 || id > 1);
-
- vin_info_table[id]->data = pdata;
- vin_info_table[id]->size_data = sizeof(*pdata);
-
- platform_device_register_full(vin_info_table[id]);
-}
-
void __init r8a7778_add_dt_devices(void)
{
int i;
@@ -339,6 +309,88 @@ void __init r8a7778_add_dt_devices(void)
r8a7778_register_tmu(1);
}
+/* HPB-DMA */
+
+/* Asynchronous mode register (ASYNCMDR) bits */
+#define HPB_DMAE_ASYNCMDR_ASMD22_MASK BIT(2) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD22_SINGLE BIT(2) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD22_MULTI 0 /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD21_MASK BIT(1) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD21_SINGLE BIT(1) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD21_MULTI 0 /* SDHI0 */
+
+static const struct hpb_dmae_slave_config hpb_dmae_slaves[] = {
+ {
+ .id = HPBDMA_SLAVE_SDHI0_TX,
+ .addr = 0xffe4c000 + 0x30,
+ .dcr = HPB_DMAE_DCR_SPDS_16BIT |
+ HPB_DMAE_DCR_DMDL |
+ HPB_DMAE_DCR_DPDS_16BIT,
+ .rstr = HPB_DMAE_ASYNCRSTR_ASRST21 |
+ HPB_DMAE_ASYNCRSTR_ASRST22 |
+ HPB_DMAE_ASYNCRSTR_ASRST23,
+ .mdr = HPB_DMAE_ASYNCMDR_ASMD21_MULTI,
+ .mdm = HPB_DMAE_ASYNCMDR_ASMD21_MASK,
+ .port = 0x0D0C,
+ .flags = HPB_DMAE_SET_ASYNC_RESET | HPB_DMAE_SET_ASYNC_MODE,
+ .dma_ch = 21,
+ }, {
+ .id = HPBDMA_SLAVE_SDHI0_RX,
+ .addr = 0xffe4c000 + 0x30,
+ .dcr = HPB_DMAE_DCR_SMDL |
+ HPB_DMAE_DCR_SPDS_16BIT |
+ HPB_DMAE_DCR_DPDS_16BIT,
+ .rstr = HPB_DMAE_ASYNCRSTR_ASRST21 |
+ HPB_DMAE_ASYNCRSTR_ASRST22 |
+ HPB_DMAE_ASYNCRSTR_ASRST23,
+ .mdr = HPB_DMAE_ASYNCMDR_ASMD22_MULTI,
+ .mdm = HPB_DMAE_ASYNCMDR_ASMD22_MASK,
+ .port = 0x0D0C,
+ .flags = HPB_DMAE_SET_ASYNC_RESET | HPB_DMAE_SET_ASYNC_MODE,
+ .dma_ch = 22,
+ },
+};
+
+static const struct hpb_dmae_channel hpb_dmae_channels[] = {
+ HPB_DMAE_CHANNEL(0x7e, HPBDMA_SLAVE_SDHI0_TX), /* ch. 21 */
+ HPB_DMAE_CHANNEL(0x7e, HPBDMA_SLAVE_SDHI0_RX), /* ch. 22 */
+};
+
+static struct hpb_dmae_pdata dma_platform_data __initdata = {
+ .slaves = hpb_dmae_slaves,
+ .num_slaves = ARRAY_SIZE(hpb_dmae_slaves),
+ .channels = hpb_dmae_channels,
+ .num_channels = ARRAY_SIZE(hpb_dmae_channels),
+ .ts_shift = {
+ [XMIT_SZ_8BIT] = 0,
+ [XMIT_SZ_16BIT] = 1,
+ [XMIT_SZ_32BIT] = 2,
+ },
+ .num_hw_channels = 39,
+};
+
+static struct resource hpb_dmae_resources[] __initdata = {
+ /* Channel registers */
+ DEFINE_RES_MEM(0xffc08000, 0x1000),
+ /* Common registers */
+ DEFINE_RES_MEM(0xffc09000, 0x170),
+ /* Asynchronous reset registers */
+ DEFINE_RES_MEM(0xffc00300, 4),
+ /* Asynchronous mode registers */
+ DEFINE_RES_MEM(0xffc00400, 4),
+ /* IRQ for DMA channels */
+ DEFINE_RES_NAMED(gic_iid(0x7b), 5, NULL, IORESOURCE_IRQ),
+};
+
+static void __init r8a7778_register_hpb_dmae(void)
+{
+ platform_device_register_resndata(&platform_bus, "hpb-dma-engine", -1,
+ hpb_dmae_resources,
+ ARRAY_SIZE(hpb_dmae_resources),
+ &dma_platform_data,
+ sizeof(dma_platform_data));
+}
+
void __init r8a7778_add_standard_devices(void)
{
r8a7778_add_dt_devices();
@@ -349,12 +401,12 @@ void __init r8a7778_add_standard_devices(void)
r8a7778_register_hspi(0);
r8a7778_register_hspi(1);
r8a7778_register_hspi(2);
+
+ r8a7778_register_hpb_dmae();
}
void __init r8a7778_init_late(void)
{
- phy = usb_get_phy(USB_PHY_TYPE_USB2);
-
platform_device_register_full(&ehci_info);
platform_device_register_full(&ohci_info);
}
@@ -376,7 +428,7 @@ static struct resource irqpin_resources[] __initdata = {
DEFINE_RES_IRQ(gic_iid(0x3e)), /* IRQ3 */
};
-void __init r8a7778_init_irq_extpin(int irlm)
+void __init r8a7778_init_irq_extpin_dt(int irlm)
{
void __iomem *icr0 = ioremap_nocache(0xfe780000, PAGE_SIZE);
unsigned long tmp;
@@ -394,7 +446,11 @@ void __init r8a7778_init_irq_extpin(int irlm)
tmp |= (1 << 21); /* LVLMODE = 1 */
iowrite32(tmp, icr0);
iounmap(icr0);
+}
+void __init r8a7778_init_irq_extpin(int irlm)
+{
+ r8a7778_init_irq_extpin_dt(irlm);
if (irlm)
platform_device_register_resndata(
&platform_bus, "renesas_intc_irqpin", -1,
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index ecd0148ee1e1..13049e9d691c 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -25,6 +25,7 @@
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/of_platform.h>
+#include <linux/platform_data/dma-rcar-hpbdma.h>
#include <linux/platform_data/gpio-rcar.h>
#include <linux/platform_data/irq-renesas-intc-irqpin.h>
#include <linux/platform_device.h>
@@ -97,7 +98,7 @@ static struct resource irqpin0_resources[] __initdata = {
DEFINE_RES_IRQ(gic_spi(30)), /* IRQ3 */
};
-void __init r8a7779_init_irq_extpin(int irlm)
+void __init r8a7779_init_irq_extpin_dt(int irlm)
{
void __iomem *icr0 = ioremap_nocache(0xfe780000, PAGE_SIZE);
u32 tmp;
@@ -115,7 +116,11 @@ void __init r8a7779_init_irq_extpin(int irlm)
tmp |= (1 << 21); /* LVLMODE = 1 */
iowrite32(tmp, icr0);
iounmap(icr0);
+}
+void __init r8a7779_init_irq_extpin(int irlm)
+{
+ r8a7779_init_irq_extpin_dt(irlm);
if (irlm)
platform_device_register_resndata(
&platform_bus, "renesas_intc_irqpin", -1,
@@ -632,6 +637,158 @@ static struct platform_device_info *vin_info_table[] __initdata = {
&vin3_info,
};
+/* HPB-DMA */
+
+/* Asynchronous mode register bits */
+#define HPB_DMAE_ASYNCMDR_ASMD43_MASK BIT(23) /* MMC1 */
+#define HPB_DMAE_ASYNCMDR_ASMD43_SINGLE BIT(23) /* MMC1 */
+#define HPB_DMAE_ASYNCMDR_ASMD43_MULTI 0 /* MMC1 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD43_MASK BIT(22) /* MMC1 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD43_BURST BIT(22) /* MMC1 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD43_NBURST 0 /* MMC1 */
+#define HPB_DMAE_ASYNCMDR_ASMD24_MASK BIT(21) /* MMC0 */
+#define HPB_DMAE_ASYNCMDR_ASMD24_SINGLE BIT(21) /* MMC0 */
+#define HPB_DMAE_ASYNCMDR_ASMD24_MULTI 0 /* MMC0 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD24_MASK BIT(20) /* MMC0 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD24_BURST BIT(20) /* MMC0 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD24_NBURST 0 /* MMC0 */
+#define HPB_DMAE_ASYNCMDR_ASMD41_MASK BIT(19) /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASMD41_SINGLE BIT(19) /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASMD41_MULTI 0 /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD41_MASK BIT(18) /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD41_BURST BIT(18) /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD41_NBURST 0 /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASMD40_MASK BIT(17) /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASMD40_SINGLE BIT(17) /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASMD40_MULTI 0 /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD40_MASK BIT(16) /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD40_BURST BIT(16) /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD40_NBURST 0 /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASMD39_MASK BIT(15) /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASMD39_SINGLE BIT(15) /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASMD39_MULTI 0 /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD39_MASK BIT(14) /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD39_BURST BIT(14) /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD39_NBURST 0 /* SDHI3 */
+#define HPB_DMAE_ASYNCMDR_ASMD27_MASK BIT(13) /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASMD27_SINGLE BIT(13) /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASMD27_MULTI 0 /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD27_MASK BIT(12) /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD27_BURST BIT(12) /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD27_NBURST 0 /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASMD26_MASK BIT(11) /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASMD26_SINGLE BIT(11) /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASMD26_MULTI 0 /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD26_MASK BIT(10) /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD26_BURST BIT(10) /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD26_NBURST 0 /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASMD25_MASK BIT(9) /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASMD25_SINGLE BIT(9) /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASMD25_MULTI 0 /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD25_MASK BIT(8) /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD25_BURST BIT(8) /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD25_NBURST 0 /* SDHI2 */
+#define HPB_DMAE_ASYNCMDR_ASMD23_MASK BIT(7) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD23_SINGLE BIT(7) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD23_MULTI 0 /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD23_MASK BIT(6) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD23_BURST BIT(6) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD23_NBURST 0 /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD22_MASK BIT(5) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD22_SINGLE BIT(5) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD22_MULTI 0 /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD22_MASK BIT(4) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD22_BURST BIT(4) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD22_NBURST 0 /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD21_MASK BIT(3) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD21_SINGLE BIT(3) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD21_MULTI 0 /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD21_MASK BIT(2) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD21_BURST BIT(2) /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD21_NBURST 0 /* SDHI0 */
+#define HPB_DMAE_ASYNCMDR_ASMD20_MASK BIT(1) /* SDHI1 */
+#define HPB_DMAE_ASYNCMDR_ASMD20_SINGLE BIT(1) /* SDHI1 */
+#define HPB_DMAE_ASYNCMDR_ASMD20_MULTI 0 /* SDHI1 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD20_MASK BIT(0) /* SDHI1 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD20_BURST BIT(0) /* SDHI1 */
+#define HPB_DMAE_ASYNCMDR_ASBTMD20_NBURST 0 /* SDHI1 */
+
+static const struct hpb_dmae_slave_config hpb_dmae_slaves[] = {
+ {
+ .id = HPBDMA_SLAVE_SDHI0_TX,
+ .addr = 0xffe4c000 + 0x30,
+ .dcr = HPB_DMAE_DCR_SPDS_16BIT |
+ HPB_DMAE_DCR_DMDL |
+ HPB_DMAE_DCR_DPDS_16BIT,
+ .rstr = HPB_DMAE_ASYNCRSTR_ASRST21 |
+ HPB_DMAE_ASYNCRSTR_ASRST22 |
+ HPB_DMAE_ASYNCRSTR_ASRST23,
+ .mdr = HPB_DMAE_ASYNCMDR_ASMD21_SINGLE |
+ HPB_DMAE_ASYNCMDR_ASBTMD21_NBURST,
+ .mdm = HPB_DMAE_ASYNCMDR_ASMD21_MASK |
+ HPB_DMAE_ASYNCMDR_ASBTMD21_MASK,
+ .port = 0x0D0C,
+ .flags = HPB_DMAE_SET_ASYNC_RESET | HPB_DMAE_SET_ASYNC_MODE,
+ .dma_ch = 21,
+ }, {
+ .id = HPBDMA_SLAVE_SDHI0_RX,
+ .addr = 0xffe4c000 + 0x30,
+ .dcr = HPB_DMAE_DCR_SMDL |
+ HPB_DMAE_DCR_SPDS_16BIT |
+ HPB_DMAE_DCR_DPDS_16BIT,
+ .rstr = HPB_DMAE_ASYNCRSTR_ASRST21 |
+ HPB_DMAE_ASYNCRSTR_ASRST22 |
+ HPB_DMAE_ASYNCRSTR_ASRST23,
+ .mdr = HPB_DMAE_ASYNCMDR_ASMD22_SINGLE |
+ HPB_DMAE_ASYNCMDR_ASBTMD22_NBURST,
+ .mdm = HPB_DMAE_ASYNCMDR_ASMD22_MASK |
+ HPB_DMAE_ASYNCMDR_ASBTMD22_MASK,
+ .port = 0x0D0C,
+ .flags = HPB_DMAE_SET_ASYNC_RESET | HPB_DMAE_SET_ASYNC_MODE,
+ .dma_ch = 22,
+ },
+};
+
+static const struct hpb_dmae_channel hpb_dmae_channels[] = {
+ HPB_DMAE_CHANNEL(0x93, HPBDMA_SLAVE_SDHI0_TX), /* ch. 21 */
+ HPB_DMAE_CHANNEL(0x93, HPBDMA_SLAVE_SDHI0_RX), /* ch. 22 */
+};
+
+static struct hpb_dmae_pdata dma_platform_data __initdata = {
+ .slaves = hpb_dmae_slaves,
+ .num_slaves = ARRAY_SIZE(hpb_dmae_slaves),
+ .channels = hpb_dmae_channels,
+ .num_channels = ARRAY_SIZE(hpb_dmae_channels),
+ .ts_shift = {
+ [XMIT_SZ_8BIT] = 0,
+ [XMIT_SZ_16BIT] = 1,
+ [XMIT_SZ_32BIT] = 2,
+ },
+ .num_hw_channels = 44,
+};
+
+static struct resource hpb_dmae_resources[] __initdata = {
+ /* Channel registers */
+ DEFINE_RES_MEM(0xffc08000, 0x1000),
+ /* Common registers */
+ DEFINE_RES_MEM(0xffc09000, 0x170),
+ /* Asynchronous reset registers */
+ DEFINE_RES_MEM(0xffc00300, 4),
+ /* Asynchronous mode registers */
+ DEFINE_RES_MEM(0xffc00400, 4),
+ /* IRQ for DMA channels */
+ DEFINE_RES_NAMED(gic_iid(0x8e), 12, NULL, IORESOURCE_IRQ),
+};
+
+static void __init r8a7779_register_hpb_dmae(void)
+{
+ platform_device_register_resndata(&platform_bus, "hpb-dma-engine", -1,
+ hpb_dmae_resources,
+ ARRAY_SIZE(hpb_dmae_resources),
+ &dma_platform_data,
+ sizeof(dma_platform_data));
+}
+
static struct platform_device *r8a7779_devices_dt[] __initdata = {
&scif0_device,
&scif1_device,
@@ -665,6 +822,7 @@ void __init r8a7779_add_standard_devices(void)
ARRAY_SIZE(r8a7779_devices_dt));
platform_add_devices(r8a7779_standard_devices,
ARRAY_SIZE(r8a7779_standard_devices));
+ r8a7779_register_hpb_dmae();
}
void __init r8a7779_add_ether_device(struct sh_eth_plat_data *pdata)
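
Each HPB-DMAE slave above carries an mdr value and an mdm mask for the asynchronous mode register. Presumably these are applied as the usual read-modify-write pair (clear the masked bits, then OR in the value); that driver behaviour is an assumption here, and only the ASMD22/ASBTMD22 bit positions come from the defines above. A standalone illustration:

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Clear the masked bits, then OR in the new value -- the usual way a
 * (mask, value) pair such as mdm/mdr is consumed. Whether the HPB-DMAE
 * driver does exactly this is an assumption. */
static unsigned int apply_mdr(unsigned int asyncmdr, unsigned int mdm,
			      unsigned int mdr)
{
	return (asyncmdr & ~mdm) | mdr;
}

int main(void)
{
	unsigned int mdm = BIT(5) | BIT(4);	/* ASMD22_MASK | ASBTMD22_MASK */
	unsigned int mdr = BIT(5);		/* ASMD22_SINGLE | ASBTMD22_NBURST */
	unsigned int reg = 0xffffffff;		/* hypothetical ASYNCMDR contents */

	printf("ASYNCMDR 0x%08x -> 0x%08x\n", reg, apply_mdr(reg, mdm, mdr));
	return 0;
}
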
diff --git a/arch/arm/mach-shmobile/setup-r8a7790.c b/arch/arm/mach-shmobile/setup-r8a7790.c
index d0f5c9f9349a..c47bcebbcb00 100644
--- a/arch/arm/mach-shmobile/setup-r8a7790.c
+++ b/arch/arm/mach-shmobile/setup-r8a7790.c
@@ -18,7 +18,6 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <linux/clocksource.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/of_platform.h>
@@ -31,17 +30,18 @@
#include <mach/r8a7790.h>
#include <asm/mach/arch.h>
-static struct resource pfc_resources[] __initdata = {
+static const struct resource pfc_resources[] __initconst = {
DEFINE_RES_MEM(0xe6060000, 0x250),
};
#define R8A7790_GPIO(idx) \
-static struct resource r8a7790_gpio##idx##_resources[] __initdata = { \
+static const struct resource r8a7790_gpio##idx##_resources[] __initconst = { \
DEFINE_RES_MEM(0xe6050000 + 0x1000 * (idx), 0x50), \
DEFINE_RES_IRQ(gic_spi(4 + (idx))), \
}; \
\
-static struct gpio_rcar_config r8a7790_gpio##idx##_platform_data __initdata = { \
+static const struct gpio_rcar_config \
+r8a7790_gpio##idx##_platform_data __initconst = { \
.gpio_base = 32 * (idx), \
.irq_base = 0, \
.number_of_pins = 32, \
@@ -112,7 +112,7 @@ void __init r8a7790_pinmux_init(void)
enum { SCIFA0, SCIFA1, SCIFB0, SCIFB1, SCIFB2, SCIFA2, SCIF0, SCIF1,
HSCIF0, HSCIF1 };
-static struct plat_sci_port scif[] __initdata = {
+static const struct plat_sci_port scif[] __initconst = {
SCIFA_DATA(SCIFA0, 0xe6c40000, gic_spi(144)), /* SCIFA0 */
SCIFA_DATA(SCIFA1, 0xe6c50000, gic_spi(145)), /* SCIFA1 */
SCIFB_DATA(SCIFB0, 0xe6c20000, gic_spi(148)), /* SCIFB0 */
@@ -131,11 +131,11 @@ static inline void r8a7790_register_scif(int idx)
sizeof(struct plat_sci_port));
}
-static struct renesas_irqc_config irqc0_data __initdata = {
+static const struct renesas_irqc_config irqc0_data __initconst = {
.irq_base = irq_pin(0), /* IRQ0 -> IRQ3 */
};
-static struct resource irqc0_resources[] __initdata = {
+static const struct resource irqc0_resources[] __initconst = {
DEFINE_RES_MEM(0xe61c0000, 0x200), /* IRQC Event Detector Block_0 */
DEFINE_RES_IRQ(gic_spi(0)), /* IRQ0 */
DEFINE_RES_IRQ(gic_spi(1)), /* IRQ1 */
@@ -150,7 +150,7 @@ static struct resource irqc0_resources[] __initdata = {
&irqc##idx##_data, \
sizeof(struct renesas_irqc_config))
-static struct resource thermal_resources[] __initdata = {
+static const struct resource thermal_resources[] __initconst = {
DEFINE_RES_MEM(0xe61f0000, 0x14),
DEFINE_RES_MEM(0xe61f0100, 0x38),
DEFINE_RES_IRQ(gic_spi(69)),
@@ -161,13 +161,13 @@ static struct resource thermal_resources[] __initdata = {
thermal_resources, \
ARRAY_SIZE(thermal_resources))
-static struct sh_timer_config cmt00_platform_data __initdata = {
+static const struct sh_timer_config cmt00_platform_data __initconst = {
.name = "CMT00",
.timer_bit = 0,
.clockevent_rating = 80,
};
-static struct resource cmt00_resources[] __initdata = {
+static const struct resource cmt00_resources[] __initconst = {
DEFINE_RES_MEM(0xffca0510, 0x0c),
DEFINE_RES_MEM(0xffca0500, 0x04),
DEFINE_RES_IRQ(gic_spi(142)), /* CMT0_0 */
@@ -202,72 +202,7 @@ void __init r8a7790_add_standard_devices(void)
r8a7790_register_thermal();
}
-#define MODEMR 0xe6160060
-
-u32 __init r8a7790_read_mode_pins(void)
-{
- void __iomem *modemr = ioremap_nocache(MODEMR, 4);
- u32 mode;
-
- BUG_ON(!modemr);
- mode = ioread32(modemr);
- iounmap(modemr);
-
- return mode;
-}
-
-#define CNTCR 0
-#define CNTFID0 0x20
-
-void __init r8a7790_timer_init(void)
-{
-#ifdef CONFIG_ARM_ARCH_TIMER
- u32 mode = r8a7790_read_mode_pins();
- void __iomem *base;
- int extal_mhz = 0;
- u32 freq;
-
- /* At Linux boot time the r8a7790 arch timer comes up
- * with the counter disabled. Moreover, it may also report
- * a potentially incorrect fixed 13 MHz frequency. To be
- * correct these registers need to be updated to use the
- * frequency EXTAL / 2 which can be determined by the MD pins.
- */
-
- switch (mode & (MD(14) | MD(13))) {
- case 0:
- extal_mhz = 15;
- break;
- case MD(13):
- extal_mhz = 20;
- break;
- case MD(14):
- extal_mhz = 26;
- break;
- case MD(13) | MD(14):
- extal_mhz = 30;
- break;
- }
-
- /* The arch timer frequency equals EXTAL / 2 */
- freq = extal_mhz * (1000000 / 2);
-
- /* Remap "armgcnt address map" space */
- base = ioremap(0xe6080000, PAGE_SIZE);
-
- /* Update registers with correct frequency */
- iowrite32(freq, base + CNTFID0);
- asm volatile("mcr p15, 0, %0, c14, c0, 0" : : "r" (freq));
-
- /* make sure arch timer is started by setting bit 0 of CNTCR */
- iowrite32(1, base + CNTCR);
- iounmap(base);
-#endif /* CONFIG_ARM_ARCH_TIMER */
-
- clocksource_of_init();
-}
-
-void __init r8a7790_init_delay(void)
+void __init r8a7790_init_early(void)
{
#ifndef CONFIG_ARM_ARCH_TIMER
shmobile_setup_delay(1300, 2, 4); /* Cortex-A15 @ 1300MHz */
@@ -276,14 +211,15 @@ void __init r8a7790_init_delay(void)
#ifdef CONFIG_USE_OF
-static const char *r8a7790_boards_compat_dt[] __initdata = {
+static const char * const r8a7790_boards_compat_dt[] __initconst = {
"renesas,r8a7790",
NULL,
};
DT_MACHINE_START(R8A7790_DT, "Generic R8A7790 (Flattened Device Tree)")
- .init_early = r8a7790_init_delay,
- .init_time = r8a7790_timer_init,
+ .smp = smp_ops(r8a7790_smp_ops),
+ .init_early = r8a7790_init_early,
+ .init_time = rcar_gen2_timer_init,
.dt_compat = r8a7790_boards_compat_dt,
MACHINE_END
#endif /* CONFIG_USE_OF */
diff --git a/arch/arm/mach-shmobile/setup-r8a7791.c b/arch/arm/mach-shmobile/setup-r8a7791.c
new file mode 100644
index 000000000000..d9393d61ee27
--- /dev/null
+++ b/arch/arm/mach-shmobile/setup-r8a7791.c
@@ -0,0 +1,184 @@
+/*
+ * r8a7791 processor support
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/platform_data/irq-renesas-irqc.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <mach/common.h>
+#include <mach/irqs.h>
+#include <mach/r8a7791.h>
+#include <mach/rcar-gen2.h>
+#include <asm/mach/arch.h>
+
+#define SCIF_COMMON(scif_type, baseaddr, irq) \
+ .type = scif_type, \
+ .mapbase = baseaddr, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, \
+ .irqs = SCIx_IRQ_MUXED(irq)
+
+#define SCIFA_DATA(index, baseaddr, irq) \
+[index] = { \
+ SCIF_COMMON(PORT_SCIFA, baseaddr, irq), \
+ .scbrr_algo_id = SCBRR_ALGO_4, \
+ .scscr = SCSCR_RE | SCSCR_TE, \
+}
+
+#define SCIFB_DATA(index, baseaddr, irq) \
+[index] = { \
+ SCIF_COMMON(PORT_SCIFB, baseaddr, irq), \
+ .scbrr_algo_id = SCBRR_ALGO_4, \
+ .scscr = SCSCR_RE | SCSCR_TE, \
+}
+
+#define SCIF_DATA(index, baseaddr, irq) \
+[index] = { \
+ SCIF_COMMON(PORT_SCIF, baseaddr, irq), \
+ .scbrr_algo_id = SCBRR_ALGO_2, \
+ .scscr = SCSCR_RE | SCSCR_TE, \
+}
+
+#define HSCIF_DATA(index, baseaddr, irq) \
+[index] = { \
+ SCIF_COMMON(PORT_HSCIF, baseaddr, irq), \
+ .scbrr_algo_id = SCBRR_ALGO_6, \
+ .scscr = SCSCR_RE | SCSCR_TE, \
+}
+
+enum { SCIFA0, SCIFA1, SCIFB0, SCIFB1, SCIFB2, SCIFA2, SCIF0, SCIF1,
+ SCIF2, SCIF3, SCIF4, SCIF5, SCIFA3, SCIFA4, SCIFA5 };
+
+static const struct plat_sci_port scif[] __initconst = {
+ SCIFA_DATA(SCIFA0, 0xe6c40000, gic_spi(144)), /* SCIFA0 */
+ SCIFA_DATA(SCIFA1, 0xe6c50000, gic_spi(145)), /* SCIFA1 */
+ SCIFB_DATA(SCIFB0, 0xe6c20000, gic_spi(148)), /* SCIFB0 */
+ SCIFB_DATA(SCIFB1, 0xe6c30000, gic_spi(149)), /* SCIFB1 */
+ SCIFB_DATA(SCIFB2, 0xe6ce0000, gic_spi(150)), /* SCIFB2 */
+ SCIFA_DATA(SCIFA2, 0xe6c60000, gic_spi(151)), /* SCIFA2 */
+ SCIF_DATA(SCIF0, 0xe6e60000, gic_spi(152)), /* SCIF0 */
+ SCIF_DATA(SCIF1, 0xe6e68000, gic_spi(153)), /* SCIF1 */
+ SCIF_DATA(SCIF2, 0xe6e58000, gic_spi(22)), /* SCIF2 */
+ SCIF_DATA(SCIF3, 0xe6ea8000, gic_spi(23)), /* SCIF3 */
+ SCIF_DATA(SCIF4, 0xe6ee0000, gic_spi(24)), /* SCIF4 */
+ SCIF_DATA(SCIF5, 0xe6ee8000, gic_spi(25)), /* SCIF5 */
+ SCIFA_DATA(SCIFA3, 0xe6c70000, gic_spi(29)), /* SCIFA3 */
+ SCIFA_DATA(SCIFA4, 0xe6c78000, gic_spi(30)), /* SCIFA4 */
+ SCIFA_DATA(SCIFA5, 0xe6c80000, gic_spi(31)), /* SCIFA5 */
+};
+
+static inline void r8a7791_register_scif(int idx)
+{
+ platform_device_register_data(&platform_bus, "sh-sci", idx, &scif[idx],
+ sizeof(struct plat_sci_port));
+}
+
+static const struct sh_timer_config cmt00_platform_data __initconst = {
+ .name = "CMT00",
+ .timer_bit = 0,
+ .clockevent_rating = 80,
+};
+
+static const struct resource cmt00_resources[] __initconst = {
+ DEFINE_RES_MEM(0xffca0510, 0x0c),
+ DEFINE_RES_MEM(0xffca0500, 0x04),
+ DEFINE_RES_IRQ(gic_spi(142)), /* CMT0_0 */
+};
+
+#define r8a7791_register_cmt(idx) \
+ platform_device_register_resndata(&platform_bus, "sh_cmt", \
+ idx, cmt##idx##_resources, \
+ ARRAY_SIZE(cmt##idx##_resources), \
+ &cmt##idx##_platform_data, \
+ sizeof(struct sh_timer_config))
+
+static struct renesas_irqc_config irqc0_data = {
+ .irq_base = irq_pin(0), /* IRQ0 -> IRQ9 */
+};
+
+static struct resource irqc0_resources[] = {
+ DEFINE_RES_MEM(0xe61c0000, 0x200), /* IRQC Event Detector Block_0 */
+ DEFINE_RES_IRQ(gic_spi(0)), /* IRQ0 */
+ DEFINE_RES_IRQ(gic_spi(1)), /* IRQ1 */
+ DEFINE_RES_IRQ(gic_spi(2)), /* IRQ2 */
+ DEFINE_RES_IRQ(gic_spi(3)), /* IRQ3 */
+ DEFINE_RES_IRQ(gic_spi(12)), /* IRQ4 */
+ DEFINE_RES_IRQ(gic_spi(13)), /* IRQ5 */
+ DEFINE_RES_IRQ(gic_spi(14)), /* IRQ6 */
+ DEFINE_RES_IRQ(gic_spi(15)), /* IRQ7 */
+ DEFINE_RES_IRQ(gic_spi(16)), /* IRQ8 */
+ DEFINE_RES_IRQ(gic_spi(17)), /* IRQ9 */
+};
+
+#define r8a7791_register_irqc(idx) \
+ platform_device_register_resndata(&platform_bus, "renesas_irqc", \
+ idx, irqc##idx##_resources, \
+ ARRAY_SIZE(irqc##idx##_resources), \
+ &irqc##idx##_data, \
+ sizeof(struct renesas_irqc_config))
+
+void __init r8a7791_add_dt_devices(void)
+{
+ r8a7791_register_scif(SCIFA0);
+ r8a7791_register_scif(SCIFA1);
+ r8a7791_register_scif(SCIFB0);
+ r8a7791_register_scif(SCIFB1);
+ r8a7791_register_scif(SCIFB2);
+ r8a7791_register_scif(SCIFA2);
+ r8a7791_register_scif(SCIF0);
+ r8a7791_register_scif(SCIF1);
+ r8a7791_register_scif(SCIF2);
+ r8a7791_register_scif(SCIF3);
+ r8a7791_register_scif(SCIF4);
+ r8a7791_register_scif(SCIF5);
+ r8a7791_register_scif(SCIFA3);
+ r8a7791_register_scif(SCIFA4);
+ r8a7791_register_scif(SCIFA5);
+ r8a7791_register_cmt(00);
+}
+
+void __init r8a7791_add_standard_devices(void)
+{
+ r8a7791_add_dt_devices();
+ r8a7791_register_irqc(0);
+}
+
+void __init r8a7791_init_early(void)
+{
+#ifndef CONFIG_ARM_ARCH_TIMER
+ shmobile_setup_delay(1300, 2, 4); /* Cortex-A15 @ 1300MHz */
+#endif
+}
+
+#ifdef CONFIG_USE_OF
+static const char *r8a7791_boards_compat_dt[] __initdata = {
+ "renesas,r8a7791",
+ NULL,
+};
+
+DT_MACHINE_START(R8A7791_DT, "Generic R8A7791 (Flattened Device Tree)")
+ .smp = smp_ops(r8a7791_smp_ops),
+ .init_early = r8a7791_init_early,
+ .init_time = rcar_gen2_timer_init,
+ .dt_compat = r8a7791_boards_compat_dt,
+MACHINE_END
+#endif /* CONFIG_USE_OF */
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
new file mode 100644
index 000000000000..5734c24bf6c7
--- /dev/null
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -0,0 +1,91 @@
+/*
+ * R-Car Generation 2 support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/clocksource.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <mach/common.h>
+#include <mach/rcar-gen2.h>
+#include <asm/mach/arch.h>
+
+#define MODEMR 0xe6160060
+
+u32 __init rcar_gen2_read_mode_pins(void)
+{
+ void __iomem *modemr = ioremap_nocache(MODEMR, 4);
+ u32 mode;
+
+ BUG_ON(!modemr);
+ mode = ioread32(modemr);
+ iounmap(modemr);
+
+ return mode;
+}
+
+#define CNTCR 0
+#define CNTFID0 0x20
+
+void __init rcar_gen2_timer_init(void)
+{
+#ifdef CONFIG_ARM_ARCH_TIMER
+ u32 mode = rcar_gen2_read_mode_pins();
+ void __iomem *base;
+ int extal_mhz = 0;
+ u32 freq;
+
+ /* At Linux boot time the r8a7790 arch timer comes up
+ * with the counter disabled. Moreover, it may also report
+ * a potentially incorrect fixed 13 MHz frequency. To be
+ * correct, these registers need to be updated to use the
+ * frequency EXTAL / 2, which can be determined from the MD pins.
+ */
+
+ switch (mode & (MD(14) | MD(13))) {
+ case 0:
+ extal_mhz = 15;
+ break;
+ case MD(13):
+ extal_mhz = 20;
+ break;
+ case MD(14):
+ extal_mhz = 26;
+ break;
+ case MD(13) | MD(14):
+ extal_mhz = 30;
+ break;
+ }
+
+ /* The arch timer frequency equals EXTAL / 2 */
+ freq = extal_mhz * (1000000 / 2);
+
+ /* Remap "armgcnt address map" space */
+ base = ioremap(0xe6080000, PAGE_SIZE);
+
+ /* Update registers with correct frequency */
+ iowrite32(freq, base + CNTFID0);
+ asm volatile("mcr p15, 0, %0, c14, c0, 0" : : "r" (freq));
+
+ /* make sure arch timer is started by setting bit 0 of CNTCR */
+ iowrite32(1, base + CNTCR);
+ iounmap(base);
+#endif /* CONFIG_ARM_ARCH_TIMER */
+
+ clocksource_of_init();
+}
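
To make the EXTAL / 2 arithmetic in rcar_gen2_timer_init() above concrete, this standalone table reproduces the resulting arch-timer frequency for each MD13/MD14 combination; the EXTAL values come from the switch above, the rest is illustrative.

#include <stdio.h>

#define MD(nr) (1u << (nr))

int main(void)
{
	static const struct { unsigned int mode; int extal_mhz; } tbl[] = {
		{ 0,			15 },
		{ MD(13),		20 },
		{ MD(14),		26 },
		{ MD(13) | MD(14),	30 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		unsigned int freq = tbl[i].extal_mhz * (1000000 / 2);

		printf("mode 0x%05x: EXTAL %2d MHz -> CNTFID0/CNTFRQ %8u Hz\n",
		       tbl[i].mode, tbl[i].extal_mhz, freq);
	}
	return 0;
}
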
diff --git a/arch/arm/mach-shmobile/smp-emev2.c b/arch/arm/mach-shmobile/smp-emev2.c
index 522de5ebb55f..f2ca92308f75 100644
--- a/arch/arm/mach-shmobile/smp-emev2.c
+++ b/arch/arm/mach-shmobile/smp-emev2.c
@@ -34,12 +34,6 @@
static int emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- int ret;
-
- ret = shmobile_smp_scu_boot_secondary(cpu, idle);
- if (ret)
- return ret;
-
arch_send_wakeup_ipi_mask(cpumask_of(cpu_logical_map(cpu)));
return 0;
}
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index 0f05e9fb722f..627c1f0d9478 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -87,10 +87,6 @@ static int r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
unsigned int lcpu = cpu_logical_map(cpu);
int ret;
- ret = shmobile_smp_scu_boot_secondary(cpu, idle);
- if (ret)
- return ret;
-
if (lcpu < ARRAY_SIZE(r8a7779_ch_cpu))
ch = r8a7779_ch_cpu[lcpu];
diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c
new file mode 100644
index 000000000000..015e2753de1f
--- /dev/null
+++ b/arch/arm/mach-shmobile/smp-r8a7790.c
@@ -0,0 +1,67 @@
+/*
+ * SMP support for r8a7790
+ *
+ * Copyright (C) 2012-2013 Renesas Solutions Corp.
+ * Copyright (C) 2012 Takashi Yoshii <takashi.yoshii.ze@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <asm/smp_plat.h>
+#include <mach/common.h>
+
+#define RST 0xe6160000
+#define CA15BAR 0x0020
+#define CA7BAR 0x0030
+#define CA15RESCNT 0x0040
+#define CA7RESCNT 0x0044
+#define MERAM 0xe8080000
+
+static void __init r8a7790_smp_prepare_cpus(unsigned int max_cpus)
+{
+ void __iomem *p;
+ u32 bar;
+
+ /* let APMU code install data related to shmobile_boot_vector */
+ shmobile_smp_apmu_prepare_cpus(max_cpus);
+
+ /* MERAM for jump stub, because BAR requires 256KB aligned address */
+ p = ioremap_nocache(MERAM, shmobile_boot_size);
+ memcpy_toio(p, shmobile_boot_vector, shmobile_boot_size);
+ iounmap(p);
+
+ /* setup reset vectors */
+ p = ioremap_nocache(RST, 0x63);
+ bar = (MERAM >> 8) & 0xfffffc00;
+ writel_relaxed(bar, p + CA15BAR);
+ writel_relaxed(bar, p + CA7BAR);
+ writel_relaxed(bar | 0x10, p + CA15BAR);
+ writel_relaxed(bar | 0x10, p + CA7BAR);
+
+ /* enable clocks to all CPUs */
+ writel_relaxed((readl_relaxed(p + CA15RESCNT) & ~0x0f) | 0xa5a50000,
+ p + CA15RESCNT);
+ writel_relaxed((readl_relaxed(p + CA7RESCNT) & ~0x0f) | 0x5a5a0000,
+ p + CA7RESCNT);
+ iounmap(p);
+}
+
+struct smp_operations r8a7790_smp_ops __initdata = {
+ .smp_prepare_cpus = r8a7790_smp_prepare_cpus,
+ .smp_boot_secondary = shmobile_smp_apmu_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = shmobile_smp_cpu_disable,
+ .cpu_die = shmobile_smp_apmu_cpu_die,
+ .cpu_kill = shmobile_smp_apmu_cpu_kill,
+#endif
+};
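
The boot-address registers programmed in the two smp_prepare_cpus() routines (here and in smp-r8a7791.c below) store the 256 KiB-aligned jump-stub address shifted right by 8, then rewrite it with bit 4 (0x10) set; bit 4 is presumably an enable bit, though the patch does not name it. A standalone sketch of the encoding for both SoCs' stub addresses:

#include <stdio.h>

/* Same shift/mask as the CA15BAR/CA7BAR writes above */
static unsigned int bar_value(unsigned int stub_addr)
{
	return (stub_addr >> 8) & 0xfffffc00;
}

int main(void)
{
	printf("r8a7790 MERAM 0xe8080000 -> BAR 0x%08x, with bit 4 set 0x%08x\n",
	       bar_value(0xe8080000), bar_value(0xe8080000) | 0x10);
	printf("r8a7791 RAM   0xe6300000 -> BAR 0x%08x, with bit 4 set 0x%08x\n",
	       bar_value(0xe6300000), bar_value(0xe6300000) | 0x10);
	return 0;
}
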
diff --git a/arch/arm/mach-shmobile/smp-r8a7791.c b/arch/arm/mach-shmobile/smp-r8a7791.c
new file mode 100644
index 000000000000..2df5bd190fe4
--- /dev/null
+++ b/arch/arm/mach-shmobile/smp-r8a7791.c
@@ -0,0 +1,62 @@
+/*
+ * SMP support for r8a7791
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <asm/smp_plat.h>
+#include <mach/common.h>
+#include <mach/r8a7791.h>
+
+#define RST 0xe6160000
+#define CA15BAR 0x0020
+#define CA15RESCNT 0x0040
+#define RAM 0xe6300000
+
+static void __init r8a7791_smp_prepare_cpus(unsigned int max_cpus)
+{
+ void __iomem *p;
+ u32 bar;
+
+ /* let APMU code install data related to shmobile_boot_vector */
+ shmobile_smp_apmu_prepare_cpus(max_cpus);
+
+ /* RAM for jump stub, because BAR requires 256KB aligned address */
+ p = ioremap_nocache(RAM, shmobile_boot_size);
+ memcpy_toio(p, shmobile_boot_vector, shmobile_boot_size);
+ iounmap(p);
+
+ /* setup reset vectors */
+ p = ioremap_nocache(RST, 0x63);
+ bar = (RAM >> 8) & 0xfffffc00;
+ writel_relaxed(bar, p + CA15BAR);
+ writel_relaxed(bar | 0x10, p + CA15BAR);
+
+ /* enable clocks to all CPUs */
+ writel_relaxed((readl_relaxed(p + CA15RESCNT) & ~0x0f) | 0xa5a50000,
+ p + CA15RESCNT);
+ iounmap(p);
+}
+
+struct smp_operations r8a7791_smp_ops __initdata = {
+ .smp_prepare_cpus = r8a7791_smp_prepare_cpus,
+ .smp_boot_secondary = shmobile_smp_apmu_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = shmobile_smp_cpu_disable,
+ .cpu_die = shmobile_smp_apmu_cpu_die,
+ .cpu_kill = shmobile_smp_apmu_cpu_kill,
+#endif
+};
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index 0baa24443793..13ba36a6831f 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -46,11 +46,6 @@ void __init sh73a0_register_twd(void)
static int sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned int lcpu = cpu_logical_map(cpu);
- int ret;
-
- ret = shmobile_smp_scu_boot_secondary(cpu, idle);
- if (ret)
- return ret;
if (((__raw_readl(PSTR) >> (4 * lcpu)) & 3) == 3)
__raw_writel(1 << lcpu, WUPCR); /* wake up */
@@ -71,18 +66,11 @@ static void __init sh73a0_smp_prepare_cpus(unsigned int max_cpus)
shmobile_smp_scu_prepare_cpus(max_cpus);
}
-#ifdef CONFIG_HOTPLUG_CPU
-static int sh73a0_cpu_disable(unsigned int cpu)
-{
- return 0; /* CPU0 and CPU1 supported */
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
struct smp_operations sh73a0_smp_ops __initdata = {
.smp_prepare_cpus = sh73a0_smp_prepare_cpus,
.smp_boot_secondary = sh73a0_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
- .cpu_disable = sh73a0_cpu_disable,
+ .cpu_disable = shmobile_smp_cpu_disable,
.cpu_die = shmobile_smp_scu_cpu_die,
.cpu_kill = shmobile_smp_scu_cpu_kill,
#endif
diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig
index dd86db467521..037100a1563a 100644
--- a/arch/arm/mach-socfpga/Kconfig
+++ b/arch/arm/mach-socfpga/Kconfig
@@ -4,7 +4,6 @@ config ARCH_SOCFPGA
select ARM_AMBA
select ARM_GIC
select CACHE_L2X0
- select CLKDEV_LOOKUP
select COMMON_CLK
select CPU_V7
select DW_APB_TIMER_OF
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
index bfce9641e32f..dd0d49cdbe09 100644
--- a/arch/arm/mach-socfpga/socfpga.c
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -14,7 +14,6 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/clk-provider.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -107,7 +106,6 @@ static void __init socfpga_cyclone5_init(void)
{
l2x0_of_init(0, ~0UL);
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
- of_clk_init(NULL);
socfpga_init_clocks();
}
diff --git a/arch/arm/mach-spear/Kconfig b/arch/arm/mach-spear/Kconfig
index df0d59afeb40..ac1710e64d9a 100644
--- a/arch/arm/mach-spear/Kconfig
+++ b/arch/arm/mach-spear/Kconfig
@@ -7,11 +7,9 @@ menuconfig PLAT_SPEAR
default PLAT_SPEAR_SINGLE
select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
select COMMON_CLK
select GENERIC_CLOCKEVENTS
- select HAVE_CLK
if PLAT_SPEAR
diff --git a/arch/arm/mach-spear/include/mach/timex.h b/arch/arm/mach-spear/include/mach/timex.h
deleted file mode 100644
index ef95e5b780bd..000000000000
--- a/arch/arm/mach-spear/include/mach/timex.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/plat-spear/include/plat/timex.h
- *
- * SPEAr platform specific timex definitions
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_TIMEX_H
-#define __PLAT_TIMEX_H
-
-#define CLOCK_TICK_RATE 48000000
-
-#endif /* __PLAT_TIMEX_H */
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index 835833e3c4f8..a67f83fd3f78 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -30,7 +30,7 @@ config SOC_STIH415
default y
help
This enables support for STMicroelectronics Digital Consumer
- Electronics family StiH415 parts, primarily targetted at set-top-box
+ Electronics family StiH415 parts, primarily targeted at set-top-box
 and other digital audio/video applications using Flattened Device
Trees.
@@ -39,7 +39,7 @@ config SOC_STIH416
default y
help
This enables support for STMicroelectronics Digital Consumer
- Electronics family StiH416 parts, primarily targetted at set-top-box
+ Electronics family StiH416 parts, primarily targeted at set-top-box
and other digital audio/video applications using Flattened Device
Trees.
diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c
index 8fe6f0c46480..1217fb598cfd 100644
--- a/arch/arm/mach-sti/board-dt.c
+++ b/arch/arm/mach-sti/board-dt.c
@@ -7,9 +7,8 @@
* published by the Free Software Foundation.
*/
-#include <linux/clk-provider.h>
-#include <linux/clocksource.h>
#include <linux/irq.h>
+#include <linux/of_platform.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h>
@@ -28,11 +27,10 @@ void __init stih41x_l2x0_init(void)
l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK);
}
-static void __init stih41x_timer_init(void)
+static void __init stih41x_machine_init(void)
{
- of_clk_init(NULL);
- clocksource_of_init();
stih41x_l2x0_init();
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static const char *stih41x_dt_match[] __initdata = {
@@ -42,7 +40,7 @@ static const char *stih41x_dt_match[] __initdata = {
};
DT_MACHINE_START(STM, "STiH415/416 SoC with Flattened Device Tree")
- .init_time = stih41x_timer_init,
+ .init_machine = stih41x_machine_init,
.smp = smp_ops(sti_smp_ops),
.dt_compat = stih41x_dt_match,
MACHINE_END
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index 3ab2f65f8a50..c9e72c89066a 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -1,14 +1,14 @@
config ARCH_SUNXI
bool "Allwinner A1X SOCs" if ARCH_MULTI_V7
select ARCH_REQUIRE_GPIOLIB
+ select ARM_GIC
select CLKSRC_MMIO
select CLKSRC_OF
select COMMON_CLK
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
+ select HAVE_SMP
select PINCTRL
+ select PINCTRL_SUNXI
select SPARSE_IRQ
select SUN4I_TIMER
- select PINCTRL_SUNXI
- select ARM_GIC
- select HAVE_SMP
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index e79fb3469341..61d3a387f01c 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -10,7 +10,6 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -20,8 +19,6 @@
#include <linux/io.h>
#include <linux/reboot.h>
-#include <linux/clk/sunxi.h>
-
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/system_misc.h>
@@ -93,14 +90,13 @@ static void sun6i_restart(enum reboot_mode mode, const char *cmd)
}
static struct of_device_id sunxi_restart_ids[] = {
- { .compatible = "allwinner,sun4i-wdt", .data = sun4i_restart },
- { .compatible = "allwinner,sun6i-wdt", .data = sun6i_restart },
+ { .compatible = "allwinner,sun4i-wdt" },
+ { .compatible = "allwinner,sun6i-wdt" },
{ /*sentinel*/ }
};
static void sunxi_setup_restart(void)
{
- const struct of_device_id *of_id;
struct device_node *np;
np = of_find_matching_node(NULL, sunxi_restart_ids);
@@ -109,17 +105,6 @@ static void sunxi_setup_restart(void)
wdt_base = of_iomap(np, 0);
WARN(!wdt_base, "failed to map watchdog base address");
-
- of_id = of_match_node(sunxi_restart_ids, np);
- WARN(!of_id, "restart function not available");
-
- arm_pm_restart = of_id->data;
-}
-
-static void __init sunxi_timer_init(void)
-{
- sunxi_init_clocks();
- clocksource_of_init();
}
static void __init sunxi_dt_init(void)
@@ -133,13 +118,33 @@ static const char * const sunxi_board_dt_compat[] = {
"allwinner,sun4i-a10",
"allwinner,sun5i-a10s",
"allwinner,sun5i-a13",
- "allwinner,sun6i-a31",
- "allwinner,sun7i-a20",
NULL,
};
DT_MACHINE_START(SUNXI_DT, "Allwinner A1X (Device Tree)")
.init_machine = sunxi_dt_init,
- .init_time = sunxi_timer_init,
.dt_compat = sunxi_board_dt_compat,
+ .restart = sun4i_restart,
+MACHINE_END
+
+static const char * const sun6i_board_dt_compat[] = {
+ "allwinner,sun6i-a31",
+ NULL,
+};
+
+DT_MACHINE_START(SUN6I_DT, "Allwinner sun6i (A31) Family")
+ .init_machine = sunxi_dt_init,
+ .dt_compat = sun6i_board_dt_compat,
+ .restart = sun6i_restart,
+MACHINE_END
+
+static const char * const sun7i_board_dt_compat[] = {
+ "allwinner,sun7i-a20",
+ NULL,
+};
+
+DT_MACHINE_START(SUN7I_DT, "Allwinner sun7i (A20) Family")
+ .init_machine = sunxi_dt_init,
+ .dt_compat = sun7i_board_dt_compat,
+ .restart = sun4i_restart,
MACHINE_END
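
With the single sunxi machine record split into three, the restart handler is chosen statically by whichever DT_MACHINE descriptor matches the board, rather than being looked up through of_match_node() data at run time. Below is a toy user-space model of that selection step; the structure and table names are invented purely for illustration and are not the kernel's real matching code.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct fake_machine_desc {
	const char *name;
	const char *const *dt_compat;
};

static const char *const sun4i_compat[] = { "allwinner,sun4i-a10", NULL };
static const char *const sun6i_compat[] = { "allwinner,sun6i-a31", NULL };
static const char *const sun7i_compat[] = { "allwinner,sun7i-a20", NULL };

static const struct fake_machine_desc descs[] = {
	{ "Allwinner A1X (Device Tree)",  sun4i_compat },
	{ "Allwinner sun6i (A31) Family", sun6i_compat },
	{ "Allwinner sun7i (A20) Family", sun7i_compat },
};

/* Pick the descriptor whose dt_compat list contains the board's
 * top-level "compatible" string. */
static const struct fake_machine_desc *pick(const char *board_compat)
{
	for (size_t i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
		for (const char *const *c = descs[i].dt_compat; *c; c++)
			if (!strcmp(*c, board_compat))
				return &descs[i];
	return NULL;
}

int main(void)
{
	const struct fake_machine_desc *d = pick("allwinner,sun7i-a20");

	printf("matched: %s\n", d ? d->name : "(none)");
	return 0;
}
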
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 67a76f2dfb9f..09e740f58b27 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -3,7 +3,6 @@ config ARCH_TEGRA
select ARCH_HAS_CPUFREQ
select ARCH_REQUIRE_GPIOLIB
select ARM_GIC
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
select CLKSRC_OF
select COMMON_CLK
@@ -11,7 +10,6 @@ config ARCH_TEGRA
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
- select HAVE_CLK
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
select MIGHT_HAVE_PCI
@@ -53,14 +51,22 @@ config ARCH_TEGRA_3x_SOC
config ARCH_TEGRA_114_SOC
bool "Enable support for Tegra114 family"
- select HAVE_ARM_ARCH_TIMER
- select ARM_ERRATA_798181
+ select ARM_ERRATA_798181 if SMP
select ARM_L1_CACHE_SHIFT_6
+ select HAVE_ARM_ARCH_TIMER
select PINCTRL_TEGRA114
help
Support for NVIDIA Tegra T114 processor family, based on the
ARM CortexA15MP CPU
+config ARCH_TEGRA_124_SOC
+ bool "Enable support for Tegra124 family"
+ select ARM_L1_CACHE_SHIFT_6
+ select HAVE_ARM_ARCH_TIMER
+ help
+ Support for NVIDIA Tegra T124 processor family, based on the
+ ARM CortexA15MP CPU
+
config TEGRA_AHB
bool "Enable AHB driver for NVIDIA Tegra SoCs"
default y
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index e7e5f45c6558..019bb1758662 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -1,6 +1,5 @@
asflags-y += -march=armv7-a
-obj-y += common.o
obj-y += io.o
obj-y += irq.o
obj-y += fuse.o
@@ -36,5 +35,10 @@ obj-$(CONFIG_ARCH_TEGRA_114_SOC) += pm-tegra30.o
ifeq ($(CONFIG_CPU_IDLE),y)
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += cpuidle-tegra114.o
endif
+obj-$(CONFIG_ARCH_TEGRA_124_SOC) += sleep-tegra30.o
+obj-$(CONFIG_ARCH_TEGRA_124_SOC) += pm-tegra30.o
+ifeq ($(CONFIG_CPU_IDLE),y)
+obj-$(CONFIG_ARCH_TEGRA_124_SOC) += cpuidle-tegra114.o
+endif
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += board-paz00.o
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index 740e16f64728..06f024070dab 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -20,12 +20,11 @@
#include <linux/platform_device.h>
#include <linux/rfkill-gpio.h>
#include "board.h"
-#include "board-paz00.h"
static struct rfkill_gpio_platform_data wifi_rfkill_platform_data = {
.name = "wifi_rfkill",
- .reset_gpio = TEGRA_WIFI_RST,
- .shutdown_gpio = TEGRA_WIFI_PWRN,
+ .reset_gpio = 25, /* PD1 */
+ .shutdown_gpio = 85, /* PK5 */
.type = RFKILL_TYPE_WLAN,
};
diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h
index db6810dc0b3d..bcf5dbf69d58 100644
--- a/arch/arm/mach-tegra/board.h
+++ b/arch/arm/mach-tegra/board.h
@@ -25,20 +25,8 @@
#include <linux/types.h>
#include <linux/reboot.h>
-void tegra_assert_system_reset(enum reboot_mode mode, const char *cmd);
-
-void __init tegra_init_early(void);
void __init tegra_map_common_io(void);
void __init tegra_init_irq(void);
-void __init tegra_dt_init_irq(void);
-
-void tegra_init_late(void);
-
-#ifdef CONFIG_DEBUG_FS
-int tegra_clk_debugfs_init(void);
-#else
-static inline int tegra_clk_debugfs_init(void) { return 0; }
-#endif
int __init tegra_powergate_init(void);
#if defined(CONFIG_ARCH_TEGRA_2x_SOC) && defined(CONFIG_DEBUG_FS)
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
deleted file mode 100644
index 94a119a35af8..000000000000
--- a/arch/arm/mach-tegra/common.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * arch/arm/mach-tegra/common.c
- *
- * Copyright (c) 2013 NVIDIA Corporation. All rights reserved.
- * Copyright (C) 2010 Google, Inc.
- *
- * Author:
- * Colin Cross <ccross@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/irqchip.h>
-#include <linux/clk-provider.h>
-
-#include <asm/hardware/cache-l2x0.h>
-
-#include "board.h"
-#include "common.h"
-#include "cpuidle.h"
-#include "fuse.h"
-#include "iomap.h"
-#include "irq.h"
-#include "pmc.h"
-#include "apbio.h"
-#include "sleep.h"
-#include "pm.h"
-#include "reset.h"
-
-/*
- * Storage for debug-macro.S's state.
- *
- * This must be in .data not .bss so that it gets initialized each time the
- * kernel is loaded. The data is declared here rather than debug-macro.S so
- * that multiple inclusions of debug-macro.S point at the same data.
- */
-u32 tegra_uart_config[4] = {
- /* Debug UART initialization required */
- 1,
- /* Debug UART physical address */
- 0,
- /* Debug UART virtual address */
- 0,
- /* Scratch space for debug macro */
- 0,
-};
-
-#ifdef CONFIG_OF
-void __init tegra_dt_init_irq(void)
-{
- of_clk_init(NULL);
- tegra_pmc_init();
- tegra_init_irq();
- irqchip_init();
- tegra_legacy_irq_syscore_init();
-}
-#endif
-
-void tegra_assert_system_reset(enum reboot_mode mode, const char *cmd)
-{
- void __iomem *reset = IO_ADDRESS(TEGRA_PMC_BASE + 0);
- u32 reg;
-
- reg = readl_relaxed(reset);
- reg |= 0x10;
- writel_relaxed(reg, reset);
-}
-
-static void __init tegra_init_cache(void)
-{
-#ifdef CONFIG_CACHE_L2X0
- int ret;
- void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
- u32 aux_ctrl, cache_type;
-
- cache_type = readl(p + L2X0_CACHE_TYPE);
- aux_ctrl = (cache_type & 0x700) << (17-8);
- aux_ctrl |= 0x7C400001;
-
- ret = l2x0_of_init(aux_ctrl, 0x8200c3fe);
- if (!ret)
- l2x0_saved_regs_addr = virt_to_phys(&l2x0_saved_regs);
-#endif
-
-}
-
-void __init tegra_init_early(void)
-{
- tegra_cpu_reset_handler_init();
- tegra_apb_io_init();
- tegra_init_fuse();
- tegra_init_cache();
- tegra_powergate_init();
- tegra_hotplug_init();
-}
-
-void __init tegra_init_late(void)
-{
- tegra_init_suspend();
- tegra_cpuidle_init();
- tegra_powergate_debugfs_init();
-}
diff --git a/arch/arm/mach-tegra/cpuidle.c b/arch/arm/mach-tegra/cpuidle.c
index 0961dfcf83a4..7bc5d8d667fe 100644
--- a/arch/arm/mach-tegra/cpuidle.c
+++ b/arch/arm/mach-tegra/cpuidle.c
@@ -39,7 +39,9 @@ void __init tegra_cpuidle_init(void)
tegra30_cpuidle_init();
break;
case TEGRA114:
- if (IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC))
+ case TEGRA124:
+ if (IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
+ IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
tegra114_cpuidle_init();
break;
}
diff --git a/arch/arm/mach-tegra/flowctrl.c b/arch/arm/mach-tegra/flowctrl.c
index 5348543382bf..ce8ab8abf061 100644
--- a/arch/arm/mach-tegra/flowctrl.c
+++ b/arch/arm/mach-tegra/flowctrl.c
@@ -87,6 +87,7 @@ void flowctrl_cpu_suspend_enter(unsigned int cpuid)
break;
case TEGRA30:
case TEGRA114:
+ case TEGRA124:
/* clear wfe bitmap */
reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP;
/* clear wfi bitmap */
@@ -125,6 +126,7 @@ void flowctrl_cpu_suspend_exit(unsigned int cpuid)
break;
case TEGRA30:
case TEGRA114:
+ case TEGRA124:
/* clear wfe bitmap */
reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP;
/* clear wfi bitmap */
diff --git a/arch/arm/mach-tegra/fuse.c b/arch/arm/mach-tegra/fuse.c
index e035cd284a6e..9a4e910c3796 100644
--- a/arch/arm/mach-tegra/fuse.c
+++ b/arch/arm/mach-tegra/fuse.c
@@ -21,14 +21,26 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/export.h>
+#include <linux/random.h>
#include <linux/tegra-soc.h>
#include "fuse.h"
#include "iomap.h"
#include "apbio.h"
+/* Tegra20 only */
#define FUSE_UID_LOW 0x108
#define FUSE_UID_HIGH 0x10c
+
+/* Tegra30 and later */
+#define FUSE_VENDOR_CODE 0x200
+#define FUSE_FAB_CODE 0x204
+#define FUSE_LOT_CODE_0 0x208
+#define FUSE_LOT_CODE_1 0x20c
+#define FUSE_WAFER_ID 0x210
+#define FUSE_X_COORDINATE 0x214
+#define FUSE_Y_COORDINATE 0x218
+
#define FUSE_SKU_INFO 0x110
#define TEGRA20_FUSE_SPARE_BIT 0x200
@@ -112,21 +124,51 @@ u32 tegra_read_chipid(void)
return readl_relaxed(IO_ADDRESS(TEGRA_APB_MISC_BASE) + 0x804);
}
-void tegra_init_fuse(void)
+static void __init tegra20_fuse_init_randomness(void)
+{
+ u32 randomness[2];
+
+ randomness[0] = tegra_fuse_readl(FUSE_UID_LOW);
+ randomness[1] = tegra_fuse_readl(FUSE_UID_HIGH);
+
+ add_device_randomness(randomness, sizeof(randomness));
+}
+
+/* Applies to Tegra30 or later */
+static void __init tegra30_fuse_init_randomness(void)
+{
+ u32 randomness[7];
+
+ randomness[0] = tegra_fuse_readl(FUSE_VENDOR_CODE);
+ randomness[1] = tegra_fuse_readl(FUSE_FAB_CODE);
+ randomness[2] = tegra_fuse_readl(FUSE_LOT_CODE_0);
+ randomness[3] = tegra_fuse_readl(FUSE_LOT_CODE_1);
+ randomness[4] = tegra_fuse_readl(FUSE_WAFER_ID);
+ randomness[5] = tegra_fuse_readl(FUSE_X_COORDINATE);
+ randomness[6] = tegra_fuse_readl(FUSE_Y_COORDINATE);
+
+ add_device_randomness(randomness, sizeof(randomness));
+}
+
+void __init tegra_init_fuse(void)
{
u32 id;
+ u32 randomness[5];
u32 reg = readl(IO_ADDRESS(TEGRA_CLK_RESET_BASE + 0x48));
reg |= 1 << 28;
writel(reg, IO_ADDRESS(TEGRA_CLK_RESET_BASE + 0x48));
reg = tegra_fuse_readl(FUSE_SKU_INFO);
+ randomness[0] = reg;
tegra_sku_id = reg & 0xFF;
reg = tegra_apb_readl(TEGRA_APB_MISC_BASE + STRAP_OPT);
+ randomness[1] = reg;
tegra_bct_strapping = (reg & RAM_ID_MASK) >> RAM_CODE_SHIFT;
id = tegra_read_chipid();
+ randomness[2] = id;
tegra_chip_id = (id >> 8) & 0xff;
switch (tegra_chip_id) {
@@ -149,19 +191,21 @@ void tegra_init_fuse(void)
tegra_revision = tegra_get_revision(id);
tegra_init_speedo_data();
+ randomness[3] = (tegra_cpu_process_id << 16) | tegra_core_process_id;
+ randomness[4] = (tegra_cpu_speedo_id << 16) | tegra_soc_speedo_id;
+
+ add_device_randomness(randomness, sizeof(randomness));
+	switch (tegra_chip_id) {
+	case TEGRA20:
+		tegra20_fuse_init_randomness();
+		break;
+	case TEGRA30:
+	case TEGRA114:
+	default:
+		tegra30_fuse_init_randomness();
+	}
pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n",
tegra_revision_name[tegra_revision],
tegra_sku_id, tegra_cpu_process_id,
tegra_core_process_id);
}
-
-unsigned long long tegra_chip_uid(void)
-{
- unsigned long long lo, hi;
-
- lo = tegra_fuse_readl(FUSE_UID_LOW);
- hi = tegra_fuse_readl(FUSE_UID_HIGH);
- return (hi << 32ull) | lo;
-}
-EXPORT_SYMBOL(tegra_chip_uid);
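
The fuse hunk above mixes chip-identification values into the kernel entropy pool with add_device_randomness(), which perturbs the pool state without crediting any entropy. A minimal sketch of the pattern follows; read_soc_uid_low()/read_soc_uid_high() are hypothetical stand-ins for tegra_fuse_readl(FUSE_UID_LOW/FUSE_UID_HIGH).

#include <linux/init.h>
#include <linux/random.h>
#include <linux/types.h>

/* Hypothetical stand-ins for SoC-specific fuse reads (not a real API). */
static u32 read_soc_uid_low(void)  { return 0x12345678; }
static u32 read_soc_uid_high(void) { return 0x9abcdef0; }

static void __init example_seed_from_soc_id(void)
{
	u32 randomness[2];

	randomness[0] = read_soc_uid_low();
	randomness[1] = read_soc_uid_high();

	/* Device-unique but non-secret data: stirred in, no entropy credited */
	add_device_randomness(randomness, sizeof(randomness));
}
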
diff --git a/arch/arm/mach-tegra/fuse.h b/arch/arm/mach-tegra/fuse.h
index def79683bef6..c01d04785d67 100644
--- a/arch/arm/mach-tegra/fuse.h
+++ b/arch/arm/mach-tegra/fuse.h
@@ -29,6 +29,7 @@
#define TEGRA20 0x20
#define TEGRA30 0x30
#define TEGRA114 0x35
+#define TEGRA124 0x40
#ifndef __ASSEMBLY__
enum tegra_revision {
diff --git a/arch/arm/mach-tegra/gpio-names.h b/arch/arm/mach-tegra/gpio-names.h
deleted file mode 100644
index f28220a641b2..000000000000
--- a/arch/arm/mach-tegra/gpio-names.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * arch/arm/mach-tegra/include/mach/gpio-names.h
- *
- * Copyright (c) 2010 Google, Inc
- *
- * Author:
- * Erik Gilling <konkers@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_TEGRA_GPIO_NAMES_H
-#define __MACH_TEGRA_GPIO_NAMES_H
-
-#define TEGRA_GPIO_PA0 0
-#define TEGRA_GPIO_PA1 1
-#define TEGRA_GPIO_PA2 2
-#define TEGRA_GPIO_PA3 3
-#define TEGRA_GPIO_PA4 4
-#define TEGRA_GPIO_PA5 5
-#define TEGRA_GPIO_PA6 6
-#define TEGRA_GPIO_PA7 7
-#define TEGRA_GPIO_PB0 8
-#define TEGRA_GPIO_PB1 9
-#define TEGRA_GPIO_PB2 10
-#define TEGRA_GPIO_PB3 11
-#define TEGRA_GPIO_PB4 12
-#define TEGRA_GPIO_PB5 13
-#define TEGRA_GPIO_PB6 14
-#define TEGRA_GPIO_PB7 15
-#define TEGRA_GPIO_PC0 16
-#define TEGRA_GPIO_PC1 17
-#define TEGRA_GPIO_PC2 18
-#define TEGRA_GPIO_PC3 19
-#define TEGRA_GPIO_PC4 20
-#define TEGRA_GPIO_PC5 21
-#define TEGRA_GPIO_PC6 22
-#define TEGRA_GPIO_PC7 23
-#define TEGRA_GPIO_PD0 24
-#define TEGRA_GPIO_PD1 25
-#define TEGRA_GPIO_PD2 26
-#define TEGRA_GPIO_PD3 27
-#define TEGRA_GPIO_PD4 28
-#define TEGRA_GPIO_PD5 29
-#define TEGRA_GPIO_PD6 30
-#define TEGRA_GPIO_PD7 31
-#define TEGRA_GPIO_PE0 32
-#define TEGRA_GPIO_PE1 33
-#define TEGRA_GPIO_PE2 34
-#define TEGRA_GPIO_PE3 35
-#define TEGRA_GPIO_PE4 36
-#define TEGRA_GPIO_PE5 37
-#define TEGRA_GPIO_PE6 38
-#define TEGRA_GPIO_PE7 39
-#define TEGRA_GPIO_PF0 40
-#define TEGRA_GPIO_PF1 41
-#define TEGRA_GPIO_PF2 42
-#define TEGRA_GPIO_PF3 43
-#define TEGRA_GPIO_PF4 44
-#define TEGRA_GPIO_PF5 45
-#define TEGRA_GPIO_PF6 46
-#define TEGRA_GPIO_PF7 47
-#define TEGRA_GPIO_PG0 48
-#define TEGRA_GPIO_PG1 49
-#define TEGRA_GPIO_PG2 50
-#define TEGRA_GPIO_PG3 51
-#define TEGRA_GPIO_PG4 52
-#define TEGRA_GPIO_PG5 53
-#define TEGRA_GPIO_PG6 54
-#define TEGRA_GPIO_PG7 55
-#define TEGRA_GPIO_PH0 56
-#define TEGRA_GPIO_PH1 57
-#define TEGRA_GPIO_PH2 58
-#define TEGRA_GPIO_PH3 59
-#define TEGRA_GPIO_PH4 60
-#define TEGRA_GPIO_PH5 61
-#define TEGRA_GPIO_PH6 62
-#define TEGRA_GPIO_PH7 63
-#define TEGRA_GPIO_PI0 64
-#define TEGRA_GPIO_PI1 65
-#define TEGRA_GPIO_PI2 66
-#define TEGRA_GPIO_PI3 67
-#define TEGRA_GPIO_PI4 68
-#define TEGRA_GPIO_PI5 69
-#define TEGRA_GPIO_PI6 70
-#define TEGRA_GPIO_PI7 71
-#define TEGRA_GPIO_PJ0 72
-#define TEGRA_GPIO_PJ1 73
-#define TEGRA_GPIO_PJ2 74
-#define TEGRA_GPIO_PJ3 75
-#define TEGRA_GPIO_PJ4 76
-#define TEGRA_GPIO_PJ5 77
-#define TEGRA_GPIO_PJ6 78
-#define TEGRA_GPIO_PJ7 79
-#define TEGRA_GPIO_PK0 80
-#define TEGRA_GPIO_PK1 81
-#define TEGRA_GPIO_PK2 82
-#define TEGRA_GPIO_PK3 83
-#define TEGRA_GPIO_PK4 84
-#define TEGRA_GPIO_PK5 85
-#define TEGRA_GPIO_PK6 86
-#define TEGRA_GPIO_PK7 87
-#define TEGRA_GPIO_PL0 88
-#define TEGRA_GPIO_PL1 89
-#define TEGRA_GPIO_PL2 90
-#define TEGRA_GPIO_PL3 91
-#define TEGRA_GPIO_PL4 92
-#define TEGRA_GPIO_PL5 93
-#define TEGRA_GPIO_PL6 94
-#define TEGRA_GPIO_PL7 95
-#define TEGRA_GPIO_PM0 96
-#define TEGRA_GPIO_PM1 97
-#define TEGRA_GPIO_PM2 98
-#define TEGRA_GPIO_PM3 99
-#define TEGRA_GPIO_PM4 100
-#define TEGRA_GPIO_PM5 101
-#define TEGRA_GPIO_PM6 102
-#define TEGRA_GPIO_PM7 103
-#define TEGRA_GPIO_PN0 104
-#define TEGRA_GPIO_PN1 105
-#define TEGRA_GPIO_PN2 106
-#define TEGRA_GPIO_PN3 107
-#define TEGRA_GPIO_PN4 108
-#define TEGRA_GPIO_PN5 109
-#define TEGRA_GPIO_PN6 110
-#define TEGRA_GPIO_PN7 111
-#define TEGRA_GPIO_PO0 112
-#define TEGRA_GPIO_PO1 113
-#define TEGRA_GPIO_PO2 114
-#define TEGRA_GPIO_PO3 115
-#define TEGRA_GPIO_PO4 116
-#define TEGRA_GPIO_PO5 117
-#define TEGRA_GPIO_PO6 118
-#define TEGRA_GPIO_PO7 119
-#define TEGRA_GPIO_PP0 120
-#define TEGRA_GPIO_PP1 121
-#define TEGRA_GPIO_PP2 122
-#define TEGRA_GPIO_PP3 123
-#define TEGRA_GPIO_PP4 124
-#define TEGRA_GPIO_PP5 125
-#define TEGRA_GPIO_PP6 126
-#define TEGRA_GPIO_PP7 127
-#define TEGRA_GPIO_PQ0 128
-#define TEGRA_GPIO_PQ1 129
-#define TEGRA_GPIO_PQ2 130
-#define TEGRA_GPIO_PQ3 131
-#define TEGRA_GPIO_PQ4 132
-#define TEGRA_GPIO_PQ5 133
-#define TEGRA_GPIO_PQ6 134
-#define TEGRA_GPIO_PQ7 135
-#define TEGRA_GPIO_PR0 136
-#define TEGRA_GPIO_PR1 137
-#define TEGRA_GPIO_PR2 138
-#define TEGRA_GPIO_PR3 139
-#define TEGRA_GPIO_PR4 140
-#define TEGRA_GPIO_PR5 141
-#define TEGRA_GPIO_PR6 142
-#define TEGRA_GPIO_PR7 143
-#define TEGRA_GPIO_PS0 144
-#define TEGRA_GPIO_PS1 145
-#define TEGRA_GPIO_PS2 146
-#define TEGRA_GPIO_PS3 147
-#define TEGRA_GPIO_PS4 148
-#define TEGRA_GPIO_PS5 149
-#define TEGRA_GPIO_PS6 150
-#define TEGRA_GPIO_PS7 151
-#define TEGRA_GPIO_PT0 152
-#define TEGRA_GPIO_PT1 153
-#define TEGRA_GPIO_PT2 154
-#define TEGRA_GPIO_PT3 155
-#define TEGRA_GPIO_PT4 156
-#define TEGRA_GPIO_PT5 157
-#define TEGRA_GPIO_PT6 158
-#define TEGRA_GPIO_PT7 159
-#define TEGRA_GPIO_PU0 160
-#define TEGRA_GPIO_PU1 161
-#define TEGRA_GPIO_PU2 162
-#define TEGRA_GPIO_PU3 163
-#define TEGRA_GPIO_PU4 164
-#define TEGRA_GPIO_PU5 165
-#define TEGRA_GPIO_PU6 166
-#define TEGRA_GPIO_PU7 167
-#define TEGRA_GPIO_PV0 168
-#define TEGRA_GPIO_PV1 169
-#define TEGRA_GPIO_PV2 170
-#define TEGRA_GPIO_PV3 171
-#define TEGRA_GPIO_PV4 172
-#define TEGRA_GPIO_PV5 173
-#define TEGRA_GPIO_PV6 174
-#define TEGRA_GPIO_PV7 175
-#define TEGRA_GPIO_PW0 176
-#define TEGRA_GPIO_PW1 177
-#define TEGRA_GPIO_PW2 178
-#define TEGRA_GPIO_PW3 179
-#define TEGRA_GPIO_PW4 180
-#define TEGRA_GPIO_PW5 181
-#define TEGRA_GPIO_PW6 182
-#define TEGRA_GPIO_PW7 183
-#define TEGRA_GPIO_PX0 184
-#define TEGRA_GPIO_PX1 185
-#define TEGRA_GPIO_PX2 186
-#define TEGRA_GPIO_PX3 187
-#define TEGRA_GPIO_PX4 188
-#define TEGRA_GPIO_PX5 189
-#define TEGRA_GPIO_PX6 190
-#define TEGRA_GPIO_PX7 191
-#define TEGRA_GPIO_PY0 192
-#define TEGRA_GPIO_PY1 193
-#define TEGRA_GPIO_PY2 194
-#define TEGRA_GPIO_PY3 195
-#define TEGRA_GPIO_PY4 196
-#define TEGRA_GPIO_PY5 197
-#define TEGRA_GPIO_PY6 198
-#define TEGRA_GPIO_PY7 199
-#define TEGRA_GPIO_PZ0 200
-#define TEGRA_GPIO_PZ1 201
-#define TEGRA_GPIO_PZ2 202
-#define TEGRA_GPIO_PZ3 203
-#define TEGRA_GPIO_PZ4 204
-#define TEGRA_GPIO_PZ5 205
-#define TEGRA_GPIO_PZ6 206
-#define TEGRA_GPIO_PZ7 207
-#define TEGRA_GPIO_PAA0 208
-#define TEGRA_GPIO_PAA1 209
-#define TEGRA_GPIO_PAA2 210
-#define TEGRA_GPIO_PAA3 211
-#define TEGRA_GPIO_PAA4 212
-#define TEGRA_GPIO_PAA5 213
-#define TEGRA_GPIO_PAA6 214
-#define TEGRA_GPIO_PAA7 215
-#define TEGRA_GPIO_PBB0 216
-#define TEGRA_GPIO_PBB1 217
-#define TEGRA_GPIO_PBB2 218
-#define TEGRA_GPIO_PBB3 219
-#define TEGRA_GPIO_PBB4 220
-#define TEGRA_GPIO_PBB5 221
-#define TEGRA_GPIO_PBB6 222
-#define TEGRA_GPIO_PBB7 223
-
-#endif
diff --git a/arch/arm/mach-tegra/hotplug.c b/arch/arm/mach-tegra/hotplug.c
index 04de2e860923..ff26af26bd0c 100644
--- a/arch/arm/mach-tegra/hotplug.c
+++ b/arch/arm/mach-tegra/hotplug.c
@@ -57,4 +57,6 @@ void __init tegra_hotplug_init(void)
tegra_hotplug_shutdown = tegra30_hotplug_shutdown;
if (IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) && tegra_chip_id == TEGRA114)
tegra_hotplug_shutdown = tegra30_hotplug_shutdown;
+ if (IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) && tegra_chip_id == TEGRA124)
+ tegra_hotplug_shutdown = tegra30_hotplug_shutdown;
}
diff --git a/arch/arm/mach-tegra/iomap.h b/arch/arm/mach-tegra/iomap.h
index 3f5fa0749bde..26b1c2ad0ceb 100644
--- a/arch/arm/mach-tegra/iomap.h
+++ b/arch/arm/mach-tegra/iomap.h
@@ -24,44 +24,12 @@
#define TEGRA_IRAM_BASE 0x40000000
#define TEGRA_IRAM_SIZE SZ_256K
-#define TEGRA_IRAM_CODE_AREA (TEGRA_IRAM_BASE + SZ_4K)
-
-#define TEGRA_HOST1X_BASE 0x50000000
-#define TEGRA_HOST1X_SIZE 0x24000
-
#define TEGRA_ARM_PERIF_BASE 0x50040000
#define TEGRA_ARM_PERIF_SIZE SZ_8K
-#define TEGRA_ARM_PL310_BASE 0x50043000
-#define TEGRA_ARM_PL310_SIZE SZ_4K
-
#define TEGRA_ARM_INT_DIST_BASE 0x50041000
#define TEGRA_ARM_INT_DIST_SIZE SZ_4K
-#define TEGRA_MPE_BASE 0x54040000
-#define TEGRA_MPE_SIZE SZ_256K
-
-#define TEGRA_VI_BASE 0x54080000
-#define TEGRA_VI_SIZE SZ_256K
-
-#define TEGRA_ISP_BASE 0x54100000
-#define TEGRA_ISP_SIZE SZ_256K
-
-#define TEGRA_DISPLAY_BASE 0x54200000
-#define TEGRA_DISPLAY_SIZE SZ_256K
-
-#define TEGRA_DISPLAY2_BASE 0x54240000
-#define TEGRA_DISPLAY2_SIZE SZ_256K
-
-#define TEGRA_HDMI_BASE 0x54280000
-#define TEGRA_HDMI_SIZE SZ_256K
-
-#define TEGRA_GART_BASE 0x58000000
-#define TEGRA_GART_SIZE SZ_32M
-
-#define TEGRA_RES_SEMA_BASE 0x60001000
-#define TEGRA_RES_SEMA_SIZE SZ_4K
-
#define TEGRA_PRIMARY_ICTLR_BASE 0x60004000
#define TEGRA_PRIMARY_ICTLR_SIZE SZ_64
@@ -98,51 +66,15 @@
#define TEGRA_FLOW_CTRL_BASE 0x60007000
#define TEGRA_FLOW_CTRL_SIZE 20
-#define TEGRA_AHB_DMA_BASE 0x60008000
-#define TEGRA_AHB_DMA_SIZE SZ_4K
-
-#define TEGRA_AHB_DMA_CH0_BASE 0x60009000
-#define TEGRA_AHB_DMA_CH0_SIZE 32
-
-#define TEGRA_APB_DMA_BASE 0x6000A000
-#define TEGRA_APB_DMA_SIZE SZ_4K
-
-#define TEGRA_APB_DMA_CH0_BASE 0x6000B000
-#define TEGRA_APB_DMA_CH0_SIZE 32
-
-#define TEGRA_AHB_GIZMO_BASE 0x6000C004
-#define TEGRA_AHB_GIZMO_SIZE 0x10C
-
#define TEGRA_SB_BASE 0x6000C200
#define TEGRA_SB_SIZE 256
-#define TEGRA_STATMON_BASE 0x6000C400
-#define TEGRA_STATMON_SIZE SZ_1K
-
-#define TEGRA_GPIO_BASE 0x6000D000
-#define TEGRA_GPIO_SIZE SZ_4K
-
#define TEGRA_EXCEPTION_VECTORS_BASE 0x6000F000
#define TEGRA_EXCEPTION_VECTORS_SIZE SZ_4K
#define TEGRA_APB_MISC_BASE 0x70000000
#define TEGRA_APB_MISC_SIZE SZ_4K
-#define TEGRA_APB_MISC_DAS_BASE 0x70000c00
-#define TEGRA_APB_MISC_DAS_SIZE SZ_128
-
-#define TEGRA_AC97_BASE 0x70002000
-#define TEGRA_AC97_SIZE SZ_512
-
-#define TEGRA_SPDIF_BASE 0x70002400
-#define TEGRA_SPDIF_SIZE SZ_512
-
-#define TEGRA_I2S1_BASE 0x70002800
-#define TEGRA_I2S1_SIZE SZ_256
-
-#define TEGRA_I2S2_BASE 0x70002A00
-#define TEGRA_I2S2_SIZE SZ_256
-
#define TEGRA_UARTA_BASE 0x70006000
#define TEGRA_UARTA_SIZE SZ_64
@@ -158,108 +90,27 @@
#define TEGRA_UARTE_BASE 0x70006400
#define TEGRA_UARTE_SIZE SZ_256
-#define TEGRA_NAND_BASE 0x70008000
-#define TEGRA_NAND_SIZE SZ_256
-
-#define TEGRA_HSMMC_BASE 0x70008500
-#define TEGRA_HSMMC_SIZE SZ_256
-
-#define TEGRA_SNOR_BASE 0x70009000
-#define TEGRA_SNOR_SIZE SZ_4K
-
-#define TEGRA_PWFM_BASE 0x7000A000
-#define TEGRA_PWFM_SIZE SZ_256
-
-#define TEGRA_PWFM0_BASE 0x7000A000
-#define TEGRA_PWFM0_SIZE 4
-
-#define TEGRA_PWFM1_BASE 0x7000A010
-#define TEGRA_PWFM1_SIZE 4
-
-#define TEGRA_PWFM2_BASE 0x7000A020
-#define TEGRA_PWFM2_SIZE 4
-
-#define TEGRA_PWFM3_BASE 0x7000A030
-#define TEGRA_PWFM3_SIZE 4
-
-#define TEGRA_MIPI_BASE 0x7000B000
-#define TEGRA_MIPI_SIZE SZ_256
-
-#define TEGRA_I2C_BASE 0x7000C000
-#define TEGRA_I2C_SIZE SZ_256
-
-#define TEGRA_TWC_BASE 0x7000C100
-#define TEGRA_TWC_SIZE SZ_256
-
-#define TEGRA_SPI_BASE 0x7000C380
-#define TEGRA_SPI_SIZE 48
-
-#define TEGRA_I2C2_BASE 0x7000C400
-#define TEGRA_I2C2_SIZE SZ_256
-
-#define TEGRA_I2C3_BASE 0x7000C500
-#define TEGRA_I2C3_SIZE SZ_256
-
-#define TEGRA_OWR_BASE 0x7000C600
-#define TEGRA_OWR_SIZE 80
-
-#define TEGRA_DVC_BASE 0x7000D000
-#define TEGRA_DVC_SIZE SZ_512
-
-#define TEGRA_SPI1_BASE 0x7000D400
-#define TEGRA_SPI1_SIZE SZ_512
-
-#define TEGRA_SPI2_BASE 0x7000D600
-#define TEGRA_SPI2_SIZE SZ_512
-
-#define TEGRA_SPI3_BASE 0x7000D800
-#define TEGRA_SPI3_SIZE SZ_512
-
-#define TEGRA_SPI4_BASE 0x7000DA00
-#define TEGRA_SPI4_SIZE SZ_512
-
-#define TEGRA_RTC_BASE 0x7000E000
-#define TEGRA_RTC_SIZE SZ_256
-
-#define TEGRA_KBC_BASE 0x7000E200
-#define TEGRA_KBC_SIZE SZ_256
-
#define TEGRA_PMC_BASE 0x7000E400
#define TEGRA_PMC_SIZE SZ_256
-#define TEGRA_MC_BASE 0x7000F000
-#define TEGRA_MC_SIZE SZ_1K
-
#define TEGRA_EMC_BASE 0x7000F400
#define TEGRA_EMC_SIZE SZ_1K
#define TEGRA_FUSE_BASE 0x7000F800
#define TEGRA_FUSE_SIZE SZ_1K
-#define TEGRA_KFUSE_BASE 0x7000FC00
-#define TEGRA_KFUSE_SIZE SZ_1K
-
#define TEGRA_EMC0_BASE 0x7001A000
#define TEGRA_EMC0_SIZE SZ_2K
#define TEGRA_EMC1_BASE 0x7001A800
#define TEGRA_EMC1_SIZE SZ_2K
+#define TEGRA124_EMC_BASE 0x7001B000
+#define TEGRA124_EMC_SIZE SZ_2K
+
#define TEGRA_CSITE_BASE 0x70040000
#define TEGRA_CSITE_SIZE SZ_256K
-#define TEGRA_SDMMC1_BASE 0xC8000000
-#define TEGRA_SDMMC1_SIZE SZ_512
-
-#define TEGRA_SDMMC2_BASE 0xC8000200
-#define TEGRA_SDMMC2_SIZE SZ_512
-
-#define TEGRA_SDMMC3_BASE 0xC8000400
-#define TEGRA_SDMMC3_SIZE SZ_512
-
-#define TEGRA_SDMMC4_BASE 0xC8000600
-#define TEGRA_SDMMC4_SIZE SZ_512
-
/* On TEGRA, many peripherals are very closely packed in
* two 256MB io windows (that actually only use about 64KB
* at the start of each).
diff --git a/arch/arm/mach-tegra/irammap.h b/arch/arm/mach-tegra/irammap.h
index 501952a84344..e32e1742c9a1 100644
--- a/arch/arm/mach-tegra/irammap.h
+++ b/arch/arm/mach-tegra/irammap.h
@@ -23,4 +23,10 @@
#define TEGRA_IRAM_RESET_HANDLER_OFFSET 0
#define TEGRA_IRAM_RESET_HANDLER_SIZE SZ_1K
+/*
+ * This area is used for LPx resume vector, only while LPx power state is
+ * active. At other times, the AVP may use this area for arbitrary purposes
+ */
+#define TEGRA_IRAM_LPx_RESUME_AREA (TEGRA_IRAM_BASE + SZ_4K)
+
#endif
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 2d0203627fbb..eb72ae709124 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -176,6 +176,8 @@ static int tegra_boot_secondary(unsigned int cpu,
return tegra30_boot_secondary(cpu, idle);
if (IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) && tegra_chip_id == TEGRA114)
return tegra114_boot_secondary(cpu, idle);
+ if (IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) && tegra_chip_id == TEGRA124)
+ return tegra114_boot_secondary(cpu, idle);
return -EINVAL;
}
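
The new Tegra124 case reuses tegra114_boot_secondary() behind an IS_ENABLED() guard. The guard folds to a compile-time constant, so with the optimizer on the dead branch, and with it the reference to a helper that may not be built, is discarded when the option is off. A sketch of the idiom with a made-up CONFIG_FOO and helper:

#include <linux/errno.h>
#include <linux/kconfig.h>

int frobnicate_foo(void);	/* hypothetical helper, possibly not built */

static int maybe_frobnicate(int chip)
{
	/* Folds to "if (0 && ...)" when CONFIG_FOO is disabled, so the
	 * call below is compiled out rather than left as a link error. */
	if (IS_ENABLED(CONFIG_FOO) && chip == 42)
		return frobnicate_foo();

	return -EINVAL;
}
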
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
index ed294a04e1d3..4ae0286b468d 100644
--- a/arch/arm/mach-tegra/pm.c
+++ b/arch/arm/mach-tegra/pm.c
@@ -59,8 +59,10 @@ static void tegra_tear_down_cpu_init(void)
break;
case TEGRA30:
case TEGRA114:
+ case TEGRA124:
if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
- IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC))
+ IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
+ IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
tegra_tear_down_cpu = tegra30_tear_down_cpu;
break;
}
@@ -216,8 +218,10 @@ static bool tegra_lp1_iram_hook(void)
break;
case TEGRA30:
case TEGRA114:
+ case TEGRA124:
if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
- IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC))
+ IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
+ IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
tegra30_lp1_iram_hook();
break;
default:
@@ -244,8 +248,10 @@ static bool tegra_sleep_core_init(void)
break;
case TEGRA30:
case TEGRA114:
+ case TEGRA124:
if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
- IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC))
+ IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
+ IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
tegra30_sleep_core_init();
break;
default:
@@ -263,10 +269,10 @@ static void tegra_suspend_enter_lp1(void)
tegra_pmc_suspend();
/* copy the reset vector & SDRAM shutdown code into IRAM */
- memcpy(iram_save_addr, IO_ADDRESS(TEGRA_IRAM_CODE_AREA),
- iram_save_size);
- memcpy(IO_ADDRESS(TEGRA_IRAM_CODE_AREA), tegra_lp1_iram.start_addr,
+ memcpy(iram_save_addr, IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA),
iram_save_size);
+ memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA),
+ tegra_lp1_iram.start_addr, iram_save_size);
*((u32 *)tegra_cpu_lp1_mask) = 1;
}
@@ -276,7 +282,7 @@ static void tegra_suspend_exit_lp1(void)
tegra_pmc_resume();
/* restore IRAM */
- memcpy(IO_ADDRESS(TEGRA_IRAM_CODE_AREA), iram_save_addr,
+ memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA), iram_save_addr,
iram_save_size);
*(u32 *)tegra_cpu_lp1_mask = 0;
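
The LP1 enter/exit paths above save the previous contents of the IRAM LPx resume area, install the LP1 stub, and put the old contents back on resume. A compact sketch of that save/overwrite/restore sequence; the pointers and size are assumed to be set up elsewhere.

#include <linux/string.h>
#include <linux/types.h>

static void *iram;		/* mapping of TEGRA_IRAM_LPx_RESUME_AREA */
static void *save_buf;		/* backup buffer, at least stub_size bytes */
static const void *stub;	/* LP1 resume stub to place in IRAM */
static size_t stub_size;

static void example_enter_lp1(void)
{
	memcpy(save_buf, iram, stub_size);	/* preserve current contents */
	memcpy(iram, stub, stub_size);		/* install the resume stub */
}

static void example_exit_lp1(void)
{
	memcpy(iram, save_buf, stub_size);	/* restore what was there */
}
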
diff --git a/arch/arm/mach-tegra/pm.h b/arch/arm/mach-tegra/pm.h
index fe204e5256e7..6e92a7c2ecbd 100644
--- a/arch/arm/mach-tegra/pm.h
+++ b/arch/arm/mach-tegra/pm.h
@@ -37,9 +37,6 @@ void tegra30_sleep_core_init(void);
extern unsigned long l2x0_saved_regs_addr;
-void save_cpu_arch_register(void);
-void restore_cpu_arch_register(void);
-
void tegra_clear_cpu_in_lp2(void);
bool tegra_set_cpu_in_lp2(void);
diff --git a/arch/arm/mach-tegra/pmc.c b/arch/arm/mach-tegra/pmc.c
index 8acb881f7cfe..fb7920201ab4 100644
--- a/arch/arm/mach-tegra/pmc.c
+++ b/arch/arm/mach-tegra/pmc.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/tegra-powergate.h>
#include "flowctrl.h"
#include "fuse.h"
@@ -43,12 +44,6 @@
#define PMC_CPUPWRGOOD_TIMER 0xc8
#define PMC_CPUPWROFF_TIMER 0xcc
-#define TEGRA_POWERGATE_PCIE 3
-#define TEGRA_POWERGATE_VDEC 4
-#define TEGRA_POWERGATE_CPU1 9
-#define TEGRA_POWERGATE_CPU2 10
-#define TEGRA_POWERGATE_CPU3 11
-
static u8 tegra_cpu_domains[] = {
0xFF, /* not available for CPU0 */
TEGRA_POWERGATE_CPU1,
@@ -166,6 +161,15 @@ int tegra_pmc_cpu_remove_clamping(int cpuid)
return tegra_pmc_powergate_remove_clamping(id);
}
+void tegra_pmc_restart(enum reboot_mode mode, const char *cmd)
+{
+ u32 val;
+
+ val = tegra_pmc_readl(0);
+ val |= 0x10;
+ tegra_pmc_writel(val, 0);
+}
+
#ifdef CONFIG_PM_SLEEP
static void set_power_timers(u32 us_on, u32 us_off, unsigned long rate)
{
@@ -279,19 +283,17 @@ void tegra_pmc_suspend_init(void)
#endif
static const struct of_device_id matches[] __initconst = {
+ { .compatible = "nvidia,tegra124-pmc" },
{ .compatible = "nvidia,tegra114-pmc" },
{ .compatible = "nvidia,tegra30-pmc" },
{ .compatible = "nvidia,tegra20-pmc" },
{ }
};
-static void __init tegra_pmc_parse_dt(void)
+void __init tegra_pmc_init_irq(void)
{
struct device_node *np;
- u32 prop;
- enum tegra_suspend_mode suspend_mode;
- u32 core_good_time[2] = {0, 0};
- u32 lp0_vec[2] = {0, 0};
+ u32 val;
np = of_find_matching_node(NULL, matches);
BUG_ON(!np);
@@ -300,6 +302,26 @@ static void __init tegra_pmc_parse_dt(void)
tegra_pmc_invert_interrupt = of_property_read_bool(np,
"nvidia,invert-interrupt");
+
+ val = tegra_pmc_readl(PMC_CTRL);
+ if (tegra_pmc_invert_interrupt)
+ val |= PMC_CTRL_INTR_LOW;
+ else
+ val &= ~PMC_CTRL_INTR_LOW;
+ tegra_pmc_writel(val, PMC_CTRL);
+}
+
+void __init tegra_pmc_init(void)
+{
+ struct device_node *np;
+ u32 prop;
+ enum tegra_suspend_mode suspend_mode;
+ u32 core_good_time[2] = {0, 0};
+ u32 lp0_vec[2] = {0, 0};
+
+ np = of_find_matching_node(NULL, matches);
+ BUG_ON(!np);
+
tegra_pclk = of_clk_get_by_name(np, "pclk");
WARN_ON(IS_ERR(tegra_pclk));
@@ -365,17 +387,3 @@ static void __init tegra_pmc_parse_dt(void)
pmc_pm_data.suspend_mode = suspend_mode;
}
-
-void __init tegra_pmc_init(void)
-{
- u32 val;
-
- tegra_pmc_parse_dt();
-
- val = tegra_pmc_readl(PMC_CTRL);
- if (tegra_pmc_invert_interrupt)
- val |= PMC_CTRL_INTR_LOW;
- else
- val &= ~PMC_CTRL_INTR_LOW;
- tegra_pmc_writel(val, PMC_CTRL);
-}
diff --git a/arch/arm/mach-tegra/pmc.h b/arch/arm/mach-tegra/pmc.h
index 549f8c7b762c..59e19c344298 100644
--- a/arch/arm/mach-tegra/pmc.h
+++ b/arch/arm/mach-tegra/pmc.h
@@ -18,6 +18,8 @@
#ifndef __MACH_TEGRA_PMC_H
#define __MACH_TEGRA_PMC_H
+#include <linux/reboot.h>
+
enum tegra_suspend_mode {
TEGRA_SUSPEND_NONE = 0,
TEGRA_SUSPEND_LP2, /* CPU voltage off */
@@ -39,6 +41,9 @@ bool tegra_pmc_cpu_is_powered(int cpuid);
int tegra_pmc_cpu_power_on(int cpuid);
int tegra_pmc_cpu_remove_clamping(int cpuid);
+void tegra_pmc_restart(enum reboot_mode mode, const char *cmd);
+
+void tegra_pmc_init_irq(void);
void tegra_pmc_init(void);
#endif
diff --git a/arch/arm/mach-tegra/powergate.c b/arch/arm/mach-tegra/powergate.c
index f076f0f80fcd..85d28e756bb7 100644
--- a/arch/arm/mach-tegra/powergate.c
+++ b/arch/arm/mach-tegra/powergate.c
@@ -42,8 +42,16 @@
static int tegra_num_powerdomains;
static int tegra_num_cpu_domains;
-static u8 *tegra_cpu_domains;
-static u8 tegra30_cpu_domains[] = {
+static const u8 *tegra_cpu_domains;
+
+static const u8 tegra30_cpu_domains[] = {
+ TEGRA_POWERGATE_CPU,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+static const u8 tegra114_cpu_domains[] = {
TEGRA_POWERGATE_CPU0,
TEGRA_POWERGATE_CPU1,
TEGRA_POWERGATE_CPU2,
@@ -189,6 +197,11 @@ int __init tegra_powergate_init(void)
tegra_num_cpu_domains = 4;
tegra_cpu_domains = tegra30_cpu_domains;
break;
+ case TEGRA114:
+ tegra_num_powerdomains = 23;
+ tegra_num_cpu_domains = 4;
+ tegra_cpu_domains = tegra114_cpu_domains;
+ break;
default:
/* Unknown Tegra variant. Disable powergating */
tegra_num_powerdomains = 0;
@@ -229,6 +242,27 @@ static const char * const powergate_name_t30[] = {
[TEGRA_POWERGATE_3D1] = "3d1",
};
+static const char * const powergate_name_t114[] = {
+ [TEGRA_POWERGATE_CPU] = "cpu0",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_HEG] = "heg",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CELP] = "celp",
+ [TEGRA_POWERGATE_CPU0] = "cpu0",
+ [TEGRA_POWERGATE_C0NC] = "c0nc",
+ [TEGRA_POWERGATE_C1NC] = "c1nc",
+ [TEGRA_POWERGATE_DIS] = "dis",
+ [TEGRA_POWERGATE_DISB] = "disb",
+ [TEGRA_POWERGATE_XUSBA] = "xusba",
+ [TEGRA_POWERGATE_XUSBB] = "xusbb",
+ [TEGRA_POWERGATE_XUSBC] = "xusbc",
+};
+
static int powergate_show(struct seq_file *s, void *data)
{
int i;
@@ -236,9 +270,14 @@ static int powergate_show(struct seq_file *s, void *data)
seq_printf(s, " powergate powered\n");
seq_printf(s, "------------------\n");
- for (i = 0; i < tegra_num_powerdomains; i++)
+ for (i = 0; i < tegra_num_powerdomains; i++) {
+ if (!powergate_name[i])
+ continue;
+
seq_printf(s, " %9s %7s\n", powergate_name[i],
tegra_powergate_is_powered(i) ? "yes" : "no");
+ }
+
return 0;
}
@@ -265,6 +304,9 @@ int __init tegra_powergate_debugfs_init(void)
case TEGRA30:
powergate_name = powergate_name_t30;
break;
+ case TEGRA114:
+ powergate_name = powergate_name_t114;
+ break;
}
if (powergate_name) {
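
powergate_show() now skips empty slots because the Tegra114 name table is built with designated initializers, which leave NULL holes at any index that is not explicitly named. A small stand-alone example of the same situation:

#include <stdio.h>

/* Indices 1, 2 and 4 are not named, so they are zero-initialized (NULL). */
static const char *const names[] = {
	[0] = "cpu0",
	[3] = "vdec",
	[5] = "mpe",
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		if (!names[i])
			continue;	/* hole left by the initializer */
		printf("%u: %s\n", i, names[i]);
	}
	return 0;
}
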
diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
index f527b2c2dea7..8c1ba4fea384 100644
--- a/arch/arm/mach-tegra/reset-handler.S
+++ b/arch/arm/mach-tegra/reset-handler.S
@@ -45,17 +45,11 @@
ENTRY(tegra_resume)
check_cpu_part_num 0xc09, r8, r9
bleq v7_invalidate_l1
- blne tegra_init_l2_for_a15
cpu_id r0
- tegra_get_soc_id TEGRA_APB_MISC_BASE, r6
- cmp r6, #TEGRA114
- beq no_cpu0_chk
-
cmp r0, #0 @ CPU0?
THUMB( it ne )
bne cpu_resume @ no
-no_cpu0_chk:
/* Are we on Tegra20? */
cmp r6, #TEGRA20
@@ -75,7 +69,7 @@ no_cpu0_chk:
mov32 r9, 0xc09
cmp r8, r9
- bne not_ca9
+ bne end_ca9_scu_l2_resume
#ifdef CONFIG_HAVE_ARM_SCU
/* enable SCU */
mov32 r0, TEGRA_ARM_PERIF_BASE
@@ -86,7 +80,10 @@ no_cpu0_chk:
/* L2 cache resume & re-enable */
l2_cache_resume r0, r1, r2, l2x0_saved_regs_addr
-not_ca9:
+end_ca9_scu_l2_resume:
+ mov32 r9, 0xc0f
+ cmp r8, r9
+ bleq tegra_init_l2_for_a15
b cpu_resume
ENDPROC(tegra_resume)
diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c
index fd0bbf8a6c94..568f5bbf979d 100644
--- a/arch/arm/mach-tegra/reset.c
+++ b/arch/arm/mach-tegra/reset.c
@@ -82,7 +82,7 @@ void __init tegra_cpu_reset_handler_init(void)
#ifdef CONFIG_PM_SLEEP
__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP1] =
- TEGRA_IRAM_CODE_AREA;
+ TEGRA_IRAM_LPx_RESUME_AREA;
__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP2] =
virt_to_phys((void *)tegra_resume);
#endif
diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
index 5c3bd11c9838..aaaf3abd2688 100644
--- a/arch/arm/mach-tegra/sleep-tegra20.S
+++ b/arch/arm/mach-tegra/sleep-tegra20.S
@@ -25,6 +25,7 @@
#include <asm/cp15.h>
#include <asm/cache.h>
+#include "irammap.h"
#include "sleep.h"
#include "flowctrl.h"
@@ -235,7 +236,7 @@ ENTRY(tegra20_sleep_core_finish)
mov32 r0, tegra20_tear_down_core
mov32 r1, tegra20_iram_start
sub r0, r0, r1
- mov32 r1, TEGRA_IRAM_CODE_AREA
+ mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA
add r0, r0, r1
mov pc, r3
@@ -328,7 +329,7 @@ tegra20_iram_start:
* The physical address of tegra_resume expected to be stored in
* PMC_SCRATCH41.
*
- * NOTE: THIS *MUST* BE RELOCATED TO TEGRA_IRAM_CODE_AREA.
+ * NOTE: THIS *MUST* BE RELOCATED TO TEGRA_IRAM_LPx_RESUME_AREA.
*/
ENTRY(tegra20_lp1_reset)
/*
diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S
index 63fa91b5fafb..b16d4a57fa59 100644
--- a/arch/arm/mach-tegra/sleep-tegra30.S
+++ b/arch/arm/mach-tegra/sleep-tegra30.S
@@ -20,6 +20,7 @@
#include <asm/asm-offsets.h>
#include <asm/cache.h>
+#include "irammap.h"
#include "fuse.h"
#include "sleep.h"
#include "flowctrl.h"
@@ -262,7 +263,7 @@ ENTRY(tegra30_sleep_core_finish)
mov32 r0, tegra30_tear_down_core
mov32 r1, tegra30_iram_start
sub r0, r0, r1
- mov32 r1, TEGRA_IRAM_CODE_AREA
+ mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA
add r0, r0, r1
mov pc, r3
@@ -314,7 +315,7 @@ tegra30_iram_start:
* The physical address of tegra_resume expected to be stored in
* PMC_SCRATCH41.
*
- * NOTE: THIS *MUST* BE RELOCATED TO TEGRA_IRAM_CODE_AREA.
+ * NOTE: THIS *MUST* BE RELOCATED TO TEGRA_IRAM_LPx_RESUME_AREA.
*/
ENTRY(tegra30_lp1_reset)
/*
@@ -382,7 +383,7 @@ _pll_m_c_x_done:
add r1, r1, #LOCK_DELAY
wait_until r1, r7, r3
- adr r5, tegra30_sdram_pad_save
+ adr r5, tegra_sdram_pad_save
ldr r4, [r5, #0x18] @ restore CLK_SOURCE_MSELECT
str r4, [r0, #CLK_RESET_CLK_SOURCE_MSELECT]
@@ -407,8 +408,12 @@ _pll_m_c_x_done:
cmp r10, #TEGRA30
movweq r0, #:lower16:TEGRA_EMC_BASE @ r0 reserved for emc base
movteq r0, #:upper16:TEGRA_EMC_BASE
- movwne r0, #:lower16:TEGRA_EMC0_BASE
- movtne r0, #:upper16:TEGRA_EMC0_BASE
+ cmp r10, #TEGRA114
+ movweq r0, #:lower16:TEGRA_EMC0_BASE
+ movteq r0, #:upper16:TEGRA_EMC0_BASE
+ cmp r10, #TEGRA124
+ movweq r0, #:lower16:TEGRA124_EMC_BASE
+ movteq r0, #:upper16:TEGRA124_EMC_BASE
exit_self_refresh:
ldr r1, [r5, #0xC] @ restore EMC_XM2VTTGENPADCTRL
@@ -537,6 +542,7 @@ tegra30_sdram_pad_address:
.word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14
.word TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18
.word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c
+tegra30_sdram_pad_address_end:
tegra114_sdram_pad_address:
.word TEGRA_EMC0_BASE + EMC_CFG @0x0
@@ -552,16 +558,28 @@ tegra114_sdram_pad_address:
.word TEGRA_EMC1_BASE + EMC_AUTO_CAL_INTERVAL @0x28
.word TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL @0x2c
.word TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL2 @0x30
+tegra114_sdram_pad_address_end:
+
+tegra124_sdram_pad_address:
+ .word TEGRA124_EMC_BASE + EMC_CFG @0x0
+ .word TEGRA124_EMC_BASE + EMC_ZCAL_INTERVAL @0x4
+ .word TEGRA124_EMC_BASE + EMC_AUTO_CAL_INTERVAL @0x8
+ .word TEGRA124_EMC_BASE + EMC_XM2VTTGENPADCTRL @0xc
+ .word TEGRA124_EMC_BASE + EMC_XM2VTTGENPADCTRL2 @0x10
+ .word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14
+ .word TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18
+ .word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c
+tegra124_sdram_pad_address_end:
tegra30_sdram_pad_size:
- .word tegra114_sdram_pad_address - tegra30_sdram_pad_address
+ .word tegra30_sdram_pad_address_end - tegra30_sdram_pad_address
tegra114_sdram_pad_size:
- .word tegra30_sdram_pad_size - tegra114_sdram_pad_address
+	.word tegra114_sdram_pad_address_end - tegra114_sdram_pad_address
- .type tegra30_sdram_pad_save, %object
-tegra30_sdram_pad_save:
- .rept (tegra30_sdram_pad_size - tegra114_sdram_pad_address) / 4
+ .type tegra_sdram_pad_save, %object
+tegra_sdram_pad_save:
+	.rept (tegra114_sdram_pad_address_end - tegra114_sdram_pad_address) / 4
.long 0
.endr
@@ -692,13 +710,18 @@ halted:
*/
tegra30_sdram_self_refresh:
- adr r8, tegra30_sdram_pad_save
+ adr r8, tegra_sdram_pad_save
tegra_get_soc_id TEGRA_APB_MISC_BASE, r10
cmp r10, #TEGRA30
adreq r2, tegra30_sdram_pad_address
ldreq r3, tegra30_sdram_pad_size
- adrne r2, tegra114_sdram_pad_address
- ldrne r3, tegra114_sdram_pad_size
+ cmp r10, #TEGRA114
+ adreq r2, tegra114_sdram_pad_address
+ ldreq r3, tegra114_sdram_pad_size
+ cmp r10, #TEGRA124
+ adreq r2, tegra124_sdram_pad_address
+ ldreq r3, tegra30_sdram_pad_size
+
mov r9, #0
padsave:
@@ -716,7 +739,10 @@ padsave_done:
cmp r10, #TEGRA30
ldreq r0, =TEGRA_EMC_BASE @ r0 reserved for emc base addr
- ldrne r0, =TEGRA_EMC0_BASE
+ cmp r10, #TEGRA114
+ ldreq r0, =TEGRA_EMC0_BASE
+ cmp r10, #TEGRA124
+ ldreq r0, =TEGRA124_EMC_BASE
enter_self_refresh:
cmp r10, #TEGRA30
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index 5b8605547a09..ce553d557c31 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -16,7 +16,6 @@
*
*/
-#include <linux/clocksource.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
@@ -34,16 +33,78 @@
#include <linux/sys_soc.h>
#include <linux/usb/tegra_usb_phy.h>
#include <linux/clk/tegra.h>
+#include <linux/irqchip.h>
+#include <asm/hardware/cache-l2x0.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/setup.h>
+#include "apbio.h"
#include "board.h"
#include "common.h"
+#include "cpuidle.h"
#include "fuse.h"
#include "iomap.h"
+#include "irq.h"
+#include "pmc.h"
+#include "pm.h"
+#include "reset.h"
+#include "sleep.h"
+
+/*
+ * Storage for debug-macro.S's state.
+ *
+ * This must be in .data not .bss so that it gets initialized each time the
+ * kernel is loaded. The data is declared here rather than debug-macro.S so
+ * that multiple inclusions of debug-macro.S point at the same data.
+ */
+u32 tegra_uart_config[4] = {
+ /* Debug UART initialization required */
+ 1,
+ /* Debug UART physical address */
+ 0,
+ /* Debug UART virtual address */
+ 0,
+ /* Scratch space for debug macro */
+ 0,
+};
+
+static void __init tegra_init_cache(void)
+{
+#ifdef CONFIG_CACHE_L2X0
+ int ret;
+ void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
+ u32 aux_ctrl, cache_type;
+
+ cache_type = readl(p + L2X0_CACHE_TYPE);
+ aux_ctrl = (cache_type & 0x700) << (17-8);
+ aux_ctrl |= 0x7C400001;
+
+ ret = l2x0_of_init(aux_ctrl, 0x8200c3fe);
+ if (!ret)
+ l2x0_saved_regs_addr = virt_to_phys(&l2x0_saved_regs);
+#endif
+}
+
+static void __init tegra_init_early(void)
+{
+ tegra_cpu_reset_handler_init();
+ tegra_apb_io_init();
+ tegra_init_fuse();
+ tegra_init_cache();
+ tegra_powergate_init();
+ tegra_hotplug_init();
+}
+
+static void __init tegra_dt_init_irq(void)
+{
+ tegra_pmc_init_irq();
+ tegra_init_irq();
+ irqchip_init();
+ tegra_legacy_irq_syscore_init();
+}
static void __init tegra_dt_init(void)
{
@@ -51,6 +112,8 @@ static void __init tegra_dt_init(void)
struct soc_device *soc_dev;
struct device *parent = NULL;
+ tegra_pmc_init();
+
tegra_clocks_apply_init_table();
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
@@ -97,7 +160,9 @@ static void __init tegra_dt_init_late(void)
{
int i;
- tegra_init_late();
+ tegra_init_suspend();
+ tegra_cpuidle_init();
+ tegra_powergate_debugfs_init();
for (i = 0; i < ARRAY_SIZE(board_init_funcs); i++) {
if (of_machine_is_compatible(board_init_funcs[i].machine)) {
@@ -108,6 +173,7 @@ static void __init tegra_dt_init_late(void)
}
static const char * const tegra_dt_board_compat[] = {
+ "nvidia,tegra124",
"nvidia,tegra114",
"nvidia,tegra30",
"nvidia,tegra20",
@@ -119,9 +185,8 @@ DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)")
.smp = smp_ops(tegra_smp_ops),
.init_early = tegra_init_early,
.init_irq = tegra_dt_init_irq,
- .init_time = clocksource_of_init,
.init_machine = tegra_dt_init,
.init_late = tegra_dt_init_late,
- .restart = tegra_assert_system_reset,
+ .restart = tegra_pmc_restart,
.dt_compat = tegra_dt_board_compat,
MACHINE_END
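
tegra_init_cache(), now living in tegra.c, builds the L2X0 auxiliary control value by relocating a three-bit field from bits [10:8] of the cache type register into bits [19:17] (presumably the way-size field) and OR-ing in a fixed configuration word. A stand-alone illustration of just that arithmetic, with a made-up register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cache_type = 0x1c100100;	/* invented example value */
	uint32_t aux_ctrl;

	/* Move bits [10:8] up to bits [19:17], then OR in the fixed bits. */
	aux_ctrl = (cache_type & 0x700) << (17 - 8);
	aux_ctrl |= 0x7C400001;

	printf("aux_ctrl = 0x%08x\n", aux_ctrl);	/* 0x7c420001 here */
	return 0;
}
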
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index a1659863bfd5..8e23071bd1b3 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -5,7 +5,6 @@ config ARCH_U300
select ARM_AMBA
select ARM_PATCH_PHYS_VIRT
select ARM_VIC
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
select CLKSRC_OF
select COMMON_CLK
diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c
index b5db207dfd1e..9a5f9fb352ce 100644
--- a/arch/arm/mach-u300/timer.c
+++ b/arch/arm/mach-u300/timer.c
@@ -358,8 +358,7 @@ static struct delay_timer u300_delay_timer;
*/
static void __init u300_timer_init_of(struct device_node *np)
{
- struct resource irq_res;
- int irq;
+ unsigned int irq;
struct clk *clk;
unsigned long rate;
@@ -368,11 +367,11 @@ static void __init u300_timer_init_of(struct device_node *np)
panic("could not ioremap system timer\n");
/* Get the IRQ for the GP1 timer */
- irq = of_irq_to_resource(np, 2, &irq_res);
- if (irq <= 0)
+ irq = irq_of_parse_and_map(np, 2);
+ if (!irq)
panic("no IRQ for system timer\n");
- pr_info("U300 GP1 timer @ base: %p, IRQ: %d\n", u300_timer_base, irq);
+ pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq);
/* Clock the interrupt controller */
clk = of_clk_get(np, 0);
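
The u300 timer now uses irq_of_parse_and_map(), which returns a mapped virq number or 0 on failure, so the error check becomes "!irq" rather than "irq <= 0". A short sketch of the call under those assumptions:

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/* Fetch and map the third interrupt specifier of a timer node. */
static unsigned int example_get_timer_irq(struct device_node *np)
{
	unsigned int irq;

	irq = irq_of_parse_and_map(np, 2);
	if (!irq)
		panic("no IRQ for system timer\n");

	return irq;
}
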
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 99a28d628297..0034d2cd6973 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -1,40 +1,34 @@
config ARCH_U8500
bool "ST-Ericsson U8500 Series" if ARCH_MULTI_V7
depends on MMU
+ select AB8500_CORE
+ select ABX500_CORE
select ARCH_HAS_CPUFREQ
select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
- select CLKDEV_LOOKUP
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_764369 if SMP
+ select ARM_GIC
+ select CACHE_L2X0
+ select CLKSRC_NOMADIK_MTU
+ select COMMON_CLK
select CPU_V7
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
+ select PINCTRL
+ select PINCTRL_ABX500
+ select PINCTRL_NOMADIK
+ select PL310_ERRATA_753970 if CACHE_PL310
help
Support for ST-Ericsson's Ux500 architecture
if ARCH_U8500
-config UX500_SOC_COMMON
- bool
- default y
- select ABX500_CORE
- select AB8500_CORE
- select ARM_ERRATA_754322
- select ARM_ERRATA_764369 if SMP
- select ARM_GIC
- select CACHE_L2X0
- select CLKSRC_NOMADIK_MTU
- select COMMON_CLK
- select PINCTRL
- select PINCTRL_NOMADIK
- select PINCTRL_ABX500
- select PL310_ERRATA_753970 if CACHE_PL310
-
config UX500_SOC_DB8500
bool
- select CPU_FREQ_TABLE if CPU_FREQ
select MFD_DB8500_PRCMU
select PINCTRL_DB8500
select PINCTRL_DB8540
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index fe1f3e26b88b..616b96e86ad4 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -2,14 +2,11 @@
# Makefile for the linux kernel, U8500 machine.
#
-obj-y := cpu.o devices.o devices-common.o \
- id.o usb.o timer.o pm.o
+obj-y := cpu.o devices.o id.o timer.o pm.o
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o
obj-$(CONFIG_MACH_MOP500) += board-mop500.o board-mop500-sdi.o \
board-mop500-regulators.o \
- board-mop500-uib.o board-mop500-stuib.o \
- board-mop500-u8500uib.o \
board-mop500-pins.o \
board-mop500-audio.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
diff --git a/arch/arm/mach-ux500/board-mop500-audio.c b/arch/arm/mach-ux500/board-mop500-audio.c
index ec0807247e60..154e15f59702 100644
--- a/arch/arm/mach-ux500/board-mop500-audio.c
+++ b/arch/arm/mach-ux500/board-mop500-audio.c
@@ -68,40 +68,6 @@ static struct stedma40_chan_cfg msp2_dma_tx = {
.phy_channel = 1,
};
-static struct platform_device *db8500_add_msp_i2s(struct device *parent,
- int id,
- resource_size_t base, int irq,
- struct msp_i2s_platform_data *pdata)
-{
- struct platform_device *pdev;
- struct resource res[] = {
- DEFINE_RES_MEM(base, SZ_4K),
- DEFINE_RES_IRQ(irq),
- };
-
- pr_info("Register platform-device 'ux500-msp-i2s', id %d, irq %d\n",
- id, irq);
- pdev = platform_device_register_resndata(parent, "ux500-msp-i2s", id,
- res, ARRAY_SIZE(res),
- pdata, sizeof(*pdata));
- if (!pdev) {
- pr_err("Failed to register platform-device 'ux500-msp-i2s.%d'!\n",
- id);
- return NULL;
- }
-
- return pdev;
-}
-
-/* Platform device for ASoC MOP500 machine */
-static struct platform_device snd_soc_mop500 = {
- .name = "snd-soc-mop500",
- .id = 0,
- .dev = {
- .platform_data = NULL,
- },
-};
-
struct msp_i2s_platform_data msp2_platform_data = {
.id = MSP_I2S_2,
.msp_i2s_dma_rx = &msp2_dma_rx,
@@ -113,19 +79,3 @@ struct msp_i2s_platform_data msp3_platform_data = {
.msp_i2s_dma_rx = &msp1_dma_rx,
.msp_i2s_dma_tx = NULL,
};
-
-void mop500_audio_init(struct device *parent)
-{
- pr_info("%s: Register platform-device 'snd-soc-mop500'.\n", __func__);
- platform_device_register(&snd_soc_mop500);
-
- pr_info("Initialize MSP I2S-devices.\n");
- db8500_add_msp_i2s(parent, 0, U8500_MSP0_BASE, IRQ_DB8500_MSP0,
- &msp0_platform_data);
- db8500_add_msp_i2s(parent, 1, U8500_MSP1_BASE, IRQ_DB8500_MSP1,
- &msp1_platform_data);
- db8500_add_msp_i2s(parent, 2, U8500_MSP2_BASE, IRQ_DB8500_MSP2,
- &msp2_platform_data);
- db8500_add_msp_i2s(parent, 3, U8500_MSP3_BASE, IRQ_DB8500_MSP1,
- &msp3_platform_data);
-}
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
index b3e61a38e5c8..26600a1c5319 100644
--- a/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -65,18 +65,6 @@ struct mmci_platform_data mop500_sdi0_data = {
#endif
};
-static void sdi0_configure(struct device *parent)
-{
- /* Add the device, force v2 to subrevision 1 */
- db8500_add_sdi0(parent, &mop500_sdi0_data, U8500_SDI_V2_PERIPHID);
-}
-
-void mop500_sdi_tc35892_init(struct device *parent)
-{
- mop500_sdi0_data.gpio_cd = GPIO_SDMMC_CD;
- sdi0_configure(parent);
-}
-
/*
* SDI1 (SDIO WLAN)
*/
@@ -178,42 +166,3 @@ struct mmci_platform_data mop500_sdi4_data = {
.dma_tx_param = &mop500_sdi4_dma_cfg_tx,
#endif
};
-
-void __init mop500_sdi_init(struct device *parent)
-{
- /* PoP:ed eMMC */
- db8500_add_sdi2(parent, &mop500_sdi2_data, U8500_SDI_V2_PERIPHID);
- /* On-board eMMC */
- db8500_add_sdi4(parent, &mop500_sdi4_data, U8500_SDI_V2_PERIPHID);
-
- /*
- * On boards with the TC35892 GPIO expander, sdi0 will finally
- * be added when the TC35892 initializes and calls
- * mop500_sdi_tc35892_init() above.
- */
-}
-
-void __init snowball_sdi_init(struct device *parent)
-{
- /* On Snowball MMC_CAP_SD_HIGHSPEED isn't supported (Hardware issue?) */
- mop500_sdi0_data.capabilities &= ~MMC_CAP_SD_HIGHSPEED;
- /* On-board eMMC */
- db8500_add_sdi4(parent, &mop500_sdi4_data, U8500_SDI_V2_PERIPHID);
- /* External Micro SD slot */
- mop500_sdi0_data.gpio_cd = SNOWBALL_SDMMC_CD_GPIO;
- mop500_sdi0_data.cd_invert = true;
- sdi0_configure(parent);
-}
-
-void __init hrefv60_sdi_init(struct device *parent)
-{
- /* PoP:ed eMMC */
- db8500_add_sdi2(parent, &mop500_sdi2_data, U8500_SDI_V2_PERIPHID);
- /* On-board eMMC */
- db8500_add_sdi4(parent, &mop500_sdi4_data, U8500_SDI_V2_PERIPHID);
- /* External Micro SD slot */
- mop500_sdi0_data.gpio_cd = HREFV60_SDMMC_CD_GPIO;
- sdi0_configure(parent);
- /* WLAN SDIO channel */
- db8500_add_sdi1(parent, &mop500_sdi1_data, U8500_SDI_V2_PERIPHID);
-}
diff --git a/arch/arm/mach-ux500/board-mop500-stuib.c b/arch/arm/mach-ux500/board-mop500-stuib.c
deleted file mode 100644
index 7e1f294f0434..000000000000
--- a/arch/arm/mach-ux500/board-mop500-stuib.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/mfd/stmpe.h>
-#include <linux/input/bu21013.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/i2c.h>
-#include <linux/input/matrix_keypad.h>
-#include <asm/mach-types.h>
-
-#include "board-mop500.h"
-
-/* STMPE/SKE keypad use this key layout */
-static const unsigned int mop500_keymap[] = {
- KEY(2, 5, KEY_END),
- KEY(4, 1, KEY_POWER),
- KEY(3, 5, KEY_VOLUMEDOWN),
- KEY(1, 3, KEY_3),
- KEY(5, 2, KEY_RIGHT),
- KEY(5, 0, KEY_9),
-
- KEY(0, 5, KEY_MENU),
- KEY(7, 6, KEY_ENTER),
- KEY(4, 5, KEY_0),
- KEY(6, 7, KEY_2),
- KEY(3, 4, KEY_UP),
- KEY(3, 3, KEY_DOWN),
-
- KEY(6, 4, KEY_SEND),
- KEY(6, 2, KEY_BACK),
- KEY(4, 2, KEY_VOLUMEUP),
- KEY(5, 5, KEY_1),
- KEY(4, 3, KEY_LEFT),
- KEY(3, 2, KEY_7),
-};
-
-static const struct matrix_keymap_data mop500_keymap_data = {
- .keymap = mop500_keymap,
- .keymap_size = ARRAY_SIZE(mop500_keymap),
-};
-/*
- * STMPE1601
- */
-static struct stmpe_keypad_platform_data stmpe1601_keypad_data = {
- .debounce_ms = 64,
- .scan_count = 8,
- .no_autorepeat = true,
- .keymap_data = &mop500_keymap_data,
-};
-
-static struct stmpe_platform_data stmpe1601_data = {
- .id = 1,
- .blocks = STMPE_BLOCK_KEYPAD,
- .irq_trigger = IRQF_TRIGGER_FALLING,
- .irq_base = MOP500_STMPE1601_IRQ(0),
- .keypad = &stmpe1601_keypad_data,
- .autosleep = true,
- .autosleep_timeout = 1024,
-};
-
-static struct i2c_board_info __initdata mop500_i2c0_devices_stuib[] = {
- {
- I2C_BOARD_INFO("stmpe1601", 0x40),
- .irq = NOMADIK_GPIO_TO_IRQ(218),
- .platform_data = &stmpe1601_data,
- .flags = I2C_CLIENT_WAKE,
- },
-};
-
-/*
- * BU21013 ROHM touchscreen interface on the STUIBs
- */
-
-#define TOUCH_GPIO_PIN 84
-
-#define TOUCH_XMAX 384
-#define TOUCH_YMAX 704
-
-#define PRCMU_CLOCK_OCR 0x1CC
-#define TSC_EXT_CLOCK_9_6MHZ 0x840000
-
-static struct bu21013_platform_device tsc_plat_device = {
- .touch_pin = TOUCH_GPIO_PIN,
- .touch_x_max = TOUCH_XMAX,
- .touch_y_max = TOUCH_YMAX,
- .ext_clk = false,
- .x_flip = false,
- .y_flip = true,
-};
-
-static struct i2c_board_info __initdata u8500_i2c3_devices_stuib[] = {
- {
- I2C_BOARD_INFO("bu21013_tp", 0x5C),
- .platform_data = &tsc_plat_device,
- },
- {
- I2C_BOARD_INFO("bu21013_tp", 0x5D),
- .platform_data = &tsc_plat_device,
- },
-};
-
-void __init mop500_stuib_init(void)
-{
- if (machine_is_hrefv60())
- tsc_plat_device.cs_pin = HREFV60_TOUCH_RST_GPIO;
- else
- tsc_plat_device.cs_pin = GPIO_BU21013_CS;
-
- mop500_uib_i2c_add(0, mop500_i2c0_devices_stuib,
- ARRAY_SIZE(mop500_i2c0_devices_stuib));
-
- mop500_uib_i2c_add(3, u8500_i2c3_devices_stuib,
- ARRAY_SIZE(u8500_i2c3_devices_stuib));
-}
diff --git a/arch/arm/mach-ux500/board-mop500-u8500uib.c b/arch/arm/mach-ux500/board-mop500-u8500uib.c
deleted file mode 100644
index d397c19570af..000000000000
--- a/arch/arm/mach-ux500/board-mop500-u8500uib.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Board data for the U8500 UIB, also known as the New UIB
- * License terms: GNU General Public License (GPL), version 2
- */
-#include <linux/gpio.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/tc3589x.h>
-#include <linux/input/matrix_keypad.h>
-
-#include "irqs.h"
-
-#include "board-mop500.h"
-
-static struct i2c_board_info __initdata mop500_i2c3_devices_u8500[] = {
- {
- I2C_BOARD_INFO("synaptics_rmi4_i2c", 0x4B),
- .irq = NOMADIK_GPIO_TO_IRQ(84),
- },
-};
-
-/*
- * TC35893
- */
-static const unsigned int u8500_keymap[] = {
- KEY(3, 1, KEY_END),
- KEY(4, 1, KEY_POWER),
- KEY(6, 4, KEY_VOLUMEDOWN),
- KEY(4, 2, KEY_EMAIL),
- KEY(3, 3, KEY_RIGHT),
- KEY(2, 5, KEY_BACKSPACE),
-
- KEY(6, 7, KEY_MENU),
- KEY(5, 0, KEY_ENTER),
- KEY(4, 3, KEY_0),
- KEY(3, 4, KEY_DOT),
- KEY(5, 2, KEY_UP),
- KEY(3, 5, KEY_DOWN),
-
- KEY(4, 5, KEY_SEND),
- KEY(0, 5, KEY_BACK),
- KEY(6, 2, KEY_VOLUMEUP),
- KEY(1, 3, KEY_SPACE),
- KEY(7, 6, KEY_LEFT),
- KEY(5, 5, KEY_SEARCH),
-};
-
-static struct matrix_keymap_data u8500_keymap_data = {
- .keymap = u8500_keymap,
- .keymap_size = ARRAY_SIZE(u8500_keymap),
-};
-
-static struct tc3589x_keypad_platform_data tc35893_data = {
- .krow = TC_KPD_ROWS,
- .kcol = TC_KPD_COLUMNS,
- .debounce_period = TC_KPD_DEBOUNCE_PERIOD,
- .settle_time = TC_KPD_SETTLE_TIME,
- .irqtype = IRQF_TRIGGER_FALLING,
- .enable_wakeup = true,
- .keymap_data = &u8500_keymap_data,
- .no_autorepeat = true,
-};
-
-static struct tc3589x_platform_data tc3589x_keypad_data = {
- .block = TC3589x_BLOCK_KEYPAD,
- .keypad = &tc35893_data,
- .irq_base = MOP500_EGPIO_IRQ_BASE,
-};
-
-static struct i2c_board_info __initdata mop500_i2c0_devices_u8500[] = {
- {
- I2C_BOARD_INFO("tc3589x", 0x44),
- .platform_data = &tc3589x_keypad_data,
- .irq = NOMADIK_GPIO_TO_IRQ(218),
- .flags = I2C_CLIENT_WAKE,
- },
-};
-
-
-void __init mop500_u8500uib_init(void)
-{
- mop500_uib_i2c_add(3, mop500_i2c3_devices_u8500,
- ARRAY_SIZE(mop500_i2c3_devices_u8500));
-
- mop500_uib_i2c_add(0, mop500_i2c0_devices_u8500,
- ARRAY_SIZE(mop500_i2c0_devices_u8500));
-
-}
diff --git a/arch/arm/mach-ux500/board-mop500-uib.c b/arch/arm/mach-ux500/board-mop500-uib.c
deleted file mode 100644
index bdaa422da028..000000000000
--- a/arch/arm/mach-ux500/board-mop500-uib.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#define pr_fmt(fmt) "mop500-uib: " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-
-#include "board-mop500.h"
-#include "id.h"
-
-enum mop500_uib {
- STUIB,
- U8500UIB,
-};
-
-struct uib {
- const char *name;
- const char *option;
- void (*init)(void);
-};
-
-static struct uib __initdata mop500_uibs[] = {
- [STUIB] = {
- .name = "ST-UIB",
- .option = "stuib",
- .init = mop500_stuib_init,
- },
- [U8500UIB] = {
- .name = "U8500-UIB",
- .option = "u8500uib",
- .init = mop500_u8500uib_init,
- },
-};
-
-static struct uib *mop500_uib;
-
-static int __init mop500_uib_setup(char *str)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mop500_uibs); i++) {
- struct uib *uib = &mop500_uibs[i];
-
- if (!strcmp(str, uib->option)) {
- mop500_uib = uib;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(mop500_uibs))
- pr_err("invalid uib= option (%s)\n", str);
-
- return 1;
-}
-__setup("uib=", mop500_uib_setup);
-
-/*
- * The UIBs are detected after the I2C host controllers are registered, so
- * i2c_register_board_info() can't be used.
- */
-void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
- unsigned n)
-{
- struct i2c_adapter *adap;
- struct i2c_client *client;
- int i;
-
- adap = i2c_get_adapter(busnum);
- if (!adap) {
- pr_err("failed to get adapter i2c%d\n", busnum);
- return;
- }
-
- for (i = 0; i < n; i++) {
- client = i2c_new_device(adap, &info[i]);
- if (!client)
- pr_err("failed to register %s to i2c%d\n",
- info[i].type, busnum);
- }
-
- i2c_put_adapter(adap);
-}
-
-static void __init __mop500_uib_init(struct uib *uib, const char *why)
-{
- pr_info("%s (%s)\n", uib->name, why);
- uib->init();
-}
-
-/*
- * Detect the UIB attached based on the presence or absence of i2c devices.
- */
-int __init mop500_uib_init(void)
-{
- struct uib *uib = mop500_uib;
- struct i2c_adapter *i2c0;
- int ret;
-
- if (!cpu_is_u8500_family())
- return -ENODEV;
-
- if (uib) {
- __mop500_uib_init(uib, "from uib= boot argument");
- return 0;
- }
-
- i2c0 = i2c_get_adapter(0);
- if (!i2c0) {
- __mop500_uib_init(&mop500_uibs[STUIB],
- "fallback, could not get i2c0");
- return -ENODEV;
- }
-
- /* U8500-UIB has the TC35893 at 0x44 on I2C0, the ST-UIB doesn't. */
- ret = i2c_smbus_xfer(i2c0, 0x44, 0, I2C_SMBUS_WRITE, 0,
- I2C_SMBUS_QUICK, NULL);
- i2c_put_adapter(i2c0);
-
- if (ret == 0)
- uib = &mop500_uibs[U8500UIB];
- else
- uib = &mop500_uibs[STUIB];
-
- __mop500_uib_init(uib, "detected");
-
- return 0;
-}
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index ad0806eff762..514d40b625a4 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -14,27 +14,16 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/i2c.h>
-#include <linux/platform_data/i2c-nomadik.h>
#include <linux/platform_data/db8500_thermal.h>
-#include <linux/gpio.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
-#include <linux/amba/serial.h>
-#include <linux/spi/spi.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/ab8500.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/driver.h>
-#include <linux/regulator/gpio-regulator.h>
-#include <linux/mfd/tc3589x.h>
#include <linux/mfd/tps6105x.h>
-#include <linux/mfd/abx500/ab8500-gpio.h>
-#include <linux/mfd/abx500/ab8500-codec.h>
#include <linux/platform_data/leds-lp55xx.h>
#include <linux/input.h>
-#include <linux/smsc911x.h>
-#include <linux/gpio_keys.h>
#include <linux/delay.h>
#include <linux/leds.h>
#include <linux/pinctrl/consumer.h>
@@ -46,7 +35,6 @@
#include "setup.h"
#include "devices.h"
#include "irqs.h"
-#include <linux/platform_data/crypto-ux500.h>
#include "ste-dma40-db8500.h"
#include "db8500-regs.h"
@@ -54,401 +42,9 @@
#include "board-mop500.h"
#include "board-mop500-regulators.h"
-static struct gpio_led snowball_led_array[] = {
- {
- .name = "user_led",
- .default_trigger = "heartbeat",
- .gpio = 142,
- },
-};
-
-static struct gpio_led_platform_data snowball_led_data = {
- .leds = snowball_led_array,
- .num_leds = ARRAY_SIZE(snowball_led_array),
-};
-
-static struct platform_device snowball_led_dev = {
- .name = "leds-gpio",
- .dev = {
- .platform_data = &snowball_led_data,
- },
-};
-
-static struct fixed_voltage_config snowball_gpio_en_3v3_data = {
- .supply_name = "EN-3V3",
- .gpio = SNOWBALL_EN_3V3_ETH_GPIO,
- .microvolts = 3300000,
- .enable_high = 1,
- .init_data = &gpio_en_3v3_regulator,
- .startup_delay = 5000, /* 1200us */
-};
-
-static struct platform_device snowball_gpio_en_3v3_regulator_dev = {
- .name = "reg-fixed-voltage",
- .id = 1,
- .dev = {
- .platform_data = &snowball_gpio_en_3v3_data,
- },
-};
-
-/* Dynamically populated. */
-static struct gpio sdi0_reg_gpios[] = {
- { 0, GPIOF_OUT_INIT_LOW, "mmci_vsel" },
-};
-
-static struct gpio_regulator_state sdi0_reg_states[] = {
- { .value = 2900000, .gpios = (0 << 0) },
- { .value = 1800000, .gpios = (1 << 0) },
-};
-
-static struct gpio_regulator_config sdi0_reg_info = {
- .supply_name = "ext-mmc-level-shifter",
- .gpios = sdi0_reg_gpios,
- .nr_gpios = ARRAY_SIZE(sdi0_reg_gpios),
- .states = sdi0_reg_states,
- .nr_states = ARRAY_SIZE(sdi0_reg_states),
- .type = REGULATOR_VOLTAGE,
- .enable_high = 1,
- .enabled_at_boot = 0,
- .init_data = &sdi0_reg_init_data,
- .startup_delay = 100,
-};
-
-static struct platform_device sdi0_regulator = {
- .name = "gpio-regulator",
- .id = -1,
- .dev = {
- .platform_data = &sdi0_reg_info,
- },
-};
-
-static struct abx500_gpio_platform_data ab8500_gpio_pdata = {
- .gpio_base = MOP500_AB8500_PIN_GPIO(1),
-};
-
-/* ab8500-codec */
-static struct ab8500_codec_platform_data ab8500_codec_pdata = {
- .amics = {
- .mic1_type = AMIC_TYPE_DIFFERENTIAL,
- .mic2_type = AMIC_TYPE_DIFFERENTIAL,
- .mic1a_micbias = AMIC_MICBIAS_VAMIC1,
- .mic1b_micbias = AMIC_MICBIAS_VAMIC1,
- .mic2_micbias = AMIC_MICBIAS_VAMIC2
- },
- .ear_cmv = EAR_CMV_0_95V
-};
-
-static struct gpio_keys_button snowball_key_array[] = {
- {
- .gpio = 32,
- .type = EV_KEY,
- .code = KEY_1,
- .desc = "userpb",
- .active_low = 1,
- .debounce_interval = 50,
- .wakeup = 1,
- },
- {
- .gpio = 151,
- .type = EV_KEY,
- .code = KEY_2,
- .desc = "extkb1",
- .active_low = 1,
- .debounce_interval = 50,
- .wakeup = 1,
- },
- {
- .gpio = 152,
- .type = EV_KEY,
- .code = KEY_3,
- .desc = "extkb2",
- .active_low = 1,
- .debounce_interval = 50,
- .wakeup = 1,
- },
- {
- .gpio = 161,
- .type = EV_KEY,
- .code = KEY_4,
- .desc = "extkb3",
- .active_low = 1,
- .debounce_interval = 50,
- .wakeup = 1,
- },
- {
- .gpio = 162,
- .type = EV_KEY,
- .code = KEY_5,
- .desc = "extkb4",
- .active_low = 1,
- .debounce_interval = 50,
- .wakeup = 1,
- },
-};
-
-static struct gpio_keys_platform_data snowball_key_data = {
- .buttons = snowball_key_array,
- .nbuttons = ARRAY_SIZE(snowball_key_array),
-};
-
-static struct platform_device snowball_key_dev = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &snowball_key_data,
- }
-};
-
-static struct smsc911x_platform_config snowball_sbnet_cfg = {
- .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH,
- .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
- .flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY,
- .shift = 1,
-};
-
-static struct resource sbnet_res[] = {
- {
- .name = "smsc911x-memory",
- .start = (0x5000 << 16),
- .end = (0x5000 << 16) + 0xffff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = NOMADIK_GPIO_TO_IRQ(140),
- .end = NOMADIK_GPIO_TO_IRQ(140),
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
- },
-};
-
-static struct platform_device snowball_sbnet_dev = {
- .name = "smsc911x",
- .num_resources = ARRAY_SIZE(sbnet_res),
- .resource = sbnet_res,
- .dev = {
- .platform_data = &snowball_sbnet_cfg,
- },
-};
-
struct ab8500_platform_data ab8500_platdata = {
.irq_base = MOP500_AB8500_IRQ_BASE,
.regulator = &ab8500_regulator_plat_data,
- .gpio = &ab8500_gpio_pdata,
- .codec = &ab8500_codec_pdata,
-};
-
-static struct platform_device u8500_cpufreq_cooling_device = {
- .name = "db8500-cpufreq-cooling",
-};
-
-/*
- * TPS61052
- */
-
-static struct tps6105x_platform_data mop500_tps61052_data = {
- .mode = TPS6105X_MODE_VOLTAGE,
- .regulator_data = &tps61052_regulator,
-};
-
-/*
- * TC35892
- */
-
-static void mop500_tc35892_init(struct tc3589x *tc3589x, unsigned int base)
-{
- struct device *parent = NULL;
-#if 0
- /* FIXME: Is the sdi actually part of tc3589x? */
- parent = tc3589x->dev;
-#endif
- mop500_sdi_tc35892_init(parent);
-}
-
-static struct tc3589x_gpio_platform_data mop500_tc35892_gpio_data = {
- .gpio_base = MOP500_EGPIO(0),
- .setup = mop500_tc35892_init,
-};
-
-static struct tc3589x_platform_data mop500_tc35892_data = {
- .block = TC3589x_BLOCK_GPIO,
- .gpio = &mop500_tc35892_gpio_data,
- .irq_base = MOP500_EGPIO_IRQ_BASE,
-};
-
-static struct lp55xx_led_config lp5521_pri_led[] = {
- [0] = {
- .chan_nr = 0,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
- [1] = {
- .chan_nr = 1,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
- [2] = {
- .chan_nr = 2,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
-};
-
-static struct lp55xx_platform_data __initdata lp5521_pri_data = {
- .label = "lp5521_pri",
- .led_config = &lp5521_pri_led[0],
- .num_channels = 3,
- .clock_mode = LP55XX_CLOCK_EXT,
-};
-
-static struct lp55xx_led_config lp5521_sec_led[] = {
- [0] = {
- .chan_nr = 0,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
- [1] = {
- .chan_nr = 1,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
- [2] = {
- .chan_nr = 2,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
-};
-
-static struct lp55xx_platform_data __initdata lp5521_sec_data = {
- .label = "lp5521_sec",
- .led_config = &lp5521_sec_led[0],
- .num_channels = 3,
- .clock_mode = LP55XX_CLOCK_EXT,
-};
-
-/* I2C0 devices only available on the first HREF/MOP500 */
-static struct i2c_board_info __initdata mop500_i2c0_devices[] = {
- {
- I2C_BOARD_INFO("tc3589x", 0x42),
- .irq = NOMADIK_GPIO_TO_IRQ(217),
- .platform_data = &mop500_tc35892_data,
- },
- {
- I2C_BOARD_INFO("tps61052", 0x33),
- .platform_data = &mop500_tps61052_data,
- },
-};
-
-static struct i2c_board_info __initdata mop500_i2c2_devices[] = {
- {
- /* lp5521 LED driver, 1st device */
- I2C_BOARD_INFO("lp5521", 0x33),
- .platform_data = &lp5521_pri_data,
- },
- {
-		/* lp5521 LED driver, 2nd device */
- I2C_BOARD_INFO("lp5521", 0x34),
- .platform_data = &lp5521_sec_data,
- },
- {
- /* Light sensor Rohm BH1780GLI */
- I2C_BOARD_INFO("bh1780", 0x29),
- },
-};
-
-static int __init mop500_i2c_board_init(void)
-{
- if (machine_is_u8500())
- mop500_uib_i2c_add(0, mop500_i2c0_devices,
- ARRAY_SIZE(mop500_i2c0_devices));
- mop500_uib_i2c_add(2, mop500_i2c2_devices,
- ARRAY_SIZE(mop500_i2c2_devices));
- return 0;
-}
-device_initcall(mop500_i2c_board_init);
-
-static void __init mop500_i2c_init(struct device *parent)
-{
- db8500_add_i2c0(parent, NULL);
- db8500_add_i2c1(parent, NULL);
- db8500_add_i2c2(parent, NULL);
- db8500_add_i2c3(parent, NULL);
-}
-
-static struct gpio_keys_button mop500_gpio_keys[] = {
- {
- .desc = "SFH7741 Proximity Sensor",
- .type = EV_SW,
- .code = SW_FRONT_PROXIMITY,
- .active_low = 0,
- .can_disable = 1,
- }
-};
-
-static struct regulator *prox_regulator;
-static int mop500_prox_activate(struct device *dev);
-static void mop500_prox_deactivate(struct device *dev);
-
-static struct gpio_keys_platform_data mop500_gpio_keys_data = {
- .buttons = mop500_gpio_keys,
- .nbuttons = ARRAY_SIZE(mop500_gpio_keys),
- .enable = mop500_prox_activate,
- .disable = mop500_prox_deactivate,
-};
-
-static struct platform_device mop500_gpio_keys_device = {
- .name = "gpio-keys",
- .id = 0,
- .dev = {
- .platform_data = &mop500_gpio_keys_data,
- },
-};
-
-static int mop500_prox_activate(struct device *dev)
-{
- prox_regulator = regulator_get(&mop500_gpio_keys_device.dev,
- "vcc");
- if (IS_ERR(prox_regulator)) {
- dev_err(&mop500_gpio_keys_device.dev,
- "no regulator\n");
- return PTR_ERR(prox_regulator);
- }
-
- return regulator_enable(prox_regulator);
-}
-
-static void mop500_prox_deactivate(struct device *dev)
-{
- regulator_disable(prox_regulator);
- regulator_put(prox_regulator);
-}
-
-static struct cryp_platform_data u8500_cryp1_platform_data = {
- .mem_to_engine = {
- .dir = DMA_MEM_TO_DEV,
- .dev_type = DB8500_DMA_DEV48_CAC1,
- .mode = STEDMA40_MODE_LOGICAL,
- },
- .engine_to_mem = {
- .dir = DMA_DEV_TO_MEM,
- .dev_type = DB8500_DMA_DEV48_CAC1,
- .mode = STEDMA40_MODE_LOGICAL,
- }
-};
-
-static struct stedma40_chan_cfg u8500_hash_dma_cfg_tx = {
- .dir = DMA_MEM_TO_DEV,
- .dev_type = DB8500_DMA_DEV50_HAC1_TX,
- .mode = STEDMA40_MODE_LOGICAL,
-};
-
-static struct hash_platform_data u8500_hash1_platform_data = {
- .mem_to_engine = &u8500_hash_dma_cfg_tx,
- .dma_filter = stedma40_filter,
-};
-
-/* add any platform devices here - TODO */
-static struct platform_device *mop500_platform_devs[] __initdata = {
- &mop500_gpio_keys_device,
- &sdi0_regulator,
};
#ifdef CONFIG_STE_DMA40
@@ -480,236 +76,3 @@ struct pl022_ssp_controller ssp0_plat = {
*/
.num_chipselect = 5,
};
-
-static void __init mop500_spi_init(struct device *parent)
-{
- db8500_add_ssp0(parent, &ssp0_plat);
-}
-
-#ifdef CONFIG_STE_DMA40
-static struct stedma40_chan_cfg uart0_dma_cfg_rx = {
- .mode = STEDMA40_MODE_LOGICAL,
- .dir = DMA_DEV_TO_MEM,
- .dev_type = DB8500_DMA_DEV13_UART0,
-};
-
-static struct stedma40_chan_cfg uart0_dma_cfg_tx = {
- .mode = STEDMA40_MODE_LOGICAL,
- .dir = DMA_MEM_TO_DEV,
- .dev_type = DB8500_DMA_DEV13_UART0,
-};
-
-static struct stedma40_chan_cfg uart1_dma_cfg_rx = {
- .mode = STEDMA40_MODE_LOGICAL,
- .dir = DMA_DEV_TO_MEM,
- .dev_type = DB8500_DMA_DEV12_UART1,
-};
-
-static struct stedma40_chan_cfg uart1_dma_cfg_tx = {
- .mode = STEDMA40_MODE_LOGICAL,
- .dir = DMA_MEM_TO_DEV,
- .dev_type = DB8500_DMA_DEV12_UART1,
-};
-
-static struct stedma40_chan_cfg uart2_dma_cfg_rx = {
- .mode = STEDMA40_MODE_LOGICAL,
- .dir = DMA_DEV_TO_MEM,
- .dev_type = DB8500_DMA_DEV11_UART2,
-};
-
-static struct stedma40_chan_cfg uart2_dma_cfg_tx = {
- .mode = STEDMA40_MODE_LOGICAL,
- .dir = DMA_MEM_TO_DEV,
- .dev_type = DB8500_DMA_DEV11_UART2,
-};
-#endif
-
-struct amba_pl011_data uart0_plat = {
-#ifdef CONFIG_STE_DMA40
- .dma_filter = stedma40_filter,
- .dma_rx_param = &uart0_dma_cfg_rx,
- .dma_tx_param = &uart0_dma_cfg_tx,
-#endif
-};
-
-struct amba_pl011_data uart1_plat = {
-#ifdef CONFIG_STE_DMA40
- .dma_filter = stedma40_filter,
- .dma_rx_param = &uart1_dma_cfg_rx,
- .dma_tx_param = &uart1_dma_cfg_tx,
-#endif
-};
-
-struct amba_pl011_data uart2_plat = {
-#ifdef CONFIG_STE_DMA40
- .dma_filter = stedma40_filter,
- .dma_rx_param = &uart2_dma_cfg_rx,
- .dma_tx_param = &uart2_dma_cfg_tx,
-#endif
-};
-
-static void __init mop500_uart_init(struct device *parent)
-{
- db8500_add_uart0(parent, &uart0_plat);
- db8500_add_uart1(parent, &uart1_plat);
- db8500_add_uart2(parent, &uart2_plat);
-}
-
-static void __init u8500_cryp1_hash1_init(struct device *parent)
-{
- db8500_add_cryp1(parent, &u8500_cryp1_platform_data);
- db8500_add_hash1(parent, &u8500_hash1_platform_data);
-}
-
-static struct platform_device *snowball_platform_devs[] __initdata = {
- &snowball_led_dev,
- &snowball_key_dev,
- &snowball_sbnet_dev,
- &snowball_gpio_en_3v3_regulator_dev,
- &u8500_cpufreq_cooling_device,
- &sdi0_regulator,
-};
-
-static void __init mop500_init_machine(void)
-{
- struct device *parent = NULL;
- int i;
-
- platform_device_register(&db8500_prcmu_device);
- mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR;
-
- sdi0_reg_info.enable_gpio = GPIO_SDMMC_EN;
- sdi0_reg_info.gpios[0].gpio = GPIO_SDMMC_1V8_3V_SEL;
-
- mop500_pinmaps_init();
- parent = u8500_init_devices();
-
- for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
- mop500_platform_devs[i]->dev.parent = parent;
-
- platform_add_devices(mop500_platform_devs,
- ARRAY_SIZE(mop500_platform_devs));
-
- mop500_i2c_init(parent);
- mop500_sdi_init(parent);
- mop500_spi_init(parent);
- mop500_audio_init(parent);
- mop500_uart_init(parent);
- u8500_cryp1_hash1_init(parent);
-
- /* This board has full regulator constraints */
- regulator_has_full_constraints();
-}
-
-
-static void __init snowball_init_machine(void)
-{
- struct device *parent = NULL;
- int i;
-
- platform_device_register(&db8500_prcmu_device);
-
- sdi0_reg_info.enable_gpio = SNOWBALL_SDMMC_EN_GPIO;
- sdi0_reg_info.gpios[0].gpio = SNOWBALL_SDMMC_1V8_3V_GPIO;
-
- snowball_pinmaps_init();
- parent = u8500_init_devices();
-
- for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++)
- snowball_platform_devs[i]->dev.parent = parent;
-
- platform_add_devices(snowball_platform_devs,
- ARRAY_SIZE(snowball_platform_devs));
-
- mop500_i2c_init(parent);
- snowball_sdi_init(parent);
- mop500_spi_init(parent);
- mop500_audio_init(parent);
- mop500_uart_init(parent);
-
- u8500_cryp1_hash1_init(parent);
-
- /* This board has full regulator constraints */
- regulator_has_full_constraints();
-}
-
-static void __init hrefv60_init_machine(void)
-{
- struct device *parent = NULL;
- int i;
-
- platform_device_register(&db8500_prcmu_device);
- /*
- * The HREFv60 board removed a GPIO expander and routed
- * all these GPIO pins to the internal GPIO controller
- * instead.
- */
- mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO;
-
- sdi0_reg_info.enable_gpio = HREFV60_SDMMC_EN_GPIO;
- sdi0_reg_info.gpios[0].gpio = HREFV60_SDMMC_1V8_3V_GPIO;
-
- hrefv60_pinmaps_init();
- parent = u8500_init_devices();
-
- for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
- mop500_platform_devs[i]->dev.parent = parent;
-
- platform_add_devices(mop500_platform_devs,
- ARRAY_SIZE(mop500_platform_devs));
-
- mop500_i2c_init(parent);
- hrefv60_sdi_init(parent);
- mop500_spi_init(parent);
- mop500_audio_init(parent);
- mop500_uart_init(parent);
-
- /* This board has full regulator constraints */
- regulator_has_full_constraints();
-}
-
-MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
- /* Maintainer: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> */
- .atag_offset = 0x100,
- .smp = smp_ops(ux500_smp_ops),
- .map_io = u8500_map_io,
- .init_irq = ux500_init_irq,
- /* we re-use nomadik timer here */
- .init_time = ux500_timer_init,
- .init_machine = mop500_init_machine,
- .init_late = ux500_init_late,
- .restart = ux500_restart,
-MACHINE_END
-
-MACHINE_START(U8520, "ST-Ericsson U8520 Platform HREFP520")
- .atag_offset = 0x100,
- .map_io = u8500_map_io,
- .init_irq = ux500_init_irq,
- .init_time = ux500_timer_init,
- .init_machine = mop500_init_machine,
- .init_late = ux500_init_late,
- .restart = ux500_restart,
-MACHINE_END
-
-MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
- .atag_offset = 0x100,
- .smp = smp_ops(ux500_smp_ops),
- .map_io = u8500_map_io,
- .init_irq = ux500_init_irq,
- .init_time = ux500_timer_init,
- .init_machine = hrefv60_init_machine,
- .init_late = ux500_init_late,
- .restart = ux500_restart,
-MACHINE_END
-
-MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
- .atag_offset = 0x100,
- .smp = smp_ops(ux500_smp_ops),
- .map_io = u8500_map_io,
- .init_irq = ux500_init_irq,
- /* we re-use nomadik timer here */
- .init_time = ux500_timer_init,
- .init_machine = snowball_init_machine,
- .init_late = NULL,
- .restart = ux500_restart,
-MACHINE_END
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
index d6fab166cbf1..511d6febbe99 100644
--- a/arch/arm/mach-ux500/board-mop500.h
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -79,7 +79,6 @@
#define SNOWBALL_EN_3V3_ETH_GPIO MOP500_AB8500_PIN_GPIO(26) /* GPIO26 */
struct device;
-struct i2c_board_info;
extern struct mmci_platform_data mop500_sdi0_data;
extern struct mmci_platform_data mop500_sdi1_data;
extern struct mmci_platform_data mop500_sdi2_data;
@@ -88,25 +87,10 @@ extern struct msp_i2s_platform_data msp0_platform_data;
extern struct msp_i2s_platform_data msp1_platform_data;
extern struct msp_i2s_platform_data msp2_platform_data;
extern struct msp_i2s_platform_data msp3_platform_data;
-extern struct arm_pmu_platdata db8500_pmu_platdata;
-extern struct amba_pl011_data uart0_plat;
-extern struct amba_pl011_data uart1_plat;
-extern struct amba_pl011_data uart2_plat;
extern struct pl022_ssp_controller ssp0_plat;
-extern struct stedma40_platform_data dma40_plat_data;
-extern void mop500_sdi_init(struct device *parent);
-extern void snowball_sdi_init(struct device *parent);
-extern void hrefv60_sdi_init(struct device *parent);
-extern void mop500_sdi_tc35892_init(struct device *parent);
-void __init mop500_u8500uib_init(void);
-void __init mop500_stuib_init(void);
void __init mop500_pinmaps_init(void);
void __init snowball_pinmaps_init(void);
void __init hrefv60_pinmaps_init(void);
-void mop500_audio_init(struct device *parent);
-int __init mop500_uib_init(void);
-void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
- unsigned n);
#endif
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 301c3460d96a..2e85c1e72535 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -32,7 +32,6 @@
#include "irqs.h"
#include "devices-db8500.h"
-#include "ste-dma40-db8500.h"
#include "db8500-regs.h"
#include "board-mop500.h"
#include "id.h"
@@ -93,14 +92,6 @@ void __init u8500_map_io(void)
iotable_init(u8500_io_desc, ARRAY_SIZE(u8500_io_desc));
}
-static struct resource db8500_pmu_resources[] = {
- [0] = {
- .start = IRQ_DB8500_PMU,
- .end = IRQ_DB8500_PMU,
- .flags = IORESOURCE_IRQ,
- },
-};
-
/*
* The PMU IRQ lines of two cores are wired together into a single interrupt.
* Bounce the interrupt to the other core if it's not ours.
@@ -125,54 +116,6 @@ struct arm_pmu_platdata db8500_pmu_platdata = {
.handle_irq = db8500_pmu_handler,
};
-static struct platform_device db8500_pmu_device = {
- .name = "arm-pmu",
- .id = -1,
- .num_resources = ARRAY_SIZE(db8500_pmu_resources),
- .resource = db8500_pmu_resources,
- .dev.platform_data = &db8500_pmu_platdata,
-};
-
-static struct platform_device *platform_devs[] __initdata = {
- &u8500_dma40_device,
- &db8500_pmu_device,
-};
-
-static resource_size_t __initdata db8500_gpio_base[] = {
- U8500_GPIOBANK0_BASE,
- U8500_GPIOBANK1_BASE,
- U8500_GPIOBANK2_BASE,
- U8500_GPIOBANK3_BASE,
- U8500_GPIOBANK4_BASE,
- U8500_GPIOBANK5_BASE,
- U8500_GPIOBANK6_BASE,
- U8500_GPIOBANK7_BASE,
- U8500_GPIOBANK8_BASE,
-};
-
-static void __init db8500_add_gpios(struct device *parent)
-{
- struct nmk_gpio_platform_data pdata = {
- .supports_sleepmode = true,
- };
-
- dbx500_add_gpios(parent, db8500_gpio_base,
- ARRAY_SIZE(db8500_gpio_base),
- IRQ_DB8500_GPIO0, &pdata);
- dbx500_add_pinctrl(parent, "pinctrl-db8500", U8500_PRCMU_BASE);
-}
-
-static int usb_db8500_dma_cfg[] = {
- DB8500_DMA_DEV38_USB_OTG_IEP_AND_OEP_1_9,
- DB8500_DMA_DEV37_USB_OTG_IEP_AND_OEP_2_10,
- DB8500_DMA_DEV36_USB_OTG_IEP_AND_OEP_3_11,
- DB8500_DMA_DEV19_USB_OTG_IEP_AND_OEP_4_12,
- DB8500_DMA_DEV18_USB_OTG_IEP_AND_OEP_5_13,
- DB8500_DMA_DEV17_USB_OTG_IEP_AND_OEP_6_14,
- DB8500_DMA_DEV16_USB_OTG_IEP_AND_OEP_7_15,
- DB8500_DMA_DEV39_USB_OTG_IEP_AND_OEP_8
-};
-
static const char *db8500_read_soc_id(void)
{
void __iomem *uid = __io_address(U8500_BB_UID_BASE);
@@ -192,60 +135,22 @@ static struct device * __init db8500_soc_device_init(void)
return ux500_soc_device_init(soc_id);
}
-/*
- * This function is called from the board init
- */
-struct device * __init u8500_init_devices(void)
-{
- struct device *parent;
- int i;
-
- parent = db8500_soc_device_init();
-
- db8500_add_rtc(parent);
- db8500_add_gpios(parent);
- db8500_add_usb(parent, usb_db8500_dma_cfg, usb_db8500_dma_cfg);
-
- for (i = 0; i < ARRAY_SIZE(platform_devs); i++)
- platform_devs[i]->dev.parent = parent;
-
- platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
-
- return parent;
-}
-
#ifdef CONFIG_MACH_UX500_DT
static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
/* Requires call-back bindings. */
OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
/* Requires DMA bindings. */
- OF_DEV_AUXDATA("arm,pl011", 0x80120000, "uart0", NULL),
- OF_DEV_AUXDATA("arm,pl011", 0x80121000, "uart1", NULL),
- OF_DEV_AUXDATA("arm,pl011", 0x80007000, "uart2", NULL),
- OF_DEV_AUXDATA("arm,pl022", 0x80002000, "ssp0", &ssp0_plat),
- OF_DEV_AUXDATA("arm,pl18x", 0x80126000, "sdi0", NULL),
- OF_DEV_AUXDATA("arm,pl18x", 0x80118000, "sdi1", NULL),
- OF_DEV_AUXDATA("arm,pl18x", 0x80005000, "sdi2", NULL),
- OF_DEV_AUXDATA("arm,pl18x", 0x80114000, "sdi4", NULL),
- /* Requires clock name bindings. */
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8012e000, "gpio.0", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8012e080, "gpio.1", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8000e000, "gpio.2", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8000e080, "gpio.3", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8000e100, "gpio.4", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8000e180, "gpio.5", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e000, "gpio.6", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e080, "gpio.7", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0xa03fe000, "gpio.8", NULL),
- OF_DEV_AUXDATA("st,nomadik-i2c", 0x80004000, "nmk-i2c.0", NULL),
- OF_DEV_AUXDATA("st,nomadik-i2c", 0x80122000, "nmk-i2c.1", NULL),
- OF_DEV_AUXDATA("st,nomadik-i2c", 0x80128000, "nmk-i2c.2", NULL),
- OF_DEV_AUXDATA("st,nomadik-i2c", 0x80110000, "nmk-i2c.3", NULL),
- OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL),
- OF_DEV_AUXDATA("stericsson,db8500-musb", 0xa03e0000, "musb-ux500.0", NULL),
+ OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000,
+ "ux500-msp-i2s.0", &msp0_platform_data),
+ OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80124000,
+ "ux500-msp-i2s.1", &msp1_platform_data),
+ OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80117000,
+ "ux500-msp-i2s.2", &msp2_platform_data),
+ OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80125000,
+ "ux500-msp-i2s.3", &msp3_platform_data),
+ /* Requires non-DT:able platform data. */
OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu",
&db8500_prcmu_pdata),
- OF_DEV_AUXDATA("smsc,lan9115", 0x50000000, "smsc911x.0", NULL),
OF_DEV_AUXDATA("stericsson,ux500-cryp", 0xa03cb000, "cryp1", NULL),
OF_DEV_AUXDATA("stericsson,ux500-hash", 0xa03c2000, "hash1", NULL),
OF_DEV_AUXDATA("stericsson,snd-soc-mop500", 0, "snd-soc-mop500.0",
@@ -253,17 +158,6 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
/* Requires device name bindings. */
OF_DEV_AUXDATA("stericsson,db8500-pinctrl", U8500_PRCMU_BASE,
"pinctrl-db8500", NULL),
- /* Requires clock name and DMA bindings. */
- OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000,
- "ux500-msp-i2s.0", &msp0_platform_data),
- OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80124000,
- "ux500-msp-i2s.1", &msp1_platform_data),
- OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80117000,
- "ux500-msp-i2s.2", &msp2_platform_data),
- OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80125000,
- "ux500-msp-i2s.3", &msp3_platform_data),
- /* Requires clock name bindings and channel address lookup table. */
- OF_DEV_AUXDATA("stericsson,db8500-dma40", 0x801C0000, "dma40.0", NULL),
{},
};
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index 5d7eebcabc63..f84d4397896b 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -78,9 +78,17 @@ void __init ux500_init_irq(void)
if (cpu_is_u8500_family()) {
prcmu_early_init(U8500_PRCMU_BASE, SZ_8K - 1);
ux500_pm_init(U8500_PRCMU_BASE, SZ_8K - 1);
- u8500_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE,
- U8500_CLKRST3_BASE, U8500_CLKRST5_BASE,
- U8500_CLKRST6_BASE);
+
+ if (of_have_populated_dt())
+ u8500_of_clk_init(U8500_CLKRST1_BASE,
+ U8500_CLKRST2_BASE,
+ U8500_CLKRST3_BASE,
+ U8500_CLKRST5_BASE,
+ U8500_CLKRST6_BASE);
+ else
+ u8500_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE,
+ U8500_CLKRST3_BASE, U8500_CLKRST5_BASE,
+ U8500_CLKRST6_BASE);
} else if (cpu_is_u9540()) {
prcmu_early_init(U8500_PRCMU_BASE, SZ_8K - 1);
ux500_pm_init(U8500_PRCMU_BASE, SZ_8K - 1);
@@ -96,11 +104,6 @@ void __init ux500_init_irq(void)
}
}
-void __init ux500_init_late(void)
-{
- mop500_uib_init();
-}
-
static const char * __init ux500_get_machine(void)
{
return kasprintf(GFP_KERNEL, "DB%4x", dbx500_partnumber());
diff --git a/arch/arm/mach-ux500/devices-common.c b/arch/arm/mach-ux500/devices-common.c
deleted file mode 100644
index f71b3d7bd4fb..000000000000
--- a/arch/arm/mach-ux500/devices-common.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * License terms: GNU General Public License (GPL), version 2.
- */
-
-#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/pinctrl-nomadik.h>
-
-#include "irqs.h"
-
-#include "devices-common.h"
-
-static struct platform_device *
-dbx500_add_gpio(struct device *parent, int id, resource_size_t addr, int irq,
- struct nmk_gpio_platform_data *pdata)
-{
- struct resource resources[] = {
- {
- .start = addr,
- .end = addr + 127,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = irq,
- .end = irq,
- .flags = IORESOURCE_IRQ,
- }
- };
-
- return platform_device_register_resndata(
- parent,
- "gpio",
- id,
- resources,
- ARRAY_SIZE(resources),
- pdata,
- sizeof(*pdata));
-}
-
-void dbx500_add_gpios(struct device *parent, resource_size_t *base, int num,
- int irq, struct nmk_gpio_platform_data *pdata)
-{
- int first = 0;
- int i;
-
- for (i = 0; i < num; i++, first += 32, irq++) {
- pdata->first_gpio = first;
- pdata->first_irq = NOMADIK_GPIO_TO_IRQ(first);
- pdata->num_gpio = 32;
-
- dbx500_add_gpio(parent, i, base[i], irq, pdata);
- }
-}
diff --git a/arch/arm/mach-ux500/devices-common.h b/arch/arm/mach-ux500/devices-common.h
deleted file mode 100644
index 96fa4ac89e2e..000000000000
--- a/arch/arm/mach-ux500/devices-common.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * License terms: GNU General Public License (GPL), version 2.
- */
-
-#ifndef __DEVICES_COMMON_H
-#define __DEVICES_COMMON_H
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/sys_soc.h>
-#include <linux/amba/bus.h>
-#include <linux/platform_data/i2c-nomadik.h>
-#include <linux/platform_data/crypto-ux500.h>
-
-struct spi_master_cntlr;
-
-static inline struct amba_device *
-dbx500_add_msp_spi(struct device *parent, const char *name,
- resource_size_t base, int irq,
- struct spi_master_cntlr *pdata)
-{
- return amba_ahb_device_add(parent, name, base, SZ_4K, irq, 0,
- pdata, 0);
-}
-
-static inline struct amba_device *
-dbx500_add_spi(struct device *parent, const char *name, resource_size_t base,
- int irq, struct spi_master_cntlr *pdata,
- u32 periphid)
-{
- return amba_ahb_device_add(parent, name, base, SZ_4K, irq, 0,
- pdata, periphid);
-}
-
-struct mmci_platform_data;
-
-static inline struct amba_device *
-dbx500_add_sdi(struct device *parent, const char *name, resource_size_t base,
- int irq, struct mmci_platform_data *pdata, u32 periphid)
-{
- return amba_ahb_device_add(parent, name, base, SZ_4K, irq, 0,
- pdata, periphid);
-}
-
-struct amba_pl011_data;
-
-static inline struct amba_device *
-dbx500_add_uart(struct device *parent, const char *name, resource_size_t base,
- int irq, struct amba_pl011_data *pdata)
-{
- return amba_ahb_device_add(parent, name, base, SZ_4K, irq, 0, pdata, 0);
-}
-
-struct nmk_i2c_controller;
-
-static inline struct amba_device *
-dbx500_add_i2c(struct device *parent, int id, resource_size_t base, int irq,
- struct nmk_i2c_controller *data)
-{
- /* Conjure a name similar to what the platform device used to have */
- char name[16];
-
- snprintf(name, sizeof(name), "nmk-i2c.%d", id);
- return amba_apb_device_add(parent, name, base, SZ_4K, irq, 0, data, 0);
-}
-
-static inline struct amba_device *
-dbx500_add_rtc(struct device *parent, resource_size_t base, int irq)
-{
- return amba_apb_device_add(parent, "rtc-pl031", base, SZ_4K, irq,
- 0, NULL, 0);
-}
-
-struct cryp_platform_data;
-
-static inline struct platform_device *
-dbx500_add_cryp1(struct device *parent, int id, resource_size_t base, int irq,
- struct cryp_platform_data *pdata)
-{
- struct resource res[] = {
- DEFINE_RES_MEM(base, SZ_4K),
- DEFINE_RES_IRQ(irq),
- };
-
- struct platform_device_info pdevinfo = {
- .parent = parent,
- .name = "cryp1",
- .id = id,
- .res = res,
- .num_res = ARRAY_SIZE(res),
- .data = pdata,
- .size_data = sizeof(*pdata),
- .dma_mask = DMA_BIT_MASK(32),
- };
-
- return platform_device_register_full(&pdevinfo);
-}
-
-struct hash_platform_data;
-
-static inline struct platform_device *
-dbx500_add_hash1(struct device *parent, int id, resource_size_t base,
- struct hash_platform_data *pdata)
-{
- struct resource res[] = {
- DEFINE_RES_MEM(base, SZ_4K),
- };
-
- struct platform_device_info pdevinfo = {
- .parent = parent,
- .name = "hash1",
- .id = id,
- .res = res,
- .num_res = ARRAY_SIZE(res),
- .data = pdata,
- .size_data = sizeof(*pdata),
- .dma_mask = DMA_BIT_MASK(32),
- };
-
- return platform_device_register_full(&pdevinfo);
-}
-
-struct nmk_gpio_platform_data;
-
-void dbx500_add_gpios(struct device *parent, resource_size_t *base, int num,
- int irq, struct nmk_gpio_platform_data *pdata);
-
-static inline void
-dbx500_add_pinctrl(struct device *parent, const char *name,
- resource_size_t base)
-{
- struct resource res[] = {
- DEFINE_RES_MEM(base, SZ_8K),
- };
- struct platform_device_info pdevinfo = {
- .parent = parent,
- .name = name,
- .id = -1,
- .res = res,
- .num_res = ARRAY_SIZE(res),
- };
-
- platform_device_register_full(&pdevinfo);
-}
-
-#endif
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index bc316062e0c2..c59f89d058ff 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -9,10 +9,8 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
-#include <linux/platform_data/dma-ste-dma40.h>
#include <linux/mfd/dbx500-prcmu.h>
#include "setup.h"
@@ -20,62 +18,6 @@
#include "db8500-regs.h"
#include "devices-db8500.h"
-#include "ste-dma40-db8500.h"
-
-static struct resource dma40_resources[] = {
- [0] = {
- .start = U8500_DMA_BASE,
- .end = U8500_DMA_BASE + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- .name = "base",
- },
- [1] = {
- .start = U8500_DMA_LCPA_BASE,
- .end = U8500_DMA_LCPA_BASE + 2 * SZ_1K - 1,
- .flags = IORESOURCE_MEM,
- .name = "lcpa",
- },
- [2] = {
- .start = IRQ_DB8500_DMA,
- .end = IRQ_DB8500_DMA,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-struct stedma40_platform_data dma40_plat_data = {
- .disabled_channels = {-1},
-};
-
-struct platform_device u8500_dma40_device = {
- .dev = {
- .platform_data = &dma40_plat_data,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
- .name = "dma40",
- .id = 0,
- .num_resources = ARRAY_SIZE(dma40_resources),
- .resource = dma40_resources
-};
-
-struct resource keypad_resources[] = {
- [0] = {
- .start = U8500_SKE_BASE,
- .end = U8500_SKE_BASE + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_DB8500_KB,
- .end = IRQ_DB8500_KB,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device u8500_ske_keypad_device = {
- .name = "nmk-ske-keypad",
- .id = -1,
- .num_resources = ARRAY_SIZE(keypad_resources),
- .resource = keypad_resources,
-};
struct prcmu_pdata db8500_prcmu_pdata = {
.ab_platdata = &ab8500_platdata,
@@ -84,39 +26,3 @@ struct prcmu_pdata db8500_prcmu_pdata = {
.version_offset = DB8500_PRCMU_FW_VERSION_OFFSET,
.legacy_offset = DB8500_PRCMU_LEGACY_OFFSET,
};
-
-static struct resource db8500_prcmu_res[] = {
- {
- .name = "prcmu",
- .start = U8500_PRCMU_BASE,
- .end = U8500_PRCMU_BASE + SZ_8K - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "prcmu-tcdm",
- .start = U8500_PRCMU_TCDM_BASE,
- .end = U8500_PRCMU_TCDM_BASE + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "irq",
- .start = IRQ_DB8500_PRCMU1,
- .end = IRQ_DB8500_PRCMU1,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "prcmu-tcpm",
- .start = U8500_PRCMU_TCPM_BASE,
- .end = U8500_PRCMU_TCPM_BASE + SZ_32K - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-struct platform_device db8500_prcmu_device = {
- .name = "db8500-prcmu",
- .resource = db8500_prcmu_res,
- .num_resources = ARRAY_SIZE(db8500_prcmu_res),
- .dev = {
- .platform_data = &db8500_prcmu_pdata,
- },
-};
diff --git a/arch/arm/mach-ux500/devices-db8500.h b/arch/arm/mach-ux500/devices-db8500.h
index 321998320f98..b8ffc9979bb2 100644
--- a/arch/arm/mach-ux500/devices-db8500.h
+++ b/arch/arm/mach-ux500/devices-db8500.h
@@ -8,122 +8,12 @@
#ifndef __DEVICES_DB8500_H
#define __DEVICES_DB8500_H
-#include <linux/platform_data/usb-musb-ux500.h>
#include "irqs.h"
#include "db8500-regs.h"
-#include "devices-common.h"
-struct ske_keypad_platform_data;
-struct pl022_ssp_controller;
struct platform_device;
extern struct ab8500_platform_data ab8500_platdata;
extern struct prcmu_pdata db8500_prcmu_pdata;
-extern struct platform_device db8500_prcmu_device;
-static inline struct platform_device *
-db8500_add_ske_keypad(struct device *parent,
- struct ske_keypad_platform_data *pdata,
- size_t size)
-{
- struct resource resources[] = {
- DEFINE_RES_MEM(U8500_SKE_BASE, SZ_4K),
- DEFINE_RES_IRQ(IRQ_DB8500_KB),
- };
-
- return platform_device_register_resndata(parent, "nmk-ske-keypad", -1,
- resources, 2, pdata, size);
-}
-
-static inline struct amba_device *
-db8500_add_ssp(struct device *parent, const char *name, resource_size_t base,
- int irq, struct pl022_ssp_controller *pdata)
-{
- return amba_ahb_device_add(parent, name, base, SZ_4K, irq, 0, pdata, 0);
-}
-
-#define db8500_add_i2c0(parent, pdata) \
- dbx500_add_i2c(parent, 0, U8500_I2C0_BASE, IRQ_DB8500_I2C0, pdata)
-#define db8500_add_i2c1(parent, pdata) \
- dbx500_add_i2c(parent, 1, U8500_I2C1_BASE, IRQ_DB8500_I2C1, pdata)
-#define db8500_add_i2c2(parent, pdata) \
- dbx500_add_i2c(parent, 2, U8500_I2C2_BASE, IRQ_DB8500_I2C2, pdata)
-#define db8500_add_i2c3(parent, pdata) \
- dbx500_add_i2c(parent, 3, U8500_I2C3_BASE, IRQ_DB8500_I2C3, pdata)
-#define db8500_add_i2c4(parent, pdata) \
- dbx500_add_i2c(parent, 4, U8500_I2C4_BASE, IRQ_DB8500_I2C4, pdata)
-
-#define db8500_add_msp0_spi(parent, pdata) \
- dbx500_add_msp_spi(parent, "msp0", U8500_MSP0_BASE, \
- IRQ_DB8500_MSP0, pdata)
-#define db8500_add_msp1_spi(parent, pdata) \
- dbx500_add_msp_spi(parent, "msp1", U8500_MSP1_BASE, \
- IRQ_DB8500_MSP1, pdata)
-#define db8500_add_msp2_spi(parent, pdata) \
- dbx500_add_msp_spi(parent, "msp2", U8500_MSP2_BASE, \
- IRQ_DB8500_MSP2, pdata)
-#define db8500_add_msp3_spi(parent, pdata) \
- dbx500_add_msp_spi(parent, "msp3", U8500_MSP3_BASE, \
- IRQ_DB8500_MSP1, pdata)
-
-#define db8500_add_rtc(parent) \
- dbx500_add_rtc(parent, U8500_RTC_BASE, IRQ_DB8500_RTC);
-
-#define db8500_add_usb(parent, rx_cfg, tx_cfg) \
- ux500_add_usb(parent, U8500_USBOTG_BASE, \
- IRQ_DB8500_USBOTG, rx_cfg, tx_cfg)
-
-#define db8500_add_sdi0(parent, pdata, pid) \
- dbx500_add_sdi(parent, "sdi0", U8500_SDI0_BASE, \
- IRQ_DB8500_SDMMC0, pdata, pid)
-#define db8500_add_sdi1(parent, pdata, pid) \
- dbx500_add_sdi(parent, "sdi1", U8500_SDI1_BASE, \
- IRQ_DB8500_SDMMC1, pdata, pid)
-#define db8500_add_sdi2(parent, pdata, pid) \
- dbx500_add_sdi(parent, "sdi2", U8500_SDI2_BASE, \
- IRQ_DB8500_SDMMC2, pdata, pid)
-#define db8500_add_sdi3(parent, pdata, pid) \
- dbx500_add_sdi(parent, "sdi3", U8500_SDI3_BASE, \
- IRQ_DB8500_SDMMC3, pdata, pid)
-#define db8500_add_sdi4(parent, pdata, pid) \
- dbx500_add_sdi(parent, "sdi4", U8500_SDI4_BASE, \
- IRQ_DB8500_SDMMC4, pdata, pid)
-#define db8500_add_sdi5(parent, pdata, pid) \
- dbx500_add_sdi(parent, "sdi5", U8500_SDI5_BASE, \
- IRQ_DB8500_SDMMC5, pdata, pid)
-
-#define db8500_add_ssp0(parent, pdata) \
- db8500_add_ssp(parent, "ssp0", U8500_SSP0_BASE, \
- IRQ_DB8500_SSP0, pdata)
-#define db8500_add_ssp1(parent, pdata) \
- db8500_add_ssp(parent, "ssp1", U8500_SSP1_BASE, \
- IRQ_DB8500_SSP1, pdata)
-
-#define db8500_add_spi0(parent, pdata) \
- dbx500_add_spi(parent, "spi0", U8500_SPI0_BASE, \
- IRQ_DB8500_SPI0, pdata, 0)
-#define db8500_add_spi1(parent, pdata) \
- dbx500_add_spi(parent, "spi1", U8500_SPI1_BASE, \
- IRQ_DB8500_SPI1, pdata, 0)
-#define db8500_add_spi2(parent, pdata) \
- dbx500_add_spi(parent, "spi2", U8500_SPI2_BASE, \
- IRQ_DB8500_SPI2, pdata, 0)
-#define db8500_add_spi3(parent, pdata) \
- dbx500_add_spi(parent, "spi3", U8500_SPI3_BASE, \
- IRQ_DB8500_SPI3, pdata, 0)
-
-#define db8500_add_uart0(parent, pdata) \
- dbx500_add_uart(parent, "uart0", U8500_UART0_BASE, \
- IRQ_DB8500_UART0, pdata)
-#define db8500_add_uart1(parent, pdata) \
- dbx500_add_uart(parent, "uart1", U8500_UART1_BASE, \
- IRQ_DB8500_UART1, pdata)
-#define db8500_add_uart2(parent, pdata) \
- dbx500_add_uart(parent, "uart2", U8500_UART2_BASE, \
- IRQ_DB8500_UART2, pdata)
-
-#define db8500_add_cryp1(parent, pdata) \
- dbx500_add_cryp1(parent, -1, U8500_CRYP1_BASE, IRQ_DB8500_CRYP1, pdata)
-#define db8500_add_hash1(parent, pdata) \
- dbx500_add_hash1(parent, -1, U8500_HASH1_BASE, pdata)
#endif
diff --git a/arch/arm/mach-ux500/devices.h b/arch/arm/mach-ux500/devices.h
index cbc6f1e4104d..5bca7c605cd6 100644
--- a/arch/arm/mach-ux500/devices.h
+++ b/arch/arm/mach-ux500/devices.h
@@ -10,14 +10,6 @@
struct platform_device;
struct amba_device;
-extern struct platform_device u8500_gpio_devs[];
-
extern struct amba_device ux500_pl031_device;
-extern struct platform_device ux500_hash1_device;
-extern struct platform_device ux500_cryp1_device;
-
-extern struct platform_device u8500_dma40_device;
-extern struct platform_device ux500_ske_keypad_device;
-
#endif
diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
index 656324aad18e..bdb356498a74 100644
--- a/arch/arm/mach-ux500/setup.h
+++ b/arch/arm/mach-ux500/setup.h
@@ -24,7 +24,6 @@ extern void __init u8500_map_io(void);
extern struct device * __init u8500_init_devices(void);
extern void __init ux500_init_irq(void);
-extern void __init ux500_init_late(void);
extern struct device *ux500_soc_device_init(const char *soc_id);
diff --git a/arch/arm/mach-ux500/timer.c b/arch/arm/mach-ux500/timer.c
index b6bd0efcbe64..05a4ff78b3bd 100644
--- a/arch/arm/mach-ux500/timer.c
+++ b/arch/arm/mach-ux500/timer.c
@@ -97,8 +97,8 @@ dt_fail:
 * sched_clock with a higher rating than the MTU since it is always-on.
*
*/
-
- nmdk_timer_init(mtu_timer_base, IRQ_MTU0);
+ if (!of_have_populated_dt())
+ nmdk_timer_init(mtu_timer_base, IRQ_MTU0);
clksrc_dbx500_prcmu_init(prcmu_timer_base);
ux500_twd_init();
}
diff --git a/arch/arm/mach-ux500/usb.c b/arch/arm/mach-ux500/usb.c
deleted file mode 100644
index b7bd8d3a5507..000000000000
--- a/arch/arm/mach-ux500/usb.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2011
- *
- * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
- * License terms: GNU General Public License (GPL) version 2
- */
-#include <linux/platform_device.h>
-#include <linux/usb/musb.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_data/usb-musb-ux500.h>
-#include <linux/platform_data/dma-ste-dma40.h>
-
-#include "db8500-regs.h"
-
-#define MUSB_DMA40_RX_CH { \
- .mode = STEDMA40_MODE_LOGICAL, \
- .dir = DMA_DEV_TO_MEM, \
- }
-
-#define MUSB_DMA40_TX_CH { \
- .mode = STEDMA40_MODE_LOGICAL, \
- .dir = DMA_MEM_TO_DEV, \
- }
-
-static struct stedma40_chan_cfg musb_dma_rx_ch[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS]
- = {
- MUSB_DMA40_RX_CH,
- MUSB_DMA40_RX_CH,
- MUSB_DMA40_RX_CH,
- MUSB_DMA40_RX_CH,
- MUSB_DMA40_RX_CH,
- MUSB_DMA40_RX_CH,
- MUSB_DMA40_RX_CH,
- MUSB_DMA40_RX_CH
-};
-
-static struct stedma40_chan_cfg musb_dma_tx_ch[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS]
- = {
- MUSB_DMA40_TX_CH,
- MUSB_DMA40_TX_CH,
- MUSB_DMA40_TX_CH,
- MUSB_DMA40_TX_CH,
- MUSB_DMA40_TX_CH,
- MUSB_DMA40_TX_CH,
- MUSB_DMA40_TX_CH,
- MUSB_DMA40_TX_CH,
-};
-
-static void *ux500_dma_rx_param_array[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS] = {
- &musb_dma_rx_ch[0],
- &musb_dma_rx_ch[1],
- &musb_dma_rx_ch[2],
- &musb_dma_rx_ch[3],
- &musb_dma_rx_ch[4],
- &musb_dma_rx_ch[5],
- &musb_dma_rx_ch[6],
- &musb_dma_rx_ch[7]
-};
-
-static void *ux500_dma_tx_param_array[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS] = {
- &musb_dma_tx_ch[0],
- &musb_dma_tx_ch[1],
- &musb_dma_tx_ch[2],
- &musb_dma_tx_ch[3],
- &musb_dma_tx_ch[4],
- &musb_dma_tx_ch[5],
- &musb_dma_tx_ch[6],
- &musb_dma_tx_ch[7]
-};
-
-static struct ux500_musb_board_data musb_board_data = {
- .dma_rx_param_array = ux500_dma_rx_param_array,
- .dma_tx_param_array = ux500_dma_tx_param_array,
- .dma_filter = stedma40_filter,
-};
-
-static struct musb_hdrc_platform_data musb_platform_data = {
- .mode = MUSB_OTG,
- .board_data = &musb_board_data,
-};
-
-static struct resource usb_resources[] = {
- [0] = {
- .name = "usb-mem",
- .flags = IORESOURCE_MEM,
- },
-
- [1] = {
- .name = "mc", /* hard-coded in musb */
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device ux500_musb_device = {
- .name = "musb-ux500",
- .id = 0,
- .dev = {
- .platform_data = &musb_platform_data,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
- .num_resources = ARRAY_SIZE(usb_resources),
- .resource = usb_resources,
-};
-
-static inline void ux500_usb_dma_update_rx_ch_config(int *dev_type)
-{
- u32 idx;
-
- for (idx = 0; idx < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; idx++)
- musb_dma_rx_ch[idx].dev_type = dev_type[idx];
-}
-
-static inline void ux500_usb_dma_update_tx_ch_config(int *dev_type)
-{
- u32 idx;
-
- for (idx = 0; idx < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; idx++)
- musb_dma_tx_ch[idx].dev_type = dev_type[idx];
-}
-
-void ux500_add_usb(struct device *parent, resource_size_t base, int irq,
- int *dma_rx_cfg, int *dma_tx_cfg)
-{
- ux500_musb_device.resource[0].start = base;
- ux500_musb_device.resource[0].end = base + SZ_64K - 1;
- ux500_musb_device.resource[1].start = irq;
- ux500_musb_device.resource[1].end = irq;
-
- ux500_usb_dma_update_rx_ch_config(dma_rx_cfg);
- ux500_usb_dma_update_tx_ch_config(dma_tx_cfg);
-
- ux500_musb_device.dev.parent = parent;
-
- platform_device_register(&ux500_musb_device);
-}
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 365795447804..4a70be485ff8 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -1,17 +1,16 @@
config ARCH_VEXPRESS
bool "ARM Ltd. Versatile Express family" if ARCH_MULTI_V7
select ARCH_REQUIRE_GPIOLIB
+ select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
select ARM_TIMER_SP804
- select CLKDEV_LOOKUP
select COMMON_CLK
select COMMON_CLK_VERSATILE
select CPU_V7
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
- select HAVE_CLK
select HAVE_PATA_PLATFORM
select HAVE_SMP
select ICST
@@ -66,10 +65,22 @@ config ARCH_VEXPRESS_DCSCB
This is needed to provide CPU and cluster power management
on RTSM implementing big.LITTLE.
+config ARCH_VEXPRESS_SPC
+ bool "Versatile Express Serial Power Controller (SPC)"
+ select ARCH_HAS_CPUFREQ
+ select ARCH_HAS_OPP
+ select PM_OPP
+ help
+ The TC2 (A15x2 A7x3) versatile express core tile integrates a logic
+ block called Serial Power Controller (SPC) that provides the interface
+ between the dual cluster test-chip and the M3 microcontroller that
+ carries out power management.
+
config ARCH_VEXPRESS_TC2_PM
bool "Versatile Express TC2 power management"
depends on MCPM
select ARM_CCI
+ select ARCH_VEXPRESS_SPC
help
Support for CPU and cluster power management on Versatile Express
with a TC2 (A15x2 A7x3) big.LITTLE core tile.
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 505e64ab3eae..0997e0b7494c 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -8,7 +8,8 @@ obj-y := v2m.o
obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o
CFLAGS_dcscb.o += -march=armv7-a
-obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o spc.o
+obj-$(CONFIG_ARCH_VEXPRESS_SPC) += spc.o
+obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o
CFLAGS_tc2_pm.o += -march=armv7-a
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 3a6384c6c435..14d499688736 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -133,38 +133,8 @@ static void dcscb_power_down(void)
if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
arch_spin_unlock(&dcscb_lock);
- /*
- * Flush all cache levels for this cluster.
- *
- * To do so we do:
- * - Clear the SCTLR.C bit to prevent further cache allocations
- * - Flush the whole cache
- * - Clear the ACTLR "SMP" bit to disable local coherency
- *
- * Let's do it in the safest possible way i.e. with
- * no memory access within the following sequence
- * including to the stack.
- *
- * Note: fp is preserved to the stack explicitly prior doing
- * this since adding it to the clobber list is incompatible
- * with having CONFIG_FRAME_POINTER=y.
- */
- asm volatile(
- "str fp, [sp, #-4]! \n\t"
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_all \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb \n\t"
- "ldr fp, [sp], #4"
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","lr","memory");
+ /* Flush all cache levels for this cluster. */
+ v7_exit_coherency_flush(all);
/*
* This is a harmless no-op. On platforms with a real
@@ -183,26 +153,8 @@ static void dcscb_power_down(void)
} else {
arch_spin_unlock(&dcscb_lock);
- /*
- * Flush the local CPU cache.
- * Let's do it in the safest possible way as above.
- */
- asm volatile(
- "str fp, [sp, #-4]! \n\t"
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_louis \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb \n\t"
- "ldr fp, [sp], #4"
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","lr","memory");
+ /* Disable and flush the local CPU cache. */
+ v7_exit_coherency_flush(louis);
}
__mcpm_cpu_down(cpu, cluster);
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index eefb029197ca..033d34dcbd3f 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -17,14 +17,31 @@
* GNU General Public License for more details.
*/
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/slab.h>
+#include <linux/semaphore.h>
#include <asm/cacheflush.h>
#define SPCLOG "vexpress-spc: "
+#define PERF_LVL_A15 0x00
+#define PERF_REQ_A15 0x04
+#define PERF_LVL_A7 0x08
+#define PERF_REQ_A7 0x0c
+#define COMMS 0x10
+#define COMMS_REQ 0x14
+#define PWC_STATUS 0x18
+#define PWC_FLAG 0x1c
+
/* SPC wake-up IRQs status and mask */
#define WAKE_INT_MASK 0x24
#define WAKE_INT_RAW 0x28
@@ -36,12 +53,45 @@
#define A15_BX_ADDR0 0x68
#define A7_BX_ADDR0 0x78
+/* SPC system config interface registers */
+#define SYSCFG_WDATA 0x70
+#define SYSCFG_RDATA 0x74
+
+/* A15/A7 OPP virtual register base */
+#define A15_PERFVAL_BASE 0xC10
+#define A7_PERFVAL_BASE 0xC30
+
+/* Config interface control bits */
+#define SYSCFG_START (1 << 31)
+#define SYSCFG_SCC (6 << 20)
+#define SYSCFG_STAT (14 << 20)
+
/* wake-up interrupt masks */
#define GBL_WAKEUP_INT_MSK (0x3 << 10)
/* TC2 static dual-cluster configuration */
#define MAX_CLUSTERS 2
+/*
+ * Even though the SPC takes max 3-5 ms to complete any OPP/COMMS
+ * operation, the operation could start just before a jiffy is about
+ * to be incremented, so the timeout is set to 20 ms (2 jiffies at 100 Hz).
+ */
+#define TIMEOUT_US 20000
+
+#define MAX_OPPS 8
+#define CA15_DVFS 0
+#define CA7_DVFS 1
+#define SPC_SYS_CFG 2
+#define STAT_COMPLETE(type) ((1 << 0) << (type << 2))
+#define STAT_ERR(type) ((1 << 1) << (type << 2))
+#define RESPONSE_MASK(type) (STAT_COMPLETE(type) | STAT_ERR(type))
+
+struct ve_spc_opp {
+ unsigned long freq;
+ unsigned long u_volt;
+};
+
struct ve_spc_drvdata {
void __iomem *baseaddr;
/*
@@ -49,6 +99,12 @@ struct ve_spc_drvdata {
* It corresponds to A15 processors MPIDR[15:8] bitfield
*/
u32 a15_clusid;
+ uint32_t cur_rsp_mask;
+ uint32_t cur_rsp_stat;
+ struct semaphore sem;
+ struct completion done;
+ struct ve_spc_opp *opps[MAX_CLUSTERS];
+ int num_opps[MAX_CLUSTERS];
};
static struct ve_spc_drvdata *info;
@@ -157,8 +213,197 @@ void ve_spc_powerdown(u32 cluster, bool enable)
writel_relaxed(enable, info->baseaddr + pwdrn_reg);
}
-int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid)
+static int ve_spc_get_performance(int cluster, u32 *freq)
+{
+ struct ve_spc_opp *opps = info->opps[cluster];
+ u32 perf_cfg_reg = 0;
+ u32 perf;
+
+ perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7;
+
+ perf = readl_relaxed(info->baseaddr + perf_cfg_reg);
+ if (perf >= info->num_opps[cluster])
+ return -EINVAL;
+
+ opps += perf;
+ *freq = opps->freq;
+
+ return 0;
+}
+
+/* find closest match to given frequency in OPP table */
+static int ve_spc_round_performance(int cluster, u32 freq)
+{
+ int idx, max_opp = info->num_opps[cluster];
+ struct ve_spc_opp *opps = info->opps[cluster];
+ u32 fmin = 0, fmax = ~0, ftmp;
+
+ freq /= 1000; /* OPP entries in kHz */
+ for (idx = 0; idx < max_opp; idx++, opps++) {
+ ftmp = opps->freq;
+ if (ftmp >= freq) {
+ if (ftmp <= fmax)
+ fmax = ftmp;
+ } else {
+ if (ftmp >= fmin)
+ fmin = ftmp;
+ }
+ }
+ if (fmax != ~0)
+ return fmax * 1000;
+ else
+ return fmin * 1000;
+}
+
+static int ve_spc_find_performance_index(int cluster, u32 freq)
+{
+ int idx, max_opp = info->num_opps[cluster];
+ struct ve_spc_opp *opps = info->opps[cluster];
+
+ for (idx = 0; idx < max_opp; idx++, opps++)
+ if (opps->freq == freq)
+ break;
+ return (idx == max_opp) ? -EINVAL : idx;
+}
+
+static int ve_spc_waitforcompletion(int req_type)
+{
+ int ret = wait_for_completion_interruptible_timeout(
+ &info->done, usecs_to_jiffies(TIMEOUT_US));
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else if (ret > 0)
+ ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO;
+ return ret;
+}
+
+static int ve_spc_set_performance(int cluster, u32 freq)
+{
+ u32 perf_cfg_reg, perf_stat_reg;
+ int ret, perf, req_type;
+
+ if (cluster_is_a15(cluster)) {
+ req_type = CA15_DVFS;
+ perf_cfg_reg = PERF_LVL_A15;
+ perf_stat_reg = PERF_REQ_A15;
+ } else {
+ req_type = CA7_DVFS;
+ perf_cfg_reg = PERF_LVL_A7;
+ perf_stat_reg = PERF_REQ_A7;
+ }
+
+ perf = ve_spc_find_performance_index(cluster, freq);
+
+ if (perf < 0)
+ return perf;
+
+ if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
+ return -ETIME;
+
+ init_completion(&info->done);
+ info->cur_rsp_mask = RESPONSE_MASK(req_type);
+
+ writel(perf, info->baseaddr + perf_cfg_reg);
+ ret = ve_spc_waitforcompletion(req_type);
+
+ info->cur_rsp_mask = 0;
+ up(&info->sem);
+
+ return ret;
+}
+
+static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data)
+{
+ int ret;
+
+ if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
+ return -ETIME;
+
+ init_completion(&info->done);
+ info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG);
+
+ /* Set the control value */
+ writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS);
+ ret = ve_spc_waitforcompletion(SPC_SYS_CFG);
+
+ if (ret == 0)
+ *data = readl(info->baseaddr + SYSCFG_RDATA);
+
+ info->cur_rsp_mask = 0;
+ up(&info->sem);
+
+ return ret;
+}
+
+static irqreturn_t ve_spc_irq_handler(int irq, void *data)
+{
+ struct ve_spc_drvdata *drv_data = data;
+ uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS);
+
+ if (info->cur_rsp_mask & status) {
+ info->cur_rsp_stat = status;
+ complete(&drv_data->done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * +--------------------------+
+ * | 31 20 | 19 0 |
+ * +--------------------------+
+ * | u_volt | freq(kHz) |
+ * +--------------------------+
+ */
+#define MULT_FACTOR 20
+#define VOLT_SHIFT 20
+#define FREQ_MASK (0xFFFFF)
+static int ve_spc_populate_opps(uint32_t cluster)
{
+ uint32_t data = 0, off, ret, idx;
+ struct ve_spc_opp *opps;
+
+ opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL);
+ if (!opps)
+ return -ENOMEM;
+
+ info->opps[cluster] = opps;
+
+ off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE;
+ for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) {
+ ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
+ if (!ret) {
+ opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
+ opps->u_volt = data >> VOLT_SHIFT;
+ } else {
+ break;
+ }
+ }
+ info->num_opps[cluster] = idx;
+
+ return ret;
+}
+
+static int ve_init_opp_table(struct device *cpu_dev)
+{
+ int cluster = topology_physical_package_id(cpu_dev->id);
+ int idx, ret = 0, max_opp = info->num_opps[cluster];
+ struct ve_spc_opp *opps = info->opps[cluster];
+
+ for (idx = 0; idx < max_opp; idx++, opps++) {
+ ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to add opp %lu %lu\n",
+ opps->freq, opps->u_volt);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
+{
+ int ret;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
pr_err(SPCLOG "unable to allocate mem\n");
@@ -168,6 +413,25 @@ int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid)
info->baseaddr = baseaddr;
info->a15_clusid = a15_clusid;
+ if (irq <= 0) {
+ pr_err(SPCLOG "Invalid IRQ %d\n", irq);
+ kfree(info);
+ return -EINVAL;
+ }
+
+ init_completion(&info->done);
+
+ readl_relaxed(info->baseaddr + PWC_STATUS);
+
+ ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH
+ | IRQF_ONESHOT, "vexpress-spc", info);
+ if (ret) {
+ pr_err(SPCLOG "IRQ %d request failed\n", irq);
+ kfree(info);
+ return -ENODEV;
+ }
+
+ sema_init(&info->sem, 1);
/*
* Multi-cluster systems may need this data when non-coherent, during
* cluster power-up/power-down. Make sure driver info reaches main
@@ -178,3 +442,103 @@ int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid)
return 0;
}
+
+struct clk_spc {
+ struct clk_hw hw;
+ int cluster;
+};
+
+#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)
+static unsigned long spc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_spc *spc = to_clk_spc(hw);
+ u32 freq;
+
+ if (ve_spc_get_performance(spc->cluster, &freq))
+ return -EIO;
+
+ return freq * 1000;
+}
+
+static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *parent_rate)
+{
+ struct clk_spc *spc = to_clk_spc(hw);
+
+ return ve_spc_round_performance(spc->cluster, drate);
+}
+
+static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_spc *spc = to_clk_spc(hw);
+
+ return ve_spc_set_performance(spc->cluster, rate / 1000);
+}
+
+static struct clk_ops clk_spc_ops = {
+ .recalc_rate = spc_recalc_rate,
+ .round_rate = spc_round_rate,
+ .set_rate = spc_set_rate,
+};
+
+static struct clk *ve_spc_clk_register(struct device *cpu_dev)
+{
+ struct clk_init_data init;
+ struct clk_spc *spc;
+
+ spc = kzalloc(sizeof(*spc), GFP_KERNEL);
+ if (!spc) {
+ pr_err("could not allocate spc clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spc->hw.init = &init;
+ spc->cluster = topology_physical_package_id(cpu_dev->id);
+
+ init.name = dev_name(cpu_dev);
+ init.ops = &clk_spc_ops;
+ init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+ init.num_parents = 0;
+
+ return devm_clk_register(cpu_dev, &spc->hw);
+}
+
+static int __init ve_spc_clk_init(void)
+{
+ int cpu;
+ struct clk *clk;
+
+ if (!info)
+ return 0; /* Continue only if SPC is initialised */
+
+ if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
+ pr_err("failed to build OPP table\n");
+ return -ENODEV;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct device *cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_warn("failed to get cpu%d device\n", cpu);
+ continue;
+ }
+ clk = ve_spc_clk_register(cpu_dev);
+ if (IS_ERR(clk)) {
+ pr_warn("failed to register cpu%d clock\n", cpu);
+ continue;
+ }
+ if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
+ pr_warn("failed to register cpu%d clock lookup\n", cpu);
+ continue;
+ }
+
+ if (ve_init_opp_table(cpu_dev))
+ pr_warn("failed to initialise cpu%d opp table\n", cpu);
+ }
+
+ platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
+ return 0;
+}
+module_init(ve_spc_clk_init);
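For readers following the PWC_STATUS handshake between ve_spc_waitforcompletion() and ve_spc_irq_handler() above, the sketch below is a minimal stand-alone user-space illustration (not part of the patch) of how the STAT_COMPLETE/STAT_ERR/RESPONSE_MASK macros lay out one completion/error bit pair per request type; the macro bodies are copied from the hunk, everything else (main, printf loop) is illustrative only.

#include <stdio.h>

/* Same bit layout as the macros added to spc.c in this patch. */
#define STAT_COMPLETE(type)	((1 << 0) << ((type) << 2))
#define STAT_ERR(type)		((1 << 1) << ((type) << 2))
#define RESPONSE_MASK(type)	(STAT_COMPLETE(type) | STAT_ERR(type))

int main(void)
{
	/* Request types as defined in the patch: CA15_DVFS=0, CA7_DVFS=1, SPC_SYS_CFG=2. */
	for (int type = 0; type < 3; type++)
		printf("type %d: complete=0x%03x err=0x%03x mask=0x%03x\n",
		       type, STAT_COMPLETE(type), STAT_ERR(type),
		       RESPONSE_MASK(type));
	return 0;
}

With this layout the IRQ handler can check a single masked read of PWC_STATUS (bits 0/1, 4/5 or 8/9) to decide whether the pending request completed or failed.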
diff --git a/arch/arm/mach-vexpress/spc.h b/arch/arm/mach-vexpress/spc.h
index 5f7e4a446a17..dbd44c3720f9 100644
--- a/arch/arm/mach-vexpress/spc.h
+++ b/arch/arm/mach-vexpress/spc.h
@@ -15,7 +15,7 @@
#ifndef __SPC_H_
#define __SPC_H_
-int __init ve_spc_init(void __iomem *base, u32 a15_clusid);
+int __init ve_spc_init(void __iomem *base, u32 a15_clusid, int irq);
void ve_spc_global_wakeup_irq(bool set);
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index e6eb48192912..05a364c5077a 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>
@@ -156,32 +157,7 @@ static void tc2_pm_down(u64 residency)
: : "r" (0x400) );
}
- /*
- * We need to disable and flush the whole (L1 and L2) cache.
- * Let's do it in the safest possible way i.e. with
- * no memory access within the following sequence
- * including the stack.
- *
- * Note: fp is preserved to the stack explicitly prior doing
- * this since adding it to the clobber list is incompatible
- * with having CONFIG_FRAME_POINTER=y.
- */
- asm volatile(
- "str fp, [sp, #-4]! \n\t"
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_all \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb \n\t"
- "ldr fp, [sp], #4"
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","lr","memory");
+ v7_exit_coherency_flush(all);
cci_disable_port_by_cpu(mpidr);
@@ -197,26 +173,7 @@ static void tc2_pm_down(u64 residency)
arch_spin_unlock(&tc2_pm_lock);
- /*
- * We need to disable and flush only the L1 cache.
- * Let's do it in the safest possible way as above.
- */
- asm volatile(
- "str fp, [sp, #-4]! \n\t"
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_louis \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb \n\t"
- "ldr fp, [sp], #4"
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","lr","memory");
+ v7_exit_coherency_flush(louis);
}
__mcpm_cpu_down(cpu, cluster);
@@ -311,7 +268,7 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
static int __init tc2_pm_init(void)
{
- int ret;
+ int ret, irq;
void __iomem *scc;
u32 a15_cluster_id, a7_cluster_id, sys_info;
struct device_node *np;
@@ -336,13 +293,15 @@ static int __init tc2_pm_init(void)
tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;
+ irq = irq_of_parse_and_map(np, 0);
+
/*
* A subset of the SCC registers is also used to communicate
* with the SPC (power controller). We need to be able to
* drive it very early in the boot process to power up
* processors, so we initialize the SPC driver here.
*/
- ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id);
+ ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
if (ret)
return ret;
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 95a469e23e37..4f8b8cb17ff5 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -1,12 +1,10 @@
/*
* Versatile Express V2M Motherboard Support
*/
-#include <linux/clocksource.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/amba/mmci.h>
#include <linux/io.h>
-#include <linux/clocksource.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/of_address.h>
@@ -22,7 +20,6 @@
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/vexpress.h>
-#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <asm/mach-types.h>
@@ -422,16 +419,8 @@ void __init v2m_dt_init_early(void)
pr_warning("vexpress: DT HBI (%x) is not matching "
"hardware (%x)!\n", dt_hbi, hbi);
}
-}
-
-static void __init v2m_dt_timer_init(void)
-{
- of_clk_init(NULL);
- clocksource_of_init();
-
- versatile_sched_clock_init(vexpress_get_24mhz_clock_base(),
- 24000000);
+ versatile_sched_clock_init(vexpress_get_24mhz_clock_base(), 24000000);
}
static const struct of_device_id v2m_dt_bus_match[] __initconst = {
@@ -458,6 +447,5 @@ DT_MACHINE_START(VEXPRESS_DT, "ARM-Versatile Express")
.smp_init = smp_init_ops(vexpress_smp_init_ops),
.map_io = v2m_dt_map_io,
.init_early = v2m_dt_init_early,
- .init_time = v2m_dt_timer_init,
.init_machine = v2m_dt_init,
MACHINE_END
diff --git a/arch/arm/mach-vt8500/Kconfig b/arch/arm/mach-vt8500/Kconfig
index 9b252934b206..927be93b692e 100644
--- a/arch/arm/mach-vt8500/Kconfig
+++ b/arch/arm/mach-vt8500/Kconfig
@@ -5,7 +5,6 @@ config ARCH_VT8500
select CLKDEV_LOOKUP
select CLKSRC_OF
select GENERIC_CLOCKEVENTS
- select HAVE_CLK
select VT8500_TIMER
select PINCTRL
help
diff --git a/arch/arm/mach-vt8500/common.h b/arch/arm/mach-vt8500/common.h
deleted file mode 100644
index 087787af62f1..000000000000
--- a/arch/arm/mach-vt8500/common.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* linux/arch/arm/mach-vt8500/dt_common.h
- *
- * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ARCH_ARM_MACH_VT8500_DT_COMMON_H
-#define __ARCH_ARM_MACH_VT8500_DT_COMMON_H
-
-#include <linux/of.h>
-
-/* defined in drivers/clk/clk-vt8500.c */
-void __init vtwm_clk_init(void __iomem *pmc_base);
-
-#endif
diff --git a/arch/arm/mach-vt8500/vt8500.c b/arch/arm/mach-vt8500/vt8500.c
index eefaa60d6614..4a73464cb11b 100644
--- a/arch/arm/mach-vt8500/vt8500.c
+++ b/arch/arm/mach-vt8500/vt8500.c
@@ -18,7 +18,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/clocksource.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/reboot.h>
@@ -33,8 +32,6 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include "common.h"
-
#define LEGACY_GPIO_BASE 0xD8110000
#define LEGACY_PMC_BASE 0xD8130000
@@ -162,8 +159,6 @@ void __init vt8500_init(void)
else
pr_err("%s: PMC Hibernation register could not be remapped, not enabling power off!\n", __func__);
- vtwm_clk_init(pmc_base);
-
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
@@ -180,7 +175,6 @@ DT_MACHINE_START(WMT_DT, "VIA/Wondermedia SoC (Device Tree Support)")
.dt_compat = vt8500_dt_compat,
.map_io = vt8500_map_io,
.init_machine = vt8500_init,
- .init_time = clocksource_of_init,
.restart = vt8500_restart,
MACHINE_END
diff --git a/arch/arm/mach-w90x900/include/mach/timex.h b/arch/arm/mach-w90x900/include/mach/timex.h
deleted file mode 100644
index 164dce0b64db..000000000000
--- a/arch/arm/mach-w90x900/include/mach/timex.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * arch/arm/mach-w90x900/include/mach/timex.h
- *
- * Copyright (c) 2008 Nuvoton technology corporation
- * All rights reserved.
- *
- * Wan ZongShun <mcuos.com@gmail.com>
- *
- * Based on arch/arm/mach-s3c2410/include/mach/timex.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-#ifndef __ASM_ARCH_TIMEX_H
-#define __ASM_ARCH_TIMEX_H
-
-/* CLOCK_TICK_RATE Now, I don't use it. */
-
-#define CLOCK_TICK_RATE 15000000
-
-#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig
index 04f8a4a6e755..6b04260aa142 100644
--- a/arch/arm/mach-zynq/Kconfig
+++ b/arch/arm/mach-zynq/Kconfig
@@ -13,5 +13,6 @@ config ARCH_ZYNQ
select HAVE_SMP
select SPARSE_IRQ
select CADENCE_TTC_TIMER
+ select ARM_GLOBAL_TIMER
help
Support for Xilinx Zynq ARM Cortex A9 Platform
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 5f252569c689..9a7bd137c8fd 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -44,6 +44,10 @@ static struct of_device_id zynq_of_bus_ids[] __initdata = {
{}
};
+static struct platform_device zynq_cpuidle_device = {
+ .name = "cpuidle-zynq",
+};
+
/**
* zynq_init_machine - System specific initialization, intended to be
* called from board specific initialization.
@@ -56,6 +60,8 @@ static void __init zynq_init_machine(void)
l2x0_of_init(0x02060000, 0xF0F0FFFF);
of_platform_bus_probe(NULL, zynq_of_bus_ids, NULL);
+
+ platform_device_register(&zynq_cpuidle_device);
}
static void __init zynq_timer_init(void)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index cd2c88e7a8f7..1f8fed94c2a4 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -952,3 +952,9 @@ config ARCH_HAS_BARRIERS
help
This option allows the use of custom mandatory barriers
included via the mach/barriers.h file.
+
+config ARCH_SUPPORTS_BIG_ENDIAN
+ bool
+ help
+ This option specifies the architecture can support big endian
+ operation.
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 80741992a9fc..3815a8262af0 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -38,9 +38,8 @@ ENTRY(v6_early_abort)
bne do_DataAbort
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r3, r3
-#endif
+ ARM_BE8(rev r3, r3)
+
do_ldrd_abort tmp=ip, insn=r3
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 6f4585b89078..924036473b16 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -25,6 +25,7 @@
#include <asm/cp15.h>
#include <asm/system_info.h>
#include <asm/unaligned.h>
+#include <asm/opcodes.h>
#include "fault.h"
@@ -762,21 +763,25 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (thumb_mode(regs)) {
u16 *ptr = (u16 *)(instrptr & ~1);
fault = probe_kernel_address(ptr, tinstr);
+ tinstr = __mem_to_opcode_thumb16(tinstr);
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
u16 tinst2 = 0;
fault = probe_kernel_address(ptr + 1, tinst2);
- instr = (tinstr << 16) | tinst2;
+ tinst2 = __mem_to_opcode_thumb16(tinst2);
+ instr = __opcode_thumb32_compose(tinstr, tinst2);
thumb2_32b = 1;
} else {
isize = 2;
instr = thumb2arm(tinstr);
}
}
- } else
+ } else {
fault = probe_kernel_address(instrptr, instr);
+ instr = __mem_to_opcode_arm(instr);
+ }
if (fault) {
type = TYPE_FAULT;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1272ed202dde..79f8b39801a8 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
static u64 get_coherent_dma_mask(struct device *dev)
{
- u64 mask = (u64)arm_dma_limit;
+ u64 mask = (u64)DMA_BIT_MASK(32);
if (dev) {
mask = dev->coherent_dma_mask;
@@ -173,10 +173,30 @@ static u64 get_coherent_dma_mask(struct device *dev)
return 0;
}
- if ((~mask) & (u64)arm_dma_limit) {
- dev_warn(dev, "coherent DMA mask %#llx is smaller "
- "than system GFP_DMA mask %#llx\n",
- mask, (u64)arm_dma_limit);
+ /*
+ * If the mask allows for more memory than we can address,
+ * and we actually have that much memory, then fail the
+ * allocation.
+ */
+ if (sizeof(mask) != sizeof(dma_addr_t) &&
+ mask > (dma_addr_t)~0 &&
+ dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+ dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+ mask);
+ dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+ return 0;
+ }
+
+ /*
+ * Now check that the mask, when translated to a PFN,
+ * fits within the allowable addresses which we can
+ * allocate.
+ */
+ if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+ dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+ mask,
+ dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+ arm_dma_pfn_limit + 1);
return 0;
}
}
@@ -687,7 +707,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, struct dma_attrs *attrs)
{
- pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
@@ -700,7 +720,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
- pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
@@ -1007,8 +1027,27 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
*/
int dma_supported(struct device *dev, u64 mask)
{
- if (mask < (u64)arm_dma_limit)
+ unsigned long limit;
+
+ /*
+ * If the mask allows for more memory than we can address,
+ * and we actually have that much memory, then we must
+ * indicate that DMA to this device is not supported.
+ */
+ if (sizeof(mask) != sizeof(dma_addr_t) &&
+ mask > (dma_addr_t)~0 &&
+ dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
return 0;
+
+ /*
+ * Translate the device's DMA mask to a PFN limit. This
+ * PFN number includes the page which we can DMA to.
+ */
+ limit = dma_to_pfn(dev, mask);
+
+ if (limit < arm_dma_pfn_limit)
+ return 0;
+
return 1;
}
EXPORT_SYMBOL(dma_supported);
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 83cb3ac27095..8e0e52eb76b5 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -10,6 +10,7 @@
#include <asm/system_info.h>
pgd_t *idmap_pgd;
+phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
@@ -67,8 +68,9 @@ static void identity_mapping_add(pgd_t *pgd, const char *text_start,
unsigned long addr, end;
unsigned long next;
- addr = virt_to_phys(text_start);
- end = virt_to_phys(text_end);
+ addr = virt_to_idmap(text_start);
+ end = virt_to_idmap(text_end);
+ pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);
prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
@@ -90,8 +92,6 @@ static int __init init_static_idmap(void)
if (!idmap_pgd)
return -ENOMEM;
- pr_info("Setting up static identity map for 0x%p - 0x%p\n",
- __idmap_text_start, __idmap_text_end);
identity_mapping_add(idmap_pgd, __idmap_text_start,
__idmap_text_end, 0);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 18ec4c504abf..3e8f106ee5fe 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -76,14 +76,6 @@ static int __init parse_tag_initrd2(const struct tag *tag)
__tagtable(ATAG_INITRD2, parse_tag_initrd2);
-#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
- phys_initrd_start = start;
- phys_initrd_size = end - start;
-}
-#endif /* CONFIG_OF_FLATTREE */
-
/*
* This keeps memory configuration data used by a couple memory
* initialization functions, as well as show_mem() for the skipping
@@ -217,6 +209,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
* so a successful GFP_DMA allocation will always satisfy this.
*/
phys_addr_t arm_dma_limit;
+unsigned long arm_dma_pfn_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
unsigned long dma_size)
@@ -239,6 +232,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
} else
arm_dma_limit = 0xffffffff;
+ arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
@@ -350,6 +344,11 @@ void __init arm_memblock_init(struct meminfo *mi,
memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
+ /* FDT scan will populate initrd_start */
+ if (initrd_start) {
+ phys_initrd_start = __virt_to_phys(initrd_start);
+ phys_initrd_size = initrd_end - initrd_start;
+ }
if (phys_initrd_size &&
!memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
@@ -421,12 +420,10 @@ void __init bootmem_init(void)
* This doesn't seem to be used by the Linux memory manager any
* more, but is used by ll_rw_block. If we can get rid of it, we
* also get rid of some of the stuff above as well.
- *
- * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
- * the system, not the maximum PFN.
*/
- max_low_pfn = max_low - PHYS_PFN_OFFSET;
- max_pfn = max_high - PHYS_PFN_OFFSET;
+ min_low_pfn = min;
+ max_low_pfn = max_low;
+ max_pfn = max_high;
}
/*
@@ -532,7 +529,7 @@ static inline void free_area_high(unsigned long pfn, unsigned long end)
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
- unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+ unsigned long max_low = max_low_pfn;
struct memblock_region *mem, *res;
/* set highmem page free */
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9ad8f0f..d5a982d15a88 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -81,8 +81,10 @@ extern __init void add_static_vm_early(struct static_vm *svm);
#ifdef CONFIG_ZONE_DMA
extern phys_addr_t arm_dma_limit;
+extern unsigned long arm_dma_pfn_limit;
#else
#define arm_dma_limit ((phys_addr_t)~0)
+#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
#endif
extern phys_addr_t arm_lowmem_limit;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 0c6356255fe3..d27158c38eb0 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
}
/*
- * We don't use supersection mappings for mmap() on /dev/mem, which
- * means that we can't map the memory area above the 4G barrier into
- * userspace.
+ * Do not allow /dev/mem mappings beyond the supported physical range.
*/
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
- return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
+ return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
#ifdef CONFIG_STRICT_DEVMEM
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b1d17eeb59b8..78eeeca78f5a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -28,6 +28,8 @@
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
+#include <asm/procinfo.h>
+#include <asm/memory.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -1315,6 +1317,86 @@ static void __init map_lowmem(void)
}
}
+#ifdef CONFIG_ARM_LPAE
+/*
+ * early_paging_init() recreates boot time page table setup, allowing machines
+ * to switch over to a high (>4G) address space on LPAE systems
+ */
+void __init early_paging_init(const struct machine_desc *mdesc,
+ struct proc_info_list *procinfo)
+{
+ pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
+ unsigned long map_start, map_end;
+ pgd_t *pgd0, *pgdk;
+ pud_t *pud0, *pudk, *pud_start;
+ pmd_t *pmd0, *pmdk;
+ phys_addr_t phys;
+ int i;
+
+ if (!(mdesc->init_meminfo))
+ return;
+
+ /* remap kernel code and data */
+ map_start = init_mm.start_code;
+ map_end = init_mm.brk;
+
+ /* get a handle on things... */
+ pgd0 = pgd_offset_k(0);
+ pud_start = pud0 = pud_offset(pgd0, 0);
+ pmd0 = pmd_offset(pud0, 0);
+
+ pgdk = pgd_offset_k(map_start);
+ pudk = pud_offset(pgdk, map_start);
+ pmdk = pmd_offset(pudk, map_start);
+
+ mdesc->init_meminfo();
+
+ /* Run the patch stub to update the constants */
+ fixup_pv_table(&__pv_table_begin,
+ (&__pv_table_end - &__pv_table_begin) << 2);
+
+ /*
+ * Cache cleaning operations for self-modifying code
+ * We should clean the entries by MVA but running a
+ * for loop over every pv_table entry pointer would
+ * just complicate the code.
+ */
+ flush_cache_louis();
+ dsb();
+ isb();
+
+ /* remap level 1 table */
+ for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
+ set_pud(pud0,
+ __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
+ pmd0 += PTRS_PER_PMD;
+ }
+
+ /* remap pmds for kernel mapping */
+ phys = __pa(map_start) & PMD_MASK;
+ do {
+ *pmdk++ = __pmd(phys | pmdprot);
+ phys += PMD_SIZE;
+ } while (phys < map_end);
+
+ flush_cache_all();
+ cpu_switch_mm(pgd0, &init_mm);
+ cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
+ local_flush_bp_all();
+ local_flush_tlb_all();
+}
+
+#else
+
+void __init early_paging_init(const struct machine_desc *mdesc,
+ struct proc_info_list *procinfo)
+{
+ if (mdesc->init_meminfo)
+ mdesc->init_meminfo();
+}
+
+#endif
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 1128064fddcb..45dc29f85d56 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -220,9 +220,7 @@ __v6_setup:
#endif /* CONFIG_MMU */
adr r5, v6_crval
ldmia r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r6, r6, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
mrc p15, 0, r0, c1, c0, 0 @ read control register
bic r0, r0, r5 @ clear bits them
orr r0, r0, r6 @ set them
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index c63d9bdee51e..60920f62fdf5 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -367,9 +367,7 @@ __v7_setup:
#endif
adr r5, v7_crval
ldmia r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r6, r6, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
#ifdef CONFIG_SWP_EMULATE
orr r5, r5, #(1 << 10) @ set SW bit in "clear"
bic r6, r6, #(1 << 10) @ clear it in "mmuset"
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index f50d223a0bd3..9ed155ad0f97 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -19,6 +19,7 @@
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
+#include <asm/opcodes.h>
#include "bpf_jit_32.h"
@@ -113,8 +114,11 @@ static u32 jit_udiv(u32 dividend, u32 divisor)
static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
+ inst |= (cond << 28);
+ inst = __opcode_to_mem_arm(inst);
+
if (ctx->target != NULL)
- ctx->target[ctx->idx] = inst | (cond << 28);
+ ctx->target[ctx->idx] = inst;
ctx->idx++;
}
@@ -930,4 +934,5 @@ void bpf_jit_free(struct sk_filter *fp)
{
if (fp->bpf_func != sk_run_filter)
module_free(NULL, fp->bpf_func);
+ kfree(fp);
}
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 037660633fa4..01619c2910e3 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1965,7 +1965,6 @@ static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
static struct irqaction omap24xx_dma_irq = {
.name = "DMA",
.handler = omap2_dma_irq_handler,
- .flags = IRQF_DISABLED
};
#else
diff --git a/arch/arm/plat-omap/include/plat/timex.h b/arch/arm/plat-omap/include/plat/timex.h
deleted file mode 100644
index e27d2daa7790..000000000000
--- a/arch/arm/plat-omap/include/plat/timex.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * arch/arm/plat-omap/include/mach/timex.h
- *
- * Copyright (C) 2000 RidgeRun, Inc.
- * Author: Greg Lonnon <glonnon@ridgerun.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#if !defined(__ASM_ARCH_OMAP_TIMEX_H)
-#define __ASM_ARCH_OMAP_TIMEX_H
-
-#define CLOCK_TICK_RATE (HZ * 100000UL)
-
-#endif /* __ASM_ARCH_OMAP_TIMEX_H */
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 25f40c9b7f62..99a3590f0349 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -32,6 +32,7 @@
#include <linux/ioport.h>
#include <linux/platform_data/s3c-hsudc.h>
#include <linux/platform_data/s3c-hsotg.h>
+#include <linux/platform_data/dma-s3c24xx.h>
#include <media/s5p_hdmi.h>
@@ -1465,8 +1466,10 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
pd.num_cs = num_cs;
pd.src_clk_nr = src_clk_nr;
pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio;
-#ifdef CONFIG_PL330_DMA
+#if defined(CONFIG_PL330_DMA)
pd.filter = pl330_filter;
+#elif defined(CONFIG_S3C24XX_DMAC)
+ pd.filter = s3c24xx_dma_filter;
#endif
s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0);
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index 4fb1f03a10d1..335beb341355 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -87,8 +87,12 @@ IS_SAMSUNG_CPU(exynos5440, EXYNOS5440_SOC_ID, EXYNOS5_SOC_MASK)
#endif
#if defined(CONFIG_CPU_S3C6400) || defined(CONFIG_CPU_S3C6410)
+# define soc_is_s3c6400() is_samsung_s3c6400()
+# define soc_is_s3c6410() is_samsung_s3c6410()
# define soc_is_s3c64xx() (is_samsung_s3c6400() || is_samsung_s3c6410())
#else
+# define soc_is_s3c6400() 0
+# define soc_is_s3c6410() 0
# define soc_is_s3c64xx() 0
#endif
diff --git a/arch/arm/plat-samsung/include/plat/uncompress.h b/arch/arm/plat-samsung/include/plat/uncompress.h
index 4afc32f90b6d..f48dc0a4736c 100644
--- a/arch/arm/plat-samsung/include/plat/uncompress.h
+++ b/arch/arm/plat-samsung/include/plat/uncompress.h
@@ -145,6 +145,8 @@ static inline void arch_enable_uart_fifo(void)
if (!(fifocon & S3C2410_UFCON_RESETBOTH))
break;
}
+
+ uart_wr(S3C2410_UFCON, S3C2410_UFCON_FIFOMODE);
}
}
#else
diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c
index 50a3ea0037db..aa9511b6914a 100644
--- a/arch/arm/plat-samsung/init.c
+++ b/arch/arm/plat-samsung/init.c
@@ -11,12 +11,18 @@
* published by the Free Software Foundation.
*/
+/*
+ * NOTE: Code in this file is not used on S3C64xx when booting with
+ * Device Tree support.
+ */
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <mach/hardware.h>
@@ -148,8 +154,12 @@ static int __init s3c_arch_init(void)
// do the correct init for cpu
- if (cpu == NULL)
+ if (cpu == NULL) {
+ /* Not needed when booting with device tree. */
+ if (of_have_populated_dt())
+ return 0;
panic("s3c_arch_init: NULL cpu\n");
+ }
ret = (cpu->init)();
if (ret != 0)
diff --git a/arch/arm/plat-samsung/s5p-irq-eint.c b/arch/arm/plat-samsung/s5p-irq-eint.c
index faa651602780..ebee4dc11a94 100644
--- a/arch/arm/plat-samsung/s5p-irq-eint.c
+++ b/arch/arm/plat-samsung/s5p-irq-eint.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/irqchip/arm-vic.h>
+#include <linux/of.h>
#include <plat/regs-irqtype.h>
@@ -202,6 +203,9 @@ static int __init s5p_init_irq_eint(void)
{
int irq;
+ if (of_have_populated_dt())
+ return -ENODEV;
+
for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++)
irq_set_chip(irq, &s5p_irq_vic_eint);
diff --git a/arch/arm/plat-versatile/headsmp.S b/arch/arm/plat-versatile/headsmp.S
index 2677bc3762d7..40f27e52de75 100644
--- a/arch/arm/plat-versatile/headsmp.S
+++ b/arch/arm/plat-versatile/headsmp.S
@@ -10,6 +10,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <asm/assembler.h>
/*
* Realview/Versatile Express specific entry point for secondary CPUs.
@@ -17,6 +18,7 @@
* until we're ready for them to initialise.
*/
ENTRY(versatile_secondary_startup)
+ ARM_BE8(setend be)
mrc p15, 0, r0, c0, c0, 5
bic r0, #0xff000000
adr r4, 1f
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 52b8f40b1c73..2f37e1d6cb45 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -642,9 +642,9 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
void *hcpu)
{
- if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
- vfp_force_reload((long)hcpu, current_thread_info());
- } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+ if (action == CPU_DYING || action == CPU_DYING_FROZEN)
+ vfp_current_hw_state[(long)hcpu] = NULL;
+ else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
vfp_enable(NULL);
return NOTIFY_OK;
}
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 43841033afd3..12969523414c 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y := enlighten.o hypercall.o grant-table.o
+obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
new file mode 100644
index 000000000000..b0e77de99148
--- /dev/null
+++ b/arch/arm/xen/mm.c
@@ -0,0 +1,65 @@
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
+
+#include <xen/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/swiotlb-xen.h>
+
+#include <asm/cacheflush.h>
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
+
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+ unsigned int address_bits,
+ dma_addr_t *dma_handle)
+{
+ if (!xen_initial_domain())
+ return -EINVAL;
+
+ /* we assume that dom0 is mapped 1:1 for now */
+ *dma_handle = pstart;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
+
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
+{
+ return;
+}
+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
+
+struct dma_map_ops *xen_dma_ops;
+EXPORT_SYMBOL_GPL(xen_dma_ops);
+
+static struct dma_map_ops xen_swiotlb_dma_ops = {
+ .mapping_error = xen_swiotlb_dma_mapping_error,
+ .alloc = xen_swiotlb_alloc_coherent,
+ .free = xen_swiotlb_free_coherent,
+ .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = xen_swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
+ .map_sg = xen_swiotlb_map_sg_attrs,
+ .unmap_sg = xen_swiotlb_unmap_sg_attrs,
+ .map_page = xen_swiotlb_map_page,
+ .unmap_page = xen_swiotlb_unmap_page,
+ .dma_supported = xen_swiotlb_dma_supported,
+ .set_dma_mask = xen_swiotlb_set_dma_mask,
+};
+
+int __init xen_mm_init(void)
+{
+ if (!xen_initial_domain())
+ return 0;
+ xen_swiotlb_init(1, false);
+ xen_dma_ops = &xen_swiotlb_dma_ops;
+ return 0;
+}
+arch_initcall(xen_mm_init);
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
new file mode 100644
index 000000000000..5df4a9afb8c6
--- /dev/null
+++ b/arch/arm/xen/p2m.c
@@ -0,0 +1,208 @@
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/rwlock.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
+
+#include <xen/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/swiotlb-xen.h>
+
+#include <asm/cacheflush.h>
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
+
+struct xen_p2m_entry {
+ unsigned long pfn;
+ unsigned long mfn;
+ unsigned long nr_pages;
+ struct rb_node rbnode_mach;
+ struct rb_node rbnode_phys;
+};
+
+rwlock_t p2m_lock;
+struct rb_root phys_to_mach = RB_ROOT;
+static struct rb_root mach_to_phys = RB_ROOT;
+
+static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
+{
+ struct rb_node **link = &phys_to_mach.rb_node;
+ struct rb_node *parent = NULL;
+ struct xen_p2m_entry *entry;
+ int rc = 0;
+
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
+
+ if (new->mfn == entry->mfn)
+ goto err_out;
+ if (new->pfn == entry->pfn)
+ goto err_out;
+
+ if (new->pfn < entry->pfn)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+ rb_link_node(&new->rbnode_phys, parent, link);
+ rb_insert_color(&new->rbnode_phys, &phys_to_mach);
+ goto out;
+
+err_out:
+ rc = -EINVAL;
+ pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
+ __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
+out:
+ return rc;
+}
+
+unsigned long __pfn_to_mfn(unsigned long pfn)
+{
+ struct rb_node *n = phys_to_mach.rb_node;
+ struct xen_p2m_entry *entry;
+ unsigned long irqflags;
+
+ read_lock_irqsave(&p2m_lock, irqflags);
+ while (n) {
+ entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+ if (entry->pfn <= pfn &&
+ entry->pfn + entry->nr_pages > pfn) {
+ read_unlock_irqrestore(&p2m_lock, irqflags);
+ return entry->mfn + (pfn - entry->pfn);
+ }
+ if (pfn < entry->pfn)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+ }
+ read_unlock_irqrestore(&p2m_lock, irqflags);
+
+ return INVALID_P2M_ENTRY;
+}
+EXPORT_SYMBOL_GPL(__pfn_to_mfn);
+
+static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
+{
+ struct rb_node **link = &mach_to_phys.rb_node;
+ struct rb_node *parent = NULL;
+ struct xen_p2m_entry *entry;
+ int rc = 0;
+
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
+
+ if (new->mfn == entry->mfn)
+ goto err_out;
+ if (new->pfn == entry->pfn)
+ goto err_out;
+
+ if (new->mfn < entry->mfn)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+ rb_link_node(&new->rbnode_mach, parent, link);
+ rb_insert_color(&new->rbnode_mach, &mach_to_phys);
+ goto out;
+
+err_out:
+ rc = -EINVAL;
+ pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
+ __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
+out:
+ return rc;
+}
+
+unsigned long __mfn_to_pfn(unsigned long mfn)
+{
+ struct rb_node *n = mach_to_phys.rb_node;
+ struct xen_p2m_entry *entry;
+ unsigned long irqflags;
+
+ read_lock_irqsave(&p2m_lock, irqflags);
+ while (n) {
+ entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
+ if (entry->mfn <= mfn &&
+ entry->mfn + entry->nr_pages > mfn) {
+ read_unlock_irqrestore(&p2m_lock, irqflags);
+ return entry->pfn + (mfn - entry->mfn);
+ }
+ if (mfn < entry->mfn)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+ }
+ read_unlock_irqrestore(&p2m_lock, irqflags);
+
+ return INVALID_P2M_ENTRY;
+}
+EXPORT_SYMBOL_GPL(__mfn_to_pfn);
+
+bool __set_phys_to_machine_multi(unsigned long pfn,
+ unsigned long mfn, unsigned long nr_pages)
+{
+ int rc;
+ unsigned long irqflags;
+ struct xen_p2m_entry *p2m_entry;
+ struct rb_node *n = phys_to_mach.rb_node;
+
+ if (mfn == INVALID_P2M_ENTRY) {
+ write_lock_irqsave(&p2m_lock, irqflags);
+ while (n) {
+ p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+ if (p2m_entry->pfn <= pfn &&
+ p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
+ rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
+ rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
+ write_unlock_irqrestore(&p2m_lock, irqflags);
+ kfree(p2m_entry);
+ return true;
+ }
+ if (pfn < p2m_entry->pfn)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+ }
+ write_unlock_irqrestore(&p2m_lock, irqflags);
+ return true;
+ }
+
+ p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT);
+ if (!p2m_entry) {
+ pr_warn("cannot allocate xen_p2m_entry\n");
+ return false;
+ }
+ p2m_entry->pfn = pfn;
+ p2m_entry->nr_pages = nr_pages;
+ p2m_entry->mfn = mfn;
+
+ write_lock_irqsave(&p2m_lock, irqflags);
+ if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
+ (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
+ write_unlock_irqrestore(&p2m_lock, irqflags);
+ return false;
+ }
+ write_unlock_irqrestore(&p2m_lock, irqflags);
+ return true;
+}
+EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);
+
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+ return __set_phys_to_machine_multi(pfn, mfn, 1);
+}
+EXPORT_SYMBOL_GPL(__set_phys_to_machine);
+
+int p2m_init(void)
+{
+ rwlock_init(&p2m_lock);
+ return 0;
+}
+arch_initcall(p2m_init);
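As a rough illustration of the range lookup that __pfn_to_mfn() performs above: an entry matches when entry->pfn <= pfn < entry->pfn + entry->nr_pages, and the translation offsets the mfn by the same amount. The sketch below mirrors only that semantics; the flat array, the pfn_to_mfn helper name and the example numbers are hypothetical simplifications of the rbtree-based code in the patch.

#include <stdio.h>

#define INVALID_P2M_ENTRY (~0UL)

/* Simplified stand-in for struct xen_p2m_entry: flat array instead of an rbtree. */
struct p2m_entry { unsigned long pfn, mfn, nr_pages; };

static unsigned long pfn_to_mfn(const struct p2m_entry *tbl, int n,
				unsigned long pfn)
{
	for (int i = 0; i < n; i++)
		if (tbl[i].pfn <= pfn && pfn < tbl[i].pfn + tbl[i].nr_pages)
			return tbl[i].mfn + (pfn - tbl[i].pfn);
	return INVALID_P2M_ENTRY;
}

int main(void)
{
	/* One hypothetical mapping: 16 pages starting at pfn 0x80000 -> mfn 0x12340. */
	struct p2m_entry tbl[] = { { 0x80000, 0x12340, 16 } };

	printf("0x%lx\n", pfn_to_mfn(tbl, 1, 0x80005));	/* 0x12345: inside the range */
	printf("0x%lx\n", pfn_to_mfn(tbl, 1, 0x80010));	/* INVALID_P2M_ENTRY: one past the end */
	return 0;
}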
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c04454876bcb..7d704041b1ba 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,6 +1,7 @@
config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
@@ -14,6 +15,7 @@ config ARM64
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
+ select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select HARDIRQS_SW_RESEND
@@ -138,6 +140,11 @@ config ARM64_64K_PAGES
look-up. AArch32 emulation is not available when this feature
is enabled.
+config CPU_BIG_ENDIAN
+ bool "Build big-endian kernel"
+ help
+ Say Y if you plan on running a kernel in big-endian mode.
+
config SMP
bool "Symmetric Multi-Processing"
select USE_GENERIC_SMP_HELPERS
@@ -160,6 +167,13 @@ config NR_CPUS
default "8" if ARCH_XGENE
default "4"
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP
+ help
+ Say Y here to experiment with turning CPUs off and on. CPUs
+ can be controlled through /sys/devices/system/cpu.
+
source kernel/Kconfig.preempt
config HZ
@@ -211,6 +225,7 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM64 (EXPERIMENTAL)"
depends on ARM64 && OF
+ select SWIOTLB_XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index d90cf79f233a..2fceb71ac3b7 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -20,9 +20,15 @@ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_DEFCONFIG := defconfig
KBUILD_CFLAGS += -mgeneral-regs-only
+ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+KBUILD_CPPFLAGS += -mbig-endian
+AS += -EB
+LD += -EB
+else
KBUILD_CPPFLAGS += -mlittle-endian
AS += -EL
LD += -EL
+endif
comma = ,
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 79a642d199f2..519f89f5b6a3 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -50,3 +50,4 @@ generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index c9f1d2816c2b..9400596a0f39 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -92,19 +92,49 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline void arch_counter_set_user_access(void)
+static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;
-
- /* Disable user access to the timers and the physical counter. */
asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
- cntkctl &= ~((3 << 8) | (1 << 0));
+ return cntkctl;
+}
- /* Enable user access to the virtual counter and frequency. */
- cntkctl |= (1 << 1);
+static inline void arch_timer_set_cntkctl(u32 cntkctl)
+{
asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}
+static inline void arch_counter_set_user_access(void)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+
+ /* Disable user access to the timers and the physical counter */
+ /* Also disable virtual event stream */
+ cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+ | ARCH_TIMER_USR_VT_ACCESS_EN
+ | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
+
+ /* Enable user access to the virtual counter */
+ cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+ arch_timer_set_cntkctl(cntkctl);
+}
+
+static inline void arch_timer_evtstrm_enable(int divider)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+ cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
+ /* Set the divider and enable virtual event stream */
+ cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+ | ARCH_TIMER_VIRT_EVT_EN;
+ arch_timer_set_cntkctl(cntkctl);
+ elf_hwcap |= HWCAP_EVTSTRM;
+#ifdef CONFIG_COMPAT
+ compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
+#endif
+}
+
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5aceb83b3f5c..fd3e3924041b 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -115,3 +115,34 @@ lr .req x30 // link register
.align 7
b \label
.endm
+
+/*
+ * Select code when configured for BE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_BE(code...) code
+#else
+#define CPU_BE(code...)
+#endif
+
+/*
+ * Select code when configured for LE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_LE(code...)
+#else
+#define CPU_LE(code...) code
+#endif
+
+/*
+ * Define a macro that constructs a 64-bit value by concatenating two
+ * 32-bit registers. Note that on big endian systems the order of the
+ * registers is swapped.
+ */
+#ifndef CONFIG_CPU_BIG_ENDIAN
+ .macro regs_to_64, rd, lbits, hbits
+#else
+ .macro regs_to_64, rd, hbits, lbits
+#endif
+ orr \rd, \lbits, \hbits, lsl #32
+ .endm
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 8a8ce0e73a38..3914c0dcd09c 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -173,4 +173,6 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
+#define cmpxchg64_relaxed(ptr,o,n) cmpxchg_local((ptr),(o),(n))
+
#endif /* __ASM_CMPXCHG_H */
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 899af807ef0f..fda2704b3f9f 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -26,7 +26,11 @@
#include <linux/ptrace.h>
#define COMPAT_USER_HZ 100
+#ifdef __AARCH64EB__
+#define COMPAT_UTS_MACHINE "armv8b\0\0"
+#else
#define COMPAT_UTS_MACHINE "armv8l\0\0"
+#endif
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
@@ -73,13 +77,23 @@ struct compat_timeval {
};
struct compat_stat {
+#ifdef __AARCH64EB__
+ short st_dev;
+ short __pad1;
+#else
compat_dev_t st_dev;
+#endif
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_ushort_t st_nlink;
__compat_uid16_t st_uid;
__compat_gid16_t st_gid;
+#ifdef __AARCH64EB__
+ short st_rdev;
+ short __pad2;
+#else
compat_dev_t st_rdev;
+#endif
compat_off_t st_size;
compat_off_t st_blksize;
compat_off_t st_blocks;
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
new file mode 100644
index 000000000000..c4cdb5e5b73d
--- /dev/null
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPU_OPS_H
+#define __ASM_CPU_OPS_H
+
+#include <linux/init.h>
+#include <linux/threads.h>
+
+struct device_node;
+
+/**
+ * struct cpu_operations - Callback operations for hotplugging CPUs.
+ *
+ * @name: Name of the property as appears in a devicetree cpu node's
+ * enable-method property.
+ * @cpu_init: Reads any data necessary for a specific enable-method from the
+ * devicetree, for a given cpu node and proposed logical id.
+ * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
+ * mechanism for doing so, tests whether it is possible to boot
+ * the given CPU.
+ * @cpu_boot: Boots a cpu into the kernel.
+ * @cpu_postboot:	Optionally, perform any post-boot cleanup or necessary
+ * synchronisation. Called from the cpu being booted.
+ * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
+ * reason, which will cause the hot unplug to be aborted. Called
+ * from the cpu to be killed.
+ * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
+ * cpu being killed.
+ */
+struct cpu_operations {
+ const char *name;
+ int (*cpu_init)(struct device_node *, unsigned int);
+ int (*cpu_prepare)(unsigned int);
+ int (*cpu_boot)(unsigned int);
+ void (*cpu_postboot)(void);
+#ifdef CONFIG_HOTPLUG_CPU
+ int (*cpu_disable)(unsigned int cpu);
+ void (*cpu_die)(unsigned int cpu);
+#endif
+};
+
+extern const struct cpu_operations *cpu_ops[NR_CPUS];
+extern int __init cpu_read_ops(struct device_node *dn, int cpu);
+extern void __init cpu_read_bootcpu_ops(void);
+
+#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 8d1810001aef..fd0c0c0e447a 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -23,11 +23,15 @@
#include <asm-generic/dma-coherent.h>
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
if (unlikely(!dev) || !dev->archdata.dma_ops)
return dma_ops;
@@ -35,6 +39,14 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return dev->archdata.dma_ops;
}
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (xen_initial_domain())
+ return xen_dma_ops;
+ else
+ return __generic_dma_ops(dev);
+}
+
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index e7fa87f9201b..01d3aab64b79 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -90,11 +90,24 @@ typedef struct user_fpsimd_state elf_fpregset_t;
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
+#ifdef __AARCH64EB__
+#define ELF_DATA ELFDATA2MSB
+#else
#define ELF_DATA ELFDATA2LSB
+#endif
#define ELF_ARCH EM_AARCH64
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
#define ELF_PLATFORM_SIZE 16
+#ifdef __AARCH64EB__
+#define ELF_PLATFORM ("aarch64_be")
+#else
#define ELF_PLATFORM ("aarch64")
+#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
@@ -149,7 +162,12 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
#ifdef CONFIG_COMPAT
+
+#ifdef __AARCH64EB__
+#define COMPAT_ELF_PLATFORM ("v8b")
+#else
#define COMPAT_ELF_PLATFORM ("v8l")
+#endif
#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3))
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index e2950b098e76..6cddbb0c9f54 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -30,6 +30,7 @@
#define COMPAT_HWCAP_IDIVA (1 << 17)
#define COMPAT_HWCAP_IDIVT (1 << 18)
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_EVTSTRM (1 << 21)
#ifndef __ASSEMBLY__
/*
@@ -37,11 +38,11 @@
* instruction set this cpu supports.
*/
#define ELF_HWCAP (elf_hwcap)
-#define COMPAT_ELF_HWCAP (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
- COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
- COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
- COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
+
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP (compat_elf_hwcap)
+extern unsigned int compat_elf_hwcap;
+#endif
extern unsigned long elf_hwcap;
#endif
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 1d12f89140ba..5a482fc7d326 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -22,11 +22,14 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/pgtable.h>
+#include <xen/xen.h>
+
/*
* Generic IO read/write. These perform native-endian accesses.
*/
@@ -224,6 +227,7 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
*/
extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
extern void __iounmap(volatile void __iomem *addr);
+extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -233,7 +237,6 @@ extern void __iounmap(volatile void __iomem *addr);
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
-#define ioremap_cached(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL))
#define iounmap __iounmap
#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
@@ -263,5 +266,11 @@ extern int devmem_is_allowed(unsigned long pfn);
*/
#define xlate_dev_kmem_ptr(p) p
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
+ (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
#endif /* __KERNEL__ */
#endif /* __ASM_IO_H */
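ioremap_cache() takes the place of the removed ioremap_cached() macro and provides a cacheable mapping of a physical region. A brief usage sketch (the region name and size are placeholders, not real kernel symbols):

/* Illustrative only: map a firmware table with cacheable attributes. */
static void __iomem *map_fw_table(phys_addr_t table_phys, size_t table_size)
{
	void __iomem *tbl = ioremap_cache(table_phys, table_size);

	if (!tbl)
		return NULL;
	/* ... read the table through tbl, then iounmap(tbl) when done ... */
	return tbl;
}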
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 0332fc077f6e..e1f7ecdde11f 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -4,6 +4,7 @@
#include <asm-generic/irq.h>
extern void (*handle_arch_irq)(struct pt_regs *);
+extern void migrate_irqs(void);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index eec073875218..6df93cdc652b 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -177,4 +177,9 @@ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
}
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+ return vcpu_sys_reg(vcpu, MPIDR_EL1);
+}
+
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0859a4ddd1e7..5d85a02d1231 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -36,11 +36,6 @@
#define KVM_VCPU_MAX_FEATURES 2
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x) 0
-#define KVM_NR_PAGE_SIZES 1
-#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
-
struct kvm_vcpu;
int kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -151,6 +146,7 @@ struct kvm_vcpu_stat {
struct kvm_vcpu_init;
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init);
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
struct kvm_one_reg;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index efe609c6a3c9..680f74e67497 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -91,6 +91,7 @@ int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
+#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
static inline bool kvm_is_write_fault(unsigned long esr)
{
@@ -116,13 +117,18 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
pte_val(*pte) |= PTE_S2_RDWR;
}
+static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+{
+ pmd_val(*pmd) |= PMD_S2_RDWR;
+}
+
struct kvm;
-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
+ unsigned long size)
{
if (!icache_is_aliasing()) { /* PIPT */
- unsigned long hva = gfn_to_hva(kvm, gfn);
- flush_icache_range(hva, hva + PAGE_SIZE);
+ flush_icache_range(hva, hva + size);
} else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
/* any kind of VIPT cache */
__flush_icache_all();
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index d57e66845c86..755f86143320 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -85,6 +85,8 @@
#define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */
#define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
+
/*
* Memory Attribute override for Stage-2 (MemAttr[3:0])
*/
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ab239b2c456f..45b20cd6cbca 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -107,6 +107,11 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate = COMPAT_PSR_MODE_USR;
if (pc & 1)
regs->pstate |= COMPAT_PSR_T_BIT;
+
+#ifdef __AARCH64EB__
+ regs->pstate |= COMPAT_PSR_E_BIT;
+#endif
+
regs->compat_sp = sp;
}
#endif
diff --git a/arch/arm64/include/asm/prom.h b/arch/arm64/include/asm/prom.h
deleted file mode 100644
index 68b90e682957..000000000000
--- a/arch/arm64/include/asm/prom.h
+++ /dev/null
@@ -1 +0,0 @@
-/* Empty for now */
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index 0604237ecd99..e5312ea0ec1a 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,25 +14,6 @@
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H
-#define PSCI_POWER_STATE_TYPE_STANDBY 0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
-
-struct psci_power_state {
- u16 id;
- u8 type;
- u8 affinity_level;
-};
-
-struct psci_operations {
- int (*cpu_suspend)(struct psci_power_state state,
- unsigned long entry_point);
- int (*cpu_off)(struct psci_power_state state);
- int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
- int (*migrate)(unsigned long cpuid);
-};
-
-extern struct psci_operations psci_ops;
-
int psci_init(void);
#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 0dacbbf9458b..0e7fa4963735 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -42,6 +42,7 @@
#define COMPAT_PSR_MODE_UND 0x0000001b
#define COMPAT_PSR_MODE_SYS 0x0000001f
#define COMPAT_PSR_T_BIT 0x00000020
+#define COMPAT_PSR_E_BIT 0x00000200
#define COMPAT_PSR_F_BIT 0x00000040
#define COMPAT_PSR_I_BIT 0x00000080
#define COMPAT_PSR_A_BIT 0x00000100
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 4b8023c5d146..a498f2cd2c2a 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -60,21 +60,14 @@ struct secondary_data {
void *stack;
};
extern struct secondary_data secondary_data;
-extern void secondary_holding_pen(void);
-extern volatile unsigned long secondary_holding_pen_release;
+extern void secondary_entry(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-struct device_node;
+extern int __cpu_disable(void);
-struct smp_enable_ops {
- const char *name;
- int (*init_cpu)(struct device_node *, int);
- int (*prepare_cpu)(int);
-};
-
-extern const struct smp_enable_ops smp_spin_table_ops;
-extern const struct smp_enable_ops smp_psci_ops;
+extern void __cpu_die(unsigned int cpu);
+extern void cpu_die(void);
#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 0defa0728a9b..3d5cf064d7a1 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -22,17 +22,10 @@
/*
* Spinlock implementation.
*
- * The old value is read exclusively and the new one, if unlocked, is written
- * exclusively. In case of failure, the loop is restarted.
- *
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
- *
- * Unlocked value: 0
- * Locked value: 1
*/
-#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
@@ -41,32 +34,51 @@
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int tmp;
+ arch_spinlock_t lockval, newval;
asm volatile(
- " sevl\n"
- "1: wfe\n"
- "2: ldaxr %w0, %1\n"
- " cbnz %w0, 1b\n"
- " stxr %w0, %w2, %1\n"
- " cbnz %w0, 2b\n"
- : "=&r" (tmp), "+Q" (lock->lock)
- : "r" (1)
- : "cc", "memory");
+ /* Atomically increment the next ticket. */
+" prfm pstl1strm, %3\n"
+"1: ldaxr %w0, %3\n"
+" add %w1, %w0, %w5\n"
+" stxr %w2, %w1, %3\n"
+" cbnz %w2, 1b\n"
+ /* Did we get the lock? */
+" eor %w1, %w0, %w0, ror #16\n"
+" cbz %w1, 3f\n"
+ /*
+ * No: spin on the owner. Send a local event to avoid missing an
+ * unlock before the exclusive load.
+ */
+" sevl\n"
+"2: wfe\n"
+" ldaxrh %w2, %4\n"
+" eor %w1, %w2, %w0, lsr #16\n"
+" cbnz %w1, 2b\n"
+ /* We got the lock. Critical section starts here. */
+"3:"
+ : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
+ : "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
+ : "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int tmp;
+ arch_spinlock_t lockval;
asm volatile(
- "2: ldaxr %w0, %1\n"
- " cbnz %w0, 1f\n"
- " stxr %w0, %w2, %1\n"
- " cbnz %w0, 2b\n"
- "1:\n"
- : "=&r" (tmp), "+Q" (lock->lock)
- : "r" (1)
- : "cc", "memory");
+" prfm pstl1strm, %2\n"
+"1: ldaxr %w0, %2\n"
+" eor %w1, %w0, %w0, ror #16\n"
+" cbnz %w1, 2f\n"
+" add %w0, %w0, %3\n"
+" stxr %w1, %w0, %2\n"
+" cbnz %w1, 1b\n"
+"2:"
+ : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
+ : "I" (1 << TICKET_SHIFT)
+ : "memory");
return !tmp;
}
@@ -74,9 +86,28 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(
- " stlr %w1, %0\n"
- : "=Q" (lock->lock) : "r" (0) : "memory");
+" stlrh %w1, %0\n"
+ : "=Q" (lock->owner)
+ : "r" (lock->owner + 1)
+ : "memory");
+}
+
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.owner == lock.next;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ arch_spinlock_t lockval = ACCESS_ONCE(*lock);
+ return (lockval.next - lockval.owner) > 1;
}
+#define arch_spin_is_contended arch_spin_is_contended
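The new locking assembly implements a ticket lock: a locker atomically takes the next ticket, waits (with WFE) until owner reaches that ticket, and unlock hands the lock on by bumping owner. A rough userspace C11 rendering of the same protocol, purely for illustration (it omits the WFE/SEV waiting and uses generic acquire/release orderings rather than the exact barriers above):

#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock {
	_Atomic uint16_t owner;		/* ticket currently being served */
	_Atomic uint16_t next;		/* next ticket to hand out */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
	uint16_t t = atomic_fetch_add_explicit(&l->next, 1,
					       memory_order_relaxed);
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != t)
		;	/* the assembly waits with WFE instead of spinning hot */
}

static void ticket_lock_release(struct ticket_lock *l)
{
	atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}

arch_spin_value_unlocked() and arch_spin_is_contended() above fall out of the same representation: the lock is free when owner == next, and contended when more than one ticket is outstanding.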
/*
* Write lock implementation.
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 9a494346efed..b8d383665f56 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,14 +20,19 @@
# error "please don't include this file directly"
#endif
-/* We only require natural alignment for exclusive accesses. */
-#define __lock_aligned
+#define TICKET_SHIFT 16
typedef struct {
- volatile unsigned int lock;
-} arch_spinlock_t;
+#ifdef __AARCH64EB__
+ u16 next;
+ u16 owner;
+#else
+ u16 owner;
+ u16 next;
+#endif
+} __aligned(4) arch_spinlock_t;
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
typedef struct {
volatile unsigned int lock;
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 89c047f9a971..70ba9d4ee978 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -59,6 +59,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -82,6 +85,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
const unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 26e310c54344..130e2be952cf 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,7 +18,8 @@
#ifndef __ASM__VIRT_H
#define __ASM__VIRT_H
-#define BOOT_CPU_MODE_EL2 (0x0e12b007)
+#define BOOT_CPU_MODE_EL1 (0xe11)
+#define BOOT_CPU_MODE_EL2 (0xe12)
#ifndef __ASSEMBLY__
#include <asm/cacheflush.h>
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..2820f1a6eebe
--- /dev/null
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -0,0 +1,47 @@
+#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
+#define _ASM_ARM64_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/include/uapi/asm/byteorder.h b/arch/arm64/include/uapi/asm/byteorder.h
index 2b92046aafc5..dc19e9537f0d 100644
--- a/arch/arm64/include/uapi/asm/byteorder.h
+++ b/arch/arm64/include/uapi/asm/byteorder.h
@@ -16,6 +16,10 @@
#ifndef __ASM_BYTEORDER_H
#define __ASM_BYTEORDER_H
+#ifdef __AARCH64EB__
+#include <linux/byteorder/big_endian.h>
+#else
#include <linux/byteorder/little_endian.h>
+#endif
#endif /* __ASM_BYTEORDER_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index eea497578b87..9b12476e9c85 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -21,6 +21,7 @@
*/
#define HWCAP_FP (1 << 0)
#define HWCAP_ASIMD (1 << 1)
+#define HWCAP_EVTSTRM (1 << 2)
#endif /* _UAPI__ASM_HWCAP_H */
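Userspace reads these bits from the ELF auxiliary vector rather than parsing /proc/cpuinfo; a minimal, self-contained check for the new event-stream bit (the fallback define only covers headers that predate it):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_EVTSTRM
#define HWCAP_EVTSTRM	(1 << 2)	/* matches the definition above */
#endif

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("event stream: %s\n", (hwcap & HWCAP_EVTSTRM) ? "yes" : "no");
	return 0;
}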
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 7b4b564961d4..5ba2fd43a75b 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -9,12 +9,12 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
- hyp-stub.o psci.o
+ hyp-stub.o psci.o cpu_ops.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o
+arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 41b4f626d554..e7ee770c0697 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -39,6 +39,7 @@ EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__copy_in_user);
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
new file mode 100644
index 000000000000..d62d12fb36c8
--- /dev/null
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -0,0 +1,87 @@
+/*
+ * CPU kernel entry/exit control
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/cpu_ops.h>
+#include <asm/smp_plat.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/string.h>
+
+extern const struct cpu_operations smp_spin_table_ops;
+extern const struct cpu_operations cpu_psci_ops;
+
+const struct cpu_operations *cpu_ops[NR_CPUS];
+
+static const struct cpu_operations *supported_cpu_ops[] __initconst = {
+#ifdef CONFIG_SMP
+ &smp_spin_table_ops,
+ &cpu_psci_ops,
+#endif
+ NULL,
+};
+
+static const struct cpu_operations * __init cpu_get_ops(const char *name)
+{
+ const struct cpu_operations **ops = supported_cpu_ops;
+
+ while (*ops) {
+ if (!strcmp(name, (*ops)->name))
+ return *ops;
+
+ ops++;
+ }
+
+ return NULL;
+}
+
+/*
+ * Read a cpu's enable method from the device tree and record it in cpu_ops.
+ */
+int __init cpu_read_ops(struct device_node *dn, int cpu)
+{
+ const char *enable_method = of_get_property(dn, "enable-method", NULL);
+ if (!enable_method) {
+ /*
+ * The boot CPU may not have an enable method (e.g. when
+ * spin-table is used for secondaries). Don't warn spuriously.
+ */
+ if (cpu != 0)
+ pr_err("%s: missing enable-method property\n",
+ dn->full_name);
+ return -ENOENT;
+ }
+
+ cpu_ops[cpu] = cpu_get_ops(enable_method);
+ if (!cpu_ops[cpu]) {
+ pr_warn("%s: unsupported enable-method property: %s\n",
+ dn->full_name, enable_method);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+void __init cpu_read_bootcpu_ops(void)
+{
+ struct device_node *dn = of_get_cpu_node(0, NULL);
+ if (!dn) {
+ pr_err("Failed to find device node for boot cpu\n");
+ return;
+ }
+ cpu_read_ops(dn, 0);
+}
diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c
index 63cfc4a43f4e..fd3993cb060f 100644
--- a/arch/arm64/kernel/cputable.c
+++ b/arch/arm64/kernel/cputable.c
@@ -22,7 +22,7 @@
extern unsigned long __cpu_setup(void);
-struct cpu_info __initdata cpu_table[] = {
+struct cpu_info cpu_table[] = {
{
.cpu_id_val = 0x000f0000,
.cpu_id_mask = 0x000f0000,
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index cbfacf7fb438..6a0a9b132d7a 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -27,7 +27,6 @@
#include <linux/uaccess.h>
#include <asm/debug-monitors.h>
-#include <asm/local.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
@@ -89,8 +88,8 @@ early_param("nodebugmon", early_debug_disable);
* Keep track of debug users on each core.
* The ref counts are per-cpu so we use a local_t type.
*/
-static DEFINE_PER_CPU(local_t, mde_ref_count);
-static DEFINE_PER_CPU(local_t, kde_ref_count);
+static DEFINE_PER_CPU(int, mde_ref_count);
+static DEFINE_PER_CPU(int, kde_ref_count);
void enable_debug_monitors(enum debug_el el)
{
@@ -98,11 +97,11 @@ void enable_debug_monitors(enum debug_el el)
WARN_ON(preemptible());
- if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1)
+ if (this_cpu_inc_return(mde_ref_count) == 1)
enable = DBG_MDSCR_MDE;
if (el == DBG_ACTIVE_EL1 &&
- local_inc_return(&__get_cpu_var(kde_ref_count)) == 1)
+ this_cpu_inc_return(kde_ref_count) == 1)
enable |= DBG_MDSCR_KDE;
if (enable && debug_enabled) {
@@ -118,11 +117,11 @@ void disable_debug_monitors(enum debug_el el)
WARN_ON(preemptible());
- if (local_dec_and_test(&__get_cpu_var(mde_ref_count)))
+ if (this_cpu_dec_return(mde_ref_count) == 0)
disable = ~DBG_MDSCR_MDE;
if (el == DBG_ACTIVE_EL1 &&
- local_dec_and_test(&__get_cpu_var(kde_ref_count)))
+ this_cpu_dec_return(kde_ref_count) == 0)
disable &= ~DBG_MDSCR_KDE;
if (disable) {
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7090c126797c..7009387348b7 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -123,8 +123,9 @@
ENTRY(stext)
mov x21, x0 // x21=FDT
+ bl el2_setup // Drop to EL1, w20=cpu_boot_mode
bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
- bl el2_setup // Drop to EL1
+ bl set_cpu_boot_mode_flag
mrs x22, midr_el1 // x22=cpuid
mov x0, x22
bl lookup_processor_type
@@ -150,21 +151,30 @@ ENDPROC(stext)
/*
* If we're fortunate enough to boot at EL2, ensure that the world is
* sane before dropping to EL1.
+ *
+ * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if
+ * booted in EL1 or EL2 respectively.
*/
ENTRY(el2_setup)
mrs x0, CurrentEL
cmp x0, #PSR_MODE_EL2t
ccmp x0, #PSR_MODE_EL2h, #0x4, ne
- ldr x0, =__boot_cpu_mode // Compute __boot_cpu_mode
- add x0, x0, x28
- b.eq 1f
- str wzr, [x0] // Remember we don't have EL2...
+ b.ne 1f
+ mrs x0, sctlr_el2
+CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
+CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2
+ msr sctlr_el2, x0
+ b 2f
+1: mrs x0, sctlr_el1
+CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1
+CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
+ msr sctlr_el1, x0
+ mov w20, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
+ isb
ret
/* Hyp configuration. */
-1: ldr w1, =BOOT_CPU_MODE_EL2
- str w1, [x0, #4] // This CPU has EL2
- mov x0, #(1 << 31) // 64-bit EL1
+2: mov x0, #(1 << 31) // 64-bit EL1
msr hcr_el2, x0
/* Generic timers. */
@@ -181,7 +191,8 @@ ENTRY(el2_setup)
/* sctlr_el1 */
mov x0, #0x0800 // Set/clear RES{1,0} bits
- movk x0, #0x30d0, lsl #16
+CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
+CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
msr sctlr_el1, x0
/* Coprocessor traps. */
@@ -204,10 +215,25 @@ ENTRY(el2_setup)
PSR_MODE_EL1h)
msr spsr_el2, x0
msr elr_el2, lr
+ mov w20, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
eret
ENDPROC(el2_setup)
/*
+ * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
+ * in x20. See arch/arm64/include/asm/virt.h for more info.
+ */
+ENTRY(set_cpu_boot_mode_flag)
+ ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode
+ add x1, x1, x28
+ cmp w20, #BOOT_CPU_MODE_EL2
+ b.ne 1f
+ add x1, x1, #4
+1: str w20, [x1] // Save the CPU boot mode
+ ret
+ENDPROC(set_cpu_boot_mode_flag)
+
+/*
* We need to find out the CPU boot mode long after boot, so we need to
* store it in a writable variable.
*
@@ -225,7 +251,6 @@ ENTRY(__boot_cpu_mode)
.quad PAGE_OFFSET
#ifdef CONFIG_SMP
- .pushsection .smp.pen.text, "ax"
.align 3
1: .quad .
.quad secondary_holding_pen_release
@@ -235,8 +260,9 @@ ENTRY(__boot_cpu_mode)
* cores are held until we're ready for them to initialise.
*/
ENTRY(secondary_holding_pen)
- bl __calc_phys_offset // x24=phys offset
- bl el2_setup // Drop to EL1
+ bl el2_setup // Drop to EL1, w20=cpu_boot_mode
+ bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
+ bl set_cpu_boot_mode_flag
mrs x0, mpidr_el1
ldr x1, =MPIDR_HWID_BITMASK
and x0, x0, x1
@@ -250,7 +276,16 @@ pen: ldr x4, [x3]
wfe
b pen
ENDPROC(secondary_holding_pen)
- .popsection
+
+ /*
+ * Secondary entry point that jumps straight into the kernel. Only to
+ * be used where CPUs are brought online dynamically by the kernel.
+ */
+ENTRY(secondary_entry)
+ bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
+ bl el2_setup // Drop to EL1
+ b secondary_startup
+ENDPROC(secondary_entry)
ENTRY(secondary_startup)
/*
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 329218ca9ffb..ff516f6691e4 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -184,14 +184,14 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
/* Breakpoint */
ctrl_reg = AARCH64_DBG_REG_BCR;
val_reg = AARCH64_DBG_REG_BVR;
- slots = __get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
reg_enable = !debug_info->bps_disabled;
} else {
/* Watchpoint */
ctrl_reg = AARCH64_DBG_REG_WCR;
val_reg = AARCH64_DBG_REG_WVR;
- slots = __get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
reg_enable = !debug_info->wps_disabled;
}
@@ -230,12 +230,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
/* Breakpoint */
base = AARCH64_DBG_REG_BCR;
- slots = __get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
} else {
/* Watchpoint */
base = AARCH64_DBG_REG_WCR;
- slots = __get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
}
@@ -505,11 +505,11 @@ static void toggle_bp_registers(int reg, enum debug_el el, int enable)
switch (reg) {
case AARCH64_DBG_REG_BCR:
- slots = __get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
break;
case AARCH64_DBG_REG_WCR:
- slots = __get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
break;
default:
@@ -546,7 +546,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr,
struct debug_info *debug_info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
addr = instruction_pointer(regs);
debug_info = &current->thread.debug;
@@ -596,7 +596,7 @@ unlock:
user_enable_single_step(current);
} else {
toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
- kernel_step = &__get_cpu_var(stepping_kernel_bp);
+ kernel_step = this_cpu_ptr(&stepping_kernel_bp);
if (*kernel_step != ARM_KERNEL_STEP_NONE)
return 0;
@@ -623,7 +623,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
debug_info = &current->thread.debug;
for (i = 0; i < core_num_wrps; ++i) {
@@ -698,7 +698,7 @@ unlock:
user_enable_single_step(current);
} else {
toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
- kernel_step = &__get_cpu_var(stepping_kernel_bp);
+ kernel_step = this_cpu_ptr(&stepping_kernel_bp);
if (*kernel_step != ARM_KERNEL_STEP_NONE)
return 0;
@@ -722,7 +722,7 @@ int reinstall_suspended_bps(struct pt_regs *regs)
struct debug_info *debug_info = &current->thread.debug;
int handled_exception = 0, *kernel_step;
- kernel_step = &__get_cpu_var(stepping_kernel_bp);
+ kernel_step = this_cpu_ptr(&stepping_kernel_bp);
/*
* Called from single-step exception handler.
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index ecb3354292ed..473e5dbf8f39 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -81,3 +81,64 @@ void __init init_IRQ(void)
if (!handle_arch_irq)
panic("No interrupt controller found.");
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static bool migrate_one_irq(struct irq_desc *desc)
+{
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ const struct cpumask *affinity = d->affinity;
+ struct irq_chip *c;
+ bool ret = false;
+
+ /*
+ * If this is a per-CPU interrupt, or the affinity does not
+ * include this CPU, then we have nothing to do.
+ */
+ if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+ return false;
+
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ affinity = cpu_online_mask;
+ ret = true;
+ }
+
+ c = irq_data_get_irq_chip(d);
+ if (!c->irq_set_affinity)
+ pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+ else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+ cpumask_copy(d->affinity, affinity);
+
+ return ret;
+}
+
+/*
+ * The current CPU has been marked offline. Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
+ */
+void migrate_irqs(void)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for_each_irq_desc(i, desc) {
+ bool affinity_broken;
+
+ raw_spin_lock(&desc->lock);
+ affinity_broken = migrate_one_irq(desc);
+ raw_spin_unlock(&desc->lock);
+
+ if (affinity_broken)
+ pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
+ i, smp_processor_id());
+ }
+
+ local_irq_restore(flags);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 8b69ecb1d8bc..63c48ffdf230 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -27,6 +27,9 @@
*
* See Documentation/arm/kernel_user_helpers.txt for formal definitions.
*/
+
+#include <asm/unistd32.h>
+
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@@ -35,33 +38,30 @@ __kuser_cmpxchg64: // 0xffff0f60
.inst 0xe92d00f0 // push {r4, r5, r6, r7}
.inst 0xe1c040d0 // ldrd r4, r5, [r0]
.inst 0xe1c160d0 // ldrd r6, r7, [r1]
- .inst 0xf57ff05f // dmb sy
- .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2]
+ .inst 0xe1b20e9f // 1: ldaexd r0, r1, [r2]
.inst 0xe0303004 // eors r3, r0, r4
.inst 0x00313005 // eoreqs r3, r1, r5
- .inst 0x01a23f96 // strexdeq r3, r6, [r2]
+ .inst 0x01a23e96 // stlexdeq r3, r6, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffff9 // beq 1b
- .inst 0xf57ff05f // dmb sy
.inst 0xe2730000 // rsbs r0, r3, #0
.inst 0xe8bd00f0 // pop {r4, r5, r6, r7}
.inst 0xe12fff1e // bx lr
.align 5
__kuser_memory_barrier: // 0xffff0fa0
- .inst 0xf57ff05f // dmb sy
+ .inst 0xf57ff05b // dmb ish
.inst 0xe12fff1e // bx lr
.align 5
__kuser_cmpxchg: // 0xffff0fc0
- .inst 0xf57ff05f // dmb sy
- .inst 0xe1923f9f // 1: ldrex r3, [r2]
+ .inst 0xe1923e9f // 1: ldaex r3, [r2]
.inst 0xe0533000 // subs r3, r3, r0
- .inst 0x01823f91 // strexeq r3, r1, [r2]
+ .inst 0x01823e91 // stlexeq r3, r1, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffffa // beq 1b
.inst 0xe2730000 // rsbs r0, r3, #0
- .inst 0xeaffffef // b <__kuser_memory_barrier>
+ .inst 0xe12fff1e // bx lr
.align 5
__kuser_get_tls: // 0xffff0fe0
@@ -75,3 +75,42 @@ __kuser_helper_version: // 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:
+
+/*
+ * AArch32 sigreturn code
+ *
+ * For ARM syscalls, the syscall number has to be loaded into r7.
+ * We do not support an OABI userspace.
+ *
+ * For Thumb syscalls, we also pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+ .globl __aarch32_sigret_code_start
+__aarch32_sigret_code_start:
+
+ /*
+ * ARM Code
+ */
+ .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_sigreturn, 0x27 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0xdf // svc #__NR_compat_sigreturn
+
+ /*
+ * ARM code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x27 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0xdf // svc #__NR_compat_rt_sigreturn
+
+ .globl __aarch32_sigret_code_end
+__aarch32_sigret_code_end:
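The .byte sequences above are simply the little-endian byte layout of the instruction words that signal32.c used to assemble at run time (the MOV_R7_NR_SIGRETURN and SVC_* constants removed later in this diff); emitting them as data keeps the bytes identical when the kernel itself is big-endian. A small self-contained check of that decomposition for the ARM "mov r7, #imm" case (0x77 is just a stand-in immediate):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t nr = 0x77;				/* stand-in syscall number */
	uint32_t insn = 0xe3a07000u | nr;		/* mov r7, #nr (ARM encoding) */
	uint8_t bytes[4] = { nr, 0x70, 0xa0, 0xe3 };	/* what the .byte line emits */

	for (int i = 0; i < 4; i++)
		assert(((insn >> (8 * i)) & 0xff) == bytes[i]);
	return 0;
}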
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cea1594ff933..0e63c98d224c 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -784,8 +784,8 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
-#define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
+#define ARMV8_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */
+#define ARMV8_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */
/*
* Event filters for PMUv3
@@ -1044,7 +1044,7 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
*/
regs = get_irq_regs();
- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -1175,7 +1175,8 @@ static void armv8pmu_reset(void *info)
static int armv8_pmuv3_map_event(struct perf_event *event)
{
return map_cpu_event(event, &armv8_pmuv3_perf_map,
- &armv8_pmuv3_perf_cache_map, 0xFF);
+ &armv8_pmuv3_perf_cache_map,
+ ARMV8_EVTYPE_EVENT);
}
static struct arm_pmu armv8pmu = {
@@ -1257,7 +1258,7 @@ device_initcall(register_pmu_driver);
static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
- return &__get_cpu_var(cpu_hw_events);
+ return this_cpu_ptr(&cpu_hw_events);
}
static void __init cpu_pmu_init(struct arm_pmu *armpmu)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 7ae8a1f00c3c..de17c89985db 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -102,6 +102,13 @@ void arch_cpu_idle(void)
local_irq_enable();
}
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
+}
+#endif
+
void machine_shutdown(void)
{
#ifdef CONFIG_SMP
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 14f73c445ff5..4f97db3d7363 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -17,12 +17,32 @@
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/smp.h>
#include <asm/compiler.h>
+#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/psci.h>
+#include <asm/smp_plat.h>
-struct psci_operations psci_ops;
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+struct psci_power_state {
+ u16 id;
+ u8 type;
+ u8 affinity_level;
+};
+
+struct psci_operations {
+ int (*cpu_suspend)(struct psci_power_state state,
+ unsigned long entry_point);
+ int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+};
+
+static struct psci_operations psci_ops;
static int (*invoke_psci_fn)(u64, u64, u64, u64);
@@ -209,3 +229,68 @@ out_put_node:
of_node_put(np);
return err;
}
+
+#ifdef CONFIG_SMP
+
+static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
+{
+ return 0;
+}
+
+static int __init cpu_psci_cpu_prepare(unsigned int cpu)
+{
+ if (!psci_ops.cpu_on) {
+ pr_err("no cpu_on method, not booting CPU%d\n", cpu);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int cpu_psci_cpu_boot(unsigned int cpu)
+{
+ int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
+ if (err)
+ pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
+
+ return err;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int cpu_psci_cpu_disable(unsigned int cpu)
+{
+ /* Fail early if we don't have CPU_OFF support */
+ if (!psci_ops.cpu_off)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void cpu_psci_cpu_die(unsigned int cpu)
+{
+ int ret;
+ /*
+ * There are no known implementations of PSCI actually using the
+ * power state field, so pass a sensible default for now.
+ */
+ struct psci_power_state state = {
+ .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+ };
+
+ ret = psci_ops.cpu_off(state);
+
+ pr_crit("psci: unable to power off CPU%u (%d)\n", cpu, ret);
+}
+#endif
+
+const struct cpu_operations cpu_psci_ops = {
+ .name = "psci",
+ .cpu_init = cpu_psci_cpu_init,
+ .cpu_prepare = cpu_psci_cpu_prepare,
+ .cpu_boot = cpu_psci_cpu_boot,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = cpu_psci_cpu_disable,
+ .cpu_die = cpu_psci_cpu_die,
+#endif
+};
+
+#endif
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 055cfb80e05c..0bc5e4cbc017 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -45,6 +45,7 @@
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cputable.h>
+#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -60,6 +61,16 @@ EXPORT_SYMBOL(processor_id);
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP_DEFAULT \
+ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+ COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+ COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
+unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+#endif
+
static const char *cpu_name;
static const char *machine_name;
phys_addr_t __fdt_pointer __initdata;
@@ -97,6 +108,11 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpu_logical_map(cpu);
+}
+
static void __init setup_processor(void)
{
struct cpu_info *cpu_info;
@@ -118,76 +134,24 @@ static void __init setup_processor(void)
printk("CPU: %s [%08x] revision %d\n",
cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
- sprintf(init_utsname()->machine, "aarch64");
+ sprintf(init_utsname()->machine, ELF_PLATFORM);
elf_hwcap = 0;
}
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
- struct boot_param_header *devtree;
- unsigned long dt_root;
-
- /* Check we have a non-NULL DT pointer */
- if (!dt_phys) {
- early_print("\n"
- "Error: NULL or invalid device tree blob\n"
- "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
- "\nPlease check your bootloader.\n");
-
- while (true)
- cpu_relax();
-
- }
-
- devtree = phys_to_virt(dt_phys);
-
- /* Check device tree validity */
- if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) {
+ if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
early_print("\n"
"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
- "Expected 0x%x, found 0x%x\n"
+ "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
"\nPlease check your bootloader.\n",
- dt_phys, devtree, OF_DT_HEADER,
- be32_to_cpu(devtree->magic));
+ dt_phys, phys_to_virt(dt_phys));
while (true)
cpu_relax();
}
- initial_boot_params = devtree;
- dt_root = of_get_flat_dt_root();
-
- machine_name = of_get_flat_dt_prop(dt_root, "model", NULL);
- if (!machine_name)
- machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
- if (!machine_name)
- machine_name = "<unknown>";
- pr_info("Machine: %s\n", machine_name);
-
- /* Retrieve various information from the /chosen node */
- of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
- /* Initialize {size,address}-cells info */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
- /* Setup memory, calling early_init_dt_add_memory_arch */
- of_scan_flat_dt(early_init_dt_scan_memory, NULL);
-}
-
-void __init early_init_dt_add_memory_arch(u64 base, u64 size)
-{
- base &= PAGE_MASK;
- size &= PAGE_MASK;
- if (base + size < PHYS_OFFSET) {
- pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
- base, base + size);
- return;
- }
- if (base < PHYS_OFFSET) {
- pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
- base, PHYS_OFFSET);
- size -= PHYS_OFFSET - base;
- base = PHYS_OFFSET;
- }
- memblock_add(base, size);
+ machine_name = of_flat_dt_get_machine_name();
}
/*
@@ -264,6 +228,7 @@ void __init setup_arch(char **cmdline_p)
psci_init();
cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+ cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
smp_init_cpus();
#endif
@@ -304,6 +269,7 @@ subsys_initcall(topology_init);
static const char *hwcap_str[] = {
"fp",
"asimd",
+ "evtstrm",
NULL
};
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index e393174fe859..e8772c07cf5c 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -100,34 +100,6 @@ struct compat_rt_sigframe {
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-/*
- * For ARM syscalls, the syscall number has to be loaded into r7.
- * We do not support an OABI userspace.
- */
-#define MOV_R7_NR_SIGRETURN (0xe3a07000 | __NR_compat_sigreturn)
-#define SVC_SYS_SIGRETURN (0xef000000 | __NR_compat_sigreturn)
-#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | __NR_compat_rt_sigreturn)
-#define SVC_SYS_RT_SIGRETURN (0xef000000 | __NR_compat_rt_sigreturn)
-
-/*
- * For Thumb syscalls, we also pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
-#define SVC_THUMB_SIGRETURN (((0xdf00 | __NR_compat_sigreturn) << 16) | \
- 0x2700 | __NR_compat_sigreturn)
-#define SVC_THUMB_RT_SIGRETURN (((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \
- 0x2700 | __NR_compat_rt_sigreturn)
-
-const compat_ulong_t aarch32_sigret_code[6] = {
- /*
- * AArch32 sigreturn code.
- * We don't construct an OABI SWI - instead we just set the imm24 field
- * to the EABI syscall number so that we create a sane disassembly.
- */
- MOV_R7_NR_SIGRETURN, SVC_SYS_SIGRETURN, SVC_THUMB_SIGRETURN,
- MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN,
-};
-
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
compat_sigset_t cset;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 78db90dcc910..3abb9e797dd4 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -39,6 +39,7 @@
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -54,7 +55,6 @@
* where to place its SVC stack
*/
struct secondary_data secondary_data;
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
enum ipi_msg_type {
IPI_RESCHEDULE,
@@ -63,61 +63,16 @@ enum ipi_msg_type {
IPI_CPU_STOP,
};
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not. This is necessary for the hotplug code to work
- * reliably.
- */
-static void write_pen_release(u64 val)
-{
- void *start = (void *)&secondary_holding_pen_release;
- unsigned long size = sizeof(secondary_holding_pen_release);
-
- secondary_holding_pen_release = val;
- __flush_dcache_area(start, size);
-}
-
/*
* Boot a secondary CPU, and assign it the specified idle task.
* This also gives us the initial stack to use for this CPU.
*/
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- unsigned long timeout;
-
- /*
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- raw_spin_lock(&boot_lock);
-
- /*
- * Update the pen release flag.
- */
- write_pen_release(cpu_logical_map(cpu));
-
- /*
- * Send an event, causing the secondaries to read pen_release.
- */
- sev();
-
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- if (secondary_holding_pen_release == INVALID_HWID)
- break;
- udelay(10);
- }
-
- /*
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- raw_spin_unlock(&boot_lock);
+ if (cpu_ops[cpu]->cpu_boot)
+ return cpu_ops[cpu]->cpu_boot(cpu);
- return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+ return -EOPNOTSUPP;
}
static DECLARE_COMPLETION(cpu_running);
@@ -187,17 +142,13 @@ asmlinkage void secondary_start_kernel(void)
preempt_disable();
trace_hardirqs_off();
- /*
- * Let the primary processor know we're out of the
- * pen, then head off into the C entry point
- */
- write_pen_release(INVALID_HWID);
+ if (cpu_ops[cpu]->cpu_postboot)
+ cpu_ops[cpu]->cpu_postboot();
/*
- * Synchronise with the boot thread.
+ * Enable GIC and timers.
*/
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ notify_cpu_starting(cpu);
/*
* OK, now it's safe to let the boot CPU continue. Wait for
@@ -207,11 +158,6 @@ asmlinkage void secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
- /*
- * Enable GIC and timers.
- */
- notify_cpu_starting(cpu);
-
local_irq_enable();
local_fiq_enable();
@@ -221,39 +167,113 @@ asmlinkage void secondary_start_kernel(void)
cpu_startup_entry(CPUHP_ONLINE);
}
-void __init smp_cpus_done(unsigned int max_cpus)
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
{
- pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
+ /*
+ * If we don't have a cpu_die method, abort before we reach the point
+ * of no return. CPU0 may not have a cpu_ops entry, so test for it.
+ */
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+ return -EOPNOTSUPP;
+
+ /*
+ * We may need to abort a hot unplug for some other mechanism-specific
+ * reason.
+ */
+ if (cpu_ops[cpu]->cpu_disable)
+ return cpu_ops[cpu]->cpu_disable(cpu);
+
+ return 0;
}
-void __init smp_prepare_boot_cpu(void)
+/*
+ * __cpu_disable runs on the processor to be shut down.
+ */
+int __cpu_disable(void)
{
-}
+ unsigned int cpu = smp_processor_id();
+ int ret;
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+ ret = op_cpu_disable(cpu);
+ if (ret)
+ return ret;
-static const struct smp_enable_ops *enable_ops[] __initconst = {
- &smp_spin_table_ops,
- &smp_psci_ops,
- NULL,
-};
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
-static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
+ /*
+ * Remove this CPU from the vm mask set of all processes.
+ */
+ clear_tasks_mm_cpumask(cpu);
-static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
-{
- const struct smp_enable_ops **ops = enable_ops;
+ return 0;
+}
- while (*ops) {
- if (!strcmp(name, (*ops)->name))
- return *ops;
+static DECLARE_COMPLETION(cpu_died);
- ops++;
+/*
+ * Called on the thread which is asking for a CPU to be shut down -
+ * waits until shutdown has completed, or it times out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+ pr_crit("CPU%u: cpu didn't die\n", cpu);
+ return;
}
+ pr_notice("CPU%u: shutdown\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shut down.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void cpu_die(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ idle_task_exit();
- return NULL;
+ local_irq_disable();
+
+ /* Tell __cpu_die() that this CPU is now safe to dispose of */
+ complete(&cpu_died);
+
+ /*
+ * Actually shut down the CPU. This must never fail. The specific hotplug
+ * mechanism must perform all required cache maintenance to ensure that
+ * no dirty lines are lost in the process of shutting down the CPU.
+ */
+ cpu_ops[cpu]->cpu_die(cpu);
+
+ BUG();
+}
+#endif
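For reference, the path above is exercised by the normal CPU hotplug interface: offlining a CPU from userspace ends up calling __cpu_disable() on the dying CPU, __cpu_die() on the requesting thread, and finally cpu_die() from the dying CPU's idle loop via arch_cpu_idle_dead(). A trivial userspace trigger (the CPU number is hard-coded purely for illustration):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu1/online", "w");

	if (!f)
		return 1;
	fputs("0\n", f);		/* take CPU1 offline */
	return fclose(f) ? 1 : 0;
}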
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+ pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
}
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+
/*
* Enumerate the possible CPU set from the device tree and build the
* cpu logical map array containing MPIDR values related to logical
@@ -261,9 +281,8 @@ static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
*/
void __init smp_init_cpus(void)
{
- const char *enable_method;
struct device_node *dn = NULL;
- int i, cpu = 1;
+ unsigned int i, cpu = 1;
bool bootcpu_valid = false;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
@@ -332,25 +351,10 @@ void __init smp_init_cpus(void)
if (cpu >= NR_CPUS)
goto next;
- /*
- * We currently support only the "spin-table" enable-method.
- */
- enable_method = of_get_property(dn, "enable-method", NULL);
- if (!enable_method) {
- pr_err("%s: missing enable-method property\n",
- dn->full_name);
+ if (cpu_read_ops(dn, cpu) != 0)
goto next;
- }
-
- smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
-
- if (!smp_enable_ops[cpu]) {
- pr_err("%s: invalid enable-method property: %s\n",
- dn->full_name, enable_method);
- goto next;
- }
- if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
+ if (cpu_ops[cpu]->cpu_init(dn, cpu))
goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
@@ -380,8 +384,8 @@ next:
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int cpu, err;
- unsigned int ncores = num_possible_cpus();
+ int err;
+ unsigned int cpu, ncores = num_possible_cpus();
/*
* are we trying to boot more cores than exist?
@@ -408,10 +412,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (cpu == smp_processor_id())
continue;
- if (!smp_enable_ops[cpu])
+ if (!cpu_ops[cpu])
continue;
- err = smp_enable_ops[cpu]->prepare_cpu(cpu);
+ err = cpu_ops[cpu]->cpu_prepare(cpu);
if (err)
continue;
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
deleted file mode 100644
index 0c533301be77..000000000000
--- a/arch/arm64/kernel/smp_psci.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * PSCI SMP initialisation
- *
- * Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/smp.h>
-
-#include <asm/psci.h>
-#include <asm/smp_plat.h>
-
-static int __init smp_psci_init_cpu(struct device_node *dn, int cpu)
-{
- return 0;
-}
-
-static int __init smp_psci_prepare_cpu(int cpu)
-{
- int err;
-
- if (!psci_ops.cpu_on) {
- pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu);
- return -ENODEV;
- }
-
- err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_holding_pen));
- if (err) {
- pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
- return err;
- }
-
- return 0;
-}
-
-const struct smp_enable_ops smp_psci_ops __initconst = {
- .name = "psci",
- .init_cpu = smp_psci_init_cpu,
- .prepare_cpu = smp_psci_prepare_cpu,
-};
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 7c35fa682f76..44c22805d2e2 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -16,15 +16,39 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+
+extern void secondary_holding_pen(void);
+volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
static phys_addr_t cpu_release_addr[NR_CPUS];
+static DEFINE_RAW_SPINLOCK(boot_lock);
+
+/*
+ * Write secondary_holding_pen_release in a way that is guaranteed to be
+ * visible to all observers, irrespective of whether they're taking part
+ * in coherency or not. This is necessary for the hotplug code to work
+ * reliably.
+ */
+static void write_pen_release(u64 val)
+{
+ void *start = (void *)&secondary_holding_pen_release;
+ unsigned long size = sizeof(secondary_holding_pen_release);
-static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
+ secondary_holding_pen_release = val;
+ __flush_dcache_area(start, size);
+}
+
+
+static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
{
/*
* Determine the address from which the CPU is polling.
@@ -40,7 +64,7 @@ static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
return 0;
}
-static int __init smp_spin_table_prepare_cpu(int cpu)
+static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
void **release_addr;
@@ -48,7 +72,16 @@ static int __init smp_spin_table_prepare_cpu(int cpu)
return -ENODEV;
release_addr = __va(cpu_release_addr[cpu]);
- release_addr[0] = (void *)__pa(secondary_holding_pen);
+
+ /*
+ * We write the release address as LE regardless of the native
+ * endianness of the kernel. Therefore, any boot-loaders that
+ * read this address need to convert it to the
+ * boot-loader's endianness before jumping. This is mandated by
+ * the boot protocol.
+ */
+ release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));
+
__flush_dcache_area(release_addr, sizeof(release_addr[0]));
/*
@@ -59,8 +92,60 @@ static int __init smp_spin_table_prepare_cpu(int cpu)
return 0;
}
-const struct smp_enable_ops smp_spin_table_ops __initconst = {
+static int smp_spin_table_cpu_boot(unsigned int cpu)
+{
+ unsigned long timeout;
+
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ raw_spin_lock(&boot_lock);
+
+ /*
+ * Update the pen release flag.
+ */
+ write_pen_release(cpu_logical_map(cpu));
+
+ /*
+ * Send an event, causing the secondaries to read pen_release.
+ */
+ sev();
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ if (secondary_holding_pen_release == INVALID_HWID)
+ break;
+ udelay(10);
+ }
+
+ /*
+ * Now the secondary core is starting up, let it run its
+ * calibrations, then wait for it to finish
+ */
+ raw_spin_unlock(&boot_lock);
+
+ return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+}
+
+void smp_spin_table_cpu_postboot(void)
+{
+ /*
+ * Let the primary processor know we're out of the pen.
+ */
+ write_pen_release(INVALID_HWID);
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
+}
+
+const struct cpu_operations smp_spin_table_ops = {
.name = "spin-table",
- .init_cpu = smp_spin_table_init_cpu,
- .prepare_cpu = smp_spin_table_prepare_cpu,
+ .cpu_init = smp_spin_table_cpu_init,
+ .cpu_prepare = smp_spin_table_cpu_prepare,
+ .cpu_boot = smp_spin_table_cpu_boot,
+ .cpu_postboot = smp_spin_table_cpu_postboot,
};
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index a1b19ed7467c..423a5b3fc2be 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -59,48 +59,48 @@ ENDPROC(compat_sys_fstatfs64_wrapper)
* extension.
*/
compat_sys_pread64_wrapper:
- orr x3, x4, x5, lsl #32
+ regs_to_64 x3, x4, x5
b sys_pread64
ENDPROC(compat_sys_pread64_wrapper)
compat_sys_pwrite64_wrapper:
- orr x3, x4, x5, lsl #32
+ regs_to_64 x3, x4, x5
b sys_pwrite64
ENDPROC(compat_sys_pwrite64_wrapper)
compat_sys_truncate64_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
b sys_truncate
ENDPROC(compat_sys_truncate64_wrapper)
compat_sys_ftruncate64_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
b sys_ftruncate
ENDPROC(compat_sys_ftruncate64_wrapper)
compat_sys_readahead_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
mov w2, w4
b sys_readahead
ENDPROC(compat_sys_readahead_wrapper)
compat_sys_fadvise64_64_wrapper:
mov w6, w1
- orr x1, x2, x3, lsl #32
- orr x2, x4, x5, lsl #32
+ regs_to_64 x1, x2, x3
+ regs_to_64 x2, x4, x5
mov w3, w6
b sys_fadvise64_64
ENDPROC(compat_sys_fadvise64_64_wrapper)
compat_sys_sync_file_range2_wrapper:
- orr x2, x2, x3, lsl #32
- orr x3, x4, x5, lsl #32
+ regs_to_64 x2, x2, x3
+ regs_to_64 x3, x4, x5
b sys_sync_file_range2
ENDPROC(compat_sys_sync_file_range2_wrapper)
compat_sys_fallocate_wrapper:
- orr x2, x2, x3, lsl #32
- orr x3, x4, x5, lsl #32
+ regs_to_64 x2, x2, x3
+ regs_to_64 x3, x4, x5
b sys_fallocate
ENDPROC(compat_sys_fallocate_wrapper)
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 03dc3718eb13..29c39d5d77e3 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -61,13 +61,6 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
-static u64 sched_clock_mult __read_mostly;
-
-unsigned long long notrace sched_clock(void)
-{
- return arch_timer_read_counter() * sched_clock_mult;
-}
-
void __init time_init(void)
{
u32 arch_timer_rate;
@@ -78,9 +71,6 @@ void __init time_init(void)
if (!arch_timer_rate)
panic("Unable to initialise architected timer.\n");
- /* Cache the sched_clock multiplier to save a divide in the hot path. */
- sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
-
/* Calibrate the delay loop directly */
lpj_fine = arch_timer_rate / HZ;
}
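
The deleted sched_clock() cached NSEC_PER_SEC / arch_timer_rate as an integer multiplier, which is only exact when the timer frequency divides one billion evenly. A small standalone C check of that arithmetic (the rates below are illustrative, not taken from the patch):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
	/* Illustrative timer rates only. */
	unsigned long rates[] = { 50000000UL, 19200000UL };

	for (int i = 0; i < 2; i++) {
		unsigned long mult = NSEC_PER_SEC / rates[i];	/* truncated ns per tick */
		double exact = (double)NSEC_PER_SEC / rates[i];

		printf("%lu Hz -> mult %lu (exact %.2f ns/tick)\n",
		       rates[i], mult, exact);
	}
	return 0;
}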
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 6a389dc1bd49..65d40cf6945a 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -58,7 +58,10 @@ static struct page *vectors_page[1];
static int alloc_vectors_page(void)
{
extern char __kuser_helper_start[], __kuser_helper_end[];
+ extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+
int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
unsigned long vpage;
vpage = get_zeroed_page(GFP_ATOMIC);
@@ -72,7 +75,7 @@ static int alloc_vectors_page(void)
/* sigreturn code */
memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
- aarch32_sigret_code, sizeof(aarch32_sigret_code));
+ __aarch32_sigret_code_start, sigret_sz);
flush_icache_range(vpage, vpage + PAGE_SIZE);
vectors_page[0] = virt_to_page(vpage);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index f8ab9d8e2ea3..5161ad992091 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -54,7 +54,6 @@ SECTIONS
}
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
- *(.smp.pen.text)
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
@@ -97,30 +96,13 @@ SECTIONS
PERCPU_SECTION(64)
__init_end = .;
- . = ALIGN(THREAD_SIZE);
- __data_loc = .;
-
- .data : AT(__data_loc) {
- _data = .; /* address in memory */
- _sdata = .;
-
- /*
- * first, the init task union, aligned
- * to an 8192 byte boundary.
- */
- INIT_TASK_DATA(THREAD_SIZE)
- NOSAVE_DATA
- CACHELINE_ALIGNED_DATA(64)
- READ_MOSTLY_DATA(64)
-
- /*
- * and the usual data section
- */
- DATA_DATA
- CONSTRUCTORS
-
- _edata = .;
- }
+
+ . = ALIGN(PAGE_SIZE);
+ _data = .;
+ __data_loc = _data - LOAD_OFFSET;
+ _sdata = .;
+ RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
_edata_loc = __data_loc + SIZEOF(.data);
BSS_SECTION(0, 0, 0)
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 2c3ff67a8ecb..3f0731e53274 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -248,6 +248,26 @@ int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
return kvm_reset_vcpu(vcpu);
}
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
+{
+ int target = kvm_target_cpu();
+
+ if (target < 0)
+ return -ENODEV;
+
+ memset(init, 0, sizeof(*init));
+
+ /*
+ * For now, we don't return any features.
+ * In the future, we might use the features field to return
+ * target-specific features available for the preferred
+ * target type.
+ */
+ init->target = (__u32)target;
+
+ return 0;
+}
+
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
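
kvm_vcpu_preferred_target() fills a struct kvm_vcpu_init with the host's preferred target type and, for now, an empty feature set, which userspace can pass straight back when initialising VCPUs. A hedged userspace sketch, assuming this kernel's UAPI exposes the corresponding KVM_ARM_PREFERRED_TARGET vm ioctl:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hedged sketch: ask the VM for its preferred VCPU target, then initialise a
 * VCPU with exactly that target. Error handling is trimmed; vm_fd and vcpu_fd
 * are assumed to come from KVM_CREATE_VM / KVM_CREATE_VCPU.
 */
static int init_vcpu_with_preferred_target(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;

	memset(&init, 0, sizeof(init));
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;	/* older kernels: fall back to a fixed target */

	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}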
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index de2de5db628d..0cb8742de4f2 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -31,7 +31,6 @@
#include <linux/sort.h>
#include <linux/of_fdt.h>
-#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -39,17 +38,9 @@
#include "mm.h"
-static unsigned long phys_initrd_start __initdata = 0;
-static unsigned long phys_initrd_size __initdata = 0;
-
phys_addr_t memstart_addr __read_mostly = 0;
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
- phys_initrd_start = start;
- phys_initrd_size = end - start;
-}
-
+#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
{
unsigned long start, size;
@@ -59,12 +50,13 @@ static int __init early_initrd(char *p)
if (*endp == ',') {
size = memparse(endp + 1, NULL);
- phys_initrd_start = start;
- phys_initrd_size = size;
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(start + size);
}
return 0;
}
early_param("initrd", early_initrd);
+#endif
#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
@@ -137,13 +129,8 @@ void __init arm64_memblock_init(void)
/* Register the kernel text, kernel data and initrd with memblock */
memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
- if (phys_initrd_size) {
- memblock_reserve(phys_initrd_start, phys_initrd_size);
-
- /* Now convert initrd to virtual addresses */
- initrd_start = __phys_to_virt(phys_initrd_start);
- initrd_end = initrd_start + phys_initrd_size;
- }
+ if (initrd_start)
+ memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
#endif
/*
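
The early_initrd() hook above consumes a boot parameter of the form initrd=<start>,<size>, where both values go through memparse() and therefore accept the usual K/M/G suffixes (for example initrd=0x84000000,16M) before being turned into linear-map virtual addresses with __va(). A rough userspace approximation of just the parsing step, with a hand-rolled stand-in for memparse():

#include <stdio.h>
#include <stdlib.h>

/* Hand-rolled stand-in for memparse(): a number plus optional K/M/G suffix. */
static unsigned long long parse_mem(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': case 'g': v <<= 10; /* fall through */
	case 'M': case 'm': v <<= 10; /* fall through */
	case 'K': case 'k': v <<= 10; (*end)++; break;
	}
	return v;
}

int main(void)
{
	char *endp;
	const char *arg = "0x84000000,16M";	/* illustrative value only */
	unsigned long long start = parse_mem(arg, &endp);
	unsigned long long size = 0;

	if (*endp == ',')
		size = parse_mem(endp + 1, &endp);

	printf("initrd: start %#llx, size %llu bytes\n", start, size);
	return 0;
}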
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 1725cd6db37a..2bb1d586664c 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -77,8 +77,24 @@ EXPORT_SYMBOL(__ioremap);
void __iounmap(volatile void __iomem *io_addr)
{
- void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+ unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
- vunmap(addr);
+ /*
+ * We could get an address outside the vmalloc range if
+ * ioremap_cache() reused an existing RAM mapping.
+ */
+ if (VMALLOC_START <= addr && addr < VMALLOC_END)
+ vunmap((void *)addr);
}
EXPORT_SYMBOL(__iounmap);
+
+void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
+{
+ /* For normal memory we already have a cacheable mapping. */
+ if (pfn_valid(__phys_to_pfn(phys_addr)))
+ return (void __iomem *)__phys_to_virt(phys_addr);
+
+ return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
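
ioremap_cache() now has two outcomes: physical addresses backed by ordinary RAM simply reuse the existing linear mapping, while anything else gets a fresh PROT_NORMAL mapping, and the new range check in __iounmap() keeps the matching iounmap() from trying to vunmap() a linear-map address. A hedged kernel-module-style usage sketch (the firmware-table framing and symbol names are illustrative):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

static void __iomem *fw_table;

/*
 * Illustrative only: map a RAM-backed (e.g. memblock-reserved) region
 * cacheably. If the range is ordinary kernel RAM, ioremap_cache() returns
 * the linear-map address and the later iounmap() is effectively a no-op.
 */
static int fw_table_map(phys_addr_t pa, size_t len)
{
	fw_table = ioremap_cache(pa, len);
	return fw_table ? 0 : -ENOMEM;
}

static void fw_table_unmap(void)
{
	iounmap(fw_table);
	fw_table = NULL;
}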
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index b1b31bbc967b..421b99fd635d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -162,9 +162,9 @@ ENDPROC(__cpu_setup)
* CE0 XWHW CZ ME TEEA S
* .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
* 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
- * .... .100 .... 01.1 11.1 ..01 0001 1101 < software settings
+ * .... .1.. .... 01.1 11.1 ..01 0001 1101 < software settings
*/
.type crval, #object
crval:
- .word 0x030802e2 // clear
+ .word 0x000802e2 // clear
.word 0x0405d11d // set
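
crval is a clear/set word pair that appears to be applied to the SCTLR_EL1 value (by __cpu_setup, not shown here) as (sctlr & ~clear) | set. Dropping 0x03000000 from the clear word stops forcing the EE/E0E endianness bits (25 and 24) to zero, which matches the two forced zeros turning into don't-care dots in the software-settings row of the comment. A small C illustration of the effect, with a made-up reset value:

#include <stdint.h>
#include <stdio.h>

static uint32_t apply_crval(uint32_t sctlr, uint32_t clear, uint32_t set)
{
	return (sctlr & ~clear) | set;	/* clear the masked bits, then set */
}

int main(void)
{
	uint32_t reset = 0x33d00800;	/* made-up reset value with EE/E0E set */

	/* The old mask forced bits 24-25 clear; the new one leaves them alone. */
	printf("old crval: %#010x\n", apply_crval(reset, 0x030802e2, 0x0405d11d));
	printf("new crval: %#010x\n", apply_crval(reset, 0x000802e2, 0x0405d11d));
	return 0;
}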
diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile
index be240404ba96..74a8d87e542b 100644
--- a/arch/arm64/xen/Makefile
+++ b/arch/arm64/xen/Makefile
@@ -1,2 +1,2 @@
-xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o)
+xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
obj-y := xen-arm.o hypercall.o
diff --git a/arch/avr32/boards/atngw100/evklcd10x.c b/arch/avr32/boards/atngw100/evklcd10x.c
index 20388750d564..64919b0da7aa 100644
--- a/arch/avr32/boards/atngw100/evklcd10x.c
+++ b/arch/avr32/boards/atngw100/evklcd10x.c
@@ -58,7 +58,7 @@ static struct fb_monspecs __initdata atevklcd10x_default_monspecs = {
.dclkmax = 28330000,
};
-static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata atevklcd10x_lcdc_data = {
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
@@ -96,7 +96,7 @@ static struct fb_monspecs __initdata atevklcd10x_default_monspecs = {
.dclkmax = 7000000,
};
-static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata atevklcd10x_lcdc_data = {
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
@@ -134,7 +134,7 @@ static struct fb_monspecs __initdata atevklcd10x_default_monspecs = {
.dclkmax = 6400000,
};
-static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata atevklcd10x_lcdc_data = {
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
@@ -145,7 +145,7 @@ static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
};
#endif
-static void atevklcd10x_lcdc_power_control(int on)
+static void atevklcd10x_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on)
{
gpio_set_value(GPIO_PIN_PB(15), on);
}
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
index 7de083d19b7e..1ba09e4c02b1 100644
--- a/arch/avr32/boards/atngw100/mrmt.c
+++ b/arch/avr32/boards/atngw100/mrmt.c
@@ -83,7 +83,7 @@ static struct fb_monspecs __initdata lcd_fb_default_monspecs = {
.dclkmax = 9260000,
};
-static struct atmel_lcdfb_info __initdata rmt_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata rmt_lcdc_data = {
.default_bpp = 24,
.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
@@ -126,7 +126,7 @@ static struct fb_monspecs __initdata lcd_fb_default_monspecs = {
.dclkmax = 9260000,
};
-static struct atmel_lcdfb_info __initdata rmt_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata rmt_lcdc_data = {
.default_bpp = 24,
.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
diff --git a/arch/avr32/boards/atstk1000/atstk1000.h b/arch/avr32/boards/atstk1000/atstk1000.h
index 9392d3252865..653cc09e536c 100644
--- a/arch/avr32/boards/atstk1000/atstk1000.h
+++ b/arch/avr32/boards/atstk1000/atstk1000.h
@@ -10,7 +10,7 @@
#ifndef __ARCH_AVR32_BOARDS_ATSTK1000_ATSTK1000_H
#define __ARCH_AVR32_BOARDS_ATSTK1000_ATSTK1000_H
-extern struct atmel_lcdfb_info atstk1000_lcdc_data;
+extern struct atmel_lcdfb_pdata atstk1000_lcdc_data;
void atstk1000_setup_j2_leds(void);
diff --git a/arch/avr32/boards/atstk1000/setup.c b/arch/avr32/boards/atstk1000/setup.c
index 2d6b560115d9..b6b88f5e0b43 100644
--- a/arch/avr32/boards/atstk1000/setup.c
+++ b/arch/avr32/boards/atstk1000/setup.c
@@ -55,7 +55,7 @@ static struct fb_monspecs __initdata atstk1000_default_monspecs = {
.dclkmax = 30000000,
};
-struct atmel_lcdfb_info __initdata atstk1000_lcdc_data = {
+struct atmel_lcdfb_pdata __initdata atstk1000_lcdc_data = {
.default_bpp = 24,
.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c
index 27bd6fbe21cb..7b1f2cd85400 100644
--- a/arch/avr32/boards/favr-32/setup.c
+++ b/arch/avr32/boards/favr-32/setup.c
@@ -125,7 +125,7 @@ static struct fb_monspecs __initdata favr32_default_monspecs = {
.dclkmax = 28000000,
};
-struct atmel_lcdfb_info __initdata favr32_lcdc_data = {
+struct atmel_lcdfb_pdata __initdata favr32_lcdc_data = {
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
diff --git a/arch/avr32/boards/hammerhead/setup.c b/arch/avr32/boards/hammerhead/setup.c
index 9d1efd1cd425..dc0e317f2ecd 100644
--- a/arch/avr32/boards/hammerhead/setup.c
+++ b/arch/avr32/boards/hammerhead/setup.c
@@ -77,7 +77,7 @@ static struct fb_monspecs __initdata hammerhead_hda350t_monspecs = {
.dclkmax = 10000000,
};
-struct atmel_lcdfb_info __initdata hammerhead_lcdc_data = {
+struct atmel_lcdfb_pdata __initdata hammerhead_lcdc_data = {
.default_bpp = 24,
.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
diff --git a/arch/avr32/boards/merisc/display.c b/arch/avr32/boards/merisc/display.c
index 85a543cd4abc..e7683ee7ed40 100644
--- a/arch/avr32/boards/merisc/display.c
+++ b/arch/avr32/boards/merisc/display.c
@@ -45,7 +45,7 @@ static struct fb_monspecs merisc_fb_monspecs = {
.dclkmax = 30000000,
};
-struct atmel_lcdfb_info merisc_lcdc_data = {
+struct atmel_lcdfb_pdata merisc_lcdc_data = {
.default_bpp = 24,
.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
diff --git a/arch/avr32/boards/mimc200/setup.c b/arch/avr32/boards/mimc200/setup.c
index 05358aa5ef7d..1cb8e9cc5cfa 100644
--- a/arch/avr32/boards/mimc200/setup.c
+++ b/arch/avr32/boards/mimc200/setup.c
@@ -8,7 +8,7 @@
* published by the Free Software Foundation.
*/
-extern struct atmel_lcdfb_info mimc200_lcdc_data;
+extern struct atmel_lcdfb_pdata mimc200_lcdc_data;
#include <linux/clk.h>
#include <linux/etherdevice.h>
@@ -71,7 +71,7 @@ static struct fb_monspecs __initdata mimc200_default_monspecs = {
.dclkmax = 25200000,
};
-struct atmel_lcdfb_info __initdata mimc200_lcdc_data = {
+struct atmel_lcdfb_pdata __initdata mimc200_lcdc_data = {
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index fd7980743890..658001b52400 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += futex.h
+generic-y += preempt.h
generic-y += irq_regs.h
generic-y += param.h
generic-y += local.h
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index 11c4259c62fb..439936421434 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -76,4 +76,6 @@
#define SO_BUSY_POLL 46
+#define SO_MAX_PACING_RATE 47
+
#endif /* __ASM_AVR32_SOCKET_H */
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index a68f3cf7c3c1..a1f4d1e91b52 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1439,7 +1439,7 @@ fail:
* LCDC
* -------------------------------------------------------------------- */
#if defined(CONFIG_CPU_AT32AP7000) || defined(CONFIG_CPU_AT32AP7002)
-static struct atmel_lcdfb_info atmel_lcdfb0_data;
+static struct atmel_lcdfb_pdata atmel_lcdfb0_data;
static struct resource atmel_lcdfb0_resource[] = {
{
.start = 0xff000000,
@@ -1467,12 +1467,12 @@ static struct clk atmel_lcdfb0_pixclk = {
};
struct platform_device *__init
-at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data,
+at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_pdata *data,
unsigned long fbmem_start, unsigned long fbmem_len,
u64 pin_mask)
{
struct platform_device *pdev;
- struct atmel_lcdfb_info *info;
+ struct atmel_lcdfb_pdata *info;
struct fb_monspecs *monspecs;
struct fb_videomode *modedb;
unsigned int modedb_size;
@@ -1529,7 +1529,7 @@ at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data,
}
info = pdev->dev.platform_data;
- memcpy(info, data, sizeof(struct atmel_lcdfb_info));
+ memcpy(info, data, sizeof(struct atmel_lcdfb_pdata));
info->default_monspecs = monspecs;
pdev->name = "at32ap-lcdfb";
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index d485b0391357..f1a316d52c73 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -44,9 +44,9 @@ struct platform_device *
at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n);
void at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n);
-struct atmel_lcdfb_info;
+struct atmel_lcdfb_pdata;
struct platform_device *
-at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data,
+at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_pdata *data,
unsigned long fbmem_start, unsigned long fbmem_len,
u64 pin_mask);
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index f78c9a2c7e28..eb382aedd9a2 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -1429,7 +1429,6 @@ source "drivers/cpufreq/Kconfig"
config BFIN_CPU_FREQ
bool
depends on CPU_FREQ
- select CPU_FREQ_TABLE
default y
config CPU_VOLTAGE
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 127826f8a375..f2b43474b0e2 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -44,3 +44,4 @@ generic-y += ucontext.h
generic-y += unaligned.h
generic-y += user.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 957dd00ea561..77ea09b8bce1 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -36,9 +36,6 @@ config GENERIC_HWEIGHT
config GENERIC_BUG
def_bool y
-config COMMON_CLKDEV
- def_bool y
-
config C6X_BIG_KERNEL
bool "Build a big kernel"
help
@@ -105,10 +102,6 @@ menu "Processor type and features"
source "arch/c6x/platforms/Kconfig"
-config TMS320C6X_CACHES_ON
- bool "L2 cache support"
- default y
-
config KERNEL_RAM_BASE_ADDRESS
hex "Virtual address of memory base"
default 0xe0000000 if SOC_TMS320C6455
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index e49f918531ad..fc0b3c356027 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -56,3 +56,4 @@ generic-y += ucontext.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/c6x/include/asm/prom.h b/arch/c6x/include/asm/prom.h
deleted file mode 100644
index b4ec95f07518..000000000000
--- a/arch/c6x/include/asm/prom.h
+++ /dev/null
@@ -1 +0,0 @@
-/* dummy prom.h; here to make linux/of.h's #includes happy */
diff --git a/arch/c6x/include/asm/setup.h b/arch/c6x/include/asm/setup.h
index ecead15872a6..696804475f55 100644
--- a/arch/c6x/include/asm/setup.h
+++ b/arch/c6x/include/asm/setup.h
@@ -14,8 +14,6 @@
#include <uapi/asm/setup.h>
#ifndef __ASSEMBLY__
-extern char c6x_command_line[COMMAND_LINE_SIZE];
-
extern int c6x_add_memory(phys_addr_t start, unsigned long size);
extern unsigned long ram_start;
diff --git a/arch/c6x/kernel/devicetree.c b/arch/c6x/kernel/devicetree.c
index 9e15ab9199b2..fa3e5741514e 100644
--- a/arch/c6x/kernel/devicetree.c
+++ b/arch/c6x/kernel/devicetree.c
@@ -10,37 +10,8 @@
*
*/
#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/initrd.h>
#include <linux/memblock.h>
-void __init early_init_devtree(void *params)
-{
- /* Setup flat device-tree pointer */
- initial_boot_params = params;
-
- /* Retrieve various informations from the /chosen node of the
- * device-tree, including the platform type, initrd location and
- * size and more ...
- */
- of_scan_flat_dt(early_init_dt_scan_chosen, c6x_command_line);
-
- /* Scan memory nodes and rebuild MEMBLOCKs */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
- of_scan_flat_dt(early_init_dt_scan_memory, NULL);
-}
-
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
-}
-#endif
-
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
c6x_add_memory(base, size);
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
index f4e72bd8c103..731db4b9014d 100644
--- a/arch/c6x/kernel/setup.c
+++ b/arch/c6x/kernel/setup.c
@@ -68,13 +68,6 @@ unsigned long ram_end;
static unsigned long dma_start __initdata;
static unsigned long dma_size __initdata;
-char c6x_command_line[COMMAND_LINE_SIZE];
-
-#if defined(CONFIG_CMDLINE_BOOL)
-static const char default_command_line[COMMAND_LINE_SIZE] __section(.cmdline) =
- CONFIG_CMDLINE;
-#endif
-
struct cpuinfo_c6x {
const char *cpu_name;
const char *cpu_voltage;
@@ -294,10 +287,8 @@ notrace void __init machine_init(unsigned long dt_ptr)
fdt = dtb;
/* Do some early initialization based on the flat device tree */
- early_init_devtree(fdt);
+ early_init_dt_scan(fdt);
- /* parse_early_param needs a boot_command_line */
- strlcpy(boot_command_line, c6x_command_line, COMMAND_LINE_SIZE);
parse_early_param();
}
@@ -309,7 +300,7 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "Initializing kernel\n");
/* Initialize command line */
- *cmdline_p = c6x_command_line;
+ *cmdline_p = boot_command_line;
memory_end = ram_end;
memory_end &= ~(PAGE_SIZE - 1);
diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S
index 279d80725128..5a6e141d1641 100644
--- a/arch/c6x/kernel/vmlinux.lds.S
+++ b/arch/c6x/kernel/vmlinux.lds.S
@@ -37,12 +37,6 @@ SECTIONS
_vectors_end = .;
}
- . = ALIGN(0x1000);
- .cmdline :
- {
- *(.cmdline)
- }
-
/*
* This section contains data which may be shared with other
* cores. It needs to be a fixed offset from PAGE_OFFSET
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 02380bed189c..9c957c81c688 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -130,13 +130,11 @@ config SVINTO_SIM
config ETRAXFS
bool "ETRAX-FS-V32"
- select CPU_FREQ_TABLE if CPU_FREQ
help
Support CRIS V32.
config CRIS_MACH_ARTPEC3
bool "ARTPEC-3"
- select CPU_FREQ_TABLE if CPU_FREQ
help
Support Axis ARTPEC-3.
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index c8325455520e..b06caf649a95 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -11,3 +11,4 @@ generic-y += module.h
generic-y += trace_clock.h
generic-y += vga.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/cris/include/asm/pci.h b/arch/cris/include/asm/pci.h
index 146da904cdd8..f666734926d5 100644
--- a/arch/cris/include/asm/pci.h
+++ b/arch/cris/include/asm/pci.h
@@ -11,7 +11,6 @@
#define pcibios_assign_all_busses(void) 1
-extern unsigned long pci_mem_start;
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h
index eb723e51554e..13829aaaeec5 100644
--- a/arch/cris/include/uapi/asm/socket.h
+++ b/arch/cris/include/uapi/asm/socket.h
@@ -78,6 +78,8 @@
#define SO_BUSY_POLL 46
+#define SO_MAX_PACING_RATE 47
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index c5d767028306..74742dc6a3da 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -2,3 +2,4 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
+generic-y += preempt.h
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index f0cb1c341163..5d4299762426 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -76,5 +76,7 @@
#define SO_BUSY_POLL 46
+#define SO_MAX_PACING_RATE 47
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/frv/mb93090-mb00/pci-frv.h b/arch/frv/mb93090-mb00/pci-frv.h
index 76c4e73d643d..a7e487fe76ed 100644
--- a/arch/frv/mb93090-mb00/pci-frv.h
+++ b/arch/frv/mb93090-mb00/pci-frv.h
@@ -30,7 +30,6 @@ void pcibios_resource_survey(void);
/* pci-vdk.c */
-extern int __nongpreldata pcibios_last_bus;
extern struct pci_ops *__nongpreldata pci_root_ops;
/* pci-irq.c */
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index deb67843693c..efa5d65b0007 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -25,7 +25,6 @@
unsigned int __nongpreldata pci_probe = 1;
-int __nongpreldata pcibios_last_bus = -1;
struct pci_ops *__nongpreldata pci_root_ops;
/*
@@ -220,37 +219,6 @@ static struct pci_ops * __init pci_check_direct(void)
}
/*
- * Discover remaining PCI buses in case there are peer host bridges.
- * We use the number of last PCI bus provided by the PCI BIOS.
- */
-static void __init pcibios_fixup_peer_bridges(void)
-{
- struct pci_bus bus;
- struct pci_dev dev;
- int n;
- u16 l;
-
- if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff)
- return;
- printk("PCI: Peer bridge fixup\n");
- for (n=0; n <= pcibios_last_bus; n++) {
- if (pci_find_bus(0, n))
- continue;
- bus.number = n;
- bus.ops = pci_root_ops;
- dev.bus = &bus;
- for(dev.devfn=0; dev.devfn<256; dev.devfn += 8)
- if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) &&
- l != 0x0000 && l != 0xffff) {
- printk("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l);
- printk("PCI: Discovered peer bus %02x\n", n);
- pci_scan_bus(n, pci_root_ops, NULL);
- break;
- }
- }
-}
-
-/*
* Exceptions for specific devices. Usually work-arounds for fatal design flaws.
*/
@@ -418,7 +386,6 @@ int __init pcibios_init(void)
pci_scan_root_bus(NULL, 0, pci_root_ops, NULL, &resources);
pcibios_irq_init();
- pcibios_fixup_peer_bridges();
pcibios_fixup_irqs();
pcibios_resource_survey();
@@ -432,9 +399,6 @@ char * __init pcibios_setup(char *str)
if (!strcmp(str, "off")) {
pci_probe = 0;
return NULL;
- } else if (!strncmp(str, "lastbus=", 8)) {
- pcibios_last_bus = simple_strtol(str+8, NULL, 0);
- return NULL;
}
return str;
}
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
deleted file mode 100644
index 24b1dc2564f1..000000000000
--- a/arch/h8300/Kconfig
+++ /dev/null
@@ -1,108 +0,0 @@
-config H8300
- bool
- default y
- select HAVE_IDE
- select GENERIC_ATOMIC64
- select HAVE_UID16
- select VIRT_TO_BUS
- select ARCH_WANT_IPC_PARSE_VERSION
- select GENERIC_IRQ_SHOW
- select GENERIC_CPU_DEVICES
- select MODULES_USE_ELF_RELA
- select OLD_SIGSUSPEND3
- select OLD_SIGACTION
- select HAVE_UNDERSCORE_SYMBOL_PREFIX
-
-config MMU
- bool
- default n
-
-config SWAP
- bool
- default n
-
-config ZONE_DMA
- bool
- default y
-
-config FPU
- bool
- default n
-
-config RWSEM_GENERIC_SPINLOCK
- bool
- default y
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
- default n
-
-config ARCH_HAS_ILOG2_U32
- bool
- default n
-
-config ARCH_HAS_ILOG2_U64
- bool
- default n
-
-config GENERIC_HWEIGHT
- bool
- default y
-
-config GENERIC_CALIBRATE_DELAY
- bool
- default y
-
-config GENERIC_BUG
- bool
- depends on BUG
-
-config TIME_LOW_RES
- bool
- default y
-
-config NO_IOPORT
- def_bool y
-
-config NO_DMA
- def_bool y
-
-config ISA
- bool
- default y
-
-config PCI
- bool
- default n
-
-config HZ
- int
- default 100
-
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
-source "arch/h8300/Kconfig.cpu"
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "arch/h8300/Kconfig.ide"
-
-source "fs/Kconfig"
-
-source "arch/h8300/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu
deleted file mode 100644
index cdee771460ed..000000000000
--- a/arch/h8300/Kconfig.cpu
+++ /dev/null
@@ -1,171 +0,0 @@
-menu "Processor type and features"
-
-choice
- prompt "H8/300 platform"
- default H8300H_GENERIC
-
-config H8300H_GENERIC
- bool "H8/300H Generic"
- help
- H8/300H CPU Generic Hardware Support
-
-config H8300H_AKI3068NET
- bool "AE-3068/69"
- select H83068
- help
- AKI-H8/3068F / AKI-H8/3069F Flashmicom LAN Board Support
- More Information. (Japanese Only)
- <http://akizukidenshi.com/catalog/default.aspx>
- AE-3068/69 Evaluation Board Support
- More Information.
- <http://www.microtronique.com/ae3069lan.htm>
-
-config H8300H_H8MAX
- bool "H8MAX"
- select H83068
- help
- H8MAX Evaluation Board Support
- More Information. (Japanese Only)
- <http://strawberry-linux.com/h8/index.html>
-
-config H8300H_SIM
- bool "H8/300H Simulator"
- select H83007
- help
- GDB Simulator Support
- More Information.
- <http://sourceware.org/sid/>
-
-config H8S_GENERIC
- bool "H8S Generic"
- help
- H8S CPU Generic Hardware Support
-
-config H8S_EDOSK2674
- bool "EDOSK-2674"
- select H8S2678
- help
- Renesas EDOSK-2674 Evaluation Board Support
- More Information.
- <http://www.azpower.com/H8-uClinux/index.html>
- <http://www.renesas.eu/products/tools/introductory_evaluation_tools/evaluation_development_os_kits/edosk2674r/edosk2674r_software_tools_root.jsp>
-
-config H8S_SIM
- bool "H8S Simulator"
- help
- GDB Simulator Support
- More Information.
- <http://sourceware.org/sid/>
-
-endchoice
-
-choice
- prompt "CPU Selection"
-
-config H83002
- bool "H8/3001,3002,3003"
- depends on BROKEN
- select CPU_H8300H
-
-config H83007
- bool "H8/3006,3007"
- select CPU_H8300H
-
-config H83048
- bool "H8/3044,3045,3046,3047,3048,3052"
- depends on BROKEN
- select CPU_H8300H
-
-config H83068
- bool "H8/3065,3066,3067,3068,3069"
- select CPU_H8300H
-
-config H8S2678
- bool "H8S/2670,2673,2674R,2675,2676"
- select CPU_H8S
-
-endchoice
-
-config CPU_CLOCK
- int "CPU Clock Frequency (/1KHz)"
- default "20000"
- help
- CPU Clock Frequency divide to 1000
-
-choice
- prompt "Kernel executes from"
- ---help---
- Choose the memory type that the kernel will be running in.
-
-config RAMKERNEL
- bool "RAM"
- help
- The kernel will be resident in RAM when running.
-
-config ROMKERNEL
- bool "ROM"
- help
- The kernel will be resident in FLASH/ROM when running.
-endchoice
-
-
-config CPU_H8300H
- bool
- depends on (H83002 || H83007 || H83048 || H83068)
- default y
-
-config CPU_H8S
- bool
- depends on H8S2678
- default y
-
-choice
- prompt "Timer"
-config H8300_TIMER8
- bool "8bit timer (2ch cascade)"
- depends on (H83007 || H83068 || H8S2678)
-
-config H8300_TIMER16
- bool "16bit timer"
- depends on (H83007 || H83068)
-
-config H8300_ITU
- bool "ITU"
- depends on (H83002 || H83048)
-
-config H8300_TPU
- bool "TPU"
- depends on H8S2678
-endchoice
-
-if H8300_TIMER8
-choice
- prompt "Timer Channel"
-config H8300_TIMER8_CH0
- bool "Channel 0"
-config H8300_TIMER8_CH2
- bool "Channel 2"
- depends on CPU_H8300H
-endchoice
-endif
-
-config H8300_TIMER16_CH
- int "16bit timer channel (0 - 2)"
- depends on H8300_TIMER16
- range 0 2
-
-config H8300_ITU_CH
- int "ITU channel"
- depends on H8300_ITU
- range 0 4
-
-config H8300_TPU_CH
- int "TPU channel"
- depends on H8300_TPU
- range 0 4
-
-source "kernel/Kconfig.preempt"
-
-source "mm/Kconfig"
-
-endmenu
diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug
deleted file mode 100644
index e8d1b236ad8c..000000000000
--- a/arch/h8300/Kconfig.debug
+++ /dev/null
@@ -1,68 +0,0 @@
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
-
-config FULLDEBUG
- bool "Full Symbolic/Source Debugging support"
- help
- Enable debugging symbols on kernel build.
-
-config HIGHPROFILE
- bool "Use fast second timer for profiling"
- help
- Use a fast secondary clock to produce profiling information.
-
-config NO_KERNEL_MSG
- bool "Suppress Kernel BUG Messages"
- help
- Do not output any debug BUG messages within the kernel.
-
-config GDB_MAGICPRINT
- bool "Message Output for GDB MagicPrint service"
- depends on (H8300H_SIM || H8S_SIM)
- help
- kernel messages output using MagicPrint service from GDB
-
-config SYSCALL_PRINT
- bool "SystemCall trace print"
- help
- output history of systemcall
-
-config GDB_DEBUG
- bool "Use gdb stub"
- depends on (!H8300H_SIM && !H8S_SIM)
- help
- gdb stub exception support
-
-config SH_STANDARD_BIOS
- bool "Use gdb protocol serial console"
- depends on (!H8300H_SIM && !H8S_SIM)
- help
- serial console output using GDB protocol.
- Require eCos/RedBoot
-
-config DEFAULT_CMDLINE
- bool "Use builtin commandline"
- default n
- help
- builtin kernel commandline enabled.
-
-config KERNEL_COMMAND
- string "Buildin command string"
- depends on DEFAULT_CMDLINE
- help
- builtin kernel commandline strings.
-
-config BLKDEV_RESERVE
- bool "BLKDEV Reserved Memory"
- default n
- help
- Reserved BLKDEV area.
-
-config BLKDEV_RESERVE_ADDRESS
- hex 'start address'
- depends on BLKDEV_RESERVE
- help
- BLKDEV start address.
-
-endmenu
diff --git a/arch/h8300/Kconfig.ide b/arch/h8300/Kconfig.ide
deleted file mode 100644
index a38a63054ac2..000000000000
--- a/arch/h8300/Kconfig.ide
+++ /dev/null
@@ -1,44 +0,0 @@
-# uClinux H8/300 Target Board Selection Menu (IDE)
-
-if (H8300H_AKI3068NET)
-menu "IDE Extra configuration"
-
-config H8300_IDE_BASE
- hex "IDE register base address"
- depends on IDE
- default 0
- help
- IDE registers base address
-
-config H8300_IDE_ALT
- hex "IDE register alternate address"
- depends on IDE
- default 0
- help
- IDE alternate registers address
-
-config H8300_IDE_IRQ
- int "IDE IRQ no"
- depends on IDE
- default 0
- help
- IDE use IRQ no
-endmenu
-endif
-
-if (H8300H_H8MAX)
-config H8300_IDE_BASE
- hex
- depends on IDE
- default 0x200000
-
-config H8300_IDE_ALT
- hex
- depends on IDE
- default 0x60000c
-
-config H8300_IDE_IRQ
- int
- depends on IDE
- default 5
-endif
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
deleted file mode 100644
index a556447877b4..000000000000
--- a/arch/h8300/Makefile
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-# arch/h8300/Makefile
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# (C) Copyright 2002,2003 Yoshinori Sato <ysato@users.sourceforge.jp>
-#
-
-platform-$(CONFIG_CPU_H8300H) := h8300h
-platform-$(CONFIG_CPU_H8S) := h8s
-PLATFORM := $(platform-y)
-
-board-$(CONFIG_H8300H_GENERIC) := generic
-board-$(CONFIG_H8300H_AKI3068NET) := aki3068net
-board-$(CONFIG_H8300H_H8MAX) := h8max
-board-$(CONFIG_H8300H_SIM) := generic
-board-$(CONFIG_H8S_GENERIC) := generic
-board-$(CONFIG_H8S_EDOSK2674) := edosk2674
-board-$(CONFIG_H8S_SIM) := generic
-BOARD := $(board-y)
-
-model-$(CONFIG_RAMKERNEL) := ram
-model-$(CONFIG_ROMKERNEL) := rom
-MODEL := $(model-y)
-
-cflags-$(CONFIG_CPU_H8300H) := -mh
-ldflags-$(CONFIG_CPU_H8300H) := -mh8300helf
-cflags-$(CONFIG_CPU_H8S) := -ms
-ldflags-$(CONFIG_CPU_H8S) := -mh8300self
-
-KBUILD_CFLAGS += $(cflags-y)
-KBUILD_CFLAGS += -mint32 -fno-builtin
-KBUILD_CFLAGS += -g
-KBUILD_CFLAGS += -D__linux__
-KBUILD_CFLAGS += -DUTS_SYSNAME=\"uClinux\"
-KBUILD_AFLAGS += -DPLATFORM=$(PLATFORM) -DMODEL=$(MODEL) $(cflags-y)
-LDFLAGS += $(ldflags-y)
-
-CROSS_COMPILE = h8300-elf-
-LIBGCC := $(shell $(CROSS-COMPILE)$(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-
-head-y := arch/$(ARCH)/platform/$(PLATFORM)/$(BOARD)/crt0_$(MODEL).o
-
-core-y += arch/$(ARCH)/kernel/ \
- arch/$(ARCH)/mm/
-ifdef PLATFORM
-core-y += arch/$(ARCH)/platform/$(PLATFORM)/ \
- arch/$(ARCH)/platform/$(PLATFORM)/$(BOARD)/
-endif
-
-libs-y += arch/$(ARCH)/lib/ $(LIBGCC)
-
-boot := arch/h8300/boot
-
-export MODEL PLATFORM BOARD
-
-archmrproper:
-
-archclean:
- $(Q)$(MAKE) $(clean)=$(boot)
-
-vmlinux.srec vmlinux.bin zImage: vmlinux
- $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-
-define archhelp
- @echo 'vmlinux.bin - Create raw binary'
- @echo 'vmlinux.srec - Create srec binary'
- @echo 'zImage - Compressed kernel image'
-endef
diff --git a/arch/h8300/README b/arch/h8300/README
deleted file mode 100644
index efa805fda19b..000000000000
--- a/arch/h8300/README
+++ /dev/null
@@ -1,38 +0,0 @@
-linux-2.6 for H8/300 README
-Yoshinori Sato <ysato@users.sourceforge.jp>
-
-* Supported CPU
-H8/300H and H8S
-
-* Supported Target
-1.simulator of GDB
- require patches.
-
-2.AE 3068/AE 3069
- more information
- MICROTRONIQUE <http://www.microtronique.com/>
- Akizuki Denshi Tsusho Ltd. <http://akizukidenshi.com/> (Japanese Only)
-
-3.H8MAX
- see http://ip-sol.jp/h8max/ (Japanese Only)
-
-4.EDOSK2674
- see http://www.eu.renesas.com/products/mpumcu/tool/edk/support/edosk2674.html
- http://www.uclinux.org/pub/uClinux/ports/h8/HITACHI-EDOSK2674-HOWTO
- http://www.azpower.com/H8-uClinux/
-
-* Toolchain Version
-gcc-3.1 or higher and patch
-see arch/h8300/tools_patch/README
-binutils-2.12 or higher
-gdb-5.2 or higher
-The environment that can compile a h8300-elf binary is necessary.
-
-* Userland Develop environment
-used h8300-elf toolchains.
-see http://www.uclinux.org/pub/uClinux/ports/h8/
-
-* A few words of thanks
-Porting to H8/300 serieses is support of Information-technology Promotion Agency, Japan.
-I thank support.
-and All developer/user.
diff --git a/arch/h8300/boot/Makefile b/arch/h8300/boot/Makefile
deleted file mode 100644
index 0bb62e064eea..000000000000
--- a/arch/h8300/boot/Makefile
+++ /dev/null
@@ -1,22 +0,0 @@
-# arch/h8300/boot/Makefile
-
-targets := vmlinux.srec vmlinux.bin zImage
-subdir- := compressed
-
-OBJCOPYFLAGS_vmlinux.srec := -Osrec
-OBJCOPYFLAGS_vmlinux.bin := -Obinary
-OBJCOPYFLAGS_zImage := -O binary -R .note -R .comment -R .stab -R .stabstr -S
-
-$(obj)/vmlinux.srec $(obj)/vmlinux.bin: vmlinux FORCE
- $(call if_changed,objcopy)
- @echo ' Kernel: $@ is ready'
-
-$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
- $(call if_changed,objcopy)
- @echo 'Kernel: $@ is ready'
-
-$(obj)/compressed/vmlinux: FORCE
- $(Q)$(MAKE) $(build)=$(obj)/compressed $@
-
-CLEAN_FILES += arch/$(ARCH)/vmlinux.bin arch/$(ARCH)/vmlinux.srec
-
diff --git a/arch/h8300/boot/compressed/Makefile b/arch/h8300/boot/compressed/Makefile
deleted file mode 100644
index a6c98fe3bbc3..000000000000
--- a/arch/h8300/boot/compressed/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-# linux/arch/sh/boot/compressed/Makefile
-#
-# create a compressed vmlinux image from the original vmlinux
-#
-
-targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o
-asflags-y := -traditional
-
-OBJECTS = $(obj)/head.o $(obj)/misc.o
-
-#
-# IMAGE_OFFSET is the load offset of the compression loader
-# Assign dummy values if these 2 variables are not defined,
-# in order to suppress error message.
-#
-CONFIG_MEMORY_START ?= 0x00400000
-CONFIG_BOOT_LINK_OFFSET ?= 0x00140000
-IMAGE_OFFSET := $(shell printf "0x%08x" $$(($(CONFIG_MEMORY_START)+$(CONFIG_BOOT_LINK_OFFSET))))
-
-LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -estartup $(obj)/vmlinux.lds
-
-$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE
- $(call if_changed,ld)
- @:
-
-$(obj)/vmlinux.bin: vmlinux FORCE
- $(call if_changed,objcopy)
-
-$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
- $(call if_changed,gzip)
-
-LDFLAGS_piggy.o := -r --format binary --oformat elf32-h8300 -T
-OBJCOPYFLAGS := -O binary
-
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
- $(call if_changed,ld)
diff --git a/arch/h8300/boot/compressed/head.S b/arch/h8300/boot/compressed/head.S
deleted file mode 100644
index 10e9a2d1cc6c..000000000000
--- a/arch/h8300/boot/compressed/head.S
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * linux/arch/h8300/boot/compressed/head.S
- *
- * Copyright (C) 2006 Yoshinori Sato
- */
-
- .h8300h
-#include <linux/linkage.h>
-
-#define SRAM_START 0xff4000
-
- .section .text..startup
- .global startup
-startup:
- mov.l #SRAM_START+0x8000, sp
- mov.l #__sbss, er0
- mov.l #__ebss, er1
- sub.l er0, er1
- shlr er1
- shlr er1
- sub.l er2, er2
-1:
- mov.l er2, @er0
- adds #4, er0
- dec.l #1, er1
- bne 1b
- jsr @_decompress_kernel
- jmp @0x400000
-
- .align 9
-fake_headers_as_bzImage:
- .word 0
- .ascii "HdrS" ; header signature
- .word 0x0202 ; header version number (>= 0x0105)
- ; or else old loadlin-1.5 will fail)
- .word 0 ; default_switch
- .word 0 ; SETUPSEG
- .word 0x1000
- .word 0 ; pointing to kernel version string
- .byte 0 ; = 0, old one (LILO, Loadlin,
- ; 0xTV: T=0 for LILO
- ; V = version
- .byte 1 ; Load flags bzImage=1
- .word 0x8000 ; size to move, when setup is not
- .long 0x100000 ; 0x100000 = default for big kernel
- .long 0 ; address of loaded ramdisk image
- .long 0 ; its size in bytes
diff --git a/arch/h8300/boot/compressed/misc.c b/arch/h8300/boot/compressed/misc.c
deleted file mode 100644
index 4a1e3dd43948..000000000000
--- a/arch/h8300/boot/compressed/misc.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * arch/h8300/boot/compressed/misc.c
- *
- * This is a collection of several routines from gzip-1.0.3
- * adapted for Linux.
- *
- * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
- *
- * Adapted for h8300 by Yoshinori Sato 2006
- */
-
-#include <asm/uaccess.h>
-
-/*
- * gzip declarations
- */
-
-#define OF(args) args
-#define STATIC static
-
-#undef memset
-#undef memcpy
-#define memzero(s, n) memset ((s), 0, (n))
-
-typedef unsigned char uch;
-typedef unsigned short ush;
-typedef unsigned long ulg;
-
-#define WSIZE 0x8000 /* Window size must be at least 32k, */
- /* and a power of two */
-
-static uch *inbuf; /* input buffer */
-static uch window[WSIZE]; /* Sliding window buffer */
-
-static unsigned insize = 0; /* valid bytes in inbuf */
-static unsigned inptr = 0; /* index of next byte to be processed in inbuf */
-static unsigned outcnt = 0; /* bytes in output buffer */
-
-/* gzip flag byte */
-#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
-#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
-#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
-#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
-#define COMMENT 0x10 /* bit 4 set: file comment present */
-#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
-#define RESERVED 0xC0 /* bit 6,7: reserved */
-
-#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
-
-/* Diagnostic functions */
-#ifdef DEBUG
-# define Assert(cond,msg) {if(!(cond)) error(msg);}
-# define Trace(x) fprintf x
-# define Tracev(x) {if (verbose) fprintf x ;}
-# define Tracevv(x) {if (verbose>1) fprintf x ;}
-# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
-#else
-# define Assert(cond,msg)
-# define Trace(x)
-# define Tracev(x)
-# define Tracevv(x)
-# define Tracec(c,x)
-# define Tracecv(c,x)
-#endif
-
-static int fill_inbuf(void);
-static void flush_window(void);
-static void error(char *m);
-
-extern char input_data[];
-extern int input_len;
-
-static long bytes_out = 0;
-static uch *output_data;
-static unsigned long output_ptr = 0;
-
-static void error(char *m);
-
-int puts(const char *);
-
-extern int _end;
-static unsigned long free_mem_ptr;
-static unsigned long free_mem_end_ptr;
-
-#define HEAP_SIZE 0x10000
-
-#include "../../../../lib/inflate.c"
-
-#define SCR *((volatile unsigned char *)0xffff8a)
-#define TDR *((volatile unsigned char *)0xffff8b)
-#define SSR *((volatile unsigned char *)0xffff8c)
-
-int puts(const char *s)
-{
- return 0;
-}
-
-void* memset(void* s, int c, size_t n)
-{
- int i;
- char *ss = (char*)s;
-
- for (i=0;i<n;i++) ss[i] = c;
- return s;
-}
-
-void* memcpy(void* __dest, __const void* __src,
- size_t __n)
-{
- int i;
- char *d = (char *)__dest, *s = (char *)__src;
-
- for (i=0;i<__n;i++) d[i] = s[i];
- return __dest;
-}
-
-/* ===========================================================================
- * Fill the input buffer. This is called only when the buffer is empty
- * and at least one byte is really needed.
- */
-static int fill_inbuf(void)
-{
- if (insize != 0) {
- error("ran out of input data");
- }
-
- inbuf = input_data;
- insize = input_len;
- inptr = 1;
- return inbuf[0];
-}
-
-/* ===========================================================================
- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
- * (Used for the decompressed data only.)
- */
-static void flush_window(void)
-{
- ulg c = crc; /* temporary variable */
- unsigned n;
- uch *in, *out, ch;
-
- in = window;
- out = &output_data[output_ptr];
- for (n = 0; n < outcnt; n++) {
- ch = *out++ = *in++;
- c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
- }
- crc = c;
- bytes_out += (ulg)outcnt;
- output_ptr += (ulg)outcnt;
- outcnt = 0;
-}
-
-static void error(char *x)
-{
- puts("\n\n");
- puts(x);
- puts("\n\n -- System halted");
-
- while(1); /* Halt */
-}
-
-#define STACK_SIZE (4096)
-long user_stack [STACK_SIZE];
-long* stack_start = &user_stack[STACK_SIZE];
-
-void decompress_kernel(void)
-{
- output_data = 0;
- output_ptr = (unsigned long)0x400000;
- free_mem_ptr = (unsigned long)&_end;
- free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-
- makecrc();
- puts("Uncompressing Linux... ");
- gunzip();
- puts("Ok, booting the kernel.\n");
-}
diff --git a/arch/h8300/boot/compressed/vmlinux.lds b/arch/h8300/boot/compressed/vmlinux.lds
deleted file mode 100644
index a0a3a0ed54ef..000000000000
--- a/arch/h8300/boot/compressed/vmlinux.lds
+++ /dev/null
@@ -1,32 +0,0 @@
-SECTIONS
-{
- .text :
- {
- __stext = . ;
- __text = .;
- *(.text..startup)
- *(.text)
- __etext = . ;
- }
-
- .rodata :
- {
- *(.rodata)
- }
- .data :
-
- {
- __sdata = . ;
- ___data_start = . ;
- *(.data.*)
- }
- .bss :
- {
- . = ALIGN(0x4) ;
- __sbss = . ;
- *(.bss*)
- . = ALIGN(0x4) ;
- __ebss = . ;
- __end = . ;
- }
-}
diff --git a/arch/h8300/boot/compressed/vmlinux.scr b/arch/h8300/boot/compressed/vmlinux.scr
deleted file mode 100644
index a0f6962736e9..000000000000
--- a/arch/h8300/boot/compressed/vmlinux.scr
+++ /dev/null
@@ -1,9 +0,0 @@
-SECTIONS
-{
- .data : {
- _input_len = .;
- LONG(_input_data_end - _input_data) _input_data = .;
- *(.data)
- _input_data_end = .;
- }
-}
diff --git a/arch/h8300/defconfig b/arch/h8300/defconfig
deleted file mode 100644
index 042425a02645..000000000000
--- a/arch/h8300/defconfig
+++ /dev/null
@@ -1,42 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-# CONFIG_UID16 is not set
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
-# CONFIG_BASE_FULL is not set
-# CONFIG_FUTEX is not set
-# CONFIG_EPOLL is not set
-# CONFIG_SIGNALFD is not set
-# CONFIG_TIMERFD is not set
-# CONFIG_EVENTFD is not set
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_SLOB=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_H83007=y
-CONFIG_BINFMT_FLAT=y
-CONFIG_BINFMT_ZFLAT=y
-CONFIG_BINFMT_MISC=y
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_REDBOOT_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_RAM=y
-CONFIG_MTD_ROM=y
-CONFIG_MTD_UCLINUX=y
-# CONFIG_BLK_DEV is not set
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_HWMON is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_DNOTIFY is not set
-CONFIG_ROMFS_FS=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_CRC32 is not set
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
deleted file mode 100644
index 8ada3cf0c98d..000000000000
--- a/arch/h8300/include/asm/Kbuild
+++ /dev/null
@@ -1,8 +0,0 @@
-
-generic-y += clkdev.h
-generic-y += exec.h
-generic-y += linkage.h
-generic-y += mmu.h
-generic-y += module.h
-generic-y += trace_clock.h
-generic-y += xor.h
diff --git a/arch/h8300/include/asm/asm-offsets.h b/arch/h8300/include/asm/asm-offsets.h
deleted file mode 100644
index d370ee36a182..000000000000
--- a/arch/h8300/include/asm/asm-offsets.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/asm-offsets.h>
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
deleted file mode 100644
index 40901e353c21..000000000000
--- a/arch/h8300/include/asm/atomic.h
+++ /dev/null
@@ -1,146 +0,0 @@
-#ifndef __ARCH_H8300_ATOMIC__
-#define __ARCH_H8300_ATOMIC__
-
-#include <linux/types.h>
-#include <asm/cmpxchg.h>
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
-#define atomic_set(v, i) (((v)->counter) = i)
-
-#include <linux/kernel.h>
-
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int ret;
- local_irq_save(flags);
- ret = v->counter += i;
- local_irq_restore(flags);
- return ret;
-}
-
-#define atomic_add(i, v) atomic_add_return(i, v)
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int ret;
- local_irq_save(flags);
- ret = v->counter -= i;
- local_irq_restore(flags);
- return ret;
-}
-
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i,v) (atomic_sub_return(i, v) == 0)
-
-static __inline__ int atomic_inc_return(atomic_t *v)
-{
- unsigned long flags;
- int ret;
- local_irq_save(flags);
- v->counter++;
- ret = v->counter;
- local_irq_restore(flags);
- return ret;
-}
-
-#define atomic_inc(v) atomic_inc_return(v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static __inline__ int atomic_dec_return(atomic_t *v)
-{
- unsigned long flags;
- int ret;
- local_irq_save(flags);
- --v->counter;
- ret = v->counter;
- local_irq_restore(flags);
- return ret;
-}
-
-#define atomic_dec(v) atomic_dec_return(v)
-
-static __inline__ int atomic_dec_and_test(atomic_t *v)
-{
- unsigned long flags;
- int ret;
- local_irq_save(flags);
- --v->counter;
- ret = v->counter;
- local_irq_restore(flags);
- return ret == 0;
-}
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- unsigned long flags;
-
- local_irq_save(flags);
- ret = v->counter;
- if (likely(ret == old))
- v->counter = new;
- local_irq_restore(flags);
- return ret;
-}
-
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int ret;
- unsigned long flags;
-
- local_irq_save(flags);
- ret = v->counter;
- if (ret != u)
- v->counter += a;
- local_irq_restore(flags);
- return ret;
-}
-
-static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
- __asm__ __volatile__("stc ccr,r1l\n\t"
- "orc #0x80,ccr\n\t"
- "mov.l %0,er0\n\t"
- "and.l %1,er0\n\t"
- "mov.l er0,%0\n\t"
- "ldc r1l,ccr"
- : "=m" (*v) : "g" (~(mask)) :"er0","er1");
-}
-
-static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
- __asm__ __volatile__("stc ccr,r1l\n\t"
- "orc #0x80,ccr\n\t"
- "mov.l %0,er0\n\t"
- "or.l %1,er0\n\t"
- "mov.l er0,%0\n\t"
- "ldc r1l,ccr"
- : "=m" (*v) : "g" (mask) :"er0","er1");
-}
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
-#endif /* __ARCH_H8300_ATOMIC __ */
diff --git a/arch/h8300/include/asm/barrier.h b/arch/h8300/include/asm/barrier.h
deleted file mode 100644
index 9e0aa9fc195d..000000000000
--- a/arch/h8300/include/asm/barrier.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef _H8300_BARRIER_H
-#define _H8300_BARRIER_H
-
-#define nop() asm volatile ("nop"::)
-
-/*
- * Force strict CPU ordering.
- * Not really required on H8...
- */
-#define mb() asm volatile ("" : : :"memory")
-#define rmb() asm volatile ("" : : :"memory")
-#define wmb() asm volatile ("" : : :"memory")
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
-
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
-#endif
-
-#endif /* _H8300_BARRIER_H */
diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h
deleted file mode 100644
index eb34e0cd33d5..000000000000
--- a/arch/h8300/include/asm/bitops.h
+++ /dev/null
@@ -1,211 +0,0 @@
-#ifndef _H8300_BITOPS_H
-#define _H8300_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- * Copyright 2002, Yoshinori Sato
- */
-
-#include <linux/compiler.h>
-
-#ifdef __KERNEL__
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-/*
- * Function prototypes to keep gcc -Wall happy
- */
-
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static __inline__ unsigned long ffz(unsigned long word)
-{
- unsigned long result;
-
- result = -1;
- __asm__("1:\n\t"
- "shlr.l %2\n\t"
- "adds #1,%0\n\t"
- "bcs 1b"
- : "=r" (result)
- : "0" (result),"r" (word));
- return result;
-}
-
-#define H8300_GEN_BITOP_CONST(OP,BIT) \
- case BIT: \
- __asm__(OP " #" #BIT ",@%0"::"r"(b_addr):"memory"); \
- break;
-
-#define H8300_GEN_BITOP(FNAME,OP) \
-static __inline__ void FNAME(int nr, volatile unsigned long* addr) \
-{ \
- volatile unsigned char *b_addr; \
- b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \
- if (__builtin_constant_p(nr)) { \
- switch(nr & 7) { \
- H8300_GEN_BITOP_CONST(OP,0) \
- H8300_GEN_BITOP_CONST(OP,1) \
- H8300_GEN_BITOP_CONST(OP,2) \
- H8300_GEN_BITOP_CONST(OP,3) \
- H8300_GEN_BITOP_CONST(OP,4) \
- H8300_GEN_BITOP_CONST(OP,5) \
- H8300_GEN_BITOP_CONST(OP,6) \
- H8300_GEN_BITOP_CONST(OP,7) \
- } \
- } else { \
- __asm__(OP " %w0,@%1"::"r"(nr),"r"(b_addr):"memory"); \
- } \
-}
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
-H8300_GEN_BITOP(set_bit ,"bset")
-H8300_GEN_BITOP(clear_bit ,"bclr")
-H8300_GEN_BITOP(change_bit,"bnot")
-#define __set_bit(nr,addr) set_bit((nr),(addr))
-#define __clear_bit(nr,addr) clear_bit((nr),(addr))
-#define __change_bit(nr,addr) change_bit((nr),(addr))
-
-#undef H8300_GEN_BITOP
-#undef H8300_GEN_BITOP_CONST
-
-static __inline__ int test_bit(int nr, const unsigned long* addr)
-{
- return (*((volatile unsigned char *)addr +
- ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0;
-}
-
-#define __test_bit(nr, addr) test_bit(nr, addr)
-
-#define H8300_GEN_TEST_BITOP_CONST_INT(OP,BIT) \
- case BIT: \
- __asm__("stc ccr,%w1\n\t" \
- "orc #0x80,ccr\n\t" \
- "bld #" #BIT ",@%4\n\t" \
- OP " #" #BIT ",@%4\n\t" \
- "rotxl.l %0\n\t" \
- "ldc %w1,ccr" \
- : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \
- : "0" (retval),"r" (b_addr) \
- : "memory"); \
- break;
-
-#define H8300_GEN_TEST_BITOP_CONST(OP,BIT) \
- case BIT: \
- __asm__("bld #" #BIT ",@%3\n\t" \
- OP " #" #BIT ",@%3\n\t" \
- "rotxl.l %0\n\t" \
- : "=r"(retval),"=m"(*b_addr) \
- : "0" (retval),"r" (b_addr) \
- : "memory"); \
- break;
-
-#define H8300_GEN_TEST_BITOP(FNNAME,OP) \
-static __inline__ int FNNAME(int nr, volatile void * addr) \
-{ \
- int retval = 0; \
- char ccrsave; \
- volatile unsigned char *b_addr; \
- b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \
- if (__builtin_constant_p(nr)) { \
- switch(nr & 7) { \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,0) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,1) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,2) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,3) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,4) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,5) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,6) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,7) \
- } \
- } else { \
- __asm__("stc ccr,%w1\n\t" \
- "orc #0x80,ccr\n\t" \
- "btst %w5,@%4\n\t" \
- OP " %w5,@%4\n\t" \
- "beq 1f\n\t" \
- "inc.l #1,%0\n" \
- "1:\n\t" \
- "ldc %w1,ccr" \
- : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \
- : "0" (retval),"r" (b_addr),"r"(nr) \
- : "memory"); \
- } \
- return retval; \
-} \
- \
-static __inline__ int __ ## FNNAME(int nr, volatile void * addr) \
-{ \
- int retval = 0; \
- volatile unsigned char *b_addr; \
- b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \
- if (__builtin_constant_p(nr)) { \
- switch(nr & 7) { \
- H8300_GEN_TEST_BITOP_CONST(OP,0) \
- H8300_GEN_TEST_BITOP_CONST(OP,1) \
- H8300_GEN_TEST_BITOP_CONST(OP,2) \
- H8300_GEN_TEST_BITOP_CONST(OP,3) \
- H8300_GEN_TEST_BITOP_CONST(OP,4) \
- H8300_GEN_TEST_BITOP_CONST(OP,5) \
- H8300_GEN_TEST_BITOP_CONST(OP,6) \
- H8300_GEN_TEST_BITOP_CONST(OP,7) \
- } \
- } else { \
- __asm__("btst %w4,@%3\n\t" \
- OP " %w4,@%3\n\t" \
- "beq 1f\n\t" \
- "inc.l #1,%0\n" \
- "1:" \
- : "=r"(retval),"=m"(*b_addr) \
- : "0" (retval),"r" (b_addr),"r"(nr) \
- : "memory"); \
- } \
- return retval; \
-}
-
-H8300_GEN_TEST_BITOP(test_and_set_bit, "bset")
-H8300_GEN_TEST_BITOP(test_and_clear_bit, "bclr")
-H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot")
-#undef H8300_GEN_TEST_BITOP_CONST
-#undef H8300_GEN_TEST_BITOP_CONST_INT
-#undef H8300_GEN_TEST_BITOP
-
-#include <asm-generic/bitops/ffs.h>
-
-static __inline__ unsigned long __ffs(unsigned long word)
-{
- unsigned long result;
-
- result = -1;
- __asm__("1:\n\t"
- "shlr.l %2\n\t"
- "adds #1,%0\n\t"
- "bcc 1b"
- : "=r" (result)
- : "0"(result),"r"(word));
- return result;
-}
-
-#include <asm-generic/bitops/find.h>
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
-#include <asm-generic/bitops/le.h>
-#include <asm-generic/bitops/ext2-atomic.h>
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/bitops/fls.h>
-#include <asm-generic/bitops/__fls.h>
-#include <asm-generic/bitops/fls64.h>
-
-#endif /* _H8300_BITOPS_H */
diff --git a/arch/h8300/include/asm/bootinfo.h b/arch/h8300/include/asm/bootinfo.h
deleted file mode 100644
index 5bed7e7aac0a..000000000000
--- a/arch/h8300/include/asm/bootinfo.h
+++ /dev/null
@@ -1,2 +0,0 @@
-
-/* Nothing for h8300 */
diff --git a/arch/h8300/include/asm/bug.h b/arch/h8300/include/asm/bug.h
deleted file mode 100644
index 1e1be8119935..000000000000
--- a/arch/h8300/include/asm/bug.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _H8300_BUG_H
-#define _H8300_BUG_H
-
-/* always true */
-#define is_valid_bugaddr(addr) (1)
-
-#include <asm-generic/bug.h>
-
-struct pt_regs;
-extern void die(const char *str, struct pt_regs *fp, unsigned long err);
-
-#endif
diff --git a/arch/h8300/include/asm/bugs.h b/arch/h8300/include/asm/bugs.h
deleted file mode 100644
index 1cb4afba6eb1..000000000000
--- a/arch/h8300/include/asm/bugs.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-h8300/bugs.h
- *
- * Copyright (C) 1994 Linus Torvalds
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- * void check_bugs(void);
- */
-
-static void check_bugs(void)
-{
-}
diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
deleted file mode 100644
index 05887a1d80e5..000000000000
--- a/arch/h8300/include/asm/cache.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ARCH_H8300_CACHE_H
-#define __ARCH_H8300_CACHE_H
-
-/* bytes per L1 cache line */
-#define L1_CACHE_SHIFT 2
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-/* m68k-elf-gcc 2.95.2 doesn't like these */
-
-#define __cacheline_aligned
-#define ____cacheline_aligned
-
-#endif
diff --git a/arch/h8300/include/asm/cachectl.h b/arch/h8300/include/asm/cachectl.h
deleted file mode 100644
index c464022d8e26..000000000000
--- a/arch/h8300/include/asm/cachectl.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _H8300_CACHECTL_H
-#define _H8300_CACHECTL_H
-
-/* Definitions for the cacheflush system call. */
-
-#define FLUSH_SCOPE_LINE 0 /* Flush a cache line */
-#define FLUSH_SCOPE_PAGE 0 /* Flush a page */
-#define FLUSH_SCOPE_ALL 0 /* Flush the whole cache -- superuser only */
-
-#define FLUSH_CACHE_DATA 0 /* Writeback and flush data cache */
-#define FLUSH_CACHE_INSN 0 /* Flush instruction cache */
-#define FLUSH_CACHE_BOTH 0 /* Flush both caches */
-
-#endif /* _H8300_CACHECTL_H */
diff --git a/arch/h8300/include/asm/cacheflush.h b/arch/h8300/include/asm/cacheflush.h
deleted file mode 100644
index 4cf2df20c1ce..000000000000
--- a/arch/h8300/include/asm/cacheflush.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * (C) Copyright 2002, Yoshinori Sato <ysato@users.sourceforge.jp>
- */
-
-#ifndef _ASM_H8300_CACHEFLUSH_H
-#define _ASM_H8300_CACHEFLUSH_H
-
-/*
- * Cache handling functions
- * No Cache memory all dummy functions
- */
-
-#define flush_cache_all()
-#define flush_cache_mm(mm)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma,a,b)
-#define flush_cache_page(vma,p,pfn)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)
-#define flush_dcache_mmap_lock(mapping)
-#define flush_dcache_mmap_unlock(mapping)
-#define flush_icache()
-#define flush_icache_page(vma,page)
-#define flush_icache_range(start,len)
-#define flush_cache_vmap(start, end)
-#define flush_cache_vunmap(start, end)
-#define cache_push_v(vaddr,len)
-#define cache_push(paddr,len)
-#define cache_clear(paddr,len)
-
-#define flush_dcache_range(a,b)
-
-#define flush_icache_user_range(vma,page,addr,len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
-#endif /* _ASM_H8300_CACHEFLUSH_H */
diff --git a/arch/h8300/include/asm/checksum.h b/arch/h8300/include/asm/checksum.h
deleted file mode 100644
index 98724e12508c..000000000000
--- a/arch/h8300/include/asm/checksum.h
+++ /dev/null
@@ -1,102 +0,0 @@
-#ifndef _H8300_CHECKSUM_H
-#define _H8300_CHECKSUM_H
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum);
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
-
-
-/*
- * the same as csum_partial_copy, but copies from user space.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *csum_err);
-
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-
-/*
- * Fold a partial checksum
- */
-
-static inline __sum16 csum_fold(__wsum sum)
-{
- __asm__("mov.l %0,er0\n\t"
- "add.w e0,r0\n\t"
- "xor.w e0,e0\n\t"
- "rotxl.w e0\n\t"
- "add.w e0,r0\n\t"
- "sub.w e0,e0\n\t"
- "mov.l er0,%0"
- : "=r"(sum)
- : "0"(sum)
- : "er0");
- return (__force __sum16)~sum;
-}
-
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-
-static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
-{
- __asm__ ("sub.l er0,er0\n\t"
- "add.l %2,%0\n\t"
- "addx #0,r0l\n\t"
- "add.l %3,%0\n\t"
- "addx #0,r0l\n\t"
- "add.l %4,%0\n\t"
- "addx #0,r0l\n\t"
- "add.l er0,%0\n\t"
- "bcc 1f\n\t"
- "inc.l #1,%0\n"
- "1:"
- : "=&r" (sum)
- : "0" (sum), "r" (daddr), "r" (saddr), "r" (len + proto)
- :"er0");
- return sum;
-}
-
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
-{
- return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
-}
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-
-extern __sum16 ip_compute_csum(const void *buff, int len);
-
-#endif /* _H8300_CHECKSUM_H */
diff --git a/arch/h8300/include/asm/cmpxchg.h b/arch/h8300/include/asm/cmpxchg.h
deleted file mode 100644
index cdb203ef681f..000000000000
--- a/arch/h8300/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef __ARCH_H8300_CMPXCHG__
-#define __ARCH_H8300_CMPXCHG__
-
-#include <linux/irqflags.h>
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- unsigned long tmp, flags;
-
- local_irq_save(flags);
-
- switch (size) {
- case 1:
- __asm__ __volatile__
- ("mov.b %2,%0\n\t"
- "mov.b %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 2:
- __asm__ __volatile__
- ("mov.w %2,%0\n\t"
- "mov.w %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 4:
- __asm__ __volatile__
- ("mov.l %2,%0\n\t"
- "mov.l %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
- break;
- default:
- tmp = 0;
- }
- local_irq_restore(flags);
- return tmp;
-}
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#ifndef CONFIG_SMP
-#include <asm-generic/cmpxchg.h>
-#endif
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#endif /* __ARCH_H8300_CMPXCHG__ */
diff --git a/arch/h8300/include/asm/cputime.h b/arch/h8300/include/asm/cputime.h
deleted file mode 100644
index 092e187c7b08..000000000000
--- a/arch/h8300/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __H8300_CPUTIME_H
-#define __H8300_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __H8300_CPUTIME_H */
diff --git a/arch/h8300/include/asm/current.h b/arch/h8300/include/asm/current.h
deleted file mode 100644
index 57d74ee55a14..000000000000
--- a/arch/h8300/include/asm/current.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _H8300_CURRENT_H
-#define _H8300_CURRENT_H
-/*
- * current.h
- * (C) Copyright 2000, Lineo, David McCullough <davidm@lineo.com>
- * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
- *
- * rather than dedicate a register (as the m68k source does), we
- * just keep a global, we should probably just change it all to be
- * current and lose _current_task.
- */
-
-#include <linux/thread_info.h>
-#include <asm/thread_info.h>
-
-struct task_struct;
-
-static inline struct task_struct *get_current(void)
-{
- return(current_thread_info()->task);
-}
-
-#define current get_current()
-
-#endif /* _H8300_CURRENT_H */
diff --git a/arch/h8300/include/asm/dbg.h b/arch/h8300/include/asm/dbg.h
deleted file mode 100644
index 2c6d1cbcf736..000000000000
--- a/arch/h8300/include/asm/dbg.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#define DEBUG 1
-#define BREAK asm volatile ("trap #3")
diff --git a/arch/h8300/include/asm/delay.h b/arch/h8300/include/asm/delay.h
deleted file mode 100644
index 743beba70f82..000000000000
--- a/arch/h8300/include/asm/delay.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef _H8300_DELAY_H
-#define _H8300_DELAY_H
-
-#include <asm/param.h>
-
-/*
- * Copyright (C) 2002 Yoshinori Sato <ysato@sourceforge.jp>
- *
- * Delay routines, using a pre-computed "loops_per_second" value.
- */
-
-static inline void __delay(unsigned long loops)
-{
- __asm__ __volatile__ ("1:\n\t"
- "dec.l #1,%0\n\t"
- "bne 1b"
- :"=r" (loops):"0"(loops));
-}
-
-/*
- * Use only for very small delays ( < 1 msec). Should probably use a
- * lookup table, really, as the multiplications take much too long with
- * short delays. This is a "reasonable" implementation, though (and the
- * first constant multiplication gets optimized away if the delay is
- * a constant)
- */
-
-extern unsigned long loops_per_jiffy;
-
-static inline void udelay(unsigned long usecs)
-{
- usecs *= 4295; /* 2**32 / 1000000 */
- usecs /= (loops_per_jiffy*HZ);
- if (usecs)
- __delay(usecs);
-}
-
-#endif /* _H8300_DELAY_H */
diff --git a/arch/h8300/include/asm/device.h b/arch/h8300/include/asm/device.h
deleted file mode 100644
index d8f9872b0e2d..000000000000
--- a/arch/h8300/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/h8300/include/asm/div64.h b/arch/h8300/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/arch/h8300/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/h8300/include/asm/dma.h b/arch/h8300/include/asm/dma.h
deleted file mode 100644
index 3edbaaaedf5b..000000000000
--- a/arch/h8300/include/asm/dma.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _H8300_DMA_H
-#define _H8300_DMA_H
-
-
-/*
- * Set number of channels of DMA on ColdFire for different implementations.
- */
-#define MAX_DMA_CHANNELS 0
-#define MAX_DMA_ADDRESS PAGE_OFFSET
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char *device_id); /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr); /* release it again */
-
-#endif /* _H8300_DMA_H */
diff --git a/arch/h8300/include/asm/elf.h b/arch/h8300/include/asm/elf.h
deleted file mode 100644
index 6db71248a82f..000000000000
--- a/arch/h8300/include/asm/elf.h
+++ /dev/null
@@ -1,101 +0,0 @@
-#ifndef __ASMH8300_ELF_H
-#define __ASMH8300_ELF_H
-
-/*
- * ELF register definitions..
- */
-
-#include <asm/ptrace.h>
-#include <asm/user.h>
-
-typedef unsigned long elf_greg_t;
-
-#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-typedef unsigned long elf_fpregset_t;
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) ((x)->e_machine == EM_H8_300)
-
-/*
- * These are used to set parameters in the core dumps.
- */
-#define ELF_CLASS ELFCLASS32
-#define ELF_DATA ELFDATA2MSB
-#define ELF_ARCH EM_H8_300
-#if defined(__H8300H__)
-#define ELF_CORE_EFLAGS 0x810000
-#endif
-#if defined(__H8300S__)
-#define ELF_CORE_EFLAGS 0x820000
-#endif
-
-#define ELF_PLAT_INIT(_r) _r->er1 = 0
-
-#define ELF_EXEC_PAGESIZE 4096
-
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE 0xD0000000UL
-
-/* This yields a mask that user programs can use to figure out what
- instruction set this cpu supports. */
-
-#define ELF_HWCAP (0)
-
-/* This yields a string that ld.so will use to load implementation
- specific libraries for optimization. This is more specific in
- intent than poking at uname or /proc/cpuinfo. */
-
-#define ELF_PLATFORM (NULL)
-
-#define R_H8_NONE 0
-#define R_H8_DIR32 1
-#define R_H8_DIR32_28 2
-#define R_H8_DIR32_24 3
-#define R_H8_DIR32_16 4
-#define R_H8_DIR32U 6
-#define R_H8_DIR32U_28 7
-#define R_H8_DIR32U_24 8
-#define R_H8_DIR32U_20 9
-#define R_H8_DIR32U_16 10
-#define R_H8_DIR24 11
-#define R_H8_DIR24_20 12
-#define R_H8_DIR24_16 13
-#define R_H8_DIR24U 14
-#define R_H8_DIR24U_20 15
-#define R_H8_DIR24U_16 16
-#define R_H8_DIR16 17
-#define R_H8_DIR16U 18
-#define R_H8_DIR16S_32 19
-#define R_H8_DIR16S_28 20
-#define R_H8_DIR16S_24 21
-#define R_H8_DIR16S_20 22
-#define R_H8_DIR16S 23
-#define R_H8_DIR8 24
-#define R_H8_DIR8U 25
-#define R_H8_DIR8Z_32 26
-#define R_H8_DIR8Z_28 27
-#define R_H8_DIR8Z_24 28
-#define R_H8_DIR8Z_20 29
-#define R_H8_DIR8Z_16 30
-#define R_H8_PCREL16 31
-#define R_H8_PCREL8 32
-#define R_H8_BPOS 33
-#define R_H8_PCREL32 34
-#define R_H8_GOT32O 35
-#define R_H8_GOT16O 36
-#define R_H8_DIR16A8 59
-#define R_H8_DIR16R8 60
-#define R_H8_DIR24A8 61
-#define R_H8_DIR24R8 62
-#define R_H8_DIR32A16 63
-#define R_H8_ABS32 65
-#define R_H8_ABS32A16 127
-
-#endif
diff --git a/arch/h8300/include/asm/emergency-restart.h b/arch/h8300/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/arch/h8300/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/h8300/include/asm/fb.h b/arch/h8300/include/asm/fb.h
deleted file mode 100644
index c7df38030992..000000000000
--- a/arch/h8300/include/asm/fb.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-#include <linux/fb.h>
-
-#define fb_pgprotect(...) do {} while (0)
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/h8300/include/asm/flat.h b/arch/h8300/include/asm/flat.h
deleted file mode 100644
index bd12b31b90e6..000000000000
--- a/arch/h8300/include/asm/flat.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * include/asm-h8300/flat.h -- uClinux flat-format executables
- */
-
-#ifndef __H8300_FLAT_H__
-#define __H8300_FLAT_H__
-
-#define flat_argvp_envp_on_stack() 1
-#define flat_old_ram_flag(flags) 1
-#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
-#define flat_set_persistent(relval, p) 0
-
-/*
- * on the H8 a couple of the relocations have an instruction in the
- * top byte. As there can only be 24bits of address space, we just
- * always preserve that 8bits at the top, when it isn't an instruction
- * it is 0 (davidm@snapgear.com)
- */
-
-#define flat_get_relocate_addr(rel) (rel)
-#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
- (get_unaligned(rp) & ((flags & FLAT_FLAG_GOTPIC) ? 0xffffffff: 0x00ffffff))
-#define flat_put_addr_at_rp(rp, addr, rel) \
- put_unaligned (((*(char *)(rp)) << 24) | ((addr) & 0x00ffffff), rp)
-
-#endif /* __H8300_FLAT_H__ */
diff --git a/arch/h8300/include/asm/fpu.h b/arch/h8300/include/asm/fpu.h
deleted file mode 100644
index 4fc416e80bef..000000000000
--- a/arch/h8300/include/asm/fpu.h
+++ /dev/null
@@ -1 +0,0 @@
-/* Nothing do */
diff --git a/arch/h8300/include/asm/ftrace.h b/arch/h8300/include/asm/ftrace.h
deleted file mode 100644
index 40a8c178f10d..000000000000
--- a/arch/h8300/include/asm/ftrace.h
+++ /dev/null
@@ -1 +0,0 @@
-/* empty */
diff --git a/arch/h8300/include/asm/futex.h b/arch/h8300/include/asm/futex.h
deleted file mode 100644
index 6a332a9f099c..000000000000
--- a/arch/h8300/include/asm/futex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#include <asm-generic/futex.h>
-
-#endif
diff --git a/arch/h8300/include/asm/gpio-internal.h b/arch/h8300/include/asm/gpio-internal.h
deleted file mode 100644
index a714f0c0efbc..000000000000
--- a/arch/h8300/include/asm/gpio-internal.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef _H8300_GPIO_H
-#define _H8300_GPIO_H
-
-#define H8300_GPIO_P1 0
-#define H8300_GPIO_P2 1
-#define H8300_GPIO_P3 2
-#define H8300_GPIO_P4 3
-#define H8300_GPIO_P5 4
-#define H8300_GPIO_P6 5
-#define H8300_GPIO_P7 6
-#define H8300_GPIO_P8 7
-#define H8300_GPIO_P9 8
-#define H8300_GPIO_PA 9
-#define H8300_GPIO_PB 10
-#define H8300_GPIO_PC 11
-#define H8300_GPIO_PD 12
-#define H8300_GPIO_PE 13
-#define H8300_GPIO_PF 14
-#define H8300_GPIO_PG 15
-#define H8300_GPIO_PH 16
-
-#define H8300_GPIO_B7 0x80
-#define H8300_GPIO_B6 0x40
-#define H8300_GPIO_B5 0x20
-#define H8300_GPIO_B4 0x10
-#define H8300_GPIO_B3 0x08
-#define H8300_GPIO_B2 0x04
-#define H8300_GPIO_B1 0x02
-#define H8300_GPIO_B0 0x01
-
-#define H8300_GPIO_INPUT 0
-#define H8300_GPIO_OUTPUT 1
-
-#define H8300_GPIO_RESERVE(port, bits) \
- h8300_reserved_gpio(port, bits)
-
-#define H8300_GPIO_FREE(port, bits) \
- h8300_free_gpio(port, bits)
-
-#define H8300_GPIO_DDR(port, bit, dir) \
- h8300_set_gpio_dir(((port) << 8) | (bit), dir)
-
-#define H8300_GPIO_GETDIR(port, bit) \
- h8300_get_gpio_dir(((port) << 8) | (bit))
-
-extern int h8300_reserved_gpio(int port, int bits);
-extern int h8300_free_gpio(int port, int bits);
-extern int h8300_set_gpio_dir(int port_bit, int dir);
-extern int h8300_get_gpio_dir(int port_bit);
-extern int h8300_init_gpio(void);
-
-#endif
diff --git a/arch/h8300/include/asm/hardirq.h b/arch/h8300/include/asm/hardirq.h
deleted file mode 100644
index c2e1aa0f0d14..000000000000
--- a/arch/h8300/include/asm/hardirq.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __H8300_HARDIRQ_H
-#define __H8300_HARDIRQ_H
-
-#include <asm/irq.h>
-
-#define HARDIRQ_BITS 8
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-#include <asm-generic/hardirq.h>
-
-#endif
diff --git a/arch/h8300/include/asm/hw_irq.h b/arch/h8300/include/asm/hw_irq.h
deleted file mode 100644
index d75a5a1119e8..000000000000
--- a/arch/h8300/include/asm/hw_irq.h
+++ /dev/null
@@ -1 +0,0 @@
-/* Do Nothing */
diff --git a/arch/h8300/include/asm/io.h b/arch/h8300/include/asm/io.h
deleted file mode 100644
index c1a8df22080f..000000000000
--- a/arch/h8300/include/asm/io.h
+++ /dev/null
@@ -1,358 +0,0 @@
-#ifndef _H8300_IO_H
-#define _H8300_IO_H
-
-#ifdef __KERNEL__
-
-#include <asm/virtconvert.h>
-
-#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
-#include <asm/regs306x.h>
-#elif defined(CONFIG_H8S2678)
-#include <asm/regs267x.h>
-#else
-#error UNKNOWN CPU TYPE
-#endif
-
-
-/*
- * These are for ISA/PCI shared memory _only_ and should never be used
- * on any other type of memory, including Zorro memory. They are meant to
- * access the bus in the bus byte order which is little-endian!
- *
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the m68k architecture, we just read/write the
- * memory location directly.
- */
-/* ++roman: The assignments to temp. vars avoid that gcc sometimes generates
- * two accesses to memory, which may be undesirable for some devices.
- */
-
-/*
- * swap functions are sometimes needed to interface little-endian hardware
- */
-
-static inline unsigned short _swapw(volatile unsigned short v)
-{
-#ifndef H8300_IO_NOSWAP
- unsigned short r;
- __asm__("xor.b %w0,%x0\n\t"
- "xor.b %x0,%w0\n\t"
- "xor.b %w0,%x0"
- :"=r"(r)
- :"0"(v));
- return r;
-#else
- return v;
-#endif
-}
-
-static inline unsigned long _swapl(volatile unsigned long v)
-{
-#ifndef H8300_IO_NOSWAP
- unsigned long r;
- __asm__("xor.b %w0,%x0\n\t"
- "xor.b %x0,%w0\n\t"
- "xor.b %w0,%x0\n\t"
- "xor.w %e0,%f0\n\t"
- "xor.w %f0,%e0\n\t"
- "xor.w %e0,%f0\n\t"
- "xor.b %w0,%x0\n\t"
- "xor.b %x0,%w0\n\t"
- "xor.b %w0,%x0"
- :"=r"(r)
- :"0"(v));
- return r;
-#else
- return v;
-#endif
-}
-
-#define readb(addr) \
- ({ unsigned char __v = \
- *(volatile unsigned char *)((unsigned long)(addr) & 0x00ffffff); \
- __v; })
-#define readw(addr) \
- ({ unsigned short __v = \
- *(volatile unsigned short *)((unsigned long)(addr) & 0x00ffffff); \
- __v; })
-#define readl(addr) \
- ({ unsigned long __v = \
- *(volatile unsigned long *)((unsigned long)(addr) & 0x00ffffff); \
- __v; })
-
-#define writeb(b,addr) (void)((*(volatile unsigned char *) \
- ((unsigned long)(addr) & 0x00ffffff)) = (b))
-#define writew(b,addr) (void)((*(volatile unsigned short *) \
- ((unsigned long)(addr) & 0x00ffffff)) = (b))
-#define writel(b,addr) (void)((*(volatile unsigned long *) \
- ((unsigned long)(addr) & 0x00ffffff)) = (b))
-#define readb_relaxed(addr) readb(addr)
-#define readw_relaxed(addr) readw(addr)
-#define readl_relaxed(addr) readl(addr)
-
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-
-static inline int h8300_buswidth(unsigned int addr)
-{
- return (*(volatile unsigned char *)ABWCR & (1 << ((addr >> 21) & 7))) == 0;
-}
-
-static inline void io_outsb(unsigned int addr, const void *buf, int len)
-{
- volatile unsigned char *ap_b = (volatile unsigned char *) addr;
- volatile unsigned short *ap_w = (volatile unsigned short *) addr;
- unsigned char *bp = (unsigned char *) buf;
-
- if(h8300_buswidth(addr) && (addr & 1)) {
- while (len--)
- *ap_w = *bp++;
- } else {
- while (len--)
- *ap_b = *bp++;
- }
-}
-
-static inline void io_outsw(unsigned int addr, const void *buf, int len)
-{
- volatile unsigned short *ap = (volatile unsigned short *) addr;
- unsigned short *bp = (unsigned short *) buf;
- while (len--)
- *ap = _swapw(*bp++);
-}
-
-static inline void io_outsl(unsigned int addr, const void *buf, int len)
-{
- volatile unsigned long *ap = (volatile unsigned long *) addr;
- unsigned long *bp = (unsigned long *) buf;
- while (len--)
- *ap = _swapl(*bp++);
-}
-
-static inline void io_outsw_noswap(unsigned int addr, const void *buf, int len)
-{
- volatile unsigned short *ap = (volatile unsigned short *) addr;
- unsigned short *bp = (unsigned short *) buf;
- while (len--)
- *ap = *bp++;
-}
-
-static inline void io_outsl_noswap(unsigned int addr, const void *buf, int len)
-{
- volatile unsigned long *ap = (volatile unsigned long *) addr;
- unsigned long *bp = (unsigned long *) buf;
- while (len--)
- *ap = *bp++;
-}
-
-static inline void io_insb(unsigned int addr, void *buf, int len)
-{
- volatile unsigned char *ap_b;
- volatile unsigned short *ap_w;
- unsigned char *bp = (unsigned char *) buf;
-
- if(h8300_buswidth(addr)) {
- ap_w = (volatile unsigned short *)(addr & ~1);
- while (len--)
- *bp++ = *ap_w & 0xff;
- } else {
- ap_b = (volatile unsigned char *)addr;
- while (len--)
- *bp++ = *ap_b;
- }
-}
-
-static inline void io_insw(unsigned int addr, void *buf, int len)
-{
- volatile unsigned short *ap = (volatile unsigned short *) addr;
- unsigned short *bp = (unsigned short *) buf;
- while (len--)
- *bp++ = _swapw(*ap);
-}
-
-static inline void io_insl(unsigned int addr, void *buf, int len)
-{
- volatile unsigned long *ap = (volatile unsigned long *) addr;
- unsigned long *bp = (unsigned long *) buf;
- while (len--)
- *bp++ = _swapl(*ap);
-}
-
-static inline void io_insw_noswap(unsigned int addr, void *buf, int len)
-{
- volatile unsigned short *ap = (volatile unsigned short *) addr;
- unsigned short *bp = (unsigned short *) buf;
- while (len--)
- *bp++ = *ap;
-}
-
-static inline void io_insl_noswap(unsigned int addr, void *buf, int len)
-{
- volatile unsigned long *ap = (volatile unsigned long *) addr;
- unsigned long *bp = (unsigned long *) buf;
- while (len--)
- *bp++ = *ap;
-}
-
-/*
- * make the short names macros so specific devices
- * can override them as required
- */
-
-#define memset_io(a,b,c) memset((void *)(a),(b),(c))
-#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
-
-#define mmiowb()
-
-#define inb(addr) ((h8300_buswidth(addr))?readw((addr) & ~1) & 0xff:readb(addr))
-#define inw(addr) _swapw(readw(addr))
-#define inl(addr) _swapl(readl(addr))
-#define outb(x,addr) ((void)((h8300_buswidth(addr) && \
- ((addr) & 1))?writew(x,(addr) & ~1):writeb(x,addr)))
-#define outw(x,addr) ((void) writew(_swapw(x),addr))
-#define outl(x,addr) ((void) writel(_swapl(x),addr))
-
-#define inb_p(addr) inb(addr)
-#define inw_p(addr) inw(addr)
-#define inl_p(addr) inl(addr)
-#define outb_p(x,addr) outb(x,addr)
-#define outw_p(x,addr) outw(x,addr)
-#define outl_p(x,addr) outl(x,addr)
-
-#define outsb(a,b,l) io_outsb(a,b,l)
-#define outsw(a,b,l) io_outsw(a,b,l)
-#define outsl(a,b,l) io_outsl(a,b,l)
-
-#define insb(a,b,l) io_insb(a,b,l)
-#define insw(a,b,l) io_insw(a,b,l)
-#define insl(a,b,l) io_insl(a,b,l)
-
-#define IO_SPACE_LIMIT 0xffffff
-
-
-/* Values for nocacheflag and cmode */
-#define IOMAP_FULL_CACHING 0
-#define IOMAP_NOCACHE_SER 1
-#define IOMAP_NOCACHE_NONSER 2
-#define IOMAP_WRITETHROUGH 3
-
-extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
-extern void __iounmap(void *addr, unsigned long size);
-
-static inline void *ioremap(unsigned long physaddr, unsigned long size)
-{
- return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
-static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
-{
- return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
-static inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size)
-{
- return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
-}
-static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size)
-{
- return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
-}
-
-extern void iounmap(void *addr);
-
-/* H8/300 internal I/O functions */
-static __inline__ unsigned char ctrl_inb(unsigned long addr)
-{
- return *(volatile unsigned char*)addr;
-}
-
-static __inline__ unsigned short ctrl_inw(unsigned long addr)
-{
- return *(volatile unsigned short*)addr;
-}
-
-static __inline__ unsigned long ctrl_inl(unsigned long addr)
-{
- return *(volatile unsigned long*)addr;
-}
-
-static __inline__ void ctrl_outb(unsigned char b, unsigned long addr)
-{
- *(volatile unsigned char*)addr = b;
-}
-
-static __inline__ void ctrl_outw(unsigned short b, unsigned long addr)
-{
- *(volatile unsigned short*)addr = b;
-}
-
-static __inline__ void ctrl_outl(unsigned long b, unsigned long addr)
-{
- *(volatile unsigned long*)addr = b;
-}
-
-static __inline__ void ctrl_bclr(int b, unsigned long addr)
-{
- if (__builtin_constant_p(b))
- switch (b) {
- case 0: __asm__("bclr #0,@%0"::"r"(addr)); break;
- case 1: __asm__("bclr #1,@%0"::"r"(addr)); break;
- case 2: __asm__("bclr #2,@%0"::"r"(addr)); break;
- case 3: __asm__("bclr #3,@%0"::"r"(addr)); break;
- case 4: __asm__("bclr #4,@%0"::"r"(addr)); break;
- case 5: __asm__("bclr #5,@%0"::"r"(addr)); break;
- case 6: __asm__("bclr #6,@%0"::"r"(addr)); break;
- case 7: __asm__("bclr #7,@%0"::"r"(addr)); break;
- }
- else
- __asm__("bclr %w0,@%1"::"r"(b), "r"(addr));
-}
-
-static __inline__ void ctrl_bset(int b, unsigned long addr)
-{
- if (__builtin_constant_p(b))
- switch (b) {
- case 0: __asm__("bset #0,@%0"::"r"(addr)); break;
- case 1: __asm__("bset #1,@%0"::"r"(addr)); break;
- case 2: __asm__("bset #2,@%0"::"r"(addr)); break;
- case 3: __asm__("bset #3,@%0"::"r"(addr)); break;
- case 4: __asm__("bset #4,@%0"::"r"(addr)); break;
- case 5: __asm__("bset #5,@%0"::"r"(addr)); break;
- case 6: __asm__("bset #6,@%0"::"r"(addr)); break;
- case 7: __asm__("bset #7,@%0"::"r"(addr)); break;
- }
- else
- __asm__("bset %w0,@%1"::"r"(b), "r"(addr));
-}
-
-/* Pages to physical address... */
-#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
-#define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT)
-
-/*
- * Macros used for converting between virtual and physical mappings.
- */
-#define phys_to_virt(vaddr) ((void *) (vaddr))
-#define virt_to_phys(vaddr) ((unsigned long) (vaddr))
-
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
-#endif /* __KERNEL__ */
-
-#endif /* _H8300_IO_H */
diff --git a/arch/h8300/include/asm/irq.h b/arch/h8300/include/asm/irq.h
deleted file mode 100644
index 13d7c601cd0a..000000000000
--- a/arch/h8300/include/asm/irq.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef _H8300_IRQ_H_
-#define _H8300_IRQ_H_
-
-#include <asm/ptrace.h>
-
-#if defined(CONFIG_CPU_H8300H)
-#define NR_IRQS 64
-#define EXT_IRQ0 12
-#define EXT_IRQ1 13
-#define EXT_IRQ2 14
-#define EXT_IRQ3 15
-#define EXT_IRQ4 16
-#define EXT_IRQ5 17
-#define EXT_IRQ6 18
-#define EXT_IRQ7 19
-#define EXT_IRQS 5
-#define IER_REGS *(volatile unsigned char *)IER
-#endif
-#if defined(CONFIG_CPU_H8S)
-#define NR_IRQS 128
-#define EXT_IRQ0 16
-#define EXT_IRQ1 17
-#define EXT_IRQ2 18
-#define EXT_IRQ3 19
-#define EXT_IRQ4 20
-#define EXT_IRQ5 21
-#define EXT_IRQ6 22
-#define EXT_IRQ7 23
-#define EXT_IRQ8 24
-#define EXT_IRQ9 25
-#define EXT_IRQ10 26
-#define EXT_IRQ11 27
-#define EXT_IRQ12 28
-#define EXT_IRQ13 29
-#define EXT_IRQ14 30
-#define EXT_IRQ15 31
-#define EXT_IRQS 15
-
-#define IER_REGS *(volatile unsigned short *)IER
-#endif
-
-static __inline__ int irq_canonicalize(int irq)
-{
- return irq;
-}
-
-typedef void (*h8300_vector)(void);
-
-#endif /* _H8300_IRQ_H_ */
diff --git a/arch/h8300/include/asm/irq_regs.h b/arch/h8300/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/h8300/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/h8300/include/asm/irqflags.h b/arch/h8300/include/asm/irqflags.h
deleted file mode 100644
index 9617cd57aebd..000000000000
--- a/arch/h8300/include/asm/irqflags.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef _H8300_IRQFLAGS_H
-#define _H8300_IRQFLAGS_H
-
-static inline unsigned long arch_local_save_flags(void)
-{
- unsigned long flags;
- asm volatile ("stc ccr,%w0" : "=r" (flags));
- return flags;
-}
-
-static inline void arch_local_irq_disable(void)
-{
- asm volatile ("orc #0x80,ccr" : : : "memory");
-}
-
-static inline void arch_local_irq_enable(void)
-{
- asm volatile ("andc #0x7f,ccr" : : : "memory");
-}
-
-static inline unsigned long arch_local_irq_save(void)
-{
- unsigned long flags = arch_local_save_flags();
- arch_local_irq_disable();
- return flags;
-}
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
- asm volatile ("ldc %w0,ccr" : : "r" (flags) : "memory");
-}
-
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
-{
- return (flags & 0x80) == 0x80;
-}
-
-static inline bool arch_irqs_disabled(void)
-{
- return arch_irqs_disabled_flags(arch_local_save_flags());
-}
-
-#endif /* _H8300_IRQFLAGS_H */
diff --git a/arch/h8300/include/asm/kdebug.h b/arch/h8300/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b037665..000000000000
--- a/arch/h8300/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/h8300/include/asm/kmap_types.h b/arch/h8300/include/asm/kmap_types.h
deleted file mode 100644
index be12a7160116..000000000000
--- a/arch/h8300/include/asm/kmap_types.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_H8300_KMAP_TYPES_H
-#define _ASM_H8300_KMAP_TYPES_H
-
-#include <asm-generic/kmap_types.h>
-
-#endif
diff --git a/arch/h8300/include/asm/local.h b/arch/h8300/include/asm/local.h
deleted file mode 100644
index fdd4efe437cd..000000000000
--- a/arch/h8300/include/asm/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_LOCAL_H_
-#define _H8300_LOCAL_H_
-
-#include <asm-generic/local.h>
-
-#endif
diff --git a/arch/h8300/include/asm/local64.h b/arch/h8300/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/h8300/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/h8300/include/asm/mc146818rtc.h b/arch/h8300/include/asm/mc146818rtc.h
deleted file mode 100644
index ab9d9646d241..000000000000
--- a/arch/h8300/include/asm/mc146818rtc.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Machine dependent access functions for RTC registers.
- */
-#ifndef _H8300_MC146818RTC_H
-#define _H8300_MC146818RTC_H
-
-/* empty include file to satisfy the include in genrtc.c/ide-geometry.c */
-
-#endif /* _H8300_MC146818RTC_H */
diff --git a/arch/h8300/include/asm/mmu_context.h b/arch/h8300/include/asm/mmu_context.h
deleted file mode 100644
index f44b730da54d..000000000000
--- a/arch/h8300/include/asm/mmu_context.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __H8300_MMU_CONTEXT_H
-#define __H8300_MMU_CONTEXT_H
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm-generic/mm_hooks.h>
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
- // mm->context = virt_to_phys(mm->pgd);
- return(0);
-}
-
-#define destroy_context(mm) do { } while(0)
-#define deactivate_mm(tsk,mm) do { } while(0)
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
-{
-}
-
-static inline void activate_mm(struct mm_struct *prev_mm,
- struct mm_struct *next_mm)
-{
-}
-
-#endif
diff --git a/arch/h8300/include/asm/mutex.h b/arch/h8300/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/h8300/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/h8300/include/asm/page.h b/arch/h8300/include/asm/page.h
deleted file mode 100644
index 837381a2df46..000000000000
--- a/arch/h8300/include/asm/page.h
+++ /dev/null
@@ -1,78 +0,0 @@
-#ifndef _H8300_PAGE_H
-#define _H8300_PAGE_H
-
-/* PAGE_SHIFT determines the page size */
-
-#define PAGE_SHIFT (12)
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-#include <asm/setup.h>
-
-#ifndef __ASSEMBLY__
-
-#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
-#define free_user_page(page, addr) free_page(addr)
-
-#define clear_page(page) memset((page), 0, PAGE_SIZE)
-#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
-
-#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-
-/*
- * These are used to make use of C type-checking..
- */
-typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pmd[16]; } pmd_t;
-typedef struct { unsigned long pgd; } pgd_t;
-typedef struct { unsigned long pgprot; } pgprot_t;
-typedef struct page *pgtable_t;
-
-#define pte_val(x) ((x).pte)
-#define pmd_val(x) ((&x)->pmd[0])
-#define pgd_val(x) ((x).pgd)
-#define pgprot_val(x) ((x).pgprot)
-
-#define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
-#define __pgd(x) ((pgd_t) { (x) } )
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-extern unsigned long memory_start;
-extern unsigned long memory_end;
-
-#endif /* !__ASSEMBLY__ */
-
-#include <asm/page_offset.h>
-
-#define PAGE_OFFSET (PAGE_OFFSET_RAW)
-
-#ifndef __ASSEMBLY__
-
-#define __pa(vaddr) virt_to_phys(vaddr)
-#define __va(paddr) phys_to_virt((unsigned long)paddr)
-
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
-
-#define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)
-#define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
-#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
-#define pfn_valid(page) (page < max_mapnr)
-
-#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
-
-#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
- ((void *)(kaddr) < (void *)memory_end))
-
-#endif /* __ASSEMBLY__ */
-
-#include <asm-generic/memory_model.h>
-#include <asm-generic/getorder.h>
-
-#endif /* _H8300_PAGE_H */
diff --git a/arch/h8300/include/asm/page_offset.h b/arch/h8300/include/asm/page_offset.h
deleted file mode 100644
index f8706463008c..000000000000
--- a/arch/h8300/include/asm/page_offset.h
+++ /dev/null
@@ -1,3 +0,0 @@
-
-#define PAGE_OFFSET_RAW 0x00000000
-
diff --git a/arch/h8300/include/asm/param.h b/arch/h8300/include/asm/param.h
deleted file mode 100644
index c3909e7ff178..000000000000
--- a/arch/h8300/include/asm/param.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _H8300_PARAM_H
-#define _H8300_PARAM_H
-
-#include <uapi/asm/param.h>
-
-#define HZ CONFIG_HZ
-#define USER_HZ HZ
-#define CLOCKS_PER_SEC (USER_HZ)
-#endif /* _H8300_PARAM_H */
diff --git a/arch/h8300/include/asm/pci.h b/arch/h8300/include/asm/pci.h
deleted file mode 100644
index 0b2acaa3dd84..000000000000
--- a/arch/h8300/include/asm/pci.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _ASM_H8300_PCI_H
-#define _ASM_H8300_PCI_H
-
-/*
- * asm-h8300/pci.h - H8/300 specific PCI declarations.
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- */
-
-#define pcibios_assign_all_busses() 0
-
-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
-
-#define PCI_DMA_BUS_IS_PHYS (1)
-
-#endif /* _ASM_H8300_PCI_H */
diff --git a/arch/h8300/include/asm/percpu.h b/arch/h8300/include/asm/percpu.h
deleted file mode 100644
index 72c03e3666d8..000000000000
--- a/arch/h8300/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARCH_H8300_PERCPU__
-#define __ARCH_H8300_PERCPU__
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ARCH_H8300_PERCPU__ */
diff --git a/arch/h8300/include/asm/pgalloc.h b/arch/h8300/include/asm/pgalloc.h
deleted file mode 100644
index c2e89a286d23..000000000000
--- a/arch/h8300/include/asm/pgalloc.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _H8300_PGALLOC_H
-#define _H8300_PGALLOC_H
-
-#include <asm/setup.h>
-
-#define check_pgt_cache() do { } while (0)
-
-#endif /* _H8300_PGALLOC_H */
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
deleted file mode 100644
index 7ca20f894dd7..000000000000
--- a/arch/h8300/include/asm/pgtable.h
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef _H8300_PGTABLE_H
-#define _H8300_PGTABLE_H
-
-#include <asm-generic/4level-fixup.h>
-
-#include <linux/slab.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/io.h>
-
-#define pgd_present(pgd) (1) /* pages are always present on NO_MM */
-#define pgd_none(pgd) (0)
-#define pgd_bad(pgd) (0)
-#define pgd_clear(pgdp)
-#define kern_addr_valid(addr) (1)
-#define pmd_offset(a, b) ((void *)0)
-#define pmd_none(pmd) (1)
-#define pgd_offset_k(adrdress) ((pgd_t *)0)
-#define pte_offset_kernel(dir, address) ((pte_t *)0)
-
-#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */
-#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */
-#define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */
-#define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */
-#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */
-
-extern void paging_init(void);
-#define swapper_pg_dir ((pgd_t *) 0)
-
-#define __swp_type(x) (0)
-#define __swp_offset(x) (0)
-#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-static inline int pte_file(pte_t pte) { return 0; }
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) (virt_to_page(0))
-
-/*
- * These would be in other places but having them here reduces the diffs.
- */
-extern unsigned int kobjsize(const void *objp);
-extern int is_in_rom(unsigned long);
-
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
-/*
- * All 32bit addresses are effectively valid for vmalloc...
- * Sort of meaningless for non-VM targets.
- */
-#define VMALLOC_START 0
-#define VMALLOC_END 0xffffffff
-
-/*
- * All 32bit addresses are effectively valid for vmalloc...
- * Sort of meaningless for non-VM targets.
- */
-#define VMALLOC_START 0
-#define VMALLOC_END 0xffffffff
-
-#define arch_enter_lazy_cpu_mode() do {} while (0)
-
-#include <asm-generic/pgtable.h>
-
-#endif /* _H8300_PGTABLE_H */
diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h
deleted file mode 100644
index 4b0ca49bb463..000000000000
--- a/arch/h8300/include/asm/processor.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * include/asm-h8300/processor.h
- *
- * Copyright (C) 2002 Yoshinori Sato
- *
- * Based on: linux/asm-m68nommu/processor.h
- *
- * Copyright (C) 1995 Hamish Macdonald
- */
-
-#ifndef __ASM_H8300_PROCESSOR_H
-#define __ASM_H8300_PROCESSOR_H
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
-#include <linux/compiler.h>
-#include <asm/segment.h>
-#include <asm/fpu.h>
-#include <asm/ptrace.h>
-#include <asm/current.h>
-
-static inline unsigned long rdusp(void) {
- extern unsigned int sw_usp;
- return(sw_usp);
-}
-
-static inline void wrusp(unsigned long usp) {
- extern unsigned int sw_usp;
- sw_usp = usp;
-}
-
-/*
- * User space process size: 3.75GB. This is hardcoded into a few places,
- * so don't change it unless you know what you are doing.
- */
-#define TASK_SIZE (0xFFFFFFFFUL)
-
-#ifdef __KERNEL__
-#define STACK_TOP TASK_SIZE
-#define STACK_TOP_MAX STACK_TOP
-#endif
-
-/*
- * This decides where the kernel will search for a free chunk of vm
- * space during mmap's. We won't be using it
- */
-#define TASK_UNMAPPED_BASE 0
-
-struct thread_struct {
- unsigned long ksp; /* kernel stack pointer */
- unsigned long usp; /* user stack pointer */
- unsigned long ccr; /* saved status register */
- unsigned long esp0; /* points to SR of stack frame */
- struct {
- unsigned short *addr;
- unsigned short inst;
- } breakinfo;
-};
-
-#define INIT_THREAD { \
- .ksp = sizeof(init_stack) + (unsigned long)init_stack, \
- .usp = 0, \
- .ccr = PS_S, \
- .esp0 = 0, \
- .breakinfo = { \
- .addr = (unsigned short *)-1, \
- .inst = 0 \
- } \
-}
-
-/*
- * Do necessary setup to start up a newly executed thread.
- *
- * pass the data segment into user programs if it exists,
- * it can't hurt anything as far as I can tell
- */
-#if defined(__H8300H__)
-#define start_thread(_regs, _pc, _usp) \
-do { \
- (_regs)->pc = (_pc); \
- (_regs)->ccr = 0x00; /* clear all flags */ \
- (_regs)->er5 = current->mm->start_data; /* GOT base */ \
- wrusp((unsigned long)(_usp) - sizeof(unsigned long)*3); \
-} while(0)
-#endif
-#if defined(__H8300S__)
-#define start_thread(_regs, _pc, _usp) \
-do { \
- (_regs)->pc = (_pc); \
- (_regs)->ccr = 0x00; /* clear kernel flag */ \
- (_regs)->exr = 0x78; /* enable all interrupts */ \
- (_regs)->er5 = current->mm->start_data; /* GOT base */ \
- /* 14 = space for retaddr(4), vector(4), er0(4) and ext(2) on stack */ \
- wrusp(((unsigned long)(_usp)) - 14); \
-} while(0)
-#endif
-
-/* Forward declaration, a strange C thing */
-struct task_struct;
-
-/* Free all resources held by a thread. */
-static inline void release_thread(struct task_struct *dead_task)
-{
-}
-
-/*
- * Free current thread data structures etc..
- */
-static inline void exit_thread(void)
-{
-}
-
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk);
-unsigned long get_wchan(struct task_struct *p);
-
-#define KSTK_EIP(tsk) \
- ({ \
- unsigned long eip = 0; \
- if ((tsk)->thread.esp0 > PAGE_SIZE && \
- MAP_NR((tsk)->thread.esp0) < max_mapnr) \
- eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
- eip; })
-#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
-
-#define cpu_relax() barrier()
-
-#define HARD_RESET_NOW() ({ \
- local_irq_disable(); \
- asm("jmp @@0"); \
-})
-
-#endif
diff --git a/arch/h8300/include/asm/ptrace.h b/arch/h8300/include/asm/ptrace.h
deleted file mode 100644
index c1826b95c5ca..000000000000
--- a/arch/h8300/include/asm/ptrace.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef _H8300_PTRACE_H
-#define _H8300_PTRACE_H
-
-#include <uapi/asm/ptrace.h>
-
-#ifndef __ASSEMBLY__
-#if defined(CONFIG_CPU_H8S)
-#endif
-#ifndef PS_S
-#define PS_S (0x10)
-#endif
-
-#if defined(__H8300H__)
-#define H8300_REGS_NO 11
-#endif
-#if defined(__H8300S__)
-#define H8300_REGS_NO 12
-#endif
-
-/* Find the stack offset for a register, relative to thread.esp0. */
-#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg)
-
-#define arch_has_single_step() (1)
-
-#define user_mode(regs) (!((regs)->ccr & PS_S))
-#define instruction_pointer(regs) ((regs)->pc)
-#define profile_pc(regs) instruction_pointer(regs)
-#define current_pt_regs() ((struct pt_regs *) \
- (THREAD_SIZE + (unsigned long)current_thread_info()) - 1)
-#define signal_pt_regs() ((struct pt_regs *)current->thread.esp0)
-#define current_user_stack_pointer() rdusp()
-#endif /* __ASSEMBLY__ */
-#endif /* _H8300_PTRACE_H */
diff --git a/arch/h8300/include/asm/regs267x.h b/arch/h8300/include/asm/regs267x.h
deleted file mode 100644
index 1bff731a9f77..000000000000
--- a/arch/h8300/include/asm/regs267x.h
+++ /dev/null
@@ -1,336 +0,0 @@
-/* internal Peripherals Register address define */
-/* CPU: H8/306x */
-
-#if !defined(__REGS_H8S267x__)
-#define __REGS_H8S267x__
-
-#if defined(__KERNEL__)
-
-#define DASTCR 0xFEE01A
-#define DADR0 0xFFFFA4
-#define DADR1 0xFFFFA5
-#define DACR01 0xFFFFA6
-#define DADR2 0xFFFFA8
-#define DADR3 0xFFFFA9
-#define DACR23 0xFFFFAA
-
-#define ADDRA 0xFFFF90
-#define ADDRAH 0xFFFF90
-#define ADDRAL 0xFFFF91
-#define ADDRB 0xFFFF92
-#define ADDRBH 0xFFFF92
-#define ADDRBL 0xFFFF93
-#define ADDRC 0xFFFF94
-#define ADDRCH 0xFFFF94
-#define ADDRCL 0xFFFF95
-#define ADDRD 0xFFFF96
-#define ADDRDH 0xFFFF96
-#define ADDRDL 0xFFFF97
-#define ADDRE 0xFFFF98
-#define ADDREH 0xFFFF98
-#define ADDREL 0xFFFF99
-#define ADDRF 0xFFFF9A
-#define ADDRFH 0xFFFF9A
-#define ADDRFL 0xFFFF9B
-#define ADDRG 0xFFFF9C
-#define ADDRGH 0xFFFF9C
-#define ADDRGL 0xFFFF9D
-#define ADDRH 0xFFFF9E
-#define ADDRHH 0xFFFF9E
-#define ADDRHL 0xFFFF9F
-
-#define ADCSR 0xFFFFA0
-#define ADCR 0xFFFFA1
-
-#define ABWCR 0xFFFEC0
-#define ASTCR 0xFFFEC1
-#define WTCRAH 0xFFFEC2
-#define WTCRAL 0xFFFEC3
-#define WTCRBH 0xFFFEC4
-#define WTCRBL 0xFFFEC5
-#define RDNCR 0xFFFEC6
-#define CSACRH 0xFFFEC8
-#define CSACRL 0xFFFEC9
-#define BROMCRH 0xFFFECA
-#define BROMCRL 0xFFFECB
-#define BCR 0xFFFECC
-#define DRAMCR 0xFFFED0
-#define DRACCR 0xFFFED2
-#define REFCR 0xFFFED4
-#define RTCNT 0xFFFED6
-#define RTCOR 0xFFFED7
-
-#define MAR0AH 0xFFFEE0
-#define MAR0AL 0xFFFEE2
-#define IOAR0A 0xFFFEE4
-#define ETCR0A 0xFFFEE6
-#define MAR0BH 0xFFFEE8
-#define MAR0BL 0xFFFEEA
-#define IOAR0B 0xFFFEEC
-#define ETCR0B 0xFFFEEE
-#define MAR1AH 0xFFFEF0
-#define MAR1AL 0xFFFEF2
-#define IOAR1A 0xFFFEF4
-#define ETCR1A 0xFFFEF6
-#define MAR1BH 0xFFFEF8
-#define MAR1BL 0xFFFEFA
-#define IOAR1B 0xFFFEFC
-#define ETCR1B 0xFFFEFE
-#define DMAWER 0xFFFF20
-#define DMATCR 0xFFFF21
-#define DMACR0A 0xFFFF22
-#define DMACR0B 0xFFFF23
-#define DMACR1A 0xFFFF24
-#define DMACR1B 0xFFFF25
-#define DMABCRH 0xFFFF26
-#define DMABCRL 0xFFFF27
-
-#define EDSAR0 0xFFFDC0
-#define EDDAR0 0xFFFDC4
-#define EDTCR0 0xFFFDC8
-#define EDMDR0 0xFFFDCC
-#define EDMDR0H 0xFFFDCC
-#define EDMDR0L 0xFFFDCD
-#define EDACR0 0xFFFDCE
-#define EDSAR1 0xFFFDD0
-#define EDDAR1 0xFFFDD4
-#define EDTCR1 0xFFFDD8
-#define EDMDR1 0xFFFDDC
-#define EDMDR1H 0xFFFDDC
-#define EDMDR1L 0xFFFDDD
-#define EDACR1 0xFFFDDE
-#define EDSAR2 0xFFFDE0
-#define EDDAR2 0xFFFDE4
-#define EDTCR2 0xFFFDE8
-#define EDMDR2 0xFFFDEC
-#define EDMDR2H 0xFFFDEC
-#define EDMDR2L 0xFFFDED
-#define EDACR2 0xFFFDEE
-#define EDSAR3 0xFFFDF0
-#define EDDAR3 0xFFFDF4
-#define EDTCR3 0xFFFDF8
-#define EDMDR3 0xFFFDFC
-#define EDMDR3H 0xFFFDFC
-#define EDMDR3L 0xFFFDFD
-#define EDACR3 0xFFFDFE
-
-#define IPRA 0xFFFE00
-#define IPRB 0xFFFE02
-#define IPRC 0xFFFE04
-#define IPRD 0xFFFE06
-#define IPRE 0xFFFE08
-#define IPRF 0xFFFE0A
-#define IPRG 0xFFFE0C
-#define IPRH 0xFFFE0E
-#define IPRI 0xFFFE10
-#define IPRJ 0xFFFE12
-#define IPRK 0xFFFE14
-#define ITSR 0xFFFE16
-#define SSIER 0xFFFE18
-#define ISCRH 0xFFFE1A
-#define ISCRL 0xFFFE1C
-
-#define INTCR 0xFFFF31
-#define IER 0xFFFF32
-#define IERH 0xFFFF32
-#define IERL 0xFFFF33
-#define ISR 0xFFFF34
-#define ISRH 0xFFFF34
-#define ISRL 0xFFFF35
-
-#define P1DDR 0xFFFE20
-#define P2DDR 0xFFFE21
-#define P3DDR 0xFFFE22
-#define P4DDR 0xFFFE23
-#define P5DDR 0xFFFE24
-#define P6DDR 0xFFFE25
-#define P7DDR 0xFFFE26
-#define P8DDR 0xFFFE27
-#define P9DDR 0xFFFE28
-#define PADDR 0xFFFE29
-#define PBDDR 0xFFFE2A
-#define PCDDR 0xFFFE2B
-#define PDDDR 0xFFFE2C
-#define PEDDR 0xFFFE2D
-#define PFDDR 0xFFFE2E
-#define PGDDR 0xFFFE2F
-#define PHDDR 0xFFFF74
-
-#define PFCR0 0xFFFE32
-#define PFCR1 0xFFFE33
-#define PFCR2 0xFFFE34
-
-#define PAPCR 0xFFFE36
-#define PBPCR 0xFFFE37
-#define PCPCR 0xFFFE38
-#define PDPCR 0xFFFE39
-#define PEPCR 0xFFFE3A
-
-#define P3ODR 0xFFFE3C
-#define PAODR 0xFFFE3D
-
-#define P1DR 0xFFFF60
-#define P2DR 0xFFFF61
-#define P3DR 0xFFFF62
-#define P4DR 0xFFFF63
-#define P5DR 0xFFFF64
-#define P6DR 0xFFFF65
-#define P7DR 0xFFFF66
-#define P8DR 0xFFFF67
-#define P9DR 0xFFFF68
-#define PADR 0xFFFF69
-#define PBDR 0xFFFF6A
-#define PCDR 0xFFFF6B
-#define PDDR 0xFFFF6C
-#define PEDR 0xFFFF6D
-#define PFDR 0xFFFF6E
-#define PGDR 0xFFFF6F
-#define PHDR 0xFFFF72
-
-#define PORT1 0xFFFF50
-#define PORT2 0xFFFF51
-#define PORT3 0xFFFF52
-#define PORT4 0xFFFF53
-#define PORT5 0xFFFF54
-#define PORT6 0xFFFF55
-#define PORT7 0xFFFF56
-#define PORT8 0xFFFF57
-#define PORT9 0xFFFF58
-#define PORTA 0xFFFF59
-#define PORTB 0xFFFF5A
-#define PORTC 0xFFFF5B
-#define PORTD 0xFFFF5C
-#define PORTE 0xFFFF5D
-#define PORTF 0xFFFF5E
-#define PORTG 0xFFFF5F
-#define PORTH 0xFFFF70
-
-#define PCR 0xFFFF46
-#define PMR 0xFFFF47
-#define NDERH 0xFFFF48
-#define NDERL 0xFFFF49
-#define PODRH 0xFFFF4A
-#define PODRL 0xFFFF4B
-#define NDRH1 0xFFFF4C
-#define NDRL1 0xFFFF4D
-#define NDRH2 0xFFFF4E
-#define NDRL2 0xFFFF4F
-
-#define SMR0 0xFFFF78
-#define BRR0 0xFFFF79
-#define SCR0 0xFFFF7A
-#define TDR0 0xFFFF7B
-#define SSR0 0xFFFF7C
-#define RDR0 0xFFFF7D
-#define SCMR0 0xFFFF7E
-#define SMR1 0xFFFF80
-#define BRR1 0xFFFF81
-#define SCR1 0xFFFF82
-#define TDR1 0xFFFF83
-#define SSR1 0xFFFF84
-#define RDR1 0xFFFF85
-#define SCMR1 0xFFFF86
-#define SMR2 0xFFFF88
-#define BRR2 0xFFFF89
-#define SCR2 0xFFFF8A
-#define TDR2 0xFFFF8B
-#define SSR2 0xFFFF8C
-#define RDR2 0xFFFF8D
-#define SCMR2 0xFFFF8E
-
-#define IRCR0 0xFFFE1E
-#define SEMR 0xFFFDA8
-
-#define MDCR 0xFFFF3E
-#define SYSCR 0xFFFF3D
-#define MSTPCRH 0xFFFF40
-#define MSTPCRL 0xFFFF41
-#define FLMCR1 0xFFFFC8
-#define FLMCR2 0xFFFFC9
-#define EBR1 0xFFFFCA
-#define EBR2 0xFFFFCB
-#define CTGARC_RAMCR 0xFFFECE
-#define SBYCR 0xFFFF3A
-#define SCKCR 0xFFFF3B
-#define PLLCR 0xFFFF45
-
-#define TSTR 0xFFFFC0
-#define TSNC 0XFFFFC1
-
-#define TCR0 0xFFFFD0
-#define TMDR0 0xFFFFD1
-#define TIORH0 0xFFFFD2
-#define TIORL0 0xFFFFD3
-#define TIER0 0xFFFFD4
-#define TSR0 0xFFFFD5
-#define TCNT0 0xFFFFD6
-#define GRA0 0xFFFFD8
-#define GRB0 0xFFFFDA
-#define GRC0 0xFFFFDC
-#define GRD0 0xFFFFDE
-#define TCR1 0xFFFFE0
-#define TMDR1 0xFFFFE1
-#define TIORH1 0xFFFFE2
-#define TIORL1 0xFFFFE3
-#define TIER1 0xFFFFE4
-#define TSR1 0xFFFFE5
-#define TCNT1 0xFFFFE6
-#define GRA1 0xFFFFE8
-#define GRB1 0xFFFFEA
-#define TCR2 0xFFFFF0
-#define TMDR2 0xFFFFF1
-#define TIORH2 0xFFFFF2
-#define TIORL2 0xFFFFF3
-#define TIER2 0xFFFFF4
-#define TSR2 0xFFFFF5
-#define TCNT2 0xFFFFF6
-#define GRA2 0xFFFFF8
-#define GRB2 0xFFFFFA
-#define TCR3 0xFFFE80
-#define TMDR3 0xFFFE81
-#define TIORH3 0xFFFE82
-#define TIORL3 0xFFFE83
-#define TIER3 0xFFFE84
-#define TSR3 0xFFFE85
-#define TCNT3 0xFFFE86
-#define GRA3 0xFFFE88
-#define GRB3 0xFFFE8A
-#define GRC3 0xFFFE8C
-#define GRD3 0xFFFE8E
-#define TCR4 0xFFFE90
-#define TMDR4 0xFFFE91
-#define TIORH4 0xFFFE92
-#define TIORL4 0xFFFE93
-#define TIER4 0xFFFE94
-#define TSR4 0xFFFE95
-#define TCNT4 0xFFFE96
-#define GRA4 0xFFFE98
-#define GRB4 0xFFFE9A
-#define TCR5 0xFFFEA0
-#define TMDR5 0xFFFEA1
-#define TIORH5 0xFFFEA2
-#define TIORL5 0xFFFEA3
-#define TIER5 0xFFFEA4
-#define TSR5 0xFFFEA5
-#define TCNT5 0xFFFEA6
-#define GRA5 0xFFFEA8
-#define GRB5 0xFFFEAA
-
-#define _8TCR0 0xFFFFB0
-#define _8TCR1 0xFFFFB1
-#define _8TCSR0 0xFFFFB2
-#define _8TCSR1 0xFFFFB3
-#define _8TCORA0 0xFFFFB4
-#define _8TCORA1 0xFFFFB5
-#define _8TCORB0 0xFFFFB6
-#define _8TCORB1 0xFFFFB7
-#define _8TCNT0 0xFFFFB8
-#define _8TCNT1 0xFFFFB9
-
-#define TCSR 0xFFFFBC
-#define TCNT 0xFFFFBD
-#define RSTCSRW 0xFFFFBE
-#define RSTCSRR 0xFFFFBF
-
-#endif /* __KERNEL__ */
-#endif /* __REGS_H8S267x__ */
diff --git a/arch/h8300/include/asm/regs306x.h b/arch/h8300/include/asm/regs306x.h
deleted file mode 100644
index 027dd633fa25..000000000000
--- a/arch/h8300/include/asm/regs306x.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* internal Peripherals Register address define */
-/* CPU: H8/306x */
-
-#if !defined(__REGS_H8306x__)
-#define __REGS_H8306x__
-
-#if defined(__KERNEL__)
-
-#define DASTCR 0xFEE01A
-#define DADR0 0xFEE09C
-#define DADR1 0xFEE09D
-#define DACR 0xFEE09E
-
-#define ADDRAH 0xFFFFE0
-#define ADDRAL 0xFFFFE1
-#define ADDRBH 0xFFFFE2
-#define ADDRBL 0xFFFFE3
-#define ADDRCH 0xFFFFE4
-#define ADDRCL 0xFFFFE5
-#define ADDRDH 0xFFFFE6
-#define ADDRDL 0xFFFFE7
-#define ADCSR 0xFFFFE8
-#define ADCR 0xFFFFE9
-
-#define BRCR 0xFEE013
-#define ADRCR 0xFEE01E
-#define CSCR 0xFEE01F
-#define ABWCR 0xFEE020
-#define ASTCR 0xFEE021
-#define WCRH 0xFEE022
-#define WCRL 0xFEE023
-#define BCR 0xFEE024
-#define DRCRA 0xFEE026
-#define DRCRB 0xFEE027
-#define RTMCSR 0xFEE028
-#define RTCNT 0xFEE029
-#define RTCOR 0xFEE02A
-
-#define MAR0AR 0xFFFF20
-#define MAR0AE 0xFFFF21
-#define MAR0AH 0xFFFF22
-#define MAR0AL 0xFFFF23
-#define ETCR0AL 0xFFFF24
-#define ETCR0AH 0xFFFF25
-#define IOAR0A 0xFFFF26
-#define DTCR0A 0xFFFF27
-#define MAR0BR 0xFFFF28
-#define MAR0BE 0xFFFF29
-#define MAR0BH 0xFFFF2A
-#define MAR0BL 0xFFFF2B
-#define ETCR0BL 0xFFFF2C
-#define ETCR0BH 0xFFFF2D
-#define IOAR0B 0xFFFF2E
-#define DTCR0B 0xFFFF2F
-#define MAR1AR 0xFFFF30
-#define MAR1AE 0xFFFF31
-#define MAR1AH 0xFFFF32
-#define MAR1AL 0xFFFF33
-#define ETCR1AL 0xFFFF34
-#define ETCR1AH 0xFFFF35
-#define IOAR1A 0xFFFF36
-#define DTCR1A 0xFFFF37
-#define MAR1BR 0xFFFF38
-#define MAR1BE 0xFFFF39
-#define MAR1BH 0xFFFF3A
-#define MAR1BL 0xFFFF3B
-#define ETCR1BL 0xFFFF3C
-#define ETCR1BH 0xFFFF3D
-#define IOAR1B 0xFFFF3E
-#define DTCR1B 0xFFFF3F
-
-#define ISCR 0xFEE014
-#define IER 0xFEE015
-#define ISR 0xFEE016
-#define IPRA 0xFEE018
-#define IPRB 0xFEE019
-
-#define P1DDR 0xFEE000
-#define P2DDR 0xFEE001
-#define P3DDR 0xFEE002
-#define P4DDR 0xFEE003
-#define P5DDR 0xFEE004
-#define P6DDR 0xFEE005
-/*#define P7DDR 0xFEE006*/
-#define P8DDR 0xFEE007
-#define P9DDR 0xFEE008
-#define PADDR 0xFEE009
-#define PBDDR 0xFEE00A
-
-#define P1DR 0xFFFFD0
-#define P2DR 0xFFFFD1
-#define P3DR 0xFFFFD2
-#define P4DR 0xFFFFD3
-#define P5DR 0xFFFFD4
-#define P6DR 0xFFFFD5
-/*#define P7DR 0xFFFFD6*/
-#define P8DR 0xFFFFD7
-#define P9DR 0xFFFFD8
-#define PADR 0xFFFFD9
-#define PBDR 0xFFFFDA
-
-#define P2CR 0xFEE03C
-#define P4CR 0xFEE03E
-#define P5CR 0xFEE03F
-
-#define SMR0 0xFFFFB0
-#define BRR0 0xFFFFB1
-#define SCR0 0xFFFFB2
-#define TDR0 0xFFFFB3
-#define SSR0 0xFFFFB4
-#define RDR0 0xFFFFB5
-#define SCMR0 0xFFFFB6
-#define SMR1 0xFFFFB8
-#define BRR1 0xFFFFB9
-#define SCR1 0xFFFFBA
-#define TDR1 0xFFFFBB
-#define SSR1 0xFFFFBC
-#define RDR1 0xFFFFBD
-#define SCMR1 0xFFFFBE
-#define SMR2 0xFFFFC0
-#define BRR2 0xFFFFC1
-#define SCR2 0xFFFFC2
-#define TDR2 0xFFFFC3
-#define SSR2 0xFFFFC4
-#define RDR2 0xFFFFC5
-#define SCMR2 0xFFFFC6
-
-#define MDCR 0xFEE011
-#define SYSCR 0xFEE012
-#define DIVCR 0xFEE01B
-#define MSTCRH 0xFEE01C
-#define MSTCRL 0xFEE01D
-#define FLMCR1 0xFEE030
-#define FLMCR2 0xFEE031
-#define EBR1 0xFEE032
-#define EBR2 0xFEE033
-#define RAMCR 0xFEE077
-
-#define TSTR 0xFFFF60
-#define TSNC 0XFFFF61
-#define TMDR 0xFFFF62
-#define TOLR 0xFFFF63
-#define TISRA 0xFFFF64
-#define TISRB 0xFFFF65
-#define TISRC 0xFFFF66
-#define TCR0 0xFFFF68
-#define TIOR0 0xFFFF69
-#define TCNT0H 0xFFFF6A
-#define TCNT0L 0xFFFF6B
-#define GRA0H 0xFFFF6C
-#define GRA0L 0xFFFF6D
-#define GRB0H 0xFFFF6E
-#define GRB0L 0xFFFF6F
-#define TCR1 0xFFFF70
-#define TIOR1 0xFFFF71
-#define TCNT1H 0xFFFF72
-#define TCNT1L 0xFFFF73
-#define GRA1H 0xFFFF74
-#define GRA1L 0xFFFF75
-#define GRB1H 0xFFFF76
-#define GRB1L 0xFFFF77
-#define TCR3 0xFFFF78
-#define TIOR3 0xFFFF79
-#define TCNT3H 0xFFFF7A
-#define TCNT3L 0xFFFF7B
-#define GRA3H 0xFFFF7C
-#define GRA3L 0xFFFF7D
-#define GRB3H 0xFFFF7E
-#define GRB3L 0xFFFF7F
-
-#define _8TCR0 0xFFFF80
-#define _8TCR1 0xFFFF81
-#define _8TCSR0 0xFFFF82
-#define _8TCSR1 0xFFFF83
-#define TCORA0 0xFFFF84
-#define TCORA1 0xFFFF85
-#define TCORB0 0xFFFF86
-#define TCORB1 0xFFFF87
-#define _8TCNT0 0xFFFF88
-#define _8TCNT1 0xFFFF89
-
-#define _8TCR2 0xFFFF90
-#define _8TCR3 0xFFFF91
-#define _8TCSR2 0xFFFF92
-#define _8TCSR3 0xFFFF93
-#define TCORA2 0xFFFF94
-#define TCORA3 0xFFFF95
-#define TCORB2 0xFFFF96
-#define TCORB3 0xFFFF97
-#define _8TCNT2 0xFFFF98
-#define _8TCNT3 0xFFFF99
-
-#define TCSR 0xFFFF8C
-#define TCNT 0xFFFF8D
-#define RSTCSR 0xFFFF8F
-
-#define TPMR 0xFFFFA0
-#define TPCR 0xFFFFA1
-#define NDERB 0xFFFFA2
-#define NDERA 0xFFFFA3
-#define NDRB1 0xFFFFA4
-#define NDRA1 0xFFFFA5
-#define NDRB2 0xFFFFA6
-#define NDRA2 0xFFFFA7
-
-#define TCSR 0xFFFF8C
-#define TCNT 0xFFFF8D
-#define RSTCSRW 0xFFFF8E
-#define RSTCSRR 0xFFFF8F
-
-#endif /* __KERNEL__ */
-#endif /* __REGS_H8306x__ */
diff --git a/arch/h8300/include/asm/scatterlist.h b/arch/h8300/include/asm/scatterlist.h
deleted file mode 100644
index 82130eda0e5f..000000000000
--- a/arch/h8300/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_SCATTERLIST_H
-#define _H8300_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* !(_H8300_SCATTERLIST_H) */
diff --git a/arch/h8300/include/asm/sections.h b/arch/h8300/include/asm/sections.h
deleted file mode 100644
index a81743e8b743..000000000000
--- a/arch/h8300/include/asm/sections.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_SECTIONS_H_
-#define _H8300_SECTIONS_H_
-
-#include <asm-generic/sections.h>
-
-#endif
diff --git a/arch/h8300/include/asm/segment.h b/arch/h8300/include/asm/segment.h
deleted file mode 100644
index b79a82d0f99d..000000000000
--- a/arch/h8300/include/asm/segment.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef _H8300_SEGMENT_H
-#define _H8300_SEGMENT_H
-
-/* define constants */
-#define USER_DATA (1)
-#ifndef __USER_DS
-#define __USER_DS (USER_DATA)
-#endif
-#define USER_PROGRAM (2)
-#define SUPER_DATA (3)
-#ifndef __KERNEL_DS
-#define __KERNEL_DS (SUPER_DATA)
-#endif
-#define SUPER_PROGRAM (4)
-
-#ifndef __ASSEMBLY__
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-#define USER_DS MAKE_MM_SEG(__USER_DS)
-#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
-
-/*
- * Get/set the SFC/DFC registers for MOVES instructions
- */
-
-static inline mm_segment_t get_fs(void)
-{
- return USER_DS;
-}
-
-static inline mm_segment_t get_ds(void)
-{
- /* return the supervisor data space code */
- return KERNEL_DS;
-}
-
-static inline void set_fs(mm_segment_t val)
-{
-}
-
-#define segment_eq(a,b) ((a).seg == (b).seg)
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _H8300_SEGMENT_H */
diff --git a/arch/h8300/include/asm/sh_bios.h b/arch/h8300/include/asm/sh_bios.h
deleted file mode 100644
index b6bb6e58295c..000000000000
--- a/arch/h8300/include/asm/sh_bios.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* eCos HAL interface header */
-
-#ifndef SH_BIOS_H
-#define SH_BIOS_H
-
-#define HAL_IF_VECTOR_TABLE 0xfffe20
-#define CALL_IF_SET_CONSOLE_COMM 13
-#define QUERY_CURRENT -1
-#define MANGLER -3
-
-/* Checking for GDB stub active */
-/* suggestion Jonathan Larmour */
-static int sh_bios_in_gdb_mode(void)
-{
- static int gdb_active = -1;
- if (gdb_active == -1) {
- int (*set_console_comm)(int);
- set_console_comm = ((void **)HAL_IF_VECTOR_TABLE)[CALL_IF_SET_CONSOLE_COMM];
- gdb_active = (set_console_comm(QUERY_CURRENT) == MANGLER);
- }
- return gdb_active;
-}
-
-static void sh_bios_gdb_detach(void)
-{
-
-}
-
-#endif
diff --git a/arch/h8300/include/asm/shm.h b/arch/h8300/include/asm/shm.h
deleted file mode 100644
index ed6623c0545d..000000000000
--- a/arch/h8300/include/asm/shm.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _H8300_SHM_H
-#define _H8300_SHM_H
-
-
-/* format of page table entries that correspond to shared memory pages
- currently out in swap space (see also mm/swap.c):
- bits 0-1 (PAGE_PRESENT) is = 0
- bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE
- bits 31..9 are used like this:
- bits 15..9 (SHM_ID) the id of the shared memory segment
- bits 30..16 (SHM_IDX) the index of the page within the shared memory segment
- (actually only bits 25..16 get used since SHMMAX is so low)
- bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach
-*/
-/* on the m68k both bits 0 and 1 must be zero */
-/* format on the sun3 is similar, but bits 30, 31 are set to zero and all
- others are reduced by 2. --m */
-
-#ifndef CONFIG_SUN3
-#define SHM_ID_SHIFT 9
-#else
-#define SHM_ID_SHIFT 7
-#endif
-#define _SHM_ID_BITS 7
-#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1)
-
-#define SHM_IDX_SHIFT (SHM_ID_SHIFT+_SHM_ID_BITS)
-#define _SHM_IDX_BITS 15
-#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1)
-
-#endif /* _H8300_SHM_H */
diff --git a/arch/h8300/include/asm/shmparam.h b/arch/h8300/include/asm/shmparam.h
deleted file mode 100644
index d1863953ec64..000000000000
--- a/arch/h8300/include/asm/shmparam.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_SHMPARAM_H
-#define _H8300_SHMPARAM_H
-
-#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
-
-#endif /* _H8300_SHMPARAM_H */
diff --git a/arch/h8300/include/asm/signal.h b/arch/h8300/include/asm/signal.h
deleted file mode 100644
index 6341e36386f8..000000000000
--- a/arch/h8300/include/asm/signal.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _H8300_SIGNAL_H
-#define _H8300_SIGNAL_H
-
-#include <uapi/asm/signal.h>
-
-/* Most things should be clean enough to redefine this at will, if care
- is taken to make libc match. */
-
-#define _NSIG 64
-#define _NSIG_BPW 32
-#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
-
-typedef unsigned long old_sigset_t; /* at least 32 bits */
-
-typedef struct {
- unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
-#define __ARCH_HAS_SA_RESTORER
-
-#include <asm/sigcontext.h>
-#undef __HAVE_ARCH_SIG_BITOPS
-
-#endif /* _H8300_SIGNAL_H */
diff --git a/arch/h8300/include/asm/smp.h b/arch/h8300/include/asm/smp.h
deleted file mode 100644
index 9e9bd7e58922..000000000000
--- a/arch/h8300/include/asm/smp.h
+++ /dev/null
@@ -1 +0,0 @@
-/* nothing required here yet */
diff --git a/arch/h8300/include/asm/spinlock.h b/arch/h8300/include/asm/spinlock.h
deleted file mode 100644
index d5407fa173e4..000000000000
--- a/arch/h8300/include/asm/spinlock.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __H8300_SPINLOCK_H
-#define __H8300_SPINLOCK_H
-
-#error "H8/300 doesn't do SMP yet"
-
-#endif
diff --git a/arch/h8300/include/asm/string.h b/arch/h8300/include/asm/string.h
deleted file mode 100644
index ca5034897d87..000000000000
--- a/arch/h8300/include/asm/string.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _H8300_STRING_H_
-#define _H8300_STRING_H_
-
-#ifdef __KERNEL__ /* only set these up for kernel code */
-
-#include <asm/setup.h>
-#include <asm/page.h>
-
-#define __HAVE_ARCH_MEMSET
-extern void * memset(void * s, int c, size_t count);
-
-#define __HAVE_ARCH_MEMCPY
-extern void * memcpy(void *d, const void *s, size_t count);
-
-#else /* KERNEL */
-
-/*
- * let user libraries deal with these,
- * IMHO the kernel has no place defining these functions for user apps
- */
-
-#define __HAVE_ARCH_STRCPY 1
-#define __HAVE_ARCH_STRNCPY 1
-#define __HAVE_ARCH_STRCAT 1
-#define __HAVE_ARCH_STRNCAT 1
-#define __HAVE_ARCH_STRCMP 1
-#define __HAVE_ARCH_STRNCMP 1
-#define __HAVE_ARCH_STRNICMP 1
-#define __HAVE_ARCH_STRCHR 1
-#define __HAVE_ARCH_STRRCHR 1
-#define __HAVE_ARCH_STRSTR 1
-#define __HAVE_ARCH_STRLEN 1
-#define __HAVE_ARCH_STRNLEN 1
-#define __HAVE_ARCH_MEMSET 1
-#define __HAVE_ARCH_MEMCPY 1
-#define __HAVE_ARCH_MEMMOVE 1
-#define __HAVE_ARCH_MEMSCAN 1
-#define __HAVE_ARCH_MEMCMP 1
-#define __HAVE_ARCH_MEMCHR 1
-#define __HAVE_ARCH_STRTOK 1
-
-#endif /* KERNEL */
-
-#endif /* _M68K_STRING_H_ */
diff --git a/arch/h8300/include/asm/switch_to.h b/arch/h8300/include/asm/switch_to.h
deleted file mode 100644
index cdd8731ce487..000000000000
--- a/arch/h8300/include/asm/switch_to.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef _H8300_SWITCH_TO_H
-#define _H8300_SWITCH_TO_H
-
-/*
- * switch_to(n) should switch tasks to task ptr, first checking that
- * ptr isn't the current task, in which case it does nothing. This
- * also clears the TS-flag if the task we switched to has used the
- * math co-processor latest.
- */
-/*
- * switch_to() saves the extra registers, that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
- * a0-a1. Some of these are used by schedule() and its predecessors
- * and so we might get see unexpected behaviors when a task returns
- * with unexpected register values.
- *
- * syscall stores these registers itself and none of them are used
- * by syscall after the function in the syscall has been called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1, offset of tss in d1, and whether
- * the mm structures are shared in d2 (to avoid atc flushing).
- *
- * H8/300 Porting 2002/09/04 Yoshinori Sato
- */
-
-asmlinkage void resume(void);
-#define switch_to(prev,next,last) { \
- void *_last; \
- __asm__ __volatile__( \
- "mov.l %1, er0\n\t" \
- "mov.l %2, er1\n\t" \
- "mov.l %3, er2\n\t" \
- "jsr @_resume\n\t" \
- "mov.l er2,%0\n\t" \
- : "=r" (_last) \
- : "r" (&(prev->thread)), \
- "r" (&(next->thread)), \
- "g" (prev) \
- : "cc", "er0", "er1", "er2", "er3"); \
- (last) = _last; \
-}
-
-#endif /* _H8300_SWITCH_TO_H */
diff --git a/arch/h8300/include/asm/target_time.h b/arch/h8300/include/asm/target_time.h
deleted file mode 100644
index 9f2a9aa1fe6f..000000000000
--- a/arch/h8300/include/asm/target_time.h
+++ /dev/null
@@ -1,4 +0,0 @@
-extern int platform_timer_setup(void (*timer_int)(int, void *, struct pt_regs *));
-extern void platform_timer_eoi(void);
-extern void platform_gettod(unsigned int *year, unsigned int *mon, unsigned int *day,
- unsigned int *hour, unsigned int *min, unsigned int *sec);
diff --git a/arch/h8300/include/asm/termios.h b/arch/h8300/include/asm/termios.h
deleted file mode 100644
index 93a63df56247..000000000000
--- a/arch/h8300/include/asm/termios.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef _H8300_TERMIOS_H
-#define _H8300_TERMIOS_H
-
-#include <uapi/asm/termios.h>
-
-/* intr=^C quit=^| erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define user_termio_to_kernel_termios(termios, termio) \
-({ \
- unsigned short tmp; \
- get_user(tmp, &(termio)->c_iflag); \
- (termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \
- get_user(tmp, &(termio)->c_oflag); \
- (termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \
- get_user(tmp, &(termio)->c_cflag); \
- (termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \
- get_user(tmp, &(termio)->c_lflag); \
- (termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \
- get_user((termios)->c_line, &(termio)->c_line); \
- copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
-})
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-#define kernel_termios_to_user_termio(termio, termios) \
-({ \
- put_user((termios)->c_iflag, &(termio)->c_iflag); \
- put_user((termios)->c_oflag, &(termio)->c_oflag); \
- put_user((termios)->c_cflag, &(termio)->c_cflag); \
- put_user((termios)->c_lflag, &(termio)->c_lflag); \
- put_user((termios)->c_line, &(termio)->c_line); \
- copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
-})
-
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
-#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-
-#endif /* _H8300_TERMIOS_H */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
deleted file mode 100644
index ec2f7777c65a..000000000000
--- a/arch/h8300/include/asm/thread_info.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* thread_info.h: h8300 low-level thread information
- * adapted from the i386 and PPC versions by Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Copyright (C) 2002 David Howells (dhowells@redhat.com)
- * - Incorporating suggestions made by Linus Torvalds and Dave Miller
- */
-
-#ifndef _ASM_THREAD_INFO_H
-#define _ASM_THREAD_INFO_H
-
-#include <asm/page.h>
-
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-
-/*
- * low level task data.
- * If you change this, change the TI_* offsets below to match.
- */
-struct thread_info {
- struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
- unsigned long flags; /* low level flags */
- int cpu; /* cpu we're on */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
- struct restart_block restart_block;
-};
-
-/*
- * macros/functions for gaining access to the thread information structure
- */
-#define INIT_THREAD_INFO(tsk) \
-{ \
- .task = &tsk, \
- .exec_domain = &default_exec_domain, \
- .flags = 0, \
- .cpu = 0, \
- .preempt_count = INIT_PREEMPT_COUNT, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
-}
-
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
-
-/*
- * Size of kernel stack for each process. This must be a power of 2...
- */
-#define THREAD_SIZE_ORDER 1
-#define THREAD_SIZE 8192 /* 2 pages */
-
-
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
- struct thread_info *ti;
- __asm__(
- "mov.l sp, %0 \n\t"
- "and.l %1, %0"
- : "=&r"(ti)
- : "i" (~(THREAD_SIZE-1))
- );
- return ti;
-}
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * Offsets in thread_info structure, used in assembly code
- */
-#define TI_TASK 0
-#define TI_EXECDOMAIN 4
-#define TI_FLAGS 8
-#define TI_CPU 12
-#define TI_PRE_COUNT 16
-
-#define PREEMPT_ACTIVE 0x4000000
-
-/*
- * thread information flag bit numbers
- */
-#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
-#define TIF_SIGPENDING 1 /* signal pending */
-#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
-#define TIF_NOTIFY_RESUME 6 /* callback before returning to user */
-
-/* as above, but as bit values */
-#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
-#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-
-#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_RESUME)
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/h8300/include/asm/timer.h b/arch/h8300/include/asm/timer.h
deleted file mode 100644
index def80464d38f..000000000000
--- a/arch/h8300/include/asm/timer.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __H8300_TIMER_H
-#define __H8300_TIMER_H
-
-void h8300_timer_tick(void);
-void h8300_timer_setup(void);
-void h8300_gettod(unsigned int *year, unsigned int *mon, unsigned int *day,
- unsigned int *hour, unsigned int *min, unsigned int *sec);
-
-#define TIMER_FREQ (CONFIG_CPU_CLOCK*10000) /* Timer input freq. */
-
-#define calc_param(cnt, div, rate, limit) \
-do { \
- cnt = TIMER_FREQ / HZ; \
- for (div = 0; div < ARRAY_SIZE(divide_rate); div++) { \
- if (rate[div] == 0) \
- continue; \
- if ((cnt / rate[div]) > limit) \
- break; \
- } \
- if (div == ARRAY_SIZE(divide_rate)) \
- panic("Timer counter overflow"); \
- cnt /= divide_rate[div]; \
-} while(0)
-
-#endif
diff --git a/arch/h8300/include/asm/timex.h b/arch/h8300/include/asm/timex.h
deleted file mode 100644
index 23e67013439f..000000000000
--- a/arch/h8300/include/asm/timex.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * linux/include/asm-h8300/timex.h
- *
- * H8/300 architecture timex specifications
- */
-#ifndef _ASM_H8300_TIMEX_H
-#define _ASM_H8300_TIMEX_H
-
-#define CLOCK_TICK_RATE (CONFIG_CPU_CLOCK*1000/8192) /* Timer input freq. */
-
-typedef unsigned long cycles_t;
-extern short h8300_timer_count;
-
-static inline cycles_t get_cycles(void)
-{
- return 0;
-}
-
-#endif
diff --git a/arch/h8300/include/asm/tlb.h b/arch/h8300/include/asm/tlb.h
deleted file mode 100644
index 7f0743051ad5..000000000000
--- a/arch/h8300/include/asm/tlb.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __H8300_TLB_H__
-#define __H8300_TLB_H__
-
-#define tlb_flush(tlb) do { } while(0)
-
-#include <asm-generic/tlb.h>
-
-#endif
diff --git a/arch/h8300/include/asm/tlbflush.h b/arch/h8300/include/asm/tlbflush.h
deleted file mode 100644
index 41c148a9208e..000000000000
--- a/arch/h8300/include/asm/tlbflush.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _H8300_TLBFLUSH_H
-#define _H8300_TLBFLUSH_H
-
-/*
- * Copyright (C) 2000 Lineo, David McCullough <davidm@uclinux.org>
- * Copyright (C) 2000-2002, Greg Ungerer <gerg@snapgear.com>
- */
-
-#include <asm/setup.h>
-
-/*
- * flush all user-space atc entries.
- */
-static inline void __flush_tlb(void)
-{
- BUG();
-}
-
-static inline void __flush_tlb_one(unsigned long addr)
-{
- BUG();
-}
-
-#define flush_tlb() __flush_tlb()
-
-/*
- * flush all atc entries (both kernel and user-space entries).
- */
-static inline void flush_tlb_all(void)
-{
- BUG();
-}
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- BUG();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
-{
- BUG();
-}
-
-static inline void flush_tlb_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- BUG();
-}
-
-static inline void flush_tlb_kernel_page(unsigned long addr)
-{
- BUG();
-}
-
-#endif /* _H8300_TLBFLUSH_H */
diff --git a/arch/h8300/include/asm/topology.h b/arch/h8300/include/asm/topology.h
deleted file mode 100644
index fdc121924d4c..000000000000
--- a/arch/h8300/include/asm/topology.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_H8300_TOPOLOGY_H
-#define _ASM_H8300_TOPOLOGY_H
-
-#include <asm-generic/topology.h>
-
-#endif /* _ASM_H8300_TOPOLOGY_H */
diff --git a/arch/h8300/include/asm/traps.h b/arch/h8300/include/asm/traps.h
deleted file mode 100644
index 41cf6be02f68..000000000000
--- a/arch/h8300/include/asm/traps.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * linux/include/asm-h8300/traps.h
- *
- * Copyright (C) 2003 Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#ifndef _H8300_TRAPS_H
-#define _H8300_TRAPS_H
-
-extern void system_call(void);
-extern void interrupt_entry(void);
-extern void trace_break(void);
-
-#define JMP_OP 0x5a000000
-#define JSR_OP 0x5e000000
-#define VECTOR(address) ((JMP_OP)|((unsigned long)address))
-#define REDIRECT(address) ((JSR_OP)|((unsigned long)address))
-
-#define TRACE_VEC 5
-
-#define TRAP0_VEC 8
-#define TRAP1_VEC 9
-#define TRAP2_VEC 10
-#define TRAP3_VEC 11
-
-#if defined(__H8300H__)
-#define NR_TRAPS 12
-#endif
-#if defined(__H8300S__)
-#define NR_TRAPS 16
-#endif
-
-#endif /* _H8300_TRAPS_H */
diff --git a/arch/h8300/include/asm/types.h b/arch/h8300/include/asm/types.h
deleted file mode 100644
index c012707f6037..000000000000
--- a/arch/h8300/include/asm/types.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _H8300_TYPES_H
-#define _H8300_TYPES_H
-
-#include <uapi/asm/types.h>
-
-
-#define BITS_PER_LONG 32
-
-#endif /* _H8300_TYPES_H */
diff --git a/arch/h8300/include/asm/uaccess.h b/arch/h8300/include/asm/uaccess.h
deleted file mode 100644
index 8725d1ad4272..000000000000
--- a/arch/h8300/include/asm/uaccess.h
+++ /dev/null
@@ -1,163 +0,0 @@
-#ifndef __H8300_UACCESS_H
-#define __H8300_UACCESS_H
-
-/*
- * User space memory access functions
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-
-#include <asm/segment.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/* We let the MMU do all checking */
-#define access_ok(type, addr, size) __access_ok((unsigned long)addr,size)
-static inline int __access_ok(unsigned long addr, unsigned long size)
-{
-#define RANGE_CHECK_OK(addr, size, lower, upper) \
- (((addr) >= (lower)) && (((addr) + (size)) < (upper)))
-
- extern unsigned long _ramend;
- return(RANGE_CHECK_OK(addr, size, 0L, (unsigned long)&_ramend));
-}
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise. */
-extern unsigned long search_exception_table(unsigned long);
-
-
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- */
-
-#define put_user(x, ptr) \
-({ \
- int __pu_err = 0; \
- typeof(*(ptr)) __pu_val = (x); \
- switch (sizeof (*(ptr))) { \
- case 1: \
- case 2: \
- case 4: \
- *(ptr) = (__pu_val); \
- break; \
- case 8: \
- memcpy(ptr, &__pu_val, sizeof (*(ptr))); \
- break; \
- default: \
- __pu_err = __put_user_bad(); \
- break; \
- } \
- __pu_err; \
-})
-#define __put_user(x, ptr) put_user(x, ptr)
-
-extern int __put_user_bad(void);
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-
-#define __ptr(x) ((unsigned long *)(x))
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-
-#define get_user(x, ptr) \
-({ \
- int __gu_err = 0; \
- typeof(*(ptr)) __gu_val = *ptr; \
- switch (sizeof(*(ptr))) { \
- case 1: \
- case 2: \
- case 4: \
- case 8: \
- break; \
- default: \
- __gu_err = __get_user_bad(); \
- break; \
- } \
- (x) = __gu_val; \
- __gu_err; \
-})
-#define __get_user(x, ptr) get_user(x, ptr)
-
-extern int __get_user_bad(void);
-
-#define copy_from_user(to, from, n) (memcpy(to, from, n), 0)
-#define copy_to_user(to, from, n) (memcpy(to, from, n), 0)
-
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })
-
-#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })
-
-/*
- * Copy a null terminated string from userspace.
- */
-
-static inline long
-strncpy_from_user(char *dst, const char *src, long count)
-{
- char *tmp;
- strncpy(dst, src, count);
- for (tmp = dst; *tmp && count > 0; tmp++, count--)
- ;
- return(tmp - dst); /* DAVIDM should we count a NUL ? check getname */
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-static inline long strnlen_user(const char *src, long n)
-{
- return(strlen(src) + 1); /* DAVIDM make safer */
-}
-
-#define strlen_user(str) strnlen_user(str, 32767)
-
-/*
- * Zero Userspace
- */
-
-static inline unsigned long
-clear_user(void *to, unsigned long n)
-{
- memset(to, 0, n);
- return 0;
-}
-
-#define __clear_user clear_user
-
-#endif /* _H8300_UACCESS_H */
diff --git a/arch/h8300/include/asm/ucontext.h b/arch/h8300/include/asm/ucontext.h
deleted file mode 100644
index 0bcf8f85fab9..000000000000
--- a/arch/h8300/include/asm/ucontext.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _H8300_UCONTEXT_H
-#define _H8300_UCONTEXT_H
-
-struct ucontext {
- unsigned long uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- struct sigcontext uc_mcontext;
- sigset_t uc_sigmask; /* mask last for extensibility */
-};
-
-#endif
diff --git a/arch/h8300/include/asm/unaligned.h b/arch/h8300/include/asm/unaligned.h
deleted file mode 100644
index b8d06c70c2da..000000000000
--- a/arch/h8300/include/asm/unaligned.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_H8300_UNALIGNED_H
-#define _ASM_H8300_UNALIGNED_H
-
-#include <linux/unaligned/be_memmove.h>
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-
-#endif /* _ASM_H8300_UNALIGNED_H */
diff --git a/arch/h8300/include/asm/unistd.h b/arch/h8300/include/asm/unistd.h
deleted file mode 100644
index ab671ecf5196..000000000000
--- a/arch/h8300/include/asm/unistd.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef _ASM_H8300_UNISTD_H_
-#define _ASM_H8300_UNISTD_H_
-
-#include <uapi/asm/unistd.h>
-
-
-#define NR_syscalls 321
-
-#define __ARCH_WANT_OLD_READDIR
-#define __ARCH_WANT_OLD_STAT
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_GETHOSTNAME
-#define __ARCH_WANT_SYS_IPC
-#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
-#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_WAITPID
-#define __ARCH_WANT_SYS_SOCKETCALL
-#define __ARCH_WANT_SYS_FADVISE64
-#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT
-#define __ARCH_WANT_SYS_OLD_MMAP
-#define __ARCH_WANT_SYS_OLD_SELECT
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __ARCH_WANT_SYS_SIGPENDING
-#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_FORK
-#define __ARCH_WANT_SYS_VFORK
-#define __ARCH_WANT_SYS_CLONE
-
-#endif /* _ASM_H8300_UNISTD_H_ */
diff --git a/arch/h8300/include/asm/user.h b/arch/h8300/include/asm/user.h
deleted file mode 100644
index 14a9e18950f1..000000000000
--- a/arch/h8300/include/asm/user.h
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef _H8300_USER_H
-#define _H8300_USER_H
-
-#include <asm/page.h>
-
-/* Core file format: The core file is written in such a way that gdb
- can understand it and provide useful information to the user (under
- linux we use the 'trad-core' bfd). There are quite a number of
- obstacles to being able to view the contents of the floating point
- registers, and until these are solved you will not be able to view the
- contents of them. Actually, you can read in the core file and look at
- the contents of the user struct to find out what the floating point
- registers contain.
- The actual file contents are as follows:
- UPAGE: 1 page consisting of a user struct that tells gdb what is present
- in the file. Directly after this is a copy of the task_struct, which
- is currently not used by gdb, but it may come in useful at some point.
- All of the registers are stored as part of the upage. The upage should
- always be only one page.
- DATA: The data area is stored. We use current->end_text to
- current->brk to pick up all of the user variables, plus any memory
- that may have been malloced. No attempt is made to determine if a page
- is demand-zero or if a page is totally unused, we just cover the entire
- range. All of the addresses are rounded in such a way that an integral
- number of pages is written.
- STACK: We need the stack information in order to get a meaningful
- backtrace. We need to write the data from (esp) to
- current->start_stack, so we round each of these off in order to be able
- to write an integer number of pages.
- The minimum core file size is 3 pages, or 12288 bytes.
-*/
-
-/* This is the old layout of "struct pt_regs" as of Linux 1.x, and
- is still the layout used by user (the new pt_regs doesn't have
- all registers). */
-struct user_regs_struct {
- long er1,er2,er3,er4,er5,er6;
- long er0;
- long usp;
- long orig_er0;
- short ccr;
- long pc;
-};
-
-
-/* When the kernel dumps core, it starts by dumping the user struct -
- this will be used by gdb to figure out where the data and stack segments
- are within the file, and what virtual addresses to use. */
-struct user{
-/* We start with the registers, to mimic the way that "memory" is returned
- from the ptrace(3,...) function. */
- struct user_regs_struct regs; /* Where the registers are actually stored */
-/* ptrace does not yet supply these. Someday.... */
-/* The rest of this junk is to help gdb figure out what goes where */
- unsigned long int u_tsize; /* Text segment size (pages). */
- unsigned long int u_dsize; /* Data segment size (pages). */
- unsigned long int u_ssize; /* Stack segment size (pages). */
- unsigned long start_code; /* Starting virtual address of text. */
- unsigned long start_stack; /* Starting virtual address of stack area.
- This is actually the bottom of the stack,
- the top of the stack is always found in the
- esp register. */
- long int signal; /* Signal that caused the core dump. */
- int reserved; /* No longer used */
- unsigned long u_ar0; /* Used by gdb to help find the values for */
- /* the registers. */
- unsigned long magic; /* To uniquely identify a core file */
- char u_comm[32]; /* User command that was responsible */
-};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
-
-#endif
diff --git a/arch/h8300/include/asm/virtconvert.h b/arch/h8300/include/asm/virtconvert.h
deleted file mode 100644
index 19cfd62b11c3..000000000000
--- a/arch/h8300/include/asm/virtconvert.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __H8300_VIRT_CONVERT__
-#define __H8300_VIRT_CONVERT__
-
-/*
- * Macros used for converting between virtual and physical mappings.
- */
-
-#ifdef __KERNEL__
-
-#include <asm/setup.h>
-#include <asm/page.h>
-
-#define phys_to_virt(vaddr) ((void *) (vaddr))
-#define virt_to_phys(vaddr) ((unsigned long) (vaddr))
-
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-#endif
-#endif
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
deleted file mode 100644
index 040178cdb3eb..000000000000
--- a/arch/h8300/include/uapi/asm/Kbuild
+++ /dev/null
@@ -1,34 +0,0 @@
-# UAPI Header export list
-include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/h8300/include/uapi/asm/auxvec.h b/arch/h8300/include/uapi/asm/auxvec.h
deleted file mode 100644
index 1d36fe38b088..000000000000
--- a/arch/h8300/include/uapi/asm/auxvec.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASMH8300_AUXVEC_H
-#define __ASMH8300_AUXVEC_H
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/bitsperlong.h b/arch/h8300/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/h8300/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/h8300/include/uapi/asm/byteorder.h b/arch/h8300/include/uapi/asm/byteorder.h
deleted file mode 100644
index 13539da99efd..000000000000
--- a/arch/h8300/include/uapi/asm/byteorder.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_BYTEORDER_H
-#define _H8300_BYTEORDER_H
-
-#include <linux/byteorder/big_endian.h>
-
-#endif /* _H8300_BYTEORDER_H */
diff --git a/arch/h8300/include/uapi/asm/errno.h b/arch/h8300/include/uapi/asm/errno.h
deleted file mode 100644
index 0c2f5641fdcc..000000000000
--- a/arch/h8300/include/uapi/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_ERRNO_H
-#define _H8300_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* _H8300_ERRNO_H */
diff --git a/arch/h8300/include/uapi/asm/fcntl.h b/arch/h8300/include/uapi/asm/fcntl.h
deleted file mode 100644
index 1952cb2e3b06..000000000000
--- a/arch/h8300/include/uapi/asm/fcntl.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _H8300_FCNTL_H
-#define _H8300_FCNTL_H
-
-#define O_DIRECTORY 040000 /* must be a directory */
-#define O_NOFOLLOW 0100000 /* don't follow links */
-#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */
-#define O_LARGEFILE 0400000
-
-#include <asm-generic/fcntl.h>
-
-#endif /* _H8300_FCNTL_H */
diff --git a/arch/h8300/include/uapi/asm/ioctl.h b/arch/h8300/include/uapi/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/arch/h8300/include/uapi/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/h8300/include/uapi/asm/ioctls.h b/arch/h8300/include/uapi/asm/ioctls.h
deleted file mode 100644
index 30eaed2facdb..000000000000
--- a/arch/h8300/include/uapi/asm/ioctls.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __ARCH_H8300_IOCTLS_H__
-#define __ARCH_H8300_IOCTLS_H__
-
-#define FIOQSIZE 0x545E
-
-#include <asm-generic/ioctls.h>
-
-#endif /* __ARCH_H8300_IOCTLS_H__ */
diff --git a/arch/h8300/include/uapi/asm/ipcbuf.h b/arch/h8300/include/uapi/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/h8300/include/uapi/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/h8300/include/uapi/asm/kvm_para.h b/arch/h8300/include/uapi/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b957..000000000000
--- a/arch/h8300/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/h8300/include/uapi/asm/mman.h b/arch/h8300/include/uapi/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/h8300/include/uapi/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/h8300/include/uapi/asm/msgbuf.h b/arch/h8300/include/uapi/asm/msgbuf.h
deleted file mode 100644
index 6b148cd09aa5..000000000000
--- a/arch/h8300/include/uapi/asm/msgbuf.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _H8300_MSGBUF_H
-#define _H8300_MSGBUF_H
-
-/*
- * The msqid64_ds structure for H8/300 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct msqid64_ds {
- struct ipc64_perm msg_perm;
- __kernel_time_t msg_stime; /* last msgsnd time */
- unsigned long __unused1;
- __kernel_time_t msg_rtime; /* last msgrcv time */
- unsigned long __unused2;
- __kernel_time_t msg_ctime; /* last change time */
- unsigned long __unused3;
- unsigned long msg_cbytes; /* current number of bytes on queue */
- unsigned long msg_qnum; /* number of messages in queue */
- unsigned long msg_qbytes; /* max number of bytes on queue */
- __kernel_pid_t msg_lspid; /* pid of last msgsnd */
- __kernel_pid_t msg_lrpid; /* last receive pid */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-#endif /* _H8300_MSGBUF_H */
diff --git a/arch/h8300/include/uapi/asm/param.h b/arch/h8300/include/uapi/asm/param.h
deleted file mode 100644
index 3dd18ae15f03..000000000000
--- a/arch/h8300/include/uapi/asm/param.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _UAPI_H8300_PARAM_H
-#define _UAPI_H8300_PARAM_H
-
-#ifndef __KERNEL__
-#define HZ 100
-#endif
-
-#define EXEC_PAGESIZE 4096
-
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
-#endif /* _UAPI_H8300_PARAM_H */
diff --git a/arch/h8300/include/uapi/asm/poll.h b/arch/h8300/include/uapi/asm/poll.h
deleted file mode 100644
index f61540c22d94..000000000000
--- a/arch/h8300/include/uapi/asm/poll.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __H8300_POLL_H
-#define __H8300_POLL_H
-
-#define POLLWRNORM POLLOUT
-#define POLLWRBAND 256
-
-#include <asm-generic/poll.h>
-
-#undef POLLREMOVE
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/posix_types.h b/arch/h8300/include/uapi/asm/posix_types.h
deleted file mode 100644
index 91e62ba4c7b0..000000000000
--- a/arch/h8300/include/uapi/asm/posix_types.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __ARCH_H8300_POSIX_TYPES_H
-#define __ARCH_H8300_POSIX_TYPES_H
-
-/*
- * This file is generally used by user-level software, so you need to
- * be a little careful about namespace pollution etc. Also, we cannot
- * assume GCC is being used.
- */
-
-typedef unsigned short __kernel_mode_t;
-#define __kernel_mode_t __kernel_mode_t
-
-typedef unsigned short __kernel_ipc_pid_t;
-#define __kernel_ipc_pid_t __kernel_ipc_pid_t
-
-typedef unsigned short __kernel_uid_t;
-typedef unsigned short __kernel_gid_t;
-#define __kernel_uid_t __kernel_uid_t
-
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
-#define __kernel_old_uid_t __kernel_old_uid_t
-
-#include <asm-generic/posix_types.h>
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/ptrace.h b/arch/h8300/include/uapi/asm/ptrace.h
deleted file mode 100644
index ef39ec5977b6..000000000000
--- a/arch/h8300/include/uapi/asm/ptrace.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _UAPI_H8300_PTRACE_H
-#define _UAPI_H8300_PTRACE_H
-
-#ifndef __ASSEMBLY__
-
-#define PT_ER1 0
-#define PT_ER2 1
-#define PT_ER3 2
-#define PT_ER4 3
-#define PT_ER5 4
-#define PT_ER6 5
-#define PT_ER0 6
-#define PT_ORIG_ER0 7
-#define PT_CCR 8
-#define PT_PC 9
-#define PT_USP 10
-#define PT_EXR 12
-
-/* this struct defines the way the registers are stored on the
- stack during a system call. */
-
-struct pt_regs {
- long retpc;
- long er4;
- long er5;
- long er6;
- long er3;
- long er2;
- long er1;
- long orig_er0;
- unsigned short ccr;
- long er0;
- long vector;
-#if defined(CONFIG_CPU_H8S)
- unsigned short exr;
-#endif
- unsigned long pc;
-} __attribute__((aligned(2),packed));
-
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-
-#endif /* __ASSEMBLY__ */
-#endif /* _UAPI_H8300_PTRACE_H */
diff --git a/arch/h8300/include/uapi/asm/resource.h b/arch/h8300/include/uapi/asm/resource.h
deleted file mode 100644
index 46c5f4391607..000000000000
--- a/arch/h8300/include/uapi/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_RESOURCE_H
-#define _H8300_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* _H8300_RESOURCE_H */
diff --git a/arch/h8300/include/uapi/asm/sembuf.h b/arch/h8300/include/uapi/asm/sembuf.h
deleted file mode 100644
index e04a3ec0cb92..000000000000
--- a/arch/h8300/include/uapi/asm/sembuf.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _H8300_SEMBUF_H
-#define _H8300_SEMBUF_H
-
-/*
- * The semid64_ds structure for m68k architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct semid64_ds {
- struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
- __kernel_time_t sem_otime; /* last semop time */
- unsigned long __unused1;
- __kernel_time_t sem_ctime; /* last change time */
- unsigned long __unused2;
- unsigned long sem_nsems; /* no. of semaphores in array */
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif /* _H8300_SEMBUF_H */
diff --git a/arch/h8300/include/uapi/asm/setup.h b/arch/h8300/include/uapi/asm/setup.h
deleted file mode 100644
index e2c600e96733..000000000000
--- a/arch/h8300/include/uapi/asm/setup.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __H8300_SETUP_H
-#define __H8300_SETUP_H
-
-#define COMMAND_LINE_SIZE 512
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/shmbuf.h b/arch/h8300/include/uapi/asm/shmbuf.h
deleted file mode 100644
index 64e77993a7a9..000000000000
--- a/arch/h8300/include/uapi/asm/shmbuf.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef _H8300_SHMBUF_H
-#define _H8300_SHMBUF_H
-
-/*
- * The shmid64_ds structure for m68k architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct shmid64_ds {
- struct ipc64_perm shm_perm; /* operation perms */
- size_t shm_segsz; /* size of segment (bytes) */
- __kernel_time_t shm_atime; /* last attach time */
- unsigned long __unused1;
- __kernel_time_t shm_dtime; /* last detach time */
- unsigned long __unused2;
- __kernel_time_t shm_ctime; /* last change time */
- unsigned long __unused3;
- __kernel_pid_t shm_cpid; /* pid of creator */
- __kernel_pid_t shm_lpid; /* pid of last operator */
- unsigned long shm_nattch; /* no. of current attaches */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-struct shminfo64 {
- unsigned long shmmax;
- unsigned long shmmin;
- unsigned long shmmni;
- unsigned long shmseg;
- unsigned long shmall;
- unsigned long __unused1;
- unsigned long __unused2;
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif /* _H8300_SHMBUF_H */
diff --git a/arch/h8300/include/uapi/asm/sigcontext.h b/arch/h8300/include/uapi/asm/sigcontext.h
deleted file mode 100644
index e4b81505f8f8..000000000000
--- a/arch/h8300/include/uapi/asm/sigcontext.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _ASM_H8300_SIGCONTEXT_H
-#define _ASM_H8300_SIGCONTEXT_H
-
-struct sigcontext {
- unsigned long sc_mask; /* old sigmask */
- unsigned long sc_usp; /* old user stack pointer */
- unsigned long sc_er0;
- unsigned long sc_er1;
- unsigned long sc_er2;
- unsigned long sc_er3;
- unsigned long sc_er4;
- unsigned long sc_er5;
- unsigned long sc_er6;
- unsigned short sc_ccr;
- unsigned long sc_pc;
-};
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/siginfo.h b/arch/h8300/include/uapi/asm/siginfo.h
deleted file mode 100644
index bc8fbea931a5..000000000000
--- a/arch/h8300/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_SIGINFO_H
-#define _H8300_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/signal.h b/arch/h8300/include/uapi/asm/signal.h
deleted file mode 100644
index af3a6c37fee6..000000000000
--- a/arch/h8300/include/uapi/asm/signal.h
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef _UAPI_H8300_SIGNAL_H
-#define _UAPI_H8300_SIGNAL_H
-
-#include <linux/types.h>
-
-/* Avoid too many header ordering problems. */
-struct siginfo;
-
-#ifndef __KERNEL__
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-#define NSIG 32
-typedef unsigned long sigset_t;
-
-#endif /* __KERNEL__ */
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/*
-#define SIGLOST 29
-*/
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-#define SA_RESTORER 0x04000000
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
-
-#include <asm-generic/signal-defs.h>
-
-#ifndef __KERNEL__
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-struct sigaction {
- union {
- __sighandler_t _sa_handler;
- void (*_sa_sigaction)(int, struct siginfo *, void *);
- } _u;
- sigset_t sa_mask;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
-};
-
-#define sa_handler _u._sa_handler
-#define sa_sigaction _u._sa_sigaction
-
-#endif /* __KERNEL__ */
-
-typedef struct sigaltstack {
- void *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-
-#endif /* _UAPI_H8300_SIGNAL_H */
diff --git a/arch/h8300/include/uapi/asm/socket.h b/arch/h8300/include/uapi/asm/socket.h
deleted file mode 100644
index 9490758c5e2b..000000000000
--- a/arch/h8300/include/uapi/asm/socket.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef _ASM_SOCKET_H
-#define _ASM_SOCKET_H
-
-#include <asm/sockios.h>
-
-/* For setsockoptions(2) */
-#define SOL_SOCKET 1
-
-#define SO_DEBUG 1
-#define SO_REUSEADDR 2
-#define SO_TYPE 3
-#define SO_ERROR 4
-#define SO_DONTROUTE 5
-#define SO_BROADCAST 6
-#define SO_SNDBUF 7
-#define SO_RCVBUF 8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE 9
-#define SO_OOBINLINE 10
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_LINGER 13
-#define SO_BSDCOMPAT 14
-#define SO_REUSEPORT 15
-#define SO_PASSCRED 16
-#define SO_PEERCRED 17
-#define SO_RCVLOWAT 18
-#define SO_SNDLOWAT 19
-#define SO_RCVTIMEO 20
-#define SO_SNDTIMEO 21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION 22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
-#define SO_SECURITY_ENCRYPTION_NETWORK 24
-
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-#define SO_GET_FILTER SO_ATTACH_FILTER
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_ACCEPTCONN 30
-
-#define SO_PEERSEC 31
-#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-#define SO_MARK 36
-
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
-#define SO_PROTOCOL 38
-#define SO_DOMAIN 39
-
-#define SO_RXQ_OVFL 40
-
-#define SO_WIFI_STATUS 41
-#define SCM_WIFI_STATUS SO_WIFI_STATUS
-#define SO_PEEK_OFF 42
-
-/* Instruct lower device to use last 4-bytes of skb data as FCS */
-#define SO_NOFCS 43
-
-#define SO_LOCK_FILTER 44
-
-#define SO_SELECT_ERR_QUEUE 45
-
-#define SO_BUSY_POLL 46
-
-#endif /* _ASM_SOCKET_H */
diff --git a/arch/h8300/include/uapi/asm/sockios.h b/arch/h8300/include/uapi/asm/sockios.h
deleted file mode 100644
index e9c7ec810c23..000000000000
--- a/arch/h8300/include/uapi/asm/sockios.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ARCH_H8300_SOCKIOS__
-#define __ARCH_H8300_SOCKIOS__
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
-
-#endif /* __ARCH_H8300_SOCKIOS__ */
diff --git a/arch/h8300/include/uapi/asm/stat.h b/arch/h8300/include/uapi/asm/stat.h
deleted file mode 100644
index 62c3cc24dfe6..000000000000
--- a/arch/h8300/include/uapi/asm/stat.h
+++ /dev/null
@@ -1,78 +0,0 @@
-#ifndef _H8300_STAT_H
-#define _H8300_STAT_H
-
-struct __old_kernel_stat {
- unsigned short st_dev;
- unsigned short st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- unsigned long st_size;
- unsigned long st_atime;
- unsigned long st_mtime;
- unsigned long st_ctime;
-};
-
-struct stat {
- unsigned short st_dev;
- unsigned short __pad1;
- unsigned long st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- unsigned short __pad2;
- unsigned long st_size;
- unsigned long st_blksize;
- unsigned long st_blocks;
- unsigned long st_atime;
- unsigned long __unused1;
- unsigned long st_mtime;
- unsigned long __unused2;
- unsigned long st_ctime;
- unsigned long __unused3;
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-/* This matches struct stat64 in glibc2.1, hence the absolutely
- * insane amounts of padding around dev_t's.
- */
-struct stat64 {
- unsigned long long st_dev;
- unsigned char __pad1[2];
-
-#define STAT64_HAS_BROKEN_ST_INO 1
- unsigned long __st_ino;
-
- unsigned int st_mode;
- unsigned int st_nlink;
-
- unsigned long st_uid;
- unsigned long st_gid;
-
- unsigned long long st_rdev;
- unsigned char __pad3[2];
-
- long long st_size;
- unsigned long st_blksize;
-
- unsigned long __pad4; /* future possible st_blocks high bits */
- unsigned long st_blocks; /* Number 512-byte blocks allocated. */
-
- unsigned long st_atime;
- unsigned long st_atime_nsec;
-
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
-
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
-
- unsigned long long st_ino;
-};
-
-#endif /* _H8300_STAT_H */
diff --git a/arch/h8300/include/uapi/asm/statfs.h b/arch/h8300/include/uapi/asm/statfs.h
deleted file mode 100644
index b96efa712aac..000000000000
--- a/arch/h8300/include/uapi/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_STATFS_H
-#define _H8300_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* _H8300_STATFS_H */
diff --git a/arch/h8300/include/uapi/asm/swab.h b/arch/h8300/include/uapi/asm/swab.h
deleted file mode 100644
index 39abbf52807d..000000000000
--- a/arch/h8300/include/uapi/asm/swab.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _H8300_SWAB_H
-#define _H8300_SWAB_H
-
-#include <linux/types.h>
-
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-# define __SWAB_64_THRU_32__
-#endif
-
-#endif /* _H8300_SWAB_H */
diff --git a/arch/h8300/include/uapi/asm/termbits.h b/arch/h8300/include/uapi/asm/termbits.h
deleted file mode 100644
index 3287a6244d74..000000000000
--- a/arch/h8300/include/uapi/asm/termbits.h
+++ /dev/null
@@ -1,201 +0,0 @@
-#ifndef __ARCH_H8300_TERMBITS_H__
-#define __ARCH_H8300_TERMBITS_H__
-
-#include <linux/posix_types.h>
-
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
-typedef unsigned int tcflag_t;
-
-#define NCCS 19
-struct termios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
-};
-
-struct termios2 {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-struct ktermios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
-
-/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IUCLC 0001000
-#define IXON 0002000
-#define IXANY 0004000
-#define IXOFF 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
-
-/* c_oflag bits */
-#define OPOST 0000001
-#define OLCUC 0000002
-#define ONLCR 0000004
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
-
-/* c_cflag bit meaning */
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060
-#define CS5 0000000
-#define CS6 0000020
-#define CS7 0000040
-#define CS8 0000060
-#define CSTOPB 0000100
-#define CREAD 0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL 0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define BOTHER 0010000
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-#define ISIG 0000001
-#define ICANON 0000002
-#define XCASE 0000004
-#define ECHO 0000010
-#define ECHOE 0000020
-#define ECHOK 0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-#define EXTPROC 0200000
-
-
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
-/* tcsetattr uses these */
-#define TCSANOW 0
-#define TCSADRAIN 1
-#define TCSAFLUSH 2
-
-#endif /* __ARCH_H8300_TERMBITS_H__ */
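The BOTHER/CBAUDEX bits and struct termios2 defined above are what allow a non-standard baud rate: the rate goes into c_ispeed/c_ospeed instead of a Bxxxx constant. A hedged userspace sketch, assuming the TCGETS2/TCSETS2 ioctls are available and using an illustrative device path; error-path cleanup is trimmed:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <asm/termbits.h>       /* struct termios2, BOTHER, CBAUD */

/* Hedged sketch: request a non-standard baud rate through termios2. */
int set_custom_baud(const char *dev, int baud)
{
    struct termios2 tio;
    int fd = open(dev, O_RDWR | O_NOCTTY);

    if (fd < 0)
        return -1;
    if (ioctl(fd, TCGETS2, &tio) < 0)
        return -1;
    tio.c_cflag &= ~CBAUD;
    tio.c_cflag |= BOTHER;      /* rate comes from c_ispeed/c_ospeed */
    tio.c_ispeed = baud;
    tio.c_ospeed = baud;
    return ioctl(fd, TCSETS2, &tio);
}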
diff --git a/arch/h8300/include/uapi/asm/termios.h b/arch/h8300/include/uapi/asm/termios.h
deleted file mode 100644
index 5a67d7e38843..000000000000
--- a/arch/h8300/include/uapi/asm/termios.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _UAPI_H8300_TERMIOS_H
-#define _UAPI_H8300_TERMIOS_H
-
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-
-/* modem lines */
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-
-#endif /* _UAPI_H8300_TERMIOS_H */
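The TIOCM_* bits above are the values reported by the TIOCMGET ioctl. A hedged sketch that reads the modem-status lines of a serial port (device path illustrative; on glibc the same constants are visible through <sys/ioctl.h>):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Hedged sketch: query the modem-status lines using the bits above. */
int main(void)
{
    int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
    int lines;

    if (fd < 0 || ioctl(fd, TIOCMGET, &lines) < 0)
        return 1;
    printf("CTS=%d DSR=%d CD=%d RI=%d\n",
           !!(lines & TIOCM_CTS), !!(lines & TIOCM_DSR),
           !!(lines & TIOCM_CAR), !!(lines & TIOCM_RNG));
    return 0;
}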
diff --git a/arch/h8300/include/uapi/asm/types.h b/arch/h8300/include/uapi/asm/types.h
deleted file mode 100644
index 9ec9d4c5ac4d..000000000000
--- a/arch/h8300/include/uapi/asm/types.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/int-ll64.h>
diff --git a/arch/h8300/include/uapi/asm/unistd.h b/arch/h8300/include/uapi/asm/unistd.h
deleted file mode 100644
index 8cb5d429f840..000000000000
--- a/arch/h8300/include/uapi/asm/unistd.h
+++ /dev/null
@@ -1,330 +0,0 @@
-#ifndef _UAPI_ASM_H8300_UNISTD_H_
-#define _UAPI_ASM_H8300_UNISTD_H_
-
-/*
- * This file contains the system call numbers.
- */
-
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_lchown 16
-#define __NR_break 17
-#define __NR_oldstat 18
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_oldfstat 28
-#define __NR_pause 29
-#define __NR_utime 30
-#define __NR_stty 31
-#define __NR_gtty 32
-#define __NR_access 33
-#define __NR_nice 34
-#define __NR_ftime 35
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
-#define __NR_prof 44
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_signal 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
-#define __NR_lock 53
-#define __NR_ioctl 54
-#define __NR_fcntl 55
-#define __NR_mpx 56
-#define __NR_setpgid 57
-#define __NR_ulimit 58
-#define __NR_oldolduname 59
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
-#define __NR_select 82
-#define __NR_symlink 83
-#define __NR_oldlstat 84
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
-#define __NR_profil 98
-#define __NR_statfs 99
-#define __NR_fstatfs 100
-#define __NR_ioperm 101
-#define __NR_socketcall 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-#define __NR_olduname 109
-#define __NR_iopl 110
-#define __NR_vhangup 111
-#define __NR_idle 112
-#define __NR_vm86old 113
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_modify_ldt 123
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
-#define __NR_create_module 127
-#define __NR_init_module 128
-#define __NR_delete_module 129
-#define __NR_get_kernel_syms 130
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
-#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR_getdents 141
-#define __NR__newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
-#define __NR_vm86 166
-#define __NR_query_module 167
-#define __NR_poll 168
-#define __NR_nfsservctl 169
-#define __NR_setresgid 170
-#define __NR_getresgid 171
-#define __NR_prctl 172
-#define __NR_rt_sigreturn 173
-#define __NR_rt_sigaction 174
-#define __NR_rt_sigprocmask 175
-#define __NR_rt_sigpending 176
-#define __NR_rt_sigtimedwait 177
-#define __NR_rt_sigqueueinfo 178
-#define __NR_rt_sigsuspend 179
-#define __NR_pread64 180
-#define __NR_pwrite64 181
-#define __NR_chown 182
-#define __NR_getcwd 183
-#define __NR_capget 184
-#define __NR_capset 185
-#define __NR_sigaltstack 186
-#define __NR_sendfile 187
-#define __NR_getpmsg 188 /* some people actually want streams */
-#define __NR_putpmsg 189 /* some people actually want streams */
-#define __NR_vfork 190
-#define __NR_ugetrlimit 191
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#define __NR_lchown32 198
-#define __NR_getuid32 199
-#define __NR_getgid32 200
-#define __NR_geteuid32 201
-#define __NR_getegid32 202
-#define __NR_setreuid32 203
-#define __NR_setregid32 204
-#define __NR_getgroups32 205
-#define __NR_setgroups32 206
-#define __NR_fchown32 207
-#define __NR_setresuid32 208
-#define __NR_getresuid32 209
-#define __NR_setresgid32 210
-#define __NR_getresgid32 211
-#define __NR_chown32 212
-#define __NR_setuid32 213
-#define __NR_setgid32 214
-#define __NR_setfsuid32 215
-#define __NR_setfsgid32 216
-#define __NR_pivot_root 217
-#define __NR_mincore 218
-#define __NR_madvise 219
-#define __NR_madvise1 219
-#define __NR_getdents64 220
-#define __NR_fcntl64 221
-/* 223 is unused */
-#define __NR_gettid 224
-#define __NR_readahead 225
-#define __NR_setxattr 226
-#define __NR_lsetxattr 227
-#define __NR_fsetxattr 228
-#define __NR_getxattr 229
-#define __NR_lgetxattr 230
-#define __NR_fgetxattr 231
-#define __NR_listxattr 232
-#define __NR_llistxattr 233
-#define __NR_flistxattr 234
-#define __NR_removexattr 235
-#define __NR_lremovexattr 236
-#define __NR_fremovexattr 237
-#define __NR_tkill 238
-#define __NR_sendfile64 239
-#define __NR_futex 240
-#define __NR_sched_setaffinity 241
-#define __NR_sched_getaffinity 242
-#define __NR_set_thread_area 243
-#define __NR_get_thread_area 244
-#define __NR_io_setup 245
-#define __NR_io_destroy 246
-#define __NR_io_getevents 247
-#define __NR_io_submit 248
-#define __NR_io_cancel 249
-#define __NR_fadvise64 250
-/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
-#define __NR_exit_group 252
-#define __NR_lookup_dcookie 253
-#define __NR_epoll_create 254
-#define __NR_epoll_ctl 255
-#define __NR_epoll_wait 256
-#define __NR_remap_file_pages 257
-#define __NR_set_tid_address 258
-#define __NR_timer_create 259
-#define __NR_timer_settime (__NR_timer_create+1)
-#define __NR_timer_gettime (__NR_timer_create+2)
-#define __NR_timer_getoverrun (__NR_timer_create+3)
-#define __NR_timer_delete (__NR_timer_create+4)
-#define __NR_clock_settime (__NR_timer_create+5)
-#define __NR_clock_gettime (__NR_timer_create+6)
-#define __NR_clock_getres (__NR_timer_create+7)
-#define __NR_clock_nanosleep (__NR_timer_create+8)
-#define __NR_statfs64 268
-#define __NR_fstatfs64 269
-#define __NR_tgkill 270
-#define __NR_utimes 271
-#define __NR_fadvise64_64 272
-#define __NR_vserver 273
-#define __NR_mbind 274
-#define __NR_get_mempolicy 275
-#define __NR_set_mempolicy 276
-#define __NR_mq_open 277
-#define __NR_mq_unlink (__NR_mq_open+1)
-#define __NR_mq_timedsend (__NR_mq_open+2)
-#define __NR_mq_timedreceive (__NR_mq_open+3)
-#define __NR_mq_notify (__NR_mq_open+4)
-#define __NR_mq_getsetattr (__NR_mq_open+5)
-#define __NR_kexec_load 283
-#define __NR_waitid 284
-/* #define __NR_sys_setaltroot 285 */
-#define __NR_add_key 286
-#define __NR_request_key 287
-#define __NR_keyctl 288
-#define __NR_ioprio_set 289
-#define __NR_ioprio_get 290
-#define __NR_inotify_init 291
-#define __NR_inotify_add_watch 292
-#define __NR_inotify_rm_watch 293
-#define __NR_migrate_pages 294
-#define __NR_openat 295
-#define __NR_mkdirat 296
-#define __NR_mknodat 297
-#define __NR_fchownat 298
-#define __NR_futimesat 299
-#define __NR_fstatat64 300
-#define __NR_unlinkat 301
-#define __NR_renameat 302
-#define __NR_linkat 303
-#define __NR_symlinkat 304
-#define __NR_readlinkat 305
-#define __NR_fchmodat 306
-#define __NR_faccessat 307
-#define __NR_pselect6 308
-#define __NR_ppoll 309
-#define __NR_unshare 310
-#define __NR_set_robust_list 311
-#define __NR_get_robust_list 312
-#define __NR_splice 313
-#define __NR_sync_file_range 314
-#define __NR_tee 315
-#define __NR_vmsplice 316
-#define __NR_move_pages 317
-#define __NR_getcpu 318
-#define __NR_epoll_pwait 319
-#define __NR_setns 320
-
-#endif /* _UAPI_ASM_H8300_UNISTD_H_ */
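These numbers are what the TRAPA #0 entry path (entry.S, removed below) uses to index sys_call_table; from C the same mechanism is reached through syscall(2). A hedged sketch using __NR_gettid purely as an example; on a hosted build the constant comes from <sys/syscall.h>, not from this header:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hedged sketch: invoke a system call by number. */
int main(void)
{
    long tid = syscall(__NR_gettid);

    printf("gettid() -> %ld\n", tid);
    return 0;
}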
diff --git a/arch/h8300/kernel/Makefile b/arch/h8300/kernel/Makefile
deleted file mode 100644
index 1cc57f872d34..000000000000
--- a/arch/h8300/kernel/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := vmlinux.lds
-
-obj-y := process.o traps.o ptrace.o irq.o \
- sys_h8300.o time.o signal.o \
- setup.o gpio.o syscalls.o \
- entry.o timer/
-
-obj-$(CONFIG_MODULES) += module.o h8300_ksyms.o
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
deleted file mode 100644
index fd961e0bd741..000000000000
--- a/arch/h8300/kernel/asm-offsets.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * This program is used to generate definitions needed by
- * assembly language modules.
- *
- * We use the technique used in the OSF Mach kernel code:
- * generate asm statements containing #defines,
- * compile this file to assembler, and then extract the
- * #defines from the assembly-language output.
- */
-
-#include <linux/stddef.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/ptrace.h>
-#include <linux/hardirq.h>
-#include <linux/kbuild.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/ptrace.h>
-
-int main(void)
-{
- /* offsets into the task struct */
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
- DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
- DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
- DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
- DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
- DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
- DEFINE(TASK_MM, offsetof(struct task_struct, mm));
- DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
-
- /* offsets into the irq_cpustat_t struct */
- DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
-
- /* offsets into the thread struct */
- DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
- DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
- DEFINE(THREAD_CCR, offsetof(struct thread_struct, ccr));
-
- /* offsets into the pt_regs struct */
- DEFINE(LER0, offsetof(struct pt_regs, er0) - sizeof(long));
- DEFINE(LER1, offsetof(struct pt_regs, er1) - sizeof(long));
- DEFINE(LER2, offsetof(struct pt_regs, er2) - sizeof(long));
- DEFINE(LER3, offsetof(struct pt_regs, er3) - sizeof(long));
- DEFINE(LER4, offsetof(struct pt_regs, er4) - sizeof(long));
- DEFINE(LER5, offsetof(struct pt_regs, er5) - sizeof(long));
- DEFINE(LER6, offsetof(struct pt_regs, er6) - sizeof(long));
- DEFINE(LORIG, offsetof(struct pt_regs, orig_er0) - sizeof(long));
- DEFINE(LCCR, offsetof(struct pt_regs, ccr) - sizeof(long));
- DEFINE(LVEC, offsetof(struct pt_regs, vector) - sizeof(long));
-#if defined(__H8300S__)
- DEFINE(LEXR, offsetof(struct pt_regs, exr) - sizeof(long));
-#endif
- DEFINE(LRET, offsetof(struct pt_regs, pc) - sizeof(long));
-
- DEFINE(PT_PTRACED, PT_PTRACED);
-
- return 0;
-}
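The comment at the top of this file describes the whole trick: offsets cannot be #include'd into assembly, so a C file computes them with offsetof(), embeds them in inline asm, and the build scrapes them out of the compiler's assembly output. A standalone hedged sketch of the same technique; in-kernel the DEFINE helper comes from <linux/kbuild.h>, and the struct here is illustrative:

/* Compile with "cc -S offsets.c" and grep the generated .s file for
 * the "->" markers; a script can turn them into #defines. */
#include <stddef.h>

struct thread_struct {
    unsigned long ksp;
    unsigned long usp;
    unsigned long ccr;
};

#define DEFINE(sym, val) \
    asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
    DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
    DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
    DEFINE(THREAD_CCR, offsetof(struct thread_struct, ccr));
    return 0;
}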
diff --git a/arch/h8300/kernel/entry.S b/arch/h8300/kernel/entry.S
deleted file mode 100644
index 94bd30f11df6..000000000000
--- a/arch/h8300/kernel/entry.S
+++ /dev/null
@@ -1,402 +0,0 @@
-/* -*- mode: asm -*-
- *
- * linux/arch/h8300/platform/h8300h/entry.S
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- * David McCullough <davidm@snapgear.com>
- *
- */
-
-/*
- * entry.S
- * include exception/interrupt gateway
- * system call entry
- */
-
-#include <linux/sys.h>
-#include <asm/unistd.h>
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/errno.h>
-
-#if defined(CONFIG_CPU_H8300H)
-#define USERRET 8
-INTERRUPTS = 64
- .h8300h
- .macro SHLL2 reg
- shll.l \reg
- shll.l \reg
- .endm
- .macro SHLR2 reg
- shlr.l \reg
- shlr.l \reg
- .endm
- .macro SAVEREGS
- mov.l er0,@-sp
- mov.l er1,@-sp
- mov.l er2,@-sp
- mov.l er3,@-sp
- .endm
- .macro RESTOREREGS
- mov.l @sp+,er3
- mov.l @sp+,er2
- .endm
- .macro SAVEEXR
- .endm
- .macro RESTOREEXR
- .endm
-#endif
-#if defined(CONFIG_CPU_H8S)
-#define USERRET 10
-#define USEREXR 8
-INTERRUPTS = 128
- .h8300s
- .macro SHLL2 reg
- shll.l #2,\reg
- .endm
- .macro SHLR2 reg
- shlr.l #2,\reg
- .endm
- .macro SAVEREGS
- stm.l er0-er3,@-sp
- .endm
- .macro RESTOREREGS
- ldm.l @sp+,er2-er3
- .endm
- .macro SAVEEXR
- mov.w @(USEREXR:16,er0),r1
- mov.w r1,@(LEXR-LER3:16,sp) /* copy EXR */
- .endm
- .macro RESTOREEXR
- mov.w @(LEXR-LER1:16,sp),r1 /* restore EXR */
- mov.b r1l,r1h
- mov.w r1,@(USEREXR:16,er0)
- .endm
-#endif
-
-
-/* CPU context save/restore macros. */
-
- .macro SAVE_ALL
- mov.l er0,@-sp
- stc ccr,r0l /* check kernel mode */
- btst #4,r0l
- bne 5f
-
- /* user mode */
- mov.l sp,@_sw_usp
- mov.l @sp,er0 /* restore saved er0 */
- orc #0x10,ccr /* switch kernel stack */
- mov.l @_sw_ksp,sp
- sub.l #(LRET-LORIG),sp /* allocate LORIG - LRET */
- SAVEREGS
- mov.l @_sw_usp,er0
- mov.l @(USERRET:16,er0),er1 /* copy the RET addr */
- mov.l er1,@(LRET-LER3:16,sp)
- SAVEEXR
-
- mov.l @(LORIG-LER3:16,sp),er0
- mov.l er0,@(LER0-LER3:16,sp) /* copy ER0 */
- mov.w e1,r1 /* e1 highbyte = ccr */
- and #0xef,r1h /* mask mode? flag */
- bra 6f
-5:
- /* kernel mode */
- mov.l @sp,er0 /* restore saved er0 */
- subs #2,sp /* set dummy ccr */
- SAVEREGS
- mov.w @(LRET-LER3:16,sp),r1 /* copy old ccr */
-6:
- mov.b r1h,r1l
- mov.b #0,r1h
- mov.w r1,@(LCCR-LER3:16,sp) /* set ccr */
- mov.l er6,@-sp /* syscall arg #6 */
- mov.l er5,@-sp /* syscall arg #5 */
- mov.l er4,@-sp /* syscall arg #4 */
- .endm /* r1 = ccr */
-
- .macro RESTORE_ALL
- mov.l @sp+,er4
- mov.l @sp+,er5
- mov.l @sp+,er6
- RESTOREREGS
- mov.w @(LCCR-LER1:16,sp),r0 /* check kernel mode */
- btst #4,r0l
- bne 7f
-
- orc #0x80,ccr
- mov.l @_sw_usp,er0
- mov.l @(LER0-LER1:16,sp),er1 /* restore ER0 */
- mov.l er1,@er0
- RESTOREEXR
- mov.w @(LCCR-LER1:16,sp),r1 /* restore the RET addr */
- mov.b r1l,r1h
- mov.b @(LRET+1-LER1:16,sp),r1l
- mov.w r1,e1
- mov.w @(LRET+2-LER1:16,sp),r1
- mov.l er1,@(USERRET:16,er0)
-
- mov.l @sp+,er1
- add.l #(LRET-LER1),sp /* remove LORIG - LRET */
- mov.l sp,@_sw_ksp
- andc #0xef,ccr /* switch to user mode */
- mov.l er0,sp
- bra 8f
-7:
- mov.l @sp+,er1
- adds #4,sp
- adds #2,sp
-8:
- mov.l @sp+,er0
- adds #4,sp /* remove the sw created LVEC */
- rte
- .endm
-
-.globl _system_call
-.globl _ret_from_exception
-.globl _ret_from_fork
-.globl _ret_from_kernel_thread
-.globl _ret_from_interrupt
-.globl _interrupt_redirect_table
-.globl _sw_ksp,_sw_usp
-.globl _resume
-.globl _interrupt_entry
-.globl _trace_break
-
-#if defined(CONFIG_ROMKERNEL)
- .section .int_redirect,"ax"
-_interrupt_redirect_table:
-#if defined(CONFIG_CPU_H8300H)
- .rept 7
- .long 0
- .endr
-#endif
-#if defined(CONFIG_CPU_H8S)
- .rept 5
- .long 0
- .endr
- jmp @_trace_break
- .long 0
-#endif
-
- jsr @_interrupt_entry /* NMI */
- jmp @_system_call /* TRAPA #0 (System call) */
- .long 0
- .long 0
- jmp @_trace_break /* TRAPA #3 (breakpoint) */
- .rept INTERRUPTS-12
- jsr @_interrupt_entry
- .endr
-#endif
-#if defined(CONFIG_RAMKERNEL)
-.globl _interrupt_redirect_table
- .section .bss
-_interrupt_redirect_table:
- .space 4
-#endif
-
- .section .text
- .align 2
-_interrupt_entry:
- SAVE_ALL
- mov.l sp,er0
- add.l #LVEC,er0
- btst #4,r1l
- bne 1f
- /* user LVEC */
- mov.l @_sw_usp,er0
- adds #4,er0
-1:
- mov.l @er0,er0 /* LVEC address */
-#if defined(CONFIG_ROMKERNEL)
- sub.l #_interrupt_redirect_table,er0
-#endif
-#if defined(CONFIG_RAMKERNEL)
- mov.l @_interrupt_redirect_table,er1
- sub.l er1,er0
-#endif
- SHLR2 er0
- dec.l #1,er0
- mov.l sp,er1
- subs #4,er1 /* adjust ret_pc */
- jsr @_do_IRQ
- jmp @_ret_from_interrupt
-
-_system_call:
- subs #4,sp /* dummy LVEC */
- SAVE_ALL
- andc #0x7f,ccr
- mov.l er0,er4
-
- /* save top of frame */
- mov.l sp,er0
- jsr @_set_esp0
- mov.l sp,er2
- and.w #0xe000,r2
- mov.b @((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
- btst #(TIF_SYSCALL_TRACE & 7),r2l
- beq 1f
- jsr @_do_syscall_trace
-1:
- cmp.l #NR_syscalls,er4
- bcc badsys
- SHLL2 er4
- mov.l #_sys_call_table,er0
- add.l er4,er0
- mov.l @er0,er4
- beq _ret_from_exception:16
- mov.l @(LER1:16,sp),er0
- mov.l @(LER2:16,sp),er1
- mov.l @(LER3:16,sp),er2
- jsr @er4
- mov.l er0,@(LER0:16,sp) /* save the return value */
- mov.l sp,er2
- and.w #0xe000,r2
- mov.b @((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
- btst #(TIF_SYSCALL_TRACE & 7),r2l
- beq 2f
- jsr @_do_syscall_trace
-2:
-#if defined(CONFIG_SYSCALL_PRINT)
- jsr @_syscall_print
-#endif
- orc #0x80,ccr
- bra resume_userspace
-
-badsys:
- mov.l #-ENOSYS,er0
- mov.l er0,@(LER0:16,sp)
- bra resume_userspace
-
-#if !defined(CONFIG_PREEMPT)
-#define resume_kernel restore_all
-#endif
-
-_ret_from_exception:
-#if defined(CONFIG_PREEMPT)
- orc #0x80,ccr
-#endif
-_ret_from_interrupt:
- mov.b @(LCCR+1:16,sp),r0l
- btst #4,r0l
- bne resume_kernel:8 /* return from kernel */
-resume_userspace:
- andc #0x7f,ccr
- mov.l sp,er4
- and.w #0xe000,r4 /* er4 <- current thread info */
- mov.l @(TI_FLAGS:16,er4),er1
- and.l #_TIF_WORK_MASK,er1
- beq restore_all:8
-work_pending:
- btst #TIF_NEED_RESCHED,r1l
- bne work_resched:8
- /* work notifysig */
- mov.l sp,er0
- subs #4,er0 /* er0: pt_regs */
- jsr @_do_notify_resume
- bra restore_all:8
-work_resched:
- mov.l sp,er0
- jsr @_set_esp0
- jsr @_schedule
- bra resume_userspace:8
-restore_all:
- RESTORE_ALL /* Does RTE */
-
-#if defined(CONFIG_PREEMPT)
-resume_kernel:
- mov.l @(TI_PRE_COUNT:16,er4),er0
- bne restore_all:8
-need_resched:
- mov.l @(TI_FLAGS:16,er4),er0
- btst #TIF_NEED_RESCHED,r0l
- beq restore_all:8
- mov.b @(LCCR+1:16,sp),r0l /* Interrupt Enabled? */
- bmi restore_all:8
- mov.l #PREEMPT_ACTIVE,er0
- mov.l er0,@(TI_PRE_COUNT:16,er4)
- andc #0x7f,ccr
- mov.l sp,er0
- jsr @_set_esp0
- jsr @_schedule
- orc #0x80,ccr
- bra need_resched:8
-#endif
-
-_ret_from_fork:
- mov.l er2,er0
- jsr @_schedule_tail
- jmp @_ret_from_exception
-
-_ret_from_kernel_thread:
- mov.l er2,er0
- jsr @_schedule_tail
- mov.l @(LER4:16,sp),er0
- mov.l @(LER5:16,sp),er1
- jsr @er1
- jmp @_ret_from_exception
-
-_resume:
- /*
- * Beware - when entering resume, offset of tss is in d1,
- * prev (the current task) is in a0, next (the new task)
- * is in a1 and d2.b is non-zero if the mm structure is
- * shared between the tasks, so don't change these
- * registers until their contents are no longer needed.
- */
-
- /* save sr */
- sub.w r3,r3
- stc ccr,r3l
- mov.w r3,@(THREAD_CCR+2:16,er0)
-
- /* disable interrupts */
- orc #0x80,ccr
- mov.l @_sw_usp,er3
- mov.l er3,@(THREAD_USP:16,er0)
- mov.l sp,@(THREAD_KSP:16,er0)
-
- /* Skip address space switching if they are the same. */
- /* FIXME: what did we hack out of here, this does nothing! */
-
- mov.l @(THREAD_USP:16,er1),er0
- mov.l er0,@_sw_usp
- mov.l @(THREAD_KSP:16,er1),sp
-
- /* restore status register */
- mov.w @(THREAD_CCR+2:16,er1),r3
-
- ldc r3l,ccr
- rts
-
-_trace_break:
- subs #4,sp
- SAVE_ALL
- sub.l er1,er1
- dec.l #1,er1
- mov.l er1,@(LORIG,sp)
- mov.l sp,er0
- jsr @_set_esp0
- mov.l @_sw_usp,er0
- mov.l @er0,er1
- mov.w @(-2:16,er1),r2
- cmp.w #0x5730,r2
- beq 1f
- subs #2,er1
- mov.l er1,@er0
-1:
- and.w #0xff,e1
- mov.l er1,er0
- jsr @_trace_trap
- jmp @_ret_from_exception
-
- .section .bss
-_sw_ksp:
- .space 4
-_sw_usp:
- .space 4
-
- .end
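The heart of _system_call above is a bounds check against NR_syscalls followed by a scaled table lookup and an indirect jsr. A hedged C rendering of that dispatch; the table contents, stub entry and ENOSYS value are illustrative, not the kernel's:

#define NR_syscalls 3
#define ENOSYS 38                       /* assumed: the usual Linux value */

typedef long (*syscall_fn)(long, long, long);

static long stub_getpid(long a0, long a1, long a2)
{
    (void)a0; (void)a1; (void)a2;
    return 1;                           /* illustrative result */
}

static syscall_fn sys_call_table[NR_syscalls] = {
    0,                                  /* unimplemented slot */
    stub_getpid,                        /* illustrative entry */
    0,
};

static long dispatch_syscall(unsigned long nr, long a0, long a1, long a2)
{
    syscall_fn fn;

    if (nr >= NR_syscalls)              /* cmp.l #NR_syscalls,er4; bcc badsys */
        return -ENOSYS;                 /* badsys: mov.l #-ENOSYS,er0 */
    fn = sys_call_table[nr];            /* SHLL2 er4; add.l; mov.l @er0,er4 */
    if (!fn)                            /* beq _ret_from_exception above */
        return -ENOSYS;
    return fn(a0, a1, a2);              /* jsr @er4 with LER1..LER3 as args */
}

int main(void)
{
    return (int)dispatch_syscall(1, 0, 0, 0);
}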
diff --git a/arch/h8300/kernel/gpio.c b/arch/h8300/kernel/gpio.c
deleted file mode 100644
index 084bfd0c107e..000000000000
--- a/arch/h8300/kernel/gpio.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * linux/arch/h8300/kernel/gpio.c
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- */
-
-/*
- * Internal I/O Port Management
- */
-
-#include <linux/stddef.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-
-#define _(addr) (volatile unsigned char *)(addr)
-#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
-#include <asm/regs306x.h>
-static volatile unsigned char *ddrs[] = {
- _(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR),
- NULL, _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR),
-};
-#define MAX_PORT 11
-#endif
-
- #if defined(CONFIG_H83002) || defined(CONFIG_H8048)
-/* Fix me!! */
-#include <asm/regs306x.h>
-static volatile unsigned char *ddrs[] = {
- _(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR),
- NULL, _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR),
-};
-#define MAX_PORT 11
-#endif
-
-#if defined(CONFIG_H8S2678)
-#include <asm/regs267x.h>
-static volatile unsigned char *ddrs[] = {
- _(P1DDR),_(P2DDR),_(P3DDR),NULL ,_(P5DDR),_(P6DDR),
- _(P7DDR),_(P8DDR),NULL, _(PADDR),_(PBDDR),_(PCDDR),
- _(PDDDR),_(PEDDR),_(PFDDR),_(PGDDR),_(PHDDR),
- _(PADDR),_(PBDDR),_(PCDDR),_(PDDDR),_(PEDDR),_(PFDDR),
- _(PGDDR),_(PHDDR)
-};
-#define MAX_PORT 17
-#endif
-#undef _
-
-#if !defined(P1DDR)
-#error Unsupported CPU Selection
-#endif
-
-static struct {
- unsigned char used;
- unsigned char ddr;
-} gpio_regs[MAX_PORT];
-
-extern char *_platform_gpio_table(int length);
-
-int h8300_reserved_gpio(int port, unsigned int bits)
-{
- unsigned char *used;
-
- if (port < 0 || port >= MAX_PORT)
- return -1;
- used = &(gpio_regs[port].used);
- if ((*used & bits) != 0)
- return 0;
- *used |= bits;
- return 1;
-}
-
-int h8300_free_gpio(int port, unsigned int bits)
-{
- unsigned char *used;
-
- if (port < 0 || port >= MAX_PORT)
- return -1;
- used = &(gpio_regs[port].used);
- if ((*used & bits) != bits)
- return 0;
- *used &= (~bits);
- return 1;
-}
-
-int h8300_set_gpio_dir(int port_bit,int dir)
-{
- int port = (port_bit >> 8) & 0xff;
- int bit = port_bit & 0xff;
-
- if (ddrs[port] == NULL)
- return 0;
- if (gpio_regs[port].used & bit) {
- if (dir)
- gpio_regs[port].ddr |= bit;
- else
- gpio_regs[port].ddr &= ~bit;
- *ddrs[port] = gpio_regs[port].ddr;
- return 1;
- } else
- return 0;
-}
-
-int h8300_get_gpio_dir(int port_bit)
-{
- int port = (port_bit >> 8) & 0xff;
- int bit = port_bit & 0xff;
-
- if (ddrs[port] == NULL)
- return 0;
- if (gpio_regs[port].used & bit) {
- return (gpio_regs[port].ddr & bit) != 0;
- } else
- return -1;
-}
-
-#if defined(CONFIG_PROC_FS)
-static char *port_status(int portno)
-{
- static char result[10];
- static const char io[2]={'I','O'};
- char *rp;
- int c;
- unsigned char used,ddr;
-
- used = gpio_regs[portno].used;
- ddr = gpio_regs[portno].ddr;
- result[8]='\0';
- rp = result + 7;
- for (c = 8; c > 0; c--,rp--,used >>= 1, ddr >>= 1)
- if (used & 0x01)
- *rp = io[ ddr & 0x01];
- else
- *rp = '-';
- return result;
-}
-
-static int gpio_proc_show(struct seq_file *m, void *v)
-{
- static const char port_name[]="123456789ABCDEFGH";
- int c;
-
- for (c = 0; c < MAX_PORT; c++) {
- if (ddrs[c] == NULL)
- continue;
- seq_printf(m, "P%c: %s\n", port_name[c], port_status(c));
- }
- return 0;
-}
-
-static int gpio_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, gpio_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations gpio_proc_fops = {
- .open = gpio_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static __init int register_proc(void)
-{
- return proc_create("gpio", S_IRUGO, NULL, &gpio_proc_fops) != NULL;
-}
-
-__initcall(register_proc);
-#endif
-
-void __init h8300_gpio_init(void)
-{
- memcpy(gpio_regs,_platform_gpio_table(sizeof(gpio_regs)),sizeof(gpio_regs));
-}
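h8300_reserved_gpio(), h8300_free_gpio() and h8300_set_gpio_dir() above make up the whole port-bit API: claim a bit, then pass the packed (port << 8) | bit value to set its direction. A hedged usage sketch; port 1, bit 0 and the "1 = output" convention are illustrative values, and the prototypes would normally come from the arch headers:

extern int h8300_reserved_gpio(int port, unsigned int bits);
extern int h8300_set_gpio_dir(int port_bit, int dir);

/* Hedged usage sketch for the helpers defined above. */
int claim_led_pin(void)
{
    int port = 1;
    unsigned int bit = 0x01;

    if (h8300_reserved_gpio(port, bit) != 1)
        return -1;                      /* bad port, or bit already claimed */
    if (!h8300_set_gpio_dir((port << 8) | bit, 1))
        return -1;                      /* port has no direction register */
    return 0;
}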
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
deleted file mode 100644
index 53d7c0e4bd83..000000000000
--- a/arch/h8300/kernel/h8300_ksyms.c
+++ /dev/null
@@ -1,100 +0,0 @@
-#include <linux/module.h>
-#include <linux/linkage.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-
-#include <asm/setup.h>
-#include <asm/pgalloc.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/checksum.h>
-#include <asm/current.h>
-#include <asm/gpio.h>
-
-//asmlinkage long long __ashrdi3 (long long, int);
-//asmlinkage long long __lshrdi3 (long long, int);
-extern char h8300_debug_device[];
-
-/* platform dependent support */
-
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strncmp);
-
-EXPORT_SYMBOL(ip_fast_csum);
-
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-
-/* The following are special because they're not called
- explicitly (the C compiler generates them). Fortunately,
- their interface isn't gonna change any time soon now, so
- it's OK to leave it out of version control. */
-//EXPORT_SYMBOL(__ashrdi3);
-//EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(memmove);
-
-/*
- * libgcc functions - functions that are used internally by the
- * compiler... (prototypes are not correct though, but that
- * doesn't really matter since they're not versioned).
- */
-extern void __gcc_bcmp(void);
-extern void __ashldi3(void);
-extern void __ashrdi3(void);
-extern void __cmpdi2(void);
-extern void __divdi3(void);
-extern void __divsi3(void);
-extern void __lshrdi3(void);
-extern void __moddi3(void);
-extern void __modsi3(void);
-extern void __muldi3(void);
-extern void __mulsi3(void);
-extern void __negdi2(void);
-extern void __ucmpdi2(void);
-extern void __udivdi3(void);
-extern void __udivmoddi4(void);
-extern void __udivsi3(void);
-extern void __umoddi3(void);
-extern void __umodsi3(void);
-
- /* gcc lib functions */
-EXPORT_SYMBOL(__gcc_bcmp);
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__cmpdi2);
-EXPORT_SYMBOL(__divdi3);
-EXPORT_SYMBOL(__divsi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__moddi3);
-EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__muldi3);
-EXPORT_SYMBOL(__mulsi3);
-EXPORT_SYMBOL(__negdi2);
-EXPORT_SYMBOL(__ucmpdi2);
-EXPORT_SYMBOL(__udivdi3);
-EXPORT_SYMBOL(__udivmoddi4);
-EXPORT_SYMBOL(__udivsi3);
-EXPORT_SYMBOL(__umoddi3);
-EXPORT_SYMBOL(__umodsi3);
-
-EXPORT_SYMBOL(h8300_reserved_gpio);
-EXPORT_SYMBOL(h8300_free_gpio);
-EXPORT_SYMBOL(h8300_set_gpio_dir);
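The __muldi3/__udivdi3 family is exported because gcc lowers 64-bit arithmetic on this 32-bit CPU into libgcc helper calls, and any module doing such arithmetic needs the kernel to supply them. A hedged illustration of code that would emit those calls:

#include <stdint.h>

uint64_t scale_ticks(uint64_t ticks, uint64_t hz)
{
    return ticks * hz;                  /* typically becomes a __muldi3 call */
}

uint64_t ns_to_ms(uint64_t ns)
{
    return ns / 1000000;                /* may become a __udivdi3 call */
}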
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
deleted file mode 100644
index 2fa8ac7b79b5..000000000000
--- a/arch/h8300/kernel/irq.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * linux/arch/h8300/kernel/irq.c
- *
- * Copyright 2007 Yoshinori Sato <ysato@users.sourceforge.jp>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/random.h>
-#include <linux/bootmem.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-
-#include <asm/traps.h>
-#include <asm/io.h>
-#include <asm/setup.h>
-#include <asm/errno.h>
-
-/*#define DEBUG*/
-
-extern unsigned long *interrupt_redirect_table;
-extern const int h8300_saved_vectors[];
-extern const h8300_vector h8300_trap_table[];
-int h8300_enable_irq_pin(unsigned int irq);
-void h8300_disable_irq_pin(unsigned int irq);
-
-#define CPU_VECTOR ((unsigned long *)0x000000)
-#define ADDR_MASK (0xffffff)
-
-static inline int is_ext_irq(unsigned int irq)
-{
- return (irq >= EXT_IRQ0 && irq <= (EXT_IRQ0 + EXT_IRQS));
-}
-
-static void h8300_enable_irq(struct irq_data *data)
-{
- if (is_ext_irq(data->irq))
- IER_REGS |= 1 << (data->irq - EXT_IRQ0);
-}
-
-static void h8300_disable_irq(struct irq_data *data)
-{
- if (is_ext_irq(data->irq))
- IER_REGS &= ~(1 << (data->irq - EXT_IRQ0));
-}
-
-static unsigned int h8300_startup_irq(struct irq_data *data)
-{
- if (is_ext_irq(data->irq))
- return h8300_enable_irq_pin(data->irq);
- else
- return 0;
-}
-
-static void h8300_shutdown_irq(struct irq_data *data)
-{
- if (is_ext_irq(data->irq))
- h8300_disable_irq_pin(data->irq);
-}
-
-/*
- * h8300 interrupt controller implementation
- */
-struct irq_chip h8300irq_chip = {
- .name = "H8300-INTC",
- .irq_startup = h8300_startup_irq,
- .irq_shutdown = h8300_shutdown_irq,
- .irq_enable = h8300_enable_irq,
- .irq_disable = h8300_disable_irq,
-};
-
-#if defined(CONFIG_RAMKERNEL)
-static unsigned long __init *get_vector_address(void)
-{
- unsigned long *rom_vector = CPU_VECTOR;
- unsigned long base,tmp;
- int vec_no;
-
- base = rom_vector[EXT_IRQ0] & ADDR_MASK;
-
- /* check romvector format */
- for (vec_no = EXT_IRQ1; vec_no <= EXT_IRQ0+EXT_IRQS; vec_no++) {
- if ((base+(vec_no - EXT_IRQ0)*4) != (rom_vector[vec_no] & ADDR_MASK))
- return NULL;
- }
-
- /* ramvector base address */
- base -= EXT_IRQ0*4;
-
-	/* writable check */
- tmp = ~(*(volatile unsigned long *)base);
- (*(volatile unsigned long *)base) = tmp;
- if ((*(volatile unsigned long *)base) != tmp)
- return NULL;
- return (unsigned long *)base;
-}
-
-static void __init setup_vector(void)
-{
- int i;
- unsigned long *ramvec,*ramvec_p;
- const h8300_vector *trap_entry;
- const int *saved_vector;
-
- ramvec = get_vector_address();
- if (ramvec == NULL)
-		panic("interrupt vector setup failed.");
- else
- printk(KERN_INFO "virtual vector at 0x%08lx\n",(unsigned long)ramvec);
-
- /* create redirect table */
- ramvec_p = ramvec;
- trap_entry = h8300_trap_table;
- saved_vector = h8300_saved_vectors;
- for ( i = 0; i < NR_IRQS; i++) {
- if (i == *saved_vector) {
- ramvec_p++;
- saved_vector++;
- } else {
- if ( i < NR_TRAPS ) {
- if (*trap_entry)
- *ramvec_p = VECTOR(*trap_entry);
- ramvec_p++;
- trap_entry++;
- } else
- *ramvec_p++ = REDIRECT(interrupt_entry);
- }
- }
- interrupt_redirect_table = ramvec;
-#ifdef DEBUG
- ramvec_p = ramvec;
- for (i = 0; i < NR_IRQS; i++) {
- if ((i % 8) == 0)
- printk(KERN_DEBUG "\n%p: ",ramvec_p);
- printk(KERN_DEBUG "%p ",*ramvec_p);
- ramvec_p++;
- }
- printk(KERN_DEBUG "\n");
-#endif
-}
-#else
-#define setup_vector() do { } while(0)
-#endif
-
-void __init init_IRQ(void)
-{
- int c;
-
- setup_vector();
-
- for (c = 0; c < NR_IRQS; c++)
- irq_set_chip_and_handler(c, &h8300irq_chip, handle_simple_irq);
-}
-
-asmlinkage void do_IRQ(int irq)
-{
- irq_enter();
- generic_handle_irq(irq);
- irq_exit();
-}
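init_IRQ() above attaches h8300irq_chip with handle_simple_irq to every line, after which drivers hook in through the generic request_irq() path. A hedged sketch of such a consumer; "EXT_IRQ0 + 3" (the arch macro used above) and the name "demo" are illustrative:

#include <linux/interrupt.h>

static irqreturn_t demo_isr(int irq, void *dev_id)
{
    /* acknowledge and handle the device here */
    return IRQ_HANDLED;
}

static int demo_attach(void)
{
    int irq = EXT_IRQ0 + 3;             /* one of the external IRQ lines */

    return request_irq(irq, demo_isr, 0, "demo", NULL);
}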
diff --git a/arch/h8300/kernel/module.c b/arch/h8300/kernel/module.c
deleted file mode 100644
index 1d526e05db19..000000000000
--- a/arch/h8300/kernel/module.c
+++ /dev/null
@@ -1,75 +0,0 @@
-#include <linux/moduleloader.h>
-#include <linux/elf.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(fmt...)
-#endif
-
-int apply_relocate_add(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- unsigned int i;
- Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
-
- DEBUGP("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
- /* This is where to make the change */
- uint32_t *loc = (uint32_t *)(sechdrs[sechdrs[relsec].sh_info].sh_addr
- + rela[i].r_offset);
- /* This is the symbol it is referring to. Note that all
- undefined symbols have been resolved. */
- Elf32_Sym *sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
- + ELF32_R_SYM(rela[i].r_info);
- uint32_t v = sym->st_value + rela[i].r_addend;
-
- switch (ELF32_R_TYPE(rela[i].r_info)) {
- case R_H8_DIR24R8:
- loc = (uint32_t *)((uint32_t)loc - 1);
- *loc = (*loc & 0xff000000) | ((*loc & 0xffffff) + v);
- break;
- case R_H8_DIR24A8:
- if (ELF32_R_SYM(rela[i].r_info))
- *loc += v;
- break;
- case R_H8_DIR32:
- case R_H8_DIR32A16:
- *loc += v;
- break;
- case R_H8_PCREL16:
- v -= (unsigned long)loc + 2;
- if ((Elf32_Sword)v > 0x7fff ||
- (Elf32_Sword)v < -(Elf32_Sword)0x8000)
- goto overflow;
- else
- *(unsigned short *)loc = v;
- break;
- case R_H8_PCREL8:
- v -= (unsigned long)loc + 1;
- if ((Elf32_Sword)v > 0x7f ||
- (Elf32_Sword)v < -(Elf32_Sword)0x80)
- goto overflow;
- else
- *(unsigned char *)loc = v;
- break;
- default:
- printk(KERN_ERR "module %s: Unknown relocation: %u\n",
- me->name, ELF32_R_TYPE(rela[i].r_info));
- return -ENOEXEC;
- }
- }
- return 0;
- overflow:
- printk(KERN_ERR "module %s: relocation offset overflow: %08x\n",
- me->name, rela[i].r_offset);
- return -ENOEXEC;
-}
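The two PC-relative cases above subtract the address just past the relocated field and range-check the signed displacement. A standalone hedged rendering of the 16-bit case, with the target ("place") address passed in explicitly and the big-endian store made byte-wise, as on the H8/300:

#include <stdint.h>

int apply_pcrel16(uint8_t *field, uint32_t place,
                  uint32_t sym_value, int32_t addend)
{
    int32_t disp = (int32_t)(sym_value + addend - (place + 2));

    if (disp > 0x7fff || disp < -0x8000)
        return -1;                      /* overflow, like -ENOEXEC above */
    field[0] = (disp >> 8) & 0xff;
    field[1] = disp & 0xff;
    return 0;
}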
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
deleted file mode 100644
index 1a744ab7e7e5..000000000000
--- a/arch/h8300/kernel/process.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * linux/arch/h8300/kernel/process.c
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Based on:
- *
- * linux/arch/m68knommu/kernel/process.c
- *
- * Copyright (C) 1998 D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>,
- * Kenneth Albanowski <kjahds@kjahds.com>,
- * The Silver Hammer Group, Ltd.
- *
- * linux/arch/m68k/kernel/process.c
- *
- * Copyright (C) 1995 Hamish Macdonald
- *
- * 68060 fixes by Jesper Skov
- */
-
-/*
- * This file handles the architecture-dependent parts of process handling..
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/interrupt.h>
-#include <linux/reboot.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/rcupdate.h>
-
-#include <asm/uaccess.h>
-#include <asm/traps.h>
-#include <asm/setup.h>
-#include <asm/pgtable.h>
-
-void (*pm_power_off)(void) = NULL;
-EXPORT_SYMBOL(pm_power_off);
-
-asmlinkage void ret_from_fork(void);
-asmlinkage void ret_from_kernel_thread(void);
-
-/*
- * The idle loop on an H8/300..
- */
-#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
-void arch_cpu_idle(void)
-{
- local_irq_enable();
- /* XXX: race here! What if need_resched() gets set now? */
- __asm__("sleep");
-}
-#endif
-
-void machine_restart(char * __unused)
-{
- local_irq_disable();
- __asm__("jmp @@0");
-}
-
-void machine_halt(void)
-{
- local_irq_disable();
- __asm__("sleep");
- for (;;);
-}
-
-void machine_power_off(void)
-{
- local_irq_disable();
- __asm__("sleep");
- for (;;);
-}
-
-void show_regs(struct pt_regs * regs)
-{
- show_regs_print_info(KERN_DEFAULT);
-
- printk("\nPC: %08lx Status: %02x",
- regs->pc, regs->ccr);
- printk("\nORIG_ER0: %08lx ER0: %08lx ER1: %08lx",
- regs->orig_er0, regs->er0, regs->er1);
- printk("\nER2: %08lx ER3: %08lx ER4: %08lx ER5: %08lx",
- regs->er2, regs->er3, regs->er4, regs->er5);
-	printk("\nER6: %08lx ",regs->er6);
- if (user_mode(regs))
- printk("USP: %08lx\n", rdusp());
- else
- printk("\n");
-}
-
-void flush_thread(void)
-{
-}
-
-int copy_thread(unsigned long clone_flags,
- unsigned long usp, unsigned long topstk,
- struct task_struct * p)
-{
- struct pt_regs * childregs;
-
- childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;
-
- if (unlikely(p->flags & PF_KTHREAD)) {
- memset(childregs, 0, sizeof(struct pt_regs));
- childregs->retpc = (unsigned long) ret_from_kernel_thread;
- childregs->er4 = topstk; /* arg */
- childregs->er5 = usp; /* fn */
- p->thread.ksp = (unsigned long)childregs;
- }
- *childregs = *current_pt_regs();
- childregs->retpc = (unsigned long) ret_from_fork;
- childregs->er0 = 0;
- p->thread.usp = usp ?: rdusp();
- p->thread.ksp = (unsigned long)childregs;
-
- return 0;
-}
-
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- return ((struct pt_regs *)tsk->thread.esp0)->pc;
-}
-
-unsigned long get_wchan(struct task_struct *p)
-{
- unsigned long fp, pc;
- unsigned long stack_page;
- int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
-
- stack_page = (unsigned long)p;
- fp = ((struct pt_regs *)p->thread.ksp)->er6;
- do {
- if (fp < stack_page+sizeof(struct thread_info) ||
- fp >= 8184+stack_page)
- return 0;
- pc = ((unsigned long *)fp)[1];
- if (!in_sched_functions(pc))
- return pc;
- fp = *(unsigned long *) fp;
- } while (count++ < 16);
- return 0;
-}
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
deleted file mode 100644
index 748cf6585aa4..000000000000
--- a/arch/h8300/kernel/ptrace.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * linux/arch/h8300/kernel/ptrace.c
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Based on:
- * linux/arch/m68k/kernel/ptrace.c
- *
- * Copyright (C) 1994 by Hamish Macdonald
- * Taken from linux/kernel/ptrace.c and modified for M680x0.
- * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file COPYING in the main directory of
- * this archive for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/signal.h>
-
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/signal.h>
-
-/* cpu depend functions */
-extern long h8300_get_reg(struct task_struct *task, int regno);
-extern int h8300_put_reg(struct task_struct *task, int regno, unsigned long data);
-
-
-void user_disable_single_step(struct task_struct *child)
-{
-}
-
-/*
- * This does not yet catch signals sent when the child dies;
- * that belongs in exit.c or in signal.c.
- */
-
-void ptrace_disable(struct task_struct *child)
-{
- user_disable_single_step(child);
-}
-
-long arch_ptrace(struct task_struct *child, long request,
- unsigned long addr, unsigned long data)
-{
- int ret;
- int regno = addr >> 2;
- unsigned long __user *datap = (unsigned long __user *) data;
-
- switch (request) {
- /* read the word at location addr in the USER area. */
- case PTRACE_PEEKUSR: {
- unsigned long tmp = 0;
-
- if ((addr & 3) || addr >= sizeof(struct user)) {
- ret = -EIO;
- break ;
- }
-
- ret = 0; /* Default return condition */
-
- if (regno < H8300_REGS_NO)
- tmp = h8300_get_reg(child, regno);
- else {
- switch (regno) {
- case 49:
- tmp = child->mm->start_code;
- break ;
- case 50:
- tmp = child->mm->start_data;
- break ;
- case 51:
- tmp = child->mm->end_code;
- break ;
- case 52:
- tmp = child->mm->end_data;
- break ;
- default:
- ret = -EIO;
- }
- }
- if (!ret)
- ret = put_user(tmp, datap);
- break ;
- }
-
- /* when I and D space are separate, this will have to be fixed. */
- case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
- if ((addr & 3) || addr >= sizeof(struct user)) {
- ret = -EIO;
- break ;
- }
-
- if (regno == PT_ORIG_ER0) {
- ret = -EIO;
- break ;
- }
- if (regno < H8300_REGS_NO) {
- ret = h8300_put_reg(child, regno, data);
- break ;
- }
- ret = -EIO;
- break ;
-
- case PTRACE_GETREGS: { /* Get all gp regs from the child. */
- int i;
- unsigned long tmp;
- for (i = 0; i < H8300_REGS_NO; i++) {
- tmp = h8300_get_reg(child, i);
- if (put_user(tmp, datap)) {
- ret = -EFAULT;
- break;
- }
- datap++;
- }
- ret = 0;
- break;
- }
-
- case PTRACE_SETREGS: { /* Set all gp regs in the child. */
- int i;
- unsigned long tmp;
- for (i = 0; i < H8300_REGS_NO; i++) {
- if (get_user(tmp, datap)) {
- ret = -EFAULT;
- break;
- }
- h8300_put_reg(child, i, tmp);
- datap++;
- }
- ret = 0;
- break;
- }
-
- default:
- ret = ptrace_request(child, request, addr, data);
- break;
- }
- return ret;
-}
-
-asmlinkage void do_syscall_trace(void)
-{
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return;
- if (!(current->ptrace & PT_PTRACED))
- return;
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0));
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
-}
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
deleted file mode 100644
index d0b1607f2711..000000000000
--- a/arch/h8300/kernel/setup.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * linux/arch/h8300/kernel/setup.c
- *
- * Copyleft ()) 2000 James D. Schettine {james@telos-systems.com}
- * Copyright (C) 1999,2000 Greg Ungerer (gerg@snapgear.com)
- * Copyright (C) 1998,1999 D. Jeff Dionne <jeff@lineo.ca>
- * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
- * Copyright (C) 1995 Hamish Macdonald
- * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
- * Copyright (C) 2001 Lineo, Inc. <www.lineo.com>
- *
- * H8/300 porting Yoshinori Sato <ysato@users.sourceforge.jp>
- */
-
-/*
- * This file handles the architecture-dependent parts of system setup
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/fb.h>
-#include <linux/console.h>
-#include <linux/genhd.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/major.h>
-#include <linux/bootmem.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-
-#include <asm/setup.h>
-#include <asm/irq.h>
-#include <asm/pgtable.h>
-#include <asm/sections.h>
-
-#if defined(__H8300H__)
-#define CPU "H8/300H"
-#include <asm/regs306x.h>
-#endif
-
-#if defined(__H8300S__)
-#define CPU "H8S"
-#include <asm/regs267x.h>
-#endif
-
-#define STUBSIZE 0xc000
-
-unsigned long rom_length;
-unsigned long memory_start;
-unsigned long memory_end;
-
-char __initdata command_line[COMMAND_LINE_SIZE];
-
-extern int _ramstart, _ramend;
-extern char _target_name[];
-extern void h8300_gpio_init(void);
-
-#if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) \
- && defined(CONFIG_GDB_MAGICPRINT)
-/* printk with gdb service */
-static void gdb_console_output(struct console *c, const char *msg, unsigned len)
-{
- for (; len > 0; len--) {
- asm("mov.w %0,r2\n\t"
- "jsr @0xc4"::"r"(*msg++):"er2");
- }
-}
-
-/*
- * Setup initial baud/bits/parity. We do two things here:
- * - construct a cflag setting for the first rs_open()
- * - initialize the serial port
- * Return non-zero if we didn't find a serial port.
- */
-static int __init gdb_console_setup(struct console *co, char *options)
-{
- return 0;
-}
-
-static const struct console gdb_console = {
- .name = "gdb_con",
- .write = gdb_console_output,
- .device = NULL,
- .setup = gdb_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-#endif
-
-void __init setup_arch(char **cmdline_p)
-{
- int bootmap_size;
-
- memory_start = (unsigned long) &_ramstart;
-
- /* allow for ROMFS on the end of the kernel */
- if (memcmp((void *)memory_start, "-rom1fs-", 8) == 0) {
-#if defined(CONFIG_BLK_DEV_INITRD)
- initrd_start = memory_start;
- initrd_end = memory_start += be32_to_cpu(((unsigned long *) (memory_start))[2]);
-#else
- memory_start += be32_to_cpu(((unsigned long *) memory_start)[2]);
-#endif
- }
- memory_start = PAGE_ALIGN(memory_start);
-#if !defined(CONFIG_BLKDEV_RESERVE)
- memory_end = (unsigned long) &_ramend; /* by now the stack is part of the init task */
-#if defined(CONFIG_GDB_DEBUG)
- memory_end -= STUBSIZE;
-#endif
-#else
- if ((memory_end < CONFIG_BLKDEV_RESERVE_ADDRESS) &&
- (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS))
- /* overlap userarea */
- memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS;
-#endif
-
- init_mm.start_code = (unsigned long) _stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) 0;
-
-#if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT)
- register_console((struct console *)&gdb_console);
-#endif
-
- printk(KERN_INFO "\r\n\nuClinux " CPU "\n");
- printk(KERN_INFO "Target Hardware: %s\n",_target_name);
- printk(KERN_INFO "Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");
- printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n");
-
-#ifdef DEBUG
- printk(KERN_DEBUG "KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p "
- "BSS=0x%p-0x%p\n", _stext, _etext, _sdata, _edata, __bss_start,
- __bss_stop);
- printk(KERN_DEBUG "KERNEL -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx "
- "STACK=0x%06lx-0x%p\n", __bss_stop, memory_start, memory_start,
- memory_end, memory_end, &_ramend);
-#endif
-
-#ifdef CONFIG_DEFAULT_CMDLINE
- /* set from default command line */
- if (*command_line == '\0')
- strcpy(command_line,CONFIG_KERNEL_COMMAND);
-#endif
- /* Keep a copy of command line */
- *cmdline_p = &command_line[0];
- memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
- boot_command_line[COMMAND_LINE_SIZE-1] = 0;
-
-#ifdef DEBUG
- if (strlen(*cmdline_p))
- printk(KERN_DEBUG "Command line: '%s'\n", *cmdline_p);
-#endif
-
- /*
- * give all the memory to the bootmap allocator, tell it to put the
- * boot mem_map at the start of memory
- */
- bootmap_size = init_bootmem_node(
- NODE_DATA(0),
- memory_start >> PAGE_SHIFT, /* map goes here */
- PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */
- memory_end >> PAGE_SHIFT);
- /*
- * free the usable memory, we have to make sure we do not free
- * the bootmem bitmap so we then reserve it after freeing it :-)
- */
- free_bootmem(memory_start, memory_end - memory_start);
- reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
- /*
- * get kmalloc into gear
- */
- paging_init();
- h8300_gpio_init();
-#if defined(CONFIG_H8300_AKI3068NET) && defined(CONFIG_IDE)
- {
-#define AREABIT(addr) (1 << (((addr) >> 21) & 7))
- /* setup BSC */
- volatile unsigned char *abwcr = (volatile unsigned char *)ABWCR;
- volatile unsigned char *cscr = (volatile unsigned char *)CSCR;
- *abwcr &= ~(AREABIT(CONFIG_H8300_IDE_BASE) | AREABIT(CONFIG_H8300_IDE_ALT));
- *cscr |= (AREABIT(CONFIG_H8300_IDE_BASE) | AREABIT(CONFIG_H8300_IDE_ALT)) | 0x0f;
- }
-#endif
-#ifdef DEBUG
- printk(KERN_DEBUG "Done setup_arch\n");
-#endif
-}
-
-/*
- * Get CPU information for use by the procfs.
- */
-
-static int show_cpuinfo(struct seq_file *m, void *v)
-{
- char *cpu;
- int mode;
- u_long clockfreq;
-
- cpu = CPU;
- mode = *(volatile unsigned char *)MDCR & 0x07;
-
- clockfreq = CONFIG_CPU_CLOCK;
-
- seq_printf(m, "CPU:\t\t%s (mode:%d)\n"
- "Clock:\t\t%lu.%1luMHz\n"
- "BogoMips:\t%lu.%02lu\n"
- "Calibration:\t%lu loops\n",
- cpu,mode,
- clockfreq/1000,clockfreq%1000,
- (loops_per_jiffy*HZ)/500000,((loops_per_jiffy*HZ)/5000)%100,
- (loops_per_jiffy*HZ));
-
- return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
- return *pos < NR_CPUS ? ((void *) 0x12345678) : NULL;
-}
-
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
- ++*pos;
- return c_start(m, pos);
-}
-
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-
-const struct seq_operations cpuinfo_op = {
- .start = c_start,
- .next = c_next,
- .stop = c_stop,
- .show = show_cpuinfo,
-};
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
deleted file mode 100644
index a65ff3b76326..000000000000
--- a/arch/h8300/kernel/signal.c
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
- * linux/arch/h8300/kernel/signal.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-/*
- * uClinux H8/300 support by Yoshinori Sato <ysato@users.sourceforge.jp>
- * and David McCullough <davidm@snapgear.com>
- *
- * Based on
- * Linux/m68k by Hamish Macdonald
- */
-
-/*
- * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
- * Atari :-) Current limitation: Only one sigstack can be active at one time.
- * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
- * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
- * signal handlers!
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/syscalls.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/highuid.h>
-#include <linux/personality.h>
-#include <linux/tty.h>
-#include <linux/binfmts.h>
-#include <linux/tracehook.h>
-
-#include <asm/setup.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/traps.h>
-#include <asm/ucontext.h>
-
-/*
- * Do a signal return; undo the signal stack.
- *
- * Keep the return code on the stack quadword aligned!
- * That makes the cache flush below easier.
- */
-
-struct sigframe
-{
- long dummy_er0;
- long dummy_vector;
-#if defined(CONFIG_CPU_H8S)
- short dummy_exr;
-#endif
- long dummy_pc;
- char *pretcode;
- unsigned char retcode[8];
- unsigned long extramask[_NSIG_WORDS-1];
- struct sigcontext sc;
- int sig;
-} __attribute__((aligned(2),packed));
-
-struct rt_sigframe
-{
- long dummy_er0;
- long dummy_vector;
-#if defined(CONFIG_CPU_H8S)
- short dummy_exr;
-#endif
- long dummy_pc;
- char *pretcode;
- struct siginfo *pinfo;
- void *puc;
- unsigned char retcode[8];
- struct siginfo info;
- struct ucontext uc;
- int sig;
-} __attribute__((aligned(2),packed));
-
-static inline int
-restore_sigcontext(struct sigcontext *usc, int *pd0)
-{
- struct pt_regs *regs = current_pt_regs();
- int err = 0;
- unsigned int ccr;
- unsigned int usp;
- unsigned int er0;
-
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-#define COPY(r) err |= __get_user(regs->r, &usc->sc_##r) /* restore passed registers */
- COPY(er1);
- COPY(er2);
- COPY(er3);
- COPY(er5);
- COPY(pc);
- ccr = regs->ccr & 0x10;
- COPY(ccr);
-#undef COPY
- regs->ccr &= 0xef;
- regs->ccr |= ccr;
- regs->orig_er0 = -1; /* disable syscall checks */
- err |= __get_user(usp, &usc->sc_usp);
- wrusp(usp);
-
- err |= __get_user(er0, &usc->sc_er0);
- *pd0 = er0;
- return err;
-}
-
-asmlinkage int sys_sigreturn(void)
-{
- unsigned long usp = rdusp();
- struct sigframe *frame = (struct sigframe *)(usp - 4);
- sigset_t set;
- int er0;
-
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
- (_NSIG_WORDS > 1 &&
- __copy_from_user(&set.sig[1], &frame->extramask,
- sizeof(frame->extramask))))
- goto badframe;
-
- set_current_blocked(&set);
-
- if (restore_sigcontext(&frame->sc, &er0))
- goto badframe;
- return er0;
-
-badframe:
- force_sig(SIGSEGV, current);
- return 0;
-}
-
-asmlinkage int sys_rt_sigreturn(void)
-{
- unsigned long usp = rdusp();
- struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4);
- sigset_t set;
- int er0;
-
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
- goto badframe;
-
- set_current_blocked(&set);
-
- if (restore_sigcontext(&frame->uc.uc_mcontext, &er0))
- goto badframe;
-
- if (restore_altstack(&frame->uc.uc_stack))
- goto badframe;
-
- return er0;
-
-badframe:
- force_sig(SIGSEGV, current);
- return 0;
-}
-
-static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
- unsigned long mask)
-{
- int err = 0;
-
- err |= __put_user(regs->er0, &sc->sc_er0);
- err |= __put_user(regs->er1, &sc->sc_er1);
- err |= __put_user(regs->er2, &sc->sc_er2);
- err |= __put_user(regs->er3, &sc->sc_er3);
- err |= __put_user(regs->er4, &sc->sc_er4);
- err |= __put_user(regs->er5, &sc->sc_er5);
- err |= __put_user(regs->er6, &sc->sc_er6);
- err |= __put_user(rdusp(), &sc->sc_usp);
- err |= __put_user(regs->pc, &sc->sc_pc);
- err |= __put_user(regs->ccr, &sc->sc_ccr);
- err |= __put_user(mask, &sc->sc_mask);
-
- return err;
-}
-
-static inline void *
-get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
-{
- unsigned long usp;
-
- /* Default to using normal stack. */
- usp = rdusp();
-
- /* This is the X/Open sanctioned signal stack switching. */
- if (ka->sa.sa_flags & SA_ONSTACK) {
- if (!sas_ss_flags(usp))
- usp = current->sas_ss_sp + current->sas_ss_size;
- }
- return (void *)((usp - frame_size) & -8UL);
-}
-
-static int setup_frame (int sig, struct k_sigaction *ka,
- sigset_t *set, struct pt_regs *regs)
-{
- struct sigframe *frame;
- int err = 0;
- int usig;
- unsigned char *ret;
-
- frame = get_sigframe(ka, regs, sizeof(*frame));
-
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- goto give_sigsegv;
-
- usig = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
- err |= __put_user(usig, &frame->sig);
- if (err)
- goto give_sigsegv;
-
- err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
- if (err)
- goto give_sigsegv;
-
- if (_NSIG_WORDS > 1) {
- err |= copy_to_user(frame->extramask, &set->sig[1],
- sizeof(frame->extramask));
- if (err)
- goto give_sigsegv;
- }
-
- ret = frame->retcode;
- if (ka->sa.sa_flags & SA_RESTORER)
- ret = (unsigned char *)(ka->sa.sa_restorer);
- else {
- /* sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0 */
- err |= __put_user(0x1a80f800 + (__NR_sigreturn & 0xff),
- (unsigned long *)(frame->retcode + 0));
- err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4));
- }
-
- /* Set up to return from userspace. */
- err |= __put_user(ret, &frame->pretcode);
-
- if (err)
- goto give_sigsegv;
-
- /* Set up registers for signal handler */
- wrusp ((unsigned long) frame);
- regs->pc = (unsigned long) ka->sa.sa_handler;
- regs->er0 = (current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig);
- regs->er1 = (unsigned long)&(frame->sc);
- regs->er5 = current->mm->start_data; /* GOT base */
-
- return 0;
-
-give_sigsegv:
- force_sigsegv(sig, current);
- return -EFAULT;
-}
-
-static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *set, struct pt_regs *regs)
-{
- struct rt_sigframe *frame;
- int err = 0;
- int usig;
- unsigned char *ret;
-
- frame = get_sigframe(ka, regs, sizeof(*frame));
-
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- goto give_sigsegv;
-
- usig = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
- err |= __put_user(usig, &frame->sig);
- if (err)
- goto give_sigsegv;
-
- err |= __put_user(&frame->info, &frame->pinfo);
- err |= __put_user(&frame->uc, &frame->puc);
- err |= copy_siginfo_to_user(&frame->info, info);
- if (err)
- goto give_sigsegv;
-
- /* Create the ucontext. */
- err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(0, &frame->uc.uc_link);
- err |= __save_altstack(&frame->uc.uc_stack, rdusp());
- err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
- err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
- if (err)
- goto give_sigsegv;
-
- /* Set up to return from userspace. */
- ret = frame->retcode;
- if (ka->sa.sa_flags & SA_RESTORER)
- ret = (unsigned char *)(ka->sa.sa_restorer);
- else {
- /* sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0 */
- err |= __put_user(0x1a80f800 + (__NR_sigreturn & 0xff),
- (unsigned long *)(frame->retcode + 0));
- err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4));
- }
- err |= __put_user(ret, &frame->pretcode);
-
- if (err)
- goto give_sigsegv;
-
- /* Set up registers for signal handler */
- wrusp ((unsigned long) frame);
- regs->pc = (unsigned long) ka->sa.sa_handler;
- regs->er0 = (current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig);
- regs->er1 = (unsigned long)&(frame->info);
- regs->er2 = (unsigned long)&frame->uc;
- regs->er5 = current->mm->start_data; /* GOT base */
-
- return 0;
-
-give_sigsegv:
- force_sigsegv(sig, current);
- return -EFAULT;
-}
-
-/*
- * OK, we're invoking a handler
- */
-static void
-handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
- struct pt_regs * regs)
-{
- sigset_t *oldset = sigmask_to_save();
- int ret;
- /* are we from a system call? */
- if (regs->orig_er0 >= 0) {
- switch (regs->er0) {
- case -ERESTART_RESTARTBLOCK:
- case -ERESTARTNOHAND:
- regs->er0 = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (!(ka->sa.sa_flags & SA_RESTART)) {
- regs->er0 = -EINTR;
- break;
- }
- /* fallthrough */
- case -ERESTARTNOINTR:
- regs->er0 = regs->orig_er0;
- regs->pc -= 2;
- }
- }
-
- /* set up the stack frame */
- if (ka->sa.sa_flags & SA_SIGINFO)
- ret = setup_rt_frame(sig, ka, info, oldset, regs);
- else
- ret = setup_frame(sig, ka, oldset, regs);
-
- if (!ret)
- signal_delivered(sig, info, ka, regs, 0);
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-static void do_signal(struct pt_regs *regs)
-{
- siginfo_t info;
- int signr;
- struct k_sigaction ka;
-
- /*
- * We want the common case to go fast, which
- * is why we may in certain cases get here from
- * kernel mode. Just return without doing anything
- * if so.
- */
- if ((regs->ccr & 0x10))
- return;
-
- current->thread.esp0 = (unsigned long) regs;
-
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0) {
- /* Whee! Actually deliver the signal. */
- handle_signal(signr, &info, &ka, regs);
- return;
- }
- /* Did we come from a system call? */
- if (regs->orig_er0 >= 0) {
- /* Restart the system call - no handlers present */
- if (regs->er0 == -ERESTARTNOHAND ||
- regs->er0 == -ERESTARTSYS ||
- regs->er0 == -ERESTARTNOINTR) {
- regs->er0 = regs->orig_er0;
- regs->pc -= 2;
- }
- if (regs->er0 == -ERESTART_RESTARTBLOCK){
- regs->er0 = __NR_restart_syscall;
- regs->pc -= 2;
- }
- }
-
- /* If there's no signal to deliver, we just restore the saved mask. */
- restore_saved_sigmask();
-}
-
-asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
-{
- if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(regs);
-
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- }
-}
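
The retcode trampoline that setup_frame() and setup_rt_frame() above drop into the signal frame packs two H8/300 instructions into six bytes: the 32-bit word 0x1a80f800 + (__NR_sigreturn & 0xff) encodes "sub.l er0,er0; mov.b #__NR_sigreturn,r0l", and 0x5700 is "trapa #0", which re-enters the kernel so sys_sigreturn() can run. A minimal host-side sketch of that byte layout, assuming the H8/300's big-endian store order; the helper name is illustrative and not part of the kernel source:

#include <stdint.h>

/* Illustrative sketch: lay out the 6-byte sigreturn trampoline the way
 * setup_frame() stores it in frame->retcode (big-endian stores, as
 * __put_user() produces on the big-endian H8/300).  retcode[] is 8
 * bytes; the last two stay unused. */
static void build_retcode(unsigned char buf[8], unsigned int nr_sigreturn)
{
	uint32_t insn1 = 0x1a80f800u + (nr_sigreturn & 0xffu); /* sub.l er0,er0; mov.b #nr,r0l */
	uint16_t insn2 = 0x5700u;                              /* trapa #0 */

	buf[0] = insn1 >> 24;
	buf[1] = insn1 >> 16;
	buf[2] = insn1 >> 8;
	buf[3] = insn1 & 0xff;
	buf[4] = insn2 >> 8;
	buf[5] = insn2 & 0xff;
	buf[6] = buf[7] = 0;
}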
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c
deleted file mode 100644
index bf350cb7f597..000000000000
--- a/arch/h8300/kernel/sys_h8300.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * linux/arch/h8300/kernel/sys_h8300.c
- *
- * This file contains various random system calls that
- * have a non-standard calling sequence on the H8/300
- * platform.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/stat.h>
-#include <linux/syscalls.h>
-#include <linux/mman.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/ipc.h>
-
-#include <asm/setup.h>
-#include <asm/uaccess.h>
-#include <asm/cachectl.h>
-#include <asm/traps.h>
-#include <asm/unistd.h>
-
-/* sys_cacheflush -- no support. */
-asmlinkage int
-sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
-{
- return -EINVAL;
-}
-
-asmlinkage int sys_getpagesize(void)
-{
- return PAGE_SIZE;
-}
-
-#if defined(CONFIG_SYSCALL_PRINT)
-asmlinkage void syscall_print(void *dummy,...)
-{
- struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4);
- printk("call %06lx:%ld 1:%08lx,2:%08lx,3:%08lx,ret:%08lx\n",
- ((regs->pc)&0xffffff)-2,regs->orig_er0,regs->er1,regs->er2,regs->er3,regs->er0);
-}
-#endif
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
deleted file mode 100644
index c55e0ed270d5..000000000000
--- a/arch/h8300/kernel/syscalls.S
+++ /dev/null
@@ -1,338 +0,0 @@
-/* Systemcall Entry Table */
-#include <linux/sys.h>
-#include <asm/linkage.h>
-#include <asm/unistd.h>
-
-#define CALL(x) .long _ ## x
-
-.globl _sys_call_table
-
-#if defined(CONFIG_CPU_H8300H)
- .h8300h
-#endif
-#if defined(CONFIG_CPU_H8S)
- .h8300s
-#endif
- .section .text
- .align 2
-_sys_call_table:
- CALL(sys_ni_syscall) /* 0 - old "setup()" system call*/
- CALL(sys_exit)
- CALL(sys_fork)
- CALL(sys_read)
- CALL(sys_write)
- CALL(sys_open) /* 5 */
- CALL(sys_close)
- CALL(sys_waitpid)
- CALL(sys_creat)
- CALL(sys_link)
- CALL(sys_unlink) /* 10 */
- CALL(sys_execve)
- CALL(sys_chdir)
- CALL(sys_time)
- CALL(sys_mknod)
- CALL(sys_chmod) /* 15 */
- CALL(sys_chown16)
- CALL(sys_ni_syscall) /* old break syscall holder */
- CALL(sys_stat)
- CALL(sys_lseek)
- CALL(sys_getpid) /* 20 */
- CALL(sys_mount)
- CALL(sys_oldumount)
- CALL(sys_setuid16)
- CALL(sys_getuid16)
- CALL(sys_stime) /* 25 */
- CALL(sys_ptrace)
- CALL(sys_alarm)
- CALL(sys_fstat)
- CALL(sys_pause)
- CALL(sys_utime) /* 30 */
- CALL(sys_ni_syscall) /* old stty syscall holder */
- CALL(sys_ni_syscall) /* old gtty syscall holder */
- CALL(sys_access)
- CALL(sys_nice)
- CALL(sys_ni_syscall) /* 35 old ftime syscall holder */
- CALL(sys_sync)
- CALL(sys_kill)
- CALL(sys_rename)
- CALL(sys_mkdir)
- CALL(sys_rmdir) /* 40 */
- CALL(sys_dup)
- CALL(sys_pipe)
- CALL(sys_times)
- CALL(sys_ni_syscall) /* old prof syscall holder */
- CALL(sys_brk) /* 45 */
- CALL(sys_setgid16)
- CALL(sys_getgid16)
- CALL(sys_signal)
- CALL(sys_geteuid16)
- CALL(sys_getegid16) /* 50 */
- CALL(sys_acct)
- CALL(sys_umount) /* recycled never used phys() */
- CALL(sys_ni_syscall) /* old lock syscall holder */
- CALL(sys_ioctl)
- CALL(sys_fcntl) /* 55 */
- CALL(sys_ni_syscall) /* old mpx syscall holder */
- CALL(sys_setpgid)
- CALL(sys_ni_syscall) /* old ulimit syscall holder */
- CALL(sys_ni_syscall)
- CALL(sys_umask) /* 60 */
- CALL(sys_chroot)
- CALL(sys_ustat)
- CALL(sys_dup2)
- CALL(sys_getppid)
- CALL(sys_getpgrp) /* 65 */
- CALL(sys_setsid)
- CALL(sys_sigaction)
- CALL(sys_sgetmask)
- CALL(sys_ssetmask)
- CALL(sys_setreuid16) /* 70 */
- CALL(sys_setregid16)
- CALL(sys_sigsuspend)
- CALL(sys_sigpending)
- CALL(sys_sethostname)
- CALL(sys_setrlimit) /* 75 */
- CALL(sys_old_getrlimit)
- CALL(sys_getrusage)
- CALL(sys_gettimeofday)
- CALL(sys_settimeofday)
- CALL(sys_getgroups16) /* 80 */
- CALL(sys_setgroups16)
- CALL(sys_old_select)
- CALL(sys_symlink)
- CALL(sys_lstat)
- CALL(sys_readlink) /* 85 */
- CALL(sys_uselib)
- CALL(sys_swapon)
- CALL(sys_reboot)
- CALL(sys_old_readdir)
- CALL(sys_old_mmap) /* 90 */
- CALL(sys_munmap)
- CALL(sys_truncate)
- CALL(sys_ftruncate)
- CALL(sys_fchmod)
- CALL(sys_fchown16) /* 95 */
- CALL(sys_getpriority)
- CALL(sys_setpriority)
- CALL(sys_ni_syscall) /* old profil syscall holder */
- CALL(sys_statfs)
- CALL(sys_fstatfs) /* 100 */
- CALL(sys_ni_syscall) /* ioperm for i386 */
- CALL(sys_socketcall)
- CALL(sys_syslog)
- CALL(sys_setitimer)
- CALL(sys_getitimer) /* 105 */
- CALL(sys_newstat)
- CALL(sys_newlstat)
- CALL(sys_newfstat)
- CALL(sys_ni_syscall)
- CALL(sys_ni_syscall) /* iopl for i386 */ /* 110 */
- CALL(sys_vhangup)
- CALL(sys_ni_syscall) /* obsolete idle() syscall */
- CALL(sys_ni_syscall) /* vm86old for i386 */
- CALL(sys_wait4)
- CALL(sys_swapoff) /* 115 */
- CALL(sys_sysinfo)
- CALL(sys_ipc)
- CALL(sys_fsync)
- CALL(sys_sigreturn)
- CALL(sys_clone) /* 120 */
- CALL(sys_setdomainname)
- CALL(sys_newuname)
- CALL(sys_cacheflush) /* modify_ldt for i386 */
- CALL(sys_adjtimex)
- CALL(sys_ni_syscall) /* 125 sys_mprotect */
- CALL(sys_sigprocmask)
- CALL(sys_ni_syscall) /* sys_create_module */
- CALL(sys_init_module)
- CALL(sys_delete_module)
- CALL(sys_ni_syscall) /* 130 sys_get_kernel_syms */
- CALL(sys_quotactl)
- CALL(sys_getpgid)
- CALL(sys_fchdir)
- CALL(sys_bdflush)
- CALL(sys_sysfs) /* 135 */
- CALL(sys_personality)
- CALL(sys_ni_syscall) /* for afs_syscall */
- CALL(sys_setfsuid16)
- CALL(sys_setfsgid16)
- CALL(sys_llseek) /* 140 */
- CALL(sys_getdents)
- CALL(sys_select)
- CALL(sys_flock)
- CALL(sys_ni_syscall) /* sys_msync */
- CALL(sys_readv) /* 145 */
- CALL(sys_writev)
- CALL(sys_getsid)
- CALL(sys_fdatasync)
- CALL(sys_sysctl)
- CALL(sys_ni_syscall) /* 150 sys_mlock */
- CALL(sys_ni_syscall) /* sys_munlock */
- CALL(sys_ni_syscall) /* sys_mlockall */
- CALL(sys_ni_syscall) /* sys_munlockall */
- CALL(sys_sched_setparam)
- CALL(sys_sched_getparam) /* 155 */
- CALL(sys_sched_setscheduler)
- CALL(sys_sched_getscheduler)
- CALL(sys_sched_yield)
- CALL(sys_sched_get_priority_max)
- CALL(sys_sched_get_priority_min) /* 160 */
- CALL(sys_sched_rr_get_interval)
- CALL(sys_nanosleep)
- CALL(sys_ni_syscall) /* sys_mremap */
- CALL(sys_setresuid16)
- CALL(sys_getresuid16) /* 165 */
- CALL(sys_ni_syscall) /* for vm86 */
- CALL(sys_ni_syscall) /* sys_query_module */
- CALL(sys_poll)
- CALL(sys_ni_syscall) /* old nfsservctl */
- CALL(sys_setresgid16) /* 170 */
- CALL(sys_getresgid16)
- CALL(sys_prctl)
- CALL(sys_rt_sigreturn)
- CALL(sys_rt_sigaction)
- CALL(sys_rt_sigprocmask) /* 175 */
- CALL(sys_rt_sigpending)
- CALL(sys_rt_sigtimedwait)
- CALL(sys_rt_sigqueueinfo)
- CALL(sys_rt_sigsuspend)
- CALL(sys_pread64) /* 180 */
- CALL(sys_pwrite64)
- CALL(sys_lchown16);
- CALL(sys_getcwd)
- CALL(sys_capget)
- CALL(sys_capset) /* 185 */
- CALL(sys_sigaltstack)
- CALL(sys_sendfile)
- CALL(sys_ni_syscall) /* streams1 */
- CALL(sys_ni_syscall) /* streams2 */
- CALL(sys_vfork) /* 190 */
- CALL(sys_getrlimit)
- CALL(sys_mmap_pgoff)
- CALL(sys_truncate64)
- CALL(sys_ftruncate64)
- CALL(sys_stat64) /* 195 */
- CALL(sys_lstat64)
- CALL(sys_fstat64)
- CALL(sys_chown)
- CALL(sys_getuid)
- CALL(sys_getgid) /* 200 */
- CALL(sys_geteuid)
- CALL(sys_getegid)
- CALL(sys_setreuid)
- CALL(sys_setregid)
- CALL(sys_getgroups) /* 205 */
- CALL(sys_setgroups)
- CALL(sys_fchown)
- CALL(sys_setresuid)
- CALL(sys_getresuid)
- CALL(sys_setresgid) /* 210 */
- CALL(sys_getresgid)
- CALL(sys_lchown)
- CALL(sys_setuid)
- CALL(sys_setgid)
- CALL(sys_setfsuid) /* 215 */
- CALL(sys_setfsgid)
- CALL(sys_pivot_root)
- CALL(sys_ni_syscall)
- CALL(sys_ni_syscall)
- CALL(sys_getdents64) /* 220 */
- CALL(sys_fcntl64)
- CALL(sys_ni_syscall) /* reserved TUX */
- CALL(sys_ni_syscall) /* reserved Security */
- CALL(sys_gettid)
- CALL(sys_readahead) /* 225 */
- CALL(sys_setxattr)
- CALL(sys_lsetxattr)
- CALL(sys_fsetxattr)
- CALL(sys_getxattr)
- CALL(sys_lgetxattr) /* 230 */
- CALL(sys_fgetxattr)
- CALL(sys_listxattr)
- CALL(sys_llistxattr)
- CALL(sys_flistxattr)
- CALL(sys_removexattr) /* 235 */
- CALL(sys_lremovexattr)
- CALL(sys_fremovexattr)
- CALL(sys_tkill)
- CALL(sys_sendfile64)
- CALL(sys_futex) /* 240 */
- CALL(sys_sched_setaffinity)
- CALL(sys_sched_getaffinity)
- CALL(sys_ni_syscall)
- CALL(sys_ni_syscall)
- CALL(sys_io_setup) /* 245 */
- CALL(sys_io_destroy)
- CALL(sys_io_getevents)
- CALL(sys_io_submit)
- CALL(sys_io_cancel)
- CALL(sys_fadvise64) /* 250 */
- CALL(sys_ni_syscall)
- CALL(sys_exit_group)
- CALL(sys_lookup_dcookie)
- CALL(sys_epoll_create)
- CALL(sys_epoll_ctl) /* 255 */
- CALL(sys_epoll_wait)
- CALL(sys_ni_syscall) /* sys_remap_file_pages */
- CALL(sys_set_tid_address)
- CALL(sys_timer_create)
- CALL(sys_timer_settime) /* 260 */
- CALL(sys_timer_gettime)
- CALL(sys_timer_getoverrun)
- CALL(sys_timer_delete)
- CALL(sys_clock_settime)
- CALL(sys_clock_gettime) /* 265 */
- CALL(sys_clock_getres)
- CALL(sys_clock_nanosleep)
- CALL(sys_statfs64)
- CALL(sys_fstatfs64)
- CALL(sys_tgkill) /* 270 */
- CALL(sys_utimes)
- CALL(sys_fadvise64_64)
- CALL(sys_ni_syscall) /* sys_vserver */
- CALL(sys_ni_syscall)
- CALL(sys_get_mempolicy) /* 275 */
- CALL(sys_set_mempolicy)
- CALL(sys_mq_open)
- CALL(sys_mq_unlink)
- CALL(sys_mq_timedsend)
- CALL(sys_mq_timedreceive) /* 280 */
- CALL(sys_mq_notify)
- CALL(sys_mq_getsetattr)
- CALL(sys_waitid)
- CALL(sys_ni_syscall) /* sys_kexec_load */
- CALL(sys_add_key) /* 285 */
- CALL(sys_request_key)
- CALL(sys_keyctl)
- CALL(sys_ioprio_set)
- CALL(sys_ioprio_get) /* 290 */
- CALL(sys_inotify_init)
- CALL(sys_inotify_add_watch)
- CALL(sys_inotify_rm_watch)
- CALL(sys_migrate_pages)
- CALL(sys_openat) /* 295 */
- CALL(sys_mkdirat)
- CALL(sys_mknodat)
- CALL(sys_fchownat)
- CALL(sys_futimesat)
- CALL(sys_fstatat64) /* 300 */
- CALL(sys_unlinkat)
- CALL(sys_renameat)
- CALL(sys_linkat)
- CALL(sys_symlinkat)
- CALL(sys_readlinkat) /* 305 */
- CALL(sys_fchmodat)
- CALL(sys_faccessat)
- CALL(sys_ni_syscall) /* sys_pselect6 */
- CALL(sys_ni_syscall) /* sys_ppoll */
- CALL(sys_unshare) /* 310 */
- CALL(sys_set_robust_list)
- CALL(sys_get_robust_list)
- CALL(sys_splice)
- CALL(sys_sync_file_range)
- CALL(sys_tee) /* 315 */
- CALL(sys_vmsplice)
- CALL(sys_ni_syscall) /* sys_move_pages */
- CALL(sys_getcpu)
- CALL(sys_ni_syscall) /* sys_epoll_pwait */
- CALL(sys_setns) /* 320 */
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
deleted file mode 100644
index e0f74191d553..000000000000
--- a/arch/h8300/kernel/time.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * linux/arch/h8300/kernel/time.c
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Copied/hacked from:
- *
- * linux/arch/m68k/kernel/time.c
- *
- * Copyright (C) 1991, 1992, 1995 Linus Torvalds
- *
- * This file contains the m68k-specific time handling details.
- * Most of the stuff is located in the machine specific files.
- *
- * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
- * "A Kernel Model for Precision Timekeeping" by Dave Mills
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/timex.h>
-#include <linux/profile.h>
-
-#include <asm/io.h>
-#include <asm/irq_regs.h>
-#include <asm/timer.h>
-
-#define TICK_SIZE (tick_nsec / 1000)
-
-void h8300_timer_tick(void)
-{
- if (current->pid)
- profile_tick(CPU_PROFILING);
- xtime_update(1);
- update_process_times(user_mode(get_irq_regs()));
-}
-
-void read_persistent_clock(struct timespec *ts)
-{
- unsigned int year, mon, day, hour, min, sec;
-
- /* FIX by dqg : Set to zero for platforms that don't have tod */
- /* without this, time is undefined and can overflow time_t, causing */
- /* very strange errors */
- year = 1980;
- mon = day = 1;
- hour = min = sec = 0;
-#ifdef CONFIG_H8300_GETTOD
- h8300_gettod (&year, &mon, &day, &hour, &min, &sec);
-#endif
- if ((year += 1900) < 1970)
- year += 100;
- ts->tv_sec = mktime(year, mon, day, hour, min, sec);
- ts->tv_nsec = 0;
-}
-
-void __init time_init(void)
-{
-
- h8300_timer_setup();
-}
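
read_persistent_clock() above widens the year it gets back from h8300_gettod() with "year += 1900; if (year < 1970) year += 100", i.e. it treats the value as a year-relative-to-1900 (or two-digit) reading and windows anything below 70 into 2000-2069. A standalone sketch of that windowing; the function name is illustrative, and the exact convention h8300_gettod() uses is not visible in this hunk:

/* Sketch of the year windowing used above: values 70..99 become
 * 1970..1999 and 0..69 become 2000..2069.  Assumes the TOD hardware
 * reports the year relative to 1900, which this hunk does not show. */
static unsigned int widen_tod_year(unsigned int tod_year)
{
	unsigned int year = tod_year + 1900;

	if (year < 1970)
		year += 100;
	return year;
}

/* widen_tod_year(98) == 1998, widen_tod_year(13) == 2013 */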
diff --git a/arch/h8300/kernel/timer/Makefile b/arch/h8300/kernel/timer/Makefile
deleted file mode 100644
index bef0510ea6ad..000000000000
--- a/arch/h8300/kernel/timer/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# h8300 internal timer handler
-
-obj-$(CONFIG_H8300_TIMER8) := timer8.o
-obj-$(CONFIG_H8300_TIMER16) := timer16.o
-obj-$(CONFIG_H8300_ITU) := itu.o
-obj-$(CONFIG_H8300_TPU) := tpu.o
diff --git a/arch/h8300/kernel/timer/itu.c b/arch/h8300/kernel/timer/itu.c
deleted file mode 100644
index 0a8b5cd5bf38..000000000000
--- a/arch/h8300/kernel/timer/itu.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * linux/arch/h8300/kernel/timer/itu.c
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * ITU Timer Handler
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-
-#include <asm/segment.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/regs306x.h>
-
-#if CONFIG_H8300_ITU_CH == 0
-#define ITUBASE 0xffff64
-#define ITUIRQ 24
-#elif CONFIG_H8300_ITU_CH == 1
-#define ITUBASE 0xffff6e
-#define ITUIRQ 28
-#elif CONFIG_H8300_ITU_CH == 2
-#define ITUBASE 0xffff78
-#define ITUIRQ 32
-#elif CONFIG_H8300_ITU_CH == 3
-#define ITUBASE 0xffff82
-#define ITUIRQ 36
-#elif CONFIG_H8300_ITU_CH == 4
-#define ITUBASE 0xffff92
-#define ITUIRQ 40
-#else
-#error Unknown timer channel.
-#endif
-
-#define TCR 0
-#define TIOR 1
-#define TIER 2
-#define TSR 3
-#define TCNT 4
-#define GRA 6
-#define GRB 8
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- h8300_timer_tick();
- ctrl_bclr(IMFA, ITUBASE + TSR);
- return IRQ_HANDLED;
-}
-
-static struct irqaction itu_irq = {
- .name = "itu",
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_TIMER,
-};
-
-static const int __initconst divide_rate[] = {1, 2, 4, 8};
-
-void __init h8300_timer_setup(void)
-{
- unsigned int div;
- unsigned int cnt;
-
- calc_param(cnt, div, divide_rate, 0x10000);
-
- setup_irq(ITUIRQ, &itu_irq);
-
- /* initialize timer */
- ctrl_outb(0, TSTR);
- ctrl_outb(CCLR0 | div, ITUBASE + TCR);
- ctrl_outb(0x01, ITUBASE + TIER);
- ctrl_outw(cnt, ITUBASE + GRA);
- ctrl_bset(CONFIG_H8300_ITU_CH, TSTR);
-}
diff --git a/arch/h8300/kernel/timer/timer16.c b/arch/h8300/kernel/timer/timer16.c
deleted file mode 100644
index 462d9f581719..000000000000
--- a/arch/h8300/kernel/timer/timer16.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * linux/arch/h8300/kernel/timer/timer16.c
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * 16bit Timer Handler
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-
-#include <asm/segment.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/regs306x.h>
-
-/* 16bit timer */
-#if CONFIG_H8300_TIMER16_CH == 0
-#define _16BASE 0xffff78
-#define _16IRQ 24
-#elif CONFIG_H8300_TIMER16_CH == 1
-#define _16BASE 0xffff80
-#define _16IRQ 28
-#elif CONFIG_H8300_TIMER16_CH == 2
-#define _16BASE 0xffff88
-#define _16IRQ 32
-#else
-#error Unknown timer channel.
-#endif
-
-#define TCR 0
-#define TIOR 1
-#define TCNT 2
-#define GRA 4
-#define GRB 6
-
-#define H8300_TIMER_FREQ CONFIG_CPU_CLOCK*10000 /* Timer input freq. */
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- h8300_timer_tick();
- ctrl_bclr(CONFIG_H8300_TIMER16_CH, TISRA);
- return IRQ_HANDLED;
-}
-
-static struct irqaction timer16_irq = {
- .name = "timer-16",
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_TIMER,
-};
-
-static const int __initconst divide_rate[] = {1, 2, 4, 8};
-
-void __init h8300_timer_setup(void)
-{
- unsigned int div;
- unsigned int cnt;
-
- calc_param(cnt, div, divide_rate, 0x10000);
-
- setup_irq(_16IRQ, &timer16_irq);
-
- /* initialize timer */
- ctrl_outb(0, TSTR);
- ctrl_outb(CCLR0 | div, _16BASE + TCR);
- ctrl_outw(cnt, _16BASE + GRA);
- ctrl_bset(4 + CONFIG_H8300_TIMER16_CH, TISRA);
- ctrl_bset(CONFIG_H8300_TIMER16_CH, TSTR);
-}
diff --git a/arch/h8300/kernel/timer/timer8.c b/arch/h8300/kernel/timer/timer8.c
deleted file mode 100644
index 505f3415b40f..000000000000
--- a/arch/h8300/kernel/timer/timer8.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * linux/arch/h8300/kernel/timer/timer8.c
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * 8bit Timer Handler
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/profile.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/timer.h>
-#if defined(CONFIG_CPU_H8300H)
-#include <asm/regs306x.h>
-#endif
-#if defined(CONFIG_CPU_H8S)
-#include <asm/regs267x.h>
-#endif
-
-/* 8bit timer x2 */
-#define CMFA 6
-
-#if defined(CONFIG_H8300_TIMER8_CH0)
-#define _8BASE _8TCR0
-#ifdef CONFIG_CPU_H8300H
-#define _8IRQ 36
-#endif
-#ifdef CONFIG_CPU_H8S
-#define _8IRQ 72
-#endif
-#elif defined(CONFIG_H8300_TIMER8_CH2)
-#ifdef CONFIG_CPU_H8300H
-#define _8BASE _8TCR2
-#define _8IRQ 40
-#endif
-#endif
-
-#ifndef _8BASE
-#error Unknown timer channel.
-#endif
-
-#define _8TCR 0
-#define _8TCSR 2
-#define TCORA 4
-#define TCORB 6
-#define _8TCNT 8
-
-#define CMIEA 0x40
-#define CCLR_CMA 0x08
-#define CKS2 0x04
-
-/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
- */
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- h8300_timer_tick();
- ctrl_bclr(CMFA, _8BASE + _8TCSR);
- return IRQ_HANDLED;
-}
-
-static struct irqaction timer8_irq = {
- .name = "timer-8",
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_TIMER,
-};
-
-static const int __initconst divide_rate[] = {8, 64, 8192};
-
-void __init h8300_timer_setup(void)
-{
- unsigned int div;
- unsigned int cnt;
-
- calc_param(cnt, div, divide_rate, 0x10000);
- div++;
-
- setup_irq(_8IRQ, &timer8_irq);
-
-#if defined(CONFIG_CPU_H8S)
- /* Timer module enable */
- ctrl_bclr(0, MSTPCRL)
-#endif
-
- /* initialize timer */
- ctrl_outw(cnt, _8BASE + TCORA);
- ctrl_outw(0x0000, _8BASE + _8TCSR);
- ctrl_outw((CMIEA|CCLR_CMA|CKS2) << 8 | div,
- _8BASE + _8TCR);
-}
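
calc_param(), used by all four timer back-ends in this directory, is a macro from asm/timer.h that is not part of this diff; judging from its call sites it has to pick a prescaler index div and a compare value cnt so that one timer period matches one kernel tick while cnt still fits the 16-bit counter (the 0x10000 limit passed in). A hedged sketch of that selection; the helper name and the exact rounding are assumptions, not the real macro:

/* Hedged sketch of a calc_param()-style selection: walk the divide_rate
 * table, skip unused (zero) slots, and take the first prescaler whose
 * per-tick count fits the counter.  The real macro in asm/timer.h may
 * differ in rounding and in how it reports failure. */
static int pick_timer_params(unsigned long timer_freq, unsigned int hz,
			     const int *divide_rate, int nr_rates,
			     unsigned long limit,
			     unsigned long *cnt, unsigned int *div)
{
	int i;

	for (i = 0; i < nr_rates; i++) {
		if (divide_rate[i] == 0)	/* unused slot (the TPU table later in this diff has zeros) */
			continue;
		*cnt = timer_freq / divide_rate[i] / hz;
		if (*cnt <= limit) {
			*div = i;
			return 0;
		}
	}
	return -1;	/* no prescaler yields a representable count */
}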
diff --git a/arch/h8300/kernel/timer/tpu.c b/arch/h8300/kernel/timer/tpu.c
deleted file mode 100644
index 0350f6204ecf..000000000000
--- a/arch/h8300/kernel/timer/tpu.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * linux/arch/h8300/kernel/timer/tpu.c
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * TPU Timer Handler
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-
-#include <asm/segment.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/regs267x.h>
-
-/* TPU */
-#if CONFIG_H8300_TPU_CH == 0
-#define TPUBASE 0xffffd0
-#define TPUIRQ 40
-#elif CONFIG_H8300_TPU_CH == 1
-#define TPUBASE 0xffffe0
-#define TPUIRQ 48
-#elif CONFIG_H8300_TPU_CH == 2
-#define TPUBASE 0xfffff0
-#define TPUIRQ 52
-#elif CONFIG_H8300_TPU_CH == 3
-#define TPUBASE 0xfffe80
-#define TPUIRQ 56
-#elif CONFIG_H8300_TPU_CH == 4
-#define TPUBASE 0xfffe90
-#define TPUIRQ 64
-#else
-#error Unknown timer channel.
-#endif
-
-#define _TCR 0
-#define _TMDR 1
-#define _TIOR 2
-#define _TIER 4
-#define _TSR 5
-#define _TCNT 6
-#define _GRA 8
-#define _GRB 10
-
-#define CCLR0 0x20
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- h8300_timer_tick();
- ctrl_bclr(0, TPUBASE + _TSR);
- return IRQ_HANDLED;
-}
-
-static struct irqaction tpu_irq = {
- .name = "tpu",
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_TIMER,
-};
-
-static const int __initconst divide_rate[] = {
-#if CONFIG_H8300_TPU_CH == 0
- 1,4,16,64,0,0,0,0,
-#elif (CONFIG_H8300_TPU_CH == 1) || (CONFIG_H8300_TPU_CH == 5)
- 1,4,16,64,0,0,256,0,
-#elif (CONFIG_H8300_TPU_CH == 2) || (CONFIG_H8300_TPU_CH == 4)
- 1,4,16,64,0,0,0,1024,
-#elif CONFIG_H8300_TPU_CH == 3
- 1,4,16,64,0,1024,256,4096,
-#endif
-};
-
-void __init h8300_timer_setup(void)
-{
- unsigned int cnt;
- unsigned int div;
-
- calc_param(cnt, div, divide_rate, 0x10000);
-
- setup_irq(TPUIRQ, &tpu_irq);
-
- /* TPU module enabled */
- ctrl_bclr(3, MSTPCRH);
-
- ctrl_outb(0, TSTR);
- ctrl_outb(CCLR0 | div, TPUBASE + _TCR);
- ctrl_outb(0, TPUBASE + _TMDR);
- ctrl_outw(0, TPUBASE + _TIOR);
- ctrl_outb(0x01, TPUBASE + _TIER);
- ctrl_outw(cnt, TPUBASE + _GRA);
- ctrl_bset(CONFIG_H8300_TPU_CH, TSTR);
-}
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
deleted file mode 100644
index cfe494dbe3da..000000000000
--- a/arch/h8300/kernel/traps.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * linux/arch/h8300/kernel/traps.c -- general exception handling code
- * H8/300 support Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Cloned from Linux/m68k.
- *
- * No original Copyright holder listed,
- * Probable original (C) Roman Zippel (assigned DJD, 1999)
- *
- * Copyright 1999-2000 D. Jeff Dionne, <jeff@rt-control.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/bug.h>
-
-#include <asm/irq.h>
-#include <asm/traps.h>
-#include <asm/page.h>
-
-static DEFINE_SPINLOCK(die_lock);
-
-/*
- * this must be called very early as the kernel might
- * use some instructions that are emulated on the 060
- */
-
-void __init base_trap_init(void)
-{
-}
-
-void __init trap_init (void)
-{
-}
-
-asmlinkage void set_esp0 (unsigned long ssp)
-{
- current->thread.esp0 = ssp;
-}
-
-/*
- * Generic dumping code. Used for panic and debug.
- */
-
-static void dump(struct pt_regs *fp)
-{
- unsigned long *sp;
- unsigned char *tp;
- int i;
-
- printk("\nCURRENT PROCESS:\n\n");
- printk("COMM=%s PID=%d\n", current->comm, current->pid);
- if (current->mm) {
- printk("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
- (int) current->mm->start_code,
- (int) current->mm->end_code,
- (int) current->mm->start_data,
- (int) current->mm->end_data,
- (int) current->mm->end_data,
- (int) current->mm->brk);
- printk("USER-STACK=%08x KERNEL-STACK=%08lx\n\n",
- (int) current->mm->start_stack,
- (int) PAGE_SIZE+(unsigned long)current);
- }
-
- show_regs(fp);
- printk("\nCODE:");
- tp = ((unsigned char *) fp->pc) - 0x20;
- for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
- if ((i % 0x10) == 0)
- printk("\n%08x: ", (int) (tp + i));
- printk("%08x ", (int) *sp++);
- }
- printk("\n");
-
- printk("\nKERNEL STACK:");
- tp = ((unsigned char *) fp) - 0x40;
- for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
- if ((i % 0x10) == 0)
- printk("\n%08x: ", (int) (tp + i));
- printk("%08x ", (int) *sp++);
- }
- printk("\n");
- if (STACK_MAGIC != *(unsigned long *)((unsigned long)current+PAGE_SIZE))
- printk("(Possibly corrupted stack page??)\n");
-
- printk("\n\n");
-}
-
-void die(const char *str, struct pt_regs *fp, unsigned long err)
-{
- static int diecount;
-
- oops_enter();
-
- console_verbose();
- spin_lock_irq(&die_lock);
- report_bug(fp->pc, fp);
- printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++diecount);
- dump(fp);
-
- spin_unlock_irq(&die_lock);
- do_exit(SIGSEGV);
-}
-
-extern char _start, _etext;
-#define check_kernel_text(addr) \
- ((addr >= (unsigned long)(&_start)) && \
- (addr < (unsigned long)(&_etext)))
-
-static int kstack_depth_to_print = 24;
-
-void show_stack(struct task_struct *task, unsigned long *esp)
-{
- unsigned long *stack, addr;
- int i;
-
- if (esp == NULL)
- esp = (unsigned long *) &esp;
-
- stack = esp;
-
- printk("Stack from %08lx:", (unsigned long)stack);
- for (i = 0; i < kstack_depth_to_print; i++) {
- if (((unsigned long)stack & (THREAD_SIZE - 1)) == 0)
- break;
- if (i % 8 == 0)
- printk("\n ");
- printk(" %08lx", *stack++);
- }
-
- printk("\nCall Trace:");
- i = 0;
- stack = esp;
- while (((unsigned long)stack & (THREAD_SIZE - 1)) != 0) {
- addr = *stack++;
- /*
- * If the address is either in the text segment of the
- * kernel, or in the region which contains vmalloc'ed
- * memory, it *may* be the address of a calling
- * routine; if so, print it so that someone tracing
- * down the cause of the crash will be able to figure
- * out the call path that was taken.
- */
- if (check_kernel_text(addr)) {
- if (i % 4 == 0)
- printk("\n ");
- printk(" [<%08lx>]", addr);
- i++;
- }
- }
- printk("\n");
-}
-
-void show_trace_task(struct task_struct *tsk)
-{
- show_stack(tsk,(unsigned long *)tsk->thread.esp0);
-}
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
deleted file mode 100644
index 3253fed42ac1..000000000000
--- a/arch/h8300/kernel/vmlinux.lds.S
+++ /dev/null
@@ -1,157 +0,0 @@
-#include <asm-generic/vmlinux.lds.h>
-#include <asm/page.h>
-
-/* target memory map */
-#ifdef CONFIG_H8300H_GENERIC
-#define ROMTOP 0x000000
-#define ROMSIZE 0x400000
-#define RAMTOP 0x400000
-#define RAMSIZE 0x400000
-#endif
-
-#ifdef CONFIG_H8300H_AKI3068NET
-#define ROMTOP 0x000000
-#define ROMSIZE 0x080000
-#define RAMTOP 0x400000
-#define RAMSIZE 0x200000
-#endif
-
-#ifdef CONFIG_H8300H_H8MAX
-#define ROMTOP 0x000000
-#define ROMSIZE 0x080000
-#define RAMTOP 0x400000
-#define RAMSIZE 0x200000
-#endif
-
-#ifdef CONFIG_H8300H_SIM
-#define ROMTOP 0x000000
-#define ROMSIZE 0x400000
-#define RAMTOP 0x400000
-#define RAMSIZE 0x400000
-#endif
-
-#ifdef CONFIG_H8S_SIM
-#define ROMTOP 0x000000
-#define ROMSIZE 0x400000
-#define RAMTOP 0x400000
-#define RAMSIZE 0x800000
-#endif
-
-#ifdef CONFIG_H8S_EDOSK2674
-#define ROMTOP 0x000000
-#define ROMSIZE 0x400000
-#define RAMTOP 0x400000
-#define RAMSIZE 0x800000
-#endif
-
-#if defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)
-INPUT(romfs.o)
-#endif
-
-_jiffies = _jiffies_64 + 4;
-
-ENTRY(__start)
-
-SECTIONS
-{
-#if defined(CONFIG_ROMKERNEL)
- . = ROMTOP;
- .vectors :
- {
- __vector = . ;
- *(.vectors*)
- }
-#else
- . = RAMTOP;
- .bootvec :
- {
- *(.bootvec)
- }
-#endif
- .text :
- {
- _text = .;
-#if defined(CONFIG_ROMKERNEL)
- *(.int_redirect)
-#endif
- __stext = . ;
- TEXT_TEXT
- SCHED_TEXT
- LOCK_TEXT
- __etext = . ;
- }
- EXCEPTION_TABLE(16)
-
- RODATA
-#if defined(CONFIG_ROMKERNEL)
- SECURITY_INIT
-#endif
- ROEND = .;
-#if defined(CONFIG_ROMKERNEL)
- . = RAMTOP;
- .data : AT(ROEND)
-#else
- .data :
-#endif
- {
- __sdata = . ;
- ___data_start = . ;
-
- INIT_TASK_DATA(0x2000)
- . = ALIGN(0x4) ;
- DATA_DATA
- . = ALIGN(0x4) ;
- *(.data.*)
-
- . = ALIGN(0x4) ;
- ___init_begin = .;
- __sinittext = .;
- INIT_TEXT
- __einittext = .;
- INIT_DATA
- . = ALIGN(0x4) ;
- INIT_SETUP(0x4)
- ___setup_start = .;
- *(.init.setup)
- . = ALIGN(0x4) ;
- ___setup_end = .;
- INIT_CALLS
- CON_INITCALL
- EXIT_TEXT
- EXIT_DATA
- INIT_RAM_FS
- . = ALIGN(0x4) ;
- ___init_end = .;
- __edata = . ;
- }
-#if defined(CONFIG_RAMKERNEL)
- SECURITY_INIT
-#endif
- __begin_data = LOADADDR(.data);
- .bss :
- {
- . = ALIGN(0x4) ;
- __sbss = . ;
- ___bss_start = . ;
- *(.bss*)
- . = ALIGN(0x4) ;
- *(COMMON)
- . = ALIGN(0x4) ;
- ___bss_stop = . ;
- __ebss = . ;
- __end = . ;
- __ramstart = .;
- }
- .romfs :
- {
- *(.romfs*)
- }
- . = RAMTOP+RAMSIZE;
- .dummy :
- {
- COMMAND_START = . - 0x200 ;
- __ramend = . ;
- }
-
- DISCARDS
-}
diff --git a/arch/h8300/lib/Makefile b/arch/h8300/lib/Makefile
deleted file mode 100644
index 1577f5075b10..000000000000
--- a/arch/h8300/lib/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for H8/300-specific library files.
-#
-
-lib-y = ashrdi3.o checksum.o memcpy.o memset.o abs.o romfs.o
diff --git a/arch/h8300/lib/abs.S b/arch/h8300/lib/abs.S
deleted file mode 100644
index ddd1fb3d01ad..000000000000
--- a/arch/h8300/lib/abs.S
+++ /dev/null
@@ -1,21 +0,0 @@
-;;; abs.S
-
-#include <asm/linkage.h>
-
-#if defined(__H8300H__)
- .h8300h
-#endif
-#if defined(__H8300S__)
- .h8300s
-#endif
- .text
-.global _abs
-
-;;; int abs(int n)
-_abs:
- mov.l er0,er0
- bpl 1f
- neg.l er0
-1:
- rts
-
diff --git a/arch/h8300/lib/ashrdi3.c b/arch/h8300/lib/ashrdi3.c
deleted file mode 100644
index 78efb65e315a..000000000000
--- a/arch/h8300/lib/ashrdi3.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/* ashrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */
-/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
-
-This file is part of GNU CC.
-
-GNU CC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
-
-GNU CC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
-
-#define BITS_PER_UNIT 8
-
-typedef int SItype __attribute__ ((mode (SI)));
-typedef unsigned int USItype __attribute__ ((mode (SI)));
-typedef int DItype __attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
-
-struct DIstruct {SItype high, low;};
-
-typedef union
-{
- struct DIstruct s;
- DItype ll;
-} DIunion;
-
-DItype
-__ashrdi3 (DItype u, word_type b)
-{
- DIunion w;
- word_type bm;
- DIunion uu;
-
- if (b == 0)
- return u;
-
- uu.ll = u;
-
- bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
- if (bm <= 0)
- {
- /* w.s.high = 1..1 or 0..0 */
- w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1);
- w.s.low = uu.s.high >> -bm;
- }
- else
- {
- USItype carries = (USItype)uu.s.high << bm;
- w.s.high = uu.s.high >> b;
- w.s.low = ((USItype)uu.s.low >> b) | carries;
- }
-
- return w.ll;
-}
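
__ashrdi3() above builds a 64-bit arithmetic right shift out of 32-bit operations: when the shift count reaches or crosses the word size (bm <= 0) the low word becomes the sign-extended high word shifted by the remainder, otherwise the bits falling out of the high word are OR-ed into the low word as carries. A small self-check of that splitting against plain 64-bit arithmetic (illustrative only; it assumes the compiler's >> on signed types is arithmetic, as GCC guarantees):

#include <assert.h>
#include <stdint.h>

/* Cross-check of the high/low splitting done by __ashrdi3() above,
 * for shift counts 1..63.  Purely illustrative. */
static void check_ashrdi3_split(int64_t u, unsigned int b)
{
	int32_t high = (int32_t)(u >> 32);
	uint32_t low = (uint32_t)u;
	int32_t r_high;
	uint32_t r_low;

	if (b >= 32) {				/* shift crosses the word boundary */
		r_high = high >> 31;		/* all sign bits */
		r_low = (uint32_t)(high >> (b - 32));
	} else {
		uint32_t carries = (uint32_t)high << (32 - b);
		r_high = high >> b;
		r_low = (low >> b) | carries;
	}
	assert((int64_t)(((uint64_t)(uint32_t)r_high << 32) | r_low) == u >> b);
}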
diff --git a/arch/h8300/lib/checksum.c b/arch/h8300/lib/checksum.c
deleted file mode 100644
index bdc5b032acd6..000000000000
--- a/arch/h8300/lib/checksum.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * IP/TCP/UDP checksumming routines
- *
- * Authors: Jorge Cwik, <jorge@laser.satlink.net>
- * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- * Tom May, <ftom@netcom.com>
- * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
- * Lots of code moved from tcp.c and ip.c; see those files
- * for more names.
- *
- * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
- * Fixed some nasty bugs, causing some horrible crashes.
- * A: At some points, the sum (%0) was used as
- * length-counter instead of the length counter
- * (%1). Thanks to Roman Hodek for pointing this out.
- * B: GCC seems to mess up if one uses too many
- * data-registers to hold input values and one tries to
- * specify d0 and d1 as scratch registers. Letting gcc choose these
- * registers itself solves the problem.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most
- of the assembly has to go. */
-
-#include <net/checksum.h>
-#include <linux/module.h>
-
-static inline unsigned short from32to16(unsigned long x)
-{
- /* add up 16-bit and 16-bit for 16+c bit */
- x = (x & 0xffff) + (x >> 16);
- /* add up carry.. */
- x = (x & 0xffff) + (x >> 16);
- return x;
-}
-
-static unsigned long do_csum(const unsigned char * buff, int len)
-{
- int odd, count;
- unsigned long result = 0;
-
- if (len <= 0)
- goto out;
- odd = 1 & (unsigned long) buff;
- if (odd) {
- result = *buff;
- len--;
- buff++;
- }
- count = len >> 1; /* nr of 16-bit words.. */
- if (count) {
- if (2 & (unsigned long) buff) {
- result += *(unsigned short *) buff;
- count--;
- len -= 2;
- buff += 2;
- }
- count >>= 1; /* nr of 32-bit words.. */
- if (count) {
- unsigned long carry = 0;
- do {
- unsigned long w = *(unsigned long *) buff;
- count--;
- buff += 4;
- result += carry;
- result += w;
- carry = (w > result);
- } while (count);
- result += carry;
- result = (result & 0xffff) + (result >> 16);
- }
- if (len & 2) {
- result += *(unsigned short *) buff;
- buff += 2;
- }
- }
- if (len & 1)
- result += (*buff << 8);
- result = from32to16(result);
- if (odd)
- result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-out:
- return result;
-}
-
-/*
- * This is a version of ip_compute_csum() optimized for IP headers,
- * which always checksum on 4 octet boundaries.
- */
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
-{
- return (__force __sum16)~do_csum(iph,ihl*4);
-}
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-/*
- * Egads... That thing apparently assumes that *all* checksums it ever sees will
- * be folded. Very likely a bug.
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum)
-{
- unsigned int result = do_csum(buff, len);
-
- /* add in old sum, and carry.. */
- result += (__force u32)sum;
- /* 16+c bits -> 16 bits */
- result = (result & 0xffff) + (result >> 16);
- return (__force __wsum)result;
-}
-
-EXPORT_SYMBOL(csum_partial);
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-__sum16 ip_compute_csum(const void *buff, int len)
-{
- return (__force __sum16)~do_csum(buff,len);
-}
-
-/*
- * copy from fs while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len,
- __wsum sum, int *csum_err)
-{
- if (csum_err) *csum_err = 0;
- memcpy(dst, (__force const void *)src, len);
- return csum_partial(dst, len, sum);
-}
-
-/*
- * copy from ds while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-{
- memcpy(dst, src, len);
- return csum_partial(dst, len, sum);
-}
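
do_csum() above accumulates 16-bit words into a 32-bit sum and then folds the overflow back in with from32to16(); two folding rounds are enough because the first can leave a carry of at most one. That end-around carry is what makes the result a ones' complement (Internet checksum style) sum. A compact, standard-C illustration of the folding step:

#include <assert.h>
#include <stdint.h>

/* Fold a 32-bit accumulator down to a 16-bit ones' complement sum,
 * mirroring from32to16() above. */
static uint16_t fold32to16(uint32_t x)
{
	x = (x & 0xffff) + (x >> 16);	/* add the upper half into the lower half */
	x = (x & 0xffff) + (x >> 16);	/* absorb the possible carry from the first fold */
	return (uint16_t)x;
}

/* Example: 0xffff + 0x0002 overflows 16 bits; the carry wraps around. */
static void fold_example(void)
{
	assert(fold32to16(0xffffu + 0x0002u) == 0x0002);
}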
diff --git a/arch/h8300/lib/memcpy.S b/arch/h8300/lib/memcpy.S
deleted file mode 100644
index cad325e2c0e8..000000000000
--- a/arch/h8300/lib/memcpy.S
+++ /dev/null
@@ -1,84 +0,0 @@
-;;; memcpy.S
-
-#include <asm/linkage.h>
-
-#if defined(__H8300H__)
- .h8300h
-#endif
-#if defined(__H8300S__)
- .h8300s
-#endif
-
- .text
-.global _memcpy
-
-;;; void *memcpy(void *to, void *from, size_t n)
-_memcpy:
- mov.l er2,er2
- bne 1f
- rts
-1:
- ;; address check
- bld #0,r0l
- bxor #0,r1l
- bcs 4f
- mov.l er4,@-sp
- mov.l er0,@-sp
- btst #0,r0l
- beq 1f
- ;; (aligned even) odd address
- mov.b @er1,r3l
- mov.b r3l,@er0
- adds #1,er1
- adds #1,er0
- dec.l #1,er2
- beq 3f
-1:
- ;; n < sizeof(unsigned long) check
- sub.l er4,er4
- adds #4,er4 ; loop count check value
- cmp.l er4,er2
- blo 2f
- ;; unsigned long copy
-1:
- mov.l @er1,er3
- mov.l er3,@er0
- adds #4,er0
- adds #4,er1
- subs #4,er2
- cmp.l er4,er2
- bcc 1b
- ;; rest
-2:
- mov.l er2,er2
- beq 3f
-1:
- mov.b @er1,r3l
- mov.b r3l,@er0
- adds #1,er1
- adds #1,er0
- dec.l #1,er2
- bne 1b
-3:
- mov.l @sp+,er0
- mov.l @sp+,er4
- rts
-
- ;; odd <- even / even <- odd
-4:
- mov.l er4,er3
- mov.l er2,er4
- mov.l er5,er2
- mov.l er1,er5
- mov.l er6,er1
- mov.l er0,er6
-1:
- eepmov.w
- mov.w r4,r4
- bne 1b
- dec.w #1,e4
- bpl 1b
- mov.l er1,er6
- mov.l er2,er5
- mov.l er3,er4
- rts
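
The "bld #0,r0l / bxor #0,r1l / bcs 4f" test at the top of _memcpy above compares bit 0 of the destination (er0) and source (er1) addresses: only when both have the same parity can a single leading byte copy align them for the 32-bit loop; otherwise the code branches to the byte-wise eepmov path at label 4. The same decision expressed in C (illustrative, not from the original file):

#include <stdint.h>

/* The alignment decision _memcpy makes above: if bit 0 of the two
 * addresses differs, no number of leading byte copies can align both
 * at once, so a byte loop must be used instead of 32-bit moves. */
static int can_use_long_copy(const void *dst, const void *src)
{
	return (((uintptr_t)dst ^ (uintptr_t)src) & 1) == 0;
}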
diff --git a/arch/h8300/lib/memset.S b/arch/h8300/lib/memset.S
deleted file mode 100644
index 4549a64c5b79..000000000000
--- a/arch/h8300/lib/memset.S
+++ /dev/null
@@ -1,61 +0,0 @@
-/* memset.S */
-
-#include <asm/linkage.h>
-
-#if defined(__H8300H__)
- .h8300h
-#endif
-#if defined(__H8300S__)
- .h8300s
-#endif
- .text
-
-.global _memset
-
-;; void *memset(void *ptr, int c, size_t count)
-;; ptr = er0
-;; c = er1(r1l)
-;; count = er2
-_memset:
- btst #0,r0l
- beq 2f
-
- ;; odd address
-1:
- mov.b r1l,@er0
- adds #1,er0
- dec.l #1,er2
- beq 6f
-
- ;; even address
-2:
- mov.l er2,er3
- cmp.l #4,er2
- blo 4f
- ;; count>=4 -> count/4
-#if defined(__H8300H__)
- shlr.l er2
- shlr.l er2
-#endif
-#if defined(__H8300S__)
- shlr.l #2,er2
-#endif
- ;; byte -> long
- mov.b r1l,r1h
- mov.w r1,e1
-3:
- mov.l er1,@er0
- adds #4,er0
- dec.l #1,er2
- bne 3b
-4:
- ;; count % 4
- and.b #3,r3l
- beq 6f
-5:
- mov.b r1l,@er0
- adds #1,er0
- dec.b r3l
- bne 5b
-6:
- rts
diff --git a/arch/h8300/lib/romfs.S b/arch/h8300/lib/romfs.S
deleted file mode 100644
index 68910d8e1ff4..000000000000
--- a/arch/h8300/lib/romfs.S
+++ /dev/null
@@ -1,57 +0,0 @@
-/* romfs move to __ebss */
-
-#include <asm/linkage.h>
-
-#if defined(__H8300H__)
- .h8300h
-#endif
-#if defined(__H8300S__)
- .h8300s
-#endif
-
-#define BLKOFFSET 512
-
- .text
-.globl __move_romfs
-_romfs_sig_len = 8
-
-__move_romfs:
- mov.l #__sbss,er0
- mov.l #_romfs_sig,er1
- mov.b #_romfs_sig_len,r3l
-1: /* check romfs image */
- mov.b @er0+,r2l
- mov.b @er1+,r2h
- cmp.b r2l,r2h
- bne 2f
- dec.b r3l
- bne 1b
-
- /* find romfs image */
- mov.l @__sbss+8,er0 /* romfs length(be) */
- mov.l #__sbss,er1
- add.l er0,er1 /* romfs image end */
- mov.l #__ebss,er2
- add.l er0,er2 /* destination address */
-#if defined(CONFIG_INTELFLASH)
- add.l #BLKOFFSET,er2
-#endif
- adds #2,er0
- adds #1,er0
- shlr er0
- shlr er0 /* transfer length */
-1:
- mov.l @er1,er3 /* copy image */
- mov.l er3,@er2
- subs #4,er1
- subs #4,er2
- dec.l #1,er0
- bpl 1b
-2:
- rts
-
- .section .rodata
-_romfs_sig:
- .ascii "-rom1fs-"
-
- .end
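
__move_romfs above relocates the romfs image from __sbss (where the loader left it, in the space .bss will occupy) to just past __ebss; because the destination lies above the source and may overlap it, the copy runs from the image end downwards. The equivalent overlap-safe copy in C (illustrative only; memmove() applies the same rule for this overlap direction):

#include <stddef.h>

/* Illustrative: copying a block to a higher, possibly overlapping
 * address must run backwards, which is why __move_romfs above walks
 * from the end of the image toward its start. */
static void copy_up_overlapping(unsigned char *dst, const unsigned char *src,
				size_t len)
{
	while (len--)
		dst[len] = src[len];
}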
diff --git a/arch/h8300/mm/Makefile b/arch/h8300/mm/Makefile
deleted file mode 100644
index 5f4bc42b6453..000000000000
--- a/arch/h8300/mm/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux h8300-specific parts of the memory manager.
-#
-
-obj-y := init.o fault.o memory.o kmap.o
diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c
deleted file mode 100644
index 472535977006..000000000000
--- a/arch/h8300/mm/fault.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * linux/arch/h8300/mm/fault.c
- *
- * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
- * Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
- *
- * Based on:
- *
- * linux/arch/m68knommu/mm/fault.c
- * linux/arch/m68k/mm/fault.c
- *
- * Copyright (C) 1995 Hamish Macdonald
- */
-
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/ptrace.h>
-
-#include <asm/pgtable.h>
-
-/*
- * This routine handles page faults. It determines the problem, and
- * then passes it off to one of the appropriate routines.
- *
- * error_code:
- * bit 0 == 0 means no page found, 1 means protection fault
- * bit 1 == 0 means read, 1 means write
- *
- * If this routine detects a bad access, it returns 1, otherwise it
- * returns 0.
- */
-asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
-{
-#ifdef DEBUG
- printk ("regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld\n",
- regs->sr, regs->pc, address, error_code);
-#endif
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
- if ((unsigned long) address < PAGE_SIZE) {
- printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
- } else
- printk(KERN_ALERT "Unable to handle kernel access");
- printk(" at virtual address %08lx\n",address);
- if (!user_mode(regs))
- die("Oops", regs, error_code);
- do_exit(SIGKILL);
-
- return 1;
-}
-
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
deleted file mode 100644
index 6c1251e491af..000000000000
--- a/arch/h8300/mm/init.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * linux/arch/h8300/mm/init.c
- *
- * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
- * Kenneth Albanowski <kjahds@kjahds.com>,
- * Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
- *
- * Based on:
- *
- * linux/arch/m68knommu/mm/init.c
- * linux/arch/m68k/mm/init.c
- *
- * Copyright (C) 1995 Hamish Macdonald
- *
- * JAN/1999 -- hacked to support ColdFire (gerg@snapgear.com)
- * DEC/2000 -- linux 2.4 support <davidm@snapgear.com>
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/init.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/gfp.h>
-
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/sections.h>
-
-#undef DEBUG
-
-/*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving an inode
- * unused, etc.
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-static unsigned long empty_bad_page_table;
-
-static unsigned long empty_bad_page;
-
-unsigned long empty_zero_page;
-
-extern unsigned long rom_length;
-
-extern unsigned long memory_start;
-extern unsigned long memory_end;
-
-/*
- * paging_init() continues the virtual memory environment setup which
- * was begun by the code in arch/head.S.
- * The parameters are pointers to where to stick the starting and ending
- * addresses of available kernel virtual memory.
- */
-void __init paging_init(void)
-{
- /*
- * Make sure start_mem is page aligned, otherwise bootmem and
- * page_alloc get different views of the world.
- */
-#ifdef DEBUG
- unsigned long start_mem = PAGE_ALIGN(memory_start);
-#endif
- unsigned long end_mem = memory_end & PAGE_MASK;
-
-#ifdef DEBUG
- printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
- start_mem, end_mem);
-#endif
-
- /*
- * Initialize the bad page table and bad page to point
- * to a couple of allocated pages.
- */
- empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
- empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
- empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
- memset((void *)empty_zero_page, 0, PAGE_SIZE);
-
- /*
- * Set up SFC/DFC registers (user data space).
- */
- set_fs (USER_DS);
-
-#ifdef DEBUG
- printk ("before free_area_init\n");
-
- printk ("free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
- start_mem, end_mem);
-#endif
-
- {
- unsigned long zones_size[MAX_NR_ZONES] = {0, };
-
- zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT;
- zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
-#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = 0;
-#endif
- free_area_init(zones_size);
- }
-}
-
-void __init mem_init(void)
-{
- unsigned long codesize = _etext - _stext;
-
- pr_devel("Mem_init: start=%lx, end=%lx\n", memory_start, memory_end);
-
- high_memory = (void *) (memory_end & PAGE_MASK);
- max_mapnr = MAP_NR(high_memory);
-
- /* this will put all low memory onto the freelists */
- free_all_bootmem();
-
- mem_init_print_info(NULL);
- if (rom_length > 0 && rom_length > codesize)
- pr_info("Memory available: %luK/%luK ROM\n",
- (rom_length - codesize) >> 10, rom_length >> 10);
-}
-
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void
-free_initmem(void)
-{
-#ifdef CONFIG_RAMKERNEL
- free_initmem_default(-1);
-#endif
-}
-
diff --git a/arch/h8300/mm/kmap.c b/arch/h8300/mm/kmap.c
deleted file mode 100644
index f79edcdadf39..000000000000
--- a/arch/h8300/mm/kmap.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * linux/arch/h8300/mm/kmap.c
- *
- * Based on
- * linux/arch/m68knommu/mm/kmap.c
- *
- * Copyright (C) 2000 Lineo, <davidm@snapgear.com>
- * Copyright (C) 2000-2002 David McCullough <davidm@snapgear.com>
- */
-
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/io.h>
-
-#undef DEBUG
-
-#define VIRT_OFFSET (0x01000000)
-
-/*
- * Map some physical address range into the kernel address space.
- */
-void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
-{
- return (void *)(physaddr + VIRT_OFFSET);
-}
-
-/*
- * Unmap a ioremap()ed region again.
- */
-void iounmap(void *addr)
-{
-}
-
-/*
- * __iounmap() unmaps nearly everything, so be careful.  It no longer
- * frees the current pointer/page tables; that wasn't used anyway and
- * might be added back later.
- */
-void __iounmap(void *addr, unsigned long size)
-{
-}
-
-/*
- * Set new cache mode for some kernel address space.
- * The caller must push data for that range itself, if such data may already
- * be in the cache.
- */
-void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
-{
-}
diff --git a/arch/h8300/mm/memory.c b/arch/h8300/mm/memory.c
deleted file mode 100644
index 06e364641392..000000000000
--- a/arch/h8300/mm/memory.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * linux/arch/h8300/mm/memory.c
- *
- * Copyright (C) 2002 Yoshinori Sato <ysato@users.sourceforge.jp>,
- *
- * Based on:
- *
- * linux/arch/m68knommu/mm/memory.c
- *
- * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>,
- * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
- *
- * Based on:
- *
- * linux/arch/m68k/mm/memory.c
- *
- * Copyright (C) 1995 Hamish Macdonald
- */
-
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/traps.h>
-#include <asm/io.h>
-
-void cache_clear (unsigned long paddr, int len)
-{
-}
-
-
-void cache_push (unsigned long paddr, int len)
-{
-}
-
-void cache_push_v (unsigned long vaddr, int len)
-{
-}
-
-/*
- * Map some physical address range into the kernel address space.
- */
-
-unsigned long kernel_map(unsigned long paddr, unsigned long size,
- int nocacheflag, unsigned long *memavailp)
-{
- return paddr;
-}
-
diff --git a/arch/h8300/platform/h8300h/Makefile b/arch/h8300/platform/h8300h/Makefile
deleted file mode 100644
index 420f73b0d962..000000000000
--- a/arch/h8300/platform/h8300h/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-# Reuse any files we can from the H8/300H
-#
-
-obj-y := irq.o ptrace_h8300h.o
diff --git a/arch/h8300/platform/h8300h/aki3068net/Makefile b/arch/h8300/platform/h8300h/aki3068net/Makefile
deleted file mode 100644
index b7ff78050b7f..000000000000
--- a/arch/h8300/platform/h8300h/aki3068net/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := crt0_ram.o
diff --git a/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S b/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S
deleted file mode 100644
index b2ad0f2d0417..000000000000
--- a/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * linux/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Platform-dependent startup
- * Target Architecture: AE-3068 (aka. aki3068net)
- * Memory Layout : RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-
- .global __start
- .global _command_line
- .global __platform_gpio_table
- .global __target_name
-
- .h8300h
-
- .section .text
- .file "crt0_ram.S"
-
- /* CPU Reset entry */
-__start:
- mov.l #RAMEND,sp
- ldc #0x80,ccr
-
- /* Peripheral Setup */
-
-#if defined(CONFIG_MTD_UCLINUX)
- /* move romfs image */
- jsr @__move_romfs
-#endif
-
- /* .bss clear */
- mov.l #__sbss,er5
- mov.l #__ebss,er4
- sub.l er5,er4
- shlr er4
- shlr er4
- sub.l er0,er0
-1:
- mov.l er0,@er5
- adds #4,er5
- dec.l #1,er4
- bne 1b
-
- /* copy kernel commandline */
- mov.l #COMMAND_START,er5
- mov.l #_command_line,er6
- mov.w #512,r4
- eepmov.w
-
- /* uClinux kernel start */
- ldc #0x90,ccr /* running kernel */
- mov.l #_init_thread_union,sp
- add.l #0x2000,sp
- jsr @_start_kernel
-_exit:
-
- jmp _exit
-
- rts
-
- /* I/O port assign information */
-__platform_gpio_table:
- mov.l #gpio_table,er0
- rts
-
-gpio_table:
- ;; P1DDR
- .byte 0xff,0xff
- ;; P2DDR
- .byte 0xff,0xff
- ;; P3DDR
- .byte 0xff,0x00
- ;; P4DDR
- .byte 0x00,0x00
- ;; P5DDR
- .byte 0x01,0x01
- ;; P6DDR
- .byte 0x00,0x00
- ;; dummy
- .byte 0x00,0x00
- ;; P8DDR
- .byte 0x0c,0x0c
- ;; P9DDR
- .byte 0x00,0x00
- ;; PADDR
- .byte 0x00,0x00
- ;; PBDDR
- .byte 0x30,0x30
-
-__target_name:
- .asciz "AE-3068"
-
- .section .bootvec,"ax"
- jmp @__start
diff --git a/arch/h8300/platform/h8300h/generic/Makefile b/arch/h8300/platform/h8300h/generic/Makefile
deleted file mode 100644
index 2b12a170209e..000000000000
--- a/arch/h8300/platform/h8300h/generic/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := crt0_$(MODEL).o
diff --git a/arch/h8300/platform/h8300h/generic/crt0_ram.S b/arch/h8300/platform/h8300h/generic/crt0_ram.S
deleted file mode 100644
index 5ab7d9c12910..000000000000
--- a/arch/h8300/platform/h8300h/generic/crt0_ram.S
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * linux/arch/h8300/platform/h8300h/generic/crt0_ram.S
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Platform-dependent startup
- * Target Architecture: generic
- * Memory Layout : RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-
- .global __start
- .global _command_line
- .global __platform_gpio_table
- .global __target_name
-
- .h8300h
-
- .section .text
- .file "crt0_ram.S"
-
- /* CPU Reset entry */
-__start:
- mov.l #RAMEND,sp
- ldc #0x80,ccr
-
- /* Peripheral Setup */
-
-#if defined(CONFIG_BLK_DEV_BLKMEM)
- /* move romfs image */
- jsr @__move_romfs
-#endif
-
- /* .bss clear */
- mov.l #__sbss,er5
- mov.l #__ebss,er4
- sub.l er5,er4
- shlr er4
- shlr er4
- sub.l er0,er0
-1:
- mov.l er0,@er5
- adds #4,er5
- dec.l #1,er4
- bne 1b
-
- /* copy kernel commandline */
- mov.l #COMMAND_START,er5
- mov.l #_command_line,er6
- mov.w #512,r4
- eepmov.w
-
- /* uClinux kernel start */
- ldc #0x90,ccr /* running kernel */
- mov.l #_init_thread_union,sp
- add.l #0x2000,sp
- jsr @_start_kernel
-_exit:
-
- jmp _exit
-
- rts
-
- /* I/O port assign information */
-__platform_gpio_table:
- mov.l #gpio_table,er0
- rts
-
-gpio_table:
- ;; P1DDR
- .byte 0x00,0x00
- ;; P2DDR
- .byte 0x00,0x00
- ;; P3DDR
- .byte 0x00,0x00
- ;; P4DDR
- .byte 0x00,0x00
- ;; P5DDR
- .byte 0x00,0x00
- ;; P6DDR
- .byte 0x00,0x00
- ;; dummy
- .byte 0x00,0x00
- ;; P8DDR
- .byte 0x00,0x00
- ;; P9DDR
- .byte 0x00,0x00
- ;; PADDR
- .byte 0x00,0x00
- ;; PBDDR
- .byte 0x00,0x00
-
-__target_name:
- .asciz "generic"
diff --git a/arch/h8300/platform/h8300h/generic/crt0_rom.S b/arch/h8300/platform/h8300h/generic/crt0_rom.S
deleted file mode 100644
index dda1dfa15a5e..000000000000
--- a/arch/h8300/platform/h8300h/generic/crt0_rom.S
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * linux/arch/h8300/platform/h8300h/generic/crt0_rom.S
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Platform depend startup
- * Target Archtecture: generic
- * Memory Layout : ROM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-
- .global __start
- .global __command_line
- .global __platform_gpio_table
- .global __target_name
-
- .h8300h
- .section .text
- .file "crt0_rom.S"
-
- /* CPU Reset entry */
-__start:
- mov.l #__ramend,sp
- ldc #0x80,ccr
-
- /* Peripheral Setup */
-
- /* .bss clear */
- mov.l #__sbss,er5
- mov.l #__ebss,er4
- sub.l er5,er4
- shlr er4
- shlr er4
- sub.l er0,er0
-1:
- mov.l er0,@er5
- adds #4,er5
- dec.l #1,er4
- bne 1b
-
- /* copy .data */
-#if !defined(CONFIG_H8300H_SIM)
- /* copy .data */
- mov.l #__begin_data,er5
- mov.l #__sdata,er6
- mov.l #__edata,er4
- sub.l er6,er4
- shlr.l er4
- shlr.l er4
-1:
- mov.l @er5+,er0
- mov.l er0,@er6
- adds #4,er6
- dec.l #1,er4
- bne 1b
-#endif
-
- /* copy kernel commandline */
- mov.l #COMMAND_START,er5
- mov.l #__command_line,er6
- mov.w #512,r4
- eepmov.w
-
- /* linux kernel start */
- ldc #0x90,ccr /* running kernel */
- mov.l #_init_thread_union,sp
- add.l #0x2000,sp
- jsr @_start_kernel
-_exit:
-
- jmp _exit
-
- rts
-
- /* I/O port assign information */
-__platform_gpio_table:
- mov.l #gpio_table,er0
- rts
-
-gpio_table:
- ;; P1DDR
- .byte 0x00,0x00
- ;; P2DDR
- .byte 0x00,0x00
- ;; P3DDR
- .byte 0x00,0x00
- ;; P4DDR
- .byte 0x00,0x00
- ;; P5DDR
- .byte 0x00,0x00
- ;; P6DDR
- .byte 0x00,0x00
- ;; dummy
- .byte 0x00,0x00
- ;; P8DDR
- .byte 0x00,0x00
- ;; P9DDR
- .byte 0x00,0x00
- ;; PADDR
- .byte 0x00,0x00
- ;; PBDDR
- .byte 0x00,0x00
-
- .section .rodata
-__target_name:
- .asciz "generic"
-
- .section .bss
-__command_line:
- .space 512
-
- /* interrupt vector */
- .section .vectors,"ax"
- .long __start
-vector = 1
- .rept 64-1
- .long _interrupt_redirect_table+vector*4
-vector = vector + 1
- .endr
diff --git a/arch/h8300/platform/h8300h/h8max/Makefile b/arch/h8300/platform/h8300h/h8max/Makefile
deleted file mode 100644
index b7ff78050b7f..000000000000
--- a/arch/h8300/platform/h8300h/h8max/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := crt0_ram.o
diff --git a/arch/h8300/platform/h8300h/h8max/crt0_ram.S b/arch/h8300/platform/h8300h/h8max/crt0_ram.S
deleted file mode 100644
index 6a0d4e2d9ec6..000000000000
--- a/arch/h8300/platform/h8300h/h8max/crt0_ram.S
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * linux/arch/h8300/platform/h8300h/h8max/crt0_ram.S
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Platform depend startup
- * Target Archtecture: H8MAX
- * Memory Layout : RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-
- .global __start
- .global _command_line
- .global __platform_gpio_table
- .global __target_name
-
- .h8300h
-
- .section .text
- .file "crt0_ram.S"
-
- /* CPU Reset entry */
-__start:
- mov.l #RAMEND,sp
- ldc #0x80,ccr
-
- /* Peripheral Setup */
-
-#if defined(CONFIG_MTD_UCLINUX)
- /* move romfs image */
- jsr @__move_romfs
-#endif
-
- /* .bss clear */
- mov.l #__sbss,er5
- mov.l #__ebss,er4
- sub.l er5,er4
- shlr er4
- shlr er4
- sub.l er0,er0
-1:
- mov.l er0,@er5
- adds #4,er5
- dec.l #1,er4
- bne 1b
-
- /* copy kernel commandline */
- mov.l #COMMAND_START,er5
- mov.l #_command_line,er6
- mov.w #512,r4
- eepmov.w
-
- /* uClinux kernel start */
- ldc #0x90,ccr /* running kernel */
- mov.l #_init_thread_union,sp
- add.l #0x2000,sp
- jsr @_start_kernel
-_exit:
-
- jmp _exit
-
- rts
-
- /* I/O port assign information */
-__platform_gpio_table:
- mov.l #gpio_table,er0
- rts
-
-gpio_table:
- ;; P1DDR
- .byte 0xff,0xff
- ;; P2DDR
- .byte 0xff,0xff
- ;; P3DDR
- .byte 0x00,0x00
- ;; P4DDR
- .byte 0x00,0x00
- ;; P5DDR
- .byte 0x01,0x01
- ;; P6DDR
- .byte 0xf6,0xf6
- ;; dummy
- .byte 0x00,0x00
- ;; P8DDR
- .byte 0xee,0xee
- ;; P9DDR
- .byte 0x00,0x00
- ;; PADDR
- .byte 0x00,0x00
- ;; PBDDR
- .byte 0x30,0x30
-
-__target_name:
- .asciz "H8MAX"
-
- .section .bootvec,"ax"
- jmp @__start
diff --git a/arch/h8300/platform/h8300h/irq.c b/arch/h8300/platform/h8300h/irq.c
deleted file mode 100644
index 0a50353e09d5..000000000000
--- a/arch/h8300/platform/h8300h/irq.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Interrupt handling H8/300H depend.
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- */
-
-#include <linux/init.h>
-#include <linux/errno.h>
-
-#include <asm/ptrace.h>
-#include <asm/traps.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/gpio-internal.h>
-#include <asm/regs306x.h>
-
-const int __initconst h8300_saved_vectors[] = {
-#if defined(CONFIG_GDB_DEBUG)
- TRAP3_VEC, /* TRAPA #3 is GDB breakpoint */
-#endif
- -1,
-};
-
-const h8300_vector __initconst h8300_trap_table[] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- system_call,
- 0,
- 0,
- trace_break,
-};
-
-int h8300_enable_irq_pin(unsigned int irq)
-{
- int bitmask;
- if (irq < EXT_IRQ0 || irq > EXT_IRQ5)
- return 0;
-
- /* initialize IRQ pin */
- bitmask = 1 << (irq - EXT_IRQ0);
- switch(irq) {
- case EXT_IRQ0:
- case EXT_IRQ1:
- case EXT_IRQ2:
- case EXT_IRQ3:
- if (H8300_GPIO_RESERVE(H8300_GPIO_P8, bitmask) == 0)
- return -EBUSY;
- H8300_GPIO_DDR(H8300_GPIO_P8, bitmask, H8300_GPIO_INPUT);
- break;
- case EXT_IRQ4:
- case EXT_IRQ5:
- if (H8300_GPIO_RESERVE(H8300_GPIO_P9, bitmask) == 0)
- return -EBUSY;
- H8300_GPIO_DDR(H8300_GPIO_P9, bitmask, H8300_GPIO_INPUT);
- break;
- }
-
- return 0;
-}
-
-void h8300_disable_irq_pin(unsigned int irq)
-{
- int bitmask;
- if (irq < EXT_IRQ0 || irq > EXT_IRQ5)
- return;
-
- /* disable interrupt & release IRQ pin */
- bitmask = 1 << (irq - EXT_IRQ0);
- switch(irq) {
- case EXT_IRQ0:
- case EXT_IRQ1:
- case EXT_IRQ2:
- case EXT_IRQ3:
- *(volatile unsigned char *)IER &= ~bitmask;
- H8300_GPIO_FREE(H8300_GPIO_P8, bitmask);
- break ;
- case EXT_IRQ4:
- case EXT_IRQ5:
- *(volatile unsigned char *)IER &= ~bitmask;
- H8300_GPIO_FREE(H8300_GPIO_P9, bitmask);
- break;
- }
-}
diff --git a/arch/h8300/platform/h8300h/ptrace_h8300h.c b/arch/h8300/platform/h8300h/ptrace_h8300h.c
deleted file mode 100644
index 4f1ed0279633..000000000000
--- a/arch/h8300/platform/h8300h/ptrace_h8300h.c
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * linux/arch/h8300/platform/h8300h/ptrace_h8300h.c
- * ptrace cpu depend helper functions
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file COPYING in the main directory of
- * this archive for more details.
- */
-
-#include <linux/linkage.h>
-#include <linux/sched.h>
-#include <asm/ptrace.h>
-
-#define CCR_MASK 0x6f /* mode/imask not set */
-#define BREAKINST 0x5730 /* trapa #3 */
-
-/* Mapping from PT_xxx to the stack offset at which the register is
- saved. Notice that usp has no stack-slot and needs to be treated
- specially (see get_reg/put_reg below). */
-static const int h8300_register_offset[] = {
- PT_REG(er1), PT_REG(er2), PT_REG(er3), PT_REG(er4),
- PT_REG(er5), PT_REG(er6), PT_REG(er0), PT_REG(orig_er0),
- PT_REG(ccr), PT_REG(pc)
-};
-
-/* read register */
-long h8300_get_reg(struct task_struct *task, int regno)
-{
- switch (regno) {
- case PT_USP:
- return task->thread.usp + sizeof(long)*2;
- case PT_CCR:
- return *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
- default:
- return *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]);
- }
-}
-
-/* write register */
-int h8300_put_reg(struct task_struct *task, int regno, unsigned long data)
-{
- unsigned short oldccr;
- switch (regno) {
- case PT_USP:
- task->thread.usp = data - sizeof(long)*2;
- case PT_CCR:
- oldccr = *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
- oldccr &= ~CCR_MASK;
- data &= CCR_MASK;
- data |= oldccr;
- *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
- break;
- default:
- *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
- break;
- }
- return 0;
-}
-
-/* disable singlestep */
-void user_disable_single_step(struct task_struct *child)
-{
- if((long)child->thread.breakinfo.addr != -1L) {
- *child->thread.breakinfo.addr = child->thread.breakinfo.inst;
- child->thread.breakinfo.addr = (unsigned short *)-1L;
- }
-}
-
-/* calculate next pc */
-enum jump_type {none, /* normal instruction */
- jabs, /* absolute address jump */
- ind, /* indirect address jump */
- ret, /* return to subrutine */
- reg, /* register indexed jump */
- relb, /* pc relative jump (byte offset) */
- relw, /* pc relative jump (word offset) */
- };
-
-/* opcode decode table define
- ptn: opcode pattern
- msk: opcode bitmask
- len: instruction length (<0 next table index)
- jmp: jump operation mode */
-struct optable {
- unsigned char bitpattern;
- unsigned char bitmask;
- signed char length;
- signed char type;
-} __attribute__((aligned(1),packed));
-
-#define OPTABLE(ptn,msk,len,jmp) \
- { \
- .bitpattern = ptn, \
- .bitmask = msk, \
- .length = len, \
- .type = jmp, \
- }
-
-static const struct optable optable_0[] = {
- OPTABLE(0x00,0xff, 1,none), /* 0x00 */
- OPTABLE(0x01,0xff,-1,none), /* 0x01 */
- OPTABLE(0x02,0xfe, 1,none), /* 0x02-0x03 */
- OPTABLE(0x04,0xee, 1,none), /* 0x04-0x05/0x14-0x15 */
- OPTABLE(0x06,0xfe, 1,none), /* 0x06-0x07 */
- OPTABLE(0x08,0xea, 1,none), /* 0x08-0x09/0x0c-0x0d/0x18-0x19/0x1c-0x1d */
- OPTABLE(0x0a,0xee, 1,none), /* 0x0a-0x0b/0x1a-0x1b */
- OPTABLE(0x0e,0xee, 1,none), /* 0x0e-0x0f/0x1e-0x1f */
- OPTABLE(0x10,0xfc, 1,none), /* 0x10-0x13 */
- OPTABLE(0x16,0xfe, 1,none), /* 0x16-0x17 */
- OPTABLE(0x20,0xe0, 1,none), /* 0x20-0x3f */
- OPTABLE(0x40,0xf0, 1,relb), /* 0x40-0x4f */
- OPTABLE(0x50,0xfc, 1,none), /* 0x50-0x53 */
- OPTABLE(0x54,0xfd, 1,ret ), /* 0x54/0x56 */
- OPTABLE(0x55,0xff, 1,relb), /* 0x55 */
- OPTABLE(0x57,0xff, 1,none), /* 0x57 */
- OPTABLE(0x58,0xfb, 2,relw), /* 0x58/0x5c */
- OPTABLE(0x59,0xfb, 1,reg ), /* 0x59/0x5b */
- OPTABLE(0x5a,0xfb, 2,jabs), /* 0x5a/0x5e */
- OPTABLE(0x5b,0xfb, 2,ind ), /* 0x5b/0x5f */
- OPTABLE(0x60,0xe8, 1,none), /* 0x60-0x67/0x70-0x77 */
- OPTABLE(0x68,0xfa, 1,none), /* 0x68-0x69/0x6c-0x6d */
- OPTABLE(0x6a,0xfe,-2,none), /* 0x6a-0x6b */
- OPTABLE(0x6e,0xfe, 2,none), /* 0x6e-0x6f */
- OPTABLE(0x78,0xff, 4,none), /* 0x78 */
- OPTABLE(0x79,0xff, 2,none), /* 0x79 */
- OPTABLE(0x7a,0xff, 3,none), /* 0x7a */
- OPTABLE(0x7b,0xff, 2,none), /* 0x7b */
- OPTABLE(0x7c,0xfc, 2,none), /* 0x7c-0x7f */
- OPTABLE(0x80,0x80, 1,none), /* 0x80-0xff */
-};
-
-static const struct optable optable_1[] = {
- OPTABLE(0x00,0xff,-3,none), /* 0x0100 */
- OPTABLE(0x40,0xf0,-3,none), /* 0x0140-0x14f */
- OPTABLE(0x80,0xf0, 1,none), /* 0x0180-0x018f */
- OPTABLE(0xc0,0xc0, 2,none), /* 0x01c0-0x01ff */
-};
-
-static const struct optable optable_2[] = {
- OPTABLE(0x00,0x20, 2,none), /* 0x6a0?/0x6a8?/0x6b0?/0x6b8? */
- OPTABLE(0x20,0x20, 3,none), /* 0x6a2?/0x6aa?/0x6b2?/0x6ba? */
-};
-
-static const struct optable optable_3[] = {
- OPTABLE(0x69,0xfb, 2,none), /* 0x010069/0x01006d/014069/0x01406d */
- OPTABLE(0x6b,0xff,-4,none), /* 0x01006b/0x01406b */
- OPTABLE(0x6f,0xff, 3,none), /* 0x01006f/0x01406f */
- OPTABLE(0x78,0xff, 5,none), /* 0x010078/0x014078 */
-};
-
-static const struct optable optable_4[] = {
- OPTABLE(0x00,0x78, 3,none), /* 0x0100690?/0x01006d0?/0140690/0x01406d0?/0x0100698?/0x01006d8?/0140698?/0x01406d8? */
- OPTABLE(0x20,0x78, 4,none), /* 0x0100692?/0x01006d2?/0140692/0x01406d2?/0x010069a?/0x01006da?/014069a?/0x01406da? */
-};
-
-static const struct optables_list {
- const struct optable *ptr;
- int size;
-} optables[] = {
-#define OPTABLES(no) \
- { \
- .ptr = optable_##no, \
- .size = sizeof(optable_##no) / sizeof(struct optable), \
- }
- OPTABLES(0),
- OPTABLES(1),
- OPTABLES(2),
- OPTABLES(3),
- OPTABLES(4),
-
-};
-
-const unsigned char condmask[] = {
- 0x00,0x40,0x01,0x04,0x02,0x08,0x10,0x20
-};
-
-static int isbranch(struct task_struct *task,int reson)
-{
- unsigned char cond = h8300_get_reg(task, PT_CCR);
- /* encode complex conditions */
- /* B4: N^V
- B5: Z|(N^V)
- B6: C|Z */
- __asm__("bld #3,%w0\n\t"
- "bxor #1,%w0\n\t"
- "bst #4,%w0\n\t"
- "bor #2,%w0\n\t"
- "bst #5,%w0\n\t"
- "bld #2,%w0\n\t"
- "bor #0,%w0\n\t"
- "bst #6,%w0\n\t"
- :"=&r"(cond)::"cc");
- cond &= condmask[reson >> 1];
- if (!(reson & 1))
- return cond == 0;
- else
- return cond != 0;
-}
-
-static unsigned short *getnextpc(struct task_struct *child, unsigned short *pc)
-{
- const struct optable *op;
- unsigned char *fetch_p;
- unsigned char inst;
- unsigned long addr;
- unsigned long *sp;
- int op_len,regno;
- op = optables[0].ptr;
- op_len = optables[0].size;
- fetch_p = (unsigned char *)pc;
- inst = *fetch_p++;
- do {
- if ((inst & op->bitmask) == op->bitpattern) {
- if (op->length < 0) {
- op = optables[-op->length].ptr;
- op_len = optables[-op->length].size + 1;
- inst = *fetch_p++;
- } else {
- switch (op->type) {
- case none:
- return pc + op->length;
- case jabs:
- addr = *(unsigned long *)pc;
- return (unsigned short *)(addr & 0x00ffffff);
- case ind:
- addr = *pc & 0xff;
- return (unsigned short *)(*(unsigned long *)addr);
- case ret:
- sp = (unsigned long *)h8300_get_reg(child, PT_USP);
- /* user stack frames
- | er0 | temporary saved
- +--------+
- | exp | exception stack frames
- +--------+
- | ret pc | userspace return address
- */
- return (unsigned short *)(*(sp+2) & 0x00ffffff);
- case reg:
- regno = (*pc >> 4) & 0x07;
- if (regno == 0)
- addr = h8300_get_reg(child, PT_ER0);
- else
- addr = h8300_get_reg(child, regno-1+PT_ER1);
- return (unsigned short *)addr;
- case relb:
- if (inst == 0x55 || isbranch(child,inst & 0x0f))
- pc = (unsigned short *)((unsigned long)pc +
- ((signed char)(*fetch_p)));
- return pc+1; /* skip myself */
- case relw:
- if (inst == 0x5c || isbranch(child,(*fetch_p & 0xf0) >> 4))
- pc = (unsigned short *)((unsigned long)pc +
- ((signed short)(*(pc+1))));
- return pc+2; /* skip myself */
- }
- }
- } else
- op++;
- } while(--op_len > 0);
- return NULL;
-}
-
-/* Set breakpoint(s) to simulate a single step from the current PC. */
-
-void user_enable_single_step(struct task_struct *child)
-{
- unsigned short *nextpc;
- nextpc = getnextpc(child,(unsigned short *)h8300_get_reg(child, PT_PC));
- child->thread.breakinfo.addr = nextpc;
- child->thread.breakinfo.inst = *nextpc;
- *nextpc = BREAKINST;
-}
-
-asmlinkage void trace_trap(unsigned long bp)
-{
- if ((unsigned long)current->thread.breakinfo.addr == bp) {
- user_disable_single_step(current);
- force_sig(SIGTRAP,current);
- } else
- force_sig(SIGILL,current);
-}
-
diff --git a/arch/h8300/platform/h8s/Makefile b/arch/h8300/platform/h8s/Makefile
deleted file mode 100644
index bf1241883766..000000000000
--- a/arch/h8300/platform/h8s/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-# Reuse any files we can from the H8S
-#
-
-obj-y := ints_h8s.o ptrace_h8s.o
diff --git a/arch/h8300/platform/h8s/edosk2674/Makefile b/arch/h8300/platform/h8s/edosk2674/Makefile
deleted file mode 100644
index 8e349723bb4f..000000000000
--- a/arch/h8300/platform/h8s/edosk2674/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := crt0_$(MODEL).o
diff --git a/arch/h8300/platform/h8s/edosk2674/crt0_ram.S b/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
deleted file mode 100644
index 5ed191b37cde..000000000000
--- a/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * linux/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Platform depend startup
- * Target Archtecture: EDOSK-2674
- * Memory Layout : RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-#include <asm/regs267x.h>
-
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-
- .global __start
- .global __command_line
- .global __platform_gpio_table
- .global __target_name
-
- .h8300s
-
- .section .text
- .file "crt0_ram.S"
-
- /* CPU Reset entry */
-__start:
- mov.l #RAMEND,sp
- ldc #0x80,ccr
- ldc #0x00,exr
-
- /* Peripheral Setup */
- bclr #4,@INTCR:8 /* interrupt mode 2 */
- bset #5,@INTCR:8
- bclr #0,@IER+1:16
- bset #1,@ISCRL+1:16 /* IRQ0 Positive Edge */
- bclr #0,@ISCRL+1:16
-
-#if defined(CONFIG_MTD_UCLINUX)
- /* move romfs image */
- jsr @__move_romfs
-#endif
-
- /* .bss clear */
- mov.l #__sbss,er5
- mov.l er5,er6
- mov.l #__ebss,er4
- sub.l er5,er4
- shlr #2,er4
- sub.l er0,er0
-1:
- mov.l er0,@er5
- adds #4,er5
- dec.l #1,er4
- bne 1b
-
- /* copy kernel commandline */
- mov.l #COMMAND_START,er5
- mov.l #_command_line,er6
- mov.w #512,r4
- eepmov.w
-
- /* uClinux kernel start */
- ldc #0x90,ccr /* running kernel */
- mov.l #_init_thread_union,sp
- add.l #0x2000,sp
- jsr @_start_kernel
-_exit:
-
- jmp _exit
-
- rts
-
- /* I/O port assign information */
-__platform_gpio_table:
- mov.l #gpio_table,er0
- rts
-
-gpio_table:
- ;; P1DDR
- ;; used,ddr
- .byte 0x00,0x00
- ;; P2DDR
- .byte 0x00,0x00
- ;; P3DDR
- .byte 0x3f,0x3a
- ;; dummy
- .byte 0x00,0x00
- ;; P5DDR
- .byte 0x00,0x00
- ;; P6DDR
- .byte 0x00,0x00
- ;; P7DDR
- .byte 0x00,0x00
- ;; P8DDR
- .byte 0x00,0x00
- ;; dummy
- .byte 0x00,0x00
- ;; PADDR
- .byte 0xff,0xff
- ;; PBDDR
- .byte 0xff,0x00
- ;; PCDDR
- .byte 0xff,0x00
- ;; PDDDR
- .byte 0xff,0x00
- ;; PEDDR
- .byte 0xff,0x00
- ;; PFDDR
- .byte 0xff,0xff
- ;; PGDDR
- .byte 0x0f,0x0f
- ;; PHDDR
- .byte 0x0f,0x0f
-
-__target_name:
- .asciz "EDOSK-2674"
-
- .section .bootvec,"ax"
- jmp @__start
diff --git a/arch/h8300/platform/h8s/edosk2674/crt0_rom.S b/arch/h8300/platform/h8s/edosk2674/crt0_rom.S
deleted file mode 100644
index 06d1d7f324ca..000000000000
--- a/arch/h8300/platform/h8s/edosk2674/crt0_rom.S
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * linux/arch/h8300/platform/h8s/edosk2674/crt0_rom.S
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Platform depend startup
- * Target Archtecture: EDOSK-2674
- * Memory Layout : ROM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-#include <asm/regs267x.h>
-
- .global __start
- .global __command_line
- .global __platform_gpio_table
- .global __target_name
-
- .h8300s
- .section .text
- .file "crt0_rom.S"
-
- /* CPU Reset entry */
-__start:
- mov.l #__ramend,sp
- ldc #0x80,ccr
- ldc #0,exr
-
- /* Peripheral Setup */
-;BSC/GPIO setup
- mov.l #init_regs,er0
- mov.w #0xffff,e2
-1:
- mov.w @er0+,r2
- beq 2f
- mov.w @er0+,r1
- mov.b r1l,@er2
- bra 1b
-
-2:
-;SDRAM setup
-#define SDRAM_SMR 0x400040
-
- mov.b #0,r0l
- mov.b r0l,@DRACCR:16
- mov.w #0x188,r0
- mov.w r0,@REFCR:16
- mov.w #0x85b4,r0
- mov.w r0,@DRAMCR:16
- mov.b #0,r1l
- mov.b r1l,@SDRAM_SMR
- mov.w #0x84b4,r0
- mov.w r0,@DRAMCR:16
-;special thanks to Arizona Cooperative Power
-
- /* copy .data */
- mov.l #__begin_data,er5
- mov.l #__sdata,er6
- mov.l #__edata,er4
- sub.l er6,er4
- shlr.l #2,er4
-1:
- mov.l @er5+,er0
- mov.l er0,@er6
- adds #4,er6
- dec.l #1,er4
- bne 1b
-
- /* .bss clear */
- mov.l #__sbss,er5
- mov.l #__ebss,er4
- sub.l er5,er4
- shlr.l #2,er4
- sub.l er0,er0
-1:
- mov.l er0,@er5
- adds #4,er5
- dec.l #1,er4
- bne 1b
-
- /* copy kernel commandline */
- mov.l #COMMAND_START,er5
- mov.l #__command_line,er6
- mov.w #512,r4
- eepmov.w
-
- /* linux kernel start */
- ldc #0x90,ccr /* running kernel */
- mov.l #_init_thread_union,sp
- add.l #0x2000,sp
- jsr @_start_kernel
-_exit:
-
- jmp _exit
-
- rts
-
- /* I/O port assign information */
-__platform_gpio_table:
- mov.l #gpio_table,er0
- rts
-
-#define INIT_REGS_DATA(REGS,DATA) \
- .word ((REGS) & 0xffff),DATA
-
-init_regs:
-INIT_REGS_DATA(ASTCR,0xff)
-INIT_REGS_DATA(RDNCR,0x00)
-INIT_REGS_DATA(ABWCR,0x80)
-INIT_REGS_DATA(WTCRAH,0x27)
-INIT_REGS_DATA(WTCRAL,0x77)
-INIT_REGS_DATA(WTCRBH,0x71)
-INIT_REGS_DATA(WTCRBL,0x22)
-INIT_REGS_DATA(CSACRH,0x80)
-INIT_REGS_DATA(CSACRL,0x80)
-INIT_REGS_DATA(BROMCRH,0xa0)
-INIT_REGS_DATA(BROMCRL,0xa0)
-INIT_REGS_DATA(P3DDR,0x3a)
-INIT_REGS_DATA(P3ODR,0x06)
-INIT_REGS_DATA(PADDR,0xff)
-INIT_REGS_DATA(PFDDR,0xfe)
-INIT_REGS_DATA(PGDDR,0x0f)
-INIT_REGS_DATA(PHDDR,0x0f)
-INIT_REGS_DATA(PFCR0,0xff)
-INIT_REGS_DATA(PFCR2,0x0d)
-INIT_REGS_DATA(ITSR, 0x00)
-INIT_REGS_DATA(ITSR+1,0x3f)
-INIT_REGS_DATA(INTCR,0x20)
-
- .word 0
-
-gpio_table:
- ;; P1DDR
- .byte 0x00,0x00
- ;; P2DDR
- .byte 0x00,0x00
- ;; P3DDR
- .byte 0x00,0x00
- ;; dummy
- .byte 0x00,0x00
- ;; P5DDR
- .byte 0x00,0x00
- ;; P6DDR
- .byte 0x00,0x00
- ;; P7DDR
- .byte 0x00,0x00
- ;; P8DDR
- .byte 0x00,0x00
- ;; dummy
- .byte 0x00,0x00
- ;; PADDR
- .byte 0x00,0x00
- ;; PBDDR
- .byte 0x00,0x00
- ;; PCDDR
- .byte 0x00,0x00
- ;; PDDDR
- .byte 0x00,0x00
- ;; PEDDR
- .byte 0x00,0x00
- ;; PFDDR
- .byte 0x00,0x00
- ;; PGDDR
- .byte 0x00,0x00
- ;; PHDDR
- .byte 0x00,0x00
-
- .section .rodata
-__target_name:
- .asciz "EDOSK-2674"
-
- .section .bss
-__command_line:
- .space 512
-
- /* interrupt vector */
- .section .vectors,"ax"
- .long __start
- .long __start
-vector = 2
- .rept 126
- .long _interrupt_redirect_table+vector*4
-vector = vector + 1
- .endr
diff --git a/arch/h8300/platform/h8s/generic/Makefile b/arch/h8300/platform/h8s/generic/Makefile
deleted file mode 100644
index 44b4685c664c..000000000000
--- a/arch/h8300/platform/h8s/generic/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y = crt0_$(MODEL).o
diff --git a/arch/h8300/platform/h8s/generic/crt0_ram.S b/arch/h8300/platform/h8s/generic/crt0_ram.S
deleted file mode 100644
index 7018915de74f..000000000000
--- a/arch/h8300/platform/h8s/generic/crt0_ram.S
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * linux/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Platform depend startup
- * Target Archtecture: generic
- * Memory Layout : RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-#include <asm/regs267x.h>
-
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-
- .global __start
- .global __command_line
- .global __platform_gpio_table
- .global __target_name
-
- .h8300s
-
- .section .text
- .file "crt0_ram.S"
-
- /* CPU Reset entry */
-__start:
- mov.l #RAMEND,sp
- ldc #0x80,ccr
- ldc #0x00,exr
-
- /* Peripheral Setup */
- bclr #4,@INTCR:8 /* interrupt mode 2 */
- bset #5,@INTCR:8
-
-#if defined(CONFIG_MTD_UCLINUX)
- /* move romfs image */
- jsr @__move_romfs
-#endif
-
- /* .bss clear */
- mov.l #__sbss,er5
- mov.l er5,er6
- mov.l #__ebss,er4
- sub.l er5,er4
- shlr #2,er4
- sub.l er0,er0
-1:
- mov.l er0,@er5
- adds #4,er5
- dec.l #1,er4
- bne 1b
-
- /* copy kernel commandline */
- mov.l #COMMAND_START,er5
- mov.l #_command_line,er6
- mov.w #512,r4
- eepmov.w
-
- /* uClinux kernel start */
- ldc #0x90,ccr /* running kernel */
- mov.l #_init_thread_union,sp
- add.l #0x2000,sp
- jsr @_start_kernel
-_exit:
-
- jmp _exit
-
- rts
-
- /* I/O port assign information */
-__platform_gpio_table:
- mov.l #gpio_table,er0
- rts
-
-gpio_table:
- ;; P1DDR
- ;; used,ddr
- .byte 0x00,0x00
- ;; P2DDR
- .byte 0x00,0x00
- ;; P3DDR
- .byte 0x00,0x00
- ;; dummy
- .byte 0x00,0x00
- ;; P5DDR
- .byte 0x00,0x00
- ;; P6DDR
- .byte 0x00,0x00
- ;; P7DDR
- .byte 0x00,0x00
- ;; P8DDR
- .byte 0x00,0x00
- ;; dummy
- .byte 0x00,0x00
- ;; PADDR
- .byte 0x00,0x00
- ;; PBDDR
- .byte 0x00,0x00
- ;; PCDDR
- .byte 0x00,0x00
- ;; PDDDR
- .byte 0x00,0x00
- ;; PEDDR
- .byte 0x00,0x00
- ;; PFDDR
- .byte 0x00,0x00
- ;; PGDDR
- .byte 0x00,0x00
- ;; PHDDR
- .byte 0x00,0x00
-
-__target_name:
- .asciz "generic"
-
- .section .bootvec,"ax"
- jmp @__start
diff --git a/arch/h8300/platform/h8s/generic/crt0_rom.S b/arch/h8300/platform/h8s/generic/crt0_rom.S
deleted file mode 100644
index 623ba7828193..000000000000
--- a/arch/h8300/platform/h8s/generic/crt0_rom.S
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * linux/arch/h8300/platform/h8s/generic/crt0_rom.S
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Platform depend startup
- * Target Archtecture: generic
- * Memory Layout : ROM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-#include <asm/regs267x.h>
-
- .global __start
- .global __command_line
- .global __platform_gpio_table
- .global __target_name
-
- .h8300s
- .section .text
- .file "crt0_rom.S"
-
- /* CPU Reset entry */
-__start:
- mov.l #__ramend,sp
- ldc #0x80,ccr
- ldc #0,exr
- bclr #4,@INTCR:8
- bset #5,@INTCR:8 /* Interrupt mode 2 */
-
- /* Peripheral Setup */
-
- /* copy .data */
-#if !defined(CONFIG_H8S_SIM)
- mov.l #__begin_data,er5
- mov.l #__sdata,er6
- mov.l #__edata,er4
- sub.l er6,er4
- shlr.l #2,er4
-1:
- mov.l @er5+,er0
- mov.l er0,@er6
- adds #4,er6
- dec.l #1,er4
- bne 1b
-#endif
-
- /* .bss clear */
- mov.l #__sbss,er5
- mov.l #__ebss,er4
- sub.l er5,er4
- shlr.l #2,er4
- sub.l er0,er0
-1:
- mov.l er0,@er5
- adds #4,er5
- dec.l #1,er4
- bne 1b
-
- /* linux kernel start */
- ldc #0x90,ccr /* running kernel */
- mov.l #_init_thread_union,sp
- add.l #0x2000,sp
- jsr @_start_kernel
-_exit:
-
- jmp _exit
-
- rts
-
- /* I/O port assign information */
-__platform_gpio_table:
- mov.l #gpio_table,er0
- rts
-
-gpio_table:
- ;; P1DDR
- .byte 0x00,0x00
- ;; P2DDR
- .byte 0x00,0x00
- ;; P3DDR
- .byte 0x00,0x00
- ;; P4DDR
- .byte 0x00,0x00
- ;; P5DDR
- .byte 0x00,0x00
- ;; P6DDR
- .byte 0x00,0x00
- ;; dummy
- .byte 0x00,0x00
- ;; P8DDR
- .byte 0x00,0x00
- ;; PADDR
- .byte 0x00,0x00
- ;; PBDDR
- .byte 0x00,0x00
- ;; PCDDR
- .byte 0x00,0x00
- ;; PDDDR
- .byte 0x00,0x00
- ;; PEDDR
- .byte 0x00,0x00
- ;; PFDDR
- .byte 0x00,0x00
- ;; PGDDR
- .byte 0x00,0x00
- ;; PHDDR
- .byte 0x00,0x00
-
- .section .rodata
-__target_name:
- .asciz "generic"
-
- .section .bss
-__command_line:
- .space 512
-
- /* interrupt vector */
- .section .vectors,"ax"
- .long __start
- .long __start
-vector = 2
- .rept 126-1
- .long _interrupt_redirect_table+vector*4
-vector = vector + 1
- .endr
diff --git a/arch/h8300/platform/h8s/irq.c b/arch/h8300/platform/h8s/irq.c
deleted file mode 100644
index f3a5511c16b1..000000000000
--- a/arch/h8300/platform/h8s/irq.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * linux/arch/h8300/platform/h8s/ints_h8s.c
- * Interrupt handling CPU variants
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- */
-
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-
-#include <asm/ptrace.h>
-#include <asm/traps.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/gpio-internal.h>
-#include <asm/regs267x.h>
-
-/* saved vector list */
-const int __initconst h8300_saved_vectors[] = {
-#if defined(CONFIG_GDB_DEBUG)
- TRACE_VEC,
- TRAP3_VEC,
-#endif
- -1
-};
-
-/* trap entry table */
-const H8300_VECTOR __initconst h8300_trap_table[] = {
- 0,0,0,0,0,
- trace_break, /* TRACE */
- 0,0,
- system_call, /* TRAPA #0 */
- 0,0,0,0,0,0,0
-};
-
-/* IRQ pin assignment */
-struct irq_pins {
- unsigned char port_no;
- unsigned char bit_no;
-} __attribute__((aligned(1),packed));
-/* ISTR = 0 */
-static const struct irq_pins irq_assign_table0[16]={
- {H8300_GPIO_P5,H8300_GPIO_B0},{H8300_GPIO_P5,H8300_GPIO_B1},
- {H8300_GPIO_P5,H8300_GPIO_B2},{H8300_GPIO_P5,H8300_GPIO_B3},
- {H8300_GPIO_P5,H8300_GPIO_B4},{H8300_GPIO_P5,H8300_GPIO_B5},
- {H8300_GPIO_P5,H8300_GPIO_B6},{H8300_GPIO_P5,H8300_GPIO_B7},
- {H8300_GPIO_P6,H8300_GPIO_B0},{H8300_GPIO_P6,H8300_GPIO_B1},
- {H8300_GPIO_P6,H8300_GPIO_B2},{H8300_GPIO_P6,H8300_GPIO_B3},
- {H8300_GPIO_P6,H8300_GPIO_B4},{H8300_GPIO_P6,H8300_GPIO_B5},
- {H8300_GPIO_PF,H8300_GPIO_B1},{H8300_GPIO_PF,H8300_GPIO_B2},
-};
-/* ISTR = 1 */
-static const struct irq_pins irq_assign_table1[16]={
- {H8300_GPIO_P8,H8300_GPIO_B0},{H8300_GPIO_P8,H8300_GPIO_B1},
- {H8300_GPIO_P8,H8300_GPIO_B2},{H8300_GPIO_P8,H8300_GPIO_B3},
- {H8300_GPIO_P8,H8300_GPIO_B4},{H8300_GPIO_P8,H8300_GPIO_B5},
- {H8300_GPIO_PH,H8300_GPIO_B2},{H8300_GPIO_PH,H8300_GPIO_B3},
- {H8300_GPIO_P2,H8300_GPIO_B0},{H8300_GPIO_P2,H8300_GPIO_B1},
- {H8300_GPIO_P2,H8300_GPIO_B2},{H8300_GPIO_P2,H8300_GPIO_B3},
- {H8300_GPIO_P2,H8300_GPIO_B4},{H8300_GPIO_P2,H8300_GPIO_B5},
- {H8300_GPIO_P2,H8300_GPIO_B6},{H8300_GPIO_P2,H8300_GPIO_B7},
-};
-
-/* IRQ to GPIO pin translation */
-#define IRQ_GPIO_MAP(irqbit,irq,port,bit) \
-do { \
- if (*(volatile unsigned short *)ITSR & irqbit) { \
- port = irq_assign_table1[irq - EXT_IRQ0].port_no; \
- bit = irq_assign_table1[irq - EXT_IRQ0].bit_no; \
- } else { \
- port = irq_assign_table0[irq - EXT_IRQ0].port_no; \
- bit = irq_assign_table0[irq - EXT_IRQ0].bit_no; \
- } \
-} while(0)
-
-int h8300_enable_irq_pin(unsigned int irq)
-{
- if (irq >= EXT_IRQ0 && irq <= EXT_IRQ15) {
- unsigned short ptn = 1 << (irq - EXT_IRQ0);
- unsigned int port_no,bit_no;
- IRQ_GPIO_MAP(ptn, irq, port_no, bit_no);
- if (H8300_GPIO_RESERVE(port_no, bit_no) == 0)
- return -EBUSY; /* pin already use */
- H8300_GPIO_DDR(port_no, bit_no, H8300_GPIO_INPUT);
- *(volatile unsigned short *)ISR &= ~ptn; /* ISR clear */
- }
-
- return 0;
-}
-
-void h8300_disable_irq_pin(unsigned int irq)
-{
- if (irq >= EXT_IRQ0 && irq <= EXT_IRQ15) {
- /* disable interrupt & release IRQ pin */
- unsigned short ptn = 1 << (irq - EXT_IRQ0);
- unsigned short port_no,bit_no;
- *(volatile unsigned short *)ISR &= ~ptn;
- *(volatile unsigned short *)IER &= ~ptn;
- IRQ_GPIO_MAP(ptn, irq, port_no, bit_no);
- H8300_GPIO_FREE(port_no, bit_no);
- }
-}
diff --git a/arch/h8300/platform/h8s/ptrace_h8s.c b/arch/h8300/platform/h8s/ptrace_h8s.c
deleted file mode 100644
index c058ab1a8495..000000000000
--- a/arch/h8300/platform/h8s/ptrace_h8s.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * linux/arch/h8300/platform/h8s/ptrace_h8s.c
- * ptrace cpu depend helper functions
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file COPYING in the main directory of
- * this archive for more details.
- */
-
-#include <linux/linkage.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <asm/ptrace.h>
-
-#define CCR_MASK 0x6f
-#define EXR_TRACE 0x80
-
-/* Mapping from PT_xxx to the stack offset at which the register is
- saved. Notice that usp has no stack-slot and needs to be treated
- specially (see get_reg/put_reg below). */
-static const int h8300_register_offset[] = {
- PT_REG(er1), PT_REG(er2), PT_REG(er3), PT_REG(er4),
- PT_REG(er5), PT_REG(er6), PT_REG(er0), PT_REG(orig_er0),
- PT_REG(ccr), PT_REG(pc), 0, PT_REG(exr)
-};
-
-/* read register */
-long h8300_get_reg(struct task_struct *task, int regno)
-{
- switch (regno) {
- case PT_USP:
- return task->thread.usp + sizeof(long)*2 + 2;
- case PT_CCR:
- case PT_EXR:
- return *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
- default:
- return *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]);
- }
-}
-
-/* write register */
-int h8300_put_reg(struct task_struct *task, int regno, unsigned long data)
-{
- unsigned short oldccr;
- switch (regno) {
- case PT_USP:
- task->thread.usp = data - sizeof(long)*2 - 2;
- case PT_CCR:
- oldccr = *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
- oldccr &= ~CCR_MASK;
- data &= CCR_MASK;
- data |= oldccr;
- *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
- break;
- case PT_EXR:
- /* exr modify not support */
- return -EIO;
- default:
- *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
- break;
- }
- return 0;
-}
-
-/* disable singlestep */
-void user_disable_single_step(struct task_struct *child)
-{
- *(unsigned short *)(child->thread.esp0 + h8300_register_offset[PT_EXR]) &= ~EXR_TRACE;
-}
-
-/* enable singlestep */
-void user_enable_single_step(struct task_struct *child)
-{
- *(unsigned short *)(child->thread.esp0 + h8300_register_offset[PT_EXR]) |= EXR_TRACE;
-}
-
-asmlinkage void trace_trap(unsigned long bp)
-{
- (void)bp;
- force_sig(SIGTRAP,current);
-}
-
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 1da17caac23c..67c3450309b7 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -53,3 +53,4 @@ generic-y += types.h
generic-y += ucontext.h
generic-y += unaligned.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
index 29d1f1b00016..0e7c1dbb37b2 100644
--- a/arch/hexagon/kernel/setup.c
+++ b/arch/hexagon/kernel/setup.c
@@ -32,9 +32,6 @@
#include <asm/hexagon_vm.h>
#include <asm/vm_mmu.h>
#include <asm/time.h>
-#ifdef CONFIG_OF
-#include <asm/prom.h>
-#endif
char cmd_line[COMMAND_LINE_SIZE];
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 7740ab10a171..b10d61bc0f2a 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -6,6 +6,7 @@ menu "Processor type and features"
config IA64
bool
+ select ARCH_MIGHT_HAVE_PC_PARPORT
select PCI if (!IA64_HP_SIM)
select ACPI if (!IA64_HP_SIM)
select PM if (!IA64_HP_SIM)
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index a3456f34f672..f93ee087e8fe 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -3,4 +3,5 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += kvm_para.h
generic-y += trace_clock.h
+generic-y += preempt.h
 generic-y += vtime.h
\ No newline at end of file
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 74a7cc3293bc..0d2bcb37ec35 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -424,6 +424,7 @@ extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap (volatile void __iomem *addr);
extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
+#define early_memremap(phys_addr, size) early_ioremap(phys_addr, size)
extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
{
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 989dd3fe8de1..db95f570705f 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -234,10 +234,6 @@ struct kvm_vm_data {
#define KVM_REQ_PTC_G 32
#define KVM_REQ_RESUME 33
-#define KVM_HPAGE_GFN_SHIFT(x) 0
-#define KVM_NR_PAGE_SIZES 1
-#define KVM_PAGES_PER_HPAGE(x) 1
-
struct kvm;
struct kvm_vcpu;
@@ -480,7 +476,7 @@ struct kvm_arch {
struct list_head assigned_dev_head;
struct iommu_domain *iommu_domain;
- int iommu_flags;
+ bool iommu_noncoherent;
unsigned long irq_sources_bitmap;
unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
diff --git a/arch/ia64/include/asm/xen/page-coherent.h b/arch/ia64/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..96e42f97fa1f
--- /dev/null
+++ b/arch/ia64/include/asm/xen/page-coherent.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H
+#define _ASM_IA64_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ void *vstart = (void*)__get_free_pages(flags, get_order(size));
+ *dma_handle = virt_to_phys(vstart);
+ return vstart;
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs) { }
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs) { }
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 556d0701a155..c25302fb48d9 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -85,4 +85,6 @@
#define SO_BUSY_POLL 46
+#define SO_MAX_PACING_RATE 47
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 5eb71d22c3d5..59d52e3aef12 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -882,40 +882,10 @@ __init void prefill_possible_map(void)
set_cpu_possible(i, true);
}
-static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- struct acpi_madt_local_sapic *lsapic;
cpumask_t tmp_map;
- int cpu, physid;
-
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
- return -EINVAL;
-
- if (!buffer.length || !buffer.pointer)
- return -EINVAL;
-
- obj = buffer.pointer;
- if (obj->type != ACPI_TYPE_BUFFER)
- {
- kfree(buffer.pointer);
- return -EINVAL;
- }
-
- lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
-
- if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
- (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
- kfree(buffer.pointer);
- return -EINVAL;
- }
-
- physid = ((lsapic->id << 8) | (lsapic->eid));
-
- kfree(buffer.pointer);
- buffer.length = ACPI_ALLOCATE_BUFFER;
- buffer.pointer = NULL;
+ int cpu;
cpumask_complement(&tmp_map, cpu_present_mask);
cpu = cpumask_first(&tmp_map);
@@ -934,9 +904,9 @@ static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
}
/* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
{
- return _acpi_map_lsapic(handle, pcpu);
+ return _acpi_map_lsapic(handle, physid, pcpu);
}
EXPORT_SYMBOL(acpi_map_lsapic);
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 51bce594eb83..da5b462e6de6 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -44,10 +44,15 @@
#define EFI_DEBUG 0
+static __initdata unsigned long palo_phys;
+
+static __initdata efi_config_table_type_t arch_tables[] = {
+ {PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID, "PALO", &palo_phys},
+ {NULL_GUID, NULL, 0},
+};
+
extern efi_status_t efi_call_phys (void *, ...);
-struct efi efi;
-EXPORT_SYMBOL(efi);
static efi_runtime_services_t *runtime;
static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
@@ -423,9 +428,9 @@ static u8 __init palo_checksum(u8 *buffer, u32 length)
* Parse and handle PALO table which is published at:
* http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
*/
-static void __init handle_palo(unsigned long palo_phys)
+static void __init handle_palo(unsigned long phys_addr)
{
- struct palo_table *palo = __va(palo_phys);
+ struct palo_table *palo = __va(phys_addr);
u8 checksum;
if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
@@ -467,12 +472,10 @@ void __init
efi_init (void)
{
void *efi_map_start, *efi_map_end;
- efi_config_table_t *config_tables;
efi_char16_t *c16;
u64 efi_desc_size;
char *cp, vendor[100] = "unknown";
int i;
- unsigned long palo_phys;
/*
* It's too early to be able to use the standard kernel command line
@@ -514,8 +517,6 @@ efi_init (void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff);
- config_tables = __va(efi.systab->tables);
-
/* Show what we know for posterity */
c16 = __va(efi.systab->fw_vendor);
if (c16) {
@@ -528,43 +529,10 @@ efi_init (void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);
- efi.mps = EFI_INVALID_TABLE_ADDR;
- efi.acpi = EFI_INVALID_TABLE_ADDR;
- efi.acpi20 = EFI_INVALID_TABLE_ADDR;
- efi.smbios = EFI_INVALID_TABLE_ADDR;
- efi.sal_systab = EFI_INVALID_TABLE_ADDR;
- efi.boot_info = EFI_INVALID_TABLE_ADDR;
- efi.hcdp = EFI_INVALID_TABLE_ADDR;
- efi.uga = EFI_INVALID_TABLE_ADDR;
-
palo_phys = EFI_INVALID_TABLE_ADDR;
- for (i = 0; i < (int) efi.systab->nr_tables; i++) {
- if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
- efi.mps = config_tables[i].table;
- printk(" MPS=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
- efi.acpi20 = config_tables[i].table;
- printk(" ACPI 2.0=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
- efi.acpi = config_tables[i].table;
- printk(" ACPI=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
- efi.smbios = config_tables[i].table;
- printk(" SMBIOS=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
- efi.sal_systab = config_tables[i].table;
- printk(" SALsystab=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
- efi.hcdp = config_tables[i].table;
- printk(" HCDP=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid,
- PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID) == 0) {
- palo_phys = config_tables[i].table;
- printk(" PALO=0x%lx", config_tables[i].table);
- }
- }
- printk("\n");
+ if (efi_config_init(arch_tables) != 0)
+ return;
if (palo_phys != EFI_INVALID_TABLE_ADDR)
handle_palo(palo_phys);
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index f8280a766a78..074fde49c9e6 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -947,7 +947,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accouting
+ * we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 4fc2e9569bb2..d86669bcdfb2 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1063,6 +1063,7 @@ check_bugs (void)
static int __init run_dmi_scan(void)
{
dmi_scan_machine();
+ dmi_memdev_walk();
dmi_set_dump_stack_arch_desc();
return 0;
}
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index bdfd8789b376..985bf80c622e 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1550,12 +1550,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages)
{
return 0;
}
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index bebdc36ebb0a..2b58c5f0bc38 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -3,3 +3,4 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += module.h
generic-y += trace_clock.h
+generic-y += preempt.h
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index a979a4198168..9fc78fc44445 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -45,7 +45,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
Flush all TLB and start new cycle. */
local_flush_tlb_all();
/* Fix version if needed.
- Note that we avoid version #0 to distingush NO_CONTEXT. */
+ Note that we avoid version #0 to distinguish NO_CONTEXT. */
if (!mc)
mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
}
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 24be7c8da86a..52966650114f 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -76,4 +76,6 @@
#define SO_BUSY_POLL 46
+#define SO_MAX_PACING_RATE 47
+
#endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 311a300d48cc..75f25a8e3001 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -1,6 +1,7 @@
config M68K
bool
default y
+ select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
select HAVE_IDE
select HAVE_AOUT if MMU
select HAVE_DEBUG_BUGVERBOSE
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 0721858fbd1e..2d75ae246167 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -62,17 +62,18 @@ struct nfhd_device {
static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
{
struct nfhd_device *dev = queue->queuedata;
- struct bio_vec *bvec;
- int i, dir, len, shift;
- sector_t sec = bio->bi_sector;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ int dir, len, shift;
+ sector_t sec = bio->bi_iter.bi_sector;
dir = bio_data_dir(bio);
shift = dev->bshift;
- bio_for_each_segment(bvec, bio, i) {
- len = bvec->bv_len;
+ bio_for_each_segment(bvec, bio, iter) {
+ len = bvec.bv_len;
len >>= 9;
nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
- bvec_to_phys(bvec));
+ bvec_to_phys(&bvec));
sec += len;
}
bio_endio(bio, 0);
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 09d77a862da3..a5d27f272a59 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -31,3 +31,4 @@ generic-y += trace_clock.h
generic-y += types.h
generic-y += word-at-a-time.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/m68k/include/asm/floppy.h b/arch/m68k/include/asm/floppy.h
index 697d50393dd0..47365b1ccbec 100644
--- a/arch/m68k/include/asm/floppy.h
+++ b/arch/m68k/include/asm/floppy.h
@@ -85,7 +85,7 @@ static int fd_request_irq(void)
{
if(MACH_IS_Q40)
return request_irq(FLOPPY_IRQ, floppy_hardint,
- IRQF_DISABLED, "floppy", floppy_hardint);
+ 0, "floppy", floppy_hardint);
else if(MACH_IS_SUN3X)
return sun3xflop_request_irq();
return -ENXIO;
diff --git a/arch/m68k/include/asm/sun3xflop.h b/arch/m68k/include/asm/sun3xflop.h
index 95231e2f9d64..a02ea3a7bb20 100644
--- a/arch/m68k/include/asm/sun3xflop.h
+++ b/arch/m68k/include/asm/sun3xflop.h
@@ -207,7 +207,7 @@ static int sun3xflop_request_irq(void)
if(!once) {
once = 1;
error = request_irq(FLOPPY_IRQ, sun3xflop_hardint,
- IRQF_DISABLED, "floppy", NULL);
+ 0, "floppy", NULL);
return ((error == 0) ? 0 : -1);
} else return 0;
}
diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h
index 639c731568b0..3fadc4a93d97 100644
--- a/arch/m68k/include/asm/uaccess.h
+++ b/arch/m68k/include/asm/uaccess.h
@@ -3,3 +3,10 @@
#else
#include <asm/uaccess_mm.h>
#endif
+
+#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
+#include <asm-generic/uaccess-unaligned.h>
+#else
+#define __get_user_unaligned(x, ptr) __get_user((x), (ptr))
+#define __put_user_unaligned(x, ptr) __put_user((x), (ptr))
+#endif
diff --git a/arch/m68k/platform/68000/timers.c b/arch/m68k/platform/68000/timers.c
index ec30acbfe6db..99a98698bc95 100644
--- a/arch/m68k/platform/68000/timers.c
+++ b/arch/m68k/platform/68000/timers.c
@@ -70,7 +70,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
static struct irqaction m68328_timer_irq = {
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER,
+ .flags = IRQF_TIMER,
.handler = hw_tick,
};
diff --git a/arch/m68k/platform/68360/config.c b/arch/m68k/platform/68360/config.c
index 0570741e5500..d493ac43fe3f 100644
--- a/arch/m68k/platform/68360/config.c
+++ b/arch/m68k/platform/68360/config.c
@@ -59,7 +59,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
static struct irqaction m68360_timer_irq = {
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER,
+ .flags = IRQF_TIMER,
.handler = hw_tick,
};
diff --git a/arch/m68k/platform/coldfire/pit.c b/arch/m68k/platform/coldfire/pit.c
index e8f3b97b0f77..493b3111d4c1 100644
--- a/arch/m68k/platform/coldfire/pit.c
+++ b/arch/m68k/platform/coldfire/pit.c
@@ -118,7 +118,7 @@ static irqreturn_t pit_tick(int irq, void *dummy)
static struct irqaction pit_irq = {
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER,
+ .flags = IRQF_TIMER,
.handler = pit_tick,
};
diff --git a/arch/m68k/platform/coldfire/sltimers.c b/arch/m68k/platform/coldfire/sltimers.c
index bb5a25ada848..831a08cf6f40 100644
--- a/arch/m68k/platform/coldfire/sltimers.c
+++ b/arch/m68k/platform/coldfire/sltimers.c
@@ -51,7 +51,7 @@ irqreturn_t mcfslt_profile_tick(int irq, void *dummy)
static struct irqaction mcfslt_profile_irq = {
.name = "profile timer",
- .flags = IRQF_DISABLED | IRQF_TIMER,
+ .flags = IRQF_TIMER,
.handler = mcfslt_profile_tick,
};
@@ -93,7 +93,7 @@ static irqreturn_t mcfslt_tick(int irq, void *dummy)
static struct irqaction mcfslt_timer_irq = {
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER,
+ .flags = IRQF_TIMER,
.handler = mcfslt_tick,
};
diff --git a/arch/m68k/platform/coldfire/timers.c b/arch/m68k/platform/coldfire/timers.c
index d06068e45764..cd496a20fcc7 100644
--- a/arch/m68k/platform/coldfire/timers.c
+++ b/arch/m68k/platform/coldfire/timers.c
@@ -83,7 +83,7 @@ static irqreturn_t mcftmr_tick(int irq, void *dummy)
static struct irqaction mcftmr_timer_irq = {
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER,
+ .flags = IRQF_TIMER,
.handler = mcftmr_tick,
};
@@ -171,7 +171,7 @@ irqreturn_t coldfire_profile_tick(int irq, void *dummy)
static struct irqaction coldfire_profile_irq = {
.name = "profile timer",
- .flags = IRQF_DISABLED | IRQF_TIMER,
+ .flags = IRQF_TIMER,
.handler = coldfire_profile_tick,
};
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index 6ae0ccb632cb..84d0c1d6b9b3 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -52,3 +52,4 @@ generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/metag/include/asm/mach/arch.h b/arch/metag/include/asm/mach/arch.h
index 12c5664fea6e..433f94624fa2 100644
--- a/arch/metag/include/asm/mach/arch.h
+++ b/arch/metag/include/asm/mach/arch.h
@@ -53,7 +53,7 @@ struct machine_desc {
/*
* Current machine - only accessible during boot.
*/
-extern struct machine_desc *machine_desc;
+extern const struct machine_desc *machine_desc;
/*
* Machine type table - also only accessible during boot
diff --git a/arch/metag/include/asm/prom.h b/arch/metag/include/asm/prom.h
deleted file mode 100644
index d2aa35d2228e..000000000000
--- a/arch/metag/include/asm/prom.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * arch/metag/include/asm/prom.h
- *
- * Copyright (C) 2012 Imagination Technologies Ltd.
- *
- * Based on ARM version:
- * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#ifndef __ASM_METAG_PROM_H
-#define __ASM_METAG_PROM_H
-
-#include <asm/setup.h>
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
-extern struct machine_desc *setup_machine_fdt(void *dt);
-extern void copy_fdt(void);
-
-#endif /* __ASM_METAG_PROM_H */
diff --git a/arch/metag/include/asm/setup.h b/arch/metag/include/asm/setup.h
index e13083b15dd0..e9fdee9452b1 100644
--- a/arch/metag/include/asm/setup.h
+++ b/arch/metag/include/asm/setup.h
@@ -3,6 +3,7 @@
#include <uapi/asm/setup.h>
+extern const struct machine_desc *setup_machine_fdt(void *dt);
void per_cpu_trap_init(unsigned long);
extern void __init dump_machine_table(void);
#endif /* _ASM_METAG_SETUP_H */
diff --git a/arch/metag/include/asm/topology.h b/arch/metag/include/asm/topology.h
index 23f5118f58db..8e9c0b3b9691 100644
--- a/arch/metag/include/asm/topology.h
+++ b/arch/metag/include/asm/topology.h
@@ -26,6 +26,8 @@
.last_balance = jiffies, \
.balance_interval = 1, \
.nr_balance_failed = 0, \
+ .max_newidle_lb_cost = 0, \
+ .next_decay_max_lb_cost = jiffies, \
}
#define cpu_to_node(cpu) ((void)(cpu), 0)
diff --git a/arch/metag/kernel/devtree.c b/arch/metag/kernel/devtree.c
index 7cd02529636e..18dd7aea9fdc 100644
--- a/arch/metag/kernel/devtree.c
+++ b/arch/metag/kernel/devtree.c
@@ -34,6 +34,19 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
return alloc_bootmem_align(size, align);
}
+static const void * __init arch_get_next_mach(const char *const **match)
+{
+ static const struct machine_desc *mdesc = __arch_info_begin;
+ const struct machine_desc *m = mdesc;
+
+ if (m >= __arch_info_end)
+ return NULL;
+
+ mdesc++;
+ *match = m->dt_compat;
+ return m;
+}
+
/**
* setup_machine_fdt - Machine setup when an dtb was passed to the kernel
* @dt: virtual address pointer to dt blob
@@ -41,74 +54,18 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
* If a dtb was passed to the kernel, then use it to choose the correct
* machine_desc and to setup the system.
*/
-struct machine_desc * __init setup_machine_fdt(void *dt)
+const struct machine_desc * __init setup_machine_fdt(void *dt)
{
- struct boot_param_header *devtree = dt;
- struct machine_desc *mdesc, *mdesc_best = NULL;
- unsigned int score, mdesc_score = ~1;
- unsigned long dt_root;
- const char *model;
+ const struct machine_desc *mdesc;
/* check device tree validity */
- if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
+ if (!early_init_dt_scan(dt))
return NULL;
- /* Search the mdescs for the 'best' compatible value match */
- initial_boot_params = devtree;
- dt_root = of_get_flat_dt_root();
-
- for_each_machine_desc(mdesc) {
- score = of_flat_dt_match(dt_root, mdesc->dt_compat);
- if (score > 0 && score < mdesc_score) {
- mdesc_best = mdesc;
- mdesc_score = score;
- }
- }
- if (!mdesc_best) {
- const char *prop;
- long size;
-
- pr_err("\nError: unrecognized/unsupported device tree compatible list:\n[ ");
-
- prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
- if (prop) {
- while (size > 0) {
- printk("'%s' ", prop);
- size -= strlen(prop) + 1;
- prop += strlen(prop) + 1;
- }
- }
- printk("]\n\n");
-
+ mdesc = of_flat_dt_match_machine(NULL, arch_get_next_mach);
+ if (!mdesc)
dump_machine_table(); /* does not return */
- }
-
- model = of_get_flat_dt_prop(dt_root, "model", NULL);
- if (!model)
- model = of_get_flat_dt_prop(dt_root, "compatible", NULL);
- if (!model)
- model = "<unknown>";
- pr_info("Machine: %s, model: %s\n", mdesc_best->name, model);
-
- /* Retrieve various information from the /chosen node */
- of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
-
- return mdesc_best;
-}
+ pr_info("Machine name: %s\n", mdesc->name);
-/**
- * copy_fdt - Copy device tree into non-init memory.
- *
- * We must copy the flattened device tree blob into non-init memory because the
- * unflattened device tree will reference the strings in it directly.
- */
-void __init copy_fdt(void)
-{
- void *alloc = early_init_dt_alloc_memory_arch(
- be32_to_cpu(initial_boot_params->totalsize), 0x40);
- if (alloc) {
- memcpy(alloc, initial_boot_params,
- be32_to_cpu(initial_boot_params->totalsize));
- initial_boot_params = alloc;
- }
+ return mdesc;
}
diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c
index 2a2c9d55187e..3b4b7f6c0950 100644
--- a/arch/metag/kernel/irq.c
+++ b/arch/metag/kernel/irq.c
@@ -159,44 +159,30 @@ void irq_ctx_exit(int cpu)
extern asmlinkage void __do_softirq(void);
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
{
- unsigned long flags;
struct thread_info *curctx;
union irq_ctx *irqctx;
u32 *isp;
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
-
- if (local_softirq_pending()) {
- curctx = current_thread_info();
- irqctx = softirq_ctx[smp_processor_id()];
- irqctx->tinfo.task = curctx->task;
-
- /* build the stack frame on the softirq stack */
- isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
-
- asm volatile (
- "MOV D0.5,%0\n"
- "SWAP A0StP,D0.5\n"
- "CALLR D1RtP,___do_softirq\n"
- "MOV A0StP,D0.5\n"
- :
- : "r" (isp)
- : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
- "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
- "D0.5"
- );
- /*
- * Shouldn't happen, we returned above if in_interrupt():
- */
- WARN_ON_ONCE(softirq_count());
- }
-
- local_irq_restore(flags);
+ curctx = current_thread_info();
+ irqctx = softirq_ctx[smp_processor_id()];
+ irqctx->tinfo.task = curctx->task;
+
+ /* build the stack frame on the softirq stack */
+ isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
+
+ asm volatile (
+ "MOV D0.5,%0\n"
+ "SWAP A0StP,D0.5\n"
+ "CALLR D1RtP,___do_softirq\n"
+ "MOV A0StP,D0.5\n"
+ :
+ : "r" (isp)
+ : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
+ "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
+ "D0.5"
+ );
}
#endif
diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c
index c396cd0b425f..8c4b3976bebf 100644
--- a/arch/metag/kernel/setup.c
+++ b/arch/metag/kernel/setup.c
@@ -42,7 +42,6 @@
#include <asm/mmu.h>
#include <asm/mmzone.h>
#include <asm/processor.h>
-#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/traps.h>
@@ -115,7 +114,7 @@ extern u32 __dtb_start[];
extern struct console dash_console;
#endif
-struct machine_desc *machine_desc __initdata;
+const struct machine_desc *machine_desc __initdata;
/*
* Map a Linux CPU number to a hardware thread ID
@@ -408,9 +407,7 @@ void __init setup_arch(char **cmdline_p)
cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id();
hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id();
- /* Copy device tree blob into non-init memory before unflattening */
- copy_fdt();
- unflatten_device_tree();
+ unflatten_and_copy_device_tree();
#ifdef CONFIG_SMP
smp_init_cpus();
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index 123919534b80..249fff66add3 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -12,7 +12,6 @@
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
-#include <linux/of_fdt.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -405,11 +404,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
"initrd");
}
#endif
-
-#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
- pr_err("%s(%llx, %llx)\n",
- __func__, start, end);
-}
-#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index b82f82b74319..8370114e78aa 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -1,5 +1,6 @@
config MICROBLAZE
def_bool y
+ select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_FUNCTION_TRACER
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index d3c51a6a601d..ce0bbf8f5640 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -3,3 +3,4 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
generic-y += syscalls.h
+generic-y += preempt.h
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index d52abb6812fa..935f9bec414a 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -127,8 +127,6 @@ extern void of_scan_pci_bridge(struct device_node *node,
extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
-extern int pci_read_irq_line(struct pci_dev *dev);
-
extern int pci_bus_find_capability(struct pci_bus *bus,
unsigned int devfn, int cap);
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index 9977816c5ad3..2f03ac815851 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -11,19 +11,10 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-
-#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
-
#ifndef _ASM_MICROBLAZE_PROM_H
#define _ASM_MICROBLAZE_PROM_H
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <asm/irq.h>
-#include <linux/atomic.h>
-#define HAVE_ARCH_DEVTREE_FIXUPS
+#include <linux/of.h>
/* Other Prototypes */
enum early_consoles {
@@ -33,32 +24,4 @@ enum early_consoles {
extern int of_early_console(void *version);
-/*
- * OF address retreival & translation
- */
-
-#ifdef CONFIG_PCI
-extern unsigned long pci_address_to_pio(phys_addr_t address);
-#define pci_address_to_pio pci_address_to_pio
-#endif /* CONFIG_PCI */
-
-/* Parse the ibm,dma-window property of an OF node into the busno, phys and
- * size parameters.
- */
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
- unsigned long *busno, unsigned long *phys, unsigned long *size);
-
-extern void kdump_move_device_tree(void);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
-/* These includes are put at the bottom because they may contain things
- * that are overridden by this file. Ideally they shouldn't be included
- * by this file, but there are a bunch of .c files that currently depend
- * on it. Eventually they will be cleaned up. */
-#include <linux/of_fdt.h>
-#include <linux/of_irq.h>
-#include <linux/platform_device.h>
-
#endif /* _ASM_MICROBLAZE_PROM_H */
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 0c4453f134cb..abdfb10e7eca 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/irq.h>
#include <linux/memblock.h>
+#include <linux/of_fdt.h>
#include <asm/prom.h>
#include <asm/page.h>
@@ -41,11 +42,6 @@
#include <asm/sections.h>
#include <asm/pci-bridge.h>
-void __init early_init_dt_add_memory_arch(u64 base, u64 size)
-{
- memblock_add(base, size);
-}
-
#ifdef CONFIG_EARLY_PRINTK
static char *stdout;
@@ -106,21 +102,10 @@ void __init early_init_devtree(void *params)
{
pr_debug(" -> early_init_devtree(%p)\n", params);
- /* Setup flat device-tree pointer */
- initial_boot_params = params;
-
- /* Retrieve various informations from the /chosen node of the
- * device-tree, including the platform type, initrd location and
- * size, TCE reserve, and more ...
- */
- of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
-
- /* Scan memory nodes and rebuild MEMBLOCKs */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
- of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+ early_init_dt_scan(params);
+ if (!strlen(boot_command_line))
+ strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
- /* Save command line for /proc/cmdline and then parse parameters */
- strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
parse_early_param();
memblock_allow_resize();
@@ -130,15 +115,6 @@ void __init early_init_devtree(void *params)
pr_debug(" <- early_init_devtree()\n");
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
-}
-#endif
-
/*******
*
* New implementation of the OF "find" APIs, return a refcounted
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 0775e036c526..8de8ebc309f1 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -16,6 +16,7 @@
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/debugfs.h>
+#include <linux/of_fdt.h>
#include <asm/setup.h>
#include <asm/sections.h>
@@ -50,7 +51,7 @@ char cmd_line[COMMAND_LINE_SIZE] __attribute__ ((section(".data")));
void __init setup_arch(char **cmdline_p)
{
- *cmdline_p = cmd_line;
+ *cmdline_p = boot_command_line;
console_verbose();
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index e4b3f33ef34c..827df4d003c8 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/cpuinfo.h>
#include <linux/cnt32_to_63.h>
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 1b93bf0892a0..66804adcacf0 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/export.h>
@@ -193,76 +194,6 @@ void pcibios_set_master(struct pci_dev *dev)
}
/*
- * Reads the interrupt pin to determine if interrupt is use by card.
- * If the interrupt is used, then gets the interrupt line from the
- * openfirmware and sets it in the pci_dev and pci_config line.
- */
-int pci_read_irq_line(struct pci_dev *pci_dev)
-{
- struct of_irq oirq;
- unsigned int virq;
-
- /* The current device-tree that iSeries generates from the HV
- * PCI informations doesn't contain proper interrupt routing,
- * and all the fallback would do is print out crap, so we
- * don't attempt to resolve the interrupts here at all, some
- * iSeries specific fixup does it.
- *
- * In the long run, we will hopefully fix the generated device-tree
- * instead.
- */
- pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
-
-#ifdef DEBUG
- memset(&oirq, 0xff, sizeof(oirq));
-#endif
- /* Try to get a mapping from the device-tree */
- if (of_irq_map_pci(pci_dev, &oirq)) {
- u8 line, pin;
-
- /* If that fails, lets fallback to what is in the config
- * space and map that through the default controller. We
- * also set the type to level low since that's what PCI
- * interrupts are. If your platform does differently, then
- * either provide a proper interrupt tree or don't use this
- * function.
- */
- if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
- return -1;
- if (pin == 0)
- return -1;
- if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
- line == 0xff || line == 0) {
- return -1;
- }
- pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
- line, pin);
-
- virq = irq_create_mapping(NULL, line);
- if (virq)
- irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
- } else {
- pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
- oirq.size, oirq.specifier[0], oirq.specifier[1],
- of_node_full_name(oirq.controller));
-
- virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
- }
- if (!virq) {
- pr_debug(" Failed to map !\n");
- return -1;
- }
-
- pr_debug(" Mapped to linux irq %d\n", virq);
-
- pci_dev->irq = virq;
-
- return 0;
-}
-EXPORT_SYMBOL(pci_read_irq_line);
-
-/*
* Platform support for /proc/bus/pci/X/Y mmap()s,
* modelled on the sparc64 implementation by Dave Miller.
* -- paulus.
@@ -960,7 +891,7 @@ void pcibios_setup_bus_devices(struct pci_bus *bus)
dev->dev.archdata.dma_data = (void *)PCI_DRAM_OFFSET;
/* Read default IRQs and fixup if necessary */
- pci_read_irq_line(dev);
+ dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
}
}
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index d9d81c219253..6e239123d6fe 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -20,7 +20,6 @@ platforms += mti-sead3
platforms += netlogic
platforms += pmcs-msp71xx
platforms += pnx833x
-platforms += powertv
platforms += ralink
platforms += rb532
platforms += sgi-ip22
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f75ab4a2f246..04957828d1b2 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1,6 +1,7 @@
config MIPS
bool
default y
+ select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_CONTEXT_TRACKING
select HAVE_GENERIC_DMA_COHERENT
select HAVE_IDE
@@ -8,6 +9,7 @@ config MIPS
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_ARCH_KGDB
+ select HAVE_ARCH_TRACEHOOK
select ARCH_HAVE_CUSTOM_GPIO_H
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -18,6 +20,7 @@ config MIPS
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_DEBUG_KMEMLEAK
+ select HAVE_SYSCALL_TRACEPOINTS
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
select RTC_LIB if !MACH_LOONGSON
@@ -146,6 +149,7 @@ config MIPS_COBALT
select CSRC_R4K
select CEVT_GT641XX
select DMA_NONCOHERENT
+ select EARLY_PRINTK_8250 if EARLY_PRINTK
select HW_HAS_PCI
select I8253
select I8259
@@ -412,23 +416,6 @@ config PMC_MSP
of integrated peripherals, interfaces and DSPs in addition to
a variety of MIPS cores.
-config POWERTV
- bool "Cisco PowerTV"
- select BOOT_ELF32
- select CEVT_R4K
- select CPU_MIPSR2_IRQ_VI
- select CPU_MIPSR2_IRQ_EI
- select CSRC_POWERTV
- select DMA_NONCOHERENT
- select HW_HAS_PCI
- select SYS_HAS_CPU_MIPS32_R2
- select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_BIG_ENDIAN
- select SYS_SUPPORTS_HIGHMEM
- select USB_OHCI_LITTLE_ENDIAN
- help
- This enables support for the Cisco PowerTV Platform.
-
config RALINK
bool "Ralink based machines"
select CEVT_R4K
@@ -811,7 +798,6 @@ source "arch/mips/jz4740/Kconfig"
source "arch/mips/lantiq/Kconfig"
source "arch/mips/lasat/Kconfig"
source "arch/mips/pmcs-msp71xx/Kconfig"
-source "arch/mips/powertv/Kconfig"
source "arch/mips/ralink/Kconfig"
source "arch/mips/sgi-ip27/Kconfig"
source "arch/mips/sibyte/Kconfig"
@@ -890,9 +876,6 @@ config CSRC_BCM1480
config CSRC_IOASIC
bool
-config CSRC_POWERTV
- bool
-
config CSRC_R4K
bool
@@ -1489,8 +1472,10 @@ config SYS_SUPPORTS_ZBOOT
bool
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
+ select HAVE_KERNEL_LZ4
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_XZ
config SYS_SUPPORTS_ZBOOT_UART16550
bool
@@ -1977,6 +1962,7 @@ config MIPS_VPE_APSP_API
config MIPS_CMP
bool "MIPS CMP framework support"
depends on SYS_SUPPORTS_MIPS_CMP
+ select SMP
select SYNC_R4K
select SYS_SUPPORTS_SMP
select SYS_SUPPORTS_SCHED_SMT if SMP
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 37871f0de15e..b147e7038ff0 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -20,6 +20,14 @@ config EARLY_PRINTK
doesn't cooperate with an X server. You should normally say N here,
unless you want to debug such a crash.
+config EARLY_PRINTK_8250
+ bool "8250/16550 and compatible serial early printk driver"
+ depends on EARLY_PRINTK
+ default n
+ help
+	  If you say Y here, it will be possible to use an 8250/16550 serial
+ port as the boot console.
+
config CMDLINE_BOOL
bool "Built-in kernel command line"
default n
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index ca8f8340d75f..de300b993607 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -285,15 +285,19 @@ endif
# Other need ECOFF, so we build a 32-bit ELF binary for them which we then
# convert to ECOFF using elf2ecoff.
#
+quiet_cmd_32 = OBJCOPY $@
+ cmd_32 = $(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
vmlinux.32: vmlinux
- $(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
+ $(call cmd,32)
#
# The 64-bit ELF tools are pretty broken so at this time we generate 64-bit
# ELF files from 32-bit files by conversion.
#
+quiet_cmd_64 = OBJCOPY $@
+ cmd_64 = $(OBJCOPY) -O $(64bit-bfd) $(OBJCOPYFLAGS) $< $@
vmlinux.64: vmlinux
- $(OBJCOPY) -O $(64bit-bfd) $(OBJCOPYFLAGS) $< $@
+ $(call cmd,64)
all: $(all-y)
@@ -302,10 +306,16 @@ $(boot-y): $(vmlinux-32) FORCE
$(Q)$(MAKE) $(build)=arch/mips/boot VMLINUX=$(vmlinux-32) \
$(bootvars-y) arch/mips/boot/$@
+ifdef CONFIG_SYS_SUPPORTS_ZBOOT
# boot/compressed
$(bootz-y): $(vmlinux-32) FORCE
$(Q)$(MAKE) $(build)=arch/mips/boot/compressed \
$(bootvars-y) 32bit-bfd=$(32bit-bfd) $@
+else
+vmlinuz: FORCE
+ @echo ' CONFIG_SYS_SUPPORTS_ZBOOT is not enabled'
+ /bin/false
+endif
CLEAN_FILES += vmlinux.32 vmlinux.64
diff --git a/arch/mips/alchemy/devboards/db1235.c b/arch/mips/alchemy/devboards/db1235.c
index c76a90f78664..bac19dc43d1d 100644
--- a/arch/mips/alchemy/devboards/db1235.c
+++ b/arch/mips/alchemy/devboards/db1235.c
@@ -59,7 +59,7 @@ void __init board_setup(void)
ret = -ENODEV;
}
if (ret)
- panic("cannot initialize board support\n");
+ panic("cannot initialize board support");
}
int __init db1235_arch_init(void)
diff --git a/arch/mips/ath79/dev-common.c b/arch/mips/ath79/dev-common.c
index c3b04c929f29..516225d207ee 100644
--- a/arch/mips/ath79/dev-common.c
+++ b/arch/mips/ath79/dev-common.c
@@ -20,7 +20,6 @@
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
-#include <asm/mach-ath79/ar933x_uart_platform.h>
#include "common.h"
#include "dev-common.h"
@@ -68,15 +67,11 @@ static struct resource ar933x_uart_resources[] = {
},
};
-static struct ar933x_uart_platform_data ar933x_uart_data;
static struct platform_device ar933x_uart_device = {
.name = "ar933x-uart",
.id = -1,
.resource = ar933x_uart_resources,
.num_resources = ARRAY_SIZE(ar933x_uart_resources),
- .dev = {
- .platform_data = &ar933x_uart_data,
- },
};
void __init ath79_register_uart(void)
@@ -93,7 +88,6 @@ void __init ath79_register_uart(void)
ath79_uart_data[0].uartclk = uart_clk_rate;
platform_device_register(&ath79_uart_device);
} else if (soc_is_ar933x()) {
- ar933x_uart_data.uartclk = uart_clk_rate;
platform_device_register(&ar933x_uart_device);
} else {
BUG();
diff --git a/arch/mips/bcm47xx/Makefile b/arch/mips/bcm47xx/Makefile
index f3bf6d5bfb9d..c52daf9b05c6 100644
--- a/arch/mips/bcm47xx/Makefile
+++ b/arch/mips/bcm47xx/Makefile
@@ -4,4 +4,5 @@
#
obj-y += irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
+obj-y += board.o
obj-$(CONFIG_BCM47XX_SSB) += wgt634u.o
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
new file mode 100644
index 000000000000..f3f6bfe68a2a
--- /dev/null
+++ b/arch/mips/bcm47xx/board.c
@@ -0,0 +1,309 @@
+#include <linux/export.h>
+#include <linux/string.h>
+#include <bcm47xx_board.h>
+#include <bcm47xx_nvram.h>
+
+struct bcm47xx_board_type {
+ const enum bcm47xx_board board;
+ const char *name;
+};
+
+struct bcm47xx_board_type_list1 {
+ struct bcm47xx_board_type board;
+ const char *value1;
+};
+
+struct bcm47xx_board_type_list2 {
+ struct bcm47xx_board_type board;
+ const char *value1;
+ const char *value2;
+};
+
+struct bcm47xx_board_type_list3 {
+ struct bcm47xx_board_type board;
+ const char *value1;
+ const char *value2;
+ const char *value3;
+};
+
+struct bcm47xx_board_store {
+ enum bcm47xx_board board;
+ char name[BCM47XX_BOARD_MAX_NAME];
+};
+
+/* model_name */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_model_name[] __initconst = {
+ {{BCM47XX_BOARD_DLINK_DIR130, "D-Link DIR-130"}, "DIR-130"},
+ {{BCM47XX_BOARD_DLINK_DIR330, "D-Link DIR-330"}, "DIR-330"},
+ { {0}, 0},
+};
+
+/* model_no */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_model_no[] __initconst = {
+ {{BCM47XX_BOARD_ASUS_WL700GE, "Asus WL700"}, "WL700"},
+ { {0}, 0},
+};
+
+/* machine_name */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_machine_name[] __initconst = {
+ {{BCM47XX_BOARD_LINKSYS_WRTSL54GS, "Linksys WRTSL54GS"}, "WRTSL54GS"},
+ { {0}, 0},
+};
+
+/* hardware_version */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_hardware_version[] __initconst = {
+ {{BCM47XX_BOARD_ASUS_RTN16, "Asus RT-N16"}, "RT-N16-"},
+ {{BCM47XX_BOARD_ASUS_WL320GE, "Asus WL320GE"}, "WL320G-"},
+ {{BCM47XX_BOARD_ASUS_WL330GE, "Asus WL330GE"}, "WL330GE-"},
+ {{BCM47XX_BOARD_ASUS_WL500GD, "Asus WL500GD"}, "WL500gd-"},
+ {{BCM47XX_BOARD_ASUS_WL500GPV1, "Asus WL500GP V1"}, "WL500gp-"},
+ {{BCM47XX_BOARD_ASUS_WL500GPV2, "Asus WL500GP V2"}, "WL500GPV2-"},
+ {{BCM47XX_BOARD_ASUS_WL500W, "Asus WL500W"}, "WL500gW-"},
+ {{BCM47XX_BOARD_ASUS_WL520GC, "Asus WL520GC"}, "WL520GC-"},
+ {{BCM47XX_BOARD_ASUS_WL520GU, "Asus WL520GU"}, "WL520GU-"},
+ {{BCM47XX_BOARD_BELKIN_F7D4301, "Belkin F7D4301"}, "F7D4301"},
+ { {0}, 0},
+};
+
+/* productid */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_productid[] __initconst = {
+ {{BCM47XX_BOARD_ASUS_RTAC66U, "Asus RT-AC66U"}, "RT-AC66U"},
+ {{BCM47XX_BOARD_ASUS_RTN10, "Asus RT-N10"}, "RT-N10"},
+ {{BCM47XX_BOARD_ASUS_RTN10D, "Asus RT-N10D"}, "RT-N10D"},
+ {{BCM47XX_BOARD_ASUS_RTN10U, "Asus RT-N10U"}, "RT-N10U"},
+ {{BCM47XX_BOARD_ASUS_RTN12, "Asus RT-N12"}, "RT-N12"},
+ {{BCM47XX_BOARD_ASUS_RTN12B1, "Asus RT-N12B1"}, "RT-N12B1"},
+ {{BCM47XX_BOARD_ASUS_RTN12C1, "Asus RT-N12C1"}, "RT-N12C1"},
+ {{BCM47XX_BOARD_ASUS_RTN12D1, "Asus RT-N12D1"}, "RT-N12D1"},
+ {{BCM47XX_BOARD_ASUS_RTN12HP, "Asus RT-N12HP"}, "RT-N12HP"},
+ {{BCM47XX_BOARD_ASUS_RTN15U, "Asus RT-N15U"}, "RT-N15U"},
+ {{BCM47XX_BOARD_ASUS_RTN16, "Asus RT-N16"}, "RT-N16"},
+ {{BCM47XX_BOARD_ASUS_RTN53, "Asus RT-N53"}, "RT-N53"},
+ {{BCM47XX_BOARD_ASUS_RTN66U, "Asus RT-N66U"}, "RT-N66U"},
+ {{BCM47XX_BOARD_ASUS_WL300G, "Asus WL300G"}, "WL300g"},
+ {{BCM47XX_BOARD_ASUS_WLHDD, "Asus WLHDD"}, "WLHDD"},
+ { {0}, 0},
+};
+
+/* ModelId */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_ModelId[] __initconst = {
+ {{BCM47XX_BOARD_DELL_TM2300, "Dell WX-5565"}, "WX-5565"},
+ {{BCM47XX_BOARD_MOTOROLA_WE800G, "Motorola WE800G"}, "WE800G"},
+ {{BCM47XX_BOARD_MOTOROLA_WR850GP, "Motorola WR850GP"}, "WR850GP"},
+ {{BCM47XX_BOARD_MOTOROLA_WR850GV2V3, "Motorola WR850G"}, "WR850G"},
+ { {0}, 0},
+};
+
+/* melco_id or buf1falo_id */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_melco_id[] __initconst = {
+ {{BCM47XX_BOARD_BUFFALO_WBR2_G54, "Buffalo WBR2-G54"}, "29bb0332"},
+ {{BCM47XX_BOARD_BUFFALO_WHR2_A54G54, "Buffalo WHR2-A54G54"}, "290441dd"},
+ {{BCM47XX_BOARD_BUFFALO_WHR_G125, "Buffalo WHR-G125"}, "32093"},
+ {{BCM47XX_BOARD_BUFFALO_WHR_G54S, "Buffalo WHR-G54S"}, "30182"},
+ {{BCM47XX_BOARD_BUFFALO_WHR_HP_G54, "Buffalo WHR-HP-G54"}, "30189"},
+ {{BCM47XX_BOARD_BUFFALO_WLA2_G54L, "Buffalo WLA2-G54L"}, "29129"},
+ {{BCM47XX_BOARD_BUFFALO_WZR_G300N, "Buffalo WZR-G300N"}, "31120"},
+ {{BCM47XX_BOARD_BUFFALO_WZR_RS_G54, "Buffalo WZR-RS-G54"}, "30083"},
+ {{BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP, "Buffalo WZR-RS-G54HP"}, "30103"},
+ { {0}, 0},
+};
+
+/* boot_hw_model, boot_hw_ver */
+static const
+struct bcm47xx_board_type_list2 bcm47xx_board_list_boot_hw[] __initconst = {
+ /* like WRT160N v3.0 */
+ {{BCM47XX_BOARD_CISCO_M10V1, "Cisco M10"}, "M10", "1.0"},
+ /* like WRT310N v2.0 */
+ {{BCM47XX_BOARD_CISCO_M20V1, "Cisco M20"}, "M20", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_E900V1, "Linksys E900 V1"}, "E900", "1.0"},
+ /* like WRT160N v3.0 */
+ {{BCM47XX_BOARD_LINKSYS_E1000V1, "Linksys E1000 V1"}, "E100", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_E1000V2, "Linksys E1000 V2"}, "E1000", "2.0"},
+ {{BCM47XX_BOARD_LINKSYS_E1000V21, "Linksys E1000 V2.1"}, "E1000", "2.1"},
+ {{BCM47XX_BOARD_LINKSYS_E1200V2, "Linksys E1200 V2"}, "E1200", "2.0"},
+ {{BCM47XX_BOARD_LINKSYS_E2000V1, "Linksys E2000 V1"}, "Linksys E2000", "1.0"},
+ /* like WRT610N v2.0 */
+ {{BCM47XX_BOARD_LINKSYS_E3000V1, "Linksys E3000 V1"}, "E300", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_E3200V1, "Linksys E3200 V1"}, "E3200", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_E4200V1, "Linksys E4200 V1"}, "E4200", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT150NV11, "Linksys WRT150N V1.1"}, "WRT150N", "1.1"},
+ {{BCM47XX_BOARD_LINKSYS_WRT150NV1, "Linksys WRT150N V1"}, "WRT150N", "1"},
+ {{BCM47XX_BOARD_LINKSYS_WRT160NV1, "Linksys WRT160N V1"}, "WRT160N", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT160NV3, "Linksys WRT160N V3"}, "WRT160N", "3.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT300NV11, "Linksys WRT300N V1.1"}, "WRT300N", "1.1"},
+ {{BCM47XX_BOARD_LINKSYS_WRT310NV1, "Linksys WRT310N V1"}, "WRT310N", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT310NV2, "Linksys WRT310N V2"}, "WRT310N", "2.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT54G3GV2, "Linksys WRT54G3GV2-VF"}, "WRT54G3GV2-VF", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT610NV1, "Linksys WRT610N V1"}, "WRT610N", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT610NV2, "Linksys WRT610N V2"}, "WRT610N", "2.0"},
+ { {0}, 0},
+};
+
+/* board_id */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_board_id[] __initconst = {
+ {{BCM47XX_BOARD_NETGEAR_WGR614V8, "Netgear WGR614 V8"}, "U12H072T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WGR614V9, "Netgear WGR614 V9"}, "U12H094T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR3300, "Netgear WNDR3300"}, "U12H093T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR3400V1, "Netgear WNDR3400 V1"}, "U12H155T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR3400V2, "Netgear WNDR3400 V2"}, "U12H187T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR3400VCNA, "Netgear WNDR3400 Vcna"}, "U12H155T01_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR3700V3, "Netgear WNDR3700 V3"}, "U12H194T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR4000, "Netgear WNDR4000"}, "U12H181T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR4500V1, "Netgear WNDR4500 V1"}, "U12H189T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR4500V2, "Netgear WNDR4500 V2"}, "U12H224T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR2000, "Netgear WNR2000"}, "U12H114T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR3500L, "Netgear WNR3500L"}, "U12H136T99_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR3500U, "Netgear WNR3500U"}, "U12H136T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR3500V2, "Netgear WNR3500 V2"}, "U12H127T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR3500V2VC, "Netgear WNR3500 V2vc"}, "U12H127T70_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR834BV2, "Netgear WNR834B V2"}, "U12H081T00_NETGEAR"},
+ { {0}, 0},
+};
+
+/* boardtype, boardnum, boardrev */
+static const
+struct bcm47xx_board_type_list3 bcm47xx_board_list_board[] __initconst = {
+ {{BCM47XX_BOARD_HUAWEI_E970, "Huawei E970"}, "0x048e", "0x5347", "0x11"},
+ {{BCM47XX_BOARD_PHICOMM_M1, "Phicomm M1"}, "0x0590", "80", "0x1104"},
+ {{BCM47XX_BOARD_ZTE_H218N, "ZTE H218N"}, "0x053d", "1234", "0x1305"},
+ { {0}, 0},
+};
+
+static const
+struct bcm47xx_board_type bcm47xx_board_unknown[] __initconst = {
+ {BCM47XX_BOARD_UNKNOWN, "Unknown Board"},
+};
+
+static struct bcm47xx_board_store bcm47xx_board = {BCM47XX_BOARD_NO, "Unknown Board"};
+
+static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
+{
+ char buf1[30];
+ char buf2[30];
+ char buf3[30];
+ const struct bcm47xx_board_type_list1 *e1;
+ const struct bcm47xx_board_type_list2 *e2;
+ const struct bcm47xx_board_type_list3 *e3;
+
+ if (bcm47xx_nvram_getenv("model_name", buf1, sizeof(buf1)) >= 0) {
+ for (e1 = bcm47xx_board_list_model_name; e1->value1; e1++) {
+ if (!strcmp(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("model_no", buf1, sizeof(buf1)) >= 0) {
+ for (e1 = bcm47xx_board_list_model_no; e1->value1; e1++) {
+ if (strstarts(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("machine_name", buf1, sizeof(buf1)) >= 0) {
+ for (e1 = bcm47xx_board_list_machine_name; e1->value1; e1++) {
+ if (strstarts(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0) {
+ for (e1 = bcm47xx_board_list_hardware_version; e1->value1; e1++) {
+ if (strstarts(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("productid", buf1, sizeof(buf1)) >= 0) {
+ for (e1 = bcm47xx_board_list_productid; e1->value1; e1++) {
+ if (!strcmp(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("ModelId", buf1, sizeof(buf1)) >= 0) {
+ for (e1 = bcm47xx_board_list_ModelId; e1->value1; e1++) {
+ if (!strcmp(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("melco_id", buf1, sizeof(buf1)) >= 0 ||
+ bcm47xx_nvram_getenv("buf1falo_id", buf1, sizeof(buf1)) >= 0) {
+ /* buffalo hardware, check id for specific hardware matches */
+ for (e1 = bcm47xx_board_list_melco_id; e1->value1; e1++) {
+ if (!strcmp(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("boot_hw_model", buf1, sizeof(buf1)) >= 0 &&
+ bcm47xx_nvram_getenv("boot_hw_ver", buf2, sizeof(buf2)) >= 0) {
+ for (e2 = bcm47xx_board_list_boot_hw; e2->value1; e2++) {
+ if (!strcmp(buf1, e2->value1) &&
+ !strcmp(buf2, e2->value2))
+ return &e2->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("board_id", buf1, sizeof(buf1)) >= 0) {
+ for (e1 = bcm47xx_board_list_board_id; e1->value1; e1++) {
+ if (!strcmp(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("boardtype", buf1, sizeof(buf1)) >= 0 &&
+ bcm47xx_nvram_getenv("boardnum", buf2, sizeof(buf2)) >= 0 &&
+ bcm47xx_nvram_getenv("boardrev", buf3, sizeof(buf3)) >= 0) {
+ for (e3 = bcm47xx_board_list_board; e3->value1; e3++) {
+ if (!strcmp(buf1, e3->value1) &&
+ !strcmp(buf2, e3->value2) &&
+ !strcmp(buf3, e3->value3))
+ return &e3->board;
+ }
+ }
+ return bcm47xx_board_unknown;
+}
+
+void __init bcm47xx_board_detect(void)
+{
+ int err;
+ char buf[10];
+ const struct bcm47xx_board_type *board_detected;
+
+ if (bcm47xx_board.board != BCM47XX_BOARD_NO)
+ return;
+
+ /* check if the nvram is available */
+ err = bcm47xx_nvram_getenv("boardtype", buf, sizeof(buf));
+
+ /* init of nvram failed, probably too early now */
+ if (err == -ENXIO) {
+ return;
+ }
+
+ board_detected = bcm47xx_board_get_nvram();
+ bcm47xx_board.board = board_detected->board;
+ strlcpy(bcm47xx_board.name, board_detected->name,
+ BCM47XX_BOARD_MAX_NAME);
+}
+
+enum bcm47xx_board bcm47xx_board_get(void)
+{
+ return bcm47xx_board.board;
+}
+EXPORT_SYMBOL(bcm47xx_board_get);
+
+const char *bcm47xx_board_get_name(void)
+{
+ return bcm47xx_board.name;
+}
+EXPORT_SYMBOL(bcm47xx_board_get_name);
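
Editor's note: the two exported helpers above are the whole public surface of the new board-detection code. A minimal consumer sketch, not part of this patch — the function name and log text are illustrative only, and it assumes bcm47xx_board_detect() has already run from plat_mem_setup(), as wired up in the setup.c hunk further down:

#include <linux/printk.h>
#include <bcm47xx_board.h>

static void example_report_board(void)
{
        /*
         * When no nvram entry in board.c matched, detection falls back to
         * BCM47XX_BOARD_UNKNOWN with the name "Unknown Board".
         */
        if (bcm47xx_board_get() == BCM47XX_BOARD_UNKNOWN)
                pr_info("bcm47xx: board not recognized from nvram\n");
        else
                pr_info("bcm47xx: detected board: %s\n",
                        bcm47xx_board_get_name());
}
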
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index cc40b74940f5..b4c585b1c62e 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -190,3 +190,23 @@ int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len)
return -ENOENT;
}
EXPORT_SYMBOL(bcm47xx_nvram_getenv);
+
+int bcm47xx_nvram_gpio_pin(const char *name)
+{
+ int i, err;
+ char nvram_var[10];
+ char buf[30];
+
+ for (i = 0; i < 16; i++) {
+ err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
+ if (err <= 0)
+ continue;
+ err = bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf));
+ if (err <= 0)
+ continue;
+ if (!strcmp(name, buf))
+ return i;
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL(bcm47xx_nvram_gpio_pin);
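
Editor's note: bcm47xx_nvram_gpio_pin() above scans the nvram variables gpio0..gpio15 for one whose value matches the requested name and returns that pin number, or -ENOENT. A hypothetical board-setup use — the "power" LED name and the gpio_request_one() registration are assumptions for illustration, not taken from this patch:

#include <linux/gpio.h>
#include <bcm47xx_nvram.h>

static int __init example_claim_power_led(void)
{
        int pin = bcm47xx_nvram_gpio_pin("power");

        if (pin < 0)    /* -ENOENT: no gpioN variable is named "power" */
                return pin;

        return gpio_request_one(pin, GPIOF_OUT_INIT_LOW, "bcm47xx:power");
}
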
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index 8c155afb1299..5cba318bc1cd 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -32,12 +32,37 @@
#include <asm/bootinfo.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/fw/cfe/cfe_error.h>
+#include <bcm47xx.h>
+#include <bcm47xx_board.h>
static int cfe_cons_handle;
+static u16 get_chip_id(void)
+{
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ return bcm47xx_bus.ssb.chip_id;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ return bcm47xx_bus.bcma.bus.chipinfo.id;
+#endif
+ }
+ return 0;
+}
+
const char *get_system_type(void)
{
- return "Broadcom BCM47XX";
+ static char buf[50];
+ u16 chip_id = get_chip_id();
+
+ snprintf(buf, sizeof(buf),
+ (chip_id > 0x9999) ? "Broadcom BCM%d (%s)" :
+ "Broadcom BCM%04X (%s)",
+ chip_id, bcm47xx_board_get_name());
+
+ return buf;
}
void prom_putchar(char c)
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index b2246cd9ca12..1f30571968e7 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -36,6 +36,7 @@
#include <asm/time.h>
#include <bcm47xx.h>
#include <bcm47xx_nvram.h>
+#include <bcm47xx_board.h>
union bcm47xx_bus bcm47xx_bus;
EXPORT_SYMBOL(bcm47xx_bus);
@@ -221,6 +222,7 @@ void __init plat_mem_setup(void)
_machine_restart = bcm47xx_machine_restart;
_machine_halt = bcm47xx_machine_halt;
pm_power_off = bcm47xx_machine_halt;
+ bcm47xx_board_detect();
}
static int __init bcm47xx_register_bus_complete(void)
diff --git a/arch/mips/bcm47xx/time.c b/arch/mips/bcm47xx/time.c
index 536374dcba78..2c85d9254b5e 100644
--- a/arch/mips/bcm47xx/time.c
+++ b/arch/mips/bcm47xx/time.c
@@ -27,10 +27,16 @@
#include <linux/ssb/ssb.h>
#include <asm/time.h>
#include <bcm47xx.h>
+#include <bcm47xx_nvram.h>
+#include <bcm47xx_board.h>
void __init plat_time_init(void)
{
unsigned long hz = 0;
+ u16 chip_id = 0;
+ char buf[10];
+ int len;
+ enum bcm47xx_board board = bcm47xx_board_get();
/*
* Use deterministic values for initial counter interrupt
@@ -43,15 +49,32 @@ void __init plat_time_init(void)
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
hz = ssb_cpu_clock(&bcm47xx_bus.ssb.mipscore) / 2;
+ chip_id = bcm47xx_bus.ssb.chip_id;
break;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
hz = bcma_cpu_clock(&bcm47xx_bus.bcma.bus.drv_mips) / 2;
+ chip_id = bcm47xx_bus.bcma.bus.chipinfo.id;
break;
#endif
}
+ if (chip_id == 0x5354) {
+ len = bcm47xx_nvram_getenv("clkfreq", buf, sizeof(buf));
+ if (len >= 0 && !strncmp(buf, "200", 4))
+ hz = 100000000;
+ }
+
+ switch (board) {
+ case BCM47XX_BOARD_ASUS_WL520GC:
+ case BCM47XX_BOARD_ASUS_WL520GU:
+ hz = 100000000;
+ break;
+ default:
+ break;
+ }
+
if (!hz)
hz = 100000000;
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 0048c0897896..ca0c343c9ea5 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -37,6 +37,10 @@ vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART16550) += $(obj)/uart-16550.o
vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY) += $(obj)/uart-alchemy.o
endif
+ifdef CONFIG_KERNEL_XZ
+vmlinuzobjs-y += $(obj)/../../lib/ashldi3.o
+endif
+
targets += vmlinux.bin
OBJCOPYFLAGS_vmlinux.bin := $(OBJCOPYFLAGS) -O binary -R .comment -S
$(obj)/vmlinux.bin: $(KBUILD_IMAGE) FORCE
@@ -44,8 +48,10 @@ $(obj)/vmlinux.bin: $(KBUILD_IMAGE) FORCE
tool_$(CONFIG_KERNEL_GZIP) = gzip
tool_$(CONFIG_KERNEL_BZIP2) = bzip2
+tool_$(CONFIG_KERNEL_LZ4) = lz4
tool_$(CONFIG_KERNEL_LZMA) = lzma
tool_$(CONFIG_KERNEL_LZO) = lzo
+tool_$(CONFIG_KERNEL_XZ) = xzkern
targets += vmlinux.bin.z
$(obj)/vmlinux.bin.z: $(obj)/vmlinux.bin FORCE
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index 2c9573098c0d..a8c6fd6a4406 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -43,7 +43,8 @@ void error(char *x)
/* activate the code for pre-boot environment */
#define STATIC static
-#ifdef CONFIG_KERNEL_GZIP
+#if defined(CONFIG_KERNEL_GZIP) || defined(CONFIG_KERNEL_XZ) || \
+ defined(CONFIG_KERNEL_LZ4)
void *memcpy(void *dest, const void *src, size_t n)
{
int i;
@@ -54,6 +55,8 @@ void *memcpy(void *dest, const void *src, size_t n)
d[i] = s[i];
return dest;
}
+#endif
+#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif
@@ -70,6 +73,10 @@ void *memset(void *s, int c, size_t n)
#include "../../../../lib/decompress_bunzip2.c"
#endif
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif
@@ -78,6 +85,10 @@ void *memset(void *s, int c, size_t n)
#include "../../../../lib/decompress_unlzo.c"
#endif
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
void decompress_kernel(unsigned long boot_heap_start)
{
unsigned long zimage_start, zimage_size;
diff --git a/arch/mips/boot/compressed/ld.script b/arch/mips/boot/compressed/ld.script
index 8e6b07ca2f5e..5a33409c7f63 100644
--- a/arch/mips/boot/compressed/ld.script
+++ b/arch/mips/boot/compressed/ld.script
@@ -8,6 +8,9 @@
OUTPUT_ARCH(mips)
ENTRY(start)
+PHDRS {
+ text PT_LOAD FLAGS(7); /* RWX */
+}
SECTIONS
{
/* Text and read-only data */
@@ -15,7 +18,7 @@ SECTIONS
.text : {
*(.text)
*(.rodata)
- }
+ }: text
/* End of text section */
/* Writable data */
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index b212ae12e5ac..331b837cec57 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -999,7 +999,7 @@ void __init plat_mem_setup(void)
if (total == 0)
panic("Unable to allocate memory from "
- "cvmx_bootmem_phy_alloc\n");
+ "cvmx_bootmem_phy_alloc");
}
/*
@@ -1081,7 +1081,7 @@ void __init device_tree_init(void)
/* Copy the default tree from init memory. */
initial_boot_params = early_init_dt_alloc_memory_arch(dt_size, 8);
if (initial_boot_params == NULL)
- panic("Could not allocate initial_boot_params\n");
+ panic("Could not allocate initial_boot_params");
memcpy(initial_boot_params, fdt, dt_size);
if (do_prune) {
diff --git a/arch/mips/cobalt/Makefile b/arch/mips/cobalt/Makefile
index 61a334ac43ac..558e94977942 100644
--- a/arch/mips/cobalt/Makefile
+++ b/arch/mips/cobalt/Makefile
@@ -5,5 +5,4 @@
obj-y := buttons.o irq.o lcd.o led.o reset.o rtc.o serial.o setup.o time.o
obj-$(CONFIG_PCI) += pci.o
-obj-$(CONFIG_EARLY_PRINTK) += console.o
obj-$(CONFIG_MTD_PHYSMAP) += mtd.o
diff --git a/arch/mips/cobalt/console.c b/arch/mips/cobalt/console.c
deleted file mode 100644
index d1ba701c9dd1..000000000000
--- a/arch/mips/cobalt/console.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * (C) P. Horton 2006
- */
-#include <linux/io.h>
-#include <linux/serial_reg.h>
-
-#include <cobalt.h>
-
-#define UART_BASE ((void __iomem *)CKSEG1ADDR(0x1c800000))
-
-void prom_putchar(char c)
-{
- if (cobalt_board_id <= COBALT_BRD_ID_QUBE1)
- return;
-
- while (!(readb(UART_BASE + UART_LSR) & UART_LSR_THRE))
- ;
-
- writeb(c, UART_BASE + UART_TX);
-}
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index ec3b2c417f7c..9a8c2fe8d334 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -17,6 +17,7 @@
#include <asm/bootinfo.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/gt64120.h>
#include <cobalt.h>
@@ -112,6 +113,8 @@ void __init prom_init(void)
}
add_memory_region(0x0, memsz, BOOT_MEM_RAM);
+
+ setup_8250_early_printk_port(CKSEG1ADDR(0x1c800000), 0, 0);
}
void __init prom_free_prom_memory(void)
diff --git a/arch/mips/configs/db1235_defconfig b/arch/mips/configs/db1235_defconfig
index e2b4ad55462f..28e49f226dc0 100644
--- a/arch/mips/configs/db1235_defconfig
+++ b/arch/mips/configs/db1235_defconfig
@@ -351,7 +351,6 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
-CONFIG_MMC_CLKGATE=y
CONFIG_MMC_AU1X=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
diff --git a/arch/mips/configs/powertv_defconfig b/arch/mips/configs/powertv_defconfig
deleted file mode 100644
index 7fda0ce5f692..000000000000
--- a/arch/mips/configs/powertv_defconfig
+++ /dev/null
@@ -1,136 +0,0 @@
-CONFIG_POWERTV=y
-CONFIG_BOOTLOADER_FAMILY="R2"
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_1000=y
-CONFIG_PREEMPT=y
-# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
-CONFIG_CROSS_COMPILE=""
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_RD_GZIP is not set
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-CONFIG_KALLSYMS_ALL=y
-# CONFIG_PCSPKR_PLATFORM is not set
-# CONFIG_EPOLL is not set
-# CONFIG_SIGNALFD is not set
-# CONFIG_EVENTFD is not set
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_SLUB_DEBUG is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_PCI=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_PNP=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
-CONFIG_INET6_AH=y
-CONFIG_INET6_ESP=y
-CONFIG_INET6_IPCOMP=y
-# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET6_XFRM_MODE_BEET is not set
-# CONFIG_IPV6_SIT is not set
-CONFIG_IPV6_TUNNEL=y
-CONFIG_NETFILTER=y
-# CONFIG_BRIDGE_NETFILTER is not set
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
-CONFIG_IP_NF_IPTABLES=y
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_ARPTABLES=y
-CONFIG_IP_NF_ARPFILTER=y
-CONFIG_IP6_NF_IPTABLES=y
-CONFIG_IP6_NF_FILTER=y
-CONFIG_BRIDGE=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_TBF=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=32768
-# CONFIG_MISC_DEVICES is not set
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_ATA=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_WLAN is not set
-CONFIG_USB_RTL8150=y
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_MFD_SUPPORT is not set
-# CONFIG_VGA_ARB is not set
-CONFIG_USB_HIDDEV=y
-CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_EHCI_HCD=y
-# CONFIG_USB_EHCI_TT_NEWSCHED is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_SERIAL_CONSOLE=y
-CONFIG_USB_SERIAL_CP210X=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
-# CONFIG_DNOTIFY is not set
-CONFIG_FUSE_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_PREEMPT is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-# CONFIG_EARLY_PRINTK is not set
-CONFIG_CMDLINE_BOOL=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 22afed16ccde..41a2fa1fa12e 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -118,7 +118,7 @@
* 7 FPU/R4k timer
*
* We handle the IRQ according to _our_ priority (see setup.c),
- * then we just return. If multiple IRQs are pending then we will
+ * then we just return. If multiple IRQs are pending then we will
* just take another exception, big deal.
*/
.align 5
@@ -146,7 +146,7 @@
/*
* Find irq with highest priority
*/
- PTR_LA t1,cpu_mask_nr_tbl
+ PTR_LA t1,cpu_mask_nr_tbl
1: lw t2,(t1)
nop
and t2,t0
@@ -195,7 +195,7 @@
/*
* Find irq with highest priority
*/
- PTR_LA t1,asic_mask_nr_tbl
+ PTR_LA t1,asic_mask_nr_tbl
2: lw t2,(t1)
nop
and t2,t0
@@ -221,7 +221,7 @@
FEXPORT(cpu_all_int) # HALT, timers, software junk
li a0,DEC_CPU_IRQ_BASE
srl t0,CAUSEB_IP
- li t1,CAUSEF_IP>>CAUSEB_IP # mask
+ li t1,CAUSEF_IP>>CAUSEB_IP # mask
b 1f
li t2,4 # nr of bits / 2
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index 4b3e3a4375a6..e04d973ce5aa 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -1,7 +1,7 @@
/*
* DEC I/O ASIC interrupts.
*
- * Copyright (c) 2002, 2003 Maciej W. Rozycki
+ * Copyright (c) 2002, 2003, 2013 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -51,22 +51,51 @@ static struct irq_chip ioasic_irq_type = {
.irq_unmask = unmask_ioasic_irq,
};
-void clear_ioasic_dma_irq(unsigned int irq)
+static void clear_ioasic_dma_irq(struct irq_data *d)
{
u32 sir;
- sir = ~(1 << (irq - ioasic_irq_base));
+ sir = ~(1 << (d->irq - ioasic_irq_base));
ioasic_write(IO_REG_SIR, sir);
+ fast_iob();
}
static struct irq_chip ioasic_dma_irq_type = {
.name = "IO-ASIC-DMA",
- .irq_ack = ack_ioasic_irq,
+ .irq_ack = clear_ioasic_dma_irq,
.irq_mask = mask_ioasic_irq,
- .irq_mask_ack = ack_ioasic_irq,
.irq_unmask = unmask_ioasic_irq,
+ .irq_eoi = clear_ioasic_dma_irq,
};
+/*
+ * I/O ASIC implements two kinds of DMA interrupts, informational and
+ * error interrupts.
+ *
+ * The formers do not stop DMA and should be cleared as soon as possible
+ * so that if they retrigger before the handler has completed, usually as
+ * a side effect of actions taken by the handler, then they are reissued.
+ * These use the `handle_edge_irq' handler that clears the request right
+ * away.
+ *
+ * The latter stop DMA and do not resume it until the interrupt has been
+ * cleared. This cannot be done until after a corrective action has been
+ * taken and this also means they will not retrigger. Therefore they use
+ * the `handle_fasteoi_irq' handler that only clears the request on the
+ * way out. Because MIPS processor interrupt inputs, one of which the I/O
+ * ASIC is cascaded to, are level-triggered it is recommended that error
+ * DMA interrupt action handlers are registered with the IRQF_ONESHOT flag
+ * set so that they are run with the interrupt line masked.
+ *
+ * This mask has `1' bits in the positions of informational interrupts.
+ */
+#define IO_IRQ_DMA_INFO \
+ (IO_IRQ_MASK(IO_INR_SCC0A_RXDMA) | \
+ IO_IRQ_MASK(IO_INR_SCC1A_RXDMA) | \
+ IO_IRQ_MASK(IO_INR_ISDN_TXDMA) | \
+ IO_IRQ_MASK(IO_INR_ISDN_RXDMA) | \
+ IO_IRQ_MASK(IO_INR_ASC_DMA))
+
void __init init_ioasic_irqs(int base)
{
int i;
@@ -79,7 +108,9 @@ void __init init_ioasic_irqs(int base)
irq_set_chip_and_handler(i, &ioasic_irq_type,
handle_level_irq);
for (; i < base + IO_IRQ_LINES; i++)
- irq_set_chip(i, &ioasic_dma_irq_type);
+ irq_set_chip_and_handler(i, &ioasic_dma_irq_type,
+ 1 << (i - base) & IO_IRQ_DMA_INFO ?
+ handle_edge_irq : handle_fasteoi_irq);
ioasic_irq_base = base;
}
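
Editor's note: the new comment block above recommends registering error (fasteoi) DMA interrupt handlers with IRQF_ONESHOT so the cascaded, level-triggered line stays masked until the corrective action has completed. A hypothetical driver-side request showing that pattern — the IRQ number, handler and name are illustrative, not part of this patch:

#include <linux/interrupt.h>

static irqreturn_t example_dma_err_handler(int irq, void *dev_id)
{
        /* corrective action runs here, with the I/O ASIC line still masked */
        return IRQ_HANDLED;
}

static int example_request_dma_err_irq(unsigned int dma_err_irq, void *dev)
{
        /* NULL primary handler + IRQF_ONESHOT: handled in a thread, line
         * unmasked only once the thread function has returned */
        return request_threaded_irq(dma_err_irq, NULL, example_dma_err_handler,
                                    IRQF_ONESHOT, "ioasic-dma-err", dev);
}
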
diff --git a/arch/mips/dec/prom/call_o32.S b/arch/mips/dec/prom/call_o32.S
index c0d1522d448f..8c8498159e43 100644
--- a/arch/mips/dec/prom/call_o32.S
+++ b/arch/mips/dec/prom/call_o32.S
@@ -14,7 +14,7 @@
/* Maximum number of arguments supported. Must be even! */
#define O32_ARGC 32
-/* Number of static registers we save. */
+/* Number of static registers we save. */
#define O32_STATC 11
/* Frame size for both of the above. */
#define O32_FRAMESZ (4 * O32_ARGC + SZREG * O32_STATC)
diff --git a/arch/mips/dec/prom/init.c b/arch/mips/dec/prom/init.c
index 468f665de7bb..4e1761e0a09a 100644
--- a/arch/mips/dec/prom/init.c
+++ b/arch/mips/dec/prom/init.c
@@ -104,7 +104,7 @@ void __init prom_init(void)
if (prom_is_rex(magic))
rex_clear_cache();
- /* Register the early console. */
+ /* Register the early console. */
register_prom_console();
/* Were we compiled with the right CPU option? */
diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c
index 0aadac742900..8c62316f22f4 100644
--- a/arch/mips/dec/prom/memory.c
+++ b/arch/mips/dec/prom/memory.c
@@ -22,7 +22,7 @@ volatile unsigned long mem_err; /* So we know an error occurred */
/*
* Probe memory in 4MB chunks, waiting for an error to tell us we've fallen
- * off the end of real memory. Only suitable for the 2100/3100's (PMAX).
+ * off the end of real memory. Only suitable for the 2100/3100's (PMAX).
*/
#define CHUNK_SIZE 0x400000
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index 741cb4235bde..56e6e2c23683 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(ioasic_base);
/*
* IRQ routing and priority tables. Priorites are set as follows:
*
- * KN01 KN230 KN02 KN02-BA KN02-CA KN03
+ * KN01 KN230 KN02 KN02-BA KN02-CA KN03
*
* MEMORY CPU CPU CPU ASIC CPU CPU
* RTC CPU CPU CPU ASIC CPU CPU
@@ -413,7 +413,7 @@ static void __init dec_init_kn02(void)
/*
* Machine-specific initialisation for KN02-BA, aka DS5000/1xx
- * (xx = 20, 25, 33), aka 3min. Also applies to KN04(-BA), aka
+ * (xx = 20, 25, 33), aka 3min. Also applies to KN04(-BA), aka
* DS5000/150, aka 4min.
*/
static int kn02ba_interrupt[DEC_NR_INTS] __initdata = {
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 454ddf9bb76f..1acbb8b77a71 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -11,5 +11,6 @@ generic-y += sections.h
generic-y += segment.h
generic-y += serial.h
generic-y += trace_clock.h
+generic-y += preempt.h
generic-y += ucontext.h
generic-y += xor.h
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index 13d61c002e4f..3f745459fdb5 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -58,7 +58,7 @@
/*
* Memory segments (64bit kernel mode addresses)
- * The compatibility segments use the full 64-bit sign extended value. Note
+ * The compatibility segments use the full 64-bit sign extended value. Note
* the R8000 doesn't have them so don't reference these in generic MIPS code.
*/
#define XKUSEG _CONST64_(0x0000000000000000)
@@ -131,7 +131,7 @@
/*
* The ultimate limited of the 64-bit MIPS architecture: 2 bits for selecting
- * the region, 3 bits for the CCA mode. This leaves 59 bits of which the
+ * the region, 3 bits for the CCA mode. This leaves 59 bits of which the
* R8000 implements most with its 48-bit physical address space.
*/
#define TO_PHYS_MASK _CONST64_(0x07ffffffffffffff) /* 2^^59 - 1 */
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 08b607969a16..7eed2f261710 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -1,5 +1,5 @@
/*
- * Atomic operations that C can't guarantee us. Useful for
+ * Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*
* But use these as seldom as possible since they are much more slower
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 314ab5532019..f26d8e1bf3c3 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -18,7 +18,7 @@
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
+ * any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
@@ -43,7 +43,7 @@
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
+ * two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
@@ -57,7 +57,7 @@
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
*/
diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h
index 68f37e3eccc7..c75025f27c20 100644
--- a/arch/mips/include/asm/cacheops.h
+++ b/arch/mips/include/asm/cacheops.h
@@ -14,56 +14,52 @@
/*
* Cache Operations available on all MIPS processors with R4000-style caches
*/
-#define Index_Invalidate_I 0x00
-#define Index_Writeback_Inv_D 0x01
-#define Index_Load_Tag_I 0x04
-#define Index_Load_Tag_D 0x05
-#define Index_Store_Tag_I 0x08
-#define Index_Store_Tag_D 0x09
-#if defined(CONFIG_CPU_LOONGSON2)
-#define Hit_Invalidate_I 0x00
-#else
-#define Hit_Invalidate_I 0x10
-#endif
-#define Hit_Invalidate_D 0x11
-#define Hit_Writeback_Inv_D 0x15
+#define Index_Invalidate_I 0x00
+#define Index_Writeback_Inv_D 0x01
+#define Index_Load_Tag_I 0x04
+#define Index_Load_Tag_D 0x05
+#define Index_Store_Tag_I 0x08
+#define Index_Store_Tag_D 0x09
+#define Hit_Invalidate_I 0x10
+#define Hit_Invalidate_D 0x11
+#define Hit_Writeback_Inv_D 0x15
/*
* R4000-specific cacheops
*/
-#define Create_Dirty_Excl_D 0x0d
-#define Fill 0x14
-#define Hit_Writeback_I 0x18
-#define Hit_Writeback_D 0x19
+#define Create_Dirty_Excl_D 0x0d
+#define Fill 0x14
+#define Hit_Writeback_I 0x18
+#define Hit_Writeback_D 0x19
/*
* R4000SC and R4400SC-specific cacheops
*/
-#define Index_Invalidate_SI 0x02
-#define Index_Writeback_Inv_SD 0x03
-#define Index_Load_Tag_SI 0x06
-#define Index_Load_Tag_SD 0x07
-#define Index_Store_Tag_SI 0x0A
-#define Index_Store_Tag_SD 0x0B
-#define Create_Dirty_Excl_SD 0x0f
-#define Hit_Invalidate_SI 0x12
-#define Hit_Invalidate_SD 0x13
-#define Hit_Writeback_Inv_SD 0x17
-#define Hit_Writeback_SD 0x1b
-#define Hit_Set_Virtual_SI 0x1e
-#define Hit_Set_Virtual_SD 0x1f
+#define Index_Invalidate_SI 0x02
+#define Index_Writeback_Inv_SD 0x03
+#define Index_Load_Tag_SI 0x06
+#define Index_Load_Tag_SD 0x07
+#define Index_Store_Tag_SI 0x0A
+#define Index_Store_Tag_SD 0x0B
+#define Create_Dirty_Excl_SD 0x0f
+#define Hit_Invalidate_SI 0x12
+#define Hit_Invalidate_SD 0x13
+#define Hit_Writeback_Inv_SD 0x17
+#define Hit_Writeback_SD 0x1b
+#define Hit_Set_Virtual_SI 0x1e
+#define Hit_Set_Virtual_SD 0x1f
/*
* R5000-specific cacheops
*/
-#define R5K_Page_Invalidate_S 0x17
+#define R5K_Page_Invalidate_S 0x17
/*
* RM7000-specific cacheops
*/
-#define Page_Invalidate_T 0x16
-#define Index_Store_Tag_T 0x0a
-#define Index_Load_Tag_T 0x06
+#define Page_Invalidate_T 0x16
+#define Index_Store_Tag_T 0x0a
+#define Index_Load_Tag_T 0x06
/*
* R10000-specific cacheops
@@ -71,17 +67,22 @@
* Cacheops 0x02, 0x06, 0x0a, 0x0c-0x0e, 0x16, 0x1a and 0x1e are unused.
* Most of the _S cacheops are identical to the R4000SC _SD cacheops.
*/
-#define Index_Writeback_Inv_S 0x03
-#define Index_Load_Tag_S 0x07
-#define Index_Store_Tag_S 0x0B
-#define Hit_Invalidate_S 0x13
-#define Cache_Barrier 0x14
-#define Hit_Writeback_Inv_S 0x17
-#define Index_Load_Data_I 0x18
-#define Index_Load_Data_D 0x19
-#define Index_Load_Data_S 0x1b
-#define Index_Store_Data_I 0x1c
-#define Index_Store_Data_D 0x1d
-#define Index_Store_Data_S 0x1f
+#define Index_Writeback_Inv_S 0x03
+#define Index_Load_Tag_S 0x07
+#define Index_Store_Tag_S 0x0B
+#define Hit_Invalidate_S 0x13
+#define Cache_Barrier 0x14
+#define Hit_Writeback_Inv_S 0x17
+#define Index_Load_Data_I 0x18
+#define Index_Load_Data_D 0x19
+#define Index_Load_Data_S 0x1b
+#define Index_Store_Data_I 0x1c
+#define Index_Store_Data_D 0x1d
+#define Index_Store_Data_S 0x1f
+
+/*
+ * Loongson2-specific cacheops
+ */
+#define Hit_Invalidate_I_Loongson23 0x00
#endif /* __ASM_CACHEOPS_H */
diff --git a/arch/mips/include/asm/dec/ioasic.h b/arch/mips/include/asm/dec/ioasic.h
index a6e505a0e44b..be4d62a5a10e 100644
--- a/arch/mips/include/asm/dec/ioasic.h
+++ b/arch/mips/include/asm/dec/ioasic.h
@@ -31,8 +31,6 @@ static inline u32 ioasic_read(unsigned int reg)
return ioasic_base[reg / 4];
}
-extern void clear_ioasic_dma_irq(unsigned int irq);
-
extern void init_ioasic_irqs(int base);
extern int dec_ioasic_clocksource_init(void);
diff --git a/arch/mips/include/asm/dec/ioasic_addrs.h b/arch/mips/include/asm/dec/ioasic_addrs.h
index a8665a7611c2..8bd95971fe2d 100644
--- a/arch/mips/include/asm/dec/ioasic_addrs.h
+++ b/arch/mips/include/asm/dec/ioasic_addrs.h
@@ -40,7 +40,7 @@
#define IOASIC_FLOPPY (11*IOASIC_SLOT_SIZE) /* FDC (maxine) */
#define IOASIC_SCSI (12*IOASIC_SLOT_SIZE) /* ASC SCSI */
#define IOASIC_FDC_DMA (13*IOASIC_SLOT_SIZE) /* FDC DMA (maxine) */
-#define IOASIC_SCSI_DMA (14*IOASIC_SLOT_SIZE) /* ??? */
+#define IOASIC_SCSI_DMA (14*IOASIC_SLOT_SIZE) /* ??? */
#define IOASIC_RES_15 (15*IOASIC_SLOT_SIZE) /* unused? */
diff --git a/arch/mips/include/asm/dec/kn01.h b/arch/mips/include/asm/dec/kn01.h
index 0eb3241de706..88d9ffd74258 100644
--- a/arch/mips/include/asm/dec/kn01.h
+++ b/arch/mips/include/asm/dec/kn01.h
@@ -57,12 +57,12 @@
/*
* System Control & Status Register bits.
*/
-#define KN01_CSR_MNFMOD (1<<15) /* MNFMOD manufacturing jumper */
-#define KN01_CSR_STATUS (1<<14) /* self-test result status output */
-#define KN01_CSR_PARDIS (1<<13) /* parity error disable */
-#define KN01_CSR_CRSRTST (1<<12) /* PCC test output */
-#define KN01_CSR_MONO (1<<11) /* mono/color fb SIMM installed */
-#define KN01_CSR_MEMERR (1<<10) /* write timeout error status & ack*/
+#define KN01_CSR_MNFMOD (1<<15) /* MNFMOD manufacturing jumper */
+#define KN01_CSR_STATUS (1<<14) /* self-test result status output */
+#define KN01_CSR_PARDIS (1<<13) /* parity error disable */
+#define KN01_CSR_CRSRTST (1<<12) /* PCC test output */
+#define KN01_CSR_MONO (1<<11) /* mono/color fb SIMM installed */
+#define KN01_CSR_MEMERR (1<<10) /* write timeout error status & ack*/
#define KN01_CSR_VINT (1<<9) /* PCC area detect #2 status & ack */
#define KN01_CSR_TXDIS (1<<8) /* DZ11 transmit disable */
#define KN01_CSR_VBGTRG (1<<2) /* blue DAC voltage over green (r/o) */
diff --git a/arch/mips/include/asm/dec/kn02ca.h b/arch/mips/include/asm/dec/kn02ca.h
index 69dc2a9a2d0f..92c0fe256099 100644
--- a/arch/mips/include/asm/dec/kn02ca.h
+++ b/arch/mips/include/asm/dec/kn02ca.h
@@ -68,7 +68,7 @@
#define KN03CA_IO_SSR_ISDN_RST (1<<12) /* ~ISDN (Am79C30A) reset */
#define KN03CA_IO_SSR_FLOPPY_RST (1<<7) /* ~FDC (82077) reset */
-#define KN03CA_IO_SSR_VIDEO_RST (1<<6) /* ~framebuffer reset */
+#define KN03CA_IO_SSR_VIDEO_RST (1<<6) /* ~framebuffer reset */
#define KN03CA_IO_SSR_AB_RST (1<<5) /* ACCESS.bus reset */
#define KN03CA_IO_SSR_RES_4 (1<<4) /* unused */
#define KN03CA_IO_SSR_RES_3 (1<<4) /* unused */
diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h
index 446577712bee..c0ead6313845 100644
--- a/arch/mips/include/asm/dec/prom.h
+++ b/arch/mips/include/asm/dec/prom.h
@@ -49,7 +49,7 @@
#ifdef CONFIG_64BIT
-#define prom_is_rex(magic) 1 /* KN04 and KN05 are REX PROMs. */
+#define prom_is_rex(magic) 1 /* KN04 and KN05 are REX PROMs. */
#else /* !CONFIG_64BIT */
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index cf3ae2480b1d..a66359ef4ece 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -331,6 +331,7 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) \
dump_task_fpu(tsk, elf_fpregs)
+#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 4d6fa0bf1305..32966969f2f9 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -27,13 +27,6 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
-/* Don't support huge pages */
-#define KVM_HPAGE_GFN_SHIFT(x) 0
-
-/* We don't currently support large pages. */
-#define KVM_NR_PAGE_SIZES 1
-#define KVM_PAGES_PER_HPAGE(x) 1
-
/* Special address that contains the comm page, used for reducing # of traps */
diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart_platform.h b/arch/mips/include/asm/mach-ath79/ar933x_uart_platform.h
deleted file mode 100644
index 6cb30f2b7198..000000000000
--- a/arch/mips/include/asm/mach-ath79/ar933x_uart_platform.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Platform data definition for Atheros AR933X UART
- *
- * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef _AR933X_UART_PLATFORM_H
-#define _AR933X_UART_PLATFORM_H
-
-struct ar933x_uart_platform_data {
- unsigned uartclk;
-};
-
-#endif /* _AR933X_UART_PLATFORM_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
new file mode 100644
index 000000000000..00867dd05a69
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
@@ -0,0 +1,110 @@
+#ifndef __BCM47XX_BOARD_H
+#define __BCM47XX_BOARD_H
+
+enum bcm47xx_board {
+ BCM47XX_BOARD_ASUS_RTAC66U,
+ BCM47XX_BOARD_ASUS_RTN10,
+ BCM47XX_BOARD_ASUS_RTN10D,
+ BCM47XX_BOARD_ASUS_RTN10U,
+ BCM47XX_BOARD_ASUS_RTN12,
+ BCM47XX_BOARD_ASUS_RTN12B1,
+ BCM47XX_BOARD_ASUS_RTN12C1,
+ BCM47XX_BOARD_ASUS_RTN12D1,
+ BCM47XX_BOARD_ASUS_RTN12HP,
+ BCM47XX_BOARD_ASUS_RTN15U,
+ BCM47XX_BOARD_ASUS_RTN16,
+ BCM47XX_BOARD_ASUS_RTN53,
+ BCM47XX_BOARD_ASUS_RTN66U,
+ BCM47XX_BOARD_ASUS_WL300G,
+ BCM47XX_BOARD_ASUS_WL320GE,
+ BCM47XX_BOARD_ASUS_WL330GE,
+ BCM47XX_BOARD_ASUS_WL500GD,
+ BCM47XX_BOARD_ASUS_WL500GPV1,
+ BCM47XX_BOARD_ASUS_WL500GPV2,
+ BCM47XX_BOARD_ASUS_WL500W,
+ BCM47XX_BOARD_ASUS_WL520GC,
+ BCM47XX_BOARD_ASUS_WL520GU,
+ BCM47XX_BOARD_ASUS_WL700GE,
+ BCM47XX_BOARD_ASUS_WLHDD,
+
+ BCM47XX_BOARD_BELKIN_F7D4301,
+
+ BCM47XX_BOARD_BUFFALO_WBR2_G54,
+ BCM47XX_BOARD_BUFFALO_WHR2_A54G54,
+ BCM47XX_BOARD_BUFFALO_WHR_G125,
+ BCM47XX_BOARD_BUFFALO_WHR_G54S,
+ BCM47XX_BOARD_BUFFALO_WHR_HP_G54,
+ BCM47XX_BOARD_BUFFALO_WLA2_G54L,
+ BCM47XX_BOARD_BUFFALO_WZR_G300N,
+ BCM47XX_BOARD_BUFFALO_WZR_RS_G54,
+ BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP,
+
+ BCM47XX_BOARD_CISCO_M10V1,
+ BCM47XX_BOARD_CISCO_M20V1,
+
+ BCM47XX_BOARD_DELL_TM2300,
+
+ BCM47XX_BOARD_DLINK_DIR130,
+ BCM47XX_BOARD_DLINK_DIR330,
+
+ BCM47XX_BOARD_HUAWEI_E970,
+
+ BCM47XX_BOARD_LINKSYS_E900V1,
+ BCM47XX_BOARD_LINKSYS_E1000V1,
+ BCM47XX_BOARD_LINKSYS_E1000V2,
+ BCM47XX_BOARD_LINKSYS_E1000V21,
+ BCM47XX_BOARD_LINKSYS_E1200V2,
+ BCM47XX_BOARD_LINKSYS_E2000V1,
+ BCM47XX_BOARD_LINKSYS_E3000V1,
+ BCM47XX_BOARD_LINKSYS_E3200V1,
+ BCM47XX_BOARD_LINKSYS_E4200V1,
+ BCM47XX_BOARD_LINKSYS_WRT150NV1,
+ BCM47XX_BOARD_LINKSYS_WRT150NV11,
+ BCM47XX_BOARD_LINKSYS_WRT160NV1,
+ BCM47XX_BOARD_LINKSYS_WRT160NV3,
+ BCM47XX_BOARD_LINKSYS_WRT300NV11,
+ BCM47XX_BOARD_LINKSYS_WRT310NV1,
+ BCM47XX_BOARD_LINKSYS_WRT310NV2,
+ BCM47XX_BOARD_LINKSYS_WRT54G3GV2,
+ BCM47XX_BOARD_LINKSYS_WRT610NV1,
+ BCM47XX_BOARD_LINKSYS_WRT610NV2,
+ BCM47XX_BOARD_LINKSYS_WRTSL54GS,
+
+ BCM47XX_BOARD_MOTOROLA_WE800G,
+ BCM47XX_BOARD_MOTOROLA_WR850GP,
+ BCM47XX_BOARD_MOTOROLA_WR850GV2V3,
+
+ BCM47XX_BOARD_NETGEAR_WGR614V8,
+ BCM47XX_BOARD_NETGEAR_WGR614V9,
+ BCM47XX_BOARD_NETGEAR_WNDR3300,
+ BCM47XX_BOARD_NETGEAR_WNDR3400V1,
+ BCM47XX_BOARD_NETGEAR_WNDR3400V2,
+ BCM47XX_BOARD_NETGEAR_WNDR3400VCNA,
+ BCM47XX_BOARD_NETGEAR_WNDR3700V3,
+ BCM47XX_BOARD_NETGEAR_WNDR4000,
+ BCM47XX_BOARD_NETGEAR_WNDR4500V1,
+ BCM47XX_BOARD_NETGEAR_WNDR4500V2,
+ BCM47XX_BOARD_NETGEAR_WNR2000,
+ BCM47XX_BOARD_NETGEAR_WNR3500L,
+ BCM47XX_BOARD_NETGEAR_WNR3500U,
+ BCM47XX_BOARD_NETGEAR_WNR3500V2,
+ BCM47XX_BOARD_NETGEAR_WNR3500V2VC,
+ BCM47XX_BOARD_NETGEAR_WNR834BV2,
+
+ BCM47XX_BOARD_PHICOMM_M1,
+
+ BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE,
+
+ BCM47XX_BOARD_ZTE_H218N,
+
+ BCM47XX_BOARD_UNKNOWN,
+ BCM47XX_BOARD_NO,
+};
+
+#define BCM47XX_BOARD_MAX_NAME 30
+
+void bcm47xx_board_detect(void);
+enum bcm47xx_board bcm47xx_board_get(void);
+const char *bcm47xx_board_get_name(void);
+
+#endif /* __BCM47XX_BOARD_H */
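
A hedged usage sketch of the new board-detection interface: detect once during early platform setup, then branch on the enum or print the human-readable name. The setup function and the quirk case below are illustrative, not part of the patch.

    #include <linux/init.h>
    #include <linux/printk.h>
    #include <bcm47xx_board.h>              /* the header added above */

    static void __init sketch_board_setup(void)
    {
            bcm47xx_board_detect();         /* parse identification data once */

            pr_info("bcm47xx: board is %s\n", bcm47xx_board_get_name());

            switch (bcm47xx_board_get()) {
            case BCM47XX_BOARD_NETGEAR_WNDR3400V1:
                    /* board-specific quirks would be applied here */
                    break;
            default:
                    break;
            }
    }
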
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
index b8e7be8f34dd..36a3fc1aa3ae 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
@@ -48,4 +48,6 @@ static inline void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
printk(KERN_WARNING "Can not parse mac address: %s\n", buf);
}
+int bcm47xx_nvram_gpio_pin(const char *name);
+
#endif /* __BCM47XX_NVRAM_H */
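
The new bcm47xx_nvram_gpio_pin() presumably resolves a GPIO number from a named NVRAM variable. A minimal sketch, assuming a negative return means the variable is absent; the variable name and the caller are placeholders:

    #include <linux/errno.h>
    #include <bcm47xx_nvram.h>

    static int sketch_get_wps_gpio(void)
    {
            int pin = bcm47xx_nvram_gpio_pin("wps_button");

            if (pin < 0)
                    return -ENODEV;         /* assumed: not set in NVRAM */
            return pin;
    }
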
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
index 47fb247f9663..f9f448650505 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
@@ -52,23 +52,11 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
return 0;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
- BUG();
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
return 1;
}
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- BUG();
- return 0;
-}
-
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
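
The deleted plat_extra_sync_for_device() and plat_dma_mapping_error() hooks were dead boilerplate; the per-platform decision the common DMA code still needs is plat_device_is_coherent(). A simplified stand-in for that kind of caller (not the verbatim code in arch/mips/mm/dma-default.c):

    #include <linux/device.h>
    #include <dma-coherence.h>              /* the mach-specific header above */

    /* Sketch: skip manual cache maintenance on coherent platforms. */
    static inline int sketch_needs_post_dma_flush(struct device *dev)
    {
            return !plat_device_is_coherent(dev);
    }
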
diff --git a/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
new file mode 100644
index 000000000000..acce27fd2bb8
--- /dev/null
+++ b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
@@ -0,0 +1,87 @@
+/*
+ * CPU feature overrides for DECstation systems. Two variations
+ * are generally applicable.
+ *
+ * Copyright (C) 2013 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H
+
+/* Generic ones first. */
+#define cpu_has_tlb 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_fpu 1
+#define cpu_has_divec 0
+#define cpu_has_prefetch 0
+#define cpu_has_mcheck 0
+#define cpu_has_ejtag 0
+#define cpu_has_mips16 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define cpu_has_rixi 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_pindexed_dcache 0
+#define cpu_has_local_ebase 0
+#define cpu_icache_snoops_remote_store 1
+#define cpu_has_mips_4 0
+#define cpu_has_mips_5 0
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+#define cpu_has_dsp 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+
+/* R3k-specific ones. */
+#ifdef CONFIG_CPU_R3000
+#define cpu_has_4kex 0
+#define cpu_has_3k_cache 1
+#define cpu_has_4k_cache 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 0
+#define cpu_has_watch 0
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_llsc 0
+#define cpu_has_dc_aliases 0
+#define cpu_has_mips_2 0
+#define cpu_has_mips_3 0
+#define cpu_has_nofpuex 1
+#define cpu_has_inclusive_pcaches 0
+#define cpu_dcache_line_size() 4
+#define cpu_icache_line_size() 4
+#define cpu_scache_line_size() 0
+#endif /* CONFIG_CPU_R3000 */
+
+/* R4k-specific ones. */
+#ifdef CONFIG_CPU_R4X00
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_32fpr 1
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_vce 1
+#define cpu_has_cache_cdex_p 1
+#define cpu_has_cache_cdex_s 1
+#define cpu_has_llsc 1
+#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
+#define cpu_has_mips_2 1
+#define cpu_has_mips_3 1
+#define cpu_has_nofpuex 0
+#define cpu_has_inclusive_pcaches 1
+#define cpu_dcache_line_size() 16
+#define cpu_icache_line_size() 16
+#define cpu_scache_line_size() 32
+#endif /* CONFIG_CPU_R4X00 */
+
+#endif /* __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H */
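
Because every cpu_has_* symbol above is a literal 0 or 1, feature tests in generic code collapse to compile-time constants and the dead branch is discarded. A hedged sketch of the effect; both flush helpers are hypothetical stubs, not kernel functions:

    #include <asm/cpu-features.h>           /* pulls in the overrides above */

    static void sketch_r3k_flush(void) { /* hypothetical R3k path */ }
    static void sketch_r4k_flush(void) { /* hypothetical R4k path */ }

    static void sketch_flush_caches(void)
    {
            if (cpu_has_3k_cache)           /* constant 0 or 1 on DECstation */
                    sketch_r3k_flush();
            else
                    sketch_r4k_flush();
    }
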
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 74cb99257d5b..a9e8f6b62b0b 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -47,16 +47,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
return 1;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- return 0;
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
#ifdef CONFIG_DMA_COHERENT
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index 06c441968e6e..4ffddfdb5062 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -58,16 +58,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
return 1;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- return 0;
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
return 1; /* IP27 non-coherent mode is unsupported */
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index 073f0c4760ba..104cfbc3ed63 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -80,17 +80,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
return 1;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
- return;
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- return 0;
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
return 0; /* IP32 is non-coherent */
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
index 9fc1e9ad7038..949003ef97b3 100644
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h
@@ -48,16 +48,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
return 1;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- return 0;
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
return 0;
diff --git a/arch/mips/include/asm/mach-loongson/dma-coherence.h b/arch/mips/include/asm/mach-loongson/dma-coherence.h
index e1433055fe98..aeb2c05d6145 100644
--- a/arch/mips/include/asm/mach-loongson/dma-coherence.h
+++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h
@@ -53,16 +53,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
return 1;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- return 0;
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
return 0;
diff --git a/arch/mips/include/asm/mach-powertv/asic.h b/arch/mips/include/asm/mach-powertv/asic.h
deleted file mode 100644
index b341108d12f1..000000000000
--- a/arch/mips/include/asm/mach-powertv/asic.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef _ASM_MACH_POWERTV_ASIC_H
-#define _ASM_MACH_POWERTV_ASIC_H
-
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <asm/mach-powertv/asic_regs.h>
-
-#define DVR_CAPABLE (1<<0)
-#define PCIE_CAPABLE (1<<1)
-#define FFS_CAPABLE (1<<2)
-#define DISPLAY_CAPABLE (1<<3)
-
-/* Platform Family types
- * For compitability, the new value must be added in the end */
-enum family_type {
- FAMILY_8500,
- FAMILY_8500RNG,
- FAMILY_4500,
- FAMILY_1500,
- FAMILY_8600,
- FAMILY_4600,
- FAMILY_4600VZA,
- FAMILY_8600VZB,
- FAMILY_1500VZE,
- FAMILY_1500VZF,
- FAMILY_8700,
- FAMILIES
-};
-
-/* Register maps for each ASIC */
-extern const struct register_map calliope_register_map;
-extern const struct register_map cronus_register_map;
-extern const struct register_map gaia_register_map;
-extern const struct register_map zeus_register_map;
-
-extern struct resource dvr_cronus_resources[];
-extern struct resource dvr_gaia_resources[];
-extern struct resource dvr_zeus_resources[];
-extern struct resource non_dvr_calliope_resources[];
-extern struct resource non_dvr_cronus_resources[];
-extern struct resource non_dvr_cronuslite_resources[];
-extern struct resource non_dvr_gaia_resources[];
-extern struct resource non_dvr_vz_calliope_resources[];
-extern struct resource non_dvr_vze_calliope_resources[];
-extern struct resource non_dvr_vzf_calliope_resources[];
-extern struct resource non_dvr_zeus_resources[];
-
-extern void powertv_platform_init(void);
-extern void platform_alloc_bootmem(void);
-extern enum asic_type platform_get_asic(void);
-extern enum family_type platform_get_family(void);
-extern int platform_supports_dvr(void);
-extern int platform_supports_ffs(void);
-extern int platform_supports_pcie(void);
-extern int platform_supports_display(void);
-extern void configure_platform(void);
-
-/* Platform Resources */
-#define ASIC_RESOURCE_GET_EXISTS 1
-extern struct resource *asic_resource_get(const char *name);
-extern void platform_release_memory(void *baddr, int size);
-
-/* USB configuration */
-struct usb_hcd; /* Forward reference */
-extern void platform_configure_usb_ehci(void);
-extern void platform_unconfigure_usb_ehci(void);
-extern void platform_configure_usb_ohci(void);
-extern void platform_unconfigure_usb_ohci(void);
-
-/* Resource for ASIC registers */
-extern struct resource asic_resource;
-extern int platform_usb_devices_init(struct platform_device **echi_dev,
- struct platform_device **ohci_dev);
-
-/* Reboot Cause */
-extern void set_reboot_cause(char code, unsigned int data, unsigned int data2);
-extern void set_locked_reboot_cause(char code, unsigned int data,
- unsigned int data2);
-
-enum sys_reboot_type {
- sys_unknown_reboot = 0x00, /* Unknown reboot cause */
- sys_davic_change = 0x01, /* Reboot due to change in DAVIC
- * mode */
- sys_user_reboot = 0x02, /* Reboot initiated by user */
- sys_system_reboot = 0x03, /* Reboot initiated by OS */
- sys_trap_reboot = 0x04, /* Reboot due to a CPU trap */
- sys_silent_reboot = 0x05, /* Silent reboot */
- sys_boot_ldr_reboot = 0x06, /* Bootloader reboot */
- sys_power_up_reboot = 0x07, /* Power on bootup. Older
- * drivers may report as
- * userReboot. */
- sys_code_change = 0x08, /* Reboot to take code change.
- * Older drivers may report as
- * userReboot. */
- sys_hardware_reset = 0x09, /* HW watchdog or front-panel
- * reset button reset. Older
- * drivers may report as
- * userReboot. */
- sys_watchdogInterrupt = 0x0A /* Pre-watchdog interrupt */
-};
-
-#endif /* _ASM_MACH_POWERTV_ASIC_H */
diff --git a/arch/mips/include/asm/mach-powertv/asic_reg_map.h b/arch/mips/include/asm/mach-powertv/asic_reg_map.h
deleted file mode 100644
index 20348e817b09..000000000000
--- a/arch/mips/include/asm/mach-powertv/asic_reg_map.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * asic_reg_map.h
- *
- * A macro-enclosed list of the elements for the register_map structure for
- * use in defining and manipulating the structure.
- *
- * Copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-REGISTER_MAP_ELEMENT(eic_slow0_strt_add)
-REGISTER_MAP_ELEMENT(eic_cfg_bits)
-REGISTER_MAP_ELEMENT(eic_ready_status)
-REGISTER_MAP_ELEMENT(chipver3)
-REGISTER_MAP_ELEMENT(chipver2)
-REGISTER_MAP_ELEMENT(chipver1)
-REGISTER_MAP_ELEMENT(chipver0)
-REGISTER_MAP_ELEMENT(uart1_intstat)
-REGISTER_MAP_ELEMENT(uart1_inten)
-REGISTER_MAP_ELEMENT(uart1_config1)
-REGISTER_MAP_ELEMENT(uart1_config2)
-REGISTER_MAP_ELEMENT(uart1_divisorhi)
-REGISTER_MAP_ELEMENT(uart1_divisorlo)
-REGISTER_MAP_ELEMENT(uart1_data)
-REGISTER_MAP_ELEMENT(uart1_status)
-REGISTER_MAP_ELEMENT(int_stat_3)
-REGISTER_MAP_ELEMENT(int_stat_2)
-REGISTER_MAP_ELEMENT(int_stat_1)
-REGISTER_MAP_ELEMENT(int_stat_0)
-REGISTER_MAP_ELEMENT(int_config)
-REGISTER_MAP_ELEMENT(int_int_scan)
-REGISTER_MAP_ELEMENT(ien_int_3)
-REGISTER_MAP_ELEMENT(ien_int_2)
-REGISTER_MAP_ELEMENT(ien_int_1)
-REGISTER_MAP_ELEMENT(ien_int_0)
-REGISTER_MAP_ELEMENT(int_level_3_3)
-REGISTER_MAP_ELEMENT(int_level_3_2)
-REGISTER_MAP_ELEMENT(int_level_3_1)
-REGISTER_MAP_ELEMENT(int_level_3_0)
-REGISTER_MAP_ELEMENT(int_level_2_3)
-REGISTER_MAP_ELEMENT(int_level_2_2)
-REGISTER_MAP_ELEMENT(int_level_2_1)
-REGISTER_MAP_ELEMENT(int_level_2_0)
-REGISTER_MAP_ELEMENT(int_level_1_3)
-REGISTER_MAP_ELEMENT(int_level_1_2)
-REGISTER_MAP_ELEMENT(int_level_1_1)
-REGISTER_MAP_ELEMENT(int_level_1_0)
-REGISTER_MAP_ELEMENT(int_level_0_3)
-REGISTER_MAP_ELEMENT(int_level_0_2)
-REGISTER_MAP_ELEMENT(int_level_0_1)
-REGISTER_MAP_ELEMENT(int_level_0_0)
-REGISTER_MAP_ELEMENT(int_docsis_en)
-REGISTER_MAP_ELEMENT(mips_pll_setup)
-REGISTER_MAP_ELEMENT(fs432x4b4_usb_ctl)
-REGISTER_MAP_ELEMENT(test_bus)
-REGISTER_MAP_ELEMENT(crt_spare)
-REGISTER_MAP_ELEMENT(usb2_ohci_int_mask)
-REGISTER_MAP_ELEMENT(usb2_strap)
-REGISTER_MAP_ELEMENT(ehci_hcapbase)
-REGISTER_MAP_ELEMENT(ohci_hc_revision)
-REGISTER_MAP_ELEMENT(bcm1_bs_lmi_steer)
-REGISTER_MAP_ELEMENT(usb2_control)
-REGISTER_MAP_ELEMENT(usb2_stbus_obc)
-REGISTER_MAP_ELEMENT(usb2_stbus_mess_size)
-REGISTER_MAP_ELEMENT(usb2_stbus_chunk_size)
-REGISTER_MAP_ELEMENT(pcie_regs)
-REGISTER_MAP_ELEMENT(tim_ch)
-REGISTER_MAP_ELEMENT(tim_cl)
-REGISTER_MAP_ELEMENT(gpio_dout)
-REGISTER_MAP_ELEMENT(gpio_din)
-REGISTER_MAP_ELEMENT(gpio_dir)
-REGISTER_MAP_ELEMENT(watchdog)
-REGISTER_MAP_ELEMENT(front_panel)
-REGISTER_MAP_ELEMENT(misc_clk_ctl1)
-REGISTER_MAP_ELEMENT(misc_clk_ctl2)
-REGISTER_MAP_ELEMENT(crt_ext_ctl)
-REGISTER_MAP_ELEMENT(register_maps)
diff --git a/arch/mips/include/asm/mach-powertv/asic_regs.h b/arch/mips/include/asm/mach-powertv/asic_regs.h
deleted file mode 100644
index 06712abb3e55..000000000000
--- a/arch/mips/include/asm/mach-powertv/asic_regs.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef __ASM_MACH_POWERTV_ASIC_H_
-#define __ASM_MACH_POWERTV_ASIC_H_
-#include <linux/io.h>
-
-/* ASIC types */
-enum asic_type {
- ASIC_UNKNOWN,
- ASIC_ZEUS,
- ASIC_CALLIOPE,
- ASIC_CRONUS,
- ASIC_CRONUSLITE,
- ASIC_GAIA,
- ASICS /* Number of supported ASICs */
-};
-
-/* hardcoded values read from Chip Version registers */
-#define CRONUS_10 0x0B4C1C20
-#define CRONUS_11 0x0B4C1C21
-#define CRONUSLITE_10 0x0B4C1C40
-
-#define NAND_FLASH_BASE 0x03000000
-#define CALLIOPE_IO_BASE 0x08000000
-#define GAIA_IO_BASE 0x09000000
-#define CRONUS_IO_BASE 0x09000000
-#define ZEUS_IO_BASE 0x09000000
-
-#define ASIC_IO_SIZE 0x01000000
-
-/* Definitions for backward compatibility */
-#define UART1_INTSTAT uart1_intstat
-#define UART1_INTEN uart1_inten
-#define UART1_CONFIG1 uart1_config1
-#define UART1_CONFIG2 uart1_config2
-#define UART1_DIVISORHI uart1_divisorhi
-#define UART1_DIVISORLO uart1_divisorlo
-#define UART1_DATA uart1_data
-#define UART1_STATUS uart1_status
-
-/* ASIC register enumeration */
-union register_map_entry {
- unsigned long phys;
- u32 *virt;
-};
-
-#define REGISTER_MAP_ELEMENT(x) union register_map_entry x;
-struct register_map {
-#include <asm/mach-powertv/asic_reg_map.h>
-};
-#undef REGISTER_MAP_ELEMENT
-
-/**
- * register_map_offset_phys - add an offset to the physical address
- * @map: Pointer to the &struct register_map
- * @offset: Value to add
- *
- * Only adds the base to non-zero physical addresses
- */
-static inline void register_map_offset_phys(struct register_map *map,
- unsigned long offset)
-{
-#define REGISTER_MAP_ELEMENT(x) do { \
- if (map->x.phys != 0) \
- map->x.phys += offset; \
- } while (false);
-
-#include <asm/mach-powertv/asic_reg_map.h>
-#undef REGISTER_MAP_ELEMENT
-}
-
-/**
- * register_map_virtualize - Convert &register_map to virtual addresses
- * @map: Pointer to &register_map to virtualize
- */
-static inline void register_map_virtualize(struct register_map *map)
-{
-#define REGISTER_MAP_ELEMENT(x) do { \
- map->x.virt = (!map->x.phys) ? NULL : \
- UNCAC_ADDR(phys_to_virt(map->x.phys)); \
- } while (false);
-
-#include <asm/mach-powertv/asic_reg_map.h>
-#undef REGISTER_MAP_ELEMENT
-}
-
-extern struct register_map _asic_register_map;
-extern unsigned long asic_phy_base;
-
-/*
- * Macros to interface to registers through their ioremapped address
- * asic_reg_phys_addr Returns the physical address of the given register
- * asic_reg_addr Returns the iomapped virtual address of the given
- * register.
- */
-#define asic_reg_addr(x) (_asic_register_map.x.virt)
-#define asic_reg_phys_addr(x) (virt_to_phys((void *) CAC_ADDR( \
- (unsigned long) asic_reg_addr(x))))
-
-/*
- * The asic_reg macro is gone. It should be replaced by either asic_read or
- * asic_write, as appropriate.
- */
-
-#define asic_read(x) readl(asic_reg_addr(x))
-#define asic_write(v, x) writel(v, asic_reg_addr(x))
-
-extern void asic_irq_init(void);
-#endif
diff --git a/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h b/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h
deleted file mode 100644
index 58c76ec32a19..000000000000
--- a/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2010 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_
-#define _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_
-#define cpu_has_tlb 1
-#define cpu_has_4kex 1
-#define cpu_has_3k_cache 0
-#define cpu_has_4k_cache 1
-#define cpu_has_tx39_cache 0
-#define cpu_has_fpu 0
-#define cpu_has_counter 1
-#define cpu_has_watch 1
-#define cpu_has_divec 1
-#define cpu_has_vce 0
-#define cpu_has_cache_cdex_p 0
-#define cpu_has_cache_cdex_s 0
-#define cpu_has_mcheck 1
-#define cpu_has_ejtag 1
-#define cpu_has_llsc 1
-#define cpu_has_mips16 0
-#define cpu_has_mdmx 0
-#define cpu_has_mips3d 0
-#define cpu_has_smartmips 0
-#define cpu_has_vtag_icache 0
-#define cpu_has_dc_aliases 0
-#define cpu_has_ic_fills_f_dc 0
-#define cpu_has_mips32r1 0
-#define cpu_has_mips32r2 1
-#define cpu_has_mips64r1 0
-#define cpu_has_mips64r2 0
-#define cpu_has_dsp 0
-#define cpu_has_dsp2 0
-#define cpu_has_mipsmt 0
-#define cpu_has_userlocal 0
-#define cpu_has_nofpuex 0
-#define cpu_has_64bits 0
-#define cpu_has_64bit_zero_reg 0
-#define cpu_has_vint 1
-#define cpu_has_veic 1
-#define cpu_has_inclusive_pcaches 0
-
-#define cpu_dcache_line_size() 32
-#define cpu_icache_line_size() 32
-#endif
diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h
deleted file mode 100644
index f8316720a218..000000000000
--- a/arch/mips/include/asm/mach-powertv/dma-coherence.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Version from mach-generic modified to support PowerTV port
- * Portions Copyright (C) 2009 Cisco Systems, Inc.
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- */
-
-#ifndef __ASM_MACH_POWERTV_DMA_COHERENCE_H
-#define __ASM_MACH_POWERTV_DMA_COHERENCE_H
-
-#include <linux/sched.h>
-#include <linux/device.h>
-#include <asm/mach-powertv/asic.h>
-
-static inline bool is_kseg2(void *addr)
-{
- return (unsigned long)addr >= KSEG2;
-}
-
-static inline unsigned long virt_to_phys_from_pte(void *addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ptep, pte;
-
- unsigned long virt_addr = (unsigned long)addr;
- unsigned long phys_addr = 0UL;
-
- /* get the page global directory. */
- pgd = pgd_offset_k(virt_addr);
-
- if (!pgd_none(*pgd)) {
- /* get the page upper directory */
- pud = pud_offset(pgd, virt_addr);
- if (!pud_none(*pud)) {
- /* get the page middle directory */
- pmd = pmd_offset(pud, virt_addr);
- if (!pmd_none(*pmd)) {
- /* get a pointer to the page table entry */
- ptep = pte_offset(pmd, virt_addr);
- pte = *ptep;
- /* check for a valid page */
- if (pte_present(pte)) {
- /* get the physical address the page is
- * referring to */
- phys_addr = (unsigned long)
- page_to_phys(pte_page(pte));
- /* add the offset within the page */
- phys_addr |= (virt_addr & ~PAGE_MASK);
- }
- }
- }
- }
-
- return phys_addr;
-}
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
- if (is_kseg2(addr))
- return phys_to_dma(virt_to_phys_from_pte(addr));
- else
- return phys_to_dma(virt_to_phys(addr));
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- return phys_to_dma(page_to_phys(page));
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- return dma_to_phys(dma_addr);
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 0;
-}
-
-#endif /* __ASM_MACH_POWERTV_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-powertv/interrupts.h b/arch/mips/include/asm/mach-powertv/interrupts.h
deleted file mode 100644
index 6c463be62156..000000000000
--- a/arch/mips/include/asm/mach-powertv/interrupts.h
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef _ASM_MACH_POWERTV_INTERRUPTS_H_
-#define _ASM_MACH_POWERTV_INTERRUPTS_H_
-
-/*
- * Defines for all of the interrupt lines
- */
-
-/* Definitions for backward compatibility */
-#define kIrq_Uart1 irq_uart1
-
-#define ibase 0
-
-/*------------- Register: int_stat_3 */
-/* 126 unused (bit 31) */
-#define irq_asc2video (ibase+126) /* ASC 2 Video Interrupt */
-#define irq_asc1video (ibase+125) /* ASC 1 Video Interrupt */
-#define irq_comms_block_wd (ibase+124) /* ASC 1 Video Interrupt */
-#define irq_fdma_mailbox (ibase+123) /* FDMA Mailbox Output */
-#define irq_fdma_gp (ibase+122) /* FDMA GP Output */
-#define irq_mips_pic (ibase+121) /* MIPS Performance Counter
- * Interrupt */
-#define irq_mips_timer (ibase+120) /* MIPS Timer Interrupt */
-#define irq_memory_protect (ibase+119) /* Memory Protection Interrupt
- * -- Ored by glue logic inside
- * SPARC ILC (see
- * INT_MEM_PROT_STAT, below,
- * for individual interrupts)
- */
-/* 118 unused (bit 22) */
-#define irq_sbag (ibase+117) /* SBAG Interrupt -- Ored by
- * glue logic inside SPARC ILC
- * (see INT_SBAG_STAT, below,
- * for individual interrupts) */
-#define irq_qam_b_fec (ibase+116) /* QAM B FEC Interrupt */
-#define irq_qam_a_fec (ibase+115) /* QAM A FEC Interrupt */
-/* 114 unused (bit 18) */
-#define irq_mailbox (ibase+113) /* Mailbox Debug Interrupt --
- * Ored by glue logic inside
- * SPARC ILC (see
- * INT_MAILBOX_STAT, below, for
- * individual interrupts) */
-#define irq_fuse_stat1 (ibase+112) /* Fuse Status 1 */
-#define irq_fuse_stat2 (ibase+111) /* Fuse Status 2 */
-#define irq_fuse_stat3 (ibase+110) /* Blitter Interrupt / Fuse
- * Status 3 */
-#define irq_blitter (ibase+110) /* Blitter Interrupt / Fuse
- * Status 3 */
-#define irq_avc1_pp0 (ibase+109) /* AVC Decoder #1 PP0
- * Interrupt */
-#define irq_avc1_pp1 (ibase+108) /* AVC Decoder #1 PP1
- * Interrupt */
-#define irq_avc1_mbe (ibase+107) /* AVC Decoder #1 MBE
- * Interrupt */
-#define irq_avc2_pp0 (ibase+106) /* AVC Decoder #2 PP0
- * Interrupt */
-#define irq_avc2_pp1 (ibase+105) /* AVC Decoder #2 PP1
- * Interrupt */
-#define irq_avc2_mbe (ibase+104) /* AVC Decoder #2 MBE
- * Interrupt */
-#define irq_zbug_spi (ibase+103) /* Zbug SPI Slave Interrupt */
-#define irq_qam_mod2 (ibase+102) /* QAM Modulator 2 DMA
- * Interrupt */
-#define irq_ir_rx (ibase+101) /* IR RX 2 Interrupt */
-#define irq_aud_dsp2 (ibase+100) /* Audio DSP #2 Interrupt */
-#define irq_aud_dsp1 (ibase+99) /* Audio DSP #1 Interrupt */
-#define irq_docsis (ibase+98) /* DOCSIS Debug Interrupt */
-#define irq_sd_dvp1 (ibase+97) /* SD DVP #1 Interrupt */
-#define irq_sd_dvp2 (ibase+96) /* SD DVP #2 Interrupt */
-/*------------- Register: int_stat_2 */
-#define irq_hd_dvp (ibase+95) /* HD DVP Interrupt */
-#define kIrq_Prewatchdog (ibase+94) /* watchdog Pre-Interrupt */
-#define irq_timer2 (ibase+93) /* Programmable Timer
- * Interrupt 2 */
-#define irq_1394 (ibase+92) /* 1394 Firewire Interrupt */
-#define irq_usbohci (ibase+91) /* USB 2.0 OHCI Interrupt */
-#define irq_usbehci (ibase+90) /* USB 2.0 EHCI Interrupt */
-#define irq_pciexp (ibase+89) /* PCI Express 0 Interrupt */
-#define irq_pciexp0 (ibase+89) /* PCI Express 0 Interrupt */
-#define irq_afe1 (ibase+88) /* AFE 1 Interrupt */
-#define irq_sata (ibase+87) /* SATA 1 Interrupt */
-#define irq_sata1 (ibase+87) /* SATA 1 Interrupt */
-#define irq_dtcp (ibase+86) /* DTCP Interrupt */
-#define irq_pciexp1 (ibase+85) /* PCI Express 1 Interrupt */
-/* 84 unused (bit 20) */
-/* 83 unused (bit 19) */
-/* 82 unused (bit 18) */
-#define irq_sata2 (ibase+81) /* SATA2 Interrupt */
-#define irq_uart2 (ibase+80) /* UART2 Interrupt */
-#define irq_legacy_usb (ibase+79) /* Legacy USB Host ISR (1.1
- * Host module) */
-#define irq_pod (ibase+78) /* POD Interrupt */
-#define irq_slave_usb (ibase+77) /* Slave USB */
-#define irq_denc1 (ibase+76) /* DENC #1 VTG Interrupt */
-#define irq_vbi_vtg (ibase+75) /* VBI VTG Interrupt */
-#define irq_afe2 (ibase+74) /* AFE 2 Interrupt */
-#define irq_denc2 (ibase+73) /* DENC #2 VTG Interrupt */
-#define irq_asc2 (ibase+72) /* ASC #2 Interrupt */
-#define irq_asc1 (ibase+71) /* ASC #1 Interrupt */
-#define irq_mod_dma (ibase+70) /* Modulator DMA Interrupt */
-#define irq_byte_eng1 (ibase+69) /* Byte Engine Interrupt [1] */
-#define irq_byte_eng0 (ibase+68) /* Byte Engine Interrupt [0] */
-/* 67 unused (bit 03) */
-/* 66 unused (bit 02) */
-/* 65 unused (bit 01) */
-/* 64 unused (bit 00) */
-/*------------- Register: int_stat_1 */
-/* 63 unused (bit 31) */
-/* 62 unused (bit 30) */
-/* 61 unused (bit 29) */
-/* 60 unused (bit 28) */
-/* 59 unused (bit 27) */
-/* 58 unused (bit 26) */
-/* 57 unused (bit 25) */
-/* 56 unused (bit 24) */
-#define irq_buf_dma_mem2mem (ibase+55) /* BufDMA Memory to Memory
- * Interrupt */
-#define irq_buf_dma_usbtransmit (ibase+54) /* BufDMA USB Transmit
- * Interrupt */
-#define irq_buf_dma_qpskpodtransmit (ibase+53) /* BufDMA QPSK/POD Tramsit
- * Interrupt */
-#define irq_buf_dma_transmit_error (ibase+52) /* BufDMA Transmit Error
- * Interrupt */
-#define irq_buf_dma_usbrecv (ibase+51) /* BufDMA USB Receive
- * Interrupt */
-#define irq_buf_dma_qpskpodrecv (ibase+50) /* BufDMA QPSK/POD Receive
- * Interrupt */
-#define irq_buf_dma_recv_error (ibase+49) /* BufDMA Receive Error
- * Interrupt */
-#define irq_qamdma_transmit_play (ibase+48) /* QAMDMA Transmit/Play
- * Interrupt */
-#define irq_qamdma_transmit_error (ibase+47) /* QAMDMA Transmit Error
- * Interrupt */
-#define irq_qamdma_recv2high (ibase+46) /* QAMDMA Receive 2 High
- * (Chans 63-32) */
-#define irq_qamdma_recv2low (ibase+45) /* QAMDMA Receive 2 Low
- * (Chans 31-0) */
-#define irq_qamdma_recv1high (ibase+44) /* QAMDMA Receive 1 High
- * (Chans 63-32) */
-#define irq_qamdma_recv1low (ibase+43) /* QAMDMA Receive 1 Low
- * (Chans 31-0) */
-#define irq_qamdma_recv_error (ibase+42) /* QAMDMA Receive Error
- * Interrupt */
-#define irq_mpegsplice (ibase+41) /* MPEG Splice Interrupt */
-#define irq_deinterlace_rdy (ibase+40) /* Deinterlacer Frame Ready
- * Interrupt */
-#define irq_ext_in0 (ibase+39) /* External Interrupt irq_in0 */
-#define irq_gpio3 (ibase+38) /* GP I/O IRQ 3 - From GP I/O
- * Module */
-#define irq_gpio2 (ibase+37) /* GP I/O IRQ 2 - From GP I/O
- * Module (ABE_intN) */
-#define irq_pcrcmplt1 (ibase+36) /* PCR Capture Complete or
- * Discontinuity 1 */
-#define irq_pcrcmplt2 (ibase+35) /* PCR Capture Complete or
- * Discontinuity 2 */
-#define irq_parse_peierr (ibase+34) /* PID Parser Error Detect
- * (PEI) */
-#define irq_parse_cont_err (ibase+33) /* PID Parser continuity error
- * detect */
-#define irq_ds1framer (ibase+32) /* DS1 Framer Interrupt */
-/*------------- Register: int_stat_0 */
-#define irq_gpio1 (ibase+31) /* GP I/O IRQ 1 - From GP I/O
- * Module */
-#define irq_gpio0 (ibase+30) /* GP I/O IRQ 0 - From GP I/O
- * Module */
-#define irq_qpsk_out_aloha (ibase+29) /* QPSK Output Slotted Aloha
- * (chan 3) Transmission
- * Completed OK */
-#define irq_qpsk_out_tdma (ibase+28) /* QPSK Output TDMA (chan 2)
- * Transmission Completed OK */
-#define irq_qpsk_out_reserve (ibase+27) /* QPSK Output Reservation
- * (chan 1) Transmission
- * Completed OK */
-#define irq_qpsk_out_aloha_err (ibase+26) /* QPSK Output Slotted Aloha
- * (chan 3)Transmission
- * completed with Errors. */
-#define irq_qpsk_out_tdma_err (ibase+25) /* QPSK Output TDMA (chan 2)
- * Transmission completed with
- * Errors. */
-#define irq_qpsk_out_rsrv_err (ibase+24) /* QPSK Output Reservation
- * (chan 1) Transmission
- * completed with Errors */
-#define irq_aloha_fail (ibase+23) /* Unsuccessful Resend of Aloha
- * for N times. Aloha retry
- * timeout for channel 3. */
-#define irq_timer1 (ibase+22) /* Programmable Timer
- * Interrupt */
-#define irq_keyboard (ibase+21) /* Keyboard Module Interrupt */
-#define irq_i2c (ibase+20) /* I2C Module Interrupt */
-#define irq_spi (ibase+19) /* SPI Module Interrupt */
-#define irq_irblaster (ibase+18) /* IR Blaster Interrupt */
-#define irq_splice_detect (ibase+17) /* PID Key Change Interrupt or
- * Splice Detect Interrupt */
-#define irq_se_micro (ibase+16) /* Secure Micro I/F Module
- * Interrupt */
-#define irq_uart1 (ibase+15) /* UART Interrupt */
-#define irq_irrecv (ibase+14) /* IR Receiver Interrupt */
-#define irq_host_int1 (ibase+13) /* Host-to-Host Interrupt 1 */
-#define irq_host_int0 (ibase+12) /* Host-to-Host Interrupt 0 */
-#define irq_qpsk_hecerr (ibase+11) /* QPSK HEC Error Interrupt */
-#define irq_qpsk_crcerr (ibase+10) /* QPSK AAL-5 CRC Error
- * Interrupt */
-/* 9 unused (bit 09) */
-/* 8 unused (bit 08) */
-#define irq_psicrcerr (ibase+7) /* QAM PSI CRC Error
- * Interrupt */
-#define irq_psilength_err (ibase+6) /* QAM PSI Length Error
- * Interrupt */
-#define irq_esfforward (ibase+5) /* ESF Interrupt Mark From
- * Forward Path Reference -
- * every 3ms when forward Mbits
- * and forward slot control
- * bytes are updated. */
-#define irq_esfreverse (ibase+4) /* ESF Interrupt Mark from
- * Reverse Path Reference -
- * delayed from forward mark by
- * the ranging delay plus a
- * fixed amount. When reverse
- * Mbits and reverse slot
- * control bytes are updated.
- * Occurs every 3ms for 3.0M and
- * 1.554 M upstream rates and
- * every 6 ms for 256K upstream
- * rate. */
-#define irq_aloha_timeout (ibase+3) /* Slotted-Aloha timeout on
- * Channel 1. */
-#define irq_reservation (ibase+2) /* Partial (or Incremental)
- * Reservation Message Completed
- * or Slotted aloha verify for
- * channel 1. */
-#define irq_aloha3 (ibase+1) /* Slotted-Aloha Message Verify
- * Interrupt or Reservation
- * increment completed for
- * channel 3. */
-#define irq_mpeg_d (ibase+0) /* MPEG Decoder Interrupt */
-#endif /* _ASM_MACH_POWERTV_INTERRUPTS_H_ */
diff --git a/arch/mips/include/asm/mach-powertv/ioremap.h b/arch/mips/include/asm/mach-powertv/ioremap.h
deleted file mode 100644
index c86ef094ec37..000000000000
--- a/arch/mips/include/asm/mach-powertv/ioremap.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * Portions Copyright (C) Cisco Systems, Inc.
- */
-#ifndef __ASM_MACH_POWERTV_IOREMAP_H
-#define __ASM_MACH_POWERTV_IOREMAP_H
-
-#include <linux/types.h>
-#include <linux/log2.h>
-#include <linux/compiler.h>
-
-#include <asm/pgtable-bits.h>
-#include <asm/addrspace.h>
-
-/* We're going to mess with bits, so get sizes */
-#define IOR_BPC 8 /* Bits per char */
-#define IOR_PHYS_BITS (IOR_BPC * sizeof(phys_addr_t))
-#define IOR_DMA_BITS (IOR_BPC * sizeof(dma_addr_t))
-
-/*
- * Define the granularity of physical/DMA mapping in terms of the number
- * of bits that defines the offset within a grain. These will be the
- * least significant bits of the address. The rest of a physical or DMA
- * address will be used to index into an appropriate table to find the
- * offset to add to the address to yield the corresponding DMA or physical
- * address, respectively.
- */
-#define IOR_LSBITS 22 /* Bits in a grain */
-
-/*
- * Compute the number of most significant address bits after removing those
- * used for the offset within a grain and then compute the number of table
- * entries for the conversion.
- */
-#define IOR_PHYS_MSBITS (IOR_PHYS_BITS - IOR_LSBITS)
-#define IOR_NUM_PHYS_TO_DMA ((phys_addr_t) 1 << IOR_PHYS_MSBITS)
-
-#define IOR_DMA_MSBITS (IOR_DMA_BITS - IOR_LSBITS)
-#define IOR_NUM_DMA_TO_PHYS ((dma_addr_t) 1 << IOR_DMA_MSBITS)
-
-/*
- * Define data structures used as elements in the arrays for the conversion
- * between physical and DMA addresses. We do some slightly fancy math to
- * compute the width of the offset element of the conversion tables so
- * that we can have the smallest conversion tables. Next, round up the
- * sizes to the next higher power of two, i.e. the offset element will have
- * 8, 16, 32, 64, etc. bits. This eliminates the need to mask off any
- * bits. Finally, we compute a shift value that puts the most significant
- * bits of the offset into the most significant bits of the offset element.
- * This makes it more efficient on processors without barrel shifters and
- * easier to see the values if the conversion table is dumped in binary.
- */
-#define _IOR_OFFSET_WIDTH(n) (1 << order_base_2(n))
-#define IOR_OFFSET_WIDTH(n) \
- (_IOR_OFFSET_WIDTH(n) < 8 ? 8 : _IOR_OFFSET_WIDTH(n))
-
-#define IOR_PHYS_OFFSET_BITS IOR_OFFSET_WIDTH(IOR_PHYS_MSBITS)
-#define IOR_PHYS_SHIFT (IOR_PHYS_BITS - IOR_PHYS_OFFSET_BITS)
-
-#define IOR_DMA_OFFSET_BITS IOR_OFFSET_WIDTH(IOR_DMA_MSBITS)
-#define IOR_DMA_SHIFT (IOR_DMA_BITS - IOR_DMA_OFFSET_BITS)
-
-struct ior_phys_to_dma {
- dma_addr_t offset:IOR_DMA_OFFSET_BITS __packed
- __aligned((IOR_DMA_OFFSET_BITS / IOR_BPC));
-};
-
-struct ior_dma_to_phys {
- dma_addr_t offset:IOR_PHYS_OFFSET_BITS __packed
- __aligned((IOR_PHYS_OFFSET_BITS / IOR_BPC));
-};
-
-extern struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA];
-extern struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS];
-
-static inline dma_addr_t _phys_to_dma_offset_raw(phys_addr_t phys)
-{
- return (dma_addr_t)_ior_phys_to_dma[phys >> IOR_LSBITS].offset;
-}
-
-static inline dma_addr_t _dma_to_phys_offset_raw(dma_addr_t dma)
-{
- return (dma_addr_t)_ior_dma_to_phys[dma >> IOR_LSBITS].offset;
-}
-
-/* These are not portable and should not be used in drivers. Drivers should
- * be using ioremap() and friends to map physical addresses to virtual
- * addresses and dma_map*() and friends to map virtual addresses into DMA
- * addresses and back.
- */
-static inline dma_addr_t phys_to_dma(phys_addr_t phys)
-{
- return phys + (_phys_to_dma_offset_raw(phys) << IOR_PHYS_SHIFT);
-}
-
-static inline phys_addr_t dma_to_phys(dma_addr_t dma)
-{
- return dma + (_dma_to_phys_offset_raw(dma) << IOR_DMA_SHIFT);
-}
-
-extern void ioremap_add_map(dma_addr_t phys, phys_addr_t alias,
- dma_addr_t size);
-
-/*
- * Allow physical addresses to be fixed up to help peripherals located
- * outside the low 32-bit range -- generic pass-through version.
- */
-static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
-{
- return phys_addr;
-}
-
-/*
- * Handle the special case of addresses the area aliased into the first
- * 512 MiB of the processor's physical address space. These turn into either
- * kseg0 or kseg1 addresses, depending on flags.
- */
-static inline void __iomem *plat_ioremap(phys_t start, unsigned long size,
- unsigned long flags)
-{
- phys_addr_t start_offset;
- void __iomem *result = NULL;
-
- /* Start by checking to see whether this is an aliased address */
- start_offset = _dma_to_phys_offset_raw(start);
-
- /*
- * If:
- * o the memory is aliased into the first 512 MiB, and
- * o the start and end are in the same RAM bank, and
- * o we don't have a zero size or wrap around, and
- * o we are supposed to create an uncached mapping,
- * handle this is a kseg0 or kseg1 address
- */
- if (start_offset != 0) {
- phys_addr_t last;
- dma_addr_t dma_to_phys_offset;
-
- last = start + size - 1;
- dma_to_phys_offset =
- _dma_to_phys_offset_raw(last) << IOR_DMA_SHIFT;
-
- if (dma_to_phys_offset == start_offset &&
- size != 0 && start <= last) {
- phys_t adjusted_start;
- adjusted_start = start + start_offset;
- if (flags == _CACHE_UNCACHED)
- result = (void __iomem *) (unsigned long)
- CKSEG1ADDR(adjusted_start);
- else
- result = (void __iomem *) (unsigned long)
- CKSEG0ADDR(adjusted_start);
- }
- }
-
- return result;
-}
-
-static inline int plat_iounmap(const volatile void __iomem *addr)
-{
- return 0;
-}
-#endif /* __ASM_MACH_POWERTV_IOREMAP_H */
diff --git a/arch/mips/include/asm/mach-powertv/war.h b/arch/mips/include/asm/mach-powertv/war.h
deleted file mode 100644
index c5651c8e58d1..000000000000
--- a/arch/mips/include/asm/mach-powertv/war.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * This version for the PowerTV platform copied from the Malta version.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- */
-#ifndef __ASM_MACH_POWERTV_WAR_H
-#define __ASM_MACH_POWERTV_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 1
-#define MIPS_CACHE_SYNC_WAR 1
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 1
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MACH_POWERTV_WAR_H */
diff --git a/arch/mips/include/asm/mips-boards/piix4.h b/arch/mips/include/asm/mips-boards/piix4.h
index a02596cf1abd..e33227998713 100644
--- a/arch/mips/include/asm/mips-boards/piix4.h
+++ b/arch/mips/include/asm/mips-boards/piix4.h
@@ -1,6 +1,7 @@
/*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
@@ -20,61 +21,26 @@
#ifndef __ASM_MIPS_BOARDS_PIIX4_H
#define __ASM_MIPS_BOARDS_PIIX4_H
-/************************************************************************
- * IO register offsets
- ************************************************************************/
-#define PIIX4_ICTLR1_ICW1 0x20
-#define PIIX4_ICTLR1_ICW2 0x21
-#define PIIX4_ICTLR1_ICW3 0x21
-#define PIIX4_ICTLR1_ICW4 0x21
-#define PIIX4_ICTLR2_ICW1 0xa0
-#define PIIX4_ICTLR2_ICW2 0xa1
-#define PIIX4_ICTLR2_ICW3 0xa1
-#define PIIX4_ICTLR2_ICW4 0xa1
-#define PIIX4_ICTLR1_OCW1 0x21
-#define PIIX4_ICTLR1_OCW2 0x20
-#define PIIX4_ICTLR1_OCW3 0x20
-#define PIIX4_ICTLR1_OCW4 0x20
-#define PIIX4_ICTLR2_OCW1 0xa1
-#define PIIX4_ICTLR2_OCW2 0xa0
-#define PIIX4_ICTLR2_OCW3 0xa0
-#define PIIX4_ICTLR2_OCW4 0xa0
-
-
-/************************************************************************
- * Register encodings.
- ************************************************************************/
-#define PIIX4_OCW2_NSEOI (0x1 << 5)
-#define PIIX4_OCW2_SEOI (0x3 << 5)
-#define PIIX4_OCW2_RNSEOI (0x5 << 5)
-#define PIIX4_OCW2_RAEOIS (0x4 << 5)
-#define PIIX4_OCW2_RAEOIC (0x0 << 5)
-#define PIIX4_OCW2_RSEOI (0x7 << 5)
-#define PIIX4_OCW2_SP (0x6 << 5)
-#define PIIX4_OCW2_NOP (0x2 << 5)
-
-#define PIIX4_OCW2_SEL (0x0 << 3)
-
-#define PIIX4_OCW2_ILS_0 0
-#define PIIX4_OCW2_ILS_1 1
-#define PIIX4_OCW2_ILS_2 2
-#define PIIX4_OCW2_ILS_3 3
-#define PIIX4_OCW2_ILS_4 4
-#define PIIX4_OCW2_ILS_5 5
-#define PIIX4_OCW2_ILS_6 6
-#define PIIX4_OCW2_ILS_7 7
-#define PIIX4_OCW2_ILS_8 0
-#define PIIX4_OCW2_ILS_9 1
-#define PIIX4_OCW2_ILS_10 2
-#define PIIX4_OCW2_ILS_11 3
-#define PIIX4_OCW2_ILS_12 4
-#define PIIX4_OCW2_ILS_13 5
-#define PIIX4_OCW2_ILS_14 6
-#define PIIX4_OCW2_ILS_15 7
-
-#define PIIX4_OCW3_SEL (0x1 << 3)
-
-#define PIIX4_OCW3_IRR 0x2
-#define PIIX4_OCW3_ISR 0x3
+/* PIRQX Route Control */
+#define PIIX4_FUNC0_PIRQRC 0x60
+#define PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE (1 << 7)
+#define PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK 0xf
+#define PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX 16
+/* Top Of Memory */
+#define PIIX4_FUNC0_TOM 0x69
+#define PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK 0xf0
+/* Deterministic Latency Control */
+#define PIIX4_FUNC0_DLC 0x82
+#define PIIX4_FUNC0_DLC_USBPR_EN (1 << 2)
+#define PIIX4_FUNC0_DLC_PASSIVE_RELEASE_EN (1 << 1)
+#define PIIX4_FUNC0_DLC_DELAYED_TRANSACTION_EN (1 << 0)
+
+/* IDE Timing */
+#define PIIX4_FUNC1_IDETIM_PRIMARY_LO 0x40
+#define PIIX4_FUNC1_IDETIM_PRIMARY_HI 0x41
+#define PIIX4_FUNC1_IDETIM_PRIMARY_HI_IDE_DECODE_EN (1 << 7)
+#define PIIX4_FUNC1_IDETIM_SECONDARY_LO 0x42
+#define PIIX4_FUNC1_IDETIM_SECONDARY_HI 0x43
+#define PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN (1 << 7)
#endif /* __ASM_MIPS_BOARDS_PIIX4_H */
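
The old 8259-style I/O-port definitions give way to PIIX4 PCI configuration-space registers. A hedged sketch of how platform code might use them to route PIRQA to a legacy IRQ through the standard PCI config accessors; the device pointer and IRQ number are placeholders, not taken from this series:

    #include <linux/pci.h>
    #include <asm/mips-boards/piix4.h>

    /* Sketch: route PIRQA to ISA IRQ 10 on the PIIX4 ISA bridge (function 0). */
    static void sketch_route_pirqa(struct pci_dev *pdev)
    {
            u8 route;

            pci_read_config_byte(pdev, PIIX4_FUNC0_PIRQRC, &route);
            route &= ~(PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE |
                       PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK);
            route |= 10;
            pci_write_config_byte(pdev, PIIX4_FUNC0_PIRQRC, route);
    }
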
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 3b29079b5424..e277bbad2871 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -24,21 +24,21 @@
#endif /* SMTC */
#include <asm-generic/mm_hooks.h>
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-
#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
do { \
extern void tlbmiss_handler_setup_pgd(unsigned long); \
tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \
} while (0)
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
#define TLBMISS_HANDLER_SETUP() \
do { \
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \
- write_c0_xcontext((unsigned long) smp_processor_id() << 51); \
+ write_c0_xcontext((unsigned long) smp_processor_id() << \
+ SMP_CPUID_REGSHIFT); \
} while (0)
-#else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/
+#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current */
/*
* For the fast tlb miss handlers, we keep a per cpu array of pointers
@@ -47,21 +47,11 @@ do { \
*/
extern unsigned long pgd_current[];
-#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
- pgd_current[smp_processor_id()] = (unsigned long)(pgd)
-
-#ifdef CONFIG_32BIT
#define TLBMISS_HANDLER_SETUP() \
- write_c0_context((unsigned long) smp_processor_id() << 25); \
+ write_c0_context((unsigned long) smp_processor_id() << \
+ SMP_CPUID_REGSHIFT); \
back_to_back_c0_hazard(); \
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
-#endif
-#ifdef CONFIG_64BIT
-#define TLBMISS_HANDLER_SETUP() \
- write_c0_context((unsigned long) smp_processor_id() << 26); \
- back_to_back_c0_hazard(); \
- TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
-#endif
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
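
The hard-coded shift values (25 for the 32-bit Context register, 26 for 64-bit, 51 for XContext) are unified behind SMP_CPUID_REGSHIFT, which is assumed to be introduced elsewhere in this series. A minimal sketch of the inverse operation, i.e. recovering the CPU number that TLBMISS_HANDLER_SETUP() stores:

    /* Sketch only: SMP_CPUID_REGSHIFT comes from elsewhere in this series. */
    static inline unsigned int sketch_cpu_from_context(unsigned long context)
    {
            return (unsigned int)(context >> SMP_CPUID_REGSHIFT);
    }
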
diff --git a/arch/mips/include/asm/octeon/cvmx-pip.h b/arch/mips/include/asm/octeon/cvmx-pip.h
index a76fe5a57a9f..df69bfd2b006 100644
--- a/arch/mips/include/asm/octeon/cvmx-pip.h
+++ b/arch/mips/include/asm/octeon/cvmx-pip.h
@@ -192,13 +192,13 @@ typedef struct {
/* Number of packets processed by PIP */
uint32_t packets;
/*
- * Number of indentified L2 multicast packets. Does not
+ * Number of identified L2 multicast packets. Does not
* include broadcast packets. Only includes packets whose
* parse mode is SKIP_TO_L2
*/
uint32_t multicast_packets;
/*
- * Number of indentified L2 broadcast packets. Does not
+ * Number of identified L2 broadcast packets. Does not
* include multicast packets. Only includes packets whose
* parse mode is SKIP_TO_L2
*/
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index 1e7e0961064b..ccd2b75f152c 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -17,22 +17,8 @@
#include <linux/types.h>
#include <asm/bootinfo.h>
-extern int early_init_dt_scan_memory_arch(unsigned long node,
- const char *uname, int depth, void *data);
-
extern void device_tree_init(void);
-static inline unsigned long pci_address_to_pio(phys_addr_t address)
-{
- /*
- * The ioport address can be directly used by inX() / outX()
- */
- BUG_ON(address > IO_SPACE_LIMIT);
-
- return (unsigned long) address;
-}
-#define pci_address_to_pio pci_address_to_pio
-
struct boot_param_header;
extern void __dt_setup_arch(struct boot_param_header *bph);
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index 5e6cd0947393..7bba9da110af 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -81,7 +81,6 @@ static inline long regs_return_value(struct pt_regs *regs)
#define instruction_pointer(regs) ((regs)->cp0_epc)
#define profile_pc(regs) instruction_pointer(regs)
-#define user_stack_pointer(r) ((r)->regs[29])
extern asmlinkage void syscall_trace_enter(struct pt_regs *regs);
extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
@@ -100,4 +99,17 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs)
(struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1 - 32) - 1; \
})
+/* Helpers for working with the user stack pointer */
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return regs->regs[29];
+}
+
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->regs[29] = val;
+}
+
#endif /* _ASM_PTRACE_H */
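
The user_stack_pointer() macro becomes a typed inline helper and gains a setter, which regset-based code (see the CORE_DUMP_USE_REGSET hunk earlier) and signal-frame code can use. A hedged usage sketch; the alignment and the frame_size parameter are illustrative:

    #include <asm/ptrace.h>

    /* Sketch: carve an aligned frame off the user stack via the new helpers. */
    static unsigned long sketch_push_user_frame(struct pt_regs *regs,
                                                unsigned long frame_size)
    {
            unsigned long usp = user_stack_pointer(regs);

            usp = (usp - frame_size) & ~7UL;        /* assume 8-byte alignment */
            user_stack_pointer_set(regs, usp);
            return usp;
    }
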
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index a0b2650516ac..34d1a1917125 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -15,6 +15,7 @@
#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
/*
@@ -162,7 +163,15 @@ static inline void flush_scache_line_indexed(unsigned long addr)
static inline void flush_icache_line(unsigned long addr)
{
__iflush_prologue
- cache_op(Hit_Invalidate_I, addr);
+ switch (boot_cpu_type()) {
+ case CPU_LOONGSON2:
+ cache_op(Hit_Invalidate_I_Loongson23, addr);
+ break;
+
+ default:
+ cache_op(Hit_Invalidate_I, addr);
+ break;
+ }
__iflush_epilogue
}
@@ -208,7 +217,15 @@ static inline void flush_scache_line(unsigned long addr)
*/
static inline void protected_flush_icache_line(unsigned long addr)
{
- protected_cache_op(Hit_Invalidate_I, addr);
+ switch (boot_cpu_type()) {
+ case CPU_LOONGSON2:
+ protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
+ break;
+
+ default:
+ protected_cache_op(Hit_Invalidate_I, addr);
+ break;
+ }
}
/*
@@ -412,8 +429,8 @@ __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
/* build blast_xxx_range, protected_blast_xxx_range */
-#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
-static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
+static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
unsigned long end) \
{ \
unsigned long lsize = cpu_##desc##_line_size(); \
@@ -432,13 +449,15 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
__##pfx##flush_epilogue \
}
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \
+ protected_, loongson23_)
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
-__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
-__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
+__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
#endif /* _ASM_R4KCACHE_H */
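
The widened __BUILD_BLAST_CACHE_RANGE() macro takes an extra name infix so the Loongson-2/3 icache variant can live alongside the generic helpers. As a rough sketch (omitting the __iflush prologue/epilogue bookkeeping, which is not shown in this hunk), the new protected_, loongson23_ instantiation expands to something like:

    static inline void protected_loongson23_blast_icache_range(unsigned long start,
                                                                unsigned long end)
    {
            unsigned long lsize = cpu_icache_line_size();
            unsigned long addr = start & ~(lsize - 1);
            unsigned long aend = (end - 1) & ~(lsize - 1);

            while (1) {
                    /* Loongson-specific Hit_Invalidate_I variant on each line */
                    protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
                    if (addr == aend)
                            break;
                    addr += lsize;
            }
    }

so the cache management code can select the Loongson-specific flush without an extra per-line runtime check.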
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index e26589ef36ee..d7bfdeba9e84 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -5,6 +5,14 @@
extern void setup_early_printk(void);
+#ifdef CONFIG_EARLY_PRINTK_8250
+extern void setup_8250_early_printk_port(unsigned long base,
+ unsigned int reg_shift, unsigned int timeout);
+#else
+static inline void setup_8250_early_printk_port(unsigned long base,
+ unsigned int reg_shift, unsigned int timeout) {}
+#endif
+
extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 23fc95e65673..4857e2c8df5a 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -17,6 +17,7 @@
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
/*
* For SMTC kernel, global IE should be left set, and interrupts
@@ -93,21 +94,8 @@
.endm
#ifdef CONFIG_SMP
-#ifdef CONFIG_MIPS_MT_SMTC
-#define PTEBASE_SHIFT 19 /* TCBIND */
-#define CPU_ID_REG CP0_TCBIND
-#define CPU_ID_MFC0 mfc0
-#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
-#define PTEBASE_SHIFT 48 /* XCONTEXT */
-#define CPU_ID_REG CP0_XCONTEXT
-#define CPU_ID_MFC0 MFC0
-#else
-#define PTEBASE_SHIFT 23 /* CONTEXT */
-#define CPU_ID_REG CP0_CONTEXT
-#define CPU_ID_MFC0 MFC0
-#endif
.macro get_saved_sp /* SMP variation */
- CPU_ID_MFC0 k0, CPU_ID_REG
+ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
lui k1, %hi(kernelsp)
#else
@@ -117,17 +105,17 @@
daddiu k1, %hi(kernelsp)
dsll k1, 16
#endif
- LONG_SRL k0, PTEBASE_SHIFT
+ LONG_SRL k0, SMP_CPUID_PTRSHIFT
LONG_ADDU k1, k0
LONG_L k1, %lo(kernelsp)(k1)
.endm
.macro set_saved_sp stackp temp temp2
- CPU_ID_MFC0 \temp, CPU_ID_REG
- LONG_SRL \temp, PTEBASE_SHIFT
+ ASM_CPUID_MFC0 \temp, ASM_SMP_CPUID_REG
+ LONG_SRL \temp, SMP_CPUID_PTRSHIFT
LONG_S \stackp, kernelsp(\temp)
.endm
-#else
+#else /* !CONFIG_SMP */
.macro get_saved_sp /* Uniprocessor variation */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
/*
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
new file mode 100644
index 000000000000..81c89132c59d
--- /dev/null
+++ b/arch/mips/include/asm/syscall.h
@@ -0,0 +1,116 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ *
+ * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
+ */
+
+#ifndef __ASM_MIPS_SYSCALL_H
+#define __ASM_MIPS_SYSCALL_H
+
+#include <linux/audit.h>
+#include <linux/elf-em.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->regs[2];
+}
+
+static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
+ struct task_struct *task, struct pt_regs *regs, unsigned int n)
+{
+ unsigned long usp = regs->regs[29];
+
+ switch (n) {
+ case 0: case 1: case 2: case 3:
+ *arg = regs->regs[4 + n];
+
+ return 0;
+
+#ifdef CONFIG_32BIT
+ case 4: case 5: case 6: case 7:
+ return get_user(*arg, (int *)usp + n);
+#endif
+
+#ifdef CONFIG_64BIT
+ case 4: case 5: case 6: case 7:
+#ifdef CONFIG_MIPS32_O32
+ if (test_thread_flag(TIF_32BIT_REGS))
+ return get_user(*arg, (int *)usp + n);
+ else
+#endif
+ *arg = regs->regs[4 + n];
+
+ return 0;
+#endif
+
+ default:
+ BUG();
+ }
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->regs[2];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ if (error) {
+ regs->regs[2] = -error;
+ regs->regs[7] = -1;
+ } else {
+ regs->regs[2] = val;
+ regs->regs[7] = 0;
+ }
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ int ret = 0;
+
+ while (n--)
+ ret |= mips_get_syscall_arg(args++, task, regs, i++);
+
+ /*
+ * No way to communicate an error because this is a void function.
+ */
+#if 0
+ return ret;
+#endif
+}
+
+extern const unsigned long sys_call_table[];
+extern const unsigned long sys32_call_table[];
+extern const unsigned long sysn32_call_table[];
+
+static inline int __syscall_get_arch(void)
+{
+ int arch = EM_MIPS;
+#ifdef CONFIG_64BIT
+ arch |= __AUDIT_ARCH_64BIT;
+#endif
+#if defined(__LITTLE_ENDIAN)
+ arch |= __AUDIT_ARCH_LE;
+#endif
+ return arch;
+}
+
+#endif /* __ASM_MIPS_SYSCALL_H */
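
With these helpers in place, generic tracing code can pull syscall arguments without caring about the O32 quirk that arguments 5-8 live on the user stack. A minimal sketch of a consumer (illustration only, not part of this patch):

    /* dump the current syscall and its six arguments from a tracing hook */
    static void dump_syscall(struct pt_regs *regs)
    {
            unsigned long args[6];

            syscall_get_arguments(current, regs, 0, 6, args);
            pr_debug("syscall %ld(%lx, %lx, %lx, %lx, %lx, %lx)\n",
                     syscall_get_nr(current, regs),
                     args[0], args[1], args[2], args[3], args[4], args[5]);
    }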
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 61215a34acc6..f9b24bfbdbae 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -116,6 +116,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
#define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
+#define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -132,21 +133,54 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
#define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
- _TIF_SYSCALL_AUDIT)
+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
/* work to do in syscall_trace_leave() */
#define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
- _TIF_SYSCALL_AUDIT)
+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK \
(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
- _TIF_WORK_SYSCALL_EXIT)
+ _TIF_WORK_SYSCALL_EXIT | \
+ _TIF_SYSCALL_TRACEPOINT)
-#endif /* __KERNEL__ */
+/*
+ * We stash the processor id into a COP0 register so it can be
+ * retrieved quickly at kernel exception entry.
+ */
+#if defined(CONFIG_MIPS_MT_SMTC)
+#define SMP_CPUID_REG 2, 2 /* TCBIND */
+#define ASM_SMP_CPUID_REG $2, 2
+#define SMP_CPUID_PTRSHIFT 19
+#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
+#define SMP_CPUID_REG 20, 0 /* XCONTEXT */
+#define ASM_SMP_CPUID_REG $20
+#define SMP_CPUID_PTRSHIFT 48
+#else
+#define SMP_CPUID_REG 4, 0 /* CONTEXT */
+#define ASM_SMP_CPUID_REG $4
+#define SMP_CPUID_PTRSHIFT 23
+#endif
+#ifdef CONFIG_64BIT
+#define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 3)
+#else
+#define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 2)
+#endif
+
+#ifdef CONFIG_MIPS_MT_SMTC
+#define ASM_CPUID_MFC0 mfc0
+#define UASM_i_CPUID_MFC0 uasm_i_mfc0
+#else
+#define ASM_CPUID_MFC0 MFC0
+#define UASM_i_CPUID_MFC0 UASM_i_MFC0
+#endif
+
+#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
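
The two shift values are related by the pointer size: the CPU number is stored pre-shifted in the COP0 register as cpu << SMP_CPUID_REGSHIFT, and the exception-path assembly only has to shift it right by SMP_CPUID_PTRSHIFT to get a ready-made byte offset into per-CPU pointer arrays such as kernelsp[] and pgd_current[]. Worked out (a sketch of the arithmetic, not code from this patch):

    stored = cpu << SMP_CPUID_REGSHIFT
    offset = stored >> SMP_CPUID_PTRSHIFT
           = cpu << (SMP_CPUID_REGSHIFT - SMP_CPUID_PTRSHIFT)
           = cpu * sizeof(long)        /* +3 on 64-bit, +2 on 32-bit */

which is why get_saved_sp can do a LONG_SRL followed by LONG_ADDU with no multiply on the exception fast path.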
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 2d7b9df4542d..24f534a7fbc3 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -75,7 +75,7 @@ extern int init_r4k_clocksource(void);
static inline int init_mips_clocksource(void)
{
-#if defined(CONFIG_CSRC_R4K) && !defined(CONFIG_CSRC_GIC)
+#ifdef CONFIG_CSRC_R4K
return init_r4k_clocksource();
#else
return 0;
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 63c9c886173a..4d3b92886665 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -14,6 +14,13 @@
#include <uapi/asm/unistd.h>
+#ifdef CONFIG_MIPS32_N32
+#define NR_syscalls (__NR_N32_Linux + __NR_N32_Linux_syscalls)
+#elif defined(CONFIG_64BIT)
+#define NR_syscalls (__NR_64_Linux + __NR_64_Linux_syscalls)
+#else
+#define NR_syscalls (__NR_O32_Linux + __NR_O32_Linux_syscalls)
+#endif
#ifndef __ASSEMBLY__
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index 88e292b7719e..e81174432bab 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -33,6 +33,8 @@ struct siginfo;
#error _MIPS_SZLONG neither 32 nor 64
#endif
+#define __ARCH_SIGSYS
+
#include <asm-generic/siginfo.h>
typedef struct siginfo {
@@ -97,6 +99,13 @@ typedef struct siginfo {
__ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
+
+ /* SIGSYS */
+ struct {
+ void __user *_call_addr; /* calling user insn */
+ int _syscall; /* triggering system call number */
+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
} _sifields;
} siginfo_t;
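
Defining __ARCH_SIGSYS pulls in the generic si_call_addr/si_syscall/si_arch accessors, which is what allows seccomp's SECCOMP_RET_TRAP to report the failing syscall to userspace. A minimal userspace sketch of consuming them (illustration only; fprintf is not async-signal-safe and is used here purely for brevity):

    #include <signal.h>
    #include <stdio.h>

    static void sigsys_handler(int sig, siginfo_t *info, void *ucontext)
    {
            /* si_syscall/si_arch/si_call_addr come from the generic siginfo macros */
            fprintf(stderr, "SIGSYS: syscall %d (arch 0x%x) at %p\n",
                    info->si_syscall, info->si_arch, info->si_call_addr);
    }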
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 61c01f054d1b..0df9787cd84d 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -94,4 +94,6 @@
#define SO_BUSY_POLL 46
+#define SO_MAX_PACING_RATE 47
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 423d871a946b..1c1b71752c84 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
obj-$(CONFIG_CSRC_GIC) += csrc-gic.o
obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
-obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o
obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
@@ -35,6 +34,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_MODULES) += mips_ksyms.o module.o
obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o
+obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o
@@ -84,6 +84,7 @@ obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o
obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5465dc183e5a..c814287bdf5d 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -376,13 +376,33 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R4000PC";
}
} else {
+ int cca = read_c0_config() & CONF_CM_CMASK;
+ int mc;
+
+ /*
+ * SC and MC versions can't be reliably told apart, but only
+ * the latter support coherent caching modes, so assume the
+ * firmware has set the KSEG0 coherency attribute sensibly:
+ * a coherent attribute means MC, anything else (including
+ * plain uncached) means SC.
+ */
+ switch (cca) {
+ case CONF_CM_CACHABLE_CE:
+ case CONF_CM_CACHABLE_COW:
+ case CONF_CM_CACHABLE_CUW:
+ mc = 1;
+ break;
+ default:
+ mc = 0;
+ break;
+ }
if ((c->processor_id & PRID_REV_MASK) >=
PRID_REV_R4400) {
- c->cputype = CPU_R4400SC;
- __cpu_name[cpu] = "R4400SC";
+ c->cputype = mc ? CPU_R4400MC : CPU_R4400SC;
+ __cpu_name[cpu] = mc ? "R4400MC" : "R4400SC";
} else {
- c->cputype = CPU_R4000SC;
- __cpu_name[cpu] = "R4000SC";
+ c->cputype = mc ? CPU_R4000MC : CPU_R4000SC;
+ __cpu_name[cpu] = mc ? "R4000MC" : "R4000SC";
}
}
@@ -1079,8 +1099,8 @@ void cpu_report(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
- printk(KERN_INFO "CPU revision is: %08x (%s)\n",
- c->processor_id, cpu_name_string());
+ pr_info("CPU%d revision is: %08x (%s)\n",
+ smp_processor_id(), c->processor_id, cpu_name_string());
if (c->options & MIPS_CPU_FPU)
printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
}
diff --git a/arch/mips/kernel/csrc-powertv.c b/arch/mips/kernel/csrc-powertv.c
deleted file mode 100644
index abd99ea911ae..000000000000
--- a/arch/mips/kernel/csrc-powertv.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (C) 2008 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-/*
- * The file comes from kernel/csrc-r4k.c
- */
-#include <linux/clocksource.h>
-#include <linux/init.h>
-
-#include <asm/time.h> /* Not included in linux/time.h */
-
-#include <asm/mach-powertv/asic_regs.h>
-#include "powertv-clock.h"
-
-/* MIPS PLL Register Definitions */
-#define PLL_GET_M(x) (((x) >> 8) & 0x000000FF)
-#define PLL_GET_N(x) (((x) >> 16) & 0x000000FF)
-#define PLL_GET_P(x) (((x) >> 24) & 0x00000007)
-
-/*
- * returns: Clock frequency in kHz
- */
-unsigned int __init mips_get_pll_freq(void)
-{
- unsigned int pll_reg, m, n, p;
- unsigned int fin = 54000; /* Base frequency in kHz */
- unsigned int fout;
-
- /* Read PLL register setting */
- pll_reg = asic_read(mips_pll_setup);
- m = PLL_GET_M(pll_reg);
- n = PLL_GET_N(pll_reg);
- p = PLL_GET_P(pll_reg);
- pr_info("MIPS PLL Register:0x%x M=%d N=%d P=%d\n", pll_reg, m, n, p);
-
- /* Calculate clock frequency = (2 * N * 54MHz) / (M * (2**P)) */
- fout = ((2 * n * fin) / (m * (0x01 << p)));
-
- pr_info("MIPS Clock Freq=%d kHz\n", fout);
-
- return fout;
-}
-
-static cycle_t c0_hpt_read(struct clocksource *cs)
-{
- return read_c0_count();
-}
-
-static struct clocksource clocksource_mips = {
- .name = "powertv-counter",
- .read = c0_hpt_read,
- .mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static void __init powertv_c0_hpt_clocksource_init(void)
-{
- unsigned int pll_freq = mips_get_pll_freq();
-
- pr_info("CPU frequency %d.%02d MHz\n", pll_freq / 1000,
- (pll_freq % 1000) * 100 / 1000);
-
- mips_hpt_frequency = pll_freq / 2 * 1000;
-
- clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
-
- clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
-}
-
-/**
- * struct tim_c - free running counter
- * @hi: High 16 bits of the counter
- * @lo: Low 32 bits of the counter
- *
- * Lays out the structure of the free running counter in memory. This counter
- * increments at a rate of 27 MHz/8 on all platforms.
- */
-struct tim_c {
- unsigned int hi;
- unsigned int lo;
-};
-
-static struct tim_c *tim_c;
-
-static cycle_t tim_c_read(struct clocksource *cs)
-{
- unsigned int hi;
- unsigned int next_hi;
- unsigned int lo;
-
- hi = readl(&tim_c->hi);
-
- for (;;) {
- lo = readl(&tim_c->lo);
- next_hi = readl(&tim_c->hi);
- if (next_hi == hi)
- break;
- hi = next_hi;
- }
-
-pr_crit("%s: read %llx\n", __func__, ((u64) hi << 32) | lo);
- return ((u64) hi << 32) | lo;
-}
-
-#define TIM_C_SIZE 48 /* # bits in the timer */
-
-static struct clocksource clocksource_tim_c = {
- .name = "powertv-tim_c",
- .read = tim_c_read,
- .mask = CLOCKSOURCE_MASK(TIM_C_SIZE),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-/**
- * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock
- *
- * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to
- * 1 / (27,000,000/8) seconds.
- */
-static void __init powertv_tim_c_clocksource_init(void)
-{
- const unsigned long counts_per_second = 27000000 / 8;
-
- clocksource_tim_c.rating = 200;
-
- clocksource_register_hz(&clocksource_tim_c, counts_per_second);
- tim_c = (struct tim_c *) asic_reg_addr(tim_ch);
-}
-
-/**
- powertv_clocksource_init - initialize all clocksources
- */
-void __init powertv_clocksource_init(void)
-{
- powertv_c0_hpt_clocksource_init();
- powertv_tim_c_clocksource_init();
-}
diff --git a/arch/mips/kernel/early_printk_8250.c b/arch/mips/kernel/early_printk_8250.c
new file mode 100644
index 000000000000..83cea3767556
--- /dev/null
+++ b/arch/mips/kernel/early_printk_8250.c
@@ -0,0 +1,66 @@
+/*
+ * 8250/16550-type serial ports prom_putchar()
+ *
+ * Copyright (C) 2010 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/io.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+
+static void __iomem *serial8250_base;
+static unsigned int serial8250_reg_shift;
+static unsigned int serial8250_tx_timeout;
+
+void setup_8250_early_printk_port(unsigned long base, unsigned int reg_shift,
+ unsigned int timeout)
+{
+ serial8250_base = (void __iomem *)base;
+ serial8250_reg_shift = reg_shift;
+ serial8250_tx_timeout = timeout;
+}
+
+static inline u8 serial_in(int offset)
+{
+ return readb(serial8250_base + (offset << serial8250_reg_shift));
+}
+
+static inline void serial_out(int offset, char value)
+{
+ writeb(value, serial8250_base + (offset << serial8250_reg_shift));
+}
+
+void prom_putchar(char c)
+{
+ unsigned int timeout;
+ int status, bits;
+
+ if (!serial8250_base)
+ return;
+
+ timeout = serial8250_tx_timeout;
+ bits = UART_LSR_TEMT | UART_LSR_THRE;
+
+ do {
+ status = serial_in(UART_LSR);
+
+ if (--timeout == 0)
+ break;
+ } while ((status & bits) != bits);
+
+ if (timeout)
+ serial_out(UART_TX, c);
+}
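
Platforms opt in by calling the new hook from their early setup code; a hypothetical example (the UART address, register shift and poll count below are made-up values, not taken from this patch):

    #include <asm/addrspace.h>
    #include <asm/setup.h>

    void __init plat_setup_early_console(void)
    {
            /* 16550 at physical 0x1c000000, registers 4 bytes apart */
            setup_8250_early_printk_port(CKSEG1ADDR(0x1c000000), 2, 10000);
    }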
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index dba90ec0dc38..185ba258361b 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -11,11 +11,14 @@
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
+#include <linux/syscalls.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
+#include <asm/syscall.h>
#include <asm/uasm.h>
+#include <asm/unistd.h>
#include <asm-generic/sections.h>
@@ -364,3 +367,33 @@ out:
WARN_ON(1);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+#ifdef CONFIG_32BIT
+unsigned long __init arch_syscall_addr(int nr)
+{
+ return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
+}
+#endif
+
+#ifdef CONFIG_64BIT
+
+unsigned long __init arch_syscall_addr(int nr)
+{
+#ifdef CONFIG_MIPS32_N32
+ if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
+ return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
+#endif
+ if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
+ return (unsigned long)sys_call_table[nr - __NR_64_Linux];
+#ifdef CONFIG_MIPS32_O32
+ if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
+ return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
+#endif
+
+ return (unsigned long) &sys_ni_syscall;
+}
+#endif
+
+#endif /* CONFIG_FTRACE_SYSCALLS */
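
arch_syscall_addr() is only called by the generic syscall tracepoint setup, which walks all NR_syscalls entries once at boot and pairs each handler address with its SYSCALL_DEFINE metadata, roughly along these lines (an abbreviated sketch of the generic caller in kernel/trace/trace_syscalls.c):

    for (i = 0; i < NR_syscalls; i++) {
            addr = arch_syscall_addr(i);
            meta = find_syscall_meta(addr);
            if (!meta)
                    continue;
            meta->syscall_nr = i;
            syscalls_metadata[i] = meta;
    }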
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 31fa856829cb..47d7583cd67f 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -374,12 +374,20 @@ NESTED(except_vec_nmi, 0, sp)
NESTED(nmi_handler, PT_SIZE, sp)
.set push
.set noat
+ /*
+ * Clear ERL - restore segment mapping
+ * Clear BEV - required for page fault exception handler to work
+ */
+ mfc0 k0, CP0_STATUS
+ ori k0, k0, ST0_EXL
+ li k1, ~(ST0_BEV | ST0_ERL)
+ and k0, k0, k1
+ mtc0 k0, CP0_STATUS
+ _ehb
SAVE_ALL
move a0, sp
jal nmi_exception_handler
- RESTORE_ALL
- .set mips3
- eret
+ /* nmi_exception_handler never returns */
.set pop
END(nmi_handler)
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 72ef2d25cbf2..e498f2b3646a 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -150,7 +150,7 @@ int __init mips_cpu_intc_init(struct device_node *of_node,
domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
&mips_cpu_intc_irq_domain_ops, NULL);
if (!domain)
- panic("Failed to add irqdomain for MIPS CPU\n");
+ panic("Failed to add irqdomain for MIPS CPU");
return 0;
}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 977a623d9253..2a52568dbcd6 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -23,6 +23,7 @@
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/mm.h>
+#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
@@ -46,7 +47,7 @@ static DEFINE_SPINLOCK(dbe_lock);
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
- GFP_KERNEL, PAGE_KERNEL, -1,
+ GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 45f1ffcf1a4b..24cdf64789c3 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -971,11 +971,11 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
- [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
- [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
},
},
[C(ITLB)] = {
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 0fa0b69cdd53..3c3b0df8f48d 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -13,12 +13,9 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/bootmem.h>
-#include <linux/initrd.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <asm/page.h>
#include <asm/prom.h>
@@ -40,13 +37,6 @@ char *mips_get_machine_name(void)
}
#ifdef CONFIG_OF
-int __init early_init_dt_scan_memory_arch(unsigned long node,
- const char *uname, int depth,
- void *data)
-{
- return early_init_dt_scan_memory(node, uname, depth, data);
-}
-
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
return add_memory_region(base, size, BOOT_MEM_RAM);
@@ -57,57 +47,11 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
-}
-#endif
-
-int __init early_init_dt_scan_model(unsigned long node, const char *uname,
- int depth, void *data)
-{
- if (!depth) {
- char *model = of_get_flat_dt_prop(node, "model", NULL);
-
- if (model)
- mips_set_machine_name(model);
- }
- return 0;
-}
-
-void __init early_init_devtree(void *params)
-{
- /* Setup flat device-tree pointer */
- initial_boot_params = params;
-
- /* Retrieve various informations from the /chosen node of the
- * device-tree, including the platform type, initrd location and
- * size, and more ...
- */
- of_scan_flat_dt(early_init_dt_scan_chosen, arcs_cmdline);
-
-
- /* Scan memory nodes */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
- of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
-
- /* try to load the mips machine name */
- of_scan_flat_dt(early_init_dt_scan_model, NULL);
-}
-
void __init __dt_setup_arch(struct boot_param_header *bph)
{
- if (be32_to_cpu(bph->magic) != OF_DT_HEADER) {
- pr_err("DTB has bad magic, ignoring builtin OF DTB\n");
-
+ if (!early_init_dt_scan(bph))
return;
- }
-
- initial_boot_params = bph;
- early_init_devtree(initial_boot_params);
+ mips_set_machine_name(of_flat_dt_get_machine_name());
}
#endif
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 8ae1ebef8b71..b52e1d2b33e0 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -16,16 +16,20 @@
*/
#include <linux/compiler.h>
#include <linux/context_tracking.h>
+#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
+#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/security.h>
+#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
+#include <linux/ftrace.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
@@ -35,10 +39,14 @@
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
+#include <asm/syscall.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
/*
* Called by kernel/ptrace.c when detaching..
*
@@ -255,6 +263,133 @@ int ptrace_set_watch_regs(struct task_struct *child,
return 0;
}
+/* regset get/set implementations */
+
+static int gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs, 0, sizeof(*regs));
+}
+
+static int gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs newregs;
+ int ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &newregs,
+ 0, sizeof(newregs));
+ if (ret)
+ return ret;
+
+ *task_pt_regs(target) = newregs;
+
+ return 0;
+}
+
+static int fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu,
+ 0, sizeof(elf_fpregset_t));
+ /* XXX fcr31 */
+}
+
+static int fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu,
+ 0, sizeof(elf_fpregset_t));
+ /* XXX fcr31 */
+}
+
+enum mips_regset {
+ REGSET_GPR,
+ REGSET_FPR,
+};
+
+static const struct user_regset mips_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned int),
+ .align = sizeof(unsigned int),
+ .get = gpr_get,
+ .set = gpr_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .get = fpr_get,
+ .set = fpr_set,
+ },
+};
+
+static const struct user_regset_view user_mips_view = {
+ .name = "mips",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips_regsets,
+ .n = ARRAY_SIZE(mips_regsets),
+};
+
+static const struct user_regset mips64_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .get = gpr_get,
+ .set = gpr_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .get = fpr_get,
+ .set = fpr_set,
+ },
+};
+
+static const struct user_regset_view user_mips64_view = {
+ .name = "mips",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips64_regsets,
+ .n = ARRAY_SIZE(mips64_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+#ifdef CONFIG_32BIT
+ return &user_mips_view;
+#endif
+
+#ifdef CONFIG_MIPS32_O32
+ if (test_thread_flag(TIF_32BIT_REGS))
+ return &user_mips_view;
+#endif
+
+ return &user_mips64_view;
+}
+
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
@@ -517,52 +652,27 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
-static inline int audit_arch(void)
-{
- int arch = EM_MIPS;
-#ifdef CONFIG_64BIT
- arch |= __AUDIT_ARCH_64BIT;
-#endif
-#if defined(__LITTLE_ENDIAN)
- arch |= __AUDIT_ARCH_LE;
-#endif
- return arch;
-}
-
/*
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
+ long ret = 0;
user_exit();
/* do the secure computing check first */
secure_computing_strict(regs->regs[2]);
- if (!(current->ptrace & PT_PTRACED))
- goto out;
-
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- goto out;
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ ret = -1;
- /* The 0x80 provides a way for the tracing parent to distinguish
- between a syscall stop and SIGTRAP delivery */
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
- 0x80 : 0));
-
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[2]);
-out:
- audit_syscall_entry(audit_arch(), regs->regs[2],
+ audit_syscall_entry(__syscall_get_arch(),
+ regs->regs[2],
regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
}
@@ -582,26 +692,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(regs);
- if (!(current->ptrace & PT_PTRACED))
- return;
-
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return;
-
- /* The 0x80 provides a way for the tracing parent to distinguish
- between a syscall stop and SIGTRAP delivery */
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
- 0x80 : 0));
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->regs[2]);
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, 0);
user_enter();
}
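
The new regsets make the generic PTRACE_GETREGSET/PTRACE_SETREGSET interface work on MIPS. A sketch of a 64-bit tracer reading a tracee's registers through it (illustration only; the kernel side above copies struct pt_regs directly into the supplied buffer):

    #include <elf.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    static long read_gprs(pid_t pid, void *buf, size_t len)
    {
            struct iovec iov = { .iov_base = buf, .iov_len = len };

            return ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
    }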
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index d763f11e35e2..2c12ea1668d1 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -172,8 +172,9 @@ int rtlx_open(int index, int can_sleep)
if (rtlx == NULL) {
if( (p = vpe_get_shared(tclimit)) == NULL) {
if (can_sleep) {
- __wait_event_interruptible(channel_wqs[index].lx_queue,
- (p = vpe_get_shared(tclimit)), ret);
+ ret = __wait_event_interruptible(
+ channel_wqs[index].lx_queue,
+ (p = vpe_get_shared(tclimit)));
if (ret)
goto out_fail;
} else {
@@ -263,11 +264,10 @@ unsigned int rtlx_read_poll(int index, int can_sleep)
/* data available to read? */
if (chan->lx_read == chan->lx_write) {
if (can_sleep) {
- int ret = 0;
-
- __wait_event_interruptible(channel_wqs[index].lx_queue,
+ int ret = __wait_event_interruptible(
+ channel_wqs[index].lx_queue,
(chan->lx_read != chan->lx_write) ||
- sp_stopping, ret);
+ sp_stopping);
if (ret)
return ret;
@@ -440,14 +440,13 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
/* any space left... */
if (!rtlx_write_poll(minor)) {
- int ret = 0;
+ int ret;
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
- __wait_event_interruptible(channel_wqs[minor].rt_queue,
- rtlx_write_poll(minor),
- ret);
+ ret = __wait_event_interruptible(channel_wqs[minor].rt_queue,
+ rtlx_write_poll(minor));
if (ret)
return ret;
}
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index e774bb1088b5..e8e541b40d86 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -40,17 +40,58 @@ NESTED(handle_sys, PT_SIZE, sp)
sw t1, PT_EPC(sp)
beqz t0, illegal_syscall
- sll t0, v0, 3
+ sll t0, v0, 2
la t1, sys_call_table
addu t1, t0
lw t2, (t1) # syscall routine
- lw t3, 4(t1) # >= 0 if we need stack arguments
beqz t2, illegal_syscall
sw a3, PT_R26(sp) # save a3 for syscall restarting
- bgez t3, stackargs
-stack_done:
+ /*
+ * More than four arguments. Try to deal with it by copying the
+ * stack arguments from the user stack to the kernel stack.
+ * This Sucks (TM).
+ */
+ lw t0, PT_R29(sp) # get old user stack pointer
+
+ /*
+ * We intentionally keep the kernel stack a little below the top of
+ * userspace so we don't have to do a slower byte accurate check here.
+ */
+ lw t5, TI_ADDR_LIMIT($28)
+ addu t4, t0, 32
+ and t5, t4
+ bltz t5, bad_stack # -> sp is bad
+
+ /*
+ * Ok, copy the args from the luser stack to the kernel stack.
+ * We now always copy stack arguments #5..#8; faults on the
+ * user loads are handled via the __ex_table entries below.
+ */
+
+ .set push
+ .set noreorder
+ .set nomacro
+
+1: lw t5, 16(t0) # argument #5 from usp
+4: lw t6, 20(t0) # argument #6 from usp
+3: lw t7, 24(t0) # argument #7 from usp
+2: lw t8, 28(t0) # argument #8 from usp
+
+ sw t5, 16(sp) # argument #5 to ksp
+ sw t6, 20(sp) # argument #6 to ksp
+ sw t7, 24(sp) # argument #7 to ksp
+ sw t8, 28(sp) # argument #8 to ksp
+ .set pop
+
+ .section __ex_table,"a"
+ PTR 1b,bad_stack
+ PTR 2b,bad_stack
+ PTR 3b,bad_stack
+ PTR 4b,bad_stack
+ .previous
+
lw t0, TI_FLAGS($28) # syscall tracing enabled?
li t1, _TIF_WORK_SYSCALL_ENTRY
and t0, t1
@@ -102,66 +143,6 @@ syscall_trace_entry:
/* ------------------------------------------------------------------------ */
/*
- * More than four arguments. Try to deal with it by copying the
- * stack arguments from the user stack to the kernel stack.
- * This Sucks (TM).
- */
-stackargs:
- lw t0, PT_R29(sp) # get old user stack pointer
-
- /*
- * We intentionally keep the kernel stack a little below the top of
- * userspace so we don't have to do a slower byte accurate check here.
- */
- lw t5, TI_ADDR_LIMIT($28)
- addu t4, t0, 32
- and t5, t4
- bltz t5, bad_stack # -> sp is bad
-
- /* Ok, copy the args from the luser stack to the kernel stack.
- * t3 is the precomputed number of instruction bytes needed to
- * load or store arguments 6-8.
- */
-
- la t1, 5f # load up to 3 arguments
- subu t1, t3
-1: lw t5, 16(t0) # argument #5 from usp
- .set push
- .set noreorder
- .set nomacro
- jr t1
- addiu t1, 6f - 5f
-
-2: lw t8, 28(t0) # argument #8 from usp
-3: lw t7, 24(t0) # argument #7 from usp
-4: lw t6, 20(t0) # argument #6 from usp
-5: jr t1
- sw t5, 16(sp) # argument #5 to ksp
-
-#ifdef CONFIG_CPU_MICROMIPS
- sw t8, 28(sp) # argument #8 to ksp
- nop
- sw t7, 24(sp) # argument #7 to ksp
- nop
- sw t6, 20(sp) # argument #6 to ksp
- nop
-#else
- sw t8, 28(sp) # argument #8 to ksp
- sw t7, 24(sp) # argument #7 to ksp
- sw t6, 20(sp) # argument #6 to ksp
-#endif
-6: j stack_done # go back
- nop
- .set pop
-
- .section __ex_table,"a"
- PTR 1b,bad_stack
- PTR 2b,bad_stack
- PTR 3b,bad_stack
- PTR 4b,bad_stack
- .previous
-
- /*
* The stackpointer for a call with more than 4 arguments is bad.
* We probably should handle this case a bit more drastic.
*/
@@ -187,7 +168,7 @@ illegal_syscall:
subu t0, a0, __NR_O32_Linux # check syscall number
sltiu v0, t0, __NR_O32_Linux_syscalls + 1
beqz t0, einval # do not recurse
- sll t1, t0, 3
+ sll t1, t0, 2
beqz v0, einval
lw t2, sys_call_table(t1) # syscall routine
@@ -218,260 +199,248 @@ einval: li v0, -ENOSYS
jr ra
END(sys_syscall)
- .macro fifty ptr, nargs, from=1, to=50
- sys \ptr \nargs
- .if \to-\from
- fifty \ptr,\nargs,"(\from+1)",\to
- .endif
- .endm
-
- .macro mille ptr, nargs, from=1, to=20
- fifty \ptr,\nargs
- .if \to-\from
- mille \ptr,\nargs,"(\from+1)",\to
- .endif
- .endm
-
- .macro syscalltable
- sys sys_syscall 8 /* 4000 */
- sys sys_exit 1
- sys __sys_fork 0
- sys sys_read 3
- sys sys_write 3
- sys sys_open 3 /* 4005 */
- sys sys_close 1
- sys sys_waitpid 3
- sys sys_creat 2
- sys sys_link 2
- sys sys_unlink 1 /* 4010 */
- sys sys_execve 0
- sys sys_chdir 1
- sys sys_time 1
- sys sys_mknod 3
- sys sys_chmod 2 /* 4015 */
- sys sys_lchown 3
- sys sys_ni_syscall 0
- sys sys_ni_syscall 0 /* was sys_stat */
- sys sys_lseek 3
- sys sys_getpid 0 /* 4020 */
- sys sys_mount 5
- sys sys_oldumount 1
- sys sys_setuid 1
- sys sys_getuid 0
- sys sys_stime 1 /* 4025 */
- sys sys_ptrace 4
- sys sys_alarm 1
- sys sys_ni_syscall 0 /* was sys_fstat */
- sys sys_pause 0
- sys sys_utime 2 /* 4030 */
- sys sys_ni_syscall 0
- sys sys_ni_syscall 0
- sys sys_access 2
- sys sys_nice 1
- sys sys_ni_syscall 0 /* 4035 */
- sys sys_sync 0
- sys sys_kill 2
- sys sys_rename 2
- sys sys_mkdir 2
- sys sys_rmdir 1 /* 4040 */
- sys sys_dup 1
- sys sysm_pipe 0
- sys sys_times 1
- sys sys_ni_syscall 0
- sys sys_brk 1 /* 4045 */
- sys sys_setgid 1
- sys sys_getgid 0
- sys sys_ni_syscall 0 /* was signal(2) */
- sys sys_geteuid 0
- sys sys_getegid 0 /* 4050 */
- sys sys_acct 1
- sys sys_umount 2
- sys sys_ni_syscall 0
- sys sys_ioctl 3
- sys sys_fcntl 3 /* 4055 */
- sys sys_ni_syscall 2
- sys sys_setpgid 2
- sys sys_ni_syscall 0
- sys sys_olduname 1
- sys sys_umask 1 /* 4060 */
- sys sys_chroot 1
- sys sys_ustat 2
- sys sys_dup2 2
- sys sys_getppid 0
- sys sys_getpgrp 0 /* 4065 */
- sys sys_setsid 0
- sys sys_sigaction 3
- sys sys_sgetmask 0
- sys sys_ssetmask 1
- sys sys_setreuid 2 /* 4070 */
- sys sys_setregid 2
- sys sys_sigsuspend 0
- sys sys_sigpending 1
- sys sys_sethostname 2
- sys sys_setrlimit 2 /* 4075 */
- sys sys_getrlimit 2
- sys sys_getrusage 2
- sys sys_gettimeofday 2
- sys sys_settimeofday 2
- sys sys_getgroups 2 /* 4080 */
- sys sys_setgroups 2
- sys sys_ni_syscall 0 /* old_select */
- sys sys_symlink 2
- sys sys_ni_syscall 0 /* was sys_lstat */
- sys sys_readlink 3 /* 4085 */
- sys sys_uselib 1
- sys sys_swapon 2
- sys sys_reboot 3
- sys sys_old_readdir 3
- sys sys_mips_mmap 6 /* 4090 */
- sys sys_munmap 2
- sys sys_truncate 2
- sys sys_ftruncate 2
- sys sys_fchmod 2
- sys sys_fchown 3 /* 4095 */
- sys sys_getpriority 2
- sys sys_setpriority 3
- sys sys_ni_syscall 0
- sys sys_statfs 2
- sys sys_fstatfs 2 /* 4100 */
- sys sys_ni_syscall 0 /* was ioperm(2) */
- sys sys_socketcall 2
- sys sys_syslog 3
- sys sys_setitimer 3
- sys sys_getitimer 2 /* 4105 */
- sys sys_newstat 2
- sys sys_newlstat 2
- sys sys_newfstat 2
- sys sys_uname 1
- sys sys_ni_syscall 0 /* 4110 was iopl(2) */
- sys sys_vhangup 0
- sys sys_ni_syscall 0 /* was sys_idle() */
- sys sys_ni_syscall 0 /* was sys_vm86 */
- sys sys_wait4 4
- sys sys_swapoff 1 /* 4115 */
- sys sys_sysinfo 1
- sys sys_ipc 6
- sys sys_fsync 1
- sys sys_sigreturn 0
- sys __sys_clone 6 /* 4120 */
- sys sys_setdomainname 2
- sys sys_newuname 1
- sys sys_ni_syscall 0 /* sys_modify_ldt */
- sys sys_adjtimex 1
- sys sys_mprotect 3 /* 4125 */
- sys sys_sigprocmask 3
- sys sys_ni_syscall 0 /* was create_module */
- sys sys_init_module 5
- sys sys_delete_module 1
- sys sys_ni_syscall 0 /* 4130 was get_kernel_syms */
- sys sys_quotactl 4
- sys sys_getpgid 1
- sys sys_fchdir 1
- sys sys_bdflush 2
- sys sys_sysfs 3 /* 4135 */
- sys sys_personality 1
- sys sys_ni_syscall 0 /* for afs_syscall */
- sys sys_setfsuid 1
- sys sys_setfsgid 1
- sys sys_llseek 5 /* 4140 */
- sys sys_getdents 3
- sys sys_select 5
- sys sys_flock 2
- sys sys_msync 3
- sys sys_readv 3 /* 4145 */
- sys sys_writev 3
- sys sys_cacheflush 3
- sys sys_cachectl 3
- sys sys_sysmips 4
- sys sys_ni_syscall 0 /* 4150 */
- sys sys_getsid 1
- sys sys_fdatasync 1
- sys sys_sysctl 1
- sys sys_mlock 2
- sys sys_munlock 2 /* 4155 */
- sys sys_mlockall 1
- sys sys_munlockall 0
- sys sys_sched_setparam 2
- sys sys_sched_getparam 2
- sys sys_sched_setscheduler 3 /* 4160 */
- sys sys_sched_getscheduler 1
- sys sys_sched_yield 0
- sys sys_sched_get_priority_max 1
- sys sys_sched_get_priority_min 1
- sys sys_sched_rr_get_interval 2 /* 4165 */
- sys sys_nanosleep, 2
- sys sys_mremap, 5
- sys sys_accept 3
- sys sys_bind 3
- sys sys_connect 3 /* 4170 */
- sys sys_getpeername 3
- sys sys_getsockname 3
- sys sys_getsockopt 5
- sys sys_listen 2
- sys sys_recv 4 /* 4175 */
- sys sys_recvfrom 6
- sys sys_recvmsg 3
- sys sys_send 4
- sys sys_sendmsg 3
- sys sys_sendto 6 /* 4180 */
- sys sys_setsockopt 5
- sys sys_shutdown 2
- sys sys_socket 3
- sys sys_socketpair 4
- sys sys_setresuid 3 /* 4185 */
- sys sys_getresuid 3
- sys sys_ni_syscall 0 /* was sys_query_module */
- sys sys_poll 3
- sys sys_ni_syscall 0 /* was nfsservctl */
- sys sys_setresgid 3 /* 4190 */
- sys sys_getresgid 3
- sys sys_prctl 5
- sys sys_rt_sigreturn 0
- sys sys_rt_sigaction 4
- sys sys_rt_sigprocmask 4 /* 4195 */
- sys sys_rt_sigpending 2
- sys sys_rt_sigtimedwait 4
- sys sys_rt_sigqueueinfo 3
- sys sys_rt_sigsuspend 0
- sys sys_pread64 6 /* 4200 */
- sys sys_pwrite64 6
- sys sys_chown 3
- sys sys_getcwd 2
- sys sys_capget 2
- sys sys_capset 2 /* 4205 */
- sys sys_sigaltstack 0
- sys sys_sendfile 4
- sys sys_ni_syscall 0
- sys sys_ni_syscall 0
- sys sys_mips_mmap2 6 /* 4210 */
- sys sys_truncate64 4
- sys sys_ftruncate64 4
- sys sys_stat64 2
- sys sys_lstat64 2
- sys sys_fstat64 2 /* 4215 */
- sys sys_pivot_root 2
- sys sys_mincore 3
- sys sys_madvise 3
- sys sys_getdents64 3
- sys sys_fcntl64 3 /* 4220 */
- sys sys_ni_syscall 0
- sys sys_gettid 0
- sys sys_readahead 5
- sys sys_setxattr 5
- sys sys_lsetxattr 5 /* 4225 */
- sys sys_fsetxattr 5
- sys sys_getxattr 4
- sys sys_lgetxattr 4
- sys sys_fgetxattr 4
- sys sys_listxattr 3 /* 4230 */
- sys sys_llistxattr 3
- sys sys_flistxattr 3
- sys sys_removexattr 2
- sys sys_lremovexattr 2
- sys sys_fremovexattr 2 /* 4235 */
- sys sys_tkill 2
- sys sys_sendfile64 5
- sys sys_futex 6
+ .align 2
+ .type sys_call_table, @object
+EXPORT(sys_call_table)
+ PTR sys_syscall /* 4000 */
+ PTR sys_exit
+ PTR __sys_fork
+ PTR sys_read
+ PTR sys_write
+ PTR sys_open /* 4005 */
+ PTR sys_close
+ PTR sys_waitpid
+ PTR sys_creat
+ PTR sys_link
+ PTR sys_unlink /* 4010 */
+ PTR sys_execve
+ PTR sys_chdir
+ PTR sys_time
+ PTR sys_mknod
+ PTR sys_chmod /* 4015 */
+ PTR sys_lchown
+ PTR sys_ni_syscall
+ PTR sys_ni_syscall /* was sys_stat */
+ PTR sys_lseek
+ PTR sys_getpid /* 4020 */
+ PTR sys_mount
+ PTR sys_oldumount
+ PTR sys_setuid
+ PTR sys_getuid
+ PTR sys_stime /* 4025 */
+ PTR sys_ptrace
+ PTR sys_alarm
+ PTR sys_ni_syscall /* was sys_fstat */
+ PTR sys_pause
+ PTR sys_utime /* 4030 */
+ PTR sys_ni_syscall
+ PTR sys_ni_syscall
+ PTR sys_access
+ PTR sys_nice
+ PTR sys_ni_syscall /* 4035 */
+ PTR sys_sync
+ PTR sys_kill
+ PTR sys_rename
+ PTR sys_mkdir
+ PTR sys_rmdir /* 4040 */
+ PTR sys_dup
+ PTR sysm_pipe
+ PTR sys_times
+ PTR sys_ni_syscall
+ PTR sys_brk /* 4045 */
+ PTR sys_setgid
+ PTR sys_getgid
+ PTR sys_ni_syscall /* was signal(2) */
+ PTR sys_geteuid
+ PTR sys_getegid /* 4050 */
+ PTR sys_acct
+ PTR sys_umount
+ PTR sys_ni_syscall
+ PTR sys_ioctl
+ PTR sys_fcntl /* 4055 */
+ PTR sys_ni_syscall
+ PTR sys_setpgid
+ PTR sys_ni_syscall
+ PTR sys_olduname
+ PTR sys_umask /* 4060 */
+ PTR sys_chroot
+ PTR sys_ustat
+ PTR sys_dup2
+ PTR sys_getppid
+ PTR sys_getpgrp /* 4065 */
+ PTR sys_setsid
+ PTR sys_sigaction
+ PTR sys_sgetmask
+ PTR sys_ssetmask
+ PTR sys_setreuid /* 4070 */
+ PTR sys_setregid
+ PTR sys_sigsuspend
+ PTR sys_sigpending
+ PTR sys_sethostname
+ PTR sys_setrlimit /* 4075 */
+ PTR sys_getrlimit
+ PTR sys_getrusage
+ PTR sys_gettimeofday
+ PTR sys_settimeofday
+ PTR sys_getgroups /* 4080 */
+ PTR sys_setgroups
+ PTR sys_ni_syscall /* old_select */
+ PTR sys_symlink
+ PTR sys_ni_syscall /* was sys_lstat */
+ PTR sys_readlink /* 4085 */
+ PTR sys_uselib
+ PTR sys_swapon
+ PTR sys_reboot
+ PTR sys_old_readdir
+ PTR sys_mips_mmap /* 4090 */
+ PTR sys_munmap
+ PTR sys_truncate
+ PTR sys_ftruncate
+ PTR sys_fchmod
+ PTR sys_fchown /* 4095 */
+ PTR sys_getpriority
+ PTR sys_setpriority
+ PTR sys_ni_syscall
+ PTR sys_statfs
+ PTR sys_fstatfs /* 4100 */
+ PTR sys_ni_syscall /* was ioperm(2) */
+ PTR sys_socketcall
+ PTR sys_syslog
+ PTR sys_setitimer
+ PTR sys_getitimer /* 4105 */
+ PTR sys_newstat
+ PTR sys_newlstat
+ PTR sys_newfstat
+ PTR sys_uname
+ PTR sys_ni_syscall /* 4110 was iopl(2) */
+ PTR sys_vhangup
+ PTR sys_ni_syscall /* was sys_idle() */
+ PTR sys_ni_syscall /* was sys_vm86 */
+ PTR sys_wait4
+ PTR sys_swapoff /* 4115 */
+ PTR sys_sysinfo
+ PTR sys_ipc
+ PTR sys_fsync
+ PTR sys_sigreturn
+ PTR __sys_clone /* 4120 */
+ PTR sys_setdomainname
+ PTR sys_newuname
+ PTR sys_ni_syscall /* sys_modify_ldt */
+ PTR sys_adjtimex
+ PTR sys_mprotect /* 4125 */
+ PTR sys_sigprocmask
+ PTR sys_ni_syscall /* was create_module */
+ PTR sys_init_module
+ PTR sys_delete_module
+ PTR sys_ni_syscall /* 4130 was get_kernel_syms */
+ PTR sys_quotactl
+ PTR sys_getpgid
+ PTR sys_fchdir
+ PTR sys_bdflush
+ PTR sys_sysfs /* 4135 */
+ PTR sys_personality
+ PTR sys_ni_syscall /* for afs_syscall */
+ PTR sys_setfsuid
+ PTR sys_setfsgid
+ PTR sys_llseek /* 4140 */
+ PTR sys_getdents
+ PTR sys_select
+ PTR sys_flock
+ PTR sys_msync
+ PTR sys_readv /* 4145 */
+ PTR sys_writev
+ PTR sys_cacheflush
+ PTR sys_cachectl
+ PTR sys_sysmips
+ PTR sys_ni_syscall /* 4150 */
+ PTR sys_getsid
+ PTR sys_fdatasync
+ PTR sys_sysctl
+ PTR sys_mlock
+ PTR sys_munlock /* 4155 */
+ PTR sys_mlockall
+ PTR sys_munlockall
+ PTR sys_sched_setparam
+ PTR sys_sched_getparam
+ PTR sys_sched_setscheduler /* 4160 */
+ PTR sys_sched_getscheduler
+ PTR sys_sched_yield
+ PTR sys_sched_get_priority_max
+ PTR sys_sched_get_priority_min
+ PTR sys_sched_rr_get_interval /* 4165 */
+ PTR sys_nanosleep
+ PTR sys_mremap
+ PTR sys_accept
+ PTR sys_bind
+ PTR sys_connect /* 4170 */
+ PTR sys_getpeername
+ PTR sys_getsockname
+ PTR sys_getsockopt
+ PTR sys_listen
+ PTR sys_recv /* 4175 */
+ PTR sys_recvfrom
+ PTR sys_recvmsg
+ PTR sys_send
+ PTR sys_sendmsg
+ PTR sys_sendto /* 4180 */
+ PTR sys_setsockopt
+ PTR sys_shutdown
+ PTR sys_socket
+ PTR sys_socketpair
+ PTR sys_setresuid /* 4185 */
+ PTR sys_getresuid
+ PTR sys_ni_syscall /* was sys_query_module */
+ PTR sys_poll
+ PTR sys_ni_syscall /* was nfsservctl */
+ PTR sys_setresgid /* 4190 */
+ PTR sys_getresgid
+ PTR sys_prctl
+ PTR sys_rt_sigreturn
+ PTR sys_rt_sigaction
+ PTR sys_rt_sigprocmask /* 4195 */
+ PTR sys_rt_sigpending
+ PTR sys_rt_sigtimedwait
+ PTR sys_rt_sigqueueinfo
+ PTR sys_rt_sigsuspend
+ PTR sys_pread64 /* 4200 */
+ PTR sys_pwrite64
+ PTR sys_chown
+ PTR sys_getcwd
+ PTR sys_capget
+ PTR sys_capset /* 4205 */
+ PTR sys_sigaltstack
+ PTR sys_sendfile
+ PTR sys_ni_syscall
+ PTR sys_ni_syscall
+ PTR sys_mips_mmap2 /* 4210 */
+ PTR sys_truncate64
+ PTR sys_ftruncate64
+ PTR sys_stat64
+ PTR sys_lstat64
+ PTR sys_fstat64 /* 4215 */
+ PTR sys_pivot_root
+ PTR sys_mincore
+ PTR sys_madvise
+ PTR sys_getdents64
+ PTR sys_fcntl64 /* 4220 */
+ PTR sys_ni_syscall
+ PTR sys_gettid
+ PTR sys_readahead
+ PTR sys_setxattr
+ PTR sys_lsetxattr /* 4225 */
+ PTR sys_fsetxattr
+ PTR sys_getxattr
+ PTR sys_lgetxattr
+ PTR sys_fgetxattr
+ PTR sys_listxattr /* 4230 */
+ PTR sys_llistxattr
+ PTR sys_flistxattr
+ PTR sys_removexattr
+ PTR sys_lremovexattr
+ PTR sys_fremovexattr /* 4235 */
+ PTR sys_tkill
+ PTR sys_sendfile64
+ PTR sys_futex
#ifdef CONFIG_MIPS_MT_FPAFF
/*
* For FPU affinity scheduling on MIPS MT processors, we need to
@@ -480,132 +449,117 @@ einval: li v0, -ENOSYS
* these hooks for the 32-bit kernel - there is no MIPS64 MT processor
* atm.
*/
- sys mipsmt_sys_sched_setaffinity 3
- sys mipsmt_sys_sched_getaffinity 3
+ PTR mipsmt_sys_sched_setaffinity
+ PTR mipsmt_sys_sched_getaffinity
#else
- sys sys_sched_setaffinity 3
- sys sys_sched_getaffinity 3 /* 4240 */
+ PTR sys_sched_setaffinity
+ PTR sys_sched_getaffinity /* 4240 */
#endif /* CONFIG_MIPS_MT_FPAFF */
- sys sys_io_setup 2
- sys sys_io_destroy 1
- sys sys_io_getevents 5
- sys sys_io_submit 3
- sys sys_io_cancel 3 /* 4245 */
- sys sys_exit_group 1
- sys sys_lookup_dcookie 4
- sys sys_epoll_create 1
- sys sys_epoll_ctl 4
- sys sys_epoll_wait 4 /* 4250 */
- sys sys_remap_file_pages 5
- sys sys_set_tid_address 1
- sys sys_restart_syscall 0
- sys sys_fadvise64_64 7
- sys sys_statfs64 3 /* 4255 */
- sys sys_fstatfs64 2
- sys sys_timer_create 3
- sys sys_timer_settime 4
- sys sys_timer_gettime 2
- sys sys_timer_getoverrun 1 /* 4260 */
- sys sys_timer_delete 1
- sys sys_clock_settime 2
- sys sys_clock_gettime 2
- sys sys_clock_getres 2
- sys sys_clock_nanosleep 4 /* 4265 */
- sys sys_tgkill 3
- sys sys_utimes 2
- sys sys_mbind 4
- sys sys_ni_syscall 0 /* sys_get_mempolicy */
- sys sys_ni_syscall 0 /* 4270 sys_set_mempolicy */
- sys sys_mq_open 4
- sys sys_mq_unlink 1
- sys sys_mq_timedsend 5
- sys sys_mq_timedreceive 5
- sys sys_mq_notify 2 /* 4275 */
- sys sys_mq_getsetattr 3
- sys sys_ni_syscall 0 /* sys_vserver */
- sys sys_waitid 5
- sys sys_ni_syscall 0 /* available, was setaltroot */
- sys sys_add_key 5 /* 4280 */
- sys sys_request_key 4
- sys sys_keyctl 5
- sys sys_set_thread_area 1
- sys sys_inotify_init 0
- sys sys_inotify_add_watch 3 /* 4285 */
- sys sys_inotify_rm_watch 2
- sys sys_migrate_pages 4
- sys sys_openat 4
- sys sys_mkdirat 3
- sys sys_mknodat 4 /* 4290 */
- sys sys_fchownat 5
- sys sys_futimesat 3
- sys sys_fstatat64 4
- sys sys_unlinkat 3
- sys sys_renameat 4 /* 4295 */
- sys sys_linkat 5
- sys sys_symlinkat 3
- sys sys_readlinkat 4
- sys sys_fchmodat 3
- sys sys_faccessat 3 /* 4300 */
- sys sys_pselect6 6
- sys sys_ppoll 5
- sys sys_unshare 1
- sys sys_splice 6
- sys sys_sync_file_range 7 /* 4305 */
- sys sys_tee 4
- sys sys_vmsplice 4
- sys sys_move_pages 6
- sys sys_set_robust_list 2
- sys sys_get_robust_list 3 /* 4310 */
- sys sys_kexec_load 4
- sys sys_getcpu 3
- sys sys_epoll_pwait 6
- sys sys_ioprio_set 3
- sys sys_ioprio_get 2 /* 4315 */
- sys sys_utimensat 4
- sys sys_signalfd 3
- sys sys_ni_syscall 0 /* was timerfd */
- sys sys_eventfd 1
- sys sys_fallocate 6 /* 4320 */
- sys sys_timerfd_create 2
- sys sys_timerfd_gettime 2
- sys sys_timerfd_settime 4
- sys sys_signalfd4 4
- sys sys_eventfd2 2 /* 4325 */
- sys sys_epoll_create1 1
- sys sys_dup3 3
- sys sys_pipe2 2
- sys sys_inotify_init1 1
- sys sys_preadv 6 /* 4330 */
- sys sys_pwritev 6
- sys sys_rt_tgsigqueueinfo 4
- sys sys_perf_event_open 5
- sys sys_accept4 4
- sys sys_recvmmsg 5 /* 4335 */
- sys sys_fanotify_init 2
- sys sys_fanotify_mark 6
- sys sys_prlimit64 4
- sys sys_name_to_handle_at 5
- sys sys_open_by_handle_at 3 /* 4340 */
- sys sys_clock_adjtime 2
- sys sys_syncfs 1
- sys sys_sendmmsg 4
- sys sys_setns 2
- sys sys_process_vm_readv 6 /* 4345 */
- sys sys_process_vm_writev 6
- sys sys_kcmp 5
- sys sys_finit_module 3
- .endm
-
- /* We pre-compute the number of _instruction_ bytes needed to
- load or store the arguments 6-8. Negative values are ignored. */
-
- .macro sys function, nargs
- PTR \function
- LONG (\nargs << 2) - (5 << 2)
- .endm
-
- .align 3
- .type sys_call_table,@object
-EXPORT(sys_call_table)
- syscalltable
- .size sys_call_table, . - sys_call_table
+ PTR sys_io_setup
+ PTR sys_io_destroy
+ PTR sys_io_getevents
+ PTR sys_io_submit
+ PTR sys_io_cancel /* 4245 */
+ PTR sys_exit_group
+ PTR sys_lookup_dcookie
+ PTR sys_epoll_create
+ PTR sys_epoll_ctl
+ PTR sys_epoll_wait /* 4250 */
+ PTR sys_remap_file_pages
+ PTR sys_set_tid_address
+ PTR sys_restart_syscall
+ PTR sys_fadvise64_64
+ PTR sys_statfs64 /* 4255 */
+ PTR sys_fstatfs64
+ PTR sys_timer_create
+ PTR sys_timer_settime
+ PTR sys_timer_gettime
+ PTR sys_timer_getoverrun /* 4260 */
+ PTR sys_timer_delete
+ PTR sys_clock_settime
+ PTR sys_clock_gettime
+ PTR sys_clock_getres
+ PTR sys_clock_nanosleep /* 4265 */
+ PTR sys_tgkill
+ PTR sys_utimes
+ PTR sys_mbind
+ PTR sys_ni_syscall /* sys_get_mempolicy */
+ PTR sys_ni_syscall /* 4270 sys_set_mempolicy */
+ PTR sys_mq_open
+ PTR sys_mq_unlink
+ PTR sys_mq_timedsend
+ PTR sys_mq_timedreceive
+ PTR sys_mq_notify /* 4275 */
+ PTR sys_mq_getsetattr
+ PTR sys_ni_syscall /* sys_vserver */
+ PTR sys_waitid
+ PTR sys_ni_syscall /* available, was setaltroot */
+ PTR sys_add_key /* 4280 */
+ PTR sys_request_key
+ PTR sys_keyctl
+ PTR sys_set_thread_area
+ PTR sys_inotify_init
+ PTR sys_inotify_add_watch /* 4285 */
+ PTR sys_inotify_rm_watch
+ PTR sys_migrate_pages
+ PTR sys_openat
+ PTR sys_mkdirat
+ PTR sys_mknodat /* 4290 */
+ PTR sys_fchownat
+ PTR sys_futimesat
+ PTR sys_fstatat64
+ PTR sys_unlinkat
+ PTR sys_renameat /* 4295 */
+ PTR sys_linkat
+ PTR sys_symlinkat
+ PTR sys_readlinkat
+ PTR sys_fchmodat
+ PTR sys_faccessat /* 4300 */
+ PTR sys_pselect6
+ PTR sys_ppoll
+ PTR sys_unshare
+ PTR sys_splice
+ PTR sys_sync_file_range /* 4305 */
+ PTR sys_tee
+ PTR sys_vmsplice
+ PTR sys_move_pages
+ PTR sys_set_robust_list
+ PTR sys_get_robust_list /* 4310 */
+ PTR sys_kexec_load
+ PTR sys_getcpu
+ PTR sys_epoll_pwait
+ PTR sys_ioprio_set
+ PTR sys_ioprio_get /* 4315 */
+ PTR sys_utimensat
+ PTR sys_signalfd
+ PTR sys_ni_syscall /* was timerfd */
+ PTR sys_eventfd
+ PTR sys_fallocate /* 4320 */
+ PTR sys_timerfd_create
+ PTR sys_timerfd_gettime
+ PTR sys_timerfd_settime
+ PTR sys_signalfd4
+ PTR sys_eventfd2 /* 4325 */
+ PTR sys_epoll_create1
+ PTR sys_dup3
+ PTR sys_pipe2
+ PTR sys_inotify_init1
+ PTR sys_preadv /* 4330 */
+ PTR sys_pwritev
+ PTR sys_rt_tgsigqueueinfo
+ PTR sys_perf_event_open
+ PTR sys_accept4
+ PTR sys_recvmmsg /* 4335 */
+ PTR sys_fanotify_init
+ PTR sys_fanotify_mark
+ PTR sys_prlimit64
+ PTR sys_name_to_handle_at
+ PTR sys_open_by_handle_at /* 4340 */
+ PTR sys_clock_adjtime
+ PTR sys_syncfs
+ PTR sys_sendmmsg
+ PTR sys_setns
+ PTR sys_process_vm_readv /* 4345 */
+ PTR sys_process_vm_writev
+ PTR sys_kcmp
+ PTR sys_finit_module
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index be6627ead619..57e3742fec59 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -114,7 +114,8 @@ illegal_syscall:
END(handle_sys64)
.align 3
-sys_call_table:
+ .type sys_call_table, @object
+EXPORT(sys_call_table)
PTR sys_read /* 5000 */
PTR sys_write
PTR sys_open
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index cab150789c8d..2f48f5934399 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -103,6 +103,7 @@ not_n32_scall:
END(handle_sysn32)
+ .type sysn32_call_table, @object
EXPORT(sysn32_call_table)
PTR sys_read /* 6000 */
PTR sys_write
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 37605dc8eef7..f1acdb429f4f 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -53,7 +53,7 @@ NESTED(handle_sys, PT_SIZE, sp)
sll a3, a3, 0
dsll t0, v0, 3 # offset into table
- ld t2, (sys_call_table - (__NR_O32_Linux * 8))(t0)
+ ld t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0)
sd a3, PT_R26(sp) # save a3 for syscall restarting
@@ -168,7 +168,7 @@ LEAF(sys32_syscall)
beqz t0, einval # do not recurse
dsll t1, t0, 3
beqz v0, einval
- ld t2, sys_call_table(t1) # syscall routine
+ ld t2, sys32_call_table(t1) # syscall routine
move a0, a1 # shift argument registers
move a1, a2
@@ -190,8 +190,8 @@ einval: li v0, -ENOSYS
END(sys32_syscall)
.align 3
- .type sys_call_table,@object
-sys_call_table:
+ .type sys32_call_table,@object
+EXPORT(sys32_call_table)
PTR sys32_syscall /* 4000 */
PTR sys_exit
PTR __sys_fork
@@ -541,4 +541,4 @@ sys_call_table:
PTR compat_sys_process_vm_writev
PTR sys_kcmp
PTR sys_finit_module
- .size sys_call_table,.-sys_call_table
+ .size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c538d6e01b7b..b089502cbadd 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -300,12 +300,13 @@ static void __init bootmem_init(void)
int i;
/*
- * Init any data related to initrd. It's a nop if INITRD is
- * not selected. Once that done we can determine the low bound
- * of usable memory.
+ * Sanity check any INITRD first. We don't take it into account
+ * for bootmem setup initially, rely on the end-of-kernel-code
+ * as our memory range starting point. Once bootmem is inited we
+ * will reserve the area used for the initrd.
*/
- reserved_end = max(init_initrd(),
- (unsigned long) PFN_UP(__pa_symbol(&_end)));
+ init_initrd();
+ reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
/*
* max_low_pfn is not a number of pages. The number of pages
@@ -362,6 +363,14 @@ static void __init bootmem_init(void)
max_low_pfn = PFN_DOWN(HIGHMEM_START);
}
+#ifdef CONFIG_BLK_DEV_INITRD
+ /*
+ * mapstart should be after initrd_end
+ */
+ if (initrd_end)
+ mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
+#endif
+
/*
* Initialize the boot-time allocator with low memory only.
*/
@@ -602,6 +611,8 @@ static void __init arch_mem_init(char **cmdline_p)
{
extern void plat_mem_setup(void);
+ device_tree_init();
+
/* call board setup routine */
plat_mem_setup();
@@ -662,7 +673,6 @@ static void __init arch_mem_init(char **cmdline_p)
crashk_res.end - crashk_res.start + 1,
BOOTMEM_DEFAULT);
#endif
- device_tree_init();
sparse_init();
plat_swiotlb_setup();
paging_init();
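Taken together, the setup.c hunks above give bootmem_init() the following shape — a simplified sketch that only uses symbols visible in the hunks (init_initrd(), PFN_UP(), __pa_symbol(&_end), initrd_end, mapstart); the memory-map walk and the later reservation of the initrd pages are elided:

/*
 * Sketch of the new ordering, not the literal kernel code: the initrd is
 * only sanity-checked up front, the low bound of usable memory comes from
 * the end of the kernel image, and the bootmem bitmap is forced past the
 * initrd so the image survives until it is formally reserved.
 */
static void __init bootmem_init_sketch(void)
{
	unsigned long reserved_end, mapstart;

	init_initrd();					/* sanity check only */
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	mapstart = reserved_end;			/* memory-map walk elided */
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end)					/* keep the bitmap off the initrd */
		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif

	/* init bootmem with low memory only, then reserve the initrd range */
}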
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 126da74d4c55..2362665ba496 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -136,10 +136,10 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
{
if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
"smp_ipi0", NULL))
- panic("Can't request IPI0 interrupt\n");
+ panic("Can't request IPI0 interrupt");
if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
"smp_ipi1", NULL))
- panic("Can't request IPI1 interrupt\n");
+ panic("Can't request IPI1 interrupt");
}
/*
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 5c208ed8f856..0a022ee33b2a 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -150,7 +150,6 @@ asmlinkage void start_secondary(void)
void __irq_entry smp_call_function_interrupt(void)
{
irq_enter();
- generic_smp_call_function_single_interrupt();
generic_smp_call_function_interrupt();
irq_exit();
}
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 524841f02803..f9c8746be8d6 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -330,6 +330,7 @@ void show_regs(struct pt_regs *regs)
void show_registers(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
+ mm_segment_t old_fs = get_fs();
__show_regs(regs);
print_modules();
@@ -344,9 +345,13 @@ void show_registers(struct pt_regs *regs)
printk("*HwTLS: %0*lx\n", field, tls);
}
+ if (!user_mode(regs))
+ /* Necessary for getting the correct stack content */
+ set_fs(KERNEL_DS);
show_stacktrace(current, regs);
show_code((unsigned int __user *) regs->cp0_epc);
printk("\n");
+ set_fs(old_fs);
}
static int regs_to_trapnr(struct pt_regs *regs)
@@ -366,7 +371,8 @@ void __noreturn die(const char *str, struct pt_regs *regs)
oops_enter();
- if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
+ if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
+ SIGSEGV) == NOTIFY_STOP)
sig = 0;
console_verbose();
@@ -457,8 +463,8 @@ asmlinkage void do_be(struct pt_regs *regs)
printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
data ? "Data" : "Instruction",
field, regs->cp0_epc, field, regs->regs[31]);
- if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
- == NOTIFY_STOP)
+ if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
+ SIGBUS) == NOTIFY_STOP)
goto out;
die_if_kernel("Oops", regs);
@@ -727,8 +733,8 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
siginfo_t info = {0};
prev_state = exception_enter();
- if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
- == NOTIFY_STOP)
+ if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
+ SIGFPE) == NOTIFY_STOP)
goto out;
die_if_kernel("FP exception in kernel code", regs);
@@ -798,7 +804,8 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
- if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
+ SIGTRAP) == NOTIFY_STOP)
return;
/*
@@ -892,12 +899,14 @@ asmlinkage void do_bp(struct pt_regs *regs)
*/
switch (bcode) {
case BRK_KPROBE_BP:
- if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ if (notify_die(DIE_BREAK, "debug", regs, bcode,
+ regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
goto out;
else
break;
case BRK_KPROBE_SSTEPBP:
- if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
+ regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
goto out;
else
break;
@@ -961,8 +970,8 @@ asmlinkage void do_ri(struct pt_regs *regs)
int status = -1;
prev_state = exception_enter();
- if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
- == NOTIFY_STOP)
+ if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
+ SIGILL) == NOTIFY_STOP)
goto out;
die_if_kernel("Reserved instruction in kernel code", regs);
@@ -1488,10 +1497,14 @@ int register_nmi_notifier(struct notifier_block *nb)
void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
+ char str[100];
+
raw_notifier_call_chain(&nmi_chain, 0, regs);
bust_spinlocks(1);
- printk("NMI taken!!!!\n");
- die("NMI", regs);
+ snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
+ smp_processor_id(), regs->cp0_epc);
+ regs->cp0_epc = read_c0_errorepc();
+ die(str, regs);
}
#define VECTORSPACING 0x100 /* for EI/VI mode */
@@ -1554,7 +1567,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
unsigned char *b;
BUG_ON(!cpu_has_veic && !cpu_has_vint);
- BUG_ON((n < 0) && (n > 9));
if (addr == NULL) {
handler = (unsigned long) do_default_vi;
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index a7b044536de4..73b34827826c 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -198,12 +198,13 @@ kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
return -ENOIOCTLCMD;
}
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages)
{
return 0;
}
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index eb3e18659630..85685e1cdb89 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -390,7 +390,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
ret = of_irq_to_resource_table(eiu_node,
ltq_eiu_irq, exin_avail);
if (ret != exin_avail)
- panic("failed to load external irq resources\n");
+ panic("failed to load external irq resources");
if (request_mem_region(res.start, resource_size(&res),
res.name) < 0)
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 49c460370285..19686c5bc5ed 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -14,6 +14,7 @@
#include <asm/bootinfo.h>
#include <asm/time.h>
+#include <asm/prom.h>
#include <lantiq.h>
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index c24924fe087d..51804b10a036 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -128,7 +128,7 @@ static int pmu_enable(struct clk *clk)
do {} while (--retry && (pmu_r32(PWDSR(clk->module)) & clk->bits));
if (!retry)
- panic("activating PMU module failed!\n");
+ panic("activating PMU module failed!");
return 0;
}
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index bc6f96fcb529..62ffd20ea869 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -346,14 +346,8 @@ static void r4k_blast_scache_setup(void)
static inline void local_r4k___flush_cache_all(void * args)
{
-#if defined(CONFIG_CPU_LOONGSON2)
- r4k_blast_scache();
- return;
-#endif
- r4k_blast_dcache();
- r4k_blast_icache();
-
switch (current_cpu_type()) {
+ case CPU_LOONGSON2:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
@@ -361,7 +355,18 @@ static inline void local_r4k___flush_cache_all(void * args)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ /*
+ * These caches are inclusive caches, that is, if something
+ * is not cached in the S-cache, we know it also won't be
+ * in one of the primary caches.
+ */
r4k_blast_scache();
+ break;
+
+ default:
+ r4k_blast_dcache();
+ r4k_blast_icache();
+ break;
}
}
@@ -572,8 +577,17 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
if (end - start > icache_size)
r4k_blast_icache();
- else
- protected_blast_icache_range(start, end);
+ else {
+ switch (boot_cpu_type()) {
+		case CPU_LOONGSON2:
+			protected_loongson23_blast_icache_range(start, end);
+			break;
+
+		default:
+			protected_blast_icache_range(start, end);
+ break;
+ }
+ }
}
static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -1109,15 +1123,14 @@ static void probe_pcache(void)
case CPU_ALCHEMY:
c->icache.flags |= MIPS_CACHE_IC_F_DC;
break;
- }
-#ifdef CONFIG_CPU_LOONGSON2
- /*
- * LOONGSON2 has 4 way icache, but when using indexed cache op,
- * one op will act on all 4 ways
- */
- c->icache.ways = 1;
-#endif
+ case CPU_LOONGSON2:
+ /*
+ * LOONGSON2 has 4 way icache, but when using indexed cache op,
+ * one op will act on all 4 ways
+ */
+ c->icache.ways = 1;
+ }
printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
icache_size >> 10,
@@ -1193,7 +1206,6 @@ static int probe_scache(void)
return 1;
}
-#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -1209,7 +1221,6 @@ static void __init loongson2_sc_init(void)
c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
-#endif
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
@@ -1259,11 +1270,10 @@ static void setup_scache(void)
#endif
return;
-#if defined(CONFIG_CPU_LOONGSON2)
case CPU_LOONGSON2:
loongson2_sc_init();
return;
-#endif
+
case CPU_XLP:
/* don't need to worry about L2, fully coherent */
return;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 5f8b95512580..2e9418562258 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -297,7 +297,6 @@ static void mips_dma_sync_single_for_cpu(struct device *dev,
static void mips_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
- plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev))
__dma_sync(dma_addr_to_page(dev, dma_handle),
dma_handle & ~PAGE_MASK, size, direction);
@@ -327,7 +326,7 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return plat_dma_mapping_error(dev, dma_addr);
+ return 0;
}
int mips_dma_supported(struct device *dev, u64 mask)
@@ -340,7 +339,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
BUG_ON(direction == DMA_NONE);
- plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev))
__dma_sync_virtual(vaddr, size, direction);
}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index e205ef598e97..12156176c7ca 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -124,7 +124,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
BUG_ON(Page_dcache_dirty(page));
- inc_preempt_count();
+ pagefault_disable();
idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
idx += FIX_N_COLOURS * smp_processor_id() +
@@ -193,8 +193,7 @@ void kunmap_coherent(void)
write_c0_entryhi(old_ctx);
EXIT_CRITICAL(flags);
#endif
- dec_preempt_count();
- preempt_check_resched();
+ pagefault_enable();
}
void copy_user_highpage(struct page *to, struct page *from,
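The kmap_coherent()/kunmap_coherent() change above replaces open-coded preempt-count manipulation with the pagefault_disable()/pagefault_enable() pair, which states the actual requirement: the temporary mapping must not see a page fault or a reschedule while it is live. A minimal sketch of the pairing — map_slot()/unmap_slot() are hypothetical stand-ins for the fixmap/wired-TLB details, only the pagefault_*() calls come from the hunk:

void *temp_kmap(struct page *page, unsigned long addr)
{
	pagefault_disable();		/* no faults (and no preemption) from here on */
	return map_slot(page, addr);	/* hypothetical helper */
}

void temp_kunmap(void)
{
	unmap_slot();			/* hypothetical helper */
	pagefault_enable();		/* may reschedule once the mapping is gone */
}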
diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S
index 79bca3130bd1..30a494db99c2 100644
--- a/arch/mips/mm/tlb-funcs.S
+++ b/arch/mips/mm/tlb-funcs.S
@@ -16,12 +16,10 @@
#define FASTPATH_SIZE 128
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
LEAF(tlbmiss_handler_setup_pgd)
.space 16 * 4
END(tlbmiss_handler_setup_pgd)
EXPORT(tlbmiss_handler_setup_pgd_end)
-#endif
LEAF(handle_tlbm)
.space FASTPATH_SIZE * 4
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index bb3a5f643e97..da3b0b9c9eae 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -52,21 +52,26 @@ extern void build_tlb_refill_handler(void);
#endif /* CONFIG_MIPS_MT_SMTC */
-#if defined(CONFIG_CPU_LOONGSON2)
/*
* LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
 * unfortunately, itlb is not totally transparent to software.
*/
-#define FLUSH_ITLB write_c0_diag(4);
-
-#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); }
-
-#else
-
-#define FLUSH_ITLB
-#define FLUSH_ITLB_VM(vma)
+static inline void flush_itlb(void)
+{
+ switch (current_cpu_type()) {
+ case CPU_LOONGSON2:
+ write_c0_diag(4);
+ break;
+ default:
+ break;
+ }
+}
-#endif
+static inline void flush_itlb_vm(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_EXEC)
+ flush_itlb();
+}
void local_flush_tlb_all(void)
{
@@ -93,7 +98,7 @@ void local_flush_tlb_all(void)
}
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
- FLUSH_ITLB;
+ flush_itlb();
EXIT_CRITICAL(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
@@ -155,7 +160,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
} else {
drop_mmu_context(mm, cpu);
}
- FLUSH_ITLB;
+ flush_itlb();
EXIT_CRITICAL(flags);
}
}
@@ -197,7 +202,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
} else {
local_flush_tlb_all();
}
- FLUSH_ITLB;
+ flush_itlb();
EXIT_CRITICAL(flags);
}
@@ -230,7 +235,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
finish:
write_c0_entryhi(oldpid);
- FLUSH_ITLB_VM(vma);
+ flush_itlb_vm(vma);
EXIT_CRITICAL(flags);
}
}
@@ -262,7 +267,7 @@ void local_flush_tlb_one(unsigned long page)
tlbw_use_hazard();
}
write_c0_entryhi(oldpid);
- FLUSH_ITLB;
+ flush_itlb();
EXIT_CRITICAL(flags);
}
@@ -335,7 +340,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
tlb_write_indexed();
}
tlbw_use_hazard();
- FLUSH_ITLB_VM(vma);
+ flush_itlb_vm(vma);
EXIT_CRITICAL(flags);
}
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9bb3a9363b06..183f2b583e4d 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -340,10 +340,6 @@ static struct work_registers build_get_work_registers(u32 **p)
{
struct work_registers r;
- int smp_processor_id_reg;
- int smp_processor_id_sel;
- int smp_processor_id_shift;
-
if (scratch_reg >= 0) {
/* Save in CPU local C0_KScratch? */
UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
@@ -354,25 +350,9 @@ static struct work_registers build_get_work_registers(u32 **p)
}
if (num_possible_cpus() > 1) {
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
- smp_processor_id_shift = 51;
- smp_processor_id_reg = 20; /* XContext */
- smp_processor_id_sel = 0;
-#else
-# ifdef CONFIG_32BIT
- smp_processor_id_shift = 25;
- smp_processor_id_reg = 4; /* Context */
- smp_processor_id_sel = 0;
-# endif
-# ifdef CONFIG_64BIT
- smp_processor_id_shift = 26;
- smp_processor_id_reg = 4; /* Context */
- smp_processor_id_sel = 0;
-# endif
-#endif
/* Get smp_processor_id */
- UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
- UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);
+ UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
+ UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
/* handler_reg_save index in K0 */
UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
@@ -819,11 +799,11 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
}
/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
if (pgd_reg != -1) {
/* pgd is in pgd_reg */
UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
} else {
+#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
/*
* &pgd << 11 stored in CONTEXT [23..63].
*/
@@ -835,30 +815,18 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
/* 1 0 1 0 1 << 6 xkphys cached */
uasm_i_ori(p, ptr, ptr, 0x540);
uasm_i_drotr(p, ptr, ptr, 11);
- }
#elif defined(CONFIG_SMP)
-# ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC uses TCBind value as "CPU" index
- */
- uasm_i_mfc0(p, ptr, C0_TCBIND);
- uasm_i_dsrl_safe(p, ptr, ptr, 19);
-# else
- /*
- * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
- * stored in CONTEXT.
- */
- uasm_i_dmfc0(p, ptr, C0_CONTEXT);
- uasm_i_dsrl_safe(p, ptr, ptr, 23);
-# endif
- UASM_i_LA_mostly(p, tmp, pgdc);
- uasm_i_daddu(p, ptr, ptr, tmp);
- uasm_i_dmfc0(p, tmp, C0_BADVADDR);
- uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
+ UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
+ uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
+ UASM_i_LA_mostly(p, tmp, pgdc);
+ uasm_i_daddu(p, ptr, ptr, tmp);
+ uasm_i_dmfc0(p, tmp, C0_BADVADDR);
+ uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
- UASM_i_LA_mostly(p, ptr, pgdc);
- uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
+ UASM_i_LA_mostly(p, ptr, pgdc);
+ uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif
+ }
uasm_l_vmalloc_done(l, *p);
@@ -953,31 +921,25 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
static void __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
- long pgdc = (long)pgd_current;
+ if (pgd_reg != -1) {
+ /* pgd is in pgd_reg */
+ uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
+ uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+ } else {
+ long pgdc = (long)pgd_current;
- /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
+ /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC uses TCBind value as "CPU" index
- */
- uasm_i_mfc0(p, ptr, C0_TCBIND);
- UASM_i_LA_mostly(p, tmp, pgdc);
- uasm_i_srl(p, ptr, ptr, 19);
-#else
- /*
- * smp_processor_id() << 2 is stored in CONTEXT.
- */
- uasm_i_mfc0(p, ptr, C0_CONTEXT);
- UASM_i_LA_mostly(p, tmp, pgdc);
- uasm_i_srl(p, ptr, ptr, 23);
-#endif
- uasm_i_addu(p, ptr, tmp, ptr);
+ uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
+ UASM_i_LA_mostly(p, tmp, pgdc);
+ uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
+ uasm_i_addu(p, ptr, tmp, ptr);
#else
- UASM_i_LA_mostly(p, ptr, pgdc);
+ UASM_i_LA_mostly(p, ptr, pgdc);
#endif
- uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
- uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+ uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+ uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+ }
uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
@@ -1349,95 +1311,100 @@ static void build_r4000_tlb_refill_handler(void)
* need three, with the second nop'ed and the third being
* unused.
*/
- /* Loongson2 ebase is different than r4k, we have more space */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
- if ((p - tlb_handler) > 64)
- panic("TLB refill handler space exceeded");
-#else
- if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
- || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
- && uasm_insn_has_bdelay(relocs,
- tlb_handler + MIPS64_REFILL_INSNS - 3)))
- panic("TLB refill handler space exceeded");
-#endif
-
- /*
- * Now fold the handler in the TLB refill handler space.
- */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
- f = final_handler;
- /* Simplest case, just copy the handler. */
- uasm_copy_handler(relocs, labels, tlb_handler, p, f);
- final_len = p - tlb_handler;
-#else /* CONFIG_64BIT */
- f = final_handler + MIPS64_REFILL_INSNS;
- if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
- /* Just copy the handler. */
- uasm_copy_handler(relocs, labels, tlb_handler, p, f);
- final_len = p - tlb_handler;
- } else {
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
- const enum label_id ls = label_tlb_huge_update;
-#else
- const enum label_id ls = label_vmalloc;
-#endif
- u32 *split;
- int ov = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
- ;
- BUG_ON(i == ARRAY_SIZE(labels));
- split = labels[i].addr;
-
- /*
- * See if we have overflown one way or the other.
- */
- if (split > tlb_handler + MIPS64_REFILL_INSNS ||
- split < p - MIPS64_REFILL_INSNS)
- ov = 1;
-
- if (ov) {
+ switch (boot_cpu_type()) {
+ default:
+ if (sizeof(long) == 4) {
+ case CPU_LOONGSON2:
+ /* Loongson2 ebase is different than r4k, we have more space */
+ if ((p - tlb_handler) > 64)
+ panic("TLB refill handler space exceeded");
/*
- * Split two instructions before the end. One
- * for the branch and one for the instruction
- * in the delay slot.
+ * Now fold the handler in the TLB refill handler space.
*/
- split = tlb_handler + MIPS64_REFILL_INSNS - 2;
-
+ f = final_handler;
+ /* Simplest case, just copy the handler. */
+ uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+ final_len = p - tlb_handler;
+ break;
+ } else {
+ if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
+ || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
+ && uasm_insn_has_bdelay(relocs,
+ tlb_handler + MIPS64_REFILL_INSNS - 3)))
+ panic("TLB refill handler space exceeded");
/*
- * If the branch would fall in a delay slot,
- * we must back up an additional instruction
- * so that it is no longer in a delay slot.
+ * Now fold the handler in the TLB refill handler space.
*/
- if (uasm_insn_has_bdelay(relocs, split - 1))
- split--;
- }
- /* Copy first part of the handler. */
- uasm_copy_handler(relocs, labels, tlb_handler, split, f);
- f += split - tlb_handler;
-
- if (ov) {
- /* Insert branch. */
- uasm_l_split(&l, final_handler);
- uasm_il_b(&f, &r, label_split);
- if (uasm_insn_has_bdelay(relocs, split))
- uasm_i_nop(&f);
- else {
- uasm_copy_handler(relocs, labels,
- split, split + 1, f);
- uasm_move_labels(labels, f, f + 1, -1);
- f++;
- split++;
+ f = final_handler + MIPS64_REFILL_INSNS;
+ if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
+ /* Just copy the handler. */
+ uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+ final_len = p - tlb_handler;
+ } else {
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ const enum label_id ls = label_tlb_huge_update;
+#else
+ const enum label_id ls = label_vmalloc;
+#endif
+ u32 *split;
+ int ov = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
+ ;
+ BUG_ON(i == ARRAY_SIZE(labels));
+ split = labels[i].addr;
+
+ /*
+ * See if we have overflown one way or the other.
+ */
+ if (split > tlb_handler + MIPS64_REFILL_INSNS ||
+ split < p - MIPS64_REFILL_INSNS)
+ ov = 1;
+
+ if (ov) {
+ /*
+ * Split two instructions before the end. One
+ * for the branch and one for the instruction
+ * in the delay slot.
+ */
+ split = tlb_handler + MIPS64_REFILL_INSNS - 2;
+
+ /*
+ * If the branch would fall in a delay slot,
+ * we must back up an additional instruction
+ * so that it is no longer in a delay slot.
+ */
+ if (uasm_insn_has_bdelay(relocs, split - 1))
+ split--;
+ }
+ /* Copy first part of the handler. */
+ uasm_copy_handler(relocs, labels, tlb_handler, split, f);
+ f += split - tlb_handler;
+
+ if (ov) {
+ /* Insert branch. */
+ uasm_l_split(&l, final_handler);
+ uasm_il_b(&f, &r, label_split);
+ if (uasm_insn_has_bdelay(relocs, split))
+ uasm_i_nop(&f);
+ else {
+ uasm_copy_handler(relocs, labels,
+ split, split + 1, f);
+ uasm_move_labels(labels, f, f + 1, -1);
+ f++;
+ split++;
+ }
+ }
+
+ /* Copy the rest of the handler. */
+ uasm_copy_handler(relocs, labels, split, p, final_handler);
+ final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
+ (p - split);
}
}
-
- /* Copy the rest of the handler. */
- uasm_copy_handler(relocs, labels, split, p, final_handler);
- final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
- (p - split);
+ break;
}
-#endif /* CONFIG_64BIT */
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB refill handler (%u instructions).\n",
@@ -1451,28 +1418,30 @@ static void build_r4000_tlb_refill_handler(void)
extern u32 handle_tlbl[], handle_tlbl_end[];
extern u32 handle_tlbs[], handle_tlbs_end[];
extern u32 handle_tlbm[], handle_tlbm_end[];
-
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
-static void build_r4000_setup_pgd(void)
+static void build_setup_pgd(void)
{
const int a0 = 4;
- const int a1 = 5;
+ const int __maybe_unused a1 = 5;
+ const int __maybe_unused a2 = 6;
u32 *p = tlbmiss_handler_setup_pgd;
const int tlbmiss_handler_setup_pgd_size =
tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd;
- struct uasm_label *l = labels;
- struct uasm_reloc *r = relocs;
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+ long pgdc = (long)pgd_current;
+#endif
memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size *
sizeof(tlbmiss_handler_setup_pgd[0]));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
-
pgd_reg = allocate_kscratch();
-
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
if (pgd_reg == -1) {
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
/* PGD << 11 in c0_Context */
/*
* If it is a ckseg0 address, convert to a physical
@@ -1494,6 +1463,26 @@ static void build_r4000_setup_pgd(void)
uasm_i_jr(&p, 31);
UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
}
+#else
+#ifdef CONFIG_SMP
+ /* Save PGD to pgd_current[smp_processor_id()] */
+ UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
+ UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
+ UASM_i_LA_mostly(&p, a2, pgdc);
+ UASM_i_ADDU(&p, a2, a2, a1);
+ UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
+#else
+ UASM_i_LA_mostly(&p, a2, pgdc);
+ UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
+#endif /* SMP */
+ uasm_i_jr(&p, 31);
+
+ /* if pgd_reg is allocated, save PGD also to scratch register */
+ if (pgd_reg != -1)
+ UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
+ else
+ uasm_i_nop(&p);
+#endif
if (p >= tlbmiss_handler_setup_pgd_end)
panic("tlbmiss_handler_setup_pgd space exceeded");
@@ -1504,7 +1493,6 @@ static void build_r4000_setup_pgd(void)
dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
tlbmiss_handler_setup_pgd_size);
}
-#endif
static void
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
@@ -2197,10 +2185,8 @@ static void flush_tlb_handlers(void)
(unsigned long)handle_tlbs_end);
local_flush_icache_range((unsigned long)handle_tlbm,
(unsigned long)handle_tlbm_end);
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
(unsigned long)tlbmiss_handler_setup_pgd_end);
-#endif
}
void build_tlb_refill_handler(void)
@@ -2232,6 +2218,7 @@ void build_tlb_refill_handler(void)
if (!run_once) {
if (!cpu_has_local_ebase)
build_r3000_tlb_refill_handler();
+ build_setup_pgd();
build_r3000_tlb_load_handler();
build_r3000_tlb_store_handler();
build_r3000_tlb_modify_handler();
@@ -2255,9 +2242,7 @@ void build_tlb_refill_handler(void)
default:
if (!run_once) {
scratch_reg = allocate_kscratch();
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
- build_r4000_setup_pgd();
-#endif
+ build_setup_pgd();
build_r4000_tlb_load_handler();
build_r4000_tlb_store_handler();
build_r4000_tlb_modify_handler();
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index c69da3734699..0892575f829d 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -37,7 +37,6 @@
#include <asm/irq_regs.h>
#include <asm/mips-boards/malta.h>
#include <asm/mips-boards/maltaint.h>
-#include <asm/mips-boards/piix4.h>
#include <asm/gt64120.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/msc01_pci.h>
@@ -473,7 +472,7 @@ static void __init fill_ipi_map(void)
{
int cpu;
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
fill_ipi_map1(gic_resched_int_base, cpu, GIC_CPU_INT1);
fill_ipi_map1(gic_call_int_base, cpu, GIC_CPU_INT2);
}
@@ -574,8 +573,9 @@ void __init arch_init_irq(void)
/* FIXME */
int i;
#if defined(CONFIG_MIPS_MT_SMP)
- gic_call_int_base = GIC_NUM_INTRS - NR_CPUS;
- gic_resched_int_base = gic_call_int_base - NR_CPUS;
+ gic_call_int_base = GIC_NUM_INTRS -
+ (NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids;
+ gic_resched_int_base = gic_call_int_base - nr_cpu_ids;
fill_ipi_map();
#endif
gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map,
@@ -599,7 +599,7 @@ void __init arch_init_irq(void)
printk("CPU%d: status register now %08x\n", smp_processor_id(), read_c0_status());
write_c0_status(0x1100dc00);
printk("CPU%d: status register frc %08x\n", smp_processor_id(), read_c0_status());
- for (i = 0; i < NR_CPUS; i++) {
+ for (i = 0; i < nr_cpu_ids; i++) {
arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
GIC_RESCHED_INT(i), &irq_resched);
arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
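The new GIC base arithmetic above shrinks the IPI blocks to the CPUs that are actually present while keeping the resched block anchored where the old layout put it; GIC_NUM_INTRS = 256 below is an assumed value, used for illustration only:

	old:  gic_call_int_base    = GIC_NUM_INTRS - NR_CPUS
	      gic_resched_int_base = GIC_NUM_INTRS - 2 * NR_CPUS
	new:  gic_call_int_base    = GIC_NUM_INTRS - (NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids
	      gic_resched_int_base = gic_call_int_base - nr_cpu_ids
	                           = GIC_NUM_INTRS - 2 * NR_CPUS	(unchanged)

	e.g. GIC_NUM_INTRS = 256, NR_CPUS = 32, nr_cpu_ids = 2:
	      the resched base stays at 192, the call base drops from 224 to 194,
	      and fill_ipi_map()/arch_init_irq() now walk only the two present CPUs.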
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
index b5059dc899f4..928ba84c8a78 100644
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ b/arch/mips/mti-sead3/sead3-setup.c
@@ -10,6 +10,8 @@
#include <linux/of_fdt.h>
#include <linux/bootmem.h>
+#include <asm/prom.h>
+
#include <asm/mips-boards/generic.h>
const char *get_system_type(void)
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 6f8feb9efcff..c0eded01fde9 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -245,7 +245,7 @@ static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
return threadmode;
unsupp:
- panic("Unsupported CPU mask %lx\n",
+ panic("Unsupported CPU mask %lx",
(unsigned long)cpumask_bits(wakeup_mask)[0]);
return 0;
}
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 76a7131e486e..e8938b7c5013 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -97,9 +97,6 @@ void __init plat_mem_setup(void)
_machine_halt = nlm_linux_exit;
pm_power_off = nlm_linux_exit;
- /* memory and bootargs from DT */
- early_init_devtree(initial_boot_params);
-
if (boot_mem_map.nr_map == 0) {
pr_info("Using DRAM BARs for memory map.\n");
xlp_init_mem_from_bars();
diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c
index 6c829df28dc7..c2ce41ea61d7 100644
--- a/arch/mips/pci/fixup-lantiq.c
+++ b/arch/mips/pci/fixup-lantiq.c
@@ -25,16 +25,5 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- struct of_irq dev_irq;
- int irq;
-
- if (of_irq_map_pci(dev, &dev_irq)) {
- dev_err(&dev->dev, "trying to map irq for unknown slot:%d pin:%d\n",
- slot, pin);
- return 0;
- }
- irq = irq_create_of_mapping(dev_irq.controller, dev_irq.specifier,
- dev_irq.size);
- dev_info(&dev->dev, "SLOT:%d PIN:%d IRQ:%d\n", slot, pin, irq);
- return irq;
+ return of_irq_parse_and_map_pci(dev, slot, pin);
}
diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c
index 07ada7f8441e..df36e2327c54 100644
--- a/arch/mips/pci/fixup-malta.c
+++ b/arch/mips/pci/fixup-malta.c
@@ -1,5 +1,6 @@
#include <linux/init.h>
#include <linux/pci.h>
+#include <asm/mips-boards/piix4.h>
/* PCI interrupt pins */
#define PCIA 1
@@ -53,7 +54,8 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
static void malta_piix_func0_fixup(struct pci_dev *pdev)
{
unsigned char reg_val;
- static int piixirqmap[16] = { /* PIIX PIRQC[A:D] irq mappings */
+ /* PIIX PIRQC[A:D] irq mappings */
+ static int piixirqmap[PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX] = {
0, 0, 0, 3,
4, 5, 6, 7,
0, 9, 10, 11,
@@ -63,11 +65,12 @@ static void malta_piix_func0_fixup(struct pci_dev *pdev)
/* Interrogate PIIX4 to get PCI IRQ mapping */
for (i = 0; i <= 3; i++) {
- pci_read_config_byte(pdev, 0x60+i, &reg_val);
- if (reg_val & 0x80)
+ pci_read_config_byte(pdev, PIIX4_FUNC0_PIRQRC+i, &reg_val);
+ if (reg_val & PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE)
pci_irq[PCIA+i] = 0; /* Disabled */
else
- pci_irq[PCIA+i] = piixirqmap[reg_val & 15];
+ pci_irq[PCIA+i] = piixirqmap[reg_val &
+ PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK];
}
/* Done by YAMON 2.00 onwards */
@@ -76,8 +79,9 @@ static void malta_piix_func0_fixup(struct pci_dev *pdev)
* Set top of main memory accessible by ISA or DMA
* devices to 16 Mb.
*/
- pci_read_config_byte(pdev, 0x69, &reg_val);
- pci_write_config_byte(pdev, 0x69, reg_val | 0xf0);
+ pci_read_config_byte(pdev, PIIX4_FUNC0_TOM, &reg_val);
+ pci_write_config_byte(pdev, PIIX4_FUNC0_TOM, reg_val |
+ PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK);
}
}
@@ -93,10 +97,14 @@ static void malta_piix_func1_fixup(struct pci_dev *pdev)
/*
* IDE Decode enable.
*/
- pci_read_config_byte(pdev, 0x41, &reg_val);
- pci_write_config_byte(pdev, 0x41, reg_val|0x80);
- pci_read_config_byte(pdev, 0x43, &reg_val);
- pci_write_config_byte(pdev, 0x43, reg_val|0x80);
+ pci_read_config_byte(pdev, PIIX4_FUNC1_IDETIM_PRIMARY_HI,
+ &reg_val);
+ pci_write_config_byte(pdev, PIIX4_FUNC1_IDETIM_PRIMARY_HI,
+ reg_val|PIIX4_FUNC1_IDETIM_PRIMARY_HI_IDE_DECODE_EN);
+ pci_read_config_byte(pdev, PIIX4_FUNC1_IDETIM_SECONDARY_HI,
+ &reg_val);
+ pci_write_config_byte(pdev, PIIX4_FUNC1_IDETIM_SECONDARY_HI,
+ reg_val|PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN);
}
}
@@ -108,10 +116,12 @@ static void quirk_dlcsetup(struct pci_dev *dev)
{
u8 odlc, ndlc;
- (void) pci_read_config_byte(dev, 0x82, &odlc);
+ (void) pci_read_config_byte(dev, PIIX4_FUNC0_DLC, &odlc);
/* Enable passive releases and delayed transaction */
- ndlc = odlc | 7;
- (void) pci_write_config_byte(dev, 0x82, ndlc);
+ ndlc = odlc | PIIX4_FUNC0_DLC_USBPR_EN |
+ PIIX4_FUNC0_DLC_PASSIVE_RELEASE_EN |
+ PIIX4_FUNC0_DLC_DELAYED_TRANSACTION_EN;
+ (void) pci_write_config_byte(dev, PIIX4_FUNC0_DLC, ndlc);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
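The named PIIX4 register constants used above replace the raw config-space offsets of the old code; matching the before and after of this hunk, the additions to asm/mips-boards/piix4.h presumably look roughly like the sketch below. The individual DLC bit positions are an assumption — only their OR value (the old 0x7) is visible here:

/* Sketch inferred from the magic numbers replaced above, not the literal header. */
#define PIIX4_FUNC0_PIRQRC				0x60	/* was 0x60+i */
#define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE	(1 << 7)	/* was 0x80 */
#define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK		0xf	/* was 15 */
#define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX		16	/* table size */
#define PIIX4_FUNC0_TOM					0x69
#define   PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK		0xf0
#define PIIX4_FUNC0_DLC					0x82
#define   PIIX4_FUNC0_DLC_USBPR_EN			(1 << 2)	/* assumed bit */
#define   PIIX4_FUNC0_DLC_PASSIVE_RELEASE_EN		(1 << 1)	/* assumed bit */
#define   PIIX4_FUNC0_DLC_DELAYED_TRANSACTION_EN	(1 << 0)	/* assumed bit */
#define PIIX4_FUNC1_IDETIM_PRIMARY_HI			0x41
#define   PIIX4_FUNC1_IDETIM_PRIMARY_HI_IDE_DECODE_EN	(1 << 7)	/* was 0x80 */
#define PIIX4_FUNC1_IDETIM_SECONDARY_HI			0x43
#define   PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN	(1 << 7)	/* was 0x80 */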
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 18517dd0f709..d471a26dd5f8 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -363,9 +363,6 @@ static int ar71xx_pci_probe(struct platform_device *pdev)
spin_lock_init(&apc->lock);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
- if (!res)
- return -EINVAL;
-
apc->cfg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(apc->cfg_base))
return PTR_ERR(apc->cfg_base);
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 65ec032fa0b4..785b2659b519 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -362,25 +362,16 @@ static int ar724x_pci_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl_base");
- if (!res)
- return -EINVAL;
-
apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(apc->ctrl_base))
return PTR_ERR(apc->ctrl_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
- if (!res)
- return -EINVAL;
-
apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(apc->devcfg_base))
return PTR_ERR(apc->devcfg_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
- if (!res)
- return -EINVAL;
-
apc->crp_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(apc->crp_base))
return PTR_ERR(apc->crp_base);
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index 95c9d41382e7..adeff2bfe4cd 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -583,29 +583,7 @@ err_put_intc_node:
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- struct of_irq dev_irq;
- int err;
- int irq;
-
- err = of_irq_map_pci(dev, &dev_irq);
- if (err) {
- pr_err("pci %s: unable to get irq map, err=%d\n",
- pci_name((struct pci_dev *) dev), err);
- return 0;
- }
-
- irq = irq_create_of_mapping(dev_irq.controller,
- dev_irq.specifier,
- dev_irq.size);
-
- if (irq == 0)
- pr_crit("pci %s: no irq found for pin %u\n",
- pci_name((struct pci_dev *) dev), pin);
- else
- pr_info("pci %s: using irq %d for pin %u\n",
- pci_name((struct pci_dev *) dev), irq, pin);
-
- return irq;
+ return of_irq_parse_and_map_pci(dev, slot, pin);
}
int pcibios_plat_dev_init(struct pci_dev *dev)
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 33e7aa52d9c4..1bf60b127377 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -120,51 +120,37 @@ static void pcibios_scanbus(struct pci_controller *hose)
#ifdef CONFIG_OF
void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
{
- const __be32 *ranges;
- int rlen;
- int pna = of_n_addr_cells(node);
- int np = pna + 5;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
pr_info("PCI host bridge %s ranges:\n", node->full_name);
- ranges = of_get_property(node, "ranges", &rlen);
- if (ranges == NULL)
- return;
hose->of_node = node;
- while ((rlen -= np * 4) >= 0) {
- u32 pci_space;
+ if (of_pci_range_parser_init(&parser, node))
+ return;
+
+ for_each_of_pci_range(&parser, &range) {
struct resource *res = NULL;
- u64 addr, size;
-
- pci_space = be32_to_cpup(&ranges[0]);
- addr = of_translate_address(node, ranges + 3);
- size = of_read_number(ranges + pna + 3, 2);
- ranges += np;
- switch ((pci_space >> 24) & 0x3) {
- case 1: /* PCI IO space */
+
+ switch (range.flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_IO:
pr_info(" IO 0x%016llx..0x%016llx\n",
- addr, addr + size - 1);
+ range.cpu_addr,
+ range.cpu_addr + range.size - 1);
hose->io_map_base =
- (unsigned long)ioremap(addr, size);
+ (unsigned long)ioremap(range.cpu_addr,
+ range.size);
res = hose->io_resource;
- res->flags = IORESOURCE_IO;
break;
- case 2: /* PCI Memory space */
- case 3: /* PCI 64 bits Memory space */
+ case IORESOURCE_MEM:
pr_info(" MEM 0x%016llx..0x%016llx\n",
- addr, addr + size - 1);
+ range.cpu_addr,
+ range.cpu_addr + range.size - 1);
res = hose->mem_resource;
- res->flags = IORESOURCE_MEM;
break;
}
- if (res != NULL) {
- res->start = addr;
- res->name = node->full_name;
- res->end = res->start + size - 1;
- res->parent = NULL;
- res->sibling = NULL;
- res->child = NULL;
- }
+ if (res != NULL)
+ of_pci_range_to_resource(&range, node, res);
}
}
diff --git a/arch/mips/powertv/Kconfig b/arch/mips/powertv/Kconfig
deleted file mode 100644
index dd91fbacbcba..000000000000
--- a/arch/mips/powertv/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-config BOOTLOADER_FAMILY
- string "POWERTV Bootloader Family string"
- default "85"
- depends on POWERTV
- help
- This value should be specified when the bootloader driver is disabled
- and must be exactly two characters long. Families supported are:
- R1 - RNG-100 R2 - RNG-200
- A1 - Class A B1 - Class B
- E1 - Class E F1 - Class F
- 44 - 45xx 46 - 46xx
- 85 - 85xx 86 - 86xx
diff --git a/arch/mips/powertv/Makefile b/arch/mips/powertv/Makefile
deleted file mode 100644
index 39ca9f8d63ae..000000000000
--- a/arch/mips/powertv/Makefile
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Carsten Langgaard, carstenl@mips.com
-# Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
-#
-# Carsten Langgaard, carstenl@mips.com
-# Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
-# Portions copyright (C) 2009 Cisco Systems, Inc.
-#
-# This program is free software; you can distribute it and/or modify it
-# under the terms of the GNU General Public License (Version 2) as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
-#
-# Makefile for the Cisco PowerTV-specific kernel interface routines
-# under Linux.
-#
-
-obj-y += init.o ioremap.o memory.o powertv_setup.o reset.o time.o \
- asic/ pci/
-
-obj-$(CONFIG_USB) += powertv-usb.o
diff --git a/arch/mips/powertv/Platform b/arch/mips/powertv/Platform
deleted file mode 100644
index 4eb5af1d8eea..000000000000
--- a/arch/mips/powertv/Platform
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Cisco PowerTV Platform
-#
-platform-$(CONFIG_POWERTV) += powertv/
-cflags-$(CONFIG_POWERTV) += \
- -I$(srctree)/arch/mips/include/asm/mach-powertv
-load-$(CONFIG_POWERTV) += 0xffffffff90800000
diff --git a/arch/mips/powertv/asic/Makefile b/arch/mips/powertv/asic/Makefile
deleted file mode 100644
index 35dcc53eb25f..000000000000
--- a/arch/mips/powertv/asic/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Copyright (C) 2009 Scientific-Atlanta, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-#
-
-obj-y += asic-calliope.o asic-cronus.o asic-gaia.o asic-zeus.o \
- asic_devices.o asic_int.o irq_asic.o prealloc-calliope.o \
- prealloc-cronus.o prealloc-cronuslite.o prealloc-gaia.o prealloc-zeus.o
diff --git a/arch/mips/powertv/asic/asic-calliope.c b/arch/mips/powertv/asic/asic-calliope.c
deleted file mode 100644
index 2f539b43f56b..000000000000
--- a/arch/mips/powertv/asic/asic-calliope.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Locations of devices in the Calliope ASIC.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
- *
- * Description: Defines the platform resources for the SA settop.
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-#define CALLIOPE_ADDR(x) (CALLIOPE_IO_BASE + (x))
-
-const struct register_map calliope_register_map __initconst = {
- .eic_slow0_strt_add = {.phys = CALLIOPE_ADDR(0x800000)},
- .eic_cfg_bits = {.phys = CALLIOPE_ADDR(0x800038)},
- .eic_ready_status = {.phys = CALLIOPE_ADDR(0x80004c)},
-
- .chipver3 = {.phys = CALLIOPE_ADDR(0xA00800)},
- .chipver2 = {.phys = CALLIOPE_ADDR(0xA00804)},
- .chipver1 = {.phys = CALLIOPE_ADDR(0xA00808)},
- .chipver0 = {.phys = CALLIOPE_ADDR(0xA0080c)},
-
- /* The registers of IRBlaster */
- .uart1_intstat = {.phys = CALLIOPE_ADDR(0xA01800)},
- .uart1_inten = {.phys = CALLIOPE_ADDR(0xA01804)},
- .uart1_config1 = {.phys = CALLIOPE_ADDR(0xA01808)},
- .uart1_config2 = {.phys = CALLIOPE_ADDR(0xA0180C)},
- .uart1_divisorhi = {.phys = CALLIOPE_ADDR(0xA01810)},
- .uart1_divisorlo = {.phys = CALLIOPE_ADDR(0xA01814)},
- .uart1_data = {.phys = CALLIOPE_ADDR(0xA01818)},
- .uart1_status = {.phys = CALLIOPE_ADDR(0xA0181C)},
-
- .int_stat_3 = {.phys = CALLIOPE_ADDR(0xA02800)},
- .int_stat_2 = {.phys = CALLIOPE_ADDR(0xA02804)},
- .int_stat_1 = {.phys = CALLIOPE_ADDR(0xA02808)},
- .int_stat_0 = {.phys = CALLIOPE_ADDR(0xA0280c)},
- .int_config = {.phys = CALLIOPE_ADDR(0xA02810)},
- .int_int_scan = {.phys = CALLIOPE_ADDR(0xA02818)},
- .ien_int_3 = {.phys = CALLIOPE_ADDR(0xA02830)},
- .ien_int_2 = {.phys = CALLIOPE_ADDR(0xA02834)},
- .ien_int_1 = {.phys = CALLIOPE_ADDR(0xA02838)},
- .ien_int_0 = {.phys = CALLIOPE_ADDR(0xA0283c)},
- .int_level_3_3 = {.phys = CALLIOPE_ADDR(0xA02880)},
- .int_level_3_2 = {.phys = CALLIOPE_ADDR(0xA02884)},
- .int_level_3_1 = {.phys = CALLIOPE_ADDR(0xA02888)},
- .int_level_3_0 = {.phys = CALLIOPE_ADDR(0xA0288c)},
- .int_level_2_3 = {.phys = CALLIOPE_ADDR(0xA02890)},
- .int_level_2_2 = {.phys = CALLIOPE_ADDR(0xA02894)},
- .int_level_2_1 = {.phys = CALLIOPE_ADDR(0xA02898)},
- .int_level_2_0 = {.phys = CALLIOPE_ADDR(0xA0289c)},
- .int_level_1_3 = {.phys = CALLIOPE_ADDR(0xA028a0)},
- .int_level_1_2 = {.phys = CALLIOPE_ADDR(0xA028a4)},
- .int_level_1_1 = {.phys = CALLIOPE_ADDR(0xA028a8)},
- .int_level_1_0 = {.phys = CALLIOPE_ADDR(0xA028ac)},
- .int_level_0_3 = {.phys = CALLIOPE_ADDR(0xA028b0)},
- .int_level_0_2 = {.phys = CALLIOPE_ADDR(0xA028b4)},
- .int_level_0_1 = {.phys = CALLIOPE_ADDR(0xA028b8)},
- .int_level_0_0 = {.phys = CALLIOPE_ADDR(0xA028bc)},
- .int_docsis_en = {.phys = CALLIOPE_ADDR(0xA028F4)},
-
- .mips_pll_setup = {.phys = CALLIOPE_ADDR(0x980000)},
- .fs432x4b4_usb_ctl = {.phys = CALLIOPE_ADDR(0x980030)},
- .test_bus = {.phys = CALLIOPE_ADDR(0x9800CC)},
- .crt_spare = {.phys = CALLIOPE_ADDR(0x9800d4)},
- .usb2_ohci_int_mask = {.phys = CALLIOPE_ADDR(0x9A000c)},
- .usb2_strap = {.phys = CALLIOPE_ADDR(0x9A0014)},
- .ehci_hcapbase = {.phys = CALLIOPE_ADDR(0x9BFE00)},
- .ohci_hc_revision = {.phys = CALLIOPE_ADDR(0x9BFC00)},
- .bcm1_bs_lmi_steer = {.phys = CALLIOPE_ADDR(0x9E0004)},
- .usb2_control = {.phys = CALLIOPE_ADDR(0x9E0054)},
- .usb2_stbus_obc = {.phys = CALLIOPE_ADDR(0x9BFF00)},
- .usb2_stbus_mess_size = {.phys = CALLIOPE_ADDR(0x9BFF04)},
- .usb2_stbus_chunk_size = {.phys = CALLIOPE_ADDR(0x9BFF08)},
-
- .pcie_regs = {.phys = 0x000000}, /* -doesn't exist- */
- .tim_ch = {.phys = CALLIOPE_ADDR(0xA02C10)},
- .tim_cl = {.phys = CALLIOPE_ADDR(0xA02C14)},
- .gpio_dout = {.phys = CALLIOPE_ADDR(0xA02c20)},
- .gpio_din = {.phys = CALLIOPE_ADDR(0xA02c24)},
- .gpio_dir = {.phys = CALLIOPE_ADDR(0xA02c2C)},
- .watchdog = {.phys = CALLIOPE_ADDR(0xA02c30)},
- .front_panel = {.phys = 0x000000}, /* -not used- */
-};
diff --git a/arch/mips/powertv/asic/asic-cronus.c b/arch/mips/powertv/asic/asic-cronus.c
deleted file mode 100644
index 7f8f3429b35a..000000000000
--- a/arch/mips/powertv/asic/asic-cronus.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Locations of devices in the Cronus ASIC
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
- *
- * Description: Defines the platform resources for the SA settop.
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-#define CRONUS_ADDR(x) (CRONUS_IO_BASE + (x))
-
-const struct register_map cronus_register_map __initconst = {
- .eic_slow0_strt_add = {.phys = CRONUS_ADDR(0x000000)},
- .eic_cfg_bits = {.phys = CRONUS_ADDR(0x000038)},
- .eic_ready_status = {.phys = CRONUS_ADDR(0x00004C)},
-
- .chipver3 = {.phys = CRONUS_ADDR(0x2A0800)},
- .chipver2 = {.phys = CRONUS_ADDR(0x2A0804)},
- .chipver1 = {.phys = CRONUS_ADDR(0x2A0808)},
- .chipver0 = {.phys = CRONUS_ADDR(0x2A080C)},
-
- /* The registers of IRBlaster */
- .uart1_intstat = {.phys = CRONUS_ADDR(0x2A1800)},
- .uart1_inten = {.phys = CRONUS_ADDR(0x2A1804)},
- .uart1_config1 = {.phys = CRONUS_ADDR(0x2A1808)},
- .uart1_config2 = {.phys = CRONUS_ADDR(0x2A180C)},
- .uart1_divisorhi = {.phys = CRONUS_ADDR(0x2A1810)},
- .uart1_divisorlo = {.phys = CRONUS_ADDR(0x2A1814)},
- .uart1_data = {.phys = CRONUS_ADDR(0x2A1818)},
- .uart1_status = {.phys = CRONUS_ADDR(0x2A181C)},
-
- .int_stat_3 = {.phys = CRONUS_ADDR(0x2A2800)},
- .int_stat_2 = {.phys = CRONUS_ADDR(0x2A2804)},
- .int_stat_1 = {.phys = CRONUS_ADDR(0x2A2808)},
- .int_stat_0 = {.phys = CRONUS_ADDR(0x2A280C)},
- .int_config = {.phys = CRONUS_ADDR(0x2A2810)},
- .int_int_scan = {.phys = CRONUS_ADDR(0x2A2818)},
- .ien_int_3 = {.phys = CRONUS_ADDR(0x2A2830)},
- .ien_int_2 = {.phys = CRONUS_ADDR(0x2A2834)},
- .ien_int_1 = {.phys = CRONUS_ADDR(0x2A2838)},
- .ien_int_0 = {.phys = CRONUS_ADDR(0x2A283C)},
- .int_level_3_3 = {.phys = CRONUS_ADDR(0x2A2880)},
- .int_level_3_2 = {.phys = CRONUS_ADDR(0x2A2884)},
- .int_level_3_1 = {.phys = CRONUS_ADDR(0x2A2888)},
- .int_level_3_0 = {.phys = CRONUS_ADDR(0x2A288C)},
- .int_level_2_3 = {.phys = CRONUS_ADDR(0x2A2890)},
- .int_level_2_2 = {.phys = CRONUS_ADDR(0x2A2894)},
- .int_level_2_1 = {.phys = CRONUS_ADDR(0x2A2898)},
- .int_level_2_0 = {.phys = CRONUS_ADDR(0x2A289C)},
- .int_level_1_3 = {.phys = CRONUS_ADDR(0x2A28A0)},
- .int_level_1_2 = {.phys = CRONUS_ADDR(0x2A28A4)},
- .int_level_1_1 = {.phys = CRONUS_ADDR(0x2A28A8)},
- .int_level_1_0 = {.phys = CRONUS_ADDR(0x2A28AC)},
- .int_level_0_3 = {.phys = CRONUS_ADDR(0x2A28B0)},
- .int_level_0_2 = {.phys = CRONUS_ADDR(0x2A28B4)},
- .int_level_0_1 = {.phys = CRONUS_ADDR(0x2A28B8)},
- .int_level_0_0 = {.phys = CRONUS_ADDR(0x2A28BC)},
- .int_docsis_en = {.phys = CRONUS_ADDR(0x2A28F4)},
-
- .mips_pll_setup = {.phys = CRONUS_ADDR(0x1C0000)},
- .fs432x4b4_usb_ctl = {.phys = CRONUS_ADDR(0x1C0028)},
- .test_bus = {.phys = CRONUS_ADDR(0x1C00CC)},
- .crt_spare = {.phys = CRONUS_ADDR(0x1c00d4)},
- .usb2_ohci_int_mask = {.phys = CRONUS_ADDR(0x20000C)},
- .usb2_strap = {.phys = CRONUS_ADDR(0x200014)},
- .ehci_hcapbase = {.phys = CRONUS_ADDR(0x21FE00)},
- .ohci_hc_revision = {.phys = CRONUS_ADDR(0x21fc00)},
- .bcm1_bs_lmi_steer = {.phys = CRONUS_ADDR(0x2E0008)},
- .usb2_control = {.phys = CRONUS_ADDR(0x2E004C)},
- .usb2_stbus_obc = {.phys = CRONUS_ADDR(0x21FF00)},
- .usb2_stbus_mess_size = {.phys = CRONUS_ADDR(0x21FF04)},
- .usb2_stbus_chunk_size = {.phys = CRONUS_ADDR(0x21FF08)},
-
- .pcie_regs = {.phys = CRONUS_ADDR(0x220000)},
- .tim_ch = {.phys = CRONUS_ADDR(0x2A2C10)},
- .tim_cl = {.phys = CRONUS_ADDR(0x2A2C14)},
- .gpio_dout = {.phys = CRONUS_ADDR(0x2A2C20)},
- .gpio_din = {.phys = CRONUS_ADDR(0x2A2C24)},
- .gpio_dir = {.phys = CRONUS_ADDR(0x2A2C2C)},
- .watchdog = {.phys = CRONUS_ADDR(0x2A2C30)},
- .front_panel = {.phys = CRONUS_ADDR(0x2A3800)},
-};
diff --git a/arch/mips/powertv/asic/asic-gaia.c b/arch/mips/powertv/asic/asic-gaia.c
deleted file mode 100644
index 1265b49012e6..000000000000
--- a/arch/mips/powertv/asic/asic-gaia.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Locations of devices in the Gaia ASIC
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: David VomLehn
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-const struct register_map gaia_register_map __initconst = {
- .eic_slow0_strt_add = {.phys = GAIA_IO_BASE + 0x000000},
- .eic_cfg_bits = {.phys = GAIA_IO_BASE + 0x000038},
- .eic_ready_status = {.phys = GAIA_IO_BASE + 0x00004C},
-
- .chipver3 = {.phys = GAIA_IO_BASE + 0x2A0800},
- .chipver2 = {.phys = GAIA_IO_BASE + 0x2A0804},
- .chipver1 = {.phys = GAIA_IO_BASE + 0x2A0808},
- .chipver0 = {.phys = GAIA_IO_BASE + 0x2A080C},
-
- /* The registers of IRBlaster */
- .uart1_intstat = {.phys = GAIA_IO_BASE + 0x2A1800},
- .uart1_inten = {.phys = GAIA_IO_BASE + 0x2A1804},
- .uart1_config1 = {.phys = GAIA_IO_BASE + 0x2A1808},
- .uart1_config2 = {.phys = GAIA_IO_BASE + 0x2A180C},
- .uart1_divisorhi = {.phys = GAIA_IO_BASE + 0x2A1810},
- .uart1_divisorlo = {.phys = GAIA_IO_BASE + 0x2A1814},
- .uart1_data = {.phys = GAIA_IO_BASE + 0x2A1818},
- .uart1_status = {.phys = GAIA_IO_BASE + 0x2A181C},
-
- .int_stat_3 = {.phys = GAIA_IO_BASE + 0x2A2800},
- .int_stat_2 = {.phys = GAIA_IO_BASE + 0x2A2804},
- .int_stat_1 = {.phys = GAIA_IO_BASE + 0x2A2808},
- .int_stat_0 = {.phys = GAIA_IO_BASE + 0x2A280C},
- .int_config = {.phys = GAIA_IO_BASE + 0x2A2810},
- .int_int_scan = {.phys = GAIA_IO_BASE + 0x2A2818},
- .ien_int_3 = {.phys = GAIA_IO_BASE + 0x2A2830},
- .ien_int_2 = {.phys = GAIA_IO_BASE + 0x2A2834},
- .ien_int_1 = {.phys = GAIA_IO_BASE + 0x2A2838},
- .ien_int_0 = {.phys = GAIA_IO_BASE + 0x2A283C},
- .int_level_3_3 = {.phys = GAIA_IO_BASE + 0x2A2880},
- .int_level_3_2 = {.phys = GAIA_IO_BASE + 0x2A2884},
- .int_level_3_1 = {.phys = GAIA_IO_BASE + 0x2A2888},
- .int_level_3_0 = {.phys = GAIA_IO_BASE + 0x2A288C},
- .int_level_2_3 = {.phys = GAIA_IO_BASE + 0x2A2890},
- .int_level_2_2 = {.phys = GAIA_IO_BASE + 0x2A2894},
- .int_level_2_1 = {.phys = GAIA_IO_BASE + 0x2A2898},
- .int_level_2_0 = {.phys = GAIA_IO_BASE + 0x2A289C},
- .int_level_1_3 = {.phys = GAIA_IO_BASE + 0x2A28A0},
- .int_level_1_2 = {.phys = GAIA_IO_BASE + 0x2A28A4},
- .int_level_1_1 = {.phys = GAIA_IO_BASE + 0x2A28A8},
- .int_level_1_0 = {.phys = GAIA_IO_BASE + 0x2A28AC},
- .int_level_0_3 = {.phys = GAIA_IO_BASE + 0x2A28B0},
- .int_level_0_2 = {.phys = GAIA_IO_BASE + 0x2A28B4},
- .int_level_0_1 = {.phys = GAIA_IO_BASE + 0x2A28B8},
- .int_level_0_0 = {.phys = GAIA_IO_BASE + 0x2A28BC},
- .int_docsis_en = {.phys = GAIA_IO_BASE + 0x2A28F4},
-
- .mips_pll_setup = {.phys = GAIA_IO_BASE + 0x1C0000},
- .fs432x4b4_usb_ctl = {.phys = GAIA_IO_BASE + 0x1C0024},
- .test_bus = {.phys = GAIA_IO_BASE + 0x1C00CC},
- .crt_spare = {.phys = GAIA_IO_BASE + 0x1c0108},
- .usb2_ohci_int_mask = {.phys = GAIA_IO_BASE + 0x20000C},
- .usb2_strap = {.phys = GAIA_IO_BASE + 0x200014},
- .ehci_hcapbase = {.phys = GAIA_IO_BASE + 0x21FE00},
- .ohci_hc_revision = {.phys = GAIA_IO_BASE + 0x21fc00},
- .bcm1_bs_lmi_steer = {.phys = GAIA_IO_BASE + 0x2E0004},
- .usb2_control = {.phys = GAIA_IO_BASE + 0x2E004C},
- .usb2_stbus_obc = {.phys = GAIA_IO_BASE + 0x21FF00},
- .usb2_stbus_mess_size = {.phys = GAIA_IO_BASE + 0x21FF04},
- .usb2_stbus_chunk_size = {.phys = GAIA_IO_BASE + 0x21FF08},
-
- .pcie_regs = {.phys = GAIA_IO_BASE + 0x220000},
- .tim_ch = {.phys = GAIA_IO_BASE + 0x2A2C10},
- .tim_cl = {.phys = GAIA_IO_BASE + 0x2A2C14},
- .gpio_dout = {.phys = GAIA_IO_BASE + 0x2A2C20},
- .gpio_din = {.phys = GAIA_IO_BASE + 0x2A2C24},
- .gpio_dir = {.phys = GAIA_IO_BASE + 0x2A2C2C},
- .watchdog = {.phys = GAIA_IO_BASE + 0x2A2C30},
- .front_panel = {.phys = GAIA_IO_BASE + 0x2A3800},
-};
diff --git a/arch/mips/powertv/asic/asic-zeus.c b/arch/mips/powertv/asic/asic-zeus.c
deleted file mode 100644
index 14e7de137e03..000000000000
--- a/arch/mips/powertv/asic/asic-zeus.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Locations of devices in the Zeus ASIC
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
- *
- * Description: Defines the platform resources for the SA settop.
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-#define ZEUS_ADDR(x) (ZEUS_IO_BASE + (x))
-
-const struct register_map zeus_register_map __initconst = {
- .eic_slow0_strt_add = {.phys = ZEUS_ADDR(0x000000)},
- .eic_cfg_bits = {.phys = ZEUS_ADDR(0x000038)},
- .eic_ready_status = {.phys = ZEUS_ADDR(0x00004c)},
-
- .chipver3 = {.phys = ZEUS_ADDR(0x280800)},
- .chipver2 = {.phys = ZEUS_ADDR(0x280804)},
- .chipver1 = {.phys = ZEUS_ADDR(0x280808)},
- .chipver0 = {.phys = ZEUS_ADDR(0x28080c)},
-
- /* The registers of IRBlaster */
- .uart1_intstat = {.phys = ZEUS_ADDR(0x281800)},
- .uart1_inten = {.phys = ZEUS_ADDR(0x281804)},
- .uart1_config1 = {.phys = ZEUS_ADDR(0x281808)},
- .uart1_config2 = {.phys = ZEUS_ADDR(0x28180C)},
- .uart1_divisorhi = {.phys = ZEUS_ADDR(0x281810)},
- .uart1_divisorlo = {.phys = ZEUS_ADDR(0x281814)},
- .uart1_data = {.phys = ZEUS_ADDR(0x281818)},
- .uart1_status = {.phys = ZEUS_ADDR(0x28181C)},
-
- .int_stat_3 = {.phys = ZEUS_ADDR(0x282800)},
- .int_stat_2 = {.phys = ZEUS_ADDR(0x282804)},
- .int_stat_1 = {.phys = ZEUS_ADDR(0x282808)},
- .int_stat_0 = {.phys = ZEUS_ADDR(0x28280c)},
- .int_config = {.phys = ZEUS_ADDR(0x282810)},
- .int_int_scan = {.phys = ZEUS_ADDR(0x282818)},
- .ien_int_3 = {.phys = ZEUS_ADDR(0x282830)},
- .ien_int_2 = {.phys = ZEUS_ADDR(0x282834)},
- .ien_int_1 = {.phys = ZEUS_ADDR(0x282838)},
- .ien_int_0 = {.phys = ZEUS_ADDR(0x28283c)},
- .int_level_3_3 = {.phys = ZEUS_ADDR(0x282880)},
- .int_level_3_2 = {.phys = ZEUS_ADDR(0x282884)},
- .int_level_3_1 = {.phys = ZEUS_ADDR(0x282888)},
- .int_level_3_0 = {.phys = ZEUS_ADDR(0x28288c)},
- .int_level_2_3 = {.phys = ZEUS_ADDR(0x282890)},
- .int_level_2_2 = {.phys = ZEUS_ADDR(0x282894)},
- .int_level_2_1 = {.phys = ZEUS_ADDR(0x282898)},
- .int_level_2_0 = {.phys = ZEUS_ADDR(0x28289c)},
- .int_level_1_3 = {.phys = ZEUS_ADDR(0x2828a0)},
- .int_level_1_2 = {.phys = ZEUS_ADDR(0x2828a4)},
- .int_level_1_1 = {.phys = ZEUS_ADDR(0x2828a8)},
- .int_level_1_0 = {.phys = ZEUS_ADDR(0x2828ac)},
- .int_level_0_3 = {.phys = ZEUS_ADDR(0x2828b0)},
- .int_level_0_2 = {.phys = ZEUS_ADDR(0x2828b4)},
- .int_level_0_1 = {.phys = ZEUS_ADDR(0x2828b8)},
- .int_level_0_0 = {.phys = ZEUS_ADDR(0x2828bc)},
- .int_docsis_en = {.phys = ZEUS_ADDR(0x2828F4)},
-
- .mips_pll_setup = {.phys = ZEUS_ADDR(0x1a0000)},
- .fs432x4b4_usb_ctl = {.phys = ZEUS_ADDR(0x1a0018)},
- .test_bus = {.phys = ZEUS_ADDR(0x1a0238)},
- .crt_spare = {.phys = ZEUS_ADDR(0x1a0090)},
- .usb2_ohci_int_mask = {.phys = ZEUS_ADDR(0x1e000c)},
- .usb2_strap = {.phys = ZEUS_ADDR(0x1e0014)},
- .ehci_hcapbase = {.phys = ZEUS_ADDR(0x1FFE00)},
- .ohci_hc_revision = {.phys = ZEUS_ADDR(0x1FFC00)},
- .bcm1_bs_lmi_steer = {.phys = ZEUS_ADDR(0x2C0008)},
- .usb2_control = {.phys = ZEUS_ADDR(0x2c01a0)},
- .usb2_stbus_obc = {.phys = ZEUS_ADDR(0x1FFF00)},
- .usb2_stbus_mess_size = {.phys = ZEUS_ADDR(0x1FFF04)},
- .usb2_stbus_chunk_size = {.phys = ZEUS_ADDR(0x1FFF08)},
-
- .pcie_regs = {.phys = ZEUS_ADDR(0x200000)},
- .tim_ch = {.phys = ZEUS_ADDR(0x282C10)},
- .tim_cl = {.phys = ZEUS_ADDR(0x282C14)},
- .gpio_dout = {.phys = ZEUS_ADDR(0x282c20)},
- .gpio_din = {.phys = ZEUS_ADDR(0x282c24)},
- .gpio_dir = {.phys = ZEUS_ADDR(0x282c2C)},
- .watchdog = {.phys = ZEUS_ADDR(0x282c30)},
- .front_panel = {.phys = ZEUS_ADDR(0x283800)},
-};
diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c
deleted file mode 100644
index 8380605d597d..000000000000
--- a/arch/mips/powertv/asic/asic_devices.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- *
- * Description: Defines the platform resources for Gaia-based settops.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * NOTE: The bootloader allocates persistent memory at an address which is
- * 16 MiB below the end of the highest address in KSEG0. All fixed
- * address memory reservations must avoid this region.
- */
-
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/resource.h>
-#include <linux/serial_reg.h>
-#include <linux/io.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <asm/page.h>
-#include <linux/swap.h>
-#include <linux/highmem.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/mach-powertv/asic.h>
-#include <asm/mach-powertv/asic_regs.h>
-#include <asm/mach-powertv/interrupts.h>
-
-#ifdef CONFIG_BOOTLOADER_DRIVER
-#include <asm/mach-powertv/kbldr.h>
-#endif
-#include <asm/bootinfo.h>
-
-#define BOOTLDRFAMILY(byte1, byte0) (((byte1) << 8) | (byte0))
-
-/*
- * Forward Prototypes
- */
-static void pmem_setup_resource(void);
-
-/*
- * Global Variables
- */
-enum asic_type asic;
-
-unsigned int platform_features;
-unsigned int platform_family;
-struct register_map _asic_register_map;
-EXPORT_SYMBOL(_asic_register_map); /* Exported for testing */
-unsigned long asic_phy_base;
-unsigned long asic_base;
-EXPORT_SYMBOL(asic_base); /* Exported for testing */
-struct resource *gp_resources;
-
-/*
- * Not recommended for direct use; the kernel normally uses this internally.
- * Portable code should use interfaces such as ioremap, dma_map_single, etc.
- */
-unsigned long phys_to_dma_offset;
-EXPORT_SYMBOL(phys_to_dma_offset);
-
-/*
- *
- * IO Resource Definition
- *
- */
-
-struct resource asic_resource = {
- .name = "ASIC Resource",
- .start = 0,
- .end = ASIC_IO_SIZE,
- .flags = IORESOURCE_MEM,
-};
-
-/*
- * Command-line handling.
- * The forcefamily parameter, parsed by check_forcefamily() below, allows
- * the bootloader-specified model to be overridden.
- */
-static char __initdata cmdline[COMMAND_LINE_SIZE];
-
-#define FORCEFAMILY_PARAM "forcefamily"
-
-/*
- * check_forcefamily - check for, and parse, forcefamily command line parameter
- * @forced_family: Pointer to two-character array in which to store the
- * value of the forcefamily parameter, if any.
- */
-static __init int check_forcefamily(unsigned char forced_family[2])
-{
- const char *p;
-
- forced_family[0] = '\0';
- forced_family[1] = '\0';
-
- /* Check the command line for a forcefamily directive */
- strncpy(cmdline, arcs_cmdline, COMMAND_LINE_SIZE - 1);
- p = strstr(cmdline, FORCEFAMILY_PARAM);
- if (p && (p != cmdline) && (*(p - 1) != ' '))
- p = strstr(p, " " FORCEFAMILY_PARAM "=");
-
- if (p) {
- p += strlen(FORCEFAMILY_PARAM "=");
-
- if (*p == '\0' || *(p + 1) == '\0' ||
- (*(p + 2) != '\0' && *(p + 2) != ' '))
- pr_err(FORCEFAMILY_PARAM " must be exactly two "
- "characters long, ignoring value\n");
-
- else {
- forced_family[0] = *p;
- forced_family[1] = *(p + 1);
- }
- }
-
- return 0;
-}
-
-/*
- * platform_set_family - determine the major platform family type.
- *
- * Sets the global platform_family to the detected family type, or to -1
- * if the family cannot be determined.
- *
- */
-static __init noinline void platform_set_family(void)
-{
- unsigned char forced_family[2];
- unsigned short bootldr_family;
-
- if (check_forcefamily(forced_family) == 0)
- bootldr_family = BOOTLDRFAMILY(forced_family[0],
- forced_family[1]);
- else
- bootldr_family = (unsigned short) BOOTLDRFAMILY(
- CONFIG_BOOTLOADER_FAMILY[0],
- CONFIG_BOOTLOADER_FAMILY[1]);
-
- pr_info("Bootloader Family = 0x%04X\n", bootldr_family);
-
- switch (bootldr_family) {
- case BOOTLDRFAMILY('R', '1'):
- platform_family = FAMILY_1500;
- break;
- case BOOTLDRFAMILY('4', '4'):
- platform_family = FAMILY_4500;
- break;
- case BOOTLDRFAMILY('4', '6'):
- platform_family = FAMILY_4600;
- break;
- case BOOTLDRFAMILY('A', '1'):
- platform_family = FAMILY_4600VZA;
- break;
- case BOOTLDRFAMILY('8', '5'):
- platform_family = FAMILY_8500;
- break;
- case BOOTLDRFAMILY('R', '2'):
- platform_family = FAMILY_8500RNG;
- break;
- case BOOTLDRFAMILY('8', '6'):
- platform_family = FAMILY_8600;
- break;
- case BOOTLDRFAMILY('B', '1'):
- platform_family = FAMILY_8600VZB;
- break;
- case BOOTLDRFAMILY('E', '1'):
- platform_family = FAMILY_1500VZE;
- break;
- case BOOTLDRFAMILY('F', '1'):
- platform_family = FAMILY_1500VZF;
- break;
- case BOOTLDRFAMILY('8', '7'):
- platform_family = FAMILY_8700;
- break;
- default:
- platform_family = -1;
- }
-}
-
-unsigned int platform_get_family(void)
-{
- return platform_family;
-}
-EXPORT_SYMBOL(platform_get_family);
-
-/*
- * platform_get_asic - determine the ASIC type.
- *
- * Returns the ASIC type, or ASIC_UNKNOWN if unknown
- *
- */
-enum asic_type platform_get_asic(void)
-{
- return asic;
-}
-EXPORT_SYMBOL(platform_get_asic);
-
-/*
- * set_register_map - set ASIC register configuration
- * @phys_base: Physical address of the base of the ASIC registers
- * @map: Description of key ASIC registers
- */
-static void __init set_register_map(unsigned long phys_base,
- const struct register_map *map)
-{
- asic_phy_base = phys_base;
- _asic_register_map = *map;
- register_map_virtualize(&_asic_register_map);
- asic_base = (unsigned long)ioremap_nocache(phys_base, ASIC_IO_SIZE);
-}
-
-/**
- * configure_platform - configuration based on platform type.
- */
-void __init configure_platform(void)
-{
- platform_set_family();
-
- switch (platform_family) {
- case FAMILY_1500:
- case FAMILY_1500VZE:
- case FAMILY_1500VZF:
- platform_features = FFS_CAPABLE;
- asic = ASIC_CALLIOPE;
- set_register_map(CALLIOPE_IO_BASE, &calliope_register_map);
-
- if (platform_family == FAMILY_1500VZE) {
- gp_resources = non_dvr_vze_calliope_resources;
- pr_info("Platform: 1500/Vz Class E - "
- "CALLIOPE, NON_DVR_CAPABLE\n");
- } else if (platform_family == FAMILY_1500VZF) {
- gp_resources = non_dvr_vzf_calliope_resources;
- pr_info("Platform: 1500/Vz Class F - "
- "CALLIOPE, NON_DVR_CAPABLE\n");
- } else {
- gp_resources = non_dvr_calliope_resources;
- pr_info("Platform: 1500/RNG100 - CALLIOPE, "
- "NON_DVR_CAPABLE\n");
- }
- break;
-
- case FAMILY_4500:
- platform_features = FFS_CAPABLE | PCIE_CAPABLE |
- DISPLAY_CAPABLE;
- asic = ASIC_ZEUS;
- set_register_map(ZEUS_IO_BASE, &zeus_register_map);
- gp_resources = non_dvr_zeus_resources;
-
- pr_info("Platform: 4500 - ZEUS, NON_DVR_CAPABLE\n");
- break;
-
- case FAMILY_4600:
- {
- unsigned int chipversion = 0;
-
- /* The settop has PCIe, but it isn't used, so don't
- * advertise it */
- platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;
-
- /* Cronus and Cronus Lite have the same register map */
- set_register_map(CRONUS_IO_BASE, &cronus_register_map);
-
- /* ASIC version will determine if this is a real CronusLite or
- * Castrati (Cronus) */
- chipversion = asic_read(chipver3) << 24;
- chipversion |= asic_read(chipver2) << 16;
- chipversion |= asic_read(chipver1) << 8;
- chipversion |= asic_read(chipver0);
-
- if ((chipversion == CRONUS_10) || (chipversion == CRONUS_11))
- asic = ASIC_CRONUS;
- else
- asic = ASIC_CRONUSLITE;
-
- gp_resources = non_dvr_cronuslite_resources;
- pr_info("Platform: 4600 - %s, NON_DVR_CAPABLE, "
- "chipversion=0x%08X\n",
- (asic == ASIC_CRONUS) ? "CRONUS" : "CRONUS LITE",
- chipversion);
- break;
- }
- case FAMILY_4600VZA:
- platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;
- asic = ASIC_CRONUS;
- set_register_map(CRONUS_IO_BASE, &cronus_register_map);
- gp_resources = non_dvr_cronus_resources;
-
- pr_info("Platform: Vz Class A - CRONUS, NON_DVR_CAPABLE\n");
- break;
-
- case FAMILY_8500:
- case FAMILY_8500RNG:
- platform_features = DVR_CAPABLE | PCIE_CAPABLE |
- DISPLAY_CAPABLE;
- asic = ASIC_ZEUS;
- set_register_map(ZEUS_IO_BASE, &zeus_register_map);
- gp_resources = dvr_zeus_resources;
-
- pr_info("Platform: 8500/RNG200 - ZEUS, DVR_CAPABLE\n");
- break;
-
- case FAMILY_8600:
- case FAMILY_8600VZB:
- platform_features = DVR_CAPABLE | PCIE_CAPABLE |
- DISPLAY_CAPABLE;
- asic = ASIC_CRONUS;
- set_register_map(CRONUS_IO_BASE, &cronus_register_map);
- gp_resources = dvr_cronus_resources;
-
- pr_info("Platform: 8600/Vz Class B - CRONUS, "
- "DVR_CAPABLE\n");
- break;
-
- case FAMILY_8700:
- platform_features = FFS_CAPABLE | PCIE_CAPABLE;
- asic = ASIC_GAIA;
- set_register_map(GAIA_IO_BASE, &gaia_register_map);
- gp_resources = dvr_gaia_resources;
-
- pr_info("Platform: 8700 - GAIA, DVR_CAPABLE\n");
- break;
-
- default:
- pr_crit("Platform: UNKNOWN PLATFORM\n");
- break;
- }
-
- switch (asic) {
- case ASIC_ZEUS:
- phys_to_dma_offset = 0x30000000;
- break;
- case ASIC_CALLIOPE:
- phys_to_dma_offset = 0x10000000;
- break;
- case ASIC_CRONUSLITE:
- /* Fall through */
- case ASIC_CRONUS:
- /*
- * TODO: We assume that 0x10000000 aliases into 0x20000000-
- * 0x2XXXXXXX. If it instead aliases into 0x60000000-
- * 0x6XXXXXXX, the offset should be 0x50000000, not 0x10000000.
- */
- phys_to_dma_offset = 0x10000000;
- break;
- default:
- phys_to_dma_offset = 0x00000000;
- break;
- }
-}
-
-/*
- * RESOURCE ALLOCATION
- *
- */
-/*
- * Allocates/reserves the Platform memory resources early in the boot process.
- * This ignores any resources that are designated IORESOURCE_IO
- */
-void __init platform_alloc_bootmem(void)
-{
- int i;
- int total = 0;
-
- /* Get persistent memory data from the command line before allocating
- * resources. This needs to happen before normal command-line parsing
- * has been done. */
- pmem_setup_resource();
-
- /* Loop through looking for resources that want a particular address */
- for (i = 0; gp_resources[i].flags != 0; i++) {
- int size = resource_size(&gp_resources[i]);
- if ((gp_resources[i].start != 0) &&
- ((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
- reserve_bootmem(dma_to_phys(gp_resources[i].start),
- size, 0);
- total += resource_size(&gp_resources[i]);
- pr_info("reserve resource %s at %08x (%u bytes)\n",
- gp_resources[i].name, gp_resources[i].start,
- resource_size(&gp_resources[i]));
- }
- }
-
- /* Loop through assigning addresses for those that are left */
- for (i = 0; gp_resources[i].flags != 0; i++) {
- int size = resource_size(&gp_resources[i]);
- if ((gp_resources[i].start == 0) &&
- ((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
- void *mem = alloc_bootmem_pages(size);
-
- if (mem == NULL)
- pr_err("Unable to allocate bootmem pages "
- "for %s\n", gp_resources[i].name);
-
- else {
- gp_resources[i].start =
- phys_to_dma(virt_to_phys(mem));
- gp_resources[i].end =
- gp_resources[i].start + size - 1;
- total += size;
- pr_info("allocate resource %s at %08x "
- "(%u bytes)\n",
- gp_resources[i].name,
- gp_resources[i].start, size);
- }
- }
- }
-
- pr_info("Total Platform driver memory allocation: 0x%08x\n", total);
-
- /* indicate resources that are platform I/O related */
- for (i = 0; gp_resources[i].flags != 0; i++) {
- if ((gp_resources[i].start != 0) &&
- ((gp_resources[i].flags & IORESOURCE_IO) != 0)) {
- pr_info("reserved platform resource %s at %08x\n",
- gp_resources[i].name, gp_resources[i].start);
- }
- }
-}
-
-/*
- *
- * PERSISTENT MEMORY (PMEM) CONFIGURATION
- *
- */
-static unsigned long pmemaddr __initdata;
-
-static int __init early_param_pmemaddr(char *p)
-{
- pmemaddr = (unsigned long)simple_strtoul(p, NULL, 0);
- return 0;
-}
-early_param("pmemaddr", early_param_pmemaddr);
-
-static long pmemlen __initdata;
-
-static int __init early_param_pmemlen(char *p)
-{
-/* TODO: we can use this code when and if the bootloader ever changes this */
-#if 0
- pmemlen = (unsigned long)simple_strtoul(p, NULL, 0);
-#else
- pmemlen = 0x20000;
-#endif
- return 0;
-}
-early_param("pmemlen", early_param_pmemlen);
-
-/*
- * Set up persistent memory. If we were given values, we patch the array of
- * resources. Otherwise, persistent memory may be allocated anywhere at all.
- */
-static void __init pmem_setup_resource(void)
-{
- struct resource *resource;
- resource = asic_resource_get("DiagPersistentMemory");
-
- if (resource && pmemaddr && pmemlen) {
- /* The address provided by the bootloader is in kseg0. Convert it
- * to a bus address. */
- resource->start = phys_to_dma(pmemaddr - 0x80000000);
- resource->end = resource->start + pmemlen - 1;
-
- pr_info("persistent memory: start=0x%x end=0x%x\n",
- resource->start, resource->end);
- }
-}
-
-/*
- *
- * RESOURCE ACCESS FUNCTIONS
- *
- */
-
-/**
- * asic_resource_get - retrieves parameters for a platform resource.
- * @name: name of the resource to look up
- *
- * Returns a pointer to the struct resource matching @name, or NULL if none.
- *
- * CANNOT BE NAMED platform_resource_get, which would be the obvious choice,
- * as this function name is already declared.
- */
-struct resource *asic_resource_get(const char *name)
-{
- int i;
-
- for (i = 0; gp_resources[i].flags != 0; i++) {
- if (strcmp(gp_resources[i].name, name) == 0)
- return &gp_resources[i];
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(asic_resource_get);
-
-/**
- * platform_release_memory - release pre-allocated memory
- * @ptr: pointer to memory to release
- * @size: size of resource
- *
- * This must only be called for memory allocated or reserved via the boot
- * memory allocator.
- */
-void platform_release_memory(void *ptr, int size)
-{
- free_reserved_area(ptr, ptr + size, -1, NULL);
-}
-EXPORT_SYMBOL(platform_release_memory);
-
-/*
- *
- * FEATURE AVAILABILITY FUNCTIONS
- *
- */
-int platform_supports_dvr(void)
-{
- return (platform_features & DVR_CAPABLE) != 0;
-}
-
-int platform_supports_ffs(void)
-{
- return (platform_features & FFS_CAPABLE) != 0;
-}
-
-int platform_supports_pcie(void)
-{
- return (platform_features & PCIE_CAPABLE) != 0;
-}
-
-int platform_supports_display(void)
-{
- return (platform_features & DISPLAY_CAPABLE) != 0;
-}
diff --git a/arch/mips/powertv/asic/asic_int.c b/arch/mips/powertv/asic/asic_int.c
deleted file mode 100644
index f44cd9295cae..000000000000
--- a/arch/mips/powertv/asic/asic_int.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
- * Copyright (C) 2001 Ralf Baechle
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Routines for generic manipulation of the interrupts found on the PowerTV
- * platform.
- *
- * The interrupt controller is located in the South Bridge, a PIIX4 device
- * with two internal 82C95 interrupt controllers.
- */
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/kernel.h>
-#include <linux/random.h>
-
-#include <asm/irq_cpu.h>
-#include <linux/io.h>
-#include <asm/irq_regs.h>
-#include <asm/setup.h>
-#include <asm/mips-boards/generic.h>
-
-#include <asm/mach-powertv/asic_regs.h>
-
-static DEFINE_RAW_SPINLOCK(asic_irq_lock);
-
-static inline int get_int(void)
-{
- unsigned long flags;
- int irq;
-
- raw_spin_lock_irqsave(&asic_irq_lock, flags);
-
- irq = (asic_read(int_int_scan) >> 4) - 1;
-
- if (irq == 0 || irq >= NR_IRQS)
- irq = -1;
-
- raw_spin_unlock_irqrestore(&asic_irq_lock, flags);
-
- return irq;
-}
-
-static void asic_irqdispatch(void)
-{
- int irq;
-
- irq = get_int();
- if (irq < 0)
- return; /* interrupt has already been cleared */
-
- do_IRQ(irq);
-}
-
-static inline int clz(unsigned long x)
-{
- __asm__(
- " .set push \n"
- " .set mips32 \n"
- " clz %0, %1 \n"
- " .set pop \n"
- : "=r" (x)
- : "r" (x));
-
- return x;
-}
-
-/*
- * Version of ffs that only looks at bits 12..15.
- */
-static inline unsigned int irq_ffs(unsigned int pending)
-{
- return fls(pending) - 1 + CAUSEB_IP;
-}
-
-/*
- * TODO: check how it works under EIC mode.
- */
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
- int irq;
-
- irq = irq_ffs(pending);
-
- if (irq == CAUSEF_IP3)
- asic_irqdispatch();
- else if (irq >= 0)
- do_IRQ(irq);
- else
- spurious_interrupt();
-}
-
-void __init arch_init_irq(void)
-{
- int i;
-
- asic_irq_init();
-
- /*
- * Initialize interrupt exception vectors.
- */
- if (cpu_has_veic || cpu_has_vint) {
- int nvec = cpu_has_veic ? 64 : 8;
- for (i = 0; i < nvec; i++)
- set_vi_handler(i, asic_irqdispatch);
- }
-}
diff --git a/arch/mips/powertv/asic/irq_asic.c b/arch/mips/powertv/asic/irq_asic.c
deleted file mode 100644
index 9344902dc586..000000000000
--- a/arch/mips/powertv/asic/irq_asic.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Portions copyright (C) 2005-2009 Scientific Atlanta
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- * Modified from arch/mips/kernel/irq-rm7000.c:
- * Copyright (C) 2003 Ralf Baechle
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/irq.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-
-#include <asm/mach-powertv/asic_regs.h>
-
-static inline void unmask_asic_irq(struct irq_data *d)
-{
- unsigned long enable_bit;
- unsigned int irq = d->irq;
-
- enable_bit = (1 << (irq & 0x1f));
-
- switch (irq >> 5) {
- case 0:
- asic_write(asic_read(ien_int_0) | enable_bit, ien_int_0);
- break;
- case 1:
- asic_write(asic_read(ien_int_1) | enable_bit, ien_int_1);
- break;
- case 2:
- asic_write(asic_read(ien_int_2) | enable_bit, ien_int_2);
- break;
- case 3:
- asic_write(asic_read(ien_int_3) | enable_bit, ien_int_3);
- break;
- default:
- BUG();
- }
-}
-
-static inline void mask_asic_irq(struct irq_data *d)
-{
- unsigned long disable_mask;
- unsigned int irq = d->irq;
-
- disable_mask = ~(1 << (irq & 0x1f));
-
- switch (irq >> 5) {
- case 0:
- asic_write(asic_read(ien_int_0) & disable_mask, ien_int_0);
- break;
- case 1:
- asic_write(asic_read(ien_int_1) & disable_mask, ien_int_1);
- break;
- case 2:
- asic_write(asic_read(ien_int_2) & disable_mask, ien_int_2);
- break;
- case 3:
- asic_write(asic_read(ien_int_3) & disable_mask, ien_int_3);
- break;
- default:
- BUG();
- }
-}
-
-static struct irq_chip asic_irq_chip = {
- .name = "ASIC Level",
- .irq_mask = mask_asic_irq,
- .irq_unmask = unmask_asic_irq,
-};
-
-void __init asic_irq_init(void)
-{
- int i;
-
- /* set priority to 0 */
- write_c0_status(read_c0_status() & ~(0x0000fc00));
-
- asic_write(0, ien_int_0);
- asic_write(0, ien_int_1);
- asic_write(0, ien_int_2);
- asic_write(0, ien_int_3);
-
- asic_write(0x0fffffff, int_level_3_3);
- asic_write(0xffffffff, int_level_3_2);
- asic_write(0xffffffff, int_level_3_1);
- asic_write(0xffffffff, int_level_3_0);
- asic_write(0xffffffff, int_level_2_3);
- asic_write(0xffffffff, int_level_2_2);
- asic_write(0xffffffff, int_level_2_1);
- asic_write(0xffffffff, int_level_2_0);
- asic_write(0xffffffff, int_level_1_3);
- asic_write(0xffffffff, int_level_1_2);
- asic_write(0xffffffff, int_level_1_1);
- asic_write(0xffffffff, int_level_1_0);
- asic_write(0xffffffff, int_level_0_3);
- asic_write(0xffffffff, int_level_0_2);
- asic_write(0xffffffff, int_level_0_1);
- asic_write(0xffffffff, int_level_0_0);
-
- asic_write(0xf, int_int_scan);
-
- /*
- * Initialize interrupt handlers.
- */
- for (i = 0; i < NR_IRQS; i++)
- irq_set_chip_and_handler(i, &asic_irq_chip, handle_level_irq);
-}
diff --git a/arch/mips/powertv/asic/prealloc-calliope.c b/arch/mips/powertv/asic/prealloc-calliope.c
deleted file mode 100644
index 98dc51650577..000000000000
--- a/arch/mips/powertv/asic/prealloc-calliope.c
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * Memory pre-allocations for Calliope boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <asm/mach-powertv/asic.h>
-#include "prealloc.h"
-
-/*
- * NON_DVR_CAPABLE CALLIOPE RESOURCES
- */
-struct resource non_dvr_calliope_resources[] __initdata =
-{
- /*
- * VIDEO / LX1
- */
- /* Delta-Mu 1 image (2MiB) */
- PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 monitor (8KiB) */
- PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 RAM (~36.9MiB) */
- PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26700000-1,
- IORESOURCE_MEM)
-
- /*
- * Sysaudio Driver
- */
- /* DSP code and data images (1MiB) */
- PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC CPU PCM buffer (40KiB) */
- PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC AUX buffer (128KiB) */
- PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC Main buffer (128KiB) */
- PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * STAVEM driver/STAPI
- */
- /* 6MiB */
- PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * DOCSIS Subsystem
- */
- /* 7MiB */
- PREALLOC_DOCSIS("Docsis", 0x27500000, 0x27c00000-1, IORESOURCE_MEM)
-
- /*
- * GHW HAL Driver
- */
- /* PowerTV Graphics Heap (14MiB) */
- PREALLOC_NORMAL("GraphicsHeap", 0x26700000, 0x26700000+(14*1048576)-1,
- IORESOURCE_MEM)
-
- /*
- * multi com buffer area
- */
- /* 128KiB */
- PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
- IORESOURCE_MEM)
-
- /*
- * DMA Ring buffer (don't need recording buffers)
- */
- /* 680KiB */
- PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Display bins buffer for unit0
- */
- /* 4KiB */
- PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * AVFS: player HAL memory
- */
- /* 945K * 3 for playback */
- PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * PMEM
- */
- /* Persistent memory for diagnostics (64KiB) */
- PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Smartcard
- */
- /* Read and write buffers for Internal/External cards (10KiB) */
- PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * NAND Flash
- */
- /* 10KiB */
- PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
- IORESOURCE_MEM)
-
- /*
- * Synopsys GMAC Memory Region
- */
- /* 64KiB */
- PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * TFTPBuffer
- *
- * This buffer is used in some minimal configurations (e.g. two-way
- * loader) for storing software images
- */
- PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Add other resources here
- */
-
- /*
- * End of Resource marker
- */
- {
- .flags = 0,
- },
-};
-
-
-struct resource non_dvr_vze_calliope_resources[] __initdata =
-{
- /*
- * VIDEO / LX1
- */
- /* Delta-Mu 1 image (2MiB) */
- PREALLOC_NORMAL("ST231aImage", 0x22000000, 0x22200000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 monitor (8KiB) */
- PREALLOC_NORMAL("ST231aMonitor", 0x22200000, 0x22202000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 RAM (10.12MiB) */
- PREALLOC_NORMAL("MediaMemory1", 0x22202000, 0x22C20B85-1,
- IORESOURCE_MEM)
-
- /*
- * Sysaudio Driver
- */
- /* DSP code and data images (1MiB) */
- PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC CPU PCM buffer (40KiB) */
- PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC AUX buffer (16KiB) */
- PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC Main buffer (16KiB) */
- PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * STAVEM driver/STAPI
- */
- /* 3.125MiB */
- PREALLOC_NORMAL("AVMEMPartition0", 0x20396000, 0x206B6000-1,
- IORESOURCE_MEM)
-
- /*
- * GHW HAL Driver
- */
- /* PowerTV Graphics Heap (2.59MiB) */
- PREALLOC_NORMAL("GraphicsHeap", 0x20100000, 0x20396000-1,
- IORESOURCE_MEM)
-
- /*
- * multi com buffer area
- */
- /* 128KiB */
- PREALLOC_NORMAL("MulticomSHM", 0x206B6000, 0x206D6000-1,
- IORESOURCE_MEM)
-
- /*
- * DMA Ring buffer (don't need recording buffers)
- */
- /* 680KiB */
- PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Display bins buffer for unit0
- */
- /* 4KiB */
- PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * PMEM
- */
- /* Persistent memory for diagnostics (64KiB) */
- PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Smartcard
- */
- /* Read and write buffers for Internal/External cards (10KiB) */
- PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * NAND Flash
- */
- /* 10KiB */
- PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
- IORESOURCE_MEM)
-
- /*
- * Synopsys GMAC Memory Region
- */
- /* 64KiB */
- PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Add other resources here
- */
-
- /*
- * End of Resource marker
- */
- {
- .flags = 0,
- },
-};
-
-struct resource non_dvr_vzf_calliope_resources[] __initdata =
-{
- /*
- * VIDEO / LX1
- */
- /* Delta-Mu 1 image (2MiB) */
- PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 monitor (8KiB) */
- PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 RAM (~19.4 (21.5MiB - (2MiB + 8KiB))) */
- PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x25580000-1,
- IORESOURCE_MEM)
-
- /*
- * Sysaudio Driver
- */
- /* DSP code and data images (1MiB) */
- PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC CPU PCM buffer (40KiB) */
- PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC AUX buffer (128KiB) */
- PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC Main buffer (128KiB) */
- PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * STAVEM driver/STAPI
- */
- /* 4.5MiB */
- PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00480000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * GHW HAL Driver
- */
- /* PowerTV Graphics Heap (14MiB) */
- PREALLOC_NORMAL("GraphicsHeap", 0x25600000, 0x25600000+(14*1048576)-1,
- IORESOURCE_MEM)
-
- /*
- * multi com buffer area
- */
- /* 128KiB */
- PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
- IORESOURCE_MEM)
-
- /*
- * DMA Ring buffer (don't need recording buffers)
- */
- /* 680KiB */
- PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Display bins buffer for unit0
- */
- /* 4KiB */
- PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Display bins buffer for unit1
- */
- /* 4KiB */
- PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * AVFS: player HAL memory
- */
- /* 945K * 3 for playback */
- PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * PMEM
- */
- /* Persistent memory for diagnostics (64KiB) */
- PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Smartcard
- */
- /* Read and write buffers for Internal/External cards (10KiB) */
- PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * NAND Flash
- */
- /* 10KiB */
- PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
- IORESOURCE_MEM)
-
- /*
- * Synopsys GMAC Memory Region
- */
- /* 64KiB */
- PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Add other resources here
- */
-
- /*
- * End of Resource marker
- */
- {
- .flags = 0,
- },
-};
diff --git a/arch/mips/powertv/asic/prealloc-cronus.c b/arch/mips/powertv/asic/prealloc-cronus.c
deleted file mode 100644
index 7c6ce7596935..000000000000
--- a/arch/mips/powertv/asic/prealloc-cronus.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Memory pre-allocations for Cronus boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <asm/mach-powertv/asic.h>
-#include "prealloc.h"
-
-/*
- * DVR_CAPABLE CRONUS RESOURCES
- */
-struct resource dvr_cronus_resources[] __initdata =
-{
- /*
- * VIDEO1 / LX1
- */
- /* Delta-Mu 1 image (2MiB) */
- PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 monitor (8KiB) */
- PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
- PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26000000-1,
- IORESOURCE_MEM)
-
- /*
- * VIDEO2 / LX2
- */
- /* Delta-Mu 2 image (2MiB) */
- PREALLOC_NORMAL("ST231bImage", 0x60000000, 0x60200000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 2 monitor (8KiB) */
- PREALLOC_NORMAL("ST231bMonitor", 0x60200000, 0x60202000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
- PREALLOC_NORMAL("MediaMemory2", 0x60202000, 0x62000000-1,
- IORESOURCE_MEM)
-
- /*
- * Sysaudio Driver
- */
- /* DSP code and data images (1MiB) */
- PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC CPU PCM buffer (40KiB) */
- PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC AUX buffer (128KiB) */
- PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC Main buffer (128KiB) */
- PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * STAVEM driver/STAPI
- *
- * This memory area is used for allocating buffers for Video decoding
- * purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
- * Picture Buffers, Intermediate Buffers, as deemed necessary for
- * video decoding purposes, for any video decoders on Zeus.
- */
- /* 12MiB */
- PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
- IORESOURCE_MEM)
-
- /*
- * DOCSIS Subsystem
- */
- /* 7MiB */
- PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
-
- /*
- * GHW HAL Driver
- */
- /* PowerTV Graphics Heap (14MiB) */
- PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
- IORESOURCE_MEM)
-
- /*
- * multi com buffer area
- */
- /* 128KiB */
- PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
- IORESOURCE_MEM)
-
- /*
- * DMA Ring buffer
- */
- PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x002EA000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Display bins buffer for unit0
- */
- /* 4KiB */
- PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Display bins buffer for unit1
- */
- /* 4KiB */
- PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
- IORESOURCE_MEM)
-
- /*
- * ITFS
- */
- /* 815,104 bytes each for 2 ITFS partitions. */
- PREALLOC_NORMAL("ITFS", 0x00000000, 0x0018E000-1, IORESOURCE_MEM)
-
- /*
- * AVFS
- */
- /* (945K * 8) + (128K * 3): 5 playbacks / 3 servers */
- PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x007c2000-1,
- IORESOURCE_MEM)
-
- /* 4KiB */
- PREALLOC_NORMAL("AvfsFileSys", 0x00000000, 0x00001000-1,
- IORESOURCE_MEM)
-
- /*
- * PMEM
- */
- /* Persistent memory for diagnostics (64KiB) */
- PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Smartcard
- */
- /* Read and write buffers for Internal/External cards (10KiB) */
- PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
- IORESOURCE_MEM)
-
- /*
- * KAVNET
- */
- /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
- PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
- IORESOURCE_MEM)
- /* NP Image - must be video bank 1 (320KiB) */
- PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
- /* NP IPC - must be video bank 2 (512KiB) */
- PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
-
- /*
- * TFTPBuffer
- *
- * This buffer is used in some minimal configurations (e.g. two-way
- * loader) for storing software images
- */
- PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Add other resources here
- */
-
- /*
- * End of Resource marker
- */
- {
- .flags = 0,
- },
-};
-
-/*
- * NON_DVR_CAPABLE CRONUS RESOURCES
- */
-struct resource non_dvr_cronus_resources[] __initdata =
-{
- /*
- * VIDEO1 / LX1
- */
- /* Delta-Mu 1 image (2MiB) */
- PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 monitor (8KiB) */
- PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
- PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26000000-1,
- IORESOURCE_MEM)
-
- /*
- * VIDEO2 / LX2
- */
- /* Delta-Mu 2 image (2MiB) */
- PREALLOC_NORMAL("ST231bImage", 0x60000000, 0x60200000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 2 monitor (8KiB) */
- PREALLOC_NORMAL("ST231bMonitor", 0x60200000, 0x60202000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
- PREALLOC_NORMAL("MediaMemory2", 0x60202000, 0x62000000-1,
- IORESOURCE_MEM)
-
- /*
- * Sysaudio Driver
- */
- /* DSP code and data images (1MiB) */
- PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC CPU PCM buffer (40KiB) */
- PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC AUX buffer (128KiB) */
- PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC Main buffer (128KiB) */
- PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * STAVEM driver/STAPI
- *
- * This memory area is used for allocating buffers for Video decoding
- * purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
- * Picture Buffers, Intermediate Buffers, as deemed necessary for
- * video decoding purposes, for any video decoders on Zeus.
- */
- /* 12MiB */
- PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
- IORESOURCE_MEM)
-
- /*
- * DOCSIS Subsystem
- */
- /* 7MiB */
- PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
-
- /*
- * GHW HAL Driver
- */
- /* PowerTV Graphics Heap (14MiB) */
- PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
- IORESOURCE_MEM)
-
- /*
- * multi com buffer area
- */
- /* 128KiB */
- PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
- IORESOURCE_MEM)
-
- /*
- * DMA Ring buffer (don't need recording buffers)
- */
- /* 680KiB */
- PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Display bins buffer for unit0
- */
- /* 4KiB */
- PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Display bins buffer for unit1
- */
- /* 4KiB */
- PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
- IORESOURCE_MEM)
-
- /*
- * AVFS: player HAL memory
- */
- /* 945K * 3 for playback */
- PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1, IORESOURCE_MEM)
-
- /*
- * PMEM
- */
- /* Persistent memory for diagnostics (64KiB) */
- PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Smartcard
- */
- /* Read and write buffers for Internal/External cards (10KiB) */
- PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, IORESOURCE_MEM)
-
- /*
- * KAVNET
- */
- /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
- PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
- IORESOURCE_MEM)
- /* NP Image - must be video bank 1 (320KiB) */
- PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
- /* NP IPC - must be video bank 2 (512KiB) */
- PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
-
- /*
- * NAND Flash
- */
- /* 10KiB */
- PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
- IORESOURCE_MEM)
-
- /*
- * Add other resources here
- */
-
- /*
- * End of Resource marker
- */
- {
- .flags = 0,
- },
-};
diff --git a/arch/mips/powertv/asic/prealloc-cronuslite.c b/arch/mips/powertv/asic/prealloc-cronuslite.c
deleted file mode 100644
index a7937ba7b4c0..000000000000
--- a/arch/mips/powertv/asic/prealloc-cronuslite.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Memory pre-allocations for Cronus Lite boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <asm/mach-powertv/asic.h>
-#include "prealloc.h"
-
-/*
- * NON_DVR_CAPABLE CRONUSLITE RESOURCES
- */
-struct resource non_dvr_cronuslite_resources[] __initdata =
-{
- /*
- * VIDEO2 / LX2
- */
- /* Delta-Mu 1 image (2MiB) */
- PREALLOC_NORMAL("ST231aImage", 0x60000000, 0x60200000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 monitor (8KiB) */
- PREALLOC_NORMAL("ST231aMonitor", 0x60200000, 0x60202000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
- PREALLOC_NORMAL("MediaMemory1", 0x60202000, 0x62000000-1,
- IORESOURCE_MEM)
-
- /*
- * Sysaudio Driver
- */
- /* DSP code and data images (1MiB) */
- PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC CPU PCM buffer (40KiB) */
- PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC AUX buffer (128KiB) */
- PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC Main buffer (128KiB) */
- PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * STAVEM driver/STAPI
- *
- * This memory area is used for allocating buffers for Video decoding
- * purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
- * Picture Buffers, Intermediate Buffers, as deemed necessary for
- * video decoding purposes, for any video decoders on Zeus.
- */
- /* 6MiB */
- PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
- IORESOURCE_MEM)
-
- /*
- * DOCSIS Subsystem
- */
- /* 7MiB */
- PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
-
- /*
- * GHW HAL Driver
- */
- /* PowerTV Graphics Heap (14MiB) */
- PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
- IORESOURCE_MEM)
-
- /*
- * multi com buffer area
- */
- /* 128KiB */
- PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
- IORESOURCE_MEM)
-
- /*
- * DMA Ring buffer (don't need recording buffers)
- */
- /* 680KiB */
- PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Display bins buffer for unit0
- */
- /* 4KiB */
- PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Display bins buffer for unit1
- */
- /* 4KiB */
- PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
- IORESOURCE_MEM)
-
- /*
- * AVFS: player HAL memory
- */
- /* 945K * 3 for playback */
- PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * PMEM
- */
- /* Persistent memory for diagnostics (64KiB) */
- PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Smartcard
- */
- /* Read and write buffers for Internal/External cards (10KiB) */
- PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, IORESOURCE_MEM)
-
- /*
- * KAVNET
- */
- /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
- PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
- IORESOURCE_MEM)
- /* NP Image - must be video bank 1 (320KiB) */
- PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
- /* NP IPC - must be video bank 2 (512KiB) */
- PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
-
- /*
- * NAND Flash
- */
- /* 10KiB */
- PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
- IORESOURCE_MEM)
-
- /*
- * TFTPBuffer
- *
- * This buffer is used in some minimal configurations (e.g. two-way
- * loader) for storing software images
- */
- PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Add other resources here
- */
-
- /*
- * End of Resource marker
- */
- {
- .flags = 0,
- },
-};
diff --git a/arch/mips/powertv/asic/prealloc-gaia.c b/arch/mips/powertv/asic/prealloc-gaia.c
deleted file mode 100644
index 2303bbfe6b82..000000000000
--- a/arch/mips/powertv/asic/prealloc-gaia.c
+++ /dev/null
@@ -1,589 +0,0 @@
-/*
- * Memory pre-allocations for Gaia boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: David VomLehn
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-/*
- * DVR_CAPABLE GAIA RESOURCES
- */
-struct resource dvr_gaia_resources[] __initdata = {
- /*
- *
- * VIDEO1 / LX1
- *
- */
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x241FFFFF, /* 2MiB */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24201FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x24202000,
- .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_MEM,
- },
- /*
- *
- * VIDEO2 / LX2
- *
- */
- {
- .name = "ST231bImage", /* Delta-Mu 2 image and ram */
- .start = 0x60000000,
- .end = 0x601FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
- .start = 0x60200000,
- .end = 0x60201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory2",
- .start = 0x60202000,
- .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * Sysaudio Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * DSP_Image_Buff - DSP code and data images (1MB)
- * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
- * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
- * ADSC_Main_Buff - ADSC Main buffer (16KB)
- *
- */
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- /*
- *
- * STAVEM driver/STAPI
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * This memory area is used for allocating buffers for Video decoding
- * purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
- * Picture Buffers, Intermediate Buffers, as deemed necessary for
- * video decoding purposes, for any video decoders on Zeus.
- *
- */
- {
- .name = "AVMEMPartition0",
- .start = 0x63580000,
- .end = 0x64180000 - 1, /* 12 MB total */
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * DOCSIS Subsystem
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
- */
- {
- .name = "Docsis",
- .start = 0x62000000,
- .end = 0x62700000 - 1, /* 7 MB total */
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * GHW HAL Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * GraphicsHeap - PowerTV Graphics Heap
- *
- */
- {
- .name = "GraphicsHeap",
- .start = 0x62700000,
- .end = 0x63500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * multi com buffer area
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
- */
- {
- .name = "MulticomSHM",
- .start = 0x26000000,
- .end = 0x26020000 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- *
- * DMA Ring buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
- */
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x00280000 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- *
- * Display bins buffer for unit0
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit0
- *
- */
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
- /*
- *
- * Display bins buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit1
- *
- */
- {
- .name = "DisplayBins1",
- .start = 0x64AD4000,
- .end = 0x64AD5000 - 1, /* 4 KB total */
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * ITFS
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
- */
- {
- .name = "ITFS",
- .start = 0x64180000,
- /* 815,104 bytes each for 2 ITFS partitions. */
- .end = 0x6430DFFF,
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * AVFS
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
- */
- {
- .name = "AvfsDmaMem",
- .start = 0x6430E000,
- /* (945K * 8) + (128K * 3): 5 playbacks / 3 servers */
- .end = 0x64AD0000 - 1,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "AvfsFileSys",
- .start = 0x64AD0000,
- .end = 0x64AD1000 - 1, /* 4K */
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * Smartcard
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Read and write buffers for Internal/External cards
- *
- */
- {
- .name = "SmartCardInfo",
- .start = 0x64AD1000,
- .end = 0x64AD3800 - 1,
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * KAVNET
- * NP Reset Vector - must be of the form xxCxxxxx
- * NP Image - must be video bank 1
- * NP IPC - must be video bank 2
- */
- {
- .name = "NP_Reset_Vector",
- .start = 0x27c00000,
- .end = 0x27c01000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_Image",
- .start = 0x27020000,
- .end = 0x27060000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_IPC",
- .start = 0x63500000,
- .end = 0x63580000 - 1,
- .flags = IORESOURCE_IO,
- },
- /*
- * Add other resources here
- */
- { },
-};
-
-/*
- * NON_DVR_CAPABLE GAIA RESOURCES
- */
-struct resource non_dvr_gaia_resources[] __initdata = {
- /*
- *
- * VIDEO1 / LX1
- *
- */
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x241FFFFF, /* 2MiB */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24201FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x24202000,
- .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_MEM,
- },
- /*
- *
- * VIDEO2 / LX2
- *
- */
- {
- .name = "ST231bImage", /* Delta-Mu 2 image and ram */
- .start = 0x60000000,
- .end = 0x601FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
- .start = 0x60200000,
- .end = 0x60201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory2",
- .start = 0x60202000,
- .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * Sysaudio Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * DSP_Image_Buff - DSP code and data images (1MB)
- * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
- * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
- * ADSC_Main_Buff - ADSC Main buffer (16KB)
- *
- */
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- /*
- *
-	 * STAVMEM driver/STAPI
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * This memory area is used for allocating buffers for Video decoding
- * purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
- * Picture Buffers, Intermediate Buffers, as deemed necessary for
- * video decoding purposes, for any video decoders on Zeus.
- *
- */
- {
- .name = "AVMEMPartition0",
- .start = 0x63580000,
- .end = 0x64180000 - 1, /* 12 MB total */
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * DOCSIS Subsystem
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
- */
- {
- .name = "Docsis",
- .start = 0x62000000,
- .end = 0x62700000 - 1, /* 7 MB total */
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * GHW HAL Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * GraphicsHeap - PowerTV Graphics Heap
- *
- */
- {
- .name = "GraphicsHeap",
- .start = 0x62700000,
- .end = 0x63500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * multi com buffer area
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
-	 *  MulticomSHM - multi com shared memory
- *
- */
- {
- .name = "MulticomSHM",
- .start = 0x26000000,
- .end = 0x26020000 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- *
- * DMA Ring buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
-	 *  BMM_Buffer - DMA ring buffer
- *
- */
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- *
- * Display bins buffer for unit0
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit0
- *
- */
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
- /*
- *
- * Display bins buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit1
- *
- */
- {
- .name = "DisplayBins1",
- .start = 0x64AD4000,
- .end = 0x64AD5000 - 1, /* 4 KB total */
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * AVFS: player HAL memory
- *
- *
- */
- {
- .name = "AvfsDmaMem",
- .start = 0x6430E000,
- .end = 0x645D2C00 - 1, /* 945K * 3 for playback */
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * PMEM
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Persistent memory for diagnostics.
- *
- */
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- *
- * Smartcard
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Read and write buffers for Internal/External cards
- *
- */
- {
- .name = "SmartCardInfo",
- .start = 0x64AD1000,
- .end = 0x64AD3800 - 1,
- .flags = IORESOURCE_IO,
- },
- /*
- *
- * KAVNET
- * NP Reset Vector - must be of the form xxCxxxxx
- * NP Image - must be video bank 1
- * NP IPC - must be video bank 2
- */
- {
- .name = "NP_Reset_Vector",
- .start = 0x27c00000,
- .end = 0x27c01000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_Image",
- .start = 0x27020000,
- .end = 0x27060000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_IPC",
- .start = 0x63500000,
- .end = 0x63580000 - 1,
- .flags = IORESOURCE_IO,
- },
- { },
-};
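The tables above are consumed by the platform ASIC setup code, which publishes each named window so that the owning driver can claim it later. As a purely illustrative sketch — the lookup helper asic_resource_get() is an assumption here and does not appear in this diff — a consumer might resolve one of these regions by its .name field and map it:

#include <linux/ioport.h>
#include <linux/io.h>
#include <asm/mach-powertv/asic.h>	/* assumed to declare asic_resource_get() */

/* Illustrative only: look up one of the named regions above and map it. */
static void __iomem *map_named_region(const char *name)
{
	struct resource *res = asic_resource_get(name);	/* assumed helper */

	if (!res)
		return NULL;

	return ioremap(res->start, resource_size(res));
}

A graphics driver, for instance, might call map_named_region("GraphicsHeap") and fail gracefully if the region was not preallocated for this configuration.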
diff --git a/arch/mips/powertv/asic/prealloc-zeus.c b/arch/mips/powertv/asic/prealloc-zeus.c
deleted file mode 100644
index 6e76f09c68d6..000000000000
--- a/arch/mips/powertv/asic/prealloc-zeus.c
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Memory pre-allocations for Zeus boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <asm/mach-powertv/asic.h>
-#include "prealloc.h"
-
-/*
- * DVR_CAPABLE RESOURCES
- */
-struct resource dvr_zeus_resources[] __initdata =
-{
- /*
- * VIDEO1 / LX1
- */
- /* Delta-Mu 1 image (2MiB) */
- PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 monitor (8KiB) */
- PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
- PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
- IORESOURCE_MEM)
-
- /*
- * VIDEO2 / LX2
- */
- /* Delta-Mu 2 image (2MiB) */
- PREALLOC_NORMAL("ST231bImage", 0x30000000, 0x30200000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 2 monitor (8KiB) */
- PREALLOC_NORMAL("ST231bMonitor", 0x30200000, 0x30202000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
- PREALLOC_NORMAL("MediaMemory2", 0x30202000, 0x32000000-1,
- IORESOURCE_MEM)
-
- /*
- * Sysaudio Driver
- */
- /* DSP code and data images (1MiB) */
- PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC CPU PCM buffer (40KiB) */
- PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC AUX buffer (16KiB) */
- PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC Main buffer (16KiB) */
- PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * STAVEM driver/STAPI
- *
- * This memory area is used for allocating buffers for Video decoding
- * purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
- * Picture Buffers, Intermediate Buffers, as deemed necessary for
- * video decoding purposes, for any video decoders on Zeus.
- */
- /* 12MiB */
- PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * DOCSIS Subsystem
- */
- /* 7MiB */
- PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)
-
- /*
- * GHW HAL Driver
- */
- /* PowerTV Graphics Heap (14MiB) */
- PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
- IORESOURCE_MEM)
-
- /*
- * multi com buffer area
- */
- /* 128KiB */
- PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
- IORESOURCE_MEM)
-
- /*
- * DMA Ring buffer
- */
- /* 2.5MiB */
- PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Display bins buffer for unit0
- */
- /* 4KiB */
- PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Display bins buffer for unit1
- */
- /* 4KiB */
- PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * ITFS
- */
- /* 815,104 bytes each for 2 ITFS partitions. */
- PREALLOC_NORMAL("ITFS", 0x00000000, 0x0018E000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * AVFS
- */
-	/* (945K * 8) + (128K * 3): 5 playbacks / 3 servers */
- PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x007c2000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* 4KiB */
- PREALLOC_NORMAL("AvfsFileSys", 0x00000000, 0x00001000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * PMEM
- */
- /* Persistent memory for diagnostics (64KiB) */
- PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Smartcard
- */
- /* Read and write buffers for Internal/External cards (10KiB) */
- PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * TFTPBuffer
- *
- * This buffer is used in some minimal configurations (e.g. two-way
- * loader) for storing software images
- */
- PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Add other resources here
- */
-
- /*
- * End of Resource marker
- */
- {
- .flags = 0,
- },
-};
-
-/*
- * NON_DVR_CAPABLE ZEUS RESOURCES
- */
-struct resource non_dvr_zeus_resources[] __initdata =
-{
- /*
- * VIDEO1 / LX1
- */
- /* Delta-Mu 1 image (2MiB) */
- PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 monitor (8KiB) */
- PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
- IORESOURCE_MEM)
- /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
- PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
- IORESOURCE_MEM)
-
- /*
- * Sysaudio Driver
- */
- /* DSP code and data images (1MiB) */
- PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC CPU PCM buffer (40KiB) */
- PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC AUX buffer (16KiB) */
- PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
- /* ADSC Main buffer (16KiB) */
- PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * STAVEM driver/STAPI
- */
- /* 6MiB */
- PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * DOCSIS Subsystem
- */
- /* 7MiB */
- PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)
-
- /*
- * GHW HAL Driver
- */
- /* PowerTV Graphics Heap (14MiB) */
- PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
- IORESOURCE_MEM)
-
- /*
- * multi com buffer area
- */
- /* 128KiB */
- PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
- IORESOURCE_MEM)
-
- /*
- * DMA Ring buffer
- */
- /* 2.5MiB */
- PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Display bins buffer for unit0
- */
- /* 4KiB */
- PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * AVFS: player HAL memory
- */
- /* 945K * 3 for playback */
- PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * PMEM
- */
- /* Persistent memory for diagnostics (64KiB) */
- PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Smartcard
- */
- /* Read and write buffers for Internal/External cards (10KiB) */
- PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * NAND Flash
- */
-	/* 1KiB */
- PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
- IORESOURCE_MEM)
-
- /*
- * TFTPBuffer
- *
- * This buffer is used in some minimal configurations (e.g. two-way
- * loader) for storing software images
- */
- PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
- (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
- /*
- * Add other resources here
- */
-
- /*
- * End of Resource marker
- */
- {
- .flags = 0,
- },
-};
diff --git a/arch/mips/powertv/asic/prealloc.h b/arch/mips/powertv/asic/prealloc.h
deleted file mode 100644
index 8e682df17856..000000000000
--- a/arch/mips/powertv/asic/prealloc.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Definitions for memory preallocations
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef _ARCH_MIPS_POWERTV_ASIC_PREALLOC_H
-#define _ARCH_MIPS_POWERTV_ASIC_PREALLOC_H
-
-#define KIBIBYTE(n) ((n) * 1024) /* Number of kibibytes */
-#define MEBIBYTE(n) ((n) * KIBIBYTE(1024)) /* Number of mebibytes */
-
-/* "struct resource" array element definition */
-#define PREALLOC(NAME, START, END, FLAGS) { \
- .name = (NAME), \
- .start = (START), \
- .end = (END), \
- .flags = (FLAGS) \
- },
-
-/* Individual resources in the preallocated resource arrays are defined using
- * macros. These macros are conditionally defined based on their
- * corresponding kernel configuration flag:
- * - CONFIG_PREALLOC_NORMAL: preallocate resources for a normal settop box
- * - CONFIG_PREALLOC_TFTP: preallocate the TFTP download resource
- * - CONFIG_PREALLOC_DOCSIS: preallocate the DOCSIS resource
- * - CONFIG_PREALLOC_PMEM: reserve space for persistent memory
- */
-#ifdef CONFIG_PREALLOC_NORMAL
-#define PREALLOC_NORMAL(name, start, end, flags) \
- PREALLOC(name, start, end, flags)
-#else
-#define PREALLOC_NORMAL(name, start, end, flags)
-#endif
-
-#ifdef CONFIG_PREALLOC_TFTP
-#define PREALLOC_TFTP(name, start, end, flags) \
- PREALLOC(name, start, end, flags)
-#else
-#define PREALLOC_TFTP(name, start, end, flags)
-#endif
-
-#ifdef CONFIG_PREALLOC_DOCSIS
-#define PREALLOC_DOCSIS(name, start, end, flags) \
- PREALLOC(name, start, end, flags)
-#else
-#define PREALLOC_DOCSIS(name, start, end, flags)
-#endif
-
-#ifdef CONFIG_PREALLOC_PMEM
-#define PREALLOC_PMEM(name, start, end, flags) \
- PREALLOC(name, start, end, flags)
-#else
-#define PREALLOC_PMEM(name, start, end, flags)
-#endif
-#endif
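For readers unfamiliar with the pattern, each PREALLOC_*() line in the tables above is nothing more than a conditional struct resource initializer. A hand-expanded equivalent of one table line is shown below (the array name is illustrative only); with the corresponding CONFIG_PREALLOC_* option disabled, the macro expands to nothing and the entry simply disappears from the table.

#include <linux/init.h>
#include <linux/ioport.h>

/*
 * With CONFIG_PREALLOC_NORMAL enabled, the table line
 *
 *	PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
 *			IORESOURCE_MEM)
 *
 * is equivalent to writing the element out by hand:
 */
static struct resource example_prealloc[] __initdata = {
	{
		.name	= "GraphicsHeap",
		.start	= 0x46900000,
		.end	= 0x47700000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{ .flags = 0 },		/* end-of-table marker, as in the arrays above */
};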
diff --git a/arch/mips/powertv/init.c b/arch/mips/powertv/init.c
deleted file mode 100644
index 498926377e51..000000000000
--- a/arch/mips/powertv/init.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
- * All rights reserved.
- * Authors: Carsten Langgaard <carstenl@mips.com>
- * Maciej W. Rozycki <macro@mips.com>
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * PROM library initialisation code.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-#include <linux/io.h>
-#include <asm/cacheflush.h>
-#include <asm/traps.h>
-
-#include <asm/mips-boards/generic.h>
-#include <asm/mach-powertv/asic.h>
-
-#include "init.h"
-
-static int *_prom_envp;
-unsigned long _prom_memsize;
-
-/*
- * YAMON (32-bit PROM) passes arguments and environment as 32-bit pointers.
- * This macro takes care of sign extension if running in 64-bit mode.
- */
-#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
-
-char *prom_getenv(char *envname)
-{
- char *result = NULL;
-
- if (_prom_envp != NULL) {
- /*
- * Return a pointer to the given environment variable.
- * In 64-bit mode: we're using 64-bit pointers, but all pointers
- * in the PROM structures are only 32-bit, so we need some
- * workarounds, if we are running in 64-bit mode.
- */
- int i, index = 0;
-
- i = strlen(envname);
-
- while (prom_envp(index)) {
- if (strncmp(envname, prom_envp(index), i) == 0) {
- result = prom_envp(index + 1);
- break;
- }
- index += 2;
- }
- }
-
- return result;
-}
-
-void __init prom_init(void)
-{
- int prom_argc;
- char *prom_argv;
-
- prom_argc = fw_arg0;
- prom_argv = (char *) fw_arg1;
- _prom_envp = (int *) fw_arg2;
- _prom_memsize = (unsigned long) fw_arg3;
-
- if (prom_argc == 1) {
- strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
- strlcat(arcs_cmdline, prom_argv, COMMAND_LINE_SIZE);
- }
-
- configure_platform();
- prom_meminit();
-}
diff --git a/arch/mips/powertv/init.h b/arch/mips/powertv/init.h
deleted file mode 100644
index c1a8bd0dbe4b..000000000000
--- a/arch/mips/powertv/init.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Definitions from powertv init.c file
- *
- * Copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: David VomLehn
- */
-
-#ifndef _POWERTV_INIT_H
-#define _POWERTV_INIT_H
-extern unsigned long _prom_memsize;
-extern void prom_meminit(void);
-extern char *prom_getenv(char *name);
-#endif
diff --git a/arch/mips/powertv/ioremap.c b/arch/mips/powertv/ioremap.c
deleted file mode 100644
index d060478aab03..000000000000
--- a/arch/mips/powertv/ioremap.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * ioremap.c
- *
- * Support for mapping between dma_addr_t values and phys_addr_t values.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: David VomLehn <dvomlehn@cisco.com>
- *
- * Description: Translation between DMA addresses and physical addresses
- *		for the SA settop.
- *
- * NOTE: The bootloader allocates persistent memory at an address which is
- * 16 MiB below the end of the highest address in KSEG0. All fixed
- * address memory reservations must avoid this region.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/mach-powertv/ioremap.h>
-
-/*
- * Define the sizes of and masks for grains in physical and DMA space. The
- * values are the same but the types are not.
- */
-#define IOR_PHYS_GRAIN ((phys_addr_t) 1 << IOR_LSBITS)
-#define IOR_PHYS_GRAIN_MASK (IOR_PHYS_GRAIN - 1)
-
-#define IOR_DMA_GRAIN ((dma_addr_t) 1 << IOR_LSBITS)
-#define IOR_DMA_GRAIN_MASK (IOR_DMA_GRAIN - 1)
-
-/*
- * Values that, when accessed by an index derived from a phys_addr_t and
- * added to phys_addr_t value, yield a DMA address
- */
-struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA];
-EXPORT_SYMBOL(_ior_phys_to_dma);
-
-/*
- * Values that, when accessed by an index derived from a dma_addr_t and
- * added to that dma_addr_t value, yield a physical address
- */
-struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS];
-EXPORT_SYMBOL(_ior_dma_to_phys);
-
-/**
- * setup_dma_to_phys - set up conversion from DMA to physical addresses
- * @dma:	Starting DMA address of the region; its top IOR_LSBITS bits index
- *		into the array _ior_dma_to_phys.
- * @delta: Value that, when added to the DMA address, will yield the
- * physical address
- * @s: Number of bytes in the section of memory with the given delta
- * between DMA and physical addresses.
- */
-static void setup_dma_to_phys(dma_addr_t dma, phys_addr_t delta, dma_addr_t s)
-{
- int dma_idx, first_idx, last_idx;
- phys_addr_t first, last;
-
- /*
-	 * Calculate the first and last indices, rounding both the start and
-	 * end addresses down to grain boundaries.
- */
- first = dma & ~IOR_DMA_GRAIN_MASK;
- last = (dma + s - 1) & ~IOR_DMA_GRAIN_MASK;
- first_idx = first >> IOR_LSBITS; /* Convert to indices */
- last_idx = last >> IOR_LSBITS;
-
- for (dma_idx = first_idx; dma_idx <= last_idx; dma_idx++)
- _ior_dma_to_phys[dma_idx].offset = delta >> IOR_DMA_SHIFT;
-}
-
-/**
- * setup_phys_to_dma - set up conversion from physical to DMA addresses
- * @phys:	Starting physical address of the region; its top IOR_LSBITS bits
- *		index into the array _ior_phys_to_dma.
- * @delta:	Value that, when added to the physical address, will yield the
- *		DMA address
- * @s: Number of bytes in the section of memory with the given delta
- * between DMA and physical addresses.
- */
-static void setup_phys_to_dma(phys_addr_t phys, dma_addr_t delta, phys_addr_t s)
-{
- int phys_idx, first_idx, last_idx;
- phys_addr_t first, last;
-
- /*
-	 * Calculate the first and last indices, rounding both the start and
-	 * end addresses down to grain boundaries.
- */
- first = phys & ~IOR_PHYS_GRAIN_MASK;
- last = (phys + s - 1) & ~IOR_PHYS_GRAIN_MASK;
- first_idx = first >> IOR_LSBITS; /* Convert to indices */
- last_idx = last >> IOR_LSBITS;
-
- for (phys_idx = first_idx; phys_idx <= last_idx; phys_idx++)
- _ior_phys_to_dma[phys_idx].offset = delta >> IOR_PHYS_SHIFT;
-}
-
-/**
- * ioremap_add_map - add to the physical and DMA address conversion arrays
- * @phys: Process's view of the address of the start of the memory chunk
- * @dma: DMA address of the start of the memory chunk
- * @size: Size, in bytes, of the chunk of memory
- *
- * NOTE: It might be obvious, but the assumption is that all @size bytes have
- * the same offset between the physical address and the DMA address.
- */
-void ioremap_add_map(phys_addr_t phys, phys_addr_t dma, phys_addr_t size)
-{
- if (size == 0)
- return;
-
- if ((dma & IOR_DMA_GRAIN_MASK) != 0 ||
- (phys & IOR_PHYS_GRAIN_MASK) != 0 ||
- (size & IOR_PHYS_GRAIN_MASK) != 0)
- pr_crit("Memory allocation must be in chunks of 0x%x bytes\n",
- IOR_PHYS_GRAIN);
-
- setup_dma_to_phys(dma, phys - dma, size);
- setup_phys_to_dma(phys, dma - phys, size);
-}
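ioremap_add_map() above only fills in the two offset tables; the converters that use them live in asm/mach-powertv/ioremap.h, which is not part of this hunk. The sketch below shows the table lookup those helpers are built around — treat it as a simplified illustration under that assumption, not the actual header code.

#include <asm/mach-powertv/ioremap.h>

/*
 * Simplified sketch: the top bits of a physical address select a slot that
 * ioremap_add_map() filled in, and the stored offset, scaled back up by
 * IOR_PHYS_SHIFT, is the delta that converts it to a DMA address.
 */
static inline dma_addr_t example_phys_to_dma(phys_addr_t phys)
{
	struct ior_phys_to_dma *p = &_ior_phys_to_dma[phys >> IOR_LSBITS];

	return phys + ((dma_addr_t)p->offset << IOR_PHYS_SHIFT);
}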
diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c
deleted file mode 100644
index bc2f3ca22b41..000000000000
--- a/arch/mips/powertv/memory.c
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Apparently originally from arch/mips/malta-memory.c. Modified to work
- * with the PowerTV bootloader.
- */
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/bootmem.h>
-#include <linux/pfn.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-#include <asm/page.h>
-#include <asm/sections.h>
-
-#include <asm/mach-powertv/asic.h>
-#include <asm/mach-powertv/ioremap.h>
-
-#include "init.h"
-
-/* Memory constants */
-#define KIBIBYTE(n) ((n) * 1024) /* Number of kibibytes */
-#define MEBIBYTE(n) ((n) * KIBIBYTE(1024)) /* Number of mebibytes */
-#define DEFAULT_MEMSIZE MEBIBYTE(128) /* If no memsize provided */
-
-#define BLDR_SIZE KIBIBYTE(256) /* Memory reserved for bldr */
-#define RV_SIZE MEBIBYTE(4) /* Size of reset vector */
-
-#define LOW_MEM_END 0x20000000 /* Highest low memory address */
-#define BLDR_ALIAS 0x10000000 /* Bootloader address */
-#define RV_PHYS 0x1fc00000 /* Reset vector address */
-#define LOW_RAM_END RV_PHYS /* End of real RAM in low mem */
-
-/*
- * Very low-level conversion from processor physical address to device
- * DMA address for the first bank of memory.
- */
-#define PHYS_TO_DMA(paddr) ((paddr) + (CONFIG_LOW_RAM_DMA - LOW_RAM_ALIAS))
-
-unsigned long ptv_memsize;
-
-/*
- * struct low_mem_reserved - Items in low memory that are reserved
- * @start: Physical address of item
- * @size: Size, in bytes, of this item
- * @is_aliased: True if this is RAM aliased from another location. If false,
- * it is something other than aliased RAM and the RAM in the
- * unaliased address is still visible outside of low memory.
- */
-struct low_mem_reserved {
- phys_addr_t start;
- phys_addr_t size;
- bool is_aliased;
-};
-
-/*
- * Must be in ascending address order
- */
-struct low_mem_reserved low_mem_reserved[] = {
- {BLDR_ALIAS, BLDR_SIZE, true}, /* Bootloader RAM */
- {RV_PHYS, RV_SIZE, false}, /* Reset vector */
-};
-
-/*
- * struct mem_layout - layout of a piece of the system RAM
- * @phys: Physical address of the start of this piece of RAM. This is the
- * address at which both the processor and I/O devices see the
- * RAM.
- * @alias: Alias of this piece of memory in order to make it appear in
- * the low memory part of the processor's address space. I/O
- * devices don't see anything here.
- * @size: Size, in bytes, of this piece of RAM
- */
-struct mem_layout {
- phys_addr_t phys;
- phys_addr_t alias;
- phys_addr_t size;
-};
-
-/*
- * struct mem_layout_list - list descriptor for layouts of system RAM pieces
- * @family: Specifies the family being described
- * @n: Number of &struct mem_layout elements
- * @layout: Pointer to the list of &mem_layout structures
- */
-struct mem_layout_list {
- enum family_type family;
- size_t n;
- struct mem_layout *layout;
-};
-
-static struct mem_layout f1500_layout[] = {
- {0x20000000, 0x10000000, MEBIBYTE(256)},
-};
-
-static struct mem_layout f4500_layout[] = {
- {0x40000000, 0x10000000, MEBIBYTE(256)},
- {0x20000000, 0x20000000, MEBIBYTE(32)},
-};
-
-static struct mem_layout f8500_layout[] = {
- {0x40000000, 0x10000000, MEBIBYTE(256)},
- {0x20000000, 0x20000000, MEBIBYTE(32)},
- {0x30000000, 0x30000000, MEBIBYTE(32)},
-};
-
-static struct mem_layout fx600_layout[] = {
- {0x20000000, 0x10000000, MEBIBYTE(256)},
- {0x60000000, 0x60000000, MEBIBYTE(128)},
-};
-
-static struct mem_layout_list layout_list[] = {
- {FAMILY_1500, ARRAY_SIZE(f1500_layout), f1500_layout},
- {FAMILY_1500VZE, ARRAY_SIZE(f1500_layout), f1500_layout},
- {FAMILY_1500VZF, ARRAY_SIZE(f1500_layout), f1500_layout},
- {FAMILY_4500, ARRAY_SIZE(f4500_layout), f4500_layout},
- {FAMILY_8500, ARRAY_SIZE(f8500_layout), f8500_layout},
- {FAMILY_8500RNG, ARRAY_SIZE(f8500_layout), f8500_layout},
- {FAMILY_4600, ARRAY_SIZE(fx600_layout), fx600_layout},
- {FAMILY_4600VZA, ARRAY_SIZE(fx600_layout), fx600_layout},
- {FAMILY_8600, ARRAY_SIZE(fx600_layout), fx600_layout},
- {FAMILY_8600VZB, ARRAY_SIZE(fx600_layout), fx600_layout},
-};
-
-/* If we can't determine the layout, use this */
-static struct mem_layout default_layout[] = {
- {0x20000000, 0x10000000, MEBIBYTE(128)},
-};
-
-/**
- * register_non_ram - register low memory not available for RAM usage
- */
-static __init void register_non_ram(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(low_mem_reserved); i++)
- add_memory_region(low_mem_reserved[i].start,
- low_mem_reserved[i].size, BOOT_MEM_RESERVED);
-}
-
-/**
- * get_memsize - get the size of memory as a single bank
- */
-static phys_addr_t get_memsize(void)
-{
- static char cmdline[COMMAND_LINE_SIZE] __initdata;
- phys_addr_t memsize = 0;
- char *memsize_str;
- char *ptr;
-
- /* Check the command line first for a memsize directive */
- strcpy(cmdline, arcs_cmdline);
- ptr = strstr(cmdline, "memsize=");
- if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
- ptr = strstr(ptr, " memsize=");
-
- if (ptr) {
- memsize = memparse(ptr + 8, &ptr);
- } else {
- /* otherwise look in the environment */
- memsize_str = prom_getenv("memsize");
-
- if (memsize_str != NULL) {
- pr_info("prom memsize = %s\n", memsize_str);
- memsize = simple_strtol(memsize_str, NULL, 0);
- }
-
- if (memsize == 0) {
- if (_prom_memsize != 0) {
- memsize = _prom_memsize;
- pr_info("_prom_memsize = 0x%x\n", memsize);
- /* add in memory that the bootloader doesn't
- * report */
- memsize += BLDR_SIZE;
- } else {
- memsize = DEFAULT_MEMSIZE;
- pr_info("Memsize not passed by bootloader, "
- "defaulting to 0x%x\n", memsize);
- }
- }
- }
-
- return memsize;
-}
-
-/**
- * register_low_ram - register an aliased section of RAM
- * @p: Alias address of memory
- * @n: Number of bytes in this section of memory
- *
- * Returns the number of bytes registered
- *
- */
-static __init phys_addr_t register_low_ram(phys_addr_t p, phys_addr_t n)
-{
- phys_addr_t s;
- int i;
- phys_addr_t orig_n;
-
- orig_n = n;
-
- BUG_ON(p + n > RV_PHYS);
-
- for (i = 0; n != 0 && i < ARRAY_SIZE(low_mem_reserved); i++) {
- phys_addr_t start;
- phys_addr_t size;
-
- start = low_mem_reserved[i].start;
- size = low_mem_reserved[i].size;
-
- /* Handle memory before this low memory section */
- if (p < start) {
- phys_addr_t s;
- s = min(n, start - p);
- add_memory_region(p, s, BOOT_MEM_RAM);
- p += s;
- n -= s;
- }
-
- /* Handle the low memory section itself. If it's aliased,
-		 * we reduce the number of bytes left, but if not, the RAM
- * is available elsewhere and we don't reduce the number of
- * bytes remaining. */
- if (p == start) {
- if (low_mem_reserved[i].is_aliased) {
- s = min(n, size);
- n -= s;
- p += s;
- } else
- p += n;
- }
- }
-
- return orig_n - n;
-}
-
-/*
- * register_ram - register real RAM
- * @p: Address of memory as seen by devices
- * @alias: If the memory is seen at an additional address by the processor,
- * this will be the address, otherwise it is the same as @p.
- * @n: Number of bytes in this section of memory
- */
-static __init void register_ram(phys_addr_t p, phys_addr_t alias,
- phys_addr_t n)
-{
- /*
- * If some or all of this memory has an alias, break it into the
- * aliased and non-aliased portion.
- */
- if (p != alias) {
- phys_addr_t alias_size;
- phys_addr_t registered;
-
- alias_size = min(n, LOW_RAM_END - alias);
- registered = register_low_ram(alias, alias_size);
- ioremap_add_map(alias, p, n);
- n -= registered;
- p += registered;
- }
-
-#ifdef CONFIG_HIGHMEM
- if (n != 0) {
- add_memory_region(p, n, BOOT_MEM_RAM);
- ioremap_add_map(p, p, n);
- }
-#endif
-}
-
-/**
- * register_address_space - register things in the address space
- * @memsize: Number of bytes of RAM installed
- *
- * Takes the given number of bytes of RAM and registers as many of the regions,
- * or partial regions, as it can. So, the default configuration might have
- * two regions with 256 MiB each. If the memsize passed in on the command line
- * is 384 MiB, it will register the first region with 256 MiB and the second
- * with 128 MiB.
- */
-static __init void register_address_space(phys_addr_t memsize)
-{
- int i;
- phys_addr_t size;
- size_t n;
- struct mem_layout *layout;
- enum family_type family;
-
- /*
- * Register all of the things that aren't available to the kernel as
- * memory.
- */
- register_non_ram();
-
- /* Find the appropriate memory description */
- family = platform_get_family();
-
- for (i = 0; i < ARRAY_SIZE(layout_list); i++) {
- if (layout_list[i].family == family)
- break;
- }
-
- if (i == ARRAY_SIZE(layout_list)) {
- n = ARRAY_SIZE(default_layout);
- layout = default_layout;
- } else {
- n = layout_list[i].n;
- layout = layout_list[i].layout;
- }
-
- for (i = 0; memsize != 0 && i < n; i++) {
- size = min(memsize, layout[i].size);
- register_ram(layout[i].phys, layout[i].alias, size);
- memsize -= size;
- }
-}
-
-void __init prom_meminit(void)
-{
- ptv_memsize = get_memsize();
- register_address_space(ptv_memsize);
-}
-
-void __init prom_free_prom_memory(void)
-{
- unsigned long addr;
- int i;
-
- for (i = 0; i < boot_mem_map.nr_map; i++) {
- if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
- continue;
-
- addr = boot_mem_map.map[i].addr;
- free_init_pages("prom memory",
- addr, addr + boot_mem_map.map[i].size);
- }
-}
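register_address_space() above trims the family's layout table against the reported memsize, exactly as its comment describes. The standalone user-space sketch below (illustrative only, not kernel code) replays that loop for the f4500 layout with a hypothetical 272 MiB memsize: it registers the full 256 MiB of the first region and the remaining 16 MiB of the second.

#include <stdio.h>

#define MEBIBYTE(n)	((n) * 1024UL * 1024UL)

struct layout { unsigned long phys, alias, size; };

int main(void)
{
	/* Same shape as f4500_layout[] in the deleted memory.c */
	struct layout f4500[] = {
		{ 0x40000000, 0x10000000, MEBIBYTE(256) },
		{ 0x20000000, 0x20000000, MEBIBYTE(32)  },
	};
	unsigned long memsize = MEBIBYTE(272);	/* hypothetical memsize= value */
	int i;

	for (i = 0; memsize != 0 && i < 2; i++) {
		unsigned long size = memsize < f4500[i].size ?
				     memsize : f4500[i].size;

		printf("region %d: phys 0x%lx, %lu MiB\n",
		       i, f4500[i].phys, size >> 20);
		memsize -= size;
	}
	return 0;
}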
diff --git a/arch/mips/powertv/pci/Makefile b/arch/mips/powertv/pci/Makefile
deleted file mode 100644
index 2610a6af5b2c..000000000000
--- a/arch/mips/powertv/pci/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Copyright (C) 2009 Scientific-Atlanta, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-#
-
-obj-$(CONFIG_PCI) += fixup-powertv.o
diff --git a/arch/mips/powertv/pci/fixup-powertv.c b/arch/mips/powertv/pci/fixup-powertv.c
deleted file mode 100644
index d7ecbae64a6e..000000000000
--- a/arch/mips/powertv/pci/fixup-powertv.c
+++ /dev/null
@@ -1,37 +0,0 @@
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <asm/mach-powertv/interrupts.h>
-#include "powertv-pci.h"
-
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- return asic_pcie_map_irq(dev, slot, pin);
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- return 0;
-}
-
-/*
- * asic_pcie_map_irq
- *
- * Parameters:
- * *dev - pointer to a pci_dev structure (not used)
- * slot - slot number (not used)
- * pin - pin number (not used)
- *
- * Return Value:
- * Returns: IRQ number (always the PCI Express IRQ number)
- *
- * Description:
- * asic_pcie_map_irq will return the IRQ number of the PCI Express interrupt.
- *
- */
-int asic_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- return irq_pciexp;
-}
-EXPORT_SYMBOL(asic_pcie_map_irq);
diff --git a/arch/mips/powertv/pci/powertv-pci.h b/arch/mips/powertv/pci/powertv-pci.h
deleted file mode 100644
index 1b5886bbd759..000000000000
--- a/arch/mips/powertv/pci/powertv-pci.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * powertv-pci.h
- *
- * Copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-/*
- * Local definitions for the powertv PCI code
- */
-
-#ifndef _POWERTV_PCI_POWERTV_PCI_H_
-#define _POWERTV_PCI_POWERTV_PCI_H_
-extern int asic_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
-extern int asic_pcie_init(void);
-
-extern int log_level;
-#endif
diff --git a/arch/mips/powertv/powertv-clock.h b/arch/mips/powertv/powertv-clock.h
deleted file mode 100644
index d94c54311485..000000000000
--- a/arch/mips/powertv/powertv-clock.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: David VomLehn
- */
-
-#ifndef _POWERTV_POWERTV_CLOCK_H
-#define _POWERTV_POWERTV_CLOCK_H
-extern int powertv_clockevent_init(void);
-extern void powertv_clocksource_init(void);
-extern unsigned int mips_get_pll_freq(void);
-#endif
diff --git a/arch/mips/powertv/powertv-usb.c b/arch/mips/powertv/powertv-usb.c
deleted file mode 100644
index d845eace58e9..000000000000
--- a/arch/mips/powertv/powertv-usb.c
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * powertv-usb.c
- *
- * Description: ASIC-specific USB device setup and shutdown
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- * Copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
- *
- * NOTE: The bootloader allocates persistent memory at an address which is
- * 16 MiB below the end of the highest address in KSEG0. All fixed
- * address memory reservations must avoid this region.
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <asm/mach-powertv/asic.h>
-#include <asm/mach-powertv/interrupts.h>
-
-/* misc_clk_ctl1 values */
-#define MCC1_30MHZ_POWERUP_SELECT (1 << 14)
-#define MCC1_DIV9 (1 << 13)
-#define MCC1_ETHMIPS_POWERUP_SELECT (1 << 11)
-#define MCC1_USB_POWERUP_SELECT (1 << 1)
-#define MCC1_CLOCK108_POWERUP_SELECT (1 << 0)
-
-/* Possible values for clock select */
-#define MCC1_USB_CLOCK_HIGH_Z (0 << 4)
-#define MCC1_USB_CLOCK_48MHZ (1 << 4)
-#define MCC1_USB_CLOCK_24MHZ (2 << 4)
-#define MCC1_USB_CLOCK_6MHZ (3 << 4)
-
-#define MCC1_CONFIG (MCC1_30MHZ_POWERUP_SELECT | \
- MCC1_DIV9 | \
- MCC1_ETHMIPS_POWERUP_SELECT | \
- MCC1_USB_POWERUP_SELECT | \
- MCC1_CLOCK108_POWERUP_SELECT)
-
-/* misc_clk_ctl2 values */
-#define MCC2_GMII_GCLK_TO_PAD (1 << 31)
-#define MCC2_ETHER125_0_CLOCK_SELECT (1 << 29)
-#define MCC2_RMII_0_CLOCK_SELECT (1 << 28)
-#define MCC2_GMII_TX0_CLOCK_SELECT (1 << 27)
-#define MCC2_GMII_RX0_CLOCK_SELECT (1 << 26)
-#define MCC2_ETHER125_1_CLOCK_SELECT (1 << 24)
-#define MCC2_RMII_1_CLOCK_SELECT (1 << 23)
-#define MCC2_GMII_TX1_CLOCK_SELECT (1 << 22)
-#define MCC2_GMII_RX1_CLOCK_SELECT (1 << 21)
-#define MCC2_ETHER125_2_CLOCK_SELECT (1 << 19)
-#define MCC2_RMII_2_CLOCK_SELECT (1 << 18)
-#define MCC2_GMII_TX2_CLOCK_SELECT (1 << 17)
-#define MCC2_GMII_RX2_CLOCK_SELECT (1 << 16)
-
-#define ETHER_CLK_CONFIG (MCC2_GMII_GCLK_TO_PAD | \
- MCC2_ETHER125_0_CLOCK_SELECT | \
- MCC2_RMII_0_CLOCK_SELECT | \
- MCC2_GMII_TX0_CLOCK_SELECT | \
- MCC2_GMII_RX0_CLOCK_SELECT | \
- MCC2_ETHER125_1_CLOCK_SELECT | \
- MCC2_RMII_1_CLOCK_SELECT | \
- MCC2_GMII_TX1_CLOCK_SELECT | \
- MCC2_GMII_RX1_CLOCK_SELECT | \
- MCC2_ETHER125_2_CLOCK_SELECT | \
- MCC2_RMII_2_CLOCK_SELECT | \
- MCC2_GMII_TX2_CLOCK_SELECT | \
- MCC2_GMII_RX2_CLOCK_SELECT)
-
-/* misc_clk_ctl2 definitions for Gaia */
-#define FSX4A_REF_SELECT (1 << 16)
-#define FSX4B_REF_SELECT (1 << 17)
-#define FSX4C_REF_SELECT (1 << 18)
-#define DDR_PLL_REF_SELECT (1 << 19)
-#define MIPS_PLL_REF_SELECT (1 << 20)
-
-/* Definitions for the QAM frequency select register FS432X4A4_QAM_CTL */
-#define QAM_FS_SDIV_SHIFT 29
-#define QAM_FS_MD_SHIFT 24
-#define QAM_FS_MD_MASK 0x1f /* Cut down to 5 bits */
-#define QAM_FS_PE_SHIFT 8
-
-#define QAM_FS_DISABLE_DIVIDE_BY_3 (1 << 5)
-#define QAM_FS_ENABLE_PROGRAM (1 << 4)
-#define QAM_FS_ENABLE_OUTPUT (1 << 3)
-#define QAM_FS_SELECT_TEST_BYPASS (1 << 2)
-#define QAM_FS_DISABLE_DIGITAL_STANDBY (1 << 1)
-#define QAM_FS_CHOOSE_FS (1 << 0)
-
-/* Definitions for fs432x4a_ctl register */
-#define QAM_FS_NSDIV_54MHZ (1 << 2)
-
-/* Definitions for bcm1_usb2_ctl register */
-#define BCM1_USB2_CTL_BISTOK (1 << 11)
-#define BCM1_USB2_CTL_PORT2_SHIFT_JK (1 << 7)
-#define BCM1_USB2_CTL_PORT1_SHIFT_JK (1 << 6)
-#define BCM1_USB2_CTL_PORT2_FAST_EDGE (1 << 5)
-#define BCM1_USB2_CTL_PORT1_FAST_EDGE (1 << 4)
-#define BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH (1 << 1)
-#define BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH (1 << 0)
-
-/* Definitions for crt_spare register */
-#define CRT_SPARE_PORT2_SHIFT_JK (1 << 21)
-#define CRT_SPARE_PORT1_SHIFT_JK (1 << 20)
-#define CRT_SPARE_PORT2_FAST_EDGE (1 << 19)
-#define CRT_SPARE_PORT1_FAST_EDGE (1 << 18)
-#define CRT_SPARE_DIVIDE_BY_9_FROM_432 (1 << 17)
-#define CRT_SPARE_USB_DIVIDE_BY_9 (1 << 16)
-
-/* Definitions for usb2_stbus_obc register */
-#define USB_STBUS_OBC_STORE32_LOAD32 0x3
-
-/* Definitions for usb2_stbus_mess_size register */
-#define USB2_STBUS_MESS_SIZE_2 0x1 /* 2 packets */
-
-/* Definitions for usb2_stbus_chunk_size register */
-#define USB2_STBUS_CHUNK_SIZE_2 0x1 /* 2 packets */
-
-/* Definitions for usb2_strap register */
-#define USB2_STRAP_HFREQ_SELECT 0x1
-
-/*
- * USB Host Resource Definition
- */
-
-static struct resource ehci_resources[] = {
- {
- .parent = &asic_resource,
- .start = 0,
- .end = 0xff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = irq_usbehci,
- .end = irq_usbehci,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static u64 ehci_dmamask = 0xffffffffULL;
-
-static struct platform_device ehci_device = {
- .name = "powertv-ehci",
- .id = 0,
- .num_resources = 2,
- .resource = ehci_resources,
- .dev = {
- .dma_mask = &ehci_dmamask,
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-static struct resource ohci_resources[] = {
- {
- .parent = &asic_resource,
- .start = 0,
- .end = 0xff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = irq_usbohci,
- .end = irq_usbohci,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static u64 ohci_dmamask = 0xffffffffULL;
-
-static struct platform_device ohci_device = {
- .name = "powertv-ohci",
- .id = 0,
- .num_resources = 2,
- .resource = ohci_resources,
- .dev = {
- .dma_mask = &ohci_dmamask,
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-static unsigned usb_users;
-static DEFINE_SPINLOCK(usb_regs_lock);
-
-/*
- *
- * fs_update - set frequency synthesizer for USB
- * @pe_bits Phase tap setting
- * @md_bits Coarse selector bus for algorithm of phase tap
- * @sdiv_bits Output divider setting
- * @disable_div_by_3 Either QAM_FS_DISABLE_DIVIDE_BY_3 or zero
- * @standby Either QAM_FS_DISABLE_DIGITAL_STANDBY or zero
- *
- * QAM frequency selection code, which affects the frequency at which USB
- * runs. The frequency is calculated as:
- * 2^15 * ndiv * Fin
- * Fout = ------------------------------------------------------------
- * (sdiv * (ipe * (1 + md/32) - (ipe - 2^15)*(1 + (md + 1)/32)))
- * where:
- * Fin 54 MHz
- * ndiv QAM_FS_NSDIV_54MHZ ? 8 : 16
- * sdiv 1 << (sdiv_bits + 1)
- * ipe Same as pe_bits
- * md A five-bit, two's-complement integer (range [-16, 15]), which
- * is the lower 5 bits of md_bits.
- */
-static void fs_update(u32 pe_bits, int md_bits, u32 sdiv_bits,
- u32 disable_div_by_3, u32 standby)
-{
- u32 val;
-
- val = ((sdiv_bits << QAM_FS_SDIV_SHIFT) |
- ((md_bits & QAM_FS_MD_MASK) << QAM_FS_MD_SHIFT) |
- (pe_bits << QAM_FS_PE_SHIFT) |
- QAM_FS_ENABLE_OUTPUT |
- standby |
- disable_div_by_3);
- asic_write(val, fs432x4b4_usb_ctl);
- asic_write(val | QAM_FS_ENABLE_PROGRAM, fs432x4b4_usb_ctl);
- asic_write(val | QAM_FS_ENABLE_PROGRAM | QAM_FS_CHOOSE_FS,
- fs432x4b4_usb_ctl);
-}
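The comment above gives the synthesizer output frequency in closed form. The user-space sketch below simply evaluates that formula for a given register setting; it is an illustration of the documented equation, not driver code, and the ndiv value is an assumption because it is selected by a bit in a different register (QAM_FS_NSDIV_54MHZ).

#include <stdio.h>

/*
 * Evaluate the documented formula:
 *
 *                        2^15 * ndiv * Fin
 *   Fout = ------------------------------------------------------------
 *          (sdiv * (ipe * (1 + md/32) - (ipe - 2^15)*(1 + (md + 1)/32)))
 */
static double qam_fs_fout(unsigned int ipe, int md, unsigned int sdiv_bits,
			  int ndiv)
{
	const double fin = 54e6;		/* Fin is fixed at 54 MHz */
	double sdiv = 1 << (sdiv_bits + 1);	/* output divider */
	double num = 32768.0 * ndiv * fin;
	double den = sdiv * (ipe * (1.0 + md / 32.0) -
			     (ipe - 32768.0) * (1.0 + (md + 1) / 32.0));

	return num / den;
}

int main(void)
{
	/* Zeus setting from platform_configure_usb(); ndiv assumed to be 16 */
	printf("Fout = %.1f MHz\n", qam_fs_fout(0x0000, -15, 0x02, 16) / 1e6);
	return 0;
}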
-
-/*
- * usb_eye_configure - optimize the shape of the USB eye waveform
- * @set: Bits to set in the register
- * @clear:	Bits to clear in the register; each bit with a one will
- *		be cleared in the register, zero bits will not be modified
- */
-static void usb_eye_configure(u32 set, u32 clear)
-{
- u32 old;
-
- old = asic_read(crt_spare);
- old |= set;
- old &= ~clear;
- asic_write(old, crt_spare);
-}
-
-/*
- * platform_configure_usb - usb configuration based on platform type.
- */
-static void platform_configure_usb(void)
-{
- u32 bcm1_usb2_ctl_value;
- enum asic_type asic_type;
- unsigned long flags;
-
- spin_lock_irqsave(&usb_regs_lock, flags);
- usb_users++;
-
- if (usb_users != 1) {
- spin_unlock_irqrestore(&usb_regs_lock, flags);
- return;
- }
-
- asic_type = platform_get_asic();
-
- switch (asic_type) {
- case ASIC_ZEUS:
- fs_update(0x0000, -15, 0x02, 0, 0);
- bcm1_usb2_ctl_value = BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH |
- BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH;
- break;
-
- case ASIC_CRONUS:
- case ASIC_CRONUSLITE:
- usb_eye_configure(0, CRT_SPARE_USB_DIVIDE_BY_9);
- fs_update(0x8000, -14, 0x03, QAM_FS_DISABLE_DIVIDE_BY_3,
- QAM_FS_DISABLE_DIGITAL_STANDBY);
- bcm1_usb2_ctl_value = BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH |
- BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH;
- break;
-
- case ASIC_CALLIOPE:
- fs_update(0x0000, -15, 0x02, QAM_FS_DISABLE_DIVIDE_BY_3,
- QAM_FS_DISABLE_DIGITAL_STANDBY);
-
- switch (platform_get_family()) {
- case FAMILY_1500VZE:
- break;
-
- case FAMILY_1500VZF:
- usb_eye_configure(CRT_SPARE_PORT2_SHIFT_JK |
- CRT_SPARE_PORT1_SHIFT_JK |
- CRT_SPARE_PORT2_FAST_EDGE |
- CRT_SPARE_PORT1_FAST_EDGE, 0);
- break;
-
- default:
- usb_eye_configure(CRT_SPARE_PORT2_SHIFT_JK |
- CRT_SPARE_PORT1_SHIFT_JK, 0);
- break;
- }
-
- bcm1_usb2_ctl_value = BCM1_USB2_CTL_BISTOK |
- BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH |
- BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH;
- break;
-
- case ASIC_GAIA:
- fs_update(0x8000, -14, 0x03, QAM_FS_DISABLE_DIVIDE_BY_3,
- QAM_FS_DISABLE_DIGITAL_STANDBY);
- bcm1_usb2_ctl_value = BCM1_USB2_CTL_BISTOK |
- BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH |
- BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH;
- break;
-
- default:
- pr_err("Unknown ASIC type: %d\n", asic_type);
- bcm1_usb2_ctl_value = 0;
- break;
- }
-
- /* turn on USB power */
- asic_write(0, usb2_strap);
-	/* Configure the USB2 control register */
- asic_write(bcm1_usb2_ctl_value, usb2_control);
- /* usb2_stbus_obc store32/load32 */
- asic_write(USB_STBUS_OBC_STORE32_LOAD32, usb2_stbus_obc);
- /* usb2_stbus_mess_size 2 packets */
- asic_write(USB2_STBUS_MESS_SIZE_2, usb2_stbus_mess_size);
- /* usb2_stbus_chunk_size 2 packets */
- asic_write(USB2_STBUS_CHUNK_SIZE_2, usb2_stbus_chunk_size);
- spin_unlock_irqrestore(&usb_regs_lock, flags);
-}
-
-static void platform_unconfigure_usb(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&usb_regs_lock, flags);
- usb_users--;
- if (usb_users == 0)
- asic_write(USB2_STRAP_HFREQ_SELECT, usb2_strap);
- spin_unlock_irqrestore(&usb_regs_lock, flags);
-}
-
-/*
- * Set up the USB EHCI interface
- */
-void platform_configure_usb_ehci()
-{
- platform_configure_usb();
-}
-EXPORT_SYMBOL(platform_configure_usb_ehci);
-
-/*
- * Set up the USB OHCI interface
- */
-void platform_configure_usb_ohci()
-{
- platform_configure_usb();
-}
-EXPORT_SYMBOL(platform_configure_usb_ohci);
-
-/*
- * Shut the USB EHCI interface down
- */
-void platform_unconfigure_usb_ehci()
-{
- platform_unconfigure_usb();
-}
-EXPORT_SYMBOL(platform_unconfigure_usb_ehci);
-
-/*
- * Shut the USB OHCI interface down
- */
-void platform_unconfigure_usb_ohci()
-{
- platform_unconfigure_usb();
-}
-EXPORT_SYMBOL(platform_unconfigure_usb_ohci);
-
-/**
- * platform_usb_devices_init - sets up USB device resources.
- */
-int __init platform_usb_devices_init(struct platform_device **ehci_dev,
- struct platform_device **ohci_dev)
-{
- *ehci_dev = &ehci_device;
- ehci_resources[0].start = asic_reg_phys_addr(ehci_hcapbase);
- ehci_resources[0].end += ehci_resources[0].start;
-
- *ohci_dev = &ohci_device;
- ohci_resources[0].start = asic_reg_phys_addr(ohci_hc_revision);
- ohci_resources[0].end += ohci_resources[0].start;
-
- return 0;
-}
diff --git a/arch/mips/powertv/powertv_setup.c b/arch/mips/powertv/powertv_setup.c
deleted file mode 100644
index 24689bff1039..000000000000
--- a/arch/mips/powertv/powertv_setup.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/screen_info.h>
-#include <linux/notifier.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/ctype.h>
-#include <linux/cpu.h>
-#include <linux/time.h>
-
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mips-boards/generic.h>
-#include <asm/dma.h>
-#include <asm/asm.h>
-#include <asm/traps.h>
-#include <asm/asm-offsets.h>
-#include "reset.h"
-
-#define VAL(n) STR(n)
-
-/*
- * Macros for loading addresses and storing registers:
- * LONG_L_ Stringified version of LONG_L for use in asm() statement
- * LONG_S_ Stringified version of LONG_S for use in asm() statement
- * PTR_LA_ Stringified version of PTR_LA for use in asm() statement
- * REG_SIZE Number of 8-bit bytes in a full width register
- */
-#define LONG_L_ VAL(LONG_L) " "
-#define LONG_S_ VAL(LONG_S) " "
-#define PTR_LA_ VAL(PTR_LA) " "
-
-#ifdef CONFIG_64BIT
-#warning TODO: 64-bit code needs to be verified
-#define REG_SIZE "8" /* In bytes */
-#endif
-
-#ifdef CONFIG_32BIT
-#define REG_SIZE "4" /* In bytes */
-#endif
-
-static void register_panic_notifier(void);
-static int panic_handler(struct notifier_block *notifier_block,
- unsigned long event, void *cause_string);
-
-const char *get_system_type(void)
-{
- return "PowerTV";
-}
-
-void __init plat_mem_setup(void)
-{
- panic_on_oops = 1;
- register_panic_notifier();
-
-#if 0
- mips_pcibios_init();
-#endif
- mips_reboot_setup();
-}
-
-/*
- * Install a panic notifier for platform-specific diagnostics
- */
-static void register_panic_notifier()
-{
- static struct notifier_block panic_notifier = {
- .notifier_call = panic_handler,
- .next = NULL,
- .priority = INT_MAX
- };
- atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
-}
-
-static int panic_handler(struct notifier_block *notifier_block,
- unsigned long event, void *cause_string)
-{
- struct pt_regs my_regs;
-
- /* Save all of the registers */
- {
- unsigned long at, v0, v1; /* Must be on the stack */
-
- /* Start by saving $at and v0 on the stack. We use $at
- * ourselves, but it looks like the compiler may use v0 or v1
- * to load the address of the pt_regs structure. We'll come
- * back later to store the registers in the pt_regs
- * structure. */
- __asm__ __volatile__ (
- ".set noat\n"
- LONG_S_ "$at, %[at]\n"
- LONG_S_ "$2, %[v0]\n"
- LONG_S_ "$3, %[v1]\n"
- :
- [at] "=m" (at),
- [v0] "=m" (v0),
- [v1] "=m" (v1)
- :
- : "at"
- );
-
- __asm__ __volatile__ (
- ".set noat\n"
- "move $at, %[pt_regs]\n"
-
- /* Argument registers */
- LONG_S_ "$4, " VAL(PT_R4) "($at)\n"
- LONG_S_ "$5, " VAL(PT_R5) "($at)\n"
- LONG_S_ "$6, " VAL(PT_R6) "($at)\n"
- LONG_S_ "$7, " VAL(PT_R7) "($at)\n"
-
- /* Temporary regs */
- LONG_S_ "$8, " VAL(PT_R8) "($at)\n"
- LONG_S_ "$9, " VAL(PT_R9) "($at)\n"
- LONG_S_ "$10, " VAL(PT_R10) "($at)\n"
- LONG_S_ "$11, " VAL(PT_R11) "($at)\n"
- LONG_S_ "$12, " VAL(PT_R12) "($at)\n"
- LONG_S_ "$13, " VAL(PT_R13) "($at)\n"
- LONG_S_ "$14, " VAL(PT_R14) "($at)\n"
- LONG_S_ "$15, " VAL(PT_R15) "($at)\n"
-
- /* "Saved" registers */
- LONG_S_ "$16, " VAL(PT_R16) "($at)\n"
- LONG_S_ "$17, " VAL(PT_R17) "($at)\n"
- LONG_S_ "$18, " VAL(PT_R18) "($at)\n"
- LONG_S_ "$19, " VAL(PT_R19) "($at)\n"
- LONG_S_ "$20, " VAL(PT_R20) "($at)\n"
- LONG_S_ "$21, " VAL(PT_R21) "($at)\n"
- LONG_S_ "$22, " VAL(PT_R22) "($at)\n"
- LONG_S_ "$23, " VAL(PT_R23) "($at)\n"
-
- /* Add'l temp regs */
- LONG_S_ "$24, " VAL(PT_R24) "($at)\n"
- LONG_S_ "$25, " VAL(PT_R25) "($at)\n"
-
- /* Kernel temp regs */
- LONG_S_ "$26, " VAL(PT_R26) "($at)\n"
- LONG_S_ "$27, " VAL(PT_R27) "($at)\n"
-
- /* Global pointer, stack pointer, frame pointer and
- * return address */
- LONG_S_ "$gp, " VAL(PT_R28) "($at)\n"
- LONG_S_ "$sp, " VAL(PT_R29) "($at)\n"
- LONG_S_ "$fp, " VAL(PT_R30) "($at)\n"
- LONG_S_ "$ra, " VAL(PT_R31) "($at)\n"
-
- /* Now we can get the $at and v0 registers back and
- * store them */
- LONG_L_ "$8, %[at]\n"
- LONG_S_ "$8, " VAL(PT_R1) "($at)\n"
- LONG_L_ "$8, %[v0]\n"
- LONG_S_ "$8, " VAL(PT_R2) "($at)\n"
- LONG_L_ "$8, %[v1]\n"
- LONG_S_ "$8, " VAL(PT_R3) "($at)\n"
- :
- :
- [at] "m" (at),
- [v0] "m" (v0),
- [v1] "m" (v1),
- [pt_regs] "r" (&my_regs)
- : "at", "t0"
- );
-
- /* Set the current EPC value to be the current location in this
- * function */
- __asm__ __volatile__ (
- ".set noat\n"
- "1:\n"
- PTR_LA_ "$at, 1b\n"
- LONG_S_ "$at, %[cp0_epc]\n"
- :
- [cp0_epc] "=m" (my_regs.cp0_epc)
- :
- : "at"
- );
-
- my_regs.cp0_cause = read_c0_cause();
- my_regs.cp0_status = read_c0_status();
- }
-
- pr_crit("I'm feeling a bit sleepy. hmmmmm... perhaps a nap would... "
- "zzzz... \n");
-
- return NOTIFY_DONE;
-}
-
-/* Information about the RF MAC address, if one was supplied on the
- * command line. */
-static bool have_rfmac;
-static u8 rfmac[ETH_ALEN];
-
-static int rfmac_param(char *p)
-{
- u8 *q;
- bool is_high_nibble;
- int c;
-
- /* Skip a leading "0x", if present */
- if (*p == '0' && *(p+1) == 'x')
- p += 2;
-
- q = rfmac;
- is_high_nibble = true;
-
- for (c = (unsigned char) *p++;
- isxdigit(c) && q - rfmac < ETH_ALEN;
- c = (unsigned char) *p++) {
- int nibble;
-
- nibble = (isdigit(c) ? (c - '0') :
- (isupper(c) ? c - 'A' + 10 : c - 'a' + 10));
-
- if (is_high_nibble)
- *q = nibble << 4;
- else
- *q++ |= nibble;
-
- is_high_nibble = !is_high_nibble;
- }
-
- /* If we parsed all the way to the end of the parameter value and
- * parsed all ETH_ALEN bytes, we have a usable RF MAC address */
- have_rfmac = (c == '\0' && q - rfmac == ETH_ALEN);
-
- return 0;
-}
-
-early_param("rfmac", rfmac_param);
-
-/*
- * Generate an Ethernet MAC address that has a good chance of being unique.
- * @addr: Pointer to six-byte array containing the Ethernet address
- * Generates an Ethernet MAC address that is highly likely to be unique for
- * this particular system on a network with other systems of the same type.
- *
- * The problem we are solving is that, when eth_random_addr() is used to
- * generate MAC addresses at startup, there isn't much entropy for the random
- * number generator to use and the addresses it produces are fairly likely to
- * be the same as those of other identical systems on the same local network.
- * This is true even for relatively small numbers of systems (for the reason
- * why, see the Wikipedia entry for "Birthday problem" at:
- * http://en.wikipedia.org/wiki/Birthday_problem )
- *
- * The good news is that we already have a MAC address known to be unique, the
- * RF MAC address. The bad news is that this address is already in use on the
- * RF interface. Worse, the obvious trick, taking the RF MAC address and
- * turning on the locally managed bit, has already been used for other devices.
- * Still, this does give us something to work with.
- *
- * The approach we take is:
- * 1. If we can't get the RF MAC Address, just call eth_random_addr.
- * 2. Use the 24-bit NIC-specific bits of the RF MAC address as the last 24
- * bits of the new address. This is very likely to be unique, except for
- * the current box.
- * 3. To avoid using addresses already on the current box, we set the top
- * six bits of the address with a value different from any currently
- * registered Scientific Atlanta organizationally unique identifier
- * (OUI). This avoids duplication with any addresses on the system that
- * were generated from valid Scientific Atlanta-registered address by
- * simply flipping the locally managed bit.
- * 4. We aren't generating a multicast address, so we leave the multicast
- * bit off. Since we aren't using a registered address, we have to set
- * the locally managed bit.
- * 5. We then randomly generate the remaining 16-bits. This does two
- * things:
- * a. It allows us to call this function for more than one device
- * in this system
- * b. It ensures that things will probably still work even if
- * some device on the device network has a locally managed
- * address that matches the top six bits from step 2.
- */
-void platform_random_ether_addr(u8 addr[ETH_ALEN])
-{
- const int num_random_bytes = 2;
- const unsigned char non_sciatl_oui_bits = 0xc0u;
- const unsigned char mac_addr_locally_managed = (1 << 1);
-
- if (!have_rfmac) {
- pr_warning("rfmac not available on command line; "
- "generating random MAC address\n");
- eth_random_addr(addr);
- }
-
- else {
- int i;
-
- /* Set the first byte to something that won't match a Scientific
- * Atlanta OUI, is locally managed, and isn't a multicast
- * address */
- addr[0] = non_sciatl_oui_bits | mac_addr_locally_managed;
-
- /* Get some bytes of random address information */
- get_random_bytes(&addr[1], num_random_bytes);
-
- /* Copy over the NIC-specific bits of the RF MAC address */
- for (i = 1 + num_random_bytes; i < ETH_ALEN; i++)
- addr[i] = rfmac[i];
- }
-}
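The derivation described in the comment above reduces to a fixed byte layout. The standalone sketch below shows that layout only; the RF MAC value, the two "random" bytes and the printed result are invented for illustration and are not taken from this file or from real hardware. On the command line the RF MAC would be supplied via the "rfmac=" early parameter parsed above (e.g. rfmac=0x001a2b3c4d5e, a made-up value).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical RF MAC, for illustration only. */
	const uint8_t rfmac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
	uint8_t addr[6];

	/* Byte 0: outside the Scientific Atlanta OUIs (0xc0), locally
	 * managed (0x02), multicast bit left clear. */
	addr[0] = 0xc0 | 0x02;

	/* Bytes 1-2: would come from get_random_bytes(); fixed here. */
	addr[1] = 0x12;
	addr[2] = 0x34;

	/* Bytes 3-5: the NIC-specific part of the RF MAC address. */
	memcpy(&addr[3], &rfmac[3], 3);

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}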
diff --git a/arch/mips/powertv/reset.c b/arch/mips/powertv/reset.c
deleted file mode 100644
index 11c32fbf2784..000000000000
--- a/arch/mips/powertv/reset.c
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/pm.h>
-
-#include <linux/io.h>
-#include <asm/reboot.h> /* Not included by linux/reboot.h */
-
-#include <asm/mach-powertv/asic_regs.h>
-#include "reset.h"
-
-static void mips_machine_restart(char *command)
-{
- writel(0x1, asic_reg_addr(watchdog));
-}
-
-void mips_reboot_setup(void)
-{
- _machine_restart = mips_machine_restart;
-}
diff --git a/arch/mips/powertv/reset.h b/arch/mips/powertv/reset.h
deleted file mode 100644
index 888fd09e2620..000000000000
--- a/arch/mips/powertv/reset.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Definitions from powertv reset.c file
- *
- * Copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author: David VomLehn
- */
-
-#ifndef _POWERTV_POWERTV_RESET_H
-#define _POWERTV_POWERTV_RESET_H
-extern void mips_reboot_setup(void);
-#endif
diff --git a/arch/mips/powertv/time.c b/arch/mips/powertv/time.c
deleted file mode 100644
index f38b0d45eca9..000000000000
--- a/arch/mips/powertv/time.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Setting up the clock on the MIPS boards.
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/interrupts.h>
-#include <asm/time.h>
-
-#include "powertv-clock.h"
-
-unsigned int get_c0_compare_int(void)
-{
- return irq_mips_timer;
-}
-
-void __init plat_time_init(void)
-{
- powertv_clocksource_init();
-}
diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c
index bba0cdfd83bc..5d0983d47161 100644
--- a/arch/mips/ralink/clk.c
+++ b/arch/mips/ralink/clk.c
@@ -26,7 +26,7 @@ void ralink_clk_add(const char *dev, unsigned long rate)
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk)
- panic("failed to add clock\n");
+ panic("failed to add clock");
clk->cl.dev_id = dev;
clk->cl.clk = clk;
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index d217509e5300..a3ad56c2372d 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -350,7 +350,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
name = "MT7620A";
soc_info->compatible = "ralink,mt7620a-soc";
} else {
- panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+ panic("mt7620: unknown SoC, n0:%08x n1:%08x", n0, n1);
}
rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index ce38d11f9da5..eccc5526155e 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -21,6 +21,7 @@
#include <asm/reboot.h>
#include <asm/bootinfo.h>
#include <asm/addrspace.h>
+#include <asm/prom.h>
#include "common.h"
@@ -108,7 +109,7 @@ static int __init plat_of_setup(void)
strncpy(of_ids[1].compatible, "palmbus", len);
if (of_platform_populate(NULL, of_ids, NULL, NULL))
- panic("failed to populate DT\n");
+ panic("failed to populate DT");
 /* make sure that the reset controller is set up early */
ralink_rst_init();
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index ca7ee3a33790..bb82a82da9e7 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -276,7 +276,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
name = "RT5350";
soc_info->compatible = "ralink,rt5350-soc";
} else {
- panic("rt305x: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+ panic("rt305x: unknown SoC, n0:%08x n1:%08x", n0, n1);
}
id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index e49241a2c39a..202785709441 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -126,7 +126,7 @@ static int rt_timer_probe(struct platform_device *pdev)
return -ENOENT;
}
- rt->membase = devm_request_and_ioremap(&pdev->dev, res);
+ rt->membase = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(rt->membase))
return PTR_ERR(rt->membase);
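For reference, devm_ioremap_resource(), which replaces devm_request_and_ioremap() above, returns an ERR_PTR() on failure (including a missing or busy resource), which is why the IS_ERR()/PTR_ERR() pair is kept. A minimal probe fragment showing the usual idiom; the function name is a placeholder and this is not code from the patch.

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* covers NULL or busy resource */

	/* base is mapped here and released automatically on detach */
	return 0;
}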
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index c5d767028306..74742dc6a3da 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -2,3 +2,4 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
+generic-y += preempt.h
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index c67c2b5365a6..75dbe696f830 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -71,7 +71,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
local_flush_tlb_all();
/* fix the TLB version if needed (we avoid version #0 so as to
- * distingush MMU_NO_CONTEXT) */
+ * distinguish MMU_NO_CONTEXT) */
if (!mc)
*pmc = mc = MMU_CONTEXT_FIRST_VERSION;
}
diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h
index 6f31cc0f1a87..166323824683 100644
--- a/arch/mn10300/include/asm/pci.h
+++ b/arch/mn10300/include/asm/pci.h
@@ -44,7 +44,6 @@ extern void unit_pci_init(void);
#define pcibios_assign_all_busses() 0
#endif
-extern unsigned long pci_mem_start;
#define PCIBIOS_MIN_IO 0xBE000004
#define PCIBIOS_MIN_MEM 0xB8000000
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index e2a2b203eb00..71dedcae55a6 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -76,4 +76,6 @@
#define SO_BUSY_POLL 46
+#define SO_MAX_PACING_RATE 47
+
#endif /* _ASM_SOCKET_H */
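SO_MAX_PACING_RATE is a new generic socket option being wired up for this architecture; it takes a bytes-per-second cap. A small userspace sketch of how it is set follows; the 1 MB/s figure is an arbitrary example, and the fallback define mirrors the mn10300 value added above (other architectures use different numbers, as the parisc hunk later in this diff shows).

#include <sys/socket.h>
#include <stdio.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47		/* value added above for mn10300 */
#endif

int main(void)
{
	unsigned int rate = 1000000;	/* ~1 MB/s, example figure */
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0 ||
	    setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
		       &rate, sizeof(rate)) < 0)
		perror("SO_MAX_PACING_RATE");
	return 0;
}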
diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c
index ebac9c11f796..2ad7f32fa122 100644
--- a/arch/mn10300/kernel/setup.c
+++ b/arch/mn10300/kernel/setup.c
@@ -35,9 +35,6 @@
struct mn10300_cpuinfo boot_cpu_data;
-/* For PCI or other memory-mapped resources */
-unsigned long pci_mem_start = 0x18000000;
-
static char __initdata cmd_line[COMMAND_LINE_SIZE];
char redboot_command_line[COMMAND_LINE_SIZE] =
"console=ttyS0,115200 root=/dev/mtdblock3 rw";
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.h b/arch/mn10300/unit-asb2305/pci-asb2305.h
index 7fa66a0e4624..9e17aca5a2a1 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.h
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.h
@@ -35,7 +35,6 @@ extern void pcibios_resource_survey(void);
/* pci.c */
-extern int pcibios_last_bus;
extern struct pci_ops *pci_root_ops;
extern struct irq_routing_table *pcibios_get_irq_routing_table(void);
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index e37fac0461f3..6b4339f8c9c2 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -24,7 +24,6 @@
unsigned int pci_probe = 1;
-int pcibios_last_bus = -1;
struct pci_ops *pci_root_ops;
/*
@@ -392,10 +391,6 @@ char *__init pcibios_setup(char *str)
if (!strcmp(str, "off")) {
pci_probe = 0;
return NULL;
-
- } else if (!strncmp(str, "lastbus=", 8)) {
- pcibios_last_bus = simple_strtol(str+8, NULL, 0);
- return NULL;
}
return str;
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 195653e851da..78405625e799 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -67,3 +67,4 @@ generic-y += ucontext.h
generic-y += user.h
generic-y += word-at-a-time.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h
deleted file mode 100644
index 93c9980e1b6b..000000000000
--- a/arch/openrisc/include/asm/prom.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * OpenRISC Linux
- *
- * Linux architectural port borrowing liberally from similar works of
- * others. All original copyrights apply as per the original source
- * declaration.
- *
- * OpenRISC implementation:
- * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- * et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef _ASM_OPENRISC_PROM_H
-#define _ASM_OPENRISC_PROM_H
-
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
-#endif /* _ASM_OPENRISC_PROM_H */
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c
index a63e76872f84..6a44340d1b18 100644
--- a/arch/openrisc/kernel/prom.c
+++ b/arch/openrisc/kernel/prom.c
@@ -18,83 +18,15 @@
*
*/
-#include <stdarg.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
#include <linux/init.h>
-#include <linux/threads.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/stringify.h>
-#include <linux/delay.h>
-#include <linux/initrd.h>
-#include <linux/bitops.h>
-#include <linux/module.h>
-#include <linux/kexec.h>
-#include <linux/debugfs.h>
-#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
-#include <asm/prom.h>
#include <asm/page.h>
-#include <asm/processor.h>
-#include <asm/irq.h>
-#include <linux/io.h>
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-
-extern char cmd_line[COMMAND_LINE_SIZE];
-
-void __init early_init_dt_add_memory_arch(u64 base, u64 size)
-{
- size &= PAGE_MASK;
- memblock_add(base, size);
-}
void __init early_init_devtree(void *params)
{
- void *alloc;
-
- /* Setup flat device-tree pointer */
- initial_boot_params = params;
-
-
- /* Retrieve various information from the /chosen node of the
- * device-tree, including the platform type, initrd location and
- * size, TCE reserve, and more ...
- */
- of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
-
- /* Scan memory nodes and rebuild MEMBLOCKs */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
- of_scan_flat_dt(early_init_dt_scan_memory, NULL);
-
- /* Save command line for /proc/cmdline and then parse parameters */
- strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
-
+ early_init_dt_scan(params);
memblock_allow_resize();
-
- * We must copy the flattened device tree from init memory to regular
- * memory because the device tree references the strings in it
- * directly.
- */
-
- alloc = __va(memblock_alloc(initial_boot_params->totalsize, PAGE_SIZE));
-
- memcpy(alloc, initial_boot_params, initial_boot_params->totalsize);
-
- initial_boot_params = alloc;
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
}
-#endif
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index d7359ffbcbdd..09a769b69572 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -50,8 +50,6 @@
#include "vmlinux.h"
-char __initdata cmd_line[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
-
static unsigned long __init setup_memory(void)
{
unsigned long bootmap_size;
@@ -285,7 +283,7 @@ void __init setup_arch(char **cmdline_p)
{
unsigned long max_low_pfn;
- unflatten_device_tree();
+ unflatten_and_copy_device_tree();
setup_cpuinfo();
@@ -316,7 +314,7 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &dummy_con;
#endif
- *cmdline_p = cmd_line;
+ *cmdline_p = boot_command_line;
printk(KERN_INFO "OpenRISC Linux -- http://openrisc.net\n");
}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index ad2ce8dab996..56c9cb7c8bcf 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -1,6 +1,7 @@
config PARISC
def_bool y
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_FUNCTION_TRACER if 64BIT
@@ -287,6 +288,10 @@ config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
+config AUDIT_ARCH
+ def_bool y
+ depends on COMPAT
+
config HPUX
bool "Support for HP-UX binaries"
depends on !64BIT
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index e02f665f804a..7187664034c3 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -94,7 +94,7 @@ PALOCONF := $(shell if [ -f $(src)/palo.conf ]; then echo $(src)/palo.conf; \
else echo $(obj)/palo.conf; \
fi)
-palo: vmlinuz
+palo lifimage: vmlinuz
@if test ! -x "$(PALO)"; then \
echo 'ERROR: Please install palo first (apt-get install palo)';\
echo 'or build it from source and install it somewhere in your $$PATH';\
@@ -109,16 +109,23 @@ palo: vmlinuz
fi
$(PALO) -f $(PALOCONF)
-# Shorthands for known targets not supported by parisc, use vmlinux/vmlinuz as default
+BOOT_TARGETS = zImage Image palo lifimage
+INSTALL_TARGETS = zinstall install
+
+PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
+bzImage zImage: vmlinuz
Image: vmlinux
-zImage bzImage: vmlinuz
vmlinuz: vmlinux
@gzip -cf -9 $< > $@
-install: vmlinuz
- sh $(src)/arch/parisc/install.sh \
- $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
+install:
+ $(CONFIG_SHELL) $(src)/arch/parisc/install.sh \
+ $(KERNELRELEASE) vmlinux System.map "$(INSTALL_PATH)"
+zinstall:
+ $(CONFIG_SHELL) $(src)/arch/parisc/install.sh \
+ $(KERNELRELEASE) vmlinuz System.map "$(INSTALL_PATH)"
CLEAN_FILES += lifimage
MRPROPER_FILES += palo.conf
@@ -127,10 +134,11 @@ define archhelp
@echo '* vmlinux - Uncompressed kernel image (./vmlinux)'
@echo ' vmlinuz - Compressed kernel image (./vmlinuz)'
@echo ' palo - Bootable image (./lifimage)'
- @echo ' install - Install kernel using'
+ @echo ' install - Install uncompressed vmlinux kernel using'
@echo ' (your) ~/bin/$(INSTALLKERNEL) or'
@echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
@echo ' copy to $$(INSTALL_PATH)'
+ @echo ' zinstall - Install compressed vmlinuz kernel'
endef
# we require gcc 3.3 or above to compile the kernel
diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
index 0f90569b9d85..9387cc2693f6 100644
--- a/arch/parisc/configs/712_defconfig
+++ b/arch/parisc/configs/712_defconfig
@@ -40,6 +40,8 @@ CONFIG_IP_NF_QUEUE=m
CONFIG_LLC2=m
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_PARPORT=y
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
index b647b182dacc..90025322b75e 100644
--- a/arch/parisc/configs/a500_defconfig
+++ b/arch/parisc/configs/a500_defconfig
@@ -79,6 +79,8 @@ CONFIG_IP_DCCP=m
CONFIG_LLC2=m
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_BLK_DEV_UMEM=m
diff --git a/arch/parisc/configs/b180_defconfig b/arch/parisc/configs/b180_defconfig
index e289f5bf3148..f1a0c25bef8d 100644
--- a/arch/parisc/configs/b180_defconfig
+++ b/arch/parisc/configs/b180_defconfig
@@ -4,6 +4,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODVERSIONS=y
@@ -27,6 +28,8 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_LRO is not set
CONFIG_IPV6=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index 311ca367b622..ec1b014952b6 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -5,6 +5,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
@@ -39,6 +40,8 @@ CONFIG_NETFILTER_DEBUG=y
CONFIG_IP_NF_QUEUE=m
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_BLK_DEV_UMEM=m
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index f11006361297..e1c8d2015c89 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -62,6 +62,8 @@ CONFIG_TIPC=m
CONFIG_LLC2=m
CONFIG_DNS_RESOLVER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
index dfe88f6c95c4..ba61495e1fa4 100644
--- a/arch/parisc/configs/default_defconfig
+++ b/arch/parisc/configs/default_defconfig
@@ -49,6 +49,8 @@ CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_LLC2=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_PARPORT=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
new file mode 100644
index 000000000000..33b148f825ba
--- /dev/null
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -0,0 +1,328 @@
+CONFIG_LOCALVERSION="-32bit"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PA7100LC=y
+CONFIG_SMP=y
+CONFIG_HZ_100=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_EISA=y
+CONFIG_PCI=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
+CONFIG_PCCARD=m
+CONFIG_YENTA=m
+# CONFIG_PDC_CHASSIS is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=m
+CONFIG_LLC2=m
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_1284=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_LASI700=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_ZALON=y
+CONFIG_SCSI_DH=y
+CONFIG_ATA=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_TUN=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+CONFIG_LASI_82596=y
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPPOE=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_POLLDEV=y
+CONFIG_KEYBOARD_HIL_OLD=m
+CONFIG_KEYBOARD_HIL=m
+CONFIG_MOUSE_SERIAL=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_LEGACY_PTY_COUNT=64
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_PRINTER=m
+CONFIG_PPDEV=m
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_POWER_SUPPLY=y
+# CONFIG_HWMON is not set
+CONFIG_AGP=y
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FB_FOREIGN_ENDIAN=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_MATROX=m
+CONFIG_FB_MATROX_G=y
+CONFIG_FB_VOODOO1=m
+CONFIG_DUMMY_CONSOLE_COLUMNS=128
+CONFIG_DUMMY_CONSOLE_ROWS=48
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_AD1889=m
+CONFIG_SND_HARMONY=m
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KYE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_DMADEVICES=y
+CONFIG_AUXDISPLAY=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_RT=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_NFS_FS=m
+# CONFIG_NFS_V2 is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_RT_MUTEX_TESTER=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_LATENCYTOP=y
+CONFIG_LKDTM=m
+CONFIG_KEYS=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC_CCITT=m
+CONFIG_CRC_T10DIF=y
+CONFIG_FONTS=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
new file mode 100644
index 000000000000..d7f5126123d8
--- /dev/null
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -0,0 +1,345 @@
+CONFIG_LOCALVERSION="-64bit"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_INTEGRITY=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_PA8X00=y
+CONFIG_MLONGCALLS=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+# CONFIG_COMPACTION is not set
+CONFIG_HPPB=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_PCI=y
+CONFIG_PCI_STUB=m
+CONFIG_PCI_IOV=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_LRO=m
+CONFIG_INET_DIAG=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_ADVANCED is not set
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_DCB=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_IDE=y
+CONFIG_IDE_GD=m
+CONFIG_IDE_GD_ATAPI=y
+CONFIG_BLK_DEV_IDECD=m
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_BLK_DEV_SIIMAGE=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_ISCSI_ATTRS=y
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ISCSI_BOOT_SYSFS=y
+CONFIG_SCSI_MPT2SAS=y
+CONFIG_SCSI_LASI700=m
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_ZALON=y
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_SCSI_DH=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_RAID=m
+CONFIG_DM_UEVENT=y
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=y
+CONFIG_FUSION_SAS=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+CONFIG_HP100=m
+CONFIG_E1000=y
+CONFIG_LASI_82596=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+CONFIG_QLA3XXX=m
+CONFIG_QLCNIC=m
+CONFIG_QLGE=m
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_PHYLIB=y
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_NATIONAL_PHY=m
+CONFIG_STE10XP=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_HIL_OLD is not set
+# CONFIG_KEYBOARD_HIL is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_HP_SDC_RTC=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_HP_SDC=m
+CONFIG_HIL_MLC=m
+CONFIG_SERIO_RAW=m
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_NOZOMI=m
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_RUNTIME_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_JSM=m
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_TCG_TPM=m
+CONFIG_TCG_ATMEL=m
+CONFIG_PTP_1588_CLOCK=m
+CONFIG_SENSORS_I5K_AMB=m
+CONFIG_SENSORS_F71882FG=m
+CONFIG_SENSORS_PC87427=m
+CONFIG_SENSORS_VT1211=m
+CONFIG_SENSORS_VT8231=m
+CONFIG_SENSORS_W83627EHF=m
+CONFIG_WATCHDOG=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_SSB=m
+CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_HTC_PASIC3=m
+CONFIG_LPC_SCH=m
+CONFIG_MFD_SM501=m
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=m
+CONFIG_REGULATOR_USERSPACE_CONSUMER=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_AGP=y
+CONFIG_AGP_PARISC=y
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_DRM_RADEON_UMS=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+CONFIG_HID=m
+CONFIG_HIDRAW=y
+CONFIG_HID_DRAGONRISE=m
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_KYE=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_TWINHAN=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_HID_NTRIG=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SONY=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_GREENASIA=m
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_THRUSTMASTER_FF=y
+CONFIG_HID_ZEROPLUS=m
+CONFIG_ZEROPLUS_FF=y
+CONFIG_USB_HID=m
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_MON=m
+CONFIG_USB_WUSB_CBAF=m
+CONFIG_USB_XHCI_HCD=m
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+CONFIG_USB_TMC=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_STAGING=y
+# CONFIG_NET_VENDOR_SILICOM is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_ISO9660_FS=y
+CONFIG_UDF_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_SYSV_FS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V4=m
+CONFIG_NFS_V4_1=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V4=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_UTF8=m
+CONFIG_PRINTK_TIME=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=m
+CONFIG_LIBCRC32C=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index ff4c9faed546..a603b9ebe54c 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -4,3 +4,4 @@ generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \
div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \
poll.h xor.h clkdev.h exec.h
generic-y += trace_clock.h
+generic-y += preempt.h
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 0da848232344..b3069fd83468 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -515,5 +515,17 @@
nop /* 7 */
.endm
+ /*
+ * ASM_EXCEPTIONTABLE_ENTRY
+ *
+ * Creates an exception table entry.
+ * Do not convert to an assembler macro. This won't work.
+ */
+#define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr) \
+ .section __ex_table,"aw" ! \
+ ASM_ULONG_INSN fault_addr, except_addr ! \
+ .previous
+
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
index a2db278a5def..3c3cb004b7e2 100644
--- a/arch/parisc/include/asm/ptrace.h
+++ b/arch/parisc/include/asm/ptrace.h
@@ -19,5 +19,9 @@
#define user_stack_pointer(regs) ((regs)->gr[30])
unsigned long profile_pc(struct pt_regs *);
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->gr[20];
+}
#endif
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h
new file mode 100644
index 000000000000..8d806d80ed24
--- /dev/null
+++ b/arch/parisc/include/asm/socket.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <uapi/asm/socket.h>
+
+/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
+ * have to define SOCK_NONBLOCK to a different value here.
+ */
+#define SOCK_NONBLOCK 0x40000000
+#endif /* _ASM_SOCKET_H */
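The comment above explains why parisc cannot reuse O_NONBLOCK's value for SOCK_NONBLOCK the way the generic headers do: the bits overlap the SOCK_* type field. Usage from userspace is unchanged on every architecture; a minimal sketch (not from this patch) where the flag is OR-ed into the socket type:

#define _GNU_SOURCE
#include <sys/socket.h>
#include <stdio.h>

int main(void)
{
	/* Request a TCP socket that starts out non-blocking. */
	int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);

	if (fd < 0)
		perror("socket");
	else
		printf("non-blocking socket created: fd %d\n", fd);
	return 0;
}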
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 540c88fa8f86..bc7cf120106b 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -59,6 +59,7 @@ struct thread_info {
#define TIF_32BIT 4 /* 32 bit binary */
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
+#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
#define TIF_SINGLESTEP 9 /* single stepping? */
#define TIF_BLOCKSTEP 10 /* branch stepping? */
@@ -68,6 +69,7 @@ struct thread_info {
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_32BIT (1 << TIF_32BIT)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
@@ -75,7 +77,7 @@ struct thread_info {
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
_TIF_NEED_RESCHED)
#define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
- _TIF_BLOCKSTEP)
+ _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT)
#endif /* __KERNEL__ */
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index e0a82358517e..4006964d8e12 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -59,12 +59,13 @@ static inline long access_ok(int type, const void __user * addr,
/*
* The exception table contains two values: the first is an address
* for an instruction that is allowed to fault, and the second is
- * the address to the fixup routine.
+ * the address to the fixup routine. Even on a 64bit kernel we could
+ * use a 32bit (unsigned int) address here.
*/
struct exception_table_entry {
- unsigned long insn; /* address of insn that is allowed to fault. */
- long fixup; /* fixup routine */
+ unsigned long insn; /* address of insn that is allowed to fault. */
+ unsigned long fixup; /* fixup routine */
};
#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 71700e636a8e..f33113a6141e 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_SOCKET_H
-#define _ASM_SOCKET_H
+#ifndef _UAPI_ASM_SOCKET_H
+#define _UAPI_ASM_SOCKET_H
#include <asm/sockios.h>
@@ -75,9 +75,6 @@
#define SO_BUSY_POLL 0x4027
-/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
- * have to define SOCK_NONBLOCK to a different value here.
- */
-#define SOCK_NONBLOCK 0x40000000
+#define SO_MAX_PACING_RATE 0x4048
-#endif /* _ASM_SOCKET_H */
+#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh
index 4da682b466d0..6f68784fea25 100644
--- a/arch/parisc/install.sh
+++ b/arch/parisc/install.sh
@@ -19,20 +19,48 @@
# $4 - default install path (blank if root directory)
#
+verify () {
+ if [ ! -f "$1" ]; then
+ echo "" 1>&2
+ echo " *** Missing file: $1" 1>&2
+ echo ' *** You need to run "make" before "make install".' 1>&2
+ echo "" 1>&2
+ exit 1
+ fi
+}
+
+# Make sure the files actually exist
+
+verify "$2"
+verify "$3"
+
# User may have a custom install script
-if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
-if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+if [ -n "${INSTALLKERNEL}" ]; then
+ if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+ if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+fi
# Default install
-if [ -f $4/vmlinuz ]; then
- mv $4/vmlinuz $4/vmlinuz.old
+if [ "$(basename $2)" = "zImage" ]; then
+# Compressed install
+ echo "Installing compressed kernel"
+ base=vmlinuz
+else
+# Normal install
+ echo "Installing normal kernel"
+ base=vmlinux
+fi
+
+if [ -f $4/$base-$1 ]; then
+ mv $4/$base-$1 $4/$base-$1.old
fi
+cat $2 > $4/$base-$1
-if [ -f $4/System.map ]; then
- mv $4/System.map $4/System.old
+# Install system map file
+if [ -f $4/System.map-$1 ]; then
+ mv $4/System.map-$1 $4/System.map-$1.old
fi
+cp $3 $4/System.map-$1
-cat $2 > $4/vmlinuz
-cp $3 $4/System.map
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index 66ee3f12df58..ad1e3a68208c 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -31,5 +31,6 @@ obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o
obj-$(CONFIG_STACKTRACE)+= stacktrace.o
# only supported for PCX-W/U in 64-bit mode at the moment
obj-$(CONFIG_64BIT) += perf.o perf_asm.o
+obj-$(CONFIG_AUDIT_ARCH) += audit.o compat_audit.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
diff --git a/arch/parisc/kernel/audit.c b/arch/parisc/kernel/audit.c
new file mode 100644
index 000000000000..eb64a6148c82
--- /dev/null
+++ b/arch/parisc/kernel/audit.c
@@ -0,0 +1,81 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit.h>
+#include <asm/unistd.h>
+
+static unsigned dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+static unsigned write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+static unsigned chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+static unsigned signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int audit_classify_arch(int arch)
+{
+#ifdef CONFIG_COMPAT
+ if (arch == AUDIT_ARCH_PARISC)
+ return 1;
+#endif
+ return 0;
+}
+
+int audit_classify_syscall(int abi, unsigned syscall)
+{
+#ifdef CONFIG_COMPAT
+ extern int parisc32_classify_syscall(unsigned);
+ if (abi == AUDIT_ARCH_PARISC)
+ return parisc32_classify_syscall(syscall);
+#endif
+ switch (syscall) {
+ case __NR_open:
+ return 2;
+ case __NR_openat:
+ return 3;
+ case __NR_execve:
+ return 5;
+ default:
+ return 0;
+ }
+}
+
+static int __init audit_classes_init(void)
+{
+#ifdef CONFIG_COMPAT
+ extern __u32 parisc32_dir_class[];
+ extern __u32 parisc32_write_class[];
+ extern __u32 parisc32_read_class[];
+ extern __u32 parisc32_chattr_class[];
+ extern __u32 parisc32_signal_class[];
+ audit_register_class(AUDIT_CLASS_WRITE_32, parisc32_write_class);
+ audit_register_class(AUDIT_CLASS_READ_32, parisc32_read_class);
+ audit_register_class(AUDIT_CLASS_DIR_WRITE_32, parisc32_dir_class);
+ audit_register_class(AUDIT_CLASS_CHATTR_32, parisc32_chattr_class);
+ audit_register_class(AUDIT_CLASS_SIGNAL_32, parisc32_signal_class);
+#endif
+ audit_register_class(AUDIT_CLASS_WRITE, write_class);
+ audit_register_class(AUDIT_CLASS_READ, read_class);
+ audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+ audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+ audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
+ return 0;
+}
+
+__initcall(audit_classes_init);
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index b521c0adf4ec..c035673209f7 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -602,7 +602,6 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
-EXPORT_SYMBOL_GPL(flush_cache_page);
#ifdef CONFIG_PARISC_TMPALIAS
diff --git a/arch/parisc/kernel/compat_audit.c b/arch/parisc/kernel/compat_audit.c
new file mode 100644
index 000000000000..c74478f6bc74
--- /dev/null
+++ b/arch/parisc/kernel/compat_audit.c
@@ -0,0 +1,40 @@
+#include <asm/unistd.h>
+
+unsigned int parisc32_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned int parisc32_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+unsigned int parisc32_write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+unsigned int parisc32_read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+unsigned int parisc32_signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int parisc32_classify_syscall(unsigned syscall)
+{
+ switch (syscall) {
+ case __NR_open:
+ return 2;
+ case __NR_openat:
+ return 3;
+ case __NR_execve:
+ return 5;
+ default:
+ return 1;
+ }
+}
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 37aabd772fbb..d2d58258aea6 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -195,6 +195,8 @@ common_stext:
ldw MEM_PDC_HI(%r0),%r6
depd %r6, 31, 32, %r3 /* move to upper word */
+ mfctl %cr30,%r6 /* PCX-W2 firmware bug */
+
ldo PDC_PSW(%r0),%arg0 /* 21 */
ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
@@ -203,6 +205,8 @@ common_stext:
copy %r0,%arg3
stext_pdc_ret:
+ mtctl %r6,%cr30 /* restore task thread info */
+
/* restore rfi target address*/
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
tophys_r1 %r10
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 2e6443b1e922..ef5927685299 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -499,22 +499,9 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
*irq_stack_in_use = 1;
}
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
{
- __u32 pending;
- unsigned long flags;
-
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
-
- pending = local_softirq_pending();
-
- if (pending)
- execute_on_irq_stack(__do_softirq, 0);
-
- local_irq_restore(flags);
+ execute_on_irq_stack(__do_softirq, 0);
}
#endif /* CONFIG_IRQSTACKS */
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 534abd4936e1..e842ee233db4 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -19,6 +19,7 @@
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/signal.h>
+#include <linux/audit.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -267,11 +268,28 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
long do_syscall_trace_enter(struct pt_regs *regs)
{
+ long ret = 0;
+
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
- return -1L;
-
- return regs->gr[20];
+ ret = -1L;
+
+#ifdef CONFIG_64BIT
+ if (!is_compat_task())
+ audit_syscall_entry(AUDIT_ARCH_PARISC64,
+ regs->gr[20],
+ regs->gr[26], regs->gr[25],
+ regs->gr[24], regs->gr[23]);
+ else
+#endif
+ audit_syscall_entry(AUDIT_ARCH_PARISC,
+ regs->gr[20] & 0xffffffff,
+ regs->gr[26] & 0xffffffff,
+ regs->gr[25] & 0xffffffff,
+ regs->gr[24] & 0xffffffff,
+ regs->gr[23] & 0xffffffff);
+
+ return ret ? : regs->gr[20];
}
void do_syscall_trace_exit(struct pt_regs *regs)
@@ -279,6 +297,8 @@ void do_syscall_trace_exit(struct pt_regs *regs)
int stepping = test_thread_flag(TIF_SINGLESTEP) ||
test_thread_flag(TIF_BLOCKSTEP);
+ audit_syscall_exit(regs);
+
if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, stepping);
}
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index e767ab733e32..a63bb179f79a 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -649,10 +649,8 @@ cas_action:
/* Two exception table entries, one for the load,
the other for the store. Either return -EFAULT.
Each of the entries must be relocated. */
- .section __ex_table,"aw"
- ASM_ULONG_INSN (1b - linux_gateway_page), (3b - linux_gateway_page)
- ASM_ULONG_INSN (2b - linux_gateway_page), (3b - linux_gateway_page)
- .previous
+ ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
/* Make sure nothing else is placed on this page */
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 6f2d9355efe2..a512f07d4feb 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -88,9 +88,7 @@ ENDPROC(lclear_user)
ldo 1(%r25),%r25
.previous
- .section __ex_table,"aw"
- ASM_ULONG_INSN 1b,2b
- .previous
+ ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
.procend
@@ -129,10 +127,8 @@ ENDPROC(lstrnlen_user)
copy %r24,%r26 /* reset r26 so 0 is returned on fault */
.previous
- .section __ex_table,"aw"
- ASM_ULONG_INSN 1b,3b
- ASM_ULONG_INSN 2b,3b
- .previous
+ ASM_EXCEPTIONTABLE_ENTRY(1b,3b)
+ ASM_EXCEPTIONTABLE_ENTRY(2b,3b)
.procend
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 0293588d5b8c..df0d32971cdf 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -142,6 +142,12 @@ int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fix;
+ /* If we only stored 32bit addresses in the exception table we can drop
+ * out if we faulted on a 64bit address. */
+ if ((sizeof(regs->iaoq[0]) > sizeof(fix->insn))
+ && (regs->iaoq[0] >> 32))
+ return 0;
+
fix = search_exception_tables(regs->iaoq[0]);
if (fix) {
struct exception_data *d;
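This guard matches the comment added to uaccess.h earlier in this diff: if the exception table were built with 32-bit entries, a fault address with any of the upper bits set could never match, so the table search can be skipped. A standalone illustration of the check, using made-up addresses and a hypothetical 32-bit entry type rather than the real parisc structures:

#include <stdint.h>
#include <stdio.h>

struct entry32 {
	uint32_t insn;	/* address allowed to fault (truncated) */
	uint32_t fixup;	/* address of the fixup routine */
};

static int worth_searching(uint64_t fault_addr)
{
	/* Same shape as the test above: fault address wider than the
	 * table entry, and something set in the upper half, so no
	 * entry can possibly match. */
	if (sizeof(fault_addr) > sizeof(((struct entry32 *)0)->insn) &&
	    (fault_addr >> 32))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", worth_searching(0x00000000004110a0ULL));	/* 1: search */
	printf("%d\n", worth_searching(0x40000000004110a0ULL));	/* 0: skip   */
	return 0;
}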
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 38f3b7e47ec5..5ad1c395ca08 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -85,6 +85,7 @@ config GENERIC_HWEIGHT
config PPC
bool
default y
+ select ARCH_MIGHT_HAVE_PC_PARPORT
select BINFMT_ELF
select OF
select OF_EARLY_FLATTREE
@@ -97,7 +98,7 @@ config PPC
select VIRT_TO_BUS if !PPC64
select HAVE_IDE
select HAVE_IOREMAP_PROT
- select HAVE_EFFICIENT_UNALIGNED_ACCESS
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_LITTLE_ENDIAN
select HAVE_KPROBES
select HAVE_ARCH_KGDB
select HAVE_KRETPROBES
@@ -138,6 +139,10 @@ config PPC
select OLD_SIGSUSPEND
select OLD_SIGACTION if PPC32
select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK
+
+config GENERIC_CSUM
+ def_bool CPU_LITTLE_ENDIAN
config EARLY_PRINTK
bool
@@ -404,7 +409,7 @@ config CRASH_DUMP
config FA_DUMP
bool "Firmware-assisted dump"
- depends on PPC64 && PPC_RTAS && CRASH_DUMP
+ depends on PPC64 && PPC_RTAS && CRASH_DUMP && KEXEC
help
A robust mechanism to get reliable kernel crash dump with
assistance from firmware. This approach does not use kexec,
@@ -417,7 +422,7 @@ config FA_DUMP
config IRQ_ALL_CPUS
bool "Distribute interrupts on all CPUs by default"
- depends on SMP && !MV64360
+ depends on SMP
help
This option gives the kernel permission to distribute IRQs across
multiple CPUs. Saying N here will route all IRQs to the first
@@ -1009,6 +1014,9 @@ config PHYSICAL_START
default "0x00000000"
endif
+config ARCH_RANDOM
+ def_bool n
+
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 51cfb78d4061..607acf54a425 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -36,17 +36,26 @@ KBUILD_DEFCONFIG := ppc64_defconfig
endif
ifeq ($(CONFIG_PPC64),y)
-OLDARCH := ppc64
-
new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
ifeq ($(new_nm),y)
NM := $(NM) --synthetic
endif
+endif
+ifeq ($(CONFIG_PPC64),y)
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+OLDARCH := ppc64le
+else
+OLDARCH := ppc64
+endif
+else
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+OLDARCH := ppcle
else
OLDARCH := ppc
endif
+endif
# It seems there are times we use this Makefile without
# including the config file, but this replicates the old behaviour
@@ -56,11 +65,29 @@ endif
UTS_MACHINE := $(OLDARCH)
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+override CC += -mlittle-endian -mno-strict-align
+override AS += -mlittle-endian
+override LD += -EL
+override CROSS32CC += -mlittle-endian
+override CROSS32AS += -mlittle-endian
+LDEMULATION := lppc
+GNUTARGET := powerpcle
+MULTIPLEWORD := -mno-multiple
+else
+override CC += -mbig-endian
+override AS += -mbig-endian
+override LD += -EB
+LDEMULATION := ppc
+GNUTARGET := powerpc
+MULTIPLEWORD := -mmultiple
+endif
+
ifeq ($(HAS_BIARCH),y)
override AS += -a$(CONFIG_WORD_SIZE)
-override LD += -m elf$(CONFIG_WORD_SIZE)ppc
+override LD += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION)
override CC += -m$(CONFIG_WORD_SIZE)
-override AR := GNUTARGET=elf$(CONFIG_WORD_SIZE)-powerpc $(AR)
+override AR := GNUTARGET=elf$(CONFIG_WORD_SIZE)-$(GNUTARGET) $(AR)
endif
LDFLAGS_vmlinux-y := -Bstatic
@@ -86,7 +113,7 @@ endif
CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
-CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
+CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD)
ifeq ($(CONFIG_PPC_BOOK3S_64),y)
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4)
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 15ca2255f438..ca7f08cc4afd 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -22,7 +22,8 @@ all: $(obj)/zImage
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -Os -msoft-float -pipe \
-fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
- -isystem $(shell $(CROSS32CC) -print-file-name=include)
+ -isystem $(shell $(CROSS32CC) -print-file-name=include) \
+ -mbig-endian
BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
ifdef CONFIG_DEBUG_INFO
diff --git a/arch/powerpc/boot/dts/b4860emu.dts b/arch/powerpc/boot/dts/b4860emu.dts
new file mode 100644
index 000000000000..7290021f2dfc
--- /dev/null
+++ b/arch/powerpc/boot/dts/b4860emu.dts
@@ -0,0 +1,218 @@
+/*
+ * B4860 emulator Device Tree Source
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * This software is provided by Freescale Semiconductor "as is" and any
+ * express or implied warranties, including, but not limited to, the implied
+ * warranties of merchantability and fitness for a particular purpose are
+ * disclaimed. In no event shall Freescale Semiconductor be liable for any
+ * direct, indirect, incidental, special, exemplary, or consequential damages
+ * (including, but not limited to, procurement of substitute goods or services;
+ * loss of use, data, or profits; or business interruption) however caused and
+ * on any theory of liability, whether in contract, strict liability, or tort
+ * (including negligence or otherwise) arising in any way out of the use of
+ * this software, even if advised of the possibility of such damage.
+ */
+
+/dts-v1/;
+
+/include/ "fsl/e6500_power_isa.dtsi"
+
+/ {
+ compatible = "fsl,B4860";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,e6500@0 {
+ device_type = "cpu";
+ reg = <0 1>;
+ next-level-cache = <&L2>;
+ };
+ cpu1: PowerPC,e6500@2 {
+ device_type = "cpu";
+ reg = <2 3>;
+ next-level-cache = <&L2>;
+ };
+ cpu2: PowerPC,e6500@4 {
+ device_type = "cpu";
+ reg = <4 5>;
+ next-level-cache = <&L2>;
+ };
+ cpu3: PowerPC,e6500@6 {
+ device_type = "cpu";
+ reg = <6 7>;
+ next-level-cache = <&L2>;
+ };
+ };
+};
+
+/ {
+ model = "fsl,B4860QDS";
+ compatible = "fsl,B4860EMU", "fsl,B4860QDS";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ ifc: localbus@ffe124000 {
+ reg = <0xf 0xfe124000 0 0x2000>;
+ ranges = <0 0 0xf 0xe8000000 0x08000000
+ 2 0 0xf 0xff800000 0x00010000
+ 3 0 0xf 0xffdf0000 0x00008000>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+ bank-width = <2>;
+ device-width = <1>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ soc: soc@ffe000000 {
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
+ };
+};
+
+&ifc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc", "simple-bus";
+ interrupts = <25 2 0 0>;
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+
+ soc-sram-error {
+ compatible = "fsl,soc-sram-error";
+ interrupts = <16 2 1 2>;
+ };
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ ddr1: memory-controller@8000 {
+ compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupts = <16 2 1 8>;
+ };
+
+ ddr2: memory-controller@9000 {
+ compatible = "fsl,qoriq-memory-controller-v4.5","fsl,qoriq-memory-controller";
+ reg = <0x9000 0x1000>;
+ interrupts = <16 2 1 9>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,b4-l3-cache-controller", "cache";
+ reg = <0x10000 0x1000
+ 0x11000 0x1000>;
+ interrupts = <16 2 1 4>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,b4-corenet-cf";
+ reg = <0x18000 0x1000>;
+ interrupts = <16 2 1 0>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,pamu-v1.0", "fsl,pamu";
+ reg = <0x20000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupts = <
+ 24 2 0 0
+ 16 2 1 1>;
+ pamu0: pamu@0 {
+ reg = <0 0x1000>;
+ fsl,primary-cache-geometry = <8 1>;
+ fsl,secondary-cache-geometry = <32 2>;
+ };
+ };
+
+/include/ "fsl/qoriq-mpic.dtsi"
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,b4-device-config";
+ reg = <0xe0000 0xe00>;
+ fsl,has-rstcr;
+ fsl,liodn-bits = <12>;
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,b4-clockgen", "fsl,qoriq-clockgen-2.0";
+ reg = <0xe1000 0x1000>;
+ };
+
+/include/ "fsl/qoriq-dma-0.dtsi"
+ dma@100300 {
+ fsl,iommu-parent = <&pamu0>;
+ fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */
+ };
+
+/include/ "fsl/qoriq-dma-1.dtsi"
+ dma@101300 {
+ fsl,iommu-parent = <&pamu0>;
+ fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */
+ };
+
+/include/ "fsl/qoriq-i2c-0.dtsi"
+/include/ "fsl/qoriq-i2c-1.dtsi"
+/include/ "fsl/qoriq-duart-0.dtsi"
+/include/ "fsl/qoriq-duart-1.dtsi"
+
+ L2: l2-cache-controller@c20000 {
+ compatible = "fsl,b4-l2-cache-controller";
+ reg = <0xc20000 0x1000>;
+ next-level-cache = <&cpc>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/b4qds.dtsi b/arch/powerpc/boot/dts/b4qds.dtsi
index e6d2f8f90544..8b47edcfabf0 100644
--- a/arch/powerpc/boot/dts/b4qds.dtsi
+++ b/arch/powerpc/boot/dts/b4qds.dtsi
@@ -120,25 +120,38 @@
};
i2c@118000 {
- eeprom@50 {
- compatible = "at24,24c64";
- reg = <0x50>;
- };
- eeprom@51 {
- compatible = "at24,24c256";
- reg = <0x51>;
- };
- eeprom@53 {
- compatible = "at24,24c256";
- reg = <0x53>;
- };
- eeprom@57 {
- compatible = "at24,24c256";
- reg = <0x57>;
- };
- rtc@68 {
- compatible = "dallas,ds3232";
- reg = <0x68>;
+ mux@77 {
+ compatible = "nxp,pca9547";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ eeprom@50 {
+ compatible = "at24,24c64";
+ reg = <0x50>;
+ };
+ eeprom@51 {
+ compatible = "at24,24c256";
+ reg = <0x51>;
+ };
+ eeprom@53 {
+ compatible = "at24,24c256";
+ reg = <0x53>;
+ };
+ eeprom@57 {
+ compatible = "at24,24c256";
+ reg = <0x57>;
+ };
+ rtc@68 {
+ compatible = "dallas,ds3232";
+ reg = <0x68>;
+ };
+ };
};
};
diff --git a/arch/powerpc/boot/dts/c293pcie.dts b/arch/powerpc/boot/dts/c293pcie.dts
index 1238bda8901f..6681cc21030b 100644
--- a/arch/powerpc/boot/dts/c293pcie.dts
+++ b/arch/powerpc/boot/dts/c293pcie.dts
@@ -45,6 +45,7 @@
ifc: ifc@fffe1e000 {
reg = <0xf 0xffe1e000 0 0x2000>;
ranges = <0x0 0x0 0xf 0xec000000 0x04000000
+ 0x1 0x0 0xf 0xff800000 0x00010000
0x2 0x0 0xf 0xffdf0000 0x00010000>;
};
diff --git a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
index 7b4426e0a5a5..c6e451affb05 100644
--- a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
@@ -34,6 +34,8 @@
/dts-v1/;
+/include/ "e6500_power_isa.dtsi"
+
/ {
compatible = "fsl,B4420";
#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
index e5cf6c81dd66..981397518fc6 100644
--- a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
@@ -41,7 +41,7 @@
&rio {
compatible = "fsl,srio";
- interrupts = <16 2 1 11>;
+ interrupts = <16 2 1 20>;
#address-cells = <2>;
#size-cells = <2>;
fsl,iommu-parent = <&pamu0>;
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
index 5263fa46a3fb..9bc26b147900 100644
--- a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
@@ -34,6 +34,8 @@
/dts-v1/;
+/include/ "e6500_power_isa.dtsi"
+
/ {
compatible = "fsl,B4860";
#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
index 5180d9d37989..0c0efa94cfb4 100644
--- a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
@@ -130,7 +130,7 @@ usb@22000 {
/include/ "pq3-esdhc-0.dtsi"
sdhc@2e000 {
- fsl,sdhci-auto-cmd12;
+ sdhci,auto-cmd12;
interrupts = <41 0x2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/bsc9131si-pre.dtsi b/arch/powerpc/boot/dts/fsl/bsc9131si-pre.dtsi
index 743e4aeda349..f6ec4a67560c 100644
--- a/arch/powerpc/boot/dts/fsl/bsc9131si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/bsc9131si-pre.dtsi
@@ -33,6 +33,9 @@
*/
/dts-v1/;
+
+/include/ "e500v2_power_isa.dtsi"
+
/ {
compatible = "fsl,BSC9131";
#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/t4240emu.dts b/arch/powerpc/boot/dts/t4240emu.dts
new file mode 100644
index 000000000000..ee24ab335598
--- /dev/null
+++ b/arch/powerpc/boot/dts/t4240emu.dts
@@ -0,0 +1,268 @@
+/*
+ * T4240 emulator Device Tree Source
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+/include/ "fsl/e6500_power_isa.dtsi"
+/ {
+ compatible = "fsl,T4240";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,e6500@0 {
+ device_type = "cpu";
+ reg = <0 1>;
+ next-level-cache = <&L2_1>;
+ };
+ cpu1: PowerPC,e6500@2 {
+ device_type = "cpu";
+ reg = <2 3>;
+ next-level-cache = <&L2_1>;
+ };
+ cpu2: PowerPC,e6500@4 {
+ device_type = "cpu";
+ reg = <4 5>;
+ next-level-cache = <&L2_1>;
+ };
+ cpu3: PowerPC,e6500@6 {
+ device_type = "cpu";
+ reg = <6 7>;
+ next-level-cache = <&L2_1>;
+ };
+
+ cpu4: PowerPC,e6500@8 {
+ device_type = "cpu";
+ reg = <8 9>;
+ next-level-cache = <&L2_2>;
+ };
+ cpu5: PowerPC,e6500@10 {
+ device_type = "cpu";
+ reg = <10 11>;
+ next-level-cache = <&L2_2>;
+ };
+ cpu6: PowerPC,e6500@12 {
+ device_type = "cpu";
+ reg = <12 13>;
+ next-level-cache = <&L2_2>;
+ };
+ cpu7: PowerPC,e6500@14 {
+ device_type = "cpu";
+ reg = <14 15>;
+ next-level-cache = <&L2_2>;
+ };
+
+ cpu8: PowerPC,e6500@16 {
+ device_type = "cpu";
+ reg = <16 17>;
+ next-level-cache = <&L2_3>;
+ };
+ cpu9: PowerPC,e6500@18 {
+ device_type = "cpu";
+ reg = <18 19>;
+ next-level-cache = <&L2_3>;
+ };
+ cpu10: PowerPC,e6500@20 {
+ device_type = "cpu";
+ reg = <20 21>;
+ next-level-cache = <&L2_3>;
+ };
+ cpu11: PowerPC,e6500@22 {
+ device_type = "cpu";
+ reg = <22 23>;
+ next-level-cache = <&L2_3>;
+ };
+ };
+};
+
+/ {
+ model = "fsl,T4240QDS";
+ compatible = "fsl,T4240EMU", "fsl,T4240QDS";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ ifc: localbus@ffe124000 {
+ reg = <0xf 0xfe124000 0 0x2000>;
+ ranges = <0 0 0xf 0xe8000000 0x08000000
+ 2 0 0xf 0xff800000 0x00010000
+ 3 0 0xf 0xffdf0000 0x00008000>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+
+ bank-width = <2>;
+ device-width = <1>;
+ };
+
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ soc: soc@ffe000000 {
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
+
+ };
+};
+
+&ifc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc", "simple-bus";
+ interrupts = <25 2 0 0>;
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+
+ soc-sram-error {
+ compatible = "fsl,soc-sram-error";
+ interrupts = <16 2 1 29>;
+ };
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ ddr1: memory-controller@8000 {
+ compatible = "fsl,qoriq-memory-controller-v4.7",
+ "fsl,qoriq-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupts = <16 2 1 23>;
+ };
+
+ ddr2: memory-controller@9000 {
+ compatible = "fsl,qoriq-memory-controller-v4.7",
+ "fsl,qoriq-memory-controller";
+ reg = <0x9000 0x1000>;
+ interrupts = <16 2 1 22>;
+ };
+
+ ddr3: memory-controller@a000 {
+ compatible = "fsl,qoriq-memory-controller-v4.7",
+ "fsl,qoriq-memory-controller";
+ reg = <0xa000 0x1000>;
+ interrupts = <16 2 1 21>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,t4240-l3-cache-controller", "cache";
+ reg = <0x10000 0x1000
+ 0x11000 0x1000
+ 0x12000 0x1000>;
+ interrupts = <16 2 1 27
+ 16 2 1 26
+ 16 2 1 25>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,corenet-cf";
+ reg = <0x18000 0x1000>;
+ interrupts = <16 2 1 31>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,pamu-v1.0", "fsl,pamu";
+ reg = <0x20000 0x6000>;
+ interrupts = <
+ 24 2 0 0
+ 16 2 1 30>;
+ };
+
+/include/ "fsl/qoriq-mpic.dtsi"
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,t4240-device-config", "fsl,qoriq-device-config-2.0";
+ reg = <0xe0000 0xe00>;
+ fsl,has-rstcr;
+ fsl,liodn-bits = <12>;
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0";
+ reg = <0xe1000 0x1000>;
+ };
+
+/include/ "fsl/qoriq-dma-0.dtsi"
+/include/ "fsl/qoriq-dma-1.dtsi"
+
+/include/ "fsl/qoriq-i2c-0.dtsi"
+/include/ "fsl/qoriq-i2c-1.dtsi"
+/include/ "fsl/qoriq-duart-0.dtsi"
+/include/ "fsl/qoriq-duart-1.dtsi"
+
+ L2_1: l2-cache-controller@c20000 {
+ compatible = "fsl,t4240-l2-cache-controller";
+ reg = <0xc20000 0x40000>;
+ next-level-cache = <&cpc>;
+ };
+ L2_2: l2-cache-controller@c60000 {
+ compatible = "fsl,t4240-l2-cache-controller";
+ reg = <0xc60000 0x40000>;
+ next-level-cache = <&cpc>;
+ };
+ L2_3: l2-cache-controller@ca0000 {
+ compatible = "fsl,t4240-l2-cache-controller";
+ reg = <0xca0000 0x40000>;
+ next-level-cache = <&cpc>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/t4240qds.dts b/arch/powerpc/boot/dts/t4240qds.dts
index 0555976dd0f3..63e81b010804 100644
--- a/arch/powerpc/boot/dts/t4240qds.dts
+++ b/arch/powerpc/boot/dts/t4240qds.dts
@@ -118,36 +118,53 @@
};
i2c@118000 {
- eeprom@51 {
- compatible = "at24,24c256";
- reg = <0x51>;
- };
- eeprom@52 {
- compatible = "at24,24c256";
- reg = <0x52>;
- };
- eeprom@53 {
- compatible = "at24,24c256";
- reg = <0x53>;
- };
- eeprom@54 {
- compatible = "at24,24c256";
- reg = <0x54>;
- };
- eeprom@55 {
- compatible = "at24,24c256";
- reg = <0x55>;
- };
- eeprom@56 {
- compatible = "at24,24c256";
- reg = <0x56>;
- };
- rtc@68 {
- compatible = "dallas,ds3232";
- reg = <0x68>;
- interrupts = <0x1 0x1 0 0>;
+ mux@77 {
+ compatible = "nxp,pca9547";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ eeprom@51 {
+ compatible = "at24,24c256";
+ reg = <0x51>;
+ };
+ eeprom@52 {
+ compatible = "at24,24c256";
+ reg = <0x52>;
+ };
+ eeprom@53 {
+ compatible = "at24,24c256";
+ reg = <0x53>;
+ };
+ eeprom@54 {
+ compatible = "at24,24c256";
+ reg = <0x54>;
+ };
+ eeprom@55 {
+ compatible = "at24,24c256";
+ reg = <0x55>;
+ };
+ eeprom@56 {
+ compatible = "at24,24c256";
+ reg = <0x56>;
+ };
+ rtc@68 {
+ compatible = "dallas,ds3232";
+ reg = <0x68>;
+ interrupts = <0x1 0x1 0 0>;
+ };
+ };
};
};
+
+ sdhc@114000 {
+ voltage-ranges = <1800 1800 3300 3300>;
+ };
};
pci0: pcie@ffe240000 {
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index cd7af841ba05..ac16e9984ecb 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -150,18 +150,22 @@ case "$platform" in
pseries)
platformo="$object/of.o $object/epapr.o"
link_address='0x4000000'
+ make_space=n
;;
maple)
platformo="$object/of.o $object/epapr.o"
link_address='0x400000'
+ make_space=n
;;
pmac|chrp)
platformo="$object/of.o $object/epapr.o"
+ make_space=n
;;
coff)
platformo="$object/crt0.o $object/of.o $object/epapr.o"
lds=$object/zImage.coff.lds
link_address='0x500000'
+ make_space=n
pie=
;;
miboot|uboot*)
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index 3dfab4c40c76..bbd794deb6eb 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -23,11 +23,7 @@ CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
-CONFIG_P2041_RDB=y
-CONFIG_P3041_DS=y
-CONFIG_P4080_DS=y
-CONFIG_P5020_DS=y
-CONFIG_P5040_DS=y
+CONFIG_CORENET_GENERIC=y
CONFIG_HIGHMEM=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
@@ -104,6 +100,7 @@ CONFIG_FSL_PQ_MDIO=y
CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_VITESSE_PHY=y
+CONFIG_AT803X_PHY=y
CONFIG_FIXED_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index fa94fb3bb44d..63508ddee11c 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -21,10 +21,7 @@ CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
-CONFIG_B4_QDS=y
-CONFIG_P5020_DS=y
-CONFIG_P5040_DS=y
-CONFIG_T4240_QDS=y
+CONFIG_CORENET_GENERIC=y
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index dc098d988211..d2e0fab5ee5b 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -138,6 +138,7 @@ CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_CICADA_PHY=y
CONFIG_VITESSE_PHY=y
+CONFIG_AT803X_PHY=y
CONFIG_FIXED_PHY=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 5bca60161bb3..4cb7b59e98bd 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -138,6 +138,7 @@ CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_CICADA_PHY=y
CONFIG_VITESSE_PHY=y
+CONFIG_AT803X_PHY=y
CONFIG_FIXED_PHY=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 0e8cfd09da2f..581a3bcae728 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -2,7 +2,6 @@ CONFIG_PPC64=y
CONFIG_ALTIVEC=y
CONFIG_VSX=y
CONFIG_SMP=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IRQ_DOMAIN_DEBUG=y
@@ -25,7 +24,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
CONFIG_PPC_SPLPAR=y
CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
@@ -50,12 +48,10 @@ CONFIG_CPU_FREQ_PMAC64=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
CONFIG_PPC_TRANSACTIONAL_MEM=y
-CONFIG_HOTPLUG_CPU=y
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_SCHED_SMT=y
-CONFIG_PPC_DENORMALISATION=y
CONFIG_PCCARD=y
CONFIG_ELECTRA_CF=y
CONFIG_HOTPLUG_PCI=y
@@ -89,7 +85,6 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
@@ -131,7 +126,6 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -157,6 +151,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_VIRTIO_BLK=m
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_GENERIC=y
@@ -185,6 +180,10 @@ CONFIG_SCSI_IPR=y
CONFIG_SCSI_QLA_FC=m
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_LPFC=m
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_ALUA=m
CONFIG_ATA=y
CONFIG_SATA_SIL24=y
CONFIG_SATA_SVW=y
@@ -203,6 +202,9 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_UEVENT=y
CONFIG_ADB_PMU=y
CONFIG_PMAC_SMU=y
CONFIG_THERM_PM72=y
@@ -216,6 +218,8 @@ CONFIG_DUMMY=m
CONFIG_NETCONSOLE=y
CONFIG_NETPOLL_TRAP=y
CONFIG_TUN=m
+CONFIG_VIRTIO_NET=m
+CONFIG_VHOST_NET=m
CONFIG_VORTEX=y
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
@@ -262,6 +266,7 @@ CONFIG_HVC_CONSOLE=y
CONFIG_HVC_RTAS=y
CONFIG_HVC_BEAT=y
CONFIG_HVCS=m
+CONFIG_VIRTIO_CONSOLE=m
CONFIG_IBM_BSR=m
CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
@@ -301,7 +306,6 @@ CONFIG_HID_GYRATION=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
@@ -328,6 +332,8 @@ CONFIG_EDAC_MM_EDAC=y
CONFIG_EDAC_PASEMI=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
+CONFIG_VIRTIO_PCI=m
+CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -386,21 +392,19 @@ CONFIG_NLS_UTF8=y
CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_STACK_USAGE=y
CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
CONFIG_XMON=y
CONFIG_BOOTX_TEXT=y
CONFIG_PPC_EARLY_DEBUG=y
-CONFIG_PPC_EARLY_DEBUG_BOOTX=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
@@ -422,4 +426,3 @@ CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=y
-CONFIG_VHOST_NET=m
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 0085dc4642c5..f627fda08953 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -1,7 +1,6 @@
CONFIG_PPC64=y
CONFIG_PPC_BOOK3E_64=y
CONFIG_SMP=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ=y
@@ -23,7 +22,7 @@ CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
CONFIG_EFI_PARTITION=y
-CONFIG_P5020_DS=y
+CONFIG_CORENET_GENERIC=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
@@ -61,7 +60,6 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
@@ -103,7 +101,6 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -193,7 +190,6 @@ CONFIG_PPP_SYNC_TTY=m
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_MISC=y
# CONFIG_SERIO_SERPORT is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
@@ -230,7 +226,6 @@ CONFIG_HID_NTRIG=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_GREENASIA=y
CONFIG_HID_SMARTJOYPLUS=y
@@ -302,19 +297,18 @@ CONFIG_NLS_UTF8=y
CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_STACK_USAGE=y
CONFIG_LATENCYTOP=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
CONFIG_XMON=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 20ebfaf7234b..c2353bf059fd 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -71,7 +71,7 @@ CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_PPC_BESTCOMM=y
CONFIG_GPIO_MPC8XXX=y
-CONFIG_MCU_MPC8349EMITX=m
+CONFIG_MCU_MPC8349EMITX=y
CONFIG_HIGHMEM=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 1d4b9763895d..e9a8b4e0a0f6 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -3,7 +3,6 @@ CONFIG_ALTIVEC=y
CONFIG_VSX=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2048
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
@@ -33,7 +32,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
CONFIG_PPC_SPLPAR=y
CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
@@ -44,7 +42,6 @@ CONFIG_IBMEBUS=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
CONFIG_PPC_TRANSACTIONAL_MEM=y
-CONFIG_HOTPLUG_CPU=y
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_MEMORY_HOTPLUG=y
@@ -52,7 +49,6 @@ CONFIG_MEMORY_HOTREMOVE=y
CONFIG_PPC_64K_PAGES=y
CONFIG_PPC_SUBPAGE_PROT=y
CONFIG_SCHED_SMT=y
-CONFIG_PPC_DENORMALISATION=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_RPA=m
CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
@@ -113,7 +109,6 @@ CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -132,6 +127,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_VIRTIO_BLK=m
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_GENERIC=y
@@ -157,6 +153,10 @@ CONFIG_SCSI_IPR=y
CONFIG_SCSI_QLA_FC=m
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_LPFC=m
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_ALUA=m
CONFIG_ATA=y
# CONFIG_ATA_SFF is not set
CONFIG_MD=y
@@ -174,11 +174,16 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_UEVENT=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
CONFIG_NETCONSOLE=y
CONFIG_NETPOLL_TRAP=y
CONFIG_TUN=m
+CONFIG_VIRTIO_NET=m
+CONFIG_VHOST_NET=m
CONFIG_VORTEX=y
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
@@ -216,6 +221,7 @@ CONFIG_SERIAL_JSM=m
CONFIG_HVC_CONSOLE=y
CONFIG_HVC_RTAS=y
CONFIG_HVCS=m
+CONFIG_VIRTIO_CONSOLE=m
CONFIG_IBM_BSR=m
CONFIG_GEN_RTC=y
CONFIG_RAW_DRIVER=y
@@ -237,7 +243,6 @@ CONFIG_HID_GYRATION=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
@@ -258,6 +263,8 @@ CONFIG_INFINIBAND_IPOIB=m
CONFIG_INFINIBAND_IPOIB_CM=y
CONFIG_INFINIBAND_SRP=m
CONFIG_INFINIBAND_ISER=m
+CONFIG_VIRTIO_PCI=m
+CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -314,18 +321,17 @@ CONFIG_NLS_UTF8=y
CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
-CONFIG_LOCKUP_DETECTOR=y
CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_LOCKUP_DETECTOR=y
CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
@@ -347,4 +353,3 @@ CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=y
-CONFIG_VHOST_NET=m
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 704e6f10ae80..d8f9d2f18a23 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -2,4 +2,5 @@
generic-y += clkdev.h
generic-y += rwsem.h
generic-y += trace_clock.h
+generic-y += preempt.h
generic-y += vtime.h
\ No newline at end of file
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
new file mode 100644
index 000000000000..d853d163ba47
--- /dev/null
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_POWERPC_ARCHRANDOM_H
+#define _ASM_POWERPC_ARCHRANDOM_H
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#include <asm/machdep.h>
+
+static inline int arch_get_random_long(unsigned long *v)
+{
+ if (ppc_md.get_random_long)
+ return ppc_md.get_random_long(v);
+
+ return 0;
+}
+
+static inline int arch_get_random_int(unsigned int *v)
+{
+ unsigned long val;
+ int rc;
+
+ rc = arch_get_random_long(&val);
+ if (rc)
+ *v = val;
+
+ return rc;
+}
+
+int powernv_get_random_long(unsigned long *v);
+
+#endif /* CONFIG_ARCH_RANDOM */
+
+#endif /* _ASM_POWERPC_ARCHRANDOM_H */
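As a side illustration (outside the patch itself): the new arch_get_random_long() wrapper returns 0 when no platform backend is wired into ppc_md.get_random_long, so callers are expected to fall back to another entropy source in that case. A hedged user-space sketch of that calling convention; the hook, the fake RNG and the harness below are stand-ins, not kernel code:

#include <stdio.h>

/* Illustrative stand-in for ppc_md.get_random_long (hypothetical). */
static int (*get_random_long_hook)(unsigned long *v);

/* Mirrors the wrapper above: returns 0 when no backend is available. */
static int sketch_arch_get_random_long(unsigned long *v)
{
	if (get_random_long_hook)
		return get_random_long_hook(v);
	return 0;
}

static int fake_hw_rng(unsigned long *v)
{
	*v = 0xdeadbeefUL;	/* placeholder, not real entropy */
	return 1;
}

int main(void)
{
	unsigned long val;

	if (!sketch_arch_get_random_long(&val))
		puts("no arch RNG wired up, fall back to another source");

	get_random_long_hook = fake_hw_rng;	/* e.g. a powernv-style backend */
	if (sketch_arch_get_random_long(&val))
		printf("hardware random value: %lx\n", val);
	return 0;
}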
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index ce0c28495f9a..8251a3ba870f 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -14,6 +14,9 @@
* which always checksum on 4 octet boundaries. ihl is the number
* of 32-bit words and is always >= 5.
*/
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
/*
@@ -123,5 +126,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
return sum;
#endif
}
+
+#endif
#endif /* __KERNEL__ */
#endif
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
index 9b198d1b3b2b..856f8deb557a 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -77,4 +77,8 @@ static inline unsigned int get_d(u32 inst)
return inst & 0xffff;
}
+static inline unsigned int get_oc(u32 inst)
+{
+ return (inst >> 11) & 0x7fff;
+}
#endif /* __ASM_PPC_DISASSEMBLE_H__ */
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 5a8b82aa7241..4358e3002f35 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -43,6 +43,7 @@ extern struct ppc_emulated {
struct ppc_emulated_entry popcntb;
struct ppc_emulated_entry spe;
struct ppc_emulated_entry string;
+ struct ppc_emulated_entry sync;
struct ppc_emulated_entry unaligned;
#ifdef CONFIG_MATH_EMULATION
struct ppc_emulated_entry math;
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index cca12f084842..894662a5d4d5 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -198,12 +198,27 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
cmpwi r10,0; \
bne do_kvm_##n
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * If hv is possible, interrupts come in to the hv version
+ * of the kvmppc_interrupt code, which then jumps to the PR handler,
+ * kvmppc_interrupt_pr, if the guest is a PR guest.
+ */
+#define kvmppc_interrupt kvmppc_interrupt_hv
+#else
+#define kvmppc_interrupt kvmppc_interrupt_pr
+#endif
+
#define __KVM_HANDLER(area, h, n) \
do_kvm_##n: \
BEGIN_FTR_SECTION_NESTED(947) \
ld r10,area+EX_CFAR(r13); \
std r10,HSTATE_CFAR(r13); \
END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947); \
+ BEGIN_FTR_SECTION_NESTED(948) \
+ ld r10,area+EX_PPR(r13); \
+ std r10,HSTATE_PPR(r13); \
+ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \
ld r10,area+EX_R10(r13); \
stw r9,HSTATE_SCRATCH1(r13); \
ld r9,area+EX_R9(r13); \
@@ -217,6 +232,10 @@ do_kvm_##n: \
ld r10,area+EX_R10(r13); \
beq 89f; \
stw r9,HSTATE_SCRATCH1(r13); \
+ BEGIN_FTR_SECTION_NESTED(948) \
+ ld r9,area+EX_PPR(r13); \
+ std r9,HSTATE_PPR(r13); \
+ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \
ld r9,area+EX_R9(r13); \
std r12,HSTATE_SCRATCH0(r13); \
li r12,n; \
@@ -236,7 +255,7 @@ do_kvm_##n: \
#define KVM_HANDLER_SKIP(area, h, n)
#endif
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
#define KVMTEST_PR(n) __KVMTEST(n)
#define KVM_HANDLER_PR(area, h, n) __KVM_HANDLER(area, h, n)
#define KVM_HANDLER_PR_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
diff --git a/arch/powerpc/include/asm/fsl_ifc.h b/arch/powerpc/include/asm/fsl_ifc.h
index b8a4b9bc50b3..f49ddb1b2273 100644
--- a/arch/powerpc/include/asm/fsl_ifc.h
+++ b/arch/powerpc/include/asm/fsl_ifc.h
@@ -93,6 +93,7 @@
#define CSOR_NAND_PGS_512 0x00000000
#define CSOR_NAND_PGS_2K 0x00080000
#define CSOR_NAND_PGS_4K 0x00100000
+#define CSOR_NAND_PGS_8K 0x00180000
/* Spare region Size */
#define CSOR_NAND_SPRZ_MASK 0x0000E000
#define CSOR_NAND_SPRZ_SHIFT 13
@@ -102,6 +103,7 @@
#define CSOR_NAND_SPRZ_210 0x00006000
#define CSOR_NAND_SPRZ_218 0x00008000
#define CSOR_NAND_SPRZ_224 0x0000A000
+#define CSOR_NAND_SPRZ_CSOR_EXT 0x0000C000
/* Pages Per Block */
#define CSOR_NAND_PB_MASK 0x00000700
#define CSOR_NAND_PB_SHIFT 8
diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h
index d3f64f361814..d4a5315718ca 100644
--- a/arch/powerpc/include/asm/hvsi.h
+++ b/arch/powerpc/include/asm/hvsi.h
@@ -25,7 +25,7 @@
struct hvsi_header {
uint8_t type;
uint8_t len;
- uint16_t seqno;
+ __be16 seqno;
} __attribute__((packed));
struct hvsi_data {
@@ -35,24 +35,24 @@ struct hvsi_data {
struct hvsi_control {
struct hvsi_header hdr;
- uint16_t verb;
+ __be16 verb;
/* optional depending on verb: */
- uint32_t word;
- uint32_t mask;
+ __be32 word;
+ __be32 mask;
} __attribute__((packed));
struct hvsi_query {
struct hvsi_header hdr;
- uint16_t verb;
+ __be16 verb;
} __attribute__((packed));
struct hvsi_query_response {
struct hvsi_header hdr;
- uint16_t verb;
- uint16_t query_seqno;
+ __be16 verb;
+ __be16 query_seqno;
union {
uint8_t version;
- uint32_t mctrl_word;
+ __be32 mctrl_word;
} u;
} __attribute__((packed));
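As a side illustration (outside the patch itself): re-typing the hvsi fields as __be16/__be32 documents that they are big-endian on the wire, so accesses are expected to go through byte-order helpers (be16_to_cpu() and friends in the kernel) and sparse can flag missing conversions. A small user-space sketch of the same pattern using the glibc be16toh() helper; the struct name and the wire bytes below are made up:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>	/* be16toh(); kernel code would use be16_to_cpu() */

/* Same shape as struct hvsi_header above: seqno travels big-endian. */
struct sketch_hvsi_header {
	uint8_t  type;
	uint8_t  len;
	uint16_t seqno_be;	/* stored big-endian, like the __be16 field */
} __attribute__((packed));

int main(void)
{
	/* Hypothetical bytes off the wire: type 0xff, len 4, seqno 0x0102. */
	unsigned char wire[] = { 0xff, 0x04, 0x01, 0x02 };
	struct sketch_hvsi_header hdr;

	memcpy(&hdr, wire, sizeof(hdr));
	/* Prints 258 (0x0102) on both big- and little-endian hosts. */
	printf("seqno = %u\n", be16toh(hdr.seqno_be));
	return 0;
}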
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 5a64757dc0d1..575fbf81fad0 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -21,7 +21,7 @@ extern struct pci_dev *isa_bridge_pcidev;
/*
* has legacy ISA devices ?
*/
-#define arch_has_dev_port() (isa_bridge_pcidev != NULL)
+#define arch_has_dev_port() (isa_bridge_pcidev != NULL || isa_io_special)
#endif
#include <linux/device.h>
@@ -113,7 +113,7 @@ extern bool isa_io_special;
/* gcc 4.0 and older doesn't have 'Z' constraint */
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0)
-#define DEF_MMIO_IN_LE(name, size, insn) \
+#define DEF_MMIO_IN_X(name, size, insn) \
static inline u##size name(const volatile u##size __iomem *addr) \
{ \
u##size ret; \
@@ -122,7 +122,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
return ret; \
}
-#define DEF_MMIO_OUT_LE(name, size, insn) \
+#define DEF_MMIO_OUT_X(name, size, insn) \
static inline void name(volatile u##size __iomem *addr, u##size val) \
{ \
__asm__ __volatile__("sync;"#insn" %1,0,%2" \
@@ -130,7 +130,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
IO_SET_SYNC_FLAG(); \
}
#else /* newer gcc */
-#define DEF_MMIO_IN_LE(name, size, insn) \
+#define DEF_MMIO_IN_X(name, size, insn) \
static inline u##size name(const volatile u##size __iomem *addr) \
{ \
u##size ret; \
@@ -139,7 +139,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
return ret; \
}
-#define DEF_MMIO_OUT_LE(name, size, insn) \
+#define DEF_MMIO_OUT_X(name, size, insn) \
static inline void name(volatile u##size __iomem *addr, u##size val) \
{ \
__asm__ __volatile__("sync;"#insn" %1,%y0" \
@@ -148,7 +148,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
}
#endif
-#define DEF_MMIO_IN_BE(name, size, insn) \
+#define DEF_MMIO_IN_D(name, size, insn) \
static inline u##size name(const volatile u##size __iomem *addr) \
{ \
u##size ret; \
@@ -157,7 +157,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
return ret; \
}
-#define DEF_MMIO_OUT_BE(name, size, insn) \
+#define DEF_MMIO_OUT_D(name, size, insn) \
static inline void name(volatile u##size __iomem *addr, u##size val) \
{ \
__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \
@@ -165,22 +165,37 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
IO_SET_SYNC_FLAG(); \
}
+DEF_MMIO_IN_D(in_8, 8, lbz);
+DEF_MMIO_OUT_D(out_8, 8, stb);
-DEF_MMIO_IN_BE(in_8, 8, lbz);
-DEF_MMIO_IN_BE(in_be16, 16, lhz);
-DEF_MMIO_IN_BE(in_be32, 32, lwz);
-DEF_MMIO_IN_LE(in_le16, 16, lhbrx);
-DEF_MMIO_IN_LE(in_le32, 32, lwbrx);
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_IN_D(in_be16, 16, lhz);
+DEF_MMIO_IN_D(in_be32, 32, lwz);
+DEF_MMIO_IN_X(in_le16, 16, lhbrx);
+DEF_MMIO_IN_X(in_le32, 32, lwbrx);
-DEF_MMIO_OUT_BE(out_8, 8, stb);
-DEF_MMIO_OUT_BE(out_be16, 16, sth);
-DEF_MMIO_OUT_BE(out_be32, 32, stw);
-DEF_MMIO_OUT_LE(out_le16, 16, sthbrx);
-DEF_MMIO_OUT_LE(out_le32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_be16, 16, sth);
+DEF_MMIO_OUT_D(out_be32, 32, stw);
+DEF_MMIO_OUT_X(out_le16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_le32, 32, stwbrx);
+#else
+DEF_MMIO_IN_X(in_be16, 16, lhbrx);
+DEF_MMIO_IN_X(in_be32, 32, lwbrx);
+DEF_MMIO_IN_D(in_le16, 16, lhz);
+DEF_MMIO_IN_D(in_le32, 32, lwz);
+
+DEF_MMIO_OUT_X(out_be16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_be32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_le16, 16, sth);
+DEF_MMIO_OUT_D(out_le32, 32, stw);
+
+#endif /* __BIG_ENDIAN */
#ifdef __powerpc64__
-DEF_MMIO_OUT_BE(out_be64, 64, std);
-DEF_MMIO_IN_BE(in_be64, 64, ld);
+
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_OUT_D(out_be64, 64, std);
+DEF_MMIO_IN_D(in_be64, 64, ld);
/* There are no asm instructions for 64-bit reverse loads and stores */
static inline u64 in_le64(const volatile u64 __iomem *addr)
@@ -192,6 +207,22 @@ static inline void out_le64(volatile u64 __iomem *addr, u64 val)
{
out_be64(addr, swab64(val));
}
+#else
+DEF_MMIO_OUT_D(out_le64, 64, std);
+DEF_MMIO_IN_D(in_le64, 64, ld);
+
+/* There are no asm instructions for 64-bit reverse loads and stores */
+static inline u64 in_be64(const volatile u64 __iomem *addr)
+{
+ return swab64(in_le64(addr));
+}
+
+static inline void out_be64(volatile u64 __iomem *addr, u64 val)
+{
+ out_le64(addr, swab64(val));
+}
+
+#endif
#endif /* __powerpc64__ */
/*
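As a side illustration (outside the patch itself): after this rework the accessor macros are picked by whether the access matches the kernel's own byte order, with the "D" forms using plain loads/stores (e.g. lwz/stw) and the "X" forms using the byte-reversing indexed instructions (lhbrx/lwbrx, sthbrx/stwbrx), so in_be32()/out_le32() and friends keep their meaning on both big- and little-endian builds. A rough user-space sketch of the same selection, substituting an explicit byte swap for the PowerPC instructions; the register value is hypothetical:

#include <stdio.h>
#include <stdint.h>
#include <byteswap.h>

/* Compile-time endianness test, analogous to the __BIG_ENDIAN__ check above. */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define cpu_is_be 1
#else
#define cpu_is_be 0
#endif

/* Read a 32-bit big-endian device register: direct load on BE hosts,
 * byte-reversed load on LE hosts (the kernel uses lwz vs. lwbrx). */
static uint32_t sketch_in_be32(const volatile uint32_t *reg)
{
	uint32_t raw = *reg;

	return cpu_is_be ? raw : bswap_32(raw);
}

int main(void)
{
	/* Hypothetical memory-mapped register holding big-endian 0x11223344. */
	uint32_t fake_reg = cpu_is_be ? 0x11223344u : bswap_32(0x11223344u);

	printf("in_be32 -> 0x%08x\n", sketch_in_be32(&fake_reg));
	return 0;
}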
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 851bac7afa4b..1bd92fd43cfb 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -123,6 +123,8 @@
#define BOOK3S_HFLAG_SLB 0x2
#define BOOK3S_HFLAG_PAIRED_SINGLE 0x4
#define BOOK3S_HFLAG_NATIVE_PS 0x8
+#define BOOK3S_HFLAG_MULTI_PGSIZE 0x10
+#define BOOK3S_HFLAG_NEW_TLBIE 0x20
#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
@@ -136,6 +138,8 @@
#define KVM_GUEST_MODE_NONE 0
#define KVM_GUEST_MODE_GUEST 1
#define KVM_GUEST_MODE_SKIP 2
+#define KVM_GUEST_MODE_GUEST_HV 3
+#define KVM_GUEST_MODE_HOST_HV 4
#define KVM_INST_FETCH_FAILED -1
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index fa19e2f1a874..4a594b76674d 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -58,16 +58,18 @@ struct hpte_cache {
struct hlist_node list_pte_long;
struct hlist_node list_vpte;
struct hlist_node list_vpte_long;
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct hlist_node list_vpte_64k;
+#endif
struct rcu_head rcu_head;
u64 host_vpn;
u64 pfn;
ulong slot;
struct kvmppc_pte pte;
+ int pagesize;
};
struct kvmppc_vcpu_book3s {
- struct kvm_vcpu vcpu;
- struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
struct kvmppc_sid_map sid_map[SID_MAP_NUM];
struct {
u64 esid;
@@ -99,6 +101,9 @@ struct kvmppc_vcpu_book3s {
struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
+#endif
int hpte_cache_count;
spinlock_t mmu_lock;
};
@@ -107,8 +112,9 @@ struct kvmppc_vcpu_book3s {
#define CONTEXT_GUEST 1
#define CONTEXT_GUEST_END 2
-#define VSID_REAL 0x0fffffffffc00000ULL
-#define VSID_BAT 0x0fffffffffb00000ULL
+#define VSID_REAL 0x07ffffffffc00000ULL
+#define VSID_BAT 0x07ffffffffb00000ULL
+#define VSID_64K 0x0800000000000000ULL
#define VSID_1T 0x1000000000000000ULL
#define VSID_REAL_DR 0x2000000000000000ULL
#define VSID_REAL_IR 0x4000000000000000ULL
@@ -118,11 +124,12 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
-extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+ bool iswrite);
+extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -134,6 +141,7 @@ extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -151,7 +159,8 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+ bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
@@ -172,6 +181,8 @@ extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+ unsigned long mask);
extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
@@ -184,11 +195,9 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
- return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
+ return vcpu->arch.book3s;
}
-extern void kvm_return_point(void);
-
/* Also add subarch specific defines */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -198,203 +207,6 @@ extern void kvm_return_point(void);
#include <asm/kvm_book3s_64.h>
#endif
-#ifdef CONFIG_KVM_BOOK3S_PR
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
- return to_book3s(vcpu)->hior;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
- unsigned long pending_now, unsigned long old_pending)
-{
- if (pending_now)
- vcpu->arch.shared->int_pending = 1;
- else if (old_pending)
- vcpu->arch.shared->int_pending = 0;
-}
-
-static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
-{
- if ( num < 14 ) {
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- svcpu->gpr[num] = val;
- svcpu_put(svcpu);
- to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
- } else
- vcpu->arch.gpr[num] = val;
-}
-
-static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
-{
- if ( num < 14 ) {
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- ulong r = svcpu->gpr[num];
- svcpu_put(svcpu);
- return r;
- } else
- return vcpu->arch.gpr[num];
-}
-
-static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
-{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- svcpu->cr = val;
- svcpu_put(svcpu);
- to_book3s(vcpu)->shadow_vcpu->cr = val;
-}
-
-static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- u32 r;
- r = svcpu->cr;
- svcpu_put(svcpu);
- return r;
-}
-
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
-{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- svcpu->xer = val;
- to_book3s(vcpu)->shadow_vcpu->xer = val;
- svcpu_put(svcpu);
-}
-
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- u32 r;
- r = svcpu->xer;
- svcpu_put(svcpu);
- return r;
-}
-
-static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
-{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- svcpu->ctr = val;
- svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- ulong r;
- r = svcpu->ctr;
- svcpu_put(svcpu);
- return r;
-}
-
-static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
-{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- svcpu->lr = val;
- svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- ulong r;
- r = svcpu->lr;
- svcpu_put(svcpu);
- return r;
-}
-
-static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
-{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- svcpu->pc = val;
- svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- ulong r;
- r = svcpu->pc;
- svcpu_put(svcpu);
- return r;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
- ulong pc = kvmppc_get_pc(vcpu);
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- u32 r;
-
- /* Load the instruction manually if it failed to do so in the
- * exit path */
- if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
- kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
- r = svcpu->last_inst;
- svcpu_put(svcpu);
- return r;
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
- ulong pc = kvmppc_get_pc(vcpu) - 4;
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- u32 r;
-
- /* Load the instruction manually if it failed to do so in the
- * exit path */
- if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
- kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
- r = svcpu->last_inst;
- svcpu_put(svcpu);
- return r;
-}
-
-static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- ulong r;
- r = svcpu->fault_dar;
- svcpu_put(svcpu);
- return r;
-}
-
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
- ulong crit_raw = vcpu->arch.shared->critical;
- ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
- bool crit;
-
- /* Truncate crit indicators in 32 bit mode */
- if (!(vcpu->arch.shared->msr & MSR_SF)) {
- crit_raw &= 0xffffffff;
- crit_r1 &= 0xffffffff;
- }
-
- /* Critical section when crit == r1 */
- crit = (crit_raw == crit_r1);
- /* ... and we're in supervisor mode */
- crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
-
- return crit;
-}
-#else /* CONFIG_KVM_BOOK3S_PR */
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
- unsigned long pending_now, unsigned long old_pending)
-{
-}
-
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.gpr[num] = val;
@@ -489,12 +301,6 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return vcpu->arch.fault_dar;
}
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
- return false;
-}
-#endif
-
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
* instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3 0x113724FA
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
index ce0ef6ce8f86..c720e0b3238d 100644
--- a/arch/powerpc/include/asm/kvm_book3s_32.h
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -22,7 +22,7 @@
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
- return to_book3s(vcpu)->shadow_vcpu;
+ return vcpu->arch.shadow_vcpu;
}
static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 86d638a3b359..bf0fa8b0a883 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,7 +20,7 @@
#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
preempt_disable();
@@ -35,7 +35,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
#define SPAPR_TCE_SHIFT 12
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
extern unsigned long kvm_rma_pages;
#endif
@@ -278,7 +278,7 @@ static inline int is_vrma_hpte(unsigned long hpte_v)
(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
* Note modification of an HPTE; set the HPTE modified bit
* if anyone is interested.
@@ -289,6 +289,6 @@ static inline void note_hpte_modification(struct kvm *kvm,
if (atomic_read(&kvm->arch.hpte_mod_interest))
rev->guest_rpte |= HPTE_GR_MODIFIED;
}
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 9039d3c97eec..0bd9348a4db9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -83,7 +83,7 @@ struct kvmppc_host_state {
u8 restore_hid5;
u8 napping;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
u8 hwthread_req;
u8 hwthread_state;
u8 host_ipi;
@@ -101,6 +101,7 @@ struct kvmppc_host_state {
#endif
#ifdef CONFIG_PPC_BOOK3S_64
u64 cfar;
+ u64 ppr;
#endif
};
@@ -108,14 +109,14 @@ struct kvmppc_book3s_shadow_vcpu {
ulong gpr[14];
u32 cr;
u32 xer;
-
- u32 fault_dsisr;
- u32 last_inst;
ulong ctr;
ulong lr;
ulong pc;
+
ulong shadow_srr1;
ulong fault_dar;
+ u32 fault_dsisr;
+ u32 last_inst;
#ifdef CONFIG_PPC_BOOK3S_32
u32 sr[16]; /* Guest SRs */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index d3c1eb34c986..dd8f61510dfd 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -26,7 +26,12 @@
/* LPIDs we support with this build -- runtime limit may be lower */
#define KVMPPC_NR_LPIDS 64
-#define KVMPPC_INST_EHPRIV 0x7c00021c
+#define KVMPPC_INST_EHPRIV 0x7c00021c
+#define EHPRIV_OC_SHIFT 11
+/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
+#define EHPRIV_OC_DEBUG 1
+#define KVMPPC_INST_EHPRIV_DEBUG (KVMPPC_INST_EHPRIV | \
+ (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
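Editor's note: the EHPRIV debug encoding introduced above is simply the base opcode with the OC field OR-ed in at bit 11, so the debug variant works out to 0x7c000a1c. A small stand-alone check (illustration only, not part of the patch) confirms the arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Values taken from the kvm_booke.h hunk above. */
#define KVMPPC_INST_EHPRIV        0x7c00021c
#define EHPRIV_OC_SHIFT           11
#define EHPRIV_OC_DEBUG           1
#define KVMPPC_INST_EHPRIV_DEBUG  (KVMPPC_INST_EHPRIV | \
                                   (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))

int main(void)
{
        /* OC = 1 lands in bit 11, giving the "ehpriv 1" debug form. */
        uint32_t inst = KVMPPC_INST_EHPRIV_DEBUG;

        assert(inst == 0x7c000a1c);
        printf("ehpriv 1 encodes as 0x%08x\n", (unsigned int)inst);
        return 0;
}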
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 33283532e9d8..237d1d25b448 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -63,20 +63,17 @@ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
#endif
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x) 0
-#define KVM_NR_PAGE_SIZES 1
-#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
-
#define HPTEG_CACHE_NUM (1 << 15)
#define HPTEG_HASH_BITS_PTE 13
#define HPTEG_HASH_BITS_PTE_LONG 12
#define HPTEG_HASH_BITS_VPTE 13
#define HPTEG_HASH_BITS_VPTE_LONG 5
+#define HPTEG_HASH_BITS_VPTE_64K 11
#define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE)
#define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG)
#define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE)
#define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG)
+#define HPTEG_HASH_NUM_VPTE_64K (1 << HPTEG_HASH_BITS_VPTE_64K)
/* Physical Address Mask - allowed range of real mode RAM access */
#define KVM_PAM 0x0fffffffffffffffULL
@@ -89,6 +86,9 @@ struct lppaca;
struct slb_shadow;
struct dtl_entry;
+struct kvmppc_vcpu_book3s;
+struct kvmppc_book3s_shadow_vcpu;
+
struct kvm_vm_stat {
u32 remote_tlb_flush;
};
@@ -224,15 +224,15 @@ struct revmap_entry {
#define KVMPPC_GOT_PAGE 0x80
struct kvm_arch_memory_slot {
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
unsigned long *rmap;
unsigned long *slot_phys;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
};
struct kvm_arch {
unsigned int lpid;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
unsigned long hpt_virt;
struct revmap_entry *revmap;
unsigned int host_lpid;
@@ -256,7 +256,10 @@ struct kvm_arch {
cpumask_t need_tlb_flush;
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
int hpt_cma_alloc;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ struct mutex hpt_mutex;
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
struct list_head rtas_tokens;
@@ -267,6 +270,7 @@ struct kvm_arch {
#ifdef CONFIG_KVM_XICS
struct kvmppc_xics *xics;
#endif
+ struct kvmppc_ops *kvm_ops;
};
/*
@@ -294,6 +298,10 @@ struct kvmppc_vcore {
u64 stolen_tb;
u64 preempt_tb;
struct kvm_vcpu *runner;
+ u64 tb_offset; /* guest timebase - host timebase */
+ ulong lpcr;
+ u32 arch_compat;
+ ulong pcr;
};
#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
@@ -328,6 +336,7 @@ struct kvmppc_pte {
bool may_read : 1;
bool may_write : 1;
bool may_execute : 1;
+ u8 page_size; /* MMU_PAGE_xxx */
};
struct kvmppc_mmu {
@@ -340,7 +349,8 @@ struct kvmppc_mmu {
/* book3s */
void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
- int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
+ int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *pte, bool data, bool iswrite);
void (*reset_msr)(struct kvm_vcpu *vcpu);
void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
@@ -360,6 +370,7 @@ struct kvmppc_slb {
bool large : 1; /* PTEs are 16MB */
bool tb : 1; /* 1TB segment */
bool class : 1;
+ u8 base_page_size; /* MMU_PAGE_xxx */
};
# ifdef CONFIG_PPC_FSL_BOOK3E
@@ -377,17 +388,6 @@ struct kvmppc_slb {
#define KVMPPC_EPR_USER 1 /* exit to userspace to fill EPR */
#define KVMPPC_EPR_KERNEL 2 /* in-kernel irqchip */
-struct kvmppc_booke_debug_reg {
- u32 dbcr0;
- u32 dbcr1;
- u32 dbcr2;
-#ifdef CONFIG_KVM_E500MC
- u32 dbcr4;
-#endif
- u64 iac[KVMPPC_BOOKE_MAX_IAC];
- u64 dac[KVMPPC_BOOKE_MAX_DAC];
-};
-
#define KVMPPC_IRQ_DEFAULT 0
#define KVMPPC_IRQ_MPIC 1
#define KVMPPC_IRQ_XICS 2
@@ -402,6 +402,10 @@ struct kvm_vcpu_arch {
int slb_max; /* 1 + index of last valid entry in slb[] */
int slb_nr; /* total number of entries in SLB */
struct kvmppc_mmu mmu;
+ struct kvmppc_vcpu_book3s *book3s;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+ struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
#endif
ulong gpr[32];
@@ -463,6 +467,8 @@ struct kvm_vcpu_arch {
u32 ctrl;
ulong dabr;
ulong cfar;
+ ulong ppr;
+ ulong shadow_srr1;
#endif
u32 vrsave; /* also USPRG0 */
u32 mmucr;
@@ -498,6 +504,8 @@ struct kvm_vcpu_arch {
u64 mmcr[3];
u32 pmc[8];
+ u64 siar;
+ u64 sdar;
#ifdef CONFIG_KVM_EXIT_TIMING
struct mutex exit_timing_lock;
@@ -531,7 +539,10 @@ struct kvm_vcpu_arch {
u32 eptcfg;
u32 epr;
u32 crit_save;
- struct kvmppc_booke_debug_reg dbg_reg;
+ /* guest debug registers */
+ struct debug_reg dbg_reg;
+ /* hardware visible debug registers when in guest state */
+ struct debug_reg shadow_dbg_reg;
#endif
gpa_t paddr_accessed;
gva_t vaddr_accessed;
@@ -582,7 +593,7 @@ struct kvm_vcpu_arch {
struct kvmppc_icp *icp; /* XICS presentation controller */
#endif
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
struct kvm_vcpu_arch_shared shregs;
unsigned long pgfault_addr;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b15554a26c20..c8317fbf92c4 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -106,13 +106,6 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
-
-extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
- ulong val);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
- ulong *val);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
extern int kvmppc_booke_init(void);
@@ -135,17 +128,17 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce);
-extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
- struct kvm_allocate_rma *rma);
extern struct kvm_rma_info *kvm_alloc_rma(void);
extern void kvm_release_rma(struct kvm_rma_info *ri);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
-extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+extern int kvmppc_core_create_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
@@ -177,6 +170,72 @@ extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
+union kvmppc_one_reg {
+ u32 wval;
+ u64 dval;
+ vector128 vval;
+ u64 vsxval[2];
+ struct {
+ u64 addr;
+ u64 length;
+ } vpaval;
+};
+
+struct kvmppc_ops {
+ struct module *owner;
+ int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+ int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+ int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val);
+ int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val);
+ void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+ void (*vcpu_put)(struct kvm_vcpu *vcpu);
+ void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
+ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+ void (*vcpu_free)(struct kvm_vcpu *vcpu);
+ int (*check_requests)(struct kvm_vcpu *vcpu);
+ int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
+ void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
+ int (*prepare_memory_region)(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem);
+ void (*commit_memory_region)(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old);
+ int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
+ int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
+ unsigned long end);
+ int (*age_hva)(struct kvm *kvm, unsigned long hva);
+ int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
+ void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
+ void (*mmu_destroy)(struct kvm_vcpu *vcpu);
+ void (*free_memslot)(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont);
+ int (*create_memslot)(struct kvm_memory_slot *slot,
+ unsigned long npages);
+ int (*init_vm)(struct kvm *kvm);
+ void (*destroy_vm)(struct kvm *kvm);
+ int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
+ int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance);
+ int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
+ int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+ void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
+ long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
+ unsigned long arg);
+
+};
+
+extern struct kvmppc_ops *kvmppc_hv_ops;
+extern struct kvmppc_ops *kvmppc_pr_ops;
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
+{
+ return kvm->arch.kvm_ops == kvmppc_hv_ops;
+}
+
/*
* Cuts out inst bits with ordering according to spec.
* That means the leftmost bit is zero. All given bits are included.
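Editor's note: the kvmppc_ops table added above is what allows the HV and PR implementations to be selected per VM at run time rather than per build. As a rough sketch (function names here are hypothetical; the ops members and is_kvmppc_hv_enabled() are the ones declared above), common code dispatches through kvm->arch.kvm_ops:

/* Editor's sketch, not part of the patch: run-time backend selection
 * through the per-VM ops table instead of the old compile-time
 * CONFIG_KVM_BOOK3S_64_HV choice.
 */
static struct kvm_vcpu *example_create_vcpu(struct kvm *kvm, unsigned int id)
{
        /* Both the HV and PR backends fill in ->vcpu_create. */
        return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

static const char *example_backend_name(struct kvm *kvm)
{
        return is_kvmppc_hv_enabled(kvm) ? "HV" : "PR";
}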
@@ -210,17 +269,6 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
return r;
}
-union kvmppc_one_reg {
- u32 wval;
- u64 dval;
- vector128 vval;
- u64 vsxval[2];
- struct {
- u64 addr;
- u64 length;
- } vpaval;
-};
-
#define one_reg_size(id) \
(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -245,10 +293,10 @@ union kvmppc_one_reg {
__v; \
})
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
-void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
@@ -260,7 +308,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
struct openpic;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
@@ -269,10 +317,10 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
static inline u32 kvmppc_get_xics_latch(void)
{
- u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+ u32 xirr;
+ xirr = get_paca()->kvm_hstate.saved_xirr;
get_paca()->kvm_hstate.saved_xirr = 0;
-
return xirr;
}
@@ -281,7 +329,10 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
paca[cpu].kvm_hstate.host_ipi = host_ipi;
}
-extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
+}
#else
static inline void __init kvm_cma_reserve(void)
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 4470d1e34d23..844c28de7ec0 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -84,8 +84,8 @@ struct lppaca {
* the processor is yielded (either because of an OS yield or a
* hypervisor preempt). An even value implies that the processor is
* currently executing.
- * NOTE: This value will ALWAYS be zero for dedicated processors and
- * will NEVER be zero for shared processors (ie, initialized to a 1).
+ * NOTE: Even dedicated processor partitions can yield so this
+ * field cannot be used to determine if we are shared or dedicated.
*/
volatile __be32 yield_count;
volatile __be32 dispersion_count; /* dispatch changed physical cpu */
@@ -106,15 +106,15 @@ extern struct lppaca lppaca[];
#define lppaca_of(cpu) (*paca[cpu].lppaca_ptr)
/*
- * Old kernels used a reserved bit in the VPA to determine if it was running
- * in shared processor mode. New kernels look for a non zero yield count
- * but KVM still needs to set the bit to keep the old stuff happy.
+ * We are using a non architected field to determine if a partition is
+ * shared or dedicated. This currently works on both KVM and PHYP, but
+ * we will have to transition to something better.
*/
#define LPPACA_OLD_SHARED_PROC 2
static inline bool lppaca_shared_proc(struct lppaca *l)
{
- return l->yield_count != 0;
+ return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
}
/*
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 8b480901165a..ad3025d0880b 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -78,6 +78,18 @@ struct machdep_calls {
long index);
void (*tce_flush)(struct iommu_table *tbl);
+ /* _rm versions are for real mode use only */
+ int (*tce_build_rm)(struct iommu_table *tbl,
+ long index,
+ long npages,
+ unsigned long uaddr,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs);
+ void (*tce_free_rm)(struct iommu_table *tbl,
+ long index,
+ long npages);
+ void (*tce_flush_rm)(struct iommu_table *tbl);
+
void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
unsigned long flags, void *caller);
void (*iounmap)(volatile void __iomem *token);
@@ -263,6 +275,10 @@ struct machdep_calls {
ssize_t (*cpu_probe)(const char *, size_t);
ssize_t (*cpu_release)(const char *, size_t);
#endif
+
+#ifdef CONFIG_ARCH_RANDOM
+ int (*get_random_long)(unsigned long *v);
+#endif
};
extern void e500_idle(void);
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index c4cf01197273..807014dde821 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -135,8 +135,8 @@ extern char initial_stab[];
#ifndef __ASSEMBLY__
struct hash_pte {
- unsigned long v;
- unsigned long r;
+ __be64 v;
+ __be64 r;
};
extern struct hash_pte *htab_address;
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index c5cd72833d6e..033c06be1d84 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -129,6 +129,9 @@ extern int opal_enter_rtas(struct rtas_args *args,
#define OPAL_LPC_READ 67
#define OPAL_LPC_WRITE 68
#define OPAL_RETURN_CPU 69
+#define OPAL_FLASH_VALIDATE 76
+#define OPAL_FLASH_MANAGE 77
+#define OPAL_FLASH_UPDATE 78
#ifndef __ASSEMBLY__
@@ -460,10 +463,12 @@ enum {
enum {
OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
+ OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
};
enum {
OPAL_P7IOC_NUM_PEST_REGS = 128,
+ OPAL_PHB3_NUM_PEST_REGS = 256
};
struct OpalIoPhbErrorCommon {
@@ -531,28 +536,94 @@ struct OpalIoP7IOCPhbErrorData {
uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS];
};
+struct OpalIoPhb3ErrorData {
+ struct OpalIoPhbErrorCommon common;
+
+ uint32_t brdgCtl;
+
+ /* PHB3 UTL regs */
+ uint32_t portStatusReg;
+ uint32_t rootCmplxStatus;
+ uint32_t busAgentStatus;
+
+ /* PHB3 cfg regs */
+ uint32_t deviceStatus;
+ uint32_t slotStatus;
+ uint32_t linkStatus;
+ uint32_t devCmdStatus;
+ uint32_t devSecStatus;
+
+ /* cfg AER regs */
+ uint32_t rootErrorStatus;
+ uint32_t uncorrErrorStatus;
+ uint32_t corrErrorStatus;
+ uint32_t tlpHdr1;
+ uint32_t tlpHdr2;
+ uint32_t tlpHdr3;
+ uint32_t tlpHdr4;
+ uint32_t sourceId;
+
+ uint32_t rsv3;
+
+ /* Record data about the call to allocate a buffer */
+ uint64_t errorClass;
+ uint64_t correlator;
+
+ uint64_t nFir; /* 000 */
+ uint64_t nFirMask; /* 003 */
+ uint64_t nFirWOF; /* 008 */
+
+ /* PHB3 MMIO Error Regs */
+ uint64_t phbPlssr; /* 120 */
+ uint64_t phbCsr; /* 110 */
+ uint64_t lemFir; /* C00 */
+ uint64_t lemErrorMask; /* C18 */
+ uint64_t lemWOF; /* C40 */
+ uint64_t phbErrorStatus; /* C80 */
+ uint64_t phbFirstErrorStatus; /* C88 */
+ uint64_t phbErrorLog0; /* CC0 */
+ uint64_t phbErrorLog1; /* CC8 */
+ uint64_t mmioErrorStatus; /* D00 */
+ uint64_t mmioFirstErrorStatus; /* D08 */
+ uint64_t mmioErrorLog0; /* D40 */
+ uint64_t mmioErrorLog1; /* D48 */
+ uint64_t dma0ErrorStatus; /* D80 */
+ uint64_t dma0FirstErrorStatus; /* D88 */
+ uint64_t dma0ErrorLog0; /* DC0 */
+ uint64_t dma0ErrorLog1; /* DC8 */
+ uint64_t dma1ErrorStatus; /* E00 */
+ uint64_t dma1FirstErrorStatus; /* E08 */
+ uint64_t dma1ErrorLog0; /* E40 */
+ uint64_t dma1ErrorLog1; /* E48 */
+ uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS];
+ uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS];
+};
+
typedef struct oppanel_line {
const char * line;
uint64_t line_len;
} oppanel_line_t;
+/* /sys/firmware/opal */
+extern struct kobject *opal_kobj;
+
/* API functions */
-int64_t opal_console_write(int64_t term_number, int64_t *length,
+int64_t opal_console_write(int64_t term_number, __be64 *length,
const uint8_t *buffer);
-int64_t opal_console_read(int64_t term_number, int64_t *length,
+int64_t opal_console_read(int64_t term_number, __be64 *length,
uint8_t *buffer);
int64_t opal_console_write_buffer_space(int64_t term_number,
- int64_t *length);
-int64_t opal_rtc_read(uint32_t *year_month_day,
- uint64_t *hour_minute_second_millisecond);
+ __be64 *length);
+int64_t opal_rtc_read(__be32 *year_month_day,
+ __be64 *hour_minute_second_millisecond);
int64_t opal_rtc_write(uint32_t year_month_day,
uint64_t hour_minute_second_millisecond);
int64_t opal_cec_power_down(uint64_t request);
int64_t opal_cec_reboot(void);
int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
-int64_t opal_handle_interrupt(uint64_t isn, uint64_t *outstanding_event_mask);
-int64_t opal_poll_events(uint64_t *outstanding_event_mask);
+int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
+int64_t opal_poll_events(__be64 *outstanding_event_mask);
int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id, uint64_t tce_mem_addr,
uint64_t tce_mem_size);
int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
@@ -560,9 +631,9 @@ int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
int64_t opal_pci_config_read_byte(uint64_t phb_id, uint64_t bus_dev_func,
uint64_t offset, uint8_t *data);
int64_t opal_pci_config_read_half_word(uint64_t phb_id, uint64_t bus_dev_func,
- uint64_t offset, uint16_t *data);
+ uint64_t offset, __be16 *data);
int64_t opal_pci_config_read_word(uint64_t phb_id, uint64_t bus_dev_func,
- uint64_t offset, uint32_t *data);
+ uint64_t offset, __be32 *data);
int64_t opal_pci_config_write_byte(uint64_t phb_id, uint64_t bus_dev_func,
uint64_t offset, uint8_t data);
int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
@@ -570,14 +641,14 @@ int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func,
uint64_t offset, uint32_t data);
int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority);
-int64_t opal_get_xive(uint32_t isn, uint16_t *server, uint8_t *priority);
+int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority);
int64_t opal_register_exception_handler(uint64_t opal_exception,
uint64_t handler_address,
uint64_t glue_cache_line);
int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number,
uint8_t *freeze_state,
- uint16_t *pci_error_type,
- uint64_t *phb_status);
+ __be16 *pci_error_type,
+ __be64 *phb_status);
int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number,
uint64_t eeh_action_token);
int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state);
@@ -614,13 +685,13 @@ int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq);
int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
uint32_t xive_num);
int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num,
- int32_t *interrupt_source_number);
+ __be32 *interrupt_source_number);
int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num,
- uint8_t msi_range, uint32_t *msi_address,
- uint32_t *message_data);
+ uint8_t msi_range, __be32 *msi_address,
+ __be32 *message_data);
int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number,
uint32_t xive_num, uint8_t msi_range,
- uint64_t *msi_address, uint32_t *message_data);
+ __be64 *msi_address, __be32 *message_data);
int64_t opal_start_cpu(uint64_t thread_number, uint64_t start_address);
int64_t opal_query_cpu_status(uint64_t thread_number, uint8_t *thread_status);
int64_t opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines);
@@ -642,7 +713,7 @@ int64_t opal_pci_fence_phb(uint64_t phb_id);
int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope);
int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
-int64_t opal_get_epow_status(uint64_t *status);
+int64_t opal_get_epow_status(__be64 *status);
int64_t opal_set_system_attention_led(uint8_t led_action);
int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
uint16_t *pci_error_type, uint16_t *severity);
@@ -656,6 +727,9 @@ int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
uint32_t addr, uint32_t data, uint32_t sz);
int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
uint32_t addr, uint32_t *data, uint32_t sz);
+int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
+int64_t opal_manage_flash(uint8_t op);
+int64_t opal_update_flash(uint64_t blk_list);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
@@ -684,6 +758,7 @@ extern int opal_set_rtc_time(struct rtc_time *tm);
extern void opal_get_rtc_time(struct rtc_time *tm);
extern unsigned long opal_get_boot_time(void);
extern void opal_nvram_init(void);
+extern void opal_flash_init(void);
extern int opal_machine_check(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a5954cebbc55..b6ea9e068c13 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -166,7 +166,7 @@ struct paca_struct {
struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
#ifdef CONFIG_KVM_BOOK3S_HANDLER
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
/* We use this to store guest state in */
struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
#endif
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index b9f426212d3a..32e4e212b9c1 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -78,7 +78,7 @@ extern unsigned int HPAGE_SHIFT;
*
* Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
*
- * There are two was to determine a physical address from a virtual one:
+ * There are two ways to determine a physical address from a virtual one:
* va = pa + PAGE_OFFSET - MEMORY_START
* va = pa + KERNELBASE - PHYSICAL_START
*
@@ -403,7 +403,7 @@ void arch_free_page(struct page *page, int order);
struct vm_area_struct;
-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
typedef pte_t *pgtable_t;
#else
typedef struct page *pgtable_t;
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 46db09414a10..4a191c472867 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -394,6 +394,8 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
}
+struct page *realmode_pfn_to_page(unsigned long pfn);
+
static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
/*
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index d7fe9f5b46d4..3132bb9365f3 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -143,6 +143,8 @@
#define PPC_INST_LSWX 0x7c00042a
#define PPC_INST_LWARX 0x7c000028
#define PPC_INST_LWSYNC 0x7c2004ac
+#define PPC_INST_SYNC 0x7c0004ac
+#define PPC_INST_SYNC_MASK 0xfc0007fe
#define PPC_INST_LXVD2X 0x7c000698
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
@@ -181,6 +183,7 @@
#define PPC_INST_TLBIVAX 0x7c000624
#define PPC_INST_TLBSRX_DOT 0x7c0006a5
#define PPC_INST_XXLOR 0xf0000510
+#define PPC_INST_XXSWAPD 0xf0000250
#define PPC_INST_XVCPSGNDP 0xf0000780
#define PPC_INST_TRECHKPT 0x7c0007dd
#define PPC_INST_TRECLAIM 0x7c00075d
@@ -200,6 +203,7 @@
/* Misc instructions for BPF compiler */
#define PPC_INST_LD 0xe8000000
#define PPC_INST_LHZ 0xa0000000
+#define PPC_INST_LHBRX 0x7c00062c
#define PPC_INST_LWZ 0x80000000
#define PPC_INST_STD 0xf8000000
#define PPC_INST_STDU 0xf8000001
@@ -218,7 +222,7 @@
#define PPC_INST_MULLW 0x7c0001d6
#define PPC_INST_MULHWU 0x7c000016
#define PPC_INST_MULLI 0x1c000000
-#define PPC_INST_DIVWU 0x7c0003d6
+#define PPC_INST_DIVWU 0x7c000396
#define PPC_INST_RLWINM 0x54000000
#define PPC_INST_RLDICR 0x78000004
#define PPC_INST_SLW 0x7c000030
@@ -344,6 +348,8 @@
VSX_XX1((s), a, b))
#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
VSX_XX3((t), a, b))
+#define XXSWAPD(t, a) stringify_in_c(.long PPC_INST_XXSWAPD | \
+ VSX_XX3((t), a, a))
#define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_INST_XVCPSGNDP | \
VSX_XX3((t), (a), (b))))
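Editor's note: the PPC_INST_DIVWU correction above is easy to sanity-check. divwu is primary opcode 31 with extended opcode 459, while the old constant actually encoded divw (extended opcode 491). A quick stand-alone verification (illustration only, not part of the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Primary opcode 31 in the top 6 bits; extended opcode shifted left
 * by one (Rc = 0, OE = 0).
 */
static uint32_t op31(uint32_t xo)
{
        return (31u << 26) | (xo << 1);
}

int main(void)
{
        assert(op31(459) == 0x7c000396);   /* divwu - new constant */
        assert(op31(491) == 0x7c0003d6);   /* divw  - old constant */
        printf("divwu base opcode: 0x%08x\n", (unsigned int)op31(459));
        return 0;
}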
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 599545738af3..8deaaad3b32f 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -98,123 +98,51 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
-#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
-#define REST_FPR(n, base) lfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define REST_FPR(n, base) lfd n,8*TS_FPRWIDTH*(n)(base)
#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
-#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,base,b
+#define SAVE_VR(n,b,base) li b,16*(n); stvx n,base,b
#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
-#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,base,b
+#define REST_VR(n,b,base) li b,16*(n); lvx n,base,b
#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
-/* Save/restore FPRs, VRs and VSRs from their checkpointed backups in
- * thread_struct:
- */
-#define SAVE_FPR_TRANSACT(n, base) stfd n,THREAD_TRANSACT_FPR0+ \
- 8*TS_FPRWIDTH*(n)(base)
-#define SAVE_2FPRS_TRANSACT(n, base) SAVE_FPR_TRANSACT(n, base); \
- SAVE_FPR_TRANSACT(n+1, base)
-#define SAVE_4FPRS_TRANSACT(n, base) SAVE_2FPRS_TRANSACT(n, base); \
- SAVE_2FPRS_TRANSACT(n+2, base)
-#define SAVE_8FPRS_TRANSACT(n, base) SAVE_4FPRS_TRANSACT(n, base); \
- SAVE_4FPRS_TRANSACT(n+4, base)
-#define SAVE_16FPRS_TRANSACT(n, base) SAVE_8FPRS_TRANSACT(n, base); \
- SAVE_8FPRS_TRANSACT(n+8, base)
-#define SAVE_32FPRS_TRANSACT(n, base) SAVE_16FPRS_TRANSACT(n, base); \
- SAVE_16FPRS_TRANSACT(n+16, base)
-
-#define REST_FPR_TRANSACT(n, base) lfd n,THREAD_TRANSACT_FPR0+ \
- 8*TS_FPRWIDTH*(n)(base)
-#define REST_2FPRS_TRANSACT(n, base) REST_FPR_TRANSACT(n, base); \
- REST_FPR_TRANSACT(n+1, base)
-#define REST_4FPRS_TRANSACT(n, base) REST_2FPRS_TRANSACT(n, base); \
- REST_2FPRS_TRANSACT(n+2, base)
-#define REST_8FPRS_TRANSACT(n, base) REST_4FPRS_TRANSACT(n, base); \
- REST_4FPRS_TRANSACT(n+4, base)
-#define REST_16FPRS_TRANSACT(n, base) REST_8FPRS_TRANSACT(n, base); \
- REST_8FPRS_TRANSACT(n+8, base)
-#define REST_32FPRS_TRANSACT(n, base) REST_16FPRS_TRANSACT(n, base); \
- REST_16FPRS_TRANSACT(n+16, base)
-
-
-#define SAVE_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \
- stvx n,b,base
-#define SAVE_2VRS_TRANSACT(n,b,base) SAVE_VR_TRANSACT(n,b,base); \
- SAVE_VR_TRANSACT(n+1,b,base)
-#define SAVE_4VRS_TRANSACT(n,b,base) SAVE_2VRS_TRANSACT(n,b,base); \
- SAVE_2VRS_TRANSACT(n+2,b,base)
-#define SAVE_8VRS_TRANSACT(n,b,base) SAVE_4VRS_TRANSACT(n,b,base); \
- SAVE_4VRS_TRANSACT(n+4,b,base)
-#define SAVE_16VRS_TRANSACT(n,b,base) SAVE_8VRS_TRANSACT(n,b,base); \
- SAVE_8VRS_TRANSACT(n+8,b,base)
-#define SAVE_32VRS_TRANSACT(n,b,base) SAVE_16VRS_TRANSACT(n,b,base); \
- SAVE_16VRS_TRANSACT(n+16,b,base)
-
-#define REST_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \
- lvx n,b,base
-#define REST_2VRS_TRANSACT(n,b,base) REST_VR_TRANSACT(n,b,base); \
- REST_VR_TRANSACT(n+1,b,base)
-#define REST_4VRS_TRANSACT(n,b,base) REST_2VRS_TRANSACT(n,b,base); \
- REST_2VRS_TRANSACT(n+2,b,base)
-#define REST_8VRS_TRANSACT(n,b,base) REST_4VRS_TRANSACT(n,b,base); \
- REST_4VRS_TRANSACT(n+4,b,base)
-#define REST_16VRS_TRANSACT(n,b,base) REST_8VRS_TRANSACT(n,b,base); \
- REST_8VRS_TRANSACT(n+8,b,base)
-#define REST_32VRS_TRANSACT(n,b,base) REST_16VRS_TRANSACT(n,b,base); \
- REST_16VRS_TRANSACT(n+16,b,base)
-
-
-#define SAVE_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \
- STXVD2X(n,R##base,R##b)
-#define SAVE_2VSRS_TRANSACT(n,b,base) SAVE_VSR_TRANSACT(n,b,base); \
- SAVE_VSR_TRANSACT(n+1,b,base)
-#define SAVE_4VSRS_TRANSACT(n,b,base) SAVE_2VSRS_TRANSACT(n,b,base); \
- SAVE_2VSRS_TRANSACT(n+2,b,base)
-#define SAVE_8VSRS_TRANSACT(n,b,base) SAVE_4VSRS_TRANSACT(n,b,base); \
- SAVE_4VSRS_TRANSACT(n+4,b,base)
-#define SAVE_16VSRS_TRANSACT(n,b,base) SAVE_8VSRS_TRANSACT(n,b,base); \
- SAVE_8VSRS_TRANSACT(n+8,b,base)
-#define SAVE_32VSRS_TRANSACT(n,b,base) SAVE_16VSRS_TRANSACT(n,b,base); \
- SAVE_16VSRS_TRANSACT(n+16,b,base)
-
-#define REST_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \
- LXVD2X(n,R##base,R##b)
-#define REST_2VSRS_TRANSACT(n,b,base) REST_VSR_TRANSACT(n,b,base); \
- REST_VSR_TRANSACT(n+1,b,base)
-#define REST_4VSRS_TRANSACT(n,b,base) REST_2VSRS_TRANSACT(n,b,base); \
- REST_2VSRS_TRANSACT(n+2,b,base)
-#define REST_8VSRS_TRANSACT(n,b,base) REST_4VSRS_TRANSACT(n,b,base); \
- REST_4VSRS_TRANSACT(n+4,b,base)
-#define REST_16VSRS_TRANSACT(n,b,base) REST_8VSRS_TRANSACT(n,b,base); \
- REST_8VSRS_TRANSACT(n+8,b,base)
-#define REST_32VSRS_TRANSACT(n,b,base) REST_16VSRS_TRANSACT(n,b,base); \
- REST_16VSRS_TRANSACT(n+16,b,base)
+#ifdef __BIG_ENDIAN__
+#define STXVD2X_ROT(n,b,base) STXVD2X(n,b,base)
+#define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base)
+#else
+#define STXVD2X_ROT(n,b,base) XXSWAPD(n,n); \
+ STXVD2X(n,b,base); \
+ XXSWAPD(n,n)
+#define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base); \
+ XXSWAPD(n,n)
+#endif
/* Save the lower 32 VSRs in the thread VSR region */
-#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,R##base,R##b)
+#define SAVE_VSR(n,b,base) li b,16*(n); STXVD2X_ROT(n,R##base,R##b)
#define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
#define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
#define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
#define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
#define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
-#define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b)
+#define REST_VSR(n,b,base) li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
#define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base)
#define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
#define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
@@ -832,6 +760,35 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
#define N_SLINE 68
#define N_SO 100
-#endif /* __ASSEMBLY__ */
+/*
+ * Create an endian fixup trampoline
+ *
+ * This starts with a "tdi 0,0,0x48" instruction which is
+ * essentially a "trap never", and thus akin to a nop.
+ *
+ * When read with the wrong endianness, however, the opcode
+ * decodes as a "b . + 8".
+ *
+ * So essentially we use that trick to execute the following
+ * trampoline in "reverse endian" if we are running with the
+ * MSR_LE bit set the "wrong" way for whatever endianness the
+ * kernel is built for.
+ */
+#ifdef CONFIG_PPC_BOOK3E
+#define FIXUP_ENDIAN
+#else
+#define FIXUP_ENDIAN \
+ tdi 0,0,0x48; /* Reverse endian of b . + 8 */ \
+ b $+36; /* Skip trampoline if endian is good */ \
+ .long 0x05009f42; /* bcl 20,31,$+4 */ \
+ .long 0xa602487d; /* mflr r10 */ \
+ .long 0x1c004a39; /* addi r10,r10,28 */ \
+ .long 0xa600607d; /* mfmsr r11 */ \
+ .long 0x01006b69; /* xori r11,r11,1 */ \
+ .long 0xa6035a7d; /* mtsrr0 r10 */ \
+ .long 0xa6037b7d; /* mtsrr1 r11 */ \
+ .long 0x2400004c /* rfid */
+#endif /* !CONFIG_PPC_BOOK3E */
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PPC_ASM_H */
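Editor's note: the FIXUP_ENDIAN trick above rests on an encoding coincidence: "tdi 0,0,0x48" is 0x08000048, and the same word read in the opposite byte order is 0x48000008, which is exactly "b . + 8". The stand-alone check below (illustration only, assuming the standard primary opcodes tdi = 2 and b = 18) makes that arithmetic concrete:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Reverse the byte order of a 32-bit instruction word. */
static uint32_t swab32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
}

int main(void)
{
        uint32_t tdi_0_0_0x48 = (2u << 26) | 0x48;   /* tdi 0,0,0x48 */
        uint32_t b_dot_plus_8 = (18u << 26) | 0x8;   /* b . + 8      */

        assert(tdi_0_0_0x48 == 0x08000048);
        assert(swab32(tdi_0_0_0x48) == b_dot_plus_8);
        printf("0x%08x byte-swapped is 0x%08x (b . + 8)\n",
               (unsigned int)tdi_0_0_0x48,
               (unsigned int)swab32(tdi_0_0_0x48));
        return 0;
}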
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index ce4de5aed7b5..7794b2b04eb2 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -14,8 +14,18 @@
#ifdef CONFIG_VSX
#define TS_FPRWIDTH 2
+
+#ifdef __BIG_ENDIAN__
+#define TS_FPROFFSET 0
+#define TS_VSRLOWOFFSET 1
+#else
+#define TS_FPROFFSET 1
+#define TS_VSRLOWOFFSET 0
+#endif
+
#else
#define TS_FPRWIDTH 1
+#define TS_FPROFFSET 0
#endif
#ifdef CONFIG_PPC64
@@ -142,26 +152,22 @@ typedef struct {
unsigned long seg;
} mm_segment_t;
-#define TS_FPROFFSET 0
-#define TS_VSRLOWOFFSET 1
-#define TS_FPR(i) fpr[i][TS_FPROFFSET]
-#define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET]
+#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
+#define TS_TRANS_FPR(i) transact_fp.fpr[i][TS_FPROFFSET]
-struct thread_struct {
- unsigned long ksp; /* Kernel stack pointer */
-#ifdef CONFIG_PPC64
- unsigned long ksp_vsid;
-#endif
- struct pt_regs *regs; /* Pointer to saved register state */
- mm_segment_t fs; /* for get_fs() validation */
-#ifdef CONFIG_BOOKE
- /* BookE base exception scratch space; align on cacheline */
- unsigned long normsave[8] ____cacheline_aligned;
-#endif
-#ifdef CONFIG_PPC32
- void *pgdir; /* root of page-table tree */
- unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
-#endif
+/* FP and VSX 0-31 register set */
+struct thread_fp_state {
+ u64 fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
+ u64 fpscr; /* Floating point status */
+};
+
+/* Complete AltiVec register set including VSCR */
+struct thread_vr_state {
+ vector128 vr[32] __attribute__((aligned(16)));
+ vector128 vscr __attribute__((aligned(16)));
+};
+
+struct debug_reg {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* The following help to manage the use of Debug Control Registers
@@ -198,13 +204,26 @@ struct thread_struct {
unsigned long dvc2;
#endif
#endif
- /* FP and VSX 0-31 register set */
- double fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
- struct {
+};
- unsigned int pad;
- unsigned int val; /* Floating point status */
- } fpscr;
+struct thread_struct {
+ unsigned long ksp; /* Kernel stack pointer */
+#ifdef CONFIG_PPC64
+ unsigned long ksp_vsid;
+#endif
+ struct pt_regs *regs; /* Pointer to saved register state */
+ mm_segment_t fs; /* for get_fs() validation */
+#ifdef CONFIG_BOOKE
+ /* BookE base exception scratch space; align on cacheline */
+ unsigned long normsave[8] ____cacheline_aligned;
+#endif
+#ifdef CONFIG_PPC32
+ void *pgdir; /* root of page-table tree */
+ unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
+#endif
+ struct debug_reg debug;
+ struct thread_fp_state fp_state;
+ struct thread_fp_state *fp_save_area;
int fpexc_mode; /* floating-point exception mode */
unsigned int align_ctl; /* alignment handling control */
#ifdef CONFIG_PPC64
@@ -222,10 +241,8 @@ struct thread_struct {
struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
unsigned long trap_nr; /* last trap # on this thread */
#ifdef CONFIG_ALTIVEC
- /* Complete AltiVec register set */
- vector128 vr[32] __attribute__((aligned(16)));
- /* AltiVec status */
- vector128 vscr __attribute__((aligned(16)));
+ struct thread_vr_state vr_state;
+ struct thread_vr_state *vr_save_area;
unsigned long vrsave;
int used_vr; /* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
@@ -262,13 +279,8 @@ struct thread_struct {
* transact_fpr[] is the new set of transactional values.
* VRs work the same way.
*/
- double transact_fpr[32][TS_FPRWIDTH];
- struct {
- unsigned int pad;
- unsigned int val; /* Floating point status */
- } transact_fpscr;
- vector128 transact_vr[32] __attribute__((aligned(16)));
- vector128 transact_vscr __attribute__((aligned(16)));
+ struct thread_fp_state transact_fp;
+ struct thread_vr_state transact_vr;
unsigned long transact_vrsave;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -322,8 +334,6 @@ struct thread_struct {
.ksp = INIT_SP, \
.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
.fs = KERNEL_DS, \
- .fpr = {{0}}, \
- .fpscr = { .val = 0, }, \
.fpexc_mode = 0, \
.ppr = INIT_PPR, \
}
@@ -361,6 +371,11 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+extern void load_fp_state(struct thread_fp_state *fp);
+extern void store_fp_state(struct thread_fp_state *fp);
+extern void load_vr_state(struct thread_vr_state *vr);
+extern void store_vr_state(struct thread_vr_state *vr);
+
static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 7d0c7f3a7171..d977b9b78696 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -1,4 +1,3 @@
-#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
#ifndef _POWERPC_PROM_H
#define _POWERPC_PROM_H
#ifdef __KERNEL__
@@ -20,21 +19,17 @@
#include <asm/irq.h>
#include <linux/atomic.h>
-#define HAVE_ARCH_DEVTREE_FIXUPS
+/* These includes should be removed once implicit includes are cleaned up. */
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
/*
* OF address retrieval & translation
*/
-/* Translate a DMA address from device space to CPU space */
-extern u64 of_translate_dma_address(struct device_node *dev,
- const __be32 *in_addr);
-
-#ifdef CONFIG_PCI
-extern unsigned long pci_address_to_pio(phys_addr_t address);
-#define pci_address_to_pio pci_address_to_pio
-#endif /* CONFIG_PCI */
-
/* Parse the ibm,dma-window property of an OF node into the busno, phys and
* size parameters.
*/
@@ -44,16 +39,6 @@ void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
extern void kdump_move_device_tree(void);
-/* cache lookup */
-struct device_node *of_find_next_cache_node(struct device_node *np);
-
-#ifdef CONFIG_NUMA
-extern int of_node_to_nid(struct device_node *device);
-#else
-static inline int of_node_to_nid(struct device_node *device) { return 0; }
-#endif
-#define of_node_to_nid of_node_to_nid
-
extern void of_instantiate_rtc(void);
extern int of_get_ibm_chip_id(struct device_node *np);
@@ -143,14 +128,5 @@ struct of_drconf_cell {
*/
extern unsigned char ibm_architecture_vec[];
-/* These includes are put at the bottom because they may contain things
- * that are overridden by this file. Ideally they shouldn't be included
- * by this file, but there are a bunch of .c files that currently depend
- * on it. Eventually they will be cleaned up. */
-#include <linux/of_fdt.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/platform_device.h>
-
#endif /* __KERNEL__ */
#endif /* _POWERPC_PROM_H */
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
index 0156702ba24e..576ad88104cb 100644
--- a/arch/powerpc/include/asm/pte-book3e.h
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -40,7 +40,7 @@
#define _PAGE_U1 0x010000
#define _PAGE_U0 0x020000
#define _PAGE_ACCESSED 0x040000
-#define _PAGE_LENDIAN 0x080000
+#define _PAGE_ENDIAN 0x080000
#define _PAGE_GUARDED 0x100000
#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */
#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 10d1ef016bf1..5c45787d551e 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -115,7 +115,12 @@
#define MSR_64BIT MSR_SF
/* Server variant */
-#define MSR_ (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+#ifdef __BIG_ENDIAN__
+#define MSR_ __MSR
+#else
+#define MSR_ (__MSR | MSR_LE)
+#endif
#define MSR_KERNEL (MSR_ | MSR_64BIT)
#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
#define MSR_USER64 (MSR_USER32 | MSR_64BIT)
@@ -243,6 +248,7 @@
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
+#define SPRN_TBU40 0x11E /* Timebase upper 40 bits (hyper, R/W) */
#define SPRN_SPURR 0x134 /* Scaled PURR */
#define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */
#define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */
@@ -283,6 +289,7 @@
#define LPCR_ISL (1ul << (63-2))
#define LPCR_VC_SH (63-2)
#define LPCR_DPFD_SH (63-11)
+#define LPCR_DPFD (7ul << LPCR_DPFD_SH)
#define LPCR_VRMASD (0x1ful << (63-16))
#define LPCR_VRMA_L (1ul << (63-12))
#define LPCR_VRMA_LP0 (1ul << (63-15))
@@ -299,6 +306,7 @@
#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
#define LPCR_MER 0x00000800 /* Mediated External Exception */
#define LPCR_MER_SH 11
+#define LPCR_TC 0x00000200 /* Translation control */
#define LPCR_LPES 0x0000000c
#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
@@ -311,6 +319,10 @@
#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */
#define SPRN_HMER 0x150 /* Hardware m? error recovery */
#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
+#define SPRN_PCR 0x152 /* Processor compatibility register */
+#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
+#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
+#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
#define SPRN_TLBVPNR 0x155 /* P7 TLB control register */
@@ -420,6 +432,7 @@
#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
#define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */
#define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */
+#define HID4_RMOR (0xFFFFul << HID4_RMOR_SH)
#define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */
#define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */
#define HID4_LPID1_SH 0 /* partition ID top 2 bits */
@@ -1102,6 +1115,13 @@
#define PVR_BE 0x0070
#define PVR_PA6T 0x0090
+/* "Logical" PVR values defined in PAPR, representing architecture levels */
+#define PVR_ARCH_204 0x0f000001
+#define PVR_ARCH_205 0x0f000002
+#define PVR_ARCH_206 0x0f000003
+#define PVR_ARCH_206p 0x0f100003
+#define PVR_ARCH_207 0x0f000004
+
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
#define mfmsr() ({unsigned long rval; \
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index ed8f836da094..2e31aacd8acc 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -381,7 +381,7 @@
#define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */
#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
-#define dbcr_iac_range(task) ((task)->thread.dbcr0)
+#define dbcr_iac_range(task) ((task)->thread.debug.dbcr0)
#define DBCR_IAC12I DBCR0_IA12 /* Range Inclusive */
#define DBCR_IAC12X (DBCR0_IA12 | DBCR0_IA12X) /* Range Exclusive */
#define DBCR_IAC12MODE (DBCR0_IA12 | DBCR0_IA12X) /* IAC 1-2 Mode Bits */
@@ -395,7 +395,7 @@
#define DBCR1_DAC1W 0x20000000 /* DAC1 Write Debug Event */
#define DBCR1_DAC2W 0x10000000 /* DAC2 Write Debug Event */
-#define dbcr_dac(task) ((task)->thread.dbcr1)
+#define dbcr_dac(task) ((task)->thread.debug.dbcr1)
#define DBCR_DAC1R DBCR1_DAC1R
#define DBCR_DAC1W DBCR1_DAC1W
#define DBCR_DAC2R DBCR1_DAC2R
@@ -441,7 +441,7 @@
#define DBCR0_CRET 0x00000020 /* Critical Return Debug Event */
#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
-#define dbcr_dac(task) ((task)->thread.dbcr0)
+#define dbcr_dac(task) ((task)->thread.debug.dbcr0)
#define DBCR_DAC1R DBCR0_DAC1R
#define DBCR_DAC1W DBCR0_DAC1W
#define DBCR_DAC2R DBCR0_DAC2R
@@ -475,7 +475,7 @@
#define DBCR1_IAC34MX 0x000000C0 /* Instr Addr 3-4 range eXclusive */
#define DBCR1_IAC34AT 0x00000001 /* Instr Addr 3-4 range Toggle */
-#define dbcr_iac_range(task) ((task)->thread.dbcr1)
+#define dbcr_iac_range(task) ((task)->thread.debug.dbcr1)
#define DBCR_IAC12I DBCR1_IAC12M /* Range Inclusive */
#define DBCR_IAC12X DBCR1_IAC12MX /* Range Exclusive */
#define DBCR_IAC12MODE DBCR1_IAC12MX /* IAC 1-2 Mode Bits */
diff --git a/arch/powerpc/include/asm/scom.h b/arch/powerpc/include/asm/scom.h
index 0cabfd7bc2d1..07dcdcfdaefc 100644
--- a/arch/powerpc/include/asm/scom.h
+++ b/arch/powerpc/include/asm/scom.h
@@ -54,8 +54,8 @@ struct scom_controller {
scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count);
void (*unmap)(scom_map_t map);
- u64 (*read)(scom_map_t map, u32 reg);
- void (*write)(scom_map_t map, u32 reg, u64 value);
+ int (*read)(scom_map_t map, u32 reg, u64 *value);
+ int (*write)(scom_map_t map, u32 reg, u64 value);
};
extern const struct scom_controller *scom_controller;
@@ -133,10 +133,18 @@ static inline void scom_unmap(scom_map_t map)
* scom_read - Read a SCOM register
* @map: Result of scom_map
* @reg: Register index within that map
+ * @value: Updated with the value read
+ *
+ * Returns 0 (success) or a negative error code
*/
-static inline u64 scom_read(scom_map_t map, u32 reg)
+static inline int scom_read(scom_map_t map, u32 reg, u64 *value)
{
- return scom_controller->read(map, reg);
+ int rc;
+
+ rc = scom_controller->read(map, reg, value);
+ if (rc)
+ *value = 0xfffffffffffffffful;
+ return rc;
}
/**
@@ -144,12 +152,15 @@ static inline u64 scom_read(scom_map_t map, u32 reg)
* @map: Result of scom_map
* @reg: Register index within that map
* @value: Value to write
+ *
+ * Returns 0 (success) or a negative error code
*/
-static inline void scom_write(scom_map_t map, u32 reg, u64 value)
+static inline int scom_write(scom_map_t map, u32 reg, u64 value)
{
- scom_controller->write(map, reg, value);
+ return scom_controller->write(map, reg, value);
}
+
#endif /* CONFIG_PPC_SCOM */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
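Editor's note: with this change scom_read() and scom_write() report failure through a return code rather than a sentinel value, so callers pass a result pointer and check the status. A hedged sketch of the new calling convention follows (hypothetical helper; the map is assumed to have been obtained from scom_map() elsewhere in this header):

/* Editor's sketch, not part of the patch. */
static int scom_toggle_bit(scom_map_t map, u32 reg, u64 bit)
{
        u64 val;
        int rc;

        rc = scom_read(map, reg, &val);         /* 0 or negative error  */
        if (rc)
                return rc;                      /* val forced to ~0 on error */

        return scom_write(map, reg, val ^ bit); /* also returns 0/-errno */
}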
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index d3ca85529b8b..703a8412dac2 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -23,6 +23,10 @@ extern void reloc_got2(unsigned long);
#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
+void check_for_initrd(void);
+void do_init_bootmem(void);
+void setup_panic(void);
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index 3a7a67a0d006..d89beaba26ff 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -125,7 +125,7 @@
#define FP_EX_DIVZERO (1 << (31 - 5))
#define FP_EX_INEXACT (1 << (31 - 6))
-#define __FPU_FPSCR (current->thread.fpscr.val)
+#define __FPU_FPSCR (current->thread.fp_state.fpscr)
/* We only actually write to the destination register
* if exceptions signalled (if any) will not trap.
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
index e40010abcaf1..0dffad6bcc84 100644
--- a/arch/powerpc/include/asm/string.h
+++ b/arch/powerpc/include/asm/string.h
@@ -10,7 +10,9 @@
#define __HAVE_ARCH_STRNCMP
#define __HAVE_ARCH_STRCAT
#define __HAVE_ARCH_MEMSET
+#ifdef __BIG_ENDIAN__
#define __HAVE_ARCH_MEMCPY
+#endif
#define __HAVE_ARCH_MEMMOVE
#define __HAVE_ARCH_MEMCMP
#define __HAVE_ARCH_MEMCHR
@@ -22,7 +24,9 @@ extern int strcmp(const char *,const char *);
extern int strncmp(const char *, const char *, __kernel_size_t);
extern char * strcat(char *, const char *);
extern void * memset(void *,int,__kernel_size_t);
+#ifdef __BIG_ENDIAN__
extern void * memcpy(void *,const void *,__kernel_size_t);
+#endif
extern void * memmove(void *,const void *,__kernel_size_t);
extern int memcmp(const void *,const void *,__kernel_size_t);
extern void * memchr(const void *,int,__kernel_size_t);
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 2be5618cdec6..9ee12610af02 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -35,6 +35,7 @@ extern void giveup_vsx(struct task_struct *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
+extern void switch_booke_debug_regs(struct thread_struct *new_thread);
#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index d0b6d4ac6dda..9a5c928bb3c6 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -8,6 +8,8 @@
#include <linux/kernel.h>
#include <asm/asm-compat.h>
+#ifdef __BIG_ENDIAN__
+
struct word_at_a_time {
const unsigned long high_bits, low_bits;
};
@@ -38,4 +40,80 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
return (val + c->high_bits) & ~rhs;
}
+#else
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/* Alan Modra's little-endian strlen tail for 64-bit */
+#define create_zero_mask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ unsigned long leading_zero_bits;
+ long trailing_zero_bit_mask;
+
+ asm ("addi %1,%2,-1\n\t"
+ "andc %1,%1,%2\n\t"
+ "popcntd %0,%1"
+ : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
+ : "r" (mask));
+ return leading_zero_bits >> 3;
+}
+
+#else /* 32-bit case */
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+#endif
+
#endif /* _ASM_WORD_AT_A_TIME_H */
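Editor's note: the little-endian helpers above combine the classic (x - 0x01..01) & ~x & 0x80..80 zero-byte test with a cheap mask-to-index step. The stand-alone program below (illustration only, using uint32_t to stand in for the 32-bit unsigned long case) exercises the same arithmetic for a NUL in each byte position:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ONE_BITS  0x01010101u
#define HIGH_BITS 0x80808080u

/* Mirror the 32-bit little-endian helpers added above. */
static uint32_t has_zero32(uint32_t a)
{
        return (a - ONE_BITS) & ~a & HIGH_BITS;
}

static uint32_t create_zero_mask32(uint32_t bits)
{
        bits = (bits - 1) & ~bits;
        return bits >> 7;
}

static long count_masked_bytes32(long mask)
{
        /* (000000 0000ff 00ffff ffffff) -> ( 0 1 2 3 ) */
        long a = (0x0ff0001 + mask) >> 23;
        return a & mask;
}

int main(void)
{
        /* Little-endian words with the NUL in byte 0, 1, 2 and 3. */
        uint32_t words[] = { 0x41414100, 0x41410041, 0x41004141, 0x00414141 };

        for (int i = 0; i < 4; i++) {
                uint32_t bits = has_zero32(words[i]);
                long idx = count_masked_bytes32(create_zero_mask32(bits));

                assert(bits != 0);
                assert(idx == i);
                printf("0x%08x: first zero byte at index %ld\n",
                       (unsigned int)words[i], idx);
        }
        return 0;
}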
diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h
index c82eb12a5b18..0abb97f3be10 100644
--- a/arch/powerpc/include/asm/xor.h
+++ b/arch/powerpc/include/asm/xor.h
@@ -1 +1,68 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#ifndef _ASM_POWERPC_XOR_H
+#define _ASM_POWERPC_XOR_H
+
+#ifdef CONFIG_ALTIVEC
+
+#include <asm/cputable.h>
+
+void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in);
+void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in);
+void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in);
+void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in, unsigned long *v5_in);
+
+static struct xor_block_template xor_block_altivec = {
+ .name = "altivec",
+ .do_2 = xor_altivec_2,
+ .do_3 = xor_altivec_3,
+ .do_4 = xor_altivec_4,
+ .do_5 = xor_altivec_5,
+};
+
+#define XOR_SPEED_ALTIVEC() \
+ do { \
+ if (cpu_has_feature(CPU_FTR_ALTIVEC)) \
+ xor_speed(&xor_block_altivec); \
+ } while (0)
+#else
+#define XOR_SPEED_ALTIVEC()
+#endif
+
+/* Also try the generic routines. */
#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
+ XOR_SPEED_ALTIVEC(); \
+} while (0)
+
+#endif /* _ASM_POWERPC_XOR_H */
diff --git a/arch/powerpc/include/uapi/asm/byteorder.h b/arch/powerpc/include/uapi/asm/byteorder.h
index aa6cc4fac965..ca931d074000 100644
--- a/arch/powerpc/include/uapi/asm/byteorder.h
+++ b/arch/powerpc/include/uapi/asm/byteorder.h
@@ -7,6 +7,10 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#ifdef __LITTLE_ENDIAN__
+#include <linux/byteorder/little_endian.h>
+#else
#include <linux/byteorder/big_endian.h>
+#endif
#endif /* _ASM_POWERPC_BYTEORDER_H */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 0fb1a6e9ff90..6836ec79a830 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -27,6 +27,7 @@
#define __KVM_HAVE_PPC_SMT
#define __KVM_HAVE_IRQCHIP
#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_GUEST_DEBUG
struct kvm_regs {
__u64 pc;
@@ -269,7 +270,24 @@ struct kvm_fpu {
__u64 fpr[32];
};
+/*
+ * Defines for h/w breakpoint, watchpoint (read, write or both) and
+ * software breakpoint.
+ * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status"
+ * for KVM_DEBUG_EXIT.
+ */
+#define KVMPPC_DEBUG_NONE 0x0
+#define KVMPPC_DEBUG_BREAKPOINT (1UL << 1)
+#define KVMPPC_DEBUG_WATCH_WRITE (1UL << 2)
+#define KVMPPC_DEBUG_WATCH_READ (1UL << 3)
struct kvm_debug_exit_arch {
+ __u64 address;
+ /*
+ * exiting to userspace because of h/w breakpoint, watchpoint
+ * (read, write or both) and software breakpoint.
+ */
+ __u32 status;
+ __u32 reserved;
};
/* for KVM_SET_GUEST_DEBUG */
@@ -281,10 +299,6 @@ struct kvm_guest_debug_arch {
* Type denotes h/w breakpoint, read watchpoint, write
* watchpoint or watchpoint (both read and write).
*/
-#define KVMPPC_DEBUG_NONE 0x0
-#define KVMPPC_DEBUG_BREAKPOINT (1UL << 1)
-#define KVMPPC_DEBUG_WATCH_WRITE (1UL << 2)
-#define KVMPPC_DEBUG_WATCH_READ (1UL << 3)
__u32 type;
__u32 reserved;
} bp[16];
@@ -429,6 +443,11 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_MMCR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
#define KVM_REG_PPC_MMCR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
#define KVM_REG_PPC_MMCRA (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+#define KVM_REG_PPC_MMCR2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13)
+#define KVM_REG_PPC_MMCRS (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14)
+#define KVM_REG_PPC_SIAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
+#define KVM_REG_PPC_SDAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
+#define KVM_REG_PPC_SIER (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17)
#define KVM_REG_PPC_PMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
#define KVM_REG_PPC_PMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
@@ -499,6 +518,65 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_TLB3PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
#define KVM_REG_PPC_EPTCFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)
+/* Timebase offset */
+#define KVM_REG_PPC_TB_OFFSET (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c)
+
+/* POWER8 registers */
+#define KVM_REG_PPC_SPMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9d)
+#define KVM_REG_PPC_SPMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9e)
+#define KVM_REG_PPC_IAMR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9f)
+#define KVM_REG_PPC_TFHAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa0)
+#define KVM_REG_PPC_TFIAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa1)
+#define KVM_REG_PPC_TEXASR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa2)
+#define KVM_REG_PPC_FSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa3)
+#define KVM_REG_PPC_PSPB (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xa4)
+#define KVM_REG_PPC_EBBHR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa5)
+#define KVM_REG_PPC_EBBRR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa6)
+#define KVM_REG_PPC_BESCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa7)
+#define KVM_REG_PPC_TAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa8)
+#define KVM_REG_PPC_DPDES (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa9)
+#define KVM_REG_PPC_DAWR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaa)
+#define KVM_REG_PPC_DAWRX (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xab)
+#define KVM_REG_PPC_CIABR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xac)
+#define KVM_REG_PPC_IC (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xad)
+#define KVM_REG_PPC_VTB (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xae)
+#define KVM_REG_PPC_CSIGR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaf)
+#define KVM_REG_PPC_TACR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb0)
+#define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
+#define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
+#define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
+
+#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
+#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
+#define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
+
+/* Architecture compatibility level */
+#define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
+
+/* Transactional Memory checkpointed state:
+ * This is all GPRs, all VSX regs and a subset of SPRs
+ */
+#define KVM_REG_PPC_TM (KVM_REG_PPC | 0x80000000)
+/* TM GPRs */
+#define KVM_REG_PPC_TM_GPR0 (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_PPC_TM_GPR(n) (KVM_REG_PPC_TM_GPR0 + (n))
+#define KVM_REG_PPC_TM_GPR31 (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x1f)
+/* TM VSX */
+#define KVM_REG_PPC_TM_VSR0 (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x20)
+#define KVM_REG_PPC_TM_VSR(n) (KVM_REG_PPC_TM_VSR0 + (n))
+#define KVM_REG_PPC_TM_VSR63 (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x5f)
+/* TM SPRS */
+#define KVM_REG_PPC_TM_CR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x60)
+#define KVM_REG_PPC_TM_LR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x61)
+#define KVM_REG_PPC_TM_CTR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x62)
+#define KVM_REG_PPC_TM_FPSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x63)
+#define KVM_REG_PPC_TM_AMR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x64)
+#define KVM_REG_PPC_TM_PPR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x65)
+#define KVM_REG_PPC_TM_VRSAVE (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x66)
+#define KVM_REG_PPC_TM_VSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
+#define KVM_REG_PPC_TM_DSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
+#define KVM_REG_PPC_TM_TAR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+
/* PPC64 eXternal Interrupt Controller Specification */
#define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index a6d74467c9ed..fa698324a1fd 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -83,4 +83,6 @@
#define SO_BUSY_POLL 46
+#define SO_MAX_PACING_RATE 47
+
#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index a27ccd5dc6b9..de91f3ae631e 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -54,8 +54,6 @@ struct aligninfo {
/* DSISR bits reported for a DCBZ instruction: */
#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */
-#define SWAP(a, b) (t = (a), (a) = (b), (b) = t)
-
/*
* The PowerPC stores certain bits of the instruction that caused the
* alignment exception in the DSISR register. This array maps those
@@ -256,11 +254,17 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
* bottom 4 bytes of each register, and the loads clear the
* top 4 bytes of the affected register.
*/
+#ifdef __BIG_ENDIAN__
#ifdef CONFIG_PPC64
#define REG_BYTE(rp, i) *((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4)
#else
#define REG_BYTE(rp, i) *((u8 *)(rp) + (i))
#endif
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define REG_BYTE(rp, i) (*(((u8 *)((rp) + ((i)>>2)) + ((i)&3))))
+#endif
#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
@@ -305,6 +309,15 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
nb0 = nb + reg * 4 - 128;
nb = 128 - reg * 4;
}
+#ifdef __LITTLE_ENDIAN__
+ /*
+ * String instructions are endian neutral but the code
+ * below is not. Force byte swapping on so that the
+ * effects of swizzling are undone in the load/store
+ * loops below.
+ */
+ flags ^= SW;
+#endif
} else {
/* lwm, stmw */
nb = (32 - reg) * 4;
@@ -458,7 +471,7 @@ static struct aligninfo spe_aligninfo[32] = {
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
unsigned int instr)
{
- int t, ret;
+ int ret;
union {
u64 ll;
u32 w[2];
@@ -581,24 +594,18 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
if (flags & SW) {
switch (flags & 0xf0) {
case E8:
- SWAP(data.v[0], data.v[7]);
- SWAP(data.v[1], data.v[6]);
- SWAP(data.v[2], data.v[5]);
- SWAP(data.v[3], data.v[4]);
+ data.ll = swab64(data.ll);
break;
case E4:
-
- SWAP(data.v[0], data.v[3]);
- SWAP(data.v[1], data.v[2]);
- SWAP(data.v[4], data.v[7]);
- SWAP(data.v[5], data.v[6]);
+ data.w[0] = swab32(data.w[0]);
+ data.w[1] = swab32(data.w[1]);
break;
/* Its half word endian */
default:
- SWAP(data.v[0], data.v[1]);
- SWAP(data.v[2], data.v[3]);
- SWAP(data.v[4], data.v[5]);
- SWAP(data.v[6], data.v[7]);
+ data.h[0] = swab16(data.h[0]);
+ data.h[1] = swab16(data.h[1]);
+ data.h[2] = swab16(data.h[2]);
+ data.h[3] = swab16(data.h[3]);
break;
}
}
@@ -658,14 +665,31 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
flush_vsx_to_thread(current);
if (reg < 32)
- ptr = (char *) &current->thread.TS_FPR(reg);
+ ptr = (char *) &current->thread.fp_state.fpr[reg][0];
else
- ptr = (char *) &current->thread.vr[reg - 32];
+ ptr = (char *) &current->thread.vr_state.vr[reg - 32];
lptr = (unsigned long *) ptr;
+#ifdef __LITTLE_ENDIAN__
+ if (flags & SW) {
+ elsize = length;
+ sw = length-1;
+ } else {
+ /*
+ * The elements are BE ordered, even in LE mode, so process
+ * them in reverse order.
+ */
+ addr += length - elsize;
+
+ /* 8 byte memory accesses go in the top 8 bytes of the VR */
+ if (length == 8)
+ ptr += 8;
+ }
+#else
if (flags & SW)
sw = elsize-1;
+#endif
for (j = 0; j < length; j += elsize) {
for (i = 0; i < elsize; ++i) {
@@ -675,19 +699,31 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
ret |= __get_user(ptr[i^sw], addr + i);
}
ptr += elsize;
+#ifdef __LITTLE_ENDIAN__
+ addr -= elsize;
+#else
addr += elsize;
+#endif
}
+#ifdef __BIG_ENDIAN__
+#define VSX_HI 0
+#define VSX_LO 1
+#else
+#define VSX_HI 1
+#define VSX_LO 0
+#endif
+
if (!ret) {
if (flags & U)
regs->gpr[areg] = regs->dar;
/* Splat load copies the same data to top and bottom 8 bytes */
if (flags & SPLT)
- lptr[1] = lptr[0];
- /* For 8 byte loads, zero the top 8 bytes */
+ lptr[VSX_LO] = lptr[VSX_HI];
+ /* For 8 byte loads, zero the low 8 bytes */
else if (!(flags & ST) && (8 == length))
- lptr[1] = 0;
+ lptr[VSX_LO] = 0;
} else
return -EFAULT;
@@ -710,18 +746,28 @@ int fix_alignment(struct pt_regs *regs)
unsigned int dsisr;
unsigned char __user *addr;
unsigned long p, swiz;
- int ret, t;
- union {
+ int ret, i;
+ union data {
u64 ll;
double dd;
unsigned char v[8];
struct {
+#ifdef __LITTLE_ENDIAN__
+ int low32;
+ unsigned hi32;
+#else
unsigned hi32;
int low32;
+#endif
} x32;
struct {
+#ifdef __LITTLE_ENDIAN__
+ short low16;
+ unsigned char hi48[6];
+#else
unsigned char hi48[6];
short low16;
+#endif
} x16;
} data;
@@ -780,8 +826,9 @@ int fix_alignment(struct pt_regs *regs)
/* Byteswap little endian loads and stores */
swiz = 0;
- if (regs->msr & MSR_LE) {
+ if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
flags ^= SW;
+#ifdef __BIG_ENDIAN__
/*
* So-called "PowerPC little endian" mode works by
* swizzling addresses rather than by actually doing
@@ -794,6 +841,7 @@ int fix_alignment(struct pt_regs *regs)
*/
if (cpu_has_feature(CPU_FTR_PPC_LE))
swiz = 7;
+#endif
}
/* DAR has the operand effective address */
@@ -818,7 +866,7 @@ int fix_alignment(struct pt_regs *regs)
elsize = 8;
flags = 0;
- if (regs->msr & MSR_LE)
+ if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE))
flags |= SW;
if (instruction & 0x100)
flags |= ST;
@@ -878,32 +926,36 @@ int fix_alignment(struct pt_regs *regs)
* get it from register values
*/
if (!(flags & ST)) {
- data.ll = 0;
- ret = 0;
- p = (unsigned long) addr;
+ unsigned int start = 0;
+
switch (nb) {
- case 8:
- ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++));
- ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++));
- ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++));
- ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++));
case 4:
- ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++));
- ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++));
+ start = offsetof(union data, x32.low32);
+ break;
case 2:
- ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++));
- ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++));
- if (unlikely(ret))
- return -EFAULT;
+ start = offsetof(union data, x16.low16);
+ break;
}
+
+ data.ll = 0;
+ ret = 0;
+ p = (unsigned long)addr;
+
+ for (i = 0; i < nb; i++)
+ ret |= __get_user_inatomic(data.v[start + i],
+ SWIZ_PTR(p++));
+
+ if (unlikely(ret))
+ return -EFAULT;
+
} else if (flags & F) {
- data.dd = current->thread.TS_FPR(reg);
+ data.ll = current->thread.TS_FPR(reg);
if (flags & S) {
/* Single-precision FP store requires conversion... */
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
- cvt_df(&data.dd, (float *)&data.v[4]);
+ cvt_df(&data.dd, (float *)&data.x32.low32);
preempt_enable();
#else
return 0;
@@ -915,17 +967,13 @@ int fix_alignment(struct pt_regs *regs)
if (flags & SW) {
switch (nb) {
case 8:
- SWAP(data.v[0], data.v[7]);
- SWAP(data.v[1], data.v[6]);
- SWAP(data.v[2], data.v[5]);
- SWAP(data.v[3], data.v[4]);
+ data.ll = swab64(data.ll);
break;
case 4:
- SWAP(data.v[4], data.v[7]);
- SWAP(data.v[5], data.v[6]);
+ data.x32.low32 = swab32(data.x32.low32);
break;
case 2:
- SWAP(data.v[6], data.v[7]);
+ data.x16.low16 = swab16(data.x16.low16);
break;
}
}
@@ -947,7 +995,7 @@ int fix_alignment(struct pt_regs *regs)
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
- cvt_fd((float *)&data.v[4], &data.dd);
+ cvt_fd((float *)&data.x32.low32, &data.dd);
preempt_enable();
#else
return 0;
@@ -957,25 +1005,28 @@ int fix_alignment(struct pt_regs *regs)
/* Store result to memory or update registers */
if (flags & ST) {
- ret = 0;
- p = (unsigned long) addr;
+ unsigned int start = 0;
+
switch (nb) {
- case 8:
- ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++));
- ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++));
- ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++));
- ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++));
case 4:
- ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++));
- ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++));
+ start = offsetof(union data, x32.low32);
+ break;
case 2:
- ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++));
- ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++));
+ start = offsetof(union data, x16.low16);
+ break;
}
+
+ ret = 0;
+ p = (unsigned long)addr;
+
+ for (i = 0; i < nb; i++)
+ ret |= __put_user_inatomic(data.v[start + i],
+ SWIZ_PTR(p++));
+
if (unlikely(ret))
return -EFAULT;
} else if (flags & F)
- current->thread.TS_FPR(reg) = data.dd;
+ current->thread.TS_FPR(reg) = data.ll;
else
regs->gpr[reg] = data.ll;
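The swab16/swab32/swab64 calls that replace the old SWAP() byte shuffles above are plain byte reversals. A stand-alone sketch of equivalent helpers (the kernel's own versions live in <linux/swab.h>; these are just for illustration):

#include <stdio.h>
#include <stdint.h>

/* Byte reversals equivalent in effect to the kernel's swab16/32/64. */
static uint16_t my_swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

static uint32_t my_swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

static uint64_t my_swab64(uint64_t x)
{
	return ((uint64_t)my_swab32((uint32_t)x) << 32) |
	        (uint64_t)my_swab32((uint32_t)(x >> 32));
}

int main(void)
{
	printf("%#x\n",   my_swab16(0x1234));              /* 0x3412 */
	printf("%#x\n",   my_swab32(0x12345678u));         /* 0x78563412 */
	printf("%#llx\n", (unsigned long long)
	                  my_swab64(0x0102030405060708ULL)); /* 0x807060504030201 */
	return 0;
}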
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 502c7a4e73f7..2ea5cc033ec8 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -90,16 +90,17 @@ int main(void)
DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
#endif
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
- DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
- DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
+ DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state));
+ DEFINE(THREAD_FPSAVEAREA, offsetof(struct thread_struct, fp_save_area));
+ DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr));
#ifdef CONFIG_ALTIVEC
- DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
+ DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state));
+ DEFINE(THREAD_VRSAVEAREA, offsetof(struct thread_struct, vr_save_area));
DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
- DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
+ DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr));
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
- DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr));
DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr));
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
@@ -114,7 +115,7 @@ int main(void)
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
+ DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, debug.dbcr0));
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
@@ -143,20 +144,12 @@ int main(void)
DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
- DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
- transact_vr[0]));
- DEFINE(THREAD_TRANSACT_VSCR, offsetof(struct thread_struct,
- transact_vscr));
+ DEFINE(THREAD_TRANSACT_VRSTATE, offsetof(struct thread_struct,
+ transact_vr));
DEFINE(THREAD_TRANSACT_VRSAVE, offsetof(struct thread_struct,
transact_vrsave));
- DEFINE(THREAD_TRANSACT_FPR0, offsetof(struct thread_struct,
- transact_fpr[0]));
- DEFINE(THREAD_TRANSACT_FPSCR, offsetof(struct thread_struct,
- transact_fpscr));
-#ifdef CONFIG_VSX
- DEFINE(THREAD_TRANSACT_VSR0, offsetof(struct thread_struct,
- transact_fpr[0]));
-#endif
+ DEFINE(THREAD_TRANSACT_FPSTATE, offsetof(struct thread_struct,
+ transact_fp));
/* Local pt_regs on stack for Transactional Memory funcs. */
DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
sizeof(struct pt_regs) + 16);
@@ -446,7 +439,7 @@ int main(void)
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
@@ -477,7 +470,7 @@ int main(void)
DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
/* book3s */
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -509,6 +502,8 @@ int main(void)
DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+ DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
+ DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
@@ -518,18 +513,22 @@ int main(void)
DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
+ DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
+ DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
- DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
- offsetof(struct kvmppc_vcpu_book3s, vcpu));
+ DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
+ DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
+ DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
#ifdef CONFIG_PPC_BOOK3S_64
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
#else
# define SVCPU_FIELD(x, f)
@@ -581,7 +580,7 @@ int main(void)
HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
HSTATE_FIELD(HSTATE_NAPPING, napping);
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
@@ -597,10 +596,11 @@ int main(void)
HSTATE_FIELD(HSTATE_DABR, dabr);
HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
DEFINE(IPI_PRIORITY, IPI_PRIORITY);
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_PPC_BOOK3S_64
HSTATE_FIELD(HSTATE_CFAR, cfar);
+ HSTATE_FIELD(HSTATE_PPR, ppr);
#endif /* CONFIG_PPC_BOOK3S_64 */
#else /* CONFIG_PPC_BOOK3S */
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 55593ee2d5aa..58906d7f4c49 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -189,8 +189,7 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
}
/* If PCI-E capable, dump PCI-E cap 10, and the AER */
- cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (cap) {
+ if (pci_is_pcie(dev)) {
n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
printk(KERN_WARNING
"EEH: PCI-E capabilities and status follow:\n");
@@ -327,11 +326,11 @@ static int eeh_phb_check_failure(struct eeh_pe *pe)
/* Isolate the PHB and send event */
eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
eeh_serialize_unlock(flags);
- eeh_send_failure_event(phb_pe);
pr_err("EEH: PHB#%x failure detected\n",
phb_pe->phb->global_number);
dump_stack();
+ eeh_send_failure_event(phb_pe);
return 1;
out:
@@ -454,8 +453,6 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
eeh_serialize_unlock(flags);
- eeh_send_failure_event(pe);
-
/* Most EEH events are due to device driver bugs. Having
* a stack trace will help the device-driver authors figure
* out what happened. So print that out.
@@ -464,6 +461,8 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
pe->addr, pe->phb->global_number);
dump_stack();
+ eeh_send_failure_event(pe);
+
return 1;
dn_unlock:
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index c04cdf70d487..12679cd43e0c 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -673,9 +673,7 @@ _GLOBAL(ret_from_except_lite)
resume_kernel:
/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
- CURRENT_THREAD_INFO(r9, r1)
- ld r8,TI_FLAGS(r9)
- andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
+ andis. r8,r4,_TIF_EMULATE_STACK_STORE@h
beq+ 1f
addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
@@ -1017,7 +1015,7 @@ _GLOBAL(enter_rtas)
li r9,1
rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
- ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
+ ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
andc r6,r0,r9
sync /* disable interrupts so SRR0/1 */
mtmsrd r0 /* don't get trashed */
@@ -1032,6 +1030,8 @@ _GLOBAL(enter_rtas)
b . /* prevent speculative execution */
_STATIC(rtas_return_loc)
+ FIXUP_ENDIAN
+
/* relocation is off at this point */
GET_PACA(r4)
clrldi r4,r4,2 /* convert to realmode address */
@@ -1103,28 +1103,30 @@ _GLOBAL(enter_prom)
std r10,_CCR(r1)
std r11,_MSR(r1)
- /* Get the PROM entrypoint */
- mtlr r4
+ /* Put PROM address in SRR0 */
+ mtsrr0 r4
- /* Switch MSR to 32 bits mode
+ /* Setup our trampoline return addr in LR */
+ bcl 20,31,$+4
+0: mflr r4
+ addi r4,r4,(1f - 0b)
+ mtlr r4
+
+ /* Prepare a 32-bit mode big endian MSR
*/
#ifdef CONFIG_PPC_BOOK3E
rlwinm r11,r11,0,1,31
- mtmsr r11
+ mtsrr1 r11
+ rfi
#else /* CONFIG_PPC_BOOK3E */
- mfmsr r11
- li r12,1
- rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
- andc r11,r11,r12
- li r12,1
- rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
- andc r11,r11,r12
- mtmsrd r11
+ LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
+ andc r11,r11,r12
+ mtsrr1 r11
+ rfid
#endif /* CONFIG_PPC_BOOK3E */
- isync
- /* Enter PROM here... */
- blrl
+1: /* Return from OF */
+ FIXUP_ENDIAN
/* Just make sure that r1 top 32 bits didn't get
* corrupt by OF
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 6300c13bbde4..7898be90f2dc 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -18,6 +18,7 @@
*/
#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/epapr_hcalls.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 2d067049db27..e7751561fd1d 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -399,7 +399,7 @@ interrupt_end_book3e:
/* Altivec Unavailable Interrupt */
START_EXCEPTION(altivec_unavailable);
- NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
+ NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL,
PROLOG_ADDITION_NONE)
/* we can probably do a shorter exception entry for that one... */
EXCEPTION_COMMON(0x200, PACA_EXGEN, INTS_KEEP)
@@ -421,7 +421,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/* AltiVec Assist */
START_EXCEPTION(altivec_assist);
- NORMAL_EXCEPTION_PROLOG(0x220, BOOKE_INTERRUPT_ALTIVEC_ASSIST,
+ NORMAL_EXCEPTION_PROLOG(0x220,
+ BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x220, PACA_EXGEN, INTS_DISABLE)
bl .save_nvgprs
@@ -607,6 +608,7 @@ kernel_dbg_exc:
NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE)
+ CHECK_NAPPING()
addi r3,r1,STACK_FRAME_OVERHEAD
bl .performance_monitor_exception
b .ret_from_except_lite
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 3a9ed6ac224b..9f905e40922e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -126,7 +126,7 @@ BEGIN_FTR_SECTION
bgt cr1,.
GET_PACA(r13)
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
li r0,KVM_HWTHREAD_IN_KERNEL
stb r0,HSTATE_HWTHREAD_STATE(r13)
/* Order setting hwthread_state vs. testing hwthread_req */
@@ -425,7 +425,7 @@ data_access_check_stab:
mfspr r9,SPRN_DSISR
srdi r10,r10,60
rlwimi r10,r9,16,0x20
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
lbz r9,HSTATE_IN_GUEST(r13)
rlwimi r10,r9,8,0x300
#endif
@@ -650,6 +650,32 @@ slb_miss_user_pseries:
b . /* prevent spec. execution */
#endif /* __DISABLED__ */
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+kvmppc_skip_interrupt:
+ /*
+ * Here all GPRs are unchanged from when the interrupt happened
+ * except for r13, which is saved in SPRG_SCRATCH0.
+ */
+ mfspr r13, SPRN_SRR0
+ addi r13, r13, 4
+ mtspr SPRN_SRR0, r13
+ GET_SCRATCH0(r13)
+ rfid
+ b .
+
+kvmppc_skip_Hinterrupt:
+ /*
+ * Here all GPRs are unchanged from when the interrupt happened
+ * except for r13, which is saved in SPRG_SCRATCH0.
+ */
+ mfspr r13, SPRN_HSRR0
+ addi r13, r13, 4
+ mtspr SPRN_HSRR0, r13
+ GET_SCRATCH0(r13)
+ hrfid
+ b .
+#endif
+
/*
* Code from here down to __end_handlers is invoked from the
* exception prologs above. Because the prologs assemble the
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index caeaabf11a2f..f7f5b8bed68f 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -35,15 +35,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
2: REST_32VSRS(n,c,base); \
3:
-#define __REST_32FPVSRS_TRANSACT(n,c,base) \
-BEGIN_FTR_SECTION \
- b 2f; \
-END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
- REST_32FPRS_TRANSACT(n,base); \
- b 3f; \
-2: REST_32VSRS_TRANSACT(n,c,base); \
-3:
-
#define __SAVE_32FPVSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
@@ -54,40 +45,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
3:
#else
#define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
-#define __REST_32FPVSRS_TRANSACT(n,b,base) REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
#endif
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
-#define REST_32FPVSRS_TRANSACT(n,c,base) \
- __REST_32FPVSRS_TRANSACT(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Wrapper to call load_up_fpu from C.
- * void do_load_up_fpu(struct pt_regs *regs);
- */
-_GLOBAL(do_load_up_fpu)
- mflr r0
- std r0, 16(r1)
- stdu r1, -112(r1)
-
- subi r6, r3, STACK_FRAME_OVERHEAD
- /* load_up_fpu expects r12=MSR, r13=PACA, and returns
- * with r12 = new MSR.
- */
- ld r12,_MSR(r6)
- GET_PACA(r13)
-
- bl load_up_fpu
- std r12,_MSR(r6)
-
- ld r0, 112+16(r1)
- addi r1, r1, 112
- mtlr r0
- blr
-
-
/* void do_load_up_transact_fpu(struct thread_struct *thread)
*
* This is similar to load_up_fpu but for the transactional version of the FP
@@ -105,9 +68,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
SYNC
MTMSRD(r5)
- lfd fr0,THREAD_TRANSACT_FPSCR(r3)
+ addi r7,r3,THREAD_TRANSACT_FPSTATE
+ lfd fr0,FPSTATE_FPSCR(r7)
MTFSF_L(fr0)
- REST_32FPVSRS_TRANSACT(0, R4, R3)
+ REST_32FPVSRS(0, R4, R7)
/* FP/VSX off again */
MTMSRD(r6)
@@ -117,11 +81,33 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
/*
+ * Load state from memory into FP registers including FPSCR.
+ * Assumes the caller has enabled FP in the MSR.
+ */
+_GLOBAL(load_fp_state)
+ lfd fr0,FPSTATE_FPSCR(r3)
+ MTFSF_L(fr0)
+ REST_32FPVSRS(0, R4, R3)
+ blr
+
+/*
+ * Store FP state into memory, including FPSCR
+ * Assumes the caller has enabled FP in the MSR.
+ */
+_GLOBAL(store_fp_state)
+ SAVE_32FPVSRS(0, R4, R3)
+ mffs fr0
+ stfd fr0,FPSTATE_FPSCR(r3)
+ blr
+
+/*
* This task wants to use the FPU now.
* On UP, disable FP for the task which had the FPU previously,
* and save its floating-point registers in its thread_struct.
* Load up this task's FP registers from its thread_struct,
* enable the FPU for the current task and return to the task.
+ * Note that on 32-bit this can only use registers that will be
+ * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
*/
_GLOBAL(load_up_fpu)
mfmsr r5
@@ -147,9 +133,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
beq 1f
toreal(r4)
addi r4,r4,THREAD /* want last_task_used_math->thread */
- SAVE_32FPVSRS(0, R5, R4)
+ addi r10,r4,THREAD_FPSTATE
+ SAVE_32FPVSRS(0, R5, R10)
mffs fr0
- stfd fr0,THREAD_FPSCR(r4)
+ stfd fr0,FPSTATE_FPSCR(r10)
PPC_LL r5,PT_REGS(r4)
toreal(r5)
PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
@@ -160,7 +147,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_SMP */
/* enable use of FP after return */
#ifdef CONFIG_PPC32
- mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
+ mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
lwz r4,THREAD_FPEXC_MODE(r5)
ori r9,r9,MSR_FP /* enable FP for current */
or r9,r9,r4
@@ -172,9 +159,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
or r12,r12,r4
std r12,_MSR(r1)
#endif
- lfd fr0,THREAD_FPSCR(r5)
+ addi r10,r5,THREAD_FPSTATE
+ lfd fr0,FPSTATE_FPSCR(r10)
MTFSF_L(fr0)
- REST_32FPVSRS(0, R4, R5)
+ REST_32FPVSRS(0, R4, R10)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
fromreal(r4)
@@ -206,11 +194,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
PPC_LCMPI 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
+ PPC_LL r6,THREAD_FPSAVEAREA(r3)
PPC_LL r5,PT_REGS(r3)
- PPC_LCMPI 0,r5,0
- SAVE_32FPVSRS(0, R4 ,R3)
+ PPC_LCMPI 0,r6,0
+ bne 2f
+ addi r6,r3,THREAD_FPSTATE
+2: PPC_LCMPI 0,r5,0
+ SAVE_32FPVSRS(0, R4, R6)
mffs fr0
- stfd fr0,THREAD_FPSCR(r3)
+ stfd fr0,FPSTATE_FPSCR(r6)
beq 1f
PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r3,MSR_FP|MSR_FE0|MSR_FE1
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 1fb78561096a..9b27b293a922 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -174,7 +174,11 @@ __ftrace_make_nop(struct module *mod,
pr_devel(" %08x %08x\n", jmp[0], jmp[1]);
+#ifdef __LITTLE_ENDIAN__
+ ptr = ((unsigned long)jmp[1] << 32) + jmp[0];
+#else
ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
+#endif
/* This should match what was called */
if (ptr != ppc_function_entry((void *)addr)) {
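The jmp[0]/jmp[1] ordering above exists because a 64-bit value stored in memory splits into its two 32-bit halves in opposite orders on big- and little-endian hosts. A small host-side sketch of the same reassembly, using a runtime probe instead of the __LITTLE_ENDIAN__ macro:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint64_t addr = 0x1122334455667788ULL;
	uint32_t jmp[2];
	uint64_t ptr;
	uint16_t probe = 1;
	int little_endian;

	/* Lay the 64-bit value out in memory as two 32-bit words,
	 * exactly as it would be seen when read back word by word. */
	memcpy(jmp, &addr, sizeof(addr));

	/* Runtime endianness probe: on a little-endian host the low
	 * byte of a 1 lands in the first byte of memory. */
	little_endian = *(uint8_t *)&probe == 1;

	if (little_endian)
		ptr = ((uint64_t)jmp[1] << 32) + jmp[0];
	else
		ptr = ((uint64_t)jmp[0] << 32) + jmp[1];

	printf("reassembled: %#llx (%s)\n",
	       (unsigned long long)ptr,
	       little_endian ? "LE" : "BE");
	return 0;
}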
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 3d11d8038dee..2ae41aba4053 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -68,6 +68,7 @@ _stext:
_GLOBAL(__start)
/* NOP this out unconditionally */
BEGIN_FTR_SECTION
+ FIXUP_ENDIAN
b .__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
@@ -115,6 +116,7 @@ __run_at_load:
*/
.globl __secondary_hold
__secondary_hold:
+ FIXUP_ENDIAN
#ifndef CONFIG_PPC_BOOK3E
mfmsr r24
ori r24,r24,MSR_RI
@@ -205,6 +207,7 @@ _GLOBAL(generic_secondary_thread_init)
* as SCOM before entry).
*/
_GLOBAL(generic_secondary_smp_init)
+ FIXUP_ENDIAN
mr r24,r3
mr r25,r4
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 1b92a97b1b04..7ee876d2adb5 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -858,6 +858,9 @@ initial_mmu:
addis r11, r11, 0x0080 /* Add 8M */
mtspr SPRN_MD_RPN, r11
+ addi r10, r10, 0x0100
+ mtspr SPRN_MD_CTR, r10
+
addis r8, r8, 0x0080 /* Add 8M */
mtspr SPRN_MD_EPN, r8
mtspr SPRN_MD_TWC, r9
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 289afaffbbb5..f45726a1d963 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -555,27 +555,27 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#ifdef CONFIG_SPE
/* SPE Unavailable */
START_EXCEPTION(SPEUnavailable)
- NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
+ NORMAL_EXCEPTION_PROLOG(SPE_ALTIVEC_UNAVAIL)
beq 1f
bl load_up_spe
b fast_exception_return
1: addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
- EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
+ EXCEPTION(0x2020, SPE_ALTIVEC_UNAVAIL, SPEUnavailable, \
unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
/* SPE Floating Point Data */
#ifdef CONFIG_SPE
- EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData, \
- SPEFloatingPointException, EXC_XFER_EE);
+ EXCEPTION(0x2030, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData,
+ SPEFloatingPointException, EXC_XFER_EE)
/* SPE Floating Point Round */
EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
SPEFloatingPointRoundException, EXC_XFER_EE)
#else
- EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, \
+ EXCEPTION(0x2040, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData,
unknown_exception, EXC_XFER_EE)
EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
unknown_exception, EXC_XFER_EE)
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 16a7c2326d48..1114d13ac19f 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -292,6 +292,7 @@ out:
return rc;
return count;
}
+static BUS_ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe);
static ssize_t ibmebus_store_remove(struct bus_type *bus,
const char *buf, size_t count)
@@ -317,13 +318,14 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
return -ENODEV;
}
}
+static BUS_ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove);
-
-static struct bus_attribute ibmebus_bus_attrs[] = {
- __ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe),
- __ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove),
- __ATTR_NULL
+static struct attribute *ibmbus_bus_attrs[] = {
+ &bus_attr_probe.attr,
+ &bus_attr_remove.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(ibmbus_bus);
static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv)
{
@@ -713,7 +715,7 @@ static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
struct bus_type ibmebus_bus_type = {
.name = "ibmebus",
.uevent = of_device_uevent_modalias,
- .bus_attrs = ibmebus_bus_attrs,
+ .bus_groups = ibmbus_bus_groups,
.match = ibmebus_bus_bus_match,
.probe = ibmebus_bus_device_probe,
.remove = ibmebus_bus_device_remove,
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index e11863f4e595..847e40e62fce 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -84,7 +84,7 @@ _GLOBAL(power7_nap)
std r9,_MSR(r1)
std r1,PACAR1(r13)
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Tell KVM we're napping */
li r4,KVM_HWTHREAD_IN_NAP
stb r4,HSTATE_HWTHREAD_STATE(r13)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c7cb8c232d2f..ba0165615215 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -594,7 +594,7 @@ void irq_ctx_init(void)
}
}
-static inline void do_softirq_onstack(void)
+void do_softirq_own_stack(void)
{
struct thread_info *curtp, *irqtp;
@@ -612,21 +612,6 @@ static inline void do_softirq_onstack(void)
set_bits(irqtp->flags, &curtp->flags);
}
-void do_softirq(void)
-{
- unsigned long flags;
-
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
-
- if (local_softirq_pending())
- do_softirq_onstack();
-
- local_irq_restore(flags);
-}
-
irq_hw_number_t virq_to_hw(unsigned int virq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index c1eef241017a..83e89d310734 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -151,15 +151,16 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
return 1;
}
+static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info);
static int kgdb_singlestep(struct pt_regs *regs)
{
struct thread_info *thread_info, *exception_thread_info;
- struct thread_info *backup_current_thread_info;
+ struct thread_info *backup_current_thread_info =
+ &__get_cpu_var(kgdb_thread_info);
if (user_mode(regs))
return 0;
- backup_current_thread_info = kmalloc(sizeof(struct thread_info), GFP_KERNEL);
/*
* On Book E and perhaps other processors, singlestep is handled on
* the critical exception stack. This causes current_thread_info()
@@ -185,7 +186,6 @@ static int kgdb_singlestep(struct pt_regs *regs)
/* Restore current_thread_info lastly. */
memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);
- kfree(backup_current_thread_info);
return 1;
}
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 2156ea90eb54..90fab64d911d 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -429,7 +429,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accouting
+ * we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 22e88dd2f34a..40bd7bd4e19a 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -35,7 +35,7 @@ static struct legacy_serial_info {
phys_addr_t taddr;
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
-static struct __initdata of_device_id legacy_serial_parents[] = {
+static struct of_device_id legacy_serial_parents[] __initdata = {
{.type = "soc",},
{.type = "tsi-bridge",},
{.type = "opb", },
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 611acdf30096..be4e6d648f60 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -312,7 +312,7 @@ static union thread_union kexec_stack __init_task_data =
*/
struct paca_struct kexec_paca;
-/* Our assembly helper, in kexec_stub.S */
+/* Our assembly helper, in misc_64.S */
extern void kexec_sequence(void *newstack, unsigned long start,
void *image, void *control,
void (*clear_all)(void)) __noreturn;
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 2b0ad9845363..e47d268727a4 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -659,6 +659,20 @@ _GLOBAL(__lshrdi3)
blr
/*
+ * 64-bit comparison: __cmpdi2(s64 a, s64 b)
+ * Returns 0 if a < b, 1 if a == b, 2 if a > b.
+ */
+_GLOBAL(__cmpdi2)
+ cmpw r3,r5
+ li r3,1
+ bne 1f
+ cmplw r4,r6
+ beqlr
+1: li r3,0
+ bltlr
+ li r3,2
+ blr
+/*
* 64-bit comparison: __ucmpdi2(u64 a, u64 b)
* Returns 0 if a < b, 1 if a == b, 2 if a > b.
*/
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 2d275707f419..9547381b631a 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -25,8 +25,7 @@
#include <asm/uaccess.h>
#include <asm/firmware.h>
#include <linux/sort.h>
-
-#include "setup.h"
+#include <asm/setup.h>
LIST_HEAD(module_bug_list);
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 2e3200ca485f..6cff040bf456 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -26,8 +26,7 @@
#include <linux/cache.h>
#include <linux/bug.h>
#include <linux/sort.h>
-
-#include "setup.h"
+#include <asm/setup.h>
#if 0
#define DEBUGP printk
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 6ee59a0eb268..12664c130d73 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -26,8 +26,7 @@
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <linux/sort.h>
-
-#include "setup.h"
+#include <asm/setup.h>
/* FIXME: We don't do .init separately. To do this, we'd need to have
a separate r2 value in the init and core section, and stub between
@@ -62,6 +61,16 @@ struct ppc64_stub_entry
r2) into the stub. */
static struct ppc64_stub_entry ppc64_stub =
{ .jump = {
+#ifdef __LITTLE_ENDIAN__
+ 0x00, 0x00, 0x82, 0x3d, /* addis r12,r2, <high> */
+ 0x00, 0x00, 0x8c, 0x39, /* addi r12,r12, <low> */
+ /* Save current r2 value in magic place on the stack. */
+ 0x28, 0x00, 0x41, 0xf8, /* std r2,40(r1) */
+ 0x20, 0x00, 0x6c, 0xe9, /* ld r11,32(r12) */
+ 0x28, 0x00, 0x4c, 0xe8, /* ld r2,40(r12) */
+ 0xa6, 0x03, 0x69, 0x7d, /* mtctr r11 */
+ 0x20, 0x04, 0x80, 0x4e /* bctr */
+#else
0x3d, 0x82, 0x00, 0x00, /* addis r12,r2, <high> */
0x39, 0x8c, 0x00, 0x00, /* addi r12,r12, <low> */
/* Save current r2 value in magic place on the stack. */
@@ -70,6 +79,7 @@ static struct ppc64_stub_entry ppc64_stub =
0xe8, 0x4c, 0x00, 0x28, /* ld r2,40(r12) */
0x7d, 0x69, 0x03, 0xa6, /* mtctr r11 */
0x4e, 0x80, 0x04, 0x20 /* bctr */
+#endif
} };
/* Count how many different 24-bit relocations (different symbol,
@@ -269,8 +279,13 @@ static inline int create_stub(Elf64_Shdr *sechdrs,
*entry = ppc64_stub;
+#ifdef __LITTLE_ENDIAN__
+ loc1 = (Elf64_Half *)&entry->jump[0];
+ loc2 = (Elf64_Half *)&entry->jump[4];
+#else
loc1 = (Elf64_Half *)&entry->jump[2];
loc2 = (Elf64_Half *)&entry->jump[6];
+#endif
/* Stub uses address relative to r2. */
reladdr = (unsigned long)entry - my_r2(sechdrs, me);
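The duplicated byte arrays in ppc64_stub above encode the same seven instructions; only the in-memory byte order of each 32-bit opcode differs. A quick check that, for example, addis r12,r2,0 (opcode word 0x3d820000) really lays out as 3d 82 00 00 on a big-endian host and 00 00 82 3d on a little-endian one, matching the two arrays:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t addis_r12_r2 = 0x3d820000; /* addis r12,r2,<high> with 0 immediate */
	uint8_t bytes[4];

	memcpy(bytes, &addis_r12_r2, sizeof(bytes));

	/* Prints "3d 82 00 00" on a big-endian host and
	 * "00 00 82 3d" on a little-endian one -- the same split as the
	 * #ifdef __LITTLE_ENDIAN__ / #else arrays in the stub. */
	printf("%02x %02x %02x %02x\n",
	       bytes[0], bytes[1], bytes[2], bytes[3]);
	return 0;
}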
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 8213ee1eb05a..fd82c289ab1c 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -223,9 +223,13 @@ static int __init nvram_write_header(struct nvram_partition * part)
{
loff_t tmp_index;
int rc;
-
+ struct nvram_header phead;
+
+ memcpy(&phead, &part->header, NVRAM_HEADER_LEN);
+ phead.length = cpu_to_be16(phead.length);
+
tmp_index = part->index;
- rc = ppc_md.nvram_write((char *)&part->header, NVRAM_HEADER_LEN, &tmp_index);
+ rc = ppc_md.nvram_write((char *)&phead, NVRAM_HEADER_LEN, &tmp_index);
return rc;
}
@@ -505,6 +509,8 @@ int __init nvram_scan_partitions(void)
memcpy(&phead, header, NVRAM_HEADER_LEN);
+ phead.length = be16_to_cpu(phead.length);
+
err = 0;
c_sum = nvram_checksum(&phead);
if (c_sum != phead.checksum) {
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 3fc16e3beb9f..0620eaaaad45 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -46,7 +46,7 @@ struct lppaca lppaca[] = {
static struct lppaca *extra_lppacas;
static long __initdata lppaca_size;
-static void allocate_lppacas(int nr_cpus, unsigned long limit)
+static void __init allocate_lppacas(int nr_cpus, unsigned long limit)
{
if (nr_cpus <= NR_LPPACAS)
return;
@@ -57,7 +57,7 @@ static void allocate_lppacas(int nr_cpus, unsigned long limit)
PAGE_SIZE, limit));
}
-static struct lppaca *new_lppaca(int cpu)
+static struct lppaca * __init new_lppaca(int cpu)
{
struct lppaca *lp;
@@ -70,7 +70,7 @@ static struct lppaca *new_lppaca(int cpu)
return lp;
}
-static void free_lppacas(void)
+static void __init free_lppacas(void)
{
long new_size = 0, nr;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 905a24bb7acc..a1e3e40ca3fd 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -228,7 +228,7 @@ int pcibios_add_platform_entries(struct pci_dev *pdev)
*/
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
- struct of_irq oirq;
+ struct of_phandle_args oirq;
unsigned int virq;
pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
@@ -237,7 +237,7 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
memset(&oirq, 0xff, sizeof(oirq));
#endif
/* Try to get a mapping from the device-tree */
- if (of_irq_map_pci(pci_dev, &oirq)) {
+ if (of_irq_parse_pci(pci_dev, &oirq)) {
u8 line, pin;
/* If that fails, lets fallback to what is in the config
@@ -263,11 +263,10 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
} else {
pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
- oirq.size, oirq.specifier[0], oirq.specifier[1],
- of_node_full_name(oirq.controller));
+ oirq.args_count, oirq.args[0], oirq.args[1],
+ of_node_full_name(oirq.np));
- virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
+ virq = irq_create_of_mapping(&oirq);
}
if(virq == NO_IRQ) {
pr_debug(" Failed to map !\n");
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 4368ec6fdc8c..ac0b034f9ae0 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -302,7 +302,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus,
struct device_node *dn)
{
struct pci_dev *dev = NULL;
- const u32 *reg;
+ const __be32 *reg;
int reglen, devfn;
pr_debug(" * %s\n", dn->full_name);
@@ -312,7 +312,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus,
reg = of_get_property(dn, "reg", &reglen);
if (reg == NULL || reglen < 20)
return NULL;
- devfn = (reg[0] >> 8) & 0xff;
+ devfn = (of_read_number(reg, 1) >> 8) & 0xff;
/* Check if the PCI device is already there */
dev = pci_get_slot(bus, devfn);
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 21646dbe1bb3..3bd77edd7610 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -79,10 +79,12 @@ EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
+#ifndef CONFIG_GENERIC_CSUM
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(csum_tcpudp_magic);
+#endif
EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
@@ -98,9 +100,13 @@ EXPORT_SYMBOL(start_thread);
#ifdef CONFIG_PPC_FPU
EXPORT_SYMBOL(giveup_fpu);
+EXPORT_SYMBOL(load_fp_state);
+EXPORT_SYMBOL(store_fp_state);
#endif
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL(giveup_altivec);
+EXPORT_SYMBOL(load_vr_state);
+EXPORT_SYMBOL(store_vr_state);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
EXPORT_SYMBOL(giveup_vsx);
@@ -143,10 +149,14 @@ EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
int __ucmpdi2(unsigned long long, unsigned long long);
EXPORT_SYMBOL(__ucmpdi2);
+int __cmpdi2(long long, long long);
+EXPORT_SYMBOL(__cmpdi2);
#endif
long long __bswapdi2(long long);
EXPORT_SYMBOL(__bswapdi2);
+#ifdef __BIG_ENDIAN__
EXPORT_SYMBOL(memcpy);
+#endif
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memcmp);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 96d2fdf3aa9e..75c2d1009985 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -314,28 +314,28 @@ static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
*/
static void set_debug_reg_defaults(struct thread_struct *thread)
{
- thread->iac1 = thread->iac2 = 0;
+ thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
- thread->iac3 = thread->iac4 = 0;
+ thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
- thread->dac1 = thread->dac2 = 0;
+ thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- thread->dvc1 = thread->dvc2 = 0;
+ thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
- thread->dbcr0 = 0;
+ thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
/*
* Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
*/
- thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | \
+ thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
DBCR1_IAC3US | DBCR1_IAC4US;
/*
* Force Data Address Compare User/Supervisor bits to be User-only
* (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
*/
- thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
+ thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
- thread->dbcr1 = 0;
+ thread->debug.dbcr1 = 0;
#endif
}
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
*/
mtmsr(mfmsr() & ~MSR_DE);
- mtspr(SPRN_IAC1, thread->iac1);
- mtspr(SPRN_IAC2, thread->iac2);
+ mtspr(SPRN_IAC1, thread->debug.iac1);
+ mtspr(SPRN_IAC2, thread->debug.iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
- mtspr(SPRN_IAC3, thread->iac3);
- mtspr(SPRN_IAC4, thread->iac4);
+ mtspr(SPRN_IAC3, thread->debug.iac3);
+ mtspr(SPRN_IAC4, thread->debug.iac4);
#endif
- mtspr(SPRN_DAC1, thread->dac1);
- mtspr(SPRN_DAC2, thread->dac2);
+ mtspr(SPRN_DAC1, thread->debug.dac1);
+ mtspr(SPRN_DAC2, thread->debug.dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- mtspr(SPRN_DVC1, thread->dvc1);
- mtspr(SPRN_DVC2, thread->dvc2);
+ mtspr(SPRN_DVC1, thread->debug.dvc1);
+ mtspr(SPRN_DVC2, thread->debug.dvc2);
#endif
- mtspr(SPRN_DBCR0, thread->dbcr0);
- mtspr(SPRN_DBCR1, thread->dbcr1);
+ mtspr(SPRN_DBCR0, thread->debug.dbcr0);
+ mtspr(SPRN_DBCR1, thread->debug.dbcr1);
#ifdef CONFIG_BOOKE
- mtspr(SPRN_DBCR2, thread->dbcr2);
+ mtspr(SPRN_DBCR2, thread->debug.dbcr2);
#endif
}
/*
@@ -371,12 +371,13 @@ static void prime_debug_regs(struct thread_struct *thread)
* debug registers, set the debug registers from the values
* stored in the new thread.
*/
-static void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct thread_struct *new_thread)
{
- if ((current->thread.dbcr0 & DBCR0_IDM)
- || (new_thread->dbcr0 & DBCR0_IDM))
+ if ((current->thread.debug.dbcr0 & DBCR0_IDM)
+ || (new_thread->debug.dbcr0 & DBCR0_IDM))
prime_debug_regs(new_thread);
}
+EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
@@ -596,12 +597,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
{
struct thread_struct *new_thread, *old_thread;
- unsigned long flags;
struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
struct ppc64_tlb_batch *batch;
#endif
+ WARN_ON(!irqs_disabled());
+
/* Back up the TAR across context switches.
* Note that the TAR is not available for use in the kernel. (To
* provide this, the TAR should be backed up/restored on exception
@@ -721,8 +723,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
}
#endif /* CONFIG_PPC_BOOK3S_64 */
- local_irq_save(flags);
-
/*
* We can't take a PMU exception inside _switch() since there is a
* window where the kernel stack SLB and the kernel stack are out
@@ -742,8 +742,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
}
#endif /* CONFIG_PPC_BOOK3S_64 */
- local_irq_restore(flags);
-
return last;
}
@@ -1008,6 +1006,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.ptrace_bps[0] = NULL;
#endif
+ p->thread.fp_save_area = NULL;
+#ifdef CONFIG_ALTIVEC
+ p->thread.vr_save_area = NULL;
+#endif
+
#ifdef CONFIG_PPC_STD_MMU_64
if (mmu_has_feature(MMU_FTR_SLB)) {
unsigned long sp_vsid;
@@ -1113,12 +1116,12 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
#endif
- memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
- current->thread.fpscr.val = 0;
+ memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
+ current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
- memset(current->thread.vr, 0, sizeof(current->thread.vr));
- memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
- current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
+ memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
+ current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
+ current->thread.vr_save_area = NULL;
current->thread.vrsave = 0;
current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index b7634ce41dbc..f3a47098fb8e 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -546,15 +546,6 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
memblock_add(base, size);
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
-}
-#endif
-
static void __init early_reserve_mem_dt(void)
{
unsigned long i, len, dt_root;
@@ -761,37 +752,6 @@ void __init early_init_devtree(void *params)
*******/
/**
- * of_find_next_cache_node - Find a node's subsidiary cache
- * @np: node of type "cpu" or "cache"
- *
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done. Caller should hold a reference
- * to np.
- */
-struct device_node *of_find_next_cache_node(struct device_node *np)
-{
- struct device_node *child;
- const phandle *handle;
-
- handle = of_get_property(np, "l2-cache", NULL);
- if (!handle)
- handle = of_get_property(np, "next-level-cache", NULL);
-
- if (handle)
- return of_find_node_by_phandle(*handle);
-
- /* OF on pmac has nodes instead of properties named "l2-cache"
- * beneath CPU nodes.
- */
- if (!strcmp(np->type, "cpu"))
- for_each_child_of_node(np, child)
- if (!strcmp(child->type, "cache"))
- return child;
-
- return NULL;
-}
-
-/**
* of_get_ibm_chip_id - Returns the IBM "chip-id" of a device
* @np: device node of the device
*
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 5fe2842e8bab..cb64a6e1dc51 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -858,7 +858,8 @@ static void __init prom_send_capabilities(void)
{
ihandle root;
prom_arg_t ret;
- __be32 *cores;
+ u32 cores;
+ unsigned char *ptcores;
root = call_prom("open", 1, 1, ADDR("/"));
if (root != 0) {
@@ -868,15 +869,30 @@ static void __init prom_send_capabilities(void)
* (we assume this is the same for all cores) and use it to
* divide NR_CPUS.
*/
- cores = (__be32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
- if (be32_to_cpup(cores) != NR_CPUS) {
+
+ /* The core value may start at an odd address. If such a word
+ * access is made at a cache line boundary, this leads to an
+ * exception which may not be handled at this time.
+ * Forcing a per byte access to avoid exception.
+ */
+ ptcores = &ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
+ cores = 0;
+ cores |= ptcores[0] << 24;
+ cores |= ptcores[1] << 16;
+ cores |= ptcores[2] << 8;
+ cores |= ptcores[3];
+ if (cores != NR_CPUS) {
prom_printf("WARNING ! "
"ibm_architecture_vec structure inconsistent: %lu!\n",
- be32_to_cpup(cores));
+ cores);
} else {
- *cores = cpu_to_be32(DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()));
+ cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
- be32_to_cpup(cores), NR_CPUS);
+ cores, NR_CPUS);
+ ptcores[0] = (cores >> 24) & 0xff;
+ ptcores[1] = (cores >> 16) & 0xff;
+ ptcores[2] = (cores >> 8) & 0xff;
+ ptcores[3] = cores & 0xff;
}
/* try calling the ibm,client-architecture-support method */
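Assembling and scattering the 32-bit core count one byte at a time, as the hunk above does, sidesteps any unaligned word access. The same pattern in isolation (the get_be32/put_be32 helper names are illustrative, not kernel API):

#include <stdio.h>
#include <stdint.h>

/* Read a big-endian 32-bit value from a possibly unaligned buffer,
 * one byte at a time, and write one back the same way. */
static uint32_t get_be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) |
	       ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] <<  8) |
	        (uint32_t)p[3];
}

static void put_be32(unsigned char *p, uint32_t v)
{
	p[0] = (v >> 24) & 0xff;
	p[1] = (v >> 16) & 0xff;
	p[2] = (v >>  8) & 0xff;
	p[3] = v & 0xff;
}

int main(void)
{
	/* Odd offset into the buffer, like the NR_CPUS slot in
	 * ibm_architecture_vec, where a plain 32-bit load could fault
	 * on a strict-alignment machine. */
	unsigned char vec[9] = { 0 };
	unsigned char *slot = vec + 1;

	put_be32(slot, 2048);
	printf("cores = %u\n", get_be32(slot));
	return 0;
}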
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 9a0d24c390a3..75fb40498b41 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -362,7 +362,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
- double buf[33];
+ u64 buf[33];
int i;
#endif
flush_fp_to_thread(target);
@@ -371,15 +371,15 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
- memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
+ buf[32] = target->thread.fp_state.fpscr;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#else
- BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
- offsetof(struct thread_struct, TS_FPR(32)));
+ BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+ offsetof(struct thread_fp_state, fpr[32][0]));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpr, 0, -1);
+ &target->thread.fp_state, 0, -1);
#endif
}
@@ -388,7 +388,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
- double buf[33];
+ u64 buf[33];
int i;
#endif
flush_fp_to_thread(target);
@@ -400,14 +400,14 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
return i;
for (i = 0; i < 32 ; i++)
target->thread.TS_FPR(i) = buf[i];
- memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
+ target->thread.fp_state.fpscr = buf[32];
return 0;
#else
- BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
- offsetof(struct thread_struct, TS_FPR(32)));
+ BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+ offsetof(struct thread_fp_state, fpr[32][0]));
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpr, 0, -1);
+ &target->thread.fp_state, 0, -1);
#endif
}
@@ -440,11 +440,11 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
flush_altivec_to_thread(target);
- BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
- offsetof(struct thread_struct, vr[32]));
+ BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+ offsetof(struct thread_vr_state, vr[32]));
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.vr, 0,
+ &target->thread.vr_state, 0,
33 * sizeof(vector128));
if (!ret) {
/*
@@ -471,11 +471,12 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
flush_altivec_to_thread(target);
- BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
- offsetof(struct thread_struct, vr[32]));
+ BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+ offsetof(struct thread_vr_state, vr[32]));
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.vr, 0, 33 * sizeof(vector128));
+ &target->thread.vr_state, 0,
+ 33 * sizeof(vector128));
if (!ret && count > 0) {
/*
* We use only the first word of vrsave.
@@ -514,13 +515,13 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- double buf[32];
+ u64 buf[32];
int ret, i;
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
+ buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
@@ -531,7 +532,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- double buf[32];
+ u64 buf[32];
int ret,i;
flush_vsx_to_thread(target);
@@ -539,7 +540,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
for (i = 0; i < 32 ; i++)
- target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return ret;
@@ -657,7 +658,7 @@ static const struct user_regset native_regsets[] = {
#endif
#ifdef CONFIG_SPE
[REGSET_SPE] = {
- .n = 35,
+ .core_note_type = NT_PPC_SPE, .n = 35,
.size = sizeof(u32), .align = sizeof(u32),
.active = evr_active, .get = evr_get, .set = evr_set
},
@@ -854,8 +855,8 @@ void user_enable_single_step(struct task_struct *task)
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- task->thread.dbcr0 &= ~DBCR0_BT;
- task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+ task->thread.debug.dbcr0 &= ~DBCR0_BT;
+ task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
regs->msr |= MSR_DE;
#else
regs->msr &= ~MSR_BE;
@@ -871,8 +872,8 @@ void user_enable_block_step(struct task_struct *task)
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- task->thread.dbcr0 &= ~DBCR0_IC;
- task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
+ task->thread.debug.dbcr0 &= ~DBCR0_IC;
+ task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
regs->msr |= MSR_DE;
#else
regs->msr &= ~MSR_SE;
@@ -894,16 +895,16 @@ void user_disable_single_step(struct task_struct *task)
* And, after doing so, if all debug flags are off, turn
* off DBCR0(IDM) and MSR(DE) .... Torez
*/
- task->thread.dbcr0 &= ~DBCR0_IC;
+ task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
/*
* Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
*/
- if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
- task->thread.dbcr1)) {
+ if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
+ task->thread.debug.dbcr1)) {
/*
* All debug events were off.....
*/
- task->thread.dbcr0 &= ~DBCR0_IDM;
+ task->thread.debug.dbcr0 &= ~DBCR0_IDM;
regs->msr &= ~MSR_DE;
}
#else
@@ -1022,14 +1023,14 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
*/
/* DAC's hold the whole address without any mode flags */
- task->thread.dac1 = data & ~0x3UL;
+ task->thread.debug.dac1 = data & ~0x3UL;
- if (task->thread.dac1 == 0) {
+ if (task->thread.debug.dac1 == 0) {
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
- if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
- task->thread.dbcr1)) {
+ if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
+ task->thread.debug.dbcr1)) {
task->thread.regs->msr &= ~MSR_DE;
- task->thread.dbcr0 &= ~DBCR0_IDM;
+ task->thread.debug.dbcr0 &= ~DBCR0_IDM;
}
return 0;
}
@@ -1041,7 +1042,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
register */
- task->thread.dbcr0 |= DBCR0_IDM;
+ task->thread.debug.dbcr0 |= DBCR0_IDM;
/* Check for write and read flags and set DBCR0
accordingly */
@@ -1071,10 +1072,10 @@ static long set_instruction_bp(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
int slot;
- int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0);
- int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0);
- int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0);
- int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0);
+ int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
+ int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
+ int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
+ int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
slot2_in_use = 1;
@@ -1093,9 +1094,9 @@ static long set_instruction_bp(struct task_struct *child,
/* We need a pair of IAC registers */
if ((!slot1_in_use) && (!slot2_in_use)) {
slot = 1;
- child->thread.iac1 = bp_info->addr;
- child->thread.iac2 = bp_info->addr2;
- child->thread.dbcr0 |= DBCR0_IAC1;
+ child->thread.debug.iac1 = bp_info->addr;
+ child->thread.debug.iac2 = bp_info->addr2;
+ child->thread.debug.dbcr0 |= DBCR0_IAC1;
if (bp_info->addr_mode ==
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
dbcr_iac_range(child) |= DBCR_IAC12X;
@@ -1104,9 +1105,9 @@ static long set_instruction_bp(struct task_struct *child,
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
} else if ((!slot3_in_use) && (!slot4_in_use)) {
slot = 3;
- child->thread.iac3 = bp_info->addr;
- child->thread.iac4 = bp_info->addr2;
- child->thread.dbcr0 |= DBCR0_IAC3;
+ child->thread.debug.iac3 = bp_info->addr;
+ child->thread.debug.iac4 = bp_info->addr2;
+ child->thread.debug.dbcr0 |= DBCR0_IAC3;
if (bp_info->addr_mode ==
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
dbcr_iac_range(child) |= DBCR_IAC34X;
@@ -1126,30 +1127,30 @@ static long set_instruction_bp(struct task_struct *child,
*/
if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
slot = 1;
- child->thread.iac1 = bp_info->addr;
- child->thread.dbcr0 |= DBCR0_IAC1;
+ child->thread.debug.iac1 = bp_info->addr;
+ child->thread.debug.dbcr0 |= DBCR0_IAC1;
goto out;
}
}
if (!slot2_in_use) {
slot = 2;
- child->thread.iac2 = bp_info->addr;
- child->thread.dbcr0 |= DBCR0_IAC2;
+ child->thread.debug.iac2 = bp_info->addr;
+ child->thread.debug.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
} else if (!slot3_in_use) {
slot = 3;
- child->thread.iac3 = bp_info->addr;
- child->thread.dbcr0 |= DBCR0_IAC3;
+ child->thread.debug.iac3 = bp_info->addr;
+ child->thread.debug.dbcr0 |= DBCR0_IAC3;
} else if (!slot4_in_use) {
slot = 4;
- child->thread.iac4 = bp_info->addr;
- child->thread.dbcr0 |= DBCR0_IAC4;
+ child->thread.debug.iac4 = bp_info->addr;
+ child->thread.debug.dbcr0 |= DBCR0_IAC4;
#endif
} else
return -ENOSPC;
}
out:
- child->thread.dbcr0 |= DBCR0_IDM;
+ child->thread.debug.dbcr0 |= DBCR0_IDM;
child->thread.regs->msr |= MSR_DE;
return slot;
@@ -1159,49 +1160,49 @@ static int del_instruction_bp(struct task_struct *child, int slot)
{
switch (slot) {
case 1:
- if ((child->thread.dbcr0 & DBCR0_IAC1) == 0)
+ if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
/* address range - clear slots 1 & 2 */
- child->thread.iac2 = 0;
+ child->thread.debug.iac2 = 0;
dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
}
- child->thread.iac1 = 0;
- child->thread.dbcr0 &= ~DBCR0_IAC1;
+ child->thread.debug.iac1 = 0;
+ child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
break;
case 2:
- if ((child->thread.dbcr0 & DBCR0_IAC2) == 0)
+ if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
/* used in a range */
return -EINVAL;
- child->thread.iac2 = 0;
- child->thread.dbcr0 &= ~DBCR0_IAC2;
+ child->thread.debug.iac2 = 0;
+ child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case 3:
- if ((child->thread.dbcr0 & DBCR0_IAC3) == 0)
+ if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
/* address range - clear slots 3 & 4 */
- child->thread.iac4 = 0;
+ child->thread.debug.iac4 = 0;
dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
}
- child->thread.iac3 = 0;
- child->thread.dbcr0 &= ~DBCR0_IAC3;
+ child->thread.debug.iac3 = 0;
+ child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
break;
case 4:
- if ((child->thread.dbcr0 & DBCR0_IAC4) == 0)
+ if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC34MODE)
/* Used in a range */
return -EINVAL;
- child->thread.iac4 = 0;
- child->thread.dbcr0 &= ~DBCR0_IAC4;
+ child->thread.debug.iac4 = 0;
+ child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
break;
#endif
default:
@@ -1231,18 +1232,18 @@ static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
dbcr_dac(child) |= DBCR_DAC1R;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dbcr_dac(child) |= DBCR_DAC1W;
- child->thread.dac1 = (unsigned long)bp_info->addr;
+ child->thread.debug.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
if (byte_enable) {
- child->thread.dvc1 =
+ child->thread.debug.dvc1 =
(unsigned long)bp_info->condition_value;
- child->thread.dbcr2 |=
+ child->thread.debug.dbcr2 |=
((byte_enable << DBCR2_DVC1BE_SHIFT) |
(condition_mode << DBCR2_DVC1M_SHIFT));
}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- } else if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
+ } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
/* Both dac1 and dac2 are part of a range */
return -ENOSPC;
#endif
@@ -1252,19 +1253,19 @@ static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
dbcr_dac(child) |= DBCR_DAC2R;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dbcr_dac(child) |= DBCR_DAC2W;
- child->thread.dac2 = (unsigned long)bp_info->addr;
+ child->thread.debug.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
if (byte_enable) {
- child->thread.dvc2 =
+ child->thread.debug.dvc2 =
(unsigned long)bp_info->condition_value;
- child->thread.dbcr2 |=
+ child->thread.debug.dbcr2 |=
((byte_enable << DBCR2_DVC2BE_SHIFT) |
(condition_mode << DBCR2_DVC2M_SHIFT));
}
#endif
} else
return -ENOSPC;
- child->thread.dbcr0 |= DBCR0_IDM;
+ child->thread.debug.dbcr0 |= DBCR0_IDM;
child->thread.regs->msr |= MSR_DE;
return slot + 4;
@@ -1276,32 +1277,32 @@ static int del_dac(struct task_struct *child, int slot)
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
return -ENOENT;
- child->thread.dac1 = 0;
+ child->thread.debug.dac1 = 0;
dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
- child->thread.dac2 = 0;
- child->thread.dbcr2 &= ~DBCR2_DAC12MODE;
+ if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
+ child->thread.debug.dac2 = 0;
+ child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
}
- child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
+ child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- child->thread.dvc1 = 0;
+ child->thread.debug.dvc1 = 0;
#endif
} else if (slot == 2) {
if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
return -ENOENT;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- if (child->thread.dbcr2 & DBCR2_DAC12MODE)
+ if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
/* Part of a range */
return -EINVAL;
- child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
+ child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- child->thread.dvc2 = 0;
+ child->thread.debug.dvc2 = 0;
#endif
- child->thread.dac2 = 0;
+ child->thread.debug.dac2 = 0;
dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
} else
return -EINVAL;
@@ -1343,22 +1344,22 @@ static int set_dac_range(struct task_struct *child,
return -EIO;
}
- if (child->thread.dbcr0 &
+ if (child->thread.debug.dbcr0 &
(DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
return -ENOSPC;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
- child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
+ child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
- child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
- child->thread.dac1 = bp_info->addr;
- child->thread.dac2 = bp_info->addr2;
+ child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
+ child->thread.debug.dac1 = bp_info->addr;
+ child->thread.debug.dac2 = bp_info->addr2;
if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
- child->thread.dbcr2 |= DBCR2_DAC12M;
+ child->thread.debug.dbcr2 |= DBCR2_DAC12M;
else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
- child->thread.dbcr2 |= DBCR2_DAC12MX;
+ child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
else /* PPC_BREAKPOINT_MODE_MASK */
- child->thread.dbcr2 |= DBCR2_DAC12MM;
+ child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
child->thread.regs->msr |= MSR_DE;
return 5;
@@ -1489,9 +1490,9 @@ static long ppc_del_hwdebug(struct task_struct *child, long data)
rc = del_dac(child, (int)data - 4);
if (!rc) {
- if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0,
- child->thread.dbcr1)) {
- child->thread.dbcr0 &= ~DBCR0_IDM;
+ if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
+ child->thread.debug.dbcr1)) {
+ child->thread.debug.dbcr0 &= ~DBCR0_IDM;
child->thread.regs->msr &= ~MSR_DE;
}
}
@@ -1554,10 +1555,10 @@ long arch_ptrace(struct task_struct *child, long request,
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
- tmp = ((unsigned long *)child->thread.fpr)
- [fpidx * TS_FPRWIDTH];
+ memcpy(&tmp, &child->thread.fp_state.fpr,
+ sizeof(long));
else
- tmp = child->thread.fpscr.val;
+ tmp = child->thread.fp_state.fpscr;
}
ret = put_user(tmp, datalp);
break;
@@ -1587,10 +1588,10 @@ long arch_ptrace(struct task_struct *child, long request,
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
- ((unsigned long *)child->thread.fpr)
- [fpidx * TS_FPRWIDTH] = data;
+ memcpy(&child->thread.fp_state.fpr, &data,
+ sizeof(long));
else
- child->thread.fpscr.val = data;
+ child->thread.fp_state.fpscr = data;
ret = 0;
}
break;
@@ -1669,7 +1670,7 @@ long arch_ptrace(struct task_struct *child, long request,
if (addr > 0)
break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- ret = put_user(child->thread.dac1, datalp);
+ ret = put_user(child->thread.debug.dac1, datalp);
#else
dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
(child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
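The fpr_get/fpr_set hunks above move the compile-time layout checks from struct thread_struct to struct thread_fp_state, because user_regset_copyout() dumps fpr[0..31] and fpscr as one contiguous block. A standalone sketch of that kind of layout assertion, using a hypothetical struct and a stand-in for BUILD_BUG_ON(); the patch itself expresses the end-of-array offset via fpr[32][0]:

#include <stddef.h>

/* Stand-in for the kernel's BUILD_BUG_ON(): the program only compiles
 * if the condition is false. */
#define LAYOUT_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

/* Hypothetical register-state layout: copying fpr[0..31] followed by
 * fpscr as one block is only safe if fpscr sits right after the last
 * fpr slot, with no padding in between. */
struct demo_fp_state {
	unsigned long long fpr[32][1];
	unsigned long long fpscr;
};

int main(void)
{
	LAYOUT_BUG_ON(offsetof(struct demo_fp_state, fpscr) !=
		      offsetof(struct demo_fp_state, fpr) +
		      sizeof(((struct demo_fp_state *)0)->fpr));
	return 0;
}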
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index f51599e941c7..f52b7db327c8 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -43,7 +43,6 @@
#define FPRNUMBER(i) (((i) - PT_FPR0) >> 1)
#define FPRHALF(i) (((i) - PT_FPR0) & 1)
#define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i)
-#define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0))
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
@@ -105,7 +104,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
* to be an array of unsigned int (32 bits) - the
* index passed in is based on this assumption.
*/
- tmp = ((unsigned int *)child->thread.fpr)
+ tmp = ((unsigned int *)child->thread.fp_state.fpr)
[FPRINDEX(index)];
}
ret = put_user((unsigned int)tmp, (u32 __user *)data);
@@ -147,8 +146,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (numReg >= PT_FPR0) {
flush_fp_to_thread(child);
/* get 64 bit FPR */
- tmp = ((u64 *)child->thread.fpr)
- [FPRINDEX_3264(numReg)];
+ tmp = child->thread.fp_state.fpr[numReg - PT_FPR0][0];
} else { /* register within PT_REGS struct */
unsigned long tmp2;
ret = ptrace_get_reg(child, numReg, &tmp2);
@@ -207,7 +205,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
* to be an array of unsigned int (32 bits) - the
* index passed in is based on this assumption.
*/
- ((unsigned int *)child->thread.fpr)
+ ((unsigned int *)child->thread.fp_state.fpr)
[FPRINDEX(index)] = data;
ret = 0;
}
@@ -251,8 +249,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
u64 *tmp;
flush_fp_to_thread(child);
/* get 64 bit FPR ... */
- tmp = &(((u64 *)child->thread.fpr)
- [FPRINDEX_3264(numReg)]);
+ tmp = &child->thread.fp_state.fpr[numReg - PT_FPR0][0];
/* ... write the 32 bit part we want */
((u32 *)tmp)[index % 2] = data;
ret = 0;
@@ -269,7 +266,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (addr > 0)
break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- ret = put_user(child->thread.dac1, (u32 __user *)data);
+ ret = put_user(child->thread.debug.dac1, (u32 __user *)data);
#else
dabr_fake = (
(child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
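The ptrace32.c macros retained above (FPRNUMBER/FPRHALF/FPRINDEX) map a 32-bit tracer's register index onto halves of the 64-bit FPR array. A small standalone sketch of that mapping; PT_FPR0 and TS_FPRWIDTH here are hypothetical stand-ins for the kernel constants:

#include <stdio.h>

/* Illustrative only: each 64-bit FPR is seen by a 32-bit tracer as two
 * consecutive 32-bit words. */
#define PT_FPR0      48
#define TS_FPRWIDTH  1	/* u64 slots per FPR in this demo */

#define FPRNUMBER(i) (((i) - PT_FPR0) >> 1)          /* which 64-bit FPR */
#define FPRHALF(i)   (((i) - PT_FPR0) & 1)           /* high or low word */
#define FPRINDEX(i)  (TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i))

int main(void)
{
	int i;

	for (i = PT_FPR0; i < PT_FPR0 + 6; i++)
		printf("32-bit index %d -> fpr[%d], word %d (flat %d)\n",
		       i, FPRNUMBER(i), FPRHALF(i), FPRINDEX(i));
	return 0;
}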
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 6e7b7cdeec65..7d4c7172f38e 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -223,7 +223,7 @@ unsigned long get_phb_buid(struct device_node *phb)
static int phb_set_bus_ranges(struct device_node *dev,
struct pci_controller *phb)
{
- const int *bus_range;
+ const __be32 *bus_range;
unsigned int len;
bus_range = of_get_property(dev, "bus-range", &len);
@@ -231,8 +231,8 @@ static int phb_set_bus_ranges(struct device_node *dev,
return 1;
}
- phb->first_busno = bus_range[0];
- phb->last_busno = bus_range[1];
+ phb->first_busno = be32_to_cpu(bus_range[0]);
+ phb->last_busno = be32_to_cpu(bus_range[1]);
return 0;
}
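The rtas_pci.c hunk above treats the "bus-range" property as __be32 cells and converts them with be32_to_cpu(), since flattened-device-tree data is big-endian regardless of the CPU's endianness. A kernel-style sketch of the same pattern (not buildable standalone; the function and property names are illustrative, but of_get_property() and be32_to_cpu() are the calls used in the hunk):

/* Sketch: read a two-cell big-endian DT property into host integers. */
static int demo_read_bus_range(struct device_node *dev,
			       unsigned int *first, unsigned int *last)
{
	const __be32 *bus_range;
	int len;

	bus_range = of_get_property(dev, "bus-range", &len);
	if (!bus_range || len < 2 * sizeof(__be32))
		return -EINVAL;

	*first = be32_to_cpu(bus_range[0]);
	*last  = be32_to_cpu(bus_range[1]);
	return 0;
}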
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 3d261c071fc8..febc80445d25 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -62,8 +62,6 @@
#include <mm/mmu_decl.h>
#include <asm/fadump.h>
-#include "setup.h"
-
#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
deleted file mode 100644
index 4c67ad7fae08..000000000000
--- a/arch/powerpc/kernel/setup.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _POWERPC_KERNEL_SETUP_H
-#define _POWERPC_KERNEL_SETUP_H
-
-void check_for_initrd(void);
-void do_init_bootmem(void);
-void setup_panic(void);
-extern int do_early_xmon;
-
-#endif /* _POWERPC_KERNEL_SETUP_H */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a4bbcae72578..b903dc5cf944 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -40,8 +40,6 @@
#include <asm/mmu_context.h>
#include <asm/epapr_hcalls.h>
-#include "setup.h"
-
#define DBG(fmt...)
extern void bootx_init(unsigned long r4, unsigned long phys);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 278ca93e1f28..4085aaa9478f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -68,8 +68,6 @@
#include <asm/hugetlb.h>
#include <asm/epapr_hcalls.h>
-#include "setup.h"
-
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index bebdf1a1a540..1a410aa57fb7 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -265,27 +265,27 @@ struct rt_sigframe {
unsigned long copy_fpr_to_user(void __user *to,
struct task_struct *task)
{
- double buf[ELF_NFPREG];
+ u64 buf[ELF_NFPREG];
int i;
/* save FPR copy to local buffer then write to the thread_struct */
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
buf[i] = task->thread.TS_FPR(i);
- memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
+ buf[i] = task->thread.fp_state.fpscr;
return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}
unsigned long copy_fpr_from_user(struct task_struct *task,
void __user *from)
{
- double buf[ELF_NFPREG];
+ u64 buf[ELF_NFPREG];
int i;
if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
return 1;
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
task->thread.TS_FPR(i) = buf[i];
- memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
+ task->thread.fp_state.fpscr = buf[i];
return 0;
}
@@ -293,25 +293,25 @@ unsigned long copy_fpr_from_user(struct task_struct *task,
unsigned long copy_vsx_to_user(void __user *to,
struct task_struct *task)
{
- double buf[ELF_NVSRHALFREG];
+ u64 buf[ELF_NVSRHALFREG];
int i;
/* save FPR copy to local buffer then write to the thread_struct */
for (i = 0; i < ELF_NVSRHALFREG; i++)
- buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
+ buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}
unsigned long copy_vsx_from_user(struct task_struct *task,
void __user *from)
{
- double buf[ELF_NVSRHALFREG];
+ u64 buf[ELF_NVSRHALFREG];
int i;
if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
return 1;
for (i = 0; i < ELF_NVSRHALFREG ; i++)
- task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return 0;
}
@@ -319,27 +319,27 @@ unsigned long copy_vsx_from_user(struct task_struct *task,
unsigned long copy_transact_fpr_to_user(void __user *to,
struct task_struct *task)
{
- double buf[ELF_NFPREG];
+ u64 buf[ELF_NFPREG];
int i;
/* save FPR copy to local buffer then write to the thread_struct */
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
buf[i] = task->thread.TS_TRANS_FPR(i);
- memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double));
+ buf[i] = task->thread.transact_fp.fpscr;
return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}
unsigned long copy_transact_fpr_from_user(struct task_struct *task,
void __user *from)
{
- double buf[ELF_NFPREG];
+ u64 buf[ELF_NFPREG];
int i;
if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
return 1;
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
task->thread.TS_TRANS_FPR(i) = buf[i];
- memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double));
+ task->thread.transact_fp.fpscr = buf[i];
return 0;
}
@@ -347,25 +347,25 @@ unsigned long copy_transact_fpr_from_user(struct task_struct *task,
unsigned long copy_transact_vsx_to_user(void __user *to,
struct task_struct *task)
{
- double buf[ELF_NVSRHALFREG];
+ u64 buf[ELF_NVSRHALFREG];
int i;
/* save FPR copy to local buffer then write to the thread_struct */
for (i = 0; i < ELF_NVSRHALFREG; i++)
- buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET];
+ buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET];
return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}
unsigned long copy_transact_vsx_from_user(struct task_struct *task,
void __user *from)
{
- double buf[ELF_NVSRHALFREG];
+ u64 buf[ELF_NVSRHALFREG];
int i;
if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
return 1;
for (i = 0; i < ELF_NVSRHALFREG ; i++)
- task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -373,14 +373,14 @@ unsigned long copy_transact_vsx_from_user(struct task_struct *task,
inline unsigned long copy_fpr_to_user(void __user *to,
struct task_struct *task)
{
- return __copy_to_user(to, task->thread.fpr,
+ return __copy_to_user(to, task->thread.fp_state.fpr,
ELF_NFPREG * sizeof(double));
}
inline unsigned long copy_fpr_from_user(struct task_struct *task,
void __user *from)
{
- return __copy_from_user(task->thread.fpr, from,
+ return __copy_from_user(task->thread.fp_state.fpr, from,
ELF_NFPREG * sizeof(double));
}
@@ -388,14 +388,14 @@ inline unsigned long copy_fpr_from_user(struct task_struct *task,
inline unsigned long copy_transact_fpr_to_user(void __user *to,
struct task_struct *task)
{
- return __copy_to_user(to, task->thread.transact_fpr,
+ return __copy_to_user(to, task->thread.transact_fp.fpr,
ELF_NFPREG * sizeof(double));
}
inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
void __user *from)
{
- return __copy_from_user(task->thread.transact_fpr, from,
+ return __copy_from_user(task->thread.transact_fp.fpr, from,
ELF_NFPREG * sizeof(double));
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -423,7 +423,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
/* save altivec registers */
if (current->thread.used_vr) {
flush_altivec_to_thread(current);
- if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
+ if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
ELF_NVRREG * sizeof(vector128)))
return 1;
/* set MSR_VEC in the saved MSR value to indicate that
@@ -534,17 +534,17 @@ static int save_tm_user_regs(struct pt_regs *regs,
/* save altivec registers */
if (current->thread.used_vr) {
flush_altivec_to_thread(current);
- if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
+ if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
ELF_NVRREG * sizeof(vector128)))
return 1;
if (msr & MSR_VEC) {
if (__copy_to_user(&tm_frame->mc_vregs,
- current->thread.transact_vr,
+ &current->thread.transact_vr,
ELF_NVRREG * sizeof(vector128)))
return 1;
} else {
if (__copy_to_user(&tm_frame->mc_vregs,
- current->thread.vr,
+ &current->thread.vr_state,
ELF_NVRREG * sizeof(vector128)))
return 1;
}
@@ -692,11 +692,12 @@ static long restore_user_regs(struct pt_regs *regs,
regs->msr &= ~MSR_VEC;
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
- if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
+ if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
sizeof(sr->mc_vregs)))
return 1;
} else if (current->thread.used_vr)
- memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
+ memset(&current->thread.vr_state, 0,
+ ELF_NVRREG * sizeof(vector128));
/* Always get VRSAVE back */
if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
@@ -722,7 +723,7 @@ static long restore_user_regs(struct pt_regs *regs,
return 1;
} else if (current->thread.used_vsr)
for (i = 0; i < 32 ; i++)
- current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
/*
* force the process to reload the FP registers from
@@ -798,15 +799,16 @@ static long restore_tm_user_regs(struct pt_regs *regs,
regs->msr &= ~MSR_VEC;
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
- if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
+ if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
sizeof(sr->mc_vregs)) ||
- __copy_from_user(current->thread.transact_vr,
+ __copy_from_user(&current->thread.transact_vr,
&tm_sr->mc_vregs,
sizeof(sr->mc_vregs)))
return 1;
} else if (current->thread.used_vr) {
- memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
- memset(current->thread.transact_vr, 0,
+ memset(&current->thread.vr_state, 0,
+ ELF_NVRREG * sizeof(vector128));
+ memset(&current->thread.transact_vr, 0,
ELF_NVRREG * sizeof(vector128));
}
@@ -838,8 +840,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
return 1;
} else if (current->thread.used_vsr)
for (i = 0; i < 32 ; i++) {
- current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
- current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
}
#endif /* CONFIG_VSX */
@@ -1030,7 +1032,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
if (__put_user(0, &rt_sf->uc.uc_link))
goto badframe;
- current->thread.fpscr.val = 0; /* turn off all fp exceptions */
+ current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
/* create a stack frame for the caller of the handler */
newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
@@ -1045,8 +1047,9 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
regs->gpr[5] = (unsigned long) &rt_sf->uc;
regs->gpr[6] = (unsigned long) rt_sf;
regs->nip = (unsigned long) ka->sa.sa_handler;
- /* enter the signal handler in big-endian mode */
+ /* enter the signal handler in native-endian mode */
regs->msr &= ~MSR_LE;
+ regs->msr |= (MSR_KERNEL & MSR_LE);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Remove TM bits from thread's MSR. The MSR in the sigcontext
* just indicates to userland that we were doing a transaction, but we
@@ -1309,7 +1312,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
unsigned char tmp;
unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- unsigned long new_dbcr0 = current->thread.dbcr0;
+ unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif
for (i=0; i<ndbg; i++) {
@@ -1324,7 +1327,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
} else {
new_dbcr0 &= ~DBCR0_IC;
if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
- current->thread.dbcr1)) {
+ current->thread.debug.dbcr1)) {
new_msr &= ~MSR_DE;
new_dbcr0 &= ~DBCR0_IDM;
}
@@ -1359,7 +1362,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
the user is really doing something wrong. */
regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- current->thread.dbcr0 = new_dbcr0;
+ current->thread.debug.dbcr0 = new_dbcr0;
#endif
if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
@@ -1462,7 +1465,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
regs->link = tramp;
- current->thread.fpscr.val = 0; /* turn off all fp exceptions */
+ current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
/* create a stack frame for the caller of the handler */
newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
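The signal_32.c hunks above stop forcing big-endian entry to signal handlers and instead copy the kernel's own LE bit into the user MSR. A tiny illustrative sketch of that bit manipulation, written with the constants passed in as parameters so it stands alone; it is not kernel code:

/* Clear the old LE setting, then inherit the kernel's endianness. */
static unsigned long enter_handler_msr(unsigned long msr,
				       unsigned long msr_kernel,
				       unsigned long msr_le)
{
	msr &= ~msr_le;			/* drop the previous LE bit     */
	msr |= (msr_kernel & msr_le);	/* copy LE from the kernel MSR  */
	return msr;
}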
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index f93ec2835a13..b3c615764c9b 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -103,7 +103,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
if (current->thread.used_vr) {
flush_altivec_to_thread(current);
/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
- err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128));
+ err |= __copy_to_user(v_regs, &current->thread.vr_state,
+ 33 * sizeof(vector128));
/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
* contains valid data.
*/
@@ -195,18 +196,18 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
if (current->thread.used_vr) {
flush_altivec_to_thread(current);
/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
- err |= __copy_to_user(v_regs, current->thread.vr,
+ err |= __copy_to_user(v_regs, &current->thread.vr_state,
33 * sizeof(vector128));
/* If VEC was enabled there are transactional VRs valid too,
* else they're a copy of the checkpointed VRs.
*/
if (msr & MSR_VEC)
err |= __copy_to_user(tm_v_regs,
- current->thread.transact_vr,
+ &current->thread.transact_vr,
33 * sizeof(vector128));
else
err |= __copy_to_user(tm_v_regs,
- current->thread.vr,
+ &current->thread.vr_state,
33 * sizeof(vector128));
/* set MSR_VEC in the MSR value in the frame to indicate
@@ -349,10 +350,10 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != NULL && (msr & MSR_VEC) != 0)
- err |= __copy_from_user(current->thread.vr, v_regs,
+ err |= __copy_from_user(&current->thread.vr_state, v_regs,
33 * sizeof(vector128));
else if (current->thread.used_vr)
- memset(current->thread.vr, 0, 33 * sizeof(vector128));
+ memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
/* Always get VRSAVE back */
if (v_regs != NULL)
err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
@@ -374,7 +375,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
err |= copy_vsx_from_user(current, v_regs);
else
for (i = 0; i < 32 ; i++)
- current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif
return err;
}
@@ -468,14 +469,14 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
- err |= __copy_from_user(current->thread.vr, v_regs,
+ err |= __copy_from_user(&current->thread.vr_state, v_regs,
33 * sizeof(vector128));
- err |= __copy_from_user(current->thread.transact_vr, tm_v_regs,
+ err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs,
33 * sizeof(vector128));
}
else if (current->thread.used_vr) {
- memset(current->thread.vr, 0, 33 * sizeof(vector128));
- memset(current->thread.transact_vr, 0, 33 * sizeof(vector128));
+ memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
+ memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128));
}
/* Always get VRSAVE back */
if (v_regs != NULL && tm_v_regs != NULL) {
@@ -507,8 +508,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
err |= copy_transact_vsx_from_user(current, tm_v_regs);
} else {
for (i = 0; i < 32 ; i++) {
- current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
- current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
}
}
#endif
@@ -747,7 +748,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
goto badframe;
/* Make sure signal handler doesn't get spurious FP exceptions */
- current->thread.fpscr.val = 0;
+ current->thread.fp_state.fpscr = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Remove TM bits from thread's MSR. The MSR in the sigcontext
* just indicates to userland that we were doing a transaction, but we
@@ -773,8 +774,9 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
/* Set up "regs" so we "return" to the signal handler. */
err |= get_user(regs->nip, &funct_desc_ptr->entry);
- /* enter the signal handler in big-endian mode */
+ /* enter the signal handler in native-endian mode */
regs->msr &= ~MSR_LE;
+ regs->msr |= (MSR_KERNEL & MSR_LE);
regs->gpr[1] = newsp;
err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
regs->gpr[3] = signr;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8e59abc237d7..930cd8af3503 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -844,18 +844,6 @@ void __cpu_die(unsigned int cpu)
smp_ops->cpu_die(cpu);
}
-static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);
-
-void cpu_hotplug_driver_lock()
-{
- mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
-}
-
-void cpu_hotplug_driver_unlock()
-{
- mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
-}
-
void cpu_die(void)
{
if (ppc_md.cpu_die)
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index 22045984835f..988f38dced0f 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -114,7 +114,9 @@ _GLOBAL(swsusp_arch_suspend)
SAVE_SPECIAL(MSR)
SAVE_SPECIAL(XER)
#ifdef CONFIG_PPC_BOOK3S_64
+BEGIN_FW_FTR_SECTION
SAVE_SPECIAL(SDR1)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
#else
SAVE_SPR(TCR)
@@ -231,7 +233,9 @@ nothing_to_copy:
/* can't use RESTORE_SPECIAL(MSR) */
ld r0, SL_MSR(r11)
mtmsrd r0, 0
+BEGIN_FW_FTR_SECTION
RESTORE_SPECIAL(SDR1)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
#else
/* Restore SPRG1, be used to save paca */
ld r0, SL_SPRG1(r11)
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index cd809eaa8b5c..ef47bcbd4352 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -12,16 +12,15 @@
#include <asm/reg.h>
#ifdef CONFIG_VSX
-/* See fpu.S, this is very similar but to save/restore checkpointed FPRs/VSRs */
-#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \
+/* See fpu.S, this is borrowed from there */
+#define __SAVE_32FPRS_VSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
- SAVE_32FPRS_TRANSACT(n,base); \
+ SAVE_32FPRS(n,base); \
b 3f; \
-2: SAVE_32VSRS_TRANSACT(n,c,base); \
+2: SAVE_32VSRS(n,c,base); \
3:
-/* ...and this is just plain borrowed from there. */
#define __REST_32FPRS_VSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
@@ -31,11 +30,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
2: REST_32VSRS(n,c,base); \
3:
#else
-#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) SAVE_32FPRS_TRANSACT(n, base)
-#define __REST_32FPRS_VSRS(n,c,base) REST_32FPRS(n, base)
+#define __SAVE_32FPRS_VSRS(n,c,base) SAVE_32FPRS(n, base)
+#define __REST_32FPRS_VSRS(n,c,base) REST_32FPRS(n, base)
#endif
-#define SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \
- __SAVE_32FPRS_VSRS_TRANSACT(n,__REG_##c,__REG_##base)
+#define SAVE_32FPRS_VSRS(n,c,base) \
+ __SAVE_32FPRS_VSRS(n,__REG_##c,__REG_##base)
#define REST_32FPRS_VSRS(n,c,base) \
__REST_32FPRS_VSRS(n,__REG_##c,__REG_##base)
@@ -107,7 +106,7 @@ DSCR_DEFAULT:
_GLOBAL(tm_reclaim)
mfcr r6
mflr r0
- std r6, 8(r1)
+ stw r6, 8(r1)
std r0, 16(r1)
std r2, 40(r1)
stdu r1, -TM_FRAME_SIZE(r1)
@@ -157,10 +156,11 @@ _GLOBAL(tm_reclaim)
andis. r0, r4, MSR_VEC@h
beq dont_backup_vec
- SAVE_32VRS_TRANSACT(0, r6, r3) /* r6 scratch, r3 thread */
+ addi r7, r3, THREAD_TRANSACT_VRSTATE
+ SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */
mfvscr vr0
- li r6, THREAD_TRANSACT_VSCR
- stvx vr0, r3, r6
+ li r6, VRSTATE_VSCR
+ stvx vr0, r7, r6
dont_backup_vec:
mfspr r0, SPRN_VRSAVE
std r0, THREAD_TRANSACT_VRSAVE(r3)
@@ -168,10 +168,11 @@ dont_backup_vec:
andi. r0, r4, MSR_FP
beq dont_backup_fp
- SAVE_32FPRS_VSRS_TRANSACT(0, R6, R3) /* r6 scratch, r3 thread */
+ addi r7, r3, THREAD_TRANSACT_FPSTATE
+ SAVE_32FPRS_VSRS(0, R6, R7) /* r6 scratch, r7 transact fp state */
mffs fr0
- stfd fr0,THREAD_TRANSACT_FPSCR(r3)
+ stfd fr0,FPSTATE_FPSCR(r7)
dont_backup_fp:
/* The moment we treclaim, ALL of our GPRs will switch
@@ -284,7 +285,7 @@ dont_backup_fp:
REST_NVGPRS(r1)
addi r1, r1, TM_FRAME_SIZE
- ld r4, 8(r1)
+ lwz r4, 8(r1)
ld r0, 16(r1)
mtcr r4
mtlr r0
@@ -309,7 +310,7 @@ dont_backup_fp:
_GLOBAL(tm_recheckpoint)
mfcr r5
mflr r0
- std r5, 8(r1)
+ stw r5, 8(r1)
std r0, 16(r1)
std r2, 40(r1)
stdu r1, -TM_FRAME_SIZE(r1)
@@ -358,10 +359,11 @@ _GLOBAL(tm_recheckpoint)
andis. r0, r4, MSR_VEC@h
beq dont_restore_vec
- li r5, THREAD_VSCR
- lvx vr0, r3, r5
+ addi r8, r3, THREAD_VRSTATE
+ li r5, VRSTATE_VSCR
+ lvx vr0, r8, r5
mtvscr vr0
- REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */
+ REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */
dont_restore_vec:
ld r5, THREAD_VRSAVE(r3)
mtspr SPRN_VRSAVE, r5
@@ -370,9 +372,10 @@ dont_restore_vec:
andi. r0, r4, MSR_FP
beq dont_restore_fp
- lfd fr0, THREAD_FPSCR(r3)
+ addi r8, r3, THREAD_FPSTATE
+ lfd fr0, FPSTATE_FPSCR(r8)
MTFSF_L(fr0)
- REST_32FPRS_VSRS(0, R4, R3)
+ REST_32FPRS_VSRS(0, R4, R8)
dont_restore_fp:
mtmsr r6 /* FP/Vec off again! */
@@ -441,7 +444,7 @@ restore_gprs:
REST_NVGPRS(r1)
addi r1, r1, TM_FRAME_SIZE
- ld r4, 8(r1)
+ lwz r4, 8(r1)
ld r0, 16(r1)
mtcr r4
mtlr r0
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f783c932faeb..62c3dd8c69f2 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -351,8 +351,8 @@ static inline int check_io_access(struct pt_regs *regs)
#define REASON_TRAP ESR_PTR
/* single-step stuff */
-#define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC)
-#define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC)
+#define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC)
+#define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC)
#else
/* On non-4xx, the reason for the machine check or program
@@ -816,7 +816,7 @@ static void parse_fpe(struct pt_regs *regs)
flush_fp_to_thread(current);
- code = __parse_fpscr(current->thread.fpscr.val);
+ code = __parse_fpscr(current->thread.fp_state.fpscr);
_exception(SIGFPE, regs, code, regs->nip);
}
@@ -1018,6 +1018,13 @@ static int emulate_instruction(struct pt_regs *regs)
return emulate_isel(regs, instword);
}
+ /* Emulate sync instruction variants */
+ if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
+ PPC_WARN_EMULATED(sync, regs);
+ asm volatile("sync");
+ return 0;
+ }
+
#ifdef CONFIG_PPC64
/* Emulate the mfspr rD, DSCR. */
if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
@@ -1069,7 +1076,7 @@ static int emulate_math(struct pt_regs *regs)
return 0;
case 1: {
int code = 0;
- code = __parse_fpscr(current->thread.fpscr.val);
+ code = __parse_fpscr(current->thread.fp_state.fpscr);
_exception(SIGFPE, regs, code, regs->nip);
return 0;
}
@@ -1371,8 +1378,6 @@ void facility_unavailable_exception(struct pt_regs *regs)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-extern void do_load_up_fpu(struct pt_regs *regs);
-
void fp_unavailable_tm(struct pt_regs *regs)
{
/* Note: This does not handle any kind of FP laziness. */
@@ -1403,8 +1408,6 @@ void fp_unavailable_tm(struct pt_regs *regs)
}
#ifdef CONFIG_ALTIVEC
-extern void do_load_up_altivec(struct pt_regs *regs);
-
void altivec_unavailable_tm(struct pt_regs *regs)
{
/* See the comments in fp_unavailable_tm(). This function operates
@@ -1465,7 +1468,8 @@ void SoftwareEmulation(struct pt_regs *regs)
if (!user_mode(regs)) {
debugger(regs);
- die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
+ die("Kernel Mode Unimplemented Instruction or SW FPU Emulation",
+ regs, SIGFPE);
}
if (!emulate_math(regs))
@@ -1486,7 +1490,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
+ current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
5);
@@ -1497,24 +1501,24 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
6);
changed |= 0x01;
} else if (debug_status & DBSR_IAC1) {
- current->thread.dbcr0 &= ~DBCR0_IAC1;
+ current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
1);
changed |= 0x01;
} else if (debug_status & DBSR_IAC2) {
- current->thread.dbcr0 &= ~DBCR0_IAC2;
+ current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
2);
changed |= 0x01;
} else if (debug_status & DBSR_IAC3) {
- current->thread.dbcr0 &= ~DBCR0_IAC3;
+ current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
3);
changed |= 0x01;
} else if (debug_status & DBSR_IAC4) {
- current->thread.dbcr0 &= ~DBCR0_IAC4;
+ current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
4);
changed |= 0x01;
@@ -1524,19 +1528,20 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
* Check all other debug flags and see if that bit needs to be turned
* back on or not.
*/
- if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
+ if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
+ current->thread.debug.dbcr1))
regs->msr |= MSR_DE;
else
/* Make sure the IDM flag is off */
- current->thread.dbcr0 &= ~DBCR0_IDM;
+ current->thread.debug.dbcr0 &= ~DBCR0_IDM;
if (changed & 0x01)
- mtspr(SPRN_DBCR0, current->thread.dbcr0);
+ mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}
void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
- current->thread.dbsr = debug_status;
+ current->thread.debug.dbsr = debug_status;
/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
* on server, it stops on the target of the branch. In order to simulate
@@ -1553,8 +1558,8 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
/* Do the single step trick only when coming from userspace */
if (user_mode(regs)) {
- current->thread.dbcr0 &= ~DBCR0_BT;
- current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+ current->thread.debug.dbcr0 &= ~DBCR0_BT;
+ current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
regs->msr |= MSR_DE;
return;
}
@@ -1582,13 +1587,13 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
return;
if (user_mode(regs)) {
- current->thread.dbcr0 &= ~DBCR0_IC;
- if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
- current->thread.dbcr1))
+ current->thread.debug.dbcr0 &= ~DBCR0_IC;
+ if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
+ current->thread.debug.dbcr1))
regs->msr |= MSR_DE;
else
/* Make sure the IDM bit is off */
- current->thread.dbcr0 &= ~DBCR0_IDM;
+ current->thread.debug.dbcr0 &= ~DBCR0_IDM;
}
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
@@ -1634,7 +1639,7 @@ void altivec_assist_exception(struct pt_regs *regs)
/* XXX quick hack for now: set the non-Java bit in the VSCR */
printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
"in %s at %lx\n", current->comm, regs->nip);
- current->thread.vscr.u[3] |= 0x10000;
+ current->thread.vr_state.vscr.u[3] |= 0x10000;
}
}
#endif /* CONFIG_ALTIVEC */
@@ -1815,6 +1820,7 @@ struct ppc_emulated ppc_emulated = {
WARN_EMULATED_SETUP(popcntb),
WARN_EMULATED_SETUP(spe),
WARN_EMULATED_SETUP(string),
+ WARN_EMULATED_SETUP(sync),
WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
WARN_EMULATED_SETUP(math),
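The traps.c hunk above emulates sync variants by testing the instruction word against an opcode mask. A standalone sketch of that mask-and-match pattern; the constant values below are illustrative stand-ins, the real PPC_INST_SYNC/PPC_INST_SYNC_MASK definitions live in the kernel headers:

#include <stdint.h>
#include <stdio.h>

#define DEMO_INST_SYNC       0x7c0004acu
#define DEMO_INST_SYNC_MASK  0xfc0007feu

static int is_sync_variant(uint32_t instword)
{
	/* The mask strips the variant-selecting fields, so every flavour
	 * of the instruction matches the same pattern. */
	return (instword & DEMO_INST_SYNC_MASK) == DEMO_INST_SYNC;
}

int main(void)
{
	printf("%d %d\n",
	       is_sync_variant(0x7c0004ac),	/* matches the pattern */
	       is_sync_variant(0x60000000));	/* does not match      */
	return 0;
}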
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 1d9c92621b36..094e45c16a17 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -34,8 +34,7 @@
#include <asm/firmware.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
-
-#include "setup.h"
+#include <asm/setup.h>
#undef DEBUG
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index f223409629b9..e58ee10fa5c0 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -4,7 +4,11 @@
*/
#include <asm/vdso.h>
+#ifdef __LITTLE_ENDIAN__
+OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle")
+#else
OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc", "elf32-powerpc")
+#endif
OUTPUT_ARCH(powerpc:common)
ENTRY(_start)
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index e4863819663b..64fb183a47c2 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -4,7 +4,11 @@
*/
#include <asm/vdso.h>
+#ifdef __LITTLE_ENDIAN__
+OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle")
+#else
OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc", "elf64-powerpc")
+#endif
OUTPUT_ARCH(powerpc:common64)
ENTRY(_start)
diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
index 604d0947cb20..c4bfadb2606b 100644
--- a/arch/powerpc/kernel/vecemu.c
+++ b/arch/powerpc/kernel/vecemu.c
@@ -271,7 +271,7 @@ int emulate_altivec(struct pt_regs *regs)
vb = (instr >> 11) & 0x1f;
vc = (instr >> 6) & 0x1f;
- vrs = current->thread.vr;
+ vrs = current->thread.vr_state.vr;
switch (instr & 0x3f) {
case 10:
switch (vc) {
@@ -320,12 +320,12 @@ int emulate_altivec(struct pt_regs *regs)
case 14: /* vctuxs */
for (i = 0; i < 4; ++i)
vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
- &current->thread.vscr.u[3]);
+ &current->thread.vr_state.vscr.u[3]);
break;
case 15: /* vctsxs */
for (i = 0; i < 4; ++i)
vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
- &current->thread.vscr.u[3]);
+ &current->thread.vr_state.vscr.u[3]);
break;
default:
return -EINVAL;
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 9e20999aaef2..0458a9aaba9d 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -8,29 +8,6 @@
#include <asm/ptrace.h>
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Wrapper to call load_up_altivec from C.
- * void do_load_up_altivec(struct pt_regs *regs);
- */
-_GLOBAL(do_load_up_altivec)
- mflr r0
- std r0, 16(r1)
- stdu r1, -112(r1)
-
- subi r6, r3, STACK_FRAME_OVERHEAD
- /* load_up_altivec expects r12=MSR, r13=PACA, and returns
- * with r12 = new MSR.
- */
- ld r12,_MSR(r6)
- GET_PACA(r13)
- bl load_up_altivec
- std r12,_MSR(r6)
-
- ld r0, 112+16(r1)
- addi r1, r1, 112
- mtlr r0
- blr
-
/* void do_load_up_transact_altivec(struct thread_struct *thread)
*
* This is similar to load_up_altivec but for the transactional version of the
@@ -46,10 +23,11 @@ _GLOBAL(do_load_up_transact_altivec)
li r4,1
stw r4,THREAD_USED_VR(r3)
- li r10,THREAD_TRANSACT_VSCR
+ li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
lvx vr0,r10,r3
mtvscr vr0
- REST_32VRS_TRANSACT(0,r4,r3)
+ addi r10,r3,THREAD_TRANSACT_VRSTATE
+ REST_32VRS(0,r4,r10)
/* Disable VEC again. */
MTMSRD(r6)
@@ -59,12 +37,36 @@ _GLOBAL(do_load_up_transact_altivec)
#endif
/*
- * load_up_altivec(unused, unused, tsk)
+ * Load state from memory into VMX registers including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(load_vr_state)
+ li r4,VRSTATE_VSCR
+ lvx vr0,r4,r3
+ mtvscr vr0
+ REST_32VRS(0,r4,r3)
+ blr
+
+/*
+ * Store VMX state into memory, including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(store_vr_state)
+ SAVE_32VRS(0, r4, r3)
+ mfvscr vr0
+ li r4, VRSTATE_VSCR
+ stvx vr0, r4, r3
+ blr
+
+/*
* Disable VMX for the task which had it previously,
* and save its vector registers in its thread_struct.
* Enables the VMX for use in the kernel on return.
* On SMP we know the VMX is free, since we give it up every
* switch (ie, no lazy save of the vector registers).
+ *
+ * Note that on 32-bit this can only use registers that will be
+ * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
*/
_GLOBAL(load_up_altivec)
mfmsr r5 /* grab the current MSR */
@@ -90,10 +92,11 @@ _GLOBAL(load_up_altivec)
/* Save VMX state to last_task_used_altivec's THREAD struct */
toreal(r4)
addi r4,r4,THREAD
- SAVE_32VRS(0,r5,r4)
+ addi r6,r4,THREAD_VRSTATE
+ SAVE_32VRS(0,r5,r6)
mfvscr vr0
- li r10,THREAD_VSCR
- stvx vr0,r10,r4
+ li r10,VRSTATE_VSCR
+ stvx vr0,r10,r6
/* Disable VMX for last_task_used_altivec */
PPC_LL r5,PT_REGS(r4)
toreal(r5)
@@ -125,12 +128,13 @@ _GLOBAL(load_up_altivec)
oris r12,r12,MSR_VEC@h
std r12,_MSR(r1)
#endif
+ addi r6,r5,THREAD_VRSTATE
li r4,1
- li r10,THREAD_VSCR
+ li r10,VRSTATE_VSCR
stw r4,THREAD_USED_VR(r5)
- lvx vr0,r10,r5
+ lvx vr0,r10,r6
mtvscr vr0
- REST_32VRS(0,r4,r5)
+ REST_32VRS(0,r4,r6)
#ifndef CONFIG_SMP
/* Update last_task_used_altivec to 'current' */
subi r4,r5,THREAD /* Back to 'current' */
@@ -165,12 +169,16 @@ _GLOBAL(giveup_altivec)
PPC_LCMPI 0,r3,0
beqlr /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
+ PPC_LL r7,THREAD_VRSAVEAREA(r3)
PPC_LL r5,PT_REGS(r3)
- PPC_LCMPI 0,r5,0
- SAVE_32VRS(0,r4,r3)
+ PPC_LCMPI 0,r7,0
+ bne 2f
+ addi r7,r3,THREAD_VRSTATE
+2: PPC_LCMPI 0,r5,0
+ SAVE_32VRS(0,r4,r7)
mfvscr vr0
- li r4,THREAD_VSCR
- stvx vr0,r4,r3
+ li r4,VRSTATE_VSCR
+ stvx vr0,r4,r7
beq 1f
PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index d38cc08b16c7..e7d0c88f621a 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -997,21 +997,36 @@ static struct device_attribute vio_cmo_dev_attrs[] = {
/* sysfs bus functions and data structures for CMO */
#define viobus_cmo_rd_attr(name) \
-static ssize_t \
-viobus_cmo_##name##_show(struct bus_type *bt, char *buf) \
+static ssize_t cmo_##name##_show(struct bus_type *bt, char *buf) \
{ \
return sprintf(buf, "%lu\n", vio_cmo.name); \
-}
+} \
+static BUS_ATTR_RO(cmo_##name)
#define viobus_cmo_pool_rd_attr(name, var) \
static ssize_t \
-viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf) \
+cmo_##name##_##var##_show(struct bus_type *bt, char *buf) \
{ \
return sprintf(buf, "%lu\n", vio_cmo.name.var); \
+} \
+static BUS_ATTR_RO(cmo_##name##_##var)
+
+viobus_cmo_rd_attr(entitled);
+viobus_cmo_rd_attr(spare);
+viobus_cmo_rd_attr(min);
+viobus_cmo_rd_attr(desired);
+viobus_cmo_rd_attr(curr);
+viobus_cmo_pool_rd_attr(reserve, size);
+viobus_cmo_pool_rd_attr(excess, size);
+viobus_cmo_pool_rd_attr(excess, free);
+
+static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
+{
+ return sprintf(buf, "%lu\n", vio_cmo.high);
}
-static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
- size_t count)
+static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
+ size_t count)
{
unsigned long flags;
@@ -1021,35 +1036,26 @@ static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
return count;
}
-
-viobus_cmo_rd_attr(entitled);
-viobus_cmo_pool_rd_attr(reserve, size);
-viobus_cmo_pool_rd_attr(excess, size);
-viobus_cmo_pool_rd_attr(excess, free);
-viobus_cmo_rd_attr(spare);
-viobus_cmo_rd_attr(min);
-viobus_cmo_rd_attr(desired);
-viobus_cmo_rd_attr(curr);
-viobus_cmo_rd_attr(high);
-
-static struct bus_attribute vio_cmo_bus_attrs[] = {
- __ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
- __ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
- __ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
- __ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
- __ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
- __ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
- __ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
- __ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
- __ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
- viobus_cmo_high_show, viobus_cmo_high_reset),
- __ATTR_NULL
+static BUS_ATTR_RW(cmo_high);
+
+static struct attribute *vio_bus_attrs[] = {
+ &bus_attr_cmo_entitled.attr,
+ &bus_attr_cmo_spare.attr,
+ &bus_attr_cmo_min.attr,
+ &bus_attr_cmo_desired.attr,
+ &bus_attr_cmo_curr.attr,
+ &bus_attr_cmo_high.attr,
+ &bus_attr_cmo_reserve_size.attr,
+ &bus_attr_cmo_excess_size.attr,
+ &bus_attr_cmo_excess_free.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(vio_bus);
static void vio_cmo_sysfs_init(void)
{
vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
- vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
+ vio_bus_type.bus_groups = vio_bus_groups;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
@@ -1413,8 +1419,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
/* needed to ensure proper operation of coherent allocations
* later, in case driver doesn't set it explicitly */
- dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
- dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
+ dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
}
/* register with generic device framework */
@@ -1531,12 +1536,12 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
dn = dev->of_node;
if (!dn) {
- strcat(buf, "\n");
+ strcpy(buf, "\n");
return strlen(buf);
}
cp = of_get_property(dn, "compatible", NULL);
if (!cp) {
- strcat(buf, "\n");
+ strcpy(buf, "\n");
return strlen(buf);
}
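The vio.c hunk above converts hand-rolled bus_attribute arrays to the BUS_ATTR_RO()/BUS_ATTR_RW() helpers plus ATTRIBUTE_GROUPS(), the same pattern it registers through bus_type.bus_groups. A kernel-style sketch of that pattern with hypothetical names (not buildable standalone):

/* BUS_ATTR_RO(demo_total) expects demo_total_show() and emits a
 * 'bus_attr_demo_total' object; ATTRIBUTE_GROUPS(demo_bus) turns
 * demo_bus_attrs[] into the demo_bus_groups pointer that
 * bus_type.bus_groups takes. */
static unsigned long demo_total;

static ssize_t demo_total_show(struct bus_type *bt, char *buf)
{
	return sprintf(buf, "%lu\n", demo_total);
}
static BUS_ATTR_RO(demo_total);

static struct attribute *demo_bus_attrs[] = {
	&bus_attr_demo_total.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo_bus);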
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 2f5c6b6d6877..93221e87b911 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -31,13 +31,13 @@
#include "44x_tlb.h"
#include "booke.h"
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu)
{
kvmppc_booke_vcpu_load(vcpu, cpu);
kvmppc_44x_tlb_load(vcpu);
}
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu)
{
kvmppc_44x_tlb_put(vcpu);
kvmppc_booke_vcpu_put(vcpu);
@@ -114,29 +114,32 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
return 0;
}
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
- kvmppc_get_sregs_ivor(vcpu, sregs);
+ return kvmppc_get_sregs_ivor(vcpu, sregs);
}
-int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
return kvmppc_set_sregs_ivor(vcpu, sregs);
}
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
return -EINVAL;
}
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
return -EINVAL;
}
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm,
+ unsigned int id)
{
struct kvmppc_vcpu_44x *vcpu_44x;
struct kvm_vcpu *vcpu;
@@ -167,7 +170,7 @@ out:
return ERR_PTR(err);
}
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
@@ -176,28 +179,53 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
}
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_44x(struct kvm *kvm)
{
return 0;
}
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_44x(struct kvm *kvm)
{
}
+static struct kvmppc_ops kvm_ops_44x = {
+ .get_sregs = kvmppc_core_get_sregs_44x,
+ .set_sregs = kvmppc_core_set_sregs_44x,
+ .get_one_reg = kvmppc_get_one_reg_44x,
+ .set_one_reg = kvmppc_set_one_reg_44x,
+ .vcpu_load = kvmppc_core_vcpu_load_44x,
+ .vcpu_put = kvmppc_core_vcpu_put_44x,
+ .vcpu_create = kvmppc_core_vcpu_create_44x,
+ .vcpu_free = kvmppc_core_vcpu_free_44x,
+ .mmu_destroy = kvmppc_mmu_destroy_44x,
+ .init_vm = kvmppc_core_init_vm_44x,
+ .destroy_vm = kvmppc_core_destroy_vm_44x,
+ .emulate_op = kvmppc_core_emulate_op_44x,
+ .emulate_mtspr = kvmppc_core_emulate_mtspr_44x,
+ .emulate_mfspr = kvmppc_core_emulate_mfspr_44x,
+};
+
static int __init kvmppc_44x_init(void)
{
int r;
r = kvmppc_booke_init();
if (r)
- return r;
+ goto err_out;
+
+ r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
+ if (r)
+ goto err_out;
+ kvm_ops_44x.owner = THIS_MODULE;
+ kvmppc_pr_ops = &kvm_ops_44x;
- return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
+err_out:
+ return r;
}
static void __exit kvmppc_44x_exit(void)
{
+ kvmppc_pr_ops = NULL;
kvmppc_booke_exit();
}
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 35ec0a8547da..92c9ab4bcfec 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -91,8 +91,8 @@ static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn)
return EMULATE_DONE;
}
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance)
+int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int dcrn = get_dcrn(inst);
@@ -152,7 +152,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
@@ -172,7 +172,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
return emulated;
}
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ed0385448148..0deef1082e02 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -268,7 +268,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
trace_kvm_stlb_inval(stlb_index);
}
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
int i;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index ffaef2cb101a..141b2027189a 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -6,6 +6,7 @@ source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
+ depends on !CPU_LITTLE_ENDIAN
---help---
Say Y here to get to see options for using your Linux host to run
other operating systems inside virtual machines (guests).
@@ -34,17 +35,20 @@ config KVM_BOOK3S_64_HANDLER
bool
select KVM_BOOK3S_HANDLER
-config KVM_BOOK3S_PR
+config KVM_BOOK3S_PR_POSSIBLE
bool
select KVM_MMIO
select MMU_NOTIFIER
+config KVM_BOOK3S_HV_POSSIBLE
+ bool
+
config KVM_BOOK3S_32
tristate "KVM support for PowerPC book3s_32 processors"
depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT
select KVM
select KVM_BOOK3S_32_HANDLER
- select KVM_BOOK3S_PR
+ select KVM_BOOK3S_PR_POSSIBLE
---help---
Support running unmodified book3s_32 guest kernels
in virtual machines on book3s_32 host processors.
@@ -59,6 +63,7 @@ config KVM_BOOK3S_64
depends on PPC_BOOK3S_64
select KVM_BOOK3S_64_HANDLER
select KVM
+ select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
---help---
Support running unmodified book3s_64 and book3s_32 guest kernels
in virtual machines on book3s_64 host processors.
@@ -69,8 +74,9 @@ config KVM_BOOK3S_64
If unsure, say N.
config KVM_BOOK3S_64_HV
- bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
+ tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
depends on KVM_BOOK3S_64
+ select KVM_BOOK3S_HV_POSSIBLE
select MMU_NOTIFIER
select CMA
---help---
@@ -89,9 +95,20 @@ config KVM_BOOK3S_64_HV
If unsure, say N.
config KVM_BOOK3S_64_PR
- def_bool y
- depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
- select KVM_BOOK3S_PR
+ tristate "KVM support without using hypervisor mode in host"
+ depends on KVM_BOOK3S_64
+ select KVM_BOOK3S_PR_POSSIBLE
+ ---help---
+ Support running guest kernels in virtual machines on processors
+ without using hypervisor mode in the host, by running the
+ guest in user mode (problem state) and emulating all
+ privileged instructions and registers.
+
+ This is not as fast as using hypervisor mode, but works on
+ machines where hypervisor mode is not available or not usable,
+ and can emulate processors that are different from the host
+ processor, including emulating 32-bit processors on a 64-bit
+ host.
config KVM_BOOKE_HV
bool
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 6646c952c5e3..ce569b6bf4d8 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -53,41 +53,51 @@ kvm-e500mc-objs := \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
-kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
- $(KVM)/coalesced_mmio.o \
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
+ book3s_64_vio_hv.o
+
+kvm-pr-y := \
fpu.o \
book3s_paired_singles.o \
book3s_pr.o \
book3s_pr_papr.o \
- book3s_64_vio_hv.o \
book3s_emulate.o \
book3s_interrupts.o \
book3s_mmu_hpte.o \
book3s_64_mmu_host.o \
book3s_64_mmu.o \
book3s_32_mmu.o
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
+
+ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+kvm-book3s_64-module-objs := \
+ $(KVM)/coalesced_mmio.o
+
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
book3s_rmhandlers.o
+endif
-kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+kvm-hv-y += \
book3s_hv.o \
book3s_hv_interrupts.o \
book3s_64_mmu_hv.o
+
kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
book3s_hv_rm_xics.o
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+
+ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
book3s_hv_rmhandlers.o \
book3s_hv_rm_mmu.o \
- book3s_64_vio_hv.o \
book3s_hv_ras.o \
book3s_hv_builtin.o \
book3s_hv_cma.o \
$(kvm-book3s_64-builtin-xics-objs-y)
+endif
kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
book3s_xics.o
-kvm-book3s_64-module-objs := \
+kvm-book3s_64-module-objs += \
$(KVM)/kvm_main.o \
$(KVM)/eventfd.o \
powerpc.o \
@@ -123,4 +133,7 @@ obj-$(CONFIG_KVM_E500MC) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
+obj-$(CONFIG_KVM_BOOK3S_64_PR) += kvm-pr.o
+obj-$(CONFIG_KVM_BOOK3S_64_HV) += kvm-hv.o
+
obj-y += $(kvm-book3s_64-builtin-objs-y)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 700df6f1d32c..8912608b7e1b 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -34,6 +34,7 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
+#include "book3s.h"
#include "trace.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -69,6 +70,50 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+{
+ if (!is_kvmppc_hv_enabled(vcpu->kvm))
+ return to_book3s(vcpu)->hior;
+ return 0;
+}
+
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+ unsigned long pending_now, unsigned long old_pending)
+{
+ if (is_kvmppc_hv_enabled(vcpu->kvm))
+ return;
+ if (pending_now)
+ vcpu->arch.shared->int_pending = 1;
+ else if (old_pending)
+ vcpu->arch.shared->int_pending = 0;
+}
+
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+ ulong crit_raw;
+ ulong crit_r1;
+ bool crit;
+
+ if (is_kvmppc_hv_enabled(vcpu->kvm))
+ return false;
+
+ crit_raw = vcpu->arch.shared->critical;
+ crit_r1 = kvmppc_get_gpr(vcpu, 1);
+
+ /* Truncate crit indicators in 32 bit mode */
+ if (!(vcpu->arch.shared->msr & MSR_SF)) {
+ crit_raw &= 0xffffffff;
+ crit_r1 &= 0xffffffff;
+ }
+
+ /* Critical section when crit == r1 */
+ crit = (crit_raw == crit_r1);
+ /* ... and we're in supervisor mode */
+ crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+
+ return crit;
+}
+
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
@@ -126,28 +171,32 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
-
+EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
/* might as well deliver this straight away */
kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
+EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
+EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
@@ -285,8 +334,10 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
return 0;
}
+EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
-pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+ bool *writable)
{
ulong mp_pa = vcpu->arch.magic_page_pa;
@@ -302,20 +353,23 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
get_page(pfn_to_page(pfn));
+ if (writable)
+ *writable = true;
return pfn;
}
- return gfn_to_pfn(vcpu->kvm, gfn);
+ return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
+EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
- struct kvmppc_pte *pte)
+ bool iswrite, struct kvmppc_pte *pte)
{
int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
int r;
if (relocated) {
- r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
+ r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
} else {
pte->eaddr = eaddr;
pte->raddr = eaddr & KVM_PAM;
@@ -361,7 +415,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
vcpu->stat.st++;
- if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+ if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
return -ENOENT;
*eaddr = pte.raddr;
@@ -374,6 +428,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
return EMULATE_DONE;
}
+EXPORT_SYMBOL_GPL(kvmppc_st);
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data)
@@ -383,7 +438,7 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
vcpu->stat.ld++;
- if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+ if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
goto nopte;
*eaddr = pte.raddr;
@@ -404,6 +459,7 @@ nopte:
mmio:
return EMULATE_DO_MMIO;
}
+EXPORT_SYMBOL_GPL(kvmppc_ld);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
@@ -419,6 +475,18 @@ void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
+}
+
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
@@ -495,8 +563,7 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
if (size > sizeof(val))
return -EINVAL;
- r = kvmppc_get_one_reg(vcpu, reg->id, &val);
-
+ r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
if (r == -EINVAL) {
r = 0;
switch (reg->id) {
@@ -528,6 +595,9 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
}
val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
break;
+ case KVM_REG_PPC_VRSAVE:
+ val = get_reg_val(reg->id, vcpu->arch.vrsave);
+ break;
#endif /* CONFIG_ALTIVEC */
case KVM_REG_PPC_DEBUG_INST: {
u32 opcode = INS_TW;
@@ -572,8 +642,7 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
return -EFAULT;
- r = kvmppc_set_one_reg(vcpu, reg->id, &val);
-
+ r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
if (r == -EINVAL) {
r = 0;
switch (reg->id) {
@@ -605,6 +674,13 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
}
vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
break;
+ case KVM_REG_PPC_VRSAVE:
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+ vcpu->arch.vrsave = set_reg_val(reg->id, val);
+ break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_KVM_XICS
case KVM_REG_PPC_ICP_STATE:
@@ -625,6 +701,27 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
return r;
}
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
+}
+
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+{
+ vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
+}
+EXPORT_SYMBOL_GPL(kvmppc_set_msr);
+
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
+}
+
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
@@ -644,3 +741,141 @@ void kvmppc_decrementer_func(unsigned long data)
kvmppc_core_queue_dec(vcpu);
kvm_vcpu_kick(vcpu);
}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ return kvm->arch.kvm_ops->vcpu_create(kvm, id);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
+}
+
+int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
+}
+
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+ kvm->arch.kvm_ops->free_memslot(free, dont);
+}
+
+int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages)
+{
+ return kvm->arch.kvm_ops->create_memslot(slot, npages);
+}
+
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+ kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
+}
+
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem)
+{
+ return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old)
+{
+ kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+ return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_hva);
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+ return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return kvm->arch.kvm_ops->age_hva(kvm, hva);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+ kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+ vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+
+#ifdef CONFIG_PPC64
+ INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+ INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+#endif
+
+ return kvm->arch.kvm_ops->init_vm(kvm);
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+ kvm->arch.kvm_ops->destroy_vm(kvm);
+
+#ifdef CONFIG_PPC64
+ kvmppc_rtas_tokens_free(kvm);
+ WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+#endif
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+ /*
+ * We always return 0 for book3s. We check
+ * for compatibility while loading the HV
+ * or PR module.
+ */
+ return 0;
+}
+
+static int kvmppc_book3s_init(void)
+{
+ int r;
+
+ r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+ if (r)
+ return r;
+#ifdef CONFIG_KVM_BOOK3S_32
+ r = kvmppc_book3s_init_pr();
+#endif
+ return r;
+
+}
+
+static void kvmppc_book3s_exit(void)
+{
+#ifdef CONFIG_KVM_BOOK3S_32
+ kvmppc_book3s_exit_pr();
+#endif
+ kvm_exit();
+}
+
+module_init(kvmppc_book3s_init);
+module_exit(kvmppc_book3s_exit);
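
kvmppc_critical_section() above defers interrupt delivery when the guest has advertised a critical section: the recorded marker must equal r1 (both truncated to 32 bits when MSR_SF is clear) and the vcpu must not be in problem state. The standalone rendering below reproduces that predicate; the MSR bit positions are defined locally for illustration rather than taken from the kernel headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative bit positions; the kernel takes these from asm/reg.h */
#define MSR_SF_BIT (1ULL << 63)	/* 64-bit mode */
#define MSR_PR_BIT (1ULL << 14)	/* problem (user) state */

/* mirrors the shape of kvmppc_critical_section() in the hunk above */
static bool in_critical_section(uint64_t msr, uint64_t crit_raw, uint64_t r1)
{
	/* Truncate the indicators in 32-bit mode */
	if (!(msr & MSR_SF_BIT)) {
		crit_raw &= 0xffffffffull;
		r1 &= 0xffffffffull;
	}

	/* Critical section when crit == r1 and we are in supervisor mode */
	return crit_raw == r1 && !(msr & MSR_PR_BIT);
}

int main(void)
{
	/* 32-bit supervisor guest: high halves are ignored, so this matches */
	printf("%d\n", in_critical_section(0, 0x123400001000ull, 0x1000ull));
	/* same values in user mode: never treated as a critical section */
	printf("%d\n", in_critical_section(MSR_PR_BIT, 0x1000ull, 0x1000ull));
	return 0;
}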
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
new file mode 100644
index 000000000000..4bf956cf94d6
--- /dev/null
+++ b/arch/powerpc/kvm/book3s.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright IBM Corporation, 2013
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version of the license.
+ *
+ */
+
+#ifndef __POWERPC_KVM_BOOK3S_H__
+#define __POWERPC_KVM_BOOK3S_H__
+
+extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
+extern int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva);
+extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start,
+ unsigned long end);
+extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva);
+extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
+extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance);
+extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
+ int sprn, ulong spr_val);
+extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
+ int sprn, ulong *spr_val);
+extern int kvmppc_book3s_init_pr(void);
+extern void kvmppc_book3s_exit_pr(void);
+
+#endif
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index c8cefdd15fd8..76a64ce6a5b6 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -84,7 +84,8 @@ static inline bool sr_nx(u32 sr_raw)
}
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *pte, bool data);
+ struct kvmppc_pte *pte, bool data,
+ bool iswrite);
static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid);
@@ -99,7 +100,7 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
u64 vsid;
struct kvmppc_pte pte;
- if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
+ if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
return pte.vpage;
kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
@@ -111,10 +112,11 @@ static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, 0);
}
-static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s,
+static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
u32 sre, gva_t eaddr,
bool primary)
{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
u32 page, hash, pteg, htabmask;
hva_t r;
@@ -132,7 +134,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3
kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg,
sr_vsid(sre));
- r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
+ r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
if (kvm_is_error_hva(r))
return r;
return r | (pteg & ~PAGE_MASK);
@@ -145,7 +147,8 @@ static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
}
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *pte, bool data)
+ struct kvmppc_pte *pte, bool data,
+ bool iswrite)
{
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_bat *bat;
@@ -186,8 +189,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
printk(KERN_INFO "BAT is not readable!\n");
continue;
}
- if (!pte->may_write) {
- /* let's treat r/o BATs as not-readable for now */
+ if (iswrite && !pte->may_write) {
dprintk_pte("BAT is read-only!\n");
continue;
}
@@ -201,9 +203,8 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data,
- bool primary)
+ bool iswrite, bool primary)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
u32 sre;
hva_t ptegp;
u32 pteg[16];
@@ -218,7 +219,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
- ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary);
+ ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary);
if (kvm_is_error_hva(ptegp)) {
printk(KERN_INFO "KVM: Invalid PTEG!\n");
goto no_page_found;
@@ -258,9 +259,6 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
break;
}
- if ( !pte->may_read )
- continue;
-
dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
pteg[i], pteg[i+1], pp);
found = 1;
@@ -271,19 +269,23 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Update PTE C and A bits, so the guest's swapper knows we used the
page */
if (found) {
- u32 oldpte = pteg[i+1];
-
- if (pte->may_read)
- pteg[i+1] |= PTEG_FLAG_ACCESSED;
- if (pte->may_write)
- pteg[i+1] |= PTEG_FLAG_DIRTY;
- else
- dprintk_pte("KVM: Mapping read-only page!\n");
-
- /* Write back into the PTEG */
- if (pteg[i+1] != oldpte)
- copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
-
+ u32 pte_r = pteg[i+1];
+ char __user *addr = (char __user *) &pteg[i+1];
+
+ /*
+ * Use single-byte writes to update the HPTE, to
+ * conform to what real hardware does.
+ */
+ if (pte->may_read && !(pte_r & PTEG_FLAG_ACCESSED)) {
+ pte_r |= PTEG_FLAG_ACCESSED;
+ put_user(pte_r >> 8, addr + 2);
+ }
+ if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
+ pte_r |= PTEG_FLAG_DIRTY;
+ put_user(pte_r, addr + 3);
+ }
+ if (!pte->may_read || (iswrite && !pte->may_write))
+ return -EPERM;
return 0;
}
@@ -302,12 +304,14 @@ no_page_found:
}
static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *pte, bool data)
+ struct kvmppc_pte *pte, bool data,
+ bool iswrite)
{
int r;
ulong mp_ea = vcpu->arch.magic_page_ea;
pte->eaddr = eaddr;
+ pte->page_size = MMU_PAGE_4K;
/* Magic page override */
if (unlikely(mp_ea) &&
@@ -323,11 +327,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
return 0;
}
- r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
+ r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
if (r < 0)
- r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
+ r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+ data, iswrite, true);
if (r < 0)
- r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false);
+ r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+ data, iswrite, false);
return r;
}
@@ -347,7 +353,12 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
{
- kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
+ int i;
+ struct kvm_vcpu *v;
+
+ /* flush this VA on all cpus */
+ kvm_for_each_vcpu(i, v, vcpu->kvm)
+ kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000);
}
static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
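
The R/C update in book3s_32_mmu.c intentionally writes back only the single byte of the big-endian second HPTE word that carries the flag being set (offset 2 for ACCESSED = 0x100, offset 3 for DIRTY = 0x80), so concurrent guest stores to the other bytes are never overwritten. The sketch below replays the byte arithmetic with the word held as an explicit big-endian byte array, so it behaves the same on any build host; it is a model of the technique, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define FLAG_ACCESSED 0x100u	/* same values as PTEG_FLAG_ACCESSED/_DIRTY above */
#define FLAG_DIRTY    0x080u

/* read a big-endian 32-bit word from a byte array */
static uint32_t be32_get(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8  | (uint32_t)p[3];
}

/* set R/C by touching only the byte that contains each flag */
static void set_flags(uint8_t *pte_word, int accessed, int dirty)
{
	uint32_t r = be32_get(pte_word);

	if (accessed && !(r & FLAG_ACCESSED))
		pte_word[2] = (uint8_t)((r | FLAG_ACCESSED) >> 8);	/* like put_user(pte_r >> 8, addr + 2) */
	if (dirty && !(r & FLAG_DIRTY))
		pte_word[3] = (uint8_t)(r | FLAG_DIRTY);		/* like put_user(pte_r, addr + 3) */
}

int main(void)
{
	uint8_t pte[4] = { 0x12, 0x34, 0x00, 0x02 };	/* arbitrary PP bits in the low byte */

	set_flags(pte, 1, 1);
	printf("%08x\n", be32_get(pte));	/* 12340182: R and C now set, rest untouched */
	return 0;
}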
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 00e619bf608e..3a0abd2e5a15 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -138,7 +138,8 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
extern char etext[];
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+ bool iswrite)
{
pfn_t hpaddr;
u64 vpn;
@@ -152,9 +153,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
bool evict = false;
struct hpte_cache *pte;
int r = 0;
+ bool writable;
/* Get host physical address for gpa */
- hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
+ hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
+ iswrite, &writable);
if (is_error_noslot_pfn(hpaddr)) {
printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
orig_pte->eaddr);
@@ -204,7 +207,7 @@ next_pteg:
(primary ? 0 : PTE_SEC);
pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
- if (orig_pte->may_write) {
+ if (orig_pte->may_write && writable) {
pteg1 |= PP_RWRW;
mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
} else {
@@ -259,6 +262,11 @@ out:
return r;
}
+void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+ kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
+}
+
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
struct kvmppc_sid_map *map;
@@ -341,7 +349,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
svcpu_put(svcpu);
}
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
int i;
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 7e345e00661a..83da1f868fd5 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -107,9 +107,20 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
return kvmppc_slb_calc_vpn(slb, eaddr);
}
+static int mmu_pagesize(int mmu_pg)
+{
+ switch (mmu_pg) {
+ case MMU_PAGE_64K:
+ return 16;
+ case MMU_PAGE_16M:
+ return 24;
+ }
+ return 12;
+}
+
static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
- return slbe->large ? 24 : 12;
+ return mmu_pagesize(slbe->base_page_size);
}
static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
@@ -119,11 +130,11 @@ static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}
-static hva_t kvmppc_mmu_book3s_64_get_pteg(
- struct kvmppc_vcpu_book3s *vcpu_book3s,
+static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
struct kvmppc_slb *slbe, gva_t eaddr,
bool second)
{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
u64 hash, pteg, htabsize;
u32 ssize;
hva_t r;
@@ -148,10 +159,10 @@ static hva_t kvmppc_mmu_book3s_64_get_pteg(
/* When running a PAPR guest, SDR1 contains a HVA address instead
of a GPA */
- if (vcpu_book3s->vcpu.arch.papr_enabled)
+ if (vcpu->arch.papr_enabled)
r = pteg;
else
- r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
+ r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
if (kvm_is_error_hva(r))
return r;
@@ -166,18 +177,38 @@ static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
- if (p < 24)
- avpn >>= ((80 - p) - 56) - 8;
+ if (p < 16)
+ avpn >>= ((80 - p) - 56) - 8; /* 16 - p */
else
- avpn <<= 8;
+ avpn <<= p - 16;
return avpn;
}
+/*
+ * Return page size encoded in the second word of a HPTE, or
+ * -1 for an invalid encoding for the base page size indicated by
+ * the SLB entry. This doesn't handle mixed pagesize segments yet.
+ */
+static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
+{
+ switch (slbe->base_page_size) {
+ case MMU_PAGE_64K:
+ if ((r & 0xf000) == 0x1000)
+ return MMU_PAGE_64K;
+ break;
+ case MMU_PAGE_16M:
+ if ((r & 0xff000) == 0)
+ return MMU_PAGE_16M;
+ break;
+ }
+ return -1;
+}
+
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *gpte, bool data)
+ struct kvmppc_pte *gpte, bool data,
+ bool iswrite)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_slb *slbe;
hva_t ptegp;
u64 pteg[16];
@@ -189,6 +220,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
u8 pp, key = 0;
bool found = false;
bool second = false;
+ int pgsize;
ulong mp_ea = vcpu->arch.magic_page_ea;
/* Magic page override */
@@ -202,6 +234,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
gpte->may_execute = true;
gpte->may_read = true;
gpte->may_write = true;
+ gpte->page_size = MMU_PAGE_4K;
return 0;
}
@@ -222,8 +255,12 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
HPTE_V_SECONDARY;
+ pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;
+
+ mutex_lock(&vcpu->kvm->arch.hpt_mutex);
+
do_second:
- ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
+ ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
if (kvm_is_error_hva(ptegp))
goto no_page_found;
@@ -240,6 +277,13 @@ do_second:
for (i=0; i<16; i+=2) {
/* Check all relevant fields of 1st dword */
if ((pteg[i] & v_mask) == v_val) {
+ /* If large page bit is set, check pgsize encoding */
+ if (slbe->large &&
+ (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
+ pgsize = decode_pagesize(slbe, pteg[i+1]);
+ if (pgsize < 0)
+ continue;
+ }
found = true;
break;
}
@@ -256,13 +300,15 @@ do_second:
v = pteg[i];
r = pteg[i+1];
pp = (r & HPTE_R_PP) | key;
- eaddr_mask = 0xFFF;
+ if (r & HPTE_R_PP0)
+ pp |= 8;
gpte->eaddr = eaddr;
gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
- if (slbe->large)
- eaddr_mask = 0xFFFFFF;
+
+ eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
+ gpte->page_size = pgsize;
gpte->may_execute = ((r & HPTE_R_N) ? false : true);
gpte->may_read = false;
gpte->may_write = false;
@@ -277,6 +323,7 @@ do_second:
case 3:
case 5:
case 7:
+ case 10:
gpte->may_read = true;
break;
}
@@ -287,30 +334,37 @@ do_second:
/* Update PTE R and C bits, so the guest's swapper knows we used the
* page */
- if (gpte->may_read) {
- /* Set the accessed flag */
+ if (gpte->may_read && !(r & HPTE_R_R)) {
+ /*
+ * Set the accessed flag.
+ * We have to write this back with a single byte write
+ * because another vcpu may be accessing this on
+ * non-PAPR platforms such as mac99, and this is
+ * what real hardware does.
+ */
+ char __user *addr = (char __user *) &pteg[i+1];
r |= HPTE_R_R;
+ put_user(r >> 8, addr + 6);
}
- if (data && gpte->may_write) {
- /* Set the dirty flag -- XXX even if not writing */
+ if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
+ /* Set the dirty flag */
+ /* Use a single byte write */
+ char __user *addr = (char __user *) &pteg[i+1];
r |= HPTE_R_C;
+ put_user(r, addr + 7);
}
- /* Write back into the PTEG */
- if (pteg[i+1] != r) {
- pteg[i+1] = r;
- copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
- }
+ mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
- if (!gpte->may_read)
+ if (!gpte->may_read || (iswrite && !gpte->may_write))
return -EPERM;
return 0;
no_page_found:
+ mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
return -ENOENT;
no_seg_found:
-
dprintk("KVM MMU: Trigger segment fault\n");
return -EINVAL;
}
@@ -345,6 +399,21 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
slbe->nx = (rs & SLB_VSID_N) ? 1 : 0;
slbe->class = (rs & SLB_VSID_C) ? 1 : 0;
+ slbe->base_page_size = MMU_PAGE_4K;
+ if (slbe->large) {
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
+ switch (rs & SLB_VSID_LP) {
+ case SLB_VSID_LP_00:
+ slbe->base_page_size = MMU_PAGE_16M;
+ break;
+ case SLB_VSID_LP_01:
+ slbe->base_page_size = MMU_PAGE_64K;
+ break;
+ }
+ } else
+ slbe->base_page_size = MMU_PAGE_16M;
+ }
+
slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
slbe->origv = rs;
@@ -460,14 +529,45 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
bool large)
{
u64 mask = 0xFFFFFFFFFULL;
+ long i;
+ struct kvm_vcpu *v;
dprintk("KVM MMU: tlbie(0x%lx)\n", va);
- if (large)
- mask = 0xFFFFFF000ULL;
- kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
+ /*
+ * The tlbie instruction changed behaviour starting with
+ * POWER6. POWER6 and later don't have the large page flag
+ * in the instruction but in the RB value, along with bits
+ * indicating page and segment sizes.
+ */
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
+ /* POWER6 or later */
+ if (va & 1) { /* L bit */
+ if ((va & 0xf000) == 0x1000)
+ mask = 0xFFFFFFFF0ULL; /* 64k page */
+ else
+ mask = 0xFFFFFF000ULL; /* 16M page */
+ }
+ } else {
+ /* older processors, e.g. PPC970 */
+ if (large)
+ mask = 0xFFFFFF000ULL;
+ }
+ /* flush this VA on all vcpus */
+ kvm_for_each_vcpu(i, v, vcpu->kvm)
+ kvmppc_mmu_pte_vflush(v, va >> 12, mask);
}
+#ifdef CONFIG_PPC_64K_PAGES
+static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
+{
+ ulong mp_ea = vcpu->arch.magic_page_ea;
+
+ return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
+ (mp_ea >> SID_SHIFT) == esid;
+}
+#endif
+
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid)
{
@@ -475,11 +575,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
struct kvmppc_slb *slb;
u64 gvsid = esid;
ulong mp_ea = vcpu->arch.magic_page_ea;
+ int pagesize = MMU_PAGE_64K;
if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
if (slb) {
gvsid = slb->vsid;
+ pagesize = slb->base_page_size;
if (slb->tb) {
gvsid <<= SID_SHIFT_1T - SID_SHIFT;
gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
@@ -490,28 +592,41 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
case 0:
- *vsid = VSID_REAL | esid;
+ gvsid = VSID_REAL | esid;
break;
case MSR_IR:
- *vsid = VSID_REAL_IR | gvsid;
+ gvsid |= VSID_REAL_IR;
break;
case MSR_DR:
- *vsid = VSID_REAL_DR | gvsid;
+ gvsid |= VSID_REAL_DR;
break;
case MSR_DR|MSR_IR:
if (!slb)
goto no_slb;
- *vsid = gvsid;
break;
default:
BUG();
break;
}
+#ifdef CONFIG_PPC_64K_PAGES
+ /*
+ * Mark this as a 64k segment if the host is using
+ * 64k pages, the host MMU supports 64k pages and
+ * the guest segment page size is >= 64k,
+ * but not if this segment contains the magic page.
+ */
+ if (pagesize >= MMU_PAGE_64K &&
+ mmu_psize_defs[MMU_PAGE_64K].shift &&
+ !segment_contains_magic_page(vcpu, esid))
+ gvsid |= VSID_64K;
+#endif
+
if (vcpu->arch.shared->msr & MSR_PR)
- *vsid |= VSID_PR;
+ gvsid |= VSID_PR;
+ *vsid = gvsid;
return 0;
no_slb:
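
decode_pagesize() above accepts only the two large-page encodings this emulation handles: under a 64K base segment the low bits of the second HPTE word must carry the 0x1000 encoding, and under a 16M base segment bits 12-19 must all be clear; anything else makes the PTE ineligible. The fragment below replays those checks with the constants copied from the hunk; the enum values are local stand-ins for the MMU_PAGE_* numbers, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

enum pgsize { PG_4K, PG_64K, PG_16M, PG_BAD = -1 };

/* same checks as decode_pagesize() in the hunk above */
static int decode_pagesize(enum pgsize base, uint64_t r)
{
	switch (base) {
	case PG_64K:
		if ((r & 0xf000) == 0x1000)
			return PG_64K;
		break;
	case PG_16M:
		if ((r & 0xff000) == 0)
			return PG_16M;
		break;
	default:
		break;
	}
	return PG_BAD;
}

int main(void)
{
	printf("%d\n", decode_pagesize(PG_64K, 0x0000000000001100ull));	/* 1: valid 64K encoding */
	printf("%d\n", decode_pagesize(PG_16M, 0x0000000001234000ull));	/* -1: low encoding bits set */
	return 0;
}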
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index e5240524bf6c..0d513af62bba 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -27,14 +27,14 @@
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
-#include "trace.h"
+#include "trace_pr.h"
#define PTE_SIZE 12
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
- MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M,
+ pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
false);
}
@@ -78,7 +78,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
return NULL;
}
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+ bool iswrite)
{
unsigned long vpn;
pfn_t hpaddr;
@@ -90,16 +91,26 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
int attempt = 0;
struct kvmppc_sid_map *map;
int r = 0;
+ int hpsize = MMU_PAGE_4K;
+ bool writable;
+ unsigned long mmu_seq;
+ struct kvm *kvm = vcpu->kvm;
+ struct hpte_cache *cpte;
+ unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
+ unsigned long pfn;
+
+ /* used to check for invalidations in progress */
+ mmu_seq = kvm->mmu_notifier_seq;
+ smp_rmb();
/* Get host physical address for gpa */
- hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
- if (is_error_noslot_pfn(hpaddr)) {
- printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
+ pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
+ if (is_error_noslot_pfn(pfn)) {
+ printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
r = -EINVAL;
goto out;
}
- hpaddr <<= PAGE_SHIFT;
- hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
+ hpaddr = pfn << PAGE_SHIFT;
/* and write the mapping ea -> hpa into the pt */
vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
@@ -117,20 +128,39 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
goto out;
}
- vsid = map->host_vsid;
- vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
+ vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
- if (!orig_pte->may_write)
- rflags |= HPTE_R_PP;
- else
- mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+ kvm_set_pfn_accessed(pfn);
+ if (!orig_pte->may_write || !writable)
+ rflags |= PP_RXRX;
+ else {
+ mark_page_dirty(vcpu->kvm, gfn);
+ kvm_set_pfn_dirty(pfn);
+ }
if (!orig_pte->may_execute)
rflags |= HPTE_R_N;
else
- kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
+ kvmppc_mmu_flush_icache(pfn);
+
+ /*
+ * Use 64K pages if possible; otherwise, on 64K page kernels,
+ * we need to transfer 4 more bits from guest real to host real addr.
+ */
+ if (vsid & VSID_64K)
+ hpsize = MMU_PAGE_64K;
+ else
+ hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
+
+ hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);
- hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
+ cpte = kvmppc_mmu_hpte_cache_next(vcpu);
+
+ spin_lock(&kvm->mmu_lock);
+ if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
+ r = -EAGAIN;
+ goto out_unlock;
+ }
map_again:
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
@@ -139,11 +169,11 @@ map_again:
if (attempt > 1)
if (ppc_md.hpte_remove(hpteg) < 0) {
r = -1;
- goto out;
+ goto out_unlock;
}
ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
- MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M);
+ hpsize, hpsize, MMU_SEGSIZE_256M);
if (ret < 0) {
/* If we couldn't map a primary PTE, try a secondary */
@@ -152,8 +182,6 @@ map_again:
attempt++;
goto map_again;
} else {
- struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
-
trace_kvm_book3s_64_mmu_map(rflags, hpteg,
vpn, hpaddr, orig_pte);
@@ -164,19 +192,37 @@ map_again:
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
}
- pte->slot = hpteg + (ret & 7);
- pte->host_vpn = vpn;
- pte->pte = *orig_pte;
- pte->pfn = hpaddr >> PAGE_SHIFT;
+ cpte->slot = hpteg + (ret & 7);
+ cpte->host_vpn = vpn;
+ cpte->pte = *orig_pte;
+ cpte->pfn = pfn;
+ cpte->pagesize = hpsize;
- kvmppc_mmu_hpte_cache_map(vcpu, pte);
+ kvmppc_mmu_hpte_cache_map(vcpu, cpte);
+ cpte = NULL;
}
- kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+
+out_unlock:
+ spin_unlock(&kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ if (cpte)
+ kvmppc_mmu_hpte_cache_free(cpte);
out:
return r;
}
+void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+ u64 mask = 0xfffffffffULL;
+ u64 vsid;
+
+ vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
+ if (vsid & VSID_64K)
+ mask = 0xffffffff0ULL;
+ kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
+}
+
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
struct kvmppc_sid_map *map;
@@ -291,6 +337,12 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
slb_vsid &= ~SLB_VSID_KP;
slb_esid |= slb_index;
+#ifdef CONFIG_PPC_64K_PAGES
+ /* Set host segment base page size to 64K if possible */
+ if (gvsid & VSID_64K)
+ slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
+#endif
+
svcpu->slb[slb_index].esid = slb_esid;
svcpu->slb[slb_index].vsid = slb_vsid;
@@ -326,7 +378,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
svcpu_put(svcpu);
}
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
kvmppc_mmu_hpte_destroy(vcpu);
__destroy_context(to_book3s(vcpu)->context_id[0]);
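
kvmppc_mmu_map_page() now follows the usual MMU-notifier race protocol: sample mmu_notifier_seq before the unlocked pfn lookup, then re-check it under mmu_lock via mmu_notifier_retry() and bail out with -EAGAIN if an invalidation ran in between. The user-space model below compresses that check to a plain atomic counter; the helper names and the simulated race are purely illustrative.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long mmu_seq_count;	/* bumped by the "invalidation" side */

static long sample_seq(void)
{
	/* like reading kvm->mmu_notifier_seq followed by smp_rmb() */
	return atomic_load_explicit(&mmu_seq_count, memory_order_acquire);
}

static int retry_needed(long snapshot)
{
	/* re-checked under the map-side lock in the real code */
	return atomic_load_explicit(&mmu_seq_count, memory_order_acquire) != snapshot;
}

static int map_page(int racy)
{
	long seq = sample_seq();

	/* ... unlocked work: pfn lookup, permission checks ... */
	if (racy)	/* pretend an invalidation ran concurrently */
		atomic_fetch_add(&mmu_seq_count, 1);

	if (retry_needed(seq))
		return -1;	/* caller retries, as with -EAGAIN above */
	/* ... insert the HPTE while still holding the lock ... */
	return 0;
}

int main(void)
{
	printf("clean run: %d\n", map_page(0));
	printf("raced run: %d\n", map_page(1));
	return 0;
}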
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 043eec8461e7..f3ff587a8b7d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -260,10 +260,6 @@ int kvmppc_mmu_hv_init(void)
return 0;
}
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
-{
-}
-
static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
@@ -451,7 +447,7 @@ static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
}
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *gpte, bool data)
+ struct kvmppc_pte *gpte, bool data, bool iswrite)
{
struct kvm *kvm = vcpu->kvm;
struct kvmppc_slb *slbe;
@@ -906,21 +902,22 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
return 0;
}
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
{
if (kvm->arch.using_mmu_notifiers)
kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
return 0;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
if (kvm->arch.using_mmu_notifiers)
kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
return 0;
}
-void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
{
unsigned long *rmapp;
unsigned long gfn;
@@ -994,7 +991,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
return ret;
}
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
if (!kvm->arch.using_mmu_notifiers)
return 0;
@@ -1032,14 +1029,14 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
return ret;
}
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
if (!kvm->arch.using_mmu_notifiers)
return 0;
return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
{
if (!kvm->arch.using_mmu_notifiers)
return;
@@ -1512,9 +1509,8 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
(VRMA_VSID << SLB_VSID_SHIFT_1T);
- lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
- lpcr |= senc << (LPCR_VRMASD_SH - 4);
- kvm->arch.lpcr = lpcr;
+ lpcr = senc << (LPCR_VRMASD_SH - 4);
+ kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
rma_setup = 1;
}
++i;
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 30c2f3b134c6..2c25f5412bdb 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -74,3 +74,4 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
/* Didn't find the liobn, punt it to userspace */
return H_TOO_HARD;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 360ce68c9809..99d40f8977e8 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -86,8 +86,8 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
return true;
}
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance)
+int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int rt = get_rt(inst);
@@ -172,7 +172,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.mmu.tlbie(vcpu, addr, large);
break;
}
-#ifdef CONFIG_KVM_BOOK3S_64_PR
+#ifdef CONFIG_PPC_BOOK3S_64
case OP_31_XOP_FAKE_SC1:
{
/* SC 1 papr hypercalls */
@@ -267,12 +267,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = kvmppc_st(vcpu, &addr, 32, zeros, true);
if ((r == -ENOENT) || (r == -EPERM)) {
- struct kvmppc_book3s_shadow_vcpu *svcpu;
-
- svcpu = svcpu_get(vcpu);
*advance = 0;
vcpu->arch.shared->dar = vaddr;
- svcpu->fault_dar = vaddr;
+ vcpu->arch.fault_dar = vaddr;
dsisr = DSISR_ISSTORE;
if (r == -ENOENT)
@@ -281,8 +278,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
dsisr |= DSISR_PROTFAULT;
vcpu->arch.shared->dsisr = dsisr;
- svcpu->fault_dsisr = dsisr;
- svcpu_put(svcpu);
+ vcpu->arch.fault_dsisr = dsisr;
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_DATA_STORAGE);
@@ -349,7 +345,7 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
return bat;
}
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
@@ -472,7 +468,7 @@ unprivileged:
return emulated;
}
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 7057a02f0906..852989a9bad3 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -20,9 +20,10 @@
#include <linux/export.h>
#include <asm/kvm_book3s.h>
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
-#else
+#endif
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
#ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 62a2b5ab08ed..072287f1c3bc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -52,6 +52,9 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
+#include <linux/module.h>
+
+#include "book3s.h"
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
@@ -66,7 +69,7 @@
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
-void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
int me;
int cpu = vcpu->cpu;
@@ -125,7 +128,7 @@ void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
* purely defensive; they should never fail.)
*/
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
@@ -143,7 +146,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
spin_unlock(&vcpu->arch.tbacct_lock);
}
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
@@ -155,17 +158,46 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
spin_unlock(&vcpu->arch.tbacct_lock);
}
-void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
vcpu->arch.shregs.msr = msr;
kvmppc_end_cede(vcpu);
}
-void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
vcpu->arch.pvr = pvr;
}
+int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
+{
+ unsigned long pcr = 0;
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ if (arch_compat) {
+ if (!cpu_has_feature(CPU_FTR_ARCH_206))
+ return -EINVAL; /* 970 has no compat mode support */
+
+ switch (arch_compat) {
+ case PVR_ARCH_205:
+ pcr = PCR_ARCH_205;
+ break;
+ case PVR_ARCH_206:
+ case PVR_ARCH_206p:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ spin_lock(&vc->lock);
+ vc->arch_compat = arch_compat;
+ vc->pcr = pcr;
+ spin_unlock(&vc->lock);
+
+ return 0;
+}
+
void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
int r;
@@ -195,7 +227,7 @@ void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
pr_err(" ESID = %.16llx VSID = %.16llx\n",
vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
- vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
+ vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
vcpu->arch.last_inst);
}
@@ -489,7 +521,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
memset(dt, 0, sizeof(struct dtl_entry));
dt->dispatch_reason = 7;
dt->processor_id = vc->pcpu + vcpu->arch.ptid;
- dt->timebase = now;
+ dt->timebase = now + vc->tb_offset;
dt->enqueue_to_dispatch_time = stolen;
dt->srr0 = kvmppc_get_pc(vcpu);
dt->srr1 = vcpu->arch.shregs.msr;
@@ -538,6 +570,15 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
}
break;
case H_CONFER:
+ target = kvmppc_get_gpr(vcpu, 4);
+ if (target == -1)
+ break;
+ tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+ if (!tvcpu) {
+ ret = H_PARAMETER;
+ break;
+ }
+ kvm_vcpu_yield_to(tvcpu);
break;
case H_REGISTER_VPA:
ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
@@ -576,8 +617,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
- struct task_struct *tsk)
+static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ struct task_struct *tsk)
{
int r = RESUME_HOST;
@@ -671,16 +712,16 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
vcpu->arch.shregs.msr);
+ run->hw.hardware_exit_reason = vcpu->arch.trap;
r = RESUME_HOST;
- BUG();
break;
}
return r;
}
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
int i;
@@ -694,12 +735,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
return 0;
}
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
int i, j;
- kvmppc_set_pvr(vcpu, sregs->pvr);
+ kvmppc_set_pvr_hv(vcpu, sregs->pvr);
j = 0;
for (i = 0; i < vcpu->arch.slb_nr; i++) {
@@ -714,7 +755,23 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return 0;
}
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ u64 mask;
+
+ spin_lock(&vc->lock);
+ /*
+ * Userspace can only modify DPFD (default prefetch depth),
+ * ILE (interrupt little-endian) and TC (translation control).
+ */
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
+ vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
+ spin_unlock(&vc->lock);
+}
+
+static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = 0;
long int i;
@@ -749,6 +806,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
i = id - KVM_REG_PPC_PMC1;
*val = get_reg_val(id, vcpu->arch.pmc[i]);
break;
+ case KVM_REG_PPC_SIAR:
+ *val = get_reg_val(id, vcpu->arch.siar);
+ break;
+ case KVM_REG_PPC_SDAR:
+ *val = get_reg_val(id, vcpu->arch.sdar);
+ break;
#ifdef CONFIG_VSX
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -787,6 +850,18 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
val->vpaval.length = vcpu->arch.dtl.len;
spin_unlock(&vcpu->arch.vpa_update_lock);
break;
+ case KVM_REG_PPC_TB_OFFSET:
+ *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
+ break;
+ case KVM_REG_PPC_LPCR:
+ *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
+ break;
+ case KVM_REG_PPC_PPR:
+ *val = get_reg_val(id, vcpu->arch.ppr);
+ break;
+ case KVM_REG_PPC_ARCH_COMPAT:
+ *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
+ break;
default:
r = -EINVAL;
break;
@@ -795,7 +870,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
return r;
}
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = 0;
long int i;
@@ -833,6 +909,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
i = id - KVM_REG_PPC_PMC1;
vcpu->arch.pmc[i] = set_reg_val(id, *val);
break;
+ case KVM_REG_PPC_SIAR:
+ vcpu->arch.siar = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_SDAR:
+ vcpu->arch.sdar = set_reg_val(id, *val);
+ break;
#ifdef CONFIG_VSX
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -880,6 +962,20 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
len -= len % sizeof(struct dtl_entry);
r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
break;
+ case KVM_REG_PPC_TB_OFFSET:
+ /* round up to multiple of 2^24 */
+ vcpu->arch.vcore->tb_offset =
+ ALIGN(set_reg_val(id, *val), 1UL << 24);
+ break;
+ case KVM_REG_PPC_LPCR:
+ kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_PPR:
+ vcpu->arch.ppr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_ARCH_COMPAT:
+ r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
+ break;
default:
r = -EINVAL;
break;
@@ -888,14 +984,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
return r;
}
-int kvmppc_core_check_processor_compat(void)
-{
- if (cpu_has_feature(CPU_FTR_HVMODE))
- return 0;
- return -EIO;
-}
-
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
+ unsigned int id)
{
struct kvm_vcpu *vcpu;
int err = -EINVAL;
@@ -919,8 +1009,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
vcpu->arch.mmcr[0] = MMCR0_FC;
vcpu->arch.ctrl = CTRL_RUNLATCH;
/* default to host PVR, since we can't spoof it */
- vcpu->arch.pvr = mfspr(SPRN_PVR);
- kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+ kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
spin_lock_init(&vcpu->arch.vpa_update_lock);
spin_lock_init(&vcpu->arch.tbacct_lock);
vcpu->arch.busy_preempt = TB_NIL;
@@ -940,6 +1029,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
spin_lock_init(&vcore->lock);
init_waitqueue_head(&vcore->wq);
vcore->preempt_tb = TB_NIL;
+ vcore->lpcr = kvm->arch.lpcr;
}
kvm->arch.vcores[core] = vcore;
kvm->arch.online_vcores++;
@@ -972,7 +1062,7 @@ static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
vpa->dirty);
}
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
{
spin_lock(&vcpu->arch.vpa_update_lock);
unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
@@ -983,6 +1073,12 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu);
}
+static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
+{
+ /* Indicate we want to get back into the guest */
+ return 1;
+}
+
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
unsigned long dec_nsec, now;
@@ -1264,8 +1360,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
ret = RESUME_GUEST;
if (vcpu->arch.trap)
- ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
- vcpu->arch.run_task);
+ ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+ vcpu->arch.run_task);
vcpu->arch.ret = ret;
vcpu->arch.trap = 0;
@@ -1424,7 +1520,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
return vcpu->arch.ret;
}
-int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int r;
int srcu_idx;
@@ -1546,7 +1642,8 @@ static const struct file_operations kvm_rma_fops = {
.release = kvm_rma_release,
};
-long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
+static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
+ struct kvm_allocate_rma *ret)
{
long fd;
struct kvm_rma_info *ri;
@@ -1592,7 +1689,8 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
(*sps)++;
}
-int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
+ struct kvm_ppc_smmu_info *info)
{
struct kvm_ppc_one_seg_page_size *sps;
@@ -1613,7 +1711,8 @@ int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
+ struct kvm_dirty_log *log)
{
struct kvm_memory_slot *memslot;
int r;
@@ -1667,8 +1766,8 @@ static void unpin_slot(struct kvm_memory_slot *memslot)
}
}
-void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
{
if (!dont || free->arch.rmap != dont->arch.rmap) {
vfree(free->arch.rmap);
@@ -1681,8 +1780,8 @@ void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
}
}
-int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
- unsigned long npages)
+static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
+ unsigned long npages)
{
slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
if (!slot->arch.rmap)
@@ -1692,9 +1791,9 @@ int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
return 0;
}
-int kvmppc_core_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem)
+static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem)
{
unsigned long *phys;
@@ -1710,9 +1809,9 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
return 0;
}
-void kvmppc_core_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old)
+static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old)
{
unsigned long npages = mem->memory_size >> PAGE_SHIFT;
struct kvm_memory_slot *memslot;
@@ -1729,6 +1828,37 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
}
}
+/*
+ * Update LPCR values in kvm->arch and in vcores.
+ * Caller must hold kvm->lock.
+ */
+void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
+{
+ long int i;
+ u32 cores_done = 0;
+
+ if ((kvm->arch.lpcr & mask) == lpcr)
+ return;
+
+ kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
+
+ for (i = 0; i < KVM_MAX_VCORES; ++i) {
+ struct kvmppc_vcore *vc = kvm->arch.vcores[i];
+ if (!vc)
+ continue;
+ spin_lock(&vc->lock);
+ vc->lpcr = (vc->lpcr & ~mask) | lpcr;
+ spin_unlock(&vc->lock);
+ if (++cores_done >= kvm->arch.online_vcores)
+ break;
+ }
+}
+
+static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
+{
+ return;
+}
+
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
int err = 0;
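
kvmppc_update_lpcr() above is a masked read-modify-write applied first to the VM-wide LPCR value and then to every online vcore (the kernel takes vc->lock around each per-vcore update). A minimal userspace model of the same update, with VCORE_MAX and struct vcore as stand-ins for the kernel structures and with the locking omitted:

#include <stdio.h>

#define VCORE_MAX 4   /* stand-in for KVM_MAX_VCORES */

struct vcore { unsigned long lpcr; };

/* Clear the masked bits, then OR in the new value, VM-wide and per vcore,
 * exactly as kvmppc_update_lpcr() does (minus the vc->lock). */
static void update_lpcr(unsigned long *vm_lpcr, struct vcore *vc[], int n,
                        unsigned long lpcr, unsigned long mask)
{
    int i;

    if ((*vm_lpcr & mask) == lpcr)
        return;                              /* nothing to change */

    *vm_lpcr = (*vm_lpcr & ~mask) | lpcr;
    for (i = 0; i < n; ++i) {
        if (!vc[i])
            continue;
        vc[i]->lpcr = (vc[i]->lpcr & ~mask) | lpcr;
    }
}

int main(void)
{
    struct vcore a = { 0xf0f0 }, b = { 0xf0f0 };
    struct vcore *vcores[VCORE_MAX] = { &a, 0, &b, 0 };
    unsigned long vm_lpcr = 0xf0f0;

    update_lpcr(&vm_lpcr, vcores, VCORE_MAX, 0x0005, 0x000f);
    printf("vm=%#lx a=%#lx b=%#lx\n", vm_lpcr, a.lpcr, b.lpcr);
    return 0;
}
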
@@ -1737,7 +1867,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
unsigned long hva;
struct kvm_memory_slot *memslot;
struct vm_area_struct *vma;
- unsigned long lpcr, senc;
+ unsigned long lpcr = 0, senc;
+ unsigned long lpcr_mask = 0;
unsigned long psize, porder;
unsigned long rma_size;
unsigned long rmls;
@@ -1802,9 +1933,9 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
senc = slb_pgsize_encoding(psize);
kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
(VRMA_VSID << SLB_VSID_SHIFT_1T);
- lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
- lpcr |= senc << (LPCR_VRMASD_SH - 4);
- kvm->arch.lpcr = lpcr;
+ lpcr_mask = LPCR_VRMASD;
+ /* the -4 is to account for senc values starting at 0x10 */
+ lpcr = senc << (LPCR_VRMASD_SH - 4);
/* Create HPTEs in the hash page table for the VRMA */
kvmppc_map_vrma(vcpu, memslot, porder);
@@ -1825,23 +1956,21 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
kvm->arch.rma = ri;
/* Update LPCR and RMOR */
- lpcr = kvm->arch.lpcr;
if (cpu_has_feature(CPU_FTR_ARCH_201)) {
/* PPC970; insert RMLS value (split field) in HID4 */
- lpcr &= ~((1ul << HID4_RMLS0_SH) |
- (3ul << HID4_RMLS2_SH));
- lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
+ lpcr_mask = (1ul << HID4_RMLS0_SH) |
+ (3ul << HID4_RMLS2_SH) | HID4_RMOR;
+ lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
((rmls & 3) << HID4_RMLS2_SH);
/* RMOR is also in HID4 */
lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
<< HID4_RMOR_SH;
} else {
/* POWER7 */
- lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
- lpcr |= rmls << LPCR_RMLS_SH;
+ lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
+ lpcr = rmls << LPCR_RMLS_SH;
kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
}
- kvm->arch.lpcr = lpcr;
pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
@@ -1860,6 +1989,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
}
}
+ kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
smp_wmb();
kvm->arch.rma_setup_done = 1;
@@ -1875,7 +2006,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
goto out_srcu;
}
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_hv(struct kvm *kvm)
{
unsigned long lpcr, lpid;
@@ -1893,9 +2024,6 @@ int kvmppc_core_init_vm(struct kvm *kvm)
*/
cpumask_setall(&kvm->arch.need_tlb_flush);
- INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
- INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
-
kvm->arch.rma = NULL;
kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
@@ -1931,61 +2059,162 @@ int kvmppc_core_init_vm(struct kvm *kvm)
return 0;
}
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_free_vcores(struct kvm *kvm)
+{
+ long int i;
+
+ for (i = 0; i < KVM_MAX_VCORES; ++i)
+ kfree(kvm->arch.vcores[i]);
+ kvm->arch.online_vcores = 0;
+}
+
+static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
{
uninhibit_secondary_onlining();
+ kvmppc_free_vcores(kvm);
if (kvm->arch.rma) {
kvm_release_rma(kvm->arch.rma);
kvm->arch.rma = NULL;
}
- kvmppc_rtas_tokens_free(kvm);
-
kvmppc_free_hpt(kvm);
- WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}
-/* These are stubs for now */
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
+/* We don't need to emulate any privileged instructions or dcbz */
+static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
{
+ return EMULATE_FAIL;
}
-/* We don't need to emulate any privileged instructions or dcbz */
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance)
+static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
+ ulong spr_val)
{
return EMULATE_FAIL;
}
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
+ ulong *spr_val)
{
return EMULATE_FAIL;
}
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+static int kvmppc_core_check_processor_compat_hv(void)
{
- return EMULATE_FAIL;
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return -EIO;
+ return 0;
}
-static int kvmppc_book3s_hv_init(void)
+static long kvm_arch_vm_ioctl_hv(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
{
- int r;
+ struct kvm *kvm __maybe_unused = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ long r;
- r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+ switch (ioctl) {
- if (r)
+ case KVM_ALLOCATE_RMA: {
+ struct kvm_allocate_rma rma;
+ struct kvm *kvm = filp->private_data;
+
+ r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
+ if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
+ r = -EFAULT;
+ break;
+ }
+
+ case KVM_PPC_ALLOCATE_HTAB: {
+ u32 htab_order;
+
+ r = -EFAULT;
+ if (get_user(htab_order, (u32 __user *)argp))
+ break;
+ r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
+ if (r)
+ break;
+ r = -EFAULT;
+ if (put_user(htab_order, (u32 __user *)argp))
+ break;
+ r = 0;
+ break;
+ }
+
+ case KVM_PPC_GET_HTAB_FD: {
+ struct kvm_get_htab_fd ghf;
+
+ r = -EFAULT;
+ if (copy_from_user(&ghf, argp, sizeof(ghf)))
+ break;
+ r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
+ break;
+ }
+
+ default:
+ r = -ENOTTY;
+ }
+
+ return r;
+}
+
+static struct kvmppc_ops kvm_ops_hv = {
+ .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
+ .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
+ .get_one_reg = kvmppc_get_one_reg_hv,
+ .set_one_reg = kvmppc_set_one_reg_hv,
+ .vcpu_load = kvmppc_core_vcpu_load_hv,
+ .vcpu_put = kvmppc_core_vcpu_put_hv,
+ .set_msr = kvmppc_set_msr_hv,
+ .vcpu_run = kvmppc_vcpu_run_hv,
+ .vcpu_create = kvmppc_core_vcpu_create_hv,
+ .vcpu_free = kvmppc_core_vcpu_free_hv,
+ .check_requests = kvmppc_core_check_requests_hv,
+ .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv,
+ .flush_memslot = kvmppc_core_flush_memslot_hv,
+ .prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
+ .commit_memory_region = kvmppc_core_commit_memory_region_hv,
+ .unmap_hva = kvm_unmap_hva_hv,
+ .unmap_hva_range = kvm_unmap_hva_range_hv,
+ .age_hva = kvm_age_hva_hv,
+ .test_age_hva = kvm_test_age_hva_hv,
+ .set_spte_hva = kvm_set_spte_hva_hv,
+ .mmu_destroy = kvmppc_mmu_destroy_hv,
+ .free_memslot = kvmppc_core_free_memslot_hv,
+ .create_memslot = kvmppc_core_create_memslot_hv,
+ .init_vm = kvmppc_core_init_vm_hv,
+ .destroy_vm = kvmppc_core_destroy_vm_hv,
+ .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
+ .emulate_op = kvmppc_core_emulate_op_hv,
+ .emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
+ .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
+ .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
+ .arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
+};
+
+static int kvmppc_book3s_init_hv(void)
+{
+ int r;
+ /*
+ * FIXME!! Do we need to check on all cpus ?
+ */
+ r = kvmppc_core_check_processor_compat_hv();
+ if (r < 0)
return r;
- r = kvmppc_mmu_hv_init();
+ kvm_ops_hv.owner = THIS_MODULE;
+ kvmppc_hv_ops = &kvm_ops_hv;
+ r = kvmppc_mmu_hv_init();
return r;
}
-static void kvmppc_book3s_hv_exit(void)
+static void kvmppc_book3s_exit_hv(void)
{
- kvm_exit();
+ kvmppc_hv_ops = NULL;
}
-module_init(kvmppc_book3s_hv_init);
-module_exit(kvmppc_book3s_hv_exit);
+module_init(kvmppc_book3s_init_hv);
+module_exit(kvmppc_book3s_exit_hv);
+MODULE_LICENSE("GPL");
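
With kvm_ops_hv and the new init/exit functions above, the HV backend no longer implements the kvmppc_core_* entry points directly; it registers a table of callbacks that the generic code reaches through a pointer (kvmppc_hv_ops). A toy model of that register-then-dispatch pattern, separate from the patch; the struct and hook names below are illustrative, not the kernel's:

#include <stdio.h>

/* Toy stand-in for struct kvmppc_ops: the generic layer only keeps a
 * pointer and calls through it, so a backend can live in a module. */
struct backend_ops {
    const char *name;
    int (*check_processor_compat)(void);
    int (*init_vm)(void);
};

static struct backend_ops *active_ops;         /* plays the role of kvmppc_hv_ops */

static int hv_compat(void)  { return 0; }      /* pretend CPU_FTR_HVMODE is present */
static int hv_init_vm(void) { puts("hv: init vm"); return 0; }

static struct backend_ops hv_ops = {
    .name = "hv",
    .check_processor_compat = hv_compat,
    .init_vm = hv_init_vm,
};

static int backend_register(struct backend_ops *ops)
{
    int r = ops->check_processor_compat();
    if (r < 0)
        return r;                              /* refuse, as the init function does */
    active_ops = ops;
    return 0;
}

int main(void)
{
    if (backend_register(&hv_ops) == 0 && active_ops)
        active_ops->init_vm();                 /* generic code dispatches here */
    return 0;
}
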
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 37f1cc417ca0..928142c64cb0 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -158,9 +158,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
* Interrupts are enabled again at this point.
*/
-.global kvmppc_handler_highmem
-kvmppc_handler_highmem:
-
/*
* Register usage at this point:
*
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index c71103b8a748..bc8de75b1925 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -33,30 +33,6 @@
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif
-/*****************************************************************************
- * *
- * Real Mode handlers that need to be in the linear mapping *
- * *
- ****************************************************************************/
-
- .globl kvmppc_skip_interrupt
-kvmppc_skip_interrupt:
- mfspr r13,SPRN_SRR0
- addi r13,r13,4
- mtspr SPRN_SRR0,r13
- GET_SCRATCH0(r13)
- rfid
- b .
-
- .globl kvmppc_skip_Hinterrupt
-kvmppc_skip_Hinterrupt:
- mfspr r13,SPRN_HSRR0
- addi r13,r13,4
- mtspr SPRN_HSRR0,r13
- GET_SCRATCH0(r13)
- hrfid
- b .
-
/*
* Call kvmppc_hv_entry in real mode.
* Must be called with interrupts hard-disabled.
@@ -66,8 +42,11 @@ kvmppc_skip_Hinterrupt:
* LR = return address to continue at after eventually re-enabling MMU
*/
_GLOBAL(kvmppc_hv_entry_trampoline)
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+ stdu r1, -112(r1)
mfmsr r10
- LOAD_REG_ADDR(r5, kvmppc_hv_entry)
+ LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
li r0,MSR_RI
andc r0,r10,r0
li r6,MSR_IR | MSR_DR
@@ -77,11 +56,103 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
mtsrr1 r6
RFI
-/******************************************************************************
- * *
- * Entry code *
- * *
- *****************************************************************************/
+kvmppc_call_hv_entry:
+ bl kvmppc_hv_entry
+
+ /* Back from guest - restore host state and return to caller */
+
+ /* Restore host DABR and DABRX */
+ ld r5,HSTATE_DABR(r13)
+ li r6,7
+ mtspr SPRN_DABR,r5
+ mtspr SPRN_DABRX,r6
+
+ /* Restore SPRG3 */
+ ld r3,PACA_SPRG3(r13)
+ mtspr SPRN_SPRG3,r3
+
+ /*
+ * Reload DEC. HDEC interrupts were disabled when
+ * we reloaded the host's LPCR value.
+ */
+ ld r3, HSTATE_DECEXP(r13)
+ mftb r4
+ subf r4, r4, r3
+ mtspr SPRN_DEC, r4
+
+ /* Reload the host's PMU registers */
+ ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
+ lbz r4, LPPACA_PMCINUSE(r3)
+ cmpwi r4, 0
+ beq 23f /* skip if not */
+ lwz r3, HSTATE_PMC(r13)
+ lwz r4, HSTATE_PMC + 4(r13)
+ lwz r5, HSTATE_PMC + 8(r13)
+ lwz r6, HSTATE_PMC + 12(r13)
+ lwz r8, HSTATE_PMC + 16(r13)
+ lwz r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+ lwz r10, HSTATE_PMC + 24(r13)
+ lwz r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ mtspr SPRN_PMC1, r3
+ mtspr SPRN_PMC2, r4
+ mtspr SPRN_PMC3, r5
+ mtspr SPRN_PMC4, r6
+ mtspr SPRN_PMC5, r8
+ mtspr SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+ mtspr SPRN_PMC7, r10
+ mtspr SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ ld r3, HSTATE_MMCR(r13)
+ ld r4, HSTATE_MMCR + 8(r13)
+ ld r5, HSTATE_MMCR + 16(r13)
+ mtspr SPRN_MMCR1, r4
+ mtspr SPRN_MMCRA, r5
+ mtspr SPRN_MMCR0, r3
+ isync
+23:
+
+ /*
+ * For external and machine check interrupts, we need
+ * to call the Linux handler to process the interrupt.
+ * We do that by jumping to absolute address 0x500 for
+ * external interrupts, or the machine_check_fwnmi label
+ * for machine checks (since firmware might have patched
+ * the vector area at 0x200). The [h]rfid at the end of the
+ * handler will return to the book3s_hv_interrupts.S code.
+ * For other interrupts we do the rfid to get back
+ * to the book3s_hv_interrupts.S code here.
+ */
+ ld r8, 112+PPC_LR_STKOFF(r1)
+ addi r1, r1, 112
+ ld r7, HSTATE_HOST_MSR(r13)
+
+ cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
+BEGIN_FTR_SECTION
+ beq 11f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* RFI into the highmem handler, or branch to interrupt handler */
+ mfmsr r6
+ li r0, MSR_RI
+ andc r6, r6, r0
+ mtmsrd r6, 1 /* Clear RI in MSR */
+ mtsrr0 r8
+ mtsrr1 r7
+ beqa 0x500 /* external interrupt (PPC970) */
+ beq cr1, 13f /* machine check */
+ RFI
+
+ /* On POWER7, we have external interrupts set to use HSRR0/1 */
+11: mtspr SPRN_HSRR0, r8
+ mtspr SPRN_HSRR1, r7
+ ba 0x500
+
+13: b machine_check_fwnmi
+
/*
* We come in here when wakened from nap mode on a secondary hw thread.
@@ -137,7 +208,7 @@ kvm_start_guest:
cmpdi r4,0
/* if we have no vcpu to run, go back to sleep */
beq kvm_no_guest
- b kvmppc_hv_entry
+ b 30f
27: /* XXX should handle hypervisor maintenance interrupts etc. here */
b kvm_no_guest
@@ -147,6 +218,57 @@ kvm_start_guest:
stw r8,HSTATE_SAVED_XIRR(r13)
b kvm_no_guest
+30: bl kvmppc_hv_entry
+
+ /* Back from the guest, go back to nap */
+ /* Clear our vcpu pointer so we don't come back in early */
+ li r0, 0
+ std r0, HSTATE_KVM_VCPU(r13)
+ lwsync
+ /* Clear any pending IPI - we're an offline thread */
+ ld r5, HSTATE_XICS_PHYS(r13)
+ li r7, XICS_XIRR
+ lwzcix r3, r5, r7 /* ack any pending interrupt */
+ rlwinm. r0, r3, 0, 0xffffff /* any pending? */
+ beq 37f
+ sync
+ li r0, 0xff
+ li r6, XICS_MFRR
+ stbcix r0, r5, r6 /* clear the IPI */
+ stwcix r3, r5, r7 /* EOI it */
+37: sync
+
+ /* increment the nap count and then go to nap mode */
+ ld r4, HSTATE_KVM_VCORE(r13)
+ addi r4, r4, VCORE_NAP_COUNT
+ lwsync /* make previous updates visible */
+51: lwarx r3, 0, r4
+ addi r3, r3, 1
+ stwcx. r3, 0, r4
+ bne 51b
+
+kvm_no_guest:
+ li r0, KVM_HWTHREAD_IN_NAP
+ stb r0, HSTATE_HWTHREAD_STATE(r13)
+ li r3, LPCR_PECE0
+ mfspr r4, SPRN_LPCR
+ rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
+ mtspr SPRN_LPCR, r4
+ isync
+ std r0, HSTATE_SCRATCH0(r13)
+ ptesync
+ ld r0, HSTATE_SCRATCH0(r13)
+1: cmpd r0, r0
+ bne 1b
+ nap
+ b .
+
+/******************************************************************************
+ * *
+ * Entry code *
+ * *
+ *****************************************************************************/
+
.global kvmppc_hv_entry
kvmppc_hv_entry:
@@ -159,7 +281,8 @@ kvmppc_hv_entry:
* all other volatile GPRS = free
*/
mflr r0
- std r0, HSTATE_VMHANDLER(r13)
+ std r0, PPC_LR_STKOFF(r1)
+ stdu r1, -112(r1)
/* Set partition DABR */
/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -200,8 +323,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
ld r3, VCPU_MMCR(r4)
ld r5, VCPU_MMCR + 8(r4)
ld r6, VCPU_MMCR + 16(r4)
+ ld r7, VCPU_SIAR(r4)
+ ld r8, VCPU_SDAR(r4)
mtspr SPRN_MMCR1, r5
mtspr SPRN_MMCRA, r6
+ mtspr SPRN_SIAR, r7
+ mtspr SPRN_SDAR, r8
mtspr SPRN_MMCR0, r3
isync
@@ -254,22 +381,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Save R1 in the PACA */
std r1, HSTATE_HOST_R1(r13)
- /* Increment yield count if they have a VPA */
- ld r3, VCPU_VPA(r4)
- cmpdi r3, 0
- beq 25f
- lwz r5, LPPACA_YIELDCOUNT(r3)
- addi r5, r5, 1
- stw r5, LPPACA_YIELDCOUNT(r3)
- li r6, 1
- stb r6, VCPU_VPA_DIRTY(r4)
-25:
/* Load up DAR and DSISR */
ld r5, VCPU_DAR(r4)
lwz r6, VCPU_DSISR(r4)
mtspr SPRN_DAR, r5
mtspr SPRN_DSISR, r6
+ li r6, KVM_GUEST_MODE_HOST_HV
+ stb r6, HSTATE_IN_GUEST(r13)
+
BEGIN_FTR_SECTION
/* Restore AMR and UAMOR, set AMOR to all 1s */
ld r5,VCPU_AMR(r4)
@@ -343,7 +463,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
bdnz 28b
ptesync
-22: li r0,1
+ /* Add timebase offset onto timebase */
+22: ld r8,VCORE_TB_OFFSET(r5)
+ cmpdi r8,0
+ beq 37f
+ mftb r6 /* current host timebase */
+ add r8,r8,r6
+ mtspr SPRN_TBU40,r8 /* update upper 40 bits */
+ mftb r7 /* check if lower 24 bits overflowed */
+ clrldi r6,r6,40
+ clrldi r7,r7,40
+ cmpld r7,r6
+ bge 37f
+ addis r8,r8,0x100 /* if so, increment upper 40 bits */
+ mtspr SPRN_TBU40,r8
+
+ /* Load guest PCR value to select appropriate compat mode */
+37: ld r7, VCORE_PCR(r5)
+ cmpdi r7, 0
+ beq 38f
+ mtspr SPRN_PCR, r7
+38:
+ li r0,1
stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
b 10f
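
The timebase-offset sequence at 22: above adds the vcore's offset by writing only the upper 40 bits through TBU40, then re-reads the timebase to see whether the low 24 bits wrapped during the update, and if so adds another 1 << 24 (the addis r8,r8,0x100). A small C model of that carry check, outside the patch; mftb/mttbu40 below are fakes over a static variable, so the timebase does not actually tick and the carry path is present but not exercised:

#include <stdint.h>
#include <stdio.h>

static uint64_t tb;                              /* pretend timebase register */

static uint64_t mftb(void)        { return tb; }
static void mttbu40(uint64_t v)   { tb = (v & ~0xffffffULL) | (tb & 0xffffffULL); }

static void add_tb_offset(uint64_t offset)
{
    uint64_t old   = mftb();
    uint64_t newtb = old + offset;

    mttbu40(newtb);                              /* only the upper 40 bits take effect */
    if ((mftb() & 0xffffff) < (old & 0xffffff)) {
        /* low 24 bits wrapped while we were updating: bump the upper 40 bits */
        newtb += 1ULL << 24;
        mttbu40(newtb);
    }
}

int main(void)
{
    tb = 0x0000000012ffffffULL;                  /* low 24 bits about to wrap */
    add_tb_offset(0x40000000ULL);
    printf("tb after adding offset: %#llx\n", (unsigned long long)tb);
    return 0;
}
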
@@ -353,12 +494,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
beq 20b
/* Set LPCR and RMOR. */
-10: ld r8,KVM_LPCR(r9)
+10: ld r8,VCORE_LPCR(r5)
mtspr SPRN_LPCR,r8
ld r8,KVM_RMOR(r9)
mtspr SPRN_RMOR,r8
isync
+ /* Increment yield count if they have a VPA */
+ ld r3, VCPU_VPA(r4)
+ cmpdi r3, 0
+ beq 25f
+ lwz r5, LPPACA_YIELDCOUNT(r3)
+ addi r5, r5, 1
+ stw r5, LPPACA_YIELDCOUNT(r3)
+ li r6, 1
+ stb r6, VCPU_VPA_DIRTY(r4)
+25:
/* Check if HDEC expires soon */
mfspr r3,SPRN_HDEC
cmpwi r3,10
@@ -405,7 +556,8 @@ toc_tlbie_lock:
bne 24b
isync
- ld r7,KVM_LPCR(r9) /* use kvm->arch.lpcr to store HID4 */
+ ld r5,HSTATE_KVM_VCORE(r13)
+ ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */
li r0,0x18f
rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
or r0,r7,r0
@@ -541,7 +693,7 @@ fast_guest_return:
mtspr SPRN_HSRR1,r11
/* Activate guest mode, so faults get handled by KVM */
- li r9, KVM_GUEST_MODE_GUEST
+ li r9, KVM_GUEST_MODE_GUEST_HV
stb r9, HSTATE_IN_GUEST(r13)
/* Enter guest */
@@ -550,13 +702,15 @@ BEGIN_FTR_SECTION
ld r5, VCPU_CFAR(r4)
mtspr SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+ ld r0, VCPU_PPR(r4)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ld r5, VCPU_LR(r4)
lwz r6, VCPU_CR(r4)
mtlr r5
mtcr r6
- ld r0, VCPU_GPR(R0)(r4)
ld r1, VCPU_GPR(R1)(r4)
ld r2, VCPU_GPR(R2)(r4)
ld r3, VCPU_GPR(R3)(r4)
@@ -570,6 +724,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
ld r12, VCPU_GPR(R12)(r4)
ld r13, VCPU_GPR(R13)(r4)
+BEGIN_FTR_SECTION
+ mtspr SPRN_PPR, r0
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ ld r0, VCPU_GPR(R0)(r4)
ld r4, VCPU_GPR(R4)(r4)
hrfid
@@ -584,8 +742,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
/*
* We come here from the first-level interrupt handlers.
*/
- .globl kvmppc_interrupt
-kvmppc_interrupt:
+ .globl kvmppc_interrupt_hv
+kvmppc_interrupt_hv:
/*
* Register contents:
* R12 = interrupt vector
@@ -595,6 +753,19 @@ kvmppc_interrupt:
*/
/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
std r9, HSTATE_HOST_R2(r13)
+
+ lbz r9, HSTATE_IN_GUEST(r13)
+ cmpwi r9, KVM_GUEST_MODE_HOST_HV
+ beq kvmppc_bad_host_intr
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ cmpwi r9, KVM_GUEST_MODE_GUEST
+ ld r9, HSTATE_HOST_R2(r13)
+ beq kvmppc_interrupt_pr
+#endif
+ /* We're now back in the host but in guest MMU context */
+ li r9, KVM_GUEST_MODE_HOST_HV
+ stb r9, HSTATE_IN_GUEST(r13)
+
ld r9, HSTATE_KVM_VCPU(r13)
/* Save registers */
@@ -620,6 +791,10 @@ BEGIN_FTR_SECTION
ld r3, HSTATE_CFAR(r13)
std r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+ ld r4, HSTATE_PPR(r13)
+ std r4, VCPU_PPR(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Restore R1/R2 so we can handle faults */
ld r1, HSTATE_HOST_R1(r13)
@@ -642,10 +817,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
std r3, VCPU_GPR(R13)(r9)
std r4, VCPU_LR(r9)
- /* Unset guest mode */
- li r0, KVM_GUEST_MODE_NONE
- stb r0, HSTATE_IN_GUEST(r13)
-
stw r12,VCPU_TRAP(r9)
/* Save HEIR (HV emulation assist reg) in last_inst
@@ -696,46 +867,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
* set, we know the host wants us out so let's do it now
*/
do_ext_interrupt:
- lbz r0, HSTATE_HOST_IPI(r13)
- cmpwi r0, 0
- bne ext_interrupt_to_host
-
- /* Now read the interrupt from the ICP */
- ld r5, HSTATE_XICS_PHYS(r13)
- li r7, XICS_XIRR
- cmpdi r5, 0
- beq- ext_interrupt_to_host
- lwzcix r3, r5, r7
- rlwinm. r0, r3, 0, 0xffffff
- sync
- beq 3f /* if nothing pending in the ICP */
-
- /* We found something in the ICP...
- *
- * If it's not an IPI, stash it in the PACA and return to
- * the host, we don't (yet) handle directing real external
- * interrupts directly to the guest
- */
- cmpwi r0, XICS_IPI
- bne ext_stash_for_host
-
- /* It's an IPI, clear the MFRR and EOI it */
- li r0, 0xff
- li r6, XICS_MFRR
- stbcix r0, r5, r6 /* clear the IPI */
- stwcix r3, r5, r7 /* EOI it */
- sync
-
- /* We need to re-check host IPI now in case it got set in the
- * meantime. If it's clear, we bounce the interrupt to the
- * guest
- */
- lbz r0, HSTATE_HOST_IPI(r13)
- cmpwi r0, 0
- bne- 1f
+ bl kvmppc_read_intr
+ cmpdi r3, 0
+ bgt ext_interrupt_to_host
/* Allright, looks like an IPI for the guest, we need to set MER */
-3:
/* Check if any CPU is heading out to the host, if so head out too */
ld r5, HSTATE_KVM_VCORE(r13)
lwz r0, VCORE_ENTRY_EXIT(r5)
@@ -764,27 +900,9 @@ do_ext_interrupt:
mtspr SPRN_LPCR, r8
b fast_guest_return
- /* We raced with the host, we need to resend that IPI, bummer */
-1: li r0, IPI_PRIORITY
- stbcix r0, r5, r6 /* set the IPI */
- sync
- b ext_interrupt_to_host
-
-ext_stash_for_host:
- /* It's not an IPI and it's for the host, stash it in the PACA
- * before exit, it will be picked up by the host ICP driver
- */
- stw r3, HSTATE_SAVED_XIRR(r13)
ext_interrupt_to_host:
guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
- /* Save DEC */
- mfspr r5,SPRN_DEC
- mftb r6
- extsw r5,r5
- add r5,r5,r6
- std r5,VCPU_DEC_EXPIRES(r9)
-
/* Save more register state */
mfdar r6
mfdsisr r7
@@ -954,7 +1072,30 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_SDR1,r6 /* switch to partition page table */
mtspr SPRN_LPID,r7
isync
- li r0,0
+
+ /* Subtract timebase offset from timebase */
+ ld r8,VCORE_TB_OFFSET(r5)
+ cmpdi r8,0
+ beq 17f
+ mftb r6 /* current host timebase */
+ subf r8,r8,r6
+ mtspr SPRN_TBU40,r8 /* update upper 40 bits */
+ mftb r7 /* check if lower 24 bits overflowed */
+ clrldi r6,r6,40
+ clrldi r7,r7,40
+ cmpld r7,r6
+ bge 17f
+ addis r8,r8,0x100 /* if so, increment upper 40 bits */
+ mtspr SPRN_TBU40,r8
+
+ /* Reset PCR */
+17: ld r0, VCORE_PCR(r5)
+ cmpdi r0, 0
+ beq 18f
+ li r0, 0
+ mtspr SPRN_PCR, r0
+18:
+ /* Signal secondary CPUs to continue */
stb r0,VCORE_IN_GUEST(r5)
lis r8,0x7fff /* MAX_INT@h */
mtspr SPRN_HDEC,r8
@@ -1052,6 +1193,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1: addi r8,r8,16
.endr
+ /* Save DEC */
+ mfspr r5,SPRN_DEC
+ mftb r6
+ extsw r5,r5
+ add r5,r5,r6
+ std r5,VCPU_DEC_EXPIRES(r9)
+
/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
mfspr r5,SPRN_AMR
@@ -1062,6 +1210,10 @@ BEGIN_FTR_SECTION
mtspr SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+ /* Unset guest mode */
+ li r0, KVM_GUEST_MODE_NONE
+ stb r0, HSTATE_IN_GUEST(r13)
+
/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
@@ -1134,9 +1286,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
b 22f
21: mfspr r5, SPRN_MMCR1
+ mfspr r7, SPRN_SIAR
+ mfspr r8, SPRN_SDAR
std r4, VCPU_MMCR(r9)
std r5, VCPU_MMCR + 8(r9)
std r6, VCPU_MMCR + 16(r9)
+ std r7, VCPU_SIAR(r9)
+ std r8, VCPU_SDAR(r9)
mfspr r3, SPRN_PMC1
mfspr r4, SPRN_PMC2
mfspr r5, SPRN_PMC3
@@ -1158,103 +1314,30 @@ BEGIN_FTR_SECTION
stw r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
+ ld r0, 112+PPC_LR_STKOFF(r1)
+ addi r1, r1, 112
+ mtlr r0
+ blr
+secondary_too_late:
+ ld r5,HSTATE_KVM_VCORE(r13)
+ HMT_LOW
+13: lbz r3,VCORE_IN_GUEST(r5)
+ cmpwi r3,0
+ bne 13b
+ HMT_MEDIUM
+ li r0, KVM_GUEST_MODE_NONE
+ stb r0, HSTATE_IN_GUEST(r13)
+ ld r11,PACA_SLBSHADOWPTR(r13)
- /* Secondary threads go off to take a nap on POWER7 */
-BEGIN_FTR_SECTION
- lwz r0,VCPU_PTID(r9)
- cmpwi r0,0
- bne secondary_nap
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
- /* Restore host DABR and DABRX */
- ld r5,HSTATE_DABR(r13)
- li r6,7
- mtspr SPRN_DABR,r5
- mtspr SPRN_DABRX,r6
-
- /* Restore SPRG3 */
- ld r3,PACA_SPRG3(r13)
- mtspr SPRN_SPRG3,r3
-
- /*
- * Reload DEC. HDEC interrupts were disabled when
- * we reloaded the host's LPCR value.
- */
- ld r3, HSTATE_DECEXP(r13)
- mftb r4
- subf r4, r4, r3
- mtspr SPRN_DEC, r4
-
- /* Reload the host's PMU registers */
- ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
- lbz r4, LPPACA_PMCINUSE(r3)
- cmpwi r4, 0
- beq 23f /* skip if not */
- lwz r3, HSTATE_PMC(r13)
- lwz r4, HSTATE_PMC + 4(r13)
- lwz r5, HSTATE_PMC + 8(r13)
- lwz r6, HSTATE_PMC + 12(r13)
- lwz r8, HSTATE_PMC + 16(r13)
- lwz r9, HSTATE_PMC + 20(r13)
-BEGIN_FTR_SECTION
- lwz r10, HSTATE_PMC + 24(r13)
- lwz r11, HSTATE_PMC + 28(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
- mtspr SPRN_PMC1, r3
- mtspr SPRN_PMC2, r4
- mtspr SPRN_PMC3, r5
- mtspr SPRN_PMC4, r6
- mtspr SPRN_PMC5, r8
- mtspr SPRN_PMC6, r9
-BEGIN_FTR_SECTION
- mtspr SPRN_PMC7, r10
- mtspr SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
- ld r3, HSTATE_MMCR(r13)
- ld r4, HSTATE_MMCR + 8(r13)
- ld r5, HSTATE_MMCR + 16(r13)
- mtspr SPRN_MMCR1, r4
- mtspr SPRN_MMCRA, r5
- mtspr SPRN_MMCR0, r3
- isync
-23:
- /*
- * For external and machine check interrupts, we need
- * to call the Linux handler to process the interrupt.
- * We do that by jumping to absolute address 0x500 for
- * external interrupts, or the machine_check_fwnmi label
- * for machine checks (since firmware might have patched
- * the vector area at 0x200). The [h]rfid at the end of the
- * handler will return to the book3s_hv_interrupts.S code.
- * For other interrupts we do the rfid to get back
- * to the book3s_hv_interrupts.S code here.
- */
- ld r8, HSTATE_VMHANDLER(r13)
- ld r7, HSTATE_HOST_MSR(r13)
-
- cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
- cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
-BEGIN_FTR_SECTION
- beq 11f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
- /* RFI into the highmem handler, or branch to interrupt handler */
- mfmsr r6
- li r0, MSR_RI
- andc r6, r6, r0
- mtmsrd r6, 1 /* Clear RI in MSR */
- mtsrr0 r8
- mtsrr1 r7
- beqa 0x500 /* external interrupt (PPC970) */
- beq cr1, 13f /* machine check */
- RFI
-
- /* On POWER7, we have external interrupts set to use HSRR0/1 */
-11: mtspr SPRN_HSRR0, r8
- mtspr SPRN_HSRR1, r7
- ba 0x500
-
-13: b machine_check_fwnmi
+ .rept SLB_NUM_BOLTED
+ ld r5,SLBSHADOW_SAVEAREA(r11)
+ ld r6,SLBSHADOW_SAVEAREA+8(r11)
+ andis. r7,r5,SLB_ESID_V@h
+ beq 1f
+ slbmte r6,r5
+1: addi r11,r11,16
+ .endr
+ b 22b
/*
* Check whether an HDSI is an HPTE not found fault or something else.
@@ -1333,7 +1416,7 @@ fast_interrupt_c_return:
stw r8, VCPU_LAST_INST(r9)
/* Unset guest mode. */
- li r0, KVM_GUEST_MODE_NONE
+ li r0, KVM_GUEST_MODE_HOST_HV
stb r0, HSTATE_IN_GUEST(r13)
b guest_exit_cont
@@ -1701,67 +1784,70 @@ machine_check_realmode:
rotldi r11, r11, 63
b fast_interrupt_c_return
-secondary_too_late:
- ld r5,HSTATE_KVM_VCORE(r13)
- HMT_LOW
-13: lbz r3,VCORE_IN_GUEST(r5)
- cmpwi r3,0
- bne 13b
- HMT_MEDIUM
- ld r11,PACA_SLBSHADOWPTR(r13)
-
- .rept SLB_NUM_BOLTED
- ld r5,SLBSHADOW_SAVEAREA(r11)
- ld r6,SLBSHADOW_SAVEAREA+8(r11)
- andis. r7,r5,SLB_ESID_V@h
- beq 1f
- slbmte r6,r5
-1: addi r11,r11,16
- .endr
+/*
+ * Determine what sort of external interrupt is pending (if any).
+ * Returns:
+ * 0 if no interrupt is pending
+ * 1 if an interrupt is pending that needs to be handled by the host
+ * -1 if there was a guest wakeup IPI (which has now been cleared)
+ */
+kvmppc_read_intr:
+ /* see if a host IPI is pending */
+ li r3, 1
+ lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
+ bne 1f
-secondary_nap:
- /* Clear our vcpu pointer so we don't come back in early */
- li r0, 0
- std r0, HSTATE_KVM_VCPU(r13)
- lwsync
- /* Clear any pending IPI - assume we're a secondary thread */
- ld r5, HSTATE_XICS_PHYS(r13)
+ /* Now read the interrupt from the ICP */
+ ld r6, HSTATE_XICS_PHYS(r13)
li r7, XICS_XIRR
- lwzcix r3, r5, r7 /* ack any pending interrupt */
- rlwinm. r0, r3, 0, 0xffffff /* any pending? */
- beq 37f
+ cmpdi r6, 0
+ beq- 1f
+ lwzcix r0, r6, r7
+ rlwinm. r3, r0, 0, 0xffffff
sync
- li r0, 0xff
- li r6, XICS_MFRR
- stbcix r0, r5, r6 /* clear the IPI */
- stwcix r3, r5, r7 /* EOI it */
-37: sync
+ beq 1f /* if nothing pending in the ICP */
- /* increment the nap count and then go to nap mode */
- ld r4, HSTATE_KVM_VCORE(r13)
- addi r4, r4, VCORE_NAP_COUNT
- lwsync /* make previous updates visible */
-51: lwarx r3, 0, r4
- addi r3, r3, 1
- stwcx. r3, 0, r4
- bne 51b
+ /* We found something in the ICP...
+ *
+ * If it's not an IPI, stash it in the PACA and return to
+ * the host, we don't (yet) handle directing real external
+ * interrupts directly to the guest
+ */
+ cmpwi r3, XICS_IPI /* if there is, is it an IPI? */
+ li r3, 1
+ bne 42f
-kvm_no_guest:
- li r0, KVM_HWTHREAD_IN_NAP
- stb r0, HSTATE_HWTHREAD_STATE(r13)
+ /* It's an IPI, clear the MFRR and EOI it */
+ li r3, 0xff
+ li r8, XICS_MFRR
+ stbcix r3, r6, r8 /* clear the IPI */
+ stwcix r0, r6, r7 /* EOI it */
+ sync
- li r3, LPCR_PECE0
- mfspr r4, SPRN_LPCR
- rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
- mtspr SPRN_LPCR, r4
- isync
- std r0, HSTATE_SCRATCH0(r13)
- ptesync
- ld r0, HSTATE_SCRATCH0(r13)
-1: cmpd r0, r0
- bne 1b
- nap
- b .
+ /* We need to re-check host IPI now in case it got set in the
+ * meantime. If it's clear, we bounce the interrupt to the
+ * guest
+ */
+ lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
+ bne- 43f
+
+ /* OK, it's an IPI for us */
+ li r3, -1
+1: blr
+
+42: /* It's not an IPI and it's for the host, stash it in the PACA
+ * before exit, it will be picked up by the host ICP driver
+ */
+ stw r0, HSTATE_SAVED_XIRR(r13)
+ b 1b
+
+43: /* We raced with the host, we need to resend that IPI, bummer */
+ li r0, IPI_PRIORITY
+ stbcix r0, r6, r8 /* set the IPI */
+ sync
+ b 1b
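
The comment block above gives the contract of the new kvmppc_read_intr helper: 0 means nothing is pending, 1 means the host must handle it (a host IPI, or a device interrupt stashed in the PACA), and -1 means a guest wakeup IPI that has already been cleared and EOIed. A plain-C sketch of the same decision structure, not part of the patch; host_ipi_pending, xirr and raced are fed in by the caller here instead of being read from the real ICP, and XICS_IPI is an illustrative value:

#include <stdio.h>

#define XICS_IPI 2                           /* illustrative IPI source number */

/* Returns 0 = nothing pending, 1 = host must handle it,
 * -1 = a wakeup IPI for the guest that has been consumed. */
static int read_intr(int host_ipi_pending, unsigned int xirr, int raced)
{
    unsigned int src = xirr & 0xffffff;

    if (host_ipi_pending)
        return 1;                            /* host asked us to exit */
    if (src == 0)
        return 0;                            /* nothing in the ICP */
    if (src != XICS_IPI)
        return 1;                            /* device interrupt: stash for the host */
    /* It was an IPI: the real code clears the MFRR and EOIs it here. */
    if (raced)
        return 1;                            /* host IPI arrived meanwhile: resend, exit */
    return -1;                               /* wakeup IPI for the guest */
}

int main(void)
{
    printf("%d %d %d %d\n",
           read_intr(1, 0, 0),               /* host IPI pending -> 1  */
           read_intr(0, 0, 0),               /* ICP empty        -> 0  */
           read_intr(0, 0x17, 0),            /* device interrupt -> 1  */
           read_intr(0, XICS_IPI, 0));       /* guest wakeup IPI -> -1 */
    return 0;
}
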
/*
* Save away FP, VMX and VSX registers.
@@ -1879,3 +1965,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
lwz r7,VCPU_VRSAVE(r4)
mtspr SPRN_VRSAVE,r7
blr
+
+/*
+ * We come here if we get any exception or interrupt while we are
+ * executing host real mode code while in guest MMU context.
+ * For now just spin, but we should do something better.
+ */
+kvmppc_bad_host_intr:
+ b .
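
kvmppc_interrupt_hv above now inspects the HSTATE_IN_GUEST byte to decide where an interrupt goes: host real-mode code running in guest MMU context lands in kvmppc_bad_host_intr, a PR guest (when CONFIG_KVM_BOOK3S_PR_POSSIBLE is set) is handed to kvmppc_interrupt_pr, and an HV guest takes the normal exit path. A sketch of that routing on a small enum, outside the patch; the MODE_* names are stand-ins for the KVM_GUEST_MODE_* constants:

#include <stdio.h>

enum guest_mode {
    MODE_NONE,
    MODE_GUEST,          /* PR guest was running */
    MODE_GUEST_HV,       /* HV guest was running */
    MODE_HOST_HV,        /* host real-mode code, guest MMU context */
};

static const char *route_interrupt(enum guest_mode mode)
{
    switch (mode) {
    case MODE_HOST_HV:
        return "bad_host_intr";              /* unexpected: just spin for now */
    case MODE_GUEST:
        return "kvmppc_interrupt_pr";        /* hand off to the PR handler */
    case MODE_GUEST_HV:
        return "hv exit path";               /* save guest state, exit the guest */
    default:
        return "not a KVM interrupt";
    }
}

int main(void)
{
    printf("%s\n", route_interrupt(MODE_GUEST_HV));
    printf("%s\n", route_interrupt(MODE_HOST_HV));
    return 0;
}
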
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 17cfae5497a3..f4dd041c14ea 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -26,8 +26,12 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#define FUNC(name) GLUE(.,name)
+#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU
+
#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name) name
+#define GET_SHADOW_VCPU(reg) lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
+
#endif /* CONFIG_PPC_BOOK3S_XX */
#define VCPU_LOAD_NVGPRS(vcpu) \
@@ -87,8 +91,14 @@ kvm_start_entry:
VCPU_LOAD_NVGPRS(r4)
kvm_start_lightweight:
+ /* Copy registers into shadow vcpu so we can access them in real mode */
+ GET_SHADOW_VCPU(r3)
+ bl FUNC(kvmppc_copy_to_svcpu)
+ nop
+ REST_GPR(4, r1)
#ifdef CONFIG_PPC_BOOK3S_64
+ /* Get the dcbz32 flag */
PPC_LL r3, VCPU_HFLAGS(r4)
rldicl r3, r3, 0, 63 /* r3 &= 1 */
stb r3, HSTATE_RESTORE_HID5(r13)
@@ -111,9 +121,6 @@ kvm_start_lightweight:
*
*/
-.global kvmppc_handler_highmem
-kvmppc_handler_highmem:
-
/*
* Register usage at this point:
*
@@ -125,18 +132,31 @@ kvmppc_handler_highmem:
*
*/
- /* R7 = vcpu */
- PPC_LL r7, GPR4(r1)
+ /* Transfer reg values from shadow vcpu back to vcpu struct */
+ /* On 64-bit, interrupts are still off at this point */
+ PPC_LL r3, GPR4(r1) /* vcpu pointer */
+ GET_SHADOW_VCPU(r4)
+ bl FUNC(kvmppc_copy_from_svcpu)
+ nop
#ifdef CONFIG_PPC_BOOK3S_64
+ /* Re-enable interrupts */
+ ld r3, HSTATE_HOST_MSR(r13)
+ ori r3, r3, MSR_EE
+ MTMSR_EERI(r3)
+
/*
* Reload kernel SPRG3 value.
* No need to save guest value as usermode can't modify SPRG3.
*/
ld r3, PACA_SPRG3(r13)
mtspr SPRN_SPRG3, r3
+
#endif /* CONFIG_PPC_BOOK3S_64 */
+ /* R7 = vcpu */
+ PPC_LL r7, GPR4(r1)
+
PPC_STL r14, VCPU_GPR(R14)(r7)
PPC_STL r15, VCPU_GPR(R15)(r7)
PPC_STL r16, VCPU_GPR(R16)(r7)
@@ -161,7 +181,7 @@ kvmppc_handler_highmem:
/* Restore r3 (kvm_run) and r4 (vcpu) */
REST_2GPRS(3, r1)
- bl FUNC(kvmppc_handle_exit)
+ bl FUNC(kvmppc_handle_exit_pr)
/* If RESUME_GUEST, get back in the loop */
cmpwi r3, RESUME_GUEST
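
The changes to kvm_start_lightweight and the exit path above bracket the guest run with kvmppc_copy_to_svcpu() before entry and kvmppc_copy_from_svcpu() after exit, with interrupts kept off until the copy back has finished (64-bit path). A schematic of that ordering, not part of the patch; the helpers here are printf stand-ins for the real copy routines defined later in book3s_pr.c:

#include <stdio.h>

struct vcpu  { int dummy; };
struct svcpu { int dummy; };

static void copy_to_svcpu(struct svcpu *s, struct vcpu *v)   { (void)s; (void)v; puts("copy vcpu -> shadow"); }
static void copy_from_svcpu(struct vcpu *v, struct svcpu *s) { (void)v; (void)s; puts("copy shadow -> vcpu"); }
static void run_guest_realmode(void)                         { puts("  ... guest runs in real mode ..."); }
static void enable_interrupts(void)                          { puts("re-enable interrupts"); }

int main(void)
{
    struct vcpu v; struct svcpu s;

    copy_to_svcpu(&s, &v);      /* real-mode code only touches the shadow copy */
    run_guest_realmode();
    copy_from_svcpu(&v, &s);    /* pull results back while interrupts are still off */
    enable_interrupts();        /* only now is MSR_EE set again on the 64-bit path  */
    return 0;
}
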
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index da8b13c4b776..5a1ab1250a05 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -28,7 +28,7 @@
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
-#include "trace.h"
+#include "trace_pr.h"
#define PTE_SIZE 12
@@ -56,6 +56,14 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
HPTEG_HASH_BITS_VPTE_LONG);
}
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
+{
+ return hash_64((vpage & 0xffffffff0ULL) >> 4,
+ HPTEG_HASH_BITS_VPTE_64K);
+}
+#endif
+
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
u64 index;
@@ -83,6 +91,15 @@ void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
hlist_add_head_rcu(&pte->list_vpte_long,
&vcpu3s->hpte_hash_vpte_long[index]);
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* Add to vPTE_64k list */
+ index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
+ hlist_add_head_rcu(&pte->list_vpte_64k,
+ &vcpu3s->hpte_hash_vpte_64k[index]);
+#endif
+
+ vcpu3s->hpte_cache_count++;
+
spin_unlock(&vcpu3s->mmu_lock);
}
@@ -113,10 +130,13 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
hlist_del_init_rcu(&pte->list_pte_long);
hlist_del_init_rcu(&pte->list_vpte);
hlist_del_init_rcu(&pte->list_vpte_long);
+#ifdef CONFIG_PPC_BOOK3S_64
+ hlist_del_init_rcu(&pte->list_vpte_64k);
+#endif
+ vcpu3s->hpte_cache_count--;
spin_unlock(&vcpu3s->mmu_lock);
- vcpu3s->hpte_cache_count--;
call_rcu(&pte->rcu_head, free_pte_rcu);
}
@@ -219,6 +239,29 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
rcu_read_unlock();
}
+#ifdef CONFIG_PPC_BOOK3S_64
+/* Flush with mask 0xffffffff0 */
+static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
+{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ struct hlist_head *list;
+ struct hpte_cache *pte;
+ u64 vp_mask = 0xffffffff0ULL;
+
+ list = &vcpu3s->hpte_hash_vpte_64k[
+ kvmppc_mmu_hash_vpte_64k(guest_vp)];
+
+ rcu_read_lock();
+
+ /* Check the list for matching entries and invalidate */
+ hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
+ if ((pte->pte.vpage & vp_mask) == guest_vp)
+ invalidate_pte(vcpu, pte);
+
+ rcu_read_unlock();
+}
+#endif
+
/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
@@ -249,6 +292,11 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
case 0xfffffffffULL:
kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
break;
+#ifdef CONFIG_PPC_BOOK3S_64
+ case 0xffffffff0ULL:
+ kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
+ break;
+#endif
case 0xffffff000ULL:
kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
break;
@@ -285,15 +333,19 @@ struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hpte_cache *pte;
- pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
- vcpu3s->hpte_cache_count++;
-
if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
kvmppc_mmu_pte_flush_all(vcpu);
+ pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
+
return pte;
}
+void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
+{
+ kmem_cache_free(hpte_cache, pte);
+}
+
void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
kvmppc_mmu_pte_flush(vcpu, 0, 0);
@@ -320,6 +372,10 @@ int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
+#ifdef CONFIG_PPC_BOOK3S_64
+ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
+ ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
+#endif
spin_lock_init(&vcpu3s->mmu_lock);
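
The new list_vpte_64k hash above gives cached HPTEs a third bucketing, so a flush with the 0xffffffff0 mask (64k granularity) only has to walk one short list instead of scanning everything. A self-contained sketch of the bucket selection, separate from the patch; a trivial modulo hash stands in for hash_64()/HPTEG_HASH_BITS_VPTE_64K and plain arrays stand in for the RCU hlists:

#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 16

/* Stand-in for kvmppc_mmu_hash_vpte_64k(): same masking, simpler hash. */
static unsigned int hash_vpte_64k(uint64_t vpage)
{
    return (unsigned int)(((vpage & 0xffffffff0ULL) >> 4) % NBUCKETS);
}

int main(void)
{
    uint64_t vpages[] = { 0x12345671ULL, 0x12345672ULL, 0x99999990ULL };
    unsigned int i;

    /* Entries that agree under the 0xffffffff0 mask land in the same
     * bucket, so a 64k flush only needs to walk that one bucket. */
    for (i = 0; i < sizeof(vpages) / sizeof(vpages[0]); ++i)
        printf("vpage %#llx -> bucket %u\n",
               (unsigned long long)vpages[i], hash_vpte_64k(vpages[i]));
    return 0;
}
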
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 27db1e665959..fe14ca3dd171 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -40,8 +40,12 @@
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
+#include <linux/module.h>
-#include "trace.h"
+#include "book3s.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace_pr.h"
/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */
@@ -56,29 +60,25 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
#define HW_PAGE_SIZE PAGE_SIZE
#endif
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
- memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
- sizeof(get_paca()->shadow_vcpu));
svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
svcpu_put(svcpu);
#endif
vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
- current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
+ current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif
}
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
- memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
- sizeof(get_paca()->shadow_vcpu));
to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
svcpu_put(svcpu);
#endif
@@ -87,7 +87,61 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->cpu = -1;
}
-int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
+/* Copy data needed by real-mode code from vcpu to shadow vcpu */
+void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+ struct kvm_vcpu *vcpu)
+{
+ svcpu->gpr[0] = vcpu->arch.gpr[0];
+ svcpu->gpr[1] = vcpu->arch.gpr[1];
+ svcpu->gpr[2] = vcpu->arch.gpr[2];
+ svcpu->gpr[3] = vcpu->arch.gpr[3];
+ svcpu->gpr[4] = vcpu->arch.gpr[4];
+ svcpu->gpr[5] = vcpu->arch.gpr[5];
+ svcpu->gpr[6] = vcpu->arch.gpr[6];
+ svcpu->gpr[7] = vcpu->arch.gpr[7];
+ svcpu->gpr[8] = vcpu->arch.gpr[8];
+ svcpu->gpr[9] = vcpu->arch.gpr[9];
+ svcpu->gpr[10] = vcpu->arch.gpr[10];
+ svcpu->gpr[11] = vcpu->arch.gpr[11];
+ svcpu->gpr[12] = vcpu->arch.gpr[12];
+ svcpu->gpr[13] = vcpu->arch.gpr[13];
+ svcpu->cr = vcpu->arch.cr;
+ svcpu->xer = vcpu->arch.xer;
+ svcpu->ctr = vcpu->arch.ctr;
+ svcpu->lr = vcpu->arch.lr;
+ svcpu->pc = vcpu->arch.pc;
+}
+
+/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
+void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+ struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+ vcpu->arch.gpr[0] = svcpu->gpr[0];
+ vcpu->arch.gpr[1] = svcpu->gpr[1];
+ vcpu->arch.gpr[2] = svcpu->gpr[2];
+ vcpu->arch.gpr[3] = svcpu->gpr[3];
+ vcpu->arch.gpr[4] = svcpu->gpr[4];
+ vcpu->arch.gpr[5] = svcpu->gpr[5];
+ vcpu->arch.gpr[6] = svcpu->gpr[6];
+ vcpu->arch.gpr[7] = svcpu->gpr[7];
+ vcpu->arch.gpr[8] = svcpu->gpr[8];
+ vcpu->arch.gpr[9] = svcpu->gpr[9];
+ vcpu->arch.gpr[10] = svcpu->gpr[10];
+ vcpu->arch.gpr[11] = svcpu->gpr[11];
+ vcpu->arch.gpr[12] = svcpu->gpr[12];
+ vcpu->arch.gpr[13] = svcpu->gpr[13];
+ vcpu->arch.cr = svcpu->cr;
+ vcpu->arch.xer = svcpu->xer;
+ vcpu->arch.ctr = svcpu->ctr;
+ vcpu->arch.lr = svcpu->lr;
+ vcpu->arch.pc = svcpu->pc;
+ vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
+ vcpu->arch.fault_dar = svcpu->fault_dar;
+ vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
+ vcpu->arch.last_inst = svcpu->last_inst;
+}
+
+static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
int r = 1; /* Indicate we want to get back into the guest */
@@ -100,44 +154,69 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
}
/************* MMU Notifiers *************/
+static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
+ unsigned long end)
+{
+ long i;
+ struct kvm_vcpu *vcpu;
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+
+ slots = kvm_memslots(kvm);
+ kvm_for_each_memslot(memslot, slots) {
+ unsigned long hva_start, hva_end;
+ gfn_t gfn, gfn_end;
+
+ hva_start = max(start, memslot->userspace_addr);
+ hva_end = min(end, memslot->userspace_addr +
+ (memslot->npages << PAGE_SHIFT));
+ if (hva_start >= hva_end)
+ continue;
+ /*
+ * {gfn(page) | page intersects with [hva_start, hva_end)} =
+ * {gfn, gfn+1, ..., gfn_end-1}.
+ */
+ gfn = hva_to_gfn_memslot(hva_start, memslot);
+ gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
+ gfn_end << PAGE_SHIFT);
+ }
+}
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
trace_kvm_unmap_hva(hva);
- /*
- * Flush all shadow tlb entries everywhere. This is slow, but
- * we are 100% sure that we catch the to be unmapped page
- */
- kvm_flush_remote_tlbs(kvm);
+ do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
return 0;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
+ unsigned long end)
{
- /* kvm_unmap_hva flushes everything anyways */
- kvm_unmap_hva(kvm, start);
+ do_kvm_unmap_hva(kvm, start, end);
return 0;
}
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
/* XXX could be more clever ;) */
return 0;
}
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
/* XXX could be more clever ;) */
return 0;
}
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
/* The page will get remapped properly on its next fault */
- kvm_unmap_hva(kvm, hva);
+ do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}
/*****************************************/
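
do_kvm_unmap_hva() above clips the [start, end) HVA range against each memslot and converts the result to a GFN range before flushing shadow PTEs. A standalone sketch of just that clipping and conversion, assuming PAGE_SHIFT = 12 and a simplified memslot in place of the kernel's:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct memslot {
    uint64_t userspace_addr;     /* HVA where the slot starts */
    uint64_t npages;
    uint64_t base_gfn;
};

static uint64_t hva_to_gfn(uint64_t hva, const struct memslot *m)
{
    return m->base_gfn + ((hva - m->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
    struct memslot slot = { .userspace_addr = 0x10000000, .npages = 256, .base_gfn = 0x100 };
    uint64_t start = 0x10003800, end = 0x10006000;        /* HVA range to unmap */

    uint64_t hva_start = start > slot.userspace_addr ? start : slot.userspace_addr;
    uint64_t slot_end  = slot.userspace_addr + (slot.npages << PAGE_SHIFT);
    uint64_t hva_end   = end < slot_end ? end : slot_end;

    if (hva_start < hva_end) {
        /* Round up so every page intersecting the range is covered. */
        uint64_t gfn     = hva_to_gfn(hva_start, &slot);
        uint64_t gfn_end = hva_to_gfn(hva_end + PAGE_SIZE - 1, &slot);
        printf("flush gfns [%#llx, %#llx)\n",
               (unsigned long long)gfn, (unsigned long long)gfn_end);
    }
    return 0;
}
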
@@ -159,7 +238,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
vcpu->arch.shadow_msr = smsr;
}
-void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
ulong old_msr = vcpu->arch.shared->msr;
@@ -219,7 +298,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}
-void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
u32 host_pvr;
@@ -256,6 +335,23 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
+ /*
+ * If they're asking for POWER6 or later, set the flag
+ * indicating that we can do multiple large page sizes
+ * and 1TB segments.
+ * Also set the flag that indicates that tlbie has the large
+ * page bit in the RB operand instead of the instruction.
+ */
+ switch (PVR_VER(pvr)) {
+ case PVR_POWER6:
+ case PVR_POWER7:
+ case PVR_POWER7p:
+ case PVR_POWER8:
+ vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
+ BOOK3S_HFLAG_NEW_TLBIE;
+ break;
+ }
+
#ifdef CONFIG_PPC_BOOK3S_32
/* 32 bit Book3S always has 32 byte dcbz */
vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
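
The PVR switch above enables two capabilities for POWER6 and later guests: multiple large page sizes with 1TB segments, and the tlbie form that carries the large-page bit in RB. A small sketch of deriving feature flags from a PVR version field, separate from the patch; the version numbers and flag bits below are illustrative stand-ins for the kernel's PVR_* and BOOK3S_HFLAG_* values:

#include <stdio.h>

#define PVR_VER(pvr)        ((pvr) >> 16)        /* version lives in the top half */
#define HFLAG_MULTI_PGSIZE  0x1                  /* illustrative flag bits */
#define HFLAG_NEW_TLBIE     0x2

enum { VER_POWER6 = 0x3e, VER_POWER7 = 0x3f, VER_POWER7p = 0x4a, VER_POWER8 = 0x4b };

static unsigned int hflags_for_pvr(unsigned int pvr)
{
    switch (PVR_VER(pvr)) {
    case VER_POWER6:
    case VER_POWER7:
    case VER_POWER7p:
    case VER_POWER8:
        return HFLAG_MULTI_PGSIZE | HFLAG_NEW_TLBIE;
    default:
        return 0;
    }
}

int main(void)
{
    printf("hflags=%#x\n", hflags_for_pvr(0x003f0201));   /* a POWER7-style PVR */
    return 0;
}
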
@@ -334,6 +430,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
ulong eaddr, int vec)
{
bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
+ bool iswrite = false;
int r = RESUME_GUEST;
int relocated;
int page_found = 0;
@@ -344,10 +441,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 vsid;
relocated = data ? dr : ir;
+ if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
+ iswrite = true;
/* Resolve real address if translation turned on */
if (relocated) {
- page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
+ page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
} else {
pte.may_execute = true;
pte.may_read = true;
@@ -355,6 +454,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pte.raddr = eaddr & KVM_PAM;
pte.eaddr = eaddr;
pte.vpage = eaddr >> 12;
+ pte.page_size = MMU_PAGE_64K;
}
switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
@@ -388,22 +488,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (page_found == -ENOENT) {
/* Page not found in guest PTE entries */
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
+ vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
vcpu->arch.shared->msr |=
- (svcpu->shadow_srr1 & 0x00000000f8000000ULL);
- svcpu_put(svcpu);
+ vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EPERM) {
/* Storage protection */
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
+ vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
vcpu->arch.shared->msr |=
- svcpu->shadow_srr1 & 0x00000000f8000000ULL;
- svcpu_put(svcpu);
+ vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EINVAL) {
/* Page not found in guest SLB */
@@ -411,12 +507,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
} else if (!is_mmio &&
kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
+ if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
+ /*
+ * There is already a host HPTE there, presumably
+ * a read-only one for a page the guest thinks
+ * is writable, so get rid of it first.
+ */
+ kvmppc_mmu_unmap_page(vcpu, &pte);
+ }
/* The guest's PTE is not mapped yet. Map on the host */
- kvmppc_mmu_map_page(vcpu, &pte);
+ kvmppc_mmu_map_page(vcpu, &pte, iswrite);
if (data)
vcpu->stat.sp_storage++;
else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
- (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
kvmppc_patch_dcbz(vcpu, &pte);
} else {
/* MMIO */
@@ -444,7 +548,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
#ifdef CONFIG_VSX
u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
- u64 *thread_fpr = (u64*)t->fpr;
+ u64 *thread_fpr = &t->fp_state.fpr[0][0];
int i;
/*
@@ -466,14 +570,14 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
/*
* Note that on CPUs with VSX, giveup_fpu stores
* both the traditional FP registers and the added VSX
- * registers into thread.fpr[].
+ * registers into thread.fp_state.fpr[].
*/
if (current->thread.regs->msr & MSR_FP)
giveup_fpu(current);
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
- vcpu->arch.fpscr = t->fpscr.val;
+ vcpu->arch.fpscr = t->fp_state.fpscr;
#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
@@ -486,8 +590,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
if (msr & MSR_VEC) {
if (current->thread.regs->msr & MSR_VEC)
giveup_altivec(current);
- memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
- vcpu->arch.vscr = t->vscr;
+ memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
+ vcpu->arch.vscr = t->vr_state.vscr;
}
#endif
@@ -539,7 +643,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
#ifdef CONFIG_VSX
u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
- u64 *thread_fpr = (u64*)t->fpr;
+ u64 *thread_fpr = &t->fp_state.fpr[0][0];
int i;
/* When we have paired singles, we emulate in software */
@@ -584,15 +688,15 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
#endif
- t->fpscr.val = vcpu->arch.fpscr;
+ t->fp_state.fpscr = vcpu->arch.fpscr;
t->fpexc_mode = 0;
kvmppc_load_up_fpu();
}
if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
- memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
- t->vscr = vcpu->arch.vscr;
+ memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+ t->vr_state.vscr = vcpu->arch.vscr;
t->vrsave = -1;
kvmppc_load_up_altivec();
#endif
@@ -619,13 +723,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
if (lost_ext & MSR_FP)
kvmppc_load_up_fpu();
+#ifdef CONFIG_ALTIVEC
if (lost_ext & MSR_VEC)
kvmppc_load_up_altivec();
+#endif
current->thread.regs->msr |= lost_ext;
}
-int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
+int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int exit_nr)
{
int r = RESUME_HOST;
int s;
@@ -643,25 +749,32 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (exit_nr) {
case BOOK3S_INTERRUPT_INST_STORAGE:
{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- ulong shadow_srr1 = svcpu->shadow_srr1;
+ ulong shadow_srr1 = vcpu->arch.shadow_srr1;
vcpu->stat.pf_instruc++;
#ifdef CONFIG_PPC_BOOK3S_32
/* We set segments as unused segments when invalidating them. So
* treat the respective fault as segment fault. */
- if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
- kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
- r = RESUME_GUEST;
+ {
+ struct kvmppc_book3s_shadow_vcpu *svcpu;
+ u32 sr;
+
+ svcpu = svcpu_get(vcpu);
+ sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
svcpu_put(svcpu);
- break;
+ if (sr == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+ r = RESUME_GUEST;
+ break;
+ }
}
#endif
- svcpu_put(svcpu);
/* only care about PTEG not found errors, but leave NX alone */
if (shadow_srr1 & 0x40000000) {
+ int idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu->stat.sp_instruc++;
} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -682,25 +795,36 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_DATA_STORAGE:
{
ulong dar = kvmppc_get_fault_dar(vcpu);
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- u32 fault_dsisr = svcpu->fault_dsisr;
+ u32 fault_dsisr = vcpu->arch.fault_dsisr;
vcpu->stat.pf_storage++;
#ifdef CONFIG_PPC_BOOK3S_32
/* We set segments as unused segments when invalidating them. So
* treat the respective fault as segment fault. */
- if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
- kvmppc_mmu_map_segment(vcpu, dar);
- r = RESUME_GUEST;
+ {
+ struct kvmppc_book3s_shadow_vcpu *svcpu;
+ u32 sr;
+
+ svcpu = svcpu_get(vcpu);
+ sr = svcpu->sr[dar >> SID_SHIFT];
svcpu_put(svcpu);
- break;
+ if (sr == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, dar);
+ r = RESUME_GUEST;
+ break;
+ }
}
#endif
- svcpu_put(svcpu);
- /* The only case we need to handle is missing shadow PTEs */
- if (fault_dsisr & DSISR_NOHPTE) {
+ /*
+ * We need to handle missing shadow PTEs, and
+ * protection faults due to us mapping a page read-only
+ * when the guest thinks it is writable.
+ */
+ if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
+ int idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
} else {
vcpu->arch.shared->dar = dar;
vcpu->arch.shared->dsisr = fault_dsisr;
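
Both page-fault paths above now take the kvm->srcu read lock around kvmppc_handle_pagefault(), since that path walks memslots, which are protected by SRCU. A minimal model of the lock/unlock bracket, outside the patch; the struct and functions below are userspace stand-ins, not the kernel's SRCU API:

#include <stdio.h>

/* Fake SRCU: only the bracketing matters here, not the real semantics. */
struct srcu { int readers; };

static int  srcu_lock(struct srcu *s)            { return s->readers++; }
static void srcu_unlock(struct srcu *s, int idx) { (void)idx; s->readers--; }

static int handle_pagefault(void) { puts("walk memslots, map page"); return 0; }

int main(void)
{
    struct srcu kvm_srcu = { 0 };

    int idx = srcu_lock(&kvm_srcu);    /* memslot array may not be freed while held */
    int r = handle_pagefault();
    srcu_unlock(&kvm_srcu, idx);

    printf("r=%d readers=%d\n", r, kvm_srcu.readers);
    return 0;
}
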
@@ -743,13 +867,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
{
enum emulation_result er;
- struct kvmppc_book3s_shadow_vcpu *svcpu;
ulong flags;
program_interrupt:
- svcpu = svcpu_get(vcpu);
- flags = svcpu->shadow_srr1 & 0x1f0000ull;
- svcpu_put(svcpu);
+ flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
@@ -798,7 +919,7 @@ program_interrupt:
ulong cmd = kvmppc_get_gpr(vcpu, 3);
int i;
-#ifdef CONFIG_KVM_BOOK3S_64_PR
+#ifdef CONFIG_PPC_BOOK3S_64
if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
r = RESUME_GUEST;
break;
@@ -881,9 +1002,7 @@ program_interrupt:
break;
default:
{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- ulong shadow_srr1 = svcpu->shadow_srr1;
- svcpu_put(svcpu);
+ ulong shadow_srr1 = vcpu->arch.shadow_srr1;
/* Ugh - bork here! What did we get? */
printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
@@ -920,8 +1039,8 @@ program_interrupt:
return r;
}
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int i;
@@ -947,13 +1066,13 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
return 0;
}
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int i;
- kvmppc_set_pvr(vcpu, sregs->pvr);
+ kvmppc_set_pvr_pr(vcpu, sregs->pvr);
vcpu3s->sdr1 = sregs->u.s.sdr1;
if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
@@ -983,7 +1102,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return 0;
}
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = 0;
@@ -1012,7 +1132,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
return r;
}
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = 0;
@@ -1042,28 +1163,30 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
return r;
}
-int kvmppc_core_check_processor_compat(void)
-{
- return 0;
-}
-
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
+ unsigned int id)
{
struct kvmppc_vcpu_book3s *vcpu_book3s;
struct kvm_vcpu *vcpu;
int err = -ENOMEM;
unsigned long p;
- vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
- if (!vcpu_book3s)
+ vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ if (!vcpu)
goto out;
- vcpu_book3s->shadow_vcpu =
- kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
- if (!vcpu_book3s->shadow_vcpu)
+ vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
+ if (!vcpu_book3s)
goto free_vcpu;
+ vcpu->arch.book3s = vcpu_book3s;
+
+#ifdef CONFIG_KVM_BOOK3S_32
+ vcpu->arch.shadow_vcpu =
+ kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
+ if (!vcpu->arch.shadow_vcpu)
+ goto free_vcpu3s;
+#endif
- vcpu = &vcpu_book3s->vcpu;
err = kvm_vcpu_init(vcpu, kvm, id);
if (err)
goto free_shadow_vcpu;
@@ -1076,13 +1199,19 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
#ifdef CONFIG_PPC_BOOK3S_64
- /* default to book3s_64 (970fx) */
+ /*
+ * Default to the same as the host if we're on sufficiently
+ * recent machine that we have 1TB segments;
+ * otherwise default to PPC970FX.
+ */
vcpu->arch.pvr = 0x3C0301;
+ if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+ vcpu->arch.pvr = mfspr(SPRN_PVR);
#else
/* default to book3s_32 (750) */
vcpu->arch.pvr = 0x84202;
#endif
- kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+ kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
vcpu->arch.slb_nr = 64;
vcpu->arch.shadow_msr = MSR_USER64;
@@ -1096,32 +1225,37 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
uninit_vcpu:
kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
- kfree(vcpu_book3s->shadow_vcpu);
-free_vcpu:
+#ifdef CONFIG_KVM_BOOK3S_32
+ kfree(vcpu->arch.shadow_vcpu);
+free_vcpu3s:
+#endif
vfree(vcpu_book3s);
+free_vcpu:
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
return ERR_PTR(err);
}
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
kvm_vcpu_uninit(vcpu);
- kfree(vcpu_book3s->shadow_vcpu);
+#ifdef CONFIG_KVM_BOOK3S_32
+ kfree(vcpu->arch.shadow_vcpu);
+#endif
vfree(vcpu_book3s);
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
}
-int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret;
- double fpr[32][TS_FPRWIDTH];
- unsigned int fpscr;
+ struct thread_fp_state fp;
int fpexc_mode;
#ifdef CONFIG_ALTIVEC
- vector128 vr[32];
- vector128 vscr;
+ struct thread_vr_state vr;
unsigned long uninitialized_var(vrsave);
int used_vr;
#endif
@@ -1153,8 +1287,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* Save FPU state in stack */
if (current->thread.regs->msr & MSR_FP)
giveup_fpu(current);
- memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
- fpscr = current->thread.fpscr.val;
+ fp = current->thread.fp_state;
fpexc_mode = current->thread.fpexc_mode;
#ifdef CONFIG_ALTIVEC
@@ -1163,8 +1296,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (used_vr) {
if (current->thread.regs->msr & MSR_VEC)
giveup_altivec(current);
- memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
- vscr = current->thread.vscr;
+ vr = current->thread.vr_state;
vrsave = current->thread.vrsave;
}
#endif
@@ -1196,15 +1328,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
current->thread.regs->msr = ext_msr;
/* Restore FPU/VSX state from stack */
- memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
- current->thread.fpscr.val = fpscr;
+ current->thread.fp_state = fp;
current->thread.fpexc_mode = fpexc_mode;
#ifdef CONFIG_ALTIVEC
/* Restore Altivec state from stack */
if (used_vr && current->thread.used_vr) {
- memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
- current->thread.vscr = vscr;
+ current->thread.vr_state = vr;
current->thread.vrsave = vrsave;
}
current->thread.used_vr = used_vr;
@@ -1222,8 +1352,8 @@ out:
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log)
+static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
+ struct kvm_dirty_log *log)
{
struct kvm_memory_slot *memslot;
struct kvm_vcpu *vcpu;
@@ -1258,67 +1388,100 @@ out:
return r;
}
-#ifdef CONFIG_PPC64
-int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
{
- info->flags = KVM_PPC_1T_SEGMENTS;
-
- /* SLB is always 64 entries */
- info->slb_size = 64;
-
- /* Standard 4k base page size segment */
- info->sps[0].page_shift = 12;
- info->sps[0].slb_enc = 0;
- info->sps[0].enc[0].page_shift = 12;
- info->sps[0].enc[0].pte_enc = 0;
-
- /* Standard 16M large page size segment */
- info->sps[1].page_shift = 24;
- info->sps[1].slb_enc = SLB_VSID_L;
- info->sps[1].enc[0].page_shift = 24;
- info->sps[1].enc[0].pte_enc = 0;
+ return;
+}
+static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem)
+{
return 0;
}
-#endif /* CONFIG_PPC64 */
-void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old)
{
+ return;
}
-int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
- unsigned long npages)
+static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
{
- return 0;
+ return;
}
-int kvmppc_core_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem)
+static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
+ unsigned long npages)
{
return 0;
}
-void kvmppc_core_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old)
+
+#ifdef CONFIG_PPC64
+static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
+ struct kvm_ppc_smmu_info *info)
{
-}
+ long int i;
+ struct kvm_vcpu *vcpu;
+
+ info->flags = 0;
+
+ /* SLB is always 64 entries */
+ info->slb_size = 64;
+
+ /* Standard 4k base page size segment */
+ info->sps[0].page_shift = 12;
+ info->sps[0].slb_enc = 0;
+ info->sps[0].enc[0].page_shift = 12;
+ info->sps[0].enc[0].pte_enc = 0;
+
+ /*
+ * 64k large page size.
+ * We only want to put this in if the CPUs we're emulating
+ * support it, but unfortunately we don't have a vcpu easily
+ * to hand here to test. Just pick the first vcpu, and if
+ * that doesn't exist yet, report the minimum capability,
+ * i.e., no 64k pages.
+ * 1T segment support goes along with 64k pages.
+ */
+ i = 1;
+ vcpu = kvm_get_vcpu(kvm, 0);
+ if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
+ info->flags = KVM_PPC_1T_SEGMENTS;
+ info->sps[i].page_shift = 16;
+ info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
+ info->sps[i].enc[0].page_shift = 16;
+ info->sps[i].enc[0].pte_enc = 1;
+ ++i;
+ }
+
+ /* Standard 16M large page size segment */
+ info->sps[i].page_shift = 24;
+ info->sps[i].slb_enc = SLB_VSID_L;
+ info->sps[i].enc[0].page_shift = 24;
+ info->sps[i].enc[0].pte_enc = 0;
-void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+ return 0;
+}
+#else
+static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
+ struct kvm_ppc_smmu_info *info)
{
+ /* We should not get called */
+ BUG();
}
+#endif /* CONFIG_PPC64 */
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
-#ifdef CONFIG_PPC64
- INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
- INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
-#endif
+ mutex_init(&kvm->arch.hpt_mutex);
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
spin_lock(&kvm_global_user_count_lock);
@@ -1329,7 +1492,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
return 0;
}
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
@@ -1344,26 +1507,81 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
}
}
-static int kvmppc_book3s_init(void)
+static int kvmppc_core_check_processor_compat_pr(void)
{
- int r;
+ /* we are always compatible */
+ return 0;
+}
- r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
- THIS_MODULE);
+static long kvm_arch_vm_ioctl_pr(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ return -ENOTTY;
+}
- if (r)
+static struct kvmppc_ops kvm_ops_pr = {
+ .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
+ .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
+ .get_one_reg = kvmppc_get_one_reg_pr,
+ .set_one_reg = kvmppc_set_one_reg_pr,
+ .vcpu_load = kvmppc_core_vcpu_load_pr,
+ .vcpu_put = kvmppc_core_vcpu_put_pr,
+ .set_msr = kvmppc_set_msr_pr,
+ .vcpu_run = kvmppc_vcpu_run_pr,
+ .vcpu_create = kvmppc_core_vcpu_create_pr,
+ .vcpu_free = kvmppc_core_vcpu_free_pr,
+ .check_requests = kvmppc_core_check_requests_pr,
+ .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
+ .flush_memslot = kvmppc_core_flush_memslot_pr,
+ .prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
+ .commit_memory_region = kvmppc_core_commit_memory_region_pr,
+ .unmap_hva = kvm_unmap_hva_pr,
+ .unmap_hva_range = kvm_unmap_hva_range_pr,
+ .age_hva = kvm_age_hva_pr,
+ .test_age_hva = kvm_test_age_hva_pr,
+ .set_spte_hva = kvm_set_spte_hva_pr,
+ .mmu_destroy = kvmppc_mmu_destroy_pr,
+ .free_memslot = kvmppc_core_free_memslot_pr,
+ .create_memslot = kvmppc_core_create_memslot_pr,
+ .init_vm = kvmppc_core_init_vm_pr,
+ .destroy_vm = kvmppc_core_destroy_vm_pr,
+ .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
+ .emulate_op = kvmppc_core_emulate_op_pr,
+ .emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
+ .emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
+ .fast_vcpu_kick = kvm_vcpu_kick,
+ .arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
+};
+
+
+int kvmppc_book3s_init_pr(void)
+{
+ int r;
+
+ r = kvmppc_core_check_processor_compat_pr();
+ if (r < 0)
return r;
- r = kvmppc_mmu_hpte_sysinit();
+ kvm_ops_pr.owner = THIS_MODULE;
+ kvmppc_pr_ops = &kvm_ops_pr;
+ r = kvmppc_mmu_hpte_sysinit();
return r;
}
-static void kvmppc_book3s_exit(void)
+void kvmppc_book3s_exit_pr(void)
{
+ kvmppc_pr_ops = NULL;
kvmppc_mmu_hpte_sysexit();
- kvm_exit();
}
-module_init(kvmppc_book3s_init);
-module_exit(kvmppc_book3s_exit);
+/*
+ * We only support separate modules for book3s 64
+ */
+#ifdef CONFIG_PPC_BOOK3S_64
+
+module_init(kvmppc_book3s_init_pr);
+module_exit(kvmppc_book3s_exit_pr);
+
+MODULE_LICENSE("GPL");
+#endif
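
The net effect of the book3s_pr.c changes above is that the PR backend no longer provides the kvmppc_core_* entry points directly; it fills in a kvmppc_ops table (kvm_ops_pr) and publishes it through the kvmppc_pr_ops pointer at module init. As a rough standalone illustration of that vtable pattern only (plain user-space C; every name below is made up for the sketch and is not taken from the patch):

    /* Standalone sketch (not kernel code) of the ops-table indirection:
     * a backend fills in a struct of callbacks, registers it in a global
     * pointer at init time, and generic code dispatches through it. */
    #include <stdio.h>

    struct vm;                                   /* opaque VM handle */
    struct vm_ops {
            int  (*init_vm)(struct vm *vm);
            void (*destroy_vm)(struct vm *vm);
    };

    struct vm { const struct vm_ops *ops; };

    static int pr_init_vm(struct vm *vm)      { (void)vm; puts("PR init");    return 0; }
    static void pr_destroy_vm(struct vm *vm)  { (void)vm; puts("PR destroy"); }

    static const struct vm_ops pr_ops = {        /* plays the role of kvm_ops_pr */
            .init_vm    = pr_init_vm,
            .destroy_vm = pr_destroy_vm,
    };

    static const struct vm_ops *registered_ops;  /* plays the role of kvmppc_pr_ops */

    int main(void)
    {
            struct vm vm;

            registered_ops = &pr_ops;            /* done at module init above */
            vm.ops = registered_ops;             /* chosen per VM at creation time */
            vm.ops->init_vm(&vm);
            vm.ops->destroy_vm(&vm);
            return 0;
    }

The same pattern is repeated below for the e500 and e500mc backends, which is what lets a single kernel image carry several KVM flavours and pick one per VM.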
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index da0e0bc268bd..5efa97b993d8 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -21,6 +21,8 @@
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
+#define HPTE_SIZE 16 /* bytes per HPT entry */
+
static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
@@ -40,32 +42,41 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
long pte_index = kvmppc_get_gpr(vcpu, 5);
unsigned long pteg[2 * 8];
unsigned long pteg_addr, i, *hpte;
+ long int ret;
+ i = pte_index & 7;
pte_index &= ~7UL;
pteg_addr = get_pteg_addr(vcpu, pte_index);
+ mutex_lock(&vcpu->kvm->arch.hpt_mutex);
copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
hpte = pteg;
+ ret = H_PTEG_FULL;
if (likely((flags & H_EXACT) == 0)) {
- pte_index &= ~7UL;
for (i = 0; ; ++i) {
if (i == 8)
- return H_PTEG_FULL;
+ goto done;
if ((*hpte & HPTE_V_VALID) == 0)
break;
hpte += 2;
}
} else {
- i = kvmppc_get_gpr(vcpu, 5) & 7UL;
hpte += i * 2;
+ if (*hpte & HPTE_V_VALID)
+ goto done;
}
hpte[0] = kvmppc_get_gpr(vcpu, 6);
hpte[1] = kvmppc_get_gpr(vcpu, 7);
- copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg));
- kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+ pteg_addr += i * HPTE_SIZE;
+ copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
kvmppc_set_gpr(vcpu, 4, pte_index | i);
+ ret = H_SUCCESS;
+
+ done:
+ mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
+ kvmppc_set_gpr(vcpu, 3, ret);
return EMULATE_DONE;
}
@@ -77,26 +88,31 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
unsigned long v = 0, pteg, rb;
unsigned long pte[2];
+ long int ret;
pteg = get_pteg_addr(vcpu, pte_index);
+ mutex_lock(&vcpu->kvm->arch.hpt_mutex);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ ret = H_NOT_FOUND;
if ((pte[0] & HPTE_V_VALID) == 0 ||
((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
- ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) {
- kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
- return EMULATE_DONE;
- }
+ ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
+ goto done;
copy_to_user((void __user *)pteg, &v, sizeof(v));
rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
- kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+ ret = H_SUCCESS;
kvmppc_set_gpr(vcpu, 4, pte[0]);
kvmppc_set_gpr(vcpu, 5, pte[1]);
+ done:
+ mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
+ kvmppc_set_gpr(vcpu, 3, ret);
+
return EMULATE_DONE;
}
@@ -124,6 +140,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
int paramnr = 4;
int ret = H_SUCCESS;
+ mutex_lock(&vcpu->kvm->arch.hpt_mutex);
for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
@@ -172,6 +189,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
}
kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
}
+ mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
kvmppc_set_gpr(vcpu, 3, ret);
return EMULATE_DONE;
@@ -184,15 +202,16 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
unsigned long rb, pteg, r, v;
unsigned long pte[2];
+ long int ret;
pteg = get_pteg_addr(vcpu, pte_index);
+ mutex_lock(&vcpu->kvm->arch.hpt_mutex);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ ret = H_NOT_FOUND;
if ((pte[0] & HPTE_V_VALID) == 0 ||
- ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) {
- kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
- return EMULATE_DONE;
- }
+ ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
+ goto done;
v = pte[0];
r = pte[1];
@@ -207,8 +226,11 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
rb = compute_tlbie_rb(v, r, pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
copy_to_user((void __user *)pteg, pte, sizeof(pte));
+ ret = H_SUCCESS;
- kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+ done:
+ mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
+ kvmppc_set_gpr(vcpu, 3, ret);
return EMULATE_DONE;
}
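
All of the hypercall handlers in book3s_pr_papr.c now follow the same shape: take kvm->arch.hpt_mutex before reading or writing the guest hash table, accumulate the hcall status in ret, and leave through a single done: label that drops the mutex and stores the status in r3, so no early return can leak the lock. A small user-space sketch of that shape, with hypothetical names and a pthread mutex standing in for the kernel mutex:

    #include <pthread.h>

    static pthread_mutex_t hpt_lock = PTHREAD_MUTEX_INITIALIZER;

    /* illustrative stand-ins for the hcall status codes */
    enum { H_SUCCESS = 0, H_NOT_FOUND = -1 };

    static long update_entry(long *table, long idx, long want, long new_val)
    {
            long ret;

            pthread_mutex_lock(&hpt_lock);   /* like mutex_lock(&kvm->arch.hpt_mutex) */
            ret = H_NOT_FOUND;
            if (table[idx] != want)
                    goto done;               /* error paths still unlock */
            table[idx] = new_val;
            ret = H_SUCCESS;
    done:
            pthread_mutex_unlock(&hpt_lock);
            return ret;                      /* the caller puts this in r3 */
    }

    int main(void)
    {
            long table[8] = { 42 };

            return update_entry(table, 0, 42, 7) == H_SUCCESS ? 0 : 1;
    }
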
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 8f7633e3afb8..a38c4c9edab8 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -38,32 +38,6 @@
#define FUNC(name) GLUE(.,name)
- .globl kvmppc_skip_interrupt
-kvmppc_skip_interrupt:
- /*
- * Here all GPRs are unchanged from when the interrupt happened
- * except for r13, which is saved in SPRG_SCRATCH0.
- */
- mfspr r13, SPRN_SRR0
- addi r13, r13, 4
- mtspr SPRN_SRR0, r13
- GET_SCRATCH0(r13)
- rfid
- b .
-
- .globl kvmppc_skip_Hinterrupt
-kvmppc_skip_Hinterrupt:
- /*
- * Here all GPRs are unchanged from when the interrupt happened
- * except for r13, which is saved in SPRG_SCRATCH0.
- */
- mfspr r13, SPRN_HSRR0
- addi r13, r13, 4
- mtspr SPRN_HSRR0, r13
- GET_SCRATCH0(r13)
- hrfid
- b .
-
#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name) name
@@ -179,11 +153,15 @@ _GLOBAL(kvmppc_entry_trampoline)
li r6, MSR_IR | MSR_DR
andc r6, r5, r6 /* Clear DR and IR in MSR value */
+#ifdef CONFIG_PPC_BOOK3S_32
/*
* Set EE in HOST_MSR so that it's enabled when we get into our
- * C exit handler function
+ * C exit handler function. On 64-bit we delay enabling
+ * interrupts until we have finished transferring stuff
+ * to or from the PACA.
*/
ori r5, r5, MSR_EE
+#endif
mtsrr0 r7
mtsrr1 r6
RFI
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 3219ba895246..cf95cdef73c9 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -260,6 +260,7 @@ fail:
*/
return rc;
}
+EXPORT_SYMBOL_GPL(kvmppc_rtas_hcall);
void kvmppc_rtas_tokens_free(struct kvm *kvm)
{
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 1abe4788191a..bc50c97751d3 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -161,8 +161,8 @@ kvmppc_handler_trampoline_enter_end:
.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
-.global kvmppc_interrupt
-kvmppc_interrupt:
+.global kvmppc_interrupt_pr
+kvmppc_interrupt_pr:
/* Register usage at this point:
*
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index a3a5cb8ee7ea..02a17dcf1610 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -818,7 +818,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
}
/* Check for real mode returning too hard */
- if (xics->real_mode)
+ if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
return kvmppc_xics_rm_complete(vcpu, req);
switch (req) {
@@ -840,6 +840,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
return rc;
}
+EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
/* -- Initialisation code etc. -- */
@@ -1250,13 +1251,13 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
xics_debugfs_init(xics);
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
/* Enable real mode support */
xics->real_mode = ENABLE_REALMODE;
xics->real_mode_dbg = DEBUG_REALMODE;
}
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
return 0;
}
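
The xics change only takes the real-mode completion path when the VM really runs on the HV backend. is_kvmppc_hv_enabled() is introduced elsewhere in this series; presumably it just compares the VM's ops pointer with the registered HV ops table, roughly:

    /* Presumed shape of the helper added by this series (in kvm_ppc.h),
     * shown only for context; it is not part of this hunk. */
    static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
    {
            return kvm->arch.kvm_ops == kvmppc_hv_ops;
    }
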
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 17722d82f1d1..53e65a210b9a 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -40,7 +40,9 @@
#include "timing.h"
#include "booke.h"
-#include "trace.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace_booke.h"
unsigned long kvmppc_booke_handlers;
@@ -133,6 +135,29 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
#endif
}
+static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
+{
+ /* Synchronize guest's desire to get debug interrupts into shadow MSR */
+#ifndef CONFIG_KVM_BOOKE_HV
+ vcpu->arch.shadow_msr &= ~MSR_DE;
+ vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
+#endif
+
+ /* Force enable debug interrupts when user space wants to debug */
+ if (vcpu->guest_debug) {
+#ifdef CONFIG_KVM_BOOKE_HV
+ /*
+ * Since there is no shadow MSR, sync MSR_DE into the guest
+ * visible MSR.
+ */
+ vcpu->arch.shared->msr |= MSR_DE;
+#else
+ vcpu->arch.shadow_msr |= MSR_DE;
+ vcpu->arch.shared->msr &= ~MSR_DE;
+#endif
+ }
+}
+
/*
* Helper function for "full" MSR writes. No need to call this if only
* EE/CE/ME/DE/RI are changing.
@@ -150,6 +175,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
kvmppc_mmu_msr_notify(vcpu, old_msr);
kvmppc_vcpu_sync_spe(vcpu);
kvmppc_vcpu_sync_fpu(vcpu);
+ kvmppc_vcpu_sync_debug(vcpu);
}
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
@@ -655,10 +681,10 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret, s;
+ struct thread_struct thread;
#ifdef CONFIG_PPC_FPU
- unsigned int fpscr;
+ struct thread_fp_state fp;
int fpexc_mode;
- u64 fpr[32];
#endif
if (!vcpu->arch.sane) {
@@ -677,13 +703,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
#ifdef CONFIG_PPC_FPU
/* Save userspace FPU state in stack */
enable_kernel_fp();
- memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
- fpscr = current->thread.fpscr.val;
+ fp = current->thread.fp_state;
fpexc_mode = current->thread.fpexc_mode;
/* Restore guest FPU state to thread */
- memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
- current->thread.fpscr.val = vcpu->arch.fpscr;
+ memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
+ sizeof(vcpu->arch.fpr));
+ current->thread.fp_state.fpscr = vcpu->arch.fpscr;
/*
* Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -696,6 +722,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_load_guest_fp(vcpu);
#endif
+ /* Switch to guest debug context */
+ thread.debug = vcpu->arch.shadow_dbg_reg;
+ switch_booke_debug_regs(&thread);
+ thread.debug = current->thread.debug;
+ current->thread.debug = vcpu->arch.shadow_dbg_reg;
+
kvmppc_fix_ee_before_entry();
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -703,18 +735,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* No need for kvm_guest_exit. It's done in handle_exit.
We also get here with interrupts enabled. */
+ /* Switch back to user space debug context */
+ switch_booke_debug_regs(&thread);
+ current->thread.debug = thread.debug;
+
#ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);
vcpu->fpu_active = 0;
/* Save guest FPU state from thread */
- memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
- vcpu->arch.fpscr = current->thread.fpscr.val;
+ memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
+ sizeof(vcpu->arch.fpr));
+ vcpu->arch.fpscr = current->thread.fp_state.fpscr;
/* Restore userspace FPU state from stack */
- memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
- current->thread.fpscr.val = fpscr;
+ current->thread.fp_state = fp;
current->thread.fpexc_mode = fpexc_mode;
#endif
@@ -758,6 +794,30 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
}
+static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
+ u32 dbsr = vcpu->arch.dbsr;
+
+ run->debug.arch.status = 0;
+ run->debug.arch.address = vcpu->arch.pc;
+
+ if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
+ run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
+ } else {
+ if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
+ run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
+ else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
+ run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
+ if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
+ run->debug.arch.address = dbg_reg->dac1;
+ else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
+ run->debug.arch.address = dbg_reg->dac2;
+ }
+
+ return RESUME_HOST;
+}
+
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
ulong r1, ip, msr, lr;
@@ -818,6 +878,11 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
case BOOKE_INTERRUPT_CRITICAL:
unknown_exception(&regs);
break;
+ case BOOKE_INTERRUPT_DEBUG:
+ /* Save DBSR before preemption is enabled */
+ vcpu->arch.dbsr = mfspr(SPRN_DBSR);
+ kvmppc_clear_dbsr();
+ break;
}
}
@@ -1135,18 +1200,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
case BOOKE_INTERRUPT_DEBUG: {
- u32 dbsr;
-
- vcpu->arch.pc = mfspr(SPRN_CSRR0);
-
- /* clear IAC events in DBSR register */
- dbsr = mfspr(SPRN_DBSR);
- dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
- mtspr(SPRN_DBSR, dbsr);
-
- run->exit_reason = KVM_EXIT_DEBUG;
+ r = kvmppc_handle_debug(run, vcpu);
+ if (r == RESUME_HOST)
+ run->exit_reason = KVM_EXIT_DEBUG;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
- r = RESUME_HOST;
break;
}
@@ -1197,7 +1254,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, 0);
#ifndef CONFIG_KVM_BOOKE_HV
- vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
+ vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
vcpu->arch.shadow_pid = 1;
vcpu->arch.shared->msr = 0;
#endif
@@ -1359,7 +1416,7 @@ static int set_sregs_arch206(struct kvm_vcpu *vcpu,
return 0;
}
-void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
sregs->u.e.features |= KVM_SREGS_E_IVOR;
@@ -1379,6 +1436,7 @@ void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
+ return 0;
}
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
@@ -1413,8 +1471,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
get_sregs_base(vcpu, sregs);
get_sregs_arch206(vcpu, sregs);
- kvmppc_core_get_sregs(vcpu, sregs);
- return 0;
+ return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -1433,7 +1490,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
if (ret < 0)
return ret;
- return kvmppc_core_set_sregs(vcpu, sregs);
+ return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
@@ -1441,7 +1498,6 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
int r = 0;
union kvmppc_one_reg val;
int size;
- long int i;
size = one_reg_size(reg->id);
if (size > sizeof(val))
@@ -1449,16 +1505,24 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
switch (reg->id) {
case KVM_REG_PPC_IAC1:
+ val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
+ break;
case KVM_REG_PPC_IAC2:
+ val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
+ break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case KVM_REG_PPC_IAC3:
+ val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
+ break;
case KVM_REG_PPC_IAC4:
- i = reg->id - KVM_REG_PPC_IAC1;
- val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac[i]);
+ val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
break;
+#endif
case KVM_REG_PPC_DAC1:
+ val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
+ break;
case KVM_REG_PPC_DAC2:
- i = reg->id - KVM_REG_PPC_DAC1;
- val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac[i]);
+ val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
break;
case KVM_REG_PPC_EPR: {
u32 epr = get_guest_epr(vcpu);
@@ -1477,10 +1541,13 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
val = get_reg_val(reg->id, vcpu->arch.tsr);
break;
case KVM_REG_PPC_DEBUG_INST:
- val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV);
+ val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
+ break;
+ case KVM_REG_PPC_VRSAVE:
+ val = get_reg_val(reg->id, vcpu->arch.vrsave);
break;
default:
- r = kvmppc_get_one_reg(vcpu, reg->id, &val);
+ r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
break;
}
@@ -1498,7 +1565,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
int r = 0;
union kvmppc_one_reg val;
int size;
- long int i;
size = one_reg_size(reg->id);
if (size > sizeof(val))
@@ -1509,16 +1575,24 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
switch (reg->id) {
case KVM_REG_PPC_IAC1:
+ vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
+ break;
case KVM_REG_PPC_IAC2:
+ vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
+ break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case KVM_REG_PPC_IAC3:
+ vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
+ break;
case KVM_REG_PPC_IAC4:
- i = reg->id - KVM_REG_PPC_IAC1;
- vcpu->arch.dbg_reg.iac[i] = set_reg_val(reg->id, val);
+ vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
break;
+#endif
case KVM_REG_PPC_DAC1:
+ vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
+ break;
case KVM_REG_PPC_DAC2:
- i = reg->id - KVM_REG_PPC_DAC1;
- vcpu->arch.dbg_reg.dac[i] = set_reg_val(reg->id, val);
+ vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
break;
case KVM_REG_PPC_EPR: {
u32 new_epr = set_reg_val(reg->id, val);
@@ -1552,20 +1626,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
kvmppc_set_tcr(vcpu, tcr);
break;
}
+ case KVM_REG_PPC_VRSAVE:
+ vcpu->arch.vrsave = set_reg_val(reg->id, val);
+ break;
default:
- r = kvmppc_set_one_reg(vcpu, reg->id, &val);
+ r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
break;
}
return r;
}
-int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
- struct kvm_guest_debug *dbg)
-{
- return -EINVAL;
-}
-
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -ENOTSUPP;
@@ -1590,12 +1661,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
return -ENOTSUPP;
}
-void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}
-int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned long npages)
{
return 0;
@@ -1671,6 +1742,157 @@ void kvmppc_decrementer_func(unsigned long data)
kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}
+static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
+ uint64_t addr, int index)
+{
+ switch (index) {
+ case 0:
+ dbg_reg->dbcr0 |= DBCR0_IAC1;
+ dbg_reg->iac1 = addr;
+ break;
+ case 1:
+ dbg_reg->dbcr0 |= DBCR0_IAC2;
+ dbg_reg->iac2 = addr;
+ break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+ case 2:
+ dbg_reg->dbcr0 |= DBCR0_IAC3;
+ dbg_reg->iac3 = addr;
+ break;
+ case 3:
+ dbg_reg->dbcr0 |= DBCR0_IAC4;
+ dbg_reg->iac4 = addr;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ dbg_reg->dbcr0 |= DBCR0_IDM;
+ return 0;
+}
+
+static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
+ int type, int index)
+{
+ switch (index) {
+ case 0:
+ if (type & KVMPPC_DEBUG_WATCH_READ)
+ dbg_reg->dbcr0 |= DBCR0_DAC1R;
+ if (type & KVMPPC_DEBUG_WATCH_WRITE)
+ dbg_reg->dbcr0 |= DBCR0_DAC1W;
+ dbg_reg->dac1 = addr;
+ break;
+ case 1:
+ if (type & KVMPPC_DEBUG_WATCH_READ)
+ dbg_reg->dbcr0 |= DBCR0_DAC2R;
+ if (type & KVMPPC_DEBUG_WATCH_WRITE)
+ dbg_reg->dbcr0 |= DBCR0_DAC2W;
+ dbg_reg->dac2 = addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dbg_reg->dbcr0 |= DBCR0_IDM;
+ return 0;
+}
+void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
+{
+ /* XXX: Add similar MSR protection for BookE-PR */
+#ifdef CONFIG_KVM_BOOKE_HV
+ BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
+ if (set) {
+ if (prot_bitmap & MSR_UCLE)
+ vcpu->arch.shadow_msrp |= MSRP_UCLEP;
+ if (prot_bitmap & MSR_DE)
+ vcpu->arch.shadow_msrp |= MSRP_DEP;
+ if (prot_bitmap & MSR_PMM)
+ vcpu->arch.shadow_msrp |= MSRP_PMMP;
+ } else {
+ if (prot_bitmap & MSR_UCLE)
+ vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
+ if (prot_bitmap & MSR_DE)
+ vcpu->arch.shadow_msrp &= ~MSRP_DEP;
+ if (prot_bitmap & MSR_PMM)
+ vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
+ }
+#endif
+}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
+{
+ struct debug_reg *dbg_reg;
+ int n, b = 0, w = 0;
+
+ if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
+ vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
+ vcpu->guest_debug = 0;
+ kvm_guest_protect_msr(vcpu, MSR_DE, false);
+ return 0;
+ }
+
+ kvm_guest_protect_msr(vcpu, MSR_DE, true);
+ vcpu->guest_debug = dbg->control;
+ vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
+ /* Set DBCR0_EDM in guest visible DBCR0 register. */
+ vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;
+
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+
+ /* Code below handles only HW breakpoints */
+ dbg_reg = &(vcpu->arch.shadow_dbg_reg);
+
+#ifdef CONFIG_KVM_BOOKE_HV
+ /*
+ * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1
+ * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0
+ */
+ dbg_reg->dbcr1 = 0;
+ dbg_reg->dbcr2 = 0;
+#else
+ /*
+ * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
+ * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
+ * is set.
+ */
+ dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
+ DBCR1_IAC4US;
+ dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
+#endif
+
+ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+ return 0;
+
+ for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
+ uint64_t addr = dbg->arch.bp[n].addr;
+ uint32_t type = dbg->arch.bp[n].type;
+
+ if (type == KVMPPC_DEBUG_NONE)
+ continue;
+
+ if (type & ~(KVMPPC_DEBUG_WATCH_READ |
+ KVMPPC_DEBUG_WATCH_WRITE |
+ KVMPPC_DEBUG_BREAKPOINT))
+ return -EINVAL;
+
+ if (type & KVMPPC_DEBUG_BREAKPOINT) {
+ /* Setting H/W breakpoint */
+ if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
+ return -EINVAL;
+ } else {
+ /* Setting H/W watchpoint */
+ if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
+ type, w++))
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
vcpu->cpu = smp_processor_id();
@@ -1681,6 +1903,44 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
current->thread.kvm_vcpu = NULL;
vcpu->cpu = -1;
+
+ /* Clear pending debug event in DBSR */
+ kvmppc_clear_dbsr();
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+ vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ return kvm->arch.kvm_ops->init_vm(kvm);
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ return kvm->arch.kvm_ops->vcpu_create(kvm, id);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+ kvm->arch.kvm_ops->destroy_vm(kvm);
+}
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}
int __init kvmppc_booke_init(void)
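
With kvm_arch_vcpu_ioctl_set_guest_debug() now implemented, user space can arm the Book E debug facilities through the standard KVM_SET_GUEST_DEBUG ioctl; the handler maps each dbg->arch.bp[] slot onto an IAC (instruction breakpoint) or DAC (data watchpoint) register in the shadow debug state. A minimal user-space sketch, assuming the powerpc uapi additions from this series; vcpu_fd and the breakpoint address are supplied by the surrounding VMM:

    /* Arm one hardware instruction breakpoint on a Book E vcpu. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_hw_breakpoint(int vcpu_fd, __u64 addr)
    {
            struct kvm_guest_debug dbg;

            memset(&dbg, 0, sizeof(dbg));    /* unused bp[] slots stay zero, i.e. no event */
            dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
            dbg.arch.bp[0].addr = addr;
            dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;

            return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
    }
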
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 5fd1ba693579..09bfd9bc7cf8 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -99,6 +99,30 @@ enum int_class {
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
+extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance);
+extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn,
+ ulong spr_val);
+extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn,
+ ulong *spr_val);
+extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
+ struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance);
+extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
+ ulong spr_val);
+extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
+ ulong *spr_val);
+extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
+ struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance);
+extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
+ ulong spr_val);
+extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
+ ulong *spr_val);
+
/*
* Load up guest vcpu FP state if it's needed.
* It also set the MSR_FP in thread so that host know
@@ -129,4 +153,9 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
giveup_fpu(current);
#endif
}
+
+static inline void kvmppc_clear_dbsr(void)
+{
+ mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
+}
#endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index ce6b73c29612..497b142f651c 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -305,7 +305,7 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
{
kvmppc_booke_vcpu_load(vcpu, cpu);
@@ -313,7 +313,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SPE
if (vcpu->arch.shadow_msr & MSR_SPE)
@@ -367,7 +367,8 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -388,9 +389,11 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
kvmppc_get_sregs_ivor(vcpu, sregs);
kvmppc_get_sregs_e500_tlb(vcpu, sregs);
+ return 0;
}
-int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int ret;
@@ -425,21 +428,22 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
return kvmppc_set_sregs_ivor(vcpu, sregs);
}
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
return r;
}
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
return r;
}
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
+ unsigned int id)
{
struct kvmppc_vcpu_e500 *vcpu_e500;
struct kvm_vcpu *vcpu;
@@ -481,7 +485,7 @@ out:
return ERR_PTR(err);
}
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -492,15 +496,32 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_e500(struct kvm *kvm)
{
return 0;
}
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
{
}
+static struct kvmppc_ops kvm_ops_e500 = {
+ .get_sregs = kvmppc_core_get_sregs_e500,
+ .set_sregs = kvmppc_core_set_sregs_e500,
+ .get_one_reg = kvmppc_get_one_reg_e500,
+ .set_one_reg = kvmppc_set_one_reg_e500,
+ .vcpu_load = kvmppc_core_vcpu_load_e500,
+ .vcpu_put = kvmppc_core_vcpu_put_e500,
+ .vcpu_create = kvmppc_core_vcpu_create_e500,
+ .vcpu_free = kvmppc_core_vcpu_free_e500,
+ .mmu_destroy = kvmppc_mmu_destroy_e500,
+ .init_vm = kvmppc_core_init_vm_e500,
+ .destroy_vm = kvmppc_core_destroy_vm_e500,
+ .emulate_op = kvmppc_core_emulate_op_e500,
+ .emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
+ .emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
+};
+
static int __init kvmppc_e500_init(void)
{
int r, i;
@@ -512,11 +533,11 @@ static int __init kvmppc_e500_init(void)
r = kvmppc_core_check_processor_compat();
if (r)
- return r;
+ goto err_out;
r = kvmppc_booke_init();
if (r)
- return r;
+ goto err_out;
/* copy extra E500 exception handlers */
ivor[0] = mfspr(SPRN_IVOR32);
@@ -534,11 +555,19 @@ static int __init kvmppc_e500_init(void)
flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
ivor[max_ivor] + handler_len);
- return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+ r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+ if (r)
+ goto err_out;
+ kvm_ops_e500.owner = THIS_MODULE;
+ kvmppc_pr_ops = &kvm_ops_e500;
+
+err_out:
+ return r;
}
static void __exit kvmppc_e500_exit(void)
{
+ kvmppc_pr_ops = NULL;
kvmppc_booke_exit();
}
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index c2e5e98453a6..4fd9650eb018 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -117,7 +117,7 @@ static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
#define MAS2_ATTRIB_MASK \
- (MAS2_X0 | MAS2_X1)
+ (MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G)
#define MAS3_ATTRIB_MASK \
(MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
| E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index b10a01243abd..89b7f821f6c4 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -26,6 +26,7 @@
#define XOP_TLBRE 946
#define XOP_TLBWE 978
#define XOP_TLBILX 18
+#define XOP_EHPRIV 270
#ifdef CONFIG_KVM_E500MC
static int dbell2prio(ulong param)
@@ -82,8 +83,28 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
}
#endif
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance)
+static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
+{
+ int emulated = EMULATE_DONE;
+
+ switch (get_oc(inst)) {
+ case EHPRIV_OC_DEBUG:
+ run->exit_reason = KVM_EXIT_DEBUG;
+ run->debug.arch.address = vcpu->arch.pc;
+ run->debug.arch.status = 0;
+ kvmppc_account_exit(vcpu, DEBUG_EXITS);
+ emulated = EMULATE_EXIT_USER;
+ *advance = 0;
+ break;
+ default:
+ emulated = EMULATE_FAIL;
+ }
+ return emulated;
+}
+
+int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int ra = get_ra(inst);
@@ -130,6 +151,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
break;
+ case XOP_EHPRIV:
+ emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
+ advance);
+ break;
+
default:
emulated = EMULATE_FAIL;
}
@@ -146,7 +172,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
@@ -237,7 +263,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
return emulated;
}
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
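
The new EHPRIV emulation above (and the reworked BOOKE_INTERRUPT_DEBUG path earlier in booke.c) both surface as KVM_EXIT_DEBUG exits with run->debug.arch filled in and the PC left on the trapping instruction. A sketch of how a VMM run loop might consume them; run is the mmap'ed struct kvm_run and error handling is omitted:

    #include <stdio.h>
    #include <linux/kvm.h>

    static void handle_exit(struct kvm_run *run)
    {
            switch (run->exit_reason) {
            case KVM_EXIT_DEBUG:
                    /* address/status are filled in by kvmppc_handle_debug()
                     * or kvmppc_e500_emul_ehpriv() above */
                    printf("debug exit at 0x%llx, status 0x%x\n",
                           (unsigned long long)run->debug.arch.address,
                           run->debug.arch.status);
                    break;
            default:
                    break;
            }
    }
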
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 6d6f153b6c1d..ebca6b88ea5e 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -32,7 +32,7 @@
#include <asm/kvm_ppc.h>
#include "e500.h"
-#include "trace.h"
+#include "trace_booke.h"
#include "timing.h"
#include "e500_mmu_host.h"
@@ -536,7 +536,7 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu)
{
}
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index c65593abae8e..ecf2247b13be 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -32,10 +32,11 @@
#include <asm/kvm_ppc.h>
#include "e500.h"
-#include "trace.h"
#include "timing.h"
#include "e500_mmu_host.h"
+#include "trace_booke.h"
+
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
@@ -253,6 +254,9 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
ref->pfn = pfn;
ref->flags |= E500_TLB_VALID;
+ /* Mark the page accessed */
+ kvm_set_pfn_accessed(pfn);
+
if (tlbe_is_writable(gtlbe))
kvm_set_pfn_dirty(pfn);
}
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 19c8379575f7..4132cd2fc171 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -110,7 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -147,7 +147,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvmppc_load_guest_fp(vcpu);
}
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
{
vcpu->arch.eplc = mfspr(SPRN_EPLC);
vcpu->arch.epsc = mfspr(SPRN_EPSC);
@@ -204,7 +204,8 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -224,10 +225,11 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
- kvmppc_get_sregs_ivor(vcpu, sregs);
+ return kvmppc_get_sregs_ivor(vcpu, sregs);
}
-int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int ret;
@@ -260,21 +262,22 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
return kvmppc_set_sregs_ivor(vcpu, sregs);
}
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
return r;
}
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
return r;
}
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
+ unsigned int id)
{
struct kvmppc_vcpu_e500 *vcpu_e500;
struct kvm_vcpu *vcpu;
@@ -315,7 +318,7 @@ out:
return ERR_PTR(err);
}
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -325,7 +328,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
{
int lpid;
@@ -337,27 +340,52 @@ int kvmppc_core_init_vm(struct kvm *kvm)
return 0;
}
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
{
kvmppc_free_lpid(kvm->arch.lpid);
}
+static struct kvmppc_ops kvm_ops_e500mc = {
+ .get_sregs = kvmppc_core_get_sregs_e500mc,
+ .set_sregs = kvmppc_core_set_sregs_e500mc,
+ .get_one_reg = kvmppc_get_one_reg_e500mc,
+ .set_one_reg = kvmppc_set_one_reg_e500mc,
+ .vcpu_load = kvmppc_core_vcpu_load_e500mc,
+ .vcpu_put = kvmppc_core_vcpu_put_e500mc,
+ .vcpu_create = kvmppc_core_vcpu_create_e500mc,
+ .vcpu_free = kvmppc_core_vcpu_free_e500mc,
+ .mmu_destroy = kvmppc_mmu_destroy_e500,
+ .init_vm = kvmppc_core_init_vm_e500mc,
+ .destroy_vm = kvmppc_core_destroy_vm_e500mc,
+ .emulate_op = kvmppc_core_emulate_op_e500,
+ .emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
+ .emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
+};
+
static int __init kvmppc_e500mc_init(void)
{
int r;
r = kvmppc_booke_init();
if (r)
- return r;
+ goto err_out;
kvmppc_init_lpid(64);
kvmppc_claim_lpid(0); /* host */
- return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+ r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+ if (r)
+ goto err_out;
+ kvm_ops_e500mc.owner = THIS_MODULE;
+ kvmppc_pr_ops = &kvm_ops_e500mc;
+
+err_out:
+ return r;
}
static void __exit kvmppc_e500mc_exit(void)
{
+ kvmppc_pr_ops = NULL;
kvmppc_booke_exit();
}
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 751cd45f65a0..2f9a0873b44f 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -130,8 +130,8 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_PIR: break;
default:
- emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
- spr_val);
+ emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
+ spr_val);
if (emulated == EMULATE_FAIL)
printk(KERN_INFO "mtspr: unknown spr "
"0x%x\n", sprn);
@@ -191,8 +191,8 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
spr_val = kvmppc_get_dec(vcpu, get_tb());
break;
default:
- emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
- &spr_val);
+ emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
+ &spr_val);
if (unlikely(emulated == EMULATE_FAIL)) {
printk(KERN_INFO "mfspr: unknown spr "
"0x%x\n", sprn);
@@ -464,7 +464,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
if (emulated == EMULATE_FAIL) {
- emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
+ emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
+ &advance);
if (emulated == EMULATE_AGAIN) {
advance = 0;
} else if (emulated == EMULATE_FAIL) {
@@ -483,3 +484,4 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
return emulated;
}
+EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 07c0106fab76..9ae97686e9f4 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -26,6 +26,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
+#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
@@ -39,6 +40,12 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
+struct kvmppc_ops *kvmppc_hv_ops;
+EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
+struct kvmppc_ops *kvmppc_pr_ops;
+EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
+
+
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
return !!(v->arch.pending_exceptions) ||
@@ -50,7 +57,6 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
return 1;
}
-#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
* Common checks before entering the guest world. Call with interrupts
* disabled.
@@ -125,7 +131,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
return r;
}
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
@@ -179,6 +185,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
return r;
}
+EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
@@ -192,11 +199,9 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
goto out;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
/* HV KVM can only do PAPR mode for now */
- if (!vcpu->arch.papr_enabled)
+ if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
goto out;
-#endif
#ifdef CONFIG_KVM_BOOKE_HV
if (!cpu_has_feature(CPU_FTR_EMB_HV))
@@ -209,6 +214,7 @@ out:
vcpu->arch.sane = r;
return r ? 0 : -EINVAL;
}
+EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
@@ -243,6 +249,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
return r;
}
+EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
int kvm_arch_hardware_enable(void *garbage)
{
@@ -269,10 +276,35 @@ void kvm_arch_check_processor_compat(void *rtn)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
- if (type)
- return -EINVAL;
-
+ struct kvmppc_ops *kvm_ops = NULL;
+ /*
+ * if we have both HV and PR enabled, default is HV
+ */
+ if (type == 0) {
+ if (kvmppc_hv_ops)
+ kvm_ops = kvmppc_hv_ops;
+ else
+ kvm_ops = kvmppc_pr_ops;
+ if (!kvm_ops)
+ goto err_out;
+ } else if (type == KVM_VM_PPC_HV) {
+ if (!kvmppc_hv_ops)
+ goto err_out;
+ kvm_ops = kvmppc_hv_ops;
+ } else if (type == KVM_VM_PPC_PR) {
+ if (!kvmppc_pr_ops)
+ goto err_out;
+ kvm_ops = kvmppc_pr_ops;
+ } else
+ goto err_out;
+
+ if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
+ return -ENOENT;
+
+ kvm->arch.kvm_ops = kvm_ops;
return kvmppc_core_init_vm(kvm);
+err_out:
+ return -EINVAL;
}
void kvm_arch_destroy_vm(struct kvm *kvm)
@@ -292,6 +324,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvmppc_core_destroy_vm(kvm);
mutex_unlock(&kvm->lock);
+
+ /* drop the module reference */
+ module_put(kvm->arch.kvm_ops->owner);
}
void kvm_arch_sync_events(struct kvm *kvm)
@@ -301,6 +336,10 @@ void kvm_arch_sync_events(struct kvm *kvm)
int kvm_dev_ioctl_check_extension(long ext)
{
int r;
+ /* FIXME!!
+ * Should some of this be vm ioctl ? is it possible now ?
+ */
+ int hv_enabled = kvmppc_hv_ops ? 1 : 0;
switch (ext) {
#ifdef CONFIG_BOOKE
@@ -320,22 +359,26 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_DEVICE_CTRL:
r = 1;
break;
-#ifndef CONFIG_KVM_BOOK3S_64_HV
case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_CAP_SW_TLB:
#endif
-#ifdef CONFIG_KVM_MPIC
- case KVM_CAP_IRQ_MPIC:
-#endif
- r = 1;
+ /* We support this only for PR */
+ r = !hv_enabled;
break;
+#ifdef CONFIG_KVM_MMIO
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
#endif
+#ifdef CONFIG_KVM_MPIC
+ case KVM_CAP_IRQ_MPIC:
+ r = 1;
+ break;
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
case KVM_CAP_PPC_ALLOC_HTAB:
@@ -346,32 +389,37 @@ int kvm_dev_ioctl_check_extension(long ext)
r = 1;
break;
#endif /* CONFIG_PPC_BOOK3S_64 */
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
case KVM_CAP_PPC_SMT:
- r = threads_per_core;
+ if (hv_enabled)
+ r = threads_per_core;
+ else
+ r = 0;
break;
case KVM_CAP_PPC_RMA:
- r = 1;
+ r = hv_enabled;
/* PPC970 requires an RMA */
- if (cpu_has_feature(CPU_FTR_ARCH_201))
+ if (r && cpu_has_feature(CPU_FTR_ARCH_201))
r = 2;
break;
#endif
case KVM_CAP_SYNC_MMU:
-#ifdef CONFIG_KVM_BOOK3S_64_HV
- r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ if (hv_enabled)
+ r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
+ else
+ r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
r = 1;
#else
r = 0;
- break;
#endif
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+ break;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
case KVM_CAP_PPC_HTAB_FD:
- r = 1;
+ r = hv_enabled;
break;
#endif
- break;
case KVM_CAP_NR_VCPUS:
/*
* Recommending a number of CPUs is somewhat arbitrary; we
@@ -379,11 +427,10 @@ int kvm_dev_ioctl_check_extension(long ext)
* will have secondary threads "offline"), and for other KVM
* implementations just count online CPUs.
*/
-#ifdef CONFIG_KVM_BOOK3S_64_HV
- r = num_present_cpus();
-#else
- r = num_online_cpus();
-#endif
+ if (hv_enabled)
+ r = num_present_cpus();
+ else
+ r = num_online_cpus();
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
@@ -407,15 +454,16 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
- kvmppc_core_free_memslot(free, dont);
+ kvmppc_core_free_memslot(kvm, free, dont);
}
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages)
{
- return kvmppc_core_create_memslot(slot, npages);
+ return kvmppc_core_create_memslot(kvm, slot, npages);
}
void kvm_arch_memslots_updated(struct kvm *kvm)
@@ -659,6 +707,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO;
}
+EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -720,6 +769,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO;
}
+EXPORT_SYMBOL_GPL(kvmppc_handle_store);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
@@ -1024,52 +1074,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
goto out;
}
-#endif /* CONFIG_PPC_BOOK3S_64 */
-
-#ifdef CONFIG_KVM_BOOK3S_64_HV
- case KVM_ALLOCATE_RMA: {
- struct kvm_allocate_rma rma;
- struct kvm *kvm = filp->private_data;
-
- r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
- if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
- r = -EFAULT;
- break;
- }
-
- case KVM_PPC_ALLOCATE_HTAB: {
- u32 htab_order;
-
- r = -EFAULT;
- if (get_user(htab_order, (u32 __user *)argp))
- break;
- r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
- if (r)
- break;
- r = -EFAULT;
- if (put_user(htab_order, (u32 __user *)argp))
- break;
- r = 0;
- break;
- }
-
- case KVM_PPC_GET_HTAB_FD: {
- struct kvm_get_htab_fd ghf;
-
- r = -EFAULT;
- if (copy_from_user(&ghf, argp, sizeof(ghf)))
- break;
- r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
- break;
- }
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
-
-#ifdef CONFIG_PPC_BOOK3S_64
case KVM_PPC_GET_SMMU_INFO: {
struct kvm_ppc_smmu_info info;
+ struct kvm *kvm = filp->private_data;
memset(&info, 0, sizeof(info));
- r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
+ r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
r = -EFAULT;
break;
@@ -1080,11 +1090,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
break;
}
-#endif /* CONFIG_PPC_BOOK3S_64 */
+ default: {
+ struct kvm *kvm = filp->private_data;
+ r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
+ }
+#else /* CONFIG_PPC_BOOK3S_64 */
default:
r = -ENOTTY;
+#endif
}
-
out:
return r;
}
@@ -1106,22 +1120,26 @@ long kvmppc_alloc_lpid(void)
return lpid;
}
+EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
void kvmppc_claim_lpid(long lpid)
{
set_bit(lpid, lpid_inuse);
}
+EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
void kvmppc_free_lpid(long lpid)
{
clear_bit(lpid, lpid_inuse);
}
+EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
+EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
int kvm_arch_init(void *opaque)
{
@@ -1130,4 +1148,5 @@ int kvm_arch_init(void *opaque)
void kvm_arch_exit(void)
{
+
}
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index e326489a5420..2e0e67ef3544 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -31,126 +31,6 @@ TRACE_EVENT(kvm_ppc_instr,
__entry->inst, __entry->pc, __entry->emulate)
);
-#ifdef CONFIG_PPC_BOOK3S
-#define kvm_trace_symbol_exit \
- {0x100, "SYSTEM_RESET"}, \
- {0x200, "MACHINE_CHECK"}, \
- {0x300, "DATA_STORAGE"}, \
- {0x380, "DATA_SEGMENT"}, \
- {0x400, "INST_STORAGE"}, \
- {0x480, "INST_SEGMENT"}, \
- {0x500, "EXTERNAL"}, \
- {0x501, "EXTERNAL_LEVEL"}, \
- {0x502, "EXTERNAL_HV"}, \
- {0x600, "ALIGNMENT"}, \
- {0x700, "PROGRAM"}, \
- {0x800, "FP_UNAVAIL"}, \
- {0x900, "DECREMENTER"}, \
- {0x980, "HV_DECREMENTER"}, \
- {0xc00, "SYSCALL"}, \
- {0xd00, "TRACE"}, \
- {0xe00, "H_DATA_STORAGE"}, \
- {0xe20, "H_INST_STORAGE"}, \
- {0xe40, "H_EMUL_ASSIST"}, \
- {0xf00, "PERFMON"}, \
- {0xf20, "ALTIVEC"}, \
- {0xf40, "VSX"}
-#else
-#define kvm_trace_symbol_exit \
- {0, "CRITICAL"}, \
- {1, "MACHINE_CHECK"}, \
- {2, "DATA_STORAGE"}, \
- {3, "INST_STORAGE"}, \
- {4, "EXTERNAL"}, \
- {5, "ALIGNMENT"}, \
- {6, "PROGRAM"}, \
- {7, "FP_UNAVAIL"}, \
- {8, "SYSCALL"}, \
- {9, "AP_UNAVAIL"}, \
- {10, "DECREMENTER"}, \
- {11, "FIT"}, \
- {12, "WATCHDOG"}, \
- {13, "DTLB_MISS"}, \
- {14, "ITLB_MISS"}, \
- {15, "DEBUG"}, \
- {32, "SPE_UNAVAIL"}, \
- {33, "SPE_FP_DATA"}, \
- {34, "SPE_FP_ROUND"}, \
- {35, "PERFORMANCE_MONITOR"}, \
- {36, "DOORBELL"}, \
- {37, "DOORBELL_CRITICAL"}, \
- {38, "GUEST_DBELL"}, \
- {39, "GUEST_DBELL_CRIT"}, \
- {40, "HV_SYSCALL"}, \
- {41, "HV_PRIV"}
-#endif
-
-TRACE_EVENT(kvm_exit,
- TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
- TP_ARGS(exit_nr, vcpu),
-
- TP_STRUCT__entry(
- __field( unsigned int, exit_nr )
- __field( unsigned long, pc )
- __field( unsigned long, msr )
- __field( unsigned long, dar )
-#ifdef CONFIG_KVM_BOOK3S_PR
- __field( unsigned long, srr1 )
-#endif
- __field( unsigned long, last_inst )
- ),
-
- TP_fast_assign(
-#ifdef CONFIG_KVM_BOOK3S_PR
- struct kvmppc_book3s_shadow_vcpu *svcpu;
-#endif
- __entry->exit_nr = exit_nr;
- __entry->pc = kvmppc_get_pc(vcpu);
- __entry->dar = kvmppc_get_fault_dar(vcpu);
- __entry->msr = vcpu->arch.shared->msr;
-#ifdef CONFIG_KVM_BOOK3S_PR
- svcpu = svcpu_get(vcpu);
- __entry->srr1 = svcpu->shadow_srr1;
- svcpu_put(svcpu);
-#endif
- __entry->last_inst = vcpu->arch.last_inst;
- ),
-
- TP_printk("exit=%s"
- " | pc=0x%lx"
- " | msr=0x%lx"
- " | dar=0x%lx"
-#ifdef CONFIG_KVM_BOOK3S_PR
- " | srr1=0x%lx"
-#endif
- " | last_inst=0x%lx"
- ,
- __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
- __entry->pc,
- __entry->msr,
- __entry->dar,
-#ifdef CONFIG_KVM_BOOK3S_PR
- __entry->srr1,
-#endif
- __entry->last_inst
- )
-);
-
-TRACE_EVENT(kvm_unmap_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("unmap hva 0x%lx\n", __entry->hva)
-);
-
TRACE_EVENT(kvm_stlb_inval,
TP_PROTO(unsigned int stlb_index),
TP_ARGS(stlb_index),
@@ -236,315 +116,6 @@ TRACE_EVENT(kvm_check_requests,
__entry->cpu_nr, __entry->requests)
);
-
-/*************************************************************************
- * Book3S trace points *
- *************************************************************************/
-
-#ifdef CONFIG_KVM_BOOK3S_PR
-
-TRACE_EVENT(kvm_book3s_reenter,
- TP_PROTO(int r, struct kvm_vcpu *vcpu),
- TP_ARGS(r, vcpu),
-
- TP_STRUCT__entry(
- __field( unsigned int, r )
- __field( unsigned long, pc )
- ),
-
- TP_fast_assign(
- __entry->r = r;
- __entry->pc = kvmppc_get_pc(vcpu);
- ),
-
- TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
-);
-
-#ifdef CONFIG_PPC_BOOK3S_64
-
-TRACE_EVENT(kvm_book3s_64_mmu_map,
- TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
- struct kvmppc_pte *orig_pte),
- TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
-
- TP_STRUCT__entry(
- __field( unsigned char, flag_w )
- __field( unsigned char, flag_x )
- __field( unsigned long, eaddr )
- __field( unsigned long, hpteg )
- __field( unsigned long, va )
- __field( unsigned long long, vpage )
- __field( unsigned long, hpaddr )
- ),
-
- TP_fast_assign(
- __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
- __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
- __entry->eaddr = orig_pte->eaddr;
- __entry->hpteg = hpteg;
- __entry->va = va;
- __entry->vpage = orig_pte->vpage;
- __entry->hpaddr = hpaddr;
- ),
-
- TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
- __entry->flag_w, __entry->flag_x, __entry->eaddr,
- __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
-);
-
-#endif /* CONFIG_PPC_BOOK3S_64 */
-
-TRACE_EVENT(kvm_book3s_mmu_map,
- TP_PROTO(struct hpte_cache *pte),
- TP_ARGS(pte),
-
- TP_STRUCT__entry(
- __field( u64, host_vpn )
- __field( u64, pfn )
- __field( ulong, eaddr )
- __field( u64, vpage )
- __field( ulong, raddr )
- __field( int, flags )
- ),
-
- TP_fast_assign(
- __entry->host_vpn = pte->host_vpn;
- __entry->pfn = pte->pfn;
- __entry->eaddr = pte->pte.eaddr;
- __entry->vpage = pte->pte.vpage;
- __entry->raddr = pte->pte.raddr;
- __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
- (pte->pte.may_write ? 0x2 : 0) |
- (pte->pte.may_execute ? 0x1 : 0);
- ),
-
- TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
- __entry->host_vpn, __entry->pfn, __entry->eaddr,
- __entry->vpage, __entry->raddr, __entry->flags)
-);
-
-TRACE_EVENT(kvm_book3s_mmu_invalidate,
- TP_PROTO(struct hpte_cache *pte),
- TP_ARGS(pte),
-
- TP_STRUCT__entry(
- __field( u64, host_vpn )
- __field( u64, pfn )
- __field( ulong, eaddr )
- __field( u64, vpage )
- __field( ulong, raddr )
- __field( int, flags )
- ),
-
- TP_fast_assign(
- __entry->host_vpn = pte->host_vpn;
- __entry->pfn = pte->pfn;
- __entry->eaddr = pte->pte.eaddr;
- __entry->vpage = pte->pte.vpage;
- __entry->raddr = pte->pte.raddr;
- __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
- (pte->pte.may_write ? 0x2 : 0) |
- (pte->pte.may_execute ? 0x1 : 0);
- ),
-
- TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
- __entry->host_vpn, __entry->pfn, __entry->eaddr,
- __entry->vpage, __entry->raddr, __entry->flags)
-);
-
-TRACE_EVENT(kvm_book3s_mmu_flush,
- TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
- unsigned long long p2),
- TP_ARGS(type, vcpu, p1, p2),
-
- TP_STRUCT__entry(
- __field( int, count )
- __field( unsigned long long, p1 )
- __field( unsigned long long, p2 )
- __field( const char *, type )
- ),
-
- TP_fast_assign(
- __entry->count = to_book3s(vcpu)->hpte_cache_count;
- __entry->p1 = p1;
- __entry->p2 = p2;
- __entry->type = type;
- ),
-
- TP_printk("Flush %d %sPTEs: %llx - %llx",
- __entry->count, __entry->type, __entry->p1, __entry->p2)
-);
-
-TRACE_EVENT(kvm_book3s_slb_found,
- TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
- TP_ARGS(gvsid, hvsid),
-
- TP_STRUCT__entry(
- __field( unsigned long long, gvsid )
- __field( unsigned long long, hvsid )
- ),
-
- TP_fast_assign(
- __entry->gvsid = gvsid;
- __entry->hvsid = hvsid;
- ),
-
- TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
-);
-
-TRACE_EVENT(kvm_book3s_slb_fail,
- TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
- TP_ARGS(sid_map_mask, gvsid),
-
- TP_STRUCT__entry(
- __field( unsigned short, sid_map_mask )
- __field( unsigned long long, gvsid )
- ),
-
- TP_fast_assign(
- __entry->sid_map_mask = sid_map_mask;
- __entry->gvsid = gvsid;
- ),
-
- TP_printk("%x/%x: %llx", __entry->sid_map_mask,
- SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
-);
-
-TRACE_EVENT(kvm_book3s_slb_map,
- TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
- unsigned long long hvsid),
- TP_ARGS(sid_map_mask, gvsid, hvsid),
-
- TP_STRUCT__entry(
- __field( unsigned short, sid_map_mask )
- __field( unsigned long long, guest_vsid )
- __field( unsigned long long, host_vsid )
- ),
-
- TP_fast_assign(
- __entry->sid_map_mask = sid_map_mask;
- __entry->guest_vsid = gvsid;
- __entry->host_vsid = hvsid;
- ),
-
- TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
- __entry->guest_vsid, __entry->host_vsid)
-);
-
-TRACE_EVENT(kvm_book3s_slbmte,
- TP_PROTO(u64 slb_vsid, u64 slb_esid),
- TP_ARGS(slb_vsid, slb_esid),
-
- TP_STRUCT__entry(
- __field( u64, slb_vsid )
- __field( u64, slb_esid )
- ),
-
- TP_fast_assign(
- __entry->slb_vsid = slb_vsid;
- __entry->slb_esid = slb_esid;
- ),
-
- TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
-);
-
-#endif /* CONFIG_PPC_BOOK3S */
-
-
-/*************************************************************************
- * Book3E trace points *
- *************************************************************************/
-
-#ifdef CONFIG_BOOKE
-
-TRACE_EVENT(kvm_booke206_stlb_write,
- TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
- TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
-
- TP_STRUCT__entry(
- __field( __u32, mas0 )
- __field( __u32, mas8 )
- __field( __u32, mas1 )
- __field( __u64, mas2 )
- __field( __u64, mas7_3 )
- ),
-
- TP_fast_assign(
- __entry->mas0 = mas0;
- __entry->mas8 = mas8;
- __entry->mas1 = mas1;
- __entry->mas2 = mas2;
- __entry->mas7_3 = mas7_3;
- ),
-
- TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
- __entry->mas0, __entry->mas8, __entry->mas1,
- __entry->mas2, __entry->mas7_3)
-);
-
-TRACE_EVENT(kvm_booke206_gtlb_write,
- TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
- TP_ARGS(mas0, mas1, mas2, mas7_3),
-
- TP_STRUCT__entry(
- __field( __u32, mas0 )
- __field( __u32, mas1 )
- __field( __u64, mas2 )
- __field( __u64, mas7_3 )
- ),
-
- TP_fast_assign(
- __entry->mas0 = mas0;
- __entry->mas1 = mas1;
- __entry->mas2 = mas2;
- __entry->mas7_3 = mas7_3;
- ),
-
- TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
- __entry->mas0, __entry->mas1,
- __entry->mas2, __entry->mas7_3)
-);
-
-TRACE_EVENT(kvm_booke206_ref_release,
- TP_PROTO(__u64 pfn, __u32 flags),
- TP_ARGS(pfn, flags),
-
- TP_STRUCT__entry(
- __field( __u64, pfn )
- __field( __u32, flags )
- ),
-
- TP_fast_assign(
- __entry->pfn = pfn;
- __entry->flags = flags;
- ),
-
- TP_printk("pfn=%llx flags=%x",
- __entry->pfn, __entry->flags)
-);
-
-TRACE_EVENT(kvm_booke_queue_irqprio,
- TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
- TP_ARGS(vcpu, priority),
-
- TP_STRUCT__entry(
- __field( __u32, cpu_nr )
- __field( __u32, priority )
- __field( unsigned long, pending )
- ),
-
- TP_fast_assign(
- __entry->cpu_nr = vcpu->vcpu_id;
- __entry->priority = priority;
- __entry->pending = vcpu->arch.pending_exceptions;
- ),
-
- TP_printk("vcpu=%x prio=%x pending=%lx",
- __entry->cpu_nr, __entry->priority, __entry->pending)
-);
-
-#endif
-
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
new file mode 100644
index 000000000000..f7537cf26ce7
--- /dev/null
+++ b/arch/powerpc/kvm/trace_booke.h
@@ -0,0 +1,177 @@
+#if !defined(_TRACE_KVM_BOOKE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_BOOKE_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm_booke
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_booke
+
+#define kvm_trace_symbol_exit \
+ {0, "CRITICAL"}, \
+ {1, "MACHINE_CHECK"}, \
+ {2, "DATA_STORAGE"}, \
+ {3, "INST_STORAGE"}, \
+ {4, "EXTERNAL"}, \
+ {5, "ALIGNMENT"}, \
+ {6, "PROGRAM"}, \
+ {7, "FP_UNAVAIL"}, \
+ {8, "SYSCALL"}, \
+ {9, "AP_UNAVAIL"}, \
+ {10, "DECREMENTER"}, \
+ {11, "FIT"}, \
+ {12, "WATCHDOG"}, \
+ {13, "DTLB_MISS"}, \
+ {14, "ITLB_MISS"}, \
+ {15, "DEBUG"}, \
+ {32, "SPE_UNAVAIL"}, \
+ {33, "SPE_FP_DATA"}, \
+ {34, "SPE_FP_ROUND"}, \
+ {35, "PERFORMANCE_MONITOR"}, \
+ {36, "DOORBELL"}, \
+ {37, "DOORBELL_CRITICAL"}, \
+ {38, "GUEST_DBELL"}, \
+ {39, "GUEST_DBELL_CRIT"}, \
+ {40, "HV_SYSCALL"}, \
+ {41, "HV_PRIV"}
+
+TRACE_EVENT(kvm_exit,
+ TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
+ TP_ARGS(exit_nr, vcpu),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, exit_nr )
+ __field( unsigned long, pc )
+ __field( unsigned long, msr )
+ __field( unsigned long, dar )
+ __field( unsigned long, last_inst )
+ ),
+
+ TP_fast_assign(
+ __entry->exit_nr = exit_nr;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ __entry->dar = kvmppc_get_fault_dar(vcpu);
+ __entry->msr = vcpu->arch.shared->msr;
+ __entry->last_inst = vcpu->arch.last_inst;
+ ),
+
+ TP_printk("exit=%s"
+ " | pc=0x%lx"
+ " | msr=0x%lx"
+ " | dar=0x%lx"
+ " | last_inst=0x%lx"
+ ,
+ __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
+ __entry->pc,
+ __entry->msr,
+ __entry->dar,
+ __entry->last_inst
+ )
+);
+
+TRACE_EVENT(kvm_unmap_hva,
+ TP_PROTO(unsigned long hva),
+ TP_ARGS(hva),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, hva )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ ),
+
+ TP_printk("unmap hva 0x%lx\n", __entry->hva)
+);
+
+TRACE_EVENT(kvm_booke206_stlb_write,
+ TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
+ TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
+
+ TP_STRUCT__entry(
+ __field( __u32, mas0 )
+ __field( __u32, mas8 )
+ __field( __u32, mas1 )
+ __field( __u64, mas2 )
+ __field( __u64, mas7_3 )
+ ),
+
+ TP_fast_assign(
+ __entry->mas0 = mas0;
+ __entry->mas8 = mas8;
+ __entry->mas1 = mas1;
+ __entry->mas2 = mas2;
+ __entry->mas7_3 = mas7_3;
+ ),
+
+ TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
+ __entry->mas0, __entry->mas8, __entry->mas1,
+ __entry->mas2, __entry->mas7_3)
+);
+
+TRACE_EVENT(kvm_booke206_gtlb_write,
+ TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
+ TP_ARGS(mas0, mas1, mas2, mas7_3),
+
+ TP_STRUCT__entry(
+ __field( __u32, mas0 )
+ __field( __u32, mas1 )
+ __field( __u64, mas2 )
+ __field( __u64, mas7_3 )
+ ),
+
+ TP_fast_assign(
+ __entry->mas0 = mas0;
+ __entry->mas1 = mas1;
+ __entry->mas2 = mas2;
+ __entry->mas7_3 = mas7_3;
+ ),
+
+ TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
+ __entry->mas0, __entry->mas1,
+ __entry->mas2, __entry->mas7_3)
+);
+
+TRACE_EVENT(kvm_booke206_ref_release,
+ TP_PROTO(__u64 pfn, __u32 flags),
+ TP_ARGS(pfn, flags),
+
+ TP_STRUCT__entry(
+ __field( __u64, pfn )
+ __field( __u32, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("pfn=%llx flags=%x",
+ __entry->pfn, __entry->flags)
+);
+
+TRACE_EVENT(kvm_booke_queue_irqprio,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
+ TP_ARGS(vcpu, priority),
+
+ TP_STRUCT__entry(
+ __field( __u32, cpu_nr )
+ __field( __u32, priority )
+ __field( unsigned long, pending )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_nr = vcpu->vcpu_id;
+ __entry->priority = priority;
+ __entry->pending = vcpu->arch.pending_exceptions;
+ ),
+
+ TP_printk("vcpu=%x prio=%x pending=%lx",
+ __entry->cpu_nr, __entry->priority, __entry->pending)
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
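
Editor's note: the new trace_booke.h (and trace_pr.h below) follow the standard kernel tracepoint header layout, so exactly one compilation unit per backend instantiates them. As a hedged sketch of typical usage — the instantiating .c file is not part of this hunk — the consumer looks roughly like:

    /* near the top of the Booke backend's main .c file */
    #define CREATE_TRACE_POINTS
    #include "trace_booke.h"

    /* ...and at a guest exit, somewhere in the run loop: */
    trace_kvm_exit(exit_nr, vcpu);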
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
new file mode 100644
index 000000000000..8b22e4748344
--- /dev/null
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -0,0 +1,297 @@
+
+#if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_PR_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm_pr
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_pr
+
+#define kvm_trace_symbol_exit \
+ {0x100, "SYSTEM_RESET"}, \
+ {0x200, "MACHINE_CHECK"}, \
+ {0x300, "DATA_STORAGE"}, \
+ {0x380, "DATA_SEGMENT"}, \
+ {0x400, "INST_STORAGE"}, \
+ {0x480, "INST_SEGMENT"}, \
+ {0x500, "EXTERNAL"}, \
+ {0x501, "EXTERNAL_LEVEL"}, \
+ {0x502, "EXTERNAL_HV"}, \
+ {0x600, "ALIGNMENT"}, \
+ {0x700, "PROGRAM"}, \
+ {0x800, "FP_UNAVAIL"}, \
+ {0x900, "DECREMENTER"}, \
+ {0x980, "HV_DECREMENTER"}, \
+ {0xc00, "SYSCALL"}, \
+ {0xd00, "TRACE"}, \
+ {0xe00, "H_DATA_STORAGE"}, \
+ {0xe20, "H_INST_STORAGE"}, \
+ {0xe40, "H_EMUL_ASSIST"}, \
+ {0xf00, "PERFMON"}, \
+ {0xf20, "ALTIVEC"}, \
+ {0xf40, "VSX"}
+
+TRACE_EVENT(kvm_book3s_reenter,
+ TP_PROTO(int r, struct kvm_vcpu *vcpu),
+ TP_ARGS(r, vcpu),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, r )
+ __field( unsigned long, pc )
+ ),
+
+ TP_fast_assign(
+ __entry->r = r;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ ),
+
+ TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
+);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+
+TRACE_EVENT(kvm_book3s_64_mmu_map,
+ TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
+ struct kvmppc_pte *orig_pte),
+ TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
+
+ TP_STRUCT__entry(
+ __field( unsigned char, flag_w )
+ __field( unsigned char, flag_x )
+ __field( unsigned long, eaddr )
+ __field( unsigned long, hpteg )
+ __field( unsigned long, va )
+ __field( unsigned long long, vpage )
+ __field( unsigned long, hpaddr )
+ ),
+
+ TP_fast_assign(
+ __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
+ __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
+ __entry->eaddr = orig_pte->eaddr;
+ __entry->hpteg = hpteg;
+ __entry->va = va;
+ __entry->vpage = orig_pte->vpage;
+ __entry->hpaddr = hpaddr;
+ ),
+
+ TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
+ __entry->flag_w, __entry->flag_x, __entry->eaddr,
+ __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
+);
+
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+TRACE_EVENT(kvm_book3s_mmu_map,
+ TP_PROTO(struct hpte_cache *pte),
+ TP_ARGS(pte),
+
+ TP_STRUCT__entry(
+ __field( u64, host_vpn )
+ __field( u64, pfn )
+ __field( ulong, eaddr )
+ __field( u64, vpage )
+ __field( ulong, raddr )
+ __field( int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->host_vpn = pte->host_vpn;
+ __entry->pfn = pte->pfn;
+ __entry->eaddr = pte->pte.eaddr;
+ __entry->vpage = pte->pte.vpage;
+ __entry->raddr = pte->pte.raddr;
+ __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
+ (pte->pte.may_write ? 0x2 : 0) |
+ (pte->pte.may_execute ? 0x1 : 0);
+ ),
+
+ TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
+ __entry->host_vpn, __entry->pfn, __entry->eaddr,
+ __entry->vpage, __entry->raddr, __entry->flags)
+);
+
+TRACE_EVENT(kvm_book3s_mmu_invalidate,
+ TP_PROTO(struct hpte_cache *pte),
+ TP_ARGS(pte),
+
+ TP_STRUCT__entry(
+ __field( u64, host_vpn )
+ __field( u64, pfn )
+ __field( ulong, eaddr )
+ __field( u64, vpage )
+ __field( ulong, raddr )
+ __field( int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->host_vpn = pte->host_vpn;
+ __entry->pfn = pte->pfn;
+ __entry->eaddr = pte->pte.eaddr;
+ __entry->vpage = pte->pte.vpage;
+ __entry->raddr = pte->pte.raddr;
+ __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
+ (pte->pte.may_write ? 0x2 : 0) |
+ (pte->pte.may_execute ? 0x1 : 0);
+ ),
+
+ TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
+ __entry->host_vpn, __entry->pfn, __entry->eaddr,
+ __entry->vpage, __entry->raddr, __entry->flags)
+);
+
+TRACE_EVENT(kvm_book3s_mmu_flush,
+ TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
+ unsigned long long p2),
+ TP_ARGS(type, vcpu, p1, p2),
+
+ TP_STRUCT__entry(
+ __field( int, count )
+ __field( unsigned long long, p1 )
+ __field( unsigned long long, p2 )
+ __field( const char *, type )
+ ),
+
+ TP_fast_assign(
+ __entry->count = to_book3s(vcpu)->hpte_cache_count;
+ __entry->p1 = p1;
+ __entry->p2 = p2;
+ __entry->type = type;
+ ),
+
+ TP_printk("Flush %d %sPTEs: %llx - %llx",
+ __entry->count, __entry->type, __entry->p1, __entry->p2)
+);
+
+TRACE_EVENT(kvm_book3s_slb_found,
+ TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
+ TP_ARGS(gvsid, hvsid),
+
+ TP_STRUCT__entry(
+ __field( unsigned long long, gvsid )
+ __field( unsigned long long, hvsid )
+ ),
+
+ TP_fast_assign(
+ __entry->gvsid = gvsid;
+ __entry->hvsid = hvsid;
+ ),
+
+ TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
+);
+
+TRACE_EVENT(kvm_book3s_slb_fail,
+ TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
+ TP_ARGS(sid_map_mask, gvsid),
+
+ TP_STRUCT__entry(
+ __field( unsigned short, sid_map_mask )
+ __field( unsigned long long, gvsid )
+ ),
+
+ TP_fast_assign(
+ __entry->sid_map_mask = sid_map_mask;
+ __entry->gvsid = gvsid;
+ ),
+
+ TP_printk("%x/%x: %llx", __entry->sid_map_mask,
+ SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
+);
+
+TRACE_EVENT(kvm_book3s_slb_map,
+ TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
+ unsigned long long hvsid),
+ TP_ARGS(sid_map_mask, gvsid, hvsid),
+
+ TP_STRUCT__entry(
+ __field( unsigned short, sid_map_mask )
+ __field( unsigned long long, guest_vsid )
+ __field( unsigned long long, host_vsid )
+ ),
+
+ TP_fast_assign(
+ __entry->sid_map_mask = sid_map_mask;
+ __entry->guest_vsid = gvsid;
+ __entry->host_vsid = hvsid;
+ ),
+
+ TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
+ __entry->guest_vsid, __entry->host_vsid)
+);
+
+TRACE_EVENT(kvm_book3s_slbmte,
+ TP_PROTO(u64 slb_vsid, u64 slb_esid),
+ TP_ARGS(slb_vsid, slb_esid),
+
+ TP_STRUCT__entry(
+ __field( u64, slb_vsid )
+ __field( u64, slb_esid )
+ ),
+
+ TP_fast_assign(
+ __entry->slb_vsid = slb_vsid;
+ __entry->slb_esid = slb_esid;
+ ),
+
+ TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
+);
+
+TRACE_EVENT(kvm_exit,
+ TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
+ TP_ARGS(exit_nr, vcpu),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, exit_nr )
+ __field( unsigned long, pc )
+ __field( unsigned long, msr )
+ __field( unsigned long, dar )
+ __field( unsigned long, srr1 )
+ __field( unsigned long, last_inst )
+ ),
+
+ TP_fast_assign(
+ __entry->exit_nr = exit_nr;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ __entry->dar = kvmppc_get_fault_dar(vcpu);
+ __entry->msr = vcpu->arch.shared->msr;
+ __entry->srr1 = vcpu->arch.shadow_srr1;
+ __entry->last_inst = vcpu->arch.last_inst;
+ ),
+
+ TP_printk("exit=%s"
+ " | pc=0x%lx"
+ " | msr=0x%lx"
+ " | dar=0x%lx"
+ " | srr1=0x%lx"
+ " | last_inst=0x%lx"
+ ,
+ __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
+ __entry->pc,
+ __entry->msr,
+ __entry->dar,
+ __entry->srr1,
+ __entry->last_inst
+ )
+);
+
+TRACE_EVENT(kvm_unmap_hva,
+ TP_PROTO(unsigned long hva),
+ TP_ARGS(hva),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, hva )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ ),
+
+ TP_printk("unmap hva 0x%lx\n", __entry->hva)
+);
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 450433276699..95a20e17dbff 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -10,15 +10,23 @@ CFLAGS_REMOVE_code-patching.o = -pg
CFLAGS_REMOVE_feature-fixups.o = -pg
obj-y := string.o alloc.o \
- checksum_$(CONFIG_WORD_SIZE).o crtsavres.o
+ crtsavres.o
obj-$(CONFIG_PPC32) += div64.o copy_32.o
obj-$(CONFIG_HAS_IOMEM) += devres.o
obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
- memcpy_64.o usercopy_64.o mem_64.o string.o \
- checksum_wrappers_64.o hweight_64.o \
- copyuser_power7.o string_64.o copypage_power7.o \
- memcpy_power7.o
+ usercopy_64.o mem_64.o string.o \
+ hweight_64.o \
+ copyuser_power7.o string_64.o copypage_power7.o
+ifeq ($(CONFIG_GENERIC_CSUM),)
+obj-y += checksum_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_PPC64) += checksum_wrappers_64.o
+endif
+
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),)
+obj-$(CONFIG_PPC64) += memcpy_power7.o memcpy_64.o
+endif
+
obj-$(CONFIG_PPC_EMULATE_SSTEP) += sstep.o ldstfp.o
ifeq ($(CONFIG_PPC64),y)
@@ -31,3 +39,6 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
obj-y += code-patching.o
obj-y += feature-fixups.o
obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
+
+obj-$(CONFIG_ALTIVEC) += xor_vmx.o
+CFLAGS_xor_vmx.o += -maltivec -mabi=altivec
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index d1f11795a7ad..e8e9c36dc784 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -19,6 +19,14 @@
*/
#include <asm/ppc_asm.h>
+#ifdef __BIG_ENDIAN__
+#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
+#else
+#define LVS(VRT,RA,RB) lvsr VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC
+#endif
+
.macro err1
100:
.section __ex_table,"a"
@@ -552,13 +560,13 @@ err3; stw r7,4(r3)
li r10,32
li r11,48
- lvsl vr16,0,r4 /* Setup permute control vector */
+ LVS(vr16,0,r4) /* Setup permute control vector */
err3; lvx vr0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
err3; lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
addi r4,r4,16
err3; stvx vr8,r0,r3
addi r3,r3,16
@@ -566,9 +574,9 @@ err3; stvx vr8,r0,r3
5: bf cr7*4+2,6f
err3; lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
err3; lvx vr0,r4,r9
- vperm vr9,vr1,vr0,vr16
+ VPERM(vr9,vr1,vr0,vr16)
addi r4,r4,32
err3; stvx vr8,r0,r3
err3; stvx vr9,r3,r9
@@ -576,13 +584,13 @@ err3; stvx vr9,r3,r9
6: bf cr7*4+1,7f
err3; lvx vr3,r0,r4
- vperm vr8,vr0,vr3,vr16
+ VPERM(vr8,vr0,vr3,vr16)
err3; lvx vr2,r4,r9
- vperm vr9,vr3,vr2,vr16
+ VPERM(vr9,vr3,vr2,vr16)
err3; lvx vr1,r4,r10
- vperm vr10,vr2,vr1,vr16
+ VPERM(vr10,vr2,vr1,vr16)
err3; lvx vr0,r4,r11
- vperm vr11,vr1,vr0,vr16
+ VPERM(vr11,vr1,vr0,vr16)
addi r4,r4,64
err3; stvx vr8,r0,r3
err3; stvx vr9,r3,r9
@@ -611,21 +619,21 @@ err3; stvx vr11,r3,r11
.align 5
8:
err4; lvx vr7,r0,r4
- vperm vr8,vr0,vr7,vr16
+ VPERM(vr8,vr0,vr7,vr16)
err4; lvx vr6,r4,r9
- vperm vr9,vr7,vr6,vr16
+ VPERM(vr9,vr7,vr6,vr16)
err4; lvx vr5,r4,r10
- vperm vr10,vr6,vr5,vr16
+ VPERM(vr10,vr6,vr5,vr16)
err4; lvx vr4,r4,r11
- vperm vr11,vr5,vr4,vr16
+ VPERM(vr11,vr5,vr4,vr16)
err4; lvx vr3,r4,r12
- vperm vr12,vr4,vr3,vr16
+ VPERM(vr12,vr4,vr3,vr16)
err4; lvx vr2,r4,r14
- vperm vr13,vr3,vr2,vr16
+ VPERM(vr13,vr3,vr2,vr16)
err4; lvx vr1,r4,r15
- vperm vr14,vr2,vr1,vr16
+ VPERM(vr14,vr2,vr1,vr16)
err4; lvx vr0,r4,r16
- vperm vr15,vr1,vr0,vr16
+ VPERM(vr15,vr1,vr0,vr16)
addi r4,r4,128
err4; stvx vr8,r0,r3
err4; stvx vr9,r3,r9
@@ -649,13 +657,13 @@ err4; stvx vr15,r3,r16
bf cr7*4+1,9f
err3; lvx vr3,r0,r4
- vperm vr8,vr0,vr3,vr16
+ VPERM(vr8,vr0,vr3,vr16)
err3; lvx vr2,r4,r9
- vperm vr9,vr3,vr2,vr16
+ VPERM(vr9,vr3,vr2,vr16)
err3; lvx vr1,r4,r10
- vperm vr10,vr2,vr1,vr16
+ VPERM(vr10,vr2,vr1,vr16)
err3; lvx vr0,r4,r11
- vperm vr11,vr1,vr0,vr16
+ VPERM(vr11,vr1,vr0,vr16)
addi r4,r4,64
err3; stvx vr8,r0,r3
err3; stvx vr9,r3,r9
@@ -665,9 +673,9 @@ err3; stvx vr11,r3,r11
9: bf cr7*4+2,10f
err3; lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
err3; lvx vr0,r4,r9
- vperm vr9,vr1,vr0,vr16
+ VPERM(vr9,vr1,vr0,vr16)
addi r4,r4,32
err3; stvx vr8,r0,r3
err3; stvx vr9,r3,r9
@@ -675,7 +683,7 @@ err3; stvx vr9,r3,r9
10: bf cr7*4+3,11f
err3; lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
addi r4,r4,16
err3; stvx vr8,r0,r3
addi r3,r3,16
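
Editor's note: the LVS/VPERM wrappers above (and the identical pair added to memcpy_power7.S below) exist because the unaligned-copy loop fetches aligned 16-byte quadwords and stitches the wanted bytes together with a permute: lvsl builds the permute control on big-endian, while little-endian needs lvsr with swapped vperm source operands. A scalar model of the underlying idea, written as plain C purely for illustration:

    /* Combine two adjacent aligned 16-byte chunks into one unaligned
     * 16-byte result, the way lvx + vperm does with an lvsl-derived
     * control vector (big-endian view; LE flips the operand order). */
    static void extract_unaligned16(const unsigned char *lo,  /* chunk holding the start */
                                    const unsigned char *hi,  /* next aligned chunk */
                                    unsigned int off,         /* misalignment, 0..15 */
                                    unsigned char *out)
    {
            for (int i = 0; i < 16; i++)
                    out[i] = (off + i < 16) ? lo[off + i] : hi[off + i - 16];
    }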
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 0663630baf3b..e4177dbea6bd 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -20,6 +20,15 @@
#include <asm/ppc_asm.h>
_GLOBAL(memcpy_power7)
+
+#ifdef __BIG_ENDIAN__
+#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
+#else
+#define LVS(VRT,RA,RB) lvsr VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC
+#endif
+
#ifdef CONFIG_ALTIVEC
cmpldi r5,16
cmpldi cr1,r5,4096
@@ -485,13 +494,13 @@ _GLOBAL(memcpy_power7)
li r10,32
li r11,48
- lvsl vr16,0,r4 /* Setup permute control vector */
+ LVS(vr16,0,r4) /* Setup permute control vector */
lvx vr0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
addi r4,r4,16
stvx vr8,r0,r3
addi r3,r3,16
@@ -499,9 +508,9 @@ _GLOBAL(memcpy_power7)
5: bf cr7*4+2,6f
lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
lvx vr0,r4,r9
- vperm vr9,vr1,vr0,vr16
+ VPERM(vr9,vr1,vr0,vr16)
addi r4,r4,32
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -509,13 +518,13 @@ _GLOBAL(memcpy_power7)
6: bf cr7*4+1,7f
lvx vr3,r0,r4
- vperm vr8,vr0,vr3,vr16
+ VPERM(vr8,vr0,vr3,vr16)
lvx vr2,r4,r9
- vperm vr9,vr3,vr2,vr16
+ VPERM(vr9,vr3,vr2,vr16)
lvx vr1,r4,r10
- vperm vr10,vr2,vr1,vr16
+ VPERM(vr10,vr2,vr1,vr16)
lvx vr0,r4,r11
- vperm vr11,vr1,vr0,vr16
+ VPERM(vr11,vr1,vr0,vr16)
addi r4,r4,64
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -544,21 +553,21 @@ _GLOBAL(memcpy_power7)
.align 5
8:
lvx vr7,r0,r4
- vperm vr8,vr0,vr7,vr16
+ VPERM(vr8,vr0,vr7,vr16)
lvx vr6,r4,r9
- vperm vr9,vr7,vr6,vr16
+ VPERM(vr9,vr7,vr6,vr16)
lvx vr5,r4,r10
- vperm vr10,vr6,vr5,vr16
+ VPERM(vr10,vr6,vr5,vr16)
lvx vr4,r4,r11
- vperm vr11,vr5,vr4,vr16
+ VPERM(vr11,vr5,vr4,vr16)
lvx vr3,r4,r12
- vperm vr12,vr4,vr3,vr16
+ VPERM(vr12,vr4,vr3,vr16)
lvx vr2,r4,r14
- vperm vr13,vr3,vr2,vr16
+ VPERM(vr13,vr3,vr2,vr16)
lvx vr1,r4,r15
- vperm vr14,vr2,vr1,vr16
+ VPERM(vr14,vr2,vr1,vr16)
lvx vr0,r4,r16
- vperm vr15,vr1,vr0,vr16
+ VPERM(vr15,vr1,vr0,vr16)
addi r4,r4,128
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -582,13 +591,13 @@ _GLOBAL(memcpy_power7)
bf cr7*4+1,9f
lvx vr3,r0,r4
- vperm vr8,vr0,vr3,vr16
+ VPERM(vr8,vr0,vr3,vr16)
lvx vr2,r4,r9
- vperm vr9,vr3,vr2,vr16
+ VPERM(vr9,vr3,vr2,vr16)
lvx vr1,r4,r10
- vperm vr10,vr2,vr1,vr16
+ VPERM(vr10,vr2,vr1,vr16)
lvx vr0,r4,r11
- vperm vr11,vr1,vr0,vr16
+ VPERM(vr11,vr1,vr0,vr16)
addi r4,r4,64
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -598,9 +607,9 @@ _GLOBAL(memcpy_power7)
9: bf cr7*4+2,10f
lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
lvx vr0,r4,r9
- vperm vr9,vr1,vr0,vr16
+ VPERM(vr9,vr1,vr0,vr16)
addi r4,r4,32
stvx vr8,r0,r3
stvx vr9,r3,r9
@@ -608,7 +617,7 @@ _GLOBAL(memcpy_power7)
10: bf cr7*4+3,11f
lvx vr1,r0,r4
- vperm vr8,vr0,vr1,vr16
+ VPERM(vr8,vr0,vr1,vr16)
addi r4,r4,16
stvx vr8,r0,r3
addi r3,r3,16
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index b1faa1593c90..c0511c27a733 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -212,11 +212,19 @@ static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
{
int err;
unsigned long x, b, c;
+#ifdef __LITTLE_ENDIAN__
+ int len = nb; /* save a copy of the length for byte reversal */
+#endif
/* unaligned, do this in pieces */
x = 0;
for (; nb > 0; nb -= c) {
+#ifdef __LITTLE_ENDIAN__
+ c = 1;
+#endif
+#ifdef __BIG_ENDIAN__
c = max_align(ea);
+#endif
if (c > nb)
c = max_align(nb);
err = read_mem_aligned(&b, ea, c);
@@ -225,7 +233,24 @@ static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
x = (x << (8 * c)) + b;
ea += c;
}
+#ifdef __LITTLE_ENDIAN__
+ switch (len) {
+ case 2:
+ *dest = byterev_2(x);
+ break;
+ case 4:
+ *dest = byterev_4(x);
+ break;
+#ifdef __powerpc64__
+ case 8:
+ *dest = byterev_8(x);
+ break;
+#endif
+ }
+#endif
+#ifdef __BIG_ENDIAN__
*dest = x;
+#endif
return 0;
}
@@ -273,9 +298,29 @@ static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
int err;
unsigned long c;
+#ifdef __LITTLE_ENDIAN__
+ switch (nb) {
+ case 2:
+ val = byterev_2(val);
+ break;
+ case 4:
+ val = byterev_4(val);
+ break;
+#ifdef __powerpc64__
+ case 8:
+ val = byterev_8(val);
+ break;
+#endif
+ }
+#endif
/* unaligned or little-endian, do this in pieces */
for (; nb > 0; nb -= c) {
+#ifdef __LITTLE_ENDIAN__
+ c = 1;
+#endif
+#ifdef __BIG_ENDIAN__
c = max_align(ea);
+#endif
if (c > nb)
c = max_align(nb);
err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
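
Editor's note: both hunks above use the same strategy on little-endian — perform the unaligned access one byte at a time, accumulating in big-endian order, then byte-reverse the full-width value so the emulated load or store sees host byte order. A user-space model of the read side, with illustrative names and GCC/Clang byte-swap builtins assumed:

    #include <stdint.h>

    static uint64_t read_unaligned_model(const unsigned char *ea, int nb)
    {
            uint64_t x = 0;

            for (int i = 0; i < nb; i++)    /* c == 1: one byte per step */
                    x = (x << 8) | ea[i];

    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
            switch (nb) {                   /* undo the big-endian accumulation */
            case 2: x = __builtin_bswap16(x); break;
            case 4: x = __builtin_bswap32(x); break;
            case 8: x = __builtin_bswap64(x); break;
            }
    #endif
            return x;                       /* on big-endian hosts, x is already right */
    }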
@@ -310,22 +355,36 @@ static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
struct pt_regs *regs)
{
int err;
- unsigned long val[sizeof(double) / sizeof(long)];
+ union {
+ double dbl;
+ unsigned long ul[2];
+ struct {
+#ifdef __BIG_ENDIAN__
+ unsigned _pad_;
+ unsigned word;
+#endif
+#ifdef __LITTLE_ENDIAN__
+ unsigned word;
+ unsigned _pad_;
+#endif
+ } single;
+ } data;
unsigned long ptr;
if (!address_ok(regs, ea, nb))
return -EFAULT;
if ((ea & 3) == 0)
return (*func)(rn, ea);
- ptr = (unsigned long) &val[0];
+ ptr = (unsigned long) &data.ul;
if (sizeof(unsigned long) == 8 || nb == 4) {
- err = read_mem_unaligned(&val[0], ea, nb, regs);
- ptr += sizeof(unsigned long) - nb;
+ err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
+ if (nb == 4)
+ ptr = (unsigned long)&(data.single.word);
} else {
/* reading a double on 32-bit */
- err = read_mem_unaligned(&val[0], ea, 4, regs);
+ err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
if (!err)
- err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
+ err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
}
if (err)
return err;
@@ -337,28 +396,42 @@ static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
struct pt_regs *regs)
{
int err;
- unsigned long val[sizeof(double) / sizeof(long)];
+ union {
+ double dbl;
+ unsigned long ul[2];
+ struct {
+#ifdef __BIG_ENDIAN__
+ unsigned _pad_;
+ unsigned word;
+#endif
+#ifdef __LITTLE_ENDIAN__
+ unsigned word;
+ unsigned _pad_;
+#endif
+ } single;
+ } data;
unsigned long ptr;
if (!address_ok(regs, ea, nb))
return -EFAULT;
if ((ea & 3) == 0)
return (*func)(rn, ea);
- ptr = (unsigned long) &val[0];
+ ptr = (unsigned long) &data.ul[0];
if (sizeof(unsigned long) == 8 || nb == 4) {
- ptr += sizeof(unsigned long) - nb;
+ if (nb == 4)
+ ptr = (unsigned long)&(data.single.word);
err = (*func)(rn, ptr);
if (err)
return err;
- err = write_mem_unaligned(val[0], ea, nb, regs);
+ err = write_mem_unaligned(data.ul[0], ea, nb, regs);
} else {
/* writing a double on 32-bit */
err = (*func)(rn, ptr);
if (err)
return err;
- err = write_mem_unaligned(val[0], ea, 4, regs);
+ err = write_mem_unaligned(data.ul[0], ea, 4, regs);
if (!err)
- err = write_mem_unaligned(val[1], ea + 4, 4, regs);
+ err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
}
return err;
}
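
Editor's note: the union introduced in do_fp_load/do_fp_store picks out the low-order 32 bits of the first unsigned long — which land at byte offset 4 on a 64-bit big-endian host and offset 0 on little-endian — and that is the whole reason the padding sits before the word on BE and after it on LE. A stand-alone illustration (user space, GCC byte-order macros assumed):

    #include <stdio.h>
    #include <stddef.h>

    union fp_slot {
            double dbl;
            unsigned long ul[2];
            struct {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
                    unsigned _pad_;
                    unsigned word;          /* low 32 bits of ul[0] on BE */
    #else
                    unsigned word;          /* low 32 bits of ul[0] on LE */
                    unsigned _pad_;
    #endif
            } single;
    };

    int main(void)
    {
            /* prints 4 on 64-bit big-endian, 0 on little-endian */
            printf("single.word offset = %zu\n",
                   offsetof(union fp_slot, single.word));
            return 0;
    }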
diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c
new file mode 100644
index 000000000000..e905f7c2ea7b
--- /dev/null
+++ b/arch/powerpc/lib/xor_vmx.c
@@ -0,0 +1,177 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <altivec.h>
+
+#include <linux/preempt.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <asm/switch_to.h>
+
+typedef vector signed char unative_t;
+
+#define DEFINE(V) \
+ unative_t *V = (unative_t *)V##_in; \
+ unative_t V##_0, V##_1, V##_2, V##_3
+
+#define LOAD(V) \
+ do { \
+ V##_0 = V[0]; \
+ V##_1 = V[1]; \
+ V##_2 = V[2]; \
+ V##_3 = V[3]; \
+ } while (0)
+
+#define STORE(V) \
+ do { \
+ V[0] = V##_0; \
+ V[1] = V##_1; \
+ V[2] = V##_2; \
+ V[3] = V##_3; \
+ } while (0)
+
+#define XOR(V1, V2) \
+ do { \
+ V1##_0 = vec_xor(V1##_0, V2##_0); \
+ V1##_1 = vec_xor(V1##_1, V2##_1); \
+ V1##_2 = vec_xor(V1##_2, V2##_2); \
+ V1##_3 = vec_xor(V1##_3, V2##_3); \
+ } while (0)
+
+void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in)
+{
+ DEFINE(v1);
+ DEFINE(v2);
+ unsigned long lines = bytes / (sizeof(unative_t)) / 4;
+
+ preempt_disable();
+ enable_kernel_altivec();
+
+ do {
+ LOAD(v1);
+ LOAD(v2);
+ XOR(v1, v2);
+ STORE(v1);
+
+ v1 += 4;
+ v2 += 4;
+ } while (--lines > 0);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_2);
+
+void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in)
+{
+ DEFINE(v1);
+ DEFINE(v2);
+ DEFINE(v3);
+ unsigned long lines = bytes / (sizeof(unative_t)) / 4;
+
+ preempt_disable();
+ enable_kernel_altivec();
+
+ do {
+ LOAD(v1);
+ LOAD(v2);
+ LOAD(v3);
+ XOR(v1, v2);
+ XOR(v1, v3);
+ STORE(v1);
+
+ v1 += 4;
+ v2 += 4;
+ v3 += 4;
+ } while (--lines > 0);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_3);
+
+void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in)
+{
+ DEFINE(v1);
+ DEFINE(v2);
+ DEFINE(v3);
+ DEFINE(v4);
+ unsigned long lines = bytes / (sizeof(unative_t)) / 4;
+
+ preempt_disable();
+ enable_kernel_altivec();
+
+ do {
+ LOAD(v1);
+ LOAD(v2);
+ LOAD(v3);
+ LOAD(v4);
+ XOR(v1, v2);
+ XOR(v3, v4);
+ XOR(v1, v3);
+ STORE(v1);
+
+ v1 += 4;
+ v2 += 4;
+ v3 += 4;
+ v4 += 4;
+ } while (--lines > 0);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_4);
+
+void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in, unsigned long *v5_in)
+{
+ DEFINE(v1);
+ DEFINE(v2);
+ DEFINE(v3);
+ DEFINE(v4);
+ DEFINE(v5);
+ unsigned long lines = bytes / (sizeof(unative_t)) / 4;
+
+ preempt_disable();
+ enable_kernel_altivec();
+
+ do {
+ LOAD(v1);
+ LOAD(v2);
+ LOAD(v3);
+ LOAD(v4);
+ LOAD(v5);
+ XOR(v1, v2);
+ XOR(v3, v4);
+ XOR(v1, v5);
+ XOR(v1, v3);
+ STORE(v1);
+
+ v1 += 4;
+ v2 += 4;
+ v3 += 4;
+ v4 += 4;
+ v5 += 4;
+ } while (--lines > 0);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_5);
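
Editor's note: the new xor_vmx.c routines process four vectors (64 bytes) per loop iteration with AltiVec, bracketed by preempt_disable()/enable_kernel_altivec() so the vector unit is owned for the duration. As a quick sanity reference only — not part of the patch — the 2-source case is equivalent to this scalar loop for any length that is a multiple of 64 bytes:

    static void xor_scalar_2(unsigned long bytes, unsigned long *v1,
                             unsigned long *v2)
    {
            unsigned long words = bytes / sizeof(unsigned long);

            while (words--)
                    *v1++ ^= *v2++;   /* destination is v1, as in xor_altivec_2() */
    }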
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index c33d939120c9..3ea26c25590b 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -35,7 +35,11 @@
#define DBG_LOW(fmt...)
#endif
+#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
+#else
+#define HPTE_LOCK_BIT (56+3)
+#endif
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
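
Editor's note: HPTE_LOCK_BIT moves from 3 to 56+3 on little-endian because the hash-table entry stays big-endian in memory while set_bit()/test_and_set_bit_lock() index bits within the natively loaded long: architected bit 3 lives in the byte at the highest address of the dword, and on an LE load that byte becomes native bits 56-63. A small user-space demonstration of the shift, purely for illustration:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            unsigned char hpte_v[8] = { 0 };
            uint64_t native;

            hpte_v[7] = 1u << 3;            /* big-endian (architected) bit 3 */

            memcpy(&native, hpte_v, sizeof(native));
            for (int i = 0; i < 64; i++)
                    if (native & (1ull << i))
                            printf("native bit %d\n", i);   /* 3 on BE, 59 on LE */
            return 0;
    }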
@@ -172,7 +176,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
static inline void native_lock_hpte(struct hash_pte *hptep)
{
- unsigned long *word = &hptep->v;
+ unsigned long *word = (unsigned long *)&hptep->v;
while (1) {
if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
@@ -184,7 +188,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
static inline void native_unlock_hpte(struct hash_pte *hptep)
{
- unsigned long *word = &hptep->v;
+ unsigned long *word = (unsigned long *)&hptep->v;
clear_bit_unlock(HPTE_LOCK_BIT, word);
}
@@ -204,10 +208,10 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
}
for (i = 0; i < HPTES_PER_GROUP; i++) {
- if (! (hptep->v & HPTE_V_VALID)) {
+ if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
/* retry with lock held */
native_lock_hpte(hptep);
- if (! (hptep->v & HPTE_V_VALID))
+ if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
break;
native_unlock_hpte(hptep);
}
@@ -226,14 +230,14 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
i, hpte_v, hpte_r);
}
- hptep->r = hpte_r;
+ hptep->r = cpu_to_be64(hpte_r);
/* Guarantee the second dword is visible before the valid bit */
eieio();
/*
* Now set the first dword including the valid bit
* NOTE: this also unlocks the hpte
*/
- hptep->v = hpte_v;
+ hptep->v = cpu_to_be64(hpte_v);
__asm__ __volatile__ ("ptesync" : : : "memory");
@@ -254,12 +258,12 @@ static long native_hpte_remove(unsigned long hpte_group)
for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + hpte_group + slot_offset;
- hpte_v = hptep->v;
+ hpte_v = be64_to_cpu(hptep->v);
if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
/* retry with lock held */
native_lock_hpte(hptep);
- hpte_v = hptep->v;
+ hpte_v = be64_to_cpu(hptep->v);
if ((hpte_v & HPTE_V_VALID)
&& !(hpte_v & HPTE_V_BOLTED))
break;
@@ -294,7 +298,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
native_lock_hpte(hptep);
- hpte_v = hptep->v;
+ hpte_v = be64_to_cpu(hptep->v);
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
* a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -308,8 +312,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
} else {
DBG_LOW(" -> hit\n");
/* Update the HPTE */
- hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
+ hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
}
native_unlock_hpte(hptep);
@@ -334,7 +338,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + slot;
- hpte_v = hptep->v;
+ hpte_v = be64_to_cpu(hptep->v);
if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
/* HPTE matches */
@@ -369,8 +373,9 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
hptep = htab_address + slot;
/* Update the HPTE */
- hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N));
+ hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
+ ~(HPTE_R_PP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PP | HPTE_R_N)));
/*
* Ensure it is out of the tlb too. Bolted entries base and
* actual page size will be same.
@@ -392,7 +397,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
want_v = hpte_encode_avpn(vpn, bpsize, ssize);
native_lock_hpte(hptep);
- hpte_v = hptep->v;
+ hpte_v = be64_to_cpu(hptep->v);
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
@@ -458,7 +463,7 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
hptep = htab_address + slot;
want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
- hpte_v = hptep->v;
+ hpte_v = be64_to_cpu(hptep->v);
/* Even if we miss, we need to invalidate the TLB */
if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
@@ -519,11 +524,12 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
unsigned long avpn, pteg, vpi;
- unsigned long hpte_v = hpte->v;
+ unsigned long hpte_v = be64_to_cpu(hpte->v);
+ unsigned long hpte_r = be64_to_cpu(hpte->r);
unsigned long vsid, seg_off;
int size, a_size, shift;
/* Look at the 8 bit LP value */
- unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+ unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
if (!(hpte_v & HPTE_V_LARGE)) {
size = MMU_PAGE_4K;
@@ -612,7 +618,7 @@ static void native_hpte_clear(void)
* running, right? and for crash dump, we probably
* don't want to wait for a maybe bad cpu.
*/
- hpte_v = hptep->v;
+ hpte_v = be64_to_cpu(hptep->v);
/*
* Call __tlbie() here rather than tlbie() since we
@@ -664,7 +670,7 @@ static void native_flush_hash_range(unsigned long number, int local)
hptep = htab_address + slot;
want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
- hpte_v = hptep->v;
+ hpte_v = be64_to_cpu(hptep->v);
if (!HPTE_V_COMPARE(hpte_v, want_v) ||
!(hpte_v & HPTE_V_VALID))
native_unlock_hpte(hptep);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index bde8b5589755..6176b3cdf579 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -251,19 +251,18 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
void *data)
{
char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- u32 *prop;
+ __be32 *prop;
unsigned long size = 0;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
- prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
- &size);
+ prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
if (prop == NULL)
return 0;
for (; size >= 4; size -= 4, ++prop) {
- if (prop[0] == 40) {
+ if (be32_to_cpu(prop[0]) == 40) {
DBG("1T segment support detected\n");
cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
return 1;
@@ -307,23 +306,22 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
void *data)
{
char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- u32 *prop;
+ __be32 *prop;
unsigned long size = 0;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
- prop = (u32 *)of_get_flat_dt_prop(node,
- "ibm,segment-page-sizes", &size);
+ prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
if (prop != NULL) {
pr_info("Page sizes from device-tree:\n");
size /= 4;
cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
while(size > 0) {
- unsigned int base_shift = prop[0];
- unsigned int slbenc = prop[1];
- unsigned int lpnum = prop[2];
+ unsigned int base_shift = be32_to_cpu(prop[0]);
+ unsigned int slbenc = be32_to_cpu(prop[1]);
+ unsigned int lpnum = be32_to_cpu(prop[2]);
struct mmu_psize_def *def;
int idx, base_idx;
@@ -356,8 +354,8 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
def->tlbiel = 0;
while (size > 0 && lpnum) {
- unsigned int shift = prop[0];
- int penc = prop[1];
+ unsigned int shift = be32_to_cpu(prop[0]);
+ int penc = be32_to_cpu(prop[1]);
prop += 2; size -= 2;
lpnum--;
@@ -390,8 +388,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
const char *uname, int depth,
void *data) {
char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- unsigned long *addr_prop;
- u32 *page_count_prop;
+ __be64 *addr_prop;
+ __be32 *page_count_prop;
unsigned int expected_pages;
long unsigned int phys_addr;
long unsigned int block_size;
@@ -405,12 +403,12 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
if (page_count_prop == NULL)
return 0;
- expected_pages = (1 << page_count_prop[0]);
+ expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
if (addr_prop == NULL)
return 0;
- phys_addr = addr_prop[0];
- block_size = addr_prop[1];
+ phys_addr = be64_to_cpu(addr_prop[0]);
+ block_size = be64_to_cpu(addr_prop[1]);
if (block_size != (16 * GB))
return 0;
printk(KERN_INFO "Huge page(16GB) memory: "
@@ -534,16 +532,16 @@ static int __init htab_dt_scan_pftsize(unsigned long node,
void *data)
{
char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- u32 *prop;
+ __be32 *prop;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
- prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
+ prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
if (prop != NULL) {
/* pft_size[0] is the NUMA CEC cookie */
- ppc64_pft_size = prop[1];
+ ppc64_pft_size = be32_to_cpu(prop[1]);
return 1;
}
return 0;
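
Editor's note: the hash_utils_64.c hunks all follow one pattern — flattened device-tree properties are stored big-endian, so the scan callbacks now declare them __be32/__be64 and convert each cell with be32_to_cpu()/be64_to_cpu() before use. A hedged sketch of that pattern for a made-up property name, mirroring the scanner signatures above:

    static int __init scan_example_cells(unsigned long node, const char *uname,
                                         int depth, void *data)
    {
            unsigned long size = 0;
            __be32 *prop = of_get_flat_dt_prop(node, "example,cells", &size);

            if (prop == NULL)
                    return 0;
            for (; size >= 4; size -= 4, ++prop)
                    pr_info("cell: %u\n", be32_to_cpu(*prop));  /* convert every cell */
            return 1;
    }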
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index d47d3dab4870..cff59f1bec23 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -213,7 +213,12 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
*/
BUG_ON(first_memblock_base != 0);
+#ifdef CONFIG_PIN_TLB
+ /* 8xx can only access 24MB at the moment */
+ memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01800000));
+#else
/* 8xx can only access 8MB at the moment */
memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
+#endif
}
#endif /* CONFIG_8xx */
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 8ed035d2edb5..e3734edffa69 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -304,5 +304,54 @@ void register_page_bootmem_memmap(unsigned long section_nr,
struct page *start_page, unsigned long size)
{
}
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+/*
+ * We do not have access to the sparsemem vmemmap, so we fall back to
+ * walking the list of sparsemem blocks which we already maintain for
+ * the sake of crashdump. In the long run, we might want to maintain
+ * a tree if performance of that linear walk becomes a problem.
+ *
+ * realmode_pfn_to_page functions can fail due to:
+ * 1) As real sparsemem blocks do not lie in RAM contiguously (they
+ * are in virtual address space which is not available in the real mode),
+ * the requested page struct can be split between blocks so get_page/put_page
+ * may fail.
+ * 2) When huge pages are used, the get_page/put_page API will fail
+ * in real mode as the linked addresses in the page struct are virtual
+ * too.
+ */
+struct page *realmode_pfn_to_page(unsigned long pfn)
+{
+ struct vmemmap_backing *vmem_back;
+ struct page *page;
+ unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
+ unsigned long pg_va = (unsigned long) pfn_to_page(pfn);
+
+ for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
+ if (pg_va < vmem_back->virt_addr)
+ continue;
+
+ /* Check that page struct is not split between real pages */
+ if ((pg_va + sizeof(struct page)) >
+ (vmem_back->virt_addr + page_size))
+ return NULL;
+
+ page = (struct page *) (vmem_back->phys + pg_va -
+ vmem_back->virt_addr);
+ return page;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
+
+#elif defined(CONFIG_FLATMEM)
+
+struct page *realmode_pfn_to_page(unsigned long pfn)
+{
+ struct page *page = pfn_to_page(pfn);
+ return page;
+}
+EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
+
+#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index c916127f10c3..33d67844062c 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -195,7 +195,7 @@ static const __be32 *of_get_usable_memory(struct device_node *memory)
u32 len;
prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
if (!prop || len < sizeof(unsigned int))
- return 0;
+ return NULL;
return prop;
}
@@ -1154,7 +1154,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
* represented in the device tree as a node (i.e. memory@XXXX) for
* each memblock.
*/
-int hot_add_node_scn_to_nid(unsigned long scn_addr)
+static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
struct device_node *memory;
int nid = -1;
@@ -1235,7 +1235,7 @@ static u64 hot_add_drconf_memory_max(void)
struct device_node *memory = NULL;
unsigned int drconf_cell_cnt = 0;
u64 lmb_size = 0;
- const __be32 *dm = 0;
+ const __be32 *dm = NULL;
memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (memory) {
@@ -1535,7 +1535,7 @@ static void topology_work_fn(struct work_struct *work)
}
static DECLARE_WORK(topology_work, topology_work_fn);
-void topology_schedule_update(void)
+static void topology_schedule_update(void)
{
schedule_work(&topology_work);
}
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index edda589795c3..841e0d00863c 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -32,8 +32,6 @@
#include <asm/tlbflush.h>
#include <asm/tlb.h>
-#include "mmu_decl.h"
-
static inline int is_exec_fault(void)
{
return current->thread.regs && TRAP(current->thread.regs) == 0x400;
@@ -72,7 +70,7 @@ struct page * maybe_pte_to_page(pte_t pte)
* support falls into the same category.
*/
-static pte_t set_pte_filter(pte_t pte, unsigned long addr)
+static pte_t set_pte_filter(pte_t pte)
{
pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
@@ -81,17 +79,6 @@ static pte_t set_pte_filter(pte_t pte, unsigned long addr)
if (!pg)
return pte;
if (!test_bit(PG_arch_1, &pg->flags)) {
-#ifdef CONFIG_8xx
- /* On 8xx, cache control instructions (particularly
- * "dcbst" from flush_dcache_icache) fault as write
- * operation if there is an unpopulated TLB entry
- * for the address in question. To workaround that,
- * we invalidate the TLB here, thus avoiding dcbst
- * misbehaviour.
- */
- /* 8xx doesn't care about PID, size or ind args */
- _tlbil_va(addr, 0, 0, 0);
-#endif /* CONFIG_8xx */
flush_dcache_icache_page(pg);
set_bit(PG_arch_1, &pg->flags);
}
@@ -111,7 +98,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
* as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
* instead we "filter out" the exec permission for non clean pages.
*/
-static pte_t set_pte_filter(pte_t pte, unsigned long addr)
+static pte_t set_pte_filter(pte_t pte)
{
struct page *pg;
@@ -193,7 +180,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
* this context might not have been activated yet when this
* is called.
*/
- pte = set_pte_filter(pte, addr);
+ pte = set_pte_filter(pte);
/* Perform the setting of the PTE */
__set_pte_at(mm, addr, ptep, pte, 0);
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 8a5dfaf5c6b7..9aee27c582dc 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -39,6 +39,7 @@
#define r_X 5
#define r_addr 6
#define r_scratch1 7
+#define r_scratch2 8
#define r_D 14
#define r_HL 15
#define r_M 16
@@ -92,6 +93,8 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
___PPC_RA(base) | IMM_L(i))
#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
+#define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \
+ ___PPC_RA(base) | ___PPC_RB(b))
/* Convenience helpers for the above with 'far' offsets: */
#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
else { PPC_ADDIS(r, base, IMM_HA(i)); \
@@ -186,6 +189,14 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \
} } while (0);
+#define PPC_LHBRX_OFFS(r, base, i) \
+ do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
+#ifdef __LITTLE_ENDIAN__
+#define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i)
+#else
+#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
+#endif
+
static inline bool is_nearbranch(int offset)
{
return (offset < 32768) && (offset >= -32768);
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index 7d3a3b5619a2..e76eba74d9da 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -43,8 +43,11 @@ sk_load_word_positive_offset:
cmpd r_scratch1, r_addr
blt bpf_slow_path_word
/* Nope, just hitting the header. cr0 here is eq or gt! */
+#ifdef __LITTLE_ENDIAN__
+ lwbrx r_A, r_D, r_addr
+#else
lwzx r_A, r_D, r_addr
- /* When big endian we don't need to byteswap. */
+#endif
blr /* Return success, cr0 != LT */
.globl sk_load_half
@@ -56,7 +59,11 @@ sk_load_half_positive_offset:
subi r_scratch1, r_HL, 2
cmpd r_scratch1, r_addr
blt bpf_slow_path_half
+#ifdef __LITTLE_ENDIAN__
+ lhbrx r_A, r_D, r_addr
+#else
lhzx r_A, r_D, r_addr
+#endif
blr
.globl sk_load_byte
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index bf56e33f8257..ac3c2a10dafd 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -17,14 +17,8 @@
#include "bpf_jit.h"
-#ifndef __BIG_ENDIAN
-/* There are endianness assumptions herein. */
-#error "Little-endian PPC not supported in BPF compiler"
-#endif
-
int bpf_jit_enable __read_mostly;
-
static inline void bpf_flush_icache(void *start, void *end)
{
smp_wmb();
@@ -193,6 +187,26 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
PPC_MUL(r_A, r_A, r_scratch1);
}
break;
+ case BPF_S_ALU_MOD_X: /* A %= X; */
+ ctx->seen |= SEEN_XREG;
+ PPC_CMPWI(r_X, 0);
+ if (ctx->pc_ret0 != -1) {
+ PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
+ } else {
+ PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
+ PPC_LI(r_ret, 0);
+ PPC_JMP(exit_addr);
+ }
+ PPC_DIVWU(r_scratch1, r_A, r_X);
+ PPC_MUL(r_scratch1, r_X, r_scratch1);
+ PPC_SUB(r_A, r_A, r_scratch1);
+ break;
+ case BPF_S_ALU_MOD_K: /* A %= K; */
+ PPC_LI32(r_scratch2, K);
+ PPC_DIVWU(r_scratch1, r_A, r_scratch2);
+ PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
+ PPC_SUB(r_A, r_A, r_scratch1);
+ break;
case BPF_S_ALU_DIV_X: /* A /= X; */
ctx->seen |= SEEN_XREG;
PPC_CMPWI(r_X, 0);
@@ -346,18 +360,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
break;
/*** Ancillary info loads ***/
-
- /* None of the BPF_S_ANC* codes appear to be passed by
- * sk_chk_filter(). The interpreter and the x86 BPF
- * compiler implement them so we do too -- they may be
- * planted in future.
- */
case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
protocol) != 2);
- PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
- protocol));
- /* ntohs is a NOP with BE loads. */
+ PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
+ protocol));
break;
case BPF_S_ANC_IFINDEX:
PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
@@ -691,4 +698,5 @@ void bpf_jit_free(struct sk_filter *fp)
{
if (fp->bpf_func != sk_run_filter)
module_free(NULL, fp->bpf_func);
+ kfree(fp);
}
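The BPF_S_ALU_MOD_X/MOD_K cases added earlier in this file synthesize a modulo, since these cores have no 32-bit remainder instruction: the quotient from divwu is multiplied back and subtracted, and the X variant is preceded by a compare-with-zero guard that makes the filter return 0 for a zero divisor. A C sketch of the arithmetic the emitted sequence computes (example_mod is an illustrative name):

	#include <linux/types.h>

	/* assumes x != 0; the JIT'ed code has already taken the "return 0" exit otherwise */
	static u32 example_mod(u32 a, u32 x)
	{
		u32 q = a / x;		/* divwu  r_scratch1, r_A, divisor */

		return a - q * x;	/* mullw + subf folded back into r_A */
	}
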
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
index e504166e089a..fd8a37653417 100644
--- a/arch/powerpc/platforms/512x/clock.c
+++ b/arch/powerpc/platforms/512x/clock.c
@@ -24,6 +24,7 @@
#include <linux/mutex.h>
#include <linux/io.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/mpc5xxx.h>
#include <asm/mpc5121.h>
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index a82a41b4fd91..36b5652aada2 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -60,8 +60,6 @@ void mpc512x_restart(char *cmd)
;
}
-#if IS_ENABLED(CONFIG_FB_FSL_DIU)
-
struct fsl_diu_shared_fb {
u8 gamma[0x300]; /* 32-bit aligned! */
struct diu_ad ad0; /* 32-bit aligned! */
@@ -71,7 +69,7 @@ struct fsl_diu_shared_fb {
};
#define DIU_DIV_MASK 0x000000ff
-void mpc512x_set_pixel_clock(unsigned int pixclock)
+static void mpc512x_set_pixel_clock(unsigned int pixclock)
{
unsigned long bestval, bestfreq, speed, busfreq;
unsigned long minpixclock, maxpixclock, pixval;
@@ -164,7 +162,7 @@ void mpc512x_set_pixel_clock(unsigned int pixclock)
iounmap(ccm);
}
-enum fsl_diu_monitor_port
+static enum fsl_diu_monitor_port
mpc512x_valid_monitor_port(enum fsl_diu_monitor_port port)
{
return FSL_DIU_PORT_DVI;
@@ -179,7 +177,7 @@ static inline void mpc512x_free_bootmem(struct page *page)
free_reserved_page(page);
}
-void mpc512x_release_bootmem(void)
+static void mpc512x_release_bootmem(void)
{
unsigned long addr = diu_shared_fb.fb_phys & PAGE_MASK;
unsigned long size = diu_shared_fb.fb_len;
@@ -205,7 +203,7 @@ void mpc512x_release_bootmem(void)
* address range will be reserved in setup_arch() after bootmem
* allocator is up.
*/
-void __init mpc512x_init_diu(void)
+static void __init mpc512x_init_diu(void)
{
struct device_node *np;
struct diu __iomem *diu_reg;
@@ -274,7 +272,7 @@ out:
iounmap(diu_reg);
}
-void __init mpc512x_setup_diu(void)
+static void __init mpc512x_setup_diu(void)
{
int ret;
@@ -303,8 +301,6 @@ void __init mpc512x_setup_diu(void)
diu_ops.release_bootmem = mpc512x_release_bootmem;
}
-#endif
-
void __init mpc512x_init_IRQ(void)
{
struct device_node *np;
@@ -337,7 +333,7 @@ static struct of_device_id __initdata of_bus_ids[] = {
{},
};
-void __init mpc512x_declare_of_platform_devices(void)
+static void __init mpc512x_declare_of_platform_devices(void)
{
if (of_platform_bus_probe(NULL, of_bus_ids, NULL))
printk(KERN_ERR __FILE__ ": "
@@ -387,7 +383,7 @@ static unsigned int __init get_fifo_size(struct device_node *np,
((u32)(_base) + sizeof(struct mpc52xx_psc)))
/* Init PSC FIFO space for TX and RX slices */
-void __init mpc512x_psc_fifo_init(void)
+static void __init mpc512x_psc_fifo_init(void)
{
struct device_node *np;
void __iomem *psc;
diff --git a/arch/powerpc/platforms/512x/pdm360ng.c b/arch/powerpc/platforms/512x/pdm360ng.c
index 24b314d7bd5f..116f2325b20b 100644
--- a/arch/powerpc/platforms/512x/pdm360ng.c
+++ b/arch/powerpc/platforms/512x/pdm360ng.c
@@ -14,6 +14,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 90f4496017e4..af54174801f7 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -57,5 +57,5 @@ config PPC_MPC5200_BUGFIX
config PPC_MPC5200_LPBFIFO
tristate "MPC5200 LocalPlus bus FIFO driver"
- depends on PPC_MPC52xx
+ depends on PPC_MPC52xx && PPC_BESTCOMM
select PPC_BESTCOMM_GEN_BD
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index b69221ba07fd..2898b737deb7 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -340,7 +340,7 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
{
int l1irq;
int l2irq;
- struct irq_chip *irqchip;
+ struct irq_chip *uninitialized_var(irqchip);
void *hndlr;
int type;
u32 reg;
@@ -373,9 +373,8 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
case MPC52xx_IRQ_L1_CRIT:
- default:
pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
- __func__, l1irq);
+ __func__, l2irq);
irq_set_chip(virq, &no_irq_chip);
return 0;
}
diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c
index 30394b409b3f..6a14cf50f4a2 100644
--- a/arch/powerpc/platforms/82xx/mpc8272_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c
@@ -16,6 +16,8 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/io.h>
diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c
index e1dceeec4994..e5f82ec8df17 100644
--- a/arch/powerpc/platforms/82xx/pq2fads.c
+++ b/arch/powerpc/platforms/82xx/pq2fads.c
@@ -15,6 +15,8 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index 7bc315822935..fd71cfdf2380 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -204,7 +204,6 @@ static int mcu_remove(struct i2c_client *client)
ret = mcu_gpiochip_remove(mcu);
if (ret)
return ret;
- i2c_set_clientdata(client, NULL);
kfree(mcu);
return 0;
}
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 1d769a29249f..3d9716ccd327 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -20,6 +20,8 @@
#include <linux/freezer.h>
#include <linux/suspend.h>
#include <linux/fsl_devices.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/export.h>
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index de2eb9320993..4d4634958cfb 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -218,83 +218,16 @@ config GE_IMP3A
This board is a 3U CompactPCI Single Board Computer with a Freescale
P2020 processor.
-config P2041_RDB
- bool "Freescale P2041 RDB"
- select DEFAULT_UIMAGE
- select PPC_E500MC
- select PHYS_64BIT
- select SWIOTLB
- select ARCH_REQUIRE_GPIOLIB
- select GPIO_MPC8XXX
- select HAS_RAPIDIO
- select PPC_EPAPR_HV_PIC
- help
- This option enables support for the P2041 RDB board
-
-config P3041_DS
- bool "Freescale P3041 DS"
- select DEFAULT_UIMAGE
- select PPC_E500MC
- select PHYS_64BIT
- select SWIOTLB
- select ARCH_REQUIRE_GPIOLIB
- select GPIO_MPC8XXX
- select HAS_RAPIDIO
- select PPC_EPAPR_HV_PIC
- help
- This option enables support for the P3041 DS board
-
-config P4080_DS
- bool "Freescale P4080 DS"
- select DEFAULT_UIMAGE
- select PPC_E500MC
- select PHYS_64BIT
- select SWIOTLB
- select ARCH_REQUIRE_GPIOLIB
- select GPIO_MPC8XXX
- select HAS_RAPIDIO
- select PPC_EPAPR_HV_PIC
- help
- This option enables support for the P4080 DS board
-
config SGY_CTS1000
tristate "Servergy CTS-1000 support"
select GPIOLIB
select OF_GPIO
- depends on P4080_DS
+ depends on CORENET_GENERIC
help
Enable this to support functionality in Servergy's CTS-1000 systems.
endif # PPC32
-config P5020_DS
- bool "Freescale P5020 DS"
- select DEFAULT_UIMAGE
- select E500
- select PPC_E500MC
- select PHYS_64BIT
- select SWIOTLB
- select ARCH_REQUIRE_GPIOLIB
- select GPIO_MPC8XXX
- select HAS_RAPIDIO
- select PPC_EPAPR_HV_PIC
- help
- This option enables support for the P5020 DS board
-
-config P5040_DS
- bool "Freescale P5040 DS"
- select DEFAULT_UIMAGE
- select E500
- select PPC_E500MC
- select PHYS_64BIT
- select SWIOTLB
- select ARCH_REQUIRE_GPIOLIB
- select GPIO_MPC8XXX
- select HAS_RAPIDIO
- select PPC_EPAPR_HV_PIC
- help
- This option enables support for the P5040 DS board
-
config PPC_QEMU_E500
bool "QEMU generic e500 platform"
select DEFAULT_UIMAGE
@@ -310,10 +243,8 @@ config PPC_QEMU_E500
unset based on the emulated CPU (or actual host CPU in the case
of KVM).
-if PPC64
-
-config T4240_QDS
- bool "Freescale T4240 QDS"
+config CORENET_GENERIC
+ bool "Freescale CoreNet Generic"
select DEFAULT_UIMAGE
select E500
select PPC_E500MC
@@ -324,26 +255,14 @@ config T4240_QDS
select HAS_RAPIDIO
select PPC_EPAPR_HV_PIC
help
- This option enables support for the T4240 QDS board
-
-config B4_QDS
- bool "Freescale B4 QDS"
- select DEFAULT_UIMAGE
- select E500
- select PPC_E500MC
- select PHYS_64BIT
- select SWIOTLB
- select GPIOLIB
- select ARCH_REQUIRE_GPIOLIB
- select HAS_RAPIDIO
- select PPC_EPAPR_HV_PIC
- help
- This option enables support for the B4 QDS board
- The B4 application development system B4 QDS is a complete
- debugging environment intended for engineers developing
- applications for the B4.
+	  This option enables support for the FSL CoreNet based boards.
+	  For a 32-bit kernel, the following boards are supported:
+	    P2041 RDB, P3041 DS and P4080 DS
+	  For a 64-bit kernel, the following boards are supported:
+	    T4240 QDS and B4 QDS
+	  The following boards are supported with both 32-bit and 64-bit kernels:
+	    P5020 DS and P5040 DS
-endif
endif # FSL_SOC_BOOKE
config TQM85xx
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 53c9f75a6907..dd4c0b59577b 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -18,13 +18,7 @@ obj-$(CONFIG_P1010_RDB) += p1010rdb.o
obj-$(CONFIG_P1022_DS) += p1022_ds.o
obj-$(CONFIG_P1022_RDK) += p1022_rdk.o
obj-$(CONFIG_P1023_RDS) += p1023_rds.o
-obj-$(CONFIG_P2041_RDB) += p2041_rdb.o corenet_ds.o
-obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o
-obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o
-obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o
-obj-$(CONFIG_P5040_DS) += p5040_ds.o corenet_ds.o
-obj-$(CONFIG_T4240_QDS) += t4240_qds.o corenet_ds.o
-obj-$(CONFIG_B4_QDS) += b4_qds.o corenet_ds.o
+obj-$(CONFIG_CORENET_GENERIC) += corenet_generic.o
obj-$(CONFIG_STX_GP3) += stx_gp3.o
obj-$(CONFIG_TQM85xx) += tqm85xx.o
obj-$(CONFIG_SBC8548) += sbc8548.o
diff --git a/arch/powerpc/platforms/85xx/b4_qds.c b/arch/powerpc/platforms/85xx/b4_qds.c
deleted file mode 100644
index 0c6702f8b88e..000000000000
--- a/arch/powerpc/platforms/85xx/b4_qds.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * B4 QDS Setup
- * Should apply for QDS platform of B4860 and it's personalities.
- * viz B4860/B4420/B4220QDS
- *
- * Copyright 2012 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/phy.h>
-
-#include <asm/time.h>
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <mm/mmu_decl.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-#include <asm/mpic.h>
-
-#include <linux/of_platform.h>
-#include <sysdev/fsl_soc.h>
-#include <sysdev/fsl_pci.h>
-#include <asm/ehv_pic.h>
-
-#include "corenet_ds.h"
-
-/*
- * Called very early, device-tree isn't unflattened
- */
-static int __init b4_qds_probe(void)
-{
- unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
-
- if ((of_flat_dt_is_compatible(root, "fsl,B4860QDS")) ||
- (of_flat_dt_is_compatible(root, "fsl,B4420QDS")) ||
- (of_flat_dt_is_compatible(root, "fsl,B4220QDS")))
- return 1;
-
- /* Check if we're running under the Freescale hypervisor */
- if ((of_flat_dt_is_compatible(root, "fsl,B4860QDS-hv")) ||
- (of_flat_dt_is_compatible(root, "fsl,B4420QDS-hv")) ||
- (of_flat_dt_is_compatible(root, "fsl,B4220QDS-hv"))) {
- ppc_md.init_IRQ = ehv_pic_init;
- ppc_md.get_irq = ehv_pic_get_irq;
- ppc_md.restart = fsl_hv_restart;
- ppc_md.power_off = fsl_hv_halt;
- ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
- return 1;
- }
-
- return 0;
-}
-
-define_machine(b4_qds) {
- .name = "B4 QDS",
- .probe = b4_qds_probe,
- .setup_arch = corenet_ds_setup_arch,
- .init_IRQ = corenet_ds_pic_init,
-#ifdef CONFIG_PCI
- .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
-#endif
-/* coreint doesn't play nice with lazy EE, use legacy mpic for now */
-#ifdef CONFIG_PPC64
- .get_irq = mpic_get_irq,
-#else
- .get_irq = mpic_get_coreint_irq,
-#endif
- .restart = fsl_rstcr_restart,
- .calibrate_decr = generic_calibrate_decr,
- .progress = udbg_progress,
-#ifdef CONFIG_PPC64
- .power_save = book3e_idle,
-#else
- .power_save = e500_idle,
-#endif
-};
-
-machine_arch_initcall(b4_qds, corenet_ds_publish_devices);
-
-#ifdef CONFIG_SWIOTLB
-machine_arch_initcall(b4_qds, swiotlb_setup_bus_notifier);
-#endif
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 6208e49142bf..213d5b815827 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -11,6 +11,7 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index d0861a0d8360..eba78c85303f 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -5,6 +5,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sysdev/cpm2_pic.h>
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
deleted file mode 100644
index aa3690bae415..000000000000
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Corenet based SoC DS Setup
- *
- * Maintained by Kumar Gala (see MAINTAINERS for contact information)
- *
- * Copyright 2009-2011 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include <asm/time.h>
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-#include <mm/mmu_decl.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-#include <asm/mpic.h>
-
-#include <linux/of_platform.h>
-#include <sysdev/fsl_soc.h>
-#include <sysdev/fsl_pci.h>
-#include "smp.h"
-
-void __init corenet_ds_pic_init(void)
-{
- struct mpic *mpic;
- unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU |
- MPIC_NO_RESET;
-
- if (ppc_md.get_irq == mpic_get_coreint_irq)
- flags |= MPIC_ENABLE_COREINT;
-
- mpic = mpic_alloc(NULL, 0, flags, 0, 512, " OpenPIC ");
- BUG_ON(mpic == NULL);
-
- mpic_init(mpic);
-}
-
-/*
- * Setup the architecture
- */
-void __init corenet_ds_setup_arch(void)
-{
- mpc85xx_smp_init();
-
- swiotlb_detect_4g();
-
- pr_info("%s board from Freescale Semiconductor\n", ppc_md.name);
-}
-
-static const struct of_device_id of_device_ids[] = {
- {
- .compatible = "simple-bus"
- },
- {
- .compatible = "fsl,srio",
- },
- {
- .compatible = "fsl,p4080-pcie",
- },
- {
- .compatible = "fsl,qoriq-pcie-v2.2",
- },
- {
- .compatible = "fsl,qoriq-pcie-v2.3",
- },
- {
- .compatible = "fsl,qoriq-pcie-v2.4",
- },
- {
- .compatible = "fsl,qoriq-pcie-v3.0",
- },
- /* The following two are for the Freescale hypervisor */
- {
- .name = "hypervisor",
- },
- {
- .name = "handles",
- },
- {}
-};
-
-int __init corenet_ds_publish_devices(void)
-{
- return of_platform_bus_probe(NULL, of_device_ids, NULL);
-}
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.h b/arch/powerpc/platforms/85xx/corenet_ds.h
deleted file mode 100644
index ddd700b23031..000000000000
--- a/arch/powerpc/platforms/85xx/corenet_ds.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Corenet based SoC DS Setup
- *
- * Copyright 2009 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef CORENET_DS_H
-#define CORENET_DS_H
-
-extern void __init corenet_ds_pic_init(void);
-extern void __init corenet_ds_setup_arch(void);
-extern int __init corenet_ds_publish_devices(void);
-
-#endif
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
new file mode 100644
index 000000000000..fbd871e69754
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -0,0 +1,182 @@
+/*
+ * Corenet based SoC DS Setup
+ *
+ * Maintained by Kumar Gala (see MAINTAINERS for contact information)
+ *
+ * Copyright 2009-2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+#include <asm/ehv_pic.h>
+
+#include <linux/of_platform.h>
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+#include "smp.h"
+
+void __init corenet_gen_pic_init(void)
+{
+ struct mpic *mpic;
+ unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU |
+ MPIC_NO_RESET;
+
+ if (ppc_md.get_irq == mpic_get_coreint_irq)
+ flags |= MPIC_ENABLE_COREINT;
+
+ mpic = mpic_alloc(NULL, 0, flags, 0, 512, " OpenPIC ");
+ BUG_ON(mpic == NULL);
+
+ mpic_init(mpic);
+}
+
+/*
+ * Setup the architecture
+ */
+void __init corenet_gen_setup_arch(void)
+{
+ mpc85xx_smp_init();
+
+ swiotlb_detect_4g();
+
+ pr_info("%s board from Freescale Semiconductor\n", ppc_md.name);
+}
+
+static const struct of_device_id of_device_ids[] = {
+ {
+ .compatible = "simple-bus"
+ },
+ {
+ .compatible = "fsl,srio",
+ },
+ {
+ .compatible = "fsl,p4080-pcie",
+ },
+ {
+ .compatible = "fsl,qoriq-pcie-v2.2",
+ },
+ {
+ .compatible = "fsl,qoriq-pcie-v2.3",
+ },
+ {
+ .compatible = "fsl,qoriq-pcie-v2.4",
+ },
+ {
+ .compatible = "fsl,qoriq-pcie-v3.0",
+ },
+ /* The following two are for the Freescale hypervisor */
+ {
+ .name = "hypervisor",
+ },
+ {
+ .name = "handles",
+ },
+ {}
+};
+
+int __init corenet_gen_publish_devices(void)
+{
+ return of_platform_bus_probe(NULL, of_device_ids, NULL);
+}
+
+static const char * const boards[] __initconst = {
+ "fsl,P2041RDB",
+ "fsl,P3041DS",
+ "fsl,P4080DS",
+ "fsl,P5020DS",
+ "fsl,P5040DS",
+ "fsl,T4240QDS",
+ "fsl,B4860QDS",
+ "fsl,B4420QDS",
+ "fsl,B4220QDS",
+ NULL
+};
+
+static const char * const hv_boards[] __initconst = {
+ "fsl,P2041RDB-hv",
+ "fsl,P3041DS-hv",
+ "fsl,P4080DS-hv",
+ "fsl,P5020DS-hv",
+ "fsl,P5040DS-hv",
+ "fsl,T4240QDS-hv",
+ "fsl,B4860QDS-hv",
+ "fsl,B4420QDS-hv",
+ "fsl,B4220QDS-hv",
+ NULL
+};
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init corenet_generic_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+#ifdef CONFIG_SMP
+ extern struct smp_ops_t smp_85xx_ops;
+#endif
+
+ if (of_flat_dt_match(root, boards))
+ return 1;
+
+ /* Check if we're running under the Freescale hypervisor */
+ if (of_flat_dt_match(root, hv_boards)) {
+ ppc_md.init_IRQ = ehv_pic_init;
+ ppc_md.get_irq = ehv_pic_get_irq;
+ ppc_md.restart = fsl_hv_restart;
+ ppc_md.power_off = fsl_hv_halt;
+ ppc_md.halt = fsl_hv_halt;
+#ifdef CONFIG_SMP
+ /*
+ * Disable the timebase sync operations because we can't write
+ * to the timebase registers under the hypervisor.
+ */
+ smp_85xx_ops.give_timebase = NULL;
+ smp_85xx_ops.take_timebase = NULL;
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+define_machine(corenet_generic) {
+ .name = "CoreNet Generic",
+ .probe = corenet_generic_probe,
+ .setup_arch = corenet_gen_setup_arch,
+ .init_IRQ = corenet_gen_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_coreint_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+#ifdef CONFIG_PPC64
+ .power_save = book3e_idle,
+#else
+ .power_save = e500_idle,
+#endif
+};
+
+machine_arch_initcall(corenet_generic, corenet_gen_publish_devices);
+
+#ifdef CONFIG_SWIOTLB
+machine_arch_initcall(corenet_generic, swiotlb_setup_bus_notifier);
+#endif
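The probe above runs before the device tree is unflattened, which is why it matches the flat-tree root against NULL-terminated compatible lists instead of checking one board at a time. A reduced sketch of that pattern, using hypothetical compatible strings (example_boards and example_probe are made-up names):

	#include <linux/init.h>
	#include <linux/of_fdt.h>

	static const char * const example_boards[] __initconst = {
		"vendor,board-a",			/* hypothetical compatibles */
		"vendor,board-b",
		NULL					/* list must be NULL terminated */
	};

	static int __init example_probe(void)
	{
		unsigned long root = of_get_flat_dt_root();

		return of_flat_dt_match(root, example_boards) != 0;
	}
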
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index 0252961392d5..d6a3dd311494 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -66,6 +66,8 @@ static int __init p1010_rdb_probe(void)
if (of_flat_dt_is_compatible(root, "fsl,P1010RDB"))
return 1;
+ if (of_flat_dt_is_compatible(root, "fsl,P1010RDB-PB"))
+ return 1;
return 0;
}
diff --git a/arch/powerpc/platforms/85xx/p2041_rdb.c b/arch/powerpc/platforms/85xx/p2041_rdb.c
deleted file mode 100644
index 000c0892fc40..000000000000
--- a/arch/powerpc/platforms/85xx/p2041_rdb.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * P2041 RDB Setup
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/phy.h>
-
-#include <asm/time.h>
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <mm/mmu_decl.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-#include <asm/mpic.h>
-
-#include <linux/of_platform.h>
-#include <sysdev/fsl_soc.h>
-#include <sysdev/fsl_pci.h>
-#include <asm/ehv_pic.h>
-
-#include "corenet_ds.h"
-
-/*
- * Called very early, device-tree isn't unflattened
- */
-static int __init p2041_rdb_probe(void)
-{
- unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
-
- if (of_flat_dt_is_compatible(root, "fsl,P2041RDB"))
- return 1;
-
- /* Check if we're running under the Freescale hypervisor */
- if (of_flat_dt_is_compatible(root, "fsl,P2041RDB-hv")) {
- ppc_md.init_IRQ = ehv_pic_init;
- ppc_md.get_irq = ehv_pic_get_irq;
- ppc_md.restart = fsl_hv_restart;
- ppc_md.power_off = fsl_hv_halt;
- ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
- return 1;
- }
-
- return 0;
-}
-
-define_machine(p2041_rdb) {
- .name = "P2041 RDB",
- .probe = p2041_rdb_probe,
- .setup_arch = corenet_ds_setup_arch,
- .init_IRQ = corenet_ds_pic_init,
-#ifdef CONFIG_PCI
- .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
-#endif
- .get_irq = mpic_get_coreint_irq,
- .restart = fsl_rstcr_restart,
- .calibrate_decr = generic_calibrate_decr,
- .progress = udbg_progress,
- .power_save = e500_idle,
-};
-
-machine_arch_initcall(p2041_rdb, corenet_ds_publish_devices);
-
-#ifdef CONFIG_SWIOTLB
-machine_arch_initcall(p2041_rdb, swiotlb_setup_bus_notifier);
-#endif
diff --git a/arch/powerpc/platforms/85xx/p3041_ds.c b/arch/powerpc/platforms/85xx/p3041_ds.c
deleted file mode 100644
index b3edc205daa9..000000000000
--- a/arch/powerpc/platforms/85xx/p3041_ds.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * P3041 DS Setup
- *
- * Maintained by Kumar Gala (see MAINTAINERS for contact information)
- *
- * Copyright 2009-2010 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/phy.h>
-
-#include <asm/time.h>
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <mm/mmu_decl.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-#include <asm/mpic.h>
-
-#include <linux/of_platform.h>
-#include <sysdev/fsl_soc.h>
-#include <sysdev/fsl_pci.h>
-#include <asm/ehv_pic.h>
-
-#include "corenet_ds.h"
-
-/*
- * Called very early, device-tree isn't unflattened
- */
-static int __init p3041_ds_probe(void)
-{
- unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
-
- if (of_flat_dt_is_compatible(root, "fsl,P3041DS"))
- return 1;
-
- /* Check if we're running under the Freescale hypervisor */
- if (of_flat_dt_is_compatible(root, "fsl,P3041DS-hv")) {
- ppc_md.init_IRQ = ehv_pic_init;
- ppc_md.get_irq = ehv_pic_get_irq;
- ppc_md.restart = fsl_hv_restart;
- ppc_md.power_off = fsl_hv_halt;
- ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
- return 1;
- }
-
- return 0;
-}
-
-define_machine(p3041_ds) {
- .name = "P3041 DS",
- .probe = p3041_ds_probe,
- .setup_arch = corenet_ds_setup_arch,
- .init_IRQ = corenet_ds_pic_init,
-#ifdef CONFIG_PCI
- .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
-#endif
- .get_irq = mpic_get_coreint_irq,
- .restart = fsl_rstcr_restart,
- .calibrate_decr = generic_calibrate_decr,
- .progress = udbg_progress,
- .power_save = e500_idle,
-};
-
-machine_arch_initcall(p3041_ds, corenet_ds_publish_devices);
-
-#ifdef CONFIG_SWIOTLB
-machine_arch_initcall(p3041_ds, swiotlb_setup_bus_notifier);
-#endif
diff --git a/arch/powerpc/platforms/85xx/p4080_ds.c b/arch/powerpc/platforms/85xx/p4080_ds.c
deleted file mode 100644
index 54df10632aea..000000000000
--- a/arch/powerpc/platforms/85xx/p4080_ds.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * P4080 DS Setup
- *
- * Maintained by Kumar Gala (see MAINTAINERS for contact information)
- *
- * Copyright 2009 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include <asm/time.h>
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <mm/mmu_decl.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-#include <asm/mpic.h>
-
-#include <linux/of_platform.h>
-#include <sysdev/fsl_soc.h>
-#include <sysdev/fsl_pci.h>
-#include <asm/ehv_pic.h>
-
-#include "corenet_ds.h"
-
-/*
- * Called very early, device-tree isn't unflattened
- */
-static int __init p4080_ds_probe(void)
-{
- unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
-
- if (of_flat_dt_is_compatible(root, "fsl,P4080DS"))
- return 1;
-
- /* Check if we're running under the Freescale hypervisor */
- if (of_flat_dt_is_compatible(root, "fsl,P4080DS-hv")) {
- ppc_md.init_IRQ = ehv_pic_init;
- ppc_md.get_irq = ehv_pic_get_irq;
- ppc_md.restart = fsl_hv_restart;
- ppc_md.power_off = fsl_hv_halt;
- ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
- return 1;
- }
-
- return 0;
-}
-
-define_machine(p4080_ds) {
- .name = "P4080 DS",
- .probe = p4080_ds_probe,
- .setup_arch = corenet_ds_setup_arch,
- .init_IRQ = corenet_ds_pic_init,
-#ifdef CONFIG_PCI
- .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
-#endif
- .get_irq = mpic_get_coreint_irq,
- .restart = fsl_rstcr_restart,
- .calibrate_decr = generic_calibrate_decr,
- .progress = udbg_progress,
- .power_save = e500_idle,
-};
-
-machine_arch_initcall(p4080_ds, corenet_ds_publish_devices);
-#ifdef CONFIG_SWIOTLB
-machine_arch_initcall(p4080_ds, swiotlb_setup_bus_notifier);
-#endif
diff --git a/arch/powerpc/platforms/85xx/p5020_ds.c b/arch/powerpc/platforms/85xx/p5020_ds.c
deleted file mode 100644
index 39cfa4044e6c..000000000000
--- a/arch/powerpc/platforms/85xx/p5020_ds.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * P5020 DS Setup
- *
- * Maintained by Kumar Gala (see MAINTAINERS for contact information)
- *
- * Copyright 2009-2010 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/phy.h>
-
-#include <asm/time.h>
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <mm/mmu_decl.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-#include <asm/mpic.h>
-
-#include <linux/of_platform.h>
-#include <sysdev/fsl_soc.h>
-#include <sysdev/fsl_pci.h>
-#include <asm/ehv_pic.h>
-
-#include "corenet_ds.h"
-
-/*
- * Called very early, device-tree isn't unflattened
- */
-static int __init p5020_ds_probe(void)
-{
- unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
-
- if (of_flat_dt_is_compatible(root, "fsl,P5020DS"))
- return 1;
-
- /* Check if we're running under the Freescale hypervisor */
- if (of_flat_dt_is_compatible(root, "fsl,P5020DS-hv")) {
- ppc_md.init_IRQ = ehv_pic_init;
- ppc_md.get_irq = ehv_pic_get_irq;
- ppc_md.restart = fsl_hv_restart;
- ppc_md.power_off = fsl_hv_halt;
- ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
- return 1;
- }
-
- return 0;
-}
-
-define_machine(p5020_ds) {
- .name = "P5020 DS",
- .probe = p5020_ds_probe,
- .setup_arch = corenet_ds_setup_arch,
- .init_IRQ = corenet_ds_pic_init,
-#ifdef CONFIG_PCI
- .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
-#endif
- .get_irq = mpic_get_coreint_irq,
- .restart = fsl_rstcr_restart,
- .calibrate_decr = generic_calibrate_decr,
- .progress = udbg_progress,
-#ifdef CONFIG_PPC64
- .power_save = book3e_idle,
-#else
- .power_save = e500_idle,
-#endif
-};
-
-machine_arch_initcall(p5020_ds, corenet_ds_publish_devices);
-
-#ifdef CONFIG_SWIOTLB
-machine_arch_initcall(p5020_ds, swiotlb_setup_bus_notifier);
-#endif
diff --git a/arch/powerpc/platforms/85xx/p5040_ds.c b/arch/powerpc/platforms/85xx/p5040_ds.c
deleted file mode 100644
index f70e74cddf97..000000000000
--- a/arch/powerpc/platforms/85xx/p5040_ds.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * P5040 DS Setup
- *
- * Copyright 2009-2010 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-
-#include <asm/machdep.h>
-#include <asm/udbg.h>
-#include <asm/mpic.h>
-
-#include <linux/of_fdt.h>
-
-#include <sysdev/fsl_soc.h>
-#include <sysdev/fsl_pci.h>
-#include <asm/ehv_pic.h>
-
-#include "corenet_ds.h"
-
-/*
- * Called very early, device-tree isn't unflattened
- */
-static int __init p5040_ds_probe(void)
-{
- unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
-
- if (of_flat_dt_is_compatible(root, "fsl,P5040DS"))
- return 1;
-
- /* Check if we're running under the Freescale hypervisor */
- if (of_flat_dt_is_compatible(root, "fsl,P5040DS-hv")) {
- ppc_md.init_IRQ = ehv_pic_init;
- ppc_md.get_irq = ehv_pic_get_irq;
- ppc_md.restart = fsl_hv_restart;
- ppc_md.power_off = fsl_hv_halt;
- ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
- return 1;
- }
-
- return 0;
-}
-
-define_machine(p5040_ds) {
- .name = "P5040 DS",
- .probe = p5040_ds_probe,
- .setup_arch = corenet_ds_setup_arch,
- .init_IRQ = corenet_ds_pic_init,
-#ifdef CONFIG_PCI
- .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
-#endif
- .get_irq = mpic_get_coreint_irq,
- .restart = fsl_rstcr_restart,
- .calibrate_decr = generic_calibrate_decr,
- .progress = udbg_progress,
-#ifdef CONFIG_PPC64
- .power_save = book3e_idle,
-#else
- .power_save = e500_idle,
-#endif
-};
-
-machine_arch_initcall(p5040_ds, corenet_ds_publish_devices);
-
-#ifdef CONFIG_SWIOTLB
-machine_arch_initcall(p5040_ds, swiotlb_setup_bus_notifier);
-#endif
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 3bbbf7489487..55a9682b9529 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -9,6 +9,8 @@
*/
#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/io.h>
diff --git a/arch/powerpc/platforms/85xx/t4240_qds.c b/arch/powerpc/platforms/85xx/t4240_qds.c
deleted file mode 100644
index 91ead6b1b8af..000000000000
--- a/arch/powerpc/platforms/85xx/t4240_qds.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * T4240 QDS Setup
- *
- * Maintained by Kumar Gala (see MAINTAINERS for contact information)
- *
- * Copyright 2012 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/phy.h>
-
-#include <asm/time.h>
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <mm/mmu_decl.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-#include <asm/mpic.h>
-
-#include <linux/of_platform.h>
-#include <sysdev/fsl_soc.h>
-#include <sysdev/fsl_pci.h>
-#include <asm/ehv_pic.h>
-
-#include "corenet_ds.h"
-
-/*
- * Called very early, device-tree isn't unflattened
- */
-static int __init t4240_qds_probe(void)
-{
- unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
-
- if (of_flat_dt_is_compatible(root, "fsl,T4240QDS"))
- return 1;
-
- /* Check if we're running under the Freescale hypervisor */
- if (of_flat_dt_is_compatible(root, "fsl,T4240QDS-hv")) {
- ppc_md.init_IRQ = ehv_pic_init;
- ppc_md.get_irq = ehv_pic_get_irq;
- ppc_md.restart = fsl_hv_restart;
- ppc_md.power_off = fsl_hv_halt;
- ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
- return 1;
- }
-
- return 0;
-}
-
-define_machine(t4240_qds) {
- .name = "T4240 QDS",
- .probe = t4240_qds_probe,
- .setup_arch = corenet_ds_setup_arch,
- .init_IRQ = corenet_ds_pic_init,
-#ifdef CONFIG_PCI
- .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
-#endif
- .get_irq = mpic_get_coreint_irq,
- .restart = fsl_rstcr_restart,
- .calibrate_decr = generic_calibrate_decr,
- .progress = udbg_progress,
-#ifdef CONFIG_PPC64
- .power_save = book3e_idle,
-#else
- .power_save = e500_idle,
-#endif
-};
-
-machine_arch_initcall(t4240_qds, corenet_ds_publish_devices);
-
-#ifdef CONFIG_SWIOTLB
-machine_arch_initcall(t4240_qds, swiotlb_setup_bus_notifier);
-#endif
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c
index 9982f57c98b9..d5b98c0f958a 100644
--- a/arch/powerpc/platforms/86xx/pic.c
+++ b/arch/powerpc/platforms/86xx/pic.c
@@ -10,6 +10,7 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c
index 7d9ac6040d63..e62166681d08 100644
--- a/arch/powerpc/platforms/8xx/ep88xc.c
+++ b/arch/powerpc/platforms/8xx/ep88xc.c
@@ -10,6 +10,8 @@
*/
#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
index 866feff83c91..63084640c5c5 100644
--- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
@@ -15,6 +15,8 @@
*/
#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index 5d98398c2f5e..c1262581b63c 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -25,6 +25,8 @@
#include <linux/fs_uart_pd.h>
#include <linux/fsl_devices.h>
#include <linux/mii.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/delay.h>
diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
index 8d21ab70e06c..251aba8759e4 100644
--- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
@@ -28,6 +28,7 @@
#include <linux/fs_uart_pd.h>
#include <linux/fsl_devices.h>
#include <linux/mii.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/delay.h>
@@ -48,7 +49,7 @@ struct cpm_pin {
int port, pin, flags;
};
-static struct __initdata cpm_pin tqm8xx_pins[] = {
+static struct cpm_pin tqm8xx_pins[] __initdata = {
/* SMC1 */
{CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */
{CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
@@ -63,7 +64,7 @@ static struct __initdata cpm_pin tqm8xx_pins[] = {
{CPM_PORTC, 11, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO},
};
-static struct __initdata cpm_pin tqm8xx_fec_pins[] = {
+static struct cpm_pin tqm8xx_fec_pins[] __initdata = {
/* MII */
{CPM_PORTD, 3, CPM_PIN_OUTPUT},
{CPM_PORTD, 4, CPM_PIN_OUTPUT},
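The tqm8xx change is purely about attribute placement: section annotations such as __initdata qualify the declared object, so they go after the declarator; written between the storage class and the struct tag they attach to the type instead and the intended section placement is lost. A short illustration under that assumption (cpm_pin_example and example_pins are made-up names):

	#include <linux/init.h>

	struct cpm_pin_example { int port, pin, flags; };

	/* correct: the attribute follows the declarator, so the array lands in .init.data */
	static struct cpm_pin_example example_pins[] __initdata = {
		{ 0, 24, 0 },
	};
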
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 6704e2e20e6b..c2a566fb8bb8 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -93,22 +93,23 @@ choice
config GENERIC_CPU
bool "Generic"
+ depends on !CPU_LITTLE_ENDIAN
config CELL_CPU
bool "Cell Broadband Engine"
- depends on PPC_BOOK3S_64
+ depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
config POWER4_CPU
bool "POWER4"
- depends on PPC_BOOK3S_64
+ depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
config POWER5_CPU
bool "POWER5"
- depends on PPC_BOOK3S_64
+ depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
config POWER6_CPU
bool "POWER6"
- depends on PPC_BOOK3S_64
+ depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
config POWER7_CPU
bool "POWER7"
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index 14be2bd358b8..4278acfa2ede 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -486,7 +486,6 @@ static __init int celleb_setup_pciex(struct device_node *node,
struct pci_controller *phb)
{
struct resource r;
- struct of_irq oirq;
int virq;
/* SMMIO registers; used inside this file */
@@ -507,12 +506,11 @@ static __init int celleb_setup_pciex(struct device_node *node,
phb->ops = &scc_pciex_pci_ops;
/* internal interrupt handler */
- if (of_irq_map_one(node, 1, &oirq)) {
+ virq = irq_of_parse_and_map(node, 1);
+ if (!virq) {
pr_err("PCIEXC:Failed to map irq\n");
goto error;
}
- virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
if (request_irq(virq, pciex_handle_internal_irq,
0, "pciex", (void *)phb)) {
pr_err("PCIEXC:Failed to request irq\n");
diff --git a/arch/powerpc/platforms/cell/celleb_scc_sio.c b/arch/powerpc/platforms/cell/celleb_scc_sio.c
index 9c339ec646f5..c8eb57193826 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_sio.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_sio.c
@@ -45,7 +45,7 @@ static int __init txx9_serial_init(void)
struct device_node *node;
int i;
struct uart_port req;
- struct of_irq irq;
+ struct of_phandle_args irq;
struct resource res;
for_each_compatible_node(node, "serial", "toshiba,sio-scc") {
@@ -53,7 +53,7 @@ static int __init txx9_serial_init(void)
if (!(txx9_serial_bitmap & (1<<i)))
continue;
- if (of_irq_map_one(node, i, &irq))
+ if (of_irq_parse_one(node, i, &irq))
continue;
if (of_address_to_resource(node,
txx9_scc_tab[i].index, &res))
@@ -66,8 +66,7 @@ static int __init txx9_serial_init(void)
#ifdef CONFIG_SERIAL_TXX9_CONSOLE
req.membase = ioremap(req.mapbase, 0x24);
#endif
- req.irq = irq_create_of_mapping(irq.controller,
- irq.specifier, irq.size);
+ req.irq = irq_create_of_mapping(&irq);
req.flags |= UPF_IOREMAP | UPF_BUGGY_UART
/*HAVE_CTS_LINE*/;
req.uartclk = 83300000;
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 8e299447127e..1f72f4ab6353 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -235,12 +235,9 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
/* First, we check whether we have a real "interrupts" in the device
* tree in case the device-tree is ever fixed
*/
- struct of_irq oirq;
- if (of_irq_map_one(pic->host->of_node, 0, &oirq) == 0) {
- virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
+ virq = irq_of_parse_and_map(pic->host->of_node, 0);
+ if (virq)
return virq;
- }
/* Now do the horrible hacks */
tmp = of_get_property(pic->host->of_node, "#interrupt-cells", NULL);
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 2bb6977c0a5a..c3327f3d8cf7 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -177,21 +177,20 @@ out:
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
- struct of_irq oirq;
+ struct of_phandle_args oirq;
int ret;
int i;
for (i=0; i < 3; i++) {
- ret = of_irq_map_one(np, i, &oirq);
+ ret = of_irq_parse_one(np, i, &oirq);
if (ret) {
pr_debug("spu_new: failed to get irq %d\n", i);
goto err;
}
ret = -EINVAL;
- pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0],
- oirq.controller->full_name);
- spu->irqs[i] = irq_create_of_mapping(oirq.controller,
- oirq.specifier, oirq.size);
+ pr_debug(" irq %d no 0x%x on %s\n", i, oirq.args[0],
+ oirq.np->full_name);
+ spu->irqs[i] = irq_create_of_mapping(&oirq);
if (spu->irqs[i] == NO_IRQ) {
pr_debug("spu_new: failed to map it !\n");
goto err;
@@ -200,7 +199,7 @@ static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
return 0;
err:
- pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier,
+ pr_debug("failed to map irq %x for spu %s\n", *oirq.args,
spu->name);
for (; i >= 0; i--) {
if (spu->irqs[i] != NO_IRQ)
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c
index d3ceff04ffc7..9ef8cc3378d0 100644
--- a/arch/powerpc/platforms/chrp/nvram.c
+++ b/arch/powerpc/platforms/chrp/nvram.c
@@ -66,7 +66,7 @@ static void chrp_nvram_write(int addr, unsigned char val)
void __init chrp_nvram_init(void)
{
struct device_node *nvram;
- const unsigned int *nbytes_p;
+ const __be32 *nbytes_p;
unsigned int proplen;
nvram = of_find_node_by_type(NULL, "nvram");
@@ -79,7 +79,7 @@ void __init chrp_nvram_init(void)
return;
}
- nvram_size = *nbytes_p;
+ nvram_size = be32_to_cpup(nbytes_p);
printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size);
of_node_put(nvram);
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index 53d6eee01963..4cde8e7da4b8 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <asm/io.h>
#include "flipper-pic.h"
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 3006b5117ec6..6c03034dbbd3 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -18,6 +18,8 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/io.h>
#include "hlwd-pic.h"
@@ -181,6 +183,7 @@ struct irq_domain *hlwd_pic_init(struct device_node *np)
&hlwd_irq_domain_ops, io_base);
if (!irq_domain) {
pr_err("failed to allocate irq_domain\n");
+ iounmap(io_base);
return NULL;
}
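The one-line addition here plugs a leak on the failure path: the ioremap'ed registers were not released when the irq_domain could not be allocated. A condensed sketch of the intended cleanup shape, assuming a simple linear domain (example_pic_init is an illustrative name):

	#include <linux/io.h>
	#include <linux/irqdomain.h>
	#include <linux/of_address.h>

	static struct irq_domain *example_pic_init(struct device_node *np)
	{
		void __iomem *io_base = of_iomap(np, 0);
		struct irq_domain *d;

		if (!io_base)
			return NULL;
		d = irq_domain_add_linear(np, 64, &irq_domain_simple_ops, io_base);
		if (!d)
			iounmap(io_base);	/* don't leak the mapping on failure */
		return d;
	}
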
diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c
index 92ac9b52b32d..b97f6f3d3c5b 100644
--- a/arch/powerpc/platforms/fsl_uli1575.c
+++ b/arch/powerpc/platforms/fsl_uli1575.c
@@ -321,8 +321,7 @@ static void hpcd_final_uli5288(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct device_node *hosenode = hose ? hose->dn : NULL;
- struct of_irq oirq;
- int virq, pin = 2;
+ struct of_phandle_args oirq;
u32 laddr[3];
if (!machine_is(mpc86xx_hpcd))
@@ -331,12 +330,13 @@ static void hpcd_final_uli5288(struct pci_dev *dev)
if (!hosenode)
return;
+ oirq.np = hosenode;
+ oirq.args[0] = 2;
+ oirq.args_count = 1;
laddr[0] = (hose->first_busno << 16) | (PCI_DEVFN(31, 0) << 8);
laddr[1] = laddr[2] = 0;
- of_irq_map_raw(hosenode, &pin, 1, laddr, &oirq);
- virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
- dev->irq = virq;
+ of_irq_parse_raw(laddr, &oirq);
+ dev->irq = irq_create_of_mapping(&oirq);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, hpcd_quirk_uli1575);
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
index 0237ab782fb8..15adee544638 100644
--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
@@ -30,6 +30,7 @@
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/phy.h>
+#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
index f5e3cda6660e..e49d07f3d542 100644
--- a/arch/powerpc/platforms/powermac/pfunc_base.c
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/of_irq.h>
#include <asm/pmac_feature.h>
#include <asm/pmac_pfunc.h>
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 31036b56670e..4c24bf60d39d 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -393,8 +393,8 @@ static void __init pmac_pic_probe_oldstyle(void)
#endif
}
-int of_irq_map_oldworld(struct device_node *device, int index,
- struct of_irq *out_irq)
+int of_irq_parse_oldworld(struct device_node *device, int index,
+ struct of_phandle_args *out_irq)
{
const u32 *ints = NULL;
int intlen;
@@ -422,9 +422,9 @@ int of_irq_map_oldworld(struct device_node *device, int index,
if (index >= intlen)
return -EINVAL;
- out_irq->controller = NULL;
- out_irq->specifier[0] = ints[index];
- out_irq->size = 1;
+ out_irq->np = NULL;
+ out_irq->args[0] = ints[index];
+ out_irq->args_count = 1;
return 0;
}
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 6fae5eb99ea6..9fced3f6d2dc 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -9,6 +9,8 @@ config PPC_POWERNV
select EPAPR_BOOT
select PPC_INDIRECT_PIO
select PPC_UDBG_16550
+ select PPC_SCOM
+ select ARCH_RANDOM
default y
config POWERNV_MSI
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 300c437d713c..873fa1370dc4 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -1,6 +1,8 @@
obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o
-obj-y += opal-rtc.o opal-nvram.o opal-lpc.o
+obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
+obj-y += rng.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o
+obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index cf42e74514fa..02245cee7818 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -59,26 +59,60 @@ static struct notifier_block ioda_eeh_nb = {
};
#ifdef CONFIG_DEBUG_FS
-static int ioda_eeh_dbgfs_set(void *data, u64 val)
+static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
{
struct pci_controller *hose = data;
struct pnv_phb *phb = hose->private_data;
- out_be64(phb->regs + 0xD10, val);
+ out_be64(phb->regs + offset, val);
return 0;
}
-static int ioda_eeh_dbgfs_get(void *data, u64 *val)
+static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
{
struct pci_controller *hose = data;
struct pnv_phb *phb = hose->private_data;
- *val = in_be64(phb->regs + 0xD10);
+ *val = in_be64(phb->regs + offset);
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_dbgfs_ops, ioda_eeh_dbgfs_get,
- ioda_eeh_dbgfs_set, "0x%llx\n");
+static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
+{
+ return ioda_eeh_dbgfs_set(data, 0xD10, val);
+}
+
+static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
+{
+ return ioda_eeh_dbgfs_get(data, 0xD10, val);
+}
+
+static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
+{
+ return ioda_eeh_dbgfs_set(data, 0xD90, val);
+}
+
+static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
+{
+ return ioda_eeh_dbgfs_get(data, 0xD90, val);
+}
+
+static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
+{
+ return ioda_eeh_dbgfs_set(data, 0xE10, val);
+}
+
+static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
+{
+ return ioda_eeh_dbgfs_get(data, 0xE10, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
+ ioda_eeh_outb_dbgfs_set, "0x%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
+ ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
+ ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
#endif /* CONFIG_DEBUG_FS */
/**
@@ -106,27 +140,30 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
ioda_eeh_nb_init = 1;
}
- /* FIXME: Enable it for PHB3 later */
- if (phb->type == PNV_PHB_IODA1) {
+	/* HUB diag-data is not needed on PHB3 */
+ if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
+ hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (!hub_diag) {
- hub_diag = (char *)__get_free_page(GFP_KERNEL |
- __GFP_ZERO);
- if (!hub_diag) {
- pr_err("%s: Out of memory !\n",
- __func__);
- return -ENOMEM;
- }
+ pr_err("%s: Out of memory !\n", __func__);
+ return -ENOMEM;
}
+ }
#ifdef CONFIG_DEBUG_FS
- if (phb->dbgfs)
- debugfs_create_file("err_injct", 0600,
- phb->dbgfs, hose,
- &ioda_eeh_dbgfs_ops);
+ if (phb->dbgfs) {
+ debugfs_create_file("err_injct_outbound", 0600,
+ phb->dbgfs, hose,
+ &ioda_eeh_outb_dbgfs_ops);
+ debugfs_create_file("err_injct_inboundA", 0600,
+ phb->dbgfs, hose,
+ &ioda_eeh_inbA_dbgfs_ops);
+ debugfs_create_file("err_injct_inboundB", 0600,
+ phb->dbgfs, hose,
+ &ioda_eeh_inbB_dbgfs_ops);
+ }
#endif
- phb->eeh_state |= PNV_EEH_STATE_ENABLED;
- }
+ phb->eeh_state |= PNV_EEH_STATE_ENABLED;
return 0;
}
@@ -546,8 +583,8 @@ static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
if (ret) {
spin_unlock_irqrestore(&phb->lock, flags);
- pr_warning("%s: Failed to get log for PHB#%x-PE#%x\n",
- __func__, hose->global_number, pe->addr);
+ pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n",
+ __func__, hose->global_number, pe->addr, ret);
return -EIO;
}
@@ -710,6 +747,73 @@ static void ioda_eeh_p7ioc_phb_diag(struct pci_controller *hose,
}
}
+static void ioda_eeh_phb3_phb_diag(struct pci_controller *hose,
+ struct OpalIoPhbErrorCommon *common)
+{
+ struct OpalIoPhb3ErrorData *data;
+ int i;
+
+ data = (struct OpalIoPhb3ErrorData*)common;
+ pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n\n",
+ hose->global_number, common->version);
+
+ pr_info(" brdgCtl: %08x\n", data->brdgCtl);
+
+ pr_info(" portStatusReg: %08x\n", data->portStatusReg);
+ pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
+ pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
+
+ pr_info(" deviceStatus: %08x\n", data->deviceStatus);
+ pr_info(" slotStatus: %08x\n", data->slotStatus);
+ pr_info(" linkStatus: %08x\n", data->linkStatus);
+ pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
+ pr_info(" devSecStatus: %08x\n", data->devSecStatus);
+
+ pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
+ pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
+ pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
+ pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
+ pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
+ pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
+ pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
+ pr_info(" sourceId: %08x\n", data->sourceId);
+ pr_info(" errorClass: %016llx\n", data->errorClass);
+ pr_info(" correlator: %016llx\n", data->correlator);
+ pr_info(" nFir: %016llx\n", data->nFir);
+ pr_info(" nFirMask: %016llx\n", data->nFirMask);
+ pr_info(" nFirWOF: %016llx\n", data->nFirWOF);
+ pr_info(" PhbPlssr: %016llx\n", data->phbPlssr);
+ pr_info(" PhbCsr: %016llx\n", data->phbCsr);
+ pr_info(" lemFir: %016llx\n", data->lemFir);
+ pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
+ pr_info(" lemWOF: %016llx\n", data->lemWOF);
+ pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
+ pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
+ pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
+ pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
+ pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
+ pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
+ pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
+ pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
+ pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
+ pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
+ pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
+ pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
+ pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
+ pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
+ pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
+ pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
+
+ for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
+ if ((data->pestA[i] >> 63) == 0 &&
+ (data->pestB[i] >> 63) == 0)
+ continue;
+
+ pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
+ pr_info(" PESTB: %016llx\n", data->pestB[i]);
+ }
+}
+
static void ioda_eeh_phb_diag(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
@@ -728,6 +832,9 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
ioda_eeh_p7ioc_phb_diag(hose, common);
break;
+ case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
+ ioda_eeh_phb3_phb_diag(hose, common);
+ break;
default:
pr_warning("%s: Unrecognized I/O chip %d\n",
__func__, common->ioType);
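The three debugfs files added above map one-to-one onto the PHB error-injection registers at offsets 0xD10 (outbound), 0xD90 (inbound A) and 0xE10 (inbound B). A minimal user-space sketch of poking one of them follows; the debugfs mount point and the per-PHB directory name ("PCI0001") are assumptions for illustration only, not part of this patch.

/* Hypothetical sketch: write an injection value to one PHB's outbound
 * error-injection register through the new debugfs file.  The directory
 * name under /sys/kernel/debug/powerpc is an assumption. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/powerpc/PCI0001/err_injct_outbound";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* the simple attribute accepts a plain or 0x-prefixed 64-bit value */
	fprintf(f, "0x8000000000000000\n");
	return fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS;
}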
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 79663d26e6ea..73b981438cc5 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -144,11 +144,8 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
/*
* Enable EEH explicitly so that we will do EEH check
* while accessing I/O stuff
- *
- * FIXME: Enable that for PHB3 later
*/
- if (phb->type == PNV_PHB_IODA1)
- eeh_subsystem_enabled = 1;
+ eeh_subsystem_enabled = 1;
/* Save memory bars */
eeh_save_bars(edev);
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
new file mode 100644
index 000000000000..6ffa6b1ec5b7
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-flash.c
@@ -0,0 +1,667 @@
+/*
+ * PowerNV OPAL Firmware Update Interface
+ *
+ * Copyright 2013 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+
+#include <asm/opal.h>
+
+/* FLASH status codes */
+#define FLASH_NO_OP -1099 /* No operation initiated by user */
+#define FLASH_NO_AUTH -9002 /* Not a service authority partition */
+
+/* Validate image status values */
+#define VALIDATE_IMG_READY -1001 /* Image ready for validation */
+#define VALIDATE_IMG_INCOMPLETE -1002 /* User copied < VALIDATE_BUF_SIZE */
+
+/* Manage image status values */
+#define MANAGE_ACTIVE_ERR -9001 /* Cannot overwrite active img */
+
+/* Flash image status values */
+#define FLASH_IMG_READY 0 /* Img ready for flash on reboot */
+#define FLASH_INVALID_IMG -1003 /* Flash image shorter than expected */
+#define FLASH_IMG_NULL_DATA -1004 /* Bad data in sg list entry */
+#define FLASH_IMG_BAD_LEN -1005 /* Bad length in sg list entry */
+
+/* Manage operation tokens */
+#define FLASH_REJECT_TMP_SIDE 0 /* Reject temporary fw image */
+#define FLASH_COMMIT_TMP_SIDE 1 /* Commit temporary fw image */
+
+/* Update tokens */
+#define FLASH_UPDATE_CANCEL 0 /* Cancel update request */
+#define FLASH_UPDATE_INIT 1 /* Initiate update */
+
+/* Validate image update result tokens */
+#define VALIDATE_TMP_UPDATE 0 /* T side will be updated */
+#define VALIDATE_FLASH_AUTH 1 /* Partition does not have authority */
+#define VALIDATE_INVALID_IMG 2 /* Candidate image is not valid */
+#define VALIDATE_CUR_UNKNOWN 3 /* Current fixpack level is unknown */
+/*
+ * Current T side will be committed to P side before being replaced with new
+ * image, and the new image is downlevel from the current image
+ */
+#define VALIDATE_TMP_COMMIT_DL 4
+/*
+ * Current T side will be committed to P side before being replaced with new
+ * image
+ */
+#define VALIDATE_TMP_COMMIT 5
+/*
+ * T side will be updated with a downlevel image
+ */
+#define VALIDATE_TMP_UPDATE_DL 6
+/*
+ * The candidate image's release date is later than the system's firmware
+ * service entitlement date - service warranty period has expired
+ */
+#define VALIDATE_OUT_OF_WRNTY 7
+
+/* Validate buffer size */
+#define VALIDATE_BUF_SIZE 4096
+
+/* XXX: Assume candidate image size is <= 256MB */
+#define MAX_IMAGE_SIZE 0x10000000
+
+/* Flash sg list version */
+#define SG_LIST_VERSION (1UL)
+
+/* Image status */
+enum {
+ IMAGE_INVALID,
+ IMAGE_LOADING,
+ IMAGE_READY,
+};
+
+/* Candidate image data */
+struct image_data_t {
+ int status;
+ void *data;
+ uint32_t size;
+};
+
+/* Candidate image header */
+struct image_header_t {
+ uint16_t magic;
+ uint16_t version;
+ uint32_t size;
+};
+
+/* Scatter/gather entry */
+struct opal_sg_entry {
+ void *data;
+ long length;
+};
+
+/* We calculate the number of entries based on PAGE_SIZE */
+#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+
+/*
+ * This struct is very similar but not identical to that
+ * needed by the opal flash update. All we need to do for
+ * opal is rewrite num_entries into a version/length and
+ * translate the pointers to absolute.
+ */
+struct opal_sg_list {
+ unsigned long num_entries;
+ struct opal_sg_list *next;
+ struct opal_sg_entry entry[SG_ENTRIES_PER_NODE];
+};
+
+struct validate_flash_t {
+ int status; /* Return status */
+ void *buf; /* Candidate image buffer */
+ uint32_t buf_size; /* Image size */
+ uint32_t result; /* Update results token */
+};
+
+struct manage_flash_t {
+ int status; /* Return status */
+};
+
+struct update_flash_t {
+ int status; /* Return status */
+};
+
+static struct image_header_t image_header;
+static struct image_data_t image_data;
+static struct validate_flash_t validate_flash_data;
+static struct manage_flash_t manage_flash_data;
+static struct update_flash_t update_flash_data;
+
+static DEFINE_MUTEX(image_data_mutex);
+
+/*
+ * Validate candidate image
+ */
+static inline void opal_flash_validate(void)
+{
+ struct validate_flash_t *args_buf = &validate_flash_data;
+
+ args_buf->status = opal_validate_flash(__pa(args_buf->buf),
+ &(args_buf->buf_size),
+ &(args_buf->result));
+}
+
+/*
+ * Validate output format:
+ * validate result token
+ * current image version details
+ * new image version details
+ */
+static ssize_t validate_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct validate_flash_t *args_buf = &validate_flash_data;
+ int len;
+
+ /* Candidate image is not validated */
+ if (args_buf->status < VALIDATE_TMP_UPDATE) {
+ len = sprintf(buf, "%d\n", args_buf->status);
+ goto out;
+ }
+
+ /* Result token */
+ len = sprintf(buf, "%d\n", args_buf->result);
+
+ /* Current and candidate image version details */
+ if ((args_buf->result != VALIDATE_TMP_UPDATE) &&
+ (args_buf->result < VALIDATE_CUR_UNKNOWN))
+ goto out;
+
+ if (args_buf->buf_size > (VALIDATE_BUF_SIZE - len)) {
+ memcpy(buf + len, args_buf->buf, VALIDATE_BUF_SIZE - len);
+ len = VALIDATE_BUF_SIZE;
+ } else {
+ memcpy(buf + len, args_buf->buf, args_buf->buf_size);
+ len += args_buf->buf_size;
+ }
+out:
+ /* Set status to default */
+ args_buf->status = FLASH_NO_OP;
+ return len;
+}
+
+/*
+ * Validate candidate firmware image
+ *
+ * Note:
+ * We are only interested in the first 4K bytes of the
+ * candidate image.
+ */
+static ssize_t validate_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct validate_flash_t *args_buf = &validate_flash_data;
+
+ if (buf[0] != '1')
+ return -EINVAL;
+
+ mutex_lock(&image_data_mutex);
+
+ if (image_data.status != IMAGE_READY ||
+ image_data.size < VALIDATE_BUF_SIZE) {
+ args_buf->result = VALIDATE_INVALID_IMG;
+ args_buf->status = VALIDATE_IMG_INCOMPLETE;
+ goto out;
+ }
+
+ /* Copy first 4k bytes of candidate image */
+ memcpy(args_buf->buf, image_data.data, VALIDATE_BUF_SIZE);
+
+ args_buf->status = VALIDATE_IMG_READY;
+ args_buf->buf_size = VALIDATE_BUF_SIZE;
+
+ /* Validate candidate image */
+ opal_flash_validate();
+
+out:
+ mutex_unlock(&image_data_mutex);
+ return count;
+}
+
+/*
+ * Manage flash routine
+ */
+static inline void opal_flash_manage(uint8_t op)
+{
+ struct manage_flash_t *const args_buf = &manage_flash_data;
+
+ args_buf->status = opal_manage_flash(op);
+}
+
+/*
+ * Show manage flash status
+ */
+static ssize_t manage_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct manage_flash_t *const args_buf = &manage_flash_data;
+ int rc;
+
+ rc = sprintf(buf, "%d\n", args_buf->status);
+ /* Set status to default */
+ args_buf->status = FLASH_NO_OP;
+ return rc;
+}
+
+/*
+ * Manage operations:
+ * 0 - Reject
+ * 1 - Commit
+ */
+static ssize_t manage_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ uint8_t op;
+ switch (buf[0]) {
+ case '0':
+ op = FLASH_REJECT_TMP_SIDE;
+ break;
+ case '1':
+ op = FLASH_COMMIT_TMP_SIDE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* commit/reject temporary image */
+ opal_flash_manage(op);
+ return count;
+}
+
+/*
+ * Free sg list
+ */
+static void free_sg_list(struct opal_sg_list *list)
+{
+ struct opal_sg_list *sg1;
+ while (list) {
+ sg1 = list->next;
+ kfree(list);
+ list = sg1;
+ }
+ list = NULL;
+}
+
+/*
+ * Build candidate image scatter gather list
+ *
+ * list format:
+ * -----------------------------------
+ * | VER (8) | Entry length in bytes |
+ * -----------------------------------
+ * | Pointer to next entry |
+ * -----------------------------------
+ * | Address of memory area 1 |
+ * -----------------------------------
+ * | Length of memory area 1 |
+ * -----------------------------------
+ * | ......... |
+ * -----------------------------------
+ * | ......... |
+ * -----------------------------------
+ * | Address of memory area N |
+ * -----------------------------------
+ * | Length of memory area N |
+ * -----------------------------------
+ */
+static struct opal_sg_list *image_data_to_sglist(void)
+{
+ struct opal_sg_list *sg1, *list = NULL;
+ void *addr;
+ int size;
+
+ addr = image_data.data;
+ size = image_data.size;
+
+ sg1 = kzalloc((sizeof(struct opal_sg_list)), GFP_KERNEL);
+ if (!sg1)
+ return NULL;
+
+ list = sg1;
+ sg1->num_entries = 0;
+ while (size > 0) {
+ /* Translate virtual address to physical address */
+ sg1->entry[sg1->num_entries].data =
+ (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
+
+ if (size > PAGE_SIZE)
+ sg1->entry[sg1->num_entries].length = PAGE_SIZE;
+ else
+ sg1->entry[sg1->num_entries].length = size;
+
+ sg1->num_entries++;
+ if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
+ sg1->next = kzalloc((sizeof(struct opal_sg_list)),
+ GFP_KERNEL);
+ if (!sg1->next) {
+ pr_err("%s : Failed to allocate memory\n",
+ __func__);
+ goto nomem;
+ }
+
+ sg1 = sg1->next;
+ sg1->num_entries = 0;
+ }
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ return list;
+nomem:
+ free_sg_list(list);
+ return NULL;
+}
+
+/*
+ * OPAL update flash
+ */
+static int opal_flash_update(int op)
+{
+ struct opal_sg_list *sg, *list, *next;
+ unsigned long addr;
+ int64_t rc = OPAL_PARAMETER;
+
+ if (op == FLASH_UPDATE_CANCEL) {
+ pr_alert("FLASH: Image update cancelled\n");
+ addr = 0;
+ goto flash;
+ }
+
+ list = image_data_to_sglist();
+ if (!list)
+ goto invalid_img;
+
+ /* First entry address */
+ addr = __pa(list);
+
+ /* Translate sg list address to absolute */
+ for (sg = list; sg; sg = next) {
+ next = sg->next;
+ /* Don't translate NULL pointer for last entry */
+ if (sg->next)
+ sg->next = (struct opal_sg_list *)__pa(sg->next);
+ else
+ sg->next = NULL;
+
+ /* Make num_entries into the version/length field */
+ sg->num_entries = (SG_LIST_VERSION << 56) |
+ (sg->num_entries * sizeof(struct opal_sg_entry) + 16);
+ }
+
+ pr_alert("FLASH: Image is %u bytes\n", image_data.size);
+ pr_alert("FLASH: Image update requested\n");
+ pr_alert("FLASH: Image will be updated during system reboot\n");
+ pr_alert("FLASH: This will take several minutes. Do not power off!\n");
+
+flash:
+ rc = opal_update_flash(addr);
+
+invalid_img:
+ return rc;
+}
+
+/*
+ * Show candidate image status
+ */
+static ssize_t update_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct update_flash_t *const args_buf = &update_flash_data;
+ return sprintf(buf, "%d\n", args_buf->status);
+}
+
+/*
+ * Set update image flag
+ * 1 - Flash new image
+ * 0 - Cancel flash request
+ */
+static ssize_t update_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct update_flash_t *const args_buf = &update_flash_data;
+ int rc = count;
+
+ mutex_lock(&image_data_mutex);
+
+ switch (buf[0]) {
+ case '0':
+ if (args_buf->status == FLASH_IMG_READY)
+ opal_flash_update(FLASH_UPDATE_CANCEL);
+ args_buf->status = FLASH_NO_OP;
+ break;
+ case '1':
+ /* Image is loaded? */
+ if (image_data.status == IMAGE_READY)
+ args_buf->status =
+ opal_flash_update(FLASH_UPDATE_INIT);
+ else
+ args_buf->status = FLASH_INVALID_IMG;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ mutex_unlock(&image_data_mutex);
+ return rc;
+}
+
+/*
+ * Free image buffer
+ */
+static void free_image_buf(void)
+{
+ void *addr;
+ int size;
+
+ addr = image_data.data;
+ size = PAGE_ALIGN(image_data.size);
+ while (size > 0) {
+ ClearPageReserved(vmalloc_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ vfree(image_data.data);
+ image_data.data = NULL;
+ image_data.status = IMAGE_INVALID;
+}
+
+/*
+ * Allocate image buffer.
+ */
+static int alloc_image_buf(char *buffer, size_t count)
+{
+ void *addr;
+ int size;
+
+ if (count < sizeof(struct image_header_t)) {
+ pr_warn("FLASH: Invalid candidate image\n");
+ return -EINVAL;
+ }
+
+ memcpy(&image_header, (void *)buffer, sizeof(struct image_header_t));
+ image_data.size = be32_to_cpu(image_header.size);
+ pr_debug("FLASH: Candiate image size = %u\n", image_data.size);
+
+ if (image_data.size > MAX_IMAGE_SIZE) {
+ pr_warn("FLASH: Too large image\n");
+ return -EINVAL;
+ }
+ if (image_data.size < VALIDATE_BUF_SIZE) {
+ pr_warn("FLASH: Image is shorter than expected\n");
+ return -EINVAL;
+ }
+
+ image_data.data = vzalloc(PAGE_ALIGN(image_data.size));
+ if (!image_data.data) {
+ pr_err("%s : Failed to allocate memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Pin memory */
+ addr = image_data.data;
+ size = PAGE_ALIGN(image_data.size);
+ while (size > 0) {
+ SetPageReserved(vmalloc_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+
+ image_data.status = IMAGE_LOADING;
+ return 0;
+}
+
+/*
+ * Copy candidate image
+ *
+ * Parse candidate image header to get total image size
+ * and pre-allocate required memory.
+ */
+static ssize_t image_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
+{
+ int rc;
+
+ mutex_lock(&image_data_mutex);
+
+ /* New image ? */
+ if (pos == 0) {
+ /* Free memory, if already allocated */
+ if (image_data.data)
+ free_image_buf();
+
+ /* Cancel outstanding image update request */
+ if (update_flash_data.status == FLASH_IMG_READY)
+ opal_flash_update(FLASH_UPDATE_CANCEL);
+
+ /* Allocate memory */
+ rc = alloc_image_buf(buffer, count);
+ if (rc)
+ goto out;
+ }
+
+ if (image_data.status != IMAGE_LOADING) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if ((pos + count) > image_data.size) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ memcpy(image_data.data + pos, (void *)buffer, count);
+ rc = count;
+
+ /* Set image status */
+ if ((pos + count) == image_data.size) {
+ pr_debug("FLASH: Candidate image loaded....\n");
+ image_data.status = IMAGE_READY;
+ }
+
+out:
+ mutex_unlock(&image_data_mutex);
+ return rc;
+}
+
+/*
+ * sysfs interface :
+ * OPAL uses the sysfs files below for code update.
+ * We create these files under /sys/firmware/opal.
+ *
+ * image : Interface to load candidate firmware image
+ * validate_flash : Validate firmware image
+ * manage_flash : Commit/Reject firmware image
+ * update_flash : Flash new firmware image
+ *
+ */
+static struct bin_attribute image_data_attr = {
+ .attr = {.name = "image", .mode = 0200},
+ .size = MAX_IMAGE_SIZE, /* Limit image size */
+ .write = image_data_write,
+};
+
+static struct kobj_attribute validate_attribute =
+ __ATTR(validate_flash, 0600, validate_show, validate_store);
+
+static struct kobj_attribute manage_attribute =
+ __ATTR(manage_flash, 0600, manage_show, manage_store);
+
+static struct kobj_attribute update_attribute =
+ __ATTR(update_flash, 0600, update_show, update_store);
+
+static struct attribute *image_op_attrs[] = {
+ &validate_attribute.attr,
+ &manage_attribute.attr,
+ &update_attribute.attr,
+ NULL /* need to NULL terminate the list of attributes */
+};
+
+static struct attribute_group image_op_attr_group = {
+ .attrs = image_op_attrs,
+};
+
+void __init opal_flash_init(void)
+{
+ int ret;
+
+ /* Allocate validate image buffer */
+ validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
+ if (!validate_flash_data.buf) {
+ pr_err("%s : Failed to allocate memory\n", __func__);
+ return;
+ }
+
+ /* Make sure /sys/firmware/opal directory is created */
+ if (!opal_kobj) {
+ pr_warn("FLASH: opal kobject is not available\n");
+ goto nokobj;
+ }
+
+ /* Create the sysfs files */
+ ret = sysfs_create_group(opal_kobj, &image_op_attr_group);
+ if (ret) {
+ pr_warn("FLASH: Failed to create sysfs files\n");
+ goto nokobj;
+ }
+
+ ret = sysfs_create_bin_file(opal_kobj, &image_data_attr);
+ if (ret) {
+ pr_warn("FLASH: Failed to create sysfs files\n");
+ goto nosysfs_file;
+ }
+
+ /* Set default status */
+ validate_flash_data.status = FLASH_NO_OP;
+ manage_flash_data.status = FLASH_NO_OP;
+ update_flash_data.status = FLASH_NO_OP;
+ image_data.status = IMAGE_INVALID;
+ return;
+
+nosysfs_file:
+ sysfs_remove_group(opal_kobj, &image_op_attr_group);
+
+nokobj:
+ kfree(validate_flash_data.buf);
+ return;
+}
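Taken together, the attributes registered in opal_flash_init() give a straightforward code-update flow from user space: stream the candidate image into "image", trigger validation through "validate_flash", optionally commit or reject the temporary side through "manage_flash", and finally request the update through "update_flash". The sketch below illustrates that sequence; it assumes the candidate image lives in a local file named firmware.img, keeps error handling minimal, and is an illustration of the interface above rather than part of the patch.

/* Illustrative user-space sequence for the OPAL code-update sysfs files.
 * Paths follow the attributes created above; "firmware.img" is an
 * assumed local file holding the candidate image. */
#include <stdio.h>
#include <stdlib.h>

static void write_str(const char *path, const char *s)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fputs(s, f);
	fclose(f);
}

int main(void)
{
	FILE *in = fopen("firmware.img", "rb");
	FILE *out = fopen("/sys/firmware/opal/image", "wb");
	char buf[4096];
	size_t n;

	if (!in || !out) {
		perror("open");
		return EXIT_FAILURE;
	}
	while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
		fwrite(buf, 1, n, out);
	fclose(in);
	fclose(out);

	/* validate, then request the flash on next reboot */
	write_str("/sys/firmware/opal/validate_flash", "1");
	write_str("/sys/firmware/opal/update_flash", "1");
	return EXIT_SUCCESS;
}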
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index a7614bb14e17..e7e59e4f9892 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -17,6 +17,7 @@
#include <asm/firmware.h>
#include <asm/xics.h>
#include <asm/opal.h>
+#include <asm/prom.h>
static int opal_lpc_chip_id = -1;
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index 3f83e1ae26ac..acd9f7e96678 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -65,7 +65,7 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
void __init opal_nvram_init(void)
{
struct device_node *np;
- const u32 *nbytes_p;
+ const __be32 *nbytes_p;
np = of_find_compatible_node(NULL, NULL, "ibm,opal-nvram");
if (np == NULL)
@@ -76,7 +76,7 @@ void __init opal_nvram_init(void)
of_node_put(np);
return;
}
- nvram_size = *nbytes_p;
+ nvram_size = be32_to_cpup(nbytes_p);
printk(KERN_INFO "OPAL nvram setup, %u bytes\n", nvram_size);
of_node_put(np);
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index 2aa7641aac9b..7d07c7e80ec0 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -37,10 +37,12 @@ unsigned long __init opal_get_boot_time(void)
struct rtc_time tm;
u32 y_m_d;
u64 h_m_s_ms;
+ __be32 __y_m_d;
+ __be64 __h_m_s_ms;
long rc = OPAL_BUSY;
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
- rc = opal_rtc_read(&y_m_d, &h_m_s_ms);
+ rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
if (rc == OPAL_BUSY_EVENT)
opal_poll_events(NULL);
else
@@ -48,6 +50,8 @@ unsigned long __init opal_get_boot_time(void)
}
if (rc != OPAL_SUCCESS)
return 0;
+ y_m_d = be32_to_cpu(__y_m_d);
+ h_m_s_ms = be64_to_cpu(__h_m_s_ms);
opal_to_tm(y_m_d, h_m_s_ms, &tm);
return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec);
@@ -58,9 +62,11 @@ void opal_get_rtc_time(struct rtc_time *tm)
long rc = OPAL_BUSY;
u32 y_m_d;
u64 h_m_s_ms;
+ __be32 __y_m_d;
+ __be64 __h_m_s_ms;
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
- rc = opal_rtc_read(&y_m_d, &h_m_s_ms);
+ rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
if (rc == OPAL_BUSY_EVENT)
opal_poll_events(NULL);
else
@@ -68,6 +74,8 @@ void opal_get_rtc_time(struct rtc_time *tm)
}
if (rc != OPAL_SUCCESS)
return;
+ y_m_d = be32_to_cpu(__y_m_d);
+ h_m_s_ms = be64_to_cpu(__h_m_s_ms);
opal_to_tm(y_m_d, h_m_s_ms, tm);
}
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 8f3844535fbb..e7806504e976 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -24,7 +24,7 @@
mflr r0; \
mfcr r12; \
std r0,16(r1); \
- std r12,8(r1); \
+ stw r12,8(r1); \
std r1,PACAR1(r13); \
li r0,0; \
mfmsr r12; \
@@ -34,7 +34,7 @@
mtmsrd r12,1; \
LOAD_REG_ADDR(r0,.opal_return); \
mtlr r0; \
- li r0,MSR_DR|MSR_IR; \
+ li r0,MSR_DR|MSR_IR|MSR_LE;\
andc r12,r12,r0; \
li r0,token; \
mtspr SPRN_HSRR1,r12; \
@@ -45,8 +45,15 @@
hrfid
_STATIC(opal_return)
+ /*
+ * Fixup endian on OPAL return... we should be able to simplify
+ * this by instead converting the below trampoline to a set of
+ * bytes (always BE) since MSR:LE will end up fixed up as a side
+ * effect of the rfid.
+ */
+ FIXUP_ENDIAN
ld r2,PACATOC(r13);
- ld r4,8(r1);
+ lwz r4,8(r1);
ld r5,16(r1);
ld r6,PACASAVEDMSR(r13);
mtspr SPRN_SRR0,r5;
@@ -116,3 +123,6 @@ OPAL_CALL(opal_xscom_write, OPAL_XSCOM_WRITE);
OPAL_CALL(opal_lpc_read, OPAL_LPC_READ);
OPAL_CALL(opal_lpc_write, OPAL_LPC_WRITE);
OPAL_CALL(opal_return_cpu, OPAL_RETURN_CPU);
+OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE);
+OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE);
+OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE);
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
new file mode 100644
index 000000000000..3ed5c6498324
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -0,0 +1,105 @@
+/*
+ * PowerNV SCOM bus support via OPAL.
+ *
+ * Copyright 2013 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/bug.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/opal.h>
+#include <asm/scom.h>
+
+/*
+ * We could probably fit that inside the scom_map_t
+ * which is a void* after all but it's really too ugly
+ * so let's kmalloc it for now
+ */
+struct opal_scom_map {
+ uint32_t chip;
+ uint32_t addr;
+};
+
+static scom_map_t opal_scom_map(struct device_node *dev, u64 reg, u64 count)
+{
+ struct opal_scom_map *m;
+ const __be32 *gcid;
+
+ if (!of_get_property(dev, "scom-controller", NULL)) {
+ pr_err("%s: device %s is not a SCOM controller\n",
+ __func__, dev->full_name);
+ return SCOM_MAP_INVALID;
+ }
+ gcid = of_get_property(dev, "ibm,chip-id", NULL);
+ if (!gcid) {
+ pr_err("%s: device %s has no ibm,chip-id\n",
+ __func__, dev->full_name);
+ return SCOM_MAP_INVALID;
+ }
+ m = kmalloc(sizeof(struct opal_scom_map), GFP_KERNEL);
+ if (!m)
+ return NULL;
+ m->chip = be32_to_cpup(gcid);
+ m->addr = reg;
+
+ return (scom_map_t)m;
+}
+
+static void opal_scom_unmap(scom_map_t map)
+{
+ kfree(map);
+}
+
+static int opal_xscom_err_xlate(int64_t rc)
+{
+ switch (rc) {
+ case 0:
+ return 0;
+ /* Add more translations if necessary */
+ default:
+ return -EIO;
+ }
+}
+
+static int opal_scom_read(scom_map_t map, u32 reg, u64 *value)
+{
+ struct opal_scom_map *m = map;
+ int64_t rc;
+
+ rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value));
+ return opal_xscom_err_xlate(rc);
+}
+
+static int opal_scom_write(scom_map_t map, u32 reg, u64 value)
+{
+ struct opal_scom_map *m = map;
+ int64_t rc;
+
+ rc = opal_xscom_write(m->chip, m->addr + reg, value);
+ return opal_xscom_err_xlate(rc);
+}
+
+static const struct scom_controller opal_scom_controller = {
+ .map = opal_scom_map,
+ .unmap = opal_scom_unmap,
+ .read = opal_scom_read,
+ .write = opal_scom_write
+};
+
+static int opal_xscom_init(void)
+{
+ if (firmware_has_feature(FW_FEATURE_OPALv3))
+ scom_init(&opal_scom_controller);
+ return 0;
+}
+arch_initcall(opal_xscom_init);
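Callers never reach opal_scom_map()/opal_scom_read() directly; they go through the generic scom layer that scom_init() above registers this controller with. The fragment below is a hedged sketch of what a single round trip looks like when driven through the ops table itself; "dn" is assumed to point at a device-tree node carrying both the "scom-controller" marker and an "ibm,chip-id" property, and the function is illustrative rather than part of this file.

/* Sketch only: map a SCOM range on "dn", read register 0 and unmap.
 * Assumes dn has "scom-controller" and "ibm,chip-id" properties. */
static int opal_scom_selftest(struct device_node *dn)
{
	scom_map_t map;
	u64 val;
	int rc;

	map = opal_scom_controller.map(dn, 0, 1);
	if (!map || map == SCOM_MAP_INVALID)
		return -ENODEV;

	rc = opal_scom_controller.read(map, 0, &val);
	if (!rc)
		pr_info("XSCOM reg 0 = 0x%016llx\n",
			(unsigned long long)val);

	opal_scom_controller.unmap(map);
	return rc;
}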
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 2911abe550f1..1c798cd55372 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -13,15 +13,20 @@
#include <linux/types.h>
#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/slab.h>
+#include <linux/kobject.h>
#include <asm/opal.h>
#include <asm/firmware.h>
#include "powernv.h"
+/* /sys/firmware/opal */
+struct kobject *opal_kobj;
+
struct opal {
u64 base;
u64 entry;
@@ -77,6 +82,7 @@ int __init early_init_dt_scan_opal(unsigned long node,
static int __init opal_register_exception_handlers(void)
{
+#ifdef __BIG_ENDIAN__
u64 glue;
if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
@@ -94,6 +100,7 @@ static int __init opal_register_exception_handlers(void)
0, glue);
glue += 128;
opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
+#endif
return 0;
}
@@ -164,27 +171,28 @@ void opal_notifier_disable(void)
int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
- s64 len, rc;
- u64 evt;
+ s64 rc;
+ __be64 evt, len;
if (!opal.entry)
return -ENODEV;
opal_poll_events(&evt);
- if ((evt & OPAL_EVENT_CONSOLE_INPUT) == 0)
+ if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
return 0;
- len = count;
- rc = opal_console_read(vtermno, &len, buf);
+ len = cpu_to_be64(count);
+ rc = opal_console_read(vtermno, &len, buf);
if (rc == OPAL_SUCCESS)
- return len;
+ return be64_to_cpu(len);
return 0;
}
int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
int written = 0;
+ __be64 olen;
s64 len, rc;
unsigned long flags;
- u64 evt;
+ __be64 evt;
if (!opal.entry)
return -ENODEV;
@@ -199,13 +207,14 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
*/
spin_lock_irqsave(&opal_write_lock, flags);
if (firmware_has_feature(FW_FEATURE_OPALv2)) {
- rc = opal_console_write_buffer_space(vtermno, &len);
+ rc = opal_console_write_buffer_space(vtermno, &olen);
+ len = be64_to_cpu(olen);
if (rc || len < total_len) {
spin_unlock_irqrestore(&opal_write_lock, flags);
/* Closed -> drop characters */
if (rc)
return total_len;
- opal_poll_events(&evt);
+ opal_poll_events(NULL);
return -EAGAIN;
}
}
@@ -216,8 +225,9 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
rc = OPAL_BUSY;
while(total_len > 0 && (rc == OPAL_BUSY ||
rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
- len = total_len;
- rc = opal_console_write(vtermno, &len, data);
+ olen = cpu_to_be64(total_len);
+ rc = opal_console_write(vtermno, &olen, data);
+ len = be64_to_cpu(olen);
/* Closed or other error drop */
if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
@@ -237,7 +247,8 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
*/
do
opal_poll_events(&evt);
- while(rc == OPAL_SUCCESS && (evt & OPAL_EVENT_CONSOLE_OUTPUT));
+ while(rc == OPAL_SUCCESS &&
+ (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
}
spin_unlock_irqrestore(&opal_write_lock, flags);
return written;
@@ -360,7 +371,7 @@ int opal_machine_check(struct pt_regs *regs)
static irqreturn_t opal_interrupt(int irq, void *data)
{
- uint64_t events;
+ __be64 events;
opal_handle_interrupt(virq_to_hw(irq), &events);
@@ -369,10 +380,21 @@ static irqreturn_t opal_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
+static int opal_sysfs_init(void)
+{
+ opal_kobj = kobject_create_and_add("opal", firmware_kobj);
+ if (!opal_kobj) {
+ pr_warn("kobject_create_and_add opal failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int __init opal_init(void)
{
struct device_node *np, *consoles;
- const u32 *irqs;
+ const __be32 *irqs;
int rc, i, irqlen;
opal_node = of_find_node_by_path("/ibm,opal");
@@ -414,6 +436,14 @@ static int __init opal_init(void)
" (0x%x)\n", rc, irq, hwirq);
opal_irqs[i] = irq;
}
+
+ /* Create "opal" kobject under /sys/firmware */
+ rc = opal_sysfs_init();
+ if (rc == 0) {
+ /* Setup code update interface */
+ opal_flash_init();
+ }
+
return 0;
}
subsys_initcall(opal_init);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 74a5a5773b1f..c639af7d4826 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -70,6 +70,16 @@ define_pe_printk_level(pe_err, KERN_ERR);
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);
+/*
+ * stdcix is only supposed to be used in hypervisor real mode as per
+ * the architecture spec
+ */
+static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
+{
+ __asm__ __volatile__("stdcix %0,0,%1"
+ : : "r" (val), "r" (paddr) : "memory");
+}
+
static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
unsigned long pe;
@@ -454,10 +464,13 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
}
}
-static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
- u64 *startp, u64 *endp)
+static void pnv_pci_ioda1_tce_invalidate(struct pnv_ioda_pe *pe,
+ struct iommu_table *tbl,
+ __be64 *startp, __be64 *endp, bool rm)
{
- u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+ __be64 __iomem *invalidate = rm ?
+ (__be64 __iomem *)pe->tce_inval_reg_phys :
+ (__be64 __iomem *)tbl->it_index;
unsigned long start, end, inc;
start = __pa(startp);
@@ -484,7 +497,10 @@ static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
mb(); /* Ensure above stores are visible */
while (start <= end) {
- __raw_writeq(start, invalidate);
+ if (rm)
+ __raw_rm_writeq(cpu_to_be64(start), invalidate);
+ else
+ __raw_writeq(cpu_to_be64(start), invalidate);
start += inc;
}
@@ -496,10 +512,12 @@ static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
struct iommu_table *tbl,
- u64 *startp, u64 *endp)
+ __be64 *startp, __be64 *endp, bool rm)
{
unsigned long start, end, inc;
- u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+ __be64 __iomem *invalidate = rm ?
+ (__be64 __iomem *)pe->tce_inval_reg_phys :
+ (__be64 __iomem *)tbl->it_index;
/* We'll invalidate DMA address in PE scope */
start = 0x2ul << 60;
@@ -515,22 +533,25 @@ static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
mb();
while (start <= end) {
- __raw_writeq(start, invalidate);
+ if (rm)
+ __raw_rm_writeq(cpu_to_be64(start), invalidate);
+ else
+ __raw_writeq(cpu_to_be64(start), invalidate);
start += inc;
}
}
void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
- u64 *startp, u64 *endp)
+ __be64 *startp, __be64 *endp, bool rm)
{
struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
tce32_table);
struct pnv_phb *phb = pe->phb;
if (phb->type == PNV_PHB_IODA1)
- pnv_pci_ioda1_tce_invalidate(tbl, startp, endp);
+ pnv_pci_ioda1_tce_invalidate(pe, tbl, startp, endp, rm);
else
- pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp);
+ pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp, rm);
}
static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
@@ -603,7 +624,9 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
* bus number, print that out instead.
*/
tbl->it_busno = 0;
- tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
+ pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
+ tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
+ 8);
tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE |
TCE_PCI_SWINV_PAIR;
}
@@ -681,7 +704,9 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
* bus number, print that out instead.
*/
tbl->it_busno = 0;
- tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
+ pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
+ tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
+ 8);
tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
}
iommu_init_table(tbl, phb->hose->node);
@@ -786,8 +811,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
struct irq_data *idata;
struct irq_chip *ichip;
unsigned int xive_num = hwirq - phb->msi_base;
- uint64_t addr64;
- uint32_t addr32, data;
+ __be32 data;
int rc;
/* No PE assigned ? bail out ... no MSI for you ! */
@@ -811,6 +835,8 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
}
if (is_64) {
+ __be64 addr64;
+
rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
&addr64, &data);
if (rc) {
@@ -818,9 +844,11 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
pci_name(dev), rc);
return -EIO;
}
- msg->address_hi = addr64 >> 32;
- msg->address_lo = addr64 & 0xfffffffful;
+ msg->address_hi = be64_to_cpu(addr64) >> 32;
+ msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
} else {
+ __be32 addr32;
+
rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
&addr32, &data);
if (rc) {
@@ -829,9 +857,9 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
return -EIO;
}
msg->address_hi = 0;
- msg->address_lo = addr32;
+ msg->address_lo = be32_to_cpu(addr32);
}
- msg->data = data;
+ msg->data = be32_to_cpu(data);
/*
* Change the IRQ chip for the MSI interrupts on PHB3.
@@ -1106,8 +1134,8 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
struct pci_controller *hose;
struct pnv_phb *phb;
unsigned long size, m32map_off, iomap_off, pemap_off;
- const u64 *prop64;
- const u32 *prop32;
+ const __be64 *prop64;
+ const __be32 *prop32;
int len;
u64 phb_id;
void *aux;
@@ -1142,8 +1170,8 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
spin_lock_init(&phb->lock);
prop32 = of_get_property(np, "bus-range", &len);
if (prop32 && len == 8) {
- hose->first_busno = prop32[0];
- hose->last_busno = prop32[1];
+ hose->first_busno = be32_to_cpu(prop32[0]);
+ hose->last_busno = be32_to_cpu(prop32[1]);
} else {
pr_warn(" Broken <bus-range> on %s\n", np->full_name);
hose->first_busno = 0;
@@ -1175,7 +1203,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
if (!prop32)
phb->ioda.total_pe = 1;
else
- phb->ioda.total_pe = *prop32;
+ phb->ioda.total_pe = be32_to_cpup(prop32);
phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
/* FW Has already off top 64k of M32 space (MSI space) */
@@ -1285,7 +1313,7 @@ void __init pnv_pci_init_ioda2_phb(struct device_node *np)
void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
struct device_node *phbn;
- const u64 *prop64;
+ const __be64 *prop64;
u64 hub_id;
pr_info("Probing IODA IO-Hub %s\n", np->full_name);
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index b68db6325c1b..f8b4bd8afb2e 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -99,7 +99,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
void *tce_mem, u64 tce_size)
{
struct pnv_phb *phb;
- const u64 *prop64;
+ const __be64 *prop64;
u64 phb_id;
int64_t rc;
static int primary = 1;
@@ -178,7 +178,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
{
struct device_node *phbn;
- const u64 *prop64;
+ const __be64 *prop64;
u64 hub_id;
void *tce_mem;
uint64_t tce_per_phb;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index a28d3b5e6393..921ae673baf3 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -236,7 +236,7 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
{
s64 rc;
u8 fstate;
- u16 pcierr;
+ __be16 pcierr;
u32 pe_no;
/*
@@ -283,16 +283,16 @@ int pnv_pci_cfg_read(struct device_node *dn,
break;
}
case 2: {
- u16 v16;
+ __be16 v16;
rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
&v16);
- *val = (rc == OPAL_SUCCESS) ? v16 : 0xffff;
+ *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
break;
}
case 4: {
- u32 v32;
+ __be32 v32;
rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
- *val = (rc == OPAL_SUCCESS) ? v32 : 0xffffffff;
+ *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
break;
}
default:
@@ -401,10 +401,10 @@ struct pci_ops pnv_pci_ops = {
static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ struct dma_attrs *attrs, bool rm)
{
u64 proto_tce;
- u64 *tcep, *tces;
+ __be64 *tcep, *tces;
u64 rpn;
proto_tce = TCE_PCI_READ; // Read allowed
@@ -412,33 +412,48 @@ static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
- tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset;
+ tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;
rpn = __pa(uaddr) >> TCE_SHIFT;
while (npages--)
- *(tcep++) = proto_tce | (rpn++ << TCE_RPN_SHIFT);
+ *(tcep++) = cpu_to_be64(proto_tce | (rpn++ << TCE_RPN_SHIFT));
/* Some implementations won't cache invalid TCEs and thus may not
* need that flush. We'll probably turn it_type into a bit mask
* of flags if that becomes the case
*/
if (tbl->it_type & TCE_PCI_SWINV_CREATE)
- pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
+ pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);
return 0;
}
-static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
+static int pnv_tce_build_vm(struct iommu_table *tbl, long index, long npages,
+ unsigned long uaddr,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
- u64 *tcep, *tces;
+ return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs,
+ false);
+}
+
+static void pnv_tce_free(struct iommu_table *tbl, long index, long npages,
+ bool rm)
+{
+ __be64 *tcep, *tces;
- tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset;
+ tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;
while (npages--)
- *(tcep++) = 0;
+ *(tcep++) = cpu_to_be64(0);
if (tbl->it_type & TCE_PCI_SWINV_FREE)
- pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
+ pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);
+}
+
+static void pnv_tce_free_vm(struct iommu_table *tbl, long index, long npages)
+{
+ pnv_tce_free(tbl, index, npages, false);
}
static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
@@ -446,6 +461,19 @@ static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
return ((u64 *)tbl->it_base)[index - tbl->it_offset];
}
+static int pnv_tce_build_rm(struct iommu_table *tbl, long index, long npages,
+ unsigned long uaddr,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, true);
+}
+
+static void pnv_tce_free_rm(struct iommu_table *tbl, long index, long npages)
+{
+ pnv_tce_free(tbl, index, npages, true);
+}
+
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
void *tce_mem, u64 tce_size,
u64 dma_offset)
@@ -484,8 +512,8 @@ static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose)
swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info",
NULL);
if (swinvp) {
- tbl->it_busno = swinvp[1];
- tbl->it_index = (unsigned long)ioremap(swinvp[0], 8);
+ tbl->it_busno = be64_to_cpu(swinvp[1]);
+ tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
}
return tbl;
@@ -610,8 +638,10 @@ void __init pnv_pci_init(void)
/* Configure IOMMU DMA hooks */
ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
- ppc_md.tce_build = pnv_tce_build;
- ppc_md.tce_free = pnv_tce_free;
+ ppc_md.tce_build = pnv_tce_build_vm;
+ ppc_md.tce_free = pnv_tce_free_vm;
+ ppc_md.tce_build_rm = pnv_tce_build_rm;
+ ppc_md.tce_free_rm = pnv_tce_free_rm;
ppc_md.tce_get = pnv_tce_get;
ppc_md.pci_probe_mode = pnv_pci_probe_mode;
set_pci_dma_ops(&dma_iommu_ops);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index d633c64e05a1..64d3b12e5b6d 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -17,7 +17,7 @@ enum pnv_phb_model {
PNV_PHB_MODEL_PHB3,
};
-#define PNV_PCI_DIAG_BUF_SIZE 4096
+#define PNV_PCI_DIAG_BUF_SIZE 8192
#define PNV_IODA_PE_DEV (1 << 0) /* PE has single PCI device */
#define PNV_IODA_PE_BUS (1 << 1) /* PE has primary PCI bus */
#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
@@ -52,6 +52,7 @@ struct pnv_ioda_pe {
int tce32_seg;
int tce32_segcount;
struct iommu_table tce32_table;
+ phys_addr_t tce_inval_reg_phys;
/* XXX TODO: Add support for additional 64-bit iommus */
@@ -193,6 +194,6 @@ extern void pnv_pci_init_p5ioc2_hub(struct device_node *np);
extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
- u64 *startp, u64 *endp);
+ __be64 *startp, __be64 *endp, bool rm);
#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
new file mode 100644
index 000000000000..8844628915dc
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "powernv-rng: " fmt
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <asm/archrandom.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+
+
+struct powernv_rng {
+ void __iomem *regs;
+ unsigned long mask;
+};
+
+static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng);
+
+
+static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val)
+{
+ unsigned long parity;
+
+ /* Calculate the parity of the value */
+ asm ("popcntd %0,%1" : "=r" (parity) : "r" (val));
+
+ /* xor our value with the previous mask */
+ val ^= rng->mask;
+
+ /* update the mask based on the parity of this value */
+ rng->mask = (rng->mask << 1) | (parity & 1);
+
+ return val;
+}
+
+int powernv_get_random_long(unsigned long *v)
+{
+ struct powernv_rng *rng;
+
+ rng = get_cpu_var(powernv_rng);
+
+ *v = rng_whiten(rng, in_be64(rng->regs));
+
+ put_cpu_var(rng);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(powernv_get_random_long);
+
+static __init void rng_init_per_cpu(struct powernv_rng *rng,
+ struct device_node *dn)
+{
+ int chip_id, cpu;
+
+ chip_id = of_get_ibm_chip_id(dn);
+ if (chip_id == -1)
+ pr_warn("No ibm,chip-id found for %s.\n", dn->full_name);
+
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(powernv_rng, cpu) == NULL ||
+ cpu_to_chip_id(cpu) == chip_id) {
+ per_cpu(powernv_rng, cpu) = rng;
+ }
+ }
+}
+
+static __init int rng_create(struct device_node *dn)
+{
+ struct powernv_rng *rng;
+ unsigned long val;
+
+ rng = kzalloc(sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->regs = of_iomap(dn, 0);
+ if (!rng->regs) {
+ kfree(rng);
+ return -ENXIO;
+ }
+
+ val = in_be64(rng->regs);
+ rng->mask = val;
+
+ rng_init_per_cpu(rng, dn);
+
+ pr_info_once("Registering arch random hook.\n");
+
+ ppc_md.get_random_long = powernv_get_random_long;
+
+ return 0;
+}
+
+static __init int rng_init(void)
+{
+ struct device_node *dn;
+ int rc;
+
+ for_each_compatible_node(dn, NULL, "ibm,power-rng") {
+ rc = rng_create(dn);
+ if (rc) {
+ pr_err("Failed creating rng for %s (%d).\n",
+ dn->full_name, rc);
+ continue;
+ }
+
+ /* Create devices for hwrng driver */
+ of_platform_device_create(dn, NULL, NULL);
+ }
+
+ return 0;
+}
+subsys_initcall(rng_init);
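The whitening step above XORs each raw sample with a running mask and then feeds the sample's parity back into that mask, scrambling any bias in the raw hardware stream before the value is handed back to the kernel. Below is a small host-side model of the same transformation, using the compiler's popcount builtin in place of the popcntd instruction; it is purely illustrative and shares no code with the driver.

/* Host-side model of rng_whiten(): XOR the sample with the running mask,
 * then shift the sample's parity into the mask.  __builtin_popcountll
 * stands in for the popcntd instruction used by the kernel code. */
#include <stdint.h>
#include <stdio.h>

struct model_rng {
	uint64_t mask;
};

static uint64_t model_whiten(struct model_rng *rng, uint64_t val)
{
	uint64_t parity = __builtin_popcountll(val) & 1;
	uint64_t out = val ^ rng->mask;

	rng->mask = (rng->mask << 1) | parity;
	return out;
}

int main(void)
{
	struct model_rng rng = { .mask = 0x123456789abcdef0ULL };
	uint64_t samples[] = { 0x0, 0xffffffffffffffffULL, 0xdeadbeefULL };
	int i;

	for (i = 0; i < 3; i++)
		printf("0x%016llx\n",
		       (unsigned long long)model_whiten(&rng, samples[i]));
	return 0;
}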
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index e239dcfa224c..19884b2a51b4 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -23,6 +23,7 @@
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <linux/interrupt.h>
#include <linux/bug.h>
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 6c61ec5ee914..fbccac9cd2dc 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -3,7 +3,7 @@ ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG
obj-y := lpar.o hvCall.o nvram.o reconfig.o \
setup.o iommu.o event_sources.o ras.o \
- firmware.o power.o dlpar.o mobility.o
+ firmware.o power.o dlpar.o mobility.o rng.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_EEH) += eeh_pseries.o
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 7cfdaae1721a..a8fe5aa3d34f 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -404,46 +404,38 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
unsigned long drc_index;
int rc;
- cpu_hotplug_driver_lock();
rc = strict_strtoul(buf, 0, &drc_index);
- if (rc) {
- rc = -EINVAL;
- goto out;
- }
+ if (rc)
+ return -EINVAL;
parent = of_find_node_by_path("/cpus");
- if (!parent) {
- rc = -ENODEV;
- goto out;
- }
+ if (!parent)
+ return -ENODEV;
dn = dlpar_configure_connector(drc_index, parent);
- if (!dn) {
- rc = -EINVAL;
- goto out;
- }
+ if (!dn)
+ return -EINVAL;
of_node_put(parent);
rc = dlpar_acquire_drc(drc_index);
if (rc) {
dlpar_free_cc_nodes(dn);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
rc = dlpar_attach_node(dn);
if (rc) {
dlpar_release_drc(drc_index);
dlpar_free_cc_nodes(dn);
- goto out;
+ return rc;
}
rc = dlpar_online_cpu(dn);
-out:
- cpu_hotplug_driver_unlock();
+ if (rc)
+ return rc;
- return rc ? rc : count;
+ return count;
}
static int dlpar_offline_cpu(struct device_node *dn)
@@ -516,30 +508,27 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count)
return -EINVAL;
}
- cpu_hotplug_driver_lock();
rc = dlpar_offline_cpu(dn);
if (rc) {
of_node_put(dn);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
rc = dlpar_release_drc(*drc_index);
if (rc) {
of_node_put(dn);
- goto out;
+ return rc;
}
rc = dlpar_detach_node(dn);
if (rc) {
dlpar_acquire_drc(*drc_index);
- goto out;
+ return rc;
}
of_node_put(dn);
-out:
- cpu_hotplug_driver_unlock();
- return rc ? rc : count;
+
+ return count;
}
static int __init pseries_dlpar_init(void)
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
index 2605c310166a..18380e8f6dfe 100644
--- a/arch/powerpc/platforms/pseries/event_sources.c
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -25,7 +25,7 @@ void request_event_sources_irqs(struct device_node *np,
const char *name)
{
int i, index, count = 0;
- struct of_irq oirq;
+ struct of_phandle_args oirq;
const u32 *opicprop;
unsigned int opicplen;
unsigned int virqs[16];
@@ -55,13 +55,11 @@ void request_event_sources_irqs(struct device_node *np,
/* Else use normal interrupt tree parsing */
else {
/* First try to do a proper OF tree parsing */
- for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
+ for (index = 0; of_irq_parse_one(np, index, &oirq) == 0;
index++) {
if (count > 15)
break;
- virqs[count] = irq_create_of_mapping(oirq.controller,
- oirq.specifier,
- oirq.size);
+ virqs[count] = irq_create_of_mapping(&oirq);
if (virqs[count] == NO_IRQ) {
pr_err("event-sources: Unable to allocate "
"interrupt number for %s\n",
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 9a432de363b8..9590dbb756f2 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -10,12 +10,14 @@
*/
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
+#include <asm/prom.h>
#include <asm/sparsemem.h>
static unsigned long get_memblock_size(void)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 0307901e4132..f253361552ae 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -52,7 +52,7 @@
static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
- u64 *startp, u64 *endp)
+ __be64 *startp, __be64 *endp)
{
u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
unsigned long start, end, inc;
@@ -86,7 +86,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
struct dma_attrs *attrs)
{
u64 proto_tce;
- u64 *tcep, *tces;
+ __be64 *tcep, *tces;
u64 rpn;
proto_tce = TCE_PCI_READ; // Read allowed
@@ -94,12 +94,12 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
- tces = tcep = ((u64 *)tbl->it_base) + index;
+ tces = tcep = ((__be64 *)tbl->it_base) + index;
while (npages--) {
/* can't move this out since we might cross MEMBLOCK boundary */
rpn = __pa(uaddr) >> TCE_SHIFT;
- *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
+ *tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
uaddr += TCE_PAGE_SIZE;
tcep++;
@@ -113,9 +113,9 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
- u64 *tcep, *tces;
+ __be64 *tcep, *tces;
- tces = tcep = ((u64 *)tbl->it_base) + index;
+ tces = tcep = ((__be64 *)tbl->it_base) + index;
while (npages--)
*(tcep++) = 0;
@@ -126,11 +126,11 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
- u64 *tcep;
+ __be64 *tcep;
- tcep = ((u64 *)tbl->it_base) + index;
+ tcep = ((__be64 *)tbl->it_base) + index;
- return *tcep;
+ return be64_to_cpu(*tcep);
}
static void tce_free_pSeriesLP(struct iommu_table*, long, long);
@@ -177,7 +177,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
return ret;
}
-static DEFINE_PER_CPU(u64 *, tce_page);
+static DEFINE_PER_CPU(__be64 *, tce_page);
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
@@ -186,7 +186,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
{
u64 rc = 0;
u64 proto_tce;
- u64 *tcep;
+ __be64 *tcep;
u64 rpn;
long l, limit;
long tcenum_start = tcenum, npages_start = npages;
@@ -206,7 +206,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
* from iommu_alloc{,_sg}()
*/
if (!tcep) {
- tcep = (u64 *)__get_free_page(GFP_ATOMIC);
+ tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
local_irq_restore(flags);
@@ -230,7 +230,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);
for (l = 0; l < limit; l++) {
- tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
+ tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
rpn++;
}
@@ -329,16 +329,16 @@ struct direct_window {
/* Dynamic DMA Window support */
struct ddw_query_response {
- u32 windows_available;
- u32 largest_available_block;
- u32 page_size;
- u32 migration_capable;
+ __be32 windows_available;
+ __be32 largest_available_block;
+ __be32 page_size;
+ __be32 migration_capable;
};
struct ddw_create_response {
- u32 liobn;
- u32 addr_hi;
- u32 addr_lo;
+ __be32 liobn;
+ __be32 addr_hi;
+ __be32 addr_lo;
};
static LIST_HEAD(direct_window_list);
@@ -392,7 +392,8 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
unsigned long num_pfn, const void *arg)
{
const struct dynamic_dma_window_prop *maprange = arg;
- u64 *tcep, tce_size, num_tce, dma_offset, next, proto_tce, liobn;
+ u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
+ __be64 *tcep;
u32 tce_shift;
u64 rc = 0;
long l, limit;
@@ -401,7 +402,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
tcep = __get_cpu_var(tce_page);
if (!tcep) {
- tcep = (u64 *)__get_free_page(GFP_ATOMIC);
+ tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
if (!tcep) {
local_irq_enable();
return -ENOMEM;
@@ -435,7 +436,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
dma_offset = next + be64_to_cpu(maprange->dma_base);
for (l = 0; l < limit; l++) {
- tcep[l] = proto_tce | next;
+ tcep[l] = cpu_to_be64(proto_tce | next);
next += tce_size;
}
@@ -780,7 +781,7 @@ static u64 find_existing_ddw(struct device_node *pdn)
list_for_each_entry(window, &direct_window_list, list) {
if (window->device == pdn) {
direct64 = window->prop;
- dma_addr = direct64->dma_base;
+ dma_addr = be64_to_cpu(direct64->dma_base);
break;
}
}
@@ -1045,11 +1046,11 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dev_dbg(&dev->dev, "no free dynamic windows");
goto out_restore_window;
}
- if (query.page_size & 4) {
+ if (be32_to_cpu(query.page_size) & 4) {
page_shift = 24; /* 16MB */
- } else if (query.page_size & 2) {
+ } else if (be32_to_cpu(query.page_size) & 2) {
page_shift = 16; /* 64kB */
- } else if (query.page_size & 1) {
+ } else if (be32_to_cpu(query.page_size) & 1) {
page_shift = 12; /* 4kB */
} else {
dev_dbg(&dev->dev, "no supported direct page size in mask %x",
@@ -1059,7 +1060,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
/* verify the window * number of ptes will map the partition */
/* check largest block * page size > max memory hotplug addr */
max_addr = memory_hotplug_max();
- if (query.largest_available_block < (max_addr >> page_shift)) {
+ if (be32_to_cpu(query.largest_available_block) < (max_addr >> page_shift)) {
dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u "
"%llu-sized pages\n", max_addr, query.largest_available_block,
1ULL << page_shift);
@@ -1085,7 +1086,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
if (ret != 0)
goto out_free_prop;
- ddwprop->liobn = cpu_to_be32(create.liobn);
+ ddwprop->liobn = create.liobn;
ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
ddwprop->tce_shift = cpu_to_be32(page_shift);
ddwprop->window_shift = cpu_to_be32(len);
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index d276cd3edd8f..7bfaf58d4664 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -31,7 +31,7 @@
#define NVRW_CNT 0x20
/*
- * Set oops header version to distingush between old and new format header.
+ * Set oops header version to distinguish between old and new format header.
* lnx,oops-log partition max size is 4000, header version > 4000 will
* help in identifying new header.
*/
@@ -429,9 +429,6 @@ static int __init pseries_nvram_init_os_partition(struct nvram_os_partition
loff_t p;
int size;
- /* Scan nvram for partitions */
- nvram_scan_partitions();
-
/* Look for ours */
p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
@@ -795,6 +792,9 @@ static int __init pseries_nvram_init_log_partitions(void)
{
int rc;
+ /* Scan nvram for partitions */
+ nvram_scan_partitions();
+
rc = pseries_nvram_init_os_partition(&rtas_log_partition);
nvram_init_oops_partition(rc == 0);
return 0;
@@ -804,7 +804,7 @@ machine_arch_initcall(pseries, pseries_nvram_init_log_partitions);
int __init pSeries_nvram_init(void)
{
struct device_node *nvram;
- const unsigned int *nbytes_p;
+ const __be32 *nbytes_p;
unsigned int proplen;
nvram = of_find_node_by_type(NULL, "nvram");
@@ -817,7 +817,7 @@ int __init pSeries_nvram_init(void)
return -EIO;
}
- nvram_size = *nbytes_p;
+ nvram_size = be32_to_cpup(nbytes_p);
nvram_fetch = rtas_token("nvram-fetch");
nvram_store = rtas_token("nvram-store");
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
new file mode 100644
index 000000000000..a702f1c08242
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/rng.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "pseries-rng: " fmt
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <asm/archrandom.h>
+#include <asm/machdep.h>
+
+
+static int pseries_get_random_long(unsigned long *v)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ if (plpar_hcall(H_RANDOM, retbuf) == H_SUCCESS) {
+ *v = retbuf[0];
+ return 1;
+ }
+
+ return 0;
+}
+
+static __init int rng_init(void)
+{
+ struct device_node *dn;
+
+ dn = of_find_compatible_node(NULL, NULL, "ibm,random");
+ if (!dn)
+ return -ENODEV;
+
+ pr_info("Registering arch random hook.\n");
+
+ ppc_md.get_random_long = pseries_get_random_long;
+
+ return 0;
+}
+subsys_initcall(rng_init);
diff --git a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c
index b56b70aeb497..268bc899c1f7 100644
--- a/arch/powerpc/platforms/wsp/scom_smp.c
+++ b/arch/powerpc/platforms/wsp/scom_smp.c
@@ -116,7 +116,14 @@ static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask)
scom_write(scom, SCOM_RAMIC, cmd);
- while (!((val = scom_read(scom, SCOM_RAMC)) & mask)) {
+ for (;;) {
+ if (scom_read(scom, SCOM_RAMC, &val) != 0) {
+ pr_err("SCOM error on instruction 0x%08x, thread %d\n",
+ insn, thread);
+ return -1;
+ }
+ if (val & mask)
+ break;
pr_devel("Waiting on RAMC = 0x%llx\n", val);
if (++n == 3) {
pr_err("RAMC timeout on instruction 0x%08x, thread %d\n",
@@ -151,9 +158,7 @@ static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt,
if (rc)
return rc;
- *out_gpr = scom_read(scom, SCOM_RAMD);
-
- return 0;
+ return scom_read(scom, SCOM_RAMD, out_gpr);
}
static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr)
@@ -353,7 +358,10 @@ int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np)
pr_devel("Bringing up CPU%d using SCOM...\n", lcpu);
- pccr0 = scom_read(scom, SCOM_PCCR0);
+ if (scom_read(scom, SCOM_PCCR0, &pccr0) != 0) {
+ printk(KERN_ERR "XSCOM failure reading PCCR0 on CPU%d\n", lcpu);
+ return -1;
+ }
scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG |
SCOM_PCCR0_ENABLE_RAM);
diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c
index 4052e2259f30..54172c4a8a64 100644
--- a/arch/powerpc/platforms/wsp/scom_wsp.c
+++ b/arch/powerpc/platforms/wsp/scom_wsp.c
@@ -50,18 +50,22 @@ static void wsp_scom_unmap(scom_map_t map)
iounmap((void *)map);
}
-static u64 wsp_scom_read(scom_map_t map, u32 reg)
+static int wsp_scom_read(scom_map_t map, u32 reg, u64 *value)
{
u64 __iomem *addr = (u64 __iomem *)map;
- return in_be64(addr + reg);
+ *value = in_be64(addr + reg);
+
+ return 0;
}
-static void wsp_scom_write(scom_map_t map, u32 reg, u64 value)
+static int wsp_scom_write(scom_map_t map, u32 reg, u64 value)
{
u64 __iomem *addr = (u64 __iomem *)map;
- return out_be64(addr + reg, value);
+ out_be64(addr + reg, value);
+
+ return 0;
}
static const struct scom_controller wsp_scom_controller = {
diff --git a/arch/powerpc/platforms/wsp/wsp.c b/arch/powerpc/platforms/wsp/wsp.c
index d25cc96c21b8..ddb6efe88914 100644
--- a/arch/powerpc/platforms/wsp/wsp.c
+++ b/arch/powerpc/platforms/wsp/wsp.c
@@ -89,6 +89,7 @@ void wsp_halt(void)
struct device_node *dn;
struct device_node *mine;
struct device_node *me;
+ int rc;
me = of_get_cpu_node(smp_processor_id(), NULL);
mine = scom_find_parent(me);
@@ -101,15 +102,15 @@ void wsp_halt(void)
/* read-modify-write it so the HW probe does not get
* confused */
- val = scom_read(m, 0);
- val |= 1;
- scom_write(m, 0, val);
+ rc = scom_read(m, 0, &val);
+ if (rc == 0)
+ scom_write(m, 0, val | 1);
scom_unmap(m);
}
m = scom_map(mine, 0, 1);
- val = scom_read(m, 0);
- val |= 1;
- scom_write(m, 0, val);
+ rc = scom_read(m, 0, &val);
+ if (rc == 0)
+ scom_write(m, 0, val | 1);
/* should never return */
scom_unmap(m);
}
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index ab4cb5476472..13ec968be4c7 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -28,7 +28,7 @@ config PPC_SCOM
config SCOM_DEBUGFS
bool "Expose SCOM controllers via debugfs"
- depends on PPC_SCOM
+ depends on PPC_SCOM && DEBUG_FS
default n
config GE_FPGA
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 1c16141c031c..47b6b9f81d43 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -109,27 +109,28 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
unsigned long phys_mem, phys_end;
void *user_mem;
- struct bio_vec *vec;
+ struct bio_vec vec;
unsigned int transfered;
- unsigned short idx;
+ struct bvec_iter iter;
- phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
+ phys_mem = bank->io_addr + (bio->bi_iter.bi_sector <<
+ AXON_RAM_SECTOR_SHIFT);
phys_end = bank->io_addr + bank->size;
transfered = 0;
- bio_for_each_segment(vec, bio, idx) {
- if (unlikely(phys_mem + vec->bv_len > phys_end)) {
+ bio_for_each_segment(vec, bio, iter) {
+ if (unlikely(phys_mem + vec.bv_len > phys_end)) {
bio_io_error(bio);
return;
}
- user_mem = page_address(vec->bv_page) + vec->bv_offset;
+ user_mem = page_address(vec.bv_page) + vec.bv_offset;
if (bio_data_dir(bio) == READ)
- memcpy(user_mem, (void *) phys_mem, vec->bv_len);
+ memcpy(user_mem, (void *) phys_mem, vec.bv_len);
else
- memcpy((void *) phys_mem, user_mem, vec->bv_len);
+ memcpy((void *) phys_mem, user_mem, vec.bv_len);
- phys_mem += vec->bv_len;
- transfered += vec->bv_len;
+ phys_mem += vec.bv_len;
+ transfered += vec.bv_len;
}
bio_endio(bio, 0);
}
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 4dd534194ae8..4f7869571290 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c
index 0eb871cc3437..06ac3c61b3d0 100644
--- a/arch/powerpc/sysdev/fsl_gtm.c
+++ b/arch/powerpc/sysdev/fsl_gtm.c
@@ -19,6 +19,8 @@
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/slab.h>
@@ -401,16 +403,15 @@ static int __init fsl_gtm_init(void)
gtm->clock = *clock;
for (i = 0; i < ARRAY_SIZE(gtm->timers); i++) {
- int ret;
- struct resource irq;
+ unsigned int irq;
- ret = of_irq_to_resource(np, i, &irq);
- if (ret == NO_IRQ) {
+ irq = irq_of_parse_and_map(np, i);
+ if (irq == NO_IRQ) {
pr_err("%s: not enough interrupts specified\n",
np->full_name);
goto err;
}
- gtm->timers[i].irq = irq.start;
+ gtm->timers[i].irq = irq;
gtm->timers[i].gtm = gtm;
}
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index ccfb50ddfe38..4dfd61df8aba 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -40,12 +40,12 @@
static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
-static void quirk_fsl_pcie_header(struct pci_dev *dev)
+static void quirk_fsl_pcie_early(struct pci_dev *dev)
{
u8 hdr_type;
/* if we aren't a PCIe don't bother */
- if (!pci_find_capability(dev, PCI_CAP_ID_EXP))
+ if (!pci_is_pcie(dev))
return;
/* if we aren't in host mode don't bother */
@@ -562,7 +562,8 @@ no_bridge:
}
#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
+ quirk_fsl_pcie_early);
#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
struct mpc83xx_pcie_priv {
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index 592a0f8d527a..8cf4aa0e3a25 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -18,6 +18,7 @@
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
struct pmc_regs {
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index e2fb3171f41b..95dd892e9904 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -28,6 +28,8 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
index 14bd5221f28a..00e224a1048c 100644
--- a/arch/powerpc/sysdev/fsl_rmu.c
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index c6d00736f07f..4c5a19ef4f0b 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -21,8 +21,6 @@ struct device_node;
extern void fsl_rstcr_restart(char *cmd);
-#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
-
/* The different ports that the DIU can be connected to */
enum fsl_diu_monitor_port {
FSL_DIU_PORT_DVI, /* DVI */
@@ -43,7 +41,6 @@ struct platform_diu_data_ops {
};
extern struct platform_diu_data_ops diu_ops;
-#endif
void fsl_hv_restart(char *cmd);
void fsl_hv_halt(void);
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 1be54faf60dd..0e166ed4cd16 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -535,7 +535,7 @@ static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
}
}
-
+
static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
@@ -1088,8 +1088,14 @@ static int mpic_host_map(struct irq_domain *h, unsigned int virq,
* is done here.
*/
if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) {
+ int cpu;
+
+ preempt_disable();
+ cpu = mpic_processor_id(mpic);
+ preempt_enable();
+
mpic_set_vector(virq, hw);
- mpic_set_destination(virq, mpic_processor_id(mpic));
+ mpic_set_destination(virq, cpu);
mpic_irq_set_priority(virq, 8);
}
@@ -1475,7 +1481,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
* as a default instead of the value read from the HW.
*/
last_irq = (greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
- >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT;
+ >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT;
if (isu_size)
last_irq = isu_size * MPIC_MAX_ISU - 1;
of_property_read_u32(mpic->node, "last-interrupt-source", &last_irq);
@@ -1625,7 +1631,7 @@ void __init mpic_init(struct mpic *mpic)
/* start with vector = source number, and masked */
u32 vecpri = MPIC_VECPRI_MASK | i |
(8 << MPIC_VECPRI_PRIORITY_SHIFT);
-
+
/* check if protected */
if (mpic->protected && test_bit(i, mpic->protected))
continue;
@@ -1634,7 +1640,7 @@ void __init mpic_init(struct mpic *mpic)
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu);
}
}
-
+
/* Init spurious vector */
mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), mpic->spurious_vec);
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index c75325865a85..2c9b52aa266c 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -237,15 +237,13 @@ static int mpic_msgr_probe(struct platform_device *dev)
raw_spin_lock_init(&msgr->lock);
if (receive_mask & (1 << i)) {
- struct resource irq;
-
- if (of_irq_to_resource(np, irq_index, &irq) == NO_IRQ) {
+ msgr->irq = irq_of_parse_and_map(np, irq_index);
+ if (msgr->irq == NO_IRQ) {
dev_err(&dev->dev,
"Missing interrupt specifier");
kfree(msgr);
return -EFAULT;
}
- msgr->irq = irq.start;
irq_index += 1;
} else {
msgr->irq = NO_IRQ;
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
index bbf342c88314..7dc39f35a4cc 100644
--- a/arch/powerpc/sysdev/mpic_msi.c
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -35,7 +35,7 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
const struct irq_domain_ops *ops = mpic->irqhost->ops;
struct device_node *np;
int flags, index, i;
- struct of_irq oirq;
+ struct of_phandle_args oirq;
pr_debug("mpic: found U3, guessing msi allocator setup\n");
@@ -63,9 +63,9 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
pr_debug("mpic: mapping hwirqs for %s\n", np->full_name);
index = 0;
- while (of_irq_map_one(np, index++, &oirq) == 0) {
- ops->xlate(mpic->irqhost, NULL, oirq.specifier,
- oirq.size, &hwirq, &flags);
+ while (of_irq_parse_one(np, index++, &oirq) == 0) {
+ ops->xlate(mpic->irqhost, NULL, oirq.args,
+ oirq.args_count, &hwirq, &flags);
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, hwirq);
}
}
diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c
index c06db92a4fb1..22d7d57eead9 100644
--- a/arch/powerpc/sysdev/mpic_timer.c
+++ b/arch/powerpc/sysdev/mpic_timer.c
@@ -19,7 +19,9 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include <sysdev/fsl_soc.h>
#include <asm/io.h>
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index 4a25c26f0bf4..a3a8fad8537d 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -228,7 +228,7 @@ static struct platform_device * __init mv64x60_eth_register_shared_pdev(
if (id == 0) {
pdev = platform_device_register_simple("orion-mdio", -1, &r[1], 1);
- if (!pdev)
+ if (IS_ERR(pdev))
return pdev;
}
diff --git a/arch/powerpc/sysdev/of_rtc.c b/arch/powerpc/sysdev/of_rtc.c
index c9e803f3e267..6f54b54b1328 100644
--- a/arch/powerpc/sysdev/of_rtc.c
+++ b/arch/powerpc/sysdev/of_rtc.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/init.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/sysdev/ppc4xx_soc.c b/arch/powerpc/sysdev/ppc4xx_soc.c
index 0debcc31ad70..5c77c9ba33aa 100644
--- a/arch/powerpc/sysdev/ppc4xx_soc.c
+++ b/arch/powerpc/sysdev/ppc4xx_soc.c
@@ -19,6 +19,7 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/dcr.h>
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c
index 9193e12df695..3963d995648a 100644
--- a/arch/powerpc/sysdev/scom.c
+++ b/arch/powerpc/sysdev/scom.c
@@ -53,7 +53,7 @@ scom_map_t scom_map_device(struct device_node *dev, int index)
{
struct device_node *parent;
unsigned int cells, size;
- const u32 *prop;
+ const __be32 *prop, *sprop;
u64 reg, cnt;
scom_map_t ret;
@@ -62,12 +62,24 @@ scom_map_t scom_map_device(struct device_node *dev, int index)
if (parent == NULL)
return 0;
- prop = of_get_property(parent, "#scom-cells", NULL);
- cells = prop ? *prop : 1;
-
+ /*
+ * We support "scom-reg" properties for adding scom registers
+ * to a random device-tree node with an explicit scom-parent
+ *
+ * We also support the simple "reg" property if the device is
+ * a direct child of a scom controller.
+ *
+ * In case both exist, "scom-reg" takes precedence.
+ */
prop = of_get_property(dev, "scom-reg", &size);
+ sprop = of_get_property(parent, "#scom-cells", NULL);
+ if (!prop && parent == dev->parent) {
+ prop = of_get_property(dev, "reg", &size);
+ sprop = of_get_property(parent, "#address-cells", NULL);
+ }
if (!prop)
- return 0;
+ return NULL;
+ cells = sprop ? be32_to_cpup(sprop) : 1;
size >>= 2;
if (index >= (size / (2*cells)))
@@ -137,8 +149,7 @@ static int scom_val_get(void *data, u64 *val)
if (!scom_map_ok(ent->map))
return -EFAULT;
- *val = scom_read(ent->map, 0);
- return 0;
+ return scom_read(ent->map, 0, val);
}
DEFINE_SIMPLE_ATTRIBUTE(scom_val_fops, scom_val_get, scom_val_set,
"0x%llx\n");
@@ -169,7 +180,7 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
debugfs_create_file("addr", 0600, dir, ent, &scom_addr_fops);
debugfs_create_file("value", 0600, dir, ent, &scom_val_fops);
- debugfs_create_blob("path", 0400, dir, &ent->blob);
+ debugfs_create_blob("devspec", 0400, dir, &ent->blob);
return 0;
}
@@ -185,8 +196,13 @@ static int scom_debug_init(void)
return -1;
i = rc = 0;
- for_each_node_with_property(dn, "scom-controller")
- rc |= scom_debug_init_one(root, dn, i++);
+ for_each_node_with_property(dn, "scom-controller") {
+ int id = of_get_ibm_chip_id(dn);
+ if (id == -1)
+ id = i;
+ rc |= scom_debug_init_one(root, dn, id);
+ i++;
+ }
return rc;
}
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
index 39d72212655e..3c6ee1b64e5d 100644
--- a/arch/powerpc/sysdev/xics/ics-opal.c
+++ b/arch/powerpc/sysdev/xics/ics-opal.c
@@ -112,6 +112,7 @@ static int ics_opal_set_affinity(struct irq_data *d,
bool force)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ __be16 oserver;
int16_t server;
int8_t priority;
int64_t rc;
@@ -120,13 +121,13 @@ static int ics_opal_set_affinity(struct irq_data *d,
if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
return -1;
- rc = opal_get_xive(hw_irq, &server, &priority);
+ rc = opal_get_xive(hw_irq, &oserver, &priority);
if (rc != OPAL_SUCCESS) {
- pr_err("%s: opal_set_xive(irq=%d [hw 0x%x] server=%x)"
- " error %lld\n",
- __func__, d->irq, hw_irq, server, rc);
+ pr_err("%s: opal_get_xive(irq=%d [hw 0x%x]) error %lld\n",
+ __func__, d->irq, hw_irq, rc);
return -1;
}
+ server = be16_to_cpu(oserver);
wanted_server = xics_get_irq_server(d->irq, cpumask, 1);
if (wanted_server < 0) {
@@ -181,7 +182,7 @@ static int ics_opal_map(struct ics *ics, unsigned int virq)
{
unsigned int hw_irq = (unsigned int)virq_to_hw(virq);
int64_t rc;
- int16_t server;
+ __be16 server;
int8_t priority;
if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS))
@@ -201,7 +202,7 @@ static int ics_opal_map(struct ics *ics, unsigned int virq)
static void ics_opal_mask_unknown(struct ics *ics, unsigned long vec)
{
int64_t rc;
- int16_t server;
+ __be16 server;
int8_t priority;
/* Check if HAL knows about this interrupt */
@@ -215,14 +216,14 @@ static void ics_opal_mask_unknown(struct ics *ics, unsigned long vec)
static long ics_opal_get_server(struct ics *ics, unsigned long vec)
{
int64_t rc;
- int16_t server;
+ __be16 server;
int8_t priority;
/* Check if HAL knows about this interrupt */
rc = opal_get_xive(vec, &server, &priority);
if (rc != OPAL_SUCCESS)
return -1;
- return ics_opal_unmangle_server(server);
+ return ics_opal_unmangle_server(be16_to_cpu(server));
}
int __init ics_opal_init(void)
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 8d73c3c0bee6..f4fdc94ad2c0 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i8259.h>
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 7143793859fa..f75d7e517927 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -99,6 +99,7 @@ config S390
select CLONE_BACKWARDS2
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES if !SMP
+ select GENERIC_FIND_FIRST_BIT
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL_OLD
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
@@ -237,6 +238,67 @@ config MARCH_ZEC12
endchoice
+config MARCH_G5_TUNE
+ def_bool TUNE_G5 || MARCH_G5 && TUNE_DEFAULT
+
+config MARCH_Z900_TUNE
+ def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT
+
+config MARCH_Z990_TUNE
+ def_bool TUNE_Z990 || MARCH_Z990 && TUNE_DEFAULT
+
+config MARCH_Z9_109_TUNE
+ def_bool TUNE_Z9_109 || MARCH_Z9_109 && TUNE_DEFAULT
+
+config MARCH_Z10_TUNE
+ def_bool TUNE_Z10 || MARCH_Z10 && TUNE_DEFAULT
+
+config MARCH_Z196_TUNE
+ def_bool TUNE_Z196 || MARCH_Z196 && TUNE_DEFAULT
+
+config MARCH_ZEC12_TUNE
+ def_bool TUNE_ZEC12 || MARCH_ZEC12 && TUNE_DEFAULT
+
+choice
+ prompt "Tune code generation"
+ default TUNE_DEFAULT
+ help
+ Cause the compiler to tune (-mtune) the generated code for a machine.
+ This will make the code run faster on the selected machine but
+ somewhat slower on other machines.
+ This option only changes how the compiler emits instructions, not the
+ selection of instructions itself, so the resulting kernel will run on
+ all other machines.
+
+config TUNE_DEFAULT
+ bool "Default"
+ help
+ Tune the generated code for the target processor for which the kernel
+ will be compiled.
+
+config TUNE_G5
+ bool "System/390 model G5 and G6"
+
+config TUNE_Z900
+ bool "IBM zSeries model z800 and z900"
+
+config TUNE_Z990
+ bool "IBM zSeries model z890 and z990"
+
+config TUNE_Z9_109
+ bool "IBM System z9"
+
+config TUNE_Z10
+ bool "IBM System z10"
+
+config TUNE_Z196
+ bool "IBM zEnterprise 114 and 196"
+
+config TUNE_ZEC12
+ bool "IBM zBC12 and zEC12"
+
+endchoice
+
config 64BIT
def_bool y
prompt "64 bit kernel"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index a7d68a467ce8..874e6d6e9c5f 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -35,13 +35,21 @@ endif
export LD_BFD
-cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5)
-cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
-cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
-cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
-cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
-cflags-$(CONFIG_MARCH_Z196) += $(call cc-option,-march=z196)
-cflags-$(CONFIG_MARCH_ZEC12) += $(call cc-option,-march=zEC12)
+cflags-$(CONFIG_MARCH_G5) += -march=g5
+cflags-$(CONFIG_MARCH_Z900) += -march=z900
+cflags-$(CONFIG_MARCH_Z990) += -march=z990
+cflags-$(CONFIG_MARCH_Z9_109) += -march=z9-109
+cflags-$(CONFIG_MARCH_Z10) += -march=z10
+cflags-$(CONFIG_MARCH_Z196) += -march=z196
+cflags-$(CONFIG_MARCH_ZEC12) += -march=zEC12
+
+cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5
+cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900
+cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990
+cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
+cflags-$(CONFIG_MARCH_Z10_TUNE) += -mtune=z10
+cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196
+cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
#KBUILD_IMAGE is necessary for make rpm
KBUILD_IMAGE :=arch/s390/boot/image
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 87a22092b68f..4c4a1cef5208 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -48,9 +48,9 @@ static struct platform_device *appldata_pdev;
* /proc entries (sysctl)
*/
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
-static int appldata_timer_handler(ctl_table *ctl, int write,
+static int appldata_timer_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
-static int appldata_interval_handler(ctl_table *ctl, int write,
+static int appldata_interval_handler(struct ctl_table *ctl, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos);
@@ -201,10 +201,10 @@ static void __appldata_vtimer_setup(int cmd)
* Start/Stop timer, show status of timer (0 = not active, 1 = active)
*/
static int
-appldata_timer_handler(ctl_table *ctl, int write,
+appldata_timer_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- int len;
+ unsigned int len;
char buf[2];
if (!*lenp || *ppos) {
@@ -243,10 +243,11 @@ out:
* current timer interval.
*/
static int
-appldata_interval_handler(ctl_table *ctl, int write,
+appldata_interval_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- int len, interval;
+ unsigned int len;
+ int interval;
char buf[16];
if (!*lenp || *ppos) {
@@ -286,11 +287,12 @@ out:
* monitoring (0 = not in process, 1 = in process)
*/
static int
-appldata_generic_handler(ctl_table *ctl, int write,
+appldata_generic_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct appldata_ops *ops = NULL, *tmp_ops;
- int rc, len, found;
+ unsigned int len;
+ int rc, found;
char buf[2];
struct list_head *lh;
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
new file mode 100644
index 000000000000..e0af2ee58751
--- /dev/null
+++ b/arch/s390/configs/default_defconfig
@@ -0,0 +1,655 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_PCI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_CHSC_SCH=y
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_HIBERNATION=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_RDS_DEBUG=y
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_OSD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_XIP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_VHOST_NET=m
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_RAW_DRIVER=m
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TN3270_FS=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ZVM_WATCHDOG=m
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_VIRTIO_BALLOON=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD_DEBUG=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_XFS_DEBUG=y
+CONFIG_GFS2_FS=m
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_SWAP=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_READABLE_ASM=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_SLUB_STATS=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_RB=y
+CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_RT_MUTEX_TESTER=y
+CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCK_STAT=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
+CONFIG_DEBUG_WRITECOUNT=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_PROVE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=300
+CONFIG_NOTIFIER_ERROR_INJECTION=m
+CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
+CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAILSLAB=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAIL_MAKE_REQUEST=y
+CONFIG_FAIL_IO_TIMEOUT=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_LATENCYTOP=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+CONFIG_LKDTM=m
+CONFIG_KPROBES_SANITY_TEST=y
+CONFIG_RBTREE_TEST=m
+CONFIG_INTERVAL_TREE_TEST=m
+CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_DMA_API_DEBUG=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_S390_PTDUMP=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_IMA=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_CORDIC=m
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
new file mode 100644
index 000000000000..b9f6b4cab927
--- /dev/null
+++ b/arch/s390/configs/gcov_defconfig
@@ -0,0 +1,618 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_GCOV_KERNEL=y
+CONFIG_GCOV_PROFILE_ALL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+CONFIG_HZ_100=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_CHSC_SCH=y
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_HIBERNATION=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_OSD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_XIP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_VHOST_NET=m
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_RAW_DRIVER=m
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TN3270_FS=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ZVM_WATCHDOG=m
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_VIRTIO_BALLOON=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD_DEBUG=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_GFS2_FS=m
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_SWAP=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_TIMER_STATS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_NOTIFIER_ERROR_INJECTION=m
+CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
+CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_LATENCYTOP=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+CONFIG_LKDTM=m
+CONFIG_RBTREE_TEST=m
+CONFIG_INTERVAL_TREE_TEST=m
+CONFIG_ATOMIC64_SELFTEST=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_S390_PTDUMP=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_IMA=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_CORDIC=m
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
new file mode 100644
index 000000000000..91087b43e8fa
--- /dev/null
+++ b/arch/s390/configs/performance_defconfig
@@ -0,0 +1,610 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+CONFIG_HZ_100=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_CHSC_SCH=y
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_HIBERNATION=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_OSD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_XIP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_VHOST_NET=m
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_RAW_DRIVER=m
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TN3270_FS=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ZVM_WATCHDOG=m
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_VIRTIO_BALLOON=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD_DEBUG=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_GFS2_FS=m
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_SWAP=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_TIMER_STATS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_LATENCYTOP=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+CONFIG_LKDTM=m
+CONFIG_ATOMIC64_SELFTEST=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_S390_PTDUMP=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_IMA=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_CORDIC=m
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
new file mode 100644
index 000000000000..d725c4d956e4
--- /dev/null
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -0,0 +1,86 @@
+# CONFIG_SWAP is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+# CONFIG_COMPAT is not set
+CONFIG_NR_CPUS=2
+# CONFIG_HOTPLUG_CPU is not set
+CONFIG_HZ_100=y
+# CONFIG_COMPACTION is not set
+# CONFIG_MIGRATION is not set
+# CONFIG_CHECK_STACK is not set
+# CONFIG_CHSC_SCH is not set
+# CONFIG_SCM_BUS is not set
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_IUCV is not set
+CONFIG_ATM=y
+CONFIG_ATM_LANE=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV_XPRAM is not set
+# CONFIG_DCSSBLK is not set
+# CONFIG_DASD is not set
+CONFIG_ENCLOSURE_SERVICES=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_ENCLOSURE=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ZFCP=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_HVC_IUCV is not set
+CONFIG_RAW_DRIVER=y
+# CONFIG_SCLP_ASYNC is not set
+# CONFIG_HMC_DRV is not set
+# CONFIG_S390_TAPE is not set
+# CONFIG_VMCP is not set
+# CONFIG_MONWRITER is not set
+# CONFIG_S390_VMUR is not set
+# CONFIG_HID is not set
+CONFIG_MEMSTICK=y
+CONFIG_MEMSTICK_DEBUG=y
+CONFIG_MEMSTICK_UNSAFE_RESUME=y
+CONFIG_MSPRO_BLOCK=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_INOTIFY_USER is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_FTRACE is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+# CONFIG_PFAULT is not set
+# CONFIG_S390_HYPFS_FS is not set
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_S390_GUEST is not set
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index b4dbade8ca24..46cae138ece2 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -725,6 +725,8 @@ static struct crypto_alg xts_aes_alg = {
}
};
+static int xts_aes_alg_reg;
+
static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -846,6 +848,8 @@ static struct crypto_alg ctr_aes_alg = {
}
};
+static int ctr_aes_alg_reg;
+
static int __init aes_s390_init(void)
{
int ret;
@@ -884,6 +888,7 @@ static int __init aes_s390_init(void)
ret = crypto_register_alg(&xts_aes_alg);
if (ret)
goto xts_aes_err;
+ xts_aes_alg_reg = 1;
}
if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
@@ -902,6 +907,7 @@ static int __init aes_s390_init(void)
free_page((unsigned long) ctrblk);
goto ctr_aes_err;
}
+ ctr_aes_alg_reg = 1;
}
out:
@@ -921,9 +927,12 @@ aes_err:
static void __exit aes_s390_fini(void)
{
- crypto_unregister_alg(&ctr_aes_alg);
- free_page((unsigned long) ctrblk);
- crypto_unregister_alg(&xts_aes_alg);
+ if (ctr_aes_alg_reg) {
+ crypto_unregister_alg(&ctr_aes_alg);
+ free_page((unsigned long) ctrblk);
+ }
+ if (xts_aes_alg_reg)
+ crypto_unregister_alg(&xts_aes_alg);
crypto_unregister_alg(&cbc_aes_alg);
crypto_unregister_alg(&ecb_aes_alg);
crypto_unregister_alg(&aes_alg);
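
The aes_s390 hunk above introduces xts_aes_alg_reg and ctr_aes_alg_reg so that module exit only unregisters the algorithms the init path actually registered (their availability depends on the crypt_s390_func_available() probes). Below is a minimal user-space sketch of that register-flag pattern; every name in it, including the fake register/unregister helpers, is invented for illustration and is not kernel API.

#include <stdio.h>

static int xts_registered;      /* mirrors xts_aes_alg_reg */
static int ctr_registered;      /* mirrors ctr_aes_alg_reg */
static int hw_has_xts = 1;      /* assumed result of a facility probe */
static int hw_has_ctr = 0;

static int fake_register(const char *name)
{
        printf("register %s\n", name);
        return 0;               /* 0 == success, as in the kernel crypto API */
}

static void fake_unregister(const char *name)
{
        printf("unregister %s\n", name);
}

static void demo_init(void)
{
        if (hw_has_xts && fake_register("xts(aes)") == 0)
                xts_registered = 1;
        if (hw_has_ctr && fake_register("ctr(aes)") == 0)
                ctr_registered = 1;
}

static void demo_exit(void)
{
        /* tear down in reverse order, and only what init set up */
        if (ctr_registered)
                fake_unregister("ctr(aes)");
        if (xts_registered)
                fake_unregister("xts(aes)");
}

int main(void)
{
        demo_init();
        demo_exit();
        return 0;
}
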
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index d204c65bf722..33f57514f424 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -38,13 +38,14 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
-# CONFIG_EFI_PARTITION is not set
CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z196=y
CONFIG_HZ_100=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
@@ -152,6 +153,7 @@ CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index f313f9cbcf44..7a5288f3479a 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -2,3 +2,4 @@
generic-y += clkdev.h
generic-y += trace_clock.h
+generic-y += preempt.h
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c797832daa5f..fa9aaf7144b7 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -19,21 +19,50 @@
#define ATOMIC_INIT(i) { (i) }
-#define __CS_LOOP(ptr, op_val, op_string) ({ \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OR "lao"
+#define __ATOMIC_AND "lan"
+#define __ATOMIC_ADD "laa"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string) \
+({ \
+ int old_val; \
+ \
+ typecheck(atomic_t *, ptr); \
+ asm volatile( \
+ op_string " %0,%2,%1\n" \
+ : "=d" (old_val), "+Q" ((ptr)->counter) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OR "or"
+#define __ATOMIC_AND "nr"
+#define __ATOMIC_ADD "ar"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string) \
+({ \
int old_val, new_val; \
+ \
+ typecheck(atomic_t *, ptr); \
asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
op_string " %1,%3\n" \
" cs %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), \
- "=Q" (((atomic_t *)(ptr))->counter) \
- : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+ : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+ : "d" (op_val) \
: "cc", "memory"); \
- new_val; \
+ old_val; \
})
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
static inline int atomic_read(const atomic_t *v)
{
int c;
@@ -53,32 +82,45 @@ static inline void atomic_set(atomic_t *v, int i)
static inline int atomic_add_return(int i, atomic_t *v)
{
- return __CS_LOOP(v, i, "ar");
+ return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
}
-#define atomic_add(_i, _v) atomic_add_return(_i, _v)
-#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v) atomic_add_return(1, _v)
-#define atomic_inc_return(_v) atomic_add_return(1, _v)
-#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
-static inline int atomic_sub_return(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
{
- return __CS_LOOP(v, i, "sr");
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+ asm volatile(
+ "asi %0,%1\n"
+ : "+Q" (v->counter)
+ : "i" (i)
+ : "cc", "memory");
+ } else {
+ atomic_add_return(i, v);
+ }
+#else
+ atomic_add_return(i, v);
+#endif
}
-#define atomic_sub(_i, _v) atomic_sub_return(_i, _v)
+
+#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v) atomic_add(1, _v)
+#define atomic_inc_return(_v) atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
+#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
+#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v) atomic_sub_return(1, _v)
+#define atomic_dec(_v) atomic_sub(1, _v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
- __CS_LOOP(v, ~mask, "nr");
+ __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
}
-static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
- __CS_LOOP(v, mask, "or");
+ __ATOMIC_LOOP(v, mask, __ATOMIC_OR);
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -87,8 +129,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
asm volatile(
" cs %0,%2,%1"
- : "+d" (old), "=Q" (v->counter)
- : "d" (new), "Q" (v->counter)
+ : "+d" (old), "+Q" (v->counter)
+ : "d" (new)
: "cc", "memory");
return old;
}
@@ -109,27 +151,56 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
}
-#undef __CS_LOOP
+#undef __ATOMIC_LOOP
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_64BIT
-#define __CSG_LOOP(ptr, op_val, op_string) ({ \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC64_OR "laog"
+#define __ATOMIC64_AND "lang"
+#define __ATOMIC64_ADD "laag"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
+({ \
+ long long old_val; \
+ \
+ typecheck(atomic64_t *, ptr); \
+ asm volatile( \
+ op_string " %0,%2,%1\n" \
+ : "=d" (old_val), "+Q" ((ptr)->counter) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC64_OR "ogr"
+#define __ATOMIC64_AND "ngr"
+#define __ATOMIC64_ADD "agr"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
+({ \
long long old_val, new_val; \
+ \
+ typecheck(atomic64_t *, ptr); \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), \
- "=Q" (((atomic_t *)(ptr))->counter) \
- : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+ : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+ : "d" (op_val) \
: "cc", "memory"); \
- new_val; \
+ old_val; \
})
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
static inline long long atomic64_read(const atomic64_t *v)
{
long long c;
@@ -149,22 +220,17 @@ static inline void atomic64_set(atomic64_t *v, long long i)
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
- return __CSG_LOOP(v, i, "agr");
-}
-
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
- return __CSG_LOOP(v, i, "sgr");
+ return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
}
static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
- __CSG_LOOP(v, ~mask, "ngr");
+ __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
}
static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
- __CSG_LOOP(v, mask, "ogr");
+ __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -174,13 +240,13 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
{
asm volatile(
" csg %0,%2,%1"
- : "+d" (old), "=Q" (v->counter)
- : "d" (new), "Q" (v->counter)
+ : "+d" (old), "+Q" (v->counter)
+ : "d" (new)
: "cc", "memory");
return old;
}
-#undef __CSG_LOOP
+#undef __ATOMIC64_LOOP
#else /* CONFIG_64BIT */
@@ -216,8 +282,8 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
" lm %0,%N0,%1\n"
"0: cds %0,%2,%1\n"
" jl 0b\n"
- : "=&d" (rp_old), "=Q" (v->counter)
- : "d" (rp_new), "Q" (v->counter)
+ : "=&d" (rp_old), "+Q" (v->counter)
+ : "d" (rp_new)
: "cc");
return rp_old.pair;
}
@@ -230,8 +296,8 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
asm volatile(
" cds %0,%2,%1"
- : "+&d" (rp_old), "=Q" (v->counter)
- : "d" (rp_new), "Q" (v->counter)
+ : "+&d" (rp_old), "+Q" (v->counter)
+ : "d" (rp_new)
: "cc");
return rp_old.pair;
}
@@ -248,17 +314,6 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
return new;
}
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
- long long old, new;
-
- do {
- old = atomic64_read(v);
- new = old - i;
- } while (atomic64_cmpxchg(v, old, new) != old);
- return new;
-}
-
static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
long long old, new;
@@ -281,7 +336,24 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
#endif /* CONFIG_64BIT */
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+ asm volatile(
+ "agsi %0,%1\n"
+ : "+Q" (v->counter)
+ : "i" (i)
+ : "cc", "memory");
+ } else {
+ atomic64_add_return(i, v);
+ }
+#else
+ atomic64_add_return(i, v);
+#endif
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
long long c, old;
@@ -289,7 +361,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
for (;;) {
if (unlikely(c == u))
break;
- old = atomic64_cmpxchg(v, c, c + a);
+ old = atomic64_cmpxchg(v, c, c + i);
if (likely(old == c))
break;
c = old;
@@ -314,14 +386,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
return dec;
}
-#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v) atomic64_add_return(1, _v)
+#define atomic64_inc(_v) atomic64_add(1, _v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
-#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
+#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec(_v) atomic64_sub(1, _v)
#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
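
The reworked __ATOMIC_LOOP/__ATOMIC64_LOOP macros above now hand back the old counter value, matching what the z196 laa/laag instructions deliver, so atomic_add_return() becomes "old value plus operand" and the sub/dec helpers are expressed as additions of the negated operand. The following is a rough user-space model of that contract, using GCC's __atomic builtins as a stand-in for the interlocked-access instructions; it is an illustration only, not the kernel implementation.

#include <assert.h>

static int model_fetch_add(int *counter, int i)
{
        /* like laa: atomically add and hand back the previous value */
        return __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST);
}

static int model_add_return(int *counter, int i)
{
        /* __ATOMIC_LOOP yields the old value, so add_return is old + i */
        return model_fetch_add(counter, i) + i;
}

static int model_sub_return(int *counter, int i)
{
        /* atomic_sub_return(i, v) is defined as atomic_add_return(-i, v) */
        return model_add_return(counter, -i);
}

int main(void)
{
        int v = 40;

        assert(model_add_return(&v, 2) == 42);
        assert(model_sub_return(&v, 2) == 40);
        assert(v == 40);
        return 0;
}
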
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 10135a38673c..6e6ad0680829 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -1,10 +1,40 @@
/*
- * S390 version
- * Copyright IBM Corp. 1999
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Copyright IBM Corp. 1999,2013
*
- * Derived from "include/asm-i386/bitops.h"
- * Copyright (C) 1992, Linus Torvalds
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *
+ * The description below was taken in large parts from the powerpc
+ * bitops header file:
+ * Within a word, bits are numbered LSB first. Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for an
+ * s390x system the bits end up numbered:
+ * |63..............0|127............64|191...........128|255...........192|
+ * and on s390:
+ * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps; these work on similar bit array layouts, but
+ * byte-oriented:
+ * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
+ * number field needs to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * We also have special functions which work with an MSB0 encoding:
+ * on an s390x system the bits are numbered:
+ * |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit
+ * number field needs to be reversed compared to the LSB0 encoded bit
+ * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b).
*
*/
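
As a quick sanity check of the XOR conversions described in the comment above (assuming BITS_PER_LONG == 64), this stand-alone sketch verifies that LSB0 bit 0 maps to byte-oriented little-endian bit 56 via XOR with 0x38, to MSB0 bit 63 via XOR with 0x3f, and that both conversions are their own inverse.

#include <assert.h>

#define BITS_PER_LONG 64

int main(void)
{
        /* LSB0 bit 0 is the least significant bit of the big-endian word;
         * that byte sits at byte offset 7 in memory, so the byte-oriented
         * (little-endian) bit number is 7 * 8 + 0 = 56 = 0 ^ 0x38. */
        assert((0 ^ 0x38) == 56);

        /* MSB0 bit 0 is the most significant bit, i.e. LSB0 bit 63. */
        assert((0 ^ 0x3f) == 63);

        /* Both conversions are involutions. */
        unsigned long nr = 42;
        assert(((nr ^ 0x38) ^ 0x38) == nr);
        assert(((nr ^ (BITS_PER_LONG - 1)) ^ (BITS_PER_LONG - 1)) == nr);
        return 0;
}
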
@@ -15,556 +45,348 @@
#error only <linux/bitops.h> can be included directly
#endif
+#include <linux/typecheck.h>
#include <linux/compiler.h>
-/*
- * 32 bit bitops format:
- * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
- * bit 32 is the LSB of *(addr+4). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
- * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- *
- * 64 bit bitops format:
- * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
- * bit 64 is the LSB of *(addr+8). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
- * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- * 7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
- * 6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
- * 5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
- * 4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- */
-
-/* bitmap tables from arch/s390/kernel/bitmap.c */
-extern const char _oi_bitmap[];
-extern const char _ni_bitmap[];
-extern const char _zb_findmap[];
-extern const char _sb_findmap[];
-
#ifndef CONFIG_64BIT
#define __BITOPS_OR "or"
#define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr"
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string) \
+({ \
+ unsigned long __old, __new; \
+ \
+ typecheck(unsigned long *, (__addr)); \
asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
__op_string " %1,%3\n" \
" cs %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (__old), "=&d" (__new), \
- "=Q" (*(unsigned long *) __addr) \
- : "d" (__val), "Q" (*(unsigned long *) __addr) \
- : "cc");
+ : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
+ : "d" (__val) \
+ : "cc"); \
+ __old; \
+})
#else /* CONFIG_64BIT */
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __BITOPS_OR "laog"
+#define __BITOPS_AND "lang"
+#define __BITOPS_XOR "laxg"
+
+#define __BITOPS_LOOP(__addr, __val, __op_string) \
+({ \
+ unsigned long __old; \
+ \
+ typecheck(unsigned long *, (__addr)); \
+ asm volatile( \
+ __op_string " %0,%2,%1\n" \
+ : "=d" (__old), "+Q" (*(__addr)) \
+ : "d" (__val) \
+ : "cc"); \
+ __old; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
#define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr"
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string) \
+({ \
+ unsigned long __old, __new; \
+ \
+ typecheck(unsigned long *, (__addr)); \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
__op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
- : "=&d" (__old), "=&d" (__new), \
- "=Q" (*(unsigned long *) __addr) \
- : "d" (__val), "Q" (*(unsigned long *) __addr) \
- : "cc");
+ : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
+ : "d" (__val) \
+ : "cc"); \
+ __old; \
+})
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#endif /* CONFIG_64BIT */
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
-#ifdef CONFIG_SMP
-/*
- * SMP safe set_bit routine based on compare and swap (CS)
- */
-static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline unsigned long *
+__bitops_word(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
+ return (unsigned long *)addr;
+}
+
+static inline unsigned char *
+__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+}
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask;
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make OR mask */
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ if (__builtin_constant_p(nr)) {
+ unsigned char *caddr = __bitops_byte(nr, ptr);
+
+ asm volatile(
+ "oi %0,%b1\n"
+ : "+Q" (*caddr)
+ : "i" (1 << (nr & 7))
+ : "cc");
+ return;
+ }
+#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
+ __BITOPS_LOOP(addr, mask, __BITOPS_OR);
}
-/*
- * SMP safe clear_bit routine based on compare and swap (CS)
- */
-static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ if (__builtin_constant_p(nr)) {
+ unsigned char *caddr = __bitops_byte(nr, ptr);
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make AND mask */
+ asm volatile(
+ "ni %0,%b1\n"
+ : "+Q" (*caddr)
+ : "i" (~(1 << (nr & 7)))
+ : "cc");
+ return;
+ }
+#endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
+ __BITOPS_LOOP(addr, mask, __BITOPS_AND);
}
-/*
- * SMP safe change_bit routine based on compare and swap (CS)
- */
-static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ if (__builtin_constant_p(nr)) {
+ unsigned char *caddr = __bitops_byte(nr, ptr);
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make XOR mask */
+ asm volatile(
+ "xi %0,%b1\n"
+ : "+Q" (*caddr)
+ : "i" (1 << (nr & 7))
+ : "cc");
+ return;
+ }
+#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
+ __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
}
-/*
- * SMP safe test_and_set_bit routine based on compare and swap (CS)
- */
static inline int
-test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long old, mask;
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make OR/test mask */
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
barrier();
return (old & mask) != 0;
}
-/*
- * SMP safe test_and_clear_bit routine based on compare and swap (CS)
- */
static inline int
-test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long old, mask;
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make AND/test mask */
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
barrier();
- return (old ^ new) != 0;
+ return (old & ~mask) != 0;
}
-/*
- * SMP safe test_and_change_bit routine based on compare and swap (CS)
- */
static inline int
-test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr, old, new, mask;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long old, mask;
- addr = (unsigned long) ptr;
- /* calculate address for CS */
- addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
- /* make XOR/test mask */
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- /* Do the atomic update. */
- __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
barrier();
return (old & mask) != 0;
}
-#endif /* CONFIG_SMP */
-/*
- * fast, non-SMP set_bit routine
- */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
-
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- asm volatile(
- " oc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
-}
-
-static inline void
-__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
- addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- *(unsigned char *) addr |= 1 << (nr & 7);
+ *addr |= 1 << (nr & 7);
}
-#define set_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_set_bit((nr),(addr)) : \
- __set_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP clear_bit routine
- */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
-
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- asm volatile(
- " nc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
-}
-
-static inline void
-__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
- addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- *(unsigned char *) addr &= ~(1 << (nr & 7));
+ *addr &= ~(1 << (nr & 7));
}
-#define clear_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_clear_bit((nr),(addr)) : \
- __clear_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP change_bit routine
- */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
-
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- asm volatile(
- " xc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
-}
-
-static inline void
-__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
- addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- *(unsigned char *) addr ^= 1 << (nr & 7);
+ *addr ^= 1 << (nr & 7);
}
-#define change_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_change_bit((nr),(addr)) : \
- __change_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP test_and_set_bit routine
- */
static inline int
-test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- ch = *(unsigned char *) addr;
- asm volatile(
- " oc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
- : "cc", "memory");
+ ch = *addr;
+ *addr |= 1 << (nr & 7);
return (ch >> (nr & 7)) & 1;
}
-#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
-/*
- * fast, non-SMP test_and_clear_bit routine
- */
static inline int
-test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- ch = *(unsigned char *) addr;
- asm volatile(
- " nc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
- : "cc", "memory");
+ ch = *addr;
+ *addr &= ~(1 << (nr & 7));
return (ch >> (nr & 7)) & 1;
}
-#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
-/*
- * fast, non-SMP test_and_change_bit routine
- */
static inline int
-test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned long addr;
+ unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- ch = *(unsigned char *) addr;
- asm volatile(
- " xc %O0(1,%R0),%1"
- : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
- : "cc", "memory");
+ ch = *addr;
+ *addr ^= 1 << (nr & 7);
return (ch >> (nr & 7)) & 1;
}
-#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
-
-#ifdef CONFIG_SMP
-#define set_bit set_bit_cs
-#define clear_bit clear_bit_cs
-#define change_bit change_bit_cs
-#define test_and_set_bit test_and_set_bit_cs
-#define test_and_clear_bit test_and_clear_bit_cs
-#define test_and_change_bit test_and_change_bit_cs
-#else
-#define set_bit set_bit_simple
-#define clear_bit clear_bit_simple
-#define change_bit change_bit_simple
-#define test_and_set_bit test_and_set_bit_simple
-#define test_and_clear_bit test_and_clear_bit_simple
-#define test_and_change_bit test_and_change_bit_simple
-#endif
-
-
-/*
- * This routine doesn't need to be atomic.
- */
-static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
+static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
- unsigned long addr;
- unsigned char ch;
-
- addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
- ch = *(volatile unsigned char *) addr;
- return (ch >> (nr & 7)) & 1;
-}
+ const volatile unsigned char *addr;
-static inline int
-__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
- return (((volatile char *) addr)
- [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
+ addr = ((const volatile unsigned char *)ptr);
+ addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
+ return (*addr >> (nr & 7)) & 1;
}
-#define test_bit(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)) )
-
/*
- * Optimized find bit helper functions.
- */
-
-/**
- * __ffz_word_loop - find byte offset of first long != -1UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
+ * Functions which use MSB0 bit numbering.
+ * On an s390x system the bits are numbered:
+ * |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
*/
-static inline unsigned long __ffz_word_loop(const unsigned long *addr,
- unsigned long size)
-{
- typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
- unsigned long bytes = 0;
-
- asm volatile(
-#ifndef CONFIG_64BIT
- " ahi %1,-1\n"
- " sra %1,5\n"
- " jz 1f\n"
- "0: c %2,0(%0,%3)\n"
- " jne 1f\n"
- " la %0,4(%0)\n"
- " brct %1,0b\n"
- "1:\n"
-#else
- " aghi %1,-1\n"
- " srag %1,%1,6\n"
- " jz 1f\n"
- "0: cg %2,0(%0,%3)\n"
- " jne 1f\n"
- " la %0,8(%0)\n"
- " brct %1,0b\n"
- "1:\n"
-#endif
- : "+&a" (bytes), "+&d" (size)
- : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
- : "cc" );
- return bytes;
-}
+unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
+unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
+ unsigned long offset);
-/**
- * __ffs_word_loop - find byte offset of first long != 0UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
- */
-static inline unsigned long __ffs_word_loop(const unsigned long *addr,
- unsigned long size)
+static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
- typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
- unsigned long bytes = 0;
-
- asm volatile(
-#ifndef CONFIG_64BIT
- " ahi %1,-1\n"
- " sra %1,5\n"
- " jz 1f\n"
- "0: c %2,0(%0,%3)\n"
- " jne 1f\n"
- " la %0,4(%0)\n"
- " brct %1,0b\n"
- "1:\n"
-#else
- " aghi %1,-1\n"
- " srag %1,%1,6\n"
- " jz 1f\n"
- "0: cg %2,0(%0,%3)\n"
- " jne 1f\n"
- " la %0,8(%0)\n"
- " brct %1,0b\n"
- "1:\n"
-#endif
- : "+&a" (bytes), "+&a" (size)
- : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
- : "cc" );
- return bytes;
+ return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-/**
- * __ffz_word - add number of the first unset bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for unset bits
- */
-static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
+static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
-#ifdef CONFIG_64BIT
- if ((word & 0xffffffff) == 0xffffffff) {
- word >>= 32;
- nr += 32;
- }
-#endif
- if ((word & 0xffff) == 0xffff) {
- word >>= 16;
- nr += 16;
- }
- if ((word & 0xff) == 0xff) {
- word >>= 8;
- nr += 8;
- }
- return nr + _zb_findmap[(unsigned char) word];
+ return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-/**
- * __ffs_word - add number of the first set bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for set bits
- */
-static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
+static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
-#ifdef CONFIG_64BIT
- if ((word & 0xffffffff) == 0) {
- word >>= 32;
- nr += 32;
- }
-#endif
- if ((word & 0xffff) == 0) {
- word >>= 16;
- nr += 16;
- }
- if ((word & 0xff) == 0) {
- word >>= 8;
- nr += 8;
- }
- return nr + _sb_findmap[(unsigned char) word];
+ return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-
-/**
- * __load_ulong_be - load big endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_be(const unsigned long *p,
- unsigned long offset)
+static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
- p = (unsigned long *)((unsigned long) p + offset);
- return *p;
+ return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-/**
- * __load_ulong_le - load little endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_le(const unsigned long *p,
- unsigned long offset)
+static inline int test_bit_inv(unsigned long nr,
+ const volatile unsigned long *ptr)
{
- unsigned long word;
-
- p = (unsigned long *)((unsigned long) p + offset);
-#ifndef CONFIG_64BIT
- asm volatile(
- " ic %0,%O1(%R1)\n"
- " icm %0,2,%O1+1(%R1)\n"
- " icm %0,4,%O1+2(%R1)\n"
- " icm %0,8,%O1+3(%R1)"
- : "=&d" (word) : "Q" (*p) : "cc");
-#else
- asm volatile(
- " lrvg %0,%1"
- : "=d" (word) : "m" (*p) );
-#endif
- return word;
+ return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-/*
- * The various find bit functions.
- */
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-/*
- * ffz - find first zero in word.
- * @word: The word to search
+/**
+ * __flogr - find leftmost one
+ * @word: The word to search
*
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
- return __ffz_word(0, word);
+ * Returns the bit number of the most significant bit set,
+ * where the most significant bit has bit number 0.
+ * If no bit is set this function returns 64.
+ */
+static inline unsigned char __flogr(unsigned long word)
+{
+ if (__builtin_constant_p(word)) {
+ unsigned long bit = 0;
+
+ if (!word)
+ return 64;
+ if (!(word & 0xffffffff00000000UL)) {
+ word <<= 32;
+ bit += 32;
+ }
+ if (!(word & 0xffff000000000000UL)) {
+ word <<= 16;
+ bit += 16;
+ }
+ if (!(word & 0xff00000000000000UL)) {
+ word <<= 8;
+ bit += 8;
+ }
+ if (!(word & 0xf000000000000000UL)) {
+ word <<= 4;
+ bit += 4;
+ }
+ if (!(word & 0xc000000000000000UL)) {
+ word <<= 2;
+ bit += 2;
+ }
+ if (!(word & 0x8000000000000000UL)) {
+ word <<= 1;
+ bit += 1;
+ }
+ return bit;
+ } else {
+ register unsigned long bit asm("4") = word;
+ register unsigned long out asm("5");
+
+ asm volatile(
+ " flogr %[bit],%[bit]\n"
+ : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
+ return bit;
+ }
}
/**
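
The new __flogr() above counts the leftmost set bit from the MSB (bit number 0) and returns 64 for a zero word; __ffs() further down isolates the lowest set bit with -word & word and flips the result back to an LSB0 index by XOR with 63. Here is a user-space model of that relationship, assuming a 64-bit build and using __builtin_clzll() as a stand-in for the flogr instruction; it is an illustration only, not the kernel code.

#include <assert.h>

static unsigned long model_flogr(unsigned long word)
{
        return word ? (unsigned long)__builtin_clzll(word) : 64;
}

static unsigned long model___ffs(unsigned long word)
{
        /* -word & word isolates the lowest set bit; XOR with 63 turns the
         * MSB0 result back into an LSB0 bit number. */
        return model_flogr(-word & word) ^ 63UL;
}

int main(void)
{
        assert(model_flogr(1UL << 63) == 0);    /* MSB set -> bit number 0 */
        assert(model_flogr(1UL) == 63);         /* LSB set -> bit number 63 */
        assert(model_flogr(0UL) == 64);         /* no bit set */

        assert(model___ffs(0x8000UL) == 15);    /* lowest set bit, LSB0 */
        assert(model___ffs(0xf0UL) == 4);
        return 0;
}
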
@@ -573,337 +395,83 @@ static inline unsigned long ffz(unsigned long word)
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static inline unsigned long __ffs (unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
{
- return __ffs_word(0, word);
+ return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
}
/**
* ffs - find first bit set
- * @x: the word to search
+ * @word: the word to search
*
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * This is defined the same way as the libc and
+ * compiler builtin ffs routines (man ffs).
*/
-static inline int ffs(int x)
+static inline int ffs(int word)
{
- if (!x)
- return 0;
- return __ffs_word(1, x);
+ unsigned long mask = 2 * BITS_PER_LONG - 1;
+ unsigned int val = (unsigned int)word;
+
+ return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
}
/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
*
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
+ * Undefined if no set bit exists, so code should check against 0 first.
*/
-static inline unsigned long find_first_zero_bit(const unsigned long *addr,
- unsigned long size)
+static inline unsigned long __fls(unsigned long word)
{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffz_word_loop(addr, size);
- bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
- return (bits < size) ? bits : size;
+ return __flogr(word) ^ (BITS_PER_LONG - 1);
}
-#define find_first_zero_bit find_first_zero_bit
/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
+ * fls64 - find last set bit in a 64-bit word
+ * @word: the word to search
*
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-static inline unsigned long find_first_bit(const unsigned long * addr,
- unsigned long size)
-{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffs_word_loop(addr, size);
- bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
- return (bits < size) ? bits : size;
-}
-#define find_first_bit find_first_bit
-
-/*
- * Big endian variant whichs starts bit counting from left using
- * the flogr (find leftmost one) instruction.
- */
-static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
-{
- register unsigned long bit asm("2") = val;
- register unsigned long out asm("3");
-
- asm volatile (
- " .insn rre,0xb9830000,%[bit],%[bit]\n"
- : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
- return nr + bit;
-}
-
-/*
- * 64 bit special left bitops format:
- * order in memory:
- * 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
- * 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
- * 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
- * 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
- * after that follows the next long with bit numbers
- * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
- * 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
- * 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
- * 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
- * The reason for this bit ordering is the fact that
- * the hardware sets bits in a bitmap starting at bit 0
- * and we don't want to scan the bitmap from the 'wrong
- * end'.
+ * This is defined similarly to the libc and compiler builtin ffsll,
+ * but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
*/
-static inline unsigned long find_first_bit_left(const unsigned long *addr,
- unsigned long size)
-{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffs_word_loop(addr, size);
- bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
- return (bits < size) ? bits : size;
-}
-
-static inline int find_next_bit_left(const unsigned long *addr,
- unsigned long size,
- unsigned long offset)
+static inline int fls64(unsigned long word)
{
- const unsigned long *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- set = __flo_word(0, *p & (~0UL >> bit));
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_bit_left(p, size);
-}
-
-#define for_each_set_bit_left(bit, addr, size) \
- for ((bit) = find_first_bit_left((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit_left((addr), (size), (bit) + 1))
-
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_left_cont(bit, addr, size) \
- for ((bit) = find_next_bit_left((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_bit_left((addr), (size), (bit) + 1))
+ unsigned long mask = 2 * BITS_PER_LONG - 1;
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline int find_next_zero_bit (const unsigned long * addr,
- unsigned long size,
- unsigned long offset)
-{
- const unsigned long *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- /*
- * __ffz_word returns BITS_PER_LONG
- * if no zero bit is present in the word.
- */
- set = __ffz_word(bit, *p >> bit);
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_zero_bit(p, size);
+ return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
}
-#define find_next_zero_bit find_next_zero_bit
/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
+ * fls - find last (most-significant) bit set
+ * @word: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static inline int find_next_bit (const unsigned long * addr,
- unsigned long size,
- unsigned long offset)
+static inline int fls(int word)
{
- const unsigned long *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- /*
- * __ffs_word returns BITS_PER_LONG
- * if no one bit is present in the word.
- */
- set = __ffs_word(0, *p & (~0UL << bit));
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_bit(p, size);
+ return fls64((unsigned int)word);
}
-#define find_next_bit find_next_bit
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
- return find_first_bit(b, 140);
-}
+#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
-#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
+#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
-
-/*
- * ATTENTION: intel byte ordering convention for ext2 and minix !!
- * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
- * bit 32 is the LSB of (addr+4).
- * That combined with the little endian byte order of Intel gives the
- * following bit order in memory:
- * 07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
- * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
- */
-
-static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
-{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffz_word_loop(vaddr, size);
- bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
- return (bits < size) ? bits : size;
-}
-#define find_first_zero_bit_le find_first_zero_bit_le
-
-static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
- unsigned long offset)
-{
- unsigned long *addr = vaddr, *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- /*
- * s390 version of ffz returns BITS_PER_LONG
- * if no zero bit is present in the word.
- */
- set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_zero_bit_le(p, size);
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
-{
- unsigned long bytes, bits;
-
- if (!size)
- return 0;
- bytes = __ffs_word_loop(vaddr, size);
- bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
- return (bits < size) ? bits : size;
-}
-#define find_first_bit_le find_first_bit_le
-
-static inline int find_next_bit_le(void *vaddr, unsigned long size,
- unsigned long offset)
-{
- unsigned long *addr = vaddr, *p;
- unsigned long bit, set;
-
- if (offset >= size)
- return size;
- bit = offset & (BITS_PER_LONG - 1);
- offset -= bit;
- size -= offset;
- p = addr + offset / BITS_PER_LONG;
- if (bit) {
- /*
- * s390 version of ffz returns BITS_PER_LONG
- * if no zero bit is present in the word.
- */
- set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
- if (set >= size)
- return size + offset;
- if (set < BITS_PER_LONG)
- return set + offset;
- offset += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- p++;
- }
- return offset + find_first_bit_le(p, size);
-}
-#define find_next_bit_le find_next_bit_le
-
+#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
-
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* _S390_BITOPS_H */
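A minimal, host-side sketch of the flogr-based derivations introduced above: __ffs, __fls and fls64 are all obtained from one left-based bit count by XOR-ing it with BITS_PER_LONG - 1. The hypothetical flogr64() below mirrors only the pure-C constant-folding branch of __flogr() (no s390 inline assembly) and assumes a 64-bit unsigned long, so the index arithmetic can be checked on any ordinary host.

#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG 64

/* Left-based count: bit number of the most significant set bit, 64 if none. */
static unsigned int flogr64(unsigned long word)
{
        unsigned int bit = 0;

        if (!word)
                return 64;
        if (!(word & 0xffffffff00000000UL)) { word <<= 32; bit += 32; }
        if (!(word & 0xffff000000000000UL)) { word <<= 16; bit += 16; }
        if (!(word & 0xff00000000000000UL)) { word <<= 8;  bit += 8;  }
        if (!(word & 0xf000000000000000UL)) { word <<= 4;  bit += 4;  }
        if (!(word & 0xc000000000000000UL)) { word <<= 2;  bit += 2;  }
        if (!(word & 0x8000000000000000UL)) { word <<= 1;  bit += 1;  }
        return bit;
}

int main(void)
{
        unsigned long mask = 2 * BITS_PER_LONG - 1;
        unsigned long x = 0x10;

        /* __ffs: isolate the lowest set bit, then flip the left-based
         * count into an lsb-based index by XOR-ing with 63. */
        assert((flogr64(-x & x) ^ (BITS_PER_LONG - 1)) == 4);
        /* fls64(0): flogr returns 64, 1 + (64 ^ 63) = 128, and masking
         * with 2 * BITS_PER_LONG - 1 = 127 folds that back to 0. */
        assert(((1 + (flogr64(0) ^ (BITS_PER_LONG - 1))) & mask) == 0);
        assert(((1 + (flogr64(1UL << 63) ^ (BITS_PER_LONG - 1))) & mask) == 64);
        printf("flogr-derived __ffs/fls64 identities hold\n");
        return 0;
}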
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index c1e7c646727c..4bf9da03591e 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -22,6 +22,7 @@
#define PSW32_MASK_ASC 0x0000C000UL
#define PSW32_MASK_CC 0x00003000UL
#define PSW32_MASK_PM 0x00000f00UL
+#define PSW32_MASK_RI 0x00000080UL
#define PSW32_MASK_USER 0x0000FF00UL
@@ -35,7 +36,9 @@
#define PSW32_ASC_SECONDARY 0x00008000UL
#define PSW32_ASC_HOME 0x0000C000UL
-extern u32 psw32_user_bits;
+#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
+ PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
+ PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME)
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "s390\0\0\0\0"
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index debfda33d1f8..9b69c0befdca 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -8,69 +8,59 @@
#define __ASM_CTL_REG_H
#ifdef CONFIG_64BIT
-
-#define __ctl_load(array, low, high) ({ \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- asm volatile( \
- " lctlg %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), \
- "i" (low), "i" (high)); \
- })
-
-#define __ctl_store(array, low, high) ({ \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- asm volatile( \
- " stctg %1,%2,%0\n" \
- : "=Q" (*(addrtype *)(&array)) \
- : "i" (low), "i" (high)); \
- })
-
-#else /* CONFIG_64BIT */
-
-#define __ctl_load(array, low, high) ({ \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- asm volatile( \
- " lctl %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), \
- "i" (low), "i" (high)); \
-})
-
-#define __ctl_store(array, low, high) ({ \
- typedef struct { char _[sizeof(array)]; } addrtype; \
- asm volatile( \
- " stctl %1,%2,%0\n" \
- : "=Q" (*(addrtype *)(&array)) \
- : "i" (low), "i" (high)); \
- })
-
-#endif /* CONFIG_64BIT */
-
-#define __ctl_set_bit(cr, bit) ({ \
- unsigned long __dummy; \
- __ctl_store(__dummy, cr, cr); \
- __dummy |= 1UL << (bit); \
- __ctl_load(__dummy, cr, cr); \
-})
-
-#define __ctl_clear_bit(cr, bit) ({ \
- unsigned long __dummy; \
- __ctl_store(__dummy, cr, cr); \
- __dummy &= ~(1UL << (bit)); \
- __ctl_load(__dummy, cr, cr); \
-})
+# define __CTL_LOAD "lctlg"
+# define __CTL_STORE "stctg"
+#else
+# define __CTL_LOAD "lctl"
+# define __CTL_STORE "stctl"
+#endif
+
+#define __ctl_load(array, low, high) { \
+ typedef struct { char _[sizeof(array)]; } addrtype; \
+ \
+ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+ asm volatile( \
+ __CTL_LOAD " %1,%2,%0\n" \
+ : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+}
+
+#define __ctl_store(array, low, high) { \
+ typedef struct { char _[sizeof(array)]; } addrtype; \
+ \
+ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+ asm volatile( \
+ __CTL_STORE " %1,%2,%0\n" \
+ : "=Q" (*(addrtype *)(&array)) \
+ : "i" (low), "i" (high)); \
+}
+
+static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+ unsigned long reg;
+
+ __ctl_store(reg, cr, cr);
+ reg |= 1UL << bit;
+ __ctl_load(reg, cr, cr);
+}
+
+static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+ unsigned long reg;
+
+ __ctl_store(reg, cr, cr);
+ reg &= ~(1UL << bit);
+ __ctl_load(reg, cr, cr);
+}
+
+void smp_ctl_set_bit(int cr, int bit);
+void smp_ctl_clear_bit(int cr, int bit);
#ifdef CONFIG_SMP
-
-extern void smp_ctl_set_bit(int cr, int bit);
-extern void smp_ctl_clear_bit(int cr, int bit);
-#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
-
+# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
#else
-
-#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
-
-#endif /* CONFIG_SMP */
+# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
+#endif
#endif /* __ASM_CTL_REG_H */
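A hedged illustration of the compile-time size check the reworked __ctl_load()/__ctl_store() macros gain: BUILD_BUG_ON() now rejects any call whose array does not cover exactly one long per control register in [low, high]. The sketch uses C11 _Static_assert as a stand-in for BUILD_BUG_ON(); CHECK_RANGE_FITS() and the demo program are illustrative only, not kernel interfaces.

#include <stdio.h>

/* Stand-in for BUILD_BUG_ON(): fail the build unless the array holds
 * exactly one long per register in [low, high]. */
#define CHECK_RANGE_FITS(array, low, high)                                    \
        _Static_assert(sizeof(array) == ((high) - (low) + 1) * sizeof(long),  \
                       "array does not match the register range")

int main(void)
{
        unsigned long cregs[2];

        CHECK_RANGE_FITS(cregs, 0, 1);  /* compiles: two registers, two longs */
        /* CHECK_RANGE_FITS(cregs, 0, 2); would be rejected at compile time */
        cregs[0] = cregs[1] = 0;
        printf("checked %zu control register slots\n",
               sizeof(cregs) / sizeof(cregs[0]));
        return 0;
}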
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 188c5052a20a..530c15eb01e9 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -107,6 +107,11 @@ void debug_set_level(debug_info_t* id, int new_level);
void debug_set_critical(void);
void debug_stop_all(void);
+static inline bool debug_level_enabled(debug_info_t* id, int level)
+{
+ return level <= id->level;
+}
+
static inline debug_entry_t*
debug_event(debug_info_t* id, int level, void* data, int length)
{
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
new file mode 100644
index 000000000000..04a83f5773cd
--- /dev/null
+++ b/arch/s390/include/asm/dis.h
@@ -0,0 +1,52 @@
+/*
+ * Disassemble s390 instructions.
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#ifndef __ASM_S390_DIS_H__
+#define __ASM_S390_DIS_H__
+
+/* Type of operand */
+#define OPERAND_GPR 0x1 /* Operand printed as %rx */
+#define OPERAND_FPR 0x2 /* Operand printed as %fx */
+#define OPERAND_AR 0x4 /* Operand printed as %ax */
+#define OPERAND_CR 0x8 /* Operand printed as %cx */
+#define OPERAND_DISP 0x10 /* Operand printed as displacement */
+#define OPERAND_BASE 0x20 /* Operand printed as base register */
+#define OPERAND_INDEX 0x40 /* Operand printed as index register */
+#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
+#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
+#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
+
+
+struct s390_operand {
+ int bits; /* The number of bits in the operand. */
+ int shift; /* The number of bits to shift. */
+ int flags; /* One bit syntax flags. */
+};
+
+struct s390_insn {
+ const char name[5];
+ unsigned char opfrag;
+ unsigned char format;
+};
+
+
+static inline int insn_length(unsigned char code)
+{
+ return ((((int) code + 64) >> 7) + 1) << 1;
+}
+
+void show_code(struct pt_regs *regs);
+void print_fn_code(unsigned char *code, unsigned long len);
+int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
+struct s390_insn *find_insn(unsigned char *code);
+
+static inline int is_known_insn(unsigned char *code)
+{
+ return !!find_insn(code);
+}
+
+#endif /* __ASM_S390_DIS_H__ */
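The insn_length() helper above encodes the s390 rule that the two high bits of the first opcode byte determine the instruction length (00 gives 2 bytes, 01 or 10 give 4 bytes, 11 gives 6 bytes). A small self-contained check of that formula, runnable on any host:

#include <assert.h>
#include <stdio.h>

static int insn_length(unsigned char code)
{
        return ((((int) code + 64) >> 7) + 1) << 1;
}

int main(void)
{
        int c;

        for (c = 0; c < 256; c++) {
                int expected = (c >> 6) == 0 ? 2 : (c >> 6) == 3 ? 6 : 4;

                assert(insn_length((unsigned char) c) == expected);
        }
        printf("insn_length() matches the 2/4/6 byte encoding\n");
        return 0;
}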
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index ef6170995076..7ecb92b469b6 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -12,9 +12,9 @@
#define TCW_FORMAT_DEFAULT 0
#define TCW_TIDAW_FORMAT_DEFAULT 0
-#define TCW_FLAGS_INPUT_TIDA 1 << (23 - 5)
-#define TCW_FLAGS_TCCB_TIDA 1 << (23 - 6)
-#define TCW_FLAGS_OUTPUT_TIDA 1 << (23 - 7)
+#define TCW_FLAGS_INPUT_TIDA (1 << (23 - 5))
+#define TCW_FLAGS_TCCB_TIDA (1 << (23 - 6))
+#define TCW_FLAGS_OUTPUT_TIDA (1 << (23 - 7))
#define TCW_FLAGS_TIDAW_FORMAT(x) ((x) & 3) << (23 - 9)
#define TCW_FLAGS_GET_TIDAW_FORMAT(x) (((x) >> (23 - 9)) & 3)
@@ -54,11 +54,11 @@ struct tcw {
u32 intrg;
} __attribute__ ((packed, aligned(64)));
-#define TIDAW_FLAGS_LAST 1 << (7 - 0)
-#define TIDAW_FLAGS_SKIP 1 << (7 - 1)
-#define TIDAW_FLAGS_DATA_INT 1 << (7 - 2)
-#define TIDAW_FLAGS_TTIC 1 << (7 - 3)
-#define TIDAW_FLAGS_INSERT_CBC 1 << (7 - 4)
+#define TIDAW_FLAGS_LAST (1 << (7 - 0))
+#define TIDAW_FLAGS_SKIP (1 << (7 - 1))
+#define TIDAW_FLAGS_DATA_INT (1 << (7 - 2))
+#define TIDAW_FLAGS_TTIC (1 << (7 - 3))
+#define TIDAW_FLAGS_INSERT_CBC (1 << (7 - 4))
/**
* struct tidaw - Transport-Indirect-Addressing Word (TIDAW)
@@ -106,9 +106,9 @@ struct tsa_ddpc {
u8 sense[32];
} __attribute__ ((packed));
-#define TSA_INTRG_FLAGS_CU_STATE_VALID 1 << (7 - 0)
-#define TSA_INTRG_FLAGS_DEV_STATE_VALID 1 << (7 - 1)
-#define TSA_INTRG_FLAGS_OP_STATE_VALID 1 << (7 - 2)
+#define TSA_INTRG_FLAGS_CU_STATE_VALID (1 << (7 - 0))
+#define TSA_INTRG_FLAGS_DEV_STATE_VALID (1 << (7 - 1))
+#define TSA_INTRG_FLAGS_OP_STATE_VALID (1 << (7 - 2))
/**
* struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA)
@@ -140,10 +140,10 @@ struct tsa_intrg {
#define TSB_FORMAT_DDPC 2
#define TSB_FORMAT_INTRG 3
-#define TSB_FLAGS_DCW_OFFSET_VALID 1 << (7 - 0)
-#define TSB_FLAGS_COUNT_VALID 1 << (7 - 1)
-#define TSB_FLAGS_CACHE_MISS 1 << (7 - 2)
-#define TSB_FLAGS_TIME_VALID 1 << (7 - 3)
+#define TSB_FLAGS_DCW_OFFSET_VALID (1 << (7 - 0))
+#define TSB_FLAGS_COUNT_VALID (1 << (7 - 1))
+#define TSB_FLAGS_CACHE_MISS (1 << (7 - 2))
+#define TSB_FLAGS_TIME_VALID (1 << (7 - 3))
#define TSB_FLAGS_FORMAT(x) ((x) & 7)
#define TSB_FORMAT(t) ((t)->flags & 7)
@@ -179,9 +179,9 @@ struct tsb {
#define DCW_INTRG_RCQ_PRIMARY 1
#define DCW_INTRG_RCQ_SECONDARY 2
-#define DCW_INTRG_FLAGS_MPM 1 < (7 - 0)
-#define DCW_INTRG_FLAGS_PPR 1 < (7 - 1)
-#define DCW_INTRG_FLAGS_CRIT 1 < (7 - 2)
+#define DCW_INTRG_FLAGS_MPM (1 << (7 - 0))
+#define DCW_INTRG_FLAGS_PPR (1 << (7 - 1))
+#define DCW_INTRG_FLAGS_CRIT (1 << (7 - 2))
/**
* struct dcw_intrg_data - Interrogate DCW data
@@ -216,7 +216,7 @@ struct dcw_intrg_data {
u8 prog_data[0];
} __attribute__ ((packed));
-#define DCW_FLAGS_CC 1 << (7 - 1)
+#define DCW_FLAGS_CC (1 << (7 - 1))
#define DCW_CMD_WRITE 0x01
#define DCW_CMD_READ 0x02
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 2bd6cb897b90..2fcccc0c997c 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -7,6 +7,7 @@
#ifndef _ASM_S390_IPL_H
#define _ASM_S390_IPL_H
+#include <asm/lowcore.h>
#include <asm/types.h>
#include <asm/cio.h>
#include <asm/setup.h>
@@ -86,7 +87,14 @@ struct ipl_parameter_block {
*/
extern u32 ipl_flags;
extern u32 dump_prefix_page;
-extern unsigned int zfcpdump_prefix_array[];
+
+struct dump_save_areas {
+ struct save_area **areas;
+ int count;
+};
+
+extern struct dump_save_areas dump_save_areas;
+struct save_area *dump_save_area_create(int cpu);
extern void do_reipl(void);
extern void do_halt(void);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e87ecaa2c569..d5bc3750616e 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -38,13 +38,6 @@ struct sca_block {
struct sca_entry cpu[64];
} __attribute__((packed));
-#define KVM_NR_PAGE_SIZES 2
-#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 8)
-#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
-#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
-#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
-#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
-
#define CPUSTAT_STOPPED 0x80000000
#define CPUSTAT_WAIT 0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
@@ -220,7 +213,6 @@ struct kvm_s390_interrupt_info {
/* for local_interrupt.action_flags */
#define ACTION_STORE_ON_STOP (1<<0)
#define ACTION_STOP_ON_STOP (1<<1)
-#define ACTION_RELOADVCPU_ON_STOP (1<<2)
struct kvm_s390_local_interrupt {
spinlock_t lock;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 9f973d8de90e..5d1f950704dc 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -40,14 +40,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
pgd_t *pgd = mm->pgd;
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
- if (s390_user_mode != HOME_SPACE_MODE) {
- /* Load primary space page table origin. */
- asm volatile(LCTL_OPCODE" 1,1,%0\n"
- : : "m" (S390_lowcore.user_asce) );
- } else
- /* Load home space page table origin. */
- asm volatile(LCTL_OPCODE" 13,13,%0"
- : : "m" (S390_lowcore.user_asce) );
+ /* Load primary space page table origin. */
+ asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
set_fs(current->thread.mm_segment);
}
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 1e51f2915b2e..316c8503a3b4 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -30,7 +30,12 @@
#include <asm/setup.h>
#ifndef __ASSEMBLY__
-void storage_key_init_range(unsigned long start, unsigned long end);
+static inline void storage_key_init_range(unsigned long start, unsigned long end)
+{
+#if PAGE_DEFAULT_KEY
+ __storage_key_init_range(start, end);
+#endif
+}
static inline void clear_page(void *page)
{
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 1ca5d1047c71..ac24b26fc065 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -6,14 +6,9 @@
extern debug_info_t *pci_debug_msg_id;
extern debug_info_t *pci_debug_err_id;
-#ifdef CONFIG_PCI_DEBUG
#define zpci_dbg(imp, fmt, args...) \
debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
-#else /* !CONFIG_PCI_DEBUG */
-#define zpci_dbg(imp, fmt, args...) do { } while (0)
-#endif
-
#define zpci_err(text...) \
do { \
char debug_buffer[16]; \
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index df6eac9f0cb4..649eb62c52b3 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -54,11 +54,9 @@
struct zpci_fib {
u32 fmt : 8; /* format */
u32 : 24;
- u32 reserved1;
+ u32 : 32;
u8 fc; /* function controls */
- u8 reserved2;
- u16 reserved3;
- u32 reserved4;
+ u64 : 56;
u64 pba; /* PCI base address */
u64 pal; /* PCI address limit */
u64 iota; /* I/O Translation Anchor */
@@ -70,14 +68,13 @@ struct zpci_fib {
u32 sum : 1; /* Adapter int summary bit enabled */
u32 : 1;
u32 aisbo : 6; /* Adapter int summary bit offset */
- u32 reserved5;
+ u32 : 32;
u64 aibv; /* Adapter int bit vector address */
u64 aisb; /* Adapter int summary bit address */
u64 fmb_addr; /* Function measurement block address and key */
- u64 reserved6;
- u64 reserved7;
-} __packed;
-
+ u32 : 32;
+ u32 gd;
+} __packed __aligned(8);
int zpci_mod_fc(u64 req, struct zpci_fib *fib);
int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 86fe0ee2cee5..fa91e0097458 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -10,16 +10,22 @@
*/
#define __my_cpu_offset S390_lowcore.percpu_offset
+#ifdef CONFIG_64BIT
+
/*
* For 64 bit module code, the module may be more than 4G above the
* per cpu area, use weak definitions to force the compiler to
* generate external references.
*/
-#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(MODULE)
#define ARCH_NEEDS_WEAK_PER_CPU
#endif
-#define arch_this_cpu_to_op(pcp, val, op) \
+/*
+ * We use a compare-and-swap loop since that uses fewer CPU cycles than
+ * disabling and enabling interrupts as the generic variant would do.
+ */
+#define arch_this_cpu_to_op_simple(pcp, val, op) \
({ \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ old__, new__, prev__; \
@@ -30,42 +36,101 @@
do { \
old__ = prev__; \
new__ = old__ op (val); \
- switch (sizeof(*ptr__)) { \
- case 8: \
- prev__ = cmpxchg64(ptr__, old__, new__); \
- break; \
- default: \
- prev__ = cmpxchg(ptr__, old__, new__); \
- } \
+ prev__ = cmpxchg(ptr__, old__, new__); \
} while (prev__ != old__); \
preempt_enable(); \
new__; \
})
-#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+
+#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \
+{ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+ preempt_disable(); \
+ ptr__ = __this_cpu_ptr(&(pcp)); \
+ if (__builtin_constant_p(val__) && \
+ ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
+ asm volatile( \
+ op2 " %[ptr__],%[val__]\n" \
+ : [ptr__] "+Q" (*ptr__) \
+ : [val__] "i" ((szcast)val__) \
+ : "cc"); \
+ } else { \
+ asm volatile( \
+ op1 " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+ } \
+ preempt_enable(); \
+}
-#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_add(pcp, val, "laag", "agsi", long)
-#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define arch_this_cpu_add_return(pcp, val, op) \
+({ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+ preempt_disable(); \
+ ptr__ = __this_cpu_ptr(&(pcp)); \
+ asm volatile( \
+ op " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+ preempt_enable(); \
+ old__ + val__; \
+})
-#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_add_return(pcp, val, "laa")
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_add_return(pcp, val, "laag")
-#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define arch_this_cpu_to_op(pcp, val, op) \
+{ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+ preempt_disable(); \
+ ptr__ = __this_cpu_ptr(&(pcp)); \
+ asm volatile( \
+ op " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+ preempt_enable(); \
+}
+
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, "lang")
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao")
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog")
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
({ \
@@ -74,13 +139,7 @@
pcp_op_T__ *ptr__; \
preempt_disable(); \
ptr__ = __this_cpu_ptr(&(pcp)); \
- switch (sizeof(*ptr__)) { \
- case 8: \
- ret__ = cmpxchg64(ptr__, oval, nval); \
- break; \
- default: \
- ret__ = cmpxchg(ptr__, oval, nval); \
- } \
+ ret__ = cmpxchg(ptr__, oval, nval); \
preempt_enable(); \
ret__; \
})
@@ -104,9 +163,7 @@
#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#ifdef CONFIG_64BIT
#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#endif
#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
@@ -124,9 +181,9 @@
})
#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
-#ifdef CONFIG_64BIT
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-#endif
+
+#endif /* CONFIG_64BIT */
#include <asm-generic/percpu.h>
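A hedged, generic illustration of the arch_this_cpu_to_op_simple() pattern used above: read the old value, compute old op val, and retry with a compare-and-swap until no concurrent update slipped in between. Plain C11 atomics stand in for the kernel's cmpxchg() and preempt_disable(); percpu_add_return() is an illustrative name, not a kernel interface.

#include <stdatomic.h>
#include <stdio.h>

/* Retry loop: on CAS failure, 'old' is refreshed with the current value. */
static long percpu_add_return(_Atomic long *counter, long val)
{
        long old = atomic_load(counter);
        long new_val;

        do {
                new_val = old + val;
        } while (!atomic_compare_exchange_weak(counter, &old, new_val));
        return new_val;
}

int main(void)
{
        _Atomic long counter = 40;

        printf("counter = %ld\n", percpu_add_return(&counter, 2)); /* 42 */
        return 0;
}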
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 9b60a36c348d..2204400d0bd5 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -748,7 +748,9 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
- if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
+ if (!MACHINE_HAS_ESOP &&
+ (pte_val(entry) & _PAGE_PRESENT) &&
+ (pte_val(entry) & _PAGE_WRITE)) {
/*
* Without enhanced suppression-on-protection force
* the dirty bit on for all writable ptes.
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index ca7821f07260..0a876bc543d3 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -134,19 +134,17 @@ struct stack_frame {
* Do necessary setup to start up a new thread.
*/
#define start_thread(regs, new_psw, new_stackp) do { \
- regs->psw.mask = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA; \
+ regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
execve_tail(); \
} while (0)
#define start_thread31(regs, new_psw, new_stackp) do { \
- regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
+ regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
- __tlb_flush_mm(current->mm); \
crst_table_downgrade(current->mm, 1UL << 31); \
- update_mm(current->mm, current); \
execve_tail(); \
} while (0)
@@ -169,17 +167,15 @@ extern void release_thread(struct task_struct *);
*/
extern unsigned long thread_saved_pc(struct task_struct *t);
-extern void show_code(struct pt_regs *regs);
-extern void print_fn_code(unsigned char *code, unsigned long len);
-extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
- unsigned int len);
-
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
(task_stack_page(tsk) + THREAD_SIZE) - 1)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
+/* Does the task have runtime instrumentation enabled? */
+#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
+
static inline unsigned short stap(void)
{
unsigned short cpu_address;
@@ -348,9 +344,9 @@ __set_psw_mask(unsigned long mask)
}
#define local_mcck_enable() \
- __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
+ __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
#define local_mcck_disable() \
- __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)
+ __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
/*
* Basic Machine Check/Program Check Handler.
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 52b56533c57c..9c82cebddabd 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -10,8 +10,11 @@
#ifndef __ASSEMBLY__
-extern long psw_kernel_bits;
-extern long psw_user_bits;
+#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
+ PSW_MASK_EA | PSW_MASK_BA)
+#define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
+ PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
+ PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
/*
* The pt_regs struct defines the way the registers are stored on
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 59880dbaf360..df802ee14af6 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -48,13 +48,6 @@ void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
unsigned long size);
-#define PRIMARY_SPACE_MODE 0
-#define ACCESS_REGISTER_MODE 1
-#define SECONDARY_SPACE_MODE 2
-#define HOME_SPACE_MODE 3
-
-extern unsigned int s390_user_mode;
-
/*
* Machine features detected in head.S
*/
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index b64f15c3b4cc..ac9bed8e103f 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -14,7 +14,6 @@
#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
extern struct mutex smp_cpu_state_mutex;
-extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 6dbd559763c9..29c81f82705e 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -13,58 +13,94 @@
extern struct task_struct *__switch_to(void *, void *);
extern void update_cr_regs(struct task_struct *task);
-static inline void save_fp_regs(s390_fp_regs *fpregs)
+static inline int test_fp_ctl(u32 fpc)
{
+ u32 orig_fpc;
+ int rc;
+
+ if (!MACHINE_HAS_IEEE)
+ return 0;
+
asm volatile(
- " std 0,%O0+8(%R0)\n"
- " std 2,%O0+24(%R0)\n"
- " std 4,%O0+40(%R0)\n"
- " std 6,%O0+56(%R0)"
- : "=Q" (*fpregs) : "Q" (*fpregs));
+ " efpc %1\n"
+ " sfpc %2\n"
+ "0: sfpc %1\n"
+ " la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "=d" (rc), "=d" (orig_fpc)
+ : "d" (fpc), "0" (-EINVAL));
+ return rc;
+}
+
+static inline void save_fp_ctl(u32 *fpc)
+{
if (!MACHINE_HAS_IEEE)
return;
+
asm volatile(
- " stfpc %0\n"
- " std 1,%O0+16(%R0)\n"
- " std 3,%O0+32(%R0)\n"
- " std 5,%O0+48(%R0)\n"
- " std 7,%O0+64(%R0)\n"
- " std 8,%O0+72(%R0)\n"
- " std 9,%O0+80(%R0)\n"
- " std 10,%O0+88(%R0)\n"
- " std 11,%O0+96(%R0)\n"
- " std 12,%O0+104(%R0)\n"
- " std 13,%O0+112(%R0)\n"
- " std 14,%O0+120(%R0)\n"
- " std 15,%O0+128(%R0)\n"
- : "=Q" (*fpregs) : "Q" (*fpregs));
+ " stfpc %0\n"
+ : "+Q" (*fpc));
}
-static inline void restore_fp_regs(s390_fp_regs *fpregs)
+static inline int restore_fp_ctl(u32 *fpc)
{
+ int rc;
+
+ if (!MACHINE_HAS_IEEE)
+ return 0;
+
asm volatile(
- " ld 0,%O0+8(%R0)\n"
- " ld 2,%O0+24(%R0)\n"
- " ld 4,%O0+40(%R0)\n"
- " ld 6,%O0+56(%R0)"
- : : "Q" (*fpregs));
+ "0: lfpc %1\n"
+ " la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
+ return rc;
+}
+
+static inline void save_fp_regs(freg_t *fprs)
+{
+ asm volatile("std 0,%0" : "=Q" (fprs[0]));
+ asm volatile("std 2,%0" : "=Q" (fprs[2]));
+ asm volatile("std 4,%0" : "=Q" (fprs[4]));
+ asm volatile("std 6,%0" : "=Q" (fprs[6]));
if (!MACHINE_HAS_IEEE)
return;
- asm volatile(
- " lfpc %0\n"
- " ld 1,%O0+16(%R0)\n"
- " ld 3,%O0+32(%R0)\n"
- " ld 5,%O0+48(%R0)\n"
- " ld 7,%O0+64(%R0)\n"
- " ld 8,%O0+72(%R0)\n"
- " ld 9,%O0+80(%R0)\n"
- " ld 10,%O0+88(%R0)\n"
- " ld 11,%O0+96(%R0)\n"
- " ld 12,%O0+104(%R0)\n"
- " ld 13,%O0+112(%R0)\n"
- " ld 14,%O0+120(%R0)\n"
- " ld 15,%O0+128(%R0)\n"
- : : "Q" (*fpregs));
+ asm volatile("std 1,%0" : "=Q" (fprs[1]));
+ asm volatile("std 3,%0" : "=Q" (fprs[3]));
+ asm volatile("std 5,%0" : "=Q" (fprs[5]));
+ asm volatile("std 7,%0" : "=Q" (fprs[7]));
+ asm volatile("std 8,%0" : "=Q" (fprs[8]));
+ asm volatile("std 9,%0" : "=Q" (fprs[9]));
+ asm volatile("std 10,%0" : "=Q" (fprs[10]));
+ asm volatile("std 11,%0" : "=Q" (fprs[11]));
+ asm volatile("std 12,%0" : "=Q" (fprs[12]));
+ asm volatile("std 13,%0" : "=Q" (fprs[13]));
+ asm volatile("std 14,%0" : "=Q" (fprs[14]));
+ asm volatile("std 15,%0" : "=Q" (fprs[15]));
+}
+
+static inline void restore_fp_regs(freg_t *fprs)
+{
+ asm volatile("ld 0,%0" : : "Q" (fprs[0]));
+ asm volatile("ld 2,%0" : : "Q" (fprs[2]));
+ asm volatile("ld 4,%0" : : "Q" (fprs[4]));
+ asm volatile("ld 6,%0" : : "Q" (fprs[6]));
+ if (!MACHINE_HAS_IEEE)
+ return;
+ asm volatile("ld 1,%0" : : "Q" (fprs[1]));
+ asm volatile("ld 3,%0" : : "Q" (fprs[3]));
+ asm volatile("ld 5,%0" : : "Q" (fprs[5]));
+ asm volatile("ld 7,%0" : : "Q" (fprs[7]));
+ asm volatile("ld 8,%0" : : "Q" (fprs[8]));
+ asm volatile("ld 9,%0" : : "Q" (fprs[9]));
+ asm volatile("ld 10,%0" : : "Q" (fprs[10]));
+ asm volatile("ld 11,%0" : : "Q" (fprs[11]));
+ asm volatile("ld 12,%0" : : "Q" (fprs[12]));
+ asm volatile("ld 13,%0" : : "Q" (fprs[13]));
+ asm volatile("ld 14,%0" : : "Q" (fprs[14]));
+ asm volatile("ld 15,%0" : : "Q" (fprs[15]));
}
static inline void save_access_regs(unsigned int *acrs)
@@ -83,12 +119,14 @@ static inline void restore_access_regs(unsigned int *acrs)
#define switch_to(prev,next,last) do { \
if (prev->mm) { \
- save_fp_regs(&prev->thread.fp_regs); \
+ save_fp_ctl(&prev->thread.fp_regs.fpc); \
+ save_fp_regs(prev->thread.fp_regs.fprs); \
save_access_regs(&prev->thread.acrs[0]); \
save_ri_cb(prev->thread.ri_cb); \
} \
if (next->mm) { \
- restore_fp_regs(&next->thread.fp_regs); \
+ restore_fp_ctl(&next->thread.fp_regs.fpc); \
+ restore_fp_regs(next->thread.fp_regs.fprs); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
update_cr_regs(next); \
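A hedged sketch of the ordering the split save_fp_ctl()/save_fp_regs() interface enables on the restore side: a user-supplied floating-point control word is validated first (restore_fp_ctl() can fail with -EINVAL), so a bad value leaves the rest of the task state untouched. The restore_user_fp() helper and the use of the historical FPC_VALID_MASK value as a validity stand-in are illustrative assumptions, not kernel code.

#include <errno.h>
#include <stdio.h>

struct fp_state { unsigned int fpc; unsigned long fprs[16]; };

/* Stand-in for restore_fp_ctl(): reject reserved FPC bits. */
static int restore_fp_ctl(const unsigned int *fpc)
{
        const unsigned int valid_mask = 0xf8f8ff03u; /* historical FPC_VALID_MASK */

        return (*fpc & ~valid_mask) ? -EINVAL : 0;
}

static int restore_user_fp(struct fp_state *task, const struct fp_state *user)
{
        int i;

        /* Loading the floating-point-control word can fail. Do that first. */
        if (restore_fp_ctl(&user->fpc))
                return -EINVAL;
        task->fpc = user->fpc;
        for (i = 0; i < 16; i++)
                task->fprs[i] = user->fprs[i];
        return 0;
}

int main(void)
{
        struct fp_state task = { 0 }, good = { 0 }, bad = { 0x00040000u };

        printf("good=%d bad=%d\n", restore_user_fp(&task, &good),
               restore_user_fp(&task, &bad));
        return 0;
}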
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 8ad8af915032..8beee1cceba4 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -71,30 +71,32 @@ static inline void local_tick_enable(unsigned long long comp)
typedef unsigned long long cycles_t;
-static inline unsigned long long get_tod_clock(void)
+static inline void get_tod_clock_ext(char clk[16])
{
- unsigned long long clk;
-
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
- asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
-#else
- asm volatile("stck %0" : "=Q" (clk) : : "cc");
-#endif
- return clk;
-}
+ typedef struct { char _[sizeof(clk)]; } addrtype;
-static inline void get_tod_clock_ext(char *clk)
-{
- asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
+ asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
}
-static inline unsigned long long get_tod_clock_xt(void)
+static inline unsigned long long get_tod_clock(void)
{
unsigned char clk[16];
get_tod_clock_ext(clk);
return *((unsigned long long *)&clk[1]);
}
+static inline unsigned long long get_tod_clock_fast(void)
+{
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+ unsigned long long clk;
+
+ asm volatile("stckf %0" : "=Q" (clk) : : "cc");
+ return clk;
+#else
+ return get_tod_clock();
+#endif
+}
+
static inline cycles_t get_cycles(void)
{
return (cycles_t) get_tod_clock() >> 2;
@@ -125,7 +127,7 @@ extern u64 sched_clock_base_cc;
*/
static inline unsigned long long get_tod_clock_monotonic(void)
{
- return get_tod_clock_xt() - sched_clock_base_cc;
+ return get_tod_clock() - sched_clock_base_cc;
}
/**
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 9c33ed4e666f..79330af9a5f8 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -94,9 +94,7 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
struct uaccess_ops {
size_t (*copy_from_user)(size_t, const void __user *, void *);
- size_t (*copy_from_user_small)(size_t, const void __user *, void *);
size_t (*copy_to_user)(size_t, void __user *, const void *);
- size_t (*copy_to_user_small)(size_t, void __user *, const void *);
size_t (*copy_in_user)(size_t, void __user *, const void __user *);
size_t (*clear_user)(size_t, void __user *);
size_t (*strnlen_user)(size_t, const char __user *);
@@ -106,22 +104,20 @@ struct uaccess_ops {
};
extern struct uaccess_ops uaccess;
-extern struct uaccess_ops uaccess_std;
extern struct uaccess_ops uaccess_mvcos;
-extern struct uaccess_ops uaccess_mvcos_switch;
extern struct uaccess_ops uaccess_pt;
extern int __handle_fault(unsigned long, unsigned long, int);
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
- size = uaccess.copy_to_user_small(size, ptr, x);
+ size = uaccess.copy_to_user(size, ptr, x);
return size ? -EFAULT : size;
}
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
- size = uaccess.copy_from_user_small(size, ptr, x);
+ size = uaccess.copy_from_user(size, ptr, x);
return size ? -EFAULT : size;
}
@@ -226,10 +222,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (__builtin_constant_p(n) && (n <= 256))
- return uaccess.copy_to_user_small(n, to, from);
- else
- return uaccess.copy_to_user(n, to, from);
+ return uaccess.copy_to_user(n, to, from);
}
#define __copy_to_user_inatomic __copy_to_user
@@ -275,10 +268,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (__builtin_constant_p(n) && (n <= 256))
- return uaccess.copy_from_user_small(n, from, to);
- else
- return uaccess.copy_from_user(n, from, to);
+ return uaccess.copy_from_user(n, from, to);
}
extern void copy_from_user_overflow(void)
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 7a84619e315e..7e0b498a2c2b 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -199,6 +199,7 @@ typedef union
typedef struct
{
__u32 fpc;
+ __u32 pad;
freg_t fprs[NUM_FPRS];
} s390_fp_regs;
@@ -206,7 +207,6 @@ typedef struct
#define FPC_FLAGS_MASK 0x00F80000
#define FPC_DXC_MASK 0x0000FF00
#define FPC_RM_MASK 0x00000003
-#define FPC_VALID_MASK 0xF8F8FF03
/* this typedef defines how a Program Status Word looks like */
typedef struct
@@ -263,7 +263,7 @@ typedef struct
#define PSW_MASK_EA 0x0000000100000000UL
#define PSW_MASK_BA 0x0000000080000000UL
-#define PSW_MASK_USER 0x0000FF8180000000UL
+#define PSW_MASK_USER 0x0000FF0180000000UL
#define PSW_ADDR_AMODE 0x0000000000000000UL
#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
diff --git a/arch/s390/include/uapi/asm/sigcontext.h b/arch/s390/include/uapi/asm/sigcontext.h
index 584787f6ce44..b30de9c01bbe 100644
--- a/arch/s390/include/uapi/asm/sigcontext.h
+++ b/arch/s390/include/uapi/asm/sigcontext.h
@@ -49,6 +49,7 @@ typedef struct
typedef struct
{
unsigned int fpc;
+ unsigned int pad;
double fprs[__NUM_FPRS];
} _s390_fp_regs;
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 92494494692e..c286c2e868f0 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -82,4 +82,6 @@
#define SO_BUSY_POLL 46
+#define SO_MAX_PACING_RATE 47
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 4bb2a4656163..2403303cfed7 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -28,7 +28,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
-obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
+obj-y := traps.o time.o process.o base.o early.o setup.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c
deleted file mode 100644
index 102da5e23037..000000000000
--- a/arch/s390/kernel/bitmap.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
- * See include/asm/{bitops.h|posix_types.h} for details
- *
- * Copyright IBM Corp. 1999, 2009
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
- */
-
-#include <linux/bitops.h>
-#include <linux/module.h>
-
-const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
-EXPORT_SYMBOL(_oi_bitmap);
-
-const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f };
-EXPORT_SYMBOL(_ni_bitmap);
-
-const char _zb_findmap[] = {
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
- 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 };
-EXPORT_SYMBOL(_zb_findmap);
-
-const char _sb_findmap[] = {
- 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
- 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 };
-EXPORT_SYMBOL(_sb_findmap);
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index dd62071624be..3a414c0f93ed 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -146,15 +146,14 @@ static void __init cache_build_info(void)
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
for (level = 0; level < CACHE_MAX_LEVEL; level++) {
switch (ct.ci[level].scope) {
- case CACHE_SCOPE_NOTEXISTS:
- case CACHE_SCOPE_RESERVED:
- return;
case CACHE_SCOPE_SHARED:
private = 0;
break;
case CACHE_SCOPE_PRIVATE:
private = 1;
break;
+ default:
+ return;
}
if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
rc = cache_add(level, private, CACHE_TYPE_DATA);
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 1f1b8c70ab97..e030d2bdec1b 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -58,10 +58,6 @@
#include "compat_linux.h"
-u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT |
- PSW32_DEFAULT_KEY | PSW32_MASK_BASE | PSW32_MASK_MCHECK |
- PSW32_MASK_PSTATE | PSW32_ASC_HOME;
-
/* For this source file, we want overflow handling. */
#undef high2lowuid
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 976518c0592a..1bfda3eca379 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -27,6 +27,7 @@ typedef union
typedef struct
{
unsigned int fpc;
+ unsigned int pad;
freg_t32 fprs[__NUM_FPRS];
} _s390_fp_regs32;
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 1389b637dae5..5a3ab5c191fd 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -99,7 +99,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
break;
}
}
- return err;
+ return err ? -EFAULT : 0;
}
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
@@ -148,62 +148,71 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
break;
}
}
- return err;
+ return err ? -EFAULT : 0;
}
static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
{
- _s390_regs_common32 regs32;
- int err, i;
+ _sigregs32 user_sregs;
+ int i;
- regs32.psw.mask = psw32_user_bits |
- ((__u32)(regs->psw.mask >> 32) & PSW32_MASK_USER);
- regs32.psw.addr = (__u32) regs->psw.addr |
+ user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32);
+ user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI;
+ user_sregs.regs.psw.mask |= PSW32_USER_BITS;
+ user_sregs.regs.psw.addr = (__u32) regs->psw.addr |
(__u32)(regs->psw.mask & PSW_MASK_BA);
for (i = 0; i < NUM_GPRS; i++)
- regs32.gprs[i] = (__u32) regs->gprs[i];
+ user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
save_access_regs(current->thread.acrs);
- memcpy(regs32.acrs, current->thread.acrs, sizeof(regs32.acrs));
- err = __copy_to_user(&sregs->regs, &regs32, sizeof(regs32));
- if (err)
- return err;
- save_fp_regs(&current->thread.fp_regs);
- /* s390_fp_regs and _s390_fp_regs32 are the same ! */
- return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
- sizeof(_s390_fp_regs32));
+ memcpy(&user_sregs.regs.acrs, current->thread.acrs,
+ sizeof(user_sregs.regs.acrs));
+ save_fp_ctl(&current->thread.fp_regs.fpc);
+ save_fp_regs(current->thread.fp_regs.fprs);
+ memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
+ sizeof(user_sregs.fpregs));
+ if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
+ return -EFAULT;
+ return 0;
}
static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
{
- _s390_regs_common32 regs32;
- int err, i;
+ _sigregs32 user_sregs;
+ int i;
/* Always make any pending restarted system call return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
- err = __copy_from_user(&regs32, &sregs->regs, sizeof(regs32));
- if (err)
- return err;
+ if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
+ return -EFAULT;
+
+ if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
+ return -EINVAL;
+
+ /* Loading the floating-point-control word can fail. Do that first. */
+ if (restore_fp_ctl(&user_sregs.fpregs.fpc))
+ return -EINVAL;
+
+ /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
- (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
- (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
+ (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
+ (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
+ (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
/* Check for invalid user address space control. */
- if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
- regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+ if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
+ regs->psw.mask = PSW_ASC_PRIMARY |
(regs->psw.mask & ~PSW_MASK_ASC);
- regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
+ regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN);
for (i = 0; i < NUM_GPRS; i++)
- regs->gprs[i] = (__u64) regs32.gprs[i];
- memcpy(current->thread.acrs, regs32.acrs, sizeof(current->thread.acrs));
+ regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
+ memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
+ sizeof(current->thread.acrs));
restore_access_regs(current->thread.acrs);
- err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs,
- sizeof(_s390_fp_regs32));
- current->thread.fp_regs.fpc &= FPC_VALID_MASK;
- if (err)
- return err;
+ memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
+ sizeof(current->thread.fp_regs));
- restore_fp_regs(&current->thread.fp_regs);
+ restore_fp_regs(current->thread.fp_regs.fprs);
clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */
return 0;
}
@@ -215,18 +224,18 @@ static int save_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
for (i = 0; i < NUM_GPRS; i++)
gprs_high[i] = regs->gprs[i] >> 32;
-
- return __copy_to_user(uregs, &gprs_high, sizeof(gprs_high));
+ if (__copy_to_user(uregs, &gprs_high, sizeof(gprs_high)))
+ return -EFAULT;
+ return 0;
}
static int restore_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
{
__u32 gprs_high[NUM_GPRS];
- int err, i;
+ int i;
- err = __copy_from_user(&gprs_high, uregs, sizeof(gprs_high));
- if (err)
- return err;
+ if (__copy_from_user(&gprs_high, uregs, sizeof(gprs_high)))
+ return -EFAULT;
for (i = 0; i < NUM_GPRS; i++)
*(__u32 *)&regs->gprs[i] = gprs_high[i];
return 0;
@@ -348,7 +357,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
regs->gprs[15] = (__force __u64) frame;
/* Force 31 bit amode and default user address space control. */
regs->psw.mask = PSW_MASK_BA |
- (psw_user_bits & PSW_MASK_ASC) |
+ (PSW_USER_BITS & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__force __u64) ka->sa.sa_handler;
@@ -415,7 +424,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gprs[15] = (__force __u64) frame;
/* Force 31 bit amode and default user address space control. */
regs->psw.mask = PSW_MASK_BA |
- (psw_user_bits & PSW_MASK_ASC) |
+ (PSW_USER_BITS & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64 __force) ka->sa.sa_handler;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 7dd21720e5b0..f45b2ab0cb81 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -22,6 +22,32 @@
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
+struct dump_save_areas dump_save_areas;
+
+/*
+ * Allocate and add a save area for a CPU
+ */
+struct save_area *dump_save_area_create(int cpu)
+{
+ struct save_area **save_areas, *save_area;
+
+ save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
+ if (!save_area)
+ return NULL;
+ if (cpu + 1 > dump_save_areas.count) {
+ dump_save_areas.count = cpu + 1;
+ save_areas = krealloc(dump_save_areas.areas,
+ dump_save_areas.count * sizeof(void *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!save_areas) {
+ kfree(save_area);
+ return NULL;
+ }
+ dump_save_areas.areas = save_areas;
+ }
+ dump_save_areas.areas[cpu] = save_area;
+ return save_area;
+}
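A hedged userspace sketch of the grow-on-demand pattern dump_save_area_create() uses: the pointer array is reallocated to cover the highest CPU number seen so far, new slots are zeroed so unseen CPUs read back as NULL, and the element is stored at its index. realloc() plus an explicit memset() of the new tail stands in for krealloc() with __GFP_ZERO; the names mirror the kernel code but the program itself is illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct save_area { unsigned long pref_reg; };

static struct save_area **areas;
static int area_count;

static struct save_area *save_area_create(int cpu)
{
        struct save_area *sa = malloc(sizeof(*sa));

        if (!sa)
                return NULL;
        memset(sa, 0, sizeof(*sa));
        if (cpu + 1 > area_count) {
                struct save_area **grown =
                        realloc(areas, (cpu + 1) * sizeof(*grown));

                if (!grown) {
                        free(sa);
                        return NULL;
                }
                /* Zero only the newly added slots. */
                memset(grown + area_count, 0,
                       (cpu + 1 - area_count) * sizeof(*grown));
                areas = grown;
                area_count = cpu + 1;
        }
        areas[cpu] = sa;
        return sa;
}

int main(void)
{
        save_area_create(3);
        printf("count=%d, cpu1 registered=%s\n",
               area_count, areas[1] ? "yes" : "no");
        return 0;
}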
/*
* Return physical address for virtual address
@@ -45,7 +71,6 @@ static inline void *load_real_addr(void *addr)
static int copy_from_realmem(void *dest, void *src, size_t count)
{
unsigned long size;
- int rc;
if (!count)
return 0;
@@ -451,8 +476,8 @@ static int get_cpu_cnt(void)
{
int i, cpus = 0;
- for (i = 0; zfcpdump_save_areas[i]; i++) {
- if (zfcpdump_save_areas[i]->pref_reg == 0)
+ for (i = 0; i < dump_save_areas.count; i++) {
+ if (dump_save_areas.areas[i]->pref_reg == 0)
continue;
cpus++;
}
@@ -523,8 +548,8 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
ptr = nt_prpsinfo(ptr);
- for (i = 0; zfcpdump_save_areas[i]; i++) {
- sa = zfcpdump_save_areas[i];
+ for (i = 0; i < dump_save_areas.count; i++) {
+ sa = dump_save_areas.areas[i];
if (sa->pref_reg == 0)
continue;
ptr = fill_cpu_elf_notes(ptr, sa);
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index f1279dc2e1bc..ee8390da6ea7 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -867,7 +867,7 @@ static inline void
debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
int exception)
{
- active->id.stck = get_tod_clock();
+ active->id.stck = get_tod_clock_fast();
active->id.fields.cpuid = smp_processor_id();
active->caller = __builtin_return_address(0);
active->id.fields.exception = exception;
@@ -889,7 +889,7 @@ static int debug_active=1;
* if debug_active is already off
*/
static int
-s390dbf_procactive(ctl_table *table, int write,
+s390dbf_procactive(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
if (!write || debug_stoppable || !debug_active)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index be87d3e05a5b..993efe6a887c 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -23,6 +23,7 @@
#include <linux/kdebug.h>
#include <asm/uaccess.h>
+#include <asm/dis.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/mathemu.h>
@@ -37,17 +38,6 @@
#define ONELONG "%016lx: "
#endif /* CONFIG_64BIT */
-#define OPERAND_GPR 0x1 /* Operand printed as %rx */
-#define OPERAND_FPR 0x2 /* Operand printed as %fx */
-#define OPERAND_AR 0x4 /* Operand printed as %ax */
-#define OPERAND_CR 0x8 /* Operand printed as %cx */
-#define OPERAND_DISP 0x10 /* Operand printed as displacement */
-#define OPERAND_BASE 0x20 /* Operand printed as base register */
-#define OPERAND_INDEX 0x40 /* Operand printed as index register */
-#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
-#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
-#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
-
enum {
UNUSED, /* Indicates the end of the operand list */
R_8, /* GPR starting at position 8 */
@@ -155,19 +145,7 @@ enum {
INSTR_S_00, INSTR_S_RD,
};
-struct operand {
- int bits; /* The number of bits in the operand. */
- int shift; /* The number of bits to shift. */
- int flags; /* One bit syntax flags. */
-};
-
-struct insn {
- const char name[5];
- unsigned char opfrag;
- unsigned char format;
-};
-
-static const struct operand operands[] =
+static const struct s390_operand operands[] =
{
[UNUSED] = { 0, 0, 0 },
[R_8] = { 4, 8, OPERAND_GPR },
@@ -479,7 +457,7 @@ static char *long_insn_name[] = {
[LONG_INSN_PCISTB] = "pcistb",
};
-static struct insn opcode[] = {
+static struct s390_insn opcode[] = {
#ifdef CONFIG_64BIT
{ "bprp", 0xc5, INSTR_MII_UPI },
{ "bpp", 0xc7, INSTR_SMI_U0RDP },
@@ -668,7 +646,7 @@ static struct insn opcode[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_01[] = {
+static struct s390_insn opcode_01[] = {
#ifdef CONFIG_64BIT
{ "ptff", 0x04, INSTR_E },
{ "pfpo", 0x0a, INSTR_E },
@@ -684,7 +662,7 @@ static struct insn opcode_01[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_a5[] = {
+static struct s390_insn opcode_a5[] = {
#ifdef CONFIG_64BIT
{ "iihh", 0x00, INSTR_RI_RU },
{ "iihl", 0x01, INSTR_RI_RU },
@@ -706,7 +684,7 @@ static struct insn opcode_a5[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_a7[] = {
+static struct s390_insn opcode_a7[] = {
#ifdef CONFIG_64BIT
{ "tmhh", 0x02, INSTR_RI_RU },
{ "tmhl", 0x03, INSTR_RI_RU },
@@ -728,7 +706,7 @@ static struct insn opcode_a7[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_aa[] = {
+static struct s390_insn opcode_aa[] = {
#ifdef CONFIG_64BIT
{ { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
{ "rion", 0x01, INSTR_RI_RI },
@@ -739,7 +717,7 @@ static struct insn opcode_aa[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_b2[] = {
+static struct s390_insn opcode_b2[] = {
#ifdef CONFIG_64BIT
{ "stckf", 0x7c, INSTR_S_RD },
{ "lpp", 0x80, INSTR_S_RD },
@@ -851,7 +829,7 @@ static struct insn opcode_b2[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_b3[] = {
+static struct s390_insn opcode_b3[] = {
#ifdef CONFIG_64BIT
{ "maylr", 0x38, INSTR_RRF_F0FF },
{ "mylr", 0x39, INSTR_RRF_F0FF },
@@ -1034,7 +1012,7 @@ static struct insn opcode_b3[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_b9[] = {
+static struct s390_insn opcode_b9[] = {
#ifdef CONFIG_64BIT
{ "lpgr", 0x00, INSTR_RRE_RR },
{ "lngr", 0x01, INSTR_RRE_RR },
@@ -1167,7 +1145,7 @@ static struct insn opcode_b9[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_c0[] = {
+static struct s390_insn opcode_c0[] = {
#ifdef CONFIG_64BIT
{ "lgfi", 0x01, INSTR_RIL_RI },
{ "xihf", 0x06, INSTR_RIL_RU },
@@ -1187,7 +1165,7 @@ static struct insn opcode_c0[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_c2[] = {
+static struct s390_insn opcode_c2[] = {
#ifdef CONFIG_64BIT
{ "msgfi", 0x00, INSTR_RIL_RI },
{ "msfi", 0x01, INSTR_RIL_RI },
@@ -1205,7 +1183,7 @@ static struct insn opcode_c2[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_c4[] = {
+static struct s390_insn opcode_c4[] = {
#ifdef CONFIG_64BIT
{ "llhrl", 0x02, INSTR_RIL_RP },
{ "lghrl", 0x04, INSTR_RIL_RP },
@@ -1222,7 +1200,7 @@ static struct insn opcode_c4[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_c6[] = {
+static struct s390_insn opcode_c6[] = {
#ifdef CONFIG_64BIT
{ "exrl", 0x00, INSTR_RIL_RP },
{ "pfdrl", 0x02, INSTR_RIL_UP },
@@ -1240,7 +1218,7 @@ static struct insn opcode_c6[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_c8[] = {
+static struct s390_insn opcode_c8[] = {
#ifdef CONFIG_64BIT
{ "mvcos", 0x00, INSTR_SSF_RRDRD },
{ "ectg", 0x01, INSTR_SSF_RRDRD },
@@ -1251,7 +1229,7 @@ static struct insn opcode_c8[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_cc[] = {
+static struct s390_insn opcode_cc[] = {
#ifdef CONFIG_64BIT
{ "brcth", 0x06, INSTR_RIL_RP },
{ "aih", 0x08, INSTR_RIL_RI },
@@ -1263,7 +1241,7 @@ static struct insn opcode_cc[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_e3[] = {
+static struct s390_insn opcode_e3[] = {
#ifdef CONFIG_64BIT
{ "ltg", 0x02, INSTR_RXY_RRRD },
{ "lrag", 0x03, INSTR_RXY_RRRD },
@@ -1369,7 +1347,7 @@ static struct insn opcode_e3[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_e5[] = {
+static struct s390_insn opcode_e5[] = {
#ifdef CONFIG_64BIT
{ "strag", 0x02, INSTR_SSE_RDRD },
{ "mvhhi", 0x44, INSTR_SIL_RDI },
@@ -1391,7 +1369,7 @@ static struct insn opcode_e5[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_eb[] = {
+static struct s390_insn opcode_eb[] = {
#ifdef CONFIG_64BIT
{ "lmg", 0x04, INSTR_RSY_RRRD },
{ "srag", 0x0a, INSTR_RSY_RRRD },
@@ -1465,7 +1443,7 @@ static struct insn opcode_eb[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_ec[] = {
+static struct s390_insn opcode_ec[] = {
#ifdef CONFIG_64BIT
{ "brxhg", 0x44, INSTR_RIE_RRP },
{ "brxlg", 0x45, INSTR_RIE_RRP },
@@ -1504,7 +1482,7 @@ static struct insn opcode_ec[] = {
{ "", 0, INSTR_INVALID }
};
-static struct insn opcode_ed[] = {
+static struct s390_insn opcode_ed[] = {
#ifdef CONFIG_64BIT
{ "mayl", 0x38, INSTR_RXF_FRRDF },
{ "myl", 0x39, INSTR_RXF_FRRDF },
@@ -1572,7 +1550,7 @@ static struct insn opcode_ed[] = {
/* Extracts an operand value from an instruction. */
static unsigned int extract_operand(unsigned char *code,
- const struct operand *operand)
+ const struct s390_operand *operand)
{
unsigned int val;
int bits;
@@ -1608,16 +1586,11 @@ static unsigned int extract_operand(unsigned char *code,
return val;
}
-static inline int insn_length(unsigned char code)
-{
- return ((((int) code + 64) >> 7) + 1) << 1;
-}
-
-static struct insn *find_insn(unsigned char *code)
+struct s390_insn *find_insn(unsigned char *code)
{
unsigned char opfrag = code[1];
unsigned char opmask;
- struct insn *table;
+ struct s390_insn *table;
switch (code[0]) {
case 0x01:
@@ -1706,7 +1679,7 @@ static struct insn *find_insn(unsigned char *code)
*/
int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
{
- struct insn *insn;
+ struct s390_insn *insn;
insn = find_insn(instruction);
if (!insn)
@@ -1722,9 +1695,9 @@ EXPORT_SYMBOL_GPL(insn_to_mnemonic);
static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
{
- struct insn *insn;
+ struct s390_insn *insn;
const unsigned char *ops;
- const struct operand *operand;
+ const struct s390_operand *operand;
unsigned int value;
char separator;
char *ptr;
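
The operand/insn structures and the insn_length() helper dropped above are now provided by the new <asm/dis.h> header (included here and in dumpstack.c, kprobes.c and kvm/trace.h below). The helper encodes the s390 rule that the two most-significant bits of the first opcode byte select a 2-, 4- or 6-byte instruction; a small self-contained check of the formula shown in the removed lines, which also matches the open-coded ((opcode >> 14) + 3) & -2 expression that the kprobes.c hunks replace:

#include <assert.h>

/* Same formula as the inline removed above (now expected in <asm/dis.h>). */
static inline int insn_length(unsigned char code)
{
        return ((((int) code + 64) >> 7) + 1) << 1;
}

int main(void)
{
        assert(insn_length(0x07) == 2);   /* 00xxxxxx -> 2 bytes, e.g. "br"   */
        assert(insn_length(0x47) == 4);   /* 01xxxxxx -> 4 bytes, e.g. "bc"   */
        assert(insn_length(0xb2) == 4);   /* 10xxxxxx -> 4 bytes              */
        assert(insn_length(0xc0) == 6);   /* 11xxxxxx -> 6 bytes, e.g. "larl" */
        return 0;
}
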
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 99e7f6035895..e6af9406987c 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -15,6 +15,7 @@
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/debug.h>
+#include <asm/dis.h>
#include <asm/ipl.h>
#ifndef CONFIG_64BIT
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index dc8770d7173c..96543ac400a7 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -206,6 +206,7 @@ static noinline __init void clear_bss_section(void)
*/
static noinline __init void init_kernel_storage_key(void)
{
+#if PAGE_DEFAULT_KEY
unsigned long end_pfn, init_pfn;
end_pfn = PFN_UP(__pa(&_end));
@@ -213,6 +214,7 @@ static noinline __init void init_kernel_storage_key(void)
for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
page_set_storage_key(init_pfn << PAGE_SHIFT,
PAGE_DEFAULT_KEY, 0);
+#endif
}
static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index e9b04c33d383..cb533f78c09e 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -23,7 +23,6 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
-void do_asce_exception(struct pt_regs *regs);
void addressing_exception(struct pt_regs *regs);
void data_exception(struct pt_regs *regs);
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 1014ad5f7693..224db03e9518 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -151,14 +151,13 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
- if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
- goto out;
trace.func = ip;
+ trace.depth = current->curr_ret_stack + 1;
/* Only trace if the calling function expects to. */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
+ if (!ftrace_graph_entry(&trace))
+ goto out;
+ if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
goto out;
- }
parent = (unsigned long) return_to_handler;
out:
return parent;
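
The prepare_ftrace_return() hunk above swaps the order of the filter callback and the return-stack push, so a rejected entry no longer needs the manual curr_ret_stack rollback. The resulting flow, spelled out:

/* New ordering, as in the hunk above:
 *
 *   trace.depth = current->curr_ret_stack + 1;    predicted depth only
 *   if (!ftrace_graph_entry(&trace))              filter may reject ...
 *           goto out;                             ... before any state change
 *   if (ftrace_push_return_trace(...) == -EBUSY)  the push happens last
 *           goto out;
 *   parent = (unsigned long) return_to_handler;
 *
 * Previously the push came first, so a rejection had to undo it with
 * current->curr_ret_stack--.
 */
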
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index fd8db63dfc94..429afcc480cb 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -437,7 +437,7 @@ ENTRY(startup_kdump)
#if defined(CONFIG_64BIT)
#if defined(CONFIG_MARCH_ZEC12)
- .long 3, 0xc100efe3, 0xf46ce000, 0x00400000
+ .long 3, 0xc100efe3, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_Z196)
.long 2, 0xc100efe3, 0xf46c0000
#elif defined(CONFIG_MARCH_Z10)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index feb719d3c851..633ca7504536 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2051,12 +2051,12 @@ void s390_reset_system(void (*func)(void *), void *data)
__ctl_clear_bit(0,28);
/* Set new machine check handler */
- S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT;
+ S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
S390_lowcore.mcck_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
/* Set new program check handler */
- S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT;
+ S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
S390_lowcore.program_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 8ac2097f13d4..bb27a262c44a 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -157,39 +157,29 @@ int arch_show_interrupts(struct seq_file *p, int prec)
/*
* Switch to the asynchronous interrupt stack for softirq execution.
*/
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
{
- unsigned long flags, old, new;
-
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
-
- if (local_softirq_pending()) {
- /* Get current stack pointer. */
- asm volatile("la %0,0(15)" : "=a" (old));
- /* Check against async. stack address range. */
- new = S390_lowcore.async_stack;
- if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
- /* Need to switch to the async. stack. */
- new -= STACK_FRAME_OVERHEAD;
- ((struct stack_frame *) new)->back_chain = old;
-
- asm volatile(" la 15,0(%0)\n"
- " basr 14,%2\n"
- " la 15,0(%1)\n"
- : : "a" (new), "a" (old),
- "a" (__do_softirq)
- : "0", "1", "2", "3", "4", "5", "14",
- "cc", "memory" );
- } else {
- /* We are already on the async stack. */
- __do_softirq();
- }
+ unsigned long old, new;
+
+ /* Get current stack pointer. */
+ asm volatile("la %0,0(15)" : "=a" (old));
+ /* Check against async. stack address range. */
+ new = S390_lowcore.async_stack;
+ if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
+ /* Need to switch to the async. stack. */
+ new -= STACK_FRAME_OVERHEAD;
+ ((struct stack_frame *) new)->back_chain = old;
+ asm volatile(" la 15,0(%0)\n"
+ " basr 14,%2\n"
+ " la 15,0(%1)\n"
+ : : "a" (new), "a" (old),
+ "a" (__do_softirq)
+ : "0", "1", "2", "3", "4", "5", "14",
+ "cc", "memory" );
+ } else {
+ /* We are already on the async stack. */
+ __do_softirq();
}
-
- local_irq_restore(flags);
}
/*
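
The do_softirq() to do_softirq_own_stack() conversion above can drop the in_interrupt() test and the interrupt save/restore because the generic softirq code now performs them before calling the arch hook. Roughly the shape of that generic caller, reconstructed from memory rather than from this diff, so treat it as an approximation:

asmlinkage void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);
        if (local_softirq_pending())
                do_softirq_own_stack();    /* arch hook defined above */
        local_irq_restore(flags);
}
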
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d86e64eddb42..bc71a7b95af5 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -26,11 +26,12 @@
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
-#include <asm/cacheflush.h>
-#include <asm/sections.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
+#include <asm/cacheflush.h>
+#include <asm/sections.h>
+#include <asm/dis.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -59,6 +60,8 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
{
+ if (!is_known_insn((unsigned char *)insn))
+ return -EINVAL;
switch (insn[0] >> 8) {
case 0x0c: /* bassm */
case 0x0b: /* bsm */
@@ -208,7 +211,7 @@ static void __kprobes copy_instruction(struct kprobe *p)
s64 disp, new_disp;
u64 addr, new_addr;
- memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
+ memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
if (!is_insn_relative_long(p->ainsn.insn))
return;
/*
@@ -252,7 +255,7 @@ static int __kprobes s390_get_insn_slot(struct kprobe *p)
p->ainsn.insn = NULL;
if (is_kernel_addr(p->addr))
p->ainsn.insn = get_dmainsn_slot();
- if (is_module_addr(p->addr))
+ else if (is_module_addr(p->addr))
p->ainsn.insn = get_insn_slot();
return p->ainsn.insn ? 0 : -ENOMEM;
}
@@ -608,7 +611,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
- int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2;
+ int ilen = insn_length(p->ainsn.insn[0] >> 8);
if (ip - (unsigned long) p->ainsn.insn == ilen)
ip = (unsigned long) p->addr + ilen;
}
@@ -677,7 +680,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accouting
+ * we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(p);
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 14bdecb61923..4a460c44e17e 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -78,7 +78,7 @@ PGM_CHECK_DEFAULT /* 34 */
PGM_CHECK_DEFAULT /* 35 */
PGM_CHECK_DEFAULT /* 36 */
PGM_CHECK_DEFAULT /* 37 */
-PGM_CHECK_64BIT(do_asce_exception) /* 38 */
+PGM_CHECK_DEFAULT /* 38 */
PGM_CHECK_64BIT(do_dat_exception) /* 39 */
PGM_CHECK_64BIT(do_dat_exception) /* 3a */
PGM_CHECK_64BIT(do_dat_exception) /* 3b */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index c5dbb335716d..7ed0d4e2a435 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -139,7 +139,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
memset(&frame->childregs, 0, sizeof(struct pt_regs));
- frame->childregs.psw.mask = psw_kernel_bits | PSW_MASK_DAT |
+ frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
frame->childregs.psw.addr = PSW_ADDR_AMODE |
(unsigned long) kernel_thread_starter;
@@ -165,7 +165,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
* save fprs to current->thread.fp_regs to merge them with
* the emulated registers and then copy the result to the child.
*/
- save_fp_regs(&current->thread.fp_regs);
+ save_fp_ctl(&current->thread.fp_regs.fpc);
+ save_fp_regs(current->thread.fp_regs.fprs);
memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
sizeof(s390_fp_regs));
/* Set a new TLS ? */
@@ -173,7 +174,9 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
p->thread.acrs[0] = frame->childregs.gprs[6];
#else /* CONFIG_64BIT */
/* Save the fpu registers to new thread structure. */
- save_fp_regs(&p->thread.fp_regs);
+ save_fp_ctl(&p->thread.fp_regs.fpc);
+ save_fp_regs(p->thread.fp_regs.fprs);
+ p->thread.fp_regs.pad = 0;
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {
unsigned long tls = frame->childregs.gprs[6];
@@ -205,10 +208,12 @@ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
* save fprs to current->thread.fp_regs to merge them with
* the emulated registers and then copy the result to the dump.
*/
- save_fp_regs(&current->thread.fp_regs);
+ save_fp_ctl(&current->thread.fp_regs.fpc);
+ save_fp_regs(current->thread.fp_regs.fprs);
memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
#else /* CONFIG_64BIT */
- save_fp_regs(fpregs);
+ save_fp_ctl(&fpregs->fpc);
+ save_fp_regs(fpregs->fprs);
#endif /* CONFIG_64BIT */
return 1;
}
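
Throughout this series the old save_fp_regs()/restore_fp_regs(), which took a whole s390_fp_regs, are split into separate floating-point-control and register helpers, and restoring or setting an FPC can now fail instead of being silently masked with FPC_VALID_MASK. The helpers' shape below is inferred from the call sites in these hunks (their real definitions live in a header that is not part of this diff), and the wrapper function is purely illustrative:

/* Inferred prototypes:
 *   void save_fp_ctl(u32 *fpc);          store the current FPC
 *   int  restore_fp_ctl(u32 *fpc);       load an FPC, non-zero if invalid
 *   int  test_fp_ctl(u32 fpc);           validate an FPC value
 *   void save_fp_regs(freg_t *fprs);     store %f0..%f15
 *   void restore_fp_regs(freg_t *fprs);  load %f0..%f15
 */
static int example_fp_roundtrip(struct task_struct *tsk, _sigregs *user_sregs)
{
        save_fp_ctl(&tsk->thread.fp_regs.fpc);
        save_fp_regs(tsk->thread.fp_regs.fprs);

        /* Loading the floating-point-control word can fail; do it first. */
        if (restore_fp_ctl(&user_sregs->fpregs.fpc))
                return -EINVAL;
        restore_fp_regs(tsk->thread.fp_regs.fprs);
        return 0;
}
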
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 9556905bd3ce..e65c91c591e8 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -198,9 +198,11 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
* psw and gprs are stored on the stack
*/
tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
- if (addr == (addr_t) &dummy->regs.psw.mask)
+ if (addr == (addr_t) &dummy->regs.psw.mask) {
/* Return a clean psw mask. */
- tmp = psw_user_bits | (tmp & PSW_MASK_USER);
+ tmp &= PSW_MASK_USER | PSW_MASK_RI;
+ tmp |= PSW_USER_BITS;
+ }
} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
/*
@@ -239,8 +241,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
offset = addr - (addr_t) &dummy->regs.fp_regs;
tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
- tmp &= (unsigned long) FPC_VALID_MASK
- << (BITS_PER_LONG - 32);
+ tmp <<= BITS_PER_LONG - 32;
} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
/*
@@ -321,11 +322,15 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
/*
* psw and gprs are stored on the stack
*/
- if (addr == (addr_t) &dummy->regs.psw.mask &&
- ((data & ~PSW_MASK_USER) != psw_user_bits ||
- ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
- /* Invalid psw mask. */
- return -EINVAL;
+ if (addr == (addr_t) &dummy->regs.psw.mask) {
+ unsigned long mask = PSW_MASK_USER;
+
+ mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
+ if ((data & ~mask) != PSW_USER_BITS)
+ return -EINVAL;
+ if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
+ return -EINVAL;
+ }
*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -363,10 +368,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
/*
* floating point regs. are stored in the thread structure
*/
- if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
- (data & ~((unsigned long) FPC_VALID_MASK
- << (BITS_PER_LONG - 32))) != 0)
- return -EINVAL;
+ if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
+ if ((unsigned int) data != 0 ||
+ test_fp_ctl(data >> (BITS_PER_LONG - 32)))
+ return -EINVAL;
offset = addr - (addr_t) &dummy->regs.fp_regs;
*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
@@ -557,7 +562,8 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
if (addr == (addr_t) &dummy32->regs.psw.mask) {
/* Fake a 31 bit psw mask. */
tmp = (__u32)(regs->psw.mask >> 32);
- tmp = psw32_user_bits | (tmp & PSW32_MASK_USER);
+ tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
+ tmp |= PSW32_USER_BITS;
} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
/* Fake a 31 bit psw address. */
tmp = (__u32) regs->psw.addr |
@@ -654,13 +660,16 @@ static int __poke_user_compat(struct task_struct *child,
* psw, gprs, acrs and orig_gpr2 are stored on the stack
*/
if (addr == (addr_t) &dummy32->regs.psw.mask) {
+ __u32 mask = PSW32_MASK_USER;
+
+ mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
/* Build a 64 bit psw mask from 31 bit mask. */
- if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
+ if ((tmp & ~mask) != PSW32_USER_BITS)
/* Invalid psw mask. */
return -EINVAL;
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(regs->psw.mask & PSW_MASK_BA) |
- (__u64)(tmp & PSW32_MASK_USER) << 32;
+ (__u64)(tmp & mask) << 32;
} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
/* Build a 64 bit psw address from 31 bit address. */
regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
@@ -696,8 +705,7 @@ static int __poke_user_compat(struct task_struct *child,
* floating point regs. are stored in the thread structure
*/
if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
- (tmp & ~FPC_VALID_MASK) != 0)
- /* Invalid floating point control. */
+ test_fp_ctl(tmp))
return -EINVAL;
offset = addr - (addr_t) &dummy32->regs.fp_regs;
*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
@@ -895,8 +903,10 @@ static int s390_fpregs_get(struct task_struct *target,
const struct user_regset *regset, unsigned int pos,
unsigned int count, void *kbuf, void __user *ubuf)
{
- if (target == current)
- save_fp_regs(&target->thread.fp_regs);
+ if (target == current) {
+ save_fp_ctl(&target->thread.fp_regs.fpc);
+ save_fp_regs(target->thread.fp_regs.fprs);
+ }
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_regs, 0, -1);
@@ -909,19 +919,21 @@ static int s390_fpregs_set(struct task_struct *target,
{
int rc = 0;
- if (target == current)
- save_fp_regs(&target->thread.fp_regs);
+ if (target == current) {
+ save_fp_ctl(&target->thread.fp_regs.fpc);
+ save_fp_regs(target->thread.fp_regs.fprs);
+ }
/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
- u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
- rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
+ u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 };
+ rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
0, offsetof(s390_fp_regs, fprs));
if (rc)
return rc;
- if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
+ if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
return -EINVAL;
- target->thread.fp_regs.fpc = fpc[0];
+ target->thread.fp_regs.fpc = ufpc[0];
}
if (rc == 0 && count > 0)
@@ -929,8 +941,10 @@ static int s390_fpregs_set(struct task_struct *target,
target->thread.fp_regs.fprs,
offsetof(s390_fp_regs, fprs), -1);
- if (rc == 0 && target == current)
- restore_fp_regs(&target->thread.fp_regs);
+ if (rc == 0 && target == current) {
+ restore_fp_ctl(&target->thread.fp_regs.fpc);
+ restore_fp_regs(target->thread.fp_regs.fprs);
+ }
return rc;
}
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index e1c9d1c292fa..d817cce7e72d 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -40,8 +40,6 @@ static void disable_runtime_instr(void)
static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
{
cb->buf_limit = 0xfff;
- if (s390_user_mode == HOME_SPACE_MODE)
- cb->home_space = 1;
cb->int_requested = 1;
cb->pstate = 1;
cb->pstate_set_buf = 1;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index aeed8a61fa0d..ffe1c53264a7 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -64,12 +64,6 @@
#include <asm/sclp.h>
#include "entry.h"
-long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
- PSW_MASK_EA | PSW_MASK_BA;
-long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
- PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
- PSW_MASK_PSTATE | PSW_ASC_HOME;
-
/*
* User copy operations.
*/
@@ -300,43 +294,14 @@ static int __init parse_vmalloc(char *arg)
}
early_param("vmalloc", parse_vmalloc);
-unsigned int s390_user_mode = PRIMARY_SPACE_MODE;
-EXPORT_SYMBOL_GPL(s390_user_mode);
-
-static void __init set_user_mode_primary(void)
-{
- psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
- psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
-#ifdef CONFIG_COMPAT
- psw32_user_bits =
- (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
-#endif
- uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos_switch : uaccess_pt;
-}
-
static int __init early_parse_user_mode(char *p)
{
- if (p && strcmp(p, "primary") == 0)
- s390_user_mode = PRIMARY_SPACE_MODE;
- else if (!p || strcmp(p, "home") == 0)
- s390_user_mode = HOME_SPACE_MODE;
- else
- return 1;
- return 0;
+ if (!p || strcmp(p, "primary") == 0)
+ return 0;
+ return 1;
}
early_param("user_mode", early_parse_user_mode);
-static void __init setup_addressing_mode(void)
-{
- if (s390_user_mode != PRIMARY_SPACE_MODE)
- return;
- set_user_mode_primary();
- if (MACHINE_HAS_MVCOS)
- pr_info("Address spaces switched, mvcos available\n");
- else
- pr_info("Address spaces switched, mvcos not available\n");
-}
-
void *restart_stack __attribute__((__section__(".data")));
static void __init setup_lowcore(void)
@@ -348,24 +313,24 @@ static void __init setup_lowcore(void)
*/
BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
- lc->restart_psw.mask = psw_kernel_bits;
+ lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr =
PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
- lc->external_new_psw.mask = psw_kernel_bits |
+ lc->external_new_psw.mask = PSW_KERNEL_BITS |
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->external_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
- lc->svc_new_psw.mask = psw_kernel_bits |
+ lc->svc_new_psw.mask = PSW_KERNEL_BITS |
PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
- lc->program_new_psw.mask = psw_kernel_bits |
+ lc->program_new_psw.mask = PSW_KERNEL_BITS |
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->program_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
- lc->mcck_new_psw.mask = psw_kernel_bits;
+ lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
lc->mcck_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
- lc->io_new_psw.mask = psw_kernel_bits |
+ lc->io_new_psw.mask = PSW_KERNEL_BITS |
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
lc->clock_comparator = -1ULL;
@@ -1043,10 +1008,7 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;
- if (MACHINE_HAS_MVCOS)
- memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
- else
- memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
+ uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_pt;
parse_early_param();
detect_memory_layout(memory_chunk, memory_end);
@@ -1054,7 +1016,6 @@ void __init setup_arch(char **cmdline_p)
setup_ipl();
reserve_oldmem();
setup_memory_end();
- setup_addressing_mode();
reserve_crashkernel();
setup_memory();
setup_resources();
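
Many hunks in this series replace the boot-time patched psw_kernel_bits/psw_user_bits variables (removed above) with the PSW_KERNEL_BITS and PSW_USER_BITS constants. Since "primary" is now the only supported user address-space mode, the constants presumably bake in what set_user_mode_primary() used to patch in; the sketch below is derived from the removed initializers plus that ASC swap, while the real macros live in asm/ptrace.h outside this diff:

#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
                         PSW_MASK_EA | PSW_MASK_BA)
#define PSW_USER_BITS   (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
                         PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
                         PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
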
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index c45becf82e01..fb535874a246 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -57,40 +57,48 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
/* Copy a 'clean' PSW mask to the user to avoid leaking
information about whether PER is currently on. */
- user_sregs.regs.psw.mask = psw_user_bits |
- (regs->psw.mask & PSW_MASK_USER);
+ user_sregs.regs.psw.mask = PSW_USER_BITS |
+ (regs->psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
user_sregs.regs.psw.addr = regs->psw.addr;
memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
memcpy(&user_sregs.regs.acrs, current->thread.acrs,
- sizeof(sregs->regs.acrs));
+ sizeof(user_sregs.regs.acrs));
/*
* We have to store the fp registers to current->thread.fp_regs
* to merge them with the emulated registers.
*/
- save_fp_regs(&current->thread.fp_regs);
+ save_fp_ctl(&current->thread.fp_regs.fpc);
+ save_fp_regs(current->thread.fp_regs.fprs);
memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
- sizeof(s390_fp_regs));
- return __copy_to_user(sregs, &user_sregs, sizeof(_sigregs));
+ sizeof(user_sregs.fpregs));
+ if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
+ return -EFAULT;
+ return 0;
}
-/* Returns positive number on error */
static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
- int err;
_sigregs user_sregs;
/* Always make any pending restarted system call return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
- err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs));
- if (err)
- return err;
- /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
+ if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs)))
+ return -EFAULT;
+
+ if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI))
+ return -EINVAL;
+
+ /* Loading the floating-point-control word can fail. Do that first. */
+ if (restore_fp_ctl(&user_sregs.fpregs.fpc))
+ return -EINVAL;
+
+ /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
- (user_sregs.regs.psw.mask & PSW_MASK_USER);
+ (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
/* Check for invalid user address space control. */
- if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
- regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+ if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
+ regs->psw.mask = PSW_ASC_PRIMARY |
(regs->psw.mask & ~PSW_MASK_ASC);
/* Check for invalid amode */
if (regs->psw.mask & PSW_MASK_EA)
@@ -98,14 +106,13 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
regs->psw.addr = user_sregs.regs.psw.addr;
memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
- sizeof(sregs->regs.acrs));
+ sizeof(current->thread.acrs));
restore_access_regs(current->thread.acrs);
memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
- sizeof(s390_fp_regs));
- current->thread.fp_regs.fpc &= FPC_VALID_MASK;
+ sizeof(current->thread.fp_regs));
- restore_fp_regs(&current->thread.fp_regs);
+ restore_fp_regs(current->thread.fp_regs.fprs);
clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */
return 0;
}
@@ -224,7 +231,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
regs->gprs[15] = (unsigned long) frame;
/* Force default amode and default user address space control. */
regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
- (psw_user_bits & PSW_MASK_ASC) |
+ (PSW_USER_BITS & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
@@ -295,7 +302,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gprs[15] = (unsigned long) frame;
/* Force default amode and default user address space control. */
regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
- (psw_user_bits & PSW_MASK_ASC) |
+ (PSW_USER_BITS & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1a4313a1b60f..dc4a53465060 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -283,7 +283,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
unsigned long source_cpu = stap();
- __load_psw_mask(psw_kernel_bits);
+ __load_psw_mask(PSW_KERNEL_BITS);
if (pcpu->address == source_cpu)
func(data); /* should not return */
/* Stop target cpu (if func returns this stops the current cpu). */
@@ -395,7 +395,7 @@ void smp_send_stop(void)
int cpu;
/* Disable all interrupts/machine checks */
- __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
+ __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
trace_hardirqs_off();
debug_set_critical();
@@ -533,9 +533,6 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
-struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
-EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
-
static void __init smp_get_save_area(int cpu, u16 address)
{
void *lc = pcpu_devices[0].lowcore;
@@ -546,15 +543,9 @@ static void __init smp_get_save_area(int cpu, u16 address)
if (!OLDMEM_BASE && (address == boot_cpu_address ||
ipl_info.type != IPL_TYPE_FCP_DUMP))
return;
- if (cpu >= NR_CPUS) {
- pr_warning("CPU %i exceeds the maximum %i and is excluded "
- "from the dump\n", cpu, NR_CPUS - 1);
- return;
- }
- save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
+ save_area = dump_save_area_create(cpu);
if (!save_area)
panic("could not allocate memory for save area\n");
- zfcpdump_save_areas[cpu] = save_area;
#ifdef CONFIG_CRASH_DUMP
if (address == boot_cpu_address) {
/* Copy the registers of the boot cpu. */
@@ -693,7 +684,7 @@ static void smp_start_secondary(void *cpuvoid)
S390_lowcore.restart_source = -1UL;
restore_access_regs(S390_lowcore.access_regs_save_area);
__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
- __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
+ __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
cpu_init();
preempt_disable();
init_cpu_timer();
@@ -929,7 +920,7 @@ static ssize_t show_idle_count(struct device *dev,
idle_count = ACCESS_ONCE(idle->idle_count);
if (ACCESS_ONCE(idle->clock_idle_enter))
idle_count++;
- } while ((sequence & 1) || (idle->sequence != sequence));
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -947,7 +938,7 @@ static ssize_t show_idle_time(struct device *dev,
idle_time = ACCESS_ONCE(idle->idle_time);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
- } while ((sequence & 1) || (idle->sequence != sequence));
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
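
The show_idle_count()/show_idle_time() changes above (and the matching one in vtime.c below) harden a hand-rolled seqcount read loop: without ACCESS_ONCE() the compiler is free to reuse the value of idle->sequence loaded at the top of the loop, which would defeat the retry check. The pattern, in isolation:

/* do {
 *         sequence = ACCESS_ONCE(idle->sequence);
 *         ... copy the idle fields ...
 * } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
 *
 * An odd sequence number means a writer is mid-update, so the reader
 * retries; the second ACCESS_ONCE() forces a fresh load for the final
 * comparison.
 */
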
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 05d75c413137..a84476f2a9bb 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -84,8 +84,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
*/
static void vdso_init_data(struct vdso_data *vd)
{
- vd->ectg_available =
- s390_user_mode != HOME_SPACE_MODE && test_facility(31);
+ vd->ectg_available = test_facility(31);
}
#ifdef CONFIG_64BIT
@@ -102,7 +101,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
lowcore->vdso_per_cpu_data = __LC_PASTE;
- if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
+ if (!vdso_enabled)
return 0;
segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -147,7 +146,7 @@ void vdso_free_per_cpu(struct _lowcore *lowcore)
unsigned long segment_table, page_table, page_frame;
u32 *psal, *aste;
- if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
+ if (!vdso_enabled)
return;
psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -165,7 +164,7 @@ static void vdso_init_cr5(void)
{
unsigned long cr5;
- if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
+ if (!vdso_enabled)
return;
cr5 = offsetof(struct _lowcore, paste);
__ctl_load(cr5, 5, 5);
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index abcfab55f99b..8c34363d6f1e 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -161,7 +161,7 @@ void __kprobes vtime_stop_cpu(void)
trace_hardirqs_on();
/* Wait for external, I/O or machine check interrupt. */
- psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
+ psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
idle->nohz_delay = 0;
@@ -191,7 +191,7 @@ cputime64_t s390_get_idle_time(int cpu)
sequence = ACCESS_ONCE(idle->sequence);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
- } while ((sequence & 1) || (idle->sequence != sequence));
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 3a74d8af0d69..78d967f180f4 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -107,14 +107,13 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
- int ret, idx;
+ int ret;
/* No virtio-ccw notification? Get out quickly. */
if (!vcpu->kvm->arch.css_support ||
(vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
return -EOPNOTSUPP;
- idx = srcu_read_lock(&vcpu->kvm->srcu);
/*
* The layout is as follows:
* - gpr 2 contains the subchannel id (passed as addr)
@@ -125,7 +124,6 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
vcpu->run->s.regs.gprs[2],
8, &vcpu->run->s.regs.gprs[3],
vcpu->run->s.regs.gprs[4]);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
/*
* Return cookie in gpr 2, but don't overwrite the register if the
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 99d789e8a018..374a439ccc60 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -18,20 +18,27 @@
#include <asm/uaccess.h>
#include "kvm-s390.h"
+/* Convert real to absolute address by applying the prefix of the CPU */
+static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
+ unsigned long gaddr)
+{
+ unsigned long prefix = vcpu->arch.sie_block->prefix;
+ if (gaddr < 2 * PAGE_SIZE)
+ gaddr += prefix;
+ else if (gaddr >= prefix && gaddr < prefix + 2 * PAGE_SIZE)
+ gaddr -= prefix;
+ return gaddr;
+}
+
static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
void __user *gptr,
int prefixing)
{
- unsigned long prefix = vcpu->arch.sie_block->prefix;
unsigned long gaddr = (unsigned long) gptr;
unsigned long uaddr;
- if (prefixing) {
- if (gaddr < 2 * PAGE_SIZE)
- gaddr += prefix;
- else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE))
- gaddr -= prefix;
- }
+ if (prefixing)
+ gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
uaddr = gmap_fault(gaddr, vcpu->arch.gmap);
if (IS_ERR_VALUE(uaddr))
uaddr = -EFAULT;
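
The new kvm_s390_real_to_abs() helper factors out the standard s390 prefixing rule: the guest's low 8KB of real storage is redirected to the area named by the prefix register and vice versa, everything else maps 1:1. A worked example, assuming an illustrative prefix of 0x20000:

/* With vcpu->arch.sie_block->prefix == 0x20000:
 *
 *   kvm_s390_real_to_abs(vcpu, 0x00000) == 0x20000   low 8KB -> prefix area
 *   kvm_s390_real_to_abs(vcpu, 0x01000) == 0x21000
 *   kvm_s390_real_to_abs(vcpu, 0x20000) == 0x00000   prefix area -> low 8KB
 *   kvm_s390_real_to_abs(vcpu, 0x21abc) == 0x01abc
 *   kvm_s390_real_to_abs(vcpu, 0x54321) == 0x54321   everything else is 1:1
 */
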
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 5ee56e5acc23..5ddbbde6f65c 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -62,12 +62,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
- if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
- vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
- rc = SIE_INTERCEPT_RERUNVCPU;
- vcpu->run->exit_reason = KVM_EXIT_INTR;
- }
-
if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
atomic_set_mask(CPUSTAT_STOPPED,
&vcpu->arch.sie_block->cpuflags);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 7f35cb33e510..5f79d2d79ca7 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -385,7 +385,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
}
if ((!rc) && (vcpu->arch.sie_block->ckc <
- get_tod_clock() + vcpu->arch.sie_block->epoch)) {
+ get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
if ((!psw_extint_disabled(vcpu)) &&
(vcpu->arch.sie_block->gcr[0] & 0x800ul))
rc = 1;
@@ -425,7 +425,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
goto no_timer;
}
- now = get_tod_clock() + vcpu->arch.sie_block->epoch;
+ now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
if (vcpu->arch.sie_block->ckc < now) {
__unset_cpu_idle(vcpu);
return 0;
@@ -436,6 +436,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
spin_lock(&vcpu->arch.local_int.float_int->lock);
spin_lock_bh(&vcpu->arch.local_int.lock);
add_wait_queue(&vcpu->wq, &wait);
@@ -455,6 +456,8 @@ no_timer:
remove_wait_queue(&vcpu->wq, &wait);
spin_unlock_bh(&vcpu->arch.local_int.lock);
spin_unlock(&vcpu->arch.local_int.float_int->lock);
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
return 0;
}
@@ -515,7 +518,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
}
if ((vcpu->arch.sie_block->ckc <
- get_tod_clock() + vcpu->arch.sie_block->epoch))
+ get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
__try_deliver_ckc_interrupt(vcpu);
if (atomic_read(&fi->active)) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 776dafe918db..569494e01ec6 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -343,10 +343,11 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- save_fp_regs(&vcpu->arch.host_fpregs);
+ save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
+ save_fp_regs(vcpu->arch.host_fpregs.fprs);
save_access_regs(vcpu->arch.host_acrs);
- vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
- restore_fp_regs(&vcpu->arch.guest_fpregs);
+ restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.gmap);
atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -356,9 +357,11 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap);
- save_fp_regs(&vcpu->arch.guest_fpregs);
+ save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ save_fp_regs(vcpu->arch.guest_fpregs.fprs);
save_access_regs(vcpu->run->s.regs.acrs);
- restore_fp_regs(&vcpu->arch.host_fpregs);
+ restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
+ restore_fp_regs(vcpu->arch.host_fpregs.fprs);
restore_access_regs(vcpu->arch.host_acrs);
}
@@ -618,9 +621,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
+ if (test_fp_ctl(fpu->fpc))
+ return -EINVAL;
memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
- vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
- restore_fp_regs(&vcpu->arch.guest_fpregs);
+ vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+ restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
return 0;
}
@@ -689,9 +695,9 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
return 0;
}
-static int __vcpu_run(struct kvm_vcpu *vcpu)
+static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
- int rc;
+ int rc, cpuflags;
memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
@@ -709,28 +715,24 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
vcpu->arch.sie_block->icptcode = 0;
- VCPU_EVENT(vcpu, 6, "entering sie flags %x",
- atomic_read(&vcpu->arch.sie_block->cpuflags));
- trace_kvm_s390_sie_enter(vcpu,
- atomic_read(&vcpu->arch.sie_block->cpuflags));
+ cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
+ VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
+ trace_kvm_s390_sie_enter(vcpu, cpuflags);
- /*
- * As PF_VCPU will be used in fault handler, between guest_enter
- * and guest_exit should be no uaccess.
- */
- preempt_disable();
- kvm_guest_enter();
- preempt_enable();
- rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
- kvm_guest_exit();
+ return 0;
+}
+
+static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
+{
+ int rc;
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
- if (rc > 0)
+ if (exit_reason >= 0) {
rc = 0;
- if (rc < 0) {
+ } else {
if (kvm_is_ucontrol(vcpu->kvm)) {
rc = SIE_INTERCEPT_UCONTROL;
} else {
@@ -741,6 +743,49 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
}
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
+
+ if (rc == 0) {
+ if (kvm_is_ucontrol(vcpu->kvm))
+ rc = -EOPNOTSUPP;
+ else
+ rc = kvm_handle_sie_intercept(vcpu);
+ }
+
+ return rc;
+}
+
+static int __vcpu_run(struct kvm_vcpu *vcpu)
+{
+ int rc, exit_reason;
+
+ /*
+ * We try to hold kvm->srcu during most of vcpu_run (except when run-
+ * ning the guest), so that memslots (and other stuff) are protected
+ */
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ do {
+ rc = vcpu_pre_run(vcpu);
+ if (rc)
+ break;
+
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ /*
+ * As PF_VCPU will be used in fault handler, between
+ * guest_enter and guest_exit should be no uaccess.
+ */
+ preempt_disable();
+ kvm_guest_enter();
+ preempt_enable();
+ exit_reason = sie64a(vcpu->arch.sie_block,
+ vcpu->run->s.regs.gprs);
+ kvm_guest_exit();
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ rc = vcpu_post_run(vcpu, exit_reason);
+ } while (!signal_pending(current) && !rc);
+
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
return rc;
}
@@ -749,7 +794,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
int rc;
sigset_t sigsaved;
-rerun_vcpu:
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
@@ -782,19 +826,7 @@ rerun_vcpu:
}
might_fault();
-
- do {
- rc = __vcpu_run(vcpu);
- if (rc)
- break;
- if (kvm_is_ucontrol(vcpu->kvm))
- rc = -EOPNOTSUPP;
- else
- rc = kvm_handle_sie_intercept(vcpu);
- } while (!signal_pending(current) && !rc);
-
- if (rc == SIE_INTERCEPT_RERUNVCPU)
- goto rerun_vcpu;
+ rc = __vcpu_run(vcpu);
if (signal_pending(current) && !rc) {
kvm_run->exit_reason = KVM_EXIT_INTR;
@@ -876,7 +908,8 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
* copying in vcpu load/put. Lets update our copies before we save
* it into the save area
*/
- save_fp_regs(&vcpu->arch.guest_fpregs);
+ save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ save_fp_regs(vcpu->arch.guest_fpregs.fprs);
save_access_regs(vcpu->run->s.regs.acrs);
if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
@@ -951,6 +984,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
+ int idx;
long r;
switch (ioctl) {
@@ -964,7 +998,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
case KVM_S390_STORE_STATUS:
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_s390_vcpu_store_status(vcpu, arg);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
case KVM_S390_SET_INITIAL_PSW: {
psw_t psw;
@@ -1060,12 +1096,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages)
{
return 0;
}
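
The restructured __vcpu_run() also explains the paired changes in kvm_s390_handle_wait() (interrupt.c above) and in the KVM_S390_STORE_STATUS ioctl: the vcpu now holds an SRCU read lock for most of the run loop, so paths that sleep for long or run outside the loop have to drop or take it explicitly. For reference, the basic pairing the code relies on, shown as an illustrative helper:

/* Illustrative only: the index returned by srcu_read_lock() must be handed
 * back to the matching srcu_read_unlock(), and the read side must not be
 * held across guest execution or long sleeps. */
static void example_access_memslots(struct kvm_vcpu *vcpu)
{
        int idx;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        /* ... memslot lookups, gfn_to_hva(), guest copies ... */
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
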
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index dc99f1ca4267..b44912a32949 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -28,8 +28,7 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
extern unsigned long *vfacilities;
/* negative values are error codes, positive values for internal conditions */
-#define SIE_INTERCEPT_RERUNVCPU (1<<0)
-#define SIE_INTERCEPT_UCONTROL (1<<1)
+#define SIE_INTERCEPT_UCONTROL (1<<0)
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
@@ -91,8 +90,10 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
- *r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
- *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
+ if (r1)
+ *r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
+ if (r2)
+ *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}
static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 59200ee275e5..2440602e6df1 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -30,6 +30,38 @@
#include "kvm-s390.h"
#include "trace.h"
+/* Handle SCK (SET CLOCK) interception */
+static int handle_set_clock(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu *cpup;
+ s64 hostclk, val;
+ u64 op2;
+ int i;
+
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+ op2 = kvm_s390_get_base_disp_s(vcpu);
+ if (op2 & 7) /* Operand must be on a doubleword boundary */
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ if (get_guest(vcpu, val, (u64 __user *) op2))
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+ if (store_tod_clock(&hostclk)) {
+ kvm_s390_set_psw_cc(vcpu, 3);
+ return 0;
+ }
+ val = (val - hostclk) & ~0x3fUL;
+
+ mutex_lock(&vcpu->kvm->lock);
+ kvm_for_each_vcpu(i, cpup, vcpu->kvm)
+ cpup->arch.sie_block->epoch = val;
+ mutex_unlock(&vcpu->kvm->lock);
+
+ kvm_s390_set_psw_cc(vcpu, 0);
+ return 0;
+}
+
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
u64 operand2;
@@ -128,6 +160,33 @@ static int handle_skey(struct kvm_vcpu *vcpu)
return 0;
}
+static int handle_test_block(struct kvm_vcpu *vcpu)
+{
+ unsigned long hva;
+ gpa_t addr;
+ int reg2;
+
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+ kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
+ addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+ addr = kvm_s390_real_to_abs(vcpu, addr);
+
+ hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
+ if (kvm_is_error_hva(hva))
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ /*
+ * We don't expect errors on modern systems, and do not care
+ * about storage keys (yet), so let's just clear the page.
+ */
+ if (clear_user((void __user *)hva, PAGE_SIZE) != 0)
+ return -EFAULT;
+ kvm_s390_set_psw_cc(vcpu, 0);
+ vcpu->run->s.regs.gprs[0] = 0;
+ return 0;
+}
+
static int handle_tpi(struct kvm_vcpu *vcpu)
{
struct kvm_s390_interrupt_info *inti;
@@ -438,12 +497,14 @@ out_exception:
static const intercept_handler_t b2_handlers[256] = {
[0x02] = handle_stidp,
+ [0x04] = handle_set_clock,
[0x10] = handle_set_prefix,
[0x11] = handle_store_prefix,
[0x12] = handle_store_cpu_address,
[0x29] = handle_skey,
[0x2a] = handle_skey,
[0x2b] = handle_skey,
+ [0x2c] = handle_test_block,
[0x30] = handle_io_inst,
[0x31] = handle_io_inst,
[0x32] = handle_io_inst,
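
The SET CLOCK handler added above works because the SIE block's epoch is the offset that is added to the host TOD to form the guest TOD (see the get_tod_clock_fast() + epoch comparisons in interrupt.c). In short:

/* After handle_set_clock():
 *
 *   guest_tod = host_tod + epoch
 *             = host_tod + ((requested_tod - hostclk) & ~0x3fUL)
 *            ~= requested_tod            (up to the masked low 6 bits)
 *
 * and since the same epoch is written to every vcpu under kvm->lock, all
 * guest CPUs keep a consistent view of the clock.
 */
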
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h
index c2f582bb1cb2..0c991c6748ab 100644
--- a/arch/s390/kvm/trace.h
+++ b/arch/s390/kvm/trace.h
@@ -4,6 +4,7 @@
#include <linux/tracepoint.h>
#include <asm/sigp.h>
#include <asm/debug.h>
+#include <asm/dis.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 20b0e97a7df2..b068729e50ac 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,7 +2,7 @@
# Makefile for s390-specific library files..
#
-lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
+lib-y += delay.o string.o uaccess_pt.o find.o
obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
obj-$(CONFIG_64BIT) += mem64.o
lib-$(CONFIG_64BIT) += uaccess_mvcos.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 57c87d7d7ede..a9f3d0042d58 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -44,7 +44,7 @@ static void __udelay_disabled(unsigned long long usecs)
do {
set_clock_comparator(end);
vtime_stop_cpu();
- } while (get_tod_clock() < end);
+ } while (get_tod_clock_fast() < end);
lockdep_on();
__ctl_load(cr0, 0, 0);
__ctl_load(cr6, 6, 6);
@@ -55,7 +55,7 @@ static void __udelay_enabled(unsigned long long usecs)
{
u64 clock_saved, end;
- end = get_tod_clock() + (usecs << 12);
+ end = get_tod_clock_fast() + (usecs << 12);
do {
clock_saved = 0;
if (end < S390_lowcore.clock_comparator) {
@@ -65,7 +65,7 @@ static void __udelay_enabled(unsigned long long usecs)
vtime_stop_cpu();
if (clock_saved)
local_tick_enable(clock_saved);
- } while (get_tod_clock() < end);
+ } while (get_tod_clock_fast() < end);
}
/*
@@ -109,8 +109,8 @@ void udelay_simple(unsigned long long usecs)
{
u64 end;
- end = get_tod_clock() + (usecs << 12);
- while (get_tod_clock() < end)
+ end = get_tod_clock_fast() + (usecs << 12);
+ while (get_tod_clock_fast() < end)
cpu_relax();
}
@@ -120,10 +120,10 @@ void __ndelay(unsigned long long nsecs)
nsecs <<= 9;
do_div(nsecs, 125);
- end = get_tod_clock() + nsecs;
+ end = get_tod_clock_fast() + nsecs;
if (nsecs & ~0xfffUL)
__udelay(nsecs >> 12);
- while (get_tod_clock() < end)
+ while (get_tod_clock_fast() < end)
barrier();
}
EXPORT_SYMBOL(__ndelay);
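
The delay loops above (like the debug and KVM timestamp paths earlier) switch from get_tod_clock() to get_tod_clock_fast(). The fast variant is presumably built on STCKF (store clock fast, visible in the new disassembler table as "stckf"), which avoids the serialization that STCK implies and is sufficient for timeouts and timestamps. A presumed sketch of the helper, which is defined in a header outside this diff:

static inline unsigned long long get_tod_clock_fast(void)
{
        unsigned long long clk;

        /* STCKF: store clock fast, no implicit serialization. */
        asm volatile("stckf %0" : "=Q" (clk) : : "cc");
        return clk;
}
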
diff --git a/arch/s390/lib/find.c b/arch/s390/lib/find.c
new file mode 100644
index 000000000000..620d34d6487e
--- /dev/null
+++ b/arch/s390/lib/find.c
@@ -0,0 +1,77 @@
+/*
+ * MSB0 numbered special bitops handling.
+ *
+ * On s390x the bits are numbered:
+ * |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ * |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The reason for this bit numbering is the fact that the hardware sets bits
+ * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
+ * from the 'wrong end'.
+ */
+
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/export.h>
+
+unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG - 1)) {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = (*p) & (~0UL << (BITS_PER_LONG - size));
+ if (!tmp) /* Are any bits set? */
+ return result + size; /* Nope. */
+found:
+ return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_first_bit_inv);
+
+unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + (offset / BITS_PER_LONG);
+ unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL >> offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+found_first:
+ tmp &= (~0UL << (BITS_PER_LONG - size));
+ if (!tmp) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_next_bit_inv);
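
The header comment of the new find.c explains the inverted (MSB0) bit numbering; a small worked example makes the mapping concrete for the 64-bit layout:

/* unsigned long map[2] = { 0x0000000000000000UL, 0x4000000000000000UL };
 *
 *   find_first_bit_inv(map, 128)    == 65    second long, second-highest bit
 *   find_next_bit_inv(map, 128, 66) == 128   nothing further set -> size
 *
 * The conventional LSB0 find_first_bit() on the same words would report
 * bit 126 instead.
 */
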
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 1829742bf479..4b7993bf69b9 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -65,13 +65,6 @@ static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
return size;
}
-static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
-{
- if (size <= 256)
- return copy_from_user_std(size, ptr, x);
- return copy_from_user_mvcos(size, ptr, x);
-}
-
static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
{
register unsigned long reg0 asm("0") = 0x810000UL;
@@ -101,14 +94,6 @@ static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
return size;
}
-static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
- const void *x)
-{
- if (size <= 256)
- return copy_to_user_std(size, ptr, x);
- return copy_to_user_mvcos(size, ptr, x);
-}
-
static size_t copy_in_user_mvcos(size_t size, void __user *to,
const void __user *from)
{
@@ -201,23 +186,8 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
}
struct uaccess_ops uaccess_mvcos = {
- .copy_from_user = copy_from_user_mvcos_check,
- .copy_from_user_small = copy_from_user_std,
- .copy_to_user = copy_to_user_mvcos_check,
- .copy_to_user_small = copy_to_user_std,
- .copy_in_user = copy_in_user_mvcos,
- .clear_user = clear_user_mvcos,
- .strnlen_user = strnlen_user_std,
- .strncpy_from_user = strncpy_from_user_std,
- .futex_atomic_op = futex_atomic_op_std,
- .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
-};
-
-struct uaccess_ops uaccess_mvcos_switch = {
.copy_from_user = copy_from_user_mvcos,
- .copy_from_user_small = copy_from_user_mvcos,
.copy_to_user = copy_to_user_mvcos,
- .copy_to_user_small = copy_to_user_mvcos,
.copy_in_user = copy_in_user_mvcos,
.clear_user = clear_user_mvcos,
.strnlen_user = strnlen_user_mvcos,
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 1694d738b175..97e03caf7825 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -461,9 +461,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
struct uaccess_ops uaccess_pt = {
.copy_from_user = copy_from_user_pt,
- .copy_from_user_small = copy_from_user_pt,
.copy_to_user = copy_to_user_pt,
- .copy_to_user_small = copy_to_user_pt,
.copy_in_user = copy_in_user_pt,
.clear_user = clear_user_pt,
.strnlen_user = strnlen_user_pt,
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
deleted file mode 100644
index 4a75d475b06a..000000000000
--- a/arch/s390/lib/uaccess_std.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Standard user space access functions based on mvcp/mvcs and doing
- * interesting things in the secondary space mode.
- *
- * Copyright IBM Corp. 2006
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-#ifndef CONFIG_64BIT
-#define AHI "ahi"
-#define ALR "alr"
-#define CLR "clr"
-#define LHI "lhi"
-#define SLR "slr"
-#else
-#define AHI "aghi"
-#define ALR "algr"
-#define CLR "clgr"
-#define LHI "lghi"
-#define SLR "slgr"
-#endif
-
-size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
-{
- unsigned long tmp1, tmp2;
-
- tmp1 = -256UL;
- asm volatile(
- "0: mvcp 0(%0,%2),0(%1),%3\n"
- "10:jz 8f\n"
- "1:"ALR" %0,%3\n"
- " la %1,256(%1)\n"
- " la %2,256(%2)\n"
- "2: mvcp 0(%0,%2),0(%1),%3\n"
- "11:jnz 1b\n"
- " j 8f\n"
- "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
- " "LHI" %3,-4096\n"
- " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
- "4: mvcp 0(%4,%2),0(%1),%3\n"
- "12:"SLR" %0,%4\n"
- " "ALR" %2,%4\n"
- "5:"LHI" %4,-1\n"
- " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
- " bras %3,7f\n" /* memset loop */
- " xc 0(1,%2),0(%2)\n"
- "6: xc 0(256,%2),0(%2)\n"
- " la %2,256(%2)\n"
- "7:"AHI" %4,-256\n"
- " jnm 6b\n"
- " ex %4,0(%3)\n"
- " j 9f\n"
- "8:"SLR" %0,%0\n"
- "9: \n"
- EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
- EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
- : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : : "cc", "memory");
- return size;
-}
-
-static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
- void *x)
-{
- if (size <= 1024)
- return copy_from_user_std(size, ptr, x);
- return copy_from_user_pt(size, ptr, x);
-}
-
-size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
-{
- unsigned long tmp1, tmp2;
-
- tmp1 = -256UL;
- asm volatile(
- "0: mvcs 0(%0,%1),0(%2),%3\n"
- "7: jz 5f\n"
- "1:"ALR" %0,%3\n"
- " la %1,256(%1)\n"
- " la %2,256(%2)\n"
- "2: mvcs 0(%0,%1),0(%2),%3\n"
- "8: jnz 1b\n"
- " j 5f\n"
- "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
- " "LHI" %3,-4096\n"
- " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 6f\n"
- "4: mvcs 0(%4,%1),0(%2),%3\n"
- "9:"SLR" %0,%4\n"
- " j 6f\n"
- "5:"SLR" %0,%0\n"
- "6: \n"
- EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
- EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
- : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : : "cc", "memory");
- return size;
-}
-
-static size_t copy_to_user_std_check(size_t size, void __user *ptr,
- const void *x)
-{
- if (size <= 1024)
- return copy_to_user_std(size, ptr, x);
- return copy_to_user_pt(size, ptr, x);
-}
-
-static size_t copy_in_user_std(size_t size, void __user *to,
- const void __user *from)
-{
- unsigned long tmp1;
-
- asm volatile(
- " sacf 256\n"
- " "AHI" %0,-1\n"
- " jo 5f\n"
- " bras %3,3f\n"
- "0:"AHI" %0,257\n"
- "1: mvc 0(1,%1),0(%2)\n"
- " la %1,1(%1)\n"
- " la %2,1(%2)\n"
- " "AHI" %0,-1\n"
- " jnz 1b\n"
- " j 5f\n"
- "2: mvc 0(256,%1),0(%2)\n"
- " la %1,256(%1)\n"
- " la %2,256(%2)\n"
- "3:"AHI" %0,-256\n"
- " jnm 2b\n"
- "4: ex %0,1b-0b(%3)\n"
- "5: "SLR" %0,%0\n"
- "6: sacf 0\n"
- EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
- : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
- : : "cc", "memory");
- return size;
-}
-
-static size_t clear_user_std(size_t size, void __user *to)
-{
- unsigned long tmp1, tmp2;
-
- asm volatile(
- " sacf 256\n"
- " "AHI" %0,-1\n"
- " jo 5f\n"
- " bras %3,3f\n"
- " xc 0(1,%1),0(%1)\n"
- "0:"AHI" %0,257\n"
- " la %2,255(%1)\n" /* %2 = ptr + 255 */
- " srl %2,12\n"
- " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
- " "SLR" %2,%1\n"
- " "CLR" %0,%2\n" /* clear crosses next page boundary? */
- " jnh 5f\n"
- " "AHI" %2,-1\n"
- "1: ex %2,0(%3)\n"
- " "AHI" %2,1\n"
- " "SLR" %0,%2\n"
- " j 5f\n"
- "2: xc 0(256,%1),0(%1)\n"
- " la %1,256(%1)\n"
- "3:"AHI" %0,-256\n"
- " jnm 2b\n"
- "4: ex %0,0(%3)\n"
- "5: "SLR" %0,%0\n"
- "6: sacf 0\n"
- EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
- : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
- : : "cc", "memory");
- return size;
-}
-
-size_t strnlen_user_std(size_t size, const char __user *src)
-{
- register unsigned long reg0 asm("0") = 0UL;
- unsigned long tmp1, tmp2;
-
- if (unlikely(!size))
- return 0;
- asm volatile(
- " la %2,0(%1)\n"
- " la %3,0(%0,%1)\n"
- " "SLR" %0,%0\n"
- " sacf 256\n"
- "0: srst %3,%2\n"
- " jo 0b\n"
- " la %0,1(%3)\n" /* strnlen_user results includes \0 */
- " "SLR" %0,%1\n"
- "1: sacf 0\n"
- EX_TABLE(0b,1b)
- : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
- return size;
-}
-
-size_t strncpy_from_user_std(size_t count, const char __user *src, char *dst)
-{
- size_t done, len, offset, len_str;
-
- if (unlikely(!count))
- return 0;
- done = 0;
- do {
- offset = (size_t)src & ~PAGE_MASK;
- len = min(count - done, PAGE_SIZE - offset);
- if (copy_from_user_std(len, src, dst))
- return -EFAULT;
- len_str = strnlen(dst, len);
- done += len_str;
- src += len_str;
- dst += len_str;
- } while ((len_str == len) && (done < count));
- return done;
-}
-
-#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
- asm volatile( \
- " sacf 256\n" \
- "0: l %1,0(%6)\n" \
- "1:"insn \
- "2: cs %1,%2,0(%6)\n" \
- "3: jl 1b\n" \
- " lhi %0,0\n" \
- "4: sacf 0\n" \
- EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
- : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
- "=m" (*uaddr) \
- : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
- "m" (*uaddr) : "cc");
-
-int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
-{
- int oldval = 0, newval, ret;
-
- switch (op) {
- case FUTEX_OP_SET:
- __futex_atomic_op("lr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_ADD:
- __futex_atomic_op("lr %2,%1\nar %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_OR:
- __futex_atomic_op("lr %2,%1\nor %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_ANDN:
- __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_XOR:
- __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- default:
- ret = -ENOSYS;
- }
- *old = oldval;
- return ret;
-}
-
-int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
-{
- int ret;
-
- asm volatile(
- " sacf 256\n"
- "0: cs %1,%4,0(%5)\n"
- "1: la %0,0\n"
- "2: sacf 0\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
- : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
- : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
- : "cc", "memory" );
- *uval = oldval;
- return ret;
-}
-
-struct uaccess_ops uaccess_std = {
- .copy_from_user = copy_from_user_std_check,
- .copy_from_user_small = copy_from_user_std,
- .copy_to_user = copy_to_user_std_check,
- .copy_to_user_small = copy_to_user_std,
- .copy_in_user = copy_in_user_std,
- .clear_user = clear_user_std,
- .strnlen_user = strnlen_user_std,
- .strncpy_from_user = strncpy_from_user_std,
- .futex_atomic_op = futex_atomic_op_std,
- .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
-};
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
index 58bff541fde9..a6ba0d724335 100644
--- a/arch/s390/math-emu/math.c
+++ b/arch/s390/math-emu/math.c
@@ -19,6 +19,8 @@
#include <math-emu/double.h>
#include <math-emu/quad.h>
+#define FPC_VALID_MASK 0xF8F8FF03
+
/*
* I miss a macro to round a floating point number to the
* nearest integer in the same floating point format.
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 9d84a1feefef..79ddd580d605 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -253,12 +253,12 @@ static int cmm_skip_blanks(char *cp, char **endp)
static struct ctl_table cmm_table[];
-static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int cmm_pages_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[16], *p;
+ unsigned int len;
long nr;
- int len;
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
@@ -293,12 +293,12 @@ static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
return 0;
}
-static int cmm_timeout_handler(ctl_table *ctl, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int cmm_timeout_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[64], *p;
long nr, seconds;
- int len;
+ unsigned int len;
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fc6679210d83..d95265b2719f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -115,13 +115,8 @@ static inline int user_space_fault(unsigned long trans_exc_code)
if (trans_exc_code == 2)
/* Access via secondary space, set_fs setting decides */
return current->thread.mm_segment.ar4;
- if (s390_user_mode == HOME_SPACE_MODE)
- /* User space if the access has been done via home space. */
- return trans_exc_code == 3;
/*
- * If the user space is not the home space the kernel runs in home
- * space. Access via secondary space has already been covered,
- * access via primary space or access register is from user space
+ * Access via primary space or access register is from user space
* and access via home space is from the kernel.
*/
return trans_exc_code != 3;
@@ -428,50 +423,13 @@ void __kprobes do_dat_exception(struct pt_regs *regs)
do_fault_error(regs, fault);
}
-#ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long trans_exc_code;
-
- /*
- * The instruction that caused the program check has
- * been nullified. Don't signal single step via SIGTRAP.
- */
- clear_tsk_thread_flag(current, TIF_PER_TRAP);
-
- trans_exc_code = regs->int_parm_long;
- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
- goto no_context;
-
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
- up_read(&mm->mmap_sem);
-
- if (vma) {
- update_mm(mm, current);
- return;
- }
-
- /* User mode accesses just cause a SIGSEGV */
- if (user_mode(regs)) {
- do_sigsegv(regs, SEGV_MAPERR);
- return;
- }
-
-no_context:
- do_no_context(regs);
-}
-#endif
-
int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
{
struct pt_regs regs;
int access, fault;
/* Emulate a uaccess fault from kernel mode. */
- regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
+ regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
if (!irqs_disabled())
regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
regs.psw.addr = (unsigned long) __builtin_return_address(0);
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 5d758db27bdc..639fce464008 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -180,9 +180,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
- if ((end < start) || (end > TASK_SIZE))
+ if ((end <= start) || (end > TASK_SIZE))
return 0;
-
+ /*
+ * local_irq_save() doesn't prevent pagetable teardown, but does
+ * prevent the pagetables from being freed on s390.
+ *
+ * So long as we atomically load page table pointers versus teardown,
+ * we can follow the address down to the page and take a ref on it.

+ */
local_irq_save(flags);
pgdp = pgd_offset(mm, addr);
do {
@@ -219,63 +225,22 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- pgd_t *pgdp, pgd;
- int nr = 0;
+ int nr, ret;
start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- if ((end < start) || (end > TASK_SIZE))
- goto slow_irqon;
-
- /*
- * local_irq_disable() doesn't prevent pagetable teardown, but does
- * prevent the pagetables from being freed on s390.
- *
- * So long as we atomically load page table pointers versus teardown,
- * we can follow the address down to the the page and take a ref on it.
- */
- local_irq_disable();
- pgdp = pgd_offset(mm, addr);
- do {
- pgd = *pgdp;
- barrier();
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- goto slow;
- if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
- goto slow;
- } while (pgdp++, addr = next, addr != end);
- local_irq_enable();
-
- VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
- return nr;
-
- {
- int ret;
-slow:
- local_irq_enable();
-slow_irqon:
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
-
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start,
- (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
-
- /* Have to be a bit careful with return values */
- if (nr > 0) {
- if (ret < 0)
- ret = nr;
- else
- ret += nr;
- }
-
- return ret;
- }
+ nr = __get_user_pages_fast(start, nr_pages, write, pages);
+ if (nr == nr_pages)
+ return nr;
+
+ /* Try to get the remaining pages with get_user_pages */
+ start += nr << PAGE_SHIFT;
+ pages += nr;
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(current, mm, start,
+ nr_pages - nr, write, 0, pages, NULL);
+ up_read(&mm->mmap_sem);
+ /* Have to be a bit careful with return values */
+ if (nr > 0)
+ ret = (ret < 0) ? nr : ret + nr;
+ return ret;
}
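As a usage note, the reworked get_user_pages_fast() above preserves the usual contract of the fast/slow fallback: pages already pinned by the fast path are never lost, and a negative value is only returned when nothing was pinned at all. A short caller sketch, not taken from this patch and assuming start, nr_pages, write and pages are set up by the caller:

	int i, pinned;

	pinned = get_user_pages_fast(start, nr_pages, write, pages);
	if (pinned < 0)
		return pinned;			/* nothing pinned, propagate the error */
	if (pinned < nr_pages) {
		for (i = 0; i < pinned; i++)	/* partial pin: drop the references */
			put_page(pages[i]);
		return -EFAULT;
	}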
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 40023290ee5b..6bcb045d2bd2 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -101,18 +101,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
- int rc;
-
if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
return 0;
if (!(flags & MAP_FIXED))
addr = 0;
- if ((addr + len) >= TASK_SIZE) {
- rc = crst_table_upgrade(current->mm, 1UL << 53);
- if (rc)
- return rc;
- update_mm(current->mm, current);
- }
+ if ((addr + len) >= TASK_SIZE)
+ return crst_table_upgrade(current->mm, 1UL << 53);
return 0;
}
@@ -132,7 +126,6 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
- update_mm(mm, current);
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
}
return area;
@@ -155,7 +148,6 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
- update_mm(mm, current);
area = arch_get_unmapped_area_topdown(filp, addr, len,
pgoff, flags);
}
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 990397420e6b..8400f494623f 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -9,6 +9,7 @@
#include <asm/pgtable.h>
#include <asm/page.h>
+#if PAGE_DEFAULT_KEY
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
@@ -16,7 +17,7 @@ static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
return addr;
}
-void storage_key_init_range(unsigned long start, unsigned long end)
+void __storage_key_init_range(unsigned long start, unsigned long end)
{
unsigned long boundary, size;
@@ -36,6 +37,7 @@ void storage_key_init_range(unsigned long start, unsigned long end)
start += PAGE_SIZE;
}
}
+#endif
static pte_t *walk_page_table(unsigned long addr)
{
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index de8cbc30dcd1..0a2e5e086749 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -48,12 +48,23 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
}
#ifdef CONFIG_64BIT
+static void __crst_table_upgrade(void *arg)
+{
+ struct mm_struct *mm = arg;
+
+ if (current->active_mm == mm)
+ update_mm(mm, current);
+ __tlb_flush_local();
+}
+
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
unsigned long *table, *pgd;
unsigned long entry;
+ int flush;
BUG_ON(limit > (1UL << 53));
+ flush = 0;
repeat:
table = crst_table_alloc(mm);
if (!table)
@@ -79,12 +90,15 @@ repeat:
mm->pgd = (pgd_t *) table;
mm->task_size = mm->context.asce_limit;
table = NULL;
+ flush = 1;
}
spin_unlock_bh(&mm->page_table_lock);
if (table)
crst_table_free(mm, table);
if (mm->context.asce_limit < limit)
goto repeat;
+ if (flush)
+ on_each_cpu(__crst_table_upgrade, mm, 0);
return 0;
}
@@ -92,6 +106,8 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
pgd_t *pgd;
+ if (current->active_mm == mm)
+ __tlb_flush_mm(mm);
while (mm->context.asce_limit > limit) {
pgd = mm->pgd;
switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -114,6 +130,8 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
mm->task_size = mm->context.asce_limit;
crst_table_free(mm, (unsigned long *) pgd);
}
+ if (current->active_mm == mm)
+ update_mm(mm, current);
}
#endif
@@ -1087,10 +1105,9 @@ again:
continue;
/* Allocate new page table with pgstes */
new = page_table_alloc_pgste(mm, addr);
- if (!new) {
- mm->context.has_pgste = 0;
- continue;
- }
+ if (!new)
+ return -ENOMEM;
+
spin_lock(&mm->page_table_lock);
if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
/* Nuke pmd entry pointing to the "short" page table */
@@ -1128,13 +1145,15 @@ static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
if (pud_none_or_clear_bad(pud))
continue;
next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
+ if (unlikely(IS_ERR_VALUE(next)))
+ return next;
} while (pud++, addr = next, addr != end);
return addr;
}
-static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long addr, unsigned long end)
+static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long addr, unsigned long end)
{
unsigned long next;
pgd_t *pgd;
@@ -1145,7 +1164,11 @@ static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
if (pgd_none_or_clear_bad(pgd))
continue;
next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
+ if (unlikely(IS_ERR_VALUE(next)))
+ return next;
} while (pgd++, addr = next, addr != end);
+
+ return 0;
}
/*
@@ -1157,10 +1180,6 @@ int s390_enable_sie(void)
struct mm_struct *mm = tsk->mm;
struct mmu_gather tlb;
- /* Do we have switched amode? If no, we cannot do sie */
- if (s390_user_mode == HOME_SPACE_MODE)
- return -EINVAL;
-
/* Do we have pgstes? if yes, we are done */
if (mm_has_pgste(tsk->mm))
return 0;
@@ -1169,9 +1188,9 @@ int s390_enable_sie(void)
/* split thp mappings and disable thp for future mappings */
thp_split_mm(mm);
/* Reallocate the page tables with pgstes */
- mm->context.has_pgste = 1;
tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
- page_table_realloc(&tlb, mm, 0, TASK_SIZE);
+ if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
+ mm->context.has_pgste = 1;
tlb_finish_mmu(&tlb, 0, TASK_SIZE);
up_write(&mm->mmap_sem);
return mm->context.has_pgste ? 0 : -ENOMEM;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 709239285869..16871da37371 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -12,8 +12,8 @@
#include <linux/random.h>
#include <linux/init.h>
#include <asm/cacheflush.h>
-#include <asm/processor.h>
#include <asm/facility.h>
+#include <asm/dis.h>
/*
* Conventions:
@@ -156,8 +156,8 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
EMIT6(0xeb8ff058, 0x0024);
/* lgr %r14,%r15 */
EMIT4(0xb90400ef);
- /* ahi %r15,<offset> */
- EMIT4_IMM(0xa7fa0000, (jit->seen & SEEN_MEM) ? -112 : -80);
+ /* aghi %r15,<offset> */
+ EMIT4_IMM(0xa7fb0000, (jit->seen & SEEN_MEM) ? -112 : -80);
/* stg %r14,152(%r15) */
EMIT6(0xe3e0f098, 0x0024);
} else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
@@ -881,7 +881,9 @@ void bpf_jit_free(struct sk_filter *fp)
struct bpf_binary_header *header = (void *)addr;
if (fp->bpf_func == sk_run_filter)
- return;
+ goto free_filter;
set_memory_rw(addr, header->pages);
module_free(NULL, header);
+free_filter:
+ kfree(fp);
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index f17a8343e360..0c9a17780e4b 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -120,26 +120,17 @@ EXPORT_SYMBOL_GPL(pci_proc_domain);
static int zpci_set_airq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
- struct zpci_fib *fib;
- int rc;
-
- fib = (void *) get_zeroed_page(GFP_KERNEL);
- if (!fib)
- return -ENOMEM;
+ struct zpci_fib fib = {0};
- fib->isc = PCI_ISC;
- fib->sum = 1; /* enable summary notifications */
- fib->noi = airq_iv_end(zdev->aibv);
- fib->aibv = (unsigned long) zdev->aibv->vector;
- fib->aibvo = 0; /* each zdev has its own interrupt vector */
- fib->aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
- fib->aisbo = zdev->aisb & 63;
+ fib.isc = PCI_ISC;
+ fib.sum = 1; /* enable summary notifications */
+ fib.noi = airq_iv_end(zdev->aibv);
+ fib.aibv = (unsigned long) zdev->aibv->vector;
+ fib.aibvo = 0; /* each zdev has its own interrupt vector */
+ fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
+ fib.aisbo = zdev->aisb & 63;
- rc = zpci_mod_fc(req, fib);
- pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
-
- free_page((unsigned long) fib);
- return rc;
+ return zpci_mod_fc(req, &fib);
}
struct mod_pci_args {
@@ -152,22 +143,14 @@ struct mod_pci_args {
static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
- struct zpci_fib *fib;
- int rc;
-
- /* The FIB must be available even if it's not used */
- fib = (void *) get_zeroed_page(GFP_KERNEL);
- if (!fib)
- return -ENOMEM;
+ struct zpci_fib fib = {0};
- fib->pba = args->base;
- fib->pal = args->limit;
- fib->iota = args->iota;
- fib->fmb_addr = args->fmb_addr;
+ fib.pba = args->base;
+ fib.pal = args->limit;
+ fib.iota = args->iota;
+ fib.fmb_addr = args->fmb_addr;
- rc = zpci_mod_fc(req, fib);
- free_page((unsigned long) fib);
- return rc;
+ return zpci_mod_fc(req, &fib);
}
/* Modify PCI: Register I/O address translation parameters */
@@ -424,7 +407,6 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
struct msi_msg msg;
int rc;
- pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
return -EINVAL;
msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
@@ -489,7 +471,6 @@ out_msi:
out_si:
airq_iv_free_bit(zpci_aisb_iv, aisb);
out:
- dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
return rc;
}
@@ -499,14 +480,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
struct msi_desc *msi;
int rc;
- pr_info("%s: on pdev: %p\n", __func__, pdev);
-
/* Disable adapter interrupts */
rc = zpci_clear_airq(zdev);
- if (rc) {
- dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
+ if (rc)
return;
- }
/* Release MSI interrupts */
list_for_each_entry(msi, &pdev->msi_list, list) {
@@ -625,8 +602,11 @@ static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned lo
r->name = name;
rc = request_resource(&iomem_resource, r);
- if (rc)
- pr_debug("request resource %pR failed\n", r);
+ if (rc) {
+ kfree(r->name);
+ kfree(r);
+ return ERR_PTR(-ENOMEM);
+ }
return r;
}
@@ -708,6 +688,47 @@ void pcibios_disable_device(struct pci_dev *pdev)
zdev->pdev = NULL;
}
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+static int zpci_restore(struct device *dev)
+{
+ struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+ int ret = 0;
+
+ if (zdev->state != ZPCI_FN_STATE_ONLINE)
+ goto out;
+
+ ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
+ if (ret)
+ goto out;
+
+ zpci_map_resources(zdev);
+ zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
+ zdev->start_dma + zdev->iommu_size - 1,
+ (u64) zdev->dma_table);
+
+out:
+ return ret;
+}
+
+static int zpci_freeze(struct device *dev)
+{
+ struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+
+ if (zdev->state != ZPCI_FN_STATE_ONLINE)
+ return 0;
+
+ zpci_unregister_ioat(zdev, 0);
+ return clp_disable_fh(zdev);
+}
+
+struct dev_pm_ops pcibios_pm_ops = {
+ .thaw_noirq = zpci_restore,
+ .freeze_noirq = zpci_freeze,
+ .restore_noirq = zpci_restore,
+ .poweroff_noirq = zpci_freeze,
+};
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+
static int zpci_scan_bus(struct zpci_dev *zdev)
{
struct resource *res;
@@ -781,7 +802,6 @@ int zpci_enable_device(struct zpci_dev *zdev)
rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
if (rc)
goto out;
- pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);
rc = zpci_dma_init_device(zdev);
if (rc)
@@ -901,10 +921,6 @@ static int __init pci_base_init(void)
|| !test_facility(71) || !test_facility(72))
return 0;
- pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
- test_facility(69), test_facility(70),
- test_facility(71));
-
rc = zpci_debug_init();
if (rc)
goto out;
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 475563c3d1e4..84147984224a 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -16,6 +16,16 @@
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
+static inline void zpci_err_clp(unsigned int rsp, int rc)
+{
+ struct {
+ unsigned int rsp;
+ int rc;
+ } __packed data = {rsp, rc};
+
+ zpci_err_hex(&data, sizeof(data));
+}
+
/*
* Call Logical Processor
* Retry logic is handled by the caller.
@@ -54,7 +64,6 @@ static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
zdev->msi_addr = response->msia;
zdev->fmb_update = response->mui;
- pr_debug("Supported number of MSI vectors: %u\n", response->noi);
switch (response->version) {
case 1:
zdev->max_bus_speed = PCIE_SPEED_5_0GT;
@@ -84,8 +93,8 @@ static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
clp_store_query_pci_fngrp(zdev, &rrb->response);
else {
- pr_err("Query PCI FNGRP failed with response: %x cc: %d\n",
- rrb->response.hdr.rsp, rc);
+ zpci_err("Q PCI FGRP:\n");
+ zpci_err_clp(rrb->response.hdr.rsp, rc);
rc = -EIO;
}
clp_free_block(rrb);
@@ -131,8 +140,8 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
if (rrb->response.pfgid)
rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
} else {
- pr_err("Query PCI failed with response: %x cc: %d\n",
- rrb->response.hdr.rsp, rc);
+ zpci_err("Q PCI FN:\n");
+ zpci_err_clp(rrb->response.hdr.rsp, rc);
rc = -EIO;
}
out:
@@ -206,8 +215,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
*fh = rrb->response.fh;
else {
- zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc,
- rrb->response.hdr.rsp);
+ zpci_err("Set PCI FN:\n");
+ zpci_err_clp(rrb->response.hdr.rsp, rc);
rc = -EIO;
}
clp_free_block(rrb);
@@ -262,8 +271,8 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
/* Get PCI function handle list */
rc = clp_instr(rrb);
if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
- pr_err("List PCI failed with response: 0x%x cc: %d\n",
- rrb->response.hdr.rsp, rc);
+ zpci_err("List PCI FN:\n");
+ zpci_err_clp(rrb->response.hdr.rsp, rc);
rc = -EIO;
goto out;
}
@@ -273,17 +282,11 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
rrb->response.entry_size;
- pr_info("Detected number of PCI functions: %u\n", entries);
- /* Store the returned resume token as input for the next call */
resume_token = rrb->response.resume_token;
-
for (i = 0; i < entries; i++)
cb(&rrb->response.fh_list[i]);
} while (resume_token);
-
- pr_debug("Maximum number of supported PCI functions: %u\n",
- rrb->response.max_fn);
out:
return rc;
}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 7e5573acb063..9b83d080902d 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -145,10 +145,8 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
return -EINVAL;
spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
- if (!zdev->dma_table) {
- dev_err(&zdev->pdev->dev, "Missing DMA table\n");
+ if (!zdev->dma_table)
goto no_refresh;
- }
for (i = 0; i < nr_pages; i++) {
dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
@@ -280,11 +278,8 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
size = nr_pages * PAGE_SIZE;
dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
- if (dma_addr + size > zdev->end_dma) {
- dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
- dma_addr, size, zdev->end_dma);
+ if (dma_addr + size > zdev->end_dma)
goto out_free;
- }
if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
flags |= ZPCI_TABLE_PROTECTED;
@@ -297,7 +292,8 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
out_free:
dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
- dev_err(dev, "Failed to map addr: %lx\n", pa);
+ zpci_err("map error:\n");
+ zpci_err_hex(&pa, sizeof(pa));
return DMA_ERROR_CODE;
}
@@ -312,8 +308,10 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr = dma_addr & PAGE_MASK;
if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
- ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
- dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);
+ ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
+ zpci_err("unmap error:\n");
+ zpci_err_hex(&dma_addr, sizeof(dma_addr));
+ }
atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 0aecaf954845..278e671ec9ac 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <asm/pci_debug.h>
/* Content Code Description for PCI Function Error */
struct zpci_ccdf_err {
@@ -41,25 +42,15 @@ struct zpci_ccdf_avail {
u16 pec; /* PCI event code */
} __packed;
-static void zpci_event_log_err(struct zpci_ccdf_err *ccdf)
-{
- struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
-
- zpci_err("SEI error CCD:\n");
- zpci_err_hex(ccdf, sizeof(*ccdf));
- dev_err(&zdev->pdev->dev, "event code: 0x%x\n", ccdf->pec);
-}
-
static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
{
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+ struct pci_dev *pdev = zdev ? zdev->pdev : NULL;
- pr_err("%s%s: availability event: fh: 0x%x fid: 0x%x event code: 0x%x reason:",
- (zdev) ? dev_driver_string(&zdev->pdev->dev) : "?",
- (zdev) ? dev_name(&zdev->pdev->dev) : "?",
- ccdf->fh, ccdf->fid, ccdf->pec);
- print_hex_dump(KERN_CONT, "ccdf", DUMP_PREFIX_OFFSET,
- 16, 1, ccdf, sizeof(*ccdf), false);
+ pr_info("%s: Event 0x%x reconfigured PCI function 0x%x\n",
+ pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
+ zpci_err("avail CCDF:\n");
+ zpci_err_hex(ccdf, sizeof(*ccdf));
switch (ccdf->pec) {
case 0x0301:
@@ -79,14 +70,16 @@ static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
void zpci_event_error(void *data)
{
struct zpci_ccdf_err *ccdf = data;
- struct zpci_dev *zdev;
+ struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+
+ zpci_err("error CCDF:\n");
+ zpci_err_hex(ccdf, sizeof(*ccdf));
- zpci_event_log_err(ccdf);
- zdev = get_zdev_by_fid(ccdf->fid);
- if (!zdev) {
- pr_err("Error event for unknown fid: %x", ccdf->fid);
+ if (!zdev)
return;
- }
+
+ pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
+ pci_name(zdev->pdev), ccdf->pec, ccdf->fid);
}
void zpci_event_availability(void *data)
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index e1c7bb999b06..f3414ade77a3 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -4,3 +4,4 @@ header-y +=
generic-y += clkdev.h
generic-y += trace_clock.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 224f4bc9925e..f56d7f8b6f64 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -1,5 +1,6 @@
config SUPERH
def_bool y
+ select ARCH_MIGHT_HAVE_PC_PARPORT
select EXPERT
select CLKDEV_LOOKUP
select HAVE_IDE if HAS_IOPORT
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 1fa8be409771..122f737a901f 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -15,6 +15,7 @@
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h>
+#include <linux/mfd/tmio.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 280bea9e5e2b..231efbb68108 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -34,3 +34,4 @@ generic-y += termios.h
generic-y += trace_clock.h
generic-y += ucontext.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h
index ec9ad593c3da..01a38696137e 100644
--- a/arch/sh/include/asm/hw_breakpoint.h
+++ b/arch/sh/include/asm/hw_breakpoint.h
@@ -7,6 +7,7 @@
#include <linux/kdebug.h>
#include <linux/types.h>
+#include <cpu/ubc.h>
struct arch_hw_breakpoint {
char *name; /* Contains name of the symbol to set bkpt */
@@ -15,17 +16,6 @@ struct arch_hw_breakpoint {
u16 type;
};
-enum {
- SH_BREAKPOINT_READ = (1 << 1),
- SH_BREAKPOINT_WRITE = (1 << 2),
- SH_BREAKPOINT_RW = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
-
- SH_BREAKPOINT_LEN_1 = (1 << 12),
- SH_BREAKPOINT_LEN_2 = (1 << 13),
- SH_BREAKPOINT_LEN_4 = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
- SH_BREAKPOINT_LEN_8 = (1 << 14),
-};
-
struct sh_ubc {
const char *name;
unsigned int num_events;
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 21c5088788da..b9d9489a5012 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -81,7 +81,7 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
/*
* Fix version; Note that we avoid version #0
- * to distingush NO_CONTEXT.
+ * to distinguish NO_CONTEXT.
*/
if (!asid)
asid = MMU_CONTEXT_FIRST_VERSION;
diff --git a/arch/sh/include/cpu-common/cpu/ubc.h b/arch/sh/include/cpu-common/cpu/ubc.h
new file mode 100644
index 000000000000..b60461930a32
--- /dev/null
+++ b/arch/sh/include/cpu-common/cpu/ubc.h
@@ -0,0 +1,17 @@
+#ifndef __ARCH_SH_CPU_UBC_H__
+#define __ARCH_SH_CPU_UBC_H__
+
+enum {
+ SH_BREAKPOINT_READ = (1 << 1),
+ SH_BREAKPOINT_WRITE = (1 << 2),
+ SH_BREAKPOINT_RW = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
+
+ SH_BREAKPOINT_LEN_1 = (1 << 12),
+ SH_BREAKPOINT_LEN_2 = (1 << 13),
+ SH_BREAKPOINT_LEN_4 = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
+ SH_BREAKPOINT_LEN_8 = (1 << 14),
+};
+
+#define UBC_64BIT 1
+
+#endif /* __ARCH_SH_CPU_UBC_H__ */
diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h
new file mode 100644
index 000000000000..3371f9042184
--- /dev/null
+++ b/arch/sh/include/cpu-sh2a/cpu/ubc.h
@@ -0,0 +1,14 @@
+#ifndef __ARCH_SH_CPU_UBC_H__
+#define __ARCH_SH_CPU_UBC_H__
+
+enum {
+ SH_BREAKPOINT_READ = (1 << 2),
+ SH_BREAKPOINT_WRITE = (1 << 3),
+ SH_BREAKPOINT_RW = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
+
+ SH_BREAKPOINT_LEN_1 = (1 << 0),
+ SH_BREAKPOINT_LEN_2 = (1 << 1),
+ SH_BREAKPOINT_LEN_4 = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
+};
+
+#endif /* __ARCH_SH_CPU_UBC_H__ */
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
index 990195d98456..92f0da4c86a7 100644
--- a/arch/sh/kernel/cpu/sh2a/Makefile
+++ b/arch/sh/kernel/cpu/sh2a/Makefile
@@ -22,3 +22,4 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7264) := pinmux-sh7264.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7269) := pinmux-sh7269.o
obj-$(CONFIG_GPIOLIB) += $(pinmux-y)
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += ubc.o
diff --git a/arch/sh/kernel/cpu/sh2a/ubc.c b/arch/sh/kernel/cpu/sh2a/ubc.c
new file mode 100644
index 000000000000..ef95a9b483e4
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/ubc.c
@@ -0,0 +1,154 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/ubc.c
+ *
+ * On-chip UBC support for SH-2A CPUs.
+ *
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <asm/hw_breakpoint.h>
+
+#define UBC_BAR(idx) (0xfffc0400 + (0x10 * idx))
+#define UBC_BAMR(idx) (0xfffc0404 + (0x10 * idx))
+#define UBC_BBR(idx) (0xfffc04A0 + (0x10 * idx))
+#define UBC_BDR(idx) (0xfffc0408 + (0x10 * idx))
+#define UBC_BDMR(idx) (0xfffc040C + (0x10 * idx))
+
+#define UBC_BRCR 0xfffc04C0
+
+/* BBR */
+#define UBC_BBR_UBID (1 << 13) /* User Break Interrupt Disable */
+#define UBC_BBR_DBE (1 << 12) /* Data Break Enable */
+#define UBC_BBR_CD_C (1 << 6) /* C Bus Cycle */
+#define UBC_BBR_CD_I (2 << 6) /* I Bus Cycle */
+#define UBC_BBR_ID_I (1 << 4) /* Break Condition is instruction fetch cycle */
+#define UBC_BBR_ID_D (2 << 4) /* Break Condition is data access cycle */
+#define UBC_BBR_ID_ID (3 << 4) /* Break Condition is instruction fetch or data access cycle */
+
+#define UBC_CRR_BIE (1 << 0)
+
+/* CBR */
+#define UBC_CBR_CE (1 << 0)
+
+static struct sh_ubc sh2a_ubc;
+
+static void sh2a_ubc_enable(struct arch_hw_breakpoint *info, int idx)
+{
+ __raw_writel(UBC_BBR_DBE | UBC_BBR_CD_C | UBC_BBR_ID_ID |
+ info->len | info->type, UBC_BBR(idx));
+ __raw_writel(info->address, UBC_BAR(idx));
+}
+
+static void sh2a_ubc_disable(struct arch_hw_breakpoint *info, int idx)
+{
+ __raw_writel(UBC_BBR_UBID, UBC_BBR(idx));
+ __raw_writel(0, UBC_BAR(idx));
+}
+
+static void sh2a_ubc_enable_all(unsigned long mask)
+{
+ int i;
+
+ for (i = 0; i < sh2a_ubc.num_events; i++)
+ if (mask & (1 << i))
+ __raw_writel(__raw_readl(UBC_BBR(i)) & ~UBC_BBR_UBID,
+ UBC_BBR(i));
+}
+
+static void sh2a_ubc_disable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh2a_ubc.num_events; i++)
+ __raw_writel(__raw_readl(UBC_BBR(i)) | UBC_BBR_UBID,
+ UBC_BBR(i));
+}
+
+static unsigned long sh2a_ubc_active_mask(void)
+{
+ unsigned long active = 0;
+ int i;
+
+ for (i = 0; i < sh2a_ubc.num_events; i++)
+ if (!(__raw_readl(UBC_BBR(i)) & UBC_BBR_UBID))
+ active |= (1 << i);
+
+ return active;
+}
+
+static unsigned long sh2a_ubc_triggered_mask(void)
+{
+ unsigned int ret, mask;
+
+ mask = 0;
+ ret = __raw_readl(UBC_BRCR);
+ if ((ret & (1 << 15)) || (ret & (1 << 13))) {
+ mask |= (1 << 0); /* Match condition for channel 0 */
+ } else
+ mask &= ~(1 << 0);
+
+ if ((ret & (1 << 14)) || (ret & (1 << 12))) {
+ mask |= (1 << 1); /* Match condition for channel 1 */
+ } else
+ mask &= ~(1 << 1);
+
+ return mask;
+}
+
+static void sh2a_ubc_clear_triggered_mask(unsigned long mask)
+{
+ if (mask & (1 << 0)) /* Channel 0 satisfied break condition */
+ __raw_writel(__raw_readl(UBC_BRCR) &
+ ~((1 << 15) | (1 << 13)), UBC_BRCR);
+
+ if (mask & (1 << 1)) /* Channel 1 satisfied break condition */
+ __raw_writel(__raw_readl(UBC_BRCR) &
+ ~((1 << 14) | (1 << 12)), UBC_BRCR);
+}
+
+static struct sh_ubc sh2a_ubc = {
+ .name = "SH-2A",
+ .num_events = 2,
+ .trap_nr = 0x1e0,
+ .enable = sh2a_ubc_enable,
+ .disable = sh2a_ubc_disable,
+ .enable_all = sh2a_ubc_enable_all,
+ .disable_all = sh2a_ubc_disable_all,
+ .active_mask = sh2a_ubc_active_mask,
+ .triggered_mask = sh2a_ubc_triggered_mask,
+ .clear_triggered_mask = sh2a_ubc_clear_triggered_mask,
+};
+
+static int __init sh2a_ubc_init(void)
+{
+ struct clk *ubc_iclk = clk_get(NULL, "ubc0");
+ int i;
+
+ /*
+ * The UBC MSTP bit is optional, as not all platforms will have
+ * it. Just ignore it if we can't find it.
+ */
+ if (IS_ERR(ubc_iclk))
+ ubc_iclk = NULL;
+
+ clk_enable(ubc_iclk);
+
+ for (i = 0; i < sh2a_ubc.num_events; i++) {
+ __raw_writel(0, UBC_BAMR(i));
+ __raw_writel(0, UBC_BBR(i));
+ }
+
+ clk_disable(ubc_iclk);
+
+ sh2a_ubc.clk = ubc_iclk;
+
+ return register_sh_ubc(&sh2a_ubc);
+}
+arch_initcall(sh2a_ubc_init);
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index f9173766ec4b..ac4922ad3c14 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -113,9 +113,11 @@ static int get_hbp_len(u16 hbp_len)
case SH_BREAKPOINT_LEN_4:
len_in_bytes = 4;
break;
+#ifdef UBC_64BIT
case SH_BREAKPOINT_LEN_8:
len_in_bytes = 8;
break;
+#endif
}
return len_in_bytes;
}
@@ -149,9 +151,11 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
case SH_BREAKPOINT_LEN_4:
*gen_len = HW_BREAKPOINT_LEN_4;
break;
+#ifdef UBC_64BIT
case SH_BREAKPOINT_LEN_8:
*gen_len = HW_BREAKPOINT_LEN_8;
break;
+#endif
default:
return -EINVAL;
}
@@ -190,9 +194,11 @@ static int arch_build_bp_info(struct perf_event *bp)
case HW_BREAKPOINT_LEN_4:
info->len = SH_BREAKPOINT_LEN_4;
break;
+#ifdef UBC_64BIT
case HW_BREAKPOINT_LEN_8:
info->len = SH_BREAKPOINT_LEN_8;
break;
+#endif
default:
return -EINVAL;
}
@@ -240,9 +246,11 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
case SH_BREAKPOINT_LEN_4:
align = 3;
break;
+#ifdef UBC_64BIT
case SH_BREAKPOINT_LEN_8:
align = 7;
break;
+#endif
default:
return ret;
}
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 063af10ff3c1..0833736afa32 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -149,47 +149,32 @@ void irq_ctx_exit(int cpu)
hardirq_ctx[cpu] = NULL;
}
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
{
- unsigned long flags;
struct thread_info *curctx;
union irq_ctx *irqctx;
u32 *isp;
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
-
- if (local_softirq_pending()) {
- curctx = current_thread_info();
- irqctx = softirq_ctx[smp_processor_id()];
- irqctx->tinfo.task = curctx->task;
- irqctx->tinfo.previous_sp = current_stack_pointer;
-
- /* build the stack frame on the softirq stack */
- isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
-
- __asm__ __volatile__ (
- "mov r15, r9 \n"
- "jsr @%0 \n"
- /* switch to the softirq stack */
- " mov %1, r15 \n"
- /* restore the thread stack */
- "mov r9, r15 \n"
- : /* no outputs */
- : "r" (__do_softirq), "r" (isp)
- : "memory", "r0", "r1", "r2", "r3", "r4",
- "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
- );
-
- /*
- * Shouldn't happen, we returned above if in_interrupt():
- */
- WARN_ON_ONCE(softirq_count());
- }
-
- local_irq_restore(flags);
+ curctx = current_thread_info();
+ irqctx = softirq_ctx[smp_processor_id()];
+ irqctx->tinfo.task = curctx->task;
+ irqctx->tinfo.previous_sp = current_stack_pointer;
+
+ /* build the stack frame on the softirq stack */
+ isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+
+ __asm__ __volatile__ (
+ "mov r15, r9 \n"
+ "jsr @%0 \n"
+ /* switch to the softirq stack */
+ " mov %1, r15 \n"
+ /* restore the thread stack */
+ "mov r9, r15 \n"
+ : /* no outputs */
+ : "r" (__do_softirq), "r" (isp)
+ : "memory", "r0", "r1", "r2", "r3", "r4",
+ "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+ );
}
#else
static inline void handle_one_irq(unsigned int irq)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 78c4fdb91bc5..258464973bcb 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -12,6 +12,7 @@ config 64BIT
config SPARC
bool
default y
+ select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI
select OF
select OF_PROMTREE
select HAVE_IDE
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 7e4a97fbded4..bf390667657a 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -16,3 +16,4 @@ generic-y += serial.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += word-at-a-time.h
+generic-y += preempt.h
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index 67c62578d170..11ebd659e7b6 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -43,10 +43,6 @@ extern int of_getintprop_default(struct device_node *np,
const char *name,
int def);
extern int of_find_in_proplist(const char *list, const char *match, int len);
-#ifdef CONFIG_NUMA
-extern int of_node_to_nid(struct device_node *dp);
-#define of_node_to_nid of_node_to_nid
-#endif
extern void prom_build_devicetree(void);
extern void of_populate_present_mask(void);
@@ -63,13 +59,5 @@ extern char *of_console_options;
extern void irq_trans_init(struct device_node *dp);
extern char *build_path_component(struct device_node *dp);
-/* SPARC has local implementations */
-extern int of_address_to_resource(struct device_node *dev, int index,
- struct resource *r);
-#define of_address_to_resource of_address_to_resource
-
-void __iomem *of_iomap(struct device_node *node, int index);
-#define of_iomap of_iomap
-
#endif /* __KERNEL__ */
#endif /* _SPARC_PROM_H */
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 4e1d66c3ce71..0f21e9a5ca18 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -72,6 +72,8 @@
#define SO_BUSY_POLL 0x0030
+#define SO_MAX_PACING_RATE 0x0031
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index d4840cec2c55..666193f4e8bb 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -698,30 +698,19 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
set_irq_regs(old_regs);
}
-void do_softirq(void)
+void do_softirq_own_stack(void)
{
- unsigned long flags;
-
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
+ void *orig_sp, *sp = softirq_stack[smp_processor_id()];
- if (local_softirq_pending()) {
- void *orig_sp, *sp = softirq_stack[smp_processor_id()];
-
- sp += THREAD_SIZE - 192 - STACK_BIAS;
-
- __asm__ __volatile__("mov %%sp, %0\n\t"
- "mov %1, %%sp"
- : "=&r" (orig_sp)
- : "r" (sp));
- __do_softirq();
- __asm__ __volatile__("mov %0, %%sp"
- : : "r" (orig_sp));
- }
+ sp += THREAD_SIZE - 192 - STACK_BIAS;
- local_irq_restore(flags);
+ __asm__ __volatile__("mov %%sp, %0\n\t"
+ "mov %1, %%sp"
+ : "=&r" (orig_sp)
+ : "r" (sp));
+ __do_softirq();
+ __asm__ __volatile__("mov %0, %%sp"
+ : : "r" (orig_sp));
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index e72212148d2a..d3d5d388c557 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -349,7 +349,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accouting
+ * we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index d397d7fc5c28..6b39125eb927 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -373,6 +373,59 @@ static const char *get_mid_prop(void)
return (tlb_type == spitfire ? "upa-portid" : "portid");
}
+bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
+ int cpu, unsigned int *thread)
+{
+ const char *mid_prop = get_mid_prop();
+ int this_cpu_id;
+
+ /* On hypervisor based platforms we interrogate the 'reg'
+ * property. On everything else we look for a 'upa-portid',
+ * 'portid', or 'cpuid' property.
+ */
+
+ if (tlb_type == hypervisor) {
+ struct property *prop = of_find_property(cpun, "reg", NULL);
+ u32 *regs;
+
+ if (!prop) {
+ pr_warn("CPU node missing reg property\n");
+ return false;
+ }
+ regs = prop->value;
+ this_cpu_id = regs[0] & 0x0fffffff;
+ } else {
+ this_cpu_id = of_getintprop_default(cpun, mid_prop, -1);
+
+ if (this_cpu_id < 0) {
+ mid_prop = "cpuid";
+ this_cpu_id = of_getintprop_default(cpun, mid_prop, -1);
+ }
+ if (this_cpu_id < 0) {
+ pr_warn("CPU node missing cpu ID property\n");
+ return false;
+ }
+ }
+ if (this_cpu_id == cpu) {
+ if (thread) {
+ int proc_id = cpu_data(cpu).proc_id;
+
+ /* On sparc64, the cpu thread information is obtained
+ * either from OBP or the machine description. We've
+ * actually probed this information already long before
+ * this interface gets called so instead of interrogating
+ * both the OF node and the MDESC again, just use what
+ * we discovered already.
+ */
+ if (proc_id < 0)
+ proc_id = 0;
+ *thread = proc_id;
+ }
+ return true;
+ }
+ return false;
+}
+
static void *of_iterate_over_cpus(void *(*func)(struct device_node *, int, int), int arg)
{
struct device_node *dp;
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 9c7be59e6f5a..218b6b23c378 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -808,4 +808,5 @@ void bpf_jit_free(struct sk_filter *fp)
{
if (fp->bpf_func != sk_run_filter)
module_free(NULL, fp->bpf_func);
+ kfree(fp);
}
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 664d6ad23f80..22f3bd147fa7 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -38,3 +38,4 @@ generic-y += termios.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index b7180e6e900d..c45593db7718 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -251,15 +251,12 @@ static void fixup_read_and_payload_sizes(void)
/* Scan for the smallest maximum payload size. */
for_each_pci_dev(dev) {
u32 devcap;
- int max_payload;
if (!pci_is_pcie(dev))
continue;
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &devcap);
- max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD;
- if (max_payload < smallest_max_payload)
- smallest_max_payload = max_payload;
+ if (dev->pcie_mpss < smallest_max_payload)
+ smallest_max_payload = dev->pcie_mpss;
}
/* Now, set the max_payload_size for all devices to that value. */
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index b30f34a79882..fdde187e6087 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -3,3 +3,4 @@ generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
generic-y += switch_to.h clkdev.h
generic-y += trace_clock.h
+generic-y += preempt.h
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index 829df49dee99..41ebbfebb333 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
char *end, buf[sizeof("nnnnn\0")];
+ size_t size;
int tmp;
- if (copy_from_user(buf, buffer, count))
+ size = min(count, sizeof(buf));
+ if (copy_from_user(buf, buffer, size))
return -EFAULT;
tmp = simple_strtol(buf, &end, 0);
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 82cdd8906f3d..a7ba27b2752b 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -1,5 +1,6 @@
config UNICORE32
def_bool y
+ select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_MEMBLOCK
select HAVE_GENERIC_DMA_COHERENT
select HAVE_DMA_ATTRS
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 89d8b6c4e39a..00045cbe5c63 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -60,3 +60,4 @@ generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f67e839f06c8..d2cff2c55584 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,6 +22,7 @@ config X86_64
config X86
def_bool y
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_AOUT if X86_32
select HAVE_UNSTABLE_SCHED_CLOCK
select ARCH_SUPPORTS_NUMA_BALANCING
@@ -123,6 +124,7 @@ config X86
select COMPAT_OLD_SIGACTION if IA32_EMULATION
select RTC_LIB
select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
config INSTRUCTION_DECODER
def_bool y
@@ -254,10 +256,6 @@ config ARCH_HWEIGHT_CFLAGS
default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
-config ARCH_CPU_PROBE_RELEASE
- def_bool y
- depends on HOTPLUG_CPU
-
config ARCH_SUPPORTS_UPROBES
def_bool y
@@ -638,10 +636,10 @@ config PARAVIRT_SPINLOCKS
spinlock implementation with something virtualization-friendly
(for example, block the virtual CPU rather than spinning).
- Unfortunately the downside is an up to 5% performance hit on
- native kernels, with various workloads.
+ It has a minimal impact on native kernels and gives a nice performance
+ benefit on paravirtualized KVM / Xen kernels.
- If you are unsure how to answer this question, answer N.
+ If you are unsure how to answer this question, answer Y.
source "arch/x86/xen/Kconfig"
@@ -756,20 +754,25 @@ config DMI
BIOS code.
config GART_IOMMU
- bool "GART IOMMU support" if EXPERT
- default y
+ bool "Old AMD GART IOMMU support"
select SWIOTLB
depends on X86_64 && PCI && AMD_NB
---help---
- Support for full DMA access of devices with 32bit memory access only
- on systems with more than 3GB. This is usually needed for USB,
- sound, many IDE/SATA chipsets and some other devices.
- Provides a driver for the AMD Athlon64/Opteron/Turion/Sempron GART
- based hardware IOMMU and a software bounce buffer based IOMMU used
- on Intel systems and as fallback.
- The code is only active when needed (enough memory and limited
- device) unless CONFIG_IOMMU_DEBUG or iommu=force is specified
- too.
+ Provides a driver for older AMD Athlon64/Opteron/Turion/Sempron
+ GART based hardware IOMMUs.
+
+ The GART supports full DMA access for devices with 32-bit access
+ limitations, on systems with more than 3 GB. This is usually needed
+ for USB, sound, many IDE/SATA chipsets and some other devices.
+
+ Newer systems typically have a modern AMD IOMMU, supported via
+ the CONFIG_AMD_IOMMU=y config option.
+
+ In normal configurations this driver is only active when needed:
+ there's more than 3 GB of memory and the system contains a
+ 32-bit limited device.
+
+ If unsure, say Y.
config CALGARY_IOMMU
bool "IBM Calgary IOMMU support"
@@ -1594,7 +1597,7 @@ config EFI_STUB
This kernel feature allows a bzImage to be loaded directly
by EFI firmware without the use of a bootloader.
- See Documentation/x86/efi-stub.txt for more information.
+ See Documentation/efi-stub.txt for more information.
config SECCOMP
def_bool y
@@ -1723,16 +1726,56 @@ config RELOCATABLE
Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address
it has been loaded at and the compile time physical address
- (CONFIG_PHYSICAL_START) is ignored.
+ (CONFIG_PHYSICAL_START) is used as the minimum location.
-# Relocation on x86-32 needs some additional build support
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
+ depends on RELOCATABLE
+ depends on !HIBERNATION
+ default n
+ ---help---
+ Randomizes the physical and virtual address at which the
+ kernel image is decompressed, as a security feature that
+ deters exploit attempts relying on knowledge of the location
+ of kernel internals.
+
+ Entropy is generated using the RDRAND instruction if it
+ is supported. If not, then RDTSC is used, if supported. If
+ neither RDRAND nor RDTSC are supported, then no randomness
+ is introduced.
+
+ The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET,
+ and aligned according to PHYSICAL_ALIGN.
+
+config RANDOMIZE_BASE_MAX_OFFSET
+ hex "Maximum ASLR offset allowed"
+ depends on RANDOMIZE_BASE
+ range 0x0 0x20000000 if X86_32
+ default "0x20000000" if X86_32
+ range 0x0 0x40000000 if X86_64
+ default "0x40000000" if X86_64
+ ---help---
+ Determines the maximal offset in bytes that will be applied to the
+ kernel when Address Space Layout Randomization (ASLR) is active.
+ Must be less than or equal to the actual physical memory on the
+ system. This must be a multiple of CONFIG_PHYSICAL_ALIGN.
+
+ On 32-bit this is limited to 512MiB.
+
+ On 64-bit this is limited by how the kernel fixmap page table is
+ positioned, so this cannot be larger than 1GiB currently. Normally
+ there is a 512MiB to 1.5GiB split between kernel and modules. When
+ this is raised above the 512MiB default, the modules area will
+ shrink to compensate, up to the current maximum 1GiB to 1GiB split.
+
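As a stand-alone sketch (ordinary userspace C, not kernel code) of the slot arithmetic these two options imply, assuming the x86_64 defaults shown above (0x40000000 maximum offset, 0x200000 alignment):

  #include <stdio.h>
  #include <stdlib.h>

  #define MAX_OFFSET      0x40000000UL    /* CONFIG_RANDOMIZE_BASE_MAX_OFFSET */
  #define PHYS_ALIGN      0x200000UL      /* CONFIG_PHYSICAL_ALIGN */

  int main(void)
  {
          unsigned long nr_slots = MAX_OFFSET / PHYS_ALIGN;       /* 512 */
          unsigned long slot = (unsigned long)rand() % nr_slots;

          printf("%lu candidate slots, e.g. offset 0x%lx\n",
                 nr_slots, slot * PHYS_ALIGN);
          return 0;
  }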
+# Relocation on x86 needs some additional build support
config X86_NEED_RELOCS
def_bool y
- depends on X86_32 && RELOCATABLE
+ depends on RANDOMIZE_BASE || (X86_32 && RELOCATABLE)
config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned"
- default "0x1000000"
+ default "0x200000"
range 0x2000 0x1000000 if X86_32
range 0x200000 0x1000000 if X86_64
---help---
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 78d91afb8e50..0f3621ed1db6 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -59,6 +59,16 @@ config EARLY_PRINTK_DBGP
with klogd/syslogd or the X server. You should normally say N here,
unless you want to debug such a crash. You need a USB debug device.
+config EARLY_PRINTK_EFI
+ bool "Early printk via the EFI framebuffer"
+ depends on EFI && EARLY_PRINTK
+ select FONT_SUPPORT
+ ---help---
+ Write kernel log output directly into the EFI framebuffer.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized.
+
config X86_PTDUMP
bool "Export kernel pagetable layout to userspace via debugfs"
depends on DEBUG_KERNEL
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 379814bc41e3..28f7db7993f5 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -20,7 +20,7 @@ targets := vmlinux.bin setup.bin setup.elf bzImage
targets += fdimage fdimage144 fdimage288 image.iso mtools.conf
subdir- := compressed
-setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpucheck.o
+setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o
setup-y += early_serial_console.o edd.o header.o main.o mca.o memory.o
setup-y += pm.o pmjump.o printf.o regs.o string.o tty.o video.o
setup-y += video-mode.o version.o
@@ -71,7 +71,8 @@ GCOV_PROFILE := n
$(obj)/bzImage: asflags-y := $(SVGA_MODE)
quiet_cmd_image = BUILD $@
-cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/zoffset.h > $@
+cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
+ $(obj)/zoffset.h $@
$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
$(call if_changed,image)
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index ef72baeff484..50f8c5e0f37e 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -26,9 +26,8 @@
#include <asm/boot.h>
#include <asm/setup.h>
#include "bitops.h"
-#include <asm/cpufeature.h>
-#include <asm/processor-flags.h>
#include "ctype.h"
+#include "cpuflags.h"
/* Useful macros */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
@@ -307,14 +306,7 @@ static inline int cmdline_find_option_bool(const char *option)
return __cmdline_find_option_bool(cmd_line_ptr, option);
}
-
/* cpu.c, cpucheck.c */
-struct cpu_features {
- int level; /* Family, or 64 for x86-64 */
- int model;
- u32 flags[NCAPINTS];
-};
-extern struct cpu_features cpu;
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr);
int validate_cpu(void);
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index dcd90df10ab4..ae8b5dbbd8c5 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -27,7 +27,7 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include
VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
$(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \
- $(obj)/piggy.o
+ $(obj)/piggy.o $(obj)/cpuflags.o $(obj)/aslr.o
$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
new file mode 100644
index 000000000000..05957986d123
--- /dev/null
+++ b/arch/x86/boot/compressed/aslr.c
@@ -0,0 +1,267 @@
+#include "misc.h"
+
+#ifdef CONFIG_RANDOMIZE_BASE
+#include <asm/msr.h>
+#include <asm/archrandom.h>
+#include <asm/e820.h>
+
+#define I8254_PORT_CONTROL 0x43
+#define I8254_PORT_COUNTER0 0x40
+#define I8254_CMD_READBACK 0xC0
+#define I8254_SELECT_COUNTER0 0x02
+#define I8254_STATUS_NOTREADY 0x40
+static inline u16 i8254(void)
+{
+ u16 status, timer;
+
+ do {
+ outb(I8254_PORT_CONTROL,
+ I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+ status = inb(I8254_PORT_COUNTER0);
+ timer = inb(I8254_PORT_COUNTER0);
+ timer |= inb(I8254_PORT_COUNTER0) << 8;
+ } while (status & I8254_STATUS_NOTREADY);
+
+ return timer;
+}
+
+static unsigned long get_random_long(void)
+{
+ unsigned long random;
+
+ if (has_cpuflag(X86_FEATURE_RDRAND)) {
+ debug_putstr("KASLR using RDRAND...\n");
+ if (rdrand_long(&random))
+ return random;
+ }
+
+ if (has_cpuflag(X86_FEATURE_TSC)) {
+ uint32_t raw;
+
+ debug_putstr("KASLR using RDTSC...\n");
+ rdtscl(raw);
+
+ /* Only use the low bits of rdtsc. */
+ random = raw & 0xffff;
+ } else {
+ debug_putstr("KASLR using i8254...\n");
+ random = i8254();
+ }
+
+ /* Extend timer bits poorly... */
+ random |= (random << 16);
+#ifdef CONFIG_X86_64
+ random |= (random << 32);
+#endif
+ return random;
+}
+
+struct mem_vector {
+ unsigned long start;
+ unsigned long size;
+};
+
+#define MEM_AVOID_MAX 5
+struct mem_vector mem_avoid[MEM_AVOID_MAX];
+
+static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
+{
+ /* Item at least partially before region. */
+ if (item->start < region->start)
+ return false;
+ /* Item at least partially after region. */
+ if (item->start + item->size > region->start + region->size)
+ return false;
+ return true;
+}
+
+static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
+{
+ /* Item one is entirely before item two. */
+ if (one->start + one->size <= two->start)
+ return false;
+ /* Item one is entirely after item two. */
+ if (one->start >= two->start + two->size)
+ return false;
+ return true;
+}
+
+static void mem_avoid_init(unsigned long input, unsigned long input_size,
+ unsigned long output, unsigned long output_size)
+{
+ u64 initrd_start, initrd_size;
+ u64 cmd_line, cmd_line_size;
+ unsigned long unsafe, unsafe_len;
+ char *ptr;
+
+ /*
+ * Avoid the region that is unsafe to overlap during
+ * decompression (see calculations at top of misc.c).
+ */
+ unsafe_len = (output_size >> 12) + 32768 + 18;
+ unsafe = (unsigned long)input + input_size - unsafe_len;
+ mem_avoid[0].start = unsafe;
+ mem_avoid[0].size = unsafe_len;
+
+ /* Avoid initrd. */
+ initrd_start = (u64)real_mode->ext_ramdisk_image << 32;
+ initrd_start |= real_mode->hdr.ramdisk_image;
+ initrd_size = (u64)real_mode->ext_ramdisk_size << 32;
+ initrd_size |= real_mode->hdr.ramdisk_size;
+ mem_avoid[1].start = initrd_start;
+ mem_avoid[1].size = initrd_size;
+
+ /* Avoid kernel command line. */
+ cmd_line = (u64)real_mode->ext_cmd_line_ptr << 32;
+ cmd_line |= real_mode->hdr.cmd_line_ptr;
+ /* Calculate size of cmd_line. */
+ ptr = (char *)(unsigned long)cmd_line;
+ for (cmd_line_size = 0; ptr[cmd_line_size++]; )
+ ;
+ mem_avoid[2].start = cmd_line;
+ mem_avoid[2].size = cmd_line_size;
+
+ /* Avoid heap memory. */
+ mem_avoid[3].start = (unsigned long)free_mem_ptr;
+ mem_avoid[3].size = BOOT_HEAP_SIZE;
+
+ /* Avoid stack memory. */
+ mem_avoid[4].start = (unsigned long)free_mem_end_ptr;
+ mem_avoid[4].size = BOOT_STACK_SIZE;
+}
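For a sense of scale, a stand-alone calculation (not part of the patch) of the unsafe_len term computed above, assuming a hypothetical 25 MiB decompressed image:

  #include <stdio.h>

  int main(void)
  {
          unsigned long output_size = 25UL << 20;         /* assumed 25 MiB image */
          unsigned long unsafe_len = (output_size >> 12) + 32768 + 18;

          /* 6400 + 32768 + 18 = 39186 bytes, roughly 38 KiB */
          printf("unsafe_len = %lu bytes\n", unsafe_len);
          return 0;
  }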
+
+/* Does this memory vector overlap a known avoided area? */
+bool mem_avoid_overlap(struct mem_vector *img)
+{
+ int i;
+
+ for (i = 0; i < MEM_AVOID_MAX; i++) {
+ if (mem_overlaps(img, &mem_avoid[i]))
+ return true;
+ }
+
+ return false;
+}
+
+unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN];
+unsigned long slot_max = 0;
+
+static void slots_append(unsigned long addr)
+{
+ /* Overflowing the slots list should be impossible. */
+ if (slot_max >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
+ CONFIG_PHYSICAL_ALIGN)
+ return;
+
+ slots[slot_max++] = addr;
+}
+
+static unsigned long slots_fetch_random(void)
+{
+ /* Handle case of no slots stored. */
+ if (slot_max == 0)
+ return 0;
+
+ return slots[get_random_long() % slot_max];
+}
+
+static void process_e820_entry(struct e820entry *entry,
+ unsigned long minimum,
+ unsigned long image_size)
+{
+ struct mem_vector region, img;
+
+ /* Skip non-RAM entries. */
+ if (entry->type != E820_RAM)
+ return;
+
+ /* Ignore entries entirely above our maximum. */
+ if (entry->addr >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
+ return;
+
+ /* Ignore entries entirely below our minimum. */
+ if (entry->addr + entry->size < minimum)
+ return;
+
+ region.start = entry->addr;
+ region.size = entry->size;
+
+ /* Potentially raise address to minimum location. */
+ if (region.start < minimum)
+ region.start = minimum;
+
+ /* Potentially raise address to meet alignment requirements. */
+ region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
+
+ /* Did we raise the address above the bounds of this e820 region? */
+ if (region.start > entry->addr + entry->size)
+ return;
+
+ /* Reduce size by any delta from the original address. */
+ region.size -= region.start - entry->addr;
+
+ /* Reduce maximum size to fit end of image within maximum limit. */
+ if (region.start + region.size > CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
+ region.size = CONFIG_RANDOMIZE_BASE_MAX_OFFSET - region.start;
+
+ /* Walk each aligned slot and check for avoided areas. */
+ for (img.start = region.start, img.size = image_size ;
+ mem_contains(&region, &img) ;
+ img.start += CONFIG_PHYSICAL_ALIGN) {
+ if (mem_avoid_overlap(&img))
+ continue;
+ slots_append(img.start);
+ }
+}
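A stand-alone walk-through (ordinary userspace C, not kernel code) of how a single RAM region is clipped, aligned and divided into candidate slots by the logic above; the region bounds, image size and alignment are assumed example values:

  #include <stdio.h>

  #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

  int main(void)
  {
          /* Assumed example: RAM from 16 MiB to 128 MiB, 16 MiB image. */
          unsigned long region_start = 0x1000000, region_end = 0x8000000;
          unsigned long image_size = 0x1000000, align = 0x200000;
          unsigned long start, nr_slots = 0;

          for (start = ALIGN_UP(region_start, align);
               start + image_size <= region_end; start += align)
                  nr_slots++;

          printf("%lu candidate slots\n", nr_slots);      /* prints 49 */
          return 0;
  }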
+
+static unsigned long find_random_addr(unsigned long minimum,
+ unsigned long size)
+{
+ int i;
+ unsigned long addr;
+
+ /* Make sure minimum is aligned. */
+ minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
+
+ /* Verify potential e820 positions, appending to slots list. */
+ for (i = 0; i < real_mode->e820_entries; i++) {
+ process_e820_entry(&real_mode->e820_map[i], minimum, size);
+ }
+
+ return slots_fetch_random();
+}
+
+unsigned char *choose_kernel_location(unsigned char *input,
+ unsigned long input_size,
+ unsigned char *output,
+ unsigned long output_size)
+{
+ unsigned long choice = (unsigned long)output;
+ unsigned long random;
+
+ if (cmdline_find_option_bool("nokaslr")) {
+ debug_putstr("KASLR disabled...\n");
+ goto out;
+ }
+
+ /* Record the various known unsafe memory ranges. */
+ mem_avoid_init((unsigned long)input, input_size,
+ (unsigned long)output, output_size);
+
+ /* Walk e820 and find a random address. */
+ random = find_random_addr(choice, output_size);
+ if (!random) {
+ debug_putstr("KASLR could not find suitable E820 region...\n");
+ goto out;
+ }
+
+ /* Always enforce the minimum. */
+ if (random < choice)
+ goto out;
+
+ choice = random;
+out:
+ return (unsigned char *)choice;
+}
+
+#endif /* CONFIG_RANDOMIZE_BASE */
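A quick stand-alone harness (ordinary userspace C, not part of the patch) exercising the same interval logic as the mem_contains()/mem_overlaps() helpers above:

  #include <assert.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct mem_vector {
          unsigned long start;
          unsigned long size;
  };

  static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
  {
          if (item->start < region->start)
                  return false;
          if (item->start + item->size > region->start + region->size)
                  return false;
          return true;
  }

  static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
  {
          if (one->start + one->size <= two->start)
                  return false;
          if (one->start >= two->start + two->size)
                  return false;
          return true;
  }

  int main(void)
  {
          struct mem_vector region = { .start = 0x1000, .size = 0x1000 };
          struct mem_vector inside = { .start = 0x1800, .size = 0x200 };
          struct mem_vector beyond = { .start = 0x1f00, .size = 0x200 };

          assert(mem_contains(&region, &inside));
          assert(!mem_contains(&region, &beyond));        /* spills past the end */
          assert(mem_overlaps(&region, &beyond));         /* but still overlaps */
          printf("interval helpers behave as expected\n");
          return 0;
  }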
diff --git a/arch/x86/boot/compressed/cmdline.c b/arch/x86/boot/compressed/cmdline.c
index bffd73b45b1f..b68e3033e6b9 100644
--- a/arch/x86/boot/compressed/cmdline.c
+++ b/arch/x86/boot/compressed/cmdline.c
@@ -1,6 +1,6 @@
#include "misc.h"
-#ifdef CONFIG_EARLY_PRINTK
+#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE
static unsigned long fs;
static inline void set_fs(unsigned long seg)
diff --git a/arch/x86/boot/compressed/cpuflags.c b/arch/x86/boot/compressed/cpuflags.c
new file mode 100644
index 000000000000..aa313466118b
--- /dev/null
+++ b/arch/x86/boot/compressed/cpuflags.c
@@ -0,0 +1,12 @@
+#ifdef CONFIG_RANDOMIZE_BASE
+
+#include "../cpuflags.c"
+
+bool has_cpuflag(int flag)
+{
+ get_cpuflags();
+
+ return test_bit(flag, cpu.flags);
+}
+
+#endif
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index b7388a425f09..a7677babf946 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -19,214 +19,10 @@
static efi_system_table_t *sys_table;
-static void efi_char16_printk(efi_char16_t *str)
-{
- struct efi_simple_text_output_protocol *out;
-
- out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
- efi_call_phys2(out->output_string, out, str);
-}
-
-static void efi_printk(char *str)
-{
- char *s8;
-
- for (s8 = str; *s8; s8++) {
- efi_char16_t ch[2] = { 0 };
-
- ch[0] = *s8;
- if (*s8 == '\n') {
- efi_char16_t nl[2] = { '\r', 0 };
- efi_char16_printk(nl);
- }
-
- efi_char16_printk(ch);
- }
-}
-
-static efi_status_t __get_map(efi_memory_desc_t **map, unsigned long *map_size,
- unsigned long *desc_size)
-{
- efi_memory_desc_t *m = NULL;
- efi_status_t status;
- unsigned long key;
- u32 desc_version;
-
- *map_size = sizeof(*m) * 32;
-again:
- /*
- * Add an additional efi_memory_desc_t because we're doing an
- * allocation which may be in a new descriptor region.
- */
- *map_size += sizeof(*m);
- status = efi_call_phys3(sys_table->boottime->allocate_pool,
- EFI_LOADER_DATA, *map_size, (void **)&m);
- if (status != EFI_SUCCESS)
- goto fail;
-
- status = efi_call_phys5(sys_table->boottime->get_memory_map, map_size,
- m, &key, desc_size, &desc_version);
- if (status == EFI_BUFFER_TOO_SMALL) {
- efi_call_phys1(sys_table->boottime->free_pool, m);
- goto again;
- }
-
- if (status != EFI_SUCCESS)
- efi_call_phys1(sys_table->boottime->free_pool, m);
-fail:
- *map = m;
- return status;
-}
-
-/*
- * Allocate at the highest possible address that is not above 'max'.
- */
-static efi_status_t high_alloc(unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long max)
-{
- unsigned long map_size, desc_size;
- efi_memory_desc_t *map;
- efi_status_t status;
- unsigned long nr_pages;
- u64 max_addr = 0;
- int i;
-
- status = __get_map(&map, &map_size, &desc_size);
- if (status != EFI_SUCCESS)
- goto fail;
-
- nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
-again:
- for (i = 0; i < map_size / desc_size; i++) {
- efi_memory_desc_t *desc;
- unsigned long m = (unsigned long)map;
- u64 start, end;
-
- desc = (efi_memory_desc_t *)(m + (i * desc_size));
- if (desc->type != EFI_CONVENTIONAL_MEMORY)
- continue;
-
- if (desc->num_pages < nr_pages)
- continue;
+#include "../../../../drivers/firmware/efi/efi-stub-helper.c"
- start = desc->phys_addr;
- end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
- if ((start + size) > end || (start + size) > max)
- continue;
-
- if (end - size > max)
- end = max;
-
- if (round_down(end - size, align) < start)
- continue;
-
- start = round_down(end - size, align);
-
- /*
- * Don't allocate at 0x0. It will confuse code that
- * checks pointers against NULL.
- */
- if (start == 0x0)
- continue;
-
- if (start > max_addr)
- max_addr = start;
- }
-
- if (!max_addr)
- status = EFI_NOT_FOUND;
- else {
- status = efi_call_phys4(sys_table->boottime->allocate_pages,
- EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
- nr_pages, &max_addr);
- if (status != EFI_SUCCESS) {
- max = max_addr;
- max_addr = 0;
- goto again;
- }
-
- *addr = max_addr;
- }
-
-free_pool:
- efi_call_phys1(sys_table->boottime->free_pool, map);
-
-fail:
- return status;
-}
-
-/*
- * Allocate at the lowest possible address.
- */
-static efi_status_t low_alloc(unsigned long size, unsigned long align,
- unsigned long *addr)
-{
- unsigned long map_size, desc_size;
- efi_memory_desc_t *map;
- efi_status_t status;
- unsigned long nr_pages;
- int i;
-
- status = __get_map(&map, &map_size, &desc_size);
- if (status != EFI_SUCCESS)
- goto fail;
-
- nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- for (i = 0; i < map_size / desc_size; i++) {
- efi_memory_desc_t *desc;
- unsigned long m = (unsigned long)map;
- u64 start, end;
-
- desc = (efi_memory_desc_t *)(m + (i * desc_size));
-
- if (desc->type != EFI_CONVENTIONAL_MEMORY)
- continue;
-
- if (desc->num_pages < nr_pages)
- continue;
-
- start = desc->phys_addr;
- end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
-
- /*
- * Don't allocate at 0x0. It will confuse code that
- * checks pointers against NULL. Skip the first 8
- * bytes so we start at a nice even number.
- */
- if (start == 0x0)
- start += 8;
-
- start = round_up(start, align);
- if ((start + size) > end)
- continue;
-
- status = efi_call_phys4(sys_table->boottime->allocate_pages,
- EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
- nr_pages, &start);
- if (status == EFI_SUCCESS) {
- *addr = start;
- break;
- }
- }
-
- if (i == map_size / desc_size)
- status = EFI_NOT_FOUND;
-
-free_pool:
- efi_call_phys1(sys_table->boottime->free_pool, map);
-fail:
- return status;
-}
-
-static void low_free(unsigned long size, unsigned long addr)
-{
- unsigned long nr_pages;
-
- nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- efi_call_phys2(sys_table->boottime->free_pages, addr, nr_pages);
-}
static void find_bits(unsigned long mask, u8 *pos, u8 *size)
{
@@ -624,242 +420,6 @@ void setup_graphics(struct boot_params *boot_params)
}
}
-struct initrd {
- efi_file_handle_t *handle;
- u64 size;
-};
-
-/*
- * Check the cmdline for a LILO-style initrd= arguments.
- *
- * We only support loading an initrd from the same filesystem as the
- * kernel image.
- */
-static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
- struct setup_header *hdr)
-{
- struct initrd *initrds;
- unsigned long initrd_addr;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
- u64 initrd_total;
- efi_file_io_interface_t *io;
- efi_file_handle_t *fh;
- efi_status_t status;
- int nr_initrds;
- char *str;
- int i, j, k;
-
- initrd_addr = 0;
- initrd_total = 0;
-
- str = (char *)(unsigned long)hdr->cmd_line_ptr;
-
- j = 0; /* See close_handles */
-
- if (!str || !*str)
- return EFI_SUCCESS;
-
- for (nr_initrds = 0; *str; nr_initrds++) {
- str = strstr(str, "initrd=");
- if (!str)
- break;
-
- str += 7;
-
- /* Skip any leading slashes */
- while (*str == '/' || *str == '\\')
- str++;
-
- while (*str && *str != ' ' && *str != '\n')
- str++;
- }
-
- if (!nr_initrds)
- return EFI_SUCCESS;
-
- status = efi_call_phys3(sys_table->boottime->allocate_pool,
- EFI_LOADER_DATA,
- nr_initrds * sizeof(*initrds),
- &initrds);
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc mem for initrds\n");
- goto fail;
- }
-
- str = (char *)(unsigned long)hdr->cmd_line_ptr;
- for (i = 0; i < nr_initrds; i++) {
- struct initrd *initrd;
- efi_file_handle_t *h;
- efi_file_info_t *info;
- efi_char16_t filename_16[256];
- unsigned long info_sz;
- efi_guid_t info_guid = EFI_FILE_INFO_ID;
- efi_char16_t *p;
- u64 file_sz;
-
- str = strstr(str, "initrd=");
- if (!str)
- break;
-
- str += 7;
-
- initrd = &initrds[i];
- p = filename_16;
-
- /* Skip any leading slashes */
- while (*str == '/' || *str == '\\')
- str++;
-
- while (*str && *str != ' ' && *str != '\n') {
- if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
- break;
-
- if (*str == '/') {
- *p++ = '\\';
- *str++;
- } else {
- *p++ = *str++;
- }
- }
-
- *p = '\0';
-
- /* Only open the volume once. */
- if (!i) {
- efi_boot_services_t *boottime;
-
- boottime = sys_table->boottime;
-
- status = efi_call_phys3(boottime->handle_protocol,
- image->device_handle, &fs_proto, &io);
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to handle fs_proto\n");
- goto free_initrds;
- }
-
- status = efi_call_phys2(io->open_volume, io, &fh);
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to open volume\n");
- goto free_initrds;
- }
- }
-
- status = efi_call_phys5(fh->open, fh, &h, filename_16,
- EFI_FILE_MODE_READ, (u64)0);
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to open initrd file: ");
- efi_char16_printk(filename_16);
- efi_printk("\n");
- goto close_handles;
- }
-
- initrd->handle = h;
-
- info_sz = 0;
- status = efi_call_phys4(h->get_info, h, &info_guid,
- &info_sz, NULL);
- if (status != EFI_BUFFER_TOO_SMALL) {
- efi_printk("Failed to get initrd info size\n");
- goto close_handles;
- }
-
-grow:
- status = efi_call_phys3(sys_table->boottime->allocate_pool,
- EFI_LOADER_DATA, info_sz, &info);
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc mem for initrd info\n");
- goto close_handles;
- }
-
- status = efi_call_phys4(h->get_info, h, &info_guid,
- &info_sz, info);
- if (status == EFI_BUFFER_TOO_SMALL) {
- efi_call_phys1(sys_table->boottime->free_pool, info);
- goto grow;
- }
-
- file_sz = info->file_size;
- efi_call_phys1(sys_table->boottime->free_pool, info);
-
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to get initrd info\n");
- goto close_handles;
- }
-
- initrd->size = file_sz;
- initrd_total += file_sz;
- }
-
- if (initrd_total) {
- unsigned long addr;
-
- /*
- * Multiple initrd's need to be at consecutive
- * addresses in memory, so allocate enough memory for
- * all the initrd's.
- */
- status = high_alloc(initrd_total, 0x1000,
- &initrd_addr, hdr->initrd_addr_max);
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc highmem for initrds\n");
- goto close_handles;
- }
-
- /* We've run out of free low memory. */
- if (initrd_addr > hdr->initrd_addr_max) {
- efi_printk("We've run out of free low memory\n");
- status = EFI_INVALID_PARAMETER;
- goto free_initrd_total;
- }
-
- addr = initrd_addr;
- for (j = 0; j < nr_initrds; j++) {
- u64 size;
-
- size = initrds[j].size;
- while (size) {
- u64 chunksize;
- if (size > EFI_READ_CHUNK_SIZE)
- chunksize = EFI_READ_CHUNK_SIZE;
- else
- chunksize = size;
- status = efi_call_phys3(fh->read,
- initrds[j].handle,
- &chunksize, addr);
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to read initrd\n");
- goto free_initrd_total;
- }
- addr += chunksize;
- size -= chunksize;
- }
-
- efi_call_phys1(fh->close, initrds[j].handle);
- }
-
- }
-
- efi_call_phys1(sys_table->boottime->free_pool, initrds);
-
- hdr->ramdisk_image = initrd_addr;
- hdr->ramdisk_size = initrd_total;
-
- return status;
-
-free_initrd_total:
- low_free(initrd_total, initrd_addr);
-
-close_handles:
- for (k = j; k < i; k++)
- efi_call_phys1(fh->close, initrds[k].handle);
-free_initrds:
- efi_call_phys1(sys_table->boottime->free_pool, initrds);
-fail:
- hdr->ramdisk_image = 0;
- hdr->ramdisk_size = 0;
-
- return status;
-}
/*
* Because the x86 boot code expects to be passed a boot_params we
@@ -875,14 +435,15 @@ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
struct efi_info *efi;
efi_loaded_image_t *image;
void *options;
- u32 load_options_size;
efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
int options_size = 0;
efi_status_t status;
- unsigned long cmdline;
+ char *cmdline_ptr;
u16 *s2;
u8 *s1;
int i;
+ unsigned long ramdisk_addr;
+ unsigned long ramdisk_size;
sys_table = _table;
@@ -893,13 +454,14 @@ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
status = efi_call_phys3(sys_table->boottime->handle_protocol,
handle, &proto, (void *)&image);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
+ efi_printk(sys_table, "Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
return NULL;
}
- status = low_alloc(0x4000, 1, (unsigned long *)&boot_params);
+ status = efi_low_alloc(sys_table, 0x4000, 1,
+ (unsigned long *)&boot_params);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc lowmem for boot params\n");
+ efi_printk(sys_table, "Failed to alloc lowmem for boot params\n");
return NULL;
}
@@ -926,40 +488,11 @@ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
hdr->type_of_loader = 0x21;
/* Convert unicode cmdline to ascii */
- options = image->load_options;
- load_options_size = image->load_options_size / 2; /* ASCII */
- cmdline = 0;
- s2 = (u16 *)options;
-
- if (s2) {
- while (*s2 && *s2 != '\n' && options_size < load_options_size) {
- s2++;
- options_size++;
- }
-
- if (options_size) {
- if (options_size > hdr->cmdline_size)
- options_size = hdr->cmdline_size;
-
- options_size++; /* NUL termination */
-
- status = low_alloc(options_size, 1, &cmdline);
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc mem for cmdline\n");
- goto fail;
- }
-
- s1 = (u8 *)(unsigned long)cmdline;
- s2 = (u16 *)options;
-
- for (i = 0; i < options_size - 1; i++)
- *s1++ = *s2++;
-
- *s1 = '\0';
- }
- }
-
- hdr->cmd_line_ptr = cmdline;
+ cmdline_ptr = efi_convert_cmdline_to_ascii(sys_table, image,
+ &options_size);
+ if (!cmdline_ptr)
+ goto fail;
+ hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
hdr->ramdisk_image = 0;
hdr->ramdisk_size = 0;
@@ -969,96 +502,64 @@ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
memset(sdt, 0, sizeof(*sdt));
- status = handle_ramdisks(image, hdr);
+ status = handle_cmdline_files(sys_table, image,
+ (char *)(unsigned long)hdr->cmd_line_ptr,
+ "initrd=", hdr->initrd_addr_max,
+ &ramdisk_addr, &ramdisk_size);
if (status != EFI_SUCCESS)
goto fail2;
+ hdr->ramdisk_image = ramdisk_addr;
+ hdr->ramdisk_size = ramdisk_size;
return boot_params;
fail2:
- if (options_size)
- low_free(options_size, hdr->cmd_line_ptr);
+ efi_free(sys_table, options_size, hdr->cmd_line_ptr);
fail:
- low_free(0x4000, (unsigned long)boot_params);
+ efi_free(sys_table, 0x4000, (unsigned long)boot_params);
return NULL;
}
-static efi_status_t exit_boot(struct boot_params *boot_params,
- void *handle)
+static void add_e820ext(struct boot_params *params,
+ struct setup_data *e820ext, u32 nr_entries)
{
- struct efi_info *efi = &boot_params->efi_info;
- struct e820entry *e820_map = &boot_params->e820_map[0];
- struct e820entry *prev = NULL;
- unsigned long size, key, desc_size, _size;
- efi_memory_desc_t *mem_map;
+ struct setup_data *data;
efi_status_t status;
- __u32 desc_version;
- bool called_exit = false;
- u8 nr_entries;
- int i;
-
- size = sizeof(*mem_map) * 32;
-
-again:
- size += sizeof(*mem_map) * 2;
- _size = size;
- status = low_alloc(size, 1, (unsigned long *)&mem_map);
- if (status != EFI_SUCCESS)
- return status;
-
-get_map:
- status = efi_call_phys5(sys_table->boottime->get_memory_map, &size,
- mem_map, &key, &desc_size, &desc_version);
- if (status == EFI_BUFFER_TOO_SMALL) {
- low_free(_size, (unsigned long)mem_map);
- goto again;
- }
+ unsigned long size;
- if (status != EFI_SUCCESS)
- goto free_mem_map;
+ e820ext->type = SETUP_E820_EXT;
+ e820ext->len = nr_entries * sizeof(struct e820entry);
+ e820ext->next = 0;
- memcpy(&efi->efi_loader_signature, EFI_LOADER_SIGNATURE, sizeof(__u32));
- efi->efi_systab = (unsigned long)sys_table;
- efi->efi_memdesc_size = desc_size;
- efi->efi_memdesc_version = desc_version;
- efi->efi_memmap = (unsigned long)mem_map;
- efi->efi_memmap_size = size;
-
-#ifdef CONFIG_X86_64
- efi->efi_systab_hi = (unsigned long)sys_table >> 32;
- efi->efi_memmap_hi = (unsigned long)mem_map >> 32;
-#endif
+ data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
- /* Might as well exit boot services now */
- status = efi_call_phys2(sys_table->boottime->exit_boot_services,
- handle, key);
- if (status != EFI_SUCCESS) {
- /*
- * ExitBootServices() will fail if any of the event
- * handlers change the memory map. In which case, we
- * must be prepared to retry, but only once so that
- * we're guaranteed to exit on repeated failures instead
- * of spinning forever.
- */
- if (called_exit)
- goto free_mem_map;
+ while (data && data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
- called_exit = true;
- goto get_map;
- }
+ if (data)
+ data->next = (unsigned long)e820ext;
+ else
+ params->hdr.setup_data = (unsigned long)e820ext;
+}
- /* Historic? */
- boot_params->alt_mem_k = 32 * 1024;
+static efi_status_t setup_e820(struct boot_params *params,
+ struct setup_data *e820ext, u32 e820ext_size)
+{
+ struct e820entry *e820_map = &params->e820_map[0];
+ struct efi_info *efi = &params->efi_info;
+ struct e820entry *prev = NULL;
+ u32 nr_entries;
+ u32 nr_desc;
+ int i;
- /*
- * Convert the EFI memory map to E820.
- */
nr_entries = 0;
- for (i = 0; i < size / desc_size; i++) {
+ nr_desc = efi->efi_memmap_size / efi->efi_memdesc_size;
+
+ for (i = 0; i < nr_desc; i++) {
efi_memory_desc_t *d;
unsigned int e820_type = 0;
- unsigned long m = (unsigned long)mem_map;
+ unsigned long m = efi->efi_memmap;
- d = (efi_memory_desc_t *)(m + (i * desc_size));
+ d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
switch (d->type) {
case EFI_RESERVED_TYPE:
case EFI_RUNTIME_SERVICES_CODE:
@@ -1095,61 +596,151 @@ get_map:
/* Merge adjacent mappings */
if (prev && prev->type == e820_type &&
- (prev->addr + prev->size) == d->phys_addr)
+ (prev->addr + prev->size) == d->phys_addr) {
prev->size += d->num_pages << 12;
- else {
- e820_map->addr = d->phys_addr;
- e820_map->size = d->num_pages << 12;
- e820_map->type = e820_type;
- prev = e820_map++;
- nr_entries++;
+ continue;
+ }
+
+ if (nr_entries == ARRAY_SIZE(params->e820_map)) {
+ u32 need = (nr_desc - i) * sizeof(struct e820entry) +
+ sizeof(struct setup_data);
+
+ if (!e820ext || e820ext_size < need)
+ return EFI_BUFFER_TOO_SMALL;
+
+ /* boot_params map full, switch to e820 extended */
+ e820_map = (struct e820entry *)e820ext->data;
}
+
+ e820_map->addr = d->phys_addr;
+ e820_map->size = d->num_pages << PAGE_SHIFT;
+ e820_map->type = e820_type;
+ prev = e820_map++;
+ nr_entries++;
}
- boot_params->e820_entries = nr_entries;
+ if (nr_entries > ARRAY_SIZE(params->e820_map)) {
+ u32 nr_e820ext = nr_entries - ARRAY_SIZE(params->e820_map);
+
+ add_e820ext(params, e820ext, nr_e820ext);
+ nr_entries -= nr_e820ext;
+ }
+
+ params->e820_entries = (u8)nr_entries;
return EFI_SUCCESS;
+}
+
+static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
+ u32 *e820ext_size)
+{
+ efi_status_t status;
+ unsigned long size;
+
+ size = sizeof(struct setup_data) +
+ sizeof(struct e820entry) * nr_desc;
+
+ if (*e820ext) {
+ efi_call_phys1(sys_table->boottime->free_pool, *e820ext);
+ *e820ext = NULL;
+ *e820ext_size = 0;
+ }
+
+ status = efi_call_phys3(sys_table->boottime->allocate_pool,
+ EFI_LOADER_DATA, size, e820ext);
+
+ if (status == EFI_SUCCESS)
+ *e820ext_size = size;
-free_mem_map:
- low_free(_size, (unsigned long)mem_map);
return status;
}
-static efi_status_t relocate_kernel(struct setup_header *hdr)
+static efi_status_t exit_boot(struct boot_params *boot_params,
+ void *handle)
{
- unsigned long start, nr_pages;
+ struct efi_info *efi = &boot_params->efi_info;
+ unsigned long map_sz, key, desc_size;
+ efi_memory_desc_t *mem_map;
+ struct setup_data *e820ext;
+ __u32 e820ext_size;
+ __u32 nr_desc, prev_nr_desc;
efi_status_t status;
+ __u32 desc_version;
+ bool called_exit = false;
+ u8 nr_entries;
+ int i;
- /*
- * The EFI firmware loader could have placed the kernel image
- * anywhere in memory, but the kernel has various restrictions
- * on the max physical address it can run at. Attempt to move
- * the kernel to boot_params.pref_address, or as low as
- * possible.
- */
- start = hdr->pref_address;
- nr_pages = round_up(hdr->init_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ nr_desc = 0;
+ e820ext = NULL;
+ e820ext_size = 0;
- status = efi_call_phys4(sys_table->boottime->allocate_pages,
- EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
- nr_pages, &start);
- if (status != EFI_SUCCESS) {
- status = low_alloc(hdr->init_size, hdr->kernel_alignment,
- &start);
+get_map:
+ status = efi_get_memory_map(sys_table, &mem_map, &map_sz, &desc_size,
+ &desc_version, &key);
+
+ if (status != EFI_SUCCESS)
+ return status;
+
+ prev_nr_desc = nr_desc;
+ nr_desc = map_sz / desc_size;
+ if (nr_desc > prev_nr_desc &&
+ nr_desc > ARRAY_SIZE(boot_params->e820_map)) {
+ u32 nr_e820ext = nr_desc - ARRAY_SIZE(boot_params->e820_map);
+
+ status = alloc_e820ext(nr_e820ext, &e820ext, &e820ext_size);
if (status != EFI_SUCCESS)
- efi_printk("Failed to alloc mem for kernel\n");
+ goto free_mem_map;
+
+ efi_call_phys1(sys_table->boottime->free_pool, mem_map);
+ goto get_map; /* Allocated memory, get map again */
}
- if (status == EFI_SUCCESS)
- memcpy((void *)start, (void *)(unsigned long)hdr->code32_start,
- hdr->init_size);
+ memcpy(&efi->efi_loader_signature, EFI_LOADER_SIGNATURE, sizeof(__u32));
+ efi->efi_systab = (unsigned long)sys_table;
+ efi->efi_memdesc_size = desc_size;
+ efi->efi_memdesc_version = desc_version;
+ efi->efi_memmap = (unsigned long)mem_map;
+ efi->efi_memmap_size = map_sz;
+
+#ifdef CONFIG_X86_64
+ efi->efi_systab_hi = (unsigned long)sys_table >> 32;
+ efi->efi_memmap_hi = (unsigned long)mem_map >> 32;
+#endif
- hdr->pref_address = hdr->code32_start;
- hdr->code32_start = (__u32)start;
+ /* Might as well exit boot services now */
+ status = efi_call_phys2(sys_table->boottime->exit_boot_services,
+ handle, key);
+ if (status != EFI_SUCCESS) {
+ /*
+ * ExitBootServices() will fail if any of the event
+ * handlers change the memory map. In which case, we
+ * must be prepared to retry, but only once so that
+ * we're guaranteed to exit on repeated failures instead
+ * of spinning forever.
+ */
+ if (called_exit)
+ goto free_mem_map;
+ called_exit = true;
+ efi_call_phys1(sys_table->boottime->free_pool, mem_map);
+ goto get_map;
+ }
+
+ /* Historic? */
+ boot_params->alt_mem_k = 32 * 1024;
+
+ status = setup_e820(boot_params, e820ext, e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ return EFI_SUCCESS;
+
+free_mem_map:
+ efi_call_phys1(sys_table->boottime->free_pool, mem_map);
return status;
}
+
/*
* On success we return a pointer to a boot_params structure, and NULL
* on failure.
@@ -1157,7 +748,7 @@ static efi_status_t relocate_kernel(struct setup_header *hdr)
struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
struct boot_params *boot_params)
{
- struct desc_ptr *gdt, *idt;
+ struct desc_ptr *gdt;
efi_loaded_image_t *image;
struct setup_header *hdr = &boot_params->hdr;
efi_status_t status;
@@ -1177,37 +768,33 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
EFI_LOADER_DATA, sizeof(*gdt),
(void **)&gdt);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc mem for gdt structure\n");
+ efi_printk(sys_table, "Failed to alloc mem for gdt structure\n");
goto fail;
}
gdt->size = 0x800;
- status = low_alloc(gdt->size, 8, (unsigned long *)&gdt->address);
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc mem for gdt\n");
- goto fail;
- }
-
- status = efi_call_phys3(sys_table->boottime->allocate_pool,
- EFI_LOADER_DATA, sizeof(*idt),
- (void **)&idt);
+ status = efi_low_alloc(sys_table, gdt->size, 8,
+ (unsigned long *)&gdt->address);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc mem for idt structure\n");
+ efi_printk(sys_table, "Failed to alloc mem for gdt\n");
goto fail;
}
- idt->size = 0;
- idt->address = 0;
-
/*
* If the kernel isn't already loaded at the preferred load
* address, relocate it.
*/
if (hdr->pref_address != hdr->code32_start) {
- status = relocate_kernel(hdr);
-
+ unsigned long bzimage_addr = hdr->code32_start;
+ status = efi_relocate_kernel(sys_table, &bzimage_addr,
+ hdr->init_size, hdr->init_size,
+ hdr->pref_address,
+ hdr->kernel_alignment);
if (status != EFI_SUCCESS)
goto fail;
+
+ hdr->pref_address = hdr->code32_start;
+ hdr->code32_start = bzimage_addr;
}
status = exit_boot(boot_params, handle);
@@ -1267,10 +854,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
desc->base2 = 0x00;
#endif /* CONFIG_X86_64 */
- asm volatile ("lidt %0" : : "m" (*idt));
- asm volatile ("lgdt %0" : : "m" (*gdt));
-
asm volatile("cli");
+ asm volatile ("lgdt %0" : : "m" (*gdt));
return boot_params;
fail:
diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h
index e5b0a8f91c5f..81b6b652b46a 100644
--- a/arch/x86/boot/compressed/eboot.h
+++ b/arch/x86/boot/compressed/eboot.h
@@ -11,9 +11,6 @@
#define DESC_TYPE_CODE_DATA (1 << 0)
-#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
-#define EFI_READ_CHUNK_SIZE (1024 * 1024)
-
#define EFI_CONSOLE_OUT_DEVICE_GUID \
EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, \
0x3f, 0xc1, 0x4d)
@@ -62,10 +59,4 @@ struct efi_uga_draw_protocol {
void *blt;
};
-struct efi_simple_text_output_protocol {
- void *reset;
- void *output_string;
- void *test_string;
-};
-
#endif /* BOOT_COMPRESSED_EBOOT_H */
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 5d6f6891b188..9116aac232c7 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -117,9 +117,11 @@ preferred_addr:
addl %eax, %ebx
notl %eax
andl %eax, %ebx
-#else
- movl $LOAD_PHYSICAL_ADDR, %ebx
+ cmpl $LOAD_PHYSICAL_ADDR, %ebx
+ jge 1f
#endif
+ movl $LOAD_PHYSICAL_ADDR, %ebx
+1:
/* Target address to relocate to for decompression */
addl $z_extract_offset, %ebx
@@ -191,14 +193,14 @@ relocated:
leal boot_heap(%ebx), %eax
pushl %eax /* heap area */
pushl %esi /* real mode pointer */
- call decompress_kernel
+ call decompress_kernel /* returns kernel location in %eax */
addl $24, %esp
/*
* Jump to the decompressed kernel.
*/
xorl %ebx, %ebx
- jmp *%ebp
+ jmp *%eax
/*
* Stack and heap for uncompression
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index c337422b575d..c5c1ae0997e7 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -94,9 +94,11 @@ ENTRY(startup_32)
addl %eax, %ebx
notl %eax
andl %eax, %ebx
-#else
- movl $LOAD_PHYSICAL_ADDR, %ebx
+ cmpl $LOAD_PHYSICAL_ADDR, %ebx
+ jge 1f
#endif
+ movl $LOAD_PHYSICAL_ADDR, %ebx
+1:
/* Target address to relocate to for decompression */
addl $z_extract_offset, %ebx
@@ -269,9 +271,11 @@ preferred_addr:
addq %rax, %rbp
notq %rax
andq %rax, %rbp
-#else
- movq $LOAD_PHYSICAL_ADDR, %rbp
+ cmpq $LOAD_PHYSICAL_ADDR, %rbp
+ jge 1f
#endif
+ movq $LOAD_PHYSICAL_ADDR, %rbp
+1:
/* Target address to relocate to for decompression */
leaq z_extract_offset(%rbp), %rbx
@@ -339,13 +343,13 @@ relocated:
movl $z_input_len, %ecx /* input_len */
movq %rbp, %r8 /* output target address */
movq $z_output_len, %r9 /* decompressed length */
- call decompress_kernel
+ call decompress_kernel /* returns kernel location in %rax */
popq %rsi
/*
* Jump to the decompressed kernel.
*/
- jmp *%rbp
+ jmp *%rax
.code32
no_longmode:
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 434f077d2c4d..196eaf373a06 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -112,14 +112,8 @@ struct boot_params *real_mode; /* Pointer to real-mode data */
void *memset(void *s, int c, size_t n);
void *memcpy(void *dest, const void *src, size_t n);
-#ifdef CONFIG_X86_64
-#define memptr long
-#else
-#define memptr unsigned
-#endif
-
-static memptr free_mem_ptr;
-static memptr free_mem_end_ptr;
+memptr free_mem_ptr;
+memptr free_mem_end_ptr;
static char *vidmem;
static int vidport;
@@ -395,7 +389,7 @@ static void parse_elf(void *output)
free(phdrs);
}
-asmlinkage void decompress_kernel(void *rmode, memptr heap,
+asmlinkage void *decompress_kernel(void *rmode, memptr heap,
unsigned char *input_data,
unsigned long input_len,
unsigned char *output,
@@ -422,6 +416,10 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
free_mem_ptr = heap; /* Heap */
free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
+ output = choose_kernel_location(input_data, input_len,
+ output, output_len);
+
+ /* Validate memory location choices. */
if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
error("Destination address inappropriately aligned");
#ifdef CONFIG_X86_64
@@ -441,5 +439,5 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
parse_elf(output);
handle_relocations(output, output_len);
debug_putstr("done.\nBooting the kernel.\n");
- return;
+ return output;
}
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 674019d8e235..24e3e569a13c 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -23,7 +23,15 @@
#define BOOT_BOOT_H
#include "../ctype.h"
+#ifdef CONFIG_X86_64
+#define memptr long
+#else
+#define memptr unsigned
+#endif
+
/* misc.c */
+extern memptr free_mem_ptr;
+extern memptr free_mem_end_ptr;
extern struct boot_params *real_mode; /* Pointer to real-mode data */
void __putstr(const char *s);
#define error_putstr(__x) __putstr(__x)
@@ -39,23 +47,40 @@ static inline void debug_putstr(const char *s)
#endif
-#ifdef CONFIG_EARLY_PRINTK
-
+#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE
/* cmdline.c */
int cmdline_find_option(const char *option, char *buffer, int bufsize);
int cmdline_find_option_bool(const char *option);
+#endif
-/* early_serial_console.c */
-extern int early_serial_base;
-void console_init(void);
+#if CONFIG_RANDOMIZE_BASE
+/* aslr.c */
+unsigned char *choose_kernel_location(unsigned char *input,
+ unsigned long input_size,
+ unsigned char *output,
+ unsigned long output_size);
+/* cpuflags.c */
+bool has_cpuflag(int flag);
#else
+static inline
+unsigned char *choose_kernel_location(unsigned char *input,
+ unsigned long input_size,
+ unsigned char *output,
+ unsigned long output_size)
+{
+ return output;
+}
+#endif
+#ifdef CONFIG_EARLY_PRINTK
/* early_serial_console.c */
+extern int early_serial_base;
+void console_init(void);
+#else
static const int early_serial_base;
static inline void console_init(void)
{ }
-
#endif
#endif
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index 958a641483dd..b669ab65bf6c 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -36,11 +36,12 @@ int main(int argc, char *argv[])
uint32_t olen;
long ilen;
unsigned long offs;
- FILE *f;
+ FILE *f = NULL;
+ int retval = 1;
if (argc < 2) {
fprintf(stderr, "Usage: %s compressed_file\n", argv[0]);
- return 1;
+ goto bail;
}
/* Get the information for the compressed kernel image first */
@@ -48,7 +49,7 @@ int main(int argc, char *argv[])
f = fopen(argv[1], "r");
if (!f) {
perror(argv[1]);
- return 1;
+ goto bail;
}
@@ -58,12 +59,11 @@ int main(int argc, char *argv[])
if (fread(&olen, sizeof(olen), 1, f) != 1) {
perror(argv[1]);
- return 1;
+ goto bail;
}
ilen = ftell(f);
olen = get_unaligned_le32(&olen);
- fclose(f);
/*
* Now we have the input (compressed) and output (uncompressed)
@@ -91,5 +91,9 @@ int main(int argc, char *argv[])
printf(".incbin \"%s\"\n", argv[1]);
printf("input_data_end:\n");
- return 0;
+ retval = 0;
+bail:
+ if (f)
+ fclose(f);
+ return retval;
}
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 4d3ff037201f..100a9a10076a 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -28,8 +28,6 @@
#include <asm/required-features.h>
#include <asm/msr-index.h>
-struct cpu_features cpu;
-static u32 cpu_vendor[3];
static u32 err_flags[NCAPINTS];
static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;
@@ -69,92 +67,8 @@ static int is_transmeta(void)
cpu_vendor[2] == A32('M', 'x', '8', '6');
}
-static int has_fpu(void)
-{
- u16 fcw = -1, fsw = -1;
- u32 cr0;
-
- asm("movl %%cr0,%0" : "=r" (cr0));
- if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
- cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
- asm volatile("movl %0,%%cr0" : : "r" (cr0));
- }
-
- asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
- : "+m" (fsw), "+m" (fcw));
-
- return fsw == 0 && (fcw & 0x103f) == 0x003f;
-}
-
-static int has_eflag(u32 mask)
-{
- u32 f0, f1;
-
- asm("pushfl ; "
- "pushfl ; "
- "popl %0 ; "
- "movl %0,%1 ; "
- "xorl %2,%1 ; "
- "pushl %1 ; "
- "popfl ; "
- "pushfl ; "
- "popl %1 ; "
- "popfl"
- : "=&r" (f0), "=&r" (f1)
- : "ri" (mask));
-
- return !!((f0^f1) & mask);
-}
-
-static void get_flags(void)
-{
- u32 max_intel_level, max_amd_level;
- u32 tfms;
-
- if (has_fpu())
- set_bit(X86_FEATURE_FPU, cpu.flags);
-
- if (has_eflag(X86_EFLAGS_ID)) {
- asm("cpuid"
- : "=a" (max_intel_level),
- "=b" (cpu_vendor[0]),
- "=d" (cpu_vendor[1]),
- "=c" (cpu_vendor[2])
- : "a" (0));
-
- if (max_intel_level >= 0x00000001 &&
- max_intel_level <= 0x0000ffff) {
- asm("cpuid"
- : "=a" (tfms),
- "=c" (cpu.flags[4]),
- "=d" (cpu.flags[0])
- : "a" (0x00000001)
- : "ebx");
- cpu.level = (tfms >> 8) & 15;
- cpu.model = (tfms >> 4) & 15;
- if (cpu.level >= 6)
- cpu.model += ((tfms >> 16) & 0xf) << 4;
- }
-
- asm("cpuid"
- : "=a" (max_amd_level)
- : "a" (0x80000000)
- : "ebx", "ecx", "edx");
-
- if (max_amd_level >= 0x80000001 &&
- max_amd_level <= 0x8000ffff) {
- u32 eax = 0x80000001;
- asm("cpuid"
- : "+a" (eax),
- "=c" (cpu.flags[6]),
- "=d" (cpu.flags[1])
- : : "ebx");
- }
- }
-}
-
/* Returns a bitmask of which words we have error bits in */
-static int check_flags(void)
+static int check_cpuflags(void)
{
u32 err;
int i;
@@ -187,8 +101,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
if (has_eflag(X86_EFLAGS_AC))
cpu.level = 4;
- get_flags();
- err = check_flags();
+ get_cpuflags();
+ err = check_cpuflags();
if (test_bit(X86_FEATURE_LM, cpu.flags))
cpu.level = 64;
@@ -207,8 +121,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
eax &= ~(1 << 15);
asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
- get_flags(); /* Make sure it really did something */
- err = check_flags();
+ get_cpuflags(); /* Make sure it really did something */
+ err = check_cpuflags();
} else if (err == 0x01 &&
!(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
is_centaur() && cpu.model >= 6) {
@@ -223,7 +137,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
set_bit(X86_FEATURE_CX8, cpu.flags);
- err = check_flags();
+ err = check_cpuflags();
} else if (err == 0x01 && is_transmeta()) {
/* Transmeta might have masked feature bits in word 0 */
@@ -238,7 +152,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
: : "ecx", "ebx");
asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
- err = check_flags();
+ err = check_cpuflags();
}
if (err_flags_ptr)
diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c
new file mode 100644
index 000000000000..a9fcb7cfb241
--- /dev/null
+++ b/arch/x86/boot/cpuflags.c
@@ -0,0 +1,104 @@
+#include <linux/types.h>
+#include "bitops.h"
+
+#include <asm/processor-flags.h>
+#include <asm/required-features.h>
+#include <asm/msr-index.h>
+#include "cpuflags.h"
+
+struct cpu_features cpu;
+u32 cpu_vendor[3];
+
+static bool loaded_flags;
+
+static int has_fpu(void)
+{
+ u16 fcw = -1, fsw = -1;
+ unsigned long cr0;
+
+ asm volatile("mov %%cr0,%0" : "=r" (cr0));
+ if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
+ cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
+ asm volatile("mov %0,%%cr0" : : "r" (cr0));
+ }
+
+ asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+ : "+m" (fsw), "+m" (fcw));
+
+ return fsw == 0 && (fcw & 0x103f) == 0x003f;
+}
+
+int has_eflag(unsigned long mask)
+{
+ unsigned long f0, f1;
+
+ asm volatile("pushf \n\t"
+ "pushf \n\t"
+ "pop %0 \n\t"
+ "mov %0,%1 \n\t"
+ "xor %2,%1 \n\t"
+ "push %1 \n\t"
+ "popf \n\t"
+ "pushf \n\t"
+ "pop %1 \n\t"
+ "popf"
+ : "=&r" (f0), "=&r" (f1)
+ : "ri" (mask));
+
+ return !!((f0^f1) & mask);
+}
+
+/* Handle x86_32 PIC using ebx. */
+#if defined(__i386__) && defined(__PIC__)
+# define EBX_REG "=r"
+#else
+# define EBX_REG "=b"
+#endif
+
+static inline void cpuid(u32 id, u32 *a, u32 *b, u32 *c, u32 *d)
+{
+ asm volatile(".ifnc %%ebx,%3 ; movl %%ebx,%3 ; .endif \n\t"
+ "cpuid \n\t"
+ ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif \n\t"
+ : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b)
+ : "a" (id)
+ );
+}
+
+void get_cpuflags(void)
+{
+ u32 max_intel_level, max_amd_level;
+ u32 tfms;
+ u32 ignored;
+
+ if (loaded_flags)
+ return;
+ loaded_flags = true;
+
+ if (has_fpu())
+ set_bit(X86_FEATURE_FPU, cpu.flags);
+
+ if (has_eflag(X86_EFLAGS_ID)) {
+ cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2],
+ &cpu_vendor[1]);
+
+ if (max_intel_level >= 0x00000001 &&
+ max_intel_level <= 0x0000ffff) {
+ cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
+ &cpu.flags[0]);
+ cpu.level = (tfms >> 8) & 15;
+ cpu.model = (tfms >> 4) & 15;
+ if (cpu.level >= 6)
+ cpu.model += ((tfms >> 16) & 0xf) << 4;
+ }
+
+ cpuid(0x80000000, &max_amd_level, &ignored, &ignored,
+ &ignored);
+
+ if (max_amd_level >= 0x80000001 &&
+ max_amd_level <= 0x8000ffff) {
+ cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
+ &cpu.flags[1]);
+ }
+ }
+}
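For comparison from userspace, a stand-alone example (GCC/Clang <cpuid.h>, not boot code) that prints the vendor string in the same EBX/EDX/ECX order that get_cpuflags() above stores into cpu_vendor[]:

  #include <stdio.h>
  #include <string.h>
  #include <cpuid.h>

  int main(void)
  {
          unsigned int eax, ebx, ecx, edx;
          char vendor[13];

          if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
                  return 1;

          memcpy(vendor + 0, &ebx, 4);    /* cpu_vendor[0] */
          memcpy(vendor + 4, &edx, 4);    /* cpu_vendor[1] */
          memcpy(vendor + 8, &ecx, 4);    /* cpu_vendor[2] */
          vendor[12] = '\0';

          printf("%s (max basic leaf %u)\n", vendor, eax);
          return 0;
  }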
diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h
new file mode 100644
index 000000000000..ea97697e51e4
--- /dev/null
+++ b/arch/x86/boot/cpuflags.h
@@ -0,0 +1,19 @@
+#ifndef BOOT_CPUFLAGS_H
+#define BOOT_CPUFLAGS_H
+
+#include <asm/cpufeature.h>
+#include <asm/processor-flags.h>
+
+struct cpu_features {
+ int level; /* Family, or 64 for x86-64 */
+ int model;
+ u32 flags[NCAPINTS];
+};
+
+extern struct cpu_features cpu;
+extern u32 cpu_vendor[3];
+
+int has_eflag(unsigned long mask);
+void get_cpuflags(void);
+
+#endif
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index c941d6a8887f..8e15b22391fc 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -5,14 +5,15 @@
*/
/*
- * This file builds a disk-image from two different files:
+ * This file builds a disk-image from three different files:
*
* - setup: 8086 machine code, sets up system parm
* - system: 80386 code for actual system
+ * - zoffset.h: header with ZO_* defines
*
- * It does some checking that all files are of the correct type, and
- * just writes the result to stdout, removing headers and padding to
- * the right amount. It also writes some system data to stderr.
+ * It does some checking that all files are of the correct type, and writes
+ * the result to the specified destination, removing headers and padding to
+ * the right amount. It also writes some system data to stdout.
*/
/*
@@ -136,7 +137,7 @@ static void die(const char * str, ...)
static void usage(void)
{
- die("Usage: build setup system [zoffset.h] [> image]");
+ die("Usage: build setup system zoffset.h image");
}
#ifdef CONFIG_EFI_STUB
@@ -265,7 +266,7 @@ int main(int argc, char ** argv)
int c;
u32 sys_size;
struct stat sb;
- FILE *file;
+ FILE *file, *dest;
int fd;
void *kernel;
u32 crc = 0xffffffffUL;
@@ -280,10 +281,13 @@ int main(int argc, char ** argv)
startup_64 = 0x200;
#endif
- if (argc == 4)
- parse_zoffset(argv[3]);
- else if (argc != 3)
+ if (argc != 5)
usage();
+ parse_zoffset(argv[3]);
+
+ dest = fopen(argv[4], "w");
+ if (!dest)
+ die("Unable to write `%s': %m", argv[4]);
/* Copy the setup code */
file = fopen(argv[1], "r");
@@ -318,7 +322,7 @@ int main(int argc, char ** argv)
/* Set the default root device */
put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
- fprintf(stderr, "Setup is %d bytes (padded to %d bytes).\n", c, i);
+ printf("Setup is %d bytes (padded to %d bytes).\n", c, i);
/* Open and stat the kernel file */
fd = open(argv[2], O_RDONLY);
@@ -327,7 +331,7 @@ int main(int argc, char ** argv)
if (fstat(fd, &sb))
die("Unable to stat `%s': %m", argv[2]);
sz = sb.st_size;
- fprintf (stderr, "System is %d kB\n", (sz+1023)/1024);
+ printf("System is %d kB\n", (sz+1023)/1024);
kernel = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
if (kernel == MAP_FAILED)
die("Unable to mmap '%s': %m", argv[2]);
@@ -348,27 +352,31 @@ int main(int argc, char ** argv)
#endif
crc = partial_crc32(buf, i, crc);
- if (fwrite(buf, 1, i, stdout) != i)
+ if (fwrite(buf, 1, i, dest) != i)
die("Writing setup failed");
/* Copy the kernel code */
crc = partial_crc32(kernel, sz, crc);
- if (fwrite(kernel, 1, sz, stdout) != sz)
+ if (fwrite(kernel, 1, sz, dest) != sz)
die("Writing kernel failed");
/* Add padding leaving 4 bytes for the checksum */
while (sz++ < (sys_size*16) - 4) {
crc = partial_crc32_one('\0', crc);
- if (fwrite("\0", 1, 1, stdout) != 1)
+ if (fwrite("\0", 1, 1, dest) != 1)
die("Writing padding failed");
}
/* Write the CRC */
- fprintf(stderr, "CRC %x\n", crc);
+ printf("CRC %x\n", crc);
put_unaligned_le32(crc, buf);
- if (fwrite(buf, 1, 4, stdout) != 4)
+ if (fwrite(buf, 1, 4, dest) != 4)
die("Writing CRC failed");
+ /* Catch any delayed write failures */
+ if (fclose(dest))
+ die("Writing image failed");
+
close(fd);
/* Everything is OK */
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 7d6ba9db1be9..e0fc24db234a 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -3,8 +3,9 @@
#
avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
+avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
+ $(comma)4)$(comma)%ymm2,yes,no)
-obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o
obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index f80e668785c0..835488b745ee 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -34,7 +34,7 @@
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/crypto/aes.h>
-#include <asm/crypto/ablk_helper.h>
+#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index 414fe5d7946b..4209a76fcdaa 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
@@ -21,7 +22,6 @@
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/camellia.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 37fd0c0a81ea..87a041a10f4a 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
@@ -21,7 +22,6 @@
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/camellia.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index c6631813dc11..e6a3700489b9 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -26,13 +26,13 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/cast5.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#define CAST5_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 8d0dfb86a559..09f3677393e4 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/cast6.h>
#include <crypto/cryptd.h>
@@ -37,7 +38,6 @@
#include <crypto/xts.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#define CAST6_PARALLEL_BLOCKS 8
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 23aabc6c20a5..2fae489b1524 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
@@ -22,7 +23,6 @@
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/serpent-avx.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#define SERPENT_AVX2_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 9ae83cf8d21e..ff4870870972 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
@@ -38,7 +39,6 @@
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/serpent-avx.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
/* 8-way parallel cipher functions */
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 97a356ece24d..8c95f8637306 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -34,6 +34,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
@@ -42,7 +43,6 @@
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/serpent-sse2.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 50226c4b86ed..f248546da1ca 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -281,7 +281,7 @@ static int __init sha256_ssse3_mod_init(void)
/* allow AVX to override SSSE3, it's a little faster */
if (avx_usable()) {
#ifdef CONFIG_AS_AVX2
- if (boot_cpu_has(X86_FEATURE_AVX2))
+ if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2))
sha256_transform_asm = sha256_transform_rorx;
else
#endif
@@ -319,4 +319,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
MODULE_ALIAS("sha256");
-MODULE_ALIAS("sha384");
+MODULE_ALIAS("sha224");
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index a62ba541884e..4e3c665be129 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <crypto/cryptd.h>
@@ -39,7 +40,6 @@
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/twofish.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#include <crypto/scatterwalk.h>
#include <linux/workqueue.h>
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index b1977bad5435..c8c1e700c26e 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -26,6 +26,7 @@
#include <acpi/pdc_intel.h>
#include <asm/numa.h>
+#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mpspec.h>
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
index 0d9ec770f2f8..e6a92455740e 100644
--- a/arch/x86/include/asm/archrandom.h
+++ b/arch/x86/include/asm/archrandom.h
@@ -39,6 +39,20 @@
#ifdef CONFIG_ARCH_RANDOM
+/* Instead of arch_get_random_long() when alternatives haven't run. */
+static inline int rdrand_long(unsigned long *v)
+{
+ int ok;
+ asm volatile("1: " RDRAND_LONG "\n\t"
+ "jc 2f\n\t"
+ "decl %0\n\t"
+ "jnz 1b\n\t"
+ "2:"
+ : "=r" (ok), "=a" (*v)
+ : "0" (RDRAND_RETRY_LOOPS));
+ return ok;
+}
+
#define GET_RANDOM(name, type, rdrand, nop) \
static inline int name(type *v) \
{ \
@@ -68,6 +82,13 @@ GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
#endif /* CONFIG_X86_64 */
+#else
+
+static inline int rdrand_long(unsigned long *v)
+{
+ return 0;
+}
+
#endif /* CONFIG_ARCH_RANDOM */
extern void x86_init_rdrand(struct cpuinfo_x86 *c);
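
The new rdrand_long() helper retries RDRAND up to RDRAND_RETRY_LOOPS times because the instruction can transiently fail (carry flag clear) while the on-chip DRNG has no conditioned data ready; the caller gets a non-zero return only when *v really holds a random value. A rough user-space analogue of the same retry pattern using the RDRAND intrinsic (build with -mrdrnd; a sketch, not the kernel code):

#include <immintrin.h>
#include <stdio.h>

#define RDRAND_RETRY_LOOPS 10	/* kernel uses 10 retries; reused here */

/* Returns 1 and stores a random value on success, 0 if RDRAND kept failing. */
static int rdrand_long(unsigned long long *v)
{
	int i;

	for (i = 0; i < RDRAND_RETRY_LOOPS; i++)
		if (_rdrand64_step(v))
			return 1;
	return 0;
}

int main(void)
{
	unsigned long long r;

	if (rdrand_long(&r))
		printf("rdrand: %#llx\n", r);
	else
		puts("rdrand unavailable");
	return 0;
}
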
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 722aa3b04624..da31c8b8a92d 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -6,6 +6,7 @@
#include <asm/processor.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
+#include <asm/rmwcc.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -76,12 +77,7 @@ static inline void atomic_sub(int i, atomic_t *v)
*/
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
- : "+m" (v->counter), "=qm" (c)
- : "ir" (i) : "memory");
- return c;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, i, "%0", "e");
}
/**
@@ -118,12 +114,7 @@ static inline void atomic_dec(atomic_t *v)
*/
static inline int atomic_dec_and_test(atomic_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "decl %0; sete %1"
- : "+m" (v->counter), "=qm" (c)
- : : "memory");
- return c != 0;
+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
}
/**
@@ -136,12 +127,7 @@ static inline int atomic_dec_and_test(atomic_t *v)
*/
static inline int atomic_inc_and_test(atomic_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "incl %0; sete %1"
- : "+m" (v->counter), "=qm" (c)
- : : "memory");
- return c != 0;
+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
}
/**
@@ -155,12 +141,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
*/
static inline int atomic_add_negative(int i, atomic_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
- : "+m" (v->counter), "=qm" (c)
- : "ir" (i) : "memory");
- return c;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, i, "%0", "s");
}
/**
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 0e1cbfc8ee06..3f065c985aee 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -72,12 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
*/
static inline int atomic64_sub_and_test(long i, atomic64_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
- : "=m" (v->counter), "=qm" (c)
- : "er" (i), "m" (v->counter) : "memory");
- return c;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, i, "%0", "e");
}
/**
@@ -116,12 +111,7 @@ static inline void atomic64_dec(atomic64_t *v)
*/
static inline int atomic64_dec_and_test(atomic64_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "decq %0; sete %1"
- : "=m" (v->counter), "=qm" (c)
- : "m" (v->counter) : "memory");
- return c != 0;
+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
}
/**
@@ -134,12 +124,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
*/
static inline int atomic64_inc_and_test(atomic64_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "incq %0; sete %1"
- : "=m" (v->counter), "=qm" (c)
- : "m" (v->counter) : "memory");
- return c != 0;
+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
}
/**
@@ -153,12 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
*/
static inline int atomic64_add_negative(long i, atomic64_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
- : "=m" (v->counter), "=qm" (c)
- : "er" (i), "m" (v->counter) : "memory");
- return c;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, i, "%0", "s");
}
/**
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 41639ce8fd63..6d76d0935989 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -14,6 +14,7 @@
#include <linux/compiler.h>
#include <asm/alternative.h>
+#include <asm/rmwcc.h>
#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
@@ -204,12 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*/
static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
- int oldbit;
-
- asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
- "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
- return oldbit;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, nr, "%0", "c");
}
/**
@@ -255,13 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- int oldbit;
-
- asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
- "sbb %0,%0"
- : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
- return oldbit;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, nr, "%0", "c");
}
/**
@@ -314,13 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
*/
static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
- int oldbit;
-
- asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
- "sbb %0,%0"
- : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
- return oldbit;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, nr, "%0", "c");
}
static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 0fa675033912..cb4c73bfeb48 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -48,6 +48,8 @@ For 32-bit we have the following conventions - kernel is built with
#include <asm/dwarf2.h>
+#ifdef CONFIG_X86_64
+
/*
* 64-bit system call stack frame layout defines and helpers,
* for assembly code:
@@ -192,3 +194,51 @@ For 32-bit we have the following conventions - kernel is built with
.macro icebp
.byte 0xf1
.endm
+
+#else /* CONFIG_X86_64 */
+
+/*
+ * For 32-bit, these are simplified versions of SAVE_ALL/RESTORE_ALL. They
+ * differ from the entry_32.S versions in that they do not change the
+ * segment registers, so they are only suitable for in-kernel use, not when
+ * transitioning from or to user space. The resulting stack frame is not a
+ * standard pt_regs frame. The main use case is calling C code from
+ * assembler when all the registers need to be preserved.
+ */
+
+ .macro SAVE_ALL
+ pushl_cfi %eax
+ CFI_REL_OFFSET eax, 0
+ pushl_cfi %ebp
+ CFI_REL_OFFSET ebp, 0
+ pushl_cfi %edi
+ CFI_REL_OFFSET edi, 0
+ pushl_cfi %esi
+ CFI_REL_OFFSET esi, 0
+ pushl_cfi %edx
+ CFI_REL_OFFSET edx, 0
+ pushl_cfi %ecx
+ CFI_REL_OFFSET ecx, 0
+ pushl_cfi %ebx
+ CFI_REL_OFFSET ebx, 0
+ .endm
+
+ .macro RESTORE_ALL
+ popl_cfi %ebx
+ CFI_RESTORE ebx
+ popl_cfi %ecx
+ CFI_RESTORE ecx
+ popl_cfi %edx
+ CFI_RESTORE edx
+ popl_cfi %esi
+ CFI_RESTORE esi
+ popl_cfi %edi
+ CFI_RESTORE edi
+ popl_cfi %ebp
+ CFI_RESTORE ebp
+ popl_cfi %eax
+ CFI_RESTORE eax
+ .endm
+
+#endif /* CONFIG_X86_64 */
+
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 0062a0125041..65c6e6e3a552 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -109,6 +109,8 @@ static inline bool efi_is_native(void)
return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}
+extern struct console early_efi_console;
+
#else
/*
* IF EFI is not configured, have the EFI calls return -ENOSYS.
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
new file mode 100644
index 000000000000..459769d39263
--- /dev/null
+++ b/arch/x86/include/asm/intel-mid.h
@@ -0,0 +1,113 @@
+/*
+ * intel-mid.h: Intel MID specific setup code
+ *
+ * (C) Copyright 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _ASM_X86_INTEL_MID_H
+#define _ASM_X86_INTEL_MID_H
+
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+
+extern int intel_mid_pci_init(void);
+extern int get_gpio_by_name(const char *name);
+extern void intel_scu_device_register(struct platform_device *pdev);
+extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
+extern int __init sfi_parse_mtmr(struct sfi_table_header *table);
+extern int sfi_mrtc_num;
+extern struct sfi_rtc_table_entry sfi_mrtc_array[];
+
+/*
+ * This defines the array of device platform data that IAFW exports through
+ * the SFI "DEVS" table; we use name and type to match a device with its
+ * platform data.
+ */
+struct devs_id {
+ char name[SFI_NAME_LEN + 1];
+ u8 type;
+ u8 delay;
+ void *(*get_platform_data)(void *info);
+ /* Custom handler for devices */
+ void (*device_handler)(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev);
+};
+
+#define sfi_device(i) \
+ static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \
+ __attribute__((__section__(".x86_intel_mid_dev.init"))) = &i
+
+/*
+ * Medfield is the follow-up to Moorestown; it combines the two-chip solution
+ * into one. It also adds always-on, constant TSC and LAPIC timers.
+ * Medfield is the platform name, and the chip is called Penwell; we treat
+ * Medfield/Penwell as a variant of Moorestown. Penwell can be identified
+ * via MSRs.
+ */
+enum intel_mid_cpu_type {
+ /* 1 was Moorestown */
+ INTEL_MID_CPU_CHIP_PENWELL = 2,
+};
+
+extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
+
+#ifdef CONFIG_X86_INTEL_MID
+
+static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
+{
+ return __intel_mid_cpu_chip;
+}
+
+static inline bool intel_mid_has_msic(void)
+{
+ return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL);
+}
+
+#else /* !CONFIG_X86_INTEL_MID */
+
+#define intel_mid_identify_cpu() (0)
+#define intel_mid_has_msic() (0)
+
+#endif /* !CONFIG_X86_INTEL_MID */
+
+enum intel_mid_timer_options {
+ INTEL_MID_TIMER_DEFAULT,
+ INTEL_MID_TIMER_APBT_ONLY,
+ INTEL_MID_TIMER_LAPIC_APBT,
+};
+
+extern enum intel_mid_timer_options intel_mid_timer_options;
+
+/*
+ * Penwell uses spread spectrum clock, so the freq number is not exactly
+ * the same as reported by MSR based on SDM.
+ */
+#define PENWELL_FSB_FREQ_83SKU 83200
+#define PENWELL_FSB_FREQ_100SKU 99840
+
+#define SFI_MTMR_MAX_NUM 8
+#define SFI_MRTC_MAX 8
+
+extern struct console early_mrst_console;
+extern void mrst_early_console_init(void);
+
+extern struct console early_hsu_console;
+extern void hsu_early_console_init(const char *);
+
+extern void intel_scu_devices_create(void);
+extern void intel_scu_devices_destroy(void);
+
+/* VRTC timer */
+#define MRST_VRTC_MAP_SZ (1024)
+/*#define MRST_VRTC_PGOFFSET (0xc00) */
+
+extern void intel_mid_rtc_init(void);
+
+/* the offset for the mapping of global gpio pin to irq */
+#define INTEL_MID_IRQ_OFFSET 0x100
+
+#endif /* _ASM_X86_INTEL_MID_H */
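
The devs_id/sfi_device() pair above is how board code hooks into the SFI "DEVS" parser: each entry supplies a name/type to match against the firmware table plus a get_platform_data() callback, and sfi_device() drops a pointer to the entry into the .x86_intel_mid_dev.init section that the parser walks. A minimal, hypothetical entry in a board/device file might look like the sketch below (the device name, type and callback body are illustrative only, not taken from this patch):

static void *example_platform_data(void *info)
{
	struct sfi_device_table_entry *pentry = info;

	/* Build and return the platform_data for this device; as a
	 * placeholder we just hand back the firmware-provided name. */
	return pentry->name;
}

static const struct devs_id example_dev __initconst = {
	.name = "example_dev",
	.type = SFI_DEV_TYPE_I2C,
	.delay = 1,
	.get_platform_data = &example_platform_data,
};

sfi_device(example_dev);
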
diff --git a/arch/x86/include/asm/mrst-vrtc.h b/arch/x86/include/asm/intel_mid_vrtc.h
index 1e69a75412a4..86ff4685c409 100644
--- a/arch/x86/include/asm/mrst-vrtc.h
+++ b/arch/x86/include/asm/intel_mid_vrtc.h
@@ -1,5 +1,5 @@
-#ifndef _MRST_VRTC_H
-#define _MRST_VRTC_H
+#ifndef _INTEL_MID_VRTC_H
+#define _INTEL_MID_VRTC_H
extern unsigned char vrtc_cmos_read(unsigned char reg);
extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index 2c37aadcbc35..32ce71375b21 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -21,7 +21,7 @@ enum die_val {
DIE_NMIUNKNOWN,
};
-extern void printk_address(unsigned long address, int reliable);
+extern void printk_address(unsigned long address);
extern void die(const char *, struct pt_regs *,long);
extern int __must_check __die(const char *, struct pt_regs *, long);
extern void show_trace(struct task_struct *t, struct pt_regs *regs,
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 15f960c06ff7..24ec1216596e 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -274,13 +274,17 @@ struct x86_emulate_ctxt {
bool guest_mode; /* guest running a nested guest */
bool perm_ok; /* do not check permissions if true */
- bool only_vendor_specific_insn;
+ bool ud; /* inject an #UD if host doesn't support insn */
bool have_exception;
struct x86_exception exception;
- /* decode cache */
- u8 twobyte;
+ /*
+ * decode cache
+ */
+
+ /* current opcode length in bytes */
+ u8 opcode_len;
u8 b;
u8 intercept;
u8 lock_prefix;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c76ff74a98f2..de388c55e7ec 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -79,6 +79,13 @@
#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
+static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
+{
+ /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
+ return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
+ (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
+}
+
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03
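
gfn_to_index() above converts a guest frame number into its slot index at a given page-table level, relative to the memslot's base_gfn. A worked example, assuming the usual x86 definition KVM_HPAGE_GFN_SHIFT(level) == 9 * (level - 1), i.e. 0/9/18 for 4KiB/2MiB/1GiB pages (a standalone sketch, not kernel code):

#include <stdio.h>

typedef unsigned long long gfn_t;

#define HPAGE_GFN_SHIFT(level)	(9 * ((level) - 1))

static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	return (gfn >> HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> HPAGE_GFN_SHIFT(level));
}

int main(void)
{
	/* A gfn 0x345 pages into a slot based at 0x12000, with 2MiB pages
	 * (level 2): it falls into the second 512-page region of the slot. */
	printf("%llu\n", gfn_to_index(0x12345, 0x12000, 2));	/* prints 1 */
	return 0;
}
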
@@ -253,7 +260,6 @@ struct kvm_pio_request {
* mode.
*/
struct kvm_mmu {
- void (*new_cr3)(struct kvm_vcpu *vcpu);
void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
@@ -261,7 +267,6 @@ struct kvm_mmu {
bool prefault);
void (*inject_page_fault)(struct kvm_vcpu *vcpu,
struct x86_exception *fault);
- void (*free)(struct kvm_vcpu *vcpu);
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
struct x86_exception *exception);
gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
@@ -389,6 +394,8 @@ struct kvm_vcpu_arch {
struct fpu guest_fpu;
u64 xcr0;
+ u64 guest_supported_xcr0;
+ u32 guest_xstate_size;
struct kvm_pio_request pio;
void *pio_data;
@@ -557,7 +564,9 @@ struct kvm_arch {
struct list_head assigned_dev_head;
struct iommu_domain *iommu_domain;
- int iommu_flags;
+ bool iommu_noncoherent;
+#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
+ atomic_t noncoherent_dma_count;
struct kvm_pic *vpic;
struct kvm_ioapic *vioapic;
struct kvm_pit *vpit;
@@ -780,11 +789,11 @@ void kvm_mmu_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
-int kvm_mmu_setup(struct kvm_vcpu *vcpu);
+void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask);
-int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
+void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
@@ -922,6 +931,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
void kvm_enable_tdp(void);
void kvm_disable_tdp(void);
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 2d89e3980cbd..5b23e605e707 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -52,12 +52,7 @@ static inline void local_sub(long i, local_t *l)
*/
static inline int local_sub_and_test(long i, local_t *l)
{
- unsigned char c;
-
- asm volatile(_ASM_SUB "%2,%0; sete %1"
- : "+m" (l->a.counter), "=qm" (c)
- : "ir" (i) : "memory");
- return c;
+ GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, i, "%0", "e");
}
/**
@@ -70,12 +65,7 @@ static inline int local_sub_and_test(long i, local_t *l)
*/
static inline int local_dec_and_test(local_t *l)
{
- unsigned char c;
-
- asm volatile(_ASM_DEC "%0; sete %1"
- : "+m" (l->a.counter), "=qm" (c)
- : : "memory");
- return c != 0;
+ GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
}
/**
@@ -88,12 +78,7 @@ static inline int local_dec_and_test(local_t *l)
*/
static inline int local_inc_and_test(local_t *l)
{
- unsigned char c;
-
- asm volatile(_ASM_INC "%0; sete %1"
- : "+m" (l->a.counter), "=qm" (c)
- : : "memory");
- return c != 0;
+ GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
}
/**
@@ -107,12 +92,7 @@ static inline int local_inc_and_test(local_t *l)
*/
static inline int local_add_negative(long i, local_t *l)
{
- unsigned char c;
-
- asm volatile(_ASM_ADD "%2,%0; sets %1"
- : "+m" (l->a.counter), "=qm" (c)
- : "ir" (i) : "memory");
- return c;
+ GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, i, "%0", "s");
}
/**
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index cbe6b9e404ce..c696a8687567 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -16,6 +16,7 @@
#define MCG_EXT_CNT_SHIFT 16
#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
+#define MCG_ELOG_P (1ULL<<26) /* Extended error log supported */
/* MCG_STATUS register defines */
#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
diff --git a/arch/x86/include/asm/misc.h b/arch/x86/include/asm/misc.h
new file mode 100644
index 000000000000..475f5bbc7f53
--- /dev/null
+++ b/arch/x86/include/asm/misc.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_MISC_H
+#define _ASM_X86_MISC_H
+
+int num_digits(int val);
+
+#endif /* _ASM_X86_MISC_H */
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 626cf70082d7..3142a94c7b4b 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -94,7 +94,7 @@ static inline void early_reserve_e820_mpc_new(void) { }
#define default_get_smp_config x86_init_uint_noop
#endif
-void generic_processor_info(int apicid, int version);
+int generic_processor_info(int apicid, int version);
#ifdef CONFIG_ACPI
extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
deleted file mode 100644
index fc18bf3ce7c8..000000000000
--- a/arch/x86/include/asm/mrst.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * mrst.h: Intel Moorestown platform specific setup code
- *
- * (C) Copyright 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-#ifndef _ASM_X86_MRST_H
-#define _ASM_X86_MRST_H
-
-#include <linux/sfi.h>
-
-extern int pci_mrst_init(void);
-extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
-extern int sfi_mrtc_num;
-extern struct sfi_rtc_table_entry sfi_mrtc_array[];
-
-/*
- * Medfield is the follow-up of Moorestown, it combines two chip solution into
- * one. Other than that it also added always-on and constant tsc and lapic
- * timers. Medfield is the platform name, and the chip name is called Penwell
- * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be
- * identified via MSRs.
- */
-enum mrst_cpu_type {
- /* 1 was Moorestown */
- MRST_CPU_CHIP_PENWELL = 2,
-};
-
-extern enum mrst_cpu_type __mrst_cpu_chip;
-
-#ifdef CONFIG_X86_INTEL_MID
-
-static inline enum mrst_cpu_type mrst_identify_cpu(void)
-{
- return __mrst_cpu_chip;
-}
-
-#else /* !CONFIG_X86_INTEL_MID */
-
-#define mrst_identify_cpu() (0)
-
-#endif /* !CONFIG_X86_INTEL_MID */
-
-enum mrst_timer_options {
- MRST_TIMER_DEFAULT,
- MRST_TIMER_APBT_ONLY,
- MRST_TIMER_LAPIC_APBT,
-};
-
-extern enum mrst_timer_options mrst_timer_options;
-
-/*
- * Penwell uses spread spectrum clock, so the freq number is not exactly
- * the same as reported by MSR based on SDM.
- */
-#define PENWELL_FSB_FREQ_83SKU 83200
-#define PENWELL_FSB_FREQ_100SKU 99840
-
-#define SFI_MTMR_MAX_NUM 8
-#define SFI_MRTC_MAX 8
-
-extern struct console early_mrst_console;
-extern void mrst_early_console_init(void);
-
-extern struct console early_hsu_console;
-extern void hsu_early_console_init(const char *);
-
-extern void intel_scu_devices_create(void);
-extern void intel_scu_devices_destroy(void);
-
-/* VRTC timer */
-#define MRST_VRTC_MAP_SZ (1024)
-/*#define MRST_VRTC_PGOFFSET (0xc00) */
-
-extern void mrst_rtc_init(void);
-
-#endif /* _ASM_X86_MRST_H */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index cb7502852acb..e139b13f2a33 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -218,10 +218,14 @@ void msrs_free(struct msr *msrs);
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else /* CONFIG_SMP */
@@ -235,6 +239,16 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
wrmsr(msr_no, l, h);
return 0;
}
+static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+ rdmsrl(msr_no, *q);
+ return 0;
+}
+static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+ wrmsrl(msr_no, q);
+ return 0;
+}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
struct msr *msrs)
{
@@ -254,6 +268,14 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
return wrmsr_safe(msr_no, l, h);
}
+static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+ return rdmsrl_safe(msr_no, q);
+}
+static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+ return wrmsrl_safe(msr_no, q);
+}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
return rdmsr_safe_regs(regs);
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 43dcd804ebd5..8de6d9cf3b95 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -39,9 +39,18 @@
#define __VIRTUAL_MASK_SHIFT 47
/*
- * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
- * arch/x86/kernel/head_64.S), and it is mapped here:
+ * Kernel image size is limited to 1GiB due to the fixmap living in the
+ * next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). Use
+ * 512MiB by default, leaving 1.5GiB for modules once the page tables
+ * are fully set up. If kernel ASLR is configured, it can extend the
+ * kernel page table mapping, reducing the size of the modules area.
*/
-#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
+#define KERNEL_IMAGE_SIZE_DEFAULT (512 * 1024 * 1024)
+#if defined(CONFIG_RANDOMIZE_BASE) && \
+ CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE_DEFAULT
+#define KERNEL_IMAGE_SIZE CONFIG_RANDOMIZE_BASE_MAX_OFFSET
+#else
+#define KERNEL_IMAGE_SIZE KERNEL_IMAGE_SIZE_DEFAULT
+#endif
#endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0da5200ee79d..94220d14d5cc 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -128,7 +128,8 @@ do { \
do { \
typedef typeof(var) pao_T__; \
const int pao_ID__ = (__builtin_constant_p(val) && \
- ((val) == 1 || (val) == -1)) ? (val) : 0; \
+ ((val) == 1 || (val) == -1)) ? \
+ (int)(val) : 0; \
if (0) { \
pao_T__ pao_tmp__; \
pao_tmp__ = (val); \
@@ -377,9 +378,6 @@ do { \
#define __this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
#define __this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
#define __this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
-#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
-#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
-#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
#define __this_cpu_xchg_1(pcp, val) percpu_xchg_op(pcp, val)
#define __this_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val)
#define __this_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val)
@@ -399,9 +397,6 @@ do { \
#define this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
-#define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
-#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
-#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
@@ -446,7 +441,6 @@ do { \
#define __this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
#define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
-#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
@@ -456,7 +450,6 @@ do { \
#define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
-#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 2d883440cb9a..c883bf726398 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -58,7 +58,7 @@ typedef struct { pteval_t pte; } pte_t;
#define VMALLOC_START _AC(0xffffc90000000000, UL)
#define VMALLOC_END _AC(0xffffe8ffffffffff, UL)
#define VMEMMAP_START _AC(0xffffea0000000000, UL)
-#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
+#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
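
The two hunks above are consistent: with the default 512MiB image size the computed MODULES_VADDR lands exactly on the old hard-coded constant, and only a larger randomization range changes the split. A quick check of the arithmetic (assuming __START_KERNEL_map is 0xffffffff80000000, the usual x86-64 value):

/* Sanity check of the new MODULES_VADDR definition (assumed constants):
 *   __START_KERNEL_map          = 0xffffffff80000000
 *   KERNEL_IMAGE_SIZE (default) = 512 MiB = 0x20000000
 *   MODULES_VADDR = 0xffffffff80000000 + 0x20000000
 *                 = 0xffffffffa0000000   (the old hard-coded value)
 *   MODULES_LEN   = 0xffffffffff000000 - 0xffffffffa0000000
 *                 = 0x5f000000 = 1520 MiB (~1.5 GiB for modules)
 * A larger CONFIG_RANDOMIZE_BASE_MAX_OFFSET raises KERNEL_IMAGE_SIZE and
 * shrinks MODULES_LEN accordingly.
 */
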
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
new file mode 100644
index 000000000000..8729723636fd
--- /dev/null
+++ b/arch/x86/include/asm/preempt.h
@@ -0,0 +1,100 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <asm/rmwcc.h>
+#include <asm/percpu.h>
+#include <linux/thread_info.h>
+
+DECLARE_PER_CPU(int, __preempt_count);
+
+/*
+ * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
+ * that think a non-zero value indicates we cannot preempt.
+ */
+static __always_inline int preempt_count(void)
+{
+ return __this_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
+}
+
+static __always_inline void preempt_count_set(int pc)
+{
+ __this_cpu_write_4(__preempt_count, pc);
+}
+
+/*
+ * must be macros to avoid header recursion hell
+ */
+#define task_preempt_count(p) \
+ (task_thread_info(p)->saved_preempt_count & ~PREEMPT_NEED_RESCHED)
+
+#define init_task_preempt_count(p) do { \
+ task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
+ per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+} while (0)
+
+/*
+ * We fold the NEED_RESCHED bit into the preempt count such that
+ * preempt_enable() can decrement and test for needing to reschedule with a
+ * single instruction.
+ *
+ * We invert the actual bit, so that when the decrement hits 0 we know we both
+ * need to resched (the bit is cleared) and can resched (no preempt count).
+ */
+
+static __always_inline void set_preempt_need_resched(void)
+{
+ __this_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+ __this_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+ return !(__this_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
+}
+
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+ __this_cpu_add_4(__preempt_count, val);
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+ __this_cpu_add_4(__preempt_count, -val);
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+ GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(void)
+{
+ return unlikely(!__this_cpu_read_4(__preempt_count));
+}
+
+#ifdef CONFIG_PREEMPT
+ extern asmlinkage void ___preempt_schedule(void);
+# define __preempt_schedule() asm ("call ___preempt_schedule")
+ extern asmlinkage void preempt_schedule(void);
+# ifdef CONFIG_CONTEXT_TRACKING
+ extern asmlinkage void ___preempt_schedule_context(void);
+# define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
+# endif
+#endif
+
+#endif /* __ASM_PREEMPT_H */
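
To make the inversion above concrete: the per-cpu count keeps PREEMPT_NEED_RESCHED *set* while no reschedule is pending, so the single decl in __preempt_count_dec_and_test() reads zero exactly when the preempt count reached zero and a reschedule was requested. A minimal user-space model of that invariant (the constant and initial value are assumptions mirroring the shape of the trick, not copied from the kernel headers):

#include <stdio.h>
#include <stdbool.h>

#define PREEMPT_NEED_RESCHED	0x80000000u

/* Bit set   => no reschedule pending (note the inversion).
 * Bit clear => reschedule pending. */
static unsigned int preempt_count_raw = PREEMPT_NEED_RESCHED; /* enabled, idle */

static void preempt_disable(void)  { preempt_count_raw++; }
static void set_need_resched(void) { preempt_count_raw &= ~PREEMPT_NEED_RESCHED; }

static bool preempt_enable_needs_resched(void)
{
	/* One decrement plus test for zero: zero means the count hit 0 *and*
	 * the (inverted) bit is clear, i.e. we can and should reschedule. */
	return --preempt_count_raw == 0;
}

int main(void)
{
	preempt_disable();
	set_need_resched();
	printf("resched on enable: %d\n", preempt_enable_needs_resched()); /* 1 */
	return 0;
}
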
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index bade6ac3b14f..fbeb06ed0eaa 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -39,10 +39,5 @@ static inline void x86_dtb_init(void) { }
extern char cmd_line[COMMAND_LINE_SIZE];
-#define pci_address_to_pio pci_address_to_pio
-unsigned long pci_address_to_pio(phys_addr_t addr);
-
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
new file mode 100644
index 000000000000..1ff990f1de8e
--- /dev/null
+++ b/arch/x86/include/asm/rmwcc.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_X86_RMWcc
+#define _ASM_X86_RMWcc
+
+#ifdef CC_HAVE_ASM_GOTO
+
+#define __GEN_RMWcc(fullop, var, cc, ...) \
+do { \
+ asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
+ : : "m" (var), ## __VA_ARGS__ \
+ : "memory" : cc_label); \
+ return 0; \
+cc_label: \
+ return 1; \
+} while (0)
+
+#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
+ __GEN_RMWcc(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \
+ __GEN_RMWcc(op " %1, " arg0, var, cc, "er" (val))
+
+#else /* !CC_HAVE_ASM_GOTO */
+
+#define __GEN_RMWcc(fullop, var, cc, ...) \
+do { \
+ char c; \
+ asm volatile (fullop "; set" cc " %1" \
+ : "+m" (var), "=qm" (c) \
+ : __VA_ARGS__ : "memory"); \
+ return c != 0; \
+} while (0)
+
+#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
+ __GEN_RMWcc(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \
+ __GEN_RMWcc(op " %2, " arg0, var, cc, "er" (val))
+
+#endif /* CC_HAVE_ASM_GOTO */
+
+#endif /* _ASM_X86_RMWcc */
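
rmwcc.h is the machinery behind the atomic/bitops/local conversions earlier in this diff: with asm goto the condition code jumps straight to a C label, so no sete/sbb and no extra test are emitted in the caller; without asm goto the old setcc-into-a-byte pattern is kept as a fallback. A rough standalone illustration of the two shapes for a "lock decl, did it hit zero?" primitive (a sketch, not the kernel macros):

#include <stdio.h>

/* setcc fallback: materialize the flag into a byte register. */
static int dec_and_test_setcc(int *v)
{
	char c;

	asm volatile("lock decl %0; sete %1"
		     : "+m" (*v), "=qm" (c)
		     : : "memory");
	return c != 0;
}

/* asm goto form: the condition code drives a branch straight to a C label,
 * so the caller's "if (...)" needs no extra test instruction. */
static int dec_and_test_goto(int *v)
{
	asm goto("lock decl %0; je %l[hit_zero]"
		 : : "m" (*v) : "memory" : hit_zero);
	return 0;
hit_zero:
	return 1;
}

int main(void)
{
	int a = 2, b = 1;

	printf("%d %d\n", dec_and_test_setcc(&a), dec_and_test_goto(&b)); /* 0 1 */
	return 0;
}
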
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 347555492dad..59bcf4e22418 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -51,9 +51,9 @@ extern void i386_reserve_resources(void);
extern void setup_default_timer_irq(void);
#ifdef CONFIG_X86_INTEL_MID
-extern void x86_mrst_early_setup(void);
+extern void x86_intel_mid_early_setup(void);
#else
-static inline void x86_mrst_early_setup(void) { }
+static inline void x86_intel_mid_early_setup(void) { }
#endif
#ifdef CONFIG_X86_INTEL_CE
diff --git a/arch/x86/include/asm/simd.h b/arch/x86/include/asm/simd.h
new file mode 100644
index 000000000000..ee80b92f0096
--- /dev/null
+++ b/arch/x86/include/asm/simd.h
@@ -0,0 +1,11 @@
+
+#include <asm/i387.h>
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ * instructions or access the SIMD register file
+ */
+static __must_check inline bool may_use_simd(void)
+{
+ return irq_fpu_usable();
+}
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 27811190cbd7..c46a46be1ec6 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -28,8 +28,7 @@ struct thread_info {
__u32 flags; /* low level flags */
__u32 status; /* thread synchronous flags */
__u32 cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable,
- <0 => BUG */
+ int saved_preempt_count;
mm_segment_t addr_limit;
struct restart_block restart_block;
void __user *sysenter_return;
@@ -49,7 +48,7 @@ struct thread_info {
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
- .preempt_count = INIT_PREEMPT_COUNT, \
+ .saved_preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
.restart_block = { \
.fn = do_no_restart_syscall, \
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 5838fa911aa0..8ec57c07b125 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -542,5 +542,103 @@ extern struct movsl_mask {
# include <asm/uaccess_64.h>
#endif
+unsigned long __must_check _copy_from_user(void *to, const void __user *from,
+ unsigned n);
+unsigned long __must_check _copy_to_user(void __user *to, const void *from,
+ unsigned n);
+
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+# define copy_user_diag __compiletime_error
+#else
+# define copy_user_diag __compiletime_warning
+#endif
+
+extern void copy_user_diag("copy_from_user() buffer size is too small")
+copy_from_user_overflow(void);
+extern void copy_user_diag("copy_to_user() buffer size is too small")
+copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
+
+#undef copy_user_diag
+
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+
+extern void
+__compiletime_warning("copy_from_user() buffer size is not provably correct")
+__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
+#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
+
+extern void
+__compiletime_warning("copy_to_user() buffer size is not provably correct")
+__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
+#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
+
+#else
+
+static inline void
+__copy_from_user_overflow(int size, unsigned long count)
+{
+ WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
+#define __copy_to_user_overflow __copy_from_user_overflow
+
+#endif
+
+static inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(to);
+
+ might_fault();
+
+ /*
+ * While we would like to have the compiler do the checking for us
+ * even in the non-constant size case, any false positives there are
+ * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
+ * without - the [hopefully] dangerous looking nature of the warning
+ * would make people go look at the respective call sites over and
+ * over again just to find that there's no problem).
+ *
+ * And there are cases where it's just not realistic for the compiler
+ * to prove the count to be in range. For example when multiple call
+ * sites of a helper function - perhaps in different source files -
+ * all doing proper range checking, yet the helper function not doing
+ * so again.
+ *
+ * Therefore limit the compile time checking to the constant size
+ * case, and do only runtime checking for non-constant sizes.
+ */
+
+ if (likely(sz < 0 || sz >= n))
+ n = _copy_from_user(to, from, n);
+ else if (__builtin_constant_p(n))
+ copy_from_user_overflow();
+ else
+ __copy_from_user_overflow(sz, n);
+
+ return n;
+}
+
+static inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(from);
+
+ might_fault();
+
+ /* See the comment in copy_from_user() above. */
+ if (likely(sz < 0 || sz >= n))
+ n = _copy_to_user(to, from, n);
+ else if (__builtin_constant_p(n))
+ copy_to_user_overflow();
+ else
+ __copy_to_user_overflow(sz, n);
+
+ return n;
+}
+
+#undef __copy_from_user_overflow
+#undef __copy_to_user_overflow
+
#endif /* _ASM_X86_UACCESS_H */
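
The unified copy_from_user()/copy_to_user() above split the overflow check in two: a bad size that is a compile-time constant resolves to the __compiletime_error/__compiletime_warning stub, a non-constant bad size falls back to a runtime WARN, and __compiletime_object_size() reports -1 when the destination size is unknown so the copy proceeds unchecked. A rough user-space analogue built on the underlying GCC builtins (checked_copy() is a hypothetical helper, and __builtin_object_size() only resolves with optimization enabled):

#include <stdio.h>
#include <string.h>

#define checked_copy(to, from, n)					\
do {									\
	size_t __sz = __builtin_object_size(to, 0);			\
	size_t __n = (n);						\
	if (__sz != (size_t)-1 && __n > __sz) {				\
		if (__builtin_constant_p(__n))				\
			/* kernel: compile-time warning/error */	\
			fprintf(stderr, "constant overflow caught\n");	\
		else							\
			/* kernel: WARN(1, "Buffer overflow ...") */	\
			fprintf(stderr, "runtime overflow %zu > %zu\n",	\
				__n, __sz);				\
	} else {							\
		memcpy(to, from, __n);					\
	}								\
} while (0)

int main(int argc, char **argv)
{
	char dst[8], src[32] = "source data";
	size_t runtime_len = 8 + 8 * (size_t)argc;	/* not a constant */

	(void)argv;
	checked_copy(dst, src, sizeof(dst));	/* fits: copied */
	checked_copy(dst, src, runtime_len);	/* too big: runtime report */
	return 0;
}
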
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 7f760a9f1f61..3c03a5de64d3 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -184,33 +184,4 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
return __copy_from_user_ll_nocache_nozero(to, from, n);
}
-unsigned long __must_check copy_to_user(void __user *to,
- const void *from, unsigned long n);
-unsigned long __must_check _copy_from_user(void *to,
- const void __user *from,
- unsigned long n);
-
-
-extern void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
- __compiletime_error("copy_from_user() buffer size is not provably correct")
-#else
- __compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
-
-static inline unsigned long __must_check copy_from_user(void *to,
- const void __user *from,
- unsigned long n)
-{
- int sz = __compiletime_object_size(to);
-
- if (likely(sz == -1 || sz >= n))
- n = _copy_from_user(to, from, n);
- else
- copy_from_user_overflow();
-
- return n;
-}
-
#endif /* _ASM_X86_UACCESS_32_H */
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 4f7923dd0007..190413d0de57 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -46,42 +46,13 @@ copy_user_generic(void *to, const void *from, unsigned len)
}
__must_check unsigned long
-_copy_to_user(void __user *to, const void *from, unsigned len);
-__must_check unsigned long
-_copy_from_user(void *to, const void __user *from, unsigned len);
-__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
-static inline unsigned long __must_check copy_from_user(void *to,
- const void __user *from,
- unsigned long n)
-{
- int sz = __compiletime_object_size(to);
-
- might_fault();
- if (likely(sz == -1 || sz >= n))
- n = _copy_from_user(to, from, n);
-#ifdef CONFIG_DEBUG_VM
- else
- WARN(1, "Buffer overflow detected!\n");
-#endif
- return n;
-}
-
static __always_inline __must_check
-int copy_to_user(void __user *dst, const void *src, unsigned size)
-{
- might_fault();
-
- return _copy_to_user(dst, src, size);
-}
-
-static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
int ret = 0;
- might_fault();
if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size);
switch (size) {
@@ -121,11 +92,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
}
static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+ might_fault();
+ return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
int ret = 0;
- might_fault();
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size);
switch (size) {
@@ -165,6 +142,13 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
}
static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+ might_fault();
+ return __copy_to_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
int ret = 0;
@@ -220,13 +204,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
- return copy_user_generic(dst, (__force const void *)src, size);
+ return __copy_from_user_nocheck(dst, (__force const void *)src, size);
}
static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
- return copy_user_generic((__force void *)dst, src, size);
+ return __copy_to_user_nocheck((__force void *)dst, src, size);
}
extern long __copy_user_nocache(void *dst, const void __user *src,
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 062921ef34e9..8b1283daa332 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -12,7 +12,15 @@ extern enum uv_system_type get_uv_system_type(void);
extern int is_uv_system(void);
extern void uv_cpu_init(void);
extern void uv_nmi_init(void);
+extern void uv_register_nmi_notifier(void);
extern void uv_system_init(void);
+extern void (*uv_trace_nmi_func)(unsigned int reason, struct pt_regs *regs);
+extern void (*uv_trace_func)(const char *f, const int l, const char *fmt, ...);
+#define uv_trace(fmt, ...) \
+do { \
+ if (unlikely(uv_trace_func)) \
+ (uv_trace_func)(__func__, __LINE__, fmt, ##__VA_ARGS__);\
+} while (0)
extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
unsigned long start,
@@ -25,6 +33,8 @@ static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
static inline int is_uv_system(void) { return 0; }
static inline void uv_cpu_init(void) { }
static inline void uv_system_init(void) { }
+static inline void uv_trace(void *fmt, ...) { }
+static inline void uv_register_nmi_notifier(void) { }
static inline const struct cpumask *
uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
unsigned long start, unsigned long end, unsigned int cpu)
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 2c32df95bb78..a30836c8ac4d 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -502,8 +502,8 @@ struct uv_blade_info {
unsigned short nr_online_cpus;
unsigned short pnode;
short memory_nid;
- spinlock_t nmi_lock;
- unsigned long nmi_count;
+ spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */
+ unsigned long nmi_count; /* obsolete, see uv_hub_nmi */
};
extern struct uv_blade_info *uv_blade_info;
extern short *uv_node_to_blade;
@@ -576,6 +576,59 @@ static inline int uv_num_possible_blades(void)
return uv_possible_blades;
}
+/* Per Hub NMI support */
+extern void uv_nmi_setup(void);
+
+/* The BMC sets a bit in this MMR before sending an NMI */
+#define UVH_NMI_MMR UVH_SCRATCH5
+#define UVH_NMI_MMR_CLEAR UVH_SCRATCH5_ALIAS
+#define UVH_NMI_MMR_SHIFT 63
+#define UVH_NMI_MMR_TYPE "SCRATCH5"
+
+/* Newer SMM NMI handler, not present in all systems */
+#define UVH_NMI_MMRX UVH_EVENT_OCCURRED0
+#define UVH_NMI_MMRX_CLEAR UVH_EVENT_OCCURRED0_ALIAS
+#define UVH_NMI_MMRX_SHIFT (is_uv1_hub() ? \
+ UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT :\
+ UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT)
+#define UVH_NMI_MMRX_TYPE "EXTIO_INT0"
+
+/* Non-zero indicates newer SMM NMI handler present */
+#define UVH_NMI_MMRX_SUPPORTED UVH_EXTIO_INT0_BROADCAST
+
+/* Indicates to BIOS that we want to use the newer SMM NMI handler */
+#define UVH_NMI_MMRX_REQ UVH_SCRATCH5_ALIAS_2
+#define UVH_NMI_MMRX_REQ_SHIFT 62
+
+struct uv_hub_nmi_s {
+ raw_spinlock_t nmi_lock;
+ atomic_t in_nmi; /* flag this node in UV NMI IRQ */
+ atomic_t cpu_owner; /* last locker of this struct */
+ atomic_t read_mmr_count; /* count of MMR reads */
+ atomic_t nmi_count; /* count of true UV NMIs */
+ unsigned long nmi_value; /* last value read from NMI MMR */
+};
+
+struct uv_cpu_nmi_s {
+ struct uv_hub_nmi_s *hub;
+ atomic_t state;
+ atomic_t pinging;
+ int queries;
+ int pings;
+};
+
+DECLARE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
+#define uv_cpu_nmi (__get_cpu_var(__uv_cpu_nmi))
+#define uv_hub_nmi (uv_cpu_nmi.hub)
+#define uv_cpu_nmi_per(cpu) (per_cpu(__uv_cpu_nmi, cpu))
+#define uv_hub_nmi_per(cpu) (uv_cpu_nmi_per(cpu).hub)
+
+/* uv_cpu_nmi_states */
+#define UV_NMI_STATE_OUT 0
+#define UV_NMI_STATE_IN 1
+#define UV_NMI_STATE_DUMP 2
+#define UV_NMI_STATE_DUMP_DONE 3
+
/* Update SCIR state */
static inline void uv_set_scir_bits(unsigned char value)
{
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index bd5f80e58a23..e42249bcf7e1 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -461,6 +461,23 @@ union uvh_event_occurred0_u {
/* ========================================================================= */
+/* UVH_EXTIO_INT0_BROADCAST */
+/* ========================================================================= */
+#define UVH_EXTIO_INT0_BROADCAST 0x61448UL
+#define UVH_EXTIO_INT0_BROADCAST_32 0x3f0
+
+#define UVH_EXTIO_INT0_BROADCAST_ENABLE_SHFT 0
+#define UVH_EXTIO_INT0_BROADCAST_ENABLE_MASK 0x0000000000000001UL
+
+union uvh_extio_int0_broadcast_u {
+ unsigned long v;
+ struct uvh_extio_int0_broadcast_s {
+ unsigned long enable:1; /* RW */
+ unsigned long rsvd_1_63:63;
+ } s;
+};
+
+/* ========================================================================= */
/* UVH_GR0_TLB_INT0_CONFIG */
/* ========================================================================= */
#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
@@ -2606,6 +2623,20 @@ union uvh_scratch5_u {
};
/* ========================================================================= */
+/* UVH_SCRATCH5_ALIAS */
+/* ========================================================================= */
+#define UVH_SCRATCH5_ALIAS 0x2d0208UL
+#define UVH_SCRATCH5_ALIAS_32 0x780
+
+
+/* ========================================================================= */
+/* UVH_SCRATCH5_ALIAS_2 */
+/* ========================================================================= */
+#define UVH_SCRATCH5_ALIAS_2 0x2d0210UL
+#define UVH_SCRATCH5_ALIAS_2_32 0x788
+
+
+/* ========================================================================= */
/* UVXH_EVENT_OCCURRED2 */
/* ========================================================================= */
#define UVXH_EVENT_OCCURRED2 0x70100UL
diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..7f02fe4e2c7b
--- /dev/null
+++ b/arch/x86/include/asm/xen/page-coherent.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_X86_XEN_PAGE_COHERENT_H
+#define _ASM_X86_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ void *vstart = (void*)__get_free_pages(flags, get_order(size));
+ *dma_handle = virt_to_phys(vstart);
+ return vstart;
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs) { }
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs) { }
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+#endif /* _ASM_X86_XEN_PAGE_COHERENT_H */
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index c15ddaf90710..9c3733c5f8f7 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -158,7 +158,7 @@ enum {
X86_SUBARCH_PC = 0,
X86_SUBARCH_LGUEST,
X86_SUBARCH_XEN,
- X86_SUBARCH_MRST,
+ X86_SUBARCH_INTEL_MID,
X86_SUBARCH_CE4100,
X86_NR_SUBARCHS,
};
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index b80420bcd09d..b8f1c0176cbc 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -27,6 +27,19 @@
#define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0)
/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/
#define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1)
+
+/*
+ * There is a single feature flag that signifies the presence of the MSR
+ * that can be used to retrieve both the local APIC Timer frequency as
+ * well as the TSC frequency.
+ */
+
+/* Local APIC timer frequency MSR (HV_X64_MSR_APIC_FREQUENCY) is available */
+#define HV_X64_MSR_APIC_FREQUENCY_AVAILABLE (1 << 11)
+
+/* TSC frequency MSR (HV_X64_MSR_TSC_FREQUENCY) is available */
+#define HV_X64_MSR_TSC_FREQUENCY_AVAILABLE (1 << 11)
+
/*
* Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM
* and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available
@@ -136,6 +149,12 @@
/* MSR used to read the per-partition time reference counter */
#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+/* MSR used to retrieve the TSC frequency */
+#define HV_X64_MSR_TSC_FREQUENCY 0x40000022
+
+/* MSR used to retrieve the local APIC timer frequency */
+#define HV_X64_MSR_APIC_FREQUENCY 0x40000023
+
/* Define the virtual APIC registers */
#define HV_X64_MSR_EOI 0x40000070
#define HV_X64_MSR_ICR 0x40000071
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 5d9a3033b3d7..d3a87780c70b 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -211,9 +211,9 @@ struct kvm_cpuid_entry2 {
__u32 padding[3];
};
-#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1
-#define KVM_CPUID_FLAG_STATEFUL_FUNC 2
-#define KVM_CPUID_FLAG_STATE_READ_NEXT 4
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX BIT(0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2)
/* for KVM_SET_CPUID2 */
struct kvm_cpuid2 {
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index bb0465090ae5..37813b5ddc37 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -147,6 +147,8 @@
#define MSR_PP1_ENERGY_STATUS 0x00000641
#define MSR_PP1_POLICY 0x00000642
+#define MSR_CORE_C1_RES 0x00000660
+
#define MSR_AMD64_MC0_MASK 0xc0010044
#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
@@ -536,6 +538,7 @@
/* MSR_IA32_VMX_MISC bits */
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
+#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index a5408b965c9d..9b0a34e2cd79 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -36,6 +36,8 @@ obj-y += tsc.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
+obj-$(CONFIG_PREEMPT) += preempt.o
+
obj-y += process.o
obj-y += i387.o xsave.o
obj-y += ptrace.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 40c76604199f..6c0b43bd024b 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -189,24 +189,31 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
return 0;
}
-static void acpi_register_lapic(int id, u8 enabled)
+/**
+ * acpi_register_lapic - register a local apic and generate a logical cpu number
+ * @id: local apic id to register
+ * @enabled: whether this cpu is enabled
+ *
+ * Returns the logical cpu number which maps to the local apic
+ */
+static int acpi_register_lapic(int id, u8 enabled)
{
unsigned int ver = 0;
if (id >= MAX_LOCAL_APIC) {
printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
- return;
+ return -EINVAL;
}
if (!enabled) {
++disabled_cpus;
- return;
+ return -EINVAL;
}
if (boot_cpu_physical_apicid != -1U)
ver = apic_version[boot_cpu_physical_apicid];
- generic_processor_info(id, ver);
+ return generic_processor_info(id, ver);
}
static int __init
@@ -614,84 +621,27 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
#endif
}
-static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- struct acpi_madt_local_apic *lapic;
- cpumask_var_t tmp_map, new_map;
- u8 physid;
int cpu;
- int retval = -ENOMEM;
-
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
- return -EINVAL;
-
- if (!buffer.length || !buffer.pointer)
- return -EINVAL;
-
- obj = buffer.pointer;
- if (obj->type != ACPI_TYPE_BUFFER ||
- obj->buffer.length < sizeof(*lapic)) {
- kfree(buffer.pointer);
- return -EINVAL;
- }
- lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
-
- if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
- !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
- kfree(buffer.pointer);
- return -EINVAL;
- }
-
- physid = lapic->id;
-
- kfree(buffer.pointer);
- buffer.length = ACPI_ALLOCATE_BUFFER;
- buffer.pointer = NULL;
- lapic = NULL;
-
- if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
- goto out;
-
- if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
- goto free_tmp_map;
-
- cpumask_copy(tmp_map, cpu_present_mask);
- acpi_register_lapic(physid, ACPI_MADT_ENABLED);
-
- /*
- * If acpi_register_lapic successfully generates a new logical cpu
- * number, then the following will get us exactly what was mapped
- */
- cpumask_andnot(new_map, cpu_present_mask, tmp_map);
- if (cpumask_empty(new_map)) {
- printk ("Unable to map lapic to logical cpu number\n");
- retval = -EINVAL;
- goto free_new_map;
+ cpu = acpi_register_lapic(physid, ACPI_MADT_ENABLED);
+ if (cpu < 0) {
+ pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+ return cpu;
}
acpi_processor_set_pdc(handle);
-
- cpu = cpumask_first(new_map);
acpi_map_cpu2node(handle, cpu, physid);
*pcpu = cpu;
- retval = 0;
-
-free_new_map:
- free_cpumask_var(new_map);
-free_tmp_map:
- free_cpumask_var(tmp_map);
-out:
- return retval;
+ return 0;
}
/* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
{
- return _acpi_map_lsapic(handle, pcpu);
+ return _acpi_map_lsapic(handle, physid, pcpu);
}
EXPORT_SYMBOL(acpi_map_lsapic);
@@ -745,7 +695,7 @@ static int __init acpi_parse_sbf(struct acpi_table_header *table)
#ifdef CONFIG_HPET_TIMER
#include <asm/hpet.h>
-static struct __initdata resource *hpet_res;
+static struct resource *hpet_res __initdata;
static int __init acpi_parse_hpet(struct acpi_table_header *table)
{
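
The acpi_register_lapic()/_acpi_map_lsapic() rework above replaces the cpumask-diffing dance with the usual "return the new index or a negative errno" idiom, so the caller only needs a single "< 0" check. A minimal user-space sketch of that idiom; register_cpu(), map_cpu() and MAX_CPUS are illustrative names, not kernel APIs:

#include <errno.h>
#include <stdio.h>

#define MAX_CPUS 8

static int logical_cpus;		/* next free logical cpu number */
static int apicid_of[MAX_CPUS];		/* logical cpu -> apic id */

/* Return the new logical cpu number, or a negative errno on failure. */
static int register_cpu(int apicid, int enabled)
{
	if (apicid >= 255)
		return -EINVAL;		/* apic id out of range */
	if (!enabled)
		return -EINVAL;		/* firmware marked the cpu disabled */
	if (logical_cpus >= MAX_CPUS)
		return -ENODEV;		/* no free logical cpu slot */

	apicid_of[logical_cpus] = apicid;
	return logical_cpus++;
}

/* Caller pattern mirroring _acpi_map_lsapic(): no temporary cpumasks. */
static int map_cpu(int apicid, int *pcpu)
{
	int cpu = register_cpu(apicid, 1);

	if (cpu < 0) {
		fprintf(stderr, "unable to map apicid %#x\n", apicid);
		return cpu;
	}
	*pcpu = cpu;
	return 0;
}

int main(void)
{
	int cpu;

	if (map_cpu(0x10, &cpu) == 0)
		printf("apicid 0x10 -> logical cpu %d\n", cpu);
	return 0;
}
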
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 33120100ff5e..3a2ae4c88948 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -26,6 +26,17 @@ static char temp_stack[4096];
#endif
/**
+ * x86_acpi_enter_sleep_state - enter sleep state
+ * @state: Sleep state to enter.
+ *
+ * Wrapper around acpi_enter_sleep_state() to be called by assembly.
+ */
+acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state)
+{
+ return acpi_enter_sleep_state(state);
+}
+
+/**
* x86_acpi_suspend_lowlevel - save kernel state
*
* Create an identity mapped page table and copy the wakeup routine to
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index c9c2c982d5e4..65c7b606b606 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -17,3 +17,5 @@ extern void wakeup_long64(void);
extern void do_suspend_lowlevel(void);
extern int x86_acpi_suspend_lowlevel(void);
+
+acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state);
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index d1daa66ab162..665c6b7d2ea9 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -73,7 +73,7 @@ ENTRY(do_suspend_lowlevel)
call save_processor_state
call save_registers
pushl $3
- call acpi_enter_sleep_state
+ call x86_acpi_enter_sleep_state
addl $4, %esp
# In case of S3 failure, we'll emerge here. Jump
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 8ea5164cbd04..ae693b51ed8e 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -73,7 +73,7 @@ ENTRY(do_suspend_lowlevel)
addq $8, %rsp
movl $3, %edi
xorl %eax, %eax
- call acpi_enter_sleep_state
+ call x86_acpi_enter_sleep_state
/* in case something went wrong, restore the machine status and go on */
jmp resume_point
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 15e8563e5c24..df94598ad05a 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -402,17 +402,6 @@ void alternatives_enable_smp(void)
{
struct smp_alt_module *mod;
-#ifdef CONFIG_LOCKDEP
- /*
- * Older binutils section handling bug prevented
- * alternatives-replacement from working reliably.
- *
- * If this still occurs then you should see a hang
- * or crash shortly after this line:
- */
- pr_info("lockdep: fixing up alternatives\n");
-#endif
-
/* Why bother if there are no other CPUs? */
BUG_ON(num_possible_cpus() == 1);
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index c9876efecafb..af5b08ab3b71 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -40,7 +40,7 @@
#include <asm/fixmap.h>
#include <asm/apb_timer.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include <asm/time.h>
#define APBT_CLOCKEVENT_RATING 110
@@ -157,13 +157,13 @@ static int __init apbt_clockevent_register(void)
adev->num = smp_processor_id();
adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0",
- mrst_timer_options == MRST_TIMER_LAPIC_APBT ?
+ intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ?
APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING,
adev_virt_addr(adev), 0, apbt_freq);
/* Firmware does EOI handling for us. */
adev->timer->eoi = NULL;
- if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
+ if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
global_clock_event = &adev->timer->ced;
printk(KERN_DEBUG "%s clockevent registered as global\n",
global_clock_event->name);
@@ -253,7 +253,7 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
static __init int apbt_late_init(void)
{
- if (mrst_timer_options == MRST_TIMER_LAPIC_APBT ||
+ if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
!apb_timer_block_enabled)
return 0;
/* This notifier should be called after workqueue is ready */
@@ -340,7 +340,7 @@ void __init apbt_time_init(void)
}
#ifdef CONFIG_SMP
/* kernel cmdline disable apb timer, so we will use lapic timers */
- if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
+ if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
printk(KERN_INFO "apbt: disabled per cpu timer\n");
return;
}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a7eb82d9b012..ed165d657380 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2107,7 +2107,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
apic_write(APIC_LVT1, value);
}
-void generic_processor_info(int apicid, int version)
+int generic_processor_info(int apicid, int version)
{
int cpu, max = nr_cpu_ids;
bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2127,7 +2127,7 @@ void generic_processor_info(int apicid, int version)
" Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
disabled_cpus++;
- return;
+ return -ENODEV;
}
if (num_processors >= nr_cpu_ids) {
@@ -2138,7 +2138,7 @@ void generic_processor_info(int apicid, int version)
" Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
disabled_cpus++;
- return;
+ return -EINVAL;
}
num_processors++;
@@ -2183,6 +2183,8 @@ void generic_processor_info(int apicid, int version)
#endif
set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
+
+ return cpu;
}
int hard_smp_processor_id(void)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index a419814cea57..ad0dc0428baf 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -39,12 +39,6 @@
#include <asm/x86_init.h>
#include <asm/nmi.h>
-/* BMC sets a bit this MMR non-zero before sending an NMI */
-#define UVH_NMI_MMR UVH_SCRATCH5
-#define UVH_NMI_MMR_CLEAR (UVH_NMI_MMR + 8)
-#define UV_NMI_PENDING_MASK (1UL << 63)
-DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);
-
DEFINE_PER_CPU(int, x2apic_extra_bits);
#define PR_DEVEL(fmt, args...) pr_devel("%s: " fmt, __func__, args)
@@ -58,7 +52,6 @@ int uv_min_hub_revision_id;
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
unsigned int uv_apicid_hibits;
EXPORT_SYMBOL_GPL(uv_apicid_hibits);
-static DEFINE_SPINLOCK(uv_nmi_lock);
static struct apic apic_x2apic_uv_x;
@@ -847,68 +840,6 @@ void uv_cpu_init(void)
set_x2apic_extra_bits(uv_hub_info->pnode);
}
-/*
- * When NMI is received, print a stack trace.
- */
-int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
-{
- unsigned long real_uv_nmi;
- int bid;
-
- /*
- * Each blade has an MMR that indicates when an NMI has been sent
- * to cpus on the blade. If an NMI is detected, atomically
- * clear the MMR and update a per-blade NMI count used to
- * cause each cpu on the blade to notice a new NMI.
- */
- bid = uv_numa_blade_id();
- real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
-
- if (unlikely(real_uv_nmi)) {
- spin_lock(&uv_blade_info[bid].nmi_lock);
- real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
- if (real_uv_nmi) {
- uv_blade_info[bid].nmi_count++;
- uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
- }
- spin_unlock(&uv_blade_info[bid].nmi_lock);
- }
-
- if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
- return NMI_DONE;
-
- __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
-
- /*
- * Use a lock so only one cpu prints at a time.
- * This prevents intermixed output.
- */
- spin_lock(&uv_nmi_lock);
- pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
- dump_stack();
- spin_unlock(&uv_nmi_lock);
-
- return NMI_HANDLED;
-}
-
-void uv_register_nmi_notifier(void)
-{
- if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
- printk(KERN_WARNING "UV NMI handler failed to register\n");
-}
-
-void uv_nmi_init(void)
-{
- unsigned int value;
-
- /*
- * Unmask NMI on all cpus
- */
- value = apic_read(APIC_LVT1) | APIC_DM_NMI;
- value &= ~APIC_LVT_MASKED;
- apic_write(APIC_LVT1, value);
-}
-
void __init uv_system_init(void)
{
union uvh_rh_gam_config_mmr_u m_n_config;
@@ -1046,6 +977,7 @@ void __init uv_system_init(void)
map_mmr_high(max_pnode);
map_mmioh_high(min_pnode, max_pnode);
+ uv_nmi_setup();
uv_cpu_init();
uv_scir_register_cpu_notifier();
uv_register_nmi_notifier();
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 28610822fb3c..9f6b9341950f 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -32,7 +32,6 @@ void common(void) {
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_status, thread_info, status);
OFFSET(TI_addr_limit, thread_info, addr_limit);
- OFFSET(TI_preempt_count, thread_info, preempt_count);
BLANK();
OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 903a264af981..bca023bdd6b2 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -339,7 +339,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
#endif
/*
- * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
+ * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
* Assumes number of cores is a power of two.
*/
static void amd_detect_cmp(struct cpuinfo_x86 *c)
@@ -823,8 +823,8 @@ static const struct cpu_dev amd_cpu_dev = {
.c_vendor = "AMD",
.c_ident = { "AuthenticAMD" },
#ifdef CONFIG_X86_32
- .c_models = {
- { .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
+ .legacy_models = {
+ { .family = 4, .model_names =
{
[3] = "486 DX/2",
[7] = "486 DX/2-WB",
@@ -835,7 +835,7 @@ static const struct cpu_dev amd_cpu_dev = {
}
},
},
- .c_size_cache = amd_size_cache,
+ .legacy_cache_size = amd_size_cache,
#endif
.c_early_init = early_init_amd,
.c_detect_tlb = cpu_detect_tlb_amd,
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index fbf6c3bc2400..8d5652dc99dd 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -468,10 +468,10 @@ static void init_centaur(struct cpuinfo_x86 *c)
#endif
}
+#ifdef CONFIG_X86_32
static unsigned int
centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
-#ifdef CONFIG_X86_32
/* VIA C3 CPUs (670-68F) need further shifting. */
if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
size >>= 8;
@@ -484,16 +484,18 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
if ((c->x86 == 6) && (c->x86_model == 9) &&
(c->x86_mask == 1) && (size == 65))
size -= 1;
-#endif
return size;
}
+#endif
static const struct cpu_dev centaur_cpu_dev = {
.c_vendor = "Centaur",
.c_ident = { "CentaurHauls" },
.c_early_init = early_init_centaur,
.c_init = init_centaur,
- .c_size_cache = centaur_size_cache,
+#ifdef CONFIG_X86_32
+ .legacy_cache_size = centaur_size_cache,
+#endif
.c_x86_vendor = X86_VENDOR_CENTAUR,
};
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2793d1f095a2..6abc172b8258 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -346,7 +346,8 @@ static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
- const struct cpu_model_info *info;
+#ifdef CONFIG_X86_32
+ const struct legacy_cpu_model_info *info;
if (c->x86_model >= 16)
return NULL; /* Range check */
@@ -354,13 +355,14 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
if (!this_cpu)
return NULL;
- info = this_cpu->c_models;
+ info = this_cpu->legacy_models;
- while (info && info->family) {
+ while (info->family) {
if (info->family == c->x86)
return info->model_names[c->x86_model];
info++;
}
+#endif
return NULL; /* Not found */
}
@@ -450,8 +452,8 @@ void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
/* do processor-specific cache resizing */
- if (this_cpu->c_size_cache)
- l2size = this_cpu->c_size_cache(c, l2size);
+ if (this_cpu->legacy_cache_size)
+ l2size = this_cpu->legacy_cache_size(c, l2size);
/* Allow user to override all this if necessary. */
if (cachesize_override != -1)
@@ -1095,6 +1097,9 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) =
DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
+DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
+EXPORT_PER_CPU_SYMBOL(__preempt_count);
+
DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
/*
@@ -1169,6 +1174,8 @@ void debug_stack_reset(void)
DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
+DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
+EXPORT_PER_CPU_SYMBOL(__preempt_count);
DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
#ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 4041c24ae7db..c37dc37e8317 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -1,12 +1,6 @@
#ifndef ARCH_X86_CPU_H
#define ARCH_X86_CPU_H
-struct cpu_model_info {
- int vendor;
- int family;
- const char *model_names[16];
-};
-
/* attempt to consolidate cpu attributes */
struct cpu_dev {
const char *c_vendor;
@@ -14,15 +8,23 @@ struct cpu_dev {
/* some have two possibilities for cpuid string */
const char *c_ident[2];
- struct cpu_model_info c_models[4];
-
void (*c_early_init)(struct cpuinfo_x86 *);
void (*c_bsp_init)(struct cpuinfo_x86 *);
void (*c_init)(struct cpuinfo_x86 *);
void (*c_identify)(struct cpuinfo_x86 *);
void (*c_detect_tlb)(struct cpuinfo_x86 *);
- unsigned int (*c_size_cache)(struct cpuinfo_x86 *, unsigned int);
int c_x86_vendor;
+#ifdef CONFIG_X86_32
+ /* Optional vendor specific routine to obtain the cache size. */
+ unsigned int (*legacy_cache_size)(struct cpuinfo_x86 *,
+ unsigned int);
+
+ /* Family/stepping-based lookup table for model names. */
+ struct legacy_cpu_model_info {
+ int family;
+ const char *model_names[16];
+ } legacy_models[5];
+#endif
};
struct _tlb_table {
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index ec7299566f79..dc1ec0dff939 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -665,8 +665,8 @@ static const struct cpu_dev intel_cpu_dev = {
.c_vendor = "Intel",
.c_ident = { "GenuineIntel" },
#ifdef CONFIG_X86_32
- .c_models = {
- { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
+ .legacy_models = {
+ { .family = 4, .model_names =
{
[0] = "486 DX-25/33",
[1] = "486 DX-50",
@@ -679,7 +679,7 @@ static const struct cpu_dev intel_cpu_dev = {
[9] = "486 DX/4-WB"
}
},
- { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
+ { .family = 5, .model_names =
{
[0] = "Pentium 60/66 A-step",
[1] = "Pentium 60/66",
@@ -690,7 +690,7 @@ static const struct cpu_dev intel_cpu_dev = {
[8] = "Mobile Pentium MMX"
}
},
- { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
+ { .family = 6, .model_names =
{
[0] = "Pentium Pro A-step",
[1] = "Pentium Pro",
@@ -704,7 +704,7 @@ static const struct cpu_dev intel_cpu_dev = {
[11] = "Pentium III (Tualatin)",
}
},
- { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
+ { .family = 15, .model_names =
{
[0] = "Pentium 4 (Unknown)",
[1] = "Pentium 4 (Willamette)",
@@ -714,7 +714,7 @@ static const struct cpu_dev intel_cpu_dev = {
}
},
},
- .c_size_cache = intel_size_cache,
+ .legacy_cache_size = intel_size_cache,
#endif
.c_detect_tlb = intel_detect_tlb,
.c_early_init = early_init_intel,
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 1414c90feaba..0641113e2965 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -1,5 +1,5 @@
/*
- * Routines to indentify caches on Intel CPU.
+ * Routines to identify caches on Intel CPU.
*
* Changes:
* Venkatesh Pallipadi : Adding cache identification through cpuid(4)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
index cd8b166a1735..de8b60a53f69 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -42,8 +42,7 @@ void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
struct mce m;
/* Only corrected MC is reported */
- if (!corrected || !(mem_err->validation_bits &
- CPER_MEM_VALID_PHYSICAL_ADDRESS))
+ if (!corrected || !(mem_err->validation_bits & CPER_MEM_VALID_PA))
return;
mce_setup(&m);
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 71a39f3621ba..9f6e9f89d9d6 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -15,6 +15,7 @@
#include <linux/clocksource.h>
#include <linux/module.h>
#include <linux/hardirq.h>
+#include <linux/efi.h>
#include <linux/interrupt.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
@@ -23,6 +24,8 @@
#include <asm/desc.h>
#include <asm/idle.h>
#include <asm/irq_regs.h>
+#include <asm/i8259.h>
+#include <asm/apic.h>
struct ms_hyperv_info ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -67,6 +70,8 @@ static struct clocksource hyperv_cs = {
static void __init ms_hyperv_init_platform(void)
{
+ u64 hv_lapic_frequency;
+
/*
* Extract the features and hints
*/
@@ -76,6 +81,28 @@ static void __init ms_hyperv_init_platform(void)
printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
ms_hyperv.features, ms_hyperv.hints);
+#ifdef CONFIG_X86_LOCAL_APIC
+ if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
+ /*
+ * Get the APIC frequency.
+ */
+ rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
+ hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
+ lapic_timer_frequency = hv_lapic_frequency;
+ printk(KERN_INFO "HyperV: LAPIC Timer Frequency: %#x\n",
+ lapic_timer_frequency);
+
+ /*
+ * On Hyper-V, when we are booting off an EFI firmware stack,
+ * we do not have many legacy devices including PIC, PIT etc.
+ */
+ if (efi_enabled(EFI_BOOT)) {
+ printk(KERN_INFO "HyperV: Using null_legacy_pic\n");
+ legacy_pic = &null_legacy_pic;
+ }
+ }
+#endif
+
if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
}
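
The ms_hyperv_init_platform() hunk above reads the Hyper-V-reported APIC timer frequency from an MSR and divides it by HZ to get the number of LAPIC timer ticks per kernel tick. A small stand-alone sketch of just that arithmetic; HZ=1000 and the 200 MHz sample value are assumptions, nothing is read from hardware here:

#include <stdint.h>
#include <stdio.h>

#define HZ 1000			/* assumed kernel tick rate */

/* Ticks the LAPIC timer advances per kernel tick, given its frequency in Hz. */
static uint32_t lapic_ticks_per_jiffy(uint64_t lapic_freq_hz)
{
	return (uint32_t)(lapic_freq_hz / HZ);	/* the kernel uses div_u64() here */
}

int main(void)
{
	uint64_t freq = 200000000ULL;	/* example value in place of the MSR read */

	printf("lapic timer frequency: %#x ticks per jiffy\n",
	       lapic_ticks_per_jiffy(freq));
	return 0;
}
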
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9d8449158cf9..8a87a3224121 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1276,16 +1276,16 @@ void perf_events_lapic_init(void)
static int __kprobes
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
- int ret;
u64 start_clock;
u64 finish_clock;
+ int ret;
if (!atomic_read(&active_events))
return NMI_DONE;
- start_clock = local_clock();
+ start_clock = sched_clock();
ret = x86_pmu.handle_irq(regs);
- finish_clock = local_clock();
+ finish_clock = sched_clock();
perf_sample_event_took(finish_clock - start_clock);
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index cc16faae0538..fd00bb29425d 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -164,6 +164,11 @@ struct cpu_hw_events {
struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX];
/*
+ * Intel checkpoint mask
+ */
+ u64 intel_cp_status;
+
+ /*
* manage shared (per-core, per-cpu) registers
* used on Intel NHM/WSM/SNB
*/
@@ -440,6 +445,7 @@ struct x86_pmu {
int lbr_nr; /* hardware stack size */
u64 lbr_sel_mask; /* LBR_SELECT valid bits */
const int *lbr_sel_map; /* lbr_select mappings */
+ bool lbr_double_abort; /* duplicated lbr aborts */
/*
* Extra registers for events
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f31a1655d1ff..0fa4f242f050 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -190,9 +190,9 @@ static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
EVENT_EXTRA_END
};
-EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
-EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
-EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
+EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
+EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
+EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
struct attribute *nhm_events_attrs[] = {
EVENT_PTR(mem_ld_nhm),
@@ -1184,6 +1184,11 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
wrmsrl(hwc->config_base, ctrl_val);
}
+static inline bool event_is_checkpointed(struct perf_event *event)
+{
+ return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
+}
+
static void intel_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -1197,6 +1202,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
+ cpuc->intel_cp_status &= ~(1ull << hwc->idx);
/*
* must disable before any actual event
@@ -1271,6 +1277,9 @@ static void intel_pmu_enable_event(struct perf_event *event)
if (event->attr.exclude_guest)
cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
+ if (unlikely(event_is_checkpointed(event)))
+ cpuc->intel_cp_status |= (1ull << hwc->idx);
+
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_enable_fixed(hwc);
return;
@@ -1289,6 +1298,17 @@ static void intel_pmu_enable_event(struct perf_event *event)
int intel_pmu_save_and_restart(struct perf_event *event)
{
x86_perf_event_update(event);
+ /*
+ * For a checkpointed counter always reset back to 0. This
+ * avoids a situation where the counter overflows, aborts the
+ * transaction and is then set back to shortly before the
+ * overflow, and overflows and aborts again.
+ */
+ if (unlikely(event_is_checkpointed(event))) {
+ /* No race with NMIs because the counter should not be armed */
+ wrmsrl(event->hw.event_base, 0);
+ local64_set(&event->hw.prev_count, 0);
+ }
return x86_perf_event_set_period(event);
}
@@ -1372,6 +1392,13 @@ again:
x86_pmu.drain_pebs(regs);
}
+ /*
+ * Checkpointed counters can lead to 'spurious' PMIs because the
+ * rollback caused by the PMI will have cleared the overflow status
+ * bit. Therefore always force probe these counters.
+ */
+ status |= cpuc->intel_cp_status;
+
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
@@ -1837,6 +1864,20 @@ static int hsw_hw_config(struct perf_event *event)
event->attr.precise_ip > 0))
return -EOPNOTSUPP;
+ if (event_is_checkpointed(event)) {
+ /*
+ * Sampling of checkpointed events can cause situations where
+	 * the CPU constantly aborts because of an overflow, which is
+ * then checkpointed back and ignored. Forbid checkpointing
+ * for sampling.
+ *
+ * But still allow a long sampling period, so that perf stat
+ * from KVM works.
+ */
+ if (event->attr.sample_period > 0 &&
+ event->attr.sample_period < 0x7fffffff)
+ return -EOPNOTSUPP;
+ }
return 0;
}
@@ -2182,10 +2223,36 @@ static __init void intel_nehalem_quirk(void)
}
}
-EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
-EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
+EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
+EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
+
+/* Haswell special events */
+EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
+EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
+EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
+EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
+EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
+EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
+EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
+EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
+EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
+EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
+EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
+EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
static struct attribute *hsw_events_attrs[] = {
+ EVENT_PTR(tx_start),
+ EVENT_PTR(tx_commit),
+ EVENT_PTR(tx_abort),
+ EVENT_PTR(tx_capacity),
+ EVENT_PTR(tx_conflict),
+ EVENT_PTR(el_start),
+ EVENT_PTR(el_commit),
+ EVENT_PTR(el_abort),
+ EVENT_PTR(el_capacity),
+ EVENT_PTR(el_conflict),
+ EVENT_PTR(cycles_t),
+ EVENT_PTR(cycles_ct),
EVENT_PTR(mem_ld_hsw),
EVENT_PTR(mem_st_hsw),
NULL
@@ -2452,6 +2519,7 @@ __init int intel_pmu_init(void)
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.lbr_double_abort = true;
pr_cont("Haswell events, ");
break;
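
The checkpointed-counter support above keeps a per-CPU mask (intel_cp_status) of counters whose overflow bit may have been rolled back by a TSX abort, and ORs that mask into the PMI overflow status so those counters are always probed. A user-space sketch of just the mask bookkeeping; the function names and the single global mask are illustrative, not the driver's API:

#include <stdint.h>
#include <stdio.h>

static uint64_t cp_status;	/* bit n set => counter n is checkpointed */

static void enable_counter(int idx, int checkpointed)
{
	if (checkpointed)
		cp_status |= 1ULL << idx;
}

static void disable_counter(int idx)
{
	cp_status &= ~(1ULL << idx);
}

/* PMI path: force-probe checkpointed counters even if their bit is clear. */
static uint64_t effective_status(uint64_t hw_overflow_status)
{
	return hw_overflow_status | cp_status;
}

int main(void)
{
	enable_counter(2, 1);		/* counter 2 runs checkpointed */
	enable_counter(3, 0);

	/* Hardware reports only counter 3, but counter 2 gets probed anyway. */
	printf("status %#llx\n",
	       (unsigned long long)effective_status(1ULL << 3));

	disable_counter(2);
	return 0;
}
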
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index ab3ba1c1b7dd..c1760ff3c757 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -12,6 +12,7 @@
#define BTS_BUFFER_SIZE (PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE PAGE_SIZE
+#define PEBS_FIXUP_SIZE PAGE_SIZE
/*
* pebs_record_32 for p4 and core not supported
@@ -182,18 +183,32 @@ struct pebs_record_nhm {
* Same as pebs_record_nhm, with two additional fields.
*/
struct pebs_record_hsw {
- struct pebs_record_nhm nhm;
- /*
- * Real IP of the event. In the Intel documentation this
- * is called eventingrip.
- */
- u64 real_ip;
- /*
- * TSX tuning information field: abort cycles and abort flags.
- */
- u64 tsx_tuning;
+ u64 flags, ip;
+ u64 ax, bx, cx, dx;
+ u64 si, di, bp, sp;
+ u64 r8, r9, r10, r11;
+ u64 r12, r13, r14, r15;
+ u64 status, dla, dse, lat;
+ u64 real_ip, tsx_tuning;
+};
+
+union hsw_tsx_tuning {
+ struct {
+ u32 cycles_last_block : 32,
+ hle_abort : 1,
+ rtm_abort : 1,
+ instruction_abort : 1,
+ non_instruction_abort : 1,
+ retry : 1,
+ data_conflict : 1,
+ capacity_writes : 1,
+ capacity_reads : 1;
+ };
+ u64 value;
};
+#define PEBS_HSW_TSX_FLAGS 0xff00000000ULL
+
void init_debug_store_on_cpu(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
@@ -214,12 +229,14 @@ void fini_debug_store_on_cpu(int cpu)
wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
+static DEFINE_PER_CPU(void *, insn_buffer);
+
static int alloc_pebs_buffer(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
int node = cpu_to_node(cpu);
int max, thresh = 1; /* always use a single PEBS record */
- void *buffer;
+ void *buffer, *ibuffer;
if (!x86_pmu.pebs)
return 0;
@@ -228,6 +245,19 @@ static int alloc_pebs_buffer(int cpu)
if (unlikely(!buffer))
return -ENOMEM;
+ /*
+ * HSW+ already provides us the eventing ip; no need to allocate this
+ * buffer then.
+ */
+ if (x86_pmu.intel_cap.pebs_format < 2) {
+ ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
+ if (!ibuffer) {
+ kfree(buffer);
+ return -ENOMEM;
+ }
+ per_cpu(insn_buffer, cpu) = ibuffer;
+ }
+
max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
ds->pebs_buffer_base = (u64)(unsigned long)buffer;
@@ -248,6 +278,9 @@ static void release_pebs_buffer(int cpu)
if (!ds || !x86_pmu.pebs)
return;
+ kfree(per_cpu(insn_buffer, cpu));
+ per_cpu(insn_buffer, cpu) = NULL;
+
kfree((void *)(unsigned long)ds->pebs_buffer_base);
ds->pebs_buffer_base = 0;
}
@@ -715,6 +748,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
unsigned long old_to, to = cpuc->lbr_entries[0].to;
unsigned long ip = regs->ip;
int is_64bit = 0;
+ void *kaddr;
/*
* We don't need to fixup if the PEBS assist is fault like
@@ -738,7 +772,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
* unsigned math, either ip is before the start (impossible) or
* the basic block is larger than 1 page (sanity)
*/
- if ((ip - to) > PAGE_SIZE)
+ if ((ip - to) > PEBS_FIXUP_SIZE)
return 0;
/*
@@ -749,29 +783,33 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
return 1;
}
+ if (!kernel_ip(ip)) {
+ int size, bytes;
+ u8 *buf = this_cpu_read(insn_buffer);
+
+ size = ip - to; /* Must fit our buffer, see above */
+ bytes = copy_from_user_nmi(buf, (void __user *)to, size);
+ if (bytes != size)
+ return 0;
+
+ kaddr = buf;
+ } else {
+ kaddr = (void *)to;
+ }
+
do {
struct insn insn;
- u8 buf[MAX_INSN_SIZE];
- void *kaddr;
old_to = to;
- if (!kernel_ip(ip)) {
- int bytes, size = MAX_INSN_SIZE;
-
- bytes = copy_from_user_nmi(buf, (void __user *)to, size);
- if (bytes != size)
- return 0;
-
- kaddr = buf;
- } else
- kaddr = (void *)to;
#ifdef CONFIG_X86_64
is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
insn_init(&insn, kaddr, is_64bit);
insn_get_length(&insn);
+
to += insn.length;
+ kaddr += insn.length;
} while (to < ip);
if (to == ip) {
@@ -786,16 +824,34 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
return 0;
}
+static inline u64 intel_hsw_weight(struct pebs_record_hsw *pebs)
+{
+ if (pebs->tsx_tuning) {
+ union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
+ return tsx.cycles_last_block;
+ }
+ return 0;
+}
+
+static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs)
+{
+ u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
+
+ /* For RTM XABORTs also log the abort code from AX */
+ if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1))
+ txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
+ return txn;
+}
+
static void __intel_pmu_pebs_event(struct perf_event *event,
struct pt_regs *iregs, void *__pebs)
{
/*
- * We cast to pebs_record_nhm to get the load latency data
- * if extra_reg MSR_PEBS_LD_LAT_THRESHOLD used
+ * We cast to the biggest pebs_record but are careful not to
+ * unconditionally access the 'extra' entries.
*/
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct pebs_record_nhm *pebs = __pebs;
- struct pebs_record_hsw *pebs_hsw = __pebs;
+ struct pebs_record_hsw *pebs = __pebs;
struct perf_sample_data data;
struct pt_regs regs;
u64 sample_type;
@@ -854,7 +910,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
regs.sp = pebs->sp;
if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
- regs.ip = pebs_hsw->real_ip;
+ regs.ip = pebs->real_ip;
regs.flags |= PERF_EFLAGS_EXACT;
} else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
regs.flags |= PERF_EFLAGS_EXACT;
@@ -862,9 +918,18 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
regs.flags &= ~PERF_EFLAGS_EXACT;
if ((event->attr.sample_type & PERF_SAMPLE_ADDR) &&
- x86_pmu.intel_cap.pebs_format >= 1)
+ x86_pmu.intel_cap.pebs_format >= 1)
data.addr = pebs->dla;
+ if (x86_pmu.intel_cap.pebs_format >= 2) {
+ /* Only set the TSX weight when no memory weight. */
+ if ((event->attr.sample_type & PERF_SAMPLE_WEIGHT) && !fll)
+ data.weight = intel_hsw_weight(pebs);
+
+ if (event->attr.sample_type & PERF_SAMPLE_TRANSACTION)
+ data.txn = intel_hsw_transaction(pebs);
+ }
+
if (has_branch_stack(event))
data.br_stack = &cpuc->lbr_stack;
@@ -913,17 +978,34 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
__intel_pmu_pebs_event(event, iregs, at);
}
-static void __intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, void *at,
- void *top)
+static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct perf_event *event = NULL;
+ void *at, *top;
u64 status = 0;
int bit;
+ if (!x86_pmu.pebs_active)
+ return;
+
+ at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
+ top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
+
ds->pebs_index = ds->pebs_buffer_base;
+ if (unlikely(at > top))
+ return;
+
+ /*
+ * Should not happen, we program the threshold at 1 and do not
+ * set a reset value.
+ */
+ WARN_ONCE(top - at > x86_pmu.max_pebs_events * x86_pmu.pebs_record_size,
+ "Unexpected number of pebs records %ld\n",
+ (long)(top - at) / x86_pmu.pebs_record_size);
+
for (; at < top; at += x86_pmu.pebs_record_size) {
struct pebs_record_nhm *p = at;
@@ -951,61 +1033,6 @@ static void __intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, void *at,
}
}
-static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct debug_store *ds = cpuc->ds;
- struct pebs_record_nhm *at, *top;
- int n;
-
- if (!x86_pmu.pebs_active)
- return;
-
- at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
- top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
-
- ds->pebs_index = ds->pebs_buffer_base;
-
- n = top - at;
- if (n <= 0)
- return;
-
- /*
- * Should not happen, we program the threshold at 1 and do not
- * set a reset value.
- */
- WARN_ONCE(n > x86_pmu.max_pebs_events,
- "Unexpected number of pebs records %d\n", n);
-
- return __intel_pmu_drain_pebs_nhm(iregs, at, top);
-}
-
-static void intel_pmu_drain_pebs_hsw(struct pt_regs *iregs)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct debug_store *ds = cpuc->ds;
- struct pebs_record_hsw *at, *top;
- int n;
-
- if (!x86_pmu.pebs_active)
- return;
-
- at = (struct pebs_record_hsw *)(unsigned long)ds->pebs_buffer_base;
- top = (struct pebs_record_hsw *)(unsigned long)ds->pebs_index;
-
- n = top - at;
- if (n <= 0)
- return;
- /*
- * Should not happen, we program the threshold at 1 and do not
- * set a reset value.
- */
- WARN_ONCE(n > x86_pmu.max_pebs_events,
- "Unexpected number of pebs records %d\n", n);
-
- return __intel_pmu_drain_pebs_nhm(iregs, at, top);
-}
-
/*
* BTS, PEBS probe and setup
*/
@@ -1040,7 +1067,7 @@ void intel_ds_init(void)
case 2:
pr_cont("PEBS fmt2%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
- x86_pmu.drain_pebs = intel_pmu_drain_pebs_hsw;
+ x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
break;
default:
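
intel_pmu_pebs_fixup_ip() above now copies the sampled basic block out of user space once, into a per-CPU insn_buffer, and then walks it instruction by instruction until it reaches the sampled IP. A toy sketch of that walk; the fixed two-byte decode_len() stub stands in for the kernel's insn_init()/insn_get_length() and is purely an assumption for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in decoder: pretend every instruction is two bytes long. */
static int decode_len(const uint8_t *kaddr)
{
	(void)kaddr;
	return 2;
}

/*
 * Walk the copied block from 'to' towards 'ip'; return 1 if an instruction
 * boundary lands exactly on 'ip' (so the fixup can be trusted).
 */
static int fixup_ip(const uint8_t *buf, uint64_t to, uint64_t ip)
{
	const uint8_t *kaddr = buf;

	while (to < ip) {
		int len = decode_len(kaddr);

		to += len;
		kaddr += len;
	}
	return to == ip;
}

int main(void)
{
	uint8_t block[16];

	memset(block, 0x90, sizeof(block));	/* dummy bytes, copied only once */
	printf("exact: %d\n", fixup_ip(block, 0x1000, 0x1008));
	printf("exact: %d\n", fixup_ip(block, 0x1000, 0x1007));
	return 0;
}
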
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index d5be06a5005e..90ee6c1d0542 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -284,6 +284,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
int lbr_format = x86_pmu.intel_cap.lbr_format;
u64 tos = intel_pmu_lbr_tos();
int i;
+ int out = 0;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
unsigned long lbr_idx = (tos - i) & mask;
@@ -306,15 +307,27 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
}
from = (u64)((((s64)from) << skip) >> skip);
- cpuc->lbr_entries[i].from = from;
- cpuc->lbr_entries[i].to = to;
- cpuc->lbr_entries[i].mispred = mis;
- cpuc->lbr_entries[i].predicted = pred;
- cpuc->lbr_entries[i].in_tx = in_tx;
- cpuc->lbr_entries[i].abort = abort;
- cpuc->lbr_entries[i].reserved = 0;
+ /*
+ * Some CPUs report duplicated abort records,
+ * with the second entry not having an abort bit set.
+ * Skip them here. This loop runs backwards,
+ * so we need to undo the previous record.
+ * If the abort just happened outside the window
+ * the extra entry cannot be removed.
+ */
+ if (abort && x86_pmu.lbr_double_abort && out > 0)
+ out--;
+
+ cpuc->lbr_entries[out].from = from;
+ cpuc->lbr_entries[out].to = to;
+ cpuc->lbr_entries[out].mispred = mis;
+ cpuc->lbr_entries[out].predicted = pred;
+ cpuc->lbr_entries[out].in_tx = in_tx;
+ cpuc->lbr_entries[out].abort = abort;
+ cpuc->lbr_entries[out].reserved = 0;
+ out++;
}
- cpuc->lbr_stack.nr = i;
+ cpuc->lbr_stack.nr = out;
}
void intel_pmu_lbr_read(void)
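
The LBR read path above copies entries through a separate write index so that, on parts flagged with lbr_double_abort, the duplicated record that precedes an abort entry can be dropped in place. A compact user-space sketch of that compaction; the struct layout and the dedup_aborts() name are illustrative:

#include <stdio.h>

struct entry {
	unsigned long from, to;
	int abort;
};

/* Compact 'in' into 'out'; drop the record that precedes each abort record. */
static int dedup_aborts(const struct entry *in, int nr, struct entry *out)
{
	int i, n = 0;

	for (i = 0; i < nr; i++) {
		if (in[i].abort && n > 0)
			n--;		/* undo the duplicated previous record */
		out[n++] = in[i];
	}
	return n;
}

int main(void)
{
	struct entry in[3] = {
		{ 0x100, 0x200, 0 },
		{ 0x100, 0x200, 0 },	/* duplicate without the abort bit */
		{ 0x100, 0x200, 1 },	/* the real abort record */
	};
	struct entry out[3];
	int n = dedup_aborts(in, 3, out);

	printf("kept %d of 3 records\n", n);	/* prints: kept 2 of 3 records */
	return 0;
}
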
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index 88db010845cb..384df5105fbc 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -31,20 +31,6 @@ static int __init x86_rdrand_setup(char *s)
}
__setup("nordrand", x86_rdrand_setup);
-/* We can't use arch_get_random_long() here since alternatives haven't run */
-static inline int rdrand_long(unsigned long *v)
-{
- int ok;
- asm volatile("1: " RDRAND_LONG "\n\t"
- "jc 2f\n\t"
- "decl %0\n\t"
- "jnz 1b\n\t"
- "2:"
- : "=r" (ok), "=a" (*v)
- : "0" (RDRAND_RETRY_LOOPS));
- return ok;
-}
-
/*
* Force a reseed cycle; we are architecturally guaranteed a reseed
* after no more than 512 128-bit chunks of random data. This also
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index f2cc63e9cf08..b6f794aa1693 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -1,5 +1,5 @@
/*
- * Routines to indentify additional cpu features that are scattered in
+ * Routines to identify additional cpu features that are scattered in
* cpuid space.
*/
#include <linux/cpu.h>
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index 202759a14121..75c5ad5d35cc 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -11,8 +11,8 @@
static const struct cpu_dev umc_cpu_dev = {
.c_vendor = "UMC",
.c_ident = { "UMC UMC UMC" },
- .c_models = {
- { .vendor = X86_VENDOR_UMC, .family = 4, .model_names =
+ .legacy_models = {
+ { .family = 4, .model_names =
{
[1] = "U5D",
[2] = "U5S",
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 376dc7873447..d35078ea1446 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -20,22 +20,13 @@
#include <asm/hpet.h>
#include <asm/apic.h>
#include <asm/pci_x86.h>
+#include <asm/setup.h>
__initdata u64 initial_dtb;
char __initdata cmd_line[COMMAND_LINE_SIZE];
int __initdata of_ioapic;
-unsigned long pci_address_to_pio(phys_addr_t address)
-{
- /*
- * The ioport address can be directly used by inX / outX
- */
- BUG_ON(address >= (1 << 16));
- return (unsigned long)address;
-}
-EXPORT_SYMBOL_GPL(pci_address_to_pio);
-
void __init early_init_dt_scan_chosen_arch(unsigned long node)
{
BUG();
@@ -51,15 +42,6 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
-}
-#endif
-
void __init add_dtb(u64 data)
{
initial_dtb = data + offsetof(struct setup_data, data);
@@ -105,7 +87,6 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
static int x86_of_pci_irq_enable(struct pci_dev *dev)
{
- struct of_irq oirq;
u32 virq;
int ret;
u8 pin;
@@ -116,12 +97,7 @@ static int x86_of_pci_irq_enable(struct pci_dev *dev)
if (!pin)
return 0;
- ret = of_irq_map_pci(dev, &oirq);
- if (ret)
- return ret;
-
- virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
+ virq = of_irq_parse_and_map_pci(dev, 0, 0);
if (virq == 0)
return -EINVAL;
dev->irq = virq;
@@ -230,7 +206,7 @@ static void __init dtb_apic_setup(void)
static void __init x86_flattree_get_config(void)
{
u32 size, map_len;
- void *new_dtb;
+ struct boot_param_header *dt;
if (!initial_dtb)
return;
@@ -238,24 +214,17 @@ static void __init x86_flattree_get_config(void)
map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK),
(u64)sizeof(struct boot_param_header));
- initial_boot_params = early_memremap(initial_dtb, map_len);
- size = be32_to_cpu(initial_boot_params->totalsize);
+ dt = early_memremap(initial_dtb, map_len);
+ size = be32_to_cpu(dt->totalsize);
if (map_len < size) {
- early_iounmap(initial_boot_params, map_len);
- initial_boot_params = early_memremap(initial_dtb, size);
+ early_iounmap(dt, map_len);
+ dt = early_memremap(initial_dtb, size);
map_len = size;
}
- new_dtb = alloc_bootmem(size);
- memcpy(new_dtb, initial_boot_params, size);
- early_iounmap(initial_boot_params, map_len);
-
- initial_boot_params = new_dtb;
-
- /* root level address cells */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
-
- unflatten_device_tree();
+ initial_boot_params = dt;
+ unflatten_and_copy_device_tree();
+ early_iounmap(dt, map_len);
}
#else
static inline void x86_flattree_get_config(void) { }
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index deb6421c9e69..d9c12d3022a7 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -25,12 +25,17 @@ unsigned int code_bytes = 64;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static int die_counter;
-void printk_address(unsigned long address, int reliable)
+static void printk_stack_address(unsigned long address, int reliable)
{
pr_cont(" [<%p>] %s%pB\n",
(void *)address, reliable ? "" : "? ", (void *)address);
}
+void printk_address(unsigned long address)
+{
+ pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
+}
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
@@ -151,7 +156,7 @@ static void print_trace_address(void *data, unsigned long addr, int reliable)
{
touch_nmi_watchdog();
printk(data);
- printk_address(addr, reliable);
+ printk_stack_address(addr, reliable);
}
static const struct stacktrace_ops print_trace_ops = {
@@ -281,7 +286,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
#else
/* Executive summary in case the oops scrolled away */
printk(KERN_ALERT "RIP ");
- printk_address(regs->ip, 1);
+ printk_address(regs->ip);
printk(" RSP <%016lx>\n", regs->sp);
#endif
return 0;
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index d15f575a861b..01d1c187c9f9 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -14,9 +14,11 @@
#include <xen/hvc-console.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include <asm/pgtable.h>
#include <linux/usb/ehci_def.h>
+#include <linux/efi.h>
+#include <asm/efi.h>
/* Simple VGA output */
#define VGABASE (__ISA_IO_base + 0xb8000)
@@ -234,6 +236,11 @@ static int __init setup_early_printk(char *buf)
early_console_register(&early_hsu_console, keep);
}
#endif
+#ifdef CONFIG_EARLY_PRINTK_EFI
+ if (!strncmp(buf, "efi", 3))
+ early_console_register(&early_efi_console, keep);
+#endif
+
buf++;
}
return 0;
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f0dcb0ceb6a2..fd1bc1b15e6d 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -362,12 +362,9 @@ END(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
- cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
- jnz restore_all
need_resched:
- movl TI_flags(%ebp), %ecx # need_resched set ?
- testb $_TIF_NEED_RESCHED, %cl
- jz restore_all
+ cmpl $0,PER_CPU_VAR(__preempt_count)
+ jnz restore_all
testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all
call preempt_schedule_irq
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b077f4cc225a..603be7c70675 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1103,10 +1103,8 @@ retint_signal:
/* Returning to kernel space. Check if we need preemption */
/* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
- cmpl $0,TI_preempt_count(%rcx)
+ cmpl $0,PER_CPU_VAR(__preempt_count)
jnz retint_restore_args
- bt $TIF_NEED_RESCHED,TI_flags(%rcx)
- jnc retint_restore_args
bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
jnc retint_restore_args
call preempt_schedule_irq
@@ -1342,7 +1340,7 @@ bad_gs:
.previous
/* Call softirq on interrupt stack. Interrupts are off. */
-ENTRY(call_softirq)
+ENTRY(do_softirq_own_stack)
CFI_STARTPROC
pushq_cfi %rbp
CFI_REL_OFFSET rbp,0
@@ -1359,7 +1357,7 @@ ENTRY(call_softirq)
decl PER_CPU_VAR(irq_count)
ret
CFI_ENDPROC
-END(call_softirq)
+END(do_softirq_own_stack)
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
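
The entry_32.S/entry_64.S hunks above collapse the old "preempt_count in thread_info plus TIF_NEED_RESCHED" test into a single compare of the per-CPU __preempt_count against zero, which only works because the need-resched state is folded into that counter as an inverted bit. A C-level sketch of the resulting check, assuming that folding; the plain global stands in for the per-CPU variable and the PREEMPT_NEED_RESCHED value is illustrative:

#include <stdio.h>

#define PREEMPT_NEED_RESCHED 0x80000000U	/* assumed: bit cleared when a resched is needed */

static unsigned int preempt_count = PREEMPT_NEED_RESCHED;	/* stand-in for the per-CPU var */

/* Return-to-kernel path: preempt only when the whole count reads zero. */
static int should_preempt(int irqs_enabled)
{
	return preempt_count == 0 && irqs_enabled;
}

int main(void)
{
	printf("preempt? %d\n", should_preempt(1));	/* 0: no resched requested yet */

	preempt_count &= ~PREEMPT_NEED_RESCHED;		/* need_resched raised */
	printf("preempt? %d\n", should_preempt(1));	/* 1: count reads zero */
	return 0;
}
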
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 06f87bece92a..c61a14a4a310 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -35,8 +35,8 @@ asmlinkage void __init i386_start_kernel(void)
/* Call the subarch specific early setup function */
switch (boot_params.hdr.hardware_subarch) {
- case X86_SUBARCH_MRST:
- x86_mrst_early_setup();
+ case X86_SUBARCH_INTEL_MID:
+ x86_intel_mid_early_setup();
break;
case X86_SUBARCH_CE4100:
x86_ce4100_early_setup();
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 0fa69127209a..05fd74f537d6 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -37,3 +37,10 @@ EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(empty_zero_page);
+
+#ifdef CONFIG_PREEMPT
+EXPORT_SYMBOL(___preempt_schedule);
+#ifdef CONFIG_CONTEXT_TRACKING
+EXPORT_SYMBOL(___preempt_schedule_context);
+#endif
+#endif
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 9a5c460404dc..2e977b5d61dd 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -312,8 +312,7 @@ static void init_8259A(int auto_eoi)
*/
outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
- /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64,
- to 0x20-0x27 on i386 */
+ /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
/* 8259A-1 (the master) has a slave on IR2 */
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 4186755f1d7c..d7fcbedc9c43 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -100,9 +100,6 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
irqctx->tinfo.task = curctx->tinfo.task;
irqctx->tinfo.previous_esp = current_stack_pointer;
- /* Copy the preempt_count so that the [soft]irq checks work. */
- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
-
if (unlikely(overflow))
call_on_stack(print_stack_overflow, isp);
@@ -131,7 +128,6 @@ void irq_ctx_init(int cpu)
THREAD_SIZE_ORDER));
memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
irqctx->tinfo.cpu = cpu;
- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
per_cpu(hardirq_ctx, cpu) = irqctx;
@@ -149,35 +145,21 @@ void irq_ctx_init(int cpu)
cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
{
- unsigned long flags;
struct thread_info *curctx;
union irq_ctx *irqctx;
u32 *isp;
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
-
- if (local_softirq_pending()) {
- curctx = current_thread_info();
- irqctx = __this_cpu_read(softirq_ctx);
- irqctx->tinfo.task = curctx->task;
- irqctx->tinfo.previous_esp = current_stack_pointer;
-
- /* build the stack frame on the softirq stack */
- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+ curctx = current_thread_info();
+ irqctx = __this_cpu_read(softirq_ctx);
+ irqctx->tinfo.task = curctx->task;
+ irqctx->tinfo.previous_esp = current_stack_pointer;
- call_on_stack(__do_softirq, isp);
- /*
- * Shouldn't happen, we returned above if in_interrupt():
- */
- WARN_ON_ONCE(softirq_count());
- }
+ /* build the stack frame on the softirq stack */
+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
- local_irq_restore(flags);
+ call_on_stack(__do_softirq, isp);
}
bool handle_irq(unsigned irq, struct pt_regs *regs)
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index d04d3ecded62..4d1c746892eb 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -87,24 +87,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
generic_handle_irq_desc(irq, desc);
return true;
}
-
-
-extern void call_softirq(void);
-
-asmlinkage void do_softirq(void)
-{
- __u32 pending;
- unsigned long flags;
-
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
- pending = local_softirq_pending();
- /* Switch to interrupt stack */
- if (pending) {
- call_softirq();
- WARN_ON_ONCE(softirq_count());
- }
- local_irq_restore(flags);
-}
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index ee11b7dfbfbb..26d5a55a2736 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -42,15 +42,27 @@ static void __jump_label_transform(struct jump_entry *entry,
int init)
{
union jump_code_union code;
+ const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
if (type == JUMP_LABEL_ENABLE) {
- /*
- * We are enabling this jump label. If it is not a nop
- * then something must have gone wrong.
- */
- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) != 0))
- bug_at((void *)entry->code, __LINE__);
+ if (init) {
+ /*
+ * Jump label is enabled for the first time.
+ * So we expect a default_nop...
+ */
+ if (unlikely(memcmp((void *)entry->code, default_nop, 5)
+ != 0))
+ bug_at((void *)entry->code, __LINE__);
+ } else {
+ /*
+ * ...otherwise expect an ideal_nop. Otherwise
+ * something went horribly wrong.
+ */
+ if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
+ != 0))
+ bug_at((void *)entry->code, __LINE__);
+ }
code.jump = 0xe9;
code.offset = entry->target -
@@ -63,7 +75,6 @@ static void __jump_label_transform(struct jump_entry *entry,
* are converting the default nop to the ideal nop.
*/
if (init) {
- const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
bug_at((void *)entry->code, __LINE__);
} else {
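
__jump_label_transform() above now distinguishes the first enable of a jump label (the site still holds the compiler's default nop) from later enables (the site must hold the ideal nop), and calls bug_at() for any other byte pattern. A small sketch of that memcmp()-based verification; the five-byte sequences below are examples, not necessarily the encodings the kernel selects at runtime:

#include <stdio.h>
#include <string.h>

#define NOP_LEN 5

static const unsigned char default_nop[NOP_LEN] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
static const unsigned char ideal_nop[NOP_LEN]   = { 0x66, 0x66, 0x66, 0x66, 0x90 };

/* Return 0 if the code at 'site' holds the nop expected for this stage. */
static int check_site(const unsigned char *site, int init)
{
	const unsigned char *expect = init ? default_nop : ideal_nop;

	if (memcmp(site, expect, NOP_LEN) != 0) {
		fprintf(stderr, "unexpected code at jump label site\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned char site[NOP_LEN];

	memcpy(site, default_nop, NOP_LEN);	/* fresh site: default nop */
	printf("first enable ok: %d\n", check_site(site, 1) == 0);

	memcpy(site, ideal_nop, NOP_LEN);	/* later: already rewritten to the ideal nop */
	printf("later enable ok: %d\n", check_site(site, 0) == 0);
	return 0;
}
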
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a0e2a8a80c94..b2046e4d0b59 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -609,7 +609,7 @@ static struct dentry *d_kvm_debug;
struct dentry *kvm_init_debugfs(void)
{
- d_kvm_debug = debugfs_create_dir("kvm", NULL);
+ d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
if (!d_kvm_debug)
printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 88458faea2f8..05266b5aae22 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -46,7 +46,7 @@ static struct class *msr_class;
static loff_t msr_seek(struct file *file, loff_t offset, int orig)
{
loff_t ret;
- struct inode *inode = file->f_mapping->host;
+ struct inode *inode = file_inode(file);
mutex_lock(&inode->i_mutex);
switch (orig) {
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index ba77ebc2c353..6fcb49ce50a1 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -113,10 +113,10 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
u64 before, delta, whole_msecs;
int remainder_ns, decimal_msecs, thishandled;
- before = local_clock();
+ before = sched_clock();
thishandled = a->handler(type, regs);
handled += thishandled;
- delta = local_clock() - before;
+ delta = sched_clock() - before;
trace_nmi_handler(a->handler, (int)delta, thishandled);
if (delta < nmi_longest_ns)
diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
new file mode 100644
index 000000000000..ca7f0d58a87d
--- /dev/null
+++ b/arch/x86/kernel/preempt.S
@@ -0,0 +1,25 @@
+
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
+#include <asm/asm.h>
+#include <asm/calling.h>
+
+ENTRY(___preempt_schedule)
+ CFI_STARTPROC
+ SAVE_ALL
+ call preempt_schedule
+ RESTORE_ALL
+ ret
+ CFI_ENDPROC
+
+#ifdef CONFIG_CONTEXT_TRACKING
+
+ENTRY(___preempt_schedule_context)
+ CFI_STARTPROC
+ SAVE_ALL
+ call preempt_schedule_context
+ RESTORE_ALL
+ ret
+ CFI_ENDPROC
+
+#endif
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c83516be1052..3fb8d95ab8b5 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -391,9 +391,9 @@ static void amd_e400_idle(void)
* The switch back from broadcast mode needs to be
* called with interrupts disabled.
*/
- local_irq_disable();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
- local_irq_enable();
+ local_irq_disable();
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+ local_irq_enable();
} else
default_idle();
}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 884f98f69354..c2ec1aa6d454 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -292,6 +292,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
set_iopl_mask(next->iopl);
/*
+ * If it were not for PREEMPT_ACTIVE we could guarantee that the
+ * preempt_count of all tasks was equal here and this would not be
+ * needed.
+ */
+ task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
+ this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
+
+ /*
* Now maybe handle debug registers and/or IO bitmaps
*/
if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index bb1dc51bab05..176ad94e1d57 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -63,7 +63,7 @@ void __show_regs(struct pt_regs *regs, int all)
unsigned int ds, cs, es;
printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
- printk_address(regs->ip, 1);
+ printk_address(regs->ip);
printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
regs->sp, regs->flags);
printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
@@ -363,6 +363,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
this_cpu_write(old_rsp, next->usersp);
this_cpu_write(current_task, next_p);
+ /*
+ * If it were not for PREEMPT_ACTIVE we could guarantee that the
+ * preempt_count of all tasks was equal here and this would not be
+ * needed.
+ */
+ task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
+ this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
+
this_cpu_write(kernel_stack,
(unsigned long)task_stack_page(next_p) +
THREAD_SIZE - KERNEL_STACK_OFFSET);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 7e920bff99a3..0f958e19573e 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -61,7 +61,7 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
if (reboot_type != BOOT_BIOS) {
reboot_type = BOOT_BIOS;
pr_info("%s series board detected. Selecting %s-method for reboots.\n",
- "BIOS", d->ident);
+ d->ident, "BIOS");
}
return 0;
}
@@ -117,7 +117,7 @@ static int __init set_pci_reboot(const struct dmi_system_id *d)
if (reboot_type != BOOT_CF9) {
reboot_type = BOOT_CF9;
pr_info("%s series board detected. Selecting %s-method for reboots.\n",
- "PCI", d->ident);
+ d->ident, "PCI");
}
return 0;
}
@@ -127,7 +127,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
if (reboot_type != BOOT_KBD) {
reboot_type = BOOT_KBD;
pr_info("%s series board detected. Selecting %s-method for reboot.\n",
- "KBD", d->ident);
+ d->ident, "KBD");
}
return 0;
}
@@ -136,194 +136,193 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
* This is a single dmi_table handling all reboot quirks.
*/
static struct dmi_system_id __initdata reboot_dmi_table[] = {
- { /* Handle problems with rebooting on Dell E520's */
- .callback = set_bios_reboot,
- .ident = "Dell E520",
+
+ /* Acer */
+ { /* Handle reboot issue on Acer Aspire one */
+ .callback = set_kbd_reboot,
+ .ident = "Acer Aspire One A110",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
},
},
- { /* Handle problems with rebooting on Dell 1300's */
- .callback = set_bios_reboot,
- .ident = "Dell PowerEdge 1300",
+
+ /* Apple */
+ { /* Handle problems with rebooting on Apple MacBook5 */
+ .callback = set_pci_reboot,
+ .ident = "Apple MacBook5",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
},
},
- { /* Handle problems with rebooting on Dell 300's */
- .callback = set_bios_reboot,
- .ident = "Dell PowerEdge 300",
+ { /* Handle problems with rebooting on Apple MacBookPro5 */
+ .callback = set_pci_reboot,
+ .ident = "Apple MacBookPro5",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
},
},
- { /* Handle problems with rebooting on Dell Optiplex 745's SFF */
- .callback = set_bios_reboot,
- .ident = "Dell OptiPlex 745",
+ { /* Handle problems with rebooting on Apple Macmini3,1 */
+ .callback = set_pci_reboot,
+ .ident = "Apple Macmini3,1",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
},
},
- { /* Handle problems with rebooting on Dell Optiplex 745's DFF */
- .callback = set_bios_reboot,
- .ident = "Dell OptiPlex 745",
+ { /* Handle problems with rebooting on the iMac9,1. */
+ .callback = set_pci_reboot,
+ .ident = "Apple iMac9,1",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
- DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
},
},
- { /* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */
+
+ /* ASUS */
+ { /* Handle problems with rebooting on ASUS P4S800 */
.callback = set_bios_reboot,
- .ident = "Dell OptiPlex 745",
+ .ident = "ASUS P4S800",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
- DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
},
},
- { /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
+
+ /* Dell */
+ { /* Handle problems with rebooting on Dell DXP061 */
.callback = set_bios_reboot,
- .ident = "Dell OptiPlex 330",
+ .ident = "Dell DXP061",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
- DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
},
},
- { /* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */
+ { /* Handle problems with rebooting on Dell E520's */
.callback = set_bios_reboot,
- .ident = "Dell OptiPlex 360",
+ .ident = "Dell E520",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"),
- DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
},
},
- { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */
- .callback = set_bios_reboot,
- .ident = "Dell OptiPlex 760",
+ { /* Handle problems with rebooting on the Latitude E5420. */
+ .callback = set_pci_reboot,
+ .ident = "Dell Latitude E5420",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"),
- DMI_MATCH(DMI_BOARD_NAME, "0G919G"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
},
},
- { /* Handle problems with rebooting on Dell 2400's */
- .callback = set_bios_reboot,
- .ident = "Dell PowerEdge 2400",
+ { /* Handle problems with rebooting on the Latitude E6320. */
+ .callback = set_pci_reboot,
+ .ident = "Dell Latitude E6320",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
},
},
- { /* Handle problems with rebooting on Dell T5400's */
- .callback = set_bios_reboot,
- .ident = "Dell Precision T5400",
+ { /* Handle problems with rebooting on the Latitude E6420. */
+ .callback = set_pci_reboot,
+ .ident = "Dell Latitude E6420",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
},
},
- { /* Handle problems with rebooting on Dell T7400's */
+ { /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
.callback = set_bios_reboot,
- .ident = "Dell Precision T7400",
+ .ident = "Dell OptiPlex 330",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T7400"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
+ DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
},
},
- { /* Handle problems with rebooting on HP laptops */
+ { /* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */
.callback = set_bios_reboot,
- .ident = "HP Compaq Laptop",
+ .ident = "Dell OptiPlex 360",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"),
+ DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
},
},
- { /* Handle problems with rebooting on Dell XPS710 */
+ { /* Handle problems with rebooting on Dell Optiplex 745's SFF */
.callback = set_bios_reboot,
- .ident = "Dell XPS710",
+ .ident = "Dell OptiPlex 745",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
},
},
- { /* Handle problems with rebooting on Dell DXP061 */
+ { /* Handle problems with rebooting on Dell Optiplex 745's DFF */
.callback = set_bios_reboot,
- .ident = "Dell DXP061",
+ .ident = "Dell OptiPlex 745",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+ DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
},
},
- { /* Handle problems with rebooting on Sony VGN-Z540N */
+ { /* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */
.callback = set_bios_reboot,
- .ident = "Sony VGN-Z540N",
+ .ident = "Dell OptiPlex 745",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+ DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
},
},
- { /* Handle problems with rebooting on ASUS P4S800 */
+ { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */
.callback = set_bios_reboot,
- .ident = "ASUS P4S800",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
- },
- },
-
- { /* Handle reboot issue on Acer Aspire one */
- .callback = set_kbd_reboot,
- .ident = "Acer Aspire One A110",
+ .ident = "Dell OptiPlex 760",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"),
+ DMI_MATCH(DMI_BOARD_NAME, "0G919G"),
},
},
- { /* Handle problems with rebooting on Apple MacBook5 */
+ { /* Handle problems with rebooting on the OptiPlex 990. */
.callback = set_pci_reboot,
- .ident = "Apple MacBook5",
+ .ident = "Dell OptiPlex 990",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
},
},
- { /* Handle problems with rebooting on Apple MacBookPro5 */
- .callback = set_pci_reboot,
- .ident = "Apple MacBookPro5",
+ { /* Handle problems with rebooting on Dell 300's */
+ .callback = set_bios_reboot,
+ .ident = "Dell PowerEdge 300",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
},
},
- { /* Handle problems with rebooting on Apple Macmini3,1 */
- .callback = set_pci_reboot,
- .ident = "Apple Macmini3,1",
+ { /* Handle problems with rebooting on Dell 1300's */
+ .callback = set_bios_reboot,
+ .ident = "Dell PowerEdge 1300",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
},
},
- { /* Handle problems with rebooting on the iMac9,1. */
- .callback = set_pci_reboot,
- .ident = "Apple iMac9,1",
+ { /* Handle problems with rebooting on Dell 2400's */
+ .callback = set_bios_reboot,
+ .ident = "Dell PowerEdge 2400",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
},
},
- { /* Handle problems with rebooting on the Latitude E6320. */
+ { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
.callback = set_pci_reboot,
- .ident = "Dell Latitude E6320",
+ .ident = "Dell PowerEdge C6100",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
},
},
{ /* Handle problems with rebooting on the Latitude E5410. */
@@ -334,54 +333,59 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
},
},
- { /* Handle problems with rebooting on the Latitude E5420. */
+ { /* Handle problems with rebooting on the Precision M6600. */
.callback = set_pci_reboot,
- .ident = "Dell Latitude E5420",
+ .ident = "Dell Precision M6600",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
},
},
- { /* Handle problems with rebooting on the Latitude E6420. */
- .callback = set_pci_reboot,
- .ident = "Dell Latitude E6420",
+ { /* Handle problems with rebooting on Dell T5400's */
+ .callback = set_bios_reboot,
+ .ident = "Dell Precision T5400",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"),
},
},
- { /* Handle problems with rebooting on the OptiPlex 990. */
- .callback = set_pci_reboot,
- .ident = "Dell OptiPlex 990",
+ { /* Handle problems with rebooting on Dell T7400's */
+ .callback = set_bios_reboot,
+ .ident = "Dell Precision T7400",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T7400"),
},
},
- { /* Handle problems with rebooting on the Precision M6600. */
- .callback = set_pci_reboot,
- .ident = "Dell Precision M6600",
+ { /* Handle problems with rebooting on Dell XPS710 */
+ .callback = set_bios_reboot,
+ .ident = "Dell XPS710",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
},
},
- { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
- .callback = set_pci_reboot,
- .ident = "Dell PowerEdge C6100",
+
+ /* Hewlett-Packard */
+ { /* Handle problems with rebooting on HP laptops */
+ .callback = set_bios_reboot,
+ .ident = "HP Compaq Laptop",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
},
},
- { /* Some C6100 machines were shipped with vendor being 'Dell'. */
- .callback = set_pci_reboot,
- .ident = "Dell PowerEdge C6100",
+
+ /* Sony */
+ { /* Handle problems with rebooting on Sony VGN-Z540N */
+ .callback = set_bios_reboot,
+ .ident = "Sony VGN-Z540N",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
- DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
},
},
+
{ }
};
@@ -535,10 +539,13 @@ static void native_machine_emergency_restart(void)
case BOOT_CF9_COND:
if (port_cf9_safe) {
- u8 cf9 = inb(0xcf9) & ~6;
+ u8 reboot_code = reboot_mode == REBOOT_WARM ?
+ 0x06 : 0x0E;
+ u8 cf9 = inb(0xcf9) & ~reboot_code;
outb(cf9|2, 0xcf9); /* Request hard reset */
udelay(50);
- outb(cf9|6, 0xcf9); /* Actually do the reset */
+ /* Actually do the reset */
+ outb(cf9|reboot_code, 0xcf9);
udelay(50);
}
reboot_type = BOOT_KBD;
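
As a side note on the BOOT_CF9_COND hunk above: the patch parameterizes the value written to port 0xCF9 so a warm reboot uses 0x06 and a cold one 0x0E. A minimal sketch of that sequence with the assumed bit meanings spelled out (illustrative only; it presumes the usual <asm/io.h>/<linux/delay.h> helpers and is not part of the patch):

static void cf9_reset_sketch(bool warm)
{
	/* bit 1 requests a reset, bit 2 makes it hard, bit 3 adds a power cycle */
	u8 code = warm ? 0x06 : 0x0E;
	u8 cf9  = inb(0xcf9) & ~code;

	outb(cf9 | 0x02, 0xcf9);	/* request the reset */
	udelay(50);
	outb(cf9 | code, 0xcf9);	/* actually perform it */
	udelay(50);
}
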
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 0aa29394ed6f..ca9622a25e95 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -12,7 +12,7 @@
#include <asm/vsyscall.h>
#include <asm/x86_init.h>
#include <asm/time.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include <asm/rtc.h>
#ifdef CONFIG_X86_32
@@ -189,9 +189,17 @@ static __init int add_rtc_cmos(void)
return 0;
/* Intel MID platforms don't have ioport rtc */
- if (mrst_identify_cpu())
+ if (intel_mid_identify_cpu())
return -ENODEV;
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
+ /* This warning can likely go away again in a year or two. */
+ pr_info("ACPI: not registering RTC platform device\n");
+ return -ENODEV;
+ }
+#endif
+
platform_device_register(&rtc_device);
dev_info(&rtc_device.dev,
"registered platform RTC device (no PNP device found)\n");
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index f0de6294b955..4ad8968d6106 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -824,6 +824,20 @@ static void __init trim_low_memory_range(void)
}
/*
+ * Dump out kernel offset information on panic.
+ */
+static int
+dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
+{
+ pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
+ "(relocation range: 0x%lx-0x%lx)\n",
+ (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
+ __START_KERNEL_map, MODULES_VADDR-1);
+
+ return 0;
+}
+
+/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
* for initialization. Note, the efi init code path is determined by the
@@ -993,6 +1007,7 @@ void __init setup_arch(char **cmdline_p)
efi_init();
dmi_scan_machine();
+ dmi_memdev_walk();
dmi_set_dump_stack_arch_desc();
/*
@@ -1242,3 +1257,15 @@ void __init i386_reserve_resources(void)
}
#endif /* CONFIG_X86_32 */
+
+static struct notifier_block kernel_offset_notifier = {
+ .notifier_call = dump_kernel_offset
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kernel_offset_notifier);
+ return 0;
+}
+__initcall(register_kernel_offset_dumper);
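
The two setup.c additions above belong together: dump_kernel_offset() prints the KASLR displacement of _text, and register_kernel_offset_dumper() hangs it off the panic notifier chain. For context, a simplified sketch of how that chain is driven (modelled on the generic notifier API; not taken from this patch):

/* roughly what kernel/panic.c does before halting the machine */
static void panic_notifiers_sketch(const char *msg)
{
	/*
	 * Every notifier_block registered on panic_notifier_list --
	 * including kernel_offset_notifier above -- has its
	 * ->notifier_call() invoked once here.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, (void *)msg);
}
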
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6cacab671f9b..85dc05a3aa02 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -73,36 +73,14 @@
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
-
#include <asm/smpboot_hooks.h>
#include <asm/i8259.h>
-
#include <asm/realmode.h>
+#include <asm/misc.h>
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * We need this for trampoline_base protection from concurrent accesses when
- * off- and onlining cores wildly.
- */
-static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
-
-void cpu_hotplug_driver_lock(void)
-{
- mutex_lock(&x86_cpu_hotplug_driver_mutex);
-}
-
-void cpu_hotplug_driver_unlock(void)
-{
- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
-}
-
-ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
-ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
-#endif
-
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
@@ -648,22 +626,46 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
return (send_status | accept_status);
}
+void smp_announce(void)
+{
+ int num_nodes = num_online_nodes();
+
+ printk(KERN_INFO "x86: Booted up %d node%s, %d CPUs\n",
+ num_nodes, (num_nodes > 1 ? "s" : ""), num_online_cpus());
+}
+
/* reduce the number of lines printed when booting a large cpu count system */
static void announce_cpu(int cpu, int apicid)
{
static int current_node = -1;
int node = early_cpu_to_node(cpu);
- int max_cpu_present = find_last_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
+ static int width, node_width;
+
+ if (!width)
+ width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */
+
+ if (!node_width)
+ node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */
+
+ if (cpu == 1)
+ printk(KERN_INFO "x86: Booting SMP configuration:\n");
if (system_state == SYSTEM_BOOTING) {
if (node != current_node) {
if (current_node > (-1))
- pr_cont(" OK\n");
+ pr_cont("\n");
current_node = node;
- pr_info("Booting Node %3d, Processors ", node);
+
+ printk(KERN_INFO ".... node %*s#%d, CPUs: ",
+ node_width - num_digits(node), " ", node);
}
- pr_cont(" #%4d%s", cpu, cpu == max_cpu_present ? " OK\n" : "");
- return;
+
+ /* Add padding for the BSP */
+ if (cpu == 1)
+ pr_cont("%*s", width + 1, " ");
+
+ pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);
+
} else
pr_info("Booting Node %d Processor %d APIC 0x%x\n",
node, cpu, apicid);
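
The announce_cpu() rework above collapses the per-CPU boot lines into fixed-width columns, sized from num_digits() of the largest possible CPU/node number. The formatting itself is plain printf field-width padding; a stand-alone userspace illustration (hypothetical CPU count, not kernel code):

#include <stdio.h>

static int num_digits(int v)
{
	int n = 1;

	while (v >= 10) {
		v /= 10;
		n++;
	}
	return n;
}

int main(void)
{
	int possible = 256;			/* hypothetical CPU count */
	int width = num_digits(possible) + 1;	/* + '#' sign */
	int cpu;

	printf(".... node #0, CPUs: ");
	for (cpu = 1; cpu <= 8; cpu++)
		printf("%*s#%d", width - num_digits(cpu), " ", cpu);
	putchar('\n');
	return 0;
}
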
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 6e60b5fe2244..649b010da00b 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -65,29 +65,32 @@ int __ref _debug_hotplug_cpu(int cpu, int action)
if (!cpu_is_hotpluggable(cpu))
return -EINVAL;
- cpu_hotplug_driver_lock();
+ lock_device_hotplug();
switch (action) {
case 0:
ret = cpu_down(cpu);
if (!ret) {
pr_info("CPU %u is now offline\n", cpu);
+ dev->offline = true;
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
} else
pr_debug("Can't offline CPU%d.\n", cpu);
break;
case 1:
ret = cpu_up(cpu);
- if (!ret)
+ if (!ret) {
+ dev->offline = false;
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
- else
+ } else {
pr_debug("Can't online CPU%d.\n", cpu);
+ }
break;
default:
ret = -EINVAL;
}
- cpu_hotplug_driver_unlock();
+ unlock_device_hotplug();
return ret;
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 8c8093b146ca..729aa779ff75 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -88,7 +88,7 @@ static inline void conditional_sti(struct pt_regs *regs)
static inline void preempt_conditional_sti(struct pt_regs *regs)
{
- inc_preempt_count();
+ preempt_count_inc();
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
@@ -103,7 +103,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
{
if (regs->flags & X86_EFLAGS_IF)
local_irq_disable();
- dec_preempt_count();
+ preempt_count_dec();
}
static int __kprobes
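
preempt_conditional_sti()/preempt_conditional_cli() above switch to the preempt_count_inc()/preempt_count_dec() helpers from the per-CPU preempt count rework; on x86 these reduce to per-CPU arithmetic on __preempt_count. A rough sketch of their shape, assuming the asm/preempt.h implementation (not copied from this patch):

/* __preempt_count is DECLARE_PER_CPU(int, __preempt_count) in asm/preempt.h */
static __always_inline void preempt_count_inc_sketch(void)
{
	__this_cpu_add(__preempt_count, 1);
}

static __always_inline void preempt_count_dec_sketch(void)
{
	__this_cpu_sub(__preempt_count, 1);
}
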
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 10c4f3006afd..da6b35a98260 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -199,6 +199,15 @@ SECTIONS
__x86_cpu_dev_end = .;
}
+#ifdef CONFIG_X86_INTEL_MID
+ .x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
+ LOAD_OFFSET) {
+ __x86_intel_mid_dev_start = .;
+ *(.x86_intel_mid_dev.init)
+ __x86_intel_mid_dev_end = .;
+ }
+#endif
+
/*
* start address and size of operations which during runtime
* can be patched with virtualization friendly instructions or
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index b014d9414d08..040681928e9d 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -66,3 +66,10 @@ EXPORT_SYMBOL(empty_zero_page);
#ifndef CONFIG_PARAVIRT
EXPORT_SYMBOL(native_load_gs_index);
#endif
+
+#ifdef CONFIG_PREEMPT
+EXPORT_SYMBOL(___preempt_schedule);
+#ifdef CONFIG_CONTEXT_TRACKING
+EXPORT_SYMBOL(___preempt_schedule_context);
+#endif
+#endif
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index a47a3e54b964..b89c5db2b832 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -38,6 +38,7 @@ config KVM
select PERF_EVENTS
select HAVE_KVM_MSI
select HAVE_KVM_CPU_RELAX_INTERCEPT
+ select KVM_VFIO
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index bf4fb04d0112..25d22b2d6509 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -9,7 +9,7 @@ KVM := ../../../virt/kvm
kvm-y += $(KVM)/kvm_main.o $(KVM)/ioapic.o \
$(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o \
- $(KVM)/eventfd.o $(KVM)/irqchip.o
+ $(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o
kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += $(KVM)/assigned-dev.o $(KVM)/iommu.o
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index b110fe6c03d4..86d5756dda07 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -23,6 +23,26 @@
#include "mmu.h"
#include "trace.h"
+static u32 xstate_required_size(u64 xstate_bv)
+{
+ int feature_bit = 0;
+ u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
+
+ xstate_bv &= ~XSTATE_FPSSE;
+ while (xstate_bv) {
+ if (xstate_bv & 0x1) {
+ u32 eax, ebx, ecx, edx;
+ cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
+ ret = max(ret, eax + ebx);
+ }
+
+ xstate_bv >>= 1;
+ feature_bit++;
+ }
+
+ return ret;
+}
+
void kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -46,6 +66,18 @@ void kvm_update_cpuid(struct kvm_vcpu *vcpu)
apic->lapic_timer.timer_mode_mask = 1 << 17;
}
+ best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
+ if (!best) {
+ vcpu->arch.guest_supported_xcr0 = 0;
+ vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
+ } else {
+ vcpu->arch.guest_supported_xcr0 =
+ (best->eax | ((u64)best->edx << 32)) &
+ host_xcr0 & KVM_SUPPORTED_XCR0;
+ vcpu->arch.guest_xstate_size =
+ xstate_required_size(vcpu->arch.guest_supported_xcr0);
+ }
+
kvm_pmu_cpuid_update(vcpu);
}
@@ -182,13 +214,35 @@ static bool supported_xcr0_bit(unsigned bit)
{
u64 mask = ((u64)1 << bit);
- return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
+ return mask & KVM_SUPPORTED_XCR0 & host_xcr0;
}
#define F(x) bit(X86_FEATURE_##x)
-static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
- u32 index, int *nent, int maxnent)
+static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
+ u32 func, u32 index, int *nent, int maxnent)
+{
+ switch (func) {
+ case 0:
+ entry->eax = 1; /* only one leaf currently */
+ ++*nent;
+ break;
+ case 1:
+ entry->ecx = F(MOVBE);
+ ++*nent;
+ break;
+ default:
+ break;
+ }
+
+ entry->function = func;
+ entry->index = index;
+
+ return 0;
+}
+
+static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ u32 index, int *nent, int maxnent)
{
int r;
unsigned f_nx = is_efer_nx() ? F(NX) : 0;
@@ -383,6 +437,8 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
case 0xd: {
int idx, i;
+ entry->eax &= host_xcr0 & KVM_SUPPORTED_XCR0;
+ entry->edx &= (host_xcr0 & KVM_SUPPORTED_XCR0) >> 32;
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
for (idx = 1, i = 1; idx < 64; ++idx) {
if (*nent >= maxnent)
@@ -481,6 +537,15 @@ out:
return r;
}
+static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
+ u32 idx, int *nent, int maxnent, unsigned int type)
+{
+ if (type == KVM_GET_EMULATED_CPUID)
+ return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);
+
+ return __do_cpuid_ent(entry, func, idx, nent, maxnent);
+}
+
#undef F
struct kvm_cpuid_param {
@@ -495,8 +560,34 @@ static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}
-int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
- struct kvm_cpuid_entry2 __user *entries)
+static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
+ __u32 num_entries, unsigned int ioctl_type)
+{
+ int i;
+
+ if (ioctl_type != KVM_GET_EMULATED_CPUID)
+ return false;
+
+ /*
+ * We want to make sure that ->padding is being passed clean from
+ * userspace in case we want to use it for something in the future.
+ *
+ * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we have
+ * to content ourselves with checking only the emulated side. /me
+ * sheds a tear.
+ */
+ for (i = 0; i < num_entries; i++) {
+ if (entries[i].padding[0] ||
+ entries[i].padding[1] ||
+ entries[i].padding[2])
+ return true;
+ }
+ return false;
+}
+
+int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries,
+ unsigned int type)
{
struct kvm_cpuid_entry2 *cpuid_entries;
int limit, nent = 0, r = -E2BIG, i;
@@ -513,8 +604,12 @@ int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
goto out;
if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
cpuid->nent = KVM_MAX_CPUID_ENTRIES;
+
+ if (sanity_check_entries(entries, cpuid->nent, type))
+ return -EINVAL;
+
r = -ENOMEM;
- cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
+ cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
if (!cpuid_entries)
goto out;
@@ -526,7 +621,7 @@ int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
continue;
r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
- &nent, cpuid->nent);
+ &nent, cpuid->nent, type);
if (r)
goto out_free;
@@ -537,7 +632,7 @@ int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
limit = cpuid_entries[nent - 1].eax;
for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
- &nent, cpuid->nent);
+ &nent, cpuid->nent, type);
if (r)
goto out_free;
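
xstate_required_size() above derives the guest XSAVE buffer size from CPUID leaf 0xD: for every enabled extended-state bit, sub-leaf EAX reports the component's size and EBX its offset, and the buffer must cover the largest offset + size. The same calculation as a self-contained userspace sketch (GCC's __cpuid_count; the header constants mirror the 512-byte legacy area plus 64-byte XSAVE header):

#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>

#define XSAVE_HDR_OFFSET	512	/* legacy FXSAVE region comes first */
#define XSAVE_HDR_SIZE		64

static uint32_t xstate_required_size(uint64_t xstate_bv)
{
	uint32_t eax, ebx, ecx, edx;
	uint32_t ret = XSAVE_HDR_OFFSET + XSAVE_HDR_SIZE;
	int bit = 0;

	xstate_bv &= ~0x3ULL;		/* x87/SSE live in the legacy area */
	while (xstate_bv) {
		if (xstate_bv & 1) {
			__cpuid_count(0xD, bit, eax, ebx, ecx, edx);
			if (ebx + eax > ret)	/* offset + size of component */
				ret = ebx + eax;
		}
		xstate_bv >>= 1;
		bit++;
	}
	return ret;
}

int main(void)
{
	/* hypothetical mask: x87 | SSE | AVX */
	printf("required size: %u bytes\n", xstate_required_size(0x7));
	return 0;
}
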
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index b7fd07984888..f1e4895174b2 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -6,8 +6,9 @@
void kvm_update_cpuid(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index);
-int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
- struct kvm_cpuid_entry2 __user *entries);
+int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries,
+ unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid *cpuid,
struct kvm_cpuid_entry __user *entries);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index ddc3f3d2afdb..16c037e7db7d 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -130,7 +130,7 @@
#define Mov (1<<20)
/* Misc flags */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
-#define VendorSpecific (1<<22) /* Vendor specific instruction */
+#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined (1<<25) /* No Such Instruction */
@@ -2961,6 +2961,46 @@ static int em_mov(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+#define FFL(x) bit(X86_FEATURE_##x)
+
+static int em_movbe(struct x86_emulate_ctxt *ctxt)
+{
+ u32 ebx, ecx, edx, eax = 1;
+ u16 tmp;
+
+ /*
+ * Check MOVBE is set in the guest-visible CPUID leaf.
+ */
+ ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+ if (!(ecx & FFL(MOVBE)))
+ return emulate_ud(ctxt);
+
+ switch (ctxt->op_bytes) {
+ case 2:
+ /*
+ * From MOVBE definition: "...When the operand size is 16 bits,
+ * the upper word of the destination register remains unchanged
+ * ..."
+ *
+ * Both casting ->valptr and ->val to u16 breaks strict aliasing
+ * rules, so we have to do the operation almost by hand.
+ */
+ tmp = (u16)ctxt->src.val;
+ ctxt->dst.val &= ~0xffffUL;
+ ctxt->dst.val |= (unsigned long)swab16(tmp);
+ break;
+ case 4:
+ ctxt->dst.val = swab32((u32)ctxt->src.val);
+ break;
+ case 8:
+ ctxt->dst.val = swab64(ctxt->src.val);
+ break;
+ default:
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+ return X86EMUL_CONTINUE;
+}
+
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
@@ -3256,6 +3296,18 @@ static int em_cpuid(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int em_sahf(struct x86_emulate_ctxt *ctxt)
+{
+ u32 flags;
+
+ flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
+ flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
+
+ ctxt->eflags &= ~0xffUL;
+ ctxt->eflags |= flags | X86_EFLAGS_FIXED;
+ return X86EMUL_CONTINUE;
+}
+
static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
@@ -3502,7 +3554,7 @@ static const struct opcode group7_rm1[] = {
static const struct opcode group7_rm3[] = {
DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
- II(SrcNone | Prot | VendorSpecific, em_vmmcall, vmmcall),
+ II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
DIP(SrcNone | Prot | Priv, stgi, check_svme),
@@ -3587,7 +3639,7 @@ static const struct group_dual group7 = { {
II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
- I(SrcNone | Priv | VendorSpecific, em_vmcall),
+ I(SrcNone | Priv | EmulateOnUD, em_vmcall),
EXT(0, group7_rm1),
N, EXT(0, group7_rm3),
II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
@@ -3750,7 +3802,8 @@ static const struct opcode opcode_table[256] = {
D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
I(SrcImmFAddr | No64, em_call_far), N,
II(ImplicitOps | Stack, em_pushf, pushf),
- II(ImplicitOps | Stack, em_popf, popf), N, I(ImplicitOps, em_lahf),
+ II(ImplicitOps | Stack, em_popf, popf),
+ I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
/* 0xA0 - 0xA7 */
I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
@@ -3810,7 +3863,7 @@ static const struct opcode opcode_table[256] = {
static const struct opcode twobyte_table[256] = {
/* 0x00 - 0x0F */
G(0, group6), GD(0, &group7), N, N,
- N, I(ImplicitOps | VendorSpecific, em_syscall),
+ N, I(ImplicitOps | EmulateOnUD, em_syscall),
II(ImplicitOps | Priv, em_clts, clts), N,
DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
N, D(ImplicitOps | ModRM), N, N,
@@ -3830,8 +3883,8 @@ static const struct opcode twobyte_table[256] = {
IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
II(ImplicitOps | Priv, em_rdmsr, rdmsr),
IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
- I(ImplicitOps | VendorSpecific, em_sysenter),
- I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
+ I(ImplicitOps | EmulateOnUD, em_sysenter),
+ I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
N, N,
N, N, N, N, N, N, N, N,
/* 0x40 - 0x4F */
@@ -3892,6 +3945,30 @@ static const struct opcode twobyte_table[256] = {
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
+static const struct gprefix three_byte_0f_38_f0 = {
+ I(DstReg | SrcMem | Mov, em_movbe), N, N, N
+};
+
+static const struct gprefix three_byte_0f_38_f1 = {
+ I(DstMem | SrcReg | Mov, em_movbe), N, N, N
+};
+
+/*
+ * Insns below are selected by the prefix; the table is indexed by the
+ * third opcode byte.
+ */
+static const struct opcode opcode_map_0f_38[256] = {
+ /* 0x00 - 0x7f */
+ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
+ /* 0x80 - 0xef */
+ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
+ /* 0xf0 - 0xf1 */
+ GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
+ GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
+ /* 0xf2 - 0xff */
+ N, N, X4(N), X8(N)
+};
+
#undef D
#undef N
#undef G
@@ -4126,6 +4203,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
ctxt->_eip = ctxt->eip;
ctxt->fetch.start = ctxt->_eip;
ctxt->fetch.end = ctxt->fetch.start + insn_len;
+ ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
@@ -4208,9 +4286,16 @@ done_prefixes:
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
- ctxt->twobyte = 1;
+ ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
+
+ /* 0F_38 opcode map */
+ if (ctxt->b == 0x38) {
+ ctxt->opcode_len = 3;
+ ctxt->b = insn_fetch(u8, ctxt);
+ opcode = opcode_map_0f_38[ctxt->b];
+ }
}
ctxt->d = opcode.flags;
@@ -4267,7 +4352,7 @@ done_prefixes:
if (ctxt->d == 0 || (ctxt->d & NotImpl))
return EMULATION_FAILED;
- if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
+ if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
return EMULATION_FAILED;
if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
@@ -4540,8 +4625,10 @@ special_insn:
goto writeback;
}
- if (ctxt->twobyte)
+ if (ctxt->opcode_len == 2)
goto twobyte_insn;
+ else if (ctxt->opcode_len == 3)
+ goto threebyte_insn;
switch (ctxt->b) {
case 0x63: /* movsxd */
@@ -4726,6 +4813,8 @@ twobyte_insn:
goto cannot_emulate;
}
+threebyte_insn:
+
if (rc != X86EMUL_CONTINUE)
goto done;
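
The emulator changes above add a 0F 38 opcode map whose F0/F1 entries dispatch MOVBE through em_movbe(); the instruction itself is just a byte swap, with the 16-bit form leaving the destination's upper bits alone. A minimal userspace illustration of that semantics (helper names made up, not emulator code):

#include <stdio.h>
#include <stdint.h>

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

/* MOVBE r16, m16: only the low word of the destination changes */
static uint64_t movbe16(uint64_t dst, uint16_t src)
{
	dst &= ~0xffffULL;
	dst |= swab16(src);
	return dst;
}

int main(void)
{
	uint64_t rax = 0xdeadbeef00000000ULL;

	rax = movbe16(rax, 0x1234);
	printf("rax = %#llx\n", (unsigned long long)rax);	/* ...00003412 */
	return 0;
}
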
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index dce0df8150df..40772ef0f2b1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2570,11 +2570,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
kvm_release_pfn_clean(pfn);
}
-static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
-{
- mmu_free_roots(vcpu);
-}
-
static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
bool no_dirty_log)
{
@@ -3424,18 +3419,11 @@ out_unlock:
return 0;
}
-static void nonpaging_free(struct kvm_vcpu *vcpu)
-{
- mmu_free_roots(vcpu);
-}
-
-static int nonpaging_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void nonpaging_init_context(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
{
- context->new_cr3 = nonpaging_new_cr3;
context->page_fault = nonpaging_page_fault;
context->gva_to_gpa = nonpaging_gva_to_gpa;
- context->free = nonpaging_free;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
@@ -3444,7 +3432,6 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu,
context->root_hpa = INVALID_PAGE;
context->direct_map = true;
context->nx = false;
- return 0;
}
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
@@ -3454,9 +3441,8 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);
-static void paging_new_cr3(struct kvm_vcpu *vcpu)
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
{
- pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
mmu_free_roots(vcpu);
}
@@ -3471,11 +3457,6 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}
-static void paging_free(struct kvm_vcpu *vcpu)
-{
- nonpaging_free(vcpu);
-}
-
static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
unsigned access, int *nr_present)
{
@@ -3665,9 +3646,9 @@ static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
mmu->last_pte_bitmap = map;
}
-static int paging64_init_context_common(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context,
- int level)
+static void paging64_init_context_common(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context,
+ int level)
{
context->nx = is_nx(vcpu);
context->root_level = level;
@@ -3677,27 +3658,24 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
update_last_pte_bitmap(vcpu, context);
ASSERT(is_pae(vcpu));
- context->new_cr3 = paging_new_cr3;
context->page_fault = paging64_page_fault;
context->gva_to_gpa = paging64_gva_to_gpa;
context->sync_page = paging64_sync_page;
context->invlpg = paging64_invlpg;
context->update_pte = paging64_update_pte;
- context->free = paging_free;
context->shadow_root_level = level;
context->root_hpa = INVALID_PAGE;
context->direct_map = false;
- return 0;
}
-static int paging64_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void paging64_init_context(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
{
- return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
+ paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
}
-static int paging32_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void paging32_init_context(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
{
context->nx = false;
context->root_level = PT32_ROOT_LEVEL;
@@ -3706,33 +3684,28 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
update_permission_bitmask(vcpu, context, false);
update_last_pte_bitmap(vcpu, context);
- context->new_cr3 = paging_new_cr3;
context->page_fault = paging32_page_fault;
context->gva_to_gpa = paging32_gva_to_gpa;
- context->free = paging_free;
context->sync_page = paging32_sync_page;
context->invlpg = paging32_invlpg;
context->update_pte = paging32_update_pte;
context->shadow_root_level = PT32E_ROOT_LEVEL;
context->root_hpa = INVALID_PAGE;
context->direct_map = false;
- return 0;
}
-static int paging32E_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void paging32E_init_context(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
{
- return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
+ paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}
-static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = vcpu->arch.walk_mmu;
context->base_role.word = 0;
- context->new_cr3 = nonpaging_new_cr3;
context->page_fault = tdp_page_fault;
- context->free = nonpaging_free;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
@@ -3767,37 +3740,32 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
update_permission_bitmask(vcpu, context, false);
update_last_pte_bitmap(vcpu, context);
-
- return 0;
}
-int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
- int r;
bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
if (!is_paging(vcpu))
- r = nonpaging_init_context(vcpu, context);
+ nonpaging_init_context(vcpu, context);
else if (is_long_mode(vcpu))
- r = paging64_init_context(vcpu, context);
+ paging64_init_context(vcpu, context);
else if (is_pae(vcpu))
- r = paging32E_init_context(vcpu, context);
+ paging32E_init_context(vcpu, context);
else
- r = paging32_init_context(vcpu, context);
+ paging32_init_context(vcpu, context);
vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
vcpu->arch.mmu.base_role.smep_andnot_wp
= smep && !is_write_protection(vcpu);
-
- return r;
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
-int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
bool execonly)
{
ASSERT(vcpu);
@@ -3806,37 +3774,30 @@ int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
context->shadow_root_level = kvm_x86_ops->get_tdp_level();
context->nx = true;
- context->new_cr3 = paging_new_cr3;
context->page_fault = ept_page_fault;
context->gva_to_gpa = ept_gva_to_gpa;
context->sync_page = ept_sync_page;
context->invlpg = ept_invlpg;
context->update_pte = ept_update_pte;
- context->free = paging_free;
context->root_level = context->shadow_root_level;
context->root_hpa = INVALID_PAGE;
context->direct_map = false;
update_permission_bitmask(vcpu, context, true);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
-
- return 0;
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
-static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
+static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
- int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
-
+ kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
vcpu->arch.walk_mmu->get_cr3 = get_cr3;
vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
-
- return r;
}
-static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
@@ -3873,11 +3834,9 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
update_permission_bitmask(vcpu, g_context, false);
update_last_pte_bitmap(vcpu, g_context);
-
- return 0;
}
-static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_mmu(struct kvm_vcpu *vcpu)
{
if (mmu_is_nested(vcpu))
return init_kvm_nested_mmu(vcpu);
@@ -3887,18 +3846,12 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
return init_kvm_softmmu(vcpu);
}
-static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
+void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
- if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
- /* mmu.free() should set root_hpa = INVALID_PAGE */
- vcpu->arch.mmu.free(vcpu);
-}
-int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
-{
- destroy_kvm_mmu(vcpu);
- return init_kvm_mmu(vcpu);
+ kvm_mmu_unload(vcpu);
+ init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
@@ -3923,6 +3876,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load);
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
mmu_free_roots(vcpu);
+ WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
@@ -4281,12 +4235,12 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
return alloc_mmu_pages(vcpu);
}
-int kvm_mmu_setup(struct kvm_vcpu *vcpu)
+void kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
- return init_kvm_mmu(vcpu);
+ init_kvm_mmu(vcpu);
}
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
@@ -4428,7 +4382,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
int nr_to_scan = sc->nr_to_scan;
unsigned long freed = 0;
- raw_spin_lock(&kvm_lock);
+ spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
int idx;
@@ -4478,9 +4432,8 @@ unlock:
break;
}
- raw_spin_unlock(&kvm_lock);
+ spin_unlock(&kvm_lock);
return freed;
-
}
static unsigned long
@@ -4574,7 +4527,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
- destroy_kvm_mmu(vcpu);
+ kvm_mmu_unload(vcpu);
free_mmu_pages(vcpu);
mmu_free_memory_caches(vcpu);
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 77e044a0f5f7..292615274358 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -70,8 +70,8 @@ enum {
};
int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
-int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
-int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
bool execonly);
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c0bc80391e40..c7168a5cff1b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1959,11 +1959,9 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
nested_svm_vmexit(svm);
}
-static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
+static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
- int r;
-
- r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
+ kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
@@ -1971,8 +1969,6 @@ static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
vcpu->arch.mmu.shadow_root_level = get_npt_level();
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
-
- return r;
}
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2b2fce1b2009..e293a62a11d6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1498,7 +1498,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
break;
if (i == NR_AUTOLOAD_MSRS) {
- printk_once(KERN_WARNING"Not enough mst switch entries. "
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
"Can't add msr %x\n", msr);
return;
} else if (i == m->nr) {
@@ -1898,16 +1898,12 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
/*
* KVM wants to inject the page-faults it got into the guest. This function
* checks whether, in a nested guest, we need to inject them to L1 or L2.
- * This function assumes it is called with the exit reason in vmcs02 being
- * a #PF exception (this is the only case in which KVM injects a #PF when L2
- * is running).
*/
-static int nested_pf_handled(struct kvm_vcpu *vcpu)
+static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
- if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR)))
+ if (!(vmcs12->exception_bitmap & (1u << nr)))
return 0;
nested_vmx_vmexit(vcpu);
@@ -1921,8 +1917,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 intr_info = nr | INTR_INFO_VALID_MASK;
- if (nr == PF_VECTOR && is_guest_mode(vcpu) &&
- !vmx->nested.nested_run_pending && nested_pf_handled(vcpu))
+ if (!reinject && is_guest_mode(vcpu) &&
+ nested_vmx_check_exception(vcpu, nr))
return;
if (has_error_code) {
@@ -2204,9 +2200,15 @@ static __init void nested_vmx_setup_ctls_msrs(void)
#ifdef CONFIG_X86_64
VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
- VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
+ VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
+ VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
+ if (!(nested_vmx_pinbased_ctls_high & PIN_BASED_VMX_PREEMPTION_TIMER) ||
+ !(nested_vmx_exit_ctls_high & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)) {
+ nested_vmx_exit_ctls_high &= ~VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
+ nested_vmx_pinbased_ctls_high &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ }
nested_vmx_exit_ctls_high |= (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
- VM_EXIT_LOAD_IA32_EFER);
+ VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER);
/* entry controls */
rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
@@ -2226,7 +2228,8 @@ static __init void nested_vmx_setup_ctls_msrs(void)
nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
nested_vmx_procbased_ctls_low = 0;
nested_vmx_procbased_ctls_high &=
- CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_USE_TSC_OFFSETING |
+ CPU_BASED_VIRTUAL_INTR_PENDING |
+ CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING |
@@ -2252,13 +2255,15 @@ static __init void nested_vmx_setup_ctls_msrs(void)
nested_vmx_secondary_ctls_low = 0;
nested_vmx_secondary_ctls_high &=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_UNRESTRICTED_GUEST |
SECONDARY_EXEC_WBINVD_EXITING;
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
- VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
+ VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
+ VMX_EPT_INVEPT_BIT;
nested_vmx_ept_caps &= vmx_capability.ept;
/*
* Since invept is completely emulated we support both global
@@ -3380,8 +3385,10 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
if (enable_ept) {
eptp = construct_eptp(cr3);
vmcs_write64(EPT_POINTER, eptp);
- guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
- vcpu->kvm->arch.ept_identity_map_addr;
+ if (is_paging(vcpu) || is_guest_mode(vcpu))
+ guest_cr3 = kvm_read_cr3(vcpu);
+ else
+ guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr;
ept_load_pdptrs(vcpu);
}
@@ -4879,6 +4886,17 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
hypercall[2] = 0xc1;
}
+static bool nested_cr0_valid(struct vmcs12 *vmcs12, unsigned long val)
+{
+ unsigned long always_on = VMXON_CR0_ALWAYSON;
+
+ if (nested_vmx_secondary_ctls_high &
+ SECONDARY_EXEC_UNRESTRICTED_GUEST &&
+ nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
+ always_on &= ~(X86_CR0_PE | X86_CR0_PG);
+ return (val & always_on) == always_on;
+}
+
/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
{
@@ -4897,9 +4915,7 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
val = (val & ~vmcs12->cr0_guest_host_mask) |
(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
- /* TODO: will have to take unrestricted guest mode into
- * account */
- if ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON)
+ if (!nested_cr0_valid(vmcs12, val))
return 1;
if (kvm_set_cr0(vcpu, val))
@@ -6722,6 +6738,27 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
}
+static void nested_adjust_preemption_timer(struct kvm_vcpu *vcpu)
+{
+ u64 delta_tsc_l1;
+ u32 preempt_val_l1, preempt_val_l2, preempt_scale;
+
+ if (!(get_vmcs12(vcpu)->pin_based_vm_exec_control &
+ PIN_BASED_VMX_PREEMPTION_TIMER))
+ return;
+ preempt_scale = native_read_msr(MSR_IA32_VMX_MISC) &
+ MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE;
+ preempt_val_l2 = vmcs_read32(VMX_PREEMPTION_TIMER_VALUE);
+ delta_tsc_l1 = vmx_read_l1_tsc(vcpu, native_read_tsc())
+ - vcpu->arch.last_guest_tsc;
+ preempt_val_l1 = delta_tsc_l1 >> preempt_scale;
+ if (preempt_val_l2 <= preempt_val_l1)
+ preempt_val_l2 = 0;
+ else
+ preempt_val_l2 -= preempt_val_l1;
+ vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, preempt_val_l2);
+}
+
/*
* The guest has exited. See if we can fix it or if we need userspace
* assistance.
@@ -6736,20 +6773,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
if (vmx->emulation_required)
return handle_invalid_guest_state(vcpu);
- /*
- * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
- * we did not inject a still-pending event to L1 now because of
- * nested_run_pending, we need to re-enable this bit.
- */
- if (vmx->nested.nested_run_pending)
- kvm_make_request(KVM_REQ_EVENT, vcpu);
-
- if (!is_guest_mode(vcpu) && (exit_reason == EXIT_REASON_VMLAUNCH ||
- exit_reason == EXIT_REASON_VMRESUME))
- vmx->nested.nested_run_pending = 1;
- else
- vmx->nested.nested_run_pending = 0;
-
if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
nested_vmx_vmexit(vcpu);
return 1;
@@ -7061,9 +7084,9 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
case INTR_TYPE_HARD_EXCEPTION:
if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
u32 err = vmcs_read32(error_code_field);
- kvm_queue_exception_e(vcpu, vector, err);
+ kvm_requeue_exception_e(vcpu, vector, err);
} else
- kvm_queue_exception(vcpu, vector);
+ kvm_requeue_exception(vcpu, vector);
break;
case INTR_TYPE_SOFT_INTR:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
@@ -7146,6 +7169,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
atomic_switch_perf_msrs(vmx);
debugctlmsr = get_debugctlmsr();
+ if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending)
+ nested_adjust_preemption_timer(vcpu);
vmx->__launched = vmx->loaded_vmcs->launched;
asm(
/* Store host registers */
@@ -7284,6 +7309,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
+ /*
+ * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
+ * we did not inject a still-pending event to L1 now because of
+ * nested_run_pending, we need to re-enable this bit.
+ */
+ if (vmx->nested.nested_run_pending)
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+ vmx->nested.nested_run_pending = 0;
+
vmx_complete_atomic_exit(vmx);
vmx_recover_nmi_blocking(vmx);
vmx_complete_interrupts(vmx);
@@ -7410,8 +7445,7 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
*/
if (is_mmio)
ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
- else if (vcpu->kvm->arch.iommu_domain &&
- !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
+ else if (kvm_arch_has_noncoherent_dma(vcpu->kvm))
ret = kvm_get_guest_memory_type(vcpu, gfn) <<
VMX_EPT_MT_EPTE_SHIFT;
else
@@ -7501,9 +7535,9 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
return get_vmcs12(vcpu)->ept_pointer;
}
-static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
+static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
- int r = kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
+ kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
@@ -7511,8 +7545,6 @@ static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
-
- return r;
}
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
@@ -7520,6 +7552,20 @@ static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
vcpu->arch.walk_mmu = &vcpu->arch.mmu;
}
+static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
+ struct x86_exception *fault)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ WARN_ON(!is_guest_mode(vcpu));
+
+ /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
+ if (vmcs12->exception_bitmap & (1u << PF_VECTOR))
+ nested_vmx_vmexit(vcpu);
+ else
+ kvm_inject_page_fault(vcpu, fault);
+}
+
/*
* prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
* L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -7533,6 +7579,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exec_control;
+ u32 exit_control;
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -7706,7 +7753,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
* we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
* bits are further modified by vmx_set_efer() below.
*/
- vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
+ exit_control = vmcs_config.vmexit_ctrl;
+ if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
+ exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
+ vmcs_write32(VM_EXIT_CONTROLS, exit_control);
/* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
* emulated by vmx_set_efer(), below.
@@ -7773,6 +7823,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
kvm_set_cr3(vcpu, vmcs12->guest_cr3);
kvm_mmu_reset_context(vcpu);
+ if (!enable_ept)
+ vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
+
/*
* L1 may access the L2's PDPTR, so save them to construct vmcs12
*/
@@ -7876,7 +7929,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
return 1;
}
- if (((vmcs12->guest_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
+ if (!nested_cr0_valid(vmcs12, vmcs12->guest_cr0) ||
((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
@@ -7938,6 +7991,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
enter_guest_mode(vcpu);
+ vmx->nested.nested_run_pending = 1;
+
vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
cpu = get_cpu();
@@ -8005,7 +8060,7 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
u32 idt_vectoring;
unsigned int nr;
- if (vcpu->arch.exception.pending) {
+ if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) {
nr = vcpu->arch.exception.nr;
idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
@@ -8023,7 +8078,7 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
}
vmcs12->idt_vectoring_info_field = idt_vectoring;
- } else if (vcpu->arch.nmi_pending) {
+ } else if (vcpu->arch.nmi_injected) {
vmcs12->idt_vectoring_info_field =
INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
} else if (vcpu->arch.interrupt.pending) {
@@ -8105,6 +8160,11 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_pending_dbg_exceptions =
vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
+ if ((vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER) &&
+ (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER))
+ vmcs12->vmx_preemption_timer_value =
+ vmcs_read32(VMX_PREEMPTION_TIMER_VALUE);
+
/*
* In some cases (usually, nested EPT), L2 is allowed to change its
* own CR3 without exiting. If it has changed it, we must keep it.
@@ -8130,6 +8190,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
+ if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
+ vmcs12->guest_ia32_efer = vcpu->arch.efer;
vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
@@ -8201,7 +8263,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
* fpu_active (which may have changed).
* Note that vmx_set_cr0 refers to efer set above.
*/
- kvm_set_cr0(vcpu, vmcs12->host_cr0);
+ vmx_set_cr0(vcpu, vmcs12->host_cr0);
/*
* If we did fpu_activate()/fpu_deactivate() during L2's run, we need
* to apply the same changes to L1's vmcs. We just set cr0 correctly,
@@ -8224,6 +8286,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
kvm_set_cr3(vcpu, vmcs12->host_cr3);
kvm_mmu_reset_context(vcpu);
+ if (!enable_ept)
+ vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+
if (enable_vpid) {
/*
* Trivially support vpid by letting L2s share their parent
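
Among the vmx.c changes, nested_adjust_preemption_timer() charges the time L0 spent handling an exit against L2's VMX preemption timer: the elapsed L1 TSC delta is scaled down by the rate field from MSR_IA32_VMX_MISC and subtracted, saturating at zero. The arithmetic in isolation (plain C, hypothetical numbers):

#include <stdio.h>
#include <stdint.h>

/* scale the elapsed L1 TSC ticks and deduct them from the L2 timer value */
static uint32_t adjust_preemption_timer(uint32_t val_l2, uint64_t delta_tsc_l1,
					unsigned int preempt_scale)
{
	uint32_t val_l1 = (uint32_t)(delta_tsc_l1 >> preempt_scale);

	return val_l2 <= val_l1 ? 0 : val_l2 - val_l1;
}

int main(void)
{
	/* 100000 ticks left on the L2 timer, ~0.7 ms at 2.4 GHz spent in L0, scale 5 */
	printf("%u\n", adjust_preemption_timer(100000, 1600000, 5));
	return 0;
}
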
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e5ca72a5cdb6..07c127fc2064 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -577,6 +577,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
u64 xcr0;
+ u64 valid_bits;
/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
if (index != XCR_XFEATURE_ENABLED_MASK)
@@ -586,8 +587,16 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
return 1;
if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
return 1;
- if (xcr0 & ~host_xcr0)
+
+ /*
+ * Do not allow the guest to set bits that we do not support
+ * saving. However, xcr0 bit 0 is always set, even if the
+ * emulated CPU does not support XSAVE (see fx_init).
+ */
+ valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP;
+ if (xcr0 & ~valid_bits)
return 1;
+
kvm_put_guest_xcr0(vcpu);
vcpu->arch.xcr0 = xcr0;
return 0;
@@ -684,7 +693,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
vcpu->arch.cr3 = cr3;
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
- vcpu->arch.mmu.new_cr3(vcpu);
+ kvm_mmu_new_cr3(vcpu);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
@@ -2564,6 +2573,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
case KVM_CAP_SET_TSS_ADDR:
case KVM_CAP_EXT_CPUID:
+ case KVM_CAP_EXT_EMUL_CPUID:
case KVM_CAP_CLOCKSOURCE:
case KVM_CAP_PIT:
case KVM_CAP_NOP_IO_DELAY:
@@ -2673,15 +2683,17 @@ long kvm_arch_dev_ioctl(struct file *filp,
r = 0;
break;
}
- case KVM_GET_SUPPORTED_CPUID: {
+ case KVM_GET_SUPPORTED_CPUID:
+ case KVM_GET_EMULATED_CPUID: {
struct kvm_cpuid2 __user *cpuid_arg = argp;
struct kvm_cpuid2 cpuid;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
goto out;
- r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
- cpuid_arg->entries);
+
+ r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
+ ioctl);
if (r)
goto out;
@@ -2715,8 +2727,7 @@ static void wbinvd_ipi(void *garbage)
static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
- return vcpu->kvm->arch.iommu_domain &&
- !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
+ return kvm_arch_has_noncoherent_dma(vcpu->kvm);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -2984,11 +2995,13 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
- if (cpu_has_xsave)
+ if (cpu_has_xsave) {
memcpy(guest_xsave->region,
&vcpu->arch.guest_fpu.state->xsave,
- xstate_size);
- else {
+ vcpu->arch.guest_xstate_size);
+ *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &=
+ vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE;
+ } else {
memcpy(guest_xsave->region,
&vcpu->arch.guest_fpu.state->fxsave,
sizeof(struct i387_fxsave_struct));
@@ -3003,10 +3016,19 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
u64 xstate_bv =
*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
- if (cpu_has_xsave)
+ if (cpu_has_xsave) {
+ /*
+ * Here we allow setting states that are not present in
+ * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
+ * with old userspace.
+ */
+ if (xstate_bv & ~KVM_SUPPORTED_XCR0)
+ return -EINVAL;
+ if (xstate_bv & ~host_xcr0)
+ return -EINVAL;
memcpy(&vcpu->arch.guest_fpu.state->xsave,
- guest_xsave->region, xstate_size);
- else {
+ guest_xsave->region, vcpu->arch.guest_xstate_size);
+ } else {
if (xstate_bv & ~XSTATE_FPSSE)
return -EINVAL;
memcpy(&vcpu->arch.guest_fpu.state->fxsave,
@@ -3042,9 +3064,9 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
for (i = 0; i < guest_xcrs->nr_xcrs; i++)
/* Only support XCR0 currently */
- if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
+ if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
- guest_xcrs->xcrs[0].value);
+ guest_xcrs->xcrs[i].value);
break;
}
if (r)
@@ -4775,8 +4797,8 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
static void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
- memset(&ctxt->twobyte, 0,
- (void *)&ctxt->_regs - (void *)&ctxt->twobyte);
+ memset(&ctxt->opcode_len, 0,
+ (void *)&ctxt->_regs - (void *)&ctxt->opcode_len);
ctxt->fetch.start = 0;
ctxt->fetch.end = 0;
@@ -5094,8 +5116,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
ctxt->have_exception = false;
ctxt->perm_ok = false;
- ctxt->only_vendor_specific_insn
- = emulation_type & EMULTYPE_TRAP_UD;
+ ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
r = x86_decode_insn(ctxt, insn, insn_len);
@@ -5263,7 +5284,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
- raw_spin_lock(&kvm_lock);
+ spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->cpu != freq->cpu)
@@ -5273,7 +5294,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
send_ipi = 1;
}
}
- raw_spin_unlock(&kvm_lock);
+ spin_unlock(&kvm_lock);
if (freq->old < freq->new && send_ipi) {
/*
@@ -5426,12 +5447,12 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
struct kvm_vcpu *vcpu;
int i;
- raw_spin_lock(&kvm_lock);
+ spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
kvm_for_each_vcpu(i, vcpu, kvm)
set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests);
atomic_set(&kvm_guest_has_master_clock, 0);
- raw_spin_unlock(&kvm_lock);
+ spin_unlock(&kvm_lock);
}
static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
@@ -6688,7 +6709,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
if (r)
return r;
kvm_vcpu_reset(vcpu);
- r = kvm_mmu_setup(vcpu);
+ kvm_mmu_setup(vcpu);
vcpu_put(vcpu);
return r;
@@ -6940,6 +6961,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
vcpu->arch.pv_time_enabled = false;
+
+ vcpu->arch.guest_supported_xcr0 = 0;
+ vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
+
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
@@ -6981,6 +7006,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
+ atomic_set(&kvm->arch.noncoherent_dma_count, 0);
/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
@@ -7065,7 +7091,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
}
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
int i;
@@ -7086,7 +7112,8 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free,
}
}
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages)
{
int i;
@@ -7283,7 +7310,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
int r;
if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
- is_error_page(work->page))
+ work->wakeup_all)
return;
r = kvm_mmu_reload(vcpu);
@@ -7393,7 +7420,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct x86_exception fault;
trace_kvm_async_pf_ready(work->arch.token, work->gva);
- if (is_error_page(work->page))
+ if (work->wakeup_all)
work->arch.token = ~0; /* broadcast wakeup */
else
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
@@ -7420,6 +7447,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
kvm_x86_ops->interrupt_allowed(vcpu);
}
+void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
+{
+ atomic_inc(&kvm->arch.noncoherent_dma_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
+
+void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
+{
+ atomic_dec(&kvm->arch.noncoherent_dma_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
+
+bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
+{
+ return atomic_read(&kvm->arch.noncoherent_dma_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
+
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index e224f7a671b6..587fb9ede436 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -122,6 +122,7 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception);
+#define KVM_SUPPORTED_XCR0 (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
extern u64 host_xcr0;
extern struct static_key kvm_no_apic_vcpu;
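For orientation, the KVM_SUPPORTED_XCR0 mask above combines the low XCR0 feature bits. A minimal sketch of the bit positions it assumes follows; this is illustrative only and not part of the patch, the real definitions live in the xsave headers:

/* XCR0 feature bits combined by KVM_SUPPORTED_XCR0 (sketch, not part of the patch) */
#define XSTATE_FP	(1ULL << 0)	/* x87 state, must always be set in XCR0 */
#define XSTATE_SSE	(1ULL << 1)	/* SSE/XMM state */
#define XSTATE_YMM	(1ULL << 2)	/* AVX/YMM state, only valid together with XSTATE_SSE */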
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 96b2c6697c9d..992d63bb154f 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -16,7 +16,7 @@ clean-files := inat-tables.c
obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
-lib-y := delay.o
+lib-y := delay.o misc.o
lib-y += thunk_$(BITS).o
lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
diff --git a/arch/x86/lib/misc.c b/arch/x86/lib/misc.c
new file mode 100644
index 000000000000..76b373af03f0
--- /dev/null
+++ b/arch/x86/lib/misc.c
@@ -0,0 +1,21 @@
+/*
+ * Count the digits of @val including a possible sign.
+ *
+ * (Typed on and submitted from hpa's mobile phone.)
+ */
+int num_digits(int val)
+{
+ int m = 10;
+ int d = 1;
+
+ if (val < 0) {
+ d++;
+ val = -val;
+ }
+
+ while (val >= m) {
+ m *= 10;
+ d++;
+ }
+ return d;
+}
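A quick usage sketch for the helper above; the caller and the values are made up for illustration:

/* Sketch: sizing the decimal representation of a signed int. */
static int num_digits_example(void)
{
	int a = num_digits(0);		/* 1 */
	int b = num_digits(42);		/* 2 */
	int c = num_digits(-305);	/* 4: three digits plus the sign */

	return a + b + c;		/* 7 */
}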
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index a6b1b86d2253..518532e6a3fa 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -47,6 +47,21 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
}
EXPORT_SYMBOL(rdmsr_on_cpu);
+int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+ int err;
+ struct msr_info rv;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.msr_no = msr_no;
+ err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+ *q = rv.reg.q;
+
+ return err;
+}
+EXPORT_SYMBOL(rdmsrl_on_cpu);
+
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
int err;
@@ -63,6 +78,22 @@ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
}
EXPORT_SYMBOL(wrmsr_on_cpu);
+int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+ int err;
+ struct msr_info rv;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.msr_no = msr_no;
+ rv.reg.q = q;
+
+ err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+
+ return err;
+}
+EXPORT_SYMBOL(wrmsrl_on_cpu);
+
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
struct msr *msrs,
void (*msr_func) (void *info))
@@ -159,6 +190,37 @@ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);
+int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+ int err;
+ struct msr_info rv;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.msr_no = msr_no;
+ rv.reg.q = q;
+
+ err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
+
+ return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
+
+int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+ int err;
+ struct msr_info rv;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.msr_no = msr_no;
+ err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
+ *q = rv.reg.q;
+
+ return err ? err : rv.err;
+}
+EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
+
/*
* These variants are significantly slower, but allows control over
* the entire 32-bit GPR set.
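A minimal caller sketch for the new 64-bit helpers; the MSR chosen and the loop are only an example, not part of the patch:

#include <linux/smp.h>
#include <linux/printk.h>
#include <asm/msr.h>

/* Sketch: read one full 64-bit MSR on every online CPU. */
static void dump_tsc_all_cpus(void)
{
	unsigned int cpu;
	u64 val;

	for_each_online_cpu(cpu) {
		/* No need to split the value into l/h halves anymore. */
		if (!rdmsrl_on_cpu(cpu, MSR_IA32_TSC, &val))
			pr_info("cpu%u: TSC = %llu\n", cpu, val);
	}
}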
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index 4f74d94c8d97..5465b8613944 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -11,39 +11,26 @@
#include <linux/sched.h>
/*
- * best effort, GUP based copy_from_user() that is NMI-safe
+ * We rely on the nested NMI work to allow atomic faults from the NMI path; the
+ * nested NMI paths are careful to preserve CR2.
*/
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
- unsigned long offset, addr = (unsigned long)from;
- unsigned long size, len = 0;
- struct page *page;
- void *map;
- int ret;
+ unsigned long ret;
if (__range_not_ok(from, n, TASK_SIZE))
- return len;
-
- do {
- ret = __get_user_pages_fast(addr, 1, 0, &page);
- if (!ret)
- break;
-
- offset = addr & (PAGE_SIZE - 1);
- size = min(PAGE_SIZE - offset, n - len);
-
- map = kmap_atomic(page);
- memcpy(to, map+offset, size);
- kunmap_atomic(map);
- put_page(page);
-
- len += size;
- to += size;
- addr += size;
-
- } while (len < n);
-
- return len;
+ return 0;
+
+ /*
+ * Even though this function is typically called from NMI/IRQ context,
+ * disable pagefaults so that its behaviour is consistent even when
+ * called from other contexts.
+ */
+ pagefault_disable();
+ ret = __copy_from_user_inatomic(to, from, n);
+ pagefault_enable();
+
+ return n - ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
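A usage sketch for the simplified helper; the frame structure and caller are assumptions, chosen to mirror how perf-style unwinders consume the return value (the number of bytes actually copied):

#include <linux/errno.h>
#include <linux/uaccess.h>

struct user_frame {
	unsigned long next_fp;
	unsigned long ret_addr;
};

/* Sketch: best-effort fetch of one user stack frame, e.g. from NMI context. */
static int fetch_user_frame(const void __user *fp, struct user_frame *frame)
{
	unsigned long copied = copy_from_user_nmi(frame, fp, sizeof(*frame));

	return copied == sizeof(*frame) ? 0 : -EFAULT;
}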
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 3eb18acd0e40..e2f5e21c03b3 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -654,14 +654,13 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
-unsigned long
-copy_to_user(void __user *to, const void *from, unsigned long n)
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
{
if (access_ok(VERIFY_WRITE, to, n))
n = __copy_to_user(to, from, n);
return n;
}
-EXPORT_SYMBOL(copy_to_user);
+EXPORT_SYMBOL(_copy_to_user);
/**
* copy_from_user: - Copy a block of data from user space.
@@ -679,8 +678,7 @@ EXPORT_SYMBOL(copy_to_user);
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
-unsigned long
-_copy_from_user(void *to, const void __user *from, unsigned long n)
+unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
{
if (access_ok(VERIFY_READ, from, n))
n = __copy_from_user(to, from, n);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 3aaeffcfd67a..e7e1cac74e8d 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -51,7 +51,7 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
return 0;
}
-static inline int __kprobes notify_page_fault(struct pt_regs *regs)
+static inline int __kprobes kprobes_fault(struct pt_regs *regs)
{
int ret = 0;
@@ -596,7 +596,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
printk(KERN_CONT " at %p\n", (void *) address);
printk(KERN_ALERT "IP:");
- printk_address(regs->ip, 1);
+ printk_address(regs->ip);
dump_pagetable(address);
}
@@ -1048,7 +1048,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
return;
/* kprobes don't want to hook the spurious faults: */
- if (notify_page_fault(regs))
+ if (kprobes_fault(regs))
return;
/*
* Don't take the mm semaphore here. If we fixup a prefetch
@@ -1060,23 +1060,8 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
}
/* kprobes don't want to hook the spurious faults: */
- if (unlikely(notify_page_fault(regs)))
+ if (unlikely(kprobes_fault(regs)))
return;
- /*
- * It's safe to allow irq's after cr2 has been saved and the
- * vmalloc fault has been handled.
- *
- * User-mode registers count as a user access even for any
- * potential system fault or CPU buglet:
- */
- if (user_mode_vm(regs)) {
- local_irq_enable();
- error_code |= PF_USER;
- flags |= FAULT_FLAG_USER;
- } else {
- if (regs->flags & X86_EFLAGS_IF)
- local_irq_enable();
- }
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
@@ -1088,8 +1073,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
}
}
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-
/*
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
@@ -1099,6 +1082,24 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
return;
}
+ /*
+ * It's safe to allow irq's after cr2 has been saved and the
+ * vmalloc fault has been handled.
+ *
+ * User-mode registers count as a user access even for any
+ * potential system fault or CPU buglet:
+ */
+ if (user_mode_vm(regs)) {
+ local_irq_enable();
+ error_code |= PF_USER;
+ flags |= FAULT_FLAG_USER;
+ } else {
+ if (regs->flags & X86_EFLAGS_IF)
+ local_irq_enable();
+ }
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
if (error_code & PF_WRITE)
flags |= FAULT_FLAG_WRITE;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 04664cdb7fda..ce32017c5e38 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -399,8 +399,25 @@ static unsigned long __init init_range_memory_mapping(
return mapped_ram_size;
}
-/* (PUD_SHIFT-PMD_SHIFT)/2 */
-#define STEP_SIZE_SHIFT 5
+static unsigned long __init get_new_step_size(unsigned long step_size)
+{
+ /*
+ * Explain why we shift by 5 and why we don't have to worry about
+ * 'step_size << 5' overflowing:
+ *
+ * The initial mapped size is PMD_SIZE (2M).
+ * We cannot set step_size to PUD_SIZE (1G) yet.
+ * In the worst case, when we cross the 1G boundary and
+ * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
+ * to map a 1G range with PTEs. Use 5 as the shift for now.
+ *
+ * Don't need to worry about overflow, on 32bit, when step_size
+ * is 0, round_down() returns 0 for start, and that turns it
+ * into 0x100000000ULL.
+ */
+ return step_size << 5;
+}
+
void __init init_mem_mapping(void)
{
unsigned long end, real_end, start, last_start;
@@ -449,7 +466,7 @@ void __init init_mem_mapping(void)
min_pfn_mapped = last_start >> PAGE_SHIFT;
/* only increase step_size after big range get mapped */
if (new_mapped_ram_size > mapped_ram_size)
- step_size <<= STEP_SIZE_SHIFT;
+ step_size = get_new_step_size(step_size);
mapped_ram_size += new_mapped_ram_size;
}
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 4287f1ffba7e..5bdc5430597c 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -806,6 +806,9 @@ void __init mem_init(void)
BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP
+#ifdef CONFIG_RANDOMIZE_BASE
+ BUILD_BUG_ON(CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE);
+#endif
#ifdef CONFIG_HIGHMEM
BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 79c216aa0e2b..516593e1ce33 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -772,13 +772,21 @@ out:
return;
}
+static void bpf_jit_free_deferred(struct work_struct *work)
+{
+ struct sk_filter *fp = container_of(work, struct sk_filter, work);
+ unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+ struct bpf_binary_header *header = (void *)addr;
+
+ set_memory_rw(addr, header->pages);
+ module_free(NULL, header);
+ kfree(fp);
+}
+
void bpf_jit_free(struct sk_filter *fp)
{
if (fp->bpf_func != sk_run_filter) {
- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
- struct bpf_binary_header *header = (void *)addr;
-
- set_memory_rw(addr, header->pages);
- module_free(NULL, header);
+ INIT_WORK(&fp->work, bpf_jit_free_deferred);
+ schedule_work(&fp->work);
}
}
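The change above is an instance of the usual deferred-free pattern: queue a work item from the (possibly atomic) release path and do the sleeping teardown from the workqueue. A generic sketch with made-up names:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct deferred_obj {
	struct work_struct work;
	/* ... payload that must be torn down in process context ... */
};

static void deferred_obj_free(struct work_struct *work)
{
	struct deferred_obj *obj = container_of(work, struct deferred_obj, work);

	/* Safe to sleep here: flip page permissions, vfree, etc. */
	kfree(obj);
}

static void deferred_obj_release(struct deferred_obj *obj)
{
	/* May be called from atomic context, e.g. an RCU callback. */
	INIT_WORK(&obj->work, deferred_obj_free);
	schedule_work(&obj->work);
}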
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index ee0af58ca5bd..e063eed0f912 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_X86_VISWS) += visws.o
obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
obj-$(CONFIG_X86_NUMACHIP) += numachip.o
-obj-$(CONFIG_X86_INTEL_MID) += mrst.o
+obj-$(CONFIG_X86_INTEL_MID) += intel_mid_pci.o
obj-y += common.o early.o
obj-y += bus_numa.o
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index b30e937689d6..7fb24e53d4c8 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -354,12 +354,12 @@ static void coalesce_windows(struct pci_root_info *info, unsigned long type)
* the kernel resource tree doesn't allow overlaps.
*/
if (resource_overlaps(res1, res2)) {
- res1->start = min(res1->start, res2->start);
- res1->end = max(res1->end, res2->end);
+ res2->start = min(res1->start, res2->start);
+ res2->end = max(res1->end, res2->end);
dev_info(&info->bridge->dev,
"host bridge window expanded to %pR; %pR ignored\n",
- res1, res2);
- res2->flags = 0;
+ res2, res1);
+ res1->flags = 0;
}
}
}
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index f5809fa2753e..b046e070e088 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -231,7 +231,7 @@ static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int wh
offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)];
if ((offset) && (where == offset))
- value = value & 0xfffffffc;
+ value = value & ~PCI_EXP_LNKCTL_ASPMC;
return raw_pci_write(pci_domain_nr(bus), bus->number,
devfn, where, size, value);
@@ -252,7 +252,7 @@ static struct pci_ops quirk_pcie_aspm_ops = {
*/
static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
{
- int cap_base, i;
+ int i;
struct pci_bus *pbus;
struct pci_dev *dev;
@@ -278,7 +278,7 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
quirk_aspm_offset[i] = 0;
- pbus->ops = pbus->parent->ops;
+ pci_bus_set_ops(pbus, pbus->parent->ops);
} else {
/*
* If devices are attached to the root port at power-up or
@@ -286,13 +286,15 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
* each root port to save the register offsets and replace the
* bus ops.
*/
- list_for_each_entry(dev, &pbus->devices, bus_list) {
+ list_for_each_entry(dev, &pbus->devices, bus_list)
/* There are 0 to 8 devices attached to this bus */
- cap_base = pci_find_capability(dev, PCI_CAP_ID_EXP);
- quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] = cap_base + 0x10;
- }
- pbus->ops = &quirk_pcie_aspm_ops;
+ quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] =
+ dev->pcie_cap + PCI_EXP_LNKCTL;
+
+ pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops);
+ dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n");
}
+
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA1, pcie_rootport_aspm_quirk);
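For comparison, outside of a config-space shim the same bits would normally be cleared through the PCIe capability accessors; a minimal sketch, with a hypothetical caller:

#include <linux/pci.h>

static void clear_aspm_control(struct pci_dev *dev)
{
	/* Clear the ASPM Control field (bits 1:0) of Link Control. */
	pcie_capability_clear_word(dev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC);
}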
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/intel_mid_pci.c
index 903fded50786..51384ca727ad 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -1,5 +1,5 @@
/*
- * Moorestown PCI support
+ * Intel MID PCI support
* Copyright (c) 2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
@@ -150,12 +150,12 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
* shim. Therefore, use the header type in shim instead.
*/
if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
- return 0;
+ return false;
if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
|| devfn == PCI_DEVFN(0, 0)
|| devfn == PCI_DEVFN(3, 0)))
- return 1;
- return 0; /* Langwell on others */
+ return true;
+ return false; /* Langwell on others */
}
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
@@ -205,7 +205,7 @@ static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
where, size, value);
}
-static int mrst_pci_irq_enable(struct pci_dev *dev)
+static int intel_mid_pci_irq_enable(struct pci_dev *dev)
{
u8 pin;
struct io_apic_irq_attr irq_attr;
@@ -225,23 +225,23 @@ static int mrst_pci_irq_enable(struct pci_dev *dev)
return 0;
}
-struct pci_ops pci_mrst_ops = {
+struct pci_ops intel_mid_pci_ops = {
.read = pci_read,
.write = pci_write,
};
/**
- * pci_mrst_init - installs pci_mrst_ops
+ * intel_mid_pci_init - installs intel_mid_pci_ops
*
* Moorestown has an interesting PCI implementation (see above).
* Called when the early platform detection installs it.
*/
-int __init pci_mrst_init(void)
+int __init intel_mid_pci_init(void)
{
pr_info("Intel MID platform detected, using MID PCI ops\n");
pci_mmcfg_late_init();
- pcibios_enable_irq = mrst_pci_irq_enable;
- pci_root_ops = pci_mrst_ops;
+ pcibios_enable_irq = intel_mid_pci_irq_enable;
+ pci_root_ops = intel_mid_pci_ops;
pci_soc_mode = 1;
/* Continue with standard init */
return 1;
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index 01e0231a113e..20342d4c82ce 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -4,7 +4,7 @@ obj-y += efi/
obj-y += geode/
obj-y += goldfish/
obj-y += iris/
-obj-y += mrst/
+obj-y += intel-mid/
obj-y += olpc/
obj-y += scx200/
obj-y += sfi/
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
index 6db1cc4c7534..b7b0b35c1981 100644
--- a/arch/x86/platform/efi/Makefile
+++ b/arch/x86/platform/efi/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
+obj-$(CONFIG_EARLY_PRINTK_EFI) += early_printk.o
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
new file mode 100644
index 000000000000..6599a0027b76
--- /dev/null
+++ b/arch/x86/platform/efi/early_printk.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2013 Intel Corporation; author Matt Fleming
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
+ */
+
+#include <linux/console.h>
+#include <linux/efi.h>
+#include <linux/font.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <asm/setup.h>
+
+static const struct font_desc *font;
+static u32 efi_x, efi_y;
+
+static __init void early_efi_clear_scanline(unsigned int y)
+{
+ unsigned long base, *dst;
+ u16 len;
+
+ base = boot_params.screen_info.lfb_base;
+ len = boot_params.screen_info.lfb_linelength;
+
+ dst = early_ioremap(base + y*len, len);
+ if (!dst)
+ return;
+
+ memset(dst, 0, len);
+ early_iounmap(dst, len);
+}
+
+static __init void early_efi_scroll_up(void)
+{
+ unsigned long base, *dst, *src;
+ u16 len;
+ u32 i, height;
+
+ base = boot_params.screen_info.lfb_base;
+ len = boot_params.screen_info.lfb_linelength;
+ height = boot_params.screen_info.lfb_height;
+
+ for (i = 0; i < height - font->height; i++) {
+ dst = early_ioremap(base + i*len, len);
+ if (!dst)
+ return;
+
+ src = early_ioremap(base + (i + font->height) * len, len);
+ if (!src) {
+ early_iounmap(dst, len);
+ return;
+ }
+
+ memmove(dst, src, len);
+
+ early_iounmap(src, len);
+ early_iounmap(dst, len);
+ }
+}
+
+static void early_efi_write_char(u32 *dst, unsigned char c, unsigned int h)
+{
+ const u32 color_black = 0x00000000;
+ const u32 color_white = 0x00ffffff;
+ const u8 *src;
+ u8 s8;
+ int m;
+
+ src = font->data + c * font->height;
+ s8 = *(src + h);
+
+ for (m = 0; m < 8; m++) {
+ if ((s8 >> (7 - m)) & 1)
+ *dst = color_white;
+ else
+ *dst = color_black;
+ dst++;
+ }
+}
+
+static __init void
+early_efi_write(struct console *con, const char *str, unsigned int num)
+{
+ struct screen_info *si;
+ unsigned long base;
+ unsigned int len;
+ const char *s;
+ void *dst;
+
+ base = boot_params.screen_info.lfb_base;
+ si = &boot_params.screen_info;
+ len = si->lfb_linelength;
+
+ while (num) {
+ unsigned int linemax;
+ unsigned int h, count = 0;
+
+ for (s = str; *s && *s != '\n'; s++) {
+ if (count == num)
+ break;
+ count++;
+ }
+
+ linemax = (si->lfb_width - efi_x) / font->width;
+ if (count > linemax)
+ count = linemax;
+
+ for (h = 0; h < font->height; h++) {
+ unsigned int n, x;
+
+ dst = early_ioremap(base + (efi_y + h) * len, len);
+ if (!dst)
+ return;
+
+ s = str;
+ n = count;
+ x = efi_x;
+
+ while (n-- > 0) {
+ early_efi_write_char(dst + x*4, *s, h);
+ x += font->width;
+ s++;
+ }
+
+ early_iounmap(dst, len);
+ }
+
+ num -= count;
+ efi_x += count * font->width;
+ str += count;
+
+ if (num > 0 && *s == '\n') {
+ efi_x = 0;
+ efi_y += font->height;
+ str++;
+ num--;
+ }
+
+ if (efi_x >= si->lfb_width) {
+ efi_x = 0;
+ efi_y += font->height;
+ }
+
+ if (efi_y + font->height >= si->lfb_height) {
+ u32 i;
+
+ efi_y -= font->height;
+ early_efi_scroll_up();
+
+ for (i = 0; i < font->height; i++)
+ early_efi_clear_scanline(efi_y + i);
+ }
+ }
+}
+
+static __init int early_efi_setup(struct console *con, char *options)
+{
+ struct screen_info *si;
+ u16 xres, yres;
+ u32 i;
+
+ si = &boot_params.screen_info;
+ xres = si->lfb_width;
+ yres = si->lfb_height;
+
+ /*
+ * early_efi_write_char() implicitly assumes a framebuffer with
+ * 32 bits per pixel.
+ */
+ if (si->lfb_depth != 32)
+ return -ENODEV;
+
+ font = get_default_font(xres, yres, -1, -1);
+ if (!font)
+ return -ENODEV;
+
+ efi_y = rounddown(yres, font->height) - font->height;
+ for (i = 0; i < (yres - efi_y) / font->height; i++)
+ early_efi_scroll_up();
+
+ return 0;
+}
+
+struct console early_efi_console = {
+ .name = "earlyefi",
+ .write = early_efi_write,
+ .setup = early_efi_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
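The console defined above still has to be registered from the early-printk plumbing; that wiring lives outside this file, so the following is only a sketch of the idea, with the option string assumed:

#include <linux/console.h>
#include <linux/string.h>

extern struct console early_efi_console;

/* Sketch: select the EFI early console when something like "earlyprintk=efi" is given. */
static void __init pick_early_console(char *buf)
{
	if (!strncmp(buf, "efi", 3))
		register_console(&early_efi_console);
}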
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index c7e22ab29a5a..92c02344a060 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -60,19 +60,6 @@
static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 };
-struct efi __read_mostly efi = {
- .mps = EFI_INVALID_TABLE_ADDR,
- .acpi = EFI_INVALID_TABLE_ADDR,
- .acpi20 = EFI_INVALID_TABLE_ADDR,
- .smbios = EFI_INVALID_TABLE_ADDR,
- .sal_systab = EFI_INVALID_TABLE_ADDR,
- .boot_info = EFI_INVALID_TABLE_ADDR,
- .hcdp = EFI_INVALID_TABLE_ADDR,
- .uga = EFI_INVALID_TABLE_ADDR,
- .uv_systab = EFI_INVALID_TABLE_ADDR,
-};
-EXPORT_SYMBOL(efi);
-
struct efi_memory_map memmap;
static struct efi efi_phys __initdata;
@@ -80,6 +67,13 @@ static efi_system_table_t efi_systab __initdata;
unsigned long x86_efi_facility;
+static __initdata efi_config_table_type_t arch_tables[] = {
+#ifdef CONFIG_X86_UV
+ {UV_SYSTEM_TABLE_GUID, "UVsystab", &efi.uv_systab},
+#endif
+ {NULL_GUID, NULL, NULL},
+};
+
/*
* Returns 1 if 'facility' is enabled, 0 otherwise.
*/
@@ -399,6 +393,8 @@ int __init efi_memblock_x86_reserve_range(void)
memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
+ efi.memmap = &memmap;
+
return 0;
}
@@ -578,80 +574,6 @@ static int __init efi_systab_init(void *phys)
return 0;
}
-static int __init efi_config_init(u64 tables, int nr_tables)
-{
- void *config_tables, *tablep;
- int i, sz;
-
- if (efi_enabled(EFI_64BIT))
- sz = sizeof(efi_config_table_64_t);
- else
- sz = sizeof(efi_config_table_32_t);
-
- /*
- * Let's see what config tables the firmware passed to us.
- */
- config_tables = early_ioremap(tables, nr_tables * sz);
- if (config_tables == NULL) {
- pr_err("Could not map Configuration table!\n");
- return -ENOMEM;
- }
-
- tablep = config_tables;
- pr_info("");
- for (i = 0; i < efi.systab->nr_tables; i++) {
- efi_guid_t guid;
- unsigned long table;
-
- if (efi_enabled(EFI_64BIT)) {
- u64 table64;
- guid = ((efi_config_table_64_t *)tablep)->guid;
- table64 = ((efi_config_table_64_t *)tablep)->table;
- table = table64;
-#ifdef CONFIG_X86_32
- if (table64 >> 32) {
- pr_cont("\n");
- pr_err("Table located above 4GB, disabling EFI.\n");
- early_iounmap(config_tables,
- efi.systab->nr_tables * sz);
- return -EINVAL;
- }
-#endif
- } else {
- guid = ((efi_config_table_32_t *)tablep)->guid;
- table = ((efi_config_table_32_t *)tablep)->table;
- }
- if (!efi_guidcmp(guid, MPS_TABLE_GUID)) {
- efi.mps = table;
- pr_cont(" MPS=0x%lx ", table);
- } else if (!efi_guidcmp(guid, ACPI_20_TABLE_GUID)) {
- efi.acpi20 = table;
- pr_cont(" ACPI 2.0=0x%lx ", table);
- } else if (!efi_guidcmp(guid, ACPI_TABLE_GUID)) {
- efi.acpi = table;
- pr_cont(" ACPI=0x%lx ", table);
- } else if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID)) {
- efi.smbios = table;
- pr_cont(" SMBIOS=0x%lx ", table);
-#ifdef CONFIG_X86_UV
- } else if (!efi_guidcmp(guid, UV_SYSTEM_TABLE_GUID)) {
- efi.uv_systab = table;
- pr_cont(" UVsystab=0x%lx ", table);
-#endif
- } else if (!efi_guidcmp(guid, HCDP_TABLE_GUID)) {
- efi.hcdp = table;
- pr_cont(" HCDP=0x%lx ", table);
- } else if (!efi_guidcmp(guid, UGA_IO_PROTOCOL_GUID)) {
- efi.uga = table;
- pr_cont(" UGA=0x%lx ", table);
- }
- tablep += sz;
- }
- pr_cont("\n");
- early_iounmap(config_tables, efi.systab->nr_tables * sz);
- return 0;
-}
-
static int __init efi_runtime_init(void)
{
efi_runtime_services_t *runtime;
@@ -745,7 +667,7 @@ void __init efi_init(void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);
- if (efi_config_init(efi.systab->tables, efi.systab->nr_tables))
+ if (efi_config_init(arch_tables))
return;
set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
@@ -816,34 +738,6 @@ static void __init runtime_code_page_mkexec(void)
}
}
-/*
- * We can't ioremap data in EFI boot services RAM, because we've already mapped
- * it as RAM. So, look it up in the existing EFI memory map instead. Only
- * callable after efi_enter_virtual_mode and before efi_free_boot_services.
- */
-void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
-{
- void *p;
- if (WARN_ON(!memmap.map))
- return NULL;
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
- efi_memory_desc_t *md = p;
- u64 size = md->num_pages << EFI_PAGE_SHIFT;
- u64 end = md->phys_addr + size;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA)
- continue;
- if (!md->virt_addr)
- continue;
- if (phys_addr >= md->phys_addr && phys_addr < end) {
- phys_addr += md->virt_addr - md->phys_addr;
- return (__force void __iomem *)(unsigned long)phys_addr;
- }
- }
- return NULL;
-}
-
void efi_memory_uc(u64 addr, unsigned long size)
{
unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
diff --git a/arch/x86/platform/geode/alix.c b/arch/x86/platform/geode/alix.c
index 90e23e7679a5..76b6632d3143 100644
--- a/arch/x86/platform/geode/alix.c
+++ b/arch/x86/platform/geode/alix.c
@@ -98,7 +98,7 @@ static struct platform_device alix_leds_dev = {
.dev.platform_data = &alix_leds_data,
};
-static struct __initdata platform_device *alix_devs[] = {
+static struct platform_device *alix_devs[] __initdata = {
&alix_buttons_dev,
&alix_leds_dev,
};
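The placement fix above matters because __initdata is meant to annotate the array, not the struct type; wedged between 'struct' and the tag, the attribute attaches to the type and is effectively ignored, so the table never lands in the init section. The corrected general shape, with hypothetical device names:

static struct platform_device *example_devs[] __initdata = {
	&example_buttons_dev,	/* hypothetical devices, for illustration only */
	&example_leds_dev,
};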
diff --git a/arch/x86/platform/geode/geos.c b/arch/x86/platform/geode/geos.c
index c2e6d53558be..aa733fba2471 100644
--- a/arch/x86/platform/geode/geos.c
+++ b/arch/x86/platform/geode/geos.c
@@ -87,7 +87,7 @@ static struct platform_device geos_leds_dev = {
.dev.platform_data = &geos_leds_data,
};
-static struct __initdata platform_device *geos_devs[] = {
+static struct platform_device *geos_devs[] __initdata = {
&geos_buttons_dev,
&geos_leds_dev,
};
diff --git a/arch/x86/platform/geode/net5501.c b/arch/x86/platform/geode/net5501.c
index 646e3b5b4bb6..927e38c0089f 100644
--- a/arch/x86/platform/geode/net5501.c
+++ b/arch/x86/platform/geode/net5501.c
@@ -78,7 +78,7 @@ static struct platform_device net5501_leds_dev = {
.dev.platform_data = &net5501_leds_data,
};
-static struct __initdata platform_device *net5501_devs[] = {
+static struct platform_device *net5501_devs[] __initdata = {
&net5501_buttons_dev,
&net5501_leds_dev,
};
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile
new file mode 100644
index 000000000000..01cc29ea5ff7
--- /dev/null
+++ b/arch/x86/platform/intel-mid/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o
+obj-$(CONFIG_X86_INTEL_MID) += intel_mid_vrtc.o
+obj-$(CONFIG_EARLY_PRINTK_INTEL_MID) += early_printk_intel_mid.o
+# SFI specific code
+ifdef CONFIG_X86_INTEL_MID
+obj-$(CONFIG_SFI) += sfi.o device_libs/
+endif
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
new file mode 100644
index 000000000000..097e7a7940d8
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -0,0 +1,22 @@
+# IPC Devices
+obj-y += platform_ipc.o
+obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic.o
+obj-$(subst m,y,$(CONFIG_SND_MFLD_MACHINE)) += platform_msic_audio.o
+obj-$(subst m,y,$(CONFIG_GPIO_MSIC)) += platform_msic_gpio.o
+obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_ocd.o
+obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_battery.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
+obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
+obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
+# I2C Devices
+obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
+obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_max7315.o
+obj-$(subst m,y,$(CONFIG_INPUT_MPU3050)) += platform_mpu3050.o
+obj-$(subst m,y,$(CONFIG_INPUT_BMA150)) += platform_bma023.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
+obj-$(subst m,y,$(CONFIG_DRM_MEDFIELD)) += platform_tc35876x.o
+# SPI Devices
+obj-$(subst m,y,$(CONFIG_SERIAL_MRST_MAX3110)) += platform_max3111.o
+# MISC Devices
+obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
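A note on the $(subst m,y,...) idiom used throughout this Makefile: it rewrites CONFIG_FOO=m to y so that the platform glue is built in even when the corresponding driver is modular, since these hooks cannot be modules themselves. A tiny illustration with a hypothetical symbol:

# CONFIG_FOO=y     ->  obj-y += platform_foo.o   (built in)
# CONFIG_FOO=m     ->  obj-y += platform_foo.o   ("m" rewritten to "y", still built in)
# CONFIG_FOO unset ->  obj-  += platform_foo.o   (dropped by kbuild)
obj-$(subst m,y,$(CONFIG_FOO)) += platform_foo.o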
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bma023.c b/arch/x86/platform/intel-mid/device_libs/platform_bma023.c
new file mode 100644
index 000000000000..0ae7f2ae2296
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bma023.c
@@ -0,0 +1,20 @@
+/*
+ * platform_bma023.c: bma023 platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <asm/intel-mid.h>
+
+static const struct devs_id bma023_dev_id __initconst = {
+ .name = "bma023",
+ .type = SFI_DEV_TYPE_I2C,
+ .delay = 1,
+};
+
+sfi_device(bma023_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
new file mode 100644
index 000000000000..0d942c1d26d5
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
@@ -0,0 +1,41 @@
+/*
+ * platform_emc1403.c: emc1403 platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <asm/intel-mid.h>
+
+static void __init *emc1403_platform_data(void *info)
+{
+ static short intr2nd_pdata;
+ struct i2c_board_info *i2c_info = info;
+ int intr = get_gpio_by_name("thermal_int");
+ int intr2nd = get_gpio_by_name("thermal_alert");
+
+ if (intr == -1 || intr2nd == -1)
+ return NULL;
+
+ i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+ intr2nd_pdata = intr2nd + INTEL_MID_IRQ_OFFSET;
+
+ return &intr2nd_pdata;
+}
+
+static const struct devs_id emc1403_dev_id __initconst = {
+ .name = "emc1403",
+ .type = SFI_DEV_TYPE_I2C,
+ .delay = 1,
+ .get_platform_data = &emc1403_platform_data,
+};
+
+sfi_device(emc1403_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
new file mode 100644
index 000000000000..a013a4834bbe
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
@@ -0,0 +1,83 @@
+/*
+ * platform_gpio_keys.c: gpio_keys platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/platform_device.h>
+#include <asm/intel-mid.h>
+
+#define DEVICE_NAME "gpio-keys"
+
+/*
+ * We will search for these buttons in the SFI GPIO table (by name)
+ * and register them dynamically. Please add all possible
+ * buttons here; we will drop the ones for which no GPIO is found.
+ */
+static struct gpio_keys_button gpio_button[] = {
+ {KEY_POWER, -1, 1, "power_btn", EV_KEY, 0, 3000},
+ {KEY_PROG1, -1, 1, "prog_btn1", EV_KEY, 0, 20},
+ {KEY_PROG2, -1, 1, "prog_btn2", EV_KEY, 0, 20},
+ {SW_LID, -1, 1, "lid_switch", EV_SW, 0, 20},
+ {KEY_VOLUMEUP, -1, 1, "vol_up", EV_KEY, 0, 20},
+ {KEY_VOLUMEDOWN, -1, 1, "vol_down", EV_KEY, 0, 20},
+ {KEY_CAMERA, -1, 1, "camera_full", EV_KEY, 0, 20},
+ {KEY_CAMERA_FOCUS, -1, 1, "camera_half", EV_KEY, 0, 20},
+ {SW_KEYPAD_SLIDE, -1, 1, "MagSw1", EV_SW, 0, 20},
+ {SW_KEYPAD_SLIDE, -1, 1, "MagSw2", EV_SW, 0, 20},
+};
+
+static struct gpio_keys_platform_data gpio_keys = {
+ .buttons = gpio_button,
+ .rep = 1,
+ .nbuttons = -1, /* will fill it after search */
+};
+
+static struct platform_device pb_device = {
+ .name = DEVICE_NAME,
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_keys,
+ },
+};
+
+/*
+ * Drop the buttons whose GPIOs do not exist and register the
+ * gpio button device if any remain
+ */
+static int __init pb_keys_init(void)
+{
+ struct gpio_keys_button *gb = gpio_button;
+ int i, num, good = 0;
+
+ num = sizeof(gpio_button) / sizeof(struct gpio_keys_button);
+ for (i = 0; i < num; i++) {
+ gb[i].gpio = get_gpio_by_name(gb[i].desc);
+ pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc,
+ gb[i].gpio);
+ if (gb[i].gpio == -1)
+ continue;
+
+ if (i != good)
+ gb[good] = gb[i];
+ good++;
+ }
+
+ if (good) {
+ gpio_keys.nbuttons = good;
+ return platform_device_register(&pb_device);
+ }
+ return 0;
+}
+late_initcall(pb_keys_init);
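A small style note on the loop above: the open-coded sizeof division is equivalent to the ARRAY_SIZE() helper. A sketch of the more idiomatic spelling, with identical behaviour:

#include <linux/kernel.h>

static int __init count_gpio_buttons(void)
{
	/* Same value as sizeof(gpio_button) / sizeof(struct gpio_keys_button). */
	return ARRAY_SIZE(gpio_button);
}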
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ipc.c b/arch/x86/platform/intel-mid/device_libs/platform_ipc.c
new file mode 100644
index 000000000000..a84b73d6c4a0
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_ipc.c
@@ -0,0 +1,68 @@
+/*
+ * platform_ipc.c: IPC platform library file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/sfi.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+#include "platform_ipc.h"
+
+void __init ipc_device_handler(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev)
+{
+ struct platform_device *pdev;
+ void *pdata = NULL;
+ static struct resource res __initdata = {
+ .name = "IRQ",
+ .flags = IORESOURCE_IRQ,
+ };
+
+ pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n",
+ pentry->name, pentry->irq);
+
+ /*
+ * We need to call the platform init of IPC devices to fill the misc_pdata
+ * structure. It will be used in msic_init for initialization.
+ */
+ if (dev != NULL)
+ pdata = dev->get_platform_data(pentry);
+
+ /*
+ * On Medfield the platform device creation is handled by the MSIC
+ * MFD driver so we don't need to do it here.
+ */
+ if (intel_mid_has_msic())
+ return;
+
+ pdev = platform_device_alloc(pentry->name, 0);
+ if (pdev == NULL) {
+ pr_err("out of memory for SFI platform device '%s'.\n",
+ pentry->name);
+ return;
+ }
+ res.start = pentry->irq;
+ platform_device_add_resources(pdev, &res, 1);
+
+ pdev->dev.platform_data = pdata;
+ intel_scu_device_register(pdev);
+}
+
+static const struct devs_id pmic_audio_dev_id __initconst = {
+ .name = "pmic_audio",
+ .type = SFI_DEV_TYPE_IPC,
+ .delay = 1,
+ .device_handler = &ipc_device_handler,
+};
+
+sfi_device(pmic_audio_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ipc.h b/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
new file mode 100644
index 000000000000..8f568dd79605
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
@@ -0,0 +1,17 @@
+/*
+ * platform_ipc.h: IPC platform library header file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_IPC_H_
+#define _PLATFORM_IPC_H_
+
+extern void __init ipc_device_handler(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c
new file mode 100644
index 000000000000..15278c11f714
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c
@@ -0,0 +1,39 @@
+/*
+ * platform_lis331.c: lis331 platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+
+static void __init *lis331dl_platform_data(void *info)
+{
+ static short intr2nd_pdata;
+ struct i2c_board_info *i2c_info = info;
+ int intr = get_gpio_by_name("accel_int");
+ int intr2nd = get_gpio_by_name("accel_2");
+
+ if (intr == -1 || intr2nd == -1)
+ return NULL;
+
+ i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+ intr2nd_pdata = intr2nd + INTEL_MID_IRQ_OFFSET;
+
+ return &intr2nd_pdata;
+}
+
+static const struct devs_id lis331dl_dev_id __initconst = {
+ .name = "i2c_accel",
+ .type = SFI_DEV_TYPE_I2C,
+ .get_platform_data = &lis331dl_platform_data,
+};
+
+sfi_device(lis331dl_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max3111.c b/arch/x86/platform/intel-mid/device_libs/platform_max3111.c
new file mode 100644
index 000000000000..afd1df94e0e5
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max3111.c
@@ -0,0 +1,35 @@
+/*
+ * platform_max3111.c: max3111 platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <asm/intel-mid.h>
+
+static void __init *max3111_platform_data(void *info)
+{
+ struct spi_board_info *spi_info = info;
+ int intr = get_gpio_by_name("max3111_int");
+
+ spi_info->mode = SPI_MODE_0;
+ if (intr == -1)
+ return NULL;
+ spi_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+ return NULL;
+}
+
+static const struct devs_id max3111_dev_id __initconst = {
+ .name = "spi_max3111",
+ .type = SFI_DEV_TYPE_SPI,
+ .get_platform_data = &max3111_platform_data,
+};
+
+sfi_device(max3111_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
new file mode 100644
index 000000000000..94ade10024ae
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
@@ -0,0 +1,79 @@
+/*
+ * platform_max7315.c: max7315 platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/platform_data/pca953x.h>
+#include <asm/intel-mid.h>
+
+#define MAX7315_NUM 2
+
+static void __init *max7315_platform_data(void *info)
+{
+ static struct pca953x_platform_data max7315_pdata[MAX7315_NUM];
+ static int nr;
+ struct pca953x_platform_data *max7315 = &max7315_pdata[nr];
+ struct i2c_board_info *i2c_info = info;
+ int gpio_base, intr;
+ char base_pin_name[SFI_NAME_LEN + 1];
+ char intr_pin_name[SFI_NAME_LEN + 1];
+
+ if (nr == MAX7315_NUM) {
+ pr_err("too many max7315s, we only support %d\n",
+ MAX7315_NUM);
+ return NULL;
+ }
+ /* We have several max7315s on the board; we only need to load several
+ * instances of the same pca953x driver to cover them
+ */
+ strcpy(i2c_info->type, "max7315");
+ if (nr++) {
+ sprintf(base_pin_name, "max7315_%d_base", nr);
+ sprintf(intr_pin_name, "max7315_%d_int", nr);
+ } else {
+ strcpy(base_pin_name, "max7315_base");
+ strcpy(intr_pin_name, "max7315_int");
+ }
+
+ gpio_base = get_gpio_by_name(base_pin_name);
+ intr = get_gpio_by_name(intr_pin_name);
+
+ if (gpio_base == -1)
+ return NULL;
+ max7315->gpio_base = gpio_base;
+ if (intr != -1) {
+ i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+ max7315->irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+ } else {
+ i2c_info->irq = -1;
+ max7315->irq_base = -1;
+ }
+ return max7315;
+}
+
+static const struct devs_id max7315_dev_id __initconst = {
+ .name = "i2c_max7315",
+ .type = SFI_DEV_TYPE_I2C,
+ .delay = 1,
+ .get_platform_data = &max7315_platform_data,
+};
+
+static const struct devs_id max7315_2_dev_id __initconst = {
+ .name = "i2c_max7315_2",
+ .type = SFI_DEV_TYPE_I2C,
+ .delay = 1,
+ .get_platform_data = &max7315_platform_data,
+};
+
+sfi_device(max7315_dev_id);
+sfi_device(max7315_2_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c
new file mode 100644
index 000000000000..dd28d63c84fb
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c
@@ -0,0 +1,36 @@
+/*
+ * platform_mpu3050.c: mpu3050 platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <asm/intel-mid.h>
+
+static void *mpu3050_platform_data(void *info)
+{
+ struct i2c_board_info *i2c_info = info;
+ int intr = get_gpio_by_name("mpu3050_int");
+
+ if (intr == -1)
+ return NULL;
+
+ i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+ return NULL;
+}
+
+static const struct devs_id mpu3050_dev_id __initconst = {
+ .name = "mpu3050",
+ .type = SFI_DEV_TYPE_I2C,
+ .delay = 1,
+ .get_platform_data = &mpu3050_platform_data,
+};
+
+sfi_device(mpu3050_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic.c b/arch/x86/platform/intel-mid/device_libs/platform_msic.c
new file mode 100644
index 000000000000..9f4a775a69d6
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic.c
@@ -0,0 +1,87 @@
+/*
+ * platform_msic.c: MSIC platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel-mid.h>
+#include "platform_msic.h"
+
+struct intel_msic_platform_data msic_pdata;
+
+static struct resource msic_resources[] = {
+ {
+ .start = INTEL_MSIC_IRQ_PHYS_BASE,
+ .end = INTEL_MSIC_IRQ_PHYS_BASE + 64 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device msic_device = {
+ .name = "intel_msic",
+ .id = -1,
+ .dev = {
+ .platform_data = &msic_pdata,
+ },
+ .num_resources = ARRAY_SIZE(msic_resources),
+ .resource = msic_resources,
+};
+
+static int msic_scu_status_change(struct notifier_block *nb,
+ unsigned long code, void *data)
+{
+ if (code == SCU_DOWN) {
+ platform_device_unregister(&msic_device);
+ return 0;
+ }
+
+ return platform_device_register(&msic_device);
+}
+
+static int __init msic_init(void)
+{
+ static struct notifier_block msic_scu_notifier = {
+ .notifier_call = msic_scu_status_change,
+ };
+
+ /*
+ * We need to be sure that the SCU IPC is ready before MSIC device
+ * can be registered.
+ */
+ if (intel_mid_has_msic())
+ intel_scu_notifier_add(&msic_scu_notifier);
+
+ return 0;
+}
+arch_initcall(msic_init);
+
+/*
+ * msic_generic_platform_data - sets generic platform data for the block
+ * @info: pointer to the SFI device table entry for this block
+ * @block: MSIC block
+ *
+ * Sets the IRQ number from the SFI table entry for the given device in
+ * the MSIC platform data.
+ */
+void *msic_generic_platform_data(void *info, enum intel_msic_block block)
+{
+ struct sfi_device_table_entry *entry = info;
+
+ BUG_ON(block < 0 || block >= INTEL_MSIC_BLOCK_LAST);
+ msic_pdata.irq[block] = entry->irq;
+
+ return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic.h b/arch/x86/platform/intel-mid/device_libs/platform_msic.h
new file mode 100644
index 000000000000..917eb56d77da
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic.h
@@ -0,0 +1,19 @@
+/*
+ * platform_msic.h: MSIC platform data header file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_H_
+#define _PLATFORM_MSIC_H_
+
+extern struct intel_msic_platform_data msic_pdata;
+
+extern void *msic_generic_platform_data(void *info,
+ enum intel_msic_block block) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c
new file mode 100644
index 000000000000..29629397d2b3
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c
@@ -0,0 +1,47 @@
+/*
+ * platform_msic_audio.c: MSIC audio platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+
+#include "platform_msic.h"
+#include "platform_ipc.h"
+
+static void *msic_audio_platform_data(void *info)
+{
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_simple("sst-platform", -1, NULL, 0);
+
+ if (IS_ERR(pdev)) {
+ pr_err("failed to create audio platform device\n");
+ return NULL;
+ }
+
+ return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_AUDIO);
+}
+
+static const struct devs_id msic_audio_dev_id __initconst = {
+ .name = "msic_audio",
+ .type = SFI_DEV_TYPE_IPC,
+ .delay = 1,
+ .get_platform_data = &msic_audio_platform_data,
+ .device_handler = &ipc_device_handler,
+};
+
+sfi_device(msic_audio_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c
new file mode 100644
index 000000000000..f446c33df1a8
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c
@@ -0,0 +1,37 @@
+/*
+ * platform_msic_battery.c: MSIC battery platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+
+#include "platform_msic.h"
+#include "platform_ipc.h"
+
+static void __init *msic_battery_platform_data(void *info)
+{
+ return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_BATTERY);
+}
+
+static const struct devs_id msic_battery_dev_id __initconst = {
+ .name = "msic_battery",
+ .type = SFI_DEV_TYPE_IPC,
+ .delay = 1,
+ .get_platform_data = &msic_battery_platform_data,
+ .device_handler = &ipc_device_handler,
+};
+
+sfi_device(msic_battery_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c
new file mode 100644
index 000000000000..2a4f7b1dd917
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c
@@ -0,0 +1,48 @@
+/*
+ * platform_msic_gpio.c: MSIC GPIO platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+
+#include "platform_msic.h"
+#include "platform_ipc.h"
+
+static void __init *msic_gpio_platform_data(void *info)
+{
+ static struct intel_msic_gpio_pdata msic_gpio_pdata;
+
+ int gpio = get_gpio_by_name("msic_gpio_base");
+
+ if (gpio < 0)
+ return NULL;
+
+ msic_gpio_pdata.gpio_base = gpio;
+ msic_pdata.gpio = &msic_gpio_pdata;
+
+ return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_GPIO);
+}
+
+static const struct devs_id msic_gpio_dev_id __initconst = {
+ .name = "msic_gpio",
+ .type = SFI_DEV_TYPE_IPC,
+ .delay = 1,
+ .get_platform_data = &msic_gpio_platform_data,
+ .device_handler = &ipc_device_handler,
+};
+
+sfi_device(msic_gpio_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c
new file mode 100644
index 000000000000..6497111ddb54
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c
@@ -0,0 +1,49 @@
+/*
+ * platform_msic_ocd.c: MSIC OCD platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+
+#include "platform_msic.h"
+#include "platform_ipc.h"
+
+static void __init *msic_ocd_platform_data(void *info)
+{
+ static struct intel_msic_ocd_pdata msic_ocd_pdata;
+ int gpio;
+
+ gpio = get_gpio_by_name("ocd_gpio");
+
+ if (gpio < 0)
+ return NULL;
+
+ msic_ocd_pdata.gpio = gpio;
+ msic_pdata.ocd = &msic_ocd_pdata;
+
+ return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_OCD);
+}
+
+static const struct devs_id msic_ocd_dev_id __initconst = {
+ .name = "msic_ocd",
+ .type = SFI_DEV_TYPE_IPC,
+ .delay = 1,
+ .get_platform_data = &msic_ocd_platform_data,
+ .device_handler = &ipc_device_handler,
+};
+
+sfi_device(msic_ocd_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c
new file mode 100644
index 000000000000..83a3459bc337
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c
@@ -0,0 +1,36 @@
+/*
+ * platform_msic_power_btn.c: MSIC power button platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/init.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+
+#include "platform_msic.h"
+#include "platform_ipc.h"
+
+static void __init *msic_power_btn_platform_data(void *info)
+{
+ return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_POWER_BTN);
+}
+
+static const struct devs_id msic_power_btn_dev_id __initconst = {
+ .name = "msic_power_btn",
+ .type = SFI_DEV_TYPE_IPC,
+ .delay = 1,
+ .get_platform_data = &msic_power_btn_platform_data,
+ .device_handler = &ipc_device_handler,
+};
+
+sfi_device(msic_power_btn_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c
new file mode 100644
index 000000000000..a351878b96bc
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c
@@ -0,0 +1,37 @@
+/*
+ * platform_msic_thermal.c: msic_thermal platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+
+#include "platform_msic.h"
+#include "platform_ipc.h"
+
+static void __init *msic_thermal_platform_data(void *info)
+{
+ return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_THERMAL);
+}
+
+static const struct devs_id msic_thermal_dev_id __initconst = {
+ .name = "msic_thermal",
+ .type = SFI_DEV_TYPE_IPC,
+ .delay = 1,
+ .get_platform_data = &msic_thermal_platform_data,
+ .device_handler = &ipc_device_handler,
+};
+
+sfi_device(msic_thermal_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
new file mode 100644
index 000000000000..d87182a09263
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
@@ -0,0 +1,54 @@
+/*
+ * platform_pmic_gpio.c: PMIC GPIO platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/intel_pmic_gpio.h>
+#include <asm/intel-mid.h>
+
+#include "platform_ipc.h"
+
+static void __init *pmic_gpio_platform_data(void *info)
+{
+ static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
+ int gpio_base = get_gpio_by_name("pmic_gpio_base");
+
+ if (gpio_base == -1)
+ gpio_base = 64;
+ pmic_gpio_pdata.gpio_base = gpio_base;
+ pmic_gpio_pdata.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+ pmic_gpio_pdata.gpiointr = 0xffffeff8;
+
+ return &pmic_gpio_pdata;
+}
+
+static const struct devs_id pmic_gpio_spi_dev_id __initconst = {
+ .name = "pmic_gpio",
+ .type = SFI_DEV_TYPE_SPI,
+ .delay = 1,
+ .get_platform_data = &pmic_gpio_platform_data,
+};
+
+static const struct devs_id pmic_gpio_ipc_dev_id __initconst = {
+ .name = "pmic_gpio",
+ .type = SFI_DEV_TYPE_IPC,
+ .delay = 1,
+ .get_platform_data = &pmic_gpio_platform_data,
+ .device_handler = &ipc_device_handler
+};
+
+sfi_device(pmic_gpio_spi_dev_id);
+sfi_device(pmic_gpio_ipc_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c
new file mode 100644
index 000000000000..740fc757050c
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c
@@ -0,0 +1,36 @@
+/*
+ * platform_tc35876x.c: tc35876x platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/i2c/tc35876x.h>
+#include <asm/intel-mid.h>
+
+/* tc35876x DSI-LVDS bridge chip and panel platform data */
+static void *tc35876x_platform_data(void *data)
+{
+ static struct tc35876x_platform_data pdata;
+
+ /* gpio pins set to -1 will not be used by the driver */
+ pdata.gpio_bridge_reset = get_gpio_by_name("LCMB_RXEN");
+ pdata.gpio_panel_bl_en = get_gpio_by_name("6S6P_BL_EN");
+ pdata.gpio_panel_vadd = get_gpio_by_name("EN_VREG_LCD_V3P3");
+
+ return &pdata;
+}
+
+static const struct devs_id tc35876x_dev_id __initconst = {
+ .name = "i2c_disp_brig",
+ .type = SFI_DEV_TYPE_I2C,
+ .get_platform_data = &tc35876x_platform_data,
+};
+
+sfi_device(tc35876x_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
new file mode 100644
index 000000000000..22881c9a6737
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
@@ -0,0 +1,57 @@
+/*
+ * platform_tca6416.c: tca6416 platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/platform_data/pca953x.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+
+#define TCA6416_NAME "tca6416"
+#define TCA6416_BASE "tca6416_base"
+#define TCA6416_INTR "tca6416_int"
+
+static void *tca6416_platform_data(void *info)
+{
+ static struct pca953x_platform_data tca6416;
+ struct i2c_board_info *i2c_info = info;
+ int gpio_base, intr;
+ char base_pin_name[SFI_NAME_LEN + 1];
+ char intr_pin_name[SFI_NAME_LEN + 1];
+
+ strcpy(i2c_info->type, TCA6416_NAME);
+ strcpy(base_pin_name, TCA6416_BASE);
+ strcpy(intr_pin_name, TCA6416_INTR);
+
+ gpio_base = get_gpio_by_name(base_pin_name);
+ intr = get_gpio_by_name(intr_pin_name);
+
+ if (gpio_base == -1)
+ return NULL;
+ tca6416.gpio_base = gpio_base;
+ if (intr != -1) {
+ i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+ tca6416.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+ } else {
+ i2c_info->irq = -1;
+ tca6416.irq_base = -1;
+ }
+ return &tca6416;
+}
+
+static const struct devs_id tca6416_dev_id __initconst = {
+ .name = "tca6416",
+ .type = SFI_DEV_TYPE_I2C,
+ .delay = 1,
+ .get_platform_data = &tca6416_platform_data,
+};
+
+sfi_device(tca6416_dev_id);
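
Each of the device_libs files above follows the same shape: a get_platform_data() callback that resolves pin names through the SFI GPIO table, plus a const struct devs_id registered with sfi_device(). Purely as an illustration of that pattern (every name below is hypothetical and not part of this patch; the declarations for devs_id, sfi_device(), get_gpio_by_name() and INTEL_MID_IRQ_OFFSET are assumed to come from <asm/intel-mid.h>, as the other entries suggest), a minimal entry for an imaginary SFI-enumerated I2C sensor might look like:

#include <linux/init.h>
#include <linux/sfi.h>
#include <linux/i2c.h>
#include <asm/intel-mid.h>

static void __init *foo_sensor_platform_data(void *info)
{
	struct i2c_board_info *i2c_info = info;
	int intr = get_gpio_by_name("foo_sensor_int"); /* pin name from the SFI GPIO table */

	if (intr < 0)
		return NULL;
	i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
	return NULL; /* no platform data needed beyond the IRQ */
}

static const struct devs_id foo_sensor_dev_id __initconst = {
	.name = "foo_sensor",
	.type = SFI_DEV_TYPE_I2C,
	.delay = 0,
	.get_platform_data = &foo_sensor_platform_data,
};

sfi_device(foo_sensor_dev_id);
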
diff --git a/arch/x86/platform/mrst/early_printk_mrst.c b/arch/x86/platform/intel-mid/early_printk_intel_mid.c
index 028454f0c3a5..4f702f554f6e 100644
--- a/arch/x86/platform/mrst/early_printk_mrst.c
+++ b/arch/x86/platform/intel-mid/early_printk_intel_mid.c
@@ -1,5 +1,5 @@
/*
- * early_printk_mrst.c - early consoles for Intel MID platforms
+ * early_printk_intel_mid.c - early consoles for Intel MID platforms
*
* Copyright (c) 2008-2010, Intel Corporation
*
@@ -27,7 +27,7 @@
#include <asm/fixmap.h>
#include <asm/pgtable.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#define MRST_SPI_TIMEOUT 0x200000
#define MRST_REGBASE_SPI0 0xff128000
@@ -152,7 +152,7 @@ void mrst_early_console_init(void)
spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
freq = 100000000 / (spi0_cdiv + 1);
- if (mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL)
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL)
mrst_spi_paddr = MRST_REGBASE_SPI1;
pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
@@ -213,13 +213,14 @@ static void early_mrst_spi_putc(char c)
}
if (!timeout)
- pr_warning("MRST earlycon: timed out\n");
+ pr_warn("MRST earlycon: timed out\n");
else
max3110_write_data(c);
}
/* Early SPI only uses polling mode */
-static void early_mrst_spi_write(struct console *con, const char *str, unsigned n)
+static void early_mrst_spi_write(struct console *con, const char *str,
+ unsigned n)
{
int i;
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
new file mode 100644
index 000000000000..523a1c8f7f07
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -0,0 +1,213 @@
+/*
+ * intel-mid.c: Intel MID platform setup code
+ *
+ * (C) Copyright 2008, 2012 Intel Corporation
+ * Author: Jacob Pan (jacob.jun.pan@intel.com)
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#define pr_fmt(fmt) "intel_mid: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+#include <asm/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <asm/reboot.h>
+
+/*
+ * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
+ * cmdline option x86_intel_mid_timer can be used to override the configuration
+ * to prefer one or the other.
+ * at runtime, there are basically three timer configurations:
+ * 1. per cpu apbt clock only
+ * 2. per cpu always-on lapic clocks only, this is Penwell/Medfield only
+ * 3. per cpu lapic clock (C3STOP) and one apbt clock, with broadcast.
+ *
+ * by default (without cmdline option), platform code first detects cpu type
+ * to see if we are on lincroft or penwell, then set up both lapic or apbt
+ * clocks accordingly.
+ * i.e. by default, medfield uses configuration #2, moorestown uses #1.
+ * config #3 is supported but not recommended on medfield.
+ *
+ * rating and feature summary:
+ * lapic (with C3STOP) --------- 100
+ * apbt (always-on) ------------ 110
+ * lapic (always-on,ARAT) ------ 150
+ */
+
+enum intel_mid_timer_options intel_mid_timer_options;
+
+enum intel_mid_cpu_type __intel_mid_cpu_chip;
+EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
+
+static void intel_mid_power_off(void)
+{
+}
+
+static void intel_mid_reboot(void)
+{
+ intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+}
+
+static unsigned long __init intel_mid_calibrate_tsc(void)
+{
+ unsigned long fast_calibrate;
+ u32 lo, hi, ratio, fsb;
+
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
+ ratio = (hi >> 8) & 0x1f;
+ pr_debug("ratio is %d\n", ratio);
+ if (!ratio) {
+ pr_err("read a zero ratio, which should not happen!\n");
+ pr_err("force tsc ratio to 16 ...\n");
+ ratio = 16;
+ }
+ rdmsr(MSR_FSB_FREQ, lo, hi);
+ if ((lo & 0x7) == 0x7)
+ fsb = PENWELL_FSB_FREQ_83SKU;
+ else
+ fsb = PENWELL_FSB_FREQ_100SKU;
+ fast_calibrate = ratio * fsb;
+ pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
+ lapic_timer_frequency = fsb * 1000 / HZ;
+ /* mark tsc clocksource as reliable */
+ set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+
+ if (fast_calibrate)
+ return fast_calibrate;
+
+ return 0;
+}
+
+static void __init intel_mid_time_init(void)
+{
+ sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
+ switch (intel_mid_timer_options) {
+ case INTEL_MID_TIMER_APBT_ONLY:
+ break;
+ case INTEL_MID_TIMER_LAPIC_APBT:
+ x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
+ x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
+ break;
+ default:
+ if (!boot_cpu_has(X86_FEATURE_ARAT))
+ break;
+ x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
+ x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
+ return;
+ }
+ /* we need at least one APB timer */
+ pre_init_apic_IRQ0();
+ apbt_time_init();
+}
+
+static void __cpuinit intel_mid_arch_setup(void)
+{
+ if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
+ else {
+ pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
+ }
+}
+
+/* MID systems don't have i8042 controller */
+static int intel_mid_i8042_detect(void)
+{
+ return 0;
+}
+
+/*
+ * Moorestown has neither an external NMI source nor port 0x61 to report
+ * NMI status. The only possible NMI sources are the PMU, as a result of the
+ * NMI watchdog or lock debugging. Reading I/O port 0x61 returns 0xff, which
+ * would mislead the NMI handler.
+ */
+static unsigned char intel_mid_get_nmi_reason(void)
+{
+ return 0;
+}
+
+/*
+ * Moorestown specific x86_init function overrides and early setup
+ * calls.
+ */
+void __init x86_intel_mid_early_setup(void)
+{
+ x86_init.resources.probe_roms = x86_init_noop;
+ x86_init.resources.reserve_resources = x86_init_noop;
+
+ x86_init.timers.timer_init = intel_mid_time_init;
+ x86_init.timers.setup_percpu_clockev = x86_init_noop;
+
+ x86_init.irqs.pre_vector_init = x86_init_noop;
+
+ x86_init.oem.arch_setup = intel_mid_arch_setup;
+
+ x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
+
+ x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
+ x86_platform.i8042_detect = intel_mid_i8042_detect;
+ x86_init.timers.wallclock_init = intel_mid_rtc_init;
+ x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
+
+ x86_init.pci.init = intel_mid_pci_init;
+ x86_init.pci.fixup_irqs = x86_init_noop;
+
+ legacy_pic = &null_legacy_pic;
+
+ pm_power_off = intel_mid_power_off;
+ machine_ops.emergency_restart = intel_mid_reboot;
+
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+ set_bit(MP_BUS_ISA, mp_bus_not_pci);
+}
+
+/*
+ * if user does not want to use per CPU apb timer, just give it a lower rating
+ * than local apic timer and skip the late per cpu timer init.
+ */
+static inline int __init setup_x86_intel_mid_timer(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ if (strcmp("apbt_only", arg) == 0)
+ intel_mid_timer_options = INTEL_MID_TIMER_APBT_ONLY;
+ else if (strcmp("lapic_and_apbt", arg) == 0)
+ intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT;
+ else {
+ pr_warn("X86 INTEL_MID timer option %s not recognised; "
+ "use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
+ arg);
+ return -EINVAL;
+ }
+ return 0;
+}
+__setup("x86_intel_mid_timer=", setup_x86_intel_mid_timer);
+
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/intel-mid/intel_mid_vrtc.c
index 5e355b134ba4..4762cff7facd 100644
--- a/arch/x86/platform/mrst/vrtc.c
+++ b/arch/x86/platform/intel-mid/intel_mid_vrtc.c
@@ -1,5 +1,5 @@
/*
- * vrtc.c: Driver for virtual RTC device on Intel MID platform
+ * intel_mid_vrtc.c: Driver for virtual RTC device on Intel MID platform
*
* (C) Copyright 2009 Intel Corporation
*
@@ -23,8 +23,8 @@
#include <linux/sfi.h>
#include <linux/platform_device.h>
-#include <asm/mrst.h>
-#include <asm/mrst-vrtc.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
#include <asm/time.h>
#include <asm/fixmap.h>
@@ -79,7 +79,7 @@ void vrtc_get_time(struct timespec *now)
/* vRTC YEAR reg contains the offset to 1972 */
year += 1972;
- printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d "
+ pr_info("vRTC: sec: %d min: %d hour: %d day: %d "
"mon: %d year: %d\n", sec, min, hour, mday, mon, year);
now->tv_sec = mktime(year, mon, mday, hour, min, sec);
@@ -109,15 +109,14 @@ int vrtc_set_mmss(const struct timespec *now)
vrtc_cmos_write(tm.tm_sec, RTC_SECONDS);
spin_unlock_irqrestore(&rtc_lock, flags);
} else {
- printk(KERN_ERR
- "%s: Invalid vRTC value: write of %lx to vRTC failed\n",
+ pr_err("%s: Invalid vRTC value: write of %lx to vRTC failed\n",
__FUNCTION__, now->tv_sec);
retval = -EINVAL;
}
return retval;
}
-void __init mrst_rtc_init(void)
+void __init intel_mid_rtc_init(void)
{
unsigned long vrtc_paddr;
@@ -155,10 +154,10 @@ static struct platform_device vrtc_device = {
};
/* Register the RTC device if appropriate */
-static int __init mrst_device_create(void)
+static int __init intel_mid_device_create(void)
{
/* No Moorestown, no device */
- if (!mrst_identify_cpu())
+ if (!intel_mid_identify_cpu())
return -ENODEV;
/* No timer, no device */
if (!sfi_mrtc_num)
@@ -175,4 +174,4 @@ static int __init mrst_device_create(void)
return platform_device_register(&vrtc_device);
}
-module_init(mrst_device_create);
+module_init(intel_mid_device_create);
diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c
new file mode 100644
index 000000000000..c84c1ca396bf
--- /dev/null
+++ b/arch/x86/platform/intel-mid/sfi.c
@@ -0,0 +1,488 @@
+/*
+ * sfi.c: Intel MID SFI initialization code
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/intel_pmic_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/skbuff.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/blkdev.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+#include <asm/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <asm/reboot.h>
+
+#define SFI_SIG_OEM0 "OEM0"
+#define MAX_IPCDEVS 24
+#define MAX_SCU_SPI 24
+#define MAX_SCU_I2C 24
+
+static struct platform_device *ipc_devs[MAX_IPCDEVS];
+static struct spi_board_info *spi_devs[MAX_SCU_SPI];
+static struct i2c_board_info *i2c_devs[MAX_SCU_I2C];
+static struct sfi_gpio_table_entry *gpio_table;
+static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
+static int ipc_next_dev;
+static int spi_next_dev;
+static int i2c_next_dev;
+static int i2c_bus[MAX_SCU_I2C];
+static int gpio_num_entry;
+static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
+int sfi_mrtc_num;
+int sfi_mtimer_num;
+
+struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
+EXPORT_SYMBOL_GPL(sfi_mrtc_array);
+
+struct blocking_notifier_head intel_scu_notifier =
+ BLOCKING_NOTIFIER_INIT(intel_scu_notifier);
+EXPORT_SYMBOL_GPL(intel_scu_notifier);
+
+#define intel_mid_sfi_get_pdata(dev, priv) \
+ ((dev)->get_platform_data ? (dev)->get_platform_data(priv) : NULL)
+
+/* parse all the mtimer info to a static mtimer array */
+int __init sfi_parse_mtmr(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb;
+ struct sfi_timer_table_entry *pentry;
+ struct mpc_intsrc mp_irq;
+ int totallen;
+
+ sb = (struct sfi_table_simple *)table;
+ if (!sfi_mtimer_num) {
+ sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
+ struct sfi_timer_table_entry);
+ pentry = (struct sfi_timer_table_entry *) sb->pentry;
+ totallen = sfi_mtimer_num * sizeof(*pentry);
+ memcpy(sfi_mtimer_array, pentry, totallen);
+ }
+
+ pr_debug("SFI MTIMER info (num = %d):\n", sfi_mtimer_num);
+ pentry = sfi_mtimer_array;
+ for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
+ pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz, irq = %d\n",
+ totallen, (u32)pentry->phys_addr,
+ pentry->freq_hz, pentry->irq);
+ if (!pentry->irq)
+ continue;
+ mp_irq.type = MP_INTSRC;
+ mp_irq.irqtype = mp_INT;
+/* triggering mode: edge (bits 2-3); polarity: active high (bits 0-1) */
+ mp_irq.irqflag = 5;
+ mp_irq.srcbus = MP_BUS_ISA;
+ mp_irq.srcbusirq = pentry->irq; /* IRQ */
+ mp_irq.dstapic = MP_APIC_ALL;
+ mp_irq.dstirq = pentry->irq;
+ mp_save_irq(&mp_irq);
+ }
+
+ return 0;
+}
+
+struct sfi_timer_table_entry *sfi_get_mtmr(int hint)
+{
+ int i;
+ if (hint < sfi_mtimer_num) {
+ if (!sfi_mtimer_usage[hint]) {
+ pr_debug("hint taken for timer %d irq %d\n",
+ hint, sfi_mtimer_array[hint].irq);
+ sfi_mtimer_usage[hint] = 1;
+ return &sfi_mtimer_array[hint];
+ }
+ }
+ /* take the first timer available */
+ for (i = 0; i < sfi_mtimer_num;) {
+ if (!sfi_mtimer_usage[i]) {
+ sfi_mtimer_usage[i] = 1;
+ return &sfi_mtimer_array[i];
+ }
+ i++;
+ }
+ return NULL;
+}
+
+void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
+{
+ int i;
+ for (i = 0; i < sfi_mtimer_num;) {
+ if (mtmr->irq == sfi_mtimer_array[i].irq) {
+ sfi_mtimer_usage[i] = 0;
+ return;
+ }
+ i++;
+ }
+}
+
+/* parse all the mrtc info to a global mrtc array */
+int __init sfi_parse_mrtc(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb;
+ struct sfi_rtc_table_entry *pentry;
+ struct mpc_intsrc mp_irq;
+
+ int totallen;
+
+ sb = (struct sfi_table_simple *)table;
+ if (!sfi_mrtc_num) {
+ sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
+ struct sfi_rtc_table_entry);
+ pentry = (struct sfi_rtc_table_entry *)sb->pentry;
+ totallen = sfi_mrtc_num * sizeof(*pentry);
+ memcpy(sfi_mrtc_array, pentry, totallen);
+ }
+
+ pr_debug("SFI RTC info (num = %d):\n", sfi_mrtc_num);
+ pentry = sfi_mrtc_array;
+ for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
+ pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
+ totallen, (u32)pentry->phys_addr, pentry->irq);
+ mp_irq.type = MP_INTSRC;
+ mp_irq.irqtype = mp_INT;
+ mp_irq.irqflag = 0xf; /* level trigger and active low */
+ mp_irq.srcbus = MP_BUS_ISA;
+ mp_irq.srcbusirq = pentry->irq; /* IRQ */
+ mp_irq.dstapic = MP_APIC_ALL;
+ mp_irq.dstirq = pentry->irq;
+ mp_save_irq(&mp_irq);
+ }
+ return 0;
+}
+
+
+/*
+ * Parsing GPIO table first, since the DEVS table will need this table
+ * to map the pin name to the actual pin.
+ */
+static int __init sfi_parse_gpio(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb;
+ struct sfi_gpio_table_entry *pentry;
+ int num, i;
+
+ if (gpio_table)
+ return 0;
+ sb = (struct sfi_table_simple *)table;
+ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
+ pentry = (struct sfi_gpio_table_entry *)sb->pentry;
+
+ gpio_table = kmalloc(num * sizeof(*pentry), GFP_KERNEL);
+ if (!gpio_table)
+ return -1;
+ memcpy(gpio_table, pentry, num * sizeof(*pentry));
+ gpio_num_entry = num;
+
+ pr_debug("GPIO pin info:\n");
+ for (i = 0; i < num; i++, pentry++)
+ pr_debug("info[%2d]: controller = %16.16s, pin_name = %16.16s,"
+ " pin = %d\n", i,
+ pentry->controller_name,
+ pentry->pin_name,
+ pentry->pin_no);
+ return 0;
+}
+
+int get_gpio_by_name(const char *name)
+{
+ struct sfi_gpio_table_entry *pentry = gpio_table;
+ int i;
+
+ if (!pentry)
+ return -1;
+ for (i = 0; i < gpio_num_entry; i++, pentry++) {
+ if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN))
+ return pentry->pin_no;
+ }
+ return -1;
+}
+
+void __init intel_scu_device_register(struct platform_device *pdev)
+{
+ if (ipc_next_dev == MAX_IPCDEVS)
+ pr_err("too many SCU IPC devices");
+ else
+ ipc_devs[ipc_next_dev++] = pdev;
+}
+
+static void __init intel_scu_spi_device_register(struct spi_board_info *sdev)
+{
+ struct spi_board_info *new_dev;
+
+ if (spi_next_dev == MAX_SCU_SPI) {
+ pr_err("too many SCU SPI devices");
+ return;
+ }
+
+ new_dev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!new_dev) {
+ pr_err("failed to alloc mem for delayed spi dev %s\n",
+ sdev->modalias);
+ return;
+ }
+ memcpy(new_dev, sdev, sizeof(*sdev));
+
+ spi_devs[spi_next_dev++] = new_dev;
+}
+
+static void __init intel_scu_i2c_device_register(int bus,
+ struct i2c_board_info *idev)
+{
+ struct i2c_board_info *new_dev;
+
+ if (i2c_next_dev == MAX_SCU_I2C) {
+ pr_err("too many SCU I2C devices");
+ return;
+ }
+
+ new_dev = kzalloc(sizeof(*idev), GFP_KERNEL);
+ if (!new_dev) {
+ pr_err("failed to alloc mem for delayed i2c dev %s\n",
+ idev->type);
+ return;
+ }
+ memcpy(new_dev, idev, sizeof(*idev));
+
+ i2c_bus[i2c_next_dev] = bus;
+ i2c_devs[i2c_next_dev++] = new_dev;
+}
+
+/* Called by IPC driver */
+void intel_scu_devices_create(void)
+{
+ int i;
+
+ for (i = 0; i < ipc_next_dev; i++)
+ platform_device_add(ipc_devs[i]);
+
+ for (i = 0; i < spi_next_dev; i++)
+ spi_register_board_info(spi_devs[i], 1);
+
+ for (i = 0; i < i2c_next_dev; i++) {
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+
+ adapter = i2c_get_adapter(i2c_bus[i]);
+ if (adapter) {
+ client = i2c_new_device(adapter, i2c_devs[i]);
+ if (!client)
+ pr_err("can't create i2c device %s\n",
+ i2c_devs[i]->type);
+ } else
+ i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
+ }
+ intel_scu_notifier_post(SCU_AVAILABLE, NULL);
+}
+EXPORT_SYMBOL_GPL(intel_scu_devices_create);
+
+/* Called by IPC driver */
+void intel_scu_devices_destroy(void)
+{
+ int i;
+
+ intel_scu_notifier_post(SCU_DOWN, NULL);
+
+ for (i = 0; i < ipc_next_dev; i++)
+ platform_device_del(ipc_devs[i]);
+}
+EXPORT_SYMBOL_GPL(intel_scu_devices_destroy);
+
+static void __init install_irq_resource(struct platform_device *pdev, int irq)
+{
+ /* Single threaded */
+ static struct resource res __initdata = {
+ .name = "IRQ",
+ .flags = IORESOURCE_IRQ,
+ };
+ res.start = irq;
+ platform_device_add_resources(pdev, &res, 1);
+}
+
+static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev)
+{
+ struct platform_device *pdev;
+ void *pdata = NULL;
+
+ pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n",
+ pentry->name, pentry->irq);
+ pdata = intel_mid_sfi_get_pdata(dev, pentry);
+
+ pdev = platform_device_alloc(pentry->name, 0);
+ if (pdev == NULL) {
+ pr_err("out of memory for SFI platform device '%s'.\n",
+ pentry->name);
+ return;
+ }
+ install_irq_resource(pdev, pentry->irq);
+
+ pdev->dev.platform_data = pdata;
+ platform_device_add(pdev);
+}
+
+static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev)
+{
+ struct spi_board_info spi_info;
+ void *pdata = NULL;
+
+ memset(&spi_info, 0, sizeof(spi_info));
+ strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
+ spi_info.irq = ((pentry->irq == (u8)0xff) ? 0 : pentry->irq);
+ spi_info.bus_num = pentry->host_num;
+ spi_info.chip_select = pentry->addr;
+ spi_info.max_speed_hz = pentry->max_freq;
+ pr_debug("SPI bus=%d, name=%16.16s, irq=0x%2x, max_freq=%d, cs=%d\n",
+ spi_info.bus_num,
+ spi_info.modalias,
+ spi_info.irq,
+ spi_info.max_speed_hz,
+ spi_info.chip_select);
+
+ pdata = intel_mid_sfi_get_pdata(dev, &spi_info);
+
+ spi_info.platform_data = pdata;
+ if (dev->delay)
+ intel_scu_spi_device_register(&spi_info);
+ else
+ spi_register_board_info(&spi_info, 1);
+}
+
+static void __init sfi_handle_i2c_dev(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev)
+{
+ struct i2c_board_info i2c_info;
+ void *pdata = NULL;
+
+ memset(&i2c_info, 0, sizeof(i2c_info));
+ strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
+ i2c_info.irq = ((pentry->irq == (u8)0xff) ? 0 : pentry->irq);
+ i2c_info.addr = pentry->addr;
+ pr_debug("I2C bus = %d, name = %16.16s, irq = 0x%2x, addr = 0x%x\n",
+ pentry->host_num,
+ i2c_info.type,
+ i2c_info.irq,
+ i2c_info.addr);
+ pdata = intel_mid_sfi_get_pdata(dev, &i2c_info);
+ i2c_info.platform_data = pdata;
+
+ if (dev->delay)
+ intel_scu_i2c_device_register(pentry->host_num, &i2c_info);
+ else
+ i2c_register_board_info(pentry->host_num, &i2c_info, 1);
+}
+
+extern struct devs_id *const __x86_intel_mid_dev_start[],
+ *const __x86_intel_mid_dev_end[];
+
+static struct devs_id __init *get_device_id(u8 type, char *name)
+{
+ struct devs_id *const *dev_table;
+
+ for (dev_table = __x86_intel_mid_dev_start;
+ dev_table < __x86_intel_mid_dev_end; dev_table++) {
+ struct devs_id *dev = *dev_table;
+ if (dev->type == type &&
+ !strncmp(dev->name, name, SFI_NAME_LEN)) {
+ return dev;
+ }
+ }
+
+ return NULL;
+}
+
+static int __init sfi_parse_devs(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb;
+ struct sfi_device_table_entry *pentry;
+ struct devs_id *dev = NULL;
+ int num, i;
+ int ioapic;
+ struct io_apic_irq_attr irq_attr;
+
+ sb = (struct sfi_table_simple *)table;
+ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
+ pentry = (struct sfi_device_table_entry *)sb->pentry;
+
+ for (i = 0; i < num; i++, pentry++) {
+ int irq = pentry->irq;
+
+ if (irq != (u8)0xff) { /* native RTE case */
+ /* these SPI2 devices are not exposed to the system as PCI
+ * devices, but they have a separate RTE entry in the IOAPIC,
+ * so we have to enable them one by one here
+ */
+ ioapic = mp_find_ioapic(irq);
+ irq_attr.ioapic = ioapic;
+ irq_attr.ioapic_pin = irq;
+ irq_attr.trigger = 1;
+ irq_attr.polarity = 1;
+ io_apic_set_pci_routing(NULL, irq, &irq_attr);
+ } else
+ irq = 0; /* No irq */
+
+ dev = get_device_id(pentry->type, pentry->name);
+
+ if (!dev)
+ continue;
+
+ if (dev->device_handler) {
+ dev->device_handler(pentry, dev);
+ } else {
+ switch (pentry->type) {
+ case SFI_DEV_TYPE_IPC:
+ sfi_handle_ipc_dev(pentry, dev);
+ break;
+ case SFI_DEV_TYPE_SPI:
+ sfi_handle_spi_dev(pentry, dev);
+ break;
+ case SFI_DEV_TYPE_I2C:
+ sfi_handle_i2c_dev(pentry, dev);
+ break;
+ case SFI_DEV_TYPE_UART:
+ case SFI_DEV_TYPE_HSI:
+ default:
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static int __init intel_mid_platform_init(void)
+{
+ sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, sfi_parse_gpio);
+ sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs);
+ return 0;
+}
+arch_initcall(intel_mid_platform_init);
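
get_device_id() above walks the pointer array delimited by __x86_intel_mid_dev_start and __x86_intel_mid_dev_end, which is how the sfi_device() registrations in the device_libs files become visible to the SFI parser. Those bounds come from the linker script; below is a sketch of how such a section-based registration macro is commonly implemented. The section name, the macro body and the simplified struct are assumptions for illustration, not definitions taken from this patch (the real ones live in <asm/intel-mid.h> and the linker script, which are not part of this hunk):

/* Simplified stand-in for the real struct devs_id. */
struct devs_id {
	char name[17];				/* SFI_NAME_LEN + 1 */
	unsigned char type;			/* SFI_DEV_TYPE_* */
	unsigned char delay;			/* defer registration to the SCU if set */
	void *(*get_platform_data)(void *info);
	void (*device_handler)(void *pentry, struct devs_id *dev);
};

/* Place a pointer to each registered entry in a dedicated section; the
 * linker script then wraps that section with the
 * __x86_intel_mid_dev_start/__x86_intel_mid_dev_end markers that
 * get_device_id() iterates over. */
#define sfi_device(i)							\
	static const struct devs_id *const __intel_mid_dev_##i		\
	__attribute__((used, section(".x86_intel_mid_dev.init"))) = &i
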
diff --git a/arch/x86/platform/mrst/Makefile b/arch/x86/platform/mrst/Makefile
deleted file mode 100644
index af1da7e623f9..000000000000
--- a/arch/x86/platform/mrst/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_X86_INTEL_MID) += mrst.o
-obj-$(CONFIG_X86_INTEL_MID) += vrtc.o
-obj-$(CONFIG_EARLY_PRINTK_INTEL_MID) += early_printk_mrst.o
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
deleted file mode 100644
index 3ca5957b7a34..000000000000
--- a/arch/x86/platform/mrst/mrst.c
+++ /dev/null
@@ -1,1052 +0,0 @@
-/*
- * mrst.c: Intel Moorestown platform specific setup code
- *
- * (C) Copyright 2008 Intel Corporation
- * Author: Jacob Pan (jacob.jun.pan@intel.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#define pr_fmt(fmt) "mrst: " fmt
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
-#include <linux/sfi.h>
-#include <linux/intel_pmic_gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/i2c.h>
-#include <linux/platform_data/pca953x.h>
-#include <linux/gpio_keys.h>
-#include <linux/input.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/mfd/intel_msic.h>
-#include <linux/gpio.h>
-#include <linux/i2c/tc35876x.h>
-
-#include <asm/setup.h>
-#include <asm/mpspec_def.h>
-#include <asm/hw_irq.h>
-#include <asm/apic.h>
-#include <asm/io_apic.h>
-#include <asm/mrst.h>
-#include <asm/mrst-vrtc.h>
-#include <asm/io.h>
-#include <asm/i8259.h>
-#include <asm/intel_scu_ipc.h>
-#include <asm/apb_timer.h>
-#include <asm/reboot.h>
-
-/*
- * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
- * cmdline option x86_mrst_timer can be used to override the configuration
- * to prefer one or the other.
- * at runtime, there are basically three timer configurations:
- * 1. per cpu apbt clock only
- * 2. per cpu always-on lapic clocks only, this is Penwell/Medfield only
- * 3. per cpu lapic clock (C3STOP) and one apbt clock, with broadcast.
- *
- * by default (without cmdline option), platform code first detects cpu type
- * to see if we are on lincroft or penwell, then set up both lapic or apbt
- * clocks accordingly.
- * i.e. by default, medfield uses configuration #2, moorestown uses #1.
- * config #3 is supported but not recommended on medfield.
- *
- * rating and feature summary:
- * lapic (with C3STOP) --------- 100
- * apbt (always-on) ------------ 110
- * lapic (always-on,ARAT) ------ 150
- */
-
-enum mrst_timer_options mrst_timer_options;
-
-static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
-static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
-enum mrst_cpu_type __mrst_cpu_chip;
-EXPORT_SYMBOL_GPL(__mrst_cpu_chip);
-
-int sfi_mtimer_num;
-
-struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
-EXPORT_SYMBOL_GPL(sfi_mrtc_array);
-int sfi_mrtc_num;
-
-static void mrst_power_off(void)
-{
-}
-
-static void mrst_reboot(void)
-{
- intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
-}
-
-/* parse all the mtimer info to a static mtimer array */
-static int __init sfi_parse_mtmr(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_timer_table_entry *pentry;
- struct mpc_intsrc mp_irq;
- int totallen;
-
- sb = (struct sfi_table_simple *)table;
- if (!sfi_mtimer_num) {
- sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
- struct sfi_timer_table_entry);
- pentry = (struct sfi_timer_table_entry *) sb->pentry;
- totallen = sfi_mtimer_num * sizeof(*pentry);
- memcpy(sfi_mtimer_array, pentry, totallen);
- }
-
- pr_debug("SFI MTIMER info (num = %d):\n", sfi_mtimer_num);
- pentry = sfi_mtimer_array;
- for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
- pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz,"
- " irq = %d\n", totallen, (u32)pentry->phys_addr,
- pentry->freq_hz, pentry->irq);
- if (!pentry->irq)
- continue;
- mp_irq.type = MP_INTSRC;
- mp_irq.irqtype = mp_INT;
-/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
- mp_irq.irqflag = 5;
- mp_irq.srcbus = MP_BUS_ISA;
- mp_irq.srcbusirq = pentry->irq; /* IRQ */
- mp_irq.dstapic = MP_APIC_ALL;
- mp_irq.dstirq = pentry->irq;
- mp_save_irq(&mp_irq);
- }
-
- return 0;
-}
-
-struct sfi_timer_table_entry *sfi_get_mtmr(int hint)
-{
- int i;
- if (hint < sfi_mtimer_num) {
- if (!sfi_mtimer_usage[hint]) {
- pr_debug("hint taken for timer %d irq %d\n",\
- hint, sfi_mtimer_array[hint].irq);
- sfi_mtimer_usage[hint] = 1;
- return &sfi_mtimer_array[hint];
- }
- }
- /* take the first timer available */
- for (i = 0; i < sfi_mtimer_num;) {
- if (!sfi_mtimer_usage[i]) {
- sfi_mtimer_usage[i] = 1;
- return &sfi_mtimer_array[i];
- }
- i++;
- }
- return NULL;
-}
-
-void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
-{
- int i;
- for (i = 0; i < sfi_mtimer_num;) {
- if (mtmr->irq == sfi_mtimer_array[i].irq) {
- sfi_mtimer_usage[i] = 0;
- return;
- }
- i++;
- }
-}
-
-/* parse all the mrtc info to a global mrtc array */
-int __init sfi_parse_mrtc(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_rtc_table_entry *pentry;
- struct mpc_intsrc mp_irq;
-
- int totallen;
-
- sb = (struct sfi_table_simple *)table;
- if (!sfi_mrtc_num) {
- sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
- struct sfi_rtc_table_entry);
- pentry = (struct sfi_rtc_table_entry *)sb->pentry;
- totallen = sfi_mrtc_num * sizeof(*pentry);
- memcpy(sfi_mrtc_array, pentry, totallen);
- }
-
- pr_debug("SFI RTC info (num = %d):\n", sfi_mrtc_num);
- pentry = sfi_mrtc_array;
- for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
- pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
- totallen, (u32)pentry->phys_addr, pentry->irq);
- mp_irq.type = MP_INTSRC;
- mp_irq.irqtype = mp_INT;
- mp_irq.irqflag = 0xf; /* level trigger and active low */
- mp_irq.srcbus = MP_BUS_ISA;
- mp_irq.srcbusirq = pentry->irq; /* IRQ */
- mp_irq.dstapic = MP_APIC_ALL;
- mp_irq.dstirq = pentry->irq;
- mp_save_irq(&mp_irq);
- }
- return 0;
-}
-
-static unsigned long __init mrst_calibrate_tsc(void)
-{
- unsigned long fast_calibrate;
- u32 lo, hi, ratio, fsb;
-
- rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
- pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
- ratio = (hi >> 8) & 0x1f;
- pr_debug("ratio is %d\n", ratio);
- if (!ratio) {
- pr_err("read a zero ratio, should be incorrect!\n");
- pr_err("force tsc ratio to 16 ...\n");
- ratio = 16;
- }
- rdmsr(MSR_FSB_FREQ, lo, hi);
- if ((lo & 0x7) == 0x7)
- fsb = PENWELL_FSB_FREQ_83SKU;
- else
- fsb = PENWELL_FSB_FREQ_100SKU;
- fast_calibrate = ratio * fsb;
- pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
- lapic_timer_frequency = fsb * 1000 / HZ;
- /* mark tsc clocksource as reliable */
- set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
-
- if (fast_calibrate)
- return fast_calibrate;
-
- return 0;
-}
-
-static void __init mrst_time_init(void)
-{
- sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
- switch (mrst_timer_options) {
- case MRST_TIMER_APBT_ONLY:
- break;
- case MRST_TIMER_LAPIC_APBT:
- x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
- x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
- break;
- default:
- if (!boot_cpu_has(X86_FEATURE_ARAT))
- break;
- x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
- x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
- return;
- }
- /* we need at least one APB timer */
- pre_init_apic_IRQ0();
- apbt_time_init();
-}
-
-static void mrst_arch_setup(void)
-{
- if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
- __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
- else {
- pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
- __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
- }
-}
-
-/* MID systems don't have i8042 controller */
-static int mrst_i8042_detect(void)
-{
- return 0;
-}
-
-/*
- * Moorestown does not have external NMI source nor port 0x61 to report
- * NMI status. The possible NMI sources are from pmu as a result of NMI
- * watchdog or lock debug. Reading io port 0x61 results in 0xff which
- * misled NMI handler.
- */
-static unsigned char mrst_get_nmi_reason(void)
-{
- return 0;
-}
-
-/*
- * Moorestown specific x86_init function overrides and early setup
- * calls.
- */
-void __init x86_mrst_early_setup(void)
-{
- x86_init.resources.probe_roms = x86_init_noop;
- x86_init.resources.reserve_resources = x86_init_noop;
-
- x86_init.timers.timer_init = mrst_time_init;
- x86_init.timers.setup_percpu_clockev = x86_init_noop;
-
- x86_init.irqs.pre_vector_init = x86_init_noop;
-
- x86_init.oem.arch_setup = mrst_arch_setup;
-
- x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
-
- x86_platform.calibrate_tsc = mrst_calibrate_tsc;
- x86_platform.i8042_detect = mrst_i8042_detect;
- x86_init.timers.wallclock_init = mrst_rtc_init;
- x86_platform.get_nmi_reason = mrst_get_nmi_reason;
-
- x86_init.pci.init = pci_mrst_init;
- x86_init.pci.fixup_irqs = x86_init_noop;
-
- legacy_pic = &null_legacy_pic;
-
- /* Moorestown specific power_off/restart method */
- pm_power_off = mrst_power_off;
- machine_ops.emergency_restart = mrst_reboot;
-
- /* Avoid searching for BIOS MP tables */
- x86_init.mpparse.find_smp_config = x86_init_noop;
- x86_init.mpparse.get_smp_config = x86_init_uint_noop;
- set_bit(MP_BUS_ISA, mp_bus_not_pci);
-}
-
-/*
- * if user does not want to use per CPU apb timer, just give it a lower rating
- * than local apic timer and skip the late per cpu timer init.
- */
-static inline int __init setup_x86_mrst_timer(char *arg)
-{
- if (!arg)
- return -EINVAL;
-
- if (strcmp("apbt_only", arg) == 0)
- mrst_timer_options = MRST_TIMER_APBT_ONLY;
- else if (strcmp("lapic_and_apbt", arg) == 0)
- mrst_timer_options = MRST_TIMER_LAPIC_APBT;
- else {
- pr_warning("X86 MRST timer option %s not recognised"
- " use x86_mrst_timer=apbt_only or lapic_and_apbt\n",
- arg);
- return -EINVAL;
- }
- return 0;
-}
-__setup("x86_mrst_timer=", setup_x86_mrst_timer);
-
-/*
- * Parsing GPIO table first, since the DEVS table will need this table
- * to map the pin name to the actual pin.
- */
-static struct sfi_gpio_table_entry *gpio_table;
-static int gpio_num_entry;
-
-static int __init sfi_parse_gpio(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_gpio_table_entry *pentry;
- int num, i;
-
- if (gpio_table)
- return 0;
- sb = (struct sfi_table_simple *)table;
- num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
- pentry = (struct sfi_gpio_table_entry *)sb->pentry;
-
- gpio_table = kmalloc(num * sizeof(*pentry), GFP_KERNEL);
- if (!gpio_table)
- return -1;
- memcpy(gpio_table, pentry, num * sizeof(*pentry));
- gpio_num_entry = num;
-
- pr_debug("GPIO pin info:\n");
- for (i = 0; i < num; i++, pentry++)
- pr_debug("info[%2d]: controller = %16.16s, pin_name = %16.16s,"
- " pin = %d\n", i,
- pentry->controller_name,
- pentry->pin_name,
- pentry->pin_no);
- return 0;
-}
-
-static int get_gpio_by_name(const char *name)
-{
- struct sfi_gpio_table_entry *pentry = gpio_table;
- int i;
-
- if (!pentry)
- return -1;
- for (i = 0; i < gpio_num_entry; i++, pentry++) {
- if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN))
- return pentry->pin_no;
- }
- return -1;
-}
-
-/*
- * Here defines the array of devices platform data that IAFW would export
- * through SFI "DEVS" table, we use name and type to match the device and
- * its platform data.
- */
-struct devs_id {
- char name[SFI_NAME_LEN + 1];
- u8 type;
- u8 delay;
- void *(*get_platform_data)(void *info);
-};
-
-/* the offset for the mapping of global gpio pin to irq */
-#define MRST_IRQ_OFFSET 0x100
-
-static void __init *pmic_gpio_platform_data(void *info)
-{
- static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
- int gpio_base = get_gpio_by_name("pmic_gpio_base");
-
- if (gpio_base == -1)
- gpio_base = 64;
- pmic_gpio_pdata.gpio_base = gpio_base;
- pmic_gpio_pdata.irq_base = gpio_base + MRST_IRQ_OFFSET;
- pmic_gpio_pdata.gpiointr = 0xffffeff8;
-
- return &pmic_gpio_pdata;
-}
-
-static void __init *max3111_platform_data(void *info)
-{
- struct spi_board_info *spi_info = info;
- int intr = get_gpio_by_name("max3111_int");
-
- spi_info->mode = SPI_MODE_0;
- if (intr == -1)
- return NULL;
- spi_info->irq = intr + MRST_IRQ_OFFSET;
- return NULL;
-}
-
-/* we have multiple max7315 on the board ... */
-#define MAX7315_NUM 2
-static void __init *max7315_platform_data(void *info)
-{
- static struct pca953x_platform_data max7315_pdata[MAX7315_NUM];
- static int nr;
- struct pca953x_platform_data *max7315 = &max7315_pdata[nr];
- struct i2c_board_info *i2c_info = info;
- int gpio_base, intr;
- char base_pin_name[SFI_NAME_LEN + 1];
- char intr_pin_name[SFI_NAME_LEN + 1];
-
- if (nr == MAX7315_NUM) {
- pr_err("too many max7315s, we only support %d\n",
- MAX7315_NUM);
- return NULL;
- }
- /* we have several max7315 on the board, we only need load several
- * instances of the same pca953x driver to cover them
- */
- strcpy(i2c_info->type, "max7315");
- if (nr++) {
- sprintf(base_pin_name, "max7315_%d_base", nr);
- sprintf(intr_pin_name, "max7315_%d_int", nr);
- } else {
- strcpy(base_pin_name, "max7315_base");
- strcpy(intr_pin_name, "max7315_int");
- }
-
- gpio_base = get_gpio_by_name(base_pin_name);
- intr = get_gpio_by_name(intr_pin_name);
-
- if (gpio_base == -1)
- return NULL;
- max7315->gpio_base = gpio_base;
- if (intr != -1) {
- i2c_info->irq = intr + MRST_IRQ_OFFSET;
- max7315->irq_base = gpio_base + MRST_IRQ_OFFSET;
- } else {
- i2c_info->irq = -1;
- max7315->irq_base = -1;
- }
- return max7315;
-}
-
-static void *tca6416_platform_data(void *info)
-{
- static struct pca953x_platform_data tca6416;
- struct i2c_board_info *i2c_info = info;
- int gpio_base, intr;
- char base_pin_name[SFI_NAME_LEN + 1];
- char intr_pin_name[SFI_NAME_LEN + 1];
-
- strcpy(i2c_info->type, "tca6416");
- strcpy(base_pin_name, "tca6416_base");
- strcpy(intr_pin_name, "tca6416_int");
-
- gpio_base = get_gpio_by_name(base_pin_name);
- intr = get_gpio_by_name(intr_pin_name);
-
- if (gpio_base == -1)
- return NULL;
- tca6416.gpio_base = gpio_base;
- if (intr != -1) {
- i2c_info->irq = intr + MRST_IRQ_OFFSET;
- tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET;
- } else {
- i2c_info->irq = -1;
- tca6416.irq_base = -1;
- }
- return &tca6416;
-}
-
-static void *mpu3050_platform_data(void *info)
-{
- struct i2c_board_info *i2c_info = info;
- int intr = get_gpio_by_name("mpu3050_int");
-
- if (intr == -1)
- return NULL;
-
- i2c_info->irq = intr + MRST_IRQ_OFFSET;
- return NULL;
-}
-
-static void __init *emc1403_platform_data(void *info)
-{
- static short intr2nd_pdata;
- struct i2c_board_info *i2c_info = info;
- int intr = get_gpio_by_name("thermal_int");
- int intr2nd = get_gpio_by_name("thermal_alert");
-
- if (intr == -1 || intr2nd == -1)
- return NULL;
-
- i2c_info->irq = intr + MRST_IRQ_OFFSET;
- intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET;
-
- return &intr2nd_pdata;
-}
-
-static void __init *lis331dl_platform_data(void *info)
-{
- static short intr2nd_pdata;
- struct i2c_board_info *i2c_info = info;
- int intr = get_gpio_by_name("accel_int");
- int intr2nd = get_gpio_by_name("accel_2");
-
- if (intr == -1 || intr2nd == -1)
- return NULL;
-
- i2c_info->irq = intr + MRST_IRQ_OFFSET;
- intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET;
-
- return &intr2nd_pdata;
-}
-
-static void __init *no_platform_data(void *info)
-{
- return NULL;
-}
-
-static struct resource msic_resources[] = {
- {
- .start = INTEL_MSIC_IRQ_PHYS_BASE,
- .end = INTEL_MSIC_IRQ_PHYS_BASE + 64 - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct intel_msic_platform_data msic_pdata;
-
-static struct platform_device msic_device = {
- .name = "intel_msic",
- .id = -1,
- .dev = {
- .platform_data = &msic_pdata,
- },
- .num_resources = ARRAY_SIZE(msic_resources),
- .resource = msic_resources,
-};
-
-static inline bool mrst_has_msic(void)
-{
- return mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL;
-}
-
-static int msic_scu_status_change(struct notifier_block *nb,
- unsigned long code, void *data)
-{
- if (code == SCU_DOWN) {
- platform_device_unregister(&msic_device);
- return 0;
- }
-
- return platform_device_register(&msic_device);
-}
-
-static int __init msic_init(void)
-{
- static struct notifier_block msic_scu_notifier = {
- .notifier_call = msic_scu_status_change,
- };
-
- /*
- * We need to be sure that the SCU IPC is ready before MSIC device
- * can be registered.
- */
- if (mrst_has_msic())
- intel_scu_notifier_add(&msic_scu_notifier);
-
- return 0;
-}
-arch_initcall(msic_init);
-
-/*
- * msic_generic_platform_data - sets generic platform data for the block
- * @info: pointer to the SFI device table entry for this block
- * @block: MSIC block
- *
- * Function sets IRQ number from the SFI table entry for given device to
- * the MSIC platform data.
- */
-static void *msic_generic_platform_data(void *info, enum intel_msic_block block)
-{
- struct sfi_device_table_entry *entry = info;
-
- BUG_ON(block < 0 || block >= INTEL_MSIC_BLOCK_LAST);
- msic_pdata.irq[block] = entry->irq;
-
- return no_platform_data(info);
-}
-
-static void *msic_battery_platform_data(void *info)
-{
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_BATTERY);
-}
-
-static void *msic_gpio_platform_data(void *info)
-{
- static struct intel_msic_gpio_pdata pdata;
- int gpio = get_gpio_by_name("msic_gpio_base");
-
- if (gpio < 0)
- return NULL;
-
- pdata.gpio_base = gpio;
- msic_pdata.gpio = &pdata;
-
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_GPIO);
-}
-
-static void *msic_audio_platform_data(void *info)
-{
- struct platform_device *pdev;
-
- pdev = platform_device_register_simple("sst-platform", -1, NULL, 0);
- if (IS_ERR(pdev)) {
- pr_err("failed to create audio platform device\n");
- return NULL;
- }
-
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_AUDIO);
-}
-
-static void *msic_power_btn_platform_data(void *info)
-{
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_POWER_BTN);
-}
-
-static void *msic_ocd_platform_data(void *info)
-{
- static struct intel_msic_ocd_pdata pdata;
- int gpio = get_gpio_by_name("ocd_gpio");
-
- if (gpio < 0)
- return NULL;
-
- pdata.gpio = gpio;
- msic_pdata.ocd = &pdata;
-
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_OCD);
-}
-
-static void *msic_thermal_platform_data(void *info)
-{
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_THERMAL);
-}
-
-/* tc35876x DSI-LVDS bridge chip and panel platform data */
-static void *tc35876x_platform_data(void *data)
-{
- static struct tc35876x_platform_data pdata;
-
- /* gpio pins set to -1 will not be used by the driver */
- pdata.gpio_bridge_reset = get_gpio_by_name("LCMB_RXEN");
- pdata.gpio_panel_bl_en = get_gpio_by_name("6S6P_BL_EN");
- pdata.gpio_panel_vadd = get_gpio_by_name("EN_VREG_LCD_V3P3");
-
- return &pdata;
-}
-
-static const struct devs_id __initconst device_ids[] = {
- {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
- {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
- {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data},
- {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
- {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
- {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
- {"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data},
- {"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
- {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
- {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
- {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data},
- {"i2c_disp_brig", SFI_DEV_TYPE_I2C, 0, &tc35876x_platform_data},
-
- /* MSIC subdevices */
- {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data},
- {"msic_gpio", SFI_DEV_TYPE_IPC, 1, &msic_gpio_platform_data},
- {"msic_audio", SFI_DEV_TYPE_IPC, 1, &msic_audio_platform_data},
- {"msic_power_btn", SFI_DEV_TYPE_IPC, 1, &msic_power_btn_platform_data},
- {"msic_ocd", SFI_DEV_TYPE_IPC, 1, &msic_ocd_platform_data},
- {"msic_thermal", SFI_DEV_TYPE_IPC, 1, &msic_thermal_platform_data},
-
- {},
-};
-
-#define MAX_IPCDEVS 24
-static struct platform_device *ipc_devs[MAX_IPCDEVS];
-static int ipc_next_dev;
-
-#define MAX_SCU_SPI 24
-static struct spi_board_info *spi_devs[MAX_SCU_SPI];
-static int spi_next_dev;
-
-#define MAX_SCU_I2C 24
-static struct i2c_board_info *i2c_devs[MAX_SCU_I2C];
-static int i2c_bus[MAX_SCU_I2C];
-static int i2c_next_dev;
-
-static void __init intel_scu_device_register(struct platform_device *pdev)
-{
- if(ipc_next_dev == MAX_IPCDEVS)
- pr_err("too many SCU IPC devices");
- else
- ipc_devs[ipc_next_dev++] = pdev;
-}
-
-static void __init intel_scu_spi_device_register(struct spi_board_info *sdev)
-{
- struct spi_board_info *new_dev;
-
- if (spi_next_dev == MAX_SCU_SPI) {
- pr_err("too many SCU SPI devices");
- return;
- }
-
- new_dev = kzalloc(sizeof(*sdev), GFP_KERNEL);
- if (!new_dev) {
- pr_err("failed to alloc mem for delayed spi dev %s\n",
- sdev->modalias);
- return;
- }
- memcpy(new_dev, sdev, sizeof(*sdev));
-
- spi_devs[spi_next_dev++] = new_dev;
-}
-
-static void __init intel_scu_i2c_device_register(int bus,
- struct i2c_board_info *idev)
-{
- struct i2c_board_info *new_dev;
-
- if (i2c_next_dev == MAX_SCU_I2C) {
- pr_err("too many SCU I2C devices");
- return;
- }
-
- new_dev = kzalloc(sizeof(*idev), GFP_KERNEL);
- if (!new_dev) {
- pr_err("failed to alloc mem for delayed i2c dev %s\n",
- idev->type);
- return;
- }
- memcpy(new_dev, idev, sizeof(*idev));
-
- i2c_bus[i2c_next_dev] = bus;
- i2c_devs[i2c_next_dev++] = new_dev;
-}
-
-BLOCKING_NOTIFIER_HEAD(intel_scu_notifier);
-EXPORT_SYMBOL_GPL(intel_scu_notifier);
-
-/* Called by IPC driver */
-void intel_scu_devices_create(void)
-{
- int i;
-
- for (i = 0; i < ipc_next_dev; i++)
- platform_device_add(ipc_devs[i]);
-
- for (i = 0; i < spi_next_dev; i++)
- spi_register_board_info(spi_devs[i], 1);
-
- for (i = 0; i < i2c_next_dev; i++) {
- struct i2c_adapter *adapter;
- struct i2c_client *client;
-
- adapter = i2c_get_adapter(i2c_bus[i]);
- if (adapter) {
- client = i2c_new_device(adapter, i2c_devs[i]);
- if (!client)
- pr_err("can't create i2c device %s\n",
- i2c_devs[i]->type);
- } else
- i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
- }
- intel_scu_notifier_post(SCU_AVAILABLE, NULL);
-}
-EXPORT_SYMBOL_GPL(intel_scu_devices_create);
-
-/* Called by IPC driver */
-void intel_scu_devices_destroy(void)
-{
- int i;
-
- intel_scu_notifier_post(SCU_DOWN, NULL);
-
- for (i = 0; i < ipc_next_dev; i++)
- platform_device_del(ipc_devs[i]);
-}
-EXPORT_SYMBOL_GPL(intel_scu_devices_destroy);
-
-static void __init install_irq_resource(struct platform_device *pdev, int irq)
-{
- /* Single threaded */
- static struct resource __initdata res = {
- .name = "IRQ",
- .flags = IORESOURCE_IRQ,
- };
- res.start = irq;
- platform_device_add_resources(pdev, &res, 1);
-}
-
-static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *entry)
-{
- const struct devs_id *dev = device_ids;
- struct platform_device *pdev;
- void *pdata = NULL;
-
- while (dev->name[0]) {
- if (dev->type == SFI_DEV_TYPE_IPC &&
- !strncmp(dev->name, entry->name, SFI_NAME_LEN)) {
- pdata = dev->get_platform_data(entry);
- break;
- }
- dev++;
- }
-
- /*
- * On Medfield the platform device creation is handled by the MSIC
- * MFD driver so we don't need to do it here.
- */
- if (mrst_has_msic())
- return;
-
- pdev = platform_device_alloc(entry->name, 0);
- if (pdev == NULL) {
- pr_err("out of memory for SFI platform device '%s'.\n",
- entry->name);
- return;
- }
- install_irq_resource(pdev, entry->irq);
-
- pdev->dev.platform_data = pdata;
- intel_scu_device_register(pdev);
-}
-
-static void __init sfi_handle_spi_dev(struct spi_board_info *spi_info)
-{
- const struct devs_id *dev = device_ids;
- void *pdata = NULL;
-
- while (dev->name[0]) {
- if (dev->type == SFI_DEV_TYPE_SPI &&
- !strncmp(dev->name, spi_info->modalias, SFI_NAME_LEN)) {
- pdata = dev->get_platform_data(spi_info);
- break;
- }
- dev++;
- }
- spi_info->platform_data = pdata;
- if (dev->delay)
- intel_scu_spi_device_register(spi_info);
- else
- spi_register_board_info(spi_info, 1);
-}
-
-static void __init sfi_handle_i2c_dev(int bus, struct i2c_board_info *i2c_info)
-{
- const struct devs_id *dev = device_ids;
- void *pdata = NULL;
-
- while (dev->name[0]) {
- if (dev->type == SFI_DEV_TYPE_I2C &&
- !strncmp(dev->name, i2c_info->type, SFI_NAME_LEN)) {
- pdata = dev->get_platform_data(i2c_info);
- break;
- }
- dev++;
- }
- i2c_info->platform_data = pdata;
-
- if (dev->delay)
- intel_scu_i2c_device_register(bus, i2c_info);
- else
- i2c_register_board_info(bus, i2c_info, 1);
- }
-
-
-static int __init sfi_parse_devs(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_device_table_entry *pentry;
- struct spi_board_info spi_info;
- struct i2c_board_info i2c_info;
- int num, i, bus;
- int ioapic;
- struct io_apic_irq_attr irq_attr;
-
- sb = (struct sfi_table_simple *)table;
- num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
- pentry = (struct sfi_device_table_entry *)sb->pentry;
-
- for (i = 0; i < num; i++, pentry++) {
- int irq = pentry->irq;
-
- if (irq != (u8)0xff) { /* native RTE case */
- /* these SPI2 devices are not exposed to system as PCI
- * devices, but they have separate RTE entry in IOAPIC
- * so we have to enable them one by one here
- */
- ioapic = mp_find_ioapic(irq);
- irq_attr.ioapic = ioapic;
- irq_attr.ioapic_pin = irq;
- irq_attr.trigger = 1;
- irq_attr.polarity = 1;
- io_apic_set_pci_routing(NULL, irq, &irq_attr);
- } else
- irq = 0; /* No irq */
-
- switch (pentry->type) {
- case SFI_DEV_TYPE_IPC:
- pr_debug("info[%2d]: IPC bus, name = %16.16s, "
- "irq = 0x%2x\n", i, pentry->name, pentry->irq);
- sfi_handle_ipc_dev(pentry);
- break;
- case SFI_DEV_TYPE_SPI:
- memset(&spi_info, 0, sizeof(spi_info));
- strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
- spi_info.irq = irq;
- spi_info.bus_num = pentry->host_num;
- spi_info.chip_select = pentry->addr;
- spi_info.max_speed_hz = pentry->max_freq;
- pr_debug("info[%2d]: SPI bus = %d, name = %16.16s, "
- "irq = 0x%2x, max_freq = %d, cs = %d\n", i,
- spi_info.bus_num,
- spi_info.modalias,
- spi_info.irq,
- spi_info.max_speed_hz,
- spi_info.chip_select);
- sfi_handle_spi_dev(&spi_info);
- break;
- case SFI_DEV_TYPE_I2C:
- memset(&i2c_info, 0, sizeof(i2c_info));
- bus = pentry->host_num;
- strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
- i2c_info.irq = irq;
- i2c_info.addr = pentry->addr;
- pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
- "irq = 0x%2x, addr = 0x%x\n", i, bus,
- i2c_info.type,
- i2c_info.irq,
- i2c_info.addr);
- sfi_handle_i2c_dev(bus, &i2c_info);
- break;
- case SFI_DEV_TYPE_UART:
- case SFI_DEV_TYPE_HSI:
- default:
- ;
- }
- }
- return 0;
-}
-
-static int __init mrst_platform_init(void)
-{
- sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, sfi_parse_gpio);
- sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs);
- return 0;
-}
-arch_initcall(mrst_platform_init);
-
-/*
- * we will search these buttons in SFI GPIO table (by name)
- * and register them dynamically. Please add all possible
- * buttons here, we will shrink them if no GPIO found.
- */
-static struct gpio_keys_button gpio_button[] = {
- {KEY_POWER, -1, 1, "power_btn", EV_KEY, 0, 3000},
- {KEY_PROG1, -1, 1, "prog_btn1", EV_KEY, 0, 20},
- {KEY_PROG2, -1, 1, "prog_btn2", EV_KEY, 0, 20},
- {SW_LID, -1, 1, "lid_switch", EV_SW, 0, 20},
- {KEY_VOLUMEUP, -1, 1, "vol_up", EV_KEY, 0, 20},
- {KEY_VOLUMEDOWN, -1, 1, "vol_down", EV_KEY, 0, 20},
- {KEY_CAMERA, -1, 1, "camera_full", EV_KEY, 0, 20},
- {KEY_CAMERA_FOCUS, -1, 1, "camera_half", EV_KEY, 0, 20},
- {SW_KEYPAD_SLIDE, -1, 1, "MagSw1", EV_SW, 0, 20},
- {SW_KEYPAD_SLIDE, -1, 1, "MagSw2", EV_SW, 0, 20},
-};
-
-static struct gpio_keys_platform_data mrst_gpio_keys = {
- .buttons = gpio_button,
- .rep = 1,
- .nbuttons = -1, /* will fill it after search */
-};
-
-static struct platform_device pb_device = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &mrst_gpio_keys,
- },
-};
-
-/*
- * Shrink the non-existent buttons, register the gpio button
- * device if there is some
- */
-static int __init pb_keys_init(void)
-{
- struct gpio_keys_button *gb = gpio_button;
- int i, num, good = 0;
-
- num = sizeof(gpio_button) / sizeof(struct gpio_keys_button);
- for (i = 0; i < num; i++) {
- gb[i].gpio = get_gpio_by_name(gb[i].desc);
- pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc, gb[i].gpio);
- if (gb[i].gpio == -1)
- continue;
-
- if (i != good)
- gb[good] = gb[i];
- good++;
- }
-
- if (good) {
- mrst_gpio_keys.nbuttons = good;
- return platform_device_register(&pb_device);
- }
- return 0;
-}
-late_initcall(pb_keys_init);
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index fef7d0ba7e3a..649a12befba9 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -40,16 +40,9 @@ static bool lid_wake_on_close;
*/
static int set_lid_wake_behavior(bool wake_on_close)
{
- struct acpi_object_list arg_list;
- union acpi_object arg;
acpi_status status;
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = wake_on_close;
-
- status = acpi_evaluate_object(NULL, "\\_SB.PCI0.LID.LIDW", &arg_list, NULL);
+ status = acpi_execute_simple_method(NULL, "\\_SB.PCI0.LID.LIDW", wake_on_close);
if (ACPI_FAILURE(status)) {
pr_warning(PFX "failed to set lid behavior\n");
return 1;
diff --git a/arch/x86/platform/uv/Makefile b/arch/x86/platform/uv/Makefile
index 6c40995fefb8..52079bebd014 100644
--- a/arch/x86/platform/uv/Makefile
+++ b/arch/x86/platform/uv/Makefile
@@ -1 +1 @@
-obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
+obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o uv_nmi.o
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
new file mode 100644
index 000000000000..d3d02688f6f7
--- /dev/null
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -0,0 +1,711 @@
+/*
+ * SGI NMI/TRACE support routines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) Mike Travis
+ */
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/kdb.h>
+#include <linux/kexec.h>
+#include <linux/kgdb.h>
+#include <linux/module.h>
+#include <linux/nmi.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/apic.h>
+#include <asm/current.h>
+#include <asm/kdebug.h>
+#include <asm/local64.h>
+#include <asm/nmi.h>
+#include <asm/traps.h>
+#include <asm/uv/uv.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/uv_mmrs.h>
+
+void (*uv_trace_func)(const char *f, const int l, const char *fmt, ...);
+EXPORT_SYMBOL(uv_trace_func);
+
+void (*uv_trace_nmi_func)(unsigned int reason, struct pt_regs *regs);
+EXPORT_SYMBOL(uv_trace_nmi_func);
+
+
+/*
+ * UV handler for NMI
+ *
+ * Handle system-wide NMI events generated by the global 'power nmi' command.
+ *
+ * Basic operation is to field the NMI interrupt on each cpu and wait
+ * until all cpus have arrived into the nmi handler. If some cpus do not
+ * make it into the handler, try and force them in with the IPI(NMI) signal.
+ *
+ * We also have to lessen UV Hub MMR accesses as much as possible as this
+ * disrupts the UV Hub's primary mission of directing NumaLink traffic and
+ * can cause system problems to occur.
+ *
+ * To do this we register our primary NMI notifier on the NMI_UNKNOWN
+ * chain. This reduces the number of false NMI calls when the perf
+ * tools are running which generate an enormous number of NMIs per
+ * second (~4M/s for 1024 cpu threads). Our secondary NMI handler is
+ * very short as it only checks that if it has been "pinged" with the
+ * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
+ *
+ */
+
+static struct uv_hub_nmi_s **uv_hub_nmi_list;
+
+DEFINE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
+EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_nmi);
+
+static unsigned long nmi_mmr;
+static unsigned long nmi_mmr_clear;
+static unsigned long nmi_mmr_pending;
+
+static atomic_t uv_in_nmi;
+static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
+static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
+static atomic_t uv_nmi_slave_continue;
+static atomic_t uv_nmi_kexec_failed;
+static cpumask_var_t uv_nmi_cpu_mask;
+
+/* Values for uv_nmi_slave_continue */
+#define SLAVE_CLEAR 0
+#define SLAVE_CONTINUE 1
+#define SLAVE_EXIT 2
+
+/*
+ * By default all stack dumps go to the console and the log buffer.
+ * Lower the level to send them to the log buffer only.
+ */
+static int uv_nmi_loglevel = 7;
+module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);
+
+/*
+ * The following values show statistics on how perf events are affecting
+ * this system.
+ */
+static int param_get_local64(char *buffer, const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
+}
+
+static int param_set_local64(const char *val, const struct kernel_param *kp)
+{
+ /* clear on any write */
+ local64_set((local64_t *)kp->arg, 0);
+ return 0;
+}
+
+static struct kernel_param_ops param_ops_local64 = {
+ .get = param_get_local64,
+ .set = param_set_local64,
+};
+#define param_check_local64(name, p) __param_check(name, p, local64_t)
+
+static local64_t uv_nmi_count;
+module_param_named(nmi_count, uv_nmi_count, local64, 0644);
+
+static local64_t uv_nmi_misses;
+module_param_named(nmi_misses, uv_nmi_misses, local64, 0644);
+
+static local64_t uv_nmi_ping_count;
+module_param_named(ping_count, uv_nmi_ping_count, local64, 0644);
+
+static local64_t uv_nmi_ping_misses;
+module_param_named(ping_misses, uv_nmi_ping_misses, local64, 0644);
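For illustration, any further local64_t statistic could reuse the custom "local64" parameter type defined above; the counter name below is hypothetical and not part of the patch:

static local64_t uv_nmi_example_stat;
module_param_named(example_stat, uv_nmi_example_stat, local64, 0644);
/* Reading the sysfs file returns the count via param_get_local64();
 * any write clears it via param_set_local64(). */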
+
+/*
+ * The following values allow tuning for large systems under heavy load
+ */
+static int uv_nmi_initial_delay = 100;
+module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);
+
+static int uv_nmi_slave_delay = 100;
+module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644);
+
+static int uv_nmi_loop_delay = 100;
+module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644);
+
+static int uv_nmi_trigger_delay = 10000;
+module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644);
+
+static int uv_nmi_wait_count = 100;
+module_param_named(wait_count, uv_nmi_wait_count, int, 0644);
+
+static int uv_nmi_retry_count = 500;
+module_param_named(retry_count, uv_nmi_retry_count, int, 0644);
+
+/*
+ * Valid NMI Actions:
+ * "dump" - dump process stack for each cpu
+ * "ips" - dump IP info for each cpu
+ * "kdump" - do crash dump
+ * "kdb" - enter KDB/KGDB (default)
+ */
+static char uv_nmi_action[8] = "kdb";
+module_param_string(action, uv_nmi_action, sizeof(uv_nmi_action), 0644);
+
+static inline bool uv_nmi_action_is(const char *action)
+{
+ return (strncmp(uv_nmi_action, action, strlen(action)) == 0);
+}
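Since "action" is an ordinary module parameter, it can be chosen at boot or at run time; a hedged usage sketch (the sysfs path assumes the usual layout for built-in module parameters):

/*
 * Illustrative only:
 *   kernel command line:  uv_nmi.action=dump
 *   at run time:          echo ips > /sys/module/uv_nmi/parameters/action
 * uv_nmi_action_is() above then selects the matching branch in uv_handle_nmi().
 */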
+
+/* Setup which NMI support is present in system */
+static void uv_nmi_setup_mmrs(void)
+{
+ if (uv_read_local_mmr(UVH_NMI_MMRX_SUPPORTED)) {
+ uv_write_local_mmr(UVH_NMI_MMRX_REQ,
+ 1UL << UVH_NMI_MMRX_REQ_SHIFT);
+ nmi_mmr = UVH_NMI_MMRX;
+ nmi_mmr_clear = UVH_NMI_MMRX_CLEAR;
+ nmi_mmr_pending = 1UL << UVH_NMI_MMRX_SHIFT;
+ pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMRX_TYPE);
+ } else {
+ nmi_mmr = UVH_NMI_MMR;
+ nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
+ nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
+ pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE);
+ }
+}
+
+/* Read NMI MMR and check if NMI flag was set by BMC. */
+static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
+{
+ hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
+ atomic_inc(&hub_nmi->read_mmr_count);
+ return !!(hub_nmi->nmi_value & nmi_mmr_pending);
+}
+
+static inline void uv_local_mmr_clear_nmi(void)
+{
+ uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
+}
+
+/*
+ * If this is the first cpu in on this hub, set hub_nmi "in_nmi" and "owner"
+ * values and return true. If it is the first cpu in on the system, also set
+ * the global "in_nmi" flag.
+ */
+static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
+{
+ int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);
+
+ if (first) {
+ atomic_set(&hub_nmi->cpu_owner, cpu);
+ if (atomic_add_unless(&uv_in_nmi, 1, 1))
+ atomic_set(&uv_nmi_cpu, cpu);
+
+ atomic_inc(&hub_nmi->nmi_count);
+ }
+ return first;
+}
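The "first in" test works because atomic_add_unless() reports whether it actually added; a minimal sketch of the idiom in isolation (the helper name is illustrative, not part of the patch):

/* Exactly one caller sees a non-zero return while *v goes from 0 to 1:
 * atomic_add_unless(v, 1, 1) adds 1 only if the current value is not 1
 * and returns non-zero when the add was performed. */
static int example_first_in(atomic_t *v)
{
	return atomic_add_unless(v, 1, 1);
}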
+
+/* Check if this is a system NMI event */
+static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
+{
+ int cpu = smp_processor_id();
+ int nmi = 0;
+
+ local64_inc(&uv_nmi_count);
+ uv_cpu_nmi.queries++;
+
+ do {
+ nmi = atomic_read(&hub_nmi->in_nmi);
+ if (nmi)
+ break;
+
+ if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
+
+ /* check hub MMR NMI flag */
+ if (uv_nmi_test_mmr(hub_nmi)) {
+ uv_set_in_nmi(cpu, hub_nmi);
+ nmi = 1;
+ break;
+ }
+
+ /* MMR NMI flag is clear */
+ raw_spin_unlock(&hub_nmi->nmi_lock);
+
+ } else {
+ /* wait a moment for the hub nmi locker to set flag */
+ cpu_relax();
+ udelay(uv_nmi_slave_delay);
+
+ /* re-check hub in_nmi flag */
+ nmi = atomic_read(&hub_nmi->in_nmi);
+ if (nmi)
+ break;
+ }
+
+ /* check if this BMC missed setting the MMR NMI flag */
+ if (!nmi) {
+ nmi = atomic_read(&uv_in_nmi);
+ if (nmi)
+ uv_set_in_nmi(cpu, hub_nmi);
+ }
+
+ } while (0);
+
+ if (!nmi)
+ local64_inc(&uv_nmi_misses);
+
+ return nmi;
+}
+
+/* Need to reset the NMI MMR register, but only once per hub. */
+static inline void uv_clear_nmi(int cpu)
+{
+ struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
+
+ if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
+ atomic_set(&hub_nmi->cpu_owner, -1);
+ atomic_set(&hub_nmi->in_nmi, 0);
+ uv_local_mmr_clear_nmi();
+ raw_spin_unlock(&hub_nmi->nmi_lock);
+ }
+}
+
+/* Print non-responding cpus */
+static void uv_nmi_nr_cpus_pr(char *fmt)
+{
+ static char cpu_list[1024];
+ int len = sizeof(cpu_list);
+ int c = cpumask_weight(uv_nmi_cpu_mask);
+ int n = cpulist_scnprintf(cpu_list, len, uv_nmi_cpu_mask);
+
+ if (n >= len-1)
+ strcpy(&cpu_list[len - 6], "...\n");
+
+ printk(fmt, c, cpu_list);
+}
+
+/* Ping non-responding cpus, attempting to force them into the NMI handler */
+static void uv_nmi_nr_cpus_ping(void)
+{
+ int cpu;
+
+ for_each_cpu(cpu, uv_nmi_cpu_mask)
+ atomic_set(&uv_cpu_nmi_per(cpu).pinging, 1);
+
+ apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
+}
+
+/* Clean up flags for cpus that ignored both NMI and ping */
+static void uv_nmi_cleanup_mask(void)
+{
+ int cpu;
+
+ for_each_cpu(cpu, uv_nmi_cpu_mask) {
+ atomic_set(&uv_cpu_nmi_per(cpu).pinging, 0);
+ atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_OUT);
+ cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
+ }
+}
+
+/* Loop waiting as cpus enter nmi handler */
+static int uv_nmi_wait_cpus(int first)
+{
+ int i, j, k, n = num_online_cpus();
+ int last_k = 0, waiting = 0;
+
+ if (first) {
+ cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
+ k = 0;
+ } else {
+ k = n - cpumask_weight(uv_nmi_cpu_mask);
+ }
+
+ udelay(uv_nmi_initial_delay);
+ for (i = 0; i < uv_nmi_retry_count; i++) {
+ int loop_delay = uv_nmi_loop_delay;
+
+ for_each_cpu(j, uv_nmi_cpu_mask) {
+ if (atomic_read(&uv_cpu_nmi_per(j).state)) {
+ cpumask_clear_cpu(j, uv_nmi_cpu_mask);
+ if (++k >= n)
+ break;
+ }
+ }
+ if (k >= n) { /* all in? */
+ k = n;
+ break;
+ }
+ if (last_k != k) { /* abort if no new cpus coming in */
+ last_k = k;
+ waiting = 0;
+ } else if (++waiting > uv_nmi_wait_count)
+ break;
+
+ /* extend delay if waiting only for cpu 0 */
+ if (waiting && (n - k) == 1 &&
+ cpumask_test_cpu(0, uv_nmi_cpu_mask))
+ loop_delay *= 100;
+
+ udelay(loop_delay);
+ }
+ atomic_set(&uv_nmi_cpus_in_nmi, k);
+ return n - k;
+}
+
+/* Wait until all slave cpus have entered UV NMI handler */
+static void uv_nmi_wait(int master)
+{
+ /* indicate this cpu is in */
+ atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_IN);
+
+ /* if not the first cpu in (the master), then we are a slave cpu */
+ if (!master)
+ return;
+
+ do {
+ /* wait for all other cpus to gather here */
+ if (!uv_nmi_wait_cpus(1))
+ break;
+
+ /* if not all made it in, send IPI NMI to them */
+ uv_nmi_nr_cpus_pr(KERN_ALERT
+ "UV: Sending NMI IPI to %d non-responding CPUs: %s\n");
+ uv_nmi_nr_cpus_ping();
+
+ /* if all cpus are in, then done */
+ if (!uv_nmi_wait_cpus(0))
+ break;
+
+ uv_nmi_nr_cpus_pr(KERN_ALERT
+ "UV: %d CPUs not in NMI loop: %s\n");
+ } while (0);
+
+ pr_alert("UV: %d of %d CPUs in NMI\n",
+ atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
+}
+
+static void uv_nmi_dump_cpu_ip_hdr(void)
+{
+ printk(KERN_DEFAULT
+ "\nUV: %4s %6s %-32s %s (Note: PID 0 not listed)\n",
+ "CPU", "PID", "COMMAND", "IP");
+}
+
+static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
+{
+ printk(KERN_DEFAULT "UV: %4d %6d %-32.32s ",
+ cpu, current->pid, current->comm);
+
+ printk_address(regs->ip);
+}
+
+/* Dump this cpu's state */
+static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
+{
+ const char *dots = " ................................. ";
+
+ if (uv_nmi_action_is("ips")) {
+ if (cpu == 0)
+ uv_nmi_dump_cpu_ip_hdr();
+
+ if (current->pid != 0)
+ uv_nmi_dump_cpu_ip(cpu, regs);
+
+ } else if (uv_nmi_action_is("dump")) {
+ printk(KERN_DEFAULT
+ "UV:%sNMI process trace for CPU %d\n", dots, cpu);
+ show_regs(regs);
+ }
+ atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
+}
+
+/* Trigger a slave cpu to dump its state */
+static void uv_nmi_trigger_dump(int cpu)
+{
+ int retry = uv_nmi_trigger_delay;
+
+ if (atomic_read(&uv_cpu_nmi_per(cpu).state) != UV_NMI_STATE_IN)
+ return;
+
+ atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP);
+ do {
+ cpu_relax();
+ udelay(10);
+ if (atomic_read(&uv_cpu_nmi_per(cpu).state)
+ != UV_NMI_STATE_DUMP)
+ return;
+ } while (--retry > 0);
+
+ pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
+ atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP_DONE);
+}
+
+/* Wait until all cpus ready to exit */
+static void uv_nmi_sync_exit(int master)
+{
+ atomic_dec(&uv_nmi_cpus_in_nmi);
+ if (master) {
+ while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)
+ cpu_relax();
+ atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
+ } else {
+ while (atomic_read(&uv_nmi_slave_continue))
+ cpu_relax();
+ }
+}
+
+/* Walk through cpu list and dump state of each */
+static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
+{
+ if (master) {
+ int tcpu;
+ int ignored = 0;
+ int saved_console_loglevel = console_loglevel;
+
+ pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
+ uv_nmi_action_is("ips") ? "IPs" : "processes",
+ atomic_read(&uv_nmi_cpus_in_nmi), cpu);
+
+ console_loglevel = uv_nmi_loglevel;
+ atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
+ for_each_online_cpu(tcpu) {
+ if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask))
+ ignored++;
+ else if (tcpu == cpu)
+ uv_nmi_dump_state_cpu(tcpu, regs);
+ else
+ uv_nmi_trigger_dump(tcpu);
+ }
+ if (ignored)
+ printk(KERN_DEFAULT "UV: %d CPUs ignored NMI\n",
+ ignored);
+
+ console_loglevel = saved_console_loglevel;
+ pr_alert("UV: process trace complete\n");
+ } else {
+ while (!atomic_read(&uv_nmi_slave_continue))
+ cpu_relax();
+ while (atomic_read(&uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
+ cpu_relax();
+ uv_nmi_dump_state_cpu(cpu, regs);
+ }
+ uv_nmi_sync_exit(master);
+}
+
+static void uv_nmi_touch_watchdogs(void)
+{
+ touch_softlockup_watchdog_sync();
+ clocksource_touch_watchdog();
+ rcu_cpu_stall_reset();
+ touch_nmi_watchdog();
+}
+
+#if defined(CONFIG_KEXEC)
+static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
+{
+ /* Call crash to dump system state */
+ if (master) {
+ pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
+ crash_kexec(regs);
+
+ pr_emerg("UV: crash_kexec unexpectedly returned, ");
+ if (!kexec_crash_image) {
+ pr_cont("crash kernel not loaded\n");
+ atomic_set(&uv_nmi_kexec_failed, 1);
+ uv_nmi_sync_exit(1);
+ return;
+ }
+ pr_cont("kexec busy, stalling cpus while waiting\n");
+ }
+
+ /* If crash exec fails the slaves should return, otherwise stall */
+ while (atomic_read(&uv_nmi_kexec_failed) == 0)
+ mdelay(10);
+
+ /* Crash kernel most likely not loaded, return in an orderly fashion */
+ uv_nmi_sync_exit(0);
+}
+
+#else /* !CONFIG_KEXEC */
+static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
+{
+ if (master)
+ pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
+}
+#endif /* !CONFIG_KEXEC */
+
+#ifdef CONFIG_KGDB_KDB
+/* Call KDB from NMI handler */
+static void uv_call_kdb(int cpu, struct pt_regs *regs, int master)
+{
+ int ret;
+
+ if (master) {
+ /* call KGDB NMI handler as MASTER */
+ ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs,
+ &uv_nmi_slave_continue);
+ if (ret) {
+ pr_alert("KDB returned error, is kgdboc set?\n");
+ atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
+ }
+ } else {
+ /* wait for KGDB signal that it's ready for slaves to enter */
+ int sig;
+
+ do {
+ cpu_relax();
+ sig = atomic_read(&uv_nmi_slave_continue);
+ } while (!sig);
+
+ /* call KGDB as slave */
+ if (sig == SLAVE_CONTINUE)
+ kgdb_nmicallback(cpu, regs);
+ }
+ uv_nmi_sync_exit(master);
+}
+
+#else /* !CONFIG_KGDB_KDB */
+static inline void uv_call_kdb(int cpu, struct pt_regs *regs, int master)
+{
+ pr_err("UV: NMI error: KGDB/KDB is not enabled in this kernel\n");
+}
+#endif /* !CONFIG_KGDB_KDB */
+
+/*
+ * UV NMI handler
+ */
+int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
+{
+ struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
+ int cpu = smp_processor_id();
+ int master = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* If not a UV System NMI, ignore */
+ if (!atomic_read(&uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
+ local_irq_restore(flags);
+ return NMI_DONE;
+ }
+
+ /* Call possible NMI trace function */
+ if (unlikely(uv_trace_nmi_func))
+ (uv_trace_nmi_func)(reason, regs);
+
+ /* Indicate we are the first CPU into the NMI handler */
+ master = (atomic_read(&uv_nmi_cpu) == cpu);
+
+ /* If NMI action is "kdump", then attempt to do it */
+ if (uv_nmi_action_is("kdump"))
+ uv_nmi_kdump(cpu, master, regs);
+
+ /* Pause as all cpus enter the NMI handler */
+ uv_nmi_wait(master);
+
+ /* Dump state of each cpu */
+ if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump"))
+ uv_nmi_dump_state(cpu, regs, master);
+
+ /* Call KDB if enabled */
+ else if (uv_nmi_action_is("kdb"))
+ uv_call_kdb(cpu, regs, master);
+
+ /* Clear per_cpu "in nmi" flag */
+ atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_OUT);
+
+ /* Clear MMR NMI flag on each hub */
+ uv_clear_nmi(cpu);
+
+ /* Clear global flags */
+ if (master) {
+ if (cpumask_weight(uv_nmi_cpu_mask))
+ uv_nmi_cleanup_mask();
+ atomic_set(&uv_nmi_cpus_in_nmi, -1);
+ atomic_set(&uv_nmi_cpu, -1);
+ atomic_set(&uv_in_nmi, 0);
+ }
+
+ uv_nmi_touch_watchdogs();
+ local_irq_restore(flags);
+
+ return NMI_HANDLED;
+}
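The return values follow the generic x86 NMI notifier convention, where NMI_DONE passes the NMI on to other handlers and NMI_HANDLED consumes it; a minimal, hypothetical handler skeleton for comparison (the pending-check helper is invented for illustration):

static int example_nmi_handler(unsigned int reason, struct pt_regs *regs)
{
	if (!example_event_pending())	/* hypothetical check */
		return NMI_DONE;	/* not ours, let other handlers look */

	/* handle the event here */
	return NMI_HANDLED;		/* NMI consumed */
}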
+
+/*
+ * NMI handler for pulling in CPUs when perf events are grabbing our NMI
+ */
+int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
+{
+ int ret;
+
+ uv_cpu_nmi.queries++;
+ if (!atomic_read(&uv_cpu_nmi.pinging)) {
+ local64_inc(&uv_nmi_ping_misses);
+ return NMI_DONE;
+ }
+
+ uv_cpu_nmi.pings++;
+ local64_inc(&uv_nmi_ping_count);
+ ret = uv_handle_nmi(reason, regs);
+ atomic_set(&uv_cpu_nmi.pinging, 0);
+ return ret;
+}
+
+void uv_register_nmi_notifier(void)
+{
+ if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
+ pr_warn("UV: NMI handler failed to register\n");
+
+ if (register_nmi_handler(NMI_LOCAL, uv_handle_nmi_ping, 0, "uvping"))
+ pr_warn("UV: PING NMI handler failed to register\n");
+}
+
+void uv_nmi_init(void)
+{
+ unsigned int value;
+
+ /*
+ * Unmask NMI on all cpus
+ */
+ value = apic_read(APIC_LVT1) | APIC_DM_NMI;
+ value &= ~APIC_LVT_MASKED;
+ apic_write(APIC_LVT1, value);
+}
+
+void uv_nmi_setup(void)
+{
+ int size = sizeof(void *) * (1 << NODES_SHIFT);
+ int cpu, nid;
+
+ /* Setup hub nmi info */
+ uv_nmi_setup_mmrs();
+ uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
+ pr_info("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
+ BUG_ON(!uv_hub_nmi_list);
+ size = sizeof(struct uv_hub_nmi_s);
+ for_each_present_cpu(cpu) {
+ nid = cpu_to_node(cpu);
+ if (uv_hub_nmi_list[nid] == NULL) {
+ uv_hub_nmi_list[nid] = kzalloc_node(size,
+ GFP_KERNEL, nid);
+ BUG_ON(!uv_hub_nmi_list[nid]);
+ raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
+ atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
+ }
+ uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
+ }
+ BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
+}
+
+
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index f7bab68a4b83..11f9285a2ff6 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -722,15 +722,25 @@ static void percpu_init(void)
/*
* Check to see if a symbol lies in the .data..percpu section.
- * For some as yet not understood reason the "__init_begin"
- * symbol which immediately preceeds the .data..percpu section
- * also shows up as it it were part of it so we do an explict
- * check for that symbol name and ignore it.
+ *
+ * The linker incorrectly associates some symbols with the
+ * .data..percpu section so we also need to check the symbol
+ * name to make sure that we classify the symbol correctly.
+ *
+ * The GNU linker incorrectly associates:
+ * __init_begin
+ * __per_cpu_load
+ *
+ * The "gold" linker incorrectly associates:
+ * init_per_cpu__irq_stack_union
+ * init_per_cpu__gdt_page
*/
static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
{
return (sym->st_shndx == per_cpu_shndx) &&
- strcmp(symname, "__init_begin");
+ strcmp(symname, "__init_begin") &&
+ strcmp(symname, "__per_cpu_load") &&
+ strncmp(symname, "init_per_cpu_", 13);
}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index fdc3ba28ca38..b3f36369e667 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -468,8 +468,8 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
* 3 PCD PWT UC UC UC
* 4 PAT WB WC WB
* 5 PAT PWT WC WP WT
- * 6 PAT PCD UC- UC UC-
- * 7 PAT PCD PWT UC UC UC
+ * 6 PAT PCD UC- rsv UC-
+ * 7 PAT PCD PWT UC rsv UC
*/
void xen_set_pat(u64 pat)
@@ -2328,12 +2328,14 @@ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
return success;
}
-int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
- unsigned int address_bits)
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+ unsigned int address_bits,
+ dma_addr_t *dma_handle)
{
unsigned long *in_frames = discontig_frames, out_frame;
unsigned long flags;
int success;
+ unsigned long vstart = (unsigned long)phys_to_virt(pstart);
/*
* Currently an auto-translated guest will not perform I/O, nor will
@@ -2368,15 +2370,17 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
spin_unlock_irqrestore(&xen_reservation_lock, flags);
+ *dma_handle = virt_to_machine(vstart).maddr;
return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
-void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
unsigned long *out_frames = discontig_frames, in_frame;
unsigned long flags;
int success;
+ unsigned long vstart;
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
@@ -2384,6 +2388,7 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
if (unlikely(order > MAX_CONTIG_ORDER))
return;
+ vstart = (unsigned long)phys_to_virt(pstart);
memset((void *) vstart, 0, PAGE_SIZE << order);
spin_lock_irqsave(&xen_reservation_lock, flags);
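The new signature is easier to read from the caller's side; a hedged sketch of an allocation helper using the out-parameter (the helper name, GFP flags and address_bits value are illustrative, not taken from this patch):

static void *example_alloc_xen_contiguous(size_t size, dma_addr_t *dma_handle)
{
	unsigned int order = get_order(size);
	void *vaddr = (void *)__get_free_pages(GFP_KERNEL, order);

	if (!vaddr)
		return NULL;

	/* Pass the physical address; the machine (bus) address now comes
	 * back through *dma_handle instead of being re-derived by the
	 * caller from the virtual address. */
	if (xen_create_contiguous_region(virt_to_phys(vaddr), order,
					 64, dma_handle)) {
		free_pages((unsigned long)vaddr, order);
		return NULL;
	}
	return vaddr;
}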
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index a61c7d5811be..2ae8699e8767 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -799,10 +799,10 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
unsigned topidx, mididx, idx;
- if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
- BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+ /* don't track P2M changes in autotranslate guests */
+ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
return true;
- }
+
if (unlikely(pfn >= MAX_P2M_PFN)) {
BUG_ON(mfn != INVALID_P2M_ENTRY);
return true;
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 1b982641ec35..228d6aee3a16 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -28,3 +28,4 @@ generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/xtensa/include/asm/prom.h b/arch/xtensa/include/asm/prom.h
deleted file mode 100644
index f3d7cd2c0de7..000000000000
--- a/arch/xtensa/include/asm/prom.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _XTENSA_ASM_PROM_H
-#define _XTENSA_ASM_PROM_H
-
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
-#endif /* _XTENSA_ASM_PROM_H */
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index c114483010c1..7db5c22faa68 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -87,4 +87,6 @@
#define SO_BUSY_POLL 46
+#define SO_MAX_PACING_RATE 47
+
#endif /* _XTENSA_SOCKET_H */
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index de1dfa18d0a1..21dbe6bdb8ed 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1122,7 +1122,7 @@ ENDPROC(fast_syscall_spill_registers)
* a3: exctable, original value in excsave1
*/
-fast_syscall_spill_registers_fixup:
+ENTRY(fast_syscall_spill_registers_fixup)
rsr a2, windowbase # get current windowbase (a2 is saved)
xsr a0, depc # restore depc and a0
@@ -1134,22 +1134,26 @@ fast_syscall_spill_registers_fixup:
*/
xsr a3, excsave1 # get spill-mask
- slli a2, a3, 1 # shift left by one
+ slli a3, a3, 1 # shift left by one
- slli a3, a2, 32-WSBITS
- src a2, a2, a3 # a1 = xxwww1yyxxxwww1yy......
+ slli a2, a3, 32-WSBITS
+ src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
wsr a2, windowstart # set corrected windowstart
- rsr a3, excsave1
- l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2
- l32i a3, a3, EXC_TABLE_PARAM # original WB (in user task)
+ srli a3, a3, 1
+ rsr a2, excsave1
+ l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2
+ xsr a2, excsave1
+ s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3
+ l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task)
+ xsr a2, excsave1
/* Return to the original (user task) WINDOWBASE.
* We leave the following frame behind:
* a0, a1, a2 same
- * a3: trashed (saved in excsave_1)
+ * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
* depc: depc (we have to return to that address)
- * excsave_1: a3
+ * excsave_1: exctable
*/
wsr a3, windowbase
@@ -1159,9 +1163,9 @@ fast_syscall_spill_registers_fixup:
* a0: return address
* a1: used, stack pointer
* a2: kernel stack pointer
- * a3: available, saved in EXCSAVE_1
+ * a3: available
* depc: exception address
- * excsave: a3
+ * excsave: exctable
* Note: This frame might be the same as above.
*/
@@ -1181,9 +1185,12 @@ fast_syscall_spill_registers_fixup:
rsr a0, exccause
addx4 a0, a0, a3 # find entry in table
l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
jx a0
-fast_syscall_spill_registers_fixup_return:
+ENDPROC(fast_syscall_spill_registers_fixup)
+
+ENTRY(fast_syscall_spill_registers_fixup_return)
/* When we return here, all registers have been restored (a2: DEPC) */
@@ -1191,13 +1198,13 @@ fast_syscall_spill_registers_fixup_return:
/* Restore fixup handler. */
- xsr a3, excsave1
- movi a2, fast_syscall_spill_registers_fixup
- s32i a2, a3, EXC_TABLE_FIXUP
- s32i a0, a3, EXC_TABLE_DOUBLE_SAVE
- rsr a2, windowbase
- s32i a2, a3, EXC_TABLE_PARAM
- l32i a2, a3, EXC_TABLE_KSTK
+ rsr a2, excsave1
+ s32i a3, a2, EXC_TABLE_DOUBLE_SAVE
+ movi a3, fast_syscall_spill_registers_fixup
+ s32i a3, a2, EXC_TABLE_FIXUP
+ rsr a3, windowbase
+ s32i a3, a2, EXC_TABLE_PARAM
+ l32i a2, a2, EXC_TABLE_KSTK
/* Load WB at the time the exception occurred. */
@@ -1206,8 +1213,12 @@ fast_syscall_spill_registers_fixup_return:
wsr a3, windowbase
rsync
+ rsr a3, excsave1
+ l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
+
rfde
+ENDPROC(fast_syscall_spill_registers_fixup_return)
/*
* spill all registers.
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 946fb8d06c8b..6e2b6638122d 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -21,11 +21,8 @@
#include <linux/screen_info.h>
#include <linux/bootmem.h>
#include <linux/kernel.h>
-
-#ifdef CONFIG_OF
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
-#endif
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
# include <linux/console.h>
@@ -64,8 +61,8 @@ extern struct rtc_ops no_rtc_ops;
struct rtc_ops *rtc_ops;
#ifdef CONFIG_BLK_DEV_INITRD
-extern void *initrd_start;
-extern void *initrd_end;
+extern unsigned long initrd_start;
+extern unsigned long initrd_end;
int initrd_is_mapped = 0;
extern int initrd_below_start_ok;
#endif
@@ -152,8 +149,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
{
meminfo_t* mi;
mi = (meminfo_t*)(tag->data);
- initrd_start = __va(mi->start);
- initrd_end = __va(mi->end);
+ initrd_start = (unsigned long)__va(mi->start);
+ initrd_end = (unsigned long)__va(mi->end);
return 0;
}
@@ -170,13 +167,6 @@ static int __init parse_tag_fdt(const bp_tag_t *tag)
__tagtable(BP_TAG_FDT, parse_tag_fdt);
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
- initrd_start = (void *)__va(start);
- initrd_end = (void *)__va(end);
- initrd_below_start_ok = 1;
-}
-
#endif /* CONFIG_OF */
#endif /* CONFIG_BLK_DEV_INITRD */
@@ -222,9 +212,13 @@ static int __init parse_bootparam(const bp_tag_t* tag)
}
#ifdef CONFIG_OF
+bool __initdata dt_memory_scan = false;
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
+ if (!dt_memory_scan)
+ return;
+
size &= PAGE_MASK;
add_sysmem_bank(MEMORY_TYPE_CONVENTIONAL, base, base + size);
}
@@ -236,31 +230,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
void __init early_init_devtree(void *params)
{
- /* Setup flat device-tree pointer */
- initial_boot_params = params;
-
- /* Retrieve various informations from the /chosen node of the
- * device-tree, including the platform type, initrd location and
- * size, TCE reserve, and more ...
- */
- if (!command_line[0])
- of_scan_flat_dt(early_init_dt_scan_chosen, command_line);
-
- /* Scan memory nodes and rebuild MEMBLOCKs */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
if (sysmem.nr_banks == 0)
- of_scan_flat_dt(early_init_dt_scan_memory, NULL);
-}
+ dt_memory_scan = true;
-static void __init copy_devtree(void)
-{
- void *alloc = early_init_dt_alloc_memory_arch(
- be32_to_cpu(initial_boot_params->totalsize), 8);
- if (alloc) {
- memcpy(alloc, initial_boot_params,
- be32_to_cpu(initial_boot_params->totalsize));
- initial_boot_params = alloc;
- }
+ early_init_dt_scan(params);
+
+ if (!command_line[0])
+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
}
static int __init xtensa_device_probe(void)
@@ -525,10 +501,7 @@ void __init setup_arch(char **cmdline_p)
bootmem_init();
-#ifdef CONFIG_OF
- copy_devtree();
- unflatten_device_tree();
-#endif
+ unflatten_and_copy_device_tree();
platform_setup(cmdline_p);
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 718eca1850bd..98b67d5f1514 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -341,7 +341,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sp = regs->areg[1];
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
sp = current->sas_ss_sp + current->sas_ss_size;
}
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 56f88b7afe2f..e9e1aad8c271 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -737,7 +737,8 @@ static int __init iss_net_setup(char *str)
return 1;
}
- if ((new = alloc_bootmem(sizeof new)) == NULL) {
+ new = alloc_bootmem(sizeof(*new));
+ if (new == NULL) {
printk("Alloc_bootmem failed\n");
return 1;
}
diff --git a/block/Makefile b/block/Makefile
index 671a83d063a5..20645e88fb57 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,8 +5,9 @@
obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
- blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o \
- partition-generic.o partitions/
+ blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \
+ blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
+ genhd.o scsi_ioctl.o partition-generic.o partitions/
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
diff --git a/block/blk-core.c b/block/blk-core.c
index 0a00e4ecf87c..d9cab97773ef 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -16,6 +16,7 @@
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
@@ -48,7 +49,7 @@ DEFINE_IDA(blk_queue_ida);
/*
* For the allocated request tables
*/
-static struct kmem_cache *request_cachep;
+struct kmem_cache *request_cachep = NULL;
/*
* For queue allocation
@@ -60,42 +61,6 @@ struct kmem_cache *blk_requestq_cachep;
*/
static struct workqueue_struct *kblockd_workqueue;
-static void drive_stat_acct(struct request *rq, int new_io)
-{
- struct hd_struct *part;
- int rw = rq_data_dir(rq);
- int cpu;
-
- if (!blk_do_io_stat(rq))
- return;
-
- cpu = part_stat_lock();
-
- if (!new_io) {
- part = rq->part;
- part_stat_inc(cpu, part, merges[rw]);
- } else {
- part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
- if (!hd_struct_try_get(part)) {
- /*
- * The partition is already being removed,
- * the request will be accounted on the disk only
- *
- * We take a reference on disk->part0 although that
- * partition will never be deleted, so we can treat
- * it as any other partition.
- */
- part = &rq->rq_disk->part0;
- hd_struct_get(part);
- }
- part_round_stats(cpu, part);
- part_inc_in_flight(part, rw);
- rq->part = part;
- }
-
- part_stat_unlock();
-}
-
void blk_queue_congestion_threshold(struct request_queue *q)
{
int nr;
@@ -145,7 +110,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->cmd = rq->__cmd;
rq->cmd_len = BLK_MAX_CDB;
rq->tag = -1;
- rq->ref_count = 1;
rq->start_time = jiffies;
set_start_time_ns(rq);
rq->part = NULL;
@@ -166,7 +130,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
bio_advance(bio, nbytes);
/* don't actually finish bio if it's part of flush sequence */
- if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+ if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
bio_endio(bio, error);
}
@@ -174,9 +138,9 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
{
int bit;
- printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
+ printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
- rq->cmd_flags);
+ (unsigned long long) rq->cmd_flags);
printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
(unsigned long long)blk_rq_pos(rq),
@@ -595,9 +559,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q)
return NULL;
+ if (percpu_counter_init(&q->mq_usage_counter, 0))
+ goto fail_q;
+
q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
- goto fail_q;
+ goto fail_c;
q->backing_dev_info.ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
@@ -644,13 +611,19 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+ init_waitqueue_head(&q->mq_freeze_wq);
+
if (blkcg_init_queue(q))
- goto fail_id;
+ goto fail_bdi;
return q;
+fail_bdi:
+ bdi_destroy(&q->backing_dev_info);
fail_id:
ida_simple_remove(&blk_queue_ida, q->id);
+fail_c:
+ percpu_counter_destroy(&q->mq_usage_counter);
fail_q:
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
@@ -739,9 +712,17 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
q->sg_reserved_size = INT_MAX;
+ /* Protect q->elevator from elevator_change */
+ mutex_lock(&q->sysfs_lock);
+
/* init elevator */
- if (elevator_init(q, NULL))
+ if (elevator_init(q, NULL)) {
+ mutex_unlock(&q->sysfs_lock);
return NULL;
+ }
+
+ mutex_unlock(&q->sysfs_lock);
+
return q;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1109,7 +1090,8 @@ retry:
goto retry;
}
-struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+static struct request *blk_old_get_request(struct request_queue *q, int rw,
+ gfp_t gfp_mask)
{
struct request *rq;
@@ -1126,6 +1108,14 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
return rq;
}
+
+struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+{
+ if (q->mq_ops)
+ return blk_mq_alloc_request(q, rw, gfp_mask, false);
+ else
+ return blk_old_get_request(q, rw, gfp_mask);
+}
EXPORT_SYMBOL(blk_get_request);
/**
@@ -1211,7 +1201,7 @@ EXPORT_SYMBOL(blk_requeue_request);
static void add_acct_request(struct request_queue *q, struct request *rq,
int where)
{
- drive_stat_acct(rq, 1);
+ blk_account_io_start(rq, true);
__elv_add_request(q, rq, where);
}
@@ -1272,8 +1262,6 @@ void __blk_put_request(struct request_queue *q, struct request *req)
{
if (unlikely(!q))
return;
- if (unlikely(--req->ref_count))
- return;
blk_pm_put_request(req);
@@ -1302,12 +1290,17 @@ EXPORT_SYMBOL_GPL(__blk_put_request);
void blk_put_request(struct request *req)
{
- unsigned long flags;
struct request_queue *q = req->q;
- spin_lock_irqsave(q->queue_lock, flags);
- __blk_put_request(q, req);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ if (q->mq_ops)
+ blk_mq_free_request(req);
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __blk_put_request(q, req);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
}
EXPORT_SYMBOL(blk_put_request);
@@ -1333,7 +1326,7 @@ void blk_add_request_payload(struct request *rq, struct page *page,
bio->bi_io_vec->bv_offset = 0;
bio->bi_io_vec->bv_len = len;
- bio->bi_size = len;
+ bio->bi_iter.bi_size = len;
bio->bi_vcnt = 1;
bio->bi_phys_segments = 1;
@@ -1343,8 +1336,8 @@ void blk_add_request_payload(struct request *rq, struct page *page,
}
EXPORT_SYMBOL_GPL(blk_add_request_payload);
-static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
- struct bio *bio)
+bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
+ struct bio *bio)
{
const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
@@ -1358,15 +1351,15 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
req->biotail->bi_next = bio;
req->biotail = bio;
- req->__data_len += bio->bi_size;
+ req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
- drive_stat_acct(req, 0);
+ blk_account_io_start(req, false);
return true;
}
-static bool bio_attempt_front_merge(struct request_queue *q,
- struct request *req, struct bio *bio)
+bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
+ struct bio *bio)
{
const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
@@ -1387,16 +1380,16 @@ static bool bio_attempt_front_merge(struct request_queue *q,
* not touch req->buffer either...
*/
req->buffer = bio_data(bio);
- req->__sector = bio->bi_sector;
- req->__data_len += bio->bi_size;
+ req->__sector = bio->bi_iter.bi_sector;
+ req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
- drive_stat_acct(req, 0);
+ blk_account_io_start(req, false);
return true;
}
/**
- * attempt_plug_merge - try to merge with %current's plugged list
+ * blk_attempt_plug_merge - try to merge with %current's plugged list
* @q: request_queue new bio is being queued at
* @bio: new bio being queued
* @request_count: out parameter for number of traversed plugged requests
@@ -1412,13 +1405,16 @@ static bool bio_attempt_front_merge(struct request_queue *q,
* reliable access to the elevator outside queue lock. Only check basic
* merging parameters without querying the elevator.
*/
-static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
- unsigned int *request_count)
+bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
+ unsigned int *request_count)
{
struct blk_plug *plug;
struct request *rq;
bool ret = false;
+ if (blk_queue_nomerges(q))
+ goto out;
+
plug = current->plug;
if (!plug)
goto out;
@@ -1457,7 +1453,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cmd_flags |= REQ_FAILFAST_MASK;
req->errors = 0;
- req->__sector = bio->bi_sector;
+ req->__sector = bio->bi_iter.bi_sector;
req->ioprio = bio_prio(bio);
blk_rq_bio_prep(req->q, req, bio);
}
@@ -1492,7 +1488,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
* Check if we can merge with the plugged list before grabbing
* any locks.
*/
- if (attempt_plug_merge(q, bio, &request_count))
+ if (blk_attempt_plug_merge(q, bio, &request_count))
return;
spin_lock_irq(q->queue_lock);
@@ -1560,7 +1556,7 @@ get_rq:
}
}
list_add_tail(&req->queuelist, &plug->list);
- drive_stat_acct(req, 1);
+ blk_account_io_start(req, true);
} else {
spin_lock_irq(q->queue_lock);
add_acct_request(q, req, where);
@@ -1581,12 +1577,12 @@ static inline void blk_partition_remap(struct bio *bio)
if (bio_sectors(bio) && bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
- bio->bi_sector += p->start_sect;
+ bio->bi_iter.bi_sector += p->start_sect;
bio->bi_bdev = bdev->bd_contains;
trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
bdev->bd_dev,
- bio->bi_sector - p->start_sect);
+ bio->bi_iter.bi_sector - p->start_sect);
}
}
@@ -1652,7 +1648,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
/* Test device or partition size, when known. */
maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
if (maxsector) {
- sector_t sector = bio->bi_sector;
+ sector_t sector = bio->bi_iter.bi_sector;
if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
/*
@@ -1688,7 +1684,7 @@ generic_make_request_checks(struct bio *bio)
"generic_make_request: Trying to access "
"nonexistent block-device %s (%Lu)\n",
bdevname(bio->bi_bdev, b),
- (long long) bio->bi_sector);
+ (long long) bio->bi_iter.bi_sector);
goto end_io;
}
@@ -1702,9 +1698,9 @@ generic_make_request_checks(struct bio *bio)
}
part = bio->bi_bdev->bd_part;
- if (should_fail_request(part, bio->bi_size) ||
+ if (should_fail_request(part, bio->bi_iter.bi_size) ||
should_fail_request(&part_to_disk(part)->part0,
- bio->bi_size))
+ bio->bi_iter.bi_size))
goto end_io;
/*
@@ -1863,7 +1859,7 @@ void submit_bio(int rw, struct bio *bio)
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
- task_io_account_read(bio->bi_size);
+ task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, count);
}
@@ -1872,7 +1868,7 @@ void submit_bio(int rw, struct bio *bio)
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
current->comm, task_pid_nr(current),
(rw & WRITE) ? "WRITE" : "READ",
- (unsigned long long)bio->bi_sector,
+ (unsigned long long)bio->bi_iter.bi_sector,
bdevname(bio->bi_bdev, b),
count);
}
@@ -2005,7 +2001,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
for (bio = rq->bio; bio; bio = bio->bi_next) {
if ((bio->bi_rw & ff) != ff)
break;
- bytes += bio->bi_size;
+ bytes += bio->bi_iter.bi_size;
}
/* this could lead to infinite loop */
@@ -2014,7 +2010,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
-static void blk_account_io_completion(struct request *req, unsigned int bytes)
+void blk_account_io_completion(struct request *req, unsigned int bytes)
{
if (blk_do_io_stat(req)) {
const int rw = rq_data_dir(req);
@@ -2028,7 +2024,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
}
}
-static void blk_account_io_done(struct request *req)
+void blk_account_io_done(struct request *req)
{
/*
* Account IO completion. flush_rq isn't accounted as a
@@ -2076,6 +2072,42 @@ static inline struct request *blk_pm_peek_request(struct request_queue *q,
}
#endif
+void blk_account_io_start(struct request *rq, bool new_io)
+{
+ struct hd_struct *part;
+ int rw = rq_data_dir(rq);
+ int cpu;
+
+ if (!blk_do_io_stat(rq))
+ return;
+
+ cpu = part_stat_lock();
+
+ if (!new_io) {
+ part = rq->part;
+ part_stat_inc(cpu, part, merges[rw]);
+ } else {
+ part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+ if (!hd_struct_try_get(part)) {
+ /*
+ * The partition is already being removed,
+ * the request will be accounted on the disk only
+ *
+ * We take a reference on disk->part0 although that
+ * partition will never be deleted, so we can treat
+ * it as any other partition.
+ */
+ part = &rq->rq_disk->part0;
+ hd_struct_get(part);
+ }
+ part_round_stats(cpu, part);
+ part_inc_in_flight(part, rw);
+ rq->part = part;
+ }
+
+ part_stat_unlock();
+}
+
/**
* blk_peek_request - peek at the top of a request queue
* @q: request queue to peek at
@@ -2227,6 +2259,7 @@ void blk_start_request(struct request *req)
if (unlikely(blk_bidi_rq(req)))
req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
+ BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
blk_add_timer(req);
}
EXPORT_SYMBOL(blk_start_request);
@@ -2339,9 +2372,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
total_bytes = 0;
while (req->bio) {
struct bio *bio = req->bio;
- unsigned bio_bytes = min(bio->bi_size, nr_bytes);
+ unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
- if (bio_bytes == bio->bi_size)
+ if (bio_bytes == bio->bi_iter.bi_size)
req->bio = bio->bi_next;
req_bio_endio(req, bio, bio_bytes, error);
@@ -2451,7 +2484,6 @@ static void blk_finish_request(struct request *req, int error)
if (req->cmd_flags & REQ_DONTPREP)
blk_unprep_request(req);
-
blk_account_io_done(req);
if (req->end_io)
@@ -2690,7 +2722,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->buffer = bio_data(bio);
}
- rq->__data_len = bio->bi_size;
+ rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
if (bio->bi_bdev)
@@ -2708,10 +2740,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
void rq_flush_dcache_pages(struct request *rq)
{
struct req_iterator iter;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
rq_for_each_segment(bvec, rq, iter)
- flush_dcache_page(bvec->bv_page);
+ flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif
@@ -2873,6 +2905,7 @@ void blk_start_plug(struct blk_plug *plug)
plug->magic = PLUG_MAGIC;
INIT_LIST_HEAD(&plug->list);
+ INIT_LIST_HEAD(&plug->mq_list);
INIT_LIST_HEAD(&plug->cb_list);
/*
@@ -2970,6 +3003,10 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
BUG_ON(plug->magic != PLUG_MAGIC);
flush_plug_callbacks(plug, from_schedule);
+
+ if (!list_empty(&plug->mq_list))
+ blk_mq_flush_plug_list(plug, from_schedule);
+
if (list_empty(&plug->list))
return;
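For context, the new plug->mq_list slots into the existing plugging pattern; a minimal sketch of that pattern from a submitter's point of view (the helper is illustrative, not part of the patch):

static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		/* requests built from these bios are held on plug->list
		 * (legacy queues) or plug->mq_list (blk-mq queues) */
		submit_bio(READ, bios[i]);
	blk_finish_plug(&plug);		/* drains via blk_flush_plug_list() */
}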
diff --git a/block/blk-exec.c b/block/blk-exec.c
index ae4f27d7944e..c3edf9dff566 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -5,6 +5,7 @@
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>
#include "blk.h"
@@ -24,7 +25,6 @@ static void blk_end_sync_rq(struct request *rq, int error)
struct completion *waiting = rq->end_io_data;
rq->end_io_data = NULL;
- __blk_put_request(rq->q, rq);
/*
* complete last, if this is a stack request the process (and thus
@@ -59,6 +59,12 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
rq->rq_disk = bd_disk;
rq->end_io = done;
+
+ if (q->mq_ops) {
+ blk_mq_insert_request(q, rq, true);
+ return;
+ }
+
/*
* need to check this before __blk_run_queue(), because rq can
* be freed before that returns.
@@ -103,12 +109,6 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
int err = 0;
unsigned long hang_check;
- /*
- * we need an extra reference to the request, so we can look at
- * it after io completion
- */
- rq->ref_count++;
-
if (!rq->sense) {
memset(sense, 0, sizeof(sense));
rq->sense = sense;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index cc2b827a853c..5580b050ce71 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -69,8 +69,10 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
+#include <linux/blk-mq.h>
#include "blk.h"
+#include "blk-mq.h"
/* FLUSH/FUA sequences */
enum {
@@ -124,6 +126,24 @@ static void blk_flush_restore_request(struct request *rq)
/* make @rq a normal request */
rq->cmd_flags &= ~REQ_FLUSH_SEQ;
rq->end_io = rq->flush.saved_end_io;
+
+ blk_clear_rq_complete(rq);
+}
+
+static void mq_flush_data_run(struct work_struct *work)
+{
+ struct request *rq;
+
+ rq = container_of(work, struct request, mq_flush_data);
+
+ memset(&rq->csd, 0, sizeof(rq->csd));
+ blk_mq_run_request(rq, true, false);
+}
+
+static void blk_mq_flush_data_insert(struct request *rq)
+{
+ INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
+ kblockd_schedule_work(rq->q, &rq->mq_flush_data);
}
/**
@@ -136,7 +156,7 @@ static void blk_flush_restore_request(struct request *rq)
* completion and trigger the next step.
*
* CONTEXT:
- * spin_lock_irq(q->queue_lock)
+ * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
*
* RETURNS:
* %true if requests were added to the dispatch queue, %false otherwise.
@@ -146,7 +166,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
{
struct request_queue *q = rq->q;
struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
- bool queued = false;
+ bool queued = false, kicked;
BUG_ON(rq->flush.seq & seq);
rq->flush.seq |= seq;
@@ -167,8 +187,12 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
- list_add(&rq->queuelist, &q->queue_head);
- queued = true;
+ if (q->mq_ops)
+ blk_mq_flush_data_insert(rq);
+ else {
+ list_add(&rq->queuelist, &q->queue_head);
+ queued = true;
+ }
break;
case REQ_FSEQ_DONE:
@@ -181,28 +205,43 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
BUG_ON(!list_empty(&rq->queuelist));
list_del_init(&rq->flush.list);
blk_flush_restore_request(rq);
- __blk_end_request_all(rq, error);
+ if (q->mq_ops)
+ blk_mq_end_io(rq, error);
+ else
+ __blk_end_request_all(rq, error);
break;
default:
BUG();
}
- return blk_kick_flush(q) | queued;
+ kicked = blk_kick_flush(q);
+ /* blk_mq_run_flush will run queue */
+ if (q->mq_ops)
+ return queued;
+ return kicked | queued;
}
static void flush_end_io(struct request *flush_rq, int error)
{
struct request_queue *q = flush_rq->q;
- struct list_head *running = &q->flush_queue[q->flush_running_idx];
+ struct list_head *running;
bool queued = false;
struct request *rq, *n;
+ unsigned long flags = 0;
+ if (q->mq_ops) {
+ blk_mq_free_request(flush_rq);
+ spin_lock_irqsave(&q->mq_flush_lock, flags);
+ }
+ running = &q->flush_queue[q->flush_running_idx];
BUG_ON(q->flush_pending_idx == q->flush_running_idx);
/* account completion of the flush request */
q->flush_running_idx ^= 1;
- elv_completed_request(q, flush_rq);
+
+ if (!q->mq_ops)
+ elv_completed_request(q, flush_rq);
/* and push the waiting requests to the next stage */
list_for_each_entry_safe(rq, n, running, flush.list) {
@@ -223,9 +262,48 @@ static void flush_end_io(struct request *flush_rq, int error)
* directly into request_fn may confuse the driver. Always use
* kblockd.
*/
- if (queued || q->flush_queue_delayed)
- blk_run_queue_async(q);
+ if (queued || q->flush_queue_delayed) {
+ if (!q->mq_ops)
+ blk_run_queue_async(q);
+ else
+ /*
+ * This can be optimized to only run queues with requests
+ * queued if necessary.
+ */
+ blk_mq_run_queues(q, true);
+ }
q->flush_queue_delayed = 0;
+ if (q->mq_ops)
+ spin_unlock_irqrestore(&q->mq_flush_lock, flags);
+}
+
+static void mq_flush_work(struct work_struct *work)
+{
+ struct request_queue *q;
+ struct request *rq;
+
+ q = container_of(work, struct request_queue, mq_flush_work);
+
+ /* We don't need to set REQ_FLUSH_SEQ; it's only here for consistency */
+ rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
+ __GFP_WAIT|GFP_ATOMIC, true);
+ rq->cmd_type = REQ_TYPE_FS;
+ rq->end_io = flush_end_io;
+
+ blk_mq_run_request(rq, true, false);
+}
+
+/*
+ * We can't directly use q->flush_rq, because it doesn't have a tag and is not
+ * in hctx->rqs[], so we must allocate a new request. Since we can't sleep
+ * here, offload the work to a workqueue.
+ *
+ * Note: we assume a flush request finished on any hardware queue will flush
+ * the whole disk cache.
+ */
+static void mq_run_flush(struct request_queue *q)
+{
+ kblockd_schedule_work(q, &q->mq_flush_work);
}
/**
@@ -236,7 +314,7 @@ static void flush_end_io(struct request *flush_rq, int error)
* Please read the comment at the top of this file for more info.
*
* CONTEXT:
- * spin_lock_irq(q->queue_lock)
+ * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
*
* RETURNS:
* %true if flush was issued, %false otherwise.
@@ -261,13 +339,18 @@ static bool blk_kick_flush(struct request_queue *q)
* Issue flush and toggle pending_idx. This makes pending_idx
* different from running_idx, which means flush is in flight.
*/
+ q->flush_pending_idx ^= 1;
+ if (q->mq_ops) {
+ mq_run_flush(q);
+ return true;
+ }
+
blk_rq_init(q, &q->flush_rq);
q->flush_rq.cmd_type = REQ_TYPE_FS;
q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
q->flush_rq.rq_disk = first_rq->rq_disk;
q->flush_rq.end_io = flush_end_io;
- q->flush_pending_idx ^= 1;
list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
return true;
}
@@ -284,16 +367,37 @@ static void flush_data_end_io(struct request *rq, int error)
blk_run_queue_async(q);
}
+static void mq_flush_data_end_io(struct request *rq, int error)
+{
+ struct request_queue *q = rq->q;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ unsigned long flags;
+
+ ctx = rq->mq_ctx;
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ /*
+ * After populating an empty queue, kick it to avoid stall. Read
+ * the comment in flush_end_io().
+ */
+ spin_lock_irqsave(&q->mq_flush_lock, flags);
+ if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
+ blk_mq_run_hw_queue(hctx, true);
+ spin_unlock_irqrestore(&q->mq_flush_lock, flags);
+}
+
/**
* blk_insert_flush - insert a new FLUSH/FUA request
* @rq: request to insert
*
* To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
+ * or from __blk_mq_run_hw_queue() to dispatch a request.
* @rq is being submitted. Analyze what needs to be done and put it on the
* right queue.
*
* CONTEXT:
- * spin_lock_irq(q->queue_lock)
+ * spin_lock_irq(q->queue_lock) in !mq case
*/
void blk_insert_flush(struct request *rq)
{
@@ -316,7 +420,10 @@ void blk_insert_flush(struct request *rq)
* complete the request.
*/
if (!policy) {
- __blk_end_bidi_request(rq, 0, 0, 0);
+ if (q->mq_ops)
+ blk_mq_end_io(rq, 0);
+ else
+ __blk_end_bidi_request(rq, 0, 0, 0);
return;
}
@@ -329,7 +436,10 @@ void blk_insert_flush(struct request *rq)
*/
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
- list_add_tail(&rq->queuelist, &q->queue_head);
+ if (q->mq_ops) {
+ blk_mq_run_request(rq, false, true);
+ } else
+ list_add_tail(&rq->queuelist, &q->queue_head);
return;
}
@@ -341,6 +451,14 @@ void blk_insert_flush(struct request *rq)
INIT_LIST_HEAD(&rq->flush.list);
rq->cmd_flags |= REQ_FLUSH_SEQ;
rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
+ if (q->mq_ops) {
+ rq->end_io = mq_flush_data_end_io;
+
+ spin_lock_irq(&q->mq_flush_lock);
+ blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
+ spin_unlock_irq(&q->mq_flush_lock);
+ return;
+ }
rq->end_io = flush_data_end_io;
blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
@@ -444,7 +562,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
* copied from blk_rq_pos(rq).
*/
if (error_sector)
- *error_sector = bio->bi_sector;
+ *error_sector = bio->bi_iter.bi_sector;
if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;
@@ -453,3 +571,9 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
+
+void blk_mq_init_flush(struct request_queue *q)
+{
+ spin_lock_init(&q->mq_flush_lock);
+ INIT_WORK(&q->mq_flush_work, mq_flush_work);
+}
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 03cf7179e8ef..7fbab84399e6 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -43,30 +43,32 @@ static const char *bi_unsupported_name = "unsupported";
*/
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
- struct bio_vec *iv, *ivprv = NULL;
+ struct bio_vec iv, ivprv = { NULL };
unsigned int segments = 0;
unsigned int seg_size = 0;
- unsigned int i = 0;
+ struct bvec_iter iter;
+ int prev = 0;
- bio_for_each_integrity_vec(iv, bio, i) {
+ bio_for_each_integrity_vec(iv, bio, iter) {
- if (ivprv) {
- if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+ if (prev) {
+ if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+ if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
goto new_segment;
- if (seg_size + iv->bv_len > queue_max_segment_size(q))
+ if (seg_size + iv.bv_len > queue_max_segment_size(q))
goto new_segment;
- seg_size += iv->bv_len;
+ seg_size += iv.bv_len;
} else {
new_segment:
segments++;
- seg_size = iv->bv_len;
+ seg_size = iv.bv_len;
}
+ prev = 1;
ivprv = iv;
}
@@ -87,24 +89,25 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist)
{
- struct bio_vec *iv, *ivprv = NULL;
+ struct bio_vec iv, ivprv = { NULL };
struct scatterlist *sg = NULL;
unsigned int segments = 0;
- unsigned int i = 0;
+ struct bvec_iter iter;
+ int prev = 0;
- bio_for_each_integrity_vec(iv, bio, i) {
+ bio_for_each_integrity_vec(iv, bio, iter) {
- if (ivprv) {
- if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+ if (prev) {
+ if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+ if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
goto new_segment;
- if (sg->length + iv->bv_len > queue_max_segment_size(q))
+ if (sg->length + iv.bv_len > queue_max_segment_size(q))
goto new_segment;
- sg->length += iv->bv_len;
+ sg->length += iv.bv_len;
} else {
new_segment:
if (!sg)
@@ -114,10 +117,11 @@ new_segment:
sg = sg_next(sg);
}
- sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset);
+ sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
segments++;
}
+ prev = 1;
ivprv = iv;
}
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 4b8d9b541112..1855bf51edb0 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -35,7 +35,7 @@ void blk_iopoll_sched(struct blk_iopoll *iop)
unsigned long flags;
local_irq_save(flags);
- list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
+ list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_restore(flags);
}
@@ -79,7 +79,7 @@ EXPORT_SYMBOL(blk_iopoll_complete);
static void blk_iopoll_softirq(struct softirq_action *h)
{
- struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
+ struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
int rearm = 0, budget = blk_iopoll_budget;
unsigned long start_time = jiffies;
@@ -201,7 +201,7 @@ static int blk_iopoll_cpu_notify(struct notifier_block *self,
local_irq_disable();
list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
- &__get_cpu_var(blk_cpu_iopoll));
+ this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_enable();
}
diff --git a/block/blk-lib.c b/block/blk-lib.c
index d6f50d572565..2da76c999ef3 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -43,8 +43,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
int type = REQ_WRITE | REQ_DISCARD;
- sector_t max_discard_sectors;
- sector_t granularity, alignment;
+ unsigned int max_discard_sectors, granularity;
+ int alignment;
struct bio_batch bb;
struct bio *bio;
int ret = 0;
@@ -58,16 +58,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
- alignment = bdev_discard_alignment(bdev) >> 9;
- alignment = sector_div(alignment, granularity);
+ alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
/*
* Ensure that max_discard_sectors is of the proper
* granularity, so that requests stay aligned after a split.
*/
max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
- sector_div(max_discard_sectors, granularity);
- max_discard_sectors *= granularity;
+ max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors)) {
/* Avoid infinite loop below. Being cautious never hurts. */
return -EOPNOTSUPP;
@@ -110,12 +108,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
req_sects = end_sect - sector;
}
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
bio->bi_private = &bb;
- bio->bi_size = req_sects << 9;
+ bio->bi_iter.bi_size = req_sects << 9;
nr_sects -= req_sects;
sector = end_sect;
@@ -176,7 +174,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
break;
}
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
bio->bi_private = &bb;
@@ -186,11 +184,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
if (nr_sects > max_write_same_sectors) {
- bio->bi_size = max_write_same_sectors << 9;
+ bio->bi_iter.bi_size = max_write_same_sectors << 9;
nr_sects -= max_write_same_sectors;
sector += max_write_same_sectors;
} else {
- bio->bi_size = nr_sects << 9;
+ bio->bi_iter.bi_size = nr_sects << 9;
nr_sects = 0;
}
@@ -242,7 +240,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
break;
}
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_end_io = bio_batch_end_io;
bio->bi_private = &bb;
diff --git a/block/blk-map.c b/block/blk-map.c
index 623e1cd4cffe..ae4ae1047fd9 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
rq->biotail->bi_next = bio;
rq->biotail = bio;
- rq->__data_len += bio->bi_size;
+ rq->__data_len += bio->bi_iter.bi_size;
}
return 0;
}
@@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
ret = blk_rq_append_bio(q, rq, bio);
if (!ret)
- return bio->bi_size;
+ return bio->bi_iter.bi_size;
/* if it was bounced we must call the end io function */
bio_endio(bio, 0);
@@ -220,7 +220,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (bio->bi_size != len) {
+ if (bio->bi_iter.bi_size != len) {
/*
* Grab an extra reference to this bio, as bio_unmap_user()
* expects to be able to drop it twice as it happens on the
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5f2448253797..953b8dfbdc4b 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,10 +12,11 @@
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio)
{
- struct bio_vec *bv, *bvprv = NULL;
- int cluster, i, high, highprv = 1;
+ struct bio_vec bv, bvprv = { NULL };
+ int cluster, high, highprv = 1;
unsigned int seg_size, nr_phys_segs;
struct bio *fbio, *bbio;
+ struct bvec_iter iter;
if (!bio)
return 0;
@@ -25,25 +26,23 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
seg_size = 0;
nr_phys_segs = 0;
for_each_bio(bio) {
- bio_for_each_segment(bv, bio, i) {
+ bio_for_each_segment(bv, bio, iter) {
/*
* the trick here is making sure that a high page is
* never considered part of another segment, since that
* might change with the bounce page.
*/
- high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
- if (high || highprv)
- goto new_segment;
- if (cluster) {
- if (seg_size + bv->bv_len
+ high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
+ if (!high && !highprv && cluster) {
+ if (seg_size + bv.bv_len
> queue_max_segment_size(q))
goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+ if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+ if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
goto new_segment;
- seg_size += bv->bv_len;
+ seg_size += bv.bv_len;
bvprv = bv;
continue;
}
@@ -54,7 +53,7 @@ new_segment:
nr_phys_segs++;
bvprv = bv;
- seg_size = bv->bv_len;
+ seg_size = bv.bv_len;
highprv = high;
}
bbio = bio;
@@ -87,6 +86,9 @@ EXPORT_SYMBOL(blk_recount_segments);
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
+ struct bio_vec end_bv, nxt_bv;
+ struct bvec_iter iter;
+
if (!blk_queue_cluster(q))
return 0;
@@ -97,34 +99,40 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
if (!bio_has_data(bio))
return 1;
- if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+ bio_for_each_segment(end_bv, bio, iter)
+ if (end_bv.bv_len == iter.bi_size)
+ break;
+
+ nxt_bv = bio_iovec(nxt);
+
+ if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
return 0;
/*
* bio and nxt are contiguous in memory; check if the queue allows
* these two to be merged into one
*/
- if (BIO_SEG_BOUNDARY(q, bio, nxt))
+ if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
return 1;
return 0;
}
-static void
+static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
- struct scatterlist *sglist, struct bio_vec **bvprv,
+ struct scatterlist *sglist, struct bio_vec *bvprv,
struct scatterlist **sg, int *nsegs, int *cluster)
{
int nbytes = bvec->bv_len;
- if (*bvprv && *cluster) {
+ if (*sg && *cluster) {
if ((*sg)->length + nbytes > queue_max_segment_size(q))
goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
goto new_segment;
(*sg)->length += nbytes;
@@ -150,7 +158,7 @@ new_segment:
sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
(*nsegs)++;
}
- *bvprv = bvec;
+ *bvprv = *bvec;
}
/*
@@ -160,7 +168,7 @@ new_segment:
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sglist)
{
- struct bio_vec *bvec, *bvprv;
+ struct bio_vec bvec, bvprv;
struct req_iterator iter;
struct scatterlist *sg;
int nsegs, cluster;
@@ -171,10 +179,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
/*
* for each bio in rq
*/
- bvprv = NULL;
sg = NULL;
rq_for_each_segment(bvec, rq, iter) {
- __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+ __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
&nsegs, &cluster);
} /* segments in rq */
@@ -223,18 +230,17 @@ EXPORT_SYMBOL(blk_rq_map_sg);
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist)
{
- struct bio_vec *bvec, *bvprv;
+ struct bio_vec bvec, bvprv;
struct scatterlist *sg;
int nsegs, cluster;
- unsigned long i;
+ struct bvec_iter iter;
nsegs = 0;
cluster = blk_queue_cluster(q);
- bvprv = NULL;
sg = NULL;
- bio_for_each_segment(bvec, bio, i) {
- __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+ bio_for_each_segment(bvec, bio, iter) {
+ __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
&nsegs, &cluster);
} /* segments in bio */
@@ -532,9 +538,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
int blk_try_merge(struct request *rq, struct bio *bio)
{
- if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+ if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
return ELEVATOR_BACK_MERGE;
- else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+ else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
return ELEVATOR_FRONT_MERGE;
return ELEVATOR_NO_MERGE;
}
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
new file mode 100644
index 000000000000..f8ea39d7ae54
--- /dev/null
+++ b/block/blk-mq-cpu.c
@@ -0,0 +1,93 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+
+#include <linux/blk-mq.h>
+#include "blk-mq.h"
+
+static LIST_HEAD(blk_mq_cpu_notify_list);
+static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+
+static int __cpuinit blk_mq_main_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long) hcpu;
+ struct blk_mq_cpu_notifier *notify;
+
+ spin_lock(&blk_mq_cpu_notify_lock);
+
+ list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
+ notify->notify(notify->data, action, cpu);
+
+ spin_unlock(&blk_mq_cpu_notify_lock);
+ return NOTIFY_OK;
+}
+
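+/*
+ * Hotplug callback: when a CPU dies, drain its per-cpu IPI completion list
+ * and finish the requests on it.
+ */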
+static void __cpuinit blk_mq_cpu_notify(void *data, unsigned long action,
+ unsigned int cpu)
+{
+ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+ /*
+ * If the CPU goes away, ensure that we run any pending
+ * completions.
+ */
+ struct llist_node *node;
+ struct request *rq;
+
+ local_irq_disable();
+
+ node = llist_del_all(&per_cpu(ipi_lists, cpu));
+ while (node) {
+ struct llist_node *next = node->next;
+
+ rq = llist_entry(node, struct request, ll_list);
+ __blk_mq_end_io(rq, rq->errors);
+ node = next;
+ }
+
+ local_irq_enable();
+ }
+}
+
+static struct notifier_block __cpuinitdata blk_mq_main_cpu_notifier = {
+ .notifier_call = blk_mq_main_cpu_notify,
+};
+
+void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
+{
+ BUG_ON(!notifier->notify);
+
+ spin_lock(&blk_mq_cpu_notify_lock);
+ list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
+ spin_unlock(&blk_mq_cpu_notify_lock);
+}
+
+void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
+{
+ spin_lock(&blk_mq_cpu_notify_lock);
+ list_del(&notifier->list);
+ spin_unlock(&blk_mq_cpu_notify_lock);
+}
+
+void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
+ void (*fn)(void *, unsigned long, unsigned int),
+ void *data)
+{
+ notifier->notify = fn;
+ notifier->data = data;
+}
+
+static struct blk_mq_cpu_notifier __cpuinitdata cpu_notifier = {
+ .notify = blk_mq_cpu_notify,
+};
+
+void __init blk_mq_cpu_init(void)
+{
+ register_hotcpu_notifier(&blk_mq_main_cpu_notifier);
+ blk_mq_register_cpu_notifier(&cpu_notifier);
+}
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
new file mode 100644
index 000000000000..f8721278601c
--- /dev/null
+++ b/block/blk-mq-cpumap.c
@@ -0,0 +1,108 @@
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+
+#include <linux/blk-mq.h>
+#include "blk.h"
+#include "blk-mq.h"
+
+static void show_map(unsigned int *map, unsigned int nr)
+{
+ int i;
+
+ pr_info("blk-mq: CPU -> queue map\n");
+ for_each_online_cpu(i)
+ pr_info(" CPU%2u -> Queue %u\n", i, map[i]);
+}
+
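+/* Map a CPU to a queue index by spreading CPUs evenly across the queues. */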
+static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
+ const int cpu)
+{
+ return cpu / ((nr_cpus + nr_queues - 1) / nr_queues);
+}
+
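+/* Return the first thread sibling of @cpu, or @cpu itself if there is none. */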
+static int get_first_sibling(unsigned int cpu)
+{
+ unsigned int ret;
+
+ ret = cpumask_first(topology_thread_cpumask(cpu));
+ if (ret < nr_cpu_ids)
+ return ret;
+
+ return cpu;
+}
+
+int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
+{
+ unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
+ cpumask_var_t cpus;
+
+ if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
+ return 1;
+
+ cpumask_clear(cpus);
+ nr_cpus = nr_uniq_cpus = 0;
+ for_each_online_cpu(i) {
+ nr_cpus++;
+ first_sibling = get_first_sibling(i);
+ if (!cpumask_test_cpu(first_sibling, cpus))
+ nr_uniq_cpus++;
+ cpumask_set_cpu(i, cpus);
+ }
+
+ queue = 0;
+ for_each_possible_cpu(i) {
+ if (!cpu_online(i)) {
+ map[i] = 0;
+ continue;
+ }
+
+ /*
+ * Easy case - we have as many hardware queues as CPUs, or more, or
+ * there are no thread siblings to take into account. Do a 1:1
+ * mapping if possible, or a sequential mapping otherwise.
+ */
+ if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
+ map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
+ queue++;
+ continue;
+ }
+
+ /*
+ * Fewer than nr_cpus queues, and we have some number of
+ * threads per core. Map sibling threads to the same
+ * queue.
+ */
+ first_sibling = get_first_sibling(i);
+ if (first_sibling == i) {
+ map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
+ queue);
+ queue++;
+ } else
+ map[i] = map[first_sibling];
+ }
+
+ show_map(map, nr_cpus);
+ free_cpumask_var(cpus);
+ return 0;
+}
+
+unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg)
+{
+ unsigned int *map;
+
+ /* If cpus are offline, map them to first hctx */
+ map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL,
+ reg->numa_node);
+ if (!map)
+ return NULL;
+
+ if (!blk_mq_update_queue_map(map, reg->nr_hw_queues))
+ return map;
+
+ kfree(map);
+ return NULL;
+}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
new file mode 100644
index 000000000000..ba6cf8e9aa0a
--- /dev/null
+++ b/block/blk-mq-sysfs.c
@@ -0,0 +1,384 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/smp.h>
+
+#include <linux/blk-mq.h>
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
+
+static void blk_mq_sysfs_release(struct kobject *kobj)
+{
+}
+
+struct blk_mq_ctx_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct blk_mq_ctx *, char *);
+ ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
+};
+
+struct blk_mq_hw_ctx_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
+ ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
+};
+
+static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
+ char *page)
+{
+ struct blk_mq_ctx_sysfs_entry *entry;
+ struct blk_mq_ctx *ctx;
+ struct request_queue *q;
+ ssize_t res;
+
+ entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
+ ctx = container_of(kobj, struct blk_mq_ctx, kobj);
+ q = ctx->queue;
+
+ if (!entry->show)
+ return -EIO;
+
+ res = -ENOENT;
+ mutex_lock(&q->sysfs_lock);
+ if (!blk_queue_dying(q))
+ res = entry->show(ctx, page);
+ mutex_unlock(&q->sysfs_lock);
+ return res;
+}
+
+static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t length)
+{
+ struct blk_mq_ctx_sysfs_entry *entry;
+ struct blk_mq_ctx *ctx;
+ struct request_queue *q;
+ ssize_t res;
+
+ entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
+ ctx = container_of(kobj, struct blk_mq_ctx, kobj);
+ q = ctx->queue;
+
+ if (!entry->store)
+ return -EIO;
+
+ res = -ENOENT;
+ mutex_lock(&q->sysfs_lock);
+ if (!blk_queue_dying(q))
+ res = entry->store(ctx, page, length);
+ mutex_unlock(&q->sysfs_lock);
+ return res;
+}
+
+static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *page)
+{
+ struct blk_mq_hw_ctx_sysfs_entry *entry;
+ struct blk_mq_hw_ctx *hctx;
+ struct request_queue *q;
+ ssize_t res;
+
+ entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
+ hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
+ q = hctx->queue;
+
+ if (!entry->show)
+ return -EIO;
+
+ res = -ENOENT;
+ mutex_lock(&q->sysfs_lock);
+ if (!blk_queue_dying(q))
+ res = entry->show(hctx, page);
+ mutex_unlock(&q->sysfs_lock);
+ return res;
+}
+
+static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
+ struct attribute *attr, const char *page,
+ size_t length)
+{
+ struct blk_mq_hw_ctx_sysfs_entry *entry;
+ struct blk_mq_hw_ctx *hctx;
+ struct request_queue *q;
+ ssize_t res;
+
+ entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
+ hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
+ q = hctx->queue;
+
+ if (!entry->store)
+ return -EIO;
+
+ res = -ENOENT;
+ mutex_lock(&q->sysfs_lock);
+ if (!blk_queue_dying(q))
+ res = entry->store(hctx, page, length);
+ mutex_unlock(&q->sysfs_lock);
+ return res;
+}
+
+static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
+{
+ return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
+ ctx->rq_dispatched[0]);
+}
+
+static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
+{
+ return sprintf(page, "%lu\n", ctx->rq_merged);
+}
+
+static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
+{
+ return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
+ ctx->rq_completed[0]);
+}
+
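+/* Print the requests on @list, one pointer per line, prefixed by @msg. */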
+static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
+{
+ char *start_page = page;
+ struct request *rq;
+
+ page += sprintf(page, "%s:\n", msg);
+
+ list_for_each_entry(rq, list, queuelist)
+ page += sprintf(page, "\t%p\n", rq);
+
+ return page - start_page;
+}
+
+static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
+{
+ ssize_t ret;
+
+ spin_lock(&ctx->lock);
+ ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
+ spin_unlock(&ctx->lock);
+
+ return ret;
+}
+
+static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
+ char *page)
+{
+ return sprintf(page, "%lu\n", hctx->queued);
+}
+
+static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ return sprintf(page, "%lu\n", hctx->run);
+}
+
+static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
+ char *page)
+{
+ char *start_page = page;
+ int i;
+
+ page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);
+
+ for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
+ unsigned long d = 1U << (i - 1);
+
+ page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
+ }
+
+ return page - start_page;
+}
+
+static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
+ char *page)
+{
+ ssize_t ret;
+
+ spin_lock(&hctx->lock);
+ ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
+ spin_unlock(&hctx->lock);
+
+ return ret;
+}
+
+static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ ssize_t ret;
+
+ spin_lock(&hctx->lock);
+ ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
+ spin_unlock(&hctx->lock);
+
+ return ret;
+}
+
+static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx,
+ const char *page, size_t len)
+{
+ struct blk_mq_ctx *ctx;
+ unsigned long ret;
+ unsigned int i;
+
+ if (kstrtoul(page, 10, &ret)) {
+ pr_err("blk-mq-sysfs: invalid input '%s'\n", page);
+ return -EINVAL;
+ }
+
+ spin_lock(&hctx->lock);
+ if (ret)
+ hctx->flags |= BLK_MQ_F_SHOULD_IPI;
+ else
+ hctx->flags &= ~BLK_MQ_F_SHOULD_IPI;
+ spin_unlock(&hctx->lock);
+
+ hctx_for_each_ctx(hctx, ctx, i)
+ ctx->ipi_redirect = !!ret;
+
+ return len;
+}
+
+static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ return blk_mq_tag_sysfs_show(hctx->tags, page);
+}
+
+static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
+ .attr = {.name = "dispatched", .mode = S_IRUGO },
+ .show = blk_mq_sysfs_dispatched_show,
+};
+static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
+ .attr = {.name = "merged", .mode = S_IRUGO },
+ .show = blk_mq_sysfs_merged_show,
+};
+static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
+ .attr = {.name = "completed", .mode = S_IRUGO },
+ .show = blk_mq_sysfs_completed_show,
+};
+static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
+ .attr = {.name = "rq_list", .mode = S_IRUGO },
+ .show = blk_mq_sysfs_rq_list_show,
+};
+
+static struct attribute *default_ctx_attrs[] = {
+ &blk_mq_sysfs_dispatched.attr,
+ &blk_mq_sysfs_merged.attr,
+ &blk_mq_sysfs_completed.attr,
+ &blk_mq_sysfs_rq_list.attr,
+ NULL,
+};
+
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
+ .attr = {.name = "queued", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_queued_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
+ .attr = {.name = "run", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_run_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
+ .attr = {.name = "dispatched", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_dispatched_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
+ .attr = {.name = "pending", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_rq_list_show,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = {
+ .attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR},
+ .show = blk_mq_hw_sysfs_ipi_show,
+ .store = blk_mq_hw_sysfs_ipi_store,
+};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
+ .attr = {.name = "tags", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_tags_show,
+};
+
+static struct attribute *default_hw_ctx_attrs[] = {
+ &blk_mq_hw_sysfs_queued.attr,
+ &blk_mq_hw_sysfs_run.attr,
+ &blk_mq_hw_sysfs_dispatched.attr,
+ &blk_mq_hw_sysfs_pending.attr,
+ &blk_mq_hw_sysfs_ipi.attr,
+ &blk_mq_hw_sysfs_tags.attr,
+ NULL,
+};
+
+static const struct sysfs_ops blk_mq_sysfs_ops = {
+ .show = blk_mq_sysfs_show,
+ .store = blk_mq_sysfs_store,
+};
+
+static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
+ .show = blk_mq_hw_sysfs_show,
+ .store = blk_mq_hw_sysfs_store,
+};
+
+static struct kobj_type blk_mq_ktype = {
+ .sysfs_ops = &blk_mq_sysfs_ops,
+ .release = blk_mq_sysfs_release,
+};
+
+static struct kobj_type blk_mq_ctx_ktype = {
+ .sysfs_ops = &blk_mq_sysfs_ops,
+ .default_attrs = default_ctx_attrs,
+ .release = blk_mq_sysfs_release,
+};
+
+static struct kobj_type blk_mq_hw_ktype = {
+ .sysfs_ops = &blk_mq_hw_sysfs_ops,
+ .default_attrs = default_hw_ctx_attrs,
+ .release = blk_mq_sysfs_release,
+};
+
+void blk_mq_unregister_disk(struct gendisk *disk)
+{
+ struct request_queue *q = disk->queue;
+
+ kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
+ kobject_del(&q->mq_kobj);
+
+ kobject_put(&disk_to_dev(disk)->kobj);
+}
+
+int blk_mq_register_disk(struct gendisk *disk)
+{
+ struct device *dev = disk_to_dev(disk);
+ struct request_queue *q = disk->queue;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ int ret, i, j;
+
+ kobject_init(&q->mq_kobj, &blk_mq_ktype);
+
+ ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
+ if (ret < 0)
+ return ret;
+
+ kobject_uevent(&q->mq_kobj, KOBJ_ADD);
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
+ ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i);
+ if (ret)
+ break;
+
+ if (!hctx->nr_ctx)
+ continue;
+
+ hctx_for_each_ctx(hctx, ctx, j) {
+ kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
+ ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
+ if (ret)
+ break;
+ }
+ }
+
+ if (ret) {
+ blk_mq_unregister_disk(disk);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
new file mode 100644
index 000000000000..d64a02fb1f73
--- /dev/null
+++ b/block/blk-mq-tag.c
@@ -0,0 +1,204 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/percpu_ida.h>
+
+#include <linux/blk-mq.h>
+#include "blk.h"
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
+
+/*
+ * Per tagged queue (tag address space) map
+ */
+struct blk_mq_tags {
+ unsigned int nr_tags;
+ unsigned int nr_reserved_tags;
+ unsigned int nr_batch_move;
+ unsigned int nr_max_cache;
+
+ struct percpu_ida free_tags;
+ struct percpu_ida reserved_tags;
+};
+
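+/* Block until a tag becomes available, then release it again right away. */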
+void blk_mq_wait_for_tags(struct blk_mq_tags *tags)
+{
+ int tag = blk_mq_get_tag(tags, __GFP_WAIT, false);
+ blk_mq_put_tag(tags, tag);
+}
+
+bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
+{
+ return !tags ||
+ percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0;
+}
+
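+/* Normal tags come from free_tags and are offset past the reserved range. */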
+static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
+{
+ int tag;
+
+ tag = percpu_ida_alloc(&tags->free_tags, gfp);
+ if (tag < 0)
+ return BLK_MQ_TAG_FAIL;
+ return tag + tags->nr_reserved_tags;
+}
+
+static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
+ gfp_t gfp)
+{
+ int tag;
+
+ if (unlikely(!tags->nr_reserved_tags)) {
+ WARN_ON_ONCE(1);
+ return BLK_MQ_TAG_FAIL;
+ }
+
+ tag = percpu_ida_alloc(&tags->reserved_tags, gfp);
+ if (tag < 0)
+ return BLK_MQ_TAG_FAIL;
+ return tag;
+}
+
+unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved)
+{
+ if (!reserved)
+ return __blk_mq_get_tag(tags, gfp);
+
+ return __blk_mq_get_reserved_tag(tags, gfp);
+}
+
+static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
+{
+ BUG_ON(tag >= tags->nr_tags);
+
+ percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags);
+}
+
+static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
+ unsigned int tag)
+{
+ BUG_ON(tag >= tags->nr_reserved_tags);
+
+ percpu_ida_free(&tags->reserved_tags, tag);
+}
+
+void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
+{
+ if (tag >= tags->nr_reserved_tags)
+ __blk_mq_put_tag(tags, tag);
+ else
+ __blk_mq_put_reserved_tag(tags, tag);
+}
+
+static int __blk_mq_tag_iter(unsigned id, void *data)
+{
+ unsigned long *tag_map = data;
+ __set_bit(id, tag_map);
+ return 0;
+}
+
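+/*
+ * Build a bitmap with the free tags set and pass it to @fn; clear bits
+ * therefore correspond to busy tags.
+ */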
+void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
+ void (*fn)(void *, unsigned long *), void *data)
+{
+ unsigned long *tag_map;
+ size_t map_size;
+
+ map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
+ tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
+ if (!tag_map)
+ return;
+
+ percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map);
+ if (tags->nr_reserved_tags)
+ percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter,
+ tag_map);
+
+ fn(data, tag_map);
+ kfree(tag_map);
+}
+
+struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
+ unsigned int reserved_tags, int node)
+{
+ unsigned int nr_tags, nr_cache;
+ struct blk_mq_tags *tags;
+ int ret;
+
+ if (total_tags > BLK_MQ_TAG_MAX) {
+ pr_err("blk-mq: tag depth too large\n");
+ return NULL;
+ }
+
+ tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
+ if (!tags)
+ return NULL;
+
+ nr_tags = total_tags - reserved_tags;
+ nr_cache = nr_tags / num_possible_cpus();
+
+ if (nr_cache < BLK_MQ_TAG_CACHE_MIN)
+ nr_cache = BLK_MQ_TAG_CACHE_MIN;
+ else if (nr_cache > BLK_MQ_TAG_CACHE_MAX)
+ nr_cache = BLK_MQ_TAG_CACHE_MAX;
+
+ tags->nr_tags = total_tags;
+ tags->nr_reserved_tags = reserved_tags;
+ tags->nr_max_cache = nr_cache;
+ tags->nr_batch_move = max(1u, nr_cache / 2);
+
+ ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags -
+ tags->nr_reserved_tags,
+ tags->nr_max_cache,
+ tags->nr_batch_move);
+ if (ret)
+ goto err_free_tags;
+
+ if (reserved_tags) {
+ /*
+ * With max_cache and batch set to 1, the allocator falls back to
+ * no caching. That's fine; reserved tag allocation is allowed to be slow.
+ */
+ ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags,
+ 1, 1);
+ if (ret)
+ goto err_reserved_tags;
+ }
+
+ return tags;
+
+err_reserved_tags:
+ percpu_ida_destroy(&tags->free_tags);
+err_free_tags:
+ kfree(tags);
+ return NULL;
+}
+
+void blk_mq_free_tags(struct blk_mq_tags *tags)
+{
+ percpu_ida_destroy(&tags->free_tags);
+ percpu_ida_destroy(&tags->reserved_tags);
+ kfree(tags);
+}
+
+ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
+{
+ char *orig_page = page;
+ int cpu;
+
+ if (!tags)
+ return 0;
+
+ page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u,"
+ " max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags,
+ tags->nr_batch_move, tags->nr_max_cache);
+
+ page += sprintf(page, "nr_free=%u, nr_reserved=%u\n",
+ percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids),
+ percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids));
+
+ for_each_possible_cpu(cpu) {
+ page += sprintf(page, " cpu%02u: nr_free=%u\n", cpu,
+ percpu_ida_free_tags(&tags->free_tags, cpu));
+ }
+
+ return page - orig_page;
+}
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
new file mode 100644
index 000000000000..947ba2c6148e
--- /dev/null
+++ b/block/blk-mq-tag.h
@@ -0,0 +1,27 @@
+#ifndef INT_BLK_MQ_TAG_H
+#define INT_BLK_MQ_TAG_H
+
+struct blk_mq_tags;
+
+extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node);
+extern void blk_mq_free_tags(struct blk_mq_tags *tags);
+
+extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved);
+extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags);
+extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag);
+extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
+extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
+extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
+
+enum {
+ BLK_MQ_TAG_CACHE_MIN = 1,
+ BLK_MQ_TAG_CACHE_MAX = 64,
+};
+
+enum {
+ BLK_MQ_TAG_FAIL = -1U,
+ BLK_MQ_TAG_MIN = BLK_MQ_TAG_CACHE_MIN,
+ BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1,
+};
+
+#endif
diff --git a/block/blk-mq.c b/block/blk-mq.c
new file mode 100644
index 000000000000..5a94b2736a69
--- /dev/null
+++ b/block/blk-mq.c
@@ -0,0 +1,1500 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/smp.h>
+#include <linux/llist.h>
+#include <linux/list_sort.h>
+#include <linux/cpu.h>
+#include <linux/cache.h>
+#include <linux/sched/sysctl.h>
+#include <linux/delay.h>
+
+#include <trace/events/block.h>
+
+#include <linux/blk-mq.h>
+#include "blk.h"
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
+
+static DEFINE_MUTEX(all_q_mutex);
+static LIST_HEAD(all_q_list);
+
+static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
+
+DEFINE_PER_CPU(struct llist_head, ipi_lists);
+
+static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
+ unsigned int cpu)
+{
+ return per_cpu_ptr(q->queue_ctx, cpu);
+}
+
+/*
+ * This assumes per-cpu software queues. They could be per-node
+ * as well, for instance. For now this is hardcoded as-is. Note that we don't
+ * care about preemption, since we know the ctx's are persistent. This does
+ * mean that we can't rely on ctx always matching the currently running CPU.
+ */
+static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
+{
+ return __blk_mq_get_ctx(q, get_cpu());
+}
+
+static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+{
+ put_cpu();
+}
+
+/*
+ * Check if any of the ctx's have pending work in this hardware queue
+ */
+static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
+{
+ unsigned int i;
+
+ for (i = 0; i < hctx->nr_ctx_map; i++)
+ if (hctx->ctx_map[i])
+ return true;
+
+ return false;
+}
+
+/*
+ * Mark this ctx as having pending work in this hardware queue
+ */
+static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx)
+{
+ if (!test_bit(ctx->index_hw, hctx->ctx_map))
+ set_bit(ctx->index_hw, hctx->ctx_map);
+}
+
+static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
+ bool reserved)
+{
+ struct request *rq;
+ unsigned int tag;
+
+ tag = blk_mq_get_tag(hctx->tags, gfp, reserved);
+ if (tag != BLK_MQ_TAG_FAIL) {
+ rq = hctx->rqs[tag];
+ rq->tag = tag;
+
+ return rq;
+ }
+
+ return NULL;
+}
+
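+/*
+ * Take a usage reference on the queue; if the queue is bypassed (being
+ * frozen), drop the reference and wait until the bypass is lifted.
+ */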
+static int blk_mq_queue_enter(struct request_queue *q)
+{
+ int ret;
+
+ __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+ smp_wmb();
+ /* we have trouble freezing the queue while it is initializing */
+ if (!blk_queue_bypass(q) || !blk_queue_init_done(q))
+ return 0;
+
+ __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
+
+ spin_lock_irq(q->queue_lock);
+ ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
+ !blk_queue_bypass(q), *q->queue_lock);
+ /* increment usage with the lock held, so freeze_queue can't run here */
+ if (!ret)
+ __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
+static void blk_mq_queue_exit(struct request_queue *q)
+{
+ __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
+}
+
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+static void blk_mq_freeze_queue(struct request_queue *q)
+{
+ bool drain;
+
+ spin_lock_irq(q->queue_lock);
+ drain = !q->bypass_depth++;
+ queue_flag_set(QUEUE_FLAG_BYPASS, q);
+ spin_unlock_irq(q->queue_lock);
+
+ if (!drain)
+ return;
+
+ while (true) {
+ s64 count;
+
+ spin_lock_irq(q->queue_lock);
+ count = percpu_counter_sum(&q->mq_usage_counter);
+ spin_unlock_irq(q->queue_lock);
+
+ if (count == 0)
+ break;
+ blk_mq_run_queues(q, false);
+ msleep(10);
+ }
+}
+
+static void blk_mq_unfreeze_queue(struct request_queue *q)
+{
+ bool wake = false;
+
+ spin_lock_irq(q->queue_lock);
+ if (!--q->bypass_depth) {
+ queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+ wake = true;
+ }
+ WARN_ON_ONCE(q->bypass_depth < 0);
+ spin_unlock_irq(q->queue_lock);
+ if (wake)
+ wake_up_all(&q->mq_freeze_wq);
+}
+
+bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
+{
+ return blk_mq_has_free_tags(hctx->tags);
+}
+EXPORT_SYMBOL(blk_mq_can_queue);
+
+static void blk_mq_rq_ctx_init(struct blk_mq_ctx *ctx, struct request *rq,
+ unsigned int rw_flags)
+{
+ rq->mq_ctx = ctx;
+ rq->cmd_flags = rw_flags;
+ ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
+}
+
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+ gfp_t gfp, bool reserved)
+{
+ return blk_mq_alloc_rq(hctx, gfp, reserved);
+}
+
+static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
+ int rw, gfp_t gfp,
+ bool reserved)
+{
+ struct request *rq;
+
+ do {
+ struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+ struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
+ if (rq) {
+ blk_mq_rq_ctx_init(ctx, rq, rw);
+ break;
+ } else if (!(gfp & __GFP_WAIT))
+ break;
+
+ blk_mq_put_ctx(ctx);
+ __blk_mq_run_hw_queue(hctx);
+ blk_mq_wait_for_tags(hctx->tags);
+ } while (1);
+
+ return rq;
+}
+
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+ gfp_t gfp, bool reserved)
+{
+ struct request *rq;
+
+ if (blk_mq_queue_enter(q))
+ return NULL;
+
+ rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+ blk_mq_put_ctx(rq->mq_ctx);
+ return rq;
+}
+
+struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
+ gfp_t gfp)
+{
+ struct request *rq;
+
+ if (blk_mq_queue_enter(q))
+ return NULL;
+
+ rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
+ blk_mq_put_ctx(rq->mq_ctx);
+ return rq;
+}
+EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
+
+/*
+ * Re-init and set pdu, if we have it
+ */
+static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+ blk_rq_init(hctx->queue, rq);
+
+ if (hctx->cmd_size)
+ rq->special = blk_mq_rq_to_pdu(rq);
+}
+
+static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx, struct request *rq)
+{
+ const int tag = rq->tag;
+ struct request_queue *q = rq->q;
+
+ blk_mq_rq_init(hctx, rq);
+ blk_mq_put_tag(hctx->tags, tag);
+
+ blk_mq_queue_exit(q);
+}
+
+void blk_mq_free_request(struct request *rq)
+{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+ struct blk_mq_hw_ctx *hctx;
+ struct request_queue *q = rq->q;
+
+ ctx->rq_completed[rq_is_sync(rq)]++;
+
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ __blk_mq_free_request(hctx, ctx, rq);
+}
+
+static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
+{
+ if (error)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ error = -EIO;
+
+ if (unlikely(rq->cmd_flags & REQ_QUIET))
+ set_bit(BIO_QUIET, &bio->bi_flags);
+
+ /* don't actually finish bio if it's part of flush sequence */
+ if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
+ bio_endio(bio, error);
+}
+
+void blk_mq_complete_request(struct request *rq, int error)
+{
+ struct bio *bio = rq->bio;
+ unsigned int bytes = 0;
+
+ trace_block_rq_complete(rq->q, rq);
+
+ while (bio) {
+ struct bio *next = bio->bi_next;
+
+ bio->bi_next = NULL;
+ bytes += bio->bi_iter.bi_size;
+ blk_mq_bio_endio(rq, bio, error);
+ bio = next;
+ }
+
+ blk_account_io_completion(rq, bytes);
+
+ if (rq->end_io)
+ rq->end_io(rq, error);
+ else
+ blk_mq_free_request(rq);
+
+ blk_account_io_done(rq);
+}
+
+void __blk_mq_end_io(struct request *rq, int error)
+{
+ if (!blk_mark_rq_complete(rq))
+ blk_mq_complete_request(rq, error);
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+
+/*
+ * Called with interrupts disabled.
+ */
+static void ipi_end_io(void *data)
+{
+ struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id());
+ struct llist_node *entry, *next;
+ struct request *rq;
+
+ entry = llist_del_all(list);
+
+ while (entry) {
+ next = entry->next;
+ rq = llist_entry(entry, struct request, ll_list);
+ __blk_mq_end_io(rq, rq->errors);
+ entry = next;
+ }
+}
+
+static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
+ struct request *rq, const int error)
+{
+ struct call_single_data *data = &rq->csd;
+
+ rq->errors = error;
+ rq->ll_list.next = NULL;
+
+ /*
+ * If the list is non-empty, an existing IPI must already
+ * be "in flight". If that is the case, we need not schedule
+ * a new one.
+ */
+ if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) {
+ data->func = ipi_end_io;
+ data->flags = 0;
+ __smp_call_function_single(ctx->cpu, data, 0);
+ }
+
+ return true;
+}
+#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
+ struct request *rq, const int error)
+{
+ return false;
+}
+#endif
+
+/*
+ * End IO on this request on a multiqueue enabled driver. We'll either do
+ * it directly inline, or punt to a local IPI handler on the matching
+ * remote CPU.
+ */
+void blk_mq_end_io(struct request *rq, int error)
+{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+ int cpu;
+
+ if (!ctx->ipi_redirect)
+ return __blk_mq_end_io(rq, error);
+
+ cpu = get_cpu();
+
+ if (cpu == ctx->cpu || !cpu_online(ctx->cpu) ||
+ !ipi_remote_cpu(ctx, cpu, rq, error))
+ __blk_mq_end_io(rq, error);
+
+ put_cpu();
+}
+EXPORT_SYMBOL(blk_mq_end_io);
+
+static void blk_mq_start_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ trace_block_rq_issue(q, rq);
+
+ /*
+ * Just mark start time and set the started bit. Due to memory
+ * ordering, we know we'll see the correct deadline as long as
+ * REQ_ATOM_STARTED is seen.
+ */
+ rq->deadline = jiffies + q->rq_timeout;
+ set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+
+static void blk_mq_requeue_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ trace_block_rq_requeue(q, rq);
+ clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+
+struct blk_mq_timeout_data {
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long *next;
+ unsigned int *next_set;
+};
+
+static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
+{
+ struct blk_mq_timeout_data *data = __data;
+ struct blk_mq_hw_ctx *hctx = data->hctx;
+ unsigned int tag;
+
+ /*
+ * It may not be in flight yet (this is where the REQ_ATOM_STARTED
+ * flag comes in). The requests are statically allocated, so we know
+ * it's always safe to access the memory associated with a bit offset
+ * into ->rqs[].
+ */
+ tag = 0;
+ do {
+ struct request *rq;
+
+ tag = find_next_zero_bit(free_tags, hctx->queue_depth, tag);
+ if (tag >= hctx->queue_depth)
+ break;
+
+ rq = hctx->rqs[tag++];
+
+ if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+ continue;
+
+ blk_rq_check_expired(rq, data->next, data->next_set);
+ } while (1);
+}
+
+static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
+ unsigned long *next,
+ unsigned int *next_set)
+{
+ struct blk_mq_timeout_data data = {
+ .hctx = hctx,
+ .next = next,
+ .next_set = next_set,
+ };
+
+ /*
+ * Ask the tagging code to iterate busy requests, so we can
+ * check them for timeout.
+ */
+ blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
+}
+
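+/* Queue timeout handler: check every hardware queue for expired requests. */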
+static void blk_mq_rq_timer(unsigned long data)
+{
+ struct request_queue *q = (struct request_queue *) data;
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long next = 0;
+ int i, next_set = 0;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
+
+ if (next_set)
+ mod_timer(&q->timeout, round_jiffies_up(next));
+}
+
+/*
+ * Reverse check our software queue for entries that we could potentially
+ * merge with. Currently includes a hand-wavy stop count of 8, to not spend
+ * too much time checking for merges.
+ */
+static bool blk_mq_attempt_merge(struct request_queue *q,
+ struct blk_mq_ctx *ctx, struct bio *bio)
+{
+ struct request *rq;
+ int checked = 8;
+
+ list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
+ int el_ret;
+
+ if (!checked--)
+ break;
+
+ if (!blk_rq_merge_ok(rq, bio))
+ continue;
+
+ el_ret = blk_try_merge(rq, bio);
+ if (el_ret == ELEVATOR_BACK_MERGE) {
+ if (bio_attempt_back_merge(q, rq, bio)) {
+ ctx->rq_merged++;
+ return true;
+ }
+ break;
+ } else if (el_ret == ELEVATOR_FRONT_MERGE) {
+ if (bio_attempt_front_merge(q, rq, bio)) {
+ ctx->rq_merged++;
+ return true;
+ }
+ break;
+ }
+ }
+
+ return false;
+}
+
+void blk_mq_add_timer(struct request *rq)
+{
+ __blk_add_timer(rq, NULL);
+}
+
+/*
+ * Run this hardware queue, pulling any software queues mapped to it in.
+ * Note that this function currently has various problems around ordering
+ * of IO. In particular, we'd like FIFO behaviour on handling existing
+ * items on the hctx->dispatch list. Ignore that for now.
+ */
+static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct blk_mq_ctx *ctx;
+ struct request *rq;
+ LIST_HEAD(rq_list);
+ int bit, queued;
+
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags)))
+ return;
+
+ hctx->run++;
+
+ /*
+ * Touch any software queue that has pending entries.
+ */
+ for_each_set_bit(bit, hctx->ctx_map, hctx->nr_ctx) {
+ clear_bit(bit, hctx->ctx_map);
+ ctx = hctx->ctxs[bit];
+ BUG_ON(bit != ctx->index_hw);
+
+ spin_lock(&ctx->lock);
+ list_splice_tail_init(&ctx->rq_list, &rq_list);
+ spin_unlock(&ctx->lock);
+ }
+
+ /*
+ * If we have previous entries on our dispatch list, grab them
+ * and stuff them at the front for more fair dispatch.
+ */
+ if (!list_empty_careful(&hctx->dispatch)) {
+ spin_lock(&hctx->lock);
+ if (!list_empty(&hctx->dispatch))
+ list_splice_init(&hctx->dispatch, &rq_list);
+ spin_unlock(&hctx->lock);
+ }
+
+ /*
+ * Delete and return all entries from our dispatch list
+ */
+ queued = 0;
+
+ /*
+ * Now process all the entries, sending them to the driver.
+ */
+ while (!list_empty(&rq_list)) {
+ int ret;
+
+ rq = list_first_entry(&rq_list, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
+
+ /*
+ * Last request in the series. Flag it as such; this
+ * lets drivers know when IO should be kicked off, if they
+ * don't do it on a per-request basis.
+ *
+ * Note: the flag isn't the only condition on which drivers
+ * should kick off IO. If the drive is busy, the last
+ * request might not have the bit set.
+ */
+ if (list_empty(&rq_list))
+ rq->cmd_flags |= REQ_END;
+
+ ret = q->mq_ops->queue_rq(hctx, rq);
+ switch (ret) {
+ case BLK_MQ_RQ_QUEUE_OK:
+ queued++;
+ continue;
+ case BLK_MQ_RQ_QUEUE_BUSY:
+ /*
+ * FIXME: we should have a mechanism to stop the queue
+ * like blk_stop_queue, otherwise we will waste cpu
+ * time
+ */
+ list_add(&rq->queuelist, &rq_list);
+ blk_mq_requeue_request(rq);
+ break;
+ default:
+ pr_err("blk-mq: bad return on queue: %d\n", ret);
+ rq->errors = -EIO;
+ case BLK_MQ_RQ_QUEUE_ERROR:
+ blk_mq_end_io(rq, rq->errors);
+ break;
+ }
+
+ if (ret == BLK_MQ_RQ_QUEUE_BUSY)
+ break;
+ }
+
+ if (!queued)
+ hctx->dispatched[0]++;
+ else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
+ hctx->dispatched[ilog2(queued) + 1]++;
+
+ /*
+ * Any items that need requeuing? Stuff them into hctx->dispatch,
+ * that is where we will continue on next queue run.
+ */
+ if (!list_empty(&rq_list)) {
+ spin_lock(&hctx->lock);
+ list_splice(&rq_list, &hctx->dispatch);
+ spin_unlock(&hctx->lock);
+ }
+}
+
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags)))
+ return;
+
+ if (!async)
+ __blk_mq_run_hw_queue(hctx);
+ else {
+ struct request_queue *q = hctx->queue;
+
+ kblockd_schedule_delayed_work(q, &hctx->delayed_work, 0);
+ }
+}
+
+void blk_mq_run_queues(struct request_queue *q, bool async)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if ((!blk_mq_hctx_has_pending(hctx) &&
+ list_empty_careful(&hctx->dispatch)) ||
+ test_bit(BLK_MQ_S_STOPPED, &hctx->flags))
+ continue;
+
+ blk_mq_run_hw_queue(hctx, async);
+ }
+}
+EXPORT_SYMBOL(blk_mq_run_queues);
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+ cancel_delayed_work(&hctx->delayed_work);
+ set_bit(BLK_MQ_S_STOPPED, &hctx->state);
+}
+EXPORT_SYMBOL(blk_mq_stop_hw_queue);
+
+void blk_mq_stop_hw_queues(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_stop_hw_queue(hctx);
+}
+EXPORT_SYMBOL(blk_mq_stop_hw_queues);
+
+void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+ clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+ __blk_mq_run_hw_queue(hctx);
+}
+EXPORT_SYMBOL(blk_mq_start_hw_queue);
+
+void blk_mq_start_stopped_hw_queues(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
+ continue;
+
+ clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+ blk_mq_run_hw_queue(hctx, true);
+ }
+}
+EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
+
+static void blk_mq_work_fn(struct work_struct *work)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work);
+ __blk_mq_run_hw_queue(hctx);
+}
+
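+/*
+ * Add @rq to its software queue and mark that ctx pending in the hardware
+ * queue; callers hold ctx->lock.
+ */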
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
+{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+
+ list_add_tail(&rq->queuelist, &ctx->rq_list);
+ blk_mq_hctx_mark_pending(hctx, ctx);
+
+ /*
+ * We do this early, to ensure we are on the right CPU.
+ */
+ blk_mq_add_timer(rq);
+}
+
+void blk_mq_insert_request(struct request_queue *q, struct request *rq,
+ bool run_queue)
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx, *current_ctx;
+
+ ctx = rq->mq_ctx;
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+ blk_insert_flush(rq);
+ } else {
+ current_ctx = blk_mq_get_ctx(q);
+
+ if (!cpu_online(ctx->cpu)) {
+ ctx = current_ctx;
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ rq->mq_ctx = ctx;
+ }
+ spin_lock(&ctx->lock);
+ __blk_mq_insert_request(hctx, rq);
+ spin_unlock(&ctx->lock);
+
+ blk_mq_put_ctx(current_ctx);
+ }
+
+ if (run_queue)
+ __blk_mq_run_hw_queue(hctx);
+}
+EXPORT_SYMBOL(blk_mq_insert_request);
+
+/*
+ * This is a special version of blk_mq_insert_request to bypass FLUSH request
+ * check. Should only be used internally.
+ */
+void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
+{
+ struct request_queue *q = rq->q;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx, *current_ctx;
+
+ current_ctx = blk_mq_get_ctx(q);
+
+ ctx = rq->mq_ctx;
+ if (!cpu_online(ctx->cpu)) {
+ ctx = current_ctx;
+ rq->mq_ctx = ctx;
+ }
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ /* ctx->cpu might be offline */
+ spin_lock(&ctx->lock);
+ __blk_mq_insert_request(hctx, rq);
+ spin_unlock(&ctx->lock);
+
+ blk_mq_put_ctx(current_ctx);
+
+ if (run_queue)
+ blk_mq_run_hw_queue(hctx, async);
+}
+
+static void blk_mq_insert_requests(struct request_queue *q,
+ struct blk_mq_ctx *ctx,
+ struct list_head *list,
+ int depth,
+ bool from_schedule)
+
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *current_ctx;
+
+ trace_block_unplug(q, depth, !from_schedule);
+
+ current_ctx = blk_mq_get_ctx(q);
+
+ if (!cpu_online(ctx->cpu))
+ ctx = current_ctx;
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ /*
+ * Preemption doesn't flush the plug list, so it's possible that ctx->cpu
+ * is offline by now.
+ */
+ spin_lock(&ctx->lock);
+ while (!list_empty(list)) {
+ struct request *rq;
+
+ rq = list_first_entry(list, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ rq->mq_ctx = ctx;
+ __blk_mq_insert_request(hctx, rq);
+ }
+ spin_unlock(&ctx->lock);
+
+ blk_mq_put_ctx(current_ctx);
+
+ blk_mq_run_hw_queue(hctx, from_schedule);
+}
+
+static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct request *rqa = container_of(a, struct request, queuelist);
+ struct request *rqb = container_of(b, struct request, queuelist);
+
+ return !(rqa->mq_ctx < rqb->mq_ctx ||
+ (rqa->mq_ctx == rqb->mq_ctx &&
+ blk_rq_pos(rqa) < blk_rq_pos(rqb)));
+}
+
+void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+{
+ struct blk_mq_ctx *this_ctx;
+ struct request_queue *this_q;
+ struct request *rq;
+ LIST_HEAD(list);
+ LIST_HEAD(ctx_list);
+ unsigned int depth;
+
+ list_splice_init(&plug->mq_list, &list);
+
+ list_sort(NULL, &list, plug_ctx_cmp);
+
+ this_q = NULL;
+ this_ctx = NULL;
+ depth = 0;
+
+ while (!list_empty(&list)) {
+ rq = list_entry_rq(list.next);
+ list_del_init(&rq->queuelist);
+ BUG_ON(!rq->q);
+ if (rq->mq_ctx != this_ctx) {
+ if (this_ctx) {
+ blk_mq_insert_requests(this_q, this_ctx,
+ &ctx_list, depth,
+ from_schedule);
+ }
+
+ this_ctx = rq->mq_ctx;
+ this_q = rq->q;
+ depth = 0;
+ }
+
+ depth++;
+ list_add_tail(&rq->queuelist, &ctx_list);
+ }
+
+ /*
+ * If 'this_ctx' is set, we know we have entries to complete
+ * on 'ctx_list'. Do those.
+ */
+ if (this_ctx) {
+ blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
+ from_schedule);
+ }
+}
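The function above is the consumer side of request plugging; a short sketch of
the submitter side using the existing plugging API (nr_bios and bios[] are
illustrative):

	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr_bios; i++)
		submit_bio(WRITE, bios[i]);	/* requests collect on plug->mq_list */
	blk_finish_plug(&plug);			/* drained via blk_mq_flush_plug_list() */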
+
+static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
+{
+ init_request_from_bio(rq, bio);
+ blk_account_io_start(rq, 1);
+}
+
+static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ const int is_sync = rw_is_sync(bio->bi_rw);
+ const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+ int rw = bio_data_dir(bio);
+ struct request *rq;
+ unsigned int use_plug, request_count = 0;
+
+ /*
+ * Plug the request unless it is a flush/FUA request; if we have
+ * multiple hardware queues, sync IO also bypasses the plug and goes
+ * directly to one of those.
+ */
+ use_plug = !is_flush_fua && ((q->nr_hw_queues == 1) || !is_sync);
+
+ blk_queue_bounce(q, &bio);
+
+ if (use_plug && blk_attempt_plug_merge(q, bio, &request_count))
+ return;
+
+ if (blk_mq_queue_enter(q)) {
+ bio_endio(bio, -EIO);
+ return;
+ }
+
+ ctx = blk_mq_get_ctx(q);
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ trace_block_getrq(q, bio, rw);
+ rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
+ if (likely(rq))
+ blk_mq_rq_ctx_init(ctx, rq, rw);
+ else {
+ blk_mq_put_ctx(ctx);
+ trace_block_sleeprq(q, bio, rw);
+ rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
+ false);
+ ctx = rq->mq_ctx;
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ }
+
+ hctx->queued++;
+
+ if (unlikely(is_flush_fua)) {
+ blk_mq_bio_to_request(rq, bio);
+ blk_mq_put_ctx(ctx);
+ blk_insert_flush(rq);
+ goto run_queue;
+ }
+
+ /*
+ * If a task plug exists, use it: since it is completely lockless, it can
+ * temporarily store requests until the task is either done or scheduled
+ * away.
+ */
+ if (use_plug) {
+ struct blk_plug *plug = current->plug;
+
+ if (plug) {
+ blk_mq_bio_to_request(rq, bio);
+ if (list_empty(&plug->list))
+ trace_block_plug(q);
+ else if (request_count >= BLK_MAX_REQUEST_COUNT) {
+ blk_flush_plug_list(plug, false);
+ trace_block_plug(q);
+ }
+ list_add_tail(&rq->queuelist, &plug->mq_list);
+ blk_mq_put_ctx(ctx);
+ return;
+ }
+ }
+
+ spin_lock(&ctx->lock);
+
+ if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+ blk_mq_attempt_merge(q, ctx, bio))
+ __blk_mq_free_request(hctx, ctx, rq);
+ else {
+ blk_mq_bio_to_request(rq, bio);
+ __blk_mq_insert_request(hctx, rq);
+ }
+
+ spin_unlock(&ctx->lock);
+ blk_mq_put_ctx(ctx);
+
+ /*
+ * For a SYNC request, send it to the hardware immediately. For an
+ * ASYNC request, just ensure that we run it later on. The latter
+ * allows for merging opportunities and more efficient dispatching.
+ */
+run_queue:
+ blk_mq_run_hw_queue(hctx, !is_sync || is_flush_fua);
+}
+
+/*
+ * Default CPU to hardware queue mapping: since there is one software queue
+ * per CPU, simply look up which hardware queue that CPU was assigned to.
+ */
+struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
+{
+ return q->queue_hw_ctx[q->mq_map[cpu]];
+}
+EXPORT_SYMBOL(blk_mq_map_queue);
+
+struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *reg,
+ unsigned int hctx_index)
+{
+ return kmalloc_node(sizeof(struct blk_mq_hw_ctx),
+ GFP_KERNEL | __GFP_ZERO, reg->numa_node);
+}
+EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
+
+void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_index)
+{
+ kfree(hctx);
+}
+EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
+
+static void blk_mq_hctx_notify(void *data, unsigned long action,
+ unsigned int cpu)
+{
+ struct blk_mq_hw_ctx *hctx = data;
+ struct blk_mq_ctx *ctx;
+ LIST_HEAD(tmp);
+
+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
+ return;
+
+ /*
+ * Move ctx entries to new CPU, if this one is going away.
+ */
+ ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+
+ spin_lock(&ctx->lock);
+ if (!list_empty(&ctx->rq_list)) {
+ list_splice_init(&ctx->rq_list, &tmp);
+ clear_bit(ctx->index_hw, hctx->ctx_map);
+ }
+ spin_unlock(&ctx->lock);
+
+ if (list_empty(&tmp))
+ return;
+
+ ctx = blk_mq_get_ctx(hctx->queue);
+ spin_lock(&ctx->lock);
+
+ while (!list_empty(&tmp)) {
+ struct request *rq;
+
+ rq = list_first_entry(&tmp, struct request, queuelist);
+ rq->mq_ctx = ctx;
+ list_move_tail(&rq->queuelist, &ctx->rq_list);
+ }
+
+ blk_mq_hctx_mark_pending(hctx, ctx);
+
+ spin_unlock(&ctx->lock);
+ blk_mq_put_ctx(ctx);
+}
+
+static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
+ void (*init)(void *, struct blk_mq_hw_ctx *,
+ struct request *, unsigned int),
+ void *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < hctx->queue_depth; i++) {
+ struct request *rq = hctx->rqs[i];
+
+ init(data, hctx, rq, i);
+ }
+}
+
+void blk_mq_init_commands(struct request_queue *q,
+ void (*init)(void *, struct blk_mq_hw_ctx *,
+ struct request *, unsigned int),
+ void *data)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_init_hw_commands(hctx, init, data);
+}
+EXPORT_SYMBOL(blk_mq_init_commands);
+
+static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
+{
+ struct page *page;
+
+ while (!list_empty(&hctx->page_list)) {
+ page = list_first_entry(&hctx->page_list, struct page, list);
+ list_del_init(&page->list);
+ __free_pages(page, page->private);
+ }
+
+ kfree(hctx->rqs);
+
+ if (hctx->tags)
+ blk_mq_free_tags(hctx->tags);
+}
+
+static size_t order_to_size(unsigned int order)
+{
+ size_t ret = PAGE_SIZE;
+
+ while (order--)
+ ret *= 2;
+
+ return ret;
+}
+
+static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
+ unsigned int reserved_tags, int node)
+{
+ unsigned int i, j, entries_per_page, max_order = 4;
+ size_t rq_size, left;
+
+ INIT_LIST_HEAD(&hctx->page_list);
+
+ hctx->rqs = kmalloc_node(hctx->queue_depth * sizeof(struct request *),
+ GFP_KERNEL, node);
+ if (!hctx->rqs)
+ return -ENOMEM;
+
+ /*
+ * rq_size is the size of the request plus driver payload, rounded
+ * to the cacheline size
+ */
+ rq_size = round_up(sizeof(struct request) + hctx->cmd_size,
+ cache_line_size());
+ left = rq_size * hctx->queue_depth;
+
+ for (i = 0; i < hctx->queue_depth;) {
+ int this_order = max_order;
+ struct page *page;
+ int to_do;
+ void *p;
+
+ while (left < order_to_size(this_order - 1) && this_order)
+ this_order--;
+
+ do {
+ page = alloc_pages_node(node, GFP_KERNEL, this_order);
+ if (page)
+ break;
+ if (!this_order--)
+ break;
+ if (order_to_size(this_order) < rq_size)
+ break;
+ } while (1);
+
+ if (!page)
+ break;
+
+ page->private = this_order;
+ list_add_tail(&page->list, &hctx->page_list);
+
+ p = page_address(page);
+ entries_per_page = order_to_size(this_order) / rq_size;
+ to_do = min(entries_per_page, hctx->queue_depth - i);
+ left -= to_do * rq_size;
+ for (j = 0; j < to_do; j++) {
+ hctx->rqs[i] = p;
+ blk_mq_rq_init(hctx, hctx->rqs[i]);
+ p += rq_size;
+ i++;
+ }
+ }
+
+ if (i < (reserved_tags + BLK_MQ_TAG_MIN))
+ goto err_rq_map;
+ else if (i != hctx->queue_depth) {
+ hctx->queue_depth = i;
+ pr_warn("%s: queue depth set to %u because of low memory\n",
+ __func__, i);
+ }
+
+ hctx->tags = blk_mq_init_tags(hctx->queue_depth, reserved_tags, node);
+ if (!hctx->tags) {
+err_rq_map:
+ blk_mq_free_rq_map(hctx);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
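To make the sizing logic above concrete, a worked example under assumed
numbers (4 KiB pages, and an rq_size that happens to round to 448 bytes for a
hypothetical cmd_size):

	/*
	 * Assume queue_depth == 64 and rq_size == 448, so
	 *
	 *	left = 448 * 64 = 28672 bytes
	 *
	 * this_order starts at max_order (4, i.e. 64 KiB). The while loop
	 * drops it to 3 because order_to_size(3) == 32 KiB still covers
	 * 'left', but not to 2 (16 KiB < 28672). A single order-3 allocation
	 * then gives entries_per_page = 32768 / 448 = 73, so all 64 requests
	 * are carved out of one block and the outer loop ends after one
	 * iteration.
	 */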
+
+static int blk_mq_init_hw_queues(struct request_queue *q,
+ struct blk_mq_reg *reg, void *driver_data)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i, j;
+
+ /*
+ * Initialize hardware queues
+ */
+ queue_for_each_hw_ctx(q, hctx, i) {
+ unsigned int num_maps;
+ int node;
+
+ node = hctx->numa_node;
+ if (node == NUMA_NO_NODE)
+ node = hctx->numa_node = reg->numa_node;
+
+ INIT_DELAYED_WORK(&hctx->delayed_work, blk_mq_work_fn);
+ spin_lock_init(&hctx->lock);
+ INIT_LIST_HEAD(&hctx->dispatch);
+ hctx->queue = q;
+ hctx->queue_num = i;
+ hctx->flags = reg->flags;
+ hctx->queue_depth = reg->queue_depth;
+ hctx->cmd_size = reg->cmd_size;
+
+ blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
+ blk_mq_hctx_notify, hctx);
+ blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+
+ if (blk_mq_init_rq_map(hctx, reg->reserved_tags, node))
+ break;
+
+ /*
+ * Allocate space for all possible CPUs to avoid allocating at
+ * runtime.
+ */
+ hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
+ GFP_KERNEL, node);
+ if (!hctx->ctxs)
+ break;
+
+ num_maps = ALIGN(nr_cpu_ids, BITS_PER_LONG) / BITS_PER_LONG;
+ hctx->ctx_map = kzalloc_node(num_maps * sizeof(unsigned long),
+ GFP_KERNEL, node);
+ if (!hctx->ctx_map)
+ break;
+
+ hctx->nr_ctx_map = num_maps;
+ hctx->nr_ctx = 0;
+
+ if (reg->ops->init_hctx &&
+ reg->ops->init_hctx(hctx, driver_data, i))
+ break;
+ }
+
+ if (i == q->nr_hw_queues)
+ return 0;
+
+ /*
+ * Init failed
+ */
+ queue_for_each_hw_ctx(q, hctx, j) {
+ if (i == j)
+ break;
+
+ if (reg->ops->exit_hctx)
+ reg->ops->exit_hctx(hctx, j);
+
+ blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+ blk_mq_free_rq_map(hctx);
+ kfree(hctx->ctxs);
+ }
+
+ return 1;
+}
+
+static void blk_mq_init_cpu_queues(struct request_queue *q,
+ unsigned int nr_hw_queues)
+{
+ unsigned int i;
+
+ for_each_possible_cpu(i) {
+ struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
+ struct blk_mq_hw_ctx *hctx;
+
+ memset(__ctx, 0, sizeof(*__ctx));
+ __ctx->cpu = i;
+ spin_lock_init(&__ctx->lock);
+ INIT_LIST_HEAD(&__ctx->rq_list);
+ __ctx->queue = q;
+
+ /* If the CPU isn't online, it is mapped to the first hctx */
+ hctx = q->mq_ops->map_queue(q, i);
+ hctx->nr_ctx++;
+
+ if (!cpu_online(i))
+ continue;
+
+ /*
+ * Set local node, IFF we have more than one hw queue. If
+ * not, we remain on the home node of the device
+ */
+ if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
+ hctx->numa_node = cpu_to_node(i);
+ }
+}
+
+static void blk_mq_map_swqueue(struct request_queue *q)
+{
+ unsigned int i;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ hctx->nr_ctx = 0;
+ }
+
+ /*
+ * Map software to hardware queues
+ */
+ queue_for_each_ctx(q, ctx, i) {
+ /* If the CPU isn't online, it is mapped to the first hctx */
+ hctx = q->mq_ops->map_queue(q, i);
+ ctx->index_hw = hctx->nr_ctx;
+ hctx->ctxs[hctx->nr_ctx++] = ctx;
+ }
+}
+
+struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
+ void *driver_data)
+{
+ struct blk_mq_hw_ctx **hctxs;
+ struct blk_mq_ctx *ctx;
+ struct request_queue *q;
+ int i;
+
+ if (!reg->nr_hw_queues ||
+ !reg->ops->queue_rq || !reg->ops->map_queue ||
+ !reg->ops->alloc_hctx || !reg->ops->free_hctx)
+ return ERR_PTR(-EINVAL);
+
+ if (!reg->queue_depth)
+ reg->queue_depth = BLK_MQ_MAX_DEPTH;
+ else if (reg->queue_depth > BLK_MQ_MAX_DEPTH) {
+ pr_err("blk-mq: queuedepth too large (%u)\n", reg->queue_depth);
+ reg->queue_depth = BLK_MQ_MAX_DEPTH;
+ }
+
+ /*
+ * Set aside a tag for flush requests. It will only be used while
+ * another flush request is in progress but outside the driver.
+ *
+ * TODO: only allocate if flushes are supported
+ */
+ reg->queue_depth++;
+ reg->reserved_tags++;
+
+ if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
+ return ERR_PTR(-EINVAL);
+
+ ctx = alloc_percpu(struct blk_mq_ctx);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ hctxs = kmalloc_node(reg->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
+ reg->numa_node);
+
+ if (!hctxs)
+ goto err_percpu;
+
+ for (i = 0; i < reg->nr_hw_queues; i++) {
+ hctxs[i] = reg->ops->alloc_hctx(reg, i);
+ if (!hctxs[i])
+ goto err_hctxs;
+
+ hctxs[i]->numa_node = NUMA_NO_NODE;
+ hctxs[i]->queue_num = i;
+ }
+
+ q = blk_alloc_queue_node(GFP_KERNEL, reg->numa_node);
+ if (!q)
+ goto err_hctxs;
+
+ q->mq_map = blk_mq_make_queue_map(reg);
+ if (!q->mq_map)
+ goto err_map;
+
+ setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
+ blk_queue_rq_timeout(q, 30000);
+
+ q->nr_queues = nr_cpu_ids;
+ q->nr_hw_queues = reg->nr_hw_queues;
+
+ q->queue_ctx = ctx;
+ q->queue_hw_ctx = hctxs;
+
+ q->mq_ops = reg->ops;
+
+ blk_queue_make_request(q, blk_mq_make_request);
+ blk_queue_rq_timed_out(q, reg->ops->timeout);
+ if (reg->timeout)
+ blk_queue_rq_timeout(q, reg->timeout);
+
+ blk_mq_init_flush(q);
+ blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
+
+ if (blk_mq_init_hw_queues(q, reg, driver_data))
+ goto err_hw;
+
+ blk_mq_map_swqueue(q);
+
+ mutex_lock(&all_q_mutex);
+ list_add_tail(&q->all_q_node, &all_q_list);
+ mutex_unlock(&all_q_mutex);
+
+ return q;
+err_hw:
+ kfree(q->mq_map);
+err_map:
+ blk_cleanup_queue(q);
+err_hctxs:
+ for (i = 0; i < reg->nr_hw_queues; i++) {
+ if (!hctxs[i])
+ break;
+ reg->ops->free_hctx(hctxs[i], i);
+ }
+ kfree(hctxs);
+err_percpu:
+ free_percpu(ctx);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
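blk_mq_init_queue() is the registration entry point for drivers; a hedged
sketch of a minimal setup, using only the reg/ops fields referenced above (the
mydrv names are hypothetical, and the ops/reg structures themselves live in
the public blk-mq header added by this series):

	static struct blk_mq_ops mydrv_mq_ops = {
		.queue_rq	= mydrv_queue_rq,		/* dispatch one request */
		.map_queue	= blk_mq_map_queue,		/* default CPU -> hctx map */
		.alloc_hctx	= blk_mq_alloc_single_hw_queue,
		.free_hctx	= blk_mq_free_single_hw_queue,
	};

	static struct blk_mq_reg mydrv_mq_reg = {
		.ops		= &mydrv_mq_ops,
		.nr_hw_queues	= 1,
		.queue_depth	= 64,
		.cmd_size	= sizeof(struct mydrv_cmd),	/* per-request payload */
		.numa_node	= NUMA_NO_NODE,
		.flags		= BLK_MQ_F_SHOULD_MERGE,
	};

	q = blk_mq_init_queue(&mydrv_mq_reg, drv);
	if (IS_ERR(q))
		return PTR_ERR(q);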
+
+void blk_mq_free_queue(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ cancel_delayed_work_sync(&hctx->delayed_work);
+ kfree(hctx->ctx_map);
+ kfree(hctx->ctxs);
+ blk_mq_free_rq_map(hctx);
+ blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+ if (q->mq_ops->exit_hctx)
+ q->mq_ops->exit_hctx(hctx, i);
+ q->mq_ops->free_hctx(hctx, i);
+ }
+
+ free_percpu(q->queue_ctx);
+ kfree(q->queue_hw_ctx);
+ kfree(q->mq_map);
+
+ q->queue_ctx = NULL;
+ q->queue_hw_ctx = NULL;
+ q->mq_map = NULL;
+
+ mutex_lock(&all_q_mutex);
+ list_del_init(&q->all_q_node);
+ mutex_unlock(&all_q_mutex);
+}
+EXPORT_SYMBOL(blk_mq_free_queue);
+
+/* Basically redo blk_mq_init_queue with queue frozen */
+static void __cpuinit blk_mq_queue_reinit(struct request_queue *q)
+{
+ blk_mq_freeze_queue(q);
+
+ blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
+
+ /*
+ * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
+ * we should change hctx numa_node according to the new topology (this
+ * involves freeing and re-allocating memory; is it worth doing?)
+ */
+
+ blk_mq_map_swqueue(q);
+
+ blk_mq_unfreeze_queue(q);
+}
+
+static int __cpuinit blk_mq_queue_reinit_notify(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+ struct request_queue *q;
+
+ /*
+ * Before the new mapping is established, a hot-added CPU might already
+ * start handling requests. This doesn't break anything, as we map offline
+ * CPUs to the first hardware queue. We will re-init the queues below to
+ * get optimal settings.
+ */
+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
+ action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
+ return NOTIFY_OK;
+
+ mutex_lock(&all_q_mutex);
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_queue_reinit(q);
+ mutex_unlock(&all_q_mutex);
+ return NOTIFY_OK;
+}
+
+static int __init blk_mq_init(void)
+{
+ unsigned int i;
+
+ for_each_possible_cpu(i)
+ init_llist_head(&per_cpu(ipi_lists, i));
+
+ blk_mq_cpu_init();
+
+ /* Must be called after percpu_counter_hotcpu_callback() */
+ hotcpu_notifier(blk_mq_queue_reinit_notify, -10);
+
+ return 0;
+}
+subsys_initcall(blk_mq_init);
diff --git a/block/blk-mq.h b/block/blk-mq.h
new file mode 100644
index 000000000000..52bf1f96a2c2
--- /dev/null
+++ b/block/blk-mq.h
@@ -0,0 +1,52 @@
+#ifndef INT_BLK_MQ_H
+#define INT_BLK_MQ_H
+
+struct blk_mq_ctx {
+ struct {
+ spinlock_t lock;
+ struct list_head rq_list;
+ } ____cacheline_aligned_in_smp;
+
+ unsigned int cpu;
+ unsigned int index_hw;
+ unsigned int ipi_redirect;
+
+ /* incremented at dispatch time */
+ unsigned long rq_dispatched[2];
+ unsigned long rq_merged;
+
+ /* incremented at completion time */
+ unsigned long ____cacheline_aligned_in_smp rq_completed[2];
+
+ struct request_queue *queue;
+ struct kobject kobj;
+};
+
+void __blk_mq_end_io(struct request *rq, int error);
+void blk_mq_complete_request(struct request *rq, int error);
+void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
+void blk_mq_init_flush(struct request_queue *q);
+
+/*
+ * CPU hotplug helpers
+ */
+struct blk_mq_cpu_notifier;
+void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
+ void (*fn)(void *, unsigned long, unsigned int),
+ void *data);
+void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
+void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
+void blk_mq_cpu_init(void);
+DECLARE_PER_CPU(struct llist_head, ipi_lists);
+
+/*
+ * CPU -> queue mappings
+ */
+struct blk_mq_reg;
+extern unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg);
+extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
+
+void blk_mq_add_timer(struct request *rq);
+
+#endif
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c50ecf0ea3b1..05e826793e4e 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -144,6 +144,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
lim->discard_zeroes_data = 1;
lim->max_segments = USHRT_MAX;
lim->max_hw_sectors = UINT_MAX;
+ lim->max_segment_size = UINT_MAX;
lim->max_sectors = UINT_MAX;
lim->max_write_same_sectors = UINT_MAX;
}
@@ -195,17 +196,17 @@ EXPORT_SYMBOL(blk_queue_make_request);
/**
* blk_queue_bounce_limit - set bounce buffer limit for queue
* @q: the request queue for the device
- * @dma_mask: the maximum address the device can handle
+ * @max_addr: the maximum address the device can handle
*
* Description:
* Different hardware can have different requirements as to what pages
* it can do I/O directly to. A low level driver can call
* blk_queue_bounce_limit to have lower memory pages allocated as bounce
- * buffers for doing I/O to pages residing above @dma_mask.
+ * buffers for doing I/O to pages residing above @max_addr.
**/
-void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
+void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
- unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
+ unsigned long b_pfn = max_addr >> PAGE_SHIFT;
int dma = 0;
q->bounce_gfp = GFP_NOIO;
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index ec9e60636f43..ce4b8bfd3d27 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -23,7 +23,7 @@ static void blk_done_softirq(struct softirq_action *h)
struct list_head *cpu_list, local_list;
local_irq_disable();
- cpu_list = &__get_cpu_var(blk_cpu_done);
+ cpu_list = this_cpu_ptr(&blk_cpu_done);
list_replace_init(cpu_list, &local_list);
local_irq_enable();
@@ -44,7 +44,7 @@ static void trigger_softirq(void *data)
struct list_head *list;
local_irq_save(flags);
- list = &__get_cpu_var(blk_cpu_done);
+ list = this_cpu_ptr(&blk_cpu_done);
list_add_tail(&rq->csd.list, list);
if (list->next == &rq->csd.list)
@@ -90,7 +90,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
local_irq_disable();
list_splice_init(&per_cpu(blk_cpu_done, cpu),
- &__get_cpu_var(blk_cpu_done));
+ this_cpu_ptr(&blk_cpu_done));
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_enable();
}
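The conversions in this file are mechanical: taking the address of
__get_cpu_var(x) is equivalent to this_cpu_ptr(&x), the preferred accessor.
For instance:

	struct list_head *list;

	list = &__get_cpu_var(blk_cpu_done);	/* old spelling, being phased out */
	list = this_cpu_ptr(&blk_cpu_done);	/* new spelling, same pointer */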
@@ -135,7 +135,7 @@ void __blk_complete_request(struct request *req)
if (ccpu == cpu || shared) {
struct list_head *list;
do_local:
- list = &__get_cpu_var(blk_cpu_done);
+ list = this_cpu_ptr(&blk_cpu_done);
list_add_tail(&req->csd.list, list);
/*
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3aa5b195f4dd..4f8c4d90ec73 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -7,6 +7,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
+#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-cgroup.h"
@@ -542,6 +543,11 @@ static void blk_release_queue(struct kobject *kobj)
if (q->queue_tags)
__blk_queue_free_tags(q);
+ percpu_counter_destroy(&q->mq_usage_counter);
+
+ if (q->mq_ops)
+ blk_mq_free_queue(q);
+
blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info);
@@ -575,6 +581,7 @@ int blk_register_queue(struct gendisk *disk)
* bypass from queue allocation.
*/
blk_queue_bypass_end(q);
+ queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
ret = blk_trace_init_sysfs(dev);
if (ret)
@@ -588,6 +595,9 @@ int blk_register_queue(struct gendisk *disk)
kobject_uevent(&q->kobj, KOBJ_ADD);
+ if (q->mq_ops)
+ blk_mq_register_disk(disk);
+
if (!q->request_fn)
return 0;
@@ -610,6 +620,9 @@ void blk_unregister_queue(struct gendisk *disk)
if (WARN_ON(!q))
return;
+ if (q->mq_ops)
+ blk_mq_unregister_disk(disk);
+
if (q->request_fn)
elv_unregister_queue(q);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 8331aba9426f..91573efcf8e7 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -867,14 +867,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
do_div(tmp, HZ);
bytes_allowed = tmp;
- if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+ if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
if (wait)
*wait = 0;
return 1;
}
/* Calc approx time to dispatch */
- extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+ extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
if (!jiffy_wait)
@@ -977,7 +977,7 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
bool rw = bio_data_dir(bio);
/* Charge the bio to the group */
- tg->bytes_disp[rw] += bio->bi_size;
+ tg->bytes_disp[rw] += bio->bi_iter.bi_size;
tg->io_disp[rw]++;
/*
@@ -993,8 +993,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
*/
if (!(bio->bi_rw & REQ_THROTTLED)) {
bio->bi_rw |= REQ_THROTTLED;
- throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
- bio->bi_rw);
+ throtl_update_dispatch_stats(tg_to_blkg(tg),
+ bio->bi_iter.bi_size, bio->bi_rw);
}
}
@@ -1498,7 +1498,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
if (tg) {
if (!tg->has_rules[rw]) {
throtl_update_dispatch_stats(tg_to_blkg(tg),
- bio->bi_size, bio->bi_rw);
+ bio->bi_iter.bi_size, bio->bi_rw);
goto out_unlock_rcu;
}
}
@@ -1554,7 +1554,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
/* out-of-limit, queue to @tg */
throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
rw == READ ? 'R' : 'W',
- tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+ tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
tg->io_disp[rw], tg->iops[rw],
sq->nr_queued[READ], sq->nr_queued[WRITE]);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 65f103563969..4e9cfd16183c 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -7,6 +7,7 @@
#include <linux/fault-inject.h>
#include "blk.h"
+#include "blk-mq.h"
#ifdef CONFIG_FAIL_IO_TIMEOUT
@@ -88,11 +89,18 @@ static void blk_rq_timed_out(struct request *req)
ret = q->rq_timed_out_fn(req);
switch (ret) {
case BLK_EH_HANDLED:
- __blk_complete_request(req);
+ /* Can we use req->errors here? */
+ if (q->mq_ops)
+ blk_mq_complete_request(req, req->errors);
+ else
+ __blk_complete_request(req);
break;
case BLK_EH_RESET_TIMER:
+ if (q->mq_ops)
+ blk_mq_add_timer(req);
+ else
+ blk_add_timer(req);
blk_clear_rq_complete(req);
- blk_add_timer(req);
break;
case BLK_EH_NOT_HANDLED:
/*
@@ -108,6 +116,23 @@ static void blk_rq_timed_out(struct request *req)
}
}
+void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
+ unsigned int *next_set)
+{
+ if (time_after_eq(jiffies, rq->deadline)) {
+ list_del_init(&rq->timeout_list);
+
+ /*
+ * Check if we raced with end io completion
+ */
+ if (!blk_mark_rq_complete(rq))
+ blk_rq_timed_out(rq);
+ } else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
+ *next_timeout = rq->deadline;
+ *next_set = 1;
+ }
+}
+
void blk_rq_timed_out_timer(unsigned long data)
{
struct request_queue *q = (struct request_queue *) data;
@@ -117,21 +142,8 @@ void blk_rq_timed_out_timer(unsigned long data)
spin_lock_irqsave(q->queue_lock, flags);
- list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
- if (time_after_eq(jiffies, rq->deadline)) {
- list_del_init(&rq->timeout_list);
-
- /*
- * Check if we raced with end io completion
- */
- if (blk_mark_rq_complete(rq))
- continue;
- blk_rq_timed_out(rq);
- } else if (!next_set || time_after(next, rq->deadline)) {
- next = rq->deadline;
- next_set = 1;
- }
- }
+ list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
+ blk_rq_check_expired(rq, &next, &next_set);
if (next_set)
mod_timer(&q->timeout, round_jiffies_up(next));
@@ -157,15 +169,7 @@ void blk_abort_request(struct request *req)
}
EXPORT_SYMBOL_GPL(blk_abort_request);
-/**
- * blk_add_timer - Start timeout timer for a single request
- * @req: request that is about to start running.
- *
- * Notes:
- * Each request has its own timer, and as it is added to the queue, we
- * set up the timer. When the request completes, we cancel the timer.
- */
-void blk_add_timer(struct request *req)
+void __blk_add_timer(struct request *req, struct list_head *timeout_list)
{
struct request_queue *q = req->q;
unsigned long expiry;
@@ -174,7 +178,6 @@ void blk_add_timer(struct request *req)
return;
BUG_ON(!list_empty(&req->timeout_list));
- BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
/*
* Some LLDs, like scsi, peek at the timeout to prevent a
@@ -184,7 +187,8 @@ void blk_add_timer(struct request *req)
req->timeout = q->rq_timeout;
req->deadline = jiffies + req->timeout;
- list_add_tail(&req->timeout_list, &q->timeout_list);
+ if (timeout_list)
+ list_add_tail(&req->timeout_list, timeout_list);
/*
* If the timer isn't already pending or this timeout is earlier
@@ -196,5 +200,19 @@ void blk_add_timer(struct request *req)
if (!timer_pending(&q->timeout) ||
time_before(expiry, q->timeout.expires))
mod_timer(&q->timeout, expiry);
+
+}
+
+/**
+ * blk_add_timer - Start timeout timer for a single request
+ * @req: request that is about to start running.
+ *
+ * Notes:
+ * Each request has its own timer, and as it is added to the queue, we
+ * set up the timer. When the request completes, we cancel the timer.
+ */
+void blk_add_timer(struct request *req)
+{
+ __blk_add_timer(req, &req->q->timeout_list);
}
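For context, a sketch of the driver-side timeout hook that the BLK_EH_*
handling above services; the handler name and the abort helper are
hypothetical, and with blk-mq it is wired up via reg->ops->timeout, which
blk_mq_init_queue() passes to blk_queue_rq_timed_out():

	static enum blk_eh_timer_return mydrv_rq_timed_out(struct request *rq)
	{
		/* Hypothetical abort; on success the core completes the request. */
		if (mydrv_abort_cmd(rq->q->queuedata, rq))
			return BLK_EH_HANDLED;

		return BLK_EH_RESET_TIMER;	/* re-arm the timer, give it more time */
	}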
diff --git a/block/blk.h b/block/blk.h
index e837b8f619b7..c90e1d8f7a2b 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -10,6 +10,7 @@
#define BLK_BATCH_REQ 32
extern struct kmem_cache *blk_requestq_cachep;
+extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;
@@ -34,14 +35,30 @@ bool __blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes);
void blk_rq_timed_out_timer(unsigned long data);
+void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
+ unsigned int *next_set);
+void __blk_add_timer(struct request *req, struct list_head *timeout_list);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
+
+bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
+ struct bio *bio);
+bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
+ struct bio *bio);
+bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
+ unsigned int *request_count);
+
+void blk_account_io_start(struct request *req, bool new_io);
+void blk_account_io_completion(struct request *req, unsigned int bytes);
+void blk_account_io_done(struct request *req);
+
/*
* Internal atomic flags for request handling
*/
enum rq_atomic_flags {
REQ_ATOM_COMPLETE = 0,
+ REQ_ATOM_STARTED,
};
/*
diff --git a/block/elevator.c b/block/elevator.c
index 2bcbd8cc14d4..42c45a7d6714 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -186,6 +186,12 @@ int elevator_init(struct request_queue *q, char *name)
struct elevator_type *e = NULL;
int err;
+ /*
+ * q->sysfs_lock must be held to provide mutual exclusion between
+ * elevator_switch() and here.
+ */
+ lockdep_assert_held(&q->sysfs_lock);
+
if (unlikely(q->elevator))
return 0;
@@ -434,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
/*
* See if our hash lookup can find a potential backmerge.
*/
- __rq = elv_rqhash_find(q, bio->bi_sector);
+ __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
if (__rq && elv_rq_merge_ok(__rq, bio)) {
*req = __rq;
return ELEVATOR_BACK_MERGE;
@@ -959,7 +965,7 @@ fail_init:
/*
* Switch this queue to the given IO scheduler.
*/
-int elevator_change(struct request_queue *q, const char *name)
+static int __elevator_change(struct request_queue *q, const char *name)
{
char elevator_name[ELV_NAME_MAX];
struct elevator_type *e;
@@ -981,6 +987,18 @@ int elevator_change(struct request_queue *q, const char *name)
return elevator_switch(q, e);
}
+
+int elevator_change(struct request_queue *q, const char *name)
+{
+ int ret;
+
+ /* Protect q->elevator from elevator_init() */
+ mutex_lock(&q->sysfs_lock);
+ ret = __elevator_change(q, name);
+ mutex_unlock(&q->sysfs_lock);
+
+ return ret;
+}
EXPORT_SYMBOL(elevator_change);
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
@@ -991,7 +1009,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
if (!q->elevator)
return count;
- ret = elevator_change(q, name);
+ ret = __elevator_change(q, name);
if (!ret)
return count;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a5ffcc988f0b..625e3e471d65 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -286,7 +286,8 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
struct sg_io_hdr *hdr, fmode_t mode)
{
unsigned long start_time;
- int writing = 0, ret = 0;
+ ssize_t ret = 0;
+ int writing = 0;
struct request *rq;
char sense[SCSI_SENSE_BUFFERSIZE];
struct bio *bio;
@@ -321,37 +322,16 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
}
if (hdr->iovec_count) {
- const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
size_t iov_data_len;
- struct sg_iovec *sg_iov;
struct iovec *iov;
- int i;
- sg_iov = kmalloc(size, GFP_KERNEL);
- if (!sg_iov) {
- ret = -ENOMEM;
+ ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
+ 0, NULL, &iov);
+ if (ret < 0)
goto out;
- }
-
- if (copy_from_user(sg_iov, hdr->dxferp, size)) {
- kfree(sg_iov);
- ret = -EFAULT;
- goto out;
- }
- /*
- * Sum up the vecs, making sure they don't overflow
- */
- iov = (struct iovec *) sg_iov;
- iov_data_len = 0;
- for (i = 0; i < hdr->iovec_count; i++) {
- if (iov_data_len + iov[i].iov_len < iov_data_len) {
- kfree(sg_iov);
- ret = -EINVAL;
- goto out;
- }
- iov_data_len += iov[i].iov_len;
- }
+ iov_data_len = ret;
+ ret = 0;
/* SG_IO howto says that the shorter of the two wins */
if (hdr->dxfer_len < iov_data_len) {
@@ -361,9 +341,10 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
iov_data_len = hdr->dxfer_len;
}
- ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
+ ret = blk_rq_map_user_iov(q, rq, NULL, (struct sg_iovec *) iov,
+ hdr->iovec_count,
iov_data_len, GFP_KERNEL);
- kfree(sg_iov);
+ kfree(iov);
} else if (hdr->dxfer_len)
ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
GFP_KERNEL);
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 69ce573f1224..7bcb70d216e1 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -174,9 +174,8 @@ config CRYPTO_TEST
help
Quick & dirty crypto test module.
-config CRYPTO_ABLK_HELPER_X86
+config CRYPTO_ABLK_HELPER
tristate
- depends on X86
select CRYPTO_CRYPTD
config CRYPTO_GLUE_HELPER_X86
@@ -695,7 +694,7 @@ config CRYPTO_AES_NI_INTEL
select CRYPTO_AES_X86_64 if 64BIT
select CRYPTO_AES_586 if !64BIT
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_ALGAPI
select CRYPTO_GLUE_HELPER_X86 if 64BIT
select CRYPTO_LRW
@@ -776,6 +775,22 @@ config CRYPTO_AES_ARM
See <http://csrc.nist.gov/encryption/aes/> for more information.
+config CRYPTO_AES_ARM_BS
+ tristate "Bit sliced AES using NEON instructions"
+ depends on ARM && KERNEL_MODE_NEON
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES_ARM
+ select CRYPTO_ABLK_HELPER
+ help
+ Use a faster and more secure NEON-based implementation of AES in CBC,
+ CTR and XTS modes.
+
+ Bit sliced AES gives around a 45% speedup on Cortex-A15 for CTR mode
+ and for XTS mode encryption; the speedup for CBC and XTS mode
+ decryption is around 25%. (CBC encryption speed is not affected by
+ this driver.) This implementation does not rely on any lookup tables,
+ so it is believed to be invulnerable to cache timing attacks.
+
config CRYPTO_ANUBIS
tristate "Anubis cipher algorithm"
select CRYPTO_ALGAPI
@@ -879,7 +894,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
depends on CRYPTO
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_CAMELLIA_X86_64
select CRYPTO_LRW
@@ -901,7 +916,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
depends on CRYPTO
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_CAMELLIA_X86_64
select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
@@ -953,7 +968,7 @@ config CRYPTO_CAST5_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_CAST_COMMON
select CRYPTO_CAST5
help
@@ -976,7 +991,7 @@ config CRYPTO_CAST6_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_CAST_COMMON
select CRYPTO_CAST6
@@ -1094,7 +1109,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_LRW
@@ -1116,7 +1131,7 @@ config CRYPTO_SERPENT_SSE2_586
depends on X86 && !64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_LRW
@@ -1138,7 +1153,7 @@ config CRYPTO_SERPENT_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_LRW
@@ -1160,7 +1175,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_SERPENT_AVX_X86_64
@@ -1276,7 +1291,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
@@ -1386,6 +1401,9 @@ config CRYPTO_USER_API_SKCIPHER
This option enables the user-space interface for symmetric
key cipher algorithms.
+config CRYPTO_HASH_INFO
+ bool
+
source "drivers/crypto/Kconfig"
source crypto/asymmetric_keys/Kconfig
diff --git a/crypto/Makefile b/crypto/Makefile
index 80019ba8da3a..55af02aeec27 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -2,8 +2,13 @@
# Cryptographic API
#
+# memneq MUST be built with -Os or -O0 to prevent early-return optimizations
+# that would defeat memneq's purpose of preventing timing attacks.
+CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3
+CFLAGS_memneq.o := -Os
+
obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-y := api.o cipher.o compress.o
+crypto-y := api.o cipher.o compress.o memneq.o
obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
@@ -104,3 +109,5 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
obj-$(CONFIG_XOR_BLOCKS) += xor.o
obj-$(CONFIG_ASYNC_CORE) += async_tx/
obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
+obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
+obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
diff --git a/arch/x86/crypto/ablk_helper.c b/crypto/ablk_helper.c
index 43282fe04a8b..ffe7278d4bd8 100644
--- a/arch/x86/crypto/ablk_helper.c
+++ b/crypto/ablk_helper.c
@@ -28,10 +28,11 @@
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/hardirq.h>
#include <crypto/algapi.h>
#include <crypto/cryptd.h>
-#include <asm/i387.h>
-#include <asm/crypto/ablk_helper.h>
+#include <crypto/ablk_helper.h>
+#include <asm/simd.h>
int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
@@ -70,11 +71,11 @@ int ablk_encrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- if (!irq_fpu_usable()) {
+ if (!may_use_simd()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
- memcpy(cryptd_req, req, sizeof(*req));
+ *cryptd_req = *req;
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_ablkcipher_encrypt(cryptd_req);
@@ -89,11 +90,11 @@ int ablk_decrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- if (!irq_fpu_usable()) {
+ if (!may_use_simd()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
- memcpy(cryptd_req, req, sizeof(*req));
+ *cryptd_req = *req;
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_ablkcipher_decrypt(cryptd_req);
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 7d4a8d28277e..40886c489903 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -16,9 +16,7 @@
#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -30,8 +28,6 @@
#include "internal.h"
-static const char *skcipher_default_geniv __read_mostly;
-
struct ablkcipher_buffer {
struct list_head entry;
struct scatter_walk dst;
@@ -527,8 +523,7 @@ const char *crypto_default_geniv(const struct crypto_alg *alg)
alg->cra_blocksize)
return "chainiv";
- return alg->cra_flags & CRYPTO_ALG_ASYNC ?
- "eseqiv" : skcipher_default_geniv;
+ return "eseqiv";
}
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
@@ -709,17 +704,3 @@ err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
-
-static int __init skcipher_module_init(void)
-{
- skcipher_default_geniv = num_possible_cpus() > 1 ?
- "eseqiv" : "chainiv";
- return 0;
-}
-
-static void skcipher_module_exit(void)
-{
-}
-
-module_init(skcipher_module_init);
-module_exit(skcipher_module_exit);
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index c0bb3778f1ae..666f1962a160 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -230,11 +230,11 @@ remainder:
*/
if (byte_count < DEFAULT_BLK_SZ) {
empty_rbuf:
- for (; ctx->rand_data_valid < DEFAULT_BLK_SZ;
- ctx->rand_data_valid++) {
+ while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
*ptr = ctx->rand_data[ctx->rand_data_valid];
ptr++;
byte_count--;
+ ctx->rand_data_valid++;
if (byte_count == 0)
goto done;
}
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index 6d2c2ea12559..82e7d6b0c276 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -12,6 +12,8 @@ if ASYMMETRIC_KEY_TYPE
config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
tristate "Asymmetric public-key crypto algorithm subtype"
select MPILIB
+ select PUBLIC_KEY_ALGO_RSA
+ select CRYPTO_HASH_INFO
help
This option provides support for asymmetric public key type handling.
If signature generation and/or verification are to be used,
@@ -20,7 +22,6 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
config PUBLIC_KEY_ALGO_RSA
tristate "RSA public-key algorithm"
- depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
select MPILIB_EXTRA
help
This option enables support for the RSA algorithm (PKCS#1, RFC3447).
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index cf807654d221..b77eb5304788 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -209,6 +209,7 @@ struct key_type key_type_asymmetric = {
.match = asymmetric_key_match,
.destroy = asymmetric_key_destroy,
.describe = asymmetric_key_describe,
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE,
};
EXPORT_SYMBOL_GPL(key_type_asymmetric);
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index cb2e29180a87..97eb001960b9 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -22,29 +22,25 @@
MODULE_LICENSE("GPL");
-const char *const pkey_algo[PKEY_ALGO__LAST] = {
+const char *const pkey_algo_name[PKEY_ALGO__LAST] = {
[PKEY_ALGO_DSA] = "DSA",
[PKEY_ALGO_RSA] = "RSA",
};
-EXPORT_SYMBOL_GPL(pkey_algo);
+EXPORT_SYMBOL_GPL(pkey_algo_name);
-const char *const pkey_hash_algo[PKEY_HASH__LAST] = {
- [PKEY_HASH_MD4] = "md4",
- [PKEY_HASH_MD5] = "md5",
- [PKEY_HASH_SHA1] = "sha1",
- [PKEY_HASH_RIPE_MD_160] = "rmd160",
- [PKEY_HASH_SHA256] = "sha256",
- [PKEY_HASH_SHA384] = "sha384",
- [PKEY_HASH_SHA512] = "sha512",
- [PKEY_HASH_SHA224] = "sha224",
+const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST] = {
+#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \
+ defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE)
+ [PKEY_ALGO_RSA] = &RSA_public_key_algorithm,
+#endif
};
-EXPORT_SYMBOL_GPL(pkey_hash_algo);
+EXPORT_SYMBOL_GPL(pkey_algo);
-const char *const pkey_id_type[PKEY_ID_TYPE__LAST] = {
+const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST] = {
[PKEY_ID_PGP] = "PGP",
[PKEY_ID_X509] = "X509",
};
-EXPORT_SYMBOL_GPL(pkey_id_type);
+EXPORT_SYMBOL_GPL(pkey_id_type_name);
/*
* Provide a part of a description of the key for /proc/keys.
@@ -56,7 +52,7 @@ static void public_key_describe(const struct key *asymmetric_key,
if (key)
seq_printf(m, "%s.%s",
- pkey_id_type[key->id_type], key->algo->name);
+ pkey_id_type_name[key->id_type], key->algo->name);
}
/*
@@ -78,21 +74,45 @@ EXPORT_SYMBOL_GPL(public_key_destroy);
/*
* Verify a signature using a public key.
*/
-static int public_key_verify_signature(const struct key *key,
- const struct public_key_signature *sig)
+int public_key_verify_signature(const struct public_key *pk,
+ const struct public_key_signature *sig)
{
- const struct public_key *pk = key->payload.data;
+ const struct public_key_algorithm *algo;
+
+ BUG_ON(!pk);
+ BUG_ON(!pk->mpi[0]);
+ BUG_ON(!pk->mpi[1]);
+ BUG_ON(!sig);
+ BUG_ON(!sig->digest);
+ BUG_ON(!sig->mpi[0]);
+
+ algo = pk->algo;
+ if (!algo) {
+ if (pk->pkey_algo >= PKEY_ALGO__LAST)
+ return -ENOPKG;
+ algo = pkey_algo[pk->pkey_algo];
+ if (!algo)
+ return -ENOPKG;
+ }
- if (!pk->algo->verify_signature)
+ if (!algo->verify_signature)
return -ENOTSUPP;
- if (sig->nr_mpi != pk->algo->n_sig_mpi) {
+ if (sig->nr_mpi != algo->n_sig_mpi) {
pr_debug("Signature has %u MPI not %u\n",
- sig->nr_mpi, pk->algo->n_sig_mpi);
+ sig->nr_mpi, algo->n_sig_mpi);
return -EINVAL;
}
- return pk->algo->verify_signature(pk, sig);
+ return algo->verify_signature(pk, sig);
+}
+EXPORT_SYMBOL_GPL(public_key_verify_signature);
+
+static int public_key_verify_signature_2(const struct key *key,
+ const struct public_key_signature *sig)
+{
+ const struct public_key *pk = key->payload.data;
+ return public_key_verify_signature(pk, sig);
}
/*
@@ -103,6 +123,6 @@ struct asymmetric_key_subtype public_key_subtype = {
.name = "public_key",
.describe = public_key_describe,
.destroy = public_key_destroy,
- .verify_signature = public_key_verify_signature,
+ .verify_signature = public_key_verify_signature_2,
};
EXPORT_SYMBOL_GPL(public_key_subtype);
diff --git a/crypto/asymmetric_keys/public_key.h b/crypto/asymmetric_keys/public_key.h
index 5e5e35626899..5c37a22a0637 100644
--- a/crypto/asymmetric_keys/public_key.h
+++ b/crypto/asymmetric_keys/public_key.h
@@ -28,3 +28,9 @@ struct public_key_algorithm {
};
extern const struct public_key_algorithm RSA_public_key_algorithm;
+
+/*
+ * public_key.c
+ */
+extern int public_key_verify_signature(const struct public_key *pk,
+ const struct public_key_signature *sig);
diff --git a/crypto/asymmetric_keys/rsa.c b/crypto/asymmetric_keys/rsa.c
index 4a6a0696f8a3..459cf97a75e2 100644
--- a/crypto/asymmetric_keys/rsa.c
+++ b/crypto/asymmetric_keys/rsa.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <crypto/algapi.h>
#include "public_key.h"
MODULE_LICENSE("GPL");
@@ -73,13 +74,13 @@ static const struct {
size_t size;
} RSA_ASN1_templates[PKEY_HASH__LAST] = {
#define _(X) { RSA_digest_info_##X, sizeof(RSA_digest_info_##X) }
- [PKEY_HASH_MD5] = _(MD5),
- [PKEY_HASH_SHA1] = _(SHA1),
- [PKEY_HASH_RIPE_MD_160] = _(RIPE_MD_160),
- [PKEY_HASH_SHA256] = _(SHA256),
- [PKEY_HASH_SHA384] = _(SHA384),
- [PKEY_HASH_SHA512] = _(SHA512),
- [PKEY_HASH_SHA224] = _(SHA224),
+ [HASH_ALGO_MD5] = _(MD5),
+ [HASH_ALGO_SHA1] = _(SHA1),
+ [HASH_ALGO_RIPE_MD_160] = _(RIPE_MD_160),
+ [HASH_ALGO_SHA256] = _(SHA256),
+ [HASH_ALGO_SHA384] = _(SHA384),
+ [HASH_ALGO_SHA512] = _(SHA512),
+ [HASH_ALGO_SHA224] = _(SHA224),
#undef _
};
@@ -189,12 +190,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size,
}
}
- if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) {
+ if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) {
kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]");
return -EBADMSG;
}
- if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) {
+ if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) {
kleave(" = -EKEYREJECTED [EM[T] hash mismatch]");
return -EKEYREJECTED;
}
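The memcmp() to crypto_memneq() switch above is the standard pattern for
comparing digests or other secret-derived data: crypto_memneq() (declared in
<crypto/algapi.h>, hence the new include) takes time independent of where the
buffers first differ. A minimal sketch of the same pattern in a hypothetical
verifier:

	#include <crypto/algapi.h>

	/* Constant-time digest check: don't leak the position of a mismatch. */
	static bool digest_matches(const u8 *expected, const u8 *computed, size_t len)
	{
		return crypto_memneq(expected, computed, len) == 0;
	}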
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index facbf26bc6bb..29893162497c 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -47,6 +47,8 @@ void x509_free_certificate(struct x509_certificate *cert)
kfree(cert->subject);
kfree(cert->fingerprint);
kfree(cert->authority);
+ kfree(cert->sig.digest);
+ mpi_free(cert->sig.rsa.s);
kfree(cert);
}
}
@@ -152,33 +154,33 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
return -ENOPKG; /* Unsupported combination */
case OID_md4WithRSAEncryption:
- ctx->cert->sig_hash_algo = PKEY_HASH_MD5;
- ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.pkey_hash_algo = HASH_ALGO_MD5;
+ ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha1WithRSAEncryption:
- ctx->cert->sig_hash_algo = PKEY_HASH_SHA1;
- ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA1;
+ ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha256WithRSAEncryption:
- ctx->cert->sig_hash_algo = PKEY_HASH_SHA256;
- ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA256;
+ ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha384WithRSAEncryption:
- ctx->cert->sig_hash_algo = PKEY_HASH_SHA384;
- ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA384;
+ ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha512WithRSAEncryption:
- ctx->cert->sig_hash_algo = PKEY_HASH_SHA512;
- ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA512;
+ ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha224WithRSAEncryption:
- ctx->cert->sig_hash_algo = PKEY_HASH_SHA224;
- ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA224;
+ ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
break;
}
@@ -203,8 +205,8 @@ int x509_note_signature(void *context, size_t hdrlen,
return -EINVAL;
}
- ctx->cert->sig = value;
- ctx->cert->sig_size = vlen;
+ ctx->cert->raw_sig = value;
+ ctx->cert->raw_sig_size = vlen;
return 0;
}
@@ -343,8 +345,9 @@ int x509_extract_key_data(void *context, size_t hdrlen,
if (ctx->last_oid != OID_rsaEncryption)
return -ENOPKG;
- /* There seems to be an extraneous 0 byte on the front of the data */
- ctx->cert->pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->pub->pkey_algo = PKEY_ALGO_RSA;
+
+ /* Discard the BIT STRING metadata */
ctx->key = value + 1;
ctx->key_size = vlen - 1;
return 0;
diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
index f86dc5fcc4ad..87d9cc26f630 100644
--- a/crypto/asymmetric_keys/x509_parser.h
+++ b/crypto/asymmetric_keys/x509_parser.h
@@ -9,6 +9,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#include <linux/time.h>
#include <crypto/public_key.h>
struct x509_certificate {
@@ -20,13 +21,11 @@ struct x509_certificate {
char *authority; /* Authority key fingerprint as hex */
struct tm valid_from;
struct tm valid_to;
- enum pkey_algo pkey_algo : 8; /* Public key algorithm */
- enum pkey_algo sig_pkey_algo : 8; /* Signature public key algorithm */
- enum pkey_hash_algo sig_hash_algo : 8; /* Signature hash algorithm */
const void *tbs; /* Signed data */
- size_t tbs_size; /* Size of signed data */
- const void *sig; /* Signature data */
- size_t sig_size; /* Size of sigature */
+ unsigned tbs_size; /* Size of signed data */
+ unsigned raw_sig_size; /* Size of signature */
+ const void *raw_sig; /* Signature data */
+ struct public_key_signature sig; /* Signature parameters */
};
/*
@@ -34,3 +33,10 @@ struct x509_certificate {
*/
extern void x509_free_certificate(struct x509_certificate *cert);
extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen);
+
+/*
+ * x509_public_key.c
+ */
+extern int x509_get_sig_params(struct x509_certificate *cert);
+extern int x509_check_signature(const struct public_key *pub,
+ struct x509_certificate *cert);
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 06007f0e880c..f83300b6e8c1 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -18,85 +18,162 @@
#include <linux/asn1_decoder.h>
#include <keys/asymmetric-subtype.h>
#include <keys/asymmetric-parser.h>
+#include <keys/system_keyring.h>
#include <crypto/hash.h>
#include "asymmetric_keys.h"
#include "public_key.h"
#include "x509_parser.h"
-static const
-struct public_key_algorithm *x509_public_key_algorithms[PKEY_ALGO__LAST] = {
- [PKEY_ALGO_DSA] = NULL,
-#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \
- defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE)
- [PKEY_ALGO_RSA] = &RSA_public_key_algorithm,
-#endif
-};
+/*
+ * Find a key in the given keyring by issuer and authority.
+ */
+static struct key *x509_request_asymmetric_key(
+ struct key *keyring,
+ const char *signer, size_t signer_len,
+ const char *authority, size_t auth_len)
+{
+ key_ref_t key;
+ char *id;
+
+ /* Construct an identifier. */
+ id = kmalloc(signer_len + 2 + auth_len + 1, GFP_KERNEL);
+ if (!id)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(id, signer, signer_len);
+ id[signer_len + 0] = ':';
+ id[signer_len + 1] = ' ';
+ memcpy(id + signer_len + 2, authority, auth_len);
+ id[signer_len + 2 + auth_len] = 0;
+
+ pr_debug("Look up: \"%s\"\n", id);
+
+ key = keyring_search(make_key_ref(keyring, 1),
+ &key_type_asymmetric, id);
+ if (IS_ERR(key))
+ pr_debug("Request for module key '%s' err %ld\n",
+ id, PTR_ERR(key));
+ kfree(id);
+
+ if (IS_ERR(key)) {
+ switch (PTR_ERR(key)) {
+ /* Hide some search errors */
+ case -EACCES:
+ case -ENOTDIR:
+ case -EAGAIN:
+ return ERR_PTR(-ENOKEY);
+ default:
+ return ERR_CAST(key);
+ }
+ }
+
+ pr_devel("<==%s() = 0 [%x]\n", __func__, key_serial(key_ref_to_ptr(key)));
+ return key_ref_to_ptr(key);
+}
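The identifier built above is simply the certificate's issuer name and the
authority key fingerprint joined by ": "; with illustrative, made-up values:

	/*
	 * signer    = "Acme Kernel Signing CA"
	 * authority = "bf1b2dd8...e75b9ab6"		(hex fingerprint, made up)
	 * id        = "Acme Kernel Signing CA: bf1b2dd8...e75b9ab6"
	 *
	 * keyring_search() then looks this string up under the asymmetric
	 * key type in the given trust keyring.
	 */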
/*
- * Check the signature on a certificate using the provided public key
+ * Set up the signature parameters in an X.509 certificate. This involves
+ * digesting the signed data and extracting the signature.
*/
-static int x509_check_signature(const struct public_key *pub,
- const struct x509_certificate *cert)
+int x509_get_sig_params(struct x509_certificate *cert)
{
- struct public_key_signature *sig;
struct crypto_shash *tfm;
struct shash_desc *desc;
size_t digest_size, desc_size;
+ void *digest;
int ret;
pr_devel("==>%s()\n", __func__);
-
+
+ if (cert->sig.rsa.s)
+ return 0;
+
+ cert->sig.rsa.s = mpi_read_raw_data(cert->raw_sig, cert->raw_sig_size);
+ if (!cert->sig.rsa.s)
+ return -ENOMEM;
+ cert->sig.nr_mpi = 1;
+
/* Allocate the hashing algorithm we're going to need and find out how
* big the hash operational data will be.
*/
- tfm = crypto_alloc_shash(pkey_hash_algo[cert->sig_hash_algo], 0, 0);
+ tfm = crypto_alloc_shash(hash_algo_name[cert->sig.pkey_hash_algo], 0, 0);
if (IS_ERR(tfm))
return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);
desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
digest_size = crypto_shash_digestsize(tfm);
- /* We allocate the hash operational data storage on the end of our
- * context data.
+ /* We allocate the hash operational data storage on the end of the
+ * digest storage space.
*/
ret = -ENOMEM;
- sig = kzalloc(sizeof(*sig) + desc_size + digest_size, GFP_KERNEL);
- if (!sig)
- goto error_no_sig;
+ digest = kzalloc(digest_size + desc_size, GFP_KERNEL);
+ if (!digest)
+ goto error;
- sig->pkey_hash_algo = cert->sig_hash_algo;
- sig->digest = (u8 *)sig + sizeof(*sig) + desc_size;
- sig->digest_size = digest_size;
+ cert->sig.digest = digest;
+ cert->sig.digest_size = digest_size;
- desc = (void *)sig + sizeof(*sig);
- desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc = digest + digest_size;
+ desc->tfm = tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_init(desc);
if (ret < 0)
goto error;
+ might_sleep();
+ ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, digest);
+error:
+ crypto_free_shash(tfm);
+ pr_devel("<==%s() = %d\n", __func__, ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(x509_get_sig_params);
- ret = -ENOMEM;
- sig->rsa.s = mpi_read_raw_data(cert->sig, cert->sig_size);
- if (!sig->rsa.s)
- goto error;
+/*
+ * Check the signature on a certificate using the provided public key
+ */
+int x509_check_signature(const struct public_key *pub,
+ struct x509_certificate *cert)
+{
+ int ret;
- ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest);
- if (ret < 0)
- goto error_mpi;
+ pr_devel("==>%s()\n", __func__);
- ret = pub->algo->verify_signature(pub, sig);
+ ret = x509_get_sig_params(cert);
+ if (ret < 0)
+ return ret;
+ ret = public_key_verify_signature(pub, &cert->sig);
pr_debug("Cert Verification: %d\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(x509_check_signature);
-error_mpi:
- mpi_free(sig->rsa.s);
-error:
- kfree(sig);
-error_no_sig:
- crypto_free_shash(tfm);
+/*
+ * Check the new certificate against the ones in the trust keyring. If one of
+ * those is the signing key and validates the new certificate, then mark the
+ * new certificate as being trusted.
+ *
+ * Return 0 if the new certificate was successfully validated, 1 if we couldn't
+ * find a matching parent certificate in the trusted list, and an error if
+ * there is a matching certificate but the signature check fails.
+ */
+static int x509_validate_trust(struct x509_certificate *cert,
+ struct key *trust_keyring)
+{
+ const struct public_key *pk;
+ struct key *key;
+ int ret = 1;
- pr_devel("<==%s() = %d\n", __func__, ret);
+ key = x509_request_asymmetric_key(trust_keyring,
+ cert->issuer, strlen(cert->issuer),
+ cert->authority,
+ strlen(cert->authority));
+ if (!IS_ERR(key)) {
+ pk = key->payload.data;
+ ret = x509_check_signature(pk, cert);
+ }
return ret;
}
@@ -106,7 +183,6 @@ error_no_sig:
static int x509_key_preparse(struct key_preparsed_payload *prep)
{
struct x509_certificate *cert;
- struct tm now;
size_t srlen, sulen;
char *desc = NULL;
int ret;
@@ -117,7 +193,18 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
pr_devel("Cert Issuer: %s\n", cert->issuer);
pr_devel("Cert Subject: %s\n", cert->subject);
- pr_devel("Cert Key Algo: %s\n", pkey_algo[cert->pkey_algo]);
+
+ if (cert->pub->pkey_algo >= PKEY_ALGO__LAST ||
+ cert->sig.pkey_algo >= PKEY_ALGO__LAST ||
+ cert->sig.pkey_hash_algo >= PKEY_HASH__LAST ||
+ !pkey_algo[cert->pub->pkey_algo] ||
+ !pkey_algo[cert->sig.pkey_algo] ||
+ !hash_algo_name[cert->sig.pkey_hash_algo]) {
+ ret = -ENOPKG;
+ goto error_free_cert;
+ }
+
+ pr_devel("Cert Key Algo: %s\n", pkey_algo_name[cert->pub->pkey_algo]);
pr_devel("Cert Valid From: %04ld-%02d-%02d %02d:%02d:%02d\n",
cert->valid_from.tm_year + 1900, cert->valid_from.tm_mon + 1,
cert->valid_from.tm_mday, cert->valid_from.tm_hour,
@@ -127,61 +214,29 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
cert->valid_to.tm_mday, cert->valid_to.tm_hour,
cert->valid_to.tm_min, cert->valid_to.tm_sec);
pr_devel("Cert Signature: %s + %s\n",
- pkey_algo[cert->sig_pkey_algo],
- pkey_hash_algo[cert->sig_hash_algo]);
+ pkey_algo_name[cert->sig.pkey_algo],
+ hash_algo_name[cert->sig.pkey_hash_algo]);
- if (!cert->fingerprint || !cert->authority) {
- pr_warn("Cert for '%s' must have SubjKeyId and AuthKeyId extensions\n",
+ if (!cert->fingerprint) {
+ pr_warn("Cert for '%s' must have a SubjKeyId extension\n",
cert->subject);
ret = -EKEYREJECTED;
goto error_free_cert;
}
- time_to_tm(CURRENT_TIME.tv_sec, 0, &now);
- pr_devel("Now: %04ld-%02d-%02d %02d:%02d:%02d\n",
- now.tm_year + 1900, now.tm_mon + 1, now.tm_mday,
- now.tm_hour, now.tm_min, now.tm_sec);
- if (now.tm_year < cert->valid_from.tm_year ||
- (now.tm_year == cert->valid_from.tm_year &&
- (now.tm_mon < cert->valid_from.tm_mon ||
- (now.tm_mon == cert->valid_from.tm_mon &&
- (now.tm_mday < cert->valid_from.tm_mday ||
- (now.tm_mday == cert->valid_from.tm_mday &&
- (now.tm_hour < cert->valid_from.tm_hour ||
- (now.tm_hour == cert->valid_from.tm_hour &&
- (now.tm_min < cert->valid_from.tm_min ||
- (now.tm_min == cert->valid_from.tm_min &&
- (now.tm_sec < cert->valid_from.tm_sec
- ))))))))))) {
- pr_warn("Cert %s is not yet valid\n", cert->fingerprint);
- ret = -EKEYREJECTED;
- goto error_free_cert;
- }
- if (now.tm_year > cert->valid_to.tm_year ||
- (now.tm_year == cert->valid_to.tm_year &&
- (now.tm_mon > cert->valid_to.tm_mon ||
- (now.tm_mon == cert->valid_to.tm_mon &&
- (now.tm_mday > cert->valid_to.tm_mday ||
- (now.tm_mday == cert->valid_to.tm_mday &&
- (now.tm_hour > cert->valid_to.tm_hour ||
- (now.tm_hour == cert->valid_to.tm_hour &&
- (now.tm_min > cert->valid_to.tm_min ||
- (now.tm_min == cert->valid_to.tm_min &&
- (now.tm_sec > cert->valid_to.tm_sec
- ))))))))))) {
- pr_warn("Cert %s has expired\n", cert->fingerprint);
- ret = -EKEYEXPIRED;
- goto error_free_cert;
- }
-
- cert->pub->algo = x509_public_key_algorithms[cert->pkey_algo];
+ cert->pub->algo = pkey_algo[cert->pub->pkey_algo];
cert->pub->id_type = PKEY_ID_X509;
- /* Check the signature on the key */
- if (strcmp(cert->fingerprint, cert->authority) == 0) {
- ret = x509_check_signature(cert->pub, cert);
+ /* Check the signature on the key if it appears to be self-signed */
+ if (!cert->authority ||
+ strcmp(cert->fingerprint, cert->authority) == 0) {
+ ret = x509_check_signature(cert->pub, cert); /* self-signed */
if (ret < 0)
goto error_free_cert;
+ } else {
+ ret = x509_validate_trust(cert, system_trusted_keyring);
+ if (!ret)
+ prep->trusted = 1;
}
/* Propose a description */
@@ -237,3 +292,6 @@ static void __exit x509_key_exit(void)
module_init(x509_key_init);
module_exit(x509_key_exit);
+
+MODULE_DESCRIPTION("X.509 certificate parser");
+MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 7be34248b450..39ea4791a3c9 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
}
device->device_issue_pending(chan);
} else {
- if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
+ if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
panic("%s: DMA error waiting for depend_tx\n",
__func__);
tx->tx_submit(tx);
@@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
* we are referring to the correct operation
*/
BUG_ON(async_tx_test_ack(*tx));
- if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
+ if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
panic("%s: DMA error waiting for transaction\n",
__func__);
async_tx_ack(*tx);
diff --git a/crypto/authenc.c b/crypto/authenc.c
index ffce19de05cf..1875e7026e8f 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -52,40 +52,52 @@ static void authenc_request_complete(struct aead_request *req, int err)
aead_request_complete(req, err);
}
-static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
- unsigned int keylen)
+int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ unsigned int keylen)
{
- unsigned int authkeylen;
- unsigned int enckeylen;
- struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct crypto_ahash *auth = ctx->auth;
- struct crypto_ablkcipher *enc = ctx->enc;
- struct rtattr *rta = (void *)key;
+ struct rtattr *rta = (struct rtattr *)key;
struct crypto_authenc_key_param *param;
- int err = -EINVAL;
if (!RTA_OK(rta, keylen))
- goto badkey;
+ return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
+ return -EINVAL;
if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
+ return -EINVAL;
param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
+ keys->enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < enckeylen)
- goto badkey;
+ if (keylen < keys->enckeylen)
+ return -EINVAL;
- authkeylen = keylen - enckeylen;
+ keys->authkeylen = keylen - keys->enckeylen;
+ keys->authkey = key;
+ keys->enckey = key + keys->authkeylen;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys);
+
+static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct crypto_ahash *auth = ctx->auth;
+ struct crypto_ablkcipher *enc = ctx->enc;
+ struct crypto_authenc_keys keys;
+ int err = -EINVAL;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(auth, key, authkeylen);
+ err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
@@ -95,7 +107,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
+ err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) &
CRYPTO_TFM_RES_MASK);
@@ -188,7 +200,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -227,7 +239,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -462,7 +474,7 @@ static int crypto_authenc_verify(struct aead_request *req,
ihash = ohash + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
+ return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
}
static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
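
crypto_authenc_extractkeys() above fixes the key-blob convention in one place: an rtattr of type CRYPTO_AUTHENC_KEYA_PARAM carries the big-endian cipher key length, and the payload that follows is the authentication key immediately followed by the cipher key. A hypothetical builder for such a blob, shown only to make the layout explicit (helper name and buffer handling are assumptions, not part of the patch):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/rtnetlink.h>	/* RTA_* layout macros */
#include <crypto/authenc.h>	/* CRYPTO_AUTHENC_KEYA_PARAM, key param */

/* Sketch: pack authkey || enckey behind the rtattr header that
 * crypto_authenc_extractkeys() parses.  Returns the blob length. */
static int authenc_pack_keys_sketch(u8 *buf, unsigned int buflen,
				    const u8 *authkey, unsigned int authkeylen,
				    const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;
	unsigned int need = RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;

	if (buflen < need)
		return -EINVAL;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen, enckey, enckeylen);

	return need;
}

This is roughly the shape in which combined keys are handed to an authenc() instance by its existing users, which is why both authenc and authencesn can now share the parser.
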
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index ab53762fc309..4be0dd4373a9 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -59,37 +59,19 @@ static void authenc_esn_request_complete(struct aead_request *req, int err)
static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
unsigned int keylen)
{
- unsigned int authkeylen;
- unsigned int enckeylen;
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
struct crypto_ablkcipher *enc = ctx->enc;
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
+ struct crypto_authenc_keys keys;
int err = -EINVAL;
- if (!RTA_OK(rta, keylen))
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
-
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
-
- if (keylen < enckeylen)
- goto badkey;
-
- authkeylen = keylen - enckeylen;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(auth, key, authkeylen);
+ err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
@@ -99,7 +81,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
+ err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
CRYPTO_TFM_RES_MASK);
@@ -247,7 +229,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *ar
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -296,7 +278,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *a
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -336,7 +318,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -568,7 +550,7 @@ static int crypto_authenc_esn_verify(struct aead_request *req)
ihash = ohash + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
+ return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
}
static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 499c91717d93..3e05499d183a 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -363,7 +363,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
if (!err) {
err = crypto_ccm_auth(req, req->dst, cryptlen);
- if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize))
+ if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
err = -EBADMSG;
}
aead_request_complete(req, err);
@@ -422,7 +422,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
return err;
/* verify */
- if (memcmp(authtag, odata, authsize))
+ if (crypto_memneq(authtag, odata, authsize))
return -EBADMSG;
return err;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 43e1fb05ea54..b4f017939004 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req,
crypto_xor(auth_tag, iauth_tag, 16);
scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
- return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
+ return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
diff --git a/crypto/hash_info.c b/crypto/hash_info.c
new file mode 100644
index 000000000000..3e7ff46f26e8
--- /dev/null
+++ b/crypto/hash_info.c
@@ -0,0 +1,56 @@
+/*
+ * Hash Info: Hash algorithms information
+ *
+ * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/export.h>
+#include <crypto/hash_info.h>
+
+const char *const hash_algo_name[HASH_ALGO__LAST] = {
+ [HASH_ALGO_MD4] = "md4",
+ [HASH_ALGO_MD5] = "md5",
+ [HASH_ALGO_SHA1] = "sha1",
+ [HASH_ALGO_RIPE_MD_160] = "rmd160",
+ [HASH_ALGO_SHA256] = "sha256",
+ [HASH_ALGO_SHA384] = "sha384",
+ [HASH_ALGO_SHA512] = "sha512",
+ [HASH_ALGO_SHA224] = "sha224",
+ [HASH_ALGO_RIPE_MD_128] = "rmd128",
+ [HASH_ALGO_RIPE_MD_256] = "rmd256",
+ [HASH_ALGO_RIPE_MD_320] = "rmd320",
+ [HASH_ALGO_WP_256] = "wp256",
+ [HASH_ALGO_WP_384] = "wp384",
+ [HASH_ALGO_WP_512] = "wp512",
+ [HASH_ALGO_TGR_128] = "tgr128",
+ [HASH_ALGO_TGR_160] = "tgr160",
+ [HASH_ALGO_TGR_192] = "tgr192",
+};
+EXPORT_SYMBOL_GPL(hash_algo_name);
+
+const int hash_digest_size[HASH_ALGO__LAST] = {
+ [HASH_ALGO_MD4] = MD5_DIGEST_SIZE,
+ [HASH_ALGO_MD5] = MD5_DIGEST_SIZE,
+ [HASH_ALGO_SHA1] = SHA1_DIGEST_SIZE,
+ [HASH_ALGO_RIPE_MD_160] = RMD160_DIGEST_SIZE,
+ [HASH_ALGO_SHA256] = SHA256_DIGEST_SIZE,
+ [HASH_ALGO_SHA384] = SHA384_DIGEST_SIZE,
+ [HASH_ALGO_SHA512] = SHA512_DIGEST_SIZE,
+ [HASH_ALGO_SHA224] = SHA224_DIGEST_SIZE,
+ [HASH_ALGO_RIPE_MD_128] = RMD128_DIGEST_SIZE,
+ [HASH_ALGO_RIPE_MD_256] = RMD256_DIGEST_SIZE,
+ [HASH_ALGO_RIPE_MD_320] = RMD320_DIGEST_SIZE,
+ [HASH_ALGO_WP_256] = WP256_DIGEST_SIZE,
+ [HASH_ALGO_WP_384] = WP384_DIGEST_SIZE,
+ [HASH_ALGO_WP_512] = WP512_DIGEST_SIZE,
+ [HASH_ALGO_TGR_128] = TGR128_DIGEST_SIZE,
+ [HASH_ALGO_TGR_160] = TGR160_DIGEST_SIZE,
+ [HASH_ALGO_TGR_192] = TGR192_DIGEST_SIZE,
+};
+EXPORT_SYMBOL_GPL(hash_digest_size);
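
The two tables above give the canonical mapping from the HASH_ALGO_* enumeration to a crypto API algorithm name and digest length. A minimal consumer might look like this (hypothetical helper, not part of the patch):

#include <linux/errno.h>
#include <crypto/hash_info.h>

/* Sketch: translate a HASH_ALGO_* value (e.g. one carried in signature
 * metadata) into the string accepted by crypto_alloc_shash() and the
 * expected digest length. */
static int hash_algo_lookup_sketch(enum hash_algo algo,
				   const char **name, int *digest_len)
{
	if (algo >= HASH_ALGO__LAST || !hash_algo_name[algo])
		return -EINVAL;

	*name = hash_algo_name[algo];		/* e.g. "sha256" */
	*digest_len = hash_digest_size[algo];
	return 0;
}
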
diff --git a/crypto/memneq.c b/crypto/memneq.c
new file mode 100644
index 000000000000..cd0162221c14
--- /dev/null
+++ b/crypto/memneq.c
@@ -0,0 +1,138 @@
+/*
+ * Constant-time equality testing of memory regions.
+ *
+ * Authors:
+ *
+ * James Yonan <james@openvpn.net>
+ * Daniel Borkmann <dborkman@redhat.com>
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of OpenVPN Technologies nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <crypto/algapi.h>
+
+#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
+
+/* Generic path for arbitrary size */
+static inline unsigned long
+__crypto_memneq_generic(const void *a, const void *b, size_t size)
+{
+ unsigned long neq = 0;
+
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ while (size >= sizeof(unsigned long)) {
+ neq |= *(unsigned long *)a ^ *(unsigned long *)b;
+ a += sizeof(unsigned long);
+ b += sizeof(unsigned long);
+ size -= sizeof(unsigned long);
+ }
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+ while (size > 0) {
+ neq |= *(unsigned char *)a ^ *(unsigned char *)b;
+ a += 1;
+ b += 1;
+ size -= 1;
+ }
+ return neq;
+}
+
+/* Loop-free fast-path for frequently used 16-byte size */
+static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
+{
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (sizeof(unsigned long) == 8)
+ return ((*(unsigned long *)(a) ^ *(unsigned long *)(b))
+ | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8)));
+ else if (sizeof(unsigned int) == 4)
+ return ((*(unsigned int *)(a) ^ *(unsigned int *)(b))
+ | (*(unsigned int *)(a+4) ^ *(unsigned int *)(b+4))
+ | (*(unsigned int *)(a+8) ^ *(unsigned int *)(b+8))
+ | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12)));
+ else
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+ return ((*(unsigned char *)(a) ^ *(unsigned char *)(b))
+ | (*(unsigned char *)(a+1) ^ *(unsigned char *)(b+1))
+ | (*(unsigned char *)(a+2) ^ *(unsigned char *)(b+2))
+ | (*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3))
+ | (*(unsigned char *)(a+4) ^ *(unsigned char *)(b+4))
+ | (*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5))
+ | (*(unsigned char *)(a+6) ^ *(unsigned char *)(b+6))
+ | (*(unsigned char *)(a+7) ^ *(unsigned char *)(b+7))
+ | (*(unsigned char *)(a+8) ^ *(unsigned char *)(b+8))
+ | (*(unsigned char *)(a+9) ^ *(unsigned char *)(b+9))
+ | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10))
+ | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11))
+ | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12))
+ | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13))
+ | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14))
+ | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15)));
+}
+
+/* Compare two areas of memory without leaking timing information,
+ * and with special optimizations for common sizes. Users should
+ * not call this function directly, but should instead use
+ * crypto_memneq defined in crypto/algapi.h.
+ */
+noinline unsigned long __crypto_memneq(const void *a, const void *b,
+ size_t size)
+{
+ switch (size) {
+ case 16:
+ return __crypto_memneq_16(a, b);
+ default:
+ return __crypto_memneq_generic(a, b, size);
+ }
+}
+EXPORT_SYMBOL(__crypto_memneq);
+
+#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
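
As the comment above notes, callers are expected to use crypto_memneq() from crypto/algapi.h rather than __crypto_memneq() directly. The memcmp() conversions earlier in this series all reduce to the same caller-side pattern; a minimal sketch:

#include <linux/errno.h>
#include <linux/types.h>
#include <crypto/algapi.h>	/* crypto_memneq() */

/* Sketch: compare a computed authentication tag with the received one
 * without leaking, through timing, how many leading bytes matched.
 * crypto_memneq() returns zero iff the regions are equal. */
static int verify_tag_sketch(const u8 *calc_tag, const u8 *recv_tag,
			     unsigned int authsize)
{
	return crypto_memneq(calc_tag, recv_tag, authsize) ? -EBADMSG : 0;
}
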
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8f451449abd3..97536a2c3ba2 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -166,6 +166,8 @@ source "drivers/reset/Kconfig"
source "drivers/fmc/Kconfig"
+source "drivers/powercap/Kconfig"
+
source "drivers/phy/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 687da899cadb..3cc8214f9b26 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -154,3 +154,4 @@ obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IPACK_BUS) += ipack/
obj-$(CONFIG_NTB) += ntb/
obj-$(CONFIG_FMC) += fmc/
+obj-$(CONFIG_POWERCAP) += powercap/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 6efe2ac6902f..b3ebdec8dafd 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -56,23 +56,6 @@ config ACPI_PROCFS
Say N to delete /proc/acpi/ files that have moved to /sys/
-config ACPI_PROCFS_POWER
- bool "Deprecated power /proc/acpi directories"
- depends on PROC_FS
- help
- For backwards compatibility, this option allows
- deprecated power /proc/acpi/ directories to exist, even when
- they have been replaced by functions in /sys.
- The deprecated directories (and their replacements) include:
- /proc/acpi/battery/* (/sys/class/power_supply/*)
- /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
- This option has no effect on /proc/acpi/ directories
- and functions, which do not yet exist in /sys
- This option, together with the proc directories, will be
- deleted in 2.6.39.
-
- Say N to delete power /proc/acpi/ directories that have moved to /sys/
-
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
default n
@@ -175,9 +158,10 @@ config ACPI_PROCESSOR
To compile this driver as a module, choose M here:
the module will be called processor.
+
config ACPI_IPMI
tristate "IPMI"
- depends on IPMI_SI && IPMI_HANDLER
+ depends on IPMI_SI
default n
help
This driver enables the ACPI to access the BMC controller. And it
@@ -372,4 +356,25 @@ config ACPI_BGRT
source "drivers/acpi/apei/Kconfig"
+config ACPI_EXTLOG
+ tristate "Extended Error Log support"
+ depends on X86_MCE
+ select EFI
+ select UEFI_CPER
+ default n
+ help
+ Certain usages such as Predictive Failure Analysis (PFA) require
+ more information about the error than what can be described in
+ processor machine check banks. Most server processors log
+ additional information about the error in processor uncore
+ registers. Since the addresses and layout of these registers vary
+ widely from one processor to another, system software cannot
+ readily make use of them. To complicate matters further, some of
+ the additional error information cannot be constructed without
+ detailed knowledge about platform topology.
+
+ Enhanced MCA Logging allows firmware to provide additional error
+ information to system software, synchronous with MCE or CMCI. This
+ driver adds support for that functionality.
+
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index cdaf68b58b00..0331f91d56e6 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -47,7 +47,6 @@ acpi-y += sysfs.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
-acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
ifdef CONFIG_ACPI_VIDEO
acpi-y += video_detect.o
endif
@@ -82,3 +81,5 @@ processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
obj-$(CONFIG_ACPI_APEI) += apei/
+
+obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index f37beaa32750..b9f0d5f4bba5 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -30,10 +30,7 @@
#include <linux/types.h>
#include <linux/dmi.h>
#include <linux/delay.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
+#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -55,75 +52,30 @@ MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI AC Adapter Driver");
MODULE_LICENSE("GPL");
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_ac_dir(void);
-extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
-static int acpi_ac_open_fs(struct inode *inode, struct file *file);
-#endif
-
-static int acpi_ac_add(struct acpi_device *device);
-static int acpi_ac_remove(struct acpi_device *device);
-static void acpi_ac_notify(struct acpi_device *device, u32 event);
-
-static const struct acpi_device_id ac_device_ids[] = {
- {"ACPI0003", 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, ac_device_ids);
-
-#ifdef CONFIG_PM_SLEEP
-static int acpi_ac_resume(struct device *dev);
-#endif
-static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
-
static int ac_sleep_before_get_state_ms;
-static struct acpi_driver acpi_ac_driver = {
- .name = "ac",
- .class = ACPI_AC_CLASS,
- .ids = ac_device_ids,
- .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
- .ops = {
- .add = acpi_ac_add,
- .remove = acpi_ac_remove,
- .notify = acpi_ac_notify,
- },
- .drv.pm = &acpi_ac_pm,
-};
-
struct acpi_ac {
struct power_supply charger;
- struct acpi_device * device;
+ struct acpi_device *adev;
+ struct platform_device *pdev;
unsigned long long state;
};
#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
-#ifdef CONFIG_ACPI_PROCFS_POWER
-static const struct file_operations acpi_ac_fops = {
- .owner = THIS_MODULE,
- .open = acpi_ac_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif
-
/* --------------------------------------------------------------------------
AC Adapter Management
-------------------------------------------------------------------------- */
static int acpi_ac_get_state(struct acpi_ac *ac)
{
- acpi_status status = AE_OK;
-
-
- if (!ac)
- return -EINVAL;
+ acpi_status status;
- status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, &ac->state);
+ status = acpi_evaluate_integer(ac->adev->handle, "_PSR", NULL,
+ &ac->state);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Error reading AC Adapter state"));
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Error reading AC Adapter state"));
ac->state = ACPI_AC_STATUS_UNKNOWN;
return -ENODEV;
}
@@ -160,91 +112,13 @@ static enum power_supply_property ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
-#ifdef CONFIG_ACPI_PROCFS_POWER
-/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-
-static struct proc_dir_entry *acpi_ac_dir;
-
-static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_ac *ac = seq->private;
-
-
- if (!ac)
- return 0;
-
- if (acpi_ac_get_state(ac)) {
- seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
- return 0;
- }
-
- seq_puts(seq, "state: ");
- switch (ac->state) {
- case ACPI_AC_STATUS_OFFLINE:
- seq_puts(seq, "off-line\n");
- break;
- case ACPI_AC_STATUS_ONLINE:
- seq_puts(seq, "on-line\n");
- break;
- default:
- seq_puts(seq, "unknown\n");
- break;
- }
-
- return 0;
-}
-
-static int acpi_ac_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
-}
-
-static int acpi_ac_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry = NULL;
-
- printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
- " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
- if (!acpi_device_dir(device)) {
- acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
- acpi_ac_dir);
- if (!acpi_device_dir(device))
- return -ENODEV;
- }
-
- /* 'state' [R] */
- entry = proc_create_data(ACPI_AC_FILE_STATE,
- S_IRUGO, acpi_device_dir(device),
- &acpi_ac_fops, acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
- return 0;
-}
-
-static int acpi_ac_remove_fs(struct acpi_device *device)
-{
-
- if (acpi_device_dir(device)) {
- remove_proc_entry(ACPI_AC_FILE_STATE, acpi_device_dir(device));
-
- remove_proc_entry(acpi_device_bid(device), acpi_ac_dir);
- acpi_device_dir(device) = NULL;
- }
-
- return 0;
-}
-#endif
-
/* --------------------------------------------------------------------------
Driver Model
-------------------------------------------------------------------------- */
-static void acpi_ac_notify(struct acpi_device *device, u32 event)
+static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
{
- struct acpi_ac *ac = acpi_driver_data(device);
-
+ struct acpi_ac *ac = data;
if (!ac)
return;
@@ -267,10 +141,10 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
msleep(ac_sleep_before_get_state_ms);
acpi_ac_get_state(ac);
- acpi_bus_generate_netlink_event(device->pnp.device_class,
- dev_name(&device->dev), event,
- (u32) ac->state);
- acpi_notifier_call_chain(device, event, (u32) ac->state);
+ acpi_bus_generate_netlink_event(ac->adev->pnp.device_class,
+ dev_name(&ac->pdev->dev),
+ event, (u32) ac->state);
+ acpi_notifier_call_chain(ac->adev, event, (u32) ac->state);
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
}
@@ -295,53 +169,55 @@ static struct dmi_system_id ac_dmi_table[] = {
{},
};
-static int acpi_ac_add(struct acpi_device *device)
+static int acpi_ac_probe(struct platform_device *pdev)
{
int result = 0;
struct acpi_ac *ac = NULL;
+ struct acpi_device *adev;
-
- if (!device)
+ if (!pdev)
return -EINVAL;
+ result = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+ if (result)
+ return -ENODEV;
+
ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
if (!ac)
return -ENOMEM;
- ac->device = device;
- strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_AC_CLASS);
- device->driver_data = ac;
+ strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
+ strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
+ ac->adev = adev;
+ ac->pdev = pdev;
+ platform_set_drvdata(pdev, ac);
result = acpi_ac_get_state(ac);
if (result)
goto end;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- result = acpi_ac_add_fs(device);
-#endif
- if (result)
- goto end;
- ac->charger.name = acpi_device_bid(device);
+ ac->charger.name = acpi_device_bid(adev);
ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
ac->charger.properties = ac_props;
ac->charger.num_properties = ARRAY_SIZE(ac_props);
ac->charger.get_property = get_ac_property;
- result = power_supply_register(&ac->device->dev, &ac->charger);
+ result = power_supply_register(&pdev->dev, &ac->charger);
if (result)
goto end;
+ result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
+ ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac);
+ if (result) {
+ power_supply_unregister(&ac->charger);
+ goto end;
+ }
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
- acpi_device_name(device), acpi_device_bid(device),
+ acpi_device_name(adev), acpi_device_bid(adev),
ac->state ? "on-line" : "off-line");
- end:
- if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_remove_fs(device);
-#endif
+end:
+ if (result)
kfree(ac);
- }
dmi_check_system(ac_dmi_table);
return result;
@@ -356,7 +232,7 @@ static int acpi_ac_resume(struct device *dev)
if (!dev)
return -EINVAL;
- ac = acpi_driver_data(to_acpi_device(dev));
+ ac = platform_get_drvdata(to_platform_device(dev));
if (!ac)
return -EINVAL;
@@ -368,28 +244,44 @@ static int acpi_ac_resume(struct device *dev)
return 0;
}
#endif
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
-static int acpi_ac_remove(struct acpi_device *device)
+static int acpi_ac_remove(struct platform_device *pdev)
{
- struct acpi_ac *ac = NULL;
-
+ struct acpi_ac *ac;
- if (!device || !acpi_driver_data(device))
+ if (!pdev)
return -EINVAL;
- ac = acpi_driver_data(device);
+ acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
+ ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler);
+ ac = platform_get_drvdata(pdev);
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_remove_fs(device);
-#endif
kfree(ac);
return 0;
}
+static const struct acpi_device_id acpi_ac_match[] = {
+ { "ACPI0003", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
+
+static struct platform_driver acpi_ac_driver = {
+ .probe = acpi_ac_probe,
+ .remove = acpi_ac_remove,
+ .driver = {
+ .name = "acpi-ac",
+ .owner = THIS_MODULE,
+ .pm = &acpi_ac_pm_ops,
+ .acpi_match_table = ACPI_PTR(acpi_ac_match),
+ },
+};
+
static int __init acpi_ac_init(void)
{
int result;
@@ -397,34 +289,16 @@ static int __init acpi_ac_init(void)
if (acpi_disabled)
return -ENODEV;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_dir = acpi_lock_ac_dir();
- if (!acpi_ac_dir)
+ result = platform_driver_register(&acpi_ac_driver);
+ if (result < 0)
return -ENODEV;
-#endif
-
- result = acpi_bus_register_driver(&acpi_ac_driver);
- if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
- return -ENODEV;
- }
return 0;
}
static void __exit acpi_ac_exit(void)
{
-
- acpi_bus_unregister_driver(&acpi_ac_driver);
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
-
- return;
+ platform_driver_unregister(&acpi_ac_driver);
}
-
module_init(acpi_ac_init);
module_exit(acpi_ac_exit);
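
The ac.c rework above replaces the legacy struct acpi_driver binding with a platform driver whose .acpi_match_table does the matching, so probe() has to recover the ACPI companion from the platform device. The core of that pattern, reduced to a sketch (the function name is hypothetical):

#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <acpi/acpi_bus.h>

/* Sketch: the probe-side half of an ACPI-enumerated platform driver,
 * as used by acpi_ac_probe() above. */
static int acpi_platform_probe_sketch(struct platform_device *pdev)
{
	struct acpi_device *adev;

	if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return -ENODEV;		/* no ACPI companion bound */

	/* ... allocate driver state, register the power_supply and the
	 *     ACPI_DEVICE_NOTIFY handler against ACPI_HANDLE(&pdev->dev) ... */
	return 0;
}
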
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
new file mode 100644
index 000000000000..a6869e110ce5
--- /dev/null
+++ b/drivers/acpi/acpi_extlog.c
@@ -0,0 +1,327 @@
+/*
+ * Extended Error Log driver
+ *
+ * Copyright (C) 2013 Intel Corp.
+ * Author: Chen, Gong <gong.chen@intel.com>
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <linux/cper.h>
+#include <linux/ratelimit.h>
+#include <asm/cpu.h>
+#include <asm/mce.h>
+
+#include "apei/apei-internal.h"
+
+#define EXT_ELOG_ENTRY_MASK GENMASK_ULL(51, 0) /* elog entry address mask */
+
+#define EXTLOG_DSM_REV 0x0
+#define EXTLOG_FN_QUERY 0x0
+#define EXTLOG_FN_ADDR 0x1
+
+#define FLAG_OS_OPTIN BIT(0)
+#define EXTLOG_QUERY_L1_EXIST BIT(1)
+#define ELOG_ENTRY_VALID (1ULL<<63)
+#define ELOG_ENTRY_LEN 0x1000
+
+#define EMCA_BUG \
+ "Can not request iomem region <0x%016llx-0x%016llx> - eMCA disabled\n"
+
+struct extlog_l1_head {
+ u32 ver; /* Header Version */
+ u32 hdr_len; /* Header Length */
+ u64 total_len; /* entire L1 Directory length including this header */
+ u64 elog_base; /* MCA Error Log Directory base address */
+ u64 elog_len; /* MCA Error Log Directory length */
+ u32 flags; /* bit 0 - OS/VMM Opt-in */
+ u8 rev0[12];
+ u32 entries; /* Valid L1 Directory entries per logical processor */
+ u8 rev1[12];
+};
+
+static u8 extlog_dsm_uuid[] = "663E35AF-CC10-41A4-88EA-5470AF055295";
+
+/* L1 table related physical address */
+static u64 elog_base;
+static size_t elog_size;
+static u64 l1_dirbase;
+static size_t l1_size;
+
+/* L1 table related virtual address */
+static void __iomem *extlog_l1_addr;
+static void __iomem *elog_addr;
+
+static void *elog_buf;
+
+static u64 *l1_entry_base;
+static u32 l1_percpu_entry;
+
+#define ELOG_IDX(cpu, bank) \
+ (cpu_physical_id(cpu) * l1_percpu_entry + (bank))
+
+#define ELOG_ENTRY_DATA(idx) \
+ (*(l1_entry_base + (idx)))
+
+#define ELOG_ENTRY_ADDR(phyaddr) \
+ (phyaddr - elog_base + (u8 *)elog_addr)
+
+static struct acpi_generic_status *extlog_elog_entry_check(int cpu, int bank)
+{
+ int idx;
+ u64 data;
+ struct acpi_generic_status *estatus;
+
+ WARN_ON(cpu < 0);
+ idx = ELOG_IDX(cpu, bank);
+ data = ELOG_ENTRY_DATA(idx);
+ if ((data & ELOG_ENTRY_VALID) == 0)
+ return NULL;
+
+ data &= EXT_ELOG_ENTRY_MASK;
+ estatus = (struct acpi_generic_status *)ELOG_ENTRY_ADDR(data);
+
+ /* if no valid data in elog entry, just return */
+ if (estatus->block_status == 0)
+ return NULL;
+
+ return estatus;
+}
+
+static void __print_extlog_rcd(const char *pfx,
+ struct acpi_generic_status *estatus, int cpu)
+{
+ static atomic_t seqno;
+ unsigned int curr_seqno;
+ char pfx_seq[64];
+
+ if (!pfx) {
+ if (estatus->error_severity <= CPER_SEV_CORRECTED)
+ pfx = KERN_INFO;
+ else
+ pfx = KERN_ERR;
+ }
+ curr_seqno = atomic_inc_return(&seqno);
+ snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}", pfx, curr_seqno);
+ printk("%s""Hardware error detected on CPU%d\n", pfx_seq, cpu);
+ cper_estatus_print(pfx_seq, estatus);
+}
+
+static int print_extlog_rcd(const char *pfx,
+ struct acpi_generic_status *estatus, int cpu)
+{
+ /* Not more than 2 messages every 5 seconds */
+ static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
+ static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
+ struct ratelimit_state *ratelimit;
+
+ if (estatus->error_severity == CPER_SEV_CORRECTED ||
+ (estatus->error_severity == CPER_SEV_INFORMATIONAL))
+ ratelimit = &ratelimit_corrected;
+ else
+ ratelimit = &ratelimit_uncorrected;
+ if (__ratelimit(ratelimit)) {
+ __print_extlog_rcd(pfx, estatus, cpu);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int extlog_print(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ int bank = mce->bank;
+ int cpu = mce->extcpu;
+ struct acpi_generic_status *estatus;
+ int rc;
+
+ estatus = extlog_elog_entry_check(cpu, bank);
+ if (estatus == NULL)
+ return NOTIFY_DONE;
+
+ memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN);
+ /* clear record status to enable BIOS to update it again */
+ estatus->block_status = 0;
+
+ rc = print_extlog_rcd(NULL, (struct acpi_generic_status *)elog_buf, cpu);
+
+ return NOTIFY_DONE;
+}
+
+static int extlog_get_dsm(acpi_handle handle, int rev, int func, u64 *ret)
+{
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_object_list input;
+ union acpi_object params[4], *obj;
+ u8 uuid[16];
+ int i;
+
+ acpi_str_to_uuid(extlog_dsm_uuid, uuid);
+ input.count = 4;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = 16;
+ params[0].buffer.pointer = uuid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = rev;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = func;
+ params[3].type = ACPI_TYPE_PACKAGE;
+ params[3].package.count = 0;
+ params[3].package.elements = NULL;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf)))
+ return -1;
+
+ *ret = 0;
+ obj = (union acpi_object *)buf.pointer;
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ *ret = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ if (obj->buffer.length <= 8) {
+ for (i = 0; i < obj->buffer.length; i++)
+ *ret |= (obj->buffer.pointer[i] << (i * 8));
+ }
+ }
+ kfree(buf.pointer);
+
+ return 0;
+}
+
+static bool extlog_get_l1addr(void)
+{
+ acpi_handle handle;
+ u64 ret;
+
+ if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
+ return false;
+
+ if (extlog_get_dsm(handle, EXTLOG_DSM_REV, EXTLOG_FN_QUERY, &ret) ||
+ !(ret & EXTLOG_QUERY_L1_EXIST))
+ return false;
+
+ if (extlog_get_dsm(handle, EXTLOG_DSM_REV, EXTLOG_FN_ADDR, &ret))
+ return false;
+
+ l1_dirbase = ret;
+ /* Spec says L1 directory must be 4K aligned, bail out if it isn't */
+ if (l1_dirbase & ((1 << 12) - 1)) {
+ pr_warn(FW_BUG "L1 Directory is invalid at physical %llx\n",
+ l1_dirbase);
+ return false;
+ }
+
+ return true;
+}
+static struct notifier_block extlog_mce_dec = {
+ .notifier_call = extlog_print,
+};
+
+static int __init extlog_init(void)
+{
+ struct extlog_l1_head *l1_head;
+ void __iomem *extlog_l1_hdr;
+ size_t l1_hdr_size;
+ struct resource *r;
+ u64 cap;
+ int rc;
+
+ rc = -ENODEV;
+
+ rdmsrl(MSR_IA32_MCG_CAP, cap);
+ if (!(cap & MCG_ELOG_P))
+ return rc;
+
+ if (!extlog_get_l1addr())
+ return rc;
+
+ rc = -EINVAL;
+ /* get L1 header to fetch necessary information */
+ l1_hdr_size = sizeof(struct extlog_l1_head);
+ r = request_mem_region(l1_dirbase, l1_hdr_size, "L1 DIR HDR");
+ if (!r) {
+ pr_warn(FW_BUG EMCA_BUG,
+ (unsigned long long)l1_dirbase,
+ (unsigned long long)l1_dirbase + l1_hdr_size);
+ goto err;
+ }
+
+ extlog_l1_hdr = acpi_os_map_memory(l1_dirbase, l1_hdr_size);
+ l1_head = (struct extlog_l1_head *)extlog_l1_hdr;
+ l1_size = l1_head->total_len;
+ l1_percpu_entry = l1_head->entries;
+ elog_base = l1_head->elog_base;
+ elog_size = l1_head->elog_len;
+ acpi_os_unmap_memory(extlog_l1_hdr, l1_hdr_size);
+ release_mem_region(l1_dirbase, l1_hdr_size);
+
+ /* remap L1 header again based on completed information */
+ r = request_mem_region(l1_dirbase, l1_size, "L1 Table");
+ if (!r) {
+ pr_warn(FW_BUG EMCA_BUG,
+ (unsigned long long)l1_dirbase,
+ (unsigned long long)l1_dirbase + l1_size);
+ goto err;
+ }
+ extlog_l1_addr = acpi_os_map_memory(l1_dirbase, l1_size);
+ l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size);
+
+ /* remap elog table */
+ r = request_mem_region(elog_base, elog_size, "Elog Table");
+ if (!r) {
+ pr_warn(FW_BUG EMCA_BUG,
+ (unsigned long long)elog_base,
+ (unsigned long long)elog_base + elog_size);
+ goto err_release_l1_dir;
+ }
+ elog_addr = acpi_os_map_memory(elog_base, elog_size);
+
+ rc = -ENOMEM;
+ /* allocate buffer to save elog record */
+ elog_buf = kmalloc(ELOG_ENTRY_LEN, GFP_KERNEL);
+ if (elog_buf == NULL)
+ goto err_release_elog;
+
+ mce_register_decode_chain(&extlog_mce_dec);
+ /* opt in so the OS takes over error-log management from the BIOS */
+ ((struct extlog_l1_head *)extlog_l1_addr)->flags |= FLAG_OS_OPTIN;
+
+ return 0;
+
+err_release_elog:
+ if (elog_addr)
+ acpi_os_unmap_memory(elog_addr, elog_size);
+ release_mem_region(elog_base, elog_size);
+err_release_l1_dir:
+ if (extlog_l1_addr)
+ acpi_os_unmap_memory(extlog_l1_addr, l1_size);
+ release_mem_region(l1_dirbase, l1_size);
+err:
+ pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n");
+ return rc;
+}
+
+static void __exit extlog_exit(void)
+{
+ mce_unregister_decode_chain(&extlog_mce_dec);
+ ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
+ if (extlog_l1_addr)
+ acpi_os_unmap_memory(extlog_l1_addr, l1_size);
+ if (elog_addr)
+ acpi_os_unmap_memory(elog_addr, elog_size);
+ release_mem_region(elog_base, elog_size);
+ release_mem_region(l1_dirbase, l1_size);
+ kfree(elog_buf);
+}
+
+module_init(extlog_init);
+module_exit(extlog_exit);
+
+MODULE_AUTHOR("Chen, Gong <gong.chen@intel.com>");
+MODULE_DESCRIPTION("Extended MCA Error Log Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index a6977e12d574..ac0f52f6df2b 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -1,8 +1,9 @@
/*
* acpi_ipmi.c - ACPI IPMI opregion
*
- * Copyright (C) 2010 Intel Corporation
- * Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com>
+ * Copyright (C) 2010, 2013 Intel Corporation
+ * Author: Zhao Yakui <yakui.zhao@intel.com>
+ * Lv Zheng <lv.zheng@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -23,60 +24,58 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/ipmi.h>
-#include <linux/device.h>
-#include <linux/pnp.h>
#include <linux/spinlock.h>
MODULE_AUTHOR("Zhao Yakui");
MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
MODULE_LICENSE("GPL");
-#define IPMI_FLAGS_HANDLER_INSTALL 0
-
#define ACPI_IPMI_OK 0
#define ACPI_IPMI_TIMEOUT 0x10
#define ACPI_IPMI_UNKNOWN 0x07
/* the IPMI timeout is 5s */
-#define IPMI_TIMEOUT (5 * HZ)
+#define IPMI_TIMEOUT (5000)
+#define ACPI_IPMI_MAX_MSG_LENGTH 64
struct acpi_ipmi_device {
/* the device list attached to driver_data.ipmi_devices */
struct list_head head;
+
/* the IPMI request message list */
struct list_head tx_msg_list;
- spinlock_t tx_msg_lock;
+
+ spinlock_t tx_msg_lock;
acpi_handle handle;
- struct pnp_dev *pnp_dev;
- ipmi_user_t user_interface;
+ struct device *dev;
+ ipmi_user_t user_interface;
int ipmi_ifnum; /* IPMI interface number */
long curr_msgid;
- unsigned long flags;
- struct ipmi_smi_info smi_data;
+ bool dead;
+ struct kref kref;
};
struct ipmi_driver_data {
- struct list_head ipmi_devices;
- struct ipmi_smi_watcher bmc_events;
- struct ipmi_user_hndl ipmi_hndlrs;
- struct mutex ipmi_lock;
+ struct list_head ipmi_devices;
+ struct ipmi_smi_watcher bmc_events;
+ struct ipmi_user_hndl ipmi_hndlrs;
+ struct mutex ipmi_lock;
+
+ /*
+ * NOTE: IPMI System Interface Selection
+ * There is no system interface specified by the IPMI operation
+ * region access. We try to select one system interface with ACPI
+ * handle set. IPMI messages passed from the ACPI codes are sent
+ * to this selected global IPMI system interface.
+ */
+ struct acpi_ipmi_device *selected_smi;
};
struct acpi_ipmi_msg {
struct list_head head;
+
/*
* General speaking the addr type should be SI_ADDR_TYPE. And
* the addr channel should be BMC.
@@ -86,30 +85,31 @@ struct acpi_ipmi_msg {
*/
struct ipmi_addr addr;
long tx_msgid;
+
/* it is used to track whether the IPMI message is finished */
struct completion tx_complete;
+
struct kernel_ipmi_msg tx_message;
- int msg_done;
- /* tx data . And copy it from ACPI object buffer */
- u8 tx_data[64];
- int tx_len;
- u8 rx_data[64];
- int rx_len;
+ int msg_done;
+
+ /* tx/rx data, copied from/to the ACPI object buffer */
+ u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
+ u8 rx_len;
+
struct acpi_ipmi_device *device;
+ struct kref kref;
};
/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
struct acpi_ipmi_buffer {
u8 status;
u8 length;
- u8 data[64];
+ u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
};
static void ipmi_register_bmc(int iface, struct device *dev);
static void ipmi_bmc_gone(int iface);
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
-static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device);
-static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device);
static struct ipmi_driver_data driver_data = {
.ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
@@ -121,29 +121,142 @@ static struct ipmi_driver_data driver_data = {
.ipmi_hndlrs = {
.ipmi_recv_hndl = ipmi_msg_handler,
},
+ .ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock)
};
-static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
+static struct acpi_ipmi_device *
+ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
+{
+ struct acpi_ipmi_device *ipmi_device;
+ int err;
+ ipmi_user_t user;
+
+ ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
+ if (!ipmi_device)
+ return NULL;
+
+ kref_init(&ipmi_device->kref);
+ INIT_LIST_HEAD(&ipmi_device->head);
+ INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
+ spin_lock_init(&ipmi_device->tx_msg_lock);
+ ipmi_device->handle = handle;
+ ipmi_device->dev = get_device(dev);
+ ipmi_device->ipmi_ifnum = iface;
+
+ err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
+ ipmi_device, &user);
+ if (err) {
+ put_device(dev);
+ kfree(ipmi_device);
+ return NULL;
+ }
+ ipmi_device->user_interface = user;
+
+ return ipmi_device;
+}
+
+static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
+{
+ ipmi_destroy_user(ipmi_device->user_interface);
+ put_device(ipmi_device->dev);
+ kfree(ipmi_device);
+}
+
+static void ipmi_dev_release_kref(struct kref *kref)
+{
+ struct acpi_ipmi_device *ipmi =
+ container_of(kref, struct acpi_ipmi_device, kref);
+
+ ipmi_dev_release(ipmi);
+}
+
+static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
+{
+ list_del(&ipmi_device->head);
+ if (driver_data.selected_smi == ipmi_device)
+ driver_data.selected_smi = NULL;
+
+ /*
+ * Always setting dead flag after deleting from the list or
+ * list_for_each_entry() codes must get changed.
+ */
+ ipmi_device->dead = true;
+}
+
+static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
+{
+ struct acpi_ipmi_device *ipmi_device = NULL;
+
+ mutex_lock(&driver_data.ipmi_lock);
+ if (driver_data.selected_smi) {
+ ipmi_device = driver_data.selected_smi;
+ kref_get(&ipmi_device->kref);
+ }
+ mutex_unlock(&driver_data.ipmi_lock);
+
+ return ipmi_device;
+}
+
+static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
+{
+ kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
+}
+
+static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
{
+ struct acpi_ipmi_device *ipmi;
struct acpi_ipmi_msg *ipmi_msg;
- struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+
+ ipmi = acpi_ipmi_dev_get();
+ if (!ipmi)
+ return NULL;
ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
- if (!ipmi_msg) {
- dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n");
+ if (!ipmi_msg) {
+ acpi_ipmi_dev_put(ipmi);
return NULL;
}
+
+ kref_init(&ipmi_msg->kref);
init_completion(&ipmi_msg->tx_complete);
INIT_LIST_HEAD(&ipmi_msg->head);
ipmi_msg->device = ipmi;
+ ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;
+
return ipmi_msg;
}
-#define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff)
-#define IPMI_OP_RGN_CMD(offset) (offset & 0xff)
-static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
- acpi_physical_address address,
- acpi_integer *value)
+static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
+{
+ acpi_ipmi_dev_put(tx_msg->device);
+ kfree(tx_msg);
+}
+
+static void ipmi_msg_release_kref(struct kref *kref)
+{
+ struct acpi_ipmi_msg *tx_msg =
+ container_of(kref, struct acpi_ipmi_msg, kref);
+
+ ipmi_msg_release(tx_msg);
+}
+
+static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
+{
+ kref_get(&tx_msg->kref);
+
+ return tx_msg;
+}
+
+static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
+{
+ kref_put(&tx_msg->kref, ipmi_msg_release_kref);
+}
+
+#define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff)
+#define IPMI_OP_RGN_CMD(offset) (offset & 0xff)
+static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
+ acpi_physical_address address,
+ acpi_integer *value)
{
struct kernel_ipmi_msg *msg;
struct acpi_ipmi_buffer *buffer;
@@ -151,21 +264,31 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
unsigned long flags;
msg = &tx_msg->tx_message;
+
/*
* IPMI network function and command are encoded in the address
* within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
*/
msg->netfn = IPMI_OP_RGN_NETFN(address);
msg->cmd = IPMI_OP_RGN_CMD(address);
- msg->data = tx_msg->tx_data;
+ msg->data = tx_msg->data;
+
/*
* value is the parameter passed by the IPMI opregion space handler.
* It points to the IPMI request message buffer
*/
buffer = (struct acpi_ipmi_buffer *)value;
+
/* copy the tx message data */
+ if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
+ dev_WARN_ONCE(tx_msg->device->dev, true,
+ "Unexpected request (msg len %d).\n",
+ buffer->length);
+ return -EINVAL;
+ }
msg->data_len = buffer->length;
- memcpy(tx_msg->tx_data, buffer->data, msg->data_len);
+ memcpy(tx_msg->data, buffer->data, msg->data_len);
+
/*
* now the default type is SYSTEM_INTERFACE and channel type is BMC.
* If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
@@ -179,14 +302,17 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
/* Get the msgid */
device = tx_msg->device;
+
spin_lock_irqsave(&device->tx_msg_lock, flags);
device->curr_msgid++;
tx_msg->tx_msgid = device->curr_msgid;
spin_unlock_irqrestore(&device->tx_msg_lock, flags);
+
+ return 0;
}
static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
- acpi_integer *value, int rem_time)
+ acpi_integer *value)
{
struct acpi_ipmi_buffer *buffer;
@@ -195,110 +321,158 @@ static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
* IPMI message returned by IPMI command.
*/
buffer = (struct acpi_ipmi_buffer *)value;
- if (!rem_time && !msg->msg_done) {
- buffer->status = ACPI_IPMI_TIMEOUT;
- return;
- }
+
/*
- * If the flag of msg_done is not set or the recv length is zero, it
- * means that the IPMI command is not executed correctly.
- * The status code will be ACPI_IPMI_UNKNOWN.
+ * If the msg_done flag is not set, the IPMI command was not executed
+ * correctly.
*/
- if (!msg->msg_done || !msg->rx_len) {
- buffer->status = ACPI_IPMI_UNKNOWN;
+ buffer->status = msg->msg_done;
+ if (msg->msg_done != ACPI_IPMI_OK)
return;
- }
+
/*
* If the IPMI response message is obtained correctly, the status code
* will be ACPI_IPMI_OK
*/
- buffer->status = ACPI_IPMI_OK;
buffer->length = msg->rx_len;
- memcpy(buffer->data, msg->rx_data, msg->rx_len);
+ memcpy(buffer->data, msg->data, msg->rx_len);
}
static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
{
- struct acpi_ipmi_msg *tx_msg, *temp;
- int count = HZ / 10;
- struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+ struct acpi_ipmi_msg *tx_msg;
+ unsigned long flags;
+
+ /*
+ * NOTE: On-going ipmi_recv_msg
+ * ipmi_msg_handler() may still be invoked by ipmi_si after
+ * flushing. But it is safe to do a fast flushing on module_exit()
+ * without waiting for all ipmi_recv_msg(s) to complete from
+ * ipmi_msg_handler() as it is ensured by ipmi_si that all
+ * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
+ */
+ spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+ while (!list_empty(&ipmi->tx_msg_list)) {
+ tx_msg = list_first_entry(&ipmi->tx_msg_list,
+ struct acpi_ipmi_msg,
+ head);
+ list_del(&tx_msg->head);
+ spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
- list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
/* wake up the sleep thread on the Tx msg */
complete(&tx_msg->tx_complete);
+ acpi_ipmi_msg_put(tx_msg);
+ spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
}
+ spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+}
+
+static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
+ struct acpi_ipmi_msg *msg)
+{
+ struct acpi_ipmi_msg *tx_msg, *temp;
+ bool msg_found = false;
+ unsigned long flags;
- /* wait for about 100ms to flush the tx message list */
- while (count--) {
- if (list_empty(&ipmi->tx_msg_list))
+ spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+ list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
+ if (msg == tx_msg) {
+ msg_found = true;
+ list_del(&tx_msg->head);
break;
- schedule_timeout(1);
+ }
}
- if (!list_empty(&ipmi->tx_msg_list))
- dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n");
+ spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+
+ if (msg_found)
+ acpi_ipmi_msg_put(tx_msg);
}
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
struct acpi_ipmi_device *ipmi_device = user_msg_data;
- int msg_found = 0;
- struct acpi_ipmi_msg *tx_msg;
- struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+ bool msg_found = false;
+ struct acpi_ipmi_msg *tx_msg, *temp;
+ struct device *dev = ipmi_device->dev;
unsigned long flags;
if (msg->user != ipmi_device->user_interface) {
- dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
- "returned user %p, expected user %p\n",
- msg->user, ipmi_device->user_interface);
- ipmi_free_recv_msg(msg);
- return;
+ dev_warn(dev,
+ "Unexpected response is returned. returned user %p, expected user %p\n",
+ msg->user, ipmi_device->user_interface);
+ goto out_msg;
}
+
spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
- list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+ list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
if (msg->msgid == tx_msg->tx_msgid) {
- msg_found = 1;
+ msg_found = true;
+ list_del(&tx_msg->head);
break;
}
}
-
spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+
if (!msg_found) {
- dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
- "returned.\n", msg->msgid);
- ipmi_free_recv_msg(msg);
- return;
+ dev_warn(dev,
+ "Unexpected response (msg id %ld) is returned.\n",
+ msg->msgid);
+ goto out_msg;
}
- if (msg->msg.data_len) {
- /* copy the response data to Rx_data buffer */
- memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len);
- tx_msg->rx_len = msg->msg.data_len;
- tx_msg->msg_done = 1;
+ /* copy the response data to Rx_data buffer */
+ if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
+ dev_WARN_ONCE(dev, true,
+ "Unexpected response (msg len %d).\n",
+ msg->msg.data_len);
+ goto out_comp;
}
+
+ /* response msg is an error msg */
+ msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE &&
+ msg->msg.data_len == 1) {
+ if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
+ dev_WARN_ONCE(dev, true,
+ "Unexpected response (timeout).\n");
+ tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
+ }
+ goto out_comp;
+ }
+
+ tx_msg->rx_len = msg->msg.data_len;
+ memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
+ tx_msg->msg_done = ACPI_IPMI_OK;
+
+out_comp:
complete(&tx_msg->tx_complete);
+ acpi_ipmi_msg_put(tx_msg);
+out_msg:
ipmi_free_recv_msg(msg);
-};
+}
static void ipmi_register_bmc(int iface, struct device *dev)
{
struct acpi_ipmi_device *ipmi_device, *temp;
- struct pnp_dev *pnp_dev;
- ipmi_user_t user;
int err;
struct ipmi_smi_info smi_data;
acpi_handle handle;
err = ipmi_get_smi_info(iface, &smi_data);
-
if (err)
return;
- if (smi_data.addr_src != SI_ACPI) {
- put_device(smi_data.dev);
- return;
- }
-
+ if (smi_data.addr_src != SI_ACPI)
+ goto err_ref;
handle = smi_data.addr_info.acpi_info.acpi_handle;
+ if (!handle)
+ goto err_ref;
+
+ ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle);
+ if (!ipmi_device) {
+ dev_warn(smi_data.dev, "Can't create IPMI user interface\n");
+ goto err_ref;
+ }
mutex_lock(&driver_data.ipmi_lock);
list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
@@ -307,34 +481,20 @@ static void ipmi_register_bmc(int iface, struct device *dev)
* to the device list, don't add it again.
*/
if (temp->handle == handle)
- goto out;
+ goto err_lock;
}
-
- ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
-
- if (!ipmi_device)
- goto out;
-
- pnp_dev = to_pnp_dev(smi_data.dev);
- ipmi_device->handle = handle;
- ipmi_device->pnp_dev = pnp_dev;
-
- err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
- ipmi_device, &user);
- if (err) {
- dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
- kfree(ipmi_device);
- goto out;
- }
- acpi_add_ipmi_device(ipmi_device);
- ipmi_device->user_interface = user;
- ipmi_device->ipmi_ifnum = iface;
+ if (!driver_data.selected_smi)
+ driver_data.selected_smi = ipmi_device;
+ list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
mutex_unlock(&driver_data.ipmi_lock);
- memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info));
+
+ put_device(smi_data.dev);
return;
-out:
+err_lock:
mutex_unlock(&driver_data.ipmi_lock);
+ ipmi_dev_release(ipmi_device);
+err_ref:
put_device(smi_data.dev);
return;
}
@@ -342,23 +502,29 @@ out:
static void ipmi_bmc_gone(int iface)
{
struct acpi_ipmi_device *ipmi_device, *temp;
+ bool dev_found = false;
mutex_lock(&driver_data.ipmi_lock);
list_for_each_entry_safe(ipmi_device, temp,
- &driver_data.ipmi_devices, head) {
- if (ipmi_device->ipmi_ifnum != iface)
- continue;
-
- acpi_remove_ipmi_device(ipmi_device);
- put_device(ipmi_device->smi_data.dev);
- kfree(ipmi_device);
- break;
+ &driver_data.ipmi_devices, head) {
+ if (ipmi_device->ipmi_ifnum == iface) {
+ dev_found = true;
+ __ipmi_dev_kill(ipmi_device);
+ break;
+ }
}
+ if (!driver_data.selected_smi)
+ driver_data.selected_smi = list_first_entry_or_null(
+ &driver_data.ipmi_devices,
+ struct acpi_ipmi_device, head);
mutex_unlock(&driver_data.ipmi_lock);
+
+ if (dev_found) {
+ ipmi_flush_tx_msg(ipmi_device);
+ acpi_ipmi_dev_put(ipmi_device);
+ }
}
-/* --------------------------------------------------------------------------
- * Address Space Management
- * -------------------------------------------------------------------------- */
+
/*
* This is the IPMI opregion space handler.
* @function: indicates the read/write. In fact as the IPMI message is driven
@@ -371,17 +537,17 @@ static void ipmi_bmc_gone(int iface)
* the response IPMI message returned by IPMI command.
* @handler_context: IPMI device context.
*/
-
static acpi_status
acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
- u32 bits, acpi_integer *value,
- void *handler_context, void *region_context)
+ u32 bits, acpi_integer *value,
+ void *handler_context, void *region_context)
{
struct acpi_ipmi_msg *tx_msg;
- struct acpi_ipmi_device *ipmi_device = handler_context;
- int err, rem_time;
+ struct acpi_ipmi_device *ipmi_device;
+ int err;
acpi_status status;
unsigned long flags;
+
/*
* IPMI opregion message.
* IPMI message is firstly written to the BMC and system software
@@ -391,118 +557,75 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
if ((function & ACPI_IO_MASK) == ACPI_READ)
return AE_TYPE;
- if (!ipmi_device->user_interface)
+ tx_msg = ipmi_msg_alloc();
+ if (!tx_msg)
return AE_NOT_EXIST;
+ ipmi_device = tx_msg->device;
- tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
- if (!tx_msg)
- return AE_NO_MEMORY;
+ if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
+ ipmi_msg_release(tx_msg);
+ return AE_TYPE;
+ }
- acpi_format_ipmi_msg(tx_msg, address, value);
+ acpi_ipmi_msg_get(tx_msg);
+ mutex_lock(&driver_data.ipmi_lock);
+ /* Do not add a tx_msg that cannot be flushed. */
+ if (ipmi_device->dead) {
+ mutex_unlock(&driver_data.ipmi_lock);
+ ipmi_msg_release(tx_msg);
+ return AE_NOT_EXIST;
+ }
spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+ mutex_unlock(&driver_data.ipmi_lock);
+
err = ipmi_request_settime(ipmi_device->user_interface,
- &tx_msg->addr,
- tx_msg->tx_msgid,
- &tx_msg->tx_message,
- NULL, 0, 0, 0);
+ &tx_msg->addr,
+ tx_msg->tx_msgid,
+ &tx_msg->tx_message,
+ NULL, 0, 0, IPMI_TIMEOUT);
if (err) {
status = AE_ERROR;
- goto end_label;
+ goto out_msg;
}
- rem_time = wait_for_completion_timeout(&tx_msg->tx_complete,
- IPMI_TIMEOUT);
- acpi_format_ipmi_response(tx_msg, value, rem_time);
+ wait_for_completion(&tx_msg->tx_complete);
+
+ acpi_format_ipmi_response(tx_msg, value);
status = AE_OK;
-end_label:
- spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
- list_del(&tx_msg->head);
- spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
- kfree(tx_msg);
+out_msg:
+ ipmi_cancel_tx_msg(ipmi_device, tx_msg);
+ acpi_ipmi_msg_put(tx_msg);
return status;
}
-static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi)
-{
- if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
- return;
-
- acpi_remove_address_space_handler(ipmi->handle,
- ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);
-
- clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
-}
-
-static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
+static int __init acpi_ipmi_init(void)
{
+ int result;
acpi_status status;
- if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
+ if (acpi_disabled)
return 0;
- status = acpi_install_address_space_handler(ipmi->handle,
+ status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_IPMI,
&acpi_ipmi_space_handler,
- NULL, ipmi);
+ NULL, NULL);
if (ACPI_FAILURE(status)) {
- struct pnp_dev *pnp_dev = ipmi->pnp_dev;
- dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space "
- "handle\n");
+ pr_warn("Can't register IPMI opregion space handle\n");
return -EINVAL;
}
- set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
- return 0;
-}
-
-static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
-{
-
- INIT_LIST_HEAD(&ipmi_device->head);
-
- spin_lock_init(&ipmi_device->tx_msg_lock);
- INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
- ipmi_install_space_handler(ipmi_device);
-
- list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
-}
-
-static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device)
-{
- /*
- * If the IPMI user interface is created, it should be
- * destroyed.
- */
- if (ipmi_device->user_interface) {
- ipmi_destroy_user(ipmi_device->user_interface);
- ipmi_device->user_interface = NULL;
- }
- /* flush the Tx_msg list */
- if (!list_empty(&ipmi_device->tx_msg_list))
- ipmi_flush_tx_msg(ipmi_device);
-
- list_del(&ipmi_device->head);
- ipmi_remove_space_handler(ipmi_device);
-}
-
-static int __init acpi_ipmi_init(void)
-{
- int result = 0;
-
- if (acpi_disabled)
- return result;
-
- mutex_init(&driver_data.ipmi_lock);
-
result = ipmi_smi_watcher_register(&driver_data.bmc_events);
+ if (result)
+ pr_err("Can't register IPMI system interface watcher\n");
return result;
}
static void __exit acpi_ipmi_exit(void)
{
- struct acpi_ipmi_device *ipmi_device, *temp;
+ struct acpi_ipmi_device *ipmi_device;
if (acpi_disabled)
return;
@@ -516,13 +639,22 @@ static void __exit acpi_ipmi_exit(void)
* handler and free it.
*/
mutex_lock(&driver_data.ipmi_lock);
- list_for_each_entry_safe(ipmi_device, temp,
- &driver_data.ipmi_devices, head) {
- acpi_remove_ipmi_device(ipmi_device);
- put_device(ipmi_device->smi_data.dev);
- kfree(ipmi_device);
+ while (!list_empty(&driver_data.ipmi_devices)) {
+ ipmi_device = list_first_entry(&driver_data.ipmi_devices,
+ struct acpi_ipmi_device,
+ head);
+ __ipmi_dev_kill(ipmi_device);
+ mutex_unlock(&driver_data.ipmi_lock);
+
+ ipmi_flush_tx_msg(ipmi_device);
+ acpi_ipmi_dev_put(ipmi_device);
+
+ mutex_lock(&driver_data.ipmi_lock);
}
mutex_unlock(&driver_data.ipmi_lock);
+ acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
+ ACPI_ADR_SPACE_IPMI,
+ &acpi_ipmi_space_handler);
}
module_init(acpi_ipmi_init);
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index fb78bb9ad8f6..d3961014aad7 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -30,6 +30,7 @@ ACPI_MODULE_NAME("acpi_lpss");
/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_GENERAL 0x08
#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
+#define LPSS_GENERAL_UART_RTS_OVRD BIT(3)
#define LPSS_SW_LTR 0x10
#define LPSS_AUTO_LTR 0x14
#define LPSS_TX_INT 0x20
@@ -68,11 +69,16 @@ struct lpss_private_data {
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
- unsigned int tx_int_offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
+ unsigned int offset;
u32 reg;
- reg = readl(pdata->mmio_base + tx_int_offset);
- writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + tx_int_offset);
+ offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
+ reg = readl(pdata->mmio_base + offset);
+ writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
+
+ offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
+ reg = readl(pdata->mmio_base + offset);
+ writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset);
}
static struct lpss_device_desc lpt_dev_desc = {
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 999adb5499c7..551dad712ffe 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -152,8 +152,9 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
unsigned long long current_status;
/* Get device present/absent information from the _STA */
- if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, "_STA",
- NULL, &current_status)))
+ if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
+ METHOD_NAME__STA, NULL,
+ &current_status)))
return -ENODEV;
/*
* Check for device status. Device should be
@@ -281,7 +282,7 @@ static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
if (!info->enabled)
continue;
- if (nid < 0)
+ if (nid == NUMA_NO_NODE)
nid = memory_add_physaddr_to_nid(info->start_addr);
acpi_unbind_memory_blocks(info, handle);
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 1bde12708f9e..8a4cfc7e71f0 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -29,6 +29,13 @@ ACPI_MODULE_NAME("platform");
static const struct acpi_device_id acpi_platform_device_ids[] = {
{ "PNP0D40" },
+ { "ACPI0003" },
+ { "VPC2004" },
+ { "BCM4752" },
+
+ /* Intel Smart Sound Technology */
+ { "INT33C8" },
+ { "80860F28" },
{ }
};
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index f29e06efa479..3c1d6b0c09a4 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -140,15 +140,11 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
return 0;
}
-static int acpi_processor_errata(struct acpi_processor *pr)
+static int acpi_processor_errata(void)
{
int result = 0;
struct pci_dev *dev = NULL;
-
- if (!pr)
- return -EINVAL;
-
/*
* PIIX4
*/
@@ -181,7 +177,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
cpu_maps_update_begin();
cpu_hotplug_begin();
- ret = acpi_map_lsapic(pr->handle, &pr->id);
+ ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
if (ret)
goto out;
@@ -219,11 +215,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
int cpu_index, device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
+ unsigned long long value;
- if (num_online_cpus() > 1)
- errata.smp = TRUE;
-
- acpi_processor_errata(pr);
+ acpi_processor_errata();
/*
* Check to see if we have bus mastering arbitration control. This
@@ -247,18 +241,12 @@ static int acpi_processor_get_info(struct acpi_device *device)
return -ENODEV;
}
- /*
- * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
- * >>> 'acpi_get_processor_id(acpi_id, &id)' in
- * arch/xxx/acpi.c
- */
pr->acpi_id = object.processor.proc_id;
} else {
/*
* Declared with "Device" statement; match _UID.
* Note that we don't handle string _UIDs yet.
*/
- unsigned long long value;
status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
NULL, &value);
if (ACPI_FAILURE(status)) {
@@ -270,7 +258,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
device_declaration = 1;
pr->acpi_id = value;
}
- cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id);
+ pr->apic_id = acpi_get_apicid(pr->handle, device_declaration,
+ pr->acpi_id);
+ cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
if (!cpu0_initialized && (cpu_index == -1) &&
@@ -332,9 +322,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
* ensure we get the right value in the "physical id" field
* of /proc/cpuinfo
*/
- status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
+ status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
if (ACPI_SUCCESS(status))
- arch_fix_phys_package_id(pr->id, object.integer.value);
+ arch_fix_phys_package_id(pr->id, value);
return 0;
}
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 9feba08c29fe..a9fd0b872062 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -114,10 +114,12 @@ ACPI_HW_DEPENDENT_RETURN_VOID(void
acpi_db_generate_gpe(char *gpe_arg,
char *block_arg))
+ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_generate_sci(void))
+
/*
* dbconvert - miscellaneous conversion routines
*/
- acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
+acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object);
@@ -154,6 +156,8 @@ void acpi_db_set_scope(char *name);
void acpi_db_dump_namespace(char *start_arg, char *depth_arg);
+void acpi_db_dump_namespace_paths(void);
+
void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg);
acpi_status acpi_db_find_name_in_namespace(char *name_arg);
@@ -240,6 +244,8 @@ void acpi_db_display_history(void);
char *acpi_db_get_from_history(char *command_num_arg);
+char *acpi_db_get_history_by_index(u32 command_num);
+
/*
* dbinput - user front-end to the AML debugger
*/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index ab0e97710381..41abe552c7a3 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -71,7 +71,8 @@ acpi_status acpi_ev_init_global_lock_handler(void);
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
acpi_ev_acquire_global_lock(u16 timeout))
- ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
+
+ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
acpi_status acpi_ev_remove_global_lock_handler(void);
/*
@@ -242,11 +243,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
*/
u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context);
-u32 acpi_ev_install_sci_handler(void);
+u32 acpi_ev_sci_dispatch(void);
-acpi_status acpi_ev_remove_sci_handler(void);
+u32 acpi_ev_install_sci_handler(void);
-u32 acpi_ev_initialize_SCI(u32 program_SCI);
+acpi_status acpi_ev_remove_all_sci_handlers(void);
ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))
#endif /* __ACEVENTS_H__ */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 90e846f985fa..e9f1fc7f99c7 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -269,6 +269,7 @@ ACPI_EXTERN acpi_table_handler acpi_gbl_table_handler;
ACPI_EXTERN void *acpi_gbl_table_handler_context;
ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
+ACPI_EXTERN struct acpi_sci_handler_info *acpi_gbl_sci_handler_list;
/* Owner ID support */
@@ -405,7 +406,9 @@ extern u32 acpi_gbl_nesting_level;
/* Event counters */
+ACPI_EXTERN u32 acpi_method_count;
ACPI_EXTERN u32 acpi_gpe_count;
+ACPI_EXTERN u32 acpi_sci_count;
ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
/* Support for dynamic control method tracing mechanism */
@@ -445,13 +448,6 @@ ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support;
-
-ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_scope_buf[80];
-ACPI_EXTERN char acpi_gbl_db_debug_filename[80];
ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
ACPI_EXTERN char *acpi_gbl_db_buffer;
ACPI_EXTERN char *acpi_gbl_db_filename;
@@ -459,6 +455,16 @@ ACPI_EXTERN u32 acpi_gbl_db_debug_level;
ACPI_EXTERN u32 acpi_gbl_db_console_debug_level;
ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node;
+ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
+
+/* These buffers should all be the same size */
+
+ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE];
+
/*
* Statistic globals
*/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0ed00669cd21..53ed1a8ba4f0 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -398,6 +398,14 @@ struct acpi_simple_repair_info {
*
****************************************************************************/
+/* Dispatch info for each host-installed SCI handler */
+
+struct acpi_sci_handler_info {
+ struct acpi_sci_handler_info *next;
+ acpi_sci_handler address; /* Address of handler */
+ void *context; /* Context to be passed to handler */
+};
+
/* Dispatch info for each GPE -- either a method or handler, cannot be both */
struct acpi_gpe_handler_info {
@@ -1064,7 +1072,7 @@ struct acpi_db_method_info {
char *name;
u32 flags;
u32 num_loops;
- char pathname[128];
+ char pathname[ACPI_DB_LINE_BUFFER_SIZE];
char **args;
acpi_object_type *types;
@@ -1086,6 +1094,7 @@ struct acpi_integrity_info {
u32 objects;
};
+#define ACPI_DB_DISABLE_OUTPUT 0x00
#define ACPI_DB_REDIRECTABLE_OUTPUT 0x01
#define ACPI_DB_CONSOLE_OUTPUT 0x02
#define ACPI_DB_DUPLICATE_OUTPUT 0x03
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 530a2f8c1252..2a86c65d873b 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -410,37 +410,6 @@
#endif
/*
- * Memory allocation tracking (DEBUG ONLY)
- */
-#define ACPI_MEM_PARAMETERS _COMPONENT, _acpi_module_name, __LINE__
-
-#ifndef ACPI_DBG_TRACK_ALLOCATIONS
-
-/* Memory allocation */
-
-#ifndef ACPI_ALLOCATE
-#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#endif
-#ifndef ACPI_ALLOCATE_ZEROED
-#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#endif
-#ifndef ACPI_FREE
-#define ACPI_FREE(a) acpi_os_free(a)
-#endif
-#define ACPI_MEM_TRACKING(a)
-
-#else
-
-/* Memory allocation */
-
-#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#define ACPI_FREE(a) acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS)
-#define ACPI_MEM_TRACKING(a) a
-
-#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
-
-/*
* Macros used for ACPICA utilities only
*/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 40b04bd5579e..e6138ac4a160 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -213,6 +213,12 @@ acpi_ns_dump_objects(acpi_object_type type,
u8 display_type,
u32 max_depth,
acpi_owner_id owner_id, acpi_handle start_handle);
+
+void
+acpi_ns_dump_object_paths(acpi_object_type type,
+ u8 display_type,
+ u32 max_depth,
+ acpi_owner_id owner_id, acpi_handle start_handle);
#endif /* ACPI_FUTURE_USAGE */
/*
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index d5a62a6182bb..be8180c17d7e 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -628,6 +628,17 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position);
void acpi_ut_repair_name(char *name);
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source);
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source);
+
+u8
+acpi_ut_safe_strncat(char *dest,
+ acpi_size dest_size,
+ char *source, acpi_size max_transfer_length);
+#endif
+
/*
* utmutex - mutex support
*/
@@ -652,12 +663,6 @@ acpi_status
acpi_ut_initialize_buffer(struct acpi_buffer *buffer,
acpi_size required_length);
-void *acpi_ut_allocate(acpi_size size,
- u32 component, const char *module, u32 line);
-
-void *acpi_ut_allocate_zeroed(acpi_size size,
- u32 component, const char *module, u32 line);
-
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
void *acpi_ut_allocate_and_track(acpi_size size,
u32 component, const char *module, u32 line);
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index fb09b08d7080..afdc6df17abf 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -158,7 +158,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
walk_state->deferred_node = node;
status = acpi_ps_parse_aml(walk_state);
- cleanup:
+cleanup:
acpi_ps_delete_parse_tree(op);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index d4bfe7b7f90a..2d4c07322576 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -259,7 +259,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
goto cleanup;
}
- cleanup:
+cleanup:
/* Remove local reference to the object */
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index a9ffd44c18fe..81a78ba84311 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -292,9 +292,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
* reentered one more time (even if it is the same thread)
*/
obj_desc->method.thread_count++;
+ acpi_method_count++;
return_ACPI_STATUS(status);
- cleanup:
+cleanup:
/* On error, must release the method mutex (if present) */
if (obj_desc->method.mutex) {
@@ -424,7 +425,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(status);
- cleanup:
+cleanup:
/* On error, we must terminate the method properly */
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 63f0d220ca3d..b1746a68dad1 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -240,7 +240,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
- exit:
+exit:
*obj_desc_ptr = obj_desc;
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 1fc1ff114f26..5205edcf2c01 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -257,7 +257,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
(buffer_desc->common.reference_count +
obj_desc->common.reference_count);
- cleanup:
+cleanup:
/* Always delete the operands */
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index c666fc014987..ade44e49deb4 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -299,7 +299,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
goto result_used;
}
- result_used:
+result_used:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Result of [%s] used by Parent [%s] Op=%p\n",
acpi_ps_get_opcode_name(op->common.aml_opcode),
@@ -308,7 +308,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
return_UINT8(TRUE);
- result_not_used:
+result_not_used:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Result of [%s] not used by Parent [%s] Op=%p\n",
acpi_ps_get_opcode_name(op->common.aml_opcode),
@@ -752,7 +752,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
- cleanup:
+cleanup:
/*
* We must undo everything done above; meaning that we must
* pop everything off of the operand stack and delete those
@@ -851,7 +851,7 @@ acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state)
goto exit;
}
- push_result:
+push_result:
walk_state->result_obj = new_obj_desc;
@@ -863,7 +863,7 @@ acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state)
op->common.flags |= ACPI_PARSEOP_IN_STACK;
}
- exit:
+exit:
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 151d924817e1..1bbb22fd6fa0 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -170,7 +170,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
(void)acpi_ds_do_implicit_return(local_obj_desc, walk_state, TRUE);
- cleanup:
+cleanup:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n",
walk_state->control_state->common.value,
@@ -335,7 +335,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
- error_exit:
+error_exit:
status = acpi_ds_method_error(status, walk_state);
return_ACPI_STATUS(status);
}
@@ -722,7 +722,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
walk_state->result_obj = NULL;
}
- cleanup:
+cleanup:
if (walk_state->result_obj) {
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index b1f8f4725c23..7f569d573027 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -728,7 +728,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
break;
}
- cleanup:
+cleanup:
/* Remove the Node pushed at the very beginning */
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index fdb0a76e40a3..4c67193a9fa7 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -173,7 +173,7 @@ static u32 acpi_ev_global_lock_handler(void *context)
acpi_gbl_global_lock_pending = FALSE;
- cleanup_and_exit:
+cleanup_and_exit:
acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
return (ACPI_INTERRUPT_HANDLED);
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index c8a1f7d5931f..a9cb4a1a4bb8 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -458,7 +458,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
gpe_block = gpe_block->next;
}
- unlock_and_exit:
+unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return (int_status);
@@ -522,6 +522,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
+ ACPI_FREE(local_gpe_event_info);
return_VOID;
}
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index c1aa1eda26c3..a9e76bc4ad97 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -111,7 +111,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
gpe_block->xrupt_block = gpe_xrupt_block;
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- unlock_and_exit:
+unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -178,7 +178,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
ACPI_FREE(gpe_block->event_info);
ACPI_FREE(gpe_block);
- unlock_and_exit:
+unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -302,7 +302,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
return_ACPI_STATUS(AE_OK);
- error_exit:
+error_exit:
if (gpe_register_info) {
ACPI_FREE(gpe_register_info);
}
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 7842700346a4..a3e2f38aadf6 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -203,7 +203,7 @@ acpi_status acpi_ev_gpe_initialize(void)
goto cleanup;
}
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index b24dbb80fab8..d3f5e1e2a2b1 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -101,7 +101,7 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
gpe_xrupt_info = gpe_xrupt_info->next;
}
- unlock_and_exit:
+unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -196,7 +196,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
*
* FUNCTION: acpi_ev_get_gpe_xrupt_block
*
- * PARAMETERS: interrupt_number - Interrupt for a GPE block
+ * PARAMETERS: interrupt_number - Interrupt for a GPE block
*
* RETURN: A GPE interrupt block
*
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 068af96134b8..e3157313eb27 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -129,7 +129,7 @@ acpi_status acpi_ev_install_region_handlers(void)
}
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
@@ -531,6 +531,6 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
acpi_ev_install_handler, NULL,
handler_obj, NULL);
- unlock_and_exit:
+unlock_and_exit:
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 1b111ef74903..a5687540e9a6 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -264,13 +264,6 @@ void acpi_ev_terminate(void)
status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
- /* Remove SCI handler */
-
- status = acpi_ev_remove_sci_handler();
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
- }
-
status = acpi_ev_remove_global_lock_handler();
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
@@ -280,6 +273,13 @@ void acpi_ev_terminate(void)
acpi_gbl_events_initialized = FALSE;
}
+ /* Remove SCI handlers */
+
+ status = acpi_ev_remove_all_sci_handlers();
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
+ }
+
/* Deallocate all handler objects installed within GPE info structs */
status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index cea14d6fc76c..144cbb9b73bc 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -217,16 +217,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
- if (region_obj2->extra.region_context) {
-
- /* The handler for this region was already installed */
-
- ACPI_FREE(region_context);
- } else {
- /*
- * Save the returned context for use in all accesses to
- * this particular region
- */
+ /*
+ * Save the returned context for use in all accesses to
+ * the handler for this particular region
+ */
+ if (!(region_obj2->extra.region_context)) {
region_obj2->extra.region_context =
region_context;
}
@@ -402,6 +397,14 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
handler_obj->address_space.
context, region_context);
+ /*
+ * region_context should have been released by the deactivate
+ * operation. We don't need access to it anymore here.
+ */
+ if (region_context) {
+ *region_context = NULL;
+ }
+
/* Init routine may fail, Just ignore errors */
if (ACPI_FAILURE(status)) {
@@ -570,10 +573,10 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
status = acpi_ns_evaluate(info);
acpi_ut_remove_reference(args[1]);
- cleanup2:
+cleanup2:
acpi_ut_remove_reference(args[0]);
- cleanup1:
+cleanup1:
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
@@ -758,7 +761,7 @@ acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node)
status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
- exit:
+exit:
/* We ignore all errors from above, don't care */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index b905acf7aacd..9e9e3454d893 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -54,6 +54,50 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
/*******************************************************************************
*
+ * FUNCTION: acpi_ev_sci_dispatch
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status code indicates whether interrupt was handled.
+ *
+ * DESCRIPTION: Dispatch the SCI to all host-installed SCI handlers.
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_sci_dispatch(void)
+{
+ struct acpi_sci_handler_info *sci_handler;
+ acpi_cpu_flags flags;
+ u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
+
+ ACPI_FUNCTION_NAME(ev_sci_dispatch);
+
+ /* Are there any host-installed SCI handlers? */
+
+ if (!acpi_gbl_sci_handler_list) {
+ return (int_status);
+ }
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Invoke all host-installed SCI handlers */
+
+ sci_handler = acpi_gbl_sci_handler_list;
+ while (sci_handler) {
+
+ /* Invoke the installed handler (at interrupt level) */
+
+ int_status |= sci_handler->address(sci_handler->context);
+
+ sci_handler = sci_handler->next;
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return (int_status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ev_sci_xrupt_handler
*
* PARAMETERS: context - Calling Context
@@ -89,6 +133,11 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
*/
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
+ /* Invoke all host-installed SCI handlers */
+
+ interrupt_handled |= acpi_ev_sci_dispatch();
+
+ acpi_sci_count++;
return_UINT32(interrupt_handled);
}
@@ -112,14 +161,13 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
/*
- * We are guaranteed by the ACPI CA initialization/shutdown code that
+ * We are guaranteed by the ACPICA initialization/shutdown code that
* if this interrupt handler is installed, ACPI is enabled.
*/
/* GPEs: Check for and dispatch any GPEs that have occurred */
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
-
return_UINT32(interrupt_handled);
}
@@ -150,15 +198,15 @@ u32 acpi_ev_install_sci_handler(void)
/******************************************************************************
*
- * FUNCTION: acpi_ev_remove_sci_handler
+ * FUNCTION: acpi_ev_remove_all_sci_handlers
*
* PARAMETERS: none
*
- * RETURN: E_OK if handler uninstalled OK, E_ERROR if handler was not
+ * RETURN: AE_OK if handler uninstalled, AE_ERROR if handler was not
* installed to begin with
*
* DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be
- * taken.
+ * taken. Remove all host-installed SCI handlers.
*
* Note: It doesn't seem important to disable all events or set the event
* enable registers to their original values. The OS should disable
@@ -167,11 +215,13 @@ u32 acpi_ev_install_sci_handler(void)
*
******************************************************************************/
-acpi_status acpi_ev_remove_sci_handler(void)
+acpi_status acpi_ev_remove_all_sci_handlers(void)
{
+ struct acpi_sci_handler_info *sci_handler;
+ acpi_cpu_flags flags;
acpi_status status;
- ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
+ ACPI_FUNCTION_TRACE(ev_remove_all_sci_handlers);
/* Just let the OS remove the handler and disable the level */
@@ -179,6 +229,21 @@ acpi_status acpi_ev_remove_sci_handler(void)
acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
acpi_ev_sci_xrupt_handler);
+ if (!acpi_gbl_sci_handler_list) {
+ return (status);
+ }
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Free all host-installed SCI handlers */
+
+ while (acpi_gbl_sci_handler_list) {
+ sci_handler = acpi_gbl_sci_handler_list;
+ acpi_gbl_sci_handler_list = sci_handler->next;
+ ACPI_FREE(sci_handler);
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index ca5fba99c33b..23a7fadca412 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -374,7 +375,7 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
acpi_gbl_exception_handler = handler;
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -385,6 +386,144 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
+ * FUNCTION: acpi_install_sci_handler
+ *
+ * PARAMETERS: address - Address of the handler
+ * context - Value passed to the handler on each SCI
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+acpi_status acpi_install_sci_handler(acpi_sci_handler address, void *context)
+{
+ struct acpi_sci_handler_info *new_sci_handler;
+ struct acpi_sci_handler_info *sci_handler;
+ acpi_cpu_flags flags;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_sci_handler);
+
+ if (!address) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Allocate and init a handler object */
+
+ new_sci_handler = ACPI_ALLOCATE(sizeof(struct acpi_sci_handler_info));
+ if (!new_sci_handler) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ new_sci_handler->address = address;
+ new_sci_handler->context = context;
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Lock list during installation */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ sci_handler = acpi_gbl_sci_handler_list;
+
+ /* Ensure handler does not already exist */
+
+ while (sci_handler) {
+ if (address == sci_handler->address) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock_and_exit;
+ }
+
+ sci_handler = sci_handler->next;
+ }
+
+ /* Install the new handler into the global list (at head) */
+
+ new_sci_handler->next = acpi_gbl_sci_handler_list;
+ acpi_gbl_sci_handler_list = new_sci_handler;
+
+unlock_and_exit:
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+exit:
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(new_sci_handler);
+ }
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_sci_handler
+ *
+ * PARAMETERS: address - Address of the handler
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_remove_sci_handler(acpi_sci_handler address)
+{
+ struct acpi_sci_handler_info *prev_sci_handler;
+ struct acpi_sci_handler_info *next_sci_handler;
+ acpi_cpu_flags flags;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_sci_handler);
+
+ if (!address) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Remove the SCI handler with lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ prev_sci_handler = NULL;
+ next_sci_handler = acpi_gbl_sci_handler_list;
+ while (next_sci_handler) {
+ if (next_sci_handler->address == address) {
+
+ /* Unlink and free the SCI handler info block */
+
+ if (prev_sci_handler) {
+ prev_sci_handler->next = next_sci_handler->next;
+ } else {
+ acpi_gbl_sci_handler_list =
+ next_sci_handler->next;
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ ACPI_FREE(next_sci_handler);
+ goto unlock_and_exit;
+ }
+
+ prev_sci_handler = next_sci_handler;
+ next_sci_handler = next_sci_handler->next;
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ status = AE_NOT_EXIST;
+
+unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
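For context, a host driver would be expected to use the two new entry points above roughly as follows. This is an illustrative sketch, not part of the patch: example_sci_handler() and example_register() are invented names, and it assumes the acpi_install_sci_handler()/acpi_remove_sci_handler() declarations added elsewhere in this series are reachable via <linux/acpi.h>. The handler is invoked at interrupt level and reports whether it handled the SCI.

#include <linux/acpi.h>

/* Called for every SCI once installed; runs at interrupt level, must not sleep. */
static u32 example_sci_handler(void *context)
{
	/* Inspect device state here; this sketch never claims the SCI. */
	return ACPI_INTERRUPT_NOT_HANDLED;
}

static acpi_status example_register(void)
{
	acpi_status status;

	status = acpi_install_sci_handler(example_sci_handler, NULL);
	if (ACPI_FAILURE(status))
		return status;

	/* ... and on teardown: */
	return acpi_remove_sci_handler(example_sci_handler);
}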
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_install_global_event_handler
*
* PARAMETERS: handler - Pointer to the global event handler function
@@ -398,6 +537,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
* Can be used to update event counters, etc.
*
******************************************************************************/
+
acpi_status
acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context)
{
@@ -426,7 +566,7 @@ acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context)
acpi_gbl_global_event_handler = handler;
acpi_gbl_global_event_handler_context = context;
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -498,7 +638,7 @@ acpi_install_fixed_event_handler(u32 event,
handler));
}
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 7039606a0ba8..39d06af5e347 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 7662f1a42ff6..5713da77c665 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
@@ -471,7 +472,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
*event_status |= ACPI_EVENT_FLAG_HANDLE;
- unlock_and_exit:
+unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -624,7 +625,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
obj_desc->device.gpe_block = gpe_block;
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
@@ -679,7 +680,7 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
obj_desc->device.gpe_block = NULL;
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 80cecf838591..02ed75ac56cd 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -42,7 +42,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -147,7 +148,7 @@ acpi_install_address_space_handler(acpi_handle device,
status = acpi_ev_execute_reg_methods(node, space_id);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
@@ -286,7 +287,7 @@ acpi_remove_address_space_handler(acpi_handle device,
status = AE_NOT_EXIST;
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 269e81d86ef4..3c2e6dcdad3e 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -193,7 +193,7 @@ acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state)
acpi_ns_attach_object((struct acpi_namespace_node *)walk_state->
operands[0], obj_desc, ACPI_TYPE_EVENT);
- cleanup:
+cleanup:
/*
* Remove local reference to the object (on error, will cause deletion
* of both object and semaphore if present.)
@@ -248,7 +248,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
acpi_ns_attach_object(obj_desc->mutex.node, obj_desc,
ACPI_TYPE_MUTEX);
- cleanup:
+cleanup:
/*
* Remove local reference to the object (on error, will cause deletion
* of both object and semaphore if present.)
@@ -347,7 +347,7 @@ acpi_ex_create_region(u8 * aml_start,
status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_REGION);
- cleanup:
+cleanup:
/* Remove local reference to the object */
@@ -520,7 +520,7 @@ acpi_ex_create_method(u8 * aml_start,
acpi_ut_remove_reference(obj_desc);
- exit:
+exit:
/* Remove a reference to the operand */
acpi_ut_remove_reference(operand[1]);
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index c2a65aaf29af..cfd875243421 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -197,7 +197,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length);
acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
- exit:
+exit:
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(buffer_desc);
} else {
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 7e0afe72487e..49fb742d61b9 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -123,12 +123,6 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
}
}
- /* Exit if Address/Length have been disallowed by the host OS */
-
- if (rgn_desc->common.flags & AOPOBJ_INVALID) {
- return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS);
- }
-
/*
* Exit now for SMBus, GSBus or IPMI address space, it has a non-linear
* address space and the request cannot be directly validated
@@ -1002,7 +996,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
mask, merged_datum,
field_offset);
- exit:
+exit:
/* Free temporary buffer if we used one */
if (new_buffer) {
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 00bf29877574..65d93607f368 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -388,7 +388,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
*actual_return_desc = return_desc;
- cleanup:
+cleanup:
if (local_operand1 != operand1) {
acpi_ut_remove_reference(local_operand1);
}
@@ -718,7 +718,7 @@ acpi_ex_do_logical_op(u16 opcode,
}
}
- cleanup:
+cleanup:
/* New object was created if implicit conversion performed - delete */
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 2cdd41d8ade6..d74cea416ca0 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -115,7 +115,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
break;
}
- cleanup:
+cleanup:
/* Delete return object on error */
@@ -234,7 +234,7 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
return_ACPI_STATUS(status);
}
@@ -551,7 +551,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_store(return_desc, operand[1], walk_state);
}
- cleanup:
+cleanup:
/* Delete return object on error */
@@ -1054,7 +1054,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
/* Delete return object on error */
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index d5088f7030c7..d6fa0fce1fc9 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -215,7 +215,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
/*
* Since the remainder is not returned indirectly, remove a reference to
* it. Only the quotient is returned indirectly.
@@ -445,7 +445,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
break;
}
- store_result_to_target:
+store_result_to_target:
if (ACPI_SUCCESS(status)) {
/*
@@ -462,7 +462,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
}
}
- cleanup:
+cleanup:
/* Delete return object on error */
@@ -553,7 +553,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- store_logical_result:
+store_logical_result:
/*
* Set return value to according to logical_result. logical TRUE (all ones)
* Default is FALSE (zero)
@@ -562,7 +562,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
return_desc->integer.value = ACPI_UINT64_MAX;
}
- cleanup:
+cleanup:
/* Delete return object on error */
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 37656f12f204..bc042adf8804 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -124,7 +124,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
return_ACPI_STATUS(status);
}
@@ -252,7 +252,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_store(return_desc, operand[3], walk_state);
- cleanup:
+cleanup:
/* Delete return object on error */
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 879b6cd8319c..4459e32c683d 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -314,7 +314,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
/* Delete return object on error */
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 303429bb4d5d..9d28867e60dc 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -400,6 +400,7 @@ acpi_ex_pci_config_space_handler(u32 function,
switch (function) {
case ACPI_READ:
+ *value = 0;
status = acpi_os_read_pci_configuration(pci_id, pci_register,
value, bit_width);
break;
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index ac04278ad28f..1606524312e3 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -521,7 +521,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
*/
type = obj_desc->common.type;
- exit:
+exit:
/* Convert internal types to external types */
switch (type) {
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 00e5af7129c1..be3f66973ee8 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -683,7 +683,7 @@ acpi_ex_resolve_operands(u16 opcode,
return_ACPI_STATUS(status);
}
- next_operand:
+next_operand:
/*
* If more operands needed, decrement stack_ptr to point
* to next operand on stack
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 8d2e866be15f..12e6cff54f78 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -560,7 +560,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
break;
}
- exit:
+exit:
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 2d7d22ebc782..3c498dc1636e 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 5ee7a814cd92..b4b47db2dee2 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -83,11 +84,17 @@ acpi_status acpi_reset(void)
* For I/O space, write directly to the OSL. This bypasses the port
* validation mechanism, which may block a valid write to the reset
* register.
- * Spec section 4.7.3.6 requires register width to be 8.
+ *
+ * NOTE:
+ * The ACPI spec requires the reset register width to be 8, so we
+ * hardcode it here and ignore the FADT value. This maintains
+ * compatibility with other ACPI implementations that have allowed
+ * BIOS code with bad register width values to go unnoticed.
*/
status =
acpi_os_write_port((acpi_io_address) reset_reg->address,
- acpi_gbl_FADT.reset_value, 8);
+ acpi_gbl_FADT.reset_value,
+ ACPI_RESET_REGISTER_WIDTH);
} else {
/* Write the reset value to the reset register */
@@ -119,7 +126,8 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
******************************************************************************/
acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
{
- u32 value;
+ u32 value_lo;
+ u32 value_hi;
u32 width;
u64 address;
acpi_status status;
@@ -137,13 +145,8 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
return (status);
}
- /* Initialize entire 64-bit return value to zero */
-
- *return_value = 0;
- value = 0;
-
/*
- * Two address spaces supported: Memory or IO. PCI_Config is
+ * Two address spaces supported: Memory or I/O. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
@@ -155,29 +158,35 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
}
} else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+ value_lo = 0;
+ value_hi = 0;
+
width = reg->bit_width;
if (width == 64) {
width = 32; /* Break into two 32-bit transfers */
}
status = acpi_hw_read_port((acpi_io_address)
- address, &value, width);
+ address, &value_lo, width);
if (ACPI_FAILURE(status)) {
return (status);
}
- *return_value = value;
if (reg->bit_width == 64) {
/* Read the top 32 bits */
status = acpi_hw_read_port((acpi_io_address)
- (address + 4), &value, 32);
+ (address + 4), &value_hi,
+ 32);
if (ACPI_FAILURE(status)) {
return (status);
}
- *return_value |= ((u64)value << 32);
}
+
+ /* Set the return value only if status is AE_OK */
+
+ *return_value = (value_lo | ((u64)value_hi << 32));
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
@@ -186,7 +195,7 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
- return (status);
+ return (AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_read)
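The I/O-space path above reassembles the 64-bit result from two 32-bit port reads as value_lo | ((u64)value_hi << 32). A standalone illustration of that combine, with made-up register values that are not taken from the patch:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t value_lo = 0x89ABCDEF;	/* low 32-bit port read at address */
	uint32_t value_hi = 0x01234567;	/* high 32-bit port read at address + 4 */
	uint64_t value = value_lo | ((uint64_t)value_hi << 32);

	printf("0x%016" PRIX64 "\n", value);	/* prints 0x0123456789ABCDEF */
	return 0;
}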
@@ -561,10 +570,10 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
break;
}
- cleanup1:
+cleanup1:
acpi_ut_remove_reference(info->return_object);
- cleanup:
+cleanup:
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While evaluating Sleep State [%s]",
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index f2e669db8b65..15dddc10fc9b 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
@@ -166,7 +167,7 @@ ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
+acpi_status acpi_enter_sleep_state_s4bios(void)
{
u32 in_value;
acpi_status status;
@@ -360,7 +361,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
+acpi_status acpi_enter_sleep_state(u8 sleep_state)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index c5316e5bd4ab..14f65f6345b9 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -240,7 +240,7 @@ acpi_status acpi_ns_root_initialize(void)
}
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
/* Save a handle to "_GPE", it is always present */
@@ -424,8 +424,9 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
/* Current scope has no parent scope */
ACPI_ERROR((AE_INFO,
- "ACPI path has too many parent prefixes (^) "
- "- reached beyond root node"));
+ "%s: Path has too many parent prefixes (^) "
+ "- reached beyond root node",
+ pathname));
return_ACPI_STATUS(AE_NOT_FOUND);
}
}
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 7418c77fde8c..48b9c6f12643 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -59,6 +59,17 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
#endif
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+#ifdef ACPI_FUTURE_USAGE
+static acpi_status
+acpi_ns_dump_one_object_path(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+static acpi_status
+acpi_ns_get_max_depth(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+#endif /* ACPI_FUTURE_USAGE */
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_print_pathname
@@ -609,7 +620,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
obj_type = ACPI_TYPE_INVALID; /* Terminate loop after next pass */
}
- cleanup:
+cleanup:
acpi_os_printf("\n");
return (AE_OK);
}
@@ -671,6 +682,136 @@ acpi_ns_dump_objects(acpi_object_type type,
}
#endif /* ACPI_FUTURE_USAGE */
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_dump_one_object_path, acpi_ns_get_max_depth
+ *
+ * PARAMETERS: obj_handle - Node to be dumped
+ * level - Nesting level of the handle
+ * context - Passed into walk_namespace
+ * return_value - Not used
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Dump the full pathname to a namespace object. acpi_ns_get_max_depth
+ * computes the maximum nesting depth in the namespace tree, in
+ * order to simplify formatting in acpi_ns_dump_one_object_path.
+ * These procedures are user_functions called by acpi_ns_walk_namespace.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_dump_one_object_path(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ u32 max_level = *((u32 *)context);
+ char *pathname;
+ struct acpi_namespace_node *node;
+ int path_indent;
+
+ if (!obj_handle) {
+ return (AE_OK);
+ }
+
+ node = acpi_ns_validate_handle(obj_handle);
+ if (!node) {
+
+ /* Ignore bad node during namespace walk */
+
+ return (AE_OK);
+ }
+
+ pathname = acpi_ns_get_external_pathname(node);
+
+ path_indent = 1;
+ if (level <= max_level) {
+ path_indent = max_level - level + 1;
+ }
+
+ acpi_os_printf("%2d%*s%-12s%*s",
+ level, level, " ", acpi_ut_get_type_name(node->type),
+ path_indent, " ");
+
+ acpi_os_printf("%s\n", &pathname[1]);
+ ACPI_FREE(pathname);
+ return (AE_OK);
+}
+
+static acpi_status
+acpi_ns_get_max_depth(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ u32 *max_level = (u32 *)context;
+
+ if (level > *max_level) {
+ *max_level = level;
+ }
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_dump_object_paths
+ *
+ * PARAMETERS: type - Object type to be dumped
+ * display_type - 0 or ACPI_DISPLAY_SUMMARY
+ * max_depth - Maximum depth of dump. Use ACPI_UINT32_MAX
+ * for an effectively unlimited depth.
+ * owner_id - Dump only objects owned by this ID. Use
+ * ACPI_UINT32_MAX to match all owners.
+ * start_handle - Where in namespace to start/end search
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump full object pathnames within the loaded namespace. Uses
+ * acpi_ns_walk_namespace in conjunction with acpi_ns_dump_one_object_path.
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_dump_object_paths(acpi_object_type type,
+ u8 display_type,
+ u32 max_depth,
+ acpi_owner_id owner_id, acpi_handle start_handle)
+{
+ acpi_status status;
+ u32 max_level = 0;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * Just lock the entire namespace for the duration of the dump.
+ * We don't want any changes to the namespace during this time,
+ * especially the temporary nodes since we are going to display
+ * them also.
+ */
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could not acquire namespace mutex\n");
+ return;
+ }
+
+ /* Get the max depth of the namespace tree, for formatting later */
+
+ (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+ ACPI_NS_WALK_NO_UNLOCK |
+ ACPI_NS_WALK_TEMP_NODES,
+ acpi_ns_get_max_depth, NULL,
+ (void *)&max_level, NULL);
+
+ /* Now dump the entire namespace */
+
+ (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+ ACPI_NS_WALK_NO_UNLOCK |
+ ACPI_NS_WALK_TEMP_NODES,
+ acpi_ns_dump_one_object_path, NULL,
+ (void *)&max_level, NULL);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+}
+#endif /* ACPI_FUTURE_USAGE */
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_dump_entry
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 409ae80824d1..283762511b73 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -69,6 +69,7 @@ static acpi_status
acpi_ns_dump_one_device(acpi_handle obj_handle,
u32 level, void *context, void **return_value)
{
+ struct acpi_buffer buffer;
struct acpi_device_info *info;
acpi_status status;
u32 i;
@@ -78,15 +79,17 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
status =
acpi_ns_dump_one_object(obj_handle, level, context, return_value);
- status = acpi_get_object_info(obj_handle, &info);
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_get_object_info(obj_handle, &buffer);
if (ACPI_SUCCESS(status)) {
+ info = buffer.pointer;
for (i = 0; i < level; i++) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " "));
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES,
" HID: %s, ADR: %8.8X%8.8X, Status: %X\n",
- info->hardware_id.string,
+ info->hardware_id.value,
ACPI_FORMAT_UINT64(info->address),
info->current_status));
ACPI_FREE(info);
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 18108bc2e51c..963ceef063f8 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -314,7 +314,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
"*** Completed evaluation of object %s ***\n",
info->relative_pathname));
- cleanup:
+cleanup:
/*
* Namespace was unlocked by the handling acpi_ns* function, so we
* just free the pathname and return
@@ -486,7 +486,7 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
parent_node->type = (u8)type;
}
- exit:
+exit:
if (parent_obj) {
acpi_ut_remove_reference(parent_obj);
}
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index dd2ceae3f717..3a0423af968c 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -213,7 +213,7 @@ acpi_status acpi_ns_initialize_devices(void)
return_ACPI_STATUS(status);
- error_exit:
+error_exit:
ACPI_EXCEPTION((AE_INFO, status, "During device initialization"));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 0a7badc3179f..89ec645e7730 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -114,7 +114,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
(void)acpi_tb_release_owner_id(table_index);
}
- unlock:
+unlock:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 35dde8151c0d..177857340271 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -140,7 +140,7 @@ acpi_ns_one_complete_parse(u32 pass_number,
pass_number));
status = acpi_ps_parse_aml(walk_state);
- cleanup:
+cleanup:
acpi_ps_delete_parse_tree(parse_root);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 098e7666cbc9..d2855d9857c4 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -271,7 +271,7 @@ acpi_ns_check_object_type(struct acpi_evaluate_info *info,
return (AE_OK); /* Successful repair */
}
- type_error_exit:
+type_error_exit:
/* Create a string with all expected types for this predefined object */
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 6d55cef7916c..3d5391f9bcb5 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -330,7 +330,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
return (status);
- package_too_small:
+package_too_small:
/* Error exit for the case with an incorrect package count */
@@ -555,7 +555,7 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
return (AE_OK);
- package_too_small:
+package_too_small:
/* The sub-package count was smaller than required */
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index f8e71ea60319..a05afff50eb9 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -263,7 +263,7 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
return (AE_AML_OPERAND_TYPE);
- object_repaired:
+object_repaired:
/* Object was successfully repaired */
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index c84603ee83ae..6a25d320b169 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -478,7 +478,7 @@ acpi_ns_repair_CST(struct acpi_evaluate_info *info,
removing = TRUE;
}
- remove_element:
+remove_element:
if (removing) {
acpi_ns_remove_element(return_object, i + 1);
outer_element_count--;
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 5d43efc53a61..47420faef073 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -381,7 +381,8 @@ acpi_ns_search_and_enter(u32 target_name,
/* Node is an object defined by an External() statement */
- if (flags & ACPI_NS_EXTERNAL) {
+ if (flags & ACPI_NS_EXTERNAL ||
+ (walk_state && walk_state->opcode == AML_SCOPE_OP)) {
new_node->flags |= ANOBJ_IS_EXTERNAL;
}
#endif
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 08c0b5beec88..cc2fea94c5f0 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -722,7 +722,7 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- cleanup:
+cleanup:
ACPI_FREE(internal_path);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index b38b4b07f86e..e973e311f856 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -42,7 +42,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -138,7 +139,7 @@ acpi_evaluate_object_typed(acpi_handle handle,
/* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */
- ACPI_FREE(return_buffer->pointer);
+ ACPI_FREE_BUFFER(*return_buffer);
return_buffer->pointer = NULL;
}
@@ -441,7 +442,7 @@ acpi_evaluate_object(acpi_handle handle,
acpi_ex_exit_interpreter();
}
- cleanup:
+cleanup:
/* Free the input parameter list (if we created one) */
@@ -605,14 +606,22 @@ acpi_walk_namespace(acpi_object_type type,
goto unlock_and_exit;
}
+ /* Now we can validate the starting node */
+
+ if (!acpi_ns_validate_handle(start_object)) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit2;
+ }
+
status = acpi_ns_walk_namespace(type, start_object, max_depth,
ACPI_NS_WALK_UNLOCK,
descending_callback, ascending_callback,
context, return_value);
+unlock_and_exit2:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_read_lock(&acpi_gbl_namespace_rw_lock);
return_ACPI_STATUS(status);
}
@@ -856,7 +865,7 @@ acpi_attach_data(acpi_handle obj_handle,
status = acpi_ns_attach_data(node, handler, data);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
@@ -902,7 +911,7 @@ acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
status = acpi_ns_detach_data(node, handler);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
@@ -949,7 +958,7 @@ acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
status = acpi_ns_get_attached_data(node, handler, data);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 83c164434580..3a4bd3ff49a3 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -42,7 +42,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -208,7 +209,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
status = AE_OK;
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
@@ -496,7 +497,7 @@ acpi_get_object_info(acpi_handle handle,
*return_buffer = info;
status = AE_OK;
- cleanup:
+cleanup:
if (hid) {
ACPI_FREE(hid);
}
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index c0853ef294e4..0e6d79e462d4 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -42,7 +42,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -200,7 +201,7 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
status = AE_NULL_ENTRY;
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
@@ -280,7 +281,7 @@ acpi_get_next_object(acpi_object_type type,
*ret_handle = ACPI_CAST_PTR(acpi_handle, node);
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 86198a9139b5..79d9a28dedef 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -297,7 +297,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
}
}
- cleanup:
+cleanup:
/* Now we can actually delete the subtree rooted at Op */
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 11b99ab20bb3..fcb7a840e996 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -142,7 +142,7 @@ static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
}
- exit:
+exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
@@ -185,7 +185,7 @@ static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
acpi_dbg_level = acpi_gbl_original_dbg_level;
acpi_dbg_layer = acpi_gbl_original_dbg_layer;
- exit:
+exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
@@ -323,7 +323,7 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
/* walk_state was deleted by parse_aml */
- cleanup:
+cleanup:
acpi_ps_delete_parse_tree(op);
/* End optional tracing */
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 80d12994e0d0..c99cec9cefde 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -440,7 +440,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
info++;
}
- exit:
+exit:
if (!flags_mode) {
/* Round the resource struct length up to the next boundary (32 or 64) */
@@ -783,7 +783,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
info++;
}
- exit:
+exit:
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 480b6b40c5ea..aef303d56d86 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -784,7 +784,7 @@ acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
acpi_ut_remove_reference(args[0]);
- cleanup:
+cleanup:
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 94e3517554f9..01e476988aae 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 42a13c0d7015..634357d51fe9 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -80,16 +80,10 @@ acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc)
}
}
- /* FACS is the odd table, has no standard ACPI header and no checksum */
+ /* Always calculate checksum, ignore bad checksum if requested */
- if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) {
-
- /* Always calculate checksum, ignore bad checksum if requested */
-
- status =
- acpi_tb_verify_checksum(table_desc->pointer,
- table_desc->length);
- }
+ status =
+ acpi_tb_verify_checksum(table_desc->pointer, table_desc->length);
return_ACPI_STATUS(status);
}
@@ -237,10 +231,10 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
goto release;
}
- print_header:
+print_header:
acpi_tb_print_table_header(table_desc->address, table_desc->pointer);
- release:
+release:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
@@ -312,7 +306,7 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
return (NULL); /* There was no override */
- finish_override:
+finish_override:
ACPI_INFO((AE_INFO,
"%4.4s %p %s table override, new table: %p",
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index dc963f823d2c..6866e767ba90 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -135,10 +135,10 @@ acpi_tb_print_table_header(acpi_physical_address address,
/* FACS only has signature and length fields */
- ACPI_INFO((AE_INFO, "%4.4s %p %05X",
+ ACPI_INFO((AE_INFO, "%4.4s %p %06X",
header->signature, ACPI_CAST_PTR(void, address),
header->length));
- } else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
+ } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
/* RSDP has no common fields */
@@ -147,7 +147,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
header)->oem_id, ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
- ACPI_INFO((AE_INFO, "RSDP %p %05X (v%.2d %6.6s)",
+ ACPI_INFO((AE_INFO, "RSDP %p %06X (v%.2d %6.6s)",
ACPI_CAST_PTR(void, address),
(ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
revision >
@@ -162,7 +162,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
acpi_tb_cleanup_table_header(&local_header, header);
ACPI_INFO((AE_INFO,
- "%4.4s %p %05X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
+ "%4.4s %p %06X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
local_header.signature, ACPI_CAST_PTR(void, address),
local_header.length, local_header.revision,
local_header.oem_id, local_header.oem_table_id,
@@ -190,6 +190,16 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
{
u8 checksum;
+ /*
+ * FACS/S3PT:
+ * They are the odd tables, have no standard ACPI header and no checksum
+ */
+
+ if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_S3PT) ||
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_FACS)) {
+ return (AE_OK);
+ }
+
/* Compute the checksum on the table */
checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index bffdfc7b8322..3d6bb83aa7e7 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -350,7 +350,7 @@ acpi_tb_install_table(acpi_physical_address address,
acpi_tb_delete_table(table_desc);
}
- unmap_and_exit:
+unmap_and_exit:
/* Always unmap the table header that we mapped above */
@@ -430,8 +430,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
*
******************************************************************************/
-acpi_status __init
-acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
+acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
{
struct acpi_table_rsdp *rsdp;
u32 table_entry_size;
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index ad11162482ff..db826eaadd1c 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"
@@ -147,6 +148,8 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables)
+
/*******************************************************************************
*
* FUNCTION: acpi_reallocate_root_table
@@ -161,7 +164,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
* kernel.
*
******************************************************************************/
-acpi_status acpi_reallocate_root_table(void)
+acpi_status __init acpi_reallocate_root_table(void)
{
acpi_status status;
@@ -181,6 +184,8 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL_INIT(acpi_reallocate_root_table)
+
/*******************************************************************************
*
* FUNCTION: acpi_get_table_header
@@ -356,6 +361,7 @@ acpi_get_table_with_size(char *signature,
return (AE_NOT_FOUND);
}
+
ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
acpi_status
@@ -367,6 +373,7 @@ acpi_get_table(char *signature,
return acpi_get_table_with_size(signature,
instance, out_table, &tbl_size);
}
+
ACPI_EXPORT_SYMBOL(acpi_get_table)
/*******************************************************************************
@@ -424,7 +431,6 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
-
/*******************************************************************************
*
* FUNCTION: acpi_install_table_handler
@@ -465,7 +471,7 @@ acpi_install_table_handler(acpi_table_handler handler, void *context)
acpi_gbl_table_handler = handler;
acpi_gbl_table_handler_context = context;
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -506,7 +512,7 @@ acpi_status acpi_remove_table_handler(acpi_table_handler handler)
acpi_gbl_table_handler = NULL;
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 0ba9e328d5d7..60b5a871833c 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -65,7 +66,7 @@ static acpi_status acpi_tb_load_namespace(void);
*
******************************************************************************/
-acpi_status acpi_load_tables(void)
+acpi_status __init acpi_load_tables(void)
{
acpi_status status;
@@ -82,7 +83,7 @@ acpi_status acpi_load_tables(void)
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_load_tables)
+ACPI_EXPORT_SYMBOL_INIT(acpi_load_tables)
/*******************************************************************************
*
@@ -200,7 +201,7 @@ static acpi_status acpi_tb_load_namespace(void)
ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired"));
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
@@ -268,7 +269,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table)
acpi_gbl_table_handler_context);
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 948c95e80d44..e4e1468877c3 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -68,8 +68,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
* Note: Sometimes there exists more than one RSDP in memory; the valid
* RSDP has a valid checksum, all others have an invalid checksum.
*/
- if (ACPI_STRNCMP((char *)rsdp->signature, ACPI_SIG_RSDP,
- sizeof(ACPI_SIG_RSDP) - 1) != 0) {
+ if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature)) {
/* Nope, BAD Signature */
@@ -112,7 +111,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
*
******************************************************************************/
-acpi_status acpi_find_root_pointer(acpi_size *table_address)
+acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
{
u8 *table_ptr;
u8 *mem_rover;
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index e0ffb580f4b0..814267f52715 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -48,6 +48,39 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utalloc")
+#if !defined (USE_NATIVE_ALLOCATE_ZEROED)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_allocate_zeroed
+ *
+ * PARAMETERS: size - Size of the allocation
+ *
+ * RETURN: Address of the allocated memory on success, NULL on failure.
+ *
+ * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
+ * This is the default implementation. Can be overridden via the
+ * USE_NATIVE_ALLOCATE_ZEROED flag.
+ *
+ ******************************************************************************/
+void *acpi_os_allocate_zeroed(acpi_size size)
+{
+ void *allocation;
+
+ ACPI_FUNCTION_ENTRY();
+
+ allocation = acpi_os_allocate(size);
+ if (allocation) {
+
+ /* Clear the memory block */
+
+ ACPI_MEMSET(allocation, 0, size);
+ }
+
+ return (allocation);
+}
+
+#endif /* !USE_NATIVE_ALLOCATE_ZEROED */
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_caches
@@ -59,6 +92,7 @@ ACPI_MODULE_NAME("utalloc")
* DESCRIPTION: Create all local caches
*
******************************************************************************/
+
acpi_status acpi_ut_create_caches(void)
{
acpi_status status;
@@ -175,10 +209,10 @@ acpi_status acpi_ut_delete_caches(void)
/* Free memory lists */
- ACPI_FREE(acpi_gbl_global_list);
+ acpi_os_free(acpi_gbl_global_list);
acpi_gbl_global_list = NULL;
- ACPI_FREE(acpi_gbl_ns_node_list);
+ acpi_os_free(acpi_gbl_ns_node_list);
acpi_gbl_ns_node_list = NULL;
#endif
@@ -302,82 +336,3 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
ACPI_MEMSET(buffer->pointer, 0, required_length);
return (AE_OK);
}
-
-#ifdef NOT_USED_BY_LINUX
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_allocate
- *
- * PARAMETERS: size - Size of the allocation
- * component - Component type of caller
- * module - Source file name of caller
- * line - Line number of caller
- *
- * RETURN: Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: Subsystem equivalent of malloc.
- *
- ******************************************************************************/
-
-void *acpi_ut_allocate(acpi_size size,
- u32 component, const char *module, u32 line)
-{
- void *allocation;
-
- ACPI_FUNCTION_TRACE_U32(ut_allocate, size);
-
- /* Check for an inadvertent size of zero bytes */
-
- if (!size) {
- ACPI_WARNING((module, line,
- "Attempt to allocate zero bytes, allocating 1 byte"));
- size = 1;
- }
-
- allocation = acpi_os_allocate(size);
- if (!allocation) {
-
- /* Report allocation error */
-
- ACPI_WARNING((module, line,
- "Could not allocate size %u", (u32) size));
-
- return_PTR(NULL);
- }
-
- return_PTR(allocation);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_allocate_zeroed
- *
- * PARAMETERS: size - Size of the allocation
- * component - Component type of caller
- * module - Source file name of caller
- * line - Line number of caller
- *
- * RETURN: Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
- *
- ******************************************************************************/
-
-void *acpi_ut_allocate_zeroed(acpi_size size,
- u32 component, const char *module, u32 line)
-{
- void *allocation;
-
- ACPI_FUNCTION_ENTRY();
-
- allocation = acpi_ut_allocate(size, component, module, line);
- if (allocation) {
-
- /* Clear the memory block */
-
- ACPI_MEMSET(allocation, 0, size);
- }
-
- return (allocation);
-}
-#endif
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index a877a9647fd9..366bfec4b770 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -65,7 +65,7 @@ ACPI_MODULE_NAME("utcache")
acpi_status
acpi_os_create_cache(char *cache_name,
u16 object_size,
- u16 max_depth, struct acpi_memory_list ** return_cache)
+ u16 max_depth, struct acpi_memory_list **return_cache)
{
struct acpi_memory_list *cache;
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 1731c27c36a6..edff4e653d9a 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -552,7 +552,7 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
*ret_internal_object = internal_object;
return_ACPI_STATUS(AE_OK);
- error_exit:
+error_exit:
acpi_ut_remove_reference(internal_object);
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -899,7 +899,7 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type,
return (status);
- error_exit:
+error_exit:
acpi_ut_remove_reference(target_object);
return (status);
}
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 5796e11a0671..1a67b3944b3b 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
@@ -190,7 +191,7 @@ acpi_debug_print(u32 requested_debug_level,
* Display the module name, current line number, thread ID (if requested),
* current procedure nesting level, and the current procedure name
*/
- acpi_os_printf("%8s-%04ld ", module_name, line_number);
+ acpi_os_printf("%9s-%04ld ", module_name, line_number);
if (ACPI_LV_THREADS & acpi_dbg_level) {
acpi_os_printf("[%u] ", (u32)thread_id);
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 11e2e02e1618..b3f31dd89a45 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -41,7 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index d6b33f29d327..c07d2227ea42 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -649,7 +649,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
return (AE_OK);
- error_exit:
+error_exit:
ACPI_EXCEPTION((AE_INFO, status,
"Could not update object reference count"));
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 4fd68971019b..16fb90506db7 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -181,7 +181,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
*return_desc = info->return_object;
- cleanup:
+cleanup:
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index ff6d9e8aa842..3cf7b597edb9 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -41,8 +41,9 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#define EXPORT_ACPI_INTERFACES
+
#define ACPI_DEFINE_EXCEPTION_TABLE
-#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index d6f26bf8a062..81f9a9584451 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -41,9 +41,9 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#define EXPORT_ACPI_INTERFACES
#define DEFINE_ACPI_GLOBALS
-#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
@@ -289,9 +289,19 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
+ /* Event counters */
+
+ acpi_method_count = 0;
+ acpi_sci_count = 0;
+ acpi_gpe_count = 0;
+
+ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+ acpi_fixed_event_count[i] = 0;
+ }
+
#if (!ACPI_REDUCED_HARDWARE)
- /* GPE support */
+ /* GPE/SCI support */
acpi_gbl_all_gpes_initialized = FALSE;
acpi_gbl_gpe_xrupt_list_head = NULL;
@@ -300,6 +310,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_current_gpe_count = 0;
acpi_gbl_global_event_handler = NULL;
+ acpi_gbl_sci_handler_list = NULL;
#endif /* !ACPI_REDUCED_HARDWARE */
@@ -377,6 +388,11 @@ acpi_status acpi_ut_init_globals(void)
/* Public globals */
ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
+
ACPI_EXPORT_SYMBOL(acpi_dbg_level)
+
ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
+
+ACPI_EXPORT_SYMBOL(acpi_gpe_count)
+
ACPI_EXPORT_SYMBOL(acpi_current_gpe_count)
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index fa69071db418..bfca7b4b6731 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -184,7 +184,7 @@ acpi_ut_execute_SUB(struct acpi_namespace_node *device_node,
sub->length = length;
*return_id = sub;
- cleanup:
+cleanup:
/* On exit, we must delete the return object */
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index aa61f66ee861..517af700399d 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -180,7 +180,7 @@ union acpi_operand_object *acpi_ut_create_package_object(u32 count)
package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count +
1) * sizeof(void *));
if (!package_elements) {
- acpi_ut_remove_reference(package_desc);
+ ACPI_FREE(package_desc);
return_PTR(NULL);
}
@@ -356,7 +356,7 @@ u8 acpi_ut_valid_internal_object(void *object)
default:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "%p is not not an ACPI operand obj [%s]\n",
+ "%p is not an ACPI operand obj [%s]\n",
object, acpi_ut_get_descriptor_name(object)));
break;
}
@@ -396,7 +396,6 @@ void *acpi_ut_allocate_object_desc_dbg(const char *module_name,
/* Mark the descriptor type */
- memset(object, 0, sizeof(union acpi_operand_object));
ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_OPERAND);
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p Size %X\n",
@@ -461,25 +460,28 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);
+ /* Start with the length of the (external) Acpi object */
+
+ length = sizeof(union acpi_object);
+
+ /* A NULL object is allowed, can be a legal uninitialized package element */
+
+ if (!internal_object) {
/*
- * Handle a null object (Could be a uninitialized package
- * element -- which is legal)
+ * Object is NULL, just return the length of union acpi_object
+ * (A NULL union acpi_object is an object of all zeroes.)
*/
- if (!internal_object) {
- *obj_length = sizeof(union acpi_object);
+ *obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
return_ACPI_STATUS(AE_OK);
}
- /* Start with the length of the Acpi object */
-
- length = sizeof(union acpi_object);
+ /* A Namespace Node should never appear here */
if (ACPI_GET_DESCRIPTOR_TYPE(internal_object) == ACPI_DESC_TYPE_NAMED) {
- /* Object is a named object (reference), just return the length */
+ /* A namespace node should never get here */
- *obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
- return_ACPI_STATUS(status);
+ return_ACPI_STATUS(AE_AML_INTERNAL);
}
/*
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index 835340b26d37..eb3aca761369 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -148,7 +148,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
ACPI_ERROR((AE_INFO,
"Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
- exit:
+exit:
(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index cb7fa491decf..2c2accb9e534 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -643,7 +643,7 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
return (AE_OK);
- invalid_resource:
+invalid_resource:
if (walk_state) {
ACPI_ERROR((AE_INFO,
@@ -652,7 +652,7 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
}
return (AE_AML_INVALID_RESOURCE_TYPE);
- bad_resource_length:
+bad_resource_length:
if (walk_state) {
ACPI_ERROR((AE_INFO,
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index a6b729d4c1dc..03c4c2febd84 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -161,7 +161,6 @@ union acpi_generic_state *acpi_ut_create_generic_state(void)
if (state) {
/* Initialize */
- memset(state, 0, sizeof(union acpi_generic_state));
state->common.descriptor_type = ACPI_DESC_TYPE_STATE;
}
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index cb1e9cc32d5f..45c0eb26b33d 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -310,7 +310,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
/* All done, normal exit */
- all_done:
+all_done:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
ACPI_FORMAT_UINT64(return_value)));
@@ -318,7 +318,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
*ret_integer = return_value;
return_ACPI_STATUS(AE_OK);
- error_exit:
+error_exit:
/* Base was set/validated above */
if (base == 10) {
@@ -584,3 +584,65 @@ void ut_convert_backslashes(char *pathname)
}
}
#endif
+
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
+ *
+ * PARAMETERS: Adds a "DestSize" parameter to each of the standard string
+ * functions. This is the size of the Destination buffer.
+ *
+ * RETURN: TRUE if the operation would overflow the destination buffer.
+ *
+ * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
+ * the result of the operation will not overflow the output string
+ * buffer.
+ *
+ * NOTE: These functions are typically only helpful for processing
+ * user input and command lines. For most ACPICA code, the
+ * required buffer length is precisely calculated before buffer
+ * allocation, so the use of these functions is unnecessary.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
+{
+
+ if (ACPI_STRLEN(source) >= dest_size) {
+ return (TRUE);
+ }
+
+ ACPI_STRCPY(dest, source);
+ return (FALSE);
+}
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
+{
+
+ if ((ACPI_STRLEN(dest) + ACPI_STRLEN(source)) >= dest_size) {
+ return (TRUE);
+ }
+
+ ACPI_STRCAT(dest, source);
+ return (FALSE);
+}
+
+u8
+acpi_ut_safe_strncat(char *dest,
+ acpi_size dest_size,
+ char *source, acpi_size max_transfer_length)
+{
+ acpi_size actual_transfer_length;
+
+ actual_transfer_length =
+ ACPI_MIN(max_transfer_length, ACPI_STRLEN(source));
+
+ if ((ACPI_STRLEN(dest) + actual_transfer_length) >= dest_size) {
+ return (TRUE);
+ }
+
+ ACPI_STRNCAT(dest, source, max_transfer_length);
+ return (FALSE);
+}
+#endif
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 160f13f4aab5..c0027773cccb 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -130,10 +130,23 @@ void *acpi_ut_allocate_and_track(acpi_size size,
struct acpi_debug_mem_block *allocation;
acpi_status status;
+ /* Check for an inadvertent size of zero bytes */
+
+ if (!size) {
+ ACPI_WARNING((module, line,
+ "Attempt to allocate zero bytes, allocating 1 byte"));
+ size = 1;
+ }
+
allocation =
- acpi_ut_allocate(size + sizeof(struct acpi_debug_mem_header),
- component, module, line);
+ acpi_os_allocate(size + sizeof(struct acpi_debug_mem_header));
if (!allocation) {
+
+ /* Report allocation error */
+
+ ACPI_WARNING((module, line,
+ "Could not allocate size %u", (u32)size));
+
return (NULL);
}
@@ -179,9 +192,17 @@ void *acpi_ut_allocate_zeroed_and_track(acpi_size size,
struct acpi_debug_mem_block *allocation;
acpi_status status;
+ /* Check for an inadvertent size of zero bytes */
+
+ if (!size) {
+ ACPI_WARNING((module, line,
+ "Attempt to allocate zero bytes, allocating 1 byte"));
+ size = 1;
+ }
+
allocation =
- acpi_ut_allocate_zeroed(size + sizeof(struct acpi_debug_mem_header),
- component, module, line);
+ acpi_os_allocate_zeroed(size +
+ sizeof(struct acpi_debug_mem_header));
if (!allocation) {
/* Report allocation error */
@@ -409,7 +430,7 @@ acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
element->next = allocation;
}
- unlock_and_exit:
+unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 03a211e6e26a..be322c83643a 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acdebug.h"
@@ -60,7 +61,7 @@ ACPI_MODULE_NAME("utxface")
* DESCRIPTION: Shutdown the ACPICA subsystem and release all resources.
*
******************************************************************************/
-acpi_status acpi_terminate(void)
+acpi_status __init acpi_terminate(void)
{
acpi_status status;
@@ -104,7 +105,7 @@ acpi_status acpi_terminate(void)
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_terminate)
+ACPI_EXPORT_SYMBOL_INIT(acpi_terminate)
#ifndef ACPI_ASL_COMPILER
#ifdef ACPI_FUTURE_USAGE
@@ -207,6 +208,44 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
ACPI_EXPORT_SYMBOL(acpi_get_system_info)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_statistics
+ *
+ * PARAMETERS: stats - Where the statistics are returned
+ *
+ * RETURN: status - the status of the call
+ *
+ * DESCRIPTION: Get the contents of the various system counters
+ *
+ ******************************************************************************/
+acpi_status acpi_get_statistics(struct acpi_statistics *stats)
+{
+ ACPI_FUNCTION_TRACE(acpi_get_statistics);
+
+ /* Parameter validation */
+
+ if (!stats) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Various interrupt-based event counters */
+
+ stats->sci_count = acpi_sci_count;
+ stats->gpe_count = acpi_gpe_count;
+
+ ACPI_MEMCPY(stats->fixed_event_count, acpi_fixed_event_count,
+ sizeof(acpi_fixed_event_count));
+
+ /* Other counters */
+
+ stats->method_count = acpi_method_count;
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_statistics)
+
/*****************************************************************************
*
* FUNCTION: acpi_install_initialization_handler
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index e966a2e47b76..f7edb88f6054 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 41ebaaf8bb1a..75efea0539c1 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
@@ -64,7 +65,7 @@ ACPI_MODULE_NAME("utxfinit")
* called, so any early initialization belongs here.
*
******************************************************************************/
-acpi_status acpi_initialize_subsystem(void)
+acpi_status __init acpi_initialize_subsystem(void)
{
acpi_status status;
@@ -124,7 +125,8 @@ acpi_status acpi_initialize_subsystem(void)
ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem)
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_subsystem)
/*******************************************************************************
*
@@ -138,7 +140,7 @@ ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem)
* Puts system into ACPI mode if it isn't already.
*
******************************************************************************/
-acpi_status acpi_enable_subsystem(u32 flags)
+acpi_status __init acpi_enable_subsystem(u32 flags)
{
acpi_status status = AE_OK;
@@ -228,7 +230,8 @@ acpi_status acpi_enable_subsystem(u32 flags)
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_enable_subsystem)
/*******************************************************************************
*
@@ -242,7 +245,7 @@ ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
* objects and executing AML code for Regions, buffers, etc.
*
******************************************************************************/
-acpi_status acpi_initialize_objects(u32 flags)
+acpi_status __init acpi_initialize_objects(u32 flags)
{
acpi_status status = AE_OK;
@@ -314,4 +317,5 @@ acpi_status acpi_initialize_objects(u32 flags)
acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_objects)
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index f0c1ce95a0ec..786294bb682c 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -2,6 +2,8 @@ config ACPI_APEI
bool "ACPI Platform Error Interface (APEI)"
select MISC_FILESYSTEMS
select PSTORE
+ select EFI
+ select UEFI_CPER
depends on X86
help
APEI allows to report errors (for example from the chipset)
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
index d1d1bc0a4ee1..5d575a955940 100644
--- a/drivers/acpi/apei/Makefile
+++ b/drivers/acpi/apei/Makefile
@@ -3,4 +3,4 @@ obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
obj-$(CONFIG_ACPI_APEI_ERST_DEBUG) += erst-dbg.o
-apei-y := apei-base.o hest.o cper.o erst.o
+apei-y := apei-base.o hest.o erst.o
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 46f80e2c92f7..6d2c49b86b7f 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -758,9 +758,9 @@ int apei_osc_setup(void)
.cap.pointer = capbuf,
};
- capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- capbuf[OSC_SUPPORT_TYPE] = 1;
- capbuf[OSC_CONTROL_TYPE] = 0;
+ capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+ capbuf[OSC_SUPPORT_DWORD] = 1;
+ capbuf[OSC_CONTROL_DWORD] = 0;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
|| ACPI_FAILURE(acpi_run_osc(handle, &context)))
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index f220d642136e..21ba34a73883 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -122,11 +122,11 @@ struct dentry;
struct dentry *apei_get_debugfs_dir(void);
#define apei_estatus_for_each_section(estatus, section) \
- for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
+ for (section = (struct acpi_generic_data *)(estatus + 1); \
(void *)section - (void *)estatus < estatus->data_length; \
section = (void *)(section+1) + section->error_data_length)
-static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
+static inline u32 cper_estatus_len(struct acpi_generic_status *estatus)
{
if (estatus->raw_data_length)
return estatus->raw_data_offset + \
@@ -135,10 +135,10 @@ static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
return sizeof(*estatus) + estatus->data_length;
}
-void apei_estatus_print(const char *pfx,
- const struct acpi_hest_generic_status *estatus);
-int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
-int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
+void cper_estatus_print(const char *pfx,
+ const struct acpi_generic_status *estatus);
+int cper_estatus_check_header(const struct acpi_generic_status *estatus);
+int cper_estatus_check(const struct acpi_generic_status *estatus);
int apei_osc_setup(void);
#endif
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 8ec37bbdd699..a30bc313787b 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -75,13 +75,13 @@
#define GHES_ESTATUS_CACHE_LEN(estatus_len) \
(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache) \
- ((struct acpi_hest_generic_status *) \
+ ((struct acpi_generic_status *) \
((struct ghes_estatus_cache *)(estatus_cache) + 1))
#define GHES_ESTATUS_NODE_LEN(estatus_len) \
(sizeof(struct ghes_estatus_node) + (estatus_len))
-#define GHES_ESTATUS_FROM_NODE(estatus_node) \
- ((struct acpi_hest_generic_status *) \
+#define GHES_ESTATUS_FROM_NODE(estatus_node) \
+ ((struct acpi_generic_status *) \
((struct ghes_estatus_node *)(estatus_node) + 1))
bool ghes_disable;
@@ -378,17 +378,17 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
ghes->flags |= GHES_TO_CLEAR;
rc = -EIO;
- len = apei_estatus_len(ghes->estatus);
+ len = cper_estatus_len(ghes->estatus);
if (len < sizeof(*ghes->estatus))
goto err_read_block;
if (len > ghes->generic->error_block_length)
goto err_read_block;
- if (apei_estatus_check_header(ghes->estatus))
+ if (cper_estatus_check_header(ghes->estatus))
goto err_read_block;
ghes_copy_tofrom_phys(ghes->estatus + 1,
buf_paddr + sizeof(*ghes->estatus),
len - sizeof(*ghes->estatus), 1);
- if (apei_estatus_check(ghes->estatus))
+ if (cper_estatus_check(ghes->estatus))
goto err_read_block;
rc = 0;
@@ -409,7 +409,7 @@ static void ghes_clear_estatus(struct ghes *ghes)
ghes->flags &= ~GHES_TO_CLEAR;
}
-static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
+static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev)
{
#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
unsigned long pfn;
@@ -419,7 +419,7 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
if (sec_sev == GHES_SEV_CORRECTED &&
(gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED) &&
- (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS)) {
+ (mem_err->validation_bits & CPER_MEM_VALID_PA)) {
pfn = mem_err->physical_addr >> PAGE_SHIFT;
if (pfn_valid(pfn))
memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
@@ -430,7 +430,7 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
}
if (sev == GHES_SEV_RECOVERABLE &&
sec_sev == GHES_SEV_RECOVERABLE &&
- mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+ mem_err->validation_bits & CPER_MEM_VALID_PA) {
pfn = mem_err->physical_addr >> PAGE_SHIFT;
memory_failure_queue(pfn, 0, 0);
}
@@ -438,10 +438,10 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
}
static void ghes_do_proc(struct ghes *ghes,
- const struct acpi_hest_generic_status *estatus)
+ const struct acpi_generic_status *estatus)
{
int sev, sec_sev;
- struct acpi_hest_generic_data *gdata;
+ struct acpi_generic_data *gdata;
sev = ghes_severity(estatus->error_severity);
apei_estatus_for_each_section(estatus, gdata) {
@@ -496,7 +496,7 @@ static void ghes_do_proc(struct ghes *ghes,
static void __ghes_print_estatus(const char *pfx,
const struct acpi_hest_generic *generic,
- const struct acpi_hest_generic_status *estatus)
+ const struct acpi_generic_status *estatus)
{
static atomic_t seqno;
unsigned int curr_seqno;
@@ -513,12 +513,12 @@ static void __ghes_print_estatus(const char *pfx,
snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
pfx_seq, generic->header.source_id);
- apei_estatus_print(pfx_seq, estatus);
+ cper_estatus_print(pfx_seq, estatus);
}
static int ghes_print_estatus(const char *pfx,
const struct acpi_hest_generic *generic,
- const struct acpi_hest_generic_status *estatus)
+ const struct acpi_generic_status *estatus)
{
/* Not more than 2 messages every 5 seconds */
static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
@@ -540,15 +540,15 @@ static int ghes_print_estatus(const char *pfx,
* GHES error status reporting throttle, to report more kinds of
* errors, instead of just most frequently occurred errors.
*/
-static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
+static int ghes_estatus_cached(struct acpi_generic_status *estatus)
{
u32 len;
int i, cached = 0;
unsigned long long now;
struct ghes_estatus_cache *cache;
- struct acpi_hest_generic_status *cache_estatus;
+ struct acpi_generic_status *cache_estatus;
- len = apei_estatus_len(estatus);
+ len = cper_estatus_len(estatus);
rcu_read_lock();
for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
cache = rcu_dereference(ghes_estatus_caches[i]);
@@ -571,19 +571,19 @@ static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
struct acpi_hest_generic *generic,
- struct acpi_hest_generic_status *estatus)
+ struct acpi_generic_status *estatus)
{
int alloced;
u32 len, cache_len;
struct ghes_estatus_cache *cache;
- struct acpi_hest_generic_status *cache_estatus;
+ struct acpi_generic_status *cache_estatus;
alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
atomic_dec(&ghes_estatus_cache_alloced);
return NULL;
}
- len = apei_estatus_len(estatus);
+ len = cper_estatus_len(estatus);
cache_len = GHES_ESTATUS_CACHE_LEN(len);
cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
if (!cache) {
@@ -603,7 +603,7 @@ static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
{
u32 len;
- len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
+ len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
len = GHES_ESTATUS_CACHE_LEN(len);
gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
atomic_dec(&ghes_estatus_cache_alloced);
@@ -619,7 +619,7 @@ static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
static void ghes_estatus_cache_add(
struct acpi_hest_generic *generic,
- struct acpi_hest_generic_status *estatus)
+ struct acpi_generic_status *estatus)
{
int i, slot = -1, count;
unsigned long long now, duration, period, max_period = 0;
@@ -751,7 +751,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
struct llist_node *llnode, *next;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
- struct acpi_hest_generic_status *estatus;
+ struct acpi_generic_status *estatus;
u32 len, node_len;
llnode = llist_del_all(&ghes_estatus_llist);
@@ -765,7 +765,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
llnode);
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- len = apei_estatus_len(estatus);
+ len = cper_estatus_len(estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
ghes_do_proc(estatus_node->ghes, estatus);
if (!ghes_estatus_cached(estatus)) {
@@ -784,7 +784,7 @@ static void ghes_print_queued_estatus(void)
struct llist_node *llnode;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
- struct acpi_hest_generic_status *estatus;
+ struct acpi_generic_status *estatus;
u32 len, node_len;
llnode = llist_del_all(&ghes_estatus_llist);
@@ -797,7 +797,7 @@ static void ghes_print_queued_estatus(void)
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
llnode);
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- len = apei_estatus_len(estatus);
+ len = cper_estatus_len(estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
generic = estatus_node->generic;
ghes_print_estatus(NULL, generic, estatus);
@@ -843,7 +843,7 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
u32 len, node_len;
struct ghes_estatus_node *estatus_node;
- struct acpi_hest_generic_status *estatus;
+ struct acpi_generic_status *estatus;
#endif
if (!(ghes->flags & GHES_TO_CLEAR))
continue;
@@ -851,7 +851,7 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
if (ghes_estatus_cached(ghes->estatus))
goto next;
/* Save estatus for further processing in IRQ context */
- len = apei_estatus_len(ghes->estatus);
+ len = cper_estatus_len(ghes->estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
node_len);
@@ -923,7 +923,7 @@ static int ghes_probe(struct platform_device *ghes_dev)
rc = -EIO;
if (generic->error_block_length <
- sizeof(struct acpi_hest_generic_status)) {
+ sizeof(struct acpi_generic_status)) {
pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
generic->error_block_length,
generic->header.source_id);
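The ghes.c hunks above move the driver from the APEI-private helpers (apei_estatus_len, apei_estatus_print) to the shared CPER ones and rename struct acpi_hest_generic_status to struct acpi_generic_status; the length helper is what sizes the cache entries and the NMI queue nodes. As a rough illustration of what such a length helper computes over a generic error status block, here is a minimal userspace sketch; the structure layout and field names are illustrative stand-ins, not the kernel's actual headers.

    /* Sketch only: layout and names are illustrative, not the kernel's definitions. */
    #include <stdint.h>
    #include <stdio.h>

    struct generic_status {           /* generic error status block header */
            uint32_t block_status;
            uint32_t raw_data_offset; /* offset of raw (vendor) data from block start */
            uint32_t raw_data_length; /* length of raw data, may be zero */
            uint32_t data_length;     /* length of the generic data entries */
            uint32_t error_severity;
    };

    /* Total block length: header plus entries, or up to the end of raw data. */
    static uint32_t estatus_len(const struct generic_status *s)
    {
            if (s->raw_data_length)
                    return s->raw_data_offset + s->raw_data_length;
            return sizeof(*s) + s->data_length;
    }

    int main(void)
    {
            struct generic_status s = { .data_length = 40 };
            printf("len without raw data: %u\n", (unsigned)estatus_len(&s));
            s.raw_data_offset = sizeof(s) + 40;
            s.raw_data_length = 16;
            printf("len with raw data:    %u\n", (unsigned)estatus_len(&s));
            return 0;
    }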
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 2c9958cd7a43..fbf1aceda8b8 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -36,12 +36,6 @@
#include <linux/suspend.h>
#include <asm/unaligned.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-#endif
-
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/power_supply.h>
@@ -72,19 +66,6 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_battery_dir(void);
-extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
-
-enum acpi_battery_files {
- info_tag = 0,
- state_tag,
- alarm_tag,
- ACPI_BATTERY_NUMFILES,
-};
-
-#endif
-
static const struct acpi_device_id battery_device_ids[] = {
{"PNP0C0A", 0},
{"", 0},
@@ -320,14 +301,6 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
-#ifdef CONFIG_ACPI_PROCFS_POWER
-inline char *acpi_battery_units(struct acpi_battery *battery)
-{
- return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
- "mA" : "mW";
-}
-#endif
-
/* --------------------------------------------------------------------------
Battery Management
-------------------------------------------------------------------------- */
@@ -741,279 +714,6 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
}
/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-static struct proc_dir_entry *acpi_battery_dir;
-
-static int acpi_battery_print_info(struct seq_file *seq, int result)
-{
- struct acpi_battery *battery = seq->private;
-
- if (result)
- goto end;
-
- seq_printf(seq, "present: %s\n",
- acpi_battery_present(battery) ? "yes" : "no");
- if (!acpi_battery_present(battery))
- goto end;
- if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "design capacity: unknown\n");
- else
- seq_printf(seq, "design capacity: %d %sh\n",
- battery->design_capacity,
- acpi_battery_units(battery));
-
- if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "last full capacity: unknown\n");
- else
- seq_printf(seq, "last full capacity: %d %sh\n",
- battery->full_charge_capacity,
- acpi_battery_units(battery));
-
- seq_printf(seq, "battery technology: %srechargeable\n",
- (!battery->technology)?"non-":"");
-
- if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "design voltage: unknown\n");
- else
- seq_printf(seq, "design voltage: %d mV\n",
- battery->design_voltage);
- seq_printf(seq, "design capacity warning: %d %sh\n",
- battery->design_capacity_warning,
- acpi_battery_units(battery));
- seq_printf(seq, "design capacity low: %d %sh\n",
- battery->design_capacity_low,
- acpi_battery_units(battery));
- seq_printf(seq, "cycle count: %i\n", battery->cycle_count);
- seq_printf(seq, "capacity granularity 1: %d %sh\n",
- battery->capacity_granularity_1,
- acpi_battery_units(battery));
- seq_printf(seq, "capacity granularity 2: %d %sh\n",
- battery->capacity_granularity_2,
- acpi_battery_units(battery));
- seq_printf(seq, "model number: %s\n", battery->model_number);
- seq_printf(seq, "serial number: %s\n", battery->serial_number);
- seq_printf(seq, "battery type: %s\n", battery->type);
- seq_printf(seq, "OEM info: %s\n", battery->oem_info);
- end:
- if (result)
- seq_printf(seq, "ERROR: Unable to read battery info\n");
- return result;
-}
-
-static int acpi_battery_print_state(struct seq_file *seq, int result)
-{
- struct acpi_battery *battery = seq->private;
-
- if (result)
- goto end;
-
- seq_printf(seq, "present: %s\n",
- acpi_battery_present(battery) ? "yes" : "no");
- if (!acpi_battery_present(battery))
- goto end;
-
- seq_printf(seq, "capacity state: %s\n",
- (battery->state & 0x04) ? "critical" : "ok");
- if ((battery->state & 0x01) && (battery->state & 0x02))
- seq_printf(seq,
- "charging state: charging/discharging\n");
- else if (battery->state & 0x01)
- seq_printf(seq, "charging state: discharging\n");
- else if (battery->state & 0x02)
- seq_printf(seq, "charging state: charging\n");
- else
- seq_printf(seq, "charging state: charged\n");
-
- if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "present rate: unknown\n");
- else
- seq_printf(seq, "present rate: %d %s\n",
- battery->rate_now, acpi_battery_units(battery));
-
- if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "remaining capacity: unknown\n");
- else
- seq_printf(seq, "remaining capacity: %d %sh\n",
- battery->capacity_now, acpi_battery_units(battery));
- if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "present voltage: unknown\n");
- else
- seq_printf(seq, "present voltage: %d mV\n",
- battery->voltage_now);
- end:
- if (result)
- seq_printf(seq, "ERROR: Unable to read battery state\n");
-
- return result;
-}
-
-static int acpi_battery_print_alarm(struct seq_file *seq, int result)
-{
- struct acpi_battery *battery = seq->private;
-
- if (result)
- goto end;
-
- if (!acpi_battery_present(battery)) {
- seq_printf(seq, "present: no\n");
- goto end;
- }
- seq_printf(seq, "alarm: ");
- if (!battery->alarm)
- seq_printf(seq, "unsupported\n");
- else
- seq_printf(seq, "%u %sh\n", battery->alarm,
- acpi_battery_units(battery));
- end:
- if (result)
- seq_printf(seq, "ERROR: Unable to read battery alarm\n");
- return result;
-}
-
-static ssize_t acpi_battery_write_alarm(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- int result = 0;
- char alarm_string[12] = { '\0' };
- struct seq_file *m = file->private_data;
- struct acpi_battery *battery = m->private;
-
- if (!battery || (count > sizeof(alarm_string) - 1))
- return -EINVAL;
- if (!acpi_battery_present(battery)) {
- result = -ENODEV;
- goto end;
- }
- if (copy_from_user(alarm_string, buffer, count)) {
- result = -EFAULT;
- goto end;
- }
- alarm_string[count] = '\0';
- battery->alarm = simple_strtol(alarm_string, NULL, 0);
- result = acpi_battery_set_alarm(battery);
- end:
- if (!result)
- return count;
- return result;
-}
-
-typedef int(*print_func)(struct seq_file *seq, int result);
-
-static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
- acpi_battery_print_info,
- acpi_battery_print_state,
- acpi_battery_print_alarm,
-};
-
-static int acpi_battery_read(int fid, struct seq_file *seq)
-{
- struct acpi_battery *battery = seq->private;
- int result = acpi_battery_update(battery);
- return acpi_print_funcs[fid](seq, result);
-}
-
-#define DECLARE_FILE_FUNCTIONS(_name) \
-static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
-{ \
- return acpi_battery_read(_name##_tag, seq); \
-} \
-static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
-}
-
-DECLARE_FILE_FUNCTIONS(info);
-DECLARE_FILE_FUNCTIONS(state);
-DECLARE_FILE_FUNCTIONS(alarm);
-
-#undef DECLARE_FILE_FUNCTIONS
-
-#define FILE_DESCRIPTION_RO(_name) \
- { \
- .name = __stringify(_name), \
- .mode = S_IRUGO, \
- .ops = { \
- .open = acpi_battery_##_name##_open_fs, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- .owner = THIS_MODULE, \
- }, \
- }
-
-#define FILE_DESCRIPTION_RW(_name) \
- { \
- .name = __stringify(_name), \
- .mode = S_IFREG | S_IRUGO | S_IWUSR, \
- .ops = { \
- .open = acpi_battery_##_name##_open_fs, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .write = acpi_battery_write_##_name, \
- .release = single_release, \
- .owner = THIS_MODULE, \
- }, \
- }
-
-static const struct battery_file {
- struct file_operations ops;
- umode_t mode;
- const char *name;
-} acpi_battery_file[] = {
- FILE_DESCRIPTION_RO(info),
- FILE_DESCRIPTION_RO(state),
- FILE_DESCRIPTION_RW(alarm),
-};
-
-#undef FILE_DESCRIPTION_RO
-#undef FILE_DESCRIPTION_RW
-
-static int acpi_battery_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry = NULL;
- int i;
-
- printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
- " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
- if (!acpi_device_dir(device)) {
- acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
- acpi_battery_dir);
- if (!acpi_device_dir(device))
- return -ENODEV;
- }
-
- for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
- entry = proc_create_data(acpi_battery_file[i].name,
- acpi_battery_file[i].mode,
- acpi_device_dir(device),
- &acpi_battery_file[i].ops,
- acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
- }
- return 0;
-}
-
-static void acpi_battery_remove_fs(struct acpi_device *device)
-{
- int i;
- if (!acpi_device_dir(device))
- return;
- for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
- remove_proc_entry(acpi_battery_file[i].name,
- acpi_device_dir(device));
-
- remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
- acpi_device_dir(device) = NULL;
-}
-
-#endif
-
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -1075,15 +775,6 @@ static int acpi_battery_add(struct acpi_device *device)
result = acpi_battery_update(battery);
if (result)
goto fail;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- result = acpi_battery_add_fs(device);
-#endif
- if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_battery_remove_fs(device);
-#endif
- goto fail;
- }
printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
@@ -1110,9 +801,6 @@ static int acpi_battery_remove(struct acpi_device *device)
return -EINVAL;
battery = acpi_driver_data(device);
unregister_pm_notifier(&battery->pm_nb);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_battery_remove_fs(device);
-#endif
sysfs_remove_battery(battery);
mutex_destroy(&battery->lock);
mutex_destroy(&battery->sysfs_lock);
@@ -1158,18 +846,7 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
{
if (acpi_disabled)
return;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_battery_dir = acpi_lock_battery_dir();
- if (!acpi_battery_dir)
- return;
-#endif
- if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
- return;
- }
- return;
+ acpi_bus_register_driver(&acpi_battery_driver);
}
static int __init acpi_battery_init(void)
@@ -1181,9 +858,6 @@ static int __init acpi_battery_init(void)
static void __exit acpi_battery_exit(void)
{
acpi_bus_unregister_driver(&acpi_battery_driver);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
}
module_init(acpi_battery_init);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 9515f18898b2..b7fd1aeb6c42 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -274,6 +274,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
},
{
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba NB100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NB100"),
+ },
+ },
+
+ /*
+ * The following machines have broken backlight support when reporting
+ * the Windows 2012 OSI, so disable it until their support is fixed.
+ */
+ {
.callback = dmi_disable_osi_win8,
.ident = "ASUS Zenbook Prime UX31A",
.matches = {
@@ -297,6 +310,54 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
},
},
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Acer Aspire V5-573G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Acer Aspire V5-572G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad T431s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad T430",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index b587ec8257b2..bba9b72e25f8 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -174,7 +174,7 @@ static void acpi_print_osc_error(acpi_handle handle,
printk("\n");
}
-static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
+acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
{
int i;
static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
@@ -195,6 +195,7 @@ static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
}
return AE_OK;
}
+EXPORT_SYMBOL_GPL(acpi_str_to_uuid);
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
{
@@ -255,7 +256,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
acpi_print_osc_error(handle, context,
"_OSC invalid revision");
if (errors & OSC_CAPABILITIES_MASK_ERROR) {
- if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE]
+ if (((u32 *)context->cap.pointer)[OSC_QUERY_DWORD]
& OSC_QUERY_ENABLE)
goto out_success;
status = AE_SUPPORT;
@@ -295,30 +296,30 @@ static void acpi_bus_osc_support(void)
};
acpi_handle handle;
- capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
+ capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+ capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
#endif
#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
#endif
#ifdef ACPI_HOTPLUG_OST
- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_HOTPLUG_OST_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
#endif
if (!ghes_disable)
- capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return;
if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
u32 *capbuf_ret = context.ret.pointer;
- if (context.ret.length > OSC_SUPPORT_TYPE)
+ if (context.ret.length > OSC_SUPPORT_DWORD)
osc_sb_apei_support_acked =
- capbuf_ret[OSC_SUPPORT_TYPE] & OSC_SB_APEI_SUPPORT;
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
kfree(context.ret.pointer);
}
/* do we need to check other returned cap? Sounds no */
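The bus.c hunk above renames the _OSC capability-buffer indices from OSC_*_TYPE to OSC_*_DWORD, which matches what the buffer really is: an array of 32-bit words, the first carrying the query flag, the second the support bits (and, for the PCI host bridge _OSC, a third carrying the control bits). A minimal sketch of that layout, using illustrative index and flag names rather than the kernel's macros:

    /* Illustrative names only; the real indices and flags live in linux/acpi.h. */
    #include <stdint.h>
    #include <stdio.h>

    enum { QUERY_DWORD = 0, SUPPORT_DWORD = 1, CONTROL_DWORD = 2 };

    #define QUERY_ENABLE     0x01u
    #define SB_APEI_SUPPORT  0x10u   /* example support bit */

    int main(void)
    {
            uint32_t capbuf[3] = { 0 };

            /* Query pass: ask what the platform would grant for these capabilities. */
            capbuf[QUERY_DWORD]   = QUERY_ENABLE;
            capbuf[SUPPORT_DWORD] = SB_APEI_SUPPORT;
            capbuf[CONTROL_DWORD] = 0;   /* only the PCI root _OSC uses a control word */

            printf("query=%#x support=%#x control=%#x\n",
                   (unsigned)capbuf[QUERY_DWORD],
                   (unsigned)capbuf[SUPPORT_DWORD],
                   (unsigned)capbuf[CONTROL_DWORD]);
            return 0;
    }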
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index a55773801c5f..c971929d75c2 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -383,18 +383,15 @@ static int acpi_button_add(struct acpi_device *device)
switch (button->type) {
case ACPI_BUTTON_TYPE_POWER:
- input->evbit[0] = BIT_MASK(EV_KEY);
- set_bit(KEY_POWER, input->keybit);
+ input_set_capability(input, EV_KEY, KEY_POWER);
break;
case ACPI_BUTTON_TYPE_SLEEP:
- input->evbit[0] = BIT_MASK(EV_KEY);
- set_bit(KEY_SLEEP, input->keybit);
+ input_set_capability(input, EV_KEY, KEY_SLEEP);
break;
case ACPI_BUTTON_TYPE_LID:
- input->evbit[0] = BIT_MASK(EV_SW);
- set_bit(SW_LID, input->swbit);
+ input_set_capability(input, EV_SW, SW_LID);
break;
}
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
deleted file mode 100644
index 6c9ee68e46fb..000000000000
--- a/drivers/acpi/cm_sbs.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
-#define PREFIX "ACPI: "
-
-ACPI_MODULE_NAME("cm_sbs");
-#define ACPI_AC_CLASS "ac_adapter"
-#define ACPI_BATTERY_CLASS "battery"
-#define _COMPONENT ACPI_SBS_COMPONENT
-static struct proc_dir_entry *acpi_ac_dir;
-static struct proc_dir_entry *acpi_battery_dir;
-
-static DEFINE_MUTEX(cm_sbs_mutex);
-
-static int lock_ac_dir_cnt;
-static int lock_battery_dir_cnt;
-
-struct proc_dir_entry *acpi_lock_ac_dir(void)
-{
- mutex_lock(&cm_sbs_mutex);
- if (!acpi_ac_dir)
- acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
- if (acpi_ac_dir) {
- lock_ac_dir_cnt++;
- } else {
- printk(KERN_ERR PREFIX
- "Cannot create %s\n", ACPI_AC_CLASS);
- }
- mutex_unlock(&cm_sbs_mutex);
- return acpi_ac_dir;
-}
-EXPORT_SYMBOL(acpi_lock_ac_dir);
-
-void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
-{
- mutex_lock(&cm_sbs_mutex);
- if (acpi_ac_dir_param)
- lock_ac_dir_cnt--;
- if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
- remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
- acpi_ac_dir = NULL;
- }
- mutex_unlock(&cm_sbs_mutex);
-}
-EXPORT_SYMBOL(acpi_unlock_ac_dir);
-
-struct proc_dir_entry *acpi_lock_battery_dir(void)
-{
- mutex_lock(&cm_sbs_mutex);
- if (!acpi_battery_dir) {
- acpi_battery_dir =
- proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
- }
- if (acpi_battery_dir) {
- lock_battery_dir_cnt++;
- } else {
- printk(KERN_ERR PREFIX
- "Cannot create %s\n", ACPI_BATTERY_CLASS);
- }
- mutex_unlock(&cm_sbs_mutex);
- return acpi_battery_dir;
-}
-EXPORT_SYMBOL(acpi_lock_battery_dir);
-
-void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
-{
- mutex_lock(&cm_sbs_mutex);
- if (acpi_battery_dir_param)
- lock_battery_dir_cnt--;
- if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
- && acpi_battery_dir) {
- remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
- acpi_battery_dir = NULL;
- }
- mutex_unlock(&cm_sbs_mutex);
- return;
-}
-EXPORT_SYMBOL(acpi_unlock_battery_dir);
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index a94383d1f350..d42b2fb5a7e9 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -118,9 +118,10 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
/*
* If we were unsure about the device parent's power state up to this
* point, the fact that the device is in D0 implies that the parent has
- * to be in D0 too.
+ * to be in D0 too, except if ignore_parent is set.
*/
- if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
+ if (!device->power.flags.ignore_parent && device->parent
+ && device->parent->power.state == ACPI_STATE_UNKNOWN
&& result == ACPI_STATE_D0)
device->parent->power.state = ACPI_STATE_D0;
@@ -177,7 +178,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
acpi_power_state_string(state));
return -ENODEV;
}
- if (device->parent && (state < device->parent->power.state)) {
+ if (!device->power.flags.ignore_parent &&
+ device->parent && (state < device->parent->power.state)) {
dev_warn(&device->dev,
"Cannot transition to power state %s for parent in %s\n",
acpi_power_state_string(state),
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 05ea4be01a83..ca86c1ce7c8a 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -441,7 +441,7 @@ static void handle_dock(struct dock_station *ds, int dock)
acpi_status status;
struct acpi_object_list arg_list;
union acpi_object arg;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ unsigned long long value;
acpi_handle_info(ds->handle, "%s\n", dock ? "docking" : "undocking");
@@ -450,12 +450,10 @@ static void handle_dock(struct dock_station *ds, int dock)
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = dock;
- status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
+ status = acpi_evaluate_integer(ds->handle, "_DCK", &arg_list, &value);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
acpi_handle_err(ds->handle, "Failed to execute _DCK (0x%x)\n",
status);
-
- kfree(buffer.pointer);
}
static inline void dock(struct dock_station *ds)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index a06d98374705..d5309fd49458 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -28,6 +28,7 @@
/* Uncomment next line to get verbose printout */
/* #define DEBUG */
+#define pr_fmt(fmt) "ACPI : EC: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -49,9 +50,6 @@
#define ACPI_EC_DEVICE_NAME "Embedded Controller"
#define ACPI_EC_FILE_INFO "info"
-#undef PREFIX
-#define PREFIX "ACPI: EC: "
-
/* EC status register */
#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
@@ -131,26 +129,26 @@ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
u8 x = inb(ec->command_addr);
- pr_debug(PREFIX "---> status = 0x%2.2x\n", x);
+ pr_debug("---> status = 0x%2.2x\n", x);
return x;
}
static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
u8 x = inb(ec->data_addr);
- pr_debug(PREFIX "---> data = 0x%2.2x\n", x);
+ pr_debug("---> data = 0x%2.2x\n", x);
return x;
}
static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
- pr_debug(PREFIX "<--- command = 0x%2.2x\n", command);
+ pr_debug("<--- command = 0x%2.2x\n", command);
outb(command, ec->command_addr);
}
static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
- pr_debug(PREFIX "<--- data = 0x%2.2x\n", data);
+ pr_debug("<--- data = 0x%2.2x\n", data);
outb(data, ec->data_addr);
}
@@ -241,7 +239,7 @@ static int ec_poll(struct acpi_ec *ec)
}
advance_transaction(ec, acpi_ec_read_status(ec));
} while (time_before(jiffies, delay));
- pr_debug(PREFIX "controller reset, restart transaction\n");
+ pr_debug("controller reset, restart transaction\n");
spin_lock_irqsave(&ec->lock, flags);
start_transaction(ec);
spin_unlock_irqrestore(&ec->lock, flags);
@@ -309,12 +307,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
}
}
if (ec_wait_ibf0(ec)) {
- pr_err(PREFIX "input buffer is not empty, "
+ pr_err("input buffer is not empty, "
"aborting transaction\n");
status = -ETIME;
goto end;
}
- pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n",
+ pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
t->command, t->wdata ? t->wdata[0] : 0);
/* disable GPE during transaction if storm is detected */
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
@@ -331,12 +329,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
/* It is safe to enable the GPE outside of the transaction. */
acpi_enable_gpe(NULL, ec->gpe);
} else if (t->irq_count > ec_storm_threshold) {
- pr_info(PREFIX "GPE storm detected(%d GPEs), "
+ pr_info("GPE storm detected(%d GPEs), "
"transactions will use polling mode\n",
t->irq_count);
set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
}
- pr_debug(PREFIX "transaction end\n");
+ pr_debug("transaction end\n");
end:
if (ec->global_lock)
acpi_release_global_lock(glk);
@@ -570,12 +568,12 @@ static void acpi_ec_run(void *cxt)
struct acpi_ec_query_handler *handler = cxt;
if (!handler)
return;
- pr_debug(PREFIX "start query execution\n");
+ pr_debug("start query execution\n");
if (handler->func)
handler->func(handler->data);
else if (handler->handle)
acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
- pr_debug(PREFIX "stop query execution\n");
+ pr_debug("stop query execution\n");
kfree(handler);
}
@@ -593,7 +591,8 @@ static int acpi_ec_sync_query(struct acpi_ec *ec)
if (!copy)
return -ENOMEM;
memcpy(copy, handler, sizeof(*copy));
- pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
+ pr_debug("push query execution (0x%2x) on queue\n",
+ value);
return acpi_os_execute((copy->func) ?
OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
acpi_ec_run, copy);
@@ -616,7 +615,7 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
- pr_debug(PREFIX "push gpe query to the queue\n");
+ pr_debug("push gpe query to the queue\n");
return acpi_os_execute(OSL_NOTIFY_HANDLER,
acpi_ec_gpe_query, ec);
}
@@ -630,7 +629,7 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
struct acpi_ec *ec = data;
u8 status = acpi_ec_read_status(ec);
- pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status);
+ pr_debug("~~~> interrupt, status:0x%02x\n", status);
advance_transaction(ec, status);
if (ec_transaction_done(ec) &&
@@ -776,7 +775,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
* The AE_NOT_FOUND error will be ignored and OS
* continue to initialize EC.
*/
- printk(KERN_ERR "Fail in evaluating the _REG object"
+ pr_err("Fail in evaluating the _REG object"
" of EC device. Broken bios is suspected.\n");
} else {
acpi_remove_gpe_handler(NULL, ec->gpe,
@@ -795,10 +794,10 @@ static void ec_remove_handlers(struct acpi_ec *ec)
acpi_disable_gpe(NULL, ec->gpe);
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
- pr_err(PREFIX "failed to remove space handler\n");
+ pr_err("failed to remove space handler\n");
if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler)))
- pr_err(PREFIX "failed to remove gpe handler\n");
+ pr_err("failed to remove gpe handler\n");
clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
}
@@ -840,7 +839,7 @@ static int acpi_ec_add(struct acpi_device *device)
ret = !!request_region(ec->command_addr, 1, "EC cmd");
WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
- pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
+ pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
ret = ec_install_handlers(ec);
@@ -931,7 +930,7 @@ static int ec_validate_ecdt(const struct dmi_system_id *id)
/* MSI EC needs special treatment, enable it */
static int ec_flag_msi(const struct dmi_system_id *id)
{
- printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
+ pr_debug("Detected MSI hardware, enabling workarounds.\n");
EC_FLAGS_MSI = 1;
EC_FLAGS_VALIDATE_ECDT = 1;
return 0;
@@ -1010,7 +1009,7 @@ int __init acpi_ec_ecdt_probe(void)
status = acpi_get_table(ACPI_SIG_ECDT, 1,
(struct acpi_table_header **)&ecdt_ptr);
if (ACPI_SUCCESS(status)) {
- pr_info(PREFIX "EC description table is found, configuring boot EC\n");
+ pr_info("EC description table is found, configuring boot EC\n");
boot_ec->command_addr = ecdt_ptr->control.address;
boot_ec->data_addr = ecdt_ptr->data.address;
boot_ec->gpe = ecdt_ptr->gpe;
@@ -1030,7 +1029,7 @@ int __init acpi_ec_ecdt_probe(void)
/* This workaround is needed only on some broken machines,
* which require early EC, but fail to provide ECDT */
- printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
+ pr_debug("Look up EC in DSDT\n");
status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
boot_ec, NULL);
/* Check that acpi_get_devices actually find something */
@@ -1042,7 +1041,7 @@ int __init acpi_ec_ecdt_probe(void)
saved_ec->data_addr != boot_ec->data_addr ||
saved_ec->gpe != boot_ec->gpe ||
saved_ec->handle != boot_ec->handle)
- pr_info(PREFIX "ASUSTek keeps feeding us with broken "
+ pr_info("ASUSTek keeps feeding us with broken "
"ECDT tables, which are very hard to workaround. "
"Trying to use DSDT EC info instead. Please send "
"output of acpidump to linux-acpi@vger.kernel.org\n");
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 41ade6570bc0..ba3da88cee45 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -168,7 +168,7 @@ static int acpi_fan_add(struct acpi_device *device)
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
- end:
+end:
return result;
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 20f423337e1f..e9304dc7ebfa 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -169,9 +169,7 @@ int acpi_create_platform_device(struct acpi_device *adev,
Video
-------------------------------------------------------------------------- */
#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
-bool acpi_video_backlight_quirks(void);
-#else
-static inline bool acpi_video_backlight_quirks(void) { return false; }
+bool acpi_osi_is_win8(void);
#endif
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 2e82e5d76930..a2343a1d9e0b 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -73,7 +73,7 @@ int acpi_map_pxm_to_node(int pxm)
{
int node = pxm_to_node_map[pxm];
- if (node < 0) {
+ if (node == NUMA_NO_NODE) {
if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
return NUMA_NO_NODE;
node = first_unset_node(nodes_found_map);
@@ -334,7 +334,7 @@ int acpi_get_pxm(acpi_handle h)
int acpi_get_node(acpi_handle *handle)
{
- int pxm, node = -1;
+ int pxm, node = NUMA_NO_NODE;
pxm = acpi_get_pxm(handle);
if (pxm >= 0 && pxm < MAX_PXM_DOMAINS)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index e5f416c7f66e..a0c09adf7e7d 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -569,8 +569,10 @@ static const char * const table_sigs[] = {
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
-/* Must not increase 10 or needs code modification below */
-#define ACPI_OVERRIDE_TABLES 10
+#define ACPI_OVERRIDE_TABLES 64
+static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
+
+#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
void __init acpi_initrd_override(void *data, size_t size)
{
@@ -579,8 +581,6 @@ void __init acpi_initrd_override(void *data, size_t size)
struct acpi_table_header *table;
char cpio_path[32] = "kernel/firmware/acpi/";
struct cpio_data file;
- struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
- char *p;
if (data == NULL || size == 0)
return;
@@ -625,8 +625,8 @@ void __init acpi_initrd_override(void *data, size_t size)
table->signature, cpio_path, file.name, table->length);
all_tables_size += table->length;
- early_initrd_files[table_nr].data = file.data;
- early_initrd_files[table_nr].size = file.size;
+ acpi_initrd_files[table_nr].data = file.data;
+ acpi_initrd_files[table_nr].size = file.size;
table_nr++;
}
if (table_nr == 0)
@@ -652,14 +652,34 @@ void __init acpi_initrd_override(void *data, size_t size)
memblock_reserve(acpi_tables_addr, all_tables_size);
arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
- p = early_ioremap(acpi_tables_addr, all_tables_size);
-
+ /*
+ * early_ioremap only can remap 256k one time. If we map all
+ * tables one time, we will hit the limit. Need to map chunks
+ * one by one during copying the same as that in relocate_initrd().
+ */
for (no = 0; no < table_nr; no++) {
- memcpy(p + total_offset, early_initrd_files[no].data,
- early_initrd_files[no].size);
- total_offset += early_initrd_files[no].size;
+ unsigned char *src_p = acpi_initrd_files[no].data;
+ phys_addr_t size = acpi_initrd_files[no].size;
+ phys_addr_t dest_addr = acpi_tables_addr + total_offset;
+ phys_addr_t slop, clen;
+ char *dest_p;
+
+ total_offset += size;
+
+ while (size) {
+ slop = dest_addr & ~PAGE_MASK;
+ clen = size;
+ if (clen > MAP_CHUNK_SIZE - slop)
+ clen = MAP_CHUNK_SIZE - slop;
+ dest_p = early_ioremap(dest_addr & PAGE_MASK,
+ clen + slop);
+ memcpy(dest_p + slop, src_p, clen);
+ early_iounmap(dest_p, clen + slop);
+ src_p += clen;
+ dest_addr += clen;
+ size -= clen;
+ }
}
- early_iounmap(p, all_tables_size);
}
#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
@@ -820,7 +840,7 @@ acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
void acpi_os_sleep(u64 ms)
{
- schedule_timeout_interruptible(msecs_to_jiffies(ms));
+ msleep(ms);
}
void acpi_os_stall(u32 us)
@@ -1335,7 +1355,7 @@ static int __init acpi_os_name_setup(char *str)
if (!str || !*str)
return 0;
- for (; count-- && str && *str; str++) {
+ for (; count-- && *str; str++) {
if (isalnum(*str) || *str == ' ' || *str == ':')
*p++ = *str;
else if (*str == '\'' || *str == '"')
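In the acpi_initrd_override() rework above, the single early_ioremap() of all override tables is replaced by a loop that maps and copies at most MAP_CHUNK_SIZE at a time, because early_ioremap() can only map a limited fixmap window (256k, as the added comment notes). The chunk arithmetic, the page-offset "slop" plus a clamped copy length, is the part worth seeing in isolation, so here is a standalone model of the same loop over ordinary buffers, with the map/unmap steps reduced to pointer math; PAGE_SZ and MAP_CHUNK are illustrative stand-ins.

    /* Standalone model of the chunked copy; the arithmetic mirrors the loop above. */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define PAGE_SZ   4096u
    #define PAGE_MSK  (~(uintptr_t)(PAGE_SZ - 1))
    #define MAP_CHUNK (16u * PAGE_SZ)       /* stand-in for NR_FIX_BTMAPS << PAGE_SHIFT */

    static void chunked_copy(uint8_t *dest_base, uintptr_t dest_addr,
                             const uint8_t *src, size_t size)
    {
            while (size) {
                    uintptr_t slop = dest_addr & ~PAGE_MSK; /* offset within first page */
                    size_t clen = size;

                    if (clen > MAP_CHUNK - slop)
                            clen = MAP_CHUNK - slop;

                    /* "map" a page-aligned window, then copy into it at 'slop' */
                    uint8_t *dest_p = dest_base + (dest_addr & PAGE_MSK);
                    memcpy(dest_p + slop, src, clen);

                    src += clen;
                    dest_addr += clen;
                    size -= clen;
            }
    }

    int main(void)
    {
            static uint8_t pool[256 * 1024], table[100 * 1024];

            memset(table, 0xAB, sizeof(table));
            chunked_copy(pool, 12345, table, sizeof(table));  /* unaligned destination */
            printf("copied ok: %d\n",
                   pool[12345] == 0xAB && pool[12345 + sizeof(table) - 1] == 0xAB);
            return 0;
    }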
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d3874f425653..924ad92852c1 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -49,10 +49,10 @@ static int acpi_pci_root_add(struct acpi_device *device,
const struct acpi_device_id *not_used);
static void acpi_pci_root_remove(struct acpi_device *device);
-#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \
- | OSC_ACTIVE_STATE_PWR_SUPPORT \
- | OSC_CLOCK_PWR_CAPABILITY_SUPPORT \
- | OSC_MSI_SUPPORT)
+#define ACPI_PCIE_REQ_SUPPORT (OSC_PCI_EXT_CONFIG_SUPPORT \
+ | OSC_PCI_ASPM_SUPPORT \
+ | OSC_PCI_CLOCK_PM_SUPPORT \
+ | OSC_PCI_MSI_SUPPORT)
static const struct acpi_device_id root_device_ids[] = {
{"PNP0A03", 0},
@@ -127,6 +127,55 @@ static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
return AE_OK;
}
+struct pci_osc_bit_struct {
+ u32 bit;
+ char *desc;
+};
+
+static struct pci_osc_bit_struct pci_osc_support_bit[] = {
+ { OSC_PCI_EXT_CONFIG_SUPPORT, "ExtendedConfig" },
+ { OSC_PCI_ASPM_SUPPORT, "ASPM" },
+ { OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" },
+ { OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" },
+ { OSC_PCI_MSI_SUPPORT, "MSI" },
+};
+
+static struct pci_osc_bit_struct pci_osc_control_bit[] = {
+ { OSC_PCI_EXPRESS_NATIVE_HP_CONTROL, "PCIeHotplug" },
+ { OSC_PCI_SHPC_NATIVE_HP_CONTROL, "SHPCHotplug" },
+ { OSC_PCI_EXPRESS_PME_CONTROL, "PME" },
+ { OSC_PCI_EXPRESS_AER_CONTROL, "AER" },
+ { OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" },
+};
+
+static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
+ struct pci_osc_bit_struct *table, int size)
+{
+ char buf[80];
+ int i, len = 0;
+ struct pci_osc_bit_struct *entry;
+
+ buf[0] = '\0';
+ for (i = 0, entry = table; i < size; i++, entry++)
+ if (word & entry->bit)
+ len += snprintf(buf + len, sizeof(buf) - len, "%s%s",
+ len ? " " : "", entry->desc);
+
+ dev_info(&root->device->dev, "_OSC: %s [%s]\n", msg, buf);
+}
+
+static void decode_osc_support(struct acpi_pci_root *root, char *msg, u32 word)
+{
+ decode_osc_bits(root, msg, word, pci_osc_support_bit,
+ ARRAY_SIZE(pci_osc_support_bit));
+}
+
+static void decode_osc_control(struct acpi_pci_root *root, char *msg, u32 word)
+{
+ decode_osc_bits(root, msg, word, pci_osc_control_bit,
+ ARRAY_SIZE(pci_osc_control_bit));
+}
+
static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
static acpi_status acpi_pci_run_osc(acpi_handle handle,
@@ -158,14 +207,14 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
support &= OSC_PCI_SUPPORT_MASKS;
support |= root->osc_support_set;
- capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- capbuf[OSC_SUPPORT_TYPE] = support;
+ capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+ capbuf[OSC_SUPPORT_DWORD] = support;
if (control) {
*control &= OSC_PCI_CONTROL_MASKS;
- capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+ capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set;
} else {
/* Run _OSC query only with existing controls. */
- capbuf[OSC_CONTROL_TYPE] = root->osc_control_set;
+ capbuf[OSC_CONTROL_DWORD] = root->osc_control_set;
}
status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
@@ -180,11 +229,7 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
{
acpi_status status;
- acpi_handle tmp;
- status = acpi_get_handle(root->device->handle, "_OSC", &tmp);
- if (ACPI_FAILURE(status))
- return status;
mutex_lock(&osc_lock);
status = acpi_pci_query_osc(root, flags, NULL);
mutex_unlock(&osc_lock);
@@ -316,9 +361,8 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
{
struct acpi_pci_root *root;
- acpi_status status;
+ acpi_status status = AE_OK;
u32 ctrl, capbuf[3];
- acpi_handle tmp;
if (!mask)
return AE_BAD_PARAMETER;
@@ -331,10 +375,6 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
if (!root)
return AE_NOT_EXIST;
- status = acpi_get_handle(handle, "_OSC", &tmp);
- if (ACPI_FAILURE(status))
- return status;
-
mutex_lock(&osc_lock);
*mask = ctrl | root->osc_control_set;
@@ -349,17 +389,21 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
goto out;
if (ctrl == *mask)
break;
+ decode_osc_control(root, "platform does not support",
+ ctrl & ~(*mask));
ctrl = *mask;
}
if ((ctrl & req) != req) {
+ decode_osc_control(root, "not requesting control; platform does not support",
+ req & ~(ctrl));
status = AE_SUPPORT;
goto out;
}
- capbuf[OSC_QUERY_TYPE] = 0;
- capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
- capbuf[OSC_CONTROL_TYPE] = ctrl;
+ capbuf[OSC_QUERY_DWORD] = 0;
+ capbuf[OSC_SUPPORT_DWORD] = root->osc_support_set;
+ capbuf[OSC_CONTROL_DWORD] = ctrl;
status = acpi_pci_run_osc(handle, capbuf, mask);
if (ACPI_SUCCESS(status))
root->osc_control_set = *mask;
@@ -369,6 +413,87 @@ out:
}
EXPORT_SYMBOL(acpi_pci_osc_control_set);
+static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
+ int *clear_aspm)
+{
+ u32 support, control, requested;
+ acpi_status status;
+ struct acpi_device *device = root->device;
+ acpi_handle handle = device->handle;
+
+ /*
+ * All supported architectures that use ACPI have support for
+ * PCI domains, so we indicate this in _OSC support capabilities.
+ */
+ support = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+ if (pci_ext_cfg_avail())
+ support |= OSC_PCI_EXT_CONFIG_SUPPORT;
+ if (pcie_aspm_support_enabled())
+ support |= OSC_PCI_ASPM_SUPPORT | OSC_PCI_CLOCK_PM_SUPPORT;
+ if (pci_msi_enabled())
+ support |= OSC_PCI_MSI_SUPPORT;
+
+ decode_osc_support(root, "OS supports", support);
+ status = acpi_pci_osc_support(root, support);
+ if (ACPI_FAILURE(status)) {
+ dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
+ acpi_format_exception(status));
+ *no_aspm = 1;
+ return;
+ }
+
+ if (pcie_ports_disabled) {
+ dev_info(&device->dev, "PCIe port services disabled; not requesting _OSC control\n");
+ return;
+ }
+
+ if ((support & ACPI_PCIE_REQ_SUPPORT) != ACPI_PCIE_REQ_SUPPORT) {
+ decode_osc_support(root, "not requesting OS control; OS requires",
+ ACPI_PCIE_REQ_SUPPORT);
+ return;
+ }
+
+ control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
+ | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+ | OSC_PCI_EXPRESS_PME_CONTROL;
+
+ if (pci_aer_available()) {
+ if (aer_acpi_firmware_first())
+ dev_info(&device->dev,
+ "PCIe AER handled by firmware\n");
+ else
+ control |= OSC_PCI_EXPRESS_AER_CONTROL;
+ }
+
+ requested = control;
+ status = acpi_pci_osc_control_set(handle, &control,
+ OSC_PCI_EXPRESS_CAPABILITY_CONTROL);
+ if (ACPI_SUCCESS(status)) {
+ decode_osc_control(root, "OS now controls", control);
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
+ /*
+ * We have ASPM control, but the FADT indicates
+ * that it's unsupported. Clear it.
+ */
+ *clear_aspm = 1;
+ }
+ } else {
+ decode_osc_control(root, "OS requested", requested);
+ decode_osc_control(root, "platform willing to grant", control);
+ dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
+ acpi_format_exception(status));
+
+ /*
+ * We want to disable ASPM here, but aspm_disabled
+ * needs to remain in its state from boot so that we
+ * properly handle PCIe 1.1 devices. So we set this
+ * flag here, to defer the action until after the ACPI
+ * root scan.
+ */
+ *no_aspm = 1;
+ }
+}
+
static int acpi_pci_root_add(struct acpi_device *device,
const struct acpi_device_id *not_used)
{
@@ -376,9 +501,8 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_status status;
int result;
struct acpi_pci_root *root;
- u32 flags, base_flags;
acpi_handle handle = device->handle;
- bool no_aspm = false, clear_aspm = false;
+ int no_aspm = 0, clear_aspm = 0;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -431,81 +555,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle);
- /*
- * All supported architectures that use ACPI have support for
- * PCI domains, so we indicate this in _OSC support capabilities.
- */
- flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
- acpi_pci_osc_support(root, flags);
-
- if (pci_ext_cfg_avail())
- flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
- if (pcie_aspm_support_enabled()) {
- flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
- OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
- }
- if (pci_msi_enabled())
- flags |= OSC_MSI_SUPPORT;
- if (flags != base_flags) {
- status = acpi_pci_osc_support(root, flags);
- if (ACPI_FAILURE(status)) {
- dev_info(&device->dev, "ACPI _OSC support "
- "notification failed, disabling PCIe ASPM\n");
- no_aspm = true;
- flags = base_flags;
- }
- }
-
- if (!pcie_ports_disabled
- && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
- flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
- | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
- | OSC_PCI_EXPRESS_PME_CONTROL;
-
- if (pci_aer_available()) {
- if (aer_acpi_firmware_first())
- dev_dbg(&device->dev,
- "PCIe errors handled by BIOS.\n");
- else
- flags |= OSC_PCI_EXPRESS_AER_CONTROL;
- }
-
- dev_info(&device->dev,
- "Requesting ACPI _OSC control (0x%02x)\n", flags);
-
- status = acpi_pci_osc_control_set(handle, &flags,
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- if (ACPI_SUCCESS(status)) {
- dev_info(&device->dev,
- "ACPI _OSC control (0x%02x) granted\n", flags);
- if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
- /*
- * We have ASPM control, but the FADT indicates
- * that it's unsupported. Clear it.
- */
- clear_aspm = true;
- }
- } else {
- dev_info(&device->dev,
- "ACPI _OSC request failed (%s), "
- "returned control mask: 0x%02x\n",
- acpi_format_exception(status), flags);
- dev_info(&device->dev,
- "ACPI _OSC control for PCIe not granted, disabling ASPM\n");
- /*
- * We want to disable ASPM here, but aspm_disabled
- * needs to remain in its state from boot so that we
- * properly handle PCIe 1.1 devices. So we set this
- * flag here, to defer the action until after the ACPI
- * root scan.
- */
- no_aspm = true;
- }
- } else {
- dev_info(&device->dev,
- "Unable to request _OSC control "
- "(_OSC support mask: 0x%02x)\n", flags);
- }
+ negotiate_os_control(root, &no_aspm, &clear_aspm);
/*
* TBD: Need PCI interface for enumeration/configuration of roots.
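The new decode_osc_support()/decode_osc_control() helpers introduced above follow a common kernel pattern: walk a table of { bit, name } pairs and append the names of the set bits into a single log line. A compact userspace rendition of the same pattern (table trimmed, bit values illustrative):

    /* Bit-mask-to-name decoding in the style of decode_osc_bits(). */
    #include <stdio.h>
    #include <stdint.h>

    struct bit_desc {
            uint32_t bit;
            const char *desc;
    };

    static const struct bit_desc support_bits[] = {
            { 0x01, "ExtendedConfig" },
            { 0x02, "ASPM" },
            { 0x04, "ClockPM" },
            { 0x08, "Segments" },
            { 0x10, "MSI" },
    };

    static void decode_bits(const char *msg, uint32_t word,
                            const struct bit_desc *table, size_t n)
    {
            char buf[80] = "";
            int len = 0;

            for (size_t i = 0; i < n; i++)
                    if (word & table[i].bit)
                            len += snprintf(buf + len, sizeof(buf) - len, "%s%s",
                                            len ? " " : "", table[i].desc);

            printf("_OSC: %s [%s]\n", msg, buf);
    }

    int main(void)
    {
            decode_bits("OS supports", 0x01 | 0x04 | 0x10,
                        support_bits, sizeof(support_bits) / sizeof(support_bits[0]));
            return 0;
    }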
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 04a13784dd20..6a5b152ad4d0 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -8,289 +8,17 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
-#ifdef CONFIG_X86
-#include <linux/mc146818rtc.h>
-#endif
-
#include "sleep.h"
#define _COMPONENT ACPI_SYSTEM_COMPONENT
/*
* this file provides support for:
- * /proc/acpi/alarm
* /proc/acpi/wakeup
*/
ACPI_MODULE_NAME("sleep")
-#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86)
-/* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */
-#else
-#define HAVE_ACPI_LEGACY_ALARM
-#endif
-
-#ifdef HAVE_ACPI_LEGACY_ALARM
-
-static u32 cmos_bcd_read(int offset, int rtc_control);
-
-static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
-{
- u32 sec, min, hr;
- u32 day, mo, yr, cent = 0;
- u32 today = 0;
- unsigned char rtc_control = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
-
- rtc_control = CMOS_READ(RTC_CONTROL);
- sec = cmos_bcd_read(RTC_SECONDS_ALARM, rtc_control);
- min = cmos_bcd_read(RTC_MINUTES_ALARM, rtc_control);
- hr = cmos_bcd_read(RTC_HOURS_ALARM, rtc_control);
-
- /* If we ever get an FACP with proper values... */
- if (acpi_gbl_FADT.day_alarm) {
- /* ACPI spec: only low 6 its should be cared */
- day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
- if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- day = bcd2bin(day);
- } else
- day = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
- if (acpi_gbl_FADT.month_alarm)
- mo = cmos_bcd_read(acpi_gbl_FADT.month_alarm, rtc_control);
- else {
- mo = cmos_bcd_read(RTC_MONTH, rtc_control);
- today = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
- }
- if (acpi_gbl_FADT.century)
- cent = cmos_bcd_read(acpi_gbl_FADT.century, rtc_control);
-
- yr = cmos_bcd_read(RTC_YEAR, rtc_control);
-
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- /* we're trusting the FADT (see above) */
- if (!acpi_gbl_FADT.century)
- /* If we're not trusting the FADT, we should at least make it
- * right for _this_ century... ehm, what is _this_ century?
- *
- * TBD:
- * ASAP: find piece of code in the kernel, e.g. star tracker driver,
- * which we can trust to determine the century correctly. Atom
- * watch driver would be nice, too...
- *
- * if that has not happened, change for first release in 2050:
- * if (yr<50)
- * yr += 2100;
- * else
- * yr += 2000; // current line of code
- *
- * if that has not happened either, please do on 2099/12/31:23:59:59
- * s/2000/2100
- *
- */
- yr += 2000;
- else
- yr += cent * 100;
-
- /*
- * Show correct dates for alarms up to a month into the future.
- * This solves issues for nearly all situations with the common
- * 30-day alarm clocks in PC hardware.
- */
- if (day < today) {
- if (mo < 12) {
- mo += 1;
- } else {
- mo = 1;
- yr += 1;
- }
- }
-
- seq_printf(seq, "%4.4u-", yr);
- (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
- (day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day);
- (hr > 23) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", hr);
- (min > 59) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", min);
- (sec > 59) ? seq_puts(seq, "**\n") : seq_printf(seq, "%2.2u\n", sec);
-
- return 0;
-}
-
-static int acpi_system_alarm_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_system_alarm_seq_show, PDE_DATA(inode));
-}
-
-static int get_date_field(char **p, u32 * value)
-{
- char *next = NULL;
- char *string_end = NULL;
- int result = -EINVAL;
-
- /*
- * Try to find delimeter, only to insert null. The end of the
- * string won't have one, but is still valid.
- */
- if (*p == NULL)
- return result;
-
- next = strpbrk(*p, "- :");
- if (next)
- *next++ = '\0';
-
- *value = simple_strtoul(*p, &string_end, 10);
-
- /* Signal success if we got a good digit */
- if (string_end != *p)
- result = 0;
-
- if (next)
- *p = next;
- else
- *p = NULL;
-
- return result;
-}
-
-/* Read a possibly BCD register, always return binary */
-static u32 cmos_bcd_read(int offset, int rtc_control)
-{
- u32 val = CMOS_READ(offset);
- if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- val = bcd2bin(val);
- return val;
-}
-
-/* Write binary value into possibly BCD register */
-static void cmos_bcd_write(u32 val, int offset, int rtc_control)
-{
- if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- val = bin2bcd(val);
- CMOS_WRITE(val, offset);
-}
-
-static ssize_t
-acpi_system_write_alarm(struct file *file,
- const char __user * buffer, size_t count, loff_t * ppos)
-{
- int result = 0;
- char alarm_string[30] = { '\0' };
- char *p = alarm_string;
- u32 sec, min, hr, day, mo, yr;
- int adjust = 0;
- unsigned char rtc_control = 0;
-
- if (count > sizeof(alarm_string) - 1)
- return -EINVAL;
-
- if (copy_from_user(alarm_string, buffer, count))
- return -EFAULT;
-
- alarm_string[count] = '\0';
-
- /* check for time adjustment */
- if (alarm_string[0] == '+') {
- p++;
- adjust = 1;
- }
-
- if ((result = get_date_field(&p, &yr)))
- goto end;
- if ((result = get_date_field(&p, &mo)))
- goto end;
- if ((result = get_date_field(&p, &day)))
- goto end;
- if ((result = get_date_field(&p, &hr)))
- goto end;
- if ((result = get_date_field(&p, &min)))
- goto end;
- if ((result = get_date_field(&p, &sec)))
- goto end;
-
- spin_lock_irq(&rtc_lock);
-
- rtc_control = CMOS_READ(RTC_CONTROL);
-
- if (adjust) {
- yr += cmos_bcd_read(RTC_YEAR, rtc_control);
- mo += cmos_bcd_read(RTC_MONTH, rtc_control);
- day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
- hr += cmos_bcd_read(RTC_HOURS, rtc_control);
- min += cmos_bcd_read(RTC_MINUTES, rtc_control);
- sec += cmos_bcd_read(RTC_SECONDS, rtc_control);
- }
-
- spin_unlock_irq(&rtc_lock);
-
- if (sec > 59) {
- min += sec/60;
- sec = sec%60;
- }
- if (min > 59) {
- hr += min/60;
- min = min%60;
- }
- if (hr > 23) {
- day += hr/24;
- hr = hr%24;
- }
- if (day > 31) {
- mo += day/32;
- day = day%32;
- }
- if (mo > 12) {
- yr += mo/13;
- mo = mo%13;
- }
-
- spin_lock_irq(&rtc_lock);
- /*
- * Disable alarm interrupt before setting alarm timer or else
- * when ACPI_EVENT_RTC is enabled, a spurious ACPI interrupt occurs
- */
- rtc_control &= ~RTC_AIE;
- CMOS_WRITE(rtc_control, RTC_CONTROL);
- CMOS_READ(RTC_INTR_FLAGS);
-
- /* write the fields the rtc knows about */
- cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control);
- cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control);
- cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control);
-
- /*
- * If the system supports an enhanced alarm it will have non-zero
- * offsets into the CMOS RAM here -- which for some reason are pointing
- * to the RTC area of memory.
- */
- if (acpi_gbl_FADT.day_alarm)
- cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control);
- if (acpi_gbl_FADT.month_alarm)
- cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control);
- if (acpi_gbl_FADT.century) {
- if (adjust)
- yr += cmos_bcd_read(acpi_gbl_FADT.century, rtc_control) * 100;
- cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control);
- }
- /* enable the rtc alarm interrupt */
- rtc_control |= RTC_AIE;
- CMOS_WRITE(rtc_control, RTC_CONTROL);
- CMOS_READ(RTC_INTR_FLAGS);
-
- spin_unlock_irq(&rtc_lock);
-
- acpi_clear_event(ACPI_EVENT_RTC);
- acpi_enable_event(ACPI_EVENT_RTC, 0);
-
- *ppos += count;
-
- result = 0;
- end:
- return result ? result : count;
-}
-#endif /* HAVE_ACPI_LEGACY_ALARM */
-
static int
acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
{
@@ -417,41 +145,8 @@ static const struct file_operations acpi_system_wakeup_device_fops = {
.release = single_release,
};
-#ifdef HAVE_ACPI_LEGACY_ALARM
-static const struct file_operations acpi_system_alarm_fops = {
- .owner = THIS_MODULE,
- .open = acpi_system_alarm_open_fs,
- .read = seq_read,
- .write = acpi_system_write_alarm,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static u32 rtc_handler(void *context)
-{
- acpi_clear_event(ACPI_EVENT_RTC);
- acpi_disable_event(ACPI_EVENT_RTC, 0);
-
- return ACPI_INTERRUPT_HANDLED;
-}
-#endif /* HAVE_ACPI_LEGACY_ALARM */
-
int __init acpi_sleep_proc_init(void)
{
-#ifdef HAVE_ACPI_LEGACY_ALARM
- /* 'alarm' [R/W] */
- proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
- acpi_root_dir, &acpi_system_alarm_fops);
-
- acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
- /*
- * Disable the RTC event after installing RTC handler.
- * Only when RTC alarm is set will it be enabled.
- */
- acpi_clear_event(ACPI_EVENT_RTC);
- acpi_disable_event(ACPI_EVENT_RTC, 0);
-#endif /* HAVE_ACPI_LEGACY_ALARM */
-
/* 'wakeup device' [R/W] */
proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
acpi_root_dir, &acpi_system_wakeup_device_fops);
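The /proc/acpi/alarm code removed above leaned on cmos_bcd_read()/cmos_bcd_write(), which translate between the RTC's possibly BCD-encoded registers and plain binary depending on the RTC_DM_BINARY control bit. The conversions themselves are just nibble arithmetic; for reference, a self-contained sketch of the two directions:

    /* BCD <-> binary helpers of the kind the removed alarm code wrapped. */
    #include <stdio.h>
    #include <stdint.h>

    static uint8_t bcd2bin(uint8_t v) { return (v & 0x0f) + (v >> 4) * 10; }
    static uint8_t bin2bcd(uint8_t v) { return (uint8_t)(((v / 10) << 4) | (v % 10)); }

    int main(void)
    {
            /* 0x59 in BCD is decimal 59; binary 23 encodes as 0x23 in BCD. */
            printf("bcd2bin(0x59) = %u\n", (unsigned)bcd2bin(0x59));
            printf("bin2bcd(23)   = 0x%02x\n", (unsigned)bin2bcd(23));
            return 0;
    }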
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index cf34d903f4fb..b3171f30b319 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -162,16 +162,23 @@ exit:
return apic_id;
}
-int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
{
-#ifdef CONFIG_SMP
- int i;
-#endif
- int apic_id = -1;
+ int apic_id;
apic_id = map_mat_entry(handle, type, acpi_id);
if (apic_id == -1)
apic_id = map_madt_entry(type, acpi_id);
+
+ return apic_id;
+}
+
+int acpi_map_cpuid(int apic_id, u32 acpi_id)
+{
+#ifdef CONFIG_SMP
+ int i;
+#endif
+
if (apic_id == -1) {
/*
* On UP processor, there is no _MAT or MADT table.
@@ -211,6 +218,15 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
#endif
return -1;
}
+
+int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+{
+ int apic_id;
+
+ apic_id = acpi_get_apicid(handle, type, acpi_id);
+
+ return acpi_map_cpuid(apic_id, acpi_id);
+}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
static bool __init processor_physically_present(acpi_handle handle)
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index e534ba66d5b8..146ab7e2b81d 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -153,8 +153,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block __refdata acpi_cpu_notifier =
-{
+static struct notifier_block __refdata acpi_cpu_notifier = {
.notifier_call = acpi_cpu_soft_notify,
};
@@ -172,7 +171,6 @@ static int __acpi_processor_start(struct acpi_device *device)
#ifdef CONFIG_CPU_FREQ
acpi_processor_ppc_has_changed(pr, 0);
- acpi_processor_load_module(pr);
#endif
acpi_processor_get_throttling_info(pr);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f98dd00b51a9..644516d9bde6 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -119,17 +119,10 @@ static struct dmi_system_id processor_power_dmi_table[] = {
*/
static void acpi_safe_halt(void)
{
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we
- * test NEED_RESCHED:
- */
- smp_mb();
- if (!need_resched()) {
+ if (!tif_need_resched()) {
safe_halt();
local_irq_disable();
}
- current_thread_info()->status |= TS_POLLING;
}
#ifdef ARCH_APICTIMER_STOPS_ON_C3
@@ -272,9 +265,6 @@ static void tsc_check_state(int state) { return; }
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
- if (!pr)
- return -EINVAL;
-
if (!pr->pblk)
return -ENODEV;
@@ -737,6 +727,11 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
if (unlikely(!pr))
return -EINVAL;
+ if (cx->entry_method == ACPI_CSTATE_FFH) {
+ if (current_set_polling_and_test())
+ return -EINVAL;
+ }
+
lapic_timer_state_broadcast(pr, cx, 1);
acpi_idle_do_entry(cx);
@@ -790,18 +785,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
if (unlikely(!pr))
return -EINVAL;
- if (cx->entry_method != ACPI_CSTATE_FFH) {
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
-
- if (unlikely(need_resched())) {
- current_thread_info()->status |= TS_POLLING;
+ if (cx->entry_method == ACPI_CSTATE_FFH) {
+ if (current_set_polling_and_test())
return -EINVAL;
- }
}
/*
@@ -819,9 +805,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
sched_clock_idle_wakeup_event(0);
- if (cx->entry_method != ACPI_CSTATE_FFH)
- current_thread_info()->status |= TS_POLLING;
-
lapic_timer_state_broadcast(pr, cx, 0);
return index;
}
@@ -858,18 +841,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
}
}
- if (cx->entry_method != ACPI_CSTATE_FFH) {
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
-
- if (unlikely(need_resched())) {
- current_thread_info()->status |= TS_POLLING;
+ if (cx->entry_method == ACPI_CSTATE_FFH) {
+ if (current_set_polling_and_test())
return -EINVAL;
- }
}
acpi_unlazy_tlb(smp_processor_id());
@@ -915,9 +889,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
sched_clock_idle_wakeup_event(0);
- if (cx->entry_method != ACPI_CSTATE_FFH)
- current_thread_info()->status |= TS_POLLING;
-
lapic_timer_state_broadcast(pr, cx, 0);
return index;
}
@@ -1076,12 +1047,8 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
if (disabled_by_idle_boot_param())
return 0;
- if (!pr)
- return -EINVAL;
-
- if (nocst) {
+ if (nocst)
return -ENODEV;
- }
if (!pr->flags.power_setup_done)
return -ENODEV;
@@ -1108,9 +1075,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
if (disabled_by_idle_boot_param())
return 0;
- if (!pr)
- return -EINVAL;
-
if (nocst)
return -ENODEV;
@@ -1183,9 +1147,6 @@ int acpi_processor_power_init(struct acpi_processor *pr)
first_run++;
}
- if (!pr)
- return -EINVAL;
-
if (acpi_gbl_FADT.cst_control && !nocst) {
status =
acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 51d7948611da..60a7c28fc167 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -235,28 +235,6 @@ void acpi_processor_ppc_exit(void)
acpi_processor_ppc_status &= ~PPC_REGISTERED;
}
-/*
- * Do a quick check if the systems looks like it should use ACPI
- * cpufreq. We look at a _PCT method being available, but don't
- * do a whole lot of sanity checks.
- */
-void acpi_processor_load_module(struct acpi_processor *pr)
-{
- static int requested;
- acpi_status status = 0;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
- if (!arch_has_acpi_pdc() || requested)
- return;
- status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
- if (!ACPI_FAILURE(status)) {
- printk(KERN_INFO PREFIX "Requesting acpi_cpufreq\n");
- request_module_nowait("acpi_cpufreq");
- requested = 1;
- }
- kfree(buffer.pointer);
-}
-
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
int result = 0;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index aef7e1cd1e5d..d465ae6cdd00 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -30,12 +30,6 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-#endif
-
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
@@ -67,11 +61,6 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
-extern struct proc_dir_entry *acpi_lock_ac_dir(void);
-extern struct proc_dir_entry *acpi_lock_battery_dir(void);
-extern void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
-extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
-
#define MAX_SBS_BAT 4
#define ACPI_SBS_BLOCK_MAX 32
@@ -84,9 +73,6 @@ MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
struct acpi_battery {
struct power_supply bat;
struct acpi_sbs *sbs;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- struct proc_dir_entry *proc_entry;
-#endif
unsigned long update_time;
char name[8];
char manufacturer_name[ACPI_SBS_BLOCK_MAX];
@@ -119,9 +105,6 @@ struct acpi_sbs {
struct acpi_device *device;
struct acpi_smb_hc *hc;
struct mutex lock;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- struct proc_dir_entry *charger_entry;
-#endif
struct acpi_battery battery[MAX_SBS_BAT];
u8 batteries_supported:4;
u8 manager_present:1;
@@ -482,261 +465,6 @@ static struct device_attribute alarm_attr = {
};
/* --------------------------------------------------------------------------
- FS Interface (/proc/acpi)
- -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-/* Generic Routines */
-static int
-acpi_sbs_add_fs(struct proc_dir_entry **dir,
- struct proc_dir_entry *parent_dir,
- char *dir_name,
- const struct file_operations *info_fops,
- const struct file_operations *state_fops,
- const struct file_operations *alarm_fops, void *data)
-{
- printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded,"
- " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
- if (!*dir) {
- *dir = proc_mkdir(dir_name, parent_dir);
- if (!*dir) {
- return -ENODEV;
- }
- }
-
- /* 'info' [R] */
- if (info_fops)
- proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir,
- info_fops, data);
-
- /* 'state' [R] */
- if (state_fops)
- proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir,
- state_fops, data);
-
- /* 'alarm' [R/W] */
- if (alarm_fops)
- proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir,
- alarm_fops, data);
- return 0;
-}
-
-/* Smart Battery Interface */
-static struct proc_dir_entry *acpi_battery_dir = NULL;
-
-static inline char *acpi_battery_units(struct acpi_battery *battery)
-{
- return acpi_battery_mode(battery) ? " mW" : " mA";
-}
-
-
-static int acpi_battery_read_info(struct seq_file *seq, void *offset)
-{
- struct acpi_battery *battery = seq->private;
- struct acpi_sbs *sbs = battery->sbs;
- int result = 0;
-
- mutex_lock(&sbs->lock);
-
- seq_printf(seq, "present: %s\n",
- (battery->present) ? "yes" : "no");
- if (!battery->present)
- goto end;
-
- seq_printf(seq, "design capacity: %i%sh\n",
- battery->design_capacity * acpi_battery_scale(battery),
- acpi_battery_units(battery));
- seq_printf(seq, "last full capacity: %i%sh\n",
- battery->full_charge_capacity * acpi_battery_scale(battery),
- acpi_battery_units(battery));
- seq_printf(seq, "battery technology: rechargeable\n");
- seq_printf(seq, "design voltage: %i mV\n",
- battery->design_voltage * acpi_battery_vscale(battery));
- seq_printf(seq, "design capacity warning: unknown\n");
- seq_printf(seq, "design capacity low: unknown\n");
- seq_printf(seq, "cycle count: %i\n", battery->cycle_count);
- seq_printf(seq, "capacity granularity 1: unknown\n");
- seq_printf(seq, "capacity granularity 2: unknown\n");
- seq_printf(seq, "model number: %s\n", battery->device_name);
- seq_printf(seq, "serial number: %i\n",
- battery->serial_number);
- seq_printf(seq, "battery type: %s\n",
- battery->device_chemistry);
- seq_printf(seq, "OEM info: %s\n",
- battery->manufacturer_name);
- end:
- mutex_unlock(&sbs->lock);
- return result;
-}
-
-static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_battery_read_info, PDE_DATA(inode));
-}
-
-static int acpi_battery_read_state(struct seq_file *seq, void *offset)
-{
- struct acpi_battery *battery = seq->private;
- struct acpi_sbs *sbs = battery->sbs;
- int rate;
-
- mutex_lock(&sbs->lock);
- seq_printf(seq, "present: %s\n",
- (battery->present) ? "yes" : "no");
- if (!battery->present)
- goto end;
-
- acpi_battery_get_state(battery);
- seq_printf(seq, "capacity state: %s\n",
- (battery->state & 0x0010) ? "critical" : "ok");
- seq_printf(seq, "charging state: %s\n",
- (battery->rate_now < 0) ? "discharging" :
- ((battery->rate_now > 0) ? "charging" : "charged"));
- rate = abs(battery->rate_now) * acpi_battery_ipscale(battery);
- rate *= (acpi_battery_mode(battery))?(battery->voltage_now *
- acpi_battery_vscale(battery)/1000):1;
- seq_printf(seq, "present rate: %d%s\n", rate,
- acpi_battery_units(battery));
- seq_printf(seq, "remaining capacity: %i%sh\n",
- battery->capacity_now * acpi_battery_scale(battery),
- acpi_battery_units(battery));
- seq_printf(seq, "present voltage: %i mV\n",
- battery->voltage_now * acpi_battery_vscale(battery));
-
- end:
- mutex_unlock(&sbs->lock);
- return 0;
-}
-
-static int acpi_battery_state_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_battery_read_state, PDE_DATA(inode));
-}
-
-static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
-{
- struct acpi_battery *battery = seq->private;
- struct acpi_sbs *sbs = battery->sbs;
- int result = 0;
-
- mutex_lock(&sbs->lock);
-
- if (!battery->present) {
- seq_printf(seq, "present: no\n");
- goto end;
- }
-
- acpi_battery_get_alarm(battery);
- seq_printf(seq, "alarm: ");
- if (battery->alarm_capacity)
- seq_printf(seq, "%i%sh\n",
- battery->alarm_capacity *
- acpi_battery_scale(battery),
- acpi_battery_units(battery));
- else
- seq_printf(seq, "disabled\n");
- end:
- mutex_unlock(&sbs->lock);
- return result;
-}
-
-static ssize_t
-acpi_battery_write_alarm(struct file *file, const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- struct seq_file *seq = file->private_data;
- struct acpi_battery *battery = seq->private;
- struct acpi_sbs *sbs = battery->sbs;
- char alarm_string[12] = { '\0' };
- int result = 0;
- mutex_lock(&sbs->lock);
- if (!battery->present) {
- result = -ENODEV;
- goto end;
- }
- if (count > sizeof(alarm_string) - 1) {
- result = -EINVAL;
- goto end;
- }
- if (copy_from_user(alarm_string, buffer, count)) {
- result = -EFAULT;
- goto end;
- }
- alarm_string[count] = 0;
- battery->alarm_capacity = simple_strtoul(alarm_string, NULL, 0) /
- acpi_battery_scale(battery);
- acpi_battery_set_alarm(battery);
- end:
- mutex_unlock(&sbs->lock);
- if (result)
- return result;
- return count;
-}
-
-static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_battery_read_alarm, PDE_DATA(inode));
-}
-
-static const struct file_operations acpi_battery_info_fops = {
- .open = acpi_battery_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_state_fops = {
- .open = acpi_battery_state_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_alarm_fops = {
- .open = acpi_battery_alarm_open_fs,
- .read = seq_read,
- .write = acpi_battery_write_alarm,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-/* Legacy AC Adapter Interface */
-
-static struct proc_dir_entry *acpi_ac_dir = NULL;
-
-static int acpi_ac_read_state(struct seq_file *seq, void *offset)
-{
-
- struct acpi_sbs *sbs = seq->private;
-
- mutex_lock(&sbs->lock);
-
- seq_printf(seq, "state: %s\n",
- sbs->charger_present ? "on-line" : "off-line");
-
- mutex_unlock(&sbs->lock);
- return 0;
-}
-
-static int acpi_ac_state_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_ac_read_state, PDE_DATA(inode));
-}
-
-static const struct file_operations acpi_ac_state_fops = {
- .open = acpi_ac_state_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-#endif
-
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
static int acpi_battery_read(struct acpi_battery *battery)
@@ -781,12 +509,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
return result;
sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_sbs_add_fs(&battery->proc_entry, acpi_battery_dir,
- battery->name, &acpi_battery_info_fops,
- &acpi_battery_state_fops, &acpi_battery_alarm_fops,
- battery);
-#endif
battery->bat.name = battery->name;
battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
if (!acpi_battery_mode(battery)) {
@@ -822,10 +544,6 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
device_remove_file(battery->bat.dev, &alarm_attr);
power_supply_unregister(&battery->bat);
}
-#ifdef CONFIG_ACPI_PROCFS_POWER
- proc_remove(battery->proc_entry);
- battery->proc_entry = NULL;
-#endif
}
static int acpi_charger_add(struct acpi_sbs *sbs)
@@ -835,13 +553,7 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
result = acpi_ac_get_present(sbs);
if (result)
goto end;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- result = acpi_sbs_add_fs(&sbs->charger_entry, acpi_ac_dir,
- ACPI_AC_DIR_NAME, NULL,
- &acpi_ac_state_fops, NULL, sbs);
- if (result)
- goto end;
-#endif
+
sbs->charger.name = "sbs-charger";
sbs->charger.type = POWER_SUPPLY_TYPE_MAINS;
sbs->charger.properties = sbs_ac_props;
@@ -859,10 +571,6 @@ static void acpi_charger_remove(struct acpi_sbs *sbs)
{
if (sbs->charger.dev)
power_supply_unregister(&sbs->charger);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- proc_remove(sbs->charger_entry);
- sbs->charger_entry = NULL;
-#endif
}
static void acpi_sbs_callback(void *context)
@@ -950,20 +658,6 @@ static int acpi_sbs_remove(struct acpi_device *device)
return 0;
}
-static void acpi_sbs_rmdirs(void)
-{
-#ifdef CONFIG_ACPI_PROCFS_POWER
- if (acpi_ac_dir) {
- acpi_unlock_ac_dir(acpi_ac_dir);
- acpi_ac_dir = NULL;
- }
- if (acpi_battery_dir) {
- acpi_unlock_battery_dir(acpi_battery_dir);
- acpi_battery_dir = NULL;
- }
-#endif
-}
-
#ifdef CONFIG_PM_SLEEP
static int acpi_sbs_resume(struct device *dev)
{
@@ -995,28 +689,17 @@ static int __init acpi_sbs_init(void)
if (acpi_disabled)
return -ENODEV;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_dir = acpi_lock_ac_dir();
- if (!acpi_ac_dir)
- return -ENODEV;
- acpi_battery_dir = acpi_lock_battery_dir();
- if (!acpi_battery_dir) {
- acpi_sbs_rmdirs();
- return -ENODEV;
- }
-#endif
+
result = acpi_bus_register_driver(&acpi_sbs_driver);
- if (result < 0) {
- acpi_sbs_rmdirs();
+ if (result < 0)
return -ENODEV;
- }
+
return 0;
}
static void __exit acpi_sbs_exit(void)
{
acpi_bus_unregister_driver(&acpi_sbs_driver);
- acpi_sbs_rmdirs();
return;
}
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 05306a59aedc..db5293650f62 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -564,6 +564,7 @@ static ssize_t counter_set(struct kobject *kobj,
acpi_event_status status;
acpi_handle handle;
int result = 0;
+ unsigned long tmp;
if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
int i;
@@ -596,8 +597,10 @@ static ssize_t counter_set(struct kobject *kobj,
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_gpe(handle, index);
+ else if (!kstrtoul(buf, 0, &tmp))
+ all_counters[index].count = tmp;
else
- all_counters[index].count = strtoul(buf, NULL, 0);
+ result = -EINVAL;
} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
int event = index - num_gpes;
if (!strcmp(buf, "disable\n") &&
@@ -609,8 +612,10 @@ static ssize_t counter_set(struct kobject *kobj,
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_event(event);
+ else if (!kstrtoul(buf, 0, &tmp))
+ all_counters[index].count = tmp;
else
- all_counters[index].count = strtoul(buf, NULL, 0);
+ result = -EINVAL;
} else
all_counters[index].count = strtoul(buf, NULL, 0);
@@ -762,13 +767,8 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
if (!hotplug_kobj)
goto err_out;
- kobject_init(&hotplug->kobj, &acpi_hotplug_profile_ktype);
- error = kobject_set_name(&hotplug->kobj, "%s", name);
- if (error)
- goto err_out;
-
- hotplug->kobj.parent = hotplug_kobj;
- error = kobject_add(&hotplug->kobj, hotplug_kobj, NULL);
+ error = kobject_init_and_add(&hotplug->kobj,
+ &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
if (error)
goto err_out;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 6a0329340b42..0d9f46b5ae6d 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -299,8 +299,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"No critical threshold\n"));
} else if (tmp <= 2732) {
- printk(KERN_WARNING FW_BUG "Invalid critical threshold "
- "(%llu)\n", tmp);
+ pr_warn(FW_BUG "Invalid critical threshold (%llu)\n",
+ tmp);
tz->trips.critical.flags.valid = 0;
} else {
tz->trips.critical.flags.valid = 1;
@@ -317,8 +317,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
* Allow override critical threshold
*/
if (crt_k > tz->trips.critical.temperature)
- printk(KERN_WARNING PREFIX
- "Critical threshold %d C\n", crt);
+ pr_warn(PREFIX "Critical threshold %d C\n",
+ crt);
tz->trips.critical.temperature = crt_k;
}
}
@@ -390,8 +390,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
status = acpi_evaluate_reference(tz->device->handle, "_PSL",
NULL, &devices);
if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING PREFIX
- "Invalid passive threshold\n");
+ pr_warn(PREFIX "Invalid passive threshold\n");
tz->trips.passive.flags.valid = 0;
}
else
@@ -453,8 +452,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
status = acpi_evaluate_reference(tz->device->handle,
name, NULL, &devices);
if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING PREFIX
- "Invalid active%d threshold\n", i);
+ pr_warn(PREFIX "Invalid active%d threshold\n",
+ i);
tz->trips.active[i].flags.valid = 0;
}
else
@@ -505,7 +504,7 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
valid |= tz->trips.active[i].flags.valid;
if (!valid) {
- printk(KERN_WARNING FW_BUG "No valid trip found\n");
+ pr_warn(FW_BUG "No valid trip found\n");
return -ENODEV;
}
return 0;
@@ -515,10 +514,9 @@ static void acpi_thermal_check(void *data)
{
struct acpi_thermal *tz = data;
- if (!tz->tz_enabled) {
- pr_warn("thermal zone is disabled \n");
+ if (!tz->tz_enabled)
return;
- }
+
thermal_zone_device_update(tz->thermal_zone);
}
@@ -570,9 +568,10 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
*/
if (mode == THERMAL_DEVICE_ENABLED)
enable = 1;
- else if (mode == THERMAL_DEVICE_DISABLED)
+ else if (mode == THERMAL_DEVICE_DISABLED) {
enable = 0;
- else
+ pr_warn("thermal zone will be disabled\n");
+ } else
return -EINVAL;
if (enable != tz->tz_enabled) {
@@ -923,8 +922,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
acpi_bus_private_data_handler,
tz->thermal_zone);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Error attaching device data\n");
+ pr_err(PREFIX "Error attaching device data\n");
return -ENODEV;
}
@@ -1094,9 +1092,8 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
- printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
- acpi_device_name(device), acpi_device_bid(device),
- KELVIN_TO_CELSIUS(tz->temperature));
+ pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
+ acpi_device_bid(device), KELVIN_TO_CELSIUS(tz->temperature));
goto end;
free_memory:
@@ -1159,24 +1156,24 @@ static int acpi_thermal_resume(struct device *dev)
static int thermal_act(const struct dmi_system_id *d) {
if (act == 0) {
- printk(KERN_NOTICE "ACPI: %s detected: "
- "disabling all active thermal trip points\n", d->ident);
+ pr_notice(PREFIX "%s detected: "
+ "disabling all active thermal trip points\n", d->ident);
act = -1;
}
return 0;
}
static int thermal_nocrt(const struct dmi_system_id *d) {
- printk(KERN_NOTICE "ACPI: %s detected: "
- "disabling all critical thermal trip point actions.\n", d->ident);
+ pr_notice(PREFIX "%s detected: "
+ "disabling all critical thermal trip point actions.\n", d->ident);
nocrt = 1;
return 0;
}
static int thermal_tzp(const struct dmi_system_id *d) {
if (tzp == 0) {
- printk(KERN_NOTICE "ACPI: %s detected: "
- "enabling thermal zone polling\n", d->ident);
+ pr_notice(PREFIX "%s detected: "
+ "enabling thermal zone polling\n", d->ident);
tzp = 300; /* 300 dS = 30 Seconds */
}
return 0;
@@ -1184,8 +1181,8 @@ static int thermal_tzp(const struct dmi_system_id *d) {
static int thermal_psv(const struct dmi_system_id *d) {
if (psv == 0) {
- printk(KERN_NOTICE "ACPI: %s detected: "
- "disabling all passive thermal trip points\n", d->ident);
+ pr_notice(PREFIX "%s detected: "
+ "disabling all passive thermal trip points\n", d->ident);
psv = -1;
}
return 0;
@@ -1238,7 +1235,7 @@ static int __init acpi_thermal_init(void)
dmi_check_system(thermal_dmi_table);
if (off) {
- printk(KERN_NOTICE "ACPI: thermal control disabled\n");
+ pr_notice(PREFIX "thermal control disabled\n");
return -ENODEV;
}
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 552248b0005b..6d408bfbbb1d 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -121,7 +121,7 @@ acpi_extract_package(union acpi_object *package,
break;
default:
printk(KERN_WARNING PREFIX "Invalid package element"
- " [%d]: got number, expecing"
+ " [%d]: got number, expecting"
" [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
@@ -148,7 +148,7 @@ acpi_extract_package(union acpi_object *package,
default:
printk(KERN_WARNING PREFIX "Invalid package element"
" [%d] got string/buffer,"
- " expecing [%c]\n",
+ " expecting [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
break;
@@ -169,11 +169,20 @@ acpi_extract_package(union acpi_object *package,
/*
* Validate output buffer.
*/
- if (buffer->length < size_required) {
+ if (buffer->length == ACPI_ALLOCATE_BUFFER) {
+ buffer->pointer = ACPI_ALLOCATE(size_required);
+ if (!buffer->pointer)
+ return AE_NO_MEMORY;
buffer->length = size_required;
- return AE_BUFFER_OVERFLOW;
- } else if (buffer->length != size_required || !buffer->pointer) {
- return AE_BAD_PARAMETER;
+ memset(buffer->pointer, 0, size_required);
+ } else {
+ if (buffer->length < size_required) {
+ buffer->length = size_required;
+ return AE_BUFFER_OVERFLOW;
+ } else if (buffer->length != size_required ||
+ !buffer->pointer) {
+ return AE_BAD_PARAMETER;
+ }
}
head = buffer->pointer;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index aebcf6355df4..38c3a28d6392 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -88,7 +88,16 @@ module_param(allow_duplicates, bool, 0644);
static bool use_bios_initial_backlight = 1;
module_param(use_bios_initial_backlight, bool, 0644);
+/*
+ * For Windows 8 systems: if set true and the GPU driver has
+ * registered a backlight interface, skip registering ACPI video's.
+ */
+static bool use_native_backlight = false;
+module_param(use_native_backlight, bool, 0644);
+
static int register_count;
+static struct mutex video_list_lock;
+static struct list_head video_bus_head;
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device);
static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
@@ -157,6 +166,7 @@ struct acpi_video_bus {
struct acpi_video_bus_flags flags;
struct list_head video_device_list;
struct mutex device_list_lock; /* protects video_device_list */
+ struct list_head entry;
struct input_dev *input;
char phys[32]; /* for input device */
struct notifier_block pm_nb;
@@ -229,6 +239,14 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
static int acpi_video_switch_brightness(struct acpi_video_device *device,
int event);
+static bool acpi_video_verify_backlight_support(void)
+{
+ if (acpi_osi_is_win8() && use_native_backlight &&
+ backlight_device_registered(BACKLIGHT_RAW))
+ return false;
+ return acpi_video_backlight_support();
+}
+
/* backlight device sysfs support */
static int acpi_video_get_brightness(struct backlight_device *bd)
{
@@ -486,6 +504,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
},
},
+ {
+ .callback = video_ignore_initial_backlight,
+ .ident = "HP 250 G1",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G1 Notebook PC"),
+ },
+ },
{}
};
@@ -884,79 +910,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (acpi_has_method(device->dev->handle, "_DDC"))
device->cap._DDC = 1;
-
- if (acpi_video_backlight_support()) {
- struct backlight_properties props;
- struct pci_dev *pdev;
- acpi_handle acpi_parent;
- struct device *parent = NULL;
- int result;
- static int count;
- char *name;
-
- result = acpi_video_init_brightness(device);
- if (result)
- return;
- name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
- if (!name)
- return;
- count++;
-
- acpi_get_parent(device->dev->handle, &acpi_parent);
-
- pdev = acpi_get_pci_dev(acpi_parent);
- if (pdev) {
- parent = &pdev->dev;
- pci_dev_put(pdev);
- }
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_FIRMWARE;
- props.max_brightness = device->brightness->count - 3;
- device->backlight = backlight_device_register(name,
- parent,
- device,
- &acpi_backlight_ops,
- &props);
- kfree(name);
- if (IS_ERR(device->backlight))
- return;
-
- /*
- * Save current brightness level in case we have to restore it
- * before acpi_video_device_lcd_set_level() is called next time.
- */
- device->backlight->props.brightness =
- acpi_video_get_brightness(device->backlight);
-
- device->cooling_dev = thermal_cooling_device_register("LCD",
- device->dev, &video_cooling_ops);
- if (IS_ERR(device->cooling_dev)) {
- /*
- * Set cooling_dev to NULL so we don't crash trying to
- * free it.
- * Also, why the hell we are returning early and
- * not attempt to register video output if cooling
- * device registration failed?
- * -- dtor
- */
- device->cooling_dev = NULL;
- return;
- }
-
- dev_info(&device->dev->dev, "registered as cooling_device%d\n",
- device->cooling_dev->id);
- result = sysfs_create_link(&device->dev->dev.kobj,
- &device->cooling_dev->device.kobj,
- "thermal_cooling");
- if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
- result = sysfs_create_link(&device->cooling_dev->device.kobj,
- &device->dev->dev.kobj, "device");
- if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
-
- }
}
/*
@@ -1143,13 +1096,6 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
acpi_video_device_bind(video, data);
acpi_video_device_find_cap(data);
- status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
- acpi_video_device_notify, data);
- if (ACPI_FAILURE(status))
- dev_err(&device->dev, "Error installing notify handler\n");
- else
- data->flags.notify = 1;
-
mutex_lock(&video->device_list_lock);
list_add_tail(&data->entry, &video->video_device_list);
mutex_unlock(&video->device_list_lock);
@@ -1333,8 +1279,8 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
unsigned long long level_current, level_next;
int result = -EINVAL;
- /* no warning message if acpi_backlight=vendor is used */
- if (!acpi_video_backlight_support())
+ /* no warning message if acpi_backlight=vendor or a quirk is used */
+ if (!acpi_video_verify_backlight_support())
return 0;
if (!device->brightness)
@@ -1454,64 +1400,6 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
return status;
}
-static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
-{
- acpi_status status;
-
- if (!device || !device->video)
- return -ENOENT;
-
- if (device->flags.notify) {
- status = acpi_remove_notify_handler(device->dev->handle,
- ACPI_DEVICE_NOTIFY, acpi_video_device_notify);
- if (ACPI_FAILURE(status))
- dev_err(&device->dev->dev,
- "Can't remove video notify handler\n");
- }
-
- if (device->backlight) {
- backlight_device_unregister(device->backlight);
- device->backlight = NULL;
- }
- if (device->cooling_dev) {
- sysfs_remove_link(&device->dev->dev.kobj,
- "thermal_cooling");
- sysfs_remove_link(&device->cooling_dev->device.kobj,
- "device");
- thermal_cooling_device_unregister(device->cooling_dev);
- device->cooling_dev = NULL;
- }
-
- return 0;
-}
-
-static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
-{
- int status;
- struct acpi_video_device *dev, *next;
-
- mutex_lock(&video->device_list_lock);
-
- list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
-
- status = acpi_video_bus_put_one_device(dev);
- if (ACPI_FAILURE(status))
- printk(KERN_WARNING PREFIX
- "hhuuhhuu bug in acpi video driver.\n");
-
- if (dev->brightness) {
- kfree(dev->brightness->levels);
- kfree(dev->brightness);
- }
- list_del(&dev->entry);
- kfree(dev);
- }
-
- mutex_unlock(&video->device_list_lock);
-
- return 0;
-}
-
/* acpi_video interface */
/*
@@ -1521,13 +1409,13 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
{
return acpi_video_bus_DOS(video, 0,
- acpi_video_backlight_quirks() ? 1 : 0);
+ acpi_osi_is_win8() ? 1 : 0);
}
static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
{
return acpi_video_bus_DOS(video, 0,
- acpi_video_backlight_quirks() ? 0 : 1);
+ acpi_osi_is_win8() ? 0 : 1);
}
static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
@@ -1536,7 +1424,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
struct input_dev *input;
int keycode = 0;
- if (!video)
+ if (!video || !video->input)
return;
input = video->input;
@@ -1691,12 +1579,236 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
return AE_OK;
}
+static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
+{
+ if (acpi_video_verify_backlight_support()) {
+ struct backlight_properties props;
+ struct pci_dev *pdev;
+ acpi_handle acpi_parent;
+ struct device *parent = NULL;
+ int result;
+ static int count;
+ char *name;
+
+ result = acpi_video_init_brightness(device);
+ if (result)
+ return;
+ name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
+ if (!name)
+ return;
+ count++;
+
+ acpi_get_parent(device->dev->handle, &acpi_parent);
+
+ pdev = acpi_get_pci_dev(acpi_parent);
+ if (pdev) {
+ parent = &pdev->dev;
+ pci_dev_put(pdev);
+ }
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_FIRMWARE;
+ props.max_brightness = device->brightness->count - 3;
+ device->backlight = backlight_device_register(name,
+ parent,
+ device,
+ &acpi_backlight_ops,
+ &props);
+ kfree(name);
+ if (IS_ERR(device->backlight))
+ return;
+
+ /*
+ * Save current brightness level in case we have to restore it
+ * before acpi_video_device_lcd_set_level() is called next time.
+ */
+ device->backlight->props.brightness =
+ acpi_video_get_brightness(device->backlight);
+
+ device->cooling_dev = thermal_cooling_device_register("LCD",
+ device->dev, &video_cooling_ops);
+ if (IS_ERR(device->cooling_dev)) {
+ /*
+ * Set cooling_dev to NULL so we don't crash trying to
+ * free it.
+ * Also, why the hell we are returning early and
+ * not attempt to register video output if cooling
+ * device registration failed?
+ * -- dtor
+ */
+ device->cooling_dev = NULL;
+ return;
+ }
+
+ dev_info(&device->dev->dev, "registered as cooling_device%d\n",
+ device->cooling_dev->id);
+ result = sysfs_create_link(&device->dev->dev.kobj,
+ &device->cooling_dev->device.kobj,
+ "thermal_cooling");
+ if (result)
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ result = sysfs_create_link(&device->cooling_dev->device.kobj,
+ &device->dev->dev.kobj, "device");
+ if (result)
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ }
+}
+
+static int acpi_video_bus_register_backlight(struct acpi_video_bus *video)
+{
+ struct acpi_video_device *dev;
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry(dev, &video->video_device_list, entry)
+ acpi_video_dev_register_backlight(dev);
+ mutex_unlock(&video->device_list_lock);
+
+ video->pm_nb.notifier_call = acpi_video_resume;
+ video->pm_nb.priority = 0;
+ return register_pm_notifier(&video->pm_nb);
+}
+
+static void acpi_video_dev_unregister_backlight(struct acpi_video_device *device)
+{
+ if (device->backlight) {
+ backlight_device_unregister(device->backlight);
+ device->backlight = NULL;
+ }
+ if (device->brightness) {
+ kfree(device->brightness->levels);
+ kfree(device->brightness);
+ device->brightness = NULL;
+ }
+ if (device->cooling_dev) {
+ sysfs_remove_link(&device->dev->dev.kobj, "thermal_cooling");
+ sysfs_remove_link(&device->cooling_dev->device.kobj, "device");
+ thermal_cooling_device_unregister(device->cooling_dev);
+ device->cooling_dev = NULL;
+ }
+}
+
+static int acpi_video_bus_unregister_backlight(struct acpi_video_bus *video)
+{
+ struct acpi_video_device *dev;
+ int error = unregister_pm_notifier(&video->pm_nb);
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry(dev, &video->video_device_list, entry)
+ acpi_video_dev_unregister_backlight(dev);
+ mutex_unlock(&video->device_list_lock);
+
+ return error;
+}
+
+static void acpi_video_dev_add_notify_handler(struct acpi_video_device *device)
+{
+ acpi_status status;
+ struct acpi_device *adev = device->dev;
+
+ status = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+ acpi_video_device_notify, device);
+ if (ACPI_FAILURE(status))
+ dev_err(&adev->dev, "Error installing notify handler\n");
+ else
+ device->flags.notify = 1;
+}
+
+static int acpi_video_bus_add_notify_handler(struct acpi_video_bus *video)
+{
+ struct input_dev *input;
+ struct acpi_video_device *dev;
+ int error;
+
+ video->input = input = input_allocate_device();
+ if (!input) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ error = acpi_video_bus_start_devices(video);
+ if (error)
+ goto err_free_input;
+
+ snprintf(video->phys, sizeof(video->phys),
+ "%s/video/input0", acpi_device_hid(video->device));
+
+ input->name = acpi_device_name(video->device);
+ input->phys = video->phys;
+ input->id.bustype = BUS_HOST;
+ input->id.product = 0x06;
+ input->dev.parent = &video->device->dev;
+ input->evbit[0] = BIT(EV_KEY);
+ set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
+ set_bit(KEY_VIDEO_NEXT, input->keybit);
+ set_bit(KEY_VIDEO_PREV, input->keybit);
+ set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
+ set_bit(KEY_BRIGHTNESSUP, input->keybit);
+ set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
+ set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
+ set_bit(KEY_DISPLAY_OFF, input->keybit);
+
+ error = input_register_device(input);
+ if (error)
+ goto err_stop_dev;
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry(dev, &video->video_device_list, entry)
+ acpi_video_dev_add_notify_handler(dev);
+ mutex_unlock(&video->device_list_lock);
+
+ return 0;
+
+err_stop_dev:
+ acpi_video_bus_stop_devices(video);
+err_free_input:
+ input_free_device(input);
+ video->input = NULL;
+out:
+ return error;
+}
+
+static void acpi_video_dev_remove_notify_handler(struct acpi_video_device *dev)
+{
+ if (dev->flags.notify) {
+ acpi_remove_notify_handler(dev->dev->handle, ACPI_DEVICE_NOTIFY,
+ acpi_video_device_notify);
+ dev->flags.notify = 0;
+ }
+}
+
+static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
+{
+ struct acpi_video_device *dev;
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry(dev, &video->video_device_list, entry)
+ acpi_video_dev_remove_notify_handler(dev);
+ mutex_unlock(&video->device_list_lock);
+
+ acpi_video_bus_stop_devices(video);
+ input_unregister_device(video->input);
+ video->input = NULL;
+}
+
+static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
+{
+ struct acpi_video_device *dev, *next;
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
+ list_del(&dev->entry);
+ kfree(dev);
+ }
+ mutex_unlock(&video->device_list_lock);
+
+ return 0;
+}
+
static int instance;
static int acpi_video_bus_add(struct acpi_device *device)
{
struct acpi_video_bus *video;
- struct input_dev *input;
int error;
acpi_status status;
@@ -1748,62 +1860,24 @@ static int acpi_video_bus_add(struct acpi_device *device)
if (error)
goto err_put_video;
- video->input = input = input_allocate_device();
- if (!input) {
- error = -ENOMEM;
- goto err_put_video;
- }
-
- error = acpi_video_bus_start_devices(video);
- if (error)
- goto err_free_input_dev;
-
- snprintf(video->phys, sizeof(video->phys),
- "%s/video/input0", acpi_device_hid(video->device));
-
- input->name = acpi_device_name(video->device);
- input->phys = video->phys;
- input->id.bustype = BUS_HOST;
- input->id.product = 0x06;
- input->dev.parent = &device->dev;
- input->evbit[0] = BIT(EV_KEY);
- set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
- set_bit(KEY_VIDEO_NEXT, input->keybit);
- set_bit(KEY_VIDEO_PREV, input->keybit);
- set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
- set_bit(KEY_BRIGHTNESSUP, input->keybit);
- set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
- set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
- set_bit(KEY_DISPLAY_OFF, input->keybit);
-
printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
video->flags.multihead ? "yes" : "no",
video->flags.rom ? "yes" : "no",
video->flags.post ? "yes" : "no");
+ mutex_lock(&video_list_lock);
+ list_add_tail(&video->entry, &video_bus_head);
+ mutex_unlock(&video_list_lock);
- video->pm_nb.notifier_call = acpi_video_resume;
- video->pm_nb.priority = 0;
- error = register_pm_notifier(&video->pm_nb);
- if (error)
- goto err_stop_video;
-
- error = input_register_device(input);
- if (error)
- goto err_unregister_pm_notifier;
+ acpi_video_bus_register_backlight(video);
+ acpi_video_bus_add_notify_handler(video);
return 0;
- err_unregister_pm_notifier:
- unregister_pm_notifier(&video->pm_nb);
- err_stop_video:
- acpi_video_bus_stop_devices(video);
- err_free_input_dev:
- input_free_device(input);
- err_put_video:
+err_put_video:
acpi_video_bus_put_devices(video);
kfree(video->attached_array);
- err_free_video:
+err_free_video:
kfree(video);
device->driver_data = NULL;
@@ -1820,12 +1894,14 @@ static int acpi_video_bus_remove(struct acpi_device *device)
video = acpi_driver_data(device);
- unregister_pm_notifier(&video->pm_nb);
-
- acpi_video_bus_stop_devices(video);
+ acpi_video_bus_remove_notify_handler(video);
+ acpi_video_bus_unregister_backlight(video);
acpi_video_bus_put_devices(video);
- input_unregister_device(video->input);
+ mutex_lock(&video_list_lock);
+ list_del(&video->entry);
+ mutex_unlock(&video_list_lock);
+
kfree(video->attached_array);
kfree(video);
@@ -1874,6 +1950,9 @@ int acpi_video_register(void)
return 0;
}
+ mutex_init(&video_list_lock);
+ INIT_LIST_HEAD(&video_bus_head);
+
result = acpi_bus_register_driver(&acpi_video_bus);
if (result < 0)
return -ENODEV;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 940edbf2fe8f..84875fd4c74f 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -168,6 +168,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "Lenovo Yoga 13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
+ },
+ },
{ },
};
@@ -233,11 +241,11 @@ static void acpi_video_caps_check(void)
acpi_video_get_capabilities(NULL);
}
-bool acpi_video_backlight_quirks(void)
+bool acpi_osi_is_win8(void)
{
return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
}
-EXPORT_SYMBOL(acpi_video_backlight_quirks);
+EXPORT_SYMBOL(acpi_osi_is_win8);
/* Promote the vendor interface instead of the generic video module.
* This function allow DMI blacklists to be implemented by externals
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index c6707278a6bb..c4876ac9151a 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -552,7 +552,6 @@ amba_aphb_device_add(struct device *parent, const char *name,
if (!dev)
return ERR_PTR(-ENOMEM);
- dev->dma_mask = dma_mask;
dev->dev.coherent_dma_mask = dma_mask;
dev->irq[0] = irq1;
dev->irq[1] = irq2;
@@ -619,7 +618,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
dev_set_name(&dev->dev, "%s", name);
dev->dev.release = amba_device_release;
dev->dev.bus = &amba_bustype;
- dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
dev->res.name = dev_name(&dev->dev);
}
@@ -663,9 +662,6 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
amba_device_initialize(dev, dev->dev.init_name);
dev->dev.init_name = NULL;
- if (!dev->dev.coherent_dma_mask && dev->dma_mask)
- dev_warn(&dev->dev, "coherent dma mask is unset\n");
-
return amba_device_add(dev, parent);
}
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 9d715ae5ff6b..e2903d03180e 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -292,6 +292,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci }, /* Wildcat Point-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1343,7 +1347,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
else
- printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+ dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host);
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 11456371f29b..2289efdf8203 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -339,6 +339,7 @@ extern struct device_attribute *ahci_sdev_attrs[];
.sdev_attrs = ahci_sdev_attrs
extern struct ata_port_operations ahci_ops;
+extern struct ata_port_operations ahci_platform_ops;
extern struct ata_port_operations ahci_pmp_retry_srst_ops;
unsigned int ahci_dev_classify(struct ata_port *ap);
@@ -368,6 +369,7 @@ irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance);
irqreturn_t ahci_thread_fn(int irq, void *dev_instance);
void ahci_print_info(struct ata_host *host, const char *scc_s);
int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis);
+void ahci_error_handler(struct ata_port *ap);
static inline void __iomem *__ahci_port_base(struct ata_host *host,
unsigned int port_no)
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 58debb0acc3a..ae2d73fe321e 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -1,6 +1,6 @@
/*
+ * copyright (c) 2013 Freescale Semiconductor, Inc.
* Freescale IMX AHCI SATA platform driver
- * Copyright 2013 Freescale Semiconductor, Inc.
*
* based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
*
@@ -25,10 +25,13 @@
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/libata.h>
#include "ahci.h"
enum {
- HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
+ PORT_PHY_CTL = 0x178, /* Port0 PHY Control */
+ PORT_PHY_CTL_PDDQ_LOC = 0x100000, /* PORT_PHY_CTL bits */
+ HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
};
struct imx_ahci_priv {
@@ -36,6 +39,56 @@ struct imx_ahci_priv {
struct clk *sata_ref_clk;
struct clk *ahb_clk;
struct regmap *gpr;
+ bool no_device;
+ bool first_time;
+};
+
+static int ahci_imx_hotplug;
+module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
+MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support)");
+
+static void ahci_imx_error_handler(struct ata_port *ap)
+{
+ u32 reg_val;
+ struct ata_device *dev;
+ struct ata_host *host = dev_get_drvdata(ap->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
+
+ ahci_error_handler(ap);
+
+ if (!(imxpriv->first_time) || ahci_imx_hotplug)
+ return;
+
+ imxpriv->first_time = false;
+
+ ata_for_each_dev(dev, &ap->link, ENABLED)
+ return;
+ /*
+ * Disable link to save power. An imx ahci port can't be recovered
+ * without full reset once the pddq mode is enabled, making it
+ * impossible to use as part of libata LPM.
+ */
+ reg_val = readl(mmio + PORT_PHY_CTL);
+ writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ clk_disable_unprepare(imxpriv->sata_ref_clk);
+ imxpriv->no_device = true;
+}
+
+static struct ata_port_operations ahci_imx_ops = {
+ .inherits = &ahci_platform_ops,
+ .error_handler = ahci_imx_error_handler,
+};
+
+static const struct ata_port_info ahci_imx_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_imx_ops,
};
static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
@@ -117,9 +170,51 @@ static void imx6q_sata_exit(struct device *dev)
clk_disable_unprepare(imxpriv->sata_ref_clk);
}
+static int imx_ahci_suspend(struct device *dev)
+{
+ struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+
+ /*
+ * If no_device is set, the CLKs were already gated off during
+ * initialization, so don't do it again here.
+ */
+ if (!imxpriv->no_device) {
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ clk_disable_unprepare(imxpriv->sata_ref_clk);
+ }
+
+ return 0;
+}
+
+static int imx_ahci_resume(struct device *dev)
+{
+ struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+ int ret;
+
+ if (!imxpriv->no_device) {
+ ret = clk_prepare_enable(imxpriv->sata_ref_clk);
+ if (ret < 0) {
+ dev_err(dev, "pre-enable sata_ref clock err:%d\n", ret);
+ return ret;
+ }
+
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
static struct ahci_platform_data imx6q_sata_pdata = {
.init = imx6q_sata_init,
.exit = imx6q_sata_exit,
+ .ata_port_info = &ahci_imx_port_info,
+ .suspend = imx_ahci_suspend,
+ .resume = imx_ahci_resume,
};
static const struct of_device_id imx_ahci_of_match[] = {
@@ -152,6 +247,8 @@ static int imx_ahci_probe(struct platform_device *pdev)
ahci_dev = &ahci_pdev->dev;
ahci_dev->parent = dev;
+ imxpriv->no_device = false;
+ imxpriv->first_time = true;
imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(imxpriv->ahb_clk)) {
dev_err(dev, "can't get ahb clock.\n");
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 2daaee05cab1..f9554318504f 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -49,10 +49,11 @@ static struct platform_device_id ahci_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, ahci_devtype);
-static struct ata_port_operations ahci_platform_ops = {
+struct ata_port_operations ahci_platform_ops = {
.inherits = &ahci_ops,
.host_stop = ahci_host_stop,
};
+EXPORT_SYMBOL_GPL(ahci_platform_ops);
static struct ata_port_operations ahci_platform_retry_srst_ops = {
.inherits = &ahci_pmp_retry_srst_ops,
@@ -184,7 +185,7 @@ static int ahci_probe(struct platform_device *pdev)
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
else
- printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+ dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 513ad7ed0c99..6334c8d7c3f1 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -100,7 +100,7 @@
enum {
PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
- ICH5_PMR = 0x90, /* port mapping register */
+ ICH5_PMR = 0x90, /* address map register */
ICH5_PCS = 0x92, /* port control and status */
PIIX_SIDPR_BAR = 5,
PIIX_SIDPR_LEN = 16,
@@ -233,7 +233,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata },
/* 82801GB/GR/GH (ICH7, identical to ICH6) */
{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
- /* 2801GBM/GHM (ICH7M, identical to ICH6M) */
+ /* 82801GBM/GHM (ICH7M, identical to ICH6M) */
{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata },
/* Enterprise Southbridge 2 (631xESB/632xESB) */
{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
@@ -517,7 +517,7 @@ static int ich_pata_cable_detect(struct ata_port *ap)
const struct ich_laptop *lap = &ich_laptop[0];
u8 mask;
- /* Check for specials - Acer Aspire 5602WLMi */
+ /* Check for specials */
while (lap->device) {
if (lap->device == pdev->device &&
lap->subvendor == pdev->subsystem_vendor &&
@@ -1366,38 +1366,39 @@ static const int *piix_init_sata_map(struct pci_dev *pdev,
const int *map;
int i, invalid_map = 0;
u8 map_value;
+ char buf[32];
+ char *p = buf, *end = buf + sizeof(buf);
pci_read_config_byte(pdev, ICH5_PMR, &map_value);
map = map_db->map[map_value & map_db->mask];
- dev_info(&pdev->dev, "MAP [");
for (i = 0; i < 4; i++) {
switch (map[i]) {
case RV:
invalid_map = 1;
- pr_cont(" XX");
+ p += scnprintf(p, end - p, " XX");
break;
case NA:
- pr_cont(" --");
+ p += scnprintf(p, end - p, " --");
break;
case IDE:
WARN_ON((i & 1) || map[i + 1] != IDE);
pinfo[i / 2] = piix_port_info[ich_pata_100];
i++;
- pr_cont(" IDE IDE");
+ p += scnprintf(p, end - p, " IDE IDE");
break;
default:
- pr_cont(" P%d", map[i]);
+ p += scnprintf(p, end - p, " P%d", map[i]);
if (i & 1)
pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
break;
}
}
- pr_cont(" ]\n");
+ dev_info(&pdev->dev, "MAP [%s ]\n", buf);
if (invalid_map)
dev_err(&pdev->dev, "invalid MAP value %u\n", map_value);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index acfd0f711069..c482f8cadd7a 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -89,7 +89,6 @@ static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
-static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static void ahci_dev_config(struct ata_device *dev);
#ifdef CONFIG_PM
@@ -189,14 +188,15 @@ struct ata_port_operations ahci_pmp_retry_srst_ops = {
};
EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
-int ahci_em_messages = 1;
+static bool ahci_em_messages __read_mostly = true;
EXPORT_SYMBOL_GPL(ahci_em_messages);
-module_param(ahci_em_messages, int, 0444);
+module_param(ahci_em_messages, bool, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
"AHCI Enclosure Management Message control (0 = off, 1 = on)");
-int devslp_idle_timeout = 1000; /* device sleep idle timeout in ms */
+/* device sleep idle timeout in ms */
+static int devslp_idle_timeout __read_mostly = 1000;
module_param(devslp_idle_timeout, int, 0644);
MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout");
@@ -778,8 +778,16 @@ static void ahci_start_port(struct ata_port *ap)
rc = ap->ops->transmit_led_message(ap,
emp->led_state,
4);
+ /*
+ * If busy, give a breather but do not
+ * release EH ownership by using msleep()
+ * instead of ata_msleep(). EM Transmit
+ * bit is busy for the whole host and
+ * releasing ownership will cause other
+ * ports to fail the same way.
+ */
if (rc == -EBUSY)
- ata_msleep(ap, 1);
+ msleep(1);
else
break;
}
@@ -1267,9 +1275,11 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
{
struct ata_port *ap = link->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
const char *reason = NULL;
unsigned long now, msecs;
struct ata_taskfile tf;
+ bool fbs_disabled = false;
int rc;
DPRINTK("ENTER\n");
@@ -1279,6 +1289,16 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
if (rc && rc != -EOPNOTSUPP)
ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);
+ /*
+ * According to AHCI-1.2 9.3.9: if FBS is enabled, software shall
+ * clear PxFBS.EN to '0' prior to issuing software reset to devices
+ * that are attached to a port multiplier.
+ */
+ if (!ata_is_host_link(link) && pp->fbs_enabled) {
+ ahci_disable_fbs(ap);
+ fbs_disabled = true;
+ }
+
ata_tf_init(link->device, &tf);
/* issue the first D2H Register FIS */
@@ -1319,6 +1339,10 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
} else
*class = ahci_dev_classify(ap);
+ /* re-enable FBS if disabled before */
+ if (fbs_disabled)
+ ahci_enable_fbs(ap);
+
DPRINTK("EXIT, class=%u\n", *class);
return 0;
@@ -1981,7 +2005,7 @@ static void ahci_thaw(struct ata_port *ap)
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
-static void ahci_error_handler(struct ata_port *ap)
+void ahci_error_handler(struct ata_port *ap)
{
if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
/* restart engine */
@@ -1994,6 +2018,7 @@ static void ahci_error_handler(struct ata_port *ap)
if (!ata_dev_enabled(ap->link.device))
ahci_stop_engine(ap);
}
+EXPORT_SYMBOL_GPL(ahci_error_handler);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 83b1a9fb2d44..81a94a3919db 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4126,6 +4126,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
{ "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
{ "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+ { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
/* Devices we expect to fail diagnostics */
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c69fcce505c0..440ecc477cfa 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1322,14 +1322,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
* should be retried. To be used from EH.
*
* SCSI midlayer limits the number of retries to scmd->allowed.
- * scmd->retries is decremented for commands which get retried
+ * scmd->allowed is incremented for commands which get retried
* due to unrelated failures (qc->err_mask is zero).
*/
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
- if (!qc->err_mask && scmd->retries)
- scmd->retries--;
+ if (!qc->err_mask)
+ scmd->allowed++;
__ata_eh_qc_complete(qc);
}
@@ -2293,6 +2293,7 @@ const char *ata_get_cmd_descript(u8 command)
{ ATA_CMD_IDLE, "IDLE" },
{ ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
{ ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
+ { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" },
{ ATA_CMD_NOP, "NOP" },
{ ATA_CMD_FLUSH, "FLUSH CACHE" },
{ ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
@@ -2313,6 +2314,8 @@ const char *ata_get_cmd_descript(u8 command)
{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
+ { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
+ { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
{ ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
{ ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
@@ -2339,12 +2342,15 @@ const char *ata_get_cmd_descript(u8 command)
{ ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
{ ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
{ ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
+ { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" },
{ ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
{ ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
{ ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
{ ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
{ ATA_CMD_PMP_READ, "READ BUFFER" },
+ { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" },
{ ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
+ { ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" },
{ ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
{ ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
{ ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
@@ -2363,6 +2369,8 @@ const char *ata_get_cmd_descript(u8 command)
{ ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
{ ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
{ ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
+ { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" },
+ { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" },
{ ATA_CMD_READ_LONG, "READ LONG (with retries)" },
{ ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
{ ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
@@ -2394,7 +2402,7 @@ static void ata_eh_link_report(struct ata_link *link)
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
const char *frozen, *desc;
- char tries_buf[6];
+ char tries_buf[6] = "";
int tag, nr_failed = 0;
if (ehc->i.flags & ATA_EHI_QUIET)
@@ -2425,9 +2433,8 @@ static void ata_eh_link_report(struct ata_link *link)
if (ap->pflags & ATA_PFLAG_FROZEN)
frozen = " frozen";
- memset(tries_buf, 0, sizeof(tries_buf));
if (ap->eh_tries < ATA_EH_MAX_TRIES)
- snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
+ snprintf(tries_buf, sizeof(tries_buf), " t%d",
ap->eh_tries);
if (ehc->i.dev) {
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 150a917f0c3c..e37413228228 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -321,25 +321,25 @@ int ata_tport_add(struct device *parent,
/*
* ATA link attributes
*/
+static int noop(int x) { return x; }
-
-#define ata_link_show_linkspeed(field) \
+#define ata_link_show_linkspeed(field, format) \
static ssize_t \
show_ata_link_##field(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct ata_link *link = transport_class_to_link(dev); \
\
- return sprintf(buf,"%s\n", sata_spd_string(fls(link->field))); \
+ return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \
}
-#define ata_link_linkspeed_attr(field) \
- ata_link_show_linkspeed(field) \
+#define ata_link_linkspeed_attr(field, format) \
+ ata_link_show_linkspeed(field, format) \
static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
-ata_link_linkspeed_attr(hw_sata_spd_limit);
-ata_link_linkspeed_attr(sata_spd_limit);
-ata_link_linkspeed_attr(sata_spd);
+ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd, noop);
static DECLARE_TRANSPORT_CLASS(ata_link_class,
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 4bceb8803a10..b33d1f99b3a4 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -78,7 +78,7 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
ap->ioaddr.cmd_addr = cmd_addr;
- if (pnp_port_valid(idev, 1) == 0) {
+ if (pnp_port_valid(idev, 1)) {
ctl_addr = devm_ioport_map(&idev->dev,
pnp_port_start(idev, 1), 1);
ap->ioaddr.altstatus_addr = ctl_addr;
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 1ec53f8ca96f..ddf470c2341d 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -144,6 +144,7 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
struct ata_host *host;
struct ata_port *ap;
struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
+ int ret;
cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -157,7 +158,9 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
return -ENOMEM;
/* acquire resources and fill host */
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index c51bbb9ea8e8..83c4ddb1bc7f 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -1014,8 +1014,9 @@ static int octeon_cf_probe(struct platform_device *pdev)
}
cf_port->c0 = ap->ioaddr.ctl_addr;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rv)
+ return rv;
ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 851bd3f43ac6..fb0b40a191c2 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -24,6 +24,8 @@
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <asm/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
static unsigned int intr_coalescing_count;
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 7f5e5d96327f..ea3b3dc10f33 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -343,13 +343,11 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
{
struct device_node *sata_node = dev->of_node;
int phy_count = 0, phy, port = 0, i;
- void __iomem *cphy_base[CPHY_PHY_COUNT];
- struct device_node *phy_nodes[CPHY_PHY_COUNT];
- u32 tx_atten[CPHY_PORT_COUNT];
+ void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
+ struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
+ u32 tx_atten[CPHY_PORT_COUNT] = {};
memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);
- memset(phy_nodes, 0, sizeof(struct device_node*) * CPHY_PHY_COUNT);
- memset(tx_atten, 0xff, CPHY_PORT_COUNT);
do {
u32 tmp;
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index c2d95e9fb971..1dae9a9009f7 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -792,7 +792,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get access to sata clock\n");
return PTR_ERR(priv->clk);
}
- clk_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
@@ -822,7 +822,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
return 0;
cleanup:
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -841,7 +841,7 @@ static int sata_rcar_remove(struct platform_device *pdev)
iowrite32(0, base + SATAINTSTAT_REG);
iowrite32(0x7ff, base + SATAINTMASK_REG);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return 0;
}
@@ -861,7 +861,7 @@ static int sata_rcar_suspend(struct device *dev)
/* mask */
iowrite32(0x7ff, base + SATAINTMASK_REG);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
}
return ret;
@@ -873,7 +873,7 @@ static int sata_rcar_resume(struct device *dev)
struct sata_rcar_priv *priv = host->private_data;
void __iomem *base = priv->base;
- clk_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
/* ack and mask */
iowrite32(0, base + SATAINTSTAT_REG);
diff --git a/drivers/atm/firestream.h b/drivers/atm/firestream.h
index 49e783e35ee9..364eded31881 100644
--- a/drivers/atm/firestream.h
+++ b/drivers/atm/firestream.h
@@ -420,7 +420,6 @@ struct fs_transmit_config {
#define RC_FLAGS_BFPS_BFP27 (0xd << 17)
#define RC_FLAGS_BFPS_BFP47 (0xe << 17)
-#define RC_FLAGS_BFPS (0x1 << 17)
#define RC_FLAGS_BFPP (0x1 << 21)
#define RC_FLAGS_TEVC (0x1 << 22)
#define RC_FLAGS_TEP (0x1 << 23)
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index d585735430dd..a3874034e2ce 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -102,8 +102,7 @@ static int cfag12864bfb_probe(struct platform_device *device)
platform_set_drvdata(device, info);
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 4c289ab91357..73f6c2925281 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -591,37 +591,6 @@ void bus_remove_device(struct device *dev)
bus_put(dev->bus);
}
-static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv)
-{
- int error = 0;
- int i;
-
- if (bus->drv_attrs) {
- for (i = 0; bus->drv_attrs[i].attr.name; i++) {
- error = driver_create_file(drv, &bus->drv_attrs[i]);
- if (error)
- goto err;
- }
- }
-done:
- return error;
-err:
- while (--i >= 0)
- driver_remove_file(drv, &bus->drv_attrs[i]);
- goto done;
-}
-
-static void driver_remove_attrs(struct bus_type *bus,
- struct device_driver *drv)
-{
- int i;
-
- if (bus->drv_attrs) {
- for (i = 0; bus->drv_attrs[i].attr.name; i++)
- driver_remove_file(drv, &bus->drv_attrs[i]);
- }
-}
-
static int __must_check add_bind_files(struct device_driver *drv)
{
int ret;
@@ -720,16 +689,12 @@ int bus_add_driver(struct device_driver *drv)
printk(KERN_ERR "%s: uevent attr (%s) failed\n",
__func__, drv->name);
}
- error = driver_add_attrs(bus, drv);
+ error = driver_add_groups(drv, bus->drv_groups);
if (error) {
/* How the hell do we get out of this pickle? Give up */
- printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n",
- __func__, drv->name);
- }
- error = driver_add_groups(drv, bus->drv_groups);
- if (error)
printk(KERN_ERR "%s: driver_create_groups(%s) failed\n",
__func__, drv->name);
+ }
if (!drv->suppress_bind_attrs) {
error = add_bind_files(drv);
@@ -766,7 +731,6 @@ void bus_remove_driver(struct device_driver *drv)
if (!drv->suppress_bind_attrs)
remove_bind_files(drv);
- driver_remove_attrs(drv->bus, drv);
driver_remove_groups(drv, drv->bus->drv_groups);
driver_remove_file(drv, &driver_attr_uevent);
klist_remove(&drv->p->knode_bus);
@@ -846,42 +810,6 @@ struct bus_type *find_bus(char *name)
}
#endif /* 0 */
-
-/**
- * bus_add_attrs - Add default attributes for this bus.
- * @bus: Bus that has just been registered.
- */
-
-static int bus_add_attrs(struct bus_type *bus)
-{
- int error = 0;
- int i;
-
- if (bus->bus_attrs) {
- for (i = 0; bus->bus_attrs[i].attr.name; i++) {
- error = bus_create_file(bus, &bus->bus_attrs[i]);
- if (error)
- goto err;
- }
- }
-done:
- return error;
-err:
- while (--i >= 0)
- bus_remove_file(bus, &bus->bus_attrs[i]);
- goto done;
-}
-
-static void bus_remove_attrs(struct bus_type *bus)
-{
- int i;
-
- if (bus->bus_attrs) {
- for (i = 0; bus->bus_attrs[i].attr.name; i++)
- bus_remove_file(bus, &bus->bus_attrs[i]);
- }
-}
-
static int bus_add_groups(struct bus_type *bus,
const struct attribute_group **groups)
{
@@ -983,9 +911,6 @@ int bus_register(struct bus_type *bus)
if (retval)
goto bus_probe_files_fail;
- retval = bus_add_attrs(bus);
- if (retval)
- goto bus_attrs_fail;
retval = bus_add_groups(bus, bus->bus_groups);
if (retval)
goto bus_groups_fail;
@@ -994,8 +919,6 @@ int bus_register(struct bus_type *bus)
return 0;
bus_groups_fail:
- bus_remove_attrs(bus);
-bus_attrs_fail:
remove_probe_files(bus);
bus_probe_files_fail:
kset_unregister(bus->p->drivers_kset);
@@ -1024,7 +947,6 @@ void bus_unregister(struct bus_type *bus)
pr_debug("bus: '%s': unregistering\n", bus->name);
if (bus->dev_root)
device_unregister(bus->dev_root);
- bus_remove_attrs(bus);
bus_remove_groups(bus, bus->bus_groups);
remove_probe_files(bus);
kset_unregister(bus->p->drivers_kset);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 8b7818b80056..f96f70419a78 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -47,18 +47,6 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
return ret;
}
-static const void *class_attr_namespace(struct kobject *kobj,
- const struct attribute *attr)
-{
- struct class_attribute *class_attr = to_class_attr(attr);
- struct subsys_private *cp = to_subsys_private(kobj);
- const void *ns = NULL;
-
- if (class_attr->namespace)
- ns = class_attr->namespace(cp->class, class_attr);
- return ns;
-}
-
static void class_release(struct kobject *kobj)
{
struct subsys_private *cp = to_subsys_private(kobj);
@@ -86,7 +74,6 @@ static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject
static const struct sysfs_ops class_sysfs_ops = {
.show = class_attr_show,
.store = class_attr_store,
- .namespace = class_attr_namespace,
};
static struct kobj_type class_ktype = {
@@ -99,21 +86,23 @@ static struct kobj_type class_ktype = {
static struct kset *class_kset;
-int class_create_file(struct class *cls, const struct class_attribute *attr)
+int class_create_file_ns(struct class *cls, const struct class_attribute *attr,
+ const void *ns)
{
int error;
if (cls)
- error = sysfs_create_file(&cls->p->subsys.kobj,
- &attr->attr);
+ error = sysfs_create_file_ns(&cls->p->subsys.kobj,
+ &attr->attr, ns);
else
error = -EINVAL;
return error;
}
-void class_remove_file(struct class *cls, const struct class_attribute *attr)
+void class_remove_file_ns(struct class *cls, const struct class_attribute *attr,
+ const void *ns)
{
if (cls)
- sysfs_remove_file(&cls->p->subsys.kobj, &attr->attr);
+ sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns);
}
static struct class *class_get(struct class *cls)
@@ -600,8 +589,8 @@ int __init classes_init(void)
return 0;
}
-EXPORT_SYMBOL_GPL(class_create_file);
-EXPORT_SYMBOL_GPL(class_remove_file);
+EXPORT_SYMBOL_GPL(class_create_file_ns);
+EXPORT_SYMBOL_GPL(class_remove_file_ns);
EXPORT_SYMBOL_GPL(class_unregister);
EXPORT_SYMBOL_GPL(class_destroy);
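For illustration, a minimal sketch of a caller of the new *_ns helpers; foo_show, foo_attr, foo_register, foo_unregister and the namespace argument are assumed names, not part of this patch:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

/* Hypothetical read-only class attribute, used only to illustrate the API. */
static ssize_t foo_show(struct class *cls, struct class_attribute *attr,
                        char *buf)
{
        return sprintf(buf, "foo\n");
}

static struct class_attribute foo_attr = __ATTR(foo, S_IRUGO, foo_show, NULL);

/* Create the attribute file tagged with a namespace (e.g. a struct net *). */
static int foo_register(struct class *cls, const void *ns)
{
        return class_create_file_ns(cls, &foo_attr, ns);
}

/* Tear-down mirrors creation with the same namespace tag. */
static void foo_unregister(struct class *cls, const void *ns)
{
        class_remove_file_ns(cls, &foo_attr, ns);
}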
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 34abf4d8a45f..67b180d855b2 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -455,64 +455,6 @@ static ssize_t online_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(online);
-static int device_add_attributes(struct device *dev,
- struct device_attribute *attrs)
-{
- int error = 0;
- int i;
-
- if (attrs) {
- for (i = 0; attrs[i].attr.name; i++) {
- error = device_create_file(dev, &attrs[i]);
- if (error)
- break;
- }
- if (error)
- while (--i >= 0)
- device_remove_file(dev, &attrs[i]);
- }
- return error;
-}
-
-static void device_remove_attributes(struct device *dev,
- struct device_attribute *attrs)
-{
- int i;
-
- if (attrs)
- for (i = 0; attrs[i].attr.name; i++)
- device_remove_file(dev, &attrs[i]);
-}
-
-static int device_add_bin_attributes(struct device *dev,
- struct bin_attribute *attrs)
-{
- int error = 0;
- int i;
-
- if (attrs) {
- for (i = 0; attrs[i].attr.name; i++) {
- error = device_create_bin_file(dev, &attrs[i]);
- if (error)
- break;
- }
- if (error)
- while (--i >= 0)
- device_remove_bin_file(dev, &attrs[i]);
- }
- return error;
-}
-
-static void device_remove_bin_attributes(struct device *dev,
- struct bin_attribute *attrs)
-{
- int i;
-
- if (attrs)
- for (i = 0; attrs[i].attr.name; i++)
- device_remove_bin_file(dev, &attrs[i]);
-}
-
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
return sysfs_create_groups(&dev->kobj, groups);
@@ -534,18 +476,12 @@ static int device_add_attrs(struct device *dev)
error = device_add_groups(dev, class->dev_groups);
if (error)
return error;
- error = device_add_attributes(dev, class->dev_attrs);
- if (error)
- goto err_remove_class_groups;
- error = device_add_bin_attributes(dev, class->dev_bin_attrs);
- if (error)
- goto err_remove_class_attrs;
}
if (type) {
error = device_add_groups(dev, type->groups);
if (error)
- goto err_remove_class_bin_attrs;
+ goto err_remove_class_groups;
}
error = device_add_groups(dev, dev->groups);
@@ -563,12 +499,6 @@ static int device_add_attrs(struct device *dev)
err_remove_type_groups:
if (type)
device_remove_groups(dev, type->groups);
- err_remove_class_bin_attrs:
- if (class)
- device_remove_bin_attributes(dev, class->dev_bin_attrs);
- err_remove_class_attrs:
- if (class)
- device_remove_attributes(dev, class->dev_attrs);
err_remove_class_groups:
if (class)
device_remove_groups(dev, class->dev_groups);
@@ -587,11 +517,8 @@ static void device_remove_attrs(struct device *dev)
if (type)
device_remove_groups(dev, type->groups);
- if (class) {
- device_remove_attributes(dev, class->dev_attrs);
- device_remove_bin_attributes(dev, class->dev_bin_attrs);
+ if (class)
device_remove_groups(dev, class->dev_groups);
- }
}
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
@@ -1881,6 +1808,7 @@ EXPORT_SYMBOL_GPL(device_destroy);
*/
int device_rename(struct device *dev, const char *new_name)
{
+ struct kobject *kobj = &dev->kobj;
char *old_device_name = NULL;
int error;
@@ -1888,8 +1816,7 @@ int device_rename(struct device *dev, const char *new_name)
if (!dev)
return -EINVAL;
- pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev),
- __func__, new_name);
+ dev_dbg(dev, "renaming to %s\n", new_name);
old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
if (!old_device_name) {
@@ -1898,13 +1825,14 @@ int device_rename(struct device *dev, const char *new_name)
}
if (dev->class) {
- error = sysfs_rename_link(&dev->class->p->subsys.kobj,
- &dev->kobj, old_device_name, new_name);
+ error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
+ kobj, old_device_name,
+ new_name, kobject_namespace(kobj));
if (error)
goto out;
}
- error = kobject_rename(&dev->kobj, new_name);
+ error = kobject_rename(kobj, new_name);
if (error)
goto out;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 848ebbd25717..f48370dfc908 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -44,13 +44,11 @@ static int __ref cpu_subsys_online(struct device *dev)
struct cpu *cpu = container_of(dev, struct cpu, dev);
int cpuid = dev->id;
int from_nid, to_nid;
- int ret = -ENODEV;
-
- cpu_hotplug_driver_lock();
+ int ret;
from_nid = cpu_to_node(cpuid);
if (from_nid == NUMA_NO_NODE)
- goto out;
+ return -ENODEV;
ret = cpu_up(cpuid);
/*
@@ -61,19 +59,12 @@ static int __ref cpu_subsys_online(struct device *dev)
if (from_nid != to_nid)
change_cpu_under_node(cpu, from_nid, to_nid);
- out:
- cpu_hotplug_driver_unlock();
return ret;
}
static int cpu_subsys_offline(struct device *dev)
{
- int ret;
-
- cpu_hotplug_driver_lock();
- ret = cpu_down(dev->id);
- cpu_hotplug_driver_unlock();
- return ret;
+ return cpu_down(dev->id);
}
void unregister_cpu(struct cpu *cpu)
@@ -93,7 +84,17 @@ static ssize_t cpu_probe_store(struct device *dev,
const char *buf,
size_t count)
{
- return arch_cpu_probe(buf, count);
+ ssize_t cnt;
+ int ret;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ cnt = arch_cpu_probe(buf, count);
+
+ unlock_device_hotplug();
+ return cnt;
}
static ssize_t cpu_release_store(struct device *dev,
@@ -101,7 +102,17 @@ static ssize_t cpu_release_store(struct device *dev,
const char *buf,
size_t count)
{
- return arch_cpu_release(buf, count);
+ ssize_t cnt;
+ int ret;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ cnt = arch_cpu_release(buf, count);
+
+ unlock_device_hotplug();
+ return cnt;
}
static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 507379e7b763..545c4de412c3 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -91,7 +91,8 @@ static __always_inline struct devres * alloc_dr(dr_release_t release,
if (unlikely(!dr))
return NULL;
- memset(dr, 0, tot_size);
+ memset(dr, 0, offsetof(struct devres, data));
+
INIT_LIST_HEAD(&dr->node.entry);
dr->node.release = release;
return dr;
@@ -110,7 +111,7 @@ void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO);
if (unlikely(!dr))
return NULL;
set_node_dbginfo(&dr->node, name, size);
@@ -135,7 +136,7 @@ void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO);
if (unlikely(!dr))
return NULL;
return dr->data;
@@ -745,58 +746,62 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
EXPORT_SYMBOL_GPL(devm_remove_action);
/*
- * Managed kzalloc/kfree
+ * Managed kmalloc/kfree
*/
-static void devm_kzalloc_release(struct device *dev, void *res)
+static void devm_kmalloc_release(struct device *dev, void *res)
{
/* noop */
}
-static int devm_kzalloc_match(struct device *dev, void *res, void *data)
+static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
return res == data;
}
/**
- * devm_kzalloc - Resource-managed kzalloc
+ * devm_kmalloc - Resource-managed kmalloc
* @dev: Device to allocate memory for
* @size: Allocation size
* @gfp: Allocation gfp flags
*
- * Managed kzalloc. Memory allocated with this function is
+ * Managed kmalloc. Memory allocated with this function is
* automatically freed on driver detach. Like all other devres
* resources, guaranteed alignment is unsigned long long.
*
* RETURNS:
* Pointer to allocated memory on success, NULL on failure.
*/
-void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
+void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
struct devres *dr;
/* use raw alloc_dr for kmalloc caller tracing */
- dr = alloc_dr(devm_kzalloc_release, size, gfp);
+ dr = alloc_dr(devm_kmalloc_release, size, gfp);
if (unlikely(!dr))
return NULL;
+ /*
+ * This is named devm_kzalloc_release for historical reasons: the
+ * initial implementation did not support kmalloc, only kzalloc.
+ */
set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
devres_add(dev, dr->data);
return dr->data;
}
-EXPORT_SYMBOL_GPL(devm_kzalloc);
+EXPORT_SYMBOL_GPL(devm_kmalloc);
/**
* devm_kfree - Resource-managed kfree
* @dev: Device this memory belongs to
* @p: Memory to free
*
- * Free memory allocated with devm_kzalloc().
+ * Free memory allocated with devm_kmalloc().
*/
void devm_kfree(struct device *dev, void *p)
{
int rc;
- rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p);
+ rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p);
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);
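For reference, a minimal usage sketch of the renamed allocator; foo_probe, struct foo_priv and FOO_BUF_SIZE are illustrative assumptions, not part of this patch, and both allocations are released automatically on driver detach:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define FOO_BUF_SIZE 256                /* hypothetical size, illustration only */

struct foo_priv {                       /* hypothetical private data */
        void *buf;
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_priv *priv;

        /* zeroed allocations keep using devm_kzalloc() */
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* uninitialised scratch memory can now use devm_kmalloc() directly */
        priv->buf = devm_kmalloc(&pdev->dev, FOO_BUF_SIZE, GFP_KERNEL);
        if (!priv->buf)
                return -ENOMEM;

        platform_set_drvdata(pdev, priv);
        return 0;                       /* both buffers are freed on detach */
}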
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 99802d6f3c60..165c2c299e57 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -49,7 +49,7 @@ struct cma *dma_contiguous_default_area;
/*
* Default global CMA area size can be defined in kernel's .config.
- * This is usefull mainly for distro maintainers to create a kernel
+ * This is useful mainly for distro maintainers to create a kernel
* that works correctly for most supported systems.
* The size can be set in bytes or as a percentage of the total memory
* in the system.
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 10a4467c63f1..eb8fb94ae2c5 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -282,31 +282,35 @@ static noinline_for_stack long fw_file_size(struct file *file)
return st.size;
}
-static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
+static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
{
long size;
char *buf;
+ int rc;
size = fw_file_size(file);
if (size <= 0)
- return false;
+ return -EINVAL;
buf = vmalloc(size);
if (!buf)
- return false;
- if (kernel_read(file, 0, buf, size) != size) {
+ return -ENOMEM;
+ rc = kernel_read(file, 0, buf, size);
+ if (rc != size) {
+ if (rc > 0)
+ rc = -EIO;
vfree(buf);
- return false;
+ return rc;
}
fw_buf->data = buf;
fw_buf->size = size;
- return true;
+ return 0;
}
-static bool fw_get_filesystem_firmware(struct device *device,
+static int fw_get_filesystem_firmware(struct device *device,
struct firmware_buf *buf)
{
int i;
- bool success = false;
+ int rc = -ENOENT;
char *path = __getname();
for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
@@ -321,14 +325,17 @@ static bool fw_get_filesystem_firmware(struct device *device,
file = filp_open(path, O_RDONLY, 0);
if (IS_ERR(file))
continue;
- success = fw_read_file_contents(file, buf);
+ rc = fw_read_file_contents(file, buf);
fput(file);
- if (success)
+ if (rc)
+ dev_warn(device, "firmware, attempted to load %s, but failed with error %d\n",
+ path, rc);
+ else
break;
}
__putname(path);
- if (success) {
+ if (!rc) {
dev_dbg(device, "firmware: direct-loading firmware %s\n",
buf->fw_id);
mutex_lock(&fw_lock);
@@ -337,7 +344,7 @@ static bool fw_get_filesystem_firmware(struct device *device,
mutex_unlock(&fw_lock);
}
- return success;
+ return rc;
}
/* firmware holds the ownership of pages */
@@ -1086,9 +1093,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
}
}
- if (!fw_get_filesystem_firmware(device, fw->priv))
+ ret = fw_get_filesystem_firmware(device, fw->priv);
+ if (ret) {
+ dev_warn(device, "Direct firmware load failed with error %d\n",
+ ret);
+ dev_warn(device, "Falling back to user helper\n");
ret = fw_load_from_user_helper(fw, name, device,
uevent, nowait, timeout);
+ }
/* don't cache firmware handled without uevent */
if (!ret)
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4f8bef3eb5a8..47051cd25113 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -488,6 +488,11 @@ static int platform_drv_probe(struct device *_dev)
if (ret && ACPI_HANDLE(_dev))
acpi_dev_pm_detach(_dev, true);
+ if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
+ dev_warn(_dev, "probe deferral not supported\n");
+ ret = -ENXIO;
+ }
+
return ret;
}
@@ -553,8 +558,7 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);
/**
* platform_driver_probe - register driver for non-hotpluggable device
* @drv: platform driver structure
- * @probe: the driver probe routine, probably from an __init section,
- * must not return -EPROBE_DEFER.
+ * @probe: the driver probe routine, probably from an __init section
*
* Use this instead of platform_driver_register() when you know the device
* is not hotpluggable and has already been registered, and you want to
@@ -565,8 +569,7 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);
* into system-on-chip processors, where the controller devices have been
* configured as part of board setup.
*
- * This is incompatible with deferred probing so probe() must not
- * return -EPROBE_DEFER.
+ * Note that this is incompatible with deferred probing.
*
* Returns zero if the driver registered and bound to a device, else returns
* a negative error code and with the driver not registered.
@@ -576,6 +579,12 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
{
int retval, code;
+ /*
+ * Prevent driver from requesting probe deferral to avoid further
+ * futile probe attempts.
+ */
+ drv->prevent_deferred_probe = true;
+
/* make sure driver won't have bind/unbind attributes */
drv->driver.suppress_bind_attrs = true;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9f098a82cf04..ee039afe9078 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -30,6 +30,8 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpuidle.h>
+#include <linux/timer.h>
+
#include "../base.h"
#include "power.h"
@@ -390,6 +392,71 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
return error;
}
+#ifdef CONFIG_DPM_WATCHDOG
+struct dpm_watchdog {
+ struct device *dev;
+ struct task_struct *tsk;
+ struct timer_list timer;
+};
+
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
+ struct dpm_watchdog wd
+
+/**
+ * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
+ * @data: Watchdog object address.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover so panic() to
+ * capture a crash-dump in pstore.
+ */
+static void dpm_watchdog_handler(unsigned long data)
+{
+ struct dpm_watchdog *wd = (void *)data;
+
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+}
+
+/**
+ * dpm_watchdog_set - Enable pm watchdog for given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
+{
+ struct timer_list *timer = &wd->timer;
+
+ wd->dev = dev;
+ wd->tsk = current;
+
+ init_timer_on_stack(timer);
+ /* use same timeout value for both suspend and resume */
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ timer->function = dpm_watchdog_handler;
+ timer->data = (unsigned long)wd;
+ add_timer(timer);
+}
+
+/**
+ * dpm_watchdog_clear - Disable suspend/resume watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_watchdog_clear(struct dpm_watchdog *wd)
+{
+ struct timer_list *timer = &wd->timer;
+
+ del_timer_sync(timer);
+ destroy_timer_on_stack(timer);
+}
+#else
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
+#define dpm_watchdog_set(x, y)
+#define dpm_watchdog_clear(x)
+#endif
+
/*------------------------- Resume routines -------------------------*/
/**
@@ -576,6 +643,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
+ DECLARE_DPM_WATCHDOG_ON_STACK(wd);
TRACE_DEVICE(dev);
TRACE_RESUME(0);
@@ -584,6 +652,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
dpm_wait(dev->parent, async);
+ dpm_watchdog_set(&wd, dev);
device_lock(dev);
/*
@@ -642,6 +711,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
Unlock:
device_unlock(dev);
+ dpm_watchdog_clear(&wd);
Complete:
complete_all(&dev->power.completion);
@@ -1060,6 +1130,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
+ DECLARE_DPM_WATCHDOG_ON_STACK(wd);
dpm_wait_for_children(dev, async);
@@ -1083,6 +1154,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ dpm_watchdog_set(&wd, dev);
device_lock(dev);
if (dev->pm_domain) {
@@ -1139,6 +1211,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
device_unlock(dev);
+ dpm_watchdog_clear(&wd);
Complete:
complete_all(&dev->power.completion);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ef89897c6043..fa4187418440 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -21,7 +21,7 @@
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/of.h>
#include <linux/export.h>
@@ -42,7 +42,7 @@
*/
/**
- * struct opp - Generic OPP description structure
+ * struct dev_pm_opp - Generic OPP description structure
* @node: opp list node. The nodes are maintained throughout the lifetime
* of boot. It is expected only an optimal set of OPPs are
* added to the library by the SoC framework.
@@ -59,7 +59,7 @@
*
* This structure stores the OPP information for a given device.
*/
-struct opp {
+struct dev_pm_opp {
struct list_head node;
bool available;
@@ -136,7 +136,7 @@ static struct device_opp *find_device_opp(struct device *dev)
}
/**
- * opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
* @opp: opp for which voltage has to be returned for
*
* Return voltage in micro volt corresponding to the opp, else
@@ -150,9 +150,9 @@ static struct device_opp *find_device_opp(struct device *dev)
* prior to unlocking with rcu_read_unlock() to maintain the integrity of the
* pointer.
*/
-unsigned long opp_get_voltage(struct opp *opp)
+unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
- struct opp *tmp_opp;
+ struct dev_pm_opp *tmp_opp;
unsigned long v = 0;
tmp_opp = rcu_dereference(opp);
@@ -163,10 +163,10 @@ unsigned long opp_get_voltage(struct opp *opp)
return v;
}
-EXPORT_SYMBOL_GPL(opp_get_voltage);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
/**
- * opp_get_freq() - Gets the frequency corresponding to an available opp
+ * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
* @opp: opp for which frequency has to be returned for
*
* Return frequency in hertz corresponding to the opp, else
@@ -180,9 +180,9 @@ EXPORT_SYMBOL_GPL(opp_get_voltage);
* prior to unlocking with rcu_read_unlock() to maintain the integrity of the
* pointer.
*/
-unsigned long opp_get_freq(struct opp *opp)
+unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
- struct opp *tmp_opp;
+ struct dev_pm_opp *tmp_opp;
unsigned long f = 0;
tmp_opp = rcu_dereference(opp);
@@ -193,10 +193,10 @@ unsigned long opp_get_freq(struct opp *opp)
return f;
}
-EXPORT_SYMBOL_GPL(opp_get_freq);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
/**
- * opp_get_opp_count() - Get number of opps available in the opp list
+ * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
* @dev: device for which we do this operation
*
* This function returns the number of available opps if there are any,
@@ -206,10 +206,10 @@ EXPORT_SYMBOL_GPL(opp_get_freq);
* internally references two RCU protected structures: device_opp and opp which
* are safe as long as we are under a common RCU locked section.
*/
-int opp_get_opp_count(struct device *dev)
+int dev_pm_opp_get_opp_count(struct device *dev)
{
struct device_opp *dev_opp;
- struct opp *temp_opp;
+ struct dev_pm_opp *temp_opp;
int count = 0;
dev_opp = find_device_opp(dev);
@@ -226,10 +226,10 @@ int opp_get_opp_count(struct device *dev)
return count;
}
-EXPORT_SYMBOL_GPL(opp_get_opp_count);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
/**
- * opp_find_freq_exact() - search for an exact frequency
+ * dev_pm_opp_find_freq_exact() - search for an exact frequency
* @dev: device for which we do this operation
* @freq: frequency to search for
* @available: true/false - match for available opp
@@ -254,11 +254,12 @@ EXPORT_SYMBOL_GPL(opp_get_opp_count);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
- bool available)
+struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+ unsigned long freq,
+ bool available)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
@@ -277,10 +278,10 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
return opp;
}
-EXPORT_SYMBOL_GPL(opp_find_freq_exact);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
/**
- * opp_find_freq_ceil() - Search for an rounded ceil freq
+ * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
* @dev: device for which we do this operation
* @freq: Start frequency
*
@@ -300,10 +301,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_exact);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+ unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -324,10 +326,10 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
return opp;
}
-EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
/**
- * opp_find_freq_floor() - Search for a rounded floor freq
+ * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
* @dev: device for which we do this operation
* @freq: Start frequency
*
@@ -347,10 +349,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+ unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -375,17 +378,17 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
return opp;
}
-EXPORT_SYMBOL_GPL(opp_find_freq_floor);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
/**
- * opp_add() - Add an OPP table from a table definitions
+ * dev_pm_opp_add() - Add an OPP table from a table definitions
* @dev: device for which we do this operation
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
*
* This function adds an opp definition to the opp list and returns status.
* The opp is made available by default and it can be controlled using
- * opp_enable/disable functions.
+ * dev_pm_opp_enable/disable functions.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
@@ -393,14 +396,14 @@ EXPORT_SYMBOL_GPL(opp_find_freq_floor);
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
*/
-int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
struct device_opp *dev_opp = NULL;
- struct opp *opp, *new_opp;
+ struct dev_pm_opp *opp, *new_opp;
struct list_head *head;
/* allocate new OPP node */
- new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
+ new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
if (!new_opp) {
dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
return -ENOMEM;
@@ -460,7 +463,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
return 0;
}
-EXPORT_SYMBOL_GPL(opp_add);
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
/**
* opp_set_availability() - helper to set the availability of an opp
@@ -485,11 +488,11 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
- struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+ struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
/* keep the node allocated */
- new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
+ new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
if (!new_opp) {
dev_warn(dev, "%s: Unable to create OPP\n", __func__);
return -ENOMEM;
@@ -552,13 +555,13 @@ unlock:
}
/**
- * opp_enable() - Enable a specific OPP
+ * dev_pm_opp_enable() - Enable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to enable
*
* Enables a provided opp. If the operation is valid, this returns 0, else the
* corresponding error value. It is meant to be used by users to make an OPP available
- * after being temporarily made unavailable with opp_disable.
+ * after being temporarily made unavailable with dev_pm_opp_disable.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
@@ -566,21 +569,21 @@ unlock:
* this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*/
-int opp_enable(struct device *dev, unsigned long freq)
+int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, true);
}
-EXPORT_SYMBOL_GPL(opp_enable);
+EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
/**
- * opp_disable() - Disable a specific OPP
+ * dev_pm_opp_disable() - Disable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to disable
*
* Disables a provided opp. If the operation is valid, this returns
* 0, else the corresponding error value. It is meant to be a temporary
* control by users to make this OPP not available until the circumstances are
- * right to make it available again (with a call to opp_enable).
+ * right to make it available again (with a call to dev_pm_opp_enable).
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
@@ -588,15 +591,15 @@ EXPORT_SYMBOL_GPL(opp_enable);
* this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*/
-int opp_disable(struct device *dev, unsigned long freq)
+int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, false);
}
-EXPORT_SYMBOL_GPL(opp_disable);
+EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
#ifdef CONFIG_CPU_FREQ
/**
- * opp_init_cpufreq_table() - create a cpufreq table for a device
+ * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
* @dev: device for which we do this operation
* @table: Cpufreq table returned back to caller
*
@@ -619,11 +622,11 @@ EXPORT_SYMBOL_GPL(opp_disable);
* Callers should ensure that this function is *NOT* called under RCU protection
* or in contexts where mutex locking cannot be used.
*/
-int opp_init_cpufreq_table(struct device *dev,
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
{
struct device_opp *dev_opp;
- struct opp *opp;
+ struct dev_pm_opp *opp;
struct cpufreq_frequency_table *freq_table;
int i = 0;
@@ -639,7 +642,7 @@ int opp_init_cpufreq_table(struct device *dev,
}
freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
- (opp_get_opp_count(dev) + 1), GFP_KERNEL);
+ (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
if (!freq_table) {
mutex_unlock(&dev_opp_list_lock);
dev_warn(dev, "%s: Unable to allocate frequency table\n",
@@ -663,16 +666,16 @@ int opp_init_cpufreq_table(struct device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
/**
- * opp_free_cpufreq_table() - free the cpufreq table
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
* @dev: device for which we do this operation
* @table: table to free
*
- * Free up the table allocated by opp_init_cpufreq_table
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table
*/
-void opp_free_cpufreq_table(struct device *dev,
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
{
if (!table)
@@ -681,14 +684,14 @@ void opp_free_cpufreq_table(struct device *dev,
kfree(*table);
*table = NULL;
}
-EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
/**
- * opp_get_notifier() - find notifier_head of the device with opp
+ * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
* @dev: device pointer used to lookup device OPPs.
*/
-struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
struct device_opp *dev_opp = find_device_opp(dev);
@@ -732,7 +735,7 @@ int of_init_opp_table(struct device *dev)
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
- if (opp_add(dev, freq, volt)) {
+ if (dev_pm_opp_add(dev, freq, volt)) {
dev_warn(dev, "%s: Failed to add OPP %ld\n",
__func__, freq);
continue;
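To orient readers of the rename, a minimal consumer sketch that follows the RCU rules described in the kernel-doc above; foo_get_opp_voltage, dev, target_freq and the error-handling policy are assumed for illustration:

#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/*
 * Pick the lowest OPP at or above target_freq and report its voltage.
 * The returned opp pointer is RCU protected and must only be used
 * under rcu_read_lock(), as the kernel-doc above requires.
 */
static int foo_get_opp_voltage(struct device *dev, unsigned long target_freq,
                               unsigned long *u_volt)
{
        struct dev_pm_opp *opp;
        unsigned long freq = target_freq;

        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(dev, &freq);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                return PTR_ERR(opp);
        }
        *u_volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();

        return 0;
}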
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 268a35097578..72e00e66ecc5 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -258,7 +258,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
* Check if the device's runtime PM status allows it to be suspended. If
* another idle notification has been started earlier, return immediately. If
* the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
- * run the ->runtime_idle() callback directly.
+ * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
+ * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
@@ -331,7 +332,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
out:
trace_rpm_return_int(dev, _THIS_IP_, retval);
- return retval ? retval : rpm_suspend(dev, rpmflags);
+ return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
/**
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index f0d30543fcce..4251570610c9 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,7 +3,7 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_MMIO || REGMAP_IRQ)
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_MMIO || REGMAP_IRQ)
select LZO_COMPRESS
select LZO_DECOMPRESS
select IRQ_DOMAIN if REGMAP_IRQ
@@ -15,6 +15,9 @@ config REGMAP_I2C
config REGMAP_SPI
tristate
+config REGMAP_SPMI
+ tristate
+
config REGMAP_MMIO
tristate
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index cf129980abd0..a7c670b4123a 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -3,5 +3,6 @@ obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
+obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 57f777835d97..33414b1de201 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -44,7 +44,6 @@ struct regmap_format {
struct regmap_async {
struct list_head list;
- struct work_struct cleanup;
struct regmap *map;
void *work_buf;
};
@@ -64,9 +63,11 @@ struct regmap {
void *bus_context;
const char *name;
+ bool async;
spinlock_t async_lock;
wait_queue_head_t async_waitq;
struct list_head async_list;
+ struct list_head async_free;
int async_ret;
#ifdef CONFIG_DEBUG_FS
@@ -179,6 +180,9 @@ struct regmap_field {
/* lsb */
unsigned int shift;
unsigned int reg;
+
+ unsigned int id_size;
+ unsigned int id_offset;
};
#ifdef CONFIG_DEBUG_FS
@@ -218,7 +222,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len, bool async);
+ const void *val, size_t val_len);
void regmap_async_complete_cb(struct regmap_async *async, int ret);
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d6c2d691b6e8..d4dd77134814 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -307,6 +307,8 @@ int regcache_sync(struct regmap *map)
if (!map->cache_dirty)
goto out;
+ map->async = true;
+
/* Apply any patch first */
map->cache_bypass = 1;
for (i = 0; i < map->patch_regs; i++) {
@@ -332,11 +334,15 @@ int regcache_sync(struct regmap *map)
map->cache_dirty = false;
out:
- trace_regcache_sync(map->dev, name, "stop");
/* Restore the bypass state */
+ map->async = false;
map->cache_bypass = bypass;
map->unlock(map->lock_arg);
+ regmap_async_complete(map);
+
+ trace_regcache_sync(map->dev, name, "stop");
+
return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
@@ -375,17 +381,23 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
if (!map->cache_dirty)
goto out;
+ map->async = true;
+
if (map->cache_ops->sync)
ret = map->cache_ops->sync(map, min, max);
else
ret = regcache_default_sync(map, min, max);
out:
- trace_regcache_sync(map->dev, name, "stop region");
/* Restore the bypass state */
map->cache_bypass = bypass;
+ map->async = false;
map->unlock(map->lock_arg);
+ regmap_async_complete(map);
+
+ trace_regcache_sync(map->dev, name, "stop region");
+
return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
@@ -631,8 +643,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
map->cache_bypass = 1;
- ret = _regmap_raw_write(map, base, *data, count * val_bytes,
- false);
+ ret = _regmap_raw_write(map, base, *data, count * val_bytes);
map->cache_bypass = 0;
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index de11ecaf3833..c5471cd6ebb7 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -15,10 +15,19 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
+#include <linux/list.h>
#include "internal.h"
+struct regmap_debugfs_node {
+ struct regmap *map;
+ const char *name;
+ struct list_head link;
+};
+
static struct dentry *regmap_debugfs_root;
+static LIST_HEAD(regmap_debugfs_early_list);
+static DEFINE_MUTEX(regmap_debugfs_early_lock);
/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
@@ -465,6 +474,20 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
struct rb_node *next;
struct regmap_range_node *range_node;
+ /* If we don't have the debugfs root yet, postpone init */
+ if (!regmap_debugfs_root) {
+ struct regmap_debugfs_node *node;
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return;
+ node->map = map;
+ node->name = name;
+ mutex_lock(&regmap_debugfs_early_lock);
+ list_add(&node->link, &regmap_debugfs_early_list);
+ mutex_unlock(&regmap_debugfs_early_lock);
+ return;
+ }
+
INIT_LIST_HEAD(&map->debugfs_off_cache);
mutex_init(&map->cache_lock);
@@ -519,18 +542,42 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
void regmap_debugfs_exit(struct regmap *map)
{
- debugfs_remove_recursive(map->debugfs);
- mutex_lock(&map->cache_lock);
- regmap_debugfs_free_dump_cache(map);
- mutex_unlock(&map->cache_lock);
- kfree(map->debugfs_name);
+ if (map->debugfs) {
+ debugfs_remove_recursive(map->debugfs);
+ mutex_lock(&map->cache_lock);
+ regmap_debugfs_free_dump_cache(map);
+ mutex_unlock(&map->cache_lock);
+ kfree(map->debugfs_name);
+ } else {
+ struct regmap_debugfs_node *node, *tmp;
+
+ mutex_lock(&regmap_debugfs_early_lock);
+ list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
+ link) {
+ if (node->map == map) {
+ list_del(&node->link);
+ kfree(node);
+ }
+ }
+ mutex_unlock(&regmap_debugfs_early_lock);
+ }
}
void regmap_debugfs_initcall(void)
{
+ struct regmap_debugfs_node *node, *tmp;
+
regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
if (!regmap_debugfs_root) {
pr_warn("regmap: Failed to create debugfs root\n");
return;
}
+
+ mutex_lock(&regmap_debugfs_early_lock);
+ list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
+ regmap_debugfs_init(node->map, node->name);
+ list_del(&node->link);
+ kfree(node);
+ }
+ mutex_unlock(&regmap_debugfs_early_lock);
}
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index d10456ffd811..763c60d3d277 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -105,6 +105,22 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
"Failed to sync wakes in %x: %d\n",
reg, ret);
}
+
+ if (!d->chip->init_ack_masked)
+ continue;
+ /*
+ * Ack all the masked interrupts unconditionally; otherwise a masked
+ * interrupt that hasn't been acked will be ignored in the irq
+ * handler and may introduce an irq storm.
+ */
+ if (d->mask_buf[i] && d->chip->ack_base) {
+ reg = d->chip->ack_base +
+ (i * map->reg_stride * d->irq_reg_stride);
+ ret = regmap_write(map, reg, d->mask_buf[i]);
+ if (ret != 0)
+ dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
+ reg, ret);
+ }
}
if (d->chip->runtime_pm)
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 4c506bd940f3..37f12ae7aada 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -73,7 +73,8 @@ static int regmap_spi_async_write(void *context,
spi_message_init(&async->m);
spi_message_add_tail(&async->t[0], &async->m);
- spi_message_add_tail(&async->t[1], &async->m);
+ if (val)
+ spi_message_add_tail(&async->t[1], &async->m);
async->m.complete = regmap_spi_complete;
async->m.context = async;
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
new file mode 100644
index 000000000000..ac2391013db1
--- /dev/null
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -0,0 +1,90 @@
+/*
+ * Register map access API - SPMI support
+ *
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * Based on regmap-i2c.c:
+ * Copyright 2011 Wolfson Microelectronics plc
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/regmap.h>
+#include <linux/spmi.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static int regmap_spmi_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ BUG_ON(reg_size != 2);
+ return spmi_ext_register_readl(context, *(u16 *)reg,
+ val, val_size);
+}
+
+static int regmap_spmi_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ BUG_ON(reg_size != 2);
+ return spmi_ext_register_writel(context, *(u16 *)reg, val, val_size);
+}
+
+static int regmap_spmi_write(void *context, const void *data,
+ size_t count)
+{
+ BUG_ON(count < 2);
+ return regmap_spmi_gather_write(context, data, 2, data + 2, count - 2);
+}
+
+static struct regmap_bus regmap_spmi = {
+ .read = regmap_spmi_read,
+ .write = regmap_spmi_write,
+ .gather_write = regmap_spmi_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+/**
+ * regmap_init_spmi(): Initialize register map
+ *
+ * @sdev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_spmi(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return regmap_init(&sdev->dev, &regmap_spmi, sdev, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_spmi);
+
+/**
+ * devm_regmap_init_spmi(): Initialise managed register map
+ *
+ * @sdev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_spmi(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return devm_regmap_init(&sdev->dev, &regmap_spmi, sdev, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_spmi);
+
+MODULE_LICENSE("GPL");
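A minimal sketch of how a hypothetical SPMI slave driver might use the new bus support; foo_spmi_regmap_config and foo_spmi_probe are assumed names, the 16-bit register width matches the BUG_ON(reg_size != 2) checks above, and the 8-bit value width is an assumption:

#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/spmi.h>

static const struct regmap_config foo_spmi_regmap_config = {
        .reg_bits = 16,         /* SPMI extended register address */
        .val_bits = 8,          /* assumed register value width */
};

static int foo_spmi_probe(struct spmi_device *sdev)
{
        struct regmap *regmap;

        /* managed variant; the regmap is freed automatically on detach */
        regmap = devm_regmap_init_spmi(sdev, &foo_spmi_regmap_config);
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);

        dev_set_drvdata(&sdev->dev, regmap);
        return 0;
}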
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7d689a15c500..9c021d9cace0 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -42,15 +42,6 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
static int _regmap_bus_raw_write(void *context, unsigned int reg,
unsigned int val);
-static void async_cleanup(struct work_struct *work)
-{
- struct regmap_async *async = container_of(work, struct regmap_async,
- cleanup);
-
- kfree(async->work_buf);
- kfree(async);
-}
-
bool regmap_reg_in_ranges(unsigned int reg,
const struct regmap_range *ranges,
unsigned int nranges)
@@ -465,6 +456,7 @@ struct regmap *regmap_init(struct device *dev,
spin_lock_init(&map->async_lock);
INIT_LIST_HEAD(&map->async_list);
+ INIT_LIST_HEAD(&map->async_free);
init_waitqueue_head(&map->async_waitq);
if (config->read_flag_mask || config->write_flag_mask) {
@@ -821,6 +813,8 @@ static void regmap_field_init(struct regmap_field *rm_field,
rm_field->reg = reg_field.reg;
rm_field->shift = reg_field.lsb;
rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
+ rm_field->id_size = reg_field.id_size;
+ rm_field->id_offset = reg_field.id_offset;
}
/**
@@ -942,12 +936,22 @@ EXPORT_SYMBOL_GPL(regmap_reinit_cache);
*/
void regmap_exit(struct regmap *map)
{
+ struct regmap_async *async;
+
regcache_exit(map);
regmap_debugfs_exit(map);
regmap_range_exit(map);
if (map->bus && map->bus->free_context)
map->bus->free_context(map->bus_context);
kfree(map->work_buf);
+ while (!list_empty(&map->async_free)) {
+ async = list_first_entry_or_null(&map->async_free,
+ struct regmap_async,
+ list);
+ list_del(&async->list);
+ kfree(async->work_buf);
+ kfree(async);
+ }
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
@@ -1039,7 +1043,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
}
int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len, bool async)
+ const void *val, size_t val_len)
{
struct regmap_range_node *range;
unsigned long flags;
@@ -1091,7 +1095,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
dev_dbg(map->dev, "Writing window %d/%zu\n",
win_residue, val_len / map->format.val_bytes);
ret = _regmap_raw_write(map, reg, val, win_residue *
- map->format.val_bytes, async);
+ map->format.val_bytes);
if (ret != 0)
return ret;
@@ -1114,49 +1118,72 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
u8[0] |= map->write_flag_mask;
- if (async && map->bus->async_write) {
- struct regmap_async *async = map->bus->async_alloc();
- if (!async)
- return -ENOMEM;
+ /*
+ * Essentially all I/O mechanisms will be faster with a single
+ * buffer to write. Since register syncs often generate raw
+ * writes of single registers, optimise that case.
+ */
+ if (val != work_val && val_len == map->format.val_bytes) {
+ memcpy(work_val, val, map->format.val_bytes);
+ val = work_val;
+ }
+
+ if (map->async && map->bus->async_write) {
+ struct regmap_async *async;
trace_regmap_async_write_start(map->dev, reg, val_len);
- async->work_buf = kzalloc(map->format.buf_size,
- GFP_KERNEL | GFP_DMA);
- if (!async->work_buf) {
- kfree(async);
- return -ENOMEM;
+ spin_lock_irqsave(&map->async_lock, flags);
+ async = list_first_entry_or_null(&map->async_free,
+ struct regmap_async,
+ list);
+ if (async)
+ list_del(&async->list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ if (!async) {
+ async = map->bus->async_alloc();
+ if (!async)
+ return -ENOMEM;
+
+ async->work_buf = kzalloc(map->format.buf_size,
+ GFP_KERNEL | GFP_DMA);
+ if (!async->work_buf) {
+ kfree(async);
+ return -ENOMEM;
+ }
}
- INIT_WORK(&async->cleanup, async_cleanup);
async->map = map;
/* If the caller supplied the value we can use it safely. */
memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
map->format.reg_bytes + map->format.val_bytes);
- if (val == work_val)
- val = async->work_buf + map->format.pad_bytes +
- map->format.reg_bytes;
spin_lock_irqsave(&map->async_lock, flags);
list_add_tail(&async->list, &map->async_list);
spin_unlock_irqrestore(&map->async_lock, flags);
- ret = map->bus->async_write(map->bus_context, async->work_buf,
- map->format.reg_bytes +
- map->format.pad_bytes,
- val, val_len, async);
+ if (val != work_val)
+ ret = map->bus->async_write(map->bus_context,
+ async->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ val, val_len, async);
+ else
+ ret = map->bus->async_write(map->bus_context,
+ async->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes +
+ val_len, NULL, 0, async);
if (ret != 0) {
dev_err(map->dev, "Failed to schedule write: %d\n",
ret);
spin_lock_irqsave(&map->async_lock, flags);
- list_del(&async->list);
+ list_move(&async->list, &map->async_free);
spin_unlock_irqrestore(&map->async_lock, flags);
-
- kfree(async->work_buf);
- kfree(async);
}
return ret;
@@ -1253,7 +1280,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
map->work_buf +
map->format.reg_bytes +
map->format.pad_bytes,
- map->format.val_bytes, false);
+ map->format.val_bytes);
}
static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1318,6 +1345,37 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
EXPORT_SYMBOL_GPL(regmap_write);
/**
+ * regmap_write_async(): Write a value to a single register asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ if (reg % map->reg_stride)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ map->async = true;
+
+ ret = _regmap_write(map, reg, val);
+
+ map->async = false;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_async);
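
A hedged sketch of the intended call pattern (register addresses and the helper name are made up): queue the writes without blocking on the bus, then flush with regmap_async_complete(), which remains the synchronisation point, before the result is relied upon.

        /* Illustrative only: schedule two register writes, then wait for the bus to drain. */
        static int hyp_enable_outputs(struct regmap *map)
        {
                int ret;

                ret = regmap_write_async(map, 0x20, 0x01);      /* 0x20/0x21 are made-up registers */
                if (ret)
                        return ret;

                ret = regmap_write_async(map, 0x21, 0xff);
                if (ret)
                        return ret;

                /* Block until all outstanding async writes for this map complete. */
                return regmap_async_complete(map);
        }
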
+
+/**
* regmap_raw_write(): Write raw values to one or more registers
*
* @map: Register map to write to
@@ -1345,7 +1403,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
map->lock(map->lock_arg);
- ret = _regmap_raw_write(map, reg, val, val_len, false);
+ ret = _regmap_raw_write(map, reg, val, val_len);
map->unlock(map->lock_arg);
@@ -1369,6 +1427,74 @@ int regmap_field_write(struct regmap_field *field, unsigned int val)
}
EXPORT_SYMBOL_GPL(regmap_field_write);
+/**
+ * regmap_field_update_bits(): Perform a read/modify/write cycle
+ * on the register field
+ *
+ * @field: Register field to write to
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
+{
+ mask = (mask << field->shift) & field->mask;
+
+ return regmap_update_bits(field->regmap, field->reg,
+ mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_field_update_bits);
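
A brief sketch of the field-update helper in use, assuming the pre-existing REG_FIELD() and devm_regmap_field_alloc() helpers; the register address and bit positions are illustrative.

        /* Sketch: update one bit inside a two-bit field at bits [3:2] of register 0x30. */
        static const struct reg_field hyp_en_field = REG_FIELD(0x30, 2, 3);

        static int hyp_clear_enable(struct device *dev, struct regmap *map)
        {
                struct regmap_field *field;

                field = devm_regmap_field_alloc(dev, map, hyp_en_field);
                if (IS_ERR(field))
                        return PTR_ERR(field);

                /* Clear bit 0 of the field without touching bit 1. */
                return regmap_field_update_bits(field, BIT(0), 0);
        }
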
+
+/**
+ * regmap_fields_write(): Write a value to a single register field with port ID
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_write(struct regmap_field *field, unsigned int id,
+ unsigned int val)
+{
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ return regmap_update_bits(field->regmap,
+ field->reg + (field->id_offset * id),
+ field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_write);
+
+/**
+ * regmap_fields_update_bits(): Perform a read/modify/write cycle
+ * on the register field
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ mask = (mask << field->shift) & field->mask;
+
+ return regmap_update_bits(field->regmap,
+ field->reg + (field->id_offset * id),
+ mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
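
The new id_size/id_offset members let a single regmap_field describe a register that repeats once per port. A sketch with made-up values follows; regmap_fields_update_bits() works the same way but additionally takes a mask.

        /* Sketch: one 1-bit field replicated across four ports, registers 0x10 apart. */
        static const struct reg_field hyp_mute_field = {
                .reg = 0x100, .lsb = 0, .msb = 0,
                .id_size = 4,           /* four port instances */
                .id_offset = 0x10,      /* register stride between instances */
        };

        static int hyp_mute_port(struct device *dev, struct regmap *map, unsigned int port)
        {
                struct regmap_field *field;

                field = devm_regmap_field_alloc(dev, map, hyp_mute_field);
                if (IS_ERR(field))
                        return PTR_ERR(field);

                /* Port 2, for example, lands on register 0x100 + 2 * 0x10 = 0x120. */
                return regmap_fields_write(field, port, 1);
        }
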
+
/*
* regmap_bulk_write(): Write multiple registers to the device
*
@@ -1418,16 +1544,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
*/
if (map->use_single_rw) {
for (i = 0; i < val_count; i++) {
- ret = regmap_raw_write(map,
- reg + (i * map->reg_stride),
- val + (i * val_bytes),
- val_bytes);
+ ret = _regmap_raw_write(map,
+ reg + (i * map->reg_stride),
+ val + (i * val_bytes),
+ val_bytes);
if (ret != 0)
return ret;
}
} else {
- ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
- false);
+ ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
}
if (val_bytes != 1)
@@ -1439,6 +1564,47 @@ out:
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
+/*
+ * regmap_multi_reg_write(): Write multiple registers to the device
+ *
+ * where the set of registers is supplied in any order
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * This function is intended to be used for writing a large block of data
+ * atomically to the device in a single transfer for those I2C client devices
+ * that implement this alternative block write mode.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_multi_reg_write(struct regmap *map, struct reg_default *regs,
+ int num_regs)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ if (reg % map->reg_stride)
+ return -EINVAL;
+ }
+
+ map->lock(map->lock_arg);
+
+ for (i = 0; i < num_regs; i++) {
+ ret = _regmap_write(map, regs[i].reg, regs[i].def);
+ if (ret != 0)
+ goto out;
+ }
+out:
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
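
A usage sketch with hypothetical addresses; note that, as implemented in this hunk, the "single transfer" is currently a locked sequence of individual writes rather than a true bulk transfer.

        /* Sketch: apply a small, unordered set of register writes in one call. */
        static struct reg_default hyp_init_seq[] = {
                { .reg = 0x24, .def = 0x80 },
                { .reg = 0x10, .def = 0x01 },
                { .reg = 0x18, .def = 0xff },
        };

        static int hyp_apply_init(struct regmap *map)
        {
                return regmap_multi_reg_write(map, hyp_init_seq, ARRAY_SIZE(hyp_init_seq));
        }
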
+
/**
* regmap_raw_write_async(): Write raw values to one or more registers
* asynchronously
@@ -1473,7 +1639,11 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
map->lock(map->lock_arg);
- ret = _regmap_raw_write(map, reg, val, val_len, true);
+ map->async = true;
+
+ ret = _regmap_raw_write(map, reg, val, val_len);
+
+ map->async = false;
map->unlock(map->lock_arg);
@@ -1677,6 +1847,39 @@ int regmap_field_read(struct regmap_field *field, unsigned int *val)
EXPORT_SYMBOL_GPL(regmap_field_read);
/**
+ * regmap_fields_read(): Read a value from a single register field with port ID
+ *
+ * @field: Register field to read from
+ * @id: port ID
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+ unsigned int *val)
+{
+ int ret;
+ unsigned int reg_val;
+
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ ret = regmap_read(field->regmap,
+ field->reg + (field->id_offset * id),
+ &reg_val);
+ if (ret != 0)
+ return ret;
+
+ reg_val &= field->mask;
+ reg_val >>= field->shift;
+ *val = reg_val;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_fields_read);
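
A short read-back sketch for the per-port variant, reusing a field allocated as in the write example above; names are illustrative.

        /* Sketch: read the per-port field back; the core masks and shifts the raw value. */
        static int hyp_port_is_muted(struct regmap_field *field, unsigned int port, bool *muted)
        {
                unsigned int val;
                int ret;

                ret = regmap_fields_read(field, port, &val);
                if (ret)
                        return ret;

                *muted = val != 0;
                return 0;
        }
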
+
+/**
* regmap_bulk_read(): Read multiple registers from the device
*
* @map: Register map to write to
@@ -1788,6 +1991,41 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
EXPORT_SYMBOL_GPL(regmap_update_bits);
/**
+ * regmap_update_bits_async: Perform a read/modify/write cycle on the register
+ * map asynchronously
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * With most buses the read must be done synchronously so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ bool change;
+ int ret;
+
+ map->lock(map->lock_arg);
+
+ map->async = true;
+
+ ret = _regmap_update_bits(map, reg, mask, val, &change);
+
+ map->async = false;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_async);
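
A sketch of the expected pattern (register and bit are hypothetical): the read half of the cycle is satisfied from the cache, only the resulting write is scheduled, and regmap_async_complete() flushes it when the caller actually depends on the hardware state.

        /* Sketch: cached read/modify/write that only schedules the resulting bus write. */
        static int hyp_power_up(struct regmap *map)
        {
                int ret;

                ret = regmap_update_bits_async(map, 0x02, BIT(7), BIT(7));
                if (ret)
                        return ret;

                /* Flush before the caller relies on the device being powered. */
                return regmap_async_complete(map);
        }
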
+
+/**
* regmap_update_bits_check: Perform a read/modify/write cycle on the
* register map and report if updated
*
@@ -1812,6 +2050,43 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);
+/**
+ * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
+ * register map asynchronously and report if
+ * updated
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ *
+ * With most buses the read must be done synchronously so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ int ret;
+
+ map->lock(map->lock_arg);
+
+ map->async = true;
+
+ ret = _regmap_update_bits(map, reg, mask, val, change);
+
+ map->async = false;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
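
The check variant additionally reports whether a write was scheduled at all, which lets a caller skip the flush when the register already held the desired value; a sketch with the same hypothetical register.

        /* Sketch: only wait for the bus if a write was actually scheduled. */
        static int hyp_power_down(struct regmap *map)
        {
                bool changed;
                int ret;

                ret = regmap_update_bits_check_async(map, 0x02, BIT(7), 0, &changed);
                if (ret || !changed)
                        return ret;

                return regmap_async_complete(map);
        }
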
+
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
struct regmap *map = async->map;
@@ -1820,8 +2095,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
trace_regmap_async_io_complete(map->dev);
spin_lock(&map->async_lock);
-
- list_del(&async->list);
+ list_move(&async->list, &map->async_free);
wake = list_empty(&map->async_list);
if (ret != 0)
@@ -1829,8 +2103,6 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
spin_unlock(&map->async_lock);
- schedule_work(&async->cleanup);
-
if (wake)
wake_up(&map->async_waitq);
}
@@ -1906,6 +2178,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
bypass = map->cache_bypass;
map->cache_bypass = true;
+ map->async = true;
/* Write out first; it's useful to apply even if we fail later. */
for (i = 0; i < num_regs; i++) {
@@ -1929,10 +2202,13 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
}
out:
+ map->async = false;
map->cache_bypass = bypass;
map->unlock(map->lock_arg);
+ regmap_async_complete(map);
+
return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
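
Callers of regmap_register_patch() are unaffected by this change; the patch is now issued asynchronously where the bus allows it and flushed internally via regmap_async_complete() before returning. A sketch with made-up errata values:

        /* Sketch: the caller side is unchanged by the async conversion above. */
        static const struct reg_default hyp_errata[] = {
                { .reg = 0x40, .def = 0x12 },
                { .reg = 0x41, .def = 0x34 },
        };

        static int hyp_apply_errata(struct regmap *map)
        {
                return regmap_register_patch(map, hyp_errata, ARRAY_SIZE(hyp_errata));
        }
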
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index a355e63a3838..6fb98b53533f 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -188,8 +188,11 @@ static int bcma_host_pci_probe(struct pci_dev *dev,
pci_write_config_dword(dev, 0x40, val & 0xffff00ff);
/* SSB needed additional powering up, do we have any AMBA PCI cards? */
- if (!pci_is_pcie(dev))
- bcma_err(bus, "PCI card detected, report problems.\n");
+ if (!pci_is_pcie(dev)) {
+ bcma_err(bus, "PCI card detected, they are not supported.\n");
+ err = -ENXIO;
+ goto err_pci_release_regions;
+ }
/* Map MMIO */
err = -ENOMEM;
@@ -269,6 +272,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 90ee350442a9..e15430a82e90 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -30,28 +30,37 @@ static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, cha
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%03X\n", core->id.manuf);
}
+static DEVICE_ATTR_RO(manuf);
+
static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%03X\n", core->id.id);
}
+static DEVICE_ATTR_RO(id);
+
static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%02X\n", core->id.rev);
}
+static DEVICE_ATTR_RO(rev);
+
static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
return sprintf(buf, "0x%X\n", core->id.class);
}
-static struct device_attribute bcma_device_attrs[] = {
- __ATTR_RO(manuf),
- __ATTR_RO(id),
- __ATTR_RO(rev),
- __ATTR_RO(class),
- __ATTR_NULL,
+static DEVICE_ATTR_RO(class);
+
+static struct attribute *bcma_device_attrs[] = {
+ &dev_attr_manuf.attr,
+ &dev_attr_id.attr,
+ &dev_attr_rev.attr,
+ &dev_attr_class.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(bcma_device);
static struct bus_type bcma_bus_type = {
.name = "bcma",
@@ -59,7 +68,7 @@ static struct bus_type bcma_bus_type = {
.probe = bcma_device_probe,
.remove = bcma_device_remove,
.uevent = bcma_device_uevent,
- .dev_attrs = bcma_device_attrs,
+ .dev_groups = bcma_device_groups,
};
static u16 bcma_cc_core_id(struct bcma_bus *bus)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index e07a5fd58ad7..86b9f37d102e 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -15,6 +15,9 @@ menuconfig BLK_DEV
if BLK_DEV
+config BLK_DEV_NULL_BLK
+ tristate "Null test block driver"
+
config BLK_DEV_FD
tristate "Normal floppy disk support"
depends on ARCH_MAY_HAVE_PC_FDC
@@ -107,7 +110,7 @@ source "drivers/block/mtip32xx/Kconfig"
config BLK_CPQ_DA
tristate "Compaq SMART2 support"
- depends on PCI && VIRT_TO_BUS
+ depends on PCI && VIRT_TO_BUS && 0
help
This is the driver for Compaq Smart Array controllers. Everyone
using these boards should say Y here. See the file
@@ -316,6 +319,16 @@ config BLK_DEV_NVME
To compile this driver as a module, choose M here: the
module will be called nvme.
+config BLK_DEV_SKD
+ tristate "STEC S1120 Block Driver"
+ depends on PCI
+ depends on 64BIT
+ ---help---
+ Saying Y or M here will enable support for the
+ STEC, Inc. S1120 PCIe SSD.
+
+ Use device /dev/skd$N and /dev/skd$Np$M.
+
config BLK_DEV_OSD
tristate "OSD object-as-blkdev support"
depends on SCSI_OSD_ULD
@@ -505,7 +518,7 @@ config VIRTIO_BLK
config BLK_DEV_HD
bool "Very old hard disk (MFM/RLL/IDE) driver"
depends on HAVE_IDE
- depends on !ARM || ARCH_RPC || ARCH_SHARK || BROKEN
+ depends on !ARM || ARCH_RPC || BROKEN
help
This is a very old hard disk driver that lacks the enhanced
functionality of the newer ones.
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index ca07399a8d99..8cc98cd0d4a8 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_MG_DISK) += mg_disk.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
+obj-$(CONFIG_BLK_DEV_SKD) += skd.o
obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o
obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
@@ -41,6 +42,8 @@ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
+obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
nvme-y := nvme-core.o nvme-scsi.o
+skd-y := skd_main.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 14a9d1912318..9220f8e833d0 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -100,11 +100,8 @@ enum {
struct buf {
ulong nframesout;
- ulong resid;
- ulong bv_resid;
- sector_t sector;
struct bio *bio;
- struct bio_vec *bv;
+ struct bvec_iter iter;
struct request *rq;
};
@@ -120,13 +117,10 @@ struct frame {
ulong waited;
ulong waited_total;
struct aoetgt *t; /* parent target I belong to */
- sector_t lba;
struct sk_buff *skb; /* command skb freed on module exit */
struct sk_buff *r_skb; /* response skb for async processing */
struct buf *buf;
- struct bio_vec *bv;
- ulong bcnt;
- ulong bv_off;
+ struct bvec_iter iter;
char flags;
};
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d2515435e23f..8184451b57c0 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -196,8 +196,7 @@ aoe_freetframe(struct frame *f)
t = f->t;
f->buf = NULL;
- f->lba = 0;
- f->bv = NULL;
+ memset(&f->iter, 0, sizeof(f->iter));
f->r_skb = NULL;
f->flags = 0;
list_add(&f->head, &t->ffree);
@@ -295,21 +294,14 @@ newframe(struct aoedev *d)
}
static void
-skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
+skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
{
int frag = 0;
- ulong fcnt;
-loop:
- fcnt = bv->bv_len - (off - bv->bv_offset);
- if (fcnt > cnt)
- fcnt = cnt;
- skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
- cnt -= fcnt;
- if (cnt <= 0)
- return;
- bv++;
- off = bv->bv_offset;
- goto loop;
+ struct bio_vec bv;
+
+ __bio_for_each_segment(bv, bio, iter, iter)
+ skb_fill_page_desc(skb, frag++, bv.bv_page,
+ bv.bv_offset, bv.bv_len);
}
static void
@@ -346,12 +338,10 @@ ata_rw_frameinit(struct frame *f)
t->nout++;
f->waited = 0;
f->waited_total = 0;
- if (f->buf)
- f->lba = f->buf->sector;
/* set up ata header */
- ah->scnt = f->bcnt >> 9;
- put_lba(ah, f->lba);
+ ah->scnt = f->iter.bi_size >> 9;
+ put_lba(ah, f->iter.bi_sector);
if (t->d->flags & DEVFL_EXT) {
ah->aflags |= AOEAFL_EXT;
} else {
@@ -360,11 +350,11 @@ ata_rw_frameinit(struct frame *f)
ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
}
if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
- skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
+ skb_fillup(skb, f->buf->bio, f->iter);
ah->aflags |= AOEAFL_WRITE;
- skb->len += f->bcnt;
- skb->data_len = f->bcnt;
- skb->truesize += f->bcnt;
+ skb->len += f->iter.bi_size;
+ skb->data_len = f->iter.bi_size;
+ skb->truesize += f->iter.bi_size;
t->wpkts++;
} else {
t->rpkts++;
@@ -382,7 +372,6 @@ aoecmd_ata_rw(struct aoedev *d)
struct buf *buf;
struct sk_buff *skb;
struct sk_buff_head queue;
- ulong bcnt, fbcnt;
buf = nextbuf(d);
if (buf == NULL)
@@ -390,39 +379,22 @@ aoecmd_ata_rw(struct aoedev *d)
f = newframe(d);
if (f == NULL)
return 0;
- bcnt = d->maxbcnt;
- if (bcnt == 0)
- bcnt = DEFAULTBCNT;
- if (bcnt > buf->resid)
- bcnt = buf->resid;
- fbcnt = bcnt;
- f->bv = buf->bv;
- f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
- do {
- if (fbcnt < buf->bv_resid) {
- buf->bv_resid -= fbcnt;
- buf->resid -= fbcnt;
- break;
- }
- fbcnt -= buf->bv_resid;
- buf->resid -= buf->bv_resid;
- if (buf->resid == 0) {
- d->ip.buf = NULL;
- break;
- }
- buf->bv++;
- buf->bv_resid = buf->bv->bv_len;
- WARN_ON(buf->bv_resid == 0);
- } while (fbcnt);
/* initialize the headers & frame */
f->buf = buf;
- f->bcnt = bcnt;
- ata_rw_frameinit(f);
+ f->iter = buf->iter;
+ f->iter.bi_size = min_t(unsigned long,
+ d->maxbcnt ?: DEFAULTBCNT,
+ f->iter.bi_size);
+ bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
+
+ if (!buf->iter.bi_size)
+ d->ip.buf = NULL;
/* mark all tracking fields and load out */
buf->nframesout += 1;
- buf->sector += bcnt >> 9;
+
+ ata_rw_frameinit(f);
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
@@ -613,10 +585,7 @@ reassign_frame(struct frame *f)
skb = nf->skb;
nf->skb = f->skb;
nf->buf = f->buf;
- nf->bcnt = f->bcnt;
- nf->lba = f->lba;
- nf->bv = f->bv;
- nf->bv_off = f->bv_off;
+ nf->iter = f->iter;
nf->waited = 0;
nf->waited_total = f->waited_total;
nf->sent = f->sent;
@@ -648,19 +617,19 @@ probe(struct aoetgt *t)
}
f->flags |= FFL_PROBE;
ifrotate(t);
- f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
+ f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
ata_rw_frameinit(f);
skb = f->skb;
- for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
+ for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
if (n < PAGE_SIZE)
m = n;
else
m = PAGE_SIZE;
skb_fill_page_desc(skb, frag, empty_page, 0, m);
}
- skb->len += f->bcnt;
- skb->data_len = f->bcnt;
- skb->truesize += f->bcnt;
+ skb->len += f->iter.bi_size;
+ skb->data_len = f->iter.bi_size;
+ skb->truesize += f->iter.bi_size;
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
@@ -897,15 +866,15 @@ rqbiocnt(struct request *r)
static void
bio_pageinc(struct bio *bio)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
struct page *page;
- int i;
+ struct bvec_iter iter;
- bio_for_each_segment(bv, bio, i) {
+ bio_for_each_segment(bv, bio, iter) {
/* Non-zero page count for non-head members of
* compound pages is no longer allowed by the kernel.
*/
- page = compound_trans_head(bv->bv_page);
+ page = compound_trans_head(bv.bv_page);
atomic_inc(&page->_count);
}
}
@@ -913,12 +882,12 @@ bio_pageinc(struct bio *bio)
static void
bio_pagedec(struct bio *bio)
{
- struct bio_vec *bv;
struct page *page;
- int i;
+ struct bio_vec bv;
+ struct bvec_iter iter;
- bio_for_each_segment(bv, bio, i) {
- page = compound_trans_head(bv->bv_page);
+ bio_for_each_segment(bv, bio, iter) {
+ page = compound_trans_head(bv.bv_page);
atomic_dec(&page->_count);
}
}
@@ -929,12 +898,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
memset(buf, 0, sizeof(*buf));
buf->rq = rq;
buf->bio = bio;
- buf->resid = bio->bi_size;
- buf->sector = bio->bi_sector;
+ buf->iter = bio->bi_iter;
bio_pageinc(bio);
- buf->bv = bio_iovec(bio);
- buf->bv_resid = buf->bv->bv_len;
- WARN_ON(buf->bv_resid == 0);
}
static struct buf *
@@ -1119,24 +1084,18 @@ gettgt(struct aoedev *d, char *addr)
}
static void
-bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
+bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
{
- ulong fcnt;
- char *p;
int soff = 0;
-loop:
- fcnt = bv->bv_len - (off - bv->bv_offset);
- if (fcnt > cnt)
- fcnt = cnt;
- p = page_address(bv->bv_page) + off;
- skb_copy_bits(skb, soff, p, fcnt);
- soff += fcnt;
- cnt -= fcnt;
- if (cnt <= 0)
- return;
- bv++;
- off = bv->bv_offset;
- goto loop;
+ struct bio_vec bv;
+
+ iter.bi_size = cnt;
+
+ __bio_for_each_segment(bv, bio, iter, iter) {
+ char *p = page_address(bv.bv_page) + bv.bv_offset;
+ skb_copy_bits(skb, soff, p, bv.bv_len);
+ soff += bv.bv_len;
+ }
}
void
@@ -1152,7 +1111,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
do {
bio = rq->bio;
bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
- } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
+ } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
/* cf. http://lkml.org/lkml/2006/10/31/28 */
if (!fastfail)
@@ -1229,7 +1188,15 @@ noskb: if (buf)
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
break;
}
- bvcpy(f->bv, f->bv_off, skb, n);
+ if (n > f->iter.bi_size) {
+ pr_err_ratelimited("%s e%ld.%d. bytes=%ld need=%u\n",
+ "aoe: too-large data size in read from",
+ (long) d->aoemajor, d->aoeminor,
+ n, f->iter.bi_size);
+ clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+ break;
+ }
+ bvcpy(skb, f->buf->bio, f->iter, n);
case ATA_CMD_PIO_WRITE:
case ATA_CMD_PIO_WRITE_EXT:
spin_lock_irq(&d->lock);
@@ -1272,7 +1239,7 @@ out:
aoe_freetframe(f);
- if (buf && --buf->nframesout == 0 && buf->resid == 0)
+ if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
aoe_end_buf(d, buf);
spin_unlock_irq(&d->lock);
@@ -1727,7 +1694,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
{
if (buf == NULL)
return;
- buf->resid = 0;
+ buf->iter.bi_size = 0;
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
if (buf->nframesout == 0)
aoe_end_buf(d, buf);
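
The aoe changes above, and the brd/drbd/loop/floppy conversions that follow, all switch to the same immutable-biovec idiom: iterate with a struct bvec_iter and a by-value struct bio_vec, and read the position and size from bio->bi_iter instead of bio->bi_sector/bi_size. A minimal sketch of the idiom; the function and names are illustrative and not part of any patch here.

        /* Sketch of the bvec_iter iteration pattern used throughout these conversions. */
        static void hyp_count_bytes(struct bio *bio)
        {
                struct bio_vec bvec;            /* a copy of each segment, not a pointer */
                struct bvec_iter iter;
                unsigned int bytes = 0;

                bio_for_each_segment(bvec, bio, iter)
                        bytes += bvec.bv_len;

                pr_debug("bio at sector %llu carries %u bytes\n",
                         (unsigned long long)bio->bi_iter.bi_sector, bytes);
        }
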
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 9bf4371755f2..e73b85cf0756 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -328,18 +328,18 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
struct block_device *bdev = bio->bi_bdev;
struct brd_device *brd = bdev->bd_disk->private_data;
int rw;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
sector_t sector;
- int i;
+ struct bvec_iter iter;
int err = -EIO;
- sector = bio->bi_sector;
+ sector = bio->bi_iter.bi_sector;
if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
goto out;
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
err = 0;
- discard_from_brd(brd, sector, bio->bi_size);
+ discard_from_brd(brd, sector, bio->bi_iter.bi_size);
goto out;
}
@@ -347,10 +347,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
if (rw == READA)
rw = READ;
- bio_for_each_segment(bvec, bio, i) {
- unsigned int len = bvec->bv_len;
- err = brd_do_bvec(brd, bvec->bv_page, len,
- bvec->bv_offset, rw, sector);
+ bio_for_each_segment(bvec, bio, iter) {
+ unsigned int len = bvec.bv_len;
+ err = brd_do_bvec(brd, bvec.bv_page, len,
+ bvec.bv_offset, rw, sector);
if (err)
break;
sector += len >> SECTOR_SHIFT;
@@ -545,7 +545,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
mutex_lock(&brd_devices_mutex);
brd = brd_init_one(MINOR(dev) >> part_shift);
- kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
+ kobj = brd ? get_disk(brd->brd_disk) : NULL;
mutex_unlock(&brd_devices_mutex);
*part = 0;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index edfa2515bc86..0c004ac05811 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -5183,7 +5183,7 @@ reinit_after_soft_reset:
rebuild_lun_table(h, 1, 0);
cciss_engage_scsi(h);
h->busy_initializing = 0;
- return 1;
+ return 0;
clean4:
cciss_free_cmd_pool(h);
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 28c73ca320a8..a9b13f2cc420 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
err = -EIO;
if (bio_add_page(bio, page, size, 0) != size)
goto out;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b12c11ec4bd2..597f111df67b 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
} else
page = b->bm_pages[page_nr];
bio->bi_bdev = mdev->ldev->md_bdev;
- bio->bi_sector = on_disk_sector;
+ bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
bio_add_page(bio, page, len, 0);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 2d7f608d181c..0e06f0c5dd1e 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1474,7 +1474,8 @@ enum determine_dev_size {
DS_ERROR = -1,
DS_UNCHANGED = 0,
DS_SHRUNK = 1,
- DS_GREW = 2
+ DS_GREW = 2,
+ DS_GREW_FROM_ZERO = 3,
};
extern enum determine_dev_size
drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 55635edf563b..929468e1512a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1537,15 +1537,17 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
{
- struct bio_vec *bvec;
- int i;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+
/* hint all but last page with MSG_MORE */
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
int err;
- err = _drbd_no_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len,
- i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+ err = _drbd_no_send_page(mdev, bvec.bv_page,
+ bvec.bv_offset, bvec.bv_len,
+ bio_iter_last(bvec, iter)
+ ? 0 : MSG_MORE);
if (err)
return err;
}
@@ -1554,15 +1556,16 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
{
- struct bio_vec *bvec;
- int i;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+
/* hint all but last page with MSG_MORE */
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
int err;
- err = _drbd_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len,
- i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+ err = _drbd_send_page(mdev, bvec.bv_page,
+ bvec.bv_offset, bvec.bv_len,
+ bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
if (err)
return err;
}
@@ -2750,13 +2753,6 @@ int __init drbd_init(void)
return err;
}
- err = drbd_genl_register();
- if (err) {
- printk(KERN_ERR "drbd: unable to register generic netlink family\n");
- goto fail;
- }
-
-
register_reboot_notifier(&drbd_notifier);
/*
@@ -2767,6 +2763,15 @@ int __init drbd_init(void)
drbd_proc = NULL; /* play safe for drbd_cleanup */
idr_init(&minors);
+ rwlock_init(&global_state_lock);
+ INIT_LIST_HEAD(&drbd_tconns);
+
+ err = drbd_genl_register();
+ if (err) {
+ printk(KERN_ERR "drbd: unable to register generic netlink family\n");
+ goto fail;
+ }
+
err = drbd_create_mempools();
if (err)
goto fail;
@@ -2778,9 +2783,6 @@ int __init drbd_init(void)
goto fail;
}
- rwlock_init(&global_state_lock);
- INIT_LIST_HEAD(&drbd_tconns);
-
retry.wq = create_singlethread_workqueue("drbd-reissue");
if (!retry.wq) {
printk(KERN_ERR "drbd: unable to create retry workqueue\n");
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 8cc1e640f485..c706d50a8b06 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -955,7 +955,7 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
}
if (size > la_size_sect)
- rv = DS_GREW;
+ rv = la_size_sect ? DS_GREW : DS_GREW_FROM_ZERO;
if (size < la_size_sect)
rv = DS_SHRUNK;
@@ -1132,9 +1132,9 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
/* We may ignore peer limits if the peer is modern enough.
Because new from 8.3.8 onwards the peer can use multiple
BIOs for a single peer_request */
- if (mdev->state.conn >= C_CONNECTED) {
+ if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
if (mdev->tconn->agreed_pro_version < 94)
- peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
else if (mdev->tconn->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index cc29cd3bf78b..d073305ffd5e 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1333,7 +1333,7 @@ next_bio:
goto fail;
}
/* > peer_req->i.sector, unless this is the first bio */
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev;
bio->bi_rw = rw;
bio->bi_private = peer_req;
@@ -1353,7 +1353,7 @@ next_bio:
dev_err(DEV,
"bio_add_page failed for len=%u, "
"bi_vcnt=0 (bi_sector=%llu)\n",
- len, (unsigned long long)bio->bi_sector);
+ len, (uint64_t)bio->bi_iter.bi_sector);
err = -ENOSPC;
goto fail;
}
@@ -1595,9 +1595,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
sector_t sector, int data_size)
{
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
struct bio *bio;
- int dgs, err, i, expect;
+ int dgs, err, expect;
void *dig_in = mdev->tconn->int_dig_in;
void *dig_vv = mdev->tconn->int_dig_vv;
@@ -1615,13 +1616,13 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
mdev->recv_cnt += data_size>>9;
bio = req->master_bio;
- D_ASSERT(sector == bio->bi_sector);
+ D_ASSERT(sector == bio->bi_iter.bi_sector);
- bio_for_each_segment(bvec, bio, i) {
- void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
- expect = min_t(int, data_size, bvec->bv_len);
+ bio_for_each_segment(bvec, bio, iter) {
+ void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
+ expect = min_t(int, data_size, bvec.bv_len);
err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
- kunmap(bvec->bv_page);
+ kunmap(bvec.bv_page);
if (err)
return err;
data_size -= expect;
@@ -1890,29 +1891,11 @@ static u32 seq_max(u32 a, u32 b)
return seq_greater(a, b) ? a : b;
}
-static bool need_peer_seq(struct drbd_conf *mdev)
-{
- struct drbd_tconn *tconn = mdev->tconn;
- int tp;
-
- /*
- * We only need to keep track of the last packet_seq number of our peer
- * if we are in dual-primary mode and we have the resolve-conflicts flag set; see
- * handle_write_conflicts().
- */
-
- rcu_read_lock();
- tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
- rcu_read_unlock();
-
- return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
-}
-
static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
{
unsigned int newest_peer_seq;
- if (need_peer_seq(mdev)) {
+ if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) {
spin_lock(&mdev->peer_seq_lock);
newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
mdev->peer_seq = newest_peer_seq;
@@ -1972,22 +1955,31 @@ static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_s
{
DEFINE_WAIT(wait);
long timeout;
- int ret;
+ int ret = 0, tp;
- if (!need_peer_seq(mdev))
+ if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags))
return 0;
spin_lock(&mdev->peer_seq_lock);
for (;;) {
if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
- ret = 0;
break;
}
+
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
+
+ rcu_read_lock();
+ tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ rcu_read_unlock();
+
+ if (!tp)
+ break;
+
+ /* Only need to wait if two_primaries is enabled */
prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
spin_unlock(&mdev->peer_seq_lock);
rcu_read_lock();
@@ -2228,8 +2220,10 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
}
goto out_interrupted;
}
- } else
+ } else {
+ update_peer_seq(mdev, peer_seq);
spin_lock_irq(&mdev->tconn->req_lock);
+ }
list_add(&peer_req->w.list, &mdev->active_ee);
spin_unlock_irq(&mdev->tconn->req_lock);
@@ -4132,7 +4126,11 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
(unsigned int)bs.buf_len);
return -EIO;
}
- look_ahead >>= bits;
+ /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
+ if (likely(bits < 64))
+ look_ahead >>= bits;
+ else
+ look_ahead = 0;
have -= bits;
bits = bitstream_get_bits(&bs, &tmp, 64 - have);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index c24379ffd4e3..104a040f24de 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
req->epoch = 0;
drbd_clear_interval(&req->i);
- req->i.sector = bio_src->bi_sector;
- req->i.size = bio_src->bi_size;
+ req->i.sector = bio_src->bi_iter.bi_sector;
+ req->i.size = bio_src->bi_iter.bi_size;
req->i.local = true;
req->i.waiting = false;
@@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
/*
* what we "blindly" assume:
*/
- D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
+ D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
inc_ap_bio(mdev);
__drbd_make_request(mdev, bio, start_time);
@@ -1306,6 +1306,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
int backing_limit;
if (bio_size && get_ldev(mdev)) {
+ unsigned int max_hw_sectors = queue_max_hw_sectors(q);
struct request_queue * const b =
mdev->ldev->backing_bdev->bd_disk->queue;
if (b->merge_bvec_fn) {
@@ -1313,6 +1314,8 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
limit = min(limit, backing_limit);
}
put_ldev(mdev);
+ if ((limit >> 9) > max_hw_sectors)
+ limit = max_hw_sectors << 9;
}
return limit;
}
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 978cb1addc98..28e15d91197a 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
/* Short lived temporary struct on the stack.
* We could squirrel the error to be returned into
- * bio->bi_size, or similar. But that would be too ugly. */
+ * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
struct bio *bio;
int error;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 891c0ecaa292..84d3175d493a 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -313,8 +313,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
{
struct hash_desc desc;
struct scatterlist sg;
- struct bio_vec *bvec;
- int i;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
desc.tfm = tfm;
desc.flags = 0;
@@ -322,8 +322,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
sg_init_table(&sg, 1);
crypto_hash_init(&desc);
- bio_for_each_segment(bvec, bio, i) {
- sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
+ bio_for_each_segment(bvec, bio, iter) {
+ sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
crypto_hash_update(&desc, &sg, sg.length);
}
crypto_hash_final(&desc, digest);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 04ceb7e2fadd..6b29c4422828 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2351,7 +2351,7 @@ static void rw_interrupt(void)
/* Compute maximal contiguous buffer size. */
static int buffer_chain_size(void)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
int size;
struct req_iterator iter;
char *base;
@@ -2360,10 +2360,10 @@ static int buffer_chain_size(void)
size = 0;
rq_for_each_segment(bv, current_req, iter) {
- if (page_address(bv->bv_page) + bv->bv_offset != base + size)
+ if (page_address(bv.bv_page) + bv.bv_offset != base + size)
break;
- size += bv->bv_len;
+ size += bv.bv_len;
}
return size >> 9;
@@ -2389,7 +2389,7 @@ static int transfer_size(int ssize, int max_sector, int max_size)
static void copy_buffer(int ssize, int max_sector, int max_sector_2)
{
int remaining; /* number of transferred 512-byte sectors */
- struct bio_vec *bv;
+ struct bio_vec bv;
char *buffer;
char *dma_buffer;
int size;
@@ -2427,10 +2427,10 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
if (!remaining)
break;
- size = bv->bv_len;
+ size = bv.bv_len;
SUPBOUND(size, remaining);
- buffer = page_address(bv->bv_page) + bv->bv_offset;
+ buffer = page_address(bv.bv_page) + bv.bv_offset;
if (dma_buffer + size >
floppy_track_buffer + (max_buffer_sectors << 10) ||
dma_buffer < floppy_track_buffer) {
@@ -2886,9 +2886,9 @@ static void do_fd_request(struct request_queue *q)
return;
if (WARN(atomic_read(&usage_count) == 0,
- "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%x\n",
+ "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n",
current_req, (long)blk_rq_pos(current_req), current_req->cmd_type,
- current_req->cmd_flags))
+ (unsigned long long) current_req->cmd_flags))
return;
if (test_and_set_bit(0, &fdc_busy)) {
@@ -3775,9 +3775,9 @@ static int __floppy_read_block_0(struct block_device *bdev)
bio_vec.bv_len = size;
bio_vec.bv_offset = 0;
bio.bi_vcnt = 1;
- bio.bi_size = size;
+ bio.bi_iter.bi_size = size;
bio.bi_bdev = bdev;
- bio.bi_sector = 0;
+ bio.bi_iter.bi_sector = 0;
bio.bi_flags = (1 << BIO_QUIET);
init_completion(&complete);
bio.bi_private = &complete;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 40e715531aa6..ce68515ccb8d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -75,6 +75,7 @@
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
+#include <linux/aio.h>
#include "loop.h"
#include <asm/uaccess.h>
@@ -218,6 +219,55 @@ lo_do_transfer(struct loop_device *lo, int cmd,
return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
}
+#ifdef CONFIG_AIO
+static void lo_rw_aio_complete(u64 data, long res)
+{
+ struct bio *bio = (struct bio *)(uintptr_t)data;
+
+ if (res > 0)
+ res = 0;
+ else if (res < 0)
+ res = -EIO;
+
+ bio_endio(bio, res);
+}
+
+static int lo_rw_aio(struct loop_device *lo, struct bio *bio)
+{
+ struct file *file = lo->lo_backing_file;
+ struct kiocb *iocb;
+ unsigned int op;
+ struct iov_iter iov_iter;
+ struct bvec_iter iter;
+ struct bio_vec bvec;
+ size_t nr_segs;
+ loff_t pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
+ int rc;
+
+ iocb = aio_kernel_alloc(GFP_NOIO);
+ if (!iocb)
+ return -ENOMEM;
+
+ if (bio_rw(bio) & WRITE)
+ op = IOCB_CMD_WRITE_ITER;
+ else
+ op = IOCB_CMD_READ_ITER;
+
+ bio_for_each_segment(bvec, bio, iter) {
+ nr_segs = bio_segments(bio);
+ iov_iter_init_bvec(&iov_iter, &bvec, nr_segs,
+ bvec_length(&bvec, nr_segs), 0);
+ aio_kernel_init_rw(iocb, file, iov_iter_count(&iov_iter), pos);
+ aio_kernel_init_callback(iocb, lo_rw_aio_complete,
+ (u64)(uintptr_t)bio);
+ rc = aio_kernel_submit(iocb, op, &iov_iter);
+ if (rc)
+ break;
+ }
+ return rc;
+}
+#endif /* CONFIG_AIO */
+
/**
* __do_lo_send_write - helper for writing data to a loop device
*
@@ -288,9 +338,10 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
{
int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
struct page *page);
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
struct page *page = NULL;
- int i, ret = 0;
+ int ret = 0;
if (lo->transfer != transfer_none) {
page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
@@ -302,11 +353,11 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
do_lo_send = do_lo_send_direct_write;
}
- bio_for_each_segment(bvec, bio, i) {
- ret = do_lo_send(lo, bvec, pos, page);
+ bio_for_each_segment(bvec, bio, iter) {
+ ret = do_lo_send(lo, &bvec, pos, page);
if (ret < 0)
break;
- pos += bvec->bv_len;
+ pos += bvec.bv_len;
}
if (page) {
kunmap(page);
@@ -392,20 +443,20 @@ do_lo_receive(struct loop_device *lo,
static int
lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
{
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
ssize_t s;
- int i;
- bio_for_each_segment(bvec, bio, i) {
- s = do_lo_receive(lo, bvec, bsize, pos);
+ bio_for_each_segment(bvec, bio, iter) {
+ s = do_lo_receive(lo, &bvec, bsize, pos);
if (s < 0)
return s;
- if (s != bvec->bv_len) {
+ if (s != bvec.bv_len) {
zero_fill_bio(bio);
break;
}
- pos += bvec->bv_len;
+ pos += bvec.bv_len;
}
return 0;
}
@@ -415,53 +466,36 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
loff_t pos;
int ret;
- pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+ pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
if (bio_rw(bio) == WRITE) {
- struct file *file = lo->lo_backing_file;
+ ret = lo_send(lo, bio, pos);
+ } else
+ ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
- if (bio->bi_rw & REQ_FLUSH) {
- ret = vfs_fsync(file, 0);
- if (unlikely(ret && ret != -EINVAL)) {
- ret = -EIO;
- goto out;
- }
- }
+ return ret;
+}
- /*
- * We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do not support discard if
- * encryption is enabled, because it may give an attacker
- * useful information.
- */
- if (bio->bi_rw & REQ_DISCARD) {
- struct file *file = lo->lo_backing_file;
- int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+static int lo_discard(struct loop_device *lo, struct bio *bio)
+{
+ struct file *file = lo->lo_backing_file;
+ int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+ loff_t pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
+ int ret;
- if ((!file->f_op->fallocate) ||
- lo->lo_encrypt_key_size) {
- ret = -EOPNOTSUPP;
- goto out;
- }
- ret = file->f_op->fallocate(file, mode, pos,
- bio->bi_size);
- if (unlikely(ret && ret != -EINVAL &&
- ret != -EOPNOTSUPP))
- ret = -EIO;
- goto out;
- }
+ /*
+ * We use punch hole to reclaim the free space used by the
+ * image a.k.a. discard. However we do not support discard if
+ * encryption is enabled, because it may give an attacker
+ * useful information.
+ */
- ret = lo_send(lo, bio, pos);
+ if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size)
+ return -EOPNOTSUPP;
- if ((bio->bi_rw & REQ_FUA) && !ret) {
- ret = vfs_fsync(file, 0);
- if (unlikely(ret && ret != -EINVAL))
- ret = -EIO;
- }
- } else
- ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
-
-out:
+ ret = file->f_op->fallocate(file, mode, pos, bio->bi_iter.bi_size);
+ if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
+ ret = -EIO;
return ret;
}
@@ -525,7 +559,35 @@ static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
do_loop_switch(lo, bio->bi_private);
bio_put(bio);
} else {
- int ret = do_bio_filebacked(lo, bio);
+ int ret;
+
+ if (bio_rw(bio) == WRITE) {
+ if (bio->bi_rw & REQ_FLUSH) {
+ ret = vfs_fsync(lo->lo_backing_file, 1);
+ if (unlikely(ret && ret != -EINVAL))
+ goto out;
+ }
+ if (bio->bi_rw & REQ_DISCARD) {
+ ret = lo_discard(lo, bio);
+ goto out;
+ }
+ }
+#ifdef CONFIG_AIO
+ if (lo->lo_flags & LO_FLAGS_USE_AIO &&
+ lo->transfer == transfer_none) {
+ ret = lo_rw_aio(lo, bio);
+ if (ret == 0)
+ return;
+ } else
+#endif
+ ret = do_bio_filebacked(lo, bio);
+
+ if ((bio_rw(bio) == WRITE) && bio->bi_rw & REQ_FUA && !ret) {
+ ret = vfs_fsync(lo->lo_backing_file, 0);
+ if (unlikely(ret && ret != -EINVAL))
+ ret = -EIO;
+ }
+out:
bio_endio(bio, ret);
}
}
@@ -547,6 +609,12 @@ static int loop_thread(void *data)
struct loop_device *lo = data;
struct bio *bio;
+ /*
+ * In cases where the underlying filesystem calls balance_dirty_pages()
+ * we want less throttling to avoid lock ups trying to write dirty
+ * pages through the loop device
+ */
+ current->flags |= PF_LESS_THROTTLE;
set_user_nice(current, -20);
while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
@@ -869,6 +937,14 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
!file->f_op->write)
lo_flags |= LO_FLAGS_READ_ONLY;
+#ifdef CONFIG_AIO
+ if (file->f_op->write_iter && file->f_op->read_iter &&
+ mapping->a_ops->direct_IO) {
+ file->f_flags |= O_DIRECT;
+ lo_flags |= LO_FLAGS_USE_AIO;
+ }
+#endif
+
lo_blocksize = S_ISBLK(inode->i_mode) ?
inode->i_bdev->bd_block_size : PAGE_SIZE;
@@ -894,13 +970,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
bio_list_init(&lo->lo_bio_list);
- /*
- * set queue make_request_fn, and add limits based on lower level
- * device
- */
- blk_queue_make_request(lo->lo_queue, loop_make_request);
- lo->lo_queue->queuedata = lo;
-
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_flush(lo->lo_queue, REQ_FLUSH);
@@ -912,6 +981,16 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
set_blocksize(bdev, lo_blocksize);
+#ifdef CONFIG_AIO
+ /*
+ * We must not send too-small direct-io requests, so we inherit
+ * the logical block size from the underlying device
+ */
+ if ((lo_flags & LO_FLAGS_USE_AIO) && inode->i_sb->s_bdev)
+ blk_queue_logical_block_size(lo->lo_queue,
+ bdev_logical_block_size(inode->i_sb->s_bdev));
+#endif
+
lo->lo_thread = kthread_create(loop_thread, lo, "loop%d",
lo->lo_number);
if (IS_ERR(lo->lo_thread)) {
@@ -1618,6 +1697,8 @@ static int loop_add(struct loop_device **l, int i)
if (!lo)
goto out;
+ lo->lo_state = Lo_unbound;
+
/* allocate id, if @id >= 0, we're requesting that specific id */
if (i >= 0) {
err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
@@ -1633,7 +1714,13 @@ static int loop_add(struct loop_device **l, int i)
err = -ENOMEM;
lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
if (!lo->lo_queue)
- goto out_free_dev;
+ goto out_free_idr;
+
+ /*
+ * set queue make_request_fn
+ */
+ blk_queue_make_request(lo->lo_queue, loop_make_request);
+ lo->lo_queue->queuedata = lo;
disk = lo->lo_disk = alloc_disk(1 << part_shift);
if (!disk)
@@ -1678,6 +1765,8 @@ static int loop_add(struct loop_device **l, int i)
out_free_queue:
blk_cleanup_queue(lo->lo_queue);
+out_free_idr:
+ idr_remove(&loop_index_idr, i);
out_free_dev:
kfree(lo);
out:
@@ -1741,7 +1830,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
if (err < 0)
err = loop_add(&lo, MINOR(dev) >> part_shift);
if (err < 0)
- kobj = ERR_PTR(err);
+ kobj = NULL;
else
kobj = get_disk(lo->lo_disk);
mutex_unlock(&loop_index_mutex);
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 77a60bedd7a3..7bc363f1ee82 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -936,7 +936,7 @@ static int mg_probe(struct platform_device *plat_dev)
goto probe_err_3b;
}
err = request_irq(host->irq, mg_irq,
- IRQF_DISABLED | IRQF_TRIGGER_RISING,
+ IRQF_TRIGGER_RISING,
MG_DEV_NAME, host);
if (err) {
printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 952dbfe22126..52b2f2a71470 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -126,64 +126,30 @@ struct mtip_compat_ide_task_request_s {
static bool mtip_check_surprise_removal(struct pci_dev *pdev)
{
u16 vendor_id = 0;
+ struct driver_data *dd = pci_get_drvdata(pdev);
+
+ if (dd->sr)
+ return true;
/* Read the vendorID from the configuration space */
pci_read_config_word(pdev, 0x00, &vendor_id);
- if (vendor_id == 0xFFFF)
+ if (vendor_id == 0xFFFF) {
+ dd->sr = true;
+ if (dd->queue)
+ set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags);
+ else
+ dev_warn(&dd->pdev->dev,
+ "%s: dd->queue is NULL\n", __func__);
+ if (dd->port) {
+ set_bit(MTIP_PF_SR_CLEANUP_BIT, &dd->port->flags);
+ wake_up_interruptible(&dd->port->svc_wait);
+ } else
+ dev_warn(&dd->pdev->dev,
+ "%s: dd->port is NULL\n", __func__);
return true; /* device removed */
-
- return false; /* device present */
-}
-
-/*
- * This function is called for clean the pending command in the
- * command slot during the surprise removal of device and return
- * error to the upper layer.
- *
- * @dd Pointer to the DRIVER_DATA structure.
- *
- * return value
- * None
- */
-static void mtip_command_cleanup(struct driver_data *dd)
-{
- int group = 0, commandslot = 0, commandindex = 0;
- struct mtip_cmd *command;
- struct mtip_port *port = dd->port;
- static int in_progress;
-
- if (in_progress)
- return;
-
- in_progress = 1;
-
- for (group = 0; group < 4; group++) {
- for (commandslot = 0; commandslot < 32; commandslot++) {
- if (!(port->allocated[group] & (1 << commandslot)))
- continue;
-
- commandindex = group << 5 | commandslot;
- command = &port->commands[commandindex];
-
- if (atomic_read(&command->active)
- && (command->async_callback)) {
- command->async_callback(command->async_data,
- -ENODEV);
- command->async_callback = NULL;
- command->async_data = NULL;
- }
-
- dma_unmap_sg(&port->dd->pdev->dev,
- command->sg,
- command->scatter_ents,
- command->direction);
- }
}
- up(&port->cmd_slot);
-
- set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
- in_progress = 0;
+ return false; /* device present */
}
/*
@@ -222,10 +188,7 @@ static int get_slot(struct mtip_port *port)
}
dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");
- if (mtip_check_surprise_removal(port->dd->pdev)) {
- /* Device not present, clean outstanding commands */
- mtip_command_cleanup(port->dd);
- }
+ mtip_check_surprise_removal(port->dd->pdev);
return -1;
}
@@ -246,6 +209,107 @@ static inline void release_slot(struct mtip_port *port, int tag)
}
/*
+ * IO completion function.
+ *
+ * This completion function is called by the driver ISR when a
+ * command that was issued by the kernel completes. It first calls the
+ * asynchronous completion function which normally calls back into the block
+ * layer passing the asynchronous callback data, then unmaps the
+ * scatter list associated with the completed command, and finally
+ * clears the allocated bit associated with the completed command.
+ *
+ * @port Pointer to the port data structure.
+ * @tag Tag of the command.
+ * @data Pointer to driver_data.
+ * @status Completion status.
+ *
+ * return value
+ * None
+ */
+static void mtip_async_complete(struct mtip_port *port,
+ int tag,
+ void *data,
+ int status)
+{
+ struct mtip_cmd *command;
+ struct driver_data *dd = data;
+ int cb_status = status ? -EIO : 0;
+
+ if (unlikely(!dd) || unlikely(!port))
+ return;
+
+ command = &port->commands[tag];
+
+ if (unlikely(status == PORT_IRQ_TF_ERR)) {
+ dev_warn(&port->dd->pdev->dev,
+ "Command tag %d failed due to TFE\n", tag);
+ }
+
+ /* Upper layer callback */
+ if (likely(command->async_callback))
+ command->async_callback(command->async_data, cb_status);
+
+ command->async_callback = NULL;
+ command->comp_func = NULL;
+
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&dd->pdev->dev,
+ command->sg,
+ command->scatter_ents,
+ command->direction);
+
+ /* Clear the allocated and active bits for the command */
+ atomic_set(&port->commands[tag].active, 0);
+ release_slot(port, tag);
+
+ up(&port->cmd_slot);
+}
+
+/*
+ * This function is called to clean up the pending commands in the
+ * command slots during surprise removal of the device and return
+ * error to the upper layer.
+ *
+ * @dd Pointer to the DRIVER_DATA structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_command_cleanup(struct driver_data *dd)
+{
+ int tag = 0;
+ struct mtip_cmd *cmd;
+ struct mtip_port *port = dd->port;
+ unsigned int num_cmd_slots = dd->slot_groups * 32;
+
+ if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ return;
+
+ if (!port)
+ return;
+
+ cmd = &port->commands[MTIP_TAG_INTERNAL];
+ if (atomic_read(&cmd->active))
+ if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) &
+ (1 << MTIP_TAG_INTERNAL))
+ if (cmd->comp_func)
+ cmd->comp_func(port, MTIP_TAG_INTERNAL,
+ cmd->comp_data, -ENODEV);
+
+ while (1) {
+ tag = find_next_bit(port->allocated, num_cmd_slots, tag);
+ if (tag >= num_cmd_slots)
+ break;
+
+ cmd = &port->commands[tag];
+ if (atomic_read(&cmd->active))
+ mtip_async_complete(port, tag, dd, -ENODEV);
+ }
+
+ set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
+}
+
+/*
* Reset the HBA (without sleeping)
*
* @dd Pointer to the driver data structure.
@@ -584,6 +648,9 @@ static void mtip_timeout_function(unsigned long int data)
if (unlikely(!port))
return;
+ if (unlikely(port->dd->sr))
+ return;
+
if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) {
mod_timer(&port->cmd_timer,
jiffies + msecs_to_jiffies(30000));
@@ -675,66 +742,6 @@ static void mtip_timeout_function(unsigned long int data)
}
/*
- * IO completion function.
- *
- * This completion function is called by the driver ISR when a
- * command that was issued by the kernel completes. It first calls the
- * asynchronous completion function which normally calls back into the block
- * layer passing the asynchronous callback data, then unmaps the
- * scatter list associated with the completed command, and finally
- * clears the allocated bit associated with the completed command.
- *
- * @port Pointer to the port data structure.
- * @tag Tag of the command.
- * @data Pointer to driver_data.
- * @status Completion status.
- *
- * return value
- * None
- */
-static void mtip_async_complete(struct mtip_port *port,
- int tag,
- void *data,
- int status)
-{
- struct mtip_cmd *command;
- struct driver_data *dd = data;
- int cb_status = status ? -EIO : 0;
-
- if (unlikely(!dd) || unlikely(!port))
- return;
-
- command = &port->commands[tag];
-
- if (unlikely(status == PORT_IRQ_TF_ERR)) {
- dev_warn(&port->dd->pdev->dev,
- "Command tag %d failed due to TFE\n", tag);
- }
-
- /* Upper layer callback */
- if (likely(command->async_callback))
- command->async_callback(command->async_data, cb_status);
-
- command->async_callback = NULL;
- command->comp_func = NULL;
-
- /* Unmap the DMA scatter list entries */
- dma_unmap_sg(&dd->pdev->dev,
- command->sg,
- command->scatter_ents,
- command->direction);
-
- /* Clear the allocated and active bits for the command */
- atomic_set(&port->commands[tag].active, 0);
- release_slot(port, tag);
-
- if (unlikely(command->unaligned))
- up(&port->cmd_slot_unal);
- else
- up(&port->cmd_slot);
-}
-
-/*
* Internal command completion callback function.
*
* This function is normally called by the driver ISR when an internal
@@ -854,7 +861,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
"Missing completion func for tag %d",
tag);
if (mtip_check_surprise_removal(dd->pdev)) {
- mtip_command_cleanup(dd);
/* don't proceed further */
return;
}
@@ -1018,14 +1024,12 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
command->comp_data,
0);
} else {
- dev_warn(&dd->pdev->dev,
- "Null completion "
- "for tag %d",
+ dev_dbg(&dd->pdev->dev,
+ "Null completion for tag %d",
tag);
if (mtip_check_surprise_removal(
dd->pdev)) {
- mtip_command_cleanup(dd);
return;
}
}
@@ -1145,7 +1149,6 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
if (unlikely(port_stat & PORT_IRQ_ERR)) {
if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
- mtip_command_cleanup(dd);
/* don't proceed further */
return IRQ_HANDLED;
}
@@ -2806,34 +2809,51 @@ static ssize_t show_device_status(struct device_driver *drv, char *buf)
static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
+ struct driver_data *dd = (struct driver_data *)f->private_data;
int size = *offset;
- char buf[MTIP_DFS_MAX_BUF_SIZE];
+ char *buf;
+ int rv = 0;
if (!len || *offset)
return 0;
+ buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
+ if (!buf) {
+ dev_err(&dd->pdev->dev,
+ "Memory allocation: status buffer\n");
+ return -ENOMEM;
+ }
+
size += show_device_status(NULL, buf);
*offset = size <= len ? size : len;
size = copy_to_user(ubuf, buf, *offset);
if (size)
- return -EFAULT;
+ rv = -EFAULT;
- return *offset;
+ kfree(buf);
+ return rv ? rv : *offset;
}
static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
struct driver_data *dd = (struct driver_data *)f->private_data;
- char buf[MTIP_DFS_MAX_BUF_SIZE];
+ char *buf;
u32 group_allocated;
int size = *offset;
- int n;
+ int n, rv = 0;
if (!len || size)
return 0;
+ buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
+ if (!buf) {
+ dev_err(&dd->pdev->dev,
+ "Memory allocation: register buffer\n");
+ return -ENOMEM;
+ }
+
size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--)
@@ -2888,21 +2908,30 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
*offset = size <= len ? size : len;
size = copy_to_user(ubuf, buf, *offset);
if (size)
- return -EFAULT;
+ rv = -EFAULT;
- return *offset;
+ kfree(buf);
+ return rv ? rv : *offset;
}
static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
struct driver_data *dd = (struct driver_data *)f->private_data;
- char buf[MTIP_DFS_MAX_BUF_SIZE];
+ char *buf;
int size = *offset;
+ int rv = 0;
if (!len || size)
return 0;
+ buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
+ if (!buf) {
+ dev_err(&dd->pdev->dev,
+ "Memory allocation: flag buffer\n");
+ return -ENOMEM;
+ }
+
size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
dd->port->flags);
size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
@@ -2911,9 +2940,10 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
*offset = size <= len ? size : len;
size = copy_to_user(ubuf, buf, *offset);
if (size)
- return -EFAULT;
+ rv = -EFAULT;
- return *offset;
+ kfree(buf);
+ return rv ? rv : *offset;
}
static const struct file_operations mtip_device_status_fops = {
@@ -3006,6 +3036,46 @@ static void mtip_hw_debugfs_exit(struct driver_data *dd)
debugfs_remove_recursive(dd->dfs_node);
}
+static int mtip_free_orphan(struct driver_data *dd)
+{
+ struct kobject *kobj;
+
+ if (dd->bdev) {
+ if (dd->bdev->bd_holders >= 1)
+ return -2;
+
+ bdput(dd->bdev);
+ dd->bdev = NULL;
+ }
+
+ mtip_hw_debugfs_exit(dd);
+
+ spin_lock(&rssd_index_lock);
+ ida_remove(&rssd_index_ida, dd->index);
+ spin_unlock(&rssd_index_lock);
+
+ if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag) &&
+ test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) {
+ put_disk(dd->disk);
+ } else {
+ if (dd->disk) {
+ kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+ if (kobj) {
+ mtip_hw_sysfs_exit(dd, kobj);
+ kobject_put(kobj);
+ }
+ del_gendisk(dd->disk);
+ dd->disk = NULL;
+ }
+ if (dd->queue) {
+ dd->queue->queuedata = NULL;
+ blk_cleanup_queue(dd->queue);
+ dd->queue = NULL;
+ }
+ }
+ kfree(dd);
+ return 0;
+}
/*
* Perform any init/resume time hardware setup
@@ -3154,6 +3224,7 @@ static int mtip_service_thread(void *data)
unsigned long slot, slot_start, slot_wrap;
unsigned int num_cmd_slots = dd->slot_groups * 32;
struct mtip_port *port = dd->port;
+ int ret;
while (1) {
/*
@@ -3164,13 +3235,18 @@ static int mtip_service_thread(void *data)
!(port->flags & MTIP_PF_PAUSE_IO));
if (kthread_should_stop())
+ goto st_out;
+
+ set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+
+ /* If I am an orphan, start self cleanup */
+ if (test_bit(MTIP_PF_SR_CLEANUP_BIT, &port->flags))
break;
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag)))
- break;
+ goto st_out;
- set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
slot = 1;
/* used to restrict the loop to one iteration */
@@ -3201,7 +3277,7 @@ static int mtip_service_thread(void *data)
clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
} else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
- if (!mtip_ftl_rebuild_poll(dd))
+ if (mtip_ftl_rebuild_poll(dd) < 0)
set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
&dd->dd_flag);
clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
@@ -3209,8 +3285,30 @@ static int mtip_service_thread(void *data)
clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
+ goto st_out;
+ }
+
+ /* wait for pci remove to exit */
+ while (1) {
+ if (test_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag))
break;
+ msleep_interruptible(1000);
+ if (kthread_should_stop())
+ goto st_out;
+ }
+
+ while (1) {
+ ret = mtip_free_orphan(dd);
+ if (!ret) {
+ /* NOTE: All data structures are invalid, do not
+ * access them past this point */
+ return 0;
+ }
+ msleep_interruptible(1000);
+ if (kthread_should_stop())
+ goto st_out;
}
+st_out:
return 0;
}
@@ -3437,13 +3535,13 @@ static int mtip_hw_init(struct driver_data *dd)
rv = -EFAULT;
goto out3;
}
+ mtip_dump_identify(dd->port);
if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
MTIP_FTL_REBUILD_MAGIC) {
set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
return MTIP_FTL_REBUILD_MAGIC;
}
- mtip_dump_identify(dd->port);
/* check write protect, over temp and rebuild statuses */
rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
@@ -3467,8 +3565,8 @@ static int mtip_hw_init(struct driver_data *dd)
}
if (buf[288] == 0xBF) {
dev_info(&dd->pdev->dev,
- "Drive indicates rebuild has failed.\n");
- /* TODO */
+ "Drive is in security locked state.\n");
+ set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
}
}
@@ -3523,9 +3621,8 @@ static int mtip_hw_exit(struct driver_data *dd)
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
- if (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
-
- if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags))
+ if (!dd->sr) {
+ if (!test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
if (mtip_standby_immediate(dd->port))
dev_warn(&dd->pdev->dev,
"STANDBY IMMEDIATE failed\n");
@@ -3551,6 +3648,7 @@ static int mtip_hw_exit(struct driver_data *dd)
dd->port->command_list_dma);
/* Free the memory allocated for the for structure. */
kfree(dd->port);
+ dd->port = NULL;
return 0;
}
@@ -3572,7 +3670,8 @@ static int mtip_hw_shutdown(struct driver_data *dd)
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
- mtip_standby_immediate(dd->port);
+ if (!dd->sr && dd->port)
+ mtip_standby_immediate(dd->port);
return 0;
}
@@ -3863,8 +3962,9 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
{
struct driver_data *dd = queue->queuedata;
struct scatterlist *sg;
- struct bio_vec *bvec;
- int i, nents = 0;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ int nents = 0;
int tag = 0, unaligned = 0;
if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3887,10 +3987,14 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
bio_endio(bio, -ENODATA);
return;
}
+ if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) {
+ bio_endio(bio, -ENXIO);
+ return;
+ }
}
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
- bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+ bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
bio_sectors(bio)));
return;
}
@@ -3903,7 +4007,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
dd->unal_qdepth) {
- if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+ if (bio->bi_iter.bi_sector % 8 != 0)
+ /* Unaligned on 4k boundaries */
unaligned = 1;
else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
unaligned = 1;
@@ -3922,17 +4027,17 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
}
/* Create the scatter list for this bio. */
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
sg_set_page(&sg[nents],
- bvec->bv_page,
- bvec->bv_len,
- bvec->bv_offset);
+ bvec.bv_page,
+ bvec.bv_len,
+ bvec.bv_offset);
nents++;
}
/* Issue the read/write. */
mtip_hw_submit_io(dd,
- bio->bi_sector,
+ bio->bi_iter.bi_sector,
bio_sectors(bio),
nents,
tag,
@@ -4010,6 +4115,8 @@ static int mtip_block_initialize(struct driver_data *dd)
dd->disk->private_data = dd;
dd->index = index;
+ mtip_hw_debugfs_init(dd);
+
/*
* if rebuild pending, start the service thread, and delay the block
* queue creation and add_disk()
@@ -4068,6 +4175,7 @@ skip_create_disk:
/* Enable the block device and add it to /dev */
add_disk(dd->disk);
+ dd->bdev = bdget_disk(dd->disk, 0);
/*
* Now that the disk is active, initialize any sysfs attributes
* managed by the protocol layer.
@@ -4077,7 +4185,6 @@ skip_create_disk:
mtip_hw_sysfs_init(dd, kobj);
kobject_put(kobj);
}
- mtip_hw_debugfs_init(dd);
if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -4103,7 +4210,8 @@ start_service_thread:
return rv;
kthread_run_error:
- mtip_hw_debugfs_exit(dd);
+ bdput(dd->bdev);
+ dd->bdev = NULL;
/* Delete our gendisk. This also removes the device from /dev */
del_gendisk(dd->disk);
@@ -4112,6 +4220,7 @@ read_capacity_error:
blk_cleanup_queue(dd->queue);
block_queue_alloc_init_error:
+ mtip_hw_debugfs_exit(dd);
disk_index_error:
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, index);
@@ -4141,40 +4250,48 @@ static int mtip_block_remove(struct driver_data *dd)
{
struct kobject *kobj;
- if (dd->mtip_svc_handler) {
- set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
- wake_up_interruptible(&dd->port->svc_wait);
- kthread_stop(dd->mtip_svc_handler);
- }
+ if (!dd->sr) {
+ mtip_hw_debugfs_exit(dd);
- /* Clean up the sysfs attributes, if created */
- if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
- kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
- if (kobj) {
- mtip_hw_sysfs_exit(dd, kobj);
- kobject_put(kobj);
+ if (dd->mtip_svc_handler) {
+ set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
+ wake_up_interruptible(&dd->port->svc_wait);
+ kthread_stop(dd->mtip_svc_handler);
}
- }
- mtip_hw_debugfs_exit(dd);
- /*
- * Delete our gendisk structure. This also removes the device
- * from /dev
- */
- if (dd->disk) {
- if (dd->disk->queue)
- del_gendisk(dd->disk);
- else
- put_disk(dd->disk);
- }
-
- spin_lock(&rssd_index_lock);
- ida_remove(&rssd_index_ida, dd->index);
- spin_unlock(&rssd_index_lock);
+ /* Clean up the sysfs attributes, if created */
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
+ kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+ if (kobj) {
+ mtip_hw_sysfs_exit(dd, kobj);
+ kobject_put(kobj);
+ }
+ }
+ /*
+ * Delete our gendisk structure. This also removes the device
+ * from /dev
+ */
+ if (dd->bdev) {
+ bdput(dd->bdev);
+ dd->bdev = NULL;
+ }
+ if (dd->disk) {
+ if (dd->disk->queue) {
+ del_gendisk(dd->disk);
+ blk_cleanup_queue(dd->queue);
+ dd->queue = NULL;
+ } else
+ put_disk(dd->disk);
+ }
+ dd->disk = NULL;
- blk_cleanup_queue(dd->queue);
- dd->disk = NULL;
- dd->queue = NULL;
+ spin_lock(&rssd_index_lock);
+ ida_remove(&rssd_index_ida, dd->index);
+ spin_unlock(&rssd_index_lock);
+ } else {
+ dev_info(&dd->pdev->dev, "device %s surprise removal\n",
+ dd->disk->disk_name);
+ }
/* De-initialize the protocol layer. */
mtip_hw_exit(dd);
@@ -4490,8 +4607,7 @@ done:
static void mtip_pci_remove(struct pci_dev *pdev)
{
struct driver_data *dd = pci_get_drvdata(pdev);
- int counter = 0;
- unsigned long flags;
+ unsigned long flags, to;
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
@@ -4500,17 +4616,22 @@ static void mtip_pci_remove(struct pci_dev *pdev)
list_add(&dd->remove_list, &removing_list);
spin_unlock_irqrestore(&dev_lock, flags);
- if (mtip_check_surprise_removal(pdev)) {
- while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
- counter++;
- msleep(20);
- if (counter == 10) {
- /* Cleanup the outstanding commands */
- mtip_command_cleanup(dd);
- break;
- }
- }
+ mtip_check_surprise_removal(pdev);
+ synchronize_irq(dd->pdev->irq);
+
+ /* Spin until workers are done */
+ to = jiffies + msecs_to_jiffies(4000);
+ do {
+ msleep(20);
+ } while (atomic_read(&dd->irq_workers_active) != 0 &&
+ time_before(jiffies, to));
+
+ if (atomic_read(&dd->irq_workers_active) != 0) {
+ dev_warn(&dd->pdev->dev,
+ "Completion workers still active!\n");
}
+ /* Cleanup the outstanding commands */
+ mtip_command_cleanup(dd);
/* Clean up the block layer. */
mtip_block_remove(dd);
@@ -4529,8 +4650,15 @@ static void mtip_pci_remove(struct pci_dev *pdev)
list_del_init(&dd->remove_list);
spin_unlock_irqrestore(&dev_lock, flags);
- kfree(dd);
+ if (!dd->sr)
+ kfree(dd);
+ else
+ set_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag);
+
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
+ pci_set_drvdata(pdev, NULL);
+ pci_dev_put(pdev);
+
}
/*
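The mtip32xx.c changes above replace the fixed 10 x 20 ms removal retry with a deadline-bounded poll on dd->irq_workers_active before cleaning up outstanding commands. A minimal userspace sketch of that wait pattern, assuming a hypothetical workers_active counter in place of the driver's atomic and plain nanosleep() in place of msleep():

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_int workers_active;       /* stand-in for dd->irq_workers_active */

static long long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

static int wait_for_workers(long long timeout_ms)
{
        struct timespec tick = { 0, 20 * 1000000L };    /* mirrors msleep(20) */
        long long deadline = now_ms() + timeout_ms;

        do {
                nanosleep(&tick, NULL);
        } while (atomic_load(&workers_active) != 0 && now_ms() < deadline);

        if (atomic_load(&workers_active) != 0) {
                fprintf(stderr, "completion workers still active\n");
                return -1;
        }
        return 0;
}

int main(void)
{
        return wait_for_workers(4000) ? 1 : 0;  /* same 4-second budget */
}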
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 3bb8a295fbe4..9be7a1582ad3 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -140,6 +140,7 @@ enum {
MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
MTIP_PF_ISSUE_CMDS_BIT = 5,
MTIP_PF_REBUILD_BIT = 6,
+ MTIP_PF_SR_CLEANUP_BIT = 7,
MTIP_PF_SVC_THD_STOP_BIT = 8,
/* below are bit numbers in 'dd_flag' defined in driver_data */
@@ -147,15 +148,18 @@ enum {
MTIP_DDF_REMOVE_PENDING_BIT = 1,
MTIP_DDF_OVER_TEMP_BIT = 2,
MTIP_DDF_WRITE_PROTECT_BIT = 3,
- MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
- (1 << MTIP_DDF_SEC_LOCK_BIT) |
- (1 << MTIP_DDF_OVER_TEMP_BIT) |
- (1 << MTIP_DDF_WRITE_PROTECT_BIT)),
-
+ MTIP_DDF_REMOVE_DONE_BIT = 4,
MTIP_DDF_CLEANUP_BIT = 5,
MTIP_DDF_RESUME_BIT = 6,
MTIP_DDF_INIT_DONE_BIT = 7,
MTIP_DDF_REBUILD_FAILED_BIT = 8,
+
+ MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
+ (1 << MTIP_DDF_SEC_LOCK_BIT) |
+ (1 << MTIP_DDF_OVER_TEMP_BIT) |
+ (1 << MTIP_DDF_WRITE_PROTECT_BIT) |
+ (1 << MTIP_DDF_REBUILD_FAILED_BIT)),
+
};
struct smart_attr {
@@ -499,6 +503,8 @@ struct driver_data {
bool trim_supp; /* flag indicating trim support */
+ bool sr;
+
int numa_node; /* NUMA support */
char workq_name[32];
@@ -511,6 +517,8 @@ struct driver_data {
int isr_binding;
+ struct block_device *bdev;
+
int unal_qdepth; /* qdepth of unaligned IO queue */
struct list_head online_list; /* linkage for online list */
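The header change folds MTIP_DDF_REBUILD_FAILED_BIT into MTIP_DDF_STOP_IO, so a single mask test rejects new IO for every fatal device state. A small sketch of that bit-number/composite-mask pattern, using illustrative DDF_* names rather than the driver's own:

#include <stdio.h>

enum {
        DDF_SEC_LOCK_BIT        = 0,
        DDF_REMOVE_PENDING_BIT  = 1,
        DDF_OVER_TEMP_BIT       = 2,
        DDF_WRITE_PROTECT_BIT   = 3,
        DDF_REBUILD_FAILED_BIT  = 8,

        /* one mask covering every condition that should stop IO */
        DDF_STOP_IO = (1 << DDF_REMOVE_PENDING_BIT) |
                      (1 << DDF_SEC_LOCK_BIT) |
                      (1 << DDF_OVER_TEMP_BIT) |
                      (1 << DDF_WRITE_PROTECT_BIT) |
                      (1 << DDF_REBUILD_FAILED_BIT),
};

int main(void)
{
        unsigned long dd_flag = 1 << DDF_REBUILD_FAILED_BIT;

        if (dd_flag & DDF_STOP_IO)
                printf("refusing new IO (flags 0x%lx)\n", dd_flag);
        return 0;
}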
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2dc3b5153f0d..55298db36b2d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,18 +271,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
if (nbd_cmd(req) == NBD_CMD_WRITE) {
struct req_iterator iter;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
/*
* we are really probing at internals to determine
* whether to set MSG_MORE or not...
*/
rq_for_each_segment(bvec, req, iter) {
flags = 0;
- if (!rq_iter_last(req, iter))
+ if (!rq_iter_last(bvec, iter))
flags = MSG_MORE;
dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
- nbd->disk->disk_name, req, bvec->bv_len);
- result = sock_send_bvec(nbd, bvec, flags);
+ nbd->disk->disk_name, req, bvec.bv_len);
+ result = sock_send_bvec(nbd, &bvec, flags);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
@@ -378,10 +378,10 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
nbd->disk->disk_name, req);
if (nbd_cmd(req) == NBD_CMD_READ) {
struct req_iterator iter;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
rq_for_each_segment(bvec, req, iter) {
- result = sock_recv_bvec(nbd, bvec);
+ result = sock_recv_bvec(nbd, &bvec);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
@@ -389,7 +389,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
return req;
}
dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
- nbd->disk->disk_name, req, bvec->bv_len);
+ nbd->disk->disk_name, req, bvec.bv_len);
}
}
return req;
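The nbd conversion above is part of the immutable-biovec work in this series: rq_for_each_segment() now yields struct bio_vec by value while a separate iterator tracks position, instead of handing out pointers into the bio's vector. A rough userspace sketch of that shape, with made-up seg/seg_iter types standing in for bio_vec/bvec_iter:

#include <stdio.h>

struct seg { const char *page; unsigned int off, len; };
struct seg_iter { unsigned int idx, done; };

/* return a value copy of the current segment, adjusted for bytes consumed */
static struct seg seg_iter_cur(const struct seg *v, struct seg_iter it)
{
        struct seg s = v[it.idx];

        s.off += it.done;
        s.len -= it.done;
        return s;
}

/* advance the iterator by a number of bytes, crossing segments as needed */
static void seg_iter_advance(const struct seg *v, struct seg_iter *it,
                             unsigned int bytes)
{
        while (bytes) {
                unsigned int left = v[it->idx].len - it->done;
                unsigned int step = bytes < left ? bytes : left;

                it->done += step;
                bytes -= step;
                if (it->done == v[it->idx].len) {
                        it->idx++;
                        it->done = 0;
                }
        }
}

int main(void)
{
        struct seg vec[] = { { "A", 0, 8 }, { "B", 0, 4 } };
        struct seg_iter it = { 0, 0 };
        unsigned int n = sizeof(vec) / sizeof(vec[0]);

        while (it.idx < n) {
                struct seg s = seg_iter_cur(vec, it);

                printf("segment %s off=%u len=%u\n", s.page, s.off, s.len);
                seg_iter_advance(vec, &it, s.len);
        }
        return 0;
}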
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
new file mode 100644
index 000000000000..b5d842370cc9
--- /dev/null
+++ b/drivers/block/null_blk.c
@@ -0,0 +1,635 @@
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/blk-mq.h>
+#include <linux/hrtimer.h>
+
+struct nullb_cmd {
+ struct list_head list;
+ struct llist_node ll_list;
+ struct call_single_data csd;
+ struct request *rq;
+ struct bio *bio;
+ unsigned int tag;
+ struct nullb_queue *nq;
+};
+
+struct nullb_queue {
+ unsigned long *tag_map;
+ wait_queue_head_t wait;
+ unsigned int queue_depth;
+
+ struct nullb_cmd *cmds;
+};
+
+struct nullb {
+ struct list_head list;
+ unsigned int index;
+ struct request_queue *q;
+ struct gendisk *disk;
+ struct hrtimer timer;
+ unsigned int queue_depth;
+ spinlock_t lock;
+
+ struct nullb_queue *queues;
+ unsigned int nr_queues;
+};
+
+static LIST_HEAD(nullb_list);
+static struct mutex lock;
+static int null_major;
+static int nullb_indexes;
+
+struct completion_queue {
+ struct llist_head list;
+ struct hrtimer timer;
+};
+
+/*
+ * These are per-cpu for now; they will need to be configured by the
+ * complete_queues parameter and appropriately mapped.
+ */
+static DEFINE_PER_CPU(struct completion_queue, completion_queues);
+
+enum {
+ NULL_IRQ_NONE = 0,
+ NULL_IRQ_SOFTIRQ = 1,
+ NULL_IRQ_TIMER = 2,
+
+ NULL_Q_BIO = 0,
+ NULL_Q_RQ = 1,
+ NULL_Q_MQ = 2,
+};
+
+static int submit_queues = 1;
+module_param(submit_queues, int, S_IRUGO);
+MODULE_PARM_DESC(submit_queues, "Number of submission queues");
+
+static int home_node = NUMA_NO_NODE;
+module_param(home_node, int, S_IRUGO);
+MODULE_PARM_DESC(home_node, "Home node for the device");
+
+static int queue_mode = NULL_Q_MQ;
+module_param(queue_mode, int, S_IRUGO);
+MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
+
+static int gb = 250;
+module_param(gb, int, S_IRUGO);
+MODULE_PARM_DESC(gb, "Size in GB");
+
+static int bs = 512;
+module_param(bs, int, S_IRUGO);
+MODULE_PARM_DESC(bs, "Block size (in bytes)");
+
+static int nr_devices = 2;
+module_param(nr_devices, int, S_IRUGO);
+MODULE_PARM_DESC(nr_devices, "Number of devices to register");
+
+static int irqmode = NULL_IRQ_SOFTIRQ;
+module_param(irqmode, int, S_IRUGO);
+MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
+
+static int completion_nsec = 10000;
+module_param(completion_nsec, int, S_IRUGO);
+MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
+
+static int hw_queue_depth = 64;
+module_param(hw_queue_depth, int, S_IRUGO);
+MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
+
+static bool use_per_node_hctx = true;
+module_param(use_per_node_hctx, bool, S_IRUGO);
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+
+static void put_tag(struct nullb_queue *nq, unsigned int tag)
+{
+ clear_bit_unlock(tag, nq->tag_map);
+
+ if (waitqueue_active(&nq->wait))
+ wake_up(&nq->wait);
+}
+
+static unsigned int get_tag(struct nullb_queue *nq)
+{
+ unsigned int tag;
+
+ do {
+ tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
+ if (tag >= nq->queue_depth)
+ return -1U;
+ } while (test_and_set_bit_lock(tag, nq->tag_map));
+
+ return tag;
+}
+
+static void free_cmd(struct nullb_cmd *cmd)
+{
+ put_tag(cmd->nq, cmd->tag);
+}
+
+static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
+{
+ struct nullb_cmd *cmd;
+ unsigned int tag;
+
+ tag = get_tag(nq);
+ if (tag != -1U) {
+ cmd = &nq->cmds[tag];
+ cmd->tag = tag;
+ cmd->nq = nq;
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
+{
+ struct nullb_cmd *cmd;
+ DEFINE_WAIT(wait);
+
+ cmd = __alloc_cmd(nq);
+ if (cmd || !can_wait)
+ return cmd;
+
+ do {
+ prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
+ cmd = __alloc_cmd(nq);
+ if (cmd)
+ break;
+
+ io_schedule();
+ } while (1);
+
+ finish_wait(&nq->wait, &wait);
+ return cmd;
+}
+
+static void end_cmd(struct nullb_cmd *cmd)
+{
+ if (cmd->rq) {
+ if (queue_mode == NULL_Q_MQ)
+ blk_mq_end_io(cmd->rq, 0);
+ else {
+ INIT_LIST_HEAD(&cmd->rq->queuelist);
+ blk_end_request_all(cmd->rq, 0);
+ }
+ } else if (cmd->bio)
+ bio_endio(cmd->bio, 0);
+
+ if (queue_mode != NULL_Q_MQ)
+ free_cmd(cmd);
+}
+
+static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
+{
+ struct completion_queue *cq;
+ struct llist_node *entry;
+ struct nullb_cmd *cmd;
+
+ cq = &per_cpu(completion_queues, smp_processor_id());
+
+ while ((entry = llist_del_all(&cq->list)) != NULL) {
+ do {
+ cmd = container_of(entry, struct nullb_cmd, ll_list);
+ end_cmd(cmd);
+ entry = entry->next;
+ } while (entry);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+static void null_cmd_end_timer(struct nullb_cmd *cmd)
+{
+ struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());
+
+ cmd->ll_list.next = NULL;
+ if (llist_add(&cmd->ll_list, &cq->list)) {
+ ktime_t kt = ktime_set(0, completion_nsec);
+
+ hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL);
+ }
+
+ put_cpu();
+}
+
+static void null_softirq_done_fn(struct request *rq)
+{
+ blk_end_request_all(rq, 0);
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+
+static void null_ipi_cmd_end_io(void *data)
+{
+ struct completion_queue *cq;
+ struct llist_node *entry, *next;
+ struct nullb_cmd *cmd;
+
+ cq = &per_cpu(completion_queues, smp_processor_id());
+
+ entry = llist_del_all(&cq->list);
+
+ while (entry) {
+ next = entry->next;
+ cmd = llist_entry(entry, struct nullb_cmd, ll_list);
+ end_cmd(cmd);
+ entry = next;
+ }
+}
+
+static void null_cmd_end_ipi(struct nullb_cmd *cmd)
+{
+ struct call_single_data *data = &cmd->csd;
+ int cpu = get_cpu();
+ struct completion_queue *cq = &per_cpu(completion_queues, cpu);
+
+ cmd->ll_list.next = NULL;
+
+ if (llist_add(&cmd->ll_list, &cq->list)) {
+ data->func = null_ipi_cmd_end_io;
+ data->flags = 0;
+ __smp_call_function_single(cpu, data, 0);
+ }
+
+ put_cpu();
+}
+
+#endif /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+
+static inline void null_handle_cmd(struct nullb_cmd *cmd)
+{
+ /* Complete IO inline, via softirq, or via timer */
+ switch (irqmode) {
+ case NULL_IRQ_NONE:
+ end_cmd(cmd);
+ break;
+ case NULL_IRQ_SOFTIRQ:
+#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+ null_cmd_end_ipi(cmd);
+#else
+ end_cmd(cmd);
+#endif
+ break;
+ case NULL_IRQ_TIMER:
+ null_cmd_end_timer(cmd);
+ break;
+ }
+}
+
+static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
+{
+ int index = 0;
+
+ if (nullb->nr_queues != 1)
+ index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
+
+ return &nullb->queues[index];
+}
+
+static void null_queue_bio(struct request_queue *q, struct bio *bio)
+{
+ struct nullb *nullb = q->queuedata;
+ struct nullb_queue *nq = nullb_to_queue(nullb);
+ struct nullb_cmd *cmd;
+
+ cmd = alloc_cmd(nq, 1);
+ cmd->bio = bio;
+
+ null_handle_cmd(cmd);
+}
+
+static int null_rq_prep_fn(struct request_queue *q, struct request *req)
+{
+ struct nullb *nullb = q->queuedata;
+ struct nullb_queue *nq = nullb_to_queue(nullb);
+ struct nullb_cmd *cmd;
+
+ cmd = alloc_cmd(nq, 0);
+ if (cmd) {
+ cmd->rq = req;
+ req->special = cmd;
+ return BLKPREP_OK;
+ }
+
+ return BLKPREP_DEFER;
+}
+
+static void null_request_fn(struct request_queue *q)
+{
+ struct request *rq;
+
+ while ((rq = blk_fetch_request(q)) != NULL) {
+ struct nullb_cmd *cmd = rq->special;
+
+ spin_unlock_irq(q->queue_lock);
+ null_handle_cmd(cmd);
+ spin_lock_irq(q->queue_lock);
+ }
+}
+
+static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+ struct nullb_cmd *cmd = rq->special;
+
+ cmd->rq = rq;
+ cmd->nq = hctx->driver_data;
+
+ null_handle_cmd(cmd);
+ return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
+{
+ return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
+ hctx_index);
+}
+
+static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
+{
+ kfree(hctx);
+}
+
+static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int index)
+{
+ struct nullb *nullb = data;
+ struct nullb_queue *nq = &nullb->queues[index];
+
+ init_waitqueue_head(&nq->wait);
+ nq->queue_depth = nullb->queue_depth;
+ nullb->nr_queues++;
+ hctx->driver_data = nq;
+
+ return 0;
+}
+
+static struct blk_mq_ops null_mq_ops = {
+ .queue_rq = null_queue_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_hctx = null_init_hctx,
+};
+
+static struct blk_mq_reg null_mq_reg = {
+ .ops = &null_mq_ops,
+ .queue_depth = 64,
+ .cmd_size = sizeof(struct nullb_cmd),
+ .flags = BLK_MQ_F_SHOULD_MERGE,
+};
+
+static void null_del_dev(struct nullb *nullb)
+{
+ list_del_init(&nullb->list);
+
+ del_gendisk(nullb->disk);
+ if (queue_mode == NULL_Q_MQ)
+ blk_mq_free_queue(nullb->q);
+ else
+ blk_cleanup_queue(nullb->q);
+ put_disk(nullb->disk);
+ kfree(nullb);
+}
+
+static int null_open(struct block_device *bdev, fmode_t mode)
+{
+ return 0;
+}
+
+static void null_release(struct gendisk *disk, fmode_t mode)
+{
+}
+
+static const struct block_device_operations null_fops = {
+ .owner = THIS_MODULE,
+ .open = null_open,
+ .release = null_release,
+};
+
+static int setup_commands(struct nullb_queue *nq)
+{
+ struct nullb_cmd *cmd;
+ int i, tag_size;
+
+ nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
+ if (!nq->cmds)
+ return 1;
+
+ tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
+ nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
+ if (!nq->tag_map) {
+ kfree(nq->cmds);
+ return 1;
+ }
+
+ for (i = 0; i < nq->queue_depth; i++) {
+ cmd = &nq->cmds[i];
+ INIT_LIST_HEAD(&cmd->list);
+ cmd->ll_list.next = NULL;
+ cmd->tag = -1U;
+ }
+
+ return 0;
+}
+
+static void cleanup_queue(struct nullb_queue *nq)
+{
+ kfree(nq->tag_map);
+ kfree(nq->cmds);
+}
+
+static void cleanup_queues(struct nullb *nullb)
+{
+ int i;
+
+ for (i = 0; i < nullb->nr_queues; i++)
+ cleanup_queue(&nullb->queues[i]);
+
+ kfree(nullb->queues);
+}
+
+static int setup_queues(struct nullb *nullb)
+{
+ struct nullb_queue *nq;
+ int i;
+
+ nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+ if (!nullb->queues)
+ return 1;
+
+ nullb->nr_queues = 0;
+ nullb->queue_depth = hw_queue_depth;
+
+ if (queue_mode == NULL_Q_MQ)
+ return 0;
+
+ for (i = 0; i < submit_queues; i++) {
+ nq = &nullb->queues[i];
+ init_waitqueue_head(&nq->wait);
+ nq->queue_depth = hw_queue_depth;
+ if (setup_commands(nq))
+ break;
+ nullb->nr_queues++;
+ }
+
+ if (i == submit_queues)
+ return 0;
+
+ cleanup_queues(nullb);
+ return 1;
+}
+
+static int null_add_dev(void)
+{
+ struct gendisk *disk;
+ struct nullb *nullb;
+ sector_t size;
+
+ nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
+ if (!nullb)
+ return -ENOMEM;
+
+ spin_lock_init(&nullb->lock);
+
+ if (setup_queues(nullb))
+ goto err;
+
+ if (queue_mode == NULL_Q_MQ) {
+ null_mq_reg.numa_node = home_node;
+ null_mq_reg.queue_depth = hw_queue_depth;
+
+ if (use_per_node_hctx) {
+ null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
+ null_mq_reg.ops->free_hctx = null_free_hctx;
+
+ null_mq_reg.nr_hw_queues = nr_online_nodes;
+ } else {
+ null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
+ null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
+
+ null_mq_reg.nr_hw_queues = submit_queues;
+ }
+
+ nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
+ } else if (queue_mode == NULL_Q_BIO) {
+ nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
+ blk_queue_make_request(nullb->q, null_queue_bio);
+ } else {
+ nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
+ blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
+ if (nullb->q)
+ blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+ }
+
+ if (!nullb->q)
+ goto queue_fail;
+
+ nullb->q->queuedata = nullb;
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
+
+ disk = nullb->disk = alloc_disk_node(1, home_node);
+ if (!disk) {
+queue_fail:
+ if (queue_mode == NULL_Q_MQ)
+ blk_mq_free_queue(nullb->q);
+ else
+ blk_cleanup_queue(nullb->q);
+ cleanup_queues(nullb);
+err:
+ kfree(nullb);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&lock);
+ list_add_tail(&nullb->list, &nullb_list);
+ nullb->index = nullb_indexes++;
+ mutex_unlock(&lock);
+
+ blk_queue_logical_block_size(nullb->q, bs);
+ blk_queue_physical_block_size(nullb->q, bs);
+
+ size = gb * 1024 * 1024 * 1024ULL;
+ sector_div(size, bs);
+ set_capacity(disk, size);
+
+ disk->flags |= GENHD_FL_EXT_DEVT;
+ disk->major = null_major;
+ disk->first_minor = nullb->index;
+ disk->fops = &null_fops;
+ disk->private_data = nullb;
+ disk->queue = nullb->q;
+ sprintf(disk->disk_name, "nullb%d", nullb->index);
+ add_disk(disk);
+ return 0;
+}
+
+static int __init null_init(void)
+{
+ unsigned int i;
+
+#if !defined(CONFIG_SMP) || !defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+ if (irqmode == NULL_IRQ_SOFTIRQ) {
+ pr_warn("null_blk: softirq completions not available.\n");
+ pr_warn("null_blk: using direct completions.\n");
+ irqmode = NULL_IRQ_NONE;
+ }
+#endif
+
+ if (submit_queues > nr_cpu_ids)
+ submit_queues = nr_cpu_ids;
+ else if (!submit_queues)
+ submit_queues = 1;
+
+ mutex_init(&lock);
+
+ /* Initialize a separate list for each CPU for issuing softirqs */
+ for_each_possible_cpu(i) {
+ struct completion_queue *cq = &per_cpu(completion_queues, i);
+
+ init_llist_head(&cq->list);
+
+ if (irqmode != NULL_IRQ_TIMER)
+ continue;
+
+ hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cq->timer.function = null_cmd_timer_expired;
+ }
+
+ null_major = register_blkdev(0, "nullb");
+ if (null_major < 0)
+ return null_major;
+
+ for (i = 0; i < nr_devices; i++) {
+ if (null_add_dev()) {
+ unregister_blkdev(null_major, "nullb");
+ return -EINVAL;
+ }
+ }
+
+ pr_info("null: module loaded\n");
+ return 0;
+}
+
+static void __exit null_exit(void)
+{
+ struct nullb *nullb;
+
+ unregister_blkdev(null_major, "nullb");
+
+ mutex_lock(&lock);
+ while (!list_empty(&nullb_list)) {
+ nullb = list_entry(nullb_list.next, struct nullb, list);
+ null_del_dev(nullb);
+ }
+ mutex_unlock(&lock);
+}
+
+module_init(null_init);
+module_exit(null_exit);
+
+MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index da52092980e2..1f14ac403945 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -441,104 +441,19 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
return total_len;
}
-struct nvme_bio_pair {
- struct bio b1, b2, *parent;
- struct bio_vec *bv1, *bv2;
- int err;
- atomic_t cnt;
-};
-
-static void nvme_bio_pair_endio(struct bio *bio, int err)
-{
- struct nvme_bio_pair *bp = bio->bi_private;
-
- if (err)
- bp->err = err;
-
- if (atomic_dec_and_test(&bp->cnt)) {
- bio_endio(bp->parent, bp->err);
- kfree(bp->bv1);
- kfree(bp->bv2);
- kfree(bp);
- }
-}
-
-static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
- int len, int offset)
-{
- struct nvme_bio_pair *bp;
-
- BUG_ON(len > bio->bi_size);
- BUG_ON(idx > bio->bi_vcnt);
-
- bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
- if (!bp)
- return NULL;
- bp->err = 0;
-
- bp->b1 = *bio;
- bp->b2 = *bio;
-
- bp->b1.bi_size = len;
- bp->b2.bi_size -= len;
- bp->b1.bi_vcnt = idx;
- bp->b2.bi_idx = idx;
- bp->b2.bi_sector += len >> 9;
-
- if (offset) {
- bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
- GFP_ATOMIC);
- if (!bp->bv1)
- goto split_fail_1;
-
- bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
- GFP_ATOMIC);
- if (!bp->bv2)
- goto split_fail_2;
-
- memcpy(bp->bv1, bio->bi_io_vec,
- bio->bi_max_vecs * sizeof(struct bio_vec));
- memcpy(bp->bv2, bio->bi_io_vec,
- bio->bi_max_vecs * sizeof(struct bio_vec));
-
- bp->b1.bi_io_vec = bp->bv1;
- bp->b2.bi_io_vec = bp->bv2;
- bp->b2.bi_io_vec[idx].bv_offset += offset;
- bp->b2.bi_io_vec[idx].bv_len -= offset;
- bp->b1.bi_io_vec[idx].bv_len = offset;
- bp->b1.bi_vcnt++;
- } else
- bp->bv1 = bp->bv2 = NULL;
-
- bp->b1.bi_private = bp;
- bp->b2.bi_private = bp;
-
- bp->b1.bi_end_io = nvme_bio_pair_endio;
- bp->b2.bi_end_io = nvme_bio_pair_endio;
-
- bp->parent = bio;
- atomic_set(&bp->cnt, 2);
-
- return bp;
-
- split_fail_2:
- kfree(bp->bv1);
- split_fail_1:
- kfree(bp);
- return NULL;
-}
-
static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
- int idx, int len, int offset)
+ int len)
{
- struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
- if (!bp)
+ struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
+ if (!split)
return -ENOMEM;
+ bio_chain(split, bio);
+
if (bio_list_empty(&nvmeq->sq_cong))
add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- bio_list_add(&nvmeq->sq_cong, &bp->b1);
- bio_list_add(&nvmeq->sq_cong, &bp->b2);
+ bio_list_add(&nvmeq->sq_cong, split);
+ bio_list_add(&nvmeq->sq_cong, bio);
return 0;
}
@@ -550,41 +465,44 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
- struct bio_vec *bvec, *bvprv = NULL;
+ struct bio_vec bvec, bvprv;
+ struct bvec_iter iter;
struct scatterlist *sg = NULL;
- int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+ int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+ int first = 1;
if (nvmeq->dev->stripe_size)
split_len = nvmeq->dev->stripe_size -
- ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
+ ((bio->bi_iter.bi_sector << 9) &
+ (nvmeq->dev->stripe_size - 1));
sg_init_table(iod->sg, psegs);
- bio_for_each_segment(bvec, bio, i) {
- if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
- sg->length += bvec->bv_len;
+ bio_for_each_segment(bvec, bio, iter) {
+ if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
+ sg->length += bvec.bv_len;
} else {
- if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
- return nvme_split_and_submit(bio, nvmeq, i,
- length, 0);
+ if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
+ return nvme_split_and_submit(bio, nvmeq,
+ length);
sg = sg ? sg + 1 : iod->sg;
- sg_set_page(sg, bvec->bv_page, bvec->bv_len,
- bvec->bv_offset);
+ sg_set_page(sg, bvec.bv_page,
+ bvec.bv_len, bvec.bv_offset);
nsegs++;
}
- if (split_len - length < bvec->bv_len)
- return nvme_split_and_submit(bio, nvmeq, i, split_len,
- split_len - length);
- length += bvec->bv_len;
+ if (split_len - length < bvec.bv_len)
+ return nvme_split_and_submit(bio, nvmeq, split_len);
+ length += bvec.bv_len;
bvprv = bvec;
+ first = 0;
}
iod->nents = nsegs;
sg_mark_end(sg);
if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
return -ENOMEM;
- BUG_ON(length != bio->bi_size);
+ BUG_ON(length != bio->bi_iter.bi_size);
return length;
}
@@ -608,8 +526,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
iod->npages = 0;
range->cattr = cpu_to_le32(0);
- range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
- range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+ range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
+ range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
@@ -674,7 +592,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
}
result = -ENOMEM;
- iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+ iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
if (!iod)
goto nomem;
iod->private = bio;
@@ -723,7 +641,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
GFP_ATOMIC);
- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+ cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
@@ -1949,12 +1867,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (pci_request_selected_regions(pdev, bars, "nvme"))
goto disable_pci;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- else
- goto disable_pci;
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ goto disable;
pci_set_drvdata(pdev, dev);
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
@@ -2168,6 +2083,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dev->namespaces);
dev->pci_dev = pdev;
+
result = nvme_set_instance(dev);
if (result)
goto free;
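nvme_split_and_submit() now leans on bio_split() plus bio_chain() instead of the hand-rolled nvme_bio_pair, so the parent bio only completes once every chained piece has finished. A plain-C sketch of that remaining-count idea; the kernel keeps the counter atomically inside the bio rather than in a side structure like this:

#include <stdio.h>

struct io {
        int remaining;  /* the kernel uses an atomic counter in the bio */
        int err;
};

static void io_endio(struct io *parent, int err)
{
        if (err)
                parent->err = err;      /* first error sticks for the whole chain */
        if (--parent->remaining == 0)
                printf("parent complete, err=%d\n", parent->err);
}

int main(void)
{
        struct io parent = { .remaining = 1, .err = 0 };

        parent.remaining++;     /* bio_chain(): the split child pins the parent */
        io_endio(&parent, 0);   /* the split piece completes */
        io_endio(&parent, 0);   /* the parent's remainder completes */
        return 0;
}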
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 56188475cfd3..1bf1f22f6442 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -665,7 +665,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
for (;;) {
tmp = rb_entry(n, struct pkt_rb_node, rb_node);
- if (s <= tmp->bio->bi_sector)
+ if (s <= tmp->bio->bi_iter.bi_sector)
next = n->rb_left;
else
next = n->rb_right;
@@ -674,12 +674,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
n = next;
}
- if (s > tmp->bio->bi_sector) {
+ if (s > tmp->bio->bi_iter.bi_sector) {
tmp = pkt_rbtree_next(tmp);
if (!tmp)
return NULL;
}
- BUG_ON(s > tmp->bio->bi_sector);
+ BUG_ON(s > tmp->bio->bi_iter.bi_sector);
return tmp;
}
@@ -690,13 +690,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
{
struct rb_node **p = &pd->bio_queue.rb_node;
struct rb_node *parent = NULL;
- sector_t s = node->bio->bi_sector;
+ sector_t s = node->bio->bi_iter.bi_sector;
struct pkt_rb_node *tmp;
while (*p) {
parent = *p;
tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
- if (s < tmp->bio->bi_sector)
+ if (s < tmp->bio->bi_iter.bi_sector)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -871,7 +871,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
spin_lock(&pd->iosched.lock);
bio = bio_list_peek(&pd->iosched.write_queue);
spin_unlock(&pd->iosched.lock);
- if (bio && (bio->bi_sector == pd->iosched.last_write))
+ if (bio && (bio->bi_iter.bi_sector ==
+ pd->iosched.last_write))
need_write_seek = 0;
if (need_write_seek && reads_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -902,7 +903,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
continue;
if (bio_data_dir(bio) == READ)
- pd->iosched.successive_reads += bio->bi_size >> 10;
+ pd->iosched.successive_reads +=
+ bio->bi_iter.bi_size >> 10;
else {
pd->iosched.successive_reads = 0;
pd->iosched.last_write = bio_end_sector(bio);
@@ -992,7 +994,7 @@ static void pkt_end_io_read(struct bio *bio, int err)
pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
bio, (unsigned long long)pkt->sector,
- (unsigned long long)bio->bi_sector, err);
+ (unsigned long long)bio->bi_iter.bi_sector, err);
if (err)
atomic_inc(&pkt->io_errors);
@@ -1040,8 +1042,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
memset(written, 0, sizeof(written));
spin_lock(&pkt->lock);
bio_list_for_each(bio, &pkt->orig_bios) {
- int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
- int num_frames = bio->bi_size / CD_FRAMESIZE;
+ int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+ (CD_FRAMESIZE >> 9);
+ int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
BUG_ON(first_frame < 0);
BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1067,7 +1070,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
bio = pkt->r_bios[f];
bio_reset(bio);
- bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+ bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_bdev = pd->bdev;
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1164,8 +1167,8 @@ static int pkt_start_recovery(struct packet_data *pkt)
bio_reset(pkt->bio);
pkt->bio->bi_bdev = pd->bdev;
pkt->bio->bi_rw = REQ_WRITE;
- pkt->bio->bi_sector = new_sector;
- pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+ pkt->bio->bi_iter.bi_sector = new_sector;
+ pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
pkt->bio->bi_vcnt = pkt->frames;
pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1227,7 +1230,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
node = first_node;
while (node) {
bio = node->bio;
- zone = get_zone(bio->bi_sector, pd);
+ zone = get_zone(bio->bi_iter.bi_sector, pd);
list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
if (p->sector == zone) {
bio = NULL;
@@ -1266,14 +1269,14 @@ try_next_bio:
pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
bio = node->bio;
- pkt_dbg(2, pd, "found zone=%llx\n",
- (unsigned long long)get_zone(bio->bi_sector, pd));
- if (get_zone(bio->bi_sector, pd) != zone)
+ pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+ get_zone(bio->bi_iter.bi_sector, pd));
+ if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
break;
pkt_rbtree_erase(pd, node);
spin_lock(&pkt->lock);
bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+ pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
spin_unlock(&pkt->lock);
}
/* check write congestion marks, and if bio_queue_size is
@@ -1307,7 +1310,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
bio_reset(pkt->w_bio);
- pkt->w_bio->bi_sector = pkt->sector;
+ pkt->w_bio->bi_iter.bi_sector = pkt->sector;
pkt->w_bio->bi_bdev = pd->bdev;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
@@ -2349,75 +2352,29 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err)
pkt_bio_finished(pd);
}
-static void pkt_make_request(struct request_queue *q, struct bio *bio)
+static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
- struct pktcdvd_device *pd;
- char b[BDEVNAME_SIZE];
+ struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
+ struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
+
+ psd->pd = pd;
+ psd->bio = bio;
+ cloned_bio->bi_bdev = pd->bdev;
+ cloned_bio->bi_private = psd;
+ cloned_bio->bi_end_io = pkt_end_io_read_cloned;
+ pd->stats.secs_r += bio_sectors(bio);
+ pkt_queue_bio(pd, cloned_bio);
+}
+
+static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
+{
+ struct pktcdvd_device *pd = q->queuedata;
sector_t zone;
struct packet_data *pkt;
int was_empty, blocked_bio;
struct pkt_rb_node *node;
- pd = q->queuedata;
- if (!pd) {
- pr_err("%s incorrect request queue\n",
- bdevname(bio->bi_bdev, b));
- goto end_io;
- }
-
- /*
- * Clone READ bios so we can have our own bi_end_io callback.
- */
- if (bio_data_dir(bio) == READ) {
- struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
- struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
-
- psd->pd = pd;
- psd->bio = bio;
- cloned_bio->bi_bdev = pd->bdev;
- cloned_bio->bi_private = psd;
- cloned_bio->bi_end_io = pkt_end_io_read_cloned;
- pd->stats.secs_r += bio_sectors(bio);
- pkt_queue_bio(pd, cloned_bio);
- return;
- }
-
- if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
- pkt_notice(pd, "WRITE for ro device (%llu)\n",
- (unsigned long long)bio->bi_sector);
- goto end_io;
- }
-
- if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
- pkt_err(pd, "wrong bio size\n");
- goto end_io;
- }
-
- blk_queue_bounce(q, &bio);
-
- zone = get_zone(bio->bi_sector, pd);
- pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
- (unsigned long long)bio->bi_sector,
- (unsigned long long)bio_end_sector(bio));
-
- /* Check if we have to split the bio */
- {
- struct bio_pair *bp;
- sector_t last_zone;
- int first_sectors;
-
- last_zone = get_zone(bio_end_sector(bio) - 1, pd);
- if (last_zone != zone) {
- BUG_ON(last_zone != zone + pd->settings.size);
- first_sectors = last_zone - bio->bi_sector;
- bp = bio_split(bio, first_sectors);
- BUG_ON(!bp);
- pkt_make_request(q, &bp->bio1);
- pkt_make_request(q, &bp->bio2);
- bio_pair_release(bp);
- return;
- }
- }
+ zone = get_zone(bio->bi_iter.bi_sector, pd);
/*
* If we find a matching packet in state WAITING or READ_WAIT, we can
@@ -2431,7 +2388,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
if ((pkt->state == PACKET_WAITING_STATE) ||
(pkt->state == PACKET_READ_WAIT_STATE)) {
bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+ pkt->write_size +=
+ bio->bi_iter.bi_size / CD_FRAMESIZE;
if ((pkt->write_size >= pkt->frames) &&
(pkt->state == PACKET_WAITING_STATE)) {
atomic_inc(&pkt->run_sm);
@@ -2490,6 +2448,64 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
*/
wake_up(&pd->wqueue);
}
+}
+
+static void pkt_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct pktcdvd_device *pd;
+ char b[BDEVNAME_SIZE];
+ struct bio *split;
+
+ pd = q->queuedata;
+ if (!pd) {
+ pr_err("%s incorrect request queue\n",
+ bdevname(bio->bi_bdev, b));
+ goto end_io;
+ }
+
+ pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
+ (unsigned long long)bio->bi_iter.bi_sector,
+ (unsigned long long)bio_end_sector(bio));
+
+ /*
+ * Clone READ bios so we can have our own bi_end_io callback.
+ */
+ if (bio_data_dir(bio) == READ) {
+ pkt_make_request_read(pd, bio);
+ return;
+ }
+
+ if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
+ pkt_notice(pd, "WRITE for ro device (%llu)\n",
+ (unsigned long long)bio->bi_iter.bi_sector);
+ goto end_io;
+ }
+
+ if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
+ pkt_err(pd, "wrong bio size\n");
+ goto end_io;
+ }
+
+ blk_queue_bounce(q, &bio);
+
+ do {
+ sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
+ sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
+
+ if (last_zone != zone) {
+ BUG_ON(last_zone != zone + pd->settings.size);
+
+ split = bio_split(bio, last_zone -
+ bio->bi_iter.bi_sector,
+ GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
+
+ pkt_make_request_write(q, split);
+ } while (split != bio);
+
return;
end_io:
bio_io_error(bio);
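pkt_make_request() now splits a write at packet-zone boundaries in a loop built on bio_split() and bio_chain(), rather than recursing through a bio_pair. The arithmetic of that loop, sketched with plain sector counts and an assumed power-of-two zone size:

#include <stdio.h>

static unsigned long long get_zone(unsigned long long sector,
                                   unsigned long long zone_size)
{
        return sector - (sector % zone_size);   /* the driver masks with ~(size - 1) */
}

int main(void)
{
        unsigned long long sector = 120, len = 100, zone_size = 64;

        while (len) {
                unsigned long long zone = get_zone(sector, zone_size);
                unsigned long long room = zone + zone_size - sector;
                unsigned long long piece = len < room ? len : room;

                /* each piece stays inside one zone, like each chained split bio */
                printf("submit %llu sectors at %llu (zone %llu)\n",
                       piece, sector, zone);
                sector += piece;
                len -= piece;
        }
        return 0;
}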
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index d754a88d7585..c120d70d3fb3 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -94,26 +94,25 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
{
unsigned int offset = 0;
struct req_iterator iter;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
unsigned int i = 0;
size_t size;
void *buf;
rq_for_each_segment(bvec, req, iter) {
unsigned long flags;
- dev_dbg(&dev->sbd.core,
- "%s:%u: bio %u: %u segs %u sectors from %lu\n",
- __func__, __LINE__, i, bio_segments(iter.bio),
- bio_sectors(iter.bio), iter.bio->bi_sector);
+ dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+ __func__, __LINE__, i, bio_sectors(iter.bio),
+ iter.bio->bi_iter.bi_sector);
- size = bvec->bv_len;
- buf = bvec_kmap_irq(bvec, &flags);
+ size = bvec.bv_len;
+ buf = bvec_kmap_irq(&bvec, &flags);
if (gather)
memcpy(dev->bounce_buf+offset, buf, size);
else
memcpy(buf, dev->bounce_buf+offset, size);
offset += size;
- flush_kernel_dcache_page(bvec->bv_page);
+ flush_kernel_dcache_page(bvec.bv_page);
bvec_kunmap_irq(buf, &flags);
i++;
}
@@ -130,7 +129,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
#ifdef DEBUG
unsigned int n = 0;
- struct bio_vec *bv;
+ struct bio_vec bv;
struct req_iterator iter;
rq_for_each_segment(bv, req, iter)
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 06a2e53e5f37..e473c2eec6f5 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -555,14 +555,14 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
const char *op = write ? "write" : "read";
loff_t offset = bio->bi_sector << 9;
int error = 0;
- struct bio_vec *bvec;
- unsigned int i;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
struct bio *next;
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
/* PS3 is ppc64, so we don't handle highmem */
- char *ptr = page_address(bvec->bv_page) + bvec->bv_offset;
- size_t len = bvec->bv_len, retlen;
+ char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
+ size_t len = bvec.bv_len, retlen;
dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op,
len, offset);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index cb1db2979d3d..3624368b910d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1109,23 +1109,23 @@ static void bio_chain_put(struct bio *chain)
*/
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
+ struct bvec_iter iter;
unsigned long flags;
void *buf;
- int i;
int pos = 0;
while (chain) {
- bio_for_each_segment(bv, chain, i) {
- if (pos + bv->bv_len > start_ofs) {
+ bio_for_each_segment(bv, chain, iter) {
+ if (pos + bv.bv_len > start_ofs) {
int remainder = max(start_ofs - pos, 0);
- buf = bvec_kmap_irq(bv, &flags);
+ buf = bvec_kmap_irq(&bv, &flags);
memset(buf + remainder, 0,
- bv->bv_len - remainder);
- flush_dcache_page(bv->bv_page);
+ bv.bv_len - remainder);
+ flush_dcache_page(bv.bv_page);
bvec_kunmap_irq(buf, &flags);
}
- pos += bv->bv_len;
+ pos += bv.bv_len;
}
chain = chain->bi_next;
@@ -1173,74 +1173,14 @@ static struct bio *bio_clone_range(struct bio *bio_src,
unsigned int len,
gfp_t gfpmask)
{
- struct bio_vec *bv;
- unsigned int resid;
- unsigned short idx;
- unsigned int voff;
- unsigned short end_idx;
- unsigned short vcnt;
struct bio *bio;
- /* Handle the easy case for the caller */
-
- if (!offset && len == bio_src->bi_size)
- return bio_clone(bio_src, gfpmask);
-
- if (WARN_ON_ONCE(!len))
- return NULL;
- if (WARN_ON_ONCE(len > bio_src->bi_size))
- return NULL;
- if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
- return NULL;
-
- /* Find first affected segment... */
-
- resid = offset;
- bio_for_each_segment(bv, bio_src, idx) {
- if (resid < bv->bv_len)
- break;
- resid -= bv->bv_len;
- }
- voff = resid;
-
- /* ...and the last affected segment */
-
- resid += len;
- __bio_for_each_segment(bv, bio_src, end_idx, idx) {
- if (resid <= bv->bv_len)
- break;
- resid -= bv->bv_len;
- }
- vcnt = end_idx - idx + 1;
-
- /* Build the clone */
-
- bio = bio_alloc(gfpmask, (unsigned int) vcnt);
+ bio = bio_clone(bio_src, gfpmask);
if (!bio)
return NULL; /* ENOMEM */
- bio->bi_bdev = bio_src->bi_bdev;
- bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
- bio->bi_rw = bio_src->bi_rw;
- bio->bi_flags |= 1 << BIO_CLONED;
-
- /*
- * Copy over our part of the bio_vec, then update the first
- * and last (or only) entries.
- */
- memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
- vcnt * sizeof (struct bio_vec));
- bio->bi_io_vec[0].bv_offset += voff;
- if (vcnt > 1) {
- bio->bi_io_vec[0].bv_len -= voff;
- bio->bi_io_vec[vcnt - 1].bv_len = resid;
- } else {
- bio->bi_io_vec[0].bv_len = len;
- }
-
- bio->bi_vcnt = vcnt;
- bio->bi_size = len;
- bio->bi_idx = 0;
+ bio_advance(bio, offset);
+ bio->bi_iter.bi_size = len;
return bio;
}
@@ -1271,7 +1211,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
/* Build up a chain of clone bios up to the limit */
- if (!bi || off >= bi->bi_size || !len)
+ if (!bi || off >= bi->bi_iter.bi_size || !len)
return NULL; /* Nothing to clone */
end = &chain;
@@ -1283,7 +1223,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
rbd_warn(NULL, "bio_chain exhausted with %u left", len);
goto out_err; /* EINVAL; ran out of bio's */
}
- bi_size = min_t(unsigned int, bi->bi_size - off, len);
+ bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
bio = bio_clone_range(bi, off, bi_size, gfpmask);
if (!bio)
goto out_err; /* ENOMEM */
@@ -1292,7 +1232,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
end = &bio->bi_next;
off += bi_size;
- if (off == bi->bi_size) {
+ if (off == bi->bi_iter.bi_size) {
bi = bi->bi_next;
off = 0;
}
@@ -2186,7 +2126,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
if (type == OBJ_REQUEST_BIO) {
bio_list = data_desc;
- rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
+ rbd_assert(img_offset ==
+ bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
} else {
rbd_assert(type == OBJ_REQUEST_PAGES);
pages = data_desc;
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 6e85e21445eb..a8de2eec6ff3 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -654,7 +654,8 @@ static void rsxx_eeh_failure(struct pci_dev *dev)
for (i = 0; i < card->n_targets; i++) {
spin_lock_bh(&card->ctrl[i].queue_lock);
cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
- &card->ctrl[i].queue);
+ &card->ctrl[i].queue,
+ COMPLETE_DMA);
spin_unlock_bh(&card->ctrl[i].queue_lock);
cnt += rsxx_dma_cancel(&card->ctrl[i]);
@@ -748,10 +749,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
card->eeh_state = 0;
- st = rsxx_eeh_remap_dmas(card);
- if (st)
- goto failed_remap_dmas;
-
spin_lock_irqsave(&card->irq_lock, flags);
if (card->n_targets & RSXX_MAX_TARGETS)
rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
@@ -778,7 +775,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
return PCI_ERS_RESULT_RECOVERED;
failed_hw_buffers_init:
-failed_remap_dmas:
for (i = 0; i < card->n_targets; i++) {
if (card->ctrl[i].status.buf)
pci_free_consistent(card->dev,
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index d7af441880be..2839d37e5af7 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
if (!card)
goto req_err;
- if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk))
+ if (bio_end_sector(bio) > get_capacity(card->gendisk))
goto req_err;
if (unlikely(card->halt)) {
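bio_end_sector() in the bound check above is the stock helper for the arithmetic the removed line spelled out by hand; on the converted kernels it is equivalent to

	bio->bi_iter.bi_sector + (bio->bi_iter.bi_size >> 9)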
@@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
goto req_err;
}
- if (bio->bi_size == 0) {
+ if (bio->bi_iter.bi_size == 0) {
dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
goto req_err;
}
@@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
bio_data_dir(bio) ? 'W' : 'R', bio_meta,
- (u64)bio->bi_sector << 9, bio->bi_size);
+ (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
bio_dma_done_cb, bio_meta);
@@ -295,13 +295,15 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
return -ENOMEM;
}
- blk_size = card->config.data.block_size;
+ if (card->config_valid) {
+ blk_size = card->config.data.block_size;
+ blk_queue_dma_alignment(card->queue, blk_size - 1);
+ blk_queue_logical_block_size(card->queue, blk_size);
+ }
blk_queue_make_request(card->queue, rsxx_make_request);
blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
- blk_queue_dma_alignment(card->queue, blk_size - 1);
blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
- blk_queue_logical_block_size(card->queue, blk_size);
blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index bed32f16b084..cf8cd293abb5 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -221,6 +221,21 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
}
/*----------------- RSXX DMA Handling -------------------*/
+static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
+{
+ if (dma->cmd != HW_CMD_BLK_DISCARD) {
+ if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+ pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+ get_dma_size(dma),
+ dma->cmd == HW_CMD_BLK_WRITE ?
+ PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+ }
+ }
+
+ kmem_cache_free(rsxx_dma_pool, dma);
+}
+
static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
struct rsxx_dma *dma,
unsigned int status)
@@ -232,21 +247,14 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
if (status & DMA_CANCELLED)
ctrl->stats.dma_cancelled++;
- if (dma->dma_addr)
- pci_unmap_page(ctrl->card->dev, dma->dma_addr,
- get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
-
if (dma->cb)
dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
- kmem_cache_free(rsxx_dma_pool, dma);
+ rsxx_free_dma(ctrl, dma);
}
int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
- struct list_head *q)
+ struct list_head *q, unsigned int done)
{
struct rsxx_dma *dma;
struct rsxx_dma *tmp;
@@ -254,7 +262,10 @@ int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
list_for_each_entry_safe(dma, tmp, q, list) {
list_del(&dma->list);
- rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+ if (done & COMPLETE_DMA)
+ rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+ else
+ rsxx_free_dma(ctrl, dma);
cnt++;
}
@@ -370,7 +381,7 @@ static void dma_engine_stalled(unsigned long data)
/* Clean up the DMA queue */
spin_lock(&ctrl->queue_lock);
- cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+ cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
spin_unlock(&ctrl->queue_lock);
cnt += rsxx_dma_cancel(ctrl);
@@ -388,6 +399,7 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
int tag;
int cmds_pending = 0;
struct hw_cmd *hw_cmd_buf;
+ int dir;
hw_cmd_buf = ctrl->cmd.buf;
@@ -424,6 +436,31 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
continue;
}
+ if (dma->cmd != HW_CMD_BLK_DISCARD) {
+ if (dma->cmd == HW_CMD_BLK_WRITE)
+ dir = PCI_DMA_TODEVICE;
+ else
+ dir = PCI_DMA_FROMDEVICE;
+
+ /*
+ * The function pci_map_page is placed here because we
+ * can only, by design, issue up to 255 commands to the
+ * hardware at one time per DMA channel. So the maximum
+ * amount of mapped memory would be 255 * 4 channels *
+ * 4096 Bytes which is less than 2GB, the limit of a x8
+ * Non-HWWD PCIe slot. This way the pci_map_page
+ * function should never fail because of a lack of
+ * mappable memory.
+ */
+ dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
+ dma->pg_off, dma->sub_page.cnt << 9, dir);
+ if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+ push_tracker(ctrl->trackers, tag);
+ rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+ continue;
+ }
+ }
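For reference, the worst case quoted in the comment works out to

	255 commands * 4 channels * 4096 bytes = 4,177,920 bytes (about 4 MiB)

per card, far below the stated 2 GB limit, which is why pci_map_page() at issue time is not expected to fail for lack of mappable memory; the error branch above still cancels the DMA defensively.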
+
set_tracker_dma(ctrl->trackers, tag, dma);
hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
hw_cmd_buf[ctrl->cmd.idx].tag = tag;
@@ -620,14 +657,6 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
if (!dma)
return -ENOMEM;
- dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
- dir ? PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- if (!dma->dma_addr) {
- kmem_cache_free(rsxx_dma_pool, dma);
- return -ENOMEM;
- }
-
dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
dma->laddr = laddr;
dma->sub_page.off = (dma_off >> 9);
@@ -655,7 +684,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
void *cb_data)
{
struct list_head dma_list[RSXX_MAX_TARGETS];
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
unsigned long long addr8;
unsigned int laddr;
unsigned int bv_len;
@@ -667,7 +697,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
int st;
int i;
- addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+ addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
atomic_set(n_dmas, 0);
for (i = 0; i < card->n_targets; i++) {
@@ -676,7 +706,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
}
if (bio->bi_rw & REQ_DISCARD) {
- bv_len = bio->bi_size;
+ bv_len = bio->bi_iter.bi_size;
while (bv_len > 0) {
tgt = rsxx_get_dma_tgt(card, addr8);
@@ -693,9 +723,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
bv_len -= RSXX_HW_BLK_SIZE;
}
} else {
- bio_for_each_segment(bvec, bio, i) {
- bv_len = bvec->bv_len;
- bv_off = bvec->bv_offset;
+ bio_for_each_segment(bvec, bio, iter) {
+ bv_len = bvec.bv_len;
+ bv_off = bvec.bv_offset;
while (bv_len > 0) {
tgt = rsxx_get_dma_tgt(card, addr8);
@@ -707,7 +737,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
st = rsxx_queue_dma(card, &dma_list[tgt],
bio_data_dir(bio),
dma_off, dma_len,
- laddr, bvec->bv_page,
+ laddr, bvec.bv_page,
bv_off, cb, cb_data);
if (st)
goto bvec_err;
@@ -736,11 +766,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
return 0;
bvec_err:
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_bh(&card->ctrl[i].queue_lock);
- rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
- spin_unlock_bh(&card->ctrl[i].queue_lock);
- }
+ for (i = 0; i < card->n_targets; i++)
+ rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
+ FREE_DMA);
return st;
}
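The switch from a struct bio_vec pointer plus integer index to a by-value struct bio_vec and a struct bvec_iter is the immutable-biovec iteration style; a minimal sketch of the pattern, with process() standing in for whatever per-segment work a driver does:

	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter) {
		/* bvec is a copy, so its fields are accessed with '.', not '->' */
		process(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
	}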
@@ -990,7 +1018,7 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
/* Clean up the DMA queue */
spin_lock_bh(&ctrl->queue_lock);
- rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+ rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
spin_unlock_bh(&ctrl->queue_lock);
rsxx_dma_cancel(ctrl);
@@ -1032,6 +1060,14 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
else
card->ctrl[i].stats.reads_issued--;
+ if (dma->cmd != HW_CMD_BLK_DISCARD) {
+ pci_unmap_page(card->dev, dma->dma_addr,
+ get_dma_size(dma),
+ dma->cmd == HW_CMD_BLK_WRITE ?
+ PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+ }
+
list_add_tail(&dma->list, &issued_dmas[i]);
push_tracker(card->ctrl[i].trackers, j);
cnt++;
@@ -1043,15 +1079,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
card->ctrl[i].stats.sw_q_depth += cnt;
card->ctrl[i].e_cnt = 0;
-
- list_for_each_entry(dma, &card->ctrl[i].queue, list) {
- if (dma->dma_addr)
- pci_unmap_page(card->dev, dma->dma_addr,
- get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- }
spin_unlock_bh(&card->ctrl[i].queue_lock);
}
@@ -1060,31 +1087,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
return 0;
}
-int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
-{
- struct rsxx_dma *dma;
- int i;
-
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_bh(&card->ctrl[i].queue_lock);
- list_for_each_entry(dma, &card->ctrl[i].queue, list) {
- dma->dma_addr = pci_map_page(card->dev, dma->page,
- dma->pg_off, get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- if (!dma->dma_addr) {
- spin_unlock_bh(&card->ctrl[i].queue_lock);
- kmem_cache_free(rsxx_dma_pool, dma);
- return -ENOMEM;
- }
- }
- spin_unlock_bh(&card->ctrl[i].queue_lock);
- }
-
- return 0;
-}
-
int rsxx_dma_init(void)
{
rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index 5ad5055a4104..6bbc64d0f690 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -52,7 +52,7 @@ struct proc_cmd;
#define RS70_PCI_REV_SUPPORTED 4
#define DRIVER_NAME "rsxx"
-#define DRIVER_VERSION "4.0"
+#define DRIVER_VERSION "4.0.3.2516"
/* Block size is 4096 */
#define RSXX_HW_BLK_SHIFT 12
@@ -345,6 +345,11 @@ enum rsxx_creg_stat {
CREG_STAT_TAG_MASK = 0x0000ff00,
};
+enum rsxx_dma_finish {
+ FREE_DMA = 0x0,
+ COMPLETE_DMA = 0x1,
+};
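The new done argument to rsxx_cleanup_dma_queue() selects between the two teardown paths added above: COMPLETE_DMA cancels each entry through rsxx_complete_dma(), so the completion callback fires with DMA_CANCELLED, while FREE_DMA only releases the descriptor, as in the bvec_err unwind where the bio is failed as a whole. The two call forms used in this patch:

	rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
	rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i], FREE_DMA);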
+
static inline unsigned int CREG_DATA(int N)
{
return CREG_DATA0 + (N << 2);
@@ -379,7 +384,9 @@ typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
int rsxx_dma_setup(struct rsxx_cardinfo *card);
void rsxx_dma_destroy(struct rsxx_cardinfo *card);
int rsxx_dma_init(void);
-int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, struct list_head *q);
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
+ struct list_head *q,
+ unsigned int done);
int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
void rsxx_dma_cleanup(void);
void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
new file mode 100644
index 000000000000..5dc5b39e5b85
--- /dev/null
+++ b/drivers/block/skd_main.c
@@ -0,0 +1,5473 @@
+/* Copyright 2012 STEC, Inc.
+ *
+ * This file is licensed under the terms of the 3-clause
+ * BSD License (http://opensource.org/licenses/BSD-3-Clause)
+ * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
+ * at your option. Both licenses are also available in the LICENSE file
+ * distributed with this project. This file may not be copied, modified,
+ * or distributed except in accordance with those terms.
+ * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
+ * Initial Driver Design!
+ * Thomas Swann <tswann@stec-inc.com>
+ * Interrupt handling.
+ * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
+ * biomode implementation.
+ * Akhil Bhansali <abhansali@stec-inc.com>
+ * Added support for DISCARD / FLUSH and FUA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/hdreg.h>
+#include <linux/dma-mapping.h>
+#include <linux/completion.h>
+#include <linux/scatterlist.h>
+#include <linux/version.h>
+#include <linux/err.h>
+#include <linux/aer.h>
+#include <linux/ctype.h>
+#include <linux/wait.h>
+#include <linux/uio.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/sg.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <asm-generic/unaligned.h>
+
+#include "skd_s1120.h"
+
+static int skd_dbg_level;
+static int skd_isr_comp_limit = 4;
+
+enum {
+ STEC_LINK_2_5GTS = 0,
+ STEC_LINK_5GTS = 1,
+ STEC_LINK_8GTS = 2,
+ STEC_LINK_UNKNOWN = 0xFF
+};
+
+enum {
+ SKD_FLUSH_INITIALIZER,
+ SKD_FLUSH_ZERO_SIZE_FIRST,
+ SKD_FLUSH_DATA_SECOND,
+};
+
+#define SKD_ASSERT(expr) \
+ do { \
+ if (unlikely(!(expr))) { \
+ pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
+ # expr, __FILE__, __func__, __LINE__); \
+ } \
+ } while (0)
+
+#define DRV_NAME "skd"
+#define DRV_VERSION "2.2.1"
+#define DRV_BUILD_ID "0260"
+#define PFX DRV_NAME ": "
+#define DRV_BIN_VERSION 0x100
+#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
+
+MODULE_AUTHOR("bug-reports: support@stec-inc.com");
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
+MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
+
+#define PCI_VENDOR_ID_STEC 0x1B39
+#define PCI_DEVICE_ID_S1120 0x0001
+
+#define SKD_FUA_NV (1 << 1)
+#define SKD_MINORS_PER_DEVICE 16
+
+#define SKD_MAX_QUEUE_DEPTH 200u
+
+#define SKD_PAUSE_TIMEOUT (5 * 1000)
+
+#define SKD_N_FITMSG_BYTES (512u)
+
+#define SKD_N_SPECIAL_CONTEXT 32u
+#define SKD_N_SPECIAL_FITMSG_BYTES (128u)
+
+/* SG elements are 32 bytes, so we can make this 4096 and still be under the
+ * 128KB limit. That allows 4096*4K = 16M xfer size
+ */
+#define SKD_N_SG_PER_REQ_DEFAULT 256u
+#define SKD_N_SG_PER_SPECIAL 256u
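The bound referenced in the comment checks out: 4096 descriptors * 32 bytes = 131,072 bytes = 128 KB of SG list, and 4096 * 4 KB pages = 16 MB of payload per request.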
+
+#define SKD_N_COMPLETION_ENTRY 256u
+#define SKD_N_READ_CAP_BYTES (8u)
+
+#define SKD_N_INTERNAL_BYTES (512u)
+
+/* 5 bits of uniquifier, 0xF800 */
+#define SKD_ID_INCR (0x400)
+#define SKD_ID_TABLE_MASK (3u << 8u)
+#define SKD_ID_RW_REQUEST (0u << 8u)
+#define SKD_ID_INTERNAL (1u << 8u)
+#define SKD_ID_SPECIAL_REQUEST (2u << 8u)
+#define SKD_ID_FIT_MSG (3u << 8u)
+#define SKD_ID_SLOT_MASK 0x00FFu
+#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
+
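Given the masks above, an id splits into a slot index, a table selector, and a generation portion that SKD_ID_INCR bumps each time a context is reused, so a recycled context never presents the id it had on its previous use. Sketch of the decomposition (illustrative only):

	u32 slot  = id & SKD_ID_SLOT_MASK;	/* low 8 bits: index into the table */
	u32 table = id & SKD_ID_TABLE_MASK;	/* bits 8-9: RW, internal, special or FIT msg */
	/* bits above SKD_ID_SLOT_AND_TABLE_MASK change by SKD_ID_INCR on reuse */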
+#define SKD_N_TIMEOUT_SLOT 4u
+#define SKD_TIMEOUT_SLOT_MASK 3u
+
+#define SKD_N_MAX_SECTORS 2048u
+
+#define SKD_MAX_RETRIES 2u
+
+#define SKD_TIMER_SECONDS(seconds) (seconds)
+#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
+
+#define INQ_STD_NBYTES 36
+#define SKD_DISCARD_CDB_LENGTH 24
+
+enum skd_drvr_state {
+ SKD_DRVR_STATE_LOAD,
+ SKD_DRVR_STATE_IDLE,
+ SKD_DRVR_STATE_BUSY,
+ SKD_DRVR_STATE_STARTING,
+ SKD_DRVR_STATE_ONLINE,
+ SKD_DRVR_STATE_PAUSING,
+ SKD_DRVR_STATE_PAUSED,
+ SKD_DRVR_STATE_DRAINING_TIMEOUT,
+ SKD_DRVR_STATE_RESTARTING,
+ SKD_DRVR_STATE_RESUMING,
+ SKD_DRVR_STATE_STOPPING,
+ SKD_DRVR_STATE_FAULT,
+ SKD_DRVR_STATE_DISAPPEARED,
+ SKD_DRVR_STATE_PROTOCOL_MISMATCH,
+ SKD_DRVR_STATE_BUSY_ERASE,
+ SKD_DRVR_STATE_BUSY_SANITIZE,
+ SKD_DRVR_STATE_BUSY_IMMINENT,
+ SKD_DRVR_STATE_WAIT_BOOT,
+ SKD_DRVR_STATE_SYNCING,
+};
+
+#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
+#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
+#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
+#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
+#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
+#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
+#define SKD_START_WAIT_SECONDS 90u
+
+enum skd_req_state {
+ SKD_REQ_STATE_IDLE,
+ SKD_REQ_STATE_SETUP,
+ SKD_REQ_STATE_BUSY,
+ SKD_REQ_STATE_COMPLETED,
+ SKD_REQ_STATE_TIMEOUT,
+ SKD_REQ_STATE_ABORTED,
+};
+
+enum skd_fit_msg_state {
+ SKD_MSG_STATE_IDLE,
+ SKD_MSG_STATE_BUSY,
+};
+
+enum skd_check_status_action {
+ SKD_CHECK_STATUS_REPORT_GOOD,
+ SKD_CHECK_STATUS_REPORT_SMART_ALERT,
+ SKD_CHECK_STATUS_REQUEUE_REQUEST,
+ SKD_CHECK_STATUS_REPORT_ERROR,
+ SKD_CHECK_STATUS_BUSY_IMMINENT,
+};
+
+struct skd_fitmsg_context {
+ enum skd_fit_msg_state state;
+
+ struct skd_fitmsg_context *next;
+
+ u32 id;
+ u16 outstanding;
+
+ u32 length;
+ u32 offset;
+
+ u8 *msg_buf;
+ dma_addr_t mb_dma_address;
+};
+
+struct skd_request_context {
+ enum skd_req_state state;
+
+ struct skd_request_context *next;
+
+ u16 id;
+ u32 fitmsg_id;
+
+ struct request *req;
+ u8 flush_cmd;
+ u8 discard_page;
+
+ u32 timeout_stamp;
+ u8 sg_data_dir;
+ struct scatterlist *sg;
+ u32 n_sg;
+ u32 sg_byte_count;
+
+ struct fit_sg_descriptor *sksg_list;
+ dma_addr_t sksg_dma_address;
+
+ struct fit_completion_entry_v1 completion;
+
+ struct fit_comp_error_info err_info;
+
+};
+#define SKD_DATA_DIR_HOST_TO_CARD 1
+#define SKD_DATA_DIR_CARD_TO_HOST 2
+#define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */
+
+struct skd_special_context {
+ struct skd_request_context req;
+
+ u8 orphaned;
+
+ void *data_buf;
+ dma_addr_t db_dma_address;
+
+ u8 *msg_buf;
+ dma_addr_t mb_dma_address;
+};
+
+struct skd_sg_io {
+ fmode_t mode;
+ void __user *argp;
+
+ struct sg_io_hdr sg;
+
+ u8 cdb[16];
+
+ u32 dxfer_len;
+ u32 iovcnt;
+ struct sg_iovec *iov;
+ struct sg_iovec no_iov_iov;
+
+ struct skd_special_context *skspcl;
+};
+
+typedef enum skd_irq_type {
+ SKD_IRQ_LEGACY,
+ SKD_IRQ_MSI,
+ SKD_IRQ_MSIX
+} skd_irq_type_t;
+
+#define SKD_MAX_BARS 2
+
+struct skd_device {
+ volatile void __iomem *mem_map[SKD_MAX_BARS];
+ resource_size_t mem_phys[SKD_MAX_BARS];
+ u32 mem_size[SKD_MAX_BARS];
+
+ skd_irq_type_t irq_type;
+ u32 msix_count;
+ struct skd_msix_entry *msix_entries;
+
+ struct pci_dev *pdev;
+ int pcie_error_reporting_is_enabled;
+
+ spinlock_t lock;
+ struct gendisk *disk;
+ struct request_queue *queue;
+ struct device *class_dev;
+ int gendisk_on;
+ int sync_done;
+
+ atomic_t device_count;
+ u32 devno;
+ u32 major;
+ char name[32];
+ char isr_name[30];
+
+ enum skd_drvr_state state;
+ u32 drive_state;
+
+ u32 in_flight;
+ u32 cur_max_queue_depth;
+ u32 queue_low_water_mark;
+ u32 dev_max_queue_depth;
+
+ u32 num_fitmsg_context;
+ u32 num_req_context;
+
+ u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
+ u32 timeout_stamp;
+ struct skd_fitmsg_context *skmsg_free_list;
+ struct skd_fitmsg_context *skmsg_table;
+
+ struct skd_request_context *skreq_free_list;
+ struct skd_request_context *skreq_table;
+
+ struct skd_special_context *skspcl_free_list;
+ struct skd_special_context *skspcl_table;
+
+ struct skd_special_context internal_skspcl;
+ u32 read_cap_blocksize;
+ u32 read_cap_last_lba;
+ int read_cap_is_valid;
+ int inquiry_is_valid;
+ u8 inq_serial_num[13]; /*12 chars plus null term */
+ u8 id_str[80]; /* holds a composite name (pci + sernum) */
+
+ u8 skcomp_cycle;
+ u32 skcomp_ix;
+ struct fit_completion_entry_v1 *skcomp_table;
+ struct fit_comp_error_info *skerr_table;
+ dma_addr_t cq_dma_address;
+
+ wait_queue_head_t waitq;
+
+ struct timer_list timer;
+ u32 timer_countdown;
+ u32 timer_substate;
+
+ int n_special;
+ int sgs_per_request;
+ u32 last_mtd;
+
+ u32 proto_ver;
+
+ int dbg_level;
+ u32 connect_time_stamp;
+ int connect_retries;
+#define SKD_MAX_CONNECT_RETRIES 16
+ u32 drive_jiffies;
+
+ u32 timo_slot;
+
+
+ struct work_struct completion_worker;
+};
+
+#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
+#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
+#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
+
+static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
+{
+ u32 val;
+
+ if (likely(skdev->dbg_level < 2))
+ return readl(skdev->mem_map[1] + offset);
+ else {
+ barrier();
+ val = readl(skdev->mem_map[1] + offset);
+ barrier();
+ pr_debug("%s:%s:%d offset %x = %x\n",
+ skdev->name, __func__, __LINE__, offset, val);
+ return val;
+ }
+
+}
+
+static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
+ u32 offset)
+{
+ if (likely(skdev->dbg_level < 2)) {
+ writel(val, skdev->mem_map[1] + offset);
+ barrier();
+ } else {
+ barrier();
+ writel(val, skdev->mem_map[1] + offset);
+ barrier();
+ pr_debug("%s:%s:%d offset %x = %x\n",
+ skdev->name, __func__, __LINE__, offset, val);
+ }
+}
+
+static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
+ u32 offset)
+{
+ if (likely(skdev->dbg_level < 2)) {
+ writeq(val, skdev->mem_map[1] + offset);
+ barrier();
+ } else {
+ barrier();
+ writeq(val, skdev->mem_map[1] + offset);
+ barrier();
+ pr_debug("%s:%s:%d offset %x = %016llx\n",
+ skdev->name, __func__, __LINE__, offset, val);
+ }
+}
+
+
+#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
+static int skd_isr_type = SKD_IRQ_DEFAULT;
+
+module_param(skd_isr_type, int, 0444);
+MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
+ " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
+
+#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
+static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
+
+module_param(skd_max_req_per_msg, int, 0444);
+MODULE_PARM_DESC(skd_max_req_per_msg,
+ "Maximum SCSI requests packed in a single message."
+ " (1-14, default==1)");
+
+#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
+#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
+static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
+
+module_param(skd_max_queue_depth, int, 0444);
+MODULE_PARM_DESC(skd_max_queue_depth,
+ "Maximum SCSI requests issued to s1120."
+ " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
+
+static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
+module_param(skd_sgs_per_request, int, 0444);
+MODULE_PARM_DESC(skd_sgs_per_request,
+ "Maximum SG elements per block request."
+ " (1-4096, default==256)");
+
+static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
+module_param(skd_max_pass_thru, int, 0444);
+MODULE_PARM_DESC(skd_max_pass_thru,
+ "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
+
+module_param(skd_dbg_level, int, 0444);
+MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
+
+module_param(skd_isr_comp_limit, int, 0444);
+MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
+
+/* Major device number dynamically assigned. */
+static u32 skd_major;
+
+static struct skd_device *skd_construct(struct pci_dev *pdev);
+static void skd_destruct(struct skd_device *skdev);
+static const struct block_device_operations skd_blockdev_ops;
+static void skd_send_fitmsg(struct skd_device *skdev,
+ struct skd_fitmsg_context *skmsg);
+static void skd_send_special_fitmsg(struct skd_device *skdev,
+ struct skd_special_context *skspcl);
+static void skd_request_fn(struct request_queue *rq);
+static void skd_end_request(struct skd_device *skdev,
+ struct skd_request_context *skreq, int error);
+static int skd_preop_sg_list(struct skd_device *skdev,
+ struct skd_request_context *skreq);
+static void skd_postop_sg_list(struct skd_device *skdev,
+ struct skd_request_context *skreq);
+
+static void skd_restart_device(struct skd_device *skdev);
+static int skd_quiesce_dev(struct skd_device *skdev);
+static int skd_unquiesce_dev(struct skd_device *skdev);
+static void skd_release_special(struct skd_device *skdev,
+ struct skd_special_context *skspcl);
+static void skd_disable_interrupts(struct skd_device *skdev);
+static void skd_isr_fwstate(struct skd_device *skdev);
+static void skd_recover_requests(struct skd_device *skdev, int requeue);
+static void skd_soft_reset(struct skd_device *skdev);
+
+static const char *skd_name(struct skd_device *skdev);
+const char *skd_drive_state_to_str(int state);
+const char *skd_skdev_state_to_str(enum skd_drvr_state state);
+static void skd_log_skdev(struct skd_device *skdev, const char *event);
+static void skd_log_skmsg(struct skd_device *skdev,
+ struct skd_fitmsg_context *skmsg, const char *event);
+static void skd_log_skreq(struct skd_device *skdev,
+ struct skd_request_context *skreq, const char *event);
+
+/*
+ *****************************************************************************
+ * READ/WRITE REQUESTS
+ *****************************************************************************
+ */
+static void skd_fail_all_pending(struct skd_device *skdev)
+{
+ struct request_queue *q = skdev->queue;
+ struct request *req;
+
+ for (;; ) {
+ req = blk_peek_request(q);
+ if (req == NULL)
+ break;
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
+ }
+}
+
+static void
+skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
+ int data_dir, unsigned lba,
+ unsigned count)
+{
+ if (data_dir == READ)
+ scsi_req->cdb[0] = 0x28;
+ else
+ scsi_req->cdb[0] = 0x2a;
+
+ scsi_req->cdb[1] = 0;
+ scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
+ scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
+ scsi_req->cdb[4] = (lba & 0xff00) >> 8;
+ scsi_req->cdb[5] = (lba & 0xff);
+ scsi_req->cdb[6] = 0;
+ scsi_req->cdb[7] = (count & 0xff00) >> 8;
+ scsi_req->cdb[8] = count & 0xff;
+ scsi_req->cdb[9] = 0;
+}
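The CDB built above is a plain 10-byte SCSI READ(10)/WRITE(10): opcode 0x28 or 0x2a, a big-endian 32-bit LBA in bytes 2-5, and a big-endian 16-bit transfer length in bytes 7-8. For reference, the open-coded shifts are equivalent to:

	scsi_req->cdb[0] = (data_dir == READ) ? READ_10 : WRITE_10;
	put_unaligned_be32(lba, &scsi_req->cdb[2]);
	put_unaligned_be16(count, &scsi_req->cdb[7]);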
+
+static void
+skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
+ struct skd_request_context *skreq)
+{
+ skreq->flush_cmd = 1;
+
+ scsi_req->cdb[0] = 0x35;
+ scsi_req->cdb[1] = 0;
+ scsi_req->cdb[2] = 0;
+ scsi_req->cdb[3] = 0;
+ scsi_req->cdb[4] = 0;
+ scsi_req->cdb[5] = 0;
+ scsi_req->cdb[6] = 0;
+ scsi_req->cdb[7] = 0;
+ scsi_req->cdb[8] = 0;
+ scsi_req->cdb[9] = 0;
+}
+
+static void
+skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
+ struct skd_request_context *skreq,
+ struct page *page,
+ u32 lba, u32 count)
+{
+ char *buf;
+ unsigned long len;
+ struct request *req;
+
+ buf = page_address(page);
+ len = SKD_DISCARD_CDB_LENGTH;
+
+ scsi_req->cdb[0] = UNMAP;
+ scsi_req->cdb[8] = len;
+
+ put_unaligned_be16(6 + 16, &buf[0]);
+ put_unaligned_be16(16, &buf[2]);
+ put_unaligned_be64(lba, &buf[8]);
+ put_unaligned_be32(count, &buf[16]);
+
+ req = skreq->req;
+ blk_add_request_payload(req, page, len);
+ req->buffer = buf;
+}
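The 24-byte payload written above follows the standard UNMAP parameter list layout: a header with the UNMAP data length (6 + 16 = 22, the bytes that follow the field) and the block descriptor data length (16), then a single 16-byte descriptor carrying the 64-bit starting LBA and the 32-bit block count:

	buf[0..1]   = 22     UNMAP data length
	buf[2..3]   = 16     block descriptor data length
	buf[8..15]  = lba    starting LBA, big-endian
	buf[16..19] = count  number of logical blocks, big-endian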
+
+static void skd_request_fn_not_online(struct request_queue *q);
+
+static void skd_request_fn(struct request_queue *q)
+{
+ struct skd_device *skdev = q->queuedata;
+ struct skd_fitmsg_context *skmsg = NULL;
+ struct fit_msg_hdr *fmh = NULL;
+ struct skd_request_context *skreq;
+ struct request *req = NULL;
+ struct skd_scsi_request *scsi_req;
+ struct page *page;
+ unsigned long io_flags;
+ int error;
+ u32 lba;
+ u32 count;
+ int data_dir;
+ u32 be_lba;
+ u32 be_count;
+ u64 be_dmaa;
+ u64 cmdctxt;
+ u32 timo_slot;
+ void *cmd_ptr;
+ int flush, fua;
+
+ if (skdev->state != SKD_DRVR_STATE_ONLINE) {
+ skd_request_fn_not_online(q);
+ return;
+ }
+
+ if (blk_queue_stopped(skdev->queue)) {
+ if (skdev->skmsg_free_list == NULL ||
+ skdev->skreq_free_list == NULL ||
+ skdev->in_flight >= skdev->queue_low_water_mark)
+ /* There is still some kind of shortage */
+ return;
+
+ queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
+ }
+
+ /*
+ * Stop conditions:
+ * - There are no more native requests
+ * - There are already the maximum number of requests in progress
+ * - There are no more skd_request_context entries
+ * - There are no more FIT msg buffers
+ */
+ for (;; ) {
+
+ flush = fua = 0;
+
+ req = blk_peek_request(q);
+
+ /* Are there any native requests to start? */
+ if (req == NULL)
+ break;
+
+ lba = (u32)blk_rq_pos(req);
+ count = blk_rq_sectors(req);
+ data_dir = rq_data_dir(req);
+ io_flags = req->cmd_flags;
+
+ if (io_flags & REQ_FLUSH)
+ flush++;
+
+ if (io_flags & REQ_FUA)
+ fua++;
+
+ pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
+ "count=%u(0x%x) dir=%d\n",
+ skdev->name, __func__, __LINE__,
+ req, lba, lba, count, count, data_dir);
+
+ /* At this point we know there is a request */
+
+ /* Are too many requests already in progress? */
+ if (skdev->in_flight >= skdev->cur_max_queue_depth) {
+ pr_debug("%s:%s:%d qdepth %d, limit %d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->in_flight, skdev->cur_max_queue_depth);
+ break;
+ }
+
+ /* Is a skd_request_context available? */
+ skreq = skdev->skreq_free_list;
+ if (skreq == NULL) {
+ pr_debug("%s:%s:%d Out of req=%p\n",
+ skdev->name, __func__, __LINE__, q);
+ break;
+ }
+ SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
+ SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
+
+ /* Now we check to see if we can get a fit msg */
+ if (skmsg == NULL) {
+ if (skdev->skmsg_free_list == NULL) {
+ pr_debug("%s:%s:%d Out of msg\n",
+ skdev->name, __func__, __LINE__);
+ break;
+ }
+ }
+
+ skreq->flush_cmd = 0;
+ skreq->n_sg = 0;
+ skreq->sg_byte_count = 0;
+ skreq->discard_page = 0;
+
+ /*
+ * OK to now dequeue request from q.
+ *
+ * At this point we are committed to either start or reject
+ * the native request. Note that skd_request_context is
+ * available but is still at the head of the free list.
+ */
+ blk_start_request(req);
+ skreq->req = req;
+ skreq->fitmsg_id = 0;
+
+ /* Either a FIT msg is in progress or we have to start one. */
+ if (skmsg == NULL) {
+ /* Are there any FIT msg buffers available? */
+ skmsg = skdev->skmsg_free_list;
+ if (skmsg == NULL) {
+ pr_debug("%s:%s:%d Out of msg skdev=%p\n",
+ skdev->name, __func__, __LINE__,
+ skdev);
+ break;
+ }
+ SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
+ SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
+
+ skdev->skmsg_free_list = skmsg->next;
+
+ skmsg->state = SKD_MSG_STATE_BUSY;
+ skmsg->id += SKD_ID_INCR;
+
+ /* Initialize the FIT msg header */
+ fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
+ memset(fmh, 0, sizeof(*fmh));
+ fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
+ skmsg->length = sizeof(*fmh);
+ }
+
+ skreq->fitmsg_id = skmsg->id;
+
+ /*
+ * Note that a FIT msg may have just been started
+ * but contains no SoFIT requests yet.
+ */
+
+ /*
+ * Transcode the request, checking as we go. The outcome of
+ * the transcoding is represented by the error variable.
+ */
+ cmd_ptr = &skmsg->msg_buf[skmsg->length];
+ memset(cmd_ptr, 0, 32);
+
+ be_lba = cpu_to_be32(lba);
+ be_count = cpu_to_be32(count);
+ be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
+ cmdctxt = skreq->id + SKD_ID_INCR;
+
+ scsi_req = cmd_ptr;
+ scsi_req->hdr.tag = cmdctxt;
+ scsi_req->hdr.sg_list_dma_address = be_dmaa;
+
+ if (data_dir == READ)
+ skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
+ else
+ skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
+
+ if (io_flags & REQ_DISCARD) {
+ page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+ if (!page) {
+ pr_err("request_fn:Page allocation failed.\n");
+ skd_end_request(skdev, skreq, -ENOMEM);
+ break;
+ }
+ skreq->discard_page = 1;
+ skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
+
+ } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
+ skd_prep_zerosize_flush_cdb(scsi_req, skreq);
+ SKD_ASSERT(skreq->flush_cmd == 1);
+
+ } else {
+ skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
+ }
+
+ if (fua)
+ scsi_req->cdb[1] |= SKD_FUA_NV;
+
+ if (!req->bio)
+ goto skip_sg;
+
+ error = skd_preop_sg_list(skdev, skreq);
+
+ if (error != 0) {
+ /*
+ * Complete the native request with error.
+ * Note that the request context is still at the
+ * head of the free list, and that the SoFIT request
+ * was encoded into the FIT msg buffer but the FIT
+ * msg length has not been updated. In short, the
+ * only resource that has been allocated but might
+ * not be used is that the FIT msg could be empty.
+ */
+ pr_debug("%s:%s:%d error Out\n",
+ skdev->name, __func__, __LINE__);
+ skd_end_request(skdev, skreq, error);
+ continue;
+ }
+
+skip_sg:
+ scsi_req->hdr.sg_list_len_bytes =
+ cpu_to_be32(skreq->sg_byte_count);
+
+ /* Complete resource allocations. */
+ skdev->skreq_free_list = skreq->next;
+ skreq->state = SKD_REQ_STATE_BUSY;
+ skreq->id += SKD_ID_INCR;
+
+ skmsg->length += sizeof(struct skd_scsi_request);
+ fmh->num_protocol_cmds_coalesced++;
+
+ /*
+ * Update the active request counts.
+ * Capture the timeout timestamp.
+ */
+ skreq->timeout_stamp = skdev->timeout_stamp;
+ timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
+ skdev->timeout_slot[timo_slot]++;
+ skdev->in_flight++;
+ pr_debug("%s:%s:%d req=0x%x busy=%d\n",
+ skdev->name, __func__, __LINE__,
+ skreq->id, skdev->in_flight);
+
+ /*
+ * If the FIT msg buffer is full send it.
+ */
+ if (skmsg->length >= SKD_N_FITMSG_BYTES ||
+ fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
+ skd_send_fitmsg(skdev, skmsg);
+ skmsg = NULL;
+ fmh = NULL;
+ }
+ }
+
+ /*
+ * Is a FIT msg in progress? If it is empty put the buffer back
+ * on the free list. If it is non-empty send what we got.
+ * This minimizes latency when there are fewer requests than
+ * what fits in a FIT msg.
+ */
+ if (skmsg != NULL) {
+ /* Bigger than just a FIT msg header? */
+ if (skmsg->length > sizeof(struct fit_msg_hdr)) {
+ pr_debug("%s:%s:%d sending msg=%p, len %d\n",
+ skdev->name, __func__, __LINE__,
+ skmsg, skmsg->length);
+ skd_send_fitmsg(skdev, skmsg);
+ } else {
+ /*
+ * The FIT msg is empty. It means we got started
+ * on the msg, but the requests were rejected.
+ */
+ skmsg->state = SKD_MSG_STATE_IDLE;
+ skmsg->id += SKD_ID_INCR;
+ skmsg->next = skdev->skmsg_free_list;
+ skdev->skmsg_free_list = skmsg;
+ }
+ skmsg = NULL;
+ fmh = NULL;
+ }
+
+ /*
+ * If req is non-NULL it means there is something to do but
+ * we are out of a resource.
+ */
+ if (req)
+ blk_stop_queue(skdev->queue);
+}
+
+static void skd_end_request(struct skd_device *skdev,
+ struct skd_request_context *skreq, int error)
+{
+ struct request *req = skreq->req;
+ unsigned int io_flags = req->cmd_flags;
+
+ if ((io_flags & REQ_DISCARD) &&
+ (skreq->discard_page == 1)) {
+ pr_debug("%s:%s:%d, free the page!",
+ skdev->name, __func__, __LINE__);
+ free_page((unsigned long)req->buffer);
+ req->buffer = NULL;
+ }
+
+ if (unlikely(error)) {
+ struct request *req = skreq->req;
+ char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
+ u32 lba = (u32)blk_rq_pos(req);
+ u32 count = blk_rq_sectors(req);
+
+ pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
+ skd_name(skdev), cmd, lba, count, skreq->id);
+ } else
+ pr_debug("%s:%s:%d id=0x%x error=%d\n",
+ skdev->name, __func__, __LINE__, skreq->id, error);
+
+ __blk_end_request_all(skreq->req, error);
+}
+
+static int skd_preop_sg_list(struct skd_device *skdev,
+ struct skd_request_context *skreq)
+{
+ struct request *req = skreq->req;
+ int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
+ int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
+ struct scatterlist *sg = &skreq->sg[0];
+ int n_sg;
+ int i;
+
+ skreq->sg_byte_count = 0;
+
+ /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
+ skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
+
+ n_sg = blk_rq_map_sg(skdev->queue, req, sg);
+ if (n_sg <= 0)
+ return -EINVAL;
+
+ /*
+ * Map scatterlist to PCI bus addresses.
+ * Note PCI might change the number of entries.
+ */
+ n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
+ if (n_sg <= 0)
+ return -EINVAL;
+
+ SKD_ASSERT(n_sg <= skdev->sgs_per_request);
+
+ skreq->n_sg = n_sg;
+
+ for (i = 0; i < n_sg; i++) {
+ struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
+ u32 cnt = sg_dma_len(&sg[i]);
+ uint64_t dma_addr = sg_dma_address(&sg[i]);
+
+ sgd->control = FIT_SGD_CONTROL_NOT_LAST;
+ sgd->byte_count = cnt;
+ skreq->sg_byte_count += cnt;
+ sgd->host_side_addr = dma_addr;
+ sgd->dev_side_addr = 0;
+ }
+
+ skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
+ skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
+
+ if (unlikely(skdev->dbg_level > 1)) {
+ pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
+ skdev->name, __func__, __LINE__,
+ skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
+ for (i = 0; i < n_sg; i++) {
+ struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
+ pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
+ "addr=0x%llx next=0x%llx\n",
+ skdev->name, __func__, __LINE__,
+ i, sgd->byte_count, sgd->control,
+ sgd->host_side_addr, sgd->next_desc_ptr);
+ }
+ }
+
+ return 0;
+}
+
+static void skd_postop_sg_list(struct skd_device *skdev,
+ struct skd_request_context *skreq)
+{
+ int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
+ int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
+
+ /*
+ * restore the next ptr for next IO request so we
+ * don't have to set it every time.
+ */
+ skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
+ skreq->sksg_dma_address +
+ ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
+ pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
+}
+
+static void skd_request_fn_not_online(struct request_queue *q)
+{
+ struct skd_device *skdev = q->queuedata;
+ int error;
+
+ SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
+
+ skd_log_skdev(skdev, "req_not_online");
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_PAUSING:
+ case SKD_DRVR_STATE_PAUSED:
+ case SKD_DRVR_STATE_STARTING:
+ case SKD_DRVR_STATE_RESTARTING:
+ case SKD_DRVR_STATE_WAIT_BOOT:
+ /* In case of starting, we haven't started the queue,
+ * so we can't get here... but requests are
+ * possibly hanging out waiting for us because we
+ * have already reported dev/skd0. They'll wait
+ * forever if the connect doesn't complete.
+ * What to do? Perhaps delay dev/skd0?
+ */
+ case SKD_DRVR_STATE_BUSY:
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ case SKD_DRVR_STATE_BUSY_ERASE:
+ case SKD_DRVR_STATE_DRAINING_TIMEOUT:
+ return;
+
+ case SKD_DRVR_STATE_BUSY_SANITIZE:
+ case SKD_DRVR_STATE_STOPPING:
+ case SKD_DRVR_STATE_SYNCING:
+ case SKD_DRVR_STATE_FAULT:
+ case SKD_DRVR_STATE_DISAPPEARED:
+ default:
+ error = -EIO;
+ break;
+ }
+
+ /* If we get here, terminate all pending block requests
+ * with EIO and any SCSI pass-thru with appropriate sense
+ */
+
+ skd_fail_all_pending(skdev);
+}
+
+/*
+ *****************************************************************************
+ * TIMER
+ *****************************************************************************
+ */
+
+static void skd_timer_tick_not_online(struct skd_device *skdev);
+
+static void skd_timer_tick(ulong arg)
+{
+ struct skd_device *skdev = (struct skd_device *)arg;
+
+ u32 timo_slot;
+ u32 overdue_timestamp;
+ unsigned long reqflags;
+ u32 state;
+
+ if (skdev->state == SKD_DRVR_STATE_FAULT)
+ /* The driver has declared fault, and we want it to
+ * stay that way until driver is reloaded.
+ */
+ return;
+
+ spin_lock_irqsave(&skdev->lock, reqflags);
+
+ state = SKD_READL(skdev, FIT_STATUS);
+ state &= FIT_SR_DRIVE_STATE_MASK;
+ if (state != skdev->drive_state)
+ skd_isr_fwstate(skdev);
+
+ if (skdev->state != SKD_DRVR_STATE_ONLINE) {
+ skd_timer_tick_not_online(skdev);
+ goto timer_func_out;
+ }
+ skdev->timeout_stamp++;
+ timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
+
+ /*
+ * All requests that happened during the previous use of
+ * this slot should be done by now. The previous use was
+ * over 7 seconds ago.
+ */
+ if (skdev->timeout_slot[timo_slot] == 0)
+ goto timer_func_out;
+
+ /* Something is overdue */
+ overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
+
+ pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->timeout_slot[timo_slot], skdev->in_flight);
+ pr_err("(%s): Overdue IOs (%d), busy %d\n",
+ skd_name(skdev), skdev->timeout_slot[timo_slot],
+ skdev->in_flight);
+
+ skdev->timer_countdown = SKD_DRAINING_TIMO;
+ skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
+ skdev->timo_slot = timo_slot;
+ blk_stop_queue(skdev->queue);
+
+timer_func_out:
+ mod_timer(&skdev->timer, (jiffies + HZ));
+
+ spin_unlock_irqrestore(&skdev->lock, reqflags);
+}
+
+static void skd_timer_tick_not_online(struct skd_device *skdev)
+{
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_IDLE:
+ case SKD_DRVR_STATE_LOAD:
+ break;
+ case SKD_DRVR_STATE_BUSY_SANITIZE:
+ pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
+ skdev->name, __func__, __LINE__,
+ skdev->drive_state, skdev->state);
+ /* If we've been in sanitize for 3 seconds, we figure we're not
+ * going to get any more completions, so recover requests now
+ */
+ if (skdev->timer_countdown > 0) {
+ skdev->timer_countdown--;
+ return;
+ }
+ skd_recover_requests(skdev, 0);
+ break;
+
+ case SKD_DRVR_STATE_BUSY:
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ case SKD_DRVR_STATE_BUSY_ERASE:
+ pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->state, skdev->timer_countdown);
+ if (skdev->timer_countdown > 0) {
+ skdev->timer_countdown--;
+ return;
+ }
+ pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
+ skdev->name, __func__, __LINE__,
+ skdev->state, skdev->timer_countdown);
+ skd_restart_device(skdev);
+ break;
+
+ case SKD_DRVR_STATE_WAIT_BOOT:
+ case SKD_DRVR_STATE_STARTING:
+ if (skdev->timer_countdown > 0) {
+ skdev->timer_countdown--;
+ return;
+ }
+ /* For now, we fault the drive. Could attempt resets to
+ * recover at some point. */
+ skdev->state = SKD_DRVR_STATE_FAULT;
+
+ pr_err("(%s): DriveFault Connect Timeout (%x)\n",
+ skd_name(skdev), skdev->drive_state);
+
+ /*start the queue so we can respond with error to requests */
+ /* wakeup anyone waiting for startup complete */
+ blk_start_queue(skdev->queue);
+ skdev->gendisk_on = -1;
+ wake_up_interruptible(&skdev->waitq);
+ break;
+
+ case SKD_DRVR_STATE_ONLINE:
+ /* shouldn't get here. */
+ break;
+
+ case SKD_DRVR_STATE_PAUSING:
+ case SKD_DRVR_STATE_PAUSED:
+ break;
+
+ case SKD_DRVR_STATE_DRAINING_TIMEOUT:
+ pr_debug("%s:%s:%d "
+ "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
+ skdev->name, __func__, __LINE__,
+ skdev->timo_slot,
+ skdev->timer_countdown,
+ skdev->in_flight,
+ skdev->timeout_slot[skdev->timo_slot]);
+ /* if the slot has cleared we can let the I/O continue */
+ if (skdev->timeout_slot[skdev->timo_slot] == 0) {
+ pr_debug("%s:%s:%d Slot drained, starting queue.\n",
+ skdev->name, __func__, __LINE__);
+ skdev->state = SKD_DRVR_STATE_ONLINE;
+ blk_start_queue(skdev->queue);
+ return;
+ }
+ if (skdev->timer_countdown > 0) {
+ skdev->timer_countdown--;
+ return;
+ }
+ skd_restart_device(skdev);
+ break;
+
+ case SKD_DRVR_STATE_RESTARTING:
+ if (skdev->timer_countdown > 0) {
+ skdev->timer_countdown--;
+ return;
+ }
+ /* For now, we fault the drive. Could attempt resets to
+ * recover at some point. */
+ skdev->state = SKD_DRVR_STATE_FAULT;
+ pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
+ skd_name(skdev), skdev->drive_state);
+
+ /*
+ * Recovering does two things:
+ * 1. completes IO with error
+ * 2. reclaims dma resources
+ * When is it safe to recover requests?
+ * - if the drive state is faulted
+ * - if the state is still soft reset after our timeout
+ * - if the drive registers are dead (state = FF)
+ * If it is "unsafe", we still need to recover, so we will
+ * disable pci bus mastering and disable our interrupts.
+ */
+
+ if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
+ (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
+ (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
+ /* It never came out of soft reset. Try to
+ * recover the requests and then let them
+ * fail. This is to mitigate hung processes. */
+ skd_recover_requests(skdev, 0);
+ else {
+ pr_err("(%s): Disable BusMaster (%x)\n",
+ skd_name(skdev), skdev->drive_state);
+ pci_disable_device(skdev->pdev);
+ skd_disable_interrupts(skdev);
+ skd_recover_requests(skdev, 0);
+ }
+
+ /*start the queue so we can respond with error to requests */
+ /* wakeup anyone waiting for startup complete */
+ blk_start_queue(skdev->queue);
+ skdev->gendisk_on = -1;
+ wake_up_interruptible(&skdev->waitq);
+ break;
+
+ case SKD_DRVR_STATE_RESUMING:
+ case SKD_DRVR_STATE_STOPPING:
+ case SKD_DRVR_STATE_SYNCING:
+ case SKD_DRVR_STATE_FAULT:
+ case SKD_DRVR_STATE_DISAPPEARED:
+ default:
+ break;
+ }
+}
+
+static int skd_start_timer(struct skd_device *skdev)
+{
+ int rc;
+
+ init_timer(&skdev->timer);
+ setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
+
+ rc = mod_timer(&skdev->timer, (jiffies + HZ));
+ if (rc)
+ pr_err("%s: failed to start timer %d\n",
+ __func__, rc);
+ return rc;
+}
+
+static void skd_kill_timer(struct skd_device *skdev)
+{
+ del_timer_sync(&skdev->timer);
+}
+
+/*
+ *****************************************************************************
+ * IOCTL
+ *****************************************************************************
+ */
+static int skd_ioctl_sg_io(struct skd_device *skdev,
+ fmode_t mode, void __user *argp);
+static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
+ struct skd_sg_io *sksgio);
+static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
+ struct skd_sg_io *sksgio);
+static int skd_sg_io_prep_buffering(struct skd_device *skdev,
+ struct skd_sg_io *sksgio);
+static int skd_sg_io_copy_buffer(struct skd_device *skdev,
+ struct skd_sg_io *sksgio, int dxfer_dir);
+static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
+ struct skd_sg_io *sksgio);
+static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
+static int skd_sg_io_release_skspcl(struct skd_device *skdev,
+ struct skd_sg_io *sksgio);
+static int skd_sg_io_put_status(struct skd_device *skdev,
+ struct skd_sg_io *sksgio);
+
+static void skd_complete_special(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1
+ *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ struct skd_special_context *skspcl);
+
+static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
+ uint cmd_in, ulong arg)
+{
+ int rc = 0;
+ struct gendisk *disk = bdev->bd_disk;
+ struct skd_device *skdev = disk->private_data;
+ void __user *p = (void *)arg;
+
+ pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
+ skdev->name, __func__, __LINE__,
+ disk->disk_name, current->comm, mode, cmd_in, arg);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ switch (cmd_in) {
+ case SG_SET_TIMEOUT:
+ case SG_GET_TIMEOUT:
+ case SG_GET_VERSION_NUM:
+ rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
+ break;
+ case SG_IO:
+ rc = skd_ioctl_sg_io(skdev, mode, p);
+ break;
+
+ default:
+ rc = -ENOTTY;
+ break;
+ }
+
+ pr_debug("%s:%s:%d %s: completion rc %d\n",
+ skdev->name, __func__, __LINE__, disk->disk_name, rc);
+ return rc;
+}
+
+static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
+ void __user *argp)
+{
+ int rc;
+ struct skd_sg_io sksgio;
+
+ memset(&sksgio, 0, sizeof(sksgio));
+ sksgio.mode = mode;
+ sksgio.argp = argp;
+ sksgio.iov = &sksgio.no_iov_iov;
+
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_ONLINE:
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ break;
+
+ default:
+ pr_debug("%s:%s:%d drive not online\n",
+ skdev->name, __func__, __LINE__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_prep_buffering(skdev, &sksgio);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_await(skdev, &sksgio);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
+ if (rc)
+ goto out;
+
+ rc = skd_sg_io_put_status(skdev, &sksgio);
+ if (rc)
+ goto out;
+
+ rc = 0;
+
+out:
+ skd_sg_io_release_skspcl(skdev, &sksgio);
+
+ if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
+ kfree(sksgio.iov);
+ return rc;
+}
+
+static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
+ struct skd_sg_io *sksgio)
+{
+ struct sg_io_hdr *sgp = &sksgio->sg;
+ int i, acc;
+
+ if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
+ pr_debug("%s:%s:%d access sg failed %p\n",
+ skdev->name, __func__, __LINE__, sksgio->argp);
+ return -EFAULT;
+ }
+
+ if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
+ pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
+ skdev->name, __func__, __LINE__, sksgio->argp);
+ return -EFAULT;
+ }
+
+ if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
+ pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
+ skdev->name, __func__, __LINE__, sgp->interface_id);
+ return -EINVAL;
+ }
+
+ if (sgp->cmd_len > sizeof(sksgio->cdb)) {
+ pr_debug("%s:%s:%d cmd_len invalid %d\n",
+ skdev->name, __func__, __LINE__, sgp->cmd_len);
+ return -EINVAL;
+ }
+
+ if (sgp->iovec_count > 256) {
+ pr_debug("%s:%s:%d iovec_count invalid %d\n",
+ skdev->name, __func__, __LINE__, sgp->iovec_count);
+ return -EINVAL;
+ }
+
+ if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
+ pr_debug("%s:%s:%d dxfer_len invalid %d\n",
+ skdev->name, __func__, __LINE__, sgp->dxfer_len);
+ return -EINVAL;
+ }
+
+ switch (sgp->dxfer_direction) {
+ case SG_DXFER_NONE:
+ acc = -1;
+ break;
+
+ case SG_DXFER_TO_DEV:
+ acc = VERIFY_READ;
+ break;
+
+ case SG_DXFER_FROM_DEV:
+ case SG_DXFER_TO_FROM_DEV:
+ acc = VERIFY_WRITE;
+ break;
+
+ default:
+ pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
+ skdev->name, __func__, __LINE__, sgp->dxfer_direction);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
+ pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
+ skdev->name, __func__, __LINE__, sgp->cmdp);
+ return -EFAULT;
+ }
+
+ if (sgp->mx_sb_len != 0) {
+ if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
+ pr_debug("%s:%s:%d access sbp failed %p\n",
+ skdev->name, __func__, __LINE__, sgp->sbp);
+ return -EFAULT;
+ }
+ }
+
+ if (sgp->iovec_count == 0) {
+ sksgio->iov[0].iov_base = sgp->dxferp;
+ sksgio->iov[0].iov_len = sgp->dxfer_len;
+ sksgio->iovcnt = 1;
+ sksgio->dxfer_len = sgp->dxfer_len;
+ } else {
+ struct sg_iovec *iov;
+ uint nbytes = sizeof(*iov) * sgp->iovec_count;
+ size_t iov_data_len;
+
+ iov = kmalloc(nbytes, GFP_KERNEL);
+ if (iov == NULL) {
+ pr_debug("%s:%s:%d alloc iovec failed %d\n",
+ skdev->name, __func__, __LINE__,
+ sgp->iovec_count);
+ return -ENOMEM;
+ }
+ sksgio->iov = iov;
+ sksgio->iovcnt = sgp->iovec_count;
+
+ if (copy_from_user(iov, sgp->dxferp, nbytes)) {
+ pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
+ skdev->name, __func__, __LINE__, sgp->dxferp);
+ return -EFAULT;
+ }
+
+ /*
+ * Sum up the vecs, making sure they don't overflow
+ */
+ iov_data_len = 0;
+ for (i = 0; i < sgp->iovec_count; i++) {
+ if (iov_data_len + iov[i].iov_len < iov_data_len)
+ return -EINVAL;
+ iov_data_len += iov[i].iov_len;
+ }
+
+ /* SG_IO howto says that the shorter of the two wins */
+ if (sgp->dxfer_len < iov_data_len) {
+ sksgio->iovcnt = iov_shorten((struct iovec *)iov,
+ sgp->iovec_count,
+ sgp->dxfer_len);
+ sksgio->dxfer_len = sgp->dxfer_len;
+ } else
+ sksgio->dxfer_len = iov_data_len;
+ }
+
+ if (sgp->dxfer_direction != SG_DXFER_NONE) {
+ struct sg_iovec *iov = sksgio->iov;
+ for (i = 0; i < sksgio->iovcnt; i++, iov++) {
+ if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
+ pr_debug("%s:%s:%d access data failed %p/%d\n",
+ skdev->name, __func__, __LINE__,
+ iov->iov_base, (int)iov->iov_len);
+ return -EFAULT;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
+ struct skd_sg_io *sksgio)
+{
+ struct skd_special_context *skspcl = NULL;
+ int rc;
+
+ for (;;) {
+ ulong flags;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ skspcl = skdev->skspcl_free_list;
+ if (skspcl != NULL) {
+ skdev->skspcl_free_list =
+ (struct skd_special_context *)skspcl->req.next;
+ skspcl->req.id += SKD_ID_INCR;
+ skspcl->req.state = SKD_REQ_STATE_SETUP;
+ skspcl->orphaned = 0;
+ skspcl->req.n_sg = 0;
+ }
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+ if (skspcl != NULL) {
+ rc = 0;
+ break;
+ }
+
+ pr_debug("%s:%s:%d blocking\n",
+ skdev->name, __func__, __LINE__);
+
+ rc = wait_event_interruptible_timeout(
+ skdev->waitq,
+ (skdev->skspcl_free_list != NULL),
+ msecs_to_jiffies(sksgio->sg.timeout));
+
+ pr_debug("%s:%s:%d unblocking, rc=%d\n",
+ skdev->name, __func__, __LINE__, rc);
+
+ if (rc <= 0) {
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ else
+ rc = -EINTR;
+ break;
+ }
+ /*
+ * If we get here rc > 0 meaning the timeout to
+ * wait_event_interruptible_timeout() had time left, hence the
+ * sought event -- non-empty free list -- happened.
+ * Retry the allocation.
+ */
+ }
+ sksgio->skspcl = skspcl;
+
+ return rc;
+}
+
+static int skd_skreq_prep_buffering(struct skd_device *skdev,
+ struct skd_request_context *skreq,
+ u32 dxfer_len)
+{
+ u32 resid = dxfer_len;
+
+ /*
+ * The DMA engine must have aligned addresses and byte counts.
+ */
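+ /* (-resid) & 3 adds 0-3 bytes, rounding resid up to a multiple of 4. */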
+ resid += (-resid) & 3;
+ skreq->sg_byte_count = resid;
+
+ skreq->n_sg = 0;
+
+ while (resid > 0) {
+ u32 nbytes = PAGE_SIZE;
+ u32 ix = skreq->n_sg;
+ struct scatterlist *sg = &skreq->sg[ix];
+ struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
+ struct page *page;
+
+ if (nbytes > resid)
+ nbytes = resid;
+
+ page = alloc_page(GFP_KERNEL);
+ if (page == NULL)
+ return -ENOMEM;
+
+ sg_set_page(sg, page, nbytes, 0);
+
+ /* TODO: This should be going through a pci_???()
+ * routine to do proper mapping. */
+ sksg->control = FIT_SGD_CONTROL_NOT_LAST;
+ sksg->byte_count = nbytes;
+
+ sksg->host_side_addr = sg_phys(sg);
+
+ sksg->dev_side_addr = 0;
+ sksg->next_desc_ptr = skreq->sksg_dma_address +
+ (ix + 1) * sizeof(*sksg);
+
+ skreq->n_sg++;
+ resid -= nbytes;
+ }
+
+ if (skreq->n_sg > 0) {
+ u32 ix = skreq->n_sg - 1;
+ struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
+
+ sksg->control = FIT_SGD_CONTROL_LAST;
+ sksg->next_desc_ptr = 0;
+ }
+
+ if (unlikely(skdev->dbg_level > 1)) {
+ u32 i;
+
+ pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
+ skdev->name, __func__, __LINE__,
+ skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
+ for (i = 0; i < skreq->n_sg; i++) {
+ struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
+
+ pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
+ "addr=0x%llx next=0x%llx\n",
+ skdev->name, __func__, __LINE__,
+ i, sgd->byte_count, sgd->control,
+ sgd->host_side_addr, sgd->next_desc_ptr);
+ }
+ }
+
+ return 0;
+}
+
+static int skd_sg_io_prep_buffering(struct skd_device *skdev,
+ struct skd_sg_io *sksgio)
+{
+ struct skd_special_context *skspcl = sksgio->skspcl;
+ struct skd_request_context *skreq = &skspcl->req;
+ u32 dxfer_len = sksgio->dxfer_len;
+ int rc;
+
+ rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
+ /*
+ * Eventually, errors or not, skd_release_special() is called
+ * to recover allocations including partial allocations.
+ */
+ return rc;
+}
+
+static int skd_sg_io_copy_buffer(struct skd_device *skdev,
+ struct skd_sg_io *sksgio, int dxfer_dir)
+{
+ struct skd_special_context *skspcl = sksgio->skspcl;
+ u32 iov_ix = 0;
+ struct sg_iovec curiov;
+ u32 sksg_ix = 0;
+ u8 *bufp = NULL;
+ u32 buf_len = 0;
+ u32 resid = sksgio->dxfer_len;
+ int rc;
+
+ curiov.iov_len = 0;
+ curiov.iov_base = NULL;
+
+ if (dxfer_dir != sksgio->sg.dxfer_direction) {
+ if (dxfer_dir != SG_DXFER_TO_DEV ||
+ sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
+ return 0;
+ }
+
+ while (resid > 0) {
+ u32 nbytes = PAGE_SIZE;
+
+ if (curiov.iov_len == 0) {
+ curiov = sksgio->iov[iov_ix++];
+ continue;
+ }
+
+ if (buf_len == 0) {
+ struct page *page;
+ page = sg_page(&skspcl->req.sg[sksg_ix++]);
+ bufp = page_address(page);
+ buf_len = PAGE_SIZE;
+ }
+
+ nbytes = min_t(u32, nbytes, resid);
+ nbytes = min_t(u32, nbytes, curiov.iov_len);
+ nbytes = min_t(u32, nbytes, buf_len);
+
+ if (dxfer_dir == SG_DXFER_TO_DEV)
+ rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
+ else
+ rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
+
+ if (rc)
+ return -EFAULT;
+
+ resid -= nbytes;
+ curiov.iov_len -= nbytes;
+ curiov.iov_base += nbytes;
+ buf_len -= nbytes;
+ }
+
+ return 0;
+}
+
+static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
+ struct skd_sg_io *sksgio)
+{
+ struct skd_special_context *skspcl = sksgio->skspcl;
+ struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
+ struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
+
+ memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
+
+ /* Initialize the FIT msg header */
+ fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
+ fmh->num_protocol_cmds_coalesced = 1;
+
+ /* Initialize the SCSI request */
+ if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
+ scsi_req->hdr.sg_list_dma_address =
+ cpu_to_be64(skspcl->req.sksg_dma_address);
+ scsi_req->hdr.tag = skspcl->req.id;
+ scsi_req->hdr.sg_list_len_bytes =
+ cpu_to_be32(skspcl->req.sg_byte_count);
+ memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
+
+ skspcl->req.state = SKD_REQ_STATE_BUSY;
+ skd_send_special_fitmsg(skdev, skspcl);
+
+ return 0;
+}
+
+static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
+{
+ unsigned long flags;
+ int rc;
+
+ rc = wait_event_interruptible_timeout(skdev->waitq,
+ (sksgio->skspcl->req.state !=
+ SKD_REQ_STATE_BUSY),
+ msecs_to_jiffies(sksgio->sg.
+ timeout));
+
+ spin_lock_irqsave(&skdev->lock, flags);
+
+ if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
+ pr_debug("%s:%s:%d skspcl %p aborted\n",
+ skdev->name, __func__, __LINE__, sksgio->skspcl);
+
+ /* Build check cond, sense and let command finish. */
+ /* For a timeout, we must fabricate completion and sense
+ * data to complete the command */
+ sksgio->skspcl->req.completion.status =
+ SAM_STAT_CHECK_CONDITION;
+
+ memset(&sksgio->skspcl->req.err_info, 0,
+ sizeof(sksgio->skspcl->req.err_info));
+ sksgio->skspcl->req.err_info.type = 0x70;
+ sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
+ sksgio->skspcl->req.err_info.code = 0x44;
+ sksgio->skspcl->req.err_info.qual = 0;
+ rc = 0;
+ } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
+ /* No longer on the adapter. We finish. */
+ rc = 0;
+ else {
+ /* Something's gone wrong. Still busy. Timeout or
+ * user interrupted (control-C). Mark it as an orphan
+ * so it will be disposed of when it completes. */
+ sksgio->skspcl->orphaned = 1;
+ sksgio->skspcl = NULL;
+ if (rc == 0) {
+ pr_debug("%s:%s:%d timed out %p (%u ms)\n",
+ skdev->name, __func__, __LINE__,
+ sksgio, sksgio->sg.timeout);
+ rc = -ETIMEDOUT;
+ } else {
+ pr_debug("%s:%s:%d cntlc %p\n",
+ skdev->name, __func__, __LINE__, sksgio);
+ rc = -EINTR;
+ }
+ }
+
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+ return rc;
+}
+
+static int skd_sg_io_put_status(struct skd_device *skdev,
+ struct skd_sg_io *sksgio)
+{
+ struct sg_io_hdr *sgp = &sksgio->sg;
+ struct skd_special_context *skspcl = sksgio->skspcl;
+ int resid = 0;
+
+ u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
+
+ sgp->status = skspcl->req.completion.status;
+ resid = sksgio->dxfer_len - nb;
+
+ sgp->masked_status = sgp->status & STATUS_MASK;
+ sgp->msg_status = 0;
+ sgp->host_status = 0;
+ sgp->driver_status = 0;
+ sgp->resid = resid;
+ if (sgp->masked_status || sgp->host_status || sgp->driver_status)
+ sgp->info |= SG_INFO_CHECK;
+
+ pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ sgp->status, sgp->masked_status, sgp->resid);
+
+ if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
+ if (sgp->mx_sb_len > 0) {
+ struct fit_comp_error_info *ei = &skspcl->req.err_info;
+ u32 nbytes = sizeof(*ei);
+
+ nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
+
+ sgp->sb_len_wr = nbytes;
+
+ if (__copy_to_user(sgp->sbp, ei, nbytes)) {
+ pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
+ skdev->name, __func__, __LINE__,
+ sgp->sbp);
+ return -EFAULT;
+ }
+ }
+ }
+
+ if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
+ pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
+ skdev->name, __func__, __LINE__, sksgio->argp);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int skd_sg_io_release_skspcl(struct skd_device *skdev,
+ struct skd_sg_io *sksgio)
+{
+ struct skd_special_context *skspcl = sksgio->skspcl;
+
+ if (skspcl != NULL) {
+ ulong flags;
+
+ sksgio->skspcl = NULL;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ skd_release_special(skdev, skspcl);
+ spin_unlock_irqrestore(&skdev->lock, flags);
+ }
+
+ return 0;
+}
+
+/*
+ *****************************************************************************
+ * INTERNAL REQUESTS -- generated by driver itself
+ *****************************************************************************
+ */
+
+static int skd_format_internal_skspcl(struct skd_device *skdev)
+{
+ struct skd_special_context *skspcl = &skdev->internal_skspcl;
+ struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
+ struct fit_msg_hdr *fmh;
+ uint64_t dma_address;
+ struct skd_scsi_request *scsi;
+
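+ /*
+ * Lay out the fixed parts of the internal special request: a FIT
+ * header carrying one coalesced SCSI command at offset 64 and a
+ * single SG descriptor pointing at the internal data buffer.
+ */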
+ fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
+ fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
+ fmh->num_protocol_cmds_coalesced = 1;
+
+ scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
+ memset(scsi, 0, sizeof(*scsi));
+ dma_address = skspcl->req.sksg_dma_address;
+ scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
+ sgd->control = FIT_SGD_CONTROL_LAST;
+ sgd->byte_count = 0;
+ sgd->host_side_addr = skspcl->db_dma_address;
+ sgd->dev_side_addr = 0;
+ sgd->next_desc_ptr = 0LL;
+
+ return 1;
+}
+
+#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
+
+static void skd_send_internal_skspcl(struct skd_device *skdev,
+ struct skd_special_context *skspcl,
+ u8 opcode)
+{
+ struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
+ struct skd_scsi_request *scsi;
+ unsigned char *buf = skspcl->data_buf;
+ int i;
+
+ if (skspcl->req.state != SKD_REQ_STATE_IDLE)
+ /*
+ * A refresh is already in progress.
+ * Just wait for it to finish.
+ */
+ return;
+
+ SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
+ skspcl->req.state = SKD_REQ_STATE_BUSY;
+ skspcl->req.id += SKD_ID_INCR;
+
+ scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
+ scsi->hdr.tag = skspcl->req.id;
+
+ memset(scsi->cdb, 0, sizeof(scsi->cdb));
+
+ switch (opcode) {
+ case TEST_UNIT_READY:
+ scsi->cdb[0] = TEST_UNIT_READY;
+ sgd->byte_count = 0;
+ scsi->hdr.sg_list_len_bytes = 0;
+ break;
+
+ case READ_CAPACITY:
+ scsi->cdb[0] = READ_CAPACITY;
+ sgd->byte_count = SKD_N_READ_CAP_BYTES;
+ scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
+ break;
+
+ case INQUIRY:
+ scsi->cdb[0] = INQUIRY;
+ scsi->cdb[1] = 0x01; /* evpd */
+ scsi->cdb[2] = 0x80; /* serial number page */
+ scsi->cdb[4] = 0x10;
+ sgd->byte_count = 16;
+ scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
+ break;
+
+ case SYNCHRONIZE_CACHE:
+ scsi->cdb[0] = SYNCHRONIZE_CACHE;
+ sgd->byte_count = 0;
+ scsi->hdr.sg_list_len_bytes = 0;
+ break;
+
+ case WRITE_BUFFER:
+ scsi->cdb[0] = WRITE_BUFFER;
+ scsi->cdb[1] = 0x02;
+ scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
+ scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
+ sgd->byte_count = WR_BUF_SIZE;
+ scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
+ /* fill incrementing byte pattern */
+ for (i = 0; i < sgd->byte_count; i++)
+ buf[i] = i & 0xFF;
+ break;
+
+ case READ_BUFFER:
+ scsi->cdb[0] = READ_BUFFER;
+ scsi->cdb[1] = 0x02;
+ scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
+ scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
+ sgd->byte_count = WR_BUF_SIZE;
+ scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
+ memset(skspcl->data_buf, 0, sgd->byte_count);
+ break;
+
+ default:
+ SKD_ASSERT("Don't know what to send");
+ return;
+
+ }
+ skd_send_special_fitmsg(skdev, skspcl);
+}
+
+static void skd_refresh_device_data(struct skd_device *skdev)
+{
+ struct skd_special_context *skspcl = &skdev->internal_skspcl;
+
+ skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
+}
+
+static int skd_chk_read_buf(struct skd_device *skdev,
+ struct skd_special_context *skspcl)
+{
+ unsigned char *buf = skspcl->data_buf;
+ int i;
+
+ /* check for incrementing byte pattern */
+ for (i = 0; i < WR_BUF_SIZE; i++)
+ if (buf[i] != (i & 0xFF))
+ return 1;
+
+ return 0;
+}
+
+static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
+ u8 code, u8 qual, u8 fruc)
+{
+ /* If the check condition is of special interest, log a message */
+ if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
+ && (code == 0x04) && (qual == 0x06)) {
+ pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
+ "ascq/fruc %02x/%02x/%02x/%02x\n",
+ skd_name(skdev), key, code, qual, fruc);
+ }
+}
+
+static void skd_complete_internal(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1
+ *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ struct skd_special_context *skspcl)
+{
+ u8 *buf = skspcl->data_buf;
+ u8 status;
+ int i;
+ struct skd_scsi_request *scsi =
+ (struct skd_scsi_request *)&skspcl->msg_buf[64];
+
+ SKD_ASSERT(skspcl == &skdev->internal_skspcl);
+
+ pr_debug("%s:%s:%d complete internal %x\n",
+ skdev->name, __func__, __LINE__, scsi->cdb[0]);
+
+ skspcl->req.completion = *skcomp;
+ skspcl->req.state = SKD_REQ_STATE_IDLE;
+ skspcl->req.id += SKD_ID_INCR;
+
+ status = skspcl->req.completion.status;
+
+ skd_log_check_status(skdev, status, skerr->key, skerr->code,
+ skerr->qual, skerr->fruc);
+
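+ /*
+ * The internal requests form a chain that brings the device
+ * online: TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER ->
+ * READ_CAPACITY -> INQUIRY. Each good completion issues the
+ * next step; failures retry or fall back to an earlier step.
+ */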
+ switch (scsi->cdb[0]) {
+ case TEST_UNIT_READY:
+ if (status == SAM_STAT_GOOD)
+ skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
+ else if ((status == SAM_STAT_CHECK_CONDITION) &&
+ (skerr->key == MEDIUM_ERROR))
+ skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
+ else {
+ if (skdev->state == SKD_DRVR_STATE_STOPPING) {
+ pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ skdev->state);
+ return;
+ }
+ pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
+ skdev->name, __func__, __LINE__);
+ skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ }
+ break;
+
+ case WRITE_BUFFER:
+ if (status == SAM_STAT_GOOD)
+ skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
+ else {
+ if (skdev->state == SKD_DRVR_STATE_STOPPING) {
+ pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ skdev->state);
+ return;
+ }
+ pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
+ skdev->name, __func__, __LINE__);
+ skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ }
+ break;
+
+ case READ_BUFFER:
+ if (status == SAM_STAT_GOOD) {
+ if (skd_chk_read_buf(skdev, skspcl) == 0)
+ skd_send_internal_skspcl(skdev, skspcl,
+ READ_CAPACITY);
+ else {
+ pr_err(
+ "(%s):*** W/R Buffer mismatch %d ***\n",
+ skd_name(skdev), skdev->connect_retries);
+ if (skdev->connect_retries <
+ SKD_MAX_CONNECT_RETRIES) {
+ skdev->connect_retries++;
+ skd_soft_reset(skdev);
+ } else {
+ pr_err(
+ "(%s): W/R Buffer Connect Error\n",
+ skd_name(skdev));
+ return;
+ }
+ }
+
+ } else {
+ if (skdev->state == SKD_DRVR_STATE_STOPPING) {
+ pr_debug("%s:%s:%d "
+ "read buffer failed, don't send anymore state 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ skdev->state);
+ return;
+ }
+ pr_debug("%s:%s:%d "
+ "**** read buffer failed, retry skerr\n",
+ skdev->name, __func__, __LINE__);
+ skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ }
+ break;
+
+ case READ_CAPACITY:
+ skdev->read_cap_is_valid = 0;
+ if (status == SAM_STAT_GOOD) {
+ skdev->read_cap_last_lba =
+ (buf[0] << 24) | (buf[1] << 16) |
+ (buf[2] << 8) | buf[3];
+ skdev->read_cap_blocksize =
+ (buf[4] << 24) | (buf[5] << 16) |
+ (buf[6] << 8) | buf[7];
+
+ pr_debug("%s:%s:%d last lba %d, bs %d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->read_cap_last_lba,
+ skdev->read_cap_blocksize);
+
+ set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
+
+ skdev->read_cap_is_valid = 1;
+
+ skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
+ } else if ((status == SAM_STAT_CHECK_CONDITION) &&
+ (skerr->key == MEDIUM_ERROR)) {
+ skdev->read_cap_last_lba = ~0;
+ set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
+ pr_debug("%s:%s:%d "
+ "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
+ skdev->name, __func__, __LINE__);
+ skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
+ } else {
+ pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
+ skdev->name, __func__, __LINE__);
+ skd_send_internal_skspcl(skdev, skspcl,
+ TEST_UNIT_READY);
+ }
+ break;
+
+ case INQUIRY:
+ skdev->inquiry_is_valid = 0;
+ if (status == SAM_STAT_GOOD) {
+ skdev->inquiry_is_valid = 1;
+
+ for (i = 0; i < 12; i++)
+ skdev->inq_serial_num[i] = buf[i + 4];
+ skdev->inq_serial_num[12] = 0;
+ }
+
+ if (skd_unquiesce_dev(skdev) < 0)
+ pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
+ skdev->name, __func__, __LINE__);
+ /* connection is complete */
+ skdev->connect_retries = 0;
+ break;
+
+ case SYNCHRONIZE_CACHE:
+ if (status == SAM_STAT_GOOD)
+ skdev->sync_done = 1;
+ else
+ skdev->sync_done = -1;
+ wake_up_interruptible(&skdev->waitq);
+ break;
+
+ default:
+ SKD_ASSERT("we didn't send this");
+ }
+}
+
+/*
+ *****************************************************************************
+ * FIT MESSAGES
+ *****************************************************************************
+ */
+
+static void skd_send_fitmsg(struct skd_device *skdev,
+ struct skd_fitmsg_context *skmsg)
+{
+ u64 qcmd;
+ struct fit_msg_hdr *fmh;
+
+ pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
+ skdev->name, __func__, __LINE__,
+ skmsg->mb_dma_address, skdev->in_flight);
+ pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
+ skdev->name, __func__, __LINE__,
+ skmsg->msg_buf, skmsg->offset);
+
+ qcmd = skmsg->mb_dma_address;
+ qcmd |= FIT_QCMD_QID_NORMAL;
+
+ fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
+ skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
+
+ if (unlikely(skdev->dbg_level > 1)) {
+ u8 *bp = (u8 *)skmsg->msg_buf;
+ int i;
+ for (i = 0; i < skmsg->length; i += 8) {
+ pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
+ "%02x %02x %02x %02x\n",
+ skdev->name, __func__, __LINE__,
+ i, bp[i + 0], bp[i + 1], bp[i + 2],
+ bp[i + 3], bp[i + 4], bp[i + 5],
+ bp[i + 6], bp[i + 7]);
+ if (i == 0)
+ i = 64 - 8;
+ }
+ }
+
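+ /* Encode the rounded-up message size into the queued command word. */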
+ if (skmsg->length > 256)
+ qcmd |= FIT_QCMD_MSGSIZE_512;
+ else if (skmsg->length > 128)
+ qcmd |= FIT_QCMD_MSGSIZE_256;
+ else if (skmsg->length > 64)
+ qcmd |= FIT_QCMD_MSGSIZE_128;
+ else
+ /*
+ * This makes no sense because the FIT msg header is
+ * 64 bytes. If the msg is only 64 bytes long it has
+ * no payload.
+ */
+ qcmd |= FIT_QCMD_MSGSIZE_64;
+
+ SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
+
+}
+
+static void skd_send_special_fitmsg(struct skd_device *skdev,
+ struct skd_special_context *skspcl)
+{
+ u64 qcmd;
+
+ if (unlikely(skdev->dbg_level > 1)) {
+ u8 *bp = (u8 *)skspcl->msg_buf;
+ int i;
+
+ for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
+ pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x "
+ "%02x %02x %02x %02x\n",
+ skdev->name, __func__, __LINE__, i,
+ bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
+ bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
+ if (i == 0)
+ i = 64 - 8;
+ }
+
+ pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
+ skdev->name, __func__, __LINE__,
+ skspcl, skspcl->req.id, skspcl->req.sksg_list,
+ skspcl->req.sksg_dma_address);
+ for (i = 0; i < skspcl->req.n_sg; i++) {
+ struct fit_sg_descriptor *sgd =
+ &skspcl->req.sksg_list[i];
+
+ pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
+ "addr=0x%llx next=0x%llx\n",
+ skdev->name, __func__, __LINE__,
+ i, sgd->byte_count, sgd->control,
+ sgd->host_side_addr, sgd->next_desc_ptr);
+ }
+ }
+
+ /*
+ * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
+ * and one 64-byte SSDI command.
+ */
+ qcmd = skspcl->mb_dma_address;
+ qcmd |= FIT_QCMD_QID_NORMAL | FIT_QCMD_MSGSIZE_128;
+
+ SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
+}
+
+/*
+ *****************************************************************************
+ * COMPLETION QUEUE
+ *****************************************************************************
+ */
+
+static void skd_complete_other(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1 *skcomp,
+ volatile struct fit_comp_error_info *skerr);
+
+struct sns_info {
+ u8 type;
+ u8 stat;
+ u8 key;
+ u8 asc;
+ u8 ascq;
+ u8 mask;
+ enum skd_check_status_action action;
+};
+
+static struct sns_info skd_chkstat_table[] = {
+ /* Good */
+ { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
+ SKD_CHECK_STATUS_REPORT_GOOD },
+
+ /* Smart alerts */
+ { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
+ SKD_CHECK_STATUS_REPORT_SMART_ALERT },
+ { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
+ SKD_CHECK_STATUS_REPORT_SMART_ALERT },
+ { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
+ SKD_CHECK_STATUS_REPORT_SMART_ALERT },
+
+ /* Retry (with limits) */
+ { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
+ SKD_CHECK_STATUS_REQUEUE_REQUEST },
+ { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
+ SKD_CHECK_STATUS_REQUEUE_REQUEST },
+ { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
+ SKD_CHECK_STATUS_REQUEUE_REQUEST },
+ { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
+ SKD_CHECK_STATUS_REQUEUE_REQUEST },
+
+ /* Busy (or about to be) */
+ { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
+ SKD_CHECK_STATUS_BUSY_IMMINENT },
+};
+
+/*
+ * Look up status and sense data to decide how to handle the error
+ * from the device.
+ * mask says which fields must match: bit 0x10 = type, 0x08 = stat,
+ * 0x04 = key, 0x02 = asc, 0x01 = ascq. E.g. mask=0x18 means check
+ * type and stat, ignore key, asc, ascq.
+ */
+
+static enum skd_check_status_action
+skd_check_status(struct skd_device *skdev,
+ u8 cmp_status, volatile struct fit_comp_error_info *skerr)
+{
+ int i, n;
+
+ pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
+ skd_name(skdev), skerr->key, skerr->code, skerr->qual,
+ skerr->fruc);
+
+ pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
+ skdev->name, __func__, __LINE__, skerr->type, cmp_status,
+ skerr->key, skerr->code, skerr->qual, skerr->fruc);
+
+ /* Does the info match an entry in the good category? */
+ n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
+ for (i = 0; i < n; i++) {
+ struct sns_info *sns = &skd_chkstat_table[i];
+
+ if (sns->mask & 0x10)
+ if (skerr->type != sns->type)
+ continue;
+
+ if (sns->mask & 0x08)
+ if (cmp_status != sns->stat)
+ continue;
+
+ if (sns->mask & 0x04)
+ if (skerr->key != sns->key)
+ continue;
+
+ if (sns->mask & 0x02)
+ if (skerr->code != sns->asc)
+ continue;
+
+ if (sns->mask & 0x01)
+ if (skerr->qual != sns->ascq)
+ continue;
+
+ if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
+ pr_err("(%s): SMART Alert: sense key/asc/ascq "
+ "%02x/%02x/%02x\n",
+ skd_name(skdev), skerr->key,
+ skerr->code, skerr->qual);
+ }
+ return sns->action;
+ }
+
+ /* No other match, so nonzero status means error,
+ * zero status means good
+ */
+ if (cmp_status) {
+ pr_debug("%s:%s:%d status check: error\n",
+ skdev->name, __func__, __LINE__);
+ return SKD_CHECK_STATUS_REPORT_ERROR;
+ }
+
+ pr_debug("%s:%s:%d status check good default\n",
+ skdev->name, __func__, __LINE__);
+ return SKD_CHECK_STATUS_REPORT_GOOD;
+}
+
+static void skd_resolve_req_exception(struct skd_device *skdev,
+ struct skd_request_context *skreq)
+{
+ u8 cmp_status = skreq->completion.status;
+
+ switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
+ case SKD_CHECK_STATUS_REPORT_GOOD:
+ case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
+ skd_end_request(skdev, skreq, 0);
+ break;
+
+ case SKD_CHECK_STATUS_BUSY_IMMINENT:
+ skd_log_skreq(skdev, skreq, "retry(busy)");
+ blk_requeue_request(skdev->queue, skreq->req);
+ pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
+ skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
+ skdev->timer_countdown = SKD_TIMER_MINUTES(20);
+ skd_quiesce_dev(skdev);
+ break;
+
+ case SKD_CHECK_STATUS_REQUEUE_REQUEST:
+ if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
+ skd_log_skreq(skdev, skreq, "retry");
+ blk_requeue_request(skdev->queue, skreq->req);
+ break;
+ }
+ /* fall through to report error */
+
+ case SKD_CHECK_STATUS_REPORT_ERROR:
+ default:
+ skd_end_request(skdev, skreq, -EIO);
+ break;
+ }
+}
+
+/* assume spinlock is already held */
+static void skd_release_skreq(struct skd_device *skdev,
+ struct skd_request_context *skreq)
+{
+ u32 msg_slot;
+ struct skd_fitmsg_context *skmsg;
+
+ u32 timo_slot;
+
+ /*
+ * Reclaim the FIT msg buffer if this is
+ * the first of the requests it carried to
+ * be completed. The FIT msg buffer used to
+ * send this request cannot be reused until
+ * we are sure the s1120 card has copied
+ * it to its memory. The FIT msg might have
+ * contained several requests. As soon as
+ * any of them are completed we know that
+ * the entire FIT msg was transferred.
+ * Only the first completed request will
+ * match the FIT msg buffer id. The FIT
+ * msg buffer id is immediately updated.
+ * When subsequent requests complete the FIT
+ * msg buffer id won't match, so we know
+ * quite cheaply that it is already done.
+ */
+ msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
+ SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
+
+ skmsg = &skdev->skmsg_table[msg_slot];
+ if (skmsg->id == skreq->fitmsg_id) {
+ SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
+ SKD_ASSERT(skmsg->outstanding > 0);
+ skmsg->outstanding--;
+ if (skmsg->outstanding == 0) {
+ skmsg->state = SKD_MSG_STATE_IDLE;
+ skmsg->id += SKD_ID_INCR;
+ skmsg->next = skdev->skmsg_free_list;
+ skdev->skmsg_free_list = skmsg;
+ }
+ }
+
+ /*
+ * Decrease the number of active requests.
+ * Also decrement the count in the timeout slot.
+ */
+ SKD_ASSERT(skdev->in_flight > 0);
+ skdev->in_flight -= 1;
+
+ timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
+ SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
+ skdev->timeout_slot[timo_slot] -= 1;
+
+ /*
+ * Reset backpointer
+ */
+ skreq->req = NULL;
+
+ /*
+ * Reclaim the skd_request_context
+ */
+ skreq->state = SKD_REQ_STATE_IDLE;
+ skreq->id += SKD_ID_INCR;
+ skreq->next = skdev->skreq_free_list;
+ skdev->skreq_free_list = skreq;
+}
+
+#define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
+
+static void skd_do_inq_page_00(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1 *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ uint8_t *cdb, uint8_t *buf)
+{
+ uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
+
+ /* Caller requested "supported pages". The driver needs to insert
+ * its page.
+ */
+ pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
+ skdev->name, __func__, __LINE__);
+
+ /* If the device rejected the request because the CDB was
+ * improperly formed, then just leave.
+ */
+ if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
+ skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
+ return;
+
+ /* Get the amount of space the caller allocated */
+ max_bytes = (cdb[3] << 8) | cdb[4];
+
+ /* Get the number of pages actually returned by the device */
+ drive_pages = (buf[2] << 8) | buf[3];
+ drive_bytes = drive_pages + 4;
+ new_size = drive_pages + 1;
+
+ /* Supported pages must be in numerical order, so find where
+ * the driver page needs to be inserted into the list of
+ * pages returned by the device.
+ */
+ for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
+ if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
+ return; /* Device is already using this page code; abort. */
+ else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
+ break;
+ }
+
+ if (insert_pt < max_bytes) {
+ uint16_t u;
+
+ /* Shift everything up one byte to make room. */
+ for (u = new_size + 3; u > insert_pt; u--)
+ buf[u] = buf[u - 1];
+ buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
+
+ /* Increment the big-endian num_returned_bytes by one */
+ skcomp->num_returned_bytes =
+ cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) + 1);
+ }
+
+ /* update page length field to reflect the driver's page too */
+ buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
+ buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
+}
+
+static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
+{
+ int pcie_reg;
+ u16 pci_bus_speed;
+ u8 pci_lanes;
+
+ pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (pcie_reg) {
+ u16 linksta;
+ pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
+
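+ /* Link status: bits 3:0 = current link speed, bits 9:4 = link width */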
+ pci_bus_speed = linksta & 0xF;
+ pci_lanes = (linksta & 0x3F0) >> 4;
+ } else {
+ *speed = STEC_LINK_UNKNOWN;
+ *width = 0xFF;
+ return;
+ }
+
+ switch (pci_bus_speed) {
+ case 1:
+ *speed = STEC_LINK_2_5GTS;
+ break;
+ case 2:
+ *speed = STEC_LINK_5GTS;
+ break;
+ case 3:
+ *speed = STEC_LINK_8GTS;
+ break;
+ default:
+ *speed = STEC_LINK_UNKNOWN;
+ break;
+ }
+
+ if (pci_lanes <= 0x20)
+ *width = pci_lanes;
+ else
+ *width = 0xFF;
+}
+
+static void skd_do_inq_page_da(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1 *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ uint8_t *cdb, uint8_t *buf)
+{
+ unsigned max_bytes;
+ struct driver_inquiry_data inq;
+ u16 val;
+
+ pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
+ skdev->name, __func__, __LINE__);
+
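+ /* Build the driver's EVPD page from PCI topology and link info. */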
+ memset(&inq, 0, sizeof(inq));
+
+ inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
+
+ if (skdev->pdev && skdev->pdev->bus) {
+ skd_get_link_info(skdev->pdev,
+ &inq.pcie_link_speed, &inq.pcie_link_lanes);
+ inq.pcie_bus_number = cpu_to_be16(skdev->pdev->bus->number);
+ inq.pcie_device_number = PCI_SLOT(skdev->pdev->devfn);
+ inq.pcie_function_number = PCI_FUNC(skdev->pdev->devfn);
+
+ pci_read_config_word(skdev->pdev, PCI_VENDOR_ID, &val);
+ inq.pcie_vendor_id = cpu_to_be16(val);
+
+ pci_read_config_word(skdev->pdev, PCI_DEVICE_ID, &val);
+ inq.pcie_device_id = cpu_to_be16(val);
+
+ pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_VENDOR_ID,
+ &val);
+ inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
+
+ pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_ID, &val);
+ inq.pcie_subsystem_device_id = cpu_to_be16(val);
+ } else {
+ inq.pcie_bus_number = 0xFFFF;
+ inq.pcie_device_number = 0xFF;
+ inq.pcie_function_number = 0xFF;
+ inq.pcie_link_speed = 0xFF;
+ inq.pcie_link_lanes = 0xFF;
+ inq.pcie_vendor_id = 0xFFFF;
+ inq.pcie_device_id = 0xFFFF;
+ inq.pcie_subsystem_vendor_id = 0xFFFF;
+ inq.pcie_subsystem_device_id = 0xFFFF;
+ }
+
+ /* Driver version, fixed length, padded with spaces on the right */
+ inq.driver_version_length = sizeof(inq.driver_version);
+ memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
+ memcpy(inq.driver_version, DRV_VER_COMPL,
+ min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
+
+ inq.page_length = cpu_to_be16((sizeof(inq) - 4));
+
+ /* Clear the error set by the device */
+ skcomp->status = SAM_STAT_GOOD;
+ memset((void *)skerr, 0, sizeof(*skerr));
+
+ /* copy response into output buffer */
+ max_bytes = (cdb[3] << 8) | cdb[4];
+ memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
+
+ skcomp->num_returned_bytes =
+ cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
+}
+
+static void skd_do_driver_inq(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1 *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ uint8_t *cdb, uint8_t *buf)
+{
+ if (!buf)
+ return;
+ else if (cdb[0] != INQUIRY)
+ return; /* Not an INQUIRY */
+ else if ((cdb[1] & 1) == 0)
+ return; /* EVPD not set */
+ else if (cdb[2] == 0)
+ /* Need to add driver's page to supported pages list */
+ skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
+ else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
+ /* Caller requested driver's page */
+ skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
+}
+
+static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
+{
+ if (!sg)
+ return NULL;
+ if (!sg_page(sg))
+ return NULL;
+ return sg_virt(sg);
+}
+
+static void skd_process_scsi_inq(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1
+ *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ struct skd_special_context *skspcl)
+{
+ uint8_t *buf;
+ struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
+ struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
+
+ dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
+ skspcl->req.sg_data_dir);
+ buf = skd_sg_1st_page_ptr(skspcl->req.sg);
+
+ if (buf)
+ skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
+}
+
+
+static int skd_isr_completion_posted(struct skd_device *skdev,
+ int limit, int *enqueued)
+{
+ volatile struct fit_completion_entry_v1 *skcmp = NULL;
+ volatile struct fit_comp_error_info *skerr;
+ u16 req_id;
+ u32 req_slot;
+ struct skd_request_context *skreq;
+ u16 cmp_cntxt = 0;
+ u8 cmp_status = 0;
+ u8 cmp_cycle = 0;
+ u32 cmp_bytes = 0;
+ int rc = 0;
+ int processed = 0;
+
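+ /*
+ * Consume completion entries until the cycle field of the next
+ * entry no longer matches the expected cycle, i.e. the device
+ * has not written it yet. The index wraps around the queue and
+ * the expected cycle is bumped on each wrap.
+ */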
+ for (;;) {
+ SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
+
+ skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
+ cmp_cycle = skcmp->cycle;
+ cmp_cntxt = skcmp->tag;
+ cmp_status = skcmp->status;
+ cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
+
+ skerr = &skdev->skerr_table[skdev->skcomp_ix];
+
+ pr_debug("%s:%s:%d "
+ "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
+ "busy=%d rbytes=0x%x proto=%d\n",
+ skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
+ skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
+ skdev->in_flight, cmp_bytes, skdev->proto_ver);
+
+ if (cmp_cycle != skdev->skcomp_cycle) {
+ pr_debug("%s:%s:%d end of completions\n",
+ skdev->name, __func__, __LINE__);
+ break;
+ }
+ /*
+ * Update the completion queue head index and possibly
+ * the completion cycle count. 8-bit wrap-around.
+ */
+ skdev->skcomp_ix++;
+ if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
+ skdev->skcomp_ix = 0;
+ skdev->skcomp_cycle++;
+ }
+
+ /*
+ * The command context is a unique 32-bit ID. The low order
+ * bits help locate the request. The request is usually a
+ * r/w request (see skd_start() above) or a special request.
+ */
+ req_id = cmp_cntxt;
+ req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
+
+ /* Is this other than a r/w request? */
+ if (req_slot >= skdev->num_req_context) {
+ /*
+ * This is not a completion for a r/w request.
+ */
+ skd_complete_other(skdev, skcmp, skerr);
+ continue;
+ }
+
+ skreq = &skdev->skreq_table[req_slot];
+
+ /*
+ * Make sure the request ID for the slot matches.
+ */
+ if (skreq->id != req_id) {
+ pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
+ skdev->name, __func__, __LINE__,
+ req_id, skreq->id);
+ {
+ u16 new_id = cmp_cntxt;
+ pr_err("(%s): Completion mismatch "
+ "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
+ skd_name(skdev), req_id,
+ skreq->id, new_id);
+
+ continue;
+ }
+ }
+
+ SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
+
+ if (skreq->state == SKD_REQ_STATE_ABORTED) {
+ pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
+ skdev->name, __func__, __LINE__,
+ skreq, skreq->id);
+ /* a previously timed out command can
+ * now be cleaned up */
+ skd_release_skreq(skdev, skreq);
+ continue;
+ }
+
+ skreq->completion = *skcmp;
+ if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
+ skreq->err_info = *skerr;
+ skd_log_check_status(skdev, cmp_status, skerr->key,
+ skerr->code, skerr->qual,
+ skerr->fruc);
+ }
+ /* Release DMA resources for the request. */
+ if (skreq->n_sg > 0)
+ skd_postop_sg_list(skdev, skreq);
+
+ if (!skreq->req) {
+ pr_debug("%s:%s:%d NULL backptr skdreq %p, "
+ "req=0x%x req_id=0x%x\n",
+ skdev->name, __func__, __LINE__,
+ skreq, skreq->id, req_id);
+ } else {
+ /*
+ * Capture the outcome and post it back to the
+ * native request.
+ */
+ if (likely(cmp_status == SAM_STAT_GOOD))
+ skd_end_request(skdev, skreq, 0);
+ else
+ skd_resolve_req_exception(skdev, skreq);
+ }
+
+ /*
+ * Release the skreq, its FIT msg (if one), timeout slot,
+ * and queue depth.
+ */
+ skd_release_skreq(skdev, skreq);
+
+ /* skd_isr_comp_limit == 0 means no limit */
+ if (limit) {
+ if (++processed >= limit) {
+ rc = 1;
+ break;
+ }
+ }
+ }
+
+ if ((skdev->state == SKD_DRVR_STATE_PAUSING)
+ && (skdev->in_flight) == 0) {
+ skdev->state = SKD_DRVR_STATE_PAUSED;
+ wake_up_interruptible(&skdev->waitq);
+ }
+
+ return rc;
+}
+
+static void skd_complete_other(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1 *skcomp,
+ volatile struct fit_comp_error_info *skerr)
+{
+ u32 req_id = 0;
+ u32 req_table;
+ u32 req_slot;
+ struct skd_special_context *skspcl;
+
+ req_id = skcomp->tag;
+ req_table = req_id & SKD_ID_TABLE_MASK;
+ req_slot = req_id & SKD_ID_SLOT_MASK;
+
+ pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
+ skdev->name, __func__, __LINE__,
+ req_table, req_id, req_slot);
+
+ /*
+ * Based on the request id, determine how to dispatch this completion.
+ * This switch/case handles the good cases by forwarding the
+ * completion entry. Errors are reported below the switch.
+ */
+ switch (req_table) {
+ case SKD_ID_RW_REQUEST:
+ /*
+ * The caller, skd_isr_completion_posted() above,
+ * handles r/w requests. The only way we get here
+ * is if the req_slot is out of bounds.
+ */
+ break;
+
+ case SKD_ID_SPECIAL_REQUEST:
+ /*
+ * Make sure the req_slot is in bounds and that the id
+ * matches.
+ */
+ if (req_slot < skdev->n_special) {
+ skspcl = &skdev->skspcl_table[req_slot];
+ if (skspcl->req.id == req_id &&
+ skspcl->req.state == SKD_REQ_STATE_BUSY) {
+ skd_complete_special(skdev,
+ skcomp, skerr, skspcl);
+ return;
+ }
+ }
+ break;
+
+ case SKD_ID_INTERNAL:
+ if (req_slot == 0) {
+ skspcl = &skdev->internal_skspcl;
+ if (skspcl->req.id == req_id &&
+ skspcl->req.state == SKD_REQ_STATE_BUSY) {
+ skd_complete_internal(skdev,
+ skcomp, skerr, skspcl);
+ return;
+ }
+ }
+ break;
+
+ case SKD_ID_FIT_MSG:
+ /*
+ * These id's should never appear in a completion record.
+ */
+ break;
+
+ default:
+ /*
+ * These id's should never appear anywhere.
+ */
+ break;
+ }
+
+ /*
+ * If we get here it is a bad or stale id.
+ */
+}
+
+static void skd_complete_special(struct skd_device *skdev,
+ volatile struct fit_completion_entry_v1
+ *skcomp,
+ volatile struct fit_comp_error_info *skerr,
+ struct skd_special_context *skspcl)
+{
+ pr_debug("%s:%s:%d completing special request %p\n",
+ skdev->name, __func__, __LINE__, skspcl);
+ if (skspcl->orphaned) {
+ /* Discard orphaned request */
+ /* ?: Can this release directly or does it need
+ * to use a worker? */
+ pr_debug("%s:%s:%d release orphaned %p\n",
+ skdev->name, __func__, __LINE__, skspcl);
+ skd_release_special(skdev, skspcl);
+ return;
+ }
+
+ skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
+
+ skspcl->req.state = SKD_REQ_STATE_COMPLETED;
+ skspcl->req.completion = *skcomp;
+ skspcl->req.err_info = *skerr;
+
+ skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
+ skerr->code, skerr->qual, skerr->fruc);
+
+ wake_up_interruptible(&skdev->waitq);
+}
+
+/* assume spinlock is already held */
+static void skd_release_special(struct skd_device *skdev,
+ struct skd_special_context *skspcl)
+{
+ int i, was_depleted;
+
+ for (i = 0; i < skspcl->req.n_sg; i++) {
+ struct page *page = sg_page(&skspcl->req.sg[i]);
+ __free_page(page);
+ }
+
+ was_depleted = (skdev->skspcl_free_list == NULL);
+
+ skspcl->req.state = SKD_REQ_STATE_IDLE;
+ skspcl->req.id += SKD_ID_INCR;
+ skspcl->req.next =
+ (struct skd_request_context *)skdev->skspcl_free_list;
+ skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
+
+ if (was_depleted) {
+ pr_debug("%s:%s:%d skspcl was depleted\n",
+ skdev->name, __func__, __LINE__);
+ /* Free list was depleted. There might be waiters. */
+ wake_up_interruptible(&skdev->waitq);
+ }
+}
+
+static void skd_reset_skcomp(struct skd_device *skdev)
+{
+ u32 nbytes;
+ struct fit_completion_entry_v1 *skcomp;
+
+ nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
+ nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
+
+ memset(skdev->skcomp_table, 0, nbytes);
+
+ skdev->skcomp_ix = 0;
+ skdev->skcomp_cycle = 1;
+}
+
+/*
+ *****************************************************************************
+ * INTERRUPTS
+ *****************************************************************************
+ */
+static void skd_completion_worker(struct work_struct *work)
+{
+ struct skd_device *skdev =
+ container_of(work, struct skd_device, completion_worker);
+ unsigned long flags;
+ int flush_enqueued = 0;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+
+ /*
+ * pass in limit=0, which means no limit:
+ * process everything in the completion queue
+ */
+ skd_isr_completion_posted(skdev, 0, &flush_enqueued);
+ skd_request_fn(skdev->queue);
+
+ spin_unlock_irqrestore(&skdev->lock, flags);
+}
+
+static void skd_isr_msg_from_dev(struct skd_device *skdev);
+
+static irqreturn_t
+skd_isr(int irq, void *ptr)
+{
+ struct skd_device *skdev;
+ u32 intstat;
+ u32 ack;
+ int rc = 0;
+ int deferred = 0;
+ int flush_enqueued = 0;
+
+ skdev = (struct skd_device *)ptr;
+ spin_lock(&skdev->lock);
+
+ for (;;) {
+ intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
+
+ ack = FIT_INT_DEF_MASK;
+ ack &= intstat;
+
+ pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
+ skdev->name, __func__, __LINE__, intstat, ack);
+
+ /* As long as there is an interrupt pending on the device,
+ * keep running the loop. When none remain, get out; if we
+ * never did any processing, still defer to the completion
+ * handler.
+ */
+ if (ack == 0) {
+ /* No interrupts on device, but run the completion
+ * processor anyway?
+ */
+ if (rc == 0)
+ if (likely (skdev->state
+ == SKD_DRVR_STATE_ONLINE))
+ deferred = 1;
+ break;
+ }
+
+ rc = IRQ_HANDLED;
+
+ SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
+
+ if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
+ (skdev->state != SKD_DRVR_STATE_STOPPING))) {
+ if (intstat & FIT_ISH_COMPLETION_POSTED) {
+ /*
+ * If we have already deferred completion
+ * processing, don't bother running it again
+ */
+ if (deferred == 0)
+ deferred =
+ skd_isr_completion_posted(skdev,
+ skd_isr_comp_limit, &flush_enqueued);
+ }
+
+ if (intstat & FIT_ISH_FW_STATE_CHANGE) {
+ skd_isr_fwstate(skdev);
+ if (skdev->state == SKD_DRVR_STATE_FAULT ||
+ skdev->state ==
+ SKD_DRVR_STATE_DISAPPEARED) {
+ spin_unlock(&skdev->lock);
+ return rc;
+ }
+ }
+
+ if (intstat & FIT_ISH_MSG_FROM_DEV)
+ skd_isr_msg_from_dev(skdev);
+ }
+ }
+
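+ /*
+ * Either hand the remaining completion work to the worker or,
+ * if nothing was deferred, run the request function directly
+ * so that freed resources are reused right away.
+ */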
+ if (unlikely(flush_enqueued))
+ skd_request_fn(skdev->queue);
+
+ if (deferred)
+ schedule_work(&skdev->completion_worker);
+ else if (!flush_enqueued)
+ skd_request_fn(skdev->queue);
+
+ spin_unlock(&skdev->lock);
+
+ return rc;
+}
+
+static void skd_drive_fault(struct skd_device *skdev)
+{
+ skdev->state = SKD_DRVR_STATE_FAULT;
+ pr_err("(%s): Drive FAULT\n", skd_name(skdev));
+}
+
+static void skd_drive_disappeared(struct skd_device *skdev)
+{
+ skdev->state = SKD_DRVR_STATE_DISAPPEARED;
+ pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
+}
+
+static void skd_isr_fwstate(struct skd_device *skdev)
+{
+ u32 sense;
+ u32 state;
+ u32 mtd;
+ int prev_driver_state = skdev->state;
+
+ sense = SKD_READL(skdev, FIT_STATUS);
+ state = sense & FIT_SR_DRIVE_STATE_MASK;
+
+ pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
+ skd_name(skdev),
+ skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
+ skd_drive_state_to_str(state), state);
+
+ skdev->drive_state = state;
+
+ switch (skdev->drive_state) {
+ case FIT_SR_DRIVE_INIT:
+ if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
+ skd_disable_interrupts(skdev);
+ break;
+ }
+ if (skdev->state == SKD_DRVR_STATE_RESTARTING)
+ skd_recover_requests(skdev, 0);
+ if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
+ skdev->timer_countdown = SKD_STARTING_TIMO;
+ skdev->state = SKD_DRVR_STATE_STARTING;
+ skd_soft_reset(skdev);
+ break;
+ }
+ mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_SR_DRIVE_ONLINE:
+ skdev->cur_max_queue_depth = skd_max_queue_depth;
+ if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
+ skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
+
+ skdev->queue_low_water_mark =
+ skdev->cur_max_queue_depth * 2 / 3 + 1;
+ if (skdev->queue_low_water_mark < 1)
+ skdev->queue_low_water_mark = 1;
+ pr_info(
+ "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
+ skd_name(skdev),
+ skdev->cur_max_queue_depth,
+ skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
+
+ skd_refresh_device_data(skdev);
+ break;
+
+ case FIT_SR_DRIVE_BUSY:
+ skdev->state = SKD_DRVR_STATE_BUSY;
+ skdev->timer_countdown = SKD_BUSY_TIMO;
+ skd_quiesce_dev(skdev);
+ break;
+ case FIT_SR_DRIVE_BUSY_SANITIZE:
+ /* Set the timer for 3 seconds; we'll abort any unfinished
+ * commands after it expires.
+ */
+ skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
+ skdev->timer_countdown = SKD_TIMER_SECONDS(3);
+ blk_start_queue(skdev->queue);
+ break;
+ case FIT_SR_DRIVE_BUSY_ERASE:
+ skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
+ skdev->timer_countdown = SKD_BUSY_TIMO;
+ break;
+ case FIT_SR_DRIVE_OFFLINE:
+ skdev->state = SKD_DRVR_STATE_IDLE;
+ break;
+ case FIT_SR_DRIVE_SOFT_RESET:
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_STARTING:
+ case SKD_DRVR_STATE_RESTARTING:
+ /* Expected by a caller of skd_soft_reset() */
+ break;
+ default:
+ skdev->state = SKD_DRVR_STATE_RESTARTING;
+ break;
+ }
+ break;
+ case FIT_SR_DRIVE_FW_BOOTING:
+ pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
+ skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
+ break;
+
+ case FIT_SR_DRIVE_DEGRADED:
+ case FIT_SR_PCIE_LINK_DOWN:
+ case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
+ break;
+
+ case FIT_SR_DRIVE_FAULT:
+ skd_drive_fault(skdev);
+ skd_recover_requests(skdev, 0);
+ blk_start_queue(skdev->queue);
+ break;
+
+ /* PCIe bus returned all Fs? */
+ case 0xFF:
+ pr_info("(%s): state=0x%x sense=0x%x\n",
+ skd_name(skdev), state, sense);
+ skd_drive_disappeared(skdev);
+ skd_recover_requests(skdev, 0);
+ blk_start_queue(skdev->queue);
+ break;
+ default:
+ /*
+ * Unknown FW State. Wait for a state we recognize.
+ */
+ break;
+ }
+ pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
+ skd_name(skdev),
+ skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
+ skd_skdev_state_to_str(skdev->state), skdev->state);
+}
+
+static void skd_recover_requests(struct skd_device *skdev, int requeue)
+{
+ int i;
+
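+ /* Fail or requeue every busy r/w request and rebuild the free lists. */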
+ for (i = 0; i < skdev->num_req_context; i++) {
+ struct skd_request_context *skreq = &skdev->skreq_table[i];
+
+ if (skreq->state == SKD_REQ_STATE_BUSY) {
+ skd_log_skreq(skdev, skreq, "recover");
+
+ SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
+ SKD_ASSERT(skreq->req != NULL);
+
+ /* Release DMA resources for the request. */
+ if (skreq->n_sg > 0)
+ skd_postop_sg_list(skdev, skreq);
+
+ if (requeue &&
+ (unsigned long) ++skreq->req->special <
+ SKD_MAX_RETRIES)
+ blk_requeue_request(skdev->queue, skreq->req);
+ else
+ skd_end_request(skdev, skreq, -EIO);
+
+ skreq->req = NULL;
+
+ skreq->state = SKD_REQ_STATE_IDLE;
+ skreq->id += SKD_ID_INCR;
+ }
+ if (i > 0)
+ skreq[-1].next = skreq;
+ skreq->next = NULL;
+ }
+ skdev->skreq_free_list = skdev->skreq_table;
+
+ for (i = 0; i < skdev->num_fitmsg_context; i++) {
+ struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
+
+ if (skmsg->state == SKD_MSG_STATE_BUSY) {
+ skd_log_skmsg(skdev, skmsg, "salvaged");
+ SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
+ skmsg->state = SKD_MSG_STATE_IDLE;
+ skmsg->id += SKD_ID_INCR;
+ }
+ if (i > 0)
+ skmsg[-1].next = skmsg;
+ skmsg->next = NULL;
+ }
+ skdev->skmsg_free_list = skdev->skmsg_table;
+
+ for (i = 0; i < skdev->n_special; i++) {
+ struct skd_special_context *skspcl = &skdev->skspcl_table[i];
+
+ /* If orphaned, reclaim it because it has already been reported
+ * to the process as an error (it was waiting for a completion
+ * that never came, and now never will). If busy, change to a
+ * state that will cause it to error out in the wait routine
+ * and let it do the normal reporting and reclaiming.
+ */
+ if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
+ if (skspcl->orphaned) {
+ pr_debug("%s:%s:%d orphaned %p\n",
+ skdev->name, __func__, __LINE__,
+ skspcl);
+ skd_release_special(skdev, skspcl);
+ } else {
+ pr_debug("%s:%s:%d not orphaned %p\n",
+ skdev->name, __func__, __LINE__,
+ skspcl);
+ skspcl->req.state = SKD_REQ_STATE_ABORTED;
+ }
+ }
+ }
+ skdev->skspcl_free_list = skdev->skspcl_table;
+
+ for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
+ skdev->timeout_slot[i] = 0;
+
+ skdev->in_flight = 0;
+}
+
+static void skd_isr_msg_from_dev(struct skd_device *skdev)
+{
+ u32 mfd;
+ u32 mtd;
+ u32 data;
+
+ mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
+
+ pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
+ skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
+
+ /* ignore any mtd that is an ack for something we didn't send */
+ if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
+ return;
+
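+ /*
+ * Each message advances the init handshake with the device:
+ * FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
+ * SET_COMPQ_ADDR -> CMD_LOG_HOST_ID -> TIME_STAMP_LO/HI ->
+ * ARM_QUEUE.
+ */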
+ switch (FIT_MXD_TYPE(mfd)) {
+ case FIT_MTD_FITFW_INIT:
+ skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
+
+ if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
+ pr_err("(%s): protocol mismatch\n",
+ skdev->name);
+ pr_err("(%s): got=%d support=%d\n",
+ skdev->name, skdev->proto_ver,
+ FIT_PROTOCOL_VERSION_1);
+ pr_err("(%s): please upgrade driver\n",
+ skdev->name);
+ skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
+ skd_soft_reset(skdev);
+ break;
+ }
+ mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_MTD_GET_CMDQ_DEPTH:
+ skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
+ mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
+ SKD_N_COMPLETION_ENTRY);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_MTD_SET_COMPQ_DEPTH:
+ SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
+ mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_MTD_SET_COMPQ_ADDR:
+ skd_reset_skcomp(skdev);
+ mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_MTD_CMD_LOG_HOST_ID:
+ skdev->connect_time_stamp = get_seconds();
+ data = skdev->connect_time_stamp & 0xFFFF;
+ mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
+ skdev->drive_jiffies = FIT_MXD_DATA(mfd);
+ data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
+ mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+ break;
+
+ case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
+ skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
+ mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
+ SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
+ skdev->last_mtd = mtd;
+
+ pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
+ skd_name(skdev),
+ skdev->connect_time_stamp, skdev->drive_jiffies);
+ break;
+
+ case FIT_MTD_ARM_QUEUE:
+ skdev->last_mtd = 0;
+ /*
+ * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
+ */
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void skd_disable_interrupts(struct skd_device *skdev)
+{
+ u32 sense;
+
+ sense = SKD_READL(skdev, FIT_CONTROL);
+ sense &= ~FIT_CR_ENABLE_INTERRUPTS;
+ SKD_WRITEL(skdev, sense, FIT_CONTROL);
+ pr_debug("%s:%s:%d sense 0x%x\n",
+ skdev->name, __func__, __LINE__, sense);
+
+ /* Note that all 1s are written. A 1 bit means
+ * disable, a 0 means enable.
+ */
+ SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
+}
+
+static void skd_enable_interrupts(struct skd_device *skdev)
+{
+ u32 val;
+
+ /* unmask interrupts first */
+ val = FIT_ISH_FW_STATE_CHANGE +
+ FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
+
+ /* Note that the complement of mask is written. A 1 bit means
+ * disable, a 0 means enable. */
+ SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
+ pr_debug("%s:%s:%d interrupt mask=0x%x\n",
+ skdev->name, __func__, __LINE__, ~val);
+
+ val = SKD_READL(skdev, FIT_CONTROL);
+ val |= FIT_CR_ENABLE_INTERRUPTS;
+ pr_debug("%s:%s:%d control=0x%x\n",
+ skdev->name, __func__, __LINE__, val);
+ SKD_WRITEL(skdev, val, FIT_CONTROL);
+}
+
+/*
+ *****************************************************************************
+ * START, STOP, RESTART, QUIESCE, UNQUIESCE
+ *****************************************************************************
+ */
+
+static void skd_soft_reset(struct skd_device *skdev)
+{
+ u32 val;
+
+ val = SKD_READL(skdev, FIT_CONTROL);
+ val |= (FIT_CR_SOFT_RESET);
+ pr_debug("%s:%s:%d control=0x%x\n",
+ skdev->name, __func__, __LINE__, val);
+ SKD_WRITEL(skdev, val, FIT_CONTROL);
+}
+
+static void skd_start_device(struct skd_device *skdev)
+{
+ unsigned long flags;
+ u32 sense;
+ u32 state;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+
+ /* ack all ghost interrupts */
+ SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
+
+ sense = SKD_READL(skdev, FIT_STATUS);
+
+ pr_debug("%s:%s:%d initial status=0x%x\n",
+ skdev->name, __func__, __LINE__, sense);
+
+ state = sense & FIT_SR_DRIVE_STATE_MASK;
+ skdev->drive_state = state;
+ skdev->last_mtd = 0;
+
+ skdev->state = SKD_DRVR_STATE_STARTING;
+ skdev->timer_countdown = SKD_STARTING_TIMO;
+
+ skd_enable_interrupts(skdev);
+
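+ /*
+ * React to the drive state reported at start-up: most states
+ * arm a timer and wait, trigger a soft reset, or fault the
+ * device so queued requests can be errored out.
+ */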
+ switch (skdev->drive_state) {
+ case FIT_SR_DRIVE_OFFLINE:
+ pr_err("(%s): Drive offline...\n", skd_name(skdev));
+ break;
+
+ case FIT_SR_DRIVE_FW_BOOTING:
+ pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
+ skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
+ break;
+
+ case FIT_SR_DRIVE_BUSY_SANITIZE:
+ pr_info("(%s): Start: BUSY_SANITIZE\n",
+ skd_name(skdev));
+ skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
+ skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
+ break;
+
+ case FIT_SR_DRIVE_BUSY_ERASE:
+ pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
+ skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
+ skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
+ break;
+
+ case FIT_SR_DRIVE_INIT:
+ case FIT_SR_DRIVE_ONLINE:
+ skd_soft_reset(skdev);
+ break;
+
+ case FIT_SR_DRIVE_BUSY:
+ pr_err("(%s): Drive Busy...\n", skd_name(skdev));
+ skdev->state = SKD_DRVR_STATE_BUSY;
+ skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
+ break;
+
+ case FIT_SR_DRIVE_SOFT_RESET:
+ pr_err("(%s) drive soft reset in prog\n",
+ skd_name(skdev));
+ break;
+
+ case FIT_SR_DRIVE_FAULT:
+ /* Fault state is bad...soft reset won't do it...
+ * Hard reset, maybe, but does it work on device?
+ * For now, just fault so the system doesn't hang.
+ */
+ skd_drive_fault(skdev);
+ /* start the queue so we can respond with error to requests */
+ pr_debug("%s:%s:%d starting %s queue\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ blk_start_queue(skdev->queue);
+ skdev->gendisk_on = -1;
+ wake_up_interruptible(&skdev->waitq);
+ break;
+
+ case 0xFF:
+ /* Most likely the device isn't there or isn't responding
+ * to the BAR1 addresses. */
+ skd_drive_disappeared(skdev);
+ /* start the queue so we can respond with error to requests */
+ pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ blk_start_queue(skdev->queue);
+ skdev->gendisk_on = -1;
+ wake_up_interruptible(&skdev->waitq);
+ break;
+
+ default:
+ pr_err("(%s) Start: unknown state %x\n",
+ skd_name(skdev), skdev->drive_state);
+ break;
+ }
+
+ state = SKD_READL(skdev, FIT_CONTROL);
+ pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
+ skdev->name, __func__, __LINE__, state);
+
+ state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
+ pr_debug("%s:%s:%d Intr Status=0x%x\n",
+ skdev->name, __func__, __LINE__, state);
+
+ state = SKD_READL(skdev, FIT_INT_MASK_HOST);
+ pr_debug("%s:%s:%d Intr Mask=0x%x\n",
+ skdev->name, __func__, __LINE__, state);
+
+ state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
+ pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
+ skdev->name, __func__, __LINE__, state);
+
+ state = SKD_READL(skdev, FIT_HW_VERSION);
+ pr_debug("%s:%s:%d HW version=0x%x\n",
+ skdev->name, __func__, __LINE__, state);
+
+ spin_unlock_irqrestore(&skdev->lock, flags);
+}
+
+static void skd_stop_device(struct skd_device *skdev)
+{
+ unsigned long flags;
+ struct skd_special_context *skspcl = &skdev->internal_skspcl;
+ u32 dev_state;
+ int i;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+
+ if (skdev->state != SKD_DRVR_STATE_ONLINE) {
+ pr_err("(%s): skd_stop_device not online no sync\n",
+ skd_name(skdev));
+ goto stop_out;
+ }
+
+ if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
+ pr_err("(%s): skd_stop_device no special\n",
+ skd_name(skdev));
+ goto stop_out;
+ }
+
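+ /*
+ * Issue a SYNCHRONIZE CACHE through the internal special request
+ * and wait up to 10 seconds for its completion to set sync_done.
+ */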
+ skdev->state = SKD_DRVR_STATE_SYNCING;
+ skdev->sync_done = 0;
+
+ skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
+
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+ wait_event_interruptible_timeout(skdev->waitq,
+ (skdev->sync_done), (10 * HZ));
+
+ spin_lock_irqsave(&skdev->lock, flags);
+
+ switch (skdev->sync_done) {
+ case 0:
+ pr_err("(%s): skd_stop_device no sync\n",
+ skd_name(skdev));
+ break;
+ case 1:
+ pr_err("(%s): skd_stop_device sync done\n",
+ skd_name(skdev));
+ break;
+ default:
+ pr_err("(%s): skd_stop_device sync error\n",
+ skd_name(skdev));
+ }
+
+stop_out:
+ skdev->state = SKD_DRVR_STATE_STOPPING;
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+ skd_kill_timer(skdev);
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ skd_disable_interrupts(skdev);
+
+ /* ensure all ints on device are cleared */
+ /* soft reset the device to unload with a clean slate */
+ SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
+ SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
+
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+ /* poll every 100ms, 1 second timeout */
+ for (i = 0; i < 10; i++) {
+ dev_state =
+ SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
+ if (dev_state == FIT_SR_DRIVE_INIT)
+ break;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(100));
+ }
+
+ if (dev_state != FIT_SR_DRIVE_INIT)
+ pr_err("(%s): skd_stop_device state error 0x%02x\n",
+ skd_name(skdev), dev_state);
+}
+
+/* assume spinlock is held */
+static void skd_restart_device(struct skd_device *skdev)
+{
+ u32 state;
+
+ /* ack all ghost interrupts */
+ SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
+
+ state = SKD_READL(skdev, FIT_STATUS);
+
+ pr_debug("%s:%s:%d drive status=0x%x\n",
+ skdev->name, __func__, __LINE__, state);
+
+ state &= FIT_SR_DRIVE_STATE_MASK;
+ skdev->drive_state = state;
+ skdev->last_mtd = 0;
+
+ skdev->state = SKD_DRVR_STATE_RESTARTING;
+ skdev->timer_countdown = SKD_RESTARTING_TIMO;
+
+ skd_soft_reset(skdev);
+}
+
+/* assume spinlock is held */
+static int skd_quiesce_dev(struct skd_device *skdev)
+{
+ int rc = 0;
+
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_BUSY:
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ pr_debug("%s:%s:%d stopping %s queue\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ blk_stop_queue(skdev->queue);
+ break;
+ case SKD_DRVR_STATE_ONLINE:
+ case SKD_DRVR_STATE_STOPPING:
+ case SKD_DRVR_STATE_SYNCING:
+ case SKD_DRVR_STATE_PAUSING:
+ case SKD_DRVR_STATE_PAUSED:
+ case SKD_DRVR_STATE_STARTING:
+ case SKD_DRVR_STATE_RESTARTING:
+ case SKD_DRVR_STATE_RESUMING:
+ default:
+ rc = -EINVAL;
+ pr_debug("%s:%s:%d state [%d] not implemented\n",
+ skdev->name, __func__, __LINE__, skdev->state);
+ }
+ return rc;
+}
+
+/* assume spinlock is held */
+static int skd_unquiesce_dev(struct skd_device *skdev)
+{
+ int prev_driver_state = skdev->state;
+
+ skd_log_skdev(skdev, "unquiesce");
+ if (skdev->state == SKD_DRVR_STATE_ONLINE) {
+ pr_debug("%s:%s:%d **** device already ONLINE\n",
+ skdev->name, __func__, __LINE__);
+ return 0;
+ }
+ if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
+ /*
+ * If there has been a state change to other than
+ * ONLINE, we will rely on a controller state change
+ * to come back online and restart the queue.
+ * The BUSY state means that the driver is ready to
+ * continue normal processing but is waiting for the
+ * controller to become available.
+ */
+ skdev->state = SKD_DRVR_STATE_BUSY;
+ pr_debug("%s:%s:%d drive BUSY state\n",
+ skdev->name, __func__, __LINE__);
+ return 0;
+ }
+
+ /*
+ * The drive has just come online. The driver is either in startup,
+ * paused performing a task, or busy waiting for hardware.
+ */
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_PAUSED:
+ case SKD_DRVR_STATE_BUSY:
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ case SKD_DRVR_STATE_BUSY_ERASE:
+ case SKD_DRVR_STATE_STARTING:
+ case SKD_DRVR_STATE_RESTARTING:
+ case SKD_DRVR_STATE_FAULT:
+ case SKD_DRVR_STATE_IDLE:
+ case SKD_DRVR_STATE_LOAD:
+ skdev->state = SKD_DRVR_STATE_ONLINE;
+ pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
+ skd_name(skdev),
+ skd_skdev_state_to_str(prev_driver_state),
+ prev_driver_state, skd_skdev_state_to_str(skdev->state),
+ skdev->state);
+ pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
+ skdev->name, __func__, __LINE__);
+ pr_debug("%s:%s:%d starting %s queue\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
+ blk_start_queue(skdev->queue);
+ skdev->gendisk_on = 1;
+ wake_up_interruptible(&skdev->waitq);
+ break;
+
+ case SKD_DRVR_STATE_DISAPPEARED:
+ default:
+ pr_debug("%s:%s:%d **** driver state %d, not implemented \n",
+ skdev->name, __func__, __LINE__,
+ skdev->state);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/*
+ *****************************************************************************
+ * PCIe MSI/MSI-X INTERRUPT HANDLERS
+ *****************************************************************************
+ */
+
+static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
+{
+ struct skd_device *skdev = skd_host_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ pr_debug("%s:%s:%d MSIX = 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
+ irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
+ spin_unlock_irqrestore(&skdev->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
+{
+ struct skd_device *skdev = skd_host_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ pr_debug("%s:%s:%d MSIX = 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
+ skd_isr_fwstate(skdev);
+ spin_unlock_irqrestore(&skdev->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
+{
+ struct skd_device *skdev = skd_host_data;
+ unsigned long flags;
+ int flush_enqueued = 0;
+ int deferred;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ pr_debug("%s:%s:%d MSIX = 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
+ deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
+ &flush_enqueued);
+ if (flush_enqueued)
+ skd_request_fn(skdev->queue);
+
+ if (deferred)
+ schedule_work(&skdev->completion_worker);
+ else if (!flush_enqueued)
+ skd_request_fn(skdev->queue);
+
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
+{
+ struct skd_device *skdev = skd_host_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ pr_debug("%s:%s:%d MSIX = 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
+ skd_isr_msg_from_dev(skdev);
+ spin_unlock_irqrestore(&skdev->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
+{
+ struct skd_device *skdev = skd_host_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ pr_debug("%s:%s:%d MSIX = 0x%x\n",
+ skdev->name, __func__, __LINE__,
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
+ spin_unlock_irqrestore(&skdev->lock, flags);
+ return IRQ_HANDLED;
+}
+
+/*
+ *****************************************************************************
+ * PCIe MSI/MSI-X SETUP
+ *****************************************************************************
+ */
+
+struct skd_msix_entry {
+ int have_irq;
+ u32 vector;
+ u32 entry;
+ struct skd_device *rsp;
+ char isr_name[30];
+};
+
+struct skd_init_msix_entry {
+ const char *name;
+ irq_handler_t handler;
+};
+
+#define SKD_MAX_MSIX_COUNT 13
+#define SKD_MIN_MSIX_COUNT 7
+#define SKD_BASE_MSIX_IRQ 4
+
+static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
+ { "(DMA 0)", skd_reserved_isr },
+ { "(DMA 1)", skd_reserved_isr },
+ { "(DMA 2)", skd_reserved_isr },
+ { "(DMA 3)", skd_reserved_isr },
+ { "(State Change)", skd_statec_isr },
+ { "(COMPL_Q)", skd_comp_q },
+ { "(MSG)", skd_msg_isr },
+ { "(Reserved)", skd_reserved_isr },
+ { "(Reserved)", skd_reserved_isr },
+ { "(Queue Full 0)", skd_qfull_isr },
+ { "(Queue Full 1)", skd_qfull_isr },
+ { "(Queue Full 2)", skd_qfull_isr },
+ { "(Queue Full 3)", skd_qfull_isr },
+};
+
+static void skd_release_msix(struct skd_device *skdev)
+{
+ struct skd_msix_entry *qentry;
+ int i;
+
+ if (skdev->msix_entries == NULL)
+ return;
+ for (i = 0; i < skdev->msix_count; i++) {
+ qentry = &skdev->msix_entries[i];
+ skdev = qentry->rsp;
+
+ if (qentry->have_irq)
+ devm_free_irq(&skdev->pdev->dev,
+ qentry->vector, qentry->rsp);
+ }
+ pci_disable_msix(skdev->pdev);
+ kfree(skdev->msix_entries);
+ skdev->msix_count = 0;
+ skdev->msix_entries = NULL;
+}
+
+static int skd_acquire_msix(struct skd_device *skdev)
+{
+ int i, rc;
+ struct pci_dev *pdev;
+ struct msix_entry *entries = NULL;
+ struct skd_msix_entry *qentry;
+
+ pdev = skdev->pdev;
+ skdev->msix_count = SKD_MAX_MSIX_COUNT;
+ entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
+ GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
+ entries[i].entry = i;
+
+ rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT);
+ if (rc < 0)
+ goto msix_out;
+ if (rc) {
+ if (rc < SKD_MIN_MSIX_COUNT) {
+ pr_err("(%s): failed to enable MSI-X %d\n",
+ skd_name(skdev), rc);
+ goto msix_out;
+ }
+ pr_debug("%s:%s:%d %s: <%s> allocated %d MSI-X vectors\n",
+ skdev->name, __func__, __LINE__,
+ pci_name(pdev), skdev->name, rc);
+
+ skdev->msix_count = rc;
+ rc = pci_enable_msix(pdev, entries, skdev->msix_count);
+ if (rc) {
+ pr_err("(%s): failed to enable MSI-X "
+ "support (%d) %d\n",
+ skd_name(skdev), skdev->msix_count, rc);
+ goto msix_out;
+ }
+ }
+ skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
+ skdev->msix_count, GFP_KERNEL);
+ if (!skdev->msix_entries) {
+ rc = -ENOMEM;
+ skdev->msix_count = 0;
+ pr_err("(%s): msix table allocation error\n",
+ skd_name(skdev));
+ goto msix_out;
+ }
+
+ qentry = skdev->msix_entries;
+ for (i = 0; i < skdev->msix_count; i++) {
+ qentry->vector = entries[i].vector;
+ qentry->entry = entries[i].entry;
+ qentry->rsp = NULL;
+ qentry->have_irq = 0;
+ pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
+ skdev->name, __func__, __LINE__,
+ pci_name(pdev), skdev->name,
+ i, qentry->vector, qentry->entry);
+ qentry++;
+ }
+
+ /* Register an interrupt handler for each MSI-X vector */
+ for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
+ qentry = &skdev->msix_entries[i];
+ snprintf(qentry->isr_name, sizeof(qentry->isr_name),
+ "%s%d-msix %s", DRV_NAME, skdev->devno,
+ msix_entries[i].name);
+ rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
+ msix_entries[i].handler, 0,
+ qentry->isr_name, skdev);
+ if (rc) {
+ pr_err("(%s): Unable to register(%d) MSI-X "
+ "handler %d: %s\n",
+ skd_name(skdev), rc, i, qentry->isr_name);
+ goto msix_out;
+ } else {
+ qentry->have_irq = 1;
+ qentry->rsp = skdev;
+ }
+ }
+ pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
+ skdev->name, __func__, __LINE__,
+ pci_name(pdev), skdev->name, skdev->msix_count);
+ return 0;
+
+msix_out:
+ kfree(entries);
+ skd_release_msix(skdev);
+ return rc;
+}
+
+static int skd_acquire_irq(struct skd_device *skdev)
+{
+ int rc;
+ struct pci_dev *pdev;
+
+ pdev = skdev->pdev;
+ skdev->msix_count = 0;
+
+RETRY_IRQ_TYPE:
+ switch (skdev->irq_type) {
+ case SKD_IRQ_MSIX:
+ rc = skd_acquire_msix(skdev);
+ if (!rc)
+ pr_info("(%s): MSI-X %d irqs enabled\n",
+ skd_name(skdev), skdev->msix_count);
+ else {
+ pr_err(
+ "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
+ skd_name(skdev), rc);
+ skdev->irq_type = SKD_IRQ_MSI;
+ goto RETRY_IRQ_TYPE;
+ }
+ break;
+ case SKD_IRQ_MSI:
+ snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
+ DRV_NAME, skdev->devno);
+ rc = pci_enable_msi(pdev);
+ if (!rc) {
+ rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
+ skdev->isr_name, skdev);
+ if (rc) {
+ pci_disable_msi(pdev);
+ pr_err(
+ "(%s): failed to allocate the MSI interrupt %d\n",
+ skd_name(skdev), rc);
+ goto RETRY_IRQ_LEGACY;
+ }
+ pr_info("(%s): MSI irq %d enabled\n",
+ skd_name(skdev), pdev->irq);
+ } else {
+RETRY_IRQ_LEGACY:
+ pr_err(
+ "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
+ skd_name(skdev), rc);
+ skdev->irq_type = SKD_IRQ_LEGACY;
+ goto RETRY_IRQ_TYPE;
+ }
+ break;
+ case SKD_IRQ_LEGACY:
+ snprintf(skdev->isr_name, sizeof(skdev->isr_name),
+ "%s%d-legacy", DRV_NAME, skdev->devno);
+ rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
+ IRQF_SHARED, skdev->isr_name, skdev);
+ if (!rc)
+ pr_info("(%s): LEGACY irq %d enabled\n",
+ skd_name(skdev), pdev->irq);
+ else
+ pr_err("(%s): request LEGACY irq error %d\n",
+ skd_name(skdev), rc);
+ break;
+ default:
+ pr_info("(%s): irq_type %d invalid, re-set to %d\n",
+ skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
+ skdev->irq_type = SKD_IRQ_LEGACY;
+ goto RETRY_IRQ_TYPE;
+ }
+ return rc;
+}
+
+static void skd_release_irq(struct skd_device *skdev)
+{
+ switch (skdev->irq_type) {
+ case SKD_IRQ_MSIX:
+ skd_release_msix(skdev);
+ break;
+ case SKD_IRQ_MSI:
+ devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
+ pci_disable_msi(skdev->pdev);
+ break;
+ case SKD_IRQ_LEGACY:
+ devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
+ break;
+ default:
+ pr_err("(%s): wrong irq type %d!",
+ skd_name(skdev), skdev->irq_type);
+ break;
+ }
+}
+
+/*
+ *****************************************************************************
+ * CONSTRUCT
+ *****************************************************************************
+ */
+
+static int skd_cons_skcomp(struct skd_device *skdev);
+static int skd_cons_skmsg(struct skd_device *skdev);
+static int skd_cons_skreq(struct skd_device *skdev);
+static int skd_cons_skspcl(struct skd_device *skdev);
+static int skd_cons_sksb(struct skd_device *skdev);
+static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
+ u32 n_sg,
+ dma_addr_t *ret_dma_addr);
+static int skd_cons_disk(struct skd_device *skdev);
+
+#define SKD_N_DEV_TABLE 16u
+static u32 skd_next_devno;
+
+static struct skd_device *skd_construct(struct pci_dev *pdev)
+{
+ struct skd_device *skdev;
+ int blk_major = skd_major;
+ int rc;
+
+ skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
+
+ if (!skdev) {
+ pr_err(PFX "(%s): memory alloc failure\n",
+ pci_name(pdev));
+ return NULL;
+ }
+
+ skdev->state = SKD_DRVR_STATE_LOAD;
+ skdev->pdev = pdev;
+ skdev->devno = skd_next_devno++;
+ skdev->major = blk_major;
+ skdev->irq_type = skd_isr_type;
+ sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
+ skdev->dev_max_queue_depth = 0;
+
+ skdev->num_req_context = skd_max_queue_depth;
+ skdev->num_fitmsg_context = skd_max_queue_depth;
+ skdev->n_special = skd_max_pass_thru;
+ skdev->cur_max_queue_depth = 1;
+ skdev->queue_low_water_mark = 1;
+ skdev->proto_ver = 99;
+ skdev->sgs_per_request = skd_sgs_per_request;
+ skdev->dbg_level = skd_dbg_level;
+
+ atomic_set(&skdev->device_count, 0);
+
+ spin_lock_init(&skdev->lock);
+
+ INIT_WORK(&skdev->completion_worker, skd_completion_worker);
+
+ pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
+ rc = skd_cons_skcomp(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
+ rc = skd_cons_skmsg(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
+ rc = skd_cons_skreq(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
+ rc = skd_cons_skspcl(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
+ rc = skd_cons_sksb(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
+ rc = skd_cons_disk(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
+ return skdev;
+
+err_out:
+ pr_debug("%s:%s:%d construct failed\n",
+ skdev->name, __func__, __LINE__);
+ skd_destruct(skdev);
+ return NULL;
+}
+
+static int skd_cons_skcomp(struct skd_device *skdev)
+{
+ int rc = 0;
+ struct fit_completion_entry_v1 *skcomp;
+ u32 nbytes;
+
+ nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
+ nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
+
+ pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
+ skdev->name, __func__, __LINE__,
+ nbytes, SKD_N_COMPLETION_ENTRY);
+
+ skcomp = pci_alloc_consistent(skdev->pdev, nbytes,
+ &skdev->cq_dma_address);
+
+ if (skcomp == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ memset(skcomp, 0, nbytes);
+
+ skdev->skcomp_table = skcomp;
+ skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
+ sizeof(*skcomp) *
+ SKD_N_COMPLETION_ENTRY);
+
+err_out:
+ return rc;
+}
+
+static int skd_cons_skmsg(struct skd_device *skdev)
+{
+ int rc = 0;
+ u32 i;
+
+ pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
+ skdev->name, __func__, __LINE__,
+ sizeof(struct skd_fitmsg_context),
+ skdev->num_fitmsg_context,
+ sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
+
+ skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
+ *skdev->num_fitmsg_context, GFP_KERNEL);
+ if (skdev->skmsg_table == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ for (i = 0; i < skdev->num_fitmsg_context; i++) {
+ struct skd_fitmsg_context *skmsg;
+
+ skmsg = &skdev->skmsg_table[i];
+
+ skmsg->id = i + SKD_ID_FIT_MSG;
+
+ skmsg->state = SKD_MSG_STATE_IDLE;
+ skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
+ SKD_N_FITMSG_BYTES + 64,
+ &skmsg->mb_dma_address);
+
+ if (skmsg->msg_buf == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
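+ /*
+ * Round msg_buf and its DMA address up to the 64-byte boundary
+ * required by FIT_QCMD_BASE_ADDRESS_MASK, remembering the original
+ * misalignment in skmsg->offset. The extra 64 bytes allocated
+ * above leave room for this adjustment.
+ */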
+ skmsg->offset = (u32)((u64)skmsg->msg_buf &
+ (~FIT_QCMD_BASE_ADDRESS_MASK));
+ skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
+ skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
+ FIT_QCMD_BASE_ADDRESS_MASK);
+ skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
+ skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
+ memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
+
+ skmsg->next = &skmsg[1];
+ }
+
+ /* Free list is in order starting with the 0th entry. */
+ skdev->skmsg_table[i - 1].next = NULL;
+ skdev->skmsg_free_list = skdev->skmsg_table;
+
+err_out:
+ return rc;
+}
+
+static int skd_cons_skreq(struct skd_device *skdev)
+{
+ int rc = 0;
+ u32 i;
+
+ pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
+ skdev->name, __func__, __LINE__,
+ sizeof(struct skd_request_context),
+ skdev->num_req_context,
+ sizeof(struct skd_request_context) * skdev->num_req_context);
+
+ skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
+ * skdev->num_req_context, GFP_KERNEL);
+ if (skdev->skreq_table == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
+ skdev->name, __func__, __LINE__,
+ skdev->sgs_per_request, sizeof(struct scatterlist),
+ skdev->sgs_per_request * sizeof(struct scatterlist));
+
+ for (i = 0; i < skdev->num_req_context; i++) {
+ struct skd_request_context *skreq;
+
+ skreq = &skdev->skreq_table[i];
+
+ skreq->id = i + SKD_ID_RW_REQUEST;
+ skreq->state = SKD_REQ_STATE_IDLE;
+
+ skreq->sg = kzalloc(sizeof(struct scatterlist) *
+ skdev->sgs_per_request, GFP_KERNEL);
+ if (skreq->sg == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ sg_init_table(skreq->sg, skdev->sgs_per_request);
+
+ skreq->sksg_list = skd_cons_sg_list(skdev,
+ skdev->sgs_per_request,
+ &skreq->sksg_dma_address);
+
+ if (skreq->sksg_list == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ skreq->next = &skreq[1];
+ }
+
+ /* Free list is in order starting with the 0th entry. */
+ skdev->skreq_table[i - 1].next = NULL;
+ skdev->skreq_free_list = skdev->skreq_table;
+
+err_out:
+ return rc;
+}
+
+static int skd_cons_skspcl(struct skd_device *skdev)
+{
+ int rc = 0;
+ u32 i, nbytes;
+
+ pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
+ skdev->name, __func__, __LINE__,
+ sizeof(struct skd_special_context),
+ skdev->n_special,
+ sizeof(struct skd_special_context) * skdev->n_special);
+
+ skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
+ * skdev->n_special, GFP_KERNEL);
+ if (skdev->skspcl_table == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ for (i = 0; i < skdev->n_special; i++) {
+ struct skd_special_context *skspcl;
+
+ skspcl = &skdev->skspcl_table[i];
+
+ skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
+ skspcl->req.state = SKD_REQ_STATE_IDLE;
+
+ skspcl->req.next = &skspcl[1].req;
+
+ nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
+
+ skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
+ &skspcl->mb_dma_address);
+ if (skspcl->msg_buf == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ memset(skspcl->msg_buf, 0, nbytes);
+
+ skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
+ SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
+ if (skspcl->req.sg == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ skspcl->req.sksg_list = skd_cons_sg_list(skdev,
+ SKD_N_SG_PER_SPECIAL,
+ &skspcl->req.sksg_dma_address);
+ if (skspcl->req.sksg_list == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ }
+
+ /* Free list is in order starting with the 0th entry. */
+ skdev->skspcl_table[i - 1].req.next = NULL;
+ skdev->skspcl_free_list = skdev->skspcl_table;
+
+ return rc;
+
+err_out:
+ return rc;
+}
+
+static int skd_cons_sksb(struct skd_device *skdev)
+{
+ int rc = 0;
+ struct skd_special_context *skspcl;
+ u32 nbytes;
+
+ skspcl = &skdev->internal_skspcl;
+
+ skspcl->req.id = 0 + SKD_ID_INTERNAL;
+ skspcl->req.state = SKD_REQ_STATE_IDLE;
+
+ nbytes = SKD_N_INTERNAL_BYTES;
+
+ skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes,
+ &skspcl->db_dma_address);
+ if (skspcl->data_buf == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ memset(skspcl->data_buf, 0, nbytes);
+
+ nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
+ skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
+ &skspcl->mb_dma_address);
+ if (skspcl->msg_buf == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ memset(skspcl->msg_buf, 0, nbytes);
+
+ skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
+ &skspcl->req.sksg_dma_address);
+ if (skspcl->req.sksg_list == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ if (!skd_format_internal_skspcl(skdev)) {
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+err_out:
+ return rc;
+}
+
+static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
+ u32 n_sg,
+ dma_addr_t *ret_dma_addr)
+{
+ struct fit_sg_descriptor *sg_list;
+ u32 nbytes;
+
+ nbytes = sizeof(*sg_list) * n_sg;
+
+ sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
+
+ if (sg_list != NULL) {
+ uint64_t dma_address = *ret_dma_addr;
+ u32 i;
+
+ memset(sg_list, 0, nbytes);
+
+ for (i = 0; i < n_sg - 1; i++) {
+ uint64_t ndp_off;
+ ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
+
+ sg_list[i].next_desc_ptr = dma_address + ndp_off;
+ }
+ sg_list[i].next_desc_ptr = 0LL;
+ }
+
+ return sg_list;
+}
+
+static int skd_cons_disk(struct skd_device *skdev)
+{
+ int rc = 0;
+ struct gendisk *disk;
+ struct request_queue *q;
+ unsigned long flags;
+
+ disk = alloc_disk(SKD_MINORS_PER_DEVICE);
+ if (!disk) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ skdev->disk = disk;
+ sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
+
+ disk->major = skdev->major;
+ disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
+ disk->fops = &skd_blockdev_ops;
+ disk->private_data = skdev;
+
+ q = blk_init_queue(skd_request_fn, &skdev->lock);
+ if (!q) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ skdev->queue = q;
+ disk->queue = q;
+ q->queuedata = skdev;
+
+ blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+ blk_queue_max_segments(q, skdev->sgs_per_request);
+ blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
+
+ /* set sysfs optimal_io_size to 8K */
+ blk_queue_io_opt(q, 8192);
+
+ /* DISCARD Flag initialization. */
+ q->limits.discard_granularity = 8192;
+ q->limits.discard_alignment = 0;
+ q->limits.max_discard_sectors = UINT_MAX >> 9;
+ q->limits.discard_zeroes_data = 1;
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+
+ spin_lock_irqsave(&skdev->lock, flags);
+ pr_debug("%s:%s:%d stopping %s queue\n",
+ skdev->name, __func__, __LINE__, skdev->name);
+ blk_stop_queue(skdev->queue);
+ spin_unlock_irqrestore(&skdev->lock, flags);
+
+err_out:
+ return rc;
+}
+
+/*
+ *****************************************************************************
+ * DESTRUCT (FREE)
+ *****************************************************************************
+ */
+
+static void skd_free_skcomp(struct skd_device *skdev);
+static void skd_free_skmsg(struct skd_device *skdev);
+static void skd_free_skreq(struct skd_device *skdev);
+static void skd_free_skspcl(struct skd_device *skdev);
+static void skd_free_sksb(struct skd_device *skdev);
+static void skd_free_sg_list(struct skd_device *skdev,
+ struct fit_sg_descriptor *sg_list,
+ u32 n_sg, dma_addr_t dma_addr);
+static void skd_free_disk(struct skd_device *skdev);
+
+static void skd_destruct(struct skd_device *skdev)
+{
+ if (skdev == NULL)
+ return;
+
+ pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
+ skd_free_disk(skdev);
+
+ pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
+ skd_free_sksb(skdev);
+
+ pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
+ skd_free_skspcl(skdev);
+
+ pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
+ skd_free_skreq(skdev);
+
+ pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
+ skd_free_skmsg(skdev);
+
+ pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
+ skd_free_skcomp(skdev);
+
+ pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
+ kfree(skdev);
+}
+
+static void skd_free_skcomp(struct skd_device *skdev)
+{
+ if (skdev->skcomp_table != NULL) {
+ u32 nbytes;
+
+ nbytes = sizeof(skdev->skcomp_table[0]) *
+ SKD_N_COMPLETION_ENTRY;
+ pci_free_consistent(skdev->pdev, nbytes,
+ skdev->skcomp_table, skdev->cq_dma_address);
+ }
+
+ skdev->skcomp_table = NULL;
+ skdev->cq_dma_address = 0;
+}
+
+static void skd_free_skmsg(struct skd_device *skdev)
+{
+ u32 i;
+
+ if (skdev->skmsg_table == NULL)
+ return;
+
+ for (i = 0; i < skdev->num_fitmsg_context; i++) {
+ struct skd_fitmsg_context *skmsg;
+
+ skmsg = &skdev->skmsg_table[i];
+
+ if (skmsg->msg_buf != NULL) {
+ skmsg->msg_buf += skmsg->offset;
+ skmsg->mb_dma_address += skmsg->offset;
+ pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
+ skmsg->msg_buf,
+ skmsg->mb_dma_address);
+ }
+ skmsg->msg_buf = NULL;
+ skmsg->mb_dma_address = 0;
+ }
+
+ kfree(skdev->skmsg_table);
+ skdev->skmsg_table = NULL;
+}
+
+static void skd_free_skreq(struct skd_device *skdev)
+{
+ u32 i;
+
+ if (skdev->skreq_table == NULL)
+ return;
+
+ for (i = 0; i < skdev->num_req_context; i++) {
+ struct skd_request_context *skreq;
+
+ skreq = &skdev->skreq_table[i];
+
+ skd_free_sg_list(skdev, skreq->sksg_list,
+ skdev->sgs_per_request,
+ skreq->sksg_dma_address);
+
+ skreq->sksg_list = NULL;
+ skreq->sksg_dma_address = 0;
+
+ kfree(skreq->sg);
+ }
+
+ kfree(skdev->skreq_table);
+ skdev->skreq_table = NULL;
+}
+
+static void skd_free_skspcl(struct skd_device *skdev)
+{
+ u32 i;
+ u32 nbytes;
+
+ if (skdev->skspcl_table == NULL)
+ return;
+
+ for (i = 0; i < skdev->n_special; i++) {
+ struct skd_special_context *skspcl;
+
+ skspcl = &skdev->skspcl_table[i];
+
+ if (skspcl->msg_buf != NULL) {
+ nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
+ pci_free_consistent(skdev->pdev, nbytes,
+ skspcl->msg_buf,
+ skspcl->mb_dma_address);
+ }
+
+ skspcl->msg_buf = NULL;
+ skspcl->mb_dma_address = 0;
+
+ skd_free_sg_list(skdev, skspcl->req.sksg_list,
+ SKD_N_SG_PER_SPECIAL,
+ skspcl->req.sksg_dma_address);
+
+ skspcl->req.sksg_list = NULL;
+ skspcl->req.sksg_dma_address = 0;
+
+ kfree(skspcl->req.sg);
+ }
+
+ kfree(skdev->skspcl_table);
+ skdev->skspcl_table = NULL;
+}
+
+static void skd_free_sksb(struct skd_device *skdev)
+{
+ struct skd_special_context *skspcl;
+ u32 nbytes;
+
+ skspcl = &skdev->internal_skspcl;
+
+ if (skspcl->data_buf != NULL) {
+ nbytes = SKD_N_INTERNAL_BYTES;
+
+ pci_free_consistent(skdev->pdev, nbytes,
+ skspcl->data_buf, skspcl->db_dma_address);
+ }
+
+ skspcl->data_buf = NULL;
+ skspcl->db_dma_address = 0;
+
+ if (skspcl->msg_buf != NULL) {
+ nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
+ pci_free_consistent(skdev->pdev, nbytes,
+ skspcl->msg_buf, skspcl->mb_dma_address);
+ }
+
+ skspcl->msg_buf = NULL;
+ skspcl->mb_dma_address = 0;
+
+ skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
+ skspcl->req.sksg_dma_address);
+
+ skspcl->req.sksg_list = NULL;
+ skspcl->req.sksg_dma_address = 0;
+}
+
+static void skd_free_sg_list(struct skd_device *skdev,
+ struct fit_sg_descriptor *sg_list,
+ u32 n_sg, dma_addr_t dma_addr)
+{
+ if (sg_list != NULL) {
+ u32 nbytes;
+
+ nbytes = sizeof(*sg_list) * n_sg;
+
+ pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
+ }
+}
+
+static void skd_free_disk(struct skd_device *skdev)
+{
+ struct gendisk *disk = skdev->disk;
+
+ if (disk != NULL) {
+ struct request_queue *q = disk->queue;
+
+ if (disk->flags & GENHD_FL_UP)
+ del_gendisk(disk);
+ if (q)
+ blk_cleanup_queue(q);
+ put_disk(disk);
+ }
+ skdev->disk = NULL;
+}
+
+
+
+/*
+ *****************************************************************************
+ * BLOCK DEVICE (BDEV) GLUE
+ *****************************************************************************
+ */
+
+static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct skd_device *skdev;
+ u64 capacity;
+
+ skdev = bdev->bd_disk->private_data;
+
+ pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
+ skdev->name, __func__, __LINE__,
+ bdev->bd_disk->disk_name, current->comm);
+
+ if (skdev->read_cap_is_valid) {
+ capacity = get_capacity(skdev->disk);
+ geo->heads = 64;
+ geo->sectors = 255;
+ geo->cylinders = (capacity) / (255 * 64);
+
+ return 0;
+ }
+ return -EIO;
+}
+
+static int skd_bdev_attach(struct skd_device *skdev)
+{
+ pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
+ add_disk(skdev->disk);
+ return 0;
+}
+
+static const struct block_device_operations skd_blockdev_ops = {
+ .owner = THIS_MODULE,
+ .ioctl = skd_bdev_ioctl,
+ .getgeo = skd_bdev_getgeo,
+};
+
+
+/*
+ *****************************************************************************
+ * PCIe DRIVER GLUE
+ *****************************************************************************
+ */
+
+static DEFINE_PCI_DEVICE_TABLE(skd_pci_tbl) = {
+ { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { 0 } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
+
+static char *skd_pci_info(struct skd_device *skdev, char *str)
+{
+ int pcie_reg;
+
+ strcpy(str, "PCIe (");
+ pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
+
+ if (pcie_reg) {
+
+ char lwstr[6];
+ uint16_t pcie_lstat, lspeed, lwidth;
+
+ pcie_reg += 0x12;
+ pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
+ lspeed = pcie_lstat & (0xF);
+ lwidth = (pcie_lstat & 0x3F0) >> 4;
+
+ if (lspeed == 1)
+ strcat(str, "2.5GT/s ");
+ else if (lspeed == 2)
+ strcat(str, "5.0GT/s ");
+ else
+ strcat(str, "<unknown> ");
+ snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
+ strcat(str, lwstr);
+ }
+ return str;
+}
+
+static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int i;
+ int rc = 0;
+ char pci_str[32];
+ struct skd_device *skdev;
+
+ pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
+ DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
+ pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
+ pci_name(pdev), pdev->vendor, pdev->device);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out;
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (!rc) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+
+ pr_err("(%s): consistent DMA mask error %d\n",
+ pci_name(pdev), rc);
+ }
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+
+ pr_err("(%s): DMA mask error %d\n",
+ pci_name(pdev), rc);
+ goto err_out_regions;
+ }
+ }
+
+ skdev = skd_construct(pdev);
+ if (skdev == NULL) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ skd_pci_info(skdev, pci_str);
+ pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
+
+ pci_set_master(pdev);
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (rc) {
+ pr_err(
+ "(%s): bad enable of PCIe error reporting rc=%d\n",
+ skd_name(skdev), rc);
+ skdev->pcie_error_reporting_is_enabled = 0;
+ } else
+ skdev->pcie_error_reporting_is_enabled = 1;
+
+
+ pci_set_drvdata(pdev, skdev);
+ skdev->pdev = pdev;
+ skdev->disk->driverfs_dev = &pdev->dev;
+
+ for (i = 0; i < SKD_MAX_BARS; i++) {
+ skdev->mem_phys[i] = pci_resource_start(pdev, i);
+ skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
+ skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
+ skdev->mem_size[i]);
+ if (!skdev->mem_map[i]) {
+ pr_err("(%s): Unable to map adapter memory!\n",
+ skd_name(skdev));
+ rc = -ENODEV;
+ goto err_out_iounmap;
+ }
+ pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->mem_map[i],
+ (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
+ }
+
+ rc = skd_acquire_irq(skdev);
+ if (rc) {
+ pr_err("(%s): interrupt resource error %d\n",
+ skd_name(skdev), rc);
+ goto err_out_iounmap;
+ }
+
+ rc = skd_start_timer(skdev);
+ if (rc)
+ goto err_out_timer;
+
+ init_waitqueue_head(&skdev->waitq);
+
+ skd_start_device(skdev);
+
+ rc = wait_event_interruptible_timeout(skdev->waitq,
+ (skdev->gendisk_on),
+ (SKD_START_WAIT_SECONDS * HZ));
+ if (skdev->gendisk_on > 0) {
+ /* device came on-line after reset */
+ skd_bdev_attach(skdev);
+ rc = 0;
+ } else {
+ /*
+ * We timed out; something is wrong with the device,
+ * so don't add the disk structure.
+ */
+ pr_err(
+ "(%s): error: waiting for s1120 timed out %d!\n",
+ skd_name(skdev), rc);
+ /* in case of no error, we time out with ENXIO */
+ if (!rc)
+ rc = -ENXIO;
+ goto err_out_timer;
+ }
+
+
+#ifdef SKD_VMK_POLL_HANDLER
+ if (skdev->irq_type == SKD_IRQ_MSIX) {
+ /* MSIX completion handler is being used for coredump */
+ vmklnx_scsi_register_poll_handler(skdev->scsi_host,
+ skdev->msix_entries[5].vector,
+ skd_comp_q, skdev);
+ } else {
+ vmklnx_scsi_register_poll_handler(skdev->scsi_host,
+ skdev->pdev->irq, skd_isr,
+ skdev);
+ }
+#endif /* SKD_VMK_POLL_HANDLER */
+
+ return rc;
+
+err_out_timer:
+ skd_stop_device(skdev);
+ skd_release_irq(skdev);
+
+err_out_iounmap:
+ for (i = 0; i < SKD_MAX_BARS; i++)
+ if (skdev->mem_map[i])
+ iounmap(skdev->mem_map[i]);
+
+ if (skdev->pcie_error_reporting_is_enabled)
+ pci_disable_pcie_error_reporting(pdev);
+
+ skd_destruct(skdev);
+
+err_out_regions:
+ pci_release_regions(pdev);
+
+err_out:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return rc;
+}
+
+static void skd_pci_remove(struct pci_dev *pdev)
+{
+ int i;
+ struct skd_device *skdev;
+
+ skdev = pci_get_drvdata(pdev);
+ if (!skdev) {
+ pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ return;
+ }
+ skd_stop_device(skdev);
+ skd_release_irq(skdev);
+
+ for (i = 0; i < SKD_MAX_BARS; i++)
+ if (skdev->mem_map[i])
+ iounmap((u32 *)skdev->mem_map[i]);
+
+ if (skdev->pcie_error_reporting_is_enabled)
+ pci_disable_pcie_error_reporting(pdev);
+
+ skd_destruct(skdev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ return;
+}
+
+static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int i;
+ struct skd_device *skdev;
+
+ skdev = pci_get_drvdata(pdev);
+ if (!skdev) {
+ pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ return -EIO;
+ }
+
+ skd_stop_device(skdev);
+
+ skd_release_irq(skdev);
+
+ for (i = 0; i < SKD_MAX_BARS; i++)
+ if (skdev->mem_map[i])
+ iounmap((u32 *)skdev->mem_map[i]);
+
+ if (skdev->pcie_error_reporting_is_enabled)
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_release_regions(pdev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int skd_pci_resume(struct pci_dev *pdev)
+{
+ int i;
+ int rc = 0;
+ struct skd_device *skdev;
+
+ skdev = pci_get_drvdata(pdev);
+ if (!skdev) {
+ pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ return -1;
+ }
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out;
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (!rc) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+
+ pr_err("(%s): consistent DMA mask error %d\n",
+ pci_name(pdev), rc);
+ }
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+
+ pr_err("(%s): DMA mask error %d\n",
+ pci_name(pdev), rc);
+ goto err_out_regions;
+ }
+ }
+
+ pci_set_master(pdev);
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (rc) {
+ pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
+ skdev->name, rc);
+ skdev->pcie_error_reporting_is_enabled = 0;
+ } else
+ skdev->pcie_error_reporting_is_enabled = 1;
+
+ for (i = 0; i < SKD_MAX_BARS; i++) {
+
+ skdev->mem_phys[i] = pci_resource_start(pdev, i);
+ skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
+ skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
+ skdev->mem_size[i]);
+ if (!skdev->mem_map[i]) {
+ pr_err("(%s): Unable to map adapter memory!\n",
+ skd_name(skdev));
+ rc = -ENODEV;
+ goto err_out_iounmap;
+ }
+ pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->mem_map[i],
+ (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
+ }
+ rc = skd_acquire_irq(skdev);
+ if (rc) {
+
+ pr_err("(%s): interrupt resource error %d\n",
+ pci_name(pdev), rc);
+ goto err_out_iounmap;
+ }
+
+ rc = skd_start_timer(skdev);
+ if (rc)
+ goto err_out_timer;
+
+ init_waitqueue_head(&skdev->waitq);
+
+ skd_start_device(skdev);
+
+ return rc;
+
+err_out_timer:
+ skd_stop_device(skdev);
+ skd_release_irq(skdev);
+
+err_out_iounmap:
+ for (i = 0; i < SKD_MAX_BARS; i++)
+ if (skdev->mem_map[i])
+ iounmap(skdev->mem_map[i]);
+
+ if (skdev->pcie_error_reporting_is_enabled)
+ pci_disable_pcie_error_reporting(pdev);
+
+err_out_regions:
+ pci_release_regions(pdev);
+
+err_out:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static void skd_pci_shutdown(struct pci_dev *pdev)
+{
+ struct skd_device *skdev;
+
+ pr_err("skd_pci_shutdown called\n");
+
+ skdev = pci_get_drvdata(pdev);
+ if (!skdev) {
+ pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ return;
+ }
+
+ pr_err("%s: calling stop\n", skd_name(skdev));
+ skd_stop_device(skdev);
+}
+
+static struct pci_driver skd_driver = {
+ .name = DRV_NAME,
+ .id_table = skd_pci_tbl,
+ .probe = skd_pci_probe,
+ .remove = skd_pci_remove,
+ .suspend = skd_pci_suspend,
+ .resume = skd_pci_resume,
+ .shutdown = skd_pci_shutdown,
+};
+
+/*
+ *****************************************************************************
+ * LOGGING SUPPORT
+ *****************************************************************************
+ */
+
+static const char *skd_name(struct skd_device *skdev)
+{
+ memset(skdev->id_str, 0, sizeof(skdev->id_str));
+
+ if (skdev->inquiry_is_valid)
+ snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
+ skdev->name, skdev->inq_serial_num,
+ pci_name(skdev->pdev));
+ else
+ snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
+ skdev->name, pci_name(skdev->pdev));
+
+ return skdev->id_str;
+}
+
+const char *skd_drive_state_to_str(int state)
+{
+ switch (state) {
+ case FIT_SR_DRIVE_OFFLINE:
+ return "OFFLINE";
+ case FIT_SR_DRIVE_INIT:
+ return "INIT";
+ case FIT_SR_DRIVE_ONLINE:
+ return "ONLINE";
+ case FIT_SR_DRIVE_BUSY:
+ return "BUSY";
+ case FIT_SR_DRIVE_FAULT:
+ return "FAULT";
+ case FIT_SR_DRIVE_DEGRADED:
+ return "DEGRADED";
+ case FIT_SR_PCIE_LINK_DOWN:
+ return "INK_DOWN";
+ case FIT_SR_DRIVE_SOFT_RESET:
+ return "SOFT_RESET";
+ case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
+ return "NEED_FW";
+ case FIT_SR_DRIVE_INIT_FAULT:
+ return "INIT_FAULT";
+ case FIT_SR_DRIVE_BUSY_SANITIZE:
+ return "BUSY_SANITIZE";
+ case FIT_SR_DRIVE_BUSY_ERASE:
+ return "BUSY_ERASE";
+ case FIT_SR_DRIVE_FW_BOOTING:
+ return "FW_BOOTING";
+ default:
+ return "???";
+ }
+}
+
+const char *skd_skdev_state_to_str(enum skd_drvr_state state)
+{
+ switch (state) {
+ case SKD_DRVR_STATE_LOAD:
+ return "LOAD";
+ case SKD_DRVR_STATE_IDLE:
+ return "IDLE";
+ case SKD_DRVR_STATE_BUSY:
+ return "BUSY";
+ case SKD_DRVR_STATE_STARTING:
+ return "STARTING";
+ case SKD_DRVR_STATE_ONLINE:
+ return "ONLINE";
+ case SKD_DRVR_STATE_PAUSING:
+ return "PAUSING";
+ case SKD_DRVR_STATE_PAUSED:
+ return "PAUSED";
+ case SKD_DRVR_STATE_DRAINING_TIMEOUT:
+ return "DRAINING_TIMEOUT";
+ case SKD_DRVR_STATE_RESTARTING:
+ return "RESTARTING";
+ case SKD_DRVR_STATE_RESUMING:
+ return "RESUMING";
+ case SKD_DRVR_STATE_STOPPING:
+ return "STOPPING";
+ case SKD_DRVR_STATE_SYNCING:
+ return "SYNCING";
+ case SKD_DRVR_STATE_FAULT:
+ return "FAULT";
+ case SKD_DRVR_STATE_DISAPPEARED:
+ return "DISAPPEARED";
+ case SKD_DRVR_STATE_BUSY_ERASE:
+ return "BUSY_ERASE";
+ case SKD_DRVR_STATE_BUSY_SANITIZE:
+ return "BUSY_SANITIZE";
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ return "BUSY_IMMINENT";
+ case SKD_DRVR_STATE_WAIT_BOOT:
+ return "WAIT_BOOT";
+
+ default:
+ return "???";
+ }
+}
+
+const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+{
+ switch (state) {
+ case SKD_MSG_STATE_IDLE:
+ return "IDLE";
+ case SKD_MSG_STATE_BUSY:
+ return "BUSY";
+ default:
+ return "???";
+ }
+}
+
+const char *skd_skreq_state_to_str(enum skd_req_state state)
+{
+ switch (state) {
+ case SKD_REQ_STATE_IDLE:
+ return "IDLE";
+ case SKD_REQ_STATE_SETUP:
+ return "SETUP";
+ case SKD_REQ_STATE_BUSY:
+ return "BUSY";
+ case SKD_REQ_STATE_COMPLETED:
+ return "COMPLETED";
+ case SKD_REQ_STATE_TIMEOUT:
+ return "TIMEOUT";
+ case SKD_REQ_STATE_ABORTED:
+ return "ABORTED";
+ default:
+ return "???";
+ }
+}
+
+static void skd_log_skdev(struct skd_device *skdev, const char *event)
+{
+ pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
+ skdev->name, __func__, __LINE__, skdev->name, skdev, event);
+ pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
+ skdev->name, __func__, __LINE__,
+ skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
+ skd_skdev_state_to_str(skdev->state), skdev->state);
+ pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->in_flight, skdev->cur_max_queue_depth,
+ skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
+ pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
+ skdev->name, __func__, __LINE__,
+ skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
+}
+
+static void skd_log_skmsg(struct skd_device *skdev,
+ struct skd_fitmsg_context *skmsg, const char *event)
+{
+ pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
+ skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
+ pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
+ skdev->name, __func__, __LINE__,
+ skd_skmsg_state_to_str(skmsg->state), skmsg->state,
+ skmsg->id, skmsg->length);
+}
+
+static void skd_log_skreq(struct skd_device *skdev,
+ struct skd_request_context *skreq, const char *event)
+{
+ pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
+ skdev->name, __func__, __LINE__, skdev->name, skreq, event);
+ pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
+ skdev->name, __func__, __LINE__,
+ skd_skreq_state_to_str(skreq->state), skreq->state,
+ skreq->id, skreq->fitmsg_id);
+ pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
+ skdev->name, __func__, __LINE__,
+ skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
+
+ if (skreq->req != NULL) {
+ struct request *req = skreq->req;
+ u32 lba = (u32)blk_rq_pos(req);
+ u32 count = blk_rq_sectors(req);
+
+ pr_debug("%s:%s:%d "
+ "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
+ skdev->name, __func__, __LINE__,
+ req, lba, lba, count, count,
+ (int)rq_data_dir(req));
+ } else
+ pr_debug("%s:%s:%d req=NULL\n",
+ skdev->name, __func__, __LINE__);
+}
+
+/*
+ *****************************************************************************
+ * MODULE GLUE
+ *****************************************************************************
+ */
+
+static int __init skd_init(void)
+{
+ int rc = 0;
+
+ pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
+
+ switch (skd_isr_type) {
+ case SKD_IRQ_LEGACY:
+ case SKD_IRQ_MSI:
+ case SKD_IRQ_MSIX:
+ break;
+ default:
+ pr_info("skd_isr_type %d invalid, re-set to %d\n",
+ skd_isr_type, SKD_IRQ_DEFAULT);
+ skd_isr_type = SKD_IRQ_DEFAULT;
+ }
+
+ if (skd_max_queue_depth < 1
+ || skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
+ pr_info(
+ "skd_max_queue_depth %d invalid, re-set to %d\n",
+ skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
+ skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
+ }
+
+ if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
+ pr_info(
+ "skd_max_req_per_msg %d invalid, re-set to %d\n",
+ skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
+ skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
+ }
+
+ if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
+ pr_info(
+ "skd_sg_per_request %d invalid, re-set to %d\n",
+ skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
+ skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
+ }
+
+ if (skd_dbg_level < 0 || skd_dbg_level > 2) {
+ pr_info("skd_dbg_level %d invalid, re-set to %d\n",
+ skd_dbg_level, 0);
+ skd_dbg_level = 0;
+ }
+
+ if (skd_isr_comp_limit < 0) {
+ pr_info("skd_isr_comp_limit %d invalid, set to %d\n",
+ skd_isr_comp_limit, 0);
+ skd_isr_comp_limit = 0;
+ }
+
+ if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
+ pr_info("skd_max_pass_thru %d invalid, re-set to %d\n",
+ skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
+ skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
+ }
+
+ /* Obtain major device number. */
+ rc = register_blkdev(0, DRV_NAME);
+ if (rc < 0)
+ return rc;
+
+ skd_major = rc;
+
+ return pci_register_driver(&skd_driver);
+}
+
+static void __exit skd_exit(void)
+{
+ pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
+
+ unregister_blkdev(skd_major, DRV_NAME);
+ pci_unregister_driver(&skd_driver);
+}
+
+module_init(skd_init);
+module_exit(skd_exit);
diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h
new file mode 100644
index 000000000000..bf01941cdd62
--- /dev/null
+++ b/drivers/block/skd_s1120.h
@@ -0,0 +1,354 @@
+/* Copyright 2012 STEC, Inc.
+ *
+ * This file is licensed under the terms of the 3-clause
+ * BSD License (http://opensource.org/licenses/BSD-3-Clause)
+ * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
+ * at your option. Both licenses are also available in the LICENSE file
+ * distributed with this project. This file may not be copied, modified,
+ * or distributed except in accordance with those terms.
+ */
+
+
+#ifndef SKD_S1120_H
+#define SKD_S1120_H
+
+#pragma pack(push, s1120_h, 1)
+
+/*
+ * Q-channel, 64-bit r/w
+ */
+#define FIT_Q_COMMAND 0x400u
+#define FIT_QCMD_QID_MASK (0x3 << 1)
+#define FIT_QCMD_QID0 (0x0 << 1)
+#define FIT_QCMD_QID_NORMAL FIT_QCMD_QID0
+#ifndef SKD_OMIT_FROM_SRC_DIST
+#define FIT_QCMD_QID1 (0x1 << 1)
+#define FIT_QCMD_QID2 (0x2 << 1)
+#define FIT_QCMD_QID3 (0x3 << 1)
+#endif /* SKD_OMIT_FROM_SRC_DIST */
+#define FIT_QCMD_FLUSH_QUEUE (0ull) /* add QID */
+#define FIT_QCMD_MSGSIZE_MASK (0x3 << 4)
+#define FIT_QCMD_MSGSIZE_64 (0x0 << 4)
+#define FIT_QCMD_MSGSIZE_128 (0x1 << 4)
+#define FIT_QCMD_MSGSIZE_256 (0x2 << 4)
+#define FIT_QCMD_MSGSIZE_512 (0x3 << 4)
+#define FIT_QCMD_BASE_ADDRESS_MASK (0xFFFFFFFFFFFFFFC0ull)
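+
+/*
+ * Illustrative sketch, not part of the driver: a queue command word is
+ * built by OR-ing a 64-byte-aligned FIT message address with a message
+ * size encoding and a queue id, then writing the result to FIT_Q_COMMAND
+ * with the driver's 64-bit register write helper, roughly:
+ *
+ *	u64 qcmd = (msg_dma & FIT_QCMD_BASE_ADDRESS_MASK) |
+ *		   FIT_QCMD_MSGSIZE_128 | FIT_QCMD_QID_NORMAL;
+ */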
+
+
+/*
+ * Control, 32-bit r/w
+ */
+#define FIT_CONTROL 0x500u
+#ifndef SKD_OMIT_FROM_SRC_DIST
+#define FIT_CR_HARD_RESET (1u << 0u)
+#endif /* SKD_OMIT_FROM_SRC_DIST */
+#define FIT_CR_SOFT_RESET (1u << 1u)
+#ifndef SKD_OMIT_FROM_SRC_DIST
+#define FIT_CR_DIS_TIMESTAMPS (1u << 6u)
+#endif /* SKD_OMIT_FROM_SRC_DIST */
+#define FIT_CR_ENABLE_INTERRUPTS (1u << 7u)
+
+/*
+ * Status, 32-bit, r/o
+ */
+#define FIT_STATUS 0x510u
+#define FIT_SR_DRIVE_STATE_MASK 0x000000FFu
+#ifndef SKD_OMIT_FROM_SRC_DIST
+#define FIT_SR_SIGNATURE (0xFF << 8)
+#define FIT_SR_PIO_DMA (1 << 16)
+#endif /* SKD_OMIT_FROM_SRC_DIST */
+#define FIT_SR_DRIVE_OFFLINE 0x00
+#define FIT_SR_DRIVE_INIT 0x01
+/* #define FIT_SR_DRIVE_READY 0x02 */
+#define FIT_SR_DRIVE_ONLINE 0x03
+#define FIT_SR_DRIVE_BUSY 0x04
+#define FIT_SR_DRIVE_FAULT 0x05
+#define FIT_SR_DRIVE_DEGRADED 0x06
+#define FIT_SR_PCIE_LINK_DOWN 0x07
+#define FIT_SR_DRIVE_SOFT_RESET 0x08
+#define FIT_SR_DRIVE_INIT_FAULT 0x09
+#define FIT_SR_DRIVE_BUSY_SANITIZE 0x0A
+#define FIT_SR_DRIVE_BUSY_ERASE 0x0B
+#define FIT_SR_DRIVE_FW_BOOTING 0x0C
+#define FIT_SR_DRIVE_NEED_FW_DOWNLOAD 0xFE
+#define FIT_SR_DEVICE_MISSING 0xFF
+#define FIT_SR__RESERVED 0xFFFFFF00u
+
+#ifndef SKD_OMIT_FROM_SRC_DIST
+/*
+ * FIT_STATUS - Status register data definition
+ */
+#define FIT_SR_STATE_MASK (0xFF << 0)
+#define FIT_SR_SIGNATURE (0xFF << 8)
+#define FIT_SR_PIO_DMA (1 << 16)
+#endif /* SKD_OMIT_FROM_SRC_DIST */
+
+
+/*
+ * Interrupt status, 32-bit r/w1c (w1c ==> write 1 to clear)
+ */
+#define FIT_INT_STATUS_HOST 0x520u
+#define FIT_ISH_FW_STATE_CHANGE (1u << 0u)
+#define FIT_ISH_COMPLETION_POSTED (1u << 1u)
+#define FIT_ISH_MSG_FROM_DEV (1u << 2u)
+#define FIT_ISH_UNDEFINED_3 (1u << 3u)
+#define FIT_ISH_UNDEFINED_4 (1u << 4u)
+#define FIT_ISH_Q0_FULL (1u << 5u)
+#define FIT_ISH_Q1_FULL (1u << 6u)
+#define FIT_ISH_Q2_FULL (1u << 7u)
+#define FIT_ISH_Q3_FULL (1u << 8u)
+#define FIT_ISH_QCMD_FIFO_OVERRUN (1u << 9u)
+#define FIT_ISH_BAD_EXP_ROM_READ (1u << 10u)
+
+
+#define FIT_INT_DEF_MASK \
+ (FIT_ISH_FW_STATE_CHANGE | \
+ FIT_ISH_COMPLETION_POSTED | \
+ FIT_ISH_MSG_FROM_DEV | \
+ FIT_ISH_Q0_FULL | \
+ FIT_ISH_Q1_FULL | \
+ FIT_ISH_Q2_FULL | \
+ FIT_ISH_Q3_FULL | \
+ FIT_ISH_QCMD_FIFO_OVERRUN | \
+ FIT_ISH_BAD_EXP_ROM_READ)
+
+#define FIT_INT_QUEUE_FULL \
+ (FIT_ISH_Q0_FULL | \
+ FIT_ISH_Q1_FULL | \
+ FIT_ISH_Q2_FULL | \
+ FIT_ISH_Q3_FULL)
+
+
+#define MSI_MSG_NWL_ERROR_0 0x00000000
+#define MSI_MSG_NWL_ERROR_1 0x00000001
+#define MSI_MSG_NWL_ERROR_2 0x00000002
+#define MSI_MSG_NWL_ERROR_3 0x00000003
+#define MSI_MSG_STATE_CHANGE 0x00000004
+#define MSI_MSG_COMPLETION_POSTED 0x00000005
+#define MSI_MSG_MSG_FROM_DEV 0x00000006
+#define MSI_MSG_RESERVED_0 0x00000007
+#define MSI_MSG_RESERVED_1 0x00000008
+#define MSI_MSG_QUEUE_0_FULL 0x00000009
+#define MSI_MSG_QUEUE_1_FULL 0x0000000A
+#define MSI_MSG_QUEUE_2_FULL 0x0000000B
+#define MSI_MSG_QUEUE_3_FULL 0x0000000C
+
+
+
+#define FIT_INT_RESERVED_MASK \
+ (FIT_ISH_UNDEFINED_3 | \
+ FIT_ISH_UNDEFINED_4)
+/*
+ * Interrupt mask, 32-bit r/w
+ * Bit definitions are the same as FIT_INT_STATUS_HOST
+ */
+#define FIT_INT_MASK_HOST 0x528u
+
+
+/*
+ * Message to device, 32-bit r/w
+ */
+#define FIT_MSG_TO_DEVICE 0x540u
+
+/*
+ * Message from device, 32-bit, r/o
+ */
+#define FIT_MSG_FROM_DEVICE 0x548u
+
+
+/*
+ * 32-bit messages to/from device, composition/extraction macros
+ */
+#define FIT_MXD_CONS(TYPE, PARAM, DATA) \
+ ((((TYPE) & 0xFFu) << 24u) | \
+ (((PARAM) & 0xFFu) << 16u) | \
+ (((DATA) & 0xFFFFu) << 0u))
+#define FIT_MXD_TYPE(MXD) (((MXD) >> 24u) & 0xFFu)
+#define FIT_MXD_PARAM(MXD) (((MXD) >> 16u) & 0xFFu)
+#define FIT_MXD_DATA(MXD) (((MXD) >> 0u) & 0xFFFFu)
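+
+/*
+ * Illustrative sketch, not part of the driver: the two directions
+ * round-trip through these macros, e.g.
+ *
+ *	u32 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, qid, 0);
+ *
+ * where qid is a hypothetical queue index; FIT_MXD_TYPE(mtd) then
+ * returns FIT_MTD_ARM_QUEUE and FIT_MXD_PARAM(mtd) returns qid.
+ */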
+
+
+/*
+ * Types of messages to/from device
+ */
+#define FIT_MTD_FITFW_INIT 0x01u
+#define FIT_MTD_GET_CMDQ_DEPTH 0x02u
+#define FIT_MTD_SET_COMPQ_DEPTH 0x03u
+#define FIT_MTD_SET_COMPQ_ADDR 0x04u
+#define FIT_MTD_ARM_QUEUE 0x05u
+#define FIT_MTD_CMD_LOG_HOST_ID 0x07u
+#define FIT_MTD_CMD_LOG_TIME_STAMP_LO 0x08u
+#define FIT_MTD_CMD_LOG_TIME_STAMP_HI 0x09u
+#define FIT_MFD_SMART_EXCEEDED 0x10u
+#define FIT_MFD_POWER_DOWN 0x11u
+#define FIT_MFD_OFFLINE 0x12u
+#define FIT_MFD_ONLINE 0x13u
+#define FIT_MFD_FW_RESTARTING 0x14u
+#define FIT_MFD_PM_ACTIVE 0x15u
+#define FIT_MFD_PM_STANDBY 0x16u
+#define FIT_MFD_PM_SLEEP 0x17u
+#define FIT_MFD_CMD_PROGRESS 0x18u
+
+#ifndef SKD_OMIT_FROM_SRC_DIST
+#define FIT_MTD_DEBUG 0xFEu
+#define FIT_MFD_DEBUG 0xFFu
+#endif /* SKD_OMIT_FROM_SRC_DIST */
+
+#define FIT_MFD_MASK (0xFFu)
+#define FIT_MFD_DATA_MASK (0xFFu)
+#define FIT_MFD_MSG(x) (((x) >> 24) & FIT_MFD_MASK)
+#define FIT_MFD_DATA(x) ((x) & FIT_MFD_MASK)
+
+
+/*
+ * Extra arg to FIT_MSG_TO_DEVICE, 64-bit r/w
+ * Used to set completion queue address (FIT_MTD_SET_COMPQ_ADDR)
+ * (was Response buffer in docs)
+ */
+#define FIT_MSG_TO_DEVICE_ARG 0x580u
+
+/*
+ * Hardware (ASIC) version, 32-bit r/o
+ */
+#define FIT_HW_VERSION 0x588u
+
+/*
+ * Scatter/gather list descriptor.
+ * 32-bytes and must be aligned on a 32-byte boundary.
+ * All fields are in little endian order.
+ */
+struct fit_sg_descriptor {
+ uint32_t control;
+ uint32_t byte_count;
+ uint64_t host_side_addr;
+ uint64_t dev_side_addr;
+ uint64_t next_desc_ptr;
+};
+
+#define FIT_SGD_CONTROL_NOT_LAST 0x000u
+#define FIT_SGD_CONTROL_LAST 0x40Eu
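+
+/*
+ * Illustrative sketch, not part of the driver: descriptors are chained
+ * through next_desc_ptr using the list's own DMA address, with the final
+ * entry terminating the chain, along the lines of:
+ *
+ *	for (i = 0; i < n_sg - 1; i++)
+ *		sg_list[i].next_desc_ptr =
+ *			list_dma + (i + 1) * sizeof(*sg_list);
+ *	sg_list[n_sg - 1].next_desc_ptr = 0LL;
+ *
+ * and the entry carrying the last fragment of a transfer gets
+ * control = FIT_SGD_CONTROL_LAST, the others FIT_SGD_CONTROL_NOT_LAST.
+ */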
+
+/*
+ * Header at the beginning of a FIT message. The header
+ * is followed by SSDI requests each 64 bytes.
+ * A FIT message can be up to 512 bytes long and must start
+ * on a 64-byte boundary.
+ */
+struct fit_msg_hdr {
+ uint8_t protocol_id;
+ uint8_t num_protocol_cmds_coalesced;
+ uint8_t _reserved[62];
+};
+
+#define FIT_PROTOCOL_ID_FIT 1
+#define FIT_PROTOCOL_ID_SSDI 2
+#define FIT_PROTOCOL_ID_SOFIT 3
+
+
+#define FIT_PROTOCOL_MINOR_VER(mtd_val) ((mtd_val >> 16) & 0xF)
+#define FIT_PROTOCOL_MAJOR_VER(mtd_val) ((mtd_val >> 20) & 0xF)
+
+#ifndef SKD_OMIT_FROM_SRC_DIST
+/*
+ * Format of a completion entry. The completion queue is circular
+ * and must have at least as many entries as the maximum number
+ * of commands that may be issued to the device.
+ *
+ * There are no head/tail pointers. The cycle value is used to
+ * infer the presence of new completion records.
+ * Initially the cycle in all entries is 0, the index is 0, and
+ * the cycle value to expect is 1. When completions are added
+ * their cycle values are set to 1. When the index wraps the
+ * cycle value to expect is incremented.
+ *
+ * Command_context is opaque and taken verbatim from the SSDI command.
+ * All other fields are big endian.
+ */
+#endif /* SKD_OMIT_FROM_SRC_DIST */
+#define FIT_PROTOCOL_VERSION_0 0
+
+/*
+ * Protocol major version 1 completion entry.
+ * The major protocol version is found in bits
+ * 20-23 of the FIT_MTD_FITFW_INIT response.
+ */
+struct fit_completion_entry_v1 {
+ uint32_t num_returned_bytes;
+ uint16_t tag;
+ uint8_t status; /* SCSI status */
+ uint8_t cycle;
+};
+#define FIT_PROTOCOL_VERSION_1 1
+#define FIT_PROTOCOL_VERSION_CURRENT FIT_PROTOCOL_VERSION_1
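+
+/*
+ * Illustrative sketch, not part of the driver: a consumer walks the ring
+ * by comparing each entry's cycle field against the cycle value it
+ * expects, bumping the expected value whenever the index wraps, roughly:
+ *
+ *	while (q[ix].cycle == expect) {
+ *		consume(&q[ix]);
+ *		if (++ix >= nentries) {
+ *			ix = 0;
+ *			expect++;
+ *		}
+ *	}
+ *
+ * where q, ix, expect, nentries and consume() stand in for the driver's
+ * own completion-queue state.
+ */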
+
+struct fit_comp_error_info {
+ uint8_t type:7; /* 00: Bits 0-6 indicate the type of sense data. */
+ uint8_t valid:1; /* 00: Bit 7 := 1 ==> info field is valid. */
+ uint8_t reserved0; /* 01: Obsolete field */
+ uint8_t key:4; /* 02: Bits 0-3 indicate the sense key. */
+ uint8_t reserved2:1; /* 02: Reserved bit. */
+ uint8_t bad_length:1; /* 02: Incorrect Length Indicator */
+ uint8_t end_medium:1; /* 02: End of Medium */
+ uint8_t file_mark:1; /* 02: Filemark */
+ uint8_t info[4]; /* 03: */
+ uint8_t reserved1; /* 07: Additional Sense Length */
+ uint8_t cmd_spec[4]; /* 08: Command Specific Information */
+ uint8_t code; /* 0C: Additional Sense Code */
+ uint8_t qual; /* 0D: Additional Sense Code Qualifier */
+ uint8_t fruc; /* 0E: Field Replaceable Unit Code */
+ uint8_t sks_high:7; /* 0F: Sense Key Specific (MSB) */
+ uint8_t sks_valid:1; /* 0F: Sense Key Specific Valid */
+ uint16_t sks_low; /* 10: Sense Key Specific (LSW) */
+ uint16_t reserved3; /* 12: Part of additional sense bytes (unused) */
+ uint16_t uec; /* 14: Additional Sense Bytes */
+ uint64_t per; /* 16: Additional Sense Bytes */
+ uint8_t reserved4[2]; /* 1E: Additional Sense Bytes (unused) */
+};
+
+
+/* Task management constants */
+#define SOFT_TASK_SIMPLE 0x00
+#define SOFT_TASK_HEAD_OF_QUEUE 0x01
+#define SOFT_TASK_ORDERED 0x02
+
+
+/*
+ * Version zero has the last 32 bits reserved;
+ * version one uses them for sg_list_len_bytes.
+ */
+struct skd_command_header {
+ uint64_t sg_list_dma_address;
+ uint16_t tag;
+ uint8_t attribute;
+ uint8_t add_cdb_len; /* In 32 bit words */
+ uint32_t sg_list_len_bytes;
+};
+
+struct skd_scsi_request {
+ struct skd_command_header hdr;
+ unsigned char cdb[16];
+/* unsigned char _reserved[16]; */
+};
+
+struct driver_inquiry_data {
+ uint8_t peripheral_device_type:5;
+ uint8_t qualifier:3;
+ uint8_t page_code;
+ uint16_t page_length;
+ uint16_t pcie_bus_number;
+ uint8_t pcie_device_number;
+ uint8_t pcie_function_number;
+ uint8_t pcie_link_speed;
+ uint8_t pcie_link_lanes;
+ uint16_t pcie_vendor_id;
+ uint16_t pcie_device_id;
+ uint16_t pcie_subsystem_vendor_id;
+ uint16_t pcie_subsystem_device_id;
+ uint8_t reserved1[2];
+ uint8_t reserved2[3];
+ uint8_t driver_version_length;
+ uint8_t driver_version[0x14];
+};
+
+#pragma pack(pop, s1120_h)
+
+#endif /* SKD_S1120_H */
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index ad70868f8a96..4cf81b5bf0f7 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -108,8 +108,7 @@ struct cardinfo {
* have been written
*/
struct bio *bio, *currentbio, **biotail;
- int current_idx;
- sector_t current_sector;
+ struct bvec_iter current_iter;
struct request_queue *queue;
@@ -118,7 +117,7 @@ struct cardinfo {
struct mm_dma_desc *desc;
int cnt, headcnt;
struct bio *bio, **biotail;
- int idx;
+ struct bvec_iter iter;
} mm_pages[2];
#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
@@ -344,16 +343,13 @@ static int add_bio(struct cardinfo *card)
dma_addr_t dma_handle;
int offset;
struct bio *bio;
- struct bio_vec *vec;
- int idx;
+ struct bio_vec vec;
int rw;
- int len;
bio = card->currentbio;
if (!bio && card->bio) {
card->currentbio = card->bio;
- card->current_idx = card->bio->bi_idx;
- card->current_sector = card->bio->bi_sector;
+ card->current_iter = card->bio->bi_iter;
card->bio = card->bio->bi_next;
if (card->bio == NULL)
card->biotail = &card->bio;
@@ -362,18 +358,17 @@ static int add_bio(struct cardinfo *card)
}
if (!bio)
return 0;
- idx = card->current_idx;
rw = bio_rw(bio);
if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
return 0;
- vec = bio_iovec_idx(bio, idx);
- len = vec->bv_len;
+ vec = bio_iter_iovec(bio, card->current_iter);
+
dma_handle = pci_map_page(card->dev,
- vec->bv_page,
- vec->bv_offset,
- len,
+ vec.bv_page,
+ vec.bv_offset,
+ vec.bv_len,
(rw == READ) ?
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
@@ -381,7 +376,7 @@ static int add_bio(struct cardinfo *card)
desc = &p->desc[p->cnt];
p->cnt++;
if (p->bio == NULL)
- p->idx = idx;
+ p->iter = card->current_iter;
if ((p->biotail) != &bio->bi_next) {
*(p->biotail) = bio;
p->biotail = &(bio->bi_next);
@@ -391,8 +386,8 @@ static int add_bio(struct cardinfo *card)
desc->data_dma_handle = dma_handle;
desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
- desc->local_addr = cpu_to_le64(card->current_sector << 9);
- desc->transfer_size = cpu_to_le32(len);
+ desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9);
+ desc->transfer_size = cpu_to_le32(vec.bv_len);
offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
desc->zero1 = desc->zero2 = 0;
@@ -407,10 +402,9 @@ static int add_bio(struct cardinfo *card)
desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
desc->sem_control_bits = desc->control_bits;
- card->current_sector += (len >> 9);
- idx++;
- card->current_idx = idx;
- if (idx >= bio->bi_vcnt)
+
+ bio_advance_iter(bio, &card->current_iter, vec.bv_len);
+ if (!card->current_iter.bi_size)
card->currentbio = NULL;
return 1;
@@ -439,23 +433,25 @@ static void process_page(unsigned long data)
struct mm_dma_desc *desc = &page->desc[page->headcnt];
int control = le32_to_cpu(desc->sem_control_bits);
int last = 0;
- int idx;
+ struct bio_vec vec;
if (!(control & DMASCR_DMA_COMPLETE)) {
control = dma_status;
last = 1;
}
+
page->headcnt++;
- idx = page->idx;
- page->idx++;
- if (page->idx >= bio->bi_vcnt) {
+ vec = bio_iter_iovec(bio, page->iter);
+ bio_advance_iter(bio, &page->iter, vec.bv_len);
+
+ if (!page->iter.bi_size) {
page->bio = bio->bi_next;
if (page->bio)
- page->idx = page->bio->bi_idx;
+ page->iter = page->bio->bi_iter;
}
pci_unmap_page(card->dev, desc->data_dma_handle,
- bio_iovec_idx(bio, idx)->bv_len,
+ vec.bv_len,
(control & DMASCR_TRANSFER_READ) ?
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
if (control & DMASCR_HARD_ERROR) {
@@ -532,7 +528,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;
pr_debug("mm_make_request %llu %u\n",
- (unsigned long long)bio->bi_sector, bio->bi_size);
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size);
spin_lock_irq(&card->lock);
*card->biotail = bio;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5cdf88b7ad9e..75307b6061c2 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -184,7 +184,7 @@ static void virtblk_bio_send_data(struct virtblk_req *vbr)
vbr->flags &= ~VBLK_IS_FLUSH;
vbr->out_hdr.type = 0;
- vbr->out_hdr.sector = bio->bi_sector;
+ vbr->out_hdr.sector = bio->bi_iter.bi_sector;
vbr->out_hdr.ioprio = bio_prio(bio);
if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) {
@@ -292,6 +292,8 @@ static void virtblk_done(struct virtqueue *vq)
req_done = true;
}
}
+ if (unlikely(virtqueue_is_broken(vq)))
+ break;
} while (!virtqueue_enable_cb(vq));
/* In case queue is stopped waiting for more buffers. */
if (req_done)
@@ -400,7 +402,7 @@ static void virtblk_make_request(struct request_queue *q, struct bio *bio)
vbr->flags |= VBLK_REQ_FLUSH;
if (bio->bi_rw & REQ_FUA)
vbr->flags |= VBLK_REQ_FUA;
- if (bio->bi_size)
+ if (bio->bi_iter.bi_size)
vbr->flags |= VBLK_REQ_DATA;
if (unlikely(vbr->flags & VBLK_REQ_FLUSH))
@@ -456,18 +458,15 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
struct virtio_blk *vblk = bd->bd_disk->private_data;
- struct virtio_blk_geometry vgeo;
- int err;
/* see if the host passed in geometry config */
- err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
- offsetof(struct virtio_blk_config, geometry),
- &vgeo);
-
- if (!err) {
- geo->heads = vgeo.heads;
- geo->sectors = vgeo.sectors;
- geo->cylinders = vgeo.cylinders;
+ if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
+ virtio_cread(vblk->vdev, struct virtio_blk_config,
+ geometry.cylinders, &geo->cylinders);
+ virtio_cread(vblk->vdev, struct virtio_blk_config,
+ geometry.heads, &geo->heads);
+ virtio_cread(vblk->vdev, struct virtio_blk_config,
+ geometry.sectors, &geo->sectors);
} else {
/* some standard values, similar to sd */
geo->heads = 1 << 6;
@@ -529,8 +528,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
goto done;
/* Host must always specify the capacity. */
- vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
- &capacity, sizeof(capacity));
+ virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
/* If capacity is too big, truncate with warning. */
if ((sector_t)capacity != capacity) {
@@ -608,9 +606,9 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
u8 writeback;
int err;
- err = virtio_config_val(vdev, VIRTIO_BLK_F_CONFIG_WCE,
- offsetof(struct virtio_blk_config, wce),
- &writeback);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
+ struct virtio_blk_config, wce,
+ &writeback);
if (err)
writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
@@ -642,7 +640,6 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
struct virtio_blk *vblk = disk->private_data;
struct virtio_device *vdev = vblk->vdev;
int i;
- u8 writeback;
BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
@@ -652,11 +649,7 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
if (i < 0)
return -EINVAL;
- writeback = i;
- vdev->config->set(vdev,
- offsetof(struct virtio_blk_config, wce),
- &writeback, sizeof(writeback));
-
+ virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
virtblk_update_cache_mode(vdev);
return count;
}
@@ -699,9 +692,9 @@ static int virtblk_probe(struct virtio_device *vdev)
index = err;
/* We need to know how many segments before we allocate. */
- err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
- offsetof(struct virtio_blk_config, seg_max),
- &sg_elems);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
+ struct virtio_blk_config, seg_max,
+ &sg_elems);
/* We need at least one SG element, whatever they say. */
if (err || !sg_elems)
@@ -772,8 +765,7 @@ static int virtblk_probe(struct virtio_device *vdev)
set_disk_ro(vblk->disk, 1);
/* Host must always specify the capacity. */
- vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
- &cap, sizeof(cap));
+ virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);
/* If capacity is too big, truncate with warning. */
if ((sector_t)cap != cap) {
@@ -794,46 +786,45 @@ static int virtblk_probe(struct virtio_device *vdev)
/* Host can optionally specify maximum segment size and number of
* segments. */
- err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
- offsetof(struct virtio_blk_config, size_max),
- &v);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
+ struct virtio_blk_config, size_max, &v);
if (!err)
blk_queue_max_segment_size(q, v);
else
blk_queue_max_segment_size(q, -1U);
/* Host can optionally specify the block size of the device */
- err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
- offsetof(struct virtio_blk_config, blk_size),
- &blk_size);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
+ struct virtio_blk_config, blk_size,
+ &blk_size);
if (!err)
blk_queue_logical_block_size(q, blk_size);
else
blk_size = queue_logical_block_size(q);
/* Use topology information if available */
- err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
- offsetof(struct virtio_blk_config, physical_block_exp),
- &physical_block_exp);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+ struct virtio_blk_config, physical_block_exp,
+ &physical_block_exp);
if (!err && physical_block_exp)
blk_queue_physical_block_size(q,
blk_size * (1 << physical_block_exp));
- err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
- offsetof(struct virtio_blk_config, alignment_offset),
- &alignment_offset);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+ struct virtio_blk_config, alignment_offset,
+ &alignment_offset);
if (!err && alignment_offset)
blk_queue_alignment_offset(q, blk_size * alignment_offset);
- err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
- offsetof(struct virtio_blk_config, min_io_size),
- &min_io_size);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+ struct virtio_blk_config, min_io_size,
+ &min_io_size);
if (!err && min_io_size)
blk_queue_io_min(q, blk_size * min_io_size);
- err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
- offsetof(struct virtio_blk_config, opt_io_size),
- &opt_io_size);
+ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+ struct virtio_blk_config, opt_io_size,
+ &opt_io_size);
if (!err && opt_io_size)
blk_queue_io_opt(q, blk_size * opt_io_size);
@@ -899,7 +890,7 @@ static void virtblk_remove(struct virtio_device *vdev)
ida_simple_remove(&vd_index_ida, index);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
@@ -959,7 +950,7 @@ static struct virtio_driver virtio_blk = {
.probe = virtblk_probe,
.remove = virtblk_remove,
.config_changed = virtblk_config_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtblk_freeze,
.restore = virtblk_restore,
#endif
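
The virtio_blk hunks above replace open-coded vdev->config->get() calls and virtio_config_val() with the virtio_cread* helpers, which take the config structure and field name and add type checking and endian handling; virtio_cread_feature() additionally returns -ENOENT when the feature bit was not negotiated, matching the old virtio_config_val() semantics. A small usage sketch (the fallback value is illustrative only):

#include <linux/virtio_config.h>
#include <linux/virtio_blk.h>

/* Sketch: read the optional, feature-gated block size from config space. */
static u32 read_blk_size(struct virtio_device *vdev)
{
        u32 blk_size;

        /* roughly vdev->config->get(vdev, offsetof(struct virtio_blk_config,
         * blk_size), &blk_size, sizeof(blk_size)), gated on the feature bit */
        if (virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                 struct virtio_blk_config, blk_size, &blk_size))
                blk_size = 512; /* feature absent: assume a default */

        return blk_size;
}
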
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index bf4b9d282c04..4b97b86da926 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -887,6 +887,8 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
unsigned long secure;
struct phys_req preq;
+ xen_blkif_get(blkif);
+
preq.sector_number = req->u.discard.sector_number;
preq.nr_sects = req->u.discard.nr_sectors;
@@ -899,7 +901,6 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
}
blkif->st_ds_req++;
- xen_blkif_get(blkif);
secure = (blkif->vbd.discard_secure &&
(req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
BLKDEV_DISCARD_SECURE : 0;
@@ -1256,7 +1257,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
bio->bi_bdev = preq.bdev;
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
- bio->bi_sector = preq.sector_number;
+ bio->bi_iter.bi_sector = preq.sector_number;
}
preq.sector_number += seg[i].nsec;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a4660bbee8a6..557c590df001 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -400,10 +400,13 @@ static int blkif_queue_request(struct request *req)
if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1;
- max_grefs = info->max_indirect_segments ?
- info->max_indirect_segments +
- INDIRECT_GREFS(info->max_indirect_segments) :
- BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ max_grefs = req->nr_phys_segments;
+ if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
+ /*
+ * If we are using indirect segments we need to account
+ * for the indirect grefs used in the request.
+ */
+ max_grefs += INDIRECT_GREFS(req->nr_phys_segments);
/* Check if we have enough grants to allocate a requests */
if (info->persistent_gnts_c < max_grefs) {
@@ -1013,13 +1016,38 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
}
/* Add the persistent grant into the list of free grants */
for (i = 0; i < nseg; i++) {
- list_add(&s->grants_used[i]->node, &info->persistent_gnts);
- info->persistent_gnts_c++;
+ if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
+ /*
+ * If the grant is still mapped by the backend (the
+ * backend has chosen to make this grant persistent)
+ * we add it at the head of the list, so it will be
+ * reused first.
+ */
+ list_add(&s->grants_used[i]->node, &info->persistent_gnts);
+ info->persistent_gnts_c++;
+ } else {
+ /*
+ * If the grant is not mapped by the backend we end the
+ * foreign access and add it to the tail of the list,
+ * so it will not be picked again unless we run out of
+ * persistent grants.
+ */
+ gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
+ s->grants_used[i]->gref = GRANT_INVALID_REF;
+ list_add_tail(&s->grants_used[i]->node, &info->persistent_gnts);
+ }
}
if (s->req.operation == BLKIF_OP_INDIRECT) {
for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
- list_add(&s->indirect_grants[i]->node, &info->persistent_gnts);
- info->persistent_gnts_c++;
+ if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
+ list_add(&s->indirect_grants[i]->node, &info->persistent_gnts);
+ info->persistent_gnts_c++;
+ } else {
+ gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
+ s->indirect_grants[i]->gref = GRANT_INVALID_REF;
+ list_add_tail(&s->indirect_grants[i]->node,
+ &info->persistent_gnts);
+ }
}
}
}
@@ -1336,57 +1364,6 @@ static int blkfront_probe(struct xenbus_device *dev,
return 0;
}
-/*
- * This is a clone of md_trim_bio, used to split a bio into smaller ones
- */
-static void trim_bio(struct bio *bio, int offset, int size)
-{
- /* 'bio' is a cloned bio which we need to trim to match
- * the given offset and size.
- * This requires adjusting bi_sector, bi_size, and bi_io_vec
- */
- int i;
- struct bio_vec *bvec;
- int sofar = 0;
-
- size <<= 9;
- if (offset == 0 && size == bio->bi_size)
- return;
-
- bio->bi_sector += offset;
- bio->bi_size = size;
- offset <<= 9;
- clear_bit(BIO_SEG_VALID, &bio->bi_flags);
-
- while (bio->bi_idx < bio->bi_vcnt &&
- bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
- /* remove this whole bio_vec */
- offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
- bio->bi_idx++;
- }
- if (bio->bi_idx < bio->bi_vcnt) {
- bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
- bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
- }
- /* avoid any complications with bi_idx being non-zero*/
- if (bio->bi_idx) {
- memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
- (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
- bio->bi_vcnt -= bio->bi_idx;
- bio->bi_idx = 0;
- }
- /* Make sure vcnt and last bv are not too big */
- bio_for_each_segment(bvec, bio, i) {
- if (sofar + bvec->bv_len > size)
- bvec->bv_len = size - sofar;
- if (bvec->bv_len == 0) {
- bio->bi_vcnt = i;
- break;
- }
- sofar += bvec->bv_len;
- }
-}
-
static void split_bio_end(struct bio *bio, int error)
{
struct split_bio *split_bio = bio->bi_private;
@@ -1519,10 +1496,10 @@ static int blkif_recover(struct blkfront_info *info)
for (i = 0; i < pending; i++) {
offset = (i * segs * PAGE_SIZE) >> 9;
size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
- (unsigned int)(bio->bi_size >> 9) - offset);
+ (unsigned int)bio_sectors(bio) - offset);
cloned_bio = bio_clone(bio, GFP_NOIO);
BUG_ON(cloned_bio == NULL);
- trim_bio(cloned_bio, offset, size);
+ bio_trim(cloned_bio, offset, size);
cloned_bio->bi_private = split_bio;
cloned_bio->bi_end_io = split_bio_end;
submit_bio(cloned_bio->bi_rw, cloned_bio);
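
The blkfront hunk above drops the driver-local trim_bio() clone of md_trim_bio in favour of the generic bio_trim(), whose offset and size arguments are both in 512-byte sectors. A minimal sketch of the clone-and-trim step (the helper name is illustrative, not the driver's):

#include <linux/bio.h>

/* Sketch: carve out [offset_sectors, offset_sectors + size_sectors) of a bio. */
static struct bio *clone_range(struct bio *bio, int offset_sectors,
                               int size_sectors)
{
        struct bio *clone = bio_clone(bio, GFP_NOIO);

        if (clone)
                bio_trim(clone, offset_sectors, size_sectors);

        return clone;
}
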
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 4afae20df512..9fe8a875a827 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -30,3 +30,5 @@ hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o
hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o
hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
hci_uart-objs := $(hci_uart-y)
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 0a327f4154a2..6bfc1bb318f6 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -57,7 +57,7 @@ struct ath3k_version {
unsigned char reserved[0x07];
};
-static struct usb_device_id ath3k_table[] = {
+static const struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 */
{ USB_DEVICE(0x0CF3, 0x3000) },
@@ -112,7 +112,7 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
#define BTUSB_ATH3012 0x80
/* This table is to load patch and sysconfig files
* for AR3012 */
-static struct usb_device_id ath3k_blist_tbl[] = {
+static const struct usb_device_id ath3k_blist_tbl[] = {
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 995aee9cba22..31386998c9a7 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -42,7 +42,7 @@
static struct usb_driver bfusb_driver;
-static struct usb_device_id bfusb_table[] = {
+static const struct usb_device_id bfusb_table[] = {
/* AVM BlueFRITZ! USB */
{ USB_DEVICE(0x057c, 0x2200) },
@@ -318,7 +318,6 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
return -ENOMEM;
}
- skb->dev = (void *) data->hdev;
bt_cb(skb)->pkt_type = pkt_type;
data->reassembly = skb;
@@ -333,7 +332,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
memcpy(skb_put(data->reassembly, len), buf, len);
if (hdr & 0x08) {
- hci_recv_frame(data->reassembly);
+ hci_recv_frame(data->hdev, data->reassembly);
data->reassembly = NULL;
}
@@ -465,26 +464,18 @@ static int bfusb_close(struct hci_dev *hdev)
return 0;
}
-static int bfusb_send_frame(struct sk_buff *skb)
+static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
- struct bfusb_data *data;
+ struct bfusb_data *data = hci_get_drvdata(hdev);
struct sk_buff *nskb;
unsigned char buf[3];
int sent = 0, size, count;
BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
- if (!hdev) {
- BT_ERR("Frame for unknown HCI device (hdev=NULL)");
- return -ENODEV;
- }
-
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
- data = hci_get_drvdata(hdev);
-
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
@@ -544,11 +535,6 @@ static int bfusb_send_frame(struct sk_buff *skb)
return 0;
}
-static int bfusb_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
static int bfusb_load_firmware(struct bfusb_data *data,
const unsigned char *firmware, int count)
{
@@ -699,11 +685,10 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
hci_set_drvdata(hdev, data);
SET_HCIDEV_DEV(hdev, &intf->dev);
- hdev->open = bfusb_open;
- hdev->close = bfusb_close;
- hdev->flush = bfusb_flush;
- hdev->send = bfusb_send_frame;
- hdev->ioctl = bfusb_ioctl;
+ hdev->open = bfusb_open;
+ hdev->close = bfusb_close;
+ hdev->flush = bfusb_flush;
+ hdev->send = bfusb_send_frame;
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
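
The bfusb conversion above shows the pattern repeated by every Bluetooth driver in the rest of this series: hci_recv_frame() and the ->send callback now take the hci_dev explicitly, so drivers stop stashing it in skb->dev, and the unused ->ioctl stubs are dropped. A condensed sketch of the converted callbacks for a hypothetical driver "foo":

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

struct foo_data {                       /* hypothetical driver state */
        struct sk_buff_head txq;
};

static int foo_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct foo_data *data = hci_get_drvdata(hdev);  /* was: skb->dev */

        if (!test_bit(HCI_RUNNING, &hdev->flags))
                return -EBUSY;

        skb_queue_tail(&data->txq, skb);
        return 0;
}

static void foo_deliver(struct hci_dev *hdev, struct sk_buff *skb)
{
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        hci_recv_frame(hdev, skb);      /* hdev passed explicitly now */
}
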
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 6c3e3d43c718..57427de864a6 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -399,7 +399,6 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
- info->rx_skb->dev = (void *) info->hdev;
bt_cb(info->rx_skb)->pkt_type = buf[i];
switch (bt_cb(info->rx_skb)->pkt_type) {
@@ -477,7 +476,7 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
break;
case RECV_WAIT_DATA:
- hci_recv_frame(info->rx_skb);
+ hci_recv_frame(info->hdev, info->rx_skb);
info->rx_skb = NULL;
break;
@@ -659,17 +658,9 @@ static int bluecard_hci_close(struct hci_dev *hdev)
}
-static int bluecard_hci_send_frame(struct sk_buff *skb)
+static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- bluecard_info_t *info;
- struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
-
- if (!hdev) {
- BT_ERR("Frame for unknown HCI device (hdev=NULL)");
- return -ENODEV;
- }
-
- info = hci_get_drvdata(hdev);
+ bluecard_info_t *info = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -693,12 +684,6 @@ static int bluecard_hci_send_frame(struct sk_buff *skb)
}
-static int bluecard_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
-
/* ======================== Card services HCI interaction ======================== */
@@ -734,11 +719,10 @@ static int bluecard_open(bluecard_info_t *info)
hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
- hdev->open = bluecard_hci_open;
- hdev->close = bluecard_hci_close;
- hdev->flush = bluecard_hci_flush;
- hdev->send = bluecard_hci_send_frame;
- hdev->ioctl = bluecard_hci_ioctl;
+ hdev->open = bluecard_hci_open;
+ hdev->close = bluecard_hci_close;
+ hdev->flush = bluecard_hci_flush;
+ hdev->send = bluecard_hci_send_frame;
id = inb(iobase + 0x30);
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 2fe4a8031348..8a319913c9a9 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -37,7 +37,7 @@
#define VERSION "0.10"
-static struct usb_device_id bpa10x_table[] = {
+static const struct usb_device_id bpa10x_table[] = {
/* Tektronix BPA 100/105 (Digianswer) */
{ USB_DEVICE(0x08fd, 0x0002) },
@@ -129,8 +129,6 @@ static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
return -ENOMEM;
}
- skb->dev = (void *) hdev;
-
data->rx_skb[queue] = skb;
scb = (void *) skb->cb;
@@ -155,7 +153,7 @@ static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
data->rx_skb[queue] = NULL;
bt_cb(skb)->pkt_type = scb->type;
- hci_recv_frame(skb);
+ hci_recv_frame(hdev, skb);
}
count -= len; buf += len;
@@ -352,9 +350,8 @@ static int bpa10x_flush(struct hci_dev *hdev)
return 0;
}
-static int bpa10x_send_frame(struct sk_buff *skb)
+static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
struct bpa10x_data *data = hci_get_drvdata(hdev);
struct usb_ctrlrequest *dr;
struct urb *urb;
@@ -366,6 +363,8 @@ static int bpa10x_send_frame(struct sk_buff *skb)
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
+ skb->dev = (void *) hdev;
+
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
return -ENOMEM;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index a1aaa3ba2a4b..73d87994d028 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -247,7 +247,6 @@ static void bt3c_receive(bt3c_info_t *info)
if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
- info->rx_skb->dev = (void *) info->hdev;
bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L);
inb(iobase + DATA_H);
//printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type);
@@ -318,7 +317,7 @@ static void bt3c_receive(bt3c_info_t *info)
break;
case RECV_WAIT_DATA:
- hci_recv_frame(info->rx_skb);
+ hci_recv_frame(info->hdev, info->rx_skb);
info->rx_skb = NULL;
break;
@@ -416,19 +415,11 @@ static int bt3c_hci_close(struct hci_dev *hdev)
}
-static int bt3c_hci_send_frame(struct sk_buff *skb)
+static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- bt3c_info_t *info;
- struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
+ bt3c_info_t *info = hci_get_drvdata(hdev);
unsigned long flags;
- if (!hdev) {
- BT_ERR("Frame for unknown HCI device (hdev=NULL)");
- return -ENODEV;
- }
-
- info = hci_get_drvdata(hdev);
-
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
@@ -455,12 +446,6 @@ static int bt3c_hci_send_frame(struct sk_buff *skb)
}
-static int bt3c_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
-
/* ======================== Card services HCI interaction ======================== */
@@ -577,11 +562,10 @@ static int bt3c_open(bt3c_info_t *info)
hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
- hdev->open = bt3c_hci_open;
- hdev->close = bt3c_hci_close;
- hdev->flush = bt3c_hci_flush;
- hdev->send = bt3c_hci_send_frame;
- hdev->ioctl = bt3c_hci_ioctl;
+ hdev->open = bt3c_hci_open;
+ hdev->close = bt3c_hci_close;
+ hdev->flush = bt3c_hci_flush;
+ hdev->send = bt3c_hci_send_frame;
/* Load firmware */
err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev);
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 27068d149380..7399303d7d99 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -86,11 +86,12 @@ struct btmrvl_private {
#define MRVL_VENDOR_PKT 0xFE
-/* Bluetooth commands */
-#define BT_CMD_AUTO_SLEEP_MODE 0x23
-#define BT_CMD_HOST_SLEEP_CONFIG 0x59
-#define BT_CMD_HOST_SLEEP_ENABLE 0x5A
-#define BT_CMD_MODULE_CFG_REQ 0x5B
+/* Vendor specific Bluetooth commands */
+#define BT_CMD_AUTO_SLEEP_MODE 0xFC23
+#define BT_CMD_HOST_SLEEP_CONFIG 0xFC59
+#define BT_CMD_HOST_SLEEP_ENABLE 0xFC5A
+#define BT_CMD_MODULE_CFG_REQ 0xFC5B
+#define BT_CMD_LOAD_CONFIG_DATA 0xFC61
/* Sub-commands: Module Bringup/Shutdown Request/Response */
#define MODULE_BRINGUP_REQ 0xF1
@@ -99,6 +100,11 @@ struct btmrvl_private {
#define MODULE_SHUTDOWN_REQ 0xF2
+/* Vendor specific Bluetooth events */
+#define BT_EVENT_AUTO_SLEEP_MODE 0x23
+#define BT_EVENT_HOST_SLEEP_CONFIG 0x59
+#define BT_EVENT_HOST_SLEEP_ENABLE 0x5A
+#define BT_EVENT_MODULE_CFG_REQ 0x5B
#define BT_EVENT_POWER_STATE 0x20
/* Bluetooth Power States */
@@ -106,8 +112,6 @@ struct btmrvl_private {
#define BT_PS_DISABLE 0x03
#define BT_PS_SLEEP 0x01
-#define OGF 0x3F
-
/* Host Sleep states */
#define HS_ACTIVATED 0x01
#define HS_DEACTIVATED 0x00
@@ -116,11 +120,8 @@ struct btmrvl_private {
#define PS_SLEEP 0x01
#define PS_AWAKE 0x00
-struct btmrvl_cmd {
- __le16 ocf_ogf;
- u8 length;
- u8 data[4];
-} __packed;
+#define BT_CAL_HDR_LEN 4
+#define BT_CAL_DATA_SIZE 28
struct btmrvl_event {
u8 ec; /* event counter */
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 9a9f51875df5..1e0320af00c6 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -19,7 +19,7 @@
**/
#include <linux/module.h>
-
+#include <linux/of.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -50,23 +50,19 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
if (hdr->evt == HCI_EV_CMD_COMPLETE) {
struct hci_ev_cmd_complete *ec;
- u16 opcode, ocf, ogf;
+ u16 opcode;
ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
opcode = __le16_to_cpu(ec->opcode);
- ocf = hci_opcode_ocf(opcode);
- ogf = hci_opcode_ogf(opcode);
- if (ocf == BT_CMD_MODULE_CFG_REQ &&
- priv->btmrvl_dev.sendcmdflag) {
+ if (priv->btmrvl_dev.sendcmdflag) {
priv->btmrvl_dev.sendcmdflag = false;
priv->adapter->cmd_complete = true;
wake_up_interruptible(&priv->adapter->cmd_wait_q);
}
- if (ogf == OGF) {
- BT_DBG("vendor event skipped: ogf 0x%4.4x ocf 0x%4.4x",
- ogf, ocf);
+ if (hci_opcode_ogf(opcode) == 0x3F) {
+ BT_DBG("vendor event skipped: opcode=%#4.4x", opcode);
kfree_skb(skb);
return false;
}
@@ -90,7 +86,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
}
switch (event->data[0]) {
- case BT_CMD_AUTO_SLEEP_MODE:
+ case BT_EVENT_AUTO_SLEEP_MODE:
if (!event->data[2]) {
if (event->data[1] == BT_PS_ENABLE)
adapter->psmode = 1;
@@ -103,7 +99,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
}
break;
- case BT_CMD_HOST_SLEEP_CONFIG:
+ case BT_EVENT_HOST_SLEEP_CONFIG:
if (!event->data[3])
BT_DBG("gpio=%x, gap=%x", event->data[1],
event->data[2]);
@@ -111,19 +107,18 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
BT_DBG("HSCFG command failed");
break;
- case BT_CMD_HOST_SLEEP_ENABLE:
+ case BT_EVENT_HOST_SLEEP_ENABLE:
if (!event->data[1]) {
adapter->hs_state = HS_ACTIVATED;
if (adapter->psmode)
adapter->ps_state = PS_SLEEP;
- wake_up_interruptible(&adapter->cmd_wait_q);
BT_DBG("HS ACTIVATED!");
} else {
BT_DBG("HS Enable failed");
}
break;
- case BT_CMD_MODULE_CFG_REQ:
+ case BT_EVENT_MODULE_CFG_REQ:
if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_BRINGUP_REQ) {
BT_DBG("EVENT:%s",
@@ -168,45 +163,50 @@ exit:
}
EXPORT_SYMBOL_GPL(btmrvl_process_event);
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
+ const void *param, u8 len)
{
struct sk_buff *skb;
- struct btmrvl_cmd *cmd;
- int ret = 0;
+ struct hci_command_hdr *hdr;
- skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
+ skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
return -ENOMEM;
}
- cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
- cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_MODULE_CFG_REQ));
- cmd->length = 1;
- cmd->data[0] = subcmd;
+ hdr = (struct hci_command_hdr *)skb_put(skb, HCI_COMMAND_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(opcode);
+ hdr->plen = len;
+
+ if (len)
+ memcpy(skb_put(skb, len), param, len);
bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
- skb->dev = (void *) priv->btmrvl_dev.hcidev;
skb_queue_head(&priv->adapter->tx_queue, skb);
priv->btmrvl_dev.sendcmdflag = true;
priv->adapter->cmd_complete = false;
- BT_DBG("Queue module cfg Command");
-
wake_up_interruptible(&priv->main_thread.wait_q);
if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
priv->adapter->cmd_complete,
- msecs_to_jiffies(WAIT_UNTIL_CMD_RESP))) {
- ret = -ETIMEDOUT;
- BT_ERR("module_cfg_cmd(%x): timeout: %d",
- subcmd, priv->btmrvl_dev.sendcmdflag);
- }
+ msecs_to_jiffies(WAIT_UNTIL_CMD_RESP)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+{
+ int ret;
- BT_DBG("module cfg Command done");
+ ret = btmrvl_send_sync_cmd(priv, BT_CMD_MODULE_CFG_REQ, &subcmd, 1);
+ if (ret)
+ BT_ERR("module_cfg_cmd(%x) failed\n", subcmd);
return ret;
}
@@ -214,61 +214,36 @@ EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);
int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv)
{
- struct sk_buff *skb;
- struct btmrvl_cmd *cmd;
-
- skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
- if (!skb) {
- BT_ERR("No free skb");
- return -ENOMEM;
- }
-
- cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
- cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
- BT_CMD_HOST_SLEEP_CONFIG));
- cmd->length = 2;
- cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
- cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
+ int ret;
+ u8 param[2];
- bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
+ param[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
+ param[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
- skb->dev = (void *) priv->btmrvl_dev.hcidev;
- skb_queue_head(&priv->adapter->tx_queue, skb);
+ BT_DBG("Sending HSCFG Command, gpio=0x%x, gap=0x%x",
+ param[0], param[1]);
- BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x", cmd->data[0],
- cmd->data[1]);
+ ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_CONFIG, param, 2);
+ if (ret)
+ BT_ERR("HSCFG command failed\n");
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(btmrvl_send_hscfg_cmd);
int btmrvl_enable_ps(struct btmrvl_private *priv)
{
- struct sk_buff *skb;
- struct btmrvl_cmd *cmd;
-
- skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
- if (skb == NULL) {
- BT_ERR("No free skb");
- return -ENOMEM;
- }
-
- cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
- cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
- BT_CMD_AUTO_SLEEP_MODE));
- cmd->length = 1;
+ int ret;
+ u8 param;
if (priv->btmrvl_dev.psmode)
- cmd->data[0] = BT_PS_ENABLE;
+ param = BT_PS_ENABLE;
else
- cmd->data[0] = BT_PS_DISABLE;
-
- bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
-
- skb->dev = (void *) priv->btmrvl_dev.hcidev;
- skb_queue_head(&priv->adapter->tx_queue, skb);
+ param = BT_PS_DISABLE;
- BT_DBG("Queue PSMODE Command:%d", cmd->data[0]);
+ ret = btmrvl_send_sync_cmd(priv, BT_CMD_AUTO_SLEEP_MODE, &param, 1);
+ if (ret)
+ BT_ERR("PSMODE command failed\n");
return 0;
}
@@ -276,37 +251,11 @@ EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
int btmrvl_enable_hs(struct btmrvl_private *priv)
{
- struct sk_buff *skb;
- struct btmrvl_cmd *cmd;
- int ret = 0;
-
- skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
- if (skb == NULL) {
- BT_ERR("No free skb");
- return -ENOMEM;
- }
-
- cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
- cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_ENABLE));
- cmd->length = 0;
-
- bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
-
- skb->dev = (void *) priv->btmrvl_dev.hcidev;
- skb_queue_head(&priv->adapter->tx_queue, skb);
-
- BT_DBG("Queue hs enable Command");
-
- wake_up_interruptible(&priv->main_thread.wait_q);
+ int ret;
- if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
- priv->adapter->hs_state,
- msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED))) {
- ret = -ETIMEDOUT;
- BT_ERR("timeout: %d, %d,%d", priv->adapter->hs_state,
- priv->adapter->ps_state,
- priv->adapter->wakeup_tries);
- }
+ ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0);
+ if (ret)
+ BT_ERR("Host sleep enable command failed\n");
return ret;
}
@@ -403,26 +352,12 @@ static void btmrvl_free_adapter(struct btmrvl_private *priv)
priv->adapter = NULL;
}
-static int btmrvl_ioctl(struct hci_dev *hdev,
- unsigned int cmd, unsigned long arg)
+static int btmrvl_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- return -ENOIOCTLCMD;
-}
-
-static int btmrvl_send_frame(struct sk_buff *skb)
-{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
- struct btmrvl_private *priv = NULL;
+ struct btmrvl_private *priv = hci_get_drvdata(hdev);
BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len);
- if (!hdev) {
- BT_ERR("Frame for unknown HCI device");
- return -ENODEV;
- }
-
- priv = hci_get_drvdata(hdev);
-
if (!test_bit(HCI_RUNNING, &hdev->flags)) {
BT_ERR("Failed testing HCI_RUNNING, flags=%lx", hdev->flags);
print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET,
@@ -479,6 +414,72 @@ static int btmrvl_open(struct hci_dev *hdev)
return 0;
}
+static int btmrvl_download_cal_data(struct btmrvl_private *priv,
+ u8 *data, int len)
+{
+ int ret;
+
+ data[0] = 0x00;
+ data[1] = 0x00;
+ data[2] = 0x00;
+ data[3] = len;
+
+ print_hex_dump_bytes("Calibration data: ",
+ DUMP_PREFIX_OFFSET, data, BT_CAL_HDR_LEN + len);
+
+ ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data,
+ BT_CAL_HDR_LEN + len);
+ if (ret)
+ BT_ERR("Failed to download calibration data\n");
+
+ return 0;
+}
+
+static int btmrvl_cal_data_dt(struct btmrvl_private *priv)
+{
+ struct device_node *dt_node;
+ u8 cal_data[BT_CAL_HDR_LEN + BT_CAL_DATA_SIZE];
+ const char name[] = "btmrvl_caldata";
+ const char property[] = "btmrvl,caldata";
+ int ret;
+
+ dt_node = of_find_node_by_name(NULL, name);
+ if (!dt_node)
+ return -ENODEV;
+
+ ret = of_property_read_u8_array(dt_node, property,
+ cal_data + BT_CAL_HDR_LEN,
+ BT_CAL_DATA_SIZE);
+ if (ret)
+ return ret;
+
+ BT_DBG("Use cal data from device tree");
+ ret = btmrvl_download_cal_data(priv, cal_data, BT_CAL_DATA_SIZE);
+ if (ret) {
+ BT_ERR("Failed to download calibration data");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int btmrvl_setup(struct hci_dev *hdev)
+{
+ struct btmrvl_private *priv = hci_get_drvdata(hdev);
+
+ btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+
+ btmrvl_cal_data_dt(priv);
+
+ priv->btmrvl_dev.psmode = 1;
+ btmrvl_enable_ps(priv);
+
+ priv->btmrvl_dev.gpio_gap = 0xffff;
+ btmrvl_send_hscfg_cmd(priv);
+
+ return 0;
+}
+
/*
* This function handles the event generated by firmware, rx data
* received from firmware, and tx data sent from kernel.
@@ -566,14 +567,12 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
priv->btmrvl_dev.hcidev = hdev;
hci_set_drvdata(hdev, priv);
- hdev->bus = HCI_SDIO;
- hdev->open = btmrvl_open;
+ hdev->bus = HCI_SDIO;
+ hdev->open = btmrvl_open;
hdev->close = btmrvl_close;
hdev->flush = btmrvl_flush;
- hdev->send = btmrvl_send_frame;
- hdev->ioctl = btmrvl_ioctl;
-
- btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+ hdev->send = btmrvl_send_frame;
+ hdev->setup = btmrvl_setup;
hdev->dev_type = priv->btmrvl_dev.dev_type;
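
A short aside on the opcode arithmetic behind the btmrvl changes above: an HCI opcode carries the OGF in bits 15:10 and the OCF in bits 9:0, so the old driver-private (OGF 0x3F, OCF) pairs and the new full BT_CMD_* opcodes name exactly the same vendor commands:

/*
 * hci_opcode_pack(ogf, ocf) = (ocf & 0x03ff) | (ogf << 10)
 *
 *   (0x3F << 10) | 0x23 = 0xFC23   BT_CMD_AUTO_SLEEP_MODE
 *   (0x3F << 10) | 0x5B = 0xFC5B   BT_CMD_MODULE_CFG_REQ
 */
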
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 00da6df9f71e..1b52c9f5230d 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -597,15 +597,14 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
case HCI_SCODATA_PKT:
case HCI_EVENT_PKT:
bt_cb(skb)->pkt_type = type;
- skb->dev = (void *)hdev;
skb_put(skb, buf_len);
skb_pull(skb, SDIO_HEADER_LEN);
if (type == HCI_EVENT_PKT) {
if (btmrvl_check_evtpkt(priv, skb))
- hci_recv_frame(skb);
+ hci_recv_frame(hdev, skb);
} else {
- hci_recv_frame(skb);
+ hci_recv_frame(hdev, skb);
}
hdev->stat.byte_rx += buf_len;
@@ -613,12 +612,11 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
case MRVL_VENDOR_PKT:
bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
- skb->dev = (void *)hdev;
skb_put(skb, buf_len);
skb_pull(skb, SDIO_HEADER_LEN);
if (btmrvl_process_event(priv, skb))
- hci_recv_frame(skb);
+ hci_recv_frame(hdev, skb);
hdev->stat.byte_rx += buf_len;
break;
@@ -1046,12 +1044,6 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
goto disable_host_int;
}
- priv->btmrvl_dev.psmode = 1;
- btmrvl_enable_ps(priv);
-
- priv->btmrvl_dev.gpio_gap = 0xffff;
- btmrvl_send_hscfg_cmd(priv);
-
return 0;
disable_host_int:
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 4a9909713874..b61440aaee65 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -157,10 +157,9 @@ static int btsdio_rx_packet(struct btsdio_data *data)
data->hdev->stat.byte_rx += len;
- skb->dev = (void *) data->hdev;
bt_cb(skb)->pkt_type = hdr[3];
- err = hci_recv_frame(skb);
+ err = hci_recv_frame(data->hdev, skb);
if (err < 0)
return err;
@@ -255,9 +254,8 @@ static int btsdio_flush(struct hci_dev *hdev)
return 0;
}
-static int btsdio_send_frame(struct sk_buff *skb)
+static int btsdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
struct btsdio_data *data = hci_get_drvdata(hdev);
BT_DBG("%s", hdev->name);
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index beb262f2dc4d..a03ecc22a561 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -198,7 +198,6 @@ static void btuart_receive(btuart_info_t *info)
if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
- info->rx_skb->dev = (void *) info->hdev;
bt_cb(info->rx_skb)->pkt_type = inb(iobase + UART_RX);
switch (bt_cb(info->rx_skb)->pkt_type) {
@@ -265,7 +264,7 @@ static void btuart_receive(btuart_info_t *info)
break;
case RECV_WAIT_DATA:
- hci_recv_frame(info->rx_skb);
+ hci_recv_frame(info->hdev, info->rx_skb);
info->rx_skb = NULL;
break;
@@ -424,17 +423,9 @@ static int btuart_hci_close(struct hci_dev *hdev)
}
-static int btuart_hci_send_frame(struct sk_buff *skb)
+static int btuart_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- btuart_info_t *info;
- struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
-
- if (!hdev) {
- BT_ERR("Frame for unknown HCI device (hdev=NULL)");
- return -ENODEV;
- }
-
- info = hci_get_drvdata(hdev);
+ btuart_info_t *info = hci_get_drvdata(hdev);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -458,12 +449,6 @@ static int btuart_hci_send_frame(struct sk_buff *skb)
}
-static int btuart_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
-
/* ======================== Card services HCI interaction ======================== */
@@ -495,11 +480,10 @@ static int btuart_open(btuart_info_t *info)
hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
- hdev->open = btuart_hci_open;
- hdev->close = btuart_hci_close;
- hdev->flush = btuart_hci_flush;
- hdev->send = btuart_hci_send_frame;
- hdev->ioctl = btuart_hci_ioctl;
+ hdev->open = btuart_hci_open;
+ hdev->close = btuart_hci_close;
+ hdev->flush = btuart_hci_flush;
+ hdev->send = btuart_hci_send_frame;
spin_lock_irqsave(&(info->lock), flags);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f3dfc0a88fdc..30868fa870f6 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -50,7 +50,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_ATH3012 0x80
#define BTUSB_INTEL 0x100
-static struct usb_device_id btusb_table[] = {
+static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
@@ -121,7 +121,7 @@ static struct usb_device_id btusb_table[] = {
MODULE_DEVICE_TABLE(usb, btusb_table);
-static struct usb_device_id blacklist_table[] = {
+static const struct usb_device_id blacklist_table[] = {
/* CSR BlueCore devices */
{ USB_DEVICE(0x0a12, 0x0001), .driver_info = BTUSB_CSR },
@@ -716,9 +716,8 @@ static int btusb_flush(struct hci_dev *hdev)
return 0;
}
-static int btusb_send_frame(struct sk_buff *skb)
+static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
struct btusb_data *data = hci_get_drvdata(hdev);
struct usb_ctrlrequest *dr;
struct urb *urb;
@@ -730,6 +729,8 @@ static int btusb_send_frame(struct sk_buff *skb)
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
+ skb->dev = (void *) hdev;
+
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -774,7 +775,7 @@ static int btusb_send_frame(struct sk_buff *skb)
break;
case HCI_SCODATA_PKT:
- if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1)
+ if (!data->isoc_tx_ep || hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC);
@@ -833,8 +834,8 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
BT_DBG("%s evt %d", hdev->name, evt);
- if (hdev->conn_hash.sco_num != data->sco_num) {
- data->sco_num = hdev->conn_hash.sco_num;
+ if (hci_conn_num(hdev, SCO_LINK) != data->sco_num) {
+ data->sco_num = hci_conn_num(hdev, SCO_LINK);
schedule_work(&data->work);
}
}
@@ -889,7 +890,7 @@ static void btusb_work(struct work_struct *work)
int new_alts;
int err;
- if (hdev->conn_hash.sco_num > 0) {
+ if (data->sco_num > 0) {
if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
if (err < 0) {
@@ -903,9 +904,9 @@ static void btusb_work(struct work_struct *work)
if (hdev->voice_setting & 0x0020) {
static const int alts[3] = { 2, 4, 5 };
- new_alts = alts[hdev->conn_hash.sco_num - 1];
+ new_alts = alts[data->sco_num - 1];
} else {
- new_alts = hdev->conn_hash.sco_num;
+ new_alts = data->sco_num;
}
if (data->isoc_altsetting != new_alts) {
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 60abf596f60e..f038dba19e36 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -108,10 +108,8 @@ static long st_receive(void *priv_data, struct sk_buff *skb)
return -EFAULT;
}
- skb->dev = (void *) lhst->hdev;
-
/* Forward skb to HCI core layer */
- err = hci_recv_frame(skb);
+ err = hci_recv_frame(lhst->hdev, skb);
if (err < 0) {
BT_ERR("Unable to push skb to HCI core(%d)", err);
return err;
@@ -253,14 +251,11 @@ static int ti_st_close(struct hci_dev *hdev)
return err;
}
-static int ti_st_send_frame(struct sk_buff *skb)
+static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev;
struct ti_st *hst;
long len;
- hdev = (struct hci_dev *)skb->dev;
-
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 33f3a6950c0e..52eed1f3565d 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -256,9 +256,8 @@ static void dtl1_receive(dtl1_info_t *info)
case 0x83:
case 0x84:
/* send frame to the HCI layer */
- info->rx_skb->dev = (void *) info->hdev;
bt_cb(info->rx_skb)->pkt_type &= 0x0f;
- hci_recv_frame(info->rx_skb);
+ hci_recv_frame(info->hdev, info->rx_skb);
break;
default:
/* unknown packet */
@@ -383,20 +382,12 @@ static int dtl1_hci_close(struct hci_dev *hdev)
}
-static int dtl1_hci_send_frame(struct sk_buff *skb)
+static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- dtl1_info_t *info;
- struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
+ dtl1_info_t *info = hci_get_drvdata(hdev);
struct sk_buff *s;
nsh_t nsh;
- if (!hdev) {
- BT_ERR("Frame for unknown HCI device (hdev=NULL)");
- return -ENODEV;
- }
-
- info = hci_get_drvdata(hdev);
-
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
@@ -438,12 +429,6 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
}
-static int dtl1_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
-
/* ======================== Card services HCI interaction ======================== */
@@ -477,11 +462,10 @@ static int dtl1_open(dtl1_info_t *info)
hci_set_drvdata(hdev, info);
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
- hdev->open = dtl1_hci_open;
- hdev->close = dtl1_hci_close;
- hdev->flush = dtl1_hci_flush;
- hdev->send = dtl1_hci_send_frame;
- hdev->ioctl = dtl1_hci_ioctl;
+ hdev->open = dtl1_hci_open;
+ hdev->close = dtl1_hci_close;
+ hdev->flush = dtl1_hci_flush;
+ hdev->send = dtl1_hci_send_frame;
spin_lock_irqsave(&(info->lock), flags);
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 57e502e06080..0bc87f7abd95 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -522,7 +522,7 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE);
bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
- hci_recv_frame(bcsp->rx_skb);
+ hci_recv_frame(hu->hdev, bcsp->rx_skb);
} else {
BT_ERR ("Packet for unknown channel (%u %s)",
bcsp->rx_skb->data[1] & 0x0f,
@@ -536,7 +536,7 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
/* Pull out BCSP hdr */
skb_pull(bcsp->rx_skb, 4);
- hci_recv_frame(bcsp->rx_skb);
+ hci_recv_frame(hu->hdev, bcsp->rx_skb);
}
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
@@ -655,7 +655,6 @@ static int bcsp_recv(struct hci_uart *hu, void *data, int count)
bcsp->rx_count = 0;
return 0;
}
- bcsp->rx_skb->dev = (void *) hu->hdev;
break;
}
break;
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 8ae9f1ea2bb5..7048a583fe51 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -124,30 +124,6 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
return 0;
}
-static inline int h4_check_data_len(struct h4_struct *h4, int len)
-{
- int room = skb_tailroom(h4->rx_skb);
-
- BT_DBG("len %d room %d", len, room);
-
- if (!len) {
- hci_recv_frame(h4->rx_skb);
- } else if (len > room) {
- BT_ERR("Data length is too large");
- kfree_skb(h4->rx_skb);
- } else {
- h4->rx_state = H4_W4_DATA;
- h4->rx_count = len;
- return len;
- }
-
- h4->rx_state = H4_W4_PACKET_TYPE;
- h4->rx_skb = NULL;
- h4->rx_count = 0;
-
- return 0;
-}
-
/* Recv data */
static int h4_recv(struct hci_uart *hu, void *data, int count)
{
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index b6154d5a07a5..f6f497450560 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -340,7 +340,7 @@ static void h5_complete_rx_pkt(struct hci_uart *hu)
/* Remove Three-wire header */
skb_pull(h5->rx_skb, 4);
- hci_recv_frame(h5->rx_skb);
+ hci_recv_frame(hu->hdev, h5->rx_skb);
h5->rx_skb = NULL;
break;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index bc68a440d432..6e06f6f69152 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -234,21 +234,13 @@ static int hci_uart_close(struct hci_dev *hdev)
}
/* Send frames from HCI layer */
-static int hci_uart_send_frame(struct sk_buff *skb)
+static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev* hdev = (struct hci_dev *) skb->dev;
- struct hci_uart *hu;
-
- if (!hdev) {
- BT_ERR("Frame for unknown device (hdev=NULL)");
- return -ENODEV;
- }
+ struct hci_uart *hu = hci_get_drvdata(hdev);
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
- hu = hci_get_drvdata(hdev);
-
BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
hu->proto->enqueue(hu, skb);
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index cfc767938589..69a90b1b5ff5 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -110,7 +110,6 @@ static int send_hcill_cmd(u8 cmd, struct hci_uart *hu)
/* prepare packet */
hcill_packet = (struct hcill_cmd *) skb_put(skb, 1);
hcill_packet->cmd = cmd;
- skb->dev = (void *) hu->hdev;
/* send packet */
skb_queue_tail(&ll->txq, skb);
@@ -346,14 +345,14 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
return 0;
}
-static inline int ll_check_data_len(struct ll_struct *ll, int len)
+static inline int ll_check_data_len(struct hci_dev *hdev, struct ll_struct *ll, int len)
{
int room = skb_tailroom(ll->rx_skb);
BT_DBG("len %d room %d", len, room);
if (!len) {
- hci_recv_frame(ll->rx_skb);
+ hci_recv_frame(hdev, ll->rx_skb);
} else if (len > room) {
BT_ERR("Data length is too large");
kfree_skb(ll->rx_skb);
@@ -395,7 +394,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
switch (ll->rx_state) {
case HCILL_W4_DATA:
BT_DBG("Complete data");
- hci_recv_frame(ll->rx_skb);
+ hci_recv_frame(hu->hdev, ll->rx_skb);
ll->rx_state = HCILL_W4_PACKET_TYPE;
ll->rx_skb = NULL;
@@ -406,7 +405,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
- ll_check_data_len(ll, eh->plen);
+ ll_check_data_len(hu->hdev, ll, eh->plen);
continue;
case HCILL_W4_ACL_HDR:
@@ -415,7 +414,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
BT_DBG("ACL header: dlen %d", dlen);
- ll_check_data_len(ll, dlen);
+ ll_check_data_len(hu->hdev, ll, dlen);
continue;
case HCILL_W4_SCO_HDR:
@@ -423,7 +422,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
BT_DBG("SCO header: dlen %d", sh->dlen);
- ll_check_data_len(ll, sh->dlen);
+ ll_check_data_len(hu->hdev, ll, sh->dlen);
continue;
}
}
@@ -494,7 +493,6 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
return -ENOMEM;
}
- ll->rx_skb->dev = (void *) hu->hdev;
bt_cb(ll->rx_skb)->pkt_type = type;
}
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index d8b7aed6e4a9..7b167385a1c4 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -24,6 +24,7 @@
*/
#include <linux/module.h>
+#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -39,17 +40,17 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#define VERSION "1.3"
+#define VERSION "1.4"
static bool amp;
struct vhci_data {
struct hci_dev *hdev;
- unsigned long flags;
-
wait_queue_head_t read_wait;
struct sk_buff_head readq;
+
+ struct delayed_work open_timeout;
};
static int vhci_open_dev(struct hci_dev *hdev)
@@ -80,35 +81,73 @@ static int vhci_flush(struct hci_dev *hdev)
return 0;
}
-static int vhci_send_frame(struct sk_buff *skb)
+static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev* hdev = (struct hci_dev *) skb->dev;
- struct vhci_data *data;
+ struct vhci_data *data = hci_get_drvdata(hdev);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ return -EBUSY;
+
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+ skb_queue_tail(&data->readq, skb);
+
+ wake_up_interruptible(&data->read_wait);
+ return 0;
+}
+static int vhci_create_device(struct vhci_data *data, __u8 dev_type)
+{
+ struct hci_dev *hdev;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdev = hci_alloc_dev();
if (!hdev) {
- BT_ERR("Frame for unknown HCI device (hdev=NULL)");
- return -ENODEV;
+ kfree_skb(skb);
+ return -ENOMEM;
}
- if (!test_bit(HCI_RUNNING, &hdev->flags))
+ data->hdev = hdev;
+
+ hdev->bus = HCI_VIRTUAL;
+ hdev->dev_type = dev_type;
+ hci_set_drvdata(hdev, data);
+
+ hdev->open = vhci_open_dev;
+ hdev->close = vhci_close_dev;
+ hdev->flush = vhci_flush;
+ hdev->send = vhci_send_frame;
+
+ if (hci_register_dev(hdev) < 0) {
+ BT_ERR("Can't register HCI device");
+ hci_free_dev(hdev);
+ data->hdev = NULL;
+ kfree_skb(skb);
return -EBUSY;
+ }
- data = hci_get_drvdata(hdev);
+ bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
- memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+ *skb_put(skb, 1) = 0xff;
+ *skb_put(skb, 1) = dev_type;
+ put_unaligned_le16(hdev->id, skb_put(skb, 2));
skb_queue_tail(&data->readq, skb);
wake_up_interruptible(&data->read_wait);
-
return 0;
}
static inline ssize_t vhci_get_user(struct vhci_data *data,
- const char __user *buf, size_t count)
+ const char __user *buf, size_t count)
{
struct sk_buff *skb;
+ __u8 pkt_type, dev_type;
+ int ret;
- if (count > HCI_MAX_FRAME_SIZE)
+ if (count < 2 || count > HCI_MAX_FRAME_SIZE)
return -EINVAL;
skb = bt_skb_alloc(count, GFP_KERNEL);
@@ -120,27 +159,69 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
return -EFAULT;
}
- skb->dev = (void *) data->hdev;
- bt_cb(skb)->pkt_type = *((__u8 *) skb->data);
+ pkt_type = *((__u8 *) skb->data);
skb_pull(skb, 1);
- hci_recv_frame(skb);
+ switch (pkt_type) {
+ case HCI_EVENT_PKT:
+ case HCI_ACLDATA_PKT:
+ case HCI_SCODATA_PKT:
+ if (!data->hdev) {
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ bt_cb(skb)->pkt_type = pkt_type;
+
+ ret = hci_recv_frame(data->hdev, skb);
+ break;
- return count;
+ case HCI_VENDOR_PKT:
+ if (data->hdev) {
+ kfree_skb(skb);
+ return -EBADFD;
+ }
+
+ cancel_delayed_work_sync(&data->open_timeout);
+
+ dev_type = *((__u8 *) skb->data);
+ skb_pull(skb, 1);
+
+ if (skb->len > 0) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ kfree_skb(skb);
+
+ if (dev_type != HCI_BREDR && dev_type != HCI_AMP)
+ return -EINVAL;
+
+ ret = vhci_create_device(data, dev_type);
+ break;
+
+ default:
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ return (ret < 0) ? ret : count;
}
static inline ssize_t vhci_put_user(struct vhci_data *data,
- struct sk_buff *skb, char __user *buf, int count)
+ struct sk_buff *skb,
+ char __user *buf, int count)
{
char __user *ptr = buf;
- int len, total = 0;
+ int len;
len = min_t(unsigned int, skb->len, count);
if (copy_to_user(ptr, skb->data, len))
return -EFAULT;
- total += len;
+ if (!data->hdev)
+ return len;
data->hdev->stat.byte_tx += len;
@@ -148,21 +229,19 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
case HCI_COMMAND_PKT:
data->hdev->stat.cmd_tx++;
break;
-
case HCI_ACLDATA_PKT:
data->hdev->stat.acl_tx++;
break;
-
case HCI_SCODATA_PKT:
data->hdev->stat.sco_tx++;
break;
}
- return total;
+ return len;
}
static ssize_t vhci_read(struct file *file,
- char __user *buf, size_t count, loff_t *pos)
+ char __user *buf, size_t count, loff_t *pos)
{
struct vhci_data *data = file->private_data;
struct sk_buff *skb;
@@ -185,7 +264,7 @@ static ssize_t vhci_read(struct file *file,
}
ret = wait_event_interruptible(data->read_wait,
- !skb_queue_empty(&data->readq));
+ !skb_queue_empty(&data->readq));
if (ret < 0)
break;
}
@@ -194,7 +273,7 @@ static ssize_t vhci_read(struct file *file,
}
static ssize_t vhci_write(struct file *file,
- const char __user *buf, size_t count, loff_t *pos)
+ const char __user *buf, size_t count, loff_t *pos)
{
struct vhci_data *data = file->private_data;
@@ -213,10 +292,17 @@ static unsigned int vhci_poll(struct file *file, poll_table *wait)
return POLLOUT | POLLWRNORM;
}
+static void vhci_open_timeout(struct work_struct *work)
+{
+ struct vhci_data *data = container_of(work, struct vhci_data,
+ open_timeout.work);
+
+ vhci_create_device(data, amp ? HCI_AMP : HCI_BREDR);
+}
+
static int vhci_open(struct inode *inode, struct file *file)
{
struct vhci_data *data;
- struct hci_dev *hdev;
data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL);
if (!data)
@@ -225,35 +311,13 @@ static int vhci_open(struct inode *inode, struct file *file)
skb_queue_head_init(&data->readq);
init_waitqueue_head(&data->read_wait);
- hdev = hci_alloc_dev();
- if (!hdev) {
- kfree(data);
- return -ENOMEM;
- }
-
- data->hdev = hdev;
-
- hdev->bus = HCI_VIRTUAL;
- hci_set_drvdata(hdev, data);
-
- if (amp)
- hdev->dev_type = HCI_AMP;
-
- hdev->open = vhci_open_dev;
- hdev->close = vhci_close_dev;
- hdev->flush = vhci_flush;
- hdev->send = vhci_send_frame;
-
- if (hci_register_dev(hdev) < 0) {
- BT_ERR("Can't register HCI device");
- kfree(data);
- hci_free_dev(hdev);
- return -EBUSY;
- }
+ INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
file->private_data = data;
nonseekable_open(inode, file);
+ schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000));
+
return 0;
}
@@ -262,8 +326,12 @@ static int vhci_release(struct inode *inode, struct file *file)
struct vhci_data *data = file->private_data;
struct hci_dev *hdev = data->hdev;
- hci_unregister_dev(hdev);
- hci_free_dev(hdev);
+ cancel_delayed_work_sync(&data->open_timeout);
+
+ if (hdev) {
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+ }
file->private_data = NULL;
kfree(data);
@@ -309,3 +377,4 @@ MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("devname:vhci");
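
With the vhci rework above, opening the character device no longer registers a controller immediately: userspace may pick the controller type itself by writing a vendor packet before the one-second open timeout fires, and the driver answers with a four-byte vendor packet carrying the device type and the assigned HCI index. A minimal userspace sketch, assuming the /dev/vhci node implied by the new devname alias:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Sketch: create a BR/EDR (0x00) or AMP (0x01) virtual controller. */
static int vhci_create(uint8_t dev_type, uint16_t *index)
{
        uint8_t req[2] = { 0xff /* HCI_VENDOR_PKT */, dev_type };
        uint8_t rsp[4];
        int fd = open("/dev/vhci", O_RDWR);

        if (fd < 0)
                return -1;

        if (write(fd, req, sizeof(req)) != sizeof(req) ||
            read(fd, rsp, sizeof(rsp)) != sizeof(rsp)) {
                close(fd);
                return -1;
        }

        /* rsp = { 0xff, dev_type, hci index in little-endian } */
        *index = rsp[2] | (rsp[3] << 8);
        return fd;      /* keep the fd open; closing it unregisters the hdev */
}
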
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 200926699778..b6739cb78e32 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -18,11 +18,21 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <asm/cacheflush.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
#include <asm/smp_plat.h>
+#define DRIVER_NAME "CCI-400"
+#define DRIVER_NAME_PMU DRIVER_NAME " PMU"
+#define PMU_NAME "CCI_400"
+
#define CCI_PORT_CTRL 0x0
#define CCI_CTRL_STATUS 0xc
@@ -54,6 +64,568 @@ static unsigned int nb_cci_ports;
static void __iomem *cci_ctrl_base;
static unsigned long cci_ctrl_phys;
+#ifdef CONFIG_HW_PERF_EVENTS
+
+#define CCI_PMCR 0x0100
+#define CCI_PID2 0x0fe8
+
+#define CCI_PMCR_CEN 0x00000001
+#define CCI_PMCR_NCNT_MASK 0x0000f800
+#define CCI_PMCR_NCNT_SHIFT 11
+
+#define CCI_PID2_REV_MASK 0xf0
+#define CCI_PID2_REV_SHIFT 4
+
+/* Port ids */
+#define CCI_PORT_S0 0
+#define CCI_PORT_S1 1
+#define CCI_PORT_S2 2
+#define CCI_PORT_S3 3
+#define CCI_PORT_S4 4
+#define CCI_PORT_M0 5
+#define CCI_PORT_M1 6
+#define CCI_PORT_M2 7
+
+#define CCI_REV_R0 0
+#define CCI_REV_R1 1
+#define CCI_REV_R0_P4 4
+#define CCI_REV_R1_P2 6
+
+#define CCI_PMU_EVT_SEL 0x000
+#define CCI_PMU_CNTR 0x004
+#define CCI_PMU_CNTR_CTRL 0x008
+#define CCI_PMU_OVRFLW 0x00c
+
+#define CCI_PMU_OVRFLW_FLAG 1
+
+#define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K)
+
+/*
+ * Instead of an event id to monitor CCI cycles, a dedicated counter is
+ * provided. Use 0xff to represent CCI cycles and hope that no future revisions
+ * make use of this event in hardware.
+ */
+enum cci400_perf_events {
+ CCI_PMU_CYCLES = 0xff
+};
+
+#define CCI_PMU_EVENT_MASK 0xff
+#define CCI_PMU_EVENT_SOURCE(event) ((event >> 5) & 0x7)
+#define CCI_PMU_EVENT_CODE(event) (event & 0x1f)
+
+#define CCI_PMU_MAX_HW_EVENTS 5 /* CCI PMU has 4 counters + 1 cycle counter */
+
+#define CCI_PMU_CYCLE_CNTR_IDX 0
+#define CCI_PMU_CNTR0_IDX 1
+#define CCI_PMU_CNTR_LAST(cci_pmu) (CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
+
+/*
+ * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
+ * ports and bits 4:0 are event codes. There are different event codes
+ * associated with each port type.
+ *
+ * Additionally, the range of events associated with the port types changed
+ * between Rev0 and Rev1.
+ *
+ * The constants below define the range of valid codes for each port type for
+ * the different revisions and are used to validate the event to be monitored.
+ */
+
+#define CCI_REV_R0_SLAVE_PORT_MIN_EV 0x00
+#define CCI_REV_R0_SLAVE_PORT_MAX_EV 0x13
+#define CCI_REV_R0_MASTER_PORT_MIN_EV 0x14
+#define CCI_REV_R0_MASTER_PORT_MAX_EV 0x1a
+
+#define CCI_REV_R1_SLAVE_PORT_MIN_EV 0x00
+#define CCI_REV_R1_SLAVE_PORT_MAX_EV 0x14
+#define CCI_REV_R1_MASTER_PORT_MIN_EV 0x00
+#define CCI_REV_R1_MASTER_PORT_MAX_EV 0x11
+
+struct pmu_port_event_ranges {
+ u8 slave_min;
+ u8 slave_max;
+ u8 master_min;
+ u8 master_max;
+};
+
+static struct pmu_port_event_ranges port_event_range[] = {
+ [CCI_REV_R0] = {
+ .slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV,
+ .slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV,
+ .master_min = CCI_REV_R0_MASTER_PORT_MIN_EV,
+ .master_max = CCI_REV_R0_MASTER_PORT_MAX_EV,
+ },
+ [CCI_REV_R1] = {
+ .slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV,
+ .slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV,
+ .master_min = CCI_REV_R1_MASTER_PORT_MIN_EV,
+ .master_max = CCI_REV_R1_MASTER_PORT_MAX_EV,
+ },
+};
+
+struct cci_pmu_drv_data {
+ void __iomem *base;
+ struct arm_pmu *cci_pmu;
+ int nr_irqs;
+ int irqs[CCI_PMU_MAX_HW_EVENTS];
+ unsigned long active_irqs;
+ struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
+ unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
+ struct pmu_port_event_ranges *port_ranges;
+ struct pmu_hw_events hw_events;
+};
+static struct cci_pmu_drv_data *pmu;
+
+static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++)
+ if (irq == irqs[i])
+ return true;
+
+ return false;
+}
+
+static int probe_cci_revision(void)
+{
+ int rev;
+ rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
+ rev >>= CCI_PID2_REV_SHIFT;
+
+ if (rev <= CCI_REV_R0_P4)
+ return CCI_REV_R0;
+ else if (rev <= CCI_REV_R1_P2)
+ return CCI_REV_R1;
+
+ return -ENOENT;
+}
+
+static struct pmu_port_event_ranges *port_range_by_rev(void)
+{
+ int rev = probe_cci_revision();
+
+ if (rev < 0)
+ return NULL;
+
+ return &port_event_range[rev];
+}
+
+static int pmu_is_valid_slave_event(u8 ev_code)
+{
+ return pmu->port_ranges->slave_min <= ev_code &&
+ ev_code <= pmu->port_ranges->slave_max;
+}
+
+static int pmu_is_valid_master_event(u8 ev_code)
+{
+ return pmu->port_ranges->master_min <= ev_code &&
+ ev_code <= pmu->port_ranges->master_max;
+}
+
+static int pmu_validate_hw_event(u8 hw_event)
+{
+ u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
+ u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
+
+ switch (ev_source) {
+ case CCI_PORT_S0:
+ case CCI_PORT_S1:
+ case CCI_PORT_S2:
+ case CCI_PORT_S3:
+ case CCI_PORT_S4:
+ /* Slave Interface */
+ if (pmu_is_valid_slave_event(ev_code))
+ return hw_event;
+ break;
+ case CCI_PORT_M0:
+ case CCI_PORT_M1:
+ case CCI_PORT_M2:
+ /* Master Interface */
+ if (pmu_is_valid_master_event(ev_code))
+ return hw_event;
+ break;
+ }
+
+ return -ENOENT;
+}
+
+static int pmu_is_valid_counter(struct arm_pmu *cci_pmu, int idx)
+{
+ return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
+ idx <= CCI_PMU_CNTR_LAST(cci_pmu);
+}
+
+static u32 pmu_read_register(int idx, unsigned int offset)
+{
+ return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_write_register(u32 value, int idx, unsigned int offset)
+{
+ writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_disable_counter(int idx)
+{
+ pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_enable_counter(int idx)
+{
+ pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_set_event(int idx, unsigned long event)
+{
+ event &= CCI_PMU_EVENT_MASK;
+ pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
+}
+
+static u32 pmu_get_max_counters(void)
+{
+ u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
+ CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
+
+ /* add 1 for cycle counter */
+ return n_cnts + 1;
+}
+
+static struct pmu_hw_events *pmu_get_hw_events(void)
+{
+ return &pmu->hw_events;
+}
+
+static int pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_event = &event->hw;
+ unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;
+ int idx;
+
+ if (cci_event == CCI_PMU_CYCLES) {
+ if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
+ return -EAGAIN;
+
+ return CCI_PMU_CYCLE_CNTR_IDX;
+ }
+
+ for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
+ if (!test_and_set_bit(idx, hw->used_mask))
+ return idx;
+
+ /* No counters available */
+ return -EAGAIN;
+}
+
+static int pmu_map_event(struct perf_event *event)
+{
+ int mapping;
+ u8 config = event->attr.config & CCI_PMU_EVENT_MASK;
+
+ if (event->attr.type < PERF_TYPE_MAX)
+ return -ENOENT;
+
+ if (config == CCI_PMU_CYCLES)
+ mapping = config;
+ else
+ mapping = pmu_validate_hw_event(config);
+
+ return mapping;
+}
+
+static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
+{
+ int i;
+ struct platform_device *pmu_device = cci_pmu->plat_device;
+
+ if (unlikely(!pmu_device))
+ return -ENODEV;
+
+ if (pmu->nr_irqs < 1) {
+ dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Register all available CCI PMU interrupts. In the interrupt handler
+ * we iterate over the counters, checking for the interrupt source (the
+ * overflowing counter) and clearing it.
+ *
+ * This should allow handling of a shared, non-unique interrupt for the
+ * counters.
+ */
+ for (i = 0; i < pmu->nr_irqs; i++) {
+ int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED,
+ "arm-cci-pmu", cci_pmu);
+ if (err) {
+ dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
+ pmu->irqs[i]);
+ return err;
+ }
+
+ set_bit(i, &pmu->active_irqs);
+ }
+
+ return 0;
+}
+
+static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
+{
+ unsigned long flags;
+ struct arm_pmu *cci_pmu = (struct arm_pmu *)dev;
+ struct pmu_hw_events *events = cci_pmu->get_hw_events();
+ struct perf_sample_data data;
+ struct pt_regs *regs;
+ int idx, handled = IRQ_NONE;
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ regs = get_irq_regs();
+ /*
+ * Iterate over counters and update the corresponding perf events.
+ * This should work regardless of whether we have per-counter overflow
+ * interrupt or a combined overflow interrupt.
+ */
+ for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
+ struct perf_event *event = events->events[idx];
+ struct hw_perf_event *hw_counter;
+
+ if (!event)
+ continue;
+
+ hw_counter = &event->hw;
+
+ /* Did this counter overflow? */
+ if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) & CCI_PMU_OVRFLW_FLAG))
+ continue;
+
+ pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);
+
+ handled = IRQ_HANDLED;
+
+ armpmu_event_update(event);
+ perf_sample_data_init(&data, 0, hw_counter->last_period);
+ if (!armpmu_event_set_period(event))
+ continue;
+
+ if (perf_event_overflow(event, &data, regs))
+ cci_pmu->disable(event);
+ }
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void pmu_free_irq(struct arm_pmu *cci_pmu)
+{
+ int i;
+
+ for (i = 0; i < pmu->nr_irqs; i++) {
+ if (!test_and_clear_bit(i, &pmu->active_irqs))
+ continue;
+
+ free_irq(pmu->irqs[i], cci_pmu);
+ }
+}
+
+static void pmu_enable_event(struct perf_event *event)
+{
+ unsigned long flags;
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *events = cci_pmu->get_hw_events();
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+
+ if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Configure the event to count, unless you are counting cycles */
+ if (idx != CCI_PMU_CYCLE_CNTR_IDX)
+ pmu_set_event(idx, hw_counter->config_base);
+
+ pmu_enable_counter(idx);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void pmu_disable_event(struct perf_event *event)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+
+ if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return;
+ }
+
+ pmu_disable_counter(idx);
+}
+
+static void pmu_start(struct arm_pmu *cci_pmu)
+{
+ u32 val;
+ unsigned long flags;
+ struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Enable all the PMU counters. */
+ val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
+ writel(val, cci_ctrl_base + CCI_PMCR);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void pmu_stop(struct arm_pmu *cci_pmu)
+{
+ u32 val;
+ unsigned long flags;
+ struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Disable all the PMU counters. */
+ val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
+ writel(val, cci_ctrl_base + CCI_PMCR);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static u32 pmu_read_counter(struct perf_event *event)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+ u32 value;
+
+ if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return 0;
+ }
+ value = pmu_read_register(idx, CCI_PMU_CNTR);
+
+ return value;
+}
+
+static void pmu_write_counter(struct perf_event *event, u32 value)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+
+ if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ else
+ pmu_write_register(value, idx, CCI_PMU_CNTR);
+}
+
+static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev)
+{
+ *cci_pmu = (struct arm_pmu){
+ .name = PMU_NAME,
+ .max_period = (1LLU << 32) - 1,
+ .get_hw_events = pmu_get_hw_events,
+ .get_event_idx = pmu_get_event_idx,
+ .map_event = pmu_map_event,
+ .request_irq = pmu_request_irq,
+ .handle_irq = pmu_handle_irq,
+ .free_irq = pmu_free_irq,
+ .enable = pmu_enable_event,
+ .disable = pmu_disable_event,
+ .start = pmu_start,
+ .stop = pmu_stop,
+ .read_counter = pmu_read_counter,
+ .write_counter = pmu_write_counter,
+ };
+
+ cci_pmu->plat_device = pdev;
+ cci_pmu->num_events = pmu_get_max_counters();
+
+ return armpmu_register(cci_pmu, -1);
+}
+
+static const struct of_device_id arm_cci_pmu_matches[] = {
+ {
+ .compatible = "arm,cci-400-pmu",
+ },
+ {},
+};
+
+static int cci_pmu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int i, ret, irq;
+
+ pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmu->base))
+ return PTR_ERR(pmu->base);
+
+ /*
+ * The CCI PMU has 5 overflow signals, one per counter, but some may be
+ * tied together to a common interrupt.
+ */
+ pmu->nr_irqs = 0;
+ for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ break;
+
+ if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs))
+ continue;
+
+ pmu->irqs[pmu->nr_irqs++] = irq;
+ }
+
+ /*
+ * Ensure that the device tree has as many interrupts as the number
+ * of counters.
+ */
+ if (i < CCI_PMU_MAX_HW_EVENTS) {
+ dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n",
+ i, CCI_PMU_MAX_HW_EVENTS);
+ return -EINVAL;
+ }
+
+ pmu->port_ranges = port_range_by_rev();
+ if (!pmu->port_ranges) {
+ dev_warn(&pdev->dev, "CCI PMU version not supported\n");
+ return -EINVAL;
+ }
+
+ pmu->cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*(pmu->cci_pmu)), GFP_KERNEL);
+ if (!pmu->cci_pmu)
+ return -ENOMEM;
+
+ pmu->hw_events.events = pmu->events;
+ pmu->hw_events.used_mask = pmu->used_mask;
+ raw_spin_lock_init(&pmu->hw_events.pmu_lock);
+
+ ret = cci_pmu_init(pmu->cci_pmu, pdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cci_platform_probe(struct platform_device *pdev)
+{
+ if (!cci_probed())
+ return -ENODEV;
+
+ return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+}
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
struct cpu_port {
u64 mpidr;
u32 port;
@@ -120,7 +692,7 @@ int cci_ace_get_port(struct device_node *dn)
}
EXPORT_SYMBOL_GPL(cci_ace_get_port);
-static void __init cci_ace_init_ports(void)
+static void cci_ace_init_ports(void)
{
int port, cpu;
struct device_node *cpun;
@@ -280,7 +852,7 @@ asmlinkage void __naked cci_enable_port_for_self(void)
/* Enable the CCI port */
" ldr r0, [r0, %[offsetof_port_phys]] \n"
-" mov r3, #"__stringify(CCI_ENABLE_REQ)" \n"
+" mov r3, %[cci_enable_req]\n"
" str r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"
/* poll the status reg for completion */
@@ -288,7 +860,7 @@ asmlinkage void __naked cci_enable_port_for_self(void)
" ldr r0, [r1] \n"
" ldr r0, [r0, r1] @ cci_ctrl_base \n"
"4: ldr r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
-" tst r1, #1 \n"
+" tst r1, %[cci_control_status_bits] \n"
" bne 4b \n"
" mov r0, #0 \n"
@@ -301,6 +873,8 @@ asmlinkage void __naked cci_enable_port_for_self(void)
"7: .word cci_ctrl_phys - . \n"
: :
[sizeof_cpu_port] "i" (sizeof(cpu_port)),
+ [cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
+ [cci_control_status_bits] "i" cpu_to_le32(1),
#ifndef __ARMEB__
[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
#else
@@ -386,7 +960,7 @@ static const struct of_device_id arm_cci_ctrl_if_matches[] = {
{},
};
-static int __init cci_probe(void)
+static int cci_probe(void)
{
struct cci_nb_ports const *cci_config;
int ret, i, nb_ace = 0, nb_ace_lite = 0;
@@ -490,7 +1064,7 @@ memalloc_err:
static int cci_init_status = -EAGAIN;
static DEFINE_MUTEX(cci_probing);
-static int __init cci_init(void)
+static int cci_init(void)
{
if (cci_init_status != -EAGAIN)
return cci_init_status;
@@ -502,18 +1076,55 @@ static int __init cci_init(void)
return cci_init_status;
}
+#ifdef CONFIG_HW_PERF_EVENTS
+static struct platform_driver cci_pmu_driver = {
+ .driver = {
+ .name = DRIVER_NAME_PMU,
+ .of_match_table = arm_cci_pmu_matches,
+ },
+ .probe = cci_pmu_probe,
+};
+
+static struct platform_driver cci_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = arm_cci_matches,
+ },
+ .probe = cci_platform_probe,
+};
+
+static int __init cci_platform_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&cci_pmu_driver);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&cci_platform_driver);
+}
+
+#else
+
+static int __init cci_platform_init(void)
+{
+ return 0;
+}
+
+#endif
/*
* To sort out early init calls ordering a helper function is provided to
 * check if the CCI driver has been initialized. The function checks if the
 * driver has been initialized; if not, it calls the init function that probes
 * the driver and updates the return value.
*/
-bool __init cci_probed(void)
+bool cci_probed(void)
{
return cci_init() == 0;
}
EXPORT_SYMBOL_GPL(cci_probed);
early_initcall(cci_init);
+core_initcall(cci_platform_init);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARM CCI support");
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 0671e45daa57..8fedbc250414 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/fs.h>
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 0aa9d91daef5..2f2b08457c67 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -165,6 +165,19 @@ config HW_RANDOM_OMAP
If unsure, say Y.
+config HW_RANDOM_OMAP3_ROM
+ tristate "OMAP3 ROM Random Number Generator support"
+ depends on HW_RANDOM && ARCH_OMAP3
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on OMAP34xx processors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called omap3-rom-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_OCTEON
tristate "Octeon Random Number Generator support"
depends on HW_RANDOM && CAVIUM_OCTEON_SOC
@@ -290,6 +303,19 @@ config HW_RANDOM_PSERIES
If unsure, say Y.
+config HW_RANDOM_POWERNV
+ tristate "PowerNV Random Number Generator support"
+ depends on HW_RANDOM && PPC_POWERNV
+ default HW_RANDOM
+ ---help---
+ This is the driver for Random Number Generator hardware found
+ in POWER7+ and above machines for PowerNV platform.
+
+ To compile this driver as a module, choose M here: the
+ module will be called powernv-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_EXYNOS
tristate "EXYNOS HW random number generator support"
depends on HW_RANDOM && HAS_IOMEM && HAVE_CLK
@@ -314,3 +340,15 @@ config HW_RANDOM_TPM
module will be called tpm-rng.
If unsure, say Y.
+
+config HW_RANDOM_MSM
+ tristate "Qualcomm MSM Random Number Generator support"
+ depends on HW_RANDOM && ARCH_MSM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Qualcomm MSM SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called msm-rng.
+
+ If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index bed467c9300e..3ae7755a52e7 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -15,6 +15,7 @@ n2-rng-y := n2-drv.o n2-asm.o
obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
+obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
@@ -24,6 +25,8 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
+obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
+obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
new file mode 100644
index 000000000000..148521e51dc6
--- /dev/null
+++ b/drivers/char/hw_random/msm-rng.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT 0x0000
+#define PRNG_STATUS 0x0004
+#define PRNG_LFSR_CFG 0x0100
+#define PRNG_CONFIG 0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK 0x0000ffff
+#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd
+#define PRNG_CONFIG_HW_ENABLE BIT(1)
+#define PRNG_STATUS_DATA_AVAIL BIT(0)
+
+#define MAX_HW_FIFO_DEPTH 16
+#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4)
+#define WORD_SZ 4
+
+struct msm_rng {
+ void __iomem *base;
+ struct clk *clk;
+ struct hwrng hwrng;
+};
+
+#define to_msm_rng(p) container_of(p, struct msm_rng, hwrng)
+
+static int msm_rng_enable(struct hwrng *hwrng, int enable)
+{
+ struct msm_rng *rng = to_msm_rng(hwrng);
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ if (enable) {
+ /* Enable PRNG only if it is not already enabled */
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ if (val & PRNG_CONFIG_HW_ENABLE)
+ goto already_enabled;
+
+ val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
+ val &= ~PRNG_LFSR_CFG_MASK;
+ val |= PRNG_LFSR_CFG_CLOCKS;
+ writel(val, rng->base + PRNG_LFSR_CFG);
+
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ val |= PRNG_CONFIG_HW_ENABLE;
+ writel(val, rng->base + PRNG_CONFIG);
+ } else {
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ val &= ~PRNG_CONFIG_HW_ENABLE;
+ writel(val, rng->base + PRNG_CONFIG);
+ }
+
+already_enabled:
+ clk_disable_unprepare(rng->clk);
+ return 0;
+}
+
+static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
+{
+ struct msm_rng *rng = to_msm_rng(hwrng);
+ size_t currsize = 0;
+ u32 *retdata = data;
+ size_t maxsize;
+ int ret;
+ u32 val;
+
+ /* calculate max size bytes to transfer back to caller */
+ maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
+
+ /* no room for word data */
+ if (maxsize < WORD_SZ)
+ return 0;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ /* read random data from hardware */
+ do {
+ val = readl_relaxed(rng->base + PRNG_STATUS);
+ if (!(val & PRNG_STATUS_DATA_AVAIL))
+ break;
+
+ val = readl_relaxed(rng->base + PRNG_DATA_OUT);
+ if (!val)
+ break;
+
+ *retdata++ = val;
+ currsize += WORD_SZ;
+
+ /* make sure we stay on 32bit boundary */
+ if ((maxsize - currsize) < WORD_SZ)
+ break;
+ } while (currsize < maxsize);
+
+ clk_disable_unprepare(rng->clk);
+
+ return currsize;
+}
+
+static int msm_rng_init(struct hwrng *hwrng)
+{
+ return msm_rng_enable(hwrng, 1);
+}
+
+static void msm_rng_cleanup(struct hwrng *hwrng)
+{
+ msm_rng_enable(hwrng, 0);
+}
+
+static int msm_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct msm_rng *rng;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rng);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rng->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rng->base))
+ return PTR_ERR(rng->base);
+
+ rng->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(rng->clk))
+ return PTR_ERR(rng->clk);
+
+ rng->hwrng.name = KBUILD_MODNAME;
+ rng->hwrng.init = msm_rng_init;
+ rng->hwrng.cleanup = msm_rng_cleanup;
+ rng->hwrng.read = msm_rng_read;
+
+ ret = hwrng_register(&rng->hwrng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register hwrng\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int msm_rng_remove(struct platform_device *pdev)
+{
+ struct msm_rng *rng = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&rng->hwrng);
+ return 0;
+}
+
+static const struct of_device_id msm_rng_of_match[] = {
+ { .compatible = "qcom,prng", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_rng_of_match);
+
+static struct platform_driver msm_rng_driver = {
+ .probe = msm_rng_probe,
+ .remove = msm_rng_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(msm_rng_of_match),
+ }
+};
+module_platform_driver(msm_rng_driver);
+
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_AUTHOR("The Linux Foundation");
+MODULE_DESCRIPTION("Qualcomm MSM random number generator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
new file mode 100644
index 000000000000..c853e9e68573
--- /dev/null
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -0,0 +1,141 @@
+/*
+ * omap3-rom-rng.c - RNG driver for TI OMAP3 CPU family
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Juha Yrjola <juha.yrjola@solidboot.com>
+ *
+ * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+#define RNG_RESET 0x01
+#define RNG_GEN_PRNG_HW_INIT 0x02
+#define RNG_GEN_HW 0x08
+
+/* param1: ptr, param2: count, param3: flag */
+static u32 (*omap3_rom_rng_call)(u32, u32, u32);
+
+static struct timer_list idle_timer;
+static int rng_idle;
+static struct clk *rng_clk;
+
+static void omap3_rom_rng_idle(unsigned long data)
+{
+ int r;
+
+ r = omap3_rom_rng_call(0, 0, RNG_RESET);
+ if (r != 0) {
+ pr_err("reset failed: %d\n", r);
+ return;
+ }
+ clk_disable_unprepare(rng_clk);
+ rng_idle = 1;
+}
+
+static int omap3_rom_rng_get_random(void *buf, unsigned int count)
+{
+ u32 r;
+ u32 ptr;
+
+ del_timer_sync(&idle_timer);
+ if (rng_idle) {
+ clk_prepare_enable(rng_clk);
+ r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
+ if (r != 0) {
+ clk_disable_unprepare(rng_clk);
+ pr_err("HW init failed: %d\n", r);
+ return -EIO;
+ }
+ rng_idle = 0;
+ }
+
+ ptr = virt_to_phys(buf);
+ r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW);
+ mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500));
+ if (r != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int omap3_rom_rng_data_present(struct hwrng *rng, int wait)
+{
+ return 1;
+}
+
+static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ int r;
+
+ r = omap3_rom_rng_get_random(data, 4);
+ if (r < 0)
+ return r;
+ return 4;
+}
+
+static struct hwrng omap3_rom_rng_ops = {
+ .name = "omap3-rom",
+ .data_present = omap3_rom_rng_data_present,
+ .data_read = omap3_rom_rng_data_read,
+};
+
+static int omap3_rom_rng_probe(struct platform_device *pdev)
+{
+ pr_info("initializing\n");
+
+ omap3_rom_rng_call = pdev->dev.platform_data;
+ if (!omap3_rom_rng_call) {
+ pr_err("omap3_rom_rng_call is NULL\n");
+ return -EINVAL;
+ }
+
+ setup_timer(&idle_timer, omap3_rom_rng_idle, 0);
+ rng_clk = clk_get(&pdev->dev, "ick");
+ if (IS_ERR(rng_clk)) {
+ pr_err("unable to get RNG clock\n");
+ return PTR_ERR(rng_clk);
+ }
+
+ /* Leave the RNG in reset state. */
+ clk_prepare_enable(rng_clk);
+ omap3_rom_rng_idle(0);
+
+ return hwrng_register(&omap3_rom_rng_ops);
+}
+
+static int omap3_rom_rng_remove(struct platform_device *pdev)
+{
+ hwrng_unregister(&omap3_rom_rng_ops);
+ clk_disable_unprepare(rng_clk);
+ clk_put(rng_clk);
+ return 0;
+}
+
+static struct platform_driver omap3_rom_rng_driver = {
+ .driver = {
+ .name = "omap3-rom-rng",
+ .owner = THIS_MODULE,
+ },
+ .probe = omap3_rom_rng_probe,
+ .remove = omap3_rom_rng_remove,
+};
+
+module_platform_driver(omap3_rom_rng_driver);
+
+MODULE_ALIAS("platform:omap3-rom-rng");
+MODULE_AUTHOR("Juha Yrjola");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index c6df5b29af08..c66279bb6ef3 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/io.h>
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
new file mode 100644
index 000000000000..3f4f63204560
--- /dev/null
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2013 Michael Ellerman, Guo Chao, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+
+static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ unsigned long *buf;
+ int i, len;
+
+ /* We rely on rng_buffer_size() being >= sizeof(unsigned long) */
+ len = max / sizeof(unsigned long);
+
+ buf = (unsigned long *)data;
+
+ for (i = 0; i < len; i++)
+ powernv_get_random_long(buf++);
+
+ return len * sizeof(unsigned long);
+}
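/*
 * Worked example, not part of the patch: on a 64-bit kernel a request of
 * max = 32 bytes gives len = 32 / 8 = 4, so four powernv_get_random_long()
 * calls fill the buffer and the function reports 32 bytes back to the
 * hwrng core.
 */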
+
+static struct hwrng powernv_hwrng = {
+ .name = "powernv-rng",
+ .read = powernv_rng_read,
+};
+
+static int powernv_rng_remove(struct platform_device *pdev)
+{
+ hwrng_unregister(&powernv_hwrng);
+
+ return 0;
+}
+
+static int powernv_rng_probe(struct platform_device *pdev)
+{
+ int rc;
+
+ rc = hwrng_register(&powernv_hwrng);
+ if (rc) {
+ /* We only register one device, ignore any others */
+ if (rc == -EEXIST)
+ rc = -ENODEV;
+
+ return rc;
+ }
+
+ pr_info("Registered powernv hwrng.\n");
+
+ return 0;
+}
+
+static struct of_device_id powernv_rng_match[] = {
+ { .compatible = "ibm,power-rng",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, powernv_rng_match);
+
+static struct platform_driver powernv_rng_driver = {
+ .driver = {
+ .name = "powernv_rng",
+ .of_match_table = powernv_rng_match,
+ },
+ .probe = powernv_rng_probe,
+ .remove = powernv_rng_remove,
+};
+module_platform_driver(powernv_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Bare metal HWRNG driver for POWER7+ and above");
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index 5f1197929f0c..ab7ffdec0ec3 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -17,18 +17,25 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hw_random.h>
#include <asm/vio.h>
-#define MODULE_NAME "pseries-rng"
static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
{
- if (plpar_hcall(H_RANDOM, (unsigned long *)data) != H_SUCCESS) {
- printk(KERN_ERR "pseries rng hcall error\n");
- return 0;
+ int rc;
+
+ rc = plpar_hcall(H_RANDOM, (unsigned long *)data);
+ if (rc != H_SUCCESS) {
+ pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
+ return -EIO;
}
+
+ /* The hypervisor interface returns 64 bits */
return 8;
}
@@ -47,7 +54,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
};
static struct hwrng pseries_rng = {
- .name = MODULE_NAME,
+ .name = KBUILD_MODNAME,
.data_read = pseries_rng_data_read,
};
@@ -70,7 +77,7 @@ static struct vio_device_id pseries_rng_driver_ids[] = {
MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids);
static struct vio_driver pseries_rng_driver = {
- .name = MODULE_NAME,
+ .name = KBUILD_MODNAME,
.probe = pseries_rng_probe,
.remove = pseries_rng_remove,
.get_desired_dma = pseries_rng_get_desired_dma,
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index e737772ad69a..de5a6dcfb3e2 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -221,7 +221,7 @@ static void __exit mod_exit(void)
module_init(mod_init);
module_exit(mod_exit);
-static struct x86_cpu_id via_rng_cpu_id[] = {
+static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_XSTORE),
{}
};
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index ef46a9cfd832..c12398d1517c 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -133,7 +133,7 @@ static void virtrng_remove(struct virtio_device *vdev)
remove_common(vdev);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtrng_freeze(struct virtio_device *vdev)
{
remove_common(vdev);
@@ -157,7 +157,7 @@ static struct virtio_driver virtio_rng_driver = {
.id_table = id_table,
.probe = virtrng_probe,
.remove = virtrng_remove,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtrng_freeze,
.restore = virtrng_restore,
#endif
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index f3223aac4df1..db5fa4e9b9e5 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -285,9 +285,9 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations raw_fops = {
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.write = do_sync_write,
- .aio_write = blkdev_aio_write,
+ .write_iter = blkdev_write_iter,
.fsync = blkdev_fsync,
.open = raw_open,
.release = raw_release,
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 94c0c74434ea..1a65838888cd 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -33,6 +33,15 @@ config TCG_TIS
from within Linux. To compile this driver as a module, choose
M here; the module will be called tpm_tis.
+config TCG_TIS_I2C_ATMEL
+ tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
+ depends on I2C
+ ---help---
+ If you have an Atmel I2C TPM security chip say Yes and it will be
+ accessible from within Linux.
+ To compile this driver as a module, choose M here; the module will
+ be called tpm_tis_i2c_atmel.
+
config TCG_TIS_I2C_INFINEON
tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)"
depends on I2C
@@ -42,7 +51,17 @@ config TCG_TIS_I2C_INFINEON
Specification 0.20 say Yes and it will be accessible from within
Linux.
To compile this driver as a module, choose M here; the module
- will be called tpm_tis_i2c_infineon.
+ will be called tpm_i2c_infineon.
+
+config TCG_TIS_I2C_NUVOTON
+ tristate "TPM Interface Specification 1.2 Interface (I2C - Nuvoton)"
+ depends on I2C
+ ---help---
+ If you have a TPM security chip with an I2C interface from
+ Nuvoton Technology Corp. say Yes and it will be accessible
+ from within Linux.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_i2c_nuvoton.
config TCG_NSC
tristate "National Semiconductor TPM Interface"
@@ -82,14 +101,14 @@ config TCG_IBMVTPM
as a module, choose M here; the module will be called tpm_ibmvtpm.
config TCG_ST33_I2C
- tristate "STMicroelectronics ST33 I2C TPM"
- depends on I2C
- depends on GPIOLIB
- ---help---
- If you have a TPM security chip from STMicroelectronics working with
- an I2C bus say Yes and it will be accessible from within Linux.
- To compile this driver as a module, choose M here; the module will be
- called tpm_stm_st33_i2c.
+ tristate "STMicroelectronics ST33 I2C TPM"
+ depends on I2C
+ depends on GPIOLIB
+ ---help---
+ If you have a TPM security chip from STMicroelectronics working with
+ an I2C bus say Yes and it will be accessible from within Linux.
+ To compile this driver as a module, choose M here; the module will be
+ called tpm_stm_st33_i2c.
config TCG_XEN
tristate "XEN TPM Interface"
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index eb41ff97d0ad..b80a4000daee 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -2,17 +2,20 @@
# Makefile for the kernel tpm device drivers.
#
obj-$(CONFIG_TCG_TPM) += tpm.o
+tpm-y := tpm-interface.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o
+
ifdef CONFIG_ACPI
- obj-$(CONFIG_TCG_TPM) += tpm_bios.o
- tpm_bios-objs += tpm_eventlog.o tpm_acpi.o tpm_ppi.o
+ tpm-y += tpm_eventlog.o tpm_acpi.o
else
ifdef CONFIG_TCG_IBMVTPM
- obj-$(CONFIG_TCG_TPM) += tpm_bios.o
- tpm_bios-objs += tpm_eventlog.o tpm_of.o
+ tpm-y += tpm_eventlog.o tpm_of.o
endif
endif
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
+obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm-interface.c
index e3c974a6c522..6ae41d337630 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -10,13 +10,13 @@
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
* Device driver for TCG/TCPA TPM (trusted platform module).
- * Specifications at www.trustedcomputinggroup.org
+ * Specifications at www.trustedcomputinggroup.org
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
- *
+ *
* Note, the TPM chip is not interrupt driven (only polling)
* and can have very long timeouts (minutes!). Hence the unusual
* calls to msleep.
@@ -371,13 +371,14 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
return -ENODATA;
if (count > bufsiz) {
dev_err(chip->dev,
- "invalid count value %x %zx \n", count, bufsiz);
+ "invalid count value %x %zx\n", count, bufsiz);
return -E2BIG;
}
mutex_lock(&chip->tpm_mutex);
- if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) {
+ rc = chip->vendor.send(chip, (u8 *) buf, count);
+ if (rc < 0) {
dev_err(chip->dev,
"tpm_transmit: tpm_send: error %zd\n", rc);
goto out;
@@ -444,7 +445,7 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
{
int err;
- len = tpm_transmit(chip,(u8 *) cmd, len);
+ len = tpm_transmit(chip, (u8 *) cmd, len);
if (len < 0)
return len;
else if (len < TPM_HEADER_SIZE)
@@ -658,7 +659,7 @@ static int tpm_continue_selftest(struct tpm_chip *chip)
return rc;
}
-ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
+ssize_t tpm_show_enabled(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
@@ -674,7 +675,7 @@ ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
}
EXPORT_SYMBOL_GPL(tpm_show_enabled);
-ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
+ssize_t tpm_show_active(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
@@ -690,7 +691,7 @@ ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
}
EXPORT_SYMBOL_GPL(tpm_show_active);
-ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
+ssize_t tpm_show_owned(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
@@ -706,8 +707,8 @@ ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
}
EXPORT_SYMBOL_GPL(tpm_show_owned);
-ssize_t tpm_show_temp_deactivated(struct device * dev,
- struct device_attribute * attr, char *buf)
+ssize_t tpm_show_temp_deactivated(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
cap_t cap;
ssize_t rc;
@@ -769,10 +770,10 @@ static int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
/**
* tpm_pcr_read - read a pcr value
- * @chip_num: tpm idx # or ANY
+ * @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to retrieve
- * @res_buf: TPM_PCR value
- * size of res_buf is 20 bytes (or NULL if you don't care)
+ * @res_buf: TPM_PCR value
+ * size of res_buf is 20 bytes (or NULL if you don't care)
*
* The TPM driver should be built-in, but for whatever reason it
* isn't, protect against the chip disappearing, by incrementing
@@ -794,9 +795,9 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read);
/**
* tpm_pcr_extend - extend pcr value with hash
- * @chip_num: tpm idx # or AN&
+ * @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to extend
- * @hash: hash value used to extend pcr value
+ * @hash: hash value used to extend pcr value
*
* The TPM driver should be built-in, but for whatever reason it
* isn't, protect against the chip disappearing, by incrementing
@@ -847,8 +848,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
unsigned long duration;
struct tpm_cmd_t cmd;
- duration = tpm_calc_ordinal_duration(chip,
- TPM_ORD_CONTINUE_SELFTEST);
+ duration = tpm_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST);
loops = jiffies_to_msecs(duration) / delay_msec;
@@ -965,12 +965,12 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
if (err)
goto out;
- /*
+ /*
ignore header 10 bytes
algorithm 32 bits (1 == RSA )
encscheme 16 bits
sigscheme 16 bits
- parameters (RSA 12->bytes: keybit, #primes, expbit)
+ parameters (RSA 12->bytes: keybit, #primes, expbit)
keylenbytes 32 bits
256 byte modulus
ignore checksum 20 bytes
@@ -1020,43 +1020,33 @@ ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
- rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
- "attempting to determine the 1.1 version");
- if (rc)
- return 0;
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version.Major, cap.tpm_version.Minor,
- cap.tpm_version.revMajor, cap.tpm_version.revMinor);
- return str - buf;
-}
-EXPORT_SYMBOL_GPL(tpm_show_caps);
-
-ssize_t tpm_show_caps_1_2(struct device * dev,
- struct device_attribute * attr, char *buf)
-{
- cap_t cap;
- ssize_t rc;
- char *str = buf;
-
- rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
- "attempting to determine the manufacturer");
- if (rc)
- return 0;
- str += sprintf(str, "Manufacturer: 0x%x\n",
- be32_to_cpu(cap.manufacturer_id));
+ /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version");
- if (rc)
- return 0;
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor,
- cap.tpm_version_1_2.revMajor,
- cap.tpm_version_1_2.revMinor);
+ if (!rc) {
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ cap.tpm_version_1_2.Major,
+ cap.tpm_version_1_2.Minor,
+ cap.tpm_version_1_2.revMajor,
+ cap.tpm_version_1_2.revMinor);
+ } else {
+ /* Otherwise just use TPM_STRUCT_VER */
+ rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version");
+ if (rc)
+ return 0;
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ cap.tpm_version.Major,
+ cap.tpm_version.Minor,
+ cap.tpm_version.revMajor,
+ cap.tpm_version.revMinor);
+ }
+
return str - buf;
}
-EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
+EXPORT_SYMBOL_GPL(tpm_show_caps);
ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1102,8 +1092,8 @@ ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
}
EXPORT_SYMBOL_GPL(tpm_store_cancel);
-static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, bool check_cancel,
- bool *canceled)
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
{
u8 status = chip->vendor.status(chip);
@@ -1170,38 +1160,25 @@ EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
*/
int tpm_open(struct inode *inode, struct file *file)
{
- int minor = iminor(inode);
- struct tpm_chip *chip = NULL, *pos;
-
- rcu_read_lock();
- list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
- if (pos->vendor.miscdev.minor == minor) {
- chip = pos;
- get_device(chip->dev);
- break;
- }
- }
- rcu_read_unlock();
-
- if (!chip)
- return -ENODEV;
+ struct miscdevice *misc = file->private_data;
+ struct tpm_chip *chip = container_of(misc, struct tpm_chip,
+ vendor.miscdev);
if (test_and_set_bit(0, &chip->is_open)) {
dev_dbg(chip->dev, "Another process owns this TPM\n");
- put_device(chip->dev);
return -EBUSY;
}
chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL);
if (chip->data_buffer == NULL) {
clear_bit(0, &chip->is_open);
- put_device(chip->dev);
return -ENOMEM;
}
atomic_set(&chip->data_pending, 0);
file->private_data = chip;
+ get_device(chip->dev);
return 0;
}
EXPORT_SYMBOL_GPL(tpm_open);
@@ -1463,7 +1440,6 @@ void tpm_dev_vendor_release(struct tpm_chip *chip)
chip->vendor.release(chip->dev);
clear_bit(chip->dev_num, dev_mask);
- kfree(chip->vendor.miscdev.name);
}
EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
@@ -1487,7 +1463,7 @@ void tpm_dev_release(struct device *dev)
EXPORT_SYMBOL_GPL(tpm_dev_release);
/*
- * Called from tpm_<specific>.c probe function only for devices
+ * Called from tpm_<specific>.c probe function only for devices
* the driver has determined it should claim. Prior to calling
* this function the specific probe function has called pci_enable_device
* upon errant exit from this function specific probe function should call
@@ -1496,17 +1472,13 @@ EXPORT_SYMBOL_GPL(tpm_dev_release);
struct tpm_chip *tpm_register_hardware(struct device *dev,
const struct tpm_vendor_specific *entry)
{
-#define DEVNAME_SIZE 7
-
- char *devname;
struct tpm_chip *chip;
/* Driver specific per-device data */
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL);
- if (chip == NULL || devname == NULL)
- goto out_free;
+ if (chip == NULL)
+ return NULL;
mutex_init(&chip->buffer_mutex);
mutex_init(&chip->tpm_mutex);
@@ -1531,8 +1503,9 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
set_bit(chip->dev_num, dev_mask);
- scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
- chip->vendor.miscdev.name = devname;
+ scnprintf(chip->devname, sizeof(chip->devname), "%s%d", "tpm",
+ chip->dev_num);
+ chip->vendor.miscdev.name = chip->devname;
chip->vendor.miscdev.parent = dev;
chip->dev = get_device(dev);
@@ -1558,7 +1531,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
goto put_device;
}
- chip->bios_dir = tpm_bios_log_setup(devname);
+ chip->bios_dir = tpm_bios_log_setup(chip->devname);
/* Make chip available */
spin_lock(&driver_lock);
@@ -1571,7 +1544,6 @@ put_device:
put_device(chip->dev);
out_free:
kfree(chip);
- kfree(devname);
return NULL;
}
EXPORT_SYMBOL_GPL(tpm_register_hardware);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index a7bfc176ed43..f32847872193 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -59,8 +59,6 @@ extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr,
char *);
extern ssize_t tpm_show_caps(struct device *, struct device_attribute *attr,
char *);
-extern ssize_t tpm_show_caps_1_2(struct device *, struct device_attribute *attr,
- char *);
extern ssize_t tpm_store_cancel(struct device *, struct device_attribute *attr,
const char *, size_t);
extern ssize_t tpm_show_enabled(struct device *, struct device_attribute *attr,
@@ -122,6 +120,7 @@ struct tpm_chip {
struct device *dev; /* Device stuff */
int dev_num; /* /dev/tpm# */
+ char devname[7];
unsigned long is_open; /* only one allowed */
int time_expired;
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 99d6820c611d..c9a528d25d22 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -202,7 +202,7 @@ static int __init init_atmel(void)
have_region =
(atmel_request_region
- (tpm_atmel.base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
+ (base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0);
if (IS_ERR(pdev)) {
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index 84ddc557b8f8..59f7cb28260b 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -406,7 +406,6 @@ out_tpm:
out:
return NULL;
}
-EXPORT_SYMBOL_GPL(tpm_bios_log_setup);
void tpm_bios_log_teardown(struct dentry **lst)
{
@@ -415,5 +414,3 @@ void tpm_bios_log_teardown(struct dentry **lst)
for (i = 0; i < 3; i++)
securityfs_remove(lst[i]);
}
-EXPORT_SYMBOL_GPL(tpm_bios_log_teardown);
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
new file mode 100644
index 000000000000..c3cd7fe481a1
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -0,0 +1,284 @@
+/*
+ * ATMEL I2C TPM AT97SC3204T
+ *
+ * Copyright (C) 2012 V Lab Technologies
+ * Teddy Reed <teddy@prosauce.org>
+ * Copyright (C) 2013, Obsidian Research Corp.
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ * Device driver for ATMEL I2C TPMs.
+ *
+ * Teddy Reed determined the basic I2C command flow, unlike other I2C TPM
+ * devices the raw TCG formatted TPM command data is written via I2C and then
+ * raw TCG formatted TPM command data is returned via I2C.
+ *
+ * TCG status/locality/etc. functions seen in the LPC implementation do not
+ * seem to be present.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include "tpm.h"
+
+#define I2C_DRIVER_NAME "tpm_i2c_atmel"
+
+#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */
+#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */
+
+#define ATMEL_STS_OK 1
+
+struct priv_data {
+ size_t len;
+ /* This is the amount we read on the first try. 25 was chosen to fit a
+ * fair number of read responses in the buffer so a 2nd retry can be
+ * avoided in small message cases. */
+ u8 buffer[sizeof(struct tpm_output_header) + 25];
+};
+
+static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct priv_data *priv = chip->vendor.priv;
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ s32 status;
+
+ priv->len = 0;
+
+ if (len <= 2)
+ return -EIO;
+
+ status = i2c_master_send(client, buf, len);
+
+ dev_dbg(chip->dev,
+ "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
+ (int)min_t(size_t, 64, len), buf, len, status);
+ return status;
+}
+
+static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct priv_data *priv = chip->vendor.priv;
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ struct tpm_output_header *hdr =
+ (struct tpm_output_header *)priv->buffer;
+ u32 expected_len;
+ int rc;
+
+ if (priv->len == 0)
+ return -EIO;
+
+ /* Get the message size from the message header; if we didn't get the
+ * whole message in read_status, then we need to re-read the
+ * message. */
+ expected_len = be32_to_cpu(hdr->length);
+ if (expected_len > count)
+ return -ENOMEM;
+
+ if (priv->len >= expected_len) {
+ dev_dbg(chip->dev,
+ "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
+ (int)min_t(size_t, 64, expected_len), buf, count,
+ expected_len);
+ memcpy(buf, priv->buffer, expected_len);
+ return expected_len;
+ }
+
+ rc = i2c_master_recv(client, buf, expected_len);
+ dev_dbg(chip->dev,
+ "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
+ (int)min_t(size_t, 64, expected_len), buf, count,
+ expected_len);
+ return rc;
+}
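/*
 * Illustrative sketch, not part of the patch: the re-read decision above is
 * driven entirely by the big-endian length field of the buffered response
 * header. For example, a 10-byte header-only response (80 01 00 00 00 0a
 * 00 00 00 00) decodes to a total length of 10, already fits in
 * priv->buffer, and is returned via the early memcpy() path.
 * "atmel_response_buffered" is a hypothetical helper.
 */
static bool atmel_response_buffered(const struct priv_data *priv)
{
	const struct tpm_output_header *hdr =
		(const struct tpm_output_header *)priv->buffer;

	return priv->len >= be32_to_cpu(hdr->length);
}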
+
+static void i2c_atmel_cancel(struct tpm_chip *chip)
+{
+ dev_err(chip->dev, "TPM operation cancellation was requested, but is not supported");
+}
+
+static u8 i2c_atmel_read_status(struct tpm_chip *chip)
+{
+ struct priv_data *priv = chip->vendor.priv;
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ int rc;
+
+ /* The TPM fails the I2C read until it is ready, so we do the entire
+ * transfer here and buffer it locally. This way the common code can
+ * properly handle the timeouts. */
+ priv->len = 0;
+ memset(priv->buffer, 0, sizeof(priv->buffer));
+
+ /* Once the TPM has completed the command, the result remains readable
+ * until another command is issued. */
+ rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer));
+ dev_dbg(chip->dev,
+ "%s: sts=%d", __func__, rc);
+ if (rc <= 0)
+ return 0;
+
+ priv->len = rc;
+
+ return ATMEL_STS_OK;
+}
+
+static const struct file_operations i2c_atmel_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_read,
+ .write = tpm_write,
+ .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
+
+static struct attribute *i2c_atmel_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_active.attr,
+ &dev_attr_owned.attr,
+ &dev_attr_temp_deactivated.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr,
+ &dev_attr_durations.attr,
+ &dev_attr_timeouts.attr,
+ NULL,
+};
+
+static struct attribute_group i2c_atmel_attr_grp = {
+ .attrs = i2c_atmel_attrs
+};
+
+static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return 0;
+}
+
+static const struct tpm_vendor_specific i2c_atmel = {
+ .status = i2c_atmel_read_status,
+ .recv = i2c_atmel_recv,
+ .send = i2c_atmel_send,
+ .cancel = i2c_atmel_cancel,
+ .req_complete_mask = ATMEL_STS_OK,
+ .req_complete_val = ATMEL_STS_OK,
+ .req_canceled = i2c_atmel_req_canceled,
+ .attr_group = &i2c_atmel_attr_grp,
+ .miscdev.fops = &i2c_atmel_ops,
+};
+
+static int i2c_atmel_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc;
+ struct tpm_chip *chip;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ chip = tpm_register_hardware(dev, &i2c_atmel);
+ if (!chip) {
+ dev_err(dev, "%s() error in tpm_register_hardware\n", __func__);
+ return -ENODEV;
+ }
+
+ chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
+ GFP_KERNEL);
+
+ /* Default timeouts */
+ chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+ chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.irq = 0;
+
+ /* There is no known way to probe for this device, and all version
+ * information seems to be read via TPM commands. Thus we rely on the
+ * TPM startup process in the common code to detect the device. */
+ if (tpm_get_timeouts(chip)) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ if (tpm_do_selftest(chip)) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(chip->dev);
+ return rc;
+}
+
+static int i2c_atmel_remove(struct i2c_client *client)
+{
+ struct device *dev = &(client->dev);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip)
+ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(dev);
+ kfree(chip);
+ return 0;
+}
+
+static const struct i2c_device_id i2c_atmel_id[] = {
+ {I2C_DRIVER_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, i2c_atmel_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_atmel_of_match[] = {
+ {.compatible = "atmel,at97sc3204t"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_atmel_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver i2c_atmel_driver = {
+ .id_table = i2c_atmel_id,
+ .probe = i2c_atmel_probe,
+ .remove = i2c_atmel_remove,
+ .driver = {
+ .name = I2C_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &i2c_atmel_pm_ops,
+ .of_match_table = of_match_ptr(i2c_atmel_of_match),
+ },
+};
+
+module_i2c_driver(i2c_atmel_driver);
+
+MODULE_AUTHOR("Jason Gunthorpe <jgunthorpe@obsidianresearch.com>");
+MODULE_DESCRIPTION("Atmel TPM I2C Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index b8735de8ce95..fefd2aa5c81e 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -581,7 +581,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
@@ -685,7 +685,6 @@ out_vendor:
chip->dev->release = NULL;
chip->release = NULL;
tpm_dev.client = NULL;
- dev_set_drvdata(chip->dev, chip);
out_err:
return rc;
}
@@ -766,7 +765,6 @@ static int tpm_tis_i2c_remove(struct i2c_client *client)
chip->dev->release = NULL;
chip->release = NULL;
tpm_dev.client = NULL;
- dev_set_drvdata(chip->dev, chip);
return 0;
}
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
new file mode 100644
index 000000000000..6276fea01ff0
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -0,0 +1,710 @@
+/******************************************************************************
+ * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501,
+ * based on the TCG TPM Interface Spec version 1.2.
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * Copyright (C) 2011, Nuvoton Technology Corporation.
+ * Dan Morav <dan.morav@nuvoton.com>
+ * Copyright (C) 2013, Obsidian Research Corp.
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Nuvoton contact information: APC.Support@nuvoton.com
+ *****************************************************************************/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include "tpm.h"
+
+/* I2C interface offsets */
+#define TPM_STS 0x00
+#define TPM_BURST_COUNT 0x01
+#define TPM_DATA_FIFO_W 0x20
+#define TPM_DATA_FIFO_R 0x40
+#define TPM_VID_DID_RID 0x60
+/* TPM command header size */
+#define TPM_HEADER_SIZE 10
+#define TPM_RETRY 5
+/*
+ * I2C bus device maximum buffer size w/o counting I2C address or command
+ * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data
+ */
+#define TPM_I2C_MAX_BUF_SIZE 32
+#define TPM_I2C_RETRY_COUNT 32
+#define TPM_I2C_BUS_DELAY 1 /* msec */
+#define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */
+#define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */
+
+#define I2C_DRIVER_NAME "tpm_i2c_nuvoton"
+
+struct priv_data {
+ unsigned int intrs;
+};
+
+static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size,
+ u8 *data)
+{
+ s32 status;
+
+ status = i2c_smbus_read_i2c_block_data(client, offset, size, data);
+ dev_dbg(&client->dev,
+ "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
+ offset, size, (int)size, data, status);
+ return status;
+}
+
+static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size,
+ u8 *data)
+{
+ s32 status;
+
+ status = i2c_smbus_write_i2c_block_data(client, offset, size, data);
+ dev_dbg(&client->dev,
+ "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
+ offset, size, (int)size, data, status);
+ return status;
+}
+
+#define TPM_STS_VALID 0x80
+#define TPM_STS_COMMAND_READY 0x40
+#define TPM_STS_GO 0x20
+#define TPM_STS_DATA_AVAIL 0x10
+#define TPM_STS_EXPECT 0x08
+#define TPM_STS_RESPONSE_RETRY 0x02
+#define TPM_STS_ERR_VAL 0x07 /* bit2...bit0 reads always 0 */
+
+#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */
+#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */
+
+/* read TPM_STS register */
+static u8 i2c_nuvoton_read_status(struct tpm_chip *chip)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ s32 status;
+ u8 data;
+
+ status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data);
+ if (status <= 0) {
+ dev_err(chip->dev, "%s() error return %d\n", __func__,
+ status);
+ data = TPM_STS_ERR_VAL;
+ }
+
+ return data;
+}
+
+/* write byte to TPM_STS register */
+static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
+{
+ s32 status;
+ int i;
+
+ /* this causes the current command to be aborted */
+ for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) {
+ status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data);
+ msleep(TPM_I2C_BUS_DELAY);
+ }
+ return status;
+}
+
+/* write commandReady to TPM_STS register */
+static void i2c_nuvoton_ready(struct tpm_chip *chip)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ s32 status;
+
+ /* this causes the current command to be aborted */
+ status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY);
+ if (status < 0)
+ dev_err(chip->dev,
+ "%s() fail to write TPM_STS.commandReady\n", __func__);
+}
+
+/* read burstCount field from TPM_STS register
+ * return -1 on fail to read */
+static int i2c_nuvoton_get_burstcount(struct i2c_client *client,
+ struct tpm_chip *chip)
+{
+ unsigned long stop = jiffies + chip->vendor.timeout_d;
+ s32 status;
+ int burst_count = -1;
+ u8 data;
+
+ /* wait for burstcount to be non-zero */
+ do {
+ /* in I2C burstCount is 1 byte */
+ status = i2c_nuvoton_read_buf(client, TPM_BURST_COUNT, 1,
+ &data);
+ if (status > 0 && data > 0) {
+ burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data);
+ break;
+ }
+ msleep(TPM_I2C_BUS_DELAY);
+ } while (time_before(jiffies, stop));
+
+ return burst_count;
+}
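
The burstCount read above caps how many bytes the FIFO will accept or provide in one access, clamped to TPM_I2C_MAX_BUF_SIZE, and the driver then chunks a transfer with min_t(). A minimal standalone sketch of that chunking arithmetic (plain C, not kernel code; next_chunk() is a name introduced here only for illustration):

    #include <stddef.h>

    /* How many bytes to move in the next FIFO access, given the burst
     * count reported by the chip and how much of the transfer is left --
     * the same clamp recv_data()/send() apply with min_t(). */
    static size_t next_chunk(size_t burst_count, size_t total, size_t done)
    {
            size_t remaining = total - done;

            return burst_count < remaining ? burst_count : remaining;
    }
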
+
+/*
+ * WPCT301/NPCT501 SINT# supports only dataAvail
+ * any call to this function which is not waiting for dataAvail will
+ * set queue to NULL to avoid waiting for interrupt
+ */
+static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value)
+{
+ u8 status = i2c_nuvoton_read_status(chip);
+ return (status != TPM_STS_ERR_VAL) && ((status & mask) == value);
+}
+
+static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
+ u32 timeout, wait_queue_head_t *queue)
+{
+ if (chip->vendor.irq && queue) {
+ s32 rc;
+ DEFINE_WAIT(wait);
+ struct priv_data *priv = chip->vendor.priv;
+ unsigned int cur_intrs = priv->intrs;
+
+ enable_irq(chip->vendor.irq);
+ rc = wait_event_interruptible_timeout(*queue,
+ cur_intrs != priv->intrs,
+ timeout);
+ if (rc > 0)
+ return 0;
+ /* At this point we know that the SINT pin is asserted, so we
+ * do not need to do i2c_nuvoton_check_status */
+ } else {
+ unsigned long ten_msec, stop;
+ bool status_valid;
+
+ /* check current status */
+ status_valid = i2c_nuvoton_check_status(chip, mask, value);
+ if (status_valid)
+ return 0;
+
+ /* use polling to wait for the event */
+ ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
+ stop = jiffies + timeout;
+ do {
+ if (time_before(jiffies, ten_msec))
+ msleep(TPM_I2C_RETRY_DELAY_SHORT);
+ else
+ msleep(TPM_I2C_RETRY_DELAY_LONG);
+ status_valid = i2c_nuvoton_check_status(chip, mask,
+ value);
+ if (status_valid)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ dev_err(chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
+ value);
+ return -ETIMEDOUT;
+}
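
When no interrupt is wired up, the function above polls with a two-speed backoff: roughly 2 ms sleeps for the first 10 ms, then 10 ms sleeps until the caller's timeout expires. A hedged standalone sketch of that shape (now_ms(), sleep_ms() and status_ok() are hypothetical stand-ins for jiffies, msleep() and i2c_nuvoton_check_status()):

    /* Two-phase polling sketch: fast retries first, slower ones later. */
    static int wait_with_backoff(unsigned long timeout_ms)
    {
            unsigned long start = now_ms();          /* hypothetical clock */

            do {
                    if (status_ok())                 /* hypothetical status check */
                            return 0;
                    sleep_ms(now_ms() - start < 10 ? 2 : 10);
            } while (now_ms() - start < timeout_ms);

            return -1;                               /* timed out */
    }
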
+
+/* wait for dataAvail field to be set in the TPM_STS register */
+static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout,
+ wait_queue_head_t *queue)
+{
+ return i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ timeout, queue);
+}
+
+/* Read @count bytes into @buf from TPM_RD_FIFO register */
+static int i2c_nuvoton_recv_data(struct i2c_client *client,
+ struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ s32 rc;
+ int burst_count, bytes2read, size = 0;
+
+ while (size < count &&
+ i2c_nuvoton_wait_for_data_avail(chip,
+ chip->vendor.timeout_c,
+ &chip->vendor.read_queue) == 0) {
+ burst_count = i2c_nuvoton_get_burstcount(client, chip);
+ if (burst_count < 0) {
+ dev_err(chip->dev,
+ "%s() fail to read burstCount=%d\n", __func__,
+ burst_count);
+ return -EIO;
+ }
+ bytes2read = min_t(size_t, burst_count, count - size);
+ rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R,
+ bytes2read, &buf[size]);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "%s() fail on i2c_nuvoton_read_buf()=%d\n",
+ __func__, rc);
+ return -EIO;
+ }
+ dev_dbg(chip->dev, "%s(%d):", __func__, bytes2read);
+ size += bytes2read;
+ }
+
+ return size;
+}
+
+/* Read TPM command results */
+static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct device *dev = chip->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ s32 rc;
+ int expected, status, burst_count, retries, size = 0;
+
+ if (count < TPM_HEADER_SIZE) {
+ i2c_nuvoton_ready(chip); /* return to idle */
+ dev_err(dev, "%s() count < header size\n", __func__);
+ return -EIO;
+ }
+ for (retries = 0; retries < TPM_RETRY; retries++) {
+ if (retries > 0) {
+ /* if this is not the first trial, set responseRetry */
+ i2c_nuvoton_write_status(client,
+ TPM_STS_RESPONSE_RETRY);
+ }
+ /*
+ * read first available (> 10 bytes), including:
+ * tag, paramsize, and result
+ */
+ status = i2c_nuvoton_wait_for_data_avail(
+ chip, chip->vendor.timeout_c, &chip->vendor.read_queue);
+ if (status != 0) {
+ dev_err(dev, "%s() timeout on dataAvail\n", __func__);
+ size = -ETIMEDOUT;
+ continue;
+ }
+ burst_count = i2c_nuvoton_get_burstcount(client, chip);
+ if (burst_count < 0) {
+ dev_err(dev, "%s() fail to get burstCount\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ size = i2c_nuvoton_recv_data(client, chip, buf,
+ burst_count);
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(dev, "%s() fail to read header\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ /*
+ * convert number of expected bytes field from big endian 32 bit
+ * to machine native
+ */
+ expected = be32_to_cpu(*(__be32 *) (buf + 2));
+ if (expected > count) {
+ dev_err(dev, "%s() expected > count\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ rc = i2c_nuvoton_recv_data(client, chip, &buf[size],
+ expected - size);
+ size += rc;
+ if (rc < 0 || size < expected) {
+ dev_err(dev, "%s() fail to read remainder of result\n",
+ __func__);
+ size = -EIO;
+ continue;
+ }
+ if (i2c_nuvoton_wait_for_stat(
+ chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL,
+ TPM_STS_VALID, chip->vendor.timeout_c,
+ NULL)) {
+ dev_err(dev, "%s() error left over data\n", __func__);
+ size = -ETIMEDOUT;
+ continue;
+ }
+ break;
+ }
+ i2c_nuvoton_ready(chip);
+ dev_dbg(chip->dev, "%s() -> %d\n", __func__, size);
+ return size;
+}
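
The offset in be32_to_cpu(*(__be32 *)(buf + 2)) comes from the TPM 1.2 response header: a 2-byte tag, a 4-byte big-endian paramSize giving the total response length, and a 4-byte return code, 10 bytes in all (TPM_HEADER_SIZE). A small user-space sketch of that parse, using memcpy to avoid unaligned accesses:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>          /* ntohs()/ntohl() */

    struct tpm_resp_header {
            uint16_t tag;           /* offset 0 */
            uint32_t length;        /* offset 2: total size incl. header */
            uint32_t return_code;   /* offset 6 */
    };

    static struct tpm_resp_header parse_resp_header(const uint8_t *buf)
    {
            struct tpm_resp_header h;
            uint16_t t16;
            uint32_t t32;

            memcpy(&t16, buf, 2);      h.tag = ntohs(t16);
            memcpy(&t32, buf + 2, 4);  h.length = ntohl(t32);
            memcpy(&t32, buf + 6, 4);  h.return_code = ntohl(t32);
            return h;
    }
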
+
+/*
+ * Send TPM command.
+ *
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct device *dev = chip->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ u32 ordinal;
+ size_t count = 0;
+ int burst_count, bytes2write, retries, rc = -EIO;
+
+ for (retries = 0; retries < TPM_RETRY; retries++) {
+ i2c_nuvoton_ready(chip);
+ if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY,
+ TPM_STS_COMMAND_READY,
+ chip->vendor.timeout_b, NULL)) {
+ dev_err(dev, "%s() timeout on commandReady\n",
+ __func__);
+ rc = -EIO;
+ continue;
+ }
+ rc = 0;
+ while (count < len - 1) {
+ burst_count = i2c_nuvoton_get_burstcount(client,
+ chip);
+ if (burst_count < 0) {
+ dev_err(dev, "%s() fail get burstCount\n",
+ __func__);
+ rc = -EIO;
+ break;
+ }
+ bytes2write = min_t(size_t, burst_count,
+ len - 1 - count);
+ rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W,
+ bytes2write, &buf[count]);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail i2cWriteBuf\n",
+ __func__);
+ break;
+ }
+ dev_dbg(dev, "%s(%d):", __func__, bytes2write);
+ count += bytes2write;
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_VALID |
+ TPM_STS_EXPECT,
+ TPM_STS_VALID |
+ TPM_STS_EXPECT,
+ chip->vendor.timeout_c,
+ NULL);
+ if (rc < 0) {
+ dev_err(dev, "%s() timeout on Expect\n",
+ __func__);
+ rc = -ETIMEDOUT;
+ break;
+ }
+ }
+ if (rc < 0)
+ continue;
+
+ /* write last byte */
+ rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, 1,
+ &buf[count]);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail to write last byte\n",
+ __func__);
+ rc = -EIO;
+ continue;
+ }
+ dev_dbg(dev, "%s(last): %02x", __func__, buf[count]);
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_VALID | TPM_STS_EXPECT,
+ TPM_STS_VALID,
+ chip->vendor.timeout_c, NULL);
+ if (rc) {
+ dev_err(dev, "%s() timeout on Expect to clear\n",
+ __func__);
+ rc = -ETIMEDOUT;
+ continue;
+ }
+ break;
+ }
+ if (rc < 0) {
+ /* retries == TPM_RETRY */
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+ /* execute the TPM command */
+ rc = i2c_nuvoton_write_status(client, TPM_STS_GO);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail to write Go\n", __func__);
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+ rc = i2c_nuvoton_wait_for_data_avail(chip,
+ tpm_calc_ordinal_duration(chip,
+ ordinal),
+ &chip->vendor.read_queue);
+ if (rc) {
+ dev_err(dev, "%s() timeout command duration\n", __func__);
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+
+ dev_dbg(dev, "%s() -> %zd\n", __func__, len);
+ return len;
+}
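
The buf + 6 read above mirrors the command side of the same header: tag at offset 0, total length at offset 2, and the 4-byte big-endian ordinal (command code) at offset 6, which tpm_calc_ordinal_duration() uses to pick the per-command timeout. Layout only, as a sketch:

    #include <stdint.h>

    /* TPM 1.2 command header as it sits on the wire (big endian). */
    struct tpm_cmd_header {
            uint16_t tag;       /* offset 0 */
            uint32_t length;    /* offset 2: total command size */
            uint32_t ordinal;   /* offset 6: selects the duration class */
    } __attribute__((packed));
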
+
+static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct file_operations i2c_nuvoton_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_read,
+ .write = tpm_write,
+ .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
+
+static struct attribute *i2c_nuvoton_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_active.attr,
+ &dev_attr_owned.attr,
+ &dev_attr_temp_deactivated.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr,
+ &dev_attr_durations.attr,
+ &dev_attr_timeouts.attr,
+ NULL,
+};
+
+static struct attribute_group i2c_nuvoton_attr_grp = {
+ .attrs = i2c_nuvoton_attrs
+};
+
+static const struct tpm_vendor_specific tpm_i2c = {
+ .status = i2c_nuvoton_read_status,
+ .recv = i2c_nuvoton_recv,
+ .send = i2c_nuvoton_send,
+ .cancel = i2c_nuvoton_ready,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = i2c_nuvoton_req_canceled,
+ .attr_group = &i2c_nuvoton_attr_grp,
+ .miscdev.fops = &i2c_nuvoton_ops,
+};
+
+/* The only purpose for the handler is to signal to any waiting threads that
+ * the interrupt is currently being asserted. The driver does not do any
+ * processing triggered by interrupts, and the chip provides no way to mask at
+ * the source (plus that would be slow over I2C). Run the IRQ as a one-shot,
+ * this means it cannot be shared. */
+static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ struct priv_data *priv = chip->vendor.priv;
+
+ priv->intrs++;
+ wake_up(&chip->vendor.read_queue);
+ disable_irq_nosync(chip->vendor.irq);
+ return IRQ_HANDLED;
+}
+
+static int get_vid(struct i2c_client *client, u32 *res)
+{
+ static const u8 vid_did_rid_value[] = { 0x50, 0x10, 0xfe };
+ u32 temp;
+ s32 rc;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+ rc = i2c_nuvoton_read_buf(client, TPM_VID_DID_RID, 4, (u8 *)&temp);
+ if (rc < 0)
+ return rc;
+
+ /* check WPCT301 values - ignore RID */
+ if (memcmp(&temp, vid_did_rid_value, sizeof(vid_did_rid_value))) {
+ /*
+ * f/w rev 2.81 has an issue where the VID_DID_RID is not
+ * reporting the right value. so give it another chance at
+ * offset 0x20 (FIFO_W).
+ */
+ rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_W, 4,
+ (u8 *) (&temp));
+ if (rc < 0)
+ return rc;
+
+ /* check WPCT301 values - ignore RID */
+ if (memcmp(&temp, vid_did_rid_value,
+ sizeof(vid_did_rid_value)))
+ return -ENODEV;
+ }
+
+ *res = temp;
+ return 0;
+}
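
The probe routine below prints this register with (u16) vid, (u8)(vid >> 16) and (u8)(vid >> 24), so on a little-endian host the four bytes read over I2C decompose into VID (low 16 bits), DID and RID. A standalone sketch using the WPCT301 bytes checked above (the RID byte is a placeholder, since the driver ignores it):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint8_t raw[4] = { 0x50, 0x10, 0xfe, 0x00 };  /* as read over I2C */
            uint32_t vid;

            memcpy(&vid, raw, sizeof(vid));       /* little-endian host assumed */
            printf("VID: %04X DID: %02X RID: %02X\n",
                   (uint16_t)vid, (uint8_t)(vid >> 16), (uint8_t)(vid >> 24));
            return 0;
    }
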
+
+static int i2c_nuvoton_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc;
+ struct tpm_chip *chip;
+ struct device *dev = &client->dev;
+ u32 vid = 0;
+
+ rc = get_vid(client, &vid);
+ if (rc)
+ return rc;
+
+ dev_info(dev, "VID: %04X DID: %02X RID: %02X\n", (u16) vid,
+ (u8) (vid >> 16), (u8) (vid >> 24));
+
+ chip = tpm_register_hardware(dev, &tpm_i2c);
+ if (!chip) {
+ dev_err(dev, "%s() error in tpm_register_hardware\n", __func__);
+ return -ENODEV;
+ }
+
+ chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
+ GFP_KERNEL);
+ init_waitqueue_head(&chip->vendor.read_queue);
+ init_waitqueue_head(&chip->vendor.int_queue);
+
+ /* Default timeouts */
+ chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+ chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+
+ /*
+	 * I2C intfcaps (interrupt capabilities) in the chip are hard coded to:
+	 * TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT
+	 * The IRQ should be set in the i2c_board_info (which is done
+	 * automatically in of_i2c_register_devices, for device tree users) */
+ chip->vendor.irq = client->irq;
+
+ if (chip->vendor.irq) {
+ dev_dbg(dev, "%s() chip-vendor.irq\n", __func__);
+ rc = devm_request_irq(dev, chip->vendor.irq,
+ i2c_nuvoton_int_handler,
+ IRQF_TRIGGER_LOW,
+ chip->vendor.miscdev.name,
+ chip);
+ if (rc) {
+ dev_err(dev, "%s() Unable to request irq: %d for use\n",
+ __func__, chip->vendor.irq);
+ chip->vendor.irq = 0;
+ } else {
+ /* Clear any pending interrupt */
+ i2c_nuvoton_ready(chip);
+ /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_COMMAND_READY,
+ TPM_STS_COMMAND_READY,
+ chip->vendor.timeout_b,
+ NULL);
+ if (rc == 0) {
+ /*
+ * TIS is in ready state
+ * write dummy byte to enter reception state
+ * TPM_DATA_FIFO_W <- rc (0)
+ */
+ rc = i2c_nuvoton_write_buf(client,
+ TPM_DATA_FIFO_W,
+ 1, (u8 *) (&rc));
+ if (rc < 0)
+ goto out_err;
+ /* TPM_STS <- 0x40 (commandReady) */
+ i2c_nuvoton_ready(chip);
+ } else {
+ /*
+ * timeout_b reached - command was
+ * aborted. TIS should now be in idle state -
+ * only TPM_STS_VALID should be set
+ */
+ if (i2c_nuvoton_read_status(chip) !=
+ TPM_STS_VALID) {
+ rc = -EIO;
+ goto out_err;
+ }
+ }
+ }
+ }
+
+ if (tpm_get_timeouts(chip)) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ if (tpm_do_selftest(chip)) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(chip->dev);
+ return rc;
+}
+
+static int i2c_nuvoton_remove(struct i2c_client *client)
+{
+ struct device *dev = &(client->dev);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip)
+ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(dev);
+ kfree(chip);
+ return 0;
+}
+
+
+static const struct i2c_device_id i2c_nuvoton_id[] = {
+ {I2C_DRIVER_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_nuvoton_of_match[] = {
+ {.compatible = "nuvoton,npct501"},
+ {.compatible = "winbond,wpct301"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver i2c_nuvoton_driver = {
+ .id_table = i2c_nuvoton_id,
+ .probe = i2c_nuvoton_probe,
+ .remove = i2c_nuvoton_remove,
+ .driver = {
+ .name = I2C_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &i2c_nuvoton_pm_ops,
+ .of_match_table = of_match_ptr(i2c_nuvoton_of_match),
+ },
+};
+
+module_i2c_driver(i2c_nuvoton_driver);
+
+MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)");
+MODULE_DESCRIPTION("Nuvoton TPM I2C Driver");
+MODULE_LICENSE("GPL");
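
For platforms without device tree, the comment in the probe path notes that the IRQ arrives through i2c_board_info. A hypothetical board-file sketch of that instantiation (the I2C address 0x57 and the IRQ number are examples only, not taken from this patch):

    #include <linux/init.h>
    #include <linux/i2c.h>

    static struct i2c_board_info tpm_i2c_devices[] __initdata = {
            {
                    I2C_BOARD_INFO("tpm_i2c_nuvoton", 0x57), /* example address */
                    .irq = 42,                               /* example SINT# IRQ */
            },
    };

    /* registered early from board code, e.g.:
     *   i2c_register_board_info(0, tpm_i2c_devices,
     *                           ARRAY_SIZE(tpm_i2c_devices));
     */
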
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
index 5bb8e2ddd3b3..a0d6ceb5d005 100644
--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
@@ -584,7 +584,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static struct attribute *stm_tpm_attrs[] = {
@@ -746,8 +746,6 @@ tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
tpm_get_timeouts(chip);
- i2c_set_clientdata(client, chip);
-
dev_info(chip->dev, "TPM I2C Initialized\n");
return 0;
_irq_set:
@@ -807,24 +805,18 @@ static int tpm_st33_i2c_remove(struct i2c_client *client)
#ifdef CONFIG_PM_SLEEP
/*
* tpm_st33_i2c_pm_suspend suspend the TPM device
- * Added: Work around when suspend and no tpm application is running, suspend
- * may fail because chip->data_buffer is not set (only set in tpm_open in Linux
- * TPM core)
* @param: client, the i2c_client drescription (TPM I2C description).
* @param: mesg, the power management message.
* @return: 0 in case of success.
*/
static int tpm_st33_i2c_pm_suspend(struct device *dev)
{
- struct tpm_chip *chip = dev_get_drvdata(dev);
struct st33zp24_platform_data *pin_infos = dev->platform_data;
int ret = 0;
if (power_mgt) {
gpio_set_value(pin_infos->io_lpcpd, 0);
} else {
- if (chip->data_buffer == NULL)
- chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
ret = tpm_pm_suspend(dev);
}
return ret;
@@ -849,8 +841,6 @@ static int tpm_st33_i2c_pm_resume(struct device *dev)
TPM_STS_VALID) == TPM_STS_VALID,
chip->vendor.timeout_b);
} else {
- if (chip->data_buffer == NULL)
- chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
ret = tpm_pm_resume(dev);
if (!ret)
tpm_do_selftest(chip);
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 56b07c35a13e..2783a42aa732 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -98,7 +98,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
if (count < len) {
dev_err(ibmvtpm->dev,
- "Invalid size in recv: count=%ld, crq_size=%d\n",
+ "Invalid size in recv: count=%zd, crq_size=%d\n",
count, len);
return -EIO;
}
@@ -136,7 +136,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
if (count > ibmvtpm->rtce_size) {
dev_err(ibmvtpm->dev,
- "Invalid size in send: count=%ld, rtce_size=%d\n",
+ "Invalid size in send: count=%zd, rtce_size=%d\n",
count, ibmvtpm->rtce_size);
return -EIO;
}
@@ -419,7 +419,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 2168d15bc728..8e562dc65601 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -452,12 +452,8 @@ int tpm_add_ppi(struct kobject *parent)
{
return sysfs_create_group(parent, &ppi_attr_grp);
}
-EXPORT_SYMBOL_GPL(tpm_add_ppi);
void tpm_remove_ppi(struct kobject *parent)
{
sysfs_remove_group(parent, &ppi_attr_grp);
}
-EXPORT_SYMBOL_GPL(tpm_remove_ppi);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 5796d0157ce0..1b74459c0723 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -448,7 +448,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 94c280d36e8b..c8ff4df81779 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -351,8 +351,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
tpm_get_timeouts(priv->chip);
- dev_set_drvdata(&dev->dev, priv->chip);
-
return rv;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index b79cf3e1b793..feea87cc6b8f 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -577,7 +577,8 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
spin_lock(&portdev->c_ovq_lock);
if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) {
virtqueue_kick(vq);
- while (!virtqueue_get_buf(vq, &len))
+ while (!virtqueue_get_buf(vq, &len)
+ && !virtqueue_is_broken(vq))
cpu_relax();
}
spin_unlock(&portdev->c_ovq_lock);
@@ -650,7 +651,8 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
* we need to kmalloc a GFP_ATOMIC buffer each time the
* console driver writes something out.
*/
- while (!virtqueue_get_buf(out_vq, &len))
+ while (!virtqueue_get_buf(out_vq, &len)
+ && !virtqueue_is_broken(out_vq))
cpu_relax();
done:
spin_unlock_irqrestore(&port->outvq_lock, flags);
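
Both hunks above add the same guard: the sender busy-waits for its buffer to come back from the host, and without the virtqueue_is_broken() test a dead or surprise-removed backend would leave that loop spinning forever. A generic, hedged sketch of the pattern (get_used_buf() and queue_is_broken() are hypothetical stand-ins for the virtio calls):

    #include <stdbool.h>
    #include <stddef.h>

    extern void *get_used_buf(unsigned int *len);   /* hypothetical */
    extern bool queue_is_broken(void);              /* hypothetical */

    static void *wait_for_used_buf(void)
    {
            void *buf;
            unsigned int len;

            /* spin for the completion, but bail out if the device died */
            while (!(buf = get_used_buf(&len)) && !queue_is_broken())
                    ;                               /* cpu_relax() in the kernel */

            return buf;                             /* NULL if the queue broke */
    }
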
@@ -1837,12 +1839,8 @@ static void config_intr(struct virtio_device *vdev)
struct port *port;
u16 rows, cols;
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, cols),
- &cols, sizeof(u16));
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, rows),
- &rows, sizeof(u16));
+ virtio_cread(vdev, struct virtio_console_config, cols, &cols);
+ virtio_cread(vdev, struct virtio_console_config, rows, &rows);
port = find_port_by_id(portdev, 0);
set_console_size(port, rows, cols);
@@ -2014,10 +2012,9 @@ static int virtcons_probe(struct virtio_device *vdev)
/* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
if (!is_rproc_serial(vdev) &&
- virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
- offsetof(struct virtio_console_config,
- max_nr_ports),
- &portdev->config.max_nr_ports) == 0) {
+ virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+ struct virtio_console_config, max_nr_ports,
+ &portdev->config.max_nr_ports) == 0) {
multiport = true;
}
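
Both conversions above replace hand-written offsetof() + config->get() calls with the typed virtio_cread()/virtio_cread_feature() helpers. A standalone sketch of the offset arithmetic the helpers hide (read_config() is a hypothetical stand-in for the transport's get() hook):

    #include <stddef.h>
    #include <stdint.h>

    struct console_config {
            uint16_t cols;
            uint16_t rows;
            uint32_t max_nr_ports;
    };

    /* hypothetical: copy 'len' bytes of device config starting at 'off' */
    extern void read_config(void *dst, size_t off, size_t len);

    static uint16_t read_cols(void)
    {
            uint16_t cols;

            read_config(&cols, offsetof(struct console_config, cols),
                        sizeof(cols));
            return cols;
    }
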
@@ -2142,7 +2139,7 @@ static struct virtio_device_id rproc_serial_id_table[] = {
static unsigned int rproc_serial_features[] = {
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtcons_freeze(struct virtio_device *vdev)
{
struct ports_device *portdev;
@@ -2220,7 +2217,7 @@ static struct virtio_driver virtio_console = {
.probe = virtcons_probe,
.remove = virtcons_remove,
.config_changed = config_intr,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtcons_freeze,
.restore = virtcons_restore,
#endif
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 7b111062ccba..d25da26ad113 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_ARCH_ZYNQ) += zynq/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_PLAT_SAMSUNG) += samsung/
+obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += shmobile/
obj-$(CONFIG_X86) += x86/
diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c
index 5fb4ff53d088..6b950ca8b711 100644
--- a/drivers/clk/clk-bcm2835.c
+++ b/drivers/clk/clk-bcm2835.c
@@ -20,14 +20,8 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/bcm2835.h>
-#include <linux/clk-provider.h>
#include <linux/of.h>
-static const struct of_device_id clk_match[] __initconst = {
- { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
- { }
-};
-
/*
* These are fixed clocks. They're probably not all root clocks and it may
* be possible to turn them on and off but until this is mapped out better
@@ -63,6 +57,4 @@ void __init bcm2835_init_clocks(void)
ret = clk_register_clkdev(clk, NULL, "20215000.uart");
if (ret)
pr_err("uart1_pclk alias not registered\n");
-
- of_clk_init(clk_match);
}
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 0e1d89b4321b..d9e3f671c2ea 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -117,7 +117,7 @@ void __init of_fixed_factor_clk_setup(struct device_node *node)
}
if (of_property_read_u32(node, "clock-mult", &mult)) {
- pr_err("%s Fixed factor clock <%s> must have a clokc-mult property\n",
+ pr_err("%s Fixed factor clock <%s> must have a clock-mult property\n",
__func__, node->name);
return;
}
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 2e08cb001936..2e7e9d9798cb 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -20,8 +20,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
-
-extern void __iomem *sregs_base;
+#include <linux/of_address.h>
#define HB_PLL_LOCK_500 0x20000000
#define HB_PLL_LOCK 0x10000000
@@ -280,6 +279,7 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
const char *clk_name = node->name;
const char *parent_name;
struct clk_init_data init;
+ struct device_node *srnp;
int rc;
rc = of_property_read_u32(node, "reg", &reg);
@@ -290,7 +290,11 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
if (WARN_ON(!hb_clk))
return NULL;
- hb_clk->reg = sregs_base + reg;
+ /* Map system registers */
+ srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
+ hb_clk->reg = of_iomap(srnp, 0);
+ BUG_ON(!hb_clk->reg);
+ hb_clk->reg += reg;
of_property_read_string(node, "clock-output-names", &clk_name);
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 51410c2ac2cb..6a934a5296bd 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -27,6 +27,14 @@
*/
#define SRC_CR 0x00U
+#define SRC_CR_T0_ENSEL BIT(15)
+#define SRC_CR_T1_ENSEL BIT(17)
+#define SRC_CR_T2_ENSEL BIT(19)
+#define SRC_CR_T3_ENSEL BIT(21)
+#define SRC_CR_T4_ENSEL BIT(23)
+#define SRC_CR_T5_ENSEL BIT(25)
+#define SRC_CR_T6_ENSEL BIT(27)
+#define SRC_CR_T7_ENSEL BIT(29)
#define SRC_XTALCR 0x0CU
#define SRC_XTALCR_XTALTIMEN BIT(20)
#define SRC_XTALCR_SXTALDIS BIT(19)
@@ -54,6 +62,79 @@ static DEFINE_SPINLOCK(src_lock);
/* Base address of the SRC */
static void __iomem *src_base;
+static int nomadik_clk_reboot_handler(struct notifier_block *this,
+ unsigned long code,
+ void *unused)
+{
+ u32 val;
+
+	/* The main crystal needs to be enabled for reboot to work */
+ val = readl(src_base + SRC_XTALCR);
+ val &= ~SRC_XTALCR_MXTALOVER;
+ val |= SRC_XTALCR_MXTALEN;
+ pr_crit("force-enabling MXTALO\n");
+ writel(val, src_base + SRC_XTALCR);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nomadik_clk_reboot_notifier = {
+ .notifier_call = nomadik_clk_reboot_handler,
+};
+
+static const struct of_device_id nomadik_src_match[] __initconst = {
+ { .compatible = "stericsson,nomadik-src" },
+ { /* sentinel */ }
+};
+
+static void __init nomadik_src_init(void)
+{
+ struct device_node *np;
+ u32 val;
+
+ np = of_find_matching_node(NULL, nomadik_src_match);
+ if (!np) {
+ pr_crit("no matching node for SRC, aborting clock init\n");
+ return;
+ }
+ src_base = of_iomap(np, 0);
+ if (!src_base) {
+ pr_err("%s: must have src parent node with REGS (%s)\n",
+ __func__, np->name);
+ return;
+ }
+
+ /* Set all timers to use the 2.4 MHz TIMCLK */
+ val = readl(src_base + SRC_CR);
+ val |= SRC_CR_T0_ENSEL;
+ val |= SRC_CR_T1_ENSEL;
+ val |= SRC_CR_T2_ENSEL;
+ val |= SRC_CR_T3_ENSEL;
+ val |= SRC_CR_T4_ENSEL;
+ val |= SRC_CR_T5_ENSEL;
+ val |= SRC_CR_T6_ENSEL;
+ val |= SRC_CR_T7_ENSEL;
+ writel(val, src_base + SRC_CR);
+
+ val = readl(src_base + SRC_XTALCR);
+ pr_info("SXTALO is %s\n",
+ (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
+ pr_info("MXTAL is %s\n",
+ (val & SRC_XTALCR_MXTALSTAT) ? "enabled" : "disabled");
+ if (of_property_read_bool(np, "disable-sxtalo")) {
+ /* The machine uses an external oscillator circuit */
+ val |= SRC_XTALCR_SXTALDIS;
+ pr_info("disabling SXTALO\n");
+ }
+ if (of_property_read_bool(np, "disable-mxtalo")) {
+ /* Disable this too: also run by external oscillator */
+ val |= SRC_XTALCR_MXTALOVER;
+ val &= ~SRC_XTALCR_MXTALEN;
+ pr_info("disabling MXTALO\n");
+ }
+ writel(val, src_base + SRC_XTALCR);
+ register_reboot_notifier(&nomadik_clk_reboot_notifier);
+}
+
/**
* struct clk_pll1 - Nomadik PLL1 clock
* @hw: corresponding clock hardware entry
@@ -431,6 +512,9 @@ static void __init of_nomadik_pll_setup(struct device_node *np)
const char *parent_name;
u32 pll_id;
+ if (!src_base)
+ nomadik_src_init();
+
if (of_property_read_u32(np, "pll-id", &pll_id)) {
pr_err("%s: PLL \"%s\" missing pll-id property\n",
__func__, clk_name);
@@ -441,6 +525,8 @@ static void __init of_nomadik_pll_setup(struct device_node *np)
if (!IS_ERR(clk))
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(nomadik_pll_clk,
+ "st,nomadik-pll-clock", of_nomadik_pll_setup);
static void __init of_nomadik_hclk_setup(struct device_node *np)
{
@@ -448,6 +534,9 @@ static void __init of_nomadik_hclk_setup(struct device_node *np)
const char *clk_name = np->name;
const char *parent_name;
+ if (!src_base)
+ nomadik_src_init();
+
parent_name = of_clk_get_parent_name(np, 0);
/*
* The HCLK divides PLL1 with 1 (passthru), 2, 3 or 4.
@@ -460,6 +549,8 @@ static void __init of_nomadik_hclk_setup(struct device_node *np)
if (!IS_ERR(clk))
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(nomadik_hclk_clk,
+ "st,nomadik-hclk-clock", of_nomadik_hclk_setup);
static void __init of_nomadik_src_clk_setup(struct device_node *np)
{
@@ -468,6 +559,9 @@ static void __init of_nomadik_src_clk_setup(struct device_node *np)
const char *parent_name;
u32 clk_id;
+ if (!src_base)
+ nomadik_src_init();
+
if (of_property_read_u32(np, "clock-id", &clk_id)) {
pr_err("%s: SRC clock \"%s\" missing clock-id property\n",
__func__, clk_name);
@@ -478,89 +572,5 @@ static void __init of_nomadik_src_clk_setup(struct device_node *np)
if (!IS_ERR(clk))
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
-
-static const struct of_device_id nomadik_src_match[] __initconst = {
- { .compatible = "stericsson,nomadik-src" },
- { /* sentinel */ }
-};
-
-static const struct of_device_id nomadik_src_clk_match[] __initconst = {
- {
- .compatible = "fixed-clock",
- .data = of_fixed_clk_setup,
- },
- {
- .compatible = "fixed-factor-clock",
- .data = of_fixed_factor_clk_setup,
- },
- {
- .compatible = "st,nomadik-pll-clock",
- .data = of_nomadik_pll_setup,
- },
- {
- .compatible = "st,nomadik-hclk-clock",
- .data = of_nomadik_hclk_setup,
- },
- {
- .compatible = "st,nomadik-src-clock",
- .data = of_nomadik_src_clk_setup,
- },
- { /* sentinel */ }
-};
-
-static int nomadik_clk_reboot_handler(struct notifier_block *this,
- unsigned long code,
- void *unused)
-{
- u32 val;
-
-	/* The main crystal needs to be enabled for reboot to work */
- val = readl(src_base + SRC_XTALCR);
- val &= ~SRC_XTALCR_MXTALOVER;
- val |= SRC_XTALCR_MXTALEN;
- pr_crit("force-enabling MXTALO\n");
- writel(val, src_base + SRC_XTALCR);
- return NOTIFY_OK;
-}
-
-static struct notifier_block nomadik_clk_reboot_notifier = {
- .notifier_call = nomadik_clk_reboot_handler,
-};
-
-void __init nomadik_clk_init(void)
-{
- struct device_node *np;
- u32 val;
-
- np = of_find_matching_node(NULL, nomadik_src_match);
- if (!np) {
- pr_crit("no matching node for SRC, aborting clock init\n");
- return;
- }
- src_base = of_iomap(np, 0);
- if (!src_base) {
- pr_err("%s: must have src parent node with REGS (%s)\n",
- __func__, np->name);
- return;
- }
- val = readl(src_base + SRC_XTALCR);
- pr_info("SXTALO is %s\n",
- (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
- pr_info("MXTAL is %s\n",
- (val & SRC_XTALCR_MXTALSTAT) ? "enabled" : "disabled");
- if (of_property_read_bool(np, "disable-sxtalo")) {
- /* The machine uses an external oscillator circuit */
- val |= SRC_XTALCR_SXTALDIS;
- pr_info("disabling SXTALO\n");
- }
- if (of_property_read_bool(np, "disable-mxtalo")) {
- /* Disable this too: also run by external oscillator */
- val |= SRC_XTALCR_MXTALOVER;
- val &= ~SRC_XTALCR_MXTALEN;
- pr_info("disabling MXTALO\n");
- }
- writel(val, src_base + SRC_XTALCR);
- register_reboot_notifier(&nomadik_clk_reboot_notifier);
-
- of_clk_init(nomadik_src_clk_match);
-}
+CLK_OF_DECLARE(nomadik_src_clk,
+ "st,nomadik-src-clock", of_nomadik_src_clk_setup);
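
This file, like the prima2, i.MX23/28, shmobile and sunxi changes that follow, drops its hand-rolled of_device_id table and explicit of_clk_init() call and instead registers each setup routine with CLK_OF_DECLARE(), which places the callback in a linker table keyed on the compatible string so the core's of_clk_init(NULL) can find it. A minimal hedged sketch of the pattern for a made-up "acme,fixed-gate" binding, modeled on the gate registration the EMEV2 driver uses later in this diff:

    #include <linux/clk-provider.h>
    #include <linux/err.h>
    #include <linux/init.h>
    #include <linux/of.h>
    #include <linux/of_address.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(acme_lock);

    static void __init acme_gate_setup(struct device_node *np)
    {
            void __iomem *reg = of_iomap(np, 0);
            struct clk *clk;

            if (!reg)
                    return;

            /* single gate bit at bit 0 of the mapped register */
            clk = clk_register_gate(NULL, np->name,
                                    of_clk_get_parent_name(np, 0),
                                    0, reg, 0, 0, &acme_lock);
            if (!IS_ERR(clk))
                    of_clk_add_provider(np, of_clk_src_simple_get, clk);
    }
    CLK_OF_DECLARE(acme_gate, "acme,fixed-gate", acme_gate_setup);
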
diff --git a/drivers/clk/clk-prima2.c b/drivers/clk/clk-prima2.c
index 5ab95f1ad579..6c15e3316137 100644
--- a/drivers/clk/clk-prima2.c
+++ b/drivers/clk/clk-prima2.c
@@ -1015,16 +1015,6 @@ static struct clk_std clk_usb1 = {
},
};
-static struct of_device_id clkc_ids[] = {
- { .compatible = "sirf,prima2-clkc" },
- {},
-};
-
-static struct of_device_id rsc_ids[] = {
- { .compatible = "sirf,prima2-rsc" },
- {},
-};
-
enum prima2_clk_index {
/* 0 1 2 3 4 5 6 7 8 9 */
rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
@@ -1082,24 +1072,16 @@ static struct clk_hw *prima2_clk_hw_array[maxclk] __initdata = {
static struct clk *prima2_clks[maxclk];
static struct clk_onecell_data clk_data;
-void __init sirfsoc_of_clk_init(void)
+static void __init sirfsoc_clk_init(struct device_node *np)
{
- struct device_node *np;
+ struct device_node *rscnp;
int i;
- np = of_find_matching_node(NULL, rsc_ids);
- if (!np)
- panic("unable to find compatible rsc node in dtb\n");
-
- sirfsoc_rsc_vbase = of_iomap(np, 0);
+ rscnp = of_find_compatible_node(NULL, NULL, "sirf,prima2-rsc");
+ sirfsoc_rsc_vbase = of_iomap(rscnp, 0);
if (!sirfsoc_rsc_vbase)
panic("unable to map rsc registers\n");
-
- of_node_put(np);
-
- np = of_find_matching_node(NULL, clkc_ids);
- if (!np)
- return;
+ of_node_put(rscnp);
sirfsoc_clk_vbase = of_iomap(np, 0);
if (!sirfsoc_clk_vbase)
@@ -1124,3 +1106,4 @@ void __init sirfsoc_of_clk_init(void)
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
+CLK_OF_DECLARE(sirfsoc_clk, "sirf,prima2-clkc", sirfsoc_clk_init);
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index 82306f5fb9c2..7fd5c5e9e25d 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -15,11 +15,14 @@
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#define LEGACY_PMC_BASE 0xD8130000
+
/* All clocks share the same lock as none can be changed concurrently */
static DEFINE_SPINLOCK(_lock);
@@ -53,6 +56,21 @@ struct clk_pll {
static void __iomem *pmc_base;
+static __init void vtwm_set_pmc_base(void)
+{
+ struct device_node *np =
+ of_find_compatible_node(NULL, NULL, "via,vt8500-pmc");
+
+ if (np)
+ pmc_base = of_iomap(np, 0);
+ else
+ pmc_base = ioremap(LEGACY_PMC_BASE, 0x1000);
+ of_node_put(np);
+
+ if (!pmc_base)
+ pr_err("%s:of_iomap(pmc) failed\n", __func__);
+}
+
#define to_clk_device(_hw) container_of(_hw, struct clk_device, hw)
#define VT8500_PMC_BUSY_MASK 0x18
@@ -222,6 +240,9 @@ static __init void vtwm_device_clk_init(struct device_node *node)
int rc;
int clk_init_flags = 0;
+ if (!pmc_base)
+ vtwm_set_pmc_base();
+
dev_clk = kzalloc(sizeof(*dev_clk), GFP_KERNEL);
if (WARN_ON(!dev_clk))
return;
@@ -636,6 +657,9 @@ static __init void vtwm_pll_clk_init(struct device_node *node, int pll_type)
struct clk_init_data init;
int rc;
+ if (!pmc_base)
+ vtwm_set_pmc_base();
+
rc = of_property_read_u32(node, "reg", &reg);
if (WARN_ON(rc))
return;
@@ -694,13 +718,3 @@ static void __init wm8850_pll_init(struct device_node *node)
vtwm_pll_clk_init(node, PLL_TYPE_WM8850);
}
CLK_OF_DECLARE(wm8850_pll, "wm,wm8850-pll-clock", wm8850_pll_init);
-
-void __init vtwm_clk_init(void __iomem *base)
-{
- if (!base)
- return;
-
- pmc_base = base;
-
- of_clk_init(NULL);
-}
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index fc777bdc1886..81a202d12a7a 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -39,8 +39,8 @@ static const struct coreclk_ratio a370_coreclk_ratios[] __initconst = {
};
static const u32 a370_tclk_freqs[] __initconst = {
- 16600000,
- 20000000,
+ 166000000,
+ 200000000,
};
static u32 __init a370_get_tclk_freq(void __iomem *sar)
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index c396fe361589..9fc9359f5133 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/clk/mxs.h>
#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -100,16 +101,16 @@ static enum imx23_clk clks_init_on[] __initdata = {
cpu, hbus, xbus, emi, uart,
};
-int __init mx23_clocks_init(void)
+static void __init mx23_clocks_init(struct device_node *np)
{
- struct device_node *np;
+ struct device_node *dcnp;
u32 i;
- np = of_find_compatible_node(NULL, NULL, "fsl,imx23-digctl");
- digctrl = of_iomap(np, 0);
+ dcnp = of_find_compatible_node(NULL, NULL, "fsl,imx23-digctl");
+ digctrl = of_iomap(dcnp, 0);
WARN_ON(!digctrl);
+ of_node_put(dcnp);
- np = of_find_compatible_node(NULL, NULL, "fsl,imx23-clkctrl");
clkctrl = of_iomap(np, 0);
WARN_ON(!clkctrl);
@@ -162,7 +163,7 @@ int __init mx23_clocks_init(void)
if (IS_ERR(clks[i])) {
pr_err("i.MX23 clk %d: register failed with %ld\n",
i, PTR_ERR(clks[i]));
- return PTR_ERR(clks[i]);
+ return;
}
clk_data.clks = clks;
@@ -172,5 +173,5 @@ int __init mx23_clocks_init(void)
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clks[clks_init_on[i]]);
- return 0;
}
+CLK_OF_DECLARE(imx23_clkctrl, "fsl,imx23-clkctrl", mx23_clocks_init);
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index 4faf0afc44cd..a6c35010e4e5 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/clk/mxs.h>
#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -154,16 +155,16 @@ static enum imx28_clk clks_init_on[] __initdata = {
cpu, hbus, xbus, emi, uart,
};
-int __init mx28_clocks_init(void)
+static void __init mx28_clocks_init(struct device_node *np)
{
- struct device_node *np;
+ struct device_node *dcnp;
u32 i;
- np = of_find_compatible_node(NULL, NULL, "fsl,imx28-digctl");
- digctrl = of_iomap(np, 0);
+ dcnp = of_find_compatible_node(NULL, NULL, "fsl,imx28-digctl");
+ digctrl = of_iomap(dcnp, 0);
WARN_ON(!digctrl);
+ of_node_put(dcnp);
- np = of_find_compatible_node(NULL, NULL, "fsl,imx28-clkctrl");
clkctrl = of_iomap(np, 0);
WARN_ON(!clkctrl);
@@ -239,7 +240,7 @@ int __init mx28_clocks_init(void)
if (IS_ERR(clks[i])) {
pr_err("i.MX28 clk %d: register failed with %ld\n",
i, PTR_ERR(clks[i]));
- return PTR_ERR(clks[i]);
+ return;
}
clk_data.clks = clks;
@@ -250,6 +251,5 @@ int __init mx28_clocks_init(void)
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clks[clks_init_on[i]]);
-
- return 0;
}
+CLK_OF_DECLARE(imx28_clkctrl, "fsl,imx28-clkctrl", mx28_clocks_init);
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 3413380086d5..8eb4799237f0 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -8,6 +8,4 @@ obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
-ifdef CONFIG_COMMON_CLK
obj-$(CONFIG_ARCH_S3C64XX) += clk-s3c64xx.o
-endif
diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile
new file mode 100644
index 000000000000..2240730f7320
--- /dev/null
+++ b/drivers/clk/shmobile/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o
+# for empty built-in.o
+obj-n := dummy
diff --git a/drivers/clk/shmobile/clk-emev2.c b/drivers/clk/shmobile/clk-emev2.c
new file mode 100644
index 000000000000..6c7c929c7765
--- /dev/null
+++ b/drivers/clk/shmobile/clk-emev2.c
@@ -0,0 +1,104 @@
+/*
+ * EMMA Mobile EV2 common clock framework support
+ *
+ * Copyright (C) 2013 Takashi Yoshii <takashi.yoshii.ze@renesas.com>
+ * Copyright (C) 2012 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+/* EMEV2 SMU registers */
+#define USIAU0_RSTCTRL 0x094
+#define USIBU1_RSTCTRL 0x0ac
+#define USIBU2_RSTCTRL 0x0b0
+#define USIBU3_RSTCTRL 0x0b4
+#define STI_RSTCTRL 0x124
+#define STI_CLKSEL 0x688
+
+static DEFINE_SPINLOCK(lock);
+
+/* not pretty, but hey */
+void __iomem *smu_base;
+
+static void __init emev2_smu_write(unsigned long value, int offs)
+{
+ BUG_ON(!smu_base || (offs >= PAGE_SIZE));
+ writel_relaxed(value, smu_base + offs);
+}
+
+static const struct of_device_id smu_id[] __initconst = {
+ { .compatible = "renesas,emev2-smu", },
+ {},
+};
+
+static void __init emev2_smu_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, smu_id);
+ BUG_ON(!np);
+ smu_base = of_iomap(np, 0);
+ BUG_ON(!smu_base);
+ of_node_put(np);
+
+ /* setup STI timer to run on 32.768 kHz and deassert reset */
+ emev2_smu_write(0, STI_CLKSEL);
+ emev2_smu_write(1, STI_RSTCTRL);
+
+ /* deassert reset for UART0->UART3 */
+ emev2_smu_write(2, USIAU0_RSTCTRL);
+ emev2_smu_write(2, USIBU1_RSTCTRL);
+ emev2_smu_write(2, USIBU2_RSTCTRL);
+ emev2_smu_write(2, USIBU3_RSTCTRL);
+}
+
+static void __init emev2_smu_clkdiv_init(struct device_node *np)
+{
+ u32 reg[2];
+ struct clk *clk;
+ const char *parent_name = of_clk_get_parent_name(np, 0);
+ if (WARN_ON(of_property_read_u32_array(np, "reg", reg, 2)))
+ return;
+ if (!smu_base)
+ emev2_smu_init();
+ clk = clk_register_divider(NULL, np->name, parent_name, 0,
+ smu_base + reg[0], reg[1], 8, 0, &lock);
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, np->name, NULL);
+ pr_debug("## %s %s %p\n", __func__, np->name, clk);
+}
+CLK_OF_DECLARE(emev2_smu_clkdiv, "renesas,emev2-smu-clkdiv",
+ emev2_smu_clkdiv_init);
+
+static void __init emev2_smu_gclk_init(struct device_node *np)
+{
+ u32 reg[2];
+ struct clk *clk;
+ const char *parent_name = of_clk_get_parent_name(np, 0);
+ if (WARN_ON(of_property_read_u32_array(np, "reg", reg, 2)))
+ return;
+ if (!smu_base)
+ emev2_smu_init();
+ clk = clk_register_gate(NULL, np->name, parent_name, 0,
+ smu_base + reg[0], reg[1], 0, &lock);
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, np->name, NULL);
+ pr_debug("## %s %s %p\n", __func__, np->name, clk);
+}
+CLK_OF_DECLARE(emev2_smu_gclk, "renesas,emev2-smu-gclk", emev2_smu_gclk_init);
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 5bb848cac6ec..81dd31a686df 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -49,7 +49,7 @@
#define SOCFPGA_L4_SP_CLK "l4_sp_clk"
#define SOCFPGA_NAND_CLK "nand_clk"
#define SOCFPGA_NAND_X_CLK "nand_x_clk"
-#define SOCFPGA_MMC_CLK "mmc_clk"
+#define SOCFPGA_MMC_CLK "sdmmc_clk"
#define SOCFPGA_DB_CLK "gpio_db_clk"
#define div_mask(width) ((1 << (width)) - 1)
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 34ee69f4d50c..9bbd03514540 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -16,7 +16,6 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
-#include <linux/clk/sunxi.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -617,11 +616,8 @@ static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_mat
}
}
-void __init sunxi_init_clocks(void)
+static void __init sunxi_init_clocks(struct device_node *np)
{
- /* Register all the simple and basic clocks on DT */
- of_clk_init(NULL);
-
/* Register factor clocks */
of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
@@ -634,3 +630,8 @@ void __init sunxi_init_clocks(void)
/* Register gate clocks */
of_sunxi_table_clock_setup(clk_gates_match, sunxi_gates_clk_setup);
}
+CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sunxi_init_clocks);
+CLK_OF_DECLARE(sun5i_a10s_clk_init, "allwinner,sun5i-a10s", sunxi_init_clocks);
+CLK_OF_DECLARE(sun5i_a13_clk_init, "allwinner,sun5i-a13", sunxi_init_clocks);
+CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sunxi_init_clocks);
+CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sunxi_init_clocks);
diff --git a/drivers/clk/ux500/Makefile b/drivers/clk/ux500/Makefile
index c6a806ed0e8c..521483f0ba33 100644
--- a/drivers/clk/ux500/Makefile
+++ b/drivers/clk/ux500/Makefile
@@ -8,6 +8,7 @@ obj-y += clk-prcmu.o
obj-y += clk-sysctrl.o
# Clock definitions
+obj-y += u8500_of_clk.o
obj-y += u8500_clk.o
obj-y += u9540_clk.o
obj-y += u8540_clk.o
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
new file mode 100644
index 000000000000..cdeff299de26
--- /dev/null
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -0,0 +1,559 @@
+/*
+ * Clock definitions for u8500 platform.
+ *
+ * Copyright (C) 2012 ST-Ericsson SA
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/platform_data/clk-ux500.h>
+#include "clk.h"
+
+#define PRCC_NUM_PERIPH_CLUSTERS 6
+#define PRCC_PERIPHS_PER_CLUSTER 32
+
+static struct clk *prcmu_clk[PRCMU_NUM_CLKS];
+static struct clk *prcc_pclk[(PRCC_NUM_PERIPH_CLUSTERS + 1) * PRCC_PERIPHS_PER_CLUSTER];
+static struct clk *prcc_kclk[(PRCC_NUM_PERIPH_CLUSTERS + 1) * PRCC_PERIPHS_PER_CLUSTER];
+
+#define PRCC_SHOW(clk, base, bit) \
+ clk[(base * PRCC_PERIPHS_PER_CLUSTER) + bit]
+#define PRCC_PCLK_STORE(clk, base, bit) \
+ prcc_pclk[(base * PRCC_PERIPHS_PER_CLUSTER) + bit] = clk
+#define PRCC_KCLK_STORE(clk, base, bit) \
+ prcc_kclk[(base * PRCC_PERIPHS_PER_CLUSTER) + bit] = clk
+
+struct clk *ux500_twocell_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct clk **clk_data = data;
+ unsigned int base, bit;
+
+ if (clkspec->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ base = clkspec->args[0];
+ bit = clkspec->args[1];
+
+ if (base != 1 && base != 2 && base != 3 && base != 5 && base != 6) {
+ pr_err("%s: invalid PRCC base %d\n", __func__, base);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return PRCC_SHOW(clk_data, base, bit);
+}
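
ux500_twocell_get() and the PRCC_*_STORE macros above flatten a { cluster, bit } two-cell clock specifier into one array slot per peripheral, 32 slots per cluster. A tiny standalone sketch of that index arithmetic:

    #include <stdio.h>

    #define PERIPHS_PER_CLUSTER 32

    static unsigned int prcc_index(unsigned int base, unsigned int bit)
    {
            return base * PERIPHS_PER_CLUSTER + bit;
    }

    int main(void)
    {
            /* e.g. a <&clks 3 4> phandle resolves to slot 100 */
            printf("cluster 3, bit 4 -> index %u\n", prcc_index(3, 4));
            return 0;
    }
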
+
+static const struct of_device_id u8500_clk_of_match[] = {
+ { .compatible = "stericsson,u8500-clks", },
+ { },
+};
+
+void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
+ u32 clkrst5_base, u32 clkrst6_base)
+{
+ struct prcmu_fw_version *fw_version;
+ struct device_node *np = NULL;
+ struct device_node *child = NULL;
+ const char *sgaclk_parent = NULL;
+ struct clk *clk, *rtc_clk, *twd_clk;
+
+ if (of_have_populated_dt())
+ np = of_find_matching_node(NULL, u8500_clk_of_match);
+ if (!np) {
+ pr_err("Either DT or U8500 Clock node not found\n");
+ return;
+ }
+
+ /* Clock sources */
+ clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
+ CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ prcmu_clk[PRCMU_PLLSOC0] = clk;
+
+ clk = clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1,
+ CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ prcmu_clk[PRCMU_PLLSOC1] = clk;
+
+ clk = clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR,
+ CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ prcmu_clk[PRCMU_PLLDDR] = clk;
+
+ /* FIXME: Add sys, ulp and int clocks here. */
+
+ rtc_clk = clk_register_fixed_rate(NULL, "rtc32k", "NULL",
+ CLK_IS_ROOT|CLK_IGNORE_UNUSED,
+ 32768);
+
+ /* PRCMU clocks */
+ fw_version = prcmu_get_fw_version();
+ if (fw_version != NULL) {
+ switch (fw_version->project) {
+ case PRCMU_FW_PROJECT_U8500_C2:
+ case PRCMU_FW_PROJECT_U8520:
+ case PRCMU_FW_PROJECT_U8420:
+ sgaclk_parent = "soc0_pll";
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (sgaclk_parent)
+ clk = clk_reg_prcmu_gate("sgclk", sgaclk_parent,
+ PRCMU_SGACLK, 0);
+ else
+ clk = clk_reg_prcmu_gate("sgclk", NULL,
+ PRCMU_SGACLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_SGACLK] = clk;
+
+ clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_UARTCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("msp02clk", NULL, PRCMU_MSP02CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_MSP02CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_MSP1CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_I2CCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_SLIMCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_PER1CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_PER2CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_PER3CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_PER5CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_PER6CLK] = clk;
+
+ clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_PER7CLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0,
+ CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_LCDCLK] = clk;
+
+ clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_BMLCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0,
+ CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_HSITXCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0,
+ CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_HSIRXCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0,
+ CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_HDMICLK] = clk;
+
+ clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_APEATCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("apetraceclk", NULL, PRCMU_APETRACECLK,
+ CLK_IS_ROOT);
+ prcmu_clk[PRCMU_APETRACECLK] = clk;
+
+ clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_MCDECLK] = clk;
+
+ clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK,
+ CLK_IS_ROOT);
+ prcmu_clk[PRCMU_IPI2CCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK,
+ CLK_IS_ROOT);
+ prcmu_clk[PRCMU_DSIALTCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_DMACLK] = clk;
+
+ clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_B2R2CLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0,
+ CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_TVCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_SSPCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_RNGCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_UICCCLK] = clk;
+
+ clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, CLK_IS_ROOT);
+ prcmu_clk[PRCMU_TIMCLK] = clk;
+
+ clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL, PRCMU_SDMMCCLK,
+ 100000000,
+ CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_SDMMCCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("dsi_pll", "hdmiclk",
+ PRCMU_PLLDSI, 0, CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_PLLDSI] = clk;
+
+ clk = clk_reg_prcmu_scalable("dsi0clk", "dsi_pll",
+ PRCMU_DSI0CLK, 0, CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_DSI0CLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("dsi1clk", "dsi_pll",
+ PRCMU_DSI1CLK, 0, CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_DSI1CLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("dsi0escclk", "tvclk",
+ PRCMU_DSI0ESCCLK, 0, CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_DSI0ESCCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("dsi1escclk", "tvclk",
+ PRCMU_DSI1ESCCLK, 0, CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_DSI1ESCCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable("dsi2escclk", "tvclk",
+ PRCMU_DSI2ESCCLK, 0, CLK_SET_RATE_GATE);
+ prcmu_clk[PRCMU_DSI2ESCCLK] = clk;
+
+ clk = clk_reg_prcmu_scalable_rate("armss", NULL,
+ PRCMU_ARMSS, 0, CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ prcmu_clk[PRCMU_ARMSS] = clk;
+
+ twd_clk = clk_register_fixed_factor(NULL, "smp_twd", "armss",
+ CLK_IGNORE_UNUSED, 1, 2);
+
+ /*
+ * FIXME: Add special handled PRCMU clocks here:
+ * 1. clkout0yuv, use PRCMU as parent + need regulator + pinctrl.
+ * 2. ab9540_clkout1yuv, see clkout0yuv
+ */
+
+ /* PRCC P-clocks */
+ clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", clkrst1_base,
+ BIT(0), 0);
+ PRCC_PCLK_STORE(clk, 1, 0);
+
+ clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", clkrst1_base,
+ BIT(1), 0);
+ PRCC_PCLK_STORE(clk, 1, 1);
+
+ clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", clkrst1_base,
+ BIT(2), 0);
+ PRCC_PCLK_STORE(clk, 1, 2);
+
+ clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", clkrst1_base,
+ BIT(3), 0);
+ PRCC_PCLK_STORE(clk, 1, 3);
+
+ clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", clkrst1_base,
+ BIT(4), 0);
+ PRCC_PCLK_STORE(clk, 1, 4);
+
+ clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", clkrst1_base,
+ BIT(5), 0);
+ PRCC_PCLK_STORE(clk, 1, 5);
+
+ clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", clkrst1_base,
+ BIT(6), 0);
+ PRCC_PCLK_STORE(clk, 1, 6);
+
+ clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", clkrst1_base,
+ BIT(7), 0);
+ PRCC_PCLK_STORE(clk, 1, 7);
+
+ clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", clkrst1_base,
+ BIT(8), 0);
+ PRCC_PCLK_STORE(clk, 1, 8);
+
+ clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", clkrst1_base,
+ BIT(9), 0);
+ PRCC_PCLK_STORE(clk, 1, 9);
+
+ clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", clkrst1_base,
+ BIT(10), 0);
+ PRCC_PCLK_STORE(clk, 1, 10);
+
+ clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", clkrst1_base,
+ BIT(11), 0);
+ PRCC_PCLK_STORE(clk, 1, 11);
+
+ clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", clkrst2_base,
+ BIT(0), 0);
+ PRCC_PCLK_STORE(clk, 2, 0);
+
+ clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", clkrst2_base,
+ BIT(1), 0);
+ PRCC_PCLK_STORE(clk, 2, 1);
+
+ clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", clkrst2_base,
+ BIT(2), 0);
+ PRCC_PCLK_STORE(clk, 2, 2);
+
+ clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", clkrst2_base,
+ BIT(3), 0);
+ PRCC_PCLK_STORE(clk, 2, 3);
+
+ clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", clkrst2_base,
+ BIT(4), 0);
+ PRCC_PCLK_STORE(clk, 2, 4);
+
+ clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", clkrst2_base,
+ BIT(5), 0);
+ PRCC_PCLK_STORE(clk, 2, 5);
+
+ clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", clkrst2_base,
+ BIT(6), 0);
+ PRCC_PCLK_STORE(clk, 2, 6);
+
+ clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", clkrst2_base,
+ BIT(7), 0);
+ PRCC_PCLK_STORE(clk, 2, 7);
+
+ clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", clkrst2_base,
+ BIT(8), 0);
+ PRCC_PCLK_STORE(clk, 2, 8);
+
+ clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", clkrst2_base,
+ BIT(9), 0);
+ PRCC_PCLK_STORE(clk, 2, 9);
+
+ clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", clkrst2_base,
+ BIT(10), 0);
+ PRCC_PCLK_STORE(clk, 2, 10);
+
+ clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", clkrst2_base,
+ BIT(11), 0);
+ PRCC_PCLK_STORE(clk, 2, 11);
+
+ clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", clkrst2_base,
+ BIT(12), 0);
+ PRCC_PCLK_STORE(clk, 2, 12);
+
+ clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base,
+ BIT(0), 0);
+ PRCC_PCLK_STORE(clk, 3, 0);
+
+ clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base,
+ BIT(1), 0);
+ PRCC_PCLK_STORE(clk, 3, 1);
+
+ clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", clkrst3_base,
+ BIT(2), 0);
+ PRCC_PCLK_STORE(clk, 3, 2);
+
+ clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", clkrst3_base,
+ BIT(3), 0);
+ PRCC_PCLK_STORE(clk, 3, 3);
+
+ clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", clkrst3_base,
+ BIT(4), 0);
+ PRCC_PCLK_STORE(clk, 3, 4);
+
+ clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", clkrst3_base,
+ BIT(5), 0);
+ PRCC_PCLK_STORE(clk, 3, 5);
+
+ clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", clkrst3_base,
+ BIT(6), 0);
+ PRCC_PCLK_STORE(clk, 3, 6);
+
+ clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", clkrst3_base,
+ BIT(7), 0);
+ PRCC_PCLK_STORE(clk, 3, 7);
+
+ clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", clkrst3_base,
+ BIT(8), 0);
+ PRCC_PCLK_STORE(clk, 3, 8);
+
+ clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", clkrst5_base,
+ BIT(0), 0);
+ PRCC_PCLK_STORE(clk, 5, 0);
+
+ clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", clkrst5_base,
+ BIT(1), 0);
+ PRCC_PCLK_STORE(clk, 5, 1);
+
+ clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", clkrst6_base,
+ BIT(0), 0);
+ PRCC_PCLK_STORE(clk, 6, 0);
+
+ clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", clkrst6_base,
+ BIT(1), 0);
+ PRCC_PCLK_STORE(clk, 6, 1);
+
+ clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", clkrst6_base,
+ BIT(2), 0);
+ PRCC_PCLK_STORE(clk, 6, 2);
+
+ clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", clkrst6_base,
+ BIT(3), 0);
+ PRCC_PCLK_STORE(clk, 6, 3);
+
+ clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", clkrst6_base,
+ BIT(4), 0);
+ PRCC_PCLK_STORE(clk, 6, 4);
+
+ clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", clkrst6_base,
+ BIT(5), 0);
+ PRCC_PCLK_STORE(clk, 6, 5);
+
+ clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", clkrst6_base,
+ BIT(6), 0);
+ PRCC_PCLK_STORE(clk, 6, 6);
+
+ clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", clkrst6_base,
+ BIT(7), 0);
+ PRCC_PCLK_STORE(clk, 6, 7);
+
+ /* PRCC K-clocks
+ *
+ * FIXME: Some drivers require PERIPH[n] to be automatically enabled
+ * by enabling just the K-clock, even if it is not a valid parent to
+ * the K-clock. Until drivers get fixed we might need some kind of
+ * "parent muxed join".
+ */
+
+ /* Periph1 */
+ clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk",
+ clkrst1_base, BIT(0), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 0);
+
+ clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk",
+ clkrst1_base, BIT(1), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 1);
+
+ clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
+ clkrst1_base, BIT(2), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 2);
+
+ clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
+ clkrst1_base, BIT(3), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 3);
+
+ clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
+ clkrst1_base, BIT(4), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 4);
+
+ clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk",
+ clkrst1_base, BIT(5), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 5);
+
+ clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
+ clkrst1_base, BIT(6), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 6);
+
+ clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
+ clkrst1_base, BIT(8), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 8);
+
+ clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
+ clkrst1_base, BIT(9), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 9);
+
+ clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
+ clkrst1_base, BIT(10), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 1, 10);
+
+ /* Periph2 */
+ clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
+ clkrst2_base, BIT(0), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 2, 0);
+
+ clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk",
+ clkrst2_base, BIT(2), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 2, 2);
+
+ clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
+ clkrst2_base, BIT(3), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 2, 3);
+
+ clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk",
+ clkrst2_base, BIT(4), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 2, 4);
+
+ clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk",
+ clkrst2_base, BIT(5), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 2, 5);
+
+ /* Note that rate is received from parent. */
+ clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk",
+ clkrst2_base, BIT(6),
+ CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
+ PRCC_KCLK_STORE(clk, 2, 6);
+
+ clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk",
+ clkrst2_base, BIT(7),
+ CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
+ PRCC_KCLK_STORE(clk, 2, 7);
+
+ /* Periph3 */
+ clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
+ clkrst3_base, BIT(1), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 1);
+
+ clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
+ clkrst3_base, BIT(2), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 2);
+
+ clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
+ clkrst3_base, BIT(3), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 3);
+
+ clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk",
+ clkrst3_base, BIT(4), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 4);
+
+ clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k",
+ clkrst3_base, BIT(5), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 5);
+
+ clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk",
+ clkrst3_base, BIT(6), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 6);
+
+ clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk",
+ clkrst3_base, BIT(7), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 3, 7);
+
+ /* Periph6 */
+ clk = clk_reg_prcc_kclk("p3_rng_kclk", "rngclk",
+ clkrst6_base, BIT(0), CLK_SET_RATE_GATE);
+ PRCC_KCLK_STORE(clk, 6, 0);
+
+ for_each_child_of_node(np, child) {
+ static struct clk_onecell_data clk_data;
+
+ if (!of_node_cmp(child->name, "prcmu-clock")) {
+ clk_data.clks = prcmu_clk;
+ clk_data.clk_num = ARRAY_SIZE(prcmu_clk);
+ of_clk_add_provider(child, of_clk_src_onecell_get, &clk_data);
+ }
+ if (!of_node_cmp(child->name, "prcc-periph-clock"))
+ of_clk_add_provider(child, ux500_twocell_get, prcc_pclk);
+
+ if (!of_node_cmp(child->name, "prcc-kernel-clock"))
+ of_clk_add_provider(child, ux500_twocell_get, prcc_kclk);
+
+ if (!of_node_cmp(child->name, "rtc32k-clock"))
+ of_clk_add_provider(child, of_clk_src_simple_get, rtc_clk);
+
+ if (!of_node_cmp(child->name, "smp-twd-clock"))
+ of_clk_add_provider(child, of_clk_src_simple_get, twd_clk);
+ }
+}
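
The loop above only publishes the clocks to the device tree; a consumer still has to look them up and gate them itself. A rough, hypothetical sketch of that consumer side (the probe helper and the use of clock index 0 are made up for illustration; it needs <linux/clk.h>, <linux/err.h> and <linux/of.h>):

	static int example_probe_get_clock(struct device *dev)
	{
		struct clk *clk;
		int ret;

		/* index 0 resolves through one of the providers registered above */
		clk = of_clk_get(dev->of_node, 0);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* prepare and gate the PRCMU/PRCC clock on */
		ret = clk_prepare_enable(clk);
		if (ret) {
			clk_put(clk);
			return ret;
		}

		return 0;
	}
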
diff --git a/drivers/clk/ux500/u8540_clk.c b/drivers/clk/ux500/u8540_clk.c
index f26258869deb..20c8add90d11 100644
--- a/drivers/clk/ux500/u8540_clk.c
+++ b/drivers/clk/ux500/u8540_clk.c
@@ -83,7 +83,7 @@ void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
clk_register_clkdev(clk, NULL, "lcd");
clk_register_clkdev(clk, "lcd", "mcde");
- clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BML8580CLK,
+ clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK,
CLK_IS_ROOT);
clk_register_clkdev(clk, NULL, "bml");
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 67ccf4aa7277..f5e4c21b301f 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -107,7 +107,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
vco = icst_hz_to_vco(icst->params, rate);
icst->rate = icst_hz(icst->params, vco);
- vco_set(icst->vcoreg, icst->lockreg, vco);
+ vco_set(icst->lockreg, icst->vcoreg, vco);
return 0;
}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 971d796e071d..bdb953e15d2a 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -34,6 +34,7 @@ config ORION_TIMER
bool
config SUN4I_TIMER
+ select CLKSRC_MMIO
bool
config VT8500_TIMER
@@ -71,10 +72,33 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
help
Use the always on PRCMU Timer as sched_clock
+config CLKSRC_EFM32
+ bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
+ depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+ default ARCH_EFM32
+ help
+ Support for using the timers of EFM32 SoCs as clock source and clock
+ event device.
+
config ARM_ARCH_TIMER
bool
select CLKSRC_OF if OF
+config ARM_ARCH_TIMER_EVTSTREAM
+ bool "Support for ARM architected timer event stream generation"
+ default y if ARM_ARCH_TIMER
+ help
+ This option enables support for event stream generation based on
+ the ARM architected timer. It is used for waking up CPUs executing
+ the wfe instruction at a rate derived from the timer clock by a
+ power-of-two divisor.
+ The main use of the event stream is wfe-based timeouts of userspace
+ locking implementations. It might also be useful for imposing a
+ timeout on wfe to safeguard against any programming error in case
+ an expected event is not generated.
+ This must be disabled for hardware validation purposes, so that any
+ hardware anomaly of missing events can be detected.
+
config ARM_GLOBAL_TIMER
bool
select CLKSRC_OF if OF
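
As a worked sketch of the power-of-two rounding the help text above refers to (the same arithmetic appears later in this patch in arch_timer_configure_evtstream()), assuming an example 50 MHz counter and the 10 kHz ARCH_TIMER_EVT_STREAM_FREQ target:

	/*
	 * Needs <linux/bitops.h> and <linux/kernel.h>.  The 50 MHz rate and
	 * 10 kHz target below are example numbers only.
	 */
	static unsigned int evtstream_divider_exp(u32 rate, u32 target_hz)
	{
		int div = rate / target_hz;	/* 50000000 / 10000 = 5000 */
		int pos = fls(div);		/* fls(5000) = 13, so 4096 <= div < 8192 */

		/* round to the nearer power of two: 5000 is closer to 4096 than to 8192 */
		if (pos > 1 && !(div & (1 << (pos - 2))))
			pos--;			/* pos = 12, i.e. 2^12 = 4096 */

		return min(pos, 15);		/* EVNTI in CNTKCTL is only 4 bits wide */
	}
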
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 704d6d342adc..33621efb9148 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o
obj-$(CONFIG_ARCH_BCM) += bcm_kona_timer.o
obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
+obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index fbd9ccd5e114..95fb944e15ee 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -13,12 +13,14 @@
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/sched_clock.h>
#include <asm/arch_timer.h>
#include <asm/virt.h>
@@ -294,6 +296,19 @@ static void __arch_timer_setup(unsigned type,
clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
+static void arch_timer_configure_evtstream(void)
+{
+ int evt_stream_div, pos;
+
+ /* Find the closest power of two to the divisor */
+ evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
+ pos = fls(evt_stream_div);
+ if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
+ pos--;
+ /* enable event stream */
+ arch_timer_evtstrm_enable(min(pos, 15));
+}
+
static int arch_timer_setup(struct clock_event_device *clk)
{
__arch_timer_setup(ARCH_CP15_TIMER, clk);
@@ -307,6 +322,8 @@ static int arch_timer_setup(struct clock_event_device *clk)
}
arch_counter_set_user_access();
+ if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
+ arch_timer_configure_evtstream();
return 0;
}
@@ -389,7 +406,7 @@ static struct clocksource clocksource_counter = {
.rating = 400,
.read = arch_counter_read,
.mask = CLOCKSOURCE_MASK(56),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
static struct cyclecounter cyclecounter = {
@@ -419,6 +436,9 @@ static void __init arch_counter_register(unsigned type)
cyclecounter.mult = clocksource_counter.mult;
cyclecounter.shift = clocksource_counter.shift;
timecounter_init(&timecounter, &cyclecounter, start_count);
+
+ /* 56 bits minimum, so we assume worst case rollover */
+ sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
static void arch_timer_stop(struct clock_event_device *clk)
@@ -460,6 +480,33 @@ static struct notifier_block arch_timer_cpu_nb = {
.notifier_call = arch_timer_cpu_notify,
};
+#ifdef CONFIG_CPU_PM
+static unsigned int saved_cntkctl;
+static int arch_timer_cpu_pm_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_PM_ENTER)
+ saved_cntkctl = arch_timer_get_cntkctl();
+ else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
+ arch_timer_set_cntkctl(saved_cntkctl);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block arch_timer_cpu_pm_notifier = {
+ .notifier_call = arch_timer_cpu_pm_notify,
+};
+
+static int __init arch_timer_cpu_pm_init(void)
+{
+ return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
+}
+#else
+static int __init arch_timer_cpu_pm_init(void)
+{
+ return 0;
+}
+#endif
+
static int __init arch_timer_register(void)
{
int err;
@@ -499,11 +546,17 @@ static int __init arch_timer_register(void)
if (err)
goto out_free_irq;
+ err = arch_timer_cpu_pm_init();
+ if (err)
+ goto out_unreg_notify;
+
/* Immediately configure the timer on the boot CPU */
arch_timer_setup(this_cpu_ptr(arch_timer_evt));
return 0;
+out_unreg_notify:
+ unregister_cpu_notifier(&arch_timer_cpu_nb);
out_free_irq:
if (arch_timer_use_virtual)
free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index b66c1f36066c..c639b1a9e996 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -169,7 +169,8 @@ static int gt_clockevents_init(struct clock_event_device *clk)
int cpu = smp_processor_id();
clk->name = "arm_global_timer";
- clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERCPU;
clk->set_mode = gt_clockevent_set_mode;
clk->set_next_event = gt_clockevent_set_next_event;
clk->cpumask = cpumask_of(cpu);
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 07ea7ce900dc..26ed331b1aad 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -49,7 +49,7 @@ struct bcm2835_timer {
static void __iomem *system_clock __read_mostly;
-static u32 notrace bcm2835_sched_read(void)
+static u64 notrace bcm2835_sched_read(void)
{
return readl_relaxed(system_clock);
}
@@ -110,7 +110,7 @@ static void __init bcm2835_timer_init(struct device_node *node)
panic("Can't read clock-frequency");
system_clock = base + REG_COUNTER_LO;
- setup_sched_clock(bcm2835_sched_read, 32, freq);
+ sched_clock_register(bcm2835_sched_read, 32, freq);
clocksource_mmio_init(base + REG_COUNTER_LO, node->name,
freq, 300, 32, clocksource_mmio_readl_up);
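
The setup_sched_clock()/sched_clock_register() conversions in this driver and in most of the drivers that follow take the same shape: the read hook now returns u64 even when the hardware counter is narrower, and the valid width is passed as the bits argument so the core handles wraparound. A minimal sketch with a made-up counter base and init helper (needs <linux/io.h>, <linux/init.h> and <linux/sched_clock.h>):

	static void __iomem *example_counter_base;	/* placeholder MMIO base */

	static u64 notrace example_sched_read(void)
	{
		/* readl_relaxed() still returns 32 bits; the core extends it */
		return readl_relaxed(example_counter_base);
	}

	static void __init example_sched_clock_init(unsigned long rate)
	{
		/* 32 valid bits, counting at "rate" Hz */
		sched_clock_register(example_sched_read, 32, rate);
	}
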
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index a9fd4ad25674..b375106844d8 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -53,7 +53,7 @@ static struct clocksource clocksource_dbx500_prcmu = {
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
-static u32 notrace dbx500_prcmu_sched_clock_read(void)
+static u64 notrace dbx500_prcmu_sched_clock_read(void)
{
if (unlikely(!clksrc_dbx500_timer_base))
return 0;
@@ -81,8 +81,7 @@ void __init clksrc_dbx500_prcmu_init(void __iomem *base)
clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
}
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
- setup_sched_clock(dbx500_prcmu_sched_clock_read,
- 32, RATE_32K);
+ sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K);
#endif
clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
}
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index b9ddd9e3a2f5..35639cf4e5a2 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -35,5 +35,6 @@ void __init clocksource_of_init(void)
init_func = match->data;
init_func(np);
+ of_node_put(np);
}
}
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 4cbae4f762b1..45ba8aecc729 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -23,7 +23,7 @@
#include <linux/clk.h>
#include <linux/sched_clock.h>
-static void timer_get_base_and_rate(struct device_node *np,
+static void __init timer_get_base_and_rate(struct device_node *np,
void __iomem **base, u32 *rate)
{
struct clk *timer_clk;
@@ -55,11 +55,11 @@ static void timer_get_base_and_rate(struct device_node *np,
try_clock_freq:
if (of_property_read_u32(np, "clock-freq", rate) &&
- of_property_read_u32(np, "clock-frequency", rate))
+ of_property_read_u32(np, "clock-frequency", rate))
panic("No clock nor clock-frequency property for %s", np->name);
}
-static void add_clockevent(struct device_node *event_timer)
+static void __init add_clockevent(struct device_node *event_timer)
{
void __iomem *iobase;
struct dw_apb_clock_event_device *ced;
@@ -82,7 +82,7 @@ static void add_clockevent(struct device_node *event_timer)
static void __iomem *sched_io_base;
static u32 sched_rate;
-static void add_clocksource(struct device_node *source_timer)
+static void __init add_clocksource(struct device_node *source_timer)
{
void __iomem *iobase;
struct dw_apb_clocksource *cs;
@@ -106,7 +106,7 @@ static void add_clocksource(struct device_node *source_timer)
sched_rate = rate;
}
-static u32 read_sched_clock(void)
+static u64 read_sched_clock(void)
{
return __raw_readl(sched_io_base);
}
@@ -117,7 +117,7 @@ static const struct of_device_id sptimer_ids[] __initconst = {
{ /* Sentinel */ },
};
-static void init_sched_clock(void)
+static void __init init_sched_clock(void)
{
struct device_node *sched_timer;
@@ -128,7 +128,7 @@ static void init_sched_clock(void)
of_node_put(sched_timer);
}
- setup_sched_clock(read_sched_clock, 32, sched_rate);
+ sched_clock_register(read_sched_clock, 32, sched_rate);
}
static int num_called;
@@ -138,12 +138,10 @@ static void __init dw_apb_timer_init(struct device_node *timer)
case 0:
pr_debug("%s: found clockevent timer\n", __func__);
add_clockevent(timer);
- of_node_put(timer);
break;
case 1:
pr_debug("%s: found clocksource timer\n", __func__);
add_clocksource(timer);
- of_node_put(timer);
init_sched_clock();
break;
default:
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 3a5909c12d42..9d170834fcf3 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -78,7 +78,7 @@ static int em_sti_enable(struct em_sti_priv *p)
int ret;
/* enable clock */
- ret = clk_enable(p->clk);
+ ret = clk_prepare_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
@@ -107,7 +107,7 @@ static void em_sti_disable(struct em_sti_priv *p)
em_sti_write(p, STI_INTENCLR, 3);
/* stop clock */
- clk_disable(p->clk);
+ clk_disable_unprepare(p->clk);
}
static cycle_t em_sti_count(struct em_sti_priv *p)
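
For background on the clk_enable()/clk_prepare_enable() conversions here and in tcb_clksrc.c further down: under the common clock framework a clock has to be prepared (a possibly sleeping step) before it may be enabled (an atomic step), and the combined helpers perform both in the correct order. A minimal sketch, assuming a struct clk obtained elsewhere (needs <linux/clk.h>):

	static int example_use_clock(struct clk *clk)
	{
		int ret;

		/* clk_prepare() may sleep, then clk_enable() gates the clock on */
		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		/* ... access the peripheral here ... */

		clk_disable_unprepare(clk);	/* disable first, then unprepare */
		return 0;
	}
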
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index 0f5e65f74dc3..445b68a01dc5 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -222,7 +222,7 @@ static struct clocksource clocksource_mxs = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static u32 notrace mxs_read_sched_clock_v2(void)
+static u64 notrace mxs_read_sched_clock_v2(void)
{
return ~readl_relaxed(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1));
}
@@ -236,7 +236,7 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
else {
clocksource_mmio_init(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1),
"mxs_timer", c, 200, 32, clocksource_mmio_readl_down);
- setup_sched_clock(mxs_read_sched_clock_v2, 32, c);
+ sched_clock_register(mxs_read_sched_clock_v2, 32, c);
}
return 0;
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 1b74bea12385..ed7b73b508e0 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -76,7 +76,7 @@ static struct delay_timer mtu_delay_timer;
* local implementation which uses the clocksource to get some
* better resolution when scheduling the kernel.
*/
-static u32 notrace nomadik_read_sched_clock(void)
+static u64 notrace nomadik_read_sched_clock(void)
{
if (unlikely(!mtu_base))
return 0;
@@ -231,7 +231,7 @@ static void __init __nmdk_timer_init(void __iomem *base, int irq,
"mtu_0");
#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
- setup_sched_clock(nomadik_read_sched_clock, 32, rate);
+ sched_clock_register(nomadik_read_sched_clock, 32, rate);
#endif
/* Timer 1 is used for events, register irq and clockevents */
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index ab29476ee5f9..85082e8d3052 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -331,7 +331,7 @@ static struct clocksource samsung_clocksource = {
* this wraps around for now, since it is just a relative time
* stamp. (Inspired by U300 implementation.)
*/
-static u32 notrace samsung_read_sched_clock(void)
+static u64 notrace samsung_read_sched_clock(void)
{
return samsung_clocksource_read(NULL);
}
@@ -357,7 +357,7 @@ static void __init samsung_clocksource_init(void)
else
pwm.source_reg = pwm.base + pwm.source_id * 0x0c + 0x14;
- setup_sched_clock(samsung_read_sched_clock,
+ sched_clock_register(samsung_read_sched_clock,
pwm.variant.bits, clock_rate);
samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 8ead0258740a..2fb4695a28d8 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -37,6 +37,8 @@
#define TIMER_INTVAL_REG(val) (0x10 * (val) + 0x14)
#define TIMER_CNTVAL_REG(val) (0x10 * (val) + 0x18)
+#define TIMER_SYNC_TICKS 3
+
static void __iomem *timer_base;
static u32 ticks_per_jiffy;
@@ -50,7 +52,7 @@ static void sun4i_clkevt_sync(void)
{
u32 old = readl(timer_base + TIMER_CNTVAL_REG(1));
- while ((old - readl(timer_base + TIMER_CNTVAL_REG(1))) < 3)
+ while ((old - readl(timer_base + TIMER_CNTVAL_REG(1))) < TIMER_SYNC_TICKS)
cpu_relax();
}
@@ -104,7 +106,7 @@ static int sun4i_clkevt_next_event(unsigned long evt,
struct clock_event_device *unused)
{
sun4i_clkevt_time_stop(0);
- sun4i_clkevt_time_setup(0, evt);
+ sun4i_clkevt_time_setup(0, evt - TIMER_SYNC_TICKS);
sun4i_clkevt_time_start(0, false);
return 0;
@@ -131,7 +133,7 @@ static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
static struct irqaction sun4i_timer_irq = {
.name = "sun4i_timer0",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = sun4i_timer_interrupt,
.dev_id = &sun4i_clockevent,
};
@@ -187,8 +189,8 @@ static void __init sun4i_timer_init(struct device_node *node)
sun4i_clockevent.cpumask = cpumask_of(0);
- clockevents_config_and_register(&sun4i_clockevent, rate, 0x1,
- 0xffffffff);
+ clockevents_config_and_register(&sun4i_clockevent, rate,
+ TIMER_SYNC_TICKS, 0xffffffff);
}
CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer",
sun4i_timer_init);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 8a6187225dd0..00fdd1170284 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
|| tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
- clk_disable(tcd->clk);
+ clk_disable_unprepare(tcd->clk);
}
switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
* of oneshot, we get lower overhead and improved accuracy.
*/
case CLOCK_EVT_MODE_PERIODIC:
- clk_enable(tcd->clk);
+ clk_prepare_enable(tcd->clk);
/* slow clock, count up to RC, then irq and restart */
__raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
break;
case CLOCK_EVT_MODE_ONESHOT:
- clk_enable(tcd->clk);
+ clk_prepare_enable(tcd->clk);
/* slow clock, count up to RC, then irq and stop */
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -180,15 +180,22 @@ static irqreturn_t ch2_irq(int irq, void *handle)
static struct irqaction tc_irqaction = {
.name = "tc_clkevt",
- .flags = IRQF_TIMER | IRQF_DISABLED,
+ .flags = IRQF_TIMER,
.handler = ch2_irq,
};
-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
+ int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
+ /* try to enable t2 clk to avoid future errors in mode change */
+ ret = clk_prepare_enable(t2_clk);
+ if (ret)
+ return ret;
+ clk_disable_unprepare(t2_clk);
+
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
tc_irqaction.dev_id = &clkevt;
@@ -197,16 +204,21 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
clkevt.clkevt.cpumask = cpumask_of(0);
+ ret = setup_irq(irq, &tc_irqaction);
+ if (ret)
+ return ret;
+
clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
- setup_irq(irq, &tc_irqaction);
+ return ret;
}
#else /* !CONFIG_GENERIC_CLOCKEVENTS */
-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
/* NOTHING */
+ return 0;
}
#endif
@@ -265,6 +277,7 @@ static int __init tcb_clksrc_init(void)
int best_divisor_idx = -1;
int clk32k_divisor_idx = -1;
int i;
+ int ret;
tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK, clksrc.name);
if (!tc) {
@@ -275,7 +288,11 @@ static int __init tcb_clksrc_init(void)
pdev = tc->pdev;
t0_clk = tc->clk[0];
- clk_enable(t0_clk);
+ ret = clk_prepare_enable(t0_clk);
+ if (ret) {
+ pr_debug("can't enable T0 clk\n");
+ goto err_free_tc;
+ }
/* How fast will we be counting? Pick something over 5 MHz. */
rate = (u32) clk_get_rate(t0_clk);
@@ -313,17 +330,39 @@ static int __init tcb_clksrc_init(void)
/* tclib will give us three clocks no matter what the
* underlying platform supports.
*/
- clk_enable(tc->clk[1]);
+ ret = clk_prepare_enable(tc->clk[1]);
+ if (ret) {
+ pr_debug("can't enable T1 clk\n");
+ goto err_disable_t0;
+ }
/* setup both channel 0 & 1 */
tcb_setup_dual_chan(tc, best_divisor_idx);
}
/* and away we go! */
- clocksource_register_hz(&clksrc, divided_rate);
+ ret = clocksource_register_hz(&clksrc, divided_rate);
+ if (ret)
+ goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
- setup_clkevents(tc, clk32k_divisor_idx);
+ ret = setup_clkevents(tc, clk32k_divisor_idx);
+ if (ret)
+ goto err_unregister_clksrc;
return 0;
+
+err_unregister_clksrc:
+ clocksource_unregister(&clksrc);
+
+err_disable_t1:
+ if (!tc->tcb_config || tc->tcb_config->counter_width != 32)
+ clk_disable_unprepare(tc->clk[1]);
+
+err_disable_t0:
+ clk_disable_unprepare(t0_clk);
+
+err_free_tc:
+ atmel_tc_free(tc);
+ return ret;
}
arch_initcall(tcb_clksrc_init);
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 93961703b887..642849256d82 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -98,7 +98,7 @@ static struct clock_event_device tegra_clockevent = {
.set_mode = tegra_timer_set_mode,
};
-static u32 notrace tegra_read_sched_clock(void)
+static u64 notrace tegra_read_sched_clock(void)
{
return timer_readl(TIMERUS_CNTR_1US);
}
@@ -181,8 +181,6 @@ static void __init tegra20_init_timer(struct device_node *np)
rate = clk_get_rate(clk);
}
- of_node_put(np);
-
switch (rate) {
case 12000000:
timer_writel(0x000b, TIMERUS_USEC_CFG);
@@ -200,7 +198,7 @@ static void __init tegra20_init_timer(struct device_node *np)
WARN(1, "Unknown clock rate");
}
- setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
+ sched_clock_register(tegra_read_sched_clock, 32, 1000000);
if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
@@ -241,8 +239,6 @@ static void __init tegra20_init_rtc(struct device_node *np)
else
clk_prepare_enable(clk);
- of_node_put(np);
-
register_persistent_clock(NULL, tegra_read_persistent_clock);
}
CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 0198504ef6b0..d8e47e502785 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -96,7 +96,7 @@ static void local_timer_ctrl_clrset(u32 clr, u32 set)
local_base + TIMER_CTRL_OFF);
}
-static u32 notrace armada_370_xp_read_sched_clock(void)
+static u64 notrace armada_370_xp_read_sched_clock(void)
{
return ~readl(timer_base + TIMER0_VAL_OFF);
}
@@ -258,7 +258,7 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
/*
* Set scale and timer for sched_clock.
*/
- setup_sched_clock(armada_370_xp_read_sched_clock, 32, timer_clk);
+ sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
/*
* Setup free-running clocksource timer (interrupts
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
new file mode 100644
index 000000000000..1a6205b7bed3
--- /dev/null
+++ b/drivers/clocksource/time-efm32.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/clk.h>
+
+#define TIMERn_CTRL 0x00
+#define TIMERn_CTRL_PRESC(val) (((val) & 0xf) << 24)
+#define TIMERn_CTRL_PRESC_1024 TIMERn_CTRL_PRESC(10)
+#define TIMERn_CTRL_CLKSEL(val) (((val) & 0x3) << 16)
+#define TIMERn_CTRL_CLKSEL_PRESCHFPERCLK TIMERn_CTRL_CLKSEL(0)
+#define TIMERn_CTRL_OSMEN 0x00000010
+#define TIMERn_CTRL_MODE(val) (((val) & 0x3) << 0)
+#define TIMERn_CTRL_MODE_UP TIMERn_CTRL_MODE(0)
+#define TIMERn_CTRL_MODE_DOWN TIMERn_CTRL_MODE(1)
+
+#define TIMERn_CMD 0x04
+#define TIMERn_CMD_START 0x00000001
+#define TIMERn_CMD_STOP 0x00000002
+
+#define TIMERn_IEN 0x0c
+#define TIMERn_IF 0x10
+#define TIMERn_IFS 0x14
+#define TIMERn_IFC 0x18
+#define TIMERn_IRQ_UF 0x00000002
+
+#define TIMERn_TOP 0x1c
+#define TIMERn_CNT 0x24
+
+struct efm32_clock_event_ddata {
+ struct clock_event_device evtdev;
+ void __iomem *base;
+ unsigned periodic_top;
+};
+
+static void efm32_clock_event_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evtdev)
+{
+ struct efm32_clock_event_ddata *ddata =
+ container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP);
+ writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+ TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+ TIMERn_CTRL_MODE_DOWN,
+ ddata->base + TIMERn_CTRL);
+ writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+ TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+ TIMERn_CTRL_OSMEN |
+ TIMERn_CTRL_MODE_DOWN,
+ ddata->base + TIMERn_CTRL);
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ break;
+
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static int efm32_clock_event_set_next_event(unsigned long evt,
+ struct clock_event_device *evtdev)
+{
+ struct efm32_clock_event_ddata *ddata =
+ container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
+
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ writel_relaxed(evt, ddata->base + TIMERn_CNT);
+ writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
+
+ return 0;
+}
+
+static irqreturn_t efm32_clock_event_handler(int irq, void *dev_id)
+{
+ struct efm32_clock_event_ddata *ddata = dev_id;
+
+ writel_relaxed(TIMERn_IRQ_UF, ddata->base + TIMERn_IFC);
+
+ ddata->evtdev.event_handler(&ddata->evtdev);
+
+ return IRQ_HANDLED;
+}
+
+static struct efm32_clock_event_ddata clock_event_ddata = {
+ .evtdev = {
+ .name = "efm32 clockevent",
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .set_mode = efm32_clock_event_set_mode,
+ .set_next_event = efm32_clock_event_set_next_event,
+ .rating = 200,
+ },
+};
+
+static struct irqaction efm32_clock_event_irq = {
+ .name = "efm32 clockevent",
+ .flags = IRQF_TIMER,
+ .handler = efm32_clock_event_handler,
+ .dev_id = &clock_event_ddata,
+};
+
+static int __init efm32_clocksource_init(struct device_node *np)
+{
+ struct clk *clk;
+ void __iomem *base;
+ unsigned long rate;
+ int ret;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("failed to get clock for clocksource (%d)\n", ret);
+ goto err_clk_get;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("failed to enable timer clock for clocksource (%d)\n",
+ ret);
+ goto err_clk_enable;
+ }
+ rate = clk_get_rate(clk);
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -EADDRNOTAVAIL;
+ pr_err("failed to map registers for clocksource\n");
+ goto err_iomap;
+ }
+
+ writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+ TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+ TIMERn_CTRL_MODE_UP, base + TIMERn_CTRL);
+ writel_relaxed(TIMERn_CMD_START, base + TIMERn_CMD);
+
+ ret = clocksource_mmio_init(base + TIMERn_CNT, "efm32 timer",
+ DIV_ROUND_CLOSEST(rate, 1024), 200, 16,
+ clocksource_mmio_readl_up);
+ if (ret) {
+ pr_err("failed to init clocksource (%d)\n", ret);
+ goto err_clocksource_init;
+ }
+
+ return 0;
+
+err_clocksource_init:
+
+ iounmap(base);
+err_iomap:
+
+ clk_disable_unprepare(clk);
+err_clk_enable:
+
+ clk_put(clk);
+err_clk_get:
+
+ return ret;
+}
+
+static int __init efm32_clockevent_init(struct device_node *np)
+{
+ struct clk *clk;
+ void __iomem *base;
+ unsigned long rate;
+ int irq;
+ int ret;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("failed to get clock for clockevent (%d)\n", ret);
+ goto err_clk_get;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("failed to enable timer clock for clockevent (%d)\n",
+ ret);
+ goto err_clk_enable;
+ }
+ rate = clk_get_rate(clk);
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -EADDRNOTAVAIL;
+ pr_err("failed to map registers for clockevent\n");
+ goto err_iomap;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ ret = -ENOENT;
+ pr_err("failed to get irq for clockevent\n");
+ goto err_get_irq;
+ }
+
+ writel_relaxed(TIMERn_IRQ_UF, base + TIMERn_IEN);
+
+ clock_event_ddata.base = base;
+ clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);
+
+ setup_irq(irq, &efm32_clock_event_irq);
+
+ clockevents_config_and_register(&clock_event_ddata.evtdev,
+ DIV_ROUND_CLOSEST(rate, 1024),
+ 0xf, 0xffff);
+
+ return 0;
+
+err_get_irq:
+
+ iounmap(base);
+err_iomap:
+
+ clk_disable_unprepare(clk);
+err_clk_enable:
+
+ clk_put(clk);
+err_clk_get:
+
+ return ret;
+}
+
+/*
+ * This function ensures that we end up with exactly one clocksource and
+ * one clock_event_device.
+ */
+static void __init efm32_timer_init(struct device_node *np)
+{
+ static int has_clocksource, has_clockevent;
+ int ret;
+
+ if (!has_clocksource) {
+ ret = efm32_clocksource_init(np);
+ if (!ret) {
+ has_clocksource = 1;
+ return;
+ }
+ }
+
+ if (!has_clockevent) {
+ ret = efm32_clockevent_init(np);
+ if (!ret) {
+ has_clockevent = 1;
+ return;
+ }
+ }
+}
+CLOCKSOURCE_OF_DECLARE(efm32, "efm32,timer", efm32_timer_init);
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index ef3cfb269d8b..8a492d34ff9f 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -165,9 +165,9 @@ static struct irqaction sirfsoc_timer_irq = {
};
/* Overwrite weak default sched_clock with more precise one */
-static u32 notrace sirfsoc_read_sched_clock(void)
+static u64 notrace sirfsoc_read_sched_clock(void)
{
- return (u32)(sirfsoc_timer_read(NULL) & 0xffffffff);
+ return sirfsoc_timer_read(NULL);
}
static void __init sirfsoc_clockevent_init(void)
@@ -206,7 +206,7 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));
- setup_sched_clock(sirfsoc_read_sched_clock, 32, CLOCK_TICK_RATE);
+ sched_clock_register(sirfsoc_read_sched_clock, 64, CLOCK_TICK_RATE);
BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index 587e0202a70b..02821b06a39e 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -52,7 +52,7 @@ static inline void pit_irq_acknowledge(void)
__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
}
-static unsigned int pit_read_sched_clock(void)
+static u64 pit_read_sched_clock(void)
{
return __raw_readl(clksrc_base + PITCVAL);
}
@@ -64,7 +64,7 @@ static int __init pit_clocksource_init(unsigned long rate)
__raw_writel(~0UL, clksrc_base + PITLDVAL);
__raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);
- setup_sched_clock(pit_read_sched_clock, 32, rate);
+ sched_clock_register(pit_read_sched_clock, 32, rate);
return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
300, 32, clocksource_mmio_readl_down);
}
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index 64f553f04fa4..ad3c0e83a779 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -137,14 +137,12 @@ static void __init vt8500_timer_init(struct device_node *np)
if (!regbase) {
pr_err("%s: Missing iobase description in Device Tree\n",
__func__);
- of_node_put(np);
return;
}
timer_irq = irq_of_parse_and_map(np, 0);
if (!timer_irq) {
pr_err("%s: Missing irq description in Device Tree\n",
__func__);
- of_node_put(np);
return;
}
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 08ae128cce9b..c73fc2b74de2 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
/* If cn_netlink_send() failed, the data is not sent */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
ev->what = which_id;
ev->event_data.id.process_pid = task->pid;
ev->event_data.id.process_tgid = task->tgid;
@@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -244,6 +256,7 @@ void proc_coredump_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -254,6 +267,7 @@ void proc_coredump_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -269,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -281,6 +296,7 @@ void proc_exit_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -304,6 +320,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
msg->seq = rcvd_seq;
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -313,6 +330,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = rcvd_ack + 1;
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
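
The memset() and msg->flags additions above all follow one pattern: the proc_event payload lives on the kernel stack and is sent to userspace over netlink in full, so every byte, including padding and unused fields, has to be initialised before the used members are filled in. A minimal sketch of that pattern, with a made-up helper name (needs <linux/string.h>, <linux/connector.h> and <linux/cn_proc.h>):

	static void example_fill_event(struct cn_msg *msg)
	{
		struct proc_event *ev = (struct proc_event *)msg->data;

		/* clear payload and padding so no stack garbage reaches userspace */
		memset(&ev->event_data, 0, sizeof(ev->event_data));
		msg->flags = 0;			/* previously left uninitialized */
		msg->len = sizeof(*ev);
		/* ... fill ev->what and the event_data members that are used ... */
	}
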
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 6ecfa758942c..a36749f1e44a 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -109,7 +109,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
data = nlmsg_data(nlh);
- memcpy(data, msg, sizeof(*data) + msg->len);
+ memcpy(data, msg, size);
NETLINK_CB(skb).dst_group = group;
@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
static void cn_rx_skb(struct sk_buff *__skb)
{
struct nlmsghdr *nlh;
- int err;
struct sk_buff *skb;
+ int len, err;
skb = skb_get(__skb);
if (skb->len >= NLMSG_HDRLEN) {
nlh = nlmsg_hdr(skb);
+ len = nlmsg_len(nlh);
- if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
+ if (len < (int)sizeof(struct cn_msg) ||
skb->len < nlh->nlmsg_len ||
- nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
+ len > CONNECTOR_MAX_MSG_SIZE) {
kfree_skb(skb);
return;
}
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 534fcb825153..38093e272377 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -17,15 +17,11 @@ config CPU_FREQ
if CPU_FREQ
-config CPU_FREQ_TABLE
- tristate
-
config CPU_FREQ_GOV_COMMON
bool
config CPU_FREQ_STAT
tristate "CPU frequency translation statistics"
- select CPU_FREQ_TABLE
default y
help
This driver exports CPU frequency statistics information through sysfs
@@ -143,7 +139,6 @@ config CPU_FREQ_GOV_USERSPACE
config CPU_FREQ_GOV_ONDEMAND
tristate "'ondemand' cpufreq policy governor"
- select CPU_FREQ_TABLE
select CPU_FREQ_GOV_COMMON
help
'ondemand' - This driver adds a dynamic cpufreq policy governor.
@@ -187,7 +182,6 @@ config CPU_FREQ_GOV_CONSERVATIVE
config GENERIC_CPUFREQ_CPU0
tristate "Generic CPU0 cpufreq driver"
depends on HAVE_CLK && REGULATOR && PM_OPP && OF
- select CPU_FREQ_TABLE
help
This adds a generic cpufreq driver for CPU0 frequency management.
It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
@@ -223,7 +217,6 @@ depends on IA64
config IA64_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
- select CPU_FREQ_TABLE
depends on ACPI_PROCESSOR
help
This driver adds a CPUFreq driver which utilizes the ACPI
@@ -240,7 +233,6 @@ depends on MIPS
config LOONGSON2_CPUFREQ
tristate "Loongson2 CPUFreq Driver"
- select CPU_FREQ_TABLE
help
This option adds a CPUFreq driver for loongson processors which
support software configurable cpu frequency.
@@ -262,7 +254,6 @@ menu "SPARC CPU frequency scaling drivers"
depends on SPARC64
config SPARC_US3_CPUFREQ
tristate "UltraSPARC-III CPU Frequency driver"
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for UltraSPARC-III processors.
@@ -272,7 +263,6 @@ config SPARC_US3_CPUFREQ
config SPARC_US2E_CPUFREQ
tristate "UltraSPARC-IIe CPU Frequency driver"
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for UltraSPARC-IIe processors.
@@ -285,7 +275,6 @@ menu "SH CPU Frequency scaling"
depends on SUPERH
config SH_CPU_FREQ
tristate "SuperH CPU Frequency driver"
- select CPU_FREQ_TABLE
help
This adds the cpufreq driver for SuperH. Any CPU that supports
clock rate rounding through the clock framework can use this
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0fa204b244bd..ce52ed949249 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,7 +5,6 @@
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
- select CPU_FREQ_TABLE
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -18,7 +17,6 @@ config ARM_DT_BL_CPUFREQ
config ARM_EXYNOS_CPUFREQ
bool
- select CPU_FREQ_TABLE
config ARM_EXYNOS4210_CPUFREQ
bool "SAMSUNG EXYNOS4210"
@@ -58,7 +56,6 @@ config ARM_EXYNOS5440_CPUFREQ
depends on SOC_EXYNOS5440
depends on HAVE_CLK && PM_OPP && OF
default y
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Samsung EXYNOS5440
SoC. The nature of exynos5440 clock controller is
@@ -85,7 +82,6 @@ config ARM_IMX6Q_CPUFREQ
tristate "Freescale i.MX6Q cpufreq support"
depends on SOC_IMX6Q
depends on REGULATOR_ANATOP
- select CPU_FREQ_TABLE
help
This adds cpufreq driver support for Freescale i.MX6Q SOC.
@@ -101,7 +97,6 @@ config ARM_INTEGRATOR
config ARM_KIRKWOOD_CPUFREQ
def_bool ARCH_KIRKWOOD && OF
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Marvell Kirkwood
SoCs.
@@ -110,7 +105,6 @@ config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
depends on ARCH_OMAP2PLUS
default ARCH_OMAP2PLUS
- select CPU_FREQ_TABLE
config ARM_S3C_CPUFREQ
bool
@@ -165,7 +159,6 @@ config ARM_S3C2412_CPUFREQ
config ARM_S3C2416_CPUFREQ
bool "S3C2416 CPU Frequency scaling support"
depends on CPU_S3C2416
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for the Samsung S3C2416 and
S3C2450 SoC. The S3C2416 supports changing the rate of the
@@ -196,7 +189,6 @@ config ARM_S3C2440_CPUFREQ
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
depends on CPU_S3C6410
- select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -206,7 +198,6 @@ config ARM_S3C64XX_CPUFREQ
config ARM_S5PV210_CPUFREQ
bool "Samsung S5PV210 and S5PC110"
depends on CPU_S5PV210
- select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver for Samsung S5PV210 and
@@ -223,7 +214,6 @@ config ARM_SA1110_CPUFREQ
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
- select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver support for SPEAr SOCs.
@@ -231,7 +221,14 @@ config ARM_SPEAR_CPUFREQ
config ARM_TEGRA_CPUFREQ
bool "TEGRA CPUFreq support"
depends on ARCH_TEGRA
- select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver support for TEGRA SOCs.
+
+config ARM_VEXPRESS_SPC_CPUFREQ
+ tristate "Versatile Express SPC based CPUfreq driver"
+ select ARM_BIG_LITTLE_CPUFREQ
+ depends on ARCH_VEXPRESS_SPC
+ help
+ This adds the CPUfreq driver support for Versatile Express
+ big.LITTLE platforms using SPC for power management.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 25ca9db62e09..ca0021a96e19 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,7 +1,6 @@
config CPU_FREQ_CBE
tristate "CBE frequency scaling"
depends on CBE_RAS && PPC_CELL
- select CPU_FREQ_TABLE
default m
help
This adds the cpufreq driver for Cell BE processors.
@@ -20,7 +19,6 @@ config CPU_FREQ_CBE_PMI
config CPU_FREQ_MAPLE
bool "Support for Maple 970FX Evaluation Board"
depends on PPC_MAPLE
- select CPU_FREQ_TABLE
help
This adds support for frequency switching on Maple 970FX
Evaluation Board and compatible boards (IBM JS2x blades).
@@ -28,7 +26,6 @@ config CPU_FREQ_MAPLE
config PPC_CORENET_CPUFREQ
tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
depends on PPC_E500MC && OF && COMMON_CLK
- select CPU_FREQ_TABLE
select CLK_PPC_CORENET
help
This adds the CPUFreq driver support for Freescale e500mc,
@@ -38,7 +35,6 @@ config PPC_CORENET_CPUFREQ
config CPU_FREQ_PMAC
bool "Support for Apple PowerBooks"
depends on ADB_PMU && PPC32
- select CPU_FREQ_TABLE
help
This adds support for frequency switching on Apple PowerBooks,
this currently includes some models of iBook & Titanium
@@ -47,7 +43,6 @@ config CPU_FREQ_PMAC
config CPU_FREQ_PMAC64
bool "Support for some Apple G5s"
depends on PPC_PMAC && PPC64
- select CPU_FREQ_TABLE
help
This adds support for frequency switching on Apple iMac G5,
and some of the more recent desktop G5 machines as well.
@@ -55,7 +50,6 @@ config CPU_FREQ_PMAC64
config PPC_PASEMI_CPUFREQ
bool "Support for PA Semi PWRficient"
depends on PPC_PASEMI
- select CPU_FREQ_TABLE
default y
help
This adds the support for frequency switching on PA Semi
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index e2b6eabef221..d369349eeaab 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -31,7 +31,6 @@ config X86_PCC_CPUFREQ
config X86_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
- select CPU_FREQ_TABLE
depends on ACPI_PROCESSOR
help
This driver adds a CPUFreq driver which utilizes the ACPI
@@ -60,7 +59,6 @@ config X86_ACPI_CPUFREQ_CPB
config ELAN_CPUFREQ
tristate "AMD Elan SC400 and SC410"
- select CPU_FREQ_TABLE
depends on MELAN
---help---
This adds the CPUFreq driver for AMD Elan SC400 and SC410
@@ -76,7 +74,6 @@ config ELAN_CPUFREQ
config SC520_CPUFREQ
tristate "AMD Elan SC520"
- select CPU_FREQ_TABLE
depends on MELAN
---help---
This adds the CPUFreq driver for AMD Elan SC520 processor.
@@ -88,7 +85,6 @@ config SC520_CPUFREQ
config X86_POWERNOW_K6
tristate "AMD Mobile K6-2/K6-3 PowerNow!"
- select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for mobile AMD K6-2+ and mobile
@@ -100,7 +96,6 @@ config X86_POWERNOW_K6
config X86_POWERNOW_K7
tristate "AMD Mobile Athlon/Duron PowerNow!"
- select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for mobile AMD K7 mobile processors.
@@ -118,7 +113,6 @@ config X86_POWERNOW_K7_ACPI
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
- select CPU_FREQ_TABLE
depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
help
This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
@@ -132,11 +126,10 @@ config X86_POWERNOW_K8
config X86_AMD_FREQ_SENSITIVITY
tristate "AMD frequency sensitivity feedback powersave bias"
depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
- select CPU_FREQ_TABLE
help
This adds AMD-specific powersave bias function to the ondemand
governor, which allows it to make more power-conscious frequency
- change decisions based on feedback from hardware (availble on AMD
+ change decisions based on feedback from hardware (available on AMD
Family 16h and above).
Hardware feedback tells software how "sensitive" to frequency changes
@@ -160,7 +153,6 @@ config X86_GX_SUSPMOD
config X86_SPEEDSTEP_CENTRINO
tristate "Intel Enhanced SpeedStep (deprecated)"
- select CPU_FREQ_TABLE
select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32
depends on X86_32 || (X86_64 && ACPI_PROCESSOR)
help
@@ -190,7 +182,6 @@ config X86_SPEEDSTEP_CENTRINO_TABLE
config X86_SPEEDSTEP_ICH
tristate "Intel Speedstep on ICH-M chipsets (ioport interface)"
- select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
@@ -204,7 +195,6 @@ config X86_SPEEDSTEP_ICH
config X86_SPEEDSTEP_SMI
tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
- select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
@@ -217,7 +207,6 @@ config X86_SPEEDSTEP_SMI
config X86_P4_CLOCKMOD
tristate "Intel Pentium 4 clock modulation"
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Intel Pentium 4 / XEON
processors. When enabled it will lower CPU temperature by skipping
@@ -259,7 +248,6 @@ config X86_LONGRUN
config X86_LONGHAUL
tristate "VIA Cyrix III Longhaul"
- select CPU_FREQ_TABLE
depends on X86_32 && ACPI_PROCESSOR
help
This adds the CPUFreq driver for VIA Samuel/CyrixIII,
@@ -272,7 +260,6 @@ config X86_LONGHAUL
config X86_E_POWERSAVER
tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
- select CPU_FREQ_TABLE
depends on X86_32 && ACPI_PROCESSOR
help
This adds the CPUFreq driver for VIA C7 processors. However, this driver
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index ad5866c2ada0..74945652dd7a 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -1,5 +1,5 @@
# CPUfreq core
-obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o
# CPUfreq stats
obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
@@ -11,9 +11,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
-# CPUfreq cross-arch helpers
-obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
-
obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
##################################################################################
@@ -77,6 +74,7 @@ obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o
+obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
##################################################################################
# PowerPC platform drivers
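Note on the Kconfig hunks above: freq_table.o is now linked into the cpufreq core unconditionally, so the CPU_FREQ_TABLE symbol and the per-driver "select CPU_FREQ_TABLE" lines become unnecessary. A minimal sketch of what a table-based driver still provides on its own (the example_* names and the 100 us latency are assumptions, not taken from this series):

#include <linux/cpufreq.h>

/* Hypothetical static table; many drivers build this at probe time instead. */
static struct cpufreq_frequency_table example_freq_table[] = {
        { .driver_data = 0, .frequency = 200000 },      /* kHz */
        { .driver_data = 1, .frequency = 400000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        /* Validates the table, exposes it via sysfs, fills policy->cpus. */
        return cpufreq_generic_init(policy, example_freq_table, 100000);
}

The table helpers are always available now; no extra Kconfig dependency is needed.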
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d2c3253e015e..caf41ebea184 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -424,34 +424,21 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
}
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
struct acpi_processor_performance *perf;
- struct cpufreq_freqs freqs;
struct drv_cmd cmd;
- unsigned int next_state = 0; /* Index into freq_table */
unsigned int next_perf_state = 0; /* Index into perf table */
int result = 0;
- pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
-
if (unlikely(data == NULL ||
data->acpi_data == NULL || data->freq_table == NULL)) {
return -ENODEV;
}
perf = data->acpi_data;
- result = cpufreq_frequency_table_target(policy,
- data->freq_table,
- target_freq,
- relation, &next_state);
- if (unlikely(result)) {
- result = -ENODEV;
- goto out;
- }
-
- next_perf_state = data->freq_table[next_state].driver_data;
+ next_perf_state = data->freq_table[index].driver_data;
if (perf->state == next_perf_state) {
if (unlikely(data->resume)) {
pr_debug("Called after resume, resetting to P%d\n",
@@ -492,23 +479,17 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
else
cmd.mask = cpumask_of(policy->cpu);
- freqs.old = perf->states[perf->state].core_frequency * 1000;
- freqs.new = data->freq_table[next_state].frequency;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
drv_write(&cmd);
if (acpi_pstate_strict) {
- if (!check_freqs(cmd.mask, freqs.new, data)) {
+ if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
+ data)) {
pr_debug("acpi_cpufreq_target failed (%d)\n",
policy->cpu);
result = -EAGAIN;
- freqs.new = freqs.old;
}
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
if (!result)
perf->state = next_perf_state;
@@ -516,15 +497,6 @@ out:
return result;
}
-static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
-{
- struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
-
- pr_debug("acpi_cpufreq_verify\n");
-
- return cpufreq_frequency_table_verify(policy, data->freq_table);
-}
-
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
@@ -837,7 +809,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
perf->state = 0;
- result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+ result = cpufreq_table_validate_and_show(policy, data->freq_table);
if (result)
goto err_freqfree;
@@ -846,12 +818,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
- /* Current speed is unknown and not detectable by IO port */
+ /*
+ * The core will not set policy->cur, because
+ * cpufreq_driver->get is NULL, so we need to set it here.
+ * However, we have to guess it, because the current speed is
+ * unknown and not detectable via IO ports.
+ */
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
- policy->cur = get_cur_freq_on_cpu(cpu);
break;
default:
break;
@@ -868,8 +844,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
(u32) perf->states[i].power,
(u32) perf->states[i].transition_latency);
- cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
/*
* the first call to ->target() should result in us actually
* writing something to the appropriate registers.
@@ -929,8 +903,8 @@ static struct freq_attr *acpi_cpufreq_attr[] = {
};
static struct cpufreq_driver acpi_cpufreq_driver = {
- .verify = acpi_cpufreq_verify,
- .target = acpi_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = acpi_cpufreq_target,
.bios_limit = acpi_processor_get_bios_limit,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
@@ -986,12 +960,12 @@ static int __init acpi_cpufreq_init(void)
{
int ret;
+ if (acpi_disabled)
+ return -ENODEV;
+
/* don't keep reloading if cpufreq_driver exists */
if (cpufreq_get_current_driver())
- return 0;
-
- if (acpi_disabled)
- return 0;
+ return -EEXIST;
pr_debug("acpi_cpufreq_init\n");
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 3549f0784af1..5519933813ea 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -24,110 +24,323 @@
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
+#include <linux/mutex.h>
#include <linux/of_platform.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>
+#include <asm/bL_switcher.h>
#include "arm_big_little.h"
/* Currently we support only two clusters */
+#define A15_CLUSTER 0
+#define A7_CLUSTER 1
#define MAX_CLUSTERS 2
+#ifdef CONFIG_BL_SWITCHER
+static bool bL_switching_enabled;
+#define is_bL_switching_enabled() bL_switching_enabled
+#define set_switching_enabled(x) (bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled() false
+#define set_switching_enabled(x) do { } while (0)
+#endif
+
+#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
+#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
+
static struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
-static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
-static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1];
+
+static unsigned int clk_big_min; /* Minimum clock frequency (Big) */
+static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
-static unsigned int bL_cpufreq_get(unsigned int cpu)
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+static inline int raw_cpu_to_cluster(int cpu)
{
- u32 cur_cluster = cpu_to_cluster(cpu);
+ return topology_physical_package_id(cpu);
+}
- return clk_get_rate(clk[cur_cluster]) / 1000;
+static inline int cpu_to_cluster(int cpu)
+{
+ return is_bL_switching_enabled() ?
+ MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}
-/* Validate policy frequency range */
-static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy)
+static unsigned int find_cluster_maxfreq(int cluster)
{
- u32 cur_cluster = cpu_to_cluster(policy->cpu);
+ int j;
+ u32 max_freq = 0, cpu_freq;
+
+ for_each_online_cpu(j) {
+ cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+ if ((cluster == per_cpu(physical_cluster, j)) &&
+ (max_freq < cpu_freq))
+ max_freq = cpu_freq;
+ }
+
+ pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
+ max_freq);
+
+ return max_freq;
+}
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
+{
+ u32 cur_cluster = per_cpu(physical_cluster, cpu);
+ u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+ /* For switcher we use virtual A7 clock rates */
+ if (is_bL_switching_enabled())
+ rate = VIRT_FREQ(cur_cluster, rate);
+
+ pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
+ cur_cluster, rate);
+
+ return rate;
+}
+
+static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
+{
+ if (is_bL_switching_enabled()) {
+ pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
+ cpu));
+
+ return per_cpu(cpu_last_req_freq, cpu);
+ } else {
+ return clk_get_cpu_rate(cpu);
+ }
+}
+
+static unsigned int
+bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
+{
+ u32 new_rate, prev_rate;
+ int ret;
+ bool bLs = is_bL_switching_enabled();
+
+ mutex_lock(&cluster_lock[new_cluster]);
- return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
+ if (bLs) {
+ prev_rate = per_cpu(cpu_last_req_freq, cpu);
+ per_cpu(cpu_last_req_freq, cpu) = rate;
+ per_cpu(physical_cluster, cpu) = new_cluster;
+
+ new_rate = find_cluster_maxfreq(new_cluster);
+ new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+ } else {
+ new_rate = rate;
+ }
+
+ pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
+ __func__, cpu, old_cluster, new_cluster, new_rate);
+
+ ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+ if (WARN_ON(ret)) {
+ pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
+ new_cluster);
+ if (bLs) {
+ per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+ per_cpu(physical_cluster, cpu) = old_cluster;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ return ret;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ /* Recalc freq for old cluster when switching clusters */
+ if (old_cluster != new_cluster) {
+ pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
+ __func__, cpu, old_cluster, new_cluster);
+
+ /* Switch cluster */
+ bL_switch_request(cpu, new_cluster);
+
+ mutex_lock(&cluster_lock[old_cluster]);
+
+ /* Set freq of old cluster if there are cpus left on it */
+ new_rate = find_cluster_maxfreq(old_cluster);
+ new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+
+ if (new_rate) {
+ pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
+ __func__, old_cluster, new_rate);
+
+ if (clk_set_rate(clk[old_cluster], new_rate * 1000))
+ pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
+ __func__, ret, old_cluster);
+ }
+ mutex_unlock(&cluster_lock[old_cluster]);
+ }
+
+ return 0;
}
/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
- u32 cpu = policy->cpu, freq_tab_idx, cur_cluster;
- int ret = 0;
+ u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
+ unsigned int freqs_new;
+
+ cur_cluster = cpu_to_cluster(cpu);
+ new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
+
+ freqs_new = freq_table[cur_cluster][index].frequency;
+
+ if (is_bL_switching_enabled()) {
+ if ((actual_cluster == A15_CLUSTER) &&
+ (freqs_new < clk_big_min)) {
+ new_cluster = A7_CLUSTER;
+ } else if ((actual_cluster == A7_CLUSTER) &&
+ (freqs_new > clk_little_max)) {
+ new_cluster = A15_CLUSTER;
+ }
+ }
- cur_cluster = cpu_to_cluster(policy->cpu);
+ return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
+}
- freqs.old = bL_cpufreq_get(policy->cpu);
+static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+{
+ int count;
- /* Determine valid target frequency using freq_table */
- cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
- target_freq, relation, &freq_tab_idx);
- freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;
+ for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
+ ;
- pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n",
- __func__, cpu, cur_cluster, freqs.old, target_freq,
- freqs.new);
+ return count;
+}
- if (freqs.old == freqs.new)
- return 0;
+/* get the minimum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_min(struct cpufreq_frequency_table *table)
+{
+ int i;
+ uint32_t min_freq = ~0;
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
+ if (table[i].frequency < min_freq)
+ min_freq = table[i].frequency;
+ return min_freq;
+}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+/* get the maximum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_max(struct cpufreq_frequency_table *table)
+{
+ int i;
+ uint32_t max_freq = 0;
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
+ if (table[i].frequency > max_freq)
+ max_freq = table[i].frequency;
+ return max_freq;
+}
- ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000);
- if (ret) {
- pr_err("clk_set_rate failed: %d\n", ret);
- freqs.new = freqs.old;
+static int merge_cluster_tables(void)
+{
+ int i, j, k = 0, count = 1;
+ struct cpufreq_frequency_table *table;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ count += get_table_count(freq_table[i]);
+
+ table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ freq_table[MAX_CLUSTERS] = table;
+
+ /* Add in reverse order to get freqs in increasing order */
+ for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
+ for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
+ j++) {
+ table[k].frequency = VIRT_FREQ(i,
+ freq_table[i][j].frequency);
+ pr_debug("%s: index: %d, freq: %d\n", __func__, k,
+ table[k].frequency);
+ k++;
+ }
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ table[k].driver_data = k;
+ table[k].frequency = CPUFREQ_TABLE_END;
- return ret;
+ pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);
+
+ return 0;
+}
+
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+
+ if (!freq_table[cluster])
+ return;
+
+ clk_put(clk[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}
static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i;
+
+ if (atomic_dec_return(&cluster_usage[cluster]))
+ return;
+
+ if (cluster < MAX_CLUSTERS)
+ return _put_cluster_clk_and_freq_table(cpu_dev);
- if (!atomic_dec_return(&cluster_usage[cluster])) {
- clk_put(clk[cluster]);
- opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
- dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return;
+ }
+
+ _put_cluster_clk_and_freq_table(cdev);
}
+
+ /* free virtual table */
+ kfree(freq_table[cluster]);
}
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
- u32 cluster = cpu_to_cluster(cpu_dev->id);
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
char name[14] = "cpu-cluster.";
int ret;
- if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+ if (freq_table[cluster])
return 0;
ret = arm_bL_ops->init_opp_table(cpu_dev);
if (ret) {
dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
__func__, cpu_dev->id, ret);
- goto atomic_dec;
+ goto out;
}
- ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
if (ret) {
dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
__func__, cpu_dev->id, ret);
- goto atomic_dec;
+ goto out;
}
name[12] = cluster + '0';
- clk[cluster] = clk_get_sys(name, NULL);
+ clk[cluster] = clk_get(cpu_dev, name);
if (!IS_ERR(clk[cluster])) {
dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
__func__, clk[cluster], freq_table[cluster],
@@ -138,15 +351,74 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
__func__, cpu_dev->id, cluster);
ret = PTR_ERR(clk[cluster]);
- opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
-atomic_dec:
- atomic_dec(&cluster_usage[cluster]);
+out:
dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
cluster);
return ret;
}
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i, ret;
+
+ if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+ return 0;
+
+ if (cluster < MAX_CLUSTERS) {
+ ret = _get_cluster_clk_and_freq_table(cpu_dev);
+ if (ret)
+ atomic_dec(&cluster_usage[cluster]);
+ return ret;
+ }
+
+ /*
+ * Get data for all clusters and fill virtual cluster with a merge of
+ * both
+ */
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return -ENODEV;
+ }
+
+ ret = _get_cluster_clk_and_freq_table(cdev);
+ if (ret)
+ goto put_clusters;
+ }
+
+ ret = merge_cluster_tables();
+ if (ret)
+ goto put_clusters;
+
+ /* Assuming 2 clusters, set clk_big_min and clk_little_max */
+ clk_big_min = get_table_min(freq_table[0]);
+ clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
+
+ pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
+ __func__, cluster, clk_big_min, clk_little_max);
+
+ return 0;
+
+put_clusters:
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return -ENODEV;
+ }
+
+ _put_cluster_clk_and_freq_table(cdev);
+ }
+
+ atomic_dec(&cluster_usage[cluster]);
+
+ return ret;
+}
+
/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
@@ -165,7 +437,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
if (ret)
return ret;
- ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+ ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
if (ret) {
dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
policy->cpu, cur_cluster);
@@ -173,7 +445,14 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
- cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
+ if (cur_cluster < MAX_CLUSTERS) {
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+
+ per_cpu(physical_cluster, policy->cpu) = cur_cluster;
+ } else {
+ /* Assumption: during init, we are always running on A15 */
+ per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+ }
if (arm_bL_ops->get_transition_latency)
policy->cpuinfo.transition_latency =
@@ -181,9 +460,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
else
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = bL_cpufreq_get(policy->cpu);
-
- cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+ if (is_bL_switching_enabled())
+ per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
return 0;
@@ -200,33 +478,60 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
return -ENODEV;
}
+ cpufreq_frequency_table_put_attr(policy->cpu);
put_cluster_clk_and_freq_table(cpu_dev);
dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
return 0;
}
-/* Export freq_table to sysfs */
-static struct freq_attr *bL_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver bL_cpufreq_driver = {
.name = "arm-big-little",
- .flags = CPUFREQ_STICKY,
- .verify = bL_cpufreq_verify_policy,
- .target = bL_cpufreq_set_target,
- .get = bL_cpufreq_get,
+ .flags = CPUFREQ_STICKY |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = bL_cpufreq_set_target,
+ .get = bL_cpufreq_get_rate,
.init = bL_cpufreq_init,
.exit = bL_cpufreq_exit,
- .have_governor_per_policy = true,
- .attr = bL_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
+};
+
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+ unsigned long action, void *_arg)
+{
+ pr_debug("%s: action: %ld\n", __func__, action);
+
+ switch (action) {
+ case BL_NOTIFY_PRE_ENABLE:
+ case BL_NOTIFY_PRE_DISABLE:
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_ENABLE:
+ set_switching_enabled(true);
+ cpufreq_register_driver(&bL_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_DISABLE:
+ set_switching_enabled(false);
+ cpufreq_register_driver(&bL_cpufreq_driver);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+ .notifier_call = bL_cpufreq_switcher_notifier,
};
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
- int ret;
+ int ret, i;
if (arm_bL_ops) {
pr_debug("%s: Already registered: %s, exiting\n", __func__,
@@ -241,16 +546,29 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
arm_bL_ops = ops;
+ ret = bL_switcher_get_enabled();
+ set_switching_enabled(ret);
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ mutex_init(&cluster_lock[i]);
+
ret = cpufreq_register_driver(&bL_cpufreq_driver);
if (ret) {
pr_info("%s: Failed registering platform driver: %s, err: %d\n",
__func__, ops->name, ret);
arm_bL_ops = NULL;
} else {
- pr_info("%s: Registered platform driver: %s\n", __func__,
- ops->name);
+ ret = bL_switcher_register_notifier(&bL_switcher_notifier);
+ if (ret) {
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ arm_bL_ops = NULL;
+ } else {
+ pr_info("%s: Registered platform driver: %s\n",
+ __func__, ops->name);
+ }
}
+ bL_switcher_put_enabled();
return ret;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);
@@ -263,7 +581,10 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
return;
}
+ bL_switcher_get_enabled();
+ bL_switcher_unregister_notifier(&bL_switcher_notifier);
cpufreq_unregister_driver(&bL_cpufreq_driver);
+ bL_switcher_put_enabled();
pr_info("%s: Un-registered platform driver: %s\n", __func__,
arm_bL_ops->name);
arm_bL_ops = NULL;
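The public entry points bL_cpufreq_register() and bL_cpufreq_unregister() keep their signatures, so platform backends are unaffected by the switcher rework; they still just hand in a cpufreq_arm_bL_ops. Sketch of such glue code (the ops contents and names are assumptions for illustration):

#include <linux/device.h>
#include <linux/module.h>
#include "arm_big_little.h"

/* Hypothetical OPP setup hook; a real backend parses DT or firmware data. */
static int example_init_opp_table(struct device *cpu_dev)
{
        return 0;
}

static struct cpufreq_arm_bL_ops example_bL_ops = {
        .name           = "example-bL",
        .init_opp_table = example_init_opp_table,
};

static int __init example_bL_init(void)
{
        return bL_cpufreq_register(&example_bL_ops);
}
module_init(example_bL_init);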
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 79b2ce17884d..70f18fc12d4a 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -34,11 +34,6 @@ struct cpufreq_arm_bL_ops {
int (*init_opp_table)(struct device *cpu_dev);
};
-static inline int cpu_to_cluster(int cpu)
-{
- return topology_physical_package_id(cpu);
-}
-
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 480c0bd0468d..8d9d59108906 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -24,7 +24,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index e0c38d938997..856ad80418ae 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -19,18 +19,10 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/slab.h>
static struct clk *cpuclk;
-
-static int at32_verify_speed(struct cpufreq_policy *policy)
-{
- if (policy->cpu != 0)
- return -EINVAL;
-
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
- return 0;
-}
+static struct cpufreq_frequency_table *freq_table;
static unsigned int at32_get_speed(unsigned int cpu)
{
@@ -43,74 +35,94 @@ static unsigned int at32_get_speed(unsigned int cpu)
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
-static int at32_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct cpufreq_freqs freqs;
- long freq;
-
- /* Convert target_freq from kHz to Hz */
- freq = clk_round_rate(cpuclk, target_freq * 1000);
-
- /* Check if policy->min <= new_freq <= policy->max */
- if(freq < (policy->min * 1000) || freq > (policy->max * 1000))
- return -EINVAL;
-
- pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
+ unsigned int old_freq, new_freq;
- freqs.old = at32_get_speed(0);
- freqs.new = (freq + 500) / 1000;
- freqs.flags = 0;
+ old_freq = at32_get_speed(0);
+ new_freq = freq_table[index].frequency;
if (!ref_freq) {
- ref_freq = freqs.old;
+ ref_freq = old_freq;
loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- if (freqs.old < freqs.new)
+ if (old_freq < new_freq)
boot_cpu_data.loops_per_jiffy = cpufreq_scale(
- loops_per_jiffy_ref, ref_freq, freqs.new);
- clk_set_rate(cpuclk, freq);
- if (freqs.new < freqs.old)
+ loops_per_jiffy_ref, ref_freq, new_freq);
+ clk_set_rate(cpuclk, new_freq * 1000);
+ if (new_freq < old_freq)
boot_cpu_data.loops_per_jiffy = cpufreq_scale(
- loops_per_jiffy_ref, ref_freq, freqs.new);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- pr_debug("cpufreq: set frequency %lu Hz\n", freq);
+ loops_per_jiffy_ref, ref_freq, new_freq);
return 0;
}
static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
{
+ unsigned int frequency, rate, min_freq;
+ int retval, steps, i;
+
if (policy->cpu != 0)
return -EINVAL;
cpuclk = clk_get(NULL, "cpu");
if (IS_ERR(cpuclk)) {
pr_debug("cpufreq: could not get CPU clk\n");
- return PTR_ERR(cpuclk);
+ retval = PTR_ERR(cpuclk);
+ goto out_err;
}
- policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
- policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+ min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
policy->cpuinfo.transition_latency = 0;
- policy->cur = at32_get_speed(0);
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
- printk("cpufreq: AT32AP CPU frequency driver\n");
+ /*
+ * The AVR32 CPU frequency scales in powers of two between the maximum
+ * and minimum rates; also add space for the table end marker.
+ *
+ * Further validate that the frequency is usable, and append it to the
+ * frequency table.
+ */
+ steps = fls(frequency / min_freq) + 1;
+ freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table),
+ GFP_KERNEL);
+ if (!freq_table) {
+ retval = -ENOMEM;
+ goto out_err_put_clk;
+ }
- return 0;
+ for (i = 0; i < (steps - 1); i++) {
+ rate = clk_round_rate(cpuclk, frequency * 1000) / 1000;
+
+ if (rate != frequency)
+ freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ else
+ freq_table[i].frequency = frequency;
+
+ frequency /= 2;
+ }
+
+ freq_table[steps - 1].frequency = CPUFREQ_TABLE_END;
+
+ retval = cpufreq_table_validate_and_show(policy, freq_table);
+ if (!retval) {
+ printk("cpufreq: AT32AP CPU frequency driver\n");
+ return 0;
+ }
+
+ kfree(freq_table);
+out_err_put_clk:
+ clk_put(cpuclk);
+out_err:
+ return retval;
}
static struct cpufreq_driver at32_driver = {
.name = "at32ap",
.init = at32_cpufreq_driver_init,
- .verify = at32_verify_speed,
- .target = at32_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = at32_set_target,
.get = at32_get_speed,
.flags = CPUFREQ_STICKY,
};
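Rates that the clock framework cannot actually produce are kept in the table as CPUFREQ_ENTRY_INVALID, as the init loop above does; the generic table helpers skip such entries, so the indices do not need renumbering. The same idea as a standalone sketch (an assumed helper, not part of this patch):

#include <linux/clk.h>
#include <linux/cpufreq.h>

static void example_mark_unreachable(struct clk *clk,
                                     struct cpufreq_frequency_table *table)
{
        unsigned long rounded;
        int i;

        for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
                        continue;
                rounded = clk_round_rate(clk, table[i].frequency * 1000UL);
                if (rounded / 1000 != table[i].frequency)
                        table[i].frequency = CPUFREQ_ENTRY_INVALID;
        }
}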
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
index ef05978a7237..e9e63fc9c2c9 100644
--- a/drivers/cpufreq/blackfin-cpufreq.c
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -127,41 +127,28 @@ unsigned long cpu_set_cclk(int cpu, unsigned long new)
}
#endif
-static int bfin_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int bfin_target(struct cpufreq_policy *policy, unsigned int index)
{
#ifndef CONFIG_BF60x
unsigned int plldiv;
#endif
- unsigned int index;
- unsigned long cclk_hz;
- struct cpufreq_freqs freqs;
static unsigned long lpj_ref;
static unsigned int lpj_ref_freq;
+ unsigned int old_freq, new_freq;
int ret = 0;
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
cycles_t cycles;
#endif
- if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq,
- relation, &index))
- return -EINVAL;
+ old_freq = bfin_getfreq_khz(0);
+ new_freq = bfin_freq_table[index].frequency;
- cclk_hz = bfin_freq_table[index].frequency;
-
- freqs.old = bfin_getfreq_khz(0);
- freqs.new = cclk_hz;
-
- pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
- cclk_hz, target_freq, freqs.old);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
#ifndef CONFIG_BF60x
plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
bfin_write_PLL_DIV(plldiv);
#else
- ret = cpu_set_cclk(policy->cpu, freqs.new * 1000);
+ ret = cpu_set_cclk(policy->cpu, new_freq * 1000);
if (ret != 0) {
WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret);
return ret;
@@ -177,25 +164,16 @@ static int bfin_target(struct cpufreq_policy *policy,
#endif
if (!lpj_ref_freq) {
lpj_ref = loops_per_jiffy;
- lpj_ref_freq = freqs.old;
+ lpj_ref_freq = old_freq;
}
- if (freqs.new != freqs.old) {
+ if (new_freq != old_freq) {
loops_per_jiffy = cpufreq_scale(lpj_ref,
- lpj_ref_freq, freqs.new);
+ lpj_ref_freq, new_freq);
}
- /* TODO: just test case for cycles clock source, remove later */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- pr_debug("cpufreq: done\n");
return ret;
}
-static int bfin_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, bfin_freq_table);
-}
-
static int __bfin_cpu_init(struct cpufreq_policy *policy)
{
@@ -209,23 +187,17 @@ static int __bfin_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
- policy->cur = cclk;
- cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
- return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
+ return cpufreq_table_validate_and_show(policy, bfin_freq_table);
}
-static struct freq_attr *bfin_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver bfin_driver = {
- .verify = bfin_verify_speed,
- .target = bfin_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = bfin_target,
.get = bfin_getfreq_khz,
.init = __bfin_cpu_init,
+ .exit = cpufreq_generic_exit,
.name = "bfin cpufreq",
- .attr = bfin_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init bfin_cpu_init(void)
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index c522a95c0e16..d4585ce2346c 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -17,7 +17,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -30,73 +30,51 @@ static struct clk *cpu_clk;
static struct regulator *cpu_reg;
static struct cpufreq_frequency_table *freq_table;
-static int cpu0_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static unsigned int cpu0_get_speed(unsigned int cpu)
{
return clk_get_rate(cpu_clk) / 1000;
}
-static int cpu0_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct cpufreq_freqs freqs;
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long volt = 0, volt_old = 0, tol = 0;
+ unsigned int old_freq, new_freq;
long freq_Hz, freq_exact;
- unsigned int index;
int ret;
- ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &index);
- if (ret) {
- pr_err("failed to match target freqency %d: %d\n",
- target_freq, ret);
- return ret;
- }
-
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
if (freq_Hz < 0)
freq_Hz = freq_table[index].frequency * 1000;
- freq_exact = freq_Hz;
- freqs.new = freq_Hz / 1000;
- freqs.old = clk_get_rate(cpu_clk) / 1000;
- if (freqs.old == freqs.new)
- return 0;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ freq_exact = freq_Hz;
+ new_freq = freq_Hz / 1000;
+ old_freq = clk_get_rate(cpu_clk) / 1000;
if (!IS_ERR(cpu_reg)) {
rcu_read_lock();
- opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
if (IS_ERR(opp)) {
rcu_read_unlock();
pr_err("failed to find OPP for %ld\n", freq_Hz);
- freqs.new = freqs.old;
- ret = PTR_ERR(opp);
- goto post_notify;
+ return PTR_ERR(opp);
}
- volt = opp_get_voltage(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
tol = volt * voltage_tolerance / 100;
volt_old = regulator_get_voltage(cpu_reg);
}
pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
- freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
- freqs.new / 1000, volt ? volt / 1000 : -1);
+ old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+ new_freq / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
- if (!IS_ERR(cpu_reg) && freqs.new > freqs.old) {
+ if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage up: %d\n", ret);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
}
@@ -105,72 +83,35 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
pr_err("failed to set clock rate: %d\n", ret);
if (!IS_ERR(cpu_reg))
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
/* scaling down? scale voltage after frequency */
- if (!IS_ERR(cpu_reg) && freqs.new < freqs.old) {
+ if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage down: %d\n", ret);
- clk_set_rate(cpu_clk, freqs.old * 1000);
- freqs.new = freqs.old;
+ clk_set_rate(cpu_clk, old_freq * 1000);
}
}
-post_notify:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
return ret;
}
static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
{
- int ret;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (ret) {
- pr_err("invalid frequency table: %d\n", ret);
- return ret;
- }
-
- policy->cpuinfo.transition_latency = transition_latency;
- policy->cur = clk_get_rate(cpu_clk) / 1000;
-
- /*
- * The driver only supports the SMP configuartion where all processors
- * share the clock and voltage and clock. Use cpufreq affected_cpus
- * interface to have all CPUs scaled together.
- */
- cpumask_setall(policy->cpus);
-
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
- return 0;
+ return cpufreq_generic_init(policy, freq_table, transition_latency);
}
-static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
-
- return 0;
-}
-
-static struct freq_attr *cpu0_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver cpu0_cpufreq_driver = {
.flags = CPUFREQ_STICKY,
- .verify = cpu0_verify_speed,
- .target = cpu0_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cpu0_set_target,
.get = cpu0_get_speed,
.init = cpu0_cpufreq_init,
- .exit = cpu0_cpufreq_exit,
+ .exit = cpufreq_generic_exit,
.name = "generic_cpu0",
- .attr = cpu0_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int cpu0_cpufreq_probe(struct platform_device *pdev)
@@ -218,7 +159,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
goto out_put_node;
}
- ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
pr_err("failed to init cpufreq table: %d\n", ret);
goto out_put_node;
@@ -230,7 +171,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
transition_latency = CPUFREQ_ETERNAL;
if (!IS_ERR(cpu_reg)) {
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long min_uV, max_uV;
int i;
@@ -242,12 +183,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
;
rcu_read_lock();
- opp = opp_find_freq_exact(cpu_dev,
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
- min_uV = opp_get_voltage(opp);
- opp = opp_find_freq_exact(cpu_dev,
+ min_uV = dev_pm_opp_get_voltage(opp);
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[i-1].frequency * 1000, true);
- max_uV = opp_get_voltage(opp);
+ max_uV = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
if (ret > 0)
@@ -264,7 +205,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
return 0;
out_free_table:
- opp_free_cpufreq_table(cpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_node:
of_node_put(np);
return ret;
@@ -273,7 +214,7 @@ out_put_node:
static int cpu0_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&cpu0_cpufreq_driver);
- opp_free_cpufreq_table(cpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
return 0;
}
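The opp to dev_pm_opp rename in this file is mechanical: <linux/opp.h> becomes <linux/pm_opp.h>, struct opp becomes struct dev_pm_opp, and every opp_*() call gains a dev_pm_ prefix while keeping its arguments. A small lookup in the new spelling (hypothetical helper, error handling trimmed):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* Voltage (uV) of the lowest OPP at or above 'hz', or 0 if none exists. */
static unsigned long example_volt_for(struct device *cpu_dev, unsigned long hz)
{
        struct dev_pm_opp *opp;
        unsigned long uV = 0;

        rcu_read_lock();                /* OPP lookups are RCU protected */
        opp = dev_pm_opp_find_freq_ceil(cpu_dev, &hz);
        if (!IS_ERR(opp))
                uV = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();

        return uV;
}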
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index b83d45f68574..a05b876f375e 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -303,9 +303,7 @@ static int nforce2_verify(struct cpufreq_policy *policy)
if (policy->min < (fsb_pol_max * fid * 100))
policy->max = (fsb_pol_max + 1) * fid * 100;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -362,7 +360,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = nforce2_get(policy->cpu);
return 0;
}
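cpufreq_verify_within_cpu_limits() is shorthand for the open-coded cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq) call it replaces here. Drivers without a frequency table keep their own ->verify(); in reduced form it looks roughly like this (hypothetical driver, the 800 MHz cap is an assumed constraint):

#include <linux/cpufreq.h>

static int example_verify(struct cpufreq_policy *policy)
{
        /* Clamp the requested range to the hardware limits first... */
        cpufreq_verify_within_cpu_limits(policy);

        /* ...then apply any driver-specific constraint (assumed example). */
        if (policy->max > 800000)
                policy->max = 800000;

        return 0;
}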
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 04548f7023af..02d534da22dd 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -47,49 +47,11 @@ static LIST_HEAD(cpufreq_policy_list);
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
-/*
- * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
- * all cpufreq/hotplug/workqueue/etc related lock issues.
- *
- * The rules for this semaphore:
- * - Any routine that wants to read from the policy structure will
- * do a down_read on this semaphore.
- * - Any routine that will write to the policy structure and/or may take away
- * the policy altogether (eg. CPU hotplug), will hold this lock in write
- * mode before doing so.
- *
- * Additional rules:
- * - Governor routines that can be called in cpufreq hotplug path should not
- * take this sem as top level hotplug notifier handler takes this.
- * - Lock should not be held across
- * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
- */
-static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
-
-#define lock_policy_rwsem(mode, cpu) \
-static int lock_policy_rwsem_##mode(int cpu) \
-{ \
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
- BUG_ON(!policy); \
- down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
- \
- return 0; \
-}
-
-lock_policy_rwsem(read, cpu);
-lock_policy_rwsem(write, cpu);
-
-#define unlock_policy_rwsem(mode, cpu) \
-static void unlock_policy_rwsem_##mode(int cpu) \
-{ \
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
- BUG_ON(!policy); \
- up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
+static inline bool has_target(void)
+{
+ return cpufreq_driver->target_index || cpufreq_driver->target;
}
-unlock_policy_rwsem(read, cpu);
-unlock_policy_rwsem(write, cpu);
-
/*
* rwsem to guarantee that cpufreq driver module doesn't unload during critical
* sections
@@ -135,7 +97,7 @@ static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
- return cpufreq_driver->have_governor_per_policy;
+ return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
@@ -183,6 +145,37 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
+/*
+ * This is a generic cpufreq init() routine which can be used by cpufreq
+ * drivers of SMP systems. It will do the following:
+ * - validate and show the frequency table passed in
+ * - set the policy's transition latency
+ * - fill policy->cpus with all possible CPUs
+ */
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table,
+ unsigned int transition_latency)
+{
+ int ret;
+
+ ret = cpufreq_table_validate_and_show(policy, table);
+ if (ret) {
+ pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+ return ret;
+ }
+
+ policy->cpuinfo.transition_latency = transition_latency;
+
+ /*
+ * The driver only supports the SMP configuration where all processors
+ * share clock and voltage.
+ */
+ cpumask_setall(policy->cpus);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_init);
+
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
struct cpufreq_policy *policy = NULL;
@@ -363,7 +356,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
*policy = CPUFREQ_POLICY_POWERSAVE;
err = 0;
}
- } else if (cpufreq_driver->target) {
+ } else if (has_target()) {
struct cpufreq_governor *t;
mutex_lock(&cpufreq_governor_mutex);
@@ -414,7 +407,7 @@ show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy);
/**
@@ -435,7 +428,7 @@ static ssize_t store_##file_name \
if (ret != 1) \
return -EINVAL; \
\
- ret = __cpufreq_set_policy(policy, &new_policy); \
+ ret = cpufreq_set_policy(policy, &new_policy); \
policy->user_policy.object = policy->object; \
\
return ret ? ret : count; \
@@ -493,11 +486,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
&new_policy.governor))
return -EINVAL;
- /*
- * Do not use cpufreq_set_policy here or the user_policy.max
- * will be wrongly overridden
- */
- ret = __cpufreq_set_policy(policy, &new_policy);
+ ret = cpufreq_set_policy(policy, &new_policy);
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
@@ -525,7 +514,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
ssize_t i = 0;
struct cpufreq_governor *t;
- if (!cpufreq_driver->target) {
+ if (!has_target()) {
i += sprintf(buf, "performance powersave");
goto out;
}
@@ -653,24 +642,21 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EINVAL;
+ ssize_t ret;
if (!down_read_trylock(&cpufreq_rwsem))
- goto exit;
+ return -EINVAL;
- if (lock_policy_rwsem_read(policy->cpu) < 0)
- goto up_read;
+ down_read(&policy->rwsem);
if (fattr->show)
ret = fattr->show(policy, buf);
else
ret = -EIO;
- unlock_policy_rwsem_read(policy->cpu);
-
-up_read:
+ up_read(&policy->rwsem);
up_read(&cpufreq_rwsem);
-exit:
+
return ret;
}
@@ -689,17 +675,15 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
if (!down_read_trylock(&cpufreq_rwsem))
goto unlock;
- if (lock_policy_rwsem_write(policy->cpu) < 0)
- goto up_read;
+ down_write(&policy->rwsem);
if (fattr->store)
ret = fattr->store(policy, buf, count);
else
ret = -EIO;
- unlock_policy_rwsem_write(policy->cpu);
+ up_write(&policy->rwsem);
-up_read:
up_read(&cpufreq_rwsem);
unlock:
put_online_cpus();
@@ -815,7 +799,7 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
if (ret)
goto err_out_kobj_put;
}
- if (cpufreq_driver->target) {
+ if (has_target()) {
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
if (ret)
goto err_out_kobj_put;
@@ -844,11 +828,11 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
int ret = 0;
memcpy(&new_policy, policy, sizeof(*policy));
- /* assure that the starting sequence is run in __cpufreq_set_policy */
+ /* assure that the starting sequence is run in cpufreq_set_policy */
policy->governor = NULL;
/* set default policy */
- ret = __cpufreq_set_policy(policy, &new_policy);
+ ret = cpufreq_set_policy(policy, &new_policy);
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
@@ -864,10 +848,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
unsigned int cpu, struct device *dev,
bool frozen)
{
- int ret = 0, has_target = !!cpufreq_driver->target;
+ int ret = 0;
unsigned long flags;
- if (has_target) {
+ if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret) {
pr_err("%s: Failed to stop governor\n", __func__);
@@ -875,7 +859,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
}
}
- lock_policy_rwsem_write(policy->cpu);
+ down_write(&policy->rwsem);
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -883,9 +867,9 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
per_cpu(cpufreq_cpu_data, cpu) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- unlock_policy_rwsem_write(policy->cpu);
+ up_write(&policy->rwsem);
- if (has_target) {
+ if (has_target()) {
if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
pr_err("%s: Failed to start governor\n", __func__);
@@ -930,6 +914,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
goto err_free_cpumask;
INIT_LIST_HEAD(&policy->policy_list);
+ init_rwsem(&policy->rwsem);
+
return policy;
err_free_cpumask:
@@ -949,26 +935,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
- if (cpu == policy->cpu)
+ if (WARN_ON(cpu == policy->cpu))
return;
- /*
- * Take direct locks as lock_policy_rwsem_write wouldn't work here.
- * Also lock for last cpu is enough here as contention will happen only
- * after policy->cpu is changed and after it is changed, other threads
- * will try to acquire lock for new cpu. And policy is already updated
- * by then.
- */
- down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
+ down_write(&policy->rwsem);
policy->last_cpu = policy->cpu;
policy->cpu = cpu;
- up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
+ up_write(&policy->rwsem);
-#ifdef CONFIG_CPU_FREQ_TABLE
cpufreq_frequency_table_update_policy_cpu(policy);
-#endif
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_UPDATE_POLICY_CPU, policy);
}
@@ -1053,6 +1030,14 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
goto err_set_policy_cpu;
}
+ if (cpufreq_driver->get) {
+ policy->cur = cpufreq_driver->get(policy->cpu);
+ if (!policy->cur) {
+ pr_err("%s: ->get() failed\n", __func__);
+ goto err_get_freq;
+ }
+ }
+
/* related cpus should at least have policy->cpus */
cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
@@ -1107,6 +1092,9 @@ err_out_unregister:
per_cpu(cpufreq_cpu_data, j) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+err_get_freq:
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
err_set_policy_cpu:
cpufreq_policy_free(policy);
nomem_out:
@@ -1147,9 +1135,9 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
if (ret) {
pr_err("%s: Failed to move kobj: %d", __func__, ret);
- WARN_ON(lock_policy_rwsem_write(old_cpu));
+ down_write(&policy->rwsem);
cpumask_set_cpu(old_cpu, policy->cpus);
- unlock_policy_rwsem_write(old_cpu);
+ up_write(&policy->rwsem);
ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
"cpufreq");
@@ -1186,7 +1174,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
return -EINVAL;
}
- if (cpufreq_driver->target) {
+ if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret) {
pr_err("%s: Failed to stop governor\n", __func__);
@@ -1200,22 +1188,21 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
policy->governor->name, CPUFREQ_NAME_LEN);
#endif
- lock_policy_rwsem_read(cpu);
+ down_read(&policy->rwsem);
cpus = cpumask_weight(policy->cpus);
- unlock_policy_rwsem_read(cpu);
+ up_read(&policy->rwsem);
if (cpu != policy->cpu) {
if (!frozen)
sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
-
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
if (new_cpu >= 0) {
update_policy_cpu(policy, new_cpu);
if (!frozen) {
- pr_debug("%s: policy Kobject moved to cpu: %d "
- "from: %d\n",__func__, new_cpu, cpu);
+ pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+ __func__, new_cpu, cpu);
}
}
}
@@ -1243,16 +1230,16 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
return -EINVAL;
}
- WARN_ON(lock_policy_rwsem_write(cpu));
+ down_write(&policy->rwsem);
cpus = cpumask_weight(policy->cpus);
if (cpus > 1)
cpumask_clear_cpu(cpu, policy->cpus);
- unlock_policy_rwsem_write(cpu);
+ up_write(&policy->rwsem);
/* If cpu is last user of policy, free policy */
if (cpus == 1) {
- if (cpufreq_driver->target) {
+ if (has_target()) {
ret = __cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT);
if (ret) {
@@ -1263,10 +1250,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
if (!frozen) {
- lock_policy_rwsem_read(cpu);
+ down_read(&policy->rwsem);
kobj = &policy->kobj;
cmp = &policy->kobj_unregister;
- unlock_policy_rwsem_read(cpu);
+ up_read(&policy->rwsem);
kobject_put(kobj);
/*
@@ -1295,7 +1282,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
if (!frozen)
cpufreq_policy_free(policy);
} else {
- if (cpufreq_driver->target) {
+ if (has_target()) {
if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
pr_err("%s: Failed to start governor\n",
@@ -1310,36 +1297,24 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
/**
- * __cpufreq_remove_dev - remove a CPU device
+ * cpufreq_remove_dev - remove a CPU device
*
* Removes the cpufreq interface for a CPU device.
- * Caller should already have policy_rwsem in write mode for this CPU.
- * This routine frees the rwsem before returning.
*/
-static inline int __cpufreq_remove_dev(struct device *dev,
- struct subsys_interface *sif,
- bool frozen)
-{
- int ret;
-
- ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);
-
- if (!ret)
- ret = __cpufreq_remove_dev_finish(dev, sif, frozen);
-
- return ret;
-}
-
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
- int retval;
+ int ret;
if (cpu_is_offline(cpu))
return 0;
- retval = __cpufreq_remove_dev(dev, sif, false);
- return retval;
+ ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+
+ if (!ret)
+ ret = __cpufreq_remove_dev_finish(dev, sif, false);
+
+ return ret;
}
static void handle_update(struct work_struct *work)
@@ -1458,22 +1433,22 @@ static unsigned int __cpufreq_get(unsigned int cpu)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
unsigned int ret_freq = 0;
if (cpufreq_disabled() || !cpufreq_driver)
return -ENOENT;
+ BUG_ON(!policy);
+
if (!down_read_trylock(&cpufreq_rwsem))
return 0;
- if (unlikely(lock_policy_rwsem_read(cpu)))
- goto out_policy;
+ down_read(&policy->rwsem);
ret_freq = __cpufreq_get(cpu);
- unlock_policy_rwsem_read(cpu);
-
-out_policy:
+ up_read(&policy->rwsem);
up_read(&cpufreq_rwsem);
return ret_freq;
@@ -1681,12 +1656,75 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
+ /*
+ * This might look like a redundant call, as we check it again after
+ * finding the index. But it is left intentionally for cases where
+ * exactly the same frequency is requested again, so that we can save
+ * a few function calls.
+ */
if (target_freq == policy->cur)
return 0;
if (cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
+ else if (cpufreq_driver->target_index) {
+ struct cpufreq_frequency_table *freq_table;
+ struct cpufreq_freqs freqs;
+ bool notify;
+ int index;
+
+ freq_table = cpufreq_frequency_get_table(policy->cpu);
+ if (unlikely(!freq_table)) {
+ pr_err("%s: Unable to find freq_table\n", __func__);
+ goto out;
+ }
+
+ retval = cpufreq_frequency_table_target(policy, freq_table,
+ target_freq, relation, &index);
+ if (unlikely(retval)) {
+ pr_err("%s: Unable to find matching freq\n", __func__);
+ goto out;
+ }
+
+ if (freq_table[index].frequency == policy->cur) {
+ retval = 0;
+ goto out;
+ }
+
+ notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
+
+ if (notify) {
+ freqs.old = policy->cur;
+ freqs.new = freq_table[index].frequency;
+ freqs.flags = 0;
+
+ pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
+ __func__, policy->cpu, freqs.old,
+ freqs.new);
+
+ cpufreq_notify_transition(policy, &freqs,
+ CPUFREQ_PRECHANGE);
+ }
+
+ retval = cpufreq_driver->target_index(policy, index);
+ if (retval)
+ pr_err("%s: Failed to change cpu frequency: %d\n",
+ __func__, retval);
+
+ if (notify) {
+ /*
+ * Notify with old freq in case we failed to change
+ * frequency
+ */
+ if (retval)
+ freqs.new = freqs.old;
+
+ cpufreq_notify_transition(policy, &freqs,
+ CPUFREQ_POSTCHANGE);
+ }
+ }
+out:
return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1697,14 +1735,12 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
{
int ret = -EINVAL;
- if (unlikely(lock_policy_rwsem_write(policy->cpu)))
- goto fail;
+ down_write(&policy->rwsem);
ret = __cpufreq_driver_target(policy, target_freq, relation);
- unlock_policy_rwsem_write(policy->cpu);
+ up_write(&policy->rwsem);
-fail:
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
@@ -1871,10 +1907,10 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
EXPORT_SYMBOL(cpufreq_get_policy);
/*
- * data : current policy.
- * policy : policy to be set.
+ * policy : current policy.
+ * new_policy: policy to be set.
*/
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy)
{
int ret = 0, failed = 1;
@@ -1934,10 +1970,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy,
/* end old governor */
if (policy->governor) {
__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- unlock_policy_rwsem_write(new_policy->cpu);
+ up_write(&policy->rwsem);
__cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT);
- lock_policy_rwsem_write(new_policy->cpu);
+ down_write(&policy->rwsem);
}
/* start new governor */
@@ -1946,10 +1982,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy,
if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
failed = 0;
} else {
- unlock_policy_rwsem_write(new_policy->cpu);
+ up_write(&policy->rwsem);
__cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT);
- lock_policy_rwsem_write(new_policy->cpu);
+ down_write(&policy->rwsem);
}
}
@@ -1995,10 +2031,7 @@ int cpufreq_update_policy(unsigned int cpu)
goto no_policy;
}
- if (unlikely(lock_policy_rwsem_write(cpu))) {
- ret = -EINVAL;
- goto fail;
- }
+ down_write(&policy->rwsem);
pr_debug("updating policy for CPU %u\n", cpu);
memcpy(&new_policy, policy, sizeof(*policy));
@@ -2017,17 +2050,16 @@ int cpufreq_update_policy(unsigned int cpu)
pr_debug("Driver did not initialize current freq");
policy->cur = new_policy.cur;
} else {
- if (policy->cur != new_policy.cur && cpufreq_driver->target)
+ if (policy->cur != new_policy.cur && has_target())
cpufreq_out_of_sync(cpu, policy->cur,
new_policy.cur);
}
}
- ret = __cpufreq_set_policy(policy, &new_policy);
+ ret = cpufreq_set_policy(policy, &new_policy);
- unlock_policy_rwsem_write(cpu);
+ up_write(&policy->rwsem);
-fail:
cpufreq_cpu_put(policy);
no_policy:
return ret;
@@ -2096,7 +2128,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
return -ENODEV;
if (!driver_data || !driver_data->verify || !driver_data->init ||
- ((!driver_data->setpolicy) && (!driver_data->target)))
+ !(driver_data->setpolicy || driver_data->target_index ||
+ driver_data->target))
return -EINVAL;
pr_debug("trying to register driver %s\n", driver_data->name);
@@ -2183,14 +2216,9 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
- int cpu;
-
if (cpufreq_disabled())
return -ENODEV;
- for_each_possible_cpu(cpu)
- init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
-
cpufreq_global_kobject = kobject_create();
BUG_ON(!cpufreq_global_kobject);
register_syscore_ops(&cpufreq_syscore_ops);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 88cd39f7b0e9..b5f2b8618949 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -191,7 +191,10 @@ struct common_dbs_data {
struct attribute_group *attr_group_gov_sys; /* one governor - system */
struct attribute_group *attr_group_gov_pol; /* one governor - policy */
- /* Common data for platforms that don't set have_governor_per_policy */
+ /*
+ * Common data for platforms that don't set
+ * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
+ */
struct dbs_data *gdbs_data;
struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 32f26f6e17c5..18d409189092 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -168,7 +168,6 @@ static void od_check_cpu(int cpu, unsigned int load)
dbs_info->rate_mult =
od_tuners->sampling_down_factor;
dbs_freq_increase(policy, policy->max);
- return;
} else {
/* Calculate the next frequency proportional to load */
unsigned int freq_next;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 03078090b5f7..4dbf1db16aca 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -38,18 +38,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;
- /*
- * We're safe from concurrent calls to ->target() here
- * as we hold the userspace_mutex lock. If we were calling
- * cpufreq_driver_target, a deadlock situation might occur:
- * A: cpufreq_set (lock userspace_mutex) ->
- * cpufreq_driver_target(lock policy->lock)
- * B: cpufreq_set_policy(lock policy->lock) ->
- * __cpufreq_governor ->
- * cpufreq_governor_userspace (lock userspace_mutex)
- */
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
-
err:
mutex_unlock(&userspace_mutex);
return ret;
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
index cb8276dd19ca..86559040c54c 100644
--- a/drivers/cpufreq/cris-artpec3-cpufreq.c
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -27,18 +27,11 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
return clk_ctrl.pll ? 200000 : 6000;
}
-static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int state)
+static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
{
- struct cpufreq_freqs freqs;
reg_clkgen_rw_clk_ctrl clk_ctrl;
clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
- freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
- freqs.new = cris_freq_table[state].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
local_irq_disable();
/* Even though we may be SMP they will share the same clock
@@ -51,67 +44,22 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int cris_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
-}
-
-static int cris_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target(policy, cris_freq_table,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- cris_freq_set_cpu_state(policy, newstate);
-
return 0;
}
static int cris_freq_cpu_init(struct cpufreq_policy *policy)
{
- int result;
-
- /* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = 1000000; /* 1ms */
- policy->cur = cris_freq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
- if (result)
- return (result);
-
- cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
-
- return 0;
-}
-
-
-static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, cris_freq_table, 1000000);
}
-
-static struct freq_attr *cris_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver cris_freq_driver = {
.get = cris_freq_get_cpu_frequency,
- .verify = cris_freq_verify,
- .target = cris_freq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cris_freq_target,
.init = cris_freq_cpu_init,
- .exit = cris_freq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "cris_freq",
- .attr = cris_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init cris_freq_init(void)
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
index 72328f77dc53..26d940d40b1d 100644
--- a/drivers/cpufreq/cris-etraxfs-cpufreq.c
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -27,18 +27,11 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
return clk_ctrl.pll ? 200000 : 6000;
}
-static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int state)
+static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
{
- struct cpufreq_freqs freqs;
reg_config_rw_clk_ctrl clk_ctrl;
clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
- freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
- freqs.new = cris_freq_table[state].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
local_irq_disable();
/* Even though we may be SMP they will share the same clock
@@ -51,64 +44,22 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int cris_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
-}
-
-static int cris_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target
- (policy, cris_freq_table, target_freq, relation, &newstate))
- return -EINVAL;
-
- cris_freq_set_cpu_state(policy, newstate);
-
return 0;
}
static int cris_freq_cpu_init(struct cpufreq_policy *policy)
{
- int result;
-
- /* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = 1000000; /* 1ms */
- policy->cur = cris_freq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
- if (result)
- return (result);
-
- cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
-
- return 0;
+ return cpufreq_generic_init(policy, cris_freq_table, 1000000);
}
-static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *cris_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver cris_freq_driver = {
.get = cris_freq_get_cpu_frequency,
- .verify = cris_freq_verify,
- .target = cris_freq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cris_freq_target,
.init = cris_freq_cpu_init,
- .exit = cris_freq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "cris_freq",
- .attr = cris_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init cris_freq_init(void)
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 551dd655c6f2..5e8a854381b7 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -50,9 +50,7 @@ static int davinci_verify_speed(struct cpufreq_policy *policy)
if (policy->cpu)
return -EINVAL;
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
-
+ cpufreq_verify_within_cpu_limits(policy);
policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
@@ -68,58 +66,38 @@ static unsigned int davinci_getspeed(unsigned int cpu)
return clk_get_rate(cpufreq.armclk) / 1000;
}
-static int davinci_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
{
- int ret = 0;
- unsigned int idx;
- struct cpufreq_freqs freqs;
struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
struct clk *armclk = cpufreq.armclk;
+ unsigned int old_freq, new_freq;
+ int ret = 0;
- freqs.old = davinci_getspeed(0);
- freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000;
-
- if (freqs.old == freqs.new)
- return ret;
-
- dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new);
-
- ret = cpufreq_frequency_table_target(policy, pdata->freq_table,
- freqs.new, relation, &idx);
- if (ret)
- return -EINVAL;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ old_freq = davinci_getspeed(0);
+ new_freq = pdata->freq_table[idx].frequency;
/* if moving to higher frequency, up the voltage beforehand */
- if (pdata->set_voltage && freqs.new > freqs.old) {
+ if (pdata->set_voltage && new_freq > old_freq) {
ret = pdata->set_voltage(idx);
if (ret)
- goto out;
+ return ret;
}
ret = clk_set_rate(armclk, idx);
if (ret)
- goto out;
+ return ret;
if (cpufreq.asyncclk) {
ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
if (ret)
- goto out;
+ return ret;
}
/* if moving to lower freq, lower the voltage after lowering freq */
- if (pdata->set_voltage && freqs.new < freqs.old)
+ if (pdata->set_voltage && new_freq < old_freq)
pdata->set_voltage(idx);
-out:
- if (ret)
- freqs.new = freqs.old;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
+ return 0;
}
static int davinci_cpu_init(struct cpufreq_policy *policy)
@@ -138,47 +116,24 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
return result;
}
- policy->cur = davinci_getspeed(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (result) {
- pr_err("%s: cpufreq_frequency_table_cpuinfo() failed",
- __func__);
- return result;
- }
-
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
/*
* Time measurement across the target() function yields ~1500-1800 us
* with no drivers on the notification list.
* The latency is set to 2000 us to accommodate the addition of drivers
* to the pre/post change notification list.
*/
- policy->cpuinfo.transition_latency = 2000 * 1000;
- return 0;
+ return cpufreq_generic_init(policy, freq_table, 2000 * 1000);
}
-static int davinci_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *davinci_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver davinci_driver = {
.flags = CPUFREQ_STICKY,
.verify = davinci_verify_speed,
- .target = davinci_target,
+ .target_index = davinci_target,
.get = davinci_getspeed,
.init = davinci_cpu_init,
- .exit = davinci_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "davinci",
- .attr = davinci_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 26321cdc1946..0e67ab96321a 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -19,51 +19,11 @@
static struct cpufreq_frequency_table *freq_table;
static struct clk *armss_clk;
-static struct freq_attr *dbx500_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
- unsigned int idx;
- int ret;
-
- /* Lookup the next frequency */
- if (cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &idx))
- return -EINVAL;
-
- freqs.old = policy->cur;
- freqs.new = freq_table[idx].frequency;
-
- if (freqs.old == freqs.new)
- return 0;
-
- /* pre-change notification */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* update armss clk frequency */
- ret = clk_set_rate(armss_clk, freqs.new * 1000);
-
- if (ret) {
- pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n",
- freqs.new * 1000, ret);
- freqs.new = freqs.old;
- }
-
- /* post change notification */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
+ return clk_set_rate(armss_clk, freq_table[index].frequency * 1000);
}
static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
@@ -84,43 +44,17 @@ static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
{
- int res;
-
- /* get policy fields based on the table */
- res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (!res)
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
- else {
- pr_err("dbx500-cpufreq: Failed to read policy table\n");
- return res;
- }
-
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
- policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
- /*
- * FIXME : Need to take time measurement across the target()
- * function with no/some/all drivers in the notification
- * list.
- */
- policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
-
- /* policy sharing between dual CPUs */
- cpumask_setall(policy->cpus);
-
- return 0;
+ return cpufreq_generic_init(policy, freq_table, 20 * 1000);
}
static struct cpufreq_driver dbx500_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
- .verify = dbx500_cpufreq_verify_speed,
- .target = dbx500_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = dbx500_cpufreq_target,
.get = dbx500_cpufreq_getspeed,
.init = dbx500_cpufreq_init,
.name = "DBX500",
- .attr = dbx500_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int dbx500_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 09f64cc83019..9012b8bb6b64 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -107,15 +107,9 @@ static int eps_set_state(struct eps_cpu_data *centaur,
struct cpufreq_policy *policy,
u32 dest_state)
{
- struct cpufreq_freqs freqs;
u32 lo, hi;
- int err = 0;
int i;
- freqs.old = eps_get(policy->cpu);
- freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* Wait while CPU is busy */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i = 0;
@@ -124,8 +118,7 @@ static int eps_set_state(struct eps_cpu_data *centaur,
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i++;
if (unlikely(i > 64)) {
- err = -ENODEV;
- goto postchange;
+ return -ENODEV;
}
}
/* Set new multiplier and voltage */
@@ -137,16 +130,10 @@ static int eps_set_state(struct eps_cpu_data *centaur,
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i++;
if (unlikely(i > 64)) {
- err = -ENODEV;
- goto postchange;
+ return -ENODEV;
}
} while (lo & ((1 << 16) | (1 << 17)));
- /* Return current frequency */
-postchange:
- rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
- freqs.new = centaur->fsb * ((lo >> 8) & 0xff);
-
#ifdef DEBUG
{
u8 current_multiplier, current_voltage;
@@ -161,19 +148,12 @@ postchange:
current_multiplier);
}
#endif
- if (err)
- freqs.new = freqs.old;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- return err;
+ return 0;
}
-static int eps_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int eps_target(struct cpufreq_policy *policy, unsigned int index)
{
struct eps_cpu_data *centaur;
- unsigned int newstate = 0;
unsigned int cpu = policy->cpu;
unsigned int dest_state;
int ret;
@@ -182,28 +162,14 @@ static int eps_target(struct cpufreq_policy *policy,
return -ENODEV;
centaur = eps_cpu[cpu];
- if (unlikely(cpufreq_frequency_table_target(policy,
- &eps_cpu[cpu]->freq_table[0],
- target_freq,
- relation,
- &newstate))) {
- return -EINVAL;
- }
-
/* Make frequency transition */
- dest_state = centaur->freq_table[newstate].driver_data & 0xffff;
+ dest_state = centaur->freq_table[index].driver_data & 0xffff;
ret = eps_set_state(centaur, policy, dest_state);
if (ret)
printk(KERN_ERR "eps: Timeout!\n");
return ret;
}
-static int eps_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- &eps_cpu[policy->cpu]->freq_table[0]);
-}
-
static int eps_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i;
@@ -401,15 +367,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
}
policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
- policy->cur = fsb * current_multiplier;
- ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
+ ret = cpufreq_table_validate_and_show(policy, &centaur->freq_table[0]);
if (ret) {
kfree(centaur);
return ret;
}
- cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
return 0;
}
@@ -424,19 +388,14 @@ static int eps_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static struct freq_attr *eps_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver eps_driver = {
- .verify = eps_verify,
- .target = eps_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = eps_target,
.init = eps_cpu_init,
.exit = eps_cpu_exit,
.get = eps_get,
.name = "e_powersaver",
- .attr = eps_attr,
+ .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 823a400d98fd..de08acff5101 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -105,32 +105,9 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
}
-/**
- * elanfreq_set_cpu_frequency: Change the CPU core frequency
- * @cpu: cpu number
- * @freq: frequency in kHz
- *
- * This function takes a frequency value and changes the CPU frequency
- * according to this. Note that the frequency has to be checked by
- * elanfreq_validatespeed() for correctness!
- *
- * There is no return value.
- */
-
-static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int state)
+static int elanfreq_target(struct cpufreq_policy *policy,
+ unsigned int state)
{
- struct cpufreq_freqs freqs;
-
- freqs.old = elanfreq_get_cpu_frequency(0);
- freqs.new = elan_multiplier[state].clock;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
- elan_multiplier[state].clock);
-
-
/*
* Access to the Elan's internal registers is indexed via
* 0x22: Chip Setup & Control Register Index Register (CSCI)
@@ -161,39 +138,8 @@ static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
udelay(10000);
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-
-/**
- * elanfreq_validatespeed: test if frequency range is valid
- * @policy: the policy to validate
- *
- * This function checks if a given frequency range in kHz is valid
- * for the hardware supported by the driver.
- */
-
-static int elanfreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
-}
-
-static int elanfreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target(policy, &elanfreq_table[0],
- target_freq, relation, &newstate))
- return -EINVAL;
-
- elanfreq_set_cpu_state(policy, newstate);
-
return 0;
}
-
-
/*
* Module init and exit code
*/
@@ -202,7 +148,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(0);
unsigned int i;
- int result;
/* capability check */
if ((c->x86_vendor != X86_VENDOR_AMD) ||
@@ -221,21 +166,8 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = elanfreq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table);
- if (result)
- return result;
- cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
- return 0;
-}
-
-
-static int elanfreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, elanfreq_table);
}
@@ -261,20 +193,14 @@ __setup("elanfreq=", elanfreq_setup);
#endif
-static struct freq_attr *elanfreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-
static struct cpufreq_driver elanfreq_driver = {
.get = elanfreq_get_cpu_frequency,
- .verify = elanfreq_verify,
- .target = elanfreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = elanfreq_target,
.init = elanfreq_cpu_init,
- .exit = elanfreq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "elanfreq",
- .attr = elanfreq_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id elan_id[] = {
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 0fac34439e31..f3c22874da75 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -25,18 +25,11 @@
static struct exynos_dvfs_info *exynos_info;
static struct regulator *arm_regulator;
-static struct cpufreq_freqs freqs;
static unsigned int locking_frequency;
static bool frequency_locked;
static DEFINE_MUTEX(cpufreq_lock);
-static int exynos_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- exynos_info->freq_table);
-}
-
static unsigned int exynos_getspeed(unsigned int cpu)
{
return clk_get_rate(exynos_info->cpu_clk) / 1000;
@@ -65,21 +58,18 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
unsigned int arm_volt, safe_arm_volt = 0;
unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
+ unsigned int old_freq;
int index, old_index;
int ret = 0;
- freqs.old = policy->cur;
- freqs.new = target_freq;
-
- if (freqs.new == freqs.old)
- goto out;
+ old_freq = policy->cur;
/*
* The policy max may have been changed, so we cannot get a proper
* old_index with cpufreq_frequency_table_target(). Thus, ignore
- * policy and get the index from the raw freqeuncy table.
+ * policy and get the index from the raw frequency table.
*/
- old_index = exynos_cpufreq_get_index(freqs.old);
+ old_index = exynos_cpufreq_get_index(old_freq);
if (old_index < 0) {
ret = old_index;
goto out;
@@ -104,17 +94,14 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
}
arm_volt = volt_table[index];
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* When the new frequency is higher than current frequency */
- if ((freqs.new > freqs.old) && !safe_arm_volt) {
+ if ((target_freq > old_freq) && !safe_arm_volt) {
/* Firstly, voltage up to increase frequency */
ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
__func__, arm_volt);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
}
@@ -124,24 +111,17 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
__func__, safe_arm_volt);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
}
exynos_info->set_freq(old_index, index);
-post_notify:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- if (ret)
- goto out;
-
/* When the new frequency is lower than current frequency */
- if ((freqs.new < freqs.old) ||
- ((freqs.new > freqs.old) && safe_arm_volt)) {
+ if ((target_freq < old_freq) ||
+ ((target_freq > old_freq) && safe_arm_volt)) {
/* down the voltage after frequency change */
- regulator_set_voltage(arm_regulator, arm_volt,
+ ret = regulator_set_voltage(arm_regulator, arm_volt,
arm_volt);
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
@@ -151,19 +131,14 @@ post_notify:
}
out:
-
cpufreq_cpu_put(policy);
return ret;
}
-static int exynos_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
- unsigned int index;
- unsigned int new_freq;
int ret = 0;
mutex_lock(&cpufreq_lock);
@@ -171,15 +146,7 @@ static int exynos_target(struct cpufreq_policy *policy,
if (frequency_locked)
goto out;
- if (cpufreq_frequency_table_target(policy, freq_table,
- target_freq, relation, &index)) {
- ret = -EINVAL;
- goto out;
- }
-
- new_freq = freq_table[index].frequency;
-
- ret = exynos_cpufreq_scale(new_freq);
+ ret = exynos_cpufreq_scale(freq_table[index].frequency);
out:
mutex_unlock(&cpufreq_lock);
@@ -247,38 +214,18 @@ static struct notifier_block exynos_cpufreq_nb = {
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);
-
- cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
-
- /* set the transition latency value */
- policy->cpuinfo.transition_latency = 100000;
-
- cpumask_setall(policy->cpus);
-
- return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
-}
-
-static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
}
-static struct freq_attr *exynos_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver exynos_driver = {
.flags = CPUFREQ_STICKY,
- .verify = exynos_verify_speed,
- .target = exynos_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = exynos_target,
.get = exynos_getspeed,
.init = exynos_cpufreq_cpu_init,
- .exit = exynos_cpufreq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "exynos_cpufreq",
- .attr = exynos_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
#ifdef CONFIG_PM
.suspend = exynos_cpufreq_suspend,
.resume = exynos_cpufreq_resume,
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index add7fbec4fc9..f2c75065ce19 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -81,9 +81,9 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
static void exynos4210_set_apll(unsigned int index)
{
- unsigned int tmp;
+ unsigned int tmp, freq = apll_freq_4210[index].freq;
- /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+ /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
clk_set_parent(moutcore, mout_mpll);
do {
@@ -92,21 +92,9 @@ static void exynos4210_set_apll(unsigned int index)
tmp &= 0x7;
} while (tmp != 0x2);
- /* 2. Set APLL Lock time */
- __raw_writel(EXYNOS4_APLL_LOCKTIME, EXYNOS4_APLL_LOCK);
-
- /* 3. Change PLL PMS values */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= apll_freq_4210[index].mps;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
+ clk_set_rate(mout_apll, freq * 1000);
- /* 4. wait_lock_time */
- do {
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
-
- /* 5. MUX_CORE_SEL = APLL */
+ /* MUX_CORE_SEL = APLL */
clk_set_parent(moutcore, mout_apll);
do {
@@ -115,53 +103,15 @@ static void exynos4210_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
-static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
-{
- unsigned int old_pm = apll_freq_4210[old_index].mps >> 8;
- unsigned int new_pm = apll_freq_4210[new_index].mps >> 8;
-
- return (old_pm == new_pm) ? 0 : 1;
-}
-
static void exynos4210_set_frequency(unsigned int old_index,
unsigned int new_index)
{
- unsigned int tmp;
-
if (old_index > new_index) {
- if (!exynos4210_pms_change(old_index, new_index)) {
- /* 1. Change the system clock divider values */
- exynos4210_set_clkdiv(new_index);
-
- /* 2. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_4210[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the system clock divider values */
- exynos4210_set_clkdiv(new_index);
- /* 2. Change the apll m,p,s value */
- exynos4210_set_apll(new_index);
- }
+ exynos4210_set_clkdiv(new_index);
+ exynos4210_set_apll(new_index);
} else if (old_index < new_index) {
- if (!exynos4210_pms_change(old_index, new_index)) {
- /* 1. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_4210[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
- /* 2. Change the system clock divider values */
- exynos4210_set_clkdiv(new_index);
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the apll m,p,s value */
- exynos4210_set_apll(new_index);
- /* 2. Change the system clock divider values */
- exynos4210_set_clkdiv(new_index);
- }
+ exynos4210_set_apll(new_index);
+ exynos4210_set_clkdiv(new_index);
}
}
@@ -194,7 +144,6 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
info->volt_table = exynos4210_volt_table;
info->freq_table = exynos4210_freq_table;
info->set_freq = exynos4210_set_frequency;
- info->need_apll_change = exynos4210_pms_change;
return 0;
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 08b7477b0aa2..8683304ce62c 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -128,9 +128,9 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
static void exynos4x12_set_apll(unsigned int index)
{
- unsigned int tmp, pdiv;
+ unsigned int tmp, freq = apll_freq_4x12[index].freq;
- /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+ /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
clk_set_parent(moutcore, mout_mpll);
do {
@@ -140,24 +140,9 @@ static void exynos4x12_set_apll(unsigned int index)
tmp &= 0x7;
} while (tmp != 0x2);
- /* 2. Set APLL Lock time */
- pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f);
+ clk_set_rate(mout_apll, freq * 1000);
- __raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK);
-
- /* 3. Change PLL PMS values */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= apll_freq_4x12[index].mps;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
- /* 4. wait_lock_time */
- do {
- cpu_relax();
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
-
- /* 5. MUX_CORE_SEL = APLL */
+ /* MUX_CORE_SEL = APLL */
clk_set_parent(moutcore, mout_apll);
do {
@@ -167,52 +152,15 @@ static void exynos4x12_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
-static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
-{
- unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8;
- unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8;
-
- return (old_pm == new_pm) ? 0 : 1;
-}
-
static void exynos4x12_set_frequency(unsigned int old_index,
unsigned int new_index)
{
- unsigned int tmp;
-
if (old_index > new_index) {
- if (!exynos4x12_pms_change(old_index, new_index)) {
- /* 1. Change the system clock divider values */
- exynos4x12_set_clkdiv(new_index);
- /* 2. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_4x12[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the system clock divider values */
- exynos4x12_set_clkdiv(new_index);
- /* 2. Change the apll m,p,s value */
- exynos4x12_set_apll(new_index);
- }
+ exynos4x12_set_clkdiv(new_index);
+ exynos4x12_set_apll(new_index);
} else if (old_index < new_index) {
- if (!exynos4x12_pms_change(old_index, new_index)) {
- /* 1. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_4x12[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
- /* 2. Change the system clock divider values */
- exynos4x12_set_clkdiv(new_index);
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the apll m,p,s value */
- exynos4x12_set_apll(new_index);
- /* 2. Change the system clock divider values */
- exynos4x12_set_clkdiv(new_index);
- }
+ exynos4x12_set_apll(new_index);
+ exynos4x12_set_clkdiv(new_index);
}
}
@@ -250,7 +198,6 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
info->volt_table = exynos4x12_volt_table;
info->freq_table = exynos4x12_freq_table;
info->set_freq = exynos4x12_set_frequency;
- info->need_apll_change = exynos4x12_pms_change;
return 0;
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index be5380ecdcd4..76bef8b078cb 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -118,12 +118,12 @@ static int init_div_table(void)
struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
unsigned int tmp, clk_div, ema_div, freq, volt_id;
int i = 0;
- struct opp *opp;
+ struct dev_pm_opp *opp;
rcu_read_lock();
for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
- opp = opp_find_freq_exact(dvfs_info->dev,
+ opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
freq_tbl[i].frequency * 1000, true);
if (IS_ERR(opp)) {
rcu_read_unlock();
@@ -142,7 +142,7 @@ static int init_div_table(void)
<< P0_7_CSCLKDEV_SHIFT;
/* Calculate EMA */
- volt_id = opp_get_voltage(opp);
+ volt_id = dev_pm_opp_get_voltage(opp);
volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
if (volt_id < PMIC_HIGH_VOLT) {
ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
@@ -209,38 +209,22 @@ static void exynos_enable_dvfs(void)
dvfs_info->base + XMU_DVFS_CTRL);
}
-static int exynos_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- dvfs_info->freq_table);
-}
-
static unsigned int exynos_getspeed(unsigned int cpu)
{
return dvfs_info->cur_frequency;
}
-static int exynos_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int index, tmp;
- int ret = 0, i;
+ unsigned int tmp;
+ int i;
struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
mutex_lock(&cpufreq_lock);
- ret = cpufreq_frequency_table_target(policy, freq_table,
- target_freq, relation, &index);
- if (ret)
- goto out;
-
freqs.old = dvfs_info->cur_frequency;
freqs.new = freq_table[index].frequency;
- if (freqs.old == freqs.new)
- goto out;
-
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* Set the target frequency in all C0_3_PSTATE register */
@@ -251,9 +235,8 @@ static int exynos_target(struct cpufreq_policy *policy,
__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
}
-out:
mutex_unlock(&cpufreq_lock);
- return ret;
+ return 0;
}
static void exynos_cpufreq_work(struct work_struct *work)
@@ -324,30 +307,19 @@ static void exynos_sort_descend_freq_table(void)
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- int ret;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
- if (ret) {
- dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
- return ret;
- }
-
- policy->cur = dvfs_info->cur_frequency;
- policy->cpuinfo.transition_latency = dvfs_info->latency;
- cpumask_setall(policy->cpus);
-
- cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);
-
- return 0;
+ return cpufreq_generic_init(policy, dvfs_info->freq_table,
+ dvfs_info->latency);
}
static struct cpufreq_driver exynos_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = exynos_verify_speed,
- .target = exynos_target,
+ .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = exynos_target,
.get = exynos_getspeed,
.init = exynos_cpufreq_cpu_init,
+ .exit = cpufreq_generic_exit,
.name = CPUFREQ_NAME,
+ .attr = cpufreq_generic_attr,
};
static const struct of_device_id exynos_cpufreq_match[] = {
@@ -399,13 +371,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
goto err_put_node;
}
- ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+ ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
+ &dvfs_info->freq_table);
if (ret) {
dev_err(dvfs_info->dev,
"failed to init cpufreq table: %d\n", ret);
goto err_put_node;
}
- dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev);
+ dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
exynos_sort_descend_freq_table();
if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
@@ -454,7 +427,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
return 0;
err_free_table:
- opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+ dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
err_put_node:
of_node_put(np);
dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
@@ -464,7 +437,7 @@ err_put_node:
static int exynos_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&exynos_driver);
- opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+ dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
return 0;
}
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index f111454a7aea..3458d27f63b4 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -54,31 +54,30 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
- unsigned int next_larger = ~0;
- unsigned int i;
- unsigned int count = 0;
+ unsigned int next_larger = ~0, freq, i = 0;
+ bool found = false;
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
- for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
- unsigned int freq = table[i].frequency;
+ for (; freq = table[i].frequency, freq != CPUFREQ_TABLE_END; i++) {
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
- if ((freq >= policy->min) && (freq <= policy->max))
- count++;
- else if ((next_larger > freq) && (freq > policy->max))
+ if ((freq >= policy->min) && (freq <= policy->max)) {
+ found = true;
+ break;
+ }
+
+ if ((next_larger > freq) && (freq > policy->max))
next_larger = freq;
}
- if (!count)
+ if (!found) {
policy->max = next_larger;
-
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
+ }
pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
@@ -87,6 +86,20 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
+/*
+ * Generic routine to verify the policy & frequency table; requires the driver
+ * to call cpufreq_frequency_table_get_attr() beforehand.
+ */
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *table =
+ cpufreq_frequency_get_table(policy->cpu);
+ if (!table)
+ return -ENODEV;
+
+ return cpufreq_frequency_table_verify(policy, table);
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
@@ -200,6 +213,12 @@ struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
};
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
+struct freq_attr *cpufreq_generic_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
+
/*
* if you use these, you must ensure that the frequency table is valid
* all the time between get_attr and put_attr!
@@ -219,6 +238,18 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
+int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table)
+{
+ int ret = cpufreq_frequency_table_cpuinfo(policy, table);
+
+ if (!ret)
+ cpufreq_frequency_table_get_attr(table, policy->cpu);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
+
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
{
pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index 70442c7b5e71..d83e8266a58e 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -401,7 +401,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy,
static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
{
- unsigned int maxfreq, curfreq;
+ unsigned int maxfreq;
if (!policy || policy->cpu != 0)
return -ENODEV;
@@ -415,10 +415,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
stock_freq = maxfreq;
- curfreq = gx_get_cpuspeed(0);
pr_debug("cpu max frequency is %d.\n", maxfreq);
- pr_debug("cpu current frequency is %dkHz.\n", curfreq);
/* setup basic struct for cpufreq API */
policy->cpu = 0;
@@ -428,7 +426,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
else
policy->min = maxfreq / POLICY_MIN_DIV;
policy->max = maxfreq;
- policy->cur = curfreq;
policy->cpuinfo.min_freq = maxfreq / max_duration;
policy->cpuinfo.max_freq = maxfreq;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index 794123fcf3e3..bf8902a0866d 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -66,7 +66,8 @@ static int hb_cpufreq_driver_init(void)
struct device_node *np;
int ret;
- if (!of_machine_is_compatible("calxeda,highbank"))
+ if ((!of_machine_is_compatible("calxeda,highbank")) &&
+ (!of_machine_is_compatible("calxeda,ecx-2000")))
return -ENODEV;
cpu_dev = get_cpu_device(0);
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index 3e14f0317175..53c6ac637e10 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -141,7 +141,6 @@ processor_set_freq (
{
int ret = 0;
u32 value = 0;
- struct cpufreq_freqs cpufreq_freqs;
cpumask_t saved_mask;
int retval;
@@ -168,13 +167,6 @@ processor_set_freq (
pr_debug("Transitioning from P%d to P%d\n",
data->acpi_data.state, state);
- /* cpufreq frequency struct */
- cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
- cpufreq_freqs.new = data->freq_table[state].frequency;
-
- /* notify cpufreq */
- cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE);
-
/*
* First we write the target state's 'control' value to the
* control_register.
@@ -186,22 +178,11 @@ processor_set_freq (
ret = processor_set_pstate(value);
if (ret) {
- unsigned int tmp = cpufreq_freqs.new;
- cpufreq_notify_transition(policy, &cpufreq_freqs,
- CPUFREQ_POSTCHANGE);
- cpufreq_freqs.new = cpufreq_freqs.old;
- cpufreq_freqs.old = tmp;
- cpufreq_notify_transition(policy, &cpufreq_freqs,
- CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(policy, &cpufreq_freqs,
- CPUFREQ_POSTCHANGE);
printk(KERN_WARNING "Transition failed with error %d\n", ret);
retval = -ENODEV;
goto migrate_end;
}
- cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE);
-
data->acpi_data.state = state;
retval = 0;
@@ -227,42 +208,11 @@ acpi_cpufreq_get (
static int
acpi_cpufreq_target (
struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
- unsigned int next_state = 0;
- unsigned int result = 0;
-
- pr_debug("acpi_cpufreq_setpolicy\n");
-
- result = cpufreq_frequency_table_target(policy,
- data->freq_table, target_freq, relation, &next_state);
- if (result)
- return (result);
-
- result = processor_set_freq(data, policy, next_state);
-
- return (result);
-}
-
-
-static int
-acpi_cpufreq_verify (
- struct cpufreq_policy *policy)
+ unsigned int index)
{
- unsigned int result = 0;
- struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-
- pr_debug("acpi_cpufreq_verify\n");
-
- result = cpufreq_frequency_table_verify(policy,
- data->freq_table);
-
- return (result);
+ return processor_set_freq(acpi_io_data[policy->cpu], policy, index);
}
-
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
@@ -321,7 +271,6 @@ acpi_cpufreq_cpu_init (
data->acpi_data.states[i].transition_latency * 1000;
}
}
- policy->cur = processor_get_freq(data, policy->cpu);
/* table init */
for (i = 0; i <= data->acpi_data.state_count; i++)
@@ -335,7 +284,7 @@ acpi_cpufreq_cpu_init (
}
}
- result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+ result = cpufreq_table_validate_and_show(policy, data->freq_table);
if (result) {
goto err_freqfree;
}
@@ -356,8 +305,6 @@ acpi_cpufreq_cpu_init (
(u32) data->acpi_data.states[i].status,
(u32) data->acpi_data.states[i].control);
- cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
/* the first call to ->target() should result in us actually
* writing something to the appropriate registers. */
data->resume = 1;
@@ -396,20 +343,14 @@ acpi_cpufreq_cpu_exit (
}
-static struct freq_attr* acpi_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-
static struct cpufreq_driver acpi_cpufreq_driver = {
- .verify = acpi_cpufreq_verify,
- .target = acpi_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = acpi_cpufreq_target,
.get = acpi_cpufreq_get,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
.name = "acpi-cpufreq",
- .attr = acpi_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index c3fd2a101ca0..4b3f18e5f36b 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -13,7 +13,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -35,73 +35,52 @@ static struct device *cpu_dev;
static struct cpufreq_frequency_table *freq_table;
static unsigned int transition_latency;
-static int imx6q_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static unsigned int imx6q_get_speed(unsigned int cpu)
{
return clk_get_rate(arm_clk) / 1000;
}
-static int imx6q_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct cpufreq_freqs freqs;
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long freq_hz, volt, volt_old;
- unsigned int index;
+ unsigned int old_freq, new_freq;
int ret;
- ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &index);
- if (ret) {
- dev_err(cpu_dev, "failed to match target frequency %d: %d\n",
- target_freq, ret);
- return ret;
- }
-
- freqs.new = freq_table[index].frequency;
- freq_hz = freqs.new * 1000;
- freqs.old = clk_get_rate(arm_clk) / 1000;
-
- if (freqs.old == freqs.new)
- return 0;
+ new_freq = freq_table[index].frequency;
+ freq_hz = new_freq * 1000;
+ old_freq = clk_get_rate(arm_clk) / 1000;
rcu_read_lock();
- opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
return PTR_ERR(opp);
}
- volt = opp_get_voltage(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
volt_old = regulator_get_voltage(arm_reg);
dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
- freqs.old / 1000, volt_old / 1000,
- freqs.new / 1000, volt / 1000);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ old_freq / 1000, volt_old / 1000,
+ new_freq / 1000, volt / 1000);
/* scaling up? scale voltage before frequency */
- if (freqs.new > freqs.old) {
+ if (new_freq > old_freq) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret) {
dev_err(cpu_dev,
"failed to scale vddarm up: %d\n", ret);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
/*
* Need to increase vddpu and vddsoc for safety
* if we are about to run at 1.2 GHz.
*/
- if (freqs.new == FREQ_1P2_GHZ / 1000) {
+ if (new_freq == FREQ_1P2_GHZ / 1000) {
regulator_set_voltage_tol(pu_reg,
PU_SOC_VOLTAGE_HIGH, 0);
regulator_set_voltage_tol(soc_reg,
@@ -121,21 +100,20 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
clk_set_parent(step_clk, pll2_pfd2_396m_clk);
clk_set_parent(pll1_sw_clk, step_clk);
if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
- clk_set_rate(pll1_sys_clk, freqs.new * 1000);
+ clk_set_rate(pll1_sys_clk, new_freq * 1000);
clk_set_parent(pll1_sw_clk, pll1_sys_clk);
}
/* Ensure the arm clock divider is what we expect */
- ret = clk_set_rate(arm_clk, freqs.new * 1000);
+ ret = clk_set_rate(arm_clk, new_freq * 1000);
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
regulator_set_voltage_tol(arm_reg, volt_old, 0);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
/* scaling down? scale voltage after frequency */
- if (freqs.new < freqs.old) {
+ if (new_freq < old_freq) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret) {
dev_warn(cpu_dev,
@@ -143,7 +121,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
ret = 0;
}
- if (freqs.old == FREQ_1P2_GHZ / 1000) {
+ if (old_freq == FREQ_1P2_GHZ / 1000) {
regulator_set_voltage_tol(pu_reg,
PU_SOC_VOLTAGE_NORMAL, 0);
regulator_set_voltage_tol(soc_reg,
@@ -151,55 +129,28 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
}
}
-post_notify:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
-}
-
-static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (ret) {
- dev_err(cpu_dev, "invalid frequency table: %d\n", ret);
- return ret;
- }
-
- policy->cpuinfo.transition_latency = transition_latency;
- policy->cur = clk_get_rate(arm_clk) / 1000;
- cpumask_setall(policy->cpus);
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
return 0;
}
-static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
+static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, freq_table, transition_latency);
}
-static struct freq_attr *imx6q_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver imx6q_cpufreq_driver = {
- .verify = imx6q_verify_speed,
- .target = imx6q_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = imx6q_set_target,
.get = imx6q_get_speed,
.init = imx6q_cpufreq_init,
- .exit = imx6q_cpufreq_exit,
+ .exit = cpufreq_generic_exit,
.name = "imx6q-cpufreq",
- .attr = imx6q_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long min_volt, max_volt;
int num, ret;
@@ -237,14 +188,14 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
}
/* We expect an OPP table supplied by platform */
- num = opp_get_opp_count(cpu_dev);
+ num = dev_pm_opp_get_opp_count(cpu_dev);
if (num < 0) {
ret = num;
dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
goto put_node;
}
- ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto put_node;
@@ -259,12 +210,12 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
* same order.
*/
rcu_read_lock();
- opp = opp_find_freq_exact(cpu_dev,
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
- min_volt = opp_get_voltage(opp);
- opp = opp_find_freq_exact(cpu_dev,
+ min_volt = dev_pm_opp_get_voltage(opp);
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[--num].frequency * 1000, true);
- max_volt = opp_get_voltage(opp);
+ max_volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
if (ret > 0)
@@ -292,7 +243,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
return 0;
free_freq_table:
- opp_free_cpufreq_table(cpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
put_node:
of_node_put(np);
return ret;
@@ -301,7 +252,7 @@ put_node:
static int imx6q_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&imx6q_cpufreq_driver);
- opp_free_cpufreq_table(cpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
return 0;
}
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index f7c99df0880b..7d8ab000d317 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -15,18 +15,19 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
-#include <mach/hardware.h>
-#include <mach/platform.h>
#include <asm/mach-types.h>
#include <asm/hardware/icst.h>
-static struct cpufreq_driver integrator_driver;
+static void __iomem *cm_base;
+/* The cpufreq driver only uses the OSC register */
+#define INTEGRATOR_HDR_OSC_OFFSET 0x08
+#define INTEGRATOR_HDR_LOCK_OFFSET 0x14
-#define CM_ID __io_address(INTEGRATOR_HDR_ID)
-#define CM_OSC __io_address(INTEGRATOR_HDR_OSC)
-#define CM_STAT __io_address(INTEGRATOR_HDR_STAT)
-#define CM_LOCK __io_address(INTEGRATOR_HDR_LOCK)
+static struct cpufreq_driver integrator_driver;
static const struct icst_params lclk_params = {
.ref = 24000000,
@@ -59,9 +60,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy)
{
struct icst_vco vco;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
policy->max = icst_hz(&cclk_params, vco) / 1000;
@@ -69,10 +68,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy)
vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
policy->min = icst_hz(&cclk_params, vco) / 1000;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
-
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -100,7 +96,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
BUG_ON(cpu != smp_processor_id());
/* get current setting */
- cm_osc = __raw_readl(CM_OSC);
+ cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
if (machine_is_integrator()) {
vco.s = (cm_osc >> 8) & 7;
@@ -128,7 +124,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- cm_osc = __raw_readl(CM_OSC);
+ cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
if (machine_is_integrator()) {
cm_osc &= 0xfffff800;
@@ -138,9 +134,9 @@ static int integrator_set_target(struct cpufreq_policy *policy,
}
cm_osc |= vco.v;
- __raw_writel(0xa05f, CM_LOCK);
- __raw_writel(cm_osc, CM_OSC);
- __raw_writel(0, CM_LOCK);
+ __raw_writel(0xa05f, cm_base + INTEGRATOR_HDR_LOCK_OFFSET);
+ __raw_writel(cm_osc, cm_base + INTEGRATOR_HDR_OSC_OFFSET);
+ __raw_writel(0, cm_base + INTEGRATOR_HDR_LOCK_OFFSET);
/*
* Restore the CPUs allowed mask.
@@ -165,7 +161,7 @@ static unsigned int integrator_get(unsigned int cpu)
BUG_ON(cpu != smp_processor_id());
/* detect memory etc. */
- cm_osc = __raw_readl(CM_OSC);
+ cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
if (machine_is_integrator()) {
vco.s = (cm_osc >> 8) & 7;
@@ -186,10 +182,9 @@ static int integrator_cpufreq_init(struct cpufreq_policy *policy)
{
/* set default policy and cpuinfo */
- policy->cpuinfo.max_freq = 160000;
- policy->cpuinfo.min_freq = 12000;
+ policy->max = policy->cpuinfo.max_freq = 160000;
+ policy->min = policy->cpuinfo.min_freq = 12000;
policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
- policy->cur = policy->min = policy->max = integrator_get(policy->cpu);
return 0;
}
@@ -202,19 +197,43 @@ static struct cpufreq_driver integrator_driver = {
.name = "integrator",
};
-static int __init integrator_cpu_init(void)
+static int __init integrator_cpufreq_probe(struct platform_device *pdev)
{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!cm_base)
+ return -ENODEV;
+
return cpufreq_register_driver(&integrator_driver);
}
-static void __exit integrator_cpu_exit(void)
+static void __exit integrator_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&integrator_driver);
}
+static const struct of_device_id integrator_cpufreq_match[] = {
+ { .compatible = "arm,core-module-integrator"},
+ { },
+};
+
+static struct platform_driver integrator_cpufreq_driver = {
+ .driver = {
+ .name = "integrator-cpufreq",
+ .owner = THIS_MODULE,
+ .of_match_table = integrator_cpufreq_match,
+ },
+ .remove = __exit_p(integrator_cpufreq_remove),
+};
+
+module_platform_driver_probe(integrator_cpufreq_driver,
+ integrator_cpufreq_probe);
+
MODULE_AUTHOR ("Russell M. King");
MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs");
MODULE_LICENSE ("GPL");
-
-module_init(integrator_cpu_init);
-module_exit(integrator_cpu_exit);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index badf6206b2b2..6ab92210403e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -33,6 +33,8 @@
#define SAMPLE_COUNT 3
+#define BYT_RATIOS 0x66a
+
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
@@ -48,7 +50,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
}
struct sample {
- int core_pct_busy;
+ int32_t core_pct_busy;
u64 aperf;
u64 mperf;
int freq;
@@ -68,7 +70,7 @@ struct _pid {
int32_t i_gain;
int32_t d_gain;
int deadband;
- int last_err;
+ int32_t last_err;
};
struct cpudata {
@@ -78,7 +80,6 @@ struct cpudata {
struct timer_list timer;
- struct pstate_adjust_policy *pstate_policy;
struct pstate_data pstate;
struct _pid pid;
@@ -100,15 +101,21 @@ struct pstate_adjust_policy {
int i_gain_pct;
};
-static struct pstate_adjust_policy default_policy = {
- .sample_rate_ms = 10,
- .deadband = 0,
- .setpoint = 97,
- .p_gain_pct = 20,
- .d_gain_pct = 0,
- .i_gain_pct = 0,
+struct pstate_funcs {
+ int (*get_max)(void);
+ int (*get_min)(void);
+ int (*get_turbo)(void);
+ void (*set)(int pstate);
+};
+
+struct cpu_defaults {
+ struct pstate_adjust_policy pid_policy;
+ struct pstate_funcs funcs;
};
+static struct pstate_adjust_policy pid_params;
+static struct pstate_funcs pstate_funcs;
+
struct perf_limits {
int no_turbo;
int max_perf_pct;
@@ -153,16 +160,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
-static signed int pid_calc(struct _pid *pid, int busy)
+static signed int pid_calc(struct _pid *pid, int32_t busy)
{
- signed int err, result;
+ signed int result;
int32_t pterm, dterm, fp_error;
int32_t integral_limit;
- err = pid->setpoint - busy;
- fp_error = int_tofp(err);
+ fp_error = int_tofp(pid->setpoint) - busy;
- if (abs(err) <= pid->deadband)
+ if (abs(fp_error) <= int_tofp(pid->deadband))
return 0;
pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +182,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
if (pid->integral < -integral_limit)
pid->integral = -integral_limit;
- dterm = mul_fp(pid->d_gain, (err - pid->last_err));
- pid->last_err = err;
+ dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
+ pid->last_err = fp_error;
result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
@@ -186,14 +192,14 @@ static signed int pid_calc(struct _pid *pid, int busy)
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
- pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct);
- pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct);
- pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct);
+ pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
+ pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
+ pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
pid_reset(&cpu->pid,
- cpu->pstate_policy->setpoint,
+ pid_params.setpoint,
100,
- cpu->pstate_policy->deadband,
+ pid_params.deadband,
0);
}
@@ -227,12 +233,12 @@ struct pid_param {
};
static struct pid_param pid_files[] = {
- {"sample_rate_ms", &default_policy.sample_rate_ms},
- {"d_gain_pct", &default_policy.d_gain_pct},
- {"i_gain_pct", &default_policy.i_gain_pct},
- {"deadband", &default_policy.deadband},
- {"setpoint", &default_policy.setpoint},
- {"p_gain_pct", &default_policy.p_gain_pct},
+ {"sample_rate_ms", &pid_params.sample_rate_ms},
+ {"d_gain_pct", &pid_params.d_gain_pct},
+ {"i_gain_pct", &pid_params.i_gain_pct},
+ {"deadband", &pid_params.deadband},
+ {"setpoint", &pid_params.setpoint},
+ {"p_gain_pct", &pid_params.p_gain_pct},
{NULL, NULL}
};
@@ -337,42 +343,102 @@ static void intel_pstate_sysfs_expose_params(void)
}
/************************** sysfs end ************************/
+static int byt_get_min_pstate(void)
+{
+ u64 value;
+ rdmsrl(BYT_RATIOS, value);
+ return value & 0xFF;
+}
+
+static int byt_get_max_pstate(void)
+{
+ u64 value;
+ rdmsrl(BYT_RATIOS, value);
+ return (value >> 16) & 0xFF;
+}
-static int intel_pstate_min_pstate(void)
+static int core_get_min_pstate(void)
{
u64 value;
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 40) & 0xFF;
}
-static int intel_pstate_max_pstate(void)
+static int core_get_max_pstate(void)
{
u64 value;
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 8) & 0xFF;
}
-static int intel_pstate_turbo_pstate(void)
+static int core_get_turbo_pstate(void)
{
u64 value;
int nont, ret;
rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
- nont = intel_pstate_max_pstate();
+ nont = core_get_max_pstate();
ret = ((value) & 255);
if (ret <= nont)
ret = nont;
return ret;
}
+static void core_set_pstate(int pstate)
+{
+ u64 val;
+
+ val = pstate << 8;
+ if (limits.no_turbo)
+ val |= (u64)1 << 32;
+
+ wrmsrl(MSR_IA32_PERF_CTL, val);
+}
+
+static struct cpu_defaults core_params = {
+ .pid_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 97,
+ .p_gain_pct = 20,
+ .d_gain_pct = 0,
+ .i_gain_pct = 0,
+ },
+ .funcs = {
+ .get_max = core_get_max_pstate,
+ .get_min = core_get_min_pstate,
+ .get_turbo = core_get_turbo_pstate,
+ .set = core_set_pstate,
+ },
+};
+
+static struct cpu_defaults byt_params = {
+ .pid_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 97,
+ .p_gain_pct = 14,
+ .d_gain_pct = 0,
+ .i_gain_pct = 4,
+ },
+ .funcs = {
+ .get_max = byt_get_max_pstate,
+ .get_min = byt_get_min_pstate,
+ .get_turbo = byt_get_max_pstate,
+ .set = core_set_pstate,
+ },
+};
+
+
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
int max_perf = cpu->pstate.turbo_pstate;
+ int max_perf_adj;
int min_perf;
if (limits.no_turbo)
max_perf = cpu->pstate.max_pstate;
- max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
- *max = clamp_t(int, max_perf,
+ max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+ *max = clamp_t(int, max_perf_adj,
cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
@@ -383,7 +449,6 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
int max_perf, min_perf;
- u64 val;
intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
@@ -395,11 +460,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
trace_cpu_frequency(pstate * 100000, cpu->cpu);
cpu->pstate.current_pstate = pstate;
- val = pstate << 8;
- if (limits.no_turbo)
- val |= (u64)1 << 32;
- wrmsrl(MSR_IA32_PERF_CTL, val);
+ pstate_funcs.set(pstate);
}
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -421,9 +483,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
sprintf(cpu->name, "Intel 2nd generation core");
- cpu->pstate.min_pstate = intel_pstate_min_pstate();
- cpu->pstate.max_pstate = intel_pstate_max_pstate();
- cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate();
+ cpu->pstate.min_pstate = pstate_funcs.get_min();
+ cpu->pstate.max_pstate = pstate_funcs.get_max();
+ cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
/*
* goto max pstate so we don't slow up boot if we are built-in if we are
@@ -436,8 +498,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
struct sample *sample)
{
u64 core_pct;
- core_pct = div64_u64(sample->aperf * 100, sample->mperf);
- sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
+ core_pct = div64_u64(int_tofp(sample->aperf * 100),
+ sample->mperf);
+ sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
sample->core_pct_busy = core_pct;
}
@@ -464,27 +527,24 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
int sample_time, delay;
- sample_time = cpu->pstate_policy->sample_rate_ms;
+ sample_time = pid_params.sample_rate_ms;
delay = msecs_to_jiffies(sample_time);
mod_timer_pinned(&cpu->timer, jiffies + delay);
}
-static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
+static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
- int32_t busy_scaled;
int32_t core_busy, max_pstate, current_pstate;
- core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
+ core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
- busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
-
- return fp_toint(busy_scaled);
+ return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
}
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
- int busy_scaled;
+ int32_t busy_scaled;
struct _pid *pid;
signed int ctl = 0;
int steps;
@@ -523,14 +583,15 @@ static void intel_pstate_timer_func(unsigned long __data)
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
- ICPU(0x2a, default_policy),
- ICPU(0x2d, default_policy),
- ICPU(0x3a, default_policy),
- ICPU(0x3c, default_policy),
- ICPU(0x3e, default_policy),
- ICPU(0x3f, default_policy),
- ICPU(0x45, default_policy),
- ICPU(0x46, default_policy),
+ ICPU(0x2a, core_params),
+ ICPU(0x2d, core_params),
+ ICPU(0x37, byt_params),
+ ICPU(0x3a, core_params),
+ ICPU(0x3c, core_params),
+ ICPU(0x3e, core_params),
+ ICPU(0x3f, core_params),
+ ICPU(0x45, core_params),
+ ICPU(0x46, core_params),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -554,8 +615,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_get_cpu_pstates(cpu);
cpu->cpu = cpunum;
- cpu->pstate_policy =
- (struct pstate_adjust_policy *)id->driver_data;
+
init_timer_deferrable(&cpu->timer);
cpu->timer.function = intel_pstate_timer_func;
cpu->timer.data =
@@ -615,9 +675,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
@@ -685,9 +743,9 @@ static int intel_pstate_msrs_not_valid(void)
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
- if (!intel_pstate_min_pstate() ||
- !intel_pstate_max_pstate() ||
- !intel_pstate_turbo_pstate())
+ if (!pstate_funcs.get_max() ||
+ !pstate_funcs.get_min() ||
+ !pstate_funcs.get_turbo())
return -ENODEV;
rdmsrl(MSR_IA32_APERF, tmp);
@@ -700,10 +758,30 @@ static int intel_pstate_msrs_not_valid(void)
return 0;
}
+
+static void copy_pid_params(struct pstate_adjust_policy *policy)
+{
+ pid_params.sample_rate_ms = policy->sample_rate_ms;
+ pid_params.p_gain_pct = policy->p_gain_pct;
+ pid_params.i_gain_pct = policy->i_gain_pct;
+ pid_params.d_gain_pct = policy->d_gain_pct;
+ pid_params.deadband = policy->deadband;
+ pid_params.setpoint = policy->setpoint;
+}
+
+static void copy_cpu_funcs(struct pstate_funcs *funcs)
+{
+ pstate_funcs.get_max = funcs->get_max;
+ pstate_funcs.get_min = funcs->get_min;
+ pstate_funcs.get_turbo = funcs->get_turbo;
+ pstate_funcs.set = funcs->set;
+}
+
static int __init intel_pstate_init(void)
{
int cpu, rc = 0;
const struct x86_cpu_id *id;
+ struct cpu_defaults *cpu_info;
if (no_load)
return -ENODEV;
@@ -712,6 +790,11 @@ static int __init intel_pstate_init(void)
if (!id)
return -ENODEV;
+ cpu_info = (struct cpu_defaults *)id->driver_data;
+
+ copy_pid_params(&cpu_info->pid_policy);
+ copy_cpu_funcs(&cpu_info->funcs);
+
if (intel_pstate_msrs_not_valid())
return -ENODEV;
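
The intel_pstate hunks above move the busy and PID terms onto 8-bit fixed point (FRAC_BITS). The following standalone sketch re-creates those helpers and the scaled-busy calculation in plain C for illustration only; the macros mirror the driver, but the program and its sample numbers are made up.

#include <stdint.h>
#include <stdio.h>

/* Userspace copy of the driver's 8-bit fixed-point helpers. */
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static int32_t div_fp(int32_t x, int32_t y)
{
	return ((int64_t)x << FRAC_BITS) / y;
}

int main(void)
{
	/* e.g. 80% busy while running at pstate 20 of a 32-pstate part */
	int32_t core_busy = int_tofp(80);
	int32_t max_pstate = int_tofp(32);
	int32_t current_pstate = int_tofp(20);
	/* same expression as intel_pstate_get_scaled_busy() above */
	int32_t busy_scaled = mul_fp(core_busy,
				     div_fp(max_pstate, current_pstate));

	/* prints ~128% (truncated by the fixed-point conversion) */
	printf("scaled busy = %d%%\n", (int)fp_toint(busy_scaled));
	return 0;
}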
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index ba10658a9394..0767a4e29dfe 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -55,69 +55,37 @@ static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
return kirkwood_freq_table[0].frequency;
}
-static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int index)
+static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
unsigned int state = kirkwood_freq_table[index].driver_data;
unsigned long reg;
- freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
- freqs.new = kirkwood_freq_table[index].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
- kirkwood_freq_table[index].frequency);
- dev_dbg(priv.dev, "old frequency was %i KHz\n",
- kirkwood_cpufreq_get_cpu_frequency(0));
-
- if (freqs.old != freqs.new) {
- local_irq_disable();
-
- /* Disable interrupts to the CPU */
- reg = readl_relaxed(priv.base);
- reg |= CPU_SW_INT_BLK;
- writel_relaxed(reg, priv.base);
-
- switch (state) {
- case STATE_CPU_FREQ:
- clk_disable(priv.powersave_clk);
- break;
- case STATE_DDR_FREQ:
- clk_enable(priv.powersave_clk);
- break;
- }
+ local_irq_disable();
- /* Wait-for-Interrupt, while the hardware changes frequency */
- cpu_do_idle();
+ /* Disable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg |= CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
- /* Enable interrupts to the CPU */
- reg = readl_relaxed(priv.base);
- reg &= ~CPU_SW_INT_BLK;
- writel_relaxed(reg, priv.base);
-
- local_irq_enable();
+ switch (state) {
+ case STATE_CPU_FREQ:
+ clk_disable(priv.powersave_clk);
+ break;
+ case STATE_DDR_FREQ:
+ clk_enable(priv.powersave_clk);
+ break;
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, kirkwood_freq_table);
-}
-static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int index = 0;
+ /* Wait-for-Interrupt, while the hardware changes frequency */
+ cpu_do_idle();
- if (cpufreq_frequency_table_target(policy, kirkwood_freq_table,
- target_freq, relation, &index))
- return -EINVAL;
+ /* Enable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg &= ~CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
- kirkwood_cpufreq_set_cpu_state(policy, index);
+ local_irq_enable();
return 0;
}
@@ -125,40 +93,17 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
/* Module init and exit code */
static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- int result;
-
- /* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = 5000; /* 5uS */
- policy->cur = kirkwood_cpufreq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu);
-
- return 0;
-}
-
-static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, kirkwood_freq_table, 5000);
}
-static struct freq_attr *kirkwood_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver kirkwood_cpufreq_driver = {
.get = kirkwood_cpufreq_get_cpu_frequency,
- .verify = kirkwood_cpufreq_verify,
- .target = kirkwood_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = kirkwood_cpufreq_target,
.init = kirkwood_cpufreq_cpu_init,
- .exit = kirkwood_cpufreq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "kirkwood-cpufreq",
- .attr = kirkwood_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 4ada1cccb052..45bafddfd8ea 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -625,28 +625,13 @@ static void longhaul_setup_voltagescaling(void)
}
-static int longhaul_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, longhaul_table);
-}
-
-
static int longhaul_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int table_index)
{
- unsigned int table_index = 0;
unsigned int i;
unsigned int dir = 0;
u8 vid, current_vid;
- if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq,
- relation, &table_index))
- return -EINVAL;
-
- /* Don't set same frequency again */
- if (longhaul_index == table_index)
- return 0;
-
if (!can_scale_voltage)
longhaul_setstate(policy, table_index);
else {
@@ -919,36 +904,18 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
longhaul_setup_voltagescaling();
policy->cpuinfo.transition_latency = 200000; /* nsec */
- policy->cur = calc_speed(longhaul_get_cpu_mult());
-
- ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
- if (ret)
- return ret;
-
- cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, longhaul_table);
}
-static int longhaul_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *longhaul_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver longhaul_driver = {
- .verify = longhaul_verify,
- .target = longhaul_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = longhaul_target,
.get = longhaul_get,
.init = longhaul_cpu_init,
- .exit = longhaul_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "longhaul",
- .attr = longhaul_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id longhaul_id[] = {
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
index 5aa031612d53..074971b12635 100644
--- a/drivers/cpufreq/longrun.c
+++ b/drivers/cpufreq/longrun.c
@@ -129,9 +129,7 @@ static int longrun_verify_policy(struct cpufreq_policy *policy)
return -EINVAL;
policy->cpu = 0;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 7bc3c44d34e2..a43609218105 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -53,51 +53,24 @@ static unsigned int loongson2_cpufreq_get(unsigned int cpu)
* Here we notify other drivers of the proposed change and the final change.
*/
static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
unsigned int cpu = policy->cpu;
- unsigned int newstate = 0;
cpumask_t cpus_allowed;
- struct cpufreq_freqs freqs;
unsigned int freq;
cpus_allowed = current->cpus_allowed;
set_cpus_allowed_ptr(current, cpumask_of(cpu));
- if (cpufreq_frequency_table_target
- (policy, &loongson2_clockmod_table[0], target_freq, relation,
- &newstate))
- return -EINVAL;
-
freq =
((cpu_clock_freq / 1000) *
- loongson2_clockmod_table[newstate].driver_data) / 8;
- if (freq < policy->min || freq > policy->max)
- return -EINVAL;
-
- pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
-
- freqs.old = loongson2_cpufreq_get(cpu);
- freqs.new = freq;
- freqs.flags = 0;
-
- if (freqs.new == freqs.old)
- return 0;
-
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ loongson2_clockmod_table[index].driver_data) / 8;
set_cpus_allowed_ptr(current, &cpus_allowed);
/* setting the cpu frequency */
clk_set_rate(cpuclk, freq);
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- pr_debug("cpufreq: set frequency %u kHz\n", freq);
-
return 0;
}
@@ -131,40 +104,24 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
return ret;
}
- policy->cur = loongson2_cpufreq_get(policy->cpu);
-
- cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
- policy->cpu);
-
- return cpufreq_frequency_table_cpuinfo(policy,
- &loongson2_clockmod_table[0]);
-}
-
-static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- &loongson2_clockmod_table[0]);
+ return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
}
static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
{
+ cpufreq_frequency_table_put_attr(policy->cpu);
clk_put(cpuclk);
return 0;
}
-static struct freq_attr *loongson2_table_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver loongson2_cpufreq_driver = {
.name = "loongson2",
.init = loongson2_cpufreq_cpu_init,
- .verify = loongson2_cpufreq_verify,
- .target = loongson2_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = loongson2_cpufreq_target,
.get = loongson2_cpufreq_get,
.exit = loongson2_cpufreq_exit,
- .attr = loongson2_table_attr,
+ .attr = cpufreq_generic_attr,
};
static struct platform_device_id platform_device_ids[] = {
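
Several drivers in this series (loongson2 above; maple, p4-clockmod and others below) switch from .target to the lighter .target_index hook, where the core has already resolved the request to a frequency-table row and, unless CPUFREQ_ASYNC_NOTIFICATION is set, issues the PRECHANGE/POSTCHANGE notifications itself. A rough skeleton of what such a driver is left with, with invented names, frequencies and latency:

#include <linux/cpufreq.h>
#include <linux/kernel.h>

/* Invented two-entry table, only to keep the skeleton self-contained. */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .driver_data = 0, .frequency = 200000 },	/* kHz */
	{ .driver_data = 1, .frequency = 400000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* The core validated 'index' and drives the transition notifiers. */
	pr_debug("example: programming %u kHz\n",
		 example_freq_table[index].frequency);
	/* ... program clocks/regulators for that row here ... */
	return 0;
}

static int example_cpu_init(struct cpufreq_policy *policy)
{
	/* Publishes the table and sets the transition latency (ns). */
	return cpufreq_generic_init(policy, example_freq_table, 100000);
}

static struct cpufreq_driver example_driver = {
	.name		= "example",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.init		= example_cpu_init,
	.attr		= cpufreq_generic_attr,
};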
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index 6168d77b296d..c4dfa42a75ac 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -64,18 +64,11 @@ static struct cpufreq_frequency_table maple_cpu_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct freq_attr *maple_cpu_freqs_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
/* Power mode data is an array of the 32 bits PCR values to use for
* the various frequencies, retrieved from the device-tree
*/
static int maple_pmode_cur;
-static DEFINE_MUTEX(maple_switch_mutex);
-
static const u32 *maple_pmode_data;
static int maple_pmode_max;
@@ -135,37 +128,10 @@ static int maple_scom_query_freq(void)
* Common interface to the cpufreq core
*/
-static int maple_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, maple_cpu_freqs);
-}
-
static int maple_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
- unsigned int newstate = 0;
- struct cpufreq_freqs freqs;
- int rc;
-
- if (cpufreq_frequency_table_target(policy, maple_cpu_freqs,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- if (maple_pmode_cur == newstate)
- return 0;
-
- mutex_lock(&maple_switch_mutex);
-
- freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency;
- freqs.new = maple_cpu_freqs[newstate].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- rc = maple_scom_switch_freq(newstate);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- mutex_unlock(&maple_switch_mutex);
-
- return rc;
+ return maple_scom_switch_freq(index);
}
static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
@@ -175,27 +141,17 @@ static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- policy->cpuinfo.transition_latency = 12000;
- policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency;
- /* secondary CPUs are tied to the primary one by the
- * cpufreq core if in the secondary policy we tell it that
- * it actually must be one policy together with all others. */
- cpumask_setall(policy->cpus);
- cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
-
- return cpufreq_frequency_table_cpuinfo(policy,
- maple_cpu_freqs);
+ return cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
}
-
static struct cpufreq_driver maple_cpufreq_driver = {
.name = "maple",
.flags = CPUFREQ_CONST_LOOPS,
.init = maple_cpufreq_cpu_init,
- .verify = maple_cpufreq_verify,
- .target = maple_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = maple_cpufreq_target,
.get = maple_cpufreq_get_speed,
- .attr = maple_cpu_freqs_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init maple_cpufreq_init(void)
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index f31fcfcad514..be6d14307aa8 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -22,7 +22,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -40,13 +40,6 @@ static struct clk *mpu_clk;
static struct device *mpu_dev;
static struct regulator *mpu_reg;
-static int omap_verify_speed(struct cpufreq_policy *policy)
-{
- if (!freq_table)
- return -EINVAL;
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static unsigned int omap_getspeed(unsigned int cpu)
{
unsigned long rate;
@@ -58,42 +51,16 @@ static unsigned int omap_getspeed(unsigned int cpu)
return rate;
}
-static int omap_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int omap_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int i;
- int r, ret = 0;
- struct cpufreq_freqs freqs;
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long freq, volt = 0, volt_old = 0, tol = 0;
+ unsigned int old_freq, new_freq;
- if (!freq_table) {
- dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
- policy->cpu);
- return -EINVAL;
- }
+ old_freq = omap_getspeed(policy->cpu);
+ new_freq = freq_table[index].frequency;
- ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &i);
- if (ret) {
- dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
- __func__, policy->cpu, target_freq, ret);
- return ret;
- }
- freqs.new = freq_table[i].frequency;
- if (!freqs.new) {
- dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__,
- policy->cpu, target_freq);
- return -EINVAL;
- }
-
- freqs.old = omap_getspeed(policy->cpu);
-
- if (freqs.old == freqs.new && policy->cur == freqs.new)
- return ret;
-
- freq = freqs.new * 1000;
+ freq = new_freq * 1000;
ret = clk_round_rate(mpu_clk, freq);
if (IS_ERR_VALUE(ret)) {
dev_warn(mpu_dev,
@@ -105,143 +72,103 @@ static int omap_target(struct cpufreq_policy *policy,
if (mpu_reg) {
rcu_read_lock();
- opp = opp_find_freq_ceil(mpu_dev, &freq);
+ opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
- __func__, freqs.new);
+ __func__, new_freq);
return -EINVAL;
}
- volt = opp_get_voltage(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
tol = volt * OPP_TOLERANCE / 100;
volt_old = regulator_get_voltage(mpu_reg);
}
dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n",
- freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
- freqs.new / 1000, volt ? volt / 1000 : -1);
-
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+ new_freq / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
- if (mpu_reg && (freqs.new > freqs.old)) {
+ if (mpu_reg && (new_freq > old_freq)) {
r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
if (r < 0) {
dev_warn(mpu_dev, "%s: unable to scale voltage up.\n",
__func__);
- freqs.new = freqs.old;
- goto done;
+ return r;
}
}
- ret = clk_set_rate(mpu_clk, freqs.new * 1000);
+ ret = clk_set_rate(mpu_clk, new_freq * 1000);
/* scaling down? scale voltage after frequency */
- if (mpu_reg && (freqs.new < freqs.old)) {
+ if (mpu_reg && (new_freq < old_freq)) {
r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
if (r < 0) {
dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
__func__);
- ret = clk_set_rate(mpu_clk, freqs.old * 1000);
- freqs.new = freqs.old;
- goto done;
+ clk_set_rate(mpu_clk, old_freq * 1000);
+ return r;
}
}
- freqs.new = omap_getspeed(policy->cpu);
-
-done:
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
return ret;
}
static inline void freq_table_free(void)
{
if (atomic_dec_and_test(&freq_table_users))
- opp_free_cpufreq_table(mpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(mpu_dev, &freq_table);
}
static int omap_cpu_init(struct cpufreq_policy *policy)
{
- int result = 0;
+ int result;
mpu_clk = clk_get(NULL, "cpufreq_ck");
if (IS_ERR(mpu_clk))
return PTR_ERR(mpu_clk);
- if (policy->cpu >= NR_CPUS) {
- result = -EINVAL;
- goto fail_ck;
- }
-
- policy->cur = omap_getspeed(policy->cpu);
-
- if (!freq_table)
- result = opp_init_cpufreq_table(mpu_dev, &freq_table);
-
- if (result) {
- dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
+ if (!freq_table) {
+ result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
+ if (result) {
+ dev_err(mpu_dev,
+ "%s: cpu%d: failed creating freq table[%d]\n",
__func__, policy->cpu, result);
- goto fail_ck;
+ goto fail;
+ }
}
atomic_inc_return(&freq_table_users);
- result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (result)
- goto fail_table;
-
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
- policy->cur = omap_getspeed(policy->cpu);
-
- /*
- * On OMAP SMP configuartion, both processors share the voltage
- * and clock. So both CPUs needs to be scaled together and hence
- * needs software co-ordination. Use cpufreq affected_cpus
- * interface to handle this scenario. Additional is_smp() check
- * is to keep SMP_ON_UP build working.
- */
- if (is_smp())
- cpumask_setall(policy->cpus);
-
/* FIXME: what's the actual transition time? */
- policy->cpuinfo.transition_latency = 300 * 1000;
-
- return 0;
+ result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+ if (!result)
+ return 0;
-fail_table:
freq_table_free();
-fail_ck:
+fail:
clk_put(mpu_clk);
return result;
}
static int omap_cpu_exit(struct cpufreq_policy *policy)
{
+ cpufreq_frequency_table_put_attr(policy->cpu);
freq_table_free();
clk_put(mpu_clk);
return 0;
}
-static struct freq_attr *omap_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver omap_driver = {
.flags = CPUFREQ_STICKY,
- .verify = omap_verify_speed,
- .target = omap_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = omap_target,
.get = omap_getspeed,
.init = omap_cpu_init,
.exit = omap_cpu_exit,
.name = "omap",
- .attr = omap_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int omap_cpufreq_probe(struct platform_device *pdev)
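
The reworked omap_target() above keeps the usual DVFS ordering: when speeding up, raise the supply before the clock; when slowing down, lower it only after the clock change, so the CPU never runs faster than its voltage allows. Roughly, with invented parameter names and the error handling trimmed to the essentials:

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

/* Illustrative sketch of the voltage-before/after-clock ordering. */
static int example_scale(struct regulator *reg, struct clk *clk,
			 unsigned long old_hz, unsigned long new_hz,
			 int volt_uv, int tol_uv)
{
	int ret;

	/* scaling up? raise the voltage first */
	if (reg && new_hz > old_hz) {
		ret = regulator_set_voltage(reg, volt_uv - tol_uv,
					    volt_uv + tol_uv);
		if (ret < 0)
			return ret;
	}

	ret = clk_set_rate(clk, new_hz);

	/* scaling down? lower the voltage only after the clock */
	if (reg && new_hz < old_hz)
		regulator_set_voltage(reg, volt_uv - tol_uv, volt_uv + tol_uv);

	return ret;
}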
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 2f0a2a65c37f..3d1cba9fd5f9 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -105,47 +105,21 @@ static struct cpufreq_frequency_table p4clockmod_table[] = {
};
-static int cpufreq_p4_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int newstate = DC_RESV;
- struct cpufreq_freqs freqs;
int i;
- if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0],
- target_freq, relation, &newstate))
- return -EINVAL;
-
- freqs.old = cpufreq_p4_get(policy->cpu);
- freqs.new = stock_freq * p4clockmod_table[newstate].driver_data / 8;
-
- if (freqs.new == freqs.old)
- return 0;
-
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* run on each logical CPU,
* see section 13.15.3 of IA32 Intel Architecture Software
* Developer's Manual, Volume 3
*/
for_each_cpu(i, policy->cpus)
- cpufreq_p4_setdc(i, p4clockmod_table[newstate].driver_data);
-
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_p4_setdc(i, p4clockmod_table[index].driver_data);
return 0;
}
-static int cpufreq_p4_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
-}
-
-
static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
if (c->x86 == 0x06) {
@@ -230,25 +204,17 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
else
p4clockmod_table[i].frequency = (stock_freq * i)/8;
}
- cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
/* cpuinfo and default policy values */
/* the transition latency is set to be 1 higher than the maximum
* transition latency of the ondemand governor */
policy->cpuinfo.transition_latency = 10000001;
- policy->cur = stock_freq;
- return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
+ return cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]);
}
-static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
static unsigned int cpufreq_p4_get(unsigned int cpu)
{
u32 l, h;
@@ -267,19 +233,14 @@ static unsigned int cpufreq_p4_get(unsigned int cpu)
return stock_freq;
}
-static struct freq_attr *p4clockmod_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver p4clockmod_driver = {
- .verify = cpufreq_p4_verify,
- .target = cpufreq_p4_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cpufreq_p4_target,
.init = cpufreq_p4_cpu_init,
- .exit = cpufreq_p4_cpu_exit,
+ .exit = cpufreq_generic_exit,
.get = cpufreq_p4_get,
.name = "p4-clockmod",
- .attr = p4clockmod_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id cpufreq_p4_id[] = {
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 534e43a60d1f..0426008380d8 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -28,6 +28,7 @@
#include <linux/cpufreq.h>
#include <linux/timer.h>
#include <linux/module.h>
+#include <linux/of_address.h>
#include <asm/hw_irq.h>
#include <asm/io.h>
@@ -51,8 +52,6 @@
static void __iomem *sdcpwr_mapbase;
static void __iomem *sdcasr_mapbase;
-static DEFINE_MUTEX(pas_switch_mutex);
-
/* Current astate, is used when waking up from power savings on
* one core, in case the other core has switched states during
* the idle time.
@@ -69,11 +68,6 @@ static struct cpufreq_frequency_table pas_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct freq_attr *pas_cpu_freqs_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
/*
* hardware specific functions
*/
@@ -209,22 +203,13 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
}
- policy->cpuinfo.transition_latency = get_gizmo_latency();
-
cur_astate = get_cur_astate(policy->cpu);
pr_debug("current astate is at %d\n",cur_astate);
policy->cur = pas_freqs[cur_astate].frequency;
- cpumask_copy(policy->cpus, cpu_online_mask);
-
ppc_proc_freq = policy->cur * 1000ul;
- cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu);
-
- /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max
- * are set correctly
- */
- return cpufreq_frequency_table_cpuinfo(policy, pas_freqs);
+ return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
out_unmap_sdcpwr:
iounmap(sdcpwr_mapbase);
@@ -253,31 +238,11 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static int pas_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, pas_freqs);
-}
-
static int pas_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int pas_astate_new)
{
- struct cpufreq_freqs freqs;
- int pas_astate_new;
int i;
- cpufreq_frequency_table_target(policy,
- pas_freqs,
- target_freq,
- relation,
- &pas_astate_new);
-
- freqs.old = policy->cur;
- freqs.new = pas_freqs[pas_astate_new].frequency;
-
- mutex_lock(&pas_switch_mutex);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
policy->cpu,
pas_freqs[pas_astate_new].frequency,
@@ -288,10 +253,7 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
for_each_online_cpu(i)
set_astate(i, pas_astate_new);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- mutex_unlock(&pas_switch_mutex);
-
- ppc_proc_freq = freqs.new * 1000ul;
+ ppc_proc_freq = pas_freqs[pas_astate_new].frequency * 1000ul;
return 0;
}
@@ -300,9 +262,9 @@ static struct cpufreq_driver pas_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.init = pas_cpufreq_cpu_init,
.exit = pas_cpufreq_cpu_exit,
- .verify = pas_cpufreq_verify,
- .target = pas_cpufreq_target,
- .attr = pas_cpu_freqs_attr,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pas_cpufreq_target,
+ .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index d81c4e5ea0ad..e2b4f40ff69a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -111,8 +111,7 @@ static struct pcc_cpu __percpu *pcc_cpu_info;
static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
{
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -396,15 +395,14 @@ static int __init pcc_cpufreq_probe(void)
struct pcc_memory_resource *mem_resource;
struct pcc_register_resource *reg_resource;
union acpi_object *out_obj, *member;
- acpi_handle handle, osc_handle, pcch_handle;
+ acpi_handle handle, osc_handle;
int ret = 0;
status = acpi_get_handle(NULL, "\\_SB", &handle);
if (ACPI_FAILURE(status))
return -ENODEV;
- status = acpi_get_handle(handle, "PCCH", &pcch_handle);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(handle, "PCCH"))
return -ENODEV;
status = acpi_get_handle(handle, "_OSC", &osc_handle);
@@ -560,13 +558,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
ioread32(&pcch_hdr->nominal) * 1000;
policy->min = policy->cpuinfo.min_freq =
ioread32(&pcch_hdr->minimum_frequency) * 1000;
- policy->cur = pcc_get_freq(cpu);
-
- if (!policy->cur) {
- pr_debug("init: Unable to get current CPU frequency\n");
- result = -EINVAL;
- goto out;
- }
pr_debug("init: policy->max is %d, policy->min is %d\n",
policy->max, policy->min);
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index a096cd3fa23d..cf55d202f332 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -86,11 +86,6 @@ static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct freq_attr* pmac_cpu_freqs_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static inline void local_delay(unsigned long ms)
{
if (no_schedule)
@@ -336,21 +331,11 @@ static int pmu_set_cpu_speed(int low_speed)
return 0;
}
-static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
- int notify)
+static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode)
{
- struct cpufreq_freqs freqs;
unsigned long l3cr;
static unsigned long prev_l3cr;
- freqs.old = cur_freq;
- freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
-
- if (freqs.old == freqs.new)
- return 0;
-
- if (notify)
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (speed_mode == CPUFREQ_LOW &&
cpu_has_feature(CPU_FTR_L3CR)) {
l3cr = _get_L3CR();
@@ -366,8 +351,6 @@ static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
_set_L3CR(prev_l3cr);
}
- if (notify)
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
return 0;
@@ -378,23 +361,12 @@ static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
return cur_freq;
}
-static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
-}
-
static int pmac_cpufreq_target( struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
- unsigned int newstate = 0;
int rc;
- if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- rc = do_set_cpu_speed(policy, newstate, 1);
+ rc = do_set_cpu_speed(policy, index);
ppc_proc_freq = cur_freq * 1000ul;
return rc;
@@ -402,14 +374,7 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy,
static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- if (policy->cpu != 0)
- return -ENODEV;
-
- policy->cpuinfo.transition_latency = transition_latency;
- policy->cur = cur_freq;
-
- cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
- return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
+ return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency);
}
static u32 read_gpio(struct device_node *np)
@@ -443,7 +408,7 @@ static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
no_schedule = 1;
sleep_freq = cur_freq;
if (cur_freq == low_freq && !is_pmu_based)
- do_set_cpu_speed(policy, CPUFREQ_HIGH, 0);
+ do_set_cpu_speed(policy, CPUFREQ_HIGH);
return 0;
}
@@ -460,7 +425,7 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
* probably high speed due to our suspend() routine
*/
do_set_cpu_speed(policy, sleep_freq == low_freq ?
- CPUFREQ_LOW : CPUFREQ_HIGH, 0);
+ CPUFREQ_LOW : CPUFREQ_HIGH);
ppc_proc_freq = cur_freq * 1000ul;
@@ -469,14 +434,14 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pmac_cpufreq_driver = {
- .verify = pmac_cpufreq_verify,
- .target = pmac_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pmac_cpufreq_target,
.get = pmac_cpufreq_get_speed,
.init = pmac_cpufreq_cpu_init,
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
.flags = CPUFREQ_PM_NO_WARN,
- .attr = pmac_cpu_freqs_attr,
+ .attr = cpufreq_generic_attr,
.name = "powermac",
};
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 3a51ad7e47c8..6a338f8c3860 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -70,11 +70,6 @@ static struct cpufreq_frequency_table g5_cpu_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct freq_attr* g5_cpu_freqs_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
/* Power mode data is an array of the 32 bits PCR values to use for
* the various frequencies, retrieved from the device-tree
*/
@@ -84,8 +79,6 @@ static void (*g5_switch_volt)(int speed_mode);
static int (*g5_switch_freq)(int speed_mode);
static int (*g5_query_freq)(void);
-static DEFINE_MUTEX(g5_switch_mutex);
-
static unsigned long transition_latency;
#ifdef CONFIG_PMAC_SMU
@@ -142,7 +135,7 @@ static void g5_vdnap_switch_volt(int speed_mode)
pmf_call_one(pfunc_vdnap0_complete, &args);
if (done)
break;
- msleep(1);
+ usleep_range(1000, 1000);
}
if (done == 0)
printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
@@ -241,7 +234,7 @@ static void g5_pfunc_switch_volt(int speed_mode)
if (pfunc_cpu1_volt_low)
pmf_call_one(pfunc_cpu1_volt_low, NULL);
}
- msleep(10); /* should be faster , to fix */
+ usleep_range(10000, 10000); /* should be faster , to fix */
}
/*
@@ -286,7 +279,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
pmf_call_one(pfunc_slewing_done, &args);
if (done)
break;
- msleep(1);
+ usleep_range(500, 500);
}
if (done == 0)
printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
@@ -317,37 +310,9 @@ static int g5_pfunc_query_freq(void)
* Common interface to the cpufreq core
*/
-static int g5_cpufreq_verify(struct cpufreq_policy *policy)
+static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
{
- return cpufreq_frequency_table_verify(policy, g5_cpu_freqs);
-}
-
-static int g5_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
-{
- unsigned int newstate = 0;
- struct cpufreq_freqs freqs;
- int rc;
-
- if (cpufreq_frequency_table_target(policy, g5_cpu_freqs,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- if (g5_pmode_cur == newstate)
- return 0;
-
- mutex_lock(&g5_switch_mutex);
-
- freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
- freqs.new = g5_cpu_freqs[newstate].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- rc = g5_switch_freq(newstate);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- mutex_unlock(&g5_switch_mutex);
-
- return rc;
+ return g5_switch_freq(index);
}
static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
@@ -357,27 +322,17 @@ static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- policy->cpuinfo.transition_latency = transition_latency;
- policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
- /* secondary CPUs are tied to the primary one by the
- * cpufreq core if in the secondary policy we tell it that
- * it actually must be one policy together with all others. */
- cpumask_copy(policy->cpus, cpu_online_mask);
- cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
-
- return cpufreq_frequency_table_cpuinfo(policy,
- g5_cpu_freqs);
+ return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency);
}
-
static struct cpufreq_driver g5_cpufreq_driver = {
.name = "powermac",
.flags = CPUFREQ_CONST_LOOPS,
.init = g5_cpufreq_cpu_init,
- .verify = g5_cpufreq_verify,
- .target = g5_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = g5_cpufreq_target,
.get = g5_cpufreq_get_speed,
- .attr = g5_cpu_freqs_attr,
+ .attr = cpufreq_generic_attr,
};
@@ -397,7 +352,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
/* Check supported platforms */
if (of_machine_is_compatible("PowerMac8,1") ||
of_machine_is_compatible("PowerMac8,2") ||
- of_machine_is_compatible("PowerMac9,1"))
+ of_machine_is_compatible("PowerMac9,1") ||
+ of_machine_is_compatible("PowerMac12,1"))
use_volts_smu = 1;
else if (of_machine_is_compatible("PowerMac11,2"))
use_volts_vdnap = 1;
@@ -647,8 +603,10 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
g5_cpu_freqs[0].frequency = max_freq;
g5_cpu_freqs[1].frequency = min_freq;
+ /* Based on a measurement on Xserve G5, rounded up. */
+ transition_latency = 10 * NSEC_PER_MSEC;
+
/* Set callbacks */
- transition_latency = CPUFREQ_ETERNAL;
g5_switch_volt = g5_pfunc_switch_volt;
g5_switch_freq = g5_pfunc_switch_freq;
g5_query_freq = g5_pfunc_query_freq;
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 85f1c8c25ddc..643e7952cad3 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -63,12 +63,12 @@ static int powernow_k6_get_cpu_multiplier(void)
/**
- * powernow_k6_set_state - set the PowerNow! multiplier
+ * powernow_k6_target - set the PowerNow! multiplier
* @best_i: clock_ratio[best_i] is the target multiplier
*
* Tries to change the PowerNow! multiplier
*/
-static void powernow_k6_set_state(struct cpufreq_policy *policy,
+static int powernow_k6_target(struct cpufreq_policy *policy,
unsigned int best_i)
{
unsigned long outvalue = 0, invalue = 0;
@@ -77,7 +77,7 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
if (clock_ratio[best_i].driver_data > max_multiplier) {
printk(KERN_ERR PFX "invalid target frequency\n");
- return;
+ return -EINVAL;
}
freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
@@ -100,44 +100,6 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- return;
-}
-
-
-/**
- * powernow_k6_verify - verifies a new CPUfreq policy
- * @policy: new policy
- *
- * Policy must be within lowest and highest possible CPU Frequency,
- * and at least one possible state must be within min and max.
- */
-static int powernow_k6_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &clock_ratio[0]);
-}
-
-
-/**
- * powernow_k6_setpolicy - sets a new CPUFreq policy
- * @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
- *
- * sets a new CPUFreq policy
- */
-static int powernow_k6_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target(policy, &clock_ratio[0],
- target_freq, relation, &newstate))
- return -EINVAL;
-
- powernow_k6_set_state(policy, newstate);
-
return 0;
}
@@ -145,7 +107,6 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i, f;
- int result;
if (policy->cpu != 0)
return -ENODEV;
@@ -165,15 +126,8 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 200000;
- policy->cur = busfreq * max_multiplier;
-
- result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, clock_ratio);
}
@@ -182,7 +136,7 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
unsigned int i;
for (i = 0; i < 8; i++) {
if (i == max_multiplier)
- powernow_k6_set_state(policy, i);
+ powernow_k6_target(policy, i);
}
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
@@ -195,19 +149,14 @@ static unsigned int powernow_k6_get(unsigned int cpu)
return ret;
}
-static struct freq_attr *powernow_k6_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver powernow_k6_driver = {
- .verify = powernow_k6_verify,
- .target = powernow_k6_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernow_k6_target,
.init = powernow_k6_cpu_init,
.exit = powernow_k6_cpu_exit,
.get = powernow_k6_get,
.name = "powernow-k6",
- .attr = powernow_k6_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id powernow_k6_ids[] = {
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 14ce480be8ab..946708a1d745 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -248,7 +248,7 @@ static void change_VID(int vid)
}
-static void change_speed(struct cpufreq_policy *policy, unsigned int index)
+static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
{
u8 fid, vid;
struct cpufreq_freqs freqs;
@@ -291,6 +291,8 @@ static void change_speed(struct cpufreq_policy *policy, unsigned int index)
local_irq_enable();
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
}
@@ -533,27 +535,6 @@ static int powernow_decode_bios(int maxfid, int startvid)
}
-static int powernow_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate;
-
- if (cpufreq_frequency_table_target(policy, powernow_table, target_freq,
- relation, &newstate))
- return -EINVAL;
-
- change_speed(policy, newstate);
-
- return 0;
-}
-
-
-static int powernow_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, powernow_table);
-}
-
/*
* We use the fact that the bus frequency is somehow
* a multiple of 100000/3 khz, then we compute sgtc according
@@ -678,11 +659,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency =
cpufreq_scale(2000000UL, fsb, latency);
- policy->cur = powernow_get(0);
-
- cpufreq_frequency_table_get_attr(powernow_table, policy->cpu);
-
- return cpufreq_frequency_table_cpuinfo(policy, powernow_table);
+ return cpufreq_table_validate_and_show(policy, powernow_table);
}
static int powernow_cpu_exit(struct cpufreq_policy *policy)
@@ -701,14 +678,9 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static struct freq_attr *powernow_table_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver powernow_driver = {
- .verify = powernow_verify,
- .target = powernow_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernow_target,
.get = powernow_get,
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
.bios_limit = acpi_processor_get_bios_limit,
@@ -716,7 +688,7 @@ static struct cpufreq_driver powernow_driver = {
.init = powernow_cpu_init,
.exit = powernow_cpu_exit,
.name = "powernow-k7",
- .attr = powernow_table_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init powernow_init(void)
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 2344a9ed17f3..0023c7d40a51 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -977,20 +977,17 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
struct powernowk8_target_arg {
struct cpufreq_policy *pol;
- unsigned targfreq;
- unsigned relation;
+ unsigned newstate;
};
static long powernowk8_target_fn(void *arg)
{
struct powernowk8_target_arg *pta = arg;
struct cpufreq_policy *pol = pta->pol;
- unsigned targfreq = pta->targfreq;
- unsigned relation = pta->relation;
+ unsigned newstate = pta->newstate;
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
u32 checkfid;
u32 checkvid;
- unsigned int newstate;
int ret;
if (!data)
@@ -1004,8 +1001,9 @@ static long powernowk8_target_fn(void *arg)
return -EIO;
}
- pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
- pol->cpu, targfreq, pol->min, pol->max, relation);
+ pr_debug("targ: cpu %d, %d kHz, min %d, max %d\n",
+ pol->cpu, data->powernow_table[newstate].frequency, pol->min,
+ pol->max);
if (query_current_values_with_pending_wait(data))
return -EIO;
@@ -1021,10 +1019,6 @@ static long powernowk8_target_fn(void *arg)
checkvid, data->currvid);
}
- if (cpufreq_frequency_table_target(pol, data->powernow_table,
- targfreq, relation, &newstate))
- return -EIO;
-
mutex_lock(&fidvid_mutex);
powernow_k8_acpi_pst_values(data, newstate);
@@ -1044,26 +1038,13 @@ static long powernowk8_target_fn(void *arg)
}
/* Driver entry point to switch to the target frequency */
-static int powernowk8_target(struct cpufreq_policy *pol,
- unsigned targfreq, unsigned relation)
+static int powernowk8_target(struct cpufreq_policy *pol, unsigned index)
{
- struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
- .relation = relation };
+ struct powernowk8_target_arg pta = { .pol = pol, .newstate = index };
return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
}
-/* Driver entry point to verify the policy and range of frequencies */
-static int powernowk8_verify(struct cpufreq_policy *pol)
-{
- struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
-
- if (!data)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(pol, data->powernow_table);
-}
-
struct init_on_cpu {
struct powernow_k8_data *data;
int rc;
@@ -1152,11 +1133,8 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
data->available_cores = pol->cpus;
- pol->cur = find_khz_freq_from_fid(data->currfid);
- pr_debug("policy current frequency %d kHz\n", pol->cur);
-
/* min/max the cpu is capable of */
- if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
+ if (cpufreq_table_validate_and_show(pol, data->powernow_table)) {
printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n");
powernow_k8_cpu_exit_acpi(data);
kfree(data->powernow_table);
@@ -1164,8 +1142,6 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
return -EINVAL;
}
- cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
-
pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
@@ -1227,20 +1203,16 @@ out:
return khz;
}
-static struct freq_attr *powernow_k8_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver cpufreq_amd64_driver = {
- .verify = powernowk8_verify,
- .target = powernowk8_target,
+ .flags = CPUFREQ_ASYNC_NOTIFICATION,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernowk8_target,
.bios_limit = acpi_processor_get_bios_limit,
.init = powernowk8_cpu_init,
.exit = powernowk8_cpu_exit,
.get = powernowk8_get,
.name = "powernow-k8",
- .attr = powernow_k8_attr,
+ .attr = cpufreq_generic_attr,
};
static void __request_acpi_cpufreq(void)
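powernow-k8 has to touch MSRs on the CPU being scaled, so its new .target_index() just packs the index into an argument struct and hands it to work_on_cpu(), exactly as the old target() did with the frequency and relation. It is also the one driver here that gains CPUFREQ_ASYNC_NOTIFICATION; notably, no cpufreq_notify_transition() calls are removed from its hunks, which suggests (this is a reading of the diff, not a statement of the core's semantics) that the flag tells the core this driver issues its own transition notifications. A sketch of the dispatch pattern, with invented foo_ names:

/* Hypothetical example of the "run the switch on the target CPU" pattern. */
#include <linux/cpufreq.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

struct foo_target_arg {
	struct cpufreq_policy *pol;
	unsigned int newstate;
};

static long foo_target_fn(void *arg)
{
	struct foo_target_arg *pta = arg;

	/* runs on pta->pol->cpu; program the P-state for pta->newstate here */
	pr_debug("cpu %d -> state %u\n", pta->pol->cpu, pta->newstate);
	return 0;
}

static int foo_target(struct cpufreq_policy *pol, unsigned int index)
{
	struct foo_target_arg pta = { .pol = pol, .newstate = index };

	/* work_on_cpu() keeps the frequency-change code on the CPU being scaled */
	return work_on_cpu(pol->cpu, foo_target_fn, &pta);
}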
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 60e81d524ea8..3f7be46d2b27 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -69,8 +69,6 @@ static const struct soc_data sdata[] = {
static u32 min_cpufreq;
static const u32 *fmask;
-/* serialize frequency changes */
-static DEFINE_MUTEX(cpufreq_lock);
static DEFINE_PER_CPU(struct cpu_data *, cpu_data);
/* cpumask in a cluster */
@@ -202,7 +200,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
table[i].frequency = CPUFREQ_TABLE_END;
/* set the min and max frequency properly */
- ret = cpufreq_frequency_table_cpuinfo(policy, table);
+ ret = cpufreq_table_validate_and_show(policy, table);
if (ret) {
pr_err("invalid frequency table: %d\n", ret);
goto err_nomem1;
@@ -217,9 +215,6 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
per_cpu(cpu_data, i) = data;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = corenet_cpufreq_get_speed(policy->cpu);
-
- cpufreq_frequency_table_get_attr(table, cpu);
of_node_put(np);
return 0;
@@ -253,60 +248,25 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static int corenet_cpufreq_verify(struct cpufreq_policy *policy)
-{
- struct cpufreq_frequency_table *table =
- per_cpu(cpu_data, policy->cpu)->table;
-
- return cpufreq_frequency_table_verify(policy, table);
-}
-
static int corenet_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
- unsigned int new;
struct clk *parent;
- int ret;
struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
- cpufreq_frequency_table_target(policy, data->table,
- target_freq, relation, &new);
-
- if (policy->cur == data->table[new].frequency)
- return 0;
-
- freqs.old = policy->cur;
- freqs.new = data->table[new].frequency;
-
- mutex_lock(&cpufreq_lock);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- parent = of_clk_get(data->parent, data->table[new].driver_data);
- ret = clk_set_parent(data->clk, parent);
- if (ret)
- freqs.new = freqs.old;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- mutex_unlock(&cpufreq_lock);
-
- return ret;
+ parent = of_clk_get(data->parent, data->table[index].driver_data);
+ return clk_set_parent(data->clk, parent);
}
-static struct freq_attr *corenet_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
.name = "ppc_cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
.init = corenet_cpufreq_cpu_init,
.exit = __exit_p(corenet_cpufreq_cpu_exit),
- .verify = corenet_cpufreq_verify,
- .target = corenet_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = corenet_cpufreq_target,
.get = corenet_cpufreq_get_speed,
- .attr = corenet_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct of_device_id node_matches[] __initdata = {
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 2e448f0bbdc5..e42ca9c31cea 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -30,9 +30,6 @@
#include "ppc_cbe_cpufreq.h"
-static DEFINE_MUTEX(cbe_switch_mutex);
-
-
/* the CBE supports an 8 step frequency scaling */
static struct cpufreq_frequency_table cbe_freqs[] = {
{1, 0},
@@ -123,63 +120,28 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif
- cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
-
/* this ensures that policy->cpuinfo_min
* and policy->cpuinfo_max are set correctly */
- return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
-}
-
-static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, cbe_freqs);
+ return cpufreq_table_validate_and_show(policy, cbe_freqs);
}
static int cbe_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int cbe_pmode_new)
{
- int rc;
- struct cpufreq_freqs freqs;
- unsigned int cbe_pmode_new;
-
- cpufreq_frequency_table_target(policy,
- cbe_freqs,
- target_freq,
- relation,
- &cbe_pmode_new);
-
- freqs.old = policy->cur;
- freqs.new = cbe_freqs[cbe_pmode_new].frequency;
-
- mutex_lock(&cbe_switch_mutex);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
pr_debug("setting frequency for cpu %d to %d kHz, " \
"1/%d of max frequency\n",
policy->cpu,
cbe_freqs[cbe_pmode_new].frequency,
cbe_freqs[cbe_pmode_new].driver_data);
- rc = set_pmode(policy->cpu, cbe_pmode_new);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- mutex_unlock(&cbe_switch_mutex);
-
- return rc;
+ return set_pmode(policy->cpu, cbe_pmode_new);
}
static struct cpufreq_driver cbe_cpufreq_driver = {
- .verify = cbe_cpufreq_verify,
- .target = cbe_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cbe_cpufreq_target,
.init = cbe_cpufreq_cpu_init,
- .exit = cbe_cpufreq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "cbe-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
};
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 8749eaf18793..0a0f4369636a 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -262,36 +262,15 @@ static u32 mdrefr_dri(unsigned int freq)
return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
}
-/* find a valid frequency point */
-static int pxa_verify_policy(struct cpufreq_policy *policy)
-{
- struct cpufreq_frequency_table *pxa_freqs_table;
- pxa_freqs_t *pxa_freqs;
- int ret;
-
- find_freq_tables(&pxa_freqs_table, &pxa_freqs);
- ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table);
-
- if (freq_debug)
- pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n",
- policy->min, policy->max);
-
- return ret;
-}
-
static unsigned int pxa_cpufreq_get(unsigned int cpu)
{
return get_clk_frequency_khz(0);
}
-static int pxa_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct cpufreq_frequency_table *pxa_freqs_table;
pxa_freqs_t *pxa_freq_settings;
- struct cpufreq_freqs freqs;
- unsigned int idx;
unsigned long flags;
unsigned int new_freq_cpu, new_freq_mem;
unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
@@ -300,32 +279,19 @@ static int pxa_set_target(struct cpufreq_policy *policy,
/* Get the current policy */
find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
- /* Lookup the next frequency */
- if (cpufreq_frequency_table_target(policy, pxa_freqs_table,
- target_freq, relation, &idx)) {
- return -EINVAL;
- }
-
new_freq_cpu = pxa_freq_settings[idx].khz;
new_freq_mem = pxa_freq_settings[idx].membus;
- freqs.old = policy->cur;
- freqs.new = new_freq_cpu;
if (freq_debug)
pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
- freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
+ new_freq_cpu / 1000, (pxa_freq_settings[idx].div2) ?
(new_freq_mem / 2000) : (new_freq_mem / 1000));
- if (vcc_core && freqs.new > freqs.old)
+ if (vcc_core && new_freq_cpu > policy->cur) {
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
- if (ret)
- return ret;
- /*
- * Tell everyone what we're about to do...
- * you should add a notify client with any platform specific
- * Vcc changing capability
- */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ if (ret)
+ return ret;
+ }
/* Calculate the next MDREFR. If we're slowing down the SDRAM clock
* we need to preset the smaller DRI before the change. If we're
@@ -376,13 +342,6 @@ static int pxa_set_target(struct cpufreq_policy *policy,
local_irq_restore(flags);
/*
- * Tell everyone what we've just done...
- * you should add a notify client with any platform specific
- * SDRAM refresh timer adjustments
- */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- /*
* Even if voltage setting fails, we don't report it, as the frequency
* change succeeded. The voltage reduction is not a critical failure,
* only power savings will suffer from this.
@@ -391,7 +350,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
* bug is triggered (seems a deadlock). Should anybody find out where,
* the "return 0" should become a "return ret".
*/
- if (vcc_core && freqs.new < freqs.old)
+ if (vcc_core && new_freq_cpu < policy->cur)
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
return 0;
@@ -414,8 +373,6 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
/* set default policy and cpuinfo */
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
- policy->cur = get_clk_frequency_khz(0); /* current freq */
- policy->min = policy->max = policy->cur;
/* Generate pxa25x the run cpufreq_frequency_table struct */
for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
@@ -453,10 +410,12 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
pr_info("PXA255 cpufreq using %s frequency table\n",
pxa255_turbo_table ? "turbo" : "run");
- cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table);
+
+ cpufreq_table_validate_and_show(policy, pxa255_freq_table);
+ }
+ else if (cpu_is_pxa27x()) {
+ cpufreq_table_validate_and_show(policy, pxa27x_freq_table);
}
- else if (cpu_is_pxa27x())
- cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table);
printk(KERN_INFO "PXA CPU frequency change support initialized\n");
@@ -464,9 +423,10 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pxa_cpufreq_driver = {
- .verify = pxa_verify_policy,
- .target = pxa_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pxa_set_target,
.init = pxa_cpufreq_init,
+ .exit = cpufreq_generic_exit,
.get = pxa_cpufreq_get,
.name = "PXA2xx",
};
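The pxa2xx, ppc_cbe and ppc-corenet hunks above all shed the same boilerplate: the struct cpufreq_freqs bookkeeping, the PRE/POSTCHANGE notifier calls, and (in the ppc drivers) the local mutex taken around them. That is only safe because, with .target_index(), the table lookup and the notification bracketing are expected to happen once in the cpufreq core. The sketch below illustrates that division of labour under that assumption; it is a simplification, not a copy of the core's __cpufreq_driver_target().

/* Simplified, assumed picture of the core-side wrapper around ->target_index(). */
#include <linux/cpufreq.h>

static int core_target_sketch(struct cpufreq_policy *policy,
			      struct cpufreq_driver *drv,
			      struct cpufreq_frequency_table *table,
			      unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs;
	unsigned int index;
	int ret;

	/* the lookup every driver used to do for itself */
	if (cpufreq_frequency_table_target(policy, table, target_freq,
					   relation, &index))
		return -EINVAL;

	freqs.old = policy->cur;
	freqs.new = table[index].frequency;
	if (freqs.old == freqs.new)
		return 0;

	/* the notification bracket every driver used to open and close */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	ret = drv->target_index(policy, index);
	if (ret)
		freqs.new = freqs.old;	/* report "no change" on failure */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	return ret;
}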
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index d26306fb00d2..93840048dd11 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -108,7 +108,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
pxa3xx_freqs_num = num;
pxa3xx_freqs_table = table;
- return cpufreq_frequency_table_cpuinfo(policy, table);
+ return cpufreq_table_validate_and_show(policy, table);
}
static void __update_core_freq(struct pxa3xx_freq_info *info)
@@ -150,54 +150,26 @@ static void __update_bus_freq(struct pxa3xx_freq_info *info)
cpu_relax();
}
-static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table);
-}
-
static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
{
return pxa3xx_get_clk_frequency_khz(0);
}
-static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index)
{
struct pxa3xx_freq_info *next;
- struct cpufreq_freqs freqs;
unsigned long flags;
- int idx;
if (policy->cpu != 0)
return -EINVAL;
- /* Lookup the next frequency */
- if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table,
- target_freq, relation, &idx))
- return -EINVAL;
-
- next = &pxa3xx_freqs[idx];
-
- freqs.old = policy->cur;
- freqs.new = next->cpufreq_mhz * 1000;
-
- pr_debug("CPU frequency from %d MHz to %d MHz%s\n",
- freqs.old / 1000, freqs.new / 1000,
- (freqs.old == freqs.new) ? " (skipped)" : "");
-
- if (freqs.old == target_freq)
- return 0;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ next = &pxa3xx_freqs[index];
local_irq_save(flags);
__update_core_freq(next);
__update_bus_freq(next);
local_irq_restore(flags);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
return 0;
}
@@ -206,11 +178,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
int ret = -EINVAL;
/* set default policy and cpuinfo */
- policy->cpuinfo.min_freq = 104000;
- policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000;
+ policy->min = policy->cpuinfo.min_freq = 104000;
+ policy->max = policy->cpuinfo.max_freq =
+ (cpu_is_pxa320()) ? 806000 : 624000;
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
- policy->max = pxa3xx_get_clk_frequency_khz(0);
- policy->cur = policy->min = policy->max;
if (cpu_is_pxa300() || cpu_is_pxa310())
ret = setup_freqs_table(policy, pxa300_freqs,
@@ -230,9 +201,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pxa3xx_cpufreq_driver = {
- .verify = pxa3xx_cpufreq_verify,
- .target = pxa3xx_cpufreq_set,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pxa3xx_cpufreq_set,
.init = pxa3xx_cpufreq_init,
+ .exit = cpufreq_generic_exit,
.get = pxa3xx_cpufreq_get,
.name = "pxa3xx-cpufreq",
};
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 22dcb81ef9d0..8d904a00027b 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -87,16 +87,6 @@ static struct cpufreq_frequency_table s3c2450_freq_table[] = {
{ 0, CPUFREQ_TABLE_END },
};
-static int s3c2416_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(policy, s3c_freq->freq_table);
-}
-
static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
@@ -227,24 +217,15 @@ static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx)
}
static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
- struct cpufreq_freqs freqs;
+ unsigned int new_freq;
int idx, ret, to_dvs = 0;
- unsigned int i;
mutex_lock(&cpufreq_lock);
- pr_debug("cpufreq: to %dKHz, relation %d\n", target_freq, relation);
-
- ret = cpufreq_frequency_table_target(policy, s3c_freq->freq_table,
- target_freq, relation, &i);
- if (ret != 0)
- goto out;
-
- idx = s3c_freq->freq_table[i].driver_data;
+ idx = s3c_freq->freq_table[index].driver_data;
if (idx == SOURCE_HCLK)
to_dvs = 1;
@@ -256,24 +237,13 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
goto out;
}
- freqs.flags = 0;
- freqs.old = s3c_freq->is_dvs ? FREQ_DVS
- : clk_get_rate(s3c_freq->armclk) / 1000;
-
/* When leavin dvs mode, always switch the armdiv to the hclk rate
* The S3C2416 has stability issues when switching directly to
* higher frequencies.
*/
- freqs.new = (s3c_freq->is_dvs && !to_dvs)
+ new_freq = (s3c_freq->is_dvs && !to_dvs)
? clk_get_rate(s3c_freq->hclk) / 1000
- : s3c_freq->freq_table[i].frequency;
-
- pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
-
- if (!to_dvs && freqs.old == freqs.new)
- goto out;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ : s3c_freq->freq_table[index].frequency;
if (to_dvs) {
pr_debug("cpufreq: enter dvs\n");
@@ -282,12 +252,10 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
pr_debug("cpufreq: leave dvs\n");
ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx);
} else {
- pr_debug("cpufreq: change armdiv to %dkHz\n", freqs.new);
- ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new);
+ pr_debug("cpufreq: change armdiv to %dkHz\n", new_freq);
+ ret = s3c2416_cpufreq_set_armdiv(s3c_freq, new_freq);
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
out:
mutex_unlock(&cpufreq_lock);
@@ -486,20 +454,14 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
freq++;
}
- policy->cur = clk_get_rate(s3c_freq->armclk) / 1000;
-
/* Datasheet says PLL stabalisation time must be at least 300us,
* so but add some fudge. (reference in LOCKCON0 register description)
*/
- policy->cpuinfo.transition_latency = (500 * 1000) +
- s3c_freq->regulator_latency;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, s3c_freq->freq_table);
+ ret = cpufreq_generic_init(policy, s3c_freq->freq_table,
+ (500 * 1000) + s3c_freq->regulator_latency);
if (ret)
goto err_freq_table;
- cpufreq_frequency_table_get_attr(s3c_freq->freq_table, 0);
-
register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier);
return 0;
@@ -518,19 +480,14 @@ err_hclk:
return ret;
}
-static struct freq_attr *s3c2416_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver s3c2416_cpufreq_driver = {
.flags = 0,
- .verify = s3c2416_cpufreq_verify_speed,
- .target = s3c2416_cpufreq_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s3c2416_cpufreq_set_target,
.get = s3c2416_cpufreq_get_speed,
.init = s3c2416_cpufreq_driver_init,
.name = "s3c2416",
- .attr = s3c2416_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init s3c2416_cpufreq_init(void)
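s3c2416 folds its table registration, transition latency and attribute setup into a single cpufreq_generic_init() call, and s3c24xx, the sa11x0 drivers, s5pv210 and spear below do the same. Judging from the per-driver code it replaces, the helper is expected to behave roughly as sketched here; this is an assumption drawn from the removed lines, not the core's definition.

/* Assumed behaviour of cpufreq_generic_init(), reconstructed from the code it replaces. */
#include <linux/cpufreq.h>
#include <linux/cpumask.h>

static int generic_init_sketch(struct cpufreq_policy *policy,
			       struct cpufreq_frequency_table *table,
			       unsigned int transition_latency)
{
	int ret;

	/* validate the table, fill cpuinfo.min/max and hook up the sysfs table */
	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = transition_latency;

	/* these platforms scale one clock shared by every CPU in the policy */
	cpumask_setall(policy->cpus);

	return 0;
}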
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index b0f343fcb7ee..485088253358 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -373,23 +373,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
static int s3c_cpufreq_init(struct cpufreq_policy *policy)
{
- printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy);
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- policy->cur = s3c_cpufreq_get(0);
- policy->min = policy->cpuinfo.min_freq = 0;
- policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000;
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
- /* feed the latency information from the cpu driver */
- policy->cpuinfo.transition_latency = cpu_cur.info->latency;
-
- if (ftab)
- cpufreq_frequency_table_cpuinfo(policy, ftab);
-
- return 0;
+ return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
}
static int __init s3c_cpufreq_initclks(void)
@@ -416,14 +400,6 @@ static int __init s3c_cpufreq_initclks(void)
return 0;
}
-static int s3c_cpufreq_verify(struct cpufreq_policy *policy)
-{
- if (policy->cpu != 0)
- return -EINVAL;
-
- return 0;
-}
-
#ifdef CONFIG_PM
static struct cpufreq_frequency_table suspend_pll;
static unsigned int suspend_freq;
@@ -473,7 +449,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
static struct cpufreq_driver s3c24xx_driver = {
.flags = CPUFREQ_STICKY,
- .verify = s3c_cpufreq_verify,
.target = s3c_cpufreq_target,
.get = s3c_cpufreq_get,
.init = s3c_cpufreq_init,
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 15631f92ab7d..67e302eeefec 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -54,14 +54,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
};
#endif
-static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
- if (policy->cpu != 0)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
-}
-
static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
{
if (cpu != 0)
@@ -71,66 +63,48 @@ static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
}
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
- int ret;
- unsigned int i;
- struct cpufreq_freqs freqs;
struct s3c64xx_dvfs *dvfs;
+ unsigned int old_freq, new_freq;
+ int ret;
- ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table,
- target_freq, relation, &i);
- if (ret != 0)
- return ret;
-
- freqs.old = clk_get_rate(armclk) / 1000;
- freqs.new = s3c64xx_freq_table[i].frequency;
- freqs.flags = 0;
- dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].driver_data];
-
- if (freqs.old == freqs.new)
- return 0;
-
- pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ old_freq = clk_get_rate(armclk) / 1000;
+ new_freq = s3c64xx_freq_table[index].frequency;
+ dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
#ifdef CONFIG_REGULATOR
- if (vddarm && freqs.new > freqs.old) {
+ if (vddarm && new_freq > old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
- freqs.new, ret);
- freqs.new = freqs.old;
- goto post_notify;
+ new_freq, ret);
+ return ret;
}
}
#endif
- ret = clk_set_rate(armclk, freqs.new * 1000);
+ ret = clk_set_rate(armclk, new_freq * 1000);
if (ret < 0) {
pr_err("Failed to set rate %dkHz: %d\n",
- freqs.new, ret);
- freqs.new = freqs.old;
+ new_freq, ret);
+ return ret;
}
-post_notify:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- if (ret)
- goto err;
-
#ifdef CONFIG_REGULATOR
- if (vddarm && freqs.new < freqs.old) {
+ if (vddarm && new_freq < old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
- freqs.new, ret);
- goto err_clk;
+ new_freq, ret);
+ if (clk_set_rate(armclk, old_freq * 1000) < 0)
+ pr_err("Failed to restore original clock rate\n");
+
+ return ret;
}
}
#endif
@@ -139,14 +113,6 @@ post_notify:
clk_get_rate(armclk) / 1000);
return 0;
-
-err_clk:
- if (clk_set_rate(armclk, freqs.old * 1000) < 0)
- pr_err("Failed to restore original clock rate\n");
-err:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
}
#ifdef CONFIG_REGULATOR
@@ -243,15 +209,12 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
freq++;
}
- policy->cur = clk_get_rate(armclk) / 1000;
-
/* Datasheet says PLL stabalisation time (if we were to use
* the PLLs, which we don't currently) is ~300us worst case,
* but add some fudge.
*/
- policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
+ ret = cpufreq_generic_init(policy, s3c64xx_freq_table,
+ (500 * 1000) + regulator_latency);
if (ret != 0) {
pr_err("Failed to configure frequency table: %d\n",
ret);
@@ -264,8 +227,8 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
static struct cpufreq_driver s3c64xx_cpufreq_driver = {
.flags = 0,
- .verify = s3c64xx_cpufreq_verify_speed,
- .target = s3c64xx_cpufreq_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s3c64xx_cpufreq_set_target,
.get = s3c64xx_cpufreq_get_speed,
.init = s3c64xx_cpufreq_driver_init,
.name = "s3c",
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 5c7757073793..e3973dae28a7 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -26,7 +26,6 @@
static struct clk *cpu_clk;
static struct clk *dmc0_clk;
static struct clk *dmc1_clk;
-static struct cpufreq_freqs freqs;
static DEFINE_MUTEX(set_freq_lock);
/* APLL M,P,S values for 1G/800Mhz */
@@ -36,16 +35,7 @@ static DEFINE_MUTEX(set_freq_lock);
/* Use 800MHz when entering sleep mode */
#define SLEEP_FREQ (800 * 1000)
-/*
- * relation has an additional symantics other than the standard of cpufreq
- * DISALBE_FURTHER_CPUFREQ: disable further access to target
- * ENABLE_FURTUER_CPUFREQ: enable access to target
- */
-enum cpufreq_access {
- DISABLE_FURTHER_CPUFREQ = 0x10,
- ENABLE_FURTHER_CPUFREQ = 0x20,
-};
-
+/* Tracks whether cpu frequency can still be updated */
static bool no_cpufreq_access;
/*
@@ -174,14 +164,6 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
__raw_writel(tmp1, reg);
}
-static int s5pv210_verify_speed(struct cpufreq_policy *policy)
-{
- if (policy->cpu)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
-}
-
static unsigned int s5pv210_getspeed(unsigned int cpu)
{
if (cpu)
@@ -190,22 +172,18 @@ static unsigned int s5pv210_getspeed(unsigned int cpu)
return clk_get_rate(cpu_clk) / 1000;
}
-static int s5pv210_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned long reg;
- unsigned int index, priv_index;
+ unsigned int priv_index;
unsigned int pll_changing = 0;
unsigned int bus_speed_changing = 0;
+ unsigned int old_freq, new_freq;
int arm_volt, int_volt;
int ret = 0;
mutex_lock(&set_freq_lock);
- if (relation & ENABLE_FURTHER_CPUFREQ)
- no_cpufreq_access = false;
-
if (no_cpufreq_access) {
#ifdef CONFIG_PM_VERBOSE
pr_err("%s:%d denied access to %s as it is disabled"
@@ -215,27 +193,13 @@ static int s5pv210_target(struct cpufreq_policy *policy,
goto exit;
}
- if (relation & DISABLE_FURTHER_CPUFREQ)
- no_cpufreq_access = true;
-
- relation &= ~(ENABLE_FURTHER_CPUFREQ | DISABLE_FURTHER_CPUFREQ);
-
- freqs.old = s5pv210_getspeed(0);
-
- if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
- target_freq, relation, &index)) {
- ret = -EINVAL;
- goto exit;
- }
-
- freqs.new = s5pv210_freq_table[index].frequency;
-
- if (freqs.new == freqs.old)
- goto exit;
+ old_freq = s5pv210_getspeed(0);
+ new_freq = s5pv210_freq_table[index].frequency;
/* Finding current running level index */
if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
- freqs.old, relation, &priv_index)) {
+ old_freq, CPUFREQ_RELATION_H,
+ &priv_index)) {
ret = -EINVAL;
goto exit;
}
@@ -243,7 +207,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
arm_volt = dvs_conf[index].arm_volt;
int_volt = dvs_conf[index].int_volt;
- if (freqs.new > freqs.old) {
+ if (new_freq > old_freq) {
ret = regulator_set_voltage(arm_regulator,
arm_volt, arm_volt_max);
if (ret)
@@ -255,8 +219,6 @@ static int s5pv210_target(struct cpufreq_policy *policy,
goto exit;
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* Check if there need to change PLL */
if ((index == L0) || (priv_index == L0))
pll_changing = 1;
@@ -467,9 +429,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
}
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- if (freqs.new < freqs.old) {
+ if (new_freq < old_freq) {
regulator_set_voltage(int_regulator,
int_volt, int_volt_max);
@@ -551,13 +511,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
- policy->cur = policy->min = policy->max = s5pv210_getspeed(0);
-
- cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);
-
- policy->cpuinfo.transition_latency = 40000;
-
- return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
+ return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
out_dmc1:
clk_put(dmc0_clk);
@@ -573,16 +527,18 @@ static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
switch (event) {
case PM_SUSPEND_PREPARE:
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
- DISABLE_FURTHER_CPUFREQ);
+ ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
if (ret < 0)
return NOTIFY_BAD;
+ /* Disable further cpu frequency updates */
+ no_cpufreq_access = true;
return NOTIFY_OK;
case PM_POST_RESTORE:
case PM_POST_SUSPEND:
- cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
- ENABLE_FURTHER_CPUFREQ);
+ /* Re-enable cpu frequency updates */
+ no_cpufreq_access = false;
+ cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
return NOTIFY_OK;
}
@@ -595,18 +551,18 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
{
int ret;
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
- DISABLE_FURTHER_CPUFREQ);
+ ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
if (ret < 0)
return NOTIFY_BAD;
+ no_cpufreq_access = true;
return NOTIFY_DONE;
}
static struct cpufreq_driver s5pv210_driver = {
.flags = CPUFREQ_STICKY,
- .verify = s5pv210_verify_speed,
- .target = s5pv210_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s5pv210_target,
.get = s5pv210_getspeed,
.init = s5pv210_cpu_init,
.name = "s5pv210",
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
index cff18e87ca58..623da742f8e7 100644
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -177,60 +177,33 @@ static void sa1100_update_dram_timings(int current_speed, int new_speed)
}
}
-static int sa1100_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
{
unsigned int cur = sa11x0_getspeed(0);
- unsigned int new_ppcr;
- struct cpufreq_freqs freqs;
-
- new_ppcr = sa11x0_freq_to_ppcr(target_freq);
- switch (relation) {
- case CPUFREQ_RELATION_L:
- if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max)
- new_ppcr--;
- break;
- case CPUFREQ_RELATION_H:
- if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) &&
- (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min))
- new_ppcr--;
- break;
- }
-
- freqs.old = cur;
- freqs.new = sa11x0_ppcr_to_freq(new_ppcr);
+ unsigned int new_freq;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ new_freq = sa11x0_freq_table[ppcr].frequency;
- if (freqs.new > cur)
- sa1100_update_dram_timings(cur, freqs.new);
+ if (new_freq > cur)
+ sa1100_update_dram_timings(cur, new_freq);
- PPCR = new_ppcr;
+ PPCR = ppcr;
- if (freqs.new < cur)
- sa1100_update_dram_timings(cur, freqs.new);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ if (new_freq < cur)
+ sa1100_update_dram_timings(cur, new_freq);
return 0;
}
static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
{
- if (policy->cpu != 0)
- return -EINVAL;
- policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
- policy->cpuinfo.min_freq = 59000;
- policy->cpuinfo.max_freq = 287000;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- return 0;
+ return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
}
static struct cpufreq_driver sa1100_driver __refdata = {
.flags = CPUFREQ_STICKY,
- .verify = sa11x0_verify_speed,
- .target = sa1100_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sa1100_target,
.get = sa11x0_getspeed,
.init = sa1100_cpu_init,
.name = "sa1100",
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 39c90b6f4286..2c2b2e601d13 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -229,36 +229,14 @@ sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram)
/*
* Ok, set the CPU frequency.
*/
-static int sa1110_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
{
struct sdram_params *sdram = &sdram_params;
- struct cpufreq_freqs freqs;
struct sdram_info sd;
unsigned long flags;
- unsigned int ppcr, unused;
-
- switch (relation) {
- case CPUFREQ_RELATION_L:
- ppcr = sa11x0_freq_to_ppcr(target_freq);
- if (sa11x0_ppcr_to_freq(ppcr) > policy->max)
- ppcr--;
- break;
- case CPUFREQ_RELATION_H:
- ppcr = sa11x0_freq_to_ppcr(target_freq);
- if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) &&
- (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min))
- ppcr--;
- break;
- default:
- return -EINVAL;
- }
-
- freqs.old = sa11x0_getspeed(0);
- freqs.new = sa11x0_ppcr_to_freq(ppcr);
+ unsigned int unused;
- sdram_calculate_timing(&sd, freqs.new, sdram);
+ sdram_calculate_timing(&sd, sa11x0_freq_table[ppcr].frequency, sdram);
#if 0
/*
@@ -277,8 +255,6 @@ static int sa1110_target(struct cpufreq_policy *policy,
sd.mdcas[2] = 0xaaaaaaaa;
#endif
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/*
* The clock could be going away for some time. Set the SDRAMs
* to refresh rapidly (every 64 memory clock cycles). To get
@@ -323,30 +299,22 @@ static int sa1110_target(struct cpufreq_policy *policy,
/*
* Now, return the SDRAM refresh back to normal.
*/
- sdram_update_refresh(freqs.new, sdram);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ sdram_update_refresh(sa11x0_freq_table[ppcr].frequency, sdram);
return 0;
}
static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
{
- if (policy->cpu != 0)
- return -EINVAL;
- policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
- policy->cpuinfo.min_freq = 59000;
- policy->cpuinfo.max_freq = 287000;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- return 0;
+ return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
}
/* sa1110_driver needs __refdata because it must remain after init registers
* it with cpufreq_register_driver() */
static struct cpufreq_driver sa1110_driver __refdata = {
.flags = CPUFREQ_STICKY,
- .verify = sa11x0_verify_speed,
- .target = sa1110_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sa1110_target,
.get = sa11x0_getspeed,
.init = sa1110_cpu_init,
.name = "sa1110",
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index d6f6c6f4efa7..6adb354e359c 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -53,21 +53,11 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
}
}
-static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int state)
+static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state)
{
- struct cpufreq_freqs freqs;
u8 clockspeed_reg;
- freqs.old = sc520_freq_get_cpu_frequency(0);
- freqs.new = sc520_freq_table[state].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- pr_debug("attempting to set frequency to %i kHz\n",
- sc520_freq_table[state].frequency);
-
local_irq_disable();
clockspeed_reg = *cpuctl & ~0x03;
@@ -75,30 +65,9 @@ static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int sc520_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
-}
-
-static int sc520_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target(policy, sc520_freq_table,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- sc520_freq_set_cpu_state(policy, newstate);
-
return 0;
}
-
/*
* Module init and exit code
*/
@@ -106,7 +75,6 @@ static int sc520_freq_target(struct cpufreq_policy *policy,
static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(0);
- int result;
/* capability check */
if (c->x86_vendor != X86_VENDOR_AMD ||
@@ -115,39 +83,19 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 1000000; /* 1ms */
- policy->cur = sc520_freq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
-
- return 0;
-}
-
-static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, sc520_freq_table);
}
-static struct freq_attr *sc520_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-
static struct cpufreq_driver sc520_freq_driver = {
.get = sc520_freq_get_cpu_frequency,
- .verify = sc520_freq_verify,
- .target = sc520_freq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sc520_freq_target,
.init = sc520_freq_cpu_init,
- .exit = sc520_freq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "sc520_freq",
- .attr = sc520_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id sc520_ids[] = {
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index ffc6d24b0cfb..387af12503a6 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -87,15 +87,12 @@ static int sh_cpufreq_verify(struct cpufreq_policy *policy)
if (freq_table)
return cpufreq_frequency_table_verify(policy, freq_table);
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
-
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -114,15 +111,13 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
return PTR_ERR(cpuclk);
}
- policy->cur = sh_cpufreq_get(cpu);
-
freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
if (freq_table) {
int result;
- result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (!result)
- cpufreq_frequency_table_get_attr(freq_table, cpu);
+ result = cpufreq_table_validate_and_show(policy, freq_table);
+ if (result)
+ return result;
} else {
dev_notice(dev, "no frequency table found, falling back "
"to rate rounding.\n");
@@ -154,11 +149,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static struct freq_attr *sh_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver sh_cpufreq_driver = {
.name = "sh",
.get = sh_cpufreq_get,
@@ -166,7 +156,7 @@ static struct cpufreq_driver sh_cpufreq_driver = {
.verify = sh_cpufreq_verify,
.init = sh_cpufreq_cpu_init,
.exit = sh_cpufreq_cpu_exit,
- .attr = sh_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init sh_cpufreq_module_init(void)
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index cf5bc2ca16fa..62aa23e219d4 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -245,14 +245,12 @@ static unsigned int us2e_freq_get(unsigned int cpu)
return clock_tick / estar_to_divisor(estar);
}
-static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
- unsigned int index)
+static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int cpu = policy->cpu;
unsigned long new_bits, new_freq;
unsigned long clock_tick, divisor, old_divisor, estar;
cpumask_t cpus_allowed;
- struct cpufreq_freqs freqs;
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -266,41 +264,15 @@ static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
old_divisor = estar_to_divisor(estar);
- freqs.old = clock_tick / old_divisor;
- freqs.new = new_freq;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
if (old_divisor != divisor)
us2e_transition(estar, new_bits, clock_tick * 1000,
old_divisor, divisor);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
set_cpus_allowed_ptr(current, &cpus_allowed);
-}
-
-static int us2e_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int new_index = 0;
-
- if (cpufreq_frequency_table_target(policy,
- &us2e_freq_table[policy->cpu].table[0],
- target_freq, relation, &new_index))
- return -EINVAL;
-
- us2e_set_cpu_divider_index(policy, new_index);
return 0;
}
-static int us2e_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- &us2e_freq_table[policy->cpu].table[0]);
-}
-
static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
@@ -324,13 +296,15 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 0;
policy->cur = clock_tick;
- return cpufreq_frequency_table_cpuinfo(policy, table);
+ return cpufreq_table_validate_and_show(policy, table);
}
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
- if (cpufreq_us2e_driver)
- us2e_set_cpu_divider_index(policy, 0);
+ if (cpufreq_us2e_driver) {
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ us2e_freq_target(policy, 0);
+ }
return 0;
}
@@ -361,8 +335,8 @@ static int __init us2e_freq_init(void)
goto err_out;
driver->init = us2e_freq_cpu_init;
- driver->verify = us2e_freq_verify;
- driver->target = us2e_freq_target;
+ driver->verify = cpufreq_generic_frequency_table_verify;
+ driver->target_index = us2e_freq_target;
driver->get = us2e_freq_get;
driver->exit = us2e_freq_cpu_exit;
strcpy(driver->name, "UltraSPARC-IIe");
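The two UltraSPARC drivers keep a custom exit() because, besides releasing the sysfs table, they also park the divider back at index 0 on teardown; with the helper-based target gone they now call cpufreq_frequency_table_put_attr() directly. Drivers above with nothing extra to do switch to cpufreq_generic_exit() instead, which, judging by the exit callbacks it replaces, presumably reduces to the following sketch:

/* Assumed equivalent of cpufreq_generic_exit(), based on the callbacks it replaces. */
#include <linux/cpufreq.h>

static int generic_exit_sketch(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	return 0;
}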
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index ac76b489979d..724ffbd7105d 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -93,13 +93,11 @@ static unsigned int us3_freq_get(unsigned int cpu)
return ret;
}
-static void us3_set_cpu_divider_index(struct cpufreq_policy *policy,
- unsigned int index)
+static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int cpu = policy->cpu;
unsigned long new_bits, new_freq, reg;
cpumask_t cpus_allowed;
- struct cpufreq_freqs freqs;
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -125,43 +123,15 @@ static void us3_set_cpu_divider_index(struct cpufreq_policy *policy,
reg = read_safari_cfg();
- freqs.old = get_current_freq(cpu, reg);
- freqs.new = new_freq;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
reg &= ~SAFARI_CFG_DIV_MASK;
reg |= new_bits;
write_safari_cfg(reg);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
set_cpus_allowed_ptr(current, &cpus_allowed);
-}
-
-static int us3_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int new_index = 0;
-
- if (cpufreq_frequency_table_target(policy,
- &us3_freq_table[policy->cpu].table[0],
- target_freq,
- relation,
- &new_index))
- return -EINVAL;
-
- us3_set_cpu_divider_index(policy, new_index);
return 0;
}
-static int us3_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- &us3_freq_table[policy->cpu].table[0]);
-}
-
static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
@@ -181,13 +151,15 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 0;
policy->cur = clock_tick;
- return cpufreq_frequency_table_cpuinfo(policy, table);
+ return cpufreq_table_validate_and_show(policy, table);
}
static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
{
- if (cpufreq_us3_driver)
- us3_set_cpu_divider_index(policy, 0);
+ if (cpufreq_us3_driver) {
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ us3_freq_target(policy, 0);
+ }
return 0;
}
@@ -222,8 +194,8 @@ static int __init us3_freq_init(void)
goto err_out;
driver->init = us3_freq_cpu_init;
- driver->verify = us3_freq_verify;
- driver->target = us3_freq_target;
+ driver->verify = cpufreq_generic_frequency_table_verify;
+ driver->target_index = us3_freq_target;
driver->get = us3_freq_get;
driver->exit = us3_freq_cpu_exit;
strcpy(driver->name, "UltraSPARC-III");
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 3f418166ce02..d02ccd19c9c4 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -30,11 +30,6 @@ static struct {
u32 cnt;
} spear_cpufreq;
-static int spear_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
-}
-
static unsigned int spear_cpufreq_get(unsigned int cpu)
{
return clk_get_rate(spear_cpufreq.clk) / 1000;
@@ -110,20 +105,14 @@ static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq)
}
static int spear_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
long newfreq;
struct clk *srcclk;
- int index, ret, mult = 1;
-
- if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl,
- target_freq, relation, &index))
- return -EINVAL;
-
- freqs.old = spear_cpufreq_get(0);
+ int ret, mult = 1;
newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
+
if (of_machine_is_compatible("st,spear1340")) {
/*
* SPEAr1340 is special in the sense that due to the possibility
@@ -154,65 +143,32 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
return newfreq;
}
- freqs.new = newfreq / 1000;
- freqs.new /= mult;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
if (mult == 2)
ret = spear1340_set_cpu_rate(srcclk, newfreq);
else
ret = clk_set_rate(spear_cpufreq.clk, newfreq);
- /* Get current rate after clk_set_rate, in case of failure */
- if (ret) {
+ if (ret)
pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret);
- freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
- }
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
}
static int spear_cpufreq_init(struct cpufreq_policy *policy)
{
- int ret;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl);
- if (ret) {
- pr_err("cpufreq_frequency_table_cpuinfo() failed");
- return ret;
- }
-
- cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu);
- policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
- policy->cur = spear_cpufreq_get(0);
-
- cpumask_setall(policy->cpus);
-
- return 0;
-}
-
-static int spear_cpufreq_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
+ spear_cpufreq.transition_latency);
}
-static struct freq_attr *spear_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver spear_cpufreq_driver = {
.name = "cpufreq-spear",
.flags = CPUFREQ_STICKY,
- .verify = spear_cpufreq_verify,
- .target = spear_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = spear_cpufreq_target,
.get = spear_cpufreq_get,
.init = spear_cpufreq_init,
- .exit = spear_cpufreq_exit,
- .attr = spear_cpufreq_attr,
+ .exit = cpufreq_generic_exit,
+ .attr = cpufreq_generic_attr,
};
static int spear_cpufreq_driver_init(void)
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index f897d5105842..4e1daca5ce3b 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -343,9 +343,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
static int centrino_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
- unsigned freq;
unsigned l, h;
- int ret;
int i;
/* Only Intel makes Enhanced Speedstep-capable CPUs */
@@ -373,9 +371,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- if (centrino_cpu_init_table(policy)) {
+ if (centrino_cpu_init_table(policy))
return -ENODEV;
- }
/* Check to see if Enhanced SpeedStep is enabled, and try to
enable it if not. */
@@ -395,22 +392,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
}
}
- freq = get_cur_freq(policy->cpu);
policy->cpuinfo.transition_latency = 10000;
/* 10uS transition latency */
- policy->cur = freq;
-
- pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur);
- ret = cpufreq_frequency_table_cpuinfo(policy,
+ return cpufreq_table_validate_and_show(policy,
per_cpu(centrino_model, policy->cpu)->op_points);
- if (ret)
- return (ret);
-
- cpufreq_frequency_table_get_attr(
- per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
-
- return 0;
}
static int centrino_cpu_exit(struct cpufreq_policy *policy)
@@ -428,36 +414,18 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
}
/**
- * centrino_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within this model's frequency range at least one
- * border included.
- */
-static int centrino_verify (struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- per_cpu(centrino_model, policy->cpu)->op_points);
-}
-
-/**
* centrino_setpolicy - set a new CPUFreq policy
* @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @index: index of target frequency
*
* Sets a new CPUFreq policy.
*/
-static int centrino_target (struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int newstate = 0;
unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
- struct cpufreq_freqs freqs;
int retval = 0;
- unsigned int j, first_cpu, tmp;
+ unsigned int j, first_cpu;
+ struct cpufreq_frequency_table *op_points;
cpumask_var_t covered_cpus;
if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
@@ -468,16 +436,8 @@ static int centrino_target (struct cpufreq_policy *policy,
goto out;
}
- if (unlikely(cpufreq_frequency_table_target(policy,
- per_cpu(centrino_model, cpu)->op_points,
- target_freq,
- relation,
- &newstate))) {
- retval = -EINVAL;
- goto out;
- }
-
first_cpu = 1;
+ op_points = &per_cpu(centrino_model, cpu)->op_points[index];
for_each_cpu(j, policy->cpus) {
int good_cpu;
@@ -501,7 +461,7 @@ static int centrino_target (struct cpufreq_policy *policy,
break;
}
- msr = per_cpu(centrino_model, cpu)->op_points[newstate].driver_data;
+ msr = op_points->driver_data;
if (first_cpu) {
rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
@@ -512,15 +472,6 @@ static int centrino_target (struct cpufreq_policy *policy,
goto out;
}
- freqs.old = extract_clock(oldmsr, cpu, 0);
- freqs.new = extract_clock(msr, cpu, 0);
-
- pr_debug("target=%dkHz old=%d new=%d msr=%04x\n",
- target_freq, freqs.old, freqs.new, msr);
-
- cpufreq_notify_transition(policy, &freqs,
- CPUFREQ_PRECHANGE);
-
first_cpu = 0;
/* all but 16 LSB are reserved, treat them with care */
oldmsr &= ~0xffff;
@@ -535,8 +486,6 @@ static int centrino_target (struct cpufreq_policy *policy,
cpumask_set_cpu(j, covered_cpus);
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
if (unlikely(retval)) {
/*
* We have failed halfway through the frequency change.
@@ -547,12 +496,6 @@ static int centrino_target (struct cpufreq_policy *policy,
for_each_cpu(j, covered_cpus)
wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
-
- tmp = freqs.new;
- freqs.new = freqs.old;
- freqs.old = tmp;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
retval = 0;
@@ -561,20 +504,15 @@ out:
return retval;
}
-static struct freq_attr* centrino_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver centrino_driver = {
.name = "centrino", /* should be speedstep-centrino,
but there's a 16 char limit */
.init = centrino_cpu_init,
.exit = centrino_cpu_exit,
- .verify = centrino_verify,
- .target = centrino_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = centrino_target,
.get = get_cur_freq,
- .attr = centrino_attr,
+ .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 5355abb69afc..7639b2be2a90 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -251,56 +251,23 @@ static unsigned int speedstep_get(unsigned int cpu)
/**
* speedstep_target - set a new CPUFreq policy
* @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @index: index of target frequency
*
* Sets a new CPUFreq policy.
*/
-static int speedstep_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int newstate = 0, policy_cpu;
- struct cpufreq_freqs freqs;
-
- if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
- target_freq, relation, &newstate))
- return -EINVAL;
+ unsigned int policy_cpu;
policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
- freqs.old = speedstep_get(policy_cpu);
- freqs.new = speedstep_freqs[newstate].frequency;
-
- pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new);
-
- /* no transition necessary */
- if (freqs.old == freqs.new)
- return 0;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
+ smp_call_function_single(policy_cpu, _speedstep_set_state, &index,
true);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
return 0;
}
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
struct get_freqs {
struct cpufreq_policy *policy;
int ret;
@@ -320,8 +287,7 @@ static void get_freqs_on_cpu(void *_get_freqs)
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
- int result;
- unsigned int policy_cpu, speed;
+ unsigned int policy_cpu;
struct get_freqs gf;
/* only run on CPU to be set, or on its sibling */
@@ -336,49 +302,18 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
if (gf.ret)
return gf.ret;
- /* get current speed setting */
- speed = speedstep_get(policy_cpu);
- if (!speed)
- return -EIO;
-
- pr_debug("currently at %s speed setting - %i MHz\n",
- (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
- ? "low" : "high",
- (speed / 1000));
-
- /* cpuinfo and default policy values */
- policy->cur = speed;
-
- result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
- return 0;
+ return cpufreq_table_validate_and_show(policy, speedstep_freqs);
}
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *speedstep_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-ich",
- .verify = speedstep_verify,
- .target = speedstep_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = speedstep_target,
.init = speedstep_cpu_init,
- .exit = speedstep_cpu_exit,
+ .exit = cpufreq_generic_exit,
.get = speedstep_get,
- .attr = speedstep_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index abfba4f731eb..0f5326d6f79f 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -235,52 +235,21 @@ static void speedstep_set_state(unsigned int state)
/**
* speedstep_target - set a new CPUFreq policy
* @policy: new policy
- * @target_freq: new freq
- * @relation:
+ * @index: index of new freq
*
* Sets a new CPUFreq policy/freq.
*/
-static int speedstep_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int newstate = 0;
- struct cpufreq_freqs freqs;
-
- if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
- target_freq, relation, &newstate))
- return -EINVAL;
-
- freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
- freqs.new = speedstep_freqs[newstate].frequency;
-
- if (freqs.old == freqs.new)
- return 0;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- speedstep_set_state(newstate);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ speedstep_set_state(index);
return 0;
}
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
-
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
int result;
- unsigned int speed, state;
unsigned int *low, *high;
/* capability check */
@@ -316,32 +285,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
pr_debug("workaround worked.\n");
}
- /* get current speed setting */
- state = speedstep_get_state();
- speed = speedstep_freqs[state].frequency;
-
- pr_debug("currently at %s speed setting - %i MHz\n",
- (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
- ? "low" : "high",
- (speed / 1000));
-
- /* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = speed;
-
- result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
- return 0;
-}
-
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, speedstep_freqs);
}
static unsigned int speedstep_get(unsigned int cpu)
@@ -362,20 +307,15 @@ static int speedstep_resume(struct cpufreq_policy *policy)
return result;
}
-static struct freq_attr *speedstep_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-smi",
- .verify = speedstep_verify,
- .target = speedstep_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = speedstep_target,
.init = speedstep_cpu_init,
- .exit = speedstep_cpu_exit,
+ .exit = cpufreq_generic_exit,
.get = speedstep_get,
.resume = speedstep_resume,
- .attr = speedstep_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
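Both speedstep conversions above follow the pattern applied throughout this series: the driver keeps only an index-based frequency-setting hook and hands verification, table registration and sysfs attributes to the generic cpufreq helpers. A minimal, hedged sketch of that post-conversion shape; my_freqs, my_write_hw() and the "my-cpufreq" name are placeholders, only the cpufreq_* helpers are the ones appearing in the hunks above:

#include <linux/cpufreq.h>
#include <linux/module.h>

/* Hypothetical two-entry table; a real driver fills this from hardware/DT. */
static struct cpufreq_frequency_table my_freqs[] = {
	{ .frequency = 600000 },		/* kHz */
	{ .frequency = 1200000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

/* Placeholder for the SoC-specific register or MSR write. */
static void my_write_hw(unsigned int khz) { }

static int my_target(struct cpufreq_policy *policy, unsigned int index)
{
	/* The cpufreq core has already validated 'index' against the table. */
	my_write_hw(my_freqs[index].frequency);
	return 0;
}

static int my_cpu_init(struct cpufreq_policy *policy)
{
	/* Registers the table, fills cpuinfo limits and exposes sysfs attrs. */
	return cpufreq_table_validate_and_show(policy, my_freqs);
}

static struct cpufreq_driver my_driver = {
	.name		= "my-cpufreq",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= my_target,
	.init		= my_cpu_init,
	.exit		= cpufreq_generic_exit,
	.attr		= cpufreq_generic_attr,
};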
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index a7b876fdc1d8..f42df7ec03c5 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -51,11 +51,6 @@ static unsigned long target_cpu_speed[NUM_CPUS];
static DEFINE_MUTEX(tegra_cpu_lock);
static bool is_suspended;
-static int tegra_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static unsigned int tegra_getspeed(unsigned int cpu)
{
unsigned long rate;
@@ -107,12 +102,8 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
unsigned long rate)
{
int ret = 0;
- struct cpufreq_freqs freqs;
- freqs.old = tegra_getspeed(0);
- freqs.new = rate;
-
- if (freqs.old == freqs.new)
+ if (tegra_getspeed(0) == rate)
return ret;
/*
@@ -126,21 +117,10 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
else
clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-#ifdef CONFIG_CPU_FREQ_DEBUG
- printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
- freqs.old, freqs.new);
-#endif
-
- ret = tegra_cpu_clk_set_rate(freqs.new * 1000);
- if (ret) {
- pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
- freqs.new);
- freqs.new = freqs.old;
- }
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ ret = tegra_cpu_clk_set_rate(rate * 1000);
+ if (ret)
+ pr_err("cpu-tegra: Failed to set cpu frequency to %lu kHz\n",
+ rate);
return ret;
}
@@ -155,11 +135,8 @@ static unsigned long tegra_cpu_highest_speed(void)
return rate;
}
-static int tegra_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int idx;
unsigned int freq;
int ret = 0;
@@ -170,10 +147,7 @@ static int tegra_target(struct cpufreq_policy *policy,
goto out;
}
- cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &idx);
-
- freq = freq_table[idx].frequency;
+ freq = freq_table[index].frequency;
target_cpu_speed[policy->cpu] = freq;
@@ -209,21 +183,23 @@ static struct notifier_block tegra_cpu_pm_notifier = {
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
+ int ret;
+
if (policy->cpu >= NUM_CPUS)
return -EINVAL;
clk_prepare_enable(emc_clk);
clk_prepare_enable(cpu_clk);
- cpufreq_frequency_table_cpuinfo(policy, freq_table);
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
- policy->cur = tegra_getspeed(policy->cpu);
- target_cpu_speed[policy->cpu] = policy->cur;
+ target_cpu_speed[policy->cpu] = tegra_getspeed(policy->cpu);
/* FIXME: what's the actual transition time? */
- policy->cpuinfo.transition_latency = 300 * 1000;
-
- cpumask_copy(policy->cpus, cpu_possible_mask);
+ ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+ if (ret) {
+ clk_disable_unprepare(cpu_clk);
+ clk_disable_unprepare(emc_clk);
+ return ret;
+ }
if (policy->cpu == 0)
register_pm_notifier(&tegra_cpu_pm_notifier);
@@ -233,24 +209,20 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ clk_disable_unprepare(cpu_clk);
clk_disable_unprepare(emc_clk);
return 0;
}
-static struct freq_attr *tegra_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver tegra_cpufreq_driver = {
- .verify = tegra_verify_speed,
- .target = tegra_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = tegra_target,
.get = tegra_getspeed,
.init = tegra_cpu_init,
.exit = tegra_cpu_exit,
.name = "tegra",
- .attr = tegra_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init tegra_cpufreq_init(void)
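The tegra change also shows the second helper used by this series, cpufreq_generic_init(), which bundles the table validation, transition-latency setup and policy->cpus handling that the old code did by hand. A hedged sketch of the init path for a clock-based driver; my_clk, my_freq_table and the 300 us latency are placeholders, while the helper call and its error handling mirror the tegra hunk above:

#include <linux/clk.h>
#include <linux/cpufreq.h>

static struct clk *my_clk;				/* hypothetical CPU clock */
static struct cpufreq_frequency_table *my_freq_table;	/* hypothetical table */

static int my_clk_cpu_init(struct cpufreq_policy *policy)
{
	int ret;

	clk_prepare_enable(my_clk);

	/* Validates the table and sets the transition latency on the policy. */
	ret = cpufreq_generic_init(policy, my_freq_table, 300 * 1000);
	if (ret)
		clk_disable_unprepare(my_clk);	/* undo the enable on failure */

	return ret;
}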
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index b225f04d8ae5..653ae2955b55 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -29,9 +29,7 @@ static int ucv2_verify_speed(struct cpufreq_policy *policy)
if (policy->cpu)
return -EINVAL;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -68,7 +66,6 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
- policy->cur = ucv2_getspeed(0);
policy->min = policy->cpuinfo.min_freq = 250000;
policy->max = policy->cpuinfo.max_freq = 1000000;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
new file mode 100644
index 000000000000..7f7c9c01b44e
--- /dev/null
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -0,0 +1,70 @@
+/*
+ * Versatile Express SPC CPUFreq Interface driver
+ *
+ * It provides necessary ops to arm_big_little cpufreq driver.
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/types.h>
+
+#include "arm_big_little.h"
+
+static int ve_spc_init_opp_table(struct device *cpu_dev)
+{
+ /*
+ * platform specific SPC code must initialise the opp table
+ * so just check if the OPP count is non-zero
+ */
+ return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+}
+
+static int ve_spc_get_transition_latency(struct device *cpu_dev)
+{
+ return 1000000; /* 1 ms */
+}
+
+static struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
+ .name = "vexpress-spc",
+ .get_transition_latency = ve_spc_get_transition_latency,
+ .init_opp_table = ve_spc_init_opp_table,
+};
+
+static int ve_spc_cpufreq_probe(struct platform_device *pdev)
+{
+ return bL_cpufreq_register(&ve_spc_cpufreq_ops);
+}
+
+static int ve_spc_cpufreq_remove(struct platform_device *pdev)
+{
+ bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
+ return 0;
+}
+
+static struct platform_driver ve_spc_cpufreq_platdrv = {
+ .driver = {
+ .name = "vexpress-spc-cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .probe = ve_spc_cpufreq_probe,
+ .remove = ve_spc_cpufreq_remove,
+};
+module_platform_driver(ve_spc_cpufreq_platdrv);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 8e3660322308..d988948a89a0 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -2,9 +2,20 @@
# ARM CPU Idle drivers
#
+config ARM_BIG_LITTLE_CPUIDLE
+ bool "Support for ARM big.LITTLE processors"
+ depends on ARCH_VEXPRESS_TC2_PM
+ select ARM_CPU_SUSPEND
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ help
+ Select this option to enable the CPU idle driver for big.LITTLE based
+ ARM systems. The driver manages CPU coordination through MCPM and
+ defines different C-states for little and big cores through the
+ multiple CPU idle drivers infrastructure.
+
config ARM_HIGHBANK_CPUIDLE
bool "CPU Idle Driver for Calxeda processors"
- depends on ARCH_HIGHBANK
+ depends on ARM_PSCI
select ARM_CPU_SUSPEND
help
Select this to enable cpuidle on Calxeda processors.
@@ -27,13 +38,9 @@ config ARM_U8500_CPUIDLE
help
Select this to enable cpuidle for ST-E u8500 processors
-config CPU_IDLE_BIG_LITTLE
- bool "Support for ARM big.LITTLE processors"
- depends on ARCH_VEXPRESS_TC2_PM
- select ARM_CPU_SUSPEND
- select CPU_IDLE_MULTIPLE_DRIVERS
+config ARM_AT91_CPUIDLE
+ bool "Cpu Idle Driver for the AT91 processors"
+ default y
+ depends on ARCH_AT91
help
- Select this option to enable CPU idle driver for big.LITTLE based
- ARM systems. Driver manages CPUs coordination through MCPM and
- define different C-states for little and big cores through the
- multiple CPU idle drivers infrastructure.
+ Select this to enable cpuidle for AT91 processors
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index cea5ef58876d..527be28e5c1e 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -7,8 +7,9 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
##################################################################################
# ARM SoC drivers
+obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE) += cpuidle-big_little.o
obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
-obj-$(CONFIG_CPU_IDLE_BIG_LITTLE) += cpuidle-big_little.o
+obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index f8a86364c6b6..e952936418d0 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -147,7 +147,7 @@ static cpumask_t cpuidle_coupled_poked;
* has returned from this function, the barrier is immediately available for
* reuse.
*
- * The atomic variable a must be initialized to 0 before any cpu calls
+ * The atomic variable must be initialized to 0 before any cpu calls
* this function, will be reset to 0 before any cpu returns from this function.
*
* Must only be called from within a coupled idle state handler
diff --git a/arch/arm/mach-at91/cpuidle.c b/drivers/cpuidle/cpuidle-at91.c
index 4ec6a6d9b9be..a0774370c6bc 100644
--- a/arch/arm/mach-at91/cpuidle.c
+++ b/drivers/cpuidle/cpuidle-at91.c
@@ -21,26 +21,17 @@
#include <linux/export.h>
#include <asm/proc-fns.h>
#include <asm/cpuidle.h>
-#include <mach/cpu.h>
-
-#include "pm.h"
#define AT91_MAX_STATES 2
+static void (*at91_standby)(void);
+
/* Actual code that puts the SoC in different idle states */
static int at91_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- if (cpu_is_at91rm9200())
- at91rm9200_standby();
- else if (cpu_is_at91sam9g45())
- at91sam9g45_standby();
- else if (cpu_is_at91sam9263())
- at91sam9263_standby();
- else
- at91sam9_standby();
-
+ at91_standby();
return index;
}
@@ -60,9 +51,19 @@ static struct cpuidle_driver at91_idle_driver = {
};
/* Initialize CPU idle by registering the idle states */
-static int __init at91_init_cpuidle(void)
+static int at91_cpuidle_probe(struct platform_device *dev)
{
+ at91_standby = (void *)(dev->dev.platform_data);
+
return cpuidle_register(&at91_idle_driver, NULL);
}
-device_initcall(at91_init_cpuidle);
+static struct platform_driver at91_cpuidle_driver = {
+ .driver = {
+ .name = "cpuidle-at91",
+ .owner = THIS_MODULE,
+ },
+ .probe = at91_cpuidle_probe,
+};
+
+module_platform_driver(at91_cpuidle_driver);
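With the at91 cpuidle code turned into a platform driver, the SoC-specific standby routine now arrives through platform data instead of cpu_is_*() checks. The machine code that registers the device is not part of this hunk; a hypothetical sketch of what it is expected to look like, where my_soc_standby and the device variable names are invented and only the "cpuidle-at91" name and the platform_data convention come from the probe above:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/proc-fns.h>

static void my_soc_standby(void)
{
	/* SoC-specific low-power entry would go here. */
	cpu_do_idle();
}

static struct platform_device my_cpuidle_device = {
	.name			= "cpuidle-at91",
	.id			= -1,
	/* the probe reads this back and calls it from at91_enter_idle() */
	.dev.platform_data	= (void *)my_soc_standby,
};

static int __init my_board_cpuidle_init(void)
{
	return platform_device_register(&my_cpuidle_device);
}
device_initcall(my_board_cpuidle_init);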
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index 346058479572..36795639df0d 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -21,53 +21,30 @@
*/
#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/suspend.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
-#include <asm/smp_scu.h>
#include <asm/suspend.h>
-#include <asm/cacheflush.h>
-#include <asm/cp15.h>
-
-extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
-extern void __iomem *scu_base_addr;
-
-static noinline void calxeda_idle_restore(void)
-{
- set_cr(get_cr() | CR_C);
- set_auxcr(get_auxcr() | 0x40);
- scu_power_mode(scu_base_addr, SCU_PM_NORMAL);
-}
+#include <asm/psci.h>
static int calxeda_idle_finish(unsigned long val)
{
- /* Already flushed cache, but do it again as the outer cache functions
- * dirty the cache with spinlocks */
- flush_cache_all();
-
- set_auxcr(get_auxcr() & ~0x40);
- set_cr(get_cr() & ~CR_C);
-
- scu_power_mode(scu_base_addr, SCU_PM_DORMANT);
-
- cpu_do_idle();
-
- /* Restore things if we didn't enter power-gating */
- calxeda_idle_restore();
- return 1;
+ const struct psci_power_state ps = {
+ .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+ };
+ return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
}
static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- highbank_set_cpu_jump(smp_processor_id(), cpu_resume);
+ cpu_pm_enter();
cpu_suspend(0, calxeda_idle_finish);
+ cpu_pm_exit();
+
return index;
}
@@ -88,11 +65,17 @@ static struct cpuidle_driver calxeda_idle_driver = {
.state_count = 2,
};
-static int __init calxeda_cpuidle_init(void)
+static int __init calxeda_cpuidle_probe(struct platform_device *pdev)
{
- if (!of_machine_is_compatible("calxeda,highbank"))
- return -ENODEV;
-
return cpuidle_register(&calxeda_idle_driver, NULL);
}
-module_init(calxeda_cpuidle_init);
+
+static struct platform_driver calxeda_cpuidle_plat_driver = {
+ .driver = {
+ .name = "cpuidle-calxeda",
+ .owner = THIS_MODULE,
+ },
+ .probe = calxeda_cpuidle_probe,
+};
+
+module_platform_driver(calxeda_cpuidle_plat_driver);
diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c
index e0564652af35..5e35804b1a95 100644
--- a/drivers/cpuidle/cpuidle-ux500.c
+++ b/drivers/cpuidle/cpuidle-ux500.c
@@ -111,7 +111,7 @@ static struct cpuidle_driver ux500_idle_driver = {
.state_count = 2,
};
-static int __init dbx500_cpuidle_probe(struct platform_device *pdev)
+static int dbx500_cpuidle_probe(struct platform_device *pdev)
{
/* Configure wake up reasons */
prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
diff --git a/drivers/cpuidle/cpuidle-zynq.c b/drivers/cpuidle/cpuidle-zynq.c
index 38e03a183591..aded75928028 100644
--- a/drivers/cpuidle/cpuidle-zynq.c
+++ b/drivers/cpuidle/cpuidle-zynq.c
@@ -28,7 +28,7 @@
#include <linux/init.h>
#include <linux/cpu_pm.h>
#include <linux/cpuidle.h>
-#include <linux/of.h>
+#include <linux/platform_device.h>
#include <asm/proc-fns.h>
#include <asm/cpuidle.h>
@@ -70,14 +70,19 @@ static struct cpuidle_driver zynq_idle_driver = {
};
/* Initialize CPU idle by registering the idle states */
-static int __init zynq_cpuidle_init(void)
+static int zynq_cpuidle_probe(struct platform_device *pdev)
{
- if (!of_machine_is_compatible("xlnx,zynq-7000"))
- return -ENODEV;
-
pr_info("Xilinx Zynq CpuIdle Driver started\n");
return cpuidle_register(&zynq_idle_driver, NULL);
}
-device_initcall(zynq_cpuidle_init);
+static struct platform_driver zynq_cpuidle_driver = {
+ .driver = {
+ .name = "cpuidle-zynq",
+ .owner = THIS_MODULE,
+ },
+ .probe = zynq_cpuidle_probe,
+};
+
+module_platform_driver(zynq_cpuidle_driver);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index d75040ddd2b3..2a991e468f78 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -118,11 +118,9 @@ int cpuidle_idle_call(void)
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv;
int next_state, entered_state;
+ bool broadcast;
- if (off)
- return -ENODEV;
-
- if (!initialized)
+ if (off || !initialized)
return -ENODEV;
/* check if the device is ready */
@@ -144,9 +142,10 @@ int cpuidle_idle_call(void)
trace_cpu_idle_rcuidle(next_state, dev->cpu);
- if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
- &dev->cpu);
+ broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
+
+ if (broadcast)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
if (cpuidle_state_is_coupled(dev, drv, next_state))
entered_state = cpuidle_enter_state_coupled(dev, drv,
@@ -154,9 +153,8 @@ int cpuidle_idle_call(void)
else
entered_state = cpuidle_enter_state(dev, drv, next_state);
- if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
- &dev->cpu);
+ if (broadcast)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
@@ -228,45 +226,6 @@ void cpuidle_resume(void)
mutex_unlock(&cpuidle_lock);
}
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- ktime_t t1, t2;
- s64 diff;
-
- t1 = ktime_get();
- local_irq_enable();
- while (!need_resched())
- cpu_relax();
-
- t2 = ktime_get();
- diff = ktime_to_us(ktime_sub(t2, t1));
- if (diff > INT_MAX)
- diff = INT_MAX;
-
- dev->last_residency = (int) diff;
-
- return index;
-}
-
-static void poll_idle_init(struct cpuidle_driver *drv)
-{
- struct cpuidle_state *state = &drv->states[0];
-
- snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
- snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
- state->exit_latency = 0;
- state->target_residency = 0;
- state->power_usage = -1;
- state->flags = 0;
- state->enter = poll_idle;
- state->disabled = false;
-}
-#else
-static void poll_idle_init(struct cpuidle_driver *drv) {}
-#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
-
/**
* cpuidle_enable_device - enables idle PM for a CPU
* @dev: the CPU
@@ -296,8 +255,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (!dev->state_count)
dev->state_count = drv->state_count;
- poll_idle_init(drv);
-
ret = cpuidle_add_device_sysfs(dev);
if (ret)
return ret;
@@ -358,12 +315,10 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
module_put(drv->owner);
}
-static int __cpuidle_device_init(struct cpuidle_device *dev)
+static void __cpuidle_device_init(struct cpuidle_device *dev)
{
memset(dev->states_usage, 0, sizeof(dev->states_usage));
dev->last_residency = 0;
-
- return 0;
}
/**
@@ -385,13 +340,12 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
list_add(&dev->device_list, &cpuidle_detected_devices);
ret = cpuidle_coupled_register_device(dev);
- if (ret) {
+ if (ret)
__cpuidle_unregister_device(dev);
- return ret;
- }
+ else
+ dev->registered = 1;
- dev->registered = 1;
- return 0;
+ return ret;
}
/**
@@ -410,9 +364,7 @@ int cpuidle_register_device(struct cpuidle_device *dev)
if (dev->registered)
goto out_unlock;
- ret = __cpuidle_device_init(dev);
- if (ret)
- goto out_unlock;
+ __cpuidle_device_init(dev);
ret = __cpuidle_register_device(dev);
if (ret)
@@ -516,7 +468,7 @@ int cpuidle_register(struct cpuidle_driver *drv,
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
/*
- * On multiplatform for ARM, the coupled idle states could
+ * On multiplatform for ARM, the coupled idle states could be
* enabled in the kernel even if the cpuidle driver does not
* use it. Note, coupled_cpus is a struct copy.
*/
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 6e11701f0fca..06dbe7c86199 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -10,6 +10,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/clockchips.h>
@@ -56,7 +57,7 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv)
}
/**
- * __cpuidle_set_driver - set per CPU driver variables the the given driver.
+ * __cpuidle_set_driver - set per CPU driver variables for the given driver.
* @drv: a valid pointer to a struct cpuidle_driver
*
* For each CPU in the driver's cpumask, unset the registered driver per CPU
@@ -132,7 +133,7 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv)
* cpuidle_setup_broadcast_timer - enable/disable the broadcast timer
* @arg: a void pointer used to match the SMP cross call API
*
- * @arg is used as a value of type 'long' with on of the two values:
+ * @arg is used as a value of type 'long' with one of the two values:
* - CLOCK_EVT_NOTIFY_BROADCAST_ON
* - CLOCK_EVT_NOTIFY_BROADCAST_OFF
*
@@ -149,10 +150,8 @@ static void cpuidle_setup_broadcast_timer(void *arg)
/**
* __cpuidle_driver_init - initialize the driver's internal data
* @drv: a valid pointer to a struct cpuidle_driver
- *
- * Returns 0 on success, a negative error code otherwise.
*/
-static int __cpuidle_driver_init(struct cpuidle_driver *drv)
+static void __cpuidle_driver_init(struct cpuidle_driver *drv)
{
int i;
@@ -169,20 +168,55 @@ static int __cpuidle_driver_init(struct cpuidle_driver *drv)
/*
* Look for the timer stop flag in the different states, so that we know
* if the broadcast timer has to be set up. The loop is in the reverse
- * order, because usually on of the the deeper states has this flag set.
+ * order, because usually one of the deeper states has this flag set.
*/
for (i = drv->state_count - 1; i >= 0 ; i--) {
+ if (drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP) {
+ drv->bctimer = 1;
+ break;
+ }
+ }
+}
- if (!(drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP))
- continue;
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+static int poll_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ ktime_t t1, t2;
+ s64 diff;
- drv->bctimer = 1;
- break;
- }
+ t1 = ktime_get();
+ local_irq_enable();
+ while (!need_resched())
+ cpu_relax();
- return 0;
+ t2 = ktime_get();
+ diff = ktime_to_us(ktime_sub(t2, t1));
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ dev->last_residency = (int) diff;
+
+ return index;
}
+static void poll_idle_init(struct cpuidle_driver *drv)
+{
+ struct cpuidle_state *state = &drv->states[0];
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+ snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+ state->exit_latency = 0;
+ state->target_residency = 0;
+ state->power_usage = -1;
+ state->flags = 0;
+ state->enter = poll_idle;
+ state->disabled = false;
+}
+#else
+static void poll_idle_init(struct cpuidle_driver *drv) {}
+#endif /* !CONFIG_ARCH_HAS_CPU_RELAX */
+
/**
* __cpuidle_register_driver: register the driver
* @drv: a valid pointer to a struct cpuidle_driver
@@ -206,9 +240,7 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
if (cpuidle_disabled())
return -ENODEV;
- ret = __cpuidle_driver_init(drv);
- if (ret)
- return ret;
+ __cpuidle_driver_init(drv);
ret = __cpuidle_set_driver(drv);
if (ret)
@@ -218,6 +250,8 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
(void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);
+ poll_idle_init(drv);
+
return 0;
}
@@ -346,10 +380,11 @@ struct cpuidle_driver *cpuidle_driver_ref(void)
*/
void cpuidle_driver_unref(void)
{
- struct cpuidle_driver *drv = cpuidle_get_driver();
+ struct cpuidle_driver *drv;
spin_lock(&cpuidle_driver_lock);
+ drv = cpuidle_get_driver();
if (drv && !WARN_ON(drv->refcnt <= 0))
drv->refcnt--;
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index ea2f8e7aa24a..ca89412f5122 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -96,46 +96,3 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
return ret;
}
-
-/**
- * cpuidle_replace_governor - find a replacement governor
- * @exclude_rating: the rating that will be skipped while looking for
- * new governor.
- */
-static struct cpuidle_governor *cpuidle_replace_governor(int exclude_rating)
-{
- struct cpuidle_governor *gov;
- struct cpuidle_governor *ret_gov = NULL;
- unsigned int max_rating = 0;
-
- list_for_each_entry(gov, &cpuidle_governors, governor_list) {
- if (gov->rating == exclude_rating)
- continue;
- if (gov->rating > max_rating) {
- max_rating = gov->rating;
- ret_gov = gov;
- }
- }
-
- return ret_gov;
-}
-
-/**
- * cpuidle_unregister_governor - unregisters a governor
- * @gov: the governor
- */
-void cpuidle_unregister_governor(struct cpuidle_governor *gov)
-{
- if (!gov)
- return;
-
- mutex_lock(&cpuidle_lock);
- if (gov == cpuidle_curr_governor) {
- struct cpuidle_governor *new_gov;
- new_gov = cpuidle_replace_governor(gov->rating);
- cpuidle_switch_governor(new_gov);
- }
- list_del(&gov->governor_list);
- mutex_unlock(&cpuidle_lock);
-}
-
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 8739cc05228c..e918b6d0caf7 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -52,11 +52,12 @@ static ssize_t show_current_driver(struct device *dev,
char *buf)
{
ssize_t ret;
- struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
+ struct cpuidle_driver *drv;
spin_lock(&cpuidle_driver_lock);
- if (cpuidle_driver)
- ret = sprintf(buf, "%s\n", cpuidle_driver->name);
+ drv = cpuidle_get_driver();
+ if (drv)
+ ret = sprintf(buf, "%s\n", drv->name);
else
ret = sprintf(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index ca89f6b84b06..e7555ff4cafd 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -4,16 +4,29 @@ config CRYPTO_DEV_FSL_CAAM
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
- This module adds a job ring operation interface, and configures h/w
+ This module creates job ring devices, and configures h/w
to operate as a DPAA component automatically, depending
on h/w feature availability.
To compile this driver as a module, choose M here: the module
will be called caam.
+config CRYPTO_DEV_FSL_CAAM_JR
+ tristate "Freescale CAAM Job Ring driver backend"
+ depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ help
+ Enables the driver module for Job Rings which are part of
+ Freescale's Cryptographic Accelerator
+ and Assurance Module (CAAM). This module adds a job ring operation
+ interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called caam_jr.
+
config CRYPTO_DEV_FSL_CAAM_RINGSIZE
int "Job Ring size"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM_JR
range 2 9
default "9"
help
@@ -31,7 +44,7 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default n
help
Enable the Job Ring's interrupt coalescing feature.
@@ -62,7 +75,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
@@ -76,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_HASH
help
@@ -88,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index d56bd0ec65d8..550758a333e7 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,8 +6,10 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
-caam-objs := ctrl.o jr.o error.o key_gen.o
+caam-objs := ctrl.o
+caam_jr-objs := jr.o key_gen.o error.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 7c63b72ecd75..4f44b71b9e24 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -86,6 +86,7 @@
#else
#define debug(format, arg...)
#endif
+static struct list_head alg_list;
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
@@ -2057,7 +2058,6 @@ static struct caam_alg_template driver_algs[] = {
struct caam_crypto_alg {
struct list_head entry;
- struct device *ctrldev;
int class1_alg_type;
int class2_alg_type;
int alg_op;
@@ -2070,14 +2070,12 @@ static int caam_cra_init(struct crypto_tfm *tfm)
struct caam_crypto_alg *caam_alg =
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
- int tgt_jr = atomic_inc_return(&priv->tfm_count);
- /*
- * distribute tfms across job rings to ensure in-order
- * crypto request processing per tfm
- */
- ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
/* copy descriptor header template value */
ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
@@ -2104,44 +2102,26 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
desc_bytes(ctx->sh_desc_givenc),
DMA_TO_DEVICE);
+
+ caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_exit(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
struct caam_crypto_alg *t_alg, *n;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return;
-
- ctrldev = &pdev->dev;
- of_node_put(dev_node);
- priv = dev_get_drvdata(ctrldev);
-
- if (!priv->alg_list.next)
+ if (!alg_list.next)
return;
- list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
+ list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
crypto_unregister_alg(&t_alg->crypto_alg);
list_del(&t_alg->entry);
kfree(t_alg);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
- struct caam_alg_template
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
*template)
{
struct caam_crypto_alg *t_alg;
@@ -2149,7 +2129,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
if (!t_alg) {
- dev_err(ctrldev, "failed to allocate t_alg\n");
+ pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
}
@@ -2181,62 +2161,39 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
t_alg->class1_alg_type = template->class1_alg_type;
t_alg->class2_alg_type = template->class2_alg_type;
t_alg->alg_op = template->alg_op;
- t_alg->ctrldev = ctrldev;
return t_alg;
}
static int __init caam_algapi_init(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
int i = 0, err = 0;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return -ENODEV;
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- INIT_LIST_HEAD(&priv->alg_list);
-
- atomic_set(&priv->tfm_count, -1);
+ INIT_LIST_HEAD(&alg_list);
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
/* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;
- t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
+ t_alg = caam_alg_alloc(&driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- dev_warn(ctrldev, "%s alg allocation failed\n",
- driver_algs[i].driver_name);
+ pr_warn("%s alg allocation failed\n",
+ driver_algs[i].driver_name);
continue;
}
err = crypto_register_alg(&t_alg->crypto_alg);
if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
+ pr_warn("%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
} else
- list_add_tail(&t_alg->entry, &priv->alg_list);
+ list_add_tail(&t_alg->entry, &alg_list);
}
- if (!list_empty(&priv->alg_list))
- dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
- (char *)of_get_property(dev_node, "compatible", NULL));
+ if (!list_empty(&alg_list))
+ pr_info("caam algorithms registered in /proc/crypto\n");
return err;
}
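The switch from walking the controller's private jrdev[] array to caam_jr_alloc()/caam_jr_free() gives every CAAM consumer the same ring-lifetime pattern: take a job ring per transform, release it when the transform goes away. A minimal sketch of a hypothetical consumer; my_ctx and the function names are placeholders, and the JR calls are the ones used in the hunks above, assumed to be declared in the caam jr header:

#include <linux/device.h>
#include <linux/err.h>
#include "jr.h"		/* assumed: declares caam_jr_alloc()/caam_jr_free() */

struct my_ctx {
	struct device *jrdev;	/* job ring backing this context */
};

static int my_ctx_init(struct my_ctx *ctx)
{
	ctx->jrdev = caam_jr_alloc();	/* obtain a job ring device */
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* ... enqueue descriptors against ctx->jrdev from here on ... */
	return 0;
}

static void my_ctx_exit(struct my_ctx *ctx)
{
	caam_jr_free(ctx->jrdev);	/* release the ring reference */
}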
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e732bd962e98..0378328f47a7 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -94,6 +94,9 @@
#define debug(format, arg...)
#endif
+
+static struct list_head hash_list;
+
/* ahash per-session context */
struct caam_hash_ctx {
struct device *jrdev;
@@ -1653,7 +1656,6 @@ static struct caam_hash_template driver_hash[] = {
struct caam_hash_alg {
struct list_head entry;
- struct device *ctrldev;
int alg_type;
int alg_op;
struct ahash_alg ahash_alg;
@@ -1670,7 +1672,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
struct caam_hash_alg *caam_hash =
container_of(alg, struct caam_hash_alg, ahash_alg);
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
HASH_MSG_LEN + SHA1_DIGEST_SIZE,
@@ -1678,15 +1679,17 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + SHA256_DIGEST_SIZE,
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
- int tgt_jr = atomic_inc_return(&priv->tfm_count);
int ret = 0;
/*
- * distribute tfms across job rings to ensure in-order
+ * Get a Job ring from Job Ring driver to ensure in-order
* crypto request processing per tfm
*/
- ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
-
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
/* copy descriptor header template value */
ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
@@ -1729,35 +1732,18 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
!dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
+
+ caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
struct caam_hash_alg *t_alg, *n;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
+ if (!hash_list.next)
return;
- ctrldev = &pdev->dev;
- of_node_put(dev_node);
- priv = dev_get_drvdata(ctrldev);
-
- if (!priv->hash_list.next)
- return;
-
- list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
+ list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
crypto_unregister_ahash(&t_alg->ahash_alg);
list_del(&t_alg->entry);
kfree(t_alg);
@@ -1765,7 +1751,7 @@ static void __exit caam_algapi_hash_exit(void)
}
static struct caam_hash_alg *
-caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
+caam_hash_alloc(struct caam_hash_template *template,
bool keyed)
{
struct caam_hash_alg *t_alg;
@@ -1774,7 +1760,7 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
if (!t_alg) {
- dev_err(ctrldev, "failed to allocate t_alg\n");
+ pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
}
@@ -1805,37 +1791,15 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
t_alg->alg_type = template->alg_type;
t_alg->alg_op = template->alg_op;
- t_alg->ctrldev = ctrldev;
return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
int i = 0, err = 0;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return -ENODEV;
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- INIT_LIST_HEAD(&priv->hash_list);
-
- atomic_set(&priv->tfm_count, -1);
+ INIT_LIST_HEAD(&hash_list);
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
@@ -1843,38 +1807,38 @@ static int __init caam_algapi_hash_init(void)
struct caam_hash_alg *t_alg;
/* register hmac version */
- t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
+ t_alg = caam_hash_alloc(&driver_hash[i], true);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- dev_warn(ctrldev, "%s alg allocation failed\n",
- driver_hash[i].driver_name);
+ pr_warn("%s alg allocation failed\n",
+ driver_hash[i].driver_name);
continue;
}
err = crypto_register_ahash(&t_alg->ahash_alg);
if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
+ pr_warn("%s alg registration failed\n",
t_alg->ahash_alg.halg.base.cra_driver_name);
kfree(t_alg);
} else
- list_add_tail(&t_alg->entry, &priv->hash_list);
+ list_add_tail(&t_alg->entry, &hash_list);
/* register unkeyed version */
- t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
+ t_alg = caam_hash_alloc(&driver_hash[i], false);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- dev_warn(ctrldev, "%s alg allocation failed\n",
- driver_hash[i].driver_name);
+ pr_warn("%s alg allocation failed\n",
+ driver_hash[i].driver_name);
continue;
}
err = crypto_register_ahash(&t_alg->ahash_alg);
if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
+ pr_warn("%s alg registration failed\n",
t_alg->ahash_alg.halg.base.cra_driver_name);
kfree(t_alg);
} else
- list_add_tail(&t_alg->entry, &priv->hash_list);
+ list_add_tail(&t_alg->entry, &hash_list);
}
return err;
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index d1939a9539c0..28486b19fc36 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -273,34 +273,23 @@ static struct hwrng caam_rng = {
static void __exit caam_rng_exit(void)
{
+ caam_jr_free(rng_ctx.jrdev);
hwrng_unregister(&caam_rng);
}
static int __init caam_rng_init(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
-
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return -ENODEV;
+ struct device *dev;
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
+ dev = caam_jr_alloc();
+ if (IS_ERR(dev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(dev);
+ }
- caam_init_rng(&rng_ctx, priv->jrdev[0]);
+ caam_init_rng(&rng_ctx, dev);
- dev_info(priv->jrdev[0], "registering rng-caam\n");
+ dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index b010d42a1803..63fb1af2c431 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -5,6 +5,9 @@
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
#include "compat.h"
#include "regs.h"
#include "intern.h"
@@ -13,82 +16,75 @@
#include "error.h"
#include "ctrl.h"
-static int caam_remove(struct platform_device *pdev)
-{
- struct device *ctrldev;
- struct caam_drv_private *ctrlpriv;
- struct caam_drv_private_jr *jrpriv;
- struct caam_full __iomem *topregs;
- int ring, ret = 0;
-
- ctrldev = &pdev->dev;
- ctrlpriv = dev_get_drvdata(ctrldev);
- topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
-
- /* shut down JobRs */
- for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
- ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
- jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
- irq_dispose_mapping(jrpriv->irq);
- }
-
- /* Shut down debug views */
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(ctrlpriv->dfs_root);
-#endif
-
- /* Unmap controller region */
- iounmap(&topregs->ctrl);
-
- kfree(ctrlpriv->jrdev);
- kfree(ctrlpriv);
-
- return ret;
-}
-
/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
* load the JDKEK, TDKEK and TDSK registers
*/
-static void build_instantiation_desc(u32 *desc)
+static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
- u32 *jump_cmd;
+ u32 *jump_cmd, op_flags;
init_job_desc(desc, 0);
+ op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
+
/* INIT RNG in non-test mode */
- append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
- OP_ALG_AS_INIT);
+ append_operation(desc, op_flags);
+
+ if (!handle && do_sk) {
+ /*
+ * For SH0, Secure Keys must be generated as well
+ */
+
+ /* wait for done */
+ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /*
+ * load 1 to clear written reg:
+ * resets the done interrupt and returns the RNG to idle.
+ */
+ append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+
+ /* Initialize State Handle */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ OP_ALG_AAI_RNG4_SK);
+ }
- /* wait for done */
- jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
- set_jump_tgt_here(desc, jump_cmd);
+ append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
+}
- /*
- * load 1 to clear written reg:
- * resets the done interrupt and returns the RNG to idle.
- */
- append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
+static void build_deinstantiation_desc(u32 *desc, int handle)
+{
+ init_job_desc(desc, 0);
- /* generate secure keys (non-test) */
+ /* Uninstantiate State Handle 0 */
append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
- OP_ALG_RNG4_SK);
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
+
+ append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
-static int instantiate_rng(struct device *ctrldev)
+/*
+ * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
+ * the software (no JR/QI used).
+ * @ctrldev - pointer to device
+ * @status - descriptor status, after being run
+ *
+ * Return: - 0 if no error occurred
+ * - -ENODEV if the DECO couldn't be acquired
+ * - -EAGAIN if an error occurred while executing the descriptor
+ */
+static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
+ u32 *status)
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
struct caam_full __iomem *topregs;
unsigned int timeout = 100000;
- u32 *desc;
- int i, ret = 0;
-
- desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
- if (!desc) {
- dev_err(ctrldev, "can't allocate RNG init descriptor memory\n");
- return -ENOMEM;
- }
- build_instantiation_desc(desc);
+ u32 deco_dbg_reg, flags;
+ int i;
/* Set the bit to request direct access to DECO0 */
topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
@@ -100,36 +96,219 @@ static int instantiate_rng(struct device *ctrldev)
if (!timeout) {
dev_err(ctrldev, "failed to acquire DECO 0\n");
- ret = -EIO;
- goto out;
+ clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+ return -ENODEV;
}
for (i = 0; i < desc_len(desc); i++)
- topregs->deco.descbuf[i] = *(desc + i);
+ wr_reg32(&topregs->deco.descbuf[i], *(desc + i));
+
+ flags = DECO_JQCR_WHL;
+ /*
+ * If the descriptor length is longer than 4 words, then the
+ * FOUR bit in JRCTRL register must be set.
+ */
+ if (desc_len(desc) >= 4)
+ flags |= DECO_JQCR_FOUR;
- wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR);
+ /* Instruct the DECO to execute it */
+ wr_reg32(&topregs->deco.jr_ctl_hi, flags);
timeout = 10000000;
- while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) &&
- --timeout)
+ do {
+ deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
+ /*
+ * If an error occurred in the descriptor, then
+ * the DECO status field will be set to 0x0D
+ */
+ if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
+ DESC_DBG_DECO_STAT_HOST_ERR)
+ break;
cpu_relax();
+ } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
- if (!timeout) {
- dev_err(ctrldev, "failed to instantiate RNG\n");
- ret = -EIO;
- }
+ *status = rd_reg32(&topregs->deco.op_status_hi) &
+ DECO_OP_STATUS_HI_ERR_MASK;
+ /* Mark the DECO as free */
clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
-out:
+
+ if (!timeout)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * instantiate_rng - builds and executes a descriptor on DECO0,
+ * which initializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ * the RNG4 block: 1 if it's been instantiated
+ * by an external entry, 0 otherwise.
+ * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
+ * Caution: this can be done only once; if the keys need to be
+ * regenerated, a POR is required
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
+ * e.g. there was an RNG hardware error due to not "good enough"
+ * entropy being acquired.
+ */
+static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+ int gen_sk)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_full __iomem *topregs;
+ struct rng4tst __iomem *r4tst;
+ u32 *desc, status, rdsta_val;
+ int ret = 0, sh_idx;
+
+ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+ r4tst = &topregs->ctrl.r4tst[0];
+
+ desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, this state handle
+ * was initialized by somebody else, so it's left alone.
+ */
+ if ((1 << sh_idx) & state_handle_mask)
+ continue;
+
+ /* Create the descriptor for instantiating RNG State Handle */
+ build_instantiation_desc(desc, sh_idx, gen_sk);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ /*
+ * If ret is not 0, or descriptor status is not 0, then
+ * something went wrong. No need to try the next state
+ * handle (if available), bail out here.
+ * Also, if for some reason, the State Handle didn't get
+ * instantiated although the descriptor has finished
+ * without any error (HW optimizations for later
+ * CAAM eras), then try again.
+ */
+ rdsta_val =
+ rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
+ if (status || !(rdsta_val & (1 << sh_idx)))
+ ret = -EAGAIN;
+ if (ret)
+ break;
+
+ dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
+ /* Clear the contents before recreating the descriptor */
+ memset(desc, 0x00, CAAM_CMD_SZ * 7);
+ }
+
+ kfree(desc);
+
+ return ret;
+}
+
+/*
+ * deinstantiate_rng - builds and executes a descriptor on DECO0,
+ * which deinitializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ * the RNG4 block: 1 if it's been instantiated
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
+ */
+static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+{
+ u32 *desc, status;
+ int sh_idx, ret = 0;
+
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, then it means the state
+ * handle was initialized by us, and thus it needs to be
+ * deinitialized as well
+ */
+ if ((1 << sh_idx) & state_handle_mask) {
+ /*
+ * Create the descriptor for deinstantiating this state
+ * handle
+ */
+ build_deinstantiation_desc(desc, sh_idx);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ if (ret || status) {
+ dev_err(ctrldev,
+ "Failed to deinstantiate RNG4 SH%d\n",
+ sh_idx);
+ break;
+ }
+ dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
+ }
+ }
+
kfree(desc);
+
+ return ret;
+}
+
+static int caam_remove(struct platform_device *pdev)
+{
+ struct device *ctrldev;
+ struct caam_drv_private *ctrlpriv;
+ struct caam_full __iomem *topregs;
+ int ring, ret = 0;
+
+ ctrldev = &pdev->dev;
+ ctrlpriv = dev_get_drvdata(ctrldev);
+ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+ /* Remove platform devices for JobRs */
+ for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
+ if (ctrlpriv->jrpdev[ring])
+ of_device_unregister(ctrlpriv->jrpdev[ring]);
+ }
+
+ /* De-initialize RNG state handles initialized by this driver. */
+ if (ctrlpriv->rng4_sh_init)
+ deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+
+ /* Shut down debug views */
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(ctrlpriv->dfs_root);
+#endif
+
+ /* Unmap controller region */
+ iounmap(&topregs->ctrl);
+
+ kfree(ctrlpriv->jrpdev);
+ kfree(ctrlpriv);
+
return ret;
}
/*
- * By default, the TRNG runs for 200 clocks per sample;
- * 1600 clocks per sample generates better entropy.
+ * kick_trng - sets the various parameters for enabling the initialization
+ * of the RNG4 block in CAAM
+ * @pdev - pointer to the platform device
+ * @ent_delay - Defines the length (in system clocks) of each entropy sample.
*/
-static void kick_trng(struct platform_device *pdev)
+static void kick_trng(struct platform_device *pdev, int ent_delay)
{
struct device *ctrldev = &pdev->dev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
@@ -142,14 +321,31 @@ static void kick_trng(struct platform_device *pdev)
/* put RNG4 into program mode */
setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
- /* 1600 clocks per sample */
+
+ /*
+ * Performance-wise, it does not make sense to
+ * set the delay to a value that is lower
+ * than the last one that worked (i.e. the state handles
+ * were instantiated properly). Thus, instead of wasting
+ * time trying to set the values controlling the sample
+ * frequency, the function simply returns.
+ */
+ val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
+ >> RTSDCTL_ENT_DLY_SHIFT;
+ if (ent_delay <= val) {
+ /* put RNG4 into run mode */
+ clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ return;
+ }
+
val = rd_reg32(&r4tst->rtsdctl);
- val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT);
+ val = (val & ~RTSDCTL_ENT_DLY_MASK) |
+ (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
wr_reg32(&r4tst->rtsdctl, val);
- /* min. freq. count */
- wr_reg32(&r4tst->rtfrqmin, 400);
- /* max. freq. count */
- wr_reg32(&r4tst->rtfrqmax, 6400);
+ /* min. freq. count, equal to 1/4 of the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
+ /* max. freq. count, equal to 8 times the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
/* put RNG4 into run mode */
clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
}
@@ -190,7 +386,7 @@ EXPORT_SYMBOL(caam_get_era);
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
- int ret, ring, rspec;
+ int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
u64 caam_id;
struct device *dev;
struct device_node *nprop, *np;
@@ -224,7 +420,7 @@ static int caam_probe(struct platform_device *pdev)
topregs = (struct caam_full __iomem *)ctrl;
/* Get the IRQ of the controller (for security violations only) */
- ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
+ ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
@@ -255,8 +451,9 @@ static int caam_probe(struct platform_device *pdev)
rspec++;
}
- ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
- if (ctrlpriv->jrdev == NULL) {
+ ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
+ GFP_KERNEL);
+ if (ctrlpriv->jrpdev == NULL) {
iounmap(&topregs->ctrl);
return -ENOMEM;
}
@@ -264,13 +461,24 @@ static int caam_probe(struct platform_device *pdev)
ring = 0;
ctrlpriv->total_jobrs = 0;
for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
- caam_jr_probe(pdev, np, ring);
+ ctrlpriv->jrpdev[ring] =
+ of_platform_device_create(np, NULL, dev);
+ if (!ctrlpriv->jrpdev[ring]) {
+ pr_warn("JR%d Platform device creation error\n", ring);
+ continue;
+ }
ctrlpriv->total_jobrs++;
ring++;
}
if (!ring) {
for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
- caam_jr_probe(pdev, np, ring);
+ ctrlpriv->jrpdev[ring] =
+ of_platform_device_create(np, NULL, dev);
+ if (!ctrlpriv->jrpdev[ring]) {
+ pr_warn("JR%d Platform device creation error\n",
+ ring);
+ continue;
+ }
ctrlpriv->total_jobrs++;
ring++;
}
@@ -296,16 +504,55 @@ static int caam_probe(struct platform_device *pdev)
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
- * already instantiated ,do RNG instantiation
+ * already instantiated, do RNG instantiation
*/
- if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 &&
- !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) {
- kick_trng(pdev);
- ret = instantiate_rng(dev);
+ if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
+ ctrlpriv->rng4_sh_init =
+ rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
+ /*
+ * If the secure keys (TDKEK, JDKEK, TDSK) were already
+ * generated, signal this to the function that is instantiating
+ * the state handles. An error would occur if RNG4 attempts
+ * to regenerate these keys before the next POR.
+ */
+ gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
+ ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
+ do {
+ int inst_handles =
+ rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
+ RDSTA_IFMASK;
+ /*
+ * If either SH was instantiated by somebody else
+ * (e.g. u-boot) then it is assumed that the entropy
+ * parameters are properly set and thus the function
+ * setting these (kick_trng(...)) is skipped.
+ * Also, if a handle was instantiated, do not change
+ * the TRNG parameters.
+ */
+ if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ kick_trng(pdev, ent_delay);
+ ent_delay += 400;
+ }
+ /*
+ * If instantiate_rng(...) fails, the loop will rerun
+ * and the kick_trng(...) function will modify the
+ * upper and lower limits of the entropy sampling
+ * interval, leading to a successful initialization of
+ * the RNG.
+ */
+ ret = instantiate_rng(dev, inst_handles,
+ gen_sk);
+ } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
if (ret) {
+ dev_err(dev, "failed to instantiate RNG\n");
caam_remove(pdev);
return ret;
}
+ /*
+ * Set handles init'ed by this module as the complement of the
+ * already initialized ones
+ */
+ ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
/* Enable RDB bit so that RNG works faster */
setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
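
The ctrl.c hunks above turn RNG4 initialization into a retry loop: the probe
records which state handles (and secure keys) were already set up, and each
failed instantiate_rng() attempt widens the entropy sample length by 400
system clocks via kick_trng(), bounded by RTSDCTL_ENT_DLY_MAX. A minimal
stand-alone C sketch of that control flow follows; try_instantiate() and its
success threshold are illustrative stand-ins, not part of the patch.

#include <stdio.h>
#include <errno.h>

#define ENT_DLY_MIN   1200	/* mirrors RTSDCTL_ENT_DLY_MIN */
#define ENT_DLY_MAX  12800	/* mirrors RTSDCTL_ENT_DLY_MAX */
#define ENT_DLY_STEP   400

/* Hypothetical stand-in: pretend instantiation only succeeds from 2000 up. */
static int try_instantiate(int ent_delay)
{
	return ent_delay >= 2000 ? 0 : -EAGAIN;
}

int main(void)
{
	int ent_delay = ENT_DLY_MIN;
	int ret;

	do {
		/* kick_trng() would reprogram the sample interval here */
		ret = try_instantiate(ent_delay);
		if (ret == -EAGAIN)
			ent_delay += ENT_DLY_STEP;
	} while (ret == -EAGAIN && ent_delay < ENT_DLY_MAX);

	printf("ret=%d, final ent_delay=%d\n", ret, ent_delay);
	return ret ? 1 : 0;
}
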
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 53b296f78b0d..7e4500f18df6 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1155,8 +1155,15 @@ struct sec4_sg_entry {
/* randomizer AAI set */
#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
-#define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT)
-#define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
/* hmac/smac AAI set */
#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
@@ -1178,12 +1185,6 @@ struct sec4_sg_entry {
#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
-/* RNG4 set */
-#define OP_ALG_RNG4_SHIFT 4
-#define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT)
-
-#define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT)
-
#define OP_ALG_AS_SHIFT 2
#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 34c4b9f7fbfa..6d85fcc5bd0a 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -37,13 +37,16 @@ struct caam_jrentry_info {
/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
- struct device *parentdev; /* points back to controller dev */
- struct platform_device *jr_pdev;/* points to platform device for JR */
+ struct list_head list_node; /* Job Ring device list */
+ struct device *dev;
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask;
int irq; /* One per queue */
+ /* Number of scatterlist crypt transforms active on the JobR */
+ atomic_t tfm_count ____cacheline_aligned;
+
/* Job ring info */
int ringsize; /* Size of rings (assume input = output) */
struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
@@ -63,7 +66,7 @@ struct caam_drv_private_jr {
struct caam_drv_private {
struct device *dev;
- struct device **jrdev; /* Alloc'ed array per sub-device */
+ struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
struct platform_device *pdev;
/* Physical-presence section */
@@ -80,12 +83,11 @@ struct caam_drv_private {
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
- /* which jr allocated to scatterlist crypto */
- atomic_t tfm_count ____cacheline_aligned;
- /* list of registered crypto algorithms (mk generic context handle?) */
- struct list_head alg_list;
- /* list of registered hash algorithms (mk generic context handle?) */
- struct list_head hash_list;
+#define RNG4_MAX_HANDLES 2
+ /* RNG4 block */
+ u32 rng4_sh_init; /* This bitmap shows which of the State
+ Handles of the RNG4 block are initialized
+ by this driver */
/*
* debugfs entries for developer view into driver/device
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 105ba4da6180..d23356d20e1c 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -5,12 +5,121 @@
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
+#include <linux/of_irq.h>
+
#include "compat.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
#include "intern.h"
+struct jr_driver_data {
+ /* List of physical JobRs maintained by the driver */
+ struct list_head jr_list;
+ spinlock_t jr_alloc_lock; /* jr_list lock */
+} ____cacheline_aligned;
+
+static struct jr_driver_data driver_data;
+
+static int caam_reset_hw_jr(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ unsigned int timeout = 100000;
+
+ /*
+ * mask interrupts since we are going to poll
+ * for reset completion status
+ */
+ setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+ /* initiate flush (required prior to reset) */
+ wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+ while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
+ JRINT_ERR_HALT_INPROGRESS) && --timeout)
+ cpu_relax();
+
+ if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
+ JRINT_ERR_HALT_COMPLETE || timeout == 0) {
+ dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
+ return -EIO;
+ }
+
+ /* initiate reset */
+ timeout = 100000;
+ wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+ while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
+ cpu_relax();
+
+ if (timeout == 0) {
+ dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
+ return -EIO;
+ }
+
+ /* unmask interrupts */
+ clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+ return 0;
+}
+
+/*
+ * Shutdown JobR independent of platform property code
+ */
+int caam_jr_shutdown(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ dma_addr_t inpbusaddr, outbusaddr;
+ int ret;
+
+ ret = caam_reset_hw_jr(dev);
+
+ tasklet_kill(&jrp->irqtask);
+
+ /* Release interrupt */
+ free_irq(jrp->irq, dev);
+
+ /* Free rings */
+ inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+ outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+ dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+ jrp->inpring, inpbusaddr);
+ dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+ jrp->outring, outbusaddr);
+ kfree(jrp->entinfo);
+
+ return ret;
+}
+
+static int caam_jr_remove(struct platform_device *pdev)
+{
+ int ret;
+ struct device *jrdev;
+ struct caam_drv_private_jr *jrpriv;
+
+ jrdev = &pdev->dev;
+ jrpriv = dev_get_drvdata(jrdev);
+
+ /*
+ * Return -EBUSY if the job ring is still allocated (in use).
+ */
+ if (atomic_read(&jrpriv->tfm_count)) {
+ dev_err(jrdev, "Device is busy\n");
+ return -EBUSY;
+ }
+
+ /* Remove the node from Physical JobR list maintained by driver */
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_del(&jrpriv->list_node);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ /* Release ring */
+ ret = caam_jr_shutdown(jrdev);
+ if (ret)
+ dev_err(jrdev, "Failed to shut down job ring\n");
+ irq_dispose_mapping(jrpriv->irq);
+
+ return ret;
+}
+
/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
@@ -126,6 +235,59 @@ static void caam_jr_dequeue(unsigned long devarg)
}
/**
+ * caam_jr_alloc() - Alloc a job ring for someone to use as needed.
+ *
+ * Returns: pointer to the newly allocated physical JobR device
+ * if successful, or ERR_PTR(-ENODEV) if no job ring is available.
+ **/
+struct device *caam_jr_alloc(void)
+{
+ struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
+ struct device *dev = NULL;
+ int min_tfm_cnt = INT_MAX;
+ int tfm_cnt;
+
+ spin_lock(&driver_data.jr_alloc_lock);
+
+ if (list_empty(&driver_data.jr_list)) {
+ spin_unlock(&driver_data.jr_alloc_lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
+ tfm_cnt = atomic_read(&jrpriv->tfm_count);
+ if (tfm_cnt < min_tfm_cnt) {
+ min_tfm_cnt = tfm_cnt;
+ min_jrpriv = jrpriv;
+ }
+ if (!min_tfm_cnt)
+ break;
+ }
+
+ if (min_jrpriv) {
+ atomic_inc(&min_jrpriv->tfm_count);
+ dev = min_jrpriv->dev;
+ }
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ return dev;
+}
+EXPORT_SYMBOL(caam_jr_alloc);
+
+/**
+ * caam_jr_free() - Free the Job Ring
+ * @rdev - points to the dev that identifies the Job ring to
+ * be released.
+ **/
+void caam_jr_free(struct device *rdev)
+{
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
+
+ atomic_dec(&jrpriv->tfm_count);
+}
+EXPORT_SYMBOL(caam_jr_free);
+
+/**
* caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
* -EBUSY if the queue is full, -EIO if it cannot map the caller's
* descriptor.
@@ -205,46 +367,6 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
}
EXPORT_SYMBOL(caam_jr_enqueue);
-static int caam_reset_hw_jr(struct device *dev)
-{
- struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
- unsigned int timeout = 100000;
-
- /*
- * mask interrupts since we are going to poll
- * for reset completion status
- */
- setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
-
- /* initiate flush (required prior to reset) */
- wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
- while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
- JRINT_ERR_HALT_INPROGRESS) && --timeout)
- cpu_relax();
-
- if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
- JRINT_ERR_HALT_COMPLETE || timeout == 0) {
- dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
- return -EIO;
- }
-
- /* initiate reset */
- timeout = 100000;
- wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
- while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
- cpu_relax();
-
- if (timeout == 0) {
- dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
- return -EIO;
- }
-
- /* unmask interrupts */
- clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
-
- return 0;
-}
-
/*
* Init JobR independent of platform property detection
*/
@@ -260,7 +382,7 @@ static int caam_jr_init(struct device *dev)
/* Connect job ring interrupt handler. */
error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
- "caam-jobr", dev);
+ dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
@@ -316,86 +438,43 @@ static int caam_jr_init(struct device *dev)
return 0;
}
-/*
- * Shutdown JobR independent of platform property code
- */
-int caam_jr_shutdown(struct device *dev)
-{
- struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
- dma_addr_t inpbusaddr, outbusaddr;
- int ret;
-
- ret = caam_reset_hw_jr(dev);
-
- tasklet_kill(&jrp->irqtask);
-
- /* Release interrupt */
- free_irq(jrp->irq, dev);
-
- /* Free rings */
- inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
- outbusaddr = rd_reg64(&jrp->rregs->outring_base);
- dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
- jrp->inpring, inpbusaddr);
- dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
- jrp->outring, outbusaddr);
- kfree(jrp->entinfo);
- of_device_unregister(jrp->jr_pdev);
-
- return ret;
-}
/*
- * Probe routine for each detected JobR subsystem. It assumes that
- * property detection was picked up externally.
+ * Probe routine for each detected JobR subsystem.
*/
-int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
- int ring)
+static int caam_jr_probe(struct platform_device *pdev)
{
- struct device *ctrldev, *jrdev;
- struct platform_device *jr_pdev;
- struct caam_drv_private *ctrlpriv;
+ struct device *jrdev;
+ struct device_node *nprop;
+ struct caam_job_ring __iomem *ctrl;
struct caam_drv_private_jr *jrpriv;
- u32 *jroffset;
+ static int total_jobrs;
int error;
- ctrldev = &pdev->dev;
- ctrlpriv = dev_get_drvdata(ctrldev);
-
+ jrdev = &pdev->dev;
jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
GFP_KERNEL);
- if (jrpriv == NULL) {
- dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
- ring);
+ if (!jrpriv)
return -ENOMEM;
- }
- jrpriv->parentdev = ctrldev; /* point back to parent */
- jrpriv->ridx = ring; /* save ring identity relative to detection */
- /*
- * Derive a pointer to the detected JobRs regs
- * Driver has already iomapped the entire space, we just
- * need to add in the offset to this JobR. Don't know if I
- * like this long-term, but it'll run
- */
- jroffset = (u32 *)of_get_property(np, "reg", NULL);
- jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
- + *jroffset);
+ dev_set_drvdata(jrdev, jrpriv);
- /* Build a local dev for each detected queue */
- jr_pdev = of_platform_device_create(np, NULL, ctrldev);
- if (jr_pdev == NULL) {
- kfree(jrpriv);
- return -EINVAL;
+ /* save ring identity relative to detection */
+ jrpriv->ridx = total_jobrs++;
+
+ nprop = pdev->dev.of_node;
+ /* Get configuration properties from device tree */
+ /* First, get register page */
+ ctrl = of_iomap(nprop, 0);
+ if (!ctrl) {
+ dev_err(jrdev, "of_iomap() failed\n");
+ return -ENOMEM;
}
- jrpriv->jr_pdev = jr_pdev;
- jrdev = &jr_pdev->dev;
- dev_set_drvdata(jrdev, jrpriv);
- ctrlpriv->jrdev[ring] = jrdev;
+ jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
if (sizeof(dma_addr_t) == sizeof(u64))
- if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring"))
+ if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
dma_set_mask(jrdev, DMA_BIT_MASK(40));
else
dma_set_mask(jrdev, DMA_BIT_MASK(36));
@@ -403,15 +482,61 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
dma_set_mask(jrdev, DMA_BIT_MASK(32));
/* Identify the interrupt */
- jrpriv->irq = of_irq_to_resource(np, 0, NULL);
+ jrpriv->irq = irq_of_parse_and_map(nprop, 0);
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
if (error) {
- of_device_unregister(jr_pdev);
kfree(jrpriv);
return error;
}
- return error;
+ jrpriv->dev = jrdev;
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ atomic_set(&jrpriv->tfm_count, 0);
+
+ return 0;
+}
+
+static struct of_device_id caam_jr_match[] = {
+ {
+ .compatible = "fsl,sec-v4.0-job-ring",
+ },
+ {
+ .compatible = "fsl,sec4.0-job-ring",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, caam_jr_match);
+
+static struct platform_driver caam_jr_driver = {
+ .driver = {
+ .name = "caam_jr",
+ .owner = THIS_MODULE,
+ .of_match_table = caam_jr_match,
+ },
+ .probe = caam_jr_probe,
+ .remove = caam_jr_remove,
+};
+
+static int __init jr_driver_init(void)
+{
+ spin_lock_init(&driver_data.jr_alloc_lock);
+ INIT_LIST_HEAD(&driver_data.jr_list);
+ return platform_driver_register(&caam_jr_driver);
+}
+
+static void __exit jr_driver_exit(void)
+{
+ platform_driver_unregister(&caam_jr_driver);
}
+
+module_init(jr_driver_init);
+module_exit(jr_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM JR request backend");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
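
The reworked jr.c registers every ring as its own platform device and exposes
caam_jr_alloc()/caam_jr_free(): allocation walks the driver's ring list, picks
the ring with the fewest active transforms (tfm_count) and bumps that counter
so later callers spread across rings. A small stand-alone sketch of that
least-loaded selection is shown below; the ring array and counters are
illustrative, not the driver's data structures.

#include <stdio.h>
#include <limits.h>

struct ring {
	int id;
	int tfm_count;		/* stands in for the per-ring atomic_t */
};

/* Pick the least-loaded ring and account for the new user. */
static struct ring *ring_alloc(struct ring *rings, int n)
{
	struct ring *best = NULL;
	int min_cnt = INT_MAX;
	int i;

	for (i = 0; i < n; i++) {
		if (rings[i].tfm_count < min_cnt) {
			min_cnt = rings[i].tfm_count;
			best = &rings[i];
		}
		if (!min_cnt)		/* an idle ring cannot be beaten */
			break;
	}
	if (best)
		best->tfm_count++;	/* ring_free() would decrement this */
	return best;
}

int main(void)
{
	struct ring rings[] = { { 0, 2 }, { 1, 0 }, { 2, 1 } };
	struct ring *r = ring_alloc(rings, 3);

	printf("allocated ring %d\n", r ? r->id : -1);
	return 0;
}
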
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
index 9d8741a59037..97113a6d6c58 100644
--- a/drivers/crypto/caam/jr.h
+++ b/drivers/crypto/caam/jr.h
@@ -8,12 +8,11 @@
#define JR_H
/* Prototypes for backend-level services exposed to APIs */
+struct device *caam_jr_alloc(void);
+void caam_jr_free(struct device *rdev);
int caam_jr_enqueue(struct device *dev, u32 *desc,
void (*cbk)(struct device *dev, u32 *desc, u32 status,
void *areq),
void *areq);
-extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
- int ring);
-extern int caam_jr_shutdown(struct device *dev);
#endif /* JR_H */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 4455396918de..d50174f45b21 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -245,7 +245,7 @@ struct rngtst {
/* RNG4 TRNG test registers */
struct rng4tst {
-#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
u32 rtmctl; /* misc. control register */
u32 rtscmisc; /* statistical check misc. register */
u32 rtpkrrng; /* poker range register */
@@ -255,6 +255,8 @@ struct rng4tst {
};
#define RTSDCTL_ENT_DLY_SHIFT 16
#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
+#define RTSDCTL_ENT_DLY_MIN 1200
+#define RTSDCTL_ENT_DLY_MAX 12800
u32 rtsdctl; /* seed control register */
union {
u32 rtsblim; /* PRGM=1: sparse bit limit register */
@@ -266,7 +268,11 @@ struct rng4tst {
u32 rtfrqcnt; /* PRGM=0: freq. count register */
};
u32 rsvd1[40];
+#define RDSTA_SKVT 0x80000000
+#define RDSTA_SKVN 0x40000000
#define RDSTA_IF0 0x00000001
+#define RDSTA_IF1 0x00000002
+#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
u32 rdsta;
u32 rsvd2[15];
};
@@ -692,6 +698,7 @@ struct caam_deco {
u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */
u32 jr_ctl_lo;
u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */
+#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF
u32 op_status_hi; /* DxOPSTA - DECO Operation Status */
u32 op_status_lo;
u32 rsvd24[2];
@@ -706,12 +713,13 @@ struct caam_deco {
u32 rsvd29[48];
u32 descbuf[64]; /* DxDESB - Descriptor buffer */
u32 rscvd30[193];
+#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
+#define DESC_DBG_DECO_STAT_VALID 0x80000000
+#define DESC_DBG_DECO_STAT_MASK 0x00F00000
u32 desc_dbg; /* DxDDR - DECO Debug Register */
u32 rsvd31[126];
};
-/* DECO DBG Register Valid Bit*/
-#define DECO_DBG_VALID 0x80000000
#define DECO_JQCR_WHL 0x20000000
#define DECO_JQCR_FOUR 0x10000000
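
The new RDSTA_IF1/RDSTA_IFMASK definitions let the controller code record
which RNG4 state handles were instantiated before the driver ran (e.g. by
u-boot) and then claim the complement as its own, as done in the ctrl.c hunk
earlier. A tiny stand-alone sketch of that bit arithmetic, assuming handle 0
was pre-initialized by the boot loader:

#include <stdio.h>

#define RDSTA_IF0	0x00000001
#define RDSTA_IF1	0x00000002
#define RDSTA_IFMASK	(RDSTA_IF1 | RDSTA_IF0)

int main(void)
{
	unsigned int already_init = RDSTA_IF0;		  /* assumed: SH0 done by u-boot */
	unsigned int ours = ~already_init & RDSTA_IFMASK; /* handles owned by the driver */

	printf("state handles initialized by this driver: 0x%x\n", ours);
	return 0;
}
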
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index e0037c8ee243..b12ff85f4241 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -117,6 +117,21 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
return nents;
}
+/* Map SG page in kernel virtual address space and copy */
+static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
+ int len, int offset)
+{
+ u8 *mapped_addr;
+
+ /*
+ * The page here may be a user-space page pinned via
+ * get_user_pages(); it must be kmapped before use and
+ * kunmapped afterwards.
+ */
+ mapped_addr = kmap_atomic(sg_page(sg));
+ memcpy(dest, mapped_addr + offset, len);
+ kunmap_atomic(mapped_addr);
+}
+
/* Copy from len bytes of sg to dest, starting from beginning */
static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
{
@@ -124,15 +139,15 @@ static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
int cpy_index = 0, next_cpy_index = current_sg->length;
while (next_cpy_index < len) {
- memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
- current_sg->length);
+ sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
+ current_sg->offset);
current_sg = scatterwalk_sg_next(current_sg);
cpy_index = next_cpy_index;
next_cpy_index += current_sg->length;
}
if (cpy_index < len)
- memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
- len - cpy_index);
+ sg_map_copy(dest + cpy_index, current_sg, len-cpy_index,
+ current_sg->offset);
}
/* Copy sg data, from to_skip to end, to dest */
@@ -140,7 +155,7 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
int to_skip, unsigned int end)
{
struct scatterlist *current_sg = sg;
- int sg_index, cpy_index;
+ int sg_index, cpy_index, offset;
sg_index = current_sg->length;
while (sg_index <= to_skip) {
@@ -148,9 +163,10 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
sg_index += current_sg->length;
}
cpy_index = sg_index - to_skip;
- memcpy(dest, (u8 *) sg_virt(current_sg) +
- current_sg->length - cpy_index, cpy_index);
- current_sg = scatterwalk_sg_next(current_sg);
- if (end - sg_index)
+ offset = current_sg->offset + current_sg->length - cpy_index;
+ sg_map_copy(dest, current_sg, cpy_index, offset);
+ if (end - sg_index) {
+ current_sg = scatterwalk_sg_next(current_sg);
sg_copy(dest + cpy_index, current_sg, end - sg_index);
+ }
}
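
The sg_sw_sec4.h change replaces direct sg_virt() dereferences with a
kmap_atomic()-based copy, since the scatterlist pages may be pinned user
pages (e.g. via get_user_pages()) with no permanent kernel mapping on highmem
configurations. A kernel-style sketch of the pattern, assuming it runs in a
context where atomic kmaps are allowed; the helper name is hypothetical:

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/types.h>

/* Copy 'len' bytes out of one scatterlist entry, mapping its page first. */
static void sg_entry_copy_out(u8 *dest, struct scatterlist *sg,
			      unsigned int len, unsigned int offset)
{
	u8 *vaddr = kmap_atomic(sg_page(sg));	/* temporary kernel mapping */

	memcpy(dest, vaddr + offset, len);
	kunmap_atomic(vaddr);			/* drop the mapping promptly */
}
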
diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c
index a8a7dd4b0d25..247ab8048f5b 100644
--- a/drivers/crypto/dcp.c
+++ b/drivers/crypto/dcp.c
@@ -733,12 +733,9 @@ static int dcp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n");
- return -ENXIO;
- }
- dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start,
- resource_size(r));
+ dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(dev->dcp_regs_base))
+ return PTR_ERR(dev->dcp_regs_base);
dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
udelay(10);
@@ -762,7 +759,8 @@ static int dcp_probe(struct platform_device *pdev)
return -EIO;
}
dev->dcp_vmi_irq = r->start;
- ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev);
+ ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0,
+ "dcp", dev);
if (ret != 0) {
dev_err(&pdev->dev, "can't request_irq (0)\n");
return -EIO;
@@ -771,15 +769,14 @@ static int dcp_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
if (!r) {
dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
- ret = -EIO;
- goto err_free_irq0;
+ return -EIO;
}
dev->dcp_irq = r->start;
- ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev);
+ ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp",
+ dev);
if (ret != 0) {
dev_err(&pdev->dev, "can't request_irq (1)\n");
- ret = -EIO;
- goto err_free_irq0;
+ return -EIO;
}
dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
@@ -788,8 +785,7 @@ static int dcp_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!dev->hw_pkg[0]) {
dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
- ret = -ENOMEM;
- goto err_free_irq1;
+ return -ENOMEM;
}
for (i = 1; i < DCP_MAX_PKG; i++) {
@@ -848,16 +844,14 @@ err_unregister:
for (j = 0; j < i; j++)
crypto_unregister_alg(&algs[j]);
err_free_key_iv:
+ tasklet_kill(&dev->done_task);
+ tasklet_kill(&dev->queue_task);
dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
dev->payload_base_dma);
err_free_hw_packet:
dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
dev->hw_phys_pkg);
-err_free_irq1:
- free_irq(dev->dcp_irq, dev);
-err_free_irq0:
- free_irq(dev->dcp_vmi_irq, dev);
return ret;
}
@@ -868,23 +862,20 @@ static int dcp_remove(struct platform_device *pdev)
int j;
dev = platform_get_drvdata(pdev);
- dma_free_coherent(&pdev->dev,
- DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
- dev->hw_pkg[0], dev->hw_phys_pkg);
-
- dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
- dev->payload_base_dma);
+ misc_deregister(&dev->dcp_bootstream_misc);
- free_irq(dev->dcp_irq, dev);
- free_irq(dev->dcp_vmi_irq, dev);
+ for (j = 0; j < ARRAY_SIZE(algs); j++)
+ crypto_unregister_alg(&algs[j]);
tasklet_kill(&dev->done_task);
tasklet_kill(&dev->queue_task);
- for (j = 0; j < ARRAY_SIZE(algs); j++)
- crypto_unregister_alg(&algs[j]);
+ dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
+ dev->payload_base_dma);
- misc_deregister(&dev->dcp_bootstream_misc);
+ dma_free_coherent(&pdev->dev,
+ DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
+ dev->hw_pkg[0], dev->hw_phys_pkg);
return 0;
}
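
The dcp.c conversion to devm_ioremap_resource() and devm_request_irq() is what
allows the error-path labels and explicit free_irq() calls to disappear: the
resources are released automatically when probing fails or the device is
unbound. A minimal kernel-style probe sketch of that pattern, assuming one MEM
and one IRQ resource; the demo_* names are illustrative, not from the driver:

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Illustrative IRQ handler; not part of the patch. */
static irqreturn_t demo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	int irq, ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* validates res for us */
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Released automatically on unbind/failure: no goto labels needed. */
	ret = devm_request_irq(&pdev->dev, irq, demo_irq, 0,
			       dev_name(&pdev->dev), pdev);
	if (ret)
		return ret;

	return 0;
}
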
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 21180d6cad6e..9dd6e01eac33 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -218,23 +218,9 @@ static dma_addr_t crypt_phys;
static int support_aes = 1;
-static void dev_release(struct device *dev)
-{
- return;
-}
-
#define DRIVER_NAME "ixp4xx_crypto"
-static struct platform_device pseudo_dev = {
- .name = DRIVER_NAME,
- .id = 0,
- .num_resources = 0,
- .dev = {
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .release = dev_release,
- }
-};
-static struct device *dev = &pseudo_dev.dev;
+static struct platform_device *pdev;
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
@@ -263,6 +249,7 @@ static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
static int setup_crypt_desc(void)
{
+ struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
crypt_virt = dma_alloc_coherent(dev,
NPE_QLEN * sizeof(struct crypt_ctl),
@@ -363,6 +350,7 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
static void one_packet(dma_addr_t phys)
{
+ struct device *dev = &pdev->dev;
struct crypt_ctl *crypt;
struct ixp_ctx *ctx;
int failed;
@@ -432,7 +420,7 @@ static void crypto_done_action(unsigned long arg)
tasklet_schedule(&crypto_done_tasklet);
}
-static int init_ixp_crypto(void)
+static int init_ixp_crypto(struct device *dev)
{
int ret = -ENODEV;
u32 msg[2] = { 0, 0 };
@@ -519,7 +507,7 @@ err:
return ret;
}
-static void release_ixp_crypto(void)
+static void release_ixp_crypto(struct device *dev)
{
qmgr_disable_irq(RECV_QID);
tasklet_kill(&crypto_done_tasklet);
@@ -886,6 +874,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
struct buffer_desc src_hook;
+ struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1010,6 +999,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
unsigned int cryptlen;
struct buffer_desc *buf, src_hook;
struct aead_ctx *req_ctx = aead_request_ctx(req);
+ struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1159,32 +1149,24 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- struct rtattr *rta = (struct rtattr *)key;
- struct crypto_authenc_key_param *param;
+ struct crypto_authenc_keys keys;
- if (!RTA_OK(rta, keylen))
- goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- param = RTA_DATA(rta);
- ctx->enckey_len = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
+ if (keys.authkeylen > sizeof(ctx->authkey))
+ goto badkey;
- if (keylen < ctx->enckey_len)
+ if (keys.enckeylen > sizeof(ctx->enckey))
goto badkey;
- ctx->authkey_len = keylen - ctx->enckey_len;
- memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
- memcpy(ctx->authkey, key, ctx->authkey_len);
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+ ctx->authkey_len = keys.authkeylen;
+ ctx->enckey_len = keys.enckeylen;
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
- ctx->enckey_len = 0;
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1418,20 +1400,30 @@ static struct ixp_alg ixp4xx_algos[] = {
} };
#define IXP_POSTFIX "-ixp4xx"
+
+static const struct platform_device_info ixp_dev_info __initdata = {
+ .name = DRIVER_NAME,
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
static int __init ixp_module_init(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
- int i,err ;
+ int i, err ;
- if (platform_device_register(&pseudo_dev))
- return -ENODEV;
+ pdev = platform_device_register_full(&ixp_dev_info);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ dev = &pdev->dev;
spin_lock_init(&desc_lock);
spin_lock_init(&emerg_lock);
- err = init_ixp_crypto();
+ err = init_ixp_crypto(&pdev->dev);
if (err) {
- platform_device_unregister(&pseudo_dev);
+ platform_device_unregister(pdev);
return err;
}
for (i=0; i< num; i++) {
@@ -1495,8 +1487,8 @@ static void __exit ixp_module_exit(void)
if (ixp4xx_algos[i].registered)
crypto_unregister_alg(&ixp4xx_algos[i].crypto);
}
- release_ixp_crypto();
- platform_device_unregister(&pseudo_dev);
+ release_ixp_crypto(&pdev->dev);
+ platform_device_unregister(pdev);
}
module_init(ixp_module_init);
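
The ixp4xx aead_setkey() rewrite (and the matching picoxcell and talitos hunks
below) drops the hand-rolled rtattr parsing in favour of
crypto_authenc_extractkeys(), which splits the authenc key blob into its
authentication and encryption parts and validates the embedded length field.
A kernel-style sketch of a setkey built on that helper; the demo_ctx layout
and its key-size bounds are assumptions, not taken from any of these drivers:

#include <crypto/authenc.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/string.h>

#define DEMO_MAX_AUTH_KEY	64	/* illustrative bounds */
#define DEMO_MAX_ENC_KEY	32

struct demo_ctx {
	u8 authkey[DEMO_MAX_AUTH_KEY];
	u8 enckey[DEMO_MAX_ENC_KEY];
	unsigned int authkeylen;
	unsigned int enckeylen;
};

static int demo_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct demo_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	/* Splits the blob into auth and enc parts, checking the rtattr header. */
	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto badkey;
	if (keys.authkeylen > sizeof(ctx->authkey) ||
	    keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkeylen = keys.authkeylen;
	ctx->enckeylen = keys.enckeylen;
	return 0;

badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
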
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 3374a3ebe4c7..8d1e6f8e9e9c 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -907,7 +907,7 @@ static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}
-irqreturn_t crypto_int(int irq, void *priv)
+static irqreturn_t crypto_int(int irq, void *priv)
{
u32 val;
@@ -928,7 +928,7 @@ irqreturn_t crypto_int(int irq, void *priv)
return IRQ_HANDLED;
}
-struct crypto_alg mv_aes_alg_ecb = {
+static struct crypto_alg mv_aes_alg_ecb = {
.cra_name = "ecb(aes)",
.cra_driver_name = "mv-ecb-aes",
.cra_priority = 300,
@@ -951,7 +951,7 @@ struct crypto_alg mv_aes_alg_ecb = {
},
};
-struct crypto_alg mv_aes_alg_cbc = {
+static struct crypto_alg mv_aes_alg_cbc = {
.cra_name = "cbc(aes)",
.cra_driver_name = "mv-cbc-aes",
.cra_priority = 300,
@@ -975,7 +975,7 @@ struct crypto_alg mv_aes_alg_cbc = {
},
};
-struct ahash_alg mv_sha1_alg = {
+static struct ahash_alg mv_sha1_alg = {
.init = mv_hash_init,
.update = mv_hash_update,
.final = mv_hash_final,
@@ -999,7 +999,7 @@ struct ahash_alg mv_sha1_alg = {
}
};
-struct ahash_alg mv_hmac_sha1_alg = {
+static struct ahash_alg mv_hmac_sha1_alg = {
.init = mv_hash_init,
.update = mv_hash_update,
.final = mv_hash_final,
@@ -1084,7 +1084,7 @@ static int mv_probe(struct platform_device *pdev)
goto err_unmap_sram;
}
- ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+ ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
cp);
if (ret)
goto err_thread;
@@ -1187,7 +1187,7 @@ static struct platform_driver marvell_crypto = {
.driver = {
.owner = THIS_MODULE,
.name = "mv_crypto",
- .of_match_table = of_match_ptr(mv_cesa_of_match_table),
+ .of_match_table = mv_cesa_of_match_table,
},
};
MODULE_ALIAS("platform:mv_crypto");
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index ce791c2f81f7..a9ccbf14096e 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -275,7 +275,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
if (dd->flags & FLAGS_CBC)
val |= AES_REG_CTRL_CBC;
if (dd->flags & FLAGS_CTR) {
- val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32;
+ val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
}
if (dd->flags & FLAGS_ENCRYPT)
@@ -554,7 +554,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
return err;
}
-int omap_aes_check_aligned(struct scatterlist *sg)
+static int omap_aes_check_aligned(struct scatterlist *sg)
{
while (sg) {
if (!IS_ALIGNED(sg->offset, 4))
@@ -566,7 +566,7 @@ int omap_aes_check_aligned(struct scatterlist *sg)
return 0;
}
-int omap_aes_copy_sgs(struct omap_aes_dev *dd)
+static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
{
void *buf_in, *buf_out;
int pages;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 8bdde57f6bb1..e45aaaf0db30 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1818,7 +1818,7 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
goto err;
}
- dd->irq = of_irq_to_resource(node, 0, NULL);
+ dd->irq = irq_of_parse_and_map(node, 0);
if (!dd->irq) {
dev_err(dev, "can't translate OF irq value\n");
err = -EINVAL;
@@ -2033,3 +2033,4 @@ module_platform_driver(omap_sham_driver);
MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
+MODULE_ALIAS("platform:omap-sham");
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 888f7f4a6d3f..a6175ba6d238 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -495,45 +495,29 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
- unsigned int authkeylen, enckeylen;
+ struct crypto_authenc_keys keys;
int err = -EINVAL;
- if (!RTA_OK(rta, keylen))
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ if (keys.enckeylen > AES_MAX_KEY_SIZE)
goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
-
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
-
- if (keylen < enckeylen)
- goto badkey;
-
- authkeylen = keylen - enckeylen;
-
- if (enckeylen > AES_MAX_KEY_SIZE)
+ if (keys.authkeylen > sizeof(ctx->hash_ctx))
goto badkey;
if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
SPA_CTRL_CIPH_ALG_AES)
- err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen);
+ err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
else
- err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen);
+ err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);
if (err)
goto badkey;
- memcpy(ctx->hash_ctx, key, authkeylen);
- ctx->hash_key_len = authkeylen;
+ memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
+ ctx->hash_key_len = keys.authkeylen;
return 0;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index d7bb8bac36e9..785a9ded7bdf 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1058,7 +1058,7 @@ static struct platform_driver sahara_driver = {
.driver = {
.name = SAHARA_NAME,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(sahara_dt_ids),
+ .of_match_table = sahara_dt_ids,
},
.id_table = sahara_platform_ids,
};
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 661dc3eb1d66..905de4427e7c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -32,6 +32,8 @@
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -671,39 +673,20 @@ static int aead_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
- unsigned int authkeylen;
- unsigned int enckeylen;
-
- if (!RTA_OK(rta, keylen))
- goto badkey;
+ struct crypto_authenc_keys keys;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+ if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
goto badkey;
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
-
- if (keylen < enckeylen)
- goto badkey;
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
- authkeylen = keylen - enckeylen;
-
- if (keylen > TALITOS_MAX_KEY_SIZE)
- goto badkey;
-
- memcpy(&ctx->key, key, keylen);
-
- ctx->keylen = keylen;
- ctx->enckeylen = enckeylen;
- ctx->authkeylen = authkeylen;
+ ctx->keylen = keys.authkeylen + keys.enckeylen;
+ ctx->enckeylen = keys.enckeylen;
+ ctx->authkeylen = keys.authkeylen;
return 0;
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index 2d58da972ae2..d8c7a132fea4 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -27,6 +27,8 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -199,8 +201,6 @@ static void aes_workqueue_handler(struct work_struct *work);
static DECLARE_WORK(aes_work, aes_workqueue_handler);
static struct workqueue_struct *aes_wq;
-extern unsigned long long tegra_chip_uid(void);
-
static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
{
return readl(dd->io_base + offset);
@@ -713,13 +713,12 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
struct tegra_aes_dev *dd = aes_dev;
struct tegra_aes_ctx *ctx = &rng_ctx;
struct tegra_aes_slot *key_slot;
- struct timespec ts;
int ret = 0;
- u64 nsec, tmp[2];
+ u8 tmp[16]; /* 16 bytes = 128 bits of entropy */
u8 *dt;
if (!ctx || !dd) {
- dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n",
+ pr_err("ctx=0x%x, dd=0x%x\n",
(unsigned int)ctx, (unsigned int)dd);
return -EINVAL;
}
@@ -778,14 +777,8 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
} else {
- getnstimeofday(&ts);
- nsec = timespec_to_ns(&ts);
- do_div(nsec, 1000);
- nsec ^= dd->ctr << 56;
- dd->ctr++;
- tmp[0] = nsec;
- tmp[1] = tegra_chip_uid();
- dt = (u8 *)tmp;
+ get_random_bytes(tmp, sizeof(tmp));
+ dt = tmp;
}
memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
@@ -804,7 +797,7 @@ static int tegra_aes_cra_init(struct crypto_tfm *tfm)
return 0;
}
-void tegra_aes_cra_exit(struct crypto_tfm *tfm)
+static void tegra_aes_cra_exit(struct crypto_tfm *tfm)
{
struct tegra_aes_ctx *ctx =
crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);
@@ -924,7 +917,7 @@ static int tegra_aes_probe(struct platform_device *pdev)
}
/* Initialize the vde clock */
- dd->aes_clk = clk_get(dev, "vde");
+ dd->aes_clk = devm_clk_get(dev, "vde");
if (IS_ERR(dd->aes_clk)) {
dev_err(dev, "iclock intialization failed.\n");
err = -ENODEV;
@@ -1033,8 +1026,6 @@ out:
if (dd->buf_out)
dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
dd->buf_out, dd->dma_buf_out);
- if (!IS_ERR(dd->aes_clk))
- clk_put(dd->aes_clk);
if (aes_wq)
destroy_workqueue(aes_wq);
spin_lock(&list_lock);
@@ -1068,7 +1059,6 @@ static int tegra_aes_remove(struct platform_device *pdev)
dd->buf_in, dd->dma_buf_in);
dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
dd->buf_out, dd->dma_buf_out);
- clk_put(dd->aes_clk);
aes_dev = NULL;
return 0;
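
The tegra-aes hunk above stops deriving the RNG "DT" seed from a timestamp and
the chip UID and instead fills it from the kernel's entropy pool. A short
kernel-style sketch of that replacement; the helper name and block-size macro
are illustrative:

#include <linux/random.h>
#include <linux/types.h>

#define DEMO_RNG_BLK_SZ 16	/* mirrors the 16-byte DT block in the driver */

/* Seed the DT block with kernel-provided randomness instead of a timestamp. */
static void demo_seed_dt(u8 *dt)
{
	get_random_bytes(dt, DEMO_RNG_BLK_SZ);
}
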
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index c99c00d35d34..a0b2f7e0eedb 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -18,7 +18,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
@@ -902,13 +902,13 @@ static ssize_t available_frequencies_show(struct device *d,
{
struct devfreq *df = to_devfreq(d);
struct device *dev = df->dev.parent;
- struct opp *opp;
+ struct dev_pm_opp *opp;
ssize_t count = 0;
unsigned long freq = 0;
rcu_read_lock();
do {
- opp = opp_find_freq_ceil(dev, &freq);
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
if (IS_ERR(opp))
break;
@@ -993,10 +993,10 @@ static int __init devfreq_init(void)
}
devfreq_wq = create_freezable_workqueue("devfreq_wq");
- if (IS_ERR(devfreq_wq)) {
+ if (!devfreq_wq) {
class_destroy(devfreq_class);
pr_err("%s: couldn't create workqueue\n", __FILE__);
- return PTR_ERR(devfreq_wq);
+ return -ENOMEM;
}
devfreq_class->dev_groups = devfreq_groups;
@@ -1029,25 +1029,26 @@ module_exit(devfreq_exit);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
- u32 flags)
+struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+ unsigned long *freq,
+ u32 flags)
{
- struct opp *opp;
+ struct dev_pm_opp *opp;
if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
/* The freq is an upper bound. opp should be lower */
- opp = opp_find_freq_floor(dev, freq);
+ opp = dev_pm_opp_find_freq_floor(dev, freq);
/* If not available, use the closest opp */
if (opp == ERR_PTR(-ERANGE))
- opp = opp_find_freq_ceil(dev, freq);
+ opp = dev_pm_opp_find_freq_ceil(dev, freq);
} else {
/* The freq is an lower bound. opp should be higher */
- opp = opp_find_freq_ceil(dev, freq);
+ opp = dev_pm_opp_find_freq_ceil(dev, freq);
/* If not available, use the closest opp */
if (opp == ERR_PTR(-ERANGE))
- opp = opp_find_freq_floor(dev, freq);
+ opp = dev_pm_opp_find_freq_floor(dev, freq);
}
return opp;
@@ -1066,7 +1067,7 @@ int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
int ret = 0;
rcu_read_lock();
- nh = opp_get_notifier(dev);
+ nh = dev_pm_opp_get_notifier(dev);
if (IS_ERR(nh))
ret = PTR_ERR(nh);
rcu_read_unlock();
@@ -1092,7 +1093,7 @@ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
int ret = 0;
rcu_read_lock();
- nh = opp_get_notifier(dev);
+ nh = dev_pm_opp_get_notifier(dev);
if (IS_ERR(nh))
ret = PTR_ERR(nh);
rcu_read_unlock();
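
The devfreq changes track the OPP library rename: struct opp becomes struct
dev_pm_opp and the lookup helpers gain a dev_pm_ prefix, while the lookups
themselves still have to run under rcu_read_lock(). A kernel-style sketch of a
lookup with the renamed API; the helper name is hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* Find the OPP at or above *freq and report its frequency and voltage. */
static int demo_opp_lookup(struct device *dev, unsigned long *freq,
			   unsigned long *volt)
{
	struct dev_pm_opp *opp;

	rcu_read_lock();			/* OPP pointers are RCU-protected */
	opp = dev_pm_opp_find_freq_ceil(dev, freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	*freq = dev_pm_opp_get_freq(opp);
	*volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();			/* opp must not be used past this point */

	return 0;
}
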
diff --git a/drivers/devfreq/exynos/exynos4_bus.c b/drivers/devfreq/exynos/exynos4_bus.c
index c5f86d8caca3..cede6f71cd63 100644
--- a/drivers/devfreq/exynos/exynos4_bus.c
+++ b/drivers/devfreq/exynos/exynos4_bus.c
@@ -19,7 +19,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/suspend.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -639,7 +639,7 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
struct platform_device *pdev = container_of(dev, struct platform_device,
dev);
struct busfreq_data *data = platform_get_drvdata(pdev);
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long freq;
unsigned long old_freq = data->curr_oppinfo.rate;
struct busfreq_opp_info new_oppinfo;
@@ -650,8 +650,8 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
rcu_read_unlock();
return PTR_ERR(opp);
}
- new_oppinfo.rate = opp_get_freq(opp);
- new_oppinfo.volt = opp_get_voltage(opp);
+ new_oppinfo.rate = dev_pm_opp_get_freq(opp);
+ new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
freq = new_oppinfo.rate;
@@ -873,7 +873,7 @@ static int exynos4210_init_tables(struct busfreq_data *data)
exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
for (i = LV_0; i < EX4210_LV_NUM; i++) {
- err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
+ err = dev_pm_opp_add(data->dev, exynos4210_busclk_table[i].clk,
exynos4210_busclk_table[i].volt);
if (err) {
dev_err(data->dev, "Cannot add opp entries.\n");
@@ -940,7 +940,7 @@ static int exynos4x12_init_tables(struct busfreq_data *data)
}
for (i = 0; i < EX4x12_LV_NUM; i++) {
- ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
+ ret = dev_pm_opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
exynos4x12_mifclk_table[i].volt);
if (ret) {
dev_err(data->dev, "Fail to add opp entries.\n");
@@ -956,7 +956,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
{
struct busfreq_data *data = container_of(this, struct busfreq_data,
pm_notifier);
- struct opp *opp;
+ struct dev_pm_opp *opp;
struct busfreq_opp_info new_oppinfo;
unsigned long maxfreq = ULONG_MAX;
int err = 0;
@@ -969,7 +969,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
data->disabled = true;
rcu_read_lock();
- opp = opp_find_freq_floor(data->dev, &maxfreq);
+ opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(data->dev, "%s: unable to find a min freq\n",
@@ -977,8 +977,8 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
mutex_unlock(&data->lock);
return PTR_ERR(opp);
}
- new_oppinfo.rate = opp_get_freq(opp);
- new_oppinfo.volt = opp_get_voltage(opp);
+ new_oppinfo.rate = dev_pm_opp_get_freq(opp);
+ new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
err = exynos4_bus_setvolt(data, &new_oppinfo,
@@ -1020,7 +1020,7 @@ unlock:
static int exynos4_busfreq_probe(struct platform_device *pdev)
{
struct busfreq_data *data;
- struct opp *opp;
+ struct dev_pm_opp *opp;
struct device *dev = &pdev->dev;
int err = 0;
@@ -1065,15 +1065,16 @@ static int exynos4_busfreq_probe(struct platform_device *pdev)
}
rcu_read_lock();
- opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
+ opp = dev_pm_opp_find_freq_floor(dev,
+ &exynos4_devfreq_profile.initial_freq);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(dev, "Invalid initial frequency %lu kHz.\n",
exynos4_devfreq_profile.initial_freq);
return PTR_ERR(opp);
}
- data->curr_oppinfo.rate = opp_get_freq(opp);
- data->curr_oppinfo.volt = opp_get_voltage(opp);
+ data->curr_oppinfo.rate = dev_pm_opp_get_freq(opp);
+ data->curr_oppinfo.volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
platform_set_drvdata(pdev, data);
diff --git a/drivers/devfreq/exynos/exynos5_bus.c b/drivers/devfreq/exynos/exynos5_bus.c
index 574b16b59be5..a60da3c1c48e 100644
--- a/drivers/devfreq/exynos/exynos5_bus.c
+++ b/drivers/devfreq/exynos/exynos5_bus.c
@@ -15,10 +15,9 @@
#include <linux/module.h>
#include <linux/devfreq.h>
#include <linux/io.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/suspend.h>
-#include <linux/opp.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -132,7 +131,7 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
struct platform_device *pdev = container_of(dev, struct platform_device,
dev);
struct busfreq_data_int *data = platform_get_drvdata(pdev);
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long old_freq, freq;
unsigned long volt;
@@ -144,8 +143,8 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
return PTR_ERR(opp);
}
- freq = opp_get_freq(opp);
- volt = opp_get_voltage(opp);
+ freq = dev_pm_opp_get_freq(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
old_freq = data->curr_freq;
@@ -246,7 +245,7 @@ static int exynos5250_init_int_tables(struct busfreq_data_int *data)
int i, err = 0;
for (i = LV_0; i < _LV_END; i++) {
- err = opp_add(data->dev, exynos5_int_opp_table[i].clk,
+ err = dev_pm_opp_add(data->dev, exynos5_int_opp_table[i].clk,
exynos5_int_opp_table[i].volt);
if (err) {
dev_err(data->dev, "Cannot add opp entries.\n");
@@ -262,7 +261,7 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this,
{
struct busfreq_data_int *data = container_of(this,
struct busfreq_data_int, pm_notifier);
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long maxfreq = ULONG_MAX;
unsigned long freq;
unsigned long volt;
@@ -276,14 +275,14 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this,
data->disabled = true;
rcu_read_lock();
- opp = opp_find_freq_floor(data->dev, &maxfreq);
+ opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
if (IS_ERR(opp)) {
rcu_read_unlock();
err = PTR_ERR(opp);
goto unlock;
}
- freq = opp_get_freq(opp);
- volt = opp_get_voltage(opp);
+ freq = dev_pm_opp_get_freq(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
err = exynos5_int_setvolt(data, volt);
@@ -316,7 +315,7 @@ unlock:
static int exynos5_busfreq_int_probe(struct platform_device *pdev)
{
struct busfreq_data_int *data;
- struct opp *opp;
+ struct dev_pm_opp *opp;
struct device *dev = &pdev->dev;
struct device_node *np;
unsigned long initial_freq;
@@ -351,46 +350,43 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev)
err = exynos5250_init_int_tables(data);
if (err)
- goto err_regulator;
+ return err;
- data->vdd_int = regulator_get(dev, "vdd_int");
+ data->vdd_int = devm_regulator_get(dev, "vdd_int");
if (IS_ERR(data->vdd_int)) {
dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
- err = PTR_ERR(data->vdd_int);
- goto err_regulator;
+ return PTR_ERR(data->vdd_int);
}
- data->int_clk = clk_get(dev, "int_clk");
+ data->int_clk = devm_clk_get(dev, "int_clk");
if (IS_ERR(data->int_clk)) {
dev_err(dev, "Cannot get clock \"int_clk\"\n");
- err = PTR_ERR(data->int_clk);
- goto err_clock;
+ return PTR_ERR(data->int_clk);
}
rcu_read_lock();
- opp = opp_find_freq_floor(dev,
+ opp = dev_pm_opp_find_freq_floor(dev,
&exynos5_devfreq_int_profile.initial_freq);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(dev, "Invalid initial frequency %lu kHz.\n",
exynos5_devfreq_int_profile.initial_freq);
- err = PTR_ERR(opp);
- goto err_opp_add;
+ return PTR_ERR(opp);
}
- initial_freq = opp_get_freq(opp);
- initial_volt = opp_get_voltage(opp);
+ initial_freq = dev_pm_opp_get_freq(opp);
+ initial_volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
data->curr_freq = initial_freq;
err = clk_set_rate(data->int_clk, initial_freq * 1000);
if (err) {
dev_err(dev, "Failed to set initial frequency\n");
- goto err_opp_add;
+ return err;
}
err = exynos5_int_setvolt(data, initial_volt);
if (err)
- goto err_opp_add;
+ return err;
platform_set_drvdata(pdev, data);
@@ -419,12 +415,6 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev)
err_devfreq_add:
devfreq_remove_device(data->devfreq);
- platform_set_drvdata(pdev, NULL);
-err_opp_add:
- clk_put(data->int_clk);
-err_clock:
- regulator_put(data->vdd_int);
-err_regulator:
return err;
}
@@ -435,9 +425,6 @@ static int exynos5_busfreq_int_remove(struct platform_device *pdev)
pm_qos_remove_request(&data->int_req);
unregister_pm_notifier(&data->pm_notifier);
devfreq_remove_device(data->devfreq);
- regulator_put(data->vdd_int);
- clk_put(data->int_clk);
- platform_set_drvdata(pdev, NULL);
return 0;
}
@@ -479,7 +466,7 @@ static int __init exynos5_busfreq_int_init(void)
exynos5_devfreq_pdev =
platform_device_register_simple("exynos5-bus-int", -1, NULL, 0);
- if (IS_ERR_OR_NULL(exynos5_devfreq_pdev)) {
+ if (IS_ERR(exynos5_devfreq_pdev)) {
ret = PTR_ERR(exynos5_devfreq_pdev);
goto out1;
}
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f238cfd33847..dd2874ec1927 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -154,6 +154,18 @@ config TEGRA20_APB_DMA
This DMA controller transfers data from memory to peripheral fifo
or vice versa. It does not support memory to memory data transfer.
+config S3C24XX_DMAC
+ tristate "Samsung S3C24XX DMA support"
+ depends on ARCH_S3C24XX && !S3C24XX_DMA
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support for the Samsung S3C24XX DMA controller driver. The
+ DMA controller has multiple DMA channels which can be
+ configured for different peripherals like audio, UART, SPI.
+ The DMA controller can transfer data from memory to peripheral,
+ peripheral to memory, peripheral to peripheral and memory to
+ memory.
+
source "drivers/dma/sh/Kconfig"
config COH901318
@@ -195,7 +207,7 @@ config SIRF_DMA
config TI_EDMA
bool "TI EDMA support"
- depends on ARCH_DAVINCI || ARCH_OMAP
+ depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_PRIV_EDMA
@@ -301,7 +313,7 @@ config MMP_PDMA
depends on (ARCH_MMP || ARCH_PXA)
select DMA_ENGINE
help
- Support the MMP PDMA engine for PXA and MMP platfrom.
+ Support the MMP PDMA engine for PXA and MMP platform.
config DMA_JZ4740
tristate "JZ4740 DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index db89035b3626..0ce2da97e429 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
+obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index fce46c5bf1c7..8c56d7856cb2 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1252,7 +1252,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
size_t bytes = 0;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
/*
@@ -1267,7 +1267,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&plchan->vc.lock, flags);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
+ if (ret != DMA_COMPLETE) {
vd = vchan_find_desc(&plchan->vc, cookie);
if (vd) {
/* On the issued list, so hasn't been processed yet */
@@ -2055,6 +2055,11 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
if (ret)
return ret;
+ /* Ensure that we can do DMA */
+ ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto out_no_pl08x;
+
/* Create the driver state holder */
pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
if (!pl08x) {
@@ -2133,8 +2138,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
- ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
- DRIVER_NAME, pl08x);
+ ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
if (ret) {
dev_err(&adev->dev, "%s failed to request interrupt %d\n",
__func__, adev->irq[0]);
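
The amba-pl08x hunks (and the matching ones in the drivers that follow) track
the dmaengine rename of DMA_SUCCESS to DMA_COMPLETE in the tx_status path. A
kernel-style sketch of the usual shape of that callback after the rename; the
residue value is left as a placeholder:

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* drivers/dma private header: dma_cookie_status() */

static enum dma_status demo_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)	/* formerly DMA_SUCCESS */
		return ret;

	/* A real driver would compute the residue of the pending transfer. */
	dma_set_residue(txstate, 0);
	return ret;
}
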
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c787f38a186a..1ef74579447d 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1102,7 +1102,7 @@ atc_tx_status(struct dma_chan *chan,
int bytes = 0;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
/*
* There's no point calculating the residue if there's
diff --git a/drivers/dma/bestcomm/sram.c b/drivers/dma/bestcomm/sram.c
index 5e2ed30ba2c4..2074e0e3fa21 100644
--- a/drivers/dma/bestcomm/sram.c
+++ b/drivers/dma/bestcomm/sram.c
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <asm/io.h>
#include <asm/mmu.h>
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 31011d2a26fc..3c6716e0b78e 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
dma_set_residue(txstate, coh901318_get_bytes_left(chan));
@@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED,
+ err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0,
"coh901318", base);
if (err)
return err;
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 7c82b92f9b16..278b3058919a 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -353,7 +353,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
/* lock */
ret = dma_cookie_status(chan, cookie, txstate);
- if (txstate && ret == DMA_SUCCESS)
+ if (txstate && ret == DMA_COMPLETE)
txstate->residue = c->residue;
/* unlock */
@@ -674,14 +674,14 @@ static void cleanup_chans(struct cppi41_dd *cdd)
}
}
-static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
{
struct cppi41_channel *cchan;
int i;
int ret;
u32 n_chans;
- ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels",
+ ret = of_property_read_u32(dev->of_node, "#dma-channels",
&n_chans);
if (ret)
return ret;
@@ -719,7 +719,7 @@ err:
return -ENOMEM;
}
-static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
{
unsigned int mem_decs;
int i;
@@ -731,7 +731,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
- dma_free_coherent(&pdev->dev, mem_decs, cdd->cd,
+ dma_free_coherent(dev, mem_decs, cdd->cd,
cdd->descs_phys);
}
}
@@ -741,19 +741,19 @@ static void disable_sched(struct cppi41_dd *cdd)
cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
}
-static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
disable_sched(cdd);
- purge_descs(pdev, cdd);
+ purge_descs(dev, cdd);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
- dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
+ dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
cdd->scratch_phys);
}
-static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_descs(struct device *dev, struct cppi41_dd *cdd)
{
unsigned int desc_size;
unsigned int mem_decs;
@@ -777,7 +777,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
reg |= ilog2(ALLOC_DECS_NUM) - 5;
BUILD_BUG_ON(DESCS_AREAS != 1);
- cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs,
+ cdd->cd = dma_alloc_coherent(dev, mem_decs,
&cdd->descs_phys, GFP_KERNEL);
if (!cdd->cd)
return -ENOMEM;
@@ -813,12 +813,12 @@ static void init_sched(struct cppi41_dd *cdd)
cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}
-static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
int ret;
BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
- cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE,
+ cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
&cdd->scratch_phys, GFP_KERNEL);
if (!cdd->qmgr_scratch)
return -ENOMEM;
@@ -827,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
- ret = init_descs(pdev, cdd);
+ ret = init_descs(dev, cdd);
if (ret)
goto err_td;
@@ -835,7 +835,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
init_sched(cdd);
return 0;
err_td:
- deinit_cpii41(pdev, cdd);
+ deinit_cppi41(dev, cdd);
return ret;
}
@@ -914,11 +914,11 @@ static const struct of_device_id cppi41_dma_ids[] = {
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
-static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
+static const struct cppi_glue_infos *get_glue_info(struct device *dev)
{
const struct of_device_id *of_id;
- of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node);
+ of_id = of_match_node(cppi41_dma_ids, dev->of_node);
if (!of_id)
return NULL;
return of_id->data;
@@ -927,11 +927,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
static int cppi41_dma_probe(struct platform_device *pdev)
{
struct cppi41_dd *cdd;
+ struct device *dev = &pdev->dev;
const struct cppi_glue_infos *glue_info;
int irq;
int ret;
- glue_info = get_glue_info(pdev);
+ glue_info = get_glue_info(dev);
if (!glue_info)
return -EINVAL;
@@ -946,14 +947,14 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
cdd->ddev.device_control = cppi41_dma_control;
- cdd->ddev.dev = &pdev->dev;
+ cdd->ddev.dev = dev;
INIT_LIST_HEAD(&cdd->ddev.channels);
cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
- cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0);
- cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1);
- cdd->sched_mem = of_iomap(pdev->dev.of_node, 2);
- cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3);
+ cdd->usbss_mem = of_iomap(dev->of_node, 0);
+ cdd->ctrl_mem = of_iomap(dev->of_node, 1);
+ cdd->sched_mem = of_iomap(dev->of_node, 2);
+ cdd->qmgr_mem = of_iomap(dev->of_node, 3);
if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
!cdd->qmgr_mem) {
@@ -961,8 +962,8 @@ static int cppi41_dma_probe(struct platform_device *pdev)
goto err_remap;
}
- pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
if (ret)
goto err_get_sync;
@@ -970,22 +971,22 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->queues_tx = glue_info->queues_tx;
cdd->td_queue = glue_info->td_queue;
- ret = init_cppi41(pdev, cdd);
+ ret = init_cppi41(dev, cdd);
if (ret)
goto err_init_cppi;
- ret = cppi41_add_chans(pdev, cdd);
+ ret = cppi41_add_chans(dev, cdd);
if (ret)
goto err_chans;
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq)
goto err_irq;
cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
- dev_name(&pdev->dev), cdd);
+ dev_name(dev), cdd);
if (ret)
goto err_irq;
cdd->irq = irq;
@@ -994,7 +995,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
if (ret)
goto err_dma_reg;
- ret = of_dma_controller_register(pdev->dev.of_node,
+ ret = of_dma_controller_register(dev->of_node,
cppi41_dma_xlate, &cpp41_dma_info);
if (ret)
goto err_of;
@@ -1009,11 +1010,11 @@ err_irq:
cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
cleanup_chans(cdd);
err_chans:
- deinit_cpii41(pdev, cdd);
+ deinit_cppi41(dev, cdd);
err_init_cppi:
- pm_runtime_put(&pdev->dev);
+ pm_runtime_put(dev);
err_get_sync:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
@@ -1033,7 +1034,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
free_irq(cdd->irq, cdd);
cleanup_chans(cdd);
- deinit_cpii41(pdev, cdd);
+ deinit_cppi41(&pdev->dev, cdd);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
@@ -1044,12 +1045,41 @@ static int cppi41_dma_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int cppi41_suspend(struct device *dev)
+{
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
+
+ cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
+ disable_sched(cdd);
+
+ return 0;
+}
+
+static int cppi41_resume(struct device *dev)
+{
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < DESCS_AREAS; i++)
+ cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
+
+ init_sched(cdd);
+ cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
+
static struct platform_driver cpp41_dma_driver = {
.probe = cppi41_dma_probe,
.remove = cppi41_dma_remove,
.driver = {
.name = "cppi41-dma-engine",
.owner = THIS_MODULE,
+ .pm = &cppi41_pm_ops,
.of_match_table = of_match_ptr(cppi41_dma_ids),
},
};
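
Beyond the pdev-to-dev cleanup, the cppi41 changes wire system sleep support through SIMPLE_DEV_PM_OPS. A minimal sketch of that wiring for a hypothetical foo platform driver (the suspend/resume bodies are placeholders, not the cppi41 register sequence):

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/pm.h>

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev)
	{
		/* quiesce the controller: mask IRQs, stop the scheduler, ... */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* reprogram registers that were lost while suspended */
		return 0;
	}
	#endif

	/* Expands to an empty dev_pm_ops when CONFIG_PM_SLEEP is not set. */
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.owner	= THIS_MODULE,
			.pm	= &foo_pm_ops,
		},
	};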
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index b0c0c8268d42..94c380f07538 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
unsigned long flags;
status = dma_cookie_status(c, cookie, state);
- if (status == DMA_SUCCESS || !state)
+ if (status == DMA_COMPLETE || !state)
return status;
spin_lock_irqsave(&chan->vchan.lock, flags);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9162ac80c18f..81d876528c70 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1062,7 +1062,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
if (!tx)
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
while (tx->cookie == -EBUSY) {
if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 92f796cdc6ab..59e287f56dfc 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -740,7 +740,7 @@ static int dmatest_func(void *data)
len, 0);
failed_tests++;
continue;
- } else if (status != DMA_SUCCESS) {
+ } else if (status != DMA_COMPLETE) {
enum dmatest_error_type type = (status == DMA_ERROR) ?
DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
thread_result_add(info, result, type,
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 89eb89f22284..2c29331571e4 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1098,13 +1098,13 @@ dwc_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS)
+ if (ret != DMA_COMPLETE)
dma_set_residue(txstate, dwc_get_residue(dwc));
if (dwc->paused && ret == DMA_IN_PROGRESS)
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index e35d97590311..453822cc4f9d 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -191,11 +191,9 @@ static int dw_probe(struct platform_device *pdev)
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
- /* Apply default dma_mask if needed */
- if (!dev->dma_mask) {
- dev->dma_mask = &dev->coherent_dma_mask;
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
- }
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
pdata = dev_get_platdata(dev);
if (!pdata)
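
The dw/platform.c hunk shows the other new helper used in this series: dma_coerce_mask_and_coherent() replaces the open-coded "point dma_mask at coherent_dma_mask" dance. A before/after sketch, with the surrounding probe code elided:

	/* before: */
	if (!dev->dma_mask) {
		dev->dma_mask = &dev->coherent_dma_mask;
		dev->coherent_dma_mask = DMA_BIT_MASK(32);
	}

	/* after: same effect, but the mask is also validated via dma_set_mask() */
	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err)
		return err;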
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3519111c566b..ea4abaa4f82e 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -46,8 +46,14 @@
#define EDMA_CHANS 64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
-/* Max of 16 segments per channel to conserve PaRAM slots */
-#define MAX_NR_SG 16
+/*
+ * Max of 20 segments per channel to conserve PaRAM slots
+ * Also note that MAX_NR_SG should be at least the number of periods
+ * that are required for ASoC, otherwise DMA prep calls will
+ * fail. Today davinci-pcm is the only user of this driver and
+ * requires at least 17 slots, so we set the default to 20.
+ */
+#define MAX_NR_SG 20
#define EDMA_MAX_SLOTS MAX_NR_SG
#define EDMA_DESCRIPTORS 16
@@ -250,6 +256,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
return ret;
}
+/*
+ * A PaRAM set configuration abstraction used by other modes
+ * @chan: Channel whose PaRAM set we're configuring
+ * @pset: PaRAM set to initialize and set up
+ * @src_addr: Source address of the DMA
+ * @dst_addr: Destination address of the DMA
+ * @burst: In units of dev_width, how much to send
+ * @dev_width: Width of a single device access (used as the A count)
+ * @dma_length: Total length of the DMA transfer
+ * @direction: Direction of the transfer
+ */
+static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+ dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
+ enum dma_slave_buswidth dev_width, unsigned int dma_length,
+ enum dma_transfer_direction direction)
+{
+ struct edma_chan *echan = to_edma_chan(chan);
+ struct device *dev = chan->device->dev;
+ int acnt, bcnt, ccnt, cidx;
+ int src_bidx, dst_bidx, src_cidx, dst_cidx;
+ int absync;
+
+ acnt = dev_width;
+ /*
+ * If the maxburst is equal to the fifo width, use
+ * A-synced transfers. This allows for large contiguous
+ * buffer transfers using only one PaRAM set.
+ */
+ if (burst == 1) {
+ /*
+ * For the A-sync case, bcnt and ccnt are the remainder
+ * and quotient respectively of the division of
+ * (dma_length / acnt) by (SZ_64K - 1). This is so
+ * that in case bcnt overflows, we have ccnt to use.
+ * Note: bcntrld is only used in the A-sync transfer, and it
+ * only matters when sg_dma_len(sg) >= SZ_64K.
+ * In that case, bcnt for the first frame is the remainder
+ * computed below, and every successive frame carries
+ * SZ_64K - 1 elements; this is assured by setting
+ * bcntrld = 0xffff at the end of this function.
+ * (A worked example with concrete numbers follows this file's diff.)
+ */
+ absync = false;
+ ccnt = dma_length / acnt / (SZ_64K - 1);
+ bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
+ /*
+ * If bcnt is non-zero, we have a remainder and hence an
+ * extra frame to transfer, so increment ccnt.
+ */
+ if (bcnt)
+ ccnt++;
+ else
+ bcnt = SZ_64K - 1;
+ cidx = acnt;
+ } else {
+ /*
+ * If maxburst is greater than the fifo address_width,
+ * use AB-synced transfers where A count is the fifo
+ * address_width and B count is the maxburst. In this
+ * case, we are limited to transfers of C count frames
+ * of (address_width * maxburst) where C count is limited
+ * to SZ_64K-1. This places an upper bound on the length
+ * of an SG segment that can be handled.
+ */
+ absync = true;
+ bcnt = burst;
+ ccnt = dma_length / (acnt * bcnt);
+ if (ccnt > (SZ_64K - 1)) {
+ dev_err(dev, "Exceeded max SG segment size\n");
+ return -EINVAL;
+ }
+ cidx = acnt * bcnt;
+ }
+
+ if (direction == DMA_MEM_TO_DEV) {
+ src_bidx = acnt;
+ src_cidx = cidx;
+ dst_bidx = 0;
+ dst_cidx = 0;
+ } else if (direction == DMA_DEV_TO_MEM) {
+ src_bidx = 0;
+ src_cidx = 0;
+ dst_bidx = acnt;
+ dst_cidx = cidx;
+ } else {
+ dev_err(dev, "%s: direction not implemented yet\n", __func__);
+ return -EINVAL;
+ }
+
+ pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+ /* Configure A or AB synchronized transfers */
+ if (absync)
+ pset->opt |= SYNCDIM;
+
+ pset->src = src_addr;
+ pset->dst = dst_addr;
+
+ pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+ pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+
+ pset->a_b_cnt = bcnt << 16 | acnt;
+ pset->ccnt = ccnt;
+ /*
+ * Only time when (bcntrld) auto reload is required is for
+ * A-sync case, and in this case, a requirement of reload value
+ * of SZ_64K-1 only is assured. 'link' is initially set to NULL
+ * and then later will be populated by edma_execute.
+ */
+ pset->link_bcntrld = 0xffffffff;
+ return absync;
+}
+
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -258,23 +375,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
struct edma_desc *edesc;
- dma_addr_t dev_addr;
+ dma_addr_t src_addr = 0, dst_addr = 0;
enum dma_slave_buswidth dev_width;
u32 burst;
struct scatterlist *sg;
- int acnt, bcnt, ccnt, src, dst, cidx;
- int src_bidx, dst_bidx, src_cidx, dst_cidx;
- int i, nslots;
+ int i, nslots, ret;
if (unlikely(!echan || !sgl || !sg_len))
return NULL;
if (direction == DMA_DEV_TO_MEM) {
- dev_addr = echan->cfg.src_addr;
+ src_addr = echan->cfg.src_addr;
dev_width = echan->cfg.src_addr_width;
burst = echan->cfg.src_maxburst;
} else if (direction == DMA_MEM_TO_DEV) {
- dev_addr = echan->cfg.dst_addr;
+ dst_addr = echan->cfg.dst_addr;
dev_width = echan->cfg.dst_addr_width;
burst = echan->cfg.dst_maxburst;
} else {
@@ -305,6 +420,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
edma_alloc_slot(EDMA_CTLR(echan->ch_num),
EDMA_SLOT_ANY);
if (echan->slot[i] < 0) {
+ kfree(edesc);
dev_err(dev, "Failed to allocate slot\n");
- kfree(edesc);
return NULL;
@@ -314,63 +430,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
/* Configure PaRAM sets for each SG */
for_each_sg(sgl, sg, sg_len, i) {
-
- acnt = dev_width;
-
- /*
- * If the maxburst is equal to the fifo width, use
- * A-synced transfers. This allows for large contiguous
- * buffer transfers using only one PaRAM set.
- */
- if (burst == 1) {
- edesc->absync = false;
- ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
- bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
- if (bcnt)
- ccnt++;
- else
- bcnt = SZ_64K - 1;
- cidx = acnt;
- /*
- * If maxburst is greater than the fifo address_width,
- * use AB-synced transfers where A count is the fifo
- * address_width and B count is the maxburst. In this
- * case, we are limited to transfers of C count frames
- * of (address_width * maxburst) where C count is limited
- * to SZ_64K-1. This places an upper bound on the length
- * of an SG segment that can be handled.
- */
- } else {
- edesc->absync = true;
- bcnt = burst;
- ccnt = sg_dma_len(sg) / (acnt * bcnt);
- if (ccnt > (SZ_64K - 1)) {
- dev_err(dev, "Exceeded max SG segment size\n");
- return NULL;
- }
- cidx = acnt * bcnt;
- }
-
- if (direction == DMA_MEM_TO_DEV) {
- src = sg_dma_address(sg);
- dst = dev_addr;
- src_bidx = acnt;
- src_cidx = cidx;
- dst_bidx = 0;
- dst_cidx = 0;
- } else {
- src = dev_addr;
- dst = sg_dma_address(sg);
- src_bidx = 0;
- src_cidx = 0;
- dst_bidx = acnt;
- dst_cidx = cidx;
+ /* Get address for each SG */
+ if (direction == DMA_DEV_TO_MEM)
+ dst_addr = sg_dma_address(sg);
+ else
+ src_addr = sg_dma_address(sg);
+
+ ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+ dst_addr, burst, dev_width,
+ sg_dma_len(sg), direction);
+ if (ret < 0) {
+ kfree(edesc);
+ return NULL;
}
- edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
- /* Configure A or AB synchronized transfers */
- if (edesc->absync)
- edesc->pset[i].opt |= SYNCDIM;
+ edesc->absync = ret;
/* If this is the last in a current SG set of transactions,
enable interrupts so that next set is processed */
@@ -380,17 +454,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
/* If this is the last set, enable completion interrupt flag */
if (i == sg_len - 1)
edesc->pset[i].opt |= TCINTEN;
-
- edesc->pset[i].src = src;
- edesc->pset[i].dst = dst;
-
- edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
- edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
-
- edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
- edesc->pset[i].ccnt = ccnt;
- edesc->pset[i].link_bcntrld = 0xffffffff;
-
}
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -408,7 +471,7 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
edma_pause(echan->ch_num);
switch (ch_status) {
- case DMA_COMPLETE:
+ case EDMA_DMA_COMPLETE:
spin_lock_irqsave(&echan->vchan.lock, flags);
edesc = echan->edesc;
@@ -427,7 +490,7 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
spin_unlock_irqrestore(&echan->vchan.lock, flags);
break;
- case DMA_CC_ERROR:
+ case EDMA_DMA_CC_ERROR:
spin_lock_irqsave(&echan->vchan.lock, flags);
edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
@@ -577,7 +640,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
unsigned long flags;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS || !txstate)
+ if (ret == DMA_COMPLETE || !txstate)
return ret;
spin_lock_irqsave(&echan->vchan.lock, flags);
@@ -632,6 +695,10 @@ static int edma_probe(struct platform_device *pdev)
struct edma_cc *ecc;
int ret;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
if (!ecc) {
dev_err(&pdev->dev, "Can't allocate controller\n");
@@ -703,11 +770,13 @@ static struct platform_device *pdev0, *pdev1;
static const struct platform_device_info edma_dev_info0 = {
.name = "edma-dma-engine",
.id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
};
static const struct platform_device_info edma_dev_info1 = {
.name = "edma-dma-engine",
.id = 1,
+ .dma_mask = DMA_BIT_MASK(32),
};
static int edma_init(void)
@@ -721,8 +790,6 @@ static int edma_init(void)
ret = PTR_ERR(pdev0);
goto out;
}
- pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
- pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
if (EDMA_CTLRS == 2) {
@@ -732,8 +799,6 @@ static int edma_init(void)
platform_device_unregister(pdev0);
ret = PTR_ERR(pdev1);
}
- pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
- pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
out:
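
To make the A-sync split in edma_config_pset() concrete, here is a worked example with illustrative numbers only (a 32-bit wide FIFO, so acnt = 4, and an 800000-byte transfer); the variable names mirror the function above:

	unsigned int acnt = 4;                           /* dev_width in bytes */
	unsigned int len = 800000;                       /* dma_length */
	unsigned int elems = len / acnt;                 /* 200000 elements */
	unsigned int ccnt = elems / (SZ_64K - 1);        /* 3 full 65535-element frames */
	unsigned int bcnt = elems - ccnt * (SZ_64K - 1); /* 3395 elements left over */

	if (bcnt)
		ccnt++;          /* 4 frames in total: 3395 + 3 * 65535 = 200000 */
	else
		bcnt = SZ_64K - 1;
	/*
	 * The first frame moves bcnt elements; because link_bcntrld reloads
	 * 0xffff (SZ_64K - 1), every following frame moves 65535 elements.
	 */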
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index b3f3e90054f2..61517dd0d0b7 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -33,6 +33,8 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include "dmaengine.h"
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 55852c026791..6f9ac2022abd 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -572,9 +572,11 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));
- dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
- "dma_length=%d\n", __func__, imxdmac->channel,
- d->dest, d->src, d->len);
+ dev_dbg(imxdma->dev,
+ "%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
+ __func__, imxdmac->channel,
+ (unsigned long long)d->dest,
+ (unsigned long long)d->src, d->len);
break;
/* Cyclic transfer is the same as slave_sg with special sg configuration. */
@@ -586,20 +588,22 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
DMA_CCR(imxdmac->channel));
- dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
- "total length=%d dev_addr=0x%08x (dev2mem)\n",
- __func__, imxdmac->channel, d->sg, d->sgcount,
- d->len, imxdmac->per_address);
+ dev_dbg(imxdma->dev,
+ "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
+ __func__, imxdmac->channel,
+ d->sg, d->sgcount, d->len,
+ (unsigned long long)imxdmac->per_address);
} else if (d->direction == DMA_MEM_TO_DEV) {
imx_dmav1_writel(imxdma, imxdmac->per_address,
DMA_DAR(imxdmac->channel));
imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
DMA_CCR(imxdmac->channel));
- dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
- "total length=%d dev_addr=0x%08x (mem2dev)\n",
- __func__, imxdmac->channel, d->sg, d->sgcount,
- d->len, imxdmac->per_address);
+ dev_dbg(imxdma->dev,
+ "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
+ __func__, imxdmac->channel,
+ d->sg, d->sgcount, d->len,
+ (unsigned long long)imxdmac->per_address);
} else {
dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
__func__, imxdmac->channel);
@@ -771,7 +775,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
desc->desc.tx_submit = imxdma_tx_submit;
/* txd.flags will be overwritten in prep funcs */
desc->desc.flags = DMA_CTRL_ACK;
- desc->status = DMA_SUCCESS;
+ desc->status = DMA_COMPLETE;
list_add_tail(&desc->node, &imxdmac->ld_free);
imxdmac->descs_allocated++;
@@ -870,7 +874,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
int i;
unsigned int periods = buf_len / period_len;
- dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
+ dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
__func__, imxdmac->channel, buf_len, period_len);
if (list_empty(&imxdmac->ld_free) ||
@@ -926,8 +930,9 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct imxdma_desc *desc;
- dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
- __func__, imxdmac->channel, src, dest, len);
+ dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
+ __func__, imxdmac->channel, (unsigned long long)src,
+ (unsigned long long)dest, len);
if (list_empty(&imxdmac->ld_free) ||
imxdma_chan_is_doing_cyclic(imxdmac))
@@ -956,9 +961,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct imxdma_desc *desc;
- dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
- " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
- imxdmac->channel, xt->src_start, xt->dst_start,
+ dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
+ " src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
+ imxdmac->channel, (unsigned long long)xt->src_start,
+ (unsigned long long) xt->dst_start,
xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
xt->numf, xt->frame_size);
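
The imx-dma hunks above also show the portable way to print DMA addresses and sizes: dma_addr_t may be 32 or 64 bits wide depending on the configuration, so it is cast to unsigned long long and printed with %llx, while size_t values use %zu. A one-line sketch, assuming src and dst are dma_addr_t and len is size_t:

	dev_dbg(dev, "xfer src=0x%08llx dst=0x%08llx len=%zu\n",
		(unsigned long long)src, (unsigned long long)dst, len);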
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index fc43603cf0bb..e43c040dfe0b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
if (error)
sdmac->status = DMA_ERROR;
else
- sdmac->status = DMA_SUCCESS;
+ sdmac->status = DMA_COMPLETE;
dma_cookie_complete(&sdmac->desc);
if (sdmac->desc.callback)
@@ -1432,6 +1432,10 @@ static int __init sdma_probe(struct platform_device *pdev)
return -EINVAL;
}
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
if (!sdma)
return -ENOMEM;
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index a975ebebea8a..1aab8130efa1 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
callback_txd(param_txd);
}
if (midc->raw_tfr) {
- desc->status = DMA_SUCCESS;
+ desc->status = DMA_COMPLETE;
if (desc->lli != NULL) {
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
@@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
+ if (ret != DMA_COMPLETE) {
spin_lock_bh(&midc->lock);
midc_scan_descriptors(to_middma_device(chan->device), midc);
spin_unlock_bh(&midc->lock);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 5ff6fc1819dc..a0f0fce5a84e 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -733,7 +733,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(c, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
device->cleanup_fn((unsigned long) c);
@@ -859,7 +859,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
if (tmo == 0 ||
dma->device_tx_status(dma_chan, cookie, NULL)
- != DMA_SUCCESS) {
+ != DMA_COMPLETE) {
dev_err(dev, "Self-test copy timed out, disabling\n");
err = -ENODEV;
goto unmap_dma;
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index d8ececaf1b57..806b4ce5e38c 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -807,7 +807,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(c, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
ioat3_cleanup(ioat);
@@ -1468,7 +1468,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test xor timed out\n");
err = -ENODEV;
goto dma_unmap;
@@ -1530,7 +1530,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test validate timed out\n");
err = -ENODEV;
goto dma_unmap;
@@ -1577,7 +1577,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test 2nd validate timed out\n");
err = -ENODEV;
goto dma_unmap;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index dd8b44a56e5d..408fe6be15f4 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -864,7 +864,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
int ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
iop_adma_slot_cleanup(iop_chan);
@@ -983,7 +983,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
msleep(1);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
@@ -1083,7 +1083,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
err = -ENODEV;
@@ -1129,7 +1129,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1158,7 +1158,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test non-zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1254,7 +1254,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dev, "Self-test pq timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1291,7 +1291,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1323,7 +1323,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a2c330f5f952..e26075408e9b 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
size_t bytes = 0;
ret = dma_cookie_status(&c->vc.chan, cookie, state);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
spin_lock_irqsave(&c->vc.lock, flags);
@@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op)
irq = platform_get_irq(op, 0);
ret = devm_request_irq(&op->dev, irq,
- k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d);
+ k3_dma_int_handler, 0, DRIVER_NAME, d);
if (ret)
return ret;
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index ff8d7827f8cb..dcb1e05149a7 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data)
* move the descriptors to a temporary list so we can drop
* the lock during the entire cleanup operation
*/
- list_del(&desc->node);
- list_add(&desc->node, &chain_cleanup);
+ list_move(&desc->node, &chain_cleanup);
/*
* Look for the first list entry which has the ENDIRQEN flag
@@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
if (irq) {
ret = devm_request_irq(pdev->dev, irq,
- mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
+ mmp_pdma_chan_handler, 0, "pdma", phy);
if (ret) {
dev_err(pdev->dev, "channel request irq fail!\n");
return ret;
@@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op)
/* all chan share one irq, demux inside */
irq = platform_get_irq(op, 0);
ret = devm_request_irq(pdev->dev, irq,
- mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
+ mmp_pdma_int_handler, 0, "pdma", pdev);
if (ret)
return ret;
}
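
The mmp_pdma tasklet change is a pure simplification: list_move() is defined as a list_del() followed by a list_add(), so the two-call sequence collapses into one with identical behaviour:

	/* before */
	list_del(&desc->node);
	list_add(&desc->node, &chain_cleanup);

	/* after */
	list_move(&desc->node, &chain_cleanup);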
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 38cb517fb2eb..2b4026d1f31d 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -62,6 +62,11 @@
#define TDCR_BURSTSZ_16B (0x3 << 6)
#define TDCR_BURSTSZ_32B (0x6 << 6)
#define TDCR_BURSTSZ_64B (0x7 << 6)
+#define TDCR_BURSTSZ_SQU_1B (0x5 << 6)
+#define TDCR_BURSTSZ_SQU_2B (0x6 << 6)
+#define TDCR_BURSTSZ_SQU_4B (0x0 << 6)
+#define TDCR_BURSTSZ_SQU_8B (0x1 << 6)
+#define TDCR_BURSTSZ_SQU_16B (0x3 << 6)
#define TDCR_BURSTSZ_SQU_32B (0x7 << 6)
#define TDCR_BURSTSZ_128B (0x5 << 6)
#define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */
@@ -158,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
/* disable irq */
writel(0, tdmac->reg_base + TDIMR);
- tdmac->status = DMA_SUCCESS;
+ tdmac->status = DMA_COMPLETE;
}
static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
@@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
return -EINVAL;
}
} else if (tdmac->type == PXA910_SQU) {
- tdcr |= TDCR_BURSTSZ_SQU_32B;
tdcr |= TDCR_SSPMOD;
+
+ switch (tdmac->burst_sz) {
+ case 1:
+ tdcr |= TDCR_BURSTSZ_SQU_1B;
+ break;
+ case 2:
+ tdcr |= TDCR_BURSTSZ_SQU_2B;
+ break;
+ case 4:
+ tdcr |= TDCR_BURSTSZ_SQU_4B;
+ break;
+ case 8:
+ tdcr |= TDCR_BURSTSZ_SQU_8B;
+ break;
+ case 16:
+ tdcr |= TDCR_BURSTSZ_SQU_16B;
+ break;
+ case 32:
+ tdcr |= TDCR_BURSTSZ_SQU_32B;
+ break;
+ default:
+ dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+ return -EINVAL;
+ }
}
writel(tdcr, tdmac->reg_base + TDCR);
@@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
if (tdmac->irq) {
ret = devm_request_irq(tdmac->dev, tdmac->irq,
- mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac);
+ mmp_tdma_chan_handler, 0, "tdma", tdmac);
if (ret)
return ret;
}
@@ -370,7 +398,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
int num_periods = buf_len / period_len;
int i = 0, buf = 0;
- if (tdmac->status != DMA_SUCCESS)
+ if (tdmac->status != DMA_COMPLETE)
return NULL;
if (period_len > TDMA_MAX_XFER_BYTES) {
@@ -504,7 +532,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
tdmac->idx = idx;
tdmac->type = type;
tdmac->reg_base = (unsigned long)tdev->base + idx * 4;
- tdmac->status = DMA_SUCCESS;
+ tdmac->status = DMA_COMPLETE;
tdev->tdmac[tdmac->idx] = tdmac;
tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
@@ -559,7 +587,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
if (irq_num != chan_num) {
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq,
- mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
+ mmp_tdma_int_handler, 0, "tdma", tdev);
if (ret)
return ret;
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 2fe435377333..448750da4402 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -39,7 +39,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/random.h>
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 536dcb8ba5fd..8d5bce9e867e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -749,7 +749,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS) {
+ if (ret == DMA_COMPLETE) {
mv_xor_clean_completed_slots(mv_chan);
return ret;
}
@@ -874,7 +874,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
msleep(1);
if (mv_xor_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
@@ -968,7 +968,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
msleep(8);
if (mv_xor_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
err = -ENODEV;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index ccd13df841db..7ab7cecc48a4 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -224,7 +224,7 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
- mxs_chan->status = DMA_SUCCESS;
+ mxs_chan->status = DMA_COMPLETE;
}
static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
@@ -312,12 +312,12 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
if (mxs_chan->flags & MXS_DMA_SG_LOOP)
mxs_chan->status = DMA_IN_PROGRESS;
else
- mxs_chan->status = DMA_SUCCESS;
+ mxs_chan->status = DMA_COMPLETE;
}
stat1 &= ~(1 << channel);
- if (mxs_chan->status == DMA_SUCCESS)
+ if (mxs_chan->status == DMA_COMPLETE)
dma_cookie_complete(&mxs_chan->desc);
/* schedule tasklet on this channel */
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index ec3fc4fd9160..2f66cf4e54fe 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
unsigned long flags;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS || !txstate)
+ if (ret == DMA_COMPLETE || !txstate)
return ret;
spin_lock_irqsave(&c->vc.lock, flags);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a562d24d20bf..4b2583c675a9 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2903,6 +2903,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pdat = dev_get_platdata(&adev->dev);
+ ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
/* Allocate a new DMAC and its Channels */
pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
if (!pdmac) {
@@ -2922,16 +2926,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pdmac);
- irq = adev->irq[0];
- ret = request_irq(irq, pl330_irq_handler, 0,
- dev_name(&adev->dev), pi);
- if (ret)
- return ret;
+ for (i = 0; i < AMBA_NR_IRQS; i++) {
+ irq = adev->irq[i];
+ if (irq) {
+ ret = devm_request_irq(&adev->dev, irq,
+ pl330_irq_handler, 0,
+ dev_name(&adev->dev), pi);
+ if (ret)
+ return ret;
+ } else {
+ break;
+ }
+ }
pi->pcfg.periph_id = adev->periphid;
ret = pl330_add(pi);
if (ret)
- goto probe_err1;
+ return ret;
INIT_LIST_HEAD(&pdmac->desc_pool);
spin_lock_init(&pdmac->pool_lock);
@@ -3029,8 +3040,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
probe_err3:
- amba_set_drvdata(adev, NULL);
-
/* Idle the DMAC */
list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
chan.device_node) {
@@ -3044,8 +3053,6 @@ probe_err3:
}
probe_err2:
pl330_del(pi);
-probe_err1:
- free_irq(irq, pi);
return ret;
}
@@ -3055,7 +3062,6 @@ static int pl330_remove(struct amba_device *adev)
struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
struct pl330_info *pi;
- int irq;
if (!pdmac)
return 0;
@@ -3064,7 +3070,6 @@ static int pl330_remove(struct amba_device *adev)
of_dma_controller_free(adev->dev.of_node);
dma_async_device_unregister(&pdmac->ddma);
- amba_set_drvdata(adev, NULL);
/* Idle the DMAC */
list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
@@ -3082,9 +3087,6 @@ static int pl330_remove(struct amba_device *adev)
pl330_del(pi);
- irq = adev->irq[0];
- free_irq(irq, pi);
-
return 0;
}
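
The pl330 probe now walks every interrupt the AMBA core discovered instead of requesting only irq[0], and switches to devm_request_irq() so the error paths and remove() no longer need free_irq(). A hedged sketch of that loop (foo_irq_handler and priv are placeholders; adev->irq[] holds AMBA_NR_IRQS entries, with unused slots left at zero):

	int i, irq, ret;

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (!irq)
			break;		/* no more interrupts for this device */

		ret = devm_request_irq(&adev->dev, irq, foo_irq_handler, 0,
				       dev_name(&adev->dev), priv);
		if (ret)
			return ret;	/* devres frees any already-requested IRQs */
	}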
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 370ff8265630..60e02ae38b04 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -3891,7 +3891,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
ppc440spe_chan = to_ppc440spe_adma_chan(chan);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
ppc440spe_adma_slot_cleanup(ppc440spe_chan);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
new file mode 100644
index 000000000000..4cb127978636
--- /dev/null
+++ b/drivers/dma/s3c24xx-dma.c
@@ -0,0 +1,1350 @@
+/*
+ * S3C24XX DMA handling
+ *
+ * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on amba-pl08x.c
+ *
+ * Copyright (c) 2006 ARM Ltd.
+ * Copyright (c) 2010 ST-Ericsson SA
+ *
+ * Author: Peter Pearse <peter.pearse@arm.com>
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
+ * that can be routed to any of the 4 to 8 hardware-channels.
+ *
+ * Therefore on these DMA controllers the number of channels
+ * and the number of incoming DMA signals are two totally different things.
+ * It is usually not possible to theoretically handle all physical signals,
+ * so a multiplexing scheme with possible denial of use is necessary.
+ *
+ * Open items:
+ * - bursts
+ */
+
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_data/dma-s3c24xx.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define MAX_DMA_CHANNELS 8
+
+#define S3C24XX_DISRC 0x00
+#define S3C24XX_DISRCC 0x04
+#define S3C24XX_DISRCC_INC_INCREMENT 0
+#define S3C24XX_DISRCC_INC_FIXED BIT(0)
+#define S3C24XX_DISRCC_LOC_AHB 0
+#define S3C24XX_DISRCC_LOC_APB BIT(1)
+
+#define S3C24XX_DIDST 0x08
+#define S3C24XX_DIDSTC 0x0c
+#define S3C24XX_DIDSTC_INC_INCREMENT 0
+#define S3C24XX_DIDSTC_INC_FIXED BIT(0)
+#define S3C24XX_DIDSTC_LOC_AHB 0
+#define S3C24XX_DIDSTC_LOC_APB BIT(1)
+#define S3C24XX_DIDSTC_INT_TC0 0
+#define S3C24XX_DIDSTC_INT_RELOAD BIT(2)
+
+#define S3C24XX_DCON 0x10
+
+#define S3C24XX_DCON_TC_MASK 0xfffff
+#define S3C24XX_DCON_DSZ_BYTE (0 << 20)
+#define S3C24XX_DCON_DSZ_HALFWORD (1 << 20)
+#define S3C24XX_DCON_DSZ_WORD (2 << 20)
+#define S3C24XX_DCON_DSZ_MASK (3 << 20)
+#define S3C24XX_DCON_DSZ_SHIFT 20
+#define S3C24XX_DCON_AUTORELOAD 0
+#define S3C24XX_DCON_NORELOAD BIT(22)
+#define S3C24XX_DCON_HWTRIG BIT(23)
+#define S3C24XX_DCON_HWSRC_SHIFT 24
+#define S3C24XX_DCON_SERV_SINGLE 0
+#define S3C24XX_DCON_SERV_WHOLE BIT(27)
+#define S3C24XX_DCON_TSZ_UNIT 0
+#define S3C24XX_DCON_TSZ_BURST4 BIT(28)
+#define S3C24XX_DCON_INT BIT(29)
+#define S3C24XX_DCON_SYNC_PCLK 0
+#define S3C24XX_DCON_SYNC_HCLK BIT(30)
+#define S3C24XX_DCON_DEMAND 0
+#define S3C24XX_DCON_HANDSHAKE BIT(31)
+
+#define S3C24XX_DSTAT 0x14
+#define S3C24XX_DSTAT_STAT_BUSY BIT(20)
+#define S3C24XX_DSTAT_CURRTC_MASK 0xfffff
+
+#define S3C24XX_DMASKTRIG 0x20
+#define S3C24XX_DMASKTRIG_SWTRIG BIT(0)
+#define S3C24XX_DMASKTRIG_ON BIT(1)
+#define S3C24XX_DMASKTRIG_STOP BIT(2)
+
+#define S3C24XX_DMAREQSEL 0x24
+#define S3C24XX_DMAREQSEL_HW BIT(0)
+
+/*
+ * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
+ * for a DMA source. Instead only specific channels are valid.
+ * All of these SoCs have 4 physical channels and the number of request
+ * source bits is 3. Additionally we also need 1 bit to mark the channel
+ * as valid.
+ * Therefore we separate the chansel element of the channel data into 4
+ * parts of 4 bits each, to hold the information if the channel is valid
+ * and the hw request source to use.
+ *
+ * Example:
+ * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
+ * For it the chansel field would look like
+ *
+ * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
+ * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
+ * ((BIT(3) | 2) << 0 * 4) // channel 0, with request source 2
+ */
+#define S3C24XX_CHANSEL_WIDTH 4
+#define S3C24XX_CHANSEL_VALID BIT(3)
+#define S3C24XX_CHANSEL_REQ_MASK 7
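/*
 * Illustrative helper, not part of this patch: decode the per-phy nibble
 * described in the comment above. Returns the hardware request source for
 * phy_id, or -1 if the channel cannot be serviced by that physical channel.
 * This mirrors what s3c24xx_dma_phy_valid() and the non-reqsel path of
 * s3c24xx_dma_start_next_sg() do further down.
 */
static inline int s3c24xx_chansel_decode(u32 chansel, unsigned int phy_id)
{
	u32 nibble = (chansel >> (phy_id * S3C24XX_CHANSEL_WIDTH)) & 0xf;

	if (!(nibble & S3C24XX_CHANSEL_VALID))
		return -1;
	return nibble & S3C24XX_CHANSEL_REQ_MASK;
}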
+
+/*
+ * struct soc_data - vendor-specific config parameters for individual SoCs
+ * @stride: spacing between the registers of each channel
+ * @has_reqsel: does the controller use the newer request-selection mechanism
+ * @has_clocks: are controllable dma-clocks present
+ */
+struct soc_data {
+ int stride;
+ bool has_reqsel;
+ bool has_clocks;
+};
+
+/*
+ * enum s3c24xx_dma_chan_state - holds the virtual channel states
+ * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
+ * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum s3c24xx_dma_chan_state {
+ S3C24XX_DMA_CHAN_IDLE,
+ S3C24XX_DMA_CHAN_RUNNING,
+ S3C24XX_DMA_CHAN_WAITING,
+};
+
+/*
+ * struct s3c24xx_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct s3c24xx_sg {
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ size_t len;
+ struct list_head node;
+};
+
+/*
+ * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
+ * @vd: virtual DMA descriptor
+ * @dsg_list: list of children sg's
+ * @at: sg currently being transferred
+ * @width: transfer width
+ * @disrcc: value for source control register
+ * @didstc: value for destination control register
+ * @dcon: base value for dcon register
+ */
+struct s3c24xx_txd {
+ struct virt_dma_desc vd;
+ struct list_head dsg_list;
+ struct list_head *at;
+ u8 width;
+ u32 disrcc;
+ u32 didstc;
+ u32 dcon;
+};
+
+struct s3c24xx_dma_chan;
+
+/*
+ * struct s3c24xx_dma_phy - holder for the physical channels
+ * @id: physical index to this channel
+ * @valid: does the channel have all required elements
+ * @base: virtual memory base (remapped) for this channel
+ * @irq: interrupt for this channel
+ * @clk: clock for this channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @serving: virtual channel currently being served by this physical channel
+ * @host: a pointer to the host (internal use)
+ */
+struct s3c24xx_dma_phy {
+ unsigned int id;
+ bool valid;
+ void __iomem *base;
+ unsigned int irq;
+ struct clk *clk;
+ spinlock_t lock;
+ struct s3c24xx_dma_chan *serving;
+ struct s3c24xx_dma_engine *host;
+};
+
+/*
+ * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
+ * @id: the id of the channel
+ * @name: name of the channel
+ * @vc: wrapped virtual channel
+ * @phy: the physical channel utilized by this channel, if there is one
+ * @runtime_addr: address for RX/TX according to the runtime config
+ * @at: active transaction on this channel
+ * @lock: a lock for this channel data
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ */
+struct s3c24xx_dma_chan {
+ int id;
+ const char *name;
+ struct virt_dma_chan vc;
+ struct s3c24xx_dma_phy *phy;
+ struct dma_slave_config cfg;
+ struct s3c24xx_txd *at;
+ struct s3c24xx_dma_engine *host;
+ enum s3c24xx_dma_chan_state state;
+ bool slave;
+};
+
+/*
+ * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
+ * @pdev: the corresponding platform device
+ * @pdata: platform data passed in from the platform/machine
+ * @base: virtual memory base (remapped)
+ * @slave: slave engine for this instance
+ * @memcpy: memcpy engine for this instance
+ * @phy_chans: array of data for the physical channels
+ */
+struct s3c24xx_dma_engine {
+ struct platform_device *pdev;
+ const struct s3c24xx_dma_platdata *pdata;
+ struct soc_data *sdata;
+ void __iomem *base;
+ struct dma_device slave;
+ struct dma_device memcpy;
+ struct s3c24xx_dma_phy *phy_chans;
+};
+
+/*
+ * Physical channel handling
+ */
+
+/*
+ * Check whether a certain channel is busy or not.
+ */
+static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
+{
+ unsigned int val = readl(phy->base + S3C24XX_DSTAT);
+ return val & S3C24XX_DSTAT_STAT_BUSY;
+}
+
+static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
+ struct s3c24xx_dma_phy *phy)
+{
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+ struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
+ int phyvalid;
+
+ /* every phy is valid for memcpy channels */
+ if (!s3cchan->slave)
+ return true;
+
+ /* On newer variants all phys can be used for all virtual channels */
+ if (s3cdma->sdata->has_reqsel)
+ return true;
+
+ phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
+ return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
+}
+
+/*
+ * Allocate a physical channel for a virtual channel
+ *
+ * Try to locate a physical channel to be used for this transfer. If all
+ * are taken return NULL and the requester will have to cope by using
+ * some fallback PIO mode or retrying later.
+ */
+static
+struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
+{
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+ struct s3c24xx_dma_channel *cdata;
+ struct s3c24xx_dma_phy *phy = NULL;
+ unsigned long flags;
+ int i;
+ int ret;
+
+ if (s3cchan->slave)
+ cdata = &pdata->channels[s3cchan->id];
+
+ for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
+ phy = &s3cdma->phy_chans[i];
+
+ if (!phy->valid)
+ continue;
+
+ if (!s3c24xx_dma_phy_valid(s3cchan, phy))
+ continue;
+
+ spin_lock_irqsave(&phy->lock, flags);
+
+ if (!phy->serving) {
+ phy->serving = s3cchan;
+ spin_unlock_irqrestore(&phy->lock, flags);
+ break;
+ }
+
+ spin_unlock_irqrestore(&phy->lock, flags);
+ }
+
+ /* No physical channel available, cope with it */
+ if (i == s3cdma->pdata->num_phy_channels) {
+ dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
+ return NULL;
+ }
+
+ /* start the phy clock */
+ if (s3cdma->sdata->has_clocks) {
+ ret = clk_enable(phy->clk);
+ if (ret) {
+ dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
+ phy->id, ret);
+ phy->serving = NULL;
+ return NULL;
+ }
+ }
+
+ return phy;
+}
+
+/*
+ * Mark the physical channel as free.
+ *
+ * This drops the link between the physical and virtual channel.
+ */
+static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
+{
+ struct s3c24xx_dma_engine *s3cdma = phy->host;
+
+ if (s3cdma->sdata->has_clocks)
+ clk_disable(phy->clk);
+
+ phy->serving = NULL;
+}
+
+/*
+ * Stops the channel by writing the stop bit.
+ * This should not be used for an on-going transfer, but as a method of
+ * shutting down a channel (eg, when it's no longer used) or terminating a
+ * transfer.
+ */
+static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
+{
+ writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
+}
+
+/*
+ * Virtual channel handling
+ */
+
+static inline
+struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
+}
+
+static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
+{
+ struct s3c24xx_dma_phy *phy = s3cchan->phy;
+ struct s3c24xx_txd *txd = s3cchan->at;
+ u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;
+
+ return tc * txd->width;
+}
+
+static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan,
+ struct dma_slave_config *config)
+{
+ if (!s3cchan->slave)
+ return -EINVAL;
+
+ /* Reject definitely invalid configurations */
+ if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ return -EINVAL;
+
+ s3cchan->cfg = *config;
+
+ return 0;
+}
+
+/*
+ * Transfer handling
+ */
+
+static inline
+struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct s3c24xx_txd, vd.tx);
+}
+
+static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
+{
+ struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+
+ if (txd) {
+ INIT_LIST_HEAD(&txd->dsg_list);
+ txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
+ }
+
+ return txd;
+}
+
+static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
+{
+ struct s3c24xx_sg *dsg, *_dsg;
+
+ list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
+ list_del(&dsg->node);
+ kfree(dsg);
+ }
+
+ kfree(txd);
+}
+
+static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
+ struct s3c24xx_txd *txd)
+{
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ struct s3c24xx_dma_phy *phy = s3cchan->phy;
+ const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+ struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
+ u32 dcon = txd->dcon;
+ u32 val;
+
+ /* transfer-size and -count from len and width */
+ switch (txd->width) {
+ case 1:
+ dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
+ break;
+ case 2:
+ dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
+ break;
+ case 4:
+ dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
+ break;
+ }
+
+ if (s3cchan->slave) {
+ struct s3c24xx_dma_channel *cdata =
+ &pdata->channels[s3cchan->id];
+
+ if (s3cdma->sdata->has_reqsel) {
+ writel_relaxed((cdata->chansel << 1) |
+ S3C24XX_DMAREQSEL_HW,
+ phy->base + S3C24XX_DMAREQSEL);
+ } else {
+ int csel = cdata->chansel >> (phy->id *
+ S3C24XX_CHANSEL_WIDTH);
+
+ csel &= S3C24XX_CHANSEL_REQ_MASK;
+ dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
+ dcon |= S3C24XX_DCON_HWTRIG;
+ }
+ } else {
+ if (s3cdma->sdata->has_reqsel)
+ writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
+ }
+
+ writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
+ writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
+ writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
+ writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
+ writel_relaxed(dcon, phy->base + S3C24XX_DCON);
+
+ val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
+ val &= ~S3C24XX_DMASKTRIG_STOP;
+ val |= S3C24XX_DMASKTRIG_ON;
+
+ /* trigger the dma operation for memcpy transfers */
+ if (!s3cchan->slave)
+ val |= S3C24XX_DMASKTRIG_SWTRIG;
+
+ writel(val, phy->base + S3C24XX_DMASKTRIG);
+}
+
+/*
+ * Set the initial DMA register values and start first sg.
+ */
+static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
+{
+ struct s3c24xx_dma_phy *phy = s3cchan->phy;
+ struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
+ struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
+
+ list_del(&txd->vd.node);
+
+ s3cchan->at = txd;
+
+ /* Wait for channel inactive */
+ while (s3c24xx_dma_phy_busy(phy))
+ cpu_relax();
+
+ /* point to the first element of the sg list */
+ txd->at = txd->dsg_list.next;
+ s3c24xx_dma_start_next_sg(s3cchan, txd);
+}
+
+static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
+ struct s3c24xx_dma_chan *s3cchan)
+{
+ LIST_HEAD(head);
+
+ vchan_get_all_descriptors(&s3cchan->vc, &head);
+ vchan_dma_desc_free_list(&s3cchan->vc, &head);
+}
+
+/*
+ * Try to allocate a physical channel. When successful, assign it to
+ * this virtual channel, and initiate the next descriptor. The
+ * virtual channel lock must be held at this point.
+ */
+static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
+{
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ struct s3c24xx_dma_phy *phy;
+
+ phy = s3c24xx_dma_get_phy(s3cchan);
+ if (!phy) {
+ dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
+ s3cchan->name);
+ s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
+ return;
+ }
+
+ dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
+ phy->id, s3cchan->name);
+
+ s3cchan->phy = phy;
+ s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
+
+ s3c24xx_dma_start_next_txd(s3cchan);
+}
+
+static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
+ struct s3c24xx_dma_chan *s3cchan)
+{
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+
+ dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
+ phy->id, s3cchan->name);
+
+ /*
+ * We do this without taking the lock; we're really only concerned
+ * about whether this pointer is NULL or not, and we're guaranteed
+ * that this will only be called when it _already_ is non-NULL.
+ */
+ phy->serving = s3cchan;
+ s3cchan->phy = phy;
+ s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
+ s3c24xx_dma_start_next_txd(s3cchan);
+}
+
+/*
+ * Free a physical DMA channel, potentially reallocating it to another
+ * virtual channel if we have any pending.
+ */
+static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
+{
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ struct s3c24xx_dma_chan *p, *next;
+
+retry:
+ next = NULL;
+
+ /* Find a waiting virtual channel for the next transfer. */
+ list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
+ if (p->state == S3C24XX_DMA_CHAN_WAITING) {
+ next = p;
+ break;
+ }
+
+ if (!next) {
+ list_for_each_entry(p, &s3cdma->slave.channels,
+ vc.chan.device_node)
+ if (p->state == S3C24XX_DMA_CHAN_WAITING &&
+ s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
+ next = p;
+ break;
+ }
+ }
+
+ /* Ensure that the physical channel is stopped */
+ s3c24xx_dma_terminate_phy(s3cchan->phy);
+
+ if (next) {
+ bool success;
+
+ /*
+ * Eww. We know this isn't going to deadlock
+ * but lockdep probably doesn't.
+ */
+ spin_lock(&next->vc.lock);
+ /* Re-check the state now that we have the lock */
+ success = next->state == S3C24XX_DMA_CHAN_WAITING;
+ if (success)
+ s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
+ spin_unlock(&next->vc.lock);
+
+ /* If the state changed, try to find another channel */
+ if (!success)
+ goto retry;
+ } else {
+ /* No more jobs, so free up the physical channel */
+ s3c24xx_dma_put_phy(s3cchan->phy);
+ }
+
+ s3cchan->phy = NULL;
+ s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
+}
+
+static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd)
+{
+ struct device *dev = txd->vd.tx.chan->device->dev;
+ struct s3c24xx_sg *dsg;
+
+ if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
+ else {
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
+ }
+ }
+
+ if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
+ else
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
+ }
+}
+
+static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
+{
+ struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
+
+ if (!s3cchan->slave)
+ s3c24xx_dma_unmap_buffers(txd);
+
+ s3c24xx_dma_free_txd(txd);
+}
+
+static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
+{
+ struct s3c24xx_dma_phy *phy = data;
+ struct s3c24xx_dma_chan *s3cchan = phy->serving;
+ struct s3c24xx_txd *txd;
+
+ dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);
+
+ /*
+ * Interrupts happen to notify the completion of a transfer and the
+ * channel should have moved into its stop state already on its own.
+ * Therefore interrupts on channels not bound to a virtual channel
+ * should never happen. Nevertheless send a terminate command to the
+ * channel if the unlikely case happens.
+ */
+ if (unlikely(!s3cchan)) {
+ dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
+ phy->id);
+
+ s3c24xx_dma_terminate_phy(phy);
+
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(&s3cchan->vc.lock);
+ txd = s3cchan->at;
+ if (txd) {
+ /* when more sg's are in this txd, start the next one */
+ if (!list_is_last(txd->at, &txd->dsg_list)) {
+ txd->at = txd->at->next;
+ s3c24xx_dma_start_next_sg(s3cchan, txd);
+ } else {
+ s3cchan->at = NULL;
+ vchan_cookie_complete(&txd->vd);
+
+ /*
+ * And start the next descriptor (if any),
+ * otherwise free this channel.
+ */
+ if (vchan_next_desc(&s3cchan->vc))
+ s3c24xx_dma_start_next_txd(s3cchan);
+ else
+ s3c24xx_dma_phy_free(s3cchan);
+ }
+ }
+ spin_unlock(&s3cchan->vc.lock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * The DMA ENGINE API
+ */
+
+static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&s3cchan->vc.lock, flags);
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ ret = s3c24xx_dma_set_runtime_config(s3cchan,
+ (struct dma_slave_config *)arg);
+ break;
+ case DMA_TERMINATE_ALL:
+ if (!s3cchan->phy && !s3cchan->at) {
+ dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
+ s3cchan->id);
+ ret = -EINVAL;
+ break;
+ }
+
+ s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
+
+ /* Mark physical channel as free */
+ if (s3cchan->phy)
+ s3c24xx_dma_phy_free(s3cchan);
+
+ /* Dequeue current job */
+ if (s3cchan->at) {
+ s3c24xx_dma_desc_free(&s3cchan->at->vd);
+ s3cchan->at = NULL;
+ }
+
+ /* Dequeue jobs not yet fired as well */
+ s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
+ break;
+ default:
+ /* Unknown command */
+ ret = -ENXIO;
+ break;
+ }
+
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+
+ return ret;
+}
+
+static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
+{
+ /* Ensure all queued descriptors are freed */
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+ struct s3c24xx_txd *txd;
+ struct s3c24xx_sg *dsg;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ enum dma_status ret;
+ size_t bytes = 0;
+
+ spin_lock_irqsave(&s3cchan->vc.lock, flags);
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_SUCCESS) {
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+ return ret;
+ }
+
+ /*
+ * There's no point calculating the residue if there's
+ * no txstate to store the value.
+ */
+ if (!txstate) {
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+ return ret;
+ }
+
+ vd = vchan_find_desc(&s3cchan->vc, cookie);
+ if (vd) {
+ /* On the issued list, so hasn't been processed yet */
+ txd = to_s3c24xx_txd(&vd->tx);
+
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ bytes += dsg->len;
+ } else {
+ /*
+ * Currently running, so sum over the pending sg's and
+ * the currently active one.
+ */
+ txd = s3cchan->at;
+
+ dsg = list_entry(txd->at, struct s3c24xx_sg, node);
+ list_for_each_entry_from(dsg, &txd->dsg_list, node)
+ bytes += dsg->len;
+
+ bytes += s3c24xx_dma_getbytes_chan(s3cchan);
+ }
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+
+ /*
+ * This cookie is not complete yet.
+ * Get the number of bytes left in the active transactions and the queue.
+ */
+ dma_set_residue(txstate, bytes);
+
+ /* Whether waiting or running, we're in progress */
+ return ret;
+}
+
+/*
+ * Initialize a descriptor to be used by memcpy submit
+ */
+static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ struct s3c24xx_txd *txd;
+ struct s3c24xx_sg *dsg;
+ int src_mod, dest_mod;
+
+ dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %d bytes from %s\n",
+ len, s3cchan->name);
+
+ if ((len & S3C24XX_DCON_TC_MASK) != len) {
+ dev_err(&s3cdma->pdev->dev, "memcpy size %d to large\n", len);
+ return NULL;
+ }
+
+ txd = s3c24xx_dma_get_txd();
+ if (!txd)
+ return NULL;
+
+ dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
+ if (!dsg) {
+ s3c24xx_dma_free_txd(txd);
+ return NULL;
+ }
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->src_addr = src;
+ dsg->dst_addr = dest;
+ dsg->len = len;
+
+ /*
+ * Determine a suitable transfer width.
+ * The DMA controller cannot fetch/store information which is not
+ * naturally aligned on the bus, i.e., a 4 byte fetch must start at
+ * an address divisible by 4 - more generally addr % width must be 0.
+ */
+ src_mod = src % 4;
+ dest_mod = dest % 4;
+ switch (len % 4) {
+ case 0:
+ txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
+ break;
+ case 2:
+ txd->width = ((src_mod == 2 || src_mod == 0) &&
+ (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
+ break;
+ default:
+ txd->width = 1;
+ break;
+ }
+
+ txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
+ txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
+ txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
+ S3C24XX_DCON_SERV_WHOLE;
+
+ return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+ struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
+ struct s3c24xx_txd *txd;
+ struct s3c24xx_sg *dsg;
+ struct scatterlist *sg;
+ dma_addr_t slave_addr;
+ u32 hwcfg = 0;
+ int tmp;
+
+ dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n",
+ sg_dma_len(sgl), s3cchan->name);
+
+ txd = s3c24xx_dma_get_txd();
+ if (!txd)
+ return NULL;
+
+ if (cdata->handshake)
+ txd->dcon |= S3C24XX_DCON_HANDSHAKE;
+
+ switch (cdata->bus) {
+ case S3C24XX_DMA_APB:
+ txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
+ hwcfg |= S3C24XX_DISRCC_LOC_APB;
+ break;
+ case S3C24XX_DMA_AHB:
+ txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
+ hwcfg |= S3C24XX_DISRCC_LOC_AHB;
+ break;
+ }
+
+ /*
+ * Always assume our peripheral destination is a fixed
+ * address in memory.
+ */
+ hwcfg |= S3C24XX_DISRCC_INC_FIXED;
+
+ /*
+ * Individual dma operations are requested by the slave,
+ * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
+ */
+ txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
+ S3C24XX_DISRCC_INC_INCREMENT;
+ txd->didstc = hwcfg;
+ slave_addr = s3cchan->cfg.dst_addr;
+ txd->width = s3cchan->cfg.dst_addr_width;
+ } else if (direction == DMA_DEV_TO_MEM) {
+ txd->disrcc = hwcfg;
+ txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
+ S3C24XX_DIDSTC_INC_INCREMENT;
+ slave_addr = s3cchan->cfg.src_addr;
+ txd->width = s3cchan->cfg.src_addr_width;
+ } else {
+ s3c24xx_dma_free_txd(txd);
+ dev_err(&s3cdma->pdev->dev,
+ "direction %d unsupported\n", direction);
+ return NULL;
+ }
+
+ for_each_sg(sgl, sg, sg_len, tmp) {
+ dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
+ if (!dsg) {
+ s3c24xx_dma_free_txd(txd);
+ return NULL;
+ }
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->len = sg_dma_len(sg);
+ if (direction == DMA_MEM_TO_DEV) {
+ dsg->src_addr = sg_dma_address(sg);
+ dsg->dst_addr = slave_addr;
+ } else { /* DMA_DEV_TO_MEM */
+ dsg->src_addr = slave_addr;
+ dsg->dst_addr = sg_dma_address(sg);
+ }
+ }
+
+ return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
+}
+
+/*
+ * Issue pending descriptors on a channel, allocating and starting a
+ * physical channel for it if none is currently assigned.
+ */
+static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&s3cchan->vc.lock, flags);
+ if (vchan_issue_pending(&s3cchan->vc)) {
+ if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
+ s3c24xx_dma_phy_alloc_and_start(s3cchan);
+ }
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+}
+
+/*
+ * Bringup and teardown
+ */
+
+/*
+ * Initialise the DMAC memcpy/slave channels.
+ * Make a local wrapper to hold required data
+ */
+static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
+ struct dma_device *dmadev, unsigned int channels, bool slave)
+{
+ struct s3c24xx_dma_chan *chan;
+ int i;
+
+ INIT_LIST_HEAD(&dmadev->channels);
+
+ /*
+ * Register as many virtual channels as requested; we won't always
+ * be able to use all of them at once, but the code will have to
+ * cope with that situation.
+ */
+ for (i = 0; i < channels; i++) {
+ chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan) {
+ dev_err(dmadev->dev,
+ "%s no memory for channel\n", __func__);
+ return -ENOMEM;
+ }
+
+ chan->id = i;
+ chan->host = s3cdma;
+ chan->state = S3C24XX_DMA_CHAN_IDLE;
+
+ if (slave) {
+ chan->slave = true;
+ chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
+ if (!chan->name)
+ return -ENOMEM;
+ } else {
+ chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
+ if (!chan->name)
+ return -ENOMEM;
+ }
+ dev_dbg(dmadev->dev,
+ "initialize virtual channel \"%s\"\n",
+ chan->name);
+
+ chan->vc.desc_free = s3c24xx_dma_desc_free;
+ vchan_init(&chan->vc, dmadev);
+ }
+ dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
+ i, slave ? "slave" : "memcpy");
+ return i;
+}
+
+static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
+{
+ struct s3c24xx_dma_chan *chan = NULL;
+ struct s3c24xx_dma_chan *next;
+
+ list_for_each_entry_safe(chan,
+ next, &dmadev->channels, vc.chan.device_node)
+ list_del(&chan->vc.chan.device_node);
+}
+
+/* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
+static struct soc_data soc_s3c2410 = {
+ .stride = 0x40,
+ .has_reqsel = false,
+ .has_clocks = false,
+};
+
+/* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
+static struct soc_data soc_s3c2412 = {
+ .stride = 0x40,
+ .has_reqsel = true,
+ .has_clocks = true,
+};
+
+/* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
+static struct soc_data soc_s3c2443 = {
+ .stride = 0x100,
+ .has_reqsel = true,
+ .has_clocks = true,
+};
+
+static struct platform_device_id s3c24xx_dma_driver_ids[] = {
+ {
+ .name = "s3c2410-dma",
+ .driver_data = (kernel_ulong_t)&soc_s3c2410,
+ }, {
+ .name = "s3c2412-dma",
+ .driver_data = (kernel_ulong_t)&soc_s3c2412,
+ }, {
+ .name = "s3c2443-dma",
+ .driver_data = (kernel_ulong_t)&soc_s3c2443,
+ },
+ { },
+};
+
+static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
+{
+ return (struct soc_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
+static int s3c24xx_dma_probe(struct platform_device *pdev)
+{
+ const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct s3c24xx_dma_engine *s3cdma;
+ struct soc_data *sdata;
+ struct resource *res;
+ int ret;
+ int i;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ /* Basic sanity check */
+ if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
+ dev_err(&pdev->dev, "to many dma channels %d, max %d\n",
+ pdata->num_phy_channels, MAX_DMA_CHANNELS);
+ return -EINVAL;
+ }
+
+ sdata = s3c24xx_dma_get_soc_data(pdev);
+ if (!sdata)
+ return -EINVAL;
+
+ s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
+ if (!s3cdma)
+ return -ENOMEM;
+
+ s3cdma->pdev = pdev;
+ s3cdma->pdata = pdata;
+ s3cdma->sdata = sdata;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(s3cdma->base))
+ return PTR_ERR(s3cdma->base);
+
+ s3cdma->phy_chans = devm_kzalloc(&pdev->dev,
+ sizeof(struct s3c24xx_dma_phy) *
+ pdata->num_phy_channels,
+ GFP_KERNEL);
+ if (!s3cdma->phy_chans)
+ return -ENOMEM;
+
+ /* acquire irqs and clocks for all physical channels */
+ for (i = 0; i < pdata->num_phy_channels; i++) {
+ struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+ char clk_name[6];
+
+ phy->id = i;
+ phy->base = s3cdma->base + (i * sdata->stride);
+ phy->host = s3cdma;
+
+ phy->irq = platform_get_irq(pdev, i);
+ if (phy->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq %d, err %d\n",
+ i, phy->irq);
+ continue;
+ }
+
+ ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
+ 0, pdev->name, phy);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
+ i, ret);
+ continue;
+ }
+
+ if (sdata->has_clocks) {
+ sprintf(clk_name, "dma.%d", i);
+ phy->clk = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(phy->clk)) {
+ dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %ld\n",
+ i, PTR_ERR(phy->clk));
+ continue;
+ }
+
+ ret = clk_prepare(phy->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
+ i, ret);
+ continue;
+ }
+ }
+
+ spin_lock_init(&phy->lock);
+ phy->valid = true;
+
+ dev_dbg(&pdev->dev, "physical channel %d is %s\n",
+ i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
+ }
+
+ /* Initialize memcpy engine */
+ dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
+ dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
+ s3cdma->memcpy.dev = &pdev->dev;
+ s3cdma->memcpy.device_alloc_chan_resources =
+ s3c24xx_dma_alloc_chan_resources;
+ s3cdma->memcpy.device_free_chan_resources =
+ s3c24xx_dma_free_chan_resources;
+ s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
+ s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
+ s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
+ s3cdma->memcpy.device_control = s3c24xx_dma_control;
+
+ /* Initialize slave engine for SoC internal dedicated peripherals */
+ dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
+ dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
+ s3cdma->slave.dev = &pdev->dev;
+ s3cdma->slave.device_alloc_chan_resources =
+ s3c24xx_dma_alloc_chan_resources;
+ s3cdma->slave.device_free_chan_resources =
+ s3c24xx_dma_free_chan_resources;
+ s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
+ s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
+ s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
+ s3cdma->slave.device_control = s3c24xx_dma_control;
+
+ /* Register as many memcpy channels as there are physical channels */
+ ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
+ pdata->num_phy_channels, false);
+ if (ret <= 0) {
+ dev_warn(&pdev->dev,
+ "%s failed to enumerate memcpy channels - %d\n",
+ __func__, ret);
+ goto err_memcpy;
+ }
+
+ /* Register slave channels */
+ ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
+ pdata->num_channels, true);
+ if (ret <= 0) {
+ dev_warn(&pdev->dev,
+ "%s failed to enumerate slave channels - %d\n",
+ __func__, ret);
+ goto err_slave;
+ }
+
+ ret = dma_async_device_register(&s3cdma->memcpy);
+ if (ret) {
+ dev_warn(&pdev->dev,
+ "%s failed to register memcpy as an async device - %d\n",
+ __func__, ret);
+ goto err_memcpy_reg;
+ }
+
+ ret = dma_async_device_register(&s3cdma->slave);
+ if (ret) {
+ dev_warn(&pdev->dev,
+ "%s failed to register slave as an async device - %d\n",
+ __func__, ret);
+ goto err_slave_reg;
+ }
+
+ platform_set_drvdata(pdev, s3cdma);
+ dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
+ pdata->num_phy_channels);
+
+ return 0;
+
+err_slave_reg:
+ dma_async_device_unregister(&s3cdma->memcpy);
+err_memcpy_reg:
+ s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
+err_slave:
+ s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
+err_memcpy:
+ if (sdata->has_clocks)
+ for (i = 0; i < pdata->num_phy_channels; i++) {
+ struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+ if (phy->valid)
+ clk_unprepare(phy->clk);
+ }
+
+ return ret;
+}
+
+static int s3c24xx_dma_remove(struct platform_device *pdev)
+{
+ const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
+ struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
+ int i;
+
+ dma_async_device_unregister(&s3cdma->slave);
+ dma_async_device_unregister(&s3cdma->memcpy);
+
+ s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
+ s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
+
+ if (sdata->has_clocks)
+ for (i = 0; i < pdata->num_phy_channels; i++) {
+ struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+ if (phy->valid)
+ clk_unprepare(phy->clk);
+ }
+
+ return 0;
+}
+
+static struct platform_driver s3c24xx_dma_driver = {
+ .driver = {
+ .name = "s3c24xx-dma",
+ .owner = THIS_MODULE,
+ },
+ .id_table = s3c24xx_dma_driver_ids,
+ .probe = s3c24xx_dma_probe,
+ .remove = s3c24xx_dma_remove,
+};
+
+module_platform_driver(s3c24xx_dma_driver);
+
+bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct s3c24xx_dma_chan *s3cchan;
+
+ if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
+ return false;
+
+ s3cchan = to_s3c24xx_dma_chan(chan);
+
+ return s3cchan->id == (int)param;
+}
+EXPORT_SYMBOL(s3c24xx_dma_filter);
+
+MODULE_DESCRIPTION("S3C24XX DMA Driver");
+MODULE_AUTHOR("Heiko Stuebner");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 461a91ab70bb..ab26d46bbe15 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(&c->vc.chan, cookie, state);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
if (!state)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index d94ab592cc1b..2e7b394def80 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan,
* If we don't find cookie on the queue, it has been aborted and we have
* to report error
*/
- if (status != DMA_SUCCESS) {
+ if (status != DMA_COMPLETE) {
struct shdma_desc *sdesc;
status = DMA_ERROR;
list_for_each_entry(sdesc, &schan->ld_queue, node)
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 1069e8869f20..0d765c0e21ec 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
static int sh_dmae_probe(struct platform_device *pdev)
{
const struct sh_dmae_pdata *pdata;
- unsigned long irqflags = IRQF_DISABLED,
+ unsigned long irqflags = 0,
chan_flag[SH_DMAE_MAX_CHANNELS] = {};
int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
@@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
IORESOURCE_IRQ_SHAREABLE)
chan_flag[irq_cnt] = IRQF_SHARED;
else
- chan_flag[irq_cnt] = IRQF_DISABLED;
+ chan_flag[irq_cnt] = 0;
dev_dbg(&pdev->dev,
"Found IRQ %d for channel %d\n",
i, irq_cnt);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 82d2b97ad942..b8c031b7de4e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
@@ -2626,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
}
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS)
+ if (ret != DMA_COMPLETE)
dma_set_residue(txstate, stedma40_residue(chan));
if (d40_is_paused(d40c))
@@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan,
src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
- ((src_addr_width > 1) && (src_addr_width & 1)) ||
- ((dst_addr_width > 1) && (dst_addr_width & 1)))
+ !is_power_of_2(src_addr_width) ||
+ !is_power_of_2(dst_addr_width))
return -EINVAL;
cfg->src_info.data_width = src_addr_width;
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 5d4986e5f5fa..73654e33f13b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,
list_del(&sgreq->node);
if (sgreq->last_sg) {
- dma_desc->dma_status = DMA_SUCCESS;
+ dma_desc->dma_status = DMA_COMPLETE;
dma_cookie_complete(&dma_desc->txd);
if (!dma_desc->cb_count)
list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
@@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
unsigned int residual;
ret = dma_cookie_status(dc, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
spin_lock_irqsave(&tdc->lock, flags);
@@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
return &dma_desc->txd;
}
-struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
+static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 71e8e775189e..c2829b481bf2 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -962,8 +962,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
- return DMA_SUCCESS;
+ if (ret == DMA_COMPLETE)
+ return DMA_COMPLETE;
spin_lock_bh(&dc->lock);
txx9dmac_scan_descriptors(dc);
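
[Editorial note, not part of these patches: the hunks above all convert the same
tx_status pattern from DMA_SUCCESS to the renamed DMA_COMPLETE. A minimal sketch
of that pattern follows; foo_tx_status() and foo_residue() are hypothetical names,
and the cookie helpers come from <linux/dmaengine.h> plus the drivers/dma
"dmaengine.h" header.]

	static enum dma_status foo_tx_status(struct dma_chan *chan,
					     dma_cookie_t cookie,
					     struct dma_tx_state *state)
	{
		enum dma_status ret = dma_cookie_status(chan, cookie, state);

		/* cookie already completed: nothing left to report */
		if (ret == DMA_COMPLETE)
			return ret;

		/* still in flight: report how many bytes remain */
		if (state)
			dma_set_residue(state, foo_residue(chan));

		return ret;
	}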
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 3c9e4e98c651..13debd6e876a 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -339,8 +339,8 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
csbase = pvt->csels[dct].csbases[csrow];
csmask = pvt->csels[dct].csmasks[csrow];
- base_bits = GENMASK(21, 31) | GENMASK(9, 15);
- mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
+ base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
+ mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
addr_shift = 4;
/*
@@ -352,16 +352,16 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
csbase = pvt->csels[dct].csbases[csrow];
csmask = pvt->csels[dct].csmasks[csrow >> 1];
- *base = (csbase & GENMASK(5, 15)) << 6;
- *base |= (csbase & GENMASK(19, 30)) << 8;
+ *base = (csbase & GENMASK_ULL(15, 5)) << 6;
+ *base |= (csbase & GENMASK_ULL(30, 19)) << 8;
*mask = ~0ULL;
/* poke holes for the csmask */
- *mask &= ~((GENMASK(5, 15) << 6) |
- (GENMASK(19, 30) << 8));
+ *mask &= ~((GENMASK_ULL(15, 5) << 6) |
+ (GENMASK_ULL(30, 19) << 8));
- *mask |= (csmask & GENMASK(5, 15)) << 6;
- *mask |= (csmask & GENMASK(19, 30)) << 8;
+ *mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
+ *mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
return;
} else {
@@ -370,9 +370,11 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
addr_shift = 8;
if (pvt->fam == 0x15)
- base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
+ base_bits = mask_bits =
+ GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
else
- base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
+ base_bits = mask_bits =
+ GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
}
*base = (csbase & base_bits) << addr_shift;
@@ -561,7 +563,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
* section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
* Programmer's Manual Volume 1 Application Programming.
*/
- dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
+ dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
(unsigned long)sys_addr, (unsigned long)dram_addr);
@@ -597,7 +599,7 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
* concerning translating a DramAddr to an InputAddr.
*/
intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
- input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
+ input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
(dram_addr & 0xfff);
edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
@@ -849,7 +851,7 @@ static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
end_bit = 39;
}
- addr = m->addr & GENMASK(start_bit, end_bit);
+ addr = m->addr & GENMASK_ULL(end_bit, start_bit);
/*
* Erratum 637 workaround
@@ -861,7 +863,7 @@ static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
u16 mce_nid;
u8 intlv_en;
- if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
+ if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
return addr;
mce_nid = amd_get_nb_id(m->extcpu);
@@ -871,7 +873,7 @@ static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
intlv_en = tmp >> 21 & 0x7;
/* add [47:27] + 3 trailing bits */
- cc6_base = (tmp & GENMASK(0, 20)) << 3;
+ cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;
/* reverse and add DramIntlvEn */
cc6_base |= intlv_en ^ 0x7;
@@ -880,18 +882,18 @@ static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
cc6_base <<= 24;
if (!intlv_en)
- return cc6_base | (addr & GENMASK(0, 23));
+ return cc6_base | (addr & GENMASK_ULL(23, 0));
amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
/* faster log2 */
- tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);
+ tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
/* OR DramIntlvSel into bits [14:12] */
- tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;
+ tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
/* add remaining [11:0] bits from original MC4_ADDR */
- tmp_addr |= addr & GENMASK(0, 11);
+ tmp_addr |= addr & GENMASK_ULL(11, 0);
return cc6_base | tmp_addr;
}
@@ -952,12 +954,12 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
- pvt->ranges[range].lim.lo &= GENMASK(0, 15);
+ pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
/* {[39:27],111b} */
pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
- pvt->ranges[range].lim.hi &= GENMASK(0, 7);
+ pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
/* [47:40] */
pvt->ranges[range].lim.hi |= llim >> 13;
@@ -1330,7 +1332,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
chan_off = dram_base;
}
- return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47));
+ return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
}
/*
@@ -1576,7 +1578,7 @@ static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
num_dcts_intlv, dct_sel);
/* Verify we stay within the MAX number of channels allowed */
- if (channel > 4 || channel < 0)
+ if (channel > 4)
return -EINVAL;
leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index d2443cfa0698..6dc1fcc25afb 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -160,14 +160,6 @@
#define OFF false
/*
- * Create a contiguous bitmask starting at bit position @lo and ending at
- * position @hi. For example
- *
- * GENMASK(21, 39) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))
-
-/*
* PCI-defined configuration space registers
*/
#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 0x141b
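
[Editorial illustration, not part of the patch: the driver-local GENMASK(lo, hi)
removed above is replaced throughout by the generic GENMASK_ULL(hi, lo) from
<linux/bitops.h>, which sets bits lo..hi inclusive but takes its arguments in the
opposite order. A quick sketch of the equivalence, using the value quoted in the
removed comment; the helper name is made up for the example.]

	#include <linux/bitops.h>

	/* old driver-local form: GENMASK(21, 39) == 0x000000ffffe00000 */
	static inline u64 amd64_example_mask(void)
	{
		/* same bits 21..39 via the generic helper, arguments reversed */
		return GENMASK_ULL(39, 21);	/* 0x000000ffffe00000 */
	}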
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index c2eaf334b90b..9ee1c76da7b9 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/io.h>
+#include <linux/of_address.h>
#include <asm/machdep.h>
#include <asm/cell-regs.h>
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 211021dfec73..102674346035 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -530,12 +530,9 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
/* Report action taken */
edac_device_printk(edac_dev, KERN_INFO,
- "Giving out device to module '%s' controller "
- "'%s': DEV '%s' (%s)\n",
- edac_dev->mod_name,
- edac_dev->ctl_name,
- edac_dev_name(edac_dev),
- edac_op_state_to_string(edac_dev->op_state));
+ "Giving out device to module %s controller %s: DEV %s (%s)\n",
+ edac_dev->mod_name, edac_dev->ctl_name, edac_dev->dev_name,
+ edac_op_state_to_string(edac_dev->op_state));
mutex_unlock(&device_ctls_mutex);
return 0;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 89e109022d78..e8c9ef03495b 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -788,8 +788,10 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
}
/* Report action taken */
- edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
- " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));
+ edac_mc_printk(mci, KERN_INFO,
+ "Giving out device to module %s controller %s: DEV %s (%s)\n",
+ mci->mod_name, mci->ctl_name, mci->dev_name,
+ edac_op_state_to_string(mci->op_state));
edac_mc_owner = mci->mod_name;
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index dd370f92ace3..2cf44b4db80c 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -358,11 +358,9 @@ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
}
edac_pci_printk(pci, KERN_INFO,
- "Giving out device to module '%s' controller '%s':"
- " DEV '%s' (%s)\n",
- pci->mod_name,
- pci->ctl_name,
- edac_dev_name(pci), edac_op_state_to_string(pci->op_state));
+ "Giving out device to module %s controller %s: DEV %s (%s)\n",
+ pci->mod_name, pci->ctl_name, pci->dev_name,
+ edac_op_state_to_string(pci->op_state));
mutex_unlock(&edac_pci_ctls_mutex);
return 0;
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index bb534670ec02..d5a98a45c062 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -297,15 +297,14 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
}
/* Error address */
- if (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+ if (mem_err->validation_bits & CPER_MEM_VALID_PA) {
e->page_frame_number = mem_err->physical_addr >> PAGE_SHIFT;
e->offset_in_page = mem_err->physical_addr & ~PAGE_MASK;
}
/* Error grain */
- if (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK) {
+ if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
- }
/* Memory error location, mapped on e->location */
p = e->location;
@@ -315,6 +314,8 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
p += sprintf(p, "card:%d ", mem_err->card);
if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
p += sprintf(p, "module:%d ", mem_err->module);
+ if (mem_err->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
+ p += sprintf(p, "rank:%d ", mem_err->rank);
if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
p += sprintf(p, "bank:%d ", mem_err->bank);
if (mem_err->validation_bits & CPER_MEM_VALID_ROW)
@@ -323,6 +324,15 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
p += sprintf(p, "col:%d ", mem_err->column);
if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
+ if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
+ const char *bank = NULL, *device = NULL;
+ dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
+ if (bank != NULL && device != NULL)
+ p += sprintf(p, "DIMM location:%s %s ", bank, device);
+ else
+ p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
+ mem_err->mem_dev_handle);
+ }
if (p > e->location)
*(p - 1) = '\0';
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
index c2bd8c6a4349..2f193668ebc7 100644
--- a/drivers/edac/highbank_l2_edac.c
+++ b/drivers/edac/highbank_l2_edac.c
@@ -50,8 +50,15 @@ static irqreturn_t highbank_l2_err_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static const struct of_device_id hb_l2_err_of_match[] = {
+ { .compatible = "calxeda,hb-sregs-l2-ecc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hb_l2_err_of_match);
+
static int highbank_l2_err_probe(struct platform_device *pdev)
{
+ const struct of_device_id *id;
struct edac_device_ctl_info *dci;
struct hb_l2_drvdata *drvdata;
struct resource *r;
@@ -90,28 +97,32 @@ static int highbank_l2_err_probe(struct platform_device *pdev)
goto err;
}
+ id = of_match_device(hb_l2_err_of_match, &pdev->dev);
+ dci->mod_name = pdev->dev.driver->name;
+ dci->ctl_name = id ? id->compatible : "unknown";
+ dci->dev_name = dev_name(&pdev->dev);
+
+ if (edac_device_add_device(dci))
+ goto err;
+
drvdata->db_irq = platform_get_irq(pdev, 0);
res = devm_request_irq(&pdev->dev, drvdata->db_irq,
highbank_l2_err_handler,
0, dev_name(&pdev->dev), dci);
if (res < 0)
- goto err;
+ goto err2;
drvdata->sb_irq = platform_get_irq(pdev, 1);
res = devm_request_irq(&pdev->dev, drvdata->sb_irq,
highbank_l2_err_handler,
0, dev_name(&pdev->dev), dci);
if (res < 0)
- goto err;
-
- dci->mod_name = dev_name(&pdev->dev);
- dci->dev_name = dev_name(&pdev->dev);
-
- if (edac_device_add_device(dci))
- goto err;
+ goto err2;
devres_close_group(&pdev->dev, NULL);
return 0;
+err2:
+ edac_device_del_device(&pdev->dev);
err:
devres_release_group(&pdev->dev, NULL);
edac_device_free_ctl_info(dci);
@@ -127,12 +138,6 @@ static int highbank_l2_err_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id hb_l2_err_of_match[] = {
- { .compatible = "calxeda,hb-sregs-l2-ecc", },
- {},
-};
-MODULE_DEVICE_TABLE(of, hb_l2_err_of_match);
-
static struct platform_driver highbank_l2_edac_driver = {
.probe = highbank_l2_err_probe,
.remove = highbank_l2_err_remove,
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index 4695dd2d71fd..f784de1dc793 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -26,31 +26,40 @@
#include "edac_module.h"
/* DDR Ctrlr Error Registers */
-#define HB_DDR_ECC_OPT 0x128
-#define HB_DDR_ECC_U_ERR_ADDR 0x130
-#define HB_DDR_ECC_U_ERR_STAT 0x134
-#define HB_DDR_ECC_U_ERR_DATAL 0x138
-#define HB_DDR_ECC_U_ERR_DATAH 0x13c
-#define HB_DDR_ECC_C_ERR_ADDR 0x140
-#define HB_DDR_ECC_C_ERR_STAT 0x144
-#define HB_DDR_ECC_C_ERR_DATAL 0x148
-#define HB_DDR_ECC_C_ERR_DATAH 0x14c
-#define HB_DDR_ECC_INT_STATUS 0x180
-#define HB_DDR_ECC_INT_ACK 0x184
-#define HB_DDR_ECC_U_ERR_ID 0x424
-#define HB_DDR_ECC_C_ERR_ID 0x428
-#define HB_DDR_ECC_INT_STAT_CE 0x8
-#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10
-#define HB_DDR_ECC_INT_STAT_UE 0x20
-#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40
+#define HB_DDR_ECC_ERR_BASE 0x128
+#define MW_DDR_ECC_ERR_BASE 0x1b4
+
+#define HB_DDR_ECC_OPT 0x00
+#define HB_DDR_ECC_U_ERR_ADDR 0x08
+#define HB_DDR_ECC_U_ERR_STAT 0x0c
+#define HB_DDR_ECC_U_ERR_DATAL 0x10
+#define HB_DDR_ECC_U_ERR_DATAH 0x14
+#define HB_DDR_ECC_C_ERR_ADDR 0x18
+#define HB_DDR_ECC_C_ERR_STAT 0x1c
+#define HB_DDR_ECC_C_ERR_DATAL 0x20
+#define HB_DDR_ECC_C_ERR_DATAH 0x24
#define HB_DDR_ECC_OPT_MODE_MASK 0x3
#define HB_DDR_ECC_OPT_FWC 0x100
#define HB_DDR_ECC_OPT_XOR_SHIFT 16
+/* DDR Ctrlr Interrupt Registers */
+
+#define HB_DDR_ECC_INT_BASE 0x180
+#define MW_DDR_ECC_INT_BASE 0x218
+
+#define HB_DDR_ECC_INT_STATUS 0x00
+#define HB_DDR_ECC_INT_ACK 0x04
+
+#define HB_DDR_ECC_INT_STAT_CE 0x8
+#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10
+#define HB_DDR_ECC_INT_STAT_UE 0x20
+#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40
+
struct hb_mc_drvdata {
- void __iomem *mc_vbase;
+ void __iomem *mc_err_base;
+ void __iomem *mc_int_base;
};
static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
@@ -60,10 +69,10 @@ static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
u32 status, err_addr;
/* Read the interrupt status register */
- status = readl(drvdata->mc_vbase + HB_DDR_ECC_INT_STATUS);
+ status = readl(drvdata->mc_int_base + HB_DDR_ECC_INT_STATUS);
if (status & HB_DDR_ECC_INT_STAT_UE) {
- err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_U_ERR_ADDR);
+ err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_U_ERR_ADDR);
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
err_addr >> PAGE_SHIFT,
err_addr & ~PAGE_MASK, 0,
@@ -71,9 +80,9 @@ static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
mci->ctl_name, "");
}
if (status & HB_DDR_ECC_INT_STAT_CE) {
- u32 syndrome = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_STAT);
+ u32 syndrome = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_STAT);
syndrome = (syndrome >> 8) & 0xff;
- err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_ADDR);
+ err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_ADDR);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
err_addr >> PAGE_SHIFT,
err_addr & ~PAGE_MASK, syndrome,
@@ -82,66 +91,79 @@ static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
}
/* clear the error, clears the interrupt */
- writel(status, drvdata->mc_vbase + HB_DDR_ECC_INT_ACK);
+ writel(status, drvdata->mc_int_base + HB_DDR_ECC_INT_ACK);
return IRQ_HANDLED;
}
-#ifdef CONFIG_EDAC_DEBUG
-static ssize_t highbank_mc_err_inject_write(struct file *file,
- const char __user *data,
- size_t count, loff_t *ppos)
+static void highbank_mc_err_inject(struct mem_ctl_info *mci, u8 synd)
{
- struct mem_ctl_info *mci = file->private_data;
struct hb_mc_drvdata *pdata = mci->pvt_info;
- char buf[32];
- size_t buf_size;
u32 reg;
+
+ reg = readl(pdata->mc_err_base + HB_DDR_ECC_OPT);
+ reg &= HB_DDR_ECC_OPT_MODE_MASK;
+ reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
+ writel(reg, pdata->mc_err_base + HB_DDR_ECC_OPT);
+}
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+static ssize_t highbank_mc_inject_ctrl(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
u8 synd;
- buf_size = min(count, (sizeof(buf)-1));
- if (copy_from_user(buf, data, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
+ if (kstrtou8(buf, 16, &synd))
+ return -EINVAL;
- if (!kstrtou8(buf, 16, &synd)) {
- reg = readl(pdata->mc_vbase + HB_DDR_ECC_OPT);
- reg &= HB_DDR_ECC_OPT_MODE_MASK;
- reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
- writel(reg, pdata->mc_vbase + HB_DDR_ECC_OPT);
- }
+ highbank_mc_err_inject(mci, synd);
return count;
}
-static const struct file_operations highbank_mc_debug_inject_fops = {
- .open = simple_open,
- .write = highbank_mc_err_inject_write,
- .llseek = generic_file_llseek,
+static DEVICE_ATTR(inject_ctrl, S_IWUSR, NULL, highbank_mc_inject_ctrl);
+
+struct hb_mc_settings {
+ int err_offset;
+ int int_offset;
};
-static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
-{
- if (mci->debugfs)
- debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
- &highbank_mc_debug_inject_fops);
-;
-}
-#else
-static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
-{}
-#endif
+static struct hb_mc_settings hb_settings = {
+ .err_offset = HB_DDR_ECC_ERR_BASE,
+ .int_offset = HB_DDR_ECC_INT_BASE,
+};
+
+static struct hb_mc_settings mw_settings = {
+ .err_offset = MW_DDR_ECC_ERR_BASE,
+ .int_offset = MW_DDR_ECC_INT_BASE,
+};
+
+static struct of_device_id hb_ddr_ctrl_of_match[] = {
+ { .compatible = "calxeda,hb-ddr-ctrl", .data = &hb_settings },
+ { .compatible = "calxeda,ecx-2000-ddr-ctrl", .data = &mw_settings },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
static int highbank_mc_probe(struct platform_device *pdev)
{
+ const struct of_device_id *id;
+ const struct hb_mc_settings *settings;
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
struct hb_mc_drvdata *drvdata;
struct dimm_info *dimm;
struct resource *r;
+ void __iomem *base;
u32 control;
int irq;
int res = 0;
+ id = of_match_device(hb_ddr_ctrl_of_match, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = 1;
layers[0].is_virt_csrow = true;
@@ -174,35 +196,31 @@ static int highbank_mc_probe(struct platform_device *pdev)
goto err;
}
- drvdata->mc_vbase = devm_ioremap(&pdev->dev,
- r->start, resource_size(r));
- if (!drvdata->mc_vbase) {
+ base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (!base) {
dev_err(&pdev->dev, "Unable to map regs\n");
res = -ENOMEM;
goto err;
}
- control = readl(drvdata->mc_vbase + HB_DDR_ECC_OPT) & 0x3;
+ settings = id->data;
+ drvdata->mc_err_base = base + settings->err_offset;
+ drvdata->mc_int_base = base + settings->int_offset;
+
+ control = readl(drvdata->mc_err_base + HB_DDR_ECC_OPT) & 0x3;
if (!control || (control == 0x2)) {
dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
res = -ENODEV;
goto err;
}
- irq = platform_get_irq(pdev, 0);
- res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
- 0, dev_name(&pdev->dev), mci);
- if (res < 0) {
- dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
- goto err;
- }
-
mci->mtype_cap = MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
- mci->mod_name = dev_name(&pdev->dev);
+ mci->mod_name = pdev->dev.driver->name;
mci->mod_ver = "1";
- mci->ctl_name = dev_name(&pdev->dev);
+ mci->ctl_name = id->compatible;
+ mci->dev_name = dev_name(&pdev->dev);
mci->scrub_mode = SCRUB_SW_SRC;
/* Only a single 4GB DIMM is supported */
@@ -217,10 +235,20 @@ static int highbank_mc_probe(struct platform_device *pdev)
if (res < 0)
goto err;
- highbank_mc_create_debugfs_nodes(mci);
+ irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
+ 0, dev_name(&pdev->dev), mci);
+ if (res < 0) {
+ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+ goto err2;
+ }
+
+ device_create_file(&mci->dev, &dev_attr_inject_ctrl);
devres_close_group(&pdev->dev, NULL);
return 0;
+err2:
+ edac_mc_del_mc(&pdev->dev);
err:
devres_release_group(&pdev->dev, NULL);
edac_mc_free(mci);
@@ -231,17 +259,12 @@ static int highbank_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+ device_remove_file(&mci->dev, &dev_attr_inject_ctrl);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
return 0;
}
-static const struct of_device_id hb_ddr_ctrl_of_match[] = {
- { .compatible = "calxeda,hb-ddr-ctrl", },
- {},
-};
-MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
-
static struct platform_driver highbank_mc_edac_driver = {
.probe = highbank_mc_probe,
.remove = highbank_mc_remove,
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index e04462b60756..88f60c5fecbc 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -50,7 +50,7 @@ static int probed;
* Get a bit field at register value <v>, from bit <lo> to bit <hi>
*/
#define GET_BITFIELD(v, lo, hi) \
- (((v) & ((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) >> (lo))
+ (((v) & GENMASK_ULL(hi, lo)) >> (lo))
/*
* sbridge Memory Controller Registers
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index ff080ee20197..1b5e8e46226d 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -545,12 +545,15 @@ static int dcdbas_probe(struct platform_device *dev)
host_control_action = HC_ACTION_NONE;
host_control_smi_type = HC_SMITYPE_NONE;
+ dcdbas_pdev = dev;
+
/*
* BIOS SMI calls require buffer addresses be in 32-bit address space.
* This is done by setting the DMA mask below.
*/
- dcdbas_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- dcdbas_pdev->dev.dma_mask = &dcdbas_pdev->dev.coherent_dma_mask;
+ error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
+ if (error)
+ return error;
error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
if (error)
@@ -581,6 +584,14 @@ static struct platform_driver dcdbas_driver = {
.remove = dcdbas_remove,
};
+static const struct platform_device_info dcdbas_dev_info __initdata = {
+ .name = DRIVER_NAME,
+ .id = -1,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static struct platform_device *dcdbas_pdev_reg;
+
/**
* dcdbas_init: initialize driver
*/
@@ -592,20 +603,14 @@ static int __init dcdbas_init(void)
if (error)
return error;
- dcdbas_pdev = platform_device_alloc(DRIVER_NAME, -1);
- if (!dcdbas_pdev) {
- error = -ENOMEM;
+ dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
+ if (IS_ERR(dcdbas_pdev_reg)) {
+ error = PTR_ERR(dcdbas_pdev_reg);
goto err_unregister_driver;
}
- error = platform_device_add(dcdbas_pdev);
- if (error)
- goto err_free_device;
-
return 0;
- err_free_device:
- platform_device_put(dcdbas_pdev);
err_unregister_driver:
platform_driver_unregister(&dcdbas_driver);
return error;
@@ -628,8 +633,9 @@ static void __exit dcdbas_exit(void)
* all sysfs attributes belonging to this module have been
* released.
*/
- smi_data_buf_free();
- platform_device_unregister(dcdbas_pdev);
+ if (dcdbas_pdev)
+ smi_data_buf_free();
+ platform_device_unregister(dcdbas_pdev_reg);
platform_driver_unregister(&dcdbas_driver);
}
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index fa0affb699b4..59579a744d58 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -25,6 +25,13 @@ static int dmi_initialized;
/* DMI system identification string used during boot */
static char dmi_ids_string[128] __initdata;
+static struct dmi_memdev_info {
+ const char *device;
+ const char *bank;
+ u16 handle;
+} *dmi_memdev;
+static int dmi_memdev_nr;
+
static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
{
const u8 *bp = ((u8 *) dm) + dm->length;
@@ -322,6 +329,42 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d - 1)));
}
+static void __init count_mem_devices(const struct dmi_header *dm, void *v)
+{
+ if (dm->type != DMI_ENTRY_MEM_DEVICE)
+ return;
+ dmi_memdev_nr++;
+}
+
+static void __init save_mem_devices(const struct dmi_header *dm, void *v)
+{
+ const char *d = (const char *)dm;
+ static int nr;
+
+ if (dm->type != DMI_ENTRY_MEM_DEVICE)
+ return;
+ if (nr >= dmi_memdev_nr) {
+ pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
+ return;
+ }
+ dmi_memdev[nr].handle = dm->handle;
+ dmi_memdev[nr].device = dmi_string(dm, d[0x10]);
+ dmi_memdev[nr].bank = dmi_string(dm, d[0x11]);
+ nr++;
+}
+
+void __init dmi_memdev_walk(void)
+{
+ if (!dmi_available)
+ return;
+
+ if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) {
+ dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr);
+ if (dmi_memdev)
+ dmi_walk_early(save_mem_devices);
+ }
+}
+
/*
* Process a DMI table entry. Right now all we care about are the BIOS
* and machine entries. For 2.5 we should pull the smbus controller info
@@ -815,3 +858,20 @@ bool dmi_match(enum dmi_field f, const char *str)
return !strcmp(info, str);
}
EXPORT_SYMBOL_GPL(dmi_match);
+
+void dmi_memdev_name(u16 handle, const char **bank, const char **device)
+{
+ int n;
+
+ if (dmi_memdev == NULL)
+ return;
+
+ for (n = 0; n < dmi_memdev_nr; n++) {
+ if (handle == dmi_memdev[n].handle) {
+ *bank = dmi_memdev[n].bank;
+ *device = dmi_memdev[n].device;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_name);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index b0fc7c79dfbb..3150aa4874e8 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -36,4 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
backend for pstore by default. This setting can be overridden
using the efivars module's pstore_disable parameter.
+config UEFI_CPER
+ def_bool n
+
endmenu
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 99245ab5a79c..9ba156d3c775 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -4,3 +4,4 @@
obj-y += efi.o vars.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
+obj-$(CONFIG_UEFI_CPER) += cper.o
diff --git a/drivers/acpi/apei/cper.c b/drivers/firmware/efi/cper.c
index 33dc6a004802..1491dd4f08f9 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -5,10 +5,10 @@
* Author: Huang Ying <ying.huang@intel.com>
*
* CPER is the format used to describe platform hardware error by
- * various APEI tables, such as ERST, BERT and HEST etc.
+ * various tables, such as ERST, BERT and HEST etc.
*
* For more information about CPER, please refer to Appendix N of UEFI
- * Specification version 2.3.
+ * Specification version 2.4.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
@@ -28,10 +28,12 @@
#include <linux/module.h>
#include <linux/time.h>
#include <linux/cper.h>
+#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/aer.h>
+#define INDENT_SP " "
/*
* CPER record ID need to be unique even after reboot, because record
* ID is used as index for ERST storage, while CPER records from
@@ -73,7 +75,7 @@ static const char *cper_severity_str(unsigned int severity)
* printed, with @pfx is printed at the beginning of each line.
*/
void cper_print_bits(const char *pfx, unsigned int bits,
- const char *strs[], unsigned int strs_size)
+ const char * const strs[], unsigned int strs_size)
{
int i, len = 0;
const char *str;
@@ -98,32 +100,32 @@ void cper_print_bits(const char *pfx, unsigned int bits,
printk("%s\n", buf);
}
-static const char *cper_proc_type_strs[] = {
+static const char * const cper_proc_type_strs[] = {
"IA32/X64",
"IA64",
};
-static const char *cper_proc_isa_strs[] = {
+static const char * const cper_proc_isa_strs[] = {
"IA32",
"IA64",
"X64",
};
-static const char *cper_proc_error_type_strs[] = {
+static const char * const cper_proc_error_type_strs[] = {
"cache error",
"TLB error",
"bus error",
"micro-architectural error",
};
-static const char *cper_proc_op_strs[] = {
+static const char * const cper_proc_op_strs[] = {
"unknown or generic",
"data read",
"data write",
"instruction execution",
};
-static const char *cper_proc_flag_strs[] = {
+static const char * const cper_proc_flag_strs[] = {
"restartable",
"precise IP",
"overflow",
@@ -191,46 +193,58 @@ static const char *cper_mem_err_type_strs[] = {
"memory sparing",
"scrub corrected error",
"scrub uncorrected error",
+ "physical memory map-out event",
};
static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
{
if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
- if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS)
+ if (mem->validation_bits & CPER_MEM_VALID_PA)
printk("%s""physical_address: 0x%016llx\n",
pfx, mem->physical_addr);
- if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK)
+ if (mem->validation_bits & CPER_MEM_VALID_PA_MASK)
printk("%s""physical_address_mask: 0x%016llx\n",
pfx, mem->physical_addr_mask);
if (mem->validation_bits & CPER_MEM_VALID_NODE)
- printk("%s""node: %d\n", pfx, mem->node);
+ pr_debug("node: %d\n", mem->node);
if (mem->validation_bits & CPER_MEM_VALID_CARD)
- printk("%s""card: %d\n", pfx, mem->card);
+ pr_debug("card: %d\n", mem->card);
if (mem->validation_bits & CPER_MEM_VALID_MODULE)
- printk("%s""module: %d\n", pfx, mem->module);
+ pr_debug("module: %d\n", mem->module);
+ if (mem->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
+ pr_debug("rank: %d\n", mem->rank);
if (mem->validation_bits & CPER_MEM_VALID_BANK)
- printk("%s""bank: %d\n", pfx, mem->bank);
+ pr_debug("bank: %d\n", mem->bank);
if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
- printk("%s""device: %d\n", pfx, mem->device);
+ pr_debug("device: %d\n", mem->device);
if (mem->validation_bits & CPER_MEM_VALID_ROW)
- printk("%s""row: %d\n", pfx, mem->row);
+ pr_debug("row: %d\n", mem->row);
if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
- printk("%s""column: %d\n", pfx, mem->column);
+ pr_debug("column: %d\n", mem->column);
if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
- printk("%s""bit_position: %d\n", pfx, mem->bit_pos);
+ pr_debug("bit_position: %d\n", mem->bit_pos);
if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
- printk("%s""requestor_id: 0x%016llx\n", pfx, mem->requestor_id);
+ pr_debug("requestor_id: 0x%016llx\n", mem->requestor_id);
if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
- printk("%s""responder_id: 0x%016llx\n", pfx, mem->responder_id);
+ pr_debug("responder_id: 0x%016llx\n", mem->responder_id);
if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
- printk("%s""target_id: 0x%016llx\n", pfx, mem->target_id);
+ pr_debug("target_id: 0x%016llx\n", mem->target_id);
if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
u8 etype = mem->error_type;
printk("%s""error_type: %d, %s\n", pfx, etype,
etype < ARRAY_SIZE(cper_mem_err_type_strs) ?
cper_mem_err_type_strs[etype] : "unknown");
}
+ if (mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
+ const char *bank = NULL, *device = NULL;
+ dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
+ if (bank != NULL && device != NULL)
+ printk("%s""DIMM location: %s %s", pfx, bank, device);
+ else
+ printk("%s""DIMM DMI handle: 0x%.4x",
+ pfx, mem->mem_dev_handle);
+ }
}
static const char *cper_pcie_port_type_strs[] = {
@@ -248,7 +262,7 @@ static const char *cper_pcie_port_type_strs[] = {
};
static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
- const struct acpi_hest_generic_data *gdata)
+ const struct acpi_generic_data *gdata)
{
if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
@@ -283,55 +297,45 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
pfx, pcie->bridge.secondary_status, pcie->bridge.control);
}
-static const char *apei_estatus_section_flag_strs[] = {
- "primary",
- "containment warning",
- "reset",
- "threshold exceeded",
- "resource not accessible",
- "latent error",
-};
-
-static void apei_estatus_print_section(
- const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no)
+static void cper_estatus_print_section(
+ const char *pfx, const struct acpi_generic_data *gdata, int sec_no)
{
uuid_le *sec_type = (uuid_le *)gdata->section_type;
__u16 severity;
+ char newpfx[64];
severity = gdata->error_severity;
- printk("%s""section: %d, severity: %d, %s\n", pfx, sec_no, severity,
+ printk("%s""Error %d, type: %s\n", pfx, sec_no,
cper_severity_str(severity));
- printk("%s""flags: 0x%02x\n", pfx, gdata->flags);
- cper_print_bits(pfx, gdata->flags, apei_estatus_section_flag_strs,
- ARRAY_SIZE(apei_estatus_section_flag_strs));
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
printk("%s""fru_id: %pUl\n", pfx, (uuid_le *)gdata->fru_id);
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
+ snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) {
struct cper_sec_proc_generic *proc_err = (void *)(gdata + 1);
- printk("%s""section_type: general processor error\n", pfx);
+ printk("%s""section_type: general processor error\n", newpfx);
if (gdata->error_data_length >= sizeof(*proc_err))
- cper_print_proc_generic(pfx, proc_err);
+ cper_print_proc_generic(newpfx, proc_err);
else
goto err_section_too_small;
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
- printk("%s""section_type: memory error\n", pfx);
+ printk("%s""section_type: memory error\n", newpfx);
if (gdata->error_data_length >= sizeof(*mem_err))
- cper_print_mem(pfx, mem_err);
+ cper_print_mem(newpfx, mem_err);
else
goto err_section_too_small;
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
struct cper_sec_pcie *pcie = (void *)(gdata + 1);
- printk("%s""section_type: PCIe error\n", pfx);
+ printk("%s""section_type: PCIe error\n", newpfx);
if (gdata->error_data_length >= sizeof(*pcie))
- cper_print_pcie(pfx, pcie, gdata);
+ cper_print_pcie(newpfx, pcie, gdata);
else
goto err_section_too_small;
} else
- printk("%s""section type: unknown, %pUl\n", pfx, sec_type);
+ printk("%s""section type: unknown, %pUl\n", newpfx, sec_type);
return;
@@ -339,34 +343,38 @@ err_section_too_small:
pr_err(FW_WARN "error section length is too small\n");
}
-void apei_estatus_print(const char *pfx,
- const struct acpi_hest_generic_status *estatus)
+void cper_estatus_print(const char *pfx,
+ const struct acpi_generic_status *estatus)
{
- struct acpi_hest_generic_data *gdata;
+ struct acpi_generic_data *gdata;
unsigned int data_len, gedata_len;
int sec_no = 0;
+ char newpfx[64];
__u16 severity;
- printk("%s""APEI generic hardware error status\n", pfx);
severity = estatus->error_severity;
- printk("%s""severity: %d, %s\n", pfx, severity,
- cper_severity_str(severity));
+ if (severity == CPER_SEV_CORRECTED)
+ printk("%s%s\n", pfx,
+ "It has been corrected by h/w "
+ "and requires no further action");
+ printk("%s""event severity: %s\n", pfx, cper_severity_str(severity));
data_len = estatus->data_length;
- gdata = (struct acpi_hest_generic_data *)(estatus + 1);
- while (data_len > sizeof(*gdata)) {
+ gdata = (struct acpi_generic_data *)(estatus + 1);
+ snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
+ while (data_len >= sizeof(*gdata)) {
gedata_len = gdata->error_data_length;
- apei_estatus_print_section(pfx, gdata, sec_no);
+ cper_estatus_print_section(newpfx, gdata, sec_no);
data_len -= gedata_len + sizeof(*gdata);
gdata = (void *)(gdata + 1) + gedata_len;
sec_no++;
}
}
-EXPORT_SYMBOL_GPL(apei_estatus_print);
+EXPORT_SYMBOL_GPL(cper_estatus_print);
-int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
+int cper_estatus_check_header(const struct acpi_generic_status *estatus)
{
if (estatus->data_length &&
- estatus->data_length < sizeof(struct acpi_hest_generic_data))
+ estatus->data_length < sizeof(struct acpi_generic_data))
return -EINVAL;
if (estatus->raw_data_length &&
estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
@@ -374,19 +382,19 @@ int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
return 0;
}
-EXPORT_SYMBOL_GPL(apei_estatus_check_header);
+EXPORT_SYMBOL_GPL(cper_estatus_check_header);
-int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
+int cper_estatus_check(const struct acpi_generic_status *estatus)
{
- struct acpi_hest_generic_data *gdata;
+ struct acpi_generic_data *gdata;
unsigned int data_len, gedata_len;
int rc;
- rc = apei_estatus_check_header(estatus);
+ rc = cper_estatus_check_header(estatus);
if (rc)
return rc;
data_len = estatus->data_length;
- gdata = (struct acpi_hest_generic_data *)(estatus + 1);
+ gdata = (struct acpi_generic_data *)(estatus + 1);
while (data_len >= sizeof(*gdata)) {
gedata_len = gdata->error_data_length;
if (gedata_len > data_len - sizeof(*gdata))
@@ -399,4 +407,4 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
return 0;
}
-EXPORT_SYMBOL_GPL(apei_estatus_check);
+EXPORT_SYMBOL_GPL(cper_estatus_check);
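
The renamed cper_estatus_check()/cper_estatus_print() pair above share one walking pattern over the variable-length generic error data entries. A minimal, self-contained sketch of that pattern follows; the struct and function names here are illustrative stand-ins, not the kernel's:

    #include <stddef.h>

    /* Illustrative stand-in for the generic-data header; only the
     * length field matters for the walk. */
    struct gdata_hdr {
            unsigned int error_data_length; /* bytes of payload after the header */
    };

    static int walk_sections(const void *buf, unsigned int data_len,
                             void (*cb)(const struct gdata_hdr *g, int sec_no))
    {
            const struct gdata_hdr *g = buf;
            int sec_no = 0;

            while (data_len >= sizeof(*g)) {
                    unsigned int payload = g->error_data_length;

                    /* Same overrun check as cper_estatus_check() above. */
                    if (payload > data_len - sizeof(*g))
                            return -1;
                    cb(g, sec_no++);
                    data_len -= payload + sizeof(*g);
                    g = (const struct gdata_hdr *)((const char *)(g + 1) + payload);
            }
            return 0;
    }
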
diff --git a/drivers/firmware/efi/efi-stub-helper.c b/drivers/firmware/efi/efi-stub-helper.c
new file mode 100644
index 000000000000..b6bffbfd3be7
--- /dev/null
+++ b/drivers/firmware/efi/efi-stub-helper.c
@@ -0,0 +1,636 @@
+/*
+ * Helper functions used by the EFI stub on multiple
+ * architectures. This should be #included by the EFI stub
+ * implementation files.
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ *
+ * This file is part of the Linux kernel, and is made available
+ * under the terms of the GNU General Public License version 2.
+ *
+ */
+#define EFI_READ_CHUNK_SIZE (1024 * 1024)
+
+struct file_info {
+ efi_file_handle_t *handle;
+ u64 size;
+};
+
+
+
+
+static void efi_char16_printk(efi_system_table_t *sys_table_arg,
+ efi_char16_t *str)
+{
+ struct efi_simple_text_output_protocol *out;
+
+ out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out;
+ efi_call_phys2(out->output_string, out, str);
+}
+
+static void efi_printk(efi_system_table_t *sys_table_arg, char *str)
+{
+ char *s8;
+
+ for (s8 = str; *s8; s8++) {
+ efi_char16_t ch[2] = { 0 };
+
+ ch[0] = *s8;
+ if (*s8 == '\n') {
+ efi_char16_t nl[2] = { '\r', 0 };
+ efi_char16_printk(sys_table_arg, nl);
+ }
+
+ efi_char16_printk(sys_table_arg, ch);
+ }
+}
+
+
+static efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
+ efi_memory_desc_t **map,
+ unsigned long *map_size,
+ unsigned long *desc_size,
+ u32 *desc_ver,
+ unsigned long *key_ptr)
+{
+ efi_memory_desc_t *m = NULL;
+ efi_status_t status;
+ unsigned long key;
+ u32 desc_version;
+
+ *map_size = sizeof(*m) * 32;
+again:
+ /*
+ * Add an additional efi_memory_desc_t because we're doing an
+ * allocation which may be in a new descriptor region.
+ */
+ *map_size += sizeof(*m);
+ status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
+ EFI_LOADER_DATA, *map_size, (void **)&m);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = efi_call_phys5(sys_table_arg->boottime->get_memory_map,
+ map_size, m, &key, desc_size, &desc_version);
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ efi_call_phys1(sys_table_arg->boottime->free_pool, m);
+ goto again;
+ }
+
+ if (status != EFI_SUCCESS)
+ efi_call_phys1(sys_table_arg->boottime->free_pool, m);
+ if (key_ptr && status == EFI_SUCCESS)
+ *key_ptr = key;
+ if (desc_ver && status == EFI_SUCCESS)
+ *desc_ver = desc_version;
+
+fail:
+ *map = m;
+ return status;
+}
+
+/*
+ * Allocate at the highest possible address that is not above 'max'.
+ */
+static efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long max)
+{
+ unsigned long map_size, desc_size;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ u64 max_addr = 0;
+ int i;
+
+ status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
+ NULL, NULL);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ /*
+ * Enforce minimum alignment that EFI requires when requesting
+ * a specific address. We are doing page-based allocations,
+ * so we must be aligned to a page.
+ */
+ if (align < EFI_PAGE_SIZE)
+ align = EFI_PAGE_SIZE;
+
+ nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+again:
+ for (i = 0; i < map_size / desc_size; i++) {
+ efi_memory_desc_t *desc;
+ unsigned long m = (unsigned long)map;
+ u64 start, end;
+
+ desc = (efi_memory_desc_t *)(m + (i * desc_size));
+ if (desc->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ if (desc->num_pages < nr_pages)
+ continue;
+
+ start = desc->phys_addr;
+ end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
+
+ if ((start + size) > end || (start + size) > max)
+ continue;
+
+ if (end - size > max)
+ end = max;
+
+ if (round_down(end - size, align) < start)
+ continue;
+
+ start = round_down(end - size, align);
+
+ /*
+ * Don't allocate at 0x0. It will confuse code that
+ * checks pointers against NULL.
+ */
+ if (start == 0x0)
+ continue;
+
+ if (start > max_addr)
+ max_addr = start;
+ }
+
+ if (!max_addr)
+ status = EFI_NOT_FOUND;
+ else {
+ status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
+ nr_pages, &max_addr);
+ if (status != EFI_SUCCESS) {
+ max = max_addr;
+ max_addr = 0;
+ goto again;
+ }
+
+ *addr = max_addr;
+ }
+
+ efi_call_phys1(sys_table_arg->boottime->free_pool, map);
+
+fail:
+ return status;
+}
+
+/*
+ * Allocate at the lowest possible address.
+ */
+static efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr)
+{
+ unsigned long map_size, desc_size;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ int i;
+
+ status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
+ NULL, NULL);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ /*
+ * Enforce minimum alignment that EFI requires when requesting
+ * a specific address. We are doing page-based allocations,
+ * so we must be aligned to a page.
+ */
+ if (align < EFI_PAGE_SIZE)
+ align = EFI_PAGE_SIZE;
+
+ nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ for (i = 0; i < map_size / desc_size; i++) {
+ efi_memory_desc_t *desc;
+ unsigned long m = (unsigned long)map;
+ u64 start, end;
+
+ desc = (efi_memory_desc_t *)(m + (i * desc_size));
+
+ if (desc->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ if (desc->num_pages < nr_pages)
+ continue;
+
+ start = desc->phys_addr;
+ end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
+
+ /*
+ * Don't allocate at 0x0. It will confuse code that
+ * checks pointers against NULL. Skip the first 8
+ * bytes so we start at a nice even number.
+ */
+ if (start == 0x0)
+ start += 8;
+
+ start = round_up(start, align);
+ if ((start + size) > end)
+ continue;
+
+ status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
+ nr_pages, &start);
+ if (status == EFI_SUCCESS) {
+ *addr = start;
+ break;
+ }
+ }
+
+ if (i == map_size / desc_size)
+ status = EFI_NOT_FOUND;
+
+ efi_call_phys1(sys_table_arg->boottime->free_pool, map);
+fail:
+ return status;
+}
+
+static void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
+ unsigned long addr)
+{
+ unsigned long nr_pages;
+
+ if (!size)
+ return;
+
+ nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ efi_call_phys2(sys_table_arg->boottime->free_pages, addr, nr_pages);
+}
+
+
+/*
+ * Check the cmdline for LILO-style file= arguments.
+ *
+ * We only support loading a file from the same filesystem as
+ * the kernel image.
+ */
+static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
+ efi_loaded_image_t *image,
+ char *cmd_line, char *option_string,
+ unsigned long max_addr,
+ unsigned long *load_addr,
+ unsigned long *load_size)
+{
+ struct file_info *files;
+ unsigned long file_addr;
+ efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+ u64 file_size_total;
+ efi_file_io_interface_t *io;
+ efi_file_handle_t *fh;
+ efi_status_t status;
+ int nr_files;
+ char *str;
+ int i, j, k;
+
+ file_addr = 0;
+ file_size_total = 0;
+
+ str = cmd_line;
+
+ j = 0; /* See close_handles */
+
+ if (!load_addr || !load_size)
+ return EFI_INVALID_PARAMETER;
+
+ *load_addr = 0;
+ *load_size = 0;
+
+ if (!str || !*str)
+ return EFI_SUCCESS;
+
+ for (nr_files = 0; *str; nr_files++) {
+ str = strstr(str, option_string);
+ if (!str)
+ break;
+
+ str += strlen(option_string);
+
+ /* Skip any leading slashes */
+ while (*str == '/' || *str == '\\')
+ str++;
+
+ while (*str && *str != ' ' && *str != '\n')
+ str++;
+ }
+
+ if (!nr_files)
+ return EFI_SUCCESS;
+
+ status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
+ EFI_LOADER_DATA,
+ nr_files * sizeof(*files),
+ (void **)&files);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to alloc mem for file handle list\n");
+ goto fail;
+ }
+
+ str = cmd_line;
+ for (i = 0; i < nr_files; i++) {
+ struct file_info *file;
+ efi_file_handle_t *h;
+ efi_file_info_t *info;
+ efi_char16_t filename_16[256];
+ unsigned long info_sz;
+ efi_guid_t info_guid = EFI_FILE_INFO_ID;
+ efi_char16_t *p;
+ u64 file_sz;
+
+ str = strstr(str, option_string);
+ if (!str)
+ break;
+
+ str += strlen(option_string);
+
+ file = &files[i];
+ p = filename_16;
+
+ /* Skip any leading slashes */
+ while (*str == '/' || *str == '\\')
+ str++;
+
+ while (*str && *str != ' ' && *str != '\n') {
+ if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
+ break;
+
+ if (*str == '/') {
+ *p++ = '\\';
+ str++;
+ } else {
+ *p++ = *str++;
+ }
+ }
+
+ *p = '\0';
+
+ /* Only open the volume once. */
+ if (!i) {
+ efi_boot_services_t *boottime;
+
+ boottime = sys_table_arg->boottime;
+
+ status = efi_call_phys3(boottime->handle_protocol,
+ image->device_handle, &fs_proto,
+ (void **)&io);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+ goto free_files;
+ }
+
+ status = efi_call_phys2(io->open_volume, io, &fh);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to open volume\n");
+ goto free_files;
+ }
+ }
+
+ status = efi_call_phys5(fh->open, fh, &h, filename_16,
+ EFI_FILE_MODE_READ, (u64)0);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to open file: ");
+ efi_char16_printk(sys_table_arg, filename_16);
+ efi_printk(sys_table_arg, "\n");
+ goto close_handles;
+ }
+
+ file->handle = h;
+
+ info_sz = 0;
+ status = efi_call_phys4(h->get_info, h, &info_guid,
+ &info_sz, NULL);
+ if (status != EFI_BUFFER_TOO_SMALL) {
+ efi_printk(sys_table_arg, "Failed to get file info size\n");
+ goto close_handles;
+ }
+
+grow:
+ status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
+ EFI_LOADER_DATA, info_sz,
+ (void **)&info);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
+ goto close_handles;
+ }
+
+ status = efi_call_phys4(h->get_info, h, &info_guid,
+ &info_sz, info);
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ efi_call_phys1(sys_table_arg->boottime->free_pool,
+ info);
+ goto grow;
+ }
+
+ file_sz = info->file_size;
+ efi_call_phys1(sys_table_arg->boottime->free_pool, info);
+
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to get file info\n");
+ goto close_handles;
+ }
+
+ file->size = file_sz;
+ file_size_total += file_sz;
+ }
+
+ if (file_size_total) {
+ unsigned long addr;
+
+ /*
+ * Multiple files need to be at consecutive addresses in memory,
+ * so allocate enough memory for all of the files up front.
+ */
+ status = efi_high_alloc(sys_table_arg, file_size_total, 0x1000,
+ &file_addr, max_addr);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to alloc highmem for files\n");
+ goto close_handles;
+ }
+
+ /* We've run out of free low memory. */
+ if (file_addr > max_addr) {
+ efi_printk(sys_table_arg, "We've run out of free low memory\n");
+ status = EFI_INVALID_PARAMETER;
+ goto free_file_total;
+ }
+
+ addr = file_addr;
+ for (j = 0; j < nr_files; j++) {
+ unsigned long size;
+
+ size = files[j].size;
+ while (size) {
+ unsigned long chunksize;
+ if (size > EFI_READ_CHUNK_SIZE)
+ chunksize = EFI_READ_CHUNK_SIZE;
+ else
+ chunksize = size;
+ status = efi_call_phys3(fh->read,
+ files[j].handle,
+ &chunksize,
+ (void *)addr);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to read file\n");
+ goto free_file_total;
+ }
+ addr += chunksize;
+ size -= chunksize;
+ }
+
+ efi_call_phys1(fh->close, files[j].handle);
+ }
+
+ }
+
+ efi_call_phys1(sys_table_arg->boottime->free_pool, files);
+
+ *load_addr = file_addr;
+ *load_size = file_size_total;
+
+ return status;
+
+free_file_total:
+ efi_free(sys_table_arg, file_size_total, file_addr);
+
+close_handles:
+ for (k = j; k < i; k++)
+ efi_call_phys1(fh->close, files[k].handle);
+free_files:
+ efi_call_phys1(sys_table_arg->boottime->free_pool, files);
+fail:
+ *load_addr = 0;
+ *load_size = 0;
+
+ return status;
+}
+/*
+ * Relocate a kernel image, either compressed or uncompressed.
+ * In the ARM64 case, all kernel images are currently
+ * uncompressed, so when we relocate an image we need to
+ * allocate additional space for its BSS segment. Any low
+ * memory that this function should avoid must be marked as
+ * unavailable in the EFI memory map, because if the preferred
+ * address is not available the lowest available address will
+ * be used.
+ */
+static efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
+ unsigned long *image_addr,
+ unsigned long image_size,
+ unsigned long alloc_size,
+ unsigned long preferred_addr,
+ unsigned long alignment)
+{
+ unsigned long cur_image_addr;
+ unsigned long new_addr = 0;
+ efi_status_t status;
+ unsigned long nr_pages;
+ efi_physical_addr_t efi_addr = preferred_addr;
+
+ if (!image_addr || !image_size || !alloc_size)
+ return EFI_INVALID_PARAMETER;
+ if (alloc_size < image_size)
+ return EFI_INVALID_PARAMETER;
+
+ cur_image_addr = *image_addr;
+
+ /*
+ * The EFI firmware loader could have placed the kernel image
+ * anywhere in memory, but the kernel has restrictions on the
+ * max physical address it can run at. Some architectures
+ * also have a preferred address, so first try to relocate
+ * to the preferred address. If that fails, allocate as low
+ * as possible while respecting the required alignment.
+ */
+ nr_pages = round_up(alloc_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
+ nr_pages, &efi_addr);
+ new_addr = efi_addr;
+ /*
+ * If preferred address allocation failed allocate as low as
+ * possible.
+ */
+ if (status != EFI_SUCCESS) {
+ status = efi_low_alloc(sys_table_arg, alloc_size, alignment,
+ &new_addr);
+ }
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "ERROR: Failed to allocate usable memory for kernel.\n");
+ return status;
+ }
+
+ /*
+ * We know source/dest won't overlap since both memory ranges
+ * have been allocated by UEFI, so we can safely use memcpy.
+ */
+ memcpy((void *)new_addr, (void *)cur_image_addr, image_size);
+
+ /* Return the new address of the relocated image. */
+ *image_addr = new_addr;
+
+ return status;
+}
+
+/*
+ * Convert the Unicode UEFI command line to ASCII to pass to the kernel.
+ * The size of the allocated memory is returned in *cmd_line_len.
+ * Returns NULL on error.
+ */
+static char *efi_convert_cmdline_to_ascii(efi_system_table_t *sys_table_arg,
+ efi_loaded_image_t *image,
+ int *cmd_line_len)
+{
+ u16 *s2;
+ u8 *s1 = NULL;
+ unsigned long cmdline_addr = 0;
+ int load_options_size = image->load_options_size / 2; /* ASCII */
+ void *options = image->load_options;
+ int options_size = 0;
+ efi_status_t status;
+ int i;
+ u16 zero = 0;
+
+ if (options) {
+ s2 = options;
+ while (*s2 && *s2 != '\n' && options_size < load_options_size) {
+ s2++;
+ options_size++;
+ }
+ }
+
+ if (options_size == 0) {
+ /* No command line options, so return an empty string */
+ options_size = 1;
+ options = &zero;
+ }
+
+ options_size++; /* NUL termination */
+#ifdef CONFIG_ARM
+ /*
+ * For ARM, allocate at a high address to avoid reserved
+ * regions at low addresses that we don't know the specifics of
+ * at the time we are processing the command line.
+ */
+ status = efi_high_alloc(sys_table_arg, options_size, 0,
+ &cmdline_addr, 0xfffff000);
+#else
+ status = efi_low_alloc(sys_table_arg, options_size, 0,
+ &cmdline_addr);
+#endif
+ if (status != EFI_SUCCESS)
+ return NULL;
+
+ s1 = (u8 *)cmdline_addr;
+ s2 = (u16 *)options;
+
+ for (i = 0; i < options_size - 1; i++)
+ *s1++ = *s2++;
+
+ *s1 = '\0';
+
+ *cmd_line_len = options_size;
+ return (char *)cmdline_addr;
+}
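
A quick sketch of the chunked file copy used by handle_cmdline_files() above, with the firmware read service abstracted behind a callback; copy_file(), read_chunk() and CHUNK are hypothetical names, and the chunk value mirrors EFI_READ_CHUNK_SIZE defined at the top of the file:

    #define CHUNK   (1024 * 1024)   /* mirrors EFI_READ_CHUNK_SIZE above */

    /* read_chunk() stands in for efi_call_phys3(fh->read, handle, &len, buf);
     * it may shorten *len and returns a negative value on failure. */
    static int copy_file(unsigned long dst, unsigned long long total,
                         int (*read_chunk)(void *buf, unsigned long *len))
    {
            while (total) {
                    unsigned long len = total > CHUNK ? CHUNK : total;

                    if (read_chunk((void *)dst, &len) < 0)
                            return -1;
                    dst += len;
                    total -= len;
            }
            return 0;
    }
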
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 5145fa344ad5..2e2fbdec0845 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -13,11 +13,27 @@
* This file is released under the GPLv2.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/efi.h>
+#include <linux/io.h>
+
+struct efi __read_mostly efi = {
+ .mps = EFI_INVALID_TABLE_ADDR,
+ .acpi = EFI_INVALID_TABLE_ADDR,
+ .acpi20 = EFI_INVALID_TABLE_ADDR,
+ .smbios = EFI_INVALID_TABLE_ADDR,
+ .sal_systab = EFI_INVALID_TABLE_ADDR,
+ .boot_info = EFI_INVALID_TABLE_ADDR,
+ .hcdp = EFI_INVALID_TABLE_ADDR,
+ .uga = EFI_INVALID_TABLE_ADDR,
+ .uv_systab = EFI_INVALID_TABLE_ADDR,
+};
+EXPORT_SYMBOL(efi);
static struct kobject *efi_kobj;
static struct kobject *efivars_kobj;
@@ -132,3 +148,127 @@ err_put:
}
subsys_initcall(efisubsys_init);
+
+
+/*
+ * We can't ioremap data in EFI boot services RAM, because we've already mapped
+ * it as RAM. So, look it up in the existing EFI memory map instead. Only
+ * callable after efi_enter_virtual_mode and before efi_free_boot_services.
+ */
+void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
+{
+ struct efi_memory_map *map;
+ void *p;
+ map = efi.memmap;
+ if (!map)
+ return NULL;
+ if (WARN_ON(!map->map))
+ return NULL;
+ for (p = map->map; p < map->map_end; p += map->desc_size) {
+ efi_memory_desc_t *md = p;
+ u64 size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 end = md->phys_addr + size;
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+ md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
+ continue;
+ if (!md->virt_addr)
+ continue;
+ if (phys_addr >= md->phys_addr && phys_addr < end) {
+ phys_addr += md->virt_addr - md->phys_addr;
+ return (__force void __iomem *)(unsigned long)phys_addr;
+ }
+ }
+ return NULL;
+}
+
+static __initdata efi_config_table_type_t common_tables[] = {
+ {ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
+ {ACPI_TABLE_GUID, "ACPI", &efi.acpi},
+ {HCDP_TABLE_GUID, "HCDP", &efi.hcdp},
+ {MPS_TABLE_GUID, "MPS", &efi.mps},
+ {SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab},
+ {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
+ {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
+ {NULL_GUID, NULL, 0},
+};
+
+static __init int match_config_table(efi_guid_t *guid,
+ unsigned long table,
+ efi_config_table_type_t *table_types)
+{
+ u8 str[EFI_VARIABLE_GUID_LEN + 1];
+ int i;
+
+ if (table_types) {
+ efi_guid_unparse(guid, str);
+
+ for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
+ efi_guid_unparse(&table_types[i].guid, str);
+
+ if (!efi_guidcmp(*guid, table_types[i].guid)) {
+ *(table_types[i].ptr) = table;
+ pr_cont(" %s=0x%lx ",
+ table_types[i].name, table);
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int __init efi_config_init(efi_config_table_type_t *arch_tables)
+{
+ void *config_tables, *tablep;
+ int i, sz;
+
+ if (efi_enabled(EFI_64BIT))
+ sz = sizeof(efi_config_table_64_t);
+ else
+ sz = sizeof(efi_config_table_32_t);
+
+ /*
+ * Let's see what config tables the firmware passed to us.
+ */
+ config_tables = early_memremap(efi.systab->tables,
+ efi.systab->nr_tables * sz);
+ if (config_tables == NULL) {
+ pr_err("Could not map Configuration table!\n");
+ return -ENOMEM;
+ }
+
+ tablep = config_tables;
+ pr_info("");
+ for (i = 0; i < efi.systab->nr_tables; i++) {
+ efi_guid_t guid;
+ unsigned long table;
+
+ if (efi_enabled(EFI_64BIT)) {
+ u64 table64;
+ guid = ((efi_config_table_64_t *)tablep)->guid;
+ table64 = ((efi_config_table_64_t *)tablep)->table;
+ table = table64;
+#ifndef CONFIG_64BIT
+ if (table64 >> 32) {
+ pr_cont("\n");
+ pr_err("Table located above 4GB, disabling EFI.\n");
+ early_iounmap(config_tables,
+ efi.systab->nr_tables * sz);
+ return -EINVAL;
+ }
+#endif
+ } else {
+ guid = ((efi_config_table_32_t *)tablep)->guid;
+ table = ((efi_config_table_32_t *)tablep)->table;
+ }
+
+ if (!match_config_table(&guid, table, common_tables))
+ match_config_table(&guid, table, arch_tables);
+
+ tablep += sz;
+ }
+ pr_cont("\n");
+ early_iounmap(config_tables, efi.systab->nr_tables * sz);
+ return 0;
+}
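
The table matching in efi_config_init()/match_config_table() above reduces to a lookup in a sentinel-terminated array keyed on a 16-byte GUID. A condensed sketch; struct table_type, match_table() and the zeroed terminator are simplifications of efi_config_table_type_t, match_config_table() and the NULL_GUID entry:

    #include <string.h>

    struct table_type {
            unsigned char guid[16];         /* EFI GUIDs are 128 bits */
            const char *name;
            unsigned long *ptr;             /* where the table address is stored */
    };

    /* Returns 1 and records the address on a match, 0 otherwise. */
    static int match_table(const unsigned char *guid, unsigned long table,
                           const struct table_type *types)
    {
            int i;

            for (i = 0; types[i].ptr; i++) {        /* zeroed entry terminates */
                    if (!memcmp(guid, types[i].guid, sizeof(types[i].guid))) {
                            *types[i].ptr = table;
                            return 1;
                    }
            }
            return 0;
    }
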
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 8a7432a4b413..933eb027d527 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -564,7 +564,7 @@ static int efivar_sysfs_destroy(struct efivar_entry *entry, void *data)
return 0;
}
-void efivars_sysfs_exit(void)
+static void efivars_sysfs_exit(void)
{
/* Remove all entries and destroy */
__efivar_entry_iter(efivar_sysfs_destroy, &efivar_sysfs_list, NULL, NULL);
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 6eb535ffeddc..e5a67b24587a 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -764,6 +764,13 @@ static __init int gsmi_system_valid(void)
static struct kobject *gsmi_kobj;
static struct efivars efivars;
+static const struct platform_device_info gsmi_dev_info = {
+ .name = "gsmi",
+ .id = -1,
+ /* SMI callbacks require 32bit addresses */
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
static __init int gsmi_init(void)
{
unsigned long flags;
@@ -776,7 +783,7 @@ static __init int gsmi_init(void)
gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
/* register device */
- gsmi_dev.pdev = platform_device_register_simple("gsmi", -1, NULL, 0);
+ gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
if (IS_ERR(gsmi_dev.pdev)) {
printk(KERN_ERR "gsmi: unable to register platform device\n");
return PTR_ERR(gsmi_dev.pdev);
@@ -785,10 +792,6 @@ static __init int gsmi_init(void)
/* SMI access needs to be serialized */
spin_lock_init(&gsmi_dev.lock);
- /* SMI callbacks require 32bit addresses */
- gsmi_dev.pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- gsmi_dev.pdev->dev.dma_mask =
- &gsmi_dev.pdev->dev.coherent_dma_mask;
ret = -ENOMEM;
gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
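
The gsmi hunk above folds the 32-bit DMA mask into the registration descriptor instead of patching the device after platform_device_register_simple(). The same pattern for a hypothetical "foo" device; only the name is made up, while platform_device_info.dma_mask and platform_device_register_full() are the interfaces used above:

    #include <linux/platform_device.h>
    #include <linux/dma-mapping.h>
    #include <linux/err.h>

    static const struct platform_device_info foo_dev_info = {
            .name           = "foo",
            .id             = -1,
            /* device can only address 32-bit DMA buffers */
            .dma_mask       = DMA_BIT_MASK(32),
    };

    static struct platform_device *foo_pdev;

    static int __init foo_register(void)
    {
            foo_pdev = platform_device_register_full(&foo_dev_info);
            return IS_ERR(foo_pdev) ? PTR_ERR(foo_pdev) : 0;
    }
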
diff --git a/drivers/fmc/Kconfig b/drivers/fmc/Kconfig
index c01cf45bc3d8..3a75f4256d08 100644
--- a/drivers/fmc/Kconfig
+++ b/drivers/fmc/Kconfig
@@ -46,6 +46,6 @@ config FMC_CHARDEV
This driver matches every mezzanine device and allows user
space to read and write registers using a char device. It
can be used to write user-space drivers, or just get
- aquainted with a mezzanine before writing its specific driver.
+ acquainted with a mezzanine before writing its specific driver.
endif # FMC
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 17df6db5dca7..8847adf392b7 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -15,8 +15,9 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-
-#include <asm/mach/irq.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/gpio-davinci.h>
struct davinci_gpio_regs {
u32 dir;
@@ -31,13 +32,14 @@ struct davinci_gpio_regs {
u32 intstat;
};
+#define BINTEN 0x8 /* GPIO Interrupt Per-Bank Enable Register */
+
#define chip2controller(chip) \
container_of(chip, struct davinci_gpio_controller, chip)
-static struct davinci_gpio_controller chips[DIV_ROUND_UP(DAVINCI_N_GPIO, 32)];
static void __iomem *gpio_base;
-static struct davinci_gpio_regs __iomem __init *gpio2regs(unsigned gpio)
+static struct davinci_gpio_regs __iomem *gpio2regs(unsigned gpio)
{
void __iomem *ptr;
@@ -65,7 +67,7 @@ static inline struct davinci_gpio_regs __iomem *irq2regs(int irq)
return g;
}
-static int __init davinci_gpio_irq_setup(void);
+static int davinci_gpio_irq_setup(struct platform_device *pdev);
/*--------------------------------------------------------------------------*/
@@ -131,33 +133,53 @@ davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
__raw_writel((1 << offset), value ? &g->set_data : &g->clr_data);
}
-static int __init davinci_gpio_setup(void)
+static int davinci_gpio_probe(struct platform_device *pdev)
{
int i, base;
unsigned ngpio;
- struct davinci_soc_info *soc_info = &davinci_soc_info;
- struct davinci_gpio_regs *regs;
-
- if (soc_info->gpio_type != GPIO_TYPE_DAVINCI)
- return 0;
+ struct davinci_gpio_controller *chips;
+ struct davinci_gpio_platform_data *pdata;
+ struct davinci_gpio_regs __iomem *regs;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ pdata = dev->platform_data;
+ if (!pdata) {
+ dev_err(dev, "No platform data found\n");
+ return -EINVAL;
+ }
/*
* The gpio banks conceptually expose a segmented bitmap,
* and "ngpio" is one more than the largest zero-based
* bit index that's valid.
*/
- ngpio = soc_info->gpio_num;
+ ngpio = pdata->ngpio;
if (ngpio == 0) {
- pr_err("GPIO setup: how many GPIOs?\n");
+ dev_err(dev, "How many GPIOs?\n");
return -EINVAL;
}
if (WARN_ON(DAVINCI_N_GPIO < ngpio))
ngpio = DAVINCI_N_GPIO;
- gpio_base = ioremap(soc_info->gpio_base, SZ_4K);
- if (WARN_ON(!gpio_base))
+ chips = devm_kzalloc(dev,
+ ngpio * sizeof(struct davinci_gpio_controller),
+ GFP_KERNEL);
+ if (!chips) {
+ dev_err(dev, "Memory allocation failed\n");
return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Invalid memory resource\n");
+ return -EBUSY;
+ }
+
+ gpio_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(gpio_base))
+ return PTR_ERR(gpio_base);
for (i = 0, base = 0; base < ngpio; i++, base += 32) {
chips[i].chip.label = "DaVinci";
@@ -183,13 +205,10 @@ static int __init davinci_gpio_setup(void)
gpiochip_add(&chips[i].chip);
}
- soc_info->gpio_ctlrs = chips;
- soc_info->gpio_ctlrs_num = DIV_ROUND_UP(ngpio, 32);
-
- davinci_gpio_irq_setup();
+ platform_set_drvdata(pdev, chips);
+ davinci_gpio_irq_setup(pdev);
return 0;
}
-pure_initcall(davinci_gpio_setup);
/*--------------------------------------------------------------------------*/
/*
@@ -302,13 +321,14 @@ static int gpio_to_irq_banked(struct gpio_chip *chip, unsigned offset)
static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
{
- struct davinci_soc_info *soc_info = &davinci_soc_info;
+ struct davinci_gpio_controller *d = chip2controller(chip);
- /* NOTE: we assume for now that only irqs in the first gpio_chip
+ /*
+ * NOTE: we assume for now that only irqs in the first gpio_chip
* can provide direct-mapped IRQs to AINTC (up to 32 GPIOs).
*/
- if (offset < soc_info->gpio_unbanked)
- return soc_info->gpio_irq + offset;
+ if (offset < d->irq_base)
+ return d->gpio_irq + offset;
else
return -ENODEV;
}
@@ -317,12 +337,11 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
{
struct davinci_gpio_controller *d;
struct davinci_gpio_regs __iomem *g;
- struct davinci_soc_info *soc_info = &davinci_soc_info;
u32 mask;
d = (struct davinci_gpio_controller *)data->handler_data;
g = (struct davinci_gpio_regs __iomem *)d->regs;
- mask = __gpio_mask(data->irq - soc_info->gpio_irq);
+ mask = __gpio_mask(data->irq - d->gpio_irq);
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
return -EINVAL;
@@ -343,24 +362,33 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
* (dm6446) can be set appropriately for GPIOV33 pins.
*/
-static int __init davinci_gpio_irq_setup(void)
+static int davinci_gpio_irq_setup(struct platform_device *pdev)
{
unsigned gpio, irq, bank;
struct clk *clk;
u32 binten = 0;
unsigned ngpio, bank_irq;
- struct davinci_soc_info *soc_info = &davinci_soc_info;
- struct davinci_gpio_regs __iomem *g;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct davinci_gpio_controller *chips = platform_get_drvdata(pdev);
+ struct davinci_gpio_platform_data *pdata = dev->platform_data;
+ struct davinci_gpio_regs __iomem *g;
- ngpio = soc_info->gpio_num;
+ ngpio = pdata->ngpio;
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "Invalid IRQ resource\n");
+ return -EBUSY;
+ }
- bank_irq = soc_info->gpio_irq;
- if (bank_irq == 0) {
- printk(KERN_ERR "Don't know first GPIO bank IRQ.\n");
- return -EINVAL;
+ bank_irq = res->start;
+
+ if (!bank_irq) {
+ dev_err(dev, "Invalid IRQ resource\n");
+ return -ENODEV;
}
- clk = clk_get(NULL, "gpio");
+ clk = devm_clk_get(dev, "gpio");
if (IS_ERR(clk)) {
printk(KERN_ERR "Error %ld getting gpio clock?\n",
PTR_ERR(clk));
@@ -368,16 +396,17 @@ static int __init davinci_gpio_irq_setup(void)
}
clk_prepare_enable(clk);
- /* Arrange gpio_to_irq() support, handling either direct IRQs or
+ /*
+ * Arrange gpio_to_irq() support, handling either direct IRQs or
* banked IRQs. Having GPIOs in the first GPIO bank use direct
* IRQs, while the others use banked IRQs, would need some setup
* tweaks to recognize hardware which can do that.
*/
for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 32) {
chips[bank].chip.to_irq = gpio_to_irq_banked;
- chips[bank].irq_base = soc_info->gpio_unbanked
+ chips[bank].irq_base = pdata->gpio_unbanked
? -EINVAL
- : (soc_info->intc_irq_num + gpio);
+ : (pdata->intc_irq_num + gpio);
}
/*
@@ -385,7 +414,7 @@ static int __init davinci_gpio_irq_setup(void)
* controller only handling trigger modes. We currently assume no
* IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs.
*/
- if (soc_info->gpio_unbanked) {
+ if (pdata->gpio_unbanked) {
static struct irq_chip_type gpio_unbanked;
/* pass "bank 0" GPIO IRQs to AINTC */
@@ -405,7 +434,7 @@ static int __init davinci_gpio_irq_setup(void)
__raw_writel(~0, &g->set_rising);
/* set the direct IRQs up to use that irqchip */
- for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) {
+ for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++, irq++) {
irq_set_chip(irq, &gpio_unbanked.chip);
irq_set_handler_data(irq, &chips[gpio / 32]);
irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH);
@@ -450,12 +479,31 @@ static int __init davinci_gpio_irq_setup(void)
}
done:
- /* BINTEN -- per-bank interrupt enable. genirq would also let these
+ /*
+ * BINTEN -- per-bank interrupt enable. genirq would also let these
* bits be set/cleared dynamically.
*/
- __raw_writel(binten, gpio_base + 0x08);
+ __raw_writel(binten, gpio_base + BINTEN);
printk(KERN_INFO "DaVinci: %d gpio irqs\n", irq - gpio_to_irq(0));
return 0;
}
+
+static struct platform_driver davinci_gpio_driver = {
+ .probe = davinci_gpio_probe,
+ .driver = {
+ .name = "davinci_gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+/*
+ * GPIO driver registration needs to be done before machine_init functions
+ * access GPIO. Hence davinci_gpio_drv_reg() is a postcore_initcall.
+ */
+static int __init davinci_gpio_drv_reg(void)
+{
+ return platform_driver_register(&davinci_gpio_driver);
+}
+postcore_initcall(davinci_gpio_drv_reg);
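
With the conversion above, a non-DT board file is expected to provide a "davinci_gpio" platform device carrying the register window, the bank 0 IRQ and the platform data fields the probe path reads (ngpio, gpio_unbanked, intc_irq_num). A rough sketch with placeholder addresses and counts:

    #include <linux/kernel.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>
    #include <linux/platform_data/gpio-davinci.h>

    static struct davinci_gpio_platform_data board_gpio_pdata = {
            .ngpio          = 144,  /* placeholder: SoC-specific GPIO count */
            .gpio_unbanked  = 0,    /* no direct-mapped AINTC IRQs */
            .intc_irq_num   = 224,  /* placeholder: first banked GPIO IRQ */
    };

    static struct resource board_gpio_resources[] = {
            DEFINE_RES_MEM(0x01e26000, 0x1000),     /* placeholder register window */
            DEFINE_RES_IRQ(42),                     /* placeholder bank 0 IRQ */
    };

    static struct platform_device board_gpio_device = {
            .name           = "davinci_gpio",
            .id             = -1,
            .resource       = board_gpio_resources,
            .num_resources  = ARRAY_SIZE(board_gpio_resources),
            .dev            = {
                    .platform_data = &board_gpio_pdata,
            },
    };
    /* registered from board init code: platform_device_register(&board_gpio_device); */
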
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index a0b33a216d4a..de9630b08b99 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/irq.h>
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 8ea3b33d4b40..a90be34e4d5c 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -10,7 +10,7 @@
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/module.h>
-
+#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 358a21c2d811..76e02b9460e6 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -1033,7 +1033,7 @@ static int s3c24xx_gpiolib_fbank_to_irq(struct gpio_chip *chip, unsigned offset)
}
#endif
-#ifdef CONFIG_PLAT_S3C64XX
+#ifdef CONFIG_ARCH_S3C64XX
static int s3c64xx_gpiolib_mbank_to_irq(struct gpio_chip *chip, unsigned pin)
{
return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO;
@@ -1174,7 +1174,7 @@ struct samsung_gpio_chip s3c24xx_gpios[] = {
*/
static struct samsung_gpio_chip s3c64xx_gpios_4bit[] = {
-#ifdef CONFIG_PLAT_S3C64XX
+#ifdef CONFIG_ARCH_S3C64XX
{
.chip = {
.base = S3C64XX_GPA(0),
@@ -1227,7 +1227,7 @@ static struct samsung_gpio_chip s3c64xx_gpios_4bit[] = {
};
static struct samsung_gpio_chip s3c64xx_gpios_4bit2[] = {
-#ifdef CONFIG_PLAT_S3C64XX
+#ifdef CONFIG_ARCH_S3C64XX
{
.base = S3C64XX_GPH_BASE + 0x4,
.chip = {
@@ -1257,7 +1257,7 @@ static struct samsung_gpio_chip s3c64xx_gpios_4bit2[] = {
};
static struct samsung_gpio_chip s3c64xx_gpios_2bit[] = {
-#ifdef CONFIG_PLAT_S3C64XX
+#ifdef CONFIG_ARCH_S3C64XX
{
.base = S3C64XX_GPF_BASE,
.config = &samsung_gpio_cfgs[6],
@@ -2082,34 +2082,14 @@ static __init int samsung_gpiolib_init(void)
int i, nr_chips;
int group = 0;
-#if defined(CONFIG_PINCTRL_EXYNOS) || defined(CONFIG_PINCTRL_EXYNOS5440)
/*
- * This gpio driver includes support for device tree support and there
- * are platforms using it. In order to maintain compatibility with those
- * platforms, and to allow non-dt Exynos4210 platforms to use this
- * gpiolib support, a check is added to find out if there is a active
- * pin-controller driver support available. If it is available, this
- * gpiolib support is ignored and the gpiolib support available in
- * pin-controller driver is used. This is a temporary check and will go
- * away when all of the Exynos4210 platforms have switched to using
- * device tree and the pin-ctrl driver.
- */
- struct device_node *pctrl_np;
- static const struct of_device_id exynos_pinctrl_ids[] = {
- { .compatible = "samsung,s3c2412-pinctrl", },
- { .compatible = "samsung,s3c2416-pinctrl", },
- { .compatible = "samsung,s3c2440-pinctrl", },
- { .compatible = "samsung,s3c2450-pinctrl", },
- { .compatible = "samsung,exynos4210-pinctrl", },
- { .compatible = "samsung,exynos4x12-pinctrl", },
- { .compatible = "samsung,exynos5250-pinctrl", },
- { .compatible = "samsung,exynos5440-pinctrl", },
- { }
- };
- for_each_matching_node(pctrl_np, exynos_pinctrl_ids)
- if (pctrl_np && of_device_is_available(pctrl_np))
- return -ENODEV;
-#endif
+ * Currently there are two drivers that can provide GPIO support for
+ * Samsung SoCs. For device tree enabled platforms, the new
+ * pinctrl-samsung driver is used, providing both GPIO and pin control
+ * interfaces. For legacy (non-DT) platforms this driver is used.
+ */
+ if (of_have_populated_dt())
+ return -ENODEV;
samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs));
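
The of_have_populated_dt() test above is the generic guard for a legacy driver that must step aside when a DT-based replacement (here pinctrl-samsung) owns the hardware; in isolation the pattern is simply:

    #include <linux/init.h>
    #include <linux/errno.h>
    #include <linux/of.h>

    static int __init legacy_gpio_init(void)
    {
            /* On DT boots the pinctrl driver provides the GPIOs; back off. */
            if (of_have_populated_dt())
                    return -ENODEV;

            /* ... legacy board-file-driven registration continues here ... */
            return 0;
    }
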
diff --git a/drivers/gpio/gpio-tnetv107x.c b/drivers/gpio/gpio-tnetv107x.c
index 3fa3e2867e19..58445bb69106 100644
--- a/drivers/gpio/gpio-tnetv107x.c
+++ b/drivers/gpio/gpio-tnetv107x.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>
+#include <linux/platform_data/gpio-davinci.h>
#include <mach/common.h>
#include <mach/tnetv107x.h>
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 5c1ef2b3ef18..f2beb728ed8f 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -73,15 +73,8 @@ static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
static irqreturn_t acpi_gpio_irq_handler_evt(int irq, void *data)
{
struct acpi_gpio_evt_pin *evt_pin = data;
- struct acpi_object_list args;
- union acpi_object arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = evt_pin->pin;
- args.count = 1;
- args.pointer = &arg;
-
- acpi_evaluate_object(evt_pin->evt_handle, NULL, &args, NULL);
+ acpi_execute_simple_method(evt_pin->evt_handle, NULL, evt_pin->pin);
return IRQ_HANDLED;
}
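
acpi_execute_simple_method(), used above, wraps exactly the boilerplate the hunk removes: build a one-integer argument list and evaluate the handle. A minimal caller, assuming a valid event handle:

    #include <linux/acpi.h>

    static void notify_event_pin(acpi_handle evt_handle, u64 pin)
    {
            acpi_status status;

            /* Evaluate the handle itself (method name NULL) with one integer arg. */
            status = acpi_execute_simple_method(evt_handle, NULL, pin);
            if (ACPI_FAILURE(status))
                    pr_warn("ACPI GPIO event method failed\n");
    }
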
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 0dee0e0c247a..dadbac277267 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -408,7 +408,7 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (!value_sd) {
- value_sd = sysfs_get_dirent(dev->kobj.sd, NULL, "value");
+ value_sd = sysfs_get_dirent(dev->kobj.sd, "value");
if (!value_sd) {
ret = -ENODEV;
goto err_out;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 955555d6ec88..f86427591167 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -29,11 +29,17 @@ config DRM_USB
config DRM_KMS_HELPER
tristate
depends on DRM
+ help
+ CRTC helpers for KMS drivers.
+
+config DRM_KMS_FB_HELPER
+ bool
+ depends on DRM_KMS_HELPER
select FB
select FRAMEBUFFER_CONSOLE if !EXPERT
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
help
- FB and CRTC helpers for KMS drivers.
+ FBDEV helpers for KMS drivers.
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
@@ -64,6 +70,7 @@ config DRM_GEM_CMA_HELPER
config DRM_KMS_CMA_HELPER
bool
select DRM_GEM_CMA_HELPER
+ select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
@@ -96,6 +103,7 @@ config DRM_RADEON
select FB_CFB_IMAGEBLIT
select FW_LOADER
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
select POWER_SUPPLY
select HWMON
@@ -120,64 +128,7 @@ config DRM_I810
selected, the module will be called i810. AGP support is required
for this driver to work.
-config DRM_I915
- tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
- depends on DRM
- depends on AGP
- depends on AGP_INTEL
- # we need shmfs for the swappable backing store, and in particular
- # the shmem_readpage() which depends upon tmpfs
- select SHMEM
- select TMPFS
- select DRM_KMS_HELPER
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- # i915 depends on ACPI_VIDEO when ACPI is enabled
- # but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select BACKLIGHT_LCD_SUPPORT if ACPI
- select BACKLIGHT_CLASS_DEVICE if ACPI
- select VIDEO_OUTPUT_CONTROL if ACPI
- select INPUT if ACPI
- select THERMAL if ACPI
- select ACPI_VIDEO if ACPI
- select ACPI_BUTTON if ACPI
- help
- Choose this option if you have a system that has "Intel Graphics
- Media Accelerator" or "HD Graphics" integrated graphics,
- including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
- G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
- Core i5, Core i7 as well as Atom CPUs with integrated graphics.
- If M is selected, the module will be called i915. AGP support
- is required for this driver to work. This driver is used by
- the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
- replaces the older i830 module that supported a subset of the
- hardware in older X.org releases.
-
- Note that the older i810/i815 chipsets require the use of the
- i810 driver instead, and the Atom z5xx series has an entirely
- different implementation.
-
-config DRM_I915_KMS
- bool "Enable modesetting on intel by default"
- depends on DRM_I915
- help
- Choose this option if you want kernel modesetting enabled by default,
- and you have a new enough userspace to support this. Running old
- userspaces with this enabled will cause pain. Note that this causes
- the driver to bind to PCI devices, which precludes loading things
- like intelfb.
-
-config DRM_I915_PRELIMINARY_HW_SUPPORT
- bool "Enable preliminary support for prerelease Intel hardware by default"
- depends on DRM_I915
- help
- Choose this option if you have prerelease Intel hardware and want the
- i915 driver to support it by default. You can enable such support at
- runtime with the module option i915.preliminary_hw_support=1; this
- option changes the default for that module option.
-
- If in doubt, say "N".
+source "drivers/gpu/drm/i915/Kconfig"
config DRM_MGA
tristate "Matrox g200/g400"
@@ -225,6 +176,8 @@ source "drivers/gpu/drm/mgag200/Kconfig"
source "drivers/gpu/drm/cirrus/Kconfig"
+source "drivers/gpu/drm/armada/Kconfig"
+
source "drivers/gpu/drm/rcar-du/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
@@ -236,3 +189,5 @@ source "drivers/gpu/drm/tilcdc/Kconfig"
source "drivers/gpu/drm/qxl/Kconfig"
source "drivers/gpu/drm/msm/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f089adfe70ee..cc08b845f965 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -21,8 +21,9 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
drm-usb-y := drm_usb.o
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -49,10 +50,12 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_ARMADA) += armada/
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_DRM_TILCDC) += tilcdc/
obj-$(CONFIG_DRM_QXL) += qxl/
obj-$(CONFIG_DRM_MSM) += msm/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
new file mode 100644
index 000000000000..40d371521fe1
--- /dev/null
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -0,0 +1,24 @@
+config DRM_ARMADA
+ tristate "DRM support for Marvell Armada SoCs"
+ depends on DRM && HAVE_CLK && ARM
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select DRM_KMS_HELPER
+ help
+ Support the "LCD" controllers found on the Marvell Armada 510
+ devices. There are two controllers on the device, each controller
+ supports graphics and video overlays.
+
+ This driver provides no built-in acceleration; acceleration is
+ performed by other IP found on the SoC. This driver provides
+ kernel mode setting and buffer management to userspace.
+
+config DRM_ARMADA_TDA1998X
+ bool "Support TDA1998X HDMI output"
+ depends on DRM_ARMADA != n
+ depends on I2C && DRM_I2C_NXP_TDA998X = y
+ default y
+ help
+ Support the TDA1998x HDMI output device found on the Solid-Run
+ CuBox.
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
new file mode 100644
index 000000000000..d6f43e06150a
--- /dev/null
+++ b/drivers/gpu/drm/armada/Makefile
@@ -0,0 +1,7 @@
+armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
+ armada_gem.o armada_output.o armada_overlay.o \
+ armada_slave.o
+armada-y += armada_510.o
+armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
+
+obj-$(CONFIG_DRM_ARMADA) := armada.o
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
new file mode 100644
index 000000000000..59948eff6095
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Armada 510 (aka Dove) variant support
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_hw.h"
+
+static int armada510_init(struct armada_private *priv, struct device *dev)
+{
+ priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
+
+ if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
+ priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
+
+ return PTR_RET(priv->extclk[0]);
+}
+
+static int armada510_crtc_init(struct armada_crtc *dcrtc)
+{
+ /* Lower the watermark to eliminate jitter at higher bandwidths */
+ armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+ return 0;
+}
+
+/*
+ * Armada510 specific SCLK register selection.
+ * This gets called with sclk = NULL to test whether the mode is
+ * supportable, and again with sclk != NULL to set the clocks up for
+ * that. The former can return an error, but the latter is expected
+ * not to.
+ *
+ * We are currently rather rudimentary here, always selecting
+ * EXT_REF_CLK_1 for LCD0 and returning an error for LCD1. This
+ * needs improvement!
+ */
+static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
+ const struct drm_display_mode *mode, uint32_t *sclk)
+{
+ struct armada_private *priv = dcrtc->crtc.dev->dev_private;
+ struct clk *clk = priv->extclk[0];
+ int ret;
+
+ if (dcrtc->num == 1)
+ return -EINVAL;
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (dcrtc->clk != clk) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+ dcrtc->clk = clk;
+ }
+
+ if (sclk) {
+ uint32_t rate, ref, div;
+
+ rate = mode->clock * 1000;
+ ref = clk_round_rate(clk, rate);
+ div = DIV_ROUND_UP(ref, rate);
+ if (div < 1)
+ div = 1;
+
+ clk_set_rate(clk, ref);
+ *sclk = div | SCLK_510_EXTCLK1;
+ }
+
+ return 0;
+}
+
+const struct armada_variant armada510_ops = {
+ .has_spu_adv_reg = true,
+ .spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
+ .init = armada510_init,
+ .crtc_init = armada510_crtc_init,
+ .crtc_compute_clock = armada510_crtc_compute_clock,
+};
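
To make the divider selection in armada510_crtc_compute_clock() concrete: mode->clock is in kHz and the clock rates are in Hz, so for a 148500 kHz (1080p) pixel clock with EXT_REF_CLK_1 rounding to 148.5 MHz the computed divider is 1. A small illustrative helper; pick_divider() is not part of the driver:

    #include <linux/kernel.h>       /* DIV_ROUND_UP() */

    /* Illustrative only: mirrors the rate/ref/div arithmetic above. */
    static unsigned int pick_divider(unsigned int mode_clock_khz,
                                     unsigned long rounded_ref_hz)
    {
            unsigned long rate = mode_clock_khz * 1000UL;
            unsigned int div = DIV_ROUND_UP(rounded_ref_hz, rate);

            return div < 1 ? 1 : div;       /* same clamp as the code above */
    }
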
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
new file mode 100644
index 000000000000..d8e398275ca8
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -0,0 +1,1098 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+struct armada_frame_work {
+ struct drm_pending_vblank_event *event;
+ struct armada_regs regs[4];
+ struct drm_framebuffer *old_fb;
+};
+
+enum csc_mode {
+ CSC_AUTO = 0,
+ CSC_YUV_CCIR601 = 1,
+ CSC_YUV_CCIR709 = 2,
+ CSC_RGB_COMPUTER = 1,
+ CSC_RGB_STUDIO = 2,
+};
+
+/*
+ * A note about interlacing. Let's consider HDMI 1920x1080i.
+ * The timing parameters we have from X are:
+ * Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
+ * 1920 2448 2492 2640 1080 1084 1094 1125
+ * Which get translated to:
+ * Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
+ * 1920 2448 2492 2640 540 542 547 562
+ *
+ * This is how it is defined by CEA-861-D - line and pixel numbers are
+ * referenced to the rising edge of VSYNC and HSYNC. Total clocks per
+ * line: 2640. For the odd frame, the first active line is line 21;
+ * for the even frame, the first active line is line 584.
+ *
+ * LN: 560 561 562 563 567 568 569
+ * DE: ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: _________________________|~~~~~~//~~~~~~~~~~~~~~~|__________
+ * 22 blanking lines. VSYNC at 1320 (referenced to the HSYNC rising edge).
+ *
+ * LN: 1123 1124 1125 1 5 6 7
+ * DE: ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: ____________________|~~~~~~~~~~~//~~~~~~~~~~|_______________
+ * 23 blanking lines
+ *
+ * The Armada LCD Controller line and pixel numbers are, like X timings,
+ * referenced to the top left of the active frame.
+ *
+ * So, translating these to our LCD controller:
+ * Odd frame, 563 total lines, VSYNC at line 543-548, pixel 1128.
+ * Even frame, 562 total lines, VSYNC at line 542-547, pixel 2448.
+ * Note: Vsync front porch remains constant!
+ *
+ * if (odd_frame) {
+ * vtotal = mode->crtc_vtotal + 1;
+ * vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay + 1;
+ * vhorizpos = mode->crtc_hsync_start - mode->crtc_htotal / 2
+ * } else {
+ * vtotal = mode->crtc_vtotal;
+ * vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay;
+ * vhorizpos = mode->crtc_hsync_start;
+ * }
+ * vfrontporch = mode->crtc_vtotal - mode->crtc_vsync_end;
+ *
+ * So, we need to reprogram these registers on each vsync event:
+ * LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ *
+ * Note: we do not use the frame done interrupts because these appear
+ * to happen too early, and lead to jitter on the display (presumably
+ * they occur at the end of the last active line, before the vsync back
+ * porch, which we're reprogramming.)
+ */
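/*
 * (Editorial worked example, not part of the driver: plugging the
 * 1920x1080i field timings quoted above, crtc_vdisplay = 540,
 * crtc_vsync_start = 542, crtc_vsync_end = 547, crtc_vtotal = 562,
 * crtc_hsync_start = 2448, crtc_htotal = 2640, into the pseudo-code
 * gives:
 *   odd frame:  vtotal = 563, vbackporch = 3, vhorizpos = 2448 - 1320 = 1128
 *   even frame: vtotal = 562, vbackporch = 2, vhorizpos = 2448
 *   vfrontporch = 562 - 547 = 15
 * which matches the "VSYNC at line 543-548, pixel 1128" and
 * "VSYNC at line 542-547, pixel 2448" summary above.)
 */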
+
+void
+armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
+{
+ while (regs->offset != ~0) {
+ void __iomem *reg = dcrtc->base + regs->offset;
+ uint32_t val;
+
+ val = regs->mask;
+ if (val != 0)
+ val &= readl_relaxed(reg);
+ writel_relaxed(val | regs->val, reg);
+ ++regs;
+ }
+}
+
+#define dpms_blanked(dpms) ((dpms) != DRM_MODE_DPMS_ON)
+
+static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
+{
+ uint32_t dumb_ctrl;
+
+ dumb_ctrl = dcrtc->cfg_dumb_ctrl;
+
+ if (!dpms_blanked(dcrtc->dpms))
+ dumb_ctrl |= CFG_DUMB_ENA;
+
+ /*
+ * When the dumb interface isn't in DUMB24_RGB888_0 mode, it might
+ * be using SPI or GPIO. If we set this to DUMB_BLANK, we will
+ * force LCD_D[23:0] to output blank color, overriding the GPIO or
+ * SPI usage. So leave it as-is unless in DUMB24_RGB888_0 mode.
+ */
+ if (dpms_blanked(dcrtc->dpms) &&
+ (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
+ dumb_ctrl &= ~DUMB_MASK;
+ dumb_ctrl |= DUMB_BLANK;
+ }
+
+ /*
+ * The documentation doesn't indicate what the normal state of
+ * the sync signals are. Sebastian Hesselbart kindly probed
+ * these signals on his board to determine their state.
+ *
+ * The non-inverted state of the sync signals is active high.
+ * Setting these bits makes the appropriate signal active low.
+ */
+ if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
+ dumb_ctrl |= CFG_INV_CSYNC;
+ if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
+ dumb_ctrl |= CFG_INV_HSYNC;
+ if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
+ dumb_ctrl |= CFG_INV_VSYNC;
+
+ if (dcrtc->dumb_ctrl != dumb_ctrl) {
+ dcrtc->dumb_ctrl = dumb_ctrl;
+ writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
+ }
+}
+
+static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
+ int x, int y, struct armada_regs *regs, bool interlaced)
+{
+ struct armada_gem_object *obj = drm_fb_obj(fb);
+ unsigned pitch = fb->pitches[0];
+ unsigned offset = y * pitch + x * fb->bits_per_pixel / 8;
+ uint32_t addr_odd, addr_even;
+ unsigned i = 0;
+
+ DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
+ pitch, x, y, fb->bits_per_pixel);
+
+ addr_odd = addr_even = obj->dev_addr + offset;
+
+ if (interlaced) {
+ addr_even += pitch;
+ pitch *= 2;
+ }
+
+ /* write offset, base, and pitch */
+ armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
+ armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
+ armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
+
+ return i;
+}
+
+static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
+ struct armada_frame_work *work)
+{
+ struct drm_device *dev = dcrtc->crtc.dev;
+ unsigned long flags;
+ int ret;
+
+ ret = drm_vblank_get(dev, dcrtc->num);
+ if (ret) {
+ DRM_ERROR("failed to acquire vblank counter\n");
+ return ret;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (!dcrtc->frame_work)
+ dcrtc->frame_work = work;
+ else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (ret)
+ drm_vblank_put(dev, dcrtc->num);
+
+ return ret;
+}
+
+static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc)
+{
+ struct drm_device *dev = dcrtc->crtc.dev;
+ struct armada_frame_work *work = dcrtc->frame_work;
+
+ dcrtc->frame_work = NULL;
+
+ armada_drm_crtc_update_regs(dcrtc, work->regs);
+
+ if (work->event)
+ drm_send_vblank_event(dev, dcrtc->num, work->event);
+
+ drm_vblank_put(dev, dcrtc->num);
+
+ /* Finally, queue the process-half of the cleanup. */
+ __armada_drm_queue_unref_work(dcrtc->crtc.dev, work->old_fb);
+ kfree(work);
+}
+
+static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
+ struct drm_framebuffer *fb, bool force)
+{
+ struct armada_frame_work *work;
+
+ if (!fb)
+ return;
+
+ if (force) {
+ /* Display is disabled, so just drop the old fb */
+ drm_framebuffer_unreference(fb);
+ return;
+ }
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (work) {
+ int i = 0;
+ work->event = NULL;
+ work->old_fb = fb;
+ armada_reg_queue_end(work->regs, i);
+
+ if (armada_drm_crtc_queue_frame_work(dcrtc, work) == 0)
+ return;
+
+ kfree(work);
+ }
+
+ /*
+ * Oops - just drop the reference immediately and hope for
+ * the best. The worst that will happen is the buffer gets
+ * reused before it has finished being displayed.
+ */
+ drm_framebuffer_unreference(fb);
+}
+
+static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
+{
+ struct drm_device *dev = dcrtc->crtc.dev;
+
+ /*
+ * Tell the DRM core that vblank IRQs aren't going to happen for
+ * a while. This cleans up any pending vblank events for us.
+ */
+ drm_vblank_off(dev, dcrtc->num);
+
+ /* Handle any pending flip event. */
+ spin_lock_irq(&dev->event_lock);
+ if (dcrtc->frame_work)
+ armada_drm_crtc_complete_frame_work(dcrtc);
+ spin_unlock_irq(&dev->event_lock);
+}
+
+void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
+ int idx)
+{
+}
+
+void armada_drm_crtc_gamma_get(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ int idx)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+ if (dcrtc->dpms != dpms) {
+ dcrtc->dpms = dpms;
+ armada_drm_crtc_update(dcrtc);
+ if (dpms_blanked(dpms))
+ armada_drm_vblank_off(dcrtc);
+ }
+}
+
+/*
+ * Prepare for a mode set. Turn off overlay to ensure that we don't end
+ * up with the overlay size being bigger than the active screen size.
+ * We rely upon X refreshing this state after the mode set has completed.
+ *
+ * The mode_config.mutex will be held for this call
+ */
+static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_plane *plane;
+
+ /*
+ * If we have an overlay plane associated with this CRTC, disable
+ * it before the modeset to avoid its coordinates being outside
+ * the new mode parameters. DRM doesn't provide help with this.
+ */
+ plane = dcrtc->plane;
+ if (plane) {
+ struct drm_framebuffer *fb = plane->fb;
+
+ plane->funcs->disable_plane(plane);
+ plane->fb = NULL;
+ plane->crtc = NULL;
+ drm_framebuffer_unreference(fb);
+ }
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_commit(struct drm_crtc *crtc)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+ if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
+ dcrtc->dpms = DRM_MODE_DPMS_ON;
+ armada_drm_crtc_update(dcrtc);
+ }
+}
+
+/* The mode_config.mutex will be held for this call */
+static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode, struct drm_display_mode *adj)
+{
+ struct armada_private *priv = crtc->dev->dev_private;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ int ret;
+
+ /* We can't do interlaced modes if we don't have the SPU_ADV_REG */
+ if (!priv->variant->has_spu_adv_reg &&
+ adj->flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
+ /* Check whether the display mode is possible */
+ ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
+ if (ret)
+ return false;
+
+ return true;
+}
+
+void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
+{
+ struct armada_vbl_event *e, *n;
+ void __iomem *base = dcrtc->base;
+
+ if (stat & DMA_FF_UNDERFLOW)
+ DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
+ if (stat & GRA_FF_UNDERFLOW)
+ DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
+
+ if (stat & VSYNC_IRQ)
+ drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
+
+ spin_lock(&dcrtc->irq_lock);
+
+ list_for_each_entry_safe(e, n, &dcrtc->vbl_list, node) {
+ list_del_init(&e->node);
+ drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ e->fn(dcrtc, e->data);
+ }
+
+ if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
+ int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
+ uint32_t val;
+
+ writel_relaxed(dcrtc->v[i].spu_v_porch, base + LCD_SPU_V_PORCH);
+ writel_relaxed(dcrtc->v[i].spu_v_h_total,
+ base + LCD_SPUT_V_H_TOTAL);
+
+ val = readl_relaxed(base + LCD_SPU_ADV_REG);
+ val &= ~(ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | ADV_VSYNCOFFEN);
+ val |= dcrtc->v[i].spu_adv_reg;
+ writel_relaxed(val, base + LCD_SPU_ADV_REG);
+ }
+
+ if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
+ writel_relaxed(dcrtc->cursor_hw_pos,
+ base + LCD_SPU_HWC_OVSA_HPXL_VLN);
+ writel_relaxed(dcrtc->cursor_hw_sz,
+ base + LCD_SPU_HWC_HPXL_VLN);
+ armada_updatel(CFG_HWC_ENA,
+ CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
+ base + LCD_SPU_DMA_CTRL0);
+ dcrtc->cursor_update = false;
+ armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ }
+
+ spin_unlock(&dcrtc->irq_lock);
+
+ if (stat & GRA_FRAME_IRQ) {
+ struct drm_device *dev = dcrtc->crtc.dev;
+
+ spin_lock(&dev->event_lock);
+ if (dcrtc->frame_work)
+ armada_drm_crtc_complete_frame_work(dcrtc);
+ spin_unlock(&dev->event_lock);
+
+ wake_up(&dcrtc->frame_wait);
+ }
+}
+
+/* These are locked by dev->vbl_lock */
+void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+ if (dcrtc->irq_ena & mask) {
+ dcrtc->irq_ena &= ~mask;
+ writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ }
+}
+
+void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+ if ((dcrtc->irq_ena & mask) != mask) {
+ dcrtc->irq_ena |= mask;
+ writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ if (readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR) & mask)
+ writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+ }
+}
+
+static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
+{
+ struct drm_display_mode *adj = &dcrtc->crtc.mode;
+ uint32_t val = 0;
+
+ if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
+ val |= CFG_CSC_YUV_CCIR709;
+ if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
+ val |= CFG_CSC_RGB_STUDIO;
+
+ /*
+ * In auto mode, set the colorimetry, based upon the HDMI spec.
+ * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
+ * ITU601. It may be more appropriate to set this depending on
+ * the source - but what if the graphic frame is YUV and the
+ * video frame is RGB?
+ */
+ if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
+ !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
+ (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
+ if (dcrtc->csc_yuv_mode == CSC_AUTO)
+ val |= CFG_CSC_YUV_CCIR709;
+ }
+
+ /*
+ * We assume we're connected to a TV-like device, so the YUV->RGB
+ * conversion should produce a limited range. We should set this
+ * depending on the connectors attached to this CRTC, and what
+ * kind of device they report being connected.
+ */
+ if (dcrtc->csc_rgb_mode == CSC_AUTO)
+ val |= CFG_CSC_RGB_STUDIO;
+
+ return val;
+}
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode, struct drm_display_mode *adj,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct armada_private *priv = crtc->dev->dev_private;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_regs regs[17];
+ uint32_t lm, rm, tm, bm, val, sclk;
+ unsigned long flags;
+ unsigned i;
+ bool interlaced;
+
+ drm_framebuffer_reference(crtc->fb);
+
+ interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
+
+ i = armada_drm_crtc_calc_fb(dcrtc->crtc.fb, x, y, regs, interlaced);
+
+ rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
+ lm = adj->crtc_htotal - adj->crtc_hsync_end;
+ bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
+ tm = adj->crtc_vtotal - adj->crtc_vsync_end;
+
+ DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
+ adj->crtc_hdisplay,
+ adj->crtc_hsync_start,
+ adj->crtc_hsync_end,
+ adj->crtc_htotal, lm, rm);
+ DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
+ adj->crtc_vdisplay,
+ adj->crtc_vsync_start,
+ adj->crtc_vsync_end,
+ adj->crtc_vtotal, tm, bm);
+
+ /* Wait for pending flips to complete */
+ wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+ drm_vblank_pre_modeset(crtc->dev, dcrtc->num);
+
+ crtc->mode = *adj;
+
+ val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
+ if (val != dcrtc->dumb_ctrl) {
+ dcrtc->dumb_ctrl = val;
+ writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
+ }
+
+ /* Now compute the divider for real */
+ priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
+
+ /* Ensure graphic fifo is enabled */
+ armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
+ armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV);
+
+ if (interlaced ^ dcrtc->interlaced) {
+ if (adj->flags & DRM_MODE_FLAG_INTERLACE)
+ drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+ else
+ drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ dcrtc->interlaced = interlaced;
+ }
+
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+
+ /* Even interlaced/progressive frame */
+ dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
+ adj->crtc_htotal;
+ dcrtc->v[1].spu_v_porch = tm << 16 | bm;
+ val = adj->crtc_hsync_start;
+ dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+ priv->variant->spu_adv_reg;
+
+ if (interlaced) {
+ /* Odd interlaced frame */
+ dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
+ (1 << 16);
+ dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
+ val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
+ dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+ priv->variant->spu_adv_reg;
+ } else {
+ dcrtc->v[0] = dcrtc->v[1];
+ }
+
+ val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
+
+ armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
+ armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN);
+ armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN);
+ armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
+ armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
+ armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
+ LCD_SPUT_V_H_TOTAL);
+
+ if (priv->variant->has_spu_adv_reg) {
+ armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
+ ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
+ ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
+ }
+
+ val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
+ val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt);
+ val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.fb)->mod);
+
+ if (drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt > CFG_420)
+ val |= CFG_PALETTE_ENA;
+
+ if (interlaced)
+ val |= CFG_GRA_FTOGGLE;
+
+ armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT |
+ CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
+ LCD_SPU_DMA_CTRL0);
+
+ val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
+ armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
+
+ val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
+ armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
+ armada_reg_queue_end(regs, i);
+
+ armada_drm_crtc_update_regs(dcrtc, regs);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+
+ armada_drm_crtc_update(dcrtc);
+
+ drm_vblank_post_modeset(crtc->dev, dcrtc->num);
+ armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+ return 0;
+}
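A note on the margin values computed near the top of armada_drm_crtc_mode_set(): lm/rm/tm/bm are plain differences of the CRTC timing fields (back and front porches). The standalone sketch below works them out for nominal CEA-861 1080p60 timings, which are assumed here purely as example input:

/* Illustrative only: the porch arithmetic from the mode set path applied
 * to nominal 1920x1080p60 timings.
 */
#include <stdio.h>

int main(void)
{
	/* assumed example timings */
	int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
	int vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;

	int rm = hsync_start - hdisplay;	/* right margin (front porch): 88 */
	int lm = htotal - hsync_end;		/* left margin (back porch): 148 */
	int bm = vsync_start - vdisplay;	/* bottom margin: 4 */
	int tm = vtotal - vsync_end;		/* top margin: 36 */

	printf("H: lm %d rm %d\n", lm, rm);
	printf("V: tm %d bm %d\n", tm, bm);
	return 0;
}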
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_regs regs[4];
+ unsigned i;
+
+ i = armada_drm_crtc_calc_fb(crtc->fb, crtc->x, crtc->y, regs,
+ dcrtc->interlaced);
+ armada_reg_queue_end(regs, i);
+
+ /* Wait for pending flips to complete */
+ wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+ /* Take a reference to the new fb as we're using it */
+ drm_framebuffer_reference(crtc->fb);
+
+ /* Update the base in the CRTC */
+ armada_drm_crtc_update_regs(dcrtc, regs);
+
+ /* Drop our previously held reference */
+ armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+ return 0;
+}
+
+static void armada_drm_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_disable(struct drm_crtc *crtc)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+ armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ armada_drm_crtc_finish_fb(dcrtc, crtc->fb, true);
+
+ /* Power down most RAMs and FIFOs */
+ writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+ CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+ CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+}
+
+static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
+ .dpms = armada_drm_crtc_dpms,
+ .prepare = armada_drm_crtc_prepare,
+ .commit = armada_drm_crtc_commit,
+ .mode_fixup = armada_drm_crtc_mode_fixup,
+ .mode_set = armada_drm_crtc_mode_set,
+ .mode_set_base = armada_drm_crtc_mode_set_base,
+ .load_lut = armada_drm_crtc_load_lut,
+ .disable = armada_drm_crtc_disable,
+};
+
+static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
+ unsigned stride, unsigned width, unsigned height)
+{
+ uint32_t addr;
+ unsigned y;
+
+ addr = SRAM_HWC32_RAM1;
+ for (y = 0; y < height; y++) {
+ uint32_t *p = &pix[y * stride];
+ unsigned x;
+
+ for (x = 0; x < width; x++, p++) {
+ uint32_t val = *p;
+
+ val = (val & 0xff00ff00) |
+ (val & 0x000000ff) << 16 |
+ (val & 0x00ff0000) >> 16;
+
+ writel_relaxed(val,
+ base + LCD_SPU_SRAM_WRDAT);
+ writel_relaxed(addr | SRAM_WRITE,
+ base + LCD_SPU_SRAM_CTRL);
+ addr += 1;
+ if ((addr & 0x00ff) == 0)
+ addr += 0xf00;
+ if ((addr & 0x30ff) == 0)
+ addr = SRAM_HWC32_RAM2;
+ }
+ }
+}
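The pixel transform in armada_load_cursor_argb() simply exchanges the red and blue bytes of each 32-bit ARGB word before it is written to the cursor SRAM. A tiny standalone demonstration (the pixel value is made up):

/* Illustrative only: the R/B swap performed when loading cursor data. */
#include <stdint.h>
#include <stdio.h>

static uint32_t swap_rb(uint32_t val)
{
	return (val & 0xff00ff00) |
	       (val & 0x000000ff) << 16 |
	       (val & 0x00ff0000) >> 16;
}

int main(void)
{
	uint32_t argb = 0x80112233;	/* A=0x80 R=0x11 G=0x22 B=0x33 */

	/* prints 0x80112233 -> 0x80332211: alpha and green stay put,
	 * red and blue trade places */
	printf("0x%08x -> 0x%08x\n", argb, swap_rb(argb));
	return 0;
}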
+
+static void armada_drm_crtc_cursor_tran(void __iomem *base)
+{
+ unsigned addr;
+
+ for (addr = 0; addr < 256; addr++) {
+ /* write the default value */
+ writel_relaxed(0x55555555, base + LCD_SPU_SRAM_WRDAT);
+ writel_relaxed(addr | SRAM_WRITE | SRAM_HWC32_TRAN,
+ base + LCD_SPU_SRAM_CTRL);
+ }
+}
+
+static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
+{
+ uint32_t xoff, xscr, w = dcrtc->cursor_w, s;
+ uint32_t yoff, yscr, h = dcrtc->cursor_h;
+ uint32_t para1;
+
+ /*
+ * Calculate the visible width and height of the cursor,
+ * screen position, and the position in the cursor bitmap.
+ */
+ if (dcrtc->cursor_x < 0) {
+ xoff = -dcrtc->cursor_x;
+ xscr = 0;
+ w -= min(xoff, w);
+ } else if (dcrtc->cursor_x + w > dcrtc->crtc.mode.hdisplay) {
+ xoff = 0;
+ xscr = dcrtc->cursor_x;
+ w = max_t(int, dcrtc->crtc.mode.hdisplay - dcrtc->cursor_x, 0);
+ } else {
+ xoff = 0;
+ xscr = dcrtc->cursor_x;
+ }
+
+ if (dcrtc->cursor_y < 0) {
+ yoff = -dcrtc->cursor_y;
+ yscr = 0;
+ h -= min(yoff, h);
+ } else if (dcrtc->cursor_y + h > dcrtc->crtc.mode.vdisplay) {
+ yoff = 0;
+ yscr = dcrtc->cursor_y;
+ h = max_t(int, dcrtc->crtc.mode.vdisplay - dcrtc->cursor_y, 0);
+ } else {
+ yoff = 0;
+ yscr = dcrtc->cursor_y;
+ }
+
+ /* On interlaced modes, the vertical cursor size must be halved */
+ s = dcrtc->cursor_w;
+ if (dcrtc->interlaced) {
+ s *= 2;
+ yscr /= 2;
+ h /= 2;
+ }
+
+ if (!dcrtc->cursor_obj || !h || !w) {
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ dcrtc->cursor_update = false;
+ armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ return 0;
+ }
+
+ para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1);
+ armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32,
+ dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+ /*
+ * Initialize the transparency if the SRAM was powered down.
+ * We must also reload the cursor data.
+ */
+ if (!(para1 & CFG_CSB_256x32)) {
+ armada_drm_crtc_cursor_tran(dcrtc->base);
+ reload = true;
+ }
+
+ if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ dcrtc->cursor_update = false;
+ armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ reload = true;
+ }
+ if (reload) {
+ struct armada_gem_object *obj = dcrtc->cursor_obj;
+ uint32_t *pix;
+ /* Set the top-left corner of the cursor image */
+ pix = obj->addr;
+ pix += yoff * s + xoff;
+ armada_load_cursor_argb(dcrtc->base, pix, s, w, h);
+ }
+
+ /* Reload the cursor position, size and enable in the IRQ handler */
+ spin_lock_irq(&dcrtc->irq_lock);
+ dcrtc->cursor_hw_pos = yscr << 16 | xscr;
+ dcrtc->cursor_hw_sz = h << 16 | w;
+ dcrtc->cursor_update = true;
+ armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ spin_unlock_irq(&dcrtc->irq_lock);
+
+ return 0;
+}
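The clipping block at the top of armada_drm_crtc_cursor_update() trims the cursor against the left edge (negative coordinates) and the right edge of the active mode; the vertical case is symmetrical. The standalone sketch below reproduces the horizontal arithmetic with made-up numbers to make the xoff/xscr/width relationship explicit:

/* Illustrative only: horizontal cursor clipping as done above. */
#include <stdio.h>

static void clip_x(int cursor_x, unsigned w, unsigned hdisplay)
{
	unsigned xoff, xscr;

	if (cursor_x < 0) {
		/* off the left edge: skip xoff source columns */
		xoff = -cursor_x;
		xscr = 0;
		w -= (xoff < w) ? xoff : w;
	} else if (cursor_x + w > hdisplay) {
		/* off the right edge: shrink the visible width */
		xoff = 0;
		xscr = cursor_x;
		w = (hdisplay > (unsigned)cursor_x) ? hdisplay - cursor_x : 0;
	} else {
		xoff = 0;
		xscr = cursor_x;
	}

	printf("cursor_x %5d -> xoff %2u xscr %4u visible w %2u\n",
	       cursor_x, xoff, xscr, w);
}

int main(void)
{
	clip_x(-10, 64, 1920);	/* xoff 10, visible width 54 */
	clip_x(100, 64, 1920);	/* fully visible */
	clip_x(1900, 64, 1920);	/* visible width 20 */
	return 0;
}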
+
+static void cursor_update(void *data)
+{
+ armada_drm_crtc_cursor_update(data, true);
+}
+
+static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h)
+{
+ struct drm_device *dev = crtc->dev;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_private *priv = crtc->dev->dev_private;
+ struct armada_gem_object *obj = NULL;
+ int ret;
+
+ /* If no cursor support, replicate drm's return value */
+ if (!priv->variant->has_spu_adv_reg)
+ return -ENXIO;
+
+ if (handle && w > 0 && h > 0) {
+ /* maximum size is 64x32 or 32x64 */
+ if (w > 64 || h > 64 || (w > 32 && h > 32))
+ return -ENOMEM;
+
+ obj = armada_gem_object_lookup(dev, file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ /* Must be a kernel-mapped object */
+ if (!obj->addr) {
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ return -EINVAL;
+ }
+
+ if (obj->obj.size < w * h * 4) {
+ DRM_ERROR("buffer is too small\n");
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ return -ENOMEM;
+ }
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ if (dcrtc->cursor_obj) {
+ dcrtc->cursor_obj->update = NULL;
+ dcrtc->cursor_obj->update_data = NULL;
+ drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+ }
+ dcrtc->cursor_obj = obj;
+ dcrtc->cursor_w = w;
+ dcrtc->cursor_h = h;
+ ret = armada_drm_crtc_cursor_update(dcrtc, true);
+ if (obj) {
+ obj->update_data = dcrtc;
+ obj->update = cursor_update;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_private *priv = crtc->dev->dev_private;
+ int ret;
+
+ /* If no cursor support, replicate drm's return value */
+ if (!priv->variant->has_spu_adv_reg)
+ return -EFAULT;
+
+ mutex_lock(&dev->struct_mutex);
+ dcrtc->cursor_x = x;
+ dcrtc->cursor_y = y;
+ ret = armada_drm_crtc_cursor_update(dcrtc, false);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_private *priv = crtc->dev->dev_private;
+
+ if (dcrtc->cursor_obj)
+ drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+
+ priv->dcrtc[dcrtc->num] = NULL;
+ drm_crtc_cleanup(&dcrtc->crtc);
+
+ if (!IS_ERR(dcrtc->clk))
+ clk_disable_unprepare(dcrtc->clk);
+
+ kfree(dcrtc);
+}
+
+/*
+ * The mode_config lock is held here, to prevent races between this
+ * and a mode_set.
+ */
+static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_frame_work *work;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+ unsigned i;
+ int ret;
+
+ /* We don't support changing the pixel format */
+ if (fb->pixel_format != crtc->fb->pixel_format)
+ return -EINVAL;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ work->event = event;
+ work->old_fb = dcrtc->crtc.fb;
+
+ i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
+ dcrtc->interlaced);
+ armada_reg_queue_end(work->regs, i);
+
+ /*
+ * Hold the old framebuffer for the work - DRM appears to drop our
+ * reference to the old framebuffer in drm_mode_page_flip_ioctl().
+ */
+ drm_framebuffer_reference(work->old_fb);
+
+ ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
+ if (ret) {
+ /*
+ * Undo our reference above; DRM does not drop the reference
+ * to this object on error, so that's okay.
+ */
+ drm_framebuffer_unreference(work->old_fb);
+ kfree(work);
+ return ret;
+ }
+
+ /*
+ * Don't take a reference on the new framebuffer;
+ * drm_mode_page_flip_ioctl() has already grabbed a reference and
+ * will _not_ drop that reference on successful return from this
+ * function. Simply mark this new framebuffer as the current one.
+ */
+ dcrtc->crtc.fb = fb;
+
+ /*
+ * Finally, if the display is blanked, we won't receive an
+ * interrupt, so complete it now.
+ */
+ if (dpms_blanked(dcrtc->dpms)) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (dcrtc->frame_work)
+ armada_drm_crtc_complete_frame_work(dcrtc);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ return 0;
+}
+
+static int
+armada_drm_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val)
+{
+ struct armada_private *priv = crtc->dev->dev_private;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ bool update_csc = false;
+
+ if (property == priv->csc_yuv_prop) {
+ dcrtc->csc_yuv_mode = val;
+ update_csc = true;
+ } else if (property == priv->csc_rgb_prop) {
+ dcrtc->csc_rgb_mode = val;
+ update_csc = true;
+ }
+
+ if (update_csc) {
+ uint32_t val;
+
+ val = dcrtc->spu_iopad_ctrl |
+ armada_drm_crtc_calculate_csc(dcrtc);
+ writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+ }
+
+ return 0;
+}
+
+static struct drm_crtc_funcs armada_crtc_funcs = {
+ .cursor_set = armada_drm_crtc_cursor_set,
+ .cursor_move = armada_drm_crtc_cursor_move,
+ .destroy = armada_drm_crtc_destroy,
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = armada_drm_crtc_page_flip,
+ .set_property = armada_drm_crtc_set_property,
+};
+
+static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
+ { CSC_AUTO, "Auto" },
+ { CSC_YUV_CCIR601, "CCIR601" },
+ { CSC_YUV_CCIR709, "CCIR709" },
+};
+
+static struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
+ { CSC_AUTO, "Auto" },
+ { CSC_RGB_COMPUTER, "Computer system" },
+ { CSC_RGB_STUDIO, "Studio" },
+};
+
+static int armada_drm_crtc_create_properties(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ if (priv->csc_yuv_prop)
+ return 0;
+
+ priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
+ "CSC_YUV", armada_drm_csc_yuv_enum_list,
+ ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
+ priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
+ "CSC_RGB", armada_drm_csc_rgb_enum_list,
+ ARRAY_SIZE(armada_drm_csc_rgb_enum_list));
+
+ if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
+ struct resource *res)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc;
+ void __iomem *base;
+ int ret;
+
+ ret = armada_drm_crtc_create_properties(dev);
+ if (ret)
+ return ret;
+
+ base = devm_request_and_ioremap(dev->dev, res);
+ if (!base) {
+ DRM_ERROR("failed to ioremap register\n");
+ return -ENOMEM;
+ }
+
+ dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
+ if (!dcrtc) {
+ DRM_ERROR("failed to allocate Armada crtc\n");
+ return -ENOMEM;
+ }
+
+ dcrtc->base = base;
+ dcrtc->num = num;
+ dcrtc->clk = ERR_PTR(-EINVAL);
+ dcrtc->csc_yuv_mode = CSC_AUTO;
+ dcrtc->csc_rgb_mode = CSC_AUTO;
+ dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
+ dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
+ spin_lock_init(&dcrtc->irq_lock);
+ dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;
+ INIT_LIST_HEAD(&dcrtc->vbl_list);
+ init_waitqueue_head(&dcrtc->frame_wait);
+
+ /* Initialize some registers which we don't otherwise set */
+ writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
+ writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_BLANKCOLOR);
+ writel_relaxed(dcrtc->spu_iopad_ctrl,
+ dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+ writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_SRAM_PARA0);
+ writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+ CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+ CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+ writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
+ writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+
+ if (priv->variant->crtc_init) {
+ ret = priv->variant->crtc_init(dcrtc);
+ if (ret) {
+ kfree(dcrtc);
+ return ret;
+ }
+ }
+
+ /* Ensure AXI pipeline is enabled */
+ armada_updatel(CFG_ARBFAST_ENA, 0, dcrtc->base + LCD_SPU_DMA_CTRL0);
+
+ priv->dcrtc[dcrtc->num] = dcrtc;
+
+ drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
+ drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
+
+ drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
+ dcrtc->csc_yuv_mode);
+ drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
+ dcrtc->csc_rgb_mode);
+
+ return armada_overlay_plane_create(dev, 1 << dcrtc->num);
+}
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
new file mode 100644
index 000000000000..9c10a07e7492
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CRTC_H
+#define ARMADA_CRTC_H
+
+struct armada_gem_object;
+
+struct armada_regs {
+ uint32_t offset;
+ uint32_t mask;
+ uint32_t val;
+};
+
+#define armada_reg_queue_mod(_r, _i, _v, _m, _o) \
+ do { \
+ struct armada_regs *__reg = _r; \
+ __reg[_i].offset = _o; \
+ __reg[_i].mask = ~(_m); \
+ __reg[_i].val = _v; \
+ _i++; \
+ } while (0)
+
+#define armada_reg_queue_set(_r, _i, _v, _o) \
+ armada_reg_queue_mod(_r, _i, _v, ~0, _o)
+
+#define armada_reg_queue_end(_r, _i) \
+ armada_reg_queue_mod(_r, _i, 0, 0, ~0)
+
+struct armada_frame_work;
+
+struct armada_crtc {
+ struct drm_crtc crtc;
+ unsigned num;
+ void __iomem *base;
+ struct clk *clk;
+ struct {
+ uint32_t spu_v_h_total;
+ uint32_t spu_v_porch;
+ uint32_t spu_adv_reg;
+ } v[2];
+ bool interlaced;
+ bool cursor_update;
+ uint8_t csc_yuv_mode;
+ uint8_t csc_rgb_mode;
+
+ struct drm_plane *plane;
+
+ struct armada_gem_object *cursor_obj;
+ int cursor_x;
+ int cursor_y;
+ uint32_t cursor_hw_pos;
+ uint32_t cursor_hw_sz;
+ uint32_t cursor_w;
+ uint32_t cursor_h;
+
+ int dpms;
+ uint32_t cfg_dumb_ctrl;
+ uint32_t dumb_ctrl;
+ uint32_t spu_iopad_ctrl;
+
+ wait_queue_head_t frame_wait;
+ struct armada_frame_work *frame_work;
+
+ spinlock_t irq_lock;
+ uint32_t irq_ena;
+ struct list_head vbl_list;
+};
+#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
+
+int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
+void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
+void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
+void armada_drm_crtc_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
+
+#endif
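A short usage note on the register queue defined above: the armada_reg_queue_* macros append {offset, mask, val} triplets to a caller-supplied armada_regs array and terminate it with an all-ones offset, which the update path presumably treats as the end marker. The standalone sketch below copies the struct and macros and dumps the entries queued for a hypothetical base-address update (the register offsets are placeholders, not real hardware addresses):

/* Illustrative only: building a register update list with the
 * armada_reg_queue_* macros.
 */
#include <stdint.h>
#include <stdio.h>

struct armada_regs {
	uint32_t offset;
	uint32_t mask;
	uint32_t val;
};

#define armada_reg_queue_mod(_r, _i, _v, _m, _o)	\
	do {						\
		struct armada_regs *__reg = _r;		\
		__reg[_i].offset = _o;			\
		__reg[_i].mask = ~(_m);			\
		__reg[_i].val = _v;			\
		_i++;					\
	} while (0)

#define armada_reg_queue_set(_r, _i, _v, _o) \
	armada_reg_queue_mod(_r, _i, _v, ~0, _o)

#define armada_reg_queue_end(_r, _i) \
	armada_reg_queue_mod(_r, _i, 0, 0, ~0)

int main(void)
{
	struct armada_regs regs[4];
	unsigned i = 0, n;

	armada_reg_queue_set(regs, i, 0x1f000000, 0x0c0); /* placeholder "start address" */
	armada_reg_queue_mod(regs, i, 1920 * 4, 0xffff, 0x0fc); /* placeholder "pitch" */
	armada_reg_queue_end(regs, i);

	for (n = 0; n < i; n++)
		printf("offset %08x mask %08x val %08x\n",
		       regs[n].offset, regs[n].mask, regs[n].val);
	return 0;
}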
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
new file mode 100644
index 000000000000..612f3753cd92
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+
+static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct armada_private *priv = dev->dev_private;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_mm_dump_table(m, &priv->linear);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static int armada_debugfs_reg_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+ struct armada_private *priv = dev->dev_private;
+ int n, i;
+
+ if (priv) {
+ for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+ struct armada_crtc *dcrtc = priv->dcrtc[n];
+ if (!dcrtc)
+ continue;
+
+ for (i = 0x84; i <= 0x1c4; i += 4) {
+ uint32_t v = readl_relaxed(dcrtc->base + i);
+ seq_printf(m, "%u: 0x%04x: 0x%08x\n", n, i, v);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int armada_debugfs_reg_r_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, armada_debugfs_reg_show, inode->i_private);
+}
+
+static const struct file_operations fops_reg_r = {
+ .owner = THIS_MODULE,
+ .open = armada_debugfs_reg_r_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int armada_debugfs_write(struct file *file, const char __user *ptr,
+ size_t len, loff_t *off)
+{
+ struct drm_device *dev = file->private_data;
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc = priv->dcrtc[0];
+ char buf[32], *p;
+ uint32_t reg, val;
+ int ret;
+
+ if (*off != 0)
+ return 0;
+
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+
+ ret = strncpy_from_user(buf, ptr, len);
+ if (ret < 0)
+ return ret;
+ buf[len] = '\0';
+
+ reg = simple_strtoul(buf, &p, 16);
+ if (!isspace(*p))
+ return -EINVAL;
+ val = simple_strtoul(p + 1, NULL, 16);
+
+ if (reg >= 0x84 && reg <= 0x1c4)
+ writel(val, dcrtc->base + reg);
+
+ return len;
+}
+
+static int armada_debugfs_reg_w_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations fops_reg_w = {
+ .owner = THIS_MODULE,
+ .open = armada_debugfs_reg_w_open,
+ .write = armada_debugfs_write,
+ .llseek = noop_llseek,
+};
+
+static struct drm_info_list armada_debugfs_list[] = {
+ { "gem_linear", armada_debugfs_gem_linear_show, 0 },
+};
+#define ARMADA_DEBUGFS_ENTRIES ARRAY_SIZE(armada_debugfs_list)
+
+static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
+ const void *key)
+{
+ struct drm_info_node *node;
+
+ node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+ if (node == NULL) {
+ debugfs_remove(ent);
+ return -ENOMEM;
+ }
+
+ node->minor = minor;
+ node->dent = ent;
+ node->info_ent = (void *) key;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+}
+
+static int armada_debugfs_create(struct dentry *root, struct drm_minor *minor,
+ const char *name, umode_t mode, const struct file_operations *fops)
+{
+ struct dentry *de;
+
+ de = debugfs_create_file(name, mode, root, minor->dev, fops);
+
+ return drm_add_fake_info_node(minor, de, fops);
+}
+
+int armada_drm_debugfs_init(struct drm_minor *minor)
+{
+ int ret;
+
+ ret = drm_debugfs_create_files(armada_debugfs_list,
+ ARMADA_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+ if (ret)
+ return ret;
+
+ ret = armada_debugfs_create(minor->debugfs_root, minor,
+ "reg", S_IFREG | S_IRUSR, &fops_reg_r);
+ if (ret)
+ goto err_1;
+
+ ret = armada_debugfs_create(minor->debugfs_root, minor,
+ "reg_wr", S_IFREG | S_IWUSR, &fops_reg_w);
+ if (ret)
+ goto err_2;
+ return ret;
+
+ err_2:
+ drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+ err_1:
+ drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+ minor);
+ return ret;
+}
+
+void armada_drm_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_w, 1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+ drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+ minor);
+}
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
new file mode 100644
index 000000000000..eef09ec9a5ff
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_DRM_H
+#define ARMADA_DRM_H
+
+#include <linux/kfifo.h>
+#include <linux/io.h>
+#include <linux/workqueue.h>
+#include <drm/drmP.h>
+
+struct armada_crtc;
+struct armada_gem_object;
+struct clk;
+struct drm_fb_helper;
+
+static inline void
+armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr)
+{
+ uint32_t ov, v;
+
+ ov = v = readl_relaxed(ptr);
+ v = (v & ~mask) | val;
+ if (ov != v)
+ writel_relaxed(v, ptr);
+}
+
+static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
+{
+ uint32_t pitch = bpp != 4 ? width * ((bpp + 7) / 8) : width / 2;
+
+ /* 88AP510 spec recommends pitch be a multiple of 128 */
+ return ALIGN(pitch, 128);
+}
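As a quick worked example of the helper above (illustrative, arbitrary widths): a 1366-pixel line at 32bpp needs 5464 bytes and is rounded up to 5504 to hit the recommended 128-byte multiple, while a 1920-pixel line at 32bpp is 7680 bytes and already aligned.

/* Illustrative only: armada_pitch() rounding, reimplemented in user space
 * with the usual power-of-two ALIGN() definition.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uint32_t)(a) - 1))

static uint32_t armada_pitch(uint32_t width, uint32_t bpp)
{
	uint32_t pitch = bpp != 4 ? width * ((bpp + 7) / 8) : width / 2;

	/* 88AP510 spec recommends pitch be a multiple of 128 */
	return ALIGN(pitch, 128);
}

int main(void)
{
	printf("1366 x 32bpp -> %u\n", armada_pitch(1366, 32)); /* 5504 */
	printf("1920 x 32bpp -> %u\n", armada_pitch(1920, 32)); /* 7680 */
	printf("1024 x  4bpp -> %u\n", armada_pitch(1024, 4));  /* 512  */
	return 0;
}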
+
+struct armada_vbl_event {
+ struct list_head node;
+ void *data;
+ void (*fn)(struct armada_crtc *, void *);
+};
+void armada_drm_vbl_event_add(struct armada_crtc *,
+ struct armada_vbl_event *);
+void armada_drm_vbl_event_remove(struct armada_crtc *,
+ struct armada_vbl_event *);
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
+ struct armada_vbl_event *);
+#define armada_drm_vbl_event_init(_e, _f, _d) do { \
+ struct armada_vbl_event *__e = _e; \
+ INIT_LIST_HEAD(&__e->node); \
+ __e->data = _d; \
+ __e->fn = _f; \
+} while (0)
+
+
+struct armada_private;
+
+struct armada_variant {
+ bool has_spu_adv_reg;
+ uint32_t spu_adv_reg;
+ int (*init)(struct armada_private *, struct device *);
+ int (*crtc_init)(struct armada_crtc *);
+ int (*crtc_compute_clock)(struct armada_crtc *,
+ const struct drm_display_mode *,
+ uint32_t *);
+};
+
+/* Variant ops */
+extern const struct armada_variant armada510_ops;
+
+struct armada_private {
+ const struct armada_variant *variant;
+ struct work_struct fb_unref_work;
+ DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
+ struct drm_fb_helper *fbdev;
+ struct armada_crtc *dcrtc[2];
+ struct drm_mm linear;
+ struct clk *extclk[2];
+ struct drm_property *csc_yuv_prop;
+ struct drm_property *csc_rgb_prop;
+ struct drm_property *colorkey_prop;
+ struct drm_property *colorkey_min_prop;
+ struct drm_property *colorkey_max_prop;
+ struct drm_property *colorkey_val_prop;
+ struct drm_property *colorkey_alpha_prop;
+ struct drm_property *colorkey_mode_prop;
+ struct drm_property *brightness_prop;
+ struct drm_property *contrast_prop;
+ struct drm_property *saturation_prop;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *de;
+#endif
+};
+
+void __armada_drm_queue_unref_work(struct drm_device *,
+ struct drm_framebuffer *);
+void armada_drm_queue_unref_work(struct drm_device *,
+ struct drm_framebuffer *);
+
+extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
+
+int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_fini(struct drm_device *);
+
+int armada_overlay_plane_create(struct drm_device *, unsigned long);
+
+int armada_drm_debugfs_init(struct drm_minor *);
+void armada_drm_debugfs_cleanup(struct drm_minor *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
new file mode 100644
index 000000000000..4f2b28354915
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+#include <drm/i2c/tda998x.h>
+#include "armada_slave.h"
+
+static struct tda998x_encoder_params params = {
+ /* With 0x24, there is no translation between vp_out and int_vp
+ FB LCD out Pins VIP Int Vp
+ R:23:16 R:7:0 VPC7:0 7:0 7:0[R]
+ G:15:8 G:15:8 VPB7:0 23:16 23:16[G]
+ B:7:0 B:23:16 VPA7:0 15:8 15:8[B]
+ */
+ .swap_a = 2,
+ .swap_b = 3,
+ .swap_c = 4,
+ .swap_d = 5,
+ .swap_e = 0,
+ .swap_f = 1,
+ .audio_cfg = BIT(2),
+ .audio_frame[1] = 1,
+ .audio_format = AFMT_SPDIF,
+ .audio_sample_rate = 44100,
+};
+
+static const struct armada_drm_slave_config tda19988_config = {
+ .i2c_adapter_id = 0,
+ .crtcs = 1 << 0, /* Only LCD0 at the moment */
+ .polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
+ .interlace_allowed = true,
+ .info = {
+ .type = "tda998x",
+ .addr = 0x70,
+ .platform_data = &params,
+ },
+};
+#endif
+
+static void armada_drm_unref_work(struct work_struct *work)
+{
+ struct armada_private *priv =
+ container_of(work, struct armada_private, fb_unref_work);
+ struct drm_framebuffer *fb;
+
+ while (kfifo_get(&priv->fb_unref, &fb))
+ drm_framebuffer_unreference(fb);
+}
+
+/* Must be called with dev->event_lock held */
+void __armada_drm_queue_unref_work(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ /*
+ * Yes, we really must jump through these hoops just to store a
+ * _pointer_ to something into the kfifo. This is utterly insane
+ * and idiotic, because kfifo requires the _data_ pointed to by
+ * the pointer to be const, not the pointer itself. Not only that, but
+ * you have to pass a pointer _to_ the pointer you want stored.
+ */
+ const struct drm_framebuffer *silly_api_alert = fb;
+ WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+ schedule_work(&priv->fb_unref_work);
+}
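The deferral here, pushing framebuffer pointers into a small FIFO and dropping the references from a workqueue, exists presumably because the caller holds dev->event_lock (frequently from the vblank IRQ path), where dropping what may be the last reference is not safe. As a loose user-space analogue of the pattern (this is deliberately NOT the kernel kfifo API, just a minimal ring of pointers drained outside the "atomic" path):

/* Illustrative only: queue pointers from an "atomic" context, release
 * them later from a "worker".
 */
#include <stdio.h>

struct fake_fb { const char *name; };

#define RING_SZ 8			/* power of two, like the kfifo above */
static struct fake_fb *ring[RING_SZ];
static unsigned head, tail;

static int queue_unref(struct fake_fb *fb)	/* called under the "lock" */
{
	if (head - tail == RING_SZ)
		return 0;			/* full: the driver WARNs here */
	ring[head++ % RING_SZ] = fb;
	return 1;				/* worker would be scheduled */
}

static void unref_worker(void)			/* process context */
{
	while (tail != head) {
		struct fake_fb *fb = ring[tail++ % RING_SZ];
		printf("dropping reference on %s\n", fb->name);
	}
}

int main(void)
{
	struct fake_fb a = { "old scanout fb" }, b = { "old cursor fb" };

	queue_unref(&a);
	queue_unref(&b);
	unref_worker();
	return 0;
}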
+
+void armada_drm_queue_unref_work(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ __armada_drm_queue_unref_work(dev, fb);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int armada_drm_load(struct drm_device *dev, unsigned long flags)
+{
+ const struct platform_device_id *id;
+ struct armada_private *priv;
+ struct resource *res[ARRAY_SIZE(priv->dcrtc)];
+ struct resource *mem = NULL;
+ int ret, n, i;
+
+ memset(res, 0, sizeof(res));
+
+ for (n = i = 0; ; n++) {
+ struct resource *r = platform_get_resource(dev->platformdev,
+ IORESOURCE_MEM, n);
+ if (!r)
+ break;
+
+ /* Resources above 64K are graphics memory */
+ if (resource_size(r) > SZ_64K)
+ mem = r;
+ else if (i < ARRAY_SIZE(priv->dcrtc))
+ res[i++] = r;
+ else
+ return -EINVAL;
+ }
+
+ if (!res[0] || !mem)
+ return -ENXIO;
+
+ if (!devm_request_mem_region(dev->dev, mem->start,
+ resource_size(mem), "armada-drm"))
+ return -EBUSY;
+
+ priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ DRM_ERROR("failed to allocate private\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ /* Get the implementation specific driver data. */
+ id = platform_get_device_id(dev->platformdev);
+ if (!id)
+ return -ENXIO;
+
+ priv->variant = (struct armada_variant *)id->driver_data;
+
+ ret = priv->variant->init(priv, dev->dev);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
+ INIT_KFIFO(priv->fb_unref);
+
+ /* Mode setting support */
+ drm_mode_config_init(dev);
+ dev->mode_config.min_width = 320;
+ dev->mode_config.min_height = 200;
+
+ /*
+ * With vscale enabled, the maximum width is 1920 due to the
+ * 1920-pixel by 3-line RAM
+ */
+ dev->mode_config.max_width = 1920;
+ dev->mode_config.max_height = 2048;
+
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.funcs = &armada_drm_mode_config_funcs;
+ drm_mm_init(&priv->linear, mem->start, resource_size(mem));
+
+ /* Create all LCD controllers */
+ for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+ if (!res[n])
+ break;
+
+ ret = armada_drm_crtc_create(dev, n, res[n]);
+ if (ret)
+ goto err_kms;
+ }
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+ ret = armada_drm_connector_slave_create(dev, &tda19988_config);
+ if (ret)
+ goto err_kms;
+#endif
+
+ ret = drm_vblank_init(dev, n);
+ if (ret)
+ goto err_kms;
+
+ ret = drm_irq_install(dev);
+ if (ret)
+ goto err_kms;
+
+ dev->vblank_disable_allowed = 1;
+
+ ret = armada_fbdev_init(dev);
+ if (ret)
+ goto err_irq;
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+ err_irq:
+ drm_irq_uninstall(dev);
+ err_kms:
+ drm_mode_config_cleanup(dev);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+
+ return ret;
+}
+
+static int armada_drm_unload(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ drm_kms_helper_poll_fini(dev);
+ armada_fbdev_fini(dev);
+ drm_irq_uninstall(dev);
+ drm_mode_config_cleanup(dev);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
+ struct armada_vbl_event *evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ if (list_empty(&evt->node)) {
+ list_add_tail(&evt->node, &dcrtc->vbl_list);
+
+ drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+ }
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
+ struct armada_vbl_event *evt)
+{
+ if (!list_empty(&evt->node)) {
+ list_del_init(&evt->node);
+ drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ }
+}
+
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *dcrtc,
+ struct armada_vbl_event *evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ armada_drm_vbl_event_remove(dcrtc, evt);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+/* These are called under the vbl_lock. */
+static int armada_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+ struct armada_private *priv = dev->dev_private;
+ armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+ return 0;
+}
+
+static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+ struct armada_private *priv = dev->dev_private;
+ armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+}
+
+static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc = priv->dcrtc[0];
+ uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+ irqreturn_t handled = IRQ_NONE;
+
+ /*
+ * This is ridiculous - rather than writing bits to clear, we
+ * have to set the actual status register value. This is racy.
+ */
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ /* Mask out those interrupts we haven't enabled */
+ v = stat & dcrtc->irq_ena;
+
+ if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
+ armada_drm_crtc_irq(dcrtc, stat);
+ handled = IRQ_HANDLED;
+ }
+
+ return handled;
+}
+
+static int armada_drm_irq_postinstall(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+ spin_lock_irq(&dev->vbl_lock);
+ writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+ spin_unlock_irq(&dev->vbl_lock);
+
+ return 0;
+}
+
+static void armada_drm_irq_uninstall(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+ writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
+}
+
+static struct drm_ioctl_desc armada_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
+ DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
+ DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
+ DRM_UNLOCKED),
+};
+
+static const struct file_operations armada_drm_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = drm_read,
+ .poll = drm_poll,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_mmap,
+ .open = drm_open,
+ .release = drm_release,
+};
+
+static struct drm_driver armada_drm_driver = {
+ .load = armada_drm_load,
+ .open = NULL,
+ .preclose = NULL,
+ .postclose = NULL,
+ .lastclose = NULL,
+ .unload = armada_drm_unload,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = armada_drm_enable_vblank,
+ .disable_vblank = armada_drm_disable_vblank,
+ .irq_handler = armada_drm_irq_handler,
+ .irq_postinstall = armada_drm_irq_postinstall,
+ .irq_uninstall = armada_drm_irq_uninstall,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = armada_drm_debugfs_init,
+ .debugfs_cleanup = armada_drm_debugfs_cleanup,
+#endif
+ .gem_free_object = armada_gem_free_object,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = armada_gem_prime_export,
+ .gem_prime_import = armada_gem_prime_import,
+ .dumb_create = armada_gem_dumb_create,
+ .dumb_map_offset = armada_gem_dumb_map_offset,
+ .dumb_destroy = armada_gem_dumb_destroy,
+ .gem_vm_ops = &armada_gem_vm_ops,
+ .major = 1,
+ .minor = 0,
+ .name = "armada-drm",
+ .desc = "Armada SoC DRM",
+ .date = "20120730",
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_HAVE_IRQ | DRIVER_PRIME,
+ .ioctls = armada_ioctls,
+ .fops = &armada_drm_fops,
+};
+
+static int armada_drm_probe(struct platform_device *pdev)
+{
+ return drm_platform_init(&armada_drm_driver, pdev);
+}
+
+static int armada_drm_remove(struct platform_device *pdev)
+{
+ drm_platform_exit(&armada_drm_driver, pdev);
+ return 0;
+}
+
+static const struct platform_device_id armada_drm_platform_ids[] = {
+ {
+ .name = "armada-drm",
+ .driver_data = (unsigned long)&armada510_ops,
+ }, {
+ .name = "armada-510-drm",
+ .driver_data = (unsigned long)&armada510_ops,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
+
+static struct platform_driver armada_drm_platform_driver = {
+ .probe = armada_drm_probe,
+ .remove = armada_drm_remove,
+ .driver = {
+ .name = "armada-drm",
+ .owner = THIS_MODULE,
+ },
+ .id_table = armada_drm_platform_ids,
+};
+
+static int __init armada_drm_init(void)
+{
+ armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls);
+ return platform_driver_register(&armada_drm_platform_driver);
+}
+module_init(armada_drm_init);
+
+static void __exit armada_drm_exit(void)
+{
+ platform_driver_unregister(&armada_drm_platform_driver);
+}
+module_exit(armada_drm_exit);
+
+MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_DESCRIPTION("Armada DRM Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:armada-drm");
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
new file mode 100644
index 000000000000..1c90969def3e
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+static void armada_fb_destroy(struct drm_framebuffer *fb)
+{
+ struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+
+ drm_framebuffer_cleanup(&dfb->fb);
+ drm_gem_object_unreference_unlocked(&dfb->obj->obj);
+ kfree(dfb);
+}
+
+static int armada_fb_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *dfile, unsigned int *handle)
+{
+ struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+ return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs armada_fb_funcs = {
+ .destroy = armada_fb_destroy,
+ .create_handle = armada_fb_create_handle,
+};
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
+{
+ struct armada_framebuffer *dfb;
+ uint8_t format, config;
+ int ret;
+
+ switch (mode->pixel_format) {
+#define FMT(drm, fmt, mod) \
+ case DRM_FORMAT_##drm: \
+ format = CFG_##fmt; \
+ config = mod; \
+ break
+ FMT(RGB565, 565, CFG_SWAPRB);
+ FMT(BGR565, 565, 0);
+ FMT(ARGB1555, 1555, CFG_SWAPRB);
+ FMT(ABGR1555, 1555, 0);
+ FMT(RGB888, 888PACK, CFG_SWAPRB);
+ FMT(BGR888, 888PACK, 0);
+ FMT(XRGB8888, X888, CFG_SWAPRB);
+ FMT(XBGR8888, X888, 0);
+ FMT(ARGB8888, 8888, CFG_SWAPRB);
+ FMT(ABGR8888, 8888, 0);
+ FMT(YUYV, 422PACK, CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV);
+ FMT(UYVY, 422PACK, CFG_YUV2RGB);
+ FMT(VYUY, 422PACK, CFG_YUV2RGB | CFG_SWAPUV);
+ FMT(YVYU, 422PACK, CFG_YUV2RGB | CFG_SWAPYU);
+ FMT(YUV422, 422, CFG_YUV2RGB);
+ FMT(YVU422, 422, CFG_YUV2RGB | CFG_SWAPUV);
+ FMT(YUV420, 420, CFG_YUV2RGB);
+ FMT(YVU420, 420, CFG_YUV2RGB | CFG_SWAPUV);
+ FMT(C8, PSEUDO8, 0);
+#undef FMT
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ dfb = kzalloc(sizeof(*dfb), GFP_KERNEL);
+ if (!dfb) {
+ DRM_ERROR("failed to allocate Armada fb object\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dfb->fmt = format;
+ dfb->mod = config;
+ dfb->obj = obj;
+
+ drm_helper_mode_fill_fb_struct(&dfb->fb, mode);
+
+ ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
+ if (ret) {
+ kfree(dfb);
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * Take a reference on our object as we're successful - the
+ * caller already holds a reference, which keeps us safe for
+ * the above call, but the caller will drop their reference
+ * to it. Hence we need to take our own reference.
+ */
+ drm_gem_object_reference(&obj->obj);
+
+ return dfb;
+}
+
+static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+ struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode)
+{
+ struct armada_gem_object *obj;
+ struct armada_framebuffer *dfb;
+ int ret;
+
+ DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n",
+ mode->width, mode->height, mode->pixel_format,
+ mode->flags, mode->pitches[0], mode->pitches[1],
+ mode->pitches[2]);
+
+ /* We can only handle a single plane at the moment */
+ if (drm_format_num_planes(mode->pixel_format) > 1 &&
+ (mode->handles[0] != mode->handles[1] ||
+ mode->handles[0] != mode->handles[2])) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]);
+ if (!obj) {
+ ret = -ENOENT;
+ goto err;
+ }
+
+ if (obj->obj.import_attach && !obj->sgt) {
+ ret = armada_gem_map_import(obj);
+ if (ret)
+ goto err_unref;
+ }
+
+ /* Framebuffer objects must have a valid device address for scanout */
+ if (obj->dev_addr == DMA_ERROR_CODE) {
+ ret = -EINVAL;
+ goto err_unref;
+ }
+
+ dfb = armada_framebuffer_create(dev, mode, obj);
+ if (IS_ERR(dfb)) {
+ ret = PTR_ERR(dfb);
+ goto err;
+ }
+
+ drm_gem_object_unreference_unlocked(&obj->obj);
+
+ return &dfb->fb;
+
+ err_unref:
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ err:
+ DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
+ return ERR_PTR(ret);
+}
+
+static void armada_output_poll_changed(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct drm_fb_helper *fbh = priv->fbdev;
+
+ if (fbh)
+ drm_fb_helper_hotplug_event(fbh);
+}
+
+const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
+ .fb_create = armada_fb_create,
+ .output_poll_changed = armada_output_poll_changed,
+};
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
new file mode 100644
index 000000000000..ce3f12ebfc53
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_FB_H
+#define ARMADA_FB_H
+
+struct armada_framebuffer {
+ struct drm_framebuffer fb;
+ struct armada_gem_object *obj;
+ uint8_t fmt;
+ uint8_t mod;
+};
+#define drm_fb_to_armada_fb(dfb) \
+ container_of(dfb, struct armada_framebuffer, fb)
+#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
+ struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
new file mode 100644
index 000000000000..dd5ea77dac96
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Written from the i915 driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+
+static /*const*/ struct fb_ops armada_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int armada_fb_create(struct drm_fb_helper *fbh,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = fbh->dev;
+ struct drm_mode_fb_cmd2 mode;
+ struct armada_framebuffer *dfb;
+ struct armada_gem_object *obj;
+ struct fb_info *info;
+ int size, ret;
+ void *ptr;
+
+ memset(&mode, 0, sizeof(mode));
+ mode.width = sizes->surface_width;
+ mode.height = sizes->surface_height;
+ mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp);
+ mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode.pitches[0] * mode.height;
+ obj = armada_gem_alloc_private_object(dev, size);
+ if (!obj) {
+ DRM_ERROR("failed to allocate fb memory\n");
+ return -ENOMEM;
+ }
+
+ ret = armada_gem_linear_back(dev, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ return ret;
+ }
+
+ ptr = armada_gem_map_object(dev, obj);
+ if (!ptr) {
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ return -ENOMEM;
+ }
+
+ dfb = armada_framebuffer_create(dev, &mode, obj);
+
+ /*
+ * A reference is now held by the framebuffer object if
+ * successful, otherwise this drops the ref for the error path.
+ */
+ drm_gem_object_unreference_unlocked(&obj->obj);
+
+ if (IS_ERR(dfb))
+ return PTR_ERR(dfb);
+
+ info = framebuffer_alloc(0, dev->dev);
+ if (!info) {
+ ret = -ENOMEM;
+ goto err_fballoc;
+ }
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto err_fbcmap;
+ }
+
+ strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
+ info->par = fbh;
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &armada_fb_ops;
+ info->fix.smem_start = obj->phys_addr;
+ info->fix.smem_len = obj->obj.size;
+ info->screen_size = obj->obj.size;
+ info->screen_base = ptr;
+ fbh->fb = &dfb->fb;
+ fbh->fbdev = info;
+ drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
+ drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
+
+ DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
+ dfb->fb.width, dfb->fb.height,
+ dfb->fb.bits_per_pixel, obj->phys_addr);
+
+ return 0;
+
+ err_fbcmap:
+ framebuffer_release(info);
+ err_fballoc:
+ dfb->fb.funcs->destroy(&dfb->fb);
+ return ret;
+}
+
+static int armada_fb_probe(struct drm_fb_helper *fbh,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ int ret = 0;
+
+ if (!fbh->fb) {
+ ret = armada_fb_create(fbh, sizes);
+ if (ret == 0)
+ ret = 1;
+ }
+ return ret;
+}
+
+static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
+ .gamma_set = armada_drm_crtc_gamma_set,
+ .gamma_get = armada_drm_crtc_gamma_get,
+ .fb_probe = armada_fb_probe,
+};
+
+int armada_fbdev_init(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct drm_fb_helper *fbh;
+ int ret;
+
+ fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL);
+ if (!fbh)
+ return -ENOMEM;
+
+ priv->fbdev = fbh;
+
+ fbh->funcs = &armada_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, fbh, 1, 1);
+ if (ret) {
+ DRM_ERROR("failed to initialize drm fb helper\n");
+ goto err_fb_helper;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fbh);
+ if (ret) {
+ DRM_ERROR("failed to add fb connectors\n");
+ goto err_fb_setup;
+ }
+
+ ret = drm_fb_helper_initial_config(fbh, 32);
+ if (ret) {
+ DRM_ERROR("failed to set initial config\n");
+ goto err_fb_setup;
+ }
+
+ return 0;
+ err_fb_setup:
+ drm_fb_helper_fini(fbh);
+ err_fb_helper:
+ priv->fbdev = NULL;
+ return ret;
+}
+
+void armada_fbdev_fini(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct drm_fb_helper *fbh = priv->fbdev;
+
+ if (fbh) {
+ struct fb_info *info = fbh->fbdev;
+
+ if (info) {
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (fbh->fb)
+ fbh->fb->funcs->destroy(fbh->fb);
+
+ drm_fb_helper_fini(fbh);
+
+ priv->fbdev = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
new file mode 100644
index 000000000000..9f2356bae7fd
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/shmem_fs.h>
+#include <drm/drmP.h>
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
+ unsigned long addr = (unsigned long)vmf->virtual_address;
+ unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
+ int ret;
+
+ pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
+ ret = vm_insert_pfn(vma, addr, pfn);
+
+ switch (ret) {
+ case 0:
+ case -EBUSY:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+const struct vm_operations_struct armada_gem_vm_ops = {
+ .fault = armada_gem_vm_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static size_t roundup_gem_size(size_t size)
+{
+ return roundup(size, PAGE_SIZE);
+}
+
+/* dev->struct_mutex is held here */
+void armada_gem_free_object(struct drm_gem_object *obj)
+{
+ struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+
+ DRM_DEBUG_DRIVER("release obj %p\n", dobj);
+
+ drm_gem_free_mmap_offset(&dobj->obj);
+
+ if (dobj->page) {
+ /* page backed memory */
+ unsigned int order = get_order(dobj->obj.size);
+ __free_pages(dobj->page, order);
+ } else if (dobj->linear) {
+ /* linear backed memory */
+ drm_mm_remove_node(dobj->linear);
+ kfree(dobj->linear);
+ if (dobj->addr)
+ iounmap(dobj->addr);
+ }
+
+ if (dobj->obj.import_attach) {
+ /* We only ever display imported data */
+ dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
+ DMA_TO_DEVICE);
+ drm_prime_gem_destroy(&dobj->obj, NULL);
+ }
+
+ drm_gem_object_release(&dobj->obj);
+
+ kfree(dobj);
+}
+
+int
+armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
+{
+ struct armada_private *priv = dev->dev_private;
+ size_t size = obj->obj.size;
+
+ if (obj->page || obj->linear)
+ return 0;
+
+ /*
+ * If it is a small allocation (typically cursor, which will
+ * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
+ * Framebuffers will never be this small (our minimum size for
+ * framebuffers is larger than this anyway.) Such objects are
+ * only accessed by the CPU so we don't need any special handling
+ * here.
+ */
+ if (size <= 8192) {
+ unsigned int order = get_order(size);
+ struct page *p = alloc_pages(GFP_KERNEL, order);
+
+ if (p) {
+ obj->addr = page_address(p);
+ obj->phys_addr = page_to_phys(p);
+ obj->page = p;
+
+ memset(obj->addr, 0, PAGE_ALIGN(size));
+ }
+ }
+
+ /*
+ * We could grab something from CMA if it's enabled, but that
+ * involves building in a problem:
+ *
+ * CMA's interface uses dma_alloc_coherent(), which provides us
+ * with a CPU virtual address and a device address.
+ *
+ * The CPU virtual address may be either an address in the kernel
+ * direct mapped region (for example, as it would be on x86) or
+ * it may be remapped into another part of kernel memory space
+ * (eg, as it would be on ARM.) This means virt_to_phys() on the
+ * returned virtual address may be invalid, depending on the architecture
+ * implementation.
+ *
+ * The device address may also not be a physical address; it may
+ * be that there is some kind of remapping between the device and
+ * system RAM, which also makes the device address unsafe to
+ * reuse as a physical address.
+ *
+ * This makes DRM usage of dma_alloc_coherent() in a generic way
+ * at best very questionable and unsafe.
+ */
+
+ /* Otherwise, grab it from our linear allocation */
+ if (!obj->page) {
+ struct drm_mm_node *node;
+ unsigned align = min_t(unsigned, size, SZ_2M);
+ void __iomem *ptr;
+ int ret;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOSPC;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_mm_insert_node(&priv->linear, node, size, align,
+ DRM_MM_SEARCH_DEFAULT);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ kfree(node);
+ return ret;
+ }
+
+ obj->linear = node;
+
+ /* Ensure that the memory we're returning is cleared. */
+ ptr = ioremap_wc(obj->linear->start, size);
+ if (!ptr) {
+ mutex_lock(&dev->struct_mutex);
+ drm_mm_remove_node(obj->linear);
+ mutex_unlock(&dev->struct_mutex);
+ kfree(obj->linear);
+ obj->linear = NULL;
+ return -ENOMEM;
+ }
+
+ memset_io(ptr, 0, size);
+ iounmap(ptr);
+
+ obj->phys_addr = obj->linear->start;
+ obj->dev_addr = obj->linear->start;
+ }
+
+ DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
+ obj, obj->phys_addr, obj->dev_addr);
+
+ return 0;
+}
+
+void *
+armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
+{
+ /* only linear objects need to be ioremap'd */
+ if (!dobj->addr && dobj->linear)
+ dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
+ return dobj->addr;
+}
+
+struct armada_gem_object *
+armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
+{
+ struct armada_gem_object *obj;
+
+ size = roundup_gem_size(size);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ drm_gem_private_object_init(dev, &obj->obj, size);
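+ /* no valid device address until the object gains backing store */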
+ obj->dev_addr = DMA_ERROR_CODE;
+
+ DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
+
+ return obj;
+}
+
+struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
+ size_t size)
+{
+ struct armada_gem_object *obj;
+ struct address_space *mapping;
+
+ size = roundup_gem_size(size);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ if (drm_gem_object_init(dev, &obj->obj, size)) {
+ kfree(obj);
+ return NULL;
+ }
+
+ obj->dev_addr = DMA_ERROR_CODE;
+
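+ /* backing shmem pages may come from highmem and are reclaimable */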
+ mapping = file_inode(obj->obj.filp)->i_mapping;
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
+ DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
+
+ return obj;
+}
+
+/* Dumb alloc support */
+int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct armada_gem_object *dobj;
+ u32 handle;
+ size_t size;
+ int ret;
+
+ args->pitch = armada_pitch(args->width, args->bpp);
+ args->size = size = args->pitch * args->height;
+
+ dobj = armada_gem_alloc_private_object(dev, size);
+ if (dobj == NULL)
+ return -ENOMEM;
+
+ ret = armada_gem_linear_back(dev, dobj);
+ if (ret)
+ goto err;
+
+ ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+ if (ret)
+ goto err;
+
+ args->handle = handle;
+
+ /* drop reference from allocate - handle holds it now */
+ DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+ drm_gem_object_unreference_unlocked(&dobj->obj);
+ return ret;
+}
+
+int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct armada_gem_object *obj;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = armada_gem_object_lookup(dev, file, handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* Don't allow imported objects to be mapped */
+ if (obj->obj.import_attach) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = drm_gem_create_mmap_offset(&obj->obj);
+ if (ret == 0) {
+ *offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
+ DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
+ }
+
+ drm_gem_object_unreference(&obj->obj);
+ err_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+/* Private driver gem ioctls */
+int armada_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_armada_gem_create *args = data;
+ struct armada_gem_object *dobj;
+ size_t size;
+ u32 handle;
+ int ret;
+
+ if (args->size == 0)
+ return -ENOMEM;
+
+ size = args->size;
+
+ dobj = armada_gem_alloc_object(dev, size);
+ if (dobj == NULL)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+ if (ret)
+ goto err;
+
+ args->handle = handle;
+
+ /* drop reference from allocate - handle holds it now */
+ DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+ drm_gem_object_unreference_unlocked(&dobj->obj);
+ return ret;
+}
+
+/* Map a shmem-backed object into process memory space */
+int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_armada_gem_mmap *args = data;
+ struct armada_gem_object *dobj;
+ unsigned long addr;
+
+ dobj = armada_gem_object_lookup(dev, file, args->handle);
+ if (dobj == NULL)
+ return -ENOENT;
+
+ if (!dobj->obj.filp) {
+ drm_gem_object_unreference(&dobj->obj);
+ return -EINVAL;
+ }
+
+ addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, args->offset);
+ drm_gem_object_unreference(&dobj->obj);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ args->addr = addr;
+
+ return 0;
+}
+
+int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_armada_gem_pwrite *args = data;
+ struct armada_gem_object *dobj;
+ char __user *ptr;
+ int ret;
+
+ DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
+ args->handle, args->offset, args->size, args->ptr);
+
+ if (args->size == 0)
+ return 0;
+
+ ptr = (char __user *)(uintptr_t)args->ptr;
+
+ if (!access_ok(VERIFY_READ, ptr, args->size))
+ return -EFAULT;
+
+ ret = fault_in_multipages_readable(ptr, args->size);
+ if (ret)
+ return ret;
+
+ dobj = armada_gem_object_lookup(dev, file, args->handle);
+ if (dobj == NULL)
+ return -ENOENT;
+
+ /* Must be a kernel-mapped object */
+ if (!dobj->addr) {
+ ret = -EINVAL;
+ goto unref;
+ }
+
+ if (args->offset > dobj->obj.size ||
+ args->size > dobj->obj.size - args->offset) {
+ DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
+ ret = -EINVAL;
+ goto unref;
+ }
+
+ if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
+ ret = -EFAULT;
+ } else if (dobj->update) {
+ dobj->update(dobj->update_data);
+ ret = 0;
+ }
+
+ unref:
+ drm_gem_object_unreference_unlocked(&dobj->obj);
+ return ret;
+}
+
+/* Prime support */
+struct sg_table *
+armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+ struct scatterlist *sg;
+ struct sg_table *sgt;
+ int i, num;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return NULL;
+
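+ /* shmem-backed object: take a reference on each page and map it for DMA */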
+ if (dobj->obj.filp) {
+ struct address_space *mapping;
+ gfp_t gfp;
+ int count;
+
+ count = dobj->obj.size / PAGE_SIZE;
+ if (sg_alloc_table(sgt, count, GFP_KERNEL))
+ goto free_sgt;
+
+ mapping = file_inode(dobj->obj.filp)->i_mapping;
+ gfp = mapping_gfp_mask(mapping);
+
+ for_each_sg(sgt->sgl, sg, count, i) {
+ struct page *page;
+
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page)) {
+ num = i;
+ goto release;
+ }
+
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ }
+
+ if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
+ num = sgt->nents;
+ goto release;
+ }
+ } else if (dobj->page) {
+ /* Single contiguous page */
+ if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ goto free_sgt;
+
+ sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
+
+ if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ goto free_table;
+ } else if (dobj->linear) {
+ /* Single contiguous physical region - no struct page */
+ if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ goto free_sgt;
+ sg_dma_address(sgt->sgl) = dobj->dev_addr;
+ sg_dma_len(sgt->sgl) = dobj->obj.size;
+ } else {
+ goto free_sgt;
+ }
+ return sgt;
+
+ release:
+ for_each_sg(sgt->sgl, sg, num, i)
+ page_cache_release(sg_page(sg));
+ free_table:
+ sg_free_table(sgt);
+ free_sgt:
+ kfree(sgt);
+ return NULL;
+}
+
+static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+ int i;
+
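+ /* linear objects were never dma_map_sg()'d, so skip the unmap */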
+ if (!dobj->linear)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+ if (dobj->obj.filp) {
+ struct scatterlist *sg;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ page_cache_release(sg_page(sg));
+ }
+
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
+{
+ return NULL;
+}
+
+static void
+armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
+{
+}
+
+static int
+armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
+ .map_dma_buf = armada_gem_prime_map_dma_buf,
+ .unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .kmap_atomic = armada_gem_dmabuf_no_kmap,
+ .kunmap_atomic = armada_gem_dmabuf_no_kunmap,
+ .kmap = armada_gem_dmabuf_no_kmap,
+ .kunmap = armada_gem_dmabuf_no_kunmap,
+ .mmap = armada_gem_dmabuf_mmap,
+};
+
+struct dma_buf *
+armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
+ int flags)
+{
+ return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
+ O_RDWR);
+}
+
+struct drm_gem_object *
+armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
+{
+ struct dma_buf_attachment *attach;
+ struct armada_gem_object *dobj;
+
+ if (buf->ops == &armada_gem_prime_dmabuf_ops) {
+ struct drm_gem_object *obj = buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing our own dmabuf(s) increases the
+ * refcount on the gem object itself.
+ */
+ drm_gem_object_reference(obj);
+ dma_buf_put(buf);
+ return obj;
+ }
+ }
+
+ attach = dma_buf_attach(buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ dobj = armada_gem_alloc_private_object(dev, buf->size);
+ if (!dobj) {
+ dma_buf_detach(buf, attach);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dobj->obj.import_attach = attach;
+
+ /*
+ * Don't call dma_buf_map_attachment() here - it maps the
+ * scatterlist immediately for DMA, and this is not always
+ * an appropriate thing to do.
+ */
+ return &dobj->obj;
+}
+
+int armada_gem_map_import(struct armada_gem_object *dobj)
+{
+ int ret;
+
+ dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
+ DMA_TO_DEVICE);
+ if (!dobj->sgt) {
+ DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
+ return -EINVAL;
+ }
+ if (IS_ERR(dobj->sgt)) {
+ ret = PTR_ERR(dobj->sgt);
+ dobj->sgt = NULL;
+ DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
+ return ret;
+ }
+ if (dobj->sgt->nents > 1) {
+ DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
+ return -EINVAL;
+ }
+ if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
+ DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
+ return -EINVAL;
+ }
+ dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
+ return 0;
+}
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
new file mode 100644
index 000000000000..00b6cd461a03
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_GEM_H
+#define ARMADA_GEM_H
+
+/* GEM */
+struct armada_gem_object {
+ struct drm_gem_object obj;
+ void *addr;
+ phys_addr_t phys_addr;
+ resource_size_t dev_addr;
+ struct drm_mm_node *linear; /* for linear backed */
+ struct page *page; /* for page backed */
+ struct sg_table *sgt; /* for imported */
+ void (*update)(void *);
+ void *update_data;
+};
+
+extern const struct vm_operations_struct armada_gem_vm_ops;
+
+#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
+
+void armada_gem_free_object(struct drm_gem_object *);
+int armada_gem_linear_back(struct drm_device *, struct armada_gem_object *);
+void *armada_gem_map_object(struct drm_device *, struct armada_gem_object *);
+struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
+ size_t);
+int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
+ struct drm_mode_create_dumb *);
+int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *,
+ uint32_t, uint64_t *);
+int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *,
+ uint32_t);
+struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
+ struct dma_buf *);
+int armada_gem_map_import(struct armada_gem_object *);
+
+static inline struct armada_gem_object *armada_gem_object_lookup(
+ struct drm_device *dev, struct drm_file *dfile, unsigned handle)
+{
+ struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle);
+
+ return obj ? drm_to_armada_gem(obj) : NULL;
+}
+#endif
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
new file mode 100644
index 000000000000..27319a8335e2
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_HW_H
+#define ARMADA_HW_H
+
+/*
+ * Note: the following registers are written from IRQ context:
+ * LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ * LCD_SPU_DMA_START_ADDR_[YUV][01], LCD_SPU_DMA_PITCH_YC,
+ * LCD_SPU_DMA_PITCH_UV, LCD_SPU_DMA_OVSA_HPXL_VLN,
+ * LCD_SPU_DMA_HPXL_VLN, LCD_SPU_DZM_HPXL_VLN, LCD_SPU_DMA_CTRL0
+ */
+enum {
+ LCD_SPU_ADV_REG = 0x0084, /* Armada 510 */
+ LCD_SPU_DMA_START_ADDR_Y0 = 0x00c0,
+ LCD_SPU_DMA_START_ADDR_U0 = 0x00c4,
+ LCD_SPU_DMA_START_ADDR_V0 = 0x00c8,
+ LCD_CFG_DMA_START_ADDR_0 = 0x00cc,
+ LCD_SPU_DMA_START_ADDR_Y1 = 0x00d0,
+ LCD_SPU_DMA_START_ADDR_U1 = 0x00d4,
+ LCD_SPU_DMA_START_ADDR_V1 = 0x00d8,
+ LCD_CFG_DMA_START_ADDR_1 = 0x00dc,
+ LCD_SPU_DMA_PITCH_YC = 0x00e0,
+ LCD_SPU_DMA_PITCH_UV = 0x00e4,
+ LCD_SPU_DMA_OVSA_HPXL_VLN = 0x00e8,
+ LCD_SPU_DMA_HPXL_VLN = 0x00ec,
+ LCD_SPU_DZM_HPXL_VLN = 0x00f0,
+ LCD_CFG_GRA_START_ADDR0 = 0x00f4,
+ LCD_CFG_GRA_START_ADDR1 = 0x00f8,
+ LCD_CFG_GRA_PITCH = 0x00fc,
+ LCD_SPU_GRA_OVSA_HPXL_VLN = 0x0100,
+ LCD_SPU_GRA_HPXL_VLN = 0x0104,
+ LCD_SPU_GZM_HPXL_VLN = 0x0108,
+ LCD_SPU_HWC_OVSA_HPXL_VLN = 0x010c,
+ LCD_SPU_HWC_HPXL_VLN = 0x0110,
+ LCD_SPUT_V_H_TOTAL = 0x0114,
+ LCD_SPU_V_H_ACTIVE = 0x0118,
+ LCD_SPU_H_PORCH = 0x011c,
+ LCD_SPU_V_PORCH = 0x0120,
+ LCD_SPU_BLANKCOLOR = 0x0124,
+ LCD_SPU_ALPHA_COLOR1 = 0x0128,
+ LCD_SPU_ALPHA_COLOR2 = 0x012c,
+ LCD_SPU_COLORKEY_Y = 0x0130,
+ LCD_SPU_COLORKEY_U = 0x0134,
+ LCD_SPU_COLORKEY_V = 0x0138,
+ LCD_CFG_RDREG4F = 0x013c, /* Armada 510 */
+ LCD_SPU_SPI_RXDATA = 0x0140,
+ LCD_SPU_ISA_RXDATA = 0x0144,
+ LCD_SPU_HWC_RDDAT = 0x0158,
+ LCD_SPU_GAMMA_RDDAT = 0x015c,
+ LCD_SPU_PALETTE_RDDAT = 0x0160,
+ LCD_SPU_IOPAD_IN = 0x0178,
+ LCD_CFG_RDREG5F = 0x017c,
+ LCD_SPU_SPI_CTRL = 0x0180,
+ LCD_SPU_SPI_TXDATA = 0x0184,
+ LCD_SPU_SMPN_CTRL = 0x0188,
+ LCD_SPU_DMA_CTRL0 = 0x0190,
+ LCD_SPU_DMA_CTRL1 = 0x0194,
+ LCD_SPU_SRAM_CTRL = 0x0198,
+ LCD_SPU_SRAM_WRDAT = 0x019c,
+ LCD_SPU_SRAM_PARA0 = 0x01a0, /* Armada 510 */
+ LCD_SPU_SRAM_PARA1 = 0x01a4,
+ LCD_CFG_SCLK_DIV = 0x01a8,
+ LCD_SPU_CONTRAST = 0x01ac,
+ LCD_SPU_SATURATION = 0x01b0,
+ LCD_SPU_CBSH_HUE = 0x01b4,
+ LCD_SPU_DUMB_CTRL = 0x01b8,
+ LCD_SPU_IOPAD_CONTROL = 0x01bc,
+ LCD_SPU_IRQ_ENA = 0x01c0,
+ LCD_SPU_IRQ_ISR = 0x01c4,
+};
+
+/* For LCD_SPU_ADV_REG */
+enum {
+ ADV_VSYNC_L_OFF = 0xfff << 20,
+ ADV_GRACOLORKEY = 1 << 19,
+ ADV_VIDCOLORKEY = 1 << 18,
+ ADV_HWC32BLEND = 1 << 15,
+ ADV_HWC32ARGB = 1 << 14,
+ ADV_HWC32ENABLE = 1 << 13,
+ ADV_VSYNCOFFEN = 1 << 12,
+ ADV_VSYNC_H_OFF = 0xfff << 0,
+};
+
+enum {
+ CFG_565 = 0,
+ CFG_1555 = 1,
+ CFG_888PACK = 2,
+ CFG_X888 = 3,
+ CFG_8888 = 4,
+ CFG_422PACK = 5,
+ CFG_422 = 6,
+ CFG_420 = 7,
+ CFG_PSEUDO4 = 9,
+ CFG_PSEUDO8 = 10,
+ CFG_SWAPRB = 1 << 4,
+ CFG_SWAPUV = 1 << 3,
+ CFG_SWAPYU = 1 << 2,
+ CFG_YUV2RGB = 1 << 1,
+};
+
+/* For LCD_SPU_DMA_CTRL0 */
+enum {
+ CFG_NOBLENDING = 1 << 31,
+ CFG_GAMMA_ENA = 1 << 30,
+ CFG_CBSH_ENA = 1 << 29,
+ CFG_PALETTE_ENA = 1 << 28,
+ CFG_ARBFAST_ENA = 1 << 27,
+ CFG_HWC_1BITMOD = 1 << 26,
+ CFG_HWC_1BITENA = 1 << 25,
+ CFG_HWC_ENA = 1 << 24,
+ CFG_DMAFORMAT = 0xf << 20,
+#define CFG_DMA_FMT(x) ((x) << 20)
+ CFG_GRAFORMAT = 0xf << 16,
+#define CFG_GRA_FMT(x) ((x) << 16)
+#define CFG_GRA_MOD(x) ((x) << 8)
+ CFG_GRA_FTOGGLE = 1 << 15,
+ CFG_GRA_HSMOOTH = 1 << 14,
+ CFG_GRA_TSTMODE = 1 << 13,
+ CFG_GRA_ENA = 1 << 8,
+#define CFG_DMA_MOD(x) ((x) << 0)
+ CFG_DMA_FTOGGLE = 1 << 7,
+ CFG_DMA_HSMOOTH = 1 << 6,
+ CFG_DMA_TSTMODE = 1 << 5,
+ CFG_DMA_ENA = 1 << 0,
+};
+
+enum {
+ CKMODE_DISABLE = 0,
+ CKMODE_Y = 1,
+ CKMODE_U = 2,
+ CKMODE_RGB = 3,
+ CKMODE_V = 4,
+ CKMODE_R = 5,
+ CKMODE_G = 6,
+ CKMODE_B = 7,
+};
+
+/* For LCD_SPU_DMA_CTRL1 */
+enum {
+ CFG_FRAME_TRIG = 1 << 31,
+ CFG_VSYNC_INV = 1 << 27,
+ CFG_CKMODE_MASK = 0x7 << 24,
+#define CFG_CKMODE(x) ((x) << 24)
+ CFG_CARRY = 1 << 23,
+ CFG_GATED_CLK = 1 << 21,
+ CFG_PWRDN_ENA = 1 << 20,
+ CFG_DSCALE_MASK = 0x3 << 18,
+ CFG_DSCALE_NONE = 0x0 << 18,
+ CFG_DSCALE_HALF = 0x1 << 18,
+ CFG_DSCALE_QUAR = 0x2 << 18,
+ CFG_ALPHAM_MASK = 0x3 << 16,
+ CFG_ALPHAM_VIDEO = 0x0 << 16,
+ CFG_ALPHAM_GRA = 0x1 << 16,
+ CFG_ALPHAM_CFG = 0x2 << 16,
+ CFG_ALPHA_MASK = 0xff << 8,
+ CFG_PIXCMD_MASK = 0xff,
+};
+
+/* For LCD_SPU_SRAM_CTRL */
+enum {
+ SRAM_READ = 0 << 14,
+ SRAM_WRITE = 2 << 14,
+ SRAM_INIT = 3 << 14,
+ SRAM_HWC32_RAM1 = 0xc << 8,
+ SRAM_HWC32_RAM2 = 0xd << 8,
+ SRAM_HWC32_RAMR = SRAM_HWC32_RAM1,
+ SRAM_HWC32_RAMG = SRAM_HWC32_RAM2,
+ SRAM_HWC32_RAMB = 0xe << 8,
+ SRAM_HWC32_TRAN = 0xf << 8,
+ SRAM_HWC = 0xf << 8,
+};
+
+/* For LCD_SPU_SRAM_PARA1 */
+enum {
+ CFG_CSB_256x32 = 1 << 15, /* cursor */
+ CFG_CSB_256x24 = 1 << 14, /* palette */
+ CFG_CSB_256x8 = 1 << 13, /* gamma */
+ CFG_PDWN1920x32 = 1 << 8, /* Armada 510: power down vscale ram */
+ CFG_PDWN256x32 = 1 << 7, /* power down cursor */
+ CFG_PDWN256x24 = 1 << 6, /* power down palette */
+ CFG_PDWN256x8 = 1 << 5, /* power down gamma */
+ CFG_PDWNHWC = 1 << 4, /* Armada 510: power down all hwc ram */
+ CFG_PDWN32x32 = 1 << 3, /* power down slave->smart ram */
+ CFG_PDWN16x66 = 1 << 2, /* power down UV fifo */
+ CFG_PDWN32x66 = 1 << 1, /* power down Y fifo */
+ CFG_PDWN64x66 = 1 << 0, /* power down graphic fifo */
+};
+
+/* For LCD_CFG_SCLK_DIV */
+enum {
+ /* Armada 510 */
+ SCLK_510_AXI = 0x0 << 30,
+ SCLK_510_EXTCLK0 = 0x1 << 30,
+ SCLK_510_PLL = 0x2 << 30,
+ SCLK_510_EXTCLK1 = 0x3 << 30,
+ SCLK_510_DIV_CHANGE = 1 << 29,
+ SCLK_510_FRAC_DIV_MASK = 0xfff << 16,
+ SCLK_510_INT_DIV_MASK = 0xffff << 0,
+
+ /* Armada 16x */
+ SCLK_16X_AHB = 0x0 << 28,
+ SCLK_16X_PCLK = 0x1 << 28,
+ SCLK_16X_AXI = 0x4 << 28,
+ SCLK_16X_PLL = 0x8 << 28,
+ SCLK_16X_FRAC_DIV_MASK = 0xfff << 16,
+ SCLK_16X_INT_DIV_MASK = 0xffff << 0,
+};
+
+/* For LCD_SPU_DUMB_CTRL */
+enum {
+ DUMB16_RGB565_0 = 0x0 << 28,
+ DUMB16_RGB565_1 = 0x1 << 28,
+ DUMB18_RGB666_0 = 0x2 << 28,
+ DUMB18_RGB666_1 = 0x3 << 28,
+ DUMB12_RGB444_0 = 0x4 << 28,
+ DUMB12_RGB444_1 = 0x5 << 28,
+ DUMB24_RGB888_0 = 0x6 << 28,
+ DUMB_BLANK = 0x7 << 28,
+ DUMB_MASK = 0xf << 28,
+ CFG_BIAS_OUT = 1 << 8,
+ CFG_REV_RGB = 1 << 7,
+ CFG_INV_CBLANK = 1 << 6,
+ CFG_INV_CSYNC = 1 << 5, /* Normally active high */
+ CFG_INV_HENA = 1 << 4,
+ CFG_INV_VSYNC = 1 << 3, /* Normally active high */
+ CFG_INV_HSYNC = 1 << 2, /* Normally active high */
+ CFG_INV_PCLK = 1 << 1,
+ CFG_DUMB_ENA = 1 << 0,
+};
+
+/* For LCD_SPU_IOPAD_CONTROL */
+enum {
+ CFG_VSCALE_LN_EN = 3 << 18,
+ CFG_GRA_VM_ENA = 1 << 15,
+ CFG_DMA_VM_ENA = 1 << 13,
+ CFG_CMD_VM_ENA = 1 << 11,
+ CFG_CSC_MASK = 3 << 8,
+ CFG_CSC_YUV_CCIR709 = 1 << 9,
+ CFG_CSC_YUV_CCIR601 = 0 << 9,
+ CFG_CSC_RGB_STUDIO = 1 << 8,
+ CFG_CSC_RGB_COMPUTER = 0 << 8,
+ CFG_IOPAD_MASK = 0xf << 0,
+ CFG_IOPAD_DUMB24 = 0x0 << 0,
+ CFG_IOPAD_DUMB18SPI = 0x1 << 0,
+ CFG_IOPAD_DUMB18GPIO = 0x2 << 0,
+ CFG_IOPAD_DUMB16SPI = 0x3 << 0,
+ CFG_IOPAD_DUMB16GPIO = 0x4 << 0,
+ CFG_IOPAD_DUMB12GPIO = 0x5 << 0,
+ CFG_IOPAD_SMART18 = 0x6 << 0,
+ CFG_IOPAD_SMART16 = 0x7 << 0,
+ CFG_IOPAD_SMART8 = 0x8 << 0,
+};
+
+#define IOPAD_DUMB24 0x0
+
+/* For LCD_SPU_IRQ_ENA */
+enum {
+ DMA_FRAME_IRQ0_ENA = 1 << 31,
+ DMA_FRAME_IRQ1_ENA = 1 << 30,
+ DMA_FRAME_IRQ_ENA = DMA_FRAME_IRQ0_ENA | DMA_FRAME_IRQ1_ENA,
+ DMA_FF_UNDERFLOW_ENA = 1 << 29,
+ GRA_FRAME_IRQ0_ENA = 1 << 27,
+ GRA_FRAME_IRQ1_ENA = 1 << 26,
+ GRA_FRAME_IRQ_ENA = GRA_FRAME_IRQ0_ENA | GRA_FRAME_IRQ1_ENA,
+ GRA_FF_UNDERFLOW_ENA = 1 << 25,
+ VSYNC_IRQ_ENA = 1 << 23,
+ DUMB_FRAMEDONE_ENA = 1 << 22,
+ TWC_FRAMEDONE_ENA = 1 << 21,
+ HWC_FRAMEDONE_ENA = 1 << 20,
+ SLV_IRQ_ENA = 1 << 19,
+ SPI_IRQ_ENA = 1 << 18,
+ PWRDN_IRQ_ENA = 1 << 17,
+ ERR_IRQ_ENA = 1 << 16,
+ CLEAN_SPU_IRQ_ISR = 0xffff,
+};
+
+/* For LCD_SPU_IRQ_ISR */
+enum {
+ DMA_FRAME_IRQ0 = 1 << 31,
+ DMA_FRAME_IRQ1 = 1 << 30,
+ DMA_FRAME_IRQ = DMA_FRAME_IRQ0 | DMA_FRAME_IRQ1,
+ DMA_FF_UNDERFLOW = 1 << 29,
+ GRA_FRAME_IRQ0 = 1 << 27,
+ GRA_FRAME_IRQ1 = 1 << 26,
+ GRA_FRAME_IRQ = GRA_FRAME_IRQ0 | GRA_FRAME_IRQ1,
+ GRA_FF_UNDERFLOW = 1 << 25,
+ VSYNC_IRQ = 1 << 23,
+ DUMB_FRAMEDONE = 1 << 22,
+ TWC_FRAMEDONE = 1 << 21,
+ HWC_FRAMEDONE = 1 << 20,
+ SLV_IRQ = 1 << 19,
+ SPI_IRQ = 1 << 18,
+ PWRDN_IRQ = 1 << 17,
+ ERR_IRQ = 1 << 16,
+ DMA_FRAME_IRQ0_LEVEL = 1 << 15,
+ DMA_FRAME_IRQ1_LEVEL = 1 << 14,
+ DMA_FRAME_CNT_ISR = 3 << 12,
+ GRA_FRAME_IRQ0_LEVEL = 1 << 11,
+ GRA_FRAME_IRQ1_LEVEL = 1 << 10,
+ GRA_FRAME_CNT_ISR = 3 << 8,
+ VSYNC_IRQ_LEVEL = 1 << 7,
+ DUMB_FRAMEDONE_LEVEL = 1 << 6,
+ TWC_FRAMEDONE_LEVEL = 1 << 5,
+ HWC_FRAMEDONE_LEVEL = 1 << 4,
+ SLV_FF_EMPTY = 1 << 3,
+ DMA_FF_ALLEMPTY = 1 << 2,
+ GRA_FF_ALLEMPTY = 1 << 1,
+ PWRDN_IRQ_LEVEL = 1 << 0,
+};
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_ioctlP.h b/drivers/gpu/drm/armada/armada_ioctlP.h
new file mode 100644
index 000000000000..bd8c4562066c
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_ioctlP.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_IOCTLP_H
+#define ARMADA_IOCTLP_H
+
+#define ARMADA_IOCTL_PROTO(name)\
+extern int armada_##name##_ioctl(struct drm_device *, void *, struct drm_file *)
+
+ARMADA_IOCTL_PROTO(gem_create);
+ARMADA_IOCTL_PROTO(gem_mmap);
+ARMADA_IOCTL_PROTO(gem_pwrite);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
new file mode 100644
index 000000000000..d685a5421485
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_output.h"
+#include "armada_drm.h"
+
+struct armada_connector {
+ struct drm_connector conn;
+ const struct armada_output_type *type;
+};
+
+#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)
+
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
+{
+ struct drm_encoder *enc = conn->encoder;
+
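+ /* fall back to the connector's first possible encoder if none is attached */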
+ return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
+}
+
+static enum drm_connector_status armada_drm_connector_detect(
+ struct drm_connector *conn, bool force)
+{
+ struct armada_connector *dconn = drm_to_armada_conn(conn);
+ enum drm_connector_status status = connector_status_disconnected;
+
+ if (dconn->type->detect) {
+ status = dconn->type->detect(conn, force);
+ } else {
+ struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+
+ if (enc)
+ status = encoder_helper_funcs(enc)->detect(enc, conn);
+ }
+
+ return status;
+}
+
+static void armada_drm_connector_destroy(struct drm_connector *conn)
+{
+ struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+ drm_sysfs_connector_remove(conn);
+ drm_connector_cleanup(conn);
+ kfree(dconn);
+}
+
+static int armada_drm_connector_set_property(struct drm_connector *conn,
+ struct drm_property *property, uint64_t value)
+{
+ struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+ if (!dconn->type->set_property)
+ return -EINVAL;
+
+ return dconn->type->set_property(conn, property, value);
+}
+
+static const struct drm_connector_funcs armada_drm_conn_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = armada_drm_connector_detect,
+ .destroy = armada_drm_connector_destroy,
+ .set_property = armada_drm_connector_set_property,
+};
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder)
+{
+ encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void armada_drm_encoder_commit(struct drm_encoder *encoder)
+{
+ encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode, struct drm_display_mode *adjusted)
+{
+ return true;
+}
+
+/* Shouldn't this be a generic helper function? */
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+ struct drm_display_mode *mode)
+{
+ struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+ int valid = MODE_BAD;
+
+ if (encoder) {
+ struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+ valid = slave->slave_funcs->mode_valid(encoder, mode);
+ }
+ return valid;
+}
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+ struct drm_property *property, uint64_t value)
+{
+ struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+ int rc = -EINVAL;
+
+ if (encoder) {
+ struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+ rc = slave->slave_funcs->set_property(encoder, conn, property,
+ value);
+ }
+ return rc;
+}
+
+int armada_output_create(struct drm_device *dev,
+ const struct armada_output_type *type, const void *data)
+{
+ struct armada_connector *dconn;
+ int ret;
+
+ dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
+ if (!dconn)
+ return -ENOMEM;
+
+ dconn->type = type;
+
+ ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
+ type->connector_type);
+ if (ret) {
+ DRM_ERROR("unable to init connector\n");
+ goto err_destroy_dconn;
+ }
+
+ ret = type->create(&dconn->conn, data);
+ if (ret)
+ goto err_conn;
+
+ ret = drm_sysfs_connector_add(&dconn->conn);
+ if (ret)
+ goto err_sysfs;
+
+ return 0;
+
+ err_sysfs:
+ if (dconn->conn.encoder)
+ dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
+ err_conn:
+ drm_connector_cleanup(&dconn->conn);
+ err_destroy_dconn:
+ kfree(dconn);
+ return ret;
+}
diff --git a/drivers/gpu/drm/armada/armada_output.h b/drivers/gpu/drm/armada/armada_output.h
new file mode 100644
index 000000000000..4126d43b5057
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CONNECTOR_H
+#define ARMADA_CONNECTOR_H
+
+#define encoder_helper_funcs(encoder) \
+ ((struct drm_encoder_helper_funcs *)encoder->helper_private)
+
+struct armada_output_type {
+ int connector_type;
+ enum drm_connector_status (*detect)(struct drm_connector *, bool);
+ int (*create)(struct drm_connector *, const void *);
+ int (*set_property)(struct drm_connector *, struct drm_property *,
+ uint64_t);
+};
+
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder);
+void armada_drm_encoder_commit(struct drm_encoder *encoder);
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode, struct drm_display_mode *adj);
+
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+ struct drm_display_mode *mode);
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+ struct drm_property *property, uint64_t value);
+
+int armada_output_create(struct drm_device *dev,
+ const struct armada_output_type *type, const void *data);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
new file mode 100644
index 000000000000..c5b06fdb459c
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+struct armada_plane_properties {
+ uint32_t colorkey_yr;
+ uint32_t colorkey_ug;
+ uint32_t colorkey_vb;
+#define K2R(val) (((val) >> 0) & 0xff)
+#define K2G(val) (((val) >> 8) & 0xff)
+#define K2B(val) (((val) >> 16) & 0xff)
+ int16_t brightness;
+ uint16_t contrast;
+ uint16_t saturation;
+ uint32_t colorkey_mode;
+};
+
+struct armada_plane {
+ struct drm_plane base;
+ spinlock_t lock;
+ struct drm_framebuffer *old_fb;
+ uint32_t src_hw;
+ uint32_t dst_hw;
+ uint32_t dst_yx;
+ uint32_t ctrl0;
+ struct {
+ struct armada_vbl_event update;
+ struct armada_regs regs[13];
+ wait_queue_head_t wait;
+ } vbl;
+ struct armada_plane_properties prop;
+};
+#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
+
+
+static void
+armada_ovl_update_attr(struct armada_plane_properties *prop,
+ struct armada_crtc *dcrtc)
+{
+ writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
+ writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
+ writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
+
+ writel_relaxed(prop->brightness << 16 | prop->contrast,
+ dcrtc->base + LCD_SPU_CONTRAST);
+ /* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
+ writel_relaxed(prop->saturation << 16,
+ dcrtc->base + LCD_SPU_SATURATION);
+ writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
+
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
+ CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+ dcrtc->base + LCD_SPU_DMA_CTRL1);
+
+ armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+ spin_unlock_irq(&dcrtc->irq_lock);
+}
+
+/* === Plane support === */
+static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
+{
+ struct armada_plane *dplane = data;
+ struct drm_framebuffer *fb;
+
+ armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
+
+ spin_lock(&dplane->lock);
+ fb = dplane->old_fb;
+ dplane->old_fb = NULL;
+ spin_unlock(&dplane->lock);
+
+ if (fb)
+ armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
+}
+
+static unsigned armada_limit(int start, unsigned size, unsigned max)
+{
+ int end = start + size;
+ if (end < 0)
+ return 0;
+ if (start < 0)
+ start = 0;
+ return (unsigned)end > max ? max - start : end - start;
+}
+
+static int
+armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
+{
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ uint32_t val, ctrl0;
+ unsigned idx = 0;
+ int ret;
+
+ crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
+ crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
+ ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
+ CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
+ CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
+
+ /* Does the position/size result in nothing to display? */
+ if (crtc_w == 0 || crtc_h == 0) {
+ ctrl0 &= ~CFG_DMA_ENA;
+ }
+
+ /*
+ * FIXME: if the starting point is off screen, we need to
+ * adjust src_x, src_y, src_w, src_h appropriately, and
+ * according to the scale.
+ */
+
+ if (!dcrtc->plane) {
+ dcrtc->plane = plane;
+ armada_ovl_update_attr(&dplane->prop, dcrtc);
+ }
+
+ /* FIXME: overlay on an interlaced display */
+ /* Just updating the position/size? */
+ if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
+ val = (src_h & 0xffff0000) | src_w >> 16;
+ dplane->src_hw = val;
+ writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
+ val = crtc_h << 16 | crtc_w;
+ dplane->dst_hw = val;
+ writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
+ val = crtc_y << 16 | crtc_x;
+ dplane->dst_yx = val;
+ writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
+ return 0;
+ } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
+ /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
+ armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
+ dcrtc->base + LCD_SPU_SRAM_PARA1);
+ }
+
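+ /* wait for any previously queued register update to be applied */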
+ ret = wait_event_timeout(dplane->vbl.wait,
+ list_empty(&dplane->vbl.update.node),
+ HZ/25);
+ if (ret < 0)
+ return ret;
+
+ if (plane->fb != fb) {
+ struct armada_gem_object *obj = drm_fb_obj(fb);
+ uint32_t sy, su, sv;
+
+ /*
+ * Take a reference on the new framebuffer - we want to
+ * hold on to it while the hardware is displaying it.
+ */
+ drm_framebuffer_reference(fb);
+
+ if (plane->fb) {
+ struct drm_framebuffer *older_fb;
+
+ spin_lock_irq(&dplane->lock);
+ older_fb = dplane->old_fb;
+ dplane->old_fb = plane->fb;
+ spin_unlock_irq(&dplane->lock);
+ if (older_fb)
+ armada_drm_queue_unref_work(dcrtc->crtc.dev,
+ older_fb);
+ }
+
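+ /* plane source coordinates are 16.16 fixed point */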
+ src_y >>= 16;
+ src_x >>= 16;
+ sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
+ src_x * fb->bits_per_pixel / 8;
+ su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
+ src_x;
+ sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
+ src_x;
+
+ armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+ LCD_SPU_DMA_START_ADDR_Y0);
+ armada_reg_queue_set(dplane->vbl.regs, idx, su,
+ LCD_SPU_DMA_START_ADDR_U0);
+ armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+ LCD_SPU_DMA_START_ADDR_V0);
+ armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+ LCD_SPU_DMA_START_ADDR_Y1);
+ armada_reg_queue_set(dplane->vbl.regs, idx, su,
+ LCD_SPU_DMA_START_ADDR_U1);
+ armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+ LCD_SPU_DMA_START_ADDR_V1);
+
+ val = fb->pitches[0] << 16 | fb->pitches[0];
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DMA_PITCH_YC);
+ val = fb->pitches[1] << 16 | fb->pitches[2];
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DMA_PITCH_UV);
+ }
+
+ val = (src_h & 0xffff0000) | src_w >> 16;
+ if (dplane->src_hw != val) {
+ dplane->src_hw = val;
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DMA_HPXL_VLN);
+ }
+ val = crtc_h << 16 | crtc_w;
+ if (dplane->dst_hw != val) {
+ dplane->dst_hw = val;
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DZM_HPXL_VLN);
+ }
+ val = crtc_y << 16 | crtc_x;
+ if (dplane->dst_yx != val) {
+ dplane->dst_yx = val;
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DMA_OVSA_HPXL_VLN);
+ }
+ if (dplane->ctrl0 != ctrl0) {
+ dplane->ctrl0 = ctrl0;
+ armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
+ CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
+ CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
+ CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
+ CFG_YUV2RGB) | CFG_DMA_ENA,
+ LCD_SPU_DMA_CTRL0);
+ }
+ if (idx) {
+ armada_reg_queue_end(dplane->vbl.regs, idx);
+ armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update);
+ }
+ return 0;
+}
+
+static int armada_plane_disable(struct drm_plane *plane)
+{
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct drm_framebuffer *fb;
+ struct armada_crtc *dcrtc;
+
+ if (!dplane->base.crtc)
+ return 0;
+
+ dcrtc = drm_to_armada_crtc(dplane->base.crtc);
+ dcrtc->plane = NULL;
+
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
+ armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+ dplane->ctrl0 = 0;
+ spin_unlock_irq(&dcrtc->irq_lock);
+
+ /* Power down the Y/U/V FIFOs */
+ armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0,
+ dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+ if (plane->fb)
+ drm_framebuffer_unreference(plane->fb);
+
+ spin_lock_irq(&dplane->lock);
+ fb = dplane->old_fb;
+ dplane->old_fb = NULL;
+ spin_unlock_irq(&dplane->lock);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+
+ return 0;
+}
+
+static void armada_plane_destroy(struct drm_plane *plane)
+{
+ kfree(plane);
+}
+
+static int armada_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ struct armada_private *priv = plane->dev->dev_private;
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ bool update_attr = false;
+
+ if (property == priv->colorkey_prop) {
+#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
+ dplane->prop.colorkey_yr = CCC(K2R(val));
+ dplane->prop.colorkey_ug = CCC(K2G(val));
+ dplane->prop.colorkey_vb = CCC(K2B(val));
+#undef CCC
+ update_attr = true;
+ } else if (property == priv->colorkey_min_prop) {
+ dplane->prop.colorkey_yr &= ~0x00ff0000;
+ dplane->prop.colorkey_yr |= K2R(val) << 16;
+ dplane->prop.colorkey_ug &= ~0x00ff0000;
+ dplane->prop.colorkey_ug |= K2G(val) << 16;
+ dplane->prop.colorkey_vb &= ~0x00ff0000;
+ dplane->prop.colorkey_vb |= K2B(val) << 16;
+ update_attr = true;
+ } else if (property == priv->colorkey_max_prop) {
+ dplane->prop.colorkey_yr &= ~0xff000000;
+ dplane->prop.colorkey_yr |= K2R(val) << 24;
+ dplane->prop.colorkey_ug &= ~0xff000000;
+ dplane->prop.colorkey_ug |= K2G(val) << 24;
+ dplane->prop.colorkey_vb &= ~0xff000000;
+ dplane->prop.colorkey_vb |= K2B(val) << 24;
+ update_attr = true;
+ } else if (property == priv->colorkey_val_prop) {
+ dplane->prop.colorkey_yr &= ~0x0000ff00;
+ dplane->prop.colorkey_yr |= K2R(val) << 8;
+ dplane->prop.colorkey_ug &= ~0x0000ff00;
+ dplane->prop.colorkey_ug |= K2G(val) << 8;
+ dplane->prop.colorkey_vb &= ~0x0000ff00;
+ dplane->prop.colorkey_vb |= K2B(val) << 8;
+ update_attr = true;
+ } else if (property == priv->colorkey_alpha_prop) {
+ dplane->prop.colorkey_yr &= ~0x000000ff;
+ dplane->prop.colorkey_yr |= K2R(val);
+ dplane->prop.colorkey_ug &= ~0x000000ff;
+ dplane->prop.colorkey_ug |= K2G(val);
+ dplane->prop.colorkey_vb &= ~0x000000ff;
+ dplane->prop.colorkey_vb |= K2B(val);
+ update_attr = true;
+ } else if (property == priv->colorkey_mode_prop) {
+ dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
+ dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+ update_attr = true;
+ } else if (property == priv->brightness_prop) {
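+ /* property range 0..511 maps to signed brightness -256..255 */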
+ dplane->prop.brightness = val - 256;
+ update_attr = true;
+ } else if (property == priv->contrast_prop) {
+ dplane->prop.contrast = val;
+ update_attr = true;
+ } else if (property == priv->saturation_prop) {
+ dplane->prop.saturation = val;
+ update_attr = true;
+ }
+
+ if (update_attr && dplane->base.crtc)
+ armada_ovl_update_attr(&dplane->prop,
+ drm_to_armada_crtc(dplane->base.crtc));
+
+ return 0;
+}
+
+static const struct drm_plane_funcs armada_plane_funcs = {
+ .update_plane = armada_plane_update,
+ .disable_plane = armada_plane_disable,
+ .destroy = armada_plane_destroy,
+ .set_property = armada_plane_set_property,
+};
+
+static const uint32_t armada_formats[] = {
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+};
+
+static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
+ { CKMODE_DISABLE, "disabled" },
+ { CKMODE_Y, "Y component" },
+ { CKMODE_U, "U component" },
+ { CKMODE_V, "V component" },
+ { CKMODE_RGB, "RGB" },
+ { CKMODE_R, "R component" },
+ { CKMODE_G, "G component" },
+ { CKMODE_B, "B component" },
+};
+
+static int armada_overlay_create_properties(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ if (priv->colorkey_prop)
+ return 0;
+
+ priv->colorkey_prop = drm_property_create_range(dev, 0,
+ "colorkey", 0, 0xffffff);
+ priv->colorkey_min_prop = drm_property_create_range(dev, 0,
+ "colorkey_min", 0, 0xffffff);
+ priv->colorkey_max_prop = drm_property_create_range(dev, 0,
+ "colorkey_max", 0, 0xffffff);
+ priv->colorkey_val_prop = drm_property_create_range(dev, 0,
+ "colorkey_val", 0, 0xffffff);
+ priv->colorkey_alpha_prop = drm_property_create_range(dev, 0,
+ "colorkey_alpha", 0, 0xffffff);
+ priv->colorkey_mode_prop = drm_property_create_enum(dev, 0,
+ "colorkey_mode",
+ armada_drm_colorkey_enum_list,
+ ARRAY_SIZE(armada_drm_colorkey_enum_list));
+ priv->brightness_prop = drm_property_create_range(dev, 0,
+ "brightness", 0, 256 + 255);
+ priv->contrast_prop = drm_property_create_range(dev, 0,
+ "contrast", 0, 0x7fff);
+ priv->saturation_prop = drm_property_create_range(dev, 0,
+ "saturation", 0, 0x7fff);
+
+ if (!priv->colorkey_prop)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct drm_mode_object *mobj;
+ struct armada_plane *dplane;
+ int ret;
+
+ ret = armada_overlay_create_properties(dev);
+ if (ret)
+ return ret;
+
+ dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
+ if (!dplane)
+ return -ENOMEM;
+
+ spin_lock_init(&dplane->lock);
+ init_waitqueue_head(&dplane->vbl.wait);
+ armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl,
+ dplane);
+
+ drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs,
+ armada_formats, ARRAY_SIZE(armada_formats), false);
+
+ dplane->prop.colorkey_yr = 0xfefefe00;
+ dplane->prop.colorkey_ug = 0x01010100;
+ dplane->prop.colorkey_vb = 0x01010100;
+ dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+ dplane->prop.brightness = 0;
+ dplane->prop.contrast = 0x4000;
+ dplane->prop.saturation = 0x4000;
+
+ mobj = &dplane->base.base;
+ drm_object_attach_property(mobj, priv->colorkey_prop,
+ 0x0101fe);
+ drm_object_attach_property(mobj, priv->colorkey_min_prop,
+ 0x0101fe);
+ drm_object_attach_property(mobj, priv->colorkey_max_prop,
+ 0x0101fe);
+ drm_object_attach_property(mobj, priv->colorkey_val_prop,
+ 0x0101fe);
+ drm_object_attach_property(mobj, priv->colorkey_alpha_prop,
+ 0x000000);
+ drm_object_attach_property(mobj, priv->colorkey_mode_prop,
+ CKMODE_RGB);
+ drm_object_attach_property(mobj, priv->brightness_prop, 256);
+ drm_object_attach_property(mobj, priv->contrast_prop,
+ dplane->prop.contrast);
+ drm_object_attach_property(mobj, priv->saturation_prop,
+ dplane->prop.saturation);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/armada/armada_slave.c b/drivers/gpu/drm/armada/armada_slave.c
new file mode 100644
index 000000000000..00d0facb42f3
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_drm.h"
+#include "armada_output.h"
+#include "armada_slave.h"
+
+static int armada_drm_slave_get_modes(struct drm_connector *conn)
+{
+ struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+ int count = 0;
+
+ if (enc) {
+ struct drm_encoder_slave *slave = to_encoder_slave(enc);
+
+ count = slave->slave_funcs->get_modes(enc, conn);
+ }
+
+ return count;
+}
+
+static void armada_drm_slave_destroy(struct drm_encoder *enc)
+{
+ struct drm_encoder_slave *slave = to_encoder_slave(enc);
+ struct i2c_client *client = drm_i2c_encoder_get_client(enc);
+
+ if (slave->slave_funcs)
+ slave->slave_funcs->destroy(enc);
+ if (client)
+ i2c_put_adapter(client->adapter);
+
+ drm_encoder_cleanup(&slave->base);
+ kfree(slave);
+}
+
+static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
+ .destroy = armada_drm_slave_destroy,
+};
+
+static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
+ .get_modes = armada_drm_slave_get_modes,
+ .mode_valid = armada_drm_slave_encoder_mode_valid,
+ .best_encoder = armada_drm_connector_encoder,
+};
+
+static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
+ .dpms = drm_i2c_encoder_dpms,
+ .save = drm_i2c_encoder_save,
+ .restore = drm_i2c_encoder_restore,
+ .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .prepare = drm_i2c_encoder_prepare,
+ .commit = drm_i2c_encoder_commit,
+ .mode_set = drm_i2c_encoder_mode_set,
+ .detect = drm_i2c_encoder_detect,
+};
+
+static int
+armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
+{
+ const struct armada_drm_slave_config *config = data;
+ struct drm_encoder_slave *slave;
+ struct i2c_adapter *adap;
+ int ret;
+
+ conn->interlace_allowed = config->interlace_allowed;
+ conn->doublescan_allowed = config->doublescan_allowed;
+ conn->polled = config->polled;
+
+ drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);
+
+ slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ if (!slave)
+ return -ENOMEM;
+
+ slave->base.possible_crtcs = config->crtcs;
+
+ adap = i2c_get_adapter(config->i2c_adapter_id);
+ if (!adap) {
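+ /* the slave encoder's I2C bus is not available yet - defer probing */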
+ kfree(slave);
+ return -EPROBE_DEFER;
+ }
+
+ ret = drm_encoder_init(conn->dev, &slave->base,
+ &armada_drm_slave_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret) {
+ DRM_ERROR("unable to init encoder\n");
+ i2c_put_adapter(adap);
+ kfree(slave);
+ return ret;
+ }
+
+ ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
+ i2c_put_adapter(adap);
+ if (ret) {
+ DRM_ERROR("unable to init encoder slave\n");
+ armada_drm_slave_destroy(&slave->base);
+ return ret;
+ }
+
+ drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);
+
+ ret = slave->slave_funcs->create_resources(&slave->base, conn);
+ if (ret) {
+ armada_drm_slave_destroy(&slave->base);
+ return ret;
+ }
+
+ ret = drm_mode_connector_attach_encoder(conn, &slave->base);
+ if (ret) {
+ armada_drm_slave_destroy(&slave->base);
+ return ret;
+ }
+
+ conn->encoder = &slave->base;
+
+ return ret;
+}
+
+static const struct armada_output_type armada_drm_conn_slave = {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ .create = armada_drm_conn_slave_create,
+ .set_property = armada_drm_slave_encoder_set_property,
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+ const struct armada_drm_slave_config *config)
+{
+ return armada_output_create(dev, &armada_drm_conn_slave, config);
+}
diff --git a/drivers/gpu/drm/armada/armada_slave.h b/drivers/gpu/drm/armada/armada_slave.h
new file mode 100644
index 000000000000..bf2374c96fc1
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_SLAVE_H
+#define ARMADA_SLAVE_H
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+struct armada_drm_slave_config {
+ int i2c_adapter_id;
+ uint32_t crtcs;
+ uint8_t polled;
+ bool interlace_allowed;
+ bool doublescan_allowed;
+ struct i2c_board_info info;
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+ const struct armada_drm_slave_config *);
+
+#endif
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index da4a51eae824..8a784c460c89 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -6,6 +6,7 @@ config DRM_AST
select FB_SYS_FILLRECT
select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
help
Say yes for experimental AST GPU driver. Do not enable
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 32e270dc714e..5137f15dba19 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -211,7 +211,6 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_init_object = ast_gem_init_object,
.gem_free_object = ast_gem_free_object,
.dumb_create = ast_dumb_create,
.dumb_map_offset = ast_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 8492b68e873c..9833a1b1acc1 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -323,7 +323,6 @@ extern int ast_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern int ast_gem_init_object(struct drm_gem_object *obj);
extern void ast_gem_free_object(struct drm_gem_object *obj);
extern int ast_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 7f6152d374ca..af0b868a9dfd 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,12 +449,6 @@ int ast_dumb_create(struct drm_file *file,
return 0;
}
-int ast_gem_init_object(struct drm_gem_object *obj)
-{
- BUG();
- return 0;
-}
-
void ast_bo_unref(struct ast_bo **bo)
{
struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index bf67b22723f9..9864559e5fb9 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -5,6 +5,7 @@ config DRM_CIRRUS_QEMU
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for emulated cirrus device in qemu.
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 138364d91782..953fc8aea69c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -97,7 +97,6 @@ static struct drm_driver driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_init_object = cirrus_gem_init_object,
.gem_free_object = cirrus_gem_free_object,
.dumb_create = cirrus_dumb_create,
.dumb_map_offset = cirrus_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 9b0bb9184afd..b6aded73838b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -191,7 +191,6 @@ int cirrus_device_init(struct cirrus_device *cdev,
struct pci_dev *pdev,
uint32_t flags);
void cirrus_device_fini(struct cirrus_device *cdev);
-int cirrus_gem_init_object(struct drm_gem_object *obj);
void cirrus_gem_free_object(struct drm_gem_object *obj);
int cirrus_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index f130a533a512..78e76f24343d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,12 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
return 0;
}
-int cirrus_gem_init_object(struct drm_gem_object *obj)
-{
- BUG();
- return 0;
-}
-
void cirrus_bo_unref(struct cirrus_bo **bo)
{
struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 224ff965bcf7..a4b017b6849e 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -334,7 +334,6 @@ int drm_addctx(struct drm_device *dev, void *data,
mutex_lock(&dev->ctxlist_mutex);
list_add(&ctx_entry->head, &dev->ctxlist);
- ++dev->ctx_count;
mutex_unlock(&dev->ctxlist_mutex);
return 0;
@@ -432,7 +431,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
if (pos->handle == ctx->handle) {
list_del(&pos->head);
kfree(pos);
- --dev->ctx_count;
}
}
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bff2fa941f60..2447bd94a654 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -202,6 +202,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ DRM_MODE_CONNECTOR_TV, "TV" },
{ DRM_MODE_CONNECTOR_eDP, "eDP" },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+ { DRM_MODE_CONNECTOR_DSI, "DSI" },
};
static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -211,6 +212,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
{ DRM_MODE_ENCODER_TVDAC, "TV" },
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+ { DRM_MODE_ENCODER_DSI, "DSI" },
};
void drm_connector_ida_init(void)
@@ -1301,7 +1303,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
}
/**
- * drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
@@ -1317,6 +1319,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
return -ERANGE;
+ if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
+ return -EINVAL;
+
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
@@ -1579,6 +1584,19 @@ out:
return ret;
}
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+ const struct drm_file *file_priv)
+{
+ /*
+ * If user-space hasn't configured the driver to expose the stereo 3D
+ * modes, don't expose them.
+ */
+ if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+ return false;
+
+ return true;
+}
+
/**
* drm_mode_getconnector - get connector configuration
* @dev: drm device for the ioctl
@@ -1644,7 +1662,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
/* delayed so we get modes regardless of pre-fill_modes state */
list_for_each_entry(mode, &connector->modes, head)
- mode_count++;
+ if (drm_mode_expose_to_userspace(mode, file_priv))
+ mode_count++;
out_resp->connector_id = connector->base.id;
out_resp->connector_type = connector->connector_type;
@@ -1666,6 +1685,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
copied = 0;
mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
list_for_each_entry(mode, &connector->modes, head) {
+ if (!drm_mode_expose_to_userspace(mode, file_priv))
+ continue;
+
drm_crtc_convert_to_umode(&u_mode, mode);
if (copy_to_user(mode_ptr + copied,
&u_mode, sizeof(u_mode))) {
@@ -2040,6 +2062,45 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
}
EXPORT_SYMBOL(drm_mode_set_config_internal);
+/*
+ * Checks that the framebuffer is big enough for the CRTC viewport
+ * (x, y, hdisplay, vdisplay)
+ */
+static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+ int x, int y,
+ const struct drm_display_mode *mode,
+ const struct drm_framebuffer *fb)
+
+{
+ int hdisplay, vdisplay;
+
+ hdisplay = mode->hdisplay;
+ vdisplay = mode->vdisplay;
+
+ if (drm_mode_is_stereo(mode)) {
+ struct drm_display_mode adjusted = *mode;
+
+ drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
+ hdisplay = adjusted.crtc_hdisplay;
+ vdisplay = adjusted.crtc_vdisplay;
+ }
+
+ if (crtc->invert_dimensions)
+ swap(hdisplay, vdisplay);
+
+ if (hdisplay > fb->width ||
+ vdisplay > fb->height ||
+ x > fb->width - hdisplay ||
+ y > fb->height - vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+ fb->width, fb->height, hdisplay, vdisplay, x, y,
+ crtc->invert_dimensions ? " (inverted)" : "");
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
/**
* drm_mode_setcrtc - set CRTC configuration
* @dev: drm device for the ioctl
@@ -2087,7 +2148,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (crtc_req->mode_valid) {
- int hdisplay, vdisplay;
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
@@ -2123,23 +2183,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- hdisplay = mode->hdisplay;
- vdisplay = mode->vdisplay;
-
- if (crtc->invert_dimensions)
- swap(hdisplay, vdisplay);
-
- if (hdisplay > fb->width ||
- vdisplay > fb->height ||
- crtc_req->x > fb->width - hdisplay ||
- crtc_req->y > fb->height - vdisplay) {
- DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
- fb->width, fb->height,
- hdisplay, vdisplay, crtc_req->x, crtc_req->y,
- crtc->invert_dimensions ? " (inverted)" : "");
- ret = -ENOSPC;
+ ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
+ mode, fb);
+ if (ret)
goto out;
- }
+
}
if (crtc_req->count_connectors == 0 && mode) {
@@ -3556,7 +3604,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
struct drm_framebuffer *fb = NULL, *old_fb = NULL;
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
- int hdisplay, vdisplay;
int ret = -EINVAL;
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3588,22 +3635,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (!fb)
goto out;
- hdisplay = crtc->mode.hdisplay;
- vdisplay = crtc->mode.vdisplay;
-
- if (crtc->invert_dimensions)
- swap(hdisplay, vdisplay);
-
- if (hdisplay > fb->width ||
- vdisplay > fb->height ||
- crtc->x > fb->width - hdisplay ||
- crtc->y > fb->height - vdisplay) {
- DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
- fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
- crtc->invert_dimensions ? " (inverted)" : "");
- ret = -ENOSPC;
+ ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+ if (ret)
goto out;
- }
if (crtc->fb->pixel_format != fb->pixel_format) {
DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index c722c3b5404d..305b4cdcfb29 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,10 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
/**
* drm_helper_move_panel_connectors_to_head() - move panels to the front in the
* connector list
@@ -76,7 +80,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
{
struct drm_display_mode *mode;
- if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+ if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
+ DRM_MODE_FLAG_3D_MASK))
return;
list_for_each_entry(mode, &connector->modes, head) {
@@ -86,6 +91,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
!(flags & DRM_MODE_FLAG_DBLSCAN))
mode->status = MODE_NO_DBLESCAN;
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
+ !(flags & DRM_MODE_FLAG_3D_MASK))
+ mode->status = MODE_NO_STEREO;
}
return;
@@ -105,9 +113,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
* then culled (based on validity and the @maxX, @maxY parameters) and put into
* the normal modes list.
*
- * Intended to be use as a generic implementation of the ->probe() @connector
- * callback for drivers that use the crtc helpers for output mode filtering and
- * detection.
+ * Intended to be used as a generic implementation of the ->fill_modes()
+ * @connector vfunc for drivers that use the crtc helpers for output mode
+ * filtering and detection.
*
* RETURNS:
* Number of modes found on @connector.
@@ -175,6 +183,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
mode_flags |= DRM_MODE_FLAG_INTERLACE;
if (connector->doublescan_allowed)
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+ if (connector->stereo_allowed)
+ mode_flags |= DRM_MODE_FLAG_3D_MASK;
drm_mode_validate_flag(connector, mode_flags);
list_for_each_entry(mode, &connector->modes, head) {
@@ -557,6 +567,14 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
continue;
connector->encoder = NULL;
+
+ /*
+ * drm_helper_disable_unused_functions() ought to be
+ * doing this, but since we've decoupled the encoder
+ * from the connector above, the required connection
+ * between them is henceforth no longer available.
+ */
+ connector->dpms = DRM_MODE_DPMS_OFF;
}
}
@@ -1125,14 +1143,14 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
-void drm_helper_hpd_irq_event(struct drm_device *dev)
+bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
enum drm_connector_status old_status;
bool changed = false;
if (!dev->mode_config.poll_enabled)
- return;
+ return false;
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -1157,5 +1175,7 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
if (changed)
drm_kms_helper_hotplug_event(dev);
+
+ return changed;
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
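
The MODULE_* tags move here from drm_fb_helper.c (which becomes optional behind DRM_KMS_FB_HELPER), mode validation learns about the 3D flags, and drm_helper_hpd_irq_event() now reports whether any connector actually changed state. A hedged sketch of a driver-side hotplug work handler using the new return value; struct my_device and its hpd_work member are illustrative:

    static void my_hpd_work_func(struct work_struct *work)
    {
            struct my_device *mdev = container_of(work, struct my_device,
                                                  hpd_work);

            /* Re-detects all pollable connectors; if anything changed, the
             * helper has already sent the KMS hotplug uevent. */
            if (!drm_helper_hpd_irq_event(mdev->ddev))
                    DRM_DEBUG_KMS("HPD pulse without a status change\n");
    }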
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 89e196627160..9e978aae8972 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -228,12 +228,12 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
/* Helpers for DP link training */
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_LANE0_1_STATUS + (lane >> 1);
@@ -242,7 +242,7 @@ static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
return (l >> s) & 0xf;
}
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
u8 lane_align;
@@ -262,7 +262,7 @@ bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_channel_eq_ok);
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
int lane;
@@ -277,7 +277,7 @@ bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -290,7 +290,7 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -303,7 +303,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(100);
else
@@ -311,7 +311,7 @@ void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(400);
else
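
The drm_dp_helper.c changes only constify the link_status and dpcd arguments so callers can hand in read-only buffers. A sketch of the clock-recovery poll these helpers normally sit in, assuming a driver-provided my_read_link_status() that fills the six DPCD link-status bytes and a dpcd[] array of receiver capabilities the driver read earlier:

    u8 link_status[DP_LINK_STATUS_SIZE];
    int tries;

    for (tries = 0; tries < 5; tries++) {
            drm_dp_link_train_clock_recovery_delay(dpcd);

            if (!my_read_link_status(encoder, link_status))
                    break;
            if (drm_dp_clock_recovery_ok(link_status, lane_count))
                    break;  /* clock recovery done on all lanes */

            /* otherwise adjust swing/pre-emphasis using
             * drm_dp_get_adjust_request_voltage() and retry */
    }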
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index e572dd20bdee..d9137e49c4e8 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -61,7 +61,7 @@ static int drm_version(struct drm_device *dev, void *data,
/** Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
@@ -69,6 +69,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -170,76 +171,6 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
-/**
- * drm_legacy_dev_reinit
- *
- * Reinitializes a legacy/ums drm device in it's lastclose function.
- */
-static void drm_legacy_dev_reinit(struct drm_device *dev)
-{
- int i;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- atomic_set(&dev->ioctl_count, 0);
- atomic_set(&dev->vma_count, 0);
-
- for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
- atomic_set(&dev->counts[i], 0);
-
- dev->sigdata.lock = NULL;
-
- dev->context_flag = 0;
- dev->last_context = 0;
- dev->if_version = 0;
-}
-
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
- struct drm_vma_entry *vma, *vma_temp;
-
- DRM_DEBUG("\n");
-
- if (dev->driver->lastclose)
- dev->driver->lastclose(dev);
- DRM_DEBUG("driver lastclose completed\n");
-
- if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
-
- drm_agp_clear(dev);
-
- drm_legacy_sg_cleanup(dev);
-
- /* Clear vma list (only built for debugging) */
- list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
- list_del(&vma->head);
- kfree(vma);
- }
-
- drm_legacy_dma_takedown(dev);
-
- dev->dev_mapping = NULL;
- mutex_unlock(&dev->struct_mutex);
-
- drm_legacy_dev_reinit(dev);
-
- DRM_DEBUG("lastclose completed\n");
- return 0;
-}
-
/** File operations structure */
static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
@@ -385,7 +316,6 @@ long drm_ioctl(struct file *filp,
return -ENODEV;
atomic_inc(&dev->ioctl_count);
- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++file_priv->ioctl_count;
if ((nr >= DRM_CORE_IOCTL_COUNT) &&
@@ -402,9 +332,16 @@ long drm_ioctl(struct file *filp,
cmd = ioctl->cmd_drv;
}
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+ u32 drv_size;
+
ioctl = &drm_ioctls[nr];
- cmd = ioctl->cmd;
+
+ drv_size = _IOC_SIZE(ioctl->cmd);
usize = asize = _IOC_SIZE(cmd);
+ if (drv_size > asize)
+ asize = drv_size;
+
+ cmd = ioctl->cmd;
} else
goto err_i1;
@@ -466,7 +403,7 @@ long drm_ioctl(struct file *filp,
err_i1:
if (!ioctl)
- DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+ DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
task_pid_nr(current),
(long)old_encode_dev(file_priv->minor->device),
file_priv->authenticated, cmd, nr);
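
Besides dropping the legacy lastclose path (it reappears verbatim in the drm_fops.c hunk below) and the global stat counters, the table gains DRM_IOCTL_SET_CLIENT_CAP (implemented in the drm_ioctl.c hunk below), opens DRM_IOCTL_VERSION to render nodes, and the dispatcher now sizes its argument buffer from both the user-encoded and the table-declared ioctl size. Roughly, for core ioctls:

    u32 drv_size = _IOC_SIZE(ioctl->cmd);  /* size the kernel table declares */
    u32 usize    = _IOC_SIZE(cmd);         /* size encoded by userspace      */
    u32 asize    = max(usize, drv_size);   /* kernel buffer size; the tail
                                            * beyond usize is zeroed by the
                                            * existing memset in drm_ioctl() */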
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 830f7501cb4d..f1764ec5818b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1264,6 +1264,18 @@ struct edid *drm_get_edid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_get_edid);
+/**
+ * drm_edid_duplicate - duplicate an EDID and the extensions
+ * @edid: EDID to duplicate
+ *
+ * Return duplicate edid or NULL on allocation failure.
+ */
+struct edid *drm_edid_duplicate(const struct edid *edid)
+{
+ return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_edid_duplicate);
+
/*** EDID parsing ***/
/**
@@ -2404,7 +2416,7 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
- drm_mode_equal_no_clocks(to_match, cea_mode))
+ drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
return mode + 1;
}
return 0;
@@ -2453,7 +2465,7 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
- drm_mode_equal_no_clocks(to_match, hdmi_mode))
+ drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
return mode + 1;
}
return 0;
@@ -2507,6 +2519,9 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
if (!newmode)
continue;
+ /* Carry over the stereo flags */
+ newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
+
/*
* The current mode could be either variant. Make
* sure to pick the "other" clock for the new mode.
@@ -2553,20 +2568,151 @@ do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
return modes;
}
+struct stereo_mandatory_mode {
+ int width, height, vrefresh;
+ unsigned int flags;
+};
+
+static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
+ { 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+ { 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
+ { 1920, 1080, 50,
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+ { 1920, 1080, 60,
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+ { 1280, 720, 50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+ { 1280, 720, 50, DRM_MODE_FLAG_3D_FRAME_PACKING },
+ { 1280, 720, 60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+ { 1280, 720, 60, DRM_MODE_FLAG_3D_FRAME_PACKING }
+};
+
+static bool
+stereo_match_mandatory(const struct drm_display_mode *mode,
+ const struct stereo_mandatory_mode *stereo_mode)
+{
+ unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+ return mode->hdisplay == stereo_mode->width &&
+ mode->vdisplay == stereo_mode->height &&
+ interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
+}
+
+static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ const struct drm_display_mode *mode;
+ struct list_head stereo_modes;
+ int modes = 0, i;
+
+ INIT_LIST_HEAD(&stereo_modes);
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
+ const struct stereo_mandatory_mode *mandatory;
+ struct drm_display_mode *new_mode;
+
+ if (!stereo_match_mandatory(mode,
+ &stereo_mandatory_modes[i]))
+ continue;
+
+ mandatory = &stereo_mandatory_modes[i];
+ new_mode = drm_mode_duplicate(dev, mode);
+ if (!new_mode)
+ continue;
+
+ new_mode->flags |= mandatory->flags;
+ list_add_tail(&new_mode->head, &stereo_modes);
+ modes++;
+ }
+ }
+
+ list_splice_tail(&stereo_modes, &connector->probed_modes);
+
+ return modes;
+}
+
+static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *newmode;
+
+ vic--; /* VICs start at 1 */
+ if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+ DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
+ return 0;
+ }
+
+ newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
+ if (!newmode)
+ return 0;
+
+ drm_mode_probed_add(connector, newmode);
+
+ return 1;
+}
+
+static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
+ const u8 *video_db, u8 video_len, u8 video_index)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *newmode;
+ int modes = 0;
+ u8 cea_mode;
+
+ if (video_db == NULL || video_index > video_len)
+ return 0;
+
+ /* CEA modes are numbered 1..127 */
+ cea_mode = (video_db[video_index] & 127) - 1;
+ if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+ return 0;
+
+ if (structure & (1 << 0)) {
+ newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ if (newmode) {
+ newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ if (structure & (1 << 6)) {
+ newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ if (newmode) {
+ newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ if (structure & (1 << 8)) {
+ newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ if (newmode) {
+ newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ return modes;
+}
+
/*
* do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
* @connector: connector corresponding to the HDMI sink
* @db: start of the CEA vendor specific block
* @len: length of the CEA block payload, ie. one can access up to db[len]
*
- * Parses the HDMI VSDB looking for modes to add to @connector.
+ * Parses the HDMI VSDB looking for modes to add to @connector. This function
+ * also adds the stereo 3d modes when applicable.
*/
static int
-do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
+ const u8 *video_db, u8 video_len)
{
- struct drm_device *dev = connector->dev;
- int modes = 0, offset = 0, i;
- u8 vic_len;
+ int modes = 0, offset = 0, i, multi_present = 0;
+ u8 vic_len, hdmi_3d_len = 0;
+ u16 mask;
+ u16 structure_all;
if (len < 8)
goto out;
@@ -2585,30 +2731,56 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
/* the declared length is not long enough for the 2 first bytes
* of additional video format capabilities */
- offset += 2;
- if (len < (8 + offset))
+ if (len < (8 + offset + 2))
goto out;
+ /* 3D_Present */
+ offset++;
+ if (db[8 + offset] & (1 << 7)) {
+ modes += add_hdmi_mandatory_stereo_modes(connector);
+
+ /* 3D_Multi_present */
+ multi_present = (db[8 + offset] & 0x60) >> 5;
+ }
+
+ offset++;
vic_len = db[8 + offset] >> 5;
+ hdmi_3d_len = db[8 + offset] & 0x1f;
for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
- struct drm_display_mode *newmode;
u8 vic;
vic = db[9 + offset + i];
+ modes += add_hdmi_mode(connector, vic);
+ }
+ offset += 1 + vic_len;
- vic--; /* VICs start at 1 */
- if (vic >= ARRAY_SIZE(edid_4k_modes)) {
- DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
- continue;
- }
+ if (!(multi_present == 1 || multi_present == 2))
+ goto out;
- newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
- if (!newmode)
- continue;
+ if ((multi_present == 1 && len < (9 + offset)) ||
+ (multi_present == 2 && len < (11 + offset)))
+ goto out;
- drm_mode_probed_add(connector, newmode);
- modes++;
+ if ((multi_present == 1 && hdmi_3d_len < 2) ||
+ (multi_present == 2 && hdmi_3d_len < 4))
+ goto out;
+
+ /* 3D_Structure_ALL */
+ structure_all = (db[8 + offset] << 8) | db[9 + offset];
+
+ /* check if 3D_MASK is present */
+ if (multi_present == 2)
+ mask = (db[10 + offset] << 8) | db[11 + offset];
+ else
+ mask = 0xffff;
+
+ for (i = 0; i < 16; i++) {
+ if (mask & (1 << i))
+ modes += add_3d_struct_modes(connector,
+ structure_all,
+ video_db,
+ video_len, i);
}
out:
@@ -2668,8 +2840,8 @@ static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
const u8 *cea = drm_find_cea_extension(edid);
- const u8 *db;
- u8 dbl;
+ const u8 *db, *hdmi = NULL, *video = NULL;
+ u8 dbl, hdmi_len, video_len = 0;
int modes = 0;
if (cea && cea_revision(cea) >= 3) {
@@ -2682,13 +2854,26 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
db = &cea[i];
dbl = cea_db_payload_len(db);
- if (cea_db_tag(db) == VIDEO_BLOCK)
- modes += do_cea_modes(connector, db + 1, dbl);
- else if (cea_db_is_hdmi_vsdb(db))
- modes += do_hdmi_vsdb_modes(connector, db, dbl);
+ if (cea_db_tag(db) == VIDEO_BLOCK) {
+ video = db + 1;
+ video_len = dbl;
+ modes += do_cea_modes(connector, video, dbl);
+ }
+ else if (cea_db_is_hdmi_vsdb(db)) {
+ hdmi = db;
+ hdmi_len = dbl;
+ }
}
}
+ /*
+ * We parse the HDMI VSDB after having added the cea modes as we will
+ * be patching their flags when the sink supports stereo 3D.
+ */
+ if (hdmi)
+ modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video,
+ video_len);
+
return modes;
}
@@ -3321,6 +3506,33 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
+static enum hdmi_3d_structure
+s3d_structure_from_display_mode(const struct drm_display_mode *mode)
+{
+ u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+ switch (layout) {
+ case DRM_MODE_FLAG_3D_FRAME_PACKING:
+ return HDMI_3D_STRUCTURE_FRAME_PACKING;
+ case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
+ return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
+ case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
+ return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
+ case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
+ return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
+ case DRM_MODE_FLAG_3D_L_DEPTH:
+ return HDMI_3D_STRUCTURE_L_DEPTH;
+ case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
+ return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
+ case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
+ return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
+ case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
+ return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
+ default:
+ return HDMI_3D_STRUCTURE_INVALID;
+ }
+}
+
/**
* drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
* data from a DRM display mode
@@ -3338,20 +3550,29 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_display_mode *mode)
{
int err;
+ u32 s3d_flags;
u8 vic;
if (!frame || !mode)
return -EINVAL;
vic = drm_match_hdmi_mode(mode);
- if (!vic)
+ s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+ if (!vic && !s3d_flags)
+ return -EINVAL;
+
+ if (vic && s3d_flags)
return -EINVAL;
err = hdmi_vendor_infoframe_init(frame);
if (err < 0)
return err;
- frame->vic = vic;
+ if (vic)
+ frame->vic = vic;
+ else
+ frame->s3d_struct = s3d_structure_from_display_mode(mode);
return 0;
}
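
The EDID hunks above add the stereo-3D parsing: the mandatory HDMI 3D modes are synthesized when the sink sets 3D_present, 3D_Structure_ALL/3D_MASK are expanded into per-VIC frame-packing, top-and-bottom and side-by-side-half modes, drm_edid_duplicate() is added, and drm_hdmi_vendor_infoframe_from_display_mode() now fills either a 4k VIC or an S3D structure (the two are mutually exclusive). A hedged sketch of the driver side of sending that infoframe, where my_write_infoframe() stands in for the hardware-specific register write:

    struct hdmi_vendor_infoframe frame;
    u8 buf[32];
    ssize_t len;

    if (drm_hdmi_vendor_infoframe_from_display_mode(&frame, adjusted_mode) == 0) {
            len = hdmi_vendor_infoframe_pack(&frame, buf, sizeof(buf));
            if (len > 0)
                    my_write_infoframe(encoder, buf, len);
    }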
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 271b42bbfb72..9081172ef057 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -32,7 +32,7 @@ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
"from built-in data or /lib/firmware instead. ");
#define GENERIC_EDIDS 5
-static char *generic_edid_name[GENERIC_EDIDS] = {
+static const char *generic_edid_name[GENERIC_EDIDS] = {
"edid/1024x768.bin",
"edid/1280x1024.bin",
"edid/1600x1200.bin",
@@ -40,7 +40,7 @@ static char *generic_edid_name[GENERIC_EDIDS] = {
"edid/1920x1080.bin",
};
-static u8 generic_edid[GENERIC_EDIDS][128] = {
+static const u8 generic_edid[GENERIC_EDIDS][128] = {
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -133,63 +133,68 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
},
};
+static int edid_size(const u8 *edid, int data_size)
+{
+ if (data_size < EDID_LENGTH)
+ return 0;
+
+ return (edid[0x7e] + 1) * EDID_LENGTH;
+}
+
static u8 *edid_load(struct drm_connector *connector, const char *name,
const char *connector_name)
{
- const struct firmware *fw;
- struct platform_device *pdev;
- u8 *fwdata = NULL, *edid, *new_edid;
- int fwsize, expected;
- int builtin = 0, err = 0;
+ const struct firmware *fw = NULL;
+ const u8 *fwdata;
+ u8 *edid;
+ int fwsize, builtin;
int i, valid_extensions = 0;
bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
- pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
- if (IS_ERR(pdev)) {
- DRM_ERROR("Failed to register EDID firmware platform device "
- "for connector \"%s\"\n", connector_name);
- err = -EINVAL;
- goto out;
- }
-
- err = request_firmware(&fw, name, &pdev->dev);
- platform_device_unregister(pdev);
-
- if (err) {
- i = 0;
- while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
- i++;
- if (i < GENERIC_EDIDS) {
- err = 0;
- builtin = 1;
+ builtin = 0;
+ for (i = 0; i < GENERIC_EDIDS; i++) {
+ if (strcmp(name, generic_edid_name[i]) == 0) {
fwdata = generic_edid[i];
fwsize = sizeof(generic_edid[i]);
+ builtin = 1;
+ break;
}
}
+ if (!builtin) {
+ struct platform_device *pdev;
+ int err;
- if (err) {
- DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
- name, err);
- goto out;
- }
+ pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ DRM_ERROR("Failed to register EDID firmware platform device "
+ "for connector \"%s\"\n", connector_name);
+ return ERR_CAST(pdev);
+ }
+
+ err = request_firmware(&fw, name, &pdev->dev);
+ platform_device_unregister(pdev);
+ if (err) {
+ DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
+ name, err);
+ return ERR_PTR(err);
+ }
- if (fwdata == NULL) {
- fwdata = (u8 *) fw->data;
+ fwdata = fw->data;
fwsize = fw->size;
}
- expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
- if (expected != fwsize) {
+ if (edid_size(fwdata, fwsize) != fwsize) {
DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
- "(expected %d, got %d)\n", name, expected, (int) fwsize);
- err = -EINVAL;
- goto relfw_out;
+ "(expected %d, got %d\n", name,
+ edid_size(fwdata, fwsize), (int)fwsize);
+ edid = ERR_PTR(-EINVAL);
+ goto out;
}
edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
if (edid == NULL) {
- err = -ENOMEM;
- goto relfw_out;
+ edid = ERR_PTR(-ENOMEM);
+ goto out;
}
if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
@@ -197,8 +202,8 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
name);
kfree(edid);
- err = -EINVAL;
- goto relfw_out;
+ edid = ERR_PTR(-EINVAL);
+ goto out;
}
for (i = 1; i <= edid[0x7e]; i++) {
@@ -210,19 +215,18 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
}
if (valid_extensions != edid[0x7e]) {
+ u8 *new_edid;
+
edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
DRM_INFO("Found %d valid extensions instead of %d in EDID data "
"\"%s\" for connector \"%s\"\n", valid_extensions,
edid[0x7e], name, connector_name);
edid[0x7e] = valid_extensions;
+
new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
- GFP_KERNEL);
- if (new_edid == NULL) {
- err = -ENOMEM;
- kfree(edid);
- goto relfw_out;
- }
- edid = new_edid;
+ GFP_KERNEL);
+ if (new_edid)
+ edid = new_edid;
}
DRM_INFO("Got %s EDID base block and %d extension%s from "
@@ -230,13 +234,9 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
"external", valid_extensions, valid_extensions == 1 ? "" : "s",
name, connector_name);
-relfw_out:
- release_firmware(fw);
-
out:
- if (err)
- return ERR_PTR(err);
-
+ if (fw)
+ release_firmware(fw);
return edid;
}
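
edid_load() above is restructured: the built-in EDID table is consulted before any firmware request, failures come back as ERR_PTR() values instead of being funnelled through an err variable, and a failed krealloc() when trimming invalid extensions is no longer fatal (the oversized buffer is simply kept). A sketch of the contract callers now rely on:

    edid = edid_load(connector, name, connector_name);
    if (IS_ERR(edid))
            return NULL;    /* or propagate PTR_ERR(edid), as the caller prefers */

    /* edid is a kmemdup()'d blob of (edid[0x7e] + 1) * EDID_LENGTH bytes,
     * e.g. 256 bytes for a base block plus one CEA extension. */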
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 0cfb60f54766..d18b88b755c3 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -67,12 +67,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
goto fail;
}
- if (!client->driver) {
+ if (!client->dev.driver) {
err = -ENODEV;
goto fail_unregister;
}
- module = client->driver->driver.owner;
+ module = client->dev.driver->owner;
if (!try_module_get(module)) {
err = -ENODEV;
goto fail_unregister;
@@ -80,7 +80,7 @@ int drm_i2c_encoder_init(struct drm_device *dev,
encoder->bus_priv = client;
- encoder_drv = to_drm_i2c_encoder_driver(client->driver);
+ encoder_drv = to_drm_i2c_encoder_driver(to_i2c_driver(client->dev.driver));
err = encoder_drv->encoder_init(client, dev, encoder);
if (err)
@@ -111,7 +111,7 @@ void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
{
struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
- struct module *module = client->driver->driver.owner;
+ struct module *module = client->dev.driver->owner;
i2c_unregister_device(client);
encoder->bus_priv = NULL;
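
The encoder-slave conversion is part of retiring the i2c_client::driver back-pointer: the bound driver is now reached through the driver core as client->dev.driver and cast back with to_i2c_driver(), and the owning module comes from client->dev.driver->owner. The same substitution as a small illustrative helper (my_get_encoder_driver is not a kernel symbol):

    static struct drm_i2c_encoder_driver *
    my_get_encoder_driver(struct i2c_client *client)
    {
            if (!client->dev.driver)
                    return NULL;    /* no driver bound yet */

            return to_drm_i2c_encoder_driver(to_i2c_driver(client->dev.driver));
    }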
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3d13ca6e257f..0a19401aff80 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -39,10 +39,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
-MODULE_AUTHOR("David Airlie, Jesse Barnes");
-MODULE_DESCRIPTION("DRM KMS helper");
-MODULE_LICENSE("GPL and additional rights");
-
static LIST_HEAD(kernel_fb_helper_list);
/**
@@ -844,7 +840,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_mode_set *modeset;
- struct drm_crtc *crtc;
int ret = 0;
int i;
@@ -855,8 +850,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
}
for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
modeset = &fb_helper->crtc_info[i].mode_set;
modeset->x = var->xoffset;
@@ -1352,7 +1345,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
- struct drm_fb_helper_crtc *best_crtc;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
struct drm_fb_helper_connector *fb_helper_conn;
@@ -1364,7 +1356,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
connector = fb_helper_conn->connector;
best_crtcs[n] = NULL;
- best_crtc = NULL;
best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
if (modes[n] == NULL)
return best_score;
@@ -1413,7 +1404,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
width, height);
if (score > best_score) {
- best_crtc = crtc;
best_score = score;
memcpy(best_crtcs, crtcs,
dev->mode_config.num_connector *
@@ -1580,8 +1570,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
- int count = 0;
- u32 max_width, max_height, bpp_sel;
+ u32 max_width, max_height;
if (!fb_helper->fb)
return 0;
@@ -1596,10 +1585,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
max_width = fb_helper->fb->width;
max_height = fb_helper->fb->height;
- bpp_sel = fb_helper->fb->bits_per_pixel;
- count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
- max_height);
+ drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
mutex_unlock(&fb_helper->dev->mode_config.mutex);
drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3f84277d7036..d0e27667a4eb 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -113,7 +113,6 @@ int drm_open(struct inode *inode, struct file *filp)
retcode = drm_open_helper(inode, filp, dev);
if (retcode)
goto err_undo;
- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
if (need_setup) {
retcode = drm_setup(dev);
if (retcode)
@@ -386,6 +385,71 @@ static void drm_events_release(struct drm_file *file_priv)
}
/**
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/ums drm device in its lastclose function.
+ */
+static void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+
+ dev->sigdata.lock = NULL;
+
+ dev->context_flag = 0;
+ dev->last_context = 0;
+ dev->if_version = 0;
+}
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+ struct drm_vma_entry *vma, *vma_temp;
+
+ DRM_DEBUG("\n");
+
+ if (dev->driver->lastclose)
+ dev->driver->lastclose(dev);
+ DRM_DEBUG("driver lastclose completed\n");
+
+ if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_irq_uninstall(dev);
+
+ mutex_lock(&dev->struct_mutex);
+
+ drm_agp_clear(dev);
+
+ drm_legacy_sg_cleanup(dev);
+
+ /* Clear vma list (only built for debugging) */
+ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+ list_del(&vma->head);
+ kfree(vma);
+ }
+
+ drm_legacy_dma_takedown(dev);
+
+ dev->dev_mapping = NULL;
+ mutex_unlock(&dev->struct_mutex);
+
+ drm_legacy_dev_reinit(dev);
+
+ DRM_DEBUG("lastclose completed\n");
+ return 0;
+}
+
+/**
* Release file.
*
* \param inode device inode
@@ -454,7 +518,6 @@ int drm_release(struct inode *inode, struct file *filp)
list_del(&pos->head);
kfree(pos);
- --dev->ctx_count;
}
}
}
@@ -516,7 +579,6 @@ int drm_release(struct inode *inode, struct file *filp)
* End inline drm_release
*/
- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count)) {
DRM_ERROR("Device busy: %d\n",
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 49293bdc972a..4761adedad2a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -160,35 +160,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_private_object_init);
-/**
- * Allocate a GEM object of the specified size with shmfs backing store
- */
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
-{
- struct drm_gem_object *obj;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj)
- goto free;
-
- if (drm_gem_object_init(dev, obj, size) != 0)
- goto free;
-
- if (dev->driver->gem_init_object != NULL &&
- dev->driver->gem_init_object(obj) != 0) {
- goto fput;
- }
- return obj;
-fput:
- /* Object_init mangles the global counters - readjust them. */
- fput(obj->filp);
-free:
- kfree(obj);
- return NULL;
-}
-EXPORT_SYMBOL(drm_gem_object_alloc);
-
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
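
With every driver converted, drm_gem_object_alloc() and the gem_init_object() hook it required are removed here. A minimal sketch of the remaining pattern, embedding the GEM object in a driver structure and initializing it directly (struct my_object and my_object_create() are illustrative):

    struct my_object {
            struct drm_gem_object gem;
            /* driver-private state follows */
    };

    static struct my_object *my_object_create(struct drm_device *dev,
                                              size_t size)
    {
            struct my_object *obj;

            obj = kzalloc(sizeof(*obj), GFP_KERNEL);
            if (!obj)
                    return NULL;

            /* Sets up the shmem backing store; objects without one would
             * use drm_gem_private_object_init() instead. */
            if (drm_gem_object_init(dev, &obj->gem, size)) {
                    kfree(obj);
                    return NULL;
            }

            return obj;
    }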
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index f7311162a61d..3d2e91c4d78e 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -67,7 +67,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
{
int ret;
struct drm_global_item *item = &glob[ref->global_type];
- void *object;
mutex_lock(&item->mutex);
if (item->refcount == 0) {
@@ -85,7 +84,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
}
++item->refcount;
ref->object = item->object;
- object = item->object;
mutex_unlock(&item->mutex);
return 0;
out_err:
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 53298320080b..7d5a152eeb02 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -163,13 +163,13 @@ int drm_vblank_info(struct seq_file *m, void *data)
mutex_lock(&dev->struct_mutex);
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
seq_printf(m, "CRTC %d enable: %d\n",
- crtc, atomic_read(&dev->vblank_refcount[crtc]));
+ crtc, atomic_read(&dev->vblank[crtc].refcount));
seq_printf(m, "CRTC %d counter: %d\n",
crtc, drm_vblank_count(dev, crtc));
seq_printf(m, "CRTC %d last wait: %d\n",
- crtc, dev->last_vblank_wait[crtc]);
+ crtc, dev->vblank[crtc].last_wait);
seq_printf(m, "CRTC %d in modeset: %d\n",
- crtc, dev->vblank_inmodeset[crtc]);
+ crtc, dev->vblank[crtc].inmodeset);
}
mutex_unlock(&dev->struct_mutex);
return 0;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 07247e2855a2..dffc836144cc 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -303,6 +303,27 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
/**
+ * Set device/driver capabilities
+ */
+int
+drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_set_client_cap *req = data;
+
+ switch (req->capability) {
+ case DRM_CLIENT_CAP_STEREO_3D:
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->stereo_allowed = req->value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* Setversion ioctl.
*
* \param inode device inode.
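
drm_setclientcap() is the kernel side of the SET_CLIENT_CAP ioctl wired into the table in drm_drv.c above; DRM_CLIENT_CAP_STEREO_3D is its only capability so far and simply sets file_priv->stereo_allowed. A sketch of the userspace call against an open DRM file descriptor, using the uapi names added by this series (struct drm_set_client_cap, DRM_IOCTL_SET_CLIENT_CAP):

    struct drm_set_client_cap cap = {
            .capability = DRM_CLIENT_CAP_STEREO_3D,
            .value      = 1,
    };

    if (drmIoctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap))
            fprintf(stderr, "kernel has no stereo 3D support\n");

    /* From here on, GETCONNECTOR also lists DRM_MODE_FLAG_3D_* modes
     * for this file descriptor. */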
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f92da0a32f0d..c8226e1dd99d 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,9 +43,8 @@
#include <linux/export.h>
/* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, crtc, count) ( \
- (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
- ((count) % DRM_VBLANKTIME_RBSIZE)])
+#define vblanktimestamp(dev, crtc, count) \
+ ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
@@ -89,8 +88,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
*/
static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
{
- memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
- DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+ memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
}
/*
@@ -115,7 +113,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
dev->driver->disable_vblank(dev, crtc);
- dev->vblank_enabled[crtc] = 0;
+ dev->vblank[crtc].enabled = false;
/* No further vblank irq's will be processed after
* this point. Get current hardware vblank count and
@@ -130,9 +128,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* delayed gpu counter increment.
*/
do {
- dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+ dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
- } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+ } while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
if (!count)
vblrc = 0;
@@ -140,7 +138,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
/* Compute time difference to stored timestamp of last vblank
* as updated by last invocation of drm_handle_vblank() in vblank irq.
*/
- vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ vblcount = atomic_read(&dev->vblank[crtc].count);
diff_ns = timeval_to_ns(&tvblank) -
timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
@@ -157,7 +155,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* hope for the best.
*/
if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
- atomic_inc(&dev->_vblank_count[crtc]);
+ atomic_inc(&dev->vblank[crtc].count);
smp_mb__after_atomic_inc();
}
@@ -178,8 +176,8 @@ static void vblank_disable_fn(unsigned long arg)
for (i = 0; i < dev->num_crtcs; i++) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
- dev->vblank_enabled[i]) {
+ if (atomic_read(&dev->vblank[i].refcount) == 0 &&
+ dev->vblank[i].enabled) {
DRM_DEBUG("disabling vblank on crtc %d\n", i);
vblank_disable_and_save(dev, i);
}
@@ -197,14 +195,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
vblank_disable_fn((unsigned long)dev);
- kfree(dev->vbl_queue);
- kfree(dev->_vblank_count);
- kfree(dev->vblank_refcount);
- kfree(dev->vblank_enabled);
- kfree(dev->last_vblank);
- kfree(dev->last_vblank_wait);
- kfree(dev->vblank_inmodeset);
- kfree(dev->_vblank_time);
+ kfree(dev->vblank);
dev->num_crtcs = 0;
}
@@ -221,40 +212,12 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
dev->num_crtcs = num_crtcs;
- dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
- GFP_KERNEL);
- if (!dev->vbl_queue)
+ dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
+ if (!dev->vblank)
goto err;
- dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
- if (!dev->_vblank_count)
- goto err;
-
- dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
- GFP_KERNEL);
- if (!dev->vblank_refcount)
- goto err;
-
- dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
- if (!dev->vblank_enabled)
- goto err;
-
- dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
- if (!dev->last_vblank)
- goto err;
-
- dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
- if (!dev->last_vblank_wait)
- goto err;
-
- dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
- if (!dev->vblank_inmodeset)
- goto err;
-
- dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
- sizeof(struct timeval), GFP_KERNEL);
- if (!dev->_vblank_time)
- goto err;
+ for (i = 0; i < num_crtcs; i++)
+ init_waitqueue_head(&dev->vblank[i].queue);
DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
@@ -264,14 +227,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
else
DRM_INFO("No driver support for vblank timestamp query.\n");
- /* Zero per-crtc vblank stuff */
- for (i = 0; i < num_crtcs; i++) {
- init_waitqueue_head(&dev->vbl_queue[i]);
- atomic_set(&dev->_vblank_count[i], 0);
- atomic_set(&dev->vblank_refcount[i], 0);
- }
+ dev->vblank_disable_allowed = false;
- dev->vblank_disable_allowed = 0;
return 0;
err:
@@ -336,7 +293,7 @@ int drm_irq_install(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
return -EBUSY;
}
- dev->irq_enabled = 1;
+ dev->irq_enabled = true;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
@@ -359,7 +316,7 @@ int drm_irq_install(struct drm_device *dev)
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -373,7 +330,7 @@ int drm_irq_install(struct drm_device *dev)
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -394,14 +351,15 @@ EXPORT_SYMBOL(drm_irq_install);
int drm_irq_uninstall(struct drm_device *dev)
{
unsigned long irqflags;
- int irq_enabled, i;
+ bool irq_enabled;
+ int i;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
mutex_lock(&dev->struct_mutex);
irq_enabled = dev->irq_enabled;
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
/*
@@ -410,9 +368,9 @@ int drm_irq_uninstall(struct drm_device *dev)
if (dev->num_crtcs) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vbl_queue[i]);
- dev->vblank_enabled[i] = 0;
- dev->last_vblank[i] =
+ DRM_WAKEUP(&dev->vblank[i].queue);
+ dev->vblank[i].enabled = false;
+ dev->vblank[i].last =
dev->driver->get_vblank_counter(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -497,8 +455,8 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
/* Dot clock in Hz: */
dotclock = (u64) crtc->hwmode.clock * 1000;
- /* Fields of interlaced scanout modes are only halve a frame duration.
- * Double the dotclock to get halve the frame-/line-/pixelduration.
+ /* Fields of interlaced scanout modes are only half a frame duration.
+ * Double the dotclock to get half the frame-/line-/pixelduration.
*/
if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
dotclock *= 2;
@@ -795,7 +753,7 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
*/
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
- return atomic_read(&dev->_vblank_count[crtc]);
+ return atomic_read(&dev->vblank[crtc].count);
}
EXPORT_SYMBOL(drm_vblank_count);
@@ -824,10 +782,10 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
* a seqlock.
*/
do {
- cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+ cur_vblank = atomic_read(&dev->vblank[crtc].count);
*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
smp_rmb();
- } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+ } while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
return cur_vblank;
}
@@ -914,12 +872,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
/* Deal with counter wrap */
- diff = cur_vblank - dev->last_vblank[crtc];
- if (cur_vblank < dev->last_vblank[crtc]) {
+ diff = cur_vblank - dev->vblank[crtc].last;
+ if (cur_vblank < dev->vblank[crtc].last) {
diff += dev->max_vblank_count;
DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
- crtc, dev->last_vblank[crtc], cur_vblank, diff);
+ crtc, dev->vblank[crtc].last, cur_vblank, diff);
}
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
@@ -930,12 +888,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* reinitialize delayed at next vblank interrupt in that case.
*/
if (rc) {
- tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+ tslot = atomic_read(&dev->vblank[crtc].count) + diff;
vblanktimestamp(dev, crtc, tslot) = t_vblank;
}
smp_mb__before_atomic_inc();
- atomic_add(diff, &dev->_vblank_count[crtc]);
+ atomic_add(diff, &dev->vblank[crtc].count);
smp_mb__after_atomic_inc();
}
@@ -957,9 +915,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+ if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
- if (!dev->vblank_enabled[crtc]) {
+ if (!dev->vblank[crtc].enabled) {
/* Enable vblank irqs under vblank_time_lock protection.
* All vblank count & timestamp updates are held off
* until we are done reinitializing master counter and
@@ -970,16 +928,16 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
crtc, ret);
if (ret)
- atomic_dec(&dev->vblank_refcount[crtc]);
+ atomic_dec(&dev->vblank[crtc].refcount);
else {
- dev->vblank_enabled[crtc] = 1;
+ dev->vblank[crtc].enabled = true;
drm_update_vblank_count(dev, crtc);
}
}
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
} else {
- if (!dev->vblank_enabled[crtc]) {
- atomic_dec(&dev->vblank_refcount[crtc]);
+ if (!dev->vblank[crtc].enabled) {
+ atomic_dec(&dev->vblank[crtc].refcount);
ret = -EINVAL;
}
}
@@ -999,10 +957,10 @@ EXPORT_SYMBOL(drm_vblank_get);
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
- BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
+ BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
/* Last user schedules interrupt disable */
- if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+ if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
(drm_vblank_offdelay > 0))
mod_timer(&dev->vblank_disable_timer,
jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
@@ -1025,7 +983,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
vblank_disable_and_save(dev, crtc);
- DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ DRM_WAKEUP(&dev->vblank[crtc].queue);
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
@@ -1067,10 +1025,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
* to avoid corrupting the count if multiple, mismatch calls occur),
* so that interrupts remain enabled in the interim.
*/
- if (!dev->vblank_inmodeset[crtc]) {
- dev->vblank_inmodeset[crtc] = 0x1;
+ if (!dev->vblank[crtc].inmodeset) {
+ dev->vblank[crtc].inmodeset = 0x1;
if (drm_vblank_get(dev, crtc) == 0)
- dev->vblank_inmodeset[crtc] |= 0x2;
+ dev->vblank[crtc].inmodeset |= 0x2;
}
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);
@@ -1083,15 +1041,15 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
if (!dev->num_crtcs)
return;
- if (dev->vblank_inmodeset[crtc]) {
+ if (dev->vblank[crtc].inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- dev->vblank_disable_allowed = 1;
+ dev->vblank_disable_allowed = true;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- if (dev->vblank_inmodeset[crtc] & 0x2)
+ if (dev->vblank[crtc].inmodeset & 0x2)
drm_vblank_put(dev, crtc);
- dev->vblank_inmodeset[crtc] = 0;
+ dev->vblank[crtc].inmodeset = 0;
}
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
@@ -1288,8 +1246,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
vblwait->request.sequence, crtc);
- dev->last_vblank_wait[crtc] = vblwait->request.sequence;
- DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+ dev->vblank[crtc].last_wait = vblwait->request.sequence;
+ DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
(((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ||
!dev->irq_enabled));
@@ -1367,7 +1325,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
/* Vblank irq handling disabled. Nothing to do. */
- if (!dev->vblank_enabled[crtc]) {
+ if (!dev->vblank[crtc].enabled) {
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
return false;
}
@@ -1377,7 +1335,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
*/
/* Get current timestamp and count. */
- vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ vblcount = atomic_read(&dev->vblank[crtc].count);
drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
/* Compute time difference to timestamp of last vblank */
@@ -1401,14 +1359,14 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
* the timestamp computed above.
*/
smp_mb__before_atomic_inc();
- atomic_inc(&dev->_vblank_count[crtc]);
+ atomic_inc(&dev->vblank[crtc].count);
smp_mb__after_atomic_inc();
} else {
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
crtc, (int) diff_ns);
}
- DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ DRM_WAKEUP(&dev->vblank[crtc].queue);
drm_handle_vblank_events(dev, crtc);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
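
The eight parallel per-CRTC arrays (vbl_queue, _vblank_count, vblank_refcount, vblank_enabled, last_vblank, last_vblank_wait, vblank_inmodeset, _vblank_time) are folded into a single dev->vblank[] array above, allocated with one kcalloc(). Judging from the accesses in this diff, the per-CRTC element looks roughly like the following; the real struct drm_vblank_crtc lives in drmP.h and may differ in ordering or exact types:

    struct drm_vblank_crtc {
            wait_queue_head_t queue;        /* was dev->vbl_queue[crtc]        */
            struct timeval time[DRM_VBLANKTIME_RBSIZE];
                                            /* was dev->_vblank_time[...]      */
            atomic_t count;                 /* was dev->_vblank_count[crtc]    */
            atomic_t refcount;              /* was dev->vblank_refcount[crtc]  */
            u32 last;                       /* was dev->last_vblank[crtc]      */
            u32 last_wait;                  /* was dev->last_vblank_wait[crtc] */
            unsigned int inmodeset;         /* was dev->vblank_inmodeset[crtc] */
            bool enabled;                   /* was dev->vblank_enabled[crtc]   */
    };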
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index d752c96d6090..f6452682141b 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -86,7 +86,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
if (drm_lock_take(&master->lock, lock->context)) {
master->lock.file_priv = file_priv;
master->lock.lock_time = jiffies;
- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
@@ -157,8 +156,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
}
- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
-
if (drm_lock_free(&master->lock, lock->context)) {
/* FIXME: Should really bail out here. */
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index fc2adb62b757..85071a1c4547 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -707,18 +707,25 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
/**
* drm_mode_set_crtcinfo - set CRTC modesetting parameters
* @p: mode
- * @adjust_flags: unused? (FIXME)
+ * @adjust_flags: a combination of adjustment flags
*
* LOCKING:
* None.
*
* Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ *
+ * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
+ * interlaced modes.
+ * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
+ * buffers containing two eyes (only adjust the timings when needed, eg. for
+ * "frame packing" or "side by side full").
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
return;
+ p->crtc_clock = p->clock;
p->crtc_hdisplay = p->hdisplay;
p->crtc_hsync_start = p->hsync_start;
p->crtc_hsync_end = p->hsync_end;
@@ -752,6 +759,20 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
p->crtc_vtotal *= p->vscan;
}
+ if (adjust_flags & CRTC_STEREO_DOUBLE) {
+ unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
+
+ switch (layout) {
+ case DRM_MODE_FLAG_3D_FRAME_PACKING:
+ p->crtc_clock *= 2;
+ p->crtc_vdisplay += p->crtc_vtotal;
+ p->crtc_vsync_start += p->crtc_vtotal;
+ p->crtc_vsync_end += p->crtc_vtotal;
+ p->crtc_vtotal += p->crtc_vtotal;
+ break;
+ }
+ }
+
p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
@@ -830,12 +851,16 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
} else if (mode1->clock != mode2->clock)
return false;
- return drm_mode_equal_no_clocks(mode1, mode2);
+ if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
+ (mode2->flags & DRM_MODE_FLAG_3D_MASK))
+ return false;
+
+ return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
}
EXPORT_SYMBOL(drm_mode_equal);
/**
- * drm_mode_equal_no_clocks - test modes for equality
+ * drm_mode_equal_no_clocks_no_stereo - test modes for equality
* @mode1: first mode
* @mode2: second mode
*
@@ -843,12 +868,13 @@ EXPORT_SYMBOL(drm_mode_equal);
* None.
*
* Check to see if @mode1 and @mode2 are equivalent, but
- * don't check the pixel clocks.
+ * don't check the pixel clocks nor the stereo layout.
*
* RETURNS:
* True if the modes are equal, false otherwise.
*/
-bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
+ const struct drm_display_mode *mode2)
{
if (mode1->hdisplay == mode2->hdisplay &&
mode1->hsync_start == mode2->hsync_start &&
@@ -860,12 +886,13 @@ bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct
mode1->vsync_end == mode2->vsync_end &&
mode1->vtotal == mode2->vtotal &&
mode1->vscan == mode2->vscan &&
- mode1->flags == mode2->flags)
+ (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
+ (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
return true;
return false;
}
-EXPORT_SYMBOL(drm_mode_equal_no_clocks);
+EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
/**
* drm_mode_validate_size - make sure modes adhere to size constraints
@@ -1014,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
/* if equal delete the probed mode */
mode->status = pmode->status;
/* Merge type bits together */
- mode->type |= pmode->type;
+ mode->type = pmode->type;
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;
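
The CRTC_STEREO_DOUBLE handling added to drm_mode_set_crtcinfo() doubles the pixel clock and stacks the vertical timings for frame-packed 3D modes. A hedged sketch of how a caller would exercise it (the helper and input mode are hypothetical):

    static void example_frame_packing(const struct drm_display_mode *in)
    {
            struct drm_display_mode mode = *in;     /* e.g. a 1920x1080 mode */

            mode.flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
            drm_mode_set_crtcinfo(&mode, CRTC_STEREO_DOUBLE);

            /*
             * Per the switch above: mode.crtc_clock == 2 * mode.clock, and
             * crtc_vdisplay/crtc_vsync_start/crtc_vsync_end grow by one
             * eye's vtotal before crtc_vtotal itself is doubled.
             */
    }
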
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 1f96cee6eee8..f00d7a9671ea 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -322,83 +322,36 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
DRM_DEBUG("\n");
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = drm_dev_alloc(driver, &pdev->dev);
if (!dev)
return -ENOMEM;
ret = pci_enable_device(pdev);
if (ret)
- goto err_g1;
+ goto err_free;
dev->pdev = pdev;
- dev->dev = &pdev->dev;
-
- dev->pci_device = pdev->device;
- dev->pci_vendor = pdev->vendor;
-
#ifdef __alpha__
dev->hose = pdev->sysdata;
#endif
- mutex_lock(&drm_global_mutex);
-
- if ((ret = drm_fill_in_dev(dev, ent, driver))) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g2;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
pci_set_drvdata(pdev, dev);
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto err_g2;
- }
-
- if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
- ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
- if (ret)
- goto err_g21;
- }
-
- if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
- goto err_g3;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, ent->driver_data);
- if (ret)
- goto err_g4;
- }
- /* setup the grouping for the legacy output */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_g4;
- }
-
- list_add_tail(&dev->driver_item, &driver->device_list);
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_pci;
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, pci_name(pdev), dev->primary->index);
- mutex_unlock(&drm_global_mutex);
return 0;
-err_g4:
- drm_put_minor(&dev->primary);
-err_g3:
- if (dev->render)
- drm_put_minor(&dev->render);
-err_g21:
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-err_g2:
+err_pci:
pci_disable_device(pdev);
-err_g1:
- kfree(dev);
- mutex_unlock(&drm_global_mutex);
+err_free:
+ drm_dev_free(dev);
return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index f7a18c6ba4c4..fc24fee8ec83 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -47,55 +47,15 @@ static int drm_get_platform_dev(struct platform_device *platdev,
DRM_DEBUG("\n");
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = drm_dev_alloc(driver, &platdev->dev);
if (!dev)
return -ENOMEM;
dev->platformdev = platdev;
- dev->dev = &platdev->dev;
- mutex_lock(&drm_global_mutex);
-
- ret = drm_fill_in_dev(dev, NULL, driver);
-
- if (ret) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g1;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto err_g1;
- }
-
- if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
- ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
- if (ret)
- goto err_g11;
- }
-
- ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+ ret = drm_dev_register(dev, 0);
if (ret)
- goto err_g2;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, 0);
- if (ret)
- goto err_g3;
- }
-
- /* setup the grouping for the legacy output */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_g3;
- }
-
- list_add_tail(&dev->driver_item, &driver->device_list);
-
- mutex_unlock(&drm_global_mutex);
+ goto err_free;
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -103,17 +63,8 @@ static int drm_get_platform_dev(struct platform_device *platdev,
return 0;
-err_g3:
- drm_put_minor(&dev->primary);
-err_g2:
- if (dev->render)
- drm_put_minor(&dev->render);
-err_g11:
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-err_g1:
- kfree(dev);
- mutex_unlock(&drm_global_mutex);
+err_free:
+ drm_dev_free(dev);
return ret;
}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 276d470f7b3e..56805c39c906 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -637,14 +637,13 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
unsigned count;
struct scatterlist *sg;
struct page *page;
- u32 len, offset;
+ u32 len;
int pg_index;
dma_addr_t addr;
pg_index = 0;
for_each_sg(sgt->sgl, sg, sgt->nents, count) {
len = sg->length;
- offset = sg->offset;
page = sg_page(sg);
addr = sg_dma_address(sg);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 39d864576be4..26055abf94ee 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -254,70 +254,6 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int drm_fill_in_dev(struct drm_device *dev,
- const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- int retcode;
-
- INIT_LIST_HEAD(&dev->filelist);
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
- INIT_LIST_HEAD(&dev->vblank_event_list);
-
- spin_lock_init(&dev->count_lock);
- spin_lock_init(&dev->event_lock);
- mutex_init(&dev->struct_mutex);
- mutex_init(&dev->ctxlist_mutex);
-
- if (drm_ht_create(&dev->map_hash, 12)) {
- return -ENOMEM;
- }
-
- /* the DRM has 6 basic counters */
- dev->counters = 6;
- dev->types[0] = _DRM_STAT_LOCK;
- dev->types[1] = _DRM_STAT_OPENS;
- dev->types[2] = _DRM_STAT_CLOSES;
- dev->types[3] = _DRM_STAT_IOCTLS;
- dev->types[4] = _DRM_STAT_LOCKS;
- dev->types[5] = _DRM_STAT_UNLOCKS;
-
- dev->driver = driver;
-
- if (dev->driver->bus->agp_init) {
- retcode = dev->driver->bus->agp_init(dev);
- if (retcode)
- goto error_out_unreg;
- }
-
-
-
- retcode = drm_ctxbitmap_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- goto error_out_unreg;
- }
-
- if (driver->driver_features & DRIVER_GEM) {
- retcode = drm_gem_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot initialize graphics execution "
- "manager (GEM)\n");
- goto error_out_unreg;
- }
- }
-
- return 0;
-
- error_out_unreg:
- drm_lastclose(dev);
- return retcode;
-}
-EXPORT_SYMBOL(drm_fill_in_dev);
-
-
/**
* Get a secondary minor number.
*
@@ -427,66 +363,237 @@ static void drm_unplug_minor(struct drm_minor *minor)
*/
void drm_put_dev(struct drm_device *dev)
{
- struct drm_driver *driver;
- struct drm_map_list *r_list, *list_temp;
-
DRM_DEBUG("\n");
if (!dev) {
DRM_ERROR("cleanup called no dev\n");
return;
}
- driver = dev->driver;
- drm_lastclose(dev);
+ drm_dev_unregister(dev);
+ drm_dev_free(dev);
+}
+EXPORT_SYMBOL(drm_put_dev);
- if (dev->driver->unload)
- dev->driver->unload(dev);
+void drm_unplug_dev(struct drm_device *dev)
+{
+ /* for a USB device */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_unplug_minor(dev->control);
+ if (dev->render)
+ drm_unplug_minor(dev->render);
+ drm_unplug_minor(dev->primary);
- if (dev->driver->bus->agp_destroy)
- dev->driver->bus->agp_destroy(dev);
+ mutex_lock(&drm_global_mutex);
- drm_vblank_cleanup(dev);
+ drm_device_set_unplugged(dev);
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_rmmap(dev, r_list->map);
- drm_ht_remove(&dev->map_hash);
+ if (dev->open_count == 0) {
+ drm_put_dev(dev);
+ }
+ mutex_unlock(&drm_global_mutex);
+}
+EXPORT_SYMBOL(drm_unplug_dev);
- drm_ctxbitmap_cleanup(dev);
+/**
+ * drm_dev_alloc - Allocate new drm device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * Allocate and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or NULL if out of memory.
+ */
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent)
+{
+ struct drm_device *dev;
+ int ret;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
- if (dev->render)
- drm_put_minor(&dev->render);
+ dev->dev = parent;
+ dev->driver = driver;
+
+ INIT_LIST_HEAD(&dev->filelist);
+ INIT_LIST_HEAD(&dev->ctxlist);
+ INIT_LIST_HEAD(&dev->vmalist);
+ INIT_LIST_HEAD(&dev->maplist);
+ INIT_LIST_HEAD(&dev->vblank_event_list);
+
+ spin_lock_init(&dev->count_lock);
+ spin_lock_init(&dev->event_lock);
+ mutex_init(&dev->struct_mutex);
+ mutex_init(&dev->ctxlist_mutex);
+
+ if (drm_ht_create(&dev->map_hash, 12))
+ goto err_free;
- if (driver->driver_features & DRIVER_GEM)
+ ret = drm_ctxbitmap_init(dev);
+ if (ret) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ goto err_ht;
+ }
+
+ if (driver->driver_features & DRIVER_GEM) {
+ ret = drm_gem_init(dev);
+ if (ret) {
+ DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+ goto err_ctxbitmap;
+ }
+ }
+
+ return dev;
+
+err_ctxbitmap:
+ drm_ctxbitmap_cleanup(dev);
+err_ht:
+ drm_ht_remove(&dev->map_hash);
+err_free:
+ kfree(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_dev_alloc);
+
+/**
+ * drm_dev_free - Free DRM device
+ * @dev: DRM device to free
+ *
+ * Free a DRM device that has previously been allocated via drm_dev_alloc().
+ * You must not use kfree() instead or you will leak memory.
+ *
+ * This must not be called once the device has been registered. Use drm_put_dev()
+ * instead, which then calls drm_dev_free().
+ */
+void drm_dev_free(struct drm_device *dev)
+{
+ if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_destroy(dev);
- drm_put_minor(&dev->primary);
+ drm_ctxbitmap_cleanup(dev);
+ drm_ht_remove(&dev->map_hash);
- list_del(&dev->driver_item);
kfree(dev->devname);
kfree(dev);
}
-EXPORT_SYMBOL(drm_put_dev);
+EXPORT_SYMBOL(drm_dev_free);
-void drm_unplug_dev(struct drm_device *dev)
+/**
+ * drm_dev_register - Register DRM device
+ * @dev: Device to register
+ *
+ * Register the DRM device @dev with the system, advertise the device to user-space
+ * and start normal device operation. @dev must be allocated via drm_dev_alloc()
+ * previously.
+ *
+ * Never call this twice on any device!
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
- /* for a USB device */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_unplug_minor(dev->control);
- if (dev->render)
- drm_unplug_minor(dev->render);
- drm_unplug_minor(dev->primary);
+ int ret;
mutex_lock(&drm_global_mutex);
- drm_device_set_unplugged(dev);
+ if (dev->driver->bus->agp_init) {
+ ret = dev->driver->bus->agp_init(dev);
+ if (ret)
+ goto out_unlock;
+ }
- if (dev->open_count == 0) {
- drm_put_dev(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_agp;
}
+
+ if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+ ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_control_node;
+ }
+
+ ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_render_node;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, flags);
+ if (ret)
+ goto err_primary_node;
+ }
+
+ /* setup grouping for legacy outputs */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_unload;
+ }
+
+ list_add_tail(&dev->driver_item, &dev->driver->device_list);
+
+ ret = 0;
+ goto out_unlock;
+
+err_unload:
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
+err_primary_node:
+ drm_put_minor(&dev->primary);
+err_render_node:
+ if (dev->render)
+ drm_put_minor(&dev->render);
+err_control_node:
+ if (dev->control)
+ drm_put_minor(&dev->control);
+err_agp:
+ if (dev->driver->bus->agp_destroy)
+ dev->driver->bus->agp_destroy(dev);
+out_unlock:
mutex_unlock(&drm_global_mutex);
+ return ret;
}
-EXPORT_SYMBOL(drm_unplug_dev);
+EXPORT_SYMBOL(drm_dev_register);
+
+/**
+ * drm_dev_unregister - Unregister DRM device
+ * @dev: Device to unregister
+ *
+ * Unregister the DRM device from the system. This does the reverse of
+ * drm_dev_register() but does not deallocate the device. The caller must call
+ * drm_dev_free() to free all resources.
+ */
+void drm_dev_unregister(struct drm_device *dev)
+{
+ struct drm_map_list *r_list, *list_temp;
+
+ drm_lastclose(dev);
+
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
+
+ if (dev->driver->bus->agp_destroy)
+ dev->driver->bus->agp_destroy(dev);
+
+ drm_vblank_cleanup(dev);
+
+ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+ drm_rmmap(dev, r_list->map);
+
+ if (dev->control)
+ drm_put_minor(&dev->control);
+ if (dev->render)
+ drm_put_minor(&dev->render);
+ drm_put_minor(&dev->primary);
+
+ list_del(&dev->driver_item);
+}
+EXPORT_SYMBOL(drm_dev_unregister);
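
The new drm_dev_alloc()/drm_dev_register()/drm_dev_unregister()/drm_dev_free() helpers give every bus the same device lifecycle; the drm_pci.c and drm_platform.c hunks earlier are the real users. A hypothetical probe/remove pair, for illustration only:

    static int example_probe(struct platform_device *pdev,
                             struct drm_driver *driver)
    {
            struct drm_device *ddev;
            int ret;

            ddev = drm_dev_alloc(driver, &pdev->dev);  /* core state only */
            if (!ddev)
                    return -ENOMEM;

            ret = drm_dev_register(ddev, 0);  /* minors, ->load(), mode groups */
            if (ret) {
                    drm_dev_free(ddev);       /* never plain kfree() */
                    return ret;
            }
            return 0;
    }

    static void example_remove(struct drm_device *ddev)
    {
            /* drm_put_dev() == drm_dev_unregister() + drm_dev_free() */
            drm_put_dev(ddev);
    }
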
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 2290b3b73832..db1c8f958bab 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -22,8 +22,8 @@
#include <drm/drm_core.h>
#include <drm/drmP.h>
-#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
-#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
+#define to_drm_minor(d) dev_get_drvdata(d)
+#define to_drm_connector(d) dev_get_drvdata(d)
static struct device_type drm_sysfs_device_minor = {
.name = "drm_minor"
@@ -162,20 +162,6 @@ void drm_sysfs_destroy(void)
drm_class = NULL;
}
-/**
- * drm_sysfs_device_release - do nothing
- * @dev: Linux device
- *
- * Normally, this would free the DRM device associated with @dev, along
- * with cleaning up any other stuff. But we do that in the DRM core, so
- * this function can just return and hope that the core does its job.
- */
-static void drm_sysfs_device_release(struct device *dev)
-{
- memset(dev, 0, sizeof(struct device));
- return;
-}
-
/*
* Connector properties
*/
@@ -394,29 +380,26 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
int i;
int ret;
- /* We shouldn't get called more than once for the same connector */
- BUG_ON(device_is_registered(&connector->kdev));
-
- connector->kdev.parent = &dev->primary->kdev;
- connector->kdev.class = drm_class;
- connector->kdev.release = drm_sysfs_device_release;
+ if (connector->kdev)
+ return 0;
+ /* We shouldn't get called more than once for the same connector */
+ connector->kdev = device_create(drm_class, dev->primary->kdev,
+ 0, connector, "card%d-%s",
+ dev->primary->index, drm_get_connector_name(connector));
DRM_DEBUG("adding \"%s\" to sysfs\n",
drm_get_connector_name(connector));
- dev_set_name(&connector->kdev, "card%d-%s",
- dev->primary->index, drm_get_connector_name(connector));
- ret = device_register(&connector->kdev);
-
- if (ret) {
- DRM_ERROR("failed to register connector device: %d\n", ret);
+ if (IS_ERR(connector->kdev)) {
+ DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
+ ret = PTR_ERR(connector->kdev);
goto out;
}
/* Standard attributes */
for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
- ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
+ ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
if (ret)
goto err_out_files;
}
@@ -433,7 +416,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
case DRM_MODE_CONNECTOR_Component:
case DRM_MODE_CONNECTOR_TV:
for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
- ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
+ ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
if (ret)
goto err_out_files;
}
@@ -442,7 +425,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
break;
}
- ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
+ ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
if (ret)
goto err_out_files;
@@ -453,10 +436,10 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
err_out_files:
for (i = 0; i < opt_cnt; i++)
- device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
+ device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
for (i = 0; i < attr_cnt; i++)
- device_remove_file(&connector->kdev, &connector_attrs[i]);
- device_unregister(&connector->kdev);
+ device_remove_file(connector->kdev, &connector_attrs[i]);
+ device_unregister(connector->kdev);
out:
return ret;
@@ -480,16 +463,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
{
int i;
- if (!connector->kdev.parent)
+ if (!connector->kdev)
return;
DRM_DEBUG("removing \"%s\" from sysfs\n",
drm_get_connector_name(connector));
for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
- device_remove_file(&connector->kdev, &connector_attrs[i]);
- sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
- device_unregister(&connector->kdev);
- connector->kdev.parent = NULL;
+ device_remove_file(connector->kdev, &connector_attrs[i]);
+ sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
+ device_unregister(connector->kdev);
+ connector->kdev = NULL;
}
EXPORT_SYMBOL(drm_sysfs_connector_remove);
@@ -508,7 +491,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
DRM_DEBUG("generating hotplug event\n");
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
@@ -523,15 +506,8 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
*/
int drm_sysfs_device_add(struct drm_minor *minor)
{
- int err;
char *minor_str;
- minor->kdev.parent = minor->dev->dev;
-
- minor->kdev.class = drm_class;
- minor->kdev.release = drm_sysfs_device_release;
- minor->kdev.devt = minor->device;
- minor->kdev.type = &drm_sysfs_device_minor;
if (minor->type == DRM_MINOR_CONTROL)
minor_str = "controlD%d";
else if (minor->type == DRM_MINOR_RENDER)
@@ -539,18 +515,14 @@ int drm_sysfs_device_add(struct drm_minor *minor)
else
minor_str = "card%d";
- dev_set_name(&minor->kdev, minor_str, minor->index);
-
- err = device_register(&minor->kdev);
- if (err) {
- DRM_ERROR("device add failed: %d\n", err);
- goto err_out;
+ minor->kdev = device_create(drm_class, minor->dev->dev,
+ MKDEV(DRM_MAJOR, minor->index),
+ minor, minor_str, minor->index);
+ if (IS_ERR(minor->kdev)) {
+ DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
+ return PTR_ERR(minor->kdev);
}
-
return 0;
-
-err_out:
- return err;
}
/**
@@ -562,9 +534,9 @@ err_out:
*/
void drm_sysfs_device_remove(struct drm_minor *minor)
{
- if (minor->kdev.parent)
- device_unregister(&minor->kdev);
- minor->kdev.parent = NULL;
+ if (minor->kdev)
+ device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
+ minor->kdev = NULL;
}
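
The sysfs conversion above relies on device_create() storing the minor/connector as driver data, which the redefined to_drm_minor()/to_drm_connector() macros read back with dev_get_drvdata(). A minimal sketch of that pattern (names are illustrative):

    static struct device *example_create_minor_kdev(struct drm_minor *minor,
                                                    struct device *parent)
    {
            struct device *kdev;

            kdev = device_create(drm_class, parent,
                                 MKDEV(DRM_MAJOR, minor->index),
                                 minor,              /* drvdata */
                                 "card%d", minor->index);
            if (IS_ERR(kdev))
                    return kdev;

            /* A sysfs attribute callback later recovers the minor via
             * dev_get_drvdata(kdev), i.e. the new to_drm_minor(). */
            return kdev;
    }
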
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 87664723b9ce..b179b70e7853 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -7,57 +7,20 @@ int drm_get_usb_dev(struct usb_interface *interface,
struct drm_driver *driver)
{
struct drm_device *dev;
- struct usb_device *usbdev;
int ret;
DRM_DEBUG("\n");
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = drm_dev_alloc(driver, &interface->dev);
if (!dev)
return -ENOMEM;
- usbdev = interface_to_usbdev(interface);
- dev->usbdev = usbdev;
- dev->dev = &interface->dev;
-
- mutex_lock(&drm_global_mutex);
-
- ret = drm_fill_in_dev(dev, NULL, driver);
- if (ret) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g1;
- }
-
+ dev->usbdev = interface_to_usbdev(interface);
usb_set_intfdata(interface, dev);
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto err_g1;
-
- if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
- ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
- if (ret)
- goto err_g11;
- }
- ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+ ret = drm_dev_register(dev, 0);
if (ret)
- goto err_g2;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, 0);
- if (ret)
- goto err_g3;
- }
-
- /* setup the grouping for the legacy output */
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_g3;
-
- list_add_tail(&dev->driver_item, &driver->device_list);
-
- mutex_unlock(&drm_global_mutex);
+ goto err_free;
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -65,16 +28,8 @@ int drm_get_usb_dev(struct usb_interface *interface,
return 0;
-err_g3:
- drm_put_minor(&dev->primary);
-err_g2:
- if (dev->render)
- drm_put_minor(&dev->render);
-err_g11:
- drm_put_minor(&dev->control);
-err_g1:
- kfree(dev);
- mutex_unlock(&drm_global_mutex);
+err_free:
+ drm_dev_free(dev);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 45b6ef595965..f227f544aa36 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,6 +2,7 @@ config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index bb82ef78ca85..b676006a95a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -264,7 +264,6 @@ static struct drm_driver exynos_drm_driver = {
.get_vblank_counter = drm_vblank_count,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
- .gem_init_object = exynos_drm_gem_init_object,
.gem_free_object = exynos_drm_gem_free_object,
.gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,
@@ -286,7 +285,11 @@ static struct drm_driver exynos_drm_driver = {
static int exynos_drm_platform_probe(struct platform_device *pdev)
{
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ int ret;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
return drm_platform_init(&exynos_drm_driver, pdev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 868a14d52995..23da72b5eae9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -716,20 +716,20 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
/*
* enable drm irq mode.
- * - with irq_enabled = 1, we can use the vblank feature.
+ * - with irq_enabled = true, we can use the vblank feature.
*
* P.S. note that we wouldn't use drm irq handler but
* just specific driver own one instead because
* drm framework supports only one irq handler.
*/
- drm_dev->irq_enabled = 1;
+ drm_dev->irq_enabled = true;
/*
- * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+ * with vblank_disable_allowed = true, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
- drm_dev->vblank_disable_allowed = 1;
+ drm_dev->vblank_disable_allowed = true;
/* attach this sub driver to iommu mapping if supported. */
if (is_drm_iommu_supported(drm_dev))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 49f9cd232757..1ade191d84f4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -630,11 +630,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
-int exynos_drm_gem_init_object(struct drm_gem_object *obj)
-{
- return 0;
-}
-
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 09555afdfe9c..702ec3abe85c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -135,9 +135,6 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *file_priv);
-/* initialize gem object. */
-int exynos_drm_gem_init_object(struct drm_gem_object *obj);
-
/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 4400330e4449..ddaaedde173d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -101,7 +101,6 @@ static struct edid *vidi_get_edid(struct device *dev,
{
struct vidi_context *ctx = get_vidi_context(dev);
struct edid *edid;
- int edid_len;
/*
* the edid data comes from user side and it would be set
@@ -112,8 +111,7 @@ static struct edid *vidi_get_edid(struct device *dev,
return ERR_PTR(-EFAULT);
}
- edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
- edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
+ edid = drm_edid_duplicate(ctx->raw_edid);
if (!edid) {
DRM_DEBUG_KMS("failed to allocate edid\n");
return ERR_PTR(-ENOMEM);
@@ -385,20 +383,20 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
/*
* enable drm irq mode.
- * - with irq_enabled = 1, we can use the vblank feature.
+ * - with irq_enabled = true, we can use the vblank feature.
*
* P.S. note that we wouldn't use drm irq handler but
* just specific driver own one instead because
* drm framework supports only one irq handler.
*/
- drm_dev->irq_enabled = 1;
+ drm_dev->irq_enabled = true;
/*
- * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+ * with vblank_disable_allowed = true, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
- drm_dev->vblank_disable_allowed = 1;
+ drm_dev->vblank_disable_allowed = true;
return 0;
}
@@ -485,7 +483,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct exynos_drm_manager *manager;
struct exynos_drm_display_ops *display_ops;
struct drm_exynos_vidi_connection *vidi = data;
- int edid_len;
if (!vidi) {
DRM_DEBUG_KMS("user data for vidi is null.\n");
@@ -524,8 +521,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
DRM_DEBUG_KMS("edid data is invalid.\n");
return -EINVAL;
}
- edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
- ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
+ ctx->raw_edid = drm_edid_duplicate(raw_edid);
if (!ctx->raw_edid) {
DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 1f6e2dfaaeae..508cf99a292d 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -5,6 +5,7 @@ config DRM_GMA500
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index f4eb43573cad..f88a1815d87c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -666,7 +666,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
- intel_dp->adapter.dev.parent = &connector->base.kdev;
+ intel_dp->adapter.dev.parent = connector->base.kdev;
if (is_edp(encoder))
cdv_intel_edp_panel_vdd_on(encoder);
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 10ae8c52d06f..e2db48a81ed0 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -29,11 +29,6 @@
#include <drm/drm_vma_manager.h>
#include "psb_drv.h"
-int psb_gem_init_object(struct drm_gem_object *obj)
-{
- return -EINVAL;
-}
-
void psb_gem_free_object(struct drm_gem_object *obj)
{
struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
index 45d5af0546bf..5b646c1f0c3e 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -39,7 +39,7 @@
#include "psb_intel_reg.h"
#include "mdfld_output.h"
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 08747fd7105c..7a9ce000fd86 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -26,7 +26,7 @@
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_intel_reg.h"
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
#include "mid_bios.h"
#include "intel_bios.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index e77d7214fca4..3ece553311fe 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -22,7 +22,7 @@
#include <linux/i2c.h>
#include <drm/drmP.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include "intel_bios.h"
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index fcb4e9ff1f20..dd607f820a26 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -359,7 +359,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
drm_irq_install(dev);
- dev->vblank_disable_allowed = 1;
+ dev->vblank_disable_allowed = true;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -646,7 +646,6 @@ static struct drm_driver driver = {
.preclose = psb_driver_preclose,
.postclose = psb_driver_close,
- .gem_init_object = psb_gem_init_object,
.gem_free_object = psb_gem_free_object,
.gem_vm_ops = &psb_gem_vm_ops,
.dumb_create = psb_gem_dumb_create,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 4535ac7708f8..0bab46bd73d2 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -44,10 +44,10 @@ enum {
CHIP_MFLD_0130 = 3, /* Medfield */
};
-#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
-#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
-#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
-#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
+#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
+#define IS_MRST(dev) (((dev)->pdev->device & 0xfffc) == 0x4100)
+#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
+#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
/*
* Driver definitions
@@ -837,7 +837,6 @@ extern const struct drm_connector_helper_funcs
extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
/* gem.c */
-extern int psb_gem_init_object(struct drm_gem_object *obj);
extern void psb_gem_free_object(struct drm_gem_object *obj);
extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
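
With the duplicated pci_device/pci_vendor fields removed from struct drm_device (see the drm_pci.c hunk earlier), device-ID checks read the ID straight from the underlying pci_dev, exactly as the reworked IS_*() macros above do. An open-coded equivalent of IS_CDV(), for illustration:

    static bool example_is_cdv(const struct drm_device *dev)
    {
            return (dev->pdev->device & 0xfff0) == 0x0be0;
    }
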
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 029eccf30137..ba4830342d34 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -271,15 +271,15 @@ void psb_irq_preinstall(struct drm_device *dev)
if (gma_power_is_on(dev))
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank_enabled[0])
+ if (dev->vblank[0].enabled)
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
- if (dev->vblank_enabled[1])
+ if (dev->vblank[1].enabled)
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
/* FIXME: Handle Medfield irq mask
- if (dev->vblank_enabled[1])
+ if (dev->vblank[1].enabled)
dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
- if (dev->vblank_enabled[2])
+ if (dev->vblank[2].enabled)
dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
*/
@@ -305,17 +305,17 @@ int psb_irq_postinstall(struct drm_device *dev)
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank_enabled[0])
+ if (dev->vblank[0].enabled)
psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
else
psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
- if (dev->vblank_enabled[1])
+ if (dev->vblank[1].enabled)
psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
else
psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
- if (dev->vblank_enabled[2])
+ if (dev->vblank[2].enabled)
psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
else
psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
@@ -339,13 +339,13 @@ void psb_irq_uninstall(struct drm_device *dev)
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank_enabled[0])
+ if (dev->vblank[0].enabled)
psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
- if (dev->vblank_enabled[1])
+ if (dev->vblank[1].enabled)
psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
- if (dev->vblank_enabled[2])
+ if (dev->vblank[2].enabled)
psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
@@ -456,7 +456,7 @@ static int psb_vblank_do_wait(struct drm_device *dev,
{
unsigned int cur_vblank;
int ret = 0;
- DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(counter))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 60e84043aa34..400b0c4a10fb 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -17,6 +17,7 @@
+#include <linux/hdmi.h>
#include <linux/module.h>
#include <drm/drmP.h>
@@ -549,6 +550,8 @@ tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
buf[HB(0)] = 0x82;
buf[HB(1)] = 0x02;
buf[HB(2)] = 13;
+ buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN;
+ buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2;
buf[PB(4)] = drm_match_cea_mode(mode);
tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ab1892eb1074..249fdff305c6 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -944,8 +944,6 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
dma->buflist[vertex->idx],
vertex->discard, vertex->used);
- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter - 1;
sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1105,8 +1103,6 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
mc->last_render);
- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter - 1;
sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1197,13 +1193,6 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
- /* i810 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
pci_set_master(dev->pdev);
return 0;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
new file mode 100644
index 000000000000..6199d0b5b958
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -0,0 +1,67 @@
+config DRM_I915
+ tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+ depends on DRM
+ depends on AGP
+ depends on AGP_INTEL
+ # we need shmfs for the swappable backing store, and in particular
+ # the shmem_readpage() which depends upon tmpfs
+ select SHMEM
+ select TMPFS
+ select DRM_KMS_HELPER
+ # i915 depends on ACPI_VIDEO when ACPI is enabled
+ # but for select to work, need to select ACPI_VIDEO's dependencies, ick
+ select BACKLIGHT_LCD_SUPPORT if ACPI
+ select BACKLIGHT_CLASS_DEVICE if ACPI
+ select VIDEO_OUTPUT_CONTROL if ACPI
+ select INPUT if ACPI
+ select ACPI_VIDEO if ACPI
+ select ACPI_BUTTON if ACPI
+ help
+ Choose this option if you have a system that has "Intel Graphics
+ Media Accelerator" or "HD Graphics" integrated graphics,
+ including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+ G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+ Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+ If M is selected, the module will be called i915. AGP support
+ is required for this driver to work. This driver is used by
+ the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+ replaces the older i830 module that supported a subset of the
+ hardware in older X.org releases.
+
+ Note that the older i810/i815 chipsets require the use of the
+ i810 driver instead, and the Atom z5xx series has an entirely
+ different implementation.
+
+config DRM_I915_KMS
+ bool "Enable modesetting on intel by default"
+ depends on DRM_I915
+ help
+ Choose this option if you want kernel modesetting enabled by default,
+ and you have a new enough userspace to support this. Running old
+ userspaces with this enabled will cause pain. Note that this causes
+ the driver to bind to PCI devices, which precludes loading things
+ like intelfb.
+
+config DRM_I915_FBDEV
+	bool "Enable legacy fbdev support for the modesetting intel driver"
+ depends on DRM_I915
+ select DRM_KMS_FB_HELPER
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ default y
+ help
+ Choose this option if you have a need for the legacy fbdev
+	  support. Note that this support also provides the Linux console
+ support on top of the intel modesetting driver.
+
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+ bool "Enable preliminary support for prerelease Intel hardware by default"
+ depends on DRM_I915
+ help
+ Choose this option if you have prerelease Intel hardware and want the
+ i915 driver to support it by default. You can enable such support at
+ runtime with the module option i915.preliminary_hw_support=1; this
+ option changes the default for that module option.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b8449a84a0dc..41838eaa799c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -21,6 +21,9 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
+ intel_dsi.o \
+ intel_dsi_cmd.o \
+ intel_dsi_pll.o \
intel_bios.o \
intel_ddi.o \
intel_dp.o \
@@ -30,7 +33,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_panel.o \
intel_pm.o \
intel_i2c.o \
- intel_fb.o \
intel_tv.o \
intel_dvo.o \
intel_ringbuffer.o \
@@ -51,6 +53,8 @@ i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_ACPI) += intel_acpi.o
+i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+
obj-$(CONFIG_DRM_I915) += i915.o
CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 33a62ad80100..312163379db9 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -77,17 +77,6 @@ struct intel_dvo_dev_ops {
struct drm_display_mode *mode);
/*
- * Callback to adjust the mode to be set in the CRTC.
- *
- * This allows an output to adjust the clock or even the entire set of
- * timings, which is used for panels with fixed timings or for
- * buses with clock limitations.
- */
- bool (*mode_fixup)(struct intel_dvo_device *dvo,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- /*
* Callback for preparing mode changes on an output
*/
void (*prepare)(struct intel_dvo_device *dvo);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a6f4cb5af185..7008aacfc3c9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,8 @@
*/
#include <linux/seq_file.h>
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -38,9 +40,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#define DRM_I915_RING_DEBUG 1
-
-
#if defined(CONFIG_DEBUG_FS)
enum {
@@ -54,6 +53,32 @@ static const char *yesno(int v)
return v ? "yes" : "no";
}
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+ struct dentry *ent,
+ const void *key)
+{
+ struct drm_info_node *node;
+
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL) {
+ debugfs_remove(ent);
+ return -ENOMEM;
+ }
+
+ node->minor = minor;
+ node->dent = ent;
+ node->info_ent = (void *) key;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+}
+
static int i915_capabilities(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -145,6 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s)", obj->ring->name);
}
+static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
+{
+ seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+ seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+ seq_putc(m, ' ');
+}
+
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -843,6 +875,8 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
if (IS_GEN5(dev)) {
u16 rgvswctl = I915_READ16(MEMSWCTL);
u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1321,6 +1355,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1395,12 +1431,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_fbdev *ifbdev;
+ struct intel_fbdev *ifbdev = NULL;
struct intel_framebuffer *fb;
- int ret;
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+#ifdef CONFIG_DRM_I915_FBDEV
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
if (ret)
return ret;
@@ -1416,10 +1452,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
describe_obj(m, fb->obj);
seq_putc(m, '\n');
mutex_unlock(&dev->mode_config.mutex);
+#endif
mutex_lock(&dev->mode_config.fb_lock);
list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
- if (&fb->base == ifbdev->helper.fb)
+ if (ifbdev && &fb->base == ifbdev->helper.fb)
continue;
seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
@@ -1442,6 +1479,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
+ struct i915_hw_context *ctx;
int ret, i;
ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@@ -1460,12 +1498,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, '\n');
}
- for_each_ring(ring, dev_priv, i) {
- if (ring->default_context) {
- seq_printf(m, "HW default context %s ring ", ring->name);
- describe_obj(m, ring->default_context->obj);
- seq_putc(m, '\n');
- }
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ seq_puts(m, "HW context ");
+ describe_ctx(m, ctx);
+ for_each_ring(ring, dev_priv, i)
+ if (ring->default_context == ctx)
+ seq_printf(m, "(default context %s) ", ring->name);
+
+ describe_obj(m, ctx->obj);
+ seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.mutex);
@@ -1610,27 +1651,27 @@ static int i915_dpio_info(struct seq_file *m, void *data)
seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_DIV_A));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_DIV_B));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));
+ vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
- vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+ vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
mutex_unlock(&dev_priv->dpio_lock);
@@ -1655,126 +1696,20 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 psrstat, psrperf;
-
- if (!IS_HASWELL(dev)) {
- seq_puts(m, "PSR not supported on this platform\n");
- } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
- seq_puts(m, "PSR enabled\n");
- } else {
- seq_puts(m, "PSR disabled: ");
- switch (dev_priv->no_psr_reason) {
- case PSR_NO_SOURCE:
- seq_puts(m, "not supported on this platform");
- break;
- case PSR_NO_SINK:
- seq_puts(m, "not supported by panel");
- break;
- case PSR_MODULE_PARAM:
- seq_puts(m, "disabled by flag");
- break;
- case PSR_CRTC_NOT_ACTIVE:
- seq_puts(m, "crtc not active");
- break;
- case PSR_PWR_WELL_ENABLED:
- seq_puts(m, "power well enabled");
- break;
- case PSR_NOT_TILED:
- seq_puts(m, "not tiled");
- break;
- case PSR_SPRITE_ENABLED:
- seq_puts(m, "sprite enabled");
- break;
- case PSR_S3D_ENABLED:
- seq_puts(m, "stereo 3d enabled");
- break;
- case PSR_INTERLACED_ENABLED:
- seq_puts(m, "interlaced enabled");
- break;
- case PSR_HSW_NOT_DDIA:
- seq_puts(m, "HSW ties PSR to DDI A (eDP)");
- break;
- default:
- seq_puts(m, "unknown reason");
- }
- seq_puts(m, "\n");
- return 0;
- }
-
- psrstat = I915_READ(EDP_PSR_STATUS_CTL);
-
- seq_puts(m, "PSR Current State: ");
- switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
- case EDP_PSR_STATUS_STATE_IDLE:
- seq_puts(m, "Reset state\n");
- break;
- case EDP_PSR_STATUS_STATE_SRDONACK:
- seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
- break;
- case EDP_PSR_STATUS_STATE_SRDENT:
- seq_puts(m, "SRD entry\n");
- break;
- case EDP_PSR_STATUS_STATE_BUFOFF:
- seq_puts(m, "Wait for buffer turn off\n");
- break;
- case EDP_PSR_STATUS_STATE_BUFON:
- seq_puts(m, "Wait for buffer turn on\n");
- break;
- case EDP_PSR_STATUS_STATE_AUXACK:
- seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
- break;
- case EDP_PSR_STATUS_STATE_SRDOFFACK:
- seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
- break;
- default:
- seq_puts(m, "Unknown\n");
- break;
- }
+ u32 psrperf = 0;
+ bool enabled = false;
- seq_puts(m, "Link Status: ");
- switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
- case EDP_PSR_STATUS_LINK_FULL_OFF:
- seq_puts(m, "Link is fully off\n");
- break;
- case EDP_PSR_STATUS_LINK_FULL_ON:
- seq_puts(m, "Link is fully on\n");
- break;
- case EDP_PSR_STATUS_LINK_STANDBY:
- seq_puts(m, "Link is in standby\n");
- break;
- default:
- seq_puts(m, "Unknown\n");
- break;
- }
+ seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
+ seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
- seq_printf(m, "PSR Entry Count: %u\n",
- psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
- EDP_PSR_STATUS_COUNT_MASK);
+ enabled = HAS_PSR(dev) &&
+ I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ seq_printf(m, "Enabled: %s\n", yesno(enabled));
- seq_printf(m, "Max Sleep Timer Counter: %u\n",
- psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
- EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
-
- seq_printf(m, "Had AUX error: %s\n",
- yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
-
- seq_printf(m, "Sending AUX: %s\n",
- yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
-
- seq_printf(m, "Sending Idle: %s\n",
- yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
-
- seq_printf(m, "Sending TP2 TP3: %s\n",
- yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
-
- seq_printf(m, "Sending TP1: %s\n",
- yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
-
- seq_printf(m, "Idle Count: %u\n",
- psrstat & EDP_PSR_STATUS_IDLE_MASK);
-
- psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
- seq_printf(m, "Performance Counter: %u\n", psrperf);
+ if (HAS_PSR(dev))
+ psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
+ EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance_Counter: %u\n", psrperf);
return 0;
}
@@ -1825,6 +1760,751 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
return 0;
}
+struct pipe_crc_info {
+ const char *name;
+ struct drm_device *dev;
+ enum pipe pipe;
+};
+
+static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
+{
+ struct pipe_crc_info *info = inode->i_private;
+ struct drm_i915_private *dev_priv = info->dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+ spin_lock_irq(&pipe_crc->lock);
+
+ if (pipe_crc->opened) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return -EBUSY; /* already open */
+ }
+
+ pipe_crc->opened = true;
+ filep->private_data = inode->i_private;
+
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return 0;
+}
+
+static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
+{
+ struct pipe_crc_info *info = inode->i_private;
+ struct drm_i915_private *dev_priv = info->dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+ spin_lock_irq(&pipe_crc->lock);
+ pipe_crc->opened = false;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return 0;
+}
+
+/* (6 fields, 8 chars each, space separated (5) + '\n') */
+#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
+/* account for the terminating '\0' */
+#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
+
+static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
+{
+ assert_spin_locked(&pipe_crc->lock);
+ return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+ INTEL_PIPE_CRC_ENTRIES_NR);
+}
+
+static ssize_t
+i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
+ loff_t *pos)
+{
+ struct pipe_crc_info *info = filep->private_data;
+ struct drm_device *dev = info->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+ char buf[PIPE_CRC_BUFFER_LEN];
+ int head, tail, n_entries, n;
+ ssize_t bytes_read;
+
+ /*
+ * Don't allow user space to provide buffers not big enough to hold
+ * a line of data.
+ */
+ if (count < PIPE_CRC_LINE_LEN)
+ return -EINVAL;
+
+ if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
+ return 0;
+
+ /* nothing to read */
+ spin_lock_irq(&pipe_crc->lock);
+ while (pipe_crc_data_count(pipe_crc) == 0) {
+ int ret;
+
+ if (filep->f_flags & O_NONBLOCK) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return -EAGAIN;
+ }
+
+ ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
+ pipe_crc_data_count(pipe_crc), pipe_crc->lock);
+ if (ret) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return ret;
+ }
+ }
+
+ /* We now have one or more entries to read */
+ head = pipe_crc->head;
+ tail = pipe_crc->tail;
+ n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
+ count / PIPE_CRC_LINE_LEN);
+ spin_unlock_irq(&pipe_crc->lock);
+
+ bytes_read = 0;
+ n = 0;
+ do {
+ struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
+ int ret;
+
+ bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
+ "%8u %8x %8x %8x %8x %8x\n",
+ entry->frame, entry->crc[0],
+ entry->crc[1], entry->crc[2],
+ entry->crc[3], entry->crc[4]);
+
+ ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
+ buf, PIPE_CRC_LINE_LEN);
+ if (ret == PIPE_CRC_LINE_LEN)
+ return -EFAULT;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+ tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+ n++;
+ } while (--n_entries);
+
+ spin_lock_irq(&pipe_crc->lock);
+ pipe_crc->tail = tail;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return bytes_read;
+}
+
+static const struct file_operations i915_pipe_crc_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_pipe_crc_open,
+ .read = i915_pipe_crc_read,
+ .release = i915_pipe_crc_release,
+};
+
+static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
+ {
+ .name = "i915_pipe_A_crc",
+ .pipe = PIPE_A,
+ },
+ {
+ .name = "i915_pipe_B_crc",
+ .pipe = PIPE_B,
+ },
+ {
+ .name = "i915_pipe_C_crc",
+ .pipe = PIPE_C,
+ },
+};
+
+static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
+ enum pipe pipe)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+ struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
+
+ info->dev = dev;
+ ent = debugfs_create_file(info->name, S_IRUGO, root, info,
+ &i915_pipe_crc_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return drm_add_fake_info_node(minor, ent, info);
+}
+
+static const char * const pipe_crc_sources[] = {
+ "none",
+ "plane1",
+ "plane2",
+ "pf",
+ "pipe",
+ "TV",
+ "DP-B",
+ "DP-C",
+ "DP-D",
+ "auto",
+};
+
+static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
+ return pipe_crc_sources[source];
+}
+
+static int display_crc_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ seq_printf(m, "%c %s\n", pipe_name(i),
+ pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+
+ return 0;
+}
+
+static int display_crc_ctl_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ return single_open(file, display_crc_ctl_show, dev);
+}
+
+static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
+ enum intel_pipe_crc_source *source)
+{
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+ struct intel_digital_port *dig_port;
+ int ret = 0;
+
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
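+ /*
+ * TV and DP can't use the pipe CRC tap on these platforms, so look at
+ * the encoder currently driving this pipe to pick the right source.
+ */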
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (!encoder->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(encoder->base.crtc);
+
+ if (crtc->pipe != pipe)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_TVOUT:
+ *source = INTEL_PIPE_CRC_SOURCE_TV;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_EDP:
+ dig_port = enc_to_dig_port(&encoder->base);
+ switch (dig_port->port) {
+ case PORT_B:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_B;
+ break;
+ case PORT_C:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_C;
+ break;
+ case PORT_D:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_D;
+ break;
+ default:
+ WARN(1, "nonexisting DP port %c\n",
+ port_name(dig_port->port));
+ break;
+ }
+ break;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool need_stable_symbols = false;
+
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+ int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+ if (ret)
+ return ret;
+ }
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * When the pipe CRC tap point is after the transcoders we need
+ * to tweak symbol-level features to produce a deterministic series of
+ * symbols for a given frame. We need to reset those features only once
+ * a frame (instead of every nth symbol):
+ * - DC-balance: used to ensure a better clock recovery from the data
+ * link (SDVO)
+ * - DisplayPort scrambling: used for EMI reduction
+ */
+ if (need_stable_symbols) {
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ WARN_ON(!IS_G4X(dev));
+
+ tmp |= DC_BALANCE_RESET_VLV;
+ if (pipe == PIPE_A)
+ tmp |= PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp |= PIPE_B_SCRAMBLE_RESET;
+
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+ }
+
+ return 0;
+}
+
+static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool need_stable_symbols = false;
+
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+ int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+ if (ret)
+ return ret;
+ }
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_TV:
+ if (!SUPPORTS_TV(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ if (!IS_G4X(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ if (!IS_G4X(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ if (!IS_G4X(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * When the pipe CRC tap point is after the transcoders we need
+ * to tweak symbol-level features to produce a deterministic series of
+ * symbols for a given frame. We need to reset those features only once
+ * a frame (instead of every nth symbol):
+ * - DC-balance: used to ensure a better clock recovery from the data
+ * link (SDVO)
+ * - DisplayPort scrambling: used for EMI reduction
+ */
+ if (need_stable_symbols) {
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ WARN_ON(!IS_G4X(dev));
+
+ I915_WRITE(PORT_DFT_I9XX,
+ I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
+
+ if (pipe == PIPE_A)
+ tmp |= PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp |= PIPE_B_SCRAMBLE_RESET;
+
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+ }
+
+ return 0;
+}
+
+static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ if (pipe == PIPE_A)
+ tmp &= ~PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
+ tmp &= ~DC_BALANCE_RESET_VLV;
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+}
+
+static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ if (pipe == PIPE_A)
+ tmp &= ~PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+
+ if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
+ I915_WRITE(PORT_DFT_I9XX,
+ I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
+ }
+}
+
+static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PF;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PF:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+ enum intel_pipe_crc_source source)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+ u32 val;
+ int ret;
+
+ if (pipe_crc->source == source)
+ return 0;
+
+ /* forbid changing the source without going back to 'none' */
+ if (pipe_crc->source && source)
+ return -EINVAL;
+
+ if (IS_GEN2(dev))
+ ret = i8xx_pipe_crc_ctl_reg(&source, &val);
+ else if (INTEL_INFO(dev)->gen < 5)
+ ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+ else if (IS_VALLEYVIEW(dev))
+ ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+ else if (IS_GEN5(dev) || IS_GEN6(dev))
+ ret = ilk_pipe_crc_ctl_reg(&source, &val);
+ else
+ ret = ivb_pipe_crc_ctl_reg(&source, &val);
+
+ if (ret != 0)
+ return ret;
+
+ /* none -> real source transition */
+ if (source) {
+ DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
+ pipe_name(pipe), pipe_crc_source_name(source));
+
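+ /*
+ * Set up the result ring before the new source is enabled in
+ * PIPE_CRC_CTL below, so the first CRC interrupt finds a buffer.
+ */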
+ pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
+ INTEL_PIPE_CRC_ENTRIES_NR,
+ GFP_KERNEL);
+ if (!pipe_crc->entries)
+ return -ENOMEM;
+
+ spin_lock_irq(&pipe_crc->lock);
+ pipe_crc->head = 0;
+ pipe_crc->tail = 0;
+ spin_unlock_irq(&pipe_crc->lock);
+ }
+
+ pipe_crc->source = source;
+
+ I915_WRITE(PIPE_CRC_CTL(pipe), val);
+ POSTING_READ(PIPE_CRC_CTL(pipe));
+
+ /* real source -> none transition */
+ if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
+ struct intel_pipe_crc_entry *entries;
+
+ DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
+ pipe_name(pipe));
+
+ intel_wait_for_vblank(dev, pipe);
+
+ spin_lock_irq(&pipe_crc->lock);
+ entries = pipe_crc->entries;
+ pipe_crc->entries = NULL;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ kfree(entries);
+
+ if (IS_G4X(dev))
+ g4x_undo_pipe_scramble_reset(dev, pipe);
+ else if (IS_VALLEYVIEW(dev))
+ vlv_undo_pipe_scramble_reset(dev, pipe);
+ }
+
+ return 0;
+}
+
+/*
+ * Parse pipe CRC command strings:
+ * command: wsp* object wsp+ name wsp+ source wsp*
+ * object: 'pipe'
+ * name: (A | B | C)
+ * source: (none | plane1 | plane2 | pf)
+ * wsp: (#0x20 | #0x9 | #0xA)+
+ *
+ * e.g.:
+ * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
+ * "pipe A none" -> Stop CRC
+ */
+static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
+{
+ int n_words = 0;
+
+ while (*buf) {
+ char *end;
+
+ /* skip leading white space */
+ buf = skip_spaces(buf);
+ if (!*buf)
+ break; /* end of buffer */
+
+ /* find end of word */
+ for (end = buf; *end && !isspace(*end); end++)
+ ;
+
+ if (n_words == max_words) {
+ DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
+ max_words);
+ return -EINVAL; /* ran out of words[] before bytes */
+ }
+
+ if (*end)
+ *end++ = '\0';
+ words[n_words++] = buf;
+ buf = end;
+ }
+
+ return n_words;
+}
+
+enum intel_pipe_crc_object {
+ PIPE_CRC_OBJECT_PIPE,
+};
+
+static const char * const pipe_crc_objects[] = {
+ "pipe",
+};
+
+static int
+display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
+ if (!strcmp(buf, pipe_crc_objects[i])) {
+ *o = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+{
+ const char name = buf[0];
+
+ if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+ return -EINVAL;
+
+ *pipe = name - 'A';
+
+ return 0;
+}
+
+static int
+display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
+ if (!strcmp(buf, pipe_crc_sources[i])) {
+ *s = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+{
+#define N_WORDS 3
+ int n_words;
+ char *words[N_WORDS];
+ enum pipe pipe;
+ enum intel_pipe_crc_object object;
+ enum intel_pipe_crc_source source;
+
+ n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
+ if (n_words != N_WORDS) {
+ DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
+ N_WORDS);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_object(words[0], &object) < 0) {
+ DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+ DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_source(words[2], &source) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
+ return -EINVAL;
+ }
+
+ return pipe_crc_set_source(dev, pipe, source);
+}
+
+static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+ char *tmpbuf;
+ int ret;
+
+ if (len == 0)
+ return 0;
+
+ if (len > PAGE_SIZE - 1) {
+ DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
+ PAGE_SIZE);
+ return -E2BIG;
+ }
+
+ tmpbuf = kmalloc(len + 1, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(tmpbuf, ubuf, len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ tmpbuf[len] = '\0';
+
+ ret = display_crc_ctl_parse(dev, tmpbuf, len);
+
+out:
+ kfree(tmpbuf);
+ if (ret < 0)
+ return ret;
+
+ *offp += len;
+ return len;
+}
+
+static const struct file_operations i915_display_crc_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = display_crc_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = display_crc_ctl_write
+};
+
static int
i915_wedged_get(void *data, u64 *val)
{
@@ -1885,6 +2565,72 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
i915_ring_stop_get, i915_ring_stop_set,
"0x%08llx\n");
+static int
+i915_ring_missed_irq_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ *val = dev_priv->gpu_error.missed_irq_rings;
+ return 0;
+}
+
+static int
+i915_ring_missed_irq_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* Lock against concurrent debugfs callers */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ dev_priv->gpu_error.missed_irq_rings = val;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
+ i915_ring_missed_irq_get, i915_ring_missed_irq_set,
+ "0x%08llx\n");
+
+static int
+i915_ring_test_irq_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ *val = dev_priv->gpu_error.test_irq_rings;
+
+ return 0;
+}
+
+static int
+i915_ring_test_irq_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
+
+ /* Lock against concurrent debugfs callers */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ dev_priv->gpu_error.test_irq_rings = val;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
+ i915_ring_test_irq_get, i915_ring_test_irq_set,
+ "0x%08llx\n");
+
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
@@ -1972,6 +2718,8 @@ i915_max_freq_get(void *data, u64 *val)
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1996,6 +2744,8 @@ i915_max_freq_set(void *data, u64 val)
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2034,6 +2784,8 @@ i915_min_freq_get(void *data, u64 *val)
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -2058,6 +2810,8 @@ i915_min_freq_set(void *data, u64 val)
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2136,32 +2890,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
i915_cache_sharing_get, i915_cache_sharing_set,
"%llu\n");
-/* As the drm_debugfs_init() routines are called before dev->dev_private is
- * allocated we need to hook into the minor for release. */
-static int
-drm_add_fake_info_node(struct drm_minor *minor,
- struct dentry *ent,
- const void *key)
-{
- struct drm_info_node *node;
-
- node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (node == NULL) {
- debugfs_remove(ent);
- return -ENOMEM;
- }
-
- node->minor = minor;
- node->dent = ent;
- node->info_ent = (void *) key;
-
- mutex_lock(&minor->debugfs_lock);
- list_add(&node->list, &minor->debugfs_list);
- mutex_unlock(&minor->debugfs_lock);
-
- return 0;
-}
-
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
@@ -2278,11 +3006,28 @@ static struct i915_debugfs_files {
{"i915_min_freq", &i915_min_freq_fops},
{"i915_cache_sharing", &i915_cache_sharing_fops},
{"i915_ring_stop", &i915_ring_stop_fops},
+ {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
+ {"i915_ring_test_irq", &i915_ring_test_irq_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
{"i915_error_state", &i915_error_state_fops},
{"i915_next_seqno", &i915_next_seqno_fops},
+ {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
};
+void intel_display_crc_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
+
+ pipe_crc->opened = false;
+ spin_lock_init(&pipe_crc->lock);
+ init_waitqueue_head(&pipe_crc->wq);
+ }
+}
+
int i915_debugfs_init(struct drm_minor *minor)
{
int ret, i;
@@ -2291,6 +3036,12 @@ int i915_debugfs_init(struct drm_minor *minor)
if (ret)
return ret;
+ for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+ ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
ret = i915_debugfs_create(minor->debugfs_root, minor,
i915_debugfs_files[i].name,
@@ -2310,8 +3061,17 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
drm_debugfs_remove_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES, minor);
+
drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
1, minor);
+
+ for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+ struct drm_info_list *info_list =
+ (struct drm_info_list *)&i915_pipe_crc_data[i];
+
+ drm_debugfs_remove_files(info_list, 1, minor);
+ }
+
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
struct drm_info_list *info_list =
(struct drm_info_list *) i915_debugfs_files[i].fops;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d5c784d48671..01233c2fb0d4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -52,7 +52,7 @@
intel_ring_emit(LP_RING(dev_priv), x)
#define ADVANCE_LP_RING() \
- intel_ring_advance(LP_RING(dev_priv))
+ __intel_ring_advance(LP_RING(dev_priv))
/**
* Lock test for when it's just for synchronization of ring access.
@@ -641,7 +641,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
if (batch->num_cliprects) {
cliprects = kcalloc(batch->num_cliprects,
- sizeof(struct drm_clip_rect),
+ sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL)
return -ENOMEM;
@@ -703,7 +703,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
- sizeof(struct drm_clip_rect), GFP_KERNEL);
+ sizeof(*cliprects), GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto fail_batch_free;
@@ -931,7 +931,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = READ_BREADCRUMB(dev_priv);
break;
case I915_PARAM_CHIPSET_ID:
- value = dev->pci_device;
+ value = dev->pdev->device;
break;
case I915_PARAM_HAS_GEM:
value = 1;
@@ -1311,13 +1311,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_gem_stolen;
+ intel_power_domains_init_hw(dev);
+
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
ret = i915_gem_init(dev);
if (ret)
- goto cleanup_irq;
+ goto cleanup_power;
INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
@@ -1325,9 +1327,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
- dev->vblank_disable_allowed = 1;
- if (INTEL_INFO(dev)->num_pipes == 0)
+ dev->vblank_disable_allowed = true;
+ if (INTEL_INFO(dev)->num_pipes == 0) {
+ intel_display_power_put(dev, POWER_DOMAIN_VGA);
return 0;
+ }
ret = intel_fbdev_init(dev);
if (ret)
@@ -1348,6 +1352,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
*/
intel_fbdev_initial_config(dev);
+ intel_display_power_put(dev, POWER_DOMAIN_VGA);
+
/* Only enable hotplug handling once the fbdev is fully set up. */
dev_priv->enable_hotplug_processing = true;
@@ -1362,7 +1368,8 @@ cleanup_gem:
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
drm_mm_takedown(&dev_priv->gtt.base.mm);
-cleanup_irq:
+cleanup_power:
+ intel_display_power_put(dev, POWER_DOMAIN_VGA);
drm_irq_uninstall(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
@@ -1398,6 +1405,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
+#ifdef CONFIG_DRM_I915_FBDEV
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
@@ -1418,6 +1426,11 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
kfree(ap);
}
+#else
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+}
+#endif
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
@@ -1459,17 +1472,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
info = (struct intel_device_info *) flags;
/* Refuse to load on gen6+ without kms enabled. */
- if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+ if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
+ DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
return -ENODEV;
+ }
- /* i915 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
- dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
@@ -1494,6 +1503,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+ intel_display_crc_init(dev);
+
i915_dump_device_info(dev_priv);
/* Not all pre-production machines fall into this category, only the
@@ -1531,19 +1542,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_uncore_early_sanitize(dev);
- if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
- /* The docs do not explain exactly how the calculation can be
- * made. It is somewhat guessable, but for now, it's always
- * 128MB.
- * NB: We can't write IDICR yet because we do not have gt funcs
- * set up */
- dev_priv->ellc_size = 128;
- DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
- }
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(dev);
+
+ intel_uncore_init(dev);
ret = i915_gem_gtt_init(dev);
if (ret)
- goto put_bridge;
+ goto out_regs;
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_kick_out_firmware_fb(dev_priv);
@@ -1572,7 +1578,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
aperture_size);
if (dev_priv->gtt.mappable == NULL) {
ret = -EIO;
- goto out_rmmap;
+ goto out_gtt;
}
dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
@@ -1598,13 +1604,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(dev);
-
intel_irq_init(dev);
intel_pm_init(dev);
intel_uncore_sanitize(dev);
- intel_uncore_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
@@ -1640,13 +1642,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
if (HAS_POWER_WELL(dev))
- i915_init_power_well(dev);
+ intel_power_domains_init(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
- goto out_gem_unload;
+ goto out_power_well;
}
} else {
/* Start out suspended in ums mode. */
@@ -1666,6 +1668,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
+out_power_well:
+ if (HAS_POWER_WELL(dev))
+ intel_power_domains_remove(dev);
+ drm_vblank_cleanup(dev);
out_gem_unload:
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
@@ -1679,12 +1685,18 @@ out_gem_unload:
out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
io_mapping_free(dev_priv->gtt.mappable);
+out_gtt:
+ list_del(&dev_priv->gtt.base.global_link);
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
-out_rmmap:
+out_regs:
+ intel_uncore_fini(dev);
pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
+ if (dev_priv->slab)
+ kmem_cache_destroy(dev_priv->slab);
kfree(dev_priv);
return ret;
}
@@ -1700,8 +1712,8 @@ int i915_driver_unload(struct drm_device *dev)
/* The i915.ko module is still not prepared to be loaded when
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
- intel_set_power_well(dev, true);
- i915_remove_power_well(dev);
+ intel_display_set_init_power(dev, true);
+ intel_power_domains_remove(dev);
}
i915_teardown_sysfs(dev);
@@ -1709,15 +1721,9 @@ int i915_driver_unload(struct drm_device *dev)
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
- mutex_lock(&dev->struct_mutex);
- ret = i915_gpu_idle(dev);
+ ret = i915_gem_suspend(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
- i915_gem_retire_requests(dev);
- mutex_unlock(&dev->struct_mutex);
-
- /* Cancel the retire work handler, which should be idle now. */
- cancel_delayed_work_sync(&dev_priv->mm.retire_work);
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1774,8 +1780,8 @@ int i915_driver_unload(struct drm_device *dev)
list_del(&dev_priv->gtt.base.global_link);
WARN_ON(!list_empty(&dev_priv->vm_list));
drm_mm_takedown(&dev_priv->gtt.base.mm);
- if (dev_priv->regs != NULL)
- pci_iounmap(dev->pdev, dev_priv->regs);
+
+ drm_vblank_cleanup(dev);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
@@ -1785,6 +1791,10 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
+ intel_uncore_fini(dev);
+ if (dev_priv->regs != NULL)
+ pci_iounmap(dev->pdev, dev_priv->regs);
+
if (dev_priv->slab)
kmem_cache_destroy(dev_priv->slab);
@@ -1796,19 +1806,11 @@ int i915_driver_unload(struct drm_device *dev)
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *file_priv;
-
- DRM_DEBUG_DRIVER("\n");
- file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
- if (!file_priv)
- return -ENOMEM;
-
- file->driver_priv = file_priv;
-
- spin_lock_init(&file_priv->mm.lock);
- INIT_LIST_HEAD(&file_priv->mm.request_list);
+ int ret;
- idr_init(&file_priv->context_idr);
+ ret = i915_gem_open(dev, file);
+ if (ret)
+ return ret;
return 0;
}
@@ -1836,7 +1838,7 @@ void i915_driver_lastclose(struct drm_device * dev)
return;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_fb_restore_mode(dev);
+ intel_fbdev_restore_mode(dev);
vga_switcheroo_process_delayed_switch();
return;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 69d8ed5416c3..a0804fa1e306 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -160,49 +160,58 @@ extern int intel_agp_enabled;
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_845g_info = {
.gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i85x_info = {
.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i865g_info = {
.gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i915g_info = {
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i915gm_info = {
.gen = 3, .is_mobile = 1, .num_pipes = 2,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945g_info = {
.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945gm_info = {
.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i965g_info = {
.gen = 4, .is_broadwater = 1, .num_pipes = 2,
.has_hotplug = 1,
.has_overlay = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i965gm_info = {
@@ -210,18 +219,20 @@ static const struct intel_device_info intel_i965gm_info = {
.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
.has_overlay = 1,
.supports_tv = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_g33_info = {
.gen = 3, .is_g33 = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
+ .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_g45_info = {
.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
.has_pipe_cxsr = 1, .has_hotplug = 1,
- .has_bsd_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_gm45_info = {
@@ -229,7 +240,7 @@ static const struct intel_device_info intel_gm45_info = {
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
- .has_bsd_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_pineview_info = {
@@ -241,42 +252,36 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
- .has_bsd_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_ironlake_m_info = {
.gen = 5, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
- .has_bsd_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_sandybridge_d_info = {
.gen = 6, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
- .has_bsd_ring = 1,
- .has_blt_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
- .has_force_wake = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
.gen = 6, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
- .has_bsd_ring = 1,
- .has_blt_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
- .has_force_wake = 1,
};
#define GEN7_FEATURES \
.gen = 7, .num_pipes = 3, \
.need_gfx_hws = 1, .has_hotplug = 1, \
- .has_bsd_ring = 1, \
- .has_blt_ring = 1, \
- .has_llc = 1, \
- .has_force_wake = 1
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .has_llc = 1
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
@@ -318,7 +323,7 @@ static const struct intel_device_info intel_haswell_d_info = {
.is_haswell = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
- .has_vebox_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
};
static const struct intel_device_info intel_haswell_m_info = {
@@ -328,7 +333,7 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
- .has_vebox_ring = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
};
/*
@@ -416,7 +421,7 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
- DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+ DRM_DEBUG_KMS("Found PantherPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
@@ -472,7 +477,7 @@ static int i915_drm_freeze(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
hsw_disable_package_c8(dev_priv);
- intel_set_power_well(dev, true);
+ intel_display_set_init_power(dev, true);
drm_kms_helper_poll_disable(dev);
@@ -482,9 +487,7 @@ static int i915_drm_freeze(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
int error;
- mutex_lock(&dev->struct_mutex);
- error = i915_gem_idle(dev);
- mutex_unlock(&dev->struct_mutex);
+ error = i915_gem_suspend(dev);
if (error) {
dev_err(&dev->pdev->dev,
"GEM idle failed, resume might fail\n");
@@ -505,6 +508,8 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_modeset_suspend_hw(dev);
}
+ i915_gem_suspend_gtt_mappings(dev);
+
i915_save_state(dev);
intel_opregion_fini(dev);
@@ -576,11 +581,24 @@ static void intel_resume_hotplug(struct drm_device *dev)
drm_helper_hpd_irq_event(dev);
}
-static int __i915_drm_thaw(struct drm_device *dev)
+static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
+ intel_uncore_early_sanitize(dev);
+
+ intel_uncore_sanitize(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+ restore_gtt_mappings) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ intel_power_domains_init_hw(dev);
+
i915_restore_state(dev);
intel_opregion_setup(dev);
@@ -640,19 +658,10 @@ static int __i915_drm_thaw(struct drm_device *dev)
static int i915_drm_thaw(struct drm_device *dev)
{
- int error = 0;
-
- intel_uncore_sanitize(dev);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
- mutex_unlock(&dev->struct_mutex);
- }
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_check_and_clear_faults(dev);
- __i915_drm_thaw(dev);
-
- return error;
+ return __i915_drm_thaw(dev, true);
}
int i915_resume(struct drm_device *dev)
@@ -668,20 +677,12 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
- intel_uncore_sanitize(dev);
-
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
- * earlier) need this since the BIOS might clear all our scratch PTEs.
+ * earlier) need to restore the GTT mappings since the BIOS might clear
+ * all our scratch PTEs.
*/
- if (drm_core_check_feature(dev, DRIVER_MODESET) &&
- !dev_priv->opregion.header) {
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
- mutex_unlock(&dev->struct_mutex);
- }
-
- ret = __i915_drm_thaw(dev);
+ ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
if (ret)
return ret;
@@ -719,24 +720,19 @@ int i915_reset(struct drm_device *dev)
simulated = dev_priv->gpu_error.stop_rings != 0;
- if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
- DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
- ret = -ENODEV;
- } else {
- ret = intel_gpu_reset(dev);
-
- /* Also reset the gpu hangman. */
- if (simulated) {
- DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
- dev_priv->gpu_error.stop_rings = 0;
- if (ret == -ENODEV) {
- DRM_ERROR("Reset not implemented, but ignoring "
- "error for simulated gpu hangs\n");
- ret = 0;
- }
- } else
- dev_priv->gpu_error.last_reset = get_seconds();
+ ret = intel_gpu_reset(dev);
+
+ /* Also reset the gpu hangman. */
+ if (simulated) {
+ DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
+ dev_priv->gpu_error.stop_rings = 0;
+ if (ret == -ENODEV) {
+ DRM_ERROR("Reset not implemented, but ignoring "
+ "error for simulated gpu hangs\n");
+ ret = 0;
+ }
}
+
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
@@ -759,30 +755,17 @@ int i915_reset(struct drm_device *dev)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->ums.mm_suspended) {
- struct intel_ring_buffer *ring;
- int i;
-
+ bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
dev_priv->ums.mm_suspended = 0;
- i915_gem_init_swizzling(dev);
-
- for_each_ring(ring, dev_priv, i)
- ring->init(ring);
-
- i915_gem_context_init(dev);
- if (dev_priv->mm.aliasing_ppgtt) {
- ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
- if (ret)
- i915_gem_cleanup_aliasing_ppgtt(dev);
- }
-
- /*
- * It would make sense to re-init all the other hw state, at
- * least the rps/rc6/emon init done within modeset_init_hw. For
- * some unknown reason, this blows up my ilk, so don't.
- */
-
+ ret = i915_gem_init_hw(dev);
+ if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
+ DRM_ERROR("HW contexts didn't survive reset\n");
mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ DRM_ERROR("Failed hw init on reset %d\n", ret);
+ return ret;
+ }
drm_irq_uninstall(dev);
drm_irq_install(dev);
@@ -799,6 +782,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
+ if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
+ DRM_INFO("This hardware requires preliminary hardware support.\n"
+ "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+ return -ENODEV;
+ }
+
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
@@ -946,7 +935,6 @@ static struct drm_driver driver = {
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
#endif
- .gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 35874b3a86dc..b12d942ab09c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -98,13 +98,25 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_A,
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
- POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
+ POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_INIT,
+
+ POWER_DOMAIN_NUM,
};
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+ ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+ (tran) + POWER_DOMAIN_TRANSCODER_A)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_EDP))
enum hpd_pin {
HPD_NONE = 0,
@@ -225,6 +237,8 @@ struct intel_opregion {
struct opregion_header __iomem *header;
struct opregion_acpi __iomem *acpi;
struct opregion_swsci __iomem *swsci;
+ u32 swsci_gbda_sub_functions;
+ u32 swsci_sbcb_sub_functions;
struct opregion_asle __iomem *asle;
void __iomem *vbt;
u32 __iomem *lid_state;
@@ -285,6 +299,7 @@ struct drm_i915_error_state {
u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
+ u32 bbstate[I915_NUM_RINGS];
u32 instpm[I915_NUM_RINGS];
u32 instps[I915_NUM_RINGS];
u32 extra_instdone[I915_NUM_INSTDONE_REG];
@@ -321,11 +336,13 @@ struct drm_i915_error_state {
u32 dirty:1;
u32 purgeable:1;
s32 ring:4;
- u32 cache_level:2;
+ u32 cache_level:3;
} **active_bo, **pinned_bo;
u32 *active_bo_count, *pinned_bo_count;
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
+ int hangcheck_score[I915_NUM_RINGS];
+ enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};
struct intel_crtc_config;
@@ -357,7 +374,7 @@ struct drm_i915_display_funcs {
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
- void (*update_wm)(struct drm_device *dev);
+ void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
@@ -367,7 +384,6 @@ struct drm_i915_display_funcs {
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
struct intel_crtc_config *);
- void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb);
@@ -375,7 +391,8 @@ struct drm_i915_display_funcs {
void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
- struct drm_crtc *crtc);
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@@ -395,6 +412,20 @@ struct drm_i915_display_funcs {
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
+
+ uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+ uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+ uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+ uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+
+ void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
+ uint8_t val, bool trace);
+ void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
+ uint16_t val, bool trace);
+ void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
+ uint32_t val, bool trace);
+ void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
+ uint64_t val, bool trace);
};
struct intel_uncore {
@@ -404,6 +435,8 @@ struct intel_uncore {
unsigned fifo_count;
unsigned forcewake_count;
+
+ struct delayed_work force_wake_work;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -420,7 +453,7 @@ struct intel_uncore {
func(is_ivybridge) sep \
func(is_valleyview) sep \
func(is_haswell) sep \
- func(has_force_wake) sep \
+ func(is_preliminary) sep \
func(has_fbc) sep \
func(has_pipe_cxsr) sep \
func(has_hotplug) sep \
@@ -428,9 +461,6 @@ struct intel_uncore {
func(has_overlay) sep \
func(overlay_needs_physical) sep \
func(supports_tv) sep \
- func(has_bsd_ring) sep \
- func(has_blt_ring) sep \
- func(has_vebox_ring) sep \
func(has_llc) sep \
func(has_ddi) sep \
func(has_fpga_dbg)
@@ -442,6 +472,7 @@ struct intel_device_info {
u32 display_mmio_offset;
u8 num_pipes:3;
u8 gen;
+ u8 ring_mask; /* Rings supported by the HW */
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};
@@ -497,10 +528,12 @@ struct i915_address_space {
/* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level);
+ enum i915_cache_level level,
+ bool valid); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm,
unsigned int first_entry,
- unsigned int num_entries);
+ unsigned int num_entries,
+ bool use_scratch);
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
unsigned int first_entry,
@@ -568,6 +601,13 @@ struct i915_vma {
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
+
};
struct i915_ctx_hang_stats {
@@ -576,6 +616,12 @@ struct i915_ctx_hang_stats {
/* This context had batch active when hang was declared */
unsigned batch_active;
+
+ /* Time when this context was last blamed for a GPU reset */
+ unsigned long guilty_ts;
+
+ /* This context is banned to submit more work */
+ bool banned;
};
/* This must match up with the value previously used for execbuf2.rsvd1. */
@@ -584,10 +630,13 @@ struct i915_hw_context {
struct kref ref;
int id;
bool is_initialized;
+ uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
struct intel_ring_buffer *ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
+
+ struct list_head link;
};
struct i915_fbc {
@@ -621,17 +670,9 @@ struct i915_fbc {
} no_fbc_reason;
};
-enum no_psr_reason {
- PSR_NO_SOURCE, /* Not supported on platform */
- PSR_NO_SINK, /* Not supported by panel */
- PSR_MODULE_PARAM,
- PSR_CRTC_NOT_ACTIVE,
- PSR_PWR_WELL_ENABLED,
- PSR_NOT_TILED,
- PSR_SPRITE_ENABLED,
- PSR_S3D_ENABLED,
- PSR_INTERLACED_ENABLED,
- PSR_HSW_NOT_DDIA,
+struct i915_psr {
+ bool sink_support;
+ bool source_ok;
};
enum intel_pch {
@@ -821,17 +862,20 @@ struct intel_gen6_power_mgmt {
struct work_struct work;
u32 pm_iir;
- /* On vlv we need to manually drop to Vmin with a delayed work. */
- struct delayed_work vlv_work;
-
/* The below variables an all the rps hw state are protected by
* dev->struct mutext. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 rpe_delay;
+ u8 rp1_delay;
+ u8 rp0_delay;
u8 hw_max;
+ int last_adj;
+ enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+
+ bool enabled;
struct delayed_work delayed_resume_work;
/*
@@ -868,11 +912,21 @@ struct intel_ilk_power_mgmt {
/* Power well structure for haswell */
struct i915_power_well {
- struct drm_device *device;
- spinlock_t lock;
/* power well enable/disable usage count */
int count;
- int i915_request;
+};
+
+#define I915_MAX_POWER_WELLS 1
+
+struct i915_power_domains {
+ /*
+ * Power wells needed for initialization at driver init and suspend
+ * time are on. They are kept on until after the first modeset.
+ */
+ bool init_power_on;
+
+ struct mutex lock;
+ struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
};
struct i915_dri1_state {
@@ -900,9 +954,11 @@ struct i915_ums_state {
int mm_suspended;
};
+#define MAX_L3_SLICES 2
struct intel_l3_parity {
- u32 *remap_info;
+ u32 *remap_info[MAX_L3_SLICES];
struct work_struct error_work;
+ int which_slice;
};
struct i915_gem_mm {
@@ -940,6 +996,15 @@ struct i915_gem_mm {
struct delayed_work retire_work;
/**
+ * When we detect an idle GPU, we want to turn on
+ * powersaving features. So once we see that there
+ * are no more requests outstanding and no more
+ * arrive within a small period of time, we fire
+ * off the idle_work.
+ */
+ struct delayed_work idle_work;
+
+ /**
* Are we in a non-interruptible section of code like
* modesetting?
*/
@@ -977,6 +1042,9 @@ struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+ /* Hang gpu twice in this window and your context gets banned */
+#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
+
struct timer_list hangcheck_timer;
/* For reset and error_state handling. */
@@ -985,7 +1053,8 @@ struct i915_gpu_error {
struct drm_i915_error_state *first_error;
struct work_struct work;
- unsigned long last_reset;
+
+ unsigned long missed_irq_rings;
/**
* State variable and reset counter controlling the reset flow
@@ -1025,6 +1094,9 @@ struct i915_gpu_error {
/* For gpu hang simulation. */
unsigned int stop_rings;
+
+ /* For missed irq/seqno simulation. */
+ unsigned int test_irq_rings;
};
enum modeset_restore {
@@ -1033,6 +1105,14 @@ enum modeset_restore {
MODESET_SUSPENDED,
};
+struct ddi_vbt_port_info {
+ uint8_t hdmi_level_shift;
+
+ uint8_t supports_dvi:1;
+ uint8_t supports_hdmi:1;
+ uint8_t supports_dp:1;
+};
+
struct intel_vbt_data {
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1058,10 +1138,17 @@ struct intel_vbt_data {
int edp_bpp;
struct edp_power_seq edp_pps;
+ /* MIPI DSI */
+ struct {
+ u16 panel_id;
+ } dsi;
+
int crt_ddc_pin;
int child_dev_num;
- struct child_device_config *child_dev;
+ union child_device_config *child_dev;
+
+ struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};
enum intel_ddb_partitioning {
@@ -1077,6 +1164,15 @@ struct intel_wm_level {
uint32_t fbc_val;
};
+struct hsw_wm_values {
+ uint32_t wm_pipe[3];
+ uint32_t wm_lp[3];
+ uint32_t wm_lp_spr[3];
+ uint32_t wm_linetime[3];
+ bool enable_fbc_wm;
+ enum intel_ddb_partitioning partitioning;
+};
+
/*
* This struct tracks the state needed for the Package C8+ feature.
*
@@ -1146,6 +1242,36 @@ struct i915_package_c8 {
} regsave;
};
+enum intel_pipe_crc_source {
+ INTEL_PIPE_CRC_SOURCE_NONE,
+ INTEL_PIPE_CRC_SOURCE_PLANE1,
+ INTEL_PIPE_CRC_SOURCE_PLANE2,
+ INTEL_PIPE_CRC_SOURCE_PF,
+ INTEL_PIPE_CRC_SOURCE_PIPE,
+ /* TV/DP on pre-gen5/vlv can't use the pipe source. */
+ INTEL_PIPE_CRC_SOURCE_TV,
+ INTEL_PIPE_CRC_SOURCE_DP_B,
+ INTEL_PIPE_CRC_SOURCE_DP_C,
+ INTEL_PIPE_CRC_SOURCE_DP_D,
+ INTEL_PIPE_CRC_SOURCE_AUTO,
+ INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+struct intel_pipe_crc_entry {
+ uint32_t frame;
+ uint32_t crc[5];
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR 128
+struct intel_pipe_crc {
+ spinlock_t lock;
+ bool opened; /* exclusive access to the result file */
+ struct intel_pipe_crc_entry *entries;
+ enum intel_pipe_crc_source source;
+ int head, tail;
+ wait_queue_head_t wq;
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1270,6 +1396,10 @@ typedef struct drm_i915_private {
struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
+#ifdef CONFIG_DEBUG_FS
+ struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
+#endif
+
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
@@ -1295,17 +1425,18 @@ typedef struct drm_i915_private {
* mchdev_lock in intel_pm.c */
struct intel_ilk_power_mgmt ips;
- /* Haswell power well */
- struct i915_power_well power_well;
+ struct i915_power_domains power_domains;
- enum no_psr_reason no_psr_reason;
+ struct i915_psr psr;
struct i915_gpu_error gpu_error;
struct drm_i915_gem_object *vlv_pctx;
+#ifdef CONFIG_DRM_I915_FBDEV
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
+#endif
/*
* The console may be contended at resume, but we don't
@@ -1318,6 +1449,7 @@ typedef struct drm_i915_private {
bool hw_contexts_disabled;
uint32_t hw_context_size;
+ struct list_head context_list;
u32 fdi_rx_config;
@@ -1335,6 +1467,9 @@ typedef struct drm_i915_private {
uint16_t spr_latency[5];
/* cursor */
uint16_t cur_latency[5];
+
+ /* current hardware state */
+ struct hsw_wm_values hw;
} wm;
struct i915_package_c8 pc8;
@@ -1398,8 +1533,6 @@ struct drm_i915_gem_object {
struct list_head ring_list;
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
- /** This object's place in the batchbuffer or on the eviction list */
- struct list_head exec_list;
/**
* This is set if the object is on the active lists (has pending
@@ -1485,13 +1618,6 @@ struct drm_i915_gem_object {
void *dma_buf_vmapping;
int vmapping_count;
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- struct hlist_node exec_node;
- unsigned long exec_handle;
- struct drm_i915_gem_exec_object2 *exec_entry;
-
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
@@ -1503,11 +1629,14 @@ struct drm_i915_gem_object {
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
+ /** References from framebuffers, locks out tiling changes. */
+ unsigned long framebuffer_references;
+
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
/** User space pin count and filp owning the pin */
- uint32_t user_pin_count;
+ unsigned long user_pin_count;
struct drm_file *pin_filp;
/** for phy allocated objects */
@@ -1558,48 +1687,55 @@ struct drm_i915_gem_request {
};
struct drm_i915_file_private {
+ struct drm_i915_private *dev_priv;
+
struct {
spinlock_t lock;
struct list_head request_list;
+ struct delayed_work idle_work;
} mm;
struct idr context_idr;
struct i915_ctx_hang_stats hang_stats;
+ atomic_t rps_wait_boost;
};
#define INTEL_INFO(dev) (to_i915(dev)->info)
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
+#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
+#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
- (dev)->pci_device == 0x0152 || \
- (dev)->pci_device == 0x015a)
-#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
- (dev)->pci_device == 0x0106 || \
- (dev)->pci_device == 0x010A)
+#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
+ (dev)->pdev->device == 0x0152 || \
+ (dev)->pdev->device == 0x015a)
+#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
+ (dev)->pdev->device == 0x0106 || \
+ (dev)->pdev->device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
- ((dev)->pci_device & 0xFF00) == 0x0C00)
+ ((dev)->pdev->device & 0xFF00) == 0x0C00)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
- ((dev)->pci_device & 0xFF00) == 0x0A00)
+ ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
+ ((dev)->pdev->device & 0x00F0) == 0x0020)
+#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
/*
* The genX designation typically refers to the render engine, so render
@@ -1614,9 +1750,13 @@ struct drm_i915_file_private {
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
-#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
-#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
+#define RENDER_RING (1<<RCS)
+#define BSD_RING (1<<VCS)
+#define BLT_RING (1<<BCS)
+#define VEBOX_RING (1<<VECS)
+#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
+#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
@@ -1638,7 +1778,6 @@ struct drm_i915_file_private {
#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
@@ -1651,6 +1790,7 @@ struct drm_i915_file_private {
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
+#define HAS_PSR(dev) (IS_HASWELL(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -1666,35 +1806,14 @@ struct drm_i915_file_private {
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
-#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
-
-#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+/* DPF == dynamic parity feature */
+#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
#define GT_FREQUENCY_MULTIPLIER 50
#include "i915_trace.h"
-/**
- * RC6 is a special power stage which allows the GPU to enter an very
- * low-voltage mode when idle, using down to 0V while at this stage. This
- * stage is entered automatically when the GPU is idle when RC6 support is
- * enabled, and as soon as new workload arises GPU wakes up automatically as well.
- *
- * There are different RC6 modes available in Intel GPU, which differentiate
- * among each other with the latency required to enter and leave RC6 and
- * voltage consumed by the GPU in different states.
- *
- * The combination of the following flags define which states GPU is allowed
- * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
- * RC6pp is deepest RC6. Their support by hardware varies according to the
- * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
- * which brings the most power savings; deeper states save more power, but
- * require higher latency to switch to and wake up.
- */
-#define INTEL_RC6_ENABLE (1<<0)
-#define INTEL_RC6p_ENABLE (1<<1)
-#define INTEL_RC6pp_ENABLE (1<<2)
-
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
@@ -1765,12 +1884,13 @@ extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
+extern void intel_uncore_fini(struct drm_device *dev);
void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -1822,14 +1942,11 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
-int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm);
void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@ -1868,9 +1985,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring);
-
+void i915_vma_move_to_active(struct i915_vma *vma,
+ struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -1911,7 +2027,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
}
}
-void i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
@@ -1931,11 +2047,11 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_l3_remap(struct drm_device *dev);
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
-int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
@@ -1962,6 +2078,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
@@ -1993,6 +2110,9 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
+
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
@@ -2029,7 +2149,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
map_and_fenceable, nonblocking);
}
-#undef obj_to_ggtt
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
@@ -2065,6 +2184,8 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj);
+void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
@@ -2090,6 +2211,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
unsigned cache_level,
bool mappable,
bool nonblock);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
@@ -2129,6 +2251,11 @@ int i915_verify_lists(struct drm_device *dev);
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+void intel_display_crc_init(struct drm_device *dev);
+#else
+static inline void intel_display_crc_init(struct drm_device *dev) {}
+#endif
/* i915_gpu_error.c */
__printf(2, 3)
@@ -2182,15 +2309,30 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
extern void intel_i2c_reset(struct drm_device *dev);
/* intel_opregion.c */
+struct intel_encoder;
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+ bool enable);
+extern int intel_opregion_notify_adapter(struct drm_device *dev,
+ pci_power_t state);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+ return 0;
+}
+static inline int
+intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+ return 0;
+}
#endif
/* intel_acpi.c */
@@ -2252,8 +2394,16 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
@@ -2262,37 +2412,21 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
-#define __i915_read(x) \
- u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
-
-#define __i915_write(x) \
- void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
-
-#define I915_READ8(reg) i915_read8(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true)
-
-#define I915_READ16(reg) i915_read16(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false)
-
-#define I915_READ(reg) i915_read32(dev_priv, (reg), true)
-#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false)
-
-#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true)
-#define I915_READ64(reg) i915_read64(dev_priv, (reg), true)
+#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
+
+#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
+
+#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
+#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+
+#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
+#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
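
With the hunk above, I915_READ/I915_WRITE and friends stop calling the i915_read##x/i915_write##x helpers and instead dispatch through per-width function pointers in dev_priv->uncore.funcs. The following is a minimal userspace model of that kind of accessor vtable, assuming nothing beyond what the macros show; the struct and function names here are made up for the sketch.

#include <stdint.h>
#include <stdio.h>

struct fake_priv;

/* Width-specific accessors gathered in one table, mirroring the shape of
 * dev_priv->uncore.funcs used by the new macros. */
struct fake_uncore_funcs {
        uint32_t (*mmio_readl)(struct fake_priv *priv, uint32_t reg, int trace);
        void (*mmio_writel)(struct fake_priv *priv, uint32_t reg,
                            uint32_t val, int trace);
};

struct fake_priv {
        struct fake_uncore_funcs funcs;
        uint32_t regs[256];             /* stands in for the MMIO BAR */
};

static uint32_t plain_readl(struct fake_priv *priv, uint32_t reg, int trace)
{
        if (trace)
                printf("read  0x%03x\n", reg);
        return priv->regs[reg % 256];
}

static void plain_writel(struct fake_priv *priv, uint32_t reg,
                         uint32_t val, int trace)
{
        if (trace)
                printf("write 0x%03x = 0x%08x\n", reg, val);
        priv->regs[reg % 256] = val;
}

#define FAKE_READ(p, reg)       ((p)->funcs.mmio_readl((p), (reg), 1))
#define FAKE_WRITE(p, reg, val) ((p)->funcs.mmio_writel((p), (reg), (val), 1))

int main(void)
{
        struct fake_priv priv = {
                .funcs = { .mmio_readl = plain_readl,
                           .mmio_writel = plain_writel },
        };

        FAKE_WRITE(&priv, 0x42, 0xdeadbeef);
        printf("readback 0x%08x\n", FAKE_READ(&priv, 0x42));
        return 0;
}

Swapping the table entries (for example, for a platform that needs forcewake around every access) then changes behaviour without touching any I915_READ call site.
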
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cdfb9da0e4ce..e7b39d731db6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -41,6 +41,9 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
bool force);
static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool readonly);
+static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
@@ -61,8 +64,8 @@ static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
struct shrink_control *sc);
-static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -258,7 +261,7 @@ i915_gem_dumb_create(struct drm_file *file,
struct drm_mode_create_dumb *args)
{
/* have to work out size/pitch and return them */
- args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+ args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, dev,
args->size, &args->handle);
@@ -432,11 +435,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens. */
needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
- if (i915_gem_obj_bound_any(obj)) {
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- if (ret)
- return ret;
- }
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
}
ret = i915_gem_object_get_pages(obj);
@@ -748,11 +749,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* optimizes for the case when the gpu will use the data
* right away and we therefore have to clflush anyway. */
needs_clflush_after = cpu_write_needs_clflush(obj);
- if (i915_gem_obj_bound_any(obj)) {
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- return ret;
- }
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
+ return ret;
}
/* Same trick applies to invalidate partially written cachelines read
* before writing. */
@@ -966,12 +965,31 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
ret = 0;
- if (seqno == ring->outstanding_lazy_request)
+ if (seqno == ring->outstanding_lazy_seqno)
ret = i915_add_request(ring, NULL);
return ret;
}
+static void fake_irq(unsigned long data)
+{
+ wake_up_process((struct task_struct *)data);
+}
+
+static bool missed_irq(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+}
+
+static bool can_wait_boost(struct drm_i915_file_private *file_priv)
+{
+ if (file_priv == NULL)
+ return true;
+
+ return !atomic_xchg(&file_priv->rps_wait_boost, true);
+}
+
/**
* __wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
@@ -992,13 +1010,14 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
*/
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
unsigned reset_counter,
- bool interruptible, struct timespec *timeout)
+ bool interruptible,
+ struct timespec *timeout,
+ struct drm_i915_file_private *file_priv)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
- struct timespec before, now, wait_time={1,0};
- unsigned long timeout_jiffies;
- long end;
- bool wait_forever = true;
+ struct timespec before, now;
+ DEFINE_WAIT(wait);
+ long timeout_jiffies;
int ret;
WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1006,51 +1025,79 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
- trace_i915_gem_request_wait_begin(ring, seqno);
+ timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
- if (timeout != NULL) {
- wait_time = *timeout;
- wait_forever = false;
+ if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+ gen6_rps_boost(dev_priv);
+ if (file_priv)
+ mod_delayed_work(dev_priv->wq,
+ &file_priv->mm.idle_work,
+ msecs_to_jiffies(100));
}
- timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
-
- if (WARN_ON(!ring->irq_get(ring)))
+ if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
+ WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
- /* Record current time in case interrupted by signal, or wedged * */
+ /* Record current time in case interrupted by signal, or wedged */
+ trace_i915_gem_request_wait_begin(ring, seqno);
getrawmonotonic(&before);
+ for (;;) {
+ struct timer_list timer;
+ unsigned long expire;
-#define EXIT_COND \
- (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
- i915_reset_in_progress(&dev_priv->gpu_error) || \
- reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
- do {
- if (interruptible)
- end = wait_event_interruptible_timeout(ring->irq_queue,
- EXIT_COND,
- timeout_jiffies);
- else
- end = wait_event_timeout(ring->irq_queue, EXIT_COND,
- timeout_jiffies);
+ prepare_to_wait(&ring->irq_queue, &wait,
+ interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
- if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
- end = -EAGAIN;
+ if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+ /* ... but upgrade the -EAGAIN to an -EIO if the gpu
+ * is truly gone. */
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+ if (ret == 0)
+ ret = -EAGAIN;
+ break;
+ }
- /* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
- * gone. */
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
- if (ret)
- end = ret;
- } while (end == 0 && wait_forever);
+ if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+ ret = 0;
+ break;
+ }
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ if (timeout_jiffies <= 0) {
+ ret = -ETIME;
+ break;
+ }
+
+ timer.function = NULL;
+ if (timeout || missed_irq(dev_priv, ring)) {
+ setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
+ expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies);
+ mod_timer(&timer, expire);
+ }
+
+ io_schedule();
+
+ if (timeout)
+ timeout_jiffies = expire - jiffies;
+
+ if (timer.function) {
+ del_singleshot_timer_sync(&timer);
+ destroy_timer_on_stack(&timer);
+ }
+ }
getrawmonotonic(&now);
+ trace_i915_gem_request_wait_end(ring, seqno);
ring->irq_put(ring);
- trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
+
+ finish_wait(&ring->irq_queue, &wait);
if (timeout) {
struct timespec sleep_time = timespec_sub(now, before);
@@ -1059,17 +1106,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
set_normalized_timespec(timeout, 0, 0);
}
- switch (end) {
- case -EIO:
- case -EAGAIN: /* Wedged */
- case -ERESTARTSYS: /* Signal */
- return (int)end;
- case 0: /* Timeout */
- return -ETIME;
- default: /* Completed */
- WARN_ON(end < 0); /* We're not aware of other errors */
- return 0;
- }
+ return ret;
}
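
__wait_seqno above is rewritten from wait_event_*_timeout() into an open-coded prepare_to_wait()/io_schedule()/finish_wait() loop that re-checks the seqno, the reset counter, pending signals and the remaining timeout on every wakeup. The same re-check-everything-in-a-loop shape can be sketched in plain userspace C with pthreads; the names below are invented and this is only an analogy, not the kernel API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned int completed_seqno;    /* stands in for ring->get_seqno() */

static void *fake_gpu(void *arg)
{
        struct timespec delay = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000 };

        (void)arg;
        nanosleep(&delay, NULL);        /* "the GPU" finishes after ~50ms */
        pthread_mutex_lock(&lock);
        completed_seqno = 10;
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* Re-check the completion condition on every wakeup and turn a missed
 * deadline into an error, like the -ETIME path in the hunk above.
 * The timeout is whole seconds only, to keep the sketch short. */
static int wait_seqno(unsigned int seqno, int timeout_s)
{
        struct timespec deadline;
        int ret = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += timeout_s;

        pthread_mutex_lock(&lock);
        while (completed_seqno < seqno) {
                ret = pthread_cond_timedwait(&cond, &lock, &deadline);
                if (ret == ETIMEDOUT) {
                        ret = -1;
                        break;
                }
                ret = 0;        /* spurious wakeup: loop and re-check */
        }
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, fake_gpu, NULL);
        printf("wait for seqno 5: %d\n", wait_seqno(5, 1));
        pthread_join(t, NULL);
        return 0;
}

Build with cc -pthread. The kernel version additionally arms an on-stack timer when a ring has previously missed an interrupt, so the loop still gets woken periodically to poll the seqno.
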
/**
@@ -1097,7 +1134,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
return __wait_seqno(ring, seqno,
atomic_read(&dev_priv->gpu_error.reset_counter),
- interruptible, NULL);
+ interruptible, NULL, NULL);
}
static int
@@ -1147,6 +1184,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
*/
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+ struct drm_file *file,
bool readonly)
{
struct drm_device *dev = obj->base.dev;
@@ -1173,7 +1211,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
mutex_lock(&dev->struct_mutex);
if (ret)
return ret;
@@ -1222,7 +1260,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
- ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+ ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
if (ret)
goto unref;
@@ -1690,13 +1728,13 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
return 0;
}
-static long
+static unsigned long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
bool purgeable_only)
{
struct list_head still_bound_list;
struct drm_i915_gem_object *obj, *next;
- long count = 0;
+ unsigned long count = 0;
list_for_each_entry_safe(obj, next,
&dev_priv->mm.unbound_list,
@@ -1762,13 +1800,13 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
return count;
}
-static long
+static unsigned long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
return __i915_gem_shrink(dev_priv, target, true);
}
-static long
+static unsigned long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj, *next;
@@ -1778,9 +1816,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
global_list) {
- if (obj->pages_pin_count == 0)
+ if (i915_gem_object_put_pages(obj) == 0)
freed += obj->base.size >> PAGE_SHIFT;
- i915_gem_object_put_pages(obj);
}
return freed;
}
@@ -1865,6 +1902,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
sg->length += PAGE_SIZE;
}
last_pfn = page_to_pfn(page);
+
+ /* Check that the i965g/gm workaround works. */
+ WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
#ifdef CONFIG_SWIOTLB
if (!swiotlb_nr_tbl())
@@ -1918,7 +1958,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return 0;
}
-void
+static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
@@ -1957,6 +1997,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
}
+void i915_vma_move_to_active(struct i915_vma *vma,
+ struct intel_ring_buffer *ring)
+{
+ list_move_tail(&vma->mm_list, &vma->vm->active_list);
+ return i915_gem_object_move_to_active(vma->obj, ring);
+}
+
static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
@@ -2078,11 +2125,10 @@ int __i915_add_request(struct intel_ring_buffer *ring,
if (ret)
return ret;
- request = kmalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
+ request = ring->preallocated_lazy_request;
+ if (WARN_ON(request == NULL))
return -ENOMEM;
-
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
@@ -2091,17 +2137,13 @@ int __i915_add_request(struct intel_ring_buffer *ring,
request_ring_position = intel_ring_get_tail(ring);
ret = ring->add_request(ring);
- if (ret) {
- kfree(request);
+ if (ret)
return ret;
- }
request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->head = request_start;
request->tail = request_ring_position;
- request->ctx = ring->last_context;
- request->batch_obj = obj;
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
@@ -2109,7 +2151,12 @@ int __i915_add_request(struct intel_ring_buffer *ring,
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
+ request->batch_obj = obj;
+ /* Hold a reference to the current context so that we can inspect
+ * it later in case a hangcheck error event fires.
+ */
+ request->ctx = ring->last_context;
if (request->ctx)
i915_gem_context_reference(request->ctx);
@@ -2129,12 +2176,14 @@ int __i915_add_request(struct intel_ring_buffer *ring,
}
trace_i915_gem_request_add(ring, request->seqno);
- ring->outstanding_lazy_request = 0;
+ ring->outstanding_lazy_seqno = 0;
+ ring->preallocated_lazy_request = NULL;
if (!dev_priv->ums.mm_suspended) {
i915_queue_hangcheck(ring->dev);
if (was_empty) {
+ cancel_delayed_work_sync(&dev_priv->mm.idle_work);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
@@ -2156,10 +2205,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
return;
spin_lock(&file_priv->mm.lock);
- if (request->file_priv) {
- list_del(&request->client_list);
- request->file_priv = NULL;
- }
+ list_del(&request->client_list);
+ request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
}
@@ -2224,6 +2271,21 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
return false;
}
+static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
+{
+ const unsigned long elapsed = get_seconds() - hs->guilty_ts;
+
+ if (hs->banned)
+ return true;
+
+ if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+ DRM_ERROR("context hanging too fast, declaring banned!\n");
+ return true;
+ }
+
+ return false;
+}
+
static void i915_set_reset_status(struct intel_ring_buffer *ring,
struct drm_i915_gem_request *request,
u32 acthd)
@@ -2260,10 +2322,13 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
hs = &request->file_priv->hang_stats;
if (hs) {
- if (guilty)
+ if (guilty) {
+ hs->banned = i915_context_is_banned(hs);
hs->batch_active++;
- else
+ hs->guilty_ts = get_seconds();
+ } else {
hs->batch_pending++;
+ }
}
}
@@ -2341,6 +2406,8 @@ void i915_gem_reset(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);
+ i915_gem_cleanup_ringbuffer(dev);
+
i915_gem_restore_fences(dev);
}
@@ -2405,57 +2472,53 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
WARN_ON(i915_verify_lists(ring->dev));
}
-void
+bool
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
+ bool idle = true;
int i;
- for_each_ring(ring, dev_priv, i)
+ for_each_ring(ring, dev_priv, i) {
i915_gem_retire_requests_ring(ring);
+ idle &= list_empty(&ring->request_list);
+ }
+
+ if (idle)
+ mod_delayed_work(dev_priv->wq,
+ &dev_priv->mm.idle_work,
+ msecs_to_jiffies(100));
+
+ return idle;
}
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
- drm_i915_private_t *dev_priv;
- struct drm_device *dev;
- struct intel_ring_buffer *ring;
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), mm.retire_work.work);
+ struct drm_device *dev = dev_priv->dev;
bool idle;
- int i;
-
- dev_priv = container_of(work, drm_i915_private_t,
- mm.retire_work.work);
- dev = dev_priv->dev;
/* Come back later if the device is busy... */
- if (!mutex_trylock(&dev->struct_mutex)) {
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
- round_jiffies_up_relative(HZ));
- return;
- }
-
- i915_gem_retire_requests(dev);
-
- /* Send a periodic flush down the ring so we don't hold onto GEM
- * objects indefinitely.
- */
- idle = true;
- for_each_ring(ring, dev_priv, i) {
- if (ring->gpu_caches_dirty)
- i915_add_request(ring, NULL);
-
- idle &= list_empty(&ring->request_list);
+ idle = false;
+ if (mutex_trylock(&dev->struct_mutex)) {
+ idle = i915_gem_retire_requests(dev);
+ mutex_unlock(&dev->struct_mutex);
}
-
- if (!dev_priv->ums.mm_suspended && !idle)
+ if (!idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
- if (idle)
- intel_mark_idle(dev);
+}
- mutex_unlock(&dev->struct_mutex);
+static void
+i915_gem_idle_work_handler(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), mm.idle_work.work);
+
+ intel_mark_idle(dev_priv->dev);
}
/**
@@ -2553,7 +2616,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
if (timeout)
args->timeout_ns = timespec_to_ns(timeout);
return ret;
@@ -2600,6 +2663,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ trace_i915_gem_ring_sync_to(from, to, seqno);
ret = to->sync_to(to, from, seqno);
if (!ret)
/* We use last_read_seqno because sync_to()
@@ -2641,11 +2705,17 @@ int i915_vma_unbind(struct i915_vma *vma)
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret;
+ /* For now we only ever use 1 vma per object */
+ WARN_ON(!list_is_singular(&obj->vma_list));
+
if (list_empty(&vma->vma_link))
return 0;
- if (!drm_mm_node_allocated(&vma->node))
- goto destroy;
+ if (!drm_mm_node_allocated(&vma->node)) {
+ i915_gem_vma_destroy(vma);
+
+ return 0;
+ }
if (obj->pin_count)
return -EBUSY;
@@ -2685,13 +2755,10 @@ int i915_vma_unbind(struct i915_vma *vma)
drm_mm_remove_node(&vma->node);
-destroy:
i915_gem_vma_destroy(vma);
/* Since the unbound list is global, only move to that list if
- * no more VMAs exist.
- * NB: Until we have real VMAs there will only ever be one */
- WARN_ON(!list_empty(&obj->vma_list));
+ * no more VMAs exist. */
if (list_empty(&obj->vma_list))
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
@@ -3389,8 +3456,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* And bump the LRU for this access */
if (i915_gem_object_is_inactive(obj)) {
- struct i915_vma *vma = i915_gem_obj_to_vma(obj,
- &dev_priv->gtt.base);
+ struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
if (vma)
list_move_tail(&vma->mm_list,
&dev_priv->gtt.base.inactive_list);
@@ -3761,7 +3827,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (seqno == 0)
return 0;
- ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -3865,6 +3931,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ if (obj->user_pin_count == ULONG_MAX) {
+ ret = -EBUSY;
+ goto out;
+ }
+
if (obj->user_pin_count == 0) {
ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
if (ret)
@@ -4015,7 +4086,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
{
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
- INIT_LIST_HEAD(&obj->exec_list);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
@@ -4087,13 +4157,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return obj;
}
-int i915_gem_init_object(struct drm_gem_object *obj)
-{
- BUG();
-
- return 0;
-}
-
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4147,9 +4210,20 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_object_free(obj);
}
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == vm)
+ return vma;
+
+ return NULL;
+}
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -4169,76 +4243,103 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
return vma;
}
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma)
+ vma = __i915_gem_vma_create(obj, vm);
+
+ return vma;
+}
+
void i915_gem_vma_destroy(struct i915_vma *vma)
{
WARN_ON(vma->node.allocated);
+
+ /* Keep the vma as a placeholder in the execbuffer reservation lists */
+ if (!list_empty(&vma->exec_list))
+ return;
+
list_del(&vma->vma_link);
+
kfree(vma);
}
int
-i915_gem_idle(struct drm_device *dev)
+i915_gem_suspend(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret = 0;
- if (dev_priv->ums.mm_suspended) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
+ mutex_lock(&dev->struct_mutex);
+ if (dev_priv->ums.mm_suspended)
+ goto err;
ret = i915_gpu_idle(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto err;
+
i915_gem_retire_requests(dev);
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
- del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
-
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
- /* Cancel the retire work handler, which should be idle now. */
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ * And not confound ums.mm_suspended!
+ */
+ dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
+ DRIVER_MODESET);
+ mutex_unlock(&dev->struct_mutex);
+
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+ cancel_delayed_work_sync(&dev_priv->mm.idle_work);
return 0;
+
+err:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
-void i915_gem_l3_remap(struct drm_device *dev)
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 misccpctl;
- int i;
-
- if (!HAS_L3_GPU_CACHE(dev))
- return;
+ u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
+ u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+ int i, ret;
- if (!dev_priv->l3_parity.remap_info)
- return;
+ if (!HAS_L3_DPF(dev) || !remap_info)
+ return 0;
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- POSTING_READ(GEN7_MISCCPCTL);
+ ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+ if (ret)
+ return ret;
+ /*
+ * Note: We do not worry about the concurrent register cacheline hang
+ * here because no other code should access these registers other than
+ * at initialization time.
+ */
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
- u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
- if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
- DRM_DEBUG("0x%x was already programmed to %x\n",
- GEN7_L3LOG_BASE + i, remap);
- if (remap && !dev_priv->l3_parity.remap_info[i/4])
- DRM_DEBUG_DRIVER("Clearing remapped register\n");
- I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, reg_base + i);
+ intel_ring_emit(ring, remap_info[i/4]);
}
- /* Make sure all the writes land before disabling dop clock gating */
- POSTING_READ(GEN7_L3LOG_BASE);
+ intel_ring_advance(ring);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ return ret;
}
void i915_gem_init_swizzling(struct drm_device *dev)
@@ -4330,7 +4431,7 @@ int
i915_gem_init_hw(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
@@ -4338,20 +4439,26 @@ i915_gem_init_hw(struct drm_device *dev)
if (dev_priv->ellc_size)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
+ if (IS_HSW_GT3(dev))
+ I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
+ else
+ I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+
if (HAS_PCH_NOP(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
I915_WRITE(GEN7_MSG_CTL, temp);
}
- i915_gem_l3_remap(dev);
-
i915_gem_init_swizzling(dev);
ret = i915_gem_init_rings(dev);
if (ret)
return ret;
+ for (i = 0; i < NUM_L3_SLICES(dev); i++)
+ i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+
/*
* XXX: There was some w/a described somewhere suggesting loading
* contexts before PPGTT.
@@ -4454,26 +4561,12 @@ int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
drm_irq_uninstall(dev);
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_idle(dev);
-
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- * And not confound ums.mm_suspended!
- */
- if (ret != 0)
- dev_priv->ums.mm_suspended = 1;
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ return i915_gem_suspend(dev);
}
void
@@ -4484,11 +4577,9 @@ i915_gem_lastclose(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_idle(dev);
+ ret = i915_gem_suspend(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
- mutex_unlock(&dev->struct_mutex);
}
static void
@@ -4523,6 +4614,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->vm_list);
i915_init_vm(dev_priv, &dev_priv->gtt.base);
+ INIT_LIST_HEAD(&dev_priv->context_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4532,6 +4624,8 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
+ INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+ i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
@@ -4582,7 +4676,7 @@ static int i915_gem_init_phys_object(struct drm_device *dev,
if (dev_priv->mm.phys_objs[id - 1] || !size)
return 0;
- phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
+ phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
if (!phys_obj)
return -ENOMEM;
@@ -4756,6 +4850,8 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ cancel_delayed_work_sync(&file_priv->mm.idle_work);
+
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
@@ -4773,6 +4869,38 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
+static void
+i915_gem_file_idle_work_handler(struct work_struct *work)
+{
+ struct drm_i915_file_private *file_priv =
+ container_of(work, typeof(*file_priv), mm.idle_work.work);
+
+ atomic_set(&file_priv->rps_wait_boost, false);
+}
+
+int i915_gem_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ file->driver_priv = file_priv;
+ file_priv->dev_priv = dev->dev_private;
+
+ spin_lock_init(&file_priv->mm.lock);
+ INIT_LIST_HEAD(&file_priv->mm.request_list);
+ INIT_DELAYED_WORK(&file_priv->mm.idle_work,
+ i915_gem_file_idle_work_handler);
+
+ idr_init(&file_priv->context_idr);
+
+ return 0;
+}
+
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
if (!mutex_is_locked(mutex))
@@ -4823,6 +4951,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
if (unlock)
mutex_unlock(&dev->struct_mutex);
+
return count;
}
@@ -4859,11 +4988,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
- struct drm_i915_private *dev_priv = o->base.dev->dev_private;
- struct i915_address_space *vm;
+ struct i915_vma *vma;
- list_for_each_entry(vm, &dev_priv->vm_list, global_link)
- if (i915_gem_obj_bound(o, vm))
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (drm_mm_node_allocated(&vma->node))
return true;
return false;
@@ -4895,7 +5023,6 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
- int nr_to_scan = sc->nr_to_scan;
unsigned long freed;
bool unlock = true;
@@ -4909,38 +5036,30 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
unlock = false;
}
- freed = i915_gem_purge(dev_priv, nr_to_scan);
- if (freed < nr_to_scan)
- freed += __i915_gem_shrink(dev_priv, nr_to_scan,
- false);
- if (freed < nr_to_scan)
+ freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+ if (freed < sc->nr_to_scan)
+ freed += __i915_gem_shrink(dev_priv,
+ sc->nr_to_scan - freed,
+ false);
+ if (freed < sc->nr_to_scan)
freed += i915_gem_shrink_all(dev_priv);
if (unlock)
mutex_unlock(&dev->struct_mutex);
+
return freed;
}
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm)
- return vma;
- return NULL;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- struct i915_vma *vma;
+ if (WARN_ON(list_empty(&obj->vma_list)))
+ return NULL;
- vma = i915_gem_obj_to_vma(obj, vm);
- if (!vma)
- vma = i915_gem_vma_create(obj, vm);
+ vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
+ if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
+ return NULL;
return vma;
}
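
The i915_gem.c changes above finish the move to one struct i915_vma per (object, address space) pair: i915_gem_obj_to_vma() walks obj->vma_list looking for a matching vm, and i915_gem_obj_lookup_or_create_vma() creates the vma only when that walk finds nothing. A self-contained sketch of that lookup-or-create idiom over a singly linked list follows; all names are invented, and it deliberately ignores locking and every error path except allocation failure.

#include <stdio.h>
#include <stdlib.h>

struct address_space {
        const char *name;
};

/* One mapping per (object, address space) pair, like struct i915_vma. */
struct mapping {
        struct address_space *vm;
        struct mapping *next;
};

struct object {
        struct mapping *mappings;       /* plays the role of obj->vma_list */
};

static struct mapping *obj_to_mapping(struct object *obj,
                                      struct address_space *vm)
{
        struct mapping *m;

        for (m = obj->mappings; m; m = m->next)
                if (m->vm == vm)
                        return m;
        return NULL;
}

static struct mapping *obj_lookup_or_create_mapping(struct object *obj,
                                                    struct address_space *vm)
{
        struct mapping *m = obj_to_mapping(obj, vm);

        if (!m) {
                m = calloc(1, sizeof(*m));
                if (!m)
                        return NULL;
                m->vm = vm;
                m->next = obj->mappings;
                obj->mappings = m;
        }
        return m;
}

int main(void)
{
        struct address_space ggtt = { "ggtt" }, ppgtt = { "ppgtt" };
        struct object obj = { NULL };

        struct mapping *a = obj_lookup_or_create_mapping(&obj, &ggtt);
        struct mapping *b = obj_lookup_or_create_mapping(&obj, &ggtt);
        struct mapping *c = obj_lookup_or_create_mapping(&obj, &ppgtt);

        printf("ggtt mapping reused: %d, ppgtt mapping distinct: %d\n",
               a == b, a != c);
        free(c);
        free(a);
        return 0;
}

The reason the driver funnels creation through a single lookup-or-create helper is visible in the sketch too: as long as every caller goes through it, an (object, vm) pair can never end up with duplicate mappings.
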
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 403309c2a7d6..cc619c138777 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -73,7 +73,7 @@
*
* There are two confusing terms used above:
* The "current context" means the context which is currently running on the
- * GPU. The GPU has loaded it's state already and has stored away the gtt
+ * GPU. The GPU has loaded its state already and has stored away the gtt
* offset of the BO. The GPU is not actively referencing the data at this
* offset, but it will on the next context switch. The only way to avoid this
* is to do a GPU reset.
@@ -129,6 +129,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
struct i915_hw_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
+ list_del(&ctx->link);
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
}
@@ -147,6 +148,7 @@ create_hw_context(struct drm_device *dev,
kref_init(&ctx->ref);
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+ INIT_LIST_HEAD(&ctx->link);
if (ctx->obj == NULL) {
kfree(ctx);
DRM_DEBUG_DRIVER("Context object allocated failed\n");
@@ -166,6 +168,7 @@ create_hw_context(struct drm_device *dev,
* assertion in the context switch code.
*/
ctx->ring = &dev_priv->ring[RCS];
+ list_add_tail(&ctx->link, &dev_priv->context_list);
/* Default context will never have a file_priv */
if (file_priv == NULL)
@@ -178,6 +181,10 @@ create_hw_context(struct drm_device *dev,
ctx->file_priv = file_priv;
ctx->id = ret;
+ /* NB: Mark all slices as needing a remap so that when the context first
+ * loads it will restore whatever remap state already exists. If there
+ * is no remap info, it will be a NOP. */
+ ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
return ctx;
@@ -213,7 +220,6 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* may not be available. To avoid this we always pin the
* default context.
*/
- dev_priv->ring[RCS].default_context = ctx;
ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -226,6 +232,8 @@ static int create_default_context(struct drm_i915_private *dev_priv)
goto err_unpin;
}
+ dev_priv->ring[RCS].default_context = ctx;
+
DRM_DEBUG_DRIVER("Default HW context loaded\n");
return 0;
@@ -281,16 +289,24 @@ void i915_gem_context_fini(struct drm_device *dev)
* other code, leading to spurious errors. */
intel_gpu_reset(dev);
- i915_gem_object_unpin(dctx->obj);
-
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
* i915_gem_context_fini() will be called after gpu_idle() has switched
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
- drm_gem_object_unreference(&dctx->obj->base);
+ WARN_ON(!dev_priv->ring[RCS].last_context);
+ if (dev_priv->ring[RCS].last_context == dctx) {
+ /* Fake switch to NULL context */
+ WARN_ON(dctx->obj->active);
+ i915_gem_object_unpin(dctx->obj);
+ i915_gem_context_unreference(dctx);
+ }
+
+ i915_gem_object_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
+ dev_priv->ring[RCS].default_context = NULL;
+ dev_priv->ring[RCS].last_context = NULL;
}
static int context_idr_cleanup(int id, void *p, void *data)
@@ -393,11 +409,11 @@ static int do_switch(struct i915_hw_context *to)
struct intel_ring_buffer *ring = to->ring;
struct i915_hw_context *from = ring->last_context;
u32 hw_flags = 0;
- int ret;
+ int ret, i;
BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
- if (from == to)
+ if (from == to && !to->remap_slice)
return 0;
ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
@@ -420,8 +436,6 @@ static int do_switch(struct i915_hw_context *to)
if (!to->is_initialized || is_default_context(to))
hw_flags |= MI_RESTORE_INHIBIT;
- else if (WARN_ON_ONCE(from == to)) /* not yet expected */
- hw_flags |= MI_FORCE_RESTORE;
ret = mi_set_context(ring, to, hw_flags);
if (ret) {
@@ -429,6 +443,18 @@ static int do_switch(struct i915_hw_context *to)
return ret;
}
+ for (i = 0; i < MAX_L3_SLICES; i++) {
+ if (!(to->remap_slice & (1<<i)))
+ continue;
+
+ ret = i915_gem_l3_remap(ring, i);
+ /* If it failed, try again next round */
+ if (ret)
+ DRM_DEBUG_DRIVER("L3 remapping failed\n");
+ else
+ to->remap_slice &= ~(1<<i);
+ }
+
/* The backing object for the context is done after switching to the
* *next* context. Therefore we cannot retire the previous context until
* the next context has already started running. In fact, the below code
@@ -436,11 +462,8 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
- struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
- struct i915_address_space *ggtt = &dev_priv->gtt.base;
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
- i915_gem_object_move_to_active(from->obj, ring);
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -451,17 +474,7 @@ static int do_switch(struct i915_hw_context *to)
from->obj->dirty = 1;
BUG_ON(from->obj->ring != ring);
- ret = i915_add_request(ring, NULL);
- if (ret) {
- /* Too late, we've already scheduled a context switch.
- * Try to undo the change so that the hw state is
- * consistent with out tracking. In case of emergency,
- * scream.
- */
- WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
- return ret;
- }
-
+ /* obj is kept alive until the next request by its active ref */
i915_gem_object_unpin(from->obj);
i915_gem_context_unreference(from);
}
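
In do_switch() above, each context carries a remap_slice bitmask; a bit is cleared only when i915_gem_l3_remap() succeeds for that slice, so a failed remap is simply retried on a later context switch. A tiny standalone sketch of that clear-on-success retry idiom follows; fake_remap() is an invented stand-in that fails on its first attempt purely so the retry path is exercised.

#include <stdio.h>

/* Invented stand-in for the per-slice remap; fails once, then succeeds. */
static int fake_remap(int slice)
{
        static int attempts;

        (void)slice;
        return (attempts++ == 0) ? -1 : 0;
}

/* Mirrors the loop in do_switch(): try each pending slice, clear its bit
 * only on success, and leave it set so a later pass retries it. */
static unsigned int remap_pending(unsigned int pending, int nslices)
{
        int i;

        for (i = 0; i < nslices; i++) {
                if (!(pending & (1u << i)))
                        continue;
                if (fake_remap(i) == 0)
                        pending &= ~(1u << i);
                else
                        printf("slice %d remap failed, will retry\n", i);
        }
        return pending;
}

int main(void)
{
        unsigned int pending = (1u << 2) - 1;   /* both slices need a remap */
        int pass = 0;

        while (pending) {
                pending = remap_pending(pending, 2);
                printf("after pass %d pending mask = 0x%x\n", ++pass, pending);
        }
        return 0;
}
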
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 91b700155850..b7376533633d 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -37,6 +37,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
if (vma->obj->pin_count)
return false;
+ if (WARN_ON(!list_empty(&vma->exec_list)))
+ return false;
+
list_add(&vma->exec_list, unwind);
return drm_mm_scan_add_block(&vma->node);
}
@@ -113,7 +116,7 @@ none:
}
/* We expect the caller to unpin, evict all and try again, or give up.
- * So calling i915_gem_evict_everything() is unnecessary.
+ * So calling i915_gem_evict_vm() is unnecessary.
*/
return -ENOSPC;
@@ -152,12 +155,48 @@ found:
return ret;
}
+/**
+ * i915_gem_evict_vm - Try to free up VM space
+ *
+ * @vm: Address space to evict from
+ * @do_idle: Boolean directing whether to idle first.
+ *
+ * VM eviction is about freeing up virtual address space. For finer-grained
+ * eviction, see i915_gem_evict_something(). In terms of freeing up actual
+ * system memory, this function may not accomplish the desired result: an
+ * object may be shared across multiple address spaces, and this function
+ * does not guarantee that those objects are freed.
+ *
+ * Using do_idle will result in a more complete eviction because it retires and
+ * inactivates the current BOs.
+ */
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+{
+ struct i915_vma *vma, *next;
+ int ret;
+
+ trace_i915_gem_evict_vm(vm);
+
+ if (do_idle) {
+ ret = i915_gpu_idle(vm->dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(vm->dev);
+ }
+
+ list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+ if (vma->obj->pin_count == 0)
+ WARN_ON(i915_vma_unbind(vma));
+
+ return 0;
+}
+
int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_address_space *vm;
- struct i915_vma *vma, *next;
bool lists_empty = true;
int ret;
@@ -184,11 +223,8 @@ i915_gem_evict_everything(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
- if (vma->obj->pin_count == 0)
- WARN_ON(i915_vma_unbind(vma));
- }
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ WARN_ON(i915_gem_evict_vm(vm, false));
return 0;
}
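
The new i915_gem_evict_vm() above optionally idles the GPU, then unbinds every unpinned vma on the address space's inactive list, and i915_gem_evict_everything() becomes a loop calling it for each VM. The following standalone sketch models only that control flow (quiesce if asked, then sweep a list and release whatever is not pinned); the types and helpers are invented for the illustration.

#include <stdio.h>

struct entry {
        int pinned;
        int bound;
        struct entry *next;
};

struct space {
        struct entry *inactive;
};

/* Very loose stand-in for i915_gpu_idle() plus request retirement. */
static void quiesce(struct space *vm)
{
        (void)vm;
        printf("quiesced\n");
}

/* Mirrors only the control flow of the new helper: optionally idle first,
 * then unbind everything on the inactive list that is not pinned. */
static int evict_space(struct space *vm, int do_idle)
{
        struct entry *e;

        if (do_idle)
                quiesce(vm);

        for (e = vm->inactive; e; e = e->next) {
                if (!e->pinned && e->bound) {
                        e->bound = 0;
                        printf("unbound one entry\n");
                }
        }
        return 0;
}

int main(void)
{
        struct entry pinned_entry = { .pinned = 1, .bound = 1 };
        struct entry idle_entry = { .pinned = 0, .bound = 1 };
        struct space vm = { .inactive = &pinned_entry };

        pinned_entry.next = &idle_entry;
        evict_space(&vm, 1);
        printf("pinned entry still bound: %d\n", pinned_entry.bound);
        return 0;
}
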
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bf345777ae9f..0ce0d47e4b0f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,35 +33,35 @@
#include "intel_drv.h"
#include <linux/dma_remapping.h>
-struct eb_objects {
- struct list_head objects;
+struct eb_vmas {
+ struct list_head vmas;
int and;
union {
- struct drm_i915_gem_object *lut[0];
+ struct i915_vma *lut[0];
struct hlist_head buckets[0];
};
};
-static struct eb_objects *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+static struct eb_vmas *
+eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
{
- struct eb_objects *eb = NULL;
+ struct eb_vmas *eb = NULL;
if (args->flags & I915_EXEC_HANDLE_LUT) {
- int size = args->buffer_count;
- size *= sizeof(struct drm_i915_gem_object *);
- size += sizeof(struct eb_objects);
+ unsigned size = args->buffer_count;
+ size *= sizeof(struct i915_vma *);
+ size += sizeof(struct eb_vmas);
eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
}
if (eb == NULL) {
- int size = args->buffer_count;
- int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ unsigned size = args->buffer_count;
+ unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
while (count > 2*size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
- sizeof(struct eb_objects),
+ sizeof(struct eb_vmas),
GFP_TEMPORARY);
if (eb == NULL)
return eb;
@@ -70,64 +70,102 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
} else
eb->and = -args->buffer_count;
- INIT_LIST_HEAD(&eb->objects);
+ INIT_LIST_HEAD(&eb->vmas);
return eb;
}
static void
-eb_reset(struct eb_objects *eb)
+eb_reset(struct eb_vmas *eb)
{
if (eb->and >= 0)
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
static int
-eb_lookup_objects(struct eb_objects *eb,
- struct drm_i915_gem_exec_object2 *exec,
- const struct drm_i915_gem_execbuffer2 *args,
- struct drm_file *file)
+eb_lookup_vmas(struct eb_vmas *eb,
+ struct drm_i915_gem_exec_object2 *exec,
+ const struct drm_i915_gem_execbuffer2 *args,
+ struct i915_address_space *vm,
+ struct drm_file *file)
{
- int i;
+ struct drm_i915_gem_object *obj;
+ struct list_head objects;
+ int i, ret = 0;
+ INIT_LIST_HEAD(&objects);
spin_lock(&file->table_lock);
+ /* Grab a reference to the object and release the lock so we can lookup
+ * or create the VMA without using GFP_ATOMIC */
for (i = 0; i < args->buffer_count; i++) {
- struct drm_i915_gem_object *obj;
-
obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
if (obj == NULL) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
- return -ENOENT;
+ ret = -ENOENT;
+ goto out;
}
- if (!list_empty(&obj->exec_list)) {
+ if (!list_empty(&obj->obj_exec_link)) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
drm_gem_object_reference(&obj->base);
- list_add_tail(&obj->exec_list, &eb->objects);
+ list_add_tail(&obj->obj_exec_link, &objects);
+ }
+ spin_unlock(&file->table_lock);
+
+ i = 0;
+ list_for_each_entry(obj, &objects, obj_exec_link) {
+ struct i915_vma *vma;
- obj->exec_entry = &exec[i];
+ /*
+ * NOTE: We can leak any vmas created here when something fails
+ * later on. But that's no issue since vma_unbind can deal with
+ * vmas which are not actually bound. And since only
+ * lookup_or_create exists as an interface to get at the vma
+ * from the (obj, vm) we don't run the risk of creating
+ * duplicated vmas for the same vm.
+ */
+ vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG("Failed to lookup VMA\n");
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ list_add_tail(&vma->exec_list, &eb->vmas);
+
+ vma->exec_entry = &exec[i];
if (eb->and < 0) {
- eb->lut[i] = obj;
+ eb->lut[i] = vma;
} else {
uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
- obj->exec_handle = handle;
- hlist_add_head(&obj->exec_node,
+ vma->exec_handle = handle;
+ hlist_add_head(&vma->exec_node,
&eb->buckets[handle & eb->and]);
}
+ ++i;
}
- spin_unlock(&file->table_lock);
- return 0;
+
+out:
+ while (!list_empty(&objects)) {
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ obj_exec_link);
+ list_del_init(&obj->obj_exec_link);
+ if (ret)
+ drm_gem_object_unreference(&obj->base);
+ }
+ return ret;
}
-static struct drm_i915_gem_object *
-eb_get_object(struct eb_objects *eb, unsigned long handle)
+static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
if (eb->and < 0) {
if (handle >= -eb->and)
@@ -139,34 +177,33 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
- obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
- if (obj->exec_handle == handle)
- return obj;
+ vma = hlist_entry(node, struct i915_vma, exec_node);
+ if (vma->exec_handle == handle)
+ return vma;
}
return NULL;
}
}
-static void
-eb_destroy(struct eb_objects *eb)
-{
- while (!list_empty(&eb->objects)) {
- struct drm_i915_gem_object *obj;
+static void eb_destroy(struct eb_vmas *eb)
+{
+ while (!list_empty(&eb->vmas)) {
+ struct i915_vma *vma;
- obj = list_first_entry(&eb->objects,
- struct drm_i915_gem_object,
+ vma = list_first_entry(&eb->vmas,
+ struct i915_vma,
exec_list);
- list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
+ list_del_init(&vma->exec_list);
+ drm_gem_object_unreference(&vma->obj->base);
}
kfree(eb);
}
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
- return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+ return (HAS_LLC(obj->base.dev) ||
+ obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
!obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -179,7 +216,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
char *vaddr;
int ret = -EINVAL;
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret)
return ret;
@@ -223,22 +260,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
- struct eb_objects *eb,
+ struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *reloc,
struct i915_address_space *vm)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
+ struct i915_vma *target_vma;
uint32_t target_offset;
int ret = -EINVAL;
/* we already hold a reference to all valid objects */
- target_obj = &eb_get_object(eb, reloc->target_handle)->base;
- if (unlikely(target_obj == NULL))
+ target_vma = eb_get_vma(eb, reloc->target_handle);
+ if (unlikely(target_vma == NULL))
return -ENOENT;
+ target_i915_obj = target_vma->obj;
+ target_obj = &target_vma->obj->base;
- target_i915_obj = to_intel_bo(target_obj);
target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
@@ -320,14 +359,13 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
- struct eb_objects *eb,
- struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+ struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *user_relocs;
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int remain, ret;
user_relocs = to_user_ptr(entry->relocs_ptr);
@@ -346,8 +384,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
do {
u64 offset = r->presumed_offset;
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
- vm);
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
+ vma->vm);
if (ret)
return ret;
@@ -368,17 +406,16 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
- struct eb_objects *eb,
- struct drm_i915_gem_relocation_entry *relocs,
- struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+ struct eb_vmas *eb,
+ struct drm_i915_gem_relocation_entry *relocs)
{
- const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
- vm);
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
+ vma->vm);
if (ret)
return ret;
}
@@ -387,10 +424,10 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb,
+i915_gem_execbuffer_relocate(struct eb_vmas *eb,
struct i915_address_space *vm)
{
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
int ret = 0;
/* This is the fast path and we cannot handle a pagefault whilst
@@ -401,8 +438,8 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
* lockdep complains vehemently.
*/
pagefault_disable();
- list_for_each_entry(obj, &eb->objects, exec_list) {
- ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
+ list_for_each_entry(vma, &eb->vmas, exec_list) {
+ ret = i915_gem_execbuffer_relocate_vma(vma, eb);
if (ret)
break;
}
@@ -415,31 +452,32 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
static int
-need_reloc_mappable(struct drm_i915_gem_object *obj)
+need_reloc_mappable(struct i915_vma *vma)
{
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
- return entry->relocation_count && !use_cpu_reloc(obj);
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+ return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
+ i915_is_ggtt(vma->vm);
}
static int
-i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- struct i915_address_space *vm,
- bool *need_reloc)
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
+ struct intel_ring_buffer *ring,
+ bool *need_reloc)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
+ struct drm_i915_gem_object *obj = vma->obj;
int ret;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable = need_fence || need_reloc_mappable(obj);
+ need_mappable = need_fence || need_reloc_mappable(vma);
- ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+ ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
false);
if (ret)
return ret;
@@ -467,8 +505,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
- if (entry->offset != i915_gem_obj_offset(obj, vm)) {
- entry->offset = i915_gem_obj_offset(obj, vm);
+ if (entry->offset != vma->node.start) {
+ entry->offset = vma->node.start;
*need_reloc = true;
}
@@ -485,14 +523,15 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
}
static void
-i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry;
+ struct drm_i915_gem_object *obj = vma->obj;
- if (!i915_gem_obj_bound_any(obj))
+ if (!drm_mm_node_allocated(&vma->node))
return;
- entry = obj->exec_entry;
+ entry = vma->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
i915_gem_object_unpin_fence(obj);
@@ -505,41 +544,46 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
- struct list_head *objects,
- struct i915_address_space *vm,
+ struct list_head *vmas,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
- struct list_head ordered_objects;
+ struct i915_vma *vma;
+ struct i915_address_space *vm;
+ struct list_head ordered_vmas;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry;
- INIT_LIST_HEAD(&ordered_objects);
- while (!list_empty(objects)) {
+ if (list_empty(vmas))
+ return 0;
+
+ vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
+
+ INIT_LIST_HEAD(&ordered_vmas);
+ while (!list_empty(vmas)) {
struct drm_i915_gem_exec_object2 *entry;
bool need_fence, need_mappable;
- obj = list_first_entry(objects,
- struct drm_i915_gem_object,
- exec_list);
- entry = obj->exec_entry;
+ vma = list_first_entry(vmas, struct i915_vma, exec_list);
+ obj = vma->obj;
+ entry = vma->exec_entry;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable = need_fence || need_reloc_mappable(obj);
+ need_mappable = need_fence || need_reloc_mappable(vma);
if (need_mappable)
- list_move(&obj->exec_list, &ordered_objects);
+ list_move(&vma->exec_list, &ordered_vmas);
else
- list_move_tail(&obj->exec_list, &ordered_objects);
+ list_move_tail(&vma->exec_list, &ordered_vmas);
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false;
}
- list_splice(&ordered_objects, objects);
+ list_splice(&ordered_vmas, vmas);
/* Attempt to pin all of the buffers into the GTT.
* This is done in 3 phases:
@@ -558,52 +602,52 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
int ret = 0;
/* Unbind any ill-fitting objects or pin. */
- list_for_each_entry(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool need_fence, need_mappable;
- u32 obj_offset;
- if (!i915_gem_obj_bound(obj, vm))
+ obj = vma->obj;
+
+ if (!drm_mm_node_allocated(&vma->node))
continue;
- obj_offset = i915_gem_obj_offset(obj, vm);
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable = need_fence || need_reloc_mappable(obj);
+ need_mappable = need_fence || need_reloc_mappable(vma);
WARN_ON((need_mappable || need_fence) &&
- !i915_is_ggtt(vm));
+ !i915_is_ggtt(vma->vm));
if ((entry->alignment &&
- obj_offset & (entry->alignment - 1)) ||
+ vma->node.start & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
- ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
+ ret = i915_vma_unbind(vma);
else
- ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+ ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
if (ret)
goto err;
}
/* Bind fresh objects */
- list_for_each_entry(obj, objects, exec_list) {
- if (i915_gem_obj_bound(obj, vm))
+ list_for_each_entry(vma, vmas, exec_list) {
+ if (drm_mm_node_allocated(&vma->node))
continue;
- ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+ ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
if (ret)
goto err;
}
err: /* Decrement pin count for bound objects */
- list_for_each_entry(obj, objects, exec_list)
- i915_gem_execbuffer_unreserve_object(obj);
+ list_for_each_entry(vma, vmas, exec_list)
+ i915_gem_execbuffer_unreserve_vma(vma);
if (ret != -ENOSPC || retry++)
return ret;
- ret = i915_gem_evict_everything(ring->dev);
+ ret = i915_gem_evict_vm(vm, true);
if (ret)
return ret;
} while (1);
@@ -614,24 +658,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_ring_buffer *ring,
- struct eb_objects *eb,
- struct drm_i915_gem_exec_object2 *exec,
- struct i915_address_space *vm)
+ struct eb_vmas *eb,
+ struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_gem_relocation_entry *reloc;
- struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+ struct i915_vma *vma;
bool need_relocs;
int *reloc_offset;
int i, total, ret;
- int count = args->buffer_count;
+ unsigned count = args->buffer_count;
+
+ if (WARN_ON(list_empty(&eb->vmas)))
+ return 0;
+
+ vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
/* We may process another execbuffer during the unlock... */
- while (!list_empty(&eb->objects)) {
- obj = list_first_entry(&eb->objects,
- struct drm_i915_gem_object,
- exec_list);
- list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
+ while (!list_empty(&eb->vmas)) {
+ vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+ list_del_init(&vma->exec_list);
+ drm_gem_object_unreference(&vma->obj->base);
}
mutex_unlock(&dev->struct_mutex);
@@ -695,20 +742,19 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
- ret = eb_lookup_objects(eb, exec, args, file);
+ ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
if (ret)
goto err;
- list_for_each_entry(obj, &eb->objects, exec_list) {
- int offset = obj->exec_entry - exec;
- ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
- reloc + reloc_offset[offset],
- vm);
+ list_for_each_entry(vma, &eb->vmas, exec_list) {
+ int offset = vma->exec_entry - exec;
+ ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
+ reloc + reloc_offset[offset]);
if (ret)
goto err;
}
@@ -727,14 +773,15 @@ err:
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
- struct list_head *objects)
+ struct list_head *vmas)
{
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
int ret;
- list_for_each_entry(obj, objects, exec_list) {
+ list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_object *obj = vma->obj;
ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
@@ -771,8 +818,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
int count)
{
int i;
- int relocs_total = 0;
- int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+ unsigned relocs_total = 0;
+ unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
for (i = 0; i < count; i++) {
char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
@@ -809,13 +856,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
}
static void
-i915_gem_execbuffer_move_to_active(struct list_head *objects,
- struct i915_address_space *vm,
+i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_ring_buffer *ring)
{
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
- list_for_each_entry(obj, objects, exec_list) {
+ list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_object *obj = vma->obj;
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
@@ -825,9 +872,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
- /* FIXME: This lookup gets fixed later <-- danvet */
- list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
- i915_gem_object_move_to_active(obj, ring);
+ i915_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -885,10 +930,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct eb_objects *eb;
+ struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
+ struct i915_ctx_hang_stats *hs;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 mask, flags;
@@ -1000,7 +1046,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
- cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+ cliprects = kcalloc(args->num_cliprects,
+ sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
@@ -1025,7 +1072,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
- eb = eb_create(args);
+ eb = eb_create(args, vm);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
@@ -1033,18 +1080,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Look up object handles */
- ret = eb_lookup_objects(eb, exec, args, file);
+ ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
goto err;
/* take note of the batch buffer before we might reorder the lists */
- batch_obj = list_entry(eb->objects.prev,
- struct drm_i915_gem_object,
- exec_list);
+ batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
if (ret)
goto err;
@@ -1054,7 +1099,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
- eb, exec, vm);
+ eb, exec);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@@ -1076,10 +1121,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
- ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
+ ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
if (ret)
goto err;
+ hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
+ if (IS_ERR(hs)) {
+ ret = PTR_ERR(hs);
+ goto err;
+ }
+
+ if (hs->banned) {
+ ret = -EIO;
+ goto err;
+ }
+
ret = i915_switch_context(ring, file, ctx_id);
if (ret)
goto err;
@@ -1131,7 +1187,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
- i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
+ i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err:
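The reworked eb_lookup_vmas() above takes the object references under file->table_lock and only then, with the spinlock dropped, calls i915_gem_obj_lookup_or_create_vma(), so the vma allocation can sleep rather than being forced into GFP_ATOMIC. Below is a minimal userspace sketch of that two-phase "grab under the lock, allocate after dropping it" pattern; the demo_* names and the pthread mutex are illustrative placeholders, not part of the patch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_node {
	struct demo_node *next;
	int handle;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_node *demo_table;	/* protected by demo_lock */

static int demo_lookup_all(void)
{
	struct demo_node *n, *grabbed;

	/* Phase 1: under the lock, only take references (here: detach the
	 * list). No allocation happens while the lock is held. */
	pthread_mutex_lock(&demo_lock);
	grabbed = demo_table;
	demo_table = NULL;
	pthread_mutex_unlock(&demo_lock);

	/* Phase 2: the lock is dropped, so work that may block is fine. */
	for (n = grabbed; n; n = n->next) {
		void *vma = malloc(64);	/* stand-in for vma creation */
		if (!vma)
			return -1;
		printf("created vma for handle %d\n", n->handle);
		free(vma);
	}
	return 0;
}

int main(void)
{
	struct demo_node a = { NULL, 1 }, b = { &a, 2 };

	demo_table = &b;
	return demo_lookup_all() ? 1 : 0;
}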
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 212f6d8c35ec..c4c42e7cbd7b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -58,9 +58,10 @@
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ bool valid)
{
- gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -79,9 +80,10 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
}
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ bool valid)
{
- gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -105,9 +107,10 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ bool valid)
{
- gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
/* Mark the page as writeable. Other platforms don't have a
@@ -122,9 +125,10 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
}
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ bool valid)
{
- gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
@@ -134,9 +138,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
}
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ bool valid)
{
- gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -236,7 +241,8 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_entry,
- unsigned num_entries)
+ unsigned num_entries,
+ bool use_scratch)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
@@ -245,7 +251,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -282,7 +288,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
dma_addr_t page_addr;
page_addr = sg_page_iter_dma_address(&sg_iter);
- pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
+ pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
act_pt++;
@@ -336,7 +342,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
- ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
+ ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
GFP_KERNEL);
if (!ppgtt->pt_pages)
return -ENOMEM;
@@ -347,7 +353,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
goto err_pt_alloc;
}
- ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
+ ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
@@ -367,7 +373,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
}
ppgtt->base.clear_range(&ppgtt->base, 0,
- ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
+ ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
@@ -444,7 +450,8 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
{
ppgtt->base.clear_range(&ppgtt->base,
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ obj->base.size >> PAGE_SHIFT,
+ true);
}
extern int intel_iommu_gfx_mapped;
@@ -485,15 +492,65 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible;
}
+void i915_check_and_clear_faults(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
+
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ for_each_ring(ring, dev_priv, i) {
+ u32 fault_reg;
+ fault_reg = I915_READ(RING_FAULT_REG(ring));
+ if (fault_reg & RING_FAULT_VALID) {
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08lx\\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault_reg & PAGE_MASK,
+ fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault_reg),
+ RING_FAULT_FAULT_TYPE(fault_reg));
+ I915_WRITE(RING_FAULT_REG(ring),
+ fault_reg & ~RING_FAULT_VALID);
+ }
+ }
+ POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Don't bother messing with faults pre GEN6 as we have little
+ * documentation supporting that it's a good idea.
+ */
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ i915_check_and_clear_faults(dev);
+
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ dev_priv->gtt.base.start / PAGE_SIZE,
+ dev_priv->gtt.base.total / PAGE_SIZE,
+ false);
+}
+
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ i915_check_and_clear_faults(dev);
+
/* First fill our portion of the GTT with scratch pages */
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE,
- dev_priv->gtt.base.total / PAGE_SIZE);
+ dev_priv->gtt.base.total / PAGE_SIZE,
+ true);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
i915_gem_clflush_object(obj, obj->pin_display);
@@ -536,7 +593,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
+ iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
i++;
}
@@ -548,7 +605,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) !=
- vm->pte_encode(addr, level));
+ vm->pte_encode(addr, level, true));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -560,7 +617,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
- unsigned int num_entries)
+ unsigned int num_entries,
+ bool use_scratch)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
@@ -573,7 +631,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
@@ -594,7 +653,8 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
static void i915_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
- unsigned int num_entries)
+ unsigned int num_entries,
+ bool unused)
{
intel_gtt_clear_range(first_entry, num_entries);
}
@@ -622,7 +682,8 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
entry,
- obj->base.size >> PAGE_SHIFT);
+ obj->base.size >> PAGE_SHIFT,
+ true);
obj->has_global_gtt_mapping = 0;
}
@@ -709,11 +770,11 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
+ ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
}
/* And finally clear the reserved guard page */
- ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
+ ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
}
static bool
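The i915_gem_gtt.c hunks above thread a new valid/use_scratch flag through the pte_encode() and clear_range() callbacks so that i915_gem_suspend_gtt_mappings() can scrub the GTT with not-present entries while ordinary clears keep pointing at the scratch page. A compilable toy encoder showing just that flag; the DEMO_* bit layout is made up for illustration and does not match real hardware.

#include <stdint.h>
#include <stdio.h>

#define DEMO_PTE_VALID		(1u << 0)
#define DEMO_PTE_ADDR_MASK	0xfffff000u	/* page-aligned address bits */

/* Same shape as the reworked gen6 pte_encode() helpers: the caller decides
 * whether the entry is marked present. */
static uint32_t demo_pte_encode(uint32_t addr, int valid)
{
	uint32_t pte = valid ? DEMO_PTE_VALID : 0;

	pte |= addr & DEMO_PTE_ADDR_MASK;
	return pte;
}

int main(void)
{
	printf("scratch page, valid:   0x%08x\n", demo_pte_encode(0x1000, 1));
	printf("scratch page, invalid: 0x%08x\n", demo_pte_encode(0x1000, 0));
	return 0;
}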
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index e15a1d90037d..d284d892ed94 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -395,7 +395,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
- vma = i915_gem_vma_create(obj, ggtt);
+ vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 032e9ef9c896..b13905348048 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (obj->pin_count) {
+ if (obj->pin_count || obj->framebuffer_references) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
@@ -393,7 +393,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
if (obj->bit_17 == NULL) {
- obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
+ obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
sizeof(long), GFP_KERNEL);
}
} else {
@@ -504,8 +504,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
int i;
if (obj->bit_17 == NULL) {
- obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
- sizeof(long), GFP_KERNEL);
+ obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+ sizeof(long), GFP_KERNEL);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index dae364f0028c..a8bb213da79f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -215,6 +215,24 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
}
}
+static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+{
+ switch (a) {
+ case HANGCHECK_IDLE:
+ return "idle";
+ case HANGCHECK_WAIT:
+ return "wait";
+ case HANGCHECK_ACTIVE:
+ return "active";
+ case HANGCHECK_KICK:
+ return "kick";
+ case HANGCHECK_HUNG:
+ return "hung";
+ }
+
+ return "unknown";
+}
+
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
struct drm_i915_error_state *error,
@@ -231,7 +249,8 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
-
+ if (INTEL_INFO(dev)->gen >= 4)
+ err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]);
if (INTEL_INFO(dev)->gen >= 4)
err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -255,6 +274,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+ err_printf(m, " hangcheck: %s [%d]\n",
+ hangcheck_action_to_str(error->hangcheck_action[ring]),
+ error->hangcheck_score[ring]);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -283,13 +305,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
- err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+ err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
err_printf(m, "CCID: 0x%08x\n", error->ccid);
+ err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
for (i = 0; i < dev_priv->num_fence_regs; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
@@ -703,6 +726,7 @@ static void i915_record_ring_state(struct drm_device *dev,
error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
if (ring->id == RCS)
error->bbaddr = I915_READ64(BB_ADDR);
+ error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
} else {
error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -720,6 +744,9 @@ static void i915_record_ring_state(struct drm_device *dev,
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
+
+ error->hangcheck_score[ring->id] = ring->hangcheck.score;
+ error->hangcheck_action[ring->id] = ring->hangcheck.action;
}
@@ -769,7 +796,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].num_requests = count;
error->ring[i].requests =
- kmalloc(count*sizeof(struct drm_i915_error_request),
+ kcalloc(count, sizeof(*error->ring[i].requests),
GFP_ATOMIC);
if (error->ring[i].requests == NULL) {
error->ring[i].num_requests = 0;
@@ -811,7 +838,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
if (i) {
- active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
+ active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
if (active_bo)
pinned_bo = active_bo + error->active_bo_count[ndx];
}
@@ -885,8 +912,12 @@ void i915_capture_error_state(struct drm_device *dev)
return;
}
- DRM_INFO("capturing error event; look for more information in "
- "/sys/class/drm/card%d/error\n", dev->primary->index);
+ DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+ dev->primary->index);
+ DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+ DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+ DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+ DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
kref_init(&error->ref);
error->eir = I915_READ(EIR);
@@ -988,6 +1019,7 @@ const char *i915_cache_level_str(int type)
case I915_CACHE_NONE: return " uncached";
case I915_CACHE_LLC: return " snooped or LLC";
case I915_CACHE_L3_LLC: return " L3+LLC";
+ case I915_CACHE_WT: return " WT";
default: return "";
}
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4b91228fd9bd..2a44816ae9d5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -30,6 +30,7 @@
#include <linux/sysrq.h>
#include <linux/slab.h>
+#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -441,7 +442,7 @@ done:
void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -458,7 +459,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
}
void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -486,9 +487,10 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
if (INTEL_INFO(dev)->gen >= 4)
- i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_A,
+ PIPE_LEGACY_BLC_EVENT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -518,6 +520,12 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
}
}
+static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ /* Gen2 doesn't have a hardware frame counter */
+ return 0;
+}
+
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
@@ -526,7 +534,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
- u32 high1, high2, low;
+ u32 high1, high2, low, pixel, vbl_start;
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -534,6 +542,24 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
return 0;
}
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ const struct drm_display_mode *mode =
+ &intel_crtc->config.adjusted_mode;
+
+ vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
+ } else {
+ enum transcoder cpu_transcoder =
+ intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ u32 htotal;
+
+ htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+ vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
+
+ vbl_start *= htotal;
+ }
+
high_frame = PIPEFRAME(pipe);
low_frame = PIPEFRAMEPIXEL(pipe);
@@ -544,13 +570,20 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
*/
do {
high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
- low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
+ low = I915_READ(low_frame);
high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
} while (high1 != high2);
high1 >>= PIPE_FRAME_HIGH_SHIFT;
+ pixel = low & PIPE_PIXEL_MASK;
low >>= PIPE_FRAME_LOW_SHIFT;
- return (high1 << 8) | low;
+
+ /*
+ * The frame counter increments at beginning of active.
+ * Cook up a vblank counter by also checking the pixel
+ * counter against vblank start.
+ */
+ return ((high1 << 8) | low) + (pixel >= vbl_start);
}
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -567,37 +600,98 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
return I915_READ(reg);
}
+static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t status;
+
+ if (IS_VALLEYVIEW(dev)) {
+ status = pipe == PIPE_A ?
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ return I915_READ(VLV_ISR) & status;
+ } else if (IS_GEN2(dev)) {
+ status = pipe == PIPE_A ?
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ return I915_READ16(ISR) & status;
+ } else if (INTEL_INFO(dev)->gen < 5) {
+ status = pipe == PIPE_A ?
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ return I915_READ(ISR) & status;
+ } else if (INTEL_INFO(dev)->gen < 7) {
+ status = pipe == PIPE_A ?
+ DE_PIPEA_VBLANK :
+ DE_PIPEB_VBLANK;
+
+ return I915_READ(DEISR) & status;
+ } else {
+ switch (pipe) {
+ default:
+ case PIPE_A:
+ status = DE_PIPEA_VBLANK_IVB;
+ break;
+ case PIPE_B:
+ status = DE_PIPEB_VBLANK_IVB;
+ break;
+ case PIPE_C:
+ status = DE_PIPEC_VBLANK_IVB;
+ break;
+ }
+
+ return I915_READ(DEISR) & status;
+ }
+}
+
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
int *vpos, int *hpos)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 vbl = 0, position = 0;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ int position;
int vbl_start, vbl_end, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
- if (!i915_pipe_enabled(dev, pipe)) {
+ if (!intel_crtc->active) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
"pipe %c\n", pipe_name(pipe));
return 0;
}
- /* Get vtotal. */
- vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+ htotal = mode->crtc_htotal;
+ vtotal = mode->crtc_vtotal;
+ vbl_start = mode->crtc_vblank_start;
+ vbl_end = mode->crtc_vblank_end;
- if (INTEL_INFO(dev)->gen >= 4) {
+ ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+ if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
- position = I915_READ(PIPEDSL(pipe));
+ if (IS_GEN2(dev))
+ position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+ else
+ position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
- /* Decode into vertical scanout position. Don't have
- * horizontal scanout position.
+ /*
+ * The scanline counter increments at the leading edge
+ * of hsync, ie. it completely misses the active portion
+ * of the line. Fix up the counter at both edges of vblank
+ * to get a more accurate picture of whether we're in vblank
+ * or not.
*/
- *vpos = position & 0x1fff;
- *hpos = 0;
+ in_vbl = intel_pipe_in_vblank(dev, pipe);
+ if ((in_vbl && position == vbl_start - 1) ||
+ (!in_vbl && position == vbl_end - 1))
+ position = (position + 1) % vtotal;
} else {
/* Have access to pixelcount since start of frame.
* We can split this into vertical and horizontal
@@ -605,28 +699,32 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
*/
position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
- htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
- *vpos = position / htotal;
- *hpos = position - (*vpos * htotal);
+ /* convert to pixel counts */
+ vbl_start *= htotal;
+ vbl_end *= htotal;
+ vtotal *= htotal;
}
- /* Query vblank area. */
- vbl = I915_READ(VBLANK(cpu_transcoder));
-
- /* Test position against vblank region. */
- vbl_start = vbl & 0x1fff;
- vbl_end = (vbl >> 16) & 0x1fff;
-
- if ((*vpos < vbl_start) || (*vpos > vbl_end))
- in_vbl = false;
+ in_vbl = position >= vbl_start && position < vbl_end;
- /* Inside "upper part" of vblank area? Apply corrective offset: */
- if (in_vbl && (*vpos >= vbl_start))
- *vpos = *vpos - vtotal;
+ /*
+ * While in vblank, position will be negative
+ * counting up towards 0 at vbl_end. And outside
+ * vblank, position will be positive counting
+ * up since vbl_end.
+ */
+ if (position >= vbl_start)
+ position -= vbl_end;
+ else
+ position += vtotal - vbl_end;
- /* Readouts valid? */
- if (vbl > 0)
- ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+ if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ *vpos = position;
+ *hpos = 0;
+ } else {
+ *vpos = position / htotal;
+ *hpos = position - (*vpos * htotal);
+ }
/* In vblank? */
if (in_vbl)
@@ -665,7 +763,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
crtc);
}
-static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
+static bool intel_hpd_irq_event(struct drm_device *dev,
+ struct drm_connector *connector)
{
enum drm_connector_status old_status;
@@ -673,11 +772,16 @@ static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *con
old_status = connector->status;
connector->status = connector->funcs->detect(connector, false);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ if (old_status == connector->status)
+ return false;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
drm_get_connector_name(connector),
- old_status, connector->status);
- return (old_status != connector->status);
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status));
+
+ return true;
}
/*
@@ -801,7 +905,7 @@ static void notify_ring(struct drm_device *dev,
if (ring->obj == NULL)
return;
- trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
+ trace_i915_gem_request_complete(ring);
wake_up_all(&ring->irq_queue);
i915_queue_hangcheck(dev);
@@ -812,7 +916,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps.work);
u32 pm_iir;
- u8 new_delay;
+ int new_delay, adj;
spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
@@ -829,40 +933,49 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
+ adj = dev_priv->rps.last_adj;
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
- new_delay = dev_priv->rps.cur_delay + 1;
+ if (adj > 0)
+ adj *= 2;
+ else
+ adj = 1;
+ new_delay = dev_priv->rps.cur_delay + adj;
/*
* For better performance, jump directly
* to RPe if we're below it.
*/
- if (IS_VALLEYVIEW(dev_priv->dev) &&
- dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
+ if (new_delay < dev_priv->rps.rpe_delay)
+ new_delay = dev_priv->rps.rpe_delay;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+ if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
new_delay = dev_priv->rps.rpe_delay;
- } else
- new_delay = dev_priv->rps.cur_delay - 1;
+ else
+ new_delay = dev_priv->rps.min_delay;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+ if (adj < 0)
+ adj *= 2;
+ else
+ adj = -1;
+ new_delay = dev_priv->rps.cur_delay + adj;
+ } else { /* unknown event */
+ new_delay = dev_priv->rps.cur_delay;
+ }
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
- if (new_delay >= dev_priv->rps.min_delay &&
- new_delay <= dev_priv->rps.max_delay) {
- if (IS_VALLEYVIEW(dev_priv->dev))
- valleyview_set_rps(dev_priv->dev, new_delay);
- else
- gen6_set_rps(dev_priv->dev, new_delay);
- }
-
- if (IS_VALLEYVIEW(dev_priv->dev)) {
- /*
- * On VLV, when we enter RC6 we may not be at the minimum
- * voltage level, so arm a timer to check. It should only
- * fire when there's activity or once after we've entered
- * RC6, and then won't be re-armed until the next RPS interrupt.
- */
- mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
- msecs_to_jiffies(100));
- }
+ if (new_delay < (int)dev_priv->rps.min_delay)
+ new_delay = dev_priv->rps.min_delay;
+ if (new_delay > (int)dev_priv->rps.max_delay)
+ new_delay = dev_priv->rps.max_delay;
+ dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
+
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ valleyview_set_rps(dev_priv->dev, new_delay);
+ else
+ gen6_set_rps(dev_priv->dev, new_delay);
mutex_unlock(&dev_priv->rps.hw_lock);
}
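The gen6_pm_rps_work() rework above replaces the fixed one-step frequency change with an adjustment that doubles on consecutive up/down events, drops to RPe (or the minimum) on a down timeout, and clamps the result to [min, max]. A small self-contained model of that logic; the demo_* names and the sample frequencies are illustrative only.

#include <stdio.h>

struct demo_rps {
	int cur, min, max, rpe, last_adj;
};

enum demo_event { DEMO_UP, DEMO_DOWN, DEMO_DOWN_TIMEOUT };

static int demo_new_delay(struct demo_rps *rps, enum demo_event ev)
{
	int adj = rps->last_adj, new_delay;

	if (ev == DEMO_UP) {
		adj = adj > 0 ? adj * 2 : 1;
		new_delay = rps->cur + adj;
		if (new_delay < rps->rpe)	/* jump straight to RPe */
			new_delay = rps->rpe;
	} else if (ev == DEMO_DOWN_TIMEOUT) {
		new_delay = rps->cur > rps->rpe ? rps->rpe : rps->min;
		adj = 0;
	} else {				/* DEMO_DOWN */
		adj = adj < 0 ? adj * 2 : -1;
		new_delay = rps->cur + adj;
	}

	if (new_delay < rps->min)
		new_delay = rps->min;
	if (new_delay > rps->max)
		new_delay = rps->max;

	rps->last_adj = new_delay - rps->cur;
	rps->cur = new_delay;
	return new_delay;
}

int main(void)
{
	struct demo_rps rps = { .cur = 6, .min = 3, .max = 12, .rpe = 8 };

	printf("%d\n", demo_new_delay(&rps, DEMO_UP));	 /* below RPe -> 8 */
	printf("%d\n", demo_new_delay(&rps, DEMO_UP));	 /* step doubles -> 12 */
	printf("%d\n", demo_new_delay(&rps, DEMO_DOWN)); /* back off by 1 -> 11 */
	return 0;
}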
@@ -882,9 +995,10 @@ static void ivybridge_parity_work(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
l3_parity.error_work);
u32 error_status, row, bank, subbank;
- char *parity_event[5];
+ char *parity_event[6];
uint32_t misccpctl;
unsigned long flags;
+ uint8_t slice = 0;
/* We must turn off DOP level clock gating to access the L3 registers.
* In order to prevent a get/put style interface, acquire struct mutex
@@ -892,55 +1006,81 @@ static void ivybridge_parity_work(struct work_struct *work)
*/
mutex_lock(&dev_priv->dev->struct_mutex);
+ /* If we've screwed up tracking, just let the interrupt fire again */
+ if (WARN_ON(!dev_priv->l3_parity.which_slice))
+ goto out;
+
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
POSTING_READ(GEN7_MISCCPCTL);
- error_status = I915_READ(GEN7_L3CDERRST1);
- row = GEN7_PARITY_ERROR_ROW(error_status);
- bank = GEN7_PARITY_ERROR_BANK(error_status);
- subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+ while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
+ u32 reg;
- I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
- GEN7_L3CDERRST1_ENABLE);
- POSTING_READ(GEN7_L3CDERRST1);
+ slice--;
+ if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+ break;
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ dev_priv->l3_parity.which_slice &= ~(1<<slice);
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ reg = GEN7_L3CDERRST1 + (slice * 0x200);
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ error_status = I915_READ(reg);
+ row = GEN7_PARITY_ERROR_ROW(error_status);
+ bank = GEN7_PARITY_ERROR_BANK(error_status);
+ subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+ I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+ POSTING_READ(reg);
- parity_event[0] = I915_L3_PARITY_UEVENT "=1";
- parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
- parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
- parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
- parity_event[4] = NULL;
+ parity_event[0] = I915_L3_PARITY_UEVENT "=1";
+ parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+ parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+ parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+ parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
+ parity_event[5] = NULL;
- kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
- KOBJ_CHANGE, parity_event);
+ kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+ KOBJ_CHANGE, parity_event);
- DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
- row, bank, subbank);
+ DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
+ slice, row, bank, subbank);
- kfree(parity_event[3]);
- kfree(parity_event[2]);
- kfree(parity_event[1]);
+ kfree(parity_event[4]);
+ kfree(parity_event[3]);
+ kfree(parity_event[2]);
+ kfree(parity_event[1]);
+ }
+
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+out:
+ WARN_ON(dev_priv->l3_parity.which_slice);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
}
-static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- if (!HAS_L3_GPU_CACHE(dev))
+ if (!HAS_L3_DPF(dev))
return;
spin_lock(&dev_priv->irq_lock);
- ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+ ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
spin_unlock(&dev_priv->irq_lock);
+ iir &= GT_PARITY_ERROR(dev);
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+ dev_priv->l3_parity.which_slice |= 1 << 1;
+
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+ dev_priv->l3_parity.which_slice |= 1 << 0;
+
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
@@ -975,8 +1115,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
i915_handle_error(dev, false);
}
- if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
- ivybridge_parity_error_irq_handler(dev);
+ if (gt_iir & GT_PARITY_ERROR(dev))
+ ivybridge_parity_error_irq_handler(dev, gt_iir);
}
#define HPD_STORM_DETECT_PERIOD 1000
@@ -1050,6 +1190,102 @@ static void dp_aux_irq_handler(struct drm_device *dev)
wake_up_all(&dev_priv->gmbus_wait_queue);
}
+#if defined(CONFIG_DEBUG_FS)
+static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+ uint32_t crc0, uint32_t crc1,
+ uint32_t crc2, uint32_t crc3,
+ uint32_t crc4)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+ struct intel_pipe_crc_entry *entry;
+ int head, tail;
+
+ spin_lock(&pipe_crc->lock);
+
+ if (!pipe_crc->entries) {
+ spin_unlock(&pipe_crc->lock);
+ DRM_ERROR("spurious interrupt\n");
+ return;
+ }
+
+ head = pipe_crc->head;
+ tail = pipe_crc->tail;
+
+ if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+ spin_unlock(&pipe_crc->lock);
+ DRM_ERROR("CRC buffer overflowing\n");
+ return;
+ }
+
+ entry = &pipe_crc->entries[head];
+
+ entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+ entry->crc[0] = crc0;
+ entry->crc[1] = crc1;
+ entry->crc[2] = crc2;
+ entry->crc[3] = crc3;
+ entry->crc[4] = crc4;
+
+ head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+ pipe_crc->head = head;
+
+ spin_unlock(&pipe_crc->lock);
+
+ wake_up_interruptible(&pipe_crc->wq);
+}
+#else
+static inline void
+display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+ uint32_t crc0, uint32_t crc1,
+ uint32_t crc2, uint32_t crc3,
+ uint32_t crc4) {}
+#endif
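display_pipe_crc_irq_handler() above records CRC samples into a power-of-two ring indexed by head/tail and drops the sample when CIRC_SPACE() says there is no room, rather than overwriting unread entries. A compact userspace sketch of that producer side; DEMO_NR, DEMO_SPACE() and the demo_* types are placeholders for INTEL_PIPE_CRC_ENTRIES_NR and the CIRC_SPACE() macro from <linux/circ_buf.h>.

#include <stdint.h>

#define DEMO_NR	128	/* must be a power of two */
#define DEMO_SPACE(head, tail)	(((tail) - ((head) + 1)) & (DEMO_NR - 1))

struct demo_entry {
	uint32_t frame;
	uint32_t crc[5];
};

struct demo_ring {
	struct demo_entry entries[DEMO_NR];
	int head, tail;		/* producer bumps head, consumer bumps tail */
};

/* Refuse the sample when the ring is full, mirroring the
 * DRM_ERROR("CRC buffer overflowing") path in the handler above. */
static int demo_push(struct demo_ring *r, const struct demo_entry *e)
{
	if (DEMO_SPACE(r->head, r->tail) < 1)
		return -1;

	r->entries[r->head] = *e;
	r->head = (r->head + 1) & (DEMO_NR - 1);
	return 0;
}

int main(void)
{
	static struct demo_ring ring;
	struct demo_entry e = { .frame = 1 };

	return demo_push(&ring, &e) ? 1 : 0;
}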
+
+
+static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ display_pipe_crc_irq_handler(dev, pipe,
+ I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+ 0, 0, 0, 0);
+}
+
+static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ display_pipe_crc_irq_handler(dev, pipe,
+ I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+ I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
+ I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
+ I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
+ I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+}
+
+static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t res1, res2;
+
+ if (INTEL_INFO(dev)->gen >= 3)
+ res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+ else
+ res1 = 0;
+
+ if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+ else
+ res2 = 0;
+
+ display_pipe_crc_irq_handler(dev, pipe,
+ I915_READ(PIPE_CRC_RES_RED(pipe)),
+ I915_READ(PIPE_CRC_RES_GREEN(pipe)),
+ I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+ res1, res2);
+}
+
/* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */
@@ -1124,6 +1360,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip(dev, pipe);
}
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
}
/* Consume port. Then clear IIR or we'll miss events */
@@ -1212,21 +1451,26 @@ static void ivb_err_int_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 err_int = I915_READ(GEN7_ERR_INT);
+ enum pipe pipe;
if (err_int & ERR_INT_POISON)
DRM_ERROR("Poison interrupt\n");
- if (err_int & ERR_INT_FIFO_UNDERRUN_A)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
- DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
-
- if (err_int & ERR_INT_FIFO_UNDERRUN_B)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
- DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+ for_each_pipe(pipe) {
+ if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+ false))
+ DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
+ }
- if (err_int & ERR_INT_FIFO_UNDERRUN_C)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
- DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
+ if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
+ if (IS_IVYBRIDGE(dev))
+ ivb_pipe_crc_irq_handler(dev, pipe);
+ else
+ hsw_pipe_crc_irq_handler(dev, pipe);
+ }
+ }
I915_WRITE(GEN7_ERR_INT, err_int);
}
@@ -1297,6 +1541,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
if (de_iir & DE_AUX_CHANNEL_A)
dp_aux_irq_handler(dev);
@@ -1304,31 +1549,26 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
if (de_iir & DE_GSE)
intel_opregion_asle_intr(dev);
- if (de_iir & DE_PIPEA_VBLANK)
- drm_handle_vblank(dev, 0);
-
- if (de_iir & DE_PIPEB_VBLANK)
- drm_handle_vblank(dev, 1);
-
if (de_iir & DE_POISON)
DRM_ERROR("Poison interrupt\n");
- if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
- DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+ for_each_pipe(pipe) {
+ if (de_iir & DE_PIPE_VBLANK(pipe))
+ drm_handle_vblank(dev, pipe);
- if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
- DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+ if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
+ if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
- if (de_iir & DE_PLANEA_FLIP_DONE) {
- intel_prepare_page_flip(dev, 0);
- intel_finish_page_flip_plane(dev, 0);
- }
+ if (de_iir & DE_PIPE_CRC_DONE(pipe))
+ i9xx_pipe_crc_irq_handler(dev, pipe);
- if (de_iir & DE_PLANEB_FLIP_DONE) {
- intel_prepare_page_flip(dev, 1);
- intel_finish_page_flip_plane(dev, 1);
+ /* plane/pipes map 1:1 on ilk+ */
+ if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
+ }
}
/* check event from PCH */
@@ -1351,7 +1591,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
+ enum pipe i;
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev);
@@ -1362,10 +1602,12 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(dev);
- for (i = 0; i < 3; i++) {
- if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+ for_each_pipe(i) {
+ if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
drm_handle_vblank(dev, i);
- if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+
+ /* plane/pipes map 1:1 on ilk+ */
+ if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
intel_prepare_page_flip(dev, i);
intel_finish_page_flip_plane(dev, i);
}
@@ -1388,7 +1630,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
- bool err_int_reenable = false;
atomic_inc(&dev_priv->irq_received);
@@ -1412,17 +1653,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
POSTING_READ(SDEIER);
}
- /* On Haswell, also mask ERR_INT because we don't want to risk
- * generating "unclaimed register" interrupts from inside the interrupt
- * handler. */
- if (IS_HASWELL(dev)) {
- spin_lock(&dev_priv->irq_lock);
- err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
- if (err_int_reenable)
- ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
- spin_unlock(&dev_priv->irq_lock);
- }
-
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
if (INTEL_INFO(dev)->gen >= 6)
@@ -1452,13 +1682,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
}
}
- if (err_int_reenable) {
- spin_lock(&dev_priv->irq_lock);
- if (ivb_can_enable_err_int(dev))
- ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
- spin_unlock(&dev_priv->irq_lock);
- }
-
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
if (!HAS_PCH_NOP(dev)) {
@@ -1516,7 +1739,7 @@ static void i915_error_work_func(struct work_struct *work)
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
int ret;
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
/*
* Note that there's only one work item which does gpu resets, so we
@@ -1530,7 +1753,7 @@ static void i915_error_work_func(struct work_struct *work)
*/
if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
reset_event);
/*
@@ -1557,7 +1780,7 @@ static void i915_error_work_func(struct work_struct *work)
smp_mb__before_atomic_inc();
atomic_inc(&dev_priv->gpu_error.reset_counter);
- kobject_uevent_env(&dev->primary->kdev.kobj,
+ kobject_uevent_env(&dev->primary->kdev->kobj,
KOBJ_CHANGE, reset_done_event);
} else {
atomic_set(&error->reset_counter, I915_WEDGED);
@@ -1787,7 +2010,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK_ILK(pipe);
+ DE_PIPE_VBLANK(pipe);
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
@@ -1810,7 +2033,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
imr = I915_READ(VLV_IMR);
- if (pipe == 0)
+ if (pipe == PIPE_A)
imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
else
imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1845,7 +2068,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK_ILK(pipe);
+ DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv, bit);
@@ -1862,7 +2085,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
i915_disable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
imr = I915_READ(VLV_IMR);
- if (pipe == 0)
+ if (pipe == PIPE_A)
imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
else
imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1965,6 +2188,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
if (tmp & RING_WAIT) {
DRM_ERROR("Kicking stuck wait on %s\n",
ring->name);
+ i915_handle_error(dev, false);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
}
@@ -1976,6 +2200,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
case 1:
DRM_ERROR("Kicking stuck semaphore on %s\n",
ring->name);
+ i915_handle_error(dev, false);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
case 0:
@@ -2021,12 +2246,21 @@ static void i915_hangcheck_elapsed(unsigned long data)
if (ring->hangcheck.seqno == seqno) {
if (ring_idle(ring, seqno)) {
+ ring->hangcheck.action = HANGCHECK_IDLE;
+
if (waitqueue_active(&ring->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
- DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
- ring->name);
- wake_up_all(&ring->irq_queue);
- ring->hangcheck.score += HUNG;
+ if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
+ if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
+ DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+ ring->name);
+ else
+ DRM_INFO("Fake missed irq on %s\n",
+ ring->name);
+ wake_up_all(&ring->irq_queue);
+ }
+ /* Safeguard against driver failure */
+ ring->hangcheck.score += BUSY;
} else
busy = false;
} else {
@@ -2049,6 +2283,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
acthd);
switch (ring->hangcheck.action) {
+ case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
break;
case HANGCHECK_ACTIVE:
@@ -2064,6 +2299,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
}
}
} else {
+ ring->hangcheck.action = HANGCHECK_ACTIVE;
+
/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
@@ -2254,10 +2491,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
pm_irqs = gt_irqs = 0;
dev_priv->gt_irq_mask = ~0;
- if (HAS_L3_GPU_CACHE(dev)) {
+ if (HAS_L3_DPF(dev)) {
/* L3 parity interrupt is always unmasked. */
- dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
+ gt_irqs |= GT_PARITY_ERROR(dev);
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
@@ -2306,8 +2543,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
- DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
- DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
+ DE_AUX_CHANNEL_A |
+ DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+ DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
+ DE_POISON);
extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
}
@@ -2341,7 +2580,8 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
- u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+ u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
+ PIPE_CRC_DONE_ENABLE;
unsigned long irqflags;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -2371,9 +2611,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 0, pipestat_enable);
- i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
- i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+ i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2464,6 +2704,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
static int i8xx_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
I915_WRITE16(EMR,
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -2484,6 +2725,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
I915_USER_INTERRUPT);
POSTING_READ16(IER);
+ /* Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
return 0;
}
@@ -2570,13 +2818,14 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
- if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i8xx_handle_vblank(dev, 0, iir))
- flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ i8xx_handle_vblank(dev, pipe, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
- if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i8xx_handle_vblank(dev, 1, iir))
- flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
+ }
iir = new_iir;
}
@@ -2623,6 +2872,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
+ unsigned long irqflags;
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -2658,6 +2908,13 @@ static int i915_irq_postinstall(struct drm_device *dev)
i915_enable_asle_pipestat(dev);
+ /* Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
return 0;
}
@@ -2769,6 +3026,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -2867,7 +3127,9 @@ static int i965_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
/*
@@ -3013,6 +3275,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
}
@@ -3122,18 +3387,21 @@ void intel_irq_init(struct drm_device *dev)
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
- dev->driver->get_vblank_counter = i915_get_vblank_counter;
- dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ if (IS_GEN2(dev)) {
+ dev->max_vblank_count = 0;
+ dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
+ } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ } else {
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
}
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
- else
- dev->driver->get_vblank_timestamp = NULL;
- dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+ dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+ }
if (IS_VALLEYVIEW(dev)) {
dev->driver->irq_handler = valleyview_irq_handler;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 38f96f65d87a..3f303ba995c5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,6 +26,7 @@
#define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
@@ -264,6 +265,11 @@
#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
+
+#define MI_PREDICATE_RESULT_2 (0x2214)
+#define LOWER_SLICE_ENABLED (1<<0)
+#define LOWER_SLICE_DISABLED (0<<0)
+
/*
* 3D instructions used by the kernel
*/
@@ -346,12 +352,25 @@
#define IOSF_PORT_PUNIT 0x4
#define IOSF_PORT_NC 0x11
#define IOSF_PORT_DPIO 0x12
+#define IOSF_PORT_GPIO_NC 0x13
+#define IOSF_PORT_CCK 0x14
+#define IOSF_PORT_CCU 0xA9
+#define IOSF_PORT_GPS_CORE 0x48
#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
#define PUNIT_OPCODE_REG_READ 6
#define PUNIT_OPCODE_REG_WRITE 7
+#define PUNIT_REG_PWRGT_CTRL 0x60
+#define PUNIT_REG_PWRGT_STATUS 0x61
+#define PUNIT_CLK_GATE 1
+#define PUNIT_PWR_RESET 2
+#define PUNIT_PWR_GATE 3
+#define RENDER_PWRGT (PUNIT_PWR_GATE << 0)
+#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2)
+#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
+
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
@@ -372,6 +391,40 @@
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
+/* vlv2 north clock (CCK) registers */
+#define CCK_FUSE_REG 0x8
+#define CCK_FUSE_HPLL_FREQ_MASK 0x3
+#define CCK_REG_DSI_PLL_FUSE 0x44
+#define CCK_REG_DSI_PLL_CONTROL 0x48
+#define DSI_PLL_VCO_EN (1 << 31)
+#define DSI_PLL_LDO_GATE (1 << 30)
+#define DSI_PLL_P1_POST_DIV_SHIFT 17
+#define DSI_PLL_P1_POST_DIV_MASK (0x1ff << 17)
+#define DSI_PLL_P2_MUX_DSI0_DIV2 (1 << 13)
+#define DSI_PLL_P3_MUX_DSI1_DIV2 (1 << 12)
+#define DSI_PLL_MUX_MASK (3 << 9)
+#define DSI_PLL_MUX_DSI0_DSIPLL (0 << 10)
+#define DSI_PLL_MUX_DSI0_CCK (1 << 10)
+#define DSI_PLL_MUX_DSI1_DSIPLL (0 << 9)
+#define DSI_PLL_MUX_DSI1_CCK (1 << 9)
+#define DSI_PLL_CLK_GATE_MASK (0xf << 5)
+#define DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 8)
+#define DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 7)
+#define DSI_PLL_CLK_GATE_DSI0_CCK (1 << 6)
+#define DSI_PLL_CLK_GATE_DSI1_CCK (1 << 5)
+#define DSI_PLL_LOCK (1 << 0)
+#define CCK_REG_DSI_PLL_DIVIDER 0x4c
+#define DSI_PLL_LFSR (1 << 31)
+#define DSI_PLL_FRACTION_EN (1 << 30)
+#define DSI_PLL_FRAC_COUNTER_SHIFT 27
+#define DSI_PLL_FRAC_COUNTER_MASK (7 << 27)
+#define DSI_PLL_USYNC_CNT_SHIFT 18
+#define DSI_PLL_USYNC_CNT_MASK (0x1ff << 18)
+#define DSI_PLL_N1_DIV_SHIFT 16
+#define DSI_PLL_N1_DIV_MASK (3 << 16)
+#define DSI_PLL_M1_DIV_SHIFT 0
+#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
+
/*
* DPIO - a special bus for various display related registers to hide behind
*
@@ -387,11 +440,11 @@
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
#define DPIO_SFR_BYPASS (1<<1)
-#define DPIO_RESET (1<<0)
+#define DPIO_CMNRST (1<<0)
#define _DPIO_TX3_SWING_CTL4_A 0x690
#define _DPIO_TX3_SWING_CTL4_B 0x2a90
-#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX_SWING_CTL4_A, \
+#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
_DPIO_TX3_SWING_CTL4_B)
/*
@@ -604,6 +657,10 @@
#define ARB_MODE_SWIZZLE_IVB (1<<5)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
+#define RING_FAULT_GTTSEL_MASK (1<<11)
+#define RING_FAULT_SRCID(x) ((x >> 3) & 0xff)
+#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
+#define RING_FAULT_VALID (1<<0)
#define DONE_REG 0x40b0
#define BSD_HWS_PGA_GEN7 (0x04180)
#define BLT_HWS_PGA_GEN7 (0x04280)
@@ -665,13 +722,18 @@
#define NOPID 0x02094
#define HWSTAM 0x02098
#define DMA_FADD_I8XX 0x020d0
+#define RING_BBSTATE(base) ((base)+0x110)
#define ERROR_GEN6 0x040a0
#define GEN7_ERR_INT 0x44040
#define ERR_INT_POISON (1<<31)
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
+#define ERR_INT_PIPE_CRC_DONE_C (1<<8)
#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
+#define ERR_INT_PIPE_CRC_DONE_B (1<<5)
#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
+#define ERR_INT_PIPE_CRC_DONE_A (1<<2)
+#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + pipe*3))
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
@@ -886,6 +948,7 @@
#define GT_BLT_USER_INTERRUPT (1 << 22)
#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_BSD_USER_INTERRUPT (1 << 12)
+#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
@@ -896,6 +959,10 @@
#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
+#define GT_PARITY_ERROR(dev) \
+ (GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
+ (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
+
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1<<5)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
@@ -1044,9 +1111,6 @@
_HSW_PIPE_SLICE_CHICKEN_1_A, + \
_HSW_PIPE_SLICE_CHICKEN_1_B)
-#define HSW_CLKGATE_DISABLE_PART_1 0x46500
-#define HSW_DPFC_GATING_DISABLE (1<<23)
-
/*
* GPIO regs
*/
@@ -1383,6 +1447,12 @@
#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
+#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508)
+#define CDCLK_FREQ_SHIFT 4
+#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
+#define CZCLK_FREQ_MASK 0xf
+#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
+
/*
* Palette regs
*/
@@ -1400,13 +1470,15 @@
* device 0 function 0's pci config register 0x44 or 0x48 and matches it in
* every way. It is not accessible from the CP register read instructions.
*
+ * Starting from Haswell, the MCHBAR mirror is read-only: registers can no
+ * longer be written through it.
*/
#define MCHBAR_MIRROR_BASE 0x10000
#define MCHBAR_MIRROR_BASE_SNB 0x140000
/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
-#define DCLK 0x5e04
+#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
/** 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200
@@ -1701,9 +1773,9 @@
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
-#define GEN6_GT_PERF_STATUS 0x145948
-#define GEN6_RP_STATE_LIMITS 0x145994
-#define GEN6_RP_STATE_CAP 0x145998
+#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
+#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
/*
* Logical Context regs
@@ -1749,6 +1821,9 @@
*/
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
+#define VLV_CLK_CTL2 0x101104
+#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
+
/*
* Overlay regs
*/
@@ -1767,6 +1842,83 @@
* Display engine regs
*/
+/* Pipe A CRC regs */
+#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050)
+#define PIPE_CRC_ENABLE (1 << 31)
+/* ivb+ source selection */
+#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
+#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
+#define PIPE_CRC_SOURCE_PF_IVB (2 << 29)
+/* ilk+ source selection */
+#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28)
+#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28)
+#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28)
+/* embedded DP port on the north display block, reserved on ivb */
+#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28)
+#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */
+/* vlv source selection */
+#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27)
+#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27)
+#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27)
+/* with DP port the pipe source is invalid */
+#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27)
+#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27)
+#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27)
+/* gen3+ source selection */
+#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28)
+#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28)
+#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28)
+/* with DP/TV port the pipe source is invalid */
+#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28)
+#define PIPE_CRC_SOURCE_TV_PRE (4 << 28)
+#define PIPE_CRC_SOURCE_TV_POST (5 << 28)
+#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28)
+#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28)
+/* gen2 doesn't have source selection bits */
+#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30)
+
+#define _PIPE_CRC_RES_1_A_IVB 0x60064
+#define _PIPE_CRC_RES_2_A_IVB 0x60068
+#define _PIPE_CRC_RES_3_A_IVB 0x6006c
+#define _PIPE_CRC_RES_4_A_IVB 0x60070
+#define _PIPE_CRC_RES_5_A_IVB 0x60074
+
+#define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060)
+#define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064)
+#define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068)
+#define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c)
+#define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080)
+
+/* Pipe B CRC regs */
+#define _PIPE_CRC_RES_1_B_IVB 0x61064
+#define _PIPE_CRC_RES_2_B_IVB 0x61068
+#define _PIPE_CRC_RES_3_B_IVB 0x6106c
+#define _PIPE_CRC_RES_4_B_IVB 0x61070
+#define _PIPE_CRC_RES_5_B_IVB 0x61074
+
+#define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
+#define PIPE_CRC_RES_1_IVB(pipe) \
+ _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
+#define PIPE_CRC_RES_2_IVB(pipe) \
+ _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB)
+#define PIPE_CRC_RES_3_IVB(pipe) \
+ _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB)
+#define PIPE_CRC_RES_4_IVB(pipe) \
+ _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB)
+#define PIPE_CRC_RES_5_IVB(pipe) \
+ _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
+
+#define PIPE_CRC_RES_RED(pipe) \
+ _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
+#define PIPE_CRC_RES_GREEN(pipe) \
+ _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
+#define PIPE_CRC_RES_BLUE(pipe) \
+ _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
+#define PIPE_CRC_RES_RES1_I915(pipe) \
+ _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
+#define PIPE_CRC_RES_RES2_G4X(pipe) \
+ _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
+
/* Pipe A timing regs */
#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
@@ -1789,7 +1941,6 @@
#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
-
#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
@@ -1800,7 +1951,8 @@
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
/* HSW eDP PSR registers */
-#define EDP_PSR_CTL 0x64800
+#define EDP_PSR_BASE(dev) 0x64800
+#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
#define EDP_PSR_ENABLE (1<<31)
#define EDP_PSR_LINK_DISABLE (0<<27)
#define EDP_PSR_LINK_STANDBY (1<<27)
@@ -1823,16 +1975,16 @@
#define EDP_PSR_TP1_TIME_0us (3<<4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
-#define EDP_PSR_AUX_CTL 0x64810
-#define EDP_PSR_AUX_DATA1 0x64814
+#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
+#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
#define EDP_PSR_DPCD_COMMAND 0x80060000
-#define EDP_PSR_AUX_DATA2 0x64818
+#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
-#define EDP_PSR_AUX_DATA3 0x6481c
-#define EDP_PSR_AUX_DATA4 0x64820
-#define EDP_PSR_AUX_DATA5 0x64824
+#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
+#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
+#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
-#define EDP_PSR_STATUS_CTL 0x64840
+#define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40)
#define EDP_PSR_STATUS_STATE_MASK (7<<29)
#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
@@ -1856,10 +2008,10 @@
#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
#define EDP_PSR_STATUS_IDLE_MASK 0xf
-#define EDP_PSR_PERF_CNT 0x64844
+#define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44)
#define EDP_PSR_PERF_CNT_MASK 0xffffff
-#define EDP_PSR_DEBUG_CTL 0x64860
+#define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60)
#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
@@ -2002,6 +2154,14 @@
#define PCH_HDMIC 0xe1150
#define PCH_HDMID 0xe1160
+#define PORT_DFT_I9XX 0x61150
+#define DC_BALANCE_RESET (1 << 25)
+#define PORT_DFT2_G4X 0x61154
+#define DC_BALANCE_RESET_VLV (1 << 31)
+#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
+#define PIPE_B_SCRAMBLE_RESET (1 << 1)
+#define PIPE_A_SCRAMBLE_RESET (1 << 0)
+
/* Gen 3 SDVO bits: */
#define SDVO_ENABLE (1 << 31)
#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
@@ -2030,6 +2190,7 @@
/* Gen 4 SDVO/HDMI bits: */
#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
+#define SDVO_COLOR_FORMAT_MASK (7 << 26)
#define SDVO_ENCODING_SDVO (0 << 10)
#define SDVO_ENCODING_HDMI (2 << 10)
#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
@@ -2982,6 +3143,7 @@
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
#define I965_PIPECONF_ACTIVE (1<<30)
+#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */
#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
@@ -3180,11 +3342,11 @@
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK 0x45100
-#define WM0_PIPE_PLANE_MASK (0x7f<<16)
+#define WM0_PIPE_PLANE_MASK (0xffff<<16)
#define WM0_PIPE_PLANE_SHIFT 16
-#define WM0_PIPE_SPRITE_MASK (0x3f<<8)
+#define WM0_PIPE_SPRITE_MASK (0xff<<8)
#define WM0_PIPE_SPRITE_SHIFT 8
-#define WM0_PIPE_CURSOR_MASK (0x1f)
+#define WM0_PIPE_CURSOR_MASK (0xff)
#define WM0_PIPEB_ILK 0x45104
#define WM0_PIPEC_IVB 0x45200
@@ -3194,9 +3356,9 @@
#define WM1_LP_LATENCY_MASK (0x7f<<24)
#define WM1_LP_FBC_MASK (0xf<<20)
#define WM1_LP_FBC_SHIFT 20
-#define WM1_LP_SR_MASK (0x1ff<<8)
+#define WM1_LP_SR_MASK (0x7ff<<8)
#define WM1_LP_SR_SHIFT 8
-#define WM1_LP_CURSOR_MASK (0x3f)
+#define WM1_LP_CURSOR_MASK (0xff)
#define WM2_LP_ILK 0x4510c
#define WM2_LP_EN (1<<31)
#define WM3_LP_ILK 0x45110
@@ -3277,17 +3439,17 @@
* } while (high1 != high2);
* frame = (high1 << 8) | low1;
*/
-#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040)
+#define _PIPEAFRAMEHIGH 0x70040
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
-#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044)
+#define _PIPEAFRAMEPIXEL 0x70044
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
/* GM45+ just has to be different */
-#define _PIPEA_FRMCOUNT_GM45 0x70040
-#define _PIPEA_FLIPCOUNT_GM45 0x70044
+#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70040)
+#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70044)
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
/* Cursor A & B regs */
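For reference, a minimal sketch of the split-counter read sequence that the frame-counter comment in the hunk above describes (high word, low byte, re-read high until stable). This is an editorial illustration, not part of the diff: PIPEFRAME() and PIPEFRAMEPIXEL() stand in for whatever per-pipe lookup the driver layers on top of _PIPEAFRAMEHIGH/_PIPEAFRAMEPIXEL, and I915_READ() is the MMIO accessor used elsewhere in this patch.

static u32 frame_counter_sketch(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 high1, high2, low;

	do {
		/* Read the high word, then the low byte, then the high word
		 * again; retry if the counter rolled over in between. */
		high1 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
		low = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_FRAME_LOW_MASK) >>
			PIPE_FRAME_LOW_SHIFT;
		high2 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	/* frame = (high1 << 8) | low1, exactly as in the comment */
	return (high1 << 8) | low;
}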
@@ -3418,10 +3580,10 @@
#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
-#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040)
-#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044)
-#define _PIPEB_FRMCOUNT_GM45 0x71040
-#define _PIPEB_FLIPCOUNT_GM45 0x71044
+#define _PIPEBFRAMEHIGH 0x71040
+#define _PIPEBFRAMEPIXEL 0x71044
+#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71040)
+#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71044)
/* Display B control */
@@ -3776,6 +3938,7 @@
#define DE_SPRITEA_FLIP_DONE (1 << 28)
#define DE_PLANEB_FLIP_DONE (1 << 27)
#define DE_PLANEA_FLIP_DONE (1 << 26)
+#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
#define DE_PCU_EVENT (1 << 25)
#define DE_GTT_FAULT (1 << 24)
#define DE_POISON (1 << 23)
@@ -3789,13 +3952,18 @@
#define DE_PIPEB_ODD_FIELD (1 << 13)
#define DE_PIPEB_LINE_COMPARE (1 << 12)
#define DE_PIPEB_VSYNC (1 << 11)
+#define DE_PIPEB_CRC_DONE (1 << 10)
#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
#define DE_PIPEA_VBLANK (1 << 7)
+#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe)))
#define DE_PIPEA_EVEN_FIELD (1 << 6)
#define DE_PIPEA_ODD_FIELD (1 << 5)
#define DE_PIPEA_LINE_COMPARE (1 << 4)
#define DE_PIPEA_VSYNC (1 << 3)
+#define DE_PIPEA_CRC_DONE (1 << 2)
+#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
+#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe)))
/* More Ivybridge lolz */
#define DE_ERR_INT_IVB (1<<30)
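A quick sanity check of the parameterised helpers added in the hunk above (editorial example, not part of the diff), using the PIPE_A = 0 / PIPE_B = 1 numbering the rest of this patch relies on: DE_PIPE_VBLANK(PIPE_A) = 1 << 7 = DE_PIPEA_VBLANK, DE_PIPE_CRC_DONE(PIPE_B) = 1 << 10 = DE_PIPEB_CRC_DONE, DE_PIPE_FIFO_UNDERRUN(PIPE_B) = 1 << 8 = DE_PIPEB_FIFO_UNDERRUN, and DE_PLANE_FLIP_DONE(1) = 1 << 27 = DE_PLANEB_FLIP_DONE, so the per-pipe macros reproduce the existing fixed bit definitions exactly.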
@@ -3811,9 +3979,8 @@
#define DE_PIPEB_VBLANK_IVB (1<<5)
#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
+#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane)))
#define DE_PIPEA_VBLANK_IVB (1<<0)
-
-#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7))
#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
@@ -4279,7 +4446,9 @@
#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D 0xc2020
+#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
/* CPU: FDI_TX */
@@ -4410,6 +4579,8 @@
#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
+#define PANEL_PORT_SELECT_DPB_VLV (1 << 30)
+#define PANEL_PORT_SELECT_DPC_VLV (2 << 30)
#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
@@ -4441,7 +4612,6 @@
#define PANEL_PORT_SELECT_MASK (3 << 30)
#define PANEL_PORT_SELECT_LVDS (0 << 30)
#define PANEL_PORT_SELECT_DPA (1 << 30)
-#define EDP_PANEL (1 << 30)
#define PANEL_PORT_SELECT_DPC (2 << 30)
#define PANEL_PORT_SELECT_DPD (3 << 30)
#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
@@ -4450,11 +4620,6 @@
#define PANEL_LIGHT_ON_DELAY_SHIFT 0
#define PCH_PP_OFF_DELAYS 0xc720c
-#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
-#define PANEL_POWER_PORT_LVDS (0 << 30)
-#define PANEL_POWER_PORT_DP_A (1 << 30)
-#define PANEL_POWER_PORT_DP_C (2 << 30)
-#define PANEL_POWER_PORT_DP_D (3 << 30)
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -4632,7 +4797,7 @@
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
-#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0)
+#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
#define GEN6_RP_UP_THRESHOLD 0xA02C
#define GEN6_RP_DOWN_THRESHOLD 0xA030
@@ -4677,6 +4842,10 @@
GEN6_PM_RP_DOWN_TIMEOUT)
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
+#define VLV_COUNTER_CONTROL 0x138104
+#define VLV_COUNT_RANGE_HIGH (1<<15)
+#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
+#define VLV_RENDER_RC6_COUNT_EN (1<<0)
#define GEN6_GT_GFX_RC6 0x138108
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
@@ -4688,6 +4857,8 @@
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_PCODE_READ_D_COMP 0x10
+#define GEN6_PCODE_WRITE_D_COMP 0x11
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define GEN6_PCODE_DATA 0x138128
@@ -4707,6 +4878,7 @@
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
+#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */
#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
#define GEN7_PARITY_ERROR_VALID (1<<13)
#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
@@ -4720,6 +4892,7 @@
#define GEN7_L3CDERRST1_ENABLE (1<<7)
#define GEN7_L3LOG_BASE 0xB070
+#define HSW_L3LOG_BASE_SLICE1 0xB270
#define GEN7_L3LOG_SIZE 0x80
#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
@@ -4798,7 +4971,17 @@
#define AUD_CONFIG_LOWER_N_SHIFT 4
#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
/* HSW Audio */
@@ -5122,4 +5305,414 @@
#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+/* VLV MIPI registers */
+
+#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
+#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
+#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define DPI_ENABLE (1 << 31) /* A + B */
+#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
+#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
+#define DUAL_LINK_MODE_MASK (1 << 26)
+#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
+#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
+#define DITHERING_ENABLE (1 << 25) /* A + B */
+#define FLOPPED_HSTX (1 << 23)
+#define DE_INVERT (1 << 19) /* XXX */
+#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
+#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
+#define AFE_LATCHOUT (1 << 17)
+#define LP_OUTPUT_HOLD (1 << 16)
+#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
+#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
+#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11
+#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
+#define CSB_SHIFT 9
+#define CSB_MASK (3 << 9)
+#define CSB_20MHZ (0 << 9)
+#define CSB_10MHZ (1 << 9)
+#define CSB_40MHZ (2 << 9)
+#define BANDGAP_MASK (1 << 8)
+#define BANDGAP_PNW_CIRCUIT (0 << 8)
+#define BANDGAP_LNC_CIRCUIT (1 << 8)
+#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
+#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
+#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */
+#define TEARING_EFFECT_SHIFT 2 /* A + B */
+#define TEARING_EFFECT_MASK (3 << 2)
+#define TEARING_EFFECT_OFF (0 << 2)
+#define TEARING_EFFECT_DSI (1 << 2)
+#define TEARING_EFFECT_GPIO (2 << 2)
+#define LANE_CONFIGURATION_SHIFT 0
+#define LANE_CONFIGURATION_MASK (3 << 0)
+#define LANE_CONFIGURATION_4LANE (0 << 0)
+#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
+#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
+
+#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
+#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
+#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define TEARING_EFFECT_DELAY_SHIFT 0
+#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
+
+/* XXX: all bits reserved */
+#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
+
+/* MIPI DSI Controller and D-PHY registers */
+
+#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
+#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
+#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
+#define ULPS_STATE_MASK (3 << 1)
+#define ULPS_STATE_ENTER (2 << 1)
+#define ULPS_STATE_EXIT (1 << 1)
+#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
+#define DEVICE_READY (1 << 0)
+
+#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
+#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
+#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
+#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
+#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define TEARING_EFFECT (1 << 31)
+#define SPL_PKT_SENT_INTERRUPT (1 << 30)
+#define GEN_READ_DATA_AVAIL (1 << 29)
+#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
+#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
+#define RX_PROT_VIOLATION (1 << 26)
+#define RX_INVALID_TX_LENGTH (1 << 25)
+#define ACK_WITH_NO_ERROR (1 << 24)
+#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
+#define LP_RX_TIMEOUT (1 << 22)
+#define HS_TX_TIMEOUT (1 << 21)
+#define DPI_FIFO_UNDERRUN (1 << 20)
+#define LOW_CONTENTION (1 << 19)
+#define HIGH_CONTENTION (1 << 18)
+#define TXDSI_VC_ID_INVALID (1 << 17)
+#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16)
+#define TXCHECKSUM_ERROR (1 << 15)
+#define TXECC_MULTIBIT_ERROR (1 << 14)
+#define TXECC_SINGLE_BIT_ERROR (1 << 13)
+#define TXFALSE_CONTROL_ERROR (1 << 12)
+#define RXDSI_VC_ID_INVALID (1 << 11)
+#define RXDSI_DATA_TYPE_NOT_REGOGNISED (1 << 10)
+#define RXCHECKSUM_ERROR (1 << 9)
+#define RXECC_MULTIBIT_ERROR (1 << 8)
+#define RXECC_SINGLE_BIT_ERROR (1 << 7)
+#define RXFALSE_CONTROL_ERROR (1 << 6)
+#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5)
+#define RX_LP_TX_SYNC_ERROR (1 << 4)
+#define RXEXCAPE_MODE_ENTRY_ERROR (1 << 3)
+#define RXEOT_SYNC_ERROR (1 << 2)
+#define RXSOT_SYNC_ERROR (1 << 1)
+#define RXSOT_ERROR (1 << 0)
+
+#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
+#define CMD_MODE_NOT_SUPPORTED (0 << 13)
+#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
+#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13)
+#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13)
+#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13)
+#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13)
+#define VID_MODE_FORMAT_MASK (0xf << 7)
+#define VID_MODE_NOT_SUPPORTED (0 << 7)
+#define VID_MODE_FORMAT_RGB565 (1 << 7)
+#define VID_MODE_FORMAT_RGB666 (2 << 7)
+#define VID_MODE_FORMAT_RGB666_LOOSE (3 << 7)
+#define VID_MODE_FORMAT_RGB888 (4 << 7)
+#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5
+#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5)
+#define VID_MODE_CHANNEL_NUMBER_SHIFT 3
+#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3)
+#define DATA_LANES_PRG_REG_SHIFT 0
+#define DATA_LANES_PRG_REG_MASK (7 << 0)
+
+#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
+
+#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
+
+#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define TURN_AROUND_TIMEOUT_MASK 0x3f
+
+#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define DEVICE_RESET_TIMER_MASK 0xffff
+
+#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
+#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
+#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define VERTICAL_ADDRESS_SHIFT 16
+#define VERTICAL_ADDRESS_MASK (0xffff << 16)
+#define HORIZONTAL_ADDRESS_SHIFT 0
+#define HORIZONTAL_ADDRESS_MASK 0xffff
+
+#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define DBI_FIFO_EMPTY_HALF (0 << 0)
+#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
+#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
+
+/* regs below are bits 15:0 */
+#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
+#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
+#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
+#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
+#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
+#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
+#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
+#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
+#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
+/* regs above are bits 15:0 */
+
+#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
+#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
+#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define DPI_LP_MODE (1 << 6)
+#define BACKLIGHT_OFF (1 << 5)
+#define BACKLIGHT_ON (1 << 4)
+#define COLOR_MODE_OFF (1 << 3)
+#define COLOR_MODE_ON (1 << 2)
+#define TURN_ON (1 << 1)
+#define SHUTDOWN (1 << 0)
+
+#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
+#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
+#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define COMMAND_BYTE_SHIFT 0
+#define COMMAND_BYTE_MASK (0x3f << 0)
+
+#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
+#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
+#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define MASTER_INIT_TIMER_SHIFT 0
+#define MASTER_INIT_TIMER_MASK (0xffff << 0)
+
+#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define MAX_RETURN_PKT_SIZE_SHIFT 0
+#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
+
+#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
+#define DISABLE_VIDEO_BTA (1 << 3)
+#define IP_TG_CONFIG (1 << 2)
+#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0)
+#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
+#define VIDEO_MODE_BURST (3 << 0)
+
+#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
+#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
+#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
+#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
+#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
+#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4)
+#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
+#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2)
+#define CLOCKSTOP (1 << 1)
+#define EOT_DISABLE (1 << 0)
+
+#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
+#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
+#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define LP_BYTECLK_SHIFT 0
+#define LP_BYTECLK_MASK (0xffff << 0)
+
+/* bits 31:0 */
+#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
+#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
+#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+
+/* bits 31:0 */
+#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
+#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
+#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
+#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
+#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
+#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define LONG_PACKET_WORD_COUNT_SHIFT 8
+#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
+#define SHORT_PACKET_PARAM_SHIFT 8
+#define SHORT_PACKET_PARAM_MASK (0xffff << 8)
+#define VIRTUAL_CHANNEL_SHIFT 6
+#define VIRTUAL_CHANNEL_MASK (3 << 6)
+#define DATA_TYPE_SHIFT 0
+#define DATA_TYPE_MASK (0x3f << 0)
+/* data type values, see include/video/mipi_display.h */
+
+#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
+#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define DPI_FIFO_EMPTY (1 << 28)
+#define DBI_FIFO_EMPTY (1 << 27)
+#define LP_CTRL_FIFO_EMPTY (1 << 26)
+#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
+#define LP_CTRL_FIFO_FULL (1 << 24)
+#define HS_CTRL_FIFO_EMPTY (1 << 18)
+#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
+#define HS_CTRL_FIFO_FULL (1 << 16)
+#define LP_DATA_FIFO_EMPTY (1 << 10)
+#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
+#define LP_DATA_FIFO_FULL (1 << 8)
+#define HS_DATA_FIFO_EMPTY (1 << 2)
+#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
+#define HS_DATA_FIFO_FULL (1 << 0)
+
+#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define DBI_HS_LP_MODE_MASK (1 << 0)
+#define DBI_LP_MODE (1 << 0)
+#define DBI_HS_MODE (0 << 0)
+
+#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
+#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
+#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define EXIT_ZERO_COUNT_SHIFT 24
+#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
+#define TRAIL_COUNT_SHIFT 16
+#define TRAIL_COUNT_MASK (0x1f << 16)
+#define CLK_ZERO_COUNT_SHIFT 8
+#define CLK_ZERO_COUNT_MASK (0xff << 8)
+#define PREPARE_COUNT_SHIFT 0
+#define PREPARE_COUNT_MASK (0x3f << 0)
+
+/* bits 31:0 */
+#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
+#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
+#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define LP_HS_SSW_CNT_SHIFT 16
+#define LP_HS_SSW_CNT_MASK (0xffff << 16)
+#define HS_LP_PWR_SW_CNT_SHIFT 0
+#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
+
+#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
+#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define STOP_STATE_STALL_COUNTER_SHIFT 0
+#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
+
+#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
+#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
+#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
+#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define RX_CONTENTION_DETECTED (1 << 0)
+
+/* XXX: only pipe A ?!? */
+#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
+#define DBI_TYPEC_ENABLE (1 << 31)
+#define DBI_TYPEC_WIP (1 << 30)
+#define DBI_TYPEC_OPTION_SHIFT 28
+#define DBI_TYPEC_OPTION_MASK (3 << 28)
+#define DBI_TYPEC_FREQ_SHIFT 24
+#define DBI_TYPEC_FREQ_MASK (0xf << 24)
+#define DBI_TYPEC_OVERRIDE (1 << 8)
+#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0
+#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0)
+
+
+/* MIPI adapter registers */
+
+#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
+#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
+#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
+#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
+#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
+#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5)
+#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5)
+#define READ_REQUEST_PRIORITY_SHIFT 3
+#define READ_REQUEST_PRIORITY_MASK (3 << 3)
+#define READ_REQUEST_PRIORITY_LOW (0 << 3)
+#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
+#define RGB_FLIP_TO_BGR (1 << 2)
+
+#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
+#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
+#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define DATA_MEM_ADDRESS_SHIFT 5
+#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
+#define DATA_VALID (1 << 0)
+
+#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
+#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
+#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define DATA_LENGTH_SHIFT 0
+#define DATA_LENGTH_MASK (0xfffff << 0)
+
+#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
+#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define COMMAND_MEM_ADDRESS_SHIFT 5
+#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
+#define AUTO_PWG_ENABLE (1 << 2)
+#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
+#define COMMAND_VALID (1 << 0)
+
+#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
+#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
+#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
+#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
+
+#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
+#define MIPI_READ_DATA_RETURN(pipe, n) \
+ (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+
+#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
+#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
+#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define READ_DATA_VALID(n) (1 << (n))
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 70db618989c4..a088f1f46bdb 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -340,7 +340,9 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
+ if (INTEL_INFO(dev)->gen <= 4)
+ pci_read_config_byte(dev->pdev, LBB,
+ &dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
@@ -367,7 +369,8 @@ int i915_save_state(struct drm_device *dev)
intel_disable_gt_powersave(dev);
/* Cache mode state */
- dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+ if (INTEL_INFO(dev)->gen < 7)
+ dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
@@ -390,7 +393,9 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
+ if (INTEL_INFO(dev)->gen <= 4)
+ pci_write_config_byte(dev->pdev, LBB,
+ dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
@@ -414,7 +419,9 @@ int i915_restore_state(struct drm_device *dev)
}
/* Cache mode state */
- I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
+ if (INTEL_INFO(dev)->gen < 7)
+ I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
+ 0xffff0000);
/* Memory arbitration state */
I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c8c4112de110..cef38fd320a7 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,30 +32,50 @@
#include "intel_drv.h"
#include "i915_drv.h"
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u64 raw_time; /* 32b value may overflow during fixed point math */
+ u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
if (!intel_enable_rc6(dev))
return 0;
- raw_time = I915_READ(reg) * 128ULL;
- return DIV_ROUND_UP_ULL(raw_time, 100000);
+ /* On VLV, residency time is in CZ units rather than 1.28us */
+ if (IS_VALLEYVIEW(dev)) {
+ u32 clkctl2;
+
+ clkctl2 = I915_READ(VLV_CLK_CTL2) >>
+ CLK_CTL2_CZCOUNT_30NS_SHIFT;
+ if (!clkctl2) {
+ WARN(!clkctl2, "bogus CZ count value");
+ return 0;
+ }
+ units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
+ if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+ units <<= 8;
+
+ div = 1000000ULL * bias;
+ }
+
+ raw_time = I915_READ(reg) * units;
+ return DIV_ROUND_UP_ULL(raw_time, div);
}
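Worked example for the VLV branch above (illustrative numbers, not taken from the patch): if the CZCOUNT_30NS field reads 10, i.e. ten CZ clocks per 30 ns (a roughly 333 MHz CZ clock), then units = DIV_ROUND_UP(30 * 100, 10) = 300 and div = 1000000 * 100, so a raw residency count of 1,000,000 yields DIV_ROUND_UP(1000000 * 300, 100000000) = 3, i.e. 3 ms — matching one million counter ticks at 3 ns each. When VLV_COUNT_RANGE_HIGH is set the counter advances in 256-CZ-clock steps, which is what the units <<= 8 accounts for.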
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *dminor = dev_to_drm_minor(kdev);
return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}
static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *dminor = dev_get_drvdata(kdev);
u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}
@@ -63,16 +83,20 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+ if (IS_VALLEYVIEW(dminor->dev))
+ rc6p_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}
static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+ if (IS_VALLEYVIEW(dminor->dev))
+ rc6pp_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
@@ -97,7 +121,7 @@ static struct attribute_group rc6_attr_group = {
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
- if (!HAS_L3_GPU_CACHE(dev))
+ if (!HAS_L3_DPF(dev))
return -EPERM;
if (offset % 4 != 0)
@@ -115,31 +139,34 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
- uint32_t misccpctl;
- int i, ret;
+ int slice = (int)(uintptr_t)attr->private;
+ int ret;
+
+ count = round_down(count, 4);
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
+ count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-
- for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
- *((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i);
-
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ if (dev_priv->l3_parity.remap_info[slice])
+ memcpy(buf,
+ dev_priv->l3_parity.remap_info[slice] + (offset/4),
+ count);
+ else
+ memset(buf, 0, count);
mutex_unlock(&drm_dev->struct_mutex);
- return i - offset;
+ return count;
}
static ssize_t
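
A minimal standalone sketch (illustrative only; all names are local to the example) of the bounds handling the reworked i915_l3_read() applies above: the request is rounded down to whole dwords, clamped to the remaining log space, and then served from the per-slice remap cache, or zero-filled when nothing has been written for that slice yet.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define L3LOG_SIZE 128			/* stand-in for GEN7_L3LOG_SIZE */

/* Copy count bytes of a slice's remap log at offset into buf; remap_info
 * may be NULL, meaning no remap values have been written for that slice. */
static size_t l3_log_read(uint8_t *buf, const uint32_t *remap_info,
			  size_t offset, size_t count)
{
	count &= ~(size_t)3;			/* round_down(count, 4) */
	if (offset >= L3LOG_SIZE)
		return 0;
	if (count > L3LOG_SIZE - offset)	/* min_t(size_t, size - offset, count) */
		count = L3LOG_SIZE - offset;

	if (remap_info)
		memcpy(buf, (const uint8_t *)remap_info + offset, count);
	else
		memset(buf, 0, count);		/* unwritten slice reads back as zero */

	return count;
}
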
@@ -148,21 +175,26 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ struct i915_hw_context *ctx;
u32 *temp = NULL; /* Just here to make handling failures easy */
+ int slice = (int)(uintptr_t)attr->private;
int ret;
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
+ if (dev_priv->hw_contexts_disabled)
+ return -ENXIO;
+
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
- if (!dev_priv->l3_parity.remap_info) {
+ if (!dev_priv->l3_parity.remap_info[slice]) {
temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!temp) {
mutex_unlock(&drm_dev->struct_mutex);
@@ -182,13 +214,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
* at this point it is left as a TODO.
*/
if (temp)
- dev_priv->l3_parity.remap_info = temp;
+ dev_priv->l3_parity.remap_info[slice] = temp;
- memcpy(dev_priv->l3_parity.remap_info + (offset/4),
- buf + (offset/4),
- count);
+ memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
- i915_gem_l3_remap(drm_dev);
+ /* NB: We defer the remapping until we switch to the context */
+ list_for_each_entry(ctx, &dev_priv->context_list, link)
+ ctx->remap_slice |= (1<<slice);
mutex_unlock(&drm_dev->struct_mutex);
@@ -200,17 +232,29 @@ static struct bin_attribute dpf_attrs = {
.size = GEN7_L3LOG_SIZE,
.read = i915_l3_read,
.write = i915_l3_write,
- .mmap = NULL
+ .mmap = NULL,
+ .private = (void *)0
+};
+
+static struct bin_attribute dpf_attrs_1 = {
+ .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
+ .size = GEN7_L3LOG_SIZE,
+ .read = i915_l3_read,
+ .write = i915_l3_write,
+ .mmap = NULL,
+ .private = (void *)1
};
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) {
u32 freq;
@@ -227,7 +271,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -238,11 +282,13 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@@ -257,7 +303,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
@@ -267,6 +313,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -310,11 +358,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@@ -329,7 +379,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, rp_state_cap, hw_max, hw_min;
@@ -339,6 +389,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev)) {
@@ -388,7 +440,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, rp_state_cap;
@@ -436,7 +488,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
{
struct device *kdev = container_of(kobj, struct device, kobj);
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct i915_error_state_file_priv error_priv;
struct drm_i915_error_state_buf error_str;
@@ -471,7 +523,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
loff_t off, size_t count)
{
struct device *kdev = container_of(kobj, struct device, kobj);
- struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
int ret;
@@ -501,27 +553,34 @@ void i915_setup_sysfs(struct drm_device *dev)
#ifdef CONFIG_PM
if (INTEL_INFO(dev)->gen >= 6) {
- ret = sysfs_merge_group(&dev->primary->kdev.kobj,
+ ret = sysfs_merge_group(&dev->primary->kdev->kobj,
&rc6_attr_group);
if (ret)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
#endif
- if (HAS_L3_GPU_CACHE(dev)) {
- ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
+ if (HAS_L3_DPF(dev)) {
+ ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n");
+
+ if (NUM_L3_SLICES(dev) > 1) {
+ ret = device_create_bin_file(dev->primary->kdev,
+ &dpf_attrs_1);
+ if (ret)
+ DRM_ERROR("l3 parity slice 1 setup failed\n");
+ }
}
ret = 0;
if (IS_VALLEYVIEW(dev))
- ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+ ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
else if (INTEL_INFO(dev)->gen >= 6)
- ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+ ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
if (ret)
DRM_ERROR("RPS sysfs setup failed\n");
- ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
+ ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
&error_state_attr);
if (ret)
DRM_ERROR("error_state sysfs setup failed\n");
@@ -529,13 +588,14 @@ void i915_setup_sysfs(struct drm_device *dev)
void i915_teardown_sysfs(struct drm_device *dev)
{
- sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
+ sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
if (IS_VALLEYVIEW(dev))
- sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+ sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
else
- sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
- device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
+ sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
+ device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
+ device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
- sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+ sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
#endif
}
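
A standalone sketch of the residency arithmetic the reworked calc_residency() performs (illustrative only; names are local to the example, and the CZ count field is interpreted as the comments in the hunk above describe):

#include <stdint.h>

static uint64_t div_round_up_u64(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

/* Non-VLV: one counter tick is 1.28us, so ms = ticks * 128 / 100000. */
static uint64_t rc6_residency_ms(uint32_t raw)
{
	return div_round_up_u64((uint64_t)raw * 128ULL, 100000ULL);
}

/* VLV: czcount_30ns (from VLV_CLK_CTL2) is taken as CZ clock ticks per
 * 30ns, so one tick lasts 30 / czcount_30ns ns; the bias of 100 keeps
 * precision through the integer division and 1000000 * bias converts
 * nanoseconds to milliseconds. */
static uint64_t rc6_residency_ms_vlv(uint32_t raw, uint32_t czcount_30ns,
				     int high_range)
{
	const uint64_t bias = 100ULL;
	uint64_t units = div_round_up_u64(30ULL * bias, czcount_30ns);

	if (high_range)		/* VLV_COUNT_RANGE_HIGH: counter steps by 256 */
		units <<= 8;

	return div_round_up_u64((uint64_t)raw * units, 1000000ULL * bias);
}
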
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index e2c5ee6f6194..6e580c98dede 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -233,6 +233,47 @@ TRACE_EVENT(i915_gem_evict_everything,
TP_printk("dev=%d", __entry->dev)
);
+TRACE_EVENT(i915_gem_evict_vm,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ ),
+
+ TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
+);
+
+TRACE_EVENT(i915_gem_ring_sync_to,
+ TP_PROTO(struct intel_ring_buffer *from,
+ struct intel_ring_buffer *to,
+ u32 seqno),
+ TP_ARGS(from, to, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, sync_from)
+ __field(u32, sync_to)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = from->dev->primary->index;
+ __entry->sync_from = from->id;
+ __entry->sync_to = to->id;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+ __entry->dev,
+ __entry->sync_from, __entry->sync_to,
+ __entry->seqno)
+);
+
TRACE_EVENT(i915_gem_ring_dispatch,
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
TP_ARGS(ring, seqno, flags),
@@ -304,9 +345,24 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
TP_ARGS(ring, seqno)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+TRACE_EVENT(i915_gem_request_complete,
+ TP_PROTO(struct intel_ring_buffer *ring),
+ TP_ARGS(ring),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, ring)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
+ __entry->seqno = ring->get_seqno(ring, false);
+ ),
+
+ TP_printk("dev=%u, ring=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 57fe1ae32a0d..43959edd4291 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -193,16 +193,14 @@ out:
static bool intel_dsm_pci_probe(struct pci_dev *pdev)
{
- acpi_handle dhandle, intel_handle;
- acpi_status status;
+ acpi_handle dhandle;
int ret;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
- status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_has_method(dhandle, "_DSM")) {
DRM_DEBUG_KMS("no _DSM method for intel device\n");
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 53f2bed8bc5f..e29bcae1ef81 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -389,7 +389,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
{
struct sdvo_device_mapping *p_mapping;
struct bdb_general_definitions *p_defs;
- struct child_device_config *p_child;
+ union child_device_config *p_child;
int i, child_device_num, count;
u16 block_size;
@@ -416,36 +416,36 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
- if (!p_child->device_type) {
+ if (!p_child->old.device_type) {
/* skip the device block if device type is invalid */
continue;
}
- if (p_child->slave_addr != SLAVE_ADDR1 &&
- p_child->slave_addr != SLAVE_ADDR2) {
+ if (p_child->old.slave_addr != SLAVE_ADDR1 &&
+ p_child->old.slave_addr != SLAVE_ADDR2) {
/*
* If the slave address is neither 0x70 nor 0x72,
* it is not a SDVO device. Skip it.
*/
continue;
}
- if (p_child->dvo_port != DEVICE_PORT_DVOB &&
- p_child->dvo_port != DEVICE_PORT_DVOC) {
+ if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
+ p_child->old.dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
continue;
}
DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
" %s port\n",
- p_child->slave_addr,
- (p_child->dvo_port == DEVICE_PORT_DVOB) ?
+ p_child->old.slave_addr,
+ (p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
- p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+ p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
if (!p_mapping->initialized) {
- p_mapping->dvo_port = p_child->dvo_port;
- p_mapping->slave_addr = p_child->slave_addr;
- p_mapping->dvo_wiring = p_child->dvo_wiring;
- p_mapping->ddc_pin = p_child->ddc_pin;
- p_mapping->i2c_pin = p_child->i2c_pin;
+ p_mapping->dvo_port = p_child->old.dvo_port;
+ p_mapping->slave_addr = p_child->old.slave_addr;
+ p_mapping->dvo_wiring = p_child->old.dvo_wiring;
+ p_mapping->ddc_pin = p_child->old.ddc_pin;
+ p_mapping->i2c_pin = p_child->old.i2c_pin;
p_mapping->initialized = 1;
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
p_mapping->dvo_port,
@@ -457,7 +457,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
- if (p_child->slave2_addr) {
+ if (p_child->old.slave2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -477,15 +477,13 @@ static void
parse_driver_features(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
- struct drm_device *dev = dev_priv->dev;
struct bdb_driver_features *driver;
driver = find_section(bdb, BDB_DRIVER_FEATURES);
if (!driver)
return;
- if (SUPPORTS_EDP(dev) &&
- driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
dev_priv->vbt.edp_support = 1;
if (driver->dual_frequency)
@@ -501,7 +499,7 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp = find_section(bdb, BDB_EDP);
if (!edp) {
- if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support)
+ if (dev_priv->vbt.edp_support)
DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return;
}
@@ -569,11 +567,149 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
}
static void
+parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_mipi *mipi;
+
+ mipi = find_section(bdb, BDB_MIPI);
+ if (!mipi) {
+ DRM_DEBUG_KMS("No MIPI BDB found");
+ return;
+ }
+
+ /* XXX: add more info */
+ dev_priv->vbt.dsi.panel_id = mipi->panel_id;
+}
+
+static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ struct bdb_header *bdb)
+{
+ union child_device_config *it, *child = NULL;
+ struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+ uint8_t hdmi_level_shift;
+ int i, j;
+ bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+ uint8_t aux_channel;
+ /* Each DDI port can have more than one value on the "DVO Port" field,
+ * so look for all the possible values for each port and abort if more
+ * than one is found. */
+ int dvo_ports[][2] = {
+ {DVO_PORT_HDMIA, DVO_PORT_DPA},
+ {DVO_PORT_HDMIB, DVO_PORT_DPB},
+ {DVO_PORT_HDMIC, DVO_PORT_DPC},
+ {DVO_PORT_HDMID, DVO_PORT_DPD},
+ {DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
+ };
+
+ /* Find the child device to use, abort if more than one found. */
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ it = dev_priv->vbt.child_dev + i;
+
+ for (j = 0; j < 2; j++) {
+ if (dvo_ports[port][j] == -1)
+ break;
+
+ if (it->common.dvo_port == dvo_ports[port][j]) {
+ if (child) {
+ DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
+ port_name(port));
+ return;
+ }
+ child = it;
+ }
+ }
+ }
+ if (!child)
+ return;
+
+ aux_channel = child->raw[25];
+
+ is_dvi = child->common.device_type & (1 << 4);
+ is_dp = child->common.device_type & (1 << 2);
+ is_crt = child->common.device_type & (1 << 0);
+ is_hdmi = is_dvi && (child->common.device_type & (1 << 11)) == 0;
+ is_edp = is_dp && (child->common.device_type & (1 << 12));
+
+ info->supports_dvi = is_dvi;
+ info->supports_hdmi = is_hdmi;
+ info->supports_dp = is_dp;
+
+ DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
+ port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
+
+ if (is_edp && is_dvi)
+ DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
+ port_name(port));
+ if (is_crt && port != PORT_E)
+ DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
+ if (is_crt && (is_dvi || is_dp))
+ DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
+ port_name(port));
+ if (is_dvi && (port == PORT_A || port == PORT_E))
+ DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
+ if (!is_dvi && !is_dp && !is_crt)
+ DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
+ port_name(port));
+ if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
+ DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+
+ if (is_dvi) {
+ if (child->common.ddc_pin == 0x05 && port != PORT_B)
+ DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
+ if (child->common.ddc_pin == 0x04 && port != PORT_C)
+ DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
+ if (child->common.ddc_pin == 0x06 && port != PORT_D)
+ DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
+ }
+
+ if (is_dp) {
+ if (aux_channel == 0x40 && port != PORT_A)
+ DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
+ if (aux_channel == 0x10 && port != PORT_B)
+ DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
+ if (aux_channel == 0x20 && port != PORT_C)
+ DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
+ if (aux_channel == 0x30 && port != PORT_D)
+ DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
+ }
+
+ if (bdb->version >= 158) {
+ /* The VBT HDMI level shift values match the table we have. */
+ hdmi_level_shift = child->raw[7] & 0xF;
+ if (hdmi_level_shift < 0xC) {
+ DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
+ port_name(port),
+ hdmi_level_shift);
+ info->hdmi_level_shift = hdmi_level_shift;
+ }
+ }
+}
+
+static void parse_ddi_ports(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum port port;
+
+ if (!HAS_DDI(dev))
+ return;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return;
+
+ if (bdb->version < 155)
+ return;
+
+ for (port = PORT_A; port < I915_MAX_PORTS; port++)
+ parse_ddi_port(dev_priv, port, bdb);
+}
+
+static void
parse_device_mapping(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
struct bdb_general_definitions *p_defs;
- struct child_device_config *p_child, *child_dev_ptr;
+ union child_device_config *p_child, *child_dev_ptr;
int i, child_device_num, count;
u16 block_size;
@@ -601,7 +737,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
/* get the number of child device that is present */
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
- if (!p_child->device_type) {
+ if (!p_child->common.device_type) {
/* skip the device block if device type is invalid */
continue;
}
@@ -621,7 +757,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
- if (!p_child->device_type) {
+ if (!p_child->common.device_type) {
/* skip the device block if device type is invalid */
continue;
}
@@ -637,6 +773,7 @@ static void
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ enum port port;
dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
@@ -655,6 +792,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->vbt.lvds_use_ssc = 1;
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
+
+ for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+
+ /* Recommended BSpec default: 800mV 0dB. */
+ info->hdmi_level_shift = 6;
+
+ info->supports_dvi = (port != PORT_A && port != PORT_E);
+ info->supports_hdmi = info->supports_dvi;
+ info->supports_dp = (port != PORT_E);
+ }
}
static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -745,6 +894,8 @@ intel_parse_bios(struct drm_device *dev)
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
+ parse_mipi(dev_priv, bdb);
+ parse_ddi_ports(dev_priv, bdb);
if (bios)
pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index e088d6f0956a..287cc5a21c2e 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -104,6 +104,7 @@ struct vbios_data {
#define BDB_LVDS_LFP_DATA 42
#define BDB_LVDS_BACKLIGHT 43
#define BDB_LVDS_POWER 44
+#define BDB_MIPI 50
#define BDB_SKIP 254 /* VBIOS private block, ignore */
struct bdb_general_features {
@@ -201,7 +202,10 @@ struct bdb_general_features {
#define DEVICE_PORT_DVOB 0x01
#define DEVICE_PORT_DVOC 0x02
-struct child_device_config {
+/* We used to keep this struct but without any version control. We should avoid
+ * using it in the future, but it should be safe to keep using it in the old
+ * code. */
+struct old_child_dev_config {
u16 handle;
u16 device_type;
u8 device_id[10]; /* ascii string */
@@ -223,6 +227,32 @@ struct child_device_config {
u8 dvo_function;
} __attribute__((packed));
+/* This one contains field offsets that are known to be common for all BDB
+ * versions. Notice that the meaning of the contents may still change,
+ * but at least the offsets are consistent. */
+struct common_child_dev_config {
+ u16 handle;
+ u16 device_type;
+ u8 not_common1[12];
+ u8 dvo_port;
+ u8 not_common2[2];
+ u8 ddc_pin;
+ u16 edid_ptr;
+} __attribute__((packed));
+
+/* This field changes depending on the BDB version, so the most reliable way to
+ * read it is by checking the BDB version and reading the raw pointer. */
+union child_device_config {
+ /* This one is safe to be used anywhere, but the code should still check
+ * the BDB version. */
+ u8 raw[33];
+ /* This one should only be kept for legacy code. */
+ struct old_child_dev_config old;
+ /* This one should also be safe to use anywhere, even without version
+ * checks. */
+ struct common_child_dev_config common;
+};
+
struct bdb_general_definitions {
/* DDC GPIO */
u8 crt_ddc_gmbus_pin;
@@ -248,7 +278,7 @@ struct bdb_general_definitions {
* number = (block_size - sizeof(bdb_general_definitions))/
* sizeof(child_device_config);
*/
- struct child_device_config devices[0];
+ union child_device_config devices[0];
} __attribute__((packed));
struct bdb_lvds_options {
@@ -618,4 +648,57 @@ int intel_parse_bios(struct drm_device *dev);
#define PORT_IDPC 8
#define PORT_IDPD 9
+/* Possible values for the "DVO Port" field for versions >= 155: */
+#define DVO_PORT_HDMIA 0
+#define DVO_PORT_HDMIB 1
+#define DVO_PORT_HDMIC 2
+#define DVO_PORT_HDMID 3
+#define DVO_PORT_LVDS 4
+#define DVO_PORT_TV 5
+#define DVO_PORT_CRT 6
+#define DVO_PORT_DPB 7
+#define DVO_PORT_DPC 8
+#define DVO_PORT_DPD 9
+#define DVO_PORT_DPA 10
+
+/* MIPI DSI panel info */
+struct bdb_mipi {
+ u16 panel_id;
+ u16 bridge_revision;
+
+ /* General params */
+ u32 dithering:1;
+ u32 bpp_pixel_format:1;
+ u32 rsvd1:1;
+ u32 dphy_valid:1;
+ u32 resvd2:28;
+
+ u16 port_info;
+ u16 rsvd3:2;
+ u16 num_lanes:2;
+ u16 rsvd4:12;
+
+ /* DSI config */
+ u16 virt_ch_num:2;
+ u16 vtm:2;
+ u16 rsvd5:12;
+
+ u32 dsi_clock;
+ u32 bridge_ref_clk;
+ u16 rsvd_pwr;
+
+ /* Dphy Params */
+ u32 prepare_cnt:5;
+ u32 rsvd6:3;
+ u32 clk_zero_cnt:8;
+ u32 trail_cnt:5;
+ u32 rsvd7:3;
+ u32 exit_zero_cnt:6;
+ u32 rsvd8:2;
+
+ u32 hl_switch_cnt;
+ u32 lp_byte_clk;
+ u32 clk_lane_switch_cnt;
+} __attribute__((packed));
+
#endif /* _I830_BIOS_H_ */
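
The union added above exists so that fields with stable offsets can be read through the common view while version-dependent bytes go through raw[]. A standalone sketch (illustrative only; the offsets mirror struct common_child_dev_config, and raw[25] is the AUX-channel byte that parse_ddi_port() reads):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct common_child {
	uint16_t handle;
	uint16_t device_type;
	uint8_t  not_common1[12];
	uint8_t  dvo_port;		/* byte 16: offset shared by all BDB versions */
	uint8_t  not_common2[2];
	uint8_t  ddc_pin;
	uint16_t edid_ptr;
} __attribute__((packed));

union child_cfg {
	uint8_t raw[33];
	struct common_child common;
};

int main(void)
{
	union child_cfg c;

	memset(&c, 0, sizeof(c));
	c.raw[16] = 1;		/* lands on common.dvo_port */
	c.raw[25] = 0x10;	/* AUX channel: meaning depends on BDB version,
				 * so it is read through raw[] */

	printf("dvo_port=%u aux=0x%02x\n",
	       (unsigned)c.common.dvo_port, (unsigned)c.raw[25]);
	return 0;
}
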
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ea9022ef15d5..2e01bd3a5d8c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -83,8 +83,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
return true;
}
-static void intel_crt_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -102,7 +101,35 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
- pipe_config->adjusted_mode.flags |= flags;
+ return flags;
+}
+
+static void intel_crt_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = encoder->base.dev;
+ int dotclock;
+
+ pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+
+ dotclock = pipe_config->port_clock;
+
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
+}
+
+static void hsw_crt_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ intel_ddi_get_config(encoder, pipe_config);
+
+ pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+ DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_NVSYNC);
+ pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
/* Note: The caller is required to filter out dpms modes not supported by the
@@ -247,7 +274,7 @@ static void intel_crt_mode_set(struct intel_encoder *encoder)
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 adpa;
- if (HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 5)
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
@@ -349,9 +376,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
- /* FIXME: debug force function and remove */
- ret = true;
-
return ret;
}
@@ -653,7 +677,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
static void intel_crt_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -759,7 +782,7 @@ void intel_crt_init(struct drm_device *dev)
if (!crt)
return;
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(crt);
return;
@@ -799,7 +822,10 @@ void intel_crt_init(struct drm_device *dev)
crt->base.mode_set = intel_crt_mode_set;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
- crt->base.get_config = intel_crt_get_config;
+ if (IS_HASWELL(dev))
+ crt->base.get_config = hsw_crt_get_config;
+ else
+ crt->base.get_config = intel_crt_get_config;
if (I915_HAS_HOTPLUG(dev))
crt->base.hpd_pin = HPD_CRT;
if (HAS_DDI(dev))
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 63de2701b974..31f4fe271388 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -42,7 +42,6 @@ static const u32 hsw_ddi_translations_dp[] = {
0x80C30FFF, 0x000B0000,
0x00FFFFFF, 0x00040006,
0x80D75FFF, 0x000B0000,
- 0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
static const u32 hsw_ddi_translations_fdi[] = {
@@ -55,10 +54,25 @@ static const u32 hsw_ddi_translations_fdi[] = {
0x00C30FFF, 0x001E0000,
0x00FFFFFF, 0x00060006,
0x00D75FFF, 0x001E0000,
- 0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
-static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+static const u32 hsw_ddi_translations_hdmi[] = {
+ /* Idx NT mV diff T mV diff db */
+ 0x00FFFFFF, 0x0006000E, /* 0: 400 400 0 */
+ 0x00E79FFF, 0x000E000C, /* 1: 400 500 2 */
+ 0x00D75FFF, 0x0005000A, /* 2: 400 600 3.5 */
+ 0x00FFFFFF, 0x0005000A, /* 3: 600 600 0 */
+ 0x00E79FFF, 0x001D0007, /* 4: 600 750 2 */
+ 0x00D75FFF, 0x000C0004, /* 5: 600 900 3.5 */
+ 0x00FFFFFF, 0x00040006, /* 6: 800 800 0 */
+ 0x80E79FFF, 0x00030002, /* 7: 800 1000 2 */
+ 0x00FFFFFF, 0x00140005, /* 8: 850 850 0 */
+ 0x00FFFFFF, 0x000C0004, /* 9: 900 900 0 */
+ 0x00FFFFFF, 0x001C0003, /* 10: 950 950 0 */
+ 0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */
+};
+
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
@@ -92,12 +106,18 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
const u32 *ddi_translations = (port == PORT_E) ?
hsw_ddi_translations_fdi :
hsw_ddi_translations_dp;
+ int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
for (i = 0, reg = DDI_BUF_TRANS(port);
i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i]);
reg += 4;
}
+ /* Entry 9 is for HDMI: */
+ for (i = 0; i < 2; i++) {
+ I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
+ reg += 4;
+ }
}
/* Program DDI buffers translations for DP. By default, program ports A-D in DP
@@ -296,9 +316,6 @@ static void intel_ddi_mode_set(struct intel_encoder *encoder)
DRM_DEBUG_DRIVER("DP audio: write eld information\n");
intel_write_eld(&encoder->base, adjusted_mode);
}
-
- intel_dp_init_link_config(intel_dp);
-
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
@@ -767,9 +784,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
BUG();
}
- if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
- if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1202,7 +1219,7 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
- if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
I915_WRITE(DP_TP_CTL(port), val);
POSTING_READ(DP_TP_CTL(port));
@@ -1249,8 +1266,8 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
intel_dp_check_link_status(intel_dp);
}
-static void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+void intel_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1268,6 +1285,37 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
+
+ switch (temp & TRANS_DDI_BPC_MASK) {
+ case TRANS_DDI_BPC_6:
+ pipe_config->pipe_bpp = 18;
+ break;
+ case TRANS_DDI_BPC_8:
+ pipe_config->pipe_bpp = 24;
+ break;
+ case TRANS_DDI_BPC_10:
+ pipe_config->pipe_bpp = 30;
+ break;
+ case TRANS_DDI_BPC_12:
+ pipe_config->pipe_bpp = 36;
+ break;
+ default:
+ break;
+ }
+
+ switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
+ case TRANS_DDI_MODE_SELECT_HDMI:
+ case TRANS_DDI_MODE_SELECT_DVI:
+ case TRANS_DDI_MODE_SELECT_FDI:
+ break;
+ case TRANS_DDI_MODE_SELECT_DP_SST:
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ pipe_config->has_dp_encoder = true;
+ intel_dp_get_m_n(intel_crtc, pipe_config);
+ break;
+ default:
+ break;
+ }
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
@@ -1297,6 +1345,41 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
.destroy = intel_ddi_destroy,
};
+static struct intel_connector *
+intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_connector *connector;
+ enum port port = intel_dig_port->port;
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return NULL;
+
+ intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+ if (!intel_dp_init_connector(intel_dig_port, connector)) {
+ kfree(connector);
+ return NULL;
+ }
+
+ return connector;
+}
+
+static struct intel_connector *
+intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_connector *connector;
+ enum port port = intel_dig_port->port;
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return NULL;
+
+ intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+ intel_hdmi_init_connector(intel_dig_port, connector);
+
+ return connector;
+}
+
void intel_ddi_init(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1305,17 +1388,22 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct drm_encoder *encoder;
struct intel_connector *hdmi_connector = NULL;
struct intel_connector *dp_connector = NULL;
+ bool init_hdmi, init_dp;
+
+ init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
+ dev_priv->vbt.ddi_port_info[port].supports_hdmi);
+ init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+ if (!init_dp && !init_hdmi) {
+ DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
+ port_name(port));
+ init_hdmi = true;
+ init_dp = true;
+ }
- intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
return;
- dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!dp_connector) {
- kfree(intel_dig_port);
- return;
- }
-
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
@@ -1335,28 +1423,22 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
- intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = false;
intel_encoder->hot_plug = intel_ddi_hot_plug;
- if (!intel_dp_init_connector(intel_dig_port, dp_connector)) {
- drm_encoder_cleanup(encoder);
- kfree(intel_dig_port);
- kfree(dp_connector);
- return;
- }
+ if (init_dp)
+ dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
- if (intel_encoder->type != INTEL_OUTPUT_EDP) {
- hdmi_connector = kzalloc(sizeof(struct intel_connector),
- GFP_KERNEL);
- if (!hdmi_connector) {
- return;
- }
+ /* In theory we don't need the encoder->type check, but leave it just in
+ * case we have some really bad VBTs... */
+ if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
+ hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
- intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
- intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
+ if (!dp_connector && !hdmi_connector) {
+ drm_encoder_cleanup(encoder);
+ kfree(intel_dig_port);
}
}
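
A standalone sketch of how the HDMI entry is chosen for the DDI buffer translation block (illustrative only; the values are copied from hsw_ddi_translations_hdmi[] above, and the fallback level mirrors the 800mV 0dB default programmed by init_vbt_defaults()):

#include <stdint.h>
#include <stdio.h>

/* 12 HDMI entries, two dwords each, as in the table added above. */
static const uint32_t hdmi_translations[12][2] = {
	{ 0x00FFFFFF, 0x0006000E }, { 0x00E79FFF, 0x000E000C },
	{ 0x00D75FFF, 0x0005000A }, { 0x00FFFFFF, 0x0005000A },
	{ 0x00E79FFF, 0x001D0007 }, { 0x00D75FFF, 0x000C0004 },
	{ 0x00FFFFFF, 0x00040006 }, { 0x80E79FFF, 0x00030002 },
	{ 0x00FFFFFF, 0x00140005 }, { 0x00FFFFFF, 0x000C0004 },
	{ 0x00FFFFFF, 0x001C0003 }, { 0x80FFFFFF, 0x00030002 },
};

/* Nine DP/FDI entries are written first; entry 9 is the single HDMI slot,
 * filled with the pair selected by the VBT hdmi_level_shift. */
static void pick_hdmi_entry(unsigned level, uint32_t out[2])
{
	if (level >= 12)
		level = 6;	/* recommended BSpec default: 800mV 0dB */
	out[0] = hdmi_translations[level][0];
	out[1] = hdmi_translations[level][1];
}

int main(void)
{
	uint32_t pair[2];

	pick_hdmi_entry(6, pair);
	printf("HDMI entry 9: 0x%08X 0x%08X\n", pair[0], pair[1]);
	return 0;
}
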
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 581fb4b2f766..f34252d134b6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,14 +41,13 @@
#include <drm/drm_crtc_helper.h>
#include <linux/dma_remapping.h>
-bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
-static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config);
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config);
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
@@ -69,9 +68,6 @@ struct intel_limit {
intel_p2_t p2;
};
-/* FDI */
-#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
-
int
intel_pch_rawclk(struct drm_device *dev)
{
@@ -313,44 +309,44 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
.p2_slow = 7, .p2_fast = 7 },
};
-static const intel_limit_t intel_limits_vlv_dac = {
- .dot = { .min = 25000, .max = 270000 },
+static const intel_limit_t intel_limits_vlv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 270000 * 5 },
.vco = { .min = 4000000, .max = 6000000 },
.n = { .min = 1, .max = 7 },
- .m = { .min = 22, .max = 450 }, /* guess */
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
- .p = { .min = 10, .max = 30 },
- .p1 = { .min = 1, .max = 3 },
- .p2 = { .dot_limit = 270000,
- .p2_slow = 2, .p2_fast = 20 },
-};
-
-static const intel_limit_t intel_limits_vlv_hdmi = {
- .dot = { .min = 25000, .max = 270000 },
- .vco = { .min = 4000000, .max = 6000000 },
- .n = { .min = 1, .max = 7 },
- .m = { .min = 60, .max = 300 }, /* guess */
- .m1 = { .min = 2, .max = 3 },
- .m2 = { .min = 11, .max = 156 },
- .p = { .min = 10, .max = 30 },
.p1 = { .min = 2, .max = 3 },
- .p2 = { .dot_limit = 270000,
- .p2_slow = 2, .p2_fast = 20 },
+ .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
-static const intel_limit_t intel_limits_vlv_dp = {
- .dot = { .min = 25000, .max = 270000 },
- .vco = { .min = 4000000, .max = 6000000 },
- .n = { .min = 1, .max = 7 },
- .m = { .min = 22, .max = 450 },
- .m1 = { .min = 2, .max = 3 },
- .m2 = { .min = 11, .max = 156 },
- .p = { .min = 10, .max = 30 },
- .p1 = { .min = 1, .max = 3 },
- .p2 = { .dot_limit = 270000,
- .p2_slow = 2, .p2_fast = 20 },
-};
+static void vlv_clock(int refclk, intel_clock_t *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+}
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *encoder;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->type == type)
+ return true;
+
+ return false;
+}
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
@@ -412,12 +408,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
else
limit = &intel_limits_pineview_sdvo;
} else if (IS_VALLEYVIEW(dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
- limit = &intel_limits_vlv_dac;
- else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
- limit = &intel_limits_vlv_hdmi;
- else
- limit = &intel_limits_vlv_dp;
+ limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
@@ -439,8 +430,8 @@ static void pineview_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
- clock->vco = refclk * clock->m / clock->n;
- clock->dot = clock->vco / clock->p;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
@@ -452,23 +443,8 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
{
clock->m = i9xx_dpll_compute_m(clock);
clock->p = clock->p1 * clock->p2;
- clock->vco = refclk * clock->m / (clock->n + 2);
- clock->dot = clock->vco / clock->p;
-}
-
-/**
- * Returns whether any output on the specified pipe is of the specified type
- */
-bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
-{
- struct drm_device *dev = crtc->dev;
- struct intel_encoder *encoder;
-
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->type == type)
- return true;
-
- return false;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -481,20 +457,26 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
const intel_limit_t *limit,
const intel_clock_t *clock)
{
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ INTELPllInvalid("n out of range\n");
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
INTELPllInvalid("p1 out of range\n");
- if (clock->p < limit->p.min || limit->p.max < clock->p)
- INTELPllInvalid("p out of range\n");
if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
INTELPllInvalid("m2 out of range\n");
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid("m1 out of range\n");
- if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
- INTELPllInvalid("m1 <= m2\n");
- if (clock->m < limit->m.min || limit->m.max < clock->m)
- INTELPllInvalid("m out of range\n");
- if (clock->n < limit->n.min || limit->n.max < clock->n)
- INTELPllInvalid("n out of range\n");
+
+ if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
+ if (clock->m1 <= clock->m2)
+ INTELPllInvalid("m1 <= m2\n");
+
+ if (!IS_VALLEYVIEW(dev)) {
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ INTELPllInvalid("p out of range\n");
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ INTELPllInvalid("m out of range\n");
+ }
+
if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
INTELPllInvalid("vco out of range\n");
/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
@@ -688,67 +670,73 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
- u32 m, n, fastclk;
- u32 updrate, minupdate, p;
- unsigned long bestppm, ppm, absppm;
- int dotclk, flag;
-
- flag = 0;
- dotclk = target * 1000;
- bestppm = 1000000;
- ppm = absppm = 0;
- fastclk = dotclk / (2*100);
- updrate = 0;
- minupdate = 19200;
- n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
- bestm1 = bestm2 = bestp1 = bestp2 = 0;
+ struct drm_device *dev = crtc->dev;
+ intel_clock_t clock;
+ unsigned int bestppm = 1000000;
+ /* min update 19.2 MHz */
+ int max_n = min(limit->n.max, refclk / 19200);
+ bool found = false;
+
+ target *= 5; /* fast clock */
+
+ memset(best_clock, 0, sizeof(*best_clock));
/* based on hardware requirement, prefer smaller n to precision */
- for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
- updrate = refclk / n;
- for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
- for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
- if (p2 > 10)
- p2 = p2 - 1;
- p = p1 * p2;
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ clock.p = clock.p1 * clock.p2;
/* based on hardware requirement, prefer bigger m1,m2 values */
- for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
- m2 = (((2*(fastclk * p * n / m1 )) +
- refclk) / (2*refclk));
- m = m1 * m2;
- vco = updrate * m;
- if (vco >= limit->vco.min && vco < limit->vco.max) {
- ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
- absppm = (ppm > 0) ? ppm : (-ppm);
- if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
- bestppm = 0;
- flag = 1;
- }
- if (absppm < bestppm - 10) {
- bestppm = absppm;
- flag = 1;
- }
- if (flag) {
- bestn = n;
- bestm1 = m1;
- bestm2 = m2;
- bestp1 = p1;
- bestp2 = p2;
- flag = 0;
- }
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+ unsigned int ppm, diff;
+
+ clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+ refclk * clock.m1);
+
+ vlv_clock(refclk, &clock);
+
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
+ continue;
+
+ diff = abs(clock.dot - target);
+ ppm = div_u64(1000000ULL * diff, target);
+
+ if (ppm < 100 && clock.p > best_clock->p) {
+ bestppm = 0;
+ *best_clock = clock;
+ found = true;
+ }
+
+ if (bestppm >= 10 && ppm < bestppm - 10) {
+ bestppm = ppm;
+ *best_clock = clock;
+ found = true;
}
}
}
}
}
- best_clock->n = bestn;
- best_clock->m1 = bestm1;
- best_clock->m2 = bestm2;
- best_clock->p1 = bestp1;
- best_clock->p2 = bestp2;
- return true;
+ return found;
+}
+
+bool intel_crtc_active(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ *
+ * We can ditch the adjusted_mode.crtc_clock check as soon
+ * as Haswell has gained clock readout/fastboot support.
+ *
+ * We can ditch the crtc->fb check as soon as we can
+ * properly reconstruct framebuffers.
+ */
+ return intel_crtc->active && crtc->fb &&
+ intel_crtc->config.adjusted_mode.crtc_clock;
}
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
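
A standalone sketch of the divider math introduced by vlv_clock() and of the error metric the rewritten vlv_find_best_dpll() minimises (illustrative only; names are local to the example). The search above then keeps a candidate either when its error is below 100 ppm and its post divider p beats the best so far, or when it improves the best error by more than 10 ppm.

#include <stdint.h>

struct vlv_dividers {
	unsigned n, m1, m2, p1, p2;
	unsigned m, p, vco, dot;	/* derived; refclk and dot are in kHz */
};

static unsigned div_round_closest_u64(uint64_t n, uint64_t d)
{
	return (unsigned)((n + d / 2) / d);
}

/* Mirrors vlv_clock(): derive vco and dot clock from the raw dividers. */
static void vlv_clock_calc(unsigned refclk, struct vlv_dividers *c)
{
	c->m = c->m1 * c->m2;
	c->p = c->p1 * c->p2;
	c->vco = div_round_closest_u64((uint64_t)refclk * c->m, c->n);
	c->dot = div_round_closest_u64(c->vco, c->p);
}

/* Error against the fast-clock target (the requested dot clock times 5). */
static unsigned ppm_error(unsigned dot, unsigned fast_target)
{
	unsigned diff = dot > fast_target ? dot - fast_target
					  : fast_target - dot;

	return (unsigned)(1000000ULL * diff / fast_target);
}
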
@@ -812,6 +800,25 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
DRM_DEBUG_KMS("vblank wait timed out\n");
}
+static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg = PIPEDSL(pipe);
+ u32 line1, line2;
+ u32 line_mask;
+
+ if (IS_GEN2(dev))
+ line_mask = DSL_LINEMASK_GEN2;
+ else
+ line_mask = DSL_LINEMASK_GEN3;
+
+ line1 = I915_READ(reg) & line_mask;
+ mdelay(5);
+ line2 = I915_READ(reg) & line_mask;
+
+ return line1 == line2;
+}
+
/*
* intel_wait_for_pipe_off - wait for pipe to turn off
* @dev: drm device
@@ -843,22 +850,8 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
100))
WARN(1, "pipe_off wait timed out\n");
} else {
- u32 last_line, line_mask;
- int reg = PIPEDSL(pipe);
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
-
- if (IS_GEN2(dev))
- line_mask = DSL_LINEMASK_GEN2;
- else
- line_mask = DSL_LINEMASK_GEN3;
-
/* Wait for the display line to settle */
- do {
- last_line = I915_READ(reg) & line_mask;
- mdelay(5);
- } while (((I915_READ(reg) & line_mask) != last_line) &&
- time_after(timeout, jiffies));
- if (time_after(jiffies, timeout))
+ if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
WARN(1, "pipe_off wait timed out\n");
}
}
@@ -929,6 +922,24 @@ void assert_pll(struct drm_i915_private *dev_priv,
state_string(state), state_string(cur_state));
}
+/* XXX: the dsi pll is shared between MIPI DSI ports */
+static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
+{
+ u32 val;
+ bool cur_state;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ cur_state = val & DSI_PLL_VCO_EN;
+ WARN(cur_state != state,
+ "DSI PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
+#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
+
struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
@@ -1069,6 +1080,26 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
+static void assert_cursor(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ struct drm_device *dev = dev_priv->dev;
+ bool cur_state;
+
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
+ else if (IS_845G(dev) || IS_I865G(dev))
+ cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
+ else
+ cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+ WARN(cur_state != state,
+ "cursor on pipe %c assertion failure (expected %s, current %s)\n",
+ pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
+#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
+
void assert_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
@@ -1323,6 +1354,26 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
+static void intel_init_dpio(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_VALLEYVIEW(dev))
+ return;
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all be set
+ * to 0.
+ *
+ * This should only be done on init and resume from S3 with both
+ * PLLs disabled, or we risk losing DPIO and PLL synchronization.
+ */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
static void vlv_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -1429,6 +1480,20 @@ static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
POSTING_READ(DPLL(pipe));
}
+static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ u32 val = 0;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, pipe);
+
+ /* Leave integrated clock source enabled */
+ if (pipe == PIPE_B)
+ val = DPLL_INTEGRATED_CRI_CLK_VLV;
+ I915_WRITE(DPLL(pipe), val);
+ POSTING_READ(DPLL(pipe));
+}
+
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
{
u32 port_mask;
@@ -1661,7 +1726,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
* returning.
*/
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
- bool pch_port)
+ bool pch_port, bool dsi)
{
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
@@ -1670,6 +1735,7 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 val;
assert_planes_disabled(dev_priv, pipe);
+ assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
if (HAS_PCH_LPT(dev_priv->dev))
@@ -1683,7 +1749,10 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
* need the check.
*/
if (!HAS_PCH_SPLIT(dev_priv->dev))
- assert_pll_enabled(dev_priv, pipe);
+ if (dsi)
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, pipe);
else {
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
@@ -1728,6 +1797,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
* or we might hang the display.
*/
assert_planes_disabled(dev_priv, pipe);
+ assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
/* Don't disable pipe A or pipe A PLLs if needed */
@@ -1747,63 +1817,75 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
* Plane regs are double buffered, going from enabled->disabled needs a
* trigger in order to latch. The display address reg provides this.
*/
-void intel_flush_display_plane(struct drm_i915_private *dev_priv,
- enum plane plane)
+void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
+ enum plane plane)
{
- if (dev_priv->info->gen >= 4)
- I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
- else
- I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+ u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
+
+ I915_WRITE(reg, I915_READ(reg));
+ POSTING_READ(reg);
}
/**
- * intel_enable_plane - enable a display plane on a given pipe
+ * intel_enable_primary_plane - enable the primary plane on a given pipe
* @dev_priv: i915 private structure
* @plane: plane to enable
* @pipe: pipe being fed
*
* Enable @plane on @pipe, making sure that @pipe is running first.
*/
-static void intel_enable_plane(struct drm_i915_private *dev_priv,
- enum plane plane, enum pipe pipe)
+static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
{
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
int reg;
u32 val;
/* If the pipe isn't enabled, we can't pump pixels and may hang */
assert_pipe_enabled(dev_priv, pipe);
+ WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
+
+ intel_crtc->primary_enabled = true;
+
reg = DSPCNTR(plane);
val = I915_READ(reg);
if (val & DISPLAY_PLANE_ENABLE)
return;
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev_priv, plane);
+ intel_flush_primary_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
/**
- * intel_disable_plane - disable a display plane
+ * intel_disable_primary_plane - disable the primary plane
* @dev_priv: i915 private structure
* @plane: plane to disable
* @pipe: pipe consuming the data
*
* Disable @plane; should be an independent operation.
*/
-static void intel_disable_plane(struct drm_i915_private *dev_priv,
- enum plane plane, enum pipe pipe)
+static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
{
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
int reg;
u32 val;
+ WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
+
+ intel_crtc->primary_enabled = false;
+
reg = DSPCNTR(plane);
val = I915_READ(reg);
if ((val & DISPLAY_PLANE_ENABLE) == 0)
return;
I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev_priv, plane);
+ intel_flush_primary_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
@@ -1839,10 +1921,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
alignment = 0;
break;
case I915_TILING_Y:
- /* Despite that we check this in framebuffer_init userspace can
- * screw us over and change the tiling after the fact. Only
- * pinned buffers can't change their tiling. */
- DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
+ WARN(1, "Y tiled bo slipped through, driver bug!\n");
return -EINVAL;
default:
BUG();
@@ -2244,11 +2323,26 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- /* Update pipe size and adjust fitter if needed */
+ /*
+ * Update pipe size and adjust fitter if needed: the reason for this is
+ * that in compute_mode_changes we check the native mode (not the pfit
+ * mode) to see if we can flip rather than do a full mode set. In the
+ * fastboot case, we'll flip, but if we don't update the pipesrc and
+ * pfit state, we'll end up with a big fb scanned out into the wrong
+ * sized surface.
+ *
+ * To fix this properly, we need to hoist the checks up into
+ * compute_mode_changes (or above), check the actual pfit state and
+ * whether the platform allows pfit disable with pipe active, and only
+ * then update the pipesrc and pfit state, even on the flip path.
+ */
if (i915_fastboot) {
+ const struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+
I915_WRITE(PIPESRC(intel_crtc->pipe),
- ((crtc->mode.hdisplay - 1) << 16) |
- (crtc->mode.vdisplay - 1));
+ ((adjusted_mode->crtc_hdisplay - 1) << 16) |
+ (adjusted_mode->crtc_vdisplay - 1));
if (!intel_crtc->config.pch_pfit.enabled &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
@@ -2327,9 +2421,10 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
-static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc)
+static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
{
- return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder;
+ return crtc->base.enabled && crtc->active &&
+ crtc->config.has_pch_encoder;
}
static void ivb_modeset_global_resources(struct drm_device *dev)
@@ -2872,6 +2967,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
@@ -2889,14 +2985,14 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
SBI_ICLK);
/* 20MHz is a corner case which is out of range for the 7-bit divisor */
- if (crtc->mode.clock == 20000) {
+ if (clock == 20000) {
auxdiv = 1;
divsel = 0x41;
phaseinc = 0x20;
} else {
/* The iCLK virtual clock root frequency is in MHz,
- * but the crtc->mode.clock in in KHz. To get the divisors,
- * it is necessary to divide one by another, so we
+ * but the adjusted_mode->crtc_clock is in KHz. To get the
+ * divisors, it is necessary to divide one by another, so we
* convert the virtual clock precision to KHz here for higher
* precision.
*/
@@ -2904,7 +3000,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
u32 iclk_pi_range = 64;
u32 desired_divisor, msb_divisor_value, pi_value;
- desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
+ desired_divisor = (iclk_virtual_root_freq / clock);
msb_divisor_value = desired_divisor / iclk_pi_range;
pi_value = desired_divisor % iclk_pi_range;
@@ -2920,7 +3016,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
~SBI_SSCDIVINTPHASE_INCVAL_MASK);
DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
- crtc->mode.clock,
+ clock,
auxdiv,
divsel,
phasedir,
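A minimal standalone sketch of the divisor split used above — not part of the patch; the 172800 * 1000 virtual-root value is an assumption taken from the surrounding lpt_program_iclkip() code, which this excerpt does not show:

#include <stdio.h>

/* Mirrors the scaled-divisor split into a coarse divisor and a 64-step
 * phase increment, for a pixel clock given in kHz. */
static void iclkip_divisors(unsigned int clock_khz)
{
	unsigned int iclk_virtual_root_freq = 172800 * 1000;	/* assumed */
	unsigned int iclk_pi_range = 64;
	unsigned int desired_divisor, msb_divisor_value, pi_value;

	desired_divisor = iclk_virtual_root_freq / clock_khz;
	msb_divisor_value = desired_divisor / iclk_pi_range;
	pi_value = desired_divisor % iclk_pi_range;

	printf("%u kHz: desired=%u msb=%u pi=%u\n",
	       clock_khz, desired_divisor, msb_divisor_value, pi_value);
}

int main(void)
{
	iclkip_divisors(148500);	/* 1080p60 */
	iclkip_divisors(20000);		/* corner case: msb exceeds the 7-bit divisor */
	return 0;
}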
@@ -2979,6 +3075,48 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
+static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t temp;
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ if (temp & FDI_BC_BIFURCATION_SELECT)
+ return;
+
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp |= FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("enabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ switch (intel_crtc->pipe) {
+ case PIPE_A:
+ break;
+ case PIPE_B:
+ if (intel_crtc->config.fdi_lanes > 2)
+ WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+ else
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ break;
+ case PIPE_C:
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ break;
+ default:
+ BUG();
+ }
+}
+
/*
* Enable PCH resources required for PCH ports:
* - PCH PLLs
@@ -2997,6 +3135,9 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
assert_pch_transcoder_disabled(dev_priv, pipe);
+ if (IS_IVYBRIDGE(dev))
+ ivybridge_update_fdi_bc_bifurcation(intel_crtc);
+
/* Write the TU size bits before fdi link training, so that error
* detection works. */
I915_WRITE(FDI_RX_TUSIZE1(pipe),
@@ -3240,6 +3381,92 @@ static void intel_disable_planes(struct drm_crtc *crtc)
intel_plane_disable(&intel_plane->base);
}
+void hsw_enable_ips(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+ if (!crtc->config.ips_enabled)
+ return;
+
+ /* We can only enable IPS after we enable a plane and wait for a vblank.
+ * We guarantee that the plane is enabled by calling intel_enable_ips
+ * only after intel_enable_plane. And intel_enable_plane already waits
+ * for a vblank, so all we need to do here is to enable the IPS bit. */
+ assert_plane_enabled(dev_priv, crtc->plane);
+ I915_WRITE(IPS_CTL, IPS_ENABLE);
+
+ /* The bit only becomes 1 in the next vblank, so this wait here is
+ * essentially intel_wait_for_vblank. If we don't have this and don't
+ * wait for vblanks until the end of crtc_enable, then the HW state
+ * readout code will complain that the expected IPS_CTL value is not the
+ * one we read. */
+ if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
+ DRM_ERROR("Timed out waiting for IPS enable\n");
+}
+
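The wait_for(..., 50) used above is a poll-until-set-or-timeout pattern; a self-contained sketch of that shape, with a simulated register standing in for IPS_CTL (the placeholder bit and the latch-after-a-few-reads behaviour are assumptions for illustration only):

#include <stdbool.h>
#include <stdio.h>

#define FAKE_IPS_ENABLE (1u << 31)	/* placeholder bit, not the real IPS_CTL layout */

/* Simulated register read: the bit becomes visible only after a few polls,
 * standing in for the hardware latching it at the next vblank. */
static unsigned int fake_ips_ctl_read(void)
{
	static int reads;

	return (++reads >= 3) ? FAKE_IPS_ENABLE : 0;
}

static bool wait_for_bit(unsigned int mask, int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++)
		if (fake_ips_ctl_read() & mask)
			return true;
	return false;	/* analogous to the "Timed out waiting for IPS enable" path */
}

int main(void)
{
	printf("IPS bit observed: %s\n",
	       wait_for_bit(FAKE_IPS_ENABLE, 50) ? "yes" : "no");
	return 0;
}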
+void hsw_disable_ips(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!crtc->config.ips_enabled)
+ return;
+
+ assert_plane_enabled(dev_priv, crtc->plane);
+ I915_WRITE(IPS_CTL, 0);
+ POSTING_READ(IPS_CTL);
+
+ /* We need to wait for a vblank before we can disable the plane. */
+ intel_wait_for_vblank(dev, crtc->pipe);
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+static void intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ int palreg = PALETTE(pipe);
+ int i;
+ bool reenable_ips = false;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled || !intel_crtc->active)
+ return;
+
+ if (!HAS_PCH_SPLIT(dev_priv->dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, pipe);
+ }
+
+ /* use legacy palette for Ironlake */
+ if (HAS_PCH_SPLIT(dev))
+ palreg = LGC_PALETTE(pipe);
+
+ /* Workaround: Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ */
+ if (intel_crtc->config.ips_enabled &&
+ ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
+ GAMMA_MODE_MODE_SPLIT)) {
+ hsw_disable_ips(intel_crtc);
+ reenable_ips = true;
+ }
+
+ for (i = 0; i < 256; i++) {
+ I915_WRITE(palreg + 4 * i,
+ (intel_crtc->lut_r[i] << 16) |
+ (intel_crtc->lut_g[i] << 8) |
+ intel_crtc->lut_b[i]);
+ }
+
+ if (reenable_ips)
+ hsw_enable_ips(intel_crtc);
+}
+
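Each of the 256 palette writes above packs 8-bit R/G/B into one register word at a 4-byte stride; a small round-trip sketch of that packing (illustration only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_lut_entry(uint8_t r, uint8_t g, uint8_t b)
{
	return ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
}

int main(void)
{
	unsigned int i;

	/* identity ramp: entry i maps to (i, i, i) */
	for (i = 0; i < 256; i += 64)
		printf("entry %3u -> 0x%08x at offset 0x%03x\n",
		       i, pack_lut_entry(i, i, i), 4 * i);
	return 0;
}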
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3259,8 +3486,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
- intel_update_watermarks(dev);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
@@ -3283,9 +3508,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
*/
intel_crtc_load_lut(crtc);
+ intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe,
- intel_crtc->config.has_pch_encoder);
- intel_enable_plane(dev_priv, plane, pipe);
+ intel_crtc->config.has_pch_encoder, false);
+ intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
@@ -3319,34 +3545,74 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}
-static void hsw_enable_ips(struct intel_crtc *crtc)
+static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
- if (!crtc->config.ips_enabled)
- return;
+ intel_enable_primary_plane(dev_priv, plane, pipe);
+ intel_enable_planes(crtc);
+ intel_crtc_update_cursor(crtc, true);
- /* We can only enable IPS after we enable a plane and wait for a vblank.
- * We guarantee that the plane is enabled by calling intel_enable_ips
- * only after intel_enable_plane. And intel_enable_plane already waits
- * for a vblank, so all we need to do here is to enable the IPS bit. */
- assert_plane_enabled(dev_priv, crtc->plane);
- I915_WRITE(IPS_CTL, IPS_ENABLE);
+ hsw_enable_ips(intel_crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
}
-static void hsw_disable_ips(struct intel_crtc *crtc)
+static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
- if (!crtc->config.ips_enabled)
- return;
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
- assert_plane_enabled(dev_priv, crtc->plane);
- I915_WRITE(IPS_CTL, 0);
+ /* FBC must be disabled before disabling the plane on HSW. */
+ if (dev_priv->fbc.plane == plane)
+ intel_disable_fbc(dev);
- /* We need to wait for a vblank before we can disable the plane. */
- intel_wait_for_vblank(dev, crtc->pipe);
+ hsw_disable_ips(intel_crtc);
+
+ intel_crtc_update_cursor(crtc, false);
+ intel_disable_planes(crtc);
+ intel_disable_primary_plane(dev_priv, plane, pipe);
+}
+
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_crtc *crtc_it, *other_active_crtc = NULL;
+
+ /* We want to get the other_active_crtc only if there's only 1 other
+ * active crtc. */
+ list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
+ if (!crtc_it->active || crtc_it == crtc)
+ continue;
+
+ if (other_active_crtc)
+ return;
+
+ other_active_crtc = crtc_it;
+ }
+ if (!other_active_crtc)
+ return;
+
+ intel_wait_for_vblank(dev, other_active_crtc->pipe);
+ intel_wait_for_vblank(dev, other_active_crtc->pipe);
}
static void haswell_crtc_enable(struct drm_crtc *crtc)
@@ -3356,7 +3622,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
WARN_ON(!crtc->enabled);
@@ -3369,8 +3634,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
- intel_update_watermarks(dev);
-
if (intel_crtc->config.has_pch_encoder)
dev_priv->display.fdi_link_train(crtc);
@@ -3391,23 +3654,22 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_ddi_set_pipe_settings(crtc);
intel_ddi_enable_transcoder_func(crtc);
+ intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe,
- intel_crtc->config.has_pch_encoder);
- intel_enable_plane(dev_priv, plane, pipe);
- intel_enable_planes(crtc);
- intel_crtc_update_cursor(crtc, true);
-
- hsw_enable_ips(intel_crtc);
+ intel_crtc->config.has_pch_encoder, false);
if (intel_crtc->config.has_pch_encoder)
lpt_pch_enable(crtc);
- mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
- mutex_unlock(&dev->struct_mutex);
-
- for_each_encoder_on_crtc(dev, crtc, encoder)
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
encoder->enable(encoder);
+ intel_opregion_notify_encoder(encoder, true);
+ }
+
+ /* If we change the relative order between pipe/planes enabling, we need
+ * to change the workaround. */
+ haswell_mode_set_planes_workaround(intel_crtc);
+ haswell_crtc_enable_planes(crtc);
/*
* There seems to be a race in PCH platform hw (at least on some
@@ -3460,7 +3722,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
- intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_primary_plane(dev_priv, plane, pipe);
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
@@ -3501,7 +3763,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
}
intel_crtc->active = false;
- intel_update_watermarks(dev);
+ intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
@@ -3515,27 +3777,17 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
if (!intel_crtc->active)
return;
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->disable(encoder);
+ haswell_crtc_disable_planes(crtc);
- intel_crtc_wait_for_pending_flips(crtc);
- drm_vblank_off(dev, pipe);
-
- /* FBC must be disabled before disabling the plane on HSW. */
- if (dev_priv->fbc.plane == plane)
- intel_disable_fbc(dev);
-
- hsw_disable_ips(intel_crtc);
-
- intel_crtc_update_cursor(crtc, false);
- intel_disable_planes(crtc);
- intel_disable_plane(dev_priv, plane, pipe);
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ intel_opregion_notify_encoder(encoder, false);
+ encoder->disable(encoder);
+ }
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
@@ -3558,7 +3810,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
}
intel_crtc->active = false;
- intel_update_watermarks(dev);
+ intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
@@ -3650,6 +3902,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ bool is_dsi;
WARN_ON(!crtc->enabled);
@@ -3657,13 +3910,15 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
return;
intel_crtc->active = true;
- intel_update_watermarks(dev);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
- vlv_enable_pll(intel_crtc);
+ is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+
+ if (!is_dsi)
+ vlv_enable_pll(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
@@ -3673,8 +3928,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_crtc_load_lut(crtc);
- intel_enable_pipe(dev_priv, pipe, false);
- intel_enable_plane(dev_priv, plane, pipe);
+ intel_update_watermarks(crtc);
+ intel_enable_pipe(dev_priv, pipe, false, is_dsi);
+ intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
@@ -3699,7 +3955,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
return;
intel_crtc->active = true;
- intel_update_watermarks(dev);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
@@ -3711,8 +3966,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_crtc_load_lut(crtc);
- intel_enable_pipe(dev_priv, pipe, false);
- intel_enable_plane(dev_priv, plane, pipe);
+ intel_update_watermarks(crtc);
+ intel_enable_pipe(dev_priv, pipe, false, false);
+ intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
/* The fixup needs to happen before cursor is enabled */
if (IS_G4X(dev))
@@ -3768,7 +4024,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
- intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_primary_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
@@ -3778,11 +4034,15 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- i9xx_disable_pll(dev_priv, pipe);
+ if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+ vlv_disable_pll(dev_priv, pipe);
+ else if (!IS_VALLEYVIEW(dev))
+ i9xx_disable_pll(dev_priv, pipe);
intel_crtc->active = false;
+ intel_update_watermarks(crtc);
+
intel_update_fbc(dev);
- intel_update_watermarks(dev);
}
static void i9xx_crtc_off(struct drm_crtc *crtc)
@@ -3856,6 +4116,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
dev_priv->display.off(crtc);
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+ assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
if (crtc->fb) {
@@ -4045,8 +4306,7 @@ retry:
*/
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
- fdi_dotclock = adjusted_mode->clock;
- fdi_dotclock /= pipe_config->pixel_multiplier;
+ fdi_dotclock = adjusted_mode->crtc_clock;
lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
pipe_config->pipe_bpp);
@@ -4088,13 +4348,39 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
- if (HAS_PCH_SPLIT(dev)) {
- /* FDI link clock is fixed at 2.7G */
- if (pipe_config->requested_mode.clock * 3
- > IRONLAKE_FDI_FREQ * 4)
+ /* FIXME should check pixel clock limits on all platforms */
+ if (INTEL_INFO(dev)->gen < 4) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int clock_limit =
+ dev_priv->display.get_display_clock_speed(dev);
+
+ /*
+ * Enable pixel doubling when the dot clock
+ * is > 90% of the (display) core speed.
+ *
+ * GDG double wide on either pipe,
+ * otherwise pipe A only.
+ */
+ if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
+ adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
+ clock_limit *= 2;
+ pipe_config->double_wide = true;
+ }
+
+ if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
return -EINVAL;
}
+ /*
+ * Pipe horizontal size must be even in:
+ * - DVO ganged mode
+ * - LVDS dual channel mode
+ * - Double wide pipe
+ */
+ if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
+ pipe_config->pipe_src_w &= ~1;
+
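A sketch of the gen < 4 dot-clock check added above — the 90% threshold, the double-wide doubling, and the -EINVAL fallthrough — with made-up clock numbers:

#include <stdbool.h>
#include <stdio.h>

struct dotclock_check {
	bool double_wide;
	bool ok;
};

static struct dotclock_check check_dotclock(int crtc_clock, int clock_limit,
					    bool pipe_a_or_gdg)
{
	struct dotclock_check res = { false, true };

	/* enable pixel doubling when the dot clock is > 90% of core speed */
	if (pipe_a_or_gdg && crtc_clock > clock_limit * 9 / 10) {
		clock_limit *= 2;
		res.double_wide = true;
	}

	if (crtc_clock > clock_limit * 9 / 10)
		res.ok = false;		/* corresponds to the -EINVAL path */

	return res;
}

int main(void)
{
	struct dotclock_check r = check_dotclock(150000, 133000, true);

	printf("double_wide=%d ok=%d\n", r.double_wide, r.ok);
	return 0;
}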
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
@@ -4258,28 +4544,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-static int vlv_get_refclk(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int refclk = 27000; /* for DP & HDMI */
-
- return 100000; /* only one validated so far */
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
- refclk = 96000;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv))
- refclk = 100000;
- else
- refclk = 96000;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
- refclk = 100000;
- }
-
- return refclk;
-}
-
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
struct drm_device *dev = crtc->dev;
@@ -4287,7 +4551,7 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
int refclk;
if (IS_VALLEYVIEW(dev)) {
- refclk = vlv_get_refclk(crtc);
+ refclk = 100000;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
@@ -4345,7 +4609,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
}
}
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
+static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
u32 reg_val;
@@ -4353,24 +4618,24 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
- reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+ reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
reg_val &= 0xffffff00;
reg_val |= 0x00000030;
- vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+ reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
reg_val &= 0x8cffffff;
reg_val = 0x8c000000;
- vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
- reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+ reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
reg_val &= 0xffffff00;
- vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+ reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
reg_val &= 0x00ffffff;
reg_val |= 0xb0000000;
- vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
}
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@ -4436,18 +4701,18 @@ static void vlv_update_pll(struct intel_crtc *crtc)
/* PLL B needs special handling */
if (pipe)
- vlv_pllb_recal_opamp(dev_priv);
+ vlv_pllb_recal_opamp(dev_priv, pipe);
/* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);
+ vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
- reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
+ reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
reg_val &= 0x00ffffff;
- vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
/* Disable fast lock */
- vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);
+ vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
/* Set idtafcrecal before PLL is enabled */
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -4461,55 +4726,55 @@ static void vlv_update_pll(struct intel_crtc *crtc)
* Note: don't use the DAC post divider as it seems unstable.
*/
mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
- vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+ vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
mdiv |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+ vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
/* Set HBR and RBR LPF coefficients */
if (crtc->config.port_clock == 162000 ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+ vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
0x009f0003);
else
- vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+ vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
0x00d0000f);
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
/* Use SSC source */
if (!pipe)
- vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
0x0df40000);
else
- vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (!pipe)
- vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
0x0df70000);
else
- vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
0x0df40000);
}
- coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
+ coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);
+ vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
- vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
/* Enable DPIO clock input */
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
- if (pipe)
+ /* We should never disable this; set it here for state tracking. */
+ if (pipe == PIPE_B)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
dpll |= DPLL_VCO_ENABLE;
crtc->config.dpll_hw_state.dpll = dpll;
@@ -4647,7 +4912,6 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
- struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
/* We need to be careful not to changed the adjusted mode, for otherwise
@@ -4700,7 +4964,8 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
* always be the user's requested size.
*/
I915_WRITE(PIPESRC(pipe),
- ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ ((intel_crtc->config.pipe_src_w - 1) << 16) |
+ (intel_crtc->config.pipe_src_h - 1));
}
static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -4738,8 +5003,11 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
}
tmp = I915_READ(PIPESRC(crtc->pipe));
- pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1;
- pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
+ pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
+
+ pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
+ pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
}
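The PIPESRC write and the readout above are inverses: (width - 1) in the high 16 bits, (height - 1) in the low 16 bits. A round-trip sketch, not part of the patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t pipesrc_pack(int w, int h)
{
	return ((uint32_t)(w - 1) << 16) | (uint32_t)(h - 1);
}

static void pipesrc_unpack(uint32_t tmp, int *w, int *h)
{
	*h = (tmp & 0xffff) + 1;
	*w = ((tmp >> 16) & 0xffff) + 1;
}

int main(void)
{
	uint32_t reg = pipesrc_pack(1920, 1080);
	int w, h;

	pipesrc_unpack(reg, &w, &h);
	assert(w == 1920 && h == 1080);
	printf("PIPESRC=0x%08x -> %dx%d\n", reg, w, h);
	return 0;
}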
static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
@@ -4759,7 +5027,7 @@ static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
crtc->mode.flags = pipe_config->adjusted_mode.flags;
- crtc->mode.clock = pipe_config->adjusted_mode.clock;
+ crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
crtc->mode.flags |= pipe_config->adjusted_mode.flags;
}
@@ -4775,17 +5043,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
pipeconf |= PIPECONF_ENABLE;
- if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
- /* Enable pixel doubling when the dot clock is > 90% of the (display)
- * core speed.
- *
- * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
- * pipe == 0 check?
- */
- if (intel_crtc->config.requested_mode.clock >
- dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
- pipeconf |= PIPECONF_DOUBLE_WIDE;
- }
+ if (intel_crtc->config.double_wide)
+ pipeconf |= PIPECONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
@@ -4839,14 +5098,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dspcntr;
bool ok, has_reduced_clock = false;
- bool is_lvds = false;
+ bool is_lvds = false, is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
@@ -4856,42 +5114,49 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
+ case INTEL_OUTPUT_DSI:
+ is_dsi = true;
+ break;
}
num_connectors++;
}
- refclk = i9xx_get_refclk(crtc, num_connectors);
+ if (is_dsi)
+ goto skip_dpll;
- /*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
- limit = intel_limit(crtc, refclk);
- ok = dev_priv->display.find_dpll(limit, crtc,
- intel_crtc->config.port_clock,
- refclk, NULL, &clock);
- if (!ok && !intel_crtc->config.clock_set) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
+ if (!intel_crtc->config.clock_set) {
+ refclk = i9xx_get_refclk(crtc, num_connectors);
- if (is_lvds && dev_priv->lvds_downclock_avail) {
/*
- * Ensure we match the reduced clock's P to the target clock.
- * If the clocks don't match, we can't switch the display clock
- * by using the FP0/FP1. In such case we will disable the LVDS
- * downclock feature.
- */
- has_reduced_clock =
- dev_priv->display.find_dpll(limit, crtc,
- dev_priv->lvds_downclock,
- refclk, &clock,
- &reduced_clock);
- }
- /* Compat-code for transition, will disappear. */
- if (!intel_crtc->config.clock_set) {
+ * Returns a set of divisors for the desired target clock with
+ * the given refclk, or FALSE. The returned values represent
+ * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
+ * 2) / p1 / p2.
+ */
+ limit = intel_limit(crtc, refclk);
+ ok = dev_priv->display.find_dpll(limit, crtc,
+ intel_crtc->config.port_clock,
+ refclk, NULL, &clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target
+ * clock. If the clocks don't match, we can't switch
+ * the display clock by using the FP0/FP1. In such case
+ * we will disable the LVDS downclock feature.
+ */
+ has_reduced_clock =
+ dev_priv->display.find_dpll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk, &clock,
+ &reduced_clock);
+ }
+ /* Compat-code for transition, will disappear. */
intel_crtc->config.dpll.n = clock.n;
intel_crtc->config.dpll.m1 = clock.m1;
intel_crtc->config.dpll.m2 = clock.m2;
@@ -4899,17 +5164,19 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc->config.dpll.p2 = clock.p2;
}
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev)) {
i8xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
- else if (IS_VALLEYVIEW(dev))
+ } else if (IS_VALLEYVIEW(dev)) {
vlv_update_pll(intel_crtc);
- else
+ } else {
i9xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
+ }
+skip_dpll:
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -4926,8 +5193,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
- ((mode->vdisplay - 1) << 16) |
- (mode->hdisplay - 1));
+ ((intel_crtc->config.pipe_src_h - 1) << 16) |
+ (intel_crtc->config.pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
i9xx_set_pipeconf(intel_crtc);
@@ -4937,8 +5204,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
ret = intel_pipe_set_base(crtc, x, y, fb);
- intel_update_watermarks(dev);
-
return ret;
}
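The clock equation quoted in the comment earlier in this function, refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2, evaluated as a standalone sketch; the divisor values below are arbitrary, chosen only to show the arithmetic:

#include <stdio.h>

static int i9xx_dotclock(int refclk, int n, int m1, int m2, int p1, int p2)
{
	int m = 5 * (m1 + 2) + (m2 + 2);

	return refclk * m / (n + 2) / p1 / p2;
}

int main(void)
{
	/* 96 MHz reference in kHz; divisors are illustrative, not validated */
	printf("dot clock: %d kHz\n", i9xx_dotclock(96000, 4, 18, 7, 2, 10));
	return 0;
}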
@@ -4969,6 +5234,32 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
+static void vlv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = pipe_config->cpu_transcoder;
+ intel_clock_t clock;
+ u32 mdiv;
+ int refclk = 100000;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
+ clock.m2 = mdiv & DPIO_M2DIV_MASK;
+ clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
+ clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
+ clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+
+ vlv_clock(refclk, &clock);
+
+ /* clock.dot is the fast clock */
+ pipe_config->port_clock = clock.dot / 5;
+}
+
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -4983,6 +5274,25 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (!(tmp & PIPECONF_ENABLE))
return false;
+ if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+ switch (tmp & PIPECONF_BPC_MASK) {
+ case PIPECONF_6BPC:
+ pipe_config->pipe_bpp = 18;
+ break;
+ case PIPECONF_8BPC:
+ pipe_config->pipe_bpp = 24;
+ break;
+ case PIPECONF_10BPC:
+ pipe_config->pipe_bpp = 30;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (INTEL_INFO(dev)->gen < 4)
+ pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
+
intel_get_pipe_timings(crtc, pipe_config);
i9xx_get_pfit_config(crtc, pipe_config);
@@ -5015,6 +5325,11 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
DPLL_PORTB_READY_MASK);
}
+ if (IS_VALLEYVIEW(dev))
+ vlv_crtc_clock_get(crtc, pipe_config);
+ else
+ i9xx_crtc_clock_get(crtc, pipe_config);
+
return true;
}
@@ -5576,48 +5891,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
return true;
}
-static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t temp;
-
- temp = I915_READ(SOUTH_CHICKEN1);
- if (temp & FDI_BC_BIFURCATION_SELECT)
- return;
-
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
-
- temp |= FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("enabling fdi C rx\n");
- I915_WRITE(SOUTH_CHICKEN1, temp);
- POSTING_READ(SOUTH_CHICKEN1);
-}
-
-static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- switch (intel_crtc->pipe) {
- case PIPE_A:
- break;
- case PIPE_B:
- if (intel_crtc->config.fdi_lanes > 2)
- WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
- else
- cpt_enable_fdi_bc_bifurcation(dev);
-
- break;
- case PIPE_C:
- cpt_enable_fdi_bc_bifurcation(dev);
-
- break;
- default:
- BUG();
- }
-}
-
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
/*
@@ -5799,11 +6072,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
intel_crtc->lowfreq_avail = false;
- if (intel_crtc->config.has_pch_encoder) {
- pll = intel_crtc_to_shared_dpll(intel_crtc);
-
- }
-
intel_set_pipe_timings(intel_crtc);
if (intel_crtc->config.has_pch_encoder) {
@@ -5811,9 +6079,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
&intel_crtc->config.fdi_m_n);
}
- if (IS_IVYBRIDGE(dev))
- ivybridge_update_fdi_bc_bifurcation(intel_crtc);
-
ironlake_set_pipeconf(crtc);
/* Set up the display plane register */
@@ -5822,25 +6087,67 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
ret = intel_pipe_set_base(crtc, x, y, fb);
- intel_update_watermarks(dev);
-
return ret;
}
-static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder transcoder = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
- pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder));
- pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder));
- pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
- & ~TU_SIZE_MASK;
- pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
- pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder))
- & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+ m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
+ m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
+ m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
+ & ~TU_SIZE_MASK;
+ m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
+ m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
+ & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+}
+
+static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
+ enum transcoder transcoder,
+ struct intel_link_m_n *m_n)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = crtc->pipe;
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
+ m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
+ m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+ & ~TU_SIZE_MASK;
+ m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
+ m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+ & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+ } else {
+ m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
+ m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
+ m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
+ & ~TU_SIZE_MASK;
+ m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
+ m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
+ & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+ }
+}
+
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ if (crtc->config.has_pch_encoder)
+ intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
+ else
+ intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+ &pipe_config->dp_m_n);
+}
+
+static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+ &pipe_config->fdi_m_n);
}
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -5881,6 +6188,23 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (!(tmp & PIPECONF_ENABLE))
return false;
+ switch (tmp & PIPECONF_BPC_MASK) {
+ case PIPECONF_6BPC:
+ pipe_config->pipe_bpp = 18;
+ break;
+ case PIPECONF_8BPC:
+ pipe_config->pipe_bpp = 24;
+ break;
+ case PIPECONF_10BPC:
+ pipe_config->pipe_bpp = 30;
+ break;
+ case PIPECONF_12BPC:
+ pipe_config->pipe_bpp = 36;
+ break;
+ default:
+ break;
+ }
+
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
@@ -5912,6 +6236,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier =
((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
>> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
+
+ ironlake_pch_clock_get(crtc, pipe_config);
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -5968,8 +6294,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
* register. Callers should take care of disabling all the display engine
* functions, doing the mode unset, fixing interrupts, etc.
*/
-void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
- bool switch_to_fclk, bool allow_power_down)
+static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+ bool switch_to_fclk, bool allow_power_down)
{
uint32_t val;
@@ -5997,7 +6323,10 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
val = I915_READ(D_COMP);
val |= D_COMP_COMP_DISABLE;
- I915_WRITE(D_COMP, val);
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
+ DRM_ERROR("Failed to disable D_COMP\n");
+ mutex_unlock(&dev_priv->rps.hw_lock);
POSTING_READ(D_COMP);
ndelay(100);
@@ -6016,7 +6345,7 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
* Fully restores LCPLL, disallowing power down and switching back to LCPLL
* source.
*/
-void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
uint32_t val;
@@ -6039,7 +6368,10 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val = I915_READ(D_COMP);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
- I915_WRITE(D_COMP, val);
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
+ DRM_ERROR("Failed to enable D_COMP\n");
+ mutex_unlock(&dev_priv->rps.hw_lock);
POSTING_READ(D_COMP);
val = I915_READ(LCPLL_CTL);
@@ -6222,22 +6554,79 @@ static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
}
}
-static void haswell_modeset_global_resources(struct drm_device *dev)
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ if ((1 << (domain)) & (mask))
+
+static unsigned long get_pipe_power_domains(struct drm_device *dev,
+ enum pipe pipe, bool pfit_enabled)
{
- bool enable = false;
+ unsigned long mask;
+ enum transcoder transcoder;
+
+ transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
+
+ mask = BIT(POWER_DOMAIN_PIPE(pipe));
+ mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+ if (pfit_enabled)
+ mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+
+ return mask;
+}
+
+void intel_display_set_init_power(struct drm_device *dev, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->power_domains.init_power_on == enable)
+ return;
+
+ if (enable)
+ intel_display_power_get(dev, POWER_DOMAIN_INIT);
+ else
+ intel_display_power_put(dev, POWER_DOMAIN_INIT);
+
+ dev_priv->power_domains.init_power_on = enable;
+}
+
+static void modeset_update_power_wells(struct drm_device *dev)
+{
+ unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;
+ /*
+ * First get all needed power domains, then put all unneeded, to avoid
+ * any unnecessary toggling of the power wells.
+ */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ enum intel_display_power_domain domain;
+
if (!crtc->base.enabled)
continue;
- if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
- crtc->config.cpu_transcoder != TRANSCODER_EDP)
- enable = true;
+ pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
+ crtc->pipe,
+ crtc->config.pch_pfit.enabled);
+
+ for_each_power_domain(domain, pipe_domains[crtc->pipe])
+ intel_display_power_get(dev, domain);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ enum intel_display_power_domain domain;
+
+ for_each_power_domain(domain, crtc->enabled_power_domains)
+ intel_display_power_put(dev, domain);
+
+ crtc->enabled_power_domains = pipe_domains[crtc->pipe];
}
- intel_set_power_well(dev, enable);
+ intel_display_set_init_power(dev, false);
+}
+static void haswell_modeset_global_resources(struct drm_device *dev)
+{
+ modeset_update_power_wells(dev);
hsw_update_package_c8(dev);
}
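The for_each_power_domain() iterator introduced above walks every domain index and filters on the corresponding mask bit; the same pattern with placeholder domains (not the driver's real enum):

#include <stdio.h>

enum fake_power_domain {
	FAKE_DOMAIN_PIPE_A,
	FAKE_DOMAIN_TRANSCODER_A,
	FAKE_DOMAIN_PANEL_FITTER_A,
	FAKE_DOMAIN_NUM,
};

#define for_each_fake_domain(domain, mask)				\
	for ((domain) = 0; (domain) < FAKE_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))

int main(void)
{
	unsigned long mask = (1 << FAKE_DOMAIN_PIPE_A) |
			     (1 << FAKE_DOMAIN_PANEL_FITTER_A);
	int domain;

	for_each_fake_domain(domain, mask)
		printf("domain %d needs a power-well reference\n", domain);
	return 0;
}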
@@ -6276,8 +6665,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
ret = intel_pipe_set_base(crtc, x, y, fb);
- intel_update_watermarks(dev);
-
return ret;
}
@@ -6385,6 +6772,44 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
return 0;
}
+static struct {
+ int clock;
+ u32 config;
+} hdmi_audio_clock[] = {
+ { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+ { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+ { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+ { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+ { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+ { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+ { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+ { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+ { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+ { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+};
+
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+ if (mode->clock == hdmi_audio_clock[i].clock)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+ DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
+ i = 1;
+ }
+
+ DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+ hdmi_audio_clock[i].clock,
+ hdmi_audio_clock[i].config);
+
+ return hdmi_audio_clock[i].config;
+}
+
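The table lookup above falls back to index 1 (the 25.2 MHz entry) when the mode clock is not listed; a trimmed sketch of the same search-then-fallback shape, with fake config values:

#include <stdio.h>

struct audio_clock_entry {
	int clock;		/* pixel clock in kHz */
	unsigned int config;	/* fake stand-in for AUD_CONFIG_PIXEL_CLOCK_HDMI_* */
};

static const struct audio_clock_entry audio_clock[] = {
	{ (25200 * 1000 + 1000) / 1001, 0x0 },	/* ~25175 kHz */
	{ 25200, 0x1 },				/* fallback entry */
	{ 74250, 0x2 },
	{ 148500, 0x3 },
};

static unsigned int audio_config_for_clock(int clock)
{
	unsigned int i;

	for (i = 0; i < sizeof(audio_clock) / sizeof(audio_clock[0]); i++)
		if (clock == audio_clock[i].clock)
			return audio_clock[i].config;

	return audio_clock[1].config;	/* not found: fall back */
}

int main(void)
{
	printf("74250 -> 0x%x, 60000 -> 0x%x (fallback)\n",
	       audio_config_for_clock(74250), audio_config_for_clock(60000));
	return 0;
}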
static bool intel_eld_uptodate(struct drm_connector *connector,
int reg_eldv, uint32_t bits_eldv,
int reg_elda, uint32_t bits_elda,
@@ -6415,7 +6840,8 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
}
static void g4x_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
@@ -6455,7 +6881,8 @@ static void g4x_write_eld(struct drm_connector *connector,
}
static void haswell_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
@@ -6508,8 +6935,9 @@ static void haswell_write_eld(struct drm_connector *connector,
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
- } else
- I915_WRITE(aud_config, 0);
+ } else {
+ I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
+ }
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
@@ -6542,7 +6970,8 @@ static void haswell_write_eld(struct drm_connector *connector,
}
static void ironlake_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
@@ -6586,8 +7015,9 @@ static void ironlake_write_eld(struct drm_connector *connector,
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
- } else
- I915_WRITE(aud_config, 0);
+ } else {
+ I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
+ }
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
@@ -6637,50 +7067,7 @@ void intel_write_eld(struct drm_encoder *encoder,
connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
if (dev_priv->display.write_eld)
- dev_priv->display.write_eld(connector, crtc);
-}
-
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-void intel_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- int palreg = PALETTE(pipe);
- int i;
- bool reenable_ips = false;
-
- /* The clocks have to be on to load the palette. */
- if (!crtc->enabled || !intel_crtc->active)
- return;
-
- if (!HAS_PCH_SPLIT(dev_priv->dev))
- assert_pll_enabled(dev_priv, pipe);
-
- /* use legacy palette for Ironlake */
- if (HAS_PCH_SPLIT(dev))
- palreg = LGC_PALETTE(pipe);
-
- /* Workaround : Do not read or write the pipe palette/gamma data while
- * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
- */
- if (intel_crtc->config.ips_enabled &&
- ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
- GAMMA_MODE_MODE_SPLIT)) {
- hsw_disable_ips(intel_crtc);
- reenable_ips = true;
- }
-
- for (i = 0; i < 256; i++) {
- I915_WRITE(palreg + 4 * i,
- (intel_crtc->lut_r[i] << 16) |
- (intel_crtc->lut_g[i] << 8) |
- intel_crtc->lut_b[i]);
- }
-
- if (reenable_ips)
- hsw_enable_ips(intel_crtc);
+ dev_priv->display.write_eld(connector, crtc, mode);
}
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -6778,23 +7165,20 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
int x = intel_crtc->cursor_x;
int y = intel_crtc->cursor_y;
- u32 base, pos;
+ u32 base = 0, pos = 0;
bool visible;
- pos = 0;
-
- if (on && crtc->enabled && crtc->fb) {
+ if (on)
base = intel_crtc->cursor_addr;
- if (x > (int) crtc->fb->width)
- base = 0;
- if (y > (int) crtc->fb->height)
- base = 0;
- } else
+ if (x >= intel_crtc->config.pipe_src_w)
+ base = 0;
+
+ if (y >= intel_crtc->config.pipe_src_h)
base = 0;
if (x < 0) {
- if (x + intel_crtc->cursor_width < 0)
+ if (x + intel_crtc->cursor_width <= 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -6803,7 +7187,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
pos |= x << CURSOR_X_SHIFT;
if (y < 0) {
- if (y + intel_crtc->cursor_height < 0)
+ if (y + intel_crtc->cursor_height <= 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
@@ -6946,8 +7330,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_crtc->cursor_x = x;
- intel_crtc->cursor_y = y;
+ intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
+ intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
if (intel_crtc->active)
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
@@ -6955,27 +7339,6 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
-/** Sets the color ramps on behalf of RandR */
-void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- intel_crtc->lut_r[regno] = red >> 8;
- intel_crtc->lut_g[regno] = green >> 8;
- intel_crtc->lut_b[regno] = blue >> 8;
-}
-
-void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- *red = intel_crtc->lut_r[regno] << 8;
- *green = intel_crtc->lut_g[regno] << 8;
- *blue = intel_crtc->lut_b[regno] << 8;
-}
-
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t start, uint32_t size)
{
@@ -7011,14 +7374,21 @@ intel_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto err;
+
ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
- if (ret) {
- drm_gem_object_unreference_unlocked(&obj->base);
- kfree(intel_fb);
- return ERR_PTR(ret);
- }
+ mutex_unlock(&dev->struct_mutex);
+ if (ret)
+ goto err;
return &intel_fb->base;
+err:
+ drm_gem_object_unreference_unlocked(&obj->base);
+ kfree(intel_fb);
+
+ return ERR_PTR(ret);
}
static u32
@@ -7061,6 +7431,7 @@ static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
struct drm_display_mode *mode)
{
+#ifdef CONFIG_DRM_I915_FBDEV
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
@@ -7081,6 +7452,9 @@ mode_fits_in_fbdev(struct drm_device *dev,
return NULL;
return fb;
+#else
+ return NULL;
+#endif
}
bool intel_get_load_detect_pipe(struct drm_connector *connector,
@@ -7224,6 +7598,22 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
mutex_unlock(&crtc->mutex);
}
+static int i9xx_pll_refclk(struct drm_device *dev,
+ const struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
+
+ if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+ return dev_priv->vbt.lvds_ssc_freq * 1000;
+ else if (HAS_PCH_SPLIT(dev))
+ return 120000;
+ else if (!IS_GEN2(dev))
+ return 96000;
+ else
+ return 48000;
+}
+
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
@@ -7231,14 +7621,15 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = pipe_config->cpu_transcoder;
- u32 dpll = I915_READ(DPLL(pipe));
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
intel_clock_t clock;
+ int refclk = i9xx_pll_refclk(dev, pipe_config);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = I915_READ(FP0(pipe));
+ fp = pipe_config->dpll_hw_state.fp0;
else
- fp = I915_READ(FP1(pipe));
+ fp = pipe_config->dpll_hw_state.fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
if (IS_PINEVIEW(dev)) {
@@ -7269,14 +7660,13 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
default:
DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
- pipe_config->adjusted_mode.clock = 0;
return;
}
if (IS_PINEVIEW(dev))
- pineview_clock(96000, &clock);
+ pineview_clock(refclk, &clock);
else
- i9xx_clock(96000, &clock);
+ i9xx_clock(refclk, &clock);
} else {
bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
@@ -7284,13 +7674,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
clock.p2 = 14;
-
- if ((dpll & PLL_REF_INPUT_MASK) ==
- PLLB_REF_INPUT_SPREADSPECTRUMIN) {
- /* XXX: might not be 66MHz */
- i9xx_clock(66000, &clock);
- } else
- i9xx_clock(48000, &clock);
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
@@ -7302,59 +7685,55 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.p2 = 4;
else
clock.p2 = 2;
-
- i9xx_clock(48000, &clock);
}
+
+ i9xx_clock(refclk, &clock);
}
- pipe_config->adjusted_mode.clock = clock.dot;
+ /*
+ * This value includes pixel_multiplier. We will use
+ * port_clock to compute adjusted_mode.crtc_clock in the
+ * encoder's get_config() function.
+ */
+ pipe_config->port_clock = clock.dot;
}
-static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config)
+int intel_dotclock_calculate(int link_freq,
+ const struct intel_link_m_n *m_n)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- int link_freq, repeat;
- u64 clock;
- u32 link_m, link_n;
-
- repeat = pipe_config->pixel_multiplier;
-
/*
* The calculation for the data clock is:
- * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
+ * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
* But we want to avoid losing precision if possible, so:
- * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
+ * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
*
* and the link clock is simpler:
- * link_clock = (m * link_clock * repeat) / n
+ * link_clock = (m * link_clock) / n
*/
- /*
- * We need to get the FDI or DP link clock here to derive
- * the M/N dividers.
- *
- * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
- * For DP, it's either 1.62GHz or 2.7GHz.
- * We do our calculations in 10*MHz since we don't need much precison.
- */
- if (pipe_config->has_pch_encoder)
- link_freq = intel_fdi_link_freq(dev) * 10000;
- else
- link_freq = pipe_config->port_clock;
+ if (!m_n->link_n)
+ return 0;
- link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
- link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
+ return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
+}
- if (!link_m || !link_n)
- return;
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
- clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
- do_div(clock, link_n);
+ /* read out port_clock from the DPLL */
+ i9xx_crtc_clock_get(crtc, pipe_config);
- pipe_config->adjusted_mode.clock = clock;
+ /*
+ * This value does not include pixel_multiplier.
+ * We will check that port_clock and adjusted_mode.crtc_clock
+ * agree once we know their relationship in the encoder's
+ * get_config() function.
+ */
+ pipe_config->adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
+ &pipe_config->fdi_m_n);
}
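Following the m/n comment above, the dot clock is the link clock scaled by M/N; a sketch of intel_dotclock_calculate() with made-up numbers:

#include <stdint.h>
#include <stdio.h>

static int dotclock_calculate(int link_freq, uint32_t link_m, uint32_t link_n)
{
	if (!link_n)
		return 0;

	/* 64-bit intermediate, as in the div_u64() call above */
	return (int)(((uint64_t)link_m * link_freq) / link_n);
}

int main(void)
{
	/* made-up M/N pair against a 270000 kHz link clock */
	printf("crtc_clock = %d kHz\n", dotclock_calculate(270000, 22, 36));
	return 0;
}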
/** Returns the currently programmed mode of the given pipe. */
@@ -7370,6 +7749,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
int vsync = I915_READ(VSYNC(cpu_transcoder));
+ enum pipe pipe = intel_crtc->pipe;
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
@@ -7382,11 +7762,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
* Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
* to use a real value here instead.
*/
- pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+ pipe_config.cpu_transcoder = (enum transcoder) pipe;
pipe_config.pixel_multiplier = 1;
+ pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
+ pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
+ pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
i9xx_crtc_clock_get(intel_crtc, &pipe_config);
- mode->clock = pipe_config.adjusted_mode.clock;
+ mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7492,6 +7875,9 @@ void intel_mark_idle(struct drm_device *dev)
intel_decrease_pllclock(crtc);
}
+
+ if (dev_priv->info->gen >= 6)
+ gen6_rps_idle(dev->dev_private);
}
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -7680,7 +8066,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
err_unpin:
@@ -7722,7 +8108,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
err_unpin:
@@ -7771,7 +8157,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
err_unpin:
@@ -7816,7 +8202,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
err_unpin:
@@ -7895,7 +8281,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
err_unpin:
@@ -7940,7 +8326,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
fb->pitches[0] != crtc->fb->pitches[0]))
return -EINVAL;
- work = kzalloc(sizeof *work, GFP_KERNEL);
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
@@ -8175,6 +8561,17 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
return bpp;
}
+static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
+ "type: 0x%x flags: 0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hsync_start,
+ mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vsync_start,
+ mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
+}
+
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
const char *context)
@@ -8191,10 +8588,19 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
pipe_config->fdi_m_n.tu);
+ DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ pipe_config->has_dp_encoder,
+ pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
+ pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
+ pipe_config->dp_m_n.tu);
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->requested_mode);
DRM_DEBUG_KMS("adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
+ intel_dump_crtc_timings(&pipe_config->adjusted_mode);
+ DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
+ DRM_DEBUG_KMS("pipe src size: %dx%d\n",
+ pipe_config->pipe_src_w, pipe_config->pipe_src_h);
DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
@@ -8204,6 +8610,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->pch_pfit.size,
pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
+ DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}
static bool check_encoder_cloning(struct drm_crtc *crtc)
@@ -8247,6 +8654,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
drm_mode_copy(&pipe_config->adjusted_mode, mode);
drm_mode_copy(&pipe_config->requested_mode, mode);
+
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -8273,13 +8681,25 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
if (plane_bpp < 0)
goto fail;
+ /*
+ * Determine the real pipe dimensions. Note that stereo modes can
+ * increase the actual pipe size due to the frame doubling and
+ * insertion of additional space for blanks between the frames. This
+ * is stored in the crtc timings. We use the requested mode to do this
+ * computation to clearly distinguish it from the adjusted mode, which
+ * can be changed by the connectors in the below retry loop.
+ */
+ drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
+ pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
+ pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
+
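The block above derives the pipe source size from the requested mode, not the adjusted mode, because connectors may rewrite the adjusted mode in the retry loop below, and because frame-packed stereo modes enlarge the active area that the pipe actually scans out. A rough standalone illustration of the idea; the frame-packing layout used here is an assumption for illustration, not a transcription of drm_mode_set_crtcinfo():

	#include <stdbool.h>
	#include <stdio.h>

	struct simple_mode {
		int hdisplay, vdisplay, vtotal;
		int crtc_hdisplay, crtc_vdisplay;	/* "hardware" timings */
	};

	/* Hypothetical stand-in for drm_mode_set_crtcinfo(mode, CRTC_STEREO_DOUBLE):
	 * frame-packed stereo sends both eyes plus the gap between them as one
	 * tall frame, so the crtc_* vertical size grows beyond vdisplay. */
	static void fill_crtc_timings(struct simple_mode *m, bool frame_packed)
	{
		m->crtc_hdisplay = m->hdisplay;
		m->crtc_vdisplay = m->vdisplay;
		if (frame_packed)
			m->crtc_vdisplay = m->vdisplay + m->vtotal;	/* assumed layout */
	}

	int main(void)
	{
		struct simple_mode m = { .hdisplay = 1920, .vdisplay = 1080, .vtotal = 1125 };

		fill_crtc_timings(&m, true);
		/* pipe_src_{w,h} are then taken from the crtc_* fields, as in the patch */
		printf("pipe src: %dx%d\n", m.crtc_hdisplay, m.crtc_vdisplay);
		return 0;
	}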
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
pipe_config->port_clock = 0;
pipe_config->pixel_multiplier = 1;
/* Fill in default crtc timings, allow encoders to overwrite them. */
- drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
+ drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
@@ -8300,7 +8720,8 @@ encoder_retry:
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
if (!pipe_config->port_clock)
- pipe_config->port_clock = pipe_config->adjusted_mode.clock;
+ pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
+ * pipe_config->pixel_multiplier;
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
if (ret < 0) {
@@ -8487,13 +8908,9 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
}
-static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
- struct intel_crtc_config *new)
+static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
- int clock1, clock2, diff;
-
- clock1 = cur->adjusted_mode.clock;
- clock2 = new->adjusted_mode.clock;
+ int diff;
if (clock1 == clock2)
return true;
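With the helper reduced to two plain integers it can be reused for any pair of clocks, not just the adjusted-mode clocks of two configs. A self-contained sketch of a tolerance-based comparison in the same spirit; the 5% tolerance is an arbitrary choice for illustration and may differ from the driver's exact threshold:

	#include <stdbool.h>
	#include <stdlib.h>

	/* Treat two clocks (in kHz) as equal if they agree within a small
	 * tolerance, so rounding differences between the computed state and
	 * the read-back hardware state don't trip the state checker. */
	static bool fuzzy_clock_check(int clock1, int clock2)
	{
		int diff;

		if (clock1 == clock2)
			return true;
		if (clock1 == 0 || clock2 == 0)
			return false;

		diff = abs(clock1 - clock2);
		/* within 5% of the larger clock (illustrative tolerance) */
		return diff * 20 < (clock1 > clock2 ? clock1 : clock2);
	}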
@@ -8547,6 +8964,15 @@ intel_pipe_config_compare(struct drm_device *dev,
return false; \
}
+#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
+ if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
+ DRM_ERROR("mismatch in " #name " " \
+ "(expected %i, found %i)\n", \
+ current_config->name, \
+ pipe_config->name); \
+ return false; \
+ }
+
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
@@ -8560,6 +8986,13 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(fdi_m_n.link_n);
PIPE_CONF_CHECK_I(fdi_m_n.tu);
+ PIPE_CONF_CHECK_I(has_dp_encoder);
+ PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
+ PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
+ PIPE_CONF_CHECK_I(dp_m_n.link_m);
+ PIPE_CONF_CHECK_I(dp_m_n.link_n);
+ PIPE_CONF_CHECK_I(dp_m_n.tu);
+
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
@@ -8590,8 +9023,8 @@ intel_pipe_config_compare(struct drm_device *dev,
DRM_MODE_FLAG_NVSYNC);
}
- PIPE_CONF_CHECK_I(requested_mode.hdisplay);
- PIPE_CONF_CHECK_I(requested_mode.vdisplay);
+ PIPE_CONF_CHECK_I(pipe_src_w);
+ PIPE_CONF_CHECK_I(pipe_src_h);
PIPE_CONF_CHECK_I(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
@@ -8606,26 +9039,28 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(ips_enabled);
+ PIPE_CONF_CHECK_I(double_wide);
+
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
+ PIPE_CONF_CHECK_I(pipe_bpp);
+
+ if (!IS_HASWELL(dev)) {
+ PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+ }
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_FLAGS
+#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
- if (!IS_HASWELL(dev)) {
- if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
- DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
- current_config->adjusted_mode.clock,
- pipe_config->adjusted_mode.clock);
- return false;
- }
- }
-
return true;
}
@@ -8757,9 +9192,6 @@ check_crtc_state(struct drm_device *dev)
encoder->get_config(encoder, &pipe_config);
}
- if (dev_priv->display.get_clock)
- dev_priv->display.get_clock(crtc, &pipe_config);
-
WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
@@ -8834,6 +9266,18 @@ intel_modeset_check_state(struct drm_device *dev)
check_shared_dpll_state(dev);
}
+void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+ int dotclock)
+{
+ /*
+ * FDI already provided one idea for the dotclock.
+ * Yell if the encoder disagrees.
+ */
+ WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
+ "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+ pipe_config->adjusted_mode.crtc_clock, dotclock);
+}
+
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
@@ -8846,7 +9290,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
unsigned disable_pipes, prepare_pipes, modeset_pipes;
int ret = 0;
- saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
+ saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
if (!saved_mode)
return -ENOMEM;
saved_hwmode = saved_mode + 1;
@@ -9385,7 +9829,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
struct intel_crtc *intel_crtc;
int i;
- intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
if (intel_crtc == NULL)
return;
@@ -9536,7 +9980,13 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
- /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
+ PORT_B);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
+ }
+
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
@@ -9545,12 +9995,7 @@ static void intel_setup_outputs(struct drm_device *dev)
PORT_C);
}
- if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
- intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
- PORT_B);
- if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
- intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
- }
+ intel_dsi_init(dev);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -9606,6 +10051,7 @@ static void intel_setup_outputs(struct drm_device *dev)
void intel_framebuffer_fini(struct intel_framebuffer *fb)
{
drm_framebuffer_cleanup(&fb->base);
+ WARN_ON(!fb->obj->framebuffer_references--);
drm_gem_object_unreference_unlocked(&fb->obj->base);
}
@@ -9637,9 +10083,12 @@ int intel_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
+ int aligned_height, tile_height;
int pitch_limit;
int ret;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
if (obj->tiling_mode == I915_TILING_Y) {
DRM_DEBUG("hardware does not support tiling Y\n");
return -EINVAL;
@@ -9728,8 +10177,16 @@ int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
+ tile_height = IS_GEN2(dev) ? 16 : 8;
+ aligned_height = ALIGN(mode_cmd->height,
+ obj->tiling_mode ? tile_height : 1);
+ /* FIXME drm helper for size checks (especially planar formats)? */
+ if (obj->base.size < aligned_height * mode_cmd->pitches[0])
+ return -EINVAL;
+
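The new check rejects framebuffers whose backing object is too small for the requested height once tile alignment is taken into account. A standalone version of the same arithmetic; ALIGN_UP is the usual round-up-to-multiple macro, and the 8/16-row tile heights follow the values used in the patch:

	#include <stdbool.h>
	#include <stdint.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

	static bool fb_fits_in_object(uint32_t height, uint32_t pitch,
				      uint64_t obj_size, bool tiled, bool is_gen2)
	{
		uint32_t tile_height = is_gen2 ? 16 : 8;
		uint32_t aligned_height = ALIGN_UP(height, tiled ? tile_height : 1u);

		/* every (possibly padded) row must fit inside the backing object */
		return obj_size >= (uint64_t)aligned_height * pitch;
	}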
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
+ intel_fb->obj->framebuffer_references++;
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
@@ -9755,9 +10212,15 @@ intel_user_framebuffer_create(struct drm_device *dev,
return intel_framebuffer_create(dev, mode_cmd, obj);
}
+#ifndef CONFIG_DRM_I915_FBDEV
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+#endif
+
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
- .output_poll_changed = intel_fb_output_poll_changed,
+ .output_poll_changed = intel_fbdev_output_poll_changed,
};
/* Set up chip specific display functions */
@@ -9783,7 +10246,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = ironlake_update_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
- dev_priv->display.get_clock = ironlake_crtc_clock_get;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9791,7 +10253,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = ironlake_update_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9799,7 +10260,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = i9xx_update_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9975,8 +10435,7 @@ static struct intel_quirk intel_quirks[] = {
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
- /* 830/845 need to leave pipe A & dpll A up */
- { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ /* 830 needs to leave pipe A & dpll A up */
{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
/* Lenovo U160 cannot use SSC on LVDS */
@@ -9985,20 +10444,11 @@ static struct intel_quirk intel_quirks[] = {
/* Sony Vaio Y cannot use SSC on LVDS */
{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
- /* Acer Aspire 5734Z must invert backlight brightness */
- { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
-
- /* Acer/eMachines G725 */
- { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
-
- /* Acer/eMachines e725 */
- { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
-
- /* Acer/Packard Bell NCL20 */
- { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
-
- /* Acer Aspire 4736Z */
- { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+ /*
+ * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
+ * seem to use inverted backlight PWM.
+ */
+ { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
/* Dell XPS13 HD Sandy Bridge */
{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
@@ -10047,12 +10497,19 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
- intel_init_power_well(dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
intel_prepare_ddi(dev);
intel_init_clock_gating(dev);
+ /* Enable the CRI clock source so we can get at the display */
+ if (IS_VALLEYVIEW(dev))
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_INTEGRATED_CRI_CLK_VLV);
+
+ intel_init_dpio(dev);
+
mutex_lock(&dev->struct_mutex);
intel_enable_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
@@ -10320,7 +10777,7 @@ void i915_redisable_vga(struct drm_device *dev)
(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
return;
- if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+ if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
}
@@ -10343,6 +10800,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
&crtc->config);
crtc->base.enabled = crtc->active;
+ crtc->primary_enabled = crtc->active;
DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
crtc->base.base.id,
@@ -10383,20 +10841,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
encoder->connectors_active = false;
- DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
encoder->base.crtc ? "enabled" : "disabled",
- pipe);
- }
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
- if (!crtc->active)
- continue;
- if (dev_priv->display.get_clock)
- dev_priv->display.get_clock(crtc,
- &crtc->config);
+ pipe_name(pipe));
}
list_for_each_entry(connector, &dev->mode_config.connector_list,
@@ -10423,7 +10872,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
- struct drm_plane *plane;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
int i;
@@ -10470,7 +10918,12 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
pll->on = false;
}
+ if (IS_HASWELL(dev))
+ ilk_wm_get_hw_state(dev);
+
if (force_restore) {
+ i915_redisable_vga(dev);
+
/*
* We need to use raw interfaces for restoring state to avoid
* checking (bogus) intermediate states.
@@ -10482,10 +10935,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
crtc->fb);
}
- list_for_each_entry(plane, &dev->mode_config.plane_list, head)
- intel_plane_restore(plane);
-
- i915_redisable_vga(dev);
} else {
intel_modeset_update_staged_output_state(dev);
}
@@ -10508,6 +10957,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
+ struct drm_connector *connector;
/*
* Interrupts and polling as the first thing to avoid creating havoc.
@@ -10548,6 +10998,10 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* destroy backlight, if any, before the connectors */
intel_panel_destroy_backlight(dev);
+ /* destroy the sysfs files before encoders/connectors */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_sysfs_connector_remove(connector);
+
drm_mode_config_cleanup(dev);
intel_cleanup_overlay(dev);
@@ -10643,7 +11097,7 @@ intel_display_capture_error_state(struct drm_device *dev)
if (INTEL_INFO(dev)->num_pipes == 0)
return NULL;
- error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
@@ -10651,6 +11105,9 @@ intel_display_capture_error_state(struct drm_device *dev)
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
for_each_pipe(i) {
+ if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
+ continue;
+
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10684,6 +11141,10 @@ intel_display_capture_error_state(struct drm_device *dev)
for (i = 0; i < error->num_transcoders; i++) {
enum transcoder cpu_transcoder = transcoders[i];
+ if (!intel_display_power_enabled(dev,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
+ continue;
+
error->transcoder[i].cpu_transcoder = cpu_transcoder;
error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
@@ -10695,12 +11156,6 @@ intel_display_capture_error_state(struct drm_device *dev)
error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
- /* In the code above we read the registers without checking if the power
- * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
- * prevent the next I915_WRITE from detecting it and printing an error
- * message. */
- intel_uncore_clear_errors(dev);
-
return error;
}
@@ -10745,7 +11200,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
}
for (i = 0; i < error->num_transcoders; i++) {
- err_printf(m, " CPU transcoder: %c\n",
+ err_printf(m, "CPU transcoder: %c\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2c555f91bfae..c8515bbfaadb 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -38,6 +38,32 @@
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+struct dp_link_dpll {
+ int link_bw;
+ struct dpll dpll;
+};
+
+static const struct dp_link_dpll gen4_dpll[] = {
+ { DP_LINK_BW_1_62,
+ { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
+ { DP_LINK_BW_2_7,
+ { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
+};
+
+static const struct dp_link_dpll pch_dpll[] = {
+ { DP_LINK_BW_1_62,
+ { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
+ { DP_LINK_BW_2_7,
+ { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
+};
+
+static const struct dp_link_dpll vlv_dpll[] = {
+ { DP_LINK_BW_1_62,
+ { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
+ { DP_LINK_BW_2_7,
+ { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
+};
+
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -211,24 +237,77 @@ intel_hrawclk(struct drm_device *dev)
}
}
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *out);
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *out);
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ enum pipe pipe;
+
+ /* modeset should have pipe */
+ if (crtc)
+ return to_intel_crtc(crtc)->pipe;
+
+ /* init time, try to find a pipe with this port selected */
+ for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+ u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
+ PANEL_PORT_SELECT_MASK;
+ if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
+ return pipe;
+ if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
+ return pipe;
+ }
+
+ /* shrug */
+ return PIPE_A;
+}
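vlv_power_sequencer_pipe() must work before a CRTC is attached, so at init time it scans each pipe's panel-power registers for a port-select field that names this port, falling back to pipe A if nothing matches. A simplified standalone sketch of that scan; the register-read function and the select codes are stand-ins for illustration only:

	enum pipe { PIPE_A, PIPE_B };
	enum port { PORT_B, PORT_C };

	/* dummy backing store standing in for the per-pipe PP_ON_DELAYS registers */
	static unsigned int pp_on_delays[2];

	static unsigned int read_port_select(enum pipe pipe)
	{
		return pp_on_delays[pipe];
	}

	static enum pipe find_power_sequencer_pipe(enum port port,
						   unsigned int select_dpb,
						   unsigned int select_dpc)
	{
		for (enum pipe pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
			unsigned int sel = read_port_select(pipe);

			if (sel == select_dpb && port == PORT_B)
				return pipe;
			if (sel == select_dpc && port == PORT_C)
				return pipe;
		}
		return PIPE_A;	/* nothing claims the port; fall back, as the patch does */
	}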
+
+static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (HAS_PCH_SPLIT(dev))
+ return PCH_PP_CONTROL;
+ else
+ return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
+}
+
+static u32 _pp_stat_reg(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (HAS_PCH_SPLIT(dev))
+ return PCH_PP_STATUS;
+ else
+ return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
+}
+
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_stat_reg;
- pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
- return (I915_READ(pp_stat_reg) & PP_ON) != 0;
+ return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_ctrl_reg;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
- return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
+ return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
}
static void
@@ -236,19 +315,15 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_stat_reg, pp_ctrl_reg;
if (!is_edp(intel_dp))
return;
- pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-
if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
WARN(1, "eDP powered off while attempting aux channel communication.\n");
DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
- I915_READ(pp_stat_reg),
- I915_READ(pp_ctrl_reg));
+ I915_READ(_pp_stat_reg(intel_dp)),
+ I915_READ(_pp_ctrl_reg(intel_dp)));
}
}
@@ -361,6 +436,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
goto out;
}
+ /* Only 5 data registers! */
+ if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
+ ret = -E2BIG;
+ goto out;
+ }
+
while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
@@ -451,9 +532,10 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
int msg_bytes;
uint8_t ack;
+ if (WARN_ON(send_bytes > 16))
+ return -E2BIG;
+
intel_dp_check_edp(intel_dp);
- if (send_bytes > 16)
- return -1;
msg[0] = AUX_NATIVE_WRITE << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
@@ -494,6 +576,9 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
uint8_t ack;
int ret;
+ if (WARN_ON(recv_bytes > 19))
+ return -E2BIG;
+
intel_dp_check_edp(intel_dp);
msg[0] = AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
@@ -538,6 +623,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
int reply_bytes;
int ret;
+ ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_check_edp(intel_dp);
/* Set up the command byte */
if (mode & MODE_I2C_READ)
@@ -569,13 +655,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
break;
}
- for (retry = 0; retry < 5; retry++) {
+ /*
+ * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
+ * required to retry at least seven times upon receiving AUX_DEFER
+ * before giving up the AUX transaction.
+ */
+ for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
- return ret;
+ goto out;
}
switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
@@ -586,7 +677,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
break;
case AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto out;
case AUX_NATIVE_REPLY_DEFER:
/*
* For now, just give more slack to branch devices. We
@@ -604,7 +696,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
reply[0]);
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto out;
}
switch (reply[0] & AUX_I2C_REPLY_MASK) {
@@ -612,22 +705,29 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
if (mode == MODE_I2C_READ) {
*read_byte = reply[1];
}
- return reply_bytes - 1;
+ ret = reply_bytes - 1;
+ goto out;
case AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto out;
case AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
default:
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto out;
}
}
DRM_ERROR("too many retries, giving up\n");
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+
+out:
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ return ret;
}
static int
@@ -647,11 +747,9 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
- intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
+ intel_dp->adapter.dev.parent = intel_connector->base.kdev;
- ironlake_edp_panel_vdd_on(intel_dp);
ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
- ironlake_edp_panel_vdd_off(intel_dp, false);
return ret;
}
@@ -660,41 +758,30 @@ intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config, int link_bw)
{
struct drm_device *dev = encoder->base.dev;
+ const struct dp_link_dpll *divisor = NULL;
+ int i, count = 0;
if (IS_G4X(dev)) {
- if (link_bw == DP_LINK_BW_1_62) {
- pipe_config->dpll.p1 = 2;
- pipe_config->dpll.p2 = 10;
- pipe_config->dpll.n = 2;
- pipe_config->dpll.m1 = 23;
- pipe_config->dpll.m2 = 8;
- } else {
- pipe_config->dpll.p1 = 1;
- pipe_config->dpll.p2 = 10;
- pipe_config->dpll.n = 1;
- pipe_config->dpll.m1 = 14;
- pipe_config->dpll.m2 = 2;
- }
- pipe_config->clock_set = true;
+ divisor = gen4_dpll;
+ count = ARRAY_SIZE(gen4_dpll);
} else if (IS_HASWELL(dev)) {
/* Haswell has special-purpose DP DDI clocks. */
} else if (HAS_PCH_SPLIT(dev)) {
- if (link_bw == DP_LINK_BW_1_62) {
- pipe_config->dpll.n = 1;
- pipe_config->dpll.p1 = 2;
- pipe_config->dpll.p2 = 10;
- pipe_config->dpll.m1 = 12;
- pipe_config->dpll.m2 = 9;
- } else {
- pipe_config->dpll.n = 2;
- pipe_config->dpll.p1 = 1;
- pipe_config->dpll.p2 = 10;
- pipe_config->dpll.m1 = 14;
- pipe_config->dpll.m2 = 8;
- }
- pipe_config->clock_set = true;
+ divisor = pch_dpll;
+ count = ARRAY_SIZE(pch_dpll);
} else if (IS_VALLEYVIEW(dev)) {
- /* FIXME: Need to figure out optimized DP clocks for vlv. */
+ divisor = vlv_dpll;
+ count = ARRAY_SIZE(vlv_dpll);
+ }
+
+ if (divisor && count) {
+ for (i = 0; i < count; i++) {
+ if (link_bw == divisor[i].link_bw) {
+ pipe_config->dpll = divisor[i].dpll;
+ pipe_config->clock_set = true;
+ break;
+ }
+ }
}
}
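With the per-platform dividers moved into the gen4_dpll/pch_dpll/vlv_dpll tables defined near the top of this file, clock selection reduces to a lookup by link rate. A compact standalone sketch of that pattern; the struct names are illustrative, and the divider values are copied from the gen4 table in the patch:

	#include <stddef.h>
	#include <stdbool.h>

	struct dpll_values { int p1, p2, n, m1, m2; };

	struct link_dpll_entry {
		int link_bw;			/* DP link-rate code */
		struct dpll_values dpll;
	};

	static const struct link_dpll_entry example_table[] = {
		{ 0x06, { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },	/* 1.62 GHz */
		{ 0x0a, { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } },	/* 2.7 GHz  */
	};

	/* Returns true and fills *out when the table has dividers for this rate. */
	static bool pick_dpll_for_link_bw(int link_bw, struct dpll_values *out)
	{
		for (size_t i = 0; i < sizeof(example_table) / sizeof(example_table[0]); i++) {
			if (example_table[i].link_bw == link_bw) {
				*out = example_table[i].dpll;
				return true;
			}
		}
		return false;
	}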
@@ -737,19 +824,22 @@ intel_dp_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
- max_lane_count, bws[max_clock], adjusted_mode->clock);
+ max_lane_count, bws[max_clock],
+ adjusted_mode->crtc_clock);
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = pipe_config->pipe_bpp;
- if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
+ if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+ dev_priv->vbt.edp_bpp < bpp) {
DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
dev_priv->vbt.edp_bpp);
- bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
+ bpp = dev_priv->vbt.edp_bpp;
}
for (; bpp >= 6*3; bpp -= 2*3) {
- mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
+ mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+ bpp);
for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
@@ -794,7 +884,8 @@ found:
mode_rate, link_avail);
intel_link_compute_m_n(bpp, lane_count,
- adjusted_mode->clock, pipe_config->port_clock,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
&pipe_config->dp_m_n);
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
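The bpp/clock/lane walk above looks for a combination whose link capacity covers the mode, then records the pixel-to-link relationship as the DP M/N values. A standalone sketch of the underlying arithmetic, with the 8b/10b overhead and the rounding written out explicitly; the helper names in the driver differ:

	#include <stdio.h>

	/* Data rate the mode needs: pixel clock (kHz) * bits per pixel / 8. */
	static int mode_data_rate(int pixel_clock_khz, int bpp)
	{
		return (pixel_clock_khz * bpp + 7) / 8;
	}

	/* Data rate the link offers: symbol clock * lanes, minus 8b/10b overhead. */
	static int link_data_rate(int link_clock_khz, int lanes)
	{
		return link_clock_khz * lanes * 8 / 10;
	}

	int main(void)
	{
		/* example: 1080p60 (148500 kHz) at 24 bpp over 2 lanes of a 2.7 GHz link */
		int need  = mode_data_rate(148500, 24);
		int avail = link_data_rate(270000, 2);

		printf("need %d, avail %d -> %s\n", need, avail,
		       need <= avail ? "fits" : "does not fit");
		return 0;
	}

The dp_m_n values then encode these two relationships as reduced fractions: the data M/N as need versus raw link rate, and the link M/N as pixel clock versus link clock, which is exactly what the dotclock read-out path earlier in this patch inverts.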
@@ -802,21 +893,6 @@ found:
return true;
}
-void intel_dp_init_link_config(struct intel_dp *intel_dp)
-{
- memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
- intel_dp->link_configuration[0] = intel_dp->link_bw;
- intel_dp->link_configuration[1] = intel_dp->lane_count;
- intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
- /*
- * Check for DPCD version > 1.1 and enhanced framing support
- */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
- intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- }
-}
-
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -889,8 +965,6 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
intel_write_eld(&encoder->base, adjusted_mode);
}
- intel_dp_init_link_config(intel_dp);
-
/* Split out the IBX/CPU vs CPT settings */
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
@@ -900,7 +974,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
intel_dp->DP |= DP_SYNC_VS_HIGH;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= crtc->pipe << 29;
@@ -914,7 +988,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
intel_dp->DP |= DP_SYNC_VS_HIGH;
intel_dp->DP |= DP_LINK_TRAIN_OFF;
- if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
if (crtc->pipe == 1)
@@ -944,8 +1018,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_stat_reg, pp_ctrl_reg;
- pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
mask, value,
@@ -987,11 +1061,8 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 control;
- u32 pp_ctrl_reg;
-
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
- control = I915_READ(pp_ctrl_reg);
+ control = I915_READ(_pp_ctrl_reg(intel_dp));
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
return control;
@@ -1006,17 +1077,16 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
if (!is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP VDD on\n");
WARN(intel_dp->want_panel_vdd,
"eDP VDD already requested on\n");
intel_dp->want_panel_vdd = true;
- if (ironlake_edp_have_panel_vdd(intel_dp)) {
- DRM_DEBUG_KMS("eDP VDD already on\n");
+ if (ironlake_edp_have_panel_vdd(intel_dp))
return;
- }
+
+ DRM_DEBUG_KMS("Turning eDP VDD on\n");
if (!ironlake_edp_have_panel_power(intel_dp))
ironlake_wait_panel_power_cycle(intel_dp);
@@ -1024,8 +1094,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
- pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -1050,11 +1120,13 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+ DRM_DEBUG_KMS("Turning eDP VDD off\n");
+
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
- pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp_stat_reg = _pp_stat_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -1082,7 +1154,6 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
if (!is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
intel_dp->want_panel_vdd = false;
@@ -1119,20 +1190,19 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
ironlake_wait_panel_power_cycle(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
if (IS_GEN5(dev)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
}
pp |= POWER_TARGET_ON;
if (!IS_GEN5(dev))
pp |= PANEL_POWER_RESET;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -1140,8 +1210,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
if (IS_GEN5(dev)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
}
}
@@ -1164,7 +1234,7 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
* panels get very unhappy and cease to work. */
pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -1197,7 +1267,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -1221,7 +1291,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
- pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -1368,6 +1438,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ int dotclock;
if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
tmp = I915_READ(intel_dp->output_reg);
@@ -1395,28 +1466,61 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= flags;
- if (dp_to_dig_port(intel_dp)->port == PORT_A) {
+ pipe_config->has_dp_encoder = true;
+
+ intel_dp_get_m_n(crtc, pipe_config);
+
+ if (port == PORT_A) {
if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
}
+
+ dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+
+ if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
+
+ if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+ pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+ /*
+ * This is a big fat ugly hack.
+ *
+ * Some machines in UEFI boot mode provide us a VBT that has 18
+ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+ * unknown we fail to light up. Yet the same BIOS boots up with
+ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+ * max, not what it tells us to use.
+ *
+ * Note: This will still be broken if the eDP panel is not lit
+ * up by the BIOS, and thus we can't get the mode at module
+ * load.
+ */
+ DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+ dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+ }
}
-static bool is_edp_psr(struct intel_dp *intel_dp)
+static bool is_edp_psr(struct drm_device *dev)
{
- return is_edp(intel_dp) &&
- intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return dev_priv->psr.sink_support;
}
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_HASWELL(dev))
+ if (!HAS_PSR(dev))
return false;
- return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+ return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
@@ -1466,7 +1570,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
/* Avoid continuous PSR exit by masking memup and hpd */
- I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
+ I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
intel_dp->psr_setup_done = true;
@@ -1491,9 +1595,9 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
DP_PSR_MAIN_LINK_ACTIVE);
/* Setup AUX registers */
- I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
- I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
- I915_WRITE(EDP_PSR_AUX_CTL,
+ I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
+ I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
+ I915_WRITE(EDP_PSR_AUX_CTL(dev),
DP_AUX_CH_CTL_TIME_OUT_400us |
(msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -1516,7 +1620,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
} else
val |= EDP_PSR_LINK_DISABLE;
- I915_WRITE(EDP_PSR_CTL, val |
+ I915_WRITE(EDP_PSR_CTL(dev), val |
EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -1533,42 +1637,33 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
- if (!IS_HASWELL(dev)) {
+ dev_priv->psr.source_ok = false;
+
+ if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
- dev_priv->no_psr_reason = PSR_NO_SOURCE;
return false;
}
if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
(dig_port->port != PORT_A)) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
- dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
- return false;
- }
-
- if (!is_edp_psr(intel_dp)) {
- DRM_DEBUG_KMS("PSR not supported by this panel\n");
- dev_priv->no_psr_reason = PSR_NO_SINK;
return false;
}
if (!i915_enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
- dev_priv->no_psr_reason = PSR_MODULE_PARAM;
return false;
}
crtc = dig_port->base.base.crtc;
if (crtc == NULL) {
DRM_DEBUG_KMS("crtc not active for PSR\n");
- dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
return false;
}
intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
+ if (!intel_crtc_active(crtc)) {
DRM_DEBUG_KMS("crtc not active for PSR\n");
- dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
return false;
}
@@ -1576,29 +1671,26 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
- dev_priv->no_psr_reason = PSR_NOT_TILED;
return false;
}
if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
- dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
return false;
}
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
- dev_priv->no_psr_reason = PSR_S3D_ENABLED;
return false;
}
- if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
- dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
return false;
}
+ dev_priv->psr.source_ok = true;
return true;
}
@@ -1637,10 +1729,11 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
if (!intel_edp_is_psr_enabled(dev))
return;
- I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+ I915_WRITE(EDP_PSR_CTL(dev),
+ I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
/* Wait till PSR is idle */
- if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
}
@@ -1654,7 +1747,7 @@ void intel_edp_psr_update(struct drm_device *dev)
if (encoder->type == INTEL_OUTPUT_EDP) {
intel_dp = enc_to_intel_dp(&encoder->base);
- if (!is_edp_psr(intel_dp))
+ if (!is_edp_psr(dev))
return;
if (!intel_edp_psr_match_conditions(intel_dp))
@@ -1713,14 +1806,24 @@ static void intel_enable_dp(struct intel_encoder *encoder)
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
+}
+
+static void g4x_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ intel_enable_dp(encoder);
ironlake_edp_backlight_on(intel_dp);
}
static void vlv_enable_dp(struct intel_encoder *encoder)
{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ ironlake_edp_backlight_on(intel_dp);
}
-static void intel_pre_enable_dp(struct intel_encoder *encoder)
+static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
@@ -1738,53 +1841,59 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
+ struct edp_power_seq power_seq;
u32 val;
mutex_lock(&dev_priv->dpio_lock);
- val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+ val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
- vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
+ vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
+ /* init power sequencer on this pipe and port */
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+
intel_enable_dp(encoder);
vlv_wait_port_ready(dev_priv, port);
}
-static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
-
- if (!IS_VALLEYVIEW(dev))
- return;
+ int pipe = intel_crtc->pipe;
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
- vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
- vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -1919,10 +2028,13 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dport->base.base.crtc);
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
uint8_t train_set = intel_dp->train_set[0];
int port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
@@ -1998,21 +2110,22 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
}
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
- vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
- vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
- vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
- vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
- vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
mutex_unlock(&dev_priv->dpio_lock);
return 0;
}
static void
-intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_get_adjust_train(struct intel_dp *intel_dp,
+ const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t v = 0;
uint8_t p = 0;
@@ -2207,14 +2320,15 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
- uint32_t dp_reg_value,
+ uint32_t *DP,
uint8_t dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
- int ret;
+ uint8_t buf[sizeof(intel_dp->train_set) + 1];
+ int ret, len;
if (HAS_DDI(dev)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2243,62 +2357,93 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
I915_WRITE(DP_TP_CTL(port), temp);
} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
- dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+ *DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
- dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+ *DP |= DP_LINK_TRAIN_OFF_CPT;
break;
case DP_TRAINING_PATTERN_1:
- dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+ *DP |= DP_LINK_TRAIN_PAT_1_CPT;
break;
case DP_TRAINING_PATTERN_2:
- dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ *DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
- dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ *DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
} else {
- dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+ *DP &= ~DP_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
- dp_reg_value |= DP_LINK_TRAIN_OFF;
+ *DP |= DP_LINK_TRAIN_OFF;
break;
case DP_TRAINING_PATTERN_1:
- dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+ *DP |= DP_LINK_TRAIN_PAT_1;
break;
case DP_TRAINING_PATTERN_2:
- dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ *DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
- dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ *DP |= DP_LINK_TRAIN_PAT_2;
break;
}
}
- I915_WRITE(intel_dp->output_reg, dp_reg_value);
+ I915_WRITE(intel_dp->output_reg, *DP);
POSTING_READ(intel_dp->output_reg);
- intel_dp_aux_native_write_1(intel_dp,
- DP_TRAINING_PATTERN_SET,
- dp_train_pat);
-
- if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+ buf[0] = dp_train_pat;
+ if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
DP_TRAINING_PATTERN_DISABLE) {
- ret = intel_dp_aux_native_write(intel_dp,
- DP_TRAINING_LANE0_SET,
- intel_dp->train_set,
- intel_dp->lane_count);
- if (ret != intel_dp->lane_count)
- return false;
+ /* don't write DP_TRAINING_LANEx_SET on disable */
+ len = 1;
+ } else {
+ /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
+ memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
+ len = intel_dp->lane_count + 1;
}
- return true;
+ ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
+ buf, len);
+
+ return ret == len;
+}
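The rewrite above folds DP_TRAINING_PATTERN_SET and the per-lane DP_TRAINING_LANEx_SET values into one AUX write, which works because the lane registers sit immediately after the pattern register in the DPCD. A small sketch of how that buffer is assembled; the "skip the lane bytes on disable" rule mirrors the patch, and the constants are illustrative:

	#include <string.h>
	#include <stdint.h>

	#define MAX_LANES		4
	#define PATTERN_MASK		0x3
	#define PATTERN_DISABLE		0x0	/* training pattern "off", example value */

	/* Returns the number of bytes to send starting at the pattern register. */
	static int build_training_write(uint8_t buf[MAX_LANES + 1], uint8_t pattern,
					const uint8_t *train_set, int lane_count)
	{
		buf[0] = pattern;
		if ((pattern & PATTERN_MASK) == PATTERN_DISABLE)
			return 1;			/* no lane values on disable */

		memcpy(buf + 1, train_set, lane_count);	/* lane regs follow the pattern reg */
		return 1 + lane_count;
	}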
+
+static bool
+intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+ uint8_t dp_train_pat)
+{
+ memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+ intel_dp_set_signal_levels(intel_dp, DP);
+ return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
+}
+
+static bool
+intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+ const uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ intel_get_adjust_train(intel_dp, link_status);
+ intel_dp_set_signal_levels(intel_dp, DP);
+
+ I915_WRITE(intel_dp->output_reg, *DP);
+ POSTING_READ(intel_dp->output_reg);
+
+ ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
+ intel_dp->train_set,
+ intel_dp->lane_count);
+
+ return ret == intel_dp->lane_count;
}
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
@@ -2342,32 +2487,37 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t voltage;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
+ uint8_t link_config[2];
if (HAS_DDI(dev))
intel_ddi_prepare_link_retrain(encoder);
/* Write the link configuration data */
- intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
- intel_dp->link_configuration,
- DP_LINK_CONFIGURATION_SIZE);
+ link_config[0] = intel_dp->link_bw;
+ link_config[1] = intel_dp->lane_count;
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
+
+ link_config[0] = 0;
+ link_config[1] = DP_SET_ANSI_8B10B;
+ intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
DP |= DP_PORT_EN;
- memset(intel_dp->train_set, 0, 4);
+ /* clock recovery */
+ if (!intel_dp_reset_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE)) {
+ DRM_ERROR("failed to enable link training\n");
+ return;
+ }
+
voltage = 0xff;
voltage_tries = 0;
loop_tries = 0;
for (;;) {
- /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
- uint8_t link_status[DP_LINK_STATUS_SIZE];
-
- intel_dp_set_signal_levels(intel_dp, &DP);
-
- /* Set training pattern 1 */
- if (!intel_dp_set_link_train(intel_dp, DP,
- DP_TRAINING_PATTERN_1 |
- DP_LINK_SCRAMBLING_DISABLE))
- break;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -2387,10 +2537,12 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if (i == intel_dp->lane_count) {
++loop_tries;
if (loop_tries == 5) {
- DRM_DEBUG_KMS("too many full retries, give up\n");
+ DRM_ERROR("too many full retries, give up\n");
break;
}
- memset(intel_dp->train_set, 0, 4);
+ intel_dp_reset_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE);
voltage_tries = 0;
continue;
}
@@ -2399,15 +2551,18 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++voltage_tries;
if (voltage_tries == 5) {
- DRM_DEBUG_KMS("too many voltage retries, give up\n");
+ DRM_ERROR("too many voltage retries, give up\n");
break;
}
} else
voltage_tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- /* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status);
+ /* Update training set as requested by target */
+ if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+ DRM_ERROR("failed to update link training\n");
+ break;
+ }
}
intel_dp->DP = DP;
@@ -2421,11 +2576,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
uint32_t DP = intel_dp->DP;
/* channel equalization */
+ if (!intel_dp_set_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_2 |
+ DP_LINK_SCRAMBLING_DISABLE)) {
+ DRM_ERROR("failed to start channel equalization\n");
+ return;
+ }
+
tries = 0;
cr_tries = 0;
channel_eq = false;
for (;;) {
- uint8_t link_status[DP_LINK_STATUS_SIZE];
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
@@ -2433,21 +2595,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- intel_dp_set_signal_levels(intel_dp, &DP);
-
- /* channel eq pattern */
- if (!intel_dp_set_link_train(intel_dp, DP,
- DP_TRAINING_PATTERN_2 |
- DP_LINK_SCRAMBLING_DISABLE))
- break;
-
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
- if (!intel_dp_get_link_status(intel_dp, link_status))
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_ERROR("failed to get link status\n");
break;
+ }
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
+ intel_dp_set_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_2 |
+ DP_LINK_SCRAMBLING_DISABLE);
cr_tries++;
continue;
}
@@ -2461,13 +2620,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
if (tries > 5) {
intel_dp_link_down(intel_dp);
intel_dp_start_link_train(intel_dp);
+ intel_dp_set_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_2 |
+ DP_LINK_SCRAMBLING_DISABLE);
tries = 0;
cr_tries++;
continue;
}
- /* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status);
+ /* Update training set as requested by target */
+ if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+ DRM_ERROR("failed to update link training\n");
+ break;
+ }
++tries;
}
@@ -2482,7 +2647,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
- intel_dp_set_link_train(intel_dp, intel_dp->DP,
+ intel_dp_set_link_train(intel_dp, &intel_dp->DP,
DP_TRAINING_PATTERN_DISABLE);
}
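
A minimal sketch of how a caller is expected to sequence the three training phases touched above (assumed usage; the actual call sites are outside this hunk):

	static void example_dp_train(struct intel_dp *intel_dp)
	{
		intel_dp_start_link_train(intel_dp);    /* clock recovery, TP1 */
		intel_dp_complete_link_train(intel_dp); /* channel equalization, TP2 */
		intel_dp_stop_link_train(intel_dp);     /* training pattern off */
	}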
@@ -2569,6 +2734,10 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
@@ -2584,11 +2753,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
/* Check if the panel supports PSR */
memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
- intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
- intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
- if (is_edp_psr(intel_dp))
- DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+ if (is_edp(intel_dp)) {
+ intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
+ intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+ if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+ dev_priv->psr.sink_support = true;
+ DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+ }
+ }
+
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
@@ -2708,7 +2882,6 @@ static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
uint8_t *dpcd = intel_dp->dpcd;
- bool hpd;
uint8_t type;
if (!intel_dp_get_dpcd(intel_dp))
@@ -2719,8 +2892,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
return connector_status_connected;
/* If we're HPD-aware, SINK_COUNT changes dynamically */
- hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
- if (hpd) {
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
uint8_t reg;
if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
&reg, 1))
@@ -2734,9 +2907,18 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
return connector_status_connected;
/* Well we tried, say unknown for unreliable port types */
- type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
- if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
- return connector_status_unknown;
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+ type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+ if (type == DP_DS_PORT_TYPE_VGA ||
+ type == DP_DS_PORT_TYPE_NON_EDID)
+ return connector_status_unknown;
+ } else {
+ type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_TYPE_MASK;
+ if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
+ type == DP_DWN_STRM_PORT_TYPE_OTHER)
+ return connector_status_unknown;
+ }
/* Anything else is out of spec, warn and ignore */
DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
@@ -2810,19 +2992,11 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
/* use cached edid if we have one */
if (intel_connector->edid) {
- struct edid *edid;
- int size;
-
/* invalid edid */
if (IS_ERR(intel_connector->edid))
return NULL;
- size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
- edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
- if (!edid)
- return NULL;
-
- return edid;
+ return drm_edid_duplicate(intel_connector->edid);
}
return drm_get_edid(connector, adapter);
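
drm_edid_duplicate() takes over the open-coded copy removed above; assuming it mirrors that code, it is roughly:

	struct edid *drm_edid_duplicate(const struct edid *edid)
	{
		return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH,
			       GFP_KERNEL);
	}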
@@ -3030,7 +3204,6 @@ intel_dp_connector_destroy(struct drm_connector *connector)
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
intel_panel_fini(&intel_connector->panel);
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -3101,7 +3274,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
bool intel_dpd_is_edp(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
+ union child_device_config *p_child;
int i;
if (!dev_priv->vbt.child_dev_num)
@@ -3110,8 +3283,8 @@ bool intel_dpd_is_edp(struct drm_device *dev)
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i;
- if (p_child->dvo_port == PORT_IDPD &&
- p_child->device_type == DEVICE_TYPE_eDP)
+ if (p_child->common.dvo_port == PORT_IDPD &&
+ p_child->common.device_type == DEVICE_TYPE_eDP)
return true;
}
return false;
@@ -3144,24 +3317,26 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq cur, vbt, spec, final;
u32 pp_on, pp_off, pp_div, pp;
- int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+ int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
if (HAS_PCH_SPLIT(dev)) {
- pp_control_reg = PCH_PP_CONTROL;
+ pp_ctrl_reg = PCH_PP_CONTROL;
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_div_reg = PCH_PP_DIVISOR;
} else {
- pp_control_reg = PIPEA_PP_CONTROL;
- pp_on_reg = PIPEA_PP_ON_DELAYS;
- pp_off_reg = PIPEA_PP_OFF_DELAYS;
- pp_div_reg = PIPEA_PP_DIVISOR;
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+ pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+ pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+ pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+ pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
}
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
pp = ironlake_get_pp_control(intel_dp);
- I915_WRITE(pp_control_reg, pp);
+ I915_WRITE(pp_ctrl_reg, pp);
pp_on = I915_READ(pp_on_reg);
pp_off = I915_READ(pp_off_reg);
@@ -3249,9 +3424,11 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_div_reg = PCH_PP_DIVISOR;
} else {
- pp_on_reg = PIPEA_PP_ON_DELAYS;
- pp_off_reg = PIPEA_PP_OFF_DELAYS;
- pp_div_reg = PIPEA_PP_DIVISOR;
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+ pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+ pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+ pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
}
/* And finally store the new values in the power sequencer. */
@@ -3268,12 +3445,15 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
if (IS_VALLEYVIEW(dev)) {
- port_sel = I915_READ(pp_on_reg) & 0xc0000000;
+ if (dp_to_dig_port(intel_dp)->port == PORT_B)
+ port_sel = PANEL_PORT_SELECT_DPB_VLV;
+ else
+ port_sel = PANEL_PORT_SELECT_DPC_VLV;
} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
if (dp_to_dig_port(intel_dp)->port == PORT_A)
- port_sel = PANEL_POWER_PORT_DP_A;
+ port_sel = PANEL_PORT_SELECT_DPA;
else
- port_sel = PANEL_POWER_PORT_DP_D;
+ port_sel = PANEL_PORT_SELECT_DPD;
}
pp_on |= port_sel;
@@ -3326,7 +3506,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
- ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
@@ -3358,8 +3537,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
- ironlake_edp_panel_vdd_off(intel_dp, false);
-
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_setup_backlight(connector);
@@ -3516,11 +3693,11 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
- intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
return;
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dig_port);
return;
@@ -3539,12 +3716,12 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
if (IS_VALLEYVIEW(dev)) {
- intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
+ intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
} else {
- intel_encoder->pre_enable = intel_pre_enable_dp;
- intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = g4x_pre_enable_dp;
+ intel_encoder->enable = g4x_enable_dp;
}
intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9b7b68fd5d47..1283ad62f33c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -77,7 +77,6 @@
/* the i915, i945 have a single sDVO i2c bus - which is different */
#define MAX_OUTPUTS 6
/* maximum connectors per crtcs in the mode set */
-#define INTELFB_CONN_LIMIT 4
#define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2
@@ -93,13 +92,17 @@
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
-#define INTEL_OUTPUT_UNKNOWN 9
+#define INTEL_OUTPUT_DSI 9
+#define INTEL_OUTPUT_UNKNOWN 10
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
+#define INTEL_DSI_COMMAND_MODE 0
+#define INTEL_DSI_VIDEO_MODE 1
+
struct intel_framebuffer {
struct drm_framebuffer base;
struct drm_i915_gem_object *obj;
@@ -207,8 +210,21 @@ struct intel_crtc_config {
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
unsigned long quirks;
+ /* User requested mode, only valid as a starting point to
+ * compute adjusted_mode, except in the case of (S)DVO where
+ * it's also for the output timings of the (S)DVO chip.
+ * adjusted_mode will then correspond to the (S)DVO chip's
+ * preferred input timings. */
struct drm_display_mode requested_mode;
+ /* Actual pipe timings, i.e. what we program into the pipe timing
+ * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
struct drm_display_mode adjusted_mode;
+
+ /* Pipe source size (i.e. panel fitter input size)
+ * All planes will be positioned inside this space,
+ * and get clipped at the edges. */
+ int pipe_src_w, pipe_src_h;
+
/* Whether to set up the PCH/FDI. Note that we never allow sharing
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
@@ -262,7 +278,8 @@ struct intel_crtc_config {
/*
* Frequency the dpll for the port should run at. Differs from the
- * adjusted dotclock e.g. for DP or 12bpc hdmi mode.
+ * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+ * already multiplied by pixel_multiplier.
*/
int port_clock;
@@ -288,6 +305,14 @@ struct intel_crtc_config {
struct intel_link_m_n fdi_m_n;
bool ips_enabled;
+
+ bool double_wide;
+};
+
+struct intel_pipe_wm {
+ struct intel_wm_level wm[5];
+ uint32_t linetime;
+ bool fbc_wm_enabled;
};
struct intel_crtc {
@@ -301,8 +326,9 @@ struct intel_crtc {
* some outputs connected to this crtc.
*/
bool active;
+ unsigned long enabled_power_domains;
bool eld_vld;
- bool primary_disabled; /* is the crtc obscured by a plane? */
+ bool primary_enabled; /* is the primary plane (partially) visible? */
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
@@ -330,6 +356,12 @@ struct intel_crtc {
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
+
+ /* per-pipe watermark state */
+ struct {
+ /* watermarks currently being used */
+ struct intel_pipe_wm active;
+ } wm;
};
struct intel_plane_wm_parameters {
@@ -417,13 +449,11 @@ struct intel_hdmi {
};
#define DP_MAX_DOWNSTREAM_PORTS 0x10
-#define DP_LINK_CONFIGURATION_SIZE 9
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
uint32_t DP;
- uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
uint32_t color_range;
@@ -495,80 +525,6 @@ struct intel_unpin_work {
bool enable_stall_check;
};
-int intel_pch_rawclk(struct drm_device *dev);
-
-int intel_connector_update_modes(struct drm_connector *connector,
- struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-
-extern void intel_attach_force_audio_property(struct drm_connector *connector);
-extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-
-extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
-extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev,
- int hdmi_reg, enum port port);
-extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector);
-extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
-extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
- bool is_sdvob);
-extern void intel_dvo_init(struct drm_device *dev);
-extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev);
-extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring);
-extern void intel_mark_idle(struct drm_device *dev);
-extern void intel_lvds_init(struct drm_device *dev);
-extern bool intel_is_dual_link_lvds(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int output_reg,
- enum port port);
-extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector);
-extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
-extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
-extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
-extern bool intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config);
-extern bool intel_dpd_is_edp(struct drm_device *dev);
-extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
-extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
- enum plane plane);
-
-/* intel_panel.c */
-extern int intel_panel_init(struct intel_panel *panel,
- struct drm_display_mode *fixed_mode);
-extern void intel_panel_fini(struct intel_panel *panel);
-
-extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
- struct drm_display_mode *adjusted_mode);
-extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config,
- int fitting_mode);
-extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_config *pipe_config,
- int fitting_mode);
-extern void intel_panel_set_backlight(struct drm_device *dev,
- u32 level, u32 max);
-extern int intel_panel_setup_backlight(struct drm_connector *connector);
-extern void intel_panel_enable_backlight(struct drm_device *dev,
- enum pipe pipe);
-extern void intel_panel_disable_backlight(struct drm_device *dev);
-extern void intel_panel_destroy_backlight(struct drm_device *dev);
-extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
-
struct intel_set_config {
struct drm_encoder **save_connector_encoders;
struct drm_crtc **save_encoder_crtcs;
@@ -577,18 +533,14 @@ struct intel_set_config {
bool mode_changed;
};
-extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
-extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
-extern void intel_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_connector_dpms(struct drm_connector *, int mode);
-extern bool intel_connector_get_hw_state(struct intel_connector *connector);
-extern void intel_modeset_check_state(struct drm_device *dev);
-extern void intel_plane_restore(struct drm_plane *plane);
-extern void intel_plane_disable(struct drm_plane *plane);
-
+struct intel_load_detect_pipe {
+ struct drm_framebuffer *release_fb;
+ bool load_detect_temp;
+ int dpms_mode;
+};
-static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+static inline struct intel_encoder *
+intel_attached_encoder(struct drm_connector *connector)
{
return to_intel_connector(connector)->encoder;
}
@@ -616,73 +568,94 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
+
+/* i915_irq.c */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable);
+bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable);
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void hsw_pc8_disable_interrupts(struct drm_device *dev);
+void hsw_pc8_restore_interrupts(struct drm_device *dev);
+
+
+/* intel_crt.c */
+void intel_crt_init(struct drm_device *dev);
+
+
+/* intel_ddi.c */
+void intel_prepare_ddi(struct drm_device *dev);
+void hsw_fdi_link_train(struct drm_crtc *crtc);
+void intel_ddi_init(struct drm_device *dev, enum port port);
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+void intel_ddi_pll_init(struct drm_device *dev);
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder);
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+void intel_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+
+
+/* intel_display.c */
+int intel_pch_rawclk(struct drm_device *dev);
+void intel_mark_busy(struct drm_device *dev);
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring);
+void intel_mark_idle(struct drm_device *dev);
+void intel_crtc_restore_mode(struct drm_crtc *crtc);
+void intel_crtc_update_dpms(struct drm_crtc *crtc);
+void intel_encoder_destroy(struct drm_encoder *encoder);
+void intel_connector_dpms(struct drm_connector *, int mode);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+void intel_modeset_check_state(struct drm_device *dev);
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
-
-extern void intel_connector_attach_encoder(struct intel_connector *connector,
- struct intel_encoder *encoder);
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
-
-extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
- struct drm_crtc *crtc);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern enum transcoder
-intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
-extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
-extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
-
-struct intel_load_detect_pipe {
- struct drm_framebuffer *release_fb;
- bool load_detect_temp;
- int dpms_mode;
-};
-extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
- struct drm_display_mode *mode,
- struct intel_load_detect_pipe *old);
-extern void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old);
-
-extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno);
-extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno);
-
-extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined);
-extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
-
-extern int intel_framebuffer_init(struct drm_device *dev,
- struct intel_framebuffer *ifb,
- struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_i915_gem_object *obj);
-extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
-extern int intel_fbdev_init(struct drm_device *dev);
-extern void intel_fbdev_initial_config(struct drm_device *dev);
-extern void intel_fbdev_fini(struct drm_device *dev);
-extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
-extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
-extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
-extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
-
-extern void intel_setup_overlay(struct drm_device *dev);
-extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
-extern int intel_overlay_put_image(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int intel_overlay_attrs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-extern void intel_fb_output_poll_changed(struct drm_device *dev);
-extern void intel_fb_restore_mode(struct drm_device *dev);
-
-struct intel_shared_dpll *
-intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
-
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old);
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old);
+int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
+int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *ifb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj);
+void intel_framebuffer_fini(struct intel_framebuffer *fb);
+void intel_prepare_page_flip(struct drm_device *dev, int plane);
+void intel_finish_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state);
@@ -696,102 +669,197 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
- bool state);
+void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+void intel_write_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int bpp,
+ unsigned int pitch);
+void intel_display_handle_reset(struct drm_device *dev);
+void hsw_enable_pc8_work(struct work_struct *__work);
+void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
+void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config);
+int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
+void
+ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+ int dotclock);
+bool intel_crtc_active(struct drm_crtc *crtc);
+void hsw_enable_ips(struct intel_crtc *crtc);
+void hsw_disable_ips(struct intel_crtc *crtc);
+void intel_display_set_init_power(struct drm_device *dev, bool enable);
+
+
+/* intel_dp.c */
+void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+void intel_dp_start_link_train(struct intel_dp *intel_dp);
+void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+void intel_dp_check_link_status(struct intel_dp *intel_dp);
+bool intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+bool intel_dpd_is_edp(struct drm_device *dev);
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+void intel_edp_psr_enable(struct intel_dp *intel_dp);
+void intel_edp_psr_disable(struct intel_dp *intel_dp);
+void intel_edp_psr_update(struct drm_device *dev);
+
+
+/* intel_dsi.c */
+bool intel_dsi_init(struct drm_device *dev);
+
+
+/* intel_dvo.c */
+void intel_dvo_init(struct drm_device *dev);
+
+
+/* legacy fbdev emulation in intel_fbdev.c */
+#ifdef CONFIG_DRM_I915_FBDEV
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_initial_config(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
+extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
+extern void intel_fbdev_restore_mode(struct drm_device *dev);
+#else
+static inline int intel_fbdev_init(struct drm_device *dev)
+{
+ return 0;
+}
-extern void intel_init_clock_gating(struct drm_device *dev);
-extern void intel_suspend_hw(struct drm_device *dev);
-extern void intel_write_eld(struct drm_encoder *encoder,
- struct drm_display_mode *mode);
-extern void intel_prepare_ddi(struct drm_device *dev);
-extern void hsw_fdi_link_train(struct drm_crtc *crtc);
-extern void intel_ddi_init(struct drm_device *dev, enum port port);
-
-/* For use by IVB LP watermark workaround in intel_sprite.c */
-extern void intel_update_watermarks(struct drm_device *dev);
-extern void intel_update_sprite_watermarks(struct drm_plane *plane,
- struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
- bool enabled, bool scaled);
-
-extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
- unsigned int tiling_mode,
- unsigned int bpp,
- unsigned int pitch);
-
-extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-/* Power-related functions, located in intel_pm.c */
-extern void intel_init_pm(struct drm_device *dev);
-/* FBC */
-extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void intel_update_fbc(struct drm_device *dev);
-/* IPS */
-extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-extern void intel_gpu_ips_teardown(void);
-
-/* Power well */
-extern int i915_init_power_well(struct drm_device *dev);
-extern void i915_remove_power_well(struct drm_device *dev);
-
-extern bool intel_display_power_enabled(struct drm_device *dev,
- enum intel_display_power_domain domain);
-extern void intel_init_power_well(struct drm_device *dev);
-extern void intel_set_power_well(struct drm_device *dev, bool enable);
-extern void intel_enable_gt_powersave(struct drm_device *dev);
-extern void intel_disable_gt_powersave(struct drm_device *dev);
-extern void ironlake_teardown_rc6(struct drm_device *dev);
+static inline void intel_fbdev_initial_config(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+}
+
+static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+#endif
+
+/* intel_hdmi.c */
+void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+
+
+/* intel_lvds.c */
+void intel_lvds_init(struct drm_device *dev);
+bool intel_is_dual_link_lvds(struct drm_device *dev);
+
+
+/* intel_modes.c */
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+
+/* intel_overlay.c */
+void intel_setup_overlay(struct drm_device *dev);
+void intel_cleanup_overlay(struct drm_device *dev);
+int intel_overlay_switch_off(struct intel_overlay *overlay);
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+
+/* intel_panel.c */
+int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode);
+void intel_panel_fini(struct intel_panel *panel);
+void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode);
+void intel_pch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config,
+ int fitting_mode);
+void intel_gmch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config,
+ int fitting_mode);
+void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max);
+int intel_panel_setup_backlight(struct drm_connector *connector);
+void intel_panel_enable_backlight(struct drm_device *dev, enum pipe pipe);
+void intel_panel_disable_backlight(struct drm_device *dev);
+void intel_panel_destroy_backlight(struct drm_device *dev);
+enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+
+
+/* intel_pm.c */
+void intel_init_clock_gating(struct drm_device *dev);
+void intel_suspend_hw(struct drm_device *dev);
+void intel_update_watermarks(struct drm_crtc *crtc);
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ uint32_t sprite_width, int pixel_size,
+ bool enabled, bool scaled);
+void intel_init_pm(struct drm_device *dev);
+bool intel_fbc_enabled(struct drm_device *dev);
+void intel_update_fbc(struct drm_device *dev);
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+void intel_gpu_ips_teardown(void);
+int intel_power_domains_init(struct drm_device *dev);
+void intel_power_domains_remove(struct drm_device *dev);
+bool intel_display_power_enabled(struct drm_device *dev,
+ enum intel_display_power_domain domain);
+void intel_display_power_get(struct drm_device *dev,
+ enum intel_display_power_domain domain);
+void intel_display_power_put(struct drm_device *dev,
+ enum intel_display_power_domain domain);
+void intel_power_domains_init_hw(struct drm_device *dev);
+void intel_set_power_well(struct drm_device *dev, bool enable);
+void intel_enable_gt_powersave(struct drm_device *dev);
+void intel_disable_gt_powersave(struct drm_device *dev);
+void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
+void gen6_rps_idle(struct drm_i915_private *dev_priv);
+void gen6_rps_boost(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void ilk_wm_get_hw_state(struct drm_device *dev);
+
+
+/* intel_sdvo.c */
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
+
+
+/* intel_sprite.c */
+int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
+ enum plane plane);
+void intel_plane_restore(struct drm_plane *plane);
+void intel_plane_disable(struct drm_plane *plane);
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
-extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
- enum pipe *pipe);
-extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
-extern void intel_ddi_pll_init(struct drm_device *dev);
-extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
-extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder);
-extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
-extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
-extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
-extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
-extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
-extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
-extern bool
-intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
-
-extern void intel_display_handle_reset(struct drm_device *dev);
-extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe,
- bool enable);
-extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
- enum transcoder pch_transcoder,
- bool enable);
-
-extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
-extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
-extern void intel_edp_psr_update(struct drm_device *dev);
-extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
- bool switch_to_fclk, bool allow_power_down);
-extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
-extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
- uint32_t mask);
-extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
- uint32_t mask);
-extern void hsw_enable_pc8_work(struct work_struct *__work);
-extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
-extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
-extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
-extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
-extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+/* intel_tv.c */
+void intel_tv_init(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 000000000000..d257b093ca68
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/i915_drm.h>
+#include <linux/slab.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+/* the sub-encoders aka panel drivers */
+static const struct intel_dsi_device intel_dsi_devices[] = {
+};
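
A hypothetical sketch of how this table could be populated once a panel sub-driver exists; every name below is invented for illustration and follows the intel_dsi_dev_ops interface declared in intel_dsi.h further down:

	static struct drm_display_mode example_fixed_mode; /* filled in by the panel driver */

	static bool example_panel_init(struct intel_dsi_device *dsi) { return true; }
	static void example_panel_enable(struct intel_dsi_device *dsi) { }
	static void example_panel_disable(struct intel_dsi_device *dsi) { }

	static int example_panel_mode_valid(struct intel_dsi_device *dsi,
					    struct drm_display_mode *mode)
	{
		return MODE_OK;
	}

	static enum drm_connector_status
	example_panel_detect(struct intel_dsi_device *dsi)
	{
		return connector_status_connected;
	}

	static struct drm_display_mode *
	example_panel_get_modes(struct intel_dsi_device *dsi)
	{
		return &example_fixed_mode;
	}

	static const struct intel_dsi_dev_ops example_panel_ops = {
		.init = example_panel_init,
		.enable = example_panel_enable,
		.disable = example_panel_disable,
		.mode_valid = example_panel_mode_valid,
		.detect = example_panel_detect,
		.get_modes = example_panel_get_modes,
	};

	/* ...and the corresponding table entry: */
	static const struct intel_dsi_device intel_dsi_devices[] = {
		{
			.panel_id = 0,
			.name = "example DSI panel",
			.type = INTEL_DSI_VIDEO_MODE,
			.dev_ops = &example_panel_ops,
		},
	};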
+
+
+static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
+ u32 mask)
+{
+ u32 tmp = vlv_cck_read(dev_priv, reg);
+ tmp &= ~mask;
+ tmp |= val;
+ vlv_cck_write(dev_priv, reg, tmp);
+}
+
+static void band_gap_wa(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Enable bandgap fix in GOP driver */
+ vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
+ msleep(20);
+ vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
+ msleep(20);
+ vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
+ msleep(20);
+ vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
+ msleep(20);
+ vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
+ msleep(20);
+
+ /* Turn Display Trunk on */
+ vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
+ msleep(20);
+
+ vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
+ msleep(20);
+
+ vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
+ msleep(20);
+ vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
+ msleep(20);
+ vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ /* Need huge delay, otherwise clock is not stable */
+ msleep(100);
+}
+
+static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dsi, base);
+}
+
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
+}
+
+static void intel_dsi_hot_plug(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+}
+
+static bool intel_dsi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *config)
+{
+ struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+ base);
+ struct intel_connector *intel_connector = intel_dsi->attached_connector;
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
+ struct drm_display_mode *mode = &config->requested_mode;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (fixed_mode)
+ intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+
+ if (intel_dsi->dev.dev_ops->mode_fixup)
+ return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
+ mode, adjusted_mode);
+
+ return true;
+}
+
+static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+
+ vlv_enable_dsi_pll(encoder);
+}
+
+static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+}
+
+static void intel_dsi_enable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ DRM_DEBUG_KMS("\n");
+
+ temp = I915_READ(MIPI_DEVICE_READY(pipe));
+ if ((temp & DEVICE_READY) == 0) {
+ temp &= ~ULPS_STATE_MASK;
+ I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
+ } else if (temp & ULPS_STATE_MASK) {
+ temp &= ~ULPS_STATE_MASK;
+ I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
+ /*
+ * We need to ensure that there is a minimum of 1 ms time
+ * available before clearing the ULPS exit state.
+ */
+ msleep(2);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
+ }
+
+ if (is_cmd_mode(intel_dsi))
+ I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
+
+ if (is_vid_mode(intel_dsi)) {
+ msleep(20); /* XXX */
+ dpi_send_cmd(intel_dsi, TURN_ON);
+ msleep(100);
+
+ /* assert ip_tg_enable signal */
+ temp = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
+ POSTING_READ(MIPI_PORT_CTRL(pipe));
+ }
+
+ intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+}
+
+static void intel_dsi_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
+
+ if (is_vid_mode(intel_dsi)) {
+ dpi_send_cmd(intel_dsi, SHUTDOWN);
+ msleep(10);
+
+ /* de-assert ip_tg_enable signal */
+ temp = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
+ POSTING_READ(MIPI_PORT_CTRL(pipe));
+
+ msleep(2);
+ }
+
+ temp = I915_READ(MIPI_DEVICE_READY(pipe));
+ if (temp & DEVICE_READY) {
+ temp &= ~DEVICE_READY;
+ temp &= ~ULPS_STATE_MASK;
+ I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
+ }
+}
+
+static void intel_dsi_post_disable(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+
+ vlv_disable_dsi_pll(encoder);
+}
+
+static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ u32 port, func;
+ enum pipe p;
+
+ DRM_DEBUG_KMS("\n");
+
+ /* XXX: this only works for one DSI output */
+ for (p = PIPE_A; p <= PIPE_B; p++) {
+ port = I915_READ(MIPI_PORT_CTRL(p));
+ func = I915_READ(MIPI_DSI_FUNC_PRG(p));
+
+ if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
+ if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
+ *pipe = p;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static void intel_dsi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ DRM_DEBUG_KMS("\n");
+
+ /* XXX: read flags, set to adjusted_mode */
+}
+
+static int intel_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+
+ DRM_DEBUG_KMS("\n");
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
+ return MODE_NO_DBLESCAN;
+ }
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
+}
+
+/* return txclkesc cycles in terms of divider and duration in us */
+static u16 txclkesc(u32 divider, unsigned int us)
+{
+ switch (divider) {
+ case ESCAPE_CLOCK_DIVIDER_1:
+ default:
+ return 20 * us;
+ case ESCAPE_CLOCK_DIVIDER_2:
+ return 10 * us;
+ case ESCAPE_CLOCK_DIVIDER_4:
+ return 5 * us;
+ }
+}
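
For example, with ESCAPE_CLOCK_DIVIDER_1 the escape clock is treated as 20 MHz (20 cycles per microsecond), so a hypothetical 100 us period works out to:

	u16 init_count = txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100); /* 20 * 100 = 2000 cycles */

which matches the MIPI_INIT_COUNT value programmed further down in intel_dsi_mode_set().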
+
+/* return pixels in terms of txbyteclkhs */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
+{
+ return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
+}
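
A worked example with hypothetical values (a 1920-pixel active line, 24 bpp, 4 lanes):

	u16 hactive_hs = txbyteclkhs(1920, 24, 4);
	/* = DIV_ROUND_UP(DIV_ROUND_UP(1920 * 24, 8), 4)
	 * = DIV_ROUND_UP(5760, 4) = 1440 byte clocks */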
+
+static void set_dsi_timings(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int pipe = intel_crtc->pipe;
+ unsigned int bpp = intel_crtc->config.pipe_bpp;
+ unsigned int lane_count = intel_dsi->lane_count;
+
+ u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+
+ hactive = mode->hdisplay;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsync = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsync = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ /* horizontal values are in terms of high speed byte clock */
+ hactive = txbyteclkhs(hactive, bpp, lane_count);
+ hfp = txbyteclkhs(hfp, bpp, lane_count);
+ hsync = txbyteclkhs(hsync, bpp, lane_count);
+ hbp = txbyteclkhs(hbp, bpp, lane_count);
+
+ I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
+ I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
+
+ /* meaningful only for video mode with non-burst sync pulses; can be
+ * zero for non-burst sync events and burst modes */
+ I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
+ I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
+
+ /* vertical values are in terms of lines */
+ I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
+ I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
+ I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
+}
+
+static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ int pipe = intel_crtc->pipe;
+ unsigned int bpp = intel_crtc->config.pipe_bpp;
+ u32 val, tmp;
+
+ DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+ /* Update the DSI PLL */
+ vlv_enable_dsi_pll(intel_encoder);
+
+ /* XXX: Location of the call */
+ band_gap_wa(dev_priv);
+
+ /* escape clock divider, 20MHz, shared for A and C. device ready must be
+ * off when doing this! txclkesc? */
+ tmp = I915_READ(MIPI_CTRL(0));
+ tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+ I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
+
+ /* read request priority is per pipe */
+ tmp = I915_READ(MIPI_CTRL(pipe));
+ tmp &= ~READ_REQUEST_PRIORITY_MASK;
+ I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
+
+ /* XXX: why here, why like this? handling in irq handler?! */
+ I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
+ I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
+
+ I915_WRITE(MIPI_DPHY_PARAM(pipe),
+ 0x3c << EXIT_ZERO_COUNT_SHIFT |
+ 0x1f << TRAIL_COUNT_SHIFT |
+ 0xc5 << CLK_ZERO_COUNT_SHIFT |
+ 0x1f << PREPARE_COUNT_SHIFT);
+
+ I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
+ adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
+ adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+
+ set_dsi_timings(encoder, adjusted_mode);
+
+ val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
+ if (is_cmd_mode(intel_dsi)) {
+ val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
+ val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
+ } else {
+ val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
+
+ /* XXX: cross-check bpp vs. pixel format? */
+ val |= intel_dsi->pixel_format;
+ }
+ I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
+
+ /* timeouts for recovery. one frame IIUC. if counter expires, EOT and
+ * stop state. */
+
+ /*
+ * In burst mode, the timeout must be greater than one DPI line time in
+ * byte clocks (txbyteclkhs); programming 1 more than that value is
+ * recommended so the timer can actually expire.
+ *
+ * In non-burst mode, it must be greater than one DPI frame time in byte
+ * clocks (txbyteclkhs); again, 1 more than that value is recommended.
+ *
+ * In DBI only mode, it must be greater than one DBI frame time in byte
+ * clocks (txbyteclkhs); again, 1 more than that value is recommended.
+ */
+
+ if (is_vid_mode(intel_dsi) &&
+ intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+ I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+ txbyteclkhs(adjusted_mode->htotal, bpp,
+ intel_dsi->lane_count) + 1);
+ } else {
+ I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+ txbyteclkhs(adjusted_mode->vtotal *
+ adjusted_mode->htotal,
+ bpp, intel_dsi->lane_count) + 1);
+ }
+ I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
+ I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
+ I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */
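
With hypothetical timings (htotal = 2000, 24 bpp, 4 lanes), the burst-mode branch above would program:

	/* txbyteclkhs(2000, 24, 4) + 1 = DIV_ROUND_UP(6000, 4) + 1 = 1501 byte clocks */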
+
+ /* dphy stuff */
+
+ /* in terms of low power clock */
+ I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
+
+ /* recovery disables */
+ I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
+
+ /* in terms of txbyteclkhs. actual high to low switch +
+ * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
+ *
+ * XXX: write MIPI_STOP_STATE_STALL?
+ */
+ I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);
+
+ /* XXX: low power clock equivalence in terms of byte clock. the number
+ * of byte clocks occupied in one low power clock. based on txbyteclkhs
+ * and txclkesc. txclkesc time / txbyteclk time * (105 +
+ * MIPI_STOP_STATE_STALL) / 105.???
+ */
+ I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);
+
+ /* The bandwidth needed to transmit 16 long packets of 252 bytes each
+ * (for the DCS write memory command) is programmed in this register in
+ * terms of byte clocks; the time to transmit those 16 long packets in a
+ * DSI stream varies with the DSI transfer rate and the number of lanes
+ * configured. */
+ I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);
+
+ I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
+ 0xa << LP_HS_SSW_CNT_SHIFT |
+ 0x14 << HS_LP_PWR_SW_CNT_SHIFT);
+
+ if (is_vid_mode(intel_dsi))
+ I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
+ intel_dsi->video_mode_format);
+}
+
+static enum drm_connector_status
+intel_dsi_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+ DRM_DEBUG_KMS("\n");
+ return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
+}
+
+static int intel_dsi_get_modes(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *mode;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!intel_connector->panel.fixed_mode) {
+ DRM_DEBUG_KMS("no fixed mode\n");
+ return 0;
+ }
+
+ mode = drm_mode_duplicate(connector->dev,
+ intel_connector->panel.fixed_mode);
+ if (!mode) {
+ DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+ return 0;
+ }
+
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
+static void intel_dsi_destroy(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ DRM_DEBUG_KMS("\n");
+ intel_panel_fini(&intel_connector->panel);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static const struct drm_encoder_funcs intel_dsi_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
+ .get_modes = intel_dsi_get_modes,
+ .mode_valid = intel_dsi_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_dsi_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_dsi_detect,
+ .destroy = intel_dsi_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+bool intel_dsi_init(struct drm_device *dev)
+{
+ struct intel_dsi *intel_dsi;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ struct drm_display_mode *fixed_mode = NULL;
+ const struct intel_dsi_device *dsi;
+ unsigned int i;
+
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+ if (!intel_dsi)
+ return false;
+
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dsi);
+ return false;
+ }
+
+ intel_encoder = &intel_dsi->base;
+ encoder = &intel_encoder->base;
+ intel_dsi->attached_connector = intel_connector;
+
+ connector = &intel_connector->base;
+
+ drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
+
+ /* XXX: very likely not all of these are needed */
+ intel_encoder->hot_plug = intel_dsi_hot_plug;
+ intel_encoder->compute_config = intel_dsi_compute_config;
+ intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
+ intel_encoder->pre_enable = intel_dsi_pre_enable;
+ intel_encoder->enable = intel_dsi_enable;
+ intel_encoder->mode_set = intel_dsi_mode_set;
+ intel_encoder->disable = intel_dsi_disable;
+ intel_encoder->post_disable = intel_dsi_post_disable;
+ intel_encoder->get_hw_state = intel_dsi_get_hw_state;
+ intel_encoder->get_config = intel_dsi_get_config;
+
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
+ dsi = &intel_dsi_devices[i];
+ intel_dsi->dev = *dsi;
+
+ if (dsi->dev_ops->init(&intel_dsi->dev))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(intel_dsi_devices)) {
+ DRM_DEBUG_KMS("no device found\n");
+ goto err;
+ }
+
+ intel_encoder->type = INTEL_OUTPUT_DSI;
+ intel_encoder->crtc_mask = (1 << 0); /* XXX */
+
+ intel_encoder->cloneable = false;
+ drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
+
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+ drm_sysfs_connector_add(connector);
+
+ fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
+ if (!fixed_mode) {
+ DRM_DEBUG_KMS("no fixed mode\n");
+ goto err;
+ }
+
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+
+ return true;
+
+err:
+ drm_encoder_cleanup(&intel_encoder->base);
+ kfree(intel_dsi);
+ kfree(intel_connector);
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
new file mode 100644
index 000000000000..c7765f33d524
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_DSI_H
+#define _INTEL_DSI_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "intel_drv.h"
+
+struct intel_dsi_device {
+ unsigned int panel_id;
+ const char *name;
+ int type;
+ const struct intel_dsi_dev_ops *dev_ops;
+ void *dev_priv;
+};
+
+struct intel_dsi_dev_ops {
+ bool (*init)(struct intel_dsi_device *dsi);
+
+ /* This callback must be able to assume DSI commands can be sent */
+ void (*enable)(struct intel_dsi_device *dsi);
+
+ /* This callback must be able to assume DSI commands can be sent */
+ void (*disable)(struct intel_dsi_device *dsi);
+
+ int (*mode_valid)(struct intel_dsi_device *dsi,
+ struct drm_display_mode *mode);
+
+ bool (*mode_fixup)(struct intel_dsi_device *dsi,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ void (*mode_set)(struct intel_dsi_device *dsi,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
+
+ bool (*get_hw_state)(struct intel_dsi_device *dev);
+
+ struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
+
+ void (*destroy) (struct intel_dsi_device *dsi);
+};
+
+struct intel_dsi {
+ struct intel_encoder base;
+
+ struct intel_dsi_device dev;
+
+ struct intel_connector *attached_connector;
+
+ /* if true, use HS mode, otherwise LP */
+ bool hs;
+
+ /* virtual channel */
+ int channel;
+
+ /* number of DSI lanes */
+ unsigned int lane_count;
+
+ /* video mode pixel format for MIPI_DSI_FUNC_PRG register */
+ u32 pixel_format;
+
+ /* video mode format for MIPI_VIDEO_MODE_FORMAT register */
+ u32 video_mode_format;
+
+ /* eot for MIPI_EOT_DISABLE register */
+ u32 eot_disable;
+};
+
+static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_dsi, base.base);
+}
+
+extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
+extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
+
+#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
new file mode 100644
index 000000000000..7c40f981d2c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/mipi_display.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+/*
+ * XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
+ * MIPI_COMMAND_ADDRESS registers.
+ *
+ * Apparently these registers provide a MIPI adapter level way to send (lots of)
+ * commands and data to the receiver, without having to write the commands and
+ * data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
+ *
+ * Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
+ * MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
+ * framebuffer in command mode displays) these are just an optimization that can
+ * come later.
+ *
+ * For memory writes, these should probably be used for performance.
+ */
+
+static void print_stat(struct intel_dsi *intel_dsi)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ val = I915_READ(MIPI_INTR_STAT(pipe));
+
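+ /* Append the name of every asserted status bit to the debug message. */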
+#define STAT_BIT(val, bit) ((val) & (bit) ? " " #bit : "")
+ DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+ "\n", pipe, val,
+ STAT_BIT(val, TEARING_EFFECT),
+ STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
+ STAT_BIT(val, GEN_READ_DATA_AVAIL),
+ STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
+ STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
+ STAT_BIT(val, RX_PROT_VIOLATION),
+ STAT_BIT(val, RX_INVALID_TX_LENGTH),
+ STAT_BIT(val, ACK_WITH_NO_ERROR),
+ STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
+ STAT_BIT(val, LP_RX_TIMEOUT),
+ STAT_BIT(val, HS_TX_TIMEOUT),
+ STAT_BIT(val, DPI_FIFO_UNDERRUN),
+ STAT_BIT(val, LOW_CONTENTION),
+ STAT_BIT(val, HIGH_CONTENTION),
+ STAT_BIT(val, TXDSI_VC_ID_INVALID),
+ STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
+ STAT_BIT(val, TXCHECKSUM_ERROR),
+ STAT_BIT(val, TXECC_MULTIBIT_ERROR),
+ STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
+ STAT_BIT(val, TXFALSE_CONTROL_ERROR),
+ STAT_BIT(val, RXDSI_VC_ID_INVALID),
+ STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
+ STAT_BIT(val, RXCHECKSUM_ERROR),
+ STAT_BIT(val, RXECC_MULTIBIT_ERROR),
+ STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
+ STAT_BIT(val, RXFALSE_CONTROL_ERROR),
+ STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
+ STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
+ STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
+ STAT_BIT(val, RXEOT_SYNC_ERROR),
+ STAT_BIT(val, RXSOT_SYNC_ERROR),
+ STAT_BIT(val, RXSOT_ERROR));
+#undef STAT_BIT
+}
+
+enum dsi_type {
+ DSI_DCS,
+ DSI_GENERIC,
+};
+
+/* enable or disable command mode hs transmissions */
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 temp;
+ u32 mask = DBI_FIFO_EMPTY;
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
+ DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
+
+ temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
+ temp &= DBI_HS_LP_MODE_MASK;
+ I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE);
+
+ intel_dsi->hs = enable;
+}
+
+static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
+ u8 data_type, u16 data)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 ctrl_reg;
+ u32 ctrl;
+ u32 mask;
+
+ DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
+ channel, data_type, data);
+
+ if (intel_dsi->hs) {
+ ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
+ mask = HS_CTRL_FIFO_FULL;
+ } else {
+ ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
+ mask = LP_CTRL_FIFO_FULL;
+ }
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
+ DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+ print_stat(intel_dsi);
+ }
+
+ /*
+ * Note: This function is also used for long packets, with length passed
+ * as data, since SHORT_PACKET_PARAM_SHIFT ==
+ * LONG_PACKET_WORD_COUNT_SHIFT.
+ */
+ ctrl = data << SHORT_PACKET_PARAM_SHIFT |
+ channel << VIRTUAL_CHANNEL_SHIFT |
+ data_type << DATA_TYPE_SHIFT;
+
+ I915_WRITE(ctrl_reg, ctrl);
+
+ return 0;
+}
+
+static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
+ u8 data_type, const u8 *data, int len)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 data_reg;
+ int i, j, n;
+ u32 mask;
+
+ DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
+ channel, data_type, len);
+
+ if (intel_dsi->hs) {
+ data_reg = MIPI_HS_GEN_DATA(pipe);
+ mask = HS_DATA_FIFO_FULL;
+ } else {
+ data_reg = MIPI_LP_GEN_DATA(pipe);
+ mask = LP_DATA_FIFO_FULL;
+ }
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
+ DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+
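+ /*
+ * The payload is packed into the generic data FIFO one dword at a time,
+ * least significant byte first; e.g. bytes { 0x12, 0x34, 0x56, 0x78 }
+ * go out as a single 0x78563412 write.
+ */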
+ for (i = 0; i < len; i += n) {
+ u32 val = 0;
+ n = min_t(int, len - i, 4);
+
+ for (j = 0; j < n; j++)
+ val |= *data++ << 8 * j;
+
+ I915_WRITE(data_reg, val);
+ /* XXX: check for data fifo full, once that is set, write 4
+ * dwords, then wait for not set, then continue. */
+ }
+
+ return dsi_vc_send_short(intel_dsi, channel, data_type, len);
+}
+
+static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
+ int channel, const u8 *data, int len,
+ enum dsi_type type)
+{
+ int ret;
+
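+ /*
+ * Payloads of up to two bytes fit in a short packet; anything longer is
+ * sent as a long packet, with the data streamed through the data FIFO
+ * before the header is written to the control FIFO.
+ */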
+ if (len == 0) {
+ BUG_ON(type == DSI_GENERIC);
+ ret = dsi_vc_send_short(intel_dsi, channel,
+ MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
+ 0);
+ } else if (len == 1) {
+ ret = dsi_vc_send_short(intel_dsi, channel,
+ type == DSI_GENERIC ?
+ MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
+ MIPI_DSI_DCS_SHORT_WRITE, data[0]);
+ } else if (len == 2) {
+ ret = dsi_vc_send_short(intel_dsi, channel,
+ type == DSI_GENERIC ?
+ MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
+ MIPI_DSI_DCS_SHORT_WRITE_PARAM,
+ (data[1] << 8) | data[0]);
+ } else {
+ ret = dsi_vc_send_long(intel_dsi, channel,
+ type == DSI_GENERIC ?
+ MIPI_DSI_GENERIC_LONG_WRITE :
+ MIPI_DSI_DCS_LONG_WRITE, data, len);
+ }
+
+ return ret;
+}
+
+int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
+ const u8 *data, int len)
+{
+ return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
+}
+
+int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
+ const u8 *data, int len)
+{
+ return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
+}
+
+static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
+ int channel, u8 dcs_cmd)
+{
+ return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
+ dcs_cmd);
+}
+
+static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
+ int channel, u8 *reqdata,
+ int reqlen)
+{
+ u16 data;
+ u8 data_type;
+
+ switch (reqlen) {
+ case 0:
+ data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
+ data = 0;
+ break;
+ case 1:
+ data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
+ data = reqdata[0];
+ break;
+ case 2:
+ data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
+ data = (reqdata[1] << 8) | reqdata[0];
+ break;
+ default:
+ BUG();
+ }
+
+ return dsi_vc_send_short(intel_dsi, channel, data_type, data);
+}
+
+static int dsi_read_data_return(struct intel_dsi *intel_dsi,
+ u8 *buf, int buflen)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ int i, len = 0;
+ u32 data_reg, val;
+
+ if (intel_dsi->hs) {
+ data_reg = MIPI_HS_GEN_DATA(pipe);
+ } else {
+ data_reg = MIPI_LP_GEN_DATA(pipe);
+ }
+
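+ /*
+ * Drain the return payload from the data FIFO one dword at a time,
+ * unpacking least significant byte first (the mirror image of
+ * dsi_vc_send_long()).
+ */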
+ while (len < buflen) {
+ val = I915_READ(data_reg);
+ for (i = 0; i < 4 && len < buflen; i++, len++)
+ buf[len] = val >> 8 * i;
+ }
+
+ return len;
+}
+
+int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
+ u8 *buf, int buflen)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 mask;
+ int ret;
+
+ /*
+ * XXX: should issue multiple read requests and reads if request is
+ * longer than MIPI_MAX_RETURN_PKT_SIZE
+ */
+
+ I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+
+ ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
+ if (ret)
+ return ret;
+
+ mask = GEN_READ_DATA_AVAIL;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+ DRM_ERROR("Timeout waiting for read data.\n");
+
+ ret = dsi_read_data_return(intel_dsi, buf, buflen);
+ if (ret < 0)
+ return ret;
+
+ if (ret != buflen)
+ return -EIO;
+
+ return 0;
+}
+
+int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
+ u8 *reqdata, int reqlen, u8 *buf, int buflen)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 mask;
+ int ret;
+
+ /*
+ * XXX: should issue multiple read requests and reads if request is
+ * longer than MIPI_MAX_RETURN_PKT_SIZE
+ */
+
+ I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+
+ ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
+ reqlen);
+ if (ret)
+ return ret;
+
+ mask = GEN_READ_DATA_AVAIL;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+ DRM_ERROR("Timeout waiting for read data.\n");
+
+ ret = dsi_read_data_return(intel_dsi, buf, buflen);
+ if (ret < 0)
+ return ret;
+
+ if (ret != buflen)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * send a video mode command
+ *
+ * XXX: commands with data in MIPI_DPI_DATA?
+ */
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 mask;
+
+ /* XXX: pipe, hs */
+ if (intel_dsi->hs)
+ cmd &= ~DPI_LP_MODE;
+ else
+ cmd |= DPI_LP_MODE;
+
+ /* DPI virtual channel?! */
+
+ mask = DPI_FIFO_EMPTY;
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
+ DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
+
+ /* clear bit */
+ I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
+
+ /* XXX: old code skips write if control unchanged */
+ if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
+ DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
+
+ I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
+
+ mask = SPL_PKT_SENT_INTERRUPT;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
+ DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
new file mode 100644
index 000000000000..54c8a234a2e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#ifndef _INTEL_DSI_DSI_H
+#define _INTEL_DSI_DSI_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/mipi_display.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
+
+int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
+ const u8 *data, int len);
+
+int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
+ const u8 *data, int len);
+
+int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
+ u8 *buf, int buflen);
+
+int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
+ u8 *reqdata, int reqlen, u8 *buf, int buflen);
+
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);
+
+/* XXX: questionable write helpers */
+static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
+ int channel, u8 dcs_cmd)
+{
+ return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
+}
+
+static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
+ int channel, u8 dcs_cmd, u8 param)
+{
+ u8 buf[2] = { dcs_cmd, param };
+ return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
+}
+
+static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
+ int channel)
+{
+ return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
+}
+
+static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
+ int channel, u8 param)
+{
+ return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
+}
+
+static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
+ int channel, u8 param1, u8 param2)
+{
+ u8 buf[2] = { param1, param2 };
+ return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
+}
+
+/* XXX: questionable read helpers */
+static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
+ int channel, u8 *buf, int buflen)
+{
+ return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
+}
+
+static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
+ int channel, u8 param, u8 *buf,
+ int buflen)
+{
+ return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
+}
+
+static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
+ int channel, u8 param1, u8 param2,
+ u8 *buf, int buflen)
+{
+ u8 req[2] = { param1, param2 };
+
+ return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
+}
+
+#endif /* _INTEL_DSI_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
new file mode 100644
index 000000000000..44279b2ade88
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Shobhit Kumar <shobhit.kumar@intel.com>
+ * Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+#include "intel_dsi.h"
+
+#define DSI_HSS_PACKET_SIZE 4
+#define DSI_HSE_PACKET_SIZE 4
+#define DSI_HSA_PACKET_EXTRA_SIZE 6
+#define DSI_HBP_PACKET_EXTRA_SIZE 6
+#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6
+#define DSI_HFP_PACKET_EXTRA_SIZE 6
+#define DSI_EOTP_PACKET_SIZE 4
+
+struct dsi_mnp {
+ u32 dsi_pll_ctrl;
+ u32 dsi_pll_div;
+};
+
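+/*
+ * The PLL M1 divider is programmed as an LFSR seed rather than a plain
+ * binary value; this table maps each supported feedback divider M
+ * (62..92, see the range comments below) to the value written into the
+ * divider register.
+ */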
+static const u32 lfsr_converts[] = {
+ 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
+ 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
+ 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
+ 71, 35 /* 91 - 92 */
+};
+
+static u32 dsi_rr_formula(const struct drm_display_mode *mode,
+ int pixel_format, int video_mode_format,
+ int lane_count, bool eotp)
+{
+ u32 bpp;
+ u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
+ u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
+ u32 bytes_per_line, bytes_per_frame;
+ u32 num_frames;
+ u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
+ u32 dsi_bit_clock_hz;
+ u32 dsi_clk;
+
+ switch (pixel_format) {
+ default:
+ case VID_MODE_FORMAT_RGB888:
+ case VID_MODE_FORMAT_RGB666_LOOSE:
+ bpp = 24;
+ break;
+ case VID_MODE_FORMAT_RGB666:
+ bpp = 18;
+ break;
+ case VID_MODE_FORMAT_RGB565:
+ bpp = 16;
+ break;
+ }
+
+ hactive = mode->hdisplay;
+ vactive = mode->vdisplay;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsync = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsync = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
+ hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
+ hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
+ hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
+
+ bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
+ DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
+ hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
+ hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
+ hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
+
+ /*
+ * XXX: Need to accurately calculate LP to HS transition timeout and add
+ * it to bytes_per_line/bytes_per_frame.
+ */
+
+ if (eotp && video_mode_format == VIDEO_MODE_BURST)
+ bytes_per_line += DSI_EOTP_PACKET_SIZE;
+
+ bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
+ vactive * bytes_per_line + vfp * bytes_per_line;
+
+ if (eotp &&
+ (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
+ video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
+ bytes_per_frame += DSI_EOTP_PACKET_SIZE;
+
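+ /*
+ * Required link rate: bytes per frame times the refresh rate, divided
+ * across the lanes and converted from bytes to bits. The division by
+ * 10^6 below leaves dsi_clk in MHz.
+ */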
+ num_frames = drm_mode_vrefresh(mode);
+ bytes_per_x_frames = num_frames * bytes_per_frame;
+
+ bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
+
+ /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
+ dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
+ dsi_clk = dsi_bit_clock_hz / (1000 * 1000);
+
+ if (eotp && video_mode_format == VIDEO_MODE_BURST)
+ dsi_clk *= 2;
+
+ return dsi_clk;
+}
+
+#ifdef MNP_FROM_TABLE
+
+struct dsi_clock_table {
+ u32 freq;
+ u8 m;
+ u8 p;
+};
+
+static const struct dsi_clock_table dsi_clk_tbl[] = {
+ {300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
+ {343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
+ {383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
+ {401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
+ {405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
+ {409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
+ {413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
+ {417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
+ {430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
+ {470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
+ {510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
+ {550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
+ {590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
+ {630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
+ {670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
+ {710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
+ {750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
+ {790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
+ {1000, 80, 2}, /* dsi clock frequency in MHz */
+};
+
+static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+{
+ unsigned int i;
+ u8 m;
+ u8 n;
+ u8 p;
+ u32 m_seed;
+
+ if (dsi_clk < 300 || dsi_clk > 1000)
+ return -ECHRNG;
+
+ for (i = 0; i < ARRAY_SIZE(dsi_clk_tbl); i++) {
+ if (dsi_clk_tbl[i].freq > dsi_clk)
+ break;
+ }
+
+ /* if nothing in the table exceeds dsi_clk, fall back to the last entry */
+ if (i == ARRAY_SIZE(dsi_clk_tbl))
+ i = ARRAY_SIZE(dsi_clk_tbl) - 1;
+
+ m = dsi_clk_tbl[i].m;
+ p = dsi_clk_tbl[i].p;
+ m_seed = lfsr_converts[m - 62];
+ n = 1;
+ dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
+ dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
+ m_seed << DSI_PLL_M1_DIV_SHIFT;
+
+ return 0;
+}
+
+#else
+
+static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+{
+ u32 m, n, p;
+ u32 ref_clk;
+ u32 error;
+ u32 tmp_error;
+ u32 target_dsi_clk;
+ u32 calc_dsi_clk;
+ u32 calc_m;
+ u32 calc_p;
+ u32 m_seed;
+
+ if (dsi_clk < 300 || dsi_clk > 1150) {
+ DRM_ERROR("DSI CLK Out of Range\n");
+ return -ECHRNG;
+ }
+
+ ref_clk = 25000;
+ target_dsi_clk = dsi_clk * 1000;
+ error = 0xFFFFFFFF;
+ calc_m = 0;
+ calc_p = 0;
+
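+ /*
+ * Exhaustively search the supported multiplier (62..92) and post
+ * divider (2..6) ranges for the combination whose output clock,
+ * m * 25 MHz / p, comes closest to the target without dropping below
+ * it. A 500 MHz target, for example, is hit exactly by m = 80, p = 4.
+ */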
+ for (m = 62; m <= 92; m++) {
+ for (p = 2; p <= 6; p++) {
+
+ calc_dsi_clk = (m * ref_clk) / p;
+ if (calc_dsi_clk >= target_dsi_clk) {
+ tmp_error = calc_dsi_clk - target_dsi_clk;
+ if (tmp_error < error) {
+ error = tmp_error;
+ calc_m = m;
+ calc_p = p;
+ }
+ }
+ }
+ }
+
+ m_seed = lfsr_converts[calc_m - 62];
+ n = 1;
+ dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
+ dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
+ m_seed << DSI_PLL_M1_DIV_SHIFT;
+
+ return 0;
+}
+
+#endif
+
+/*
+ * XXX: The muxing and gating is hard coded for now. Need to add support for
+ * sharing PLLs with two DSI outputs.
+ */
+static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ int ret;
+ struct dsi_mnp dsi_mnp;
+ u32 dsi_clk;
+
+ dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format,
+ intel_dsi->video_mode_format,
+ intel_dsi->lane_count, !intel_dsi->eot_disable);
+
+ ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
+ if (ret) {
+ DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
+ return;
+ }
+
+ dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+
+ DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
+ dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
+
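+ /*
+ * Write the control register to zero first, then the new divider, and
+ * finally the control word carrying the post divider and clock gate
+ * selection.
+ */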
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
+}
+
+void vlv_enable_dsi_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ u32 tmp;
+
+ DRM_DEBUG_KMS("\n");
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ vlv_configure_dsi_pll(encoder);
+
+ /* wait at least 0.5 us after ungating before enabling VCO */
+ usleep_range(1, 10);
+
+ tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ tmp |= DSI_PLL_VCO_EN;
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
+ DRM_ERROR("DSI PLL lock failed\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("DSI PLL locked\n");
+}
+
+void vlv_disable_dsi_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ u32 tmp;
+
+ DRM_DEBUG_KMS("\n");
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ tmp &= ~DSI_PLL_VCO_EN;
+ tmp |= DSI_PLL_LDO_GATE;
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7fa7df546c1e..3c7736546856 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -153,6 +153,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
+
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_disable_dvo(struct intel_encoder *encoder)
@@ -171,11 +173,16 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+ &crtc->config.requested_mode,
+ &crtc->config.adjusted_mode);
+
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
@@ -184,6 +191,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
struct drm_crtc *crtc;
+ struct intel_crtc_config *config;
/* dvo supports only 2 dpms states. */
if (mode != DRM_MODE_DPMS_ON)
@@ -204,10 +212,16 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
/* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */
if (mode == DRM_MODE_DPMS_ON) {
+ config = &to_intel_crtc(crtc)->config;
+
intel_dvo->base.connectors_active = true;
intel_crtc_update_dpms(crtc);
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+ &config->requested_mode,
+ &config->adjusted_mode);
+
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
} else {
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
@@ -267,11 +281,6 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
- if (intel_dvo->dev.dev_ops->mode_fixup)
- return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
- &pipe_config->requested_mode,
- adjusted_mode);
-
return true;
}
@@ -299,10 +308,6 @@ static void intel_dvo_mode_set(struct intel_encoder *encoder)
break;
}
- intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
- &crtc->config.requested_mode,
- adjusted_mode);
-
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
(DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
@@ -370,7 +375,6 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
static void intel_dvo_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -451,11 +455,11 @@ void intel_dvo_init(struct drm_device *dev)
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
- intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
+ intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
if (!intel_dvo)
return;
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dvo);
return;
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fbdev.c
index bc2100007b21..895fcb4fbd94 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -78,8 +78,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
- 8), 64);
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+ DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
@@ -184,6 +184,27 @@ out:
return ret;
}
+/** Sets the color ramps on behalf of RandR */
+static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ intel_crtc->lut_r[regno] = red >> 8;
+ intel_crtc->lut_g[regno] = green >> 8;
+ intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ *red = intel_crtc->lut_r[regno] << 8;
+ *green = intel_crtc->lut_g[regno] << 8;
+ *blue = intel_crtc->lut_b[regno] << 8;
+}
+
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
@@ -216,7 +237,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+ ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
if (!ifbdev)
return -ENOMEM;
@@ -225,7 +246,7 @@ int intel_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, &ifbdev->helper,
INTEL_INFO(dev)->num_pipes,
- INTELFB_CONN_LIMIT);
+ 4);
if (ret) {
kfree(ifbdev);
return ret;
@@ -278,13 +299,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
MODULE_LICENSE("GPL and additional rights");
-void intel_fb_output_poll_changed(struct drm_device *dev)
+void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
-void intel_fb_restore_mode(struct drm_device *dev)
+void intel_fbdev_restore_mode(struct drm_device *dev)
{
int ret;
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4148cc85bf7f..51a8336dec2e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -713,6 +713,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp, flags = 0;
+ int dotclock;
tmp = I915_READ(intel_hdmi->hdmi_reg);
@@ -727,6 +728,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
+
+ if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
+ dotclock = pipe_config->port_clock * 2 / 3;
+ else
+ dotclock = pipe_config->port_clock;
+
+ if (HAS_PCH_SPLIT(dev_priv->dev))
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
}
static void intel_enable_hdmi(struct intel_encoder *encoder)
@@ -862,7 +873,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
- int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+ int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
int portclock_limit = hdmi_portclock_limit(intel_hdmi);
int desired_bpp;
@@ -904,7 +915,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = desired_bpp;
}
- if (adjusted_mode->clock > portclock_limit) {
+ if (adjusted_mode->crtc_clock > portclock_limit) {
DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
return false;
}
@@ -1063,7 +1074,7 @@ done:
return 0;
}
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -1079,35 +1090,35 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
/* Enable clock channels for this port */
mutex_lock(&dev_priv->dpio_lock);
- val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+ val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
- vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+ vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
/* HDMI 1.0V-2dB */
- vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
- vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
0x2b245f5f);
- vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
0x5578b83a);
- vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
0x0c782040);
- vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
0x2b247878);
- vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
- vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
0x00002000);
- vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
/* Program lane clock */
- vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
0x00760018);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
@@ -1116,55 +1127,60 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
vlv_wait_port_ready(dev_priv, port);
}
-static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
if (!IS_VALLEYVIEW(dev))
return;
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
- vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
- vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
- vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
0x00002000);
- vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+ vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
mutex_unlock(&dev_priv->dpio_lock);
}
-static void intel_hdmi_post_disable(struct intel_encoder *encoder)
+static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
mutex_unlock(&dev_priv->dpio_lock);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -1211,6 +1227,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
+ connector->stereo_allowed = 1;
switch (port) {
case PORT_B:
@@ -1275,11 +1292,11 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
return;
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dig_port);
return;
@@ -1296,10 +1313,10 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_VALLEYVIEW(dev)) {
- intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
- intel_encoder->pre_enable = intel_hdmi_pre_enable;
+ intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
+ intel_encoder->pre_enable = vlv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
- intel_encoder->post_disable = intel_hdmi_post_disable;
+ intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->enable = intel_enable_hdmi;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d1c1e0f7f262..2ca17b14b6c1 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,6 +34,11 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+enum disp_clk {
+ CDCLK,
+ CZCLK
+};
+
struct gmbus_port {
const char *name;
int reg;
@@ -58,10 +63,69 @@ to_intel_gmbus(struct i2c_adapter *i2c)
return container_of(i2c, struct intel_gmbus, adapter);
}
+static int get_disp_clk_div(struct drm_i915_private *dev_priv,
+ enum disp_clk clk)
+{
+ u32 reg_val;
+ int clk_ratio;
+
+ reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
+
+ if (clk == CDCLK)
+ clk_ratio =
+ ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
+ else
+ clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
+
+ return clk_ratio;
+}
+
+static void gmbus_set_freq(struct drm_i915_private *dev_priv)
+{
+ int vco_freq[] = { 800, 1600, 2000, 2400 };
+ int gmbus_freq = 0, cdclk_div, hpll_freq;
+
+ BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
+
+ /* Skip setting the gmbus freq if BIOS has already programmed it */
+ if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
+ return;
+
+ /* Obtain SKU information */
+ mutex_lock(&dev_priv->dpio_lock);
+ hpll_freq =
+ vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ /* Get the CDCLK divide ratio */
+ cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
+
+ /*
+ * Program the gmbus_freq based on the cdclk frequency.
+ * BSpec erroneously claims we should aim for 4MHz, but
+ * in fact 1MHz is the correct frequency.
+ */
+ if (cdclk_div)
+ gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
+
+ if (WARN_ON(gmbus_freq == 0))
+ return;
+
+ I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
+}
+
void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * In BIOS-less system, program the correct gmbus frequency
+ * before reading edid.
+ */
+ if (IS_VALLEYVIEW(dev))
+ gmbus_set_freq(dev_priv);
+
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 831a5c021c4b..b0ef55833087 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -92,6 +92,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lvds_reg, tmp, flags = 0;
+ int dotclock;
if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
@@ -116,6 +117,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
+
+ dotclock = pipe_config->port_clock;
+
+ if (HAS_PCH_SPLIT(dev_priv->dev))
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
}
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -466,7 +474,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
intel_panel_fini(&lvds_connector->base.panel);
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -700,6 +707,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "Intel D410PT",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+ DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Intel D425KT",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "Intel D510MO",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
@@ -786,7 +809,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
return true;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- struct child_device_config *child = dev_priv->vbt.child_dev + i;
+ union child_device_config *uchild = dev_priv->vbt.child_dev + i;
+ struct old_child_dev_config *child = &uchild->old;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@@ -940,11 +964,11 @@ void intel_lvds_init(struct drm_device *dev)
}
}
- lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+ lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
if (!lvds_encoder)
return;
- lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+ lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
if (!lvds_connector) {
kfree(lvds_encoder);
return;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 119771ff46ab..b82050c96f3e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -36,8 +36,11 @@
#include "i915_drv.h"
#include "intel_drv.h"
-#define PCI_ASLE 0xe4
-#define PCI_ASLS 0xfc
+#define PCI_ASLE 0xe4
+#define PCI_ASLS 0xfc
+#define PCI_SWSCI 0xe8
+#define PCI_SWSCI_SCISEL (1 << 15)
+#define PCI_SWSCI_GSSCIE (1 << 0)
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
@@ -107,25 +110,38 @@ struct opregion_asle {
u32 epfm; /* enabled panel fitting modes */
u8 plut[74]; /* panel LUT and identifier */
u32 pfmb; /* PWM freq and min brightness */
- u8 rsvd[102];
+ u32 cddv; /* color correction default values */
+ u32 pcft; /* power conservation features */
+ u32 srot; /* supported rotation angles */
+ u32 iuer; /* IUER events */
+ u8 rsvd[86];
} __attribute__((packed));
/* Driver readiness indicator */
#define ASLE_ARDY_READY (1 << 0)
#define ASLE_ARDY_NOT_READY (0 << 0)
-/* ASLE irq request bits */
-#define ASLE_SET_ALS_ILLUM (1 << 0)
-#define ASLE_SET_BACKLIGHT (1 << 1)
-#define ASLE_SET_PFIT (1 << 2)
-#define ASLE_SET_PWM_FREQ (1 << 3)
-#define ASLE_REQ_MSK 0xf
-
-/* response bits of ASLE irq request */
-#define ASLE_ALS_ILLUM_FAILED (1<<10)
-#define ASLE_BACKLIGHT_FAILED (1<<12)
-#define ASLE_PFIT_FAILED (1<<14)
-#define ASLE_PWM_FREQ_FAILED (1<<16)
+/* ASLE Interrupt Command (ASLC) bits */
+#define ASLC_SET_ALS_ILLUM (1 << 0)
+#define ASLC_SET_BACKLIGHT (1 << 1)
+#define ASLC_SET_PFIT (1 << 2)
+#define ASLC_SET_PWM_FREQ (1 << 3)
+#define ASLC_SUPPORTED_ROTATION_ANGLES (1 << 4)
+#define ASLC_BUTTON_ARRAY (1 << 5)
+#define ASLC_CONVERTIBLE_INDICATOR (1 << 6)
+#define ASLC_DOCKING_INDICATOR (1 << 7)
+#define ASLC_ISCT_STATE_CHANGE (1 << 8)
+#define ASLC_REQ_MSK 0x1ff
+/* response bits */
+#define ASLC_ALS_ILLUM_FAILED (1 << 10)
+#define ASLC_BACKLIGHT_FAILED (1 << 12)
+#define ASLC_PFIT_FAILED (1 << 14)
+#define ASLC_PWM_FREQ_FAILED (1 << 16)
+#define ASLC_ROTATION_ANGLES_FAILED (1 << 18)
+#define ASLC_BUTTON_ARRAY_FAILED (1 << 20)
+#define ASLC_CONVERTIBLE_FAILED (1 << 22)
+#define ASLC_DOCKING_FAILED (1 << 24)
+#define ASLC_ISCT_STATE_FAILED (1 << 26)
/* Technology enabled indicator */
#define ASLE_TCHE_ALS_EN (1 << 0)
@@ -151,6 +167,60 @@ struct opregion_asle {
#define ASLE_CBLV_VALID (1<<31)
+/* IUER */
+#define ASLE_IUER_DOCKING (1 << 7)
+#define ASLE_IUER_CONVERTIBLE (1 << 6)
+#define ASLE_IUER_ROTATION_LOCK_BTN (1 << 4)
+#define ASLE_IUER_VOLUME_DOWN_BTN (1 << 3)
+#define ASLE_IUER_VOLUME_UP_BTN (1 << 2)
+#define ASLE_IUER_WINDOWS_BTN (1 << 1)
+#define ASLE_IUER_POWER_BTN (1 << 0)
+
+/* Software System Control Interrupt (SWSCI) */
+#define SWSCI_SCIC_INDICATOR (1 << 0)
+#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1
+#define SWSCI_SCIC_MAIN_FUNCTION_MASK (0xf << 1)
+#define SWSCI_SCIC_SUB_FUNCTION_SHIFT 8
+#define SWSCI_SCIC_SUB_FUNCTION_MASK (0xff << 8)
+#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT 8
+#define SWSCI_SCIC_EXIT_PARAMETER_MASK (0xff << 8)
+#define SWSCI_SCIC_EXIT_STATUS_SHIFT 5
+#define SWSCI_SCIC_EXIT_STATUS_MASK (7 << 5)
+#define SWSCI_SCIC_EXIT_STATUS_SUCCESS 1
+
+#define SWSCI_FUNCTION_CODE(main, sub) \
+ ((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
+ (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
+
+/* SWSCI: Get BIOS Data (GBDA) */
+#define SWSCI_GBDA 4
+#define SWSCI_GBDA_SUPPORTED_CALLS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
+#define SWSCI_GBDA_REQUESTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
+#define SWSCI_GBDA_BOOT_DISPLAY_PREF SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
+#define SWSCI_GBDA_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
+#define SWSCI_GBDA_TV_STANDARD SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
+#define SWSCI_GBDA_INTERNAL_GRAPHICS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
+#define SWSCI_GBDA_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
+
+/* SWSCI: System BIOS Callbacks (SBCB) */
+#define SWSCI_SBCB 6
+#define SWSCI_SBCB_SUPPORTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
+#define SWSCI_SBCB_INIT_COMPLETION SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
+#define SWSCI_SBCB_PRE_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
+#define SWSCI_SBCB_POST_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
+#define SWSCI_SBCB_DISPLAY_SWITCH SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
+#define SWSCI_SBCB_SET_TV_FORMAT SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
+#define SWSCI_SBCB_ADAPTER_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
+#define SWSCI_SBCB_DISPLAY_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
+#define SWSCI_SBCB_SET_BOOT_DISPLAY SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
+#define SWSCI_SBCB_SET_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
+#define SWSCI_SBCB_SET_INTERNAL_GFX SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
+#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
+#define SWSCI_SBCB_SUSPEND_RESUME SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
+#define SWSCI_SBCB_SET_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
+#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
+#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
+
#define ACPI_OTHER_OUTPUT (0<<8)
#define ACPI_VGA_OUTPUT (1<<8)
#define ACPI_TV_OUTPUT (2<<8)
@@ -158,6 +228,171 @@ struct opregion_asle {
#define ACPI_LVDS_OUTPUT (4<<8)
#ifdef CONFIG_ACPI
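+/*
+ * Common entry point for GBDA and SBCB requests: the function code and
+ * parameter are written to the SWSCI mailbox in the opregion, the BIOS is
+ * poked through the PCI SWSCI register, and the SCIC indicator bit is then
+ * polled until the BIOS signals completion or the (sanitised) DSLP timeout
+ * expires.
+ */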
+static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
+ u32 main_function, sub_function, scic;
+ u16 pci_swsci;
+ u32 dslp;
+
+ if (!swsci)
+ return -ENODEV;
+
+ main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
+ SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
+ sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
+ SWSCI_SCIC_SUB_FUNCTION_SHIFT;
+
+ /* Check if we can call the function. See swsci_setup for details. */
+ if (main_function == SWSCI_SBCB) {
+ if ((dev_priv->opregion.swsci_sbcb_sub_functions &
+ (1 << sub_function)) == 0)
+ return -EINVAL;
+ } else if (main_function == SWSCI_GBDA) {
+ if ((dev_priv->opregion.swsci_gbda_sub_functions &
+ (1 << sub_function)) == 0)
+ return -EINVAL;
+ }
+
+ /* Driver sleep timeout in ms. */
+ dslp = ioread32(&swsci->dslp);
+ if (!dslp) {
+ /* The spec says 2ms should be the default, but it's too small
+ * for some machines. */
+ dslp = 50;
+ } else if (dslp > 500) {
+ /* Hey bios, trust must be earned. */
+ WARN_ONCE(1, "excessive driver sleep timeout (DSLP) %u\n", dslp);
+ dslp = 500;
+ }
+
+ /* The spec tells us to do this, but we are the only user... */
+ scic = ioread32(&swsci->scic);
+ if (scic & SWSCI_SCIC_INDICATOR) {
+ DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
+ return -EBUSY;
+ }
+
+ scic = function | SWSCI_SCIC_INDICATOR;
+
+ iowrite32(parm, &swsci->parm);
+ iowrite32(scic, &swsci->scic);
+
+ /* Ensure SCI event is selected and event trigger is cleared. */
+ pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
+ if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
+ pci_swsci |= PCI_SWSCI_SCISEL;
+ pci_swsci &= ~PCI_SWSCI_GSSCIE;
+ pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+ }
+
+ /* Use event trigger to tell bios to check the mail. */
+ pci_swsci |= PCI_SWSCI_GSSCIE;
+ pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+
+ /* Poll for the result. */
+#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
+ if (wait_for(C, dslp)) {
+ DRM_DEBUG_DRIVER("SWSCI request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
+ SWSCI_SCIC_EXIT_STATUS_SHIFT;
+
+ /* Note: scic == 0 is an error! */
+ if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
+ DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
+ return -EIO;
+ }
+
+ if (parm_out)
+ *parm_out = ioread32(&swsci->parm);
+
+ return 0;
+
+#undef C
+}
+
+#define DISPLAY_TYPE_CRT 0
+#define DISPLAY_TYPE_TV 1
+#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL 2
+#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL 3
+
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+ bool enable)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ u32 parm = 0;
+ u32 type = 0;
+ u32 port;
+
+ /* don't care about old stuff for now */
+ if (!HAS_DDI(dev))
+ return 0;
+
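+ /*
+ * Build the notification parameter: bits 15:8 carry the requested power
+ * state (0 when enabling, 4 when disabling) and the display type is
+ * packed into a 3-bit field at bit 16 + 3 * port, with port E treated
+ * as index 0.
+ */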
+ port = intel_ddi_get_encoder_port(intel_encoder);
+ if (port == PORT_E) {
+ port = 0;
+ } else {
+ parm |= 1 << port;
+ port++;
+ }
+
+ if (!enable)
+ parm |= 4 << 8;
+
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ type = DISPLAY_TYPE_CRT;
+ break;
+ case INTEL_OUTPUT_UNKNOWN:
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_HDMI:
+ type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
+ break;
+ case INTEL_OUTPUT_EDP:
+ type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
+ break;
+ default:
+ WARN_ONCE(1, "unsupported intel_encoder type %d\n",
+ intel_encoder->type);
+ return -EINVAL;
+ }
+
+ parm |= type << (16 + port * 3);
+
+ return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+}
+
+static const struct {
+ pci_power_t pci_power_state;
+ u32 parm;
+} power_state_map[] = {
+ { PCI_D0, 0x00 },
+ { PCI_D1, 0x01 },
+ { PCI_D2, 0x02 },
+ { PCI_D3hot, 0x04 },
+ { PCI_D3cold, 0x04 },
+};
+
+int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+ int i;
+
+ if (!HAS_DDI(dev))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
+ if (state == power_state_map[i].pci_power_state)
+ return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
+ power_state_map[i].parm, NULL);
+ }
+
+ return -EINVAL;
+}
+
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -166,12 +401,13 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
if (!(bclp & ASLE_BCLP_VALID))
- return ASLE_BACKLIGHT_FAILED;
+ return ASLC_BACKLIGHT_FAILED;
bclp &= ASLE_BCLP_MSK;
if (bclp > 255)
- return ASLE_BACKLIGHT_FAILED;
+ return ASLC_BACKLIGHT_FAILED;
+ DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
intel_panel_set_backlight(dev, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
@@ -183,13 +419,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
/* alsi is the current ALS reading in lux. 0 indicates below sensor
range, 0xffff indicates above sensor range. 1-0xfffe are valid */
DRM_DEBUG_DRIVER("Illum is not supported\n");
- return ASLE_ALS_ILLUM_FAILED;
+ return ASLC_ALS_ILLUM_FAILED;
}
static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
{
DRM_DEBUG_DRIVER("PWM freq is not supported\n");
- return ASLE_PWM_FREQ_FAILED;
+ return ASLC_PWM_FREQ_FAILED;
}
static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
@@ -197,39 +433,106 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
DRM_DEBUG_DRIVER("Pfit is not supported\n");
- return ASLE_PFIT_FAILED;
+ return ASLC_PFIT_FAILED;
+}
+
+static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
+{
+ DRM_DEBUG_DRIVER("SROT is not supported\n");
+ return ASLC_ROTATION_ANGLES_FAILED;
+}
+
+static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
+{
+ if (!iuer)
+ DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
+ if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
+ if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
+ if (iuer & ASLE_IUER_VOLUME_UP_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
+ if (iuer & ASLE_IUER_WINDOWS_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
+ if (iuer & ASLE_IUER_POWER_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
+
+ return ASLC_BUTTON_ARRAY_FAILED;
+}
+
+static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
+{
+ if (iuer & ASLE_IUER_CONVERTIBLE)
+ DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
+ else
+ DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
+
+ return ASLC_CONVERTIBLE_FAILED;
+}
+
+static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
+{
+ if (iuer & ASLE_IUER_DOCKING)
+ DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
+ else
+ DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
+
+ return ASLC_DOCKING_FAILED;
+}
+
+static u32 asle_isct_state(struct drm_device *dev)
+{
+ DRM_DEBUG_DRIVER("ISCT is not supported\n");
+ return ASLC_ISCT_STATE_FAILED;
}
void intel_opregion_asle_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
- u32 asle_stat = 0;
- u32 asle_req;
+ u32 aslc_stat = 0;
+ u32 aslc_req;
if (!asle)
return;
- asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
+ aslc_req = ioread32(&asle->aslc);
- if (!asle_req) {
- DRM_DEBUG_DRIVER("non asle set request??\n");
+ if (!(aslc_req & ASLC_REQ_MSK)) {
+ DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
+ aslc_req);
return;
}
- if (asle_req & ASLE_SET_ALS_ILLUM)
- asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
+ if (aslc_req & ASLC_SET_ALS_ILLUM)
+ aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
+
+ if (aslc_req & ASLC_SET_BACKLIGHT)
+ aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
+
+ if (aslc_req & ASLC_SET_PFIT)
+ aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
+
+ if (aslc_req & ASLC_SET_PWM_FREQ)
+ aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
- if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
+ if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
+ aslc_stat |= asle_set_supported_rotation_angles(dev,
+ ioread32(&asle->srot));
- if (asle_req & ASLE_SET_PFIT)
- asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
+ if (aslc_req & ASLC_BUTTON_ARRAY)
+ aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));
- if (asle_req & ASLE_SET_PWM_FREQ)
- asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
+ if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
+ aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));
- iowrite32(asle_stat, &asle->aslc);
+ if (aslc_req & ASLC_DOCKING_INDICATOR)
+ aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));
+
+ if (aslc_req & ASLC_ISCT_STATE_CHANGE)
+ aslc_stat |= asle_isct_state(dev);
+
+ iowrite32(aslc_stat, &asle->aslc);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -446,8 +749,68 @@ void intel_opregion_fini(struct drm_device *dev)
opregion->swsci = NULL;
opregion->asle = NULL;
opregion->vbt = NULL;
+ opregion->lid_state = NULL;
+}
+
+static void swsci_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ bool requested_callbacks = false;
+ u32 tmp;
+
+ /* Sub-function code 0 is okay, let's allow them. */
+ opregion->swsci_gbda_sub_functions = 1;
+ opregion->swsci_sbcb_sub_functions = 1;
+
+ /* We use GBDA to ask for supported GBDA calls. */
+ if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+ /* make the bits match the sub-function codes */
+ tmp <<= 1;
+ opregion->swsci_gbda_sub_functions |= tmp;
+ }
+
+ /*
+ * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
+ * must not call interfaces that are not specifically requested by the
+ * bios.
+ */
+ if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+ /* here, the bits already match sub-function codes */
+ opregion->swsci_sbcb_sub_functions |= tmp;
+ requested_callbacks = true;
+ }
+
+ /*
+ * We use SBCB, on the other hand, to ask for _supported_ SBCB calls. That a
+ * call is supported does not mean it is _requested_; we still must not call
+ * interfaces that have not been requested.
+ */
+ if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+ /* make the bits match the sub-function codes */
+ u32 low = tmp & 0x7ff;
+ u32 high = tmp & ~0xfff; /* bit 11 is reserved */
+ tmp = (high << 4) | (low << 1) | 1;
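+ /* i.e. supported-call bits 0-10 map to sub-function codes 1-11, bits 12+
+ * map to codes 16 and up, and codes 12-15 are left clear. */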
+
+ /* best guess what to do with supported wrt requested */
+ if (requested_callbacks) {
+ u32 req = opregion->swsci_sbcb_sub_functions;
+ if ((req & tmp) != req)
+ DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
+ /* XXX: for now, trust the requested callbacks */
+ /* opregion->swsci_sbcb_sub_functions &= tmp; */
+ } else {
+ opregion->swsci_sbcb_sub_functions |= tmp;
+ }
+ }
+
+ DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
+ opregion->swsci_gbda_sub_functions,
+ opregion->swsci_sbcb_sub_functions);
}
-#endif
+#else /* CONFIG_ACPI */
+static inline void swsci_setup(struct drm_device *dev) {}
+#endif /* CONFIG_ACPI */
int intel_opregion_setup(struct drm_device *dev)
{
@@ -490,6 +853,7 @@ int intel_opregion_setup(struct drm_device *dev)
if (mboxes & MBOX_SWSCI) {
DRM_DEBUG_DRIVER("SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+ swsci_setup(dev);
}
if (mboxes & MBOX_ASLE) {
DRM_DEBUG_DRIVER("ASLE supported\n");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index ddfd0aefe0c0..a98a990fbab3 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -821,14 +821,11 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
struct intel_crtc *crtc)
{
- drm_i915_private_t *dev_priv = overlay->dev->dev_private;
-
if (!crtc->active)
return -EINVAL;
/* can't use the overlay with double wide pipe */
- if (INTEL_INFO(overlay->dev)->gen < 4 &&
- (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
+ if (crtc->config.double_wide)
return -EINVAL;
return 0;
@@ -1056,7 +1053,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
return ret;
}
- params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
+ params = kmalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
@@ -1323,7 +1320,7 @@ void intel_setup_overlay(struct drm_device *dev)
if (!HAS_OVERLAY(dev))
return;
- overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 293564a2896a..09b2994c9b37 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -50,23 +50,22 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode)
{
- struct drm_display_mode *mode, *adjusted_mode;
+ struct drm_display_mode *adjusted_mode;
int x, y, width, height;
- mode = &pipe_config->requested_mode;
adjusted_mode = &pipe_config->adjusted_mode;
x = y = width = height = 0;
/* Native modes don't need fitting */
- if (adjusted_mode->hdisplay == mode->hdisplay &&
- adjusted_mode->vdisplay == mode->vdisplay)
+ if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+ adjusted_mode->vdisplay == pipe_config->pipe_src_h)
goto done;
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
- width = mode->hdisplay;
- height = mode->vdisplay;
+ width = pipe_config->pipe_src_w;
+ height = pipe_config->pipe_src_h;
x = (adjusted_mode->hdisplay - width + 1)/2;
y = (adjusted_mode->vdisplay - height + 1)/2;
break;
@@ -74,17 +73,19 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
{
- u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
- u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+ u32 scaled_width = adjusted_mode->hdisplay
+ * pipe_config->pipe_src_h;
+ u32 scaled_height = pipe_config->pipe_src_w
+ * adjusted_mode->vdisplay;
if (scaled_width > scaled_height) { /* pillar */
- width = scaled_height / mode->vdisplay;
+ width = scaled_height / pipe_config->pipe_src_h;
if (width & 1)
width++;
x = (adjusted_mode->hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->vdisplay;
} else if (scaled_width < scaled_height) { /* letter */
- height = scaled_width / mode->hdisplay;
+ height = scaled_width / pipe_config->pipe_src_w;
if (height & 1)
height++;
y = (adjusted_mode->vdisplay - height + 1) / 2;
@@ -171,20 +172,96 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
+static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
+ u32 *pfit_control)
+{
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ u32 scaled_width = adjusted_mode->hdisplay *
+ pipe_config->pipe_src_h;
+ u32 scaled_height = pipe_config->pipe_src_w *
+ adjusted_mode->vdisplay;
+
+ /* 965+ is easy, it does everything in hw */
+ if (scaled_width > scaled_height)
+ *pfit_control |= PFIT_ENABLE |
+ PFIT_SCALING_PILLAR;
+ else if (scaled_width < scaled_height)
+ *pfit_control |= PFIT_ENABLE |
+ PFIT_SCALING_LETTER;
+ else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w)
+ *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+}
+
+static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
+ u32 *pfit_control, u32 *pfit_pgm_ratios,
+ u32 *border)
+{
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ u32 scaled_width = adjusted_mode->hdisplay *
+ pipe_config->pipe_src_h;
+ u32 scaled_height = pipe_config->pipe_src_w *
+ adjusted_mode->vdisplay;
+ u32 bits;
+
+ /*
+ * For earlier chips we have to calculate the scaling
+ * ratio by hand and program it into the
+ * PFIT_PGM_RATIO register
+ */
+ if (scaled_width > scaled_height) { /* pillar */
+ centre_horizontally(adjusted_mode,
+ scaled_height /
+ pipe_config->pipe_src_h);
+
+ *border = LVDS_BORDER_ENABLE;
+ if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) {
+ bits = panel_fitter_scaling(pipe_config->pipe_src_h,
+ adjusted_mode->vdisplay);
+
+ *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ *pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else if (scaled_width < scaled_height) { /* letter */
+ centre_vertically(adjusted_mode,
+ scaled_width /
+ pipe_config->pipe_src_w);
+
+ *border = LVDS_BORDER_ENABLE;
+ if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
+ bits = panel_fitter_scaling(pipe_config->pipe_src_w,
+ adjusted_mode->hdisplay);
+
+ *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ *pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else {
+ /* Aspects match, let hw scale both directions */
+ *pfit_control |= (PFIT_ENABLE |
+ VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+}
+
void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode)
{
struct drm_device *dev = intel_crtc->base.dev;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
- struct drm_display_mode *mode, *adjusted_mode;
+ struct drm_display_mode *adjusted_mode;
- mode = &pipe_config->requested_mode;
adjusted_mode = &pipe_config->adjusted_mode;
/* Native modes don't need fitting */
- if (adjusted_mode->hdisplay == mode->hdisplay &&
- adjusted_mode->vdisplay == mode->vdisplay)
+ if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+ adjusted_mode->vdisplay == pipe_config->pipe_src_h)
goto out;
switch (fitting_mode) {
@@ -193,81 +270,25 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
- centre_horizontally(adjusted_mode, mode->hdisplay);
- centre_vertically(adjusted_mode, mode->vdisplay);
+ centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
+ centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
border = LVDS_BORDER_ENABLE;
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
- if (INTEL_INFO(dev)->gen >= 4) {
- u32 scaled_width = adjusted_mode->hdisplay *
- mode->vdisplay;
- u32 scaled_height = mode->hdisplay *
- adjusted_mode->vdisplay;
-
- /* 965+ is easy, it does everything in hw */
- if (scaled_width > scaled_height)
- pfit_control |= PFIT_ENABLE |
- PFIT_SCALING_PILLAR;
- else if (scaled_width < scaled_height)
- pfit_control |= PFIT_ENABLE |
- PFIT_SCALING_LETTER;
- else if (adjusted_mode->hdisplay != mode->hdisplay)
- pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
- } else {
- u32 scaled_width = adjusted_mode->hdisplay *
- mode->vdisplay;
- u32 scaled_height = mode->hdisplay *
- adjusted_mode->vdisplay;
- /*
- * For earlier chips we have to calculate the scaling
- * ratio by hand and program it into the
- * PFIT_PGM_RATIO register
- */
- if (scaled_width > scaled_height) { /* pillar */
- centre_horizontally(adjusted_mode,
- scaled_height /
- mode->vdisplay);
-
- border = LVDS_BORDER_ENABLE;
- if (mode->vdisplay != adjusted_mode->vdisplay) {
- u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
- pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
- bits << PFIT_VERT_SCALE_SHIFT);
- pfit_control |= (PFIT_ENABLE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- }
- } else if (scaled_width < scaled_height) { /* letter */
- centre_vertically(adjusted_mode,
- scaled_width /
- mode->hdisplay);
-
- border = LVDS_BORDER_ENABLE;
- if (mode->hdisplay != adjusted_mode->hdisplay) {
- u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
- pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
- bits << PFIT_VERT_SCALE_SHIFT);
- pfit_control |= (PFIT_ENABLE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- }
- } else {
- /* Aspects match, Let hw scale both directions */
- pfit_control |= (PFIT_ENABLE |
- VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- }
- }
+ if (INTEL_INFO(dev)->gen >= 4)
+ i965_scale_aspect(pipe_config, &pfit_control);
+ else
+ i9xx_scale_aspect(pipe_config, &pfit_control,
+ &pfit_pgm_ratios, &border);
break;
case DRM_MODE_SCALE_FULLSCREEN:
/*
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
- if (mode->vdisplay != adjusted_mode->vdisplay ||
- mode->hdisplay != adjusted_mode->hdisplay) {
+ if (pipe_config->pipe_src_h != adjusted_mode->vdisplay ||
+ pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
pfit_control |= PFIT_ENABLE;
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= PFIT_SCALING_AUTO;
@@ -308,7 +329,7 @@ static int is_backlight_combination_mode(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (IS_GEN4(dev))
return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
if (IS_GEN2(dev))
@@ -351,6 +372,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
I915_WRITE(BLC_PWM_CTL2,
dev_priv->regfile.saveBLC_PWM_CTL2);
}
+
+ if (IS_VALLEYVIEW(dev) && !val)
+ val = 0x0f42ffff;
}
return val;
@@ -441,7 +465,8 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
I915_WRITE(BLC_PWM_CPU_CTL, val | level);
}
-static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
+static void intel_panel_actually_set_backlight(struct drm_device *dev,
+ u32 level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
@@ -549,6 +574,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
intel_pipe_to_cpu_transcoder(dev_priv, pipe);
unsigned long flags;
+ DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
if (dev_priv->backlight.level == 0) {
@@ -607,10 +634,24 @@ set_level:
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
}
+/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
+static void intel_panel_init_backlight_regs(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ u32 cur_val = I915_READ(BLC_PWM_CTL) &
+ BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CTL, (0xf42 << 16) | cur_val);
+ }
+}
+
static void intel_panel_init_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_panel_init_backlight_regs(dev);
+
dev_priv->backlight.level = intel_panel_get_backlight(dev);
dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
}
@@ -637,10 +678,12 @@ intel_panel_detect(struct drm_device *dev)
}
}
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
static int intel_panel_update_status(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
+ DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
+ bd->props.brightness, bd->props.max_brightness);
intel_panel_set_backlight(dev, bd->props.brightness,
bd->props.max_brightness);
return 0;
@@ -683,7 +726,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
}
dev_priv->backlight.device =
backlight_device_register("intel_backlight",
- &connector->kdev, dev,
+ connector->kdev, dev,
&intel_panel_bl_ops, &props);
if (IS_ERR(dev_priv->backlight.device)) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f4c5e95b2d6f..09ac9e79830f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -32,6 +32,27 @@
#include <linux/module.h>
#include <drm/i915_powerwell.h>
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, using down to 0V while at this stage. This
+ * stage is entered automatically when the GPU is idle and RC6 support is
+ * enabled; as soon as a new workload arises, the GPU wakes up automatically.
+ *
+ * There are different RC6 modes available on Intel GPUs, which differ from
+ * each other in the latency required to enter and leave RC6 and in the
+ * voltage consumed by the GPU in the different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is the deepest RC6. Their support by hardware varies according to
+ * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
+ * already brings most of the power savings; deeper states save additional
+ * power, but require higher latency to enter and leave.
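+ * For example, (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE) permits both plain and
+ * deep RC6 while keeping the deepest RC6pp state disabled.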
+ */
+#define INTEL_RC6_ENABLE (1<<0)
+#define INTEL_RC6p_ENABLE (1<<1)
+#define INTEL_RC6pp_ENABLE (1<<2)
+
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
* framebuffer contents in-memory, aiming at reducing the required bandwidth
* during in-memory transfers and, therefore, reducing power consumption.
@@ -43,14 +64,6 @@
* i915.i915_enable_fbc parameter
*/
-static bool intel_crtc_active(struct drm_crtc *crtc)
-{
- /* Be paranoid as we can arrive here with only partial
- * state retrieved from the hardware during setup.
- */
- return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
-}
-
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -241,18 +254,6 @@ static void ironlake_disable_fbc(struct drm_device *dev)
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
- if (IS_IVYBRIDGE(dev))
- /* WaFbcDisableDpfcClockGating:ivb */
- I915_WRITE(ILK_DSPCLK_GATE_D,
- I915_READ(ILK_DSPCLK_GATE_D) &
- ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
-
- if (IS_HASWELL(dev))
- /* WaFbcDisableDpfcClockGating:hsw */
- I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
- I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
- ~HSW_DPFC_GATING_DISABLE);
-
DRM_DEBUG_KMS("disabled FBC\n");
}
}
@@ -282,18 +283,10 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
if (IS_IVYBRIDGE(dev)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
- /* WaFbcDisableDpfcClockGating:ivb */
- I915_WRITE(ILK_DSPCLK_GATE_D,
- I915_READ(ILK_DSPCLK_GATE_D) |
- ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
} else {
/* WaFbcAsynchFlipDisableFbcQueue:hsw */
I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
HSW_BYPASS_FBC_QUEUE);
- /* WaFbcDisableDpfcClockGating:hsw */
- I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
- I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
- HSW_DPFC_GATING_DISABLE);
}
I915_WRITE(SNB_DPFC_CTL_SA,
@@ -378,7 +371,7 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
intel_cancel_fbc_work(dev_priv);
- work = kzalloc(sizeof *work, GFP_KERNEL);
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
dev_priv->display.enable_fbc(crtc, interval);
@@ -458,7 +451,8 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
- unsigned int max_hdisplay, max_vdisplay;
+ const struct drm_display_mode *adjusted_mode;
+ unsigned int max_width, max_height;
if (!I915_HAS_FBC(dev)) {
set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
@@ -482,7 +476,7 @@ void intel_update_fbc(struct drm_device *dev)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
if (intel_crtc_active(tmp_crtc) &&
- !to_intel_crtc(tmp_crtc)->primary_disabled) {
+ to_intel_crtc(tmp_crtc)->primary_enabled) {
if (crtc) {
if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
@@ -502,6 +496,7 @@ void intel_update_fbc(struct drm_device *dev)
fb = crtc->fb;
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
+ adjusted_mode = &intel_crtc->config.adjusted_mode;
if (i915_enable_fbc < 0 &&
INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
@@ -514,8 +509,8 @@ void intel_update_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("fbc disabled per module param\n");
goto out_disable;
}
- if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
- (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+ if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
@@ -523,14 +518,14 @@ void intel_update_fbc(struct drm_device *dev)
}
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
- max_hdisplay = 4096;
- max_vdisplay = 2048;
+ max_width = 4096;
+ max_height = 2048;
} else {
- max_hdisplay = 2048;
- max_vdisplay = 1536;
+ max_width = 2048;
+ max_height = 1536;
}
- if ((crtc->mode.hdisplay > max_hdisplay) ||
- (crtc->mode.vdisplay > max_vdisplay)) {
+ if (intel_crtc->config.pipe_src_w > max_width ||
+ intel_crtc->config.pipe_src_h > max_height) {
if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
@@ -1087,8 +1082,9 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
return enabled;
}
-static void pineview_update_wm(struct drm_device *dev)
+static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
+ struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
const struct cxsr_latency *latency;
@@ -1105,8 +1101,12 @@ static void pineview_update_wm(struct drm_device *dev)
crtc = single_enabled_crtc(dev);
if (crtc) {
- int clock = crtc->mode.clock;
+ const struct drm_display_mode *adjusted_mode;
int pixel_size = crtc->fb->bits_per_pixel / 8;
+ int clock;
+
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ clock = adjusted_mode->crtc_clock;
/* Display SR */
wm = intel_calculate_wm(clock, &pineview_display_wm,
@@ -1166,6 +1166,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int *cursor_wm)
{
struct drm_crtc *crtc;
+ const struct drm_display_mode *adjusted_mode;
int htotal, hdisplay, clock, pixel_size;
int line_time_us, line_count;
int entries, tlb_miss;
@@ -1177,9 +1178,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
return false;
}
- htotal = crtc->mode.htotal;
- hdisplay = crtc->mode.hdisplay;
- clock = crtc->mode.clock;
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ clock = adjusted_mode->crtc_clock;
+ htotal = adjusted_mode->htotal;
+ hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
@@ -1250,6 +1252,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
int *display_wm, int *cursor_wm)
{
struct drm_crtc *crtc;
+ const struct drm_display_mode *adjusted_mode;
int hdisplay, htotal, pixel_size, clock;
unsigned long line_time_us;
int line_count, line_size;
@@ -1262,9 +1265,10 @@ static bool g4x_compute_srwm(struct drm_device *dev,
}
crtc = intel_get_crtc_for_plane(dev, plane);
- hdisplay = crtc->mode.hdisplay;
- htotal = crtc->mode.htotal;
- clock = crtc->mode.clock;
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ clock = adjusted_mode->crtc_clock;
+ htotal = adjusted_mode->htotal;
+ hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
line_time_us = (htotal * 1000) / clock;
@@ -1303,7 +1307,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
if (!intel_crtc_active(crtc))
return false;
- clock = crtc->mode.clock; /* VESA DOT Clock */
+ clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
entries = (clock / 1000) * pixel_size;
@@ -1365,8 +1369,9 @@ static void vlv_update_drain_latency(struct drm_device *dev)
#define single_plane_enabled(mask) is_power_of_2(mask)
-static void valleyview_update_wm(struct drm_device *dev)
+static void valleyview_update_wm(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1424,8 +1429,9 @@ static void valleyview_update_wm(struct drm_device *dev)
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void g4x_update_wm(struct drm_device *dev)
+static void g4x_update_wm(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1476,8 +1482,9 @@ static void g4x_update_wm(struct drm_device *dev)
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i965_update_wm(struct drm_device *dev)
+static void i965_update_wm(struct drm_crtc *unused_crtc)
{
+ struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
int srwm = 1;
@@ -1488,9 +1495,11 @@ static void i965_update_wm(struct drm_device *dev)
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
- int clock = crtc->mode.clock;
- int htotal = crtc->mode.htotal;
- int hdisplay = crtc->mode.hdisplay;
+ const struct drm_display_mode *adjusted_mode =
+ &to_intel_crtc(crtc)->config.adjusted_mode;
+ int clock = adjusted_mode->crtc_clock;
+ int htotal = adjusted_mode->htotal;
+ int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
int pixel_size = crtc->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1541,8 +1550,9 @@ static void i965_update_wm(struct drm_device *dev)
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i9xx_update_wm(struct drm_device *dev)
+static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
+ struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
@@ -1562,11 +1572,13 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
if (intel_crtc_active(crtc)) {
+ const struct drm_display_mode *adjusted_mode;
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
- planea_wm = intel_calculate_wm(crtc->mode.clock,
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
latency_ns);
enabled = crtc;
@@ -1576,11 +1588,13 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
+ const struct drm_display_mode *adjusted_mode;
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
- planeb_wm = intel_calculate_wm(crtc->mode.clock,
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
latency_ns);
if (enabled == NULL)
@@ -1607,9 +1621,11 @@ static void i9xx_update_wm(struct drm_device *dev)
if (HAS_FW_BLC(dev) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
- int clock = enabled->mode.clock;
- int htotal = enabled->mode.htotal;
- int hdisplay = enabled->mode.hdisplay;
+ const struct drm_display_mode *adjusted_mode =
+ &to_intel_crtc(enabled)->config.adjusted_mode;
+ int clock = adjusted_mode->crtc_clock;
+ int htotal = adjusted_mode->htotal;
+ int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
int pixel_size = enabled->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1658,10 +1674,12 @@ static void i9xx_update_wm(struct drm_device *dev)
}
}
-static void i830_update_wm(struct drm_device *dev)
+static void i830_update_wm(struct drm_crtc *unused_crtc)
{
+ struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
+ const struct drm_display_mode *adjusted_mode;
uint32_t fwater_lo;
int planea_wm;
@@ -1669,7 +1687,9 @@ static void i830_update_wm(struct drm_device *dev)
if (crtc == NULL)
return;
- planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
+ &i830_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
4, latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1741,6 +1761,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
int *fbc_wm, int *display_wm, int *cursor_wm)
{
struct drm_crtc *crtc;
+ const struct drm_display_mode *adjusted_mode;
unsigned long line_time_us;
int hdisplay, htotal, pixel_size, clock;
int line_count, line_size;
@@ -1753,9 +1774,10 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
}
crtc = intel_get_crtc_for_plane(dev, plane);
- hdisplay = crtc->mode.hdisplay;
- htotal = crtc->mode.htotal;
- clock = crtc->mode.clock;
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ clock = adjusted_mode->crtc_clock;
+ htotal = adjusted_mode->htotal;
+ hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
line_time_us = (htotal * 1000) / clock;
@@ -1785,8 +1807,9 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
display, cursor);
}
-static void ironlake_update_wm(struct drm_device *dev)
+static void ironlake_update_wm(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
@@ -1868,8 +1891,9 @@ static void ironlake_update_wm(struct drm_device *dev)
*/
}
-static void sandybridge_update_wm(struct drm_device *dev)
+static void sandybridge_update_wm(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
u32 val;
@@ -1970,8 +1994,9 @@ static void sandybridge_update_wm(struct drm_device *dev)
cursor_wm);
}
-static void ivybridge_update_wm(struct drm_device *dev)
+static void ivybridge_update_wm(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
u32 val;
@@ -2098,7 +2123,7 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pixel_rate;
- pixel_rate = intel_crtc->config.adjusted_mode.clock;
+ pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
* adjust the pixel_rate here. */
@@ -2107,8 +2132,8 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
- pipe_w = intel_crtc->config.requested_mode.hdisplay;
- pipe_h = intel_crtc->config.requested_mode.vdisplay;
+ pipe_w = intel_crtc->config.pipe_src_w;
+ pipe_h = intel_crtc->config.pipe_src_h;
pfit_w = (pfit_size >> 16) & 0xFFFF;
pfit_h = pfit_size & 0xFFFF;
if (pipe_w < pfit_w)
@@ -2176,27 +2201,18 @@ struct hsw_wm_maximums {
uint16_t fbc;
};
-struct hsw_wm_values {
- uint32_t wm_pipe[3];
- uint32_t wm_lp[3];
- uint32_t wm_lp_spr[3];
- uint32_t wm_linetime[3];
- bool enable_fbc_wm;
-};
-
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
bool sprites_enabled;
bool sprites_scaled;
- bool fbc_wm_enabled;
};
/*
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
uint32_t mem_value,
bool is_lp)
{
@@ -2225,7 +2241,7 @@ static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
uint32_t mem_value)
{
uint32_t method1, method2;
@@ -2248,7 +2264,7 @@ static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
uint32_t mem_value)
{
if (!params->active || !params->cur.enabled)
@@ -2262,7 +2278,7 @@ static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
}
/* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
uint32_t pri_val)
{
if (!params->active || !params->pri.enabled)
@@ -2356,11 +2372,11 @@ static unsigned int ilk_fbc_wm_max(void)
return 15;
}
-static void ilk_wm_max(struct drm_device *dev,
- int level,
- const struct intel_wm_config *config,
- enum intel_ddb_partitioning ddb_partitioning,
- struct hsw_wm_maximums *max)
+static void ilk_compute_wm_maximums(struct drm_device *dev,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ struct hsw_wm_maximums *max)
{
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
@@ -2368,9 +2384,9 @@ static void ilk_wm_max(struct drm_device *dev,
max->fbc = ilk_fbc_wm_max();
}
-static bool ilk_check_wm(int level,
- const struct hsw_wm_maximums *max,
- struct intel_wm_level *result)
+static bool ilk_validate_wm_level(int level,
+ const struct hsw_wm_maximums *max,
+ struct intel_wm_level *result)
{
bool ret;
@@ -2406,14 +2422,12 @@ static bool ilk_check_wm(int level,
result->enable = true;
}
- DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
-
return ret;
}
static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
int level,
- struct hsw_pipe_wm_parameters *p,
+ const struct hsw_pipe_wm_parameters *p,
struct intel_wm_level *result)
{
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2434,55 +2448,6 @@ static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
result->enable = true;
}
-static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
- int level, struct hsw_wm_maximums *max,
- struct hsw_pipe_wm_parameters *params,
- struct intel_wm_level *result)
-{
- enum pipe pipe;
- struct intel_wm_level res[3];
-
- for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
- ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
-
- result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
- result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
- result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
- result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
- result->enable = true;
-
- return ilk_check_wm(level, max, result);
-}
-
-static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- struct hsw_pipe_wm_parameters *params)
-{
- uint32_t pri_val, cur_val, spr_val;
- /* WM0 latency values stored in 0.1us units */
- uint16_t pri_latency = dev_priv->wm.pri_latency[0];
- uint16_t spr_latency = dev_priv->wm.spr_latency[0];
- uint16_t cur_latency = dev_priv->wm.cur_latency[0];
-
- pri_val = ilk_compute_pri_wm(params, pri_latency, false);
- spr_val = ilk_compute_spr_wm(params, spr_latency);
- cur_val = ilk_compute_cur_wm(params, cur_latency);
-
- WARN(pri_val > 127,
- "Primary WM error, mode not supported for pipe %c\n",
- pipe_name(pipe));
- WARN(spr_val > 127,
- "Sprite WM error, mode not supported for pipe %c\n",
- pipe_name(pipe));
- WARN(cur_val > 63,
- "Cursor WM error, mode not supported for pipe %c\n",
- pipe_name(pipe));
-
- return (pri_val << WM0_PIPE_PLANE_SHIFT) |
- (spr_val << WM0_PIPE_SPRITE_SHIFT) |
- cur_val;
-}
-
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
@@ -2554,19 +2519,22 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
wm[3] *= 2;
}
-static void intel_print_wm_latency(struct drm_device *dev,
- const char *name,
- const uint16_t wm[5])
+static int ilk_wm_max_level(const struct drm_device *dev)
{
- int level, max_level;
-
/* how many WM levels are we expecting */
if (IS_HASWELL(dev))
- max_level = 4;
+ return 4;
else if (INTEL_INFO(dev)->gen >= 6)
- max_level = 3;
+ return 3;
else
- max_level = 2;
+ return 2;
+}
+
+static void intel_print_wm_latency(struct drm_device *dev,
+ const char *name,
+ const uint16_t wm[5])
+{
+ int level, max_level = ilk_wm_max_level(dev);
for (level = 0; level <= max_level; level++) {
unsigned int latency = wm[level];
@@ -2606,101 +2574,154 @@ static void intel_setup_wm_latency(struct drm_device *dev)
intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}
-static void hsw_compute_wm_parameters(struct drm_device *dev,
- struct hsw_pipe_wm_parameters *params,
- struct hsw_wm_maximums *lp_max_1_2,
- struct hsw_wm_maximums *lp_max_5_6)
+static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
+ struct hsw_pipe_wm_parameters *p,
+ struct intel_wm_config *config)
{
- struct drm_crtc *crtc;
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
- enum pipe pipe;
- struct intel_wm_config config = {};
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct hsw_pipe_wm_parameters *p;
-
- pipe = intel_crtc->pipe;
- p = &params[pipe];
-
- p->active = intel_crtc_active(crtc);
- if (!p->active)
- continue;
-
- config.num_pipes_active++;
+ p->active = intel_crtc_active(crtc);
+ if (p->active) {
p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
p->cur.bytes_per_pixel = 4;
- p->pri.horiz_pixels =
- intel_crtc->config.requested_mode.hdisplay;
+ p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
p->cur.horiz_pixels = 64;
/* TODO: for now, assume primary and cursor planes are always enabled. */
p->pri.enabled = true;
p->cur.enabled = true;
}
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ config->num_pipes_active += intel_crtc_active(crtc);
+
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct hsw_pipe_wm_parameters *p;
-
- pipe = intel_plane->pipe;
- p = &params[pipe];
- p->spr = intel_plane->wm;
+ if (intel_plane->pipe == pipe)
+ p->spr = intel_plane->wm;
- config.sprites_enabled |= p->spr.enabled;
- config.sprites_scaled |= p->spr.scaled;
+ config->sprites_enabled |= intel_plane->wm.enabled;
+ config->sprites_scaled |= intel_plane->wm.scaled;
}
+}
- ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
+/* Compute new watermarks for the pipe */
+static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
+ const struct hsw_pipe_wm_parameters *params,
+ struct intel_pipe_wm *pipe_wm)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int level, max_level = ilk_wm_max_level(dev);
+ /* LP0 watermark maximums depend on this pipe alone */
+ struct intel_wm_config config = {
+ .num_pipes_active = 1,
+ .sprites_enabled = params->spr.enabled,
+ .sprites_scaled = params->spr.scaled,
+ };
+ struct hsw_wm_maximums max;
- /* 5/6 split only in single pipe config on IVB+ */
- if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
- ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
- else
- *lp_max_5_6 = *lp_max_1_2;
+ /* LP0 watermarks always use 1/2 DDB partitioning */
+ ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+
+ for (level = 0; level <= max_level; level++)
+ ilk_compute_wm_level(dev_priv, level, params,
+ &pipe_wm->wm[level]);
+
+ pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+
+ /* At least LP0 must be valid */
+ return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
}
-static void hsw_compute_wm_results(struct drm_device *dev,
- struct hsw_pipe_wm_parameters *params,
- struct hsw_wm_maximums *lp_maximums,
- struct hsw_wm_values *results)
+/*
+ * Merge the watermarks from all active pipes for a specific level.
+ */
+static void ilk_merge_wm_level(struct drm_device *dev,
+ int level,
+ struct intel_wm_level *ret_wm)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct intel_wm_level lp_results[4] = {};
- enum pipe pipe;
- int level, max_level, wm_lp;
+ const struct intel_crtc *intel_crtc;
- for (level = 1; level <= 4; level++)
- if (!hsw_compute_lp_wm(dev_priv, level,
- lp_maximums, params,
- &lp_results[level - 1]))
- break;
- max_level = level - 1;
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+ const struct intel_wm_level *wm =
+ &intel_crtc->wm.active.wm[level];
+
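+ /* Any pipe with this level disabled disables the merged level too. */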
+ if (!wm->enable)
+ return;
+
+ ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
+ ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
+ ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
+ ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
+ }
- memset(results, 0, sizeof(*results));
+ ret_wm->enable = true;
+}
+
+/*
+ * Merge all low power watermarks for all active pipes.
+ */
+static void ilk_wm_merge(struct drm_device *dev,
+ const struct hsw_wm_maximums *max,
+ struct intel_pipe_wm *merged)
+{
+ int level, max_level = ilk_wm_max_level(dev);
+
+ merged->fbc_wm_enabled = true;
- /* The spec says it is preferred to disable FBC WMs instead of disabling
- * a WM level. */
- results->enable_fbc_wm = true;
+ /* merge each WM1+ level */
for (level = 1; level <= max_level; level++) {
- if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
- results->enable_fbc_wm = false;
- lp_results[level - 1].fbc_val = 0;
+ struct intel_wm_level *wm = &merged->wm[level];
+
+ ilk_merge_wm_level(dev, level, wm);
+
+ if (!ilk_validate_wm_level(level, max, wm))
+ break;
+
+ /*
+ * The spec says it is preferred to disable
+ * FBC WMs instead of disabling a WM level.
+ */
+ if (wm->fbc_val > max->fbc) {
+ merged->fbc_wm_enabled = false;
+ wm->fbc_val = 0;
}
}
+}
+
+static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
+{
+ /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
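+ /* i.e. LP1 is always level 1; LP2 and LP3 jump to levels 3 and 4 when
+ * the highest level (wm[4]) is in use. */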
+ return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
+}
+
+static void hsw_compute_wm_results(struct drm_device *dev,
+ const struct intel_pipe_wm *merged,
+ enum intel_ddb_partitioning partitioning,
+ struct hsw_wm_values *results)
+{
+ struct intel_crtc *intel_crtc;
+ int level, wm_lp;
+
+ results->enable_fbc_wm = merged->fbc_wm_enabled;
+ results->partitioning = partitioning;
+ /* LP1+ register values */
for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
const struct intel_wm_level *r;
- level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
- if (level > max_level)
+ level = ilk_wm_lp_to_level(wm_lp, merged);
+
+ r = &merged->wm[level];
+ if (!r->enable)
break;
- r = &lp_results[level - 1];
results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
r->fbc_val,
r->pri_val,
@@ -2708,116 +2729,158 @@ static void hsw_compute_wm_results(struct drm_device *dev,
results->wm_lp_spr[wm_lp - 1] = r->spr_val;
}
- for_each_pipe(pipe)
- results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
- &params[pipe]);
+ /* LP0 register values */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+ enum pipe pipe = intel_crtc->pipe;
+ const struct intel_wm_level *r =
+ &intel_crtc->wm.active.wm[0];
- for_each_pipe(pipe) {
- crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
+ if (WARN_ON(!r->enable))
+ continue;
+
+ results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
+
+ results->wm_pipe[pipe] =
+ (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
+ (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
+ r->cur_val;
}
}
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
* case both are at the same level. Prefer r1 in case they're the same. */
-static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
- struct hsw_wm_values *r2)
+static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
+ struct intel_pipe_wm *r1,
+ struct intel_pipe_wm *r2)
{
- int i, val_r1 = 0, val_r2 = 0;
+ int level, max_level = ilk_wm_max_level(dev);
+ int level1 = 0, level2 = 0;
- for (i = 0; i < 3; i++) {
- if (r1->wm_lp[i] & WM3_LP_EN)
- val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
- if (r2->wm_lp[i] & WM3_LP_EN)
- val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
+ for (level = 1; level <= max_level; level++) {
+ if (r1->wm[level].enable)
+ level1 = level;
+ if (r2->wm[level].enable)
+ level2 = level;
}
- if (val_r1 == val_r2) {
- if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
+ if (level1 == level2) {
+ if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
return r2;
else
return r1;
- } else if (val_r1 > val_r2) {
+ } else if (level1 > level2) {
return r1;
} else {
return r2;
}
}
+/* dirty bits used to track which watermarks need changes */
+#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
+#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
+#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
+#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
+#define WM_DIRTY_FBC (1 << 24)
+#define WM_DIRTY_DDB (1 << 25)
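+ /* Bit layout: 0-2 per-pipe WM0, 8-10 per-pipe linetime, 16-18 LP1-LP3,
+ * 24 FBC enable, 25 DDB partitioning. */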
+
+static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
+ const struct hsw_wm_values *old,
+ const struct hsw_wm_values *new)
+{
+ unsigned int dirty = 0;
+ enum pipe pipe;
+ int wm_lp;
+
+ for_each_pipe(pipe) {
+ if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
+ dirty |= WM_DIRTY_LINETIME(pipe);
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
+ dirty |= WM_DIRTY_PIPE(pipe);
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+ }
+
+ if (old->enable_fbc_wm != new->enable_fbc_wm) {
+ dirty |= WM_DIRTY_FBC;
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ if (old->partitioning != new->partitioning) {
+ dirty |= WM_DIRTY_DDB;
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ /* LP1+ watermarks already deemed dirty, no need to continue */
+ if (dirty & WM_DIRTY_LP_ALL)
+ return dirty;
+
+ /* Find the lowest numbered LP1+ watermark in need of an update... */
+ for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+ if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
+ old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
+ break;
+ }
+
+ /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
+ for (; wm_lp <= 3; wm_lp++)
+ dirty |= WM_DIRTY_LP(wm_lp);
+
+ return dirty;
+}
+
/*
* The spec says we shouldn't write when we don't need to, because every write
* causes WMs to be re-evaluated, expending some power.
*/
static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
- struct hsw_wm_values *results,
- enum intel_ddb_partitioning partitioning)
+ struct hsw_wm_values *results)
{
- struct hsw_wm_values previous;
+ struct hsw_wm_values *previous = &dev_priv->wm.hw;
+ unsigned int dirty;
uint32_t val;
- enum intel_ddb_partitioning prev_partitioning;
- bool prev_enable_fbc_wm;
-
- previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
- previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
- previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
- previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
- previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
- previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
- previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
- previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
- previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
- previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
- previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
- previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
-
- prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
- INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-
- prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
-
- if (memcmp(results->wm_pipe, previous.wm_pipe,
- sizeof(results->wm_pipe)) == 0 &&
- memcmp(results->wm_lp, previous.wm_lp,
- sizeof(results->wm_lp)) == 0 &&
- memcmp(results->wm_lp_spr, previous.wm_lp_spr,
- sizeof(results->wm_lp_spr)) == 0 &&
- memcmp(results->wm_linetime, previous.wm_linetime,
- sizeof(results->wm_linetime)) == 0 &&
- partitioning == prev_partitioning &&
- results->enable_fbc_wm == prev_enable_fbc_wm)
+
+ dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
+ if (!dirty)
return;
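+ /* Dirty LP1+ watermarks are disabled up front and only re-written with
+ * their new values at the very end. */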
- if (previous.wm_lp[2] != 0)
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
I915_WRITE(WM3_LP_ILK, 0);
- if (previous.wm_lp[1] != 0)
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
I915_WRITE(WM2_LP_ILK, 0);
- if (previous.wm_lp[0] != 0)
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
I915_WRITE(WM1_LP_ILK, 0);
- if (previous.wm_pipe[0] != results->wm_pipe[0])
+ if (dirty & WM_DIRTY_PIPE(PIPE_A))
I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
- if (previous.wm_pipe[1] != results->wm_pipe[1])
+ if (dirty & WM_DIRTY_PIPE(PIPE_B))
I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
- if (previous.wm_pipe[2] != results->wm_pipe[2])
+ if (dirty & WM_DIRTY_PIPE(PIPE_C))
I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
- if (previous.wm_linetime[0] != results->wm_linetime[0])
+ if (dirty & WM_DIRTY_LINETIME(PIPE_A))
I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
- if (previous.wm_linetime[1] != results->wm_linetime[1])
+ if (dirty & WM_DIRTY_LINETIME(PIPE_B))
I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
- if (previous.wm_linetime[2] != results->wm_linetime[2])
+ if (dirty & WM_DIRTY_LINETIME(PIPE_C))
I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
- if (prev_partitioning != partitioning) {
+ if (dirty & WM_DIRTY_DDB) {
val = I915_READ(WM_MISC);
- if (partitioning == INTEL_DDB_PART_1_2)
+ if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
else
val |= WM_MISC_DATA_PARTITION_5_6;
I915_WRITE(WM_MISC, val);
}
- if (prev_enable_fbc_wm != results->enable_fbc_wm) {
+ if (dirty & WM_DIRTY_FBC) {
val = I915_READ(DISP_ARB_CTL);
if (results->enable_fbc_wm)
val &= ~DISP_FBC_WM_DIS;
@@ -2826,45 +2889,65 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
I915_WRITE(DISP_ARB_CTL, val);
}
- if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
- if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
- if (results->wm_lp[0] != 0)
+ if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
- if (results->wm_lp[1] != 0)
+ if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
- if (results->wm_lp[2] != 0)
+ if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+
+ dev_priv->wm.hw = *results;
}
-static void haswell_update_wm(struct drm_device *dev)
+static void haswell_update_wm(struct drm_crtc *crtc)
{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
- struct hsw_pipe_wm_parameters params[3];
- struct hsw_wm_values results_1_2, results_5_6, *best_results;
+ struct hsw_wm_maximums max;
+ struct hsw_pipe_wm_parameters params = {};
+ struct hsw_wm_values results = {};
enum intel_ddb_partitioning partitioning;
+ struct intel_pipe_wm pipe_wm = {};
+ struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+ struct intel_wm_config config = {};
+
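+ /* Flow: gather this pipe's parameters, compute its watermarks, merge the
+ * LP1+ levels across pipes (trying the 5/6 DDB split where allowed), and
+ * finally write only the registers that actually changed. */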
+ hsw_compute_wm_parameters(crtc, &params, &config);
+
+ intel_compute_pipe_wm(crtc, &params, &pipe_wm);
+
+ if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
+ return;
- hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
+ intel_crtc->wm.active = pipe_wm;
- hsw_compute_wm_results(dev, params,
- &lp_max_1_2, &results_1_2);
- if (lp_max_1_2.pri != lp_max_5_6.pri) {
- hsw_compute_wm_results(dev, params,
- &lp_max_5_6, &results_5_6);
- best_results = hsw_find_best_result(&results_1_2, &results_5_6);
+ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(dev, &max, &lp_wm_1_2);
+
+ /* 5/6 split only in single pipe config on IVB+ */
+ if (INTEL_INFO(dev)->gen >= 7 &&
+ config.num_pipes_active == 1 && config.sprites_enabled) {
+ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(dev, &max, &lp_wm_5_6);
+
+ best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
} else {
- best_results = &results_1_2;
+ best_lp_wm = &lp_wm_1_2;
}
- partitioning = (best_results == &results_1_2) ?
+ partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- hsw_write_wm_values(dev_priv, best_results, partitioning);
+ hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+
+ hsw_write_wm_values(dev_priv, &results);
}
static void haswell_update_sprite_wm(struct drm_plane *plane,
@@ -2879,7 +2962,7 @@ static void haswell_update_sprite_wm(struct drm_plane *plane,
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.bytes_per_pixel = pixel_size;
- haswell_update_wm(plane->dev);
+ haswell_update_wm(crtc);
}
static bool
@@ -2898,7 +2981,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
return false;
}
- clock = crtc->mode.clock;
+ clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
/* Use the small buffer method to calculate the sprite watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -2933,7 +3016,7 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
}
crtc = intel_get_crtc_for_plane(dev, plane);
- clock = crtc->mode.clock;
+ clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
if (!clock) {
*sprite_wm = 0;
return false;
@@ -3044,6 +3127,74 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
+static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct hsw_wm_values *hw = &dev_priv->wm.hw;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_pipe_wm *active = &intel_crtc->wm.active;
+ enum pipe pipe = intel_crtc->pipe;
+ static const unsigned int wm0_pipe_reg[] = {
+ [PIPE_A] = WM0_PIPEA_ILK,
+ [PIPE_B] = WM0_PIPEB_ILK,
+ [PIPE_C] = WM0_PIPEC_IVB,
+ };
+
+ hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
+ hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+
+ if (intel_crtc_active(crtc)) {
+ u32 tmp = hw->wm_pipe[pipe];
+
+ /*
+ * For active pipes LP0 watermark is marked as
+ * enabled, and LP1+ watermarks as disabled since
+ * we can't really reverse compute them in case
+ * multiple pipes are active.
+ */
+ active->wm[0].enable = true;
+ active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
+ active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
+ active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
+ active->linetime = hw->wm_linetime[pipe];
+ } else {
+ int level, max_level = ilk_wm_max_level(dev);
+
+ /*
+ * For inactive pipes, all watermark levels
+ * should be marked as enabled but zeroed,
+ * which is what we'd compute them to.
+ */
+ for (level = 0; level <= max_level; level++)
+ active->wm[level].enable = true;
+ }
+}
+
+void ilk_wm_get_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct hsw_wm_values *hw = &dev_priv->wm.hw;
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ ilk_pipe_wm_get_hw_state(crtc);
+
+ hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
+ hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
+ hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+
+ hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+ hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+
+ hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+
+ hw->enable_fbc_wm =
+ !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+}
+
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -3076,12 +3227,12 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
-void intel_update_watermarks(struct drm_device *dev)
+void intel_update_watermarks(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
if (dev_priv->display.update_wm)
- dev_priv->display.update_wm(dev);
+ dev_priv->display.update_wm(crtc);
}
void intel_update_sprite_watermarks(struct drm_plane *plane,
@@ -3287,6 +3438,98 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
return limits;
}
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+{
+ int new_power;
+
+ new_power = dev_priv->rps.power;
+ switch (dev_priv->rps.power) {
+ case LOW_POWER:
+ if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+ new_power = BETWEEN;
+ break;
+
+ case BETWEEN:
+ if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+ new_power = LOW_POWER;
+ else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+ new_power = HIGH_POWER;
+ break;
+
+ case HIGH_POWER:
+ if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+ new_power = BETWEEN;
+ break;
+ }
+ /* Max/min bins are special */
+ if (val == dev_priv->rps.min_delay)
+ new_power = LOW_POWER;
+ if (val == dev_priv->rps.max_delay)
+ new_power = HIGH_POWER;
+ if (new_power == dev_priv->rps.power)
+ return;
+
+ /* Note the units here are not exactly 1us, but 1280ns. */
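+ /* e.g. LOW_POWER's up EI of 12500 units is ~16ms (12500 * 1.28us), and its 11800 up threshold is ~94% of that window. */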
+ switch (new_power) {
+ case LOW_POWER:
+ /* Upclock if more than 95% busy over 16ms */
+ I915_WRITE(GEN6_RP_UP_EI, 12500);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
+
+ /* Downclock if less than 85% busy over 32ms */
+ I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+ break;
+
+ case BETWEEN:
+ /* Upclock if more than 90% busy over 13ms */
+ I915_WRITE(GEN6_RP_UP_EI, 10250);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
+
+ /* Downclock if less than 75% busy over 32ms */
+ I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+ break;
+
+ case HIGH_POWER:
+ /* Upclock if more than 85% busy over 10ms */
+ I915_WRITE(GEN6_RP_UP_EI, 8000);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
+
+ /* Downclock if less than 60% busy over 32ms */
+ I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+ break;
+ }
+
+ dev_priv->rps.power = new_power;
+ dev_priv->rps.last_adj = 0;
+}
+
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3299,6 +3542,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
if (val == dev_priv->rps.cur_delay)
return;
+ gen6_set_rps_thresholds(dev_priv, val);
+
if (IS_HASWELL(dev))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
@@ -3320,6 +3565,32 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(val * 50);
}
+void gen6_rps_idle(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (dev_priv->rps.enabled) {
+ if (dev_priv->info->is_valleyview)
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+ else
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+ dev_priv->rps.last_adj = 0;
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void gen6_rps_boost(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (dev_priv->rps.enabled) {
+ if (dev_priv->info->is_valleyview)
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+ else
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+ dev_priv->rps.last_adj = 0;
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
/*
* Wait until the previous freq change has completed,
* or the timeout elapsed, and then update our notion
@@ -3415,6 +3686,20 @@ static void valleyview_disable_rps(struct drm_device *dev)
}
}
+static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+{
+ if (IS_GEN6(dev))
+ DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+
+ if (IS_HASWELL(dev))
+ DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+
+ DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+}
+
int intel_enable_rc6(const struct drm_device *dev)
{
/* No RC6 before Ironlake */
@@ -3429,18 +3714,13 @@ int intel_enable_rc6(const struct drm_device *dev)
if (INTEL_INFO(dev)->gen == 5)
return 0;
- if (IS_HASWELL(dev)) {
- DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+ if (IS_HASWELL(dev))
return INTEL_RC6_ENABLE;
- }
/* snb/ivb have more than one rc6 state. */
- if (INTEL_INFO(dev)->gen == 6) {
- DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+ if (INTEL_INFO(dev)->gen == 6)
return INTEL_RC6_ENABLE;
- }
- DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
@@ -3501,7 +3781,10 @@ static void gen6_enable_rps(struct drm_device *dev)
/* In units of 50MHz */
dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
- dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
+ dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
+ dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
+ dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
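+ /* rp_state_cap layout: bits 0-7 = RP0 (max), bits 8-15 = RP1, bits 16-23 = RPn (min). */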
dev_priv->rps.cur_delay = 0;
/* disable the counters and set deterministic thresholds */
@@ -3539,48 +3822,16 @@ static void gen6_enable_rps(struct drm_device *dev)
rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
}
- DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
- (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
- (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+ intel_print_rc6_info(dev, rc6_mask);
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
GEN6_RC_CTL_EI_MODE(1) |
GEN6_RC_CTL_HW_ENABLE);
- if (IS_HASWELL(dev)) {
- I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(10));
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- HSW_FREQUENCY(12));
- } else {
- I915_WRITE(GEN6_RPNSWREQ,
- GEN6_FREQUENCY(10) |
- GEN6_OFFSET(0) |
- GEN6_AGGRESSIVE_TURBO);
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN6_FREQUENCY(12));
- }
-
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- dev_priv->rps.max_delay << 24 |
- dev_priv->rps.min_delay << 16);
-
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
- I915_WRITE(GEN6_RP_UP_EI, 66000);
- I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
+ /* Power down if completely idle for over 50ms */
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
if (!ret) {
@@ -3596,7 +3847,8 @@ static void gen6_enable_rps(struct drm_device *dev)
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
}
- gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
+ dev_priv->rps.power = HIGH_POWER; /* force a reset */
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
gen6_enable_rps_interrupts(dev);
@@ -3624,23 +3876,28 @@ void gen6_update_ring_freq(struct drm_device *dev)
unsigned int gpu_freq;
unsigned int max_ia_freq, min_ring_freq;
int scaling_factor = 180;
+ struct cpufreq_policy *policy;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- max_ia_freq = cpufreq_quick_get_max(0);
- /*
- * Default to measured freq if none found, PCU will ensure we don't go
- * over
- */
- if (!max_ia_freq)
+ policy = cpufreq_cpu_get(0);
+ if (policy) {
+ max_ia_freq = policy->cpuinfo.max_freq;
+ cpufreq_cpu_put(policy);
+ } else {
+ /*
+ * Default to the measured freq if none is found; the PCU will
+ * ensure we don't go over
+ */
max_ia_freq = tsc_khz;
+ }
/* Convert from kHz to MHz */
max_ia_freq /= 1000;
- min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
- /* convert DDR frequency from units of 133.3MHz to bandwidth */
- min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
+ min_ring_freq = I915_READ(DCLK) & 0xf;
+ /* convert DDR frequency from units of 266.6MHz to bandwidth */
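+ /* mult_frac(x, 8, 3) evaluates x * 8 / 3 without risking intermediate overflow. */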
+ min_ring_freq = mult_frac(min_ring_freq, 8, 3);
/*
* For each potential GPU frequency, load a ring frequency we'd like
@@ -3653,7 +3910,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
unsigned int ia_freq = 0, ring_freq = 0;
if (IS_HASWELL(dev)) {
- ring_freq = (gpu_freq * 5 + 3) / 4;
+ ring_freq = mult_frac(gpu_freq, 5, 4);
ring_freq = max(min_ring_freq, ring_freq);
/* leave ia_freq as the default, chosen by cpufreq */
} else {
@@ -3709,24 +3966,6 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
-static void vlv_rps_timer_work(struct work_struct *work)
-{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- rps.vlv_work.work);
-
- /*
- * Timer fired, we must be idle. Drop to min voltage state.
- * Note: we use RPe here since it should match the
- * Vmin we were shooting for. That should give us better
- * perf when we come back out of RC6 than if we used the
- * min freq available.
- */
- mutex_lock(&dev_priv->rps.hw_lock);
- if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
- mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
static void valleyview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3773,13 +4012,14 @@ static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- u32 gtfifodbg, val;
+ u32 gtfifodbg, val, rc6_mode = 0;
int i;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
- DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+ DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+ gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
}
@@ -3812,9 +4052,16 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
/* allows RC6 residency counter to work */
- I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
- I915_WRITE(GEN6_RC_CONTROL,
- GEN7_RC_CTL_TO_MODE);
+ I915_WRITE(VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+ if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ rc6_mode = GEN7_RC_CTL_TO_MODE;
+
+ intel_print_rc6_info(dev, rc6_mode);
+
+ I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
switch ((val >> 6) & 3) {
@@ -3985,6 +4232,8 @@ static void ironlake_enable_rc6(struct drm_device *dev)
I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+
+ intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
}
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4603,13 +4852,12 @@ void intel_disable_gt_powersave(struct drm_device *dev)
} else if (INTEL_INFO(dev)->gen >= 6) {
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
cancel_work_sync(&dev_priv->rps.work);
- if (IS_VALLEYVIEW(dev))
- cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
+ dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
}
}
@@ -4629,6 +4877,7 @@ static void intel_gen6_powersave_work(struct work_struct *work)
gen6_enable_rps(dev);
gen6_update_ring_freq(dev);
}
+ dev_priv->rps.enabled = true;
mutex_unlock(&dev_priv->rps.hw_lock);
}
@@ -4672,7 +4921,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
- intel_flush_display_plane(dev_priv, pipe);
+ intel_flush_primary_plane(dev_priv, pipe);
}
}
@@ -4759,7 +5008,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+ PCH_DPLUNIT_CLOCK_GATE_DISABLE |
+ PCH_CPUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
/* The below fixes the weird display corruption, a few pixels shifted
@@ -5253,6 +5504,23 @@ void intel_suspend_hw(struct drm_device *dev)
lpt_suspend_hw(dev);
}
+static bool is_always_on_power_domain(struct drm_device *dev,
+ enum intel_display_power_domain domain)
+{
+ unsigned long always_on_domains;
+
+ BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
+
+ if (IS_HASWELL(dev)) {
+ always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
+ } else {
+ WARN_ON(1);
+ return true;
+ }
+
+ return BIT(domain) & always_on_domains;
+}
+
/**
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -5266,23 +5534,11 @@ bool intel_display_power_enabled(struct drm_device *dev,
if (!HAS_POWER_WELL(dev))
return true;
- switch (domain) {
- case POWER_DOMAIN_PIPE_A:
- case POWER_DOMAIN_TRANSCODER_EDP:
+ if (is_always_on_power_domain(dev, domain))
return true;
- case POWER_DOMAIN_PIPE_B:
- case POWER_DOMAIN_PIPE_C:
- case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
- case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
- case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
- case POWER_DOMAIN_TRANSCODER_A:
- case POWER_DOMAIN_TRANSCODER_B:
- case POWER_DOMAIN_TRANSCODER_C:
- return I915_READ(HSW_PWR_WELL_DRIVER) ==
+
+ return I915_READ(HSW_PWR_WELL_DRIVER) ==
(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
- default:
- BUG();
- }
}
static void __intel_set_power_well(struct drm_device *dev, bool enable)
@@ -5326,83 +5582,136 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for_each_pipe(p)
if (p != PIPE_A)
- dev->last_vblank[p] = 0;
+ dev->vblank[p].last = 0;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
}
}
-static struct i915_power_well *hsw_pwr;
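+/* Refcounted control of the single power well: the first get powers it up, the last put powers it down (when i915_disable_power_well allows). */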
+static void __intel_power_well_get(struct drm_device *dev,
+ struct i915_power_well *power_well)
+{
+ if (!power_well->count++)
+ __intel_set_power_well(dev, true);
+}
+
+static void __intel_power_well_put(struct drm_device *dev,
+ struct i915_power_well *power_well)
+{
+ WARN_ON(!power_well->count);
+ if (!--power_well->count && i915_disable_power_well)
+ __intel_set_power_well(dev, false);
+}
+
+void intel_display_power_get(struct drm_device *dev,
+ enum intel_display_power_domain domain)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains;
+
+ if (!HAS_POWER_WELL(dev))
+ return;
+
+ if (is_always_on_power_domain(dev, domain))
+ return;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ __intel_power_well_get(dev, &power_domains->power_wells[0]);
+ mutex_unlock(&power_domains->lock);
+}
+
+void intel_display_power_put(struct drm_device *dev,
+ enum intel_display_power_domain domain)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains;
+
+ if (!HAS_POWER_WELL(dev))
+ return;
+
+ if (is_always_on_power_domain(dev, domain))
+ return;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ __intel_power_well_put(dev, &power_domains->power_wells[0]);
+ mutex_unlock(&power_domains->lock);
+}
+
+static struct i915_power_domains *hsw_pwr;
/* Display audio driver power well request */
void i915_request_power_well(void)
{
+ struct drm_i915_private *dev_priv;
+
if (WARN_ON(!hsw_pwr))
return;
- spin_lock_irq(&hsw_pwr->lock);
- if (!hsw_pwr->count++ &&
- !hsw_pwr->i915_request)
- __intel_set_power_well(hsw_pwr->device, true);
- spin_unlock_irq(&hsw_pwr->lock);
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+
+ mutex_lock(&hsw_pwr->lock);
+ __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
+ mutex_unlock(&hsw_pwr->lock);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
/* Display audio driver power well release */
void i915_release_power_well(void)
{
+ struct drm_i915_private *dev_priv;
+
if (WARN_ON(!hsw_pwr))
return;
- spin_lock_irq(&hsw_pwr->lock);
- WARN_ON(!hsw_pwr->count);
- if (!--hsw_pwr->count &&
- !hsw_pwr->i915_request)
- __intel_set_power_well(hsw_pwr->device, false);
- spin_unlock_irq(&hsw_pwr->lock);
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+
+ mutex_lock(&hsw_pwr->lock);
+ __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
+ mutex_unlock(&hsw_pwr->lock);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
-int i915_init_power_well(struct drm_device *dev)
+int intel_power_domains_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
- hsw_pwr = &dev_priv->power_well;
+ mutex_init(&power_domains->lock);
+ hsw_pwr = power_domains;
- hsw_pwr->device = dev;
- spin_lock_init(&hsw_pwr->lock);
- hsw_pwr->count = 0;
+ power_well = &power_domains->power_wells[0];
+ power_well->count = 0;
return 0;
}
-void i915_remove_power_well(struct drm_device *dev)
+void intel_power_domains_remove(struct drm_device *dev)
{
hsw_pwr = NULL;
}
-void intel_set_power_well(struct drm_device *dev, bool enable)
+static void intel_power_domains_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_power_well *power_well = &dev_priv->power_well;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
if (!HAS_POWER_WELL(dev))
return;
- if (!i915_disable_power_well && !enable)
- return;
+ mutex_lock(&power_domains->lock);
- spin_lock_irq(&power_well->lock);
- power_well->i915_request = enable;
+ power_well = &power_domains->power_wells[0];
+ __intel_set_power_well(dev, power_well->count > 0);
- /* only reject "disable" power well request */
- if (power_well->count && !enable) {
- spin_unlock_irq(&power_well->lock);
- return;
- }
-
- __intel_set_power_well(dev, enable);
- spin_unlock_irq(&power_well->lock);
+ mutex_unlock(&power_domains->lock);
}
/*
@@ -5411,7 +5720,7 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
* to be enabled, and it will only be disabled if none of the registers is
* requesting it to be enabled.
*/
-void intel_init_power_well(struct drm_device *dev)
+void intel_power_domains_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5419,7 +5728,8 @@ void intel_init_power_well(struct drm_device *dev)
return;
/* For now, we need the power well to be always enabled. */
- intel_set_power_well(dev, true);
+ intel_display_set_init_power(dev, true);
+ intel_power_domains_resume(dev);
/* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now. */
@@ -5684,7 +5994,5 @@ void intel_pm_init(struct drm_device *dev)
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
-
- INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 460ee1026fca..2dec134f75eb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -41,6 +41,16 @@ static inline int ring_space(struct intel_ring_buffer *ring)
return space;
}
+void __intel_ring_advance(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ ring->tail &= ring->size - 1;
+ if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+ return;
+ ring->write_tail(ring, ring->tail);
+}
+
static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
@@ -385,8 +395,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
int ret = 0;
u32 head;
- if (HAS_FORCE_WAKE(dev))
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv);
if (I915_NEED_GFX_HWS(dev))
intel_ring_setup_status_page(ring);
@@ -459,8 +468,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
out:
- if (HAS_FORCE_WAKE(dev))
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv);
return ret;
}
@@ -559,8 +567,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- if (HAS_L3_GPU_CACHE(dev))
- I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+ if (HAS_L3_DPF(dev))
+ I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
return ret;
}
@@ -593,7 +601,7 @@ update_mboxes(struct intel_ring_buffer *ring,
#define MBOX_UPDATE_DWORDS 4
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, mmio_offset);
- intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, MI_NOOP);
}
@@ -629,9 +637,9 @@ gen6_add_request(struct intel_ring_buffer *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
}
@@ -723,7 +731,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128; /* write to separate cachelines */
@@ -742,9 +750,9 @@ pc_render_add_request(struct intel_ring_buffer *ring)
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
}
@@ -963,9 +971,9 @@ i9xx_add_request(struct intel_ring_buffer *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_advance(ring);
+ __intel_ring_advance(ring);
return 0;
}
@@ -987,10 +995,10 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+ if (HAS_L3_DPF(dev) && ring->id == RCS)
I915_WRITE_IMR(ring,
~(ring->irq_enable_mask |
- GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+ GT_PARITY_ERROR(dev)));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1009,9 +1017,8 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
- I915_WRITE_IMR(ring,
- ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+ if (HAS_L3_DPF(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
else
I915_WRITE_IMR(ring, ~0);
ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1317,7 +1324,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
ret = intel_ring_idle(ring);
- if (ret)
+ if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
ring->name, ret);
@@ -1328,6 +1335,8 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
i915_gem_object_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
ring->obj = NULL;
+ ring->preallocated_lazy_request = NULL;
+ ring->outstanding_lazy_seqno = 0;
if (ring->cleanup)
ring->cleanup(ring);
@@ -1414,6 +1423,9 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
if (ret != -ENOSPC)
return ret;
+ /* force the tail write in case we have been skipping them */
+ __intel_ring_advance(ring);
+
trace_i915_ring_wait_begin(ring);
/* With GEM the hangcheck timer should kick us out of the loop,
* leaving it early runs the risk of corrupting GEM state (due
@@ -1475,7 +1487,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
int ret;
/* We need to add any requests required to flush the objects and ring */
- if (ring->outstanding_lazy_request) {
+ if (ring->outstanding_lazy_seqno) {
ret = i915_add_request(ring, NULL);
if (ret)
return ret;
@@ -1495,10 +1507,20 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
{
- if (ring->outstanding_lazy_request)
+ if (ring->outstanding_lazy_seqno)
return 0;
- return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+ if (ring->preallocated_lazy_request == NULL) {
+ struct drm_i915_gem_request *request;
+
+ request = kmalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
+ ring->preallocated_lazy_request = request;
+ }
+
+ return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}
static int __intel_ring_begin(struct intel_ring_buffer *ring,
@@ -1545,7 +1567,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- BUG_ON(ring->outstanding_lazy_request);
+ BUG_ON(ring->outstanding_lazy_seqno);
if (INTEL_INFO(ring->dev)->gen >= 6) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -1558,17 +1580,6 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
ring->hangcheck.seqno = seqno;
}
-void intel_ring_advance(struct intel_ring_buffer *ring)
-{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
- ring->tail &= ring->size - 1;
- if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
- return;
- ring->write_tail(ring, ring->tail);
-}
-
-
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 68b1ca974d59..71a73f4fe252 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -34,6 +34,7 @@ struct intel_hw_status_page {
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
enum intel_ring_hangcheck_action {
+ HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
HANGCHECK_KICK,
@@ -140,7 +141,8 @@ struct intel_ring_buffer {
/**
* Do we have some not yet emitted requests outstanding?
*/
- u32 outstanding_lazy_request;
+ struct drm_i915_gem_request *preallocated_lazy_request;
+ u32 outstanding_lazy_seqno;
bool gpu_caches_dirty;
bool fbc_dirty;
@@ -237,7 +239,12 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}
-void intel_ring_advance(struct intel_ring_buffer *ring);
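+/* intel_ring_advance() only wraps the software tail; __intel_ring_advance() also writes it to the hardware tail register unless the ring has been stopped via gpu_error.stop_rings. */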
+static inline void intel_ring_advance(struct intel_ring_buffer *ring)
+{
+ ring->tail &= ring->size - 1;
+}
+void __intel_ring_advance(struct intel_ring_buffer *ring);
+
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
@@ -258,8 +265,8 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
- BUG_ON(ring->outstanding_lazy_request == 0);
- return ring->outstanding_lazy_request;
+ BUG_ON(ring->outstanding_lazy_seqno == 0);
+ return ring->outstanding_lazy_seqno;
}
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 49482fd5b76c..a583e8f718a7 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -539,7 +539,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
goto log_fail;
while ((status == SDVO_CMD_STATUS_PENDING ||
- status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
+ status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
if (retry < 10)
msleep(15);
else
@@ -1068,7 +1068,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
{
- unsigned dotclock = pipe_config->adjusted_mode.clock;
+ unsigned dotclock = pipe_config->port_clock;
struct dpll *clock = &pipe_config->dpll;
/* SDVO TV has fixed PLL values depend on its clock range,
@@ -1133,7 +1133,6 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
*/
pipe_config->pixel_multiplier =
intel_sdvo_get_pixel_multiplier(adjusted_mode);
- adjusted_mode->clock *= pipe_config->pixel_multiplier;
if (intel_sdvo->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
@@ -1217,11 +1216,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
!intel_sdvo_set_tv_format(intel_sdvo))
return;
- /* We have tried to get input timing in mode_fixup, and filled into
- * adjusted_mode.
- */
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- input_dtd.part1.clock /= crtc->config.pixel_multiplier;
if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
@@ -1330,6 +1325,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_dtd dtd;
int encoder_pixel_multiplier = 0;
+ int dotclock;
u32 flags = 0, sdvox;
u8 val;
bool ret;
@@ -1368,6 +1364,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
}
+ dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
+
/* Cross check the port pixel multiplier with the sdvo encoder state. */
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
&val, 1)) {
@@ -1770,6 +1773,9 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
struct edid *edid;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
/* set the bus switch and get the modes */
edid = intel_sdvo_get_edid(connector);
@@ -1865,6 +1871,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
uint32_t reply = 0, format_map = 0;
int i;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
/* Read the list of supported input resolutions for the selected TV
* format.
*/
@@ -1899,6 +1908,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct drm_display_mode *newmode;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
* SDVO->LVDS transcoders can't cope with the EDID mode.
@@ -1930,7 +1942,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
break;
}
}
-
}
static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -1998,7 +2009,6 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
intel_sdvo_connector->tv_format);
intel_sdvo_destroy_enhance_property(connector);
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(intel_sdvo_connector);
}
@@ -2394,7 +2404,9 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ DRM_DEBUG_KMS("initialising DVI device %d\n", device);
+
+ intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
@@ -2442,7 +2454,9 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ DRM_DEBUG_KMS("initialising TV type %d\n", type);
+
+ intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
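+/* Read the current watermark register values back into dev_priv->wm.hw. */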
if (!intel_sdvo_connector)
return false;
@@ -2467,6 +2481,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
return true;
err:
+ drm_sysfs_connector_remove(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2479,7 +2494,9 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ DRM_DEBUG_KMS("initialising analog device %d\n", device);
+
+ intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
@@ -2510,7 +2527,9 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
+
+ intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
@@ -2534,6 +2553,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
return true;
err:
+ drm_sysfs_connector_remove(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2605,8 +2625,10 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
- if (intel_attached_encoder(connector) == &intel_sdvo->base)
+ if (intel_attached_encoder(connector) == &intel_sdvo->base) {
+ drm_sysfs_connector_remove(connector);
intel_sdvo_destroy(connector);
+ }
}
}
@@ -2876,7 +2898,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
int i;
- intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+ intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
return false;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 9a0e6c5ea540..9944d8135e87 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -25,7 +25,10 @@
#include "i915_drv.h"
#include "intel_drv.h"
-/* IOSF sideband */
+/*
+ * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
+ * VLV_VLV2_PUNIT_HAS_0.8.docx
+ */
static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
u32 port, u32 opcode, u32 addr, u32 *val)
{
@@ -101,19 +104,83 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
return val;
}
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+ PUNIT_OPCODE_REG_READ, reg, &val);
+ return val;
+}
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
- DPIO_OPCODE_REG_READ, reg, &val);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+ PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+ PUNIT_OPCODE_REG_READ, reg, &val);
+ return val;
+}
+
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+ PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+ PUNIT_OPCODE_REG_READ, reg, &val);
+ return val;
+}
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+ PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+ PUNIT_OPCODE_REG_READ, reg, &val);
+ return val;
+}
+
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+ PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+static u32 vlv_get_phy_port(enum pipe pipe)
+{
+ u32 port = IOSF_PORT_DPIO;
+
+ WARN_ON((pipe != PIPE_A) && (pipe != PIPE_B));
+
+ return port;
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+ DPIO_OPCODE_REG_READ, reg, &val);
return val;
}
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
{
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
DPIO_OPCODE_REG_WRITE, reg, &val);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index ad6ec4b39005..8afaad6bcc48 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -288,7 +288,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
dev_priv->sprite_scaling_enabled |= 1 << pipe;
if (!scaling_was_enabled) {
- intel_update_watermarks(dev);
+ intel_update_watermarks(crtc);
intel_wait_for_vblank(dev, pipe);
}
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -323,7 +323,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
- intel_update_watermarks(dev);
+ intel_update_watermarks(crtc);
}
static void
@@ -349,7 +349,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
- intel_update_watermarks(dev);
+ intel_update_watermarks(crtc);
}
static int
@@ -521,13 +521,28 @@ intel_enable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
- if (!intel_crtc->primary_disabled)
+ if (intel_crtc->primary_enabled)
return;
- intel_crtc->primary_disabled = false;
- intel_update_fbc(dev);
+ intel_crtc->primary_enabled = true;
I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ /*
+ * FIXME IPS should be fine as long as one plane is
+ * enabled, but in practice it seems to have problems
+ * when going from primary only to sprite only and vice
+ * versa.
+ */
+ if (intel_crtc->config.ips_enabled) {
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ hsw_enable_ips(intel_crtc);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
}
static void
@@ -538,13 +553,26 @@ intel_disable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
- if (intel_crtc->primary_disabled)
+ if (!intel_crtc->primary_enabled)
return;
- I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+ intel_crtc->primary_enabled = false;
- intel_crtc->primary_disabled = true;
- intel_update_fbc(dev);
+ mutex_lock(&dev->struct_mutex);
+ if (dev_priv->fbc.plane == intel_crtc->plane)
+ intel_disable_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * FIXME IPS should be fine as long as one plane is
+ * enabled, but in practice it seems to have problems
+ * when going from primary only to sprite only and vice
+ * versa.
+ */
+ hsw_disable_ips(intel_crtc);
+
+ I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
}
static int
@@ -623,15 +651,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj, *old_obj;
- int pipe = intel_plane->pipe;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
- int ret = 0;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *old_obj = intel_plane->obj;
+ int ret;
bool disable_primary = false;
bool visible;
int hscale, vscale;
@@ -652,29 +677,23 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
.y2 = crtc_y + crtc_h,
};
const struct drm_rect clip = {
- .x2 = crtc->mode.hdisplay,
- .y2 = crtc->mode.vdisplay,
+ .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+ .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
+ };
+ const struct {
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y, src_w, src_h;
+ } orig = {
+ .crtc_x = crtc_x,
+ .crtc_y = crtc_y,
+ .crtc_w = crtc_w,
+ .crtc_h = crtc_h,
+ .src_x = src_x,
+ .src_y = src_y,
+ .src_w = src_w,
+ .src_h = src_h,
};
-
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
- old_obj = intel_plane->obj;
-
- intel_plane->crtc_x = crtc_x;
- intel_plane->crtc_y = crtc_y;
- intel_plane->crtc_w = crtc_w;
- intel_plane->crtc_h = crtc_h;
- intel_plane->src_x = src_x;
- intel_plane->src_y = src_y;
- intel_plane->src_w = src_w;
- intel_plane->src_h = src_h;
-
- /* Pipe must be running... */
- if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
- DRM_DEBUG_KMS("Pipe disabled\n");
- return -EINVAL;
- }
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe) {
@@ -810,7 +829,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* we can disable the primary and save power.
*/
disable_primary = drm_rect_equals(&dst, &clip);
- WARN_ON(disable_primary && !visible);
+ WARN_ON(disable_primary && !visible && intel_crtc->active);
mutex_lock(&dev->struct_mutex);
@@ -820,27 +839,40 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* the sprite planes only require 128KiB alignment and 32 PTE padding.
*/
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
- if (ret)
- goto out_unlock;
- intel_plane->obj = obj;
-
- /*
- * Be sure to re-enable the primary before the sprite is no longer
- * covering it fully.
- */
- if (!disable_primary)
- intel_enable_primary(crtc);
+ mutex_unlock(&dev->struct_mutex);
- if (visible)
- intel_plane->update_plane(plane, crtc, fb, obj,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
- else
- intel_plane->disable_plane(plane, crtc);
+ if (ret)
+ return ret;
+
+ intel_plane->crtc_x = orig.crtc_x;
+ intel_plane->crtc_y = orig.crtc_y;
+ intel_plane->crtc_w = orig.crtc_w;
+ intel_plane->crtc_h = orig.crtc_h;
+ intel_plane->src_x = orig.src_x;
+ intel_plane->src_y = orig.src_y;
+ intel_plane->src_w = orig.src_w;
+ intel_plane->src_h = orig.src_h;
+ intel_plane->obj = obj;
- if (disable_primary)
- intel_disable_primary(crtc);
+ if (intel_crtc->active) {
+ /*
+ * Be sure to re-enable the primary before the sprite is no longer
+ * covering it fully.
+ */
+ if (!disable_primary)
+ intel_enable_primary(crtc);
+
+ if (visible)
+ intel_plane->update_plane(plane, crtc, fb, obj,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+ else
+ intel_plane->disable_plane(plane, crtc);
+
+ if (disable_primary)
+ intel_disable_primary(crtc);
+ }
/* Unpin old obj after new one is active to avoid ugliness */
if (old_obj) {
@@ -850,17 +882,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* wait for vblank to avoid ugliness, we only need to
* do the pin & ref bookkeeping.
*/
- if (old_obj != obj) {
- mutex_unlock(&dev->struct_mutex);
- intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
- mutex_lock(&dev->struct_mutex);
- }
+ if (old_obj != obj && intel_crtc->active)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(old_obj);
+ mutex_unlock(&dev->struct_mutex);
}
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
static int
@@ -868,7 +898,7 @@ intel_disable_plane(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
- int ret = 0;
+ struct intel_crtc *intel_crtc;
if (!plane->fb)
return 0;
@@ -876,21 +906,25 @@ intel_disable_plane(struct drm_plane *plane)
if (WARN_ON(!plane->crtc))
return -EINVAL;
- intel_enable_primary(plane->crtc);
- intel_plane->disable_plane(plane, plane->crtc);
+ intel_crtc = to_intel_crtc(plane->crtc);
- if (!intel_plane->obj)
- goto out;
+ if (intel_crtc->active) {
+ intel_enable_primary(plane->crtc);
+ intel_plane->disable_plane(plane, plane->crtc);
+ }
- intel_wait_for_vblank(dev, intel_plane->pipe);
+ if (intel_plane->obj) {
+ if (intel_crtc->active)
+ intel_wait_for_vblank(dev, intel_plane->pipe);
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(intel_plane->obj);
- intel_plane->obj = NULL;
- mutex_unlock(&dev->struct_mutex);
-out:
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(intel_plane->obj);
+ mutex_unlock(&dev->struct_mutex);
- return ret;
+ intel_plane->obj = NULL;
+ }
+
+ return 0;
}
static void intel_destroy_plane(struct drm_plane *plane)
@@ -1034,7 +1068,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
- intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
+ intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
if (!intel_plane)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index dd6f84bf6c22..18c406246a2d 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -912,7 +912,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
if (!tv_mode)
return false;
- pipe_config->adjusted_mode.clock = tv_mode->clock;
+ pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
@@ -1044,7 +1044,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
/* Enable two fixes for the chips that need them. */
- if (dev->pci_device < 0x2772)
+ if (dev->pdev->device < 0x2772)
tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
I915_WRITE(TV_H_CTL_1, hctl1);
@@ -1094,7 +1094,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
unsigned int xsize, ysize;
/* Pipe must be off here */
I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev_priv, intel_crtc->plane);
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
/* Wait for vblank for the disable to take effect */
if (IS_GEN2(dev))
@@ -1123,7 +1123,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
I915_WRITE(pipeconf_reg, pipeconf);
I915_WRITE(dspcntr_reg, dspcntr);
- intel_flush_display_plane(dev_priv, intel_crtc->plane);
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
}
j = 0;
@@ -1433,7 +1433,6 @@ intel_tv_get_modes(struct drm_connector *connector)
static void
intel_tv_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -1518,7 +1517,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
static int tv_is_present_in_vbt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
+ union child_device_config *p_child;
int i, ret;
if (!dev_priv->vbt.child_dev_num)
@@ -1530,13 +1529,13 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
/*
* If the device type is not TV, continue.
*/
- if (p_child->device_type != DEVICE_TYPE_INT_TV &&
- p_child->device_type != DEVICE_TYPE_TV)
+ if (p_child->old.device_type != DEVICE_TYPE_INT_TV &&
+ p_child->old.device_type != DEVICE_TYPE_TV)
continue;
/* Only when the addin_offset is non-zero, it is regarded
* as present.
*/
- if (p_child->addin_offset) {
+ if (p_child->old.addin_offset) {
ret = 1;
break;
}
@@ -1590,12 +1589,12 @@ intel_tv_init(struct drm_device *dev)
(tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
return;
- intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
+ intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL);
if (!intel_tv) {
return;
}
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_tv);
return;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8649f1c36b00..f6fae35c568e 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -204,60 +204,34 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
gen6_gt_check_fifodbg(dev_priv);
}
-void intel_uncore_early_sanitize(struct drm_device *dev)
+static void gen6_force_wake_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
+ unsigned long irqflags;
- if (HAS_FPGA_DBG_UNCLAIMED(dev))
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (--dev_priv->uncore.forcewake_count == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void intel_uncore_init(struct drm_device *dev)
+void intel_uncore_early_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_VALLEYVIEW(dev)) {
- dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
- } else if (IS_HASWELL(dev)) {
- dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
- dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
- } else if (IS_IVYBRIDGE(dev)) {
- u32 ecobus;
-
- /* IVB configs may use multi-threaded forcewake */
-
- /* A small trick here - if the bios hasn't configured
- * MT forcewake, and if the device is in RC6, then
- * force_wake_mt_get will not wake the device and the
- * ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT
- * forcewake being disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ if (HAS_FPGA_DBG_UNCLAIMED(dev))
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- } else {
- DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
- DRM_INFO("when using vblank-synced partial screen updates.\n");
- dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_put;
- }
- } else if (IS_GEN6(dev)) {
- dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_put;
+ if (IS_HASWELL(dev) &&
+ (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
+ /* The docs do not explain exactly how the calculation can be
+ * made. It is somewhat guessable, but for now, it's always
+ * 128MB.
+ * NB: We can't write IDICR yet because we do not have gt funcs
+ * set up */
+ dev_priv->ellc_size = 128;
+ DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
}
@@ -276,10 +250,26 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev)
void intel_uncore_sanitize(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg_val;
+
intel_uncore_forcewake_reset(dev);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
+
+ /* Turn off power gating; required especially on BIOS-less systems */
+ if (IS_VALLEYVIEW(dev)) {
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
+
+ if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
+ vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ }
}
/*
@@ -292,6 +282,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
+ if (!dev_priv->uncore.funcs.force_wake_get)
+ return;
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv);
@@ -305,17 +298,22 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
+ if (!dev_priv->uncore.funcs.force_wake_put)
+ return;
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ if (--dev_priv->uncore.forcewake_count == 0) {
+ dev_priv->uncore.forcewake_count++;
+ mod_delayed_work(dev_priv->wq,
+ &dev_priv->uncore.force_wake_work,
+ 1);
+ }
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000 && (reg) != FORCEWAKE)
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
@@ -329,8 +327,7 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
- if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
- (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
DRM_ERROR("Unknown unclaimed register before writing to %x\n",
reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
@@ -340,20 +337,43 @@ hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
- if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
- (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
DRM_ERROR("Unclaimed write to %x\n", reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
-#define __i915_read(x) \
-u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
+#define REG_READ_HEADER(x) \
unsigned long irqflags; \
u##x val = 0; \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
- if (dev_priv->info->gen == 5) \
- ilk_dummy_write(dev_priv); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define REG_READ_FOOTER \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+ return val
+
+#define __gen4_read(x) \
+static u##x \
+gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ REG_READ_HEADER(x); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ REG_READ_FOOTER; \
+}
+
+#define __gen5_read(x) \
+static u##x \
+gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ REG_READ_HEADER(x); \
+ ilk_dummy_write(dev_priv); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ REG_READ_FOOTER; \
+}
+
+#define __gen6_read(x) \
+static u##x \
+gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ REG_READ_HEADER(x); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_get(dev_priv); \
@@ -363,28 +383,73 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
+ REG_READ_FOOTER; \
+}
+
+__gen6_read(8)
+__gen6_read(16)
+__gen6_read(32)
+__gen6_read(64)
+__gen5_read(8)
+__gen5_read(16)
+__gen5_read(32)
+__gen5_read(64)
+__gen4_read(8)
+__gen4_read(16)
+__gen4_read(32)
+__gen4_read(64)
+
+#undef __gen6_read
+#undef __gen5_read
+#undef __gen4_read
+#undef REG_READ_FOOTER
+#undef REG_READ_HEADER
+
+#define REG_WRITE_HEADER \
+ unsigned long irqflags; \
+ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define __gen4_write(x) \
+static void \
+gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ REG_WRITE_HEADER; \
+ __raw_i915_write##x(dev_priv, reg, val); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
- trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
- return val; \
}
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
+#define __gen5_write(x) \
+static void \
+gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ REG_WRITE_HEADER; \
+ ilk_dummy_write(dev_priv); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
-#define __i915_write(x) \
-void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
- unsigned long irqflags; \
+#define __gen6_write(x) \
+static void \
+gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
- trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+ REG_WRITE_HEADER; \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+ } \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (unlikely(__fifo_ret)) { \
+ gen6_gt_check_fifodbg(dev_priv); \
+ } \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+
+#define __hsw_write(x) \
+static void \
+hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ u32 __fifo_ret = 0; \
+ REG_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- if (dev_priv->info->gen == 5) \
- ilk_dummy_write(dev_priv); \
hsw_unclaimed_reg_clear(dev_priv, reg); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
@@ -393,11 +458,134 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool tr
hsw_unclaimed_reg_check(dev_priv, reg); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
+
+__hsw_write(8)
+__hsw_write(16)
+__hsw_write(32)
+__hsw_write(64)
+__gen6_write(8)
+__gen6_write(16)
+__gen6_write(32)
+__gen6_write(64)
+__gen5_write(8)
+__gen5_write(16)
+__gen5_write(32)
+__gen5_write(64)
+__gen4_write(8)
+__gen4_write(16)
+__gen4_write(32)
+__gen4_write(64)
+
+#undef __hsw_write
+#undef __gen6_write
+#undef __gen5_write
+#undef __gen4_write
+#undef REG_WRITE_HEADER
+
+void intel_uncore_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
+ gen6_force_wake_work);
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
+ } else if (IS_HASWELL(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* IVB configs may use multi-threaded forcewake */
+
+ /* A small trick here - if the bios hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+ * ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = __raw_i915_read32(dev_priv, ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ } else {
+ DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+ DRM_INFO("when using vblank-synced partial screen updates.\n");
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_put;
+ }
+ } else if (IS_GEN6(dev)) {
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_put;
+ }
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ if (IS_HASWELL(dev)) {
+ dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
+ dev_priv->uncore.funcs.mmio_writew = hsw_write16;
+ dev_priv->uncore.funcs.mmio_writel = hsw_write32;
+ dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
+ } else {
+ dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen6_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen6_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
+ }
+ dev_priv->uncore.funcs.mmio_readb = gen6_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen6_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen6_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ break;
+ case 5:
+ dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen5_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen5_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
+ dev_priv->uncore.funcs.mmio_readb = gen5_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen5_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen5_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen5_read64;
+ break;
+ case 4:
+ case 3:
+ case 2:
+ dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen4_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen4_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
+ dev_priv->uncore.funcs.mmio_readb = gen4_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen4_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen4_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen4_read64;
+ break;
+ }
+}
+
+void intel_uncore_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ flush_delayed_work(&dev_priv->uncore.force_wake_work);
+
+ /* Paranoia: make sure we have disabled everything before we exit. */
+ intel_uncore_sanitize(dev);
+}
static const struct register_whitelist {
uint64_t offset;
@@ -445,36 +633,6 @@ int i915_reg_read_ioctl(struct drm_device *dev,
return 0;
}
-static int i8xx_do_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_I85X(dev))
- return -ENODEV;
-
- I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
- POSTING_READ(D_STATE);
-
- if (IS_I830(dev) || IS_845G(dev)) {
- I915_WRITE(DEBUG_RESET_I830,
- DEBUG_RESET_DISPLAY |
- DEBUG_RESET_RENDER |
- DEBUG_RESET_FULL);
- POSTING_READ(DEBUG_RESET_I830);
- msleep(1);
-
- I915_WRITE(DEBUG_RESET_I830, 0);
- POSTING_READ(DEBUG_RESET_I830);
- }
-
- msleep(1);
-
- I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
- POSTING_READ(D_STATE);
-
- return 0;
-}
-
static int i965_reset_complete(struct drm_device *dev)
{
u8 gdrst;
@@ -576,7 +734,6 @@ int intel_gpu_reset(struct drm_device *dev)
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);
case 4: return i965_do_reset(dev);
- case 2: return i8xx_do_reset(dev);
default: return -ENODEV;
}
}
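
The intel_uncore.c rework above also changes how the last forcewake reference is dropped: gen6_gt_force_wake_put() no longer calls force_wake_put() synchronously when the count reaches zero, it keeps one reference and schedules a delayed work item (gen6_force_wake_work) that performs the real put roughly a jiffy later, so bursts of register accesses do not bounce the hardware in and out of forcewake. A minimal sketch of that refcount-plus-deferred-release pattern, using the standard workqueue API with hypothetical names (my_uncore, my_forcewake_put; system_wq stands in for the driver's own dev_priv->wq):

    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct my_uncore {
            spinlock_t lock;
            unsigned int forcewake_count;
            struct delayed_work release_work;   /* deferred final "put" */
            void (*hw_force_wake_put)(struct my_uncore *uncore);
    };

    /* Worker: drops the reference kept by my_forcewake_put() and, if it was
     * the last one, performs the real hardware put. */
    static void my_forcewake_release(struct work_struct *work)
    {
            struct my_uncore *uncore =
                    container_of(work, struct my_uncore, release_work.work);
            unsigned long flags;

            spin_lock_irqsave(&uncore->lock, flags);
            if (--uncore->forcewake_count == 0)
                    uncore->hw_force_wake_put(uncore);
            spin_unlock_irqrestore(&uncore->lock, flags);
    }

    static void my_forcewake_put(struct my_uncore *uncore)
    {
            unsigned long flags;

            spin_lock_irqsave(&uncore->lock, flags);
            if (--uncore->forcewake_count == 0) {
                    /* keep one reference; the worker releases it later */
                    uncore->forcewake_count++;
                    mod_delayed_work(system_wq, &uncore->release_work, 1);
            }
            spin_unlock_irqrestore(&uncore->lock, flags);
    }

The work item would be wired up at init time with INIT_DELAYED_WORK(&uncore->release_work, my_forcewake_release), mirroring the INIT_DELAYED_WORK() call added to intel_uncore_init(), and flushed on teardown as intel_uncore_fini() does with flush_delayed_work().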
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index cc3166dd445a..087db33f6cff 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -406,11 +406,6 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
- dev->counters += 3;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
-
ret = drm_vblank_init(dev, 1);
if (ret) {
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 598c281def0a..2b0ceb8dc11b 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -169,5 +169,5 @@ void mga_driver_irq_uninstall(struct drm_device *dev)
/* Disable *all* interrupts */
MGA_WRITE(MGA_IEN, 0);
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
}
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index b487cdec5ee7..3a1c5fbae54a 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -5,6 +5,7 @@ config DRM_MGAG200
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for the MGA G200 server chips, it
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index fcce7b2f8011..f15ea3c4a90a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -99,7 +99,6 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_init_object = mgag200_gem_init_object,
.gem_free_object = mgag200_gem_free_object,
.dumb_create = mgag200_dumb_create,
.dumb_map_offset = mgag200_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index baaae19332e2..cf11ee68a6d9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -260,7 +260,6 @@ int mgag200_driver_unload(struct drm_device *dev);
int mgag200_gem_create(struct drm_device *dev,
u32 size, bool iskernel,
struct drm_gem_object **obj);
-int mgag200_gem_init_object(struct drm_gem_object *obj);
int mgag200_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 0f8b861b10b3..b1120cb1db6d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,12 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
return 0;
}
-int mgag200_gem_init_object(struct drm_gem_object *obj)
-{
- BUG();
- return 0;
-}
-
void mgag200_bo_unref(struct mgag200_bo **bo)
{
struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index a06c19cc56f8..f39ab7554fc9 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -14,6 +14,7 @@ config DRM_MSM
config DRM_MSM_FBDEV
bool "Enable legacy fbdev support for MSM modesetting driver"
depends on DRM_MSM
+ select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ff80f12480ea..7cf787d697b1 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -3,6 +3,7 @@ config DRM_NOUVEAU
depends on DRM && PCI
select FW_LOADER
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index 8b3adec5fbb1..eae939d3fc1a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -41,7 +41,8 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
if (!client)
return false;
- if (!client->driver || client->driver->detect(client, info)) {
+ if (!client->dev.driver ||
+ to_i2c_driver(client->dev.driver)->detect(client, info)) {
i2c_unregister_device(client);
return false;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 2e70462883e8..2a15b98b4d2b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -210,8 +210,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
sim_data.nvclk_khz = NVClk;
sim_data.bpp = bpp;
sim_data.two_heads = nv_two_heads(dev);
- if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
- (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
+ if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
+ (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
uint32_t type;
pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
@@ -256,8 +256,8 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
if (nv_device(drm->device)->card_type < NV_20)
nv04_update_arb(dev, vclk, bpp, burst, lwm);
- else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
- (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+ else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+ (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
*burst = 128;
*lwm = 0x0480;
} else
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index d4fbf11360fe..0e3270c3ffd2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -326,8 +326,6 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
regp->MiscOutReg = 0x23; /* +hsync +vsync */
}
- regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
-
/*
* Time Sequencer
*/
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 93dd23ff0093..59d1c040b84f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -490,8 +490,8 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
/* BIOS scripts usually take care of the backlight, thanks
* Apple for your consistency.
*/
- if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 ||
- dev->pci_device == 0x0189 || dev->pci_device == 0x0329) {
+ if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
+ dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
if (mode == DRM_MODE_DPMS_ON) {
nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 9928187f0a7d..2cf65e0b517e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -127,7 +127,7 @@ static inline bool
nv_two_heads(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- const int impl = dev->pci_device & 0x0ff0;
+ const int impl = dev->pdev->device & 0x0ff0;
if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
@@ -139,14 +139,14 @@ nv_two_heads(struct drm_device *dev)
static inline bool
nv_gf4_disp_arch(struct drm_device *dev)
{
- return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
+ return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110;
}
static inline bool
nv_two_reg_pll(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- const int impl = dev->pci_device & 0x0ff0;
+ const int impl = dev->pdev->device & 0x0ff0;
if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
return true;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 973056b86207..f8dee834527f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -220,7 +220,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
int ret;
if (plltype == PLL_MEMORY &&
- (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+ (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
uint32_t mpllP;
pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
@@ -230,7 +230,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
return 400000 / mpllP;
} else
if (plltype == PLL_MEMORY &&
- (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+ (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
uint32_t clock;
pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 8f467e7bfd19..72055a35f845 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -130,7 +130,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
if (chan->ntfy) {
nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
nouveau_bo_unpin(chan->ntfy);
- drm_gem_object_unreference_unlocked(chan->ntfy->gem);
+ drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
}
if (chan->heap.block_size)
@@ -178,10 +178,10 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = device->chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
- getparam->value = dev->pci_vendor;
+ getparam->value = dev->pdev->vendor;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
- getparam->value = dev->pci_device;
+ getparam->value = dev->pdev->device;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
if (drm_pci_device_is_agp(dev))
@@ -320,7 +320,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
goto done;
}
- ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
+ ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
&init->notifier_handle);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dd7d2e182719..cfbeee607b3a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -253,18 +253,15 @@ static struct vga_switcheroo_handler nouveau_dsm_handler = {
static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
{
- acpi_handle dhandle, nvidia_handle;
- acpi_status status;
+ acpi_handle dhandle;
int retval = 0;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
- status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_has_method(dhandle, "_DSM"))
return false;
- }
if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 2ffad2176b7f..630f6e84fc01 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -82,7 +82,7 @@ nv40_backlight_init(struct drm_connector *connector)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 31;
- bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
+ bd = backlight_device_register("nv_backlight", connector->kdev, drm,
&nv40_bl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
@@ -204,7 +204,7 @@ nv50_backlight_init(struct drm_connector *connector)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 100;
- bd = backlight_device_register("nv_backlight", &connector->kdev,
+ bd = backlight_device_register("nv_backlight", connector->kdev,
nv_encoder, ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 3e7287675ecf..4c3feaaa1037 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -127,8 +127,8 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
#ifdef __powerpc__
/* Powerbook specific quirks */
if (script == LVDS_RESET &&
- (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
- dev->pci_device == 0x0329))
+ (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 ||
+ dev->pdev->device == 0x0329))
nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 755c38d06271..4172854d4365 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -146,7 +146,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- if (unlikely(nvbo->gem))
+ if (unlikely(nvbo->gem.filp))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
WARN_ON(nvbo->pin_refcnt > 0);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
@@ -1267,7 +1267,7 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
+ return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 653dbbbd4fa1..ff17c1f432fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -27,7 +27,10 @@ struct nouveau_bo {
u32 tile_flags;
struct nouveau_drm_tile *tile;
- struct drm_gem_object *gem;
+ /* Only valid if allocated via nouveau_gem_new() and iff you hold a
+ * gem reference to it! For debugging, use gem.filp != NULL to test
+ * whether it is valid. */
+ struct drm_gem_object gem;
/* protect by the ttm reservation lock */
int pin_refcnt;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c5b36f9e9a10..2136d0038252 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -215,8 +215,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->doublescan_allowed = true;
if (nv_device(drm->device)->card_type == NV_20 ||
(nv_device(drm->device)->card_type == NV_10 &&
- (dev->pci_device & 0x0ff0) != 0x0100 &&
- (dev->pci_device & 0x0ff0) != 0x0150))
+ (dev->pdev->device & 0x0ff0) != 0x0100 &&
+ (dev->pdev->device & 0x0ff0) != 0x0150))
/* HW is broken */
connector->interlace_allowed = false;
else
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7848590f5568..bdd5cf71a24c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -50,7 +50,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
if (fb->nvbo)
- drm_gem_object_unreference_unlocked(fb->nvbo->gem);
+ drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
drm_framebuffer_cleanup(drm_fb);
kfree(fb);
@@ -63,7 +63,7 @@ nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
+ return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
}
static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
@@ -674,8 +674,8 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
if (ret)
return ret;
- ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
- drm_gem_object_unreference_unlocked(bo->gem);
+ ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
+ drm_gem_object_unreference_unlocked(&bo->gem);
return ret;
}
@@ -688,7 +688,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, handle);
if (gem) {
- struct nouveau_bo *bo = gem->driver_private;
+ struct nouveau_bo *bo = nouveau_gem_object(gem);
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
drm_gem_object_unreference_unlocked(gem);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e893c5362402..428d818be775 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -834,7 +834,6 @@ driver = {
.gem_prime_vmap = nouveau_gem_prime_vmap,
.gem_prime_vunmap = nouveau_gem_prime_vunmap,
- .gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
.gem_open_object = nouveau_gem_object_open,
.gem_close_object = nouveau_gem_object_close,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index a86ecf65c164..c80b519b513a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -420,7 +420,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
nouveau_bo_unpin(nouveau_fb->nvbo);
- drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
+ drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
}
drm_fb_helper_fini(&fbcon->helper);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f32b71238c03..418a6177a653 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -34,29 +34,20 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
-int
-nouveau_gem_object_new(struct drm_gem_object *gem)
-{
- return 0;
-}
-
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
- struct nouveau_bo *nvbo = gem->driver_private;
+ struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct ttm_buffer_object *bo = &nvbo->bo;
- if (!nvbo)
- return;
- nvbo->gem = NULL;
-
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
- ttm_bo_unref(&bo);
-
drm_gem_object_release(gem);
- kfree(gem);
+
+ /* reset filp so nouveau_bo_del_ttm() can test for it */
+ gem->filp = NULL;
+ ttm_bo_unref(&bo);
}
int
@@ -186,14 +177,15 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
if (nv_device(drm->device)->card_type >= NV_50)
nvbo->valid_domains &= domain;
- nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
- if (!nvbo->gem) {
+ /* Initialize the embedded gem-object. We return a single gem-reference
+ * to the caller, instead of a normal nouveau_bo ttm reference. */
+ ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+ if (ret) {
nouveau_bo_ref(NULL, pnvbo);
return -ENOMEM;
}
- nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
- nvbo->gem->driver_private = nvbo;
+ nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
return 0;
}
@@ -250,15 +242,15 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
if (ret)
return ret;
- ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+ ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
if (ret == 0) {
- ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
+ ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
if (ret)
drm_gem_handle_delete(file_priv, req->info.handle);
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(nvbo->gem);
+ drm_gem_object_unreference_unlocked(&nvbo->gem);
return ret;
}
@@ -266,7 +258,7 @@ static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
uint32_t write_domains, uint32_t valid_domains)
{
- struct nouveau_bo *nvbo = gem->driver_private;
+ struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct ttm_buffer_object *bo = &nvbo->bo;
uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
@@ -327,7 +319,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
- drm_gem_object_unreference_unlocked(nvbo->gem);
+ drm_gem_object_unreference_unlocked(&nvbo->gem);
}
}
@@ -376,7 +368,7 @@ retry:
validate_fini(op, NULL);
return -ENOENT;
}
- nvbo = gem->driver_private;
+ nvbo = nouveau_gem_object(gem);
if (nvbo == res_bo) {
res_bo = NULL;
drm_gem_object_unreference_unlocked(gem);
@@ -478,7 +470,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
return ret;
}
- ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
+ ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 502e4290aa8f..7caca057bc38 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -12,14 +12,13 @@
static inline struct nouveau_bo *
nouveau_gem_object(struct drm_gem_object *gem)
{
- return gem ? gem->driver_private : NULL;
+ return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
}
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, int size, int align,
uint32_t domain, uint32_t tile_mode,
uint32_t tile_flags, struct nouveau_bo **);
-extern int nouveau_gem_object_new(struct drm_gem_object *);
extern void nouveau_gem_object_del(struct drm_gem_object *);
extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
extern void nouveau_gem_object_close(struct drm_gem_object *,
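
The nouveau changes above drop the separately allocated drm_gem_object that used to hang off nouveau_bo via gem->driver_private and instead embed the gem object inside struct nouveau_bo, recovering the container with container_of() in nouveau_gem_object(). A self-contained userspace sketch of that embed-and-recover pattern, with hypothetical stand-in types (gem_object, my_bo) rather than the real DRM structs:

    #include <stddef.h>
    #include <stdio.h>

    struct gem_object { void *filp; };      /* stand-in for drm_gem_object */

    struct my_bo {                          /* stand-in for nouveau_bo */
            int pin_refcnt;
            struct gem_object gem;          /* embedded, not a pointer */
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Recover the containing buffer object from the gem object callers pass
     * around, the same shape as nouveau_gem_object() above. */
    static struct my_bo *to_my_bo(struct gem_object *gem)
    {
            return gem ? container_of(gem, struct my_bo, gem) : NULL;
    }

    int main(void)
    {
            struct my_bo bo = { .pin_refcnt = 1 };
            struct gem_object *gem = &bo.gem;   /* what the API hands out */

            printf("pin_refcnt = %d\n", to_my_bo(gem)->pin_refcnt);  /* 1 */
            return 0;
    }

Because the gem object now lives inside the buffer object, a single gem reference keeps the whole allocation alive, which is why nouveau_gem_new() and the prime import path return one gem reference instead of a separate ttm reference, and why nouveau_bo_del_ttm() tests gem.filp to detect a still-attached gem object.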
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index e90468d5e5c0..51a2cb102b44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -71,14 +71,16 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
return ERR_PTR(ret);
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
- nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
- if (!nvbo->gem) {
+
+ /* Initialize the embedded gem-object. We return a single gem-reference
+ * to the caller, instead of a normal nouveau_bo ttm reference. */
+ ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+ if (ret) {
nouveau_bo_ref(NULL, &nvbo);
return ERR_PTR(-ENOMEM);
}
- nvbo->gem->driver_private = nvbo;
- return nvbo->gem;
+ return &nvbo->gem;
}
int nouveau_gem_prime_pin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 20c41e73d448..6c220cd3497a 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -5,6 +5,7 @@ config DRM_OMAP
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
depends on OMAP2_DSS
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index acf667859cb6..701c4c10e08b 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -664,8 +664,9 @@ static int omap_dmm_probe(struct platform_device *dev)
}
/* set dma mask for device */
- /* NOTE: this is a workaround for the hwmod not initializing properly */
- dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 2603d909f49c..e7fa3cd96743 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -620,7 +620,6 @@ static struct drm_driver omap_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = omap_gem_prime_export,
.gem_prime_import = omap_gem_prime_import,
- .gem_init_object = omap_gem_init_object,
.gem_free_object = omap_gem_free_object,
.gem_vm_ops = &omap_gem_vm_ops,
.dumb_create = omap_gem_dumb_create,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 30b95b736658..07847693cf49 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -220,7 +220,6 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
void omap_gem_free_object(struct drm_gem_object *obj);
-int omap_gem_init_object(struct drm_gem_object *obj);
void *omap_gem_vaddr(struct drm_gem_object *obj);
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 533f6ebec531..5aec3e81fe24 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1274,11 +1274,6 @@ unlock:
return ret;
}
-int omap_gem_init_object(struct drm_gem_object *obj)
-{
- return -EINVAL; /* unused */
-}
-
/* don't call directly.. called from GEM core when it is time to actually
* free the object..
*/
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 9263db117ff8..cb858600185f 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -261,7 +261,7 @@ int omap_drm_irq_install(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
return -EBUSY;
}
- dev->irq_enabled = 1;
+ dev->irq_enabled = true;
mutex_unlock(&dev->struct_mutex);
/* Before installing handler */
@@ -272,7 +272,7 @@ int omap_drm_irq_install(struct drm_device *dev)
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -283,7 +283,7 @@ int omap_drm_irq_install(struct drm_device *dev)
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
dispc_free_irq(dev);
}
@@ -294,11 +294,12 @@ int omap_drm_irq_install(struct drm_device *dev)
int omap_drm_irq_uninstall(struct drm_device *dev)
{
unsigned long irqflags;
- int irq_enabled, i;
+ bool irq_enabled;
+ int i;
mutex_lock(&dev->struct_mutex);
irq_enabled = dev->irq_enabled;
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
/*
@@ -307,9 +308,9 @@ int omap_drm_irq_uninstall(struct drm_device *dev)
if (dev->num_crtcs) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vbl_queue[i]);
- dev->vblank_enabled[i] = 0;
- dev->last_vblank[i] =
+ DRM_WAKEUP(&dev->vblank[i].queue);
+ dev->vblank[i].enabled = false;
+ dev->vblank[i].last =
dev->driver->get_vblank_counter(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index d6c12796023c..037d324bf58f 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -6,6 +6,7 @@ config DRM_QXL
select FB_SYS_IMAGEBLIT
select FB_DEFERRED_IO
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
help
QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 835caba026d3..61974cb9d205 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -107,10 +107,17 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
qxl_io_log(qdev, "failed crc check for client_monitors_config,"
" retrying\n");
}
- drm_helper_hpd_irq_event(qdev->ddev);
+
+ if (!drm_helper_hpd_irq_event(qdev->ddev)) {
+ /* notify that the monitor configuration changed, so
+ userspace can adjust to the arbitrary resolution */
+ drm_kms_helper_hotplug_event(qdev->ddev);
+ }
}
-static int qxl_add_monitors_config_modes(struct drm_connector *connector)
+static int qxl_add_monitors_config_modes(struct drm_connector *connector,
+ unsigned *pwidth,
+ unsigned *pheight)
{
struct drm_device *dev = connector->dev;
struct qxl_device *qdev = dev->dev_private;
@@ -126,11 +133,15 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector)
mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
false);
mode->type |= DRM_MODE_TYPE_PREFERRED;
+ *pwidth = head->width;
+ *pheight = head->height;
drm_mode_probed_add(connector, mode);
return 1;
}
-static int qxl_add_common_modes(struct drm_connector *connector)
+static int qxl_add_common_modes(struct drm_connector *connector,
+ unsigned pwidth,
+ unsigned pheight)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode = NULL;
@@ -159,12 +170,9 @@ static int qxl_add_common_modes(struct drm_connector *connector)
};
for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
- if (common_modes[i].w < 320 || common_modes[i].h < 200)
- continue;
-
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
60, false, false, false);
- if (common_modes[i].w == 1024 && common_modes[i].h == 768)
+ if (common_modes[i].w == pwidth && common_modes[i].h == pheight)
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
}
@@ -720,16 +728,18 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
{
int ret = 0;
struct qxl_device *qdev = connector->dev->dev_private;
+ unsigned pwidth = 1024;
+ unsigned pheight = 768;
DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config);
/* TODO: what should we do here? only show the configured modes for the
* device, or allow the full list, or both? */
if (qdev->monitors_config && qdev->monitors_config->count) {
- ret = qxl_add_monitors_config_modes(connector);
+ ret = qxl_add_monitors_config_modes(connector, &pwidth, &pheight);
if (ret < 0)
return ret;
}
- ret += qxl_add_common_modes(connector);
+ ret += qxl_add_common_modes(connector, pwidth, pheight);
return ret;
}
@@ -793,7 +803,10 @@ static enum drm_connector_status qxl_conn_detect(
qdev->client_monitors_config->count > output->index &&
qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
- DRM_DEBUG("\n");
+ DRM_DEBUG("#%d connected: %d\n", output->index, connected);
+ if (!connected)
+ qxl_monitors_config_set(qdev, output->index, 0, 0, 0, 0, 0);
+
return connected ? connector_status_connected
: connector_status_disconnected;
}
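
The qxl_display.c changes above thread the client-requested head size out of qxl_add_monitors_config_modes() (via the new pwidth/pheight parameters) into qxl_add_common_modes(), so the common mode matching that size is flagged DRM_MODE_TYPE_PREFERRED instead of a hard-coded 1024x768; and when drm_helper_hpd_irq_event() reports no connector status change, drm_kms_helper_hotplug_event() is called anyway so clients still learn about the changed monitor configuration. The preferred-mode selection itself reduces to a match over a mode table; a standalone sketch with hypothetical types (struct mode, mark_preferred):

    #include <stdbool.h>
    #include <stdio.h>

    struct mode { int w, h; bool preferred; };

    /* Flag as preferred whichever common mode matches the size the client
     * asked for; every other entry is cleared. */
    static void mark_preferred(struct mode *modes, int n, int pw, int ph)
    {
            for (int i = 0; i < n; i++)
                    modes[i].preferred = (modes[i].w == pw && modes[i].h == ph);
    }

    int main(void)
    {
            struct mode common[] = { {800, 600}, {1024, 768}, {1920, 1080} };

            mark_preferred(common, 3, 1920, 1080);  /* client asked for 1920x1080 */
            for (int i = 0; i < 3; i++)
                    printf("%dx%d%s\n", common[i].w, common[i].h,
                           common[i].preferred ? " (preferred)" : "");
            return 0;
    }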
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 514118ae72d4..fee8748bdca5 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -225,7 +225,6 @@ static struct drm_driver qxl_driver = {
.debugfs_init = qxl_debugfs_init,
.debugfs_cleanup = qxl_debugfs_takedown,
#endif
- .gem_init_object = qxl_gem_object_init,
.gem_free_object = qxl_gem_object_free,
.gem_open_object = qxl_gem_object_open,
.gem_close_object = qxl_gem_object_close,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index f7c9adde46a0..41d22ed26060 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -412,7 +412,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
struct qxl_surface *surf,
struct qxl_bo **qobj,
uint32_t *handle);
-int qxl_gem_object_init(struct drm_gem_object *obj);
void qxl_gem_object_free(struct drm_gem_object *gobj);
int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
void qxl_gem_object_close(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 1648e4125af7..b96f0c9d89b2 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -28,12 +28,6 @@
#include "qxl_drv.h"
#include "qxl_object.h"
-int qxl_gem_object_init(struct drm_gem_object *obj)
-{
- /* we do nothings here */
- return 0;
-}
-
void qxl_gem_object_free(struct drm_gem_object *gobj)
{
struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 00885417ffff..fb3ae07a1469 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -690,8 +690,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
- if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
- dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+ if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 32923d2f6002..2cb08f93236d 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -213,7 +213,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name),
"radeon_bl%d", dev->primary->index);
- bd = backlight_device_register(bl_name, &drm_connector->kdev,
+ bd = backlight_device_register(bl_name, drm_connector->kdev,
pdata, &radeon_atom_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
@@ -707,24 +707,37 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
- (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- (radeon_connector->audio == RADEON_AUDIO_AUTO)))
- return ATOM_ENCODER_MODE_HDMI;
- else if (radeon_connector->use_digital)
+ if (radeon_audio != 0) {
+ if (radeon_connector->use_digital &&
+ (radeon_connector->audio == RADEON_AUDIO_ENABLE))
+ return ATOM_ENCODER_MODE_HDMI;
+ else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ (radeon_connector->audio == RADEON_AUDIO_AUTO))
+ return ATOM_ENCODER_MODE_HDMI;
+ else if (radeon_connector->use_digital)
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_CRT;
+ } else if (radeon_connector->use_digital) {
return ATOM_ENCODER_MODE_DVI;
- else
+ } else {
return ATOM_ENCODER_MODE_CRT;
+ }
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
- (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- (radeon_connector->audio == RADEON_AUDIO_AUTO)))
- return ATOM_ENCODER_MODE_HDMI;
- else
+ if (radeon_audio != 0) {
+ if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
+ return ATOM_ENCODER_MODE_HDMI;
+ else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ (radeon_connector->audio == RADEON_AUDIO_AUTO))
+ return ATOM_ENCODER_MODE_HDMI;
+ else
+ return ATOM_ENCODER_MODE_DVI;
+ } else {
return ATOM_ENCODER_MODE_DVI;
+ }
break;
case DRM_MODE_CONNECTOR_LVDS:
return ATOM_ENCODER_MODE_LVDS;
@@ -732,14 +745,19 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
- (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return ATOM_ENCODER_MODE_DP;
- else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
- (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- (radeon_connector->audio == RADEON_AUDIO_AUTO)))
- return ATOM_ENCODER_MODE_HDMI;
- else
+ } else if (radeon_audio != 0) {
+ if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
+ return ATOM_ENCODER_MODE_HDMI;
+ else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ (radeon_connector->audio == RADEON_AUDIO_AUTO))
+ return ATOM_ENCODER_MODE_HDMI;
+ else
+ return ATOM_ENCODER_MODE_DVI;
+ } else {
return ATOM_ENCODER_MODE_DVI;
+ }
break;
case DRM_MODE_CONNECTOR_eDP:
return ATOM_ENCODER_MODE_DP;
@@ -1655,7 +1673,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
* does the same thing and more.
*/
if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
- (rdev->family != CHIP_RS880))
+ (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b874ccdf52f7..9cd2bc989ac7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1694,6 +1694,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
fw_name);
release_firmware(rdev->smc_fw);
rdev->smc_fw = NULL;
+ err = 0;
} else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"cik_smc: Bogus length %zu in firmware \"%s\"\n",
@@ -3182,6 +3183,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ radeon_scratch_free(rdev, scratch);
return r;
}
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -3198,6 +3200,8 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
r = radeon_fence_wait(ib.fence, false);
if (r) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ radeon_scratch_free(rdev, scratch);
+ radeon_ib_free(rdev, &ib);
return r;
}
for (i = 0; i < rdev->usec_timeout; i++) {
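
The cik_ib_test() hunk above fixes resource leaks on its early-return error paths: when radeon_ib_get() fails, the scratch register obtained earlier is now freed, and when the fence wait fails both the scratch register and the IB are released before returning. The underlying idiom is staged cleanup, undoing only what has already been acquired; a minimal userspace sketch with hypothetical helpers (acquire_a/acquire_b standing in for the scratch and IB allocations):

    #include <stdlib.h>

    static int acquire_a(void **a) { *a = malloc(16); return *a ? 0 : -1; }
    static int acquire_b(void **b) { *b = malloc(16); return *b ? 0 : -1; }
    static int do_test(void *a, void *b) { (void)a; (void)b; return 0; }

    static int run_selftest(void)
    {
            void *a = NULL, *b = NULL;
            int r;

            r = acquire_a(&a);
            if (r)
                    return r;        /* nothing acquired yet, nothing to undo */

            r = acquire_b(&b);
            if (r)
                    goto free_a;     /* undo only the first acquisition */

            r = do_test(a, b);
            /* success and test failure both fall through: release b, then a */

            free(b);
    free_a:
            free(a);
            return r;
    }

    int main(void) { return run_selftest() ? EXIT_FAILURE : EXIT_SUCCESS; }

The cik_ib_test() paths follow the same shape, just with the driver-specific release calls (radeon_scratch_free(), radeon_ib_free()) instead of free().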
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 85a69d2ea3d2..9fcd338c0fcf 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -113,6 +113,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
+ /* XXX: setting this register causes hangs on some asics */
+ return;
+
if (!dig->afmt->pin)
return;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b5c67a99dda9..56f6bec34af5 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1174,23 +1174,16 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
- u16 ctl, v;
- int err;
-
- err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
- if (err)
- return;
-
- v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
+ int readrq;
+ u16 v;
+ readrq = pcie_get_readrq(rdev->pdev);
+ v = ffs(readrq) - 8;
/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
* to avoid hangs or performance issues
*/
- if ((v == 0) || (v == 6) || (v == 7)) {
- ctl &= ~PCI_EXP_DEVCTL_READRQ;
- ctl |= (2 << 12);
- pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
- }
+ if ((v == 0) || (v == 6) || (v == 7))
+ pcie_set_readrq(rdev->pdev, 512);
}
static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index f815c20640bd..57fcc4b16a52 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -67,6 +67,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
+ /* XXX: setting this register causes hangs on some asics */
+ return;
+
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder)
radeon_connector = to_radeon_connector(connector);
@@ -288,6 +291,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_SOURCE | /* select SW CTS value */
HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
evergreen_hdmi_update_ACR(encoder, mode->clock);
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 71399065db04..b41905573cd2 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2635,7 +2635,7 @@ int kv_dpm_init(struct radeon_device *rdev)
pi->caps_sclk_ds = true;
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
- pi->bapm_enable = true;
+ pi->bapm_enable = false;
pi->voltage_drop_t = 0;
pi->caps_sclk_throttle_low_notification = false;
pi->caps_fps = false; /* true? */
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 93c1f9ef5da9..cac2866d79da 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -804,6 +804,7 @@ int ni_init_microcode(struct radeon_device *rdev)
fw_name);
release_firmware(rdev->smc_fw);
rdev->smc_fw = NULL;
+ err = 0;
} else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"ni_mc: Bogus length %zu in firmware \"%s\"\n",
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2a1b1876b431..f9be22062df1 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2302,6 +2302,7 @@ int r600_init_microcode(struct radeon_device *rdev)
fw_name);
release_firmware(rdev->smc_fw);
rdev->smc_fw = NULL;
+ err = 0;
} else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"smc: Bogus length %zu in firmware \"%s\"\n",
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 5b729319f27b..06022e3b9c3b 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -309,6 +309,9 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
+ /* XXX: setting this register causes hangs on some asics */
+ return;
+
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder)
radeon_connector = to_radeon_connector(connector);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a400ac1c4147..24f4960f59ee 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1272,8 +1272,8 @@ struct radeon_blacklist_clocks
struct radeon_clock_and_voltage_limits {
u32 sclk;
u32 mclk;
- u32 vddc;
- u32 vddci;
+ u16 vddc;
+ u16 vddci;
};
struct radeon_clock_array {
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 061b227dae0c..c155d6f3fa68 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -499,7 +499,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
fp2_gen_cntl = 0;
- if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
}
@@ -536,7 +536,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
(RADEON_CRTC_SYNC_TRISTAT |
RADEON_CRTC_DISPLAY_DIS)));
- if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
}
@@ -554,7 +554,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
}
WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
- if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
}
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 79159b5da05b..64565732cb98 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1658,9 +1658,12 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.audio_property,
- RADEON_AUDIO_DISABLE);
+ if (radeon_audio != 0)
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ (radeon_audio == 1) ?
+ RADEON_AUDIO_AUTO :
+ RADEON_AUDIO_DISABLE);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1754,10 +1757,12 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
- if (ASIC_IS_DCE2(rdev)) {
+ if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.audio_property,
- RADEON_AUDIO_DISABLE);
+ rdev->mode_info.audio_property,
+ (radeon_audio == 1) ?
+ RADEON_AUDIO_AUTO :
+ RADEON_AUDIO_DISABLE);
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
@@ -1799,10 +1804,12 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
- if (ASIC_IS_DCE2(rdev)) {
+ if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.audio_property,
- RADEON_AUDIO_DISABLE);
+ rdev->mode_info.audio_property,
+ (radeon_audio == 1) ?
+ RADEON_AUDIO_AUTO :
+ RADEON_AUDIO_DISABLE);
}
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
@@ -1843,10 +1850,12 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
- if (ASIC_IS_DCE2(rdev)) {
+ if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.audio_property,
- RADEON_AUDIO_DISABLE);
+ rdev->mode_info.audio_property,
+ (radeon_audio == 1) ?
+ RADEON_AUDIO_AUTO :
+ RADEON_AUDIO_DISABLE);
}
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 66c222836631..80285e35bc65 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -85,9 +85,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
VRAM, also but everything into VRAM on AGP cards to avoid
image corruptions */
if (p->ring == R600_RING_TYPE_UVD_INDEX &&
- p->rdev->family < CHIP_PALM &&
(i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
-
+ /* TODO: is this still needed for NI+ ? */
p->relocs[i].lobj.domain =
RADEON_GEM_DOMAIN_VRAM;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cdd12dcd988b..b01f231c2f19 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -100,7 +100,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
int radeon_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv);
@@ -153,7 +152,7 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
-int radeon_audio = 1;
+int radeon_audio = -1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = -1;
@@ -196,7 +195,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444);
-MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
+MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, radeon_audio, int, 0444);
MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
@@ -408,7 +407,6 @@ static struct drm_driver kms_driver = {
.irq_uninstall = radeon_driver_irq_uninstall_kms,
.irq_handler = radeon_driver_irq_handler_kms,
.ioctls = radeon_ioctls_kms,
- .gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
.gem_open_object = radeon_gem_object_open,
.gem_close_object = radeon_gem_object_close,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index dce99c8a5835..805c5e566b9a 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -29,13 +29,6 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
-int radeon_gem_object_init(struct drm_gem_object *obj)
-{
- BUG();
-
- return 0;
-}
-
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 61580ddc4eb2..d6b36766e8c9 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -191,7 +191,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
- *value = dev->pci_device;
+ *value = dev->pdev->device;
break;
case RADEON_INFO_NUM_GB_PIPES:
*value = rdev->num_gb_pipes;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 62cd512f5c8d..c89971d904c3 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -392,7 +392,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name),
"radeon_bl%d", dev->primary->index);
- bd = backlight_device_register(bl_name, &drm_connector->kdev,
+ bd = backlight_device_register(bl_name, drm_connector->kdev,
pdata, &radeon_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 4f2e73f79638..308eff5be1b4 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -476,7 +476,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
return -EINVAL;
}
- if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
+ /* TODO: is this still necessary on NI+ ? */
+ if ((cmd == 0 || cmd == 0x3) &&
(start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
start, end);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d4652af425b8..d96f7cbca0a1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1681,6 +1681,7 @@ static int si_init_microcode(struct radeon_device *rdev)
fw_name);
release_firmware(rdev->smc_fw);
rdev->smc_fw = NULL;
+ err = 0;
} else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"si_smc: Bogus length %zu in firmware \"%s\"\n",
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 3100fa9cb52f..7266805d9786 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
/* enable VCPU clock */
WREG32(UVD_VCPU_CNTL, 1 << 9);
- /* enable UMC and NC0 */
- WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));
+ /* enable UMC */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
/* boot up the VCPU */
WREG32(UVD_SOFT_RESET, 0);
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index c590cd9dca0b..d8e835ac2c5e 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -4,6 +4,7 @@ config DRM_RCAR_DU
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
+ select DRM_KMS_FB_HELPER
help
Choose this option if you have an R-Car chipset.
If M is selected the module will be called rcar-du-drm.
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index ca498d151a76..d1372862d871 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -2,6 +2,7 @@ config DRM_SHMOBILE
tristate "DRM Support for SH Mobile"
depends on DRM && (ARM || SUPERH)
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
help
diff --git a/drivers/gpu/host1x/drm/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 69853a4de40a..8961ba6a34b8 100644
--- a/drivers/gpu/host1x/drm/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -1,7 +1,10 @@
config DRM_TEGRA
bool "NVIDIA Tegra DRM"
+ depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
depends on DRM
+ select TEGRA_HOST1X
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
@@ -13,6 +16,11 @@ config DRM_TEGRA
if DRM_TEGRA
+config DRM_TEGRA_DEBUG
+ bool "NVIDIA Tegra DRM debug support"
+ help
+ Say yes here to enable debugging support.
+
config DRM_TEGRA_STAGING
bool "Enable HOST1X interface"
depends on STAGING
@@ -21,9 +29,4 @@ config DRM_TEGRA_STAGING
If unsure, choose N.
-config DRM_TEGRA_DEBUG
- bool "NVIDIA Tegra DRM debug support"
- help
- Say yes here to enable debugging support.
-
endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644
index 000000000000..edc76abd58bb
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -0,0 +1,15 @@
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+tegra-drm-y := \
+ bus.o \
+ drm.o \
+ gem.o \
+ fb.o \
+ dc.o \
+ output.o \
+ rgb.o \
+ hdmi.o \
+ gr2d.o \
+ gr3d.o
+
+obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
new file mode 100644
index 000000000000..565f8f7b9a47
--- /dev/null
+++ b/drivers/gpu/drm/tegra/bus.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "drm.h"
+
+static int drm_host1x_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ const char *device = dev_name(dev->dev);
+ const char *driver = dev->driver->name;
+ const char *bus = dev->dev->bus->name;
+ int length;
+
+ master->unique_len = strlen(bus) + 1 + strlen(device);
+ master->unique_size = master->unique_len;
+
+ master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
+ if (!master->unique)
+ return -ENOMEM;
+
+ snprintf(master->unique, master->unique_len + 1, "%s:%s", bus, device);
+
+ length = strlen(driver) + 1 + master->unique_len;
+
+ dev->devname = kmalloc(length + 1, GFP_KERNEL);
+ if (!dev->devname)
+ return -ENOMEM;
+
+ snprintf(dev->devname, length + 1, "%s@%s", driver, master->unique);
+
+ return 0;
+}
+
+static struct drm_bus drm_host1x_bus = {
+ .bus_type = DRIVER_BUS_HOST1X,
+ .set_busid = drm_host1x_set_busid,
+};
+
+int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
+{
+ struct drm_device *drm;
+ int ret;
+
+ INIT_LIST_HEAD(&driver->device_list);
+ driver->bus = &drm_host1x_bus;
+
+ drm = drm_dev_alloc(driver, &device->dev);
+ if (!drm)
+ return -ENOMEM;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_free;
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
+ driver->major, driver->minor, driver->patchlevel,
+ driver->date, drm->primary->index);
+
+ return 0;
+
+err_free:
+ drm_dev_free(drm);
+ return ret;
+}
+
+void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device)
+{
+ struct tegra_drm *tegra = dev_get_drvdata(&device->dev);
+
+ drm_put_dev(tegra->drm);
+}
diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/drm/tegra/dc.c
index b1a05ad901c3..ae1cb31ead7e 100644
--- a/drivers/gpu/host1x/drm/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -8,13 +8,9 @@
*/
#include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
-#include "host1x_client.h"
#include "dc.h"
#include "drm.h"
#include "gem.h"
@@ -51,6 +47,8 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
window.dst.h = crtc_h;
window.format = tegra_dc_format(fb->pixel_format);
window.bits_per_pixel = fb->bits_per_pixel;
+ window.bottom_up = tegra_fb_is_bottom_up(fb);
+ window.tiled = tegra_fb_is_tiled(fb);
for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
@@ -97,8 +95,11 @@ static int tegra_plane_disable(struct drm_plane *plane)
static void tegra_plane_destroy(struct drm_plane *plane)
{
+ struct tegra_plane *p = to_tegra_plane(plane);
+
tegra_plane_disable(plane);
drm_plane_cleanup(plane);
+ kfree(p);
}
static const struct drm_plane_funcs tegra_plane_funcs = {
@@ -124,7 +125,7 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
for (i = 0; i < 2; i++) {
struct tegra_plane *plane;
- plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
if (!plane)
return -ENOMEM;
@@ -133,8 +134,10 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
&tegra_plane_funcs, plane_formats,
ARRAY_SIZE(plane_formats), false);
- if (err < 0)
+ if (err < 0) {
+ kfree(plane);
return err;
+ }
}
return 0;
@@ -145,6 +148,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
{
unsigned int format = tegra_dc_format(fb->pixel_format);
struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
+ unsigned int h_offset = 0, v_offset = 0;
unsigned long value;
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -156,6 +160,32 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
+ if (tegra_fb_is_tiled(fb)) {
+ value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+ DC_WIN_BUFFER_ADDR_MODE_TILE;
+ } else {
+ value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+ }
+
+ tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+
+ /* make sure bottom-up buffers are properly displayed */
+ if (tegra_fb_is_bottom_up(fb)) {
+ value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+ value |= INVERT_V;
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ v_offset += fb->height - 1;
+ } else {
+ value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+ value &= ~INVERT_V;
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+ }
+
+ tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+ tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+
value = GENERAL_UPDATE | WIN_A_UPDATE;
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
@@ -255,14 +285,26 @@ static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return 0;
}
+static void drm_crtc_clear(struct drm_crtc *crtc)
+{
+ memset(crtc, 0, sizeof(*crtc));
+}
+
+static void tegra_dc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ drm_crtc_clear(crtc);
+}
+
static const struct drm_crtc_funcs tegra_crtc_funcs = {
.page_flip = tegra_dc_page_flip,
.set_config = drm_crtc_helper_set_config,
- .destroy = drm_crtc_cleanup,
+ .destroy = tegra_dc_destroy,
};
static void tegra_crtc_disable(struct drm_crtc *crtc)
{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
struct drm_device *drm = crtc->dev;
struct drm_plane *plane;
@@ -277,6 +319,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
}
}
}
+
+ drm_vblank_off(drm, dc->pipe);
}
static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -491,9 +535,22 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
}
+ if (window->bottom_up)
+ v_offset += window->src.h - 1;
+
tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+ if (window->tiled) {
+ value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+ DC_WIN_BUFFER_ADDR_MODE_TILE;
+ } else {
+ value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+ }
+
+ tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+
value = WIN_ENABLE;
if (yuv) {
@@ -512,6 +569,9 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
value |= COLOR_EXPAND;
}
+ if (window->bottom_up)
+ value |= INVERT_V;
+
tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
/*
@@ -1041,30 +1101,30 @@ static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
return 0;
}
-static int tegra_dc_drm_init(struct host1x_client *client,
- struct drm_device *drm)
+static int tegra_dc_init(struct host1x_client *client)
{
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
struct tegra_dc *dc = host1x_client_to_dc(client);
int err;
- dc->pipe = drm->mode_config.num_crtc;
+ dc->pipe = tegra->drm->mode_config.num_crtc;
- drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+ drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs);
drm_mode_crtc_set_gamma_size(&dc->base, 256);
drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
- err = tegra_dc_rgb_init(drm, dc);
+ err = tegra_dc_rgb_init(tegra->drm, dc);
if (err < 0 && err != -ENODEV) {
dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
return err;
}
- err = tegra_dc_add_planes(drm, dc);
+ err = tegra_dc_add_planes(tegra->drm, dc);
if (err < 0)
return err;
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_dc_debugfs_init(dc, drm->primary);
+ err = tegra_dc_debugfs_init(dc, tegra->drm->primary);
if (err < 0)
dev_err(dc->dev, "debugfs setup failed: %d\n", err);
}
@@ -1080,7 +1140,7 @@ static int tegra_dc_drm_init(struct host1x_client *client,
return 0;
}
-static int tegra_dc_drm_exit(struct host1x_client *client)
+static int tegra_dc_exit(struct host1x_client *client)
{
struct tegra_dc *dc = host1x_client_to_dc(client);
int err;
@@ -1103,13 +1163,12 @@ static int tegra_dc_drm_exit(struct host1x_client *client)
}
static const struct host1x_client_ops dc_client_ops = {
- .drm_init = tegra_dc_drm_init,
- .drm_exit = tegra_dc_drm_exit,
+ .init = tegra_dc_init,
+ .exit = tegra_dc_exit,
};
static int tegra_dc_probe(struct platform_device *pdev)
{
- struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
struct resource *regs;
struct tegra_dc *dc;
int err;
@@ -1153,7 +1212,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
return err;
}
- err = host1x_register_client(host1x, &dc->client);
+ err = host1x_client_register(&dc->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
@@ -1167,17 +1226,22 @@ static int tegra_dc_probe(struct platform_device *pdev)
static int tegra_dc_remove(struct platform_device *pdev)
{
- struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
struct tegra_dc *dc = platform_get_drvdata(pdev);
int err;
- err = host1x_unregister_client(host1x, &dc->client);
+ err = host1x_client_unregister(&dc->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
err);
return err;
}
+ err = tegra_dc_rgb_remove(dc);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to remove RGB output: %d\n", err);
+ return err;
+ }
+
clk_disable_unprepare(dc->clk);
return 0;
diff --git a/drivers/gpu/host1x/drm/dc.h b/drivers/gpu/drm/tegra/dc.h
index 79eaec9aac77..91bbda291470 100644
--- a/drivers/gpu/host1x/drm/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -302,6 +302,7 @@
#define DC_WIN_CSC_KVB 0x618
#define DC_WIN_WIN_OPTIONS 0x700
+#define INVERT_V (1 << 2)
#define COLOR_EXPAND (1 << 6)
#define CSC_ENABLE (1 << 18)
#define WIN_ENABLE (1 << 30)
@@ -365,6 +366,10 @@
#define DC_WIN_BUF_STRIDE 0x70b
#define DC_WIN_UV_BUF_STRIDE 0x70c
#define DC_WIN_BUFFER_ADDR_MODE 0x70d
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR (0 << 0)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE (1 << 0)
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV (0 << 16)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE_UV (1 << 16)
#define DC_WIN_DV_CONTROL 0x70e
#define DC_WIN_BLEND_NOKEY 0x70f
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
new file mode 100644
index 000000000000..28e178137718
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -0,0 +1,714 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/host1x.h>
+
+#include "drm.h"
+#include "gem.h"
+
+#define DRIVER_NAME "tegra"
+#define DRIVER_DESC "NVIDIA Tegra graphics"
+#define DRIVER_DATE "20120330"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+struct tegra_drm_file {
+ struct list_head contexts;
+};
+
+static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
+{
+ struct host1x_device *device = to_host1x_device(drm->dev);
+ struct tegra_drm *tegra;
+ int err;
+
+ tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ dev_set_drvdata(drm->dev, tegra);
+ mutex_init(&tegra->clients_lock);
+ INIT_LIST_HEAD(&tegra->clients);
+ drm->dev_private = tegra;
+ tegra->drm = drm;
+
+ drm_mode_config_init(drm);
+
+ err = host1x_device_init(device);
+ if (err < 0)
+ return err;
+
+ /*
+ * We don't use the drm_irq_install() helpers provided by the DRM
+ * core, so we need to set this manually in order to allow the
+ * DRM_IOCTL_WAIT_VBLANK to operate correctly.
+ */
+ drm->irq_enabled = true;
+
+ err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (err < 0)
+ return err;
+
+ err = tegra_drm_fb_init(drm);
+ if (err < 0)
+ return err;
+
+ drm_kms_helper_poll_init(drm);
+
+ return 0;
+}
+
+static int tegra_drm_unload(struct drm_device *drm)
+{
+ struct host1x_device *device = to_host1x_device(drm->dev);
+ int err;
+
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_exit(drm);
+ drm_vblank_cleanup(drm);
+ drm_mode_config_cleanup(drm);
+
+ err = host1x_device_exit(device);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+{
+ struct tegra_drm_file *fpriv;
+
+ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+ if (!fpriv)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&fpriv->contexts);
+ filp->driver_priv = fpriv;
+
+ return 0;
+}
+
+static void tegra_drm_context_free(struct tegra_drm_context *context)
+{
+ context->client->ops->close_channel(context);
+ kfree(context);
+}
+
+static void tegra_drm_lastclose(struct drm_device *drm)
+{
+ struct tegra_drm *tegra = drm->dev_private;
+
+ tegra_fbdev_restore_mode(tegra->fbdev);
+}
+
+static struct host1x_bo *
+host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
+{
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(drm, file, handle);
+ if (!gem)
+ return NULL;
+
+ mutex_lock(&drm->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&drm->struct_mutex);
+
+ bo = to_tegra_bo(gem);
+ return &bo->base;
+}
+
+int tegra_drm_submit(struct tegra_drm_context *context,
+ struct drm_tegra_submit *args, struct drm_device *drm,
+ struct drm_file *file)
+{
+ unsigned int num_cmdbufs = args->num_cmdbufs;
+ unsigned int num_relocs = args->num_relocs;
+ unsigned int num_waitchks = args->num_waitchks;
+ struct drm_tegra_cmdbuf __user *cmdbufs =
+ (void * __user)(uintptr_t)args->cmdbufs;
+ struct drm_tegra_reloc __user *relocs =
+ (void * __user)(uintptr_t)args->relocs;
+ struct drm_tegra_waitchk __user *waitchks =
+ (void * __user)(uintptr_t)args->waitchks;
+ struct drm_tegra_syncpt syncpt;
+ struct host1x_job *job;
+ int err;
+
+ /* We don't yet support other than one syncpt_incr struct per submit */
+ if (args->num_syncpts != 1)
+ return -EINVAL;
+
+ job = host1x_job_alloc(context->channel, args->num_cmdbufs,
+ args->num_relocs, args->num_waitchks);
+ if (!job)
+ return -ENOMEM;
+
+ job->num_relocs = args->num_relocs;
+ job->num_waitchk = args->num_waitchks;
+ job->client = (u32)args->context;
+ job->class = context->client->base.class;
+ job->serialize = true;
+
+ while (num_cmdbufs) {
+ struct drm_tegra_cmdbuf cmdbuf;
+ struct host1x_bo *bo;
+
+ err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
+ if (err)
+ goto fail;
+
+ bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
+ if (!bo) {
+ err = -ENOENT;
+ goto fail;
+ }
+
+ host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
+ num_cmdbufs--;
+ cmdbufs++;
+ }
+
+ err = copy_from_user(job->relocarray, relocs,
+ sizeof(*relocs) * num_relocs);
+ if (err)
+ goto fail;
+
+ while (num_relocs--) {
+ struct host1x_reloc *reloc = &job->relocarray[num_relocs];
+ struct host1x_bo *cmdbuf, *target;
+
+ cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
+ target = host1x_bo_lookup(drm, file, (u32)reloc->target);
+
+ reloc->cmdbuf = cmdbuf;
+ reloc->target = target;
+
+ if (!reloc->target || !reloc->cmdbuf) {
+ err = -ENOENT;
+ goto fail;
+ }
+ }
+
+ err = copy_from_user(job->waitchk, waitchks,
+ sizeof(*waitchks) * num_waitchks);
+ if (err)
+ goto fail;
+
+ err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
+ sizeof(syncpt));
+ if (err)
+ goto fail;
+
+ job->is_addr_reg = context->client->ops->is_addr_reg;
+ job->syncpt_incrs = syncpt.incrs;
+ job->syncpt_id = syncpt.id;
+ job->timeout = 10000;
+
+ if (args->timeout && args->timeout < 10000)
+ job->timeout = args->timeout;
+
+ err = host1x_job_pin(job, context->client->base.dev);
+ if (err)
+ goto fail;
+
+ err = host1x_job_submit(job);
+ if (err)
+ goto fail_submit;
+
+ args->fence = job->syncpt_end;
+
+ host1x_job_put(job);
+ return 0;
+
+fail_submit:
+ host1x_job_unpin(job);
+fail:
+ host1x_job_put(job);
+ return err;
+}
+
+
+#ifdef CONFIG_DRM_TEGRA_STAGING
+static struct tegra_drm_context *tegra_drm_get_context(__u64 context)
+{
+ return (struct tegra_drm_context *)(uintptr_t)context;
+}
+
+static bool tegra_drm_file_owns_context(struct tegra_drm_file *file,
+ struct tegra_drm_context *context)
+{
+ struct tegra_drm_context *ctx;
+
+ list_for_each_entry(ctx, &file->contexts, list)
+ if (ctx == context)
+ return true;
+
+ return false;
+}
+
+static int tegra_gem_create(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_create *args = data;
+ struct tegra_bo *bo;
+
+ bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
+ &args->handle);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ return 0;
+}
+
+static int tegra_gem_mmap(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_mmap *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -EINVAL;
+
+ bo = to_tegra_bo(gem);
+
+ args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
+
+ drm_gem_object_unreference(gem);
+
+ return 0;
+}
+
+static int tegra_syncpt_read(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct host1x *host = dev_get_drvdata(drm->dev->parent);
+ struct drm_tegra_syncpt_read *args = data;
+ struct host1x_syncpt *sp;
+
+ sp = host1x_syncpt_get(host, args->id);
+ if (!sp)
+ return -EINVAL;
+
+ args->value = host1x_syncpt_read_min(sp);
+ return 0;
+}
+
+static int tegra_syncpt_incr(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
+ struct drm_tegra_syncpt_incr *args = data;
+ struct host1x_syncpt *sp;
+
+ sp = host1x_syncpt_get(host1x, args->id);
+ if (!sp)
+ return -EINVAL;
+
+ return host1x_syncpt_incr(sp);
+}
+
+static int tegra_syncpt_wait(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
+ struct drm_tegra_syncpt_wait *args = data;
+ struct host1x_syncpt *sp;
+
+ sp = host1x_syncpt_get(host1x, args->id);
+ if (!sp)
+ return -EINVAL;
+
+ return host1x_syncpt_wait(sp, args->thresh, args->timeout,
+ &args->value);
+}
+
+static int tegra_open_channel(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct tegra_drm *tegra = drm->dev_private;
+ struct drm_tegra_open_channel *args = data;
+ struct tegra_drm_context *context;
+ struct tegra_drm_client *client;
+ int err = -ENODEV;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ list_for_each_entry(client, &tegra->clients, list)
+ if (client->base.class == args->client) {
+ err = client->ops->open_channel(client, context);
+ if (err)
+ break;
+
+ list_add(&context->list, &fpriv->contexts);
+ args->context = (uintptr_t)context;
+ context->client = client;
+ return 0;
+ }
+
+ kfree(context);
+ return err;
+}
+
+static int tegra_close_channel(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_close_channel *args = data;
+ struct tegra_drm_context *context;
+
+ context = tegra_drm_get_context(args->context);
+
+ if (!tegra_drm_file_owns_context(fpriv, context))
+ return -EINVAL;
+
+ list_del(&context->list);
+ tegra_drm_context_free(context);
+
+ return 0;
+}
+
+static int tegra_get_syncpt(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_get_syncpt *args = data;
+ struct tegra_drm_context *context;
+ struct host1x_syncpt *syncpt;
+
+ context = tegra_drm_get_context(args->context);
+
+ if (!tegra_drm_file_owns_context(fpriv, context))
+ return -ENODEV;
+
+ if (args->index >= context->client->base.num_syncpts)
+ return -EINVAL;
+
+ syncpt = context->client->base.syncpts[args->index];
+ args->id = host1x_syncpt_id(syncpt);
+
+ return 0;
+}
+
+static int tegra_submit(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_submit *args = data;
+ struct tegra_drm_context *context;
+
+ context = tegra_drm_get_context(args->context);
+
+ if (!tegra_drm_file_owns_context(fpriv, context))
+ return -ENODEV;
+
+ return context->client->ops->submit(context, args, drm, file);
+}
+
+static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_get_syncpt_base *args = data;
+ struct tegra_drm_context *context;
+ struct host1x_syncpt_base *base;
+ struct host1x_syncpt *syncpt;
+
+ context = tegra_drm_get_context(args->context);
+
+ if (!tegra_drm_file_owns_context(fpriv, context))
+ return -ENODEV;
+
+ if (args->syncpt >= context->client->base.num_syncpts)
+ return -EINVAL;
+
+ syncpt = context->client->base.syncpts[args->syncpt];
+
+ base = host1x_syncpt_get_base(syncpt);
+ if (!base)
+ return -ENXIO;
+
+ args->id = host1x_syncpt_base_id(base);
+
+ return 0;
+}
+#endif
+
+static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
+#ifdef CONFIG_DRM_TEGRA_STAGING
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED),
+#endif
+};
+
+static const struct file_operations tegra_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = tegra_drm_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (dc->pipe == pipe)
+ return crtc;
+ }
+
+ return NULL;
+}
+
+static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ /* TODO: implement real hardware counter using syncpoints */
+ return drm_vblank_count(dev, crtc);
+}
+
+static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (!crtc)
+ return -ENODEV;
+
+ tegra_dc_enable_vblank(dc);
+
+ return 0;
+}
+
+static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (crtc)
+ tegra_dc_disable_vblank(dc);
+}
+
+static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct tegra_drm_context *context, *tmp;
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ tegra_dc_cancel_page_flip(crtc, file);
+
+ list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
+ tegra_drm_context_free(context);
+
+ kfree(fpriv);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct drm_device *drm = node->minor->dev;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&drm->mode_config.fb_lock);
+
+ list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
+ seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
+ fb->base.id, fb->width, fb->height, fb->depth,
+ fb->bits_per_pixel,
+ atomic_read(&fb->refcount.refcount));
+ }
+
+ mutex_unlock(&drm->mode_config.fb_lock);
+
+ return 0;
+}
+
+static struct drm_info_list tegra_debugfs_list[] = {
+ { "framebuffers", tegra_debugfs_framebuffers, 0 },
+};
+
+static int tegra_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+static void tegra_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list), minor);
+}
+#endif
+
+struct drm_driver tegra_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM,
+ .load = tegra_drm_load,
+ .unload = tegra_drm_unload,
+ .open = tegra_drm_open,
+ .preclose = tegra_drm_preclose,
+ .lastclose = tegra_drm_lastclose,
+
+ .get_vblank_counter = tegra_drm_get_vblank_counter,
+ .enable_vblank = tegra_drm_enable_vblank,
+ .disable_vblank = tegra_drm_disable_vblank,
+
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = tegra_debugfs_init,
+ .debugfs_cleanup = tegra_debugfs_cleanup,
+#endif
+
+ .gem_free_object = tegra_bo_free_object,
+ .gem_vm_ops = &tegra_bo_vm_ops,
+ .dumb_create = tegra_bo_dumb_create,
+ .dumb_map_offset = tegra_bo_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+
+ .ioctls = tegra_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
+ .fops = &tegra_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+int tegra_drm_register_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client)
+{
+ mutex_lock(&tegra->clients_lock);
+ list_add_tail(&client->list, &tegra->clients);
+ mutex_unlock(&tegra->clients_lock);
+
+ return 0;
+}
+
+int tegra_drm_unregister_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client)
+{
+ mutex_lock(&tegra->clients_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&tegra->clients_lock);
+
+ return 0;
+}
+
+static int host1x_drm_probe(struct host1x_device *device)
+{
+ return drm_host1x_init(&tegra_drm_driver, device);
+}
+
+static int host1x_drm_remove(struct host1x_device *device)
+{
+ drm_host1x_exit(&tegra_drm_driver, device);
+
+ return 0;
+}
+
+static const struct of_device_id host1x_drm_subdevs[] = {
+ { .compatible = "nvidia,tegra20-dc", },
+ { .compatible = "nvidia,tegra20-hdmi", },
+ { .compatible = "nvidia,tegra20-gr2d", },
+ { .compatible = "nvidia,tegra20-gr3d", },
+ { .compatible = "nvidia,tegra30-dc", },
+ { .compatible = "nvidia,tegra30-hdmi", },
+ { .compatible = "nvidia,tegra30-gr2d", },
+ { .compatible = "nvidia,tegra30-gr3d", },
+ { .compatible = "nvidia,tegra114-hdmi", },
+ { .compatible = "nvidia,tegra114-gr3d", },
+ { /* sentinel */ }
+};
+
+static struct host1x_driver host1x_drm_driver = {
+ .name = "drm",
+ .probe = host1x_drm_probe,
+ .remove = host1x_drm_remove,
+ .subdevs = host1x_drm_subdevs,
+};
+
+static int __init host1x_drm_init(void)
+{
+ int err;
+
+ err = host1x_driver_register(&host1x_drm_driver);
+ if (err < 0)
+ return err;
+
+ err = platform_driver_register(&tegra_dc_driver);
+ if (err < 0)
+ goto unregister_host1x;
+
+ err = platform_driver_register(&tegra_hdmi_driver);
+ if (err < 0)
+ goto unregister_dc;
+
+ err = platform_driver_register(&tegra_gr2d_driver);
+ if (err < 0)
+ goto unregister_hdmi;
+
+ err = platform_driver_register(&tegra_gr3d_driver);
+ if (err < 0)
+ goto unregister_gr2d;
+
+ return 0;
+
+unregister_gr2d:
+ platform_driver_unregister(&tegra_gr2d_driver);
+unregister_hdmi:
+ platform_driver_unregister(&tegra_hdmi_driver);
+unregister_dc:
+ platform_driver_unregister(&tegra_dc_driver);
+unregister_host1x:
+ host1x_driver_unregister(&host1x_drm_driver);
+ return err;
+}
+module_init(host1x_drm_init);
+
+static void __exit host1x_drm_exit(void)
+{
+ platform_driver_unregister(&tegra_gr3d_driver);
+ platform_driver_unregister(&tegra_gr2d_driver);
+ platform_driver_unregister(&tegra_hdmi_driver);
+ platform_driver_unregister(&tegra_dc_driver);
+ host1x_driver_unregister(&host1x_drm_driver);
+}
+module_exit(host1x_drm_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/host1x/drm/drm.h b/drivers/gpu/drm/tegra/drm.h
index 02ce020f2575..fdfe259ed7f8 100644
--- a/drivers/gpu/host1x/drm/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -10,14 +10,14 @@
#ifndef HOST1X_DRM_H
#define HOST1X_DRM_H 1
+#include <uapi/drm/tegra_drm.h>
+#include <linux/host1x.h>
+
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
-#include <uapi/drm/tegra_drm.h>
-
-#include "host1x.h"
struct tegra_fb {
struct drm_framebuffer base;
@@ -30,17 +30,8 @@ struct tegra_fbdev {
struct tegra_fb *fb;
};
-struct host1x_drm {
+struct tegra_drm {
struct drm_device *drm;
- struct device *dev;
- void __iomem *regs;
- struct clk *clk;
- int syncpt;
- int irq;
-
- struct mutex drm_clients_lock;
- struct list_head drm_clients;
- struct list_head drm_active;
struct mutex clients_lock;
struct list_head clients;
@@ -48,66 +39,60 @@ struct host1x_drm {
struct tegra_fbdev *fbdev;
};
-struct host1x_client;
+struct tegra_drm_client;
-struct host1x_drm_context {
- struct host1x_client *client;
+struct tegra_drm_context {
+ struct tegra_drm_client *client;
struct host1x_channel *channel;
struct list_head list;
};
-struct host1x_client_ops {
- int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
- int (*drm_exit)(struct host1x_client *client);
- int (*open_channel)(struct host1x_client *client,
- struct host1x_drm_context *context);
- void (*close_channel)(struct host1x_drm_context *context);
- int (*submit)(struct host1x_drm_context *context,
+struct tegra_drm_client_ops {
+ int (*open_channel)(struct tegra_drm_client *client,
+ struct tegra_drm_context *context);
+ void (*close_channel)(struct tegra_drm_context *context);
+ int (*is_addr_reg)(struct device *dev, u32 class, u32 offset);
+ int (*submit)(struct tegra_drm_context *context,
struct drm_tegra_submit *args, struct drm_device *drm,
struct drm_file *file);
};
-struct host1x_drm_file {
- struct list_head contexts;
-};
-
-struct host1x_client {
- struct host1x_drm *host1x;
- struct device *dev;
-
- const struct host1x_client_ops *ops;
-
- enum host1x_class class;
- struct host1x_channel *channel;
-
- struct host1x_syncpt **syncpts;
- unsigned int num_syncpts;
+int tegra_drm_submit(struct tegra_drm_context *context,
+ struct drm_tegra_submit *args, struct drm_device *drm,
+ struct drm_file *file);
+struct tegra_drm_client {
+ struct host1x_client base;
struct list_head list;
+
+ const struct tegra_drm_client_ops *ops;
};
-extern int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm);
-extern int host1x_drm_exit(struct host1x_drm *host1x);
+static inline struct tegra_drm_client *
+host1x_to_drm_client(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_drm_client, base);
+}
+
+extern int tegra_drm_register_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client);
+extern int tegra_drm_unregister_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client);
-extern int host1x_register_client(struct host1x_drm *host1x,
- struct host1x_client *client);
-extern int host1x_unregister_client(struct host1x_drm *host1x,
- struct host1x_client *client);
+extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
+extern int tegra_drm_exit(struct tegra_drm *tegra);
struct tegra_output;
struct tegra_dc {
struct host1x_client client;
- spinlock_t lock;
-
- struct host1x_drm *host1x;
struct device *dev;
+ spinlock_t lock;
struct drm_crtc base;
int pipe;
struct clk *clk;
-
void __iomem *regs;
int irq;
@@ -123,7 +108,8 @@ struct tegra_dc {
struct drm_pending_vblank_event *event;
};
-static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
+static inline struct tegra_dc *
+host1x_client_to_dc(struct host1x_client *client)
{
return container_of(client, struct tegra_dc, client);
}
@@ -162,6 +148,8 @@ struct tegra_dc_window {
unsigned int format;
unsigned int stride[2];
unsigned long base[3];
+ bool bottom_up;
+ bool tiled;
};
/* from dc.c */
@@ -249,23 +237,34 @@ static inline int tegra_output_check_mode(struct tegra_output *output,
return output ? -ENOSYS : -EINVAL;
}
+/* from bus.c */
+int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device);
+void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device);
+
/* from rgb.c */
extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
+extern int tegra_dc_rgb_remove(struct tegra_dc *dc);
extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
/* from output.c */
-extern int tegra_output_parse_dt(struct tegra_output *output);
+extern int tegra_output_probe(struct tegra_output *output);
+extern int tegra_output_remove(struct tegra_output *output);
extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
extern int tegra_output_exit(struct tegra_output *output);
/* from fb.c */
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
unsigned int index);
+bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
+bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
extern int tegra_drm_fb_init(struct drm_device *drm);
extern void tegra_drm_fb_exit(struct drm_device *drm);
extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
-extern struct drm_driver tegra_drm_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_gr2d_driver;
+extern struct platform_driver tegra_gr3d_driver;
#endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/host1x/drm/fb.c b/drivers/gpu/drm/tegra/fb.c
index 979a3e32b78b..490f7719e317 100644
--- a/drivers/gpu/host1x/drm/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -10,8 +10,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
-
#include "drm.h"
#include "gem.h"
@@ -36,6 +34,26 @@ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
return fb->planes[index];
}
+bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
+{
+ struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+ if (fb->planes[0]->flags & TEGRA_BO_BOTTOM_UP)
+ return true;
+
+ return false;
+}
+
+bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer)
+{
+ struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+ if (fb->planes[0]->flags & TEGRA_BO_TILED)
+ return true;
+
+ return false;
+}
+
static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
@@ -190,7 +208,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
size = cmd.pitches[0] * cmd.height;
- bo = tegra_bo_create(drm, size);
+ bo = tegra_bo_create(drm, size, 0);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -323,10 +341,10 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
static void tegra_fb_output_poll_changed(struct drm_device *drm)
{
- struct host1x_drm *host1x = drm->dev_private;
+ struct tegra_drm *tegra = drm->dev_private;
- if (host1x->fbdev)
- drm_fb_helper_hotplug_event(&host1x->fbdev->base);
+ if (tegra->fbdev)
+ drm_fb_helper_hotplug_event(&tegra->fbdev->base);
}
static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
@@ -336,7 +354,7 @@ static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
int tegra_drm_fb_init(struct drm_device *drm)
{
- struct host1x_drm *host1x = drm->dev_private;
+ struct tegra_drm *tegra = drm->dev_private;
struct tegra_fbdev *fbdev;
drm->mode_config.min_width = 0;
@@ -352,16 +370,16 @@ int tegra_drm_fb_init(struct drm_device *drm)
if (IS_ERR(fbdev))
return PTR_ERR(fbdev);
- host1x->fbdev = fbdev;
+ tegra->fbdev = fbdev;
return 0;
}
void tegra_drm_fb_exit(struct drm_device *drm)
{
- struct host1x_drm *host1x = drm->dev_private;
+ struct tegra_drm *tegra = drm->dev_private;
- tegra_fbdev_free(host1x->fbdev);
+ tegra_fbdev_free(tegra->fbdev);
}
void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/drm/tegra/gem.c
index 59623de4ee15..28a9cbc07ab9 100644
--- a/drivers/gpu/host1x/drm/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -18,25 +18,18 @@
* GNU General Public License for more details.
*/
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/export.h>
-#include <linux/dma-mapping.h>
-
-#include <drm/drmP.h>
-#include <drm/drm.h>
+#include <drm/tegra_drm.h>
#include "gem.h"
-static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo)
+static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
return container_of(bo, struct tegra_bo, base);
}
static void tegra_bo_put(struct host1x_bo *bo)
{
- struct tegra_bo *obj = host1x_to_drm_bo(bo);
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
struct drm_device *drm = obj->gem.dev;
mutex_lock(&drm->struct_mutex);
@@ -46,7 +39,7 @@ static void tegra_bo_put(struct host1x_bo *bo)
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
- struct tegra_bo *obj = host1x_to_drm_bo(bo);
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
return obj->paddr;
}
@@ -57,7 +50,7 @@ static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
- struct tegra_bo *obj = host1x_to_drm_bo(bo);
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
return obj->vaddr;
}
@@ -68,7 +61,7 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
- struct tegra_bo *obj = host1x_to_drm_bo(bo);
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
return obj->vaddr + page * PAGE_SIZE;
}
@@ -80,7 +73,7 @@ static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
- struct tegra_bo *obj = host1x_to_drm_bo(bo);
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
struct drm_device *drm = obj->gem.dev;
mutex_lock(&drm->struct_mutex);
@@ -106,7 +99,8 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
+ unsigned long flags)
{
struct tegra_bo *bo;
int err;
@@ -135,6 +129,12 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
if (err)
goto err_mmap;
+ if (flags & DRM_TEGRA_GEM_CREATE_TILED)
+ bo->flags |= TEGRA_BO_TILED;
+
+ if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
+ bo->flags |= TEGRA_BO_BOTTOM_UP;
+
return bo;
err_mmap:
@@ -149,14 +149,15 @@ err_dma:
}
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
- struct drm_device *drm,
- unsigned int size,
- unsigned int *handle)
+ struct drm_device *drm,
+ unsigned int size,
+ unsigned long flags,
+ unsigned int *handle)
{
struct tegra_bo *bo;
int ret;
- bo = tegra_bo_create(drm, size);
+ bo = tegra_bo_create(drm, size, flags);
if (IS_ERR(bo))
return bo;
@@ -178,7 +179,6 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
struct tegra_bo *bo = to_tegra_bo(gem);
drm_gem_free_mmap_offset(gem);
-
drm_gem_object_release(gem);
tegra_bo_destroy(gem->dev, bo);
@@ -197,8 +197,8 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
if (args->size < args->pitch * args->height)
args->size = args->pitch * args->height;
- bo = tegra_bo_create_with_handle(file, drm, args->size,
- &args->handle);
+ bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
+ &args->handle);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/drm/tegra/gem.h
index 492533a2dacb..7674000bf47d 100644
--- a/drivers/gpu/host1x/drm/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -19,14 +19,18 @@
#ifndef __HOST1X_GEM_H
#define __HOST1X_GEM_H
+#include <linux/host1x.h>
+
#include <drm/drm.h>
#include <drm/drmP.h>
-#include "host1x_bo.h"
+#define TEGRA_BO_TILED (1 << 0)
+#define TEGRA_BO_BOTTOM_UP (1 << 1)
struct tegra_bo {
struct drm_gem_object gem;
struct host1x_bo base;
+ unsigned long flags;
dma_addr_t paddr;
void *vaddr;
};
@@ -38,11 +42,13 @@ static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
extern const struct host1x_bo_ops tegra_bo_ops;
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size);
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
+ unsigned long flags);
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
- struct drm_device *drm,
- unsigned int size,
- unsigned int *handle);
+ struct drm_device *drm,
+ unsigned int size,
+ unsigned long flags,
+ unsigned int *handle);
void tegra_bo_free_object(struct drm_gem_object *gem);
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
new file mode 100644
index 000000000000..7ec4259ffded
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "gr2d.h"
+
+struct gr2d {
+ struct tegra_drm_client client;
+ struct host1x_channel *channel;
+ struct clk *clk;
+
+ DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
+};
+
+static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
+{
+ return container_of(client, struct gr2d, client);
+}
+
+static int gr2d_init(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
+ struct gr2d *gr2d = to_gr2d(drm);
+
+ gr2d->channel = host1x_channel_request(client->dev);
+ if (!gr2d->channel)
+ return -ENOMEM;
+
+ client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
+ if (!client->syncpts[0]) {
+ host1x_channel_free(gr2d->channel);
+ return -ENOMEM;
+ }
+
+ return tegra_drm_register_client(tegra, drm);
+}
+
+static int gr2d_exit(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct gr2d *gr2d = to_gr2d(drm);
+ int err;
+
+ err = tegra_drm_unregister_client(tegra, drm);
+ if (err < 0)
+ return err;
+
+ host1x_syncpt_free(client->syncpts[0]);
+ host1x_channel_free(gr2d->channel);
+
+ return 0;
+}
+
+static const struct host1x_client_ops gr2d_client_ops = {
+ .init = gr2d_init,
+ .exit = gr2d_exit,
+};
+
+static int gr2d_open_channel(struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ struct gr2d *gr2d = to_gr2d(client);
+
+ context->channel = host1x_channel_get(gr2d->channel);
+ if (!context->channel)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void gr2d_close_channel(struct tegra_drm_context *context)
+{
+ host1x_channel_put(context->channel);
+}
+
+static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 offset)
+{
+ struct gr2d *gr2d = dev_get_drvdata(dev);
+
+ switch (class) {
+ case HOST1X_CLASS_HOST1X:
+ if (offset == 0x2b)
+ return 1;
+
+ break;
+
+ case HOST1X_CLASS_GR2D:
+ case HOST1X_CLASS_GR2D_SB:
+ if (offset >= GR2D_NUM_REGS)
+ break;
+
+ if (test_bit(offset, gr2d->addr_regs))
+ return 1;
+
+ break;
+ }
+
+ return 0;
+}
+
+static const struct tegra_drm_client_ops gr2d_ops = {
+ .open_channel = gr2d_open_channel,
+ .close_channel = gr2d_close_channel,
+ .is_addr_reg = gr2d_is_addr_reg,
+ .submit = tegra_drm_submit,
+};
+
+static const struct of_device_id gr2d_match[] = {
+ { .compatible = "nvidia,tegra30-gr2d" },
+ { .compatible = "nvidia,tegra20-gr2d" },
+ { },
+};
+
+static const u32 gr2d_addr_regs[] = {
+ GR2D_UA_BASE_ADDR,
+ GR2D_VA_BASE_ADDR,
+ GR2D_PAT_BASE_ADDR,
+ GR2D_DSTA_BASE_ADDR,
+ GR2D_DSTB_BASE_ADDR,
+ GR2D_DSTC_BASE_ADDR,
+ GR2D_SRCA_BASE_ADDR,
+ GR2D_SRCB_BASE_ADDR,
+ GR2D_SRC_BASE_ADDR_SB,
+ GR2D_DSTA_BASE_ADDR_SB,
+ GR2D_DSTB_BASE_ADDR_SB,
+ GR2D_UA_BASE_ADDR_SB,
+ GR2D_VA_BASE_ADDR_SB,
+};
+
+static int gr2d_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct host1x_syncpt **syncpts;
+ struct gr2d *gr2d;
+ unsigned int i;
+ int err;
+
+ gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
+ if (!gr2d)
+ return -ENOMEM;
+
+ syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ gr2d->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(gr2d->clk)) {
+ dev_err(dev, "cannot get clock\n");
+ return PTR_ERR(gr2d->clk);
+ }
+
+ err = clk_prepare_enable(gr2d->clk);
+ if (err) {
+ dev_err(dev, "cannot turn on clock\n");
+ return err;
+ }
+
+ INIT_LIST_HEAD(&gr2d->client.base.list);
+ gr2d->client.base.ops = &gr2d_client_ops;
+ gr2d->client.base.dev = dev;
+ gr2d->client.base.class = HOST1X_CLASS_GR2D;
+ gr2d->client.base.syncpts = syncpts;
+ gr2d->client.base.num_syncpts = 1;
+
+ INIT_LIST_HEAD(&gr2d->client.list);
+ gr2d->client.ops = &gr2d_ops;
+
+ err = host1x_client_register(&gr2d->client.base);
+ if (err < 0) {
+ dev_err(dev, "failed to register host1x client: %d\n", err);
+ clk_disable_unprepare(gr2d->clk);
+ return err;
+ }
+
+ /* initialize address register map */
+ for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++)
+ set_bit(gr2d_addr_regs[i], gr2d->addr_regs);
+
+ platform_set_drvdata(pdev, gr2d);
+
+ return 0;
+}
+
+static int gr2d_remove(struct platform_device *pdev)
+{
+ struct gr2d *gr2d = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&gr2d->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ clk_disable_unprepare(gr2d->clk);
+
+ return 0;
+}
+
+struct platform_driver tegra_gr2d_driver = {
+ .driver = {
+ .name = "tegra-gr2d",
+ .of_match_table = gr2d_match,
+ },
+ .probe = gr2d_probe,
+ .remove = gr2d_remove,
+};
diff --git a/drivers/gpu/drm/tegra/gr2d.h b/drivers/gpu/drm/tegra/gr2d.h
new file mode 100644
index 000000000000..4d7304fb015e
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_GR2D_H
+#define TEGRA_GR2D_H
+
+#define GR2D_UA_BASE_ADDR 0x1a
+#define GR2D_VA_BASE_ADDR 0x1b
+#define GR2D_PAT_BASE_ADDR 0x26
+#define GR2D_DSTA_BASE_ADDR 0x2b
+#define GR2D_DSTB_BASE_ADDR 0x2c
+#define GR2D_DSTC_BASE_ADDR 0x2d
+#define GR2D_SRCA_BASE_ADDR 0x31
+#define GR2D_SRCB_BASE_ADDR 0x32
+#define GR2D_SRC_BASE_ADDR_SB 0x48
+#define GR2D_DSTA_BASE_ADDR_SB 0x49
+#define GR2D_DSTB_BASE_ADDR_SB 0x4a
+#define GR2D_UA_BASE_ADDR_SB 0x4b
+#define GR2D_VA_BASE_ADDR_SB 0x4c
+
+#define GR2D_NUM_REGS 0x4d
+
+#endif
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
new file mode 100644
index 000000000000..4cec8f526af7
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2013 Avionic Design GmbH
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/tegra-powergate.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "gr3d.h"
+
+struct gr3d {
+ struct tegra_drm_client client;
+ struct host1x_channel *channel;
+ struct clk *clk_secondary;
+ struct clk *clk;
+
+ DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
+};
+
+static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
+{
+ return container_of(client, struct gr3d, client);
+}
+
+static int gr3d_init(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
+ struct gr3d *gr3d = to_gr3d(drm);
+
+ gr3d->channel = host1x_channel_request(client->dev);
+ if (!gr3d->channel)
+ return -ENOMEM;
+
+ client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
+ if (!client->syncpts[0]) {
+ host1x_channel_free(gr3d->channel);
+ return -ENOMEM;
+ }
+
+ return tegra_drm_register_client(tegra, drm);
+}
+
+static int gr3d_exit(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct gr3d *gr3d = to_gr3d(drm);
+ int err;
+
+ err = tegra_drm_unregister_client(tegra, drm);
+ if (err < 0)
+ return err;
+
+ host1x_syncpt_free(client->syncpts[0]);
+ host1x_channel_free(gr3d->channel);
+
+ return 0;
+}
+
+static const struct host1x_client_ops gr3d_client_ops = {
+ .init = gr3d_init,
+ .exit = gr3d_exit,
+};
+
+static int gr3d_open_channel(struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ struct gr3d *gr3d = to_gr3d(client);
+
+ context->channel = host1x_channel_get(gr3d->channel);
+ if (!context->channel)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void gr3d_close_channel(struct tegra_drm_context *context)
+{
+ host1x_channel_put(context->channel);
+}
+
+static int gr3d_is_addr_reg(struct device *dev, u32 class, u32 offset)
+{
+ struct gr3d *gr3d = dev_get_drvdata(dev);
+
+ switch (class) {
+ case HOST1X_CLASS_HOST1X:
+ if (offset == 0x2b)
+ return 1;
+
+ break;
+
+ case HOST1X_CLASS_GR3D:
+ if (offset >= GR3D_NUM_REGS)
+ break;
+
+ if (test_bit(offset, gr3d->addr_regs))
+ return 1;
+
+ break;
+ }
+
+ return 0;
+}
+
+static const struct tegra_drm_client_ops gr3d_ops = {
+ .open_channel = gr3d_open_channel,
+ .close_channel = gr3d_close_channel,
+ .is_addr_reg = gr3d_is_addr_reg,
+ .submit = tegra_drm_submit,
+};
+
+static const struct of_device_id tegra_gr3d_match[] = {
+ { .compatible = "nvidia,tegra114-gr3d" },
+ { .compatible = "nvidia,tegra30-gr3d" },
+ { .compatible = "nvidia,tegra20-gr3d" },
+ { }
+};
+
+static const u32 gr3d_addr_regs[] = {
+ GR3D_IDX_ATTRIBUTE( 0),
+ GR3D_IDX_ATTRIBUTE( 1),
+ GR3D_IDX_ATTRIBUTE( 2),
+ GR3D_IDX_ATTRIBUTE( 3),
+ GR3D_IDX_ATTRIBUTE( 4),
+ GR3D_IDX_ATTRIBUTE( 5),
+ GR3D_IDX_ATTRIBUTE( 6),
+ GR3D_IDX_ATTRIBUTE( 7),
+ GR3D_IDX_ATTRIBUTE( 8),
+ GR3D_IDX_ATTRIBUTE( 9),
+ GR3D_IDX_ATTRIBUTE(10),
+ GR3D_IDX_ATTRIBUTE(11),
+ GR3D_IDX_ATTRIBUTE(12),
+ GR3D_IDX_ATTRIBUTE(13),
+ GR3D_IDX_ATTRIBUTE(14),
+ GR3D_IDX_ATTRIBUTE(15),
+ GR3D_IDX_INDEX_BASE,
+ GR3D_QR_ZTAG_ADDR,
+ GR3D_QR_CTAG_ADDR,
+ GR3D_QR_CZ_ADDR,
+ GR3D_TEX_TEX_ADDR( 0),
+ GR3D_TEX_TEX_ADDR( 1),
+ GR3D_TEX_TEX_ADDR( 2),
+ GR3D_TEX_TEX_ADDR( 3),
+ GR3D_TEX_TEX_ADDR( 4),
+ GR3D_TEX_TEX_ADDR( 5),
+ GR3D_TEX_TEX_ADDR( 6),
+ GR3D_TEX_TEX_ADDR( 7),
+ GR3D_TEX_TEX_ADDR( 8),
+ GR3D_TEX_TEX_ADDR( 9),
+ GR3D_TEX_TEX_ADDR(10),
+ GR3D_TEX_TEX_ADDR(11),
+ GR3D_TEX_TEX_ADDR(12),
+ GR3D_TEX_TEX_ADDR(13),
+ GR3D_TEX_TEX_ADDR(14),
+ GR3D_TEX_TEX_ADDR(15),
+ GR3D_DW_MEMORY_OUTPUT_ADDRESS,
+ GR3D_GLOBAL_SURFADDR( 0),
+ GR3D_GLOBAL_SURFADDR( 1),
+ GR3D_GLOBAL_SURFADDR( 2),
+ GR3D_GLOBAL_SURFADDR( 3),
+ GR3D_GLOBAL_SURFADDR( 4),
+ GR3D_GLOBAL_SURFADDR( 5),
+ GR3D_GLOBAL_SURFADDR( 6),
+ GR3D_GLOBAL_SURFADDR( 7),
+ GR3D_GLOBAL_SURFADDR( 8),
+ GR3D_GLOBAL_SURFADDR( 9),
+ GR3D_GLOBAL_SURFADDR(10),
+ GR3D_GLOBAL_SURFADDR(11),
+ GR3D_GLOBAL_SURFADDR(12),
+ GR3D_GLOBAL_SURFADDR(13),
+ GR3D_GLOBAL_SURFADDR(14),
+ GR3D_GLOBAL_SURFADDR(15),
+ GR3D_GLOBAL_SPILLSURFADDR,
+ GR3D_GLOBAL_SURFOVERADDR( 0),
+ GR3D_GLOBAL_SURFOVERADDR( 1),
+ GR3D_GLOBAL_SURFOVERADDR( 2),
+ GR3D_GLOBAL_SURFOVERADDR( 3),
+ GR3D_GLOBAL_SURFOVERADDR( 4),
+ GR3D_GLOBAL_SURFOVERADDR( 5),
+ GR3D_GLOBAL_SURFOVERADDR( 6),
+ GR3D_GLOBAL_SURFOVERADDR( 7),
+ GR3D_GLOBAL_SURFOVERADDR( 8),
+ GR3D_GLOBAL_SURFOVERADDR( 9),
+ GR3D_GLOBAL_SURFOVERADDR(10),
+ GR3D_GLOBAL_SURFOVERADDR(11),
+ GR3D_GLOBAL_SURFOVERADDR(12),
+ GR3D_GLOBAL_SURFOVERADDR(13),
+ GR3D_GLOBAL_SURFOVERADDR(14),
+ GR3D_GLOBAL_SURFOVERADDR(15),
+ GR3D_GLOBAL_SAMP01SURFADDR( 0),
+ GR3D_GLOBAL_SAMP01SURFADDR( 1),
+ GR3D_GLOBAL_SAMP01SURFADDR( 2),
+ GR3D_GLOBAL_SAMP01SURFADDR( 3),
+ GR3D_GLOBAL_SAMP01SURFADDR( 4),
+ GR3D_GLOBAL_SAMP01SURFADDR( 5),
+ GR3D_GLOBAL_SAMP01SURFADDR( 6),
+ GR3D_GLOBAL_SAMP01SURFADDR( 7),
+ GR3D_GLOBAL_SAMP01SURFADDR( 8),
+ GR3D_GLOBAL_SAMP01SURFADDR( 9),
+ GR3D_GLOBAL_SAMP01SURFADDR(10),
+ GR3D_GLOBAL_SAMP01SURFADDR(11),
+ GR3D_GLOBAL_SAMP01SURFADDR(12),
+ GR3D_GLOBAL_SAMP01SURFADDR(13),
+ GR3D_GLOBAL_SAMP01SURFADDR(14),
+ GR3D_GLOBAL_SAMP01SURFADDR(15),
+ GR3D_GLOBAL_SAMP23SURFADDR( 0),
+ GR3D_GLOBAL_SAMP23SURFADDR( 1),
+ GR3D_GLOBAL_SAMP23SURFADDR( 2),
+ GR3D_GLOBAL_SAMP23SURFADDR( 3),
+ GR3D_GLOBAL_SAMP23SURFADDR( 4),
+ GR3D_GLOBAL_SAMP23SURFADDR( 5),
+ GR3D_GLOBAL_SAMP23SURFADDR( 6),
+ GR3D_GLOBAL_SAMP23SURFADDR( 7),
+ GR3D_GLOBAL_SAMP23SURFADDR( 8),
+ GR3D_GLOBAL_SAMP23SURFADDR( 9),
+ GR3D_GLOBAL_SAMP23SURFADDR(10),
+ GR3D_GLOBAL_SAMP23SURFADDR(11),
+ GR3D_GLOBAL_SAMP23SURFADDR(12),
+ GR3D_GLOBAL_SAMP23SURFADDR(13),
+ GR3D_GLOBAL_SAMP23SURFADDR(14),
+ GR3D_GLOBAL_SAMP23SURFADDR(15),
+};
+
+static int gr3d_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct host1x_syncpt **syncpts;
+ struct gr3d *gr3d;
+ unsigned int i;
+ int err;
+
+ gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
+ if (!gr3d)
+ return -ENOMEM;
+
+ syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ gr3d->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(gr3d->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ return PTR_ERR(gr3d->clk);
+ }
+
+ if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) {
+ gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2");
+		if (IS_ERR(gr3d->clk_secondary)) {
+			dev_err(&pdev->dev, "cannot get secondary clock\n");
+			return PTR_ERR(gr3d->clk_secondary);
+ }
+ }
+
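+	/* power up the 3D partition(s) before registering the host1x client */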
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to power up 3D unit\n");
+ return err;
+ }
+
+ if (gr3d->clk_secondary) {
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1,
+ gr3d->clk_secondary);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to power up secondary 3D unit\n");
+ return err;
+ }
+ }
+
+ INIT_LIST_HEAD(&gr3d->client.base.list);
+ gr3d->client.base.ops = &gr3d_client_ops;
+ gr3d->client.base.dev = &pdev->dev;
+ gr3d->client.base.class = HOST1X_CLASS_GR3D;
+ gr3d->client.base.syncpts = syncpts;
+ gr3d->client.base.num_syncpts = 1;
+
+ INIT_LIST_HEAD(&gr3d->client.list);
+ gr3d->client.ops = &gr3d_ops;
+
+ err = host1x_client_register(&gr3d->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ /* initialize address register map */
+ for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++)
+ set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
+
+ platform_set_drvdata(pdev, gr3d);
+
+ return 0;
+}
+
+static int gr3d_remove(struct platform_device *pdev)
+{
+ struct gr3d *gr3d = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&gr3d->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ if (gr3d->clk_secondary) {
+ tegra_powergate_power_off(TEGRA_POWERGATE_3D1);
+ clk_disable_unprepare(gr3d->clk_secondary);
+ }
+
+ tegra_powergate_power_off(TEGRA_POWERGATE_3D);
+ clk_disable_unprepare(gr3d->clk);
+
+ return 0;
+}
+
+struct platform_driver tegra_gr3d_driver = {
+ .driver = {
+ .name = "tegra-gr3d",
+ .of_match_table = tegra_gr3d_match,
+ },
+ .probe = gr3d_probe,
+ .remove = gr3d_remove,
+};
diff --git a/drivers/gpu/drm/tegra/gr3d.h b/drivers/gpu/drm/tegra/gr3d.h
new file mode 100644
index 000000000000..0c30a1351c83
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_GR3D_H
+#define TEGRA_GR3D_H
+
+#define GR3D_IDX_ATTRIBUTE(x) (0x100 + (x) * 2)
+#define GR3D_IDX_INDEX_BASE 0x121
+#define GR3D_QR_ZTAG_ADDR 0x415
+#define GR3D_QR_CTAG_ADDR 0x417
+#define GR3D_QR_CZ_ADDR 0x419
+#define GR3D_TEX_TEX_ADDR(x) (0x710 + (x))
+#define GR3D_DW_MEMORY_OUTPUT_ADDRESS 0x904
+#define GR3D_GLOBAL_SURFADDR(x) (0xe00 + (x))
+#define GR3D_GLOBAL_SPILLSURFADDR 0xe2a
+#define GR3D_GLOBAL_SURFOVERADDR(x) (0xe30 + (x))
+#define GR3D_GLOBAL_SAMP01SURFADDR(x) (0xe50 + (x))
+#define GR3D_GLOBAL_SAMP23SURFADDR(x) (0xe60 + (x))
+
+#define GR3D_NUM_REGS 0xe88
+
+#endif
diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 644d95c7d489..0cd9bc2056e8 100644
--- a/drivers/gpu/host1x/drm/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -8,21 +8,33 @@
*/
#include <linux/clk.h>
+#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
-#include <linux/gpio.h>
#include <linux/hdmi.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <linux/clk/tegra.h>
-
-#include <drm/drm_edid.h>
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
-#include "host1x_client.h"
+
+struct tmds_config {
+ unsigned int pclk;
+ u32 pll0;
+ u32 pll1;
+ u32 pe_current;
+ u32 drive_current;
+ u32 peak_current;
+};
+
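+/* SoC-specific configuration: TMDS tables and the fuse override register */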
+struct tegra_hdmi_config {
+ const struct tmds_config *tmds;
+ unsigned int num_tmds;
+
+ unsigned long fuse_override_offset;
+ unsigned long fuse_override_value;
+
+ bool has_sor_io_peak_current;
+};
struct tegra_hdmi {
struct host1x_client client;
@@ -38,6 +50,8 @@ struct tegra_hdmi {
struct clk *clk_parent;
struct clk *clk;
+ const struct tegra_hdmi_config *config;
+
unsigned int audio_source;
unsigned int audio_freq;
bool stereo;
@@ -143,15 +157,7 @@ static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
{ 0, 0, 0, 0 },
};
-struct tmds_config {
- unsigned int pclk;
- u32 pll0;
- u32 pll1;
- u32 pe_current;
- u32 drive_current;
-};
-
-static const struct tmds_config tegra2_tmds_config[] = {
+static const struct tmds_config tegra20_tmds_config[] = {
{ /* slow pixel clock modes */
.pclk = 27000000,
.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -184,7 +190,7 @@ static const struct tmds_config tegra2_tmds_config[] = {
},
};
-static const struct tmds_config tegra3_tmds_config[] = {
+static const struct tmds_config tegra30_tmds_config[] = {
{ /* 480p modes */
.pclk = 27000000,
.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -230,6 +236,85 @@ static const struct tmds_config tegra3_tmds_config[] = {
},
};
+static const struct tmds_config tegra114_tmds_config[] = {
+ { /* 480p/576p / 25.2MHz/27MHz modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_0_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 720p / 74.25MHz modes */
+ .pclk = 74250000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+ SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_15_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 1080p / 148.5MHz modes */
+ .pclk = 148500000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+ SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_10_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 225/297MHz modes */
+ .pclk = UINT_MAX,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7)
+ | SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_0_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA),
+ },
+};
+
static const struct tegra_hdmi_audio_config *
tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
{
@@ -511,7 +596,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
err = hdmi_audio_infoframe_init(&frame);
if (err < 0) {
- dev_err(hdmi->dev, "failed to initialize audio infoframe: %d\n",
+ dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
err);
return;
}
@@ -531,7 +616,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
* contain 7 bytes. Including the 3 byte header only the first 10
* bytes can be programmed.
*/
- tegra_hdmi_write_infopack(hdmi, buffer, min(10, err));
+ tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
@@ -577,8 +662,28 @@ static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
- value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE;
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+ tegra_hdmi_writel(hdmi, tmds->drive_current,
+ HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+
+ value = tegra_hdmi_readl(hdmi, hdmi->config->fuse_override_offset);
+ value |= hdmi->config->fuse_override_value;
+ tegra_hdmi_writel(hdmi, value, hdmi->config->fuse_override_offset);
+
+ if (hdmi->config->has_sor_io_peak_current)
+ tegra_hdmi_writel(hdmi, tmds->peak_current,
+ HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
+}
+
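+/* use the connector's cached EDID to distinguish HDMI sinks from DVI */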
+static bool tegra_output_is_hdmi(struct tegra_output *output)
+{
+ struct edid *edid;
+
+ if (!output->connector.edid_blob_ptr)
+ return false;
+
+ edid = (struct edid *)output->connector.edid_blob_ptr->data;
+
+ return drm_detect_hdmi_monitor(edid);
}
static int tegra_output_hdmi_enable(struct tegra_output *output)
@@ -589,23 +694,17 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
struct tegra_hdmi *hdmi = to_hdmi(output);
struct device_node *node = hdmi->dev->of_node;
unsigned int pulse_start, div82, pclk;
- const struct tmds_config *tmds;
- unsigned int num_tmds;
unsigned long value;
int retries = 1000;
int err;
+ hdmi->dvi = !tegra_output_is_hdmi(output);
+
pclk = mode->clock * 1000;
h_sync_width = mode->hsync_end - mode->hsync_start;
h_back_porch = mode->htotal - mode->hsync_end;
h_front_porch = mode->hsync_start - mode->hdisplay;
- err = regulator_enable(hdmi->vdd);
- if (err < 0) {
- dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
- return err;
- }
-
err = regulator_enable(hdmi->pll);
if (err < 0) {
dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
@@ -710,17 +809,9 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
tegra_hdmi_setup_stereo_infoframe(hdmi);
/* TMDS CONFIG */
- if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
- num_tmds = ARRAY_SIZE(tegra3_tmds_config);
- tmds = tegra3_tmds_config;
- } else {
- num_tmds = ARRAY_SIZE(tegra2_tmds_config);
- tmds = tegra2_tmds_config;
- }
-
- for (i = 0; i < num_tmds; i++) {
- if (pclk <= tmds[i].pclk) {
- tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
+ for (i = 0; i < hdmi->config->num_tmds; i++) {
+ if (pclk <= hdmi->config->tmds[i].pclk) {
+ tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
break;
}
}
@@ -824,7 +915,6 @@ static int tegra_output_hdmi_disable(struct tegra_output *output)
tegra_periph_reset_assert(hdmi->clk);
clk_disable(hdmi->clk);
regulator_disable(hdmi->pll);
- regulator_disable(hdmi->vdd);
return 0;
}
@@ -1055,6 +1145,7 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+ DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
#undef DUMP_REG
@@ -1122,24 +1213,31 @@ static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
return 0;
}
-static int tegra_hdmi_drm_init(struct host1x_client *client,
- struct drm_device *drm)
+static int tegra_hdmi_init(struct host1x_client *client)
{
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
int err;
+ err = regulator_enable(hdmi->vdd);
+ if (err < 0) {
+ dev_err(client->dev, "failed to enable VDD regulator: %d\n",
+ err);
+ return err;
+ }
+
hdmi->output.type = TEGRA_OUTPUT_HDMI;
hdmi->output.dev = client->dev;
hdmi->output.ops = &hdmi_ops;
- err = tegra_output_init(drm, &hdmi->output);
+ err = tegra_output_init(tegra->drm, &hdmi->output);
if (err < 0) {
dev_err(client->dev, "output setup failed: %d\n", err);
return err;
}
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
+ err = tegra_hdmi_debugfs_init(hdmi, tegra->drm->primary);
if (err < 0)
dev_err(client->dev, "debugfs setup failed: %d\n", err);
}
@@ -1147,7 +1245,7 @@ static int tegra_hdmi_drm_init(struct host1x_client *client,
return 0;
}
-static int tegra_hdmi_drm_exit(struct host1x_client *client)
+static int tegra_hdmi_exit(struct host1x_client *client)
{
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
int err;
@@ -1171,25 +1269,63 @@ static int tegra_hdmi_drm_exit(struct host1x_client *client)
return err;
}
+ regulator_disable(hdmi->vdd);
+
return 0;
}
static const struct host1x_client_ops hdmi_client_ops = {
- .drm_init = tegra_hdmi_drm_init,
- .drm_exit = tegra_hdmi_drm_exit,
+ .init = tegra_hdmi_init,
+ .exit = tegra_hdmi_exit,
+};
+
+static const struct tegra_hdmi_config tegra20_hdmi_config = {
+ .tmds = tegra20_tmds_config,
+ .num_tmds = ARRAY_SIZE(tegra20_tmds_config),
+ .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
+ .fuse_override_value = 1 << 31,
+ .has_sor_io_peak_current = false,
+};
+
+static const struct tegra_hdmi_config tegra30_hdmi_config = {
+ .tmds = tegra30_tmds_config,
+ .num_tmds = ARRAY_SIZE(tegra30_tmds_config),
+ .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
+ .fuse_override_value = 1 << 31,
+ .has_sor_io_peak_current = false,
+};
+
+static const struct tegra_hdmi_config tegra114_hdmi_config = {
+ .tmds = tegra114_tmds_config,
+ .num_tmds = ARRAY_SIZE(tegra114_tmds_config),
+ .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
+ .fuse_override_value = 1 << 31,
+ .has_sor_io_peak_current = true,
+};
+
+static const struct of_device_id tegra_hdmi_of_match[] = {
+ { .compatible = "nvidia,tegra114-hdmi", .data = &tegra114_hdmi_config },
+ { .compatible = "nvidia,tegra30-hdmi", .data = &tegra30_hdmi_config },
+ { .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
+ { },
};
static int tegra_hdmi_probe(struct platform_device *pdev)
{
- struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
+ const struct of_device_id *match;
struct tegra_hdmi *hdmi;
struct resource *regs;
int err;
+ match = of_match_node(tegra_hdmi_of_match, pdev->dev.of_node);
+ if (!match)
+ return -ENODEV;
+
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
+ hdmi->config = match->data;
hdmi->dev = &pdev->dev;
hdmi->audio_source = AUTO;
hdmi->audio_freq = 44100;
@@ -1234,7 +1370,7 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
hdmi->output.dev = &pdev->dev;
- err = tegra_output_parse_dt(&hdmi->output);
+ err = tegra_output_probe(&hdmi->output);
if (err < 0)
return err;
@@ -1252,11 +1388,11 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
hdmi->irq = err;
- hdmi->client.ops = &hdmi_client_ops;
INIT_LIST_HEAD(&hdmi->client.list);
+ hdmi->client.ops = &hdmi_client_ops;
hdmi->client.dev = &pdev->dev;
- err = host1x_register_client(host1x, &hdmi->client);
+ err = host1x_client_register(&hdmi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
@@ -1270,29 +1406,28 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
static int tegra_hdmi_remove(struct platform_device *pdev)
{
- struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
int err;
- err = host1x_unregister_client(host1x, &hdmi->client);
+ err = host1x_client_unregister(&hdmi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
err);
return err;
}
+ err = tegra_output_remove(&hdmi->output);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to remove output: %d\n", err);
+ return err;
+ }
+
clk_unprepare(hdmi->clk_parent);
clk_unprepare(hdmi->clk);
return 0;
}
-static struct of_device_id tegra_hdmi_of_match[] = {
- { .compatible = "nvidia,tegra30-hdmi", },
- { .compatible = "nvidia,tegra20-hdmi", },
- { },
-};
-
struct platform_driver tegra_hdmi_driver = {
.driver = {
.name = "tegra-hdmi",
diff --git a/drivers/gpu/host1x/drm/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index 52ac36e08ccb..0aebc485f7fa 100644
--- a/drivers/gpu/host1x/drm/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -233,7 +233,10 @@
#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8)
#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16)
#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24)
-#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+#define DRIVE_CURRENT_LANE0_T114(x) (((x) & 0x7f) << 0)
+#define DRIVE_CURRENT_LANE1_T114(x) (((x) & 0x7f) << 8)
+#define DRIVE_CURRENT_LANE2_T114(x) (((x) & 0x7f) << 16)
+#define DRIVE_CURRENT_LANE3_T114(x) (((x) & 0x7f) << 24)
#define DRIVE_CURRENT_1_500_mA 0x00
#define DRIVE_CURRENT_1_875_mA 0x01
@@ -299,6 +302,79 @@
#define DRIVE_CURRENT_24_375_mA 0x3d
#define DRIVE_CURRENT_24_750_mA 0x3e
+#define DRIVE_CURRENT_0_000_mA_T114 0x00
+#define DRIVE_CURRENT_0_400_mA_T114 0x01
+#define DRIVE_CURRENT_0_800_mA_T114 0x02
+#define DRIVE_CURRENT_1_200_mA_T114 0x03
+#define DRIVE_CURRENT_1_600_mA_T114 0x04
+#define DRIVE_CURRENT_2_000_mA_T114 0x05
+#define DRIVE_CURRENT_2_400_mA_T114 0x06
+#define DRIVE_CURRENT_2_800_mA_T114 0x07
+#define DRIVE_CURRENT_3_200_mA_T114 0x08
+#define DRIVE_CURRENT_3_600_mA_T114 0x09
+#define DRIVE_CURRENT_4_000_mA_T114 0x0a
+#define DRIVE_CURRENT_4_400_mA_T114 0x0b
+#define DRIVE_CURRENT_4_800_mA_T114 0x0c
+#define DRIVE_CURRENT_5_200_mA_T114 0x0d
+#define DRIVE_CURRENT_5_600_mA_T114 0x0e
+#define DRIVE_CURRENT_6_000_mA_T114 0x0f
+#define DRIVE_CURRENT_6_400_mA_T114 0x10
+#define DRIVE_CURRENT_6_800_mA_T114 0x11
+#define DRIVE_CURRENT_7_200_mA_T114 0x12
+#define DRIVE_CURRENT_7_600_mA_T114 0x13
+#define DRIVE_CURRENT_8_000_mA_T114 0x14
+#define DRIVE_CURRENT_8_400_mA_T114 0x15
+#define DRIVE_CURRENT_8_800_mA_T114 0x16
+#define DRIVE_CURRENT_9_200_mA_T114 0x17
+#define DRIVE_CURRENT_9_600_mA_T114 0x18
+#define DRIVE_CURRENT_10_000_mA_T114 0x19
+#define DRIVE_CURRENT_10_400_mA_T114 0x1a
+#define DRIVE_CURRENT_10_800_mA_T114 0x1b
+#define DRIVE_CURRENT_11_200_mA_T114 0x1c
+#define DRIVE_CURRENT_11_600_mA_T114 0x1d
+#define DRIVE_CURRENT_12_000_mA_T114 0x1e
+#define DRIVE_CURRENT_12_400_mA_T114 0x1f
+#define DRIVE_CURRENT_12_800_mA_T114 0x20
+#define DRIVE_CURRENT_13_200_mA_T114 0x21
+#define DRIVE_CURRENT_13_600_mA_T114 0x22
+#define DRIVE_CURRENT_14_000_mA_T114 0x23
+#define DRIVE_CURRENT_14_400_mA_T114 0x24
+#define DRIVE_CURRENT_14_800_mA_T114 0x25
+#define DRIVE_CURRENT_15_200_mA_T114 0x26
+#define DRIVE_CURRENT_15_600_mA_T114 0x27
+#define DRIVE_CURRENT_16_000_mA_T114 0x28
+#define DRIVE_CURRENT_16_400_mA_T114 0x29
+#define DRIVE_CURRENT_16_800_mA_T114 0x2a
+#define DRIVE_CURRENT_17_200_mA_T114 0x2b
+#define DRIVE_CURRENT_17_600_mA_T114 0x2c
+#define DRIVE_CURRENT_18_000_mA_T114 0x2d
+#define DRIVE_CURRENT_18_400_mA_T114 0x2e
+#define DRIVE_CURRENT_18_800_mA_T114 0x2f
+#define DRIVE_CURRENT_19_200_mA_T114 0x30
+#define DRIVE_CURRENT_19_600_mA_T114 0x31
+#define DRIVE_CURRENT_20_000_mA_T114 0x32
+#define DRIVE_CURRENT_20_400_mA_T114 0x33
+#define DRIVE_CURRENT_20_800_mA_T114 0x34
+#define DRIVE_CURRENT_21_200_mA_T114 0x35
+#define DRIVE_CURRENT_21_600_mA_T114 0x36
+#define DRIVE_CURRENT_22_000_mA_T114 0x37
+#define DRIVE_CURRENT_22_400_mA_T114 0x38
+#define DRIVE_CURRENT_22_800_mA_T114 0x39
+#define DRIVE_CURRENT_23_200_mA_T114 0x3a
+#define DRIVE_CURRENT_23_600_mA_T114 0x3b
+#define DRIVE_CURRENT_24_000_mA_T114 0x3c
+#define DRIVE_CURRENT_24_400_mA_T114 0x3d
+#define DRIVE_CURRENT_24_800_mA_T114 0x3e
+#define DRIVE_CURRENT_25_200_mA_T114 0x3f
+#define DRIVE_CURRENT_25_400_mA_T114 0x40
+#define DRIVE_CURRENT_25_800_mA_T114 0x41
+#define DRIVE_CURRENT_26_200_mA_T114 0x42
+#define DRIVE_CURRENT_26_600_mA_T114 0x43
+#define DRIVE_CURRENT_27_000_mA_T114 0x44
+#define DRIVE_CURRENT_27_400_mA_T114 0x45
+#define DRIVE_CURRENT_27_800_mA_T114 0x46
+#define DRIVE_CURRENT_28_200_mA_T114 0x47
+
#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f
#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80
#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81
@@ -358,6 +434,23 @@
#define PE_CURRENT_7_0_mA 0xe
#define PE_CURRENT_7_5_mA 0xf
+#define PE_CURRENT_0_mA_T114 0x0
+#define PE_CURRENT_1_mA_T114 0x1
+#define PE_CURRENT_2_mA_T114 0x2
+#define PE_CURRENT_3_mA_T114 0x3
+#define PE_CURRENT_4_mA_T114 0x4
+#define PE_CURRENT_5_mA_T114 0x5
+#define PE_CURRENT_6_mA_T114 0x6
+#define PE_CURRENT_7_mA_T114 0x7
+#define PE_CURRENT_8_mA_T114 0x8
+#define PE_CURRENT_9_mA_T114 0x9
+#define PE_CURRENT_10_mA_T114 0xa
+#define PE_CURRENT_11_mA_T114 0xb
+#define PE_CURRENT_12_mA_T114 0xc
+#define PE_CURRENT_13_mA_T114 0xd
+#define PE_CURRENT_14_mA_T114 0xe
+#define PE_CURRENT_15_mA_T114 0xf
+
#define HDMI_NV_PDISP_KEY_CTRL 0x9a
#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b
#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c
@@ -383,4 +476,61 @@
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
+#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT 0xd1
+#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) << 0)
+#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) << 8)
+#define PEAK_CURRENT_LANE2(x) (((x) & 0x7f) << 16)
+#define PEAK_CURRENT_LANE3(x) (((x) & 0x7f) << 24)
+
+#define PEAK_CURRENT_0_000_mA 0x00
+#define PEAK_CURRENT_0_200_mA 0x01
+#define PEAK_CURRENT_0_400_mA 0x02
+#define PEAK_CURRENT_0_600_mA 0x03
+#define PEAK_CURRENT_0_800_mA 0x04
+#define PEAK_CURRENT_1_000_mA 0x05
+#define PEAK_CURRENT_1_200_mA 0x06
+#define PEAK_CURRENT_1_400_mA 0x07
+#define PEAK_CURRENT_1_600_mA 0x08
+#define PEAK_CURRENT_1_800_mA 0x09
+#define PEAK_CURRENT_2_000_mA 0x0a
+#define PEAK_CURRENT_2_200_mA 0x0b
+#define PEAK_CURRENT_2_400_mA 0x0c
+#define PEAK_CURRENT_2_600_mA 0x0d
+#define PEAK_CURRENT_2_800_mA 0x0e
+#define PEAK_CURRENT_3_000_mA 0x0f
+#define PEAK_CURRENT_3_200_mA 0x10
+#define PEAK_CURRENT_3_400_mA 0x11
+#define PEAK_CURRENT_3_600_mA 0x12
+#define PEAK_CURRENT_3_800_mA 0x13
+#define PEAK_CURRENT_4_000_mA 0x14
+#define PEAK_CURRENT_4_200_mA 0x15
+#define PEAK_CURRENT_4_400_mA 0x16
+#define PEAK_CURRENT_4_600_mA 0x17
+#define PEAK_CURRENT_4_800_mA 0x18
+#define PEAK_CURRENT_5_000_mA 0x19
+#define PEAK_CURRENT_5_200_mA 0x1a
+#define PEAK_CURRENT_5_400_mA 0x1b
+#define PEAK_CURRENT_5_600_mA 0x1c
+#define PEAK_CURRENT_5_800_mA 0x1d
+#define PEAK_CURRENT_6_000_mA 0x1e
+#define PEAK_CURRENT_6_200_mA 0x1f
+#define PEAK_CURRENT_6_400_mA 0x20
+#define PEAK_CURRENT_6_600_mA 0x21
+#define PEAK_CURRENT_6_800_mA 0x22
+#define PEAK_CURRENT_7_000_mA 0x23
+#define PEAK_CURRENT_7_200_mA 0x24
+#define PEAK_CURRENT_7_400_mA 0x25
+#define PEAK_CURRENT_7_600_mA 0x26
+#define PEAK_CURRENT_7_800_mA 0x27
+#define PEAK_CURRENT_8_000_mA 0x28
+#define PEAK_CURRENT_8_200_mA 0x29
+#define PEAK_CURRENT_8_400_mA 0x2a
+#define PEAK_CURRENT_8_600_mA 0x2b
+#define PEAK_CURRENT_8_800_mA 0x2c
+#define PEAK_CURRENT_9_000_mA 0x2d
+#define PEAK_CURRENT_9_200_mA 0x2e
+#define PEAK_CURRENT_9_400_mA 0x2f
+
+#define HDMI_NV_PDISP_SOR_PAD_CTLS0 0xd2
+
#endif /* TEGRA_HDMI_H */
diff --git a/drivers/gpu/host1x/drm/output.c b/drivers/gpu/drm/tegra/output.c
index 137ae81ab80e..2cb0065e0578 100644
--- a/drivers/gpu/host1x/drm/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -7,9 +7,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/of_gpio.h>
-#include <linux/i2c.h>
#include "drm.h"
@@ -81,10 +79,16 @@ tegra_connector_detect(struct drm_connector *connector, bool force)
return status;
}
+static void drm_connector_clear(struct drm_connector *connector)
+{
+ memset(connector, 0, sizeof(*connector));
+}
+
static void tegra_connector_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
+ drm_connector_clear(connector);
}
static const struct drm_connector_funcs connector_funcs = {
@@ -94,9 +98,15 @@ static const struct drm_connector_funcs connector_funcs = {
.destroy = tegra_connector_destroy,
};
+static void drm_encoder_clear(struct drm_encoder *encoder)
+{
+ memset(encoder, 0, sizeof(*encoder));
+}
+
static void tegra_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
+ drm_encoder_clear(encoder);
}
static const struct drm_encoder_funcs encoder_funcs = {
@@ -151,7 +161,7 @@ static irqreturn_t hpd_irq(int irq, void *data)
return IRQ_HANDLED;
}
-int tegra_output_parse_dt(struct tegra_output *output)
+int tegra_output_probe(struct tegra_output *output)
{
enum of_gpio_flags flags;
struct device_node *ddc;
@@ -181,14 +191,6 @@ int tegra_output_parse_dt(struct tegra_output *output)
output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
"nvidia,hpd-gpio", 0,
&flags);
-
- return 0;
-}
-
-int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
-{
- int connector, encoder, err;
-
if (gpio_is_valid(output->hpd_gpio)) {
unsigned long flags;
@@ -202,7 +204,8 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
err = gpio_to_irq(output->hpd_gpio);
if (err < 0) {
dev_err(output->dev, "gpio_to_irq(): %d\n", err);
- goto free_hpd;
+ gpio_free(output->hpd_gpio);
+ return err;
}
output->hpd_irq = err;
@@ -215,12 +218,33 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
if (err < 0) {
dev_err(output->dev, "failed to request IRQ#%u: %d\n",
output->hpd_irq, err);
- goto free_hpd;
+ gpio_free(output->hpd_gpio);
+ return err;
}
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
}
+ return 0;
+}
+
+int tegra_output_remove(struct tegra_output *output)
+{
+ if (gpio_is_valid(output->hpd_gpio)) {
+ free_irq(output->hpd_irq, output);
+ gpio_free(output->hpd_gpio);
+ }
+
+ if (output->ddc)
+ put_device(&output->ddc->dev);
+
+ return 0;
+}
+
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
+{
+ int connector, encoder;
+
switch (output->type) {
case TEGRA_OUTPUT_RGB:
connector = DRM_MODE_CONNECTOR_LVDS;
@@ -241,6 +265,7 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
drm_connector_init(drm, &output->connector, &connector_funcs,
connector);
drm_connector_helper_add(&output->connector, &connector_helper_funcs);
+ output->connector.dpms = DRM_MODE_DPMS_OFF;
drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
@@ -251,22 +276,9 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
output->encoder.possible_crtcs = 0x3;
return 0;
-
-free_hpd:
- gpio_free(output->hpd_gpio);
-
- return err;
}
int tegra_output_exit(struct tegra_output *output)
{
- if (gpio_is_valid(output->hpd_gpio)) {
- free_irq(output->hpd_irq, output);
- gpio_free(output->hpd_gpio);
- }
-
- if (output->ddc)
- put_device(&output->ddc->dev);
-
return 0;
}
diff --git a/drivers/gpu/host1x/drm/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 5aa66ef7a946..ba47ca4fb880 100644
--- a/drivers/gpu/host1x/drm/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -8,9 +8,6 @@
*/
#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
#include "drm.h"
#include "dc.h"
@@ -150,7 +147,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
rgb->output.dev = dc->dev;
rgb->output.of_node = np;
- err = tegra_output_parse_dt(&rgb->output);
+ err = tegra_output_probe(&rgb->output);
if (err < 0)
return err;
@@ -177,6 +174,20 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
return 0;
}
+int tegra_dc_rgb_remove(struct tegra_dc *dc)
+{
+ int err;
+
+ if (!dc->rgb)
+ return 0;
+
+ err = tegra_output_remove(dc->rgb);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
{
struct tegra_rgb *rgb = to_rgb(dc->rgb);
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 7a4d10106906..7c3ef79fcb37 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -2,6 +2,7 @@ config DRM_TILCDC
tristate "DRM Support for TI LCDC Display Controller"
depends on DRM && OF && ARM
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 6222af19f456..f02528686cd5 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -8,6 +8,7 @@ config DRM_UDL
select FB_SYS_IMAGEBLIT
select FB_DEFERRED_IO
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
help
This is a KMS driver for the USB displaylink video adapters.
Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 7650dc0d78ce..3ddd6cd98ac1 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -77,7 +77,6 @@ static struct drm_driver driver = {
.unload = udl_driver_unload,
/* gem hooks */
- .gem_init_object = udl_gem_init_object,
.gem_free_object = udl_gem_free_object,
.gem_vm_ops = &udl_gem_vm_ops,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 56aec9409fa3..1fbf7b357f16 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -115,7 +115,6 @@ int udl_dumb_create(struct drm_file *file_priv,
int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
-int udl_gem_init_object(struct drm_gem_object *obj);
void udl_gem_free_object(struct drm_gem_object *gem_obj);
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8bf646183bac..24ffbe990736 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,13 +107,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
}
-int udl_gem_init_object(struct drm_gem_object *obj)
-{
- BUG();
-
- return 0;
-}
-
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
struct page **pages;
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 7e3ad87c366c..927889105483 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -79,7 +79,7 @@ int via_final_context(struct drm_device *dev, int context)
/* Linux specific until context tracking code gets ported to BSD */
/* Last context, perform cleanup */
- if (dev->ctx_count == 1 && dev->dev_private) {
+ if (list_is_singular(&dev->ctxlist) && dev->dev_private) {
DRM_DEBUG("Last Context\n");
drm_irq_uninstall(dev);
via_cleanup_futex(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1a90f0a2f7e5..0508f93b9795 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -740,9 +740,17 @@ static void vmw_postclose(struct drm_device *dev,
struct vmw_fpriv *vmw_fp;
vmw_fp = vmw_fpriv(file_priv);
- ttm_object_file_release(&vmw_fp->tfile);
- if (vmw_fp->locked_master)
+
+ if (vmw_fp->locked_master) {
+ struct vmw_master *vmaster =
+ vmw_master(vmw_fp->locked_master);
+
+ ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+ ttm_vt_unlock(&vmaster->lock);
drm_master_put(&vmw_fp->locked_master);
+ }
+
+ ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
}
@@ -925,14 +933,13 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
- vmw_execbuf_release_pinned_bo(dev_priv);
-
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
drm_master_put(&vmw_fp->locked_master);
}
- ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+ ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
+ vmw_execbuf_release_pinned_bo(dev_priv);
if (!dev_priv->enable_fb) {
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 0e67cf41065d..37fb4befec82 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -970,7 +970,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (new_backup)
res->backup_offset = new_backup_offset;
- if (!res->func->may_evict)
+ if (!res->func->may_evict || res->id == -1)
return;
write_lock(&dev_priv->resource_lock);
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index ccfd42b23606..7d6bed222542 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -19,6 +19,4 @@ config TEGRA_HOST1X_FIREWALL
If unsure, choose Y.
-source "drivers/gpu/host1x/drm/Kconfig"
-
endif
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index 3b037b6e0298..afa1e9e4e512 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -1,6 +1,5 @@
-ccflags-y = -Idrivers/gpu/host1x
-
host1x-y = \
+ bus.o \
syncpt.o \
dev.o \
intr.o \
@@ -8,13 +7,7 @@ host1x-y = \
channel.o \
job.o \
debug.o \
- hw/host1x01.o
-
-ccflags-y += -Iinclude/drm
-ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+ hw/host1x01.o \
+ hw/host1x02.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/drm.o drm/fb.o drm/dc.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/output.o drm/rgb.o drm/hdmi.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/gem.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/gr2d.o
obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
new file mode 100644
index 000000000000..509383f8be03
--- /dev/null
+++ b/drivers/gpu/host1x/bus.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013, NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/host1x.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "dev.h"
+
+static DEFINE_MUTEX(clients_lock);
+static LIST_HEAD(clients);
+
+static DEFINE_MUTEX(drivers_lock);
+static LIST_HEAD(drivers);
+
+static DEFINE_MUTEX(devices_lock);
+static LIST_HEAD(devices);
+
+struct host1x_subdev {
+ struct host1x_client *client;
+ struct device_node *np;
+ struct list_head list;
+};
+
+/**
+ * host1x_subdev_add() - add a new subdevice with an associated device node
+ */
+static int host1x_subdev_add(struct host1x_device *device,
+ struct device_node *np)
+{
+ struct host1x_subdev *subdev;
+
+ subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
+ if (!subdev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&subdev->list);
+ subdev->np = of_node_get(np);
+
+ mutex_lock(&device->subdevs_lock);
+ list_add_tail(&subdev->list, &device->subdevs);
+ mutex_unlock(&device->subdevs_lock);
+
+ return 0;
+}
+
+/**
+ * host1x_subdev_del() - remove subdevice
+ */
+static void host1x_subdev_del(struct host1x_subdev *subdev)
+{
+ list_del(&subdev->list);
+ of_node_put(subdev->np);
+ kfree(subdev);
+}
+
+/**
+ * host1x_device_parse_dt() - scan device tree and add matching subdevices
+ */
+static int host1x_device_parse_dt(struct host1x_device *device)
+{
+ struct device_node *np;
+ int err;
+
+ for_each_child_of_node(device->dev.parent->of_node, np) {
+ if (of_match_node(device->driver->subdevs, np) &&
+ of_device_is_available(np)) {
+ err = host1x_subdev_add(device, np);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void host1x_subdev_register(struct host1x_device *device,
+ struct host1x_subdev *subdev,
+ struct host1x_client *client)
+{
+ int err;
+
+ /*
+ * Move the subdevice to the list of active (registered) subdevices
+ * and associate it with a client. At the same time, associate the
+ * client with its parent device.
+ */
+ mutex_lock(&device->subdevs_lock);
+ mutex_lock(&device->clients_lock);
+ list_move_tail(&client->list, &device->clients);
+ list_move_tail(&subdev->list, &device->active);
+ client->parent = &device->dev;
+ subdev->client = client;
+ mutex_unlock(&device->clients_lock);
+ mutex_unlock(&device->subdevs_lock);
+
+ /*
+ * When all subdevices have been registered, the composite device is
+ * ready to be probed.
+ */
+ if (list_empty(&device->subdevs)) {
+ err = device->driver->probe(device);
+ if (err < 0)
+ dev_err(&device->dev, "probe failed: %d\n", err);
+ }
+}
+
+static void __host1x_subdev_unregister(struct host1x_device *device,
+ struct host1x_subdev *subdev)
+{
+ struct host1x_client *client = subdev->client;
+ int err;
+
+ /*
+ * If all subdevices have been activated, we're about to remove the
+ * first active subdevice, so unload the driver first.
+ */
+ if (list_empty(&device->subdevs)) {
+ err = device->driver->remove(device);
+ if (err < 0)
+ dev_err(&device->dev, "remove failed: %d\n", err);
+ }
+
+ /*
+ * Move the subdevice back to the list of idle subdevices and remove
+ * it from list of clients.
+ */
+ mutex_lock(&device->clients_lock);
+ subdev->client = NULL;
+ client->parent = NULL;
+ list_move_tail(&subdev->list, &device->subdevs);
+ /*
+ * XXX: Perhaps don't do this here, but rather explicitly remove it
+ * when the device is about to be deleted.
+ *
+ * This is somewhat complicated by the fact that this function is
+ * used to remove the subdevice when a client is unregistered but
+ * also when the composite device is about to be removed.
+ */
+ list_del_init(&client->list);
+ mutex_unlock(&device->clients_lock);
+}
+
+static void host1x_subdev_unregister(struct host1x_device *device,
+ struct host1x_subdev *subdev)
+{
+ mutex_lock(&device->subdevs_lock);
+ __host1x_subdev_unregister(device, subdev);
+ mutex_unlock(&device->subdevs_lock);
+}
+
+int host1x_device_init(struct host1x_device *device)
+{
+ struct host1x_client *client;
+ int err;
+
+ mutex_lock(&device->clients_lock);
+
+ list_for_each_entry(client, &device->clients, list) {
+ if (client->ops && client->ops->init) {
+ err = client->ops->init(client);
+ if (err < 0) {
+ dev_err(&device->dev,
+ "failed to initialize %s: %d\n",
+ dev_name(client->dev), err);
+ mutex_unlock(&device->clients_lock);
+ return err;
+ }
+ }
+ }
+
+ mutex_unlock(&device->clients_lock);
+
+ return 0;
+}
+
+int host1x_device_exit(struct host1x_device *device)
+{
+ struct host1x_client *client;
+ int err;
+
+ mutex_lock(&device->clients_lock);
+
+ list_for_each_entry_reverse(client, &device->clients, list) {
+ if (client->ops && client->ops->exit) {
+ err = client->ops->exit(client);
+ if (err < 0) {
+ dev_err(&device->dev,
+ "failed to cleanup %s: %d\n",
+ dev_name(client->dev), err);
+ mutex_unlock(&device->clients_lock);
+ return err;
+ }
+ }
+ }
+
+ mutex_unlock(&device->clients_lock);
+
+ return 0;
+}
+
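+/*
+ * Attempt to bind a newly registered client to a pending subdevice of one
+ * of the host1x devices.
+ */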
+static int host1x_register_client(struct host1x *host1x,
+ struct host1x_client *client)
+{
+ struct host1x_device *device;
+ struct host1x_subdev *subdev;
+
+ mutex_lock(&host1x->devices_lock);
+
+ list_for_each_entry(device, &host1x->devices, list) {
+ list_for_each_entry(subdev, &device->subdevs, list) {
+ if (subdev->np == client->dev->of_node) {
+ host1x_subdev_register(device, subdev, client);
+ mutex_unlock(&host1x->devices_lock);
+ return 0;
+ }
+ }
+ }
+
+ mutex_unlock(&host1x->devices_lock);
+ return -ENODEV;
+}
+
+static int host1x_unregister_client(struct host1x *host1x,
+ struct host1x_client *client)
+{
+ struct host1x_device *device, *dt;
+ struct host1x_subdev *subdev;
+
+ mutex_lock(&host1x->devices_lock);
+
+ list_for_each_entry_safe(device, dt, &host1x->devices, list) {
+ list_for_each_entry(subdev, &device->active, list) {
+ if (subdev->client == client) {
+ host1x_subdev_unregister(device, subdev);
+ mutex_unlock(&host1x->devices_lock);
+ return 0;
+ }
+ }
+ }
+
+ mutex_unlock(&host1x->devices_lock);
+ return -ENODEV;
+}
+
+struct bus_type host1x_bus_type = {
+ .name = "host1x",
+};
+
+int host1x_bus_init(void)
+{
+ return bus_register(&host1x_bus_type);
+}
+
+void host1x_bus_exit(void)
+{
+ bus_unregister(&host1x_bus_type);
+}
+
+static void host1x_device_release(struct device *dev)
+{
+ struct host1x_device *device = to_host1x_device(dev);
+
+ kfree(device);
+}
+
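+/*
+ * Create the composite device for a driver, scan the device tree for its
+ * subdevices and bind any clients that have already registered.
+ */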
+static int host1x_device_add(struct host1x *host1x,
+ struct host1x_driver *driver)
+{
+ struct host1x_client *client, *tmp;
+ struct host1x_subdev *subdev;
+ struct host1x_device *device;
+ int err;
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ mutex_init(&device->subdevs_lock);
+ INIT_LIST_HEAD(&device->subdevs);
+ INIT_LIST_HEAD(&device->active);
+ mutex_init(&device->clients_lock);
+ INIT_LIST_HEAD(&device->clients);
+ INIT_LIST_HEAD(&device->list);
+ device->driver = driver;
+
+ device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
+ device->dev.dma_mask = &device->dev.coherent_dma_mask;
+ device->dev.release = host1x_device_release;
+	dev_set_name(&device->dev, "%s", driver->name);
+ device->dev.bus = &host1x_bus_type;
+ device->dev.parent = host1x->dev;
+
+ err = device_register(&device->dev);
+ if (err < 0)
+ return err;
+
+ err = host1x_device_parse_dt(device);
+ if (err < 0) {
+ device_unregister(&device->dev);
+ return err;
+ }
+
+ mutex_lock(&host1x->devices_lock);
+ list_add_tail(&device->list, &host1x->devices);
+ mutex_unlock(&host1x->devices_lock);
+
+ mutex_lock(&clients_lock);
+
+ list_for_each_entry_safe(client, tmp, &clients, list) {
+ list_for_each_entry(subdev, &device->subdevs, list) {
+ if (subdev->np == client->dev->of_node) {
+ host1x_subdev_register(device, subdev, client);
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&clients_lock);
+
+ return 0;
+}
+
+/*
+ * Removes a device by first unregistering any subdevices and then removing
+ * it from the list of devices.
+ *
+ * This function must be called with the host1x->devices_lock held.
+ */
+static void host1x_device_del(struct host1x *host1x,
+ struct host1x_device *device)
+{
+ struct host1x_subdev *subdev, *sd;
+ struct host1x_client *client, *cl;
+
+ mutex_lock(&device->subdevs_lock);
+
+ /* unregister subdevices */
+ list_for_each_entry_safe(subdev, sd, &device->active, list) {
+ /*
+ * host1x_subdev_unregister() will remove the client from
+ * any lists, so we'll need to manually add it back to the
+ * list of idle clients.
+ *
+ * XXX: Alternatively, perhaps don't remove the client from
+ * any lists in host1x_subdev_unregister() and instead do
+ * that explicitly from host1x_unregister_client()?
+ */
+ client = subdev->client;
+
+ __host1x_subdev_unregister(device, subdev);
+
+ /* add the client to the list of idle clients */
+ mutex_lock(&clients_lock);
+ list_add_tail(&client->list, &clients);
+ mutex_unlock(&clients_lock);
+ }
+
+ /* remove subdevices */
+ list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
+ host1x_subdev_del(subdev);
+
+ mutex_unlock(&device->subdevs_lock);
+
+ /* move clients to idle list */
+ mutex_lock(&clients_lock);
+ mutex_lock(&device->clients_lock);
+
+ list_for_each_entry_safe(client, cl, &device->clients, list)
+ list_move_tail(&client->list, &clients);
+
+ mutex_unlock(&device->clients_lock);
+ mutex_unlock(&clients_lock);
+
+ /* finally remove the device */
+ list_del_init(&device->list);
+ device_unregister(&device->dev);
+}
+
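+/* add a device for this driver unless the host1x instance already has one */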
+static void host1x_attach_driver(struct host1x *host1x,
+ struct host1x_driver *driver)
+{
+ struct host1x_device *device;
+ int err;
+
+ mutex_lock(&host1x->devices_lock);
+
+ list_for_each_entry(device, &host1x->devices, list) {
+ if (device->driver == driver) {
+ mutex_unlock(&host1x->devices_lock);
+ return;
+ }
+ }
+
+ mutex_unlock(&host1x->devices_lock);
+
+ err = host1x_device_add(host1x, driver);
+ if (err < 0)
+ dev_err(host1x->dev, "failed to allocate device: %d\n", err);
+}
+
+static void host1x_detach_driver(struct host1x *host1x,
+ struct host1x_driver *driver)
+{
+ struct host1x_device *device, *tmp;
+
+ mutex_lock(&host1x->devices_lock);
+
+ list_for_each_entry_safe(device, tmp, &host1x->devices, list)
+ if (device->driver == driver)
+ host1x_device_del(host1x, device);
+
+ mutex_unlock(&host1x->devices_lock);
+}
+
+int host1x_register(struct host1x *host1x)
+{
+ struct host1x_driver *driver;
+
+ mutex_lock(&devices_lock);
+ list_add_tail(&host1x->list, &devices);
+ mutex_unlock(&devices_lock);
+
+ mutex_lock(&drivers_lock);
+
+ list_for_each_entry(driver, &drivers, list)
+ host1x_attach_driver(host1x, driver);
+
+ mutex_unlock(&drivers_lock);
+
+ return 0;
+}
+
+int host1x_unregister(struct host1x *host1x)
+{
+ struct host1x_driver *driver;
+
+ mutex_lock(&drivers_lock);
+
+ list_for_each_entry(driver, &drivers, list)
+ host1x_detach_driver(host1x, driver);
+
+ mutex_unlock(&drivers_lock);
+
+ mutex_lock(&devices_lock);
+ list_del_init(&host1x->list);
+ mutex_unlock(&devices_lock);
+
+ return 0;
+}
+
+int host1x_driver_register(struct host1x_driver *driver)
+{
+ struct host1x *host1x;
+
+ INIT_LIST_HEAD(&driver->list);
+
+ mutex_lock(&drivers_lock);
+ list_add_tail(&driver->list, &drivers);
+ mutex_unlock(&drivers_lock);
+
+ mutex_lock(&devices_lock);
+
+ list_for_each_entry(host1x, &devices, list)
+ host1x_attach_driver(host1x, driver);
+
+ mutex_unlock(&devices_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(host1x_driver_register);
+
+void host1x_driver_unregister(struct host1x_driver *driver)
+{
+ mutex_lock(&drivers_lock);
+ list_del_init(&driver->list);
+ mutex_unlock(&drivers_lock);
+}
+EXPORT_SYMBOL(host1x_driver_unregister);
+
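+/*
+ * Bind a client to a matching host1x device if one exists, otherwise keep
+ * it on the list of idle clients until a device shows up.
+ */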
+int host1x_client_register(struct host1x_client *client)
+{
+ struct host1x *host1x;
+ int err;
+
+ mutex_lock(&devices_lock);
+
+ list_for_each_entry(host1x, &devices, list) {
+ err = host1x_register_client(host1x, client);
+ if (!err) {
+ mutex_unlock(&devices_lock);
+ return 0;
+ }
+ }
+
+ mutex_unlock(&devices_lock);
+
+ mutex_lock(&clients_lock);
+ list_add_tail(&client->list, &clients);
+ mutex_unlock(&clients_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(host1x_client_register);
+
+int host1x_client_unregister(struct host1x_client *client)
+{
+ struct host1x_client *c;
+ struct host1x *host1x;
+ int err;
+
+ mutex_lock(&devices_lock);
+
+ list_for_each_entry(host1x, &devices, list) {
+ err = host1x_unregister_client(host1x, client);
+ if (!err) {
+ mutex_unlock(&devices_lock);
+ return 0;
+ }
+ }
+
+ mutex_unlock(&devices_lock);
+ mutex_lock(&clients_lock);
+
+ list_for_each_entry(c, &clients, list) {
+ if (c == client) {
+ list_del_init(&c->list);
+ break;
+ }
+ }
+
+ mutex_unlock(&clients_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(host1x_client_unregister);
diff --git a/drivers/gpu/host1x/host1x_client.h b/drivers/gpu/host1x/bus.h
index 9b85f10f4a44..4099e99212c8 100644
--- a/drivers/gpu/host1x/host1x_client.h
+++ b/drivers/gpu/host1x/bus.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2013, NVIDIA Corporation.
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013, NVIDIA Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -14,22 +15,15 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef HOST1X_CLIENT_H
-#define HOST1X_CLIENT_H
+#ifndef HOST1X_BUS_H
+#define HOST1X_BUS_H
-struct device;
-struct platform_device;
+struct host1x;
-#ifdef CONFIG_DRM_TEGRA
-int host1x_drm_alloc(struct platform_device *pdev);
-#else
-static inline int host1x_drm_alloc(struct platform_device *pdev)
-{
- return 0;
-}
-#endif
+int host1x_bus_init(void);
+void host1x_bus_exit(void);
-void host1x_set_drm_data(struct device *dev, void *data);
-void *host1x_get_drm_data(struct device *dev);
+int host1x_register(struct host1x *host1x);
+int host1x_unregister(struct host1x *host1x);
#endif
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index de72172d3b5f..3995255b16c7 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -20,6 +20,7 @@
#include <asm/cacheflush.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/host1x.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
@@ -30,7 +31,6 @@
#include "channel.h"
#include "dev.h"
#include "debug.h"
-#include "host1x_bo.h"
#include "job.h"
/*
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
index 48723b8eea42..df767cf90d51 100644
--- a/drivers/gpu/host1x/channel.h
+++ b/drivers/gpu/host1x/channel.h
@@ -40,12 +40,6 @@ struct host1x_channel {
/* channel list operations */
int host1x_channel_list_init(struct host1x *host);
-struct host1x_channel *host1x_channel_request(struct device *dev);
-void host1x_channel_free(struct host1x_channel *channel);
-struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
-void host1x_channel_put(struct host1x_channel *channel);
-int host1x_job_submit(struct host1x_job *job);
-
#define host1x_for_each_channel(host, channel) \
list_for_each_entry(channel, &host->chlist.list, list)
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 471630299878..80da003d63de 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -27,24 +27,13 @@
#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
+#include "bus.h"
#include "dev.h"
#include "intr.h"
#include "channel.h"
#include "debug.h"
#include "hw/host1x01.h"
-#include "host1x_client.h"
-
-void host1x_set_drm_data(struct device *dev, void *data)
-{
- struct host1x *host1x = dev_get_drvdata(dev);
- host1x->drm_data = data;
-}
-
-void *host1x_get_drm_data(struct device *dev)
-{
- struct host1x *host1x = dev_get_drvdata(dev);
- return host1x ? host1x->drm_data : NULL;
-}
+#include "hw/host1x02.h"
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
@@ -79,7 +68,17 @@ static const struct host1x_info host1x01_info = {
.sync_offset = 0x3000,
};
+static const struct host1x_info host1x02_info = {
+ .nb_channels = 9,
+ .nb_pts = 32,
+ .nb_mlocks = 16,
+ .nb_bases = 12,
+ .init = host1x02_init,
+ .sync_offset = 0x3000,
+};
+
static struct of_device_id host1x_of_match[] = {
+ { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
{ },
@@ -114,6 +113,9 @@ static int host1x_probe(struct platform_device *pdev)
if (!host)
return -ENOMEM;
+ mutex_init(&host->devices_lock);
+ INIT_LIST_HEAD(&host->devices);
+ INIT_LIST_HEAD(&host->list);
host->dev = &pdev->dev;
host->info = id->data;
@@ -152,7 +154,7 @@ static int host1x_probe(struct platform_device *pdev)
err = host1x_syncpt_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize syncpts\n");
- return err;
+ goto fail_unprepare_disable;
}
err = host1x_intr_init(host, syncpt_irq);
@@ -163,19 +165,26 @@ static int host1x_probe(struct platform_device *pdev)
host1x_debug_init(host);
- host1x_drm_alloc(pdev);
+ err = host1x_register(host);
+ if (err < 0)
+ goto fail_deinit_intr;
return 0;
+fail_deinit_intr:
+ host1x_intr_deinit(host);
fail_deinit_syncpt:
host1x_syncpt_deinit(host);
+fail_unprepare_disable:
+ clk_disable_unprepare(host->clk);
return err;
}
-static int __exit host1x_remove(struct platform_device *pdev)
+static int host1x_remove(struct platform_device *pdev)
{
struct host1x *host = platform_get_drvdata(pdev);
+ host1x_unregister(host);
host1x_intr_deinit(host);
host1x_syncpt_deinit(host);
clk_disable_unprepare(host->clk);
@@ -184,59 +193,36 @@ static int __exit host1x_remove(struct platform_device *pdev)
}
static struct platform_driver tegra_host1x_driver = {
- .probe = host1x_probe,
- .remove = __exit_p(host1x_remove),
.driver = {
- .owner = THIS_MODULE,
.name = "tegra-host1x",
.of_match_table = host1x_of_match,
},
+ .probe = host1x_probe,
+ .remove = host1x_remove,
};
static int __init tegra_host1x_init(void)
{
int err;
- err = platform_driver_register(&tegra_host1x_driver);
+ err = host1x_bus_init();
if (err < 0)
return err;
-#ifdef CONFIG_DRM_TEGRA
- err = platform_driver_register(&tegra_dc_driver);
- if (err < 0)
- goto unregister_host1x;
-
- err = platform_driver_register(&tegra_hdmi_driver);
- if (err < 0)
- goto unregister_dc;
-
- err = platform_driver_register(&tegra_gr2d_driver);
- if (err < 0)
- goto unregister_hdmi;
-#endif
+ err = platform_driver_register(&tegra_host1x_driver);
+ if (err < 0) {
+ host1x_bus_exit();
+ return err;
+ }
return 0;
-
-#ifdef CONFIG_DRM_TEGRA
-unregister_hdmi:
- platform_driver_unregister(&tegra_hdmi_driver);
-unregister_dc:
- platform_driver_unregister(&tegra_dc_driver);
-unregister_host1x:
- platform_driver_unregister(&tegra_host1x_driver);
- return err;
-#endif
}
module_init(tegra_host1x_init);
static void __exit tegra_host1x_exit(void)
{
-#ifdef CONFIG_DRM_TEGRA
- platform_driver_unregister(&tegra_gr2d_driver);
- platform_driver_unregister(&tegra_hdmi_driver);
- platform_driver_unregister(&tegra_dc_driver);
-#endif
platform_driver_unregister(&tegra_host1x_driver);
+ host1x_bus_exit();
}
module_exit(tegra_host1x_exit);
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index bed90a8131be..a61a976e7a42 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -27,6 +27,7 @@
#include "job.h"
struct host1x_syncpt;
+struct host1x_syncpt_base;
struct host1x_channel;
struct host1x_cdma;
struct host1x_job;
@@ -102,6 +103,7 @@ struct host1x {
void __iomem *regs;
struct host1x_syncpt *syncpt;
+ struct host1x_syncpt_base *bases;
struct device *dev;
struct clk *clk;
@@ -125,7 +127,10 @@ struct host1x {
struct dentry *debugfs;
- void *drm_data;
+ struct mutex devices_lock;
+ struct list_head devices;
+
+ struct list_head list;
};
void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
@@ -301,8 +306,4 @@ static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
host->debug_op->show_mlocks(host, o);
}
-extern struct platform_driver tegra_dc_driver;
-extern struct platform_driver tegra_hdmi_driver;
-extern struct platform_driver tegra_gr2d_driver;
-
#endif
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
deleted file mode 100644
index 8c61ceeaa12d..000000000000
--- a/drivers/gpu/host1x/drm/drm.c
+++ /dev/null
@@ -1,647 +0,0 @@
-/*
- * Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-
-#include <linux/dma-mapping.h>
-#include <asm/dma-iommu.h>
-
-#include <drm/drm.h>
-#include <drm/drmP.h>
-
-#include "host1x_client.h"
-#include "dev.h"
-#include "drm.h"
-#include "gem.h"
-#include "syncpt.h"
-
-#define DRIVER_NAME "tegra"
-#define DRIVER_DESC "NVIDIA Tegra graphics"
-#define DRIVER_DATE "20120330"
-#define DRIVER_MAJOR 0
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 0
-
-struct host1x_drm_client {
- struct host1x_client *client;
- struct device_node *np;
- struct list_head list;
-};
-
-static int host1x_add_drm_client(struct host1x_drm *host1x,
- struct device_node *np)
-{
- struct host1x_drm_client *client;
-
- client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (!client)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&client->list);
- client->np = of_node_get(np);
-
- list_add_tail(&client->list, &host1x->drm_clients);
-
- return 0;
-}
-
-static int host1x_activate_drm_client(struct host1x_drm *host1x,
- struct host1x_drm_client *drm,
- struct host1x_client *client)
-{
- mutex_lock(&host1x->drm_clients_lock);
- list_del_init(&drm->list);
- list_add_tail(&drm->list, &host1x->drm_active);
- drm->client = client;
- mutex_unlock(&host1x->drm_clients_lock);
-
- return 0;
-}
-
-static int host1x_remove_drm_client(struct host1x_drm *host1x,
- struct host1x_drm_client *client)
-{
- mutex_lock(&host1x->drm_clients_lock);
- list_del_init(&client->list);
- mutex_unlock(&host1x->drm_clients_lock);
-
- of_node_put(client->np);
- kfree(client);
-
- return 0;
-}
-
-static int host1x_parse_dt(struct host1x_drm *host1x)
-{
- static const char * const compat[] = {
- "nvidia,tegra20-dc",
- "nvidia,tegra20-hdmi",
- "nvidia,tegra20-gr2d",
- "nvidia,tegra30-dc",
- "nvidia,tegra30-hdmi",
- "nvidia,tegra30-gr2d",
- };
- unsigned int i;
- int err;
-
- for (i = 0; i < ARRAY_SIZE(compat); i++) {
- struct device_node *np;
-
- for_each_child_of_node(host1x->dev->of_node, np) {
- if (of_device_is_compatible(np, compat[i]) &&
- of_device_is_available(np)) {
- err = host1x_add_drm_client(host1x, np);
- if (err < 0)
- return err;
- }
- }
- }
-
- return 0;
-}
-
-int host1x_drm_alloc(struct platform_device *pdev)
-{
- struct host1x_drm *host1x;
- int err;
-
- host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
- if (!host1x)
- return -ENOMEM;
-
- mutex_init(&host1x->drm_clients_lock);
- INIT_LIST_HEAD(&host1x->drm_clients);
- INIT_LIST_HEAD(&host1x->drm_active);
- mutex_init(&host1x->clients_lock);
- INIT_LIST_HEAD(&host1x->clients);
- host1x->dev = &pdev->dev;
-
- err = host1x_parse_dt(host1x);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
- return err;
- }
-
- host1x_set_drm_data(&pdev->dev, host1x);
-
- return 0;
-}
-
-int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm)
-{
- struct host1x_client *client;
-
- mutex_lock(&host1x->clients_lock);
-
- list_for_each_entry(client, &host1x->clients, list) {
- if (client->ops && client->ops->drm_init) {
- int err = client->ops->drm_init(client, drm);
- if (err < 0) {
- dev_err(host1x->dev,
- "DRM setup failed for %s: %d\n",
- dev_name(client->dev), err);
- mutex_unlock(&host1x->clients_lock);
- return err;
- }
- }
- }
-
- mutex_unlock(&host1x->clients_lock);
-
- return 0;
-}
-
-int host1x_drm_exit(struct host1x_drm *host1x)
-{
- struct platform_device *pdev = to_platform_device(host1x->dev);
- struct host1x_client *client;
-
- if (!host1x->drm)
- return 0;
-
- mutex_lock(&host1x->clients_lock);
-
- list_for_each_entry_reverse(client, &host1x->clients, list) {
- if (client->ops && client->ops->drm_exit) {
- int err = client->ops->drm_exit(client);
- if (err < 0) {
- dev_err(host1x->dev,
- "DRM cleanup failed for %s: %d\n",
- dev_name(client->dev), err);
- mutex_unlock(&host1x->clients_lock);
- return err;
- }
- }
- }
-
- mutex_unlock(&host1x->clients_lock);
-
- drm_platform_exit(&tegra_drm_driver, pdev);
- host1x->drm = NULL;
-
- return 0;
-}
-
-int host1x_register_client(struct host1x_drm *host1x,
- struct host1x_client *client)
-{
- struct host1x_drm_client *drm, *tmp;
- int err;
-
- mutex_lock(&host1x->clients_lock);
- list_add_tail(&client->list, &host1x->clients);
- mutex_unlock(&host1x->clients_lock);
-
- list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
- if (drm->np == client->dev->of_node)
- host1x_activate_drm_client(host1x, drm, client);
-
- if (list_empty(&host1x->drm_clients)) {
- struct platform_device *pdev = to_platform_device(host1x->dev);
-
- err = drm_platform_init(&tegra_drm_driver, pdev);
- if (err < 0) {
- dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
- return err;
- }
- }
-
- return 0;
-}
-
-int host1x_unregister_client(struct host1x_drm *host1x,
- struct host1x_client *client)
-{
- struct host1x_drm_client *drm, *tmp;
- int err;
-
- list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
- if (drm->client == client) {
- err = host1x_drm_exit(host1x);
- if (err < 0) {
- dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
- err);
- return err;
- }
-
- host1x_remove_drm_client(host1x, drm);
- break;
- }
- }
-
- mutex_lock(&host1x->clients_lock);
- list_del_init(&client->list);
- mutex_unlock(&host1x->clients_lock);
-
- return 0;
-}
-
-static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
-{
- struct host1x_drm *host1x;
- int err;
-
- host1x = host1x_get_drm_data(drm->dev);
- drm->dev_private = host1x;
- host1x->drm = drm;
-
- drm_mode_config_init(drm);
-
- err = host1x_drm_init(host1x, drm);
- if (err < 0)
- return err;
-
- /*
- * We don't use the drm_irq_install() helpers provided by the DRM
- * core, so we need to set this manually in order to allow the
- * DRM_IOCTL_WAIT_VBLANK to operate correctly.
- */
- drm->irq_enabled = 1;
-
- err = drm_vblank_init(drm, drm->mode_config.num_crtc);
- if (err < 0)
- return err;
-
- err = tegra_drm_fb_init(drm);
- if (err < 0)
- return err;
-
- drm_kms_helper_poll_init(drm);
-
- return 0;
-}
-
-static int tegra_drm_unload(struct drm_device *drm)
-{
- drm_kms_helper_poll_fini(drm);
- tegra_drm_fb_exit(drm);
-
- drm_mode_config_cleanup(drm);
-
- return 0;
-}
-
-static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
-{
- struct host1x_drm_file *fpriv;
-
- fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
- if (!fpriv)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&fpriv->contexts);
- filp->driver_priv = fpriv;
-
- return 0;
-}
-
-static void host1x_drm_context_free(struct host1x_drm_context *context)
-{
- context->client->ops->close_channel(context);
- kfree(context);
-}
-
-static void tegra_drm_lastclose(struct drm_device *drm)
-{
- struct host1x_drm *host1x = drm->dev_private;
-
- tegra_fbdev_restore_mode(host1x->fbdev);
-}
-
-#ifdef CONFIG_DRM_TEGRA_STAGING
-static bool host1x_drm_file_owns_context(struct host1x_drm_file *file,
- struct host1x_drm_context *context)
-{
- struct host1x_drm_context *ctx;
-
- list_for_each_entry(ctx, &file->contexts, list)
- if (ctx == context)
- return true;
-
- return false;
-}
-
-static int tegra_gem_create(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_gem_create *args = data;
- struct tegra_bo *bo;
-
- bo = tegra_bo_create_with_handle(file, drm, args->size,
- &args->handle);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
-
- return 0;
-}
-
-static int tegra_gem_mmap(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_gem_mmap *args = data;
- struct drm_gem_object *gem;
- struct tegra_bo *bo;
-
- gem = drm_gem_object_lookup(drm, file, args->handle);
- if (!gem)
- return -EINVAL;
-
- bo = to_tegra_bo(gem);
-
- args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
-
- drm_gem_object_unreference(gem);
-
- return 0;
-}
-
-static int tegra_syncpt_read(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_syncpt_read *args = data;
- struct host1x *host = dev_get_drvdata(drm->dev);
- struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
-
- if (!sp)
- return -EINVAL;
-
- args->value = host1x_syncpt_read_min(sp);
- return 0;
-}
-
-static int tegra_syncpt_incr(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_syncpt_incr *args = data;
- struct host1x *host = dev_get_drvdata(drm->dev);
- struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
-
- if (!sp)
- return -EINVAL;
-
- return host1x_syncpt_incr(sp);
-}
-
-static int tegra_syncpt_wait(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_syncpt_wait *args = data;
- struct host1x *host = dev_get_drvdata(drm->dev);
- struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
-
- if (!sp)
- return -EINVAL;
-
- return host1x_syncpt_wait(sp, args->thresh, args->timeout,
- &args->value);
-}
-
-static int tegra_open_channel(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_open_channel *args = data;
- struct host1x_client *client;
- struct host1x_drm_context *context;
- struct host1x_drm_file *fpriv = file->driver_priv;
- struct host1x_drm *host1x = drm->dev_private;
- int err = -ENODEV;
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- list_for_each_entry(client, &host1x->clients, list)
- if (client->class == args->client) {
- err = client->ops->open_channel(client, context);
- if (err)
- break;
-
- context->client = client;
- list_add(&context->list, &fpriv->contexts);
- args->context = (uintptr_t)context;
- return 0;
- }
-
- kfree(context);
- return err;
-}
-
-static int tegra_close_channel(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_close_channel *args = data;
- struct host1x_drm_file *fpriv = file->driver_priv;
- struct host1x_drm_context *context =
- (struct host1x_drm_context *)(uintptr_t)args->context;
-
- if (!host1x_drm_file_owns_context(fpriv, context))
- return -EINVAL;
-
- list_del(&context->list);
- host1x_drm_context_free(context);
-
- return 0;
-}
-
-static int tegra_get_syncpt(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_get_syncpt *args = data;
- struct host1x_drm_file *fpriv = file->driver_priv;
- struct host1x_drm_context *context =
- (struct host1x_drm_context *)(uintptr_t)args->context;
- struct host1x_syncpt *syncpt;
-
- if (!host1x_drm_file_owns_context(fpriv, context))
- return -ENODEV;
-
- if (args->index >= context->client->num_syncpts)
- return -EINVAL;
-
- syncpt = context->client->syncpts[args->index];
- args->id = host1x_syncpt_id(syncpt);
-
- return 0;
-}
-
-static int tegra_submit(struct drm_device *drm, void *data,
- struct drm_file *file)
-{
- struct drm_tegra_submit *args = data;
- struct host1x_drm_file *fpriv = file->driver_priv;
- struct host1x_drm_context *context =
- (struct host1x_drm_context *)(uintptr_t)args->context;
-
- if (!host1x_drm_file_owns_context(fpriv, context))
- return -ENODEV;
-
- return context->client->ops->submit(context, args, drm, file);
-}
-#endif
-
-static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
-#ifdef CONFIG_DRM_TEGRA_STAGING
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
-#endif
-};
-
-static const struct file_operations tegra_drm_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = tegra_drm_mmap,
- .poll = drm_poll,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = drm_compat_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
-{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (dc->pipe == pipe)
- return crtc;
- }
-
- return NULL;
-}
-
-static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
-{
- /* TODO: implement real hardware counter using syncpoints */
- return drm_vblank_count(dev, crtc);
-}
-
-static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
-{
- struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (!crtc)
- return -ENODEV;
-
- tegra_dc_enable_vblank(dc);
-
- return 0;
-}
-
-static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
-{
- struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (crtc)
- tegra_dc_disable_vblank(dc);
-}
-
-static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
-{
- struct host1x_drm_file *fpriv = file->driver_priv;
- struct host1x_drm_context *context, *tmp;
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
- tegra_dc_cancel_page_flip(crtc, file);
-
- list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
- host1x_drm_context_free(context);
-
- kfree(fpriv);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *)s->private;
- struct drm_device *drm = node->minor->dev;
- struct drm_framebuffer *fb;
-
- mutex_lock(&drm->mode_config.fb_lock);
-
- list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
- seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
- fb->base.id, fb->width, fb->height, fb->depth,
- fb->bits_per_pixel,
- atomic_read(&fb->refcount.refcount));
- }
-
- mutex_unlock(&drm->mode_config.fb_lock);
-
- return 0;
-}
-
-static struct drm_info_list tegra_debugfs_list[] = {
- { "framebuffers", tegra_debugfs_framebuffers, 0 },
-};
-
-static int tegra_debugfs_init(struct drm_minor *minor)
-{
- return drm_debugfs_create_files(tegra_debugfs_list,
- ARRAY_SIZE(tegra_debugfs_list),
- minor->debugfs_root, minor);
-}
-
-static void tegra_debugfs_cleanup(struct drm_minor *minor)
-{
- drm_debugfs_remove_files(tegra_debugfs_list,
- ARRAY_SIZE(tegra_debugfs_list), minor);
-}
-#endif
-
-struct drm_driver tegra_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
- .load = tegra_drm_load,
- .unload = tegra_drm_unload,
- .open = tegra_drm_open,
- .preclose = tegra_drm_preclose,
- .lastclose = tegra_drm_lastclose,
-
- .get_vblank_counter = tegra_drm_get_vblank_counter,
- .enable_vblank = tegra_drm_enable_vblank,
- .disable_vblank = tegra_drm_disable_vblank,
-
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = tegra_debugfs_init,
- .debugfs_cleanup = tegra_debugfs_cleanup,
-#endif
-
- .gem_free_object = tegra_bo_free_object,
- .gem_vm_ops = &tegra_bo_vm_ops,
- .dumb_create = tegra_bo_dumb_create,
- .dumb_map_offset = tegra_bo_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
-
- .ioctls = tegra_drm_ioctls,
- .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
- .fops = &tegra_drm_fops,
-
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
diff --git a/drivers/gpu/host1x/drm/gr2d.c b/drivers/gpu/host1x/drm/gr2d.c
deleted file mode 100644
index 27ffcf15a4b4..000000000000
--- a/drivers/gpu/host1x/drm/gr2d.c
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * drivers/video/tegra/host/gr2d/gr2d.c
- *
- * Tegra Graphics 2D
- *
- * Copyright (c) 2012-2013, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/export.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/clk.h>
-
-#include "channel.h"
-#include "drm.h"
-#include "gem.h"
-#include "job.h"
-#include "host1x.h"
-#include "host1x_bo.h"
-#include "host1x_client.h"
-#include "syncpt.h"
-
-struct gr2d {
- struct host1x_client client;
- struct clk *clk;
- struct host1x_channel *channel;
- unsigned long *addr_regs;
-};
-
-static inline struct gr2d *to_gr2d(struct host1x_client *client)
-{
- return container_of(client, struct gr2d, client);
-}
-
-static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg);
-
-static int gr2d_client_init(struct host1x_client *client,
- struct drm_device *drm)
-{
- return 0;
-}
-
-static int gr2d_client_exit(struct host1x_client *client)
-{
- return 0;
-}
-
-static int gr2d_open_channel(struct host1x_client *client,
- struct host1x_drm_context *context)
-{
- struct gr2d *gr2d = to_gr2d(client);
-
- context->channel = host1x_channel_get(gr2d->channel);
-
- if (!context->channel)
- return -ENOMEM;
-
- return 0;
-}
-
-static void gr2d_close_channel(struct host1x_drm_context *context)
-{
- host1x_channel_put(context->channel);
-}
-
-static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
- struct drm_file *file,
- u32 handle)
-{
- struct drm_gem_object *gem;
- struct tegra_bo *bo;
-
- gem = drm_gem_object_lookup(drm, file, handle);
- if (!gem)
- return NULL;
-
- mutex_lock(&drm->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&drm->struct_mutex);
-
- bo = to_tegra_bo(gem);
- return &bo->base;
-}
-
-static int gr2d_submit(struct host1x_drm_context *context,
- struct drm_tegra_submit *args, struct drm_device *drm,
- struct drm_file *file)
-{
- struct host1x_job *job;
- unsigned int num_cmdbufs = args->num_cmdbufs;
- unsigned int num_relocs = args->num_relocs;
- unsigned int num_waitchks = args->num_waitchks;
- struct drm_tegra_cmdbuf __user *cmdbufs =
- (void * __user)(uintptr_t)args->cmdbufs;
- struct drm_tegra_reloc __user *relocs =
- (void * __user)(uintptr_t)args->relocs;
- struct drm_tegra_waitchk __user *waitchks =
- (void * __user)(uintptr_t)args->waitchks;
- struct drm_tegra_syncpt syncpt;
- int err;
-
- /* We don't yet support other than one syncpt_incr struct per submit */
- if (args->num_syncpts != 1)
- return -EINVAL;
-
- job = host1x_job_alloc(context->channel, args->num_cmdbufs,
- args->num_relocs, args->num_waitchks);
- if (!job)
- return -ENOMEM;
-
- job->num_relocs = args->num_relocs;
- job->num_waitchk = args->num_waitchks;
- job->client = (u32)args->context;
- job->class = context->client->class;
- job->serialize = true;
-
- while (num_cmdbufs) {
- struct drm_tegra_cmdbuf cmdbuf;
- struct host1x_bo *bo;
-
- err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
- if (err)
- goto fail;
-
- bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
- if (!bo) {
- err = -ENOENT;
- goto fail;
- }
-
- host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
- num_cmdbufs--;
- cmdbufs++;
- }
-
- err = copy_from_user(job->relocarray, relocs,
- sizeof(*relocs) * num_relocs);
- if (err)
- goto fail;
-
- while (num_relocs--) {
- struct host1x_reloc *reloc = &job->relocarray[num_relocs];
- struct host1x_bo *cmdbuf, *target;
-
- cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
- target = host1x_bo_lookup(drm, file, (u32)reloc->target);
-
- reloc->cmdbuf = cmdbuf;
- reloc->target = target;
-
- if (!reloc->target || !reloc->cmdbuf) {
- err = -ENOENT;
- goto fail;
- }
- }
-
- err = copy_from_user(job->waitchk, waitchks,
- sizeof(*waitchks) * num_waitchks);
- if (err)
- goto fail;
-
- err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
- sizeof(syncpt));
- if (err)
- goto fail;
-
- job->syncpt_id = syncpt.id;
- job->syncpt_incrs = syncpt.incrs;
- job->timeout = 10000;
- job->is_addr_reg = gr2d_is_addr_reg;
-
- if (args->timeout && args->timeout < 10000)
- job->timeout = args->timeout;
-
- err = host1x_job_pin(job, context->client->dev);
- if (err)
- goto fail;
-
- err = host1x_job_submit(job);
- if (err)
- goto fail_submit;
-
- args->fence = job->syncpt_end;
-
- host1x_job_put(job);
- return 0;
-
-fail_submit:
- host1x_job_unpin(job);
-fail:
- host1x_job_put(job);
- return err;
-}
-
-static struct host1x_client_ops gr2d_client_ops = {
- .drm_init = gr2d_client_init,
- .drm_exit = gr2d_client_exit,
- .open_channel = gr2d_open_channel,
- .close_channel = gr2d_close_channel,
- .submit = gr2d_submit,
-};
-
-static void gr2d_init_addr_reg_map(struct device *dev, struct gr2d *gr2d)
-{
- const u32 gr2d_addr_regs[] = {0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d, 0x31,
- 0x32, 0x48, 0x49, 0x4a, 0x4b, 0x4c};
- unsigned long *bitmap;
- int i;
-
- bitmap = devm_kzalloc(dev, DIV_ROUND_UP(256, BITS_PER_BYTE),
- GFP_KERNEL);
-
- for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); ++i) {
- u32 reg = gr2d_addr_regs[i];
- bitmap[BIT_WORD(reg)] |= BIT_MASK(reg);
- }
-
- gr2d->addr_regs = bitmap;
-}
-
-static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg)
-{
- struct gr2d *gr2d = dev_get_drvdata(dev);
-
- switch (class) {
- case HOST1X_CLASS_HOST1X:
- return reg == 0x2b;
- case HOST1X_CLASS_GR2D:
- case HOST1X_CLASS_GR2D_SB:
- reg &= 0xff;
- if (gr2d->addr_regs[BIT_WORD(reg)] & BIT_MASK(reg))
- return 1;
- default:
- return 0;
- }
-}
-
-static const struct of_device_id gr2d_match[] = {
- { .compatible = "nvidia,tegra30-gr2d" },
- { .compatible = "nvidia,tegra20-gr2d" },
- { },
-};
-
-static int gr2d_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct host1x_drm *host1x = host1x_get_drm_data(dev->parent);
- int err;
- struct gr2d *gr2d = NULL;
- struct host1x_syncpt **syncpts;
-
- gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
- if (!gr2d)
- return -ENOMEM;
-
- syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
- if (!syncpts)
- return -ENOMEM;
-
- gr2d->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(gr2d->clk)) {
- dev_err(dev, "cannot get clock\n");
- return PTR_ERR(gr2d->clk);
- }
-
- err = clk_prepare_enable(gr2d->clk);
- if (err) {
- dev_err(dev, "cannot turn on clock\n");
- return err;
- }
-
- gr2d->channel = host1x_channel_request(dev);
- if (!gr2d->channel)
- return -ENOMEM;
-
- *syncpts = host1x_syncpt_request(dev, false);
- if (!(*syncpts)) {
- host1x_channel_free(gr2d->channel);
- return -ENOMEM;
- }
-
- gr2d->client.ops = &gr2d_client_ops;
- gr2d->client.dev = dev;
- gr2d->client.class = HOST1X_CLASS_GR2D;
- gr2d->client.syncpts = syncpts;
- gr2d->client.num_syncpts = 1;
-
- err = host1x_register_client(host1x, &gr2d->client);
- if (err < 0) {
- dev_err(dev, "failed to register host1x client: %d\n", err);
- return err;
- }
-
- gr2d_init_addr_reg_map(dev, gr2d);
-
- platform_set_drvdata(pdev, gr2d);
-
- return 0;
-}
-
-static int __exit gr2d_remove(struct platform_device *pdev)
-{
- struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
- struct gr2d *gr2d = platform_get_drvdata(pdev);
- unsigned int i;
- int err;
-
- err = host1x_unregister_client(host1x, &gr2d->client);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to unregister client: %d\n", err);
- return err;
- }
-
- for (i = 0; i < gr2d->client.num_syncpts; i++)
- host1x_syncpt_free(gr2d->client.syncpts[i]);
-
- host1x_channel_free(gr2d->channel);
- clk_disable_unprepare(gr2d->clk);
-
- return 0;
-}
-
-struct platform_driver tegra_gr2d_driver = {
- .probe = gr2d_probe,
- .remove = __exit_p(gr2d_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = "gr2d",
- .of_match_table = gr2d_match,
- }
-};
diff --git a/drivers/gpu/host1x/host1x.h b/drivers/gpu/host1x/host1x.h
deleted file mode 100644
index a2bc1e65e972..000000000000
--- a/drivers/gpu/host1x/host1x.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Tegra host1x driver
- *
- * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __LINUX_HOST1X_H
-#define __LINUX_HOST1X_H
-
-enum host1x_class {
- HOST1X_CLASS_HOST1X = 0x1,
- HOST1X_CLASS_GR2D = 0x51,
- HOST1X_CLASS_GR2D_SB = 0x52
-};
-
-#endif
diff --git a/drivers/gpu/host1x/host1x_bo.h b/drivers/gpu/host1x/host1x_bo.h
deleted file mode 100644
index 4c1f10bd773d..000000000000
--- a/drivers/gpu/host1x/host1x_bo.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Tegra host1x Memory Management Abstraction header
- *
- * Copyright (c) 2012-2013, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _HOST1X_BO_H
-#define _HOST1X_BO_H
-
-struct host1x_bo;
-
-struct host1x_bo_ops {
- struct host1x_bo *(*get)(struct host1x_bo *bo);
- void (*put)(struct host1x_bo *bo);
- dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
- void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
- void *(*mmap)(struct host1x_bo *bo);
- void (*munmap)(struct host1x_bo *bo, void *addr);
- void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
- void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
-};
-
-struct host1x_bo {
- const struct host1x_bo_ops *ops;
-};
-
-static inline void host1x_bo_init(struct host1x_bo *bo,
- const struct host1x_bo_ops *ops)
-{
- bo->ops = ops;
-}
-
-static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
-{
- return bo->ops->get(bo);
-}
-
-static inline void host1x_bo_put(struct host1x_bo *bo)
-{
- bo->ops->put(bo);
-}
-
-static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
- struct sg_table **sgt)
-{
- return bo->ops->pin(bo, sgt);
-}
-
-static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
-{
- bo->ops->unpin(bo, sgt);
-}
-
-static inline void *host1x_bo_mmap(struct host1x_bo *bo)
-{
- return bo->ops->mmap(bo);
-}
-
-static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
-{
- bo->ops->munmap(bo, addr);
-}
-
-static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
-{
- return bo->ops->kmap(bo, pagenum);
-}
-
-static inline void host1x_bo_kunmap(struct host1x_bo *bo,
- unsigned int pagenum, void *addr)
-{
- bo->ops->kunmap(bo, pagenum, addr);
-}
-
-#endif
diff --git a/drivers/gpu/host1x/hw/Makefile b/drivers/gpu/host1x/hw/Makefile
deleted file mode 100644
index 9b50863a2236..000000000000
--- a/drivers/gpu/host1x/hw/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-ccflags-y = -Idrivers/gpu/host1x
-
-host1x-hw-objs = \
- host1x01.o
-
-obj-$(CONFIG_TEGRA_HOST1X) += host1x-hw.o
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 2ee4ad55c4db..37e2a63241a9 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -20,10 +20,10 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
-#include "cdma.h"
-#include "channel.h"
-#include "dev.h"
-#include "debug.h"
+#include "../cdma.h"
+#include "../channel.h"
+#include "../dev.h"
+#include "../debug.h"
/*
* Put the restart at the end of pushbuffer memory
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index ee199623e365..4608257ab656 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -16,15 +16,15 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/host1x.h>
#include <linux/slab.h>
+
#include <trace/events/host1x.h>
-#include "host1x.h"
-#include "host1x_bo.h"
-#include "channel.h"
-#include "dev.h"
-#include "intr.h"
-#include "job.h"
+#include "../channel.h"
+#include "../dev.h"
+#include "../intr.h"
+#include "../job.h"
#define HOST1X_CHANNEL_SIZE 16384
#define TRACE_MAX_LENGTH 128U
@@ -67,6 +67,22 @@ static void submit_gathers(struct host1x_job *job)
}
}
+static inline void synchronize_syncpt_base(struct host1x_job *job)
+{
+ struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
+ struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
+ u32 id, value;
+
+ value = host1x_syncpt_read_max(sp);
+ id = sp->base->id;
+
+ host1x_cdma_push(&job->channel->cdma,
+ host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
+ HOST1X_UCLASS_LOAD_SYNCPT_BASE, 1),
+ HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(id) |
+ HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(value));
+}
+
static int channel_submit(struct host1x_job *job)
{
struct host1x_channel *ch = job->channel;
@@ -118,6 +134,10 @@ static int channel_submit(struct host1x_job *job)
host1x_syncpt_read_max(sp)));
}
+ /* Synchronize base register to allow using it for relative waiting */
+ if (sp->base)
+ synchronize_syncpt_base(job);
+
syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
job->syncpt_end = syncval;
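As a stand-alone arithmetic check of the word that synchronize_syncpt_base() pushes as the data argument of host1x_cdma_push() (not part of the patch; the field helpers are restated locally so this compiles outside the kernel tree), a base index of 3 and a max value of 0x1234 combine into 0x03001234:

#include <stdint.h>
#include <stdio.h>

/* Local restatements of the LOAD_SYNCPT_BASE field helpers from
 * hw_host1x01_uclass.h / hw_host1x02_uclass.h. */
static uint32_t base_indx_f(uint32_t v) { return (v & 0xff) << 24; }
static uint32_t value_f(uint32_t v)     { return (v & 0xffffff) << 0; }

int main(void)
{
	uint32_t id = 3;         /* sp->base->id */
	uint32_t value = 0x1234; /* host1x_syncpt_read_max(sp) */

	/* Prints 0x03001234: base index in bits 31:24, value in bits 23:0. */
	printf("0x%08x\n", base_indx_f(id) | value_f(value));
	return 0;
}
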
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 334c038052f5..640c75ca5a8b 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -15,18 +15,10 @@
*
*/
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-
-#include <linux/io.h>
-
-#include "dev.h"
-#include "debug.h"
-#include "cdma.h"
-#include "channel.h"
-#include "host1x_bo.h"
+#include "../dev.h"
+#include "../debug.h"
+#include "../cdma.h"
+#include "../channel.h"
#define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
index a14e91cd1e58..859b73beb4d0 100644
--- a/drivers/gpu/host1x/hw/host1x01.c
+++ b/drivers/gpu/host1x/hw/host1x01.c
@@ -17,17 +17,17 @@
*/
/* include hw specification */
-#include "hw/host1x01.h"
-#include "hw/host1x01_hardware.h"
+#include "host1x01.h"
+#include "host1x01_hardware.h"
/* include code */
-#include "hw/cdma_hw.c"
-#include "hw/channel_hw.c"
-#include "hw/debug_hw.c"
-#include "hw/intr_hw.c"
-#include "hw/syncpt_hw.c"
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
-#include "dev.h"
+#include "../dev.h"
int host1x01_init(struct host1x *host)
{
diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c
new file mode 100644
index 000000000000..e98caca0ca42
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02.c
@@ -0,0 +1,42 @@
+/*
+ * Host1x init for Tegra114 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x01.h"
+#include "host1x01_hardware.h"
+
+/* include code */
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x02_init(struct host1x *host)
+{
+ host->channel_op = &host1x_channel_ops;
+ host->cdma_op = &host1x_cdma_ops;
+ host->cdma_pb_op = &host1x_pushbuffer_ops;
+ host->syncpt_op = &host1x_syncpt_ops;
+ host->intr_op = &host1x_intr_ops;
+ host->debug_op = &host1x_debug_ops;
+
+ return 0;
+}
diff --git a/arch/arm/mach-highbank/hotplug.c b/drivers/gpu/host1x/hw/host1x02.h
index a019e4e86e51..f7486609a90e 100644
--- a/arch/arm/mach-highbank/hotplug.c
+++ b/drivers/gpu/host1x/hw/host1x02.h
@@ -1,5 +1,7 @@
/*
- * Copyright 2011 Calxeda, Inc.
+ * Host1x init for Tegra114 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -10,28 +12,15 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/kernel.h>
-#include <asm/cacheflush.h>
-
-#include "core.h"
-#include "sysregs.h"
-extern void secondary_startup(void);
+#ifndef HOST1X_HOST1X02_H
+#define HOST1X_HOST1X02_H
-/*
- * platform-specific code to shutdown a CPU
- *
- */
-void __ref highbank_cpu_die(unsigned int cpu)
-{
- highbank_set_cpu_jump(cpu, phys_to_virt(0));
+struct host1x;
- flush_cache_louis();
- highbank_set_core_pwr();
+int host1x02_init(struct host1x *host);
- while (1)
- cpu_do_idle();
-}
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
index 42f3ce19ca32..f7553599ee27 100644
--- a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
@@ -111,6 +111,12 @@ static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
}
#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+ return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+ host1x_uclass_load_syncpt_base_r()
static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
{
return (v & 0xff) << 24;
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_channel.h b/drivers/gpu/host1x/hw/hw_host1x02_channel.h
new file mode 100644
index 000000000000..e490bcde33fe
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_channel.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef HOST1X_HW_HOST1X02_CHANNEL_H
+#define HOST1X_HW_HOST1X02_CHANNEL_H
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_CHANNEL_FIFOSTAT \
+ host1x_channel_fifostat_r()
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+ return (r >> 11) & 0x1;
+}
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
+ host1x_channel_fifostat_cfempty_v(r)
+static inline u32 host1x_channel_dmastart_r(void)
+{
+ return 0x14;
+}
+#define HOST1X_CHANNEL_DMASTART \
+ host1x_channel_dmastart_r()
+static inline u32 host1x_channel_dmaput_r(void)
+{
+ return 0x18;
+}
+#define HOST1X_CHANNEL_DMAPUT \
+ host1x_channel_dmaput_r()
+static inline u32 host1x_channel_dmaget_r(void)
+{
+ return 0x1c;
+}
+#define HOST1X_CHANNEL_DMAGET \
+ host1x_channel_dmaget_r()
+static inline u32 host1x_channel_dmaend_r(void)
+{
+ return 0x20;
+}
+#define HOST1X_CHANNEL_DMAEND \
+ host1x_channel_dmaend_r()
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+ return 0x24;
+}
+#define HOST1X_CHANNEL_DMACTRL \
+ host1x_channel_dmactrl_r()
+static inline u32 host1x_channel_dmactrl_dmastop(void)
+{
+ return 1 << 0;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
+ host1x_channel_dmactrl_dmastop()
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+ return (r >> 0) & 0x1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
+ host1x_channel_dmactrl_dmastop_v(r)
+static inline u32 host1x_channel_dmactrl_dmagetrst(void)
+{
+ return 1 << 1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
+ host1x_channel_dmactrl_dmagetrst()
+static inline u32 host1x_channel_dmactrl_dmainitget(void)
+{
+ return 1 << 2;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
+ host1x_channel_dmactrl_dmainitget()
+
+#endif
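A minimal sketch (not part of the patch) of how the channel accessors above are meant to be consumed; host1x_ch_readl() is assumed here to be the channel MMIO read helper from channel.h, which is not shown in this hunk:

static bool host1x_channel_fifo_empty(struct host1x_channel *ch)
{
	u32 stat = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);

	/* FIFOSTAT bit 11 reports an empty command FIFO. */
	return HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(stat) != 0;
}
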
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_sync.h b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
new file mode 100644
index 000000000000..4495401525e8
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef HOST1X_HW_HOST1X02_SYNC_H
+#define HOST1X_HW_HOST1X02_SYNC_H
+
+#define REGISTER_STRIDE 4
+
+static inline u32 host1x_sync_syncpt_r(unsigned int id)
+{
+ return 0x400 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT(id) \
+ host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+ return 0x40 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+ host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+ return 0x60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+ host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+ return 0x68 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+ host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
+{
+ return 0x80 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CF_SETUP(channel) \
+ host1x_sync_cf_setup_r(channel)
+static inline u32 host1x_sync_cf_setup_base_v(u32 r)
+{
+ return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
+ host1x_sync_cf_setup_base_v(r)
+static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
+ host1x_sync_cf_setup_limit_v(r)
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+ return 0xac;
+}
+#define HOST1X_SYNC_CMDPROC_STOP \
+ host1x_sync_cmdproc_stop_r()
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+ return 0xb0;
+}
+#define HOST1X_SYNC_CH_TEARDOWN \
+ host1x_sync_ch_teardown_r()
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+ return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+ host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+ return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+ host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+ return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+ host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
+{
+ return 0x340 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_MLOCK_OWNER(id) \
+ host1x_sync_mlock_owner_r(id)
+static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
+{
+ return (v & 0xf) << 8;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
+ host1x_sync_mlock_owner_chid_f(v)
+static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
+{
+ return (r >> 1) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
+ host1x_sync_mlock_owner_cpu_owns_v(r)
+static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
+{
+ return (r >> 0) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
+ host1x_sync_mlock_owner_ch_owns_v(r)
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+ return 0x500 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+ host1x_sync_syncpt_int_thresh_r(id)
+static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
+{
+ return 0x600 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_BASE(id) \
+ host1x_sync_syncpt_base_r(id)
+static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
+{
+ return 0x700 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
+ host1x_sync_syncpt_cpu_incr_r(id)
+static inline u32 host1x_sync_cbread_r(unsigned int channel)
+{
+ return 0x720 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBREAD(channel) \
+ host1x_sync_cbread_r(channel)
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+ return 0x74c;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL \
+ host1x_sync_cfpeek_ctrl_r()
+static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
+{
+ return (v & 0x3ff) << 0;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
+ host1x_sync_cfpeek_ctrl_addr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
+{
+ return (v & 0xf) << 16;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
+ host1x_sync_cfpeek_ctrl_channr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
+{
+ return (v & 0x1) << 31;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
+ host1x_sync_cfpeek_ctrl_ena_f(v)
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+ return 0x750;
+}
+#define HOST1X_SYNC_CFPEEK_READ \
+ host1x_sync_cfpeek_read_r()
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+ return 0x754;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS \
+ host1x_sync_cfpeek_ptrs_r()
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+ return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
+ host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
+ host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
+static inline u32 host1x_sync_cbstat_r(unsigned int channel)
+{
+ return 0x758 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBSTAT(channel) \
+ host1x_sync_cbstat_r(channel)
+static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
+{
+ return (r >> 0) & 0xffff;
+}
+#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
+ host1x_sync_cbstat_cboffset_v(r)
+static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
+ host1x_sync_cbstat_cbclass_v(r)
+
+#endif
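A minimal sketch (not part of the patch) of reading a syncpoint through the sync accessors above; host1x_sync_readl() is assumed here to be the sync-aperture MMIO helper declared in dev.h, roughly what the syncpt_hw.c load op does:

static u32 host1x02_syncpt_load(struct host1x *host, unsigned int id)
{
	return host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(id));
}
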
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
new file mode 100644
index 000000000000..a3b3c9874413
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef HOST1X_HW_HOST1X02_UCLASS_H
+#define HOST1X_HW_HOST1X02_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+ host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+ return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+ host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+ return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+ host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+ host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+ host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+ return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+ host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+ host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+ return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+ host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+ return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+ host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+ return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+ host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+ return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+ host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+ return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+ host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+ return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+ host1x_uclass_indoff_rwn_read_v()
+
+#endif
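The accessor naming scheme documented at the top of hw_host1x02_uclass.h is easiest to see in use. The sketch below is a standalone userspace illustration, not part of the patch: it copies two of the INDOFF field setters verbatim and composes a full register value from them; the operand values 0x2a and 0x10 are made up.

/* Standalone illustration of the <x>_<y>_f() naming scheme; the two
 * helpers are copied verbatim from hw_host1x02_uclass.h. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
{
	return (v & 0xff) << 18;
}

static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
{
	return (v & 0xffff) << 2;
}

int main(void)
{
	/* Compose a full INDOFF register value by OR-ing the field setters. */
	u32 indoff = host1x_uclass_indoff_indmodid_f(0x2a) |
		     host1x_uclass_indoff_indroffset_f(0x10);

	printf("INDOFF = 0x%08x\n", indoff);	/* prints 0x00a80040 */
	return 0;
}

The same pattern applies to the _v() extractors, which shift a field back down to bit 0 for comparison.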
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index b592eef1efcb..b26dcc83bc1b 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -22,8 +22,8 @@
#include <linux/io.h>
#include <asm/mach/irq.h>
-#include "intr.h"
-#include "dev.h"
+#include "../intr.h"
+#include "../dev.h"
/*
* Sync point threshold interrupt service function
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index 0cf6095d3367..56e85395ac24 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -18,8 +18,8 @@
#include <linux/io.h>
-#include "dev.h"
-#include "syncpt.h"
+#include "../dev.h"
+#include "../syncpt.h"
/*
* Write the current syncpoint value back to hw.
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index c4e1050f2252..de5ec333ce1a 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -18,6 +18,7 @@
#include <linux/dma-mapping.h>
#include <linux/err.h>
+#include <linux/host1x.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
@@ -27,7 +28,6 @@
#include "channel.h"
#include "dev.h"
-#include "host1x_bo.h"
#include "job.h"
#include "syncpt.h"
@@ -264,7 +264,7 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
}
static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
- unsigned int offset)
+ unsigned int offset)
{
offset *= sizeof(u32);
@@ -281,7 +281,7 @@ struct host1x_firewall {
unsigned int num_relocs;
struct host1x_reloc *reloc;
- struct host1x_bo *cmdbuf_id;
+ struct host1x_bo *cmdbuf;
unsigned int offset;
u32 words;
@@ -291,25 +291,37 @@ struct host1x_firewall {
u32 count;
};
+static int check_register(struct host1x_firewall *fw, unsigned long offset)
+{
+ if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
+ if (!fw->num_relocs)
+ return -EINVAL;
+
+ if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
+ return -EINVAL;
+
+ fw->num_relocs--;
+ fw->reloc++;
+ }
+
+ return 0;
+}
+
static int check_mask(struct host1x_firewall *fw)
{
u32 mask = fw->mask;
u32 reg = fw->reg;
+ int ret;
while (mask) {
if (fw->words == 0)
return -EINVAL;
if (mask & 1) {
- if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
- if (!fw->num_relocs)
- return -EINVAL;
- if (!check_reloc(fw->reloc, fw->cmdbuf_id,
- fw->offset))
- return -EINVAL;
- fw->reloc++;
- fw->num_relocs--;
- }
+ ret = check_register(fw, reg);
+ if (ret < 0)
+ return ret;
+
fw->words--;
fw->offset++;
}
@@ -324,19 +336,16 @@ static int check_incr(struct host1x_firewall *fw)
{
u32 count = fw->count;
u32 reg = fw->reg;
+ int ret;
while (count) {
if (fw->words == 0)
return -EINVAL;
- if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
- if (!fw->num_relocs)
- return -EINVAL;
- if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
- return -EINVAL;
- fw->reloc++;
- fw->num_relocs--;
- }
+ ret = check_register(fw, reg);
+ if (ret < 0)
+ return ret;
+
reg++;
fw->words--;
fw->offset++;
@@ -348,21 +357,17 @@ static int check_incr(struct host1x_firewall *fw)
static int check_nonincr(struct host1x_firewall *fw)
{
- int is_addr_reg = fw->job->is_addr_reg(fw->dev, fw->class, fw->reg);
u32 count = fw->count;
+ int ret;
while (count) {
if (fw->words == 0)
return -EINVAL;
- if (is_addr_reg) {
- if (!fw->num_relocs)
- return -EINVAL;
- if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
- return -EINVAL;
- fw->reloc++;
- fw->num_relocs--;
- }
+ ret = check_register(fw, fw->reg);
+ if (ret < 0)
+ return ret;
+
fw->words--;
fw->offset++;
count--;
@@ -381,7 +386,7 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
return 0;
fw->words = g->words;
- fw->cmdbuf_id = g->bo;
+ fw->cmdbuf = g->bo;
fw->offset = 0;
while (fw->words && !err) {
@@ -436,10 +441,6 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
}
}
- /* No relocs should remain at this point */
- if (fw->num_relocs)
- err = -EINVAL;
-
out:
return err;
}
@@ -493,6 +494,10 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
offset += g->words * sizeof(u32);
}
+ /* No relocs should remain at this point */
+ if (fw.num_relocs)
+ return -EINVAL;
+
return 0;
}
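The three command-stream walkers (check_mask, check_incr, check_nonincr) now share the check_register() helper introduced above. The following is a simplified userspace model of that helper's contract, with an invented is_addr_reg() predicate and without the cmdbuf/offset validation done by check_reloc(); it only shows how each address register consumes exactly one pending relocation.

/* Simplified model: one pending relocation must exist for every
 * command word that targets an address register. */
#include <stdbool.h>
#include <stdio.h>

struct fake_firewall {
	unsigned int num_relocs;	/* relocations not yet consumed */
};

static bool is_addr_reg(unsigned long offset)
{
	return offset == 0x2b;		/* pretend one register takes addresses */
}

static int check_register(struct fake_firewall *fw, unsigned long offset)
{
	if (is_addr_reg(offset)) {
		if (!fw->num_relocs)
			return -1;	/* would be -EINVAL in the driver */
		fw->num_relocs--;	/* consume the relocation */
	}
	return 0;
}

int main(void)
{
	struct fake_firewall fw = { .num_relocs = 1 };

	printf("%d\n", check_register(&fw, 0x2b));	/* 0: reloc consumed */
	printf("%d\n", check_register(&fw, 0x2b));	/* -1: no reloc left */
	return 0;
}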
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index fba45f20458e..33a697d6dcef 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -34,15 +34,6 @@ struct host1x_cmdbuf {
u32 pad;
};
-struct host1x_reloc {
- struct host1x_bo *cmdbuf;
- u32 cmdbuf_offset;
- struct host1x_bo *target;
- u32 target_offset;
- u32 shift;
- u32 pad;
-};
-
struct host1x_waitchk {
struct host1x_bo *bo;
u32 offset;
@@ -56,105 +47,6 @@ struct host1x_job_unpin_data {
};
/*
- * Each submit is tracked as a host1x_job.
- */
-struct host1x_job {
- /* When refcount goes to zero, job can be freed */
- struct kref ref;
-
- /* List entry */
- struct list_head list;
-
- /* Channel where job is submitted to */
- struct host1x_channel *channel;
-
- u32 client;
-
- /* Gathers and their memory */
- struct host1x_job_gather *gathers;
- unsigned int num_gathers;
-
- /* Wait checks to be processed at submit time */
- struct host1x_waitchk *waitchk;
- unsigned int num_waitchk;
- u32 waitchk_mask;
-
- /* Array of handles to be pinned & unpinned */
- struct host1x_reloc *relocarray;
- unsigned int num_relocs;
- struct host1x_job_unpin_data *unpins;
- unsigned int num_unpins;
-
- dma_addr_t *addr_phys;
- dma_addr_t *gather_addr_phys;
- dma_addr_t *reloc_addr_phys;
-
- /* Sync point id, number of increments and end related to the submit */
- u32 syncpt_id;
- u32 syncpt_incrs;
- u32 syncpt_end;
-
- /* Maximum time to wait for this job */
- unsigned int timeout;
-
- /* Index and number of slots used in the push buffer */
- unsigned int first_get;
- unsigned int num_slots;
-
- /* Copy of gathers */
- size_t gather_copy_size;
- dma_addr_t gather_copy;
- u8 *gather_copy_mapped;
-
- /* Check if register is marked as an address reg */
- int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
-
- /* Request a SETCLASS to this class */
- u32 class;
-
- /* Add a channel wait for previous ops to complete */
- bool serialize;
-};
-/*
- * Allocate memory for a job. Just enough memory will be allocated to
- * accomodate the submit.
- */
-struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
- u32 num_cmdbufs, u32 num_relocs,
- u32 num_waitchks);
-
-/*
- * Add a gather to a job.
- */
-void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
- u32 words, u32 offset);
-
-/*
- * Increment reference going to host1x_job.
- */
-struct host1x_job *host1x_job_get(struct host1x_job *job);
-
-/*
- * Decrement reference job, free if goes to zero.
- */
-void host1x_job_put(struct host1x_job *job);
-
-/*
- * Pin memory related to job. This handles relocation of addresses to the
- * host1x address space. Handles both the gather memory and any other memory
- * referred to from the gather buffers.
- *
- * Handles also patching out host waits that would wait for an expired sync
- * point value.
- */
-int host1x_job_pin(struct host1x_job *job, struct device *dev);
-
-/*
- * Unpin memory related to job.
- */
-void host1x_job_unpin(struct host1x_job *job);
-
-/*
* Dump contents of job to debug output.
*/
void host1x_job_dump(struct device *dev, struct host1x_job *job);
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 409745b949db..159c479829c9 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -30,9 +30,32 @@
#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15
-static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
- struct device *dev,
- bool client_managed)
+static struct host1x_syncpt_base *
+host1x_syncpt_base_request(struct host1x *host)
+{
+ struct host1x_syncpt_base *bases = host->bases;
+ unsigned int i;
+
+ for (i = 0; i < host->info->nb_bases; i++)
+ if (!bases[i].requested)
+ break;
+
+ if (i >= host->info->nb_bases)
+ return NULL;
+
+ bases[i].requested = true;
+ return &bases[i];
+}
+
+static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
+{
+ if (base)
+ base->requested = false;
+}
+
+static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+ struct device *dev,
+ unsigned long flags)
{
int i;
struct host1x_syncpt *sp = host->syncpt;
@@ -44,6 +67,12 @@ static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
if (i >= host->info->nb_pts)
return NULL;
+ if (flags & HOST1X_SYNCPT_HAS_BASE) {
+ sp->base = host1x_syncpt_base_request(host);
+ if (!sp->base)
+ return NULL;
+ }
+
name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
dev ? dev_name(dev) : NULL);
if (!name)
@@ -51,7 +80,11 @@ static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
sp->dev = dev;
sp->name = name;
- sp->client_managed = client_managed;
+
+ if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
+ sp->client_managed = true;
+ else
+ sp->client_managed = false;
return sp;
}
@@ -303,25 +336,35 @@ int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
int host1x_syncpt_init(struct host1x *host)
{
+ struct host1x_syncpt_base *bases;
struct host1x_syncpt *syncpt;
int i;
syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!syncpt)
return -ENOMEM;
- for (i = 0; i < host->info->nb_pts; ++i) {
+ bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases,
+ GFP_KERNEL);
+ if (!bases)
+ return -ENOMEM;
+
+ for (i = 0; i < host->info->nb_pts; i++) {
syncpt[i].id = i;
syncpt[i].host = host;
}
+ for (i = 0; i < host->info->nb_bases; i++)
+ bases[i].id = i;
+
host->syncpt = syncpt;
+ host->bases = bases;
host1x_syncpt_restore(host);
/* Allocate sync point to use for clearing waits for expired fences */
- host->nop_sp = _host1x_syncpt_alloc(host, NULL, false);
+ host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
if (!host->nop_sp)
return -ENOMEM;
@@ -329,10 +372,10 @@ int host1x_syncpt_init(struct host1x *host)
}
struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
- bool client_managed)
+ unsigned long flags)
{
struct host1x *host = dev_get_drvdata(dev->parent);
- return _host1x_syncpt_alloc(host, dev, client_managed);
+ return host1x_syncpt_alloc(host, dev, flags);
}
void host1x_syncpt_free(struct host1x_syncpt *sp)
@@ -340,7 +383,9 @@ void host1x_syncpt_free(struct host1x_syncpt *sp)
if (!sp)
return;
+ host1x_syncpt_base_free(sp->base);
kfree(sp->name);
+ sp->base = NULL;
sp->dev = NULL;
sp->name = NULL;
sp->client_managed = false;
@@ -354,6 +399,25 @@ void host1x_syncpt_deinit(struct host1x *host)
kfree(sp->name);
}
+/*
+ * Read max. It indicates how many operations there are in the queue, either
+ * in the channel or in a software thread.
+ */
+u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
+{
+ smp_rmb();
+ return (u32)atomic_read(&sp->max_val);
+}
+
+/*
+ * Read min, which is a shadow of the current sync point value in hardware.
+ */
+u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
+{
+ smp_rmb();
+ return (u32)atomic_read(&sp->min_val);
+}
+
int host1x_syncpt_nb_pts(struct host1x *host)
{
return host->info->nb_pts;
@@ -375,3 +439,13 @@ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
return NULL;
return host->syncpt + id;
}
+
+struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
+{
+ return sp ? sp->base : NULL;
+}
+
+u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
+{
+ return base->id;
+}
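Wait bases are now handed out from a small pool guarded by a per-base 'requested' flag, and clients opt in by passing HOST1X_SYNCPT_HAS_BASE (optionally together with HOST1X_SYNCPT_CLIENT_MANAGED) to host1x_syncpt_request(). The sketch below is a standalone userspace model of the first-fit pool only; NB_BASES and the struct name are illustrative stand-ins for host->info->nb_bases and struct host1x_syncpt_base.

#include <stdbool.h>
#include <stdio.h>

#define NB_BASES 8	/* stand-in for host->info->nb_bases */

struct syncpt_base {
	unsigned int id;
	bool requested;
};

static struct syncpt_base bases[NB_BASES];

/* Mirrors host1x_syncpt_base_request(): first-fit scan over the pool. */
static struct syncpt_base *base_request(void)
{
	unsigned int i;

	for (i = 0; i < NB_BASES; i++)
		if (!bases[i].requested)
			break;

	if (i >= NB_BASES)
		return NULL;

	bases[i].requested = true;
	return &bases[i];
}

/* Mirrors host1x_syncpt_base_free(): releasing just clears the flag. */
static void base_free(struct syncpt_base *base)
{
	if (base)
		base->requested = false;
}

int main(void)
{
	unsigned int i;
	struct syncpt_base *a, *b;

	for (i = 0; i < NB_BASES; i++)
		bases[i].id = i;

	a = base_request();
	b = base_request();
	printf("%u %u\n", a->id, b->id);	/* 0 1 */

	base_free(a);
	base_free(b);
	return 0;
}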
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 267c0b9d3647..9056465ecd3f 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -20,6 +20,7 @@
#define __HOST1X_SYNCPT_H
#include <linux/atomic.h>
+#include <linux/host1x.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -30,6 +31,11 @@ struct host1x;
/* Reserved for replacing an expired wait with a NOP */
#define HOST1X_SYNCPT_RESERVED 0
+struct host1x_syncpt_base {
+ unsigned int id;
+ bool requested;
+};
+
struct host1x_syncpt {
int id;
atomic_t min_val;
@@ -39,6 +45,7 @@ struct host1x_syncpt {
bool client_managed;
struct host1x *host;
struct device *dev;
+ struct host1x_syncpt_base *base;
/* interrupt data */
struct host1x_syncpt_intr intr;
@@ -50,25 +57,6 @@ int host1x_syncpt_init(struct host1x *host);
/* Free sync point array */
void host1x_syncpt_deinit(struct host1x *host);
-/*
- * Read max. It indicates how many operations there are in queue, either in
- * channel or in a software thread.
- * */
-static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
-{
- smp_rmb();
- return (u32)atomic_read(&sp->max_val);
-}
-
-/*
- * Read min, which is a shadow of the current sync point value in hardware.
- */
-static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
-{
- smp_rmb();
- return (u32)atomic_read(&sp->min_val);
-}
-
/* Return number of sync point supported. */
int host1x_syncpt_nb_pts(struct host1x *host);
@@ -112,9 +100,6 @@ static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
return (min == max);
}
-/* Return pointer to struct denoting sync point id. */
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
-
/* Load current value from hardware to the shadow register. */
u32 host1x_syncpt_load(struct host1x_syncpt *sp);
@@ -130,16 +115,9 @@ void host1x_syncpt_restore(struct host1x *host);
/* Read current wait base value into shadow register and return it. */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
-/* Request incrementing a sync point. */
-int host1x_syncpt_incr(struct host1x_syncpt *sp);
-
/* Indicate future operations by incrementing the sync point max. */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
-/* Wait until sync point reaches a threshold value, or a timeout. */
-int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
- long timeout, u32 *value);
-
/* Check if sync point id is valid. */
static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
{
@@ -149,14 +127,4 @@ static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
/* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
-/* Return id of the sync point */
-u32 host1x_syncpt_id(struct host1x_syncpt *sp);
-
-/* Allocate a sync point for a device. */
-struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
- bool client_managed);
-
-/* Free a sync point. */
-void host1x_syncpt_free(struct host1x_syncpt *sp);
-
#endif
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index c91d547191dd..a27e5313f80f 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -242,6 +242,7 @@ config HID_HOLTEK
- Tracer Sniper TRM-503 / NOVA Gaming Slider X200 /
Zalman ZM-GM1
- SHARKOON DarkGlider Gaming mouse
+ - LEETGION Hellion Gaming Mouse
config HOLTEK_FF
bool "Holtek On Line Grip force feedback support"
@@ -323,7 +324,7 @@ config HID_LCPOWER
config HID_LENOVO_TPKBD
tristate "Lenovo ThinkPad USB Keyboard with TrackPoint"
- depends on USB_HID
+ depends on HID
select NEW_LEDS
select LEDS_CLASS
---help---
@@ -362,19 +363,20 @@ config LOGITECH_FF
- Logitech WingMan Force 3D
- Logitech Formula Force EX
- Logitech WingMan Formula Force GP
- - Logitech MOMO Force wheel
and if you want to enable force feedback for them.
Note: if you say N here, this device will still be supported, but without
force feedback.
config LOGIRUMBLEPAD2_FF
- bool "Logitech RumblePad/Rumblepad 2 force feedback support"
+ bool "Logitech force feedback support (variant 2)"
depends on HID_LOGITECH
select INPUT_FF_MEMLESS
help
- Say Y here if you want to enable force feedback support for Logitech
- RumblePad and Rumblepad 2 devices.
+ Say Y here if you want to enable force feedback support for:
+ - Logitech RumblePad
+ - Logitech Rumblepad 2
+ - Logitech Formula Vibration Feedback Wheel
config LOGIG940_FF
bool "Logitech Flight System G940 force feedback support"
@@ -437,6 +439,7 @@ config HID_MULTITOUCH
- Chunghwa panels
- CVTouch panels
- Cypress TrueTouch panels
+ - Elan Microelectronics touch panels
- Elo TouchSystems IntelliTouch Plus panels
- GeneralTouch 'Sensing Win7-TwoFinger' panels
- GoodTouch panels
@@ -453,6 +456,7 @@ config HID_MULTITOUCH
- Pixcir dual touch panels
- Quanta panels
- eGalax dual-touch panels, including the Joojoo and Wetab tablets
+ - SiS multitouch panels
- Stantum multitouch panels
- Touch International Panels
- Unitec Panels
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index a959f4aecaf5..30e44318f87f 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -95,7 +95,7 @@ obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \
hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \
hid-roccat-koneplus.o hid-roccat-konepure.o hid-roccat-kovaplus.o \
- hid-roccat-lua.o hid-roccat-pyra.o hid-roccat-savu.o
+ hid-roccat-lua.o hid-roccat-pyra.o hid-roccat-ryos.o hid-roccat-savu.o
obj-$(CONFIG_HID_SAITEK) += hid-saitek.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 881cf7b4f9a4..497558127bb3 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -46,6 +46,12 @@ module_param(iso_layout, uint, 0644);
MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. "
"(0 = disabled, [1] = enabled)");
+static unsigned int swap_opt_cmd;
+module_param(swap_opt_cmd, uint, 0644);
+MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") keys. "
+ "(For people who want to keep Windows PC keyboard muscle memory. "
+ "[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
+
struct apple_sc {
unsigned long quirks;
unsigned int fn_on;
@@ -150,6 +156,14 @@ static const struct apple_key_translation apple_iso_keyboard[] = {
{ }
};
+static const struct apple_key_translation swapped_option_cmd_keys[] = {
+ { KEY_LEFTALT, KEY_LEFTMETA },
+ { KEY_LEFTMETA, KEY_LEFTALT },
+ { KEY_RIGHTALT, KEY_RIGHTMETA },
+ { KEY_RIGHTMETA, KEY_RIGHTALT },
+ { }
+};
+
static const struct apple_key_translation *apple_find_translation(
const struct apple_key_translation *table, u16 from)
{
@@ -242,6 +256,14 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
}
+ if (swap_opt_cmd) {
+ trans = apple_find_translation(swapped_option_cmd_keys, usage->code);
+ if (trans) {
+ input_event(input, usage->type, trans->to, value);
+ return 1;
+ }
+ }
+
return 0;
}
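The new swap_opt_cmd parameter reuses the driver's existing translation-table machinery: when enabled, each key event is looked up in swapped_option_cmd_keys and remapped before it reaches input_event(). Below is a standalone userspace model of that lookup; the numeric key codes and the helper name are only illustrative. Because the parameter is created with mode 0644, it can also be toggled at runtime through the module's parameters directory in sysfs.

#include <stdio.h>

#define KEY_LEFTALT    56	/* illustrative key codes */
#define KEY_LEFTMETA  125
#define KEY_RIGHTALT  100
#define KEY_RIGHTMETA 126

struct key_translation {
	unsigned short from;
	unsigned short to;
};

/* Zero-terminated from->to table, same shape as swapped_option_cmd_keys. */
static const struct key_translation swapped_option_cmd_keys[] = {
	{ KEY_LEFTALT,   KEY_LEFTMETA },
	{ KEY_LEFTMETA,  KEY_LEFTALT },
	{ KEY_RIGHTALT,  KEY_RIGHTMETA },
	{ KEY_RIGHTMETA, KEY_RIGHTALT },
	{ }
};

static unsigned short translate(unsigned short code)
{
	const struct key_translation *t;

	for (t = swapped_option_cmd_keys; t->from; t++)
		if (t->from == code)
			return t->to;
	return code;	/* not in the table: pass through unchanged */
}

int main(void)
{
	printf("%u -> %u\n", KEY_LEFTALT, translate(KEY_LEFTALT));
	return 0;
}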
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 5a8c01112a23..9bbbb7262e14 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -319,7 +319,7 @@ static s32 item_sdata(struct hid_item *item)
static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
{
- __u32 raw_value;
+ __s32 raw_value;
switch (item->tag) {
case HID_GLOBAL_ITEM_TAG_PUSH:
@@ -370,10 +370,11 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
return 0;
case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
- /* Units exponent negative numbers are given through a
- * two's complement.
- * See "6.2.2.7 Global Items" for more information. */
- raw_value = item_udata(item);
+ /* Many devices provide unit exponent as a two's complement
+ * nibble due to the common misunderstanding of HID
+ * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
+ * both this and the standard encoding. */
+ raw_value = item_sdata(item);
if (!(raw_value & 0xfffffff0))
parser->global.unit_exponent = hid_snto32(raw_value, 4);
else
@@ -1417,10 +1418,8 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
ret = hdrv->raw_event(hid, report, data, size);
- if (ret < 0) {
- ret = ret < 0 ? ret : 0;
+ if (ret < 0)
goto unlock;
- }
}
ret = hid_report_raw_event(hid, type, data, size, interrupt);
@@ -1715,6 +1714,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
@@ -1753,6 +1753,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
@@ -1810,11 +1811,16 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
@@ -1870,6 +1876,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
{ }
};
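In the unit-exponent hunk above, an item value that fits in the low nibble is decoded with hid_snto32(raw_value, 4), i.e. as a 4-bit two's-complement number. The standalone snippet below mirrors that sign extension so the mapping is explicit; snto32_4bit() is an invented name used only for illustration.

#include <stdint.h>
#include <stdio.h>

/* Sign-extend a 4-bit two's-complement nibble, as hid_snto32(v, 4) does. */
static int32_t snto32_4bit(uint32_t value)
{
	value &= 0xf;
	return value & 0x8 ? (int32_t)value - 16 : (int32_t)value;
}

int main(void)
{
	/* 0x0f encodes -1, 0x0e encodes -2, ...; 0x07 is the largest positive. */
	printf("%d %d %d\n", snto32_4bit(0x0f), snto32_4bit(0x0e),
	       snto32_4bit(0x07));	/* prints: -1 -2 7 */
	return 0;
}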
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index f042a6cf8b18..4e49462870ab 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -181,7 +181,40 @@ fail:
*/
static bool elo_broken_firmware(struct usb_device *dev)
{
- return use_fw_quirk && le16_to_cpu(dev->descriptor.bcdDevice) == 0x10d;
+ struct usb_device *hub = dev->parent;
+ struct usb_device *child = NULL;
+ u16 fw_lvl = le16_to_cpu(dev->descriptor.bcdDevice);
+ u16 child_vid, child_pid;
+ int i;
+
+ if (!use_fw_quirk)
+ return false;
+ if (fw_lvl != 0x10d)
+ return false;
+
+ /* iterate sibling devices of the touch controller */
+ usb_hub_for_each_child(hub, i, child) {
+ child_vid = le16_to_cpu(child->descriptor.idVendor);
+ child_pid = le16_to_cpu(child->descriptor.idProduct);
+
+ /*
+ * If one of the devices below is attached as a sibling of the touch
+ * controller, then this is a newer IBM 4820 monitor that does not
+ * need the IBM-requested workaround if its firmware level is
+ * 0x010d - aka 'M'.
+ * No other HW can have this combination.
+ */
+ if (child_vid == 0x04b3) {
+ switch (child_pid) {
+ case 0x4676: /* 4820 21x Video */
+ case 0x4677: /* 4820 51x Video */
+ case 0x4678: /* 4820 2Lx Video */
+ case 0x4679: /* 4820 5Lx Video */
+ return false;
+ }
+ }
+ }
+ return true;
}
static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
index e696566cde46..0caa676de622 100644
--- a/drivers/hid/hid-holtek-mouse.c
+++ b/drivers/hid/hid-holtek-mouse.c
@@ -28,6 +28,7 @@
* - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200
* and Zalman ZM-GM1
* - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse
+ * - USB ID 04d9:a072, sold as LEETGION Hellion Gaming Mouse
*/
static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -40,6 +41,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
* 0x2fff, so they don't exceed HID_MAX_USAGES */
switch (hdev->product) {
case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067:
+ case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072:
if (*rsize >= 122 && rdesc[115] == 0xff && rdesc[116] == 0x7f
&& rdesc[120] == 0xff && rdesc[121] == 0x7f) {
hid_info(hdev, "Fixing up report descriptor\n");
@@ -66,6 +68,8 @@ static const struct hid_device_id holtek_mouse_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
{ }
};
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9cbc7ab07dfa..76559629568c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -332,6 +332,11 @@
#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS 0x0100
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101 0x0101
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102 0x0102
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106 0x0106
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
#define USB_VENDOR_ID_GLAB 0x06c2
#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
@@ -448,8 +453,9 @@
#define USB_VENDOR_ID_HOLTEK_ALT 0x04d9
#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055
-#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a
+#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067
+#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072 0xa072
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081
#define USB_VENDOR_ID_IMATION 0x0718
@@ -571,6 +577,7 @@
#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
#define USB_DEVICE_ID_DINOVO_MINI 0xc71f
#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2 0xca03
+#define USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL 0xca04
#define USB_VENDOR_ID_LUMIO 0x202e
#define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
@@ -633,6 +640,7 @@
#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
#define USB_VENDOR_ID_NINTENDO 0x057e
+#define USB_VENDOR_ID_NINTENDO2 0x054c
#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
#define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330
@@ -725,6 +733,9 @@
#define USB_DEVICE_ID_ROCCAT_LUA 0x2c2e
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6
+#define USB_DEVICE_ID_ROCCAT_RYOS_MK 0x3138
+#define USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW 0x31ce
+#define USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO 0x3232
#define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a
#define USB_VENDOR_ID_SAITEK 0x06a3
@@ -744,6 +755,10 @@
#define USB_VENDOR_ID_SIGMATEL 0x066F
#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
+#define USB_VENDOR_ID_SIS2_TOUCH 0x0457
+#define USB_DEVICE_ID_SIS9200_TOUCH 0x9200
+#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
+
#define USB_VENDOR_ID_SKYCABLE 0x1223
#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
@@ -792,6 +807,8 @@
#define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009
#define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010
#define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013
+#define USB_DEVICE_ID_SYNAPTICS_LTS1 0x0af8
+#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
#define USB_VENDOR_ID_THINGM 0x27b8
#define USB_DEVICE_ID_BLINK1 0x01ed
@@ -919,4 +936,7 @@
#define USB_VENDOR_ID_PRIMAX 0x0461
#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+#define USB_VENDOR_ID_SIS 0x0457
+#define USB_DEVICE_ID_SIS_TS 0x1013
+
#endif
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 8741d953dcc8..d97f2323af57 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -192,6 +192,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
return -EINVAL;
}
+
/**
* hidinput_calc_abs_res - calculate an absolute axis resolution
* @field: the HID report field to calculate resolution for
@@ -234,23 +235,17 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
case ABS_MT_TOOL_Y:
case ABS_MT_TOUCH_MAJOR:
case ABS_MT_TOUCH_MINOR:
- if (field->unit & 0xffffff00) /* Not a length */
- return 0;
- unit_exponent += hid_snto32(field->unit >> 4, 4) - 1;
- switch (field->unit & 0xf) {
- case 0x1: /* If centimeters */
+ if (field->unit == 0x11) { /* If centimeters */
/* Convert to millimeters */
unit_exponent += 1;
- break;
- case 0x3: /* If inches */
+ } else if (field->unit == 0x13) { /* If inches */
/* Convert to millimeters */
prev = physical_extents;
physical_extents *= 254;
if (physical_extents < prev)
return 0;
unit_exponent -= 1;
- break;
- default:
+ } else {
return 0;
}
break;
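For the resolution calculation above, the inch case stays in integer arithmetic: 1 inch = 25.4 mm is applied as a multiplication by 254 on the physical extent plus a decrement of the decimal exponent. A standalone illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
	long physical_extents = 4;	/* e.g. a 4 inch wide touch surface */
	int unit_exponent = 0;

	physical_extents *= 254;	/* 4 in -> 1016 */
	unit_exponent -= 1;		/* ... x 10^-1, i.e. 101.6 mm */

	printf("%ld x 10^%d mm\n", physical_extents, unit_exponent);
	return 0;
}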
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
index 31cf29a6ba17..2d25b6cbbc05 100644
--- a/drivers/hid/hid-lenovo-tpkbd.c
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -14,11 +14,9 @@
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/device.h>
-#include <linux/usb.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/leds.h>
-#include "usbhid/usbhid.h"
#include "hid-ids.h"
@@ -41,10 +39,9 @@ static int tpkbd_input_mapping(struct hid_device *hdev,
struct hid_input *hi, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max)
{
- struct usbhid_device *uhdev;
-
- uhdev = (struct usbhid_device *) hdev->driver_data;
- if (uhdev->ifnum == 1 && usage->hid == (HID_UP_BUTTON | 0x0010)) {
+ if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
+ /* mark the device as pointer */
+ hid_set_drvdata(hdev, (void *)1);
map_key_clear(KEY_MICMUTE);
return 1;
}
@@ -339,7 +336,7 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
struct tpkbd_data_pointer *data_pointer;
size_t name_sz = strlen(dev_name(dev)) + 16;
char *name_mute, *name_micmute;
- int i, ret;
+ int i;
/* Validate required reports. */
for (i = 0; i < 4; i++) {
@@ -354,7 +351,9 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
hid_warn(hdev, "Could not create sysfs group\n");
}
- data_pointer = kzalloc(sizeof(struct tpkbd_data_pointer), GFP_KERNEL);
+ data_pointer = devm_kzalloc(&hdev->dev,
+ sizeof(struct tpkbd_data_pointer),
+ GFP_KERNEL);
if (data_pointer == NULL) {
hid_err(hdev, "Could not allocate memory for driver data\n");
return -ENOMEM;
@@ -364,20 +363,13 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
data_pointer->sensitivity = 0xa0;
data_pointer->press_speed = 0x38;
- name_mute = kzalloc(name_sz, GFP_KERNEL);
- if (name_mute == NULL) {
+ name_mute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+ name_micmute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+ if (name_mute == NULL || name_micmute == NULL) {
hid_err(hdev, "Could not allocate memory for led data\n");
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev));
-
- name_micmute = kzalloc(name_sz, GFP_KERNEL);
- if (name_micmute == NULL) {
- hid_err(hdev, "Could not allocate memory for led data\n");
- ret = -ENOMEM;
- goto err2;
- }
snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev));
hid_set_drvdata(hdev, data_pointer);
@@ -397,19 +389,12 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
tpkbd_features_set(hdev);
return 0;
-
-err2:
- kfree(name_mute);
-err:
- kfree(data_pointer);
- return ret;
}
static int tpkbd_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
int ret;
- struct usbhid_device *uhdev;
ret = hid_parse(hdev);
if (ret) {
@@ -423,9 +408,8 @@ static int tpkbd_probe(struct hid_device *hdev,
goto err;
}
- uhdev = (struct usbhid_device *) hdev->driver_data;
-
- if (uhdev->ifnum == 1) {
+ if (hid_get_drvdata(hdev)) {
+ hid_set_drvdata(hdev, NULL);
ret = tpkbd_probe_tp(hdev);
if (ret)
goto err_hid;
@@ -449,17 +433,11 @@ static void tpkbd_remove_tp(struct hid_device *hdev)
led_classdev_unregister(&data_pointer->led_mute);
hid_set_drvdata(hdev, NULL);
- kfree(data_pointer->led_micmute.name);
- kfree(data_pointer->led_mute.name);
- kfree(data_pointer);
}
static void tpkbd_remove(struct hid_device *hdev)
{
- struct usbhid_device *uhdev;
-
- uhdev = (struct usbhid_device *) hdev->driver_data;
- if (uhdev->ifnum == 1)
+ if (hid_get_drvdata(hdev))
tpkbd_remove_tp(hdev);
hid_hw_stop(hdev);
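The tpkbd changes above replace kzalloc()/kfree() pairs with device-managed allocations, which is why the err/err2 unwind labels and the explicit kfree() calls in the remove path disappear. A minimal kernel-context sketch of the resulting pattern follows; it is not standalone, and example_probe_alloc() is an invented name.

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/slab.h>

static int example_probe_alloc(struct hid_device *hdev)
{
	struct tpkbd_data_pointer *data;

	/* Freed automatically when the driver unbinds from &hdev->dev. */
	data = devm_kzalloc(&hdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;		/* nothing to unwind by hand */

	hid_set_drvdata(hdev, data);
	return 0;			/* note: no matching kfree() anywhere */
}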
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 6f12ecd36c88..06eb45fa6331 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -45,7 +45,9 @@
/* Size of the original descriptors of the Driving Force (and Pro) wheels */
#define DF_RDESC_ORIG_SIZE 130
#define DFP_RDESC_ORIG_SIZE 97
+#define FV_RDESC_ORIG_SIZE 130
#define MOMO_RDESC_ORIG_SIZE 87
+#define MOMO2_RDESC_ORIG_SIZE 87
/* Fixed report descriptors for Logitech Driving Force (and Pro)
* wheel controllers
@@ -170,6 +172,73 @@ static __u8 dfp_rdesc_fixed[] = {
0xC0 /* End Collection */
};
+static __u8 fv_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystik), */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x0A, /* Report Size (10), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x35, 0x00, /* Physical Minimum (0), */
+0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
+0x09, 0x30, /* Usage (X), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x0C, /* Report Count (12), */
+0x75, 0x01, /* Report Size (1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x05, 0x09, /* Usage Page (Button), */
+0x19, 0x01, /* Usage Minimum (01h), */
+0x29, 0x0C, /* Usage Maximum (0Ch), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x02, /* Report Count (2), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x01, /* Usage (01h), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x02, /* Usage (02h), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x08, /* Report Size (8), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x25, 0x07, /* Logical Maximum (7), */
+0x46, 0x3B, 0x01, /* Physical Maximum (315), */
+0x75, 0x04, /* Report Size (4), */
+0x65, 0x14, /* Unit (Degrees), */
+0x09, 0x39, /* Usage (Hat Switch), */
+0x81, 0x42, /* Input (Variable, Null State), */
+0x75, 0x01, /* Report Size (1), */
+0x95, 0x04, /* Report Count (4), */
+0x65, 0x00, /* Unit, */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x01, /* Usage (01h), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x08, /* Report Size (8), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x09, 0x31, /* Usage (Y), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x32, /* Usage (Z), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x95, 0x07, /* Report Count (7), */
+0x75, 0x08, /* Report Size (8), */
+0x09, 0x03, /* Usage (03h), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
+
static __u8 momo_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystik), */
@@ -216,6 +285,54 @@ static __u8 momo_rdesc_fixed[] = {
0xC0 /* End Collection */
};
+static __u8 momo2_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystik), */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x0A, /* Report Size (10), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x35, 0x00, /* Physical Minimum (0), */
+0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
+0x09, 0x30, /* Usage (X), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x0A, /* Report Count (10), */
+0x75, 0x01, /* Report Size (1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x05, 0x09, /* Usage Page (Button), */
+0x19, 0x01, /* Usage Minimum (01h), */
+0x29, 0x0A, /* Usage Maximum (0Ah), */
+0x81, 0x02, /* Input (Variable), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x00, /* Usage (00h), */
+0x95, 0x04, /* Report Count (4), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x08, /* Report Size (8), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x09, 0x01, /* Usage (01h), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x31, /* Usage (Y), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x32, /* Usage (Z), */
+0x81, 0x02, /* Input (Variable), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x00, /* Usage (00h), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x09, 0x02, /* Usage (02h), */
+0x95, 0x07, /* Report Count (7), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
+
/*
* Certain Logitech keyboards send in report #3 keys which are far
* above the logical maximum described in descriptor. This extends
@@ -275,6 +392,24 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
break;
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+ if (*rsize == MOMO2_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+ "fixing up Logitech Momo Racing Force (Black) report descriptor\n");
+ rdesc = momo2_rdesc_fixed;
+ *rsize = sizeof(momo2_rdesc_fixed);
+ }
+ break;
+
+ case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
+ if (*rsize == FV_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+ "fixing up Logitech Formula Vibration report descriptor\n");
+ rdesc = fv_rdesc_fixed;
+ *rsize = sizeof(fv_rdesc_fixed);
+ }
+ break;
+
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
if (*rsize == DFP_RDESC_ORIG_SIZE) {
hid_info(hdev,
@@ -492,6 +627,7 @@ static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi,
case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+ case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
field->application = HID_GD_MULTIAXIS;
break;
default:
@@ -639,6 +775,8 @@ static const struct hid_device_id lg_devices[] = {
.driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2),
.driver_data = LG_FF4 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL),
+ .driver_data = LG_FF2 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
.driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL),
diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
index 1a42eaa6ca02..0e3fb1a7e421 100644
--- a/drivers/hid/hid-lg2ff.c
+++ b/drivers/hid/hid-lg2ff.c
@@ -95,7 +95,7 @@ int lg2ff_init(struct hid_device *hid)
hid_hw_request(hid, report, HID_REQ_SET_REPORT);
- hid_info(hid, "Force feedback for Logitech RumblePad/Rumblepad 2 by Anssi Hannula <anssi.hannula@gmail.com>\n");
+ hid_info(hid, "Force feedback for Logitech variant 2 rumble devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
return 0;
}
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 2e5302462efb..a7947d8251a8 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -542,9 +542,9 @@ static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
return 0;
}
-static void rdcat(char **rdesc, unsigned int *rsize, const char *data, unsigned int size)
+static void rdcat(char *rdesc, unsigned int *rsize, const char *data, unsigned int size)
{
- memcpy(*rdesc + *rsize, data, size);
+ memcpy(rdesc + *rsize, data, size);
*rsize += size;
}
@@ -567,31 +567,31 @@ static int logi_dj_ll_parse(struct hid_device *hid)
if (djdev->reports_supported & STD_KEYBOARD) {
dbg_hid("%s: sending a kbd descriptor, reports_supported: %x\n",
__func__, djdev->reports_supported);
- rdcat(&rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor));
+ rdcat(rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor));
}
if (djdev->reports_supported & STD_MOUSE) {
dbg_hid("%s: sending a mouse descriptor, reports_supported: "
"%x\n", __func__, djdev->reports_supported);
- rdcat(&rdesc, &rsize, mse_descriptor, sizeof(mse_descriptor));
+ rdcat(rdesc, &rsize, mse_descriptor, sizeof(mse_descriptor));
}
if (djdev->reports_supported & MULTIMEDIA) {
dbg_hid("%s: sending a multimedia report descriptor: %x\n",
__func__, djdev->reports_supported);
- rdcat(&rdesc, &rsize, consumer_descriptor, sizeof(consumer_descriptor));
+ rdcat(rdesc, &rsize, consumer_descriptor, sizeof(consumer_descriptor));
}
if (djdev->reports_supported & POWER_KEYS) {
dbg_hid("%s: sending a power keys report descriptor: %x\n",
__func__, djdev->reports_supported);
- rdcat(&rdesc, &rsize, syscontrol_descriptor, sizeof(syscontrol_descriptor));
+ rdcat(rdesc, &rsize, syscontrol_descriptor, sizeof(syscontrol_descriptor));
}
if (djdev->reports_supported & MEDIA_CENTER) {
dbg_hid("%s: sending a media center report descriptor: %x\n",
__func__, djdev->reports_supported);
- rdcat(&rdesc, &rsize, media_descriptor, sizeof(media_descriptor));
+ rdcat(rdesc, &rsize, media_descriptor, sizeof(media_descriptor));
}
if (djdev->reports_supported & KBD_LEDS) {
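The rdcat() change above works because logi_dj_ll_parse() only appends into a buffer it never reallocates, so passing the buffer pointer by value is sufficient and only the running size needs to travel through a pointer. A standalone model with dummy data:

#include <stdio.h>
#include <string.h>

/* Same shape as the simplified rdcat(): append 'size' bytes and advance. */
static void rdcat(char *rdesc, unsigned int *rsize, const char *data,
		  unsigned int size)
{
	memcpy(rdesc + *rsize, data, size);
	*rsize += size;
}

int main(void)
{
	char buf[16];
	unsigned int len = 0;

	rdcat(buf, &len, "abc", 3);	/* pretend: keyboard descriptor */
	rdcat(buf, &len, "de", 2);	/* pretend: mouse descriptor */
	printf("%u\n", len);		/* 5 */
	return 0;
}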
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 5e5fe1b8eebb..a2cedb8ae1c0 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -250,12 +250,12 @@ static struct mt_class mt_classes[] = {
{ .name = MT_CLS_GENERALTOUCH_TWOFINGERS,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
MT_QUIRK_VALID_IS_INRANGE |
- MT_QUIRK_SLOT_IS_CONTACTNUMBER,
+ MT_QUIRK_SLOT_IS_CONTACTID,
.maxcontacts = 2
},
{ .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
- MT_QUIRK_SLOT_IS_CONTACTNUMBER
+ MT_QUIRK_SLOT_IS_CONTACTID
},
{ .name = MT_CLS_FLATFROG,
@@ -1173,6 +1173,21 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) },
+ { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
+ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101) },
+ { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102) },
+ { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106) },
+ { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A) },
+ { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+ MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+ USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100) },
/* Gametel game controller */
{ .driver_data = MT_CLS_NSMU,
@@ -1284,6 +1299,14 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_QUANTA,
USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008) },
+ /* SiS panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
+ USB_DEVICE_ID_SIS9200_TOUCH) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
+ USB_DEVICE_ID_SIS817_TOUCH) },
+
/* Stantum panels */
{ .driver_data = MT_CLS_CONFIDENCE,
MT_USB_DEVICE(USB_VENDOR_ID_STANTUM,
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
index 74f704032627..02e28e9f4ea7 100644
--- a/drivers/hid/hid-roccat-common.c
+++ b/drivers/hid/hid-roccat-common.c
@@ -65,10 +65,11 @@ int roccat_common2_send(struct usb_device *usb_dev, uint report_id,
EXPORT_SYMBOL_GPL(roccat_common2_send);
enum roccat_common2_control_states {
- ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD = 0,
+ ROCCAT_COMMON_CONTROL_STATUS_CRITICAL = 0,
ROCCAT_COMMON_CONTROL_STATUS_OK = 1,
ROCCAT_COMMON_CONTROL_STATUS_INVALID = 2,
- ROCCAT_COMMON_CONTROL_STATUS_WAIT = 3,
+ ROCCAT_COMMON_CONTROL_STATUS_BUSY = 3,
+ ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW = 4,
};
static int roccat_common2_receive_control_status(struct usb_device *usb_dev)
@@ -88,13 +89,12 @@ static int roccat_common2_receive_control_status(struct usb_device *usb_dev)
switch (control.value) {
case ROCCAT_COMMON_CONTROL_STATUS_OK:
return 0;
- case ROCCAT_COMMON_CONTROL_STATUS_WAIT:
+ case ROCCAT_COMMON_CONTROL_STATUS_BUSY:
msleep(500);
continue;
case ROCCAT_COMMON_CONTROL_STATUS_INVALID:
-
- case ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD:
- /* seems to be critical - replug necessary */
+ case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL:
+ case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW:
return -EINVAL;
default:
dev_err(&usb_dev->dev,
@@ -122,6 +122,59 @@ int roccat_common2_send_with_status(struct usb_device *usb_dev,
}
EXPORT_SYMBOL_GPL(roccat_common2_send_with_status);
+int roccat_common2_device_init_struct(struct usb_device *usb_dev,
+ struct roccat_common2_device *dev)
+{
+ mutex_init(&dev->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(roccat_common2_device_init_struct);
+
+ssize_t roccat_common2_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct roccat_common2_device *roccat_dev = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off >= real_size)
+ return 0;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&roccat_dev->lock);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
+ mutex_unlock(&roccat_dev->lock);
+
+ return retval ? retval : real_size;
+}
+EXPORT_SYMBOL_GPL(roccat_common2_sysfs_read);
+
+ssize_t roccat_common2_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct roccat_common2_device *roccat_dev = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&roccat_dev->lock);
+ retval = roccat_common2_send_with_status(usb_dev, command, buf, real_size);
+ mutex_unlock(&roccat_dev->lock);
+
+ return retval ? retval : real_size;
+}
+EXPORT_SYMBOL_GPL(roccat_common2_sysfs_write);
+
MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat common driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h
index a97746a63b70..eaa56eb7d5d1 100644
--- a/drivers/hid/hid-roccat-common.h
+++ b/drivers/hid/hid-roccat-common.h
@@ -32,4 +32,66 @@ int roccat_common2_send(struct usb_device *usb_dev, uint report_id,
int roccat_common2_send_with_status(struct usb_device *usb_dev,
uint command, void const *buf, uint size);
+struct roccat_common2_device {
+ int roccat_claimed;
+ int chrdev_minor;
+ struct mutex lock;
+};
+
+int roccat_common2_device_init_struct(struct usb_device *usb_dev,
+ struct roccat_common2_device *dev);
+ssize_t roccat_common2_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command);
+ssize_t roccat_common2_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command);
+
+#define ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE) \
+static ssize_t roccat_common2_sysfs_write_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return roccat_common2_sysfs_write(fp, kobj, buf, off, count, \
+ SIZE, COMMAND); \
+}
+
+#define ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE) \
+static ssize_t roccat_common2_sysfs_read_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return roccat_common2_sysfs_read(fp, kobj, buf, off, count, \
+ SIZE, COMMAND); \
+}
+
+#define ROCCAT_COMMON2_SYSFS_RW(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE)
+
+#define ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_RW(thingy, COMMAND, SIZE); \
+static struct bin_attribute bin_attr_ ## thingy = { \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = SIZE, \
+ .read = roccat_common2_sysfs_read_ ## thingy, \
+ .write = roccat_common2_sysfs_write_ ## thingy \
+}
+
+#define ROCCAT_COMMON2_BIN_ATTRIBUTE_R(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE); \
+static struct bin_attribute bin_attr_ ## thingy = { \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = SIZE, \
+ .read = roccat_common2_sysfs_read_ ## thingy, \
+}
+
+#define ROCCAT_COMMON2_BIN_ATTRIBUTE_W(thingy, COMMAND, SIZE) \
+ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE); \
+static struct bin_attribute bin_attr_ ## thingy = { \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = SIZE, \
+ .write = roccat_common2_sysfs_write_ ## thingy \
+}
+
#endif
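As a reference for how these helpers are meant to be consumed, ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x09, 0x06), which hid-roccat-konepure.c uses below, expands to roughly the following (reformatted for readability); this is exactly the boilerplate the per-device drivers previously spelled out by hand.

static ssize_t roccat_common2_sysfs_write_info(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	return roccat_common2_sysfs_write(fp, kobj, buf, off, count,
			0x06, 0x09);
}

static ssize_t roccat_common2_sysfs_read_info(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	return roccat_common2_sysfs_read(fp, kobj, buf, off, count,
			0x06, 0x09);
}

static struct bin_attribute bin_attr_info = {
	.attr = { .name = "info", .mode = 0660 },
	.size = 0x06,
	.read = roccat_common2_sysfs_read_info,
	.write = roccat_common2_sysfs_write_info
};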
diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c
index 99a605ebb665..07de2f9014c6 100644
--- a/drivers/hid/hid-roccat-konepure.c
+++ b/drivers/hid/hid-roccat-konepure.c
@@ -15,6 +15,7 @@
* Roccat KonePure is a smaller version of KoneXTD with less buttons and lights.
*/
+#include <linux/types.h>
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
@@ -23,128 +24,50 @@
#include <linux/hid-roccat.h>
#include "hid-ids.h"
#include "hid-roccat-common.h"
-#include "hid-roccat-konepure.h"
-static struct class *konepure_class;
-
-static ssize_t konepure_sysfs_read(struct file *fp, struct kobject *kobj,
- char *buf, loff_t off, size_t count,
- size_t real_size, uint command)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct konepure_device *konepure = hid_get_drvdata(dev_get_drvdata(dev));
- struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval;
-
- if (off >= real_size)
- return 0;
-
- if (off != 0 || count != real_size)
- return -EINVAL;
-
- mutex_lock(&konepure->konepure_lock);
- retval = roccat_common2_receive(usb_dev, command, buf, real_size);
- mutex_unlock(&konepure->konepure_lock);
-
- return retval ? retval : real_size;
-}
-
-static ssize_t konepure_sysfs_write(struct file *fp, struct kobject *kobj,
- void const *buf, loff_t off, size_t count,
- size_t real_size, uint command)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct konepure_device *konepure = hid_get_drvdata(dev_get_drvdata(dev));
- struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval;
-
- if (off != 0 || count != real_size)
- return -EINVAL;
-
- mutex_lock(&konepure->konepure_lock);
- retval = roccat_common2_send_with_status(usb_dev, command,
- (void *)buf, real_size);
- mutex_unlock(&konepure->konepure_lock);
-
- return retval ? retval : real_size;
-}
-
-#define KONEPURE_SYSFS_W(thingy, THINGY) \
-static ssize_t konepure_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
-{ \
- return konepure_sysfs_write(fp, kobj, buf, off, count, \
- KONEPURE_SIZE_ ## THINGY, KONEPURE_COMMAND_ ## THINGY); \
-}
-
-#define KONEPURE_SYSFS_R(thingy, THINGY) \
-static ssize_t konepure_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
-{ \
- return konepure_sysfs_read(fp, kobj, buf, off, count, \
- KONEPURE_SIZE_ ## THINGY, KONEPURE_COMMAND_ ## THINGY); \
-}
+enum {
+ KONEPURE_MOUSE_REPORT_NUMBER_BUTTON = 3,
+};
-#define KONEPURE_SYSFS_RW(thingy, THINGY) \
-KONEPURE_SYSFS_W(thingy, THINGY) \
-KONEPURE_SYSFS_R(thingy, THINGY)
-
-#define KONEPURE_BIN_ATTRIBUTE_RW(thingy, THINGY) \
-KONEPURE_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
- .attr = { .name = #thingy, .mode = 0660 }, \
- .size = KONEPURE_SIZE_ ## THINGY, \
- .read = konepure_sysfs_read_ ## thingy, \
- .write = konepure_sysfs_write_ ## thingy \
-}
+struct konepure_mouse_report_button {
+ uint8_t report_number; /* always KONEPURE_MOUSE_REPORT_NUMBER_BUTTON */
+ uint8_t zero;
+ uint8_t type;
+ uint8_t data1;
+ uint8_t data2;
+ uint8_t zero2;
+ uint8_t unknown[2];
+} __packed;
-#define KONEPURE_BIN_ATTRIBUTE_R(thingy, THINGY) \
-KONEPURE_SYSFS_R(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
- .attr = { .name = #thingy, .mode = 0440 }, \
- .size = KONEPURE_SIZE_ ## THINGY, \
- .read = konepure_sysfs_read_ ## thingy, \
-}
-
-#define KONEPURE_BIN_ATTRIBUTE_W(thingy, THINGY) \
-KONEPURE_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
- .attr = { .name = #thingy, .mode = 0220 }, \
- .size = KONEPURE_SIZE_ ## THINGY, \
- .write = konepure_sysfs_write_ ## thingy \
-}
+static struct class *konepure_class;
-KONEPURE_BIN_ATTRIBUTE_RW(actual_profile, ACTUAL_PROFILE);
-KONEPURE_BIN_ATTRIBUTE_RW(info, INFO);
-KONEPURE_BIN_ATTRIBUTE_RW(sensor, SENSOR);
-KONEPURE_BIN_ATTRIBUTE_RW(tcu, TCU);
-KONEPURE_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
-KONEPURE_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
-KONEPURE_BIN_ATTRIBUTE_W(control, CONTROL);
-KONEPURE_BIN_ATTRIBUTE_W(talk, TALK);
-KONEPURE_BIN_ATTRIBUTE_W(macro, MACRO);
-KONEPURE_BIN_ATTRIBUTE_R(tcu_image, TCU_IMAGE);
-
-static struct bin_attribute *konepure_bin_attributes[] = {
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x04, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(actual_profile, 0x05, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile_settings, 0x06, 0x1f);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile_buttons, 0x07, 0x3b);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(macro, 0x08, 0x0822);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x09, 0x06);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(tcu, 0x0c, 0x04);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_R(tcu_image, 0x0c, 0x0404);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0x0f, 0x06);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(talk, 0x10, 0x10);
+
+static struct bin_attribute *konepure_bin_attrs[] = {
&bin_attr_actual_profile,
+ &bin_attr_control,
&bin_attr_info,
+ &bin_attr_talk,
+ &bin_attr_macro,
&bin_attr_sensor,
&bin_attr_tcu,
+ &bin_attr_tcu_image,
&bin_attr_profile_settings,
&bin_attr_profile_buttons,
- &bin_attr_control,
- &bin_attr_talk,
- &bin_attr_macro,
- &bin_attr_tcu_image,
NULL,
};
static const struct attribute_group konepure_group = {
- .bin_attrs = konepure_bin_attributes,
+ .bin_attrs = konepure_bin_attrs,
};
static const struct attribute_group *konepure_groups[] = {
@@ -152,20 +75,11 @@ static const struct attribute_group *konepure_groups[] = {
NULL,
};
-
-static int konepure_init_konepure_device_struct(struct usb_device *usb_dev,
- struct konepure_device *konepure)
-{
- mutex_init(&konepure->konepure_lock);
-
- return 0;
-}
-
static int konepure_init_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
struct usb_device *usb_dev = interface_to_usbdev(intf);
- struct konepure_device *konepure;
+ struct roccat_common2_device *konepure;
int retval;
if (intf->cur_altsetting->desc.bInterfaceProtocol
@@ -181,9 +95,9 @@ static int konepure_init_specials(struct hid_device *hdev)
}
hid_set_drvdata(hdev, konepure);
- retval = konepure_init_konepure_device_struct(usb_dev, konepure);
+ retval = roccat_common2_device_init_struct(usb_dev, konepure);
if (retval) {
- hid_err(hdev, "couldn't init struct konepure_device\n");
+ hid_err(hdev, "couldn't init KonePure device\n");
goto exit_free;
}
@@ -205,7 +119,7 @@ exit_free:
static void konepure_remove_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct konepure_device *konepure;
+ struct roccat_common2_device *konepure;
if (intf->cur_altsetting->desc.bInterfaceProtocol
!= USB_INTERFACE_PROTOCOL_MOUSE)
@@ -258,7 +172,7 @@ static int konepure_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct konepure_device *konepure = hid_get_drvdata(hdev);
+ struct roccat_common2_device *konepure = hid_get_drvdata(hdev);
if (intf->cur_altsetting->desc.bInterfaceProtocol
!= USB_INTERFACE_PROTOCOL_MOUSE)
diff --git a/drivers/hid/hid-roccat-konepure.h b/drivers/hid/hid-roccat-konepure.h
deleted file mode 100644
index 2cd24e93dfd6..000000000000
--- a/drivers/hid/hid-roccat-konepure.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#ifndef __HID_ROCCAT_KONEPURE_H
-#define __HID_ROCCAT_KONEPURE_H
-
-/*
- * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/types.h>
-
-enum {
- KONEPURE_SIZE_ACTUAL_PROFILE = 0x03,
- KONEPURE_SIZE_CONTROL = 0x03,
- KONEPURE_SIZE_FIRMWARE_WRITE = 0x0402,
- KONEPURE_SIZE_INFO = 0x06,
- KONEPURE_SIZE_MACRO = 0x0822,
- KONEPURE_SIZE_PROFILE_SETTINGS = 0x1f,
- KONEPURE_SIZE_PROFILE_BUTTONS = 0x3b,
- KONEPURE_SIZE_SENSOR = 0x06,
- KONEPURE_SIZE_TALK = 0x10,
- KONEPURE_SIZE_TCU = 0x04,
- KONEPURE_SIZE_TCU_IMAGE = 0x0404,
-};
-
-enum konepure_control_requests {
- KONEPURE_CONTROL_REQUEST_GENERAL = 0x80,
- KONEPURE_CONTROL_REQUEST_BUTTONS = 0x90,
-};
-
-enum konepure_commands {
- KONEPURE_COMMAND_CONTROL = 0x04,
- KONEPURE_COMMAND_ACTUAL_PROFILE = 0x05,
- KONEPURE_COMMAND_PROFILE_SETTINGS = 0x06,
- KONEPURE_COMMAND_PROFILE_BUTTONS = 0x07,
- KONEPURE_COMMAND_MACRO = 0x08,
- KONEPURE_COMMAND_INFO = 0x09,
- KONEPURE_COMMAND_TCU = 0x0c,
- KONEPURE_COMMAND_TCU_IMAGE = 0x0c,
- KONEPURE_COMMAND_E = 0x0e,
- KONEPURE_COMMAND_SENSOR = 0x0f,
- KONEPURE_COMMAND_TALK = 0x10,
- KONEPURE_COMMAND_FIRMWARE_WRITE = 0x1b,
- KONEPURE_COMMAND_FIRMWARE_WRITE_CONTROL = 0x1c,
-};
-
-enum {
- KONEPURE_MOUSE_REPORT_NUMBER_BUTTON = 3,
-};
-
-struct konepure_mouse_report_button {
- uint8_t report_number; /* always KONEPURE_MOUSE_REPORT_NUMBER_BUTTON */
- uint8_t zero;
- uint8_t type;
- uint8_t data1;
- uint8_t data2;
- uint8_t zero2;
- uint8_t unknown[2];
-} __packed;
-
-struct konepure_device {
- int roccat_claimed;
- int chrdev_minor;
- struct mutex konepure_lock;
-};
-
-#endif
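
For context on the hunk above: the konepure-private sysfs macros, the size/command enums and struct konepure_device are replaced by shared ROCCAT_COMMON2_BIN_ATTRIBUTE_{R,W,RW}() helpers that take the command byte and transfer size directly, plus the generic struct roccat_common2_device. A minimal sketch of what such a helper macro plausibly expands to, modelled on the per-driver macros deleted above; the roccat_common2_sysfs_read()/roccat_common2_sysfs_write() names are assumptions, only roccat_common2_receive() and roccat_common2_send_with_status() are visible in this diff:

	/* Sketch only: plausible shape of the shared RW helper, mirroring the
	 * removed KONEPURE_* macros.  Helper function names are assumed. */
	#define ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(thingy, COMMAND, SIZE)		\
	static ssize_t thingy ## _read(struct file *fp, struct kobject *kobj,	\
			struct bin_attribute *attr, char *buf,			\
			loff_t off, size_t count)				\
	{									\
		return roccat_common2_sysfs_read(fp, kobj, buf, off, count,	\
				SIZE, COMMAND);					\
	}									\
	static ssize_t thingy ## _write(struct file *fp, struct kobject *kobj,	\
			struct bin_attribute *attr, char *buf,			\
			loff_t off, size_t count)				\
	{									\
		return roccat_common2_sysfs_write(fp, kobj, buf, off, count,	\
				SIZE, COMMAND);					\
	}									\
	static struct bin_attribute bin_attr_ ## thingy = {			\
		.attr	= { .name = #thingy, .mode = 0660 },			\
		.size	= SIZE,							\
		.read	= thingy ## _read,					\
		.write	= thingy ## _write,					\
	}

The W-only and R-only variants used above would differ only in mode (0220 / 0440) and in which callbacks they fill in.
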
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 0c8e1ef0b67d..966047711fbf 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -554,9 +554,13 @@ static void kovaplus_keep_values_up_to_date(struct kovaplus_device *kovaplus,
break;
case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI:
kovaplus->actual_cpi = kovaplus_convert_event_cpi(button_report->data1);
+ break;
case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY:
kovaplus->actual_x_sensitivity = button_report->data1;
kovaplus->actual_y_sensitivity = button_report->data2;
+ break;
+ default:
+ break;
}
}
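
The two added break statements in the kovaplus hunk are not cosmetic: without them a CPI report fell through into the sensitivity case, so a resolution change also overwrote the cached X/Y sensitivity with bytes that belong to the CPI event. Reduced sketch of the corrected flow:

	switch (button_report->type) {
	case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI:
		kovaplus->actual_cpi =
			kovaplus_convert_event_cpi(button_report->data1);
		break;	/* previously fell through and clobbered the sensitivity cache */
	case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY:
		kovaplus->actual_x_sensitivity = button_report->data1;
		kovaplus->actual_y_sensitivity = button_report->data2;
		break;
	default:
		break;
	}
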
diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c
new file mode 100644
index 000000000000..47cc8f30ff6d
--- /dev/null
+++ b/drivers/hid/hid-roccat-ryos.c
@@ -0,0 +1,241 @@
+/*
+ * Roccat Ryos driver for Linux
+ *
+ * Copyright (c) 2013 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+
+enum {
+ RYOS_REPORT_NUMBER_SPECIAL = 3,
+ RYOS_USB_INTERFACE_PROTOCOL = 0,
+};
+
+struct ryos_report_special {
+ uint8_t number; /* RYOS_REPORT_NUMBER_SPECIAL */
+ uint8_t data[4];
+} __packed;
+
+static struct class *ryos_class;
+
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x04, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile, 0x05, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_primary, 0x06, 0x7d);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_function, 0x07, 0x5f);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_macro, 0x08, 0x23);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_thumbster, 0x09, 0x17);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_extra, 0x0a, 0x08);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_easyzone, 0x0b, 0x126);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(key_mask, 0x0c, 0x06);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(light, 0x0d, 0x10);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(macro, 0x0e, 0x7d2);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_R(info, 0x0f, 0x08);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(reset, 0x11, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(light_control, 0x13, 0x08);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(talk, 0x16, 0x10);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(stored_lights, 0x17, 0x0566);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(custom_lights, 0x18, 0x14);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(light_macro, 0x19, 0x07d2);
+
+static struct bin_attribute *ryos_bin_attrs[] = {
+ &bin_attr_control,
+ &bin_attr_profile,
+ &bin_attr_keys_primary,
+ &bin_attr_keys_function,
+ &bin_attr_keys_macro,
+ &bin_attr_keys_thumbster,
+ &bin_attr_keys_extra,
+ &bin_attr_keys_easyzone,
+ &bin_attr_key_mask,
+ &bin_attr_light,
+ &bin_attr_macro,
+ &bin_attr_info,
+ &bin_attr_reset,
+ &bin_attr_light_control,
+ &bin_attr_talk,
+ &bin_attr_stored_lights,
+ &bin_attr_custom_lights,
+ &bin_attr_light_macro,
+ NULL,
+};
+
+static const struct attribute_group ryos_group = {
+ .bin_attrs = ryos_bin_attrs,
+};
+
+static const struct attribute_group *ryos_groups[] = {
+ &ryos_group,
+ NULL,
+};
+
+static int ryos_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct roccat_common2_device *ryos;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != RYOS_USB_INTERFACE_PROTOCOL) {
+ hid_set_drvdata(hdev, NULL);
+ return 0;
+ }
+
+ ryos = kzalloc(sizeof(*ryos), GFP_KERNEL);
+ if (!ryos) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, ryos);
+
+ retval = roccat_common2_device_init_struct(usb_dev, ryos);
+ if (retval) {
+ hid_err(hdev, "couldn't init Ryos device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(ryos_class, hdev,
+ sizeof(struct ryos_report_special));
+ if (retval < 0) {
+ hid_err(hdev, "couldn't init char dev\n");
+ } else {
+ ryos->chrdev_minor = retval;
+ ryos->roccat_claimed = 1;
+ }
+
+ return 0;
+exit_free:
+ kfree(ryos);
+ return retval;
+}
+
+static void ryos_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct roccat_common2_device *ryos;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != RYOS_USB_INTERFACE_PROTOCOL)
+ return;
+
+ ryos = hid_get_drvdata(hdev);
+ if (ryos->roccat_claimed)
+ roccat_disconnect(ryos->chrdev_minor);
+ kfree(ryos);
+}
+
+static int ryos_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = ryos_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void ryos_remove(struct hid_device *hdev)
+{
+ ryos_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static int ryos_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct roccat_common2_device *ryos = hid_get_drvdata(hdev);
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != RYOS_USB_INTERFACE_PROTOCOL)
+ return 0;
+
+ if (data[0] != RYOS_REPORT_NUMBER_SPECIAL)
+ return 0;
+
+ if (ryos != NULL && ryos->roccat_claimed)
+ roccat_report_event(ryos->chrdev_minor, data);
+
+ return 0;
+}
+
+static const struct hid_device_id ryos_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, ryos_devices);
+
+static struct hid_driver ryos_driver = {
+ .name = "ryos",
+ .id_table = ryos_devices,
+ .probe = ryos_probe,
+ .remove = ryos_remove,
+ .raw_event = ryos_raw_event
+};
+
+static int __init ryos_init(void)
+{
+ int retval;
+
+ ryos_class = class_create(THIS_MODULE, "ryos");
+ if (IS_ERR(ryos_class))
+ return PTR_ERR(ryos_class);
+ ryos_class->dev_groups = ryos_groups;
+
+ retval = hid_register_driver(&ryos_driver);
+ if (retval)
+ class_destroy(ryos_class);
+ return retval;
+}
+
+static void __exit ryos_exit(void)
+{
+ hid_unregister_driver(&ryos_driver);
+ class_destroy(ryos_class);
+}
+
+module_init(ryos_init);
+module_exit(ryos_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Ryos MK/Glow/Pro driver");
+MODULE_LICENSE("GPL v2");
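
A short userspace sketch of how the new Ryos attributes are meant to be consumed, assuming the shared handlers keep the whole-buffer rule of the per-driver code removed earlier in this patch (off != 0 or count != size returns -EINVAL): the 8-byte `info` attribute has to be fetched in a single read() of exactly its size. The sysfs path is illustrative only; the real location depends on where the ryos class device lands in the tree.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[0x08];	/* size of the "info" attribute above */
		/* illustrative path; resolve the real one under /sys/class/ryos/ */
		int fd = open("/sys/class/ryos/ryos0/info", O_RDONLY);
		ssize_t n;

		if (fd < 0) {
			perror("open");
			return 1;
		}
		n = read(fd, buf, sizeof(buf));	/* partial reads are rejected */
		if (n == (ssize_t)sizeof(buf))
			printf("info[0] = 0x%02x\n", buf[0]);
		close(fd);
		return 0;
	}
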
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
index 0332267199d5..6dbf6e04dce7 100644
--- a/drivers/hid/hid-roccat-savu.c
+++ b/drivers/hid/hid-roccat-savu.c
@@ -27,98 +27,15 @@
static struct class *savu_class;
-static ssize_t savu_sysfs_read(struct file *fp, struct kobject *kobj,
- char *buf, loff_t off, size_t count,
- size_t real_size, uint command)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev));
- struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval;
-
- if (off >= real_size)
- return 0;
-
- if (off != 0 || count != real_size)
- return -EINVAL;
-
- mutex_lock(&savu->savu_lock);
- retval = roccat_common2_receive(usb_dev, command, buf, real_size);
- mutex_unlock(&savu->savu_lock);
-
- return retval ? retval : real_size;
-}
-
-static ssize_t savu_sysfs_write(struct file *fp, struct kobject *kobj,
- void const *buf, loff_t off, size_t count,
- size_t real_size, uint command)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev));
- struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval;
-
- if (off != 0 || count != real_size)
- return -EINVAL;
-
- mutex_lock(&savu->savu_lock);
- retval = roccat_common2_send_with_status(usb_dev, command,
- (void *)buf, real_size);
- mutex_unlock(&savu->savu_lock);
-
- return retval ? retval : real_size;
-}
-
-#define SAVU_SYSFS_W(thingy, THINGY) \
-static ssize_t savu_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
-{ \
- return savu_sysfs_write(fp, kobj, buf, off, count, \
- SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \
-}
-
-#define SAVU_SYSFS_R(thingy, THINGY) \
-static ssize_t savu_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
-{ \
- return savu_sysfs_read(fp, kobj, buf, off, count, \
- SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \
-}
-
-#define SAVU_SYSFS_RW(thingy, THINGY) \
-SAVU_SYSFS_W(thingy, THINGY) \
-SAVU_SYSFS_R(thingy, THINGY)
-
-#define SAVU_BIN_ATTRIBUTE_RW(thingy, THINGY) \
-SAVU_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
- .attr = { .name = #thingy, .mode = 0660 }, \
- .size = SAVU_SIZE_ ## THINGY, \
- .read = savu_sysfs_read_ ## thingy, \
- .write = savu_sysfs_write_ ## thingy \
-}
-
-#define SAVU_BIN_ATTRIBUTE_W(thingy, THINGY) \
-SAVU_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
- .attr = { .name = #thingy, .mode = 0220 }, \
- .size = SAVU_SIZE_ ## THINGY, \
- .write = savu_sysfs_write_ ## thingy \
-}
-
-SAVU_BIN_ATTRIBUTE_W(control, CONTROL);
-SAVU_BIN_ATTRIBUTE_RW(profile, PROFILE);
-SAVU_BIN_ATTRIBUTE_RW(general, GENERAL);
-SAVU_BIN_ATTRIBUTE_RW(buttons, BUTTONS);
-SAVU_BIN_ATTRIBUTE_RW(macro, MACRO);
-SAVU_BIN_ATTRIBUTE_RW(info, INFO);
-SAVU_BIN_ATTRIBUTE_RW(sensor, SENSOR);
-
-static struct bin_attribute *savu_bin_attributes[] = {
+ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x4, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile, 0x5, 0x03);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(general, 0x6, 0x10);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(buttons, 0x7, 0x2f);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(macro, 0x8, 0x0823);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x9, 0x08);
+ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0xc, 0x04);
+
+static struct bin_attribute *savu_bin_attrs[] = {
&bin_attr_control,
&bin_attr_profile,
&bin_attr_general,
@@ -130,7 +47,7 @@ static struct bin_attribute *savu_bin_attributes[] = {
};
static const struct attribute_group savu_group = {
- .bin_attrs = savu_bin_attributes,
+ .bin_attrs = savu_bin_attrs,
};
static const struct attribute_group *savu_groups[] = {
@@ -138,19 +55,11 @@ static const struct attribute_group *savu_groups[] = {
NULL,
};
-static int savu_init_savu_device_struct(struct usb_device *usb_dev,
- struct savu_device *savu)
-{
- mutex_init(&savu->savu_lock);
-
- return 0;
-}
-
static int savu_init_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
struct usb_device *usb_dev = interface_to_usbdev(intf);
- struct savu_device *savu;
+ struct roccat_common2_device *savu;
int retval;
if (intf->cur_altsetting->desc.bInterfaceProtocol
@@ -166,9 +75,9 @@ static int savu_init_specials(struct hid_device *hdev)
}
hid_set_drvdata(hdev, savu);
- retval = savu_init_savu_device_struct(usb_dev, savu);
+ retval = roccat_common2_device_init_struct(usb_dev, savu);
if (retval) {
- hid_err(hdev, "couldn't init struct savu_device\n");
+ hid_err(hdev, "couldn't init Savu device\n");
goto exit_free;
}
@@ -190,7 +99,7 @@ exit_free:
static void savu_remove_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct savu_device *savu;
+ struct roccat_common2_device *savu;
if (intf->cur_altsetting->desc.bInterfaceProtocol
!= USB_INTERFACE_PROTOCOL_MOUSE)
@@ -239,7 +148,7 @@ static void savu_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
-static void savu_report_to_chrdev(struct savu_device const *savu,
+static void savu_report_to_chrdev(struct roccat_common2_device const *savu,
u8 const *data)
{
struct savu_roccat_report roccat_report;
@@ -261,7 +170,7 @@ static int savu_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct savu_device *savu = hid_get_drvdata(hdev);
+ struct roccat_common2_device *savu = hid_get_drvdata(hdev);
if (intf->cur_altsetting->desc.bInterfaceProtocol
!= USB_INTERFACE_PROTOCOL_MOUSE)
diff --git a/drivers/hid/hid-roccat-savu.h b/drivers/hid/hid-roccat-savu.h
index 9120ba72087f..d23217bd2b86 100644
--- a/drivers/hid/hid-roccat-savu.h
+++ b/drivers/hid/hid-roccat-savu.h
@@ -14,31 +14,6 @@
#include <linux/types.h>
-enum {
- SAVU_SIZE_CONTROL = 0x03,
- SAVU_SIZE_PROFILE = 0x03,
- SAVU_SIZE_GENERAL = 0x10,
- SAVU_SIZE_BUTTONS = 0x2f,
- SAVU_SIZE_MACRO = 0x0823,
- SAVU_SIZE_INFO = 0x08,
- SAVU_SIZE_SENSOR = 0x04,
-};
-
-enum savu_control_requests {
- SAVU_CONTROL_REQUEST_GENERAL = 0x80,
- SAVU_CONTROL_REQUEST_BUTTONS = 0x90,
-};
-
-enum savu_commands {
- SAVU_COMMAND_CONTROL = 0x4,
- SAVU_COMMAND_PROFILE = 0x5,
- SAVU_COMMAND_GENERAL = 0x6,
- SAVU_COMMAND_BUTTONS = 0x7,
- SAVU_COMMAND_MACRO = 0x8,
- SAVU_COMMAND_INFO = 0x9,
- SAVU_COMMAND_SENSOR = 0xc,
-};
-
struct savu_mouse_report_special {
uint8_t report_number; /* always 3 */
uint8_t zero;
@@ -77,11 +52,4 @@ struct savu_roccat_report {
uint8_t data[2];
} __packed;
-struct savu_device {
- int roccat_claimed;
- int chrdev_minor;
-
- struct mutex savu_lock;
-};
-
#endif
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 10e1581022cf..9e4cdca549c0 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -326,7 +326,8 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
field->logical == attr_usage_id) {
sensor_hub_fill_attr_info(info, i, report->id,
field->unit, field->unit_exponent,
- field->report_size);
+ field->report_size *
+ field->report_count);
ret = 0;
} else {
for (j = 0; j < field->maxusage; ++j) {
@@ -338,7 +339,8 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
i, report->id,
field->unit,
field->unit_exponent,
- field->report_size);
+ field->report_size *
+ field->report_count);
ret = 0;
break;
}
@@ -425,9 +427,10 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
hid_dbg(hdev, "%d collection_index:%x hid:%x sz:%x\n",
i, report->field[i]->usage->collection_index,
report->field[i]->usage->hid,
- report->field[i]->report_size/8);
-
- sz = report->field[i]->report_size/8;
+ (report->field[i]->report_size *
+ report->field[i]->report_count)/8);
+ sz = (report->field[i]->report_size *
+ report->field[i]->report_count)/8;
if (pdata->pending.status && pdata->pending.attr_usage_id ==
report->field[i]->usage->hid) {
hid_dbg(hdev, "data was pending ...\n");
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b18320db5f7d..bc37a1800166 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -419,21 +419,14 @@ static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
*/
static int sixaxis_set_operational_usb(struct hid_device *hdev)
{
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct usb_device *dev = interface_to_usbdev(intf);
- __u16 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
int ret;
char *buf = kmalloc(18, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- HID_REQ_GET_REPORT,
- USB_DIR_IN | USB_TYPE_CLASS |
- USB_RECIP_INTERFACE,
- (3 << 8) | 0xf2, ifnum, buf, 17,
- USB_CTRL_GET_TIMEOUT);
+ ret = hdev->hid_get_raw_report(hdev, 0xf2, buf, 17, HID_FEATURE_REPORT);
+
if (ret < 0)
hid_err(hdev, "can't set operational mode\n");
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index abb20db2b443..1446f526ee8b 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -834,7 +834,8 @@ static void wiimote_init_set_type(struct wiimote_data *wdata,
goto done;
}
- if (vendor == USB_VENDOR_ID_NINTENDO) {
+ if (vendor == USB_VENDOR_ID_NINTENDO ||
+ vendor == USB_VENDOR_ID_NINTENDO2) {
if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) {
devtype = WIIMOTE_DEV_GEN10;
goto done;
@@ -1855,6 +1856,8 @@ static void wiimote_hid_remove(struct hid_device *hdev)
static const struct hid_device_id wiimote_hid_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2,
+ USB_DEVICE_ID_NINTENDO_WIIMOTE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
{ }
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 71adf9e60b13..6b61f01e01e7 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -1655,10 +1655,39 @@ static void wiimod_pro_in_ext(struct wiimote_data *wdata, const __u8 *ext)
ly = (ext[4] & 0xff) | ((ext[5] & 0x0f) << 8);
ry = (ext[6] & 0xff) | ((ext[7] & 0x0f) << 8);
- input_report_abs(wdata->extension.input, ABS_X, lx - 0x800);
- input_report_abs(wdata->extension.input, ABS_Y, ly - 0x800);
- input_report_abs(wdata->extension.input, ABS_RX, rx - 0x800);
- input_report_abs(wdata->extension.input, ABS_RY, ry - 0x800);
+ /* zero-point offsets */
+ lx -= 0x800;
+ ly = 0x800 - ly;
+ rx -= 0x800;
+ ry = 0x800 - ry;
+
+ /* Trivial automatic calibration. We don't know any calibration data
+ * in the EEPROM so we must use the first report to calibrate the
+ * null-position of the analog sticks. Users can retrigger calibration
+ * via sysfs, or set it explicitly. If data is off more than abs(500),
+ * we skip calibration as the sticks are likely to be moved already. */
+ if (!(wdata->state.flags & WIIPROTO_FLAG_PRO_CALIB_DONE)) {
+ wdata->state.flags |= WIIPROTO_FLAG_PRO_CALIB_DONE;
+ if (abs(lx) < 500)
+ wdata->state.calib_pro_sticks[0] = -lx;
+ if (abs(ly) < 500)
+ wdata->state.calib_pro_sticks[1] = -ly;
+ if (abs(rx) < 500)
+ wdata->state.calib_pro_sticks[2] = -rx;
+ if (abs(ry) < 500)
+ wdata->state.calib_pro_sticks[3] = -ry;
+ }
+
+ /* apply calibration data */
+ lx += wdata->state.calib_pro_sticks[0];
+ ly += wdata->state.calib_pro_sticks[1];
+ rx += wdata->state.calib_pro_sticks[2];
+ ry += wdata->state.calib_pro_sticks[3];
+
+ input_report_abs(wdata->extension.input, ABS_X, lx);
+ input_report_abs(wdata->extension.input, ABS_Y, ly);
+ input_report_abs(wdata->extension.input, ABS_RX, rx);
+ input_report_abs(wdata->extension.input, ABS_RY, ry);
input_report_key(wdata->extension.input,
wiimod_pro_map[WIIMOD_PRO_KEY_RIGHT],
@@ -1766,12 +1795,70 @@ static int wiimod_pro_play(struct input_dev *dev, void *data,
return 0;
}
+static ssize_t wiimod_pro_calib_show(struct device *dev,
+ struct device_attribute *attr,
+ char *out)
+{
+ struct wiimote_data *wdata = dev_to_wii(dev);
+ int r;
+
+ r = 0;
+ r += sprintf(&out[r], "%+06hd:", wdata->state.calib_pro_sticks[0]);
+ r += sprintf(&out[r], "%+06hd ", wdata->state.calib_pro_sticks[1]);
+ r += sprintf(&out[r], "%+06hd:", wdata->state.calib_pro_sticks[2]);
+ r += sprintf(&out[r], "%+06hd\n", wdata->state.calib_pro_sticks[3]);
+
+ return r;
+}
+
+static ssize_t wiimod_pro_calib_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wiimote_data *wdata = dev_to_wii(dev);
+ int r;
+ s16 x1, y1, x2, y2;
+
+ if (!strncmp(buf, "scan\n", 5)) {
+ spin_lock_irq(&wdata->state.lock);
+ wdata->state.flags &= ~WIIPROTO_FLAG_PRO_CALIB_DONE;
+ spin_unlock_irq(&wdata->state.lock);
+ } else {
+ r = sscanf(buf, "%hd:%hd %hd:%hd", &x1, &y1, &x2, &y2);
+ if (r != 4)
+ return -EINVAL;
+
+ spin_lock_irq(&wdata->state.lock);
+ wdata->state.flags |= WIIPROTO_FLAG_PRO_CALIB_DONE;
+ spin_unlock_irq(&wdata->state.lock);
+
+ wdata->state.calib_pro_sticks[0] = x1;
+ wdata->state.calib_pro_sticks[1] = y1;
+ wdata->state.calib_pro_sticks[2] = x2;
+ wdata->state.calib_pro_sticks[3] = y2;
+ }
+
+ return strnlen(buf, PAGE_SIZE);
+}
+
+static DEVICE_ATTR(pro_calib, S_IRUGO|S_IWUSR|S_IWGRP, wiimod_pro_calib_show,
+ wiimod_pro_calib_store);
+
static int wiimod_pro_probe(const struct wiimod_ops *ops,
struct wiimote_data *wdata)
{
int ret, i;
+ unsigned long flags;
INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
+ wdata->state.calib_pro_sticks[0] = 0;
+ wdata->state.calib_pro_sticks[1] = 0;
+ wdata->state.calib_pro_sticks[2] = 0;
+ wdata->state.calib_pro_sticks[3] = 0;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags &= ~WIIPROTO_FLAG_PRO_CALIB_DONE;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
wdata->extension.input = input_allocate_device();
if (!wdata->extension.input)
@@ -1786,6 +1873,13 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops,
goto err_free;
}
+ ret = device_create_file(&wdata->hdev->dev,
+ &dev_attr_pro_calib);
+ if (ret) {
+ hid_err(wdata->hdev, "cannot create sysfs attribute\n");
+ goto err_free;
+ }
+
wdata->extension.input->open = wiimod_pro_open;
wdata->extension.input->close = wiimod_pro_close;
wdata->extension.input->dev.parent = &wdata->hdev->dev;
@@ -1806,20 +1900,23 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops,
set_bit(ABS_RX, wdata->extension.input->absbit);
set_bit(ABS_RY, wdata->extension.input->absbit);
input_set_abs_params(wdata->extension.input,
- ABS_X, -0x800, 0x800, 2, 4);
+ ABS_X, -0x400, 0x400, 4, 100);
input_set_abs_params(wdata->extension.input,
- ABS_Y, -0x800, 0x800, 2, 4);
+ ABS_Y, -0x400, 0x400, 4, 100);
input_set_abs_params(wdata->extension.input,
- ABS_RX, -0x800, 0x800, 2, 4);
+ ABS_RX, -0x400, 0x400, 4, 100);
input_set_abs_params(wdata->extension.input,
- ABS_RY, -0x800, 0x800, 2, 4);
+ ABS_RY, -0x400, 0x400, 4, 100);
ret = input_register_device(wdata->extension.input);
if (ret)
- goto err_free;
+ goto err_file;
return 0;
+err_file:
+ device_remove_file(&wdata->hdev->dev,
+ &dev_attr_pro_calib);
err_free:
input_free_device(wdata->extension.input);
wdata->extension.input = NULL;
@@ -1837,6 +1934,8 @@ static void wiimod_pro_remove(const struct wiimod_ops *ops,
input_unregister_device(wdata->extension.input);
wdata->extension.input = NULL;
cancel_work_sync(&wdata->rumble_worker);
+ device_remove_file(&wdata->hdev->dev,
+ &dev_attr_pro_calib);
spin_lock_irqsave(&wdata->state.lock, flags);
wiiproto_req_rumble(wdata, 0);
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
index 75db0c400037..03065f1917fc 100644
--- a/drivers/hid/hid-wiimote.h
+++ b/drivers/hid/hid-wiimote.h
@@ -46,6 +46,7 @@
#define WIIPROTO_FLAG_DRM_LOCKED 0x8000
#define WIIPROTO_FLAG_BUILTIN_MP 0x010000
#define WIIPROTO_FLAG_NO_MP 0x020000
+#define WIIPROTO_FLAG_PRO_CALIB_DONE 0x040000
#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
@@ -135,6 +136,7 @@ struct wiimote_state {
/* calibration/cache data */
__u16 calib_bboard[4][3];
+ __s16 calib_pro_sticks[4];
__u8 cache_rumble;
};
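
Usage sketch for the new pro_calib attribute: writing "scan" clears WIIPROTO_FLAG_PRO_CALIB_DONE so the next extension report re-seeds the stick offsets, while writing four numbers in the sscanf format shown above ("x1:y1 x2:y2") stores explicit offsets. The path below is illustrative; the attribute sits on the wiimote's HID device directory.

	#include <stdio.h>

	int main(void)
	{
		/* illustrative path; substitute the real HID device directory */
		FILE *f = fopen("/sys/bus/hid/devices/XXXX/pro_calib", "w");

		if (!f)
			return 1;
		fputs("scan\n", f);	/* or e.g. "12:-34 0:7\n" for explicit offsets */
		return fclose(f) ? 1 : 0;
	}
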
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index c1336193b04b..ae48d18ee315 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -455,10 +455,6 @@ static void i2c_hid_init_reports(struct hid_device *hid)
}
list_for_each_entry(report,
- &hid->report_enum[HID_INPUT_REPORT].report_list, list)
- i2c_hid_init_report(report, inbuf, ihid->bufsize);
-
- list_for_each_entry(report,
&hid->report_enum[HID_FEATURE_REPORT].report_list, list)
i2c_hid_init_report(report, inbuf, ihid->bufsize);
@@ -854,10 +850,10 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
};
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4], *obj;
+ union acpi_object params[4];
struct acpi_object_list input;
struct acpi_device *adev;
+ unsigned long long value;
acpi_handle handle;
handle = ACPI_HANDLE(&client->dev);
@@ -878,22 +874,14 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
params[3].package.count = 0;
params[3].package.elements = NULL;
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf))) {
+ if (ACPI_FAILURE(acpi_evaluate_integer(handle, "_DSM", &input,
+ &value))) {
dev_err(&client->dev, "device _DSM execution failed\n");
return -ENODEV;
}
- obj = (union acpi_object *)buf.pointer;
- if (obj->type != ACPI_TYPE_INTEGER) {
- dev_err(&client->dev, "device _DSM returned invalid type: %d\n",
- obj->type);
- kfree(buf.pointer);
- return -EINVAL;
- }
-
- pdata->hid_descriptor_address = obj->integer.value;
+ pdata->hid_descriptor_address = value;
- kfree(buf.pointer);
return 0;
}
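
The i2c-hid hunk folds the open-coded _DSM result handling into acpi_evaluate_integer(), which performs roughly the same steps on the driver's behalf: evaluate the method, verify the result is an integer, copy it out and free the buffer. One small behavioural difference worth noting is that a non-integer result now surfaces as -ENODEV instead of the old -EINVAL. Rough equivalent of what the helper subsumes, shown for comparison only (this is the removed driver logic, not the ACPICA implementation):

	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf)))
		return -ENODEV;
	obj = buf.pointer;
	if (obj->type != ACPI_TYPE_INTEGER) {
		kfree(buf.pointer);
		return -EINVAL;
	}
	pdata->hid_descriptor_address = obj->integer.value;
	kfree(buf.pointer);
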
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 07345521f421..0db9a67278ba 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -84,6 +84,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
@@ -110,6 +112,9 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SIS, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
{ 0, 0 }
};
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c
index 66d44581e1b1..749f7b5c8179 100644
--- a/drivers/hsi/hsi.c
+++ b/drivers/hsi/hsi.c
@@ -33,11 +33,13 @@ static ssize_t modalias_show(struct device *dev,
{
return sprintf(buf, "hsi:%s\n", dev_name(dev));
}
+static DEVICE_ATTR_RO(modalias);
-static struct device_attribute hsi_bus_dev_attrs[] = {
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static struct attribute *hsi_bus_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(hsi_bus_dev);
static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -53,7 +55,7 @@ static int hsi_bus_match(struct device *dev, struct device_driver *driver)
static struct bus_type hsi_bus_type = {
.name = "hsi",
- .dev_attrs = hsi_bus_dev_attrs,
+ .dev_groups = hsi_bus_dev_groups,
.match = hsi_bus_match,
.uevent = hsi_bus_uevent,
};
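
The hsi hunk is the stock dev_attrs to dev_groups conversion. ATTRIBUTE_GROUPS(hsi_bus_dev) supplies the group plumbing the driver no longer has to spell out; roughly, it expands to:

	static const struct attribute_group hsi_bus_dev_group = {
		.attrs = hsi_bus_dev_attrs,
	};

	static const struct attribute_group *hsi_bus_dev_groups[] = {
		&hsi_bus_dev_group,
		NULL,
	};
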
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 2ebd6ce46108..9c8a6bab8228 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -164,7 +164,7 @@ static const u8 abituguru_bank2_max_threshold = 50;
static const int abituguru_pwm_settings_multiplier[5] = { 0, 1, 1, 1000, 1000 };
/*
* Min / Max allowed values for pwm_settings. Note: pwm1 (CPU fan) is a
- * special case the minium allowed pwm% setting for this is 30% (77) on
+ * special case the minimum allowed pwm% setting for this is 30% (77) on
* some MB's this special case is handled in the code!
*/
static const u8 abituguru_pwm_min[5] = { 0, 170, 170, 25, 25 };
@@ -517,7 +517,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
ABIT_UGURU_DEBUG(2, "testing bank1 sensor %d\n", (int)sensor_addr);
/*
- * Volt sensor test, enable volt low alarm, set min value ridicously
+ * Volt sensor test, enable volt low alarm, set min value ridiculously
* high, or vica versa if the reading is very high. If its a volt
* sensor this should always give us an alarm.
*/
@@ -564,7 +564,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
/*
* Temp sensor test, enable sensor as a temp sensor, set beep value
- * ridicously low (but not too low, otherwise uguru ignores it).
+ * ridiculously low (but not too low, otherwise uguru ignores it).
* If its a temp sensor this should always give us an alarm.
*/
buf[0] = ABIT_UGURU_TEMP_HIGH_ALARM_ENABLE;
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index 0cac8c0b001a..4ae74aa8cdc1 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -176,7 +176,7 @@ struct abituguru3_data {
/*
* The abituguru3 supports up to 48 sensors, and thus has registers
- * sets for 48 sensors, for convienence reasons / simplicity of the
+ * sets for 48 sensors, for convenience reasons / simplicity of the
* code we always read and store all registers for all 48 sensors
*/
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index a9e3d0152c0b..8d40da314a8e 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -381,8 +381,10 @@ static ssize_t show_str(struct device *dev,
val = resource->oem_info;
break;
default:
- BUG();
+ WARN(1, "Implementation error: unexpected attribute index %d\n",
+ attr->index);
val = "";
+ break;
}
return sprintf(buf, "%s\n", val);
@@ -436,7 +438,9 @@ static ssize_t show_val(struct device *dev,
val = resource->trip[attr->index - 7] * 1000;
break;
default:
- BUG();
+ WARN(1, "Implementation error: unexpected attribute index %d\n",
+ attr->index);
+ break;
}
return sprintf(buf, "%llu\n", val);
@@ -855,7 +859,8 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
dev_info(&device->dev, "Capping in progress.\n");
break;
default:
- BUG();
+ WARN(1, "Unexpected event %d\n", event);
+ break;
}
mutex_unlock(&resource->lock);
@@ -991,7 +996,7 @@ static int __init acpi_power_meter_init(void)
result = acpi_bus_register_driver(&acpi_power_meter_driver);
if (result < 0)
- return -ENODEV;
+ return result;
return 0;
}
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
index 751b1f0264a4..04c08c2f79b8 100644
--- a/drivers/hwmon/adcxx.c
+++ b/drivers/hwmon/adcxx.c
@@ -203,7 +203,6 @@ out_err:
for (i--; i >= 0; i--)
device_remove_file(&spi->dev, &ad_input[i].dev_attr);
- spi_set_drvdata(spi, NULL);
mutex_unlock(&adc->lock);
return status;
}
@@ -218,7 +217,6 @@ static int adcxx_remove(struct spi_device *spi)
for (i = 0; i < 3 + adc->channels; i++)
device_remove_file(&spi->dev, &ad_input[i].dev_attr);
- spi_set_drvdata(spi, NULL);
mutex_unlock(&adc->lock);
return 0;
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 3a6d9ef1c16c..b3498acb9ab4 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -616,7 +616,7 @@ static struct adm1026_data *adm1026_update_device(struct device *dev)
data->gpio = gpio;
data->last_reading = jiffies;
- }; /* last_reading */
+ } /* last_reading */
if (!data->valid ||
time_after(jiffies, data->last_config + ADM1026_CONFIG_INTERVAL)) {
@@ -700,7 +700,7 @@ static struct adm1026_data *adm1026_update_device(struct device *dev)
}
data->last_config = jiffies;
- }; /* last_config */
+ } /* last_config */
data->valid = 1;
mutex_unlock(&data->update_lock);
@@ -1791,7 +1791,7 @@ static int adm1026_detect(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
/* We need to be able to do byte I/O */
return -ENODEV;
- };
+ }
/* Now, we do the remaining detection. */
diff --git a/drivers/hwmon/adt7310.c b/drivers/hwmon/adt7310.c
index da5f0789fb97..5994cf68e0a4 100644
--- a/drivers/hwmon/adt7310.c
+++ b/drivers/hwmon/adt7310.c
@@ -42,13 +42,8 @@ static const u8 adt7310_reg_table[] = {
static int adt7310_spi_read_word(struct device *dev, u8 reg)
{
struct spi_device *spi = to_spi_device(dev);
- int ret;
- ret = spi_w8r16(spi, AD7310_COMMAND(reg) | ADT7310_CMD_READ);
- if (ret < 0)
- return ret;
-
- return be16_to_cpu((__force __be16)ret);
+ return spi_w8r16be(spi, AD7310_COMMAND(reg) | ADT7310_CMD_READ);
}
static int adt7310_spi_write_word(struct device *dev, u8 reg, u16 data)
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index addb5a4d5064..562cc3881d33 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -700,7 +700,7 @@ static int find_trange_value(int trange)
if (trange_values[i] == trange)
return i;
- return -ENODEV;
+ return -EINVAL;
}
static struct adt7462_data *adt7462_update_device(struct device *dev)
@@ -1294,9 +1294,8 @@ static ssize_t set_pwm_tmax(struct device *dev,
/* trange = tmax - tmin */
tmin = (data->pwm_tmin[attr->index] - 64) * 1000;
trange_value = find_trange_value(trange - tmin);
-
if (trange_value < 0)
- return -EINVAL;
+ return trange_value;
temp = trange_value << ADT7462_PWM_RANGE_SHIFT;
temp |= data->pwm_trange[attr->index] & ADT7462_PWM_HYST_MASK;
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 3ad9d849add2..8d9f2a0e8efe 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -138,7 +138,7 @@ static inline u8 read_byte(struct i2c_client *client, u8 reg)
dev_err(&client->dev,
"Unable to read from register 0x%02x.\n", reg);
return 0;
- };
+ }
return res & 0xff;
}
@@ -149,7 +149,7 @@ static inline int write_byte(struct i2c_client *client, u8 reg, u8 data)
dev_err(&client->dev,
"Unable to write value 0x%02x to register 0x%02x.\n",
data, reg);
- };
+ }
return res;
}
@@ -1030,7 +1030,7 @@ static struct asc7621_data *asc7621_update_device(struct device *dev)
}
}
data->last_high_reading = jiffies;
- }; /* last_reading */
+ } /* last_reading */
/* Read all the low priority registers. */
@@ -1044,7 +1044,7 @@ static struct asc7621_data *asc7621_update_device(struct device *dev)
}
}
data->last_low_reading = jiffies;
- }; /* last_reading */
+ } /* last_reading */
data->valid = 1;
@@ -1084,11 +1084,11 @@ static void asc7621_init_client(struct i2c_client *client)
dev_err(&client->dev,
"Client (%d,0x%02x) config is locked.\n",
i2c_adapter_id(client->adapter), client->addr);
- };
+ }
if (!(value & 0x04)) {
dev_err(&client->dev, "Client (%d,0x%02x) is not ready.\n",
i2c_adapter_id(client->adapter), client->addr);
- };
+ }
/*
* Start monitoring
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index b25c64302cbc..1d7ff46812c3 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -119,7 +119,7 @@ struct atk_data {
acpi_handle rtmp_handle;
acpi_handle rvlt_handle;
acpi_handle rfan_handle;
- /* new inteface */
+ /* new interface */
acpi_handle enumerate_handle;
acpi_handle read_handle;
acpi_handle write_handle;
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index aecb9ea7beb5..ddff02e3e66f 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -147,10 +147,9 @@ static ssize_t atxp1_storevcore(struct device *dev,
/* Calculate VID */
vid = vid_to_reg(vcore, data->vrm);
-
if (vid < 0) {
dev_err(dev, "VID calculation failed.\n");
- return -1;
+ return vid;
}
/*
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index a26ba7a17c2b..872d76744e30 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -120,7 +120,7 @@ static const u8 DS1621_REG_TEMP[3] = {
/* Each client has this additional data */
struct ds1621_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
@@ -151,10 +151,10 @@ static inline u16 DS1621_TEMP_TO_REG(long temp, u8 zbits)
return temp;
}
-static void ds1621_init_client(struct i2c_client *client)
+static void ds1621_init_client(struct ds1621_data *data,
+ struct i2c_client *client)
{
u8 conf, new_conf, sreg, resol;
- struct ds1621_data *data = i2c_get_clientdata(client);
new_conf = conf = i2c_smbus_read_byte_data(client, DS1621_REG_CONF);
/* switch to continuous conversion mode */
@@ -197,8 +197,8 @@ static void ds1621_init_client(struct i2c_client *client)
static struct ds1621_data *ds1621_update_client(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1621_data *data = i2c_get_clientdata(client);
+ struct ds1621_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
u8 new_conf;
mutex_lock(&data->update_lock);
@@ -247,8 +247,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1621_data *data = i2c_get_clientdata(client);
+ struct ds1621_data *data = dev_get_drvdata(dev);
long val;
int err;
@@ -258,7 +257,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
mutex_lock(&data->update_lock);
data->temp[attr->index] = DS1621_TEMP_TO_REG(val, data->zbits);
- i2c_smbus_write_word_swapped(client, DS1621_REG_TEMP[attr->index],
+ i2c_smbus_write_word_swapped(data->client, DS1621_REG_TEMP[attr->index],
data->temp[attr->index]);
mutex_unlock(&data->update_lock);
return count;
@@ -282,16 +281,15 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1621_data *data = i2c_get_clientdata(client);
+ struct ds1621_data *data = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%hu\n", data->update_interval);
}
static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1621_data *data = i2c_get_clientdata(client);
+ struct ds1621_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long convrate;
s32 err;
int resol = 0;
@@ -343,8 +341,7 @@ static umode_t ds1621_attribute_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct i2c_client *client = to_i2c_client(dev);
- struct ds1621_data *data = i2c_get_clientdata(client);
+ struct ds1621_data *data = dev_get_drvdata(dev);
if (attr == &dev_attr_update_interval.attr)
if (data->kind == ds1621 || data->kind == ds1625)
@@ -357,52 +354,31 @@ static const struct attribute_group ds1621_group = {
.attrs = ds1621_attributes,
.is_visible = ds1621_attribute_visible
};
+__ATTRIBUTE_GROUPS(ds1621);
static int ds1621_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ds1621_data *data;
- int err;
+ struct device *hwmon_dev;
data = devm_kzalloc(&client->dev, sizeof(struct ds1621_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->kind = id->driver_data;
+ data->client = client;
/* Initialize the DS1621 chip */
- ds1621_init_client(client);
-
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &ds1621_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
-
- return 0;
-
- exit_remove_files:
- sysfs_remove_group(&client->dev.kobj, &ds1621_group);
- return err;
-}
-
-static int ds1621_remove(struct i2c_client *client)
-{
- struct ds1621_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &ds1621_group);
+ ds1621_init_client(data, client);
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+ client->name, data,
+ ds1621_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id ds1621_id[] = {
@@ -422,7 +398,6 @@ static struct i2c_driver ds1621_driver = {
.name = "ds1621",
},
.probe = ds1621_probe,
- .remove = ds1621_remove,
.id_table = ds1621_id,
};
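
Two things make the ds1621 remove() path disappear above: the hwmon registration is now devm-managed, so it is torn down automatically when the i2c client unbinds, and the sysfs group comes in through ds1621_groups (generated by __ATTRIBUTE_GROUPS) rather than a manual sysfs_create_group(). The PTR_ERR_OR_ZERO() at the end of probe is just shorthand for:

	if (IS_ERR(hwmon_dev))
		return PTR_ERR(hwmon_dev);
	return 0;
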
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 142e1cb8dea7..90ec1173b8a1 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -21,7 +21,6 @@
*
* TODO
* - cache alarm and critical limit registers
- * - add emc1404 support
*/
#include <linux/module.h>
@@ -40,7 +39,8 @@
#define THERMAL_REVISION_REG 0xff
struct thermal_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
+ const struct attribute_group *groups[3];
struct mutex mutex;
/*
* Cache the hyst value so we don't keep re-reading it. In theory
@@ -53,10 +53,11 @@ struct thermal_data {
static ssize_t show_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
- int retval = i2c_smbus_read_byte_data(client, sda->index);
+ struct thermal_data *data = dev_get_drvdata(dev);
+ int retval;
+ retval = i2c_smbus_read_byte_data(data->client, sda->index);
if (retval < 0)
return retval;
return sprintf(buf, "%d000\n", retval);
@@ -65,27 +66,27 @@ static ssize_t show_temp(struct device *dev,
static ssize_t show_bit(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
- int retval = i2c_smbus_read_byte_data(client, sda->nr);
+ struct thermal_data *data = dev_get_drvdata(dev);
+ int retval;
+ retval = i2c_smbus_read_byte_data(data->client, sda->nr);
if (retval < 0)
return retval;
- retval &= sda->index;
- return sprintf(buf, "%d\n", retval ? 1 : 0);
+ return sprintf(buf, "%d\n", !!(retval & sda->index));
}
static ssize_t store_temp(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
- struct i2c_client *client = to_i2c_client(dev);
+ struct thermal_data *data = dev_get_drvdata(dev);
unsigned long val;
int retval;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
- retval = i2c_smbus_write_byte_data(client, sda->index,
+ retval = i2c_smbus_write_byte_data(data->client, sda->index,
DIV_ROUND_CLOSEST(val, 1000));
if (retval < 0)
return retval;
@@ -95,9 +96,9 @@ static ssize_t store_temp(struct device *dev,
static ssize_t store_bit(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct thermal_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
+ struct thermal_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int retval;
@@ -124,9 +125,9 @@ fail:
static ssize_t show_hyst(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct thermal_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct thermal_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int retval;
int hyst;
@@ -147,9 +148,9 @@ static ssize_t show_hyst(struct device *dev,
static ssize_t store_hyst(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct thermal_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct thermal_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int retval;
int hyst;
unsigned long val;
@@ -232,10 +233,26 @@ static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO,
static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR,
show_hyst, store_hyst, 0x1A);
+static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x2D);
+static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x2C);
+static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x30);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 0x2A);
+static SENSOR_DEVICE_ATTR_2(temp4_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x08);
+static SENSOR_DEVICE_ATTR_2(temp4_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x08);
+static SENSOR_DEVICE_ATTR_2(temp4_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x08);
+static SENSOR_DEVICE_ATTR(temp4_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x30);
+
static SENSOR_DEVICE_ATTR_2(power_state, S_IRUGO | S_IWUSR,
show_bit, store_bit, 0x03, 0x40);
-static struct attribute *mid_att_thermal[] = {
+static struct attribute *emc1403_attrs[] = {
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
@@ -264,8 +281,24 @@ static struct attribute *mid_att_thermal[] = {
NULL
};
-static const struct attribute_group m_thermal_gr = {
- .attrs = mid_att_thermal
+static const struct attribute_group emc1403_group = {
+ .attrs = emc1403_attrs,
+};
+
+static struct attribute *emc1404_attrs[] = {
+ &sensor_dev_attr_temp4_min.dev_attr.attr,
+ &sensor_dev_attr_temp4_max.dev_attr.attr,
+ &sensor_dev_attr_temp4_crit.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_crit_hyst.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group emc1404_group = {
+ .attrs = emc1404_attrs,
};
static int emc1403_detect(struct i2c_client *client,
@@ -286,10 +319,12 @@ static int emc1403_detect(struct i2c_client *client,
case 0x23:
strlcpy(info->type, "emc1423", I2C_NAME_SIZE);
break;
- /*
- * Note: 0x25 is the 1404 which is very similar and this
- * driver could be extended
- */
+ case 0x25:
+ strlcpy(info->type, "emc1404", I2C_NAME_SIZE);
+ break;
+ case 0x27:
+ strlcpy(info->type, "emc1424", I2C_NAME_SIZE);
+ break;
default:
return -ENODEV;
}
@@ -304,43 +339,29 @@ static int emc1403_detect(struct i2c_client *client,
static int emc1403_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int res;
struct thermal_data *data;
+ struct device *hwmon_dev;
data = devm_kzalloc(&client->dev, sizeof(struct thermal_data),
GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->mutex);
data->hyst_valid = jiffies - 1; /* Expired */
- res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
- if (res) {
- dev_warn(&client->dev, "create group failed\n");
- return res;
- }
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- res = PTR_ERR(data->hwmon_dev);
- dev_warn(&client->dev, "register hwmon dev failed\n");
- goto thermal_error;
- }
- dev_info(&client->dev, "EMC1403 Thermal chip found\n");
- return 0;
-
-thermal_error:
- sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
- return res;
-}
+ data->groups[0] = &emc1403_group;
+ if (id->driver_data)
+ data->groups[1] = &emc1404_group;
-static int emc1403_remove(struct i2c_client *client)
-{
- struct thermal_data *data = i2c_get_clientdata(client);
+ hwmon_dev = hwmon_device_register_with_groups(&client->dev,
+ client->name, data,
+ data->groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
+ dev_info(&client->dev, "%s Thermal chip found\n", id->name);
return 0;
}
@@ -350,7 +371,9 @@ static const unsigned short emc1403_address_list[] = {
static const struct i2c_device_id emc1403_idtable[] = {
{ "emc1403", 0 },
+ { "emc1404", 1 },
{ "emc1423", 0 },
+ { "emc1424", 1 },
{ }
};
MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
@@ -362,7 +385,6 @@ static struct i2c_driver sensor_emc1403 = {
},
.detect = emc1403_detect,
.probe = emc1403_probe,
- .remove = emc1403_remove,
.id_table = emc1403_idtable,
.address_list = emc1403_address_list,
};
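
Why struct thermal_data grows a three-slot groups array in the emc1403 hunk: the list handed to hwmon_device_register_with_groups() must be NULL terminated, and devm_kzalloc() has already zeroed the structure, so probe only fills in the slots it needs.

	/* in struct thermal_data, as added above */
	const struct attribute_group *groups[3];

	/* after probe:
	 *   { &emc1403_group, NULL,           NULL }  for emc1403 / emc1423
	 *   { &emc1403_group, &emc1404_group, NULL }  for emc1404 / emc1424
	 */
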
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 31b221eeee6c..03d8592810bf 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -2420,7 +2420,6 @@ static int f71882fg_probe(struct platform_device *pdev)
exit_unregister_sysfs:
f71882fg_remove(pdev); /* Will unregister the sysfs files for us */
return err; /* f71882fg_remove() also frees our data */
- return err;
}
static int f71882fg_remove(struct platform_device *pdev)
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index a837b94977f4..80c42bea90ed 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -275,7 +275,7 @@ static bool duty_mode_enabled(u8 pwm_enable)
case 3: /* Manual, speed mode */
return false;
default:
- BUG();
+ WARN(1, "Unexpected pwm_enable value %d\n", pwm_enable);
return true;
}
}
@@ -291,7 +291,7 @@ static bool auto_mode_enabled(u8 pwm_enable)
case 4: /* Auto, duty mode */
return true;
default:
- BUG();
+ WARN(1, "Unexpected pwm_enable value %d\n", pwm_enable);
return false;
}
}
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index b7d6a5704eb2..73181be5b30b 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -31,6 +31,7 @@
#include <linux/hwmon.h>
#include <linux/gpio.h>
#include <linux/gpio-fan.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
@@ -169,7 +170,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data)
dev_warn(&fan_data->pdev->dev,
"missing speed array entry for GPIO value 0x%x\n", ctrl_val);
- return -EINVAL;
+ return -ENODEV;
}
static int rpm_to_speed_index(struct gpio_fan_data *fan_data, int rpm)
@@ -309,12 +310,6 @@ exit_unlock:
return ret;
}
-static ssize_t show_name(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "gpio-fan\n");
-}
-
static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm);
static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
show_pwm_enable, set_pwm_enable);
@@ -324,26 +319,23 @@ static DEVICE_ATTR(fan1_max, S_IRUGO, show_rpm_max, NULL);
static DEVICE_ATTR(fan1_input, S_IRUGO, show_rpm, NULL);
static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_rpm, set_rpm);
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-
static umode_t gpio_fan_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct gpio_fan_data *data = dev_get_drvdata(dev);
- if (index == 1 && !data->alarm)
+ if (index == 0 && !data->alarm)
return 0;
- if (index > 1 && !data->ctrl)
+ if (index > 0 && !data->ctrl)
return 0;
return attr->mode;
}
static struct attribute *gpio_fan_attributes[] = {
- &dev_attr_name.attr,
- &dev_attr_fan1_alarm.attr, /* 1 */
- &dev_attr_pwm1.attr, /* 2 */
+ &dev_attr_fan1_alarm.attr, /* 0 */
+ &dev_attr_pwm1.attr, /* 1 */
&dev_attr_pwm1_enable.attr,
&dev_attr_pwm1_mode.attr,
&dev_attr_fan1_input.attr,
@@ -358,6 +350,11 @@ static const struct attribute_group gpio_fan_group = {
.is_visible = gpio_fan_is_visible,
};
+static const struct attribute_group *gpio_fan_groups[] = {
+ &gpio_fan_group,
+ NULL
+};
+
static int fan_ctrl_init(struct gpio_fan_data *fan_data,
struct gpio_fan_platform_data *pdata)
{
@@ -384,7 +381,7 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
fan_data->pwm_enable = true; /* Enable manual fan speed control. */
fan_data->speed_index = get_fan_speed_index(fan_data);
if (fan_data->speed_index < 0)
- return -ENODEV;
+ return fan_data->speed_index;
return 0;
}
@@ -539,24 +536,16 @@ static int gpio_fan_probe(struct platform_device *pdev)
return err;
}
- err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_group);
- if (err)
- return err;
-
/* Make this driver part of hwmon class. */
- fan_data->hwmon_dev = hwmon_device_register(&pdev->dev);
- if (IS_ERR(fan_data->hwmon_dev)) {
- err = PTR_ERR(fan_data->hwmon_dev);
- goto err_remove;
- }
+ fan_data->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+ "gpio-fan", fan_data,
+ gpio_fan_groups);
+ if (IS_ERR(fan_data->hwmon_dev))
+ return PTR_ERR(fan_data->hwmon_dev);
dev_info(&pdev->dev, "GPIO fan initialized\n");
return 0;
-
-err_remove:
- sysfs_remove_group(&pdev->dev.kobj, &gpio_fan_group);
- return err;
}
static int gpio_fan_remove(struct platform_device *pdev)
@@ -564,7 +553,6 @@ static int gpio_fan_remove(struct platform_device *pdev)
struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
hwmon_device_unregister(fan_data->hwmon_dev);
- sysfs_remove_group(&pdev->dev.kobj, &gpio_fan_group);
return 0;
}
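
The gpio-fan hunks above set the template for the rest of the series: the driver collects its sysfs groups into a NULL-terminated array and hands it, together with a fixed name string and its private data, to hwmon_device_register_with_groups(); the hwmon core then creates the name attribute and the groups on the hwmon device itself, so the driver-local show_name() and the manual sysfs_create_group()/sysfs_remove_group() calls go away. A minimal sketch of the same pattern for a hypothetical "foo" platform driver (all foo_* names and the single attribute are illustrative, not taken from this series):

	#include <linux/err.h>
	#include <linux/hwmon.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/sysfs.h>

	struct foo_data {
		struct device *hwmon_dev;
		int rpm;			/* cached fan speed */
	};

	static ssize_t show_fan1_input(struct device *dev,
				       struct device_attribute *attr, char *buf)
	{
		/* drvdata of the hwmon device is the pointer passed at registration */
		struct foo_data *data = dev_get_drvdata(dev);

		return sprintf(buf, "%d\n", data->rpm);
	}
	static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan1_input, NULL);

	static struct attribute *foo_attrs[] = {
		&dev_attr_fan1_input.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(foo);		/* defines foo_group and foo_groups[] */

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_data *data;

		data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		platform_set_drvdata(pdev, data);

		data->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
								     "foo", data,
								     foo_groups);
		return PTR_ERR_OR_ZERO(data->hwmon_dev);
	}

	static int foo_remove(struct platform_device *pdev)
	{
		struct foo_data *data = platform_get_drvdata(pdev);

		hwmon_device_unregister(data->hwmon_dev);
		return 0;
	}

	static struct platform_driver foo_driver = {
		.driver	= { .name = "foo" },
		.probe	= foo_probe,
		.remove	= foo_remove,
	};
	module_platform_driver(foo_driver);
	MODULE_LICENSE("GPL");
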
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 646314f7c839..e176a43af63d 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/slab.h>
#include <linux/kdev_t.h>
#include <linux/idr.h>
#include <linux/hwmon.h>
@@ -25,35 +26,122 @@
#define HWMON_ID_PREFIX "hwmon"
#define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
-static struct class *hwmon_class;
+struct hwmon_device {
+ const char *name;
+ struct device dev;
+};
+#define to_hwmon_device(d) container_of(d, struct hwmon_device, dev)
+
+static ssize_t
+show_name(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", to_hwmon_device(dev)->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
+static struct attribute *hwmon_dev_attrs[] = {
+ &dev_attr_name.attr,
+ NULL
+};
+
+static umode_t hwmon_dev_name_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+
+ if (to_hwmon_device(dev)->name == NULL)
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute_group hwmon_dev_attr_group = {
+ .attrs = hwmon_dev_attrs,
+ .is_visible = hwmon_dev_name_is_visible,
+};
+
+static const struct attribute_group *hwmon_dev_attr_groups[] = {
+ &hwmon_dev_attr_group,
+ NULL
+};
+
+static void hwmon_dev_release(struct device *dev)
+{
+ kfree(to_hwmon_device(dev));
+}
+
+static struct class hwmon_class = {
+ .name = "hwmon",
+ .owner = THIS_MODULE,
+ .dev_groups = hwmon_dev_attr_groups,
+ .dev_release = hwmon_dev_release,
+};
static DEFINE_IDA(hwmon_ida);
/**
- * hwmon_device_register - register w/ hwmon
- * @dev: the device to register
+ * hwmon_device_register_with_groups - register w/ hwmon
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @groups: List of attribute groups to create
*
* hwmon_device_unregister() must be called when the device is no
* longer needed.
*
* Returns the pointer to the new device.
*/
-struct device *hwmon_device_register(struct device *dev)
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+ void *drvdata,
+ const struct attribute_group **groups)
{
- struct device *hwdev;
- int id;
+ struct hwmon_device *hwdev;
+ int err, id;
id = ida_simple_get(&hwmon_ida, 0, 0, GFP_KERNEL);
if (id < 0)
return ERR_PTR(id);
- hwdev = device_create(hwmon_class, dev, MKDEV(0, 0), NULL,
- HWMON_ID_FORMAT, id);
+ hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
+ if (hwdev == NULL) {
+ err = -ENOMEM;
+ goto ida_remove;
+ }
- if (IS_ERR(hwdev))
- ida_simple_remove(&hwmon_ida, id);
+ hwdev->name = name;
+ hwdev->dev.class = &hwmon_class;
+ hwdev->dev.parent = dev;
+ hwdev->dev.groups = groups;
+ hwdev->dev.of_node = dev ? dev->of_node : NULL;
+ dev_set_drvdata(&hwdev->dev, drvdata);
+ dev_set_name(&hwdev->dev, HWMON_ID_FORMAT, id);
+ err = device_register(&hwdev->dev);
+ if (err)
+ goto free;
- return hwdev;
+ return &hwdev->dev;
+
+free:
+ kfree(hwdev);
+ida_remove:
+ ida_simple_remove(&hwmon_ida, id);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
+
+/**
+ * hwmon_device_register - register w/ hwmon
+ * @dev: the device to register
+ *
+ * hwmon_device_unregister() must be called when the device is no
+ * longer needed.
+ *
+ * Returns the pointer to the new device.
+ */
+struct device *hwmon_device_register(struct device *dev)
+{
+ return hwmon_device_register_with_groups(dev, NULL, NULL, NULL);
}
EXPORT_SYMBOL_GPL(hwmon_device_register);
@@ -75,6 +163,69 @@ void hwmon_device_unregister(struct device *dev)
}
EXPORT_SYMBOL_GPL(hwmon_device_unregister);
+static void devm_hwmon_release(struct device *dev, void *res)
+{
+ struct device *hwdev = *(struct device **)res;
+
+ hwmon_device_unregister(hwdev);
+}
+
+/**
+ * devm_hwmon_device_register_with_groups - register w/ hwmon
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @groups: List of attribute groups to create
+ *
+ * Returns the pointer to the new device. The new device is automatically
+ * unregistered when the parent device is unbound from its driver.
+ */
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+ void *drvdata,
+ const struct attribute_group **groups)
+{
+ struct device **ptr, *hwdev;
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ hwdev = hwmon_device_register_with_groups(dev, name, drvdata, groups);
+ if (IS_ERR(hwdev))
+ goto error;
+
+ *ptr = hwdev;
+ devres_add(dev, ptr);
+ return hwdev;
+
+error:
+ devres_free(ptr);
+ return hwdev;
+}
+EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
+
+static int devm_hwmon_match(struct device *dev, void *res, void *data)
+{
+ struct device **hwdev = res;
+
+ return *hwdev == data;
+}
+
+/**
+ * devm_hwmon_device_unregister - removes a previously registered hwmon device
+ *
+ * @dev: the parent device of the device to unregister
+ */
+void devm_hwmon_device_unregister(struct device *dev)
+{
+ WARN_ON(devres_release(dev, devm_hwmon_release, devm_hwmon_match, dev));
+}
+EXPORT_SYMBOL_GPL(devm_hwmon_device_unregister);
+
static void __init hwmon_pci_quirks(void)
{
#if defined CONFIG_X86 && defined CONFIG_PCI
@@ -105,19 +256,21 @@ static void __init hwmon_pci_quirks(void)
static int __init hwmon_init(void)
{
+ int err;
+
hwmon_pci_quirks();
- hwmon_class = class_create(THIS_MODULE, "hwmon");
- if (IS_ERR(hwmon_class)) {
- pr_err("couldn't create sysfs class\n");
- return PTR_ERR(hwmon_class);
+ err = class_register(&hwmon_class);
+ if (err) {
+ pr_err("couldn't register hwmon sysfs class\n");
+ return err;
}
return 0;
}
static void __exit hwmon_exit(void)
{
- class_destroy(hwmon_class);
+ class_unregister(&hwmon_class);
}
subsys_initcall(hwmon_init);
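
With the devm_ variant introduced above, a driver that has no other teardown work can also drop its remove() callback: the devres action unregisters the hwmon device when the owning driver is unbound. A sketch of a probe using it, assuming a hypothetical "bar" I2C driver whose bar_data carries a client pointer and update_lock, and whose attribute groups are already built as bar_groups:

	static int bar_probe(struct i2c_client *client, const struct i2c_device_id *id)
	{
		struct device *dev = &client->dev;
		struct device *hwmon_dev;
		struct bar_data *data;

		data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		data->client = client;
		mutex_init(&data->update_lock);

		hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
								    data, bar_groups);
		/* No remove() callback needed: devres unregisters the device. */
		return PTR_ERR_OR_ZERO(hwmon_dev);
	}
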
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
index c6fdd5bd395e..5378fdefc1f7 100644
--- a/drivers/hwmon/ina209.c
+++ b/drivers/hwmon/ina209.c
@@ -63,7 +63,7 @@
#define INA209_SHUNT_DEFAULT 10000 /* uOhm */
struct ina209_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
bool valid;
@@ -78,8 +78,8 @@ struct ina209_data {
static struct ina209_data *ina209_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ina209_data *data = i2c_get_clientdata(client);
+ struct ina209_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct ina209_data *ret = data;
s32 val;
int i;
@@ -234,7 +234,6 @@ static ssize_t ina209_set_interval(struct device *dev,
struct device_attribute *da,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
struct ina209_data *data = ina209_update_device(dev);
long val;
u16 regval;
@@ -250,7 +249,8 @@ static ssize_t ina209_set_interval(struct device *dev,
mutex_lock(&data->update_lock);
regval = ina209_reg_from_interval(data->regs[INA209_CONFIGURATION],
val);
- i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION, regval);
+ i2c_smbus_write_word_swapped(data->client, INA209_CONFIGURATION,
+ regval);
data->regs[INA209_CONFIGURATION] = regval;
data->update_interval = ina209_interval_from_reg(regval);
mutex_unlock(&data->update_lock);
@@ -260,8 +260,7 @@ static ssize_t ina209_set_interval(struct device *dev,
static ssize_t ina209_show_interval(struct device *dev,
struct device_attribute *da, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ina209_data *data = i2c_get_clientdata(client);
+ struct ina209_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", data->update_interval);
}
@@ -285,9 +284,9 @@ static ssize_t ina209_reset_history(struct device *dev,
const char *buf,
size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ina209_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ina209_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
u32 mask = attr->index;
long val;
int i, ret;
@@ -312,7 +311,6 @@ static ssize_t ina209_set_value(struct device *dev,
const char *buf,
size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
struct ina209_data *data = ina209_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int reg = attr->index;
@@ -332,7 +330,7 @@ static ssize_t ina209_set_value(struct device *dev,
count = ret;
goto abort;
}
- i2c_smbus_write_word_swapped(client, reg, ret);
+ i2c_smbus_write_word_swapped(data->client, reg, ret);
data->regs[reg] = ret;
abort:
mutex_unlock(&data->update_lock);
@@ -457,7 +455,7 @@ static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
* Finally, construct an array of pointers to members of the above objects,
* as required for sysfs_create_group()
*/
-static struct attribute *ina209_attributes[] = {
+static struct attribute *ina209_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_input_highest.dev_attr.attr,
&sensor_dev_attr_in0_input_lowest.dev_attr.attr,
@@ -498,10 +496,7 @@ static struct attribute *ina209_attributes[] = {
NULL,
};
-
-static const struct attribute_group ina209_group = {
- .attrs = ina209_attributes,
-};
+ATTRIBUTE_GROUPS(ina209);
static void ina209_restore_conf(struct i2c_client *client,
struct ina209_data *data)
@@ -565,6 +560,7 @@ static int ina209_probe(struct i2c_client *client,
{
struct i2c_adapter *adapter = client->adapter;
struct ina209_data *data;
+ struct device *hwmon_dev;
int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
@@ -575,27 +571,23 @@ static int ina209_probe(struct i2c_client *client,
return -ENOMEM;
i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
ret = ina209_init_client(client, data);
if (ret)
return ret;
- /* Register sysfs hooks */
- ret = sysfs_create_group(&client->dev.kobj, &ina209_group);
- if (ret)
+ hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+ client->name,
+ data, ina209_groups);
+ if (IS_ERR(hwmon_dev)) {
+ ret = PTR_ERR(hwmon_dev);
goto out_restore_conf;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto out_hwmon_device_register;
}
return 0;
-out_hwmon_device_register:
- sysfs_remove_group(&client->dev.kobj, &ina209_group);
out_restore_conf:
ina209_restore_conf(client, data);
return ret;
@@ -605,8 +597,6 @@ static int ina209_remove(struct i2c_client *client)
{
struct ina209_data *data = i2c_get_clientdata(client);
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &ina209_group);
ina209_restore_conf(client, data);
return 0;
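
ATTRIBUTE_GROUPS(ina209) above replaces the hand-written attribute_group plus group-list pair; per include/linux/sysfs.h of this era it expands to roughly the following, which is why only the *_attrs[] array still has to be spelled out per driver. __ATTRIBUTE_GROUPS() is just the second half (the groups[] array), which is why jc42 further down keeps its own jc42_group — it needs .is_visible — and only uses __ATTRIBUTE_GROUPS(jc42):

	/* Approximate expansion of ATTRIBUTE_GROUPS(ina209): */
	static const struct attribute_group ina209_group = {
		.attrs = ina209_attrs,
	};

	static const struct attribute_group *ina209_groups[] = {
		&ina209_group,
		NULL,
	};
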
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 70a39a8ac016..93d26e8af3e2 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -78,7 +78,7 @@ struct ina2xx_config {
};
struct ina2xx_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
const struct ina2xx_config *config;
struct mutex update_lock;
@@ -112,8 +112,8 @@ static const struct ina2xx_config ina2xx_config[] = {
static struct ina2xx_data *ina2xx_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ina2xx_data *data = i2c_get_clientdata(client);
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct ina2xx_data *ret = data;
mutex_lock(&data->update_lock);
@@ -203,41 +203,39 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
INA2XX_POWER);
/* pointers to created device attributes */
-static struct attribute *ina2xx_attributes[] = {
+static struct attribute *ina2xx_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_curr1_input.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
NULL,
};
-
-static const struct attribute_group ina2xx_group = {
- .attrs = ina2xx_attributes,
-};
+ATTRIBUTE_GROUPS(ina2xx);
static int ina2xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
- struct ina2xx_data *data;
struct ina2xx_platform_data *pdata;
- int ret;
- u32 val;
+ struct device *dev = &client->dev;
+ struct ina2xx_data *data;
+ struct device *hwmon_dev;
long shunt = 10000; /* default shunt value 10mOhms */
+ u32 val;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- if (dev_get_platdata(&client->dev)) {
- pdata = dev_get_platdata(&client->dev);
+ if (dev_get_platdata(dev)) {
+ pdata = dev_get_platdata(dev);
shunt = pdata->shunt_uohms;
- } else if (!of_property_read_u32(client->dev.of_node,
- "shunt-resistor", &val)) {
- shunt = val;
+ } else if (!of_property_read_u32(dev->of_node,
+ "shunt-resistor", &val)) {
+ shunt = val;
}
if (shunt <= 0)
@@ -255,37 +253,18 @@ static int ina2xx_probe(struct i2c_client *client,
i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION,
data->config->calibration_factor / shunt);
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
- ret = sysfs_create_group(&client->dev.kobj, &ina2xx_group);
- if (ret)
- return ret;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto out_err_hwmon;
- }
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, ina2xx_groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
- dev_info(&client->dev, "power monitor %s (Rshunt = %li uOhm)\n",
+ dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
id->name, shunt);
return 0;
-
-out_err_hwmon:
- sysfs_remove_group(&client->dev.kobj, &ina2xx_group);
- return ret;
-}
-
-static int ina2xx_remove(struct i2c_client *client)
-{
- struct ina2xx_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &ina2xx_group);
-
- return 0;
}
static const struct i2c_device_id ina2xx_id[] = {
@@ -302,7 +281,6 @@ static struct i2c_driver ina2xx_driver = {
.name = "ina2xx",
},
.probe = ina2xx_probe,
- .remove = ina2xx_remove,
.id_table = ina2xx_id,
};
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 4a58f130fd4e..6013611e4f21 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -163,7 +163,7 @@ static struct jc42_chips jc42_chips[] = {
/* Each client has this additional data */
struct jc42_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock; /* protect register access */
bool extended; /* true if extended range supported */
bool valid;
@@ -193,21 +193,21 @@ MODULE_DEVICE_TABLE(i2c, jc42_id);
static int jc42_suspend(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct jc42_data *data = i2c_get_clientdata(client);
+ struct jc42_data *data = dev_get_drvdata(dev);
data->config |= JC42_CFG_SHUTDOWN;
- i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, data->config);
+ i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
+ data->config);
return 0;
}
static int jc42_resume(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct jc42_data *data = i2c_get_clientdata(client);
+ struct jc42_data *data = dev_get_drvdata(dev);
data->config &= ~JC42_CFG_SHUTDOWN;
- i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, data->config);
+ i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
+ data->config);
return 0;
}
@@ -317,15 +317,14 @@ static ssize_t set_##value(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct jc42_data *data = i2c_get_clientdata(client); \
+ struct jc42_data *data = dev_get_drvdata(dev); \
int err, ret = count; \
long val; \
- if (kstrtol(buf, 10, &val) < 0) \
+ if (kstrtol(buf, 10, &val) < 0) \
return -EINVAL; \
mutex_lock(&data->update_lock); \
data->value = jc42_temp_to_reg(val, data->extended); \
- err = i2c_smbus_write_word_swapped(client, reg, data->value); \
+ err = i2c_smbus_write_word_swapped(data->client, reg, data->value); \
if (err < 0) \
ret = err; \
mutex_unlock(&data->update_lock); \
@@ -344,8 +343,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct jc42_data *data = i2c_get_clientdata(client);
+ struct jc42_data *data = dev_get_drvdata(dev);
unsigned long val;
int diff, hyst;
int err;
@@ -368,7 +366,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
mutex_lock(&data->update_lock);
data->config = (data->config & ~JC42_CFG_HYST_MASK)
| (hyst << JC42_CFG_HYST_SHIFT);
- err = i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG,
+ err = i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
data->config);
if (err < 0)
ret = err;
@@ -430,8 +428,7 @@ static umode_t jc42_attribute_mode(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct i2c_client *client = to_i2c_client(dev);
- struct jc42_data *data = i2c_get_clientdata(client);
+ struct jc42_data *data = dev_get_drvdata(dev);
unsigned int config = data->config;
bool readonly;
@@ -452,6 +449,7 @@ static const struct attribute_group jc42_group = {
.attrs = jc42_attributes,
.is_visible = jc42_attribute_mode,
};
+__ATTRIBUTE_GROUPS(jc42);
/* Return 0 if detection is successful, -ENODEV otherwise */
static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
@@ -487,14 +485,16 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- struct jc42_data *data;
- int config, cap, err;
struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct jc42_data *data;
+ int config, cap;
data = devm_kzalloc(dev, sizeof(struct jc42_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->client = client;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -515,29 +515,15 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
data->config = config;
- /* Register sysfs hooks */
- err = sysfs_create_group(&dev->kobj, &jc42_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
-
- return 0;
-
-exit_remove:
- sysfs_remove_group(&dev->kobj, &jc42_group);
- return err;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ jc42_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static int jc42_remove(struct i2c_client *client)
{
struct jc42_data *data = i2c_get_clientdata(client);
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &jc42_group);
/* Restore original configuration except hysteresis */
if ((data->config & ~JC42_CFG_HYST_MASK) !=
@@ -553,8 +539,8 @@ static int jc42_remove(struct i2c_client *client)
static struct jc42_data *jc42_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct jc42_data *data = i2c_get_clientdata(client);
+ struct jc42_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct jc42_data *ret = data;
int val;
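
jc42_probe() now ends with PTR_ERR_OR_ZERO(), a helper from <linux/err.h> that folds the usual three-line error check into one expression; the two forms in this sketch are equivalent:

	/* Long form, still used by several conversions above: */
	if (IS_ERR(hwmon_dev))
		return PTR_ERR(hwmon_dev);
	return 0;

	/* Equivalent short form: */
	return PTR_ERR_OR_ZERO(hwmon_dev);
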
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 016efa26ba7c..505a59e100b0 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -174,7 +174,6 @@ out_dev_reg_failed:
out_dev_create_file_failed:
device_remove_file(&spi->dev, &dev_attr_temp1_input);
out_dev_create_temp_file_failed:
- spi_set_drvdata(spi, NULL);
return status;
}
@@ -185,7 +184,6 @@ static int lm70_remove(struct spi_device *spi)
hwmon_device_unregister(p_lm70->hwmon_dev);
device_remove_file(&spi->dev, &dev_attr_temp1_input);
device_remove_file(&spi->dev, &dev_attr_name);
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 9bde9644b102..9653bb870a47 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -55,7 +55,7 @@ static const unsigned short lm73_convrates[] = {
};
struct lm73_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex lock;
u8 ctrl; /* control register value */
};
@@ -66,7 +66,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct i2c_client *client = to_i2c_client(dev);
+ struct lm73_data *data = dev_get_drvdata(dev);
long temp;
short value;
s32 err;
@@ -77,7 +77,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
/* Write value */
value = clamp_val(temp / 250, LM73_TEMP_MIN, LM73_TEMP_MAX) << 5;
- err = i2c_smbus_write_word_swapped(client, attr->index, value);
+ err = i2c_smbus_write_word_swapped(data->client, attr->index, value);
return (err < 0) ? err : count;
}
@@ -85,10 +85,10 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct i2c_client *client = to_i2c_client(dev);
+ struct lm73_data *data = dev_get_drvdata(dev);
int temp;
- s32 err = i2c_smbus_read_word_swapped(client, attr->index);
+ s32 err = i2c_smbus_read_word_swapped(data->client, attr->index);
if (err < 0)
return err;
@@ -101,8 +101,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm73_data *data = i2c_get_clientdata(client);
+ struct lm73_data *data = dev_get_drvdata(dev);
unsigned long convrate;
s32 err;
int res = 0;
@@ -124,7 +123,8 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
mutex_lock(&data->lock);
data->ctrl &= LM73_CTRL_TO_MASK;
data->ctrl |= res << LM73_CTRL_RES_SHIFT;
- err = i2c_smbus_write_byte_data(client, LM73_REG_CTRL, data->ctrl);
+ err = i2c_smbus_write_byte_data(data->client, LM73_REG_CTRL,
+ data->ctrl);
mutex_unlock(&data->lock);
if (err < 0)
@@ -136,8 +136,7 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm73_data *data = i2c_get_clientdata(client);
+ struct lm73_data *data = dev_get_drvdata(dev);
int res;
res = (data->ctrl & LM73_CTRL_RES_MASK) >> LM73_CTRL_RES_SHIFT;
@@ -147,13 +146,12 @@ static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
static ssize_t show_maxmin_alarm(struct device *dev,
struct device_attribute *da, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct lm73_data *data = i2c_get_clientdata(client);
+ struct lm73_data *data = dev_get_drvdata(dev);
s32 ctrl;
mutex_lock(&data->lock);
- ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
+ ctrl = i2c_smbus_read_byte_data(data->client, LM73_REG_CTRL);
if (ctrl < 0)
goto abort;
data->ctrl = ctrl;
@@ -183,7 +181,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
show_maxmin_alarm, NULL, LM73_CTRL_LO_SHIFT);
-static struct attribute *lm73_attributes[] = {
+static struct attribute *lm73_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
@@ -192,10 +190,7 @@ static struct attribute *lm73_attributes[] = {
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
NULL
};
-
-static const struct attribute_group lm73_group = {
- .attrs = lm73_attributes,
-};
+ATTRIBUTE_GROUPS(lm73);
/*-----------------------------------------------------------------------*/
@@ -204,16 +199,16 @@ static const struct attribute_group lm73_group = {
static int
lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- int status;
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
struct lm73_data *data;
int ctrl;
- data = devm_kzalloc(&client->dev, sizeof(struct lm73_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct lm73_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->lock);
ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
@@ -221,33 +216,13 @@ lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
return ctrl;
data->ctrl = ctrl;
- /* Register sysfs hooks */
- status = sysfs_create_group(&client->dev.kobj, &lm73_group);
- if (status)
- return status;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- status = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, lm73_groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
- dev_info(&client->dev, "%s: sensor '%s'\n",
- dev_name(data->hwmon_dev), client->name);
-
- return 0;
-
-exit_remove:
- sysfs_remove_group(&client->dev.kobj, &lm73_group);
- return status;
-}
-
-static int lm73_remove(struct i2c_client *client)
-{
- struct lm73_data *data = i2c_get_clientdata(client);
+ dev_info(dev, "sensor '%s'\n", client->name);
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm73_group);
return 0;
}
@@ -300,7 +275,6 @@ static struct i2c_driver lm73_driver = {
.name = "lm73",
},
.probe = lm73_probe,
- .remove = lm73_remove,
.id_table = lm73_ids,
.detect = lm73_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index cdff74282955..d0c65d23d5be 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -60,6 +60,11 @@
* This driver also supports the G781 from GMT. This device is compatible
* with the ADM1032.
*
+ * This driver also supports the TMP451 from Texas Instruments. This device
+ * is supported in both compatibility and extended mode. It is mostly
+ * compatible with the ADT7461, except for the local temperature low byte
+ * register and the maximum conversion rate.
+ *
* Since the LM90 was the first chipset supported by this driver, most
* comments will refer to this chipset, but are actually general and
* concern all supported chipsets, unless mentioned otherwise.
@@ -89,6 +94,7 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
+#include <linux/interrupt.h>
/*
* Addresses to scan
@@ -110,7 +116,7 @@ static const unsigned short normal_i2c[] = {
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
- max6646, w83l771, max6696, sa56004, g781 };
+ max6646, w83l771, max6696, sa56004, g781, tmp451 };
/*
* The LM90 registers
@@ -167,6 +173,9 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_DEF_CONVRATE_RVAL 6 /* Def conversion rate register value */
#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */
+/* TMP451 registers */
+#define TMP451_REG_R_LOCAL_TEMPL 0x15
+
/*
* Device flags
*/
@@ -179,6 +188,23 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_HAVE_TEMP3 (1 << 6) /* 3rd temperature sensor */
#define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */
+/* LM90 status */
+#define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */
+#define LM90_STATUS_RTHRM (1 << 1) /* remote THERM limit tripped */
+#define LM90_STATUS_ROPEN (1 << 2) /* remote is an open circuit */
+#define LM90_STATUS_RLOW (1 << 3) /* remote low temp limit tripped */
+#define LM90_STATUS_RHIGH (1 << 4) /* remote high temp limit tripped */
+#define LM90_STATUS_LLOW (1 << 5) /* local low temp limit tripped */
+#define LM90_STATUS_LHIGH (1 << 6) /* local high temp limit tripped */
+
+#define MAX6696_STATUS2_R2THRM (1 << 1) /* remote2 THERM limit tripped */
+#define MAX6696_STATUS2_R2OPEN (1 << 2) /* remote2 is an open circuit */
+#define MAX6696_STATUS2_R2LOW (1 << 3) /* remote2 low temp limit tripped */
+#define MAX6696_STATUS2_R2HIGH (1 << 4) /* remote2 high temp limit tripped */
+#define MAX6696_STATUS2_ROT2 (1 << 5) /* remote emergency limit tripped */
+#define MAX6696_STATUS2_R2OT2 (1 << 6) /* remote2 emergency limit tripped */
+#define MAX6696_STATUS2_LOT2 (1 << 7) /* local emergency limit tripped */
+
/*
* Driver data (common to all clients)
*/
@@ -205,6 +231,7 @@ static const struct i2c_device_id lm90_id[] = {
{ "nct1008", adt7461 },
{ "w83l771", w83l771 },
{ "sa56004", sa56004 },
+ { "tmp451", tmp451 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm90_id);
@@ -278,7 +305,7 @@ static const struct lm90_params lm90_params[] = {
[max6696] = {
.flags = LM90_HAVE_EMERGENCY
| LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
- .alert_alarms = 0x187c,
+ .alert_alarms = 0x1c7c,
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
@@ -293,6 +320,43 @@ static const struct lm90_params lm90_params[] = {
.max_convrate = 9,
.reg_local_ext = SA56004_REG_R_LOCAL_TEMPL,
},
+ [tmp451] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_BROKEN_ALERT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 9,
+ .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
+ }
+};
+
+/*
+ * TEMP8 register index
+ */
+enum lm90_temp8_reg_index {
+ LOCAL_LOW = 0,
+ LOCAL_HIGH,
+ LOCAL_CRIT,
+ REMOTE_CRIT,
+ LOCAL_EMERG, /* max6659 and max6695/96 */
+ REMOTE_EMERG, /* max6659 and max6695/96 */
+ REMOTE2_CRIT, /* max6695/96 only */
+ REMOTE2_EMERG, /* max6695/96 only */
+ TEMP8_REG_NUM
+};
+
+/*
+ * TEMP11 register index
+ */
+enum lm90_temp11_reg_index {
+ REMOTE_TEMP = 0,
+ REMOTE_LOW,
+ REMOTE_HIGH,
+ REMOTE_OFFSET, /* except max6646, max6657/58/59, and max6695/96 */
+ LOCAL_TEMP,
+ REMOTE2_TEMP, /* max6695/96 only */
+ REMOTE2_LOW, /* max6695/96 only */
+ REMOTE2_HIGH, /* max6695/96 only */
+ TEMP11_REG_NUM
};
/*
@@ -317,25 +381,8 @@ struct lm90_data {
u8 reg_local_ext; /* local extension register offset */
/* registers values */
- s8 temp8[8]; /* 0: local low limit
- * 1: local high limit
- * 2: local critical limit
- * 3: remote critical limit
- * 4: local emergency limit (max6659 and max6695/96)
- * 5: remote emergency limit (max6659 and max6695/96)
- * 6: remote 2 critical limit (max6695/96 only)
- * 7: remote 2 emergency limit (max6695/96 only)
- */
- s16 temp11[8]; /* 0: remote input
- * 1: remote low limit
- * 2: remote high limit
- * 3: remote offset (except max6646, max6657/58/59,
- * and max6695/96)
- * 4: local input
- * 5: remote 2 input (max6695/96 only)
- * 6: remote 2 low limit (max6695/96 only)
- * 7: remote 2 high limit (max6695/96 only)
- */
+ s8 temp8[TEMP8_REG_NUM];
+ s16 temp11[TEMP11_REG_NUM];
u8 temp_hyst;
u16 alarms; /* bitvector (upper 8 bits for max6695/96) */
};
@@ -422,20 +469,29 @@ static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value)
* various registers have different meanings as a result of selecting a
* non-default remote channel.
*/
-static inline void lm90_select_remote_channel(struct i2c_client *client,
- struct lm90_data *data,
- int channel)
+static inline int lm90_select_remote_channel(struct i2c_client *client,
+ struct lm90_data *data,
+ int channel)
{
u8 config;
+ int err;
if (data->kind == max6696) {
lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
config &= ~0x08;
if (channel)
config |= 0x08;
- i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
- config);
+ err = i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
+ config);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "Failed to select remote channel %d, err %d\n",
+ channel, err);
+ return err;
+ }
}
+
+ return 0;
}
/*
@@ -477,37 +533,42 @@ static struct lm90_data *lm90_update_device(struct device *dev)
u8 alarms;
dev_dbg(&client->dev, "Updating lm90 data.\n");
- lm90_read_reg(client, LM90_REG_R_LOCAL_LOW, &data->temp8[0]);
- lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH, &data->temp8[1]);
- lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT, &data->temp8[2]);
- lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, &data->temp8[3]);
+ lm90_read_reg(client, LM90_REG_R_LOCAL_LOW,
+ &data->temp8[LOCAL_LOW]);
+ lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH,
+ &data->temp8[LOCAL_HIGH]);
+ lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT,
+ &data->temp8[LOCAL_CRIT]);
+ lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT,
+ &data->temp8[REMOTE_CRIT]);
lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst);
if (data->reg_local_ext) {
lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
data->reg_local_ext,
- &data->temp11[4]);
+ &data->temp11[LOCAL_TEMP]);
} else {
if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP,
&h) == 0)
- data->temp11[4] = h << 8;
+ data->temp11[LOCAL_TEMP] = h << 8;
}
lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
- LM90_REG_R_REMOTE_TEMPL, &data->temp11[0]);
+ LM90_REG_R_REMOTE_TEMPL,
+ &data->temp11[REMOTE_TEMP]);
if (lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h) == 0) {
- data->temp11[1] = h << 8;
+ data->temp11[REMOTE_LOW] = h << 8;
if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
&& lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL,
&l) == 0)
- data->temp11[1] |= l;
+ data->temp11[REMOTE_LOW] |= l;
}
if (lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h) == 0) {
- data->temp11[2] = h << 8;
+ data->temp11[REMOTE_HIGH] = h << 8;
if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
&& lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL,
&l) == 0)
- data->temp11[2] |= l;
+ data->temp11[REMOTE_HIGH] |= l;
}
if (data->flags & LM90_HAVE_OFFSET) {
@@ -515,13 +576,13 @@ static struct lm90_data *lm90_update_device(struct device *dev)
&h) == 0
&& lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSL,
&l) == 0)
- data->temp11[3] = (h << 8) | l;
+ data->temp11[REMOTE_OFFSET] = (h << 8) | l;
}
if (data->flags & LM90_HAVE_EMERGENCY) {
lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG,
- &data->temp8[4]);
+ &data->temp8[LOCAL_EMERG]);
lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
- &data->temp8[5]);
+ &data->temp8[REMOTE_EMERG]);
}
lm90_read_reg(client, LM90_REG_R_STATUS, &alarms);
data->alarms = alarms; /* save as 16 bit value */
@@ -529,15 +590,16 @@ static struct lm90_data *lm90_update_device(struct device *dev)
if (data->kind == max6696) {
lm90_select_remote_channel(client, data, 1);
lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT,
- &data->temp8[6]);
+ &data->temp8[REMOTE2_CRIT]);
lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
- &data->temp8[7]);
+ &data->temp8[REMOTE2_EMERG]);
lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
- LM90_REG_R_REMOTE_TEMPL, &data->temp11[5]);
+ LM90_REG_R_REMOTE_TEMPL,
+ &data->temp11[REMOTE2_TEMP]);
if (!lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h))
- data->temp11[6] = h << 8;
+ data->temp11[REMOTE2_LOW] = h << 8;
if (!lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h))
- data->temp11[7] = h << 8;
+ data->temp11[REMOTE2_HIGH] = h << 8;
lm90_select_remote_channel(client, data, 0);
if (!lm90_read_reg(client, MAX6696_REG_R_STATUS2,
@@ -702,31 +764,36 @@ static u16 temp_to_u16_adt7461(struct lm90_data *data, long val)
* Sysfs stuff
*/
-static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static int read_temp8(struct device *dev, int index)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm90_data *data = lm90_update_device(dev);
int temp;
- if (data->kind == adt7461)
- temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
+ if (data->kind == adt7461 || data->kind == tmp451)
+ temp = temp_from_u8_adt7461(data, data->temp8[index]);
else if (data->kind == max6646)
- temp = temp_from_u8(data->temp8[attr->index]);
+ temp = temp_from_u8(data->temp8[index]);
else
- temp = temp_from_s8(data->temp8[attr->index]);
+ temp = temp_from_s8(data->temp8[index]);
/* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && attr->index == 3)
+ if (data->kind == lm99 && index == 3)
temp += 16000;
- return sprintf(buf, "%d\n", temp);
+ return temp;
}
-static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return sprintf(buf, "%d\n", read_temp8(dev, attr->index));
+}
+
+static int write_temp8(struct device *dev, int index, long val)
{
- static const u8 reg[8] = {
+ static const u8 reg[TEMP8_REG_NUM] = {
LM90_REG_W_LOCAL_LOW,
LM90_REG_W_LOCAL_HIGH,
LM90_REG_W_LOCAL_CRIT,
@@ -737,60 +804,79 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
MAX6659_REG_W_REMOTE_EMERG,
};
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct lm90_data *data = i2c_get_clientdata(client);
- int nr = attr->index;
- long val;
int err;
- err = kstrtol(buf, 10, &val);
- if (err < 0)
- return err;
-
/* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && attr->index == 3)
+ if (data->kind == lm99 && index == 3)
val -= 16000;
mutex_lock(&data->update_lock);
- if (data->kind == adt7461)
- data->temp8[nr] = temp_to_u8_adt7461(data, val);
+ if (data->kind == adt7461 || data->kind == tmp451)
+ data->temp8[index] = temp_to_u8_adt7461(data, val);
else if (data->kind == max6646)
- data->temp8[nr] = temp_to_u8(val);
+ data->temp8[index] = temp_to_u8(val);
else
- data->temp8[nr] = temp_to_s8(val);
-
- lm90_select_remote_channel(client, data, nr >= 6);
- i2c_smbus_write_byte_data(client, reg[nr], data->temp8[nr]);
- lm90_select_remote_channel(client, data, 0);
+ data->temp8[index] = temp_to_s8(val);
+ if ((err = lm90_select_remote_channel(client, data, index >= 6)) ||
+ (err = i2c_smbus_write_byte_data(client, reg[index],
+ data->temp8[index])) ||
+ (err = lm90_select_remote_channel(client, data, 0)))
+ dev_err(dev, "write_temp8 failed, err %d\n", err);
mutex_unlock(&data->update_lock);
+
+ return err;
+}
+
+static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int index = attr->index;
+ long val;
+ int err;
+
+ err = kstrtol(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ err = write_temp8(dev, index, val);
+ if (err < 0)
+ return err;
+
return count;
}
-static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static int read_temp11(struct device *dev, int index)
{
- struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
struct lm90_data *data = lm90_update_device(dev);
int temp;
- if (data->kind == adt7461)
- temp = temp_from_u16_adt7461(data, data->temp11[attr->index]);
+ if (data->kind == adt7461 || data->kind == tmp451)
+ temp = temp_from_u16_adt7461(data, data->temp11[index]);
else if (data->kind == max6646)
- temp = temp_from_u16(data->temp11[attr->index]);
+ temp = temp_from_u16(data->temp11[index]);
else
- temp = temp_from_s16(data->temp11[attr->index]);
+ temp = temp_from_s16(data->temp11[index]);
/* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && attr->index <= 2)
+ if (data->kind == lm99 && index <= 2)
temp += 16000;
- return sprintf(buf, "%d\n", temp);
+ return temp;
}
-static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+
+ return sprintf(buf, "%d\n", read_temp11(dev, attr->index));
+}
+
+static int write_temp11(struct device *dev, int nr, int index, long val)
{
struct {
u8 high;
@@ -804,24 +890,16 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
{ LM90_REG_W_REMOTE_HIGHH, LM90_REG_W_REMOTE_HIGHL, 1 }
};
- struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct lm90_data *data = i2c_get_clientdata(client);
- int nr = attr->nr;
- int index = attr->index;
- long val;
int err;
- err = kstrtol(buf, 10, &val);
- if (err < 0)
- return err;
-
/* +16 degrees offset for temp2 for the LM99 */
if (data->kind == lm99 && index <= 2)
val -= 16000;
mutex_lock(&data->update_lock);
- if (data->kind == adt7461)
+ if (data->kind == adt7461 || data->kind == tmp451)
data->temp11[index] = temp_to_u16_adt7461(data, val);
else if (data->kind == max6646)
data->temp11[index] = temp_to_u8(val) << 8;
@@ -830,15 +908,50 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
else
data->temp11[index] = temp_to_s8(val) << 8;
- lm90_select_remote_channel(client, data, reg[nr].channel);
- i2c_smbus_write_byte_data(client, reg[nr].high,
- data->temp11[index] >> 8);
- if (data->flags & LM90_HAVE_REM_LIMIT_EXT)
- i2c_smbus_write_byte_data(client, reg[nr].low,
- data->temp11[index] & 0xff);
- lm90_select_remote_channel(client, data, 0);
+ err = lm90_select_remote_channel(client, data, reg[nr].channel);
+ if (err)
+ goto error;
+
+ err = i2c_smbus_write_byte_data(client, reg[nr].high,
+ data->temp11[index] >> 8);
+ if (err)
+ goto error;
+
+ if (data->flags & LM90_HAVE_REM_LIMIT_EXT) {
+ err = i2c_smbus_write_byte_data(client, reg[nr].low,
+ data->temp11[index] & 0xff);
+ if (err)
+ goto error;
+ }
+
+ err = lm90_select_remote_channel(client, data, 0);
+
+error:
+ if (err)
+ dev_err(dev, "write_temp11 failed, err %d\n", err);
mutex_unlock(&data->update_lock);
+
+ return err;
+}
+
+static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+ int nr = attr->nr;
+ int index = attr->index;
+ long val;
+ int err;
+
+ err = kstrtol(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ err = write_temp11(dev, nr, index, val);
+ if (err < 0)
+ return err;
+
return count;
}
@@ -850,7 +963,7 @@ static ssize_t show_temphyst(struct device *dev,
struct lm90_data *data = lm90_update_device(dev);
int temp;
- if (data->kind == adt7461)
+ if (data->kind == adt7461 || data->kind == tmp451)
temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
else if (data->kind == max6646)
temp = temp_from_u8(data->temp8[attr->index]);
@@ -878,12 +991,12 @@ static ssize_t set_temphyst(struct device *dev, struct device_attribute *dummy,
return err;
mutex_lock(&data->update_lock);
- if (data->kind == adt7461)
- temp = temp_from_u8_adt7461(data, data->temp8[2]);
+ if (data->kind == adt7461 || data->kind == tmp451)
+ temp = temp_from_u8_adt7461(data, data->temp8[LOCAL_CRIT]);
else if (data->kind == max6646)
- temp = temp_from_u8(data->temp8[2]);
+ temp = temp_from_u8(data->temp8[LOCAL_CRIT]);
else
- temp = temp_from_s8(data->temp8[2]);
+ temp = temp_from_s8(data->temp8[LOCAL_CRIT]);
data->temp_hyst = hyst_to_reg(temp - val);
i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
@@ -937,25 +1050,28 @@ static ssize_t set_update_interval(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp11, NULL, 0, 4);
-static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp11, NULL, 0, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp11, NULL,
+ 0, LOCAL_TEMP);
+static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp11, NULL,
+ 0, REMOTE_TEMP);
static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 0);
+ set_temp8, LOCAL_LOW);
static SENSOR_DEVICE_ATTR_2(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 0, 1);
+ set_temp11, 0, REMOTE_LOW);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 1);
+ set_temp8, LOCAL_HIGH);
static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 1, 2);
+ set_temp11, 1, REMOTE_HIGH);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 2);
+ set_temp8, LOCAL_CRIT);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 3);
+ set_temp8, REMOTE_CRIT);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temphyst,
- set_temphyst, 2);
-static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_temphyst, NULL, 3);
+ set_temphyst, LOCAL_CRIT);
+static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_temphyst, NULL,
+ REMOTE_CRIT);
static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 2, 3);
+ set_temp11, 2, REMOTE_OFFSET);
/* Individual alarm files */
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
@@ -1003,13 +1119,13 @@ static const struct attribute_group lm90_group = {
* Additional attributes for devices with emergency sensors
*/
static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 4);
+ set_temp8, LOCAL_EMERG);
static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 5);
+ set_temp8, REMOTE_EMERG);
static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO, show_temphyst,
- NULL, 4);
+ NULL, LOCAL_EMERG);
static SENSOR_DEVICE_ATTR(temp2_emergency_hyst, S_IRUGO, show_temphyst,
- NULL, 5);
+ NULL, REMOTE_EMERG);
static struct attribute *lm90_emergency_attributes[] = {
&sensor_dev_attr_temp1_emergency.dev_attr.attr,
@@ -1039,18 +1155,20 @@ static const struct attribute_group lm90_emergency_alarm_group = {
/*
* Additional attributes for devices with 3 temperature sensors
*/
-static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp11, NULL, 0, 5);
+static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp11, NULL,
+ 0, REMOTE2_TEMP);
static SENSOR_DEVICE_ATTR_2(temp3_min, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 3, 6);
+ set_temp11, 3, REMOTE2_LOW);
static SENSOR_DEVICE_ATTR_2(temp3_max, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 4, 7);
+ set_temp11, 4, REMOTE2_HIGH);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 6);
-static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, show_temphyst, NULL, 6);
+ set_temp8, REMOTE2_CRIT);
+static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, show_temphyst, NULL,
+ REMOTE2_CRIT);
static SENSOR_DEVICE_ATTR(temp3_emergency, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 7);
+ set_temp8, REMOTE2_EMERG);
static SENSOR_DEVICE_ATTR(temp3_emergency_hyst, S_IRUGO, show_temphyst,
- NULL, 7);
+ NULL, REMOTE2_EMERG);
static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 10);
@@ -1306,6 +1424,19 @@ static int lm90_detect(struct i2c_client *client,
&& (config1 & 0x3F) == 0x00
&& convrate <= 0x08)
name = "g781";
+ } else
+ if (address == 0x4C
+ && man_id == 0x55) { /* Texas Instruments */
+ int local_ext;
+
+ local_ext = i2c_smbus_read_byte_data(client,
+ TMP451_REG_R_LOCAL_TEMPL);
+
+ if (chip_id == 0x00 /* TMP451 */
+ && (config1 & 0x1B) == 0x00
+ && convrate <= 0x09
+ && (local_ext & 0x0F) == 0x00)
+ name = "tmp451";
}
if (!name) { /* identification failed */
@@ -1367,7 +1498,7 @@ static void lm90_init_client(struct i2c_client *client)
data->config_orig = config;
/* Check Temperature Range Select */
- if (data->kind == adt7461) {
+ if (data->kind == adt7461 || data->kind == tmp451) {
if (config & 0x04)
data->flags |= LM90_FLAG_ADT7461_EXT;
}
@@ -1391,6 +1522,54 @@ static void lm90_init_client(struct i2c_client *client)
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
}
+static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
+{
+ struct lm90_data *data = i2c_get_clientdata(client);
+ u8 st, st2 = 0;
+
+ lm90_read_reg(client, LM90_REG_R_STATUS, &st);
+
+ if (data->kind == max6696)
+ lm90_read_reg(client, MAX6696_REG_R_STATUS2, &st2);
+
+ *status = st | (st2 << 8);
+
+ if ((st & 0x7f) == 0 && (st2 & 0xfe) == 0)
+ return false;
+
+ if ((st & (LM90_STATUS_LLOW | LM90_STATUS_LHIGH | LM90_STATUS_LTHRM)) ||
+ (st2 & MAX6696_STATUS2_LOT2))
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 1);
+ if ((st & (LM90_STATUS_RLOW | LM90_STATUS_RHIGH | LM90_STATUS_RTHRM)) ||
+ (st2 & MAX6696_STATUS2_ROT2))
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 2);
+ if (st & LM90_STATUS_ROPEN)
+ dev_warn(&client->dev,
+ "temp%d diode open, please check!\n", 2);
+ if (st2 & (MAX6696_STATUS2_R2LOW | MAX6696_STATUS2_R2HIGH |
+ MAX6696_STATUS2_R2THRM | MAX6696_STATUS2_R2OT2))
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 3);
+ if (st2 & MAX6696_STATUS2_R2OPEN)
+ dev_warn(&client->dev,
+ "temp%d diode open, please check!\n", 3);
+
+ return true;
+}
+
+static irqreturn_t lm90_irq_thread(int irq, void *dev_id)
+{
+ struct i2c_client *client = dev_id;
+ u16 status;
+
+ if (lm90_is_tripped(client, &status))
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
static int lm90_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1467,6 +1646,18 @@ static int lm90_probe(struct i2c_client *client,
goto exit_remove_files;
}
+ if (client->irq) {
+ dev_dbg(dev, "IRQ: %d\n", client->irq);
+ err = devm_request_threaded_irq(dev, client->irq,
+ NULL, lm90_irq_thread,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "lm90", client);
+ if (err < 0) {
+ dev_err(dev, "cannot request IRQ %d\n", client->irq);
+ goto exit_remove_files;
+ }
+ }
+
return 0;
exit_remove_files:
@@ -1489,43 +1680,26 @@ static int lm90_remove(struct i2c_client *client)
static void lm90_alert(struct i2c_client *client, unsigned int flag)
{
- struct lm90_data *data = i2c_get_clientdata(client);
- u8 config, alarms, alarms2 = 0;
-
- lm90_read_reg(client, LM90_REG_R_STATUS, &alarms);
-
- if (data->kind == max6696)
- lm90_read_reg(client, MAX6696_REG_R_STATUS2, &alarms2);
-
- if ((alarms & 0x7f) == 0 && (alarms2 & 0xfe) == 0) {
- dev_info(&client->dev, "Everything OK\n");
- } else {
- if (alarms & 0x61)
- dev_warn(&client->dev,
- "temp%d out of range, please check!\n", 1);
- if (alarms & 0x1a)
- dev_warn(&client->dev,
- "temp%d out of range, please check!\n", 2);
- if (alarms & 0x04)
- dev_warn(&client->dev,
- "temp%d diode open, please check!\n", 2);
-
- if (alarms2 & 0x18)
- dev_warn(&client->dev,
- "temp%d out of range, please check!\n", 3);
+ u16 alarms;
+ if (lm90_is_tripped(client, &alarms)) {
/*
* Disable ALERT# output, because these chips don't implement
* SMBus alert correctly; they should only hold the alert line
* low briefly.
*/
+ struct lm90_data *data = i2c_get_clientdata(client);
+
if ((data->flags & LM90_HAVE_BROKEN_ALERT)
&& (alarms & data->alert_alarms)) {
+ u8 config;
dev_dbg(&client->dev, "Disabling ALERT#\n");
lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
config | 0x80);
}
+ } else {
+ dev_info(&client->dev, "Everything OK\n");
}
}
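
The new interrupt path in lm90_probe() only arms itself when client->irq is non-zero. One way that happens on a non-DT board is via i2c_board_info supplied by the machine init code; a sketch of such wiring (bus number and IRQ line are illustrative — only the 0x4c address and the "tmp451" name come from the detect code and id table above):

	#include <linux/i2c.h>

	/* Sketch only: board-file wiring so that lm90_probe() sees client->irq. */
	static struct i2c_board_info foo_i2c_devs[] __initdata = {
		{
			I2C_BOARD_INFO("tmp451", 0x4c),
			.irq = 17,	/* line the ALERT#/THERM2 pin is routed to */
		},
	};

	/* Called from the machine init code, before the I2C adapter is registered. */
	static int __init foo_wire_up_sensors(void)
	{
		return i2c_register_board_info(0, foo_i2c_devs,
					       ARRAY_SIZE(foo_i2c_devs));
	}
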
diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
index 307c9eaeeb9f..411202bdaf6b 100644
--- a/drivers/hwmon/lm95234.c
+++ b/drivers/hwmon/lm95234.c
@@ -57,7 +57,7 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4d, 0x4e, I2C_CLIENT_END };
/* Client data (each client gets its own) */
struct lm95234_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
unsigned long last_updated, interval; /* in jiffies */
bool valid; /* false until following fields are valid */
@@ -114,9 +114,9 @@ static u16 update_intervals[] = { 143, 364, 1000, 2500 };
/* Fill value cache. Must be called with update lock held. */
-static int lm95234_fill_cache(struct i2c_client *client)
+static int lm95234_fill_cache(struct lm95234_data *data,
+ struct i2c_client *client)
{
- struct lm95234_data *data = i2c_get_clientdata(client);
int i, ret;
ret = i2c_smbus_read_byte_data(client, LM95234_REG_CONVRATE);
@@ -157,9 +157,9 @@ static int lm95234_fill_cache(struct i2c_client *client)
return 0;
}
-static int lm95234_update_device(struct i2c_client *client,
- struct lm95234_data *data)
+static int lm95234_update_device(struct lm95234_data *data)
{
+ struct i2c_client *client = data->client;
int ret;
mutex_lock(&data->update_lock);
@@ -169,7 +169,7 @@ static int lm95234_update_device(struct i2c_client *client,
int i;
if (!data->valid) {
- ret = lm95234_fill_cache(client);
+ ret = lm95234_fill_cache(data, client);
if (ret < 0)
goto abort;
}
@@ -209,10 +209,9 @@ abort:
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -224,10 +223,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
static ssize_t show_alarm(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
u32 mask = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -238,10 +236,9 @@ static ssize_t show_alarm(struct device *dev,
static ssize_t show_type(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
u8 mask = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -252,11 +249,10 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr,
static ssize_t set_type(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
unsigned long val;
u8 mask = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -274,7 +270,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
else
data->sensor_type &= ~mask;
data->valid = false;
- i2c_smbus_write_byte_data(client, LM95234_REG_REM_MODEL,
+ i2c_smbus_write_byte_data(data->client, LM95234_REG_REM_MODEL,
data->sensor_type);
mutex_unlock(&data->update_lock);
@@ -284,10 +280,9 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
static ssize_t show_tcrit2(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -298,11 +293,10 @@ static ssize_t show_tcrit2(struct device *dev, struct device_attribute *attr,
static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
long val;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -315,7 +309,7 @@ static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->tcrit2[index] = val;
- i2c_smbus_write_byte_data(client, LM95234_REG_TCRIT2(index), val);
+ i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT2(index), val);
mutex_unlock(&data->update_lock);
return count;
@@ -324,10 +318,9 @@ static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr,
static ssize_t show_tcrit2_hyst(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -340,8 +333,7 @@ static ssize_t show_tcrit2_hyst(struct device *dev,
static ssize_t show_tcrit1(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
return sprintf(buf, "%u", data->tcrit1[index] * 1000);
@@ -350,11 +342,10 @@ static ssize_t show_tcrit1(struct device *dev, struct device_attribute *attr,
static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
+ int ret = lm95234_update_device(data);
long val;
- int ret = lm95234_update_device(client, data);
if (ret)
return ret;
@@ -367,7 +358,7 @@ static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->tcrit1[index] = val;
- i2c_smbus_write_byte_data(client, LM95234_REG_TCRIT1(index), val);
+ i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT1(index), val);
mutex_unlock(&data->update_lock);
return count;
@@ -376,10 +367,9 @@ static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr,
static ssize_t show_tcrit1_hyst(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -393,11 +383,10 @@ static ssize_t set_tcrit1_hyst(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
+ int ret = lm95234_update_device(data);
long val;
- int ret = lm95234_update_device(client, data);
if (ret)
return ret;
@@ -411,7 +400,7 @@ static ssize_t set_tcrit1_hyst(struct device *dev,
mutex_lock(&data->update_lock);
data->thyst = val;
- i2c_smbus_write_byte_data(client, LM95234_REG_TCRIT_HYST, val);
+ i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT_HYST, val);
mutex_unlock(&data->update_lock);
return count;
@@ -420,10 +409,9 @@ static ssize_t set_tcrit1_hyst(struct device *dev,
static ssize_t show_offset(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(client, data);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -434,11 +422,10 @@ static ssize_t show_offset(struct device *dev, struct device_attribute *attr,
static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
+ int ret = lm95234_update_device(data);
long val;
- int ret = lm95234_update_device(client, data);
if (ret)
return ret;
@@ -452,7 +439,7 @@ static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->toffset[index] = val;
- i2c_smbus_write_byte_data(client, LM95234_REG_OFFSET(index), val);
+ i2c_smbus_write_byte_data(data->client, LM95234_REG_OFFSET(index), val);
mutex_unlock(&data->update_lock);
return count;
@@ -461,9 +448,8 @@ static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
- int ret = lm95234_update_device(client, data);
+ struct lm95234_data *data = dev_get_drvdata(dev);
+ int ret = lm95234_update_device(data);
if (ret)
return ret;
@@ -475,11 +461,10 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95234_data *data = i2c_get_clientdata(client);
+ struct lm95234_data *data = dev_get_drvdata(dev);
+ int ret = lm95234_update_device(data);
unsigned long val;
u8 regval;
- int ret = lm95234_update_device(client, data);
if (ret)
return ret;
@@ -495,7 +480,7 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->interval = msecs_to_jiffies(update_intervals[regval]);
- i2c_smbus_write_byte_data(client, LM95234_REG_CONVRATE, regval);
+ i2c_smbus_write_byte_data(data->client, LM95234_REG_CONVRATE, regval);
mutex_unlock(&data->update_lock);
return count;
@@ -579,7 +564,7 @@ static SENSOR_DEVICE_ATTR(temp5_offset, S_IWUSR | S_IRUGO, show_offset,
static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
set_interval);
-static struct attribute *lm95234_attributes[] = {
+static struct attribute *lm95234_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
@@ -621,10 +606,7 @@ static struct attribute *lm95234_attributes[] = {
&dev_attr_update_interval.attr,
NULL
};
-
-static const struct attribute_group lm95234_group = {
- .attrs = lm95234_attributes,
-};
+ATTRIBUTE_GROUPS(lm95234);
static int lm95234_detect(struct i2c_client *client,
struct i2c_board_info *info)
@@ -701,13 +683,14 @@ static int lm95234_probe(struct i2c_client *client,
{
struct device *dev = &client->dev;
struct lm95234_data *data;
+ struct device *hwmon_dev;
int err;
data = devm_kzalloc(dev, sizeof(struct lm95234_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Initialize the LM95234 chip */
@@ -715,32 +698,10 @@ static int lm95234_probe(struct i2c_client *client,
if (err < 0)
return err;
- /* Register sysfs hooks */
- err = sysfs_create_group(&dev->kobj, &lm95234_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
-
- return 0;
-
-exit_remove_files:
- sysfs_remove_group(&dev->kobj, &lm95234_group);
- return err;
-}
-
-static int lm95234_remove(struct i2c_client *client)
-{
- struct lm95234_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm95234_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ lm95234_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
/* Driver data (common to all clients) */
@@ -756,7 +717,6 @@ static struct i2c_driver lm95234_driver = {
.name = DRVNAME,
},
.probe = lm95234_probe,
- .remove = lm95234_remove,
.id_table = lm95234_id,
.detect = lm95234_detect,
.address_list = normal_i2c,
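For reference, a minimal sketch of the registration pattern the lm95234 conversion above (and most of the conversions that follow) adopts. The "foo" driver, its register offset and its temperature scaling are hypothetical; ATTRIBUTE_GROUPS() and devm_hwmon_device_register_with_groups() are the helpers used in the hunks above.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

struct foo_data {
	struct i2c_client *client;	/* kept for SMBus access from sysfs callbacks */
	struct mutex update_lock;
};

static ssize_t foo_show_temp(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct foo_data *data = dev_get_drvdata(dev);	/* drvdata of the hwmon device */
	int val = i2c_smbus_read_byte_data(data->client, 0x00);	/* hypothetical register */

	if (val < 0)
		return val;
	return sprintf(buf, "%d\n", val * 1000);	/* degrees C to millidegrees */
}
static DEVICE_ATTR(temp1_input, S_IRUGO, foo_show_temp, NULL);

static struct attribute *foo_attrs[] = {
	&dev_attr_temp1_input.attr,
	NULL
};
ATTRIBUTE_GROUPS(foo);		/* provides foo_group and the foo_groups[] array */

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct device *hwmon_dev;
	struct foo_data *data;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->client = client;
	mutex_init(&data->update_lock);

	/* no remove() callback needed: devm unregisters the device and its groups */
	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
							   data, foo_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}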
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index cdc1ecc6734d..d4172933ce4f 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -51,7 +51,9 @@ enum ltc4245_cmd {
};
struct ltc4245_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
+
+ const struct attribute_group *groups[3];
struct mutex update_lock;
bool valid;
@@ -77,8 +79,8 @@ struct ltc4245_data {
*/
static void ltc4245_update_gpios(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ltc4245_data *data = i2c_get_clientdata(client);
+ struct ltc4245_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
u8 gpio_curr, gpio_next, gpio_reg;
int i;
@@ -130,8 +132,8 @@ static void ltc4245_update_gpios(struct device *dev)
static struct ltc4245_data *ltc4245_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ltc4245_data *data = i2c_get_clientdata(client);
+ struct ltc4245_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
s32 val;
int i;
@@ -455,41 +457,14 @@ static const struct attribute_group ltc4245_gpio_group = {
.attrs = ltc4245_gpio_attributes,
};
-static int ltc4245_sysfs_create_groups(struct i2c_client *client)
+static void ltc4245_sysfs_add_groups(struct ltc4245_data *data)
{
- struct ltc4245_data *data = i2c_get_clientdata(client);
- struct device *dev = &client->dev;
- int ret;
-
- /* register the standard sysfs attributes */
- ret = sysfs_create_group(&dev->kobj, &ltc4245_std_group);
- if (ret) {
- dev_err(dev, "unable to register standard attributes\n");
- return ret;
- }
+ /* standard sysfs attributes */
+ data->groups[0] = &ltc4245_std_group;
/* if we're using the extra gpio support, register its attributes */
- if (data->use_extra_gpios) {
- ret = sysfs_create_group(&dev->kobj, &ltc4245_gpio_group);
- if (ret) {
- dev_err(dev, "unable to register gpio attributes\n");
- sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
- return ret;
- }
- }
-
- return 0;
-}
-
-static void ltc4245_sysfs_remove_groups(struct i2c_client *client)
-{
- struct ltc4245_data *data = i2c_get_clientdata(client);
- struct device *dev = &client->dev;
-
if (data->use_extra_gpios)
- sysfs_remove_group(&dev->kobj, &ltc4245_gpio_group);
-
- sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
+ data->groups[1] = &ltc4245_gpio_group;
}
static bool ltc4245_use_extra_gpios(struct i2c_client *client)
@@ -517,7 +492,7 @@ static int ltc4245_probe(struct i2c_client *client,
{
struct i2c_adapter *adapter = client->adapter;
struct ltc4245_data *data;
- int ret;
+ struct device *hwmon_dev;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
@@ -526,7 +501,7 @@ static int ltc4245_probe(struct i2c_client *client,
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
data->use_extra_gpios = ltc4245_use_extra_gpios(client);
@@ -534,30 +509,25 @@ static int ltc4245_probe(struct i2c_client *client,
i2c_smbus_write_byte_data(client, LTC4245_FAULT1, 0x00);
i2c_smbus_write_byte_data(client, LTC4245_FAULT2, 0x00);
- /* Register sysfs hooks */
- ret = ltc4245_sysfs_create_groups(client);
- if (ret)
- return ret;
+ /* Add sysfs hooks */
+ ltc4245_sysfs_add_groups(data);
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto out_hwmon_device_register;
- }
+ hwmon_dev = hwmon_device_register_with_groups(&client->dev,
+ client->name, data,
+ data->groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
- return 0;
+ i2c_set_clientdata(client, hwmon_dev);
-out_hwmon_device_register:
- ltc4245_sysfs_remove_groups(client);
- return ret;
+ return 0;
}
static int ltc4245_remove(struct i2c_client *client)
{
- struct ltc4245_data *data = i2c_get_clientdata(client);
+ struct device *hwmon_dev = i2c_get_clientdata(client);
- hwmon_device_unregister(data->hwmon_dev);
- ltc4245_sysfs_remove_groups(client);
+ hwmon_device_unregister(hwmon_dev);
return 0;
}
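Where group membership depends on probe-time detection, the conversions collect the groups in a NULL-terminated array inside the driver data before registering, as ltc4245 does above and max16065 and tmp401 do below. A sketch of just that step, under made-up "bar" names with one placeholder attribute per group:

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct bar_data {
	struct i2c_client *client;
	const struct attribute_group *groups[3];	/* std + optional gpio + NULL */
	bool use_extra_gpios;
};

static ssize_t bar_show_in(struct device *dev, struct device_attribute *da,
			   char *buf)
{
	return sprintf(buf, "0\n");			/* placeholder reading */
}

static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, bar_show_in, NULL, 0);
static struct attribute *bar_std_attrs[] = {
	&sensor_dev_attr_in1_input.dev_attr.attr,
	NULL
};
static const struct attribute_group bar_std_group = { .attrs = bar_std_attrs };

static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, bar_show_in, NULL, 4);
static struct attribute *bar_gpio_attrs[] = {
	&sensor_dev_attr_in5_input.dev_attr.attr,
	NULL
};
static const struct attribute_group bar_gpio_group = { .attrs = bar_gpio_attrs };

static void bar_fill_groups(struct bar_data *data)
{
	int idx = 0;

	data->groups[idx++] = &bar_std_group;
	if (data->use_extra_gpios)
		data->groups[idx++] = &bar_gpio_group;
	/* bar_data is assumed to come from devm_kzalloc(), so the terminating NULL is already in place */
}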
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 487da58ec86c..0becd69842bb 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -55,7 +55,7 @@
#define FAULT_OC (1<<2)
struct ltc4261_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
bool valid;
@@ -67,8 +67,8 @@ struct ltc4261_data {
static struct ltc4261_data *ltc4261_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ltc4261_data *data = i2c_get_clientdata(client);
+ struct ltc4261_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct ltc4261_data *ret = data;
mutex_lock(&data->update_lock);
@@ -150,7 +150,6 @@ static ssize_t ltc4261_show_bool(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct i2c_client *client = to_i2c_client(dev);
struct ltc4261_data *data = ltc4261_update_device(dev);
u8 fault;
@@ -159,7 +158,7 @@ static ssize_t ltc4261_show_bool(struct device *dev,
fault = data->regs[LTC4261_FAULT] & attr->index;
if (fault) /* Clear reported faults in chip register */
- i2c_smbus_write_byte_data(client, LTC4261_FAULT, ~fault);
+ i2c_smbus_write_byte_data(data->client, LTC4261_FAULT, ~fault);
return snprintf(buf, PAGE_SIZE, "%d\n", fault ? 1 : 0);
}
@@ -197,7 +196,7 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4261_show_value, NULL,
static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO, ltc4261_show_bool, NULL,
FAULT_OC);
-static struct attribute *ltc4261_attributes[] = {
+static struct attribute *ltc4261_attrs[] = {
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_min_alarm.dev_attr.attr,
&sensor_dev_attr_in1_max_alarm.dev_attr.attr,
@@ -210,62 +209,38 @@ static struct attribute *ltc4261_attributes[] = {
NULL,
};
-
-static const struct attribute_group ltc4261_group = {
- .attrs = ltc4261_attributes,
-};
+ATTRIBUTE_GROUPS(ltc4261);
static int ltc4261_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
+ struct device *dev = &client->dev;
struct ltc4261_data *data;
- int ret;
+ struct device *hwmon_dev;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
if (i2c_smbus_read_byte_data(client, LTC4261_STATUS) < 0) {
- dev_err(&client->dev, "Failed to read status register\n");
+ dev_err(dev, "Failed to read status register\n");
return -ENODEV;
}
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Clear faults */
i2c_smbus_write_byte_data(client, LTC4261_FAULT, 0x00);
- /* Register sysfs hooks */
- ret = sysfs_create_group(&client->dev.kobj, &ltc4261_group);
- if (ret)
- return ret;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto out_hwmon_device_register;
- }
-
- return 0;
-
-out_hwmon_device_register:
- sysfs_remove_group(&client->dev.kobj, &ltc4261_group);
- return ret;
-}
-
-static int ltc4261_remove(struct i2c_client *client)
-{
- struct ltc4261_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &ltc4261_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ ltc4261_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id ltc4261_id[] = {
@@ -281,7 +256,6 @@ static struct i2c_driver ltc4261_driver = {
.name = "ltc4261",
},
.probe = ltc4261_probe,
- .remove = ltc4261_remove,
.id_table = ltc4261_id,
};
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 2fa2c02f5569..d4efc79d7b93 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -83,7 +83,8 @@ static const bool max16065_have_current[] = {
struct max16065_data {
enum chips type;
- struct device *hwmon_dev;
+ struct i2c_client *client;
+ const struct attribute_group *groups[4];
struct mutex update_lock;
bool valid;
unsigned long last_updated; /* in jiffies */
@@ -144,8 +145,8 @@ static int max16065_read_adc(struct i2c_client *client, int reg)
static struct max16065_data *max16065_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max16065_data *data = i2c_get_clientdata(client);
+ struct max16065_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
@@ -186,7 +187,7 @@ static ssize_t max16065_show_alarm(struct device *dev,
val &= (1 << attr2->index);
if (val)
- i2c_smbus_write_byte_data(to_i2c_client(dev),
+ i2c_smbus_write_byte_data(data->client,
MAX16065_FAULT(attr2->nr), val);
return snprintf(buf, PAGE_SIZE, "%d\n", !!val);
@@ -223,8 +224,7 @@ static ssize_t max16065_set_limit(struct device *dev,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
- struct i2c_client *client = to_i2c_client(dev);
- struct max16065_data *data = i2c_get_clientdata(client);
+ struct max16065_data *data = dev_get_drvdata(dev);
unsigned long val;
int err;
int limit;
@@ -238,7 +238,7 @@ static ssize_t max16065_set_limit(struct device *dev,
mutex_lock(&data->update_lock);
data->limit[attr2->nr][attr2->index]
= LIMIT_TO_MV(limit, data->range[attr2->index]);
- i2c_smbus_write_byte_data(client,
+ i2c_smbus_write_byte_data(data->client,
MAX16065_LIMIT(attr2->nr, attr2->index),
limit);
mutex_unlock(&data->update_lock);
@@ -250,8 +250,7 @@ static ssize_t max16065_show_limit(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
- struct i2c_client *client = to_i2c_client(dev);
- struct max16065_data *data = i2c_get_clientdata(client);
+ struct max16065_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%d\n",
data->limit[attr2->nr][attr2->index]);
@@ -516,8 +515,32 @@ static struct attribute *max16065_max_attributes[] = {
NULL
};
+static umode_t max16065_basic_is_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct max16065_data *data = dev_get_drvdata(dev);
+ int index = n / 4;
+
+ if (index >= data->num_adc || !data->range[index])
+ return 0;
+ return a->mode;
+}
+
+static umode_t max16065_secondary_is_visible(struct kobject *kobj,
+ struct attribute *a, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct max16065_data *data = dev_get_drvdata(dev);
+
+ if (index >= data->num_adc)
+ return 0;
+ return a->mode;
+}
+
static const struct attribute_group max16065_basic_group = {
.attrs = max16065_basic_attributes,
+ .is_visible = max16065_basic_is_visible,
};
static const struct attribute_group max16065_current_group = {
@@ -526,38 +549,35 @@ static const struct attribute_group max16065_current_group = {
static const struct attribute_group max16065_min_group = {
.attrs = max16065_min_attributes,
+ .is_visible = max16065_secondary_is_visible,
};
static const struct attribute_group max16065_max_group = {
.attrs = max16065_max_attributes,
+ .is_visible = max16065_secondary_is_visible,
};
-static void max16065_cleanup(struct i2c_client *client)
-{
- sysfs_remove_group(&client->dev.kobj, &max16065_max_group);
- sysfs_remove_group(&client->dev.kobj, &max16065_min_group);
- sysfs_remove_group(&client->dev.kobj, &max16065_current_group);
- sysfs_remove_group(&client->dev.kobj, &max16065_basic_group);
-}
-
static int max16065_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
struct max16065_data *data;
- int i, j, val, ret;
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ int i, j, val;
bool have_secondary; /* true if chip has secondary limits */
bool secondary_is_max = false; /* secondary limits reflect max */
+ int groups = 0;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_READ_WORD_DATA))
return -ENODEV;
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (unlikely(!data))
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
data->num_adc = max16065_num_adc[id->driver_data];
@@ -596,38 +616,16 @@ static int max16065_probe(struct i2c_client *client,
}
}
- /* Register sysfs hooks */
- for (i = 0; i < data->num_adc * 4; i++) {
- /* Do not create sysfs entry if channel is disabled */
- if (!data->range[i / 4])
- continue;
-
- ret = sysfs_create_file(&client->dev.kobj,
- max16065_basic_attributes[i]);
- if (unlikely(ret))
- goto out;
- }
-
- if (have_secondary) {
- struct attribute **attr = secondary_is_max ?
- max16065_max_attributes : max16065_min_attributes;
-
- for (i = 0; i < data->num_adc; i++) {
- if (!data->range[i])
- continue;
-
- ret = sysfs_create_file(&client->dev.kobj, attr[i]);
- if (unlikely(ret))
- goto out;
- }
- }
+ /* sysfs hooks */
+ data->groups[groups++] = &max16065_basic_group;
+ if (have_secondary)
+ data->groups[groups++] = secondary_is_max ?
+ &max16065_max_group : &max16065_min_group;
if (data->have_current) {
val = i2c_smbus_read_byte_data(client, MAX16065_CURR_CONTROL);
- if (unlikely(val < 0)) {
- ret = val;
- goto out;
- }
+ if (unlikely(val < 0))
+ return val;
if (val & MAX16065_CURR_ENABLE) {
/*
* Current gain is 6, 12, 24, 48 based on values in
@@ -636,33 +634,16 @@ static int max16065_probe(struct i2c_client *client,
data->curr_gain = 6 << ((val >> 2) & 0x03);
data->range[MAX16065_NUM_ADC]
= max16065_csp_adc_range[(val >> 1) & 0x01];
- ret = sysfs_create_group(&client->dev.kobj,
- &max16065_current_group);
- if (unlikely(ret))
- goto out;
+ data->groups[groups++] = &max16065_current_group;
} else {
data->have_current = false;
}
}
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (unlikely(IS_ERR(data->hwmon_dev))) {
- ret = PTR_ERR(data->hwmon_dev);
- goto out;
- }
- return 0;
-
-out:
- max16065_cleanup(client);
- return ret;
-}
-
-static int max16065_remove(struct i2c_client *client)
-{
- struct max16065_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- max16065_cleanup(client);
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, data->groups);
+ if (unlikely(IS_ERR(hwmon_dev)))
+ return PTR_ERR(hwmon_dev);
return 0;
}
@@ -685,7 +666,6 @@ static struct i2c_driver max16065_driver = {
.name = "max16065",
},
.probe = max16065_probe,
- .remove = max16065_remove,
.id_table = max16065_id,
};
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 57d58cd32206..8326fbd60150 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -87,7 +87,7 @@ static int temp_to_reg(int val)
*/
struct max6642_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
bool valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
@@ -102,10 +102,10 @@ struct max6642_data {
* Real code
*/
-static void max6642_init_client(struct i2c_client *client)
+static void max6642_init_client(struct max6642_data *data,
+ struct i2c_client *client)
{
u8 config;
- struct max6642_data *data = i2c_get_clientdata(client);
/*
* Start the conversions.
@@ -168,14 +168,14 @@ static int max6642_detect(struct i2c_client *client,
static struct max6642_data *max6642_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6642_data *data = i2c_get_clientdata(client);
+ struct max6642_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
u16 val, tmp;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- dev_dbg(&client->dev, "Updating max6642 data.\n");
+ dev_dbg(dev, "Updating max6642 data.\n");
val = i2c_smbus_read_byte_data(client,
MAX6642_REG_R_LOCAL_TEMPL);
tmp = (val >> 6) & 3;
@@ -209,8 +209,8 @@ static struct max6642_data *max6642_update_device(struct device *dev)
static ssize_t show_temp_max10(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct max6642_data *data = max6642_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6642_data *data = max6642_update_device(dev);
return sprintf(buf, "%d\n",
temp_from_reg10(data->temp_input[attr->index]));
@@ -219,8 +219,8 @@ static ssize_t show_temp_max10(struct device *dev,
static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct max6642_data *data = max6642_update_device(dev);
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
+ struct max6642_data *data = max6642_update_device(dev);
return sprintf(buf, "%d\n", temp_from_reg(data->temp_high[attr2->nr]));
}
@@ -228,11 +228,10 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
+ struct max6642_data *data = dev_get_drvdata(dev);
unsigned long val;
int err;
- struct i2c_client *client = to_i2c_client(dev);
- struct max6642_data *data = i2c_get_clientdata(client);
- struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
err = kstrtoul(buf, 10, &val);
if (err < 0)
@@ -240,7 +239,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->temp_high[attr2->nr] = clamp_val(temp_to_reg(val), 0, 255);
- i2c_smbus_write_byte_data(client, attr2->index,
+ i2c_smbus_write_byte_data(data->client, attr2->index,
data->temp_high[attr2->nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -264,7 +263,7 @@ static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
-static struct attribute *max6642_attributes[] = {
+static struct attribute *max6642_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -275,54 +274,29 @@ static struct attribute *max6642_attributes[] = {
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
NULL
};
+ATTRIBUTE_GROUPS(max6642);
-static const struct attribute_group max6642_group = {
- .attrs = max6642_attributes,
-};
-
-static int max6642_probe(struct i2c_client *new_client,
+static int max6642_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct max6642_data *data;
- int err;
+ struct device *hwmon_dev;
- data = devm_kzalloc(&new_client->dev, sizeof(struct max6642_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct max6642_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(new_client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Initialize the MAX6642 chip */
- max6642_init_client(new_client);
+ max6642_init_client(data, client);
- /* Register sysfs hooks */
- err = sysfs_create_group(&new_client->dev.kobj, &max6642_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(&new_client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
-
- return 0;
-
-exit_remove_files:
- sysfs_remove_group(&new_client->dev.kobj, &max6642_group);
- return err;
-}
-
-static int max6642_remove(struct i2c_client *client)
-{
- struct max6642_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &max6642_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+ client->name, data,
+ max6642_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
/*
@@ -341,7 +315,6 @@ static struct i2c_driver max6642_driver = {
.name = "max6642",
},
.probe = max6642_probe,
- .remove = max6642_remove,
.id_table = max6642_id,
.detect = max6642_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 3c16cbd4c002..0cafc390db4d 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -660,7 +660,7 @@ static int max6650_init_client(struct i2c_client *client)
/*
* If mode is set to "full off", we change it to "open loop" and
* set DAC to 255, which has the same effect. We do this because
- * there's no "full off" mode defined in hwmon specifcations.
+ * there's no "full off" mode defined in hwmon specifications.
*/
if ((config & MAX6650_CFG_MODE_MASK) == MAX6650_CFG_MODE_OFF) {
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index a41b5f3fc506..7fd3eaf817f4 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -77,7 +77,7 @@ struct max6697_chip_data {
};
struct max6697_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
enum chips type;
const struct max6697_chip_data *chip;
@@ -181,8 +181,8 @@ static const struct max6697_chip_data max6697_chip_data[] = {
static struct max6697_data *max6697_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6697_data *data = i2c_get_clientdata(client);
+ struct max6697_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct max6697_data *ret = data;
int val;
int i;
@@ -303,8 +303,7 @@ static ssize_t set_temp(struct device *dev,
{
int nr = to_sensor_dev_attr_2(devattr)->nr;
int index = to_sensor_dev_attr_2(devattr)->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct max6697_data *data = i2c_get_clientdata(client);
+ struct max6697_data *data = dev_get_drvdata(dev);
long temp;
int ret;
@@ -316,7 +315,7 @@ static ssize_t set_temp(struct device *dev,
temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset;
temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127);
data->temp[nr][index] = temp;
- ret = i2c_smbus_write_byte_data(client,
+ ret = i2c_smbus_write_byte_data(data->client,
index == 2 ? MAX6697_REG_MAX[nr]
: MAX6697_REG_CRIT[nr],
temp);
@@ -405,8 +404,7 @@ static umode_t max6697_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct i2c_client *client = to_i2c_client(dev);
- struct max6697_data *data = i2c_get_clientdata(client);
+ struct max6697_data *data = dev_get_drvdata(dev);
const struct max6697_chip_data *chip = data->chip;
int channel = index / 6; /* channel number */
int nr = index % 6; /* attribute index within channel */
@@ -489,6 +487,7 @@ static struct attribute *max6697_attributes[] = {
static const struct attribute_group max6697_group = {
.attrs = max6697_attributes, .is_visible = max6697_is_visible,
};
+__ATTRIBUTE_GROUPS(max6697);
static void max6697_get_config_of(struct device_node *node,
struct max6697_platform_data *pdata)
@@ -525,9 +524,9 @@ static void max6697_get_config_of(struct device_node *node,
}
}
-static int max6697_init_chip(struct i2c_client *client)
+static int max6697_init_chip(struct max6697_data *data,
+ struct i2c_client *client)
{
- struct max6697_data *data = i2c_get_clientdata(client);
struct max6697_platform_data *pdata = dev_get_platdata(&client->dev);
struct max6697_platform_data p;
const struct max6697_chip_data *chip = data->chip;
@@ -625,6 +624,7 @@ static int max6697_probe(struct i2c_client *client,
struct i2c_adapter *adapter = client->adapter;
struct device *dev = &client->dev;
struct max6697_data *data;
+ struct device *hwmon_dev;
int err;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -636,39 +636,17 @@ static int max6697_probe(struct i2c_client *client,
data->type = id->driver_data;
data->chip = &max6697_chip_data[data->type];
-
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
- err = max6697_init_chip(client);
- if (err)
- return err;
-
- err = sysfs_create_group(&client->dev.kobj, &max6697_group);
+ err = max6697_init_chip(data, client);
if (err)
return err;
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto error;
- }
-
- return 0;
-
-error:
- sysfs_remove_group(&client->dev.kobj, &max6697_group);
- return err;
-}
-
-static int max6697_remove(struct i2c_client *client)
-{
- struct max6697_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &max6697_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ max6697_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id max6697_id[] = {
@@ -692,7 +670,6 @@ static struct i2c_driver max6697_driver = {
.name = "max6697",
},
.probe = max6697_probe,
- .remove = max6697_remove,
.id_table = max6697_id,
};
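Both max16065 and max6697 above rely on an .is_visible callback so that attributes for absent channels are hidden at registration time instead of being created file by file; max6697 then wraps its existing group with __ATTRIBUTE_GROUPS(). A sketch of that shape, with a hypothetical "baz" driver whose attrs[] contents and channel count are stand-ins:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct baz_data {
	int num_channels;		/* channels actually populated on this board */
};

/* n is the index of the attribute within the group's attrs[] array */
static umode_t baz_is_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct baz_data *data = dev_get_drvdata(dev);

	if (n >= data->num_channels)	/* hide attributes of missing channels */
		return 0;
	return a->mode;			/* otherwise keep the mode declared in the array */
}

static struct attribute *baz_attrs[] = {
	NULL				/* real drivers list their sensor attributes here */
};

static const struct attribute_group baz_group = {
	.attrs = baz_attrs,
	.is_visible = baz_is_visible,
};
__ATTRIBUTE_GROUPS(baz);	/* emits baz_groups[] around the already-defined group */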
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index 982d8622c09b..ae00e60d856c 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -37,7 +37,7 @@
struct mc13783_adc_priv {
struct mc13xxx *mc13xxx;
struct device *hwmon_dev;
- char name[10];
+ char name[PLATFORM_NAME_SIZE];
};
static ssize_t mc13783_adc_show_name(struct device *dev, struct device_attribute
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 6eb03ce2cff4..d17325db0ea3 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -724,11 +724,8 @@ struct nct6775_data {
enum kinds kind;
const char *name;
- struct device *hwmon_dev;
- struct attribute_group *group_in;
- struct attribute_group *group_fan;
- struct attribute_group *group_temp;
- struct attribute_group *group_pwm;
+ int num_attr_groups;
+ const struct attribute_group *groups[6];
u16 reg_temp[5][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
* 3=temp_crit, 4=temp_lcrit
@@ -942,7 +939,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
struct sensor_device_attribute_2 *a2;
struct attribute **attrs;
struct sensor_device_template **t;
- int err, i, j, count;
+ int i, count;
if (repeat <= 0)
return ERR_PTR(-EINVAL);
@@ -973,7 +970,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
for (i = 0; i < repeat; i++) {
t = tg->templates;
- for (j = 0; *t != NULL; j++) {
+ while (*t != NULL) {
snprintf(su->name, sizeof(su->name),
(*t)->dev_attr.attr.name, tg->base + i);
if ((*t)->s2) {
@@ -1002,10 +999,6 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
}
}
- err = sysfs_create_group(&dev->kobj, group);
- if (err)
- return ERR_PTR(-ENOMEM);
-
return group;
}
@@ -1457,7 +1450,8 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
= nct6775_read_temp(data,
data->reg_temp[j][i]);
}
- if (!(data->have_temp_fixed & (1 << i)))
+ if (i >= NUM_TEMP_FIXED ||
+ !(data->have_temp_fixed & (1 << i)))
continue;
data->temp_offset[i]
= nct6775_read_value(data, data->REG_TEMP_OFFSET[i]);
@@ -1545,7 +1539,7 @@ static int find_temp_source(struct nct6775_data *data, int index, int count)
if (src == source)
return nr;
}
- return -1;
+ return -ENODEV;
}
static ssize_t
@@ -1644,7 +1638,7 @@ store_temp_beep(struct device *dev, struct device_attribute *attr,
nr = find_temp_source(data, sattr->index, data->num_temp_beeps);
if (nr < 0)
- return -ENODEV;
+ return nr;
bit = data->BEEP_BITS[nr + TEMP_ALARM_BASE];
regindex = bit >> 3;
@@ -2726,16 +2720,6 @@ store_fan_time(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct nct6775_data *data = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", data->name);
-}
-
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-
-static ssize_t
show_auto_pwm(struct device *dev, struct device_attribute *attr, char *buf)
{
struct nct6775_data *data = nct6775_update_device(dev);
@@ -3061,16 +3045,16 @@ static umode_t nct6775_other_is_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
- if (index == 1 && !data->have_vid)
+ if (index == 0 && !data->have_vid)
return 0;
- if (index == 2 || index == 3) {
- if (data->ALARM_BITS[INTRUSION_ALARM_BASE + index - 2] < 0)
+ if (index == 1 || index == 2) {
+ if (data->ALARM_BITS[INTRUSION_ALARM_BASE + index - 1] < 0)
return 0;
}
- if (index == 4 || index == 5) {
- if (data->BEEP_BITS[INTRUSION_ALARM_BASE + index - 4] < 0)
+ if (index == 3 || index == 4) {
+ if (data->BEEP_BITS[INTRUSION_ALARM_BASE + index - 3] < 0)
return 0;
}
@@ -3083,13 +3067,12 @@ static umode_t nct6775_other_is_visible(struct kobject *kobj,
* Any change in order or content must be matched.
*/
static struct attribute *nct6775_attributes_other[] = {
- &dev_attr_name.attr,
- &dev_attr_cpu0_vid.attr, /* 1 */
- &sensor_dev_attr_intrusion0_alarm.dev_attr.attr, /* 2 */
- &sensor_dev_attr_intrusion1_alarm.dev_attr.attr, /* 3 */
- &sensor_dev_attr_intrusion0_beep.dev_attr.attr, /* 4 */
- &sensor_dev_attr_intrusion1_beep.dev_attr.attr, /* 5 */
- &sensor_dev_attr_beep_enable.dev_attr.attr, /* 6 */
+ &dev_attr_cpu0_vid.attr, /* 0 */
+ &sensor_dev_attr_intrusion0_alarm.dev_attr.attr, /* 1 */
+ &sensor_dev_attr_intrusion1_alarm.dev_attr.attr, /* 2 */
+ &sensor_dev_attr_intrusion0_beep.dev_attr.attr, /* 3 */
+ &sensor_dev_attr_intrusion1_beep.dev_attr.attr, /* 4 */
+ &sensor_dev_attr_beep_enable.dev_attr.attr, /* 5 */
NULL
};
@@ -3099,27 +3082,6 @@ static const struct attribute_group nct6775_group_other = {
.is_visible = nct6775_other_is_visible,
};
-/*
- * Driver and device management
- */
-
-static void nct6775_device_remove_files(struct device *dev)
-{
- struct nct6775_data *data = dev_get_drvdata(dev);
-
- if (data->group_pwm)
- sysfs_remove_group(&dev->kobj, data->group_pwm);
- if (data->group_in)
- sysfs_remove_group(&dev->kobj, data->group_in);
- if (data->group_fan)
- sysfs_remove_group(&dev->kobj, data->group_fan);
- if (data->group_temp)
- sysfs_remove_group(&dev->kobj, data->group_temp);
-
- sysfs_remove_group(&dev->kobj, &nct6775_group_other);
-}
-
-/* Get the monitoring functions started */
static inline void nct6775_init_device(struct nct6775_data *data)
{
int i;
@@ -3296,6 +3258,7 @@ static int nct6775_probe(struct platform_device *pdev)
int num_reg_temp;
u8 cr2a;
struct attribute_group *group;
+ struct device *hwmon_dev;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(&pdev->dev, res->start, IOREGION_LENGTH,
@@ -3870,61 +3833,36 @@ static int nct6775_probe(struct platform_device *pdev)
/* Register sysfs hooks */
group = nct6775_create_attr_group(dev, &nct6775_pwm_template_group,
data->pwm_num);
- if (IS_ERR(group)) {
- err = PTR_ERR(group);
- goto exit_remove;
- }
- data->group_pwm = group;
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ data->groups[data->num_attr_groups++] = group;
group = nct6775_create_attr_group(dev, &nct6775_in_template_group,
fls(data->have_in));
- if (IS_ERR(group)) {
- err = PTR_ERR(group);
- goto exit_remove;
- }
- data->group_in = group;
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ data->groups[data->num_attr_groups++] = group;
group = nct6775_create_attr_group(dev, &nct6775_fan_template_group,
fls(data->has_fan));
- if (IS_ERR(group)) {
- err = PTR_ERR(group);
- goto exit_remove;
- }
- data->group_fan = group;
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ data->groups[data->num_attr_groups++] = group;
group = nct6775_create_attr_group(dev, &nct6775_temp_template_group,
fls(data->have_temp));
- if (IS_ERR(group)) {
- err = PTR_ERR(group);
- goto exit_remove;
- }
- data->group_temp = group;
-
- err = sysfs_create_group(&dev->kobj, &nct6775_group_other);
- if (err)
- goto exit_remove;
+ if (IS_ERR(group))
+ return PTR_ERR(group);
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
-
- return 0;
-
-exit_remove:
- nct6775_device_remove_files(dev);
- return err;
-}
-
-static int nct6775_remove(struct platform_device *pdev)
-{
- struct nct6775_data *data = platform_get_drvdata(pdev);
+ data->groups[data->num_attr_groups++] = group;
+ data->groups[data->num_attr_groups++] = &nct6775_group_other;
- hwmon_device_unregister(data->hwmon_dev);
- nct6775_device_remove_files(&pdev->dev);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, data->name,
+ data, data->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
#ifdef CONFIG_PM
@@ -4013,7 +3951,6 @@ static struct platform_driver nct6775_driver = {
.pm = NCT6775_DEV_PM_OPS,
},
.probe = nct6775_probe,
- .remove = nct6775_remove,
};
static const char * const nct6775_sio_names[] __initconst = {
@@ -4101,7 +4038,7 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
/*
* when Super-I/O functions move to a separate file, the Super-I/O
* bus will manage the lifetime of the device and this module will only keep
- * track of the nct6775 driver. But since we platform_device_alloc(), we
+ * track of the nct6775 driver. But since we use platform_device_alloc(), we
* must keep track of the device
*/
static struct platform_device *pdev[2];
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 6a9d6edaacb3..a26b1d1d9514 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -1,5 +1,5 @@
/*
- * Hardware monitoring driver for LM25056 / LM25066 / LM5064 / LM5066
+ * Hardware monitoring driver for LM25056 / LM25063 / LM25066 / LM5064 / LM5066
*
* Copyright (c) 2011 Ericsson AB.
* Copyright (c) 2013 Guenter Roeck
@@ -27,7 +27,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { lm25056, lm25066, lm5064, lm5066 };
+enum chips { lm25056, lm25063, lm25066, lm5064, lm5066 };
#define LM25066_READ_VAUX 0xd0
#define LM25066_MFR_READ_IIN 0xd1
@@ -52,6 +52,11 @@ enum chips { lm25056, lm25066, lm5064, lm5066 };
#define LM25056_MFR_STS_VAUX_OV_WARN (1 << 1)
#define LM25056_MFR_STS_VAUX_UV_WARN (1 << 0)
+/* LM25063 only */
+
+#define LM25063_READ_VOUT_MAX 0xe5
+#define LM25063_READ_VOUT_MIN 0xe6
+
struct __coeff {
short m, b, R;
};
@@ -59,7 +64,7 @@ struct __coeff {
#define PSC_CURRENT_IN_L (PSC_NUM_CLASSES)
#define PSC_POWER_L (PSC_NUM_CLASSES + 1)
-static struct __coeff lm25066_coeff[4][PSC_NUM_CLASSES + 2] = {
+static struct __coeff lm25066_coeff[5][PSC_NUM_CLASSES + 2] = {
[lm25056] = {
[PSC_VOLTAGE_IN] = {
.m = 16296,
@@ -116,6 +121,36 @@ static struct __coeff lm25066_coeff[4][PSC_NUM_CLASSES + 2] = {
.m = 16,
},
},
+ [lm25063] = {
+ [PSC_VOLTAGE_IN] = {
+ .m = 16000,
+ .R = -2,
+ },
+ [PSC_VOLTAGE_OUT] = {
+ .m = 16000,
+ .R = -2,
+ },
+ [PSC_CURRENT_IN] = {
+ .m = 10000,
+ .R = -2,
+ },
+ [PSC_CURRENT_IN_L] = {
+ .m = 10000,
+ .R = -2,
+ },
+ [PSC_POWER] = {
+ .m = 5000,
+ .R = -3,
+ },
+ [PSC_POWER_L] = {
+ .m = 5000,
+ .R = -3,
+ },
+ [PSC_TEMPERATURE] = {
+ .m = 15596,
+ .R = -3,
+ },
+ },
[lm5064] = {
[PSC_VOLTAGE_IN] = {
.m = 4611,
@@ -178,6 +213,7 @@ static struct __coeff lm25066_coeff[4][PSC_NUM_CLASSES + 2] = {
struct lm25066_data {
int id;
+ u16 rlimit; /* Maximum register value */
struct pmbus_driver_info info;
};
@@ -200,6 +236,10 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
/* VIN: 6.14 mV VAUX: 293 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 293, 6140);
break;
+ case lm25063:
+ /* VIN: 6.25 mV VAUX: 200.0 uV LSB */
+ ret = DIV_ROUND_CLOSEST(ret * 20, 625);
+ break;
case lm25066:
/* VIN: 4.54 mV VAUX: 283.2 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 2832, 45400);
@@ -253,6 +293,24 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
+static int lm25063_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VOUT_MAX:
+ ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MAX);
+ break;
+ case PMBUS_VIRT_READ_VOUT_MIN:
+ ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MIN);
+ break;
+ default:
+ ret = lm25066_read_word_data(client, page, reg);
+ break;
+ }
+ return ret;
+}
+
static int lm25056_read_word_data(struct i2c_client *client, int page, int reg)
{
int ret;
@@ -308,27 +366,34 @@ static int lm25056_read_byte_data(struct i2c_client *client, int page, int reg)
static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
u16 word)
{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct lm25066_data *data = to_lm25066_data(info);
int ret;
switch (reg) {
+ case PMBUS_POUT_OP_FAULT_LIMIT:
+ case PMBUS_POUT_OP_WARN_LIMIT:
case PMBUS_VOUT_UV_WARN_LIMIT:
case PMBUS_OT_FAULT_LIMIT:
case PMBUS_OT_WARN_LIMIT:
+ case PMBUS_IIN_OC_FAULT_LIMIT:
case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_FAULT_LIMIT:
case PMBUS_VIN_OV_WARN_LIMIT:
- word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+ word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0, reg, word);
pmbus_clear_cache(client);
break;
case PMBUS_IIN_OC_WARN_LIMIT:
- word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+ word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25066_MFR_IIN_OC_WARN_LIMIT,
word);
pmbus_clear_cache(client);
break;
case PMBUS_PIN_OP_WARN_LIMIT:
- word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+ word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25066_MFR_PIN_OP_WARN_LIMIT,
word);
@@ -337,7 +402,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
/* Adjust from VIN coefficients (for LM25056) */
word = DIV_ROUND_CLOSEST((int)word * 6140, 293);
- word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+ word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25056_VAUX_UV_WARN_LIMIT, word);
pmbus_clear_cache(client);
@@ -345,7 +410,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
/* Adjust from VIN coefficients (for LM25056) */
word = DIV_ROUND_CLOSEST((int)word * 6140, 293);
- word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+ word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25056_VAUX_OV_WARN_LIMIT, word);
pmbus_clear_cache(client);
@@ -399,9 +464,16 @@ static int lm25066_probe(struct i2c_client *client,
info->func[0] |= PMBUS_HAVE_STATUS_VMON;
info->read_word_data = lm25056_read_word_data;
info->read_byte_data = lm25056_read_byte_data;
+ data->rlimit = 0x0fff;
+ } else if (data->id == lm25063) {
+ info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_POUT;
+ info->read_word_data = lm25063_read_word_data;
+ data->rlimit = 0xffff;
} else {
info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
info->read_word_data = lm25066_read_word_data;
+ data->rlimit = 0x0fff;
}
info->write_word_data = lm25066_write_word_data;
@@ -432,6 +504,7 @@ static int lm25066_probe(struct i2c_client *client,
static const struct i2c_device_id lm25066_id[] = {
{"lm25056", lm25056},
+ {"lm25063", lm25063},
{"lm25066", lm25066},
{"lm5064", lm5064},
{"lm5066", lm5066},
@@ -453,5 +526,5 @@ static struct i2c_driver lm25066_driver = {
module_i2c_driver(lm25066_driver);
MODULE_AUTHOR("Guenter Roeck");
-MODULE_DESCRIPTION("PMBus driver for LM25056/LM25066/LM5064/LM5066");
+MODULE_DESCRIPTION("PMBus driver for LM25066 and compatible chips");
MODULE_LICENSE("GPL");
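The lm25066 hunks above replace the hard-coded 0x0fff clamp with a per-chip data->rlimit, since the LM25063 limit registers take the full 16-bit range. The clamp itself, pulled out as a small illustrative helper (the cast catches user input that wraps to a negative s16):

#include <linux/kernel.h>

/* clamp a user-supplied limit word to the chip's writable register range */
static u16 clamp_limit(u16 word, u16 rlimit)
{
	if ((s16)word < 0)		/* treat wrapped-negative input as zero */
		return 0;
	return clamp_val(word, 0, rlimit);
}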
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 586a89ef9e0f..de3c152a1d9a 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -1,5 +1,6 @@
/*
- * Hardware monitoring driver for LTC2974, LTC2978, LTC3880, and LTC3883
+ * Hardware monitoring driver for LTC2974, LTC2977, LTC2978, LTC3880,
+ * and LTC3883
*
* Copyright (c) 2011 Ericsson AB.
* Copyright (c) 2013 Guenter Roeck
@@ -27,7 +28,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { ltc2974, ltc2978, ltc3880, ltc3883 };
+enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883 };
/* Common for all chips */
#define LTC2978_MFR_VOUT_PEAK 0xdd
@@ -35,7 +36,7 @@ enum chips { ltc2974, ltc2978, ltc3880, ltc3883 };
#define LTC2978_MFR_TEMPERATURE_PEAK 0xdf
#define LTC2978_MFR_SPECIAL_ID 0xe7
-/* LTC2974 and LTC2978 */
+/* LTC2974, LTC2977, and LTC2978 */
#define LTC2978_MFR_VOUT_MIN 0xfb
#define LTC2978_MFR_VIN_MIN 0xfc
#define LTC2978_MFR_TEMPERATURE_MIN 0xfd
@@ -53,8 +54,10 @@ enum chips { ltc2974, ltc2978, ltc3880, ltc3883 };
#define LTC3883_MFR_IIN_PEAK 0xe1
#define LTC2974_ID 0x0212
+#define LTC2977_ID 0x0130
#define LTC2978_ID_REV1 0x0121
#define LTC2978_ID_REV2 0x0122
+#define LTC2978A_ID 0x0124
#define LTC3880_ID 0x4000
#define LTC3880_ID_MASK 0xff00
#define LTC3883_ID 0x4300
@@ -363,6 +366,7 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
static const struct i2c_device_id ltc2978_id[] = {
{"ltc2974", ltc2974},
+ {"ltc2977", ltc2977},
{"ltc2978", ltc2978},
{"ltc3880", ltc3880},
{"ltc3883", ltc3883},
@@ -392,7 +396,10 @@ static int ltc2978_probe(struct i2c_client *client,
if (chip_id == LTC2974_ID) {
data->id = ltc2974;
- } else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2) {
+ } else if (chip_id == LTC2977_ID) {
+ data->id = ltc2977;
+ } else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2 ||
+ chip_id == LTC2978A_ID) {
data->id = ltc2978;
} else if ((chip_id & LTC3880_ID_MASK) == LTC3880_ID) {
data->id = ltc3880;
@@ -438,6 +445,7 @@ static int ltc2978_probe(struct i2c_client *client,
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
}
break;
+ case ltc2977:
case ltc2978:
info->read_word_data = ltc2978_read_word_data;
info->pages = LTC2978_NUM_PAGES;
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 9319fcf142d9..3cbf66e9d861 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -97,6 +97,7 @@ struct pmbus_data {
int max_attributes;
int num_attributes;
struct attribute_group group;
+ const struct attribute_group *groups[2];
struct pmbus_sensor *sensors;
@@ -156,7 +157,7 @@ EXPORT_SYMBOL_GPL(pmbus_write_byte);
/*
* _pmbus_write_byte() is similar to pmbus_write_byte(), but checks if
- * a device specific mapping funcion exists and calls it if necessary.
+ * a device specific mapping function exists and calls it if necessary.
*/
static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
{
@@ -348,7 +349,7 @@ static struct _pmbus_status {
static struct pmbus_data *pmbus_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
struct pmbus_data *data = i2c_get_clientdata(client);
const struct pmbus_driver_info *info = data->info;
struct pmbus_sensor *sensor;
@@ -686,7 +687,7 @@ static int pmbus_get_boolean(struct pmbus_data *data, struct pmbus_boolean *b,
if (!s1 && !s2) {
ret = !!regval;
} else if (!s1 || !s2) {
- BUG();
+ WARN(1, "Bad boolean descriptor %p: s1=%p, s2=%p\n", b, s1, s2);
return 0;
} else {
long v1, v2;
@@ -733,7 +734,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
struct pmbus_data *data = i2c_get_clientdata(client);
struct pmbus_sensor *sensor = to_pmbus_sensor(devattr);
ssize_t rv = count;
@@ -1768,22 +1769,16 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
goto out_kfree;
}
- /* Register sysfs hooks */
- ret = sysfs_create_group(&dev->kobj, &data->group);
- if (ret) {
- dev_err(dev, "Failed to create sysfs entries\n");
- goto out_kfree;
- }
- data->hwmon_dev = hwmon_device_register(dev);
+ data->groups[0] = &data->group;
+ data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
+ data, data->groups);
if (IS_ERR(data->hwmon_dev)) {
ret = PTR_ERR(data->hwmon_dev);
dev_err(dev, "Failed to register hwmon device\n");
- goto out_hwmon_device_register;
+ goto out_kfree;
}
return 0;
-out_hwmon_device_register:
- sysfs_remove_group(&dev->kobj, &data->group);
out_kfree:
kfree(data->group.attrs);
return ret;
@@ -1794,7 +1789,6 @@ int pmbus_do_remove(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &data->group);
kfree(data->group.attrs);
return 0;
}
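The pmbus_core hunks above change the sysfs callbacks from to_i2c_client(dev) to to_i2c_client(dev->parent): with hwmon_device_register_with_groups() the attribute files hang off the hwmon class device, whose parent is the registering i2c device. A sketch of the two access paths available inside such a callback (the "qux" name and the output format are illustrative only):

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/kernel.h>

/* dev here is the hwmon class device created by *_register_with_groups() */
static ssize_t qux_show(struct device *dev, struct device_attribute *da,
			char *buf)
{
	/* either the drvdata pointer handed over at registration time ... */
	void *drvdata = dev_get_drvdata(dev);
	/* ... or the registering (parent) i2c device, as pmbus_core uses */
	struct i2c_client *client = to_i2c_client(dev->parent);

	return sprintf(buf, "%s %p\n", client->name, drvdata);
}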
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index dfe6d9527efb..7fa6e7d0b9b6 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -155,7 +155,8 @@ MODULE_DEVICE_TABLE(i2c, tmp401_id);
*/
struct tmp401_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
+ const struct attribute_group *groups[3];
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
@@ -231,8 +232,8 @@ static int tmp401_update_device_reg16(struct i2c_client *client,
static struct tmp401_data *tmp401_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = i2c_get_clientdata(client);
+ struct tmp401_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct tmp401_data *ret = data;
int i, val;
unsigned long next_update;
@@ -350,15 +351,12 @@ static ssize_t store_temp(struct device *dev, struct device_attribute *devattr,
{
int nr = to_sensor_dev_attr_2(devattr)->nr;
int index = to_sensor_dev_attr_2(devattr)->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = tmp401_update_device(dev);
+ struct tmp401_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long val;
u16 reg;
u8 regaddr;
- if (IS_ERR(data))
- return PTR_ERR(data);
-
if (kstrtol(buf, 10, &val))
return -EINVAL;
@@ -405,7 +403,7 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
val = clamp_val(val, temp - 255000, temp);
reg = ((temp - val) + 500) / 1000;
- i2c_smbus_write_byte_data(to_i2c_client(dev), TMP401_TEMP_CRIT_HYST,
+ i2c_smbus_write_byte_data(data->client, TMP401_TEMP_CRIT_HYST,
reg);
data->temp_crit_hyst = reg;
@@ -423,8 +421,8 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
static ssize_t reset_temp_history(struct device *dev,
struct device_attribute *devattr, const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = i2c_get_clientdata(client);
+ struct tmp401_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long val;
if (kstrtol(buf, 10, &val))
@@ -447,8 +445,7 @@ static ssize_t reset_temp_history(struct device *dev,
static ssize_t show_update_interval(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = i2c_get_clientdata(client);
+ struct tmp401_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", data->update_interval);
}
@@ -457,8 +454,8 @@ static ssize_t set_update_interval(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = i2c_get_clientdata(client);
+ struct tmp401_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int err, rate;
@@ -616,10 +613,10 @@ static const struct attribute_group tmp432_group = {
* Begin non sysfs callback code (aka Real code)
*/
-static void tmp401_init_client(struct i2c_client *client)
+static void tmp401_init_client(struct tmp401_data *data,
+ struct i2c_client *client)
{
int config, config_orig;
- struct tmp401_data *data = i2c_get_clientdata(client);
/* Set the conversion rate to 2 Hz */
i2c_smbus_write_byte_data(client, TMP401_CONVERSION_RATE_WRITE, 5);
@@ -705,77 +702,45 @@ static int tmp401_detect(struct i2c_client *client,
return 0;
}
-static int tmp401_remove(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
- struct tmp401_data *data = i2c_get_clientdata(client);
-
- if (data->hwmon_dev)
- hwmon_device_unregister(data->hwmon_dev);
-
- sysfs_remove_group(&dev->kobj, &tmp401_group);
-
- if (data->kind == tmp411)
- sysfs_remove_group(&dev->kobj, &tmp411_group);
-
- if (data->kind == tmp432)
- sysfs_remove_group(&dev->kobj, &tmp432_group);
-
- return 0;
-}
-
static int tmp401_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const char *names[] = { "TMP401", "TMP411", "TMP431", "TMP432" };
struct device *dev = &client->dev;
- int err;
+ struct device *hwmon_dev;
struct tmp401_data *data;
- const char *names[] = { "TMP401", "TMP411", "TMP431", "TMP432" };
+ int groups = 0;
data = devm_kzalloc(dev, sizeof(struct tmp401_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
data->kind = id->driver_data;
/* Initialize the TMP401 chip */
- tmp401_init_client(client);
+ tmp401_init_client(data, client);
/* Register sysfs hooks */
- err = sysfs_create_group(&dev->kobj, &tmp401_group);
- if (err)
- return err;
+ data->groups[groups++] = &tmp401_group;
/* Register additional tmp411 sysfs hooks */
- if (data->kind == tmp411) {
- err = sysfs_create_group(&dev->kobj, &tmp411_group);
- if (err)
- goto exit_remove;
- }
+ if (data->kind == tmp411)
+ data->groups[groups++] = &tmp411_group;
/* Register additional tmp432 sysfs hooks */
- if (data->kind == tmp432) {
- err = sysfs_create_group(&dev->kobj, &tmp432_group);
- if (err)
- goto exit_remove;
- }
+ if (data->kind == tmp432)
+ data->groups[groups++] = &tmp432_group;
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- data->hwmon_dev = NULL;
- goto exit_remove;
- }
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, data->groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
dev_info(dev, "Detected TI %s chip\n", names[data->kind]);
return 0;
-
-exit_remove:
- tmp401_remove(client);
- return err;
}
static struct i2c_driver tmp401_driver = {
@@ -784,7 +749,6 @@ static struct i2c_driver tmp401_driver = {
.name = "tmp401",
},
.probe = tmp401_probe,
- .remove = tmp401_remove,
.id_table = tmp401_id,
.detect = tmp401_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index a3feee332e20..bdcf2dce5ec4 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -1043,7 +1043,7 @@ static struct sensor_device_attribute sda_temp_alarm[] = {
SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
};
-/* get reatime status of all sensors items: voltage, temp, fan */
+/* get realtime status of all sensors items: voltage, temp, fan */
static ssize_t show_alarms_reg(struct device *dev,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 5febb43cb4c1..df585808adb6 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -579,7 +579,7 @@ static ssize_t store_temp23(struct device *dev, struct device_attribute *attr,
return count;
}
-/* get reatime status of all sensors items: voltage, temp, fan */
+/* get realtime status of all sensors items: voltage, temp, fan */
static ssize_t
show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
{
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index b0c30a546ff2..9d63d71214ca 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -808,7 +808,7 @@ show_sf_ctrl(struct device *dev, struct device_attribute *attr, char *buf)
if (nr == TEMP_FAN_MAP) {
val = data->temp_fan_map[index];
} else if (nr == TEMP_PWM_ENABLE) {
- /* +2 to transfrom into 2 and 3 to conform with sysfs intf */
+ /* +2 to transform into 2 and 3 to conform with sysfs intf */
val = ((data->pwm_enable >> index) & 0x01) + 2;
} else if (nr == TEMP_CRUISE) {
val = TEMP_FROM_REG(data->temp_cruise[index] & 0x7f);
@@ -1199,7 +1199,8 @@ static void w83793_init_client(struct i2c_client *client)
static int watchdog_set_timeout(struct w83793_data *data, int timeout)
{
- int ret, mtimeout;
+ unsigned int mtimeout;
+ int ret;
mtimeout = DIV_ROUND_UP(timeout, 60);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index cdcbd8368ed3..80e55305656f 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -436,6 +436,13 @@ config I2C_EG20T
ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
+config I2C_EXYNOS5
+ tristate "Exynos5 high-speed I2C driver"
+ depends on ARCH_EXYNOS5 && OF
+ help
+	  Say Y here to include support for the high-speed I2C controller in the
+	  Exynos5 based Samsung SoCs.
+
config I2C_GPIO
tristate "GPIO-based bitbanging I2C"
depends on GPIOLIB
@@ -665,7 +672,7 @@ config I2C_SH7760
config I2C_SH_MOBILE
tristate "SuperH Mobile I2C Controller"
- depends on SUPERH || ARCH_SHMOBILE
+ depends on SUPERH || ARM || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Renesas SH-Mobile processor.
@@ -768,7 +775,7 @@ config I2C_XLR
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
- depends on ARCH_SHMOBILE && I2C
+ depends on ARM || COMPILE_TEST
help
If you say yes to this option, support will be included for the
R-Car I2C controller.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index d00997f3eb3b..d1ad3712d905 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -42,6 +42,7 @@ i2c-designware-platform-objs := i2c-designware-platdrv.o
obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
i2c-designware-pci-objs := i2c-designware-pcidrv.o
obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o
+obj-$(CONFIG_I2C_EXYNOS5) += i2c-exynos5.o
obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 35a473ba3d81..3b9bd9a3f2b0 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -675,7 +675,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
p_adap->retries = 3;
rc = peripheral_request_list(
- (unsigned short *)dev_get_platdata(&pdev->dev),
+ dev_get_platdata(&pdev->dev),
"i2c-bfin-twi");
if (rc) {
dev_err(&pdev->dev, "Can't setup pin mux!\n");
@@ -723,7 +723,7 @@ out_error_add_adapter:
free_irq(iface->irq, iface);
out_error_req_irq:
out_error_no_irq:
- peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
out_error_pin_mux:
iounmap(iface->regs_base);
out_error_ioremap:
@@ -739,7 +739,7 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev)
i2c_del_adapter(&(iface->adap));
free_irq(iface->irq, iface);
- peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
iounmap(iface->regs_base);
kfree(iface);
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index b2b8aa9adc0e..3e5ea2c87a6e 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -447,7 +447,7 @@ static int cpm_i2c_setup(struct cpm_i2c *cpm)
init_waitqueue_head(&cpm->i2c_wait);
- cpm->irq = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
+ cpm->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (!cpm->irq)
return -EINVAL;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 132369fad4e0..85e8ad6056c4 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -795,7 +795,7 @@ static struct platform_driver davinci_i2c_driver = {
.name = "i2c_davinci",
.owner = THIS_MODULE,
.pm = davinci_i2c_pm_ops,
- .of_match_table = of_match_ptr(davinci_i2c_of_match),
+ .of_match_table = davinci_i2c_of_match,
},
};
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
new file mode 100644
index 000000000000..aca3991b7636
--- /dev/null
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -0,0 +1,774 @@
+/**
+ * i2c-exynos5.c - Samsung Exynos5 I2C Controller Driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+/*
+ * HSI2C controller from Samsung supports 2 modes of operation
+ * 1. Auto mode: the master automatically controls the whole transaction
+ * 2. Manual mode: Software controls the transaction by issuing commands
+ * START, READ, WRITE, STOP, RESTART in I2C_MANUAL_CMD register.
+ *
+ * Operation mode can be selected by setting AUTO_MODE bit in I2C_CONF register
+ *
+ * Special bits are available for both modes of operation to set commands
+ * and for checking transfer status
+ */
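+
+/*
+ * Minimal sketch (illustrative only) of how an auto-mode transfer is
+ * kicked off; this mirrors the final steps of exynos5_i2c_message_start()
+ * below, using the register and bit names defined later in this file:
+ *
+ *   writel(HSI2C_SLV_ADDR_MAS(addr), regs + HSI2C_ADDR);
+ *   writel(len | HSI2C_STOP_AFTER_TRANS | HSI2C_MASTER_RUN,
+ *          regs + HSI2C_AUTO_CONF);
+ */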
+
+/* Register Map */
+#define HSI2C_CTL 0x00
+#define HSI2C_FIFO_CTL 0x04
+#define HSI2C_TRAILIG_CTL 0x08
+#define HSI2C_CLK_CTL 0x0C
+#define HSI2C_CLK_SLOT 0x10
+#define HSI2C_INT_ENABLE 0x20
+#define HSI2C_INT_STATUS 0x24
+#define HSI2C_ERR_STATUS 0x2C
+#define HSI2C_FIFO_STATUS 0x30
+#define HSI2C_TX_DATA 0x34
+#define HSI2C_RX_DATA 0x38
+#define HSI2C_CONF 0x40
+#define HSI2C_AUTO_CONF 0x44
+#define HSI2C_TIMEOUT 0x48
+#define HSI2C_MANUAL_CMD 0x4C
+#define HSI2C_TRANS_STATUS 0x50
+#define HSI2C_TIMING_HS1 0x54
+#define HSI2C_TIMING_HS2 0x58
+#define HSI2C_TIMING_HS3 0x5C
+#define HSI2C_TIMING_FS1 0x60
+#define HSI2C_TIMING_FS2 0x64
+#define HSI2C_TIMING_FS3 0x68
+#define HSI2C_TIMING_SLA 0x6C
+#define HSI2C_ADDR 0x70
+
+/* I2C_CTL Register bits */
+#define HSI2C_FUNC_MODE_I2C (1u << 0)
+#define HSI2C_MASTER (1u << 3)
+#define HSI2C_RXCHON (1u << 6)
+#define HSI2C_TXCHON (1u << 7)
+#define HSI2C_SW_RST (1u << 31)
+
+/* I2C_FIFO_CTL Register bits */
+#define HSI2C_RXFIFO_EN (1u << 0)
+#define HSI2C_TXFIFO_EN (1u << 1)
+#define HSI2C_RXFIFO_TRIGGER_LEVEL(x) ((x) << 4)
+#define HSI2C_TXFIFO_TRIGGER_LEVEL(x) ((x) << 16)
+
+/* As per the user manual, FIFO max depth is 64 bytes */
+#define HSI2C_FIFO_MAX 0x40
+/* default trigger levels for Tx and Rx FIFOs */
+#define HSI2C_DEF_TXFIFO_LVL (HSI2C_FIFO_MAX - 0x30)
+#define HSI2C_DEF_RXFIFO_LVL (HSI2C_FIFO_MAX - 0x10)
+
+/* I2C_TRAILING_CTL Register bits */
+#define HSI2C_TRAILING_COUNT (0xf)
+
+/* I2C_INT_EN Register bits */
+#define HSI2C_INT_TX_ALMOSTEMPTY_EN (1u << 0)
+#define HSI2C_INT_RX_ALMOSTFULL_EN (1u << 1)
+#define HSI2C_INT_TRAILING_EN (1u << 6)
+#define HSI2C_INT_I2C_EN (1u << 9)
+
+/* I2C_INT_STAT Register bits */
+#define HSI2C_INT_TX_ALMOSTEMPTY (1u << 0)
+#define HSI2C_INT_RX_ALMOSTFULL (1u << 1)
+#define HSI2C_INT_TX_UNDERRUN (1u << 2)
+#define HSI2C_INT_TX_OVERRUN (1u << 3)
+#define HSI2C_INT_RX_UNDERRUN (1u << 4)
+#define HSI2C_INT_RX_OVERRUN (1u << 5)
+#define HSI2C_INT_TRAILING (1u << 6)
+#define HSI2C_INT_I2C (1u << 9)
+
+/* I2C_FIFO_STAT Register bits */
+#define HSI2C_RX_FIFO_EMPTY (1u << 24)
+#define HSI2C_RX_FIFO_FULL (1u << 23)
+#define HSI2C_RX_FIFO_LVL(x) ((x >> 16) & 0x7f)
+#define HSI2C_TX_FIFO_EMPTY (1u << 8)
+#define HSI2C_TX_FIFO_FULL (1u << 7)
+#define HSI2C_TX_FIFO_LVL(x) ((x >> 0) & 0x7f)
+
+/* I2C_CONF Register bits */
+#define HSI2C_AUTO_MODE (1u << 31)
+#define HSI2C_10BIT_ADDR_MODE (1u << 30)
+#define HSI2C_HS_MODE (1u << 29)
+
+/* I2C_AUTO_CONF Register bits */
+#define HSI2C_READ_WRITE (1u << 16)
+#define HSI2C_STOP_AFTER_TRANS (1u << 17)
+#define HSI2C_MASTER_RUN (1u << 31)
+
+/* I2C_TIMEOUT Register bits */
+#define HSI2C_TIMEOUT_EN (1u << 31)
+#define HSI2C_TIMEOUT_MASK 0xff
+
+/* I2C_TRANS_STATUS register bits */
+#define HSI2C_MASTER_BUSY (1u << 17)
+#define HSI2C_SLAVE_BUSY (1u << 16)
+#define HSI2C_TIMEOUT_AUTO (1u << 4)
+#define HSI2C_NO_DEV (1u << 3)
+#define HSI2C_NO_DEV_ACK (1u << 2)
+#define HSI2C_TRANS_ABORT (1u << 1)
+#define HSI2C_TRANS_DONE (1u << 0)
+
+/* I2C_ADDR register bits */
+#define HSI2C_SLV_ADDR_SLV(x) ((x & 0x3ff) << 0)
+#define HSI2C_SLV_ADDR_MAS(x) ((x & 0x3ff) << 10)
+#define HSI2C_MASTER_ID(x) ((x & 0xff) << 24)
+#define MASTER_ID(x) ((x & 0x7) + 0x08)
+
+/*
+ * Controller operating frequency, timing values for operation
+ * are calculated against this frequency
+ */
+#define HSI2C_HS_TX_CLOCK 1000000
+#define HSI2C_FS_TX_CLOCK 100000
+#define HSI2C_HIGH_SPD 1
+#define HSI2C_FAST_SPD 0
+
+#define EXYNOS5_I2C_TIMEOUT (msecs_to_jiffies(1000))
+
+struct exynos5_i2c {
+ struct i2c_adapter adap;
+ unsigned int suspended:1;
+
+ struct i2c_msg *msg;
+ struct completion msg_complete;
+ unsigned int msg_ptr;
+
+ unsigned int irq;
+
+ void __iomem *regs;
+ struct clk *clk;
+ struct device *dev;
+ int state;
+
+ spinlock_t lock; /* IRQ synchronization */
+
+ /*
+ * Since the TRANS_DONE bit is cleared on read, and we may read it
+ * either during an IRQ or after a transaction, keep track of its
+ * state here.
+ */
+ int trans_done;
+
+ /* Controller operating frequency */
+ unsigned int fs_clock;
+ unsigned int hs_clock;
+
+ /*
+ * HSI2C Controller can operate in
+ * 1. High speed up to 3.4 Mbps
+ * 2. Fast speed up to 1 Mbps
+ */
+ int speed_mode;
+};
+
+static const struct of_device_id exynos5_i2c_match[] = {
+ { .compatible = "samsung,exynos5-hsi2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos5_i2c_match);
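+
+/*
+ * Hypothetical device tree node consumed by this driver (a sketch; the
+ * compatible string, "clock-frequency" and the "hsi2c" clock name come
+ * from the code below, while the unit address, reg, interrupt and clock
+ * phandle values are placeholders):
+ *
+ *   hsi2c@12ca0000 {
+ *           compatible = "samsung,exynos5-hsi2c";
+ *           reg = <0x12ca0000 0x100>;
+ *           interrupts = <0 60 0>;
+ *           clock-frequency = <100000>;
+ *           clocks = <&clock 0>;
+ *           clock-names = "hsi2c";
+ *   };
+ */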
+
+static void exynos5_i2c_clr_pend_irq(struct exynos5_i2c *i2c)
+{
+ writel(readl(i2c->regs + HSI2C_INT_STATUS),
+ i2c->regs + HSI2C_INT_STATUS);
+}
+
+/*
+ * exynos5_i2c_set_timing: updates the registers with the calculated
+ * timing values
+ *
+ * Returns 0 on success, -EINVAL if the cycle length cannot
+ * be calculated.
+ */
+static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, int mode)
+{
+ u32 i2c_timing_s1;
+ u32 i2c_timing_s2;
+ u32 i2c_timing_s3;
+ u32 i2c_timing_sla;
+ unsigned int t_start_su, t_start_hd;
+ unsigned int t_stop_su;
+ unsigned int t_data_su, t_data_hd;
+ unsigned int t_scl_l, t_scl_h;
+ unsigned int t_sr_release;
+ unsigned int t_ftl_cycle;
+ unsigned int clkin = clk_get_rate(i2c->clk);
+ unsigned int div, utemp0 = 0, utemp1 = 0, clk_cycle;
+ unsigned int op_clk = (mode == HSI2C_HIGH_SPD) ?
+ i2c->hs_clock : i2c->fs_clock;
+
+ /*
+ * FPCLK / FI2C =
+ * (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) + 8 + 2 * FLT_CYCLE
+ * utemp0 = (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2)
+ * utemp1 = (TSCLK_L + TSCLK_H + 2)
+ */
+ t_ftl_cycle = (readl(i2c->regs + HSI2C_CONF) >> 16) & 0x7;
+ utemp0 = (clkin / op_clk) - 8 - 2 * t_ftl_cycle;
+
+ /* CLK_DIV max is 256 */
+ for (div = 0; div < 256; div++) {
+ utemp1 = utemp0 / (div + 1);
+
+ /*
+ * SCL_L and SCL_H each have a max value of 255.
+ * Hence, for clk_cycle to have the right value,
+ * utemp1 has to be less than 512 and more than 4.
+ */
+ if ((utemp1 < 512) && (utemp1 > 4)) {
+ clk_cycle = utemp1 - 2;
+ break;
+ } else if (div == 255) {
+ dev_warn(i2c->dev, "Failed to calculate divisor");
+ return -EINVAL;
+ }
+ }
+
+ t_scl_l = clk_cycle / 2;
+ t_scl_h = clk_cycle / 2;
+ t_start_su = t_scl_l;
+ t_start_hd = t_scl_l;
+ t_stop_su = t_scl_l;
+ t_data_su = t_scl_l / 2;
+ t_data_hd = t_scl_l / 2;
+ t_sr_release = clk_cycle;
+
+ i2c_timing_s1 = t_start_su << 24 | t_start_hd << 16 | t_stop_su << 8;
+ i2c_timing_s2 = t_data_su << 24 | t_scl_l << 8 | t_scl_h << 0;
+ i2c_timing_s3 = div << 16 | t_sr_release << 0;
+ i2c_timing_sla = t_data_hd << 0;
+
+ dev_dbg(i2c->dev, "tSTART_SU: %X, tSTART_HD: %X, tSTOP_SU: %X\n",
+ t_start_su, t_start_hd, t_stop_su);
+ dev_dbg(i2c->dev, "tDATA_SU: %X, tSCL_L: %X, tSCL_H: %X\n",
+ t_data_su, t_scl_l, t_scl_h);
+ dev_dbg(i2c->dev, "nClkDiv: %X, tSR_RELEASE: %X\n",
+ div, t_sr_release);
+ dev_dbg(i2c->dev, "tDATA_HD: %X\n", t_data_hd);
+
+ if (mode == HSI2C_HIGH_SPD) {
+ writel(i2c_timing_s1, i2c->regs + HSI2C_TIMING_HS1);
+ writel(i2c_timing_s2, i2c->regs + HSI2C_TIMING_HS2);
+ writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_HS3);
+ } else {
+ writel(i2c_timing_s1, i2c->regs + HSI2C_TIMING_FS1);
+ writel(i2c_timing_s2, i2c->regs + HSI2C_TIMING_FS2);
+ writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_FS3);
+ }
+ writel(i2c_timing_sla, i2c->regs + HSI2C_TIMING_SLA);
+
+ return 0;
+}
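+
+/*
+ * Worked example (illustrative, assuming a 66 MHz input clock and
+ * FLT_CYCLE = 0): for op_clk = 100 kHz, utemp0 = 660 - 8 = 652.
+ * div = 0 leaves utemp1 = 652 (not below 512); div = 1 gives
+ * utemp1 = 326, so clk_cycle = 324 and t_scl_l = t_scl_h = 162.
+ * Cross-check against the formula above:
+ * (1 + 1) * (162 + 162 + 2) + 8 = 660, i.e. 66 MHz / 660 = 100 kHz.
+ */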
+
+static int exynos5_hsi2c_clock_setup(struct exynos5_i2c *i2c)
+{
+ /*
+ * Configure the Fast speed timing values.
+ * Even High Speed mode initially starts out in Fast mode.
+ */
+ if (exynos5_i2c_set_timing(i2c, HSI2C_FAST_SPD)) {
+ dev_err(i2c->dev, "HSI2C FS Clock set up failed\n");
+ return -EINVAL;
+ }
+
+ /* configure the High speed timing values */
+ if (i2c->speed_mode == HSI2C_HIGH_SPD) {
+ if (exynos5_i2c_set_timing(i2c, HSI2C_HIGH_SPD)) {
+ dev_err(i2c->dev, "HSI2C HS Clock set up failed\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * exynos5_i2c_init: configures the controller for I2C functionality
+ * Programs I2C controller for Master mode operation
+ */
+static void exynos5_i2c_init(struct exynos5_i2c *i2c)
+{
+ u32 i2c_conf = readl(i2c->regs + HSI2C_CONF);
+ u32 i2c_timeout = readl(i2c->regs + HSI2C_TIMEOUT);
+
+ /* Clear to disable Timeout */
+ i2c_timeout &= ~HSI2C_TIMEOUT_EN;
+ writel(i2c_timeout, i2c->regs + HSI2C_TIMEOUT);
+
+ writel((HSI2C_FUNC_MODE_I2C | HSI2C_MASTER),
+ i2c->regs + HSI2C_CTL);
+ writel(HSI2C_TRAILING_COUNT, i2c->regs + HSI2C_TRAILIG_CTL);
+
+ if (i2c->speed_mode == HSI2C_HIGH_SPD) {
+ writel(HSI2C_MASTER_ID(MASTER_ID(i2c->adap.nr)),
+ i2c->regs + HSI2C_ADDR);
+ i2c_conf |= HSI2C_HS_MODE;
+ }
+
+ writel(i2c_conf | HSI2C_AUTO_MODE, i2c->regs + HSI2C_CONF);
+}
+
+static void exynos5_i2c_reset(struct exynos5_i2c *i2c)
+{
+ u32 i2c_ctl;
+
+ /* Set and clear the bit for reset */
+ i2c_ctl = readl(i2c->regs + HSI2C_CTL);
+ i2c_ctl |= HSI2C_SW_RST;
+ writel(i2c_ctl, i2c->regs + HSI2C_CTL);
+
+ i2c_ctl = readl(i2c->regs + HSI2C_CTL);
+ i2c_ctl &= ~HSI2C_SW_RST;
+ writel(i2c_ctl, i2c->regs + HSI2C_CTL);
+
+ /* We don't expect calculations to fail during the run */
+ exynos5_hsi2c_clock_setup(i2c);
+ /* Initialize the configure registers */
+ exynos5_i2c_init(i2c);
+}
+
+/*
+ * exynos5_i2c_irq: top level IRQ servicing routine
+ *
+ * The INT_STATUS register gives the interrupt details. Further, the
+ * FIFO_STATUS or TRANS_STATUS registers are to be checked for the
+ * detailed state of the bus.
+ */
+static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
+{
+ struct exynos5_i2c *i2c = dev_id;
+ u32 fifo_level, int_status, fifo_status, trans_status;
+ unsigned char byte;
+ int len = 0;
+
+ i2c->state = -EINVAL;
+
+ spin_lock(&i2c->lock);
+
+ int_status = readl(i2c->regs + HSI2C_INT_STATUS);
+ writel(int_status, i2c->regs + HSI2C_INT_STATUS);
+ fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS);
+
+ /* handle interrupt related to the transfer status */
+ if (int_status & HSI2C_INT_I2C) {
+ trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
+ if (trans_status & HSI2C_NO_DEV_ACK) {
+ dev_dbg(i2c->dev, "No ACK from device\n");
+ i2c->state = -ENXIO;
+ goto stop;
+ } else if (trans_status & HSI2C_NO_DEV) {
+ dev_dbg(i2c->dev, "No device\n");
+ i2c->state = -ENXIO;
+ goto stop;
+ } else if (trans_status & HSI2C_TRANS_ABORT) {
+ dev_dbg(i2c->dev, "Deal with arbitration loss\n");
+ i2c->state = -EAGAIN;
+ goto stop;
+ } else if (trans_status & HSI2C_TIMEOUT_AUTO) {
+ dev_dbg(i2c->dev, "Accessing device timed out\n");
+ i2c->state = -EAGAIN;
+ goto stop;
+ } else if (trans_status & HSI2C_TRANS_DONE) {
+ i2c->trans_done = 1;
+ i2c->state = 0;
+ }
+ }
+
+ if ((i2c->msg->flags & I2C_M_RD) && (int_status &
+ (HSI2C_INT_TRAILING | HSI2C_INT_RX_ALMOSTFULL))) {
+ fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS);
+ fifo_level = HSI2C_RX_FIFO_LVL(fifo_status);
+ len = min(fifo_level, i2c->msg->len - i2c->msg_ptr);
+
+ while (len > 0) {
+ byte = (unsigned char)
+ readl(i2c->regs + HSI2C_RX_DATA);
+ i2c->msg->buf[i2c->msg_ptr++] = byte;
+ len--;
+ }
+ i2c->state = 0;
+ } else if (int_status & HSI2C_INT_TX_ALMOSTEMPTY) {
+ fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS);
+ fifo_level = HSI2C_TX_FIFO_LVL(fifo_status);
+
+ len = HSI2C_FIFO_MAX - fifo_level;
+ if (len > (i2c->msg->len - i2c->msg_ptr))
+ len = i2c->msg->len - i2c->msg_ptr;
+
+ while (len > 0) {
+ byte = i2c->msg->buf[i2c->msg_ptr++];
+ writel(byte, i2c->regs + HSI2C_TX_DATA);
+ len--;
+ }
+ i2c->state = 0;
+ }
+
+ stop:
+ if ((i2c->trans_done && (i2c->msg->len == i2c->msg_ptr)) ||
+ (i2c->state < 0)) {
+ writel(0, i2c->regs + HSI2C_INT_ENABLE);
+ exynos5_i2c_clr_pend_irq(i2c);
+ complete(&i2c->msg_complete);
+ }
+
+ spin_unlock(&i2c->lock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * exynos5_i2c_wait_bus_idle
+ *
+ * Wait for the bus to go idle, indicated by the MASTER_BUSY bit being
+ * cleared.
+ *
+ * Returns -EBUSY if the bus cannot be brought to idle
+ */
+static int exynos5_i2c_wait_bus_idle(struct exynos5_i2c *i2c)
+{
+ unsigned long stop_time;
+ u32 trans_status;
+
+ /* wait for 100 milliseconds for the bus to be idle */
+ stop_time = jiffies + msecs_to_jiffies(100) + 1;
+ do {
+ trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
+ if (!(trans_status & HSI2C_MASTER_BUSY))
+ return 0;
+
+ usleep_range(50, 200);
+ } while (time_before(jiffies, stop_time));
+
+ return -EBUSY;
+}
+
+/*
+ * exynos5_i2c_message_start: Configures the bus and starts the xfer
+ * i2c: struct exynos5_i2c pointer for the current bus
+ * stop: Enables stop after transfer if set. Set for the last transfer
+ * in the list of messages.
+ *
+ * Configures the bus for read/write function
+ * Sets chip address to talk to, message length to be sent.
+ * Enables appropriate interrupts and sends start xfer command.
+ */
+static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
+{
+ u32 i2c_ctl;
+ u32 int_en = HSI2C_INT_I2C_EN;
+ u32 i2c_auto_conf = 0;
+ u32 fifo_ctl;
+ unsigned long flags;
+
+ i2c_ctl = readl(i2c->regs + HSI2C_CTL);
+ i2c_ctl &= ~(HSI2C_TXCHON | HSI2C_RXCHON);
+ fifo_ctl = HSI2C_RXFIFO_EN | HSI2C_TXFIFO_EN;
+
+ if (i2c->msg->flags & I2C_M_RD) {
+ i2c_ctl |= HSI2C_RXCHON;
+
+ i2c_auto_conf = HSI2C_READ_WRITE;
+
+ fifo_ctl |= HSI2C_RXFIFO_TRIGGER_LEVEL(HSI2C_DEF_TXFIFO_LVL);
+ int_en |= (HSI2C_INT_RX_ALMOSTFULL_EN |
+ HSI2C_INT_TRAILING_EN);
+ } else {
+ i2c_ctl |= HSI2C_TXCHON;
+
+ fifo_ctl |= HSI2C_TXFIFO_TRIGGER_LEVEL(HSI2C_DEF_RXFIFO_LVL);
+ int_en |= HSI2C_INT_TX_ALMOSTEMPTY_EN;
+ }
+
+ writel(HSI2C_SLV_ADDR_MAS(i2c->msg->addr), i2c->regs + HSI2C_ADDR);
+
+ writel(fifo_ctl, i2c->regs + HSI2C_FIFO_CTL);
+ writel(i2c_ctl, i2c->regs + HSI2C_CTL);
+
+
+ /*
+ * Enable interrupts before starting the transfer so that we don't
+ * miss any INT_I2C interrupts.
+ */
+ spin_lock_irqsave(&i2c->lock, flags);
+ writel(int_en, i2c->regs + HSI2C_INT_ENABLE);
+
+ if (stop == 1)
+ i2c_auto_conf |= HSI2C_STOP_AFTER_TRANS;
+ i2c_auto_conf |= i2c->msg->len;
+ i2c_auto_conf |= HSI2C_MASTER_RUN;
+ writel(i2c_auto_conf, i2c->regs + HSI2C_AUTO_CONF);
+ spin_unlock_irqrestore(&i2c->lock, flags);
+}
+
+static int exynos5_i2c_xfer_msg(struct exynos5_i2c *i2c,
+ struct i2c_msg *msgs, int stop)
+{
+ unsigned long timeout;
+ int ret;
+
+ i2c->msg = msgs;
+ i2c->msg_ptr = 0;
+ i2c->trans_done = 0;
+
+ INIT_COMPLETION(i2c->msg_complete);
+
+ exynos5_i2c_message_start(i2c, stop);
+
+ timeout = wait_for_completion_timeout(&i2c->msg_complete,
+ EXYNOS5_I2C_TIMEOUT);
+ if (timeout == 0)
+ ret = -ETIMEDOUT;
+ else
+ ret = i2c->state;
+
+ /*
+ * If this is the last message to be transferred (stop == 1),
+ * then check if the bus can be brought back to idle.
+ */
+ if (ret == 0 && stop)
+ ret = exynos5_i2c_wait_bus_idle(i2c);
+
+ if (ret < 0) {
+ exynos5_i2c_reset(i2c);
+ if (ret == -ETIMEDOUT)
+ dev_warn(i2c->dev, "%s timeout\n",
+ (msgs->flags & I2C_M_RD) ? "rx" : "tx");
+ }
+
+ /* Return the state as in interrupt routine */
+ return ret;
+}
+
+static int exynos5_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct exynos5_i2c *i2c = (struct exynos5_i2c *)adap->algo_data;
+ int i = 0, ret = 0, stop = 0;
+
+ if (i2c->suspended) {
+ dev_err(i2c->dev, "HS-I2C is not initialized.\n");
+ return -EIO;
+ }
+
+ clk_prepare_enable(i2c->clk);
+
+ for (i = 0; i < num; i++, msgs++) {
+ stop = (i == num - 1);
+
+ ret = exynos5_i2c_xfer_msg(i2c, msgs, stop);
+
+ if (ret < 0)
+ goto out;
+ }
+
+ if (i == num) {
+ ret = num;
+ } else {
+ /* Only one message, cannot access the device */
+ if (i == 1)
+ ret = -EREMOTEIO;
+ else
+ ret = i;
+
+ dev_warn(i2c->dev, "xfer message failed\n");
+ }
+
+ out:
+ clk_disable_unprepare(i2c->clk);
+ return ret;
+}
+
+static u32 exynos5_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static const struct i2c_algorithm exynos5_i2c_algorithm = {
+ .master_xfer = exynos5_i2c_xfer,
+ .functionality = exynos5_i2c_func,
+};
+
+static int exynos5_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct exynos5_i2c *i2c;
+ struct resource *mem;
+ unsigned int op_clock;
+ int ret;
+
+ i2c = devm_kzalloc(&pdev->dev, sizeof(struct exynos5_i2c), GFP_KERNEL);
+ if (!i2c) {
+ dev_err(&pdev->dev, "no memory for state\n");
+ return -ENOMEM;
+ }
+
+ if (of_property_read_u32(np, "clock-frequency", &op_clock)) {
+ i2c->speed_mode = HSI2C_FAST_SPD;
+ i2c->fs_clock = HSI2C_FS_TX_CLOCK;
+ } else {
+ if (op_clock >= HSI2C_HS_TX_CLOCK) {
+ i2c->speed_mode = HSI2C_HIGH_SPD;
+ i2c->fs_clock = HSI2C_FS_TX_CLOCK;
+ i2c->hs_clock = op_clock;
+ } else {
+ i2c->speed_mode = HSI2C_FAST_SPD;
+ i2c->fs_clock = op_clock;
+ }
+ }
+
+ strlcpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name));
+ i2c->adap.owner = THIS_MODULE;
+ i2c->adap.algo = &exynos5_i2c_algorithm;
+ i2c->adap.retries = 3;
+
+ i2c->dev = &pdev->dev;
+ i2c->clk = devm_clk_get(&pdev->dev, "hsi2c");
+ if (IS_ERR(i2c->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ return -ENOENT;
+ }
+
+ clk_prepare_enable(i2c->clk);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(i2c->regs)) {
+ ret = PTR_ERR(i2c->regs);
+ goto err_clk;
+ }
+
+ i2c->adap.dev.of_node = np;
+ i2c->adap.algo_data = i2c;
+ i2c->adap.dev.parent = &pdev->dev;
+
+ /* Clear pending interrupts from u-boot or misc causes */
+ exynos5_i2c_clr_pend_irq(i2c);
+
+ spin_lock_init(&i2c->lock);
+ init_completion(&i2c->msg_complete);
+
+ i2c->irq = ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ dev_err(&pdev->dev, "cannot find HS-I2C IRQ\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
+
+ ret = devm_request_irq(&pdev->dev, i2c->irq, exynos5_i2c_irq,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&pdev->dev), i2c);
+
+ if (ret != 0) {
+ dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", i2c->irq);
+ goto err_clk;
+ }
+
+ ret = exynos5_hsi2c_clock_setup(i2c);
+ if (ret)
+ goto err_clk;
+
+ exynos5_i2c_init(i2c);
+
+ ret = i2c_add_adapter(&i2c->adap);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add bus to i2c core\n");
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, i2c);
+
+ clk_disable_unprepare(i2c->clk);
+
+ return 0;
+
+ err_clk:
+ clk_disable_unprepare(i2c->clk);
+ return ret;
+}
+
+static int exynos5_i2c_remove(struct platform_device *pdev)
+{
+ struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&i2c->adap);
+ clk_disable_unprepare(i2c->clk);
+
+ return 0;
+}
+
+static int exynos5_i2c_suspend_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
+
+ i2c->suspended = 1;
+
+ return 0;
+}
+
+static int exynos5_i2c_resume_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ clk_prepare_enable(i2c->clk);
+
+ ret = exynos5_hsi2c_clock_setup(i2c);
+ if (ret) {
+ clk_disable_unprepare(i2c->clk);
+ return ret;
+ }
+
+ exynos5_i2c_init(i2c);
+ clk_disable_unprepare(i2c->clk);
+ i2c->suspended = 0;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(exynos5_i2c_dev_pm_ops, exynos5_i2c_suspend_noirq,
+ exynos5_i2c_resume_noirq);
+
+static struct platform_driver exynos5_i2c_driver = {
+ .probe = exynos5_i2c_probe,
+ .remove = exynos5_i2c_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "exynos5-hsi2c",
+ .pm = &exynos5_i2c_dev_pm_ops,
+ .of_match_table = exynos5_i2c_match,
+ },
+};
+
+module_platform_driver(exynos5_i2c_driver);
+
+MODULE_DESCRIPTION("Exynos5 HS-I2C Bus driver");
+MODULE_AUTHOR("Naveen Krishna Chatradhi, <ch.naveen@samsung.com>");
+MODULE_AUTHOR("Taekgyun Ko, <taeggyun.ko@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index bfa02c6c2dda..d9f7e186a4c7 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
struct i2c_gpio_private_data {
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index ff3caa0c28cd..f7444100f397 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -41,6 +41,8 @@
#include <asm/irq.h>
#include <linux/io.h>
#include <linux/i2c.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include "i2c-ibm_iic.h"
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index b80c76888cab..b6a741caf4f6 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -17,6 +17,8 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index d3e9cc3153a9..8be7e42aa4de 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -911,7 +911,7 @@ static struct platform_driver mv64xxx_i2c_driver = {
.driver = {
.owner = THIS_MODULE,
.name = MV64XXX_I2C_CTLR_NAME,
- .of_match_table = of_match_ptr(mv64xxx_i2c_of_match_table),
+ .of_match_table = mv64xxx_i2c_of_match_table,
},
};
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index b7c857774708..99fe86e24fba 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -1,6 +1,7 @@
/*
* Freescale MXS I2C bus driver
*
+ * Copyright (C) 2012-2013 Marek Vasut <marex@denx.de>
* Copyright (C) 2011-2012 Wolfram Sang, Pengutronix e.K.
*
* based on a (non-working) driver which was:
@@ -34,10 +35,12 @@
#define MXS_I2C_CTRL0 (0x00)
#define MXS_I2C_CTRL0_SET (0x04)
+#define MXS_I2C_CTRL0_CLR (0x08)
#define MXS_I2C_CTRL0_SFTRST 0x80000000
#define MXS_I2C_CTRL0_RUN 0x20000000
#define MXS_I2C_CTRL0_SEND_NAK_ON_LAST 0x02000000
+#define MXS_I2C_CTRL0_PIO_MODE 0x01000000
#define MXS_I2C_CTRL0_RETAIN_CLOCK 0x00200000
#define MXS_I2C_CTRL0_POST_SEND_STOP 0x00100000
#define MXS_I2C_CTRL0_PRE_SEND_START 0x00080000
@@ -64,13 +67,13 @@
#define MXS_I2C_CTRL1_SLAVE_IRQ 0x01
#define MXS_I2C_STAT (0x50)
+#define MXS_I2C_STAT_GOT_A_NAK 0x10000000
#define MXS_I2C_STAT_BUS_BUSY 0x00000800
#define MXS_I2C_STAT_CLK_GEN_BUSY 0x00000400
-#define MXS_I2C_DATA (0xa0)
+#define MXS_I2C_DATA(i2c) ((i2c->dev_type == MXS_I2C_V1) ? 0x60 : 0xa0)
-#define MXS_I2C_DEBUG0 (0xb0)
-#define MXS_I2C_DEBUG0_CLR (0xb8)
+#define MXS_I2C_DEBUG0_CLR(i2c) ((i2c->dev_type == MXS_I2C_V1) ? 0x78 : 0xb8)
#define MXS_I2C_DEBUG0_DMAREQ 0x80000000
@@ -95,10 +98,17 @@
#define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \
MXS_I2C_CTRL0_MASTER_MODE)
+enum mxs_i2c_devtype {
+ MXS_I2C_UNKNOWN = 0,
+ MXS_I2C_V1,
+ MXS_I2C_V2,
+};
+
/**
* struct mxs_i2c_dev - per device, private MXS-I2C data
*
* @dev: driver model device node
+ * @dev_type: distinguish i.MX23/i.MX28 features
* @regs: IO registers pointer
* @cmd_complete: completion object for transaction wait
* @cmd_err: error code for last transaction
@@ -106,6 +116,7 @@
*/
struct mxs_i2c_dev {
struct device *dev;
+ enum mxs_i2c_devtype dev_type;
void __iomem *regs;
struct completion cmd_complete;
int cmd_err;
@@ -291,48 +302,11 @@ write_init_pio_fail:
return -EINVAL;
}
-static int mxs_i2c_pio_wait_dmareq(struct mxs_i2c_dev *i2c)
+static int mxs_i2c_pio_wait_xfer_end(struct mxs_i2c_dev *i2c)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
- while (!(readl(i2c->regs + MXS_I2C_DEBUG0) &
- MXS_I2C_DEBUG0_DMAREQ)) {
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
- cond_resched();
- }
-
- return 0;
-}
-
-static int mxs_i2c_pio_wait_cplt(struct mxs_i2c_dev *i2c, int last)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
- /*
- * We do not use interrupts in the PIO mode. Due to the
- * maximum transfer length being 8 bytes in PIO mode, the
- * overhead of interrupt would be too large and this would
- * neglect the gain from using the PIO mode.
- */
-
- while (!(readl(i2c->regs + MXS_I2C_CTRL1) &
- MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ)) {
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
- cond_resched();
- }
-
- writel(MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ,
- i2c->regs + MXS_I2C_CTRL1_CLR);
-
- /*
- * When ending a transfer with a stop, we have to wait for the bus to
- * go idle before we report the transfer as completed. Otherwise the
- * start of the next transfer may race with the end of the current one.
- */
- while (last && (readl(i2c->regs + MXS_I2C_STAT) &
- (MXS_I2C_STAT_BUS_BUSY | MXS_I2C_STAT_CLK_GEN_BUSY))) {
+ while (readl(i2c->regs + MXS_I2C_CTRL0) & MXS_I2C_CTRL0_RUN) {
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
cond_resched();
@@ -370,106 +344,215 @@ static void mxs_i2c_pio_trigger_cmd(struct mxs_i2c_dev *i2c, u32 cmd)
writel(reg, i2c->regs + MXS_I2C_CTRL0);
}
+/*
+ * Start WRITE transaction on the I2C bus. The i.MX23 datasheet's
+ * CTRL0::PIO_MODE bit description clarifies the order in which the registers
+ * must be written during PIO mode operation. First, the CTRL0 register has
+ * to be programmed with all the necessary bits but the RUN bit. Then the
+ * payload has to be written into the DATA register. Finally, the transmission
+ * is executed by setting the RUN bit in CTRL0.
+ */
+static void mxs_i2c_pio_trigger_write_cmd(struct mxs_i2c_dev *i2c, u32 cmd,
+ u32 data)
+{
+ writel(cmd, i2c->regs + MXS_I2C_CTRL0);
+
+ if (i2c->dev_type == MXS_I2C_V1)
+ writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_SET);
+
+ writel(data, i2c->regs + MXS_I2C_DATA(i2c));
+ writel(MXS_I2C_CTRL0_RUN, i2c->regs + MXS_I2C_CTRL0_SET);
+}
+
static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
struct i2c_msg *msg, uint32_t flags)
{
struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
uint32_t addr_data = msg->addr << 1;
uint32_t data = 0;
- int i, shifts_left, ret;
+ int i, ret, xlen = 0, xmit = 0;
+ uint32_t start;
/* Mute IRQs coming from this block. */
writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_CLR);
+ /*
+ * MX23 idea:
+ * - Enable CTRL0::PIO_MODE (1 << 24)
+ * - Enable CTRL1::ACK_MODE (1 << 27)
+ *
+ * WARNING! The MX23 is broken in some way, even if it claims
+ * to support PIO, when we try to transfer any amount of data
+ * that is not aligned to 4 bytes, the DMA engine will have
+ * bits in DEBUG1::DMA_BYTES_ENABLES still set even after the
+ * transfer. This in turn will mess up the next transfer, as
+ * the block will emit a one-byte write onto the bus, terminated
+ * with a NAK+STOP. A possible workaround is to reset the IP
+ * block after every PIO transmission, which might just work.
+ *
+ * NOTE: The CTRL0::PIO_MODE description is important, since
+ * it outlines how the PIO mode is really supposed to work.
+ */
if (msg->flags & I2C_M_RD) {
+ /*
+ * PIO READ transfer:
+ *
+ * This transfer MUST be limited to 4 bytes maximum. It is not
+ * possible to transfer more than four bytes via PIO, since we
+ * can not in any way make sure we can read the data from the
+ * DATA register fast enough. Besides, the RX FIFO is only four
+ * bytes deep, thus we can only really read up to four bytes at
+ * a time. Finally, there is no bit indicating that new data has
+ * arrived in the FIFO and can thus be fetched from the DATA
+ * register.
+ */
+ BUG_ON(msg->len > 4);
+
addr_data |= I2C_SMBUS_READ;
/* SELECT command. */
- mxs_i2c_pio_trigger_cmd(i2c, MXS_CMD_I2C_SELECT);
-
- ret = mxs_i2c_pio_wait_dmareq(i2c);
- if (ret)
- return ret;
-
- writel(addr_data, i2c->regs + MXS_I2C_DATA);
- writel(MXS_I2C_DEBUG0_DMAREQ, i2c->regs + MXS_I2C_DEBUG0_CLR);
+ mxs_i2c_pio_trigger_write_cmd(i2c, MXS_CMD_I2C_SELECT,
+ addr_data);
- ret = mxs_i2c_pio_wait_cplt(i2c, 0);
- if (ret)
- return ret;
-
- if (mxs_i2c_pio_check_error_state(i2c))
+ ret = mxs_i2c_pio_wait_xfer_end(i2c);
+ if (ret) {
+ dev_err(i2c->dev,
+ "PIO: Failed to send SELECT command!\n");
goto cleanup;
+ }
/* READ command. */
mxs_i2c_pio_trigger_cmd(i2c,
MXS_CMD_I2C_READ | flags |
MXS_I2C_CTRL0_XFER_COUNT(msg->len));
+ ret = mxs_i2c_pio_wait_xfer_end(i2c);
+ if (ret) {
+ dev_err(i2c->dev,
+ "PIO: Failed to send READ command!\n");
+ goto cleanup;
+ }
+
+ data = readl(i2c->regs + MXS_I2C_DATA(i2c));
for (i = 0; i < msg->len; i++) {
- if ((i & 3) == 0) {
- ret = mxs_i2c_pio_wait_dmareq(i2c);
- if (ret)
- return ret;
- data = readl(i2c->regs + MXS_I2C_DATA);
- writel(MXS_I2C_DEBUG0_DMAREQ,
- i2c->regs + MXS_I2C_DEBUG0_CLR);
- }
msg->buf[i] = data & 0xff;
data >>= 8;
}
} else {
+ /*
+ * PIO WRITE transfer:
+ *
+ * The code below implements clock stretching to circumvent
+ * the possibility of the kernel not being able to supply data
+ * fast enough. It is possible to transfer an arbitrary amount
+ * of data using PIO write.
+ */
addr_data |= I2C_SMBUS_WRITE;
- /* WRITE command. */
- mxs_i2c_pio_trigger_cmd(i2c,
- MXS_CMD_I2C_WRITE | flags |
- MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1));
-
/*
* The LSB of data buffer is the first byte blasted across
* the bus. Higher order bytes follow. Thus the following
* filling schematic.
*/
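+ /*
+ * Worked example (illustrative): a two-byte write {0x12, 0x34} to
+ * address 0x28 gives addr_data = (0x28 << 1) | I2C_SMBUS_WRITE = 0x50
+ * and ends up as a single PIO command with xlen = 3 and
+ * DATA = 0x00341250, i.e. the address byte in the LSB is clocked
+ * out first.
+ */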
+
data = addr_data << 24;
+
+ /* Start the transfer with START condition. */
+ start = MXS_I2C_CTRL0_PRE_SEND_START;
+
+ /* If the transfer is long, use clock stretching. */
+ if (msg->len > 3)
+ start |= MXS_I2C_CTRL0_RETAIN_CLOCK;
+
for (i = 0; i < msg->len; i++) {
data >>= 8;
data |= (msg->buf[i] << 24);
- if ((i & 3) == 2) {
- ret = mxs_i2c_pio_wait_dmareq(i2c);
- if (ret)
- return ret;
- writel(data, i2c->regs + MXS_I2C_DATA);
- writel(MXS_I2C_DEBUG0_DMAREQ,
- i2c->regs + MXS_I2C_DEBUG0_CLR);
+
+ xmit = 0;
+
+ /* This is the last transfer of the message. */
+ if (i + 1 == msg->len) {
+ /* Add optional STOP flag. */
+ start |= flags;
+ /* Remove RETAIN_CLOCK bit. */
+ start &= ~MXS_I2C_CTRL0_RETAIN_CLOCK;
+ xmit = 1;
}
- }
- shifts_left = 24 - (i & 3) * 8;
- if (shifts_left) {
- data >>= shifts_left;
- ret = mxs_i2c_pio_wait_dmareq(i2c);
- if (ret)
- return ret;
- writel(data, i2c->regs + MXS_I2C_DATA);
+ /* Four bytes are ready in the "data" variable. */
+ if ((i & 3) == 2)
+ xmit = 1;
+
+ /* Nothing interesting happened, continue stuffing. */
+ if (!xmit)
+ continue;
+
+ /*
+ * Compute the size of the transfer and shift the
+ * data accordingly.
+ *
+ * i = (4k + 0) .... xlen = 2
+ * i = (4k + 1) .... xlen = 3
+ * i = (4k + 2) .... xlen = 4
+ * i = (4k + 3) .... xlen = 1
+ */
+
+ if ((i % 4) == 3)
+ xlen = 1;
+ else
+ xlen = (i % 4) + 2;
+
+ data >>= (4 - xlen) * 8;
+
+ dev_dbg(i2c->dev,
+ "PIO: len=%i pos=%i total=%i [W%s%s%s]\n",
+ xlen, i, msg->len,
+ start & MXS_I2C_CTRL0_PRE_SEND_START ? "S" : "",
+ start & MXS_I2C_CTRL0_POST_SEND_STOP ? "E" : "",
+ start & MXS_I2C_CTRL0_RETAIN_CLOCK ? "C" : "");
+
writel(MXS_I2C_DEBUG0_DMAREQ,
- i2c->regs + MXS_I2C_DEBUG0_CLR);
+ i2c->regs + MXS_I2C_DEBUG0_CLR(i2c));
+
+ mxs_i2c_pio_trigger_write_cmd(i2c,
+ start | MXS_I2C_CTRL0_MASTER_MODE |
+ MXS_I2C_CTRL0_DIRECTION |
+ MXS_I2C_CTRL0_XFER_COUNT(xlen), data);
+
+ /* The START condition is sent only once. */
+ start &= ~MXS_I2C_CTRL0_PRE_SEND_START;
+
+ /* Wait for the end of the transfer. */
+ ret = mxs_i2c_pio_wait_xfer_end(i2c);
+ if (ret) {
+ dev_err(i2c->dev,
+ "PIO: Failed to finish WRITE cmd!\n");
+ break;
+ }
+
+ /* Check NAK here. */
+ ret = readl(i2c->regs + MXS_I2C_STAT) &
+ MXS_I2C_STAT_GOT_A_NAK;
+ if (ret) {
+ ret = -ENXIO;
+ goto cleanup;
+ }
}
}
- ret = mxs_i2c_pio_wait_cplt(i2c, flags & MXS_I2C_CTRL0_POST_SEND_STOP);
- if (ret)
- return ret;
-
/* make sure we capture any occurred error into cmd_err */
- mxs_i2c_pio_check_error_state(i2c);
+ ret = mxs_i2c_pio_check_error_state(i2c);
cleanup:
/* Clear any dangling IRQs and re-enable interrupts. */
writel(MXS_I2C_IRQ_MASK, i2c->regs + MXS_I2C_CTRL1_CLR);
writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
- return 0;
+ /* Clear the PIO_MODE on i.MX23 */
+ if (i2c->dev_type == MXS_I2C_V1)
+ writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_CLR);
+
+ return ret;
}
/*
@@ -479,8 +562,9 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
int stop)
{
struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
- int ret, err;
+ int ret;
int flags;
+ int use_pio = 0;
flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
@@ -491,19 +575,21 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
return -EINVAL;
/*
- * The current boundary to select between PIO/DMA transfer method
- * is set to 8 bytes, transfers shorter than 8 bytes are transfered
- * using PIO mode while longer transfers use DMA. The 8 byte border is
- * based on this empirical measurement and a lot of previous frobbing.
+ * The MX28 I2C IP block can only do PIO READ for transfers of up to
+ * 4 bytes in length. The write transfer is not limited as it can use
+ * clock stretching to avoid FIFO underruns.
*/
+ if ((msg->flags & I2C_M_RD) && (msg->len <= 4))
+ use_pio = 1;
+ if (!(msg->flags & I2C_M_RD) && (msg->len < 7))
+ use_pio = 1;
+
i2c->cmd_err = 0;
- if (0) { /* disable PIO mode until a proper fix is made */
+ if (use_pio) {
ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
- if (ret) {
- err = mxs_i2c_reset(i2c);
- if (err)
- return err;
- }
+ /* No need to reset the block if NAK was received. */
+ if (ret && (ret != -ENXIO))
+ mxs_i2c_reset(i2c);
} else {
INIT_COMPLETION(i2c->cmd_complete);
ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
@@ -514,9 +600,11 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
msecs_to_jiffies(1000));
if (ret == 0)
goto timeout;
+
+ ret = i2c->cmd_err;
}
- if (i2c->cmd_err == -ENXIO) {
+ if (ret == -ENXIO) {
/*
* If the transfer fails with a NAK from the slave the
* controller halts until it gets told to return to idle state.
@@ -525,7 +613,19 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
i2c->regs + MXS_I2C_CTRL1_SET);
}
- ret = i2c->cmd_err;
+ /*
+ * WARNING!
+ * The i.MX23 is strange. After each and every operation, its I2C IP
+ * block must be reset, otherwise the IP block will misbehave. This can
+ * be observed on the bus by the block sending out one single byte onto
+ * the bus. In case such an error happens, bit 27 will be set in the
+ * DEBUG0 register. This bit is not documented in the i.MX23 datasheet
+ * and is marked as "TBD" instead. To reset this bit to a correct state,
+ * reset the whole block. Since the block reset does not take long, do
+ * reset the block after every transfer to play safe.
+ */
+ if (i2c->dev_type == MXS_I2C_V1)
+ mxs_i2c_reset(i2c);
dev_dbg(i2c->dev, "Done with err=%d\n", ret);
@@ -680,8 +780,28 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
return 0;
}
+static struct platform_device_id mxs_i2c_devtype[] = {
+ {
+ .name = "imx23-i2c",
+ .driver_data = MXS_I2C_V1,
+ }, {
+ .name = "imx28-i2c",
+ .driver_data = MXS_I2C_V2,
+ }, { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, mxs_i2c_devtype);
+
+static const struct of_device_id mxs_i2c_dt_ids[] = {
+ { .compatible = "fsl,imx23-i2c", .data = &mxs_i2c_devtype[0], },
+ { .compatible = "fsl,imx28-i2c", .data = &mxs_i2c_devtype[1], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
+
static int mxs_i2c_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(mxs_i2c_dt_ids, &pdev->dev);
struct device *dev = &pdev->dev;
struct mxs_i2c_dev *i2c;
struct i2c_adapter *adap;
@@ -693,6 +813,11 @@ static int mxs_i2c_probe(struct platform_device *pdev)
if (!i2c)
return -ENOMEM;
+ if (of_id) {
+ const struct platform_device_id *device_id = of_id->data;
+ i2c->dev_type = device_id->driver_data;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -768,12 +893,6 @@ static int mxs_i2c_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id mxs_i2c_dt_ids[] = {
- { .compatible = "fsl,imx28-i2c", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
-
static struct platform_driver mxs_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
@@ -796,6 +915,7 @@ static void __exit mxs_i2c_exit(void)
}
module_exit(mxs_i2c_exit);
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
MODULE_DESCRIPTION("MXS I2C Bus Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 1a9ea25f2314..c9a352f0a9a5 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -23,6 +23,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/of.h>
#define I2C_PNX_TIMEOUT_DEFAULT 10 /* msec */
#define I2C_PNX_SPEED_KHZ_DEFAULT 100
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 37e8cfad625b..8c87f4a9793b 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -27,6 +27,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/of_irq.h>
#include <asm/prom.h>
#include <asm/pmac_low_i2c.h>
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d2fe11da5e82..2c2fd7c2b116 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -33,6 +33,7 @@
#include <linux/i2c/i2c-rcar.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -102,8 +103,8 @@ enum {
#define ID_NACK (1 << 4)
enum rcar_i2c_type {
- I2C_RCAR_H1,
- I2C_RCAR_H2,
+ I2C_RCAR_GEN1,
+ I2C_RCAR_GEN2,
};
struct rcar_i2c_priv {
@@ -226,22 +227,23 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
u32 bus_speed,
struct device *dev)
{
- struct clk *clkp = clk_get(NULL, "peripheral_clk");
+ struct clk *clkp = clk_get(dev, NULL);
u32 scgd, cdf;
u32 round, ick;
u32 scl;
u32 cdf_width;
+ unsigned long rate;
- if (!clkp) {
- dev_err(dev, "there is no peripheral_clk\n");
- return -EIO;
+ if (IS_ERR(clkp)) {
+ dev_err(dev, "couldn't get clock\n");
+ return PTR_ERR(clkp);
}
switch (priv->devtype) {
- case I2C_RCAR_H1:
+ case I2C_RCAR_GEN1:
cdf_width = 2;
break;
- case I2C_RCAR_H2:
+ case I2C_RCAR_GEN2:
cdf_width = 3;
break;
default:
@@ -264,15 +266,14 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
* clkp : peripheral_clk
* F[] : integer up-valuation
*/
- for (cdf = 0; cdf < (1 << cdf_width); cdf++) {
- ick = clk_get_rate(clkp) / (1 + cdf);
- if (ick < 20000000)
- goto ick_find;
+ rate = clk_get_rate(clkp);
+ cdf = rate / 20000000;
+ if (cdf >= 1 << cdf_width) {
+ dev_err(dev, "Input clock %lu too high\n", rate);
+ return -EIO;
}
- dev_err(dev, "there is no best CDF\n");
- return -EIO;
+ ick = rate / (cdf + 1);
-ick_find:
/*
* it is impossible to calculate large scale
* number on u32. separate it
@@ -290,6 +291,12 @@ ick_find:
*
* Calculation result (= SCL) should be less than
* bus_speed for hardware safety
+ *
+ * We could use something along the lines of
+ * div = ick / (bus_speed + 1) + 1;
+ * scgd = (div - 20 - round + 7) / 8;
+ * scl = ick / (20 + (scgd * 8) + round);
+ * (not fully verified) but that would get pretty involved
*/
for (scgd = 0; scgd < 0x40; scgd++) {
scl = ick / (20 + (scgd * 8) + round);
@@ -306,7 +313,7 @@ scgd_find:
/*
* keep icccr value
*/
- priv->icccr = (scgd << (cdf_width) | cdf);
+ priv->icccr = scgd << cdf_width | cdf;
return 0;
}
@@ -632,6 +639,15 @@ static const struct i2c_algorithm rcar_i2c_algo = {
.functionality = rcar_i2c_func,
};
+static const struct of_device_id rcar_i2c_dt_ids[] = {
+ { .compatible = "renesas,i2c-rcar", .data = (void *)I2C_RCAR_GEN1 },
+ { .compatible = "renesas,i2c-r8a7778", .data = (void *)I2C_RCAR_GEN1 },
+ { .compatible = "renesas,i2c-r8a7779", .data = (void *)I2C_RCAR_GEN1 },
+ { .compatible = "renesas,i2c-r8a7790", .data = (void *)I2C_RCAR_GEN2 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
+
static int rcar_i2c_probe(struct platform_device *pdev)
{
struct i2c_rcar_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -649,10 +665,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
}
bus_speed = 100000; /* default 100 kHz */
- if (pdata && pdata->bus_speed)
+ ret = of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed);
+ if (ret < 0 && pdata && pdata->bus_speed)
bus_speed = pdata->bus_speed;
- priv->devtype = platform_get_device_id(pdev)->driver_data;
+ if (pdev->dev.of_node)
+ priv->devtype = (long)of_match_device(rcar_i2c_dt_ids,
+ dev)->data;
+ else
+ priv->devtype = platform_get_device_id(pdev)->driver_data;
ret = rcar_i2c_clock_calculate(priv, bus_speed, dev);
if (ret < 0)
@@ -673,6 +694,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->retries = 3;
adap->dev.parent = dev;
+ adap->dev.of_node = dev->of_node;
i2c_set_adapdata(adap, priv);
strlcpy(adap->name, pdev->name, sizeof(adap->name));
@@ -709,9 +731,9 @@ static int rcar_i2c_remove(struct platform_device *pdev)
}
static struct platform_device_id rcar_i2c_id_table[] = {
- { "i2c-rcar", I2C_RCAR_H1 },
- { "i2c-rcar_h1", I2C_RCAR_H1 },
- { "i2c-rcar_h2", I2C_RCAR_H2 },
+ { "i2c-rcar", I2C_RCAR_GEN1 },
+ { "i2c-rcar_gen1", I2C_RCAR_GEN1 },
+ { "i2c-rcar_gen2", I2C_RCAR_GEN2 },
{},
};
MODULE_DEVICE_TABLE(platform, rcar_i2c_id_table);
@@ -720,6 +742,7 @@ static struct platform_driver rcar_i2c_driver = {
.driver = {
.name = "i2c-rcar",
.owner = THIS_MODULE,
+ .of_match_table = rcar_i2c_dt_ids,
},
.probe = rcar_i2c_probe,
.remove = rcar_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 3747b9bf67d6..bf8fb94ebc5d 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -36,6 +36,7 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index c447e8d40b78..599235514138 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -223,7 +223,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
goto out;
obj = pkg->package.elements + 1;
- if (obj == NULL || obj->type != ACPI_TYPE_INTEGER) {
+ if (obj->type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO, "Invalid argument type"));
result = -EIO;
goto out;
@@ -235,7 +235,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
case I2C_SMBUS_BYTE:
case I2C_SMBUS_BYTE_DATA:
case I2C_SMBUS_WORD_DATA:
- if (obj == NULL || obj->type != ACPI_TYPE_INTEGER) {
+ if (obj->type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO, "Invalid argument type"));
result = -EIO;
goto out;
@@ -246,7 +246,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
data->byte = obj->integer.value;
break;
case I2C_SMBUS_BLOCK_DATA:
- if (obj == NULL || obj->type != ACPI_TYPE_BUFFER) {
+ if (obj->type != ACPI_TYPE_BUFFER) {
ACPI_ERROR((AE_INFO, "Invalid argument type"));
result = -EIO;
goto out;
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 55110ddbed1f..1d79585ba4b3 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -235,7 +235,7 @@ static void sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd)
int offset;
/* Get clock rate after clock is enabled */
- clk_enable(pd->clk);
+ clk_prepare_enable(pd->clk);
i2c_clk_khz = clk_get_rate(pd->clk) / 1000;
i2c_clk_khz /= pd->clks_per_count;
@@ -270,14 +270,14 @@ static void sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd)
pd->icic &= ~ICIC_ICCHB8;
out:
- clk_disable(pd->clk);
+ clk_disable_unprepare(pd->clk);
}
static void activate_ch(struct sh_mobile_i2c_data *pd)
{
/* Wake up device and enable clock */
pm_runtime_get_sync(pd->dev);
- clk_enable(pd->clk);
+ clk_prepare_enable(pd->clk);
/* Enable channel and configure rx ack */
iic_set_clr(pd, ICCR, ICCR_ICE, 0);
@@ -300,7 +300,7 @@ static void deactivate_ch(struct sh_mobile_i2c_data *pd)
iic_set_clr(pd, ICCR, 0, ICCR_ICE);
/* Disable clock and mark device as idle */
- clk_disable(pd->clk);
+ clk_disable_unprepare(pd->clk);
pm_runtime_put_sync(pd->dev);
}
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 4c8b368d463b..fc2716afdfd9 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -40,6 +40,7 @@
#include <linux/i2c-xiic.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of.h>
#define DRIVER_NAME "xiic-i2c"
@@ -702,7 +703,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
if (irq < 0)
goto resource_missing;
- pdata = (struct xiic_i2c_platform_data *)dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(&pdev->dev);
i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
if (!i2c)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 3be58f89ac77..d1416635faf5 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -248,16 +248,17 @@ static int i2c_device_probe(struct device *dev)
driver = to_i2c_driver(dev->driver);
if (!driver->probe || !driver->id_table)
return -ENODEV;
- client->driver = driver;
+
if (!device_can_wakeup(&client->dev))
device_init_wakeup(&client->dev,
client->flags & I2C_CLIENT_WAKE);
dev_dbg(dev, "probe\n");
+ acpi_dev_pm_attach(&client->dev, true);
status = driver->probe(client, i2c_match_id(driver->id_table, client));
if (status) {
- client->driver = NULL;
i2c_set_clientdata(client, NULL);
+ acpi_dev_pm_detach(&client->dev, true);
}
return status;
}
@@ -279,10 +280,9 @@ static int i2c_device_remove(struct device *dev)
dev->driver = NULL;
status = 0;
}
- if (status == 0) {
- client->driver = NULL;
+ if (status == 0)
i2c_set_clientdata(client, NULL);
- }
+ acpi_dev_pm_detach(&client->dev, true);
return status;
}
@@ -615,6 +615,24 @@ void i2c_unlock_adapter(struct i2c_adapter *adapter)
}
EXPORT_SYMBOL_GPL(i2c_unlock_adapter);
+static void i2c_dev_set_name(struct i2c_adapter *adap,
+ struct i2c_client *client)
+{
+ if (ACPI_HANDLE(&client->dev)) {
+ struct acpi_device *adev;
+ if (!acpi_bus_get_device(ACPI_HANDLE(&client->dev), &adev)) {
+ dev_set_name(&client->dev, "i2c-%s",
+ dev_name(&adev->dev));
+ return;
+ }
+ }
+
+ /* For 10-bit clients, add an arbitrary offset to avoid collisions */
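+ /* e.g. a 10-bit client at address 0x050 on adapter 1 is named "1-a050" */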
+ dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
+ client->addr | ((client->flags & I2C_CLIENT_TEN)
+ ? 0xa000 : 0));
+}
+
/**
* i2c_new_device - instantiate an i2c device
* @adap: the adapter managing the device
@@ -673,10 +691,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.of_node = info->of_node;
ACPI_HANDLE_SET(&client->dev, info->acpi_node.handle);
- /* For 10-bit clients, add an arbitrary offset to avoid collisions */
- dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
- client->addr | ((client->flags & I2C_CLIENT_TEN)
- ? 0xa000 : 0));
+ i2c_dev_set_name(adap, client);
status = device_register(&client->dev);
if (status)
goto out_err;
@@ -1111,8 +1126,10 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
if (ret < 0 || !info.addr)
return AE_OK;
+ adev->power.flags.ignore_parent = true;
strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
if (!i2c_new_device(adapter, &info)) {
+ adev->power.flags.ignore_parent = false;
dev_err(&adapter->dev,
"failed to add I2C device %s from ACPI\n",
dev_name(&adev->dev));
@@ -1609,9 +1626,14 @@ static int i2c_cmd(struct device *dev, void *_arg)
{
struct i2c_client *client = i2c_verify_client(dev);
struct i2c_cmd_arg *arg = _arg;
+ struct i2c_driver *driver;
+
+ if (!client || !client->dev.driver)
+ return 0;
- if (client && client->driver && client->driver->command)
- client->driver->command(client, arg->cmd, arg->arg);
+ driver = to_i2c_driver(client->dev.driver);
+ if (driver->command)
+ driver->command(client, arg->cmd, arg->arg);
return 0;
}
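
The conversion above (and the matching hunk in i2c-smbus.c below) drops the cached client->driver pointer in favour of the driver core binding. A minimal sketch of that lookup as a hypothetical helper, not part of the patch; to_i2c_driver() is the existing container_of wrapper from <linux/i2c.h>:

	/* Hypothetical helper: returns the bound i2c_driver, or NULL if the
	 * client currently has no driver.
	 */
	static inline struct i2c_driver *bound_i2c_driver(struct i2c_client *client)
	{
		if (!client->dev.driver)
			return NULL;
		return to_i2c_driver(client->dev.driver);
	}
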
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index c3ccdea3d180..80b47e8ce030 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -102,8 +102,8 @@ static void return_i2c_dev(struct i2c_dev *i2c_dev)
kfree(i2c_dev);
}
-static ssize_t show_adapter_name(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(dev->devt));
@@ -111,7 +111,13 @@ static ssize_t show_adapter_name(struct device *dev,
return -ENODEV;
return sprintf(buf, "%s\n", i2c_dev->adap->name);
}
-static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *i2c_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(i2c);
/* ------------------------------------------------------------------------- */
@@ -562,15 +568,10 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
res = PTR_ERR(i2c_dev->dev);
goto error;
}
- res = device_create_file(i2c_dev->dev, &dev_attr_name);
- if (res)
- goto error_destroy;
pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
adap->name, adap->nr);
return 0;
-error_destroy:
- device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
error:
return_i2c_dev(i2c_dev);
return res;
@@ -589,7 +590,6 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
if (!i2c_dev) /* attach_adapter must have failed */
return 0;
- device_remove_file(i2c_dev->dev, &dev_attr_name);
return_i2c_dev(i2c_dev);
device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
@@ -637,6 +637,7 @@ static int __init i2c_dev_init(void)
res = PTR_ERR(i2c_dev_class);
goto out_unreg_chrdev;
}
+ i2c_dev_class->dev_groups = i2c_groups;
/* Keep track of adapters which will be added or removed later */
res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
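
The i2c-dev hunks above replace per-device device_create_file()/device_remove_file() calls with a class-wide attribute group. A minimal sketch of the pattern with a hypothetical read-only "version" attribute (the demo names are illustrative, not from the patch):

	/* DEVICE_ATTR_RO(version) expects version_show() and defines
	 * dev_attr_version; ATTRIBUTE_GROUPS(demo) builds demo_groups[] from
	 * demo_attrs[], which a class can publish once via ->dev_groups.
	 */
	static ssize_t version_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "1.0\n");
	}
	static DEVICE_ATTR_RO(version);

	static struct attribute *demo_attrs[] = {
		&dev_attr_version.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(demo);

	/* at init: demo_class->dev_groups = demo_groups; */
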
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 44d4c6071c15..c99b22987366 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -46,6 +46,7 @@ static int smbus_do_alert(struct device *dev, void *addrp)
{
struct i2c_client *client = i2c_verify_client(dev);
struct alert_data *data = addrp;
+ struct i2c_driver *driver;
if (!client || client->addr != data->addr)
return 0;
@@ -54,12 +55,13 @@ static int smbus_do_alert(struct device *dev, void *addrp)
/*
* Drivers should either disable alerts, or provide at least
- * a minimal handler. Lock so client->driver won't change.
+ * a minimal handler. Lock so the driver won't change.
*/
device_lock(dev);
- if (client->driver) {
- if (client->driver->alert)
- client->driver->alert(client, data->flag);
+ if (client->dev.driver) {
+ driver = to_i2c_driver(client->dev.driver);
+ if (driver->alert)
+ driver->alert(client, data->flag);
else
dev_warn(&client->dev, "no driver alert()!\n");
} else
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 928656e241dd..c58e093b6032 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -238,7 +238,7 @@ static struct platform_driver i2c_arbitrator_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "i2c-arb-gpio-challenge",
- .of_match_table = of_match_ptr(i2c_arbitrator_of_match),
+ .of_match_table = i2c_arbitrator_of_match,
},
};
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index a764da777f08..8a8c56f4b026 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -30,15 +30,15 @@ static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
int i;
for (i = 0; i < mux->data.n_gpios; i++)
- gpio_set_value(mux->gpio_base + mux->data.gpios[i],
- val & (1 << i));
+ gpio_set_value_cansleep(mux->gpio_base + mux->data.gpios[i],
+ val & (1 << i));
}
static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan)
{
struct gpiomux *mux = data;
- i2c_mux_gpio_set(mux, mux->data.values[chan]);
+ i2c_mux_gpio_set(mux, chan);
return 0;
}
@@ -228,7 +228,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
unsigned int class = mux->data.classes ? mux->data.classes[i] : 0;
mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr,
- i, class,
+ mux->data.values[i], class,
i2c_mux_gpio_select, deselect);
if (!mux->adap[i]) {
ret = -ENODEV;
@@ -283,7 +283,7 @@ static struct platform_driver i2c_mux_gpio_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "i2c-mux-gpio",
- .of_match_table = of_match_ptr(i2c_mux_gpio_of_match),
+ .of_match_table = i2c_mux_gpio_of_match,
},
};
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 68a37157377d..d7978dc4ad0b 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -24,6 +24,7 @@
#include <linux/i2c-mux-pinctrl.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/of.h>
struct i2c_mux_pinctrl {
struct device *dev;
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 02906ca99b41..8fb46aab2d87 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -197,8 +197,8 @@ comment "IDE chipset support/bugfixes"
config IDE_GENERIC
tristate "generic/default IDE chipset support"
- depends on ALPHA || X86 || IA64 || M32R || MIPS || ARCH_RPC || ARCH_SHARK
- default ARM && (ARCH_RPC || ARCH_SHARK)
+ depends on ALPHA || X86 || IA64 || M32R || MIPS || ARCH_RPC
+ default ARM && ARCH_RPC
help
This is the generic IDE driver. This driver attaches to the
fixed legacy ports (e.g. on PCs 0x1f0/0x170, 0x1e8/0x168 and
@@ -722,13 +722,6 @@ config BLK_DEV_IDE_RAPIDE
Say Y here if you want to support the Yellowstone RapIDE controller
manufactured for use with Acorn computers.
-config IDE_H8300
- tristate "H8300 IDE support"
- depends on H8300
- default y
- help
- Enables the H8300 IDE driver.
-
config BLK_DEV_GAYLE
tristate "Amiga Gayle IDE interface support"
depends on AMIGA
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index af8d016c37ea..a04ee82f1c8f 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -78,8 +78,6 @@ obj-$(CONFIG_BLK_DEV_CMD640) += cmd640.o
obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o
-obj-$(CONFIG_IDE_H8300) += ide-h8300.o
-
obj-$(CONFIG_IDE_GENERIC) += ide-generic.o
obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o
diff --git a/drivers/ide/ide-h8300.c b/drivers/ide/ide-h8300.c
deleted file mode 100644
index 520f42c5445a..000000000000
--- a/drivers/ide/ide-h8300.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * H8/300 generic IDE interface
- */
-
-#include <linux/init.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#define DRV_NAME "ide-h8300"
-
-#define bswap(d) \
-({ \
- u16 r; \
- __asm__("mov.b %w1,r1h\n\t" \
- "mov.b %x1,r1l\n\t" \
- "mov.w r1,%0" \
- :"=r"(r) \
- :"r"(d) \
- :"er1"); \
- (r); \
-})
-
-static void mm_outsw(unsigned long addr, void *buf, u32 len)
-{
- unsigned short *bp = (unsigned short *)buf;
- for (; len > 0; len--, bp++)
- *(volatile u16 *)addr = bswap(*bp);
-}
-
-static void mm_insw(unsigned long addr, void *buf, u32 len)
-{
- unsigned short *bp = (unsigned short *)buf;
- for (; len > 0; len--, bp++)
- *bp = bswap(*(volatile u16 *)addr);
-}
-
-static void h8300_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- mm_insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
-}
-
-static void h8300_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
-}
-
-static const struct ide_tp_ops h8300_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = h8300_input_data,
- .output_data = h8300_output_data,
-};
-
-#define H8300_IDE_GAP (2)
-
-static inline void hw_setup(struct ide_hw *hw)
-{
- int i;
-
- memset(hw, 0, sizeof(*hw));
- for (i = 0; i <= 7; i++)
- hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
- hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT;
- hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ;
-}
-
-static const struct ide_port_info h8300_port_info = {
- .tp_ops = &h8300_tp_ops,
- .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
- .chipset = ide_generic,
-};
-
-static int __init h8300_ide_init(void)
-{
- struct ide_hw hw, *hws[] = { &hw };
-
- printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n");
-
- if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300"))
- goto out_busy;
- if (!request_region(CONFIG_H8300_IDE_ALT, H8300_IDE_GAP, "ide-h8300")) {
- release_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8);
- goto out_busy;
- }
-
- hw_setup(&hw);
-
- return ide_host_add(&h8300_port_info, hws, 1, NULL);
-
-out_busy:
- printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
-
- return -EBUSY;
-}
-
-module_init(h8300_ide_init);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide-sysfs.c b/drivers/ide/ide-sysfs.c
index 883ffacaf45a..84a6a9e08d64 100644
--- a/drivers/ide/ide-sysfs.c
+++ b/drivers/ide/ide-sysfs.c
@@ -25,6 +25,7 @@ static ssize_t media_show(struct device *dev, struct device_attribute *attr,
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", ide_media_string(drive));
}
+static DEVICE_ATTR_RO(media);
static ssize_t drivename_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -32,6 +33,7 @@ static ssize_t drivename_show(struct device *dev, struct device_attribute *attr,
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", drive->name);
}
+static DEVICE_ATTR_RO(drivename);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -39,6 +41,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "ide:m-%s\n", ide_media_string(drive));
}
+static DEVICE_ATTR_RO(modalias);
static ssize_t model_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -46,6 +49,7 @@ static ssize_t model_show(struct device *dev, struct device_attribute *attr,
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_PROD]);
}
+static DEVICE_ATTR_RO(model);
static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -53,6 +57,7 @@ static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_FW_REV]);
}
+static DEVICE_ATTR_RO(firmware);
static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -60,16 +65,28 @@ static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
ide_drive_t *drive = to_ide_device(dev);
return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_SERNO]);
}
+static DEVICE_ATTR(serial, 0400, serial_show, NULL);
+
+static DEVICE_ATTR(unload_heads, 0644, ide_park_show, ide_park_store);
+
+static struct attribute *ide_attrs[] = {
+ &dev_attr_media.attr,
+ &dev_attr_drivename.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_model.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_unload_heads.attr,
+ NULL,
+};
+
+static const struct attribute_group ide_attr_group = {
+ .attrs = ide_attrs,
+};
-struct device_attribute ide_dev_attrs[] = {
- __ATTR_RO(media),
- __ATTR_RO(drivename),
- __ATTR_RO(modalias),
- __ATTR_RO(model),
- __ATTR_RO(firmware),
- __ATTR(serial, 0400, serial_show, NULL),
- __ATTR(unload_heads, 0644, ide_park_show, ide_park_store),
- __ATTR_NULL
+const struct attribute_group *ide_dev_groups[] = {
+ &ide_attr_group,
+ NULL,
};
static ssize_t store_delete_devices(struct device *portdev,
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index fa896210ed7b..2ce6268a2734 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -158,7 +158,7 @@ struct bus_type ide_bus_type = {
.probe = generic_ide_probe,
.remove = generic_ide_remove,
.shutdown = generic_ide_shutdown,
- .dev_attrs = ide_dev_attrs,
+ .dev_groups = ide_dev_groups,
.suspend = generic_ide_suspend,
.resume = generic_ide_resume,
};
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index fa6964d8681a..499b0ff20a9b 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1,7 +1,7 @@
/*
* intel_idle.c - native hardware idle loop for modern Intel processors
*
- * Copyright (c) 2010, Intel Corporation.
+ * Copyright (c) 2013, Intel Corporation.
* Len Brown <len.brown@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table;
* which is also the index into the MWAIT hint array.
* Thus C0 is a dummy.
*/
-static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state nehalem_cstates[] __initdata = {
{
.name = "C1-NHM",
.desc = "MWAIT 0x00",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
.enter = NULL }
};
-static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state snb_cstates[] __initdata = {
{
.name = "C1-SNB",
.desc = "MWAIT 0x00",
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
.enter = NULL }
};
-static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state ivb_cstates[] __initdata = {
{
.name = "C1-IVB",
.desc = "MWAIT 0x00",
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
.enter = NULL }
};
-static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state hsw_cstates[] __initdata = {
{
.name = "C1-HSW",
.desc = "MWAIT 0x00",
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
.enter = NULL }
};
-static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state atom_cstates[] __initdata = {
{
.name = "C1E-ATM",
.desc = "MWAIT 0x00",
@@ -329,6 +329,36 @@ static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
{
.enter = NULL }
};
+static struct cpuidle_state avn_cstates[CPUIDLE_STATE_MAX] = {
+ {
+ .name = "C1-AVN",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle },
+ {
+ .name = "C1E-AVN",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 5,
+ .target_residency = 10,
+ .enter = &intel_idle },
+ {
+ .name = "C6NS-AVN", /* No Cache Shrink */
+ .desc = "MWAIT 0x51",
+ .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 15,
+ .target_residency = 45,
+ .enter = &intel_idle },
+ {
+ .name = "C6FS-AVN", /* Full Cache shrink */
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 150, /* fake penalty added due to cold cache */
+ .target_residency = 100000, /* fake penalty added due to cold cache */
+ .enter = &intel_idle },
+};
/**
* intel_idle
@@ -359,7 +389,7 @@ static int intel_idle(struct cpuidle_device *dev,
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
- if (!need_resched()) {
+ if (!current_set_polling_and_test()) {
__monitor((void *)&current_thread_info()->flags, 0, 0);
smp_mb();
@@ -390,7 +420,7 @@ static int cpu_hotplug_notify(struct notifier_block *n,
int hotcpu = (unsigned long)hcpu;
struct cpuidle_device *dev;
- switch (action & 0xf) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
@@ -462,6 +492,11 @@ static const struct idle_cpu idle_cpu_hsw = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_avn = {
+ .state_table = avn_cstates,
+ .disable_promotion_to_c1e = true,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
@@ -483,6 +518,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x3f, idle_cpu_hsw),
ICPU(0x45, idle_cpu_hsw),
ICPU(0x46, idle_cpu_hsw),
+ ICPU(0x4D, idle_cpu_avn),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -490,7 +526,7 @@ MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
/*
* intel_idle_probe()
*/
-static int intel_idle_probe(void)
+static int __init intel_idle_probe(void)
{
unsigned int eax, ebx, ecx;
const struct x86_cpu_id *id;
@@ -558,7 +594,7 @@ static void intel_idle_cpuidle_devices_uninit(void)
* intel_idle_cpuidle_driver_init()
* allocate, initialize cpuidle_states
*/
-static int intel_idle_cpuidle_driver_init(void)
+static int __init intel_idle_cpuidle_driver_init(void)
{
int cstate;
struct cpuidle_driver *drv = &intel_idle_driver;
@@ -628,7 +664,7 @@ static int intel_idle_cpu_init(int cpu)
int num_substates, mwait_hint, mwait_cstate, mwait_substate;
if (cpuidle_state_table[cstate].enter == NULL)
- continue;
+ break;
if (cstate + 1 > max_cstate) {
printk(PREFIX "max_cstate %d reached\n", max_cstate);
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 5ceda710f516..b84791f03a27 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -31,6 +31,17 @@ config INFINIBAND_USER_ACCESS
libibverbs, libibcm and a hardware driver library from
<http://www.openfabrics.org/git/>.
+config INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
+ bool "Experimental and unstable ABI for userspace access to flow steering verbs"
+ depends on INFINIBAND_USER_ACCESS
+ depends on STAGING
+ ---help---
+ The final ABI for userspace access to flow steering verbs
+ has not been defined. To use the current ABI, *WHICH WILL
+ CHANGE IN THE FUTURE*, say Y here.
+
+ If unsure, say N.
+
config INFINIBAND_USER_MEM
bool
depends on INFINIBAND_USER_ACCESS != n
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index dab4b41f1715..da9611d8c467 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -328,28 +328,6 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
return ret;
}
-static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
-{
- int i;
- int err;
- struct ib_port_attr props;
- union ib_gid tmp;
-
- err = ib_query_port(device, port_num, &props);
- if (err)
- return err;
-
- for (i = 0; i < props.gid_tbl_len; ++i) {
- err = ib_query_gid(device, port_num, i, &tmp);
- if (err)
- return err;
- if (!memcmp(&tmp, gid, sizeof tmp))
- return 0;
- }
-
- return -EADDRNOTAVAIL;
-}
-
static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
dev_addr->dev_type = ARPHRD_INFINIBAND;
@@ -371,13 +349,14 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
return ret;
}
-static int cma_acquire_dev(struct rdma_id_private *id_priv)
+static int cma_acquire_dev(struct rdma_id_private *id_priv,
+ struct rdma_id_private *listen_id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
struct cma_device *cma_dev;
union ib_gid gid, iboe_gid;
int ret = -ENODEV;
- u8 port;
+ u8 port, found_port;
enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
@@ -389,17 +368,39 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
iboe_addr_get_sgid(dev_addr, &iboe_gid);
memcpy(&gid, dev_addr->src_dev_addr +
rdma_addr_gid_offset(dev_addr), sizeof gid);
+ if (listen_id_priv &&
+ rdma_port_get_link_layer(listen_id_priv->id.device,
+ listen_id_priv->id.port_num) == dev_ll) {
+ cma_dev = listen_id_priv->cma_dev;
+ port = listen_id_priv->id.port_num;
+ if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
+ rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
+ ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
+ &found_port, NULL);
+ else
+ ret = ib_find_cached_gid(cma_dev->device, &gid,
+ &found_port, NULL);
+
+ if (!ret && (port == found_port)) {
+ id_priv->id.port_num = found_port;
+ goto out;
+ }
+ }
list_for_each_entry(cma_dev, &dev_list, list) {
for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
+ if (listen_id_priv &&
+ listen_id_priv->cma_dev == cma_dev &&
+ listen_id_priv->id.port_num == port)
+ continue;
if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
- ret = find_gid_port(cma_dev->device, &iboe_gid, port);
+ ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
else
- ret = find_gid_port(cma_dev->device, &gid, port);
+ ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);
- if (!ret) {
- id_priv->id.port_num = port;
+ if (!ret && (port == found_port)) {
+ id_priv->id.port_num = found_port;
goto out;
}
}
@@ -1292,7 +1293,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
}
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
- ret = cma_acquire_dev(conn_id);
+ ret = cma_acquire_dev(conn_id, listen_id);
if (ret)
goto err2;
@@ -1481,7 +1482,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
goto out;
}
- ret = cma_acquire_dev(conn_id);
+ ret = cma_acquire_dev(conn_id, listen_id);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -2050,7 +2051,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
goto out;
if (!status && !id_priv->cma_dev)
- status = cma_acquire_dev(id_priv);
+ status = cma_acquire_dev(id_priv, NULL);
if (status) {
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
@@ -2294,7 +2295,7 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
int low, high, remaining;
unsigned int rover;
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(&init_net, &low, &high);
remaining = (high - low) + 1;
rover = net_random() % remaining + low;
retry:
@@ -2547,7 +2548,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (ret)
goto err1;
- ret = cma_acquire_dev(id_priv);
+ ret = cma_acquire_dev(id_priv, NULL);
if (ret)
goto err1;
}
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index da06abde9e0d..a1e9cba84944 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -148,7 +148,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
list_for_each_entry(client, &client_list, list) {
if (client->index == index) {
if (op < 0 || op >= client->nops ||
- !client->cb_table[RDMA_NL_GET_OP(op)].dump)
+ !client->cb_table[op].dump)
return -EINVAL;
{
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index d040b877475f..d8f9c6c272d7 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -217,7 +217,9 @@ IB_UVERBS_DECLARE_CMD(destroy_srq);
IB_UVERBS_DECLARE_CMD(create_xsrq);
IB_UVERBS_DECLARE_CMD(open_xrcd);
IB_UVERBS_DECLARE_CMD(close_xrcd);
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
IB_UVERBS_DECLARE_CMD(create_flow);
IB_UVERBS_DECLARE_CMD(destroy_flow);
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index f2b81b9ee0d6..5bb2a82d52e7 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -54,7 +54,9 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
do { \
@@ -2126,6 +2128,9 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
}
next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
+ if (next->opcode == IB_WR_SEND_WITH_IMM)
+ next->ex.imm_data =
+ (__be32 __force) user_wr->ex.imm_data;
} else {
switch (next->opcode) {
case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -2599,6 +2604,7 @@ out_put:
return ret ? ret : in_len;
}
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
union ib_flow_spec *ib_spec)
{
@@ -2824,6 +2830,7 @@ ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
return ret ? ret : in_len;
}
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
struct ib_uverbs_create_xsrq *cmd,
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 75ad86c4abf8..2df31f68ea09 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -115,8 +115,10 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd,
[IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq,
[IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp,
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
[IB_USER_VERBS_CMD_CREATE_FLOW] = ib_uverbs_create_flow,
[IB_USER_VERBS_CMD_DESTROY_FLOW] = ib_uverbs_destroy_flow
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
};
static void ib_uverbs_add_one(struct ib_device *device);
@@ -605,6 +607,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
return -ENOSYS;
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) {
struct ib_uverbs_cmd_hdr_ex hdr_ex;
@@ -621,6 +624,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
(hdr_ex.out_words +
hdr_ex.provider_out_words) * 4);
} else {
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
if (hdr.in_words * 4 != count)
return -EINVAL;
@@ -628,7 +632,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
buf + sizeof(hdr),
hdr.in_words * 4,
hdr.out_words * 4);
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
}
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
}
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index f5cb13b21445..06cbfd4ccf4b 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -280,9 +280,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
int j;
int ret;
- ret = get_user_pages(current, current->mm, addr,
- npages, 0, 1, pages, NULL);
-
+ ret = get_user_pages_fast(addr, npages, 0, pages);
if (ret != npages) {
int i;
@@ -811,10 +809,7 @@ int ipath_user_sdma_writev(struct ipath_devdata *dd,
while (dim) {
const int mxp = 8;
- down_write(&current->mm->mmap_sem);
ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
- up_write(&current->mm->mmap_sem);
-
if (ret <= 0)
goto done_unlock;
else {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index d6c5a73becf4..7567437dbd34 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -177,18 +177,18 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_mr_size = ~0ull;
props->page_size_cap = dev->dev->caps.page_size_cap;
- props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
+ props->max_qp = dev->dev->quotas.qp;
props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
props->max_sge = min(dev->dev->caps.max_sq_sg,
dev->dev->caps.max_rq_sg);
- props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
+ props->max_cq = dev->dev->quotas.cq;
props->max_cqe = dev->dev->caps.max_cqes;
- props->max_mr = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
+ props->max_mr = dev->dev->quotas.mpt;
props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
- props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
+ props->max_srq = dev->dev->quotas.srq;
props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
props->max_srq_sge = dev->dev->caps.max_srq_sge;
props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
@@ -1691,9 +1691,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
ibdev->ib_dev.uverbs_cmd_mask |=
(1ull << IB_USER_VERBS_CMD_CREATE_FLOW) |
(1ull << IB_USER_VERBS_CMD_DESTROY_FLOW);
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
}
mlx4_ib_alloc_eqs(dev, ibdev);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 344ab03948a3..28344773f640 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -620,7 +620,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
}
mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
- (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - PAGE_SHIFT;
+ (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
*index = dev->mdev.priv.uuari.uars[0].index;
return 0;
@@ -653,8 +653,11 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
int eqn;
int err;
+ if (entries < 0)
+ return ERR_PTR(-EINVAL);
+
entries = roundup_pow_of_two(entries + 1);
- if (entries < 1 || entries > dev->mdev.caps.max_cqes)
+ if (entries > dev->mdev.caps.max_cqes)
return ERR_PTR(-EINVAL);
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -747,17 +750,9 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq)
return 0;
}
-static int is_equal_rsn(struct mlx5_cqe64 *cqe64, struct mlx5_ib_srq *srq,
- u32 rsn)
+static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
- u32 lrsn;
-
- if (srq)
- lrsn = be32_to_cpu(cqe64->srqn) & 0xffffff;
- else
- lrsn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff;
-
- return rsn == lrsn;
+ return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
@@ -787,8 +782,8 @@ void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
- if (is_equal_rsn(cqe64, srq, rsn)) {
- if (srq)
+ if (is_equal_rsn(cqe64, rsn)) {
+ if (srq && (ntohl(cqe64->srqn) & 0xffffff))
mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
++nfreed;
} else if (nfreed) {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b1a6cb3a2809..306534109627 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -745,7 +745,8 @@ static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
seg->start_addr = 0;
- err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in));
+ err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in),
+ NULL, NULL, NULL);
if (err) {
mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
goto err_in;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 836be9157242..4c134d93d4fc 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -262,6 +262,9 @@ struct mlx5_ib_mr {
int npages;
struct completion done;
enum ib_wc_status status;
+ struct mlx5_ib_dev *dev;
+ struct mlx5_create_mkey_mbox_out out;
+ unsigned long start;
};
struct mlx5_ib_fast_reg_page_list {
@@ -323,6 +326,7 @@ struct mlx5_cache_ent {
struct mlx5_ib_dev *dev;
struct work_struct work;
struct delayed_work dwork;
+ int pending;
};
struct mlx5_mr_cache {
@@ -358,6 +362,8 @@ struct mlx5_ib_dev {
spinlock_t mr_lock;
struct mlx5_ib_resources devr;
struct mlx5_mr_cache cache;
+ struct timer_list delay_timer;
+ int fill_delay;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3453580b1eb2..2c4626f7b1b8 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -35,11 +35,12 @@
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
+#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
enum {
- DEF_CACHE_SIZE = 10,
+ MAX_PENDING_REG_MR = 8,
};
enum {
@@ -63,6 +64,57 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
return order - cache->ent[0].order;
}
+static void reg_mr_callback(int status, void *context)
+{
+ struct mlx5_ib_mr *mr = context;
+ struct mlx5_ib_dev *dev = mr->dev;
+ struct mlx5_mr_cache *cache = &dev->cache;
+ int c = order2idx(dev, mr->order);
+ struct mlx5_cache_ent *ent = &cache->ent[c];
+ u8 key;
+ unsigned long delta = jiffies - mr->start;
+ unsigned long index;
+ unsigned long flags;
+
+ index = find_last_bit(&delta, 8 * sizeof(delta));
+ if (index == 64)
+ index = 0;
+
+ spin_lock_irqsave(&ent->lock, flags);
+ ent->pending--;
+ spin_unlock_irqrestore(&ent->lock, flags);
+ if (status) {
+ mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
+ kfree(mr);
+ dev->fill_delay = 1;
+ mod_timer(&dev->delay_timer, jiffies + HZ);
+ return;
+ }
+
+ if (mr->out.hdr.status) {
+ mlx5_ib_warn(dev, "failed - status %d, syndorme 0x%x\n",
+ mr->out.hdr.status,
+ be32_to_cpu(mr->out.hdr.syndrome));
+ kfree(mr);
+ dev->fill_delay = 1;
+ mod_timer(&dev->delay_timer, jiffies + HZ);
+ return;
+ }
+
+ spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags);
+ key = dev->mdev.priv.mkey_key++;
+ spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags);
+ mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+
+ cache->last_add = jiffies;
+
+ spin_lock_irqsave(&ent->lock, flags);
+ list_add_tail(&mr->list, &ent->head);
+ ent->cur++;
+ ent->size++;
+ spin_unlock_irqrestore(&ent->lock, flags);
+}
+
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
struct mlx5_mr_cache *cache = &dev->cache;
@@ -78,36 +130,39 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
return -ENOMEM;
for (i = 0; i < num; i++) {
+ if (ent->pending >= MAX_PENDING_REG_MR) {
+ err = -EAGAIN;
+ break;
+ }
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) {
err = -ENOMEM;
- goto out;
+ break;
}
mr->order = ent->order;
mr->umred = 1;
+ mr->dev = dev;
in->seg.status = 1 << 6;
in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
in->seg.log2_page_size = 12;
+ spin_lock_irq(&ent->lock);
+ ent->pending++;
+ spin_unlock_irq(&ent->lock);
+ mr->start = jiffies;
err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
- sizeof(*in));
+ sizeof(*in), reg_mr_callback,
+ mr, &mr->out);
if (err) {
mlx5_ib_warn(dev, "create mkey failed %d\n", err);
kfree(mr);
- goto out;
+ break;
}
- cache->last_add = jiffies;
-
- spin_lock(&ent->lock);
- list_add_tail(&mr->list, &ent->head);
- ent->cur++;
- ent->size++;
- spin_unlock(&ent->lock);
}
-out:
kfree(in);
return err;
}
@@ -121,16 +176,16 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
int i;
for (i = 0; i < num; i++) {
- spin_lock(&ent->lock);
+ spin_lock_irq(&ent->lock);
if (list_empty(&ent->head)) {
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
return;
}
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
list_del(&mr->list);
ent->cur--;
ent->size--;
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
@@ -162,9 +217,13 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
return -EINVAL;
if (var > ent->size) {
- err = add_keys(dev, c, var - ent->size);
- if (err)
- return err;
+ do {
+ err = add_keys(dev, c, var - ent->size);
+ if (err && err != -EAGAIN)
+ return err;
+
+ usleep_range(3000, 5000);
+ } while (err);
} else if (var < ent->size) {
remove_keys(dev, c, ent->size - var);
}
@@ -280,23 +339,37 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
struct mlx5_ib_dev *dev = ent->dev;
struct mlx5_mr_cache *cache = &dev->cache;
int i = order2idx(dev, ent->order);
+ int err;
if (cache->stopped)
return;
ent = &dev->cache.ent[i];
- if (ent->cur < 2 * ent->limit) {
- add_keys(dev, i, 1);
- if (ent->cur < 2 * ent->limit)
- queue_work(cache->wq, &ent->work);
+ if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
+ err = add_keys(dev, i, 1);
+ if (ent->cur < 2 * ent->limit) {
+ if (err == -EAGAIN) {
+ mlx5_ib_dbg(dev, "returned eagain, order %d\n",
+ i + 2);
+ queue_delayed_work(cache->wq, &ent->dwork,
+ msecs_to_jiffies(3));
+ } else if (err) {
+ mlx5_ib_warn(dev, "command failed order %d, err %d\n",
+ i + 2, err);
+ queue_delayed_work(cache->wq, &ent->dwork,
+ msecs_to_jiffies(1000));
+ } else {
+ queue_work(cache->wq, &ent->work);
+ }
+ }
} else if (ent->cur > 2 * ent->limit) {
if (!someone_adding(cache) &&
- time_after(jiffies, cache->last_add + 60 * HZ)) {
+ time_after(jiffies, cache->last_add + 300 * HZ)) {
remove_keys(dev, i, 1);
if (ent->cur > ent->limit)
queue_work(cache->wq, &ent->work);
} else {
- queue_delayed_work(cache->wq, &ent->dwork, 60 * HZ);
+ queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
}
}
}
@@ -336,18 +409,18 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
- spin_lock(&ent->lock);
+ spin_lock_irq(&ent->lock);
if (!list_empty(&ent->head)) {
mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
list);
list_del(&mr->list);
ent->cur--;
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
if (ent->cur < ent->limit)
queue_work(cache->wq, &ent->work);
break;
}
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
queue_work(cache->wq, &ent->work);
@@ -374,12 +447,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return;
}
ent = &cache->ent[c];
- spin_lock(&ent->lock);
+ spin_lock_irq(&ent->lock);
list_add_tail(&mr->list, &ent->head);
ent->cur++;
if (ent->cur > 2 * ent->limit)
shrink = 1;
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
if (shrink)
queue_work(cache->wq, &ent->work);
@@ -394,16 +467,16 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
cancel_delayed_work(&ent->dwork);
while (1) {
- spin_lock(&ent->lock);
+ spin_lock_irq(&ent->lock);
if (list_empty(&ent->head)) {
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
return;
}
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
list_del(&mr->list);
ent->cur--;
ent->size--;
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
@@ -464,12 +537,18 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
debugfs_remove_recursive(dev->cache.root);
}
+static void delay_time_func(unsigned long ctx)
+{
+ struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
+
+ dev->fill_delay = 0;
+}
+
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
int limit;
- int size;
int err;
int i;
@@ -479,6 +558,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return -ENOMEM;
}
+ setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
INIT_LIST_HEAD(&cache->ent[i].head);
spin_lock_init(&cache->ent[i].lock);
@@ -489,13 +569,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->order = i + 2;
ent->dev = dev;
- if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE) {
- size = dev->mdev.profile->mr_cache[i].size;
+ if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE)
limit = dev->mdev.profile->mr_cache[i].limit;
- } else {
- size = DEF_CACHE_SIZE;
+ else
limit = 0;
- }
+
INIT_WORK(&ent->work, cache_work_func);
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
ent->limit = limit;
@@ -522,6 +600,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
clean_keys(dev, i);
destroy_workqueue(dev->cache.wq);
+ del_timer_sync(&dev->delay_timer);
return 0;
}
@@ -551,7 +630,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
seg->start_addr = 0;
- err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in));
+ err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
+ NULL);
if (err)
goto err_in;
@@ -660,14 +740,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
int err;
int i;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 1; i++) {
mr = alloc_cached_mr(dev, order);
if (mr)
break;
err = add_keys(dev, order2idx(dev, order), 1);
- if (err) {
- mlx5_ib_warn(dev, "add_keys failed\n");
+ if (err && err != -EAGAIN) {
+ mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
break;
}
}
@@ -759,8 +839,10 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
in->seg.log2_page_size = page_shift;
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
- err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen);
+ in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
+ 1 << page_shift));
+ err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL,
+ NULL, NULL);
if (err) {
mlx5_ib_warn(dev, "create mkey failed\n");
goto err_2;
@@ -944,7 +1026,8 @@ struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
* TBD not needed - issue 197292 */
in->seg.log2_page_size = PAGE_SHIFT;
- err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in));
+ err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
+ NULL, NULL);
kfree(in);
if (err)
goto err_free;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 5659ea880741..7c6b4ba49bec 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -551,7 +551,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
(*in)->ctx.log_pg_sz_remote_qpn =
- cpu_to_be32((page_shift - PAGE_SHIFT) << 24);
+ cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
(*in)->ctx.params2 = cpu_to_be32(offset << 6);
(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
@@ -648,7 +648,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
goto err_buf;
}
(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
- (*in)->ctx.log_pg_sz_remote_qpn = cpu_to_be32((qp->buf.page_shift - PAGE_SHIFT) << 24);
+ (*in)->ctx.log_pg_sz_remote_qpn =
+ cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
/* Set "fast registration enabled" for all kernel QPs */
(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
@@ -1317,9 +1318,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_RNR_TIMEOUT |
- MLX5_QP_OPTPAR_PM_STATE,
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PM_STATE,
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
MLX5_QP_OPTPAR_SRQN |
MLX5_QP_OPTPAR_CQN_RCV,
@@ -1550,7 +1553,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
mlx5_cur = to_mlx5_state(cur_state);
mlx5_new = to_mlx5_state(new_state);
mlx5_st = to_mlx5_st(ibqp->qp_type);
- if (mlx5_cur < 0 || mlx5_new < 0 || mlx5_st < 0)
+ if (mlx5_st < 0)
goto out;
optpar = ib_mask_to_mlx5_opt(attr_mask);
@@ -1744,6 +1747,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
MLX5_MKEY_MASK_PD |
MLX5_MKEY_MASK_LR |
MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_KEY |
MLX5_MKEY_MASK_RR |
MLX5_MKEY_MASK_RW |
MLX5_MKEY_MASK_A |
@@ -1800,7 +1804,8 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w
seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
seg->len = cpu_to_be64(wr->wr.fast_reg.length);
seg->log2_page_size = wr->wr.fast_reg.page_shift;
- seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
+ mlx5_mkey_variant(wr->wr.fast_reg.rkey));
}
static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
@@ -1913,6 +1918,10 @@ static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
if (unlikely((*seg == qp->sq.qend)))
*seg = mlx5_get_send_wqe(qp, 0);
if (!li) {
+ if (unlikely(wr->wr.fast_reg.page_list_len >
+ wr->wr.fast_reg.page_list->max_page_list_len))
+ return -ENOMEM;
+
set_frwr_pages(*seg, wr, mdev, pd, writ);
*seg += sizeof(struct mlx5_wqe_data_seg);
*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 0aa478bc291a..210b3eaf188a 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -123,7 +123,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
goto err_in;
}
- (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
+ (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
return 0;
@@ -192,7 +192,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
}
srq->wq_sig = !!srq_signature;
- (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
+ (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
return 0;
@@ -390,9 +390,7 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq)
mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
ib_umem_release(msrq->umem);
} else {
- kfree(msrq->wrid);
- mlx5_buf_free(&dev->mdev, &msrq->buf);
- mlx5_db_free(&dev->mdev, &msrq->db);
+ destroy_srq_kernel(dev, msrq);
}
kfree(srq);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index adc11d14f878..294dd27b601e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -122,6 +122,32 @@ struct mqe_ctx {
bool cmd_done;
};
+struct ocrdma_hw_mr {
+ u32 lkey;
+ u8 fr_mr;
+ u8 remote_atomic;
+ u8 remote_rd;
+ u8 remote_wr;
+ u8 local_rd;
+ u8 local_wr;
+ u8 mw_bind;
+ u8 rsvd;
+ u64 len;
+ struct ocrdma_pbl *pbl_table;
+ u32 num_pbls;
+ u32 num_pbes;
+ u32 pbl_size;
+ u32 pbe_size;
+ u64 fbo;
+ u64 va;
+};
+
+struct ocrdma_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct ocrdma_hw_mr hwmr;
+};
+
struct ocrdma_dev {
struct ib_device ibdev;
struct ocrdma_dev_attr attr;
@@ -169,7 +195,7 @@ struct ocrdma_dev {
struct list_head entry;
struct rcu_head rcu;
int id;
- u64 stag_arr[OCRDMA_MAX_STAG];
+ struct ocrdma_mr *stag_arr[OCRDMA_MAX_STAG];
u16 pvid;
};
@@ -294,31 +320,6 @@ struct ocrdma_qp {
u16 db_cache;
};
-struct ocrdma_hw_mr {
- u32 lkey;
- u8 fr_mr;
- u8 remote_atomic;
- u8 remote_rd;
- u8 remote_wr;
- u8 local_rd;
- u8 local_wr;
- u8 mw_bind;
- u8 rsvd;
- u64 len;
- struct ocrdma_pbl *pbl_table;
- u32 num_pbls;
- u32 num_pbes;
- u32 pbl_size;
- u32 pbe_size;
- u64 fbo;
- u64 va;
-};
-
-struct ocrdma_mr {
- struct ib_mr ibmr;
- struct ib_umem *umem;
- struct ocrdma_hw_mr hwmr;
-};
struct ocrdma_ucontext {
struct ib_ucontext ibucontext;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 50219ab2279d..56bf32fcb62c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1783,7 +1783,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
u32 max_sges = attrs->cap.max_send_sge;
/* QP1 may exceed 127 */
- max_wqe_allocated = min_t(int, attrs->cap.max_send_wr + 1,
+ max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1,
dev->attr.max_wqe);
status = ocrdma_build_q_conf(&max_wqe_allocated,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 0ce7674621ea..91443bcb9e0e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -452,9 +452,6 @@ static void ocrdma_remove_free(struct rcu_head *rcu)
{
struct ocrdma_dev *dev = container_of(rcu, struct ocrdma_dev, rcu);
- ocrdma_free_resources(dev);
- ocrdma_cleanup_hw(dev);
-
idr_remove(&ocrdma_dev_id, dev->id);
kfree(dev->mbx_cmd);
ib_dealloc_device(&dev->ibdev);
@@ -470,6 +467,10 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
spin_lock(&ocrdma_devlist_lock);
list_del_rcu(&dev->entry);
spin_unlock(&ocrdma_devlist_lock);
+
+ ocrdma_free_resources(dev);
+ ocrdma_cleanup_hw(dev);
+
call_rcu(&dev->rcu, ocrdma_remove_free);
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 69f1d1221a6b..7686dceadd29 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1981,9 +1981,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
- if ((wr->wr.fast_reg.page_list_len >
- qp->dev->attr.max_pages_per_frmr) ||
- (wr->wr.fast_reg.length > 0xffffffffULL))
+ if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
return -EINVAL;
hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
@@ -2839,7 +2837,7 @@ struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
goto mbx_err;
mr->ibmr.rkey = mr->hwmr.lkey;
mr->ibmr.lkey = mr->hwmr.lkey;
- dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = (unsigned long) mr;
+ dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = mr;
return &mr->ibmr;
mbx_err:
ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 016e7429adf6..5bfc02f450e6 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6190,21 +6190,20 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
{
struct qib_devdata *dd;
unsigned long val;
- int ret;
-
+ char *n;
if (strlen(str) >= MAX_ATTEN_LEN) {
pr_info("txselect_values string too long\n");
return -ENOSPC;
}
- ret = kstrtoul(str, 0, &val);
- if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+ val = simple_strtoul(str, &n, 0);
+ if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
TXDDS_MFG_SZ)) {
pr_info("txselect_values must start with a number < %d\n",
TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
- return ret ? ret : -EINVAL;
+ return -EINVAL;
}
-
strcpy(txselect_list, str);
+
list_for_each_entry(dd, &qib_dev_list, list)
if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
set_no_qsfp_atten(dd, 1);
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
index 28874f8606f8..941d4d50d8e7 100644
--- a/drivers/infiniband/hw/qib/qib_mad.h
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -54,7 +54,7 @@ struct ib_node_info {
__be32 revision;
u8 local_port_num;
u8 vendor_id[3];
-} __attribute__ ((packed));
+} __packed;
struct ib_mad_notice_attr {
u8 generic_type;
@@ -73,7 +73,7 @@ struct ib_mad_notice_attr {
__be16 reserved;
__be16 lid; /* where violation happened */
u8 port_num; /* where violation happened */
- } __attribute__ ((packed)) ntc_129_131;
+ } __packed ntc_129_131;
struct {
__be16 reserved;
@@ -83,14 +83,14 @@ struct ib_mad_notice_attr {
__be32 new_cap_mask; /* new capability mask */
u8 reserved3;
u8 change_flags; /* low 3 bits only */
- } __attribute__ ((packed)) ntc_144;
+ } __packed ntc_144;
struct {
__be16 reserved;
__be16 lid; /* lid where sys guid changed */
__be16 reserved2;
__be64 new_sys_guid;
- } __attribute__ ((packed)) ntc_145;
+ } __packed ntc_145;
struct {
__be16 reserved;
@@ -104,7 +104,7 @@ struct ib_mad_notice_attr {
u8 reserved3;
u8 dr_trunc_hop;
u8 dr_rtn_path[30];
- } __attribute__ ((packed)) ntc_256;
+ } __packed ntc_256;
struct {
__be16 reserved;
@@ -115,7 +115,7 @@ struct ib_mad_notice_attr {
__be32 qp2; /* high 8 bits reserved */
union ib_gid gid1;
union ib_gid gid2;
- } __attribute__ ((packed)) ntc_257_258;
+ } __packed ntc_257_258;
} details;
};
@@ -209,7 +209,7 @@ struct ib_pma_portcounters_cong {
__be64 port_rcv_packets;
__be64 port_xmit_wait;
__be64 port_adr_events;
-} __attribute__ ((packed));
+} __packed;
#define IB_PMA_CONG_HW_CONTROL_TIMER 0x00
#define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 3f14009fb662..c8d9c4ab142b 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -51,8 +51,8 @@
* file calls, even though this violates some
* expectations of harmlessness.
*/
-static int qib_tune_pcie_caps(struct qib_devdata *);
-static int qib_tune_pcie_coalesce(struct qib_devdata *);
+static void qib_tune_pcie_caps(struct qib_devdata *);
+static void qib_tune_pcie_coalesce(struct qib_devdata *);
/*
* Do all the common PCIe setup and initialization.
@@ -476,30 +476,6 @@ void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
"pci_enable_device failed after reset: %d\n", r);
}
-/* code to adjust PCIe capabilities. */
-
-static int fld2val(int wd, int mask)
-{
- int lsbmask;
-
- if (!mask)
- return 0;
- wd &= mask;
- lsbmask = mask ^ (mask & (mask - 1));
- wd /= lsbmask;
- return wd;
-}
-
-static int val2fld(int wd, int mask)
-{
- int lsbmask;
-
- if (!mask)
- return 0;
- lsbmask = mask ^ (mask & (mask - 1));
- wd *= lsbmask;
- return wd;
-}
static int qib_pcie_coalesce;
module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
@@ -511,7 +487,7 @@ MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
* of these chipsets, with some BIOS settings, and enabling it on those
* systems may result in the system crashing, and/or data corruption.
*/
-static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
+static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
{
int r;
struct pci_dev *parent;
@@ -519,18 +495,18 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
u32 mask, bits, val;
if (!qib_pcie_coalesce)
- return 0;
+ return;
/* Find out supported and configured values for parent (root) */
parent = dd->pcidev->bus->self;
if (parent->bus->parent) {
qib_devinfo(dd->pcidev, "Parent not root\n");
- return 1;
+ return;
}
if (!pci_is_pcie(parent))
- return 1;
+ return;
if (parent->vendor != 0x8086)
- return 1;
+ return;
/*
* - bit 12: Max_rdcmp_Imt_EN: need to set to 1
@@ -563,13 +539,12 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
mask = (3U << 24) | (7U << 10);
} else {
/* not one of the chipsets that we know about */
- return 1;
+ return;
}
pci_read_config_dword(parent, 0x48, &val);
val &= ~mask;
val |= bits;
r = pci_write_config_dword(parent, 0x48, val);
- return 0;
}
/*
@@ -580,55 +555,44 @@ static int qib_pcie_caps;
module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
-static int qib_tune_pcie_caps(struct qib_devdata *dd)
+static void qib_tune_pcie_caps(struct qib_devdata *dd)
{
- int ret = 1; /* Assume the worst */
struct pci_dev *parent;
- u16 pcaps, pctl, ecaps, ectl;
- int rc_sup, ep_sup;
- int rc_cur, ep_cur;
+ u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
+ u16 rc_mrrs, ep_mrrs, max_mrrs;
/* Find out supported and configured values for parent (root) */
parent = dd->pcidev->bus->self;
- if (parent->bus->parent) {
+ if (!pci_is_root_bus(parent->bus)) {
qib_devinfo(dd->pcidev, "Parent not root\n");
- goto bail;
+ return;
}
if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
- goto bail;
- pcie_capability_read_word(parent, PCI_EXP_DEVCAP, &pcaps);
- pcie_capability_read_word(parent, PCI_EXP_DEVCTL, &pctl);
+ return;
+
+ rc_mpss = parent->pcie_mpss;
+ rc_mps = ffs(pcie_get_mps(parent)) - 8;
/* Find out supported and configured values for endpoint (us) */
- pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCAP, &ecaps);
- pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
+ ep_mpss = dd->pcidev->pcie_mpss;
+ ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;
- ret = 0;
/* Find max payload supported by root, endpoint */
- rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD);
- ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD);
- if (rc_sup > ep_sup)
- rc_sup = ep_sup;
-
- rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD);
- ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD);
+ if (rc_mpss > ep_mpss)
+ rc_mpss = ep_mpss;
/* If Supported greater than limit in module param, limit it */
- if (rc_sup > (qib_pcie_caps & 7))
- rc_sup = qib_pcie_caps & 7;
+ if (rc_mpss > (qib_pcie_caps & 7))
+ rc_mpss = qib_pcie_caps & 7;
/* If less than (allowed, supported), bump root payload */
- if (rc_sup > rc_cur) {
- rc_cur = rc_sup;
- pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) |
- val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD);
- pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl);
+ if (rc_mpss > rc_mps) {
+ rc_mps = rc_mpss;
+ pcie_set_mps(parent, 128 << rc_mps);
}
/* If less than (allowed, supported), bump endpoint payload */
- if (rc_sup > ep_cur) {
- ep_cur = rc_sup;
- ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) |
- val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD);
- pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl);
+ if (rc_mpss > ep_mps) {
+ ep_mps = rc_mpss;
+ pcie_set_mps(dd->pcidev, 128 << ep_mps);
}
/*
@@ -636,26 +600,22 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
* No field for max supported, but PCIe spec limits it to 4096,
* which is code '5' (log2(4096) - 7)
*/
- rc_sup = 5;
- if (rc_sup > ((qib_pcie_caps >> 4) & 7))
- rc_sup = (qib_pcie_caps >> 4) & 7;
- rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ);
- ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ);
-
- if (rc_sup > rc_cur) {
- rc_cur = rc_sup;
- pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
- val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
- pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl);
+ max_mrrs = 5;
+ if (max_mrrs > ((qib_pcie_caps >> 4) & 7))
+ max_mrrs = (qib_pcie_caps >> 4) & 7;
+
+ max_mrrs = 128 << max_mrrs;
+ rc_mrrs = pcie_get_readrq(parent);
+ ep_mrrs = pcie_get_readrq(dd->pcidev);
+
+ if (max_mrrs > rc_mrrs) {
+ rc_mrrs = max_mrrs;
+ pcie_set_readrq(parent, rc_mrrs);
}
- if (rc_sup > ep_cur) {
- ep_cur = rc_sup;
- ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
- val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
- pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl);
+ if (max_mrrs > ep_mrrs) {
+ ep_mrrs = max_mrrs;
+ pcie_set_readrq(dd->pcidev, ep_mrrs);
}
-bail:
- return ret;
}
/* End of PCIe capability tuning */
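
The qib_pcie.c hunks above delete the driver's private fld2val()/val2fld() bit-field helpers and let the PCI core do the work through pcie_get_mps()/pcie_set_mps() and pcie_get_readrq()/pcie_set_readrq(), which operate on byte counts rather than DEVCTL field codes. A minimal sketch of the same tuning pattern, with hypothetical device pointers and cap codes (not the driver's actual module parameters):

        #include <linux/pci.h>
        #include <linux/kernel.h>

        /* Clamp endpoint and root port to a common Max_Payload_Size and cap
         * the Max_Read_Request_Size; 'ep', 'parent' and the cap codes are
         * assumptions for the example. */
        static void example_tune_mps_mrrs(struct pci_dev *ep,
                                          struct pci_dev *parent,
                                          int payload_cap, int readrq_cap)
        {
                int mpss = min(parent->pcie_mpss, ep->pcie_mpss);
                int mrrs = 128 << readrq_cap;   /* code 5 == 4096 bytes */

                if (mpss > payload_cap)
                        mpss = payload_cap;

                /* the helpers work in bytes, so convert the code first */
                if ((128 << mpss) > pcie_get_mps(parent))
                        pcie_set_mps(parent, 128 << mpss);
                if ((128 << mpss) > pcie_get_mps(ep))
                        pcie_set_mps(ep, 128 << mpss);

                if (mrrs > pcie_get_readrq(parent))
                        pcie_set_readrq(parent, mrrs);
                if (mrrs > pcie_get_readrq(ep))
                        pcie_set_readrq(ep, mrrs);
        }
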
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index d0a0ea0c14d6..165aee2ca8a0 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -594,8 +594,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
else
j = npages;
- ret = get_user_pages(current, current->mm, addr,
- j, 0, 1, pages, NULL);
+ ret = get_user_pages_fast(addr, j, 0, pages);
if (ret != j) {
i = 0;
j = ret;
@@ -1294,11 +1293,8 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
int mxp = 8;
int ndesc = 0;
- down_write(&current->mm->mmap_sem);
ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
iov, dim, &list, &mxp, &ndesc);
- up_write(&current->mm->mmap_sem);
-
if (ret < 0)
goto done_unlock;
else {
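
The qib_user_sdma.c change above replaces get_user_pages() under mmap_sem with get_user_pages_fast(), which handles its own locking, so the down_write()/up_write() around the call can go away. The calling pattern reduced to a sketch with hypothetical buffer parameters (using the four-argument signature of this kernel generation):

        #include <linux/mm.h>
        #include <linux/errno.h>

        /* Pin 'npages' user pages read-only starting at 'addr'; the caller
         * releases them with put_page() when the DMA is done. */
        static int example_pin_user_pages(unsigned long addr, int npages,
                                          struct page **pages)
        {
                int got, i;

                /* no mmap_sem needed: get_user_pages_fast() locks internally */
                got = get_user_pages_fast(addr, npages, 0 /* !write */, pages);
                if (got < 0)
                        return got;
                if (got != npages) {
                        for (i = 0; i < got; i++)
                                put_page(pages[i]);
                        return -EFAULT;
                }
                return 0;
        }
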
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 012e2c7575ad..a01c7d2cf541 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -150,14 +150,14 @@ struct ib_reth {
__be64 vaddr;
__be32 rkey;
__be32 length;
-} __attribute__ ((packed));
+} __packed;
struct ib_atomic_eth {
__be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
__be32 rkey;
__be64 swap_data;
__be64 compare_data;
-} __attribute__ ((packed));
+} __packed;
struct qib_other_headers {
__be32 bth[3];
@@ -178,7 +178,7 @@ struct qib_other_headers {
__be32 aeth;
struct ib_atomic_eth atomic_eth;
} u;
-} __attribute__ ((packed));
+} __packed;
/*
* Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
@@ -195,12 +195,12 @@ struct qib_ib_header {
} l;
struct qib_other_headers oth;
} u;
-} __attribute__ ((packed));
+} __packed;
struct qib_pio_header {
__le32 pbc[2];
struct qib_ib_header hdr;
-} __attribute__ ((packed));
+} __packed;
/*
* There is one struct qib_mcast for each multicast GID.
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index eb71aaa26a9a..c639f90cfda4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -101,6 +101,7 @@ enum {
IPOIB_MCAST_FLAG_SENDONLY = 1,
IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
IPOIB_MCAST_FLAG_ATTACHED = 3,
+ IPOIB_MCAST_JOIN_STARTED = 4,
MAX_SEND_CQE = 16,
IPOIB_CM_COPYBREAK = 256,
@@ -151,6 +152,7 @@ struct ipoib_mcast {
struct sk_buff_head pkt_queue;
struct net_device *dev;
+ struct completion done;
};
struct ipoib_rx_buf {
@@ -299,7 +301,7 @@ struct ipoib_dev_priv {
unsigned long flags;
- struct mutex vlan_mutex;
+ struct rw_semaphore vlan_rwsem;
struct rb_root path_tree;
struct list_head path_list;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7a3175400b2a..1377f85911c2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -140,7 +140,8 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct ipoib_cm_rx_buf *rx_ring,
int id, int frags,
- u64 mapping[IPOIB_CM_RX_SG])
+ u64 mapping[IPOIB_CM_RX_SG],
+ gfp_t gfp)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
@@ -164,7 +165,7 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
}
for (i = 0; i < frags; i++) {
- struct page *page = alloc_page(GFP_ATOMIC);
+ struct page *page = alloc_page(gfp);
if (!page)
goto partial_error;
@@ -382,7 +383,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
- rx->rx_ring[i].mapping)) {
+ rx->rx_ring[i].mapping,
+ GFP_KERNEL)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
ret = -ENOMEM;
goto err_count;
@@ -639,7 +641,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
(unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
- newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
+ newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
+ mapping, GFP_ATOMIC);
if (unlikely(!newskb)) {
/*
* If we can't allocate a new RX buffer, dump
@@ -1556,7 +1559,8 @@ int ipoib_cm_dev_init(struct net_device *dev)
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
priv->cm.num_frags - 1,
- priv->cm.srq_ring[i].mapping)) {
+ priv->cm.srq_ring[i].mapping,
+ GFP_KERNEL)) {
ipoib_warn(priv, "failed to allocate "
"receive buffer %d\n", i);
ipoib_cm_dev_cleanup(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 196b1d13cbcb..6a7003ddb0be 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -685,15 +685,13 @@ int ipoib_ib_dev_open(struct net_device *dev)
ret = ipoib_ib_post_receives(dev);
if (ret) {
ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
- ipoib_ib_dev_stop(dev, 1);
- return -1;
+ goto dev_stop;
}
ret = ipoib_cm_dev_open(dev);
if (ret) {
ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
- ipoib_ib_dev_stop(dev, 1);
- return -1;
+ goto dev_stop;
}
clear_bit(IPOIB_STOP_REAPER, &priv->flags);
@@ -704,6 +702,11 @@ int ipoib_ib_dev_open(struct net_device *dev)
napi_enable(&priv->napi);
return 0;
+dev_stop:
+ if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+ napi_enable(&priv->napi);
+ ipoib_ib_dev_stop(dev, 1);
+ return -1;
}
static void ipoib_pkey_dev_check_presence(struct net_device *dev)
@@ -746,10 +749,8 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
mutex_lock(&pkey_mutex);
set_bit(IPOIB_PKEY_STOP, &priv->flags);
- cancel_delayed_work(&priv->pkey_poll_task);
+ cancel_delayed_work_sync(&priv->pkey_poll_task);
mutex_unlock(&pkey_mutex);
- if (flush)
- flush_workqueue(ipoib_workqueue);
}
ipoib_mcast_stop_thread(dev, flush);
@@ -974,7 +975,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
u16 new_index;
int result;
- mutex_lock(&priv->vlan_mutex);
+ down_read(&priv->vlan_rwsem);
/*
* Flush any child interfaces too -- they might be up even if
@@ -983,7 +984,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
list_for_each_entry(cpriv, &priv->child_intfs, list)
__ipoib_ib_dev_flush(cpriv, level);
- mutex_unlock(&priv->vlan_mutex);
+ up_read(&priv->vlan_rwsem);
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
/* for non-child devices must check/update the pkey value here */
@@ -1081,6 +1082,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
ipoib_dbg(priv, "cleaning up ib_dev\n");
+ /*
+ * We must make sure there are no more (path) completions
+ * that may wish to touch priv fields that are no longer valid
+ */
+ ipoib_flush_paths(dev);
ipoib_mcast_stop_thread(dev, 1);
ipoib_mcast_dev_flush(dev);
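
The ipoib_ib.c hunk above switches the pkey poll cancellation from cancel_delayed_work() to cancel_delayed_work_sync(): the _sync variant also waits for a callback that is already running, which is why the follow-up flush_workqueue() can be dropped. A minimal sketch of the distinction, with a hypothetical self-rearming poller:

        #include <linux/workqueue.h>
        #include <linux/jiffies.h>
        #include <linux/kernel.h>

        struct example_ctx {
                struct delayed_work poll_work;
        };

        /* at setup: INIT_DELAYED_WORK(&ctx->poll_work, example_poll); */
        static void example_poll(struct work_struct *work)
        {
                struct example_ctx *ctx = container_of(to_delayed_work(work),
                                                       struct example_ctx,
                                                       poll_work);

                /* ... poll hardware ... */
                schedule_delayed_work(&ctx->poll_work, HZ);
        }

        static void example_stop(struct example_ctx *ctx)
        {
                /*
                 * cancel_delayed_work() only removes a pending timer and may
                 * return while example_poll() is still executing; the _sync
                 * variant waits for a running instance to finish as well.
                 */
                cancel_delayed_work_sync(&ctx->poll_work);
        }
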
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 82cec1af902c..d64ed05fb082 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -119,7 +119,7 @@ int ipoib_open(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring up any child interfaces too */
- mutex_lock(&priv->vlan_mutex);
+ down_read(&priv->vlan_rwsem);
list_for_each_entry(cpriv, &priv->child_intfs, list) {
int flags;
@@ -129,7 +129,7 @@ int ipoib_open(struct net_device *dev)
dev_change_flags(cpriv->dev, flags | IFF_UP);
}
- mutex_unlock(&priv->vlan_mutex);
+ up_read(&priv->vlan_rwsem);
}
netif_start_queue(dev);
@@ -162,7 +162,7 @@ static int ipoib_stop(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring down any child interfaces too */
- mutex_lock(&priv->vlan_mutex);
+ down_read(&priv->vlan_rwsem);
list_for_each_entry(cpriv, &priv->child_intfs, list) {
int flags;
@@ -172,7 +172,7 @@ static int ipoib_stop(struct net_device *dev)
dev_change_flags(cpriv->dev, flags & ~IFF_UP);
}
- mutex_unlock(&priv->vlan_mutex);
+ up_read(&priv->vlan_rwsem);
}
return 0;
@@ -1350,7 +1350,7 @@ void ipoib_setup(struct net_device *dev)
ipoib_set_ethtool_ops(dev);
- netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
+ netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);
dev->watchdog_timeo = HZ;
@@ -1372,7 +1372,7 @@ void ipoib_setup(struct net_device *dev)
spin_lock_init(&priv->lock);
- mutex_init(&priv->vlan_mutex);
+ init_rwsem(&priv->vlan_rwsem);
INIT_LIST_HEAD(&priv->path_list);
INIT_LIST_HEAD(&priv->child_intfs);
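
The ipoib hunks above (ipoib.h, ipoib_ib.c and ipoib_main.c so far) turn priv->vlan_mutex into an rw_semaphore: paths that only walk the child list (open, stop, flush) now take it shared, while child add/remove in the netlink and vlan code further below takes it exclusively. The locking pattern in isolation, with a hypothetical parent/child pair:

        #include <linux/rwsem.h>
        #include <linux/list.h>
        #include <linux/kernel.h>

        struct example_parent {
                struct rw_semaphore vlan_rwsem;
                struct list_head child_intfs;
        };

        struct example_child {
                struct list_head list;
        };

        /* at setup: init_rwsem(&p->vlan_rwsem); INIT_LIST_HEAD(&p->child_intfs); */

        /* readers may run concurrently while merely walking the list */
        static void example_for_each_child(struct example_parent *p,
                                           void (*fn)(struct example_child *))
        {
                struct example_child *c;

                down_read(&p->vlan_rwsem);
                list_for_each_entry(c, &p->child_intfs, list)
                        fn(c);
                up_read(&p->vlan_rwsem);
        }

        /* list modification still needs exclusive access */
        static void example_add_child(struct example_parent *p,
                                      struct example_child *c)
        {
                down_write(&p->vlan_rwsem);
                list_add_tail(&c->list, &p->child_intfs);
                up_write(&p->vlan_rwsem);
        }
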
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index cecb98a4c662..d4e005720d01 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -386,8 +386,10 @@ static int ipoib_mcast_join_complete(int status,
mcast->mcmember.mgid.raw, status);
/* We trap for port events ourselves. */
- if (status == -ENETRESET)
- return 0;
+ if (status == -ENETRESET) {
+ status = 0;
+ goto out;
+ }
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
@@ -407,7 +409,8 @@ static int ipoib_mcast_join_complete(int status,
if (mcast == priv->broadcast)
queue_work(ipoib_workqueue, &priv->carrier_on_task);
- return 0;
+ status = 0;
+ goto out;
}
if (mcast->logcount++ < 20) {
@@ -434,7 +437,8 @@ static int ipoib_mcast_join_complete(int status,
mcast->backoff * HZ);
spin_unlock_irq(&priv->lock);
mutex_unlock(&mcast_mutex);
-
+out:
+ complete(&mcast->done);
return status;
}
@@ -484,11 +488,15 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
}
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
+
mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
if (IS_ERR(mcast->mc)) {
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ complete(&mcast->done);
ret = PTR_ERR(mcast->mc);
ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
@@ -510,10 +518,18 @@ void ipoib_mcast_join_task(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, mcast_task.work);
struct net_device *dev = priv->dev;
+ struct ib_port_attr port_attr;
if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
return;
+ if (ib_query_port(priv->ca, priv->port, &port_attr) ||
+ port_attr.state != IB_PORT_ACTIVE) {
+ ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
+ port_attr.state);
+ return;
+ }
+
if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
ipoib_warn(priv, "ib_query_gid() failed\n");
else
@@ -751,6 +767,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
+ /* separate the wait for the join completion from the leave/free below */
+ list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
+ if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
+ wait_for_completion(&mcast->done);
+
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
ipoib_mcast_leave(dev, mcast);
ipoib_mcast_free(mcast);
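
The ipoib_multicast.c hunks above add a per-mcast completion plus an IPOIB_MCAST_JOIN_STARTED flag so that ipoib_mcast_dev_flush() can wait for an in-flight join callback before leaving and freeing the group. The handshake stripped to a sketch, with hypothetical names:

        #include <linux/completion.h>
        #include <linux/slab.h>

        struct example_join {
                struct completion done;
                /* ... request state ... */
        };

        /* issuer: arm the completion before the async operation can finish */
        static void example_start_join(struct example_join *j)
        {
                init_completion(&j->done);
                /* start_async_join(j, example_join_complete);  -- hypothetical */
        }

        /* callback: every exit path, including errors, must complete() */
        static void example_join_complete(struct example_join *j, int status)
        {
                /* ... handle status ... */
                complete(&j->done);
        }

        /* teardown: only wait if the join was actually started (the real code
         * tracks that with a "join started" bit before freeing) */
        static void example_flush_join(struct example_join *j)
        {
                wait_for_completion(&j->done);
                kfree(j);
        }
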
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index f81abe16cf09..c29b5c838833 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -142,10 +142,10 @@ static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head
priv = netdev_priv(dev);
ppriv = netdev_priv(priv->parent);
- mutex_lock(&ppriv->vlan_mutex);
+ down_write(&ppriv->vlan_rwsem);
unregister_netdevice_queue(dev, head);
list_del(&priv->list);
- mutex_unlock(&ppriv->vlan_mutex);
+ up_write(&ppriv->vlan_rwsem);
}
static size_t ipoib_get_size(const struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 8292554bccb5..9fad7b5ac8b9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -140,7 +140,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
if (!rtnl_trylock())
return restart_syscall();
- mutex_lock(&ppriv->vlan_mutex);
+ down_write(&ppriv->vlan_rwsem);
/*
* First ensure this isn't a duplicate. We check the parent device and
@@ -163,7 +163,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
out:
- mutex_unlock(&ppriv->vlan_mutex);
+ up_write(&ppriv->vlan_rwsem);
if (result)
free_netdev(priv->dev);
@@ -185,7 +185,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
if (!rtnl_trylock())
return restart_syscall();
- mutex_lock(&ppriv->vlan_mutex);
+
+ down_write(&ppriv->vlan_rwsem);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
@@ -195,7 +196,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
break;
}
}
- mutex_unlock(&ppriv->vlan_mutex);
+ up_write(&ppriv->vlan_rwsem);
+
rtnl_unlock();
if (dev) {
diff --git a/drivers/infiniband/ulp/isert/Kconfig b/drivers/infiniband/ulp/isert/Kconfig
index ce3fd32167dc..02f9759ebb1a 100644
--- a/drivers/infiniband/ulp/isert/Kconfig
+++ b/drivers/infiniband/ulp/isert/Kconfig
@@ -1,5 +1,5 @@
config INFINIBAND_ISERT
- tristate "iSCSI Extentions for RDMA (iSER) target support"
+ tristate "iSCSI Extensions for RDMA (iSER) target support"
depends on INET && INFINIBAND_ADDR_TRANS && TARGET_CORE && ISCSI_TARGET
---help---
- Support for iSCSI Extentions for RDMA (iSER) Target on Infiniband fabrics.
+ Support for iSCSI Extensions for RDMA (iSER) Target on Infiniband fabrics.
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 3591855cc5b5..6df23502059a 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -594,7 +594,7 @@ isert_connect_release(struct isert_conn *isert_conn)
pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
- if (device->use_frwr)
+ if (device && device->use_frwr)
isert_conn_free_frwr_pool(isert_conn);
if (isert_conn->conn_qp) {
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index f93baf8254c4..6aa660d188a3 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -86,6 +86,32 @@ module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
"Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
+static struct kernel_param_ops srp_tmo_ops;
+
+static int srp_reconnect_delay = 10;
+module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
+
+static int srp_fast_io_fail_tmo = 15;
+module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fast_io_fail_tmo,
+ "Number of seconds between the observation of a transport"
+ " layer error and failing all I/O. \"off\" means that this"
+ " functionality is disabled.");
+
+static int srp_dev_loss_tmo = 600;
+module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dev_loss_tmo,
+ "Maximum number of seconds that the SRP transport should"
+ " insulate transport layer errors. After this time has been"
+ " exceeded the SCSI host is removed. Should be"
+ " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
+ " if fast_io_fail_tmo has not been set. \"off\" means that"
+ " this functionality is disabled.");
+
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
@@ -102,6 +128,48 @@ static struct ib_client srp_client = {
static struct ib_sa_client srp_sa_client;
+static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
+{
+ int tmo = *(int *)kp->arg;
+
+ if (tmo >= 0)
+ return sprintf(buffer, "%d", tmo);
+ else
+ return sprintf(buffer, "off");
+}
+
+static int srp_tmo_set(const char *val, const struct kernel_param *kp)
+{
+ int tmo, res;
+
+ if (strncmp(val, "off", 3) != 0) {
+ res = kstrtoint(val, 0, &tmo);
+ if (res)
+ goto out;
+ } else {
+ tmo = -1;
+ }
+ if (kp->arg == &srp_reconnect_delay)
+ res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
+ srp_dev_loss_tmo);
+ else if (kp->arg == &srp_fast_io_fail_tmo)
+ res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
+ else
+ res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
+ tmo);
+ if (res)
+ goto out;
+ *(int *)kp->arg = tmo;
+
+out:
+ return res;
+}
+
+static struct kernel_param_ops srp_tmo_ops = {
+ .get = srp_tmo_get,
+ .set = srp_tmo_set,
+};
+
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
return (struct srp_target_port *) host->hostdata;
@@ -231,16 +299,16 @@ static int srp_create_target_ib(struct srp_target_port *target)
return -ENOMEM;
recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_recv_completion, NULL, target, SRP_RQ_SIZE,
- target->comp_vector);
+ srp_recv_completion, NULL, target,
+ target->queue_size, target->comp_vector);
if (IS_ERR(recv_cq)) {
ret = PTR_ERR(recv_cq);
goto err;
}
send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_send_completion, NULL, target, SRP_SQ_SIZE,
- target->comp_vector);
+ srp_send_completion, NULL, target,
+ target->queue_size, target->comp_vector);
if (IS_ERR(send_cq)) {
ret = PTR_ERR(send_cq);
goto err_recv_cq;
@@ -249,8 +317,8 @@ static int srp_create_target_ib(struct srp_target_port *target)
ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
init_attr->event_handler = srp_qp_event;
- init_attr->cap.max_send_wr = SRP_SQ_SIZE;
- init_attr->cap.max_recv_wr = SRP_RQ_SIZE;
+ init_attr->cap.max_send_wr = target->queue_size;
+ init_attr->cap.max_recv_wr = target->queue_size;
init_attr->cap.max_recv_sge = 1;
init_attr->cap.max_send_sge = 1;
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
@@ -296,6 +364,10 @@ err:
return ret;
}
+/*
+ * Note: this function may be called without srp_alloc_iu_bufs() having been
+ * invoked. Hence the target->[rt]x_ring checks.
+ */
static void srp_free_target_ib(struct srp_target_port *target)
{
int i;
@@ -307,10 +379,18 @@ static void srp_free_target_ib(struct srp_target_port *target)
target->qp = NULL;
target->send_cq = target->recv_cq = NULL;
- for (i = 0; i < SRP_RQ_SIZE; ++i)
- srp_free_iu(target->srp_host, target->rx_ring[i]);
- for (i = 0; i < SRP_SQ_SIZE; ++i)
- srp_free_iu(target->srp_host, target->tx_ring[i]);
+ if (target->rx_ring) {
+ for (i = 0; i < target->queue_size; ++i)
+ srp_free_iu(target->srp_host, target->rx_ring[i]);
+ kfree(target->rx_ring);
+ target->rx_ring = NULL;
+ }
+ if (target->tx_ring) {
+ for (i = 0; i < target->queue_size; ++i)
+ srp_free_iu(target->srp_host, target->tx_ring[i]);
+ kfree(target->tx_ring);
+ target->tx_ring = NULL;
+ }
}
static void srp_path_rec_completion(int status,
@@ -390,7 +470,7 @@ static int srp_send_req(struct srp_target_port *target)
req->param.responder_resources = 4;
req->param.remote_cm_response_timeout = 20;
req->param.local_cm_response_timeout = 20;
- req->param.retry_count = 7;
+ req->param.retry_count = target->tl_retry_count;
req->param.rnr_retry_count = 7;
req->param.max_cm_retries = 15;
@@ -496,7 +576,11 @@ static void srp_free_req_data(struct srp_target_port *target)
struct srp_request *req;
int i;
- for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
+ if (!target->req_ring)
+ return;
+
+ for (i = 0; i < target->req_ring_size; ++i) {
+ req = &target->req_ring[i];
kfree(req->fmr_list);
kfree(req->map_page);
if (req->indirect_dma_addr) {
@@ -506,6 +590,50 @@ static void srp_free_req_data(struct srp_target_port *target)
}
kfree(req->indirect_desc);
}
+
+ kfree(target->req_ring);
+ target->req_ring = NULL;
+}
+
+static int srp_alloc_req_data(struct srp_target_port *target)
+{
+ struct srp_device *srp_dev = target->srp_host->srp_dev;
+ struct ib_device *ibdev = srp_dev->dev;
+ struct srp_request *req;
+ dma_addr_t dma_addr;
+ int i, ret = -ENOMEM;
+
+ INIT_LIST_HEAD(&target->free_reqs);
+
+ target->req_ring = kzalloc(target->req_ring_size *
+ sizeof(*target->req_ring), GFP_KERNEL);
+ if (!target->req_ring)
+ goto out;
+
+ for (i = 0; i < target->req_ring_size; ++i) {
+ req = &target->req_ring[i];
+ req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
+ GFP_KERNEL);
+ req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *),
+ GFP_KERNEL);
+ req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
+ if (!req->fmr_list || !req->map_page || !req->indirect_desc)
+ goto out;
+
+ dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
+ target->indirect_size,
+ DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ibdev, dma_addr))
+ goto out;
+
+ req->indirect_dma_addr = dma_addr;
+ req->index = i;
+ list_add_tail(&req->list, &target->free_reqs);
+ }
+ ret = 0;
+
+out:
+ return ret;
}
/**
@@ -528,11 +656,14 @@ static void srp_remove_target(struct srp_target_port *target)
WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
srp_del_scsi_host_attr(target->scsi_host);
+ srp_rport_get(target->rport);
srp_remove_host(target->scsi_host);
scsi_remove_host(target->scsi_host);
srp_disconnect_target(target);
ib_destroy_cm_id(target->cm_id);
srp_free_target_ib(target);
+ cancel_work_sync(&target->tl_err_work);
+ srp_rport_put(target->rport);
srp_free_req_data(target);
scsi_host_put(target->scsi_host);
}
@@ -686,23 +817,42 @@ static void srp_free_req(struct srp_target_port *target,
spin_unlock_irqrestore(&target->lock, flags);
}
-static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
+static void srp_finish_req(struct srp_target_port *target,
+ struct srp_request *req, int result)
{
struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
if (scmnd) {
srp_free_req(target, req, scmnd, 0);
- scmnd->result = DID_RESET << 16;
+ scmnd->result = result;
scmnd->scsi_done(scmnd);
}
}
-static int srp_reconnect_target(struct srp_target_port *target)
+static void srp_terminate_io(struct srp_rport *rport)
{
- struct Scsi_Host *shost = target->scsi_host;
- int i, ret;
+ struct srp_target_port *target = rport->lld_data;
+ int i;
+
+ for (i = 0; i < target->req_ring_size; ++i) {
+ struct srp_request *req = &target->req_ring[i];
+ srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
+ }
+}
- scsi_target_block(&shost->shost_gendev);
+/*
+ * It is up to the caller to ensure that srp_rport_reconnect() calls are
+ * serialized and that no concurrent srp_queuecommand(), srp_abort(),
+ * srp_reset_device() or srp_reset_host() calls will occur while this function
+ * is in progress. One way to ensure this is not to call this function
+ * directly but to call srp_reconnect_rport() instead, since that function
+ * serializes calls of this function via rport->mutex and also blocks
+ * srp_queuecommand() calls before invoking this function.
+ */
+static int srp_rport_reconnect(struct srp_rport *rport)
+{
+ struct srp_target_port *target = rport->lld_data;
+ int i, ret;
srp_disconnect_target(target);
/*
@@ -721,41 +871,21 @@ static int srp_reconnect_target(struct srp_target_port *target)
else
srp_create_target_ib(target);
- for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+ for (i = 0; i < target->req_ring_size; ++i) {
struct srp_request *req = &target->req_ring[i];
- if (req->scmnd)
- srp_reset_req(target, req);
+ srp_finish_req(target, req, DID_RESET << 16);
}
INIT_LIST_HEAD(&target->free_tx);
- for (i = 0; i < SRP_SQ_SIZE; ++i)
+ for (i = 0; i < target->queue_size; ++i)
list_add(&target->tx_ring[i]->list, &target->free_tx);
if (ret == 0)
ret = srp_connect_target(target);
- scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
- SDEV_TRANSPORT_OFFLINE);
- target->transport_offline = !!ret;
-
- if (ret)
- goto err;
-
- shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n");
-
- return ret;
-
-err:
- shost_printk(KERN_ERR, target->scsi_host,
- PFX "reconnect failed (%d), removing target port.\n", ret);
-
- /*
- * We couldn't reconnect, so kill our target port off.
- * However, we have to defer the real removal because we
- * are in the context of the SCSI error handler now, which
- * will deadlock if we call scsi_remove_host().
- */
- srp_queue_remove_work(target);
+ if (ret == 0)
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "reconnect succeeded\n");
return ret;
}
@@ -1302,6 +1432,21 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
PFX "Recv failed with error code %d\n", res);
}
+/**
+ * srp_tl_err_work() - handle a transport layer error
+ *
+ * Note: This function may get invoked before the rport has been created,
+ * hence the target->rport test.
+ */
+static void srp_tl_err_work(struct work_struct *work)
+{
+ struct srp_target_port *target;
+
+ target = container_of(work, struct srp_target_port, tl_err_work);
+ if (target->rport)
+ srp_start_tl_fail_timers(target->rport);
+}
+
static void srp_handle_qp_err(enum ib_wc_status wc_status,
enum ib_wc_opcode wc_opcode,
struct srp_target_port *target)
@@ -1311,6 +1456,7 @@ static void srp_handle_qp_err(enum ib_wc_status wc_status,
PFX "failed %s status %d\n",
wc_opcode & IB_WC_RECV ? "receive" : "send",
wc_status);
+ queue_work(system_long_wq, &target->tl_err_work);
}
target->qp_in_error = true;
}
@@ -1349,17 +1495,29 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(shost);
+ struct srp_rport *rport = target->rport;
struct srp_request *req;
struct srp_iu *iu;
struct srp_cmd *cmd;
struct ib_device *dev;
unsigned long flags;
- int len;
+ int len, result;
+ const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
- if (unlikely(target->transport_offline)) {
- scmnd->result = DID_NO_CONNECT << 16;
+ /*
+ * The SCSI EH thread is the only context from which srp_queuecommand()
+ * can get invoked for blocked devices (SDEV_BLOCK /
+ * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
+ * locking the rport mutex if invoked from inside the SCSI EH.
+ */
+ if (in_scsi_eh)
+ mutex_lock(&rport->mutex);
+
+ result = srp_chkready(target->rport);
+ if (unlikely(result)) {
+ scmnd->result = result;
scmnd->scsi_done(scmnd);
- return 0;
+ goto unlock_rport;
}
spin_lock_irqsave(&target->lock, flags);
@@ -1404,6 +1562,10 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
goto err_unmap;
}
+unlock_rport:
+ if (in_scsi_eh)
+ mutex_unlock(&rport->mutex);
+
return 0;
err_unmap:
@@ -1418,14 +1580,30 @@ err_iu:
err_unlock:
spin_unlock_irqrestore(&target->lock, flags);
+ if (in_scsi_eh)
+ mutex_unlock(&rport->mutex);
+
return SCSI_MLQUEUE_HOST_BUSY;
}
+/*
+ * Note: the resources allocated in this function are freed in
+ * srp_free_target_ib().
+ */
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
int i;
- for (i = 0; i < SRP_RQ_SIZE; ++i) {
+ target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
+ GFP_KERNEL);
+ if (!target->rx_ring)
+ goto err_no_ring;
+ target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
+ GFP_KERNEL);
+ if (!target->tx_ring)
+ goto err_no_ring;
+
+ for (i = 0; i < target->queue_size; ++i) {
target->rx_ring[i] = srp_alloc_iu(target->srp_host,
target->max_ti_iu_len,
GFP_KERNEL, DMA_FROM_DEVICE);
@@ -1433,7 +1611,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
goto err;
}
- for (i = 0; i < SRP_SQ_SIZE; ++i) {
+ for (i = 0; i < target->queue_size; ++i) {
target->tx_ring[i] = srp_alloc_iu(target->srp_host,
target->max_iu_len,
GFP_KERNEL, DMA_TO_DEVICE);
@@ -1446,16 +1624,18 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
return 0;
err:
- for (i = 0; i < SRP_RQ_SIZE; ++i) {
+ for (i = 0; i < target->queue_size; ++i) {
srp_free_iu(target->srp_host, target->rx_ring[i]);
- target->rx_ring[i] = NULL;
- }
-
- for (i = 0; i < SRP_SQ_SIZE; ++i) {
srp_free_iu(target->srp_host, target->tx_ring[i]);
- target->tx_ring[i] = NULL;
}
+
+err_no_ring:
+ kfree(target->tx_ring);
+ target->tx_ring = NULL;
+ kfree(target->rx_ring);
+ target->rx_ring = NULL;
+
return -ENOMEM;
}
@@ -1506,6 +1686,9 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
target->scsi_host->can_queue
= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
target->scsi_host->can_queue);
+ target->scsi_host->cmd_per_lun
+ = min_t(int, target->scsi_host->can_queue,
+ target->scsi_host->cmd_per_lun);
} else {
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
@@ -1513,7 +1696,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
goto error;
}
- if (!target->rx_ring[0]) {
+ if (!target->rx_ring) {
ret = srp_alloc_iu_bufs(target);
if (ret)
goto error;
@@ -1533,7 +1716,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
if (ret)
goto error_free;
- for (i = 0; i < SRP_RQ_SIZE; i++) {
+ for (i = 0; i < target->queue_size; i++) {
struct srp_iu *iu = target->rx_ring[i];
ret = srp_post_recv(target, iu);
if (ret)
@@ -1672,6 +1855,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
if (ib_send_cm_drep(cm_id, NULL, 0))
shost_printk(KERN_ERR, target->scsi_host,
PFX "Sending CM DREP failed\n");
+ queue_work(system_long_wq, &target->tl_err_work);
break;
case IB_CM_TIMEWAIT_EXIT:
@@ -1701,6 +1885,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
static int srp_send_tsk_mgmt(struct srp_target_port *target,
u64 req_tag, unsigned int lun, u8 func)
{
+ struct srp_rport *rport = target->rport;
struct ib_device *dev = target->srp_host->srp_dev->dev;
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
@@ -1710,12 +1895,20 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
init_completion(&target->tsk_mgmt_done);
+ /*
+ * Lock the rport mutex to avoid that srp_create_target_ib() is
+ * invoked while a task management function is being sent.
+ */
+ mutex_lock(&rport->mutex);
spin_lock_irq(&target->lock);
iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
spin_unlock_irq(&target->lock);
- if (!iu)
+ if (!iu) {
+ mutex_unlock(&rport->mutex);
+
return -1;
+ }
ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
DMA_TO_DEVICE);
@@ -1732,8 +1925,11 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
DMA_TO_DEVICE);
if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
+ mutex_unlock(&rport->mutex);
+
return -1;
}
+ mutex_unlock(&rport->mutex);
if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
@@ -1755,7 +1951,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
SRP_TSK_ABORT_TASK) == 0)
ret = SUCCESS;
- else if (target->transport_offline)
+ else if (target->rport->state == SRP_RPORT_LOST)
ret = FAST_IO_FAIL;
else
ret = FAILED;
@@ -1779,10 +1975,10 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
if (target->tsk_mgmt_status)
return FAILED;
- for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+ for (i = 0; i < target->req_ring_size; ++i) {
struct srp_request *req = &target->req_ring[i];
if (req->scmnd && req->scmnd->device == scmnd->device)
- srp_reset_req(target, req);
+ srp_finish_req(target, req, DID_RESET << 16);
}
return SUCCESS;
@@ -1791,14 +1987,10 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
- int ret = FAILED;
shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
- if (!srp_reconnect_target(target))
- ret = SUCCESS;
-
- return ret;
+ return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}
static int srp_slave_configure(struct scsi_device *sdev)
@@ -1851,6 +2043,14 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}
+static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sprintf(buf, "%pI6\n", target->path.sgid.raw);
+}
+
static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1907,6 +2107,14 @@ static ssize_t show_comp_vector(struct device *dev,
return sprintf(buf, "%d\n", target->comp_vector);
}
+static ssize_t show_tl_retry_count(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sprintf(buf, "%d\n", target->tl_retry_count);
+}
+
static ssize_t show_cmd_sg_entries(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1927,6 +2135,7 @@ static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
+static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
@@ -1934,6 +2143,7 @@ static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
+static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
@@ -1942,6 +2152,7 @@ static struct device_attribute *srp_host_attrs[] = {
&dev_attr_ioc_guid,
&dev_attr_service_id,
&dev_attr_pkey,
+ &dev_attr_sgid,
&dev_attr_dgid,
&dev_attr_orig_dgid,
&dev_attr_req_lim,
@@ -1949,6 +2160,7 @@ static struct device_attribute *srp_host_attrs[] = {
&dev_attr_local_ib_port,
&dev_attr_local_ib_device,
&dev_attr_comp_vector,
+ &dev_attr_tl_retry_count,
&dev_attr_cmd_sg_entries,
&dev_attr_allow_ext_sg,
NULL
@@ -1966,9 +2178,9 @@ static struct scsi_host_template srp_template = {
.eh_host_reset_handler = srp_reset_host,
.skip_settle_delay = true,
.sg_tablesize = SRP_DEF_SG_TABLESIZE,
- .can_queue = SRP_CMD_SQ_SIZE,
+ .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
.this_id = -1,
- .cmd_per_lun = SRP_CMD_SQ_SIZE,
+ .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = srp_host_attrs
};
@@ -1994,6 +2206,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
}
rport->lld_data = target;
+ target->rport = rport;
spin_lock(&host->target_lock);
list_add_tail(&target->list, &host->target_list);
@@ -2073,6 +2286,8 @@ enum {
SRP_OPT_ALLOW_EXT_SG = 1 << 10,
SRP_OPT_SG_TABLESIZE = 1 << 11,
SRP_OPT_COMP_VECTOR = 1 << 12,
+ SRP_OPT_TL_RETRY_COUNT = 1 << 13,
+ SRP_OPT_QUEUE_SIZE = 1 << 14,
SRP_OPT_ALL = (SRP_OPT_ID_EXT |
SRP_OPT_IOC_GUID |
SRP_OPT_DGID |
@@ -2094,6 +2309,8 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
{ SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
{ SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
+ { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
+ { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
{ SRP_OPT_ERR, NULL }
};
@@ -2188,13 +2405,25 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
target->scsi_host->max_sectors = token;
break;
+ case SRP_OPT_QUEUE_SIZE:
+ if (match_int(args, &token) || token < 1) {
+ pr_warn("bad queue_size parameter '%s'\n", p);
+ goto out;
+ }
+ target->scsi_host->can_queue = token;
+ target->queue_size = token + SRP_RSP_SQ_SIZE +
+ SRP_TSK_MGMT_SQ_SIZE;
+ if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
+ target->scsi_host->cmd_per_lun = token;
+ break;
+
case SRP_OPT_MAX_CMD_PER_LUN:
- if (match_int(args, &token)) {
+ if (match_int(args, &token) || token < 1) {
pr_warn("bad max cmd_per_lun parameter '%s'\n",
p);
goto out;
}
- target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
+ target->scsi_host->cmd_per_lun = token;
break;
case SRP_OPT_IO_CLASS:
@@ -2257,6 +2486,15 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
target->comp_vector = token;
break;
+ case SRP_OPT_TL_RETRY_COUNT:
+ if (match_int(args, &token) || token < 2 || token > 7) {
+ pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
+ p);
+ goto out;
+ }
+ target->tl_retry_count = token;
+ break;
+
default:
pr_warn("unknown parameter or missing value '%s' in target creation request\n",
p);
@@ -2273,6 +2511,12 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
pr_warn("target creation request is missing parameter '%s'\n",
srp_opt_tokens[i].pattern);
+ if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
+ && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
+ pr_warn("cmd_per_lun = %d > queue_size = %d\n",
+ target->scsi_host->cmd_per_lun,
+ target->scsi_host->can_queue);
+
out:
kfree(options);
return ret;
@@ -2287,8 +2531,7 @@ static ssize_t srp_create_target(struct device *dev,
struct Scsi_Host *target_host;
struct srp_target_port *target;
struct ib_device *ibdev = host->srp_dev->dev;
- dma_addr_t dma_addr;
- int i, ret;
+ int ret;
target_host = scsi_host_alloc(&srp_template,
sizeof (struct srp_target_port));
@@ -2311,11 +2554,15 @@ static ssize_t srp_create_target(struct device *dev,
target->cmd_sg_cnt = cmd_sg_entries;
target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
target->allow_ext_sg = allow_ext_sg;
+ target->tl_retry_count = 7;
+ target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
ret = srp_parse_options(buf, target);
if (ret)
goto err;
+ target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
+
if (!srp_conn_unique(target->srp_host, target)) {
shost_printk(KERN_INFO, target->scsi_host,
PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
@@ -2339,31 +2586,13 @@ static ssize_t srp_create_target(struct device *dev,
sizeof (struct srp_indirect_buf) +
target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
+ INIT_WORK(&target->tl_err_work, srp_tl_err_work);
INIT_WORK(&target->remove_work, srp_remove_work);
spin_lock_init(&target->lock);
INIT_LIST_HEAD(&target->free_tx);
- INIT_LIST_HEAD(&target->free_reqs);
- for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
- struct srp_request *req = &target->req_ring[i];
-
- req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
- GFP_KERNEL);
- req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
- GFP_KERNEL);
- req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
- if (!req->fmr_list || !req->map_page || !req->indirect_desc)
- goto err_free_mem;
-
- dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
- target->indirect_size,
- DMA_TO_DEVICE);
- if (ib_dma_mapping_error(ibdev, dma_addr))
- goto err_free_mem;
-
- req->indirect_dma_addr = dma_addr;
- req->index = i;
- list_add_tail(&req->list, &target->free_reqs);
- }
+ ret = srp_alloc_req_data(target);
+ if (ret)
+ goto err_free_mem;
ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
@@ -2612,7 +2841,14 @@ static void srp_remove_one(struct ib_device *device)
}
static struct srp_function_template ib_srp_transport_functions = {
+ .has_rport_state = true,
+ .reset_timer_if_blocked = true,
+ .reconnect_delay = &srp_reconnect_delay,
+ .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
+ .dev_loss_tmo = &srp_dev_loss_tmo,
+ .reconnect = srp_rport_reconnect,
.rport_delete = srp_rport_delete,
+ .terminate_rport_io = srp_terminate_io,
};
static int __init srp_init_module(void)
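
The three new ib_srp module parameters above use module_param_cb() with a shared kernel_param_ops so that reads and writes go through srp_tmo_get()/srp_tmo_set(), which map the string "off" to -1 and cross-validate the three timeouts; they appear as writable files under /sys/module/ib_srp/parameters/. The generic shape of such a parameter, sketched with a hypothetical tunable and without the cross-validation:

        #include <linux/module.h>
        #include <linux/moduleparam.h>
        #include <linux/kernel.h>
        #include <linux/string.h>
        #include <linux/stat.h>

        /* hypothetical tunable: seconds, or "off" stored internally as -1 */
        static int example_tmo = 15;

        static int example_tmo_get(char *buffer, const struct kernel_param *kp)
        {
                int tmo = *(int *)kp->arg;

                return tmo >= 0 ? sprintf(buffer, "%d", tmo)
                                : sprintf(buffer, "off");
        }

        static int example_tmo_set(const char *val, const struct kernel_param *kp)
        {
                int tmo, res;

                if (strncmp(val, "off", 3) == 0) {
                        tmo = -1;
                } else {
                        res = kstrtoint(val, 0, &tmo);
                        if (res)
                                return res;
                }
                *(int *)kp->arg = tmo;
                return 0;
        }

        static struct kernel_param_ops example_tmo_ops = {
                .get = example_tmo_get,
                .set = example_tmo_set,
        };

        module_param_cb(example_tmo, &example_tmo_ops, &example_tmo,
                        S_IRUGO | S_IWUSR);
        MODULE_PARM_DESC(example_tmo, "Timeout in seconds, or \"off\"");
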
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index e641088c14dc..575681063f38 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -57,14 +57,11 @@ enum {
SRP_MAX_LUN = 512,
SRP_DEF_SG_TABLESIZE = 12,
- SRP_RQ_SHIFT = 6,
- SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
-
- SRP_SQ_SIZE = SRP_RQ_SIZE,
+ SRP_DEFAULT_QUEUE_SIZE = 1 << 6,
SRP_RSP_SQ_SIZE = 1,
- SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
SRP_TSK_MGMT_SQ_SIZE = 1,
- SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
+ SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
+ SRP_TSK_MGMT_SQ_SIZE,
SRP_TAG_NO_REQ = ~0U,
SRP_TAG_TSK_MGMT = 1U << 31,
@@ -140,7 +137,6 @@ struct srp_target_port {
unsigned int cmd_sg_cnt;
unsigned int indirect_size;
bool allow_ext_sg;
- bool transport_offline;
/* Everything above this point is used in the hot path of
* command processing. Try to keep them packed into cachelines.
@@ -153,10 +149,14 @@ struct srp_target_port {
u16 io_class;
struct srp_host *srp_host;
struct Scsi_Host *scsi_host;
+ struct srp_rport *rport;
char target_name[32];
unsigned int scsi_id;
unsigned int sg_tablesize;
+ int queue_size;
+ int req_ring_size;
int comp_vector;
+ int tl_retry_count;
struct ib_sa_path_rec path;
__be16 orig_dgid[8];
@@ -172,10 +172,11 @@ struct srp_target_port {
int zero_req_lim;
- struct srp_iu *tx_ring[SRP_SQ_SIZE];
- struct srp_iu *rx_ring[SRP_RQ_SIZE];
- struct srp_request req_ring[SRP_CMD_SQ_SIZE];
+ struct srp_iu **tx_ring;
+ struct srp_iu **rx_ring;
+ struct srp_request *req_ring;
+ struct work_struct tl_err_work;
struct work_struct remove_work;
struct list_head list;
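
With the constants above, the sizing introduced in the ib_srp.c diff works out as follows (a worked example, not additional driver code):

        /*
         * Default (no "queue_size=" login option):
         *   target->queue_size = SRP_DEFAULT_QUEUE_SIZE = 1 << 6             = 64
         *   can_queue          = 64 - SRP_RSP_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE = 62
         *   req_ring_size      = queue_size - SRP_TSK_MGMT_SQ_SIZE           = 63
         *
         * With "queue_size=100" in the target login string:
         *   can_queue          = 100
         *   target->queue_size = 100 + SRP_RSP_SQ_SIZE + SRP_TSK_MGMT_SQ_SIZE = 102
         *   req_ring_size      = 102 - SRP_TSK_MGMT_SQ_SIZE                  = 101
         */
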
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 38b523a1ece0..a11ff74a5127 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -80,7 +80,7 @@ config INPUT_MATRIXKMAP
comment "Userland interfaces"
config INPUT_MOUSEDEV
- tristate "Mouse interface" if EXPERT
+ tristate "Mouse interface"
default y
help
Say Y here if you want your mouse to be accessible as char devices
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index b6ded17b3be3..a06e12552886 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -18,6 +18,8 @@
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input/mt.h>
@@ -369,7 +371,11 @@ static int evdev_release(struct inode *inode, struct file *file)
mutex_unlock(&evdev->mutex);
evdev_detach_client(evdev, client);
- kfree(client);
+
+ if (is_vmalloc_addr(client))
+ vfree(client);
+ else
+ kfree(client);
evdev_close_device(evdev);
@@ -389,12 +395,14 @@ static int evdev_open(struct inode *inode, struct file *file)
{
struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
+ unsigned int size = sizeof(struct evdev_client) +
+ bufsize * sizeof(struct input_event);
struct evdev_client *client;
int error;
- client = kzalloc(sizeof(struct evdev_client) +
- bufsize * sizeof(struct input_event),
- GFP_KERNEL);
+ client = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (!client)
+ client = vzalloc(size);
if (!client)
return -ENOMEM;
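
The evdev.c hunks above try kzalloc() first (with __GFP_NOWARN, since failure is expected for large per-client buffers) and fall back to vzalloc(), then pick the matching free routine with is_vmalloc_addr(). The same pattern in isolation; note that later kernels provide kvzalloc()/kvfree() for exactly this:

        #include <linux/slab.h>
        #include <linux/vmalloc.h>
        #include <linux/mm.h>           /* is_vmalloc_addr() */

        static void *example_alloc_big(size_t size)
        {
                /* a kmalloc failure here is expected and handled, so no warning */
                void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

                if (!p)
                        p = vzalloc(size);
                return p;
        }

        static void example_free_big(void *p)
        {
                if (is_vmalloc_addr(p))
                        vfree(p);
                else
                        kfree(p);
        }
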
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 922a7fea2ce6..24c41ba7d4e0 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -422,14 +422,15 @@ static struct gameport *gameport_get_pending_child(struct gameport *parent)
* Gameport port operations
*/
-static ssize_t gameport_show_description(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t gameport_description_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct gameport *gameport = to_gameport_port(dev);
return sprintf(buf, "%s\n", gameport->name);
}
+static DEVICE_ATTR(description, S_IRUGO, gameport_description_show, NULL);
-static ssize_t gameport_rebind_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct gameport *gameport = to_gameport_port(dev);
struct device_driver *drv;
@@ -457,12 +458,14 @@ static ssize_t gameport_rebind_driver(struct device *dev, struct device_attribut
return error ? error : count;
}
+static DEVICE_ATTR_WO(drvctl);
-static struct device_attribute gameport_device_attrs[] = {
- __ATTR(description, S_IRUGO, gameport_show_description, NULL),
- __ATTR(drvctl, S_IWUSR, NULL, gameport_rebind_driver),
- __ATTR_NULL
+static struct attribute *gameport_device_attrs[] = {
+ &dev_attr_description.attr,
+ &dev_attr_drvctl.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(gameport_device);
static void gameport_release_port(struct device *dev)
{
@@ -750,7 +753,7 @@ static int gameport_bus_match(struct device *dev, struct device_driver *drv)
static struct bus_type gameport_bus = {
.name = "gameport",
- .dev_attrs = gameport_device_attrs,
+ .dev_groups = gameport_device_groups,
.drv_groups = gameport_driver_groups,
.match = gameport_bus_match,
.probe = gameport_driver_probe,
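
The gameport.c hunks above convert the bus from a dev_attrs array to the attribute-group machinery: each attribute becomes a named DEVICE_ATTR*, the pointers go into an attribute list, ATTRIBUTE_GROUPS() generates the group array, and the bus points at it through .dev_groups. The skeleton of that conversion, with hypothetical names:

        #include <linux/device.h>
        #include <linux/sysfs.h>
        #include <linux/stat.h>
        #include <linux/kernel.h>

        static ssize_t example_description_show(struct device *dev,
                                                struct device_attribute *attr,
                                                char *buf)
        {
                return sprintf(buf, "%s\n", dev_name(dev));
        }
        static DEVICE_ATTR(description, S_IRUGO, example_description_show, NULL);

        static struct attribute *example_device_attrs[] = {
                &dev_attr_description.attr,
                NULL,
        };
        ATTRIBUTE_GROUPS(example_device);       /* emits example_device_groups[] */

        static struct bus_type example_bus = {
                .name       = "example",
                .dev_groups = example_device_groups,
        };
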
diff --git a/drivers/input/input.c b/drivers/input/input.c
index c04469928925..846ccdd905b1 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1734,6 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
*/
struct input_dev *input_allocate_device(void)
{
+ static atomic_t input_no = ATOMIC_INIT(0);
struct input_dev *dev;
dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@@ -1743,9 +1744,13 @@ struct input_dev *input_allocate_device(void)
device_initialize(&dev->dev);
mutex_init(&dev->mutex);
spin_lock_init(&dev->event_lock);
+ init_timer(&dev->timer);
INIT_LIST_HEAD(&dev->h_list);
INIT_LIST_HEAD(&dev->node);
+ dev_set_name(&dev->dev, "input%ld",
+ (unsigned long) atomic_inc_return(&input_no) - 1);
+
__module_get(THIS_MODULE);
}
@@ -2019,7 +2024,6 @@ static void devm_input_device_unregister(struct device *dev, void *res)
*/
int input_register_device(struct input_dev *dev)
{
- static atomic_t input_no = ATOMIC_INIT(0);
struct input_devres *devres = NULL;
struct input_handler *handler;
unsigned int packet_size;
@@ -2048,7 +2052,7 @@ int input_register_device(struct input_dev *dev)
if (dev->hint_events_per_packet < packet_size)
dev->hint_events_per_packet = packet_size;
- dev->max_vals = max(dev->hint_events_per_packet, packet_size) + 2;
+ dev->max_vals = dev->hint_events_per_packet + 2;
dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
if (!dev->vals) {
error = -ENOMEM;
@@ -2059,7 +2063,6 @@ int input_register_device(struct input_dev *dev)
* If delay and period are pre-set by the driver, then autorepeating
* is handled by the driver itself and we don't do it in input.c.
*/
- init_timer(&dev->timer);
if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) {
dev->timer.data = (long) dev;
dev->timer.function = input_repeat_key;
@@ -2073,9 +2076,6 @@ int input_register_device(struct input_dev *dev)
if (!dev->setkeycode)
dev->setkeycode = input_default_setkeycode;
- dev_set_name(&dev->dev, "input%ld",
- (unsigned long) atomic_inc_return(&input_no) - 1);
-
error = device_add(&dev->dev);
if (error)
goto err_free_vals;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index c1edd39bc5ba..bb174c1a9886 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -2,7 +2,7 @@
# Input core configuration
#
menuconfig INPUT_KEYBOARD
- bool "Keyboards" if EXPERT || !X86
+ bool "Keyboards"
default y
help
Say Y here, and a list of supported keyboards will be displayed.
@@ -67,7 +67,7 @@ config KEYBOARD_ATARI
module will be called atakbd.
config KEYBOARD_ATKBD
- tristate "AT keyboard" if EXPERT || !X86
+ tristate "AT keyboard"
default y
select SERIO
select SERIO_LIBPS2
@@ -525,7 +525,7 @@ config KEYBOARD_SUNKBD
config KEYBOARD_SH_KEYSC
tristate "SuperH KEYSC keypad support"
- depends on SUPERH || ARCH_SHMOBILE
+ depends on SUPERH || ARM || COMPILE_TEST
help
Say Y here if you want to use a keypad attached to the KEYSC block
on SuperH processors such as sh7722 and sh7343.
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 440ce32462ba..2db13246eb8e 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -26,6 +26,7 @@
#include <linux/gpio_keys.h>
#include <linux/workqueue.h>
#include <linux/gpio.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/spinlock.h>
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index cd5ed9e22168..4e428199e580 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index 42181435fe67..8b1b01361ec6 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -383,7 +383,7 @@ static struct platform_driver lpc32xx_kscan_driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
.pm = &lpc32xx_kscan_pm_ops,
- .of_match_table = of_match_ptr(lpc32xx_kscan_match),
+ .of_match_table = lpc32xx_kscan_match,
}
};
diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
index b3e3edab6d9f..b31064981e96 100644
--- a/drivers/input/keyboard/nspire-keypad.c
+++ b/drivers/input/keyboard/nspire-keypad.c
@@ -143,8 +143,10 @@ static int nspire_keypad_open(struct input_dev *input)
return error;
error = nspire_keypad_chip_init(keypad);
- if (error)
+ if (error) {
+ clk_disable_unprepare(keypad->clk);
return error;
+ }
return 0;
}
@@ -267,7 +269,7 @@ static struct platform_driver nspire_keypad_driver = {
.driver = {
.name = "nspire-keypad",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(nspire_keypad_dt_match),
+ .of_match_table = nspire_keypad_dt_match,
},
.probe = nspire_keypad_probe,
};
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 134c3b404a54..186138c720c7 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -27,6 +27,7 @@
#include <linux/err.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -786,10 +787,17 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
- if (pdata)
+ if (pdata) {
error = pxa27x_keypad_build_keycode(keypad);
- else
+ } else {
error = pxa27x_keypad_build_keycode_from_dt(keypad);
+ /*
+ * Data that we get from DT resides in dynamically
+ * allocated memory so we need to update our pdata
+ * pointer.
+ */
+ pdata = keypad->pdata;
+ }
if (error) {
dev_err(&pdev->dev, "failed to build keycode\n");
goto failed_put_clk;
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 9cd20e6905a0..8508879f6faf 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -614,7 +614,7 @@ static int tegra_kbc_probe(struct platform_device *pdev)
unsigned int keymap_rows;
const struct of_device_id *match;
- match = of_match_device(of_match_ptr(tegra_kbc_of_match), &pdev->dev);
+ match = of_match_device(tegra_kbc_of_match, &pdev->dev);
kbc = devm_kzalloc(&pdev->dev, sizeof(*kbc), GFP_KERNEL);
if (!kbc) {
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index aa51baaa9b1e..5f4967d01bc3 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -156,7 +156,7 @@ config INPUT_MAX8925_ONKEY
config INPUT_MAX8997_HAPTIC
tristate "MAXIM MAX8997 haptic controller support"
- depends on HAVE_PWM && MFD_MAX8997
+ depends on PWM && HAVE_PWM && MFD_MAX8997
select INPUT_FF_MEMLESS
help
This option enables device driver support for the haptic controller
@@ -461,7 +461,7 @@ config INPUT_PCF8574
config INPUT_PWM_BEEPER
tristate "PWM beeper support"
- depends on HAVE_PWM || PWM
+ depends on PWM && HAVE_PWM
help
Say Y here to get support for PWM based beeper devices.
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 61891486067c..3a90b710e309 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -108,7 +108,6 @@ static int ad714x_spi_remove(struct spi_device *spi)
struct ad714x_chip *chip = spi_get_drvdata(spi);
ad714x_remove(chip);
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 082684e7f390..9365535ba7f1 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -351,7 +351,9 @@ static void cm109_urb_irq_callback(struct urb *urb)
if (status) {
if (status == -ESHUTDOWN)
return;
- dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status);
+ dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
+ __func__, status);
+ goto out;
}
/* Special keys */
@@ -418,8 +420,12 @@ static void cm109_urb_ctl_callback(struct urb *urb)
dev->ctl_data->byte[2],
dev->ctl_data->byte[3]);
- if (status)
- dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status);
+ if (status) {
+ if (status == -ESHUTDOWN)
+ return;
+ dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
+ __func__, status);
+ }
spin_lock(&dev->ctl_submit_lock);
@@ -427,7 +433,7 @@ static void cm109_urb_ctl_callback(struct urb *urb)
if (likely(!dev->shutdown)) {
- if (dev->buzzer_pending) {
+ if (dev->buzzer_pending || status) {
dev->buzzer_pending = 0;
dev->ctl_urb_pending = 1;
cm109_submit_buzz_toggle(dev);
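
The cm109 hunks above make both URB completion handlers log failures through dev_err_ratelimited() and skip processing of the (stale) buffer when the URB completed with an error. A bare-bones sketch of that shape for a generic interrupt-in completion handler; struct example_dev and the processing step are placeholders, only the error-handling pattern is taken from the hunks:

struct example_dev {			/* placeholder for the driver's device struct */
	struct usb_interface *intf;
};

static void example_urb_complete(struct urb *urb)
{
	struct example_dev *dev = urb->context;
	int status = urb->status;

	if (status) {
		if (status == -ESHUTDOWN)	/* device is going away, stop */
			return;
		dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
				    __func__, status);
		goto resubmit;			/* do not parse stale data */
	}

	/* ... process urb->transfer_buffer here ... */

resubmit:
	usb_submit_urb(urb, GFP_ATOMIC);
}
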
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index 4f77f87847e8..b5d71d245854 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -131,7 +131,6 @@ static int cobalt_buttons_probe(struct platform_device *pdev)
err_free_mem:
input_free_polled_device(poll_dev);
kfree(bdev);
- dev_set_drvdata(&pdev->dev, NULL);
return error;
}
@@ -144,7 +143,6 @@ static int cobalt_buttons_remove(struct platform_device *pdev)
input_free_polled_device(bdev->poll_dev);
iounmap(bdev->reg);
kfree(bdev);
- dev_set_drvdata(dev, NULL);
return 0;
}
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 2ff4d1c78ab8..940566e7be13 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -16,6 +16,7 @@
#include <linux/input.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index fb4f8ac3343b..83fff38b86b3 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -87,7 +87,6 @@ static int rb532_button_remove(struct platform_device *pdev)
input_unregister_polled_device(poll_dev);
input_free_polled_device(poll_dev);
- dev_set_drvdata(&pdev->dev, NULL);
return 0;
}
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 5b1aff825138..f920ba7ab51f 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -24,6 +24,7 @@
#include <linux/gpio.h>
#include <linux/rotary_encoder.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index 0621c367049a..7b8b03e0d0be 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -153,7 +153,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
.name = "sirfsoc-pwrc",
.owner = THIS_MODULE,
.pm = &sirfsoc_pwrc_pm_ops,
- .of_match_table = of_match_ptr(sirfsoc_pwrc_of_match),
+ .of_match_table = sirfsoc_pwrc_of_match,
}
};
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index a0a4bbaef02c..772835938a52 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -430,20 +430,30 @@ static int uinput_setup_device(struct uinput_device *udev,
return retval;
}
-static ssize_t uinput_inject_event(struct uinput_device *udev,
- const char __user *buffer, size_t count)
+static ssize_t uinput_inject_events(struct uinput_device *udev,
+ const char __user *buffer, size_t count)
{
struct input_event ev;
+ size_t bytes = 0;
- if (count < input_event_size())
+ if (count != 0 && count < input_event_size())
return -EINVAL;
- if (input_event_from_user(buffer, &ev))
- return -EFAULT;
+ while (bytes + input_event_size() <= count) {
+ /*
+ * Note that even if some events were fetched successfully
+ * we are still going to return EFAULT instead of partial
+ * count to let userspace know that it got its buffers
+ * all wrong.
+ */
+ if (input_event_from_user(buffer + bytes, &ev))
+ return -EFAULT;
- input_event(udev->dev, ev.type, ev.code, ev.value);
+ input_event(udev->dev, ev.type, ev.code, ev.value);
+ bytes += input_event_size();
+ }
- return input_event_size();
+ return bytes;
}
static ssize_t uinput_write(struct file *file, const char __user *buffer,
@@ -460,7 +470,7 @@ static ssize_t uinput_write(struct file *file, const char __user *buffer,
return retval;
retval = udev->state == UIST_CREATED ?
- uinput_inject_event(udev, buffer, count) :
+ uinput_inject_events(udev, buffer, count) :
uinput_setup_device(udev, buffer, count);
mutex_unlock(&udev->mutex);
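
The uinput hunks above change the write() path so that a single write can carry several struct input_event records instead of exactly one. A minimal userspace sketch of how a caller might batch a key press, release and SYN_REPORT into one write (a sketch only: it assumes an already created and configured uinput device, and the fd setup and ioctl sequence are omitted):

#include <linux/input.h>	/* struct input_event, EV_KEY, EV_SYN, SYN_REPORT */
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: emit press + release + sync as one write(). */
static int emit_key(int uinput_fd, unsigned short key_code)
{
	struct input_event ev[3];

	memset(ev, 0, sizeof(ev));
	ev[0].type = EV_KEY; ev[0].code = key_code; ev[0].value = 1;
	ev[1].type = EV_KEY; ev[1].code = key_code; ev[1].value = 0;
	ev[2].type = EV_SYN; ev[2].code = SYN_REPORT; ev[2].value = 0;

	/* With the patch the kernel consumes all three events and returns
	 * the number of bytes injected (or -EFAULT/-EINVAL as before). */
	return write(uinput_fd, ev, sizeof(ev)) == sizeof(ev) ? 0 : -1;
}
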
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 7c5d72a6a26a..24b362614ef4 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -103,6 +103,7 @@ static const struct alps_model_info alps_model_data[] = {
/* Dell Latitude E5500, E6400, E6500, Precision M4400 */
{ { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
+ { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_DUALPOINT }, /* Dell XT2 */
{ { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
{ { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
@@ -1792,7 +1793,7 @@ int alps_init(struct psmouse *psmouse)
snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
dev2->phys = priv->phys;
dev2->name = (priv->flags & ALPS_DUALPOINT) ?
- "DualPoint Stick" : "PS/2 Mouse";
+ "DualPoint Stick" : "ALPS PS/2 Device";
dev2->id.bustype = BUS_I8042;
dev2->id.vendor = 0x0002;
dev2->id.product = PSMOUSE_ALPS;
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index f51765fff054..a5869a856ea5 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -439,7 +439,7 @@ static int cypress_get_finger_count(unsigned char header_byte)
case 2: return 5;
default:
/* Invalid contact (e.g. palm). Ignore it. */
- return -1;
+ return 0;
}
}
@@ -452,17 +452,10 @@ static int cypress_parse_packet(struct psmouse *psmouse,
{
unsigned char *packet = psmouse->packet;
unsigned char header_byte = packet[0];
- int contact_cnt;
memset(report_data, 0, sizeof(struct cytp_report_data));
- contact_cnt = cypress_get_finger_count(header_byte);
-
- if (contact_cnt < 0) /* e.g. palm detect */
- return -EINVAL;
-
- report_data->contact_cnt = contact_cnt;
-
+ report_data->contact_cnt = cypress_get_finger_count(header_byte);
report_data->tap = (header_byte & ABS_MULTIFINGER_TAP) ? 1 : 0;
if (report_data->contact_cnt == 1) {
@@ -535,11 +528,9 @@ static void cypress_process_packet(struct psmouse *psmouse, bool zero_pkt)
int slots[CYTP_MAX_MT_SLOTS];
int n;
- if (cypress_parse_packet(psmouse, cytp, &report_data))
- return;
+ cypress_parse_packet(psmouse, cytp, &report_data);
n = report_data.contact_cnt;
-
if (n > CYTP_MAX_MT_SLOTS)
n = CYTP_MAX_MT_SLOTS;
@@ -605,10 +596,6 @@ static psmouse_ret_t cypress_validate_byte(struct psmouse *psmouse)
return PSMOUSE_BAD_DATA;
contact_cnt = cypress_get_finger_count(packet[0]);
-
- if (contact_cnt < 0)
- return PSMOUSE_BAD_DATA;
-
if (cytp->mode & CYTP_BIT_ABS_NO_PRESSURE)
cypress_set_packet_size(psmouse, contact_cnt == 2 ? 7 : 4);
else
@@ -679,15 +666,15 @@ int cypress_init(struct psmouse *psmouse)
{
struct cytp_data *cytp;
- cytp = (struct cytp_data *)kzalloc(sizeof(struct cytp_data), GFP_KERNEL);
- psmouse->private = (void *)cytp;
- if (cytp == NULL)
+ cytp = kzalloc(sizeof(struct cytp_data), GFP_KERNEL);
+ if (!cytp)
return -ENOMEM;
- cypress_reset(psmouse);
-
+ psmouse->private = cytp;
psmouse->pktsize = 8;
+ cypress_reset(psmouse);
+
if (cypress_query_hardware(psmouse)) {
psmouse_err(psmouse, "Unable to query Trackpad hardware.\n");
goto err_exit;
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 33b3e88fe4a2..8541f949778d 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -2,7 +2,7 @@
# Input core configuration
#
config SERIO
- tristate "Serial I/O support" if EXPERT || !X86
+ tristate "Serial I/O support"
default y
help
Say Yes here if you have any input device that uses serial I/O to
@@ -19,9 +19,9 @@ config SERIO
if SERIO
config SERIO_I8042
- tristate "i8042 PC Keyboard controller" if EXPERT || !X86
+ tristate "i8042 PC Keyboard controller"
default y
- depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
+ depends on !PARISC && (!ARM || FOOTBRIDGE_HOST) && \
(!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \
!ARC
help
@@ -170,7 +170,7 @@ config SERIO_MACEPS2
module will be called maceps2.
config SERIO_LIBPS2
- tristate "PS/2 driver library" if EXPERT
+ tristate "PS/2 driver library"
depends on SERIO_I8042 || SERIO_I8042=n
help
Say Y here if you are using a driver for device connected
@@ -266,4 +266,14 @@ config SERIO_OLPC_APSP
To compile this driver as a module, choose M here: the module will
be called olpc_apsp.
+config HYPERV_KEYBOARD
+ tristate "Microsoft Synthetic Keyboard driver"
+ depends on HYPERV
+ default HYPERV
+ help
+ Select this option to enable the Hyper-V Keyboard driver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called hyperv_keyboard.
+
endif
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index 12298b1c0e71..815d874fe724 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_SERIO_ALTERA_PS2) += altera_ps2.o
obj-$(CONFIG_SERIO_ARC_PS2) += arc_ps2.o
obj-$(CONFIG_SERIO_APBPS2) += apbps2.o
obj-$(CONFIG_SERIO_OLPC_APSP) += olpc_apsp.o
+obj-$(CONFIG_HYPERV_KEYBOARD) += hyperv-keyboard.o
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
new file mode 100644
index 000000000000..3a83c3c14b23
--- /dev/null
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -0,0 +1,437 @@
+/*
+ * Copyright (c) 2013, Microsoft Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/completion.h>
+#include <linux/hyperv.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+/*
+ * Current version 1.0
+ *
+ */
+#define SYNTH_KBD_VERSION_MAJOR 1
+#define SYNTH_KBD_VERSION_MINOR 0
+#define SYNTH_KBD_VERSION (SYNTH_KBD_VERSION_MINOR | \
+ (SYNTH_KBD_VERSION_MAJOR << 16))
+
+
+/*
+ * Message types in the synthetic input protocol
+ */
+enum synth_kbd_msg_type {
+ SYNTH_KBD_PROTOCOL_REQUEST = 1,
+ SYNTH_KBD_PROTOCOL_RESPONSE = 2,
+ SYNTH_KBD_EVENT = 3,
+ SYNTH_KBD_LED_INDICATORS = 4,
+};
+
+/*
+ * Basic message structures.
+ */
+struct synth_kbd_msg_hdr {
+ __le32 type;
+};
+
+struct synth_kbd_msg {
+ struct synth_kbd_msg_hdr header;
+ char data[]; /* Enclosed message */
+};
+
+union synth_kbd_version {
+ __le32 version;
+};
+
+/*
+ * Protocol messages
+ */
+struct synth_kbd_protocol_request {
+ struct synth_kbd_msg_hdr header;
+ union synth_kbd_version version_requested;
+};
+
+#define PROTOCOL_ACCEPTED BIT(0)
+struct synth_kbd_protocol_response {
+ struct synth_kbd_msg_hdr header;
+ __le32 proto_status;
+};
+
+#define IS_UNICODE BIT(0)
+#define IS_BREAK BIT(1)
+#define IS_E0 BIT(2)
+#define IS_E1 BIT(3)
+struct synth_kbd_keystroke {
+ struct synth_kbd_msg_hdr header;
+ __le16 make_code;
+ __le16 reserved0;
+ __le32 info; /* Additional information */
+};
+
+
+#define HK_MAXIMUM_MESSAGE_SIZE 256
+
+#define KBD_VSC_SEND_RING_BUFFER_SIZE (10 * PAGE_SIZE)
+#define KBD_VSC_RECV_RING_BUFFER_SIZE (10 * PAGE_SIZE)
+
+#define XTKBD_EMUL0 0xe0
+#define XTKBD_EMUL1 0xe1
+#define XTKBD_RELEASE 0x80
+
+
+/*
+ * Represents a keyboard device
+ */
+struct hv_kbd_dev {
+ struct hv_device *hv_dev;
+ struct serio *hv_serio;
+ struct synth_kbd_protocol_request protocol_req;
+ struct synth_kbd_protocol_response protocol_resp;
+ /* Synchronize the request/response if needed */
+ struct completion wait_event;
+ spinlock_t lock; /* protects 'started' field */
+ bool started;
+};
+
+static void hv_kbd_on_receive(struct hv_device *hv_dev,
+ struct synth_kbd_msg *msg, u32 msg_length)
+{
+ struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev);
+ struct synth_kbd_keystroke *ks_msg;
+ unsigned long flags;
+ u32 msg_type = __le32_to_cpu(msg->header.type);
+ u32 info;
+ u16 scan_code;
+
+ switch (msg_type) {
+ case SYNTH_KBD_PROTOCOL_RESPONSE:
+ /*
+ * Validate the information provided by the host.
+ * If the host is giving us a bogus packet,
+ * drop the packet (hoping the problem
+ * goes away).
+ */
+ if (msg_length < sizeof(struct synth_kbd_protocol_response)) {
+ dev_err(&hv_dev->device,
+ "Illegal protocol response packet (len: %d)\n",
+ msg_length);
+ break;
+ }
+
+ memcpy(&kbd_dev->protocol_resp, msg,
+ sizeof(struct synth_kbd_protocol_response));
+ complete(&kbd_dev->wait_event);
+ break;
+
+ case SYNTH_KBD_EVENT:
+ /*
+ * Validate the information provided by the host.
+ * If the host is giving us a bogus packet,
+ * drop the packet (hoping the problem
+ * goes away).
+ */
+ if (msg_length < sizeof(struct synth_kbd_keystroke)) {
+ dev_err(&hv_dev->device,
+ "Illegal keyboard event packet (len: %d)\n",
+ msg_length);
+ break;
+ }
+
+ ks_msg = (struct synth_kbd_keystroke *)msg;
+ info = __le32_to_cpu(ks_msg->info);
+
+ /*
+ * Inject the information through the serio interrupt.
+ */
+ spin_lock_irqsave(&kbd_dev->lock, flags);
+ if (kbd_dev->started) {
+ if (info & IS_E0)
+ serio_interrupt(kbd_dev->hv_serio,
+ XTKBD_EMUL0, 0);
+
+ scan_code = __le16_to_cpu(ks_msg->make_code);
+ if (info & IS_BREAK)
+ scan_code |= XTKBD_RELEASE;
+
+ serio_interrupt(kbd_dev->hv_serio, scan_code, 0);
+ }
+ spin_unlock_irqrestore(&kbd_dev->lock, flags);
+ break;
+
+ default:
+ dev_err(&hv_dev->device,
+ "unhandled message type %d\n", msg_type);
+ }
+}
+
+static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
+ struct vmpacket_descriptor *desc,
+ u32 bytes_recvd,
+ u64 req_id)
+{
+ struct synth_kbd_msg *msg;
+ u32 msg_sz;
+
+ switch (desc->type) {
+ case VM_PKT_COMP:
+ break;
+
+ case VM_PKT_DATA_INBAND:
+ /*
+ * We have a packet that has "inband" data. The API used
+ * for retrieving the packet guarantees that the complete
+ * packet is read. So, minimally, we should be able to
+ * parse the payload header safely (assuming that the host
+ * can be trusted). Trusting the host seems to be a
+ * reasonable assumption because in a virtualized
+ * environment there is not a whole lot you can do if you
+ * don't trust the host.
+ *
+ * Nonetheless, let us validate whether the host can be trusted
+ * (in a trivial way). The interesting aspect of this
+ * validation is how to recover if we discover that the
+ * host is not to be trusted. Simply dropping the packet
+ * does not seem like an appropriate recovery. In the interest
+ * of failing fast, it may be better to crash the guest.
+ * For now, I will just drop the packet!
+ */
+
+ msg_sz = bytes_recvd - (desc->offset8 << 3);
+ if (msg_sz <= sizeof(struct synth_kbd_msg_hdr)) {
+ /*
+ * Drop the packet and hope
+ * the problem magically goes away.
+ */
+ dev_err(&hv_dev->device,
+ "Illegal packet (type: %d, tid: %llx, size: %d)\n",
+ desc->type, req_id, msg_sz);
+ break;
+ }
+
+ msg = (void *)desc + (desc->offset8 << 3);
+ hv_kbd_on_receive(hv_dev, msg, msg_sz);
+ break;
+
+ default:
+ dev_err(&hv_dev->device,
+ "unhandled packet type %d, tid %llx len %d\n",
+ desc->type, req_id, bytes_recvd);
+ break;
+ }
+}
+
+static void hv_kbd_on_channel_callback(void *context)
+{
+ struct hv_device *hv_dev = context;
+ void *buffer;
+ int bufferlen = 0x100; /* Start with sensible size */
+ u32 bytes_recvd;
+ u64 req_id;
+ int error;
+
+ buffer = kmalloc(bufferlen, GFP_ATOMIC);
+ if (!buffer)
+ return;
+
+ while (1) {
+ error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
+ &bytes_recvd, &req_id);
+ switch (error) {
+ case 0:
+ if (bytes_recvd == 0) {
+ kfree(buffer);
+ return;
+ }
+
+ hv_kbd_handle_received_packet(hv_dev, buffer,
+ bytes_recvd, req_id);
+ break;
+
+ case -ENOBUFS:
+ kfree(buffer);
+ /* Handle large packet */
+ bufferlen = bytes_recvd;
+ buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
+ if (!buffer)
+ return;
+ break;
+ }
+ }
+}
+
+static int hv_kbd_connect_to_vsp(struct hv_device *hv_dev)
+{
+ struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev);
+ struct synth_kbd_protocol_request *request;
+ struct synth_kbd_protocol_response *response;
+ u32 proto_status;
+ int error;
+
+ request = &kbd_dev->protocol_req;
+ memset(request, 0, sizeof(struct synth_kbd_protocol_request));
+ request->header.type = __cpu_to_le32(SYNTH_KBD_PROTOCOL_REQUEST);
+ request->version_requested.version = __cpu_to_le32(SYNTH_KBD_VERSION);
+
+ error = vmbus_sendpacket(hv_dev->channel, request,
+ sizeof(struct synth_kbd_protocol_request),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (error)
+ return error;
+
+ if (!wait_for_completion_timeout(&kbd_dev->wait_event, 10 * HZ))
+ return -ETIMEDOUT;
+
+ response = &kbd_dev->protocol_resp;
+ proto_status = __le32_to_cpu(response->proto_status);
+ if (!(proto_status & PROTOCOL_ACCEPTED)) {
+ dev_err(&hv_dev->device,
+ "synth_kbd protocol request failed (version %d)\n",
+ SYNTH_KBD_VERSION);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int hv_kbd_start(struct serio *serio)
+{
+ struct hv_kbd_dev *kbd_dev = serio->port_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbd_dev->lock, flags);
+ kbd_dev->started = true;
+ spin_unlock_irqrestore(&kbd_dev->lock, flags);
+
+ return 0;
+}
+
+static void hv_kbd_stop(struct serio *serio)
+{
+ struct hv_kbd_dev *kbd_dev = serio->port_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbd_dev->lock, flags);
+ kbd_dev->started = false;
+ spin_unlock_irqrestore(&kbd_dev->lock, flags);
+}
+
+static int hv_kbd_probe(struct hv_device *hv_dev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ struct hv_kbd_dev *kbd_dev;
+ struct serio *hv_serio;
+ int error;
+
+ kbd_dev = kzalloc(sizeof(struct hv_kbd_dev), GFP_KERNEL);
+ hv_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!kbd_dev || !hv_serio) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ kbd_dev->hv_dev = hv_dev;
+ kbd_dev->hv_serio = hv_serio;
+ spin_lock_init(&kbd_dev->lock);
+ init_completion(&kbd_dev->wait_event);
+ hv_set_drvdata(hv_dev, kbd_dev);
+
+ hv_serio->dev.parent = &hv_dev->device;
+ hv_serio->id.type = SERIO_8042_XL;
+ hv_serio->port_data = kbd_dev;
+ strlcpy(hv_serio->name, dev_name(&hv_dev->device),
+ sizeof(hv_serio->name));
+ strlcpy(hv_serio->phys, dev_name(&hv_dev->device),
+ sizeof(hv_serio->phys));
+
+ hv_serio->start = hv_kbd_start;
+ hv_serio->stop = hv_kbd_stop;
+
+ error = vmbus_open(hv_dev->channel,
+ KBD_VSC_SEND_RING_BUFFER_SIZE,
+ KBD_VSC_RECV_RING_BUFFER_SIZE,
+ NULL, 0,
+ hv_kbd_on_channel_callback,
+ hv_dev);
+ if (error)
+ goto err_free_mem;
+
+ error = hv_kbd_connect_to_vsp(hv_dev);
+ if (error)
+ goto err_close_vmbus;
+
+ serio_register_port(kbd_dev->hv_serio);
+ return 0;
+
+err_close_vmbus:
+ vmbus_close(hv_dev->channel);
+err_free_mem:
+ kfree(hv_serio);
+ kfree(kbd_dev);
+ return error;
+}
+
+static int hv_kbd_remove(struct hv_device *hv_dev)
+{
+ struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev);
+
+ serio_unregister_port(kbd_dev->hv_serio);
+ vmbus_close(hv_dev->channel);
+ kfree(kbd_dev);
+
+ hv_set_drvdata(hv_dev, NULL);
+
+ return 0;
+}
+
+/*
+ * Keyboard GUID
+ * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
+ */
+#define HV_KBD_GUID \
+ .guid = { \
+ 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48, \
+ 0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 \
+ }
+
+static const struct hv_vmbus_device_id id_table[] = {
+ /* Keyboard guid */
+ { HV_KBD_GUID, },
+ { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
+static struct hv_driver hv_kbd_drv = {
+ .name = KBUILD_MODNAME,
+ .id_table = id_table,
+ .probe = hv_kbd_probe,
+ .remove = hv_kbd_remove,
+};
+
+static int __init hv_kbd_init(void)
+{
+ return vmbus_driver_register(&hv_kbd_drv);
+}
+
+static void __exit hv_kbd_exit(void)
+{
+ vmbus_driver_unregister(&hv_kbd_drv);
+}
+
+MODULE_LICENSE("GPL");
+module_init(hv_kbd_init);
+module_exit(hv_kbd_exit);
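
The new hyperv-keyboard driver above funnels host keystrokes into the existing AT/XT keyboard stack by translating each synth_kbd_keystroke into XT scancode bytes on a serio port. A condensed sketch of that translation, using only names defined in the file above (the locking and the 'started' check are left out):

/* Sketch: one Hyper-V keystroke message -> XT scancode bytes. */
static void hv_kbd_translate(struct serio *port,
			     const struct synth_kbd_keystroke *ks)
{
	u32 info = __le32_to_cpu(ks->info);
	u16 scan_code = __le16_to_cpu(ks->make_code);

	if (info & IS_E0)		/* extended-key prefix */
		serio_interrupt(port, XTKBD_EMUL0, 0);

	if (info & IS_BREAK)		/* key release */
		scan_code |= XTKBD_RELEASE;

	serio_interrupt(port, scan_code, 0);
}
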
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 5f306f79da0c..0ec9abbe31fe 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -765,6 +765,7 @@ static struct pnp_device_id pnp_kbd_devids[] = {
{ .id = "CPQA0D7", .driver_data = 0 },
{ .id = "", },
};
+MODULE_DEVICE_TABLE(pnp, pnp_kbd_devids);
static struct pnp_driver i8042_pnp_kbd_driver = {
.name = "i8042 kbd",
@@ -786,6 +787,7 @@ static struct pnp_device_id pnp_aux_devids[] = {
{ .id = "SYN0801", .driver_data = 0 },
{ .id = "", },
};
+MODULE_DEVICE_TABLE(pnp, pnp_aux_devids);
static struct pnp_driver i8042_pnp_aux_driver = {
.name = "i8042 aux",
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 78e4de42efaa..020053fa5aaa 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -223,21 +223,26 @@ static int i8042_flush(void)
{
unsigned long flags;
unsigned char data, str;
- int i = 0;
+ int count = 0;
+ int retval = 0;
spin_lock_irqsave(&i8042_lock, flags);
- while (((str = i8042_read_status()) & I8042_STR_OBF) && (i < I8042_BUFFER_SIZE)) {
- udelay(50);
- data = i8042_read_data();
- i++;
- dbg("%02x <- i8042 (flush, %s)\n",
- data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
+ while ((str = i8042_read_status()) & I8042_STR_OBF) {
+ if (count++ < I8042_BUFFER_SIZE) {
+ udelay(50);
+ data = i8042_read_data();
+ dbg("%02x <- i8042 (flush, %s)\n",
+ data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
+ } else {
+ retval = -EIO;
+ break;
+ }
}
spin_unlock_irqrestore(&i8042_lock, flags);
- return i;
+ return retval;
}
/*
@@ -849,7 +854,7 @@ static int __init i8042_check_aux(void)
static int i8042_controller_check(void)
{
- if (i8042_flush() == I8042_BUFFER_SIZE) {
+ if (i8042_flush()) {
pr_err("No controller found\n");
return -ENODEV;
}
@@ -1031,7 +1036,7 @@ static void i8042_controller_reset(bool force_reset)
/*
* i8042_panic_blink() will turn the keyboard LEDs on or off and is called
* when kernel panics. Flashing LEDs is useful for users running X who may
- * not see the console and will help distingushing panics from "real"
+ * not see the console and will help distinguishing panics from "real"
* lockups.
*
* Note that DELAY has a limit of 10ms so we will not get stuck here
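
The i8042_flush() rework above also changes the function's contract: instead of returning how many bytes it drained, it now returns 0 when the output buffer empties and -EIO when more than I8042_BUFFER_SIZE bytes keep arriving (a sign that no working controller is present). A caller-side sketch of the new convention, mirroring the i8042_controller_check() hunk:

/* Sketch: any non-zero return now means "treat the controller as absent". */
static int example_controller_check(void)
{
	if (i8042_flush())
		return -ENODEV;

	return 0;
}
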
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 2b56855c2c77..98707fb2cb5d 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -365,7 +365,7 @@ static ssize_t serio_show_description(struct device *dev, struct device_attribut
return sprintf(buf, "%s\n", serio->name);
}
-static ssize_t serio_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct serio *serio = to_serio_port(dev);
@@ -373,54 +373,31 @@ static ssize_t serio_show_modalias(struct device *dev, struct device_attribute *
serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
}
-static ssize_t serio_show_id_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct serio *serio = to_serio_port(dev);
return sprintf(buf, "%02x\n", serio->id.type);
}
-static ssize_t serio_show_id_proto(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t proto_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct serio *serio = to_serio_port(dev);
return sprintf(buf, "%02x\n", serio->id.proto);
}
-static ssize_t serio_show_id_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct serio *serio = to_serio_port(dev);
return sprintf(buf, "%02x\n", serio->id.id);
}
-static ssize_t serio_show_id_extra(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t extra_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct serio *serio = to_serio_port(dev);
return sprintf(buf, "%02x\n", serio->id.extra);
}
-static DEVICE_ATTR(type, S_IRUGO, serio_show_id_type, NULL);
-static DEVICE_ATTR(proto, S_IRUGO, serio_show_id_proto, NULL);
-static DEVICE_ATTR(id, S_IRUGO, serio_show_id_id, NULL);
-static DEVICE_ATTR(extra, S_IRUGO, serio_show_id_extra, NULL);
-
-static struct attribute *serio_device_id_attrs[] = {
- &dev_attr_type.attr,
- &dev_attr_proto.attr,
- &dev_attr_id.attr,
- &dev_attr_extra.attr,
- NULL
-};
-
-static struct attribute_group serio_id_attr_group = {
- .name = "id",
- .attrs = serio_device_id_attrs,
-};
-
-static const struct attribute_group *serio_device_attr_groups[] = {
- &serio_id_attr_group,
- NULL
-};
-
-static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct serio *serio = to_serio_port(dev);
struct device_driver *drv;
@@ -474,14 +451,36 @@ static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *
return retval;
}
-static struct device_attribute serio_device_attrs[] = {
- __ATTR(description, S_IRUGO, serio_show_description, NULL),
- __ATTR(modalias, S_IRUGO, serio_show_modalias, NULL),
- __ATTR(drvctl, S_IWUSR, NULL, serio_rebind_driver),
- __ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode),
- __ATTR_NULL
+static DEVICE_ATTR_RO(type);
+static DEVICE_ATTR_RO(proto);
+static DEVICE_ATTR_RO(id);
+static DEVICE_ATTR_RO(extra);
+static DEVICE_ATTR_RO(modalias);
+static DEVICE_ATTR_WO(drvctl);
+static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
+static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
+
+static struct attribute *serio_device_id_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_proto.attr,
+ &dev_attr_id.attr,
+ &dev_attr_extra.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_description.attr,
+ &dev_attr_drvctl.attr,
+ &dev_attr_bind_mode.attr,
+ NULL
};
+static struct attribute_group serio_id_attr_group = {
+ .name = "id",
+ .attrs = serio_device_id_attrs,
+};
+
+static const struct attribute_group *serio_device_attr_groups[] = {
+ &serio_id_attr_group,
+ NULL
+};
static void serio_release_port(struct device *dev)
{
@@ -996,7 +995,6 @@ EXPORT_SYMBOL(serio_interrupt);
static struct bus_type serio_bus = {
.name = "serio",
- .dev_attrs = serio_device_attrs,
.drv_groups = serio_driver_groups,
.match = serio_bus_match,
.uevent = serio_uevent,
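
The serio sysfs changes above are an instance of the generic move away from bus_type.dev_attrs towards attribute groups built with the DEVICE_ATTR_RO()/DEVICE_ATTR_WO() helpers. A minimal, generic sketch of that pattern with a hypothetical read-only "foo" attribute (not part of serio itself):

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "bar\n");
}
static DEVICE_ATTR_RO(foo);		/* declares dev_attr_foo, mode 0444 */

static struct attribute *example_attrs[] = {
	&dev_attr_foo.attr,
	NULL
};

static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

static const struct attribute_group *example_attr_groups[] = {
	&example_attr_group,
	NULL
};
/* The groups array is then wired up through the device's or driver's
 * .groups pointer, replacing the removed .dev_attrs table. */
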
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 4b7662a17ae9..dfbcd872f95e 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -25,6 +25,7 @@
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#define DRIVER_NAME "xilinx_ps2"
@@ -235,12 +236,12 @@ static void sxps2_close(struct serio *pserio)
*/
static int xps2_of_probe(struct platform_device *ofdev)
{
- struct resource r_irq; /* Interrupt resources */
struct resource r_mem; /* IO mem resources */
struct xps2data *drvdata;
struct serio *serio;
struct device *dev = &ofdev->dev;
resource_size_t remap_size, phys_addr;
+ unsigned int irq;
int error;
dev_info(dev, "Device Tree Probing \'%s\'\n",
@@ -254,7 +255,8 @@ static int xps2_of_probe(struct platform_device *ofdev)
}
/* Get IRQ for the device */
- if (!of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq)) {
+ irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ if (!irq) {
dev_err(dev, "no IRQ found\n");
return -ENODEV;
}
@@ -267,7 +269,7 @@ static int xps2_of_probe(struct platform_device *ofdev)
}
spin_lock_init(&drvdata->lock);
- drvdata->irq = r_irq.start;
+ drvdata->irq = irq;
drvdata->serio = serio;
drvdata->dev = dev;
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 79b69ea47f74..867e7c33ac55 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -524,9 +524,6 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int
error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
report_id, rep_data, length, 1);
- if (error >= 0)
- error = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
- report_id, rep_data, length, 1);
} while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES);
kfree(rep_data);
@@ -548,7 +545,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
/* MT Tablet PC touch */
return wacom_set_device_mode(intf, 3, 4, 4);
}
- else if (features->type == WACOM_24HDT) {
+ else if (features->type == WACOM_24HDT || features->type == CINTIQ_HYBRID) {
return wacom_set_device_mode(intf, 18, 3, 2);
}
} else if (features->device_type == BTN_TOOL_PEN) {
@@ -719,7 +716,7 @@ static int wacom_led_control(struct wacom *wacom)
return -ENOMEM;
if (wacom->wacom_wac.features.type >= INTUOS5S &&
- wacom->wacom_wac.features.type <= INTUOS5L) {
+ wacom->wacom_wac.features.type <= INTUOSPL) {
/*
* Touch Ring and crop mark LED luminance may take on
* one of four values:
@@ -981,14 +978,20 @@ static int wacom_initialize_leds(struct wacom *wacom)
case INTUOS5S:
case INTUOS5:
case INTUOS5L:
- wacom->led.select[0] = 0;
- wacom->led.select[1] = 0;
- wacom->led.llv = 32;
- wacom->led.hlv = 0;
- wacom->led.img_lum = 0;
-
- error = sysfs_create_group(&wacom->intf->dev.kobj,
- &intuos5_led_attr_group);
+ case INTUOSPS:
+ case INTUOSPM:
+ case INTUOSPL:
+ if (wacom->wacom_wac.features.device_type == BTN_TOOL_PEN) {
+ wacom->led.select[0] = 0;
+ wacom->led.select[1] = 0;
+ wacom->led.llv = 32;
+ wacom->led.hlv = 0;
+ wacom->led.img_lum = 0;
+
+ error = sysfs_create_group(&wacom->intf->dev.kobj,
+ &intuos5_led_attr_group);
+ } else
+ return 0;
break;
default:
@@ -1024,13 +1027,18 @@ static void wacom_destroy_leds(struct wacom *wacom)
case INTUOS5S:
case INTUOS5:
case INTUOS5L:
- sysfs_remove_group(&wacom->intf->dev.kobj,
- &intuos5_led_attr_group);
+ case INTUOSPS:
+ case INTUOSPM:
+ case INTUOSPL:
+ if (wacom->wacom_wac.features.device_type == BTN_TOOL_PEN)
+ sysfs_remove_group(&wacom->intf->dev.kobj,
+ &intuos5_led_attr_group);
break;
}
}
static enum power_supply_property wacom_battery_props[] = {
+ POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_CAPACITY
};
@@ -1042,6 +1050,9 @@ static int wacom_battery_get_property(struct power_supply *psy,
int ret = 0;
switch (psp) {
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
case POWER_SUPPLY_PROP_CAPACITY:
val->intval =
wacom->wacom_wac.battery_capacity * 100 / 31;
@@ -1181,34 +1192,47 @@ static void wacom_wireless_work(struct work_struct *work)
wacom_wac1->features =
*((struct wacom_features *)id->driver_info);
wacom_wac1->features.device_type = BTN_TOOL_PEN;
+ snprintf(wacom_wac1->name, WACOM_NAME_MAX, "%s (WL) Pen",
+ wacom_wac1->features.name);
error = wacom_register_input(wacom1);
if (error)
- goto fail1;
+ goto fail;
/* Touch interface */
- wacom_wac2->features =
- *((struct wacom_features *)id->driver_info);
- wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
- wacom_wac2->features.device_type = BTN_TOOL_FINGER;
- wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
- error = wacom_register_input(wacom2);
- if (error)
- goto fail2;
+ if (wacom_wac1->features.touch_max) {
+ wacom_wac2->features =
+ *((struct wacom_features *)id->driver_info);
+ wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
+ wacom_wac2->features.device_type = BTN_TOOL_FINGER;
+ wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
+ if (wacom_wac2->features.touch_max)
+ snprintf(wacom_wac2->name, WACOM_NAME_MAX,
+ "%s (WL) Finger",wacom_wac2->features.name);
+ else
+ snprintf(wacom_wac2->name, WACOM_NAME_MAX,
+ "%s (WL) Pad",wacom_wac2->features.name);
+ error = wacom_register_input(wacom2);
+ if (error)
+ goto fail;
+ }
error = wacom_initialize_battery(wacom);
if (error)
- goto fail3;
+ goto fail;
}
return;
-fail3:
- input_unregister_device(wacom_wac2->input);
- wacom_wac2->input = NULL;
-fail2:
- input_unregister_device(wacom_wac1->input);
- wacom_wac1->input = NULL;
-fail1:
+fail:
+ if (wacom_wac2->input) {
+ input_unregister_device(wacom_wac2->input);
+ wacom_wac2->input = NULL;
+ }
+
+ if (wacom_wac1->input) {
+ input_unregister_device(wacom_wac1->input);
+ wacom_wac1->input = NULL;
+ }
return;
}
@@ -1298,7 +1322,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
* HID descriptor. If this is the touch interface (wMaxPacketSize
* of WACOM_PKGLEN_BBTOUCH3), override the table values.
*/
- if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
+ if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) {
features->device_type = BTN_TOOL_FINGER;
features->pktlen = WACOM_PKGLEN_BBTOUCH3;
@@ -1325,10 +1349,12 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
struct usb_device *other_dev;
/* Append the device type to the name */
- strlcat(wacom_wac->name,
- features->device_type == BTN_TOOL_PEN ?
- " Pen" : " Finger",
- sizeof(wacom_wac->name));
+ if (features->device_type != BTN_TOOL_FINGER)
+ strlcat(wacom_wac->name, " Pen", WACOM_NAME_MAX);
+ else if (features->touch_max)
+ strlcat(wacom_wac->name, " Finger", WACOM_NAME_MAX);
+ else
+ strlcat(wacom_wac->name, " Pad", WACOM_NAME_MAX);
other_dev = wacom_get_sibling(dev, features->oVid, features->oPid);
if (other_dev == NULL || wacom_get_usbdev_data(other_dev) == NULL)
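
The battery hunks above add POWER_SUPPLY_PROP_SCOPE so userspace can tell that this is a peripheral battery rather than the system's main pack. Reduced to the two handled properties, the callback now has this shape (raw_level is a placeholder for wacom_wac.battery_capacity):

static int example_battery_get_property(struct power_supply *psy,
					enum power_supply_property psp,
					union power_supply_propval *val)
{
	int raw_level = 0;	/* placeholder: 0..31 hardware battery level */

	switch (psp) {
	case POWER_SUPPLY_PROP_SCOPE:
		val->intval = POWER_SUPPLY_SCOPE_DEVICE;	/* device-scoped */
		return 0;
	case POWER_SUPPLY_PROP_CAPACITY:
		val->intval = raw_level * 100 / 31;		/* scale to percent */
		return 0;
	default:
		return -EINVAL;
	}
}
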
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index b2aa503c16b1..782c2535f1d8 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -427,6 +427,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
(features->type == WACOM_21UX2))
return 1;
+ /* Range Report */
+ if ((data[1] & 0xfe) == 0x20) {
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max);
+ }
+
/* Exit report */
if ((data[1] & 0xfe) == 0x80) {
if (features->quirks == WACOM_QUIRK_MULTI_INPUT)
@@ -477,7 +484,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
/* general pen packet */
if ((data[1] & 0xb8) == 0xa0) {
t = (data[6] << 2) | ((data[7] >> 6) & 3);
- if (features->type >= INTUOS4S && features->type <= WACOM_24HD) {
+ if (features->type >= INTUOS4S && features->type <= CINTIQ_HYBRID) {
t = (t << 1) | (data[1] & 1);
}
input_report_abs(input, ABS_PRESSURE, t);
@@ -621,14 +628,30 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
} else {
input_report_abs(input, ABS_MISC, 0);
}
- } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
+ } else if (features->type == CINTIQ_HYBRID) {
+ /*
+ * Do not send hardware buttons under Android. They
+ * are already sent to the system through GPIO (and
+ * have different meaning).
+ */
+ input_report_key(input, BTN_1, (data[4] & 0x01));
+ input_report_key(input, BTN_2, (data[4] & 0x02));
+ input_report_key(input, BTN_3, (data[4] & 0x04));
+ input_report_key(input, BTN_4, (data[4] & 0x08));
+
+ input_report_key(input, BTN_5, (data[4] & 0x10)); /* Right */
+ input_report_key(input, BTN_6, (data[4] & 0x20)); /* Up */
+ input_report_key(input, BTN_7, (data[4] & 0x40)); /* Left */
+ input_report_key(input, BTN_8, (data[4] & 0x80)); /* Down */
+ input_report_key(input, BTN_0, (data[3] & 0x01)); /* Center */
+ } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
int i;
/* Touch ring mode switch has no capacitive sensor */
input_report_key(input, BTN_0, (data[3] & 0x01));
/*
- * ExpressKeys on Intuos5 have a capacitive sensor in
+ * ExpressKeys on Intuos5/Intuos Pro have a capacitive sensor in
* addition to the mechanical switch. Switch data is
* stored in data[4], capacitive data in data[5].
*/
@@ -716,7 +739,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
features->type == INTUOS4 ||
features->type == INTUOS4S ||
features->type == INTUOS5 ||
- features->type == INTUOS5S)) {
+ features->type == INTUOS5S ||
+ features->type == INTUOSPM ||
+ features->type == INTUOSPS)) {
return 0;
}
@@ -769,8 +794,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
} else if (wacom->tool[idx] == BTN_TOOL_MOUSE) {
/* I4 mouse */
- if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
- (features->type >= INTUOS5S && features->type <= INTUOS5L)) {
+ if (features->type >= INTUOS4S && features->type <= INTUOSPL) {
input_report_key(input, BTN_LEFT, data[6] & 0x01);
input_report_key(input, BTN_MIDDLE, data[6] & 0x02);
input_report_key(input, BTN_RIGHT, data[6] & 0x04);
@@ -797,7 +821,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
}
}
} else if ((features->type < INTUOS3S || features->type == INTUOS3L ||
- features->type == INTUOS4L || features->type == INTUOS5L) &&
+ features->type == INTUOS4L || features->type == INTUOS5L ||
+ features->type == INTUOSPL) &&
wacom->tool[idx] == BTN_TOOL_LENS) {
/* Lens cursor packets */
input_report_key(input, BTN_LEFT, data[8] & 0x01);
@@ -1107,6 +1132,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
{
+ struct wacom_features *features = &wacom->features;
struct input_dev *input = wacom->input;
bool touch = data[1] & 0x80;
int slot = input_mt_get_slot_by_key(input, data[0]);
@@ -1122,14 +1148,23 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
if (touch) {
int x = (data[2] << 4) | (data[4] >> 4);
int y = (data[3] << 4) | (data[4] & 0x0f);
- int a = data[5];
+ int width, height;
- // "a" is a scaled-down area which we assume is roughly
- // circular and which can be described as: a=(pi*r^2)/C.
- int x_res = input_abs_get_res(input, ABS_X);
- int y_res = input_abs_get_res(input, ABS_Y);
- int width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
- int height = width * y_res / x_res;
+ if (features->type >= INTUOSPS && features->type <= INTUOSPL) {
+ width = data[5];
+ height = data[6];
+ } else {
+ /*
+ * "a" is a scaled-down area which we assume is
+ * roughly circular and which can be described as:
+ * a=(pi*r^2)/C.
+ */
+ int a = data[5];
+ int x_res = input_abs_get_res(input, ABS_X);
+ int y_res = input_abs_get_res(input, ABS_Y);
+ width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
+ height = width * y_res / x_res;
+ }
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
@@ -1327,6 +1362,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case WACOM_22HD:
case WACOM_24HD:
case DTK:
+ case CINTIQ_HYBRID:
sync = wacom_intuos_irq(wacom_wac);
break;
@@ -1337,6 +1373,9 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case INTUOS5S:
case INTUOS5:
case INTUOS5L:
+ case INTUOSPS:
+ case INTUOSPM:
+ case INTUOSPL:
if (len == WACOM_PKGLEN_BBTOUCH3)
sync = wacom_bpt3_touch(wacom_wac);
else
@@ -1420,7 +1459,7 @@ void wacom_setup_device_quirks(struct wacom_features *features)
/* these device have multiple inputs */
if (features->type >= WIRELESS ||
- (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
+ (features->type >= INTUOS5S && features->type <= INTUOSPL) ||
(features->oVid && features->oPid))
features->quirks |= WACOM_QUIRK_MULTI_INPUT;
@@ -1627,6 +1666,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case INTUOS5:
case INTUOS5L:
+ case INTUOSPM:
+ case INTUOSPL:
if (features->device_type == BTN_TOOL_PEN) {
__set_bit(BTN_7, input_dev->keybit);
__set_bit(BTN_8, input_dev->keybit);
@@ -1634,6 +1675,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
/* fall through */
case INTUOS5S:
+ case INTUOSPS:
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
if (features->device_type == BTN_TOOL_PEN) {
@@ -1765,6 +1807,24 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
0, 0);
}
break;
+
+ case CINTIQ_HYBRID:
+ __set_bit(BTN_1, input_dev->keybit);
+ __set_bit(BTN_2, input_dev->keybit);
+ __set_bit(BTN_3, input_dev->keybit);
+ __set_bit(BTN_4, input_dev->keybit);
+
+ __set_bit(BTN_5, input_dev->keybit);
+ __set_bit(BTN_6, input_dev->keybit);
+ __set_bit(BTN_7, input_dev->keybit);
+ __set_bit(BTN_8, input_dev->keybit);
+ __set_bit(BTN_0, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
+ wacom_setup_cintiq(wacom_wac);
+ break;
}
return 0;
}
@@ -1952,6 +2012,18 @@ static const struct wacom_features wacom_features_0x29 =
static const struct wacom_features wacom_features_0x2A =
{ "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x314 =
+ { "Wacom Intuos Pro S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
+ 63, INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x315 =
+ { "Wacom Intuos Pro M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
+ 63, INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x317 =
+ { "Wacom Intuos Pro L", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047,
+ 63, INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
static const struct wacom_features wacom_features_0xF4 =
{ "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -2054,6 +2126,12 @@ static const struct wacom_features wacom_features_0x101 =
static const struct wacom_features wacom_features_0x10D =
{ "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x10E =
+ { "Wacom ISDv4 10E", WACOM_PKGLEN_MTTPC, 27760, 15694, 255,
+ 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x10F =
+ { "Wacom ISDv4 10F", WACOM_PKGLEN_MTTPC, 27760, 15694, 255,
+ 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x4001 =
{ "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2125,6 +2203,13 @@ static const struct wacom_features wacom_features_0x301 =
static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x0307 =
+ { "Wacom ISDv5 307", WACOM_PKGLEN_INTUOS, 59552, 33848, 2047,
+ 63, CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
+static const struct wacom_features wacom_features_0x0309 =
+ { "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10 };
#define USB_DEVICE_WACOM(prod) \
USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \
@@ -2248,15 +2333,22 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x100) },
{ USB_DEVICE_WACOM(0x101) },
{ USB_DEVICE_WACOM(0x10D) },
+ { USB_DEVICE_WACOM(0x10E) },
+ { USB_DEVICE_WACOM(0x10F) },
{ USB_DEVICE_WACOM(0x300) },
{ USB_DEVICE_WACOM(0x301) },
{ USB_DEVICE_WACOM(0x304) },
+ { USB_DEVICE_DETAILED(0x314, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_DETAILED(0x315, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_DETAILED(0x317, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_WACOM(0xF8) },
{ USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0xFA) },
+ { USB_DEVICE_WACOM(0x0307) },
+ { USB_DEVICE_DETAILED(0x0309, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
};
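
One piece of arithmetic in the wacom_bpt3_touch_msg() hunk is worth spelling out: older devices report a scaled-down, roughly circular contact area a ≈ (pi * r^2) / C, so the driver recovers a diameter as width = 2 * sqrt(a * WACOM_CONTACT_AREA_SCALE) (the scale constant standing in for C / pi) and derives the height from the X/Y resolution ratio, while the new Intuos Pro models report width and height directly. A standalone restatement of the legacy path, with the resolutions passed in as parameters:

/* Sketch: contact size from the reported area (pre-Intuos Pro path). */
static void contact_size_from_area(int a, int x_res, int y_res,
				   int *width, int *height)
{
	/* a ~= (pi * r^2) / C  =>  2r = 2 * sqrt(a * C / pi) */
	*width  = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
	*height = *width * y_res / x_res;	/* preserve aspect per-axis */
}
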
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index dfc9e08e7f70..fd23a3790605 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -14,6 +14,8 @@
/* maximum packet length for USB devices */
#define WACOM_PKGLEN_MAX 64
+#define WACOM_NAME_MAX 64
+
/* packet length for individual models */
#define WACOM_PKGLEN_PENPRTN 7
#define WACOM_PKGLEN_GRAPHIRE 8
@@ -76,10 +78,14 @@ enum {
INTUOS5S,
INTUOS5,
INTUOS5L,
+ INTUOSPS,
+ INTUOSPM,
+ INTUOSPL,
WACOM_21UX2,
WACOM_22HD,
DTK,
WACOM_24HD,
+ CINTIQ_HYBRID,
CINTIQ,
WACOM_BEE,
WACOM_13HD,
@@ -126,7 +132,7 @@ struct wacom_shared {
};
struct wacom_wac {
- char name[64];
+ char name[WACOM_NAME_MAX];
unsigned char *data;
int tool[2];
int id[2];
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index e09ec67957a3..00d1e547b211 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -919,4 +919,17 @@ config TOUCHSCREEN_TPS6507X
To compile this driver as a module, choose M here: the
module will be called tps6507x_ts.
+config TOUCHSCREEN_ZFORCE
+ tristate "Neonode zForce infrared touchscreens"
+ depends on I2C
+ depends on GPIOLIB
+ help
+ Say Y here if you have a touchscreen using the zForce
+ infrared technology from Neonode.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called zforce_ts.
+
endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index f5216c1bf53e..7587883b8d38 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -75,3 +75,4 @@ obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o
obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
+obj-$(CONFIG_TOUCHSCREEN_ZFORCE) += zforce_ts.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index f3a174a83c82..69834dd3c313 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -806,7 +806,6 @@ err_free_irq:
err_free_mem:
input_free_device(input_dev);
kfree(ts);
- spi_set_drvdata(spi, NULL);
return err;
}
@@ -823,7 +822,6 @@ static int ad7877_remove(struct spi_device *spi)
kfree(ts);
dev_dbg(&spi->dev, "unregistered touchscreen\n");
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index 606da5bd6115..1a7b1143536e 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -142,7 +142,6 @@ static int ad7879_spi_remove(struct spi_device *spi)
struct ad7879 *ts = spi_get_drvdata(spi);
ad7879_remove(ts);
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index d038575f49db..42d830efa316 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -2113,7 +2113,6 @@ error_startup:
error_request_irq:
if (cd->cpdata->init)
cd->cpdata->init(cd->cpdata, 0, dev);
- dev_set_drvdata(dev, NULL);
error_free_xfer:
kfree(cd->xfer_buf);
error_free_cd:
@@ -2151,7 +2150,6 @@ int cyttsp4_remove(struct cyttsp4 *cd)
free_irq(cd->irq, cd);
if (cd->cpdata->init)
cd->cpdata->init(cd->cpdata, 0, dev);
- dev_set_drvdata(dev, NULL);
cyttsp4_free_si_ptrs(cd);
kfree(cd);
return 0;
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index ef5fcb0945e9..054d22583248 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -273,7 +273,7 @@ static struct i2c_driver egalax_ts_driver = {
.name = "egalax_ts",
.owner = THIS_MODULE,
.pm = &egalax_ts_pm_ops,
- .of_match_table = of_match_ptr(egalax_ts_dt_ids),
+ .of_match_table = egalax_ts_dt_ids,
},
.id_table = egalax_ts_id,
.probe = egalax_ts_probe,
diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
index 66500852341b..92e2243fb77d 100644
--- a/drivers/input/touchscreen/htcpen.c
+++ b/drivers/input/touchscreen/htcpen.c
@@ -186,8 +186,6 @@ static int htcpen_isa_remove(struct device *dev, unsigned int id)
release_region(HTCPEN_PORT_INIT, 1);
release_region(HTCPEN_PORT_IRQ_CLEAR, 1);
- dev_set_drvdata(dev, NULL);
-
return 0;
}
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 1740a2496371..2f03b2f289dd 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -24,6 +24,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index e1c5300cacfc..df9b24f7e2cb 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -505,7 +505,7 @@ static struct platform_driver ti_tsc_driver = {
.name = "TI-am335x-tsc",
.owner = THIS_MODULE,
.pm = TITSC_PM_OPS,
- .of_match_table = of_match_ptr(ti_tsc_dt_ids),
+ .of_match_table = ti_tsc_dt_ids,
},
};
module_platform_driver(ti_tsc_driver);
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 7213e8b07e79..811353353917 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -678,7 +678,6 @@ static int tsc2005_probe(struct spi_device *spi)
err_remove_sysfs:
sysfs_remove_group(&spi->dev.kobj, &tsc2005_attr_group);
err_clear_drvdata:
- spi_set_drvdata(spi, NULL);
free_irq(spi->irq, ts);
err_free_mem:
input_free_device(input_dev);
@@ -696,7 +695,6 @@ static int tsc2005_remove(struct spi_device *spi)
input_unregister_device(ts->idev);
kfree(ts);
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
new file mode 100644
index 000000000000..75762d6ff3ba
--- /dev/null
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -0,0 +1,836 @@
+/*
+ * Copyright (C) 2012-2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * based in parts on Nook zforce driver
+ *
+ * Copyright (C) 2010 Barnes & Noble, Inc.
+ * Author: Pieter Truter<ptruter@intrinsyc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/input/mt.h>
+#include <linux/platform_data/zforce_ts.h>
+
+#define WAIT_TIMEOUT msecs_to_jiffies(1000)
+
+#define FRAME_START 0xee
+
+/* Offsets of the different parts of the payload the controller sends */
+#define PAYLOAD_HEADER 0
+#define PAYLOAD_LENGTH 1
+#define PAYLOAD_BODY 2
+
+/* Response offsets */
+#define RESPONSE_ID 0
+#define RESPONSE_DATA 1
+
+/* Commands */
+#define COMMAND_DEACTIVATE 0x00
+#define COMMAND_INITIALIZE 0x01
+#define COMMAND_RESOLUTION 0x02
+#define COMMAND_SETCONFIG 0x03
+#define COMMAND_DATAREQUEST 0x04
+#define COMMAND_SCANFREQ 0x08
+#define COMMAND_STATUS 0X1e
+
+/*
+ * Responses the controller sends as a result of
+ * command requests
+ */
+#define RESPONSE_DEACTIVATE 0x00
+#define RESPONSE_INITIALIZE 0x01
+#define RESPONSE_RESOLUTION 0x02
+#define RESPONSE_SETCONFIG 0x03
+#define RESPONSE_SCANFREQ 0x08
+#define RESPONSE_STATUS 0X1e
+
+/*
+ * Notifications are sent by the touch controller without
+ * being requested by the driver and include, for example,
+ * touch indications
+ */
+#define NOTIFICATION_TOUCH 0x04
+#define NOTIFICATION_BOOTCOMPLETE 0x07
+#define NOTIFICATION_OVERRUN 0x25
+#define NOTIFICATION_PROXIMITY 0x26
+#define NOTIFICATION_INVALID_COMMAND 0xfe
+
+#define ZFORCE_REPORT_POINTS 2
+#define ZFORCE_MAX_AREA 0xff
+
+#define STATE_DOWN 0
+#define STATE_MOVE 1
+#define STATE_UP 2
+
+#define SETCONFIG_DUALTOUCH (1 << 0)
+
+struct zforce_point {
+ int coord_x;
+ int coord_y;
+ int state;
+ int id;
+ int area_major;
+ int area_minor;
+ int orientation;
+ int pressure;
+ int prblty;
+};
+
+/*
+ * @client the i2c_client
+ * @input the input device
+ * @suspending in the process of going to suspend (don't emit wakeup
+ * events for commands executed to suspend the device)
+ * @suspended device suspended
+ * @access_mutex serialize i2c-access, to keep multipart reads together
+ * @command_done completion to wait for the command result
+ * @command_mutex serialize commands send to the ic
+ * @command_waiting the id of the command that is currently waiting
+ * for a result
+ * @command_result returned result of the command
+ */
+struct zforce_ts {
+ struct i2c_client *client;
+ struct input_dev *input;
+ const struct zforce_ts_platdata *pdata;
+ char phys[32];
+
+ bool suspending;
+ bool suspended;
+ bool boot_complete;
+
+ /* Firmware version information */
+ u16 version_major;
+ u16 version_minor;
+ u16 version_build;
+ u16 version_rev;
+
+ struct mutex access_mutex;
+
+ struct completion command_done;
+ struct mutex command_mutex;
+ int command_waiting;
+ int command_result;
+};
+
+static int zforce_command(struct zforce_ts *ts, u8 cmd)
+{
+ struct i2c_client *client = ts->client;
+ char buf[3];
+ int ret;
+
+ dev_dbg(&client->dev, "%s: 0x%x\n", __func__, cmd);
+
+ buf[0] = FRAME_START;
+ buf[1] = 1; /* data size, command only */
+ buf[2] = cmd;
+
+ mutex_lock(&ts->access_mutex);
+ ret = i2c_master_send(client, &buf[0], ARRAY_SIZE(buf));
+ mutex_unlock(&ts->access_mutex);
+ if (ret < 0) {
+ dev_err(&client->dev, "i2c send data request error: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zforce_send_wait(struct zforce_ts *ts, const char *buf, int len)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = mutex_trylock(&ts->command_mutex);
+ if (!ret) {
+ dev_err(&client->dev, "already waiting for a command\n");
+ return -EBUSY;
+ }
+
+ dev_dbg(&client->dev, "sending %d bytes for command 0x%x\n",
+ buf[1], buf[2]);
+
+ ts->command_waiting = buf[2];
+
+ mutex_lock(&ts->access_mutex);
+ ret = i2c_master_send(client, buf, len);
+ mutex_unlock(&ts->access_mutex);
+ if (ret < 0) {
+ dev_err(&client->dev, "i2c send data request error: %d\n", ret);
+ goto unlock;
+ }
+
+ dev_dbg(&client->dev, "waiting for result for command 0x%x\n", buf[2]);
+
+ if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0) {
+ ret = -ETIME;
+ goto unlock;
+ }
+
+ ret = ts->command_result;
+
+unlock:
+ mutex_unlock(&ts->command_mutex);
+ return ret;
+}
+
+static int zforce_command_wait(struct zforce_ts *ts, u8 cmd)
+{
+ struct i2c_client *client = ts->client;
+ char buf[3];
+ int ret;
+
+ dev_dbg(&client->dev, "%s: 0x%x\n", __func__, cmd);
+
+ buf[0] = FRAME_START;
+ buf[1] = 1; /* data size, command only */
+ buf[2] = cmd;
+
+ ret = zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+ if (ret < 0) {
+ dev_err(&client->dev, "i2c send data request error: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zforce_resolution(struct zforce_ts *ts, u16 x, u16 y)
+{
+ struct i2c_client *client = ts->client;
+ char buf[7] = { FRAME_START, 5, COMMAND_RESOLUTION,
+ (x & 0xff), ((x >> 8) & 0xff),
+ (y & 0xff), ((y >> 8) & 0xff) };
+
+ dev_dbg(&client->dev, "set resolution to (%d,%d)\n", x, y);
+
+ return zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+}
+
+static int zforce_scan_frequency(struct zforce_ts *ts, u16 idle, u16 finger,
+ u16 stylus)
+{
+ struct i2c_client *client = ts->client;
+ char buf[9] = { FRAME_START, 7, COMMAND_SCANFREQ,
+ (idle & 0xff), ((idle >> 8) & 0xff),
+ (finger & 0xff), ((finger >> 8) & 0xff),
+ (stylus & 0xff), ((stylus >> 8) & 0xff) };
+
+ dev_dbg(&client->dev, "set scan frequency to (idle: %d, finger: %d, stylus: %d)\n",
+ idle, finger, stylus);
+
+ return zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+}
+
+static int zforce_setconfig(struct zforce_ts *ts, char b1)
+{
+ struct i2c_client *client = ts->client;
+ char buf[7] = { FRAME_START, 5, COMMAND_SETCONFIG,
+ b1, 0, 0, 0 };
+
+ dev_dbg(&client->dev, "set config to (%d)\n", b1);
+
+ return zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+}
+
+static int zforce_start(struct zforce_ts *ts)
+{
+ struct i2c_client *client = ts->client;
+ const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+ int ret;
+
+ dev_dbg(&client->dev, "starting device\n");
+
+ ret = zforce_command_wait(ts, COMMAND_INITIALIZE);
+ if (ret) {
+ dev_err(&client->dev, "Unable to initialize, %d\n", ret);
+ return ret;
+ }
+
+ ret = zforce_resolution(ts, pdata->x_max, pdata->y_max);
+ if (ret) {
+ dev_err(&client->dev, "Unable to set resolution, %d\n", ret);
+ goto error;
+ }
+
+ ret = zforce_scan_frequency(ts, 10, 50, 50);
+ if (ret) {
+ dev_err(&client->dev, "Unable to set scan frequency, %d\n",
+ ret);
+ goto error;
+ }
+
+ ret = zforce_setconfig(ts, SETCONFIG_DUALTOUCH);
+ if (ret) {
+ dev_err(&client->dev, "Unable to set config, %d\n", ret);
+ goto error;
+ }
+
+ /* start sending touch events */
+ ret = zforce_command(ts, COMMAND_DATAREQUEST);
+ if (ret) {
+ dev_err(&client->dev, "Unable to request data\n");
+ goto error;
+ }
+
+ /*
+ * Per Neonode, the initial calibration takes at most 200 msec.
+ * Allow time for this calibration to complete.
+ */
+ msleep(200);
+
+ return 0;
+
+error:
+ zforce_command_wait(ts, COMMAND_DEACTIVATE);
+ return ret;
+}
+
+static int zforce_stop(struct zforce_ts *ts)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ dev_dbg(&client->dev, "stopping device\n");
+
+ /* Deactivates touch sensing and puts the device into sleep. */
+ ret = zforce_command_wait(ts, COMMAND_DEACTIVATE);
+ if (ret != 0) {
+ dev_err(&client->dev, "could not deactivate device, %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
+{
+ struct i2c_client *client = ts->client;
+ const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+ struct zforce_point point;
+ int count, i, num = 0;
+
+ count = payload[0];
+ if (count > ZFORCE_REPORT_POINTS) {
+ dev_warn(&client->dev, "too many coordinates %d, expected max %d\n",
+ count, ZFORCE_REPORT_POINTS);
+ count = ZFORCE_REPORT_POINTS;
+ }
+
+ for (i = 0; i < count; i++) {
+ point.coord_x =
+ payload[9 * i + 2] << 8 | payload[9 * i + 1];
+ point.coord_y =
+ payload[9 * i + 4] << 8 | payload[9 * i + 3];
+
+ if (point.coord_x > pdata->x_max ||
+ point.coord_y > pdata->y_max) {
+ dev_warn(&client->dev, "coordinates (%d,%d) invalid\n",
+ point.coord_x, point.coord_y);
+ point.coord_x = point.coord_y = 0;
+ }
+
+ point.state = payload[9 * i + 5] & 0x03;
+ point.id = (payload[9 * i + 5] & 0xfc) >> 2;
+
+ /* determine touch major, minor and orientation */
+ point.area_major = max(payload[9 * i + 6],
+ payload[9 * i + 7]);
+ point.area_minor = min(payload[9 * i + 6],
+ payload[9 * i + 7]);
+ point.orientation = payload[9 * i + 6] > payload[9 * i + 7];
+
+ point.pressure = payload[9 * i + 8];
+ point.prblty = payload[9 * i + 9];
+
+ dev_dbg(&client->dev,
+ "point %d/%d: state %d, id %d, pressure %d, prblty %d, x %d, y %d, amajor %d, aminor %d, ori %d\n",
+ i, count, point.state, point.id,
+ point.pressure, point.prblty,
+ point.coord_x, point.coord_y,
+ point.area_major, point.area_minor,
+ point.orientation);
+
+ /* the zforce id starts with "1", so needs to be decreased */
+ input_mt_slot(ts->input, point.id - 1);
+
+ input_mt_report_slot_state(ts->input, MT_TOOL_FINGER,
+ point.state != STATE_UP);
+
+ if (point.state != STATE_UP) {
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ point.coord_x);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ point.coord_y);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
+ point.area_major);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MINOR,
+ point.area_minor);
+ input_report_abs(ts->input, ABS_MT_ORIENTATION,
+ point.orientation);
+ num++;
+ }
+ }
+
+ input_mt_sync_frame(ts->input);
+
+ input_mt_report_finger_count(ts->input, num);
+
+ input_sync(ts->input);
+
+ return 0;
+}
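/*
 * Editor's sketch, not part of the patch: zforce_touch_event() above decodes a
 * payload of the form [count][9-byte point records].  One record, laid out as
 * a struct with editor-chosen field names inferred from the offsets used in
 * the loop:
 */
#include <stdint.h>

struct sketch_zforce_point_record {
	uint8_t x_lo, x_hi;	/* payload[9*i+1..2]: X coordinate, little-endian */
	uint8_t y_lo, y_hi;	/* payload[9*i+3..4]: Y coordinate, little-endian */
	uint8_t state_id;	/* payload[9*i+5]: bits 0-1 state, bits 2-7 id (1-based) */
	uint8_t area_a;		/* payload[9*i+6]: touch extent, one axis */
	uint8_t area_b;		/* payload[9*i+7]: touch extent, other axis */
	uint8_t pressure;	/* payload[9*i+8] */
	uint8_t probability;	/* payload[9*i+9] */
};
/* touch major/minor are max/min of area_a/area_b; orientation is (area_a > area_b) */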
+
+static int zforce_read_packet(struct zforce_ts *ts, u8 *buf)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ mutex_lock(&ts->access_mutex);
+
+ /* read 2 byte message header */
+ ret = i2c_master_recv(client, buf, 2);
+ if (ret < 0) {
+ dev_err(&client->dev, "error reading header: %d\n", ret);
+ goto unlock;
+ }
+
+ if (buf[PAYLOAD_HEADER] != FRAME_START) {
+ dev_err(&client->dev, "invalid frame start: %d\n", buf[0]);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ if (buf[PAYLOAD_LENGTH] <= 0 || buf[PAYLOAD_LENGTH] > 255) {
+ dev_err(&client->dev, "invalid payload length: %d\n",
+ buf[PAYLOAD_LENGTH]);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* read the message */
+ ret = i2c_master_recv(client, &buf[PAYLOAD_BODY], buf[PAYLOAD_LENGTH]);
+ if (ret < 0) {
+ dev_err(&client->dev, "error reading payload: %d\n", ret);
+ goto unlock;
+ }
+
+ dev_dbg(&client->dev, "read %d bytes for response command 0x%x\n",
+ buf[PAYLOAD_LENGTH], buf[PAYLOAD_BODY]);
+
+unlock:
+ mutex_unlock(&ts->access_mutex);
+ return ret;
+}
+
+static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
+{
+ struct i2c_client *client = ts->client;
+
+ if (ts->command_waiting == cmd) {
+ dev_dbg(&client->dev, "completing command 0x%x\n", cmd);
+ ts->command_result = result;
+ complete(&ts->command_done);
+ } else {
+ dev_dbg(&client->dev, "command %d not for us\n", cmd);
+ }
+}
+
+static irqreturn_t zforce_interrupt(int irq, void *dev_id)
+{
+ struct zforce_ts *ts = dev_id;
+ struct i2c_client *client = ts->client;
+ const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+ int ret;
+ u8 payload_buffer[512];
+ u8 *payload;
+
+ /*
+ * When suspended, emit a wakeup signal if necessary and return.
+ * Because the interrupt is level-triggered we will get re-triggered later.
+ */
+ if (ts->suspended) {
+ if (device_may_wakeup(&client->dev))
+ pm_wakeup_event(&client->dev, 500);
+ msleep(20);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(&client->dev, "handling interrupt\n");
+
+ /* Don't emit wakeup events from commands run by zforce_suspend */
+ if (!ts->suspending && device_may_wakeup(&client->dev))
+ pm_stay_awake(&client->dev);
+
+ while (!gpio_get_value(pdata->gpio_int)) {
+ ret = zforce_read_packet(ts, payload_buffer);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not read packet, ret: %d\n",
+ ret);
+ break;
+ }
+
+ payload = &payload_buffer[PAYLOAD_BODY];
+
+ switch (payload[RESPONSE_ID]) {
+ case NOTIFICATION_TOUCH:
+ /*
+ * Always report touch-events received while
+ * suspending, when being a wakeup source
+ */
+ if (ts->suspending && device_may_wakeup(&client->dev))
+ pm_wakeup_event(&client->dev, 500);
+ zforce_touch_event(ts, &payload[RESPONSE_DATA]);
+ break;
+
+ case NOTIFICATION_BOOTCOMPLETE:
+ ts->boot_complete = payload[RESPONSE_DATA];
+ zforce_complete(ts, payload[RESPONSE_ID], 0);
+ break;
+
+ case RESPONSE_INITIALIZE:
+ case RESPONSE_DEACTIVATE:
+ case RESPONSE_SETCONFIG:
+ case RESPONSE_RESOLUTION:
+ case RESPONSE_SCANFREQ:
+ zforce_complete(ts, payload[RESPONSE_ID],
+ payload[RESPONSE_DATA]);
+ break;
+
+ case RESPONSE_STATUS:
+ /*
+ * Version Payload Results
+ * [2:major] [2:minor] [2:build] [2:rev]
+ */
+ ts->version_major = (payload[RESPONSE_DATA + 1] << 8) |
+ payload[RESPONSE_DATA];
+ ts->version_minor = (payload[RESPONSE_DATA + 3] << 8) |
+ payload[RESPONSE_DATA + 2];
+ ts->version_build = (payload[RESPONSE_DATA + 5] << 8) |
+ payload[RESPONSE_DATA + 4];
+ ts->version_rev = (payload[RESPONSE_DATA + 7] << 8) |
+ payload[RESPONSE_DATA + 6];
+ dev_dbg(&ts->client->dev, "Firmware Version %04x:%04x %04x:%04x\n",
+ ts->version_major, ts->version_minor,
+ ts->version_build, ts->version_rev);
+
+ zforce_complete(ts, payload[RESPONSE_ID], 0);
+ break;
+
+ case NOTIFICATION_INVALID_COMMAND:
+ dev_err(&ts->client->dev, "invalid command: 0x%x\n",
+ payload[RESPONSE_DATA]);
+ break;
+
+ default:
+ dev_err(&ts->client->dev, "unrecognized response id: 0x%x\n",
+ payload[RESPONSE_ID]);
+ break;
+ }
+ }
+
+ if (!ts->suspending && device_may_wakeup(&client->dev))
+ pm_relax(&client->dev);
+
+ dev_dbg(&client->dev, "finished interrupt\n");
+
+ return IRQ_HANDLED;
+}
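/*
 * Editor's sketch, not part of the patch: the RESPONSE_STATUS branch above
 * extracts four little-endian 16-bit words, [2:major][2:minor][2:build][2:rev].
 * The same decode, stand-alone:
 */
#include <stdint.h>

static uint16_t sketch_le16(const uint8_t *p)
{
	return (uint16_t)((p[1] << 8) | p[0]);
}

/* data points at payload[RESPONSE_DATA]; out receives major, minor, build, rev */
static void sketch_decode_status_version(const uint8_t *data, uint16_t out[4])
{
	int i;

	for (i = 0; i < 4; i++)
		out[i] = sketch_le16(data + 2 * i);
}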
+
+static int zforce_input_open(struct input_dev *dev)
+{
+ struct zforce_ts *ts = input_get_drvdata(dev);
+ int ret;
+
+ ret = zforce_start(ts);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void zforce_input_close(struct input_dev *dev)
+{
+ struct zforce_ts *ts = input_get_drvdata(dev);
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = zforce_stop(ts);
+ if (ret)
+ dev_warn(&client->dev, "stopping zforce failed\n");
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int zforce_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct zforce_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+ int ret = 0;
+
+ mutex_lock(&input->mutex);
+ ts->suspending = true;
+
+ /*
+ * When configured as a wakeup source, the device should always be able
+ * to wake the system, so start the device here if necessary.
+ */
+ if (device_may_wakeup(&client->dev)) {
+ dev_dbg(&client->dev, "suspend while being a wakeup source\n");
+
+ /* Need to start device, if not open, to be a wakeup source. */
+ if (!input->users) {
+ ret = zforce_start(ts);
+ if (ret)
+ goto unlock;
+ }
+
+ enable_irq_wake(client->irq);
+ } else if (input->users) {
+ dev_dbg(&client->dev, "suspend without being a wakeup source\n");
+
+ ret = zforce_stop(ts);
+ if (ret)
+ goto unlock;
+
+ disable_irq(client->irq);
+ }
+
+ ts->suspended = true;
+
+unlock:
+ ts->suspending = false;
+ mutex_unlock(&input->mutex);
+
+ return ret;
+}
+
+static int zforce_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct zforce_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+ int ret = 0;
+
+ mutex_lock(&input->mutex);
+
+ ts->suspended = false;
+
+ if (device_may_wakeup(&client->dev)) {
+ dev_dbg(&client->dev, "resume from being a wakeup source\n");
+
+ disable_irq_wake(client->irq);
+
+ /* need to stop device if it was not open on suspend */
+ if (!input->users) {
+ ret = zforce_stop(ts);
+ if (ret)
+ goto unlock;
+ }
+ } else if (input->users) {
+ dev_dbg(&client->dev, "resume without being a wakeup source\n");
+
+ enable_irq(client->irq);
+
+ ret = zforce_start(ts);
+ if (ret < 0)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&input->mutex);
+
+ return ret;
+}
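/*
 * Editor's note, not part of the patch: the suspend/resume pair above acts on
 * just two inputs -- whether the device may wake the system and whether the
 * input device is currently open:
 *
 *   may_wakeup  opened   suspend action                  resume action
 *   yes         yes      enable_irq_wake()               disable_irq_wake()
 *   yes         no       zforce_start() + irq wake on    irq wake off + zforce_stop()
 *   no          yes      zforce_stop() + disable_irq()   enable_irq() + zforce_start()
 *   no          no       nothing                         nothing
 */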
+#endif
+
+static SIMPLE_DEV_PM_OPS(zforce_pm_ops, zforce_suspend, zforce_resume);
+
+static void zforce_reset(void *data)
+{
+ struct zforce_ts *ts = data;
+
+ gpio_set_value(ts->pdata->gpio_rst, 0);
+}
+
+static int zforce_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
+ struct zforce_ts *ts;
+ struct input_dev *input_dev;
+ int ret;
+
+ if (!pdata)
+ return -EINVAL;
+
+ ts = devm_kzalloc(&client->dev, sizeof(struct zforce_ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ret = devm_gpio_request_one(&client->dev, pdata->gpio_int, GPIOF_IN,
+ "zforce_ts_int");
+ if (ret) {
+ dev_err(&client->dev, "request of gpio %d failed, %d\n",
+ pdata->gpio_int, ret);
+ return ret;
+ }
+
+ ret = devm_gpio_request_one(&client->dev, pdata->gpio_rst,
+ GPIOF_OUT_INIT_LOW, "zforce_ts_rst");
+ if (ret) {
+ dev_err(&client->dev, "request of gpio %d failed, %d\n",
+ pdata->gpio_rst, ret);
+ return ret;
+ }
+
+ ret = devm_add_action(&client->dev, zforce_reset, ts);
+ if (ret) {
+ dev_err(&client->dev, "failed to register reset action, %d\n",
+ ret);
+ return ret;
+ }
+
+ snprintf(ts->phys, sizeof(ts->phys),
+ "%s/input0", dev_name(&client->dev));
+
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev) {
+ dev_err(&client->dev, "could not allocate input device\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&ts->access_mutex);
+ mutex_init(&ts->command_mutex);
+
+ ts->pdata = pdata;
+ ts->client = client;
+ ts->input = input_dev;
+
+ input_dev->name = "Neonode zForce touchscreen";
+ input_dev->phys = ts->phys;
+ input_dev->id.bustype = BUS_I2C;
+
+ input_dev->open = zforce_input_open;
+ input_dev->close = zforce_input_close;
+
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(EV_SYN, input_dev->evbit);
+ __set_bit(EV_ABS, input_dev->evbit);
+
+ /* For multi touch */
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
+ pdata->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
+ pdata->y_max, 0, 0);
+
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
+ ZFORCE_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0,
+ ZFORCE_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+ input_mt_init_slots(input_dev, ZFORCE_REPORT_POINTS, INPUT_MT_DIRECT);
+
+ input_set_drvdata(ts->input, ts);
+
+ init_completion(&ts->command_done);
+
+ /*
+ * The zforce pulls the interrupt low when it has data ready.
+ * After it is triggered the isr thread runs until all the available
+ * packets have been read and the interrupt is high again.
+ * Therefore we can trigger the interrupt anytime it is low and do
+ * not need to limit it to the interrupt edge.
+ */
+ ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ zforce_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ input_dev->name, ts);
+ if (ret) {
+ dev_err(&client->dev, "irq %d request failed\n", client->irq);
+ return ret;
+ }
+
+ i2c_set_clientdata(client, ts);
+
+ /* let the controller boot */
+ gpio_set_value(pdata->gpio_rst, 1);
+
+ ts->command_waiting = NOTIFICATION_BOOTCOMPLETE;
+ if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0)
+ dev_warn(&client->dev, "bootcomplete timed out\n");
+
+ /* need to start device to get version information */
+ ret = zforce_command_wait(ts, COMMAND_INITIALIZE);
+ if (ret) {
+ dev_err(&client->dev, "unable to initialize, %d\n", ret);
+ return ret;
+ }
+
+ /* this gets the firmware version among other information */
+ ret = zforce_command_wait(ts, COMMAND_STATUS);
+ if (ret < 0) {
+ dev_err(&client->dev, "couldn't get status, %d\n", ret);
+ zforce_stop(ts);
+ return ret;
+ }
+
+ /* stop device and put it into sleep until it is opened */
+ ret = zforce_stop(ts);
+ if (ret < 0)
+ return ret;
+
+ device_set_wakeup_capable(&client->dev, true);
+
+ ret = input_register_device(input_dev);
+ if (ret) {
+ dev_err(&client->dev, "could not register input device, %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
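/*
 * Editor's note, not part of the patch: the probe path above amounts to --
 * claim the GPIOs and the level-triggered IRQ while the controller is held in
 * reset, release the reset line, wait for the BOOTCOMPLETE notification, run
 * INITIALIZE and STATUS to capture the firmware version, then DEACTIVATE so
 * the controller sleeps until the input device is first opened.
 */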
+
+static struct i2c_device_id zforce_idtable[] = {
+ { "zforce-ts", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, zforce_idtable);
+
+static struct i2c_driver zforce_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "zforce-ts",
+ .pm = &zforce_pm_ops,
+ },
+ .probe = zforce_probe,
+ .id_table = zforce_idtable,
+};
+
+module_i2c_driver(zforce_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("zForce TouchScreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 14c1f474cf11..5d58bf16e9e3 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 785675a56a10..900946950230 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -88,7 +88,7 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
pr_warn("Device scope bus [%d] not found\n", scope->bus);
break;
}
- pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
+ pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
if (!pdev) {
/* warning will be printed below */
break;
@@ -99,7 +99,7 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
}
if (!pdev) {
pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
- segment, scope->bus, path->dev, path->fn);
+ segment, scope->bus, path->device, path->function);
*dev = NULL;
return 0;
}
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index f71673dbb23d..bab10b1002fb 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -525,12 +525,13 @@ static int __init intel_irq_remapping_supported(void)
if (disable_irq_remap)
return 0;
if (irq_remap_broken) {
- WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
- "This system BIOS has enabled interrupt remapping\n"
- "on a chipset that contains an erratum making that\n"
- "feature unstable. To maintain system stability\n"
- "interrupt remapping is being disabled. Please\n"
- "contact your BIOS vendor for an update\n");
+ printk(KERN_WARNING
+ "This system BIOS has enabled interrupt remapping\n"
+ "on a chipset that contains an erratum making that\n"
+ "feature unstable. To maintain system stability\n"
+ "interrupt remapping is being disabled. Please\n"
+ "contact your BIOS vendor for an update\n");
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
disable_irq_remap = 1;
return 0;
}
@@ -686,12 +687,12 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
* Access PCI directly because the PCI
* subsystem isn't initialized yet.
*/
- bus = read_pci_config_byte(bus, path->dev, path->fn,
+ bus = read_pci_config_byte(bus, path->device, path->function,
PCI_SECONDARY_BUS);
path++;
}
ir_hpet[ir_hpet_num].bus = bus;
- ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
+ ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
ir_hpet[ir_hpet_num].iommu = iommu;
ir_hpet[ir_hpet_num].id = scope->enumeration_id;
ir_hpet_num++;
@@ -714,13 +715,13 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
* Access PCI directly because the PCI
* subsystem isn't initialized yet.
*/
- bus = read_pci_config_byte(bus, path->dev, path->fn,
+ bus = read_pci_config_byte(bus, path->device, path->function,
PCI_SECONDARY_BUS);
path++;
}
ir_ioapic[ir_ioapic_num].bus = bus;
- ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
+ ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
ir_ioapic[ir_ioapic_num].iommu = iommu;
ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
ir_ioapic_num++;
diff --git a/drivers/iommu/iommu-traces.c b/drivers/iommu/iommu-traces.c
new file mode 100644
index 000000000000..bf3b317ff0c1
--- /dev/null
+++ b/drivers/iommu/iommu-traces.c
@@ -0,0 +1,27 @@
+/*
+ * iommu trace points
+ *
+ * Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com>
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/iommu.h>
+
+/* iommu_group_event */
+EXPORT_TRACEPOINT_SYMBOL_GPL(add_device_to_group);
+EXPORT_TRACEPOINT_SYMBOL_GPL(remove_device_from_group);
+
+/* iommu_device_event */
+EXPORT_TRACEPOINT_SYMBOL_GPL(attach_device_to_domain);
+EXPORT_TRACEPOINT_SYMBOL_GPL(detach_device_from_domain);
+
+/* iommu_map_unmap */
+EXPORT_TRACEPOINT_SYMBOL_GPL(map);
+EXPORT_TRACEPOINT_SYMBOL_GPL(unmap);
+
+/* iommu_error */
+EXPORT_TRACEPOINT_SYMBOL_GPL(io_page_fault);
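/*
 * Editor's sketch, not part of the patch: iommu-traces.c only instantiates and
 * exports the tracepoints; their definitions live in the new
 * include/trace/events/iommu.h pulled in via CREATE_TRACE_POINTS above.  As a
 * rough illustration of the shape of such a definition (field names and print
 * format are the editor's guess, and the TRACE_SYSTEM/include-guard
 * boilerplate that a real trace header needs is omitted):
 */
TRACE_EVENT(add_device_to_group,

	TP_PROTO(int group_id, struct device *dev),

	TP_ARGS(group_id, dev),

	TP_STRUCT__entry(
		__field(int, gid)
		__string(device, dev_name(dev))
	),

	TP_fast_assign(
		__entry->gid = group_id;
		__assign_str(device, dev_name(dev));
	),

	TP_printk("IOMMU: group=%d device=%s", __entry->gid, __get_str(device))
);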
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index fbe9ca734f8f..e5555fcfe703 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -29,6 +29,7 @@
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
+#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
static struct ida iommu_group_ida;
@@ -363,6 +364,8 @@ rename:
/* Notify any listeners about change to group. */
blocking_notifier_call_chain(&group->notifier,
IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
+
+ trace_add_device_to_group(group->id, dev);
return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -399,6 +402,8 @@ void iommu_group_remove_device(struct device *dev)
sysfs_remove_link(group->devices_kobj, device->name);
sysfs_remove_link(&dev->kobj, "iommu_group");
+ trace_remove_device_from_group(group->id, dev);
+
kfree(device->name);
kfree(device);
dev->iommu_group = NULL;
@@ -680,10 +685,14 @@ EXPORT_SYMBOL_GPL(iommu_domain_free);
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
+ int ret;
if (unlikely(domain->ops->attach_dev == NULL))
return -ENODEV;
- return domain->ops->attach_dev(domain, dev);
+ ret = domain->ops->attach_dev(domain, dev);
+ if (!ret)
+ trace_attach_device_to_domain(dev);
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
@@ -693,6 +702,7 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
return;
domain->ops->detach_dev(domain, dev);
+ trace_detach_device_from_domain(dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
@@ -807,17 +817,17 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
* size of the smallest page supported by the hardware
*/
if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
- pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n",
+ pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
iova, &paddr, size, min_pagesz);
return -EINVAL;
}
- pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size);
+ pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
while (size) {
size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
- pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n",
+ pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize);
ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
@@ -832,6 +842,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
/* unroll mapping in case something went wrong */
if (ret)
iommu_unmap(domain, orig_iova, orig_size - size);
+ else
+ trace_map(iova, paddr, size);
return ret;
}
@@ -880,6 +892,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
unmapped += unmapped_page;
}
+ trace_unmap(iova, 0, size);
return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 108c0e9c24d9..f75483a3a2ef 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -252,7 +252,7 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
spin_lock_irqsave(&gart->pte_lock, flags);
pfn = __phys_to_pfn(pa);
if (!pfn_valid(pfn)) {
- dev_err(gart->dev, "Invalid page: %08x\n", pa);
+ dev_err(gart->dev, "Invalid page: %pa\n", &pa);
spin_unlock_irqrestore(&gart->pte_lock, flags);
return -EINVAL;
}
@@ -295,8 +295,8 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
pa = (pte & GART_PAGE_MASK);
if (!pfn_valid(__phys_to_pfn(pa))) {
- dev_err(gart->dev, "No entry for %08llx:%08x\n",
- (unsigned long long)iova, pa);
+ dev_err(gart->dev, "No entry for %08llx:%pa\n",
+ (unsigned long long)iova, &pa);
gart_dump_table(gart);
return -EINVAL;
}
@@ -351,7 +351,6 @@ static int tegra_gart_probe(struct platform_device *pdev)
struct gart_device *gart;
struct resource *res, *res_remap;
void __iomem *gart_regs;
- int err;
struct device *dev = &pdev->dev;
if (gart_handle)
@@ -376,8 +375,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
gart_regs = devm_ioremap(dev, res->start, resource_size(res));
if (!gart_regs) {
dev_err(dev, "failed to remap GART registers\n");
- err = -ENXIO;
- goto fail;
+ return -ENXIO;
}
gart->dev = &pdev->dev;
@@ -391,8 +389,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
if (!gart->savedata) {
dev_err(dev, "failed to allocate context save area\n");
- err = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
platform_set_drvdata(pdev, gart);
@@ -401,27 +398,15 @@ static int tegra_gart_probe(struct platform_device *pdev)
gart_handle = gart;
bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
return 0;
-
-fail:
- if (gart_regs)
- devm_iounmap(dev, gart_regs);
- if (gart && gart->savedata)
- vfree(gart->savedata);
- devm_kfree(dev, gart);
- return err;
}
static int tegra_gart_remove(struct platform_device *pdev)
{
struct gart_device *gart = platform_get_drvdata(pdev);
- struct device *dev = gart->dev;
writel(0, gart->regs + GART_CONFIG);
if (gart->savedata)
vfree(gart->savedata);
- if (gart->regs)
- devm_iounmap(dev, gart->regs);
- devm_kfree(dev, gart);
gart_handle = NULL;
return 0;
}
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index e0665603afd9..34374b3bc13b 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -731,7 +731,7 @@ static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
unsigned long pfn = __phys_to_pfn(pa);
unsigned long flags;
- dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa);
+ dev_dbg(as->smmu->dev, "[%d] %08lx:%pa\n", as->asid, iova, &pa);
if (!pfn_valid(pfn))
return -ENOMEM;
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index 6e066c53acce..d0016ba469ed 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -180,20 +180,28 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
ipack_device_attr(id_format, "0x%hhu\n");
-static struct device_attribute ipack_dev_attrs[] = {
- __ATTR_RO(id),
- __ATTR_RO(id_device),
- __ATTR_RO(id_format),
- __ATTR_RO(id_vendor),
- __ATTR_RO(modalias),
+static DEVICE_ATTR_RO(id);
+static DEVICE_ATTR_RO(id_device);
+static DEVICE_ATTR_RO(id_format);
+static DEVICE_ATTR_RO(id_vendor);
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *ipack_attrs[] = {
+ &dev_attr_id.attr,
+ &dev_attr_id_device.attr,
+ &dev_attr_id_format.attr,
+ &dev_attr_id_vendor.attr,
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(ipack);
static struct bus_type ipack_bus_type = {
.name = "ipack",
.probe = ipack_bus_probe,
.match = ipack_bus_match,
.remove = ipack_bus_remove,
- .dev_attrs = ipack_dev_attrs,
+ .dev_groups = ipack_groups,
.uevent = ipack_uevent,
};
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index 868ed40cb6bf..40e6440348ff 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -171,8 +171,7 @@ static struct irq_domain_ops combiner_irq_domain_ops = {
static void __init combiner_init(void __iomem *combiner_base,
struct device_node *np,
- unsigned int max_nr,
- int irq_base)
+ unsigned int max_nr)
{
int i, irq;
unsigned int nr_irq;
@@ -186,7 +185,7 @@ static void __init combiner_init(void __iomem *combiner_base,
return;
}
- combiner_irq_domain = irq_domain_add_simple(np, nr_irq, irq_base,
+ combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
&combiner_irq_domain_ops, combiner_data);
if (WARN_ON(!combiner_irq_domain)) {
pr_warning("%s: irq domain init failed\n", __func__);
@@ -207,7 +206,6 @@ static int __init combiner_of_init(struct device_node *np,
{
void __iomem *combiner_base;
unsigned int max_nr = 20;
- int irq_base = -1;
combiner_base = of_iomap(np, 0);
if (!combiner_base) {
@@ -221,14 +219,7 @@ static int __init combiner_of_init(struct device_node *np,
__func__, max_nr);
}
- /*
- * FIXME: This is a hardwired COMBINER_IRQ(0,0). Once all devices
- * get their IRQ from DT, remove this in order to get dynamic
- * allocation.
- */
- irq_base = 160;
-
- combiner_init(combiner_base, np, max_nr, irq_base);
+ combiner_init(combiner_base, np, max_nr);
return 0;
}
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index bb328a366122..433cc8568dec 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -21,7 +21,10 @@
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/of_pci.h>
#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/msi.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
@@ -51,12 +54,22 @@
#define IPI_DOORBELL_START (0)
#define IPI_DOORBELL_END (8)
#define IPI_DOORBELL_MASK 0xFF
+#define PCI_MSI_DOORBELL_START (16)
+#define PCI_MSI_DOORBELL_NR (16)
+#define PCI_MSI_DOORBELL_END (32)
+#define PCI_MSI_DOORBELL_MASK 0xFFFF0000
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
+#ifdef CONFIG_PCI_MSI
+static struct irq_domain *armada_370_xp_msi_domain;
+static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
+static DEFINE_MUTEX(msi_used_lock);
+static phys_addr_t msi_doorbell_addr;
+#endif
/*
* In SMP mode:
@@ -87,6 +100,144 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
+#ifdef CONFIG_PCI_MSI
+
+static int armada_370_xp_alloc_msi(void)
+{
+ int hwirq;
+
+ mutex_lock(&msi_used_lock);
+ hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR);
+ if (hwirq >= PCI_MSI_DOORBELL_NR)
+ hwirq = -ENOSPC;
+ else
+ set_bit(hwirq, msi_used);
+ mutex_unlock(&msi_used_lock);
+
+ return hwirq;
+}
+
+static void armada_370_xp_free_msi(int hwirq)
+{
+ mutex_lock(&msi_used_lock);
+ if (!test_bit(hwirq, msi_used))
+ pr_err("trying to free unused MSI#%d\n", hwirq);
+ else
+ clear_bit(hwirq, msi_used);
+ mutex_unlock(&msi_used_lock);
+}
+
+static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
+ struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct msi_msg msg;
+ int hwirq;
+ int virq;
+
+ hwirq = armada_370_xp_alloc_msi();
+ if (hwirq < 0)
+ return hwirq;
+
+ virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
+ if (!virq) {
+ armada_370_xp_free_msi(hwirq);
+ return -EINVAL;
+ }
+
+ irq_set_msi_desc(virq, desc);
+
+ msg.address_lo = msi_doorbell_addr;
+ msg.address_hi = 0;
+ msg.data = 0xf00 | (hwirq + 16);
+
+ write_msi_msg(virq, &msg);
+ return 0;
+}
+
+static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
+ unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+ irq_dispose_mapping(irq);
+ armada_370_xp_free_msi(d->hwirq);
+}
+
+static struct irq_chip armada_370_xp_msi_irq_chip = {
+ .name = "armada_370_xp_msi_irq",
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
+ handle_simple_irq);
+ set_irq_flags(virq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
+ .map = armada_370_xp_msi_map,
+};
+
+static int armada_370_xp_msi_init(struct device_node *node,
+ phys_addr_t main_int_phys_base)
+{
+ struct msi_chip *msi_chip;
+ u32 reg;
+ int ret;
+
+ msi_doorbell_addr = main_int_phys_base +
+ ARMADA_370_XP_SW_TRIG_INT_OFFS;
+
+ msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
+ if (!msi_chip)
+ return -ENOMEM;
+
+ msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
+ msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
+ msi_chip->of_node = node;
+
+ armada_370_xp_msi_domain =
+ irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
+ &armada_370_xp_msi_irq_ops,
+ NULL);
+ if (!armada_370_xp_msi_domain) {
+ kfree(msi_chip);
+ return -ENOMEM;
+ }
+
+ ret = of_pci_msi_chip_add(msi_chip);
+ if (ret < 0) {
+ irq_domain_remove(armada_370_xp_msi_domain);
+ kfree(msi_chip);
+ return ret;
+ }
+
+ reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
+ | PCI_MSI_DOORBELL_MASK;
+
+ writel(reg, per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+ /* Unmask IPI interrupt */
+ writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
+ return 0;
+}
+#else
+static inline int armada_370_xp_msi_init(struct device_node *node,
+ phys_addr_t main_int_phys_base)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_SMP
static int armada_xp_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
@@ -214,12 +365,39 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
if (irqnr > 1022)
break;
- if (irqnr > 0) {
+ if (irqnr > 1) {
irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
irqnr);
handle_IRQ(irqnr, regs);
continue;
}
+
+#ifdef CONFIG_PCI_MSI
+ /* MSI handling */
+ if (irqnr == 1) {
+ u32 msimask, msinr;
+
+ msimask = readl_relaxed(per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+ & PCI_MSI_DOORBELL_MASK;
+
+ writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+ for (msinr = PCI_MSI_DOORBELL_START;
+ msinr < PCI_MSI_DOORBELL_END; msinr++) {
+ int irq;
+
+ if (!(msimask & BIT(msinr)))
+ continue;
+
+ irq = irq_find_mapping(armada_370_xp_msi_domain,
+ msinr - 16);
+ handle_IRQ(irq, regs);
+ }
+ }
+#endif
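/*
 * Editor's sketch, not part of the patch: the block above treats summary
 * interrupt 1 as the MSI doorbell group -- MSIs occupy doorbells 16-31, so the
 * handler masks the cause register with PCI_MSI_DOORBELL_MASK and maps each
 * set bit N back to MSI hwirq N - 16, the inverse of the "hwirq + 16" offset
 * used when the MSI message is composed in armada_370_xp_setup_msi_irq().
 * Stand-alone version of that decode:
 */
#include <stdint.h>

#define SKETCH_MSI_DOORBELL_START	16
#define SKETCH_MSI_DOORBELL_END		32
#define SKETCH_MSI_DOORBELL_MASK	0xFFFF0000u

/* Calls handle(hwirq) once for every MSI doorbell bit set in cause. */
static void sketch_dispatch_msi_doorbells(uint32_t cause, void (*handle)(int hwirq))
{
	int db;

	cause &= SKETCH_MSI_DOORBELL_MASK;
	for (db = SKETCH_MSI_DOORBELL_START; db < SKETCH_MSI_DOORBELL_END; db++)
		if (cause & (1u << db))
			handle(db - SKETCH_MSI_DOORBELL_START);
}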
+
#ifdef CONFIG_SMP
/* IPI Handling */
if (irqnr == 0) {
@@ -248,12 +426,25 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
static int __init armada_370_xp_mpic_of_init(struct device_node *node,
struct device_node *parent)
{
+ struct resource main_int_res, per_cpu_int_res;
u32 control;
- main_int_base = of_iomap(node, 0);
- per_cpu_int_base = of_iomap(node, 1);
+ BUG_ON(of_address_to_resource(node, 0, &main_int_res));
+ BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));
+
+ BUG_ON(!request_mem_region(main_int_res.start,
+ resource_size(&main_int_res),
+ node->full_name));
+ BUG_ON(!request_mem_region(per_cpu_int_res.start,
+ resource_size(&per_cpu_int_res),
+ node->full_name));
+ main_int_base = ioremap(main_int_res.start,
+ resource_size(&main_int_res));
BUG_ON(!main_int_base);
+
+ per_cpu_int_base = ioremap(per_cpu_int_res.start,
+ resource_size(&per_cpu_int_res));
BUG_ON(!per_cpu_int_base);
control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
@@ -262,8 +453,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
irq_domain_add_linear(node, (control >> 2) & 0x3ff,
&armada_370_xp_mpic_irq_ops, NULL);
- if (!armada_370_xp_mpic_domain)
- panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");
+ BUG_ON(!armada_370_xp_mpic_domain);
irq_set_default_host(armada_370_xp_mpic_domain);
@@ -280,6 +470,8 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
#endif
+ armada_370_xp_msi_init(node, main_int_res.start);
+
set_handle_irq(armada_370_xp_handle_irq);
return 0;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d0e948084eaf..9031171c141b 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -253,10 +253,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
return -EINVAL;
+ raw_spin_lock(&irq_controller_lock);
mask = 0xff << shift;
bit = gic_cpu_map[cpu] << shift;
-
- raw_spin_lock(&irq_controller_lock);
val = readl_relaxed(reg) & ~mask;
writel_relaxed(val | bit, reg);
raw_spin_unlock(&irq_controller_lock);
@@ -652,7 +651,9 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
int cpu;
- unsigned long map = 0;
+ unsigned long flags, map = 0;
+
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
/* Convert our logical CPU mask into a physical one. */
for_each_cpu(cpu, mask)
@@ -666,7 +667,149 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
/* this always happens on GIC0 */
writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+#endif
+
+#ifdef CONFIG_BL_SWITCHER
+/*
+ * gic_send_sgi - send a SGI directly to given CPU interface number
+ *
+ * cpu_id: the ID for the destination CPU interface
+ * irq: the IPI number to send a SGI for
+ */
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
+{
+ BUG_ON(cpu_id >= NR_GIC_CPU_IF);
+ cpu_id = 1 << cpu_id;
+ /* this always happens on GIC0 */
+ writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+}
+
+/*
+ * gic_get_cpu_id - get the CPU interface ID for the specified CPU
+ *
+ * @cpu: the logical CPU number to get the GIC ID for.
+ *
+ * Return the CPU interface ID for the given logical CPU number,
+ * or -1 if the CPU number is too large or the interface ID is
+ * unknown (more than one bit set).
+ */
+int gic_get_cpu_id(unsigned int cpu)
+{
+ unsigned int cpu_bit;
+
+ if (cpu >= NR_GIC_CPU_IF)
+ return -1;
+ cpu_bit = gic_cpu_map[cpu];
+ if (cpu_bit & (cpu_bit - 1))
+ return -1;
+ return __ffs(cpu_bit);
}
+
+/*
+ * gic_migrate_target - migrate IRQs to another CPU interface
+ *
+ * @new_cpu_id: the CPU target ID to migrate IRQs to
+ *
+ * Migrate all peripheral interrupts with a target matching the current CPU
+ * to the interface corresponding to @new_cpu_id. The CPU interface mapping
+ * is also updated. Targets to other CPU interfaces are unchanged.
+ * This must be called with IRQs locally disabled.
+ */
+void gic_migrate_target(unsigned int new_cpu_id)
+{
+ unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
+ void __iomem *dist_base;
+ int i, ror_val, cpu = smp_processor_id();
+ u32 val, cur_target_mask, active_mask;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ if (!dist_base)
+ return;
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+
+ cur_cpu_id = __ffs(gic_cpu_map[cpu]);
+ cur_target_mask = 0x01010101 << cur_cpu_id;
+ ror_val = (cur_cpu_id - new_cpu_id) & 31;
+
+ raw_spin_lock(&irq_controller_lock);
+
+ /* Update the target interface for this logical CPU */
+ gic_cpu_map[cpu] = 1 << new_cpu_id;
+
+ /*
+ * Find all the peripheral interrupts targeting the current
+ * CPU interface and migrate them to the new CPU interface.
+ * We skip DIST_TARGET 0 to 7 as they are read-only.
+ */
+ for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
+ val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+ active_mask = val & cur_target_mask;
+ if (active_mask) {
+ val &= ~active_mask;
+ val |= ror32(active_mask, ror_val);
+ writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
+ }
+ }
+
+ raw_spin_unlock(&irq_controller_lock);
+
+ /*
+ * Now let's migrate and clear any potential SGIs that might be
+ * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
+ * is a banked register, we can only forward the SGI using
+ * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
+ * doesn't use that information anyway.
+ *
+ * For the same reason we do not adjust SGI source information
+ * for SGIs we previously sent to other CPUs either.
+ */
+ for (i = 0; i < 16; i += 4) {
+ int j;
+ val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
+ if (!val)
+ continue;
+ writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
+ for (j = i; j < i + 4; j++) {
+ if (val & 0xff)
+ writel_relaxed((1 << (new_cpu_id + 16)) | j,
+ dist_base + GIC_DIST_SOFTINT);
+ val >>= 8;
+ }
+ }
+}
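/*
 * Editor's sketch, not part of the patch: each 32-bit GIC_DIST_TARGET register
 * packs the one-byte target masks of four interrupts, and every byte selected
 * by active_mask holds the single bit of the current CPU interface.  Rotating
 * the whole word by (cur_cpu_id - new_cpu_id) & 31 therefore moves that bit to
 * the new CPU's position in all four bytes at once, which is what the ror32()
 * above relies on.  Stand-alone version of the transformation:
 */
#include <stdint.h>

static uint32_t sketch_ror32(uint32_t word, unsigned int shift)
{
	return shift ? (word >> shift) | (word << (32 - shift)) : word;
}

/* Move every per-byte target bit in val from CPU interface cur_id to new_id. */
static uint32_t sketch_retarget(uint32_t val, unsigned int cur_id, unsigned int new_id)
{
	uint32_t cur_target_mask = 0x01010101u << cur_id;
	uint32_t active_mask = val & cur_target_mask;
	unsigned int ror_val = (cur_id - new_id) & 31;

	return (val & ~active_mask) | sketch_ror32(active_mask, ror_val);
}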
+
+/*
+ * gic_get_sgir_physaddr - get the physical address for the SGI register
+ *
+ * Return the physical address of the SGI register to be used
+ * by some early assembly code when the kernel is not yet available.
+ */
+static unsigned long gic_dist_physaddr;
+
+unsigned long gic_get_sgir_physaddr(void)
+{
+ if (!gic_dist_physaddr)
+ return 0;
+ return gic_dist_physaddr + GIC_DIST_SOFTINT;
+}
+
+void __init gic_init_physaddr(struct device_node *node)
+{
+ struct resource res;
+ if (of_address_to_resource(node, 0, &res) == 0) {
+ gic_dist_physaddr = res.start;
+ pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
+ }
+}
+
+#else
+#define gic_init_physaddr(node) do { } while (0)
#endif
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
@@ -850,6 +993,8 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
percpu_offset = 0;
gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+ if (!gic_cnt)
+ gic_init_physaddr(node);
if (parent) {
irq = irq_of_parse_and_map(node, 0);
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 2bbb00404cf5..8e21ae0bab46 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -469,6 +469,8 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
int __init vic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *regs;
+ u32 interrupt_mask = ~0;
+ u32 wakeup_mask = ~0;
if (WARN(parent, "non-root VICs are not supported"))
return -EINVAL;
@@ -477,10 +479,13 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent)
if (WARN_ON(!regs))
return -EIO;
+ of_property_read_u32(node, "valid-mask", &interrupt_mask);
+ of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);
+
/*
* Passing 0 as first IRQ makes the simple domain allocate descriptors
*/
- __vic_init(regs, 0, ~0, ~0, node);
+ __vic_init(regs, 0, interrupt_mask, wakeup_mask, node);
return 0;
}
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 52377b4bf039..a2e0ed6c9a4d 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -481,7 +481,7 @@ void __inline__ outpp(void __iomem *addr, word p)
int diva_os_register_irq(void *context, byte irq, const char *name)
{
int result = request_irq(irq, diva_os_irq_wrapper,
- IRQF_DISABLED | IRQF_SHARED, name, context);
+ IRQF_SHARED, name, context);
return (result);
}
diff --git a/drivers/isdn/hardware/eicon/um_idi.c b/drivers/isdn/hardware/eicon/um_idi.c
index 7cab5c3276c2..e1519718ce67 100644
--- a/drivers/isdn/hardware/eicon/um_idi.c
+++ b/drivers/isdn/hardware/eicon/um_idi.c
@@ -288,9 +288,9 @@ int divas_um_idi_delete_entity(int adapter_nr, void *entity)
cleanup_entity(e);
diva_os_free(0, e->os_context);
memset(e, 0x00, sizeof(*e));
- diva_os_free(0, e);
DBG_LOG(("A(%d) remove E:%08x", adapter_nr, e));
+ diva_os_free(0, e);
return (0);
}
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index ca997bd4e818..92acc81f844d 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -336,7 +336,7 @@ static int __init sc_init(void)
*/
sc_adapter[cinst]->interrupt = irq[b];
if (request_irq(sc_adapter[cinst]->interrupt, interrupt_handler,
- IRQF_DISABLED, interface->id,
+ 0, interface->id,
(void *)(unsigned long) cinst))
{
kfree(sc_adapter[cinst]->channel);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 875bbe4c962e..72156c123033 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -300,6 +300,16 @@ config LEDS_PCA963X
LED driver chip accessed via the I2C bus. Supported
devices include PCA9633 and PCA9634
+config LEDS_PCA9685
+ tristate "LED support for PCA9685 I2C chip"
+ depends on LEDS_CLASS
+ depends on I2C
+ help
+ This option enables support for LEDs connected to the PCA9685
+ LED driver chip accessed via the I2C bus.
+ The PCA9685 offers 12-bit PWM (4096 brightness levels) on
+ 16 individual channels.
+
config LEDS_WM831X_STATUS
tristate "LED support for status LEDs on WM831x PMICs"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 8979b0b2c85e..3cd76dbd9be2 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
obj-$(CONFIG_LEDS_PCA963X) += leds-pca963x.o
+obj-$(CONFIG_LEDS_PCA9685) += leds-pca9685.o
obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index a502678cc7f5..66d0a57db221 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -161,13 +161,10 @@ static ssize_t show_color_common(struct device *dev, char *buf, int color)
switch (color) {
case RED:
return scnprintf(buf, PAGE_SIZE, "%02X\n", data->red);
- break;
case GREEN:
return scnprintf(buf, PAGE_SIZE, "%02X\n", data->green);
- break;
case BLUE:
return scnprintf(buf, PAGE_SIZE, "%02X\n", data->blue);
- break;
default:
return -EINVAL;
}
diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c
index 1f9d8e62d37e..db3ba8b42517 100644
--- a/drivers/leds/leds-dac124s085.c
+++ b/drivers/leds/leds-dac124s085.c
@@ -101,7 +101,6 @@ eledcr:
while (i--)
led_classdev_unregister(&dac->leds[i].ldev);
- spi_set_drvdata(spi, NULL);
return ret;
}
@@ -115,8 +114,6 @@ static int dac124s085_remove(struct spi_device *spi)
cancel_work_sync(&dac->leds[i].work);
}
- spi_set_drvdata(spi, NULL);
-
return 0;
}
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index e8b01e57348d..78b0e273a903 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/leds.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
@@ -170,11 +171,11 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
int count, ret;
/* count LEDs in this device, so we know how much to allocate */
- count = of_get_child_count(np);
+ count = of_get_available_child_count(np);
if (!count)
return ERR_PTR(-ENODEV);
- for_each_child_of_node(np, child)
+ for_each_available_child_of_node(np, child)
if (of_get_gpio(child, 0) == -EPROBE_DEFER)
return ERR_PTR(-EPROBE_DEFER);
@@ -183,7 +184,7 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
if (!priv)
return ERR_PTR(-ENOMEM);
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
struct gpio_led led = {};
enum of_gpio_flags flags;
const char *state;
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index fe3bcbb5747f..6b553d9f4266 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -29,6 +29,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index 2585cfd57711..bf006f4e44a0 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -17,6 +17,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 351825b96f16..9acc6bb7deef 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -20,6 +20,8 @@
#include <linux/module.h>
#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include "leds-lp55xx-common.h"
@@ -165,6 +167,7 @@ static int lp55xx_init_led(struct lp55xx_led *led,
led->led_current = pdata->led_config[chan].led_current;
led->max_current = pdata->led_config[chan].max_current;
led->chan_nr = pdata->led_config[chan].chan_nr;
+ led->cdev.default_trigger = pdata->led_config[chan].default_trigger;
if (led->chan_nr >= max_channel) {
dev_err(dev, "Use channel numbers between 0 and %d\n",
@@ -406,18 +409,18 @@ int lp55xx_init_device(struct lp55xx_chip *chip)
if (!pdata || !cfg)
return -EINVAL;
- if (pdata->setup_resources) {
- ret = pdata->setup_resources();
+ if (gpio_is_valid(pdata->enable_gpio)) {
+ ret = devm_gpio_request_one(dev, pdata->enable_gpio,
+ GPIOF_DIR_OUT, "lp5523_enable");
if (ret < 0) {
- dev_err(dev, "setup resoure err: %d\n", ret);
+ dev_err(dev, "could not acquire enable gpio (err=%d)\n",
+ ret);
goto err;
}
- }
- if (pdata->enable) {
- pdata->enable(0);
+ gpio_set_value(pdata->enable_gpio, 0);
usleep_range(1000, 2000); /* Keep enable down at least 1ms */
- pdata->enable(1);
+ gpio_set_value(pdata->enable_gpio, 1);
usleep_range(1000, 2000); /* 500us abs min. */
}
@@ -458,11 +461,8 @@ void lp55xx_deinit_device(struct lp55xx_chip *chip)
if (chip->clk)
clk_disable_unprepare(chip->clk);
- if (pdata->enable)
- pdata->enable(0);
-
- if (pdata->release_resources)
- pdata->release_resources();
+ if (gpio_is_valid(pdata->enable_gpio))
+ gpio_set_value(pdata->enable_gpio, 0);
}
EXPORT_SYMBOL_GPL(lp55xx_deinit_device);
@@ -586,6 +586,8 @@ int lp55xx_of_populate_pdata(struct device *dev, struct device_node *np)
of_property_read_string(child, "chan-name", &cfg[i].name);
of_property_read_u8(child, "led-cur", &cfg[i].led_current);
of_property_read_u8(child, "max-cur", &cfg[i].max_current);
+ cfg[i].default_trigger =
+ of_get_property(child, "linux,default-trigger", NULL);
i++;
}
@@ -593,6 +595,8 @@ int lp55xx_of_populate_pdata(struct device *dev, struct device_node *np)
of_property_read_string(np, "label", &pdata->label);
of_property_read_u8(np, "clock-mode", &pdata->clock_mode);
+ pdata->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
+
/* LP8501 specific */
of_property_read_u8(np, "pwr-sel", (u8 *)&pdata->pwr_sel);
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index 8d55a780ca46..f1c704f2243a 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -18,6 +18,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 141f13438e80..c7a4230233ea 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -30,6 +30,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_data/leds-kirkwood-ns2.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
/*
diff --git a/drivers/leds/leds-pca9685.c b/drivers/leds/leds-pca9685.c
new file mode 100644
index 000000000000..6e1ef3a9d6ef
--- /dev/null
+++ b/drivers/leds/leds-pca9685.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2013 Maximilian Güntner <maximilian.guentner@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Based on leds-pca963x.c driver by
+ * Peter Meerwald <p.meerwald@bct-electronic.com>
+ *
+ * Driver for the NXP PCA9685 12-Bit PWM LED driver chip.
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include <linux/platform_data/leds-pca9685.h>
+
+/* Register Addresses */
+#define PCA9685_MODE1 0x00
+#define PCA9685_MODE2 0x01
+#define PCA9685_LED0_ON_L 0x06
+#define PCA9685_ALL_LED_ON_L 0xFA
+
+/* MODE1 Register */
+#define PCA9685_ALLCALL 0x00
+#define PCA9685_SLEEP 0x04
+#define PCA9685_AI 0x05
+
+/* MODE2 Register */
+#define PCA9685_INVRT 0x04
+#define PCA9685_OUTDRV 0x02
+
+static const struct i2c_device_id pca9685_id[] = {
+ { "pca9685", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pca9685_id);
+
+struct pca9685_led {
+ struct i2c_client *client;
+ struct work_struct work;
+ u16 brightness;
+ struct led_classdev led_cdev;
+ int led_num; /* 0-15 */
+ char name[32];
+};
+
+static void pca9685_write_msg(struct i2c_client *client, u8 *buf, u8 len)
+{
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = 0x00,
+ .len = len,
+ .buf = buf
+ };
+ i2c_transfer(client->adapter, &msg, 1);
+}
+
+static void pca9685_all_off(struct i2c_client *client)
+{
+ u8 i2c_buffer[5] = {PCA9685_ALL_LED_ON_L, 0x00, 0x00, 0x00, 0x10};
+ pca9685_write_msg(client, i2c_buffer, 5);
+}
+
+static void pca9685_led_work(struct work_struct *work)
+{
+ struct pca9685_led *pca9685;
+ u8 i2c_buffer[5];
+
+ pca9685 = container_of(work, struct pca9685_led, work);
+ i2c_buffer[0] = PCA9685_LED0_ON_L + 4 * pca9685->led_num;
+ /*
+ * 4095 is the maximum brightness, so we set the ON time to 0x1000
+ * which disables the PWM generator for that LED
+ */
+ if (pca9685->brightness == 4095)
+ *((__le16 *)(i2c_buffer+1)) = cpu_to_le16(0x1000);
+ else
+ *((__le16 *)(i2c_buffer+1)) = 0x0000;
+
+ if (pca9685->brightness == 0)
+ *((__le16 *)(i2c_buffer+3)) = cpu_to_le16(0x1000);
+ else if (pca9685->brightness == 4095)
+ *((__le16 *)(i2c_buffer+3)) = 0x0000;
+ else
+ *((__le16 *)(i2c_buffer+3)) = cpu_to_le16(pca9685->brightness);
+
+ pca9685_write_msg(pca9685->client, i2c_buffer, 5);
+}
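/*
 * Editor's sketch, not part of the patch: each PCA9685 channel has a 16-bit ON
 * and a 16-bit OFF register; setting bit 12 (0x1000) of either forces the
 * output permanently on or off, otherwise the low 12 bits give the PWM edge
 * positions.  The work function above therefore maps a 0-4095 brightness as:
 */
#include <stdint.h>

static void sketch_pca9685_channel_regs(uint16_t brightness,
					uint16_t *on, uint16_t *off)
{
	if (brightness == 4095) {	/* full on, PWM generator bypassed */
		*on = 0x1000;
		*off = 0x0000;
	} else if (brightness == 0) {	/* full off */
		*on = 0x0000;
		*off = 0x1000;
	} else {			/* PWM: rise at count 0, fall at brightness */
		*on = 0x0000;
		*off = brightness;
	}
}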
+
+static void pca9685_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct pca9685_led *pca9685;
+ pca9685 = container_of(led_cdev, struct pca9685_led, led_cdev);
+ pca9685->brightness = value;
+
+ schedule_work(&pca9685->work);
+}
+
+static int pca9685_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pca9685_led *pca9685;
+ struct pca9685_platform_data *pdata;
+ int err;
+ u8 i;
+
+ pdata = dev_get_platdata(&client->dev);
+ if (pdata) {
+ if (pdata->leds.num_leds < 1 || pdata->leds.num_leds > 16) {
+ dev_err(&client->dev, "board info must claim 1-16 LEDs");
+ return -EINVAL;
+ }
+ }
+
+ pca9685 = devm_kzalloc(&client->dev, 16 * sizeof(*pca9685), GFP_KERNEL);
+ if (!pca9685)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, pca9685);
+ pca9685_all_off(client);
+
+ for (i = 0; i < 16; i++) {
+ pca9685[i].client = client;
+ pca9685[i].led_num = i;
+ pca9685[i].name[0] = '\0';
+ if (pdata && i < pdata->leds.num_leds) {
+ if (pdata->leds.leds[i].name)
+ strncpy(pca9685[i].name,
+ pdata->leds.leds[i].name,
+ sizeof(pca9685[i].name)-1);
+ if (pdata->leds.leds[i].default_trigger)
+ pca9685[i].led_cdev.default_trigger =
+ pdata->leds.leds[i].default_trigger;
+ }
+ if (strlen(pca9685[i].name) == 0) {
+ /*
+ * Write adapter and address to the name as well.
+ * Otherwise multiple chips attached to one host would
+ * register clashing LED class device names.
+ */
+ snprintf(pca9685[i].name, sizeof(pca9685[i].name),
+ "pca9685:%d:x%.2x:%d",
+ client->adapter->nr, client->addr, i);
+ }
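/* Editor's note: e.g. adapter 0, address 0x40, channel 5 yields "pca9685:0:x40:5". */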
+ pca9685[i].led_cdev.name = pca9685[i].name;
+ pca9685[i].led_cdev.max_brightness = 0xfff;
+ pca9685[i].led_cdev.brightness_set = pca9685_led_set;
+
+ INIT_WORK(&pca9685[i].work, pca9685_led_work);
+ err = led_classdev_register(&client->dev, &pca9685[i].led_cdev);
+ if (err < 0)
+ goto exit;
+ }
+
+ if (pdata)
+ i2c_smbus_write_byte_data(client, PCA9685_MODE2,
+ pdata->outdrv << PCA9685_OUTDRV |
+ pdata->inverted << PCA9685_INVRT);
+ else
+ i2c_smbus_write_byte_data(client, PCA9685_MODE2,
+ PCA9685_TOTEM_POLE << PCA9685_OUTDRV);
+ /* Enable Auto-Increment, enable oscillator, ALLCALL/SUBADDR disabled */
+ i2c_smbus_write_byte_data(client, PCA9685_MODE1, BIT(PCA9685_AI));
+
+ return 0;
+
+exit:
+ while (i--) {
+ led_classdev_unregister(&pca9685[i].led_cdev);
+ cancel_work_sync(&pca9685[i].work);
+ }
+ return err;
+}
+
+static int pca9685_remove(struct i2c_client *client)
+{
+ struct pca9685_led *pca9685 = i2c_get_clientdata(client);
+ u8 i;
+
+ for (i = 0; i < 16; i++) {
+ led_classdev_unregister(&pca9685[i].led_cdev);
+ cancel_work_sync(&pca9685[i].work);
+ }
+ pca9685_all_off(client);
+ return 0;
+}
+
+static struct i2c_driver pca9685_driver = {
+ .driver = {
+ .name = "leds-pca9685",
+ .owner = THIS_MODULE,
+ },
+ .probe = pca9685_probe,
+ .remove = pca9685_remove,
+ .id_table = pca9685_id,
+};
+
+module_i2c_driver(pca9685_driver);
+
+MODULE_AUTHOR("Maximilian Güntner <maximilian.guentner@gmail.com>");
+MODULE_DESCRIPTION("PCA9685 LED Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index bb6f94898541..2848171b8576 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -232,7 +232,7 @@ static struct platform_driver led_pwm_driver = {
.driver = {
.name = "leds_pwm",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(of_pwm_leds_match),
+ .of_match_table = of_pwm_leds_match,
},
};
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index b3256ff0d426..d0a1d8a45c81 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -229,7 +229,7 @@ struct lguest_vq_info {
* make a hypercall. We hand the physical address of the virtqueue so the Host
* knows which virtqueue we're talking about.
*/
-static void lg_notify(struct virtqueue *vq)
+static bool lg_notify(struct virtqueue *vq)
{
/*
* We store our virtqueue information in the "priv" pointer of the
@@ -238,6 +238,7 @@ static void lg_notify(struct virtqueue *vq)
struct lguest_vq_info *lvq = vq->priv;
hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0);
+ return true;
}
/* An extern declaration inside a C file is bad form. Don't do it. */
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 696238b9f0f7..d26a312f117a 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -103,6 +103,7 @@ config ADB_PMU_LED_IDE
bool "Use front LED as IDE LED by default"
depends on ADB_PMU_LED
depends on LEDS_CLASS
+ depends on IDE_GD_ATA
select LEDS_TRIGGERS
select LEDS_TRIGGER_IDE_DISK
help
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index ac5c87939860..4f12c6f01fe7 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -24,6 +24,8 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/machdep.h>
#include <asm/macio.h>
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index cad0e19b47a2..4192901cab40 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -25,6 +25,8 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel_stat.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/io.h>
#include <asm/prom.h>
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index b3b2d36c009e..23b4a3b28dbc 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -35,6 +35,7 @@
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 283e1b53c6be..dee88e59f0d3 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -46,6 +46,8 @@
#include <linux/suspend.h>
#include <linux/cpu.h>
#include <linux/compat.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 30b426ed744b..f70fda04fa08 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -282,6 +282,23 @@ config DM_CACHE_MQ
This is meant to be a general purpose policy. It prioritises
reads over writes.
+config DM_CACHE_ERA
+ tristate "ERA Cache Policy shim (EXPERIMENTAL)"
+ depends on DM_CACHE
+ ---help---
+ A cache policy shim that adds an "era" property to the
+ per-cache-block metadata, to facilitate the implementation of
+ cache coherency validation and recovery tools. This mechanism
+ works as follows. There is a monotonically increasing 32-bit
+ era counter associated with each cache instance. Each cache
+ block is tagged with the era during which it was last written.
+ A device mapper message interface is provided to obtain the
+ current era, advance to the next era, and invalidate blocks
+ from before or after a given era. Note that you can use this
+ policy shim to add the era functionality to any cache policy
+ via name concatenation -- specify era+mq instead of just mq to
+ add the era mechanism to the mq policy, for example.
+
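The help text above describes the era mechanism only in the abstract; what follows is a toy, userspace-only sketch of that idea (a per-cache 32-bit era counter, per-block era tags, and invalidation relative to a given era). It is not the dm-cache shim itself, and every name in it is made up for illustration.

/*
 * Toy model of the "era" idea: the cache keeps a monotonically increasing
 * 32-bit era counter, each block records the era of its last write, and
 * blocks can be invalidated relative to a given era. Illustrative only.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define NBLOCKS 8

struct toy_cache {
	uint32_t era;                 /* current era, monotonically increasing */
	uint32_t block_era[NBLOCKS];  /* era of each block's last write */
	bool     valid[NBLOCKS];
};

static void toy_write(struct toy_cache *c, unsigned b)
{
	c->block_era[b] = c->era;     /* tag the block with the current era */
	c->valid[b] = true;
}

static uint32_t toy_advance_era(struct toy_cache *c)
{
	return ++c->era;              /* the "advance to the next era" message */
}

/* invalidate every block written at or after the given era */
static void toy_invalidate_since(struct toy_cache *c, uint32_t era)
{
	for (unsigned b = 0; b < NBLOCKS; b++)
		if (c->valid[b] && c->block_era[b] >= era)
			c->valid[b] = false;
}

int main(void)
{
	struct toy_cache c = { .era = 1 };

	toy_write(&c, 0);
	toy_advance_era(&c);          /* era is now 2 */
	toy_write(&c, 1);
	toy_invalidate_since(&c, 2);  /* drops block 1, keeps block 0 */

	for (unsigned b = 0; b < 2; b++)
		printf("block %u: era %u valid %d\n", b, c.block_era[b], c.valid[b]);
	return 0;
}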
config DM_CACHE_CLEANER
tristate "Cleaner Cache Policy (EXPERIMENTAL)"
depends on DM_CACHE
@@ -371,27 +388,12 @@ config DM_MULTIPATH_ST
If unsure, say N.
-config DM_DELAY
- tristate "I/O delaying target"
- depends on BLK_DEV_DM
- ---help---
- A target that delays reads and/or writes and can send
- them to different devices. Useful for testing.
-
- If unsure, say N.
-
config DM_UEVENT
bool "DM uevents"
depends on BLK_DEV_DM
---help---
Generate udev events for DM events.
-config DM_FLAKEY
- tristate "Flakey target"
- depends on BLK_DEV_DM
- ---help---
- A target that intermittently fails I/O for debugging purposes.
-
config DM_VERITY
tristate "Verity target support"
depends on BLK_DEV_DM
@@ -426,4 +428,34 @@ config DM_SWITCH
If unsure, say N.
+config DM_TEST_TARGETS
+ tristate "DM test targets"
+ depends on BLK_DEV_DM
+ ---help---
+ Targets that are primarily useful for testing.
+
+config DM_FLAKEY
+ tristate "Flakey target"
+ depends on DM_TEST_TARGETS
+ ---help---
+ A target that intermittently fails I/O for debugging purposes.
+
+ If unsure, say N.
+
+config DM_DELAY
+ tristate "I/O delaying target"
+ depends on DM_TEST_TARGETS
+ ---help---
+ A target that delays reads and/or writes and can send
+ them to different devices. Primarily useful for testing.
+
+ If unsure, say N.
+
+config DM_CACHE_HINTS
+ tristate "Hint Size Test Cache Policy (EXPERIMENTAL)"
+ depends on DM_CACHE
+ depends on DM_TEST_TARGETS
+ ---help---
+ A dumb cache policy used only to test variable hint sizes.
+
endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 2acc43fe0229..e127dfbd8994 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -11,9 +11,12 @@ dm-mirror-y += dm-raid1.o
dm-log-userspace-y \
+= dm-log-userspace-base.o dm-log-userspace-transfer.o
dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
-dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o
+dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o \
+ dm-cache-shim-utils.o dm-cache-stack-utils.o
dm-cache-mq-y += dm-cache-policy-mq.o
dm-cache-cleaner-y += dm-cache-policy-cleaner.o
+dm-cache-era-y += dm-cache-policy-era.o
+dm-cache-hints-y += dm-cache-policy-hints.o
md-mod-y += md.o bitmap.o
raid456-y += raid5.o
@@ -52,6 +55,8 @@ obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
+obj-$(CONFIG_DM_CACHE_ERA) += dm-cache-era.o
+obj-$(CONFIG_DM_CACHE_HINTS) += dm-cache-hints.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index f950c9d29f3e..2638417b19aa 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -13,15 +13,8 @@ config BCACHE_DEBUG
---help---
Don't select this option unless you're a developer
- Enables extra debugging tools (primarily a fuzz tester)
-
-config BCACHE_EDEBUG
- bool "Extended runtime checks"
- depends on BCACHE
- ---help---
- Don't select this option unless you're a developer
-
- Enables extra runtime checks which significantly affect performance
+ Enables extra debugging tools and allows expensive runtime checks
+ to be turned on.
config BCACHE_CLOSURES_DEBUG
bool "Debug closures"
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index e45f5575fd4d..2b46bf1d7e40 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -63,13 +63,12 @@
#include "bcache.h"
#include "btree.h"
+#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>
-#define MAX_IN_FLIGHT_DISCARDS 8U
-
/* Bucket heap / gen */
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
@@ -121,75 +120,6 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
mutex_unlock(&c->bucket_lock);
}
-/* Discard/TRIM */
-
-struct discard {
- struct list_head list;
- struct work_struct work;
- struct cache *ca;
- long bucket;
-
- struct bio bio;
- struct bio_vec bv;
-};
-
-static void discard_finish(struct work_struct *w)
-{
- struct discard *d = container_of(w, struct discard, work);
- struct cache *ca = d->ca;
- char buf[BDEVNAME_SIZE];
-
- if (!test_bit(BIO_UPTODATE, &d->bio.bi_flags)) {
- pr_notice("discard error on %s, disabling",
- bdevname(ca->bdev, buf));
- d->ca->discard = 0;
- }
-
- mutex_lock(&ca->set->bucket_lock);
-
- fifo_push(&ca->free, d->bucket);
- list_add(&d->list, &ca->discards);
- atomic_dec(&ca->discards_in_flight);
-
- mutex_unlock(&ca->set->bucket_lock);
-
- closure_wake_up(&ca->set->bucket_wait);
- wake_up_process(ca->alloc_thread);
-
- closure_put(&ca->set->cl);
-}
-
-static void discard_endio(struct bio *bio, int error)
-{
- struct discard *d = container_of(bio, struct discard, bio);
- schedule_work(&d->work);
-}
-
-static void do_discard(struct cache *ca, long bucket)
-{
- struct discard *d = list_first_entry(&ca->discards,
- struct discard, list);
-
- list_del(&d->list);
- d->bucket = bucket;
-
- atomic_inc(&ca->discards_in_flight);
- closure_get(&ca->set->cl);
-
- bio_init(&d->bio);
-
- d->bio.bi_sector = bucket_to_sector(ca->set, d->bucket);
- d->bio.bi_bdev = ca->bdev;
- d->bio.bi_rw = REQ_WRITE|REQ_DISCARD;
- d->bio.bi_max_vecs = 1;
- d->bio.bi_io_vec = d->bio.bi_inline_vecs;
- d->bio.bi_size = bucket_bytes(ca);
- d->bio.bi_end_io = discard_endio;
- bio_set_prio(&d->bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
-
- submit_bio(0, &d->bio);
-}
-
/* Allocation */
static inline bool can_inc_bucket_gen(struct bucket *b)
@@ -280,7 +210,7 @@ static void invalidate_buckets_lru(struct cache *ca)
* multiple times when it can't do anything
*/
ca->invalidate_needs_gc = 1;
- bch_queue_gc(ca->set);
+ wake_up_gc(ca->set);
return;
}
@@ -305,7 +235,7 @@ static void invalidate_buckets_fifo(struct cache *ca)
if (++checked >= ca->sb.nbuckets) {
ca->invalidate_needs_gc = 1;
- bch_queue_gc(ca->set);
+ wake_up_gc(ca->set);
return;
}
}
@@ -330,7 +260,7 @@ static void invalidate_buckets_random(struct cache *ca)
if (++checked >= ca->sb.nbuckets / 2) {
ca->invalidate_needs_gc = 1;
- bch_queue_gc(ca->set);
+ wake_up_gc(ca->set);
return;
}
}
@@ -398,16 +328,18 @@ static int bch_allocator_thread(void *arg)
else
break;
- allocator_wait(ca, (int) fifo_free(&ca->free) >
- atomic_read(&ca->discards_in_flight));
-
if (ca->discard) {
- allocator_wait(ca, !list_empty(&ca->discards));
- do_discard(ca, bucket);
- } else {
- fifo_push(&ca->free, bucket);
- closure_wake_up(&ca->set->bucket_wait);
+ mutex_unlock(&ca->set->bucket_lock);
+ blkdev_issue_discard(ca->bdev,
+ bucket_to_sector(ca->set, bucket),
+ ca->sb.block_size, GFP_KERNEL, 0);
+ mutex_lock(&ca->set->bucket_lock);
}
+
+ allocator_wait(ca, !fifo_full(&ca->free));
+
+ fifo_push(&ca->free, bucket);
+ wake_up(&ca->set->bucket_wait);
}
/*
@@ -433,16 +365,40 @@ static int bch_allocator_thread(void *arg)
}
}
-long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
+long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
{
- long r = -1;
-again:
+ DEFINE_WAIT(w);
+ struct bucket *b;
+ long r;
+
+ /* fastpath */
+ if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+ fifo_pop(&ca->free, r);
+ goto out;
+ }
+
+ if (!wait)
+ return -1;
+
+ while (1) {
+ if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+ fifo_pop(&ca->free, r);
+ break;
+ }
+
+ prepare_to_wait(&ca->set->bucket_wait, &w,
+ TASK_UNINTERRUPTIBLE);
+
+ mutex_unlock(&ca->set->bucket_lock);
+ schedule();
+ mutex_lock(&ca->set->bucket_lock);
+ }
+
+ finish_wait(&ca->set->bucket_wait, &w);
+out:
wake_up_process(ca->alloc_thread);
- if (fifo_used(&ca->free) > ca->watermark[watermark] &&
- fifo_pop(&ca->free, r)) {
- struct bucket *b = ca->buckets + r;
-#ifdef CONFIG_BCACHE_EDEBUG
+ if (expensive_debug_checks(ca->set)) {
size_t iter;
long i;
@@ -455,36 +411,23 @@ again:
BUG_ON(i == r);
fifo_for_each(i, &ca->unused, iter)
BUG_ON(i == r);
-#endif
- BUG_ON(atomic_read(&b->pin) != 1);
-
- SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
-
- if (watermark <= WATERMARK_METADATA) {
- SET_GC_MARK(b, GC_MARK_METADATA);
- b->prio = BTREE_PRIO;
- } else {
- SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
- b->prio = INITIAL_PRIO;
- }
-
- return r;
}
- trace_bcache_alloc_fail(ca);
+ b = ca->buckets + r;
- if (cl) {
- closure_wait(&ca->set->bucket_wait, cl);
+ BUG_ON(atomic_read(&b->pin) != 1);
- if (closure_blocking(cl)) {
- mutex_unlock(&ca->set->bucket_lock);
- closure_sync(cl);
- mutex_lock(&ca->set->bucket_lock);
- goto again;
- }
+ SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
+
+ if (watermark <= WATERMARK_METADATA) {
+ SET_GC_MARK(b, GC_MARK_METADATA);
+ b->prio = BTREE_PRIO;
+ } else {
+ SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ b->prio = INITIAL_PRIO;
}
- return -1;
+ return r;
}
void bch_bucket_free(struct cache_set *c, struct bkey *k)
@@ -501,7 +444,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
}
int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
- struct bkey *k, int n, struct closure *cl)
+ struct bkey *k, int n, bool wait)
{
int i;
@@ -514,7 +457,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
for (i = 0; i < n; i++) {
struct cache *ca = c->cache_by_alloc[i];
- long b = bch_bucket_alloc(ca, watermark, cl);
+ long b = bch_bucket_alloc(ca, watermark, wait);
if (b == -1)
goto err;
@@ -529,22 +472,202 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
return 0;
err:
bch_bucket_free(c, k);
- __bkey_put(c, k);
+ bkey_put(c, k);
return -1;
}
int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
- struct bkey *k, int n, struct closure *cl)
+ struct bkey *k, int n, bool wait)
{
int ret;
mutex_lock(&c->bucket_lock);
- ret = __bch_bucket_alloc_set(c, watermark, k, n, cl);
+ ret = __bch_bucket_alloc_set(c, watermark, k, n, wait);
mutex_unlock(&c->bucket_lock);
return ret;
}
+/* Sector allocator */
+
+struct open_bucket {
+ struct list_head list;
+ unsigned last_write_point;
+ unsigned sectors_free;
+ BKEY_PADDED(key);
+};
+
+/*
+ * We keep multiple buckets open for writes, and try to segregate different
+ * write streams for better cache utilization: first we look for a bucket where
+ * the last write to it was sequential with the current write, and failing that
+ * we look for a bucket that was last used by the same task.
+ *
+ * The idea is that if you've got multiple tasks pulling data into the cache
+ * at the same time, you'll get better cache utilization if you try to
+ * segregate their data and preserve locality.
+ *
+ * For example, say you're starting Firefox at the same time you're copying a
+ * bunch of files. Firefox will likely end up being fairly hot and stay in the
+ * cache a while, but the data you copied might not be; if you wrote all that
+ * data to the same buckets it'd get invalidated at the same time.
+ *
+ * Both of those tasks will be doing fairly random IO so we can't rely on
+ * detecting sequential IO to segregate their data, but going off of the task
+ * should be a sane heuristic.
+ */
+static struct open_bucket *pick_data_bucket(struct cache_set *c,
+ const struct bkey *search,
+ unsigned write_point,
+ struct bkey *alloc)
+{
+ struct open_bucket *ret, *ret_task = NULL;
+
+ list_for_each_entry_reverse(ret, &c->data_buckets, list)
+ if (!bkey_cmp(&ret->key, search))
+ goto found;
+ else if (ret->last_write_point == write_point)
+ ret_task = ret;
+
+ ret = ret_task ?: list_first_entry(&c->data_buckets,
+ struct open_bucket, list);
+found:
+ if (!ret->sectors_free && KEY_PTRS(alloc)) {
+ ret->sectors_free = c->sb.bucket_size;
+ bkey_copy(&ret->key, alloc);
+ bkey_init(alloc);
+ }
+
+ if (!ret->sectors_free)
+ ret = NULL;
+
+ return ret;
+}
+
+/*
+ * Allocates some space in the cache to write to, updates k to point to the
+ * newly allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point
+ * to the end of the newly allocated space).
+ *
+ * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
+ * sectors were actually allocated.
+ *
+ * If @wait is true, will not fail.
+ */
+bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
+ unsigned write_point, unsigned write_prio, bool wait)
+{
+ struct open_bucket *b;
+ BKEY_PADDED(key) alloc;
+ unsigned i;
+
+ /*
+ * We might have to allocate a new bucket, which we can't do with a
+ * spinlock held. So if we have to allocate, we drop the lock, allocate
+ * and then retry. KEY_PTRS() indicates whether alloc points to
+ * allocated bucket(s).
+ */
+
+ bkey_init(&alloc.key);
+ spin_lock(&c->data_bucket_lock);
+
+ while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
+ unsigned watermark = write_prio
+ ? WATERMARK_MOVINGGC
+ : WATERMARK_NONE;
+
+ spin_unlock(&c->data_bucket_lock);
+
+ if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
+ return false;
+
+ spin_lock(&c->data_bucket_lock);
+ }
+
+ /*
+ * If we had to allocate, we might race and not need to allocate the
+ * second time we call pick_data_bucket(). If we allocated a bucket but
+ * didn't use it, drop the refcount bch_bucket_alloc_set() took:
+ */
+ if (KEY_PTRS(&alloc.key))
+ bkey_put(c, &alloc.key);
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ EBUG_ON(ptr_stale(c, &b->key, i));
+
+ /* Set up the pointer to the space we're allocating: */
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ k->ptr[i] = b->key.ptr[i];
+
+ sectors = min(sectors, b->sectors_free);
+
+ SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
+ SET_KEY_SIZE(k, sectors);
+ SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+
+ /*
+ * Move b to the end of the lru, and keep track of what this bucket was
+ * last used for:
+ */
+ list_move_tail(&b->list, &c->data_buckets);
+ bkey_copy_key(&b->key, k);
+ b->last_write_point = write_point;
+
+ b->sectors_free -= sectors;
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++) {
+ SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+
+ atomic_long_add(sectors,
+ &PTR_CACHE(c, &b->key, i)->sectors_written);
+ }
+
+ if (b->sectors_free < c->sb.block_size)
+ b->sectors_free = 0;
+
+ /*
+ * k takes refcounts on the buckets it points to until it's inserted
+ * into the btree, but if we're done with this bucket we just transfer
+ * the open bucket's existing refcount.
+ */
+ if (b->sectors_free)
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
+
+ spin_unlock(&c->data_bucket_lock);
+ return true;
+}
+
/* Init */
+void bch_open_buckets_free(struct cache_set *c)
+{
+ struct open_bucket *b;
+
+ while (!list_empty(&c->data_buckets)) {
+ b = list_first_entry(&c->data_buckets,
+ struct open_bucket, list);
+ list_del(&b->list);
+ kfree(b);
+ }
+}
+
+int bch_open_buckets_alloc(struct cache_set *c)
+{
+ int i;
+
+ spin_lock_init(&c->data_bucket_lock);
+
+ for (i = 0; i < 6; i++) {
+ struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ list_add(&b->list, &c->data_buckets);
+ }
+
+ return 0;
+}
+
int bch_cache_allocator_start(struct cache *ca)
{
struct task_struct *k = kthread_run(bch_allocator_thread,
@@ -556,22 +679,8 @@ int bch_cache_allocator_start(struct cache *ca)
return 0;
}
-void bch_cache_allocator_exit(struct cache *ca)
-{
- struct discard *d;
-
- while (!list_empty(&ca->discards)) {
- d = list_first_entry(&ca->discards, struct discard, list);
- cancel_work_sync(&d->work);
- list_del(&d->list);
- kfree(d);
- }
-}
-
int bch_cache_allocator_init(struct cache *ca)
{
- unsigned i;
-
/*
* Reserve:
* Prio/gen writes first
@@ -589,15 +698,5 @@ int bch_cache_allocator_init(struct cache *ca)
ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
ca->watermark[WATERMARK_MOVINGGC];
- for (i = 0; i < MAX_IN_FLIGHT_DISCARDS; i++) {
- struct discard *d = kzalloc(sizeof(*d), GFP_KERNEL);
- if (!d)
- return -ENOMEM;
-
- d->ca = ca;
- INIT_WORK(&d->work, discard_finish);
- list_add(&d->list, &ca->discards);
- }
-
return 0;
}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0f12382aa35d..964353c5329d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -177,6 +177,7 @@
#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
@@ -210,168 +211,6 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_METADATA 2
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
-struct bkey {
- uint64_t high;
- uint64_t low;
- uint64_t ptr[];
-};
-
-/* Enough for a key with 6 pointers */
-#define BKEY_PAD 8
-
-#define BKEY_PADDED(key) \
- union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; }
-
-/* Version 0: Cache device
- * Version 1: Backing device
- * Version 2: Seed pointer into btree node checksum
- * Version 3: Cache device with new UUID format
- * Version 4: Backing device with data offset
- */
-#define BCACHE_SB_VERSION_CDEV 0
-#define BCACHE_SB_VERSION_BDEV 1
-#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
-#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
-#define BCACHE_SB_MAX_VERSION 4
-
-#define SB_SECTOR 8
-#define SB_SIZE 4096
-#define SB_LABEL_SIZE 32
-#define SB_JOURNAL_BUCKETS 256U
-/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
-#define MAX_CACHES_PER_SET 8
-
-#define BDEV_DATA_START_DEFAULT 16 /* sectors */
-
-struct cache_sb {
- uint64_t csum;
- uint64_t offset; /* sector where this sb was written */
- uint64_t version;
-
- uint8_t magic[16];
-
- uint8_t uuid[16];
- union {
- uint8_t set_uuid[16];
- uint64_t set_magic;
- };
- uint8_t label[SB_LABEL_SIZE];
-
- uint64_t flags;
- uint64_t seq;
- uint64_t pad[8];
-
- union {
- struct {
- /* Cache devices */
- uint64_t nbuckets; /* device size */
-
- uint16_t block_size; /* sectors */
- uint16_t bucket_size; /* sectors */
-
- uint16_t nr_in_set;
- uint16_t nr_this_dev;
- };
- struct {
- /* Backing devices */
- uint64_t data_offset;
-
- /*
- * block_size from the cache device section is still used by
- * backing devices, so don't add anything here until we fix
- * things to not need it for backing devices anymore
- */
- };
- };
-
- uint32_t last_mount; /* time_t */
-
- uint16_t first_bucket;
- union {
- uint16_t njournal_buckets;
- uint16_t keys;
- };
- uint64_t d[SB_JOURNAL_BUCKETS]; /* journal buckets */
-};
-
-BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
-BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
-BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
-#define CACHE_REPLACEMENT_LRU 0U
-#define CACHE_REPLACEMENT_FIFO 1U
-#define CACHE_REPLACEMENT_RANDOM 2U
-
-BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
-#define CACHE_MODE_WRITETHROUGH 0U
-#define CACHE_MODE_WRITEBACK 1U
-#define CACHE_MODE_WRITEAROUND 2U
-#define CACHE_MODE_NONE 3U
-BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
-#define BDEV_STATE_NONE 0U
-#define BDEV_STATE_CLEAN 1U
-#define BDEV_STATE_DIRTY 2U
-#define BDEV_STATE_STALE 3U
-
-/* Version 1: Seed pointer into btree node checksum
- */
-#define BCACHE_BSET_VERSION 1
-
-/*
- * This is the on disk format for btree nodes - a btree node on disk is a list
- * of these; within each set the keys are sorted
- */
-struct bset {
- uint64_t csum;
- uint64_t magic;
- uint64_t seq;
- uint32_t version;
- uint32_t keys;
-
- union {
- struct bkey start[0];
- uint64_t d[0];
- };
-};
-
-/*
- * On disk format for priorities and gens - see super.c near prio_write() for
- * more.
- */
-struct prio_set {
- uint64_t csum;
- uint64_t magic;
- uint64_t seq;
- uint32_t version;
- uint32_t pad;
-
- uint64_t next_bucket;
-
- struct bucket_disk {
- uint16_t prio;
- uint8_t gen;
- } __attribute((packed)) data[];
-};
-
-struct uuid_entry {
- union {
- struct {
- uint8_t uuid[16];
- uint8_t label[32];
- uint32_t first_reg;
- uint32_t last_reg;
- uint32_t invalidated;
-
- uint32_t flags;
- /* Size of flash only volumes */
- uint64_t sectors;
- };
-
- uint8_t pad[128];
- };
-};
-
-BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
-
#include "journal.h"
#include "stats.h"
struct search;
@@ -384,8 +223,6 @@ struct keybuf_key {
void *private;
};
-typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
-
struct keybuf {
struct bkey last_scanned;
spinlock_t lock;
@@ -400,7 +237,7 @@ struct keybuf {
struct rb_root keys;
-#define KEYBUF_NR 100
+#define KEYBUF_NR 500
DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};
@@ -429,21 +266,19 @@ struct bcache_device {
struct gendisk *disk;
- /* If nonzero, we're closing */
- atomic_t closing;
-
- /* If nonzero, we're detaching/unregistering from cache set */
- atomic_t detaching;
- int flush_done;
+ unsigned long flags;
+#define BCACHE_DEV_CLOSING 0
+#define BCACHE_DEV_DETACHING 1
+#define BCACHE_DEV_UNLINK_DONE 2
- uint64_t nr_stripes;
- unsigned stripe_size_bits;
+ unsigned nr_stripes;
+ unsigned stripe_size;
atomic_t *stripe_sectors_dirty;
+ unsigned long *full_dirty_stripes;
unsigned long sectors_dirty_last;
long sectors_dirty_derivative;
- mempool_t *unaligned_bvec;
struct bio_set *bio_split;
unsigned data_csum:1;
@@ -509,7 +344,7 @@ struct cached_dev {
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
- struct closure_with_timer writeback;
+ struct task_struct *writeback_thread;
struct keybuf writeback_keys;
@@ -527,8 +362,8 @@ struct cached_dev {
unsigned sequential_cutoff;
unsigned readahead;
- unsigned sequential_merge:1;
unsigned verify:1;
+ unsigned bypass_torture_test:1;
unsigned partial_stripes_expensive:1;
unsigned writeback_metadata:1;
@@ -620,15 +455,6 @@ struct cache {
bool discard; /* Get rid of? */
- /*
- * We preallocate structs for issuing discards to buckets, and keep them
- * on this list when they're not in use; do_discard() issues discards
- * whenever there's work to do and is called by free_some_buckets() and
- * when a discard finishes.
- */
- atomic_t discards_in_flight;
- struct list_head discards;
-
struct journal_device journal;
/* The rest of this all shows up in sysfs */
@@ -649,7 +475,6 @@ struct gc_stat {
size_t nkeys;
uint64_t data; /* sectors */
- uint64_t dirty; /* sectors */
unsigned in_use; /* percent */
};
@@ -744,8 +569,8 @@ struct cache_set {
* basically a lock for this that we can wait on asynchronously. The
* btree_root() macro releases the lock when it returns.
*/
- struct closure *try_harder;
- struct closure_waitlist try_wait;
+ struct task_struct *try_harder;
+ wait_queue_head_t try_wait;
uint64_t try_harder_start;
/*
@@ -759,7 +584,7 @@ struct cache_set {
* written.
*/
atomic_t prio_blocked;
- struct closure_waitlist bucket_wait;
+ wait_queue_head_t bucket_wait;
/*
* For any bio we don't skip we subtract the number of sectors from
@@ -782,7 +607,7 @@ struct cache_set {
struct gc_stat gc_stats;
size_t nbuckets;
- struct closure_with_waitlist gc;
+ struct task_struct *gc_thread;
/* Where in the btree gc currently is */
struct bkey gc_done;
@@ -795,11 +620,10 @@ struct cache_set {
/* Counts how many sectors bio_insert has added to the cache */
atomic_t sectors_to_gc;
- struct closure moving_gc;
- struct closure_waitlist moving_gc_wait;
+ wait_queue_head_t moving_gc_wait;
struct keybuf moving_gc_keys;
/* Number of moving GC bios in flight */
- atomic_t in_flight;
+ struct semaphore moving_in_flight;
struct btree *root;
@@ -841,22 +665,27 @@ struct cache_set {
unsigned congested_read_threshold_us;
unsigned congested_write_threshold_us;
- spinlock_t sort_time_lock;
struct time_stats sort_time;
struct time_stats btree_gc_time;
struct time_stats btree_split_time;
- spinlock_t btree_read_time_lock;
struct time_stats btree_read_time;
struct time_stats try_harder_time;
atomic_long_t cache_read_races;
atomic_long_t writeback_keys_done;
atomic_long_t writeback_keys_failed;
+
+ enum {
+ ON_ERROR_UNREGISTER,
+ ON_ERROR_PANIC,
+ } on_error;
unsigned error_limit;
unsigned error_decay;
+
unsigned short journal_delay_ms;
unsigned verify:1;
unsigned key_merging_disabled:1;
+ unsigned expensive_debug_checks:1;
unsigned gc_always_rewrite:1;
unsigned shrinker_disabled:1;
unsigned copy_gc_enabled:1;
@@ -865,21 +694,6 @@ struct cache_set {
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
};
-static inline bool key_merging_disabled(struct cache_set *c)
-{
-#ifdef CONFIG_BCACHE_DEBUG
- return c->key_merging_disabled;
-#else
- return 0;
-#endif
-}
-
-static inline bool SB_IS_BDEV(const struct cache_sb *sb)
-{
- return sb->version == BCACHE_SB_VERSION_BDEV
- || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
-}
-
struct bbio {
unsigned submit_time_us;
union {
@@ -933,59 +747,6 @@ static inline unsigned local_clock_us(void)
#define prio_buckets(c) \
DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
-#define JSET_MAGIC 0x245235c1a3625032ULL
-#define PSET_MAGIC 0x6750e15f87337f91ULL
-#define BSET_MAGIC 0x90135c78b99e07f5ULL
-
-#define jset_magic(c) ((c)->sb.set_magic ^ JSET_MAGIC)
-#define pset_magic(c) ((c)->sb.set_magic ^ PSET_MAGIC)
-#define bset_magic(c) ((c)->sb.set_magic ^ BSET_MAGIC)
-
-/* Bkey fields: all units are in sectors */
-
-#define KEY_FIELD(name, field, offset, size) \
- BITMASK(name, struct bkey, field, offset, size)
-
-#define PTR_FIELD(name, offset, size) \
- static inline uint64_t name(const struct bkey *k, unsigned i) \
- { return (k->ptr[i] >> offset) & ~(((uint64_t) ~0) << size); } \
- \
- static inline void SET_##name(struct bkey *k, unsigned i, uint64_t v)\
- { \
- k->ptr[i] &= ~(~((uint64_t) ~0 << size) << offset); \
- k->ptr[i] |= v << offset; \
- }
-
-KEY_FIELD(KEY_PTRS, high, 60, 3)
-KEY_FIELD(HEADER_SIZE, high, 58, 2)
-KEY_FIELD(KEY_CSUM, high, 56, 2)
-KEY_FIELD(KEY_PINNED, high, 55, 1)
-KEY_FIELD(KEY_DIRTY, high, 36, 1)
-
-KEY_FIELD(KEY_SIZE, high, 20, 16)
-KEY_FIELD(KEY_INODE, high, 0, 20)
-
-/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
-
-static inline uint64_t KEY_OFFSET(const struct bkey *k)
-{
- return k->low;
-}
-
-static inline void SET_KEY_OFFSET(struct bkey *k, uint64_t v)
-{
- k->low = v;
-}
-
-PTR_FIELD(PTR_DEV, 51, 12)
-PTR_FIELD(PTR_OFFSET, 8, 43)
-PTR_FIELD(PTR_GEN, 0, 8)
-
-#define PTR_CHECK_DEV ((1 << 12) - 1)
-
-#define PTR(gen, offset, dev) \
- ((((uint64_t) dev) << 51) | ((uint64_t) offset) << 8 | gen)
-
static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
return s >> c->bucket_bits;
@@ -1024,27 +785,11 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
/* Btree key macros */
-/*
- * The high bit being set is a relic from when we used it to do binary
- * searches - it told you where a key started. It's not used anymore,
- * and can probably be safely dropped.
- */
-#define KEY(dev, sector, len) \
-((struct bkey) { \
- .high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev), \
- .low = (sector) \
-})
-
static inline void bkey_init(struct bkey *k)
{
- *k = KEY(0, 0, 0);
+ *k = ZERO_KEY;
}
-#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
-#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
-#define MAX_KEY KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0)
-#define ZERO_KEY KEY(0, 0, 0)
-
/*
* This is used for various on disk data structures - cache_sb, prio_set, bset,
* jset: The checksum is _always_ the first 8 bytes of these structs
@@ -1094,14 +839,6 @@ do { \
for (b = (ca)->buckets + (ca)->sb.first_bucket; \
b < (ca)->buckets + (ca)->sb.nbuckets; b++)
-static inline void __bkey_put(struct cache_set *c, struct bkey *k)
-{
- unsigned i;
-
- for (i = 0; i < KEY_PTRS(k); i++)
- atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
-}
-
static inline void cached_dev_put(struct cached_dev *dc)
{
if (atomic_dec_and_test(&dc->count))
@@ -1164,7 +901,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);
-struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
void bch_generic_make_request(struct bio *, struct bio_split_pool *);
void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
@@ -1173,13 +909,15 @@ uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
bool bch_bucket_add_unused(struct cache *, struct bucket *);
-long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
+long bch_bucket_alloc(struct cache *, unsigned, bool);
void bch_bucket_free(struct cache_set *, struct bkey *);
int __bch_bucket_alloc_set(struct cache_set *, unsigned,
- struct bkey *, int, struct closure *);
+ struct bkey *, int, bool);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
- struct bkey *, int, struct closure *);
+ struct bkey *, int, bool);
+bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
+ unsigned, unsigned, bool);
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *, const char *, ...);
@@ -1187,7 +925,7 @@ bool bch_cache_set_error(struct cache_set *, const char *, ...);
void bch_prio_write(struct cache *);
void bch_write_bdev_super(struct cached_dev *, struct closure *);
-extern struct workqueue_struct *bcache_wq, *bch_gc_wq;
+extern struct workqueue_struct *bcache_wq;
extern const char * const bch_cache_modes[];
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;
@@ -1220,15 +958,14 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
void bch_moving_init_cache_set(struct cache_set *);
+int bch_open_buckets_alloc(struct cache_set *);
+void bch_open_buckets_free(struct cache_set *);
int bch_cache_allocator_start(struct cache *ca);
-void bch_cache_allocator_exit(struct cache *ca);
int bch_cache_allocator_init(struct cache *ca);
void bch_debug_exit(void);
int bch_debug_init(struct kobject *);
-void bch_writeback_exit(void);
-int bch_writeback_init(void);
void bch_request_exit(void);
int bch_request_init(void);
void bch_btree_exit(void);
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 22d1ae72c282..7d388b8bb50e 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -14,22 +14,12 @@
/* Keylists */
-void bch_keylist_copy(struct keylist *dest, struct keylist *src)
-{
- *dest = *src;
-
- if (src->list == src->d) {
- size_t n = (uint64_t *) src->top - src->d;
- dest->top = (struct bkey *) &dest->d[n];
- dest->list = dest->d;
- }
-}
-
int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
{
- unsigned oldsize = (uint64_t *) l->top - l->list;
- unsigned newsize = oldsize + 2 + nptrs;
- uint64_t *new;
+ size_t oldsize = bch_keylist_nkeys(l);
+ size_t newsize = oldsize + 2 + nptrs;
+ uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
+ uint64_t *new_keys;
/* The journalling code doesn't handle the case where the keys to insert
* is bigger than an empty write: If we just return -ENOMEM here,
@@ -45,24 +35,23 @@ int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
roundup_pow_of_two(oldsize) == newsize)
return 0;
- new = krealloc(l->list == l->d ? NULL : l->list,
- sizeof(uint64_t) * newsize, GFP_NOIO);
+ new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);
- if (!new)
+ if (!new_keys)
return -ENOMEM;
- if (l->list == l->d)
- memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE);
+ if (!old_keys)
+ memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);
- l->list = new;
- l->top = (struct bkey *) (&l->list[oldsize]);
+ l->keys_p = new_keys;
+ l->top_p = new_keys + oldsize;
return 0;
}
struct bkey *bch_keylist_pop(struct keylist *l)
{
- struct bkey *k = l->bottom;
+ struct bkey *k = l->keys;
if (k == l->top)
return NULL;
@@ -73,21 +62,20 @@ struct bkey *bch_keylist_pop(struct keylist *l)
return l->top = k;
}
-/* Pointer validation */
-
-bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
+void bch_keylist_pop_front(struct keylist *l)
{
- unsigned i;
- char buf[80];
+ l->top_p -= bkey_u64s(l->keys);
- if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
- goto bad;
+ memmove(l->keys,
+ bkey_next(l->keys),
+ bch_keylist_bytes(l));
+}
- if (!level && KEY_SIZE(k) > KEY_OFFSET(k))
- goto bad;
+/* Pointer validation */
- if (!KEY_SIZE(k))
- return true;
+static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ unsigned i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) {
@@ -98,13 +86,83 @@ bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
if (KEY_SIZE(k) + r > c->sb.bucket_size ||
bucket < ca->sb.first_bucket ||
bucket >= ca->sb.nbuckets)
- goto bad;
+ return true;
}
return false;
+}
+
+bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ char buf[80];
+
+ if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
+ goto bad;
+
+ if (__ptr_invalid(c, k))
+ goto bad;
+
+ return false;
+bad:
+ bch_bkey_to_text(buf, sizeof(buf), k);
+ cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
+ return true;
+}
+
+bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ char buf[80];
+
+ if (!KEY_SIZE(k))
+ return true;
+
+ if (KEY_SIZE(k) > KEY_OFFSET(k))
+ goto bad;
+
+ if (__ptr_invalid(c, k))
+ goto bad;
+
+ return false;
bad:
bch_bkey_to_text(buf, sizeof(buf), k);
- cache_bug(c, "spotted bad key %s: %s", buf, bch_ptr_status(c, k));
+ cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
+ return true;
+}
+
+static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
+ unsigned ptr)
+{
+ struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+ char buf[80];
+
+ if (mutex_trylock(&b->c->bucket_lock)) {
+ if (b->level) {
+ if (KEY_DIRTY(k) ||
+ g->prio != BTREE_PRIO ||
+ (b->c->gc_mark_valid &&
+ GC_MARK(g) != GC_MARK_METADATA))
+ goto err;
+
+ } else {
+ if (g->prio == BTREE_PRIO)
+ goto err;
+
+ if (KEY_DIRTY(k) &&
+ b->c->gc_mark_valid &&
+ GC_MARK(g) != GC_MARK_DIRTY)
+ goto err;
+ }
+ mutex_unlock(&b->c->bucket_lock);
+ }
+
+ return false;
+err:
+ mutex_unlock(&b->c->bucket_lock);
+ bch_bkey_to_text(buf, sizeof(buf), k);
+ btree_bug(b,
+"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+ buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
+ g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
return true;
}
@@ -118,64 +176,29 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k)
bch_ptr_invalid(b, k))
return true;
- if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV)
- return true;
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ if (!ptr_available(b->c, k, i))
+ return true;
- for (i = 0; i < KEY_PTRS(k); i++)
- if (ptr_available(b->c, k, i)) {
- g = PTR_BUCKET(b->c, k, i);
- stale = ptr_stale(b->c, k, i);
+ g = PTR_BUCKET(b->c, k, i);
+ stale = ptr_stale(b->c, k, i);
- btree_bug_on(stale > 96, b,
- "key too stale: %i, need_gc %u",
- stale, b->c->need_gc);
+ btree_bug_on(stale > 96, b,
+ "key too stale: %i, need_gc %u",
+ stale, b->c->need_gc);
- btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
- b, "stale dirty pointer");
+ btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
+ b, "stale dirty pointer");
- if (stale)
- return true;
+ if (stale)
+ return true;
-#ifdef CONFIG_BCACHE_EDEBUG
- if (!mutex_trylock(&b->c->bucket_lock))
- continue;
-
- if (b->level) {
- if (KEY_DIRTY(k) ||
- g->prio != BTREE_PRIO ||
- (b->c->gc_mark_valid &&
- GC_MARK(g) != GC_MARK_METADATA))
- goto bug;
-
- } else {
- if (g->prio == BTREE_PRIO)
- goto bug;
-
- if (KEY_DIRTY(k) &&
- b->c->gc_mark_valid &&
- GC_MARK(g) != GC_MARK_DIRTY)
- goto bug;
- }
- mutex_unlock(&b->c->bucket_lock);
-#endif
- }
+ if (expensive_debug_checks(b->c) &&
+ ptr_bad_expensive_checks(b, k, i))
+ return true;
+ }
return false;
-#ifdef CONFIG_BCACHE_EDEBUG
-bug:
- mutex_unlock(&b->c->bucket_lock);
-
- {
- char buf[80];
-
- bch_bkey_to_text(buf, sizeof(buf), k);
- btree_bug(b,
-"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
- buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
- g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
- }
- return true;
-#endif
}
/* Key/pointer manipulation */
@@ -458,16 +481,8 @@ static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
-#ifdef CONFIG_X86_64
- asm("shrd %[shift],%[high],%[low]"
- : [low] "+Rm" (low)
- : [high] "R" (high),
- [shift] "ci" (shift)
- : "cc");
-#else
low >>= shift;
low |= (high << 1) << (63U - shift);
-#endif
return low;
}
@@ -686,7 +701,7 @@ void bch_bset_init_next(struct btree *b)
} else
get_random_bytes(&i->seq, sizeof(uint64_t));
- i->magic = bset_magic(b->c);
+ i->magic = bset_magic(&b->c->sb);
i->version = 0;
i->keys = 0;
@@ -824,16 +839,16 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
} else
i = bset_search_write_set(b, t, search);
-#ifdef CONFIG_BCACHE_EDEBUG
- BUG_ON(bset_written(b, t) &&
- i.l != t->data->start &&
- bkey_cmp(tree_to_prev_bkey(t,
- inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
- search) > 0);
+ if (expensive_debug_checks(b->c)) {
+ BUG_ON(bset_written(b, t) &&
+ i.l != t->data->start &&
+ bkey_cmp(tree_to_prev_bkey(t,
+ inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
+ search) > 0);
- BUG_ON(i.r != end(t->data) &&
- bkey_cmp(i.r, search) <= 0);
-#endif
+ BUG_ON(i.r != end(t->data) &&
+ bkey_cmp(i.r, search) <= 0);
+ }
while (likely(i.l != i.r) &&
bkey_cmp(i.l, search) <= 0)
@@ -844,6 +859,13 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
/* Btree iterator */
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
static inline bool btree_iter_cmp(struct btree_iter_set l,
struct btree_iter_set r)
{
@@ -867,12 +889,16 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
}
struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
- struct bkey *search, struct bset_tree *start)
+ struct bkey *search, struct bset_tree *start)
{
struct bkey *ret = NULL;
iter->size = ARRAY_SIZE(iter->data);
iter->used = 0;
+#ifdef CONFIG_BCACHE_DEBUG
+ iter->b = b;
+#endif
+
for (; start <= &b->sets[b->nsets]; start++) {
ret = bch_bset_search(b, start, search);
bch_btree_iter_push(iter, ret, end(start->data));
@@ -887,6 +913,8 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
struct bkey *ret = NULL;
if (!btree_iter_end(iter)) {
+ bch_btree_iter_next_check(iter);
+
ret = iter->data->k;
iter->data->k = bkey_next(iter->data->k);
@@ -916,14 +944,6 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
return ret;
}
-struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
-{
- struct btree_iter iter;
-
- bch_btree_iter_init(b, &iter, search);
- return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
-}
-
/* Mergesort */
static void sort_key_next(struct btree_iter *iter,
@@ -998,7 +1018,6 @@ static void btree_mergesort(struct btree *b, struct bset *out,
out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
pr_debug("sorted %i keys", out->keys);
- bch_check_key_order(b, out);
}
static void __btree_sort(struct btree *b, struct btree_iter *iter,
@@ -1029,7 +1048,7 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
* memcpy()
*/
- out->magic = bset_magic(b->c);
+ out->magic = bset_magic(&b->c->sb);
out->seq = b->sets[0].data->seq;
out->version = b->sets[0].data->version;
swap(out, b->sets[0].data);
@@ -1050,24 +1069,21 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
if (b->written)
bset_build_written_tree(b);
- if (!start) {
- spin_lock(&b->c->sort_time_lock);
+ if (!start)
bch_time_stats_update(&b->c->sort_time, start_time);
- spin_unlock(&b->c->sort_time_lock);
- }
}
void bch_btree_sort_partial(struct btree *b, unsigned start)
{
- size_t oldsize = 0, order = b->page_order, keys = 0;
+ size_t order = b->page_order, keys = 0;
struct btree_iter iter;
+ int oldsize = bch_count_data(b);
+
__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
BUG_ON(b->sets[b->nsets].data == write_block(b) &&
(b->sets[b->nsets].size || b->nsets));
- if (b->written)
- oldsize = bch_count_data(b);
if (start) {
unsigned i;
@@ -1083,7 +1099,7 @@ void bch_btree_sort_partial(struct btree *b, unsigned start)
__btree_sort(b, &iter, start, order, false);
- EBUG_ON(b->written && bch_count_data(b) != oldsize);
+ EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
}
void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
@@ -1101,9 +1117,7 @@ void bch_btree_sort_into(struct btree *b, struct btree *new)
btree_mergesort(b, new->sets->data, &iter, false, true);
- spin_lock(&b->c->sort_time_lock);
bch_time_stats_update(&b->c->sort_time, start_time);
- spin_unlock(&b->c->sort_time_lock);
bkey_copy_key(&new->key, &b->key);
new->sets->size = 0;
@@ -1148,16 +1162,16 @@ out:
/* Sysfs stuff */
struct bset_stats {
+ struct btree_op op;
size_t nodes;
size_t sets_written, sets_unwritten;
size_t bytes_written, bytes_unwritten;
size_t floats, failed;
};
-static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
- struct bset_stats *stats)
+static int btree_bset_stats(struct btree_op *op, struct btree *b)
{
- struct bkey *k;
+ struct bset_stats *stats = container_of(op, struct bset_stats, op);
unsigned i;
stats->nodes++;
@@ -1182,30 +1196,19 @@ static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
}
}
- if (b->level) {
- struct btree_iter iter;
-
- for_each_key_filter(b, k, &iter, bch_ptr_bad) {
- int ret = btree(bset_stats, k, b, op, stats);
- if (ret)
- return ret;
- }
- }
-
- return 0;
+ return MAP_CONTINUE;
}
int bch_bset_print_stats(struct cache_set *c, char *buf)
{
- struct btree_op op;
struct bset_stats t;
int ret;
- bch_btree_op_init_stack(&op);
memset(&t, 0, sizeof(struct bset_stats));
+ bch_btree_op_init(&t.op, -1);
- ret = btree_root(bset_stats, c, &op, &t);
- if (ret)
+ ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
+ if (ret < 0)
return ret;
return snprintf(buf, PAGE_SIZE,
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index ae115a253d73..1d3c24f9fa0e 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -148,6 +148,9 @@
struct btree_iter {
size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+ struct btree *b;
+#endif
struct btree_iter_set {
struct bkey *k, *end;
} data[MAX_BSETS];
@@ -193,54 +196,26 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}
-static inline size_t bkey_u64s(const struct bkey *k)
-{
- BUG_ON(KEY_CSUM(k) > 1);
- return 2 + KEY_PTRS(k) + (KEY_CSUM(k) ? 1 : 0);
-}
-
-static inline size_t bkey_bytes(const struct bkey *k)
-{
- return bkey_u64s(k) * sizeof(uint64_t);
-}
-
-static inline void bkey_copy(struct bkey *dest, const struct bkey *src)
-{
- memcpy(dest, src, bkey_bytes(src));
-}
-
-static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
-{
- if (!src)
- src = &KEY(0, 0, 0);
-
- SET_KEY_INODE(dest, KEY_INODE(src));
- SET_KEY_OFFSET(dest, KEY_OFFSET(src));
-}
-
-static inline struct bkey *bkey_next(const struct bkey *k)
-{
- uint64_t *d = (void *) k;
- return (struct bkey *) (d + bkey_u64s(k));
-}
-
/* Keylists */
struct keylist {
- struct bkey *top;
union {
- uint64_t *list;
- struct bkey *bottom;
+ struct bkey *keys;
+ uint64_t *keys_p;
+ };
+ union {
+ struct bkey *top;
+ uint64_t *top_p;
};
/* Enough room for btree_split's keys without realloc */
#define KEYLIST_INLINE 16
- uint64_t d[KEYLIST_INLINE];
+ uint64_t inline_keys[KEYLIST_INLINE];
};
static inline void bch_keylist_init(struct keylist *l)
{
- l->top = (void *) (l->list = l->d);
+ l->top_p = l->keys_p = l->inline_keys;
}
static inline void bch_keylist_push(struct keylist *l)
@@ -256,17 +231,32 @@ static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
static inline bool bch_keylist_empty(struct keylist *l)
{
- return l->top == (void *) l->list;
+ return l->top == l->keys;
+}
+
+static inline void bch_keylist_reset(struct keylist *l)
+{
+ l->top = l->keys;
}
static inline void bch_keylist_free(struct keylist *l)
{
- if (l->list != l->d)
- kfree(l->list);
+ if (l->keys_p != l->inline_keys)
+ kfree(l->keys_p);
+}
+
+static inline size_t bch_keylist_nkeys(struct keylist *l)
+{
+ return l->top_p - l->keys_p;
+}
+
+static inline size_t bch_keylist_bytes(struct keylist *l)
+{
+ return bch_keylist_nkeys(l) * sizeof(uint64_t);
}
-void bch_keylist_copy(struct keylist *, struct keylist *);
struct bkey *bch_keylist_pop(struct keylist *);
+void bch_keylist_pop_front(struct keylist *);
int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
@@ -287,7 +277,9 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
}
const char *bch_ptr_status(struct cache_set *, const struct bkey *);
-bool __bch_ptr_invalid(struct cache_set *, int level, const struct bkey *);
+bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
+bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *);
+
bool bch_ptr_bad(struct btree *, const struct bkey *);
static inline uint8_t gen_after(uint8_t a, uint8_t b)
@@ -311,7 +303,6 @@ static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
-struct bkey *bch_next_recurse_key(struct btree *, struct bkey *);
struct bkey *bch_btree_iter_next(struct btree_iter *);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
struct btree *, ptr_filter_fn);
@@ -361,12 +352,30 @@ void bch_bset_fix_lookup_table(struct btree *, struct bkey *);
struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
const struct bkey *);
+/*
+ * Returns the first key that is strictly greater than search
+ */
static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
const struct bkey *search)
{
return search ? __bch_bset_search(b, t, search) : t->data->start;
}
+#define PRECEDING_KEY(_k) \
+({ \
+ struct bkey *_ret = NULL; \
+ \
+ if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
+ _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
+ \
+ if (!_ret->low) \
+ _ret->high--; \
+ _ret->low--; \
+ } \
+ \
+ _ret; \
+})
+
bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
void bch_btree_sort_lazy(struct btree *);
void bch_btree_sort_into(struct btree *, struct btree *);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index f42fc7ed9cd6..46fb793a3f58 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -23,12 +23,13 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
-#include "request.h"
#include "writeback.h"
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/freezer.h>
#include <linux/hash.h>
+#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
@@ -88,15 +89,13 @@
* Test module load/unload
*/
-static const char * const op_types[] = {
- "insert", "replace"
+enum {
+ BTREE_INSERT_STATUS_INSERT,
+ BTREE_INSERT_STATUS_BACK_MERGE,
+ BTREE_INSERT_STATUS_OVERWROTE,
+ BTREE_INSERT_STATUS_FRONT_MERGE,
};
-static const char *op_type(struct btree_op *op)
-{
- return op_types[op->type];
-}
-
#define MAX_NEED_GC 64
#define MAX_SAVE_PRIO 72
@@ -105,23 +104,89 @@ static const char *op_type(struct btree_op *op)
#define PTR_HASH(c, k) \
(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
-struct workqueue_struct *bch_gc_wq;
static struct workqueue_struct *btree_io_wq;
-void bch_btree_op_init_stack(struct btree_op *op)
+static inline bool should_split(struct btree *b)
{
- memset(op, 0, sizeof(struct btree_op));
- closure_init_stack(&op->cl);
- op->lock = -1;
- bch_keylist_init(&op->keys);
+ struct bset *i = write_block(b);
+ return b->written >= btree_blocks(b) ||
+ (b->written + __set_blocks(i, i->keys + 15, b->c)
+ > btree_blocks(b));
}
+#define insert_lock(s, b) ((b)->level <= (s)->lock)
+
+/*
+ * These macros are for recursing down the btree - they handle the details of
+ * locking and looking up nodes in the cache for you. They're best treated as
+ * mere syntax when reading code that uses them.
+ *
+ * op->lock determines whether we take a read or a write lock at a given depth.
+ * If you've got a read lock and find that you need a write lock (i.e. you're
+ * going to have to split), set op->lock and return -EINTR; btree_root() will
+ * call you again and you'll have the correct lock.
+ */
+
+/**
+ * btree - recurse down the btree on a specified key
+ * @fn: function to call, which will be passed the child node
+ * @key: key to recurse on
+ * @b: parent btree node
+ * @op: pointer to struct btree_op
+ */
+#define btree(fn, key, b, op, ...) \
+({ \
+ int _r, l = (b)->level - 1; \
+ bool _w = l <= (op)->lock; \
+ struct btree *_child = bch_btree_node_get((b)->c, key, l, _w); \
+ if (!IS_ERR(_child)) { \
+ _child->parent = (b); \
+ _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
+ rw_unlock(_w, _child); \
+ } else \
+ _r = PTR_ERR(_child); \
+ _r; \
+})
+
+/**
+ * btree_root - call a function on the root of the btree
+ * @fn: function to call, which will be passed the child node
+ * @c: cache set
+ * @op: pointer to struct btree_op
+ */
+#define btree_root(fn, c, op, ...) \
+({ \
+ int _r = -EINTR; \
+ do { \
+ struct btree *_b = (c)->root; \
+ bool _w = insert_lock(op, _b); \
+ rw_lock(_w, _b, _b->level); \
+ if (_b == (c)->root && \
+ _w == insert_lock(op, _b)) { \
+ _b->parent = NULL; \
+ _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
+ } \
+ rw_unlock(_w, _b); \
+ bch_cannibalize_unlock(c); \
+ if (_r == -ENOSPC) { \
+ wait_event((c)->try_wait, \
+ !(c)->try_harder); \
+ _r = -EINTR; \
+ } \
+ } while (_r == -EINTR); \
+ \
+ _r; \
+})
+
/* Btree key manipulation */
-static void bkey_put(struct cache_set *c, struct bkey *k, int level)
+void bkey_put(struct cache_set *c, struct bkey *k)
{
- if ((level && KEY_OFFSET(k)) || !level)
- __bkey_put(c, k);
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i))
+ atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}
/* Btree IO */
@@ -145,6 +210,10 @@ static void bch_btree_node_read_done(struct btree *b)
iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
iter->used = 0;
+#ifdef CONFIG_BCACHE_DEBUG
+ iter->b = b;
+#endif
+
if (!i->seq)
goto err;
@@ -160,7 +229,7 @@ static void bch_btree_node_read_done(struct btree *b)
goto err;
err = "bad magic";
- if (i->magic != bset_magic(b->c))
+ if (i->magic != bset_magic(&b->c->sb))
goto err;
err = "bad checksum";
@@ -230,7 +299,7 @@ void bch_btree_node_read(struct btree *b)
bio = bch_bbio_alloc(b->c);
bio->bi_rw = REQ_META|READ_SYNC;
- bio->bi_size = KEY_SIZE(&b->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
bio->bi_end_io = btree_node_read_endio;
bio->bi_private = &cl;
@@ -248,10 +317,7 @@ void bch_btree_node_read(struct btree *b)
goto err;
bch_btree_node_read_done(b);
-
- spin_lock(&b->c->btree_read_time_lock);
bch_time_stats_update(&b->c->btree_read_time, start_time);
- spin_unlock(&b->c->btree_read_time_lock);
return;
err:
@@ -296,7 +362,7 @@ static void btree_node_write_done(struct closure *cl)
struct bio_vec *bv;
int n;
- __bio_for_each_segment(bv, b->bio, n, 0)
+ bio_for_each_segment_all(bv, b->bio, n)
__free_page(bv->bv_page);
__btree_node_write_done(cl);
@@ -327,9 +393,9 @@ static void do_btree_node_write(struct btree *b)
b->bio = bch_bbio_alloc(b->c);
b->bio->bi_end_io = btree_node_write_endio;
- b->bio->bi_private = &b->io.cl;
+ b->bio->bi_private = cl;
b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
- b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
+ b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c);
bch_bio_map(b->bio, i);
/*
@@ -355,7 +421,7 @@ static void do_btree_node_write(struct btree *b)
struct bio_vec *bv;
void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
- bio_for_each_segment(bv, b->bio, j)
+ bio_for_each_segment_all(bv, b->bio, j)
memcpy(page_address(bv->bv_page),
base + j * PAGE_SIZE, PAGE_SIZE);
@@ -383,7 +449,7 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
BUG_ON(b->written >= btree_blocks(b));
BUG_ON(b->written && !i->keys);
BUG_ON(b->sets->data->seq != i->seq);
- bch_check_key_order(b, i);
+ bch_check_keys(b, "writing");
cancel_delayed_work(&b->work);
@@ -405,6 +471,15 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
bch_bset_init_next(b);
}
+static void bch_btree_node_write_sync(struct btree *b)
+{
+ struct closure cl;
+
+ closure_init_stack(&cl);
+ bch_btree_node_write(b, &cl);
+ closure_sync(&cl);
+}
+
static void btree_node_write_work(struct work_struct *w)
{
struct btree *b = container_of(to_delayed_work(w), struct btree, work);
@@ -416,7 +491,7 @@ static void btree_node_write_work(struct work_struct *w)
rw_unlock(true, b);
}
-static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
+static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
struct bset *i = b->sets[b->nsets].data;
struct btree_write *w = btree_current_write(b);
@@ -429,15 +504,15 @@ static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
set_btree_node_dirty(b);
- if (op && op->journal) {
+ if (journal_ref) {
if (w->journal &&
- journal_pin_cmp(b->c, w, op)) {
+ journal_pin_cmp(b->c, w->journal, journal_ref)) {
atomic_dec_bug(w->journal);
w->journal = NULL;
}
if (!w->journal) {
- w->journal = op->journal;
+ w->journal = journal_ref;
atomic_inc(w->journal);
}
}
@@ -566,33 +641,32 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
return b;
}
-static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
+static int mca_reap(struct btree *b, unsigned min_order, bool flush)
{
+ struct closure cl;
+
+ closure_init_stack(&cl);
lockdep_assert_held(&b->c->bucket_lock);
if (!down_write_trylock(&b->lock))
return -ENOMEM;
- if (b->page_order < min_order) {
+ BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
+
+ if (b->page_order < min_order ||
+ (!flush &&
+ (btree_node_dirty(b) ||
+ atomic_read(&b->io.cl.remaining) != -1))) {
rw_unlock(true, b);
return -ENOMEM;
}
- BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
-
- if (cl && btree_node_dirty(b))
- bch_btree_node_write(b, NULL);
-
- if (cl)
- closure_wait_event_async(&b->io.wait, cl,
- atomic_read(&b->io.cl.remaining) == -1);
+ if (btree_node_dirty(b))
+ bch_btree_node_write_sync(b);
- if (btree_node_dirty(b) ||
- !closure_is_unlocked(&b->io.cl) ||
- work_pending(&b->work.work)) {
- rw_unlock(true, b);
- return -EAGAIN;
- }
+ /* wait for any in flight btree write */
+ closure_wait_event(&b->io.wait, &cl,
+ atomic_read(&b->io.cl.remaining) == -1);
return 0;
}
@@ -633,7 +707,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
break;
if (++i > 3 &&
- !mca_reap(b, NULL, 0)) {
+ !mca_reap(b, 0, false)) {
mca_data_free(b);
rw_unlock(true, b);
freed++;
@@ -652,7 +726,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
list_rotate_left(&c->btree_cache);
if (!b->accessed &&
- !mca_reap(b, NULL, 0)) {
+ !mca_reap(b, 0, false)) {
mca_bucket_free(b);
mca_data_free(b);
rw_unlock(true, b);
@@ -723,12 +797,9 @@ int bch_btree_cache_alloc(struct cache_set *c)
{
unsigned i;
- /* XXX: doesn't check for errors */
-
- closure_init_unlocked(&c->gc);
-
for (i = 0; i < mca_reserve(c); i++)
- mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
+ if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
+ return -ENOMEM;
list_splice_init(&c->btree_cache,
&c->btree_cache_freeable);
@@ -775,52 +846,27 @@ out:
return b;
}
-static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
- int level, struct closure *cl)
+static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
{
- int ret = -ENOMEM;
- struct btree *i;
+ struct btree *b;
trace_bcache_btree_cache_cannibalize(c);
- if (!cl)
- return ERR_PTR(-ENOMEM);
-
- /*
- * Trying to free up some memory - i.e. reuse some btree nodes - may
- * require initiating IO to flush the dirty part of the node. If we're
- * running under generic_make_request(), that IO will never finish and
- * we would deadlock. Returning -EAGAIN causes the cache lookup code to
- * punt to workqueue and retry.
- */
- if (current->bio_list)
- return ERR_PTR(-EAGAIN);
+ if (!c->try_harder) {
+ c->try_harder = current;
+ c->try_harder_start = local_clock();
+ } else if (c->try_harder != current)
+ return ERR_PTR(-ENOSPC);
- if (c->try_harder && c->try_harder != cl) {
- closure_wait_event_async(&c->try_wait, cl, !c->try_harder);
- return ERR_PTR(-EAGAIN);
- }
-
- c->try_harder = cl;
- c->try_harder_start = local_clock();
-retry:
- list_for_each_entry_reverse(i, &c->btree_cache, list) {
- int r = mca_reap(i, cl, btree_order(k));
- if (!r)
- return i;
- if (r != -ENOMEM)
- ret = r;
- }
+ list_for_each_entry_reverse(b, &c->btree_cache, list)
+ if (!mca_reap(b, btree_order(k), false))
+ return b;
- if (ret == -EAGAIN &&
- closure_blocking(cl)) {
- mutex_unlock(&c->bucket_lock);
- closure_sync(cl);
- mutex_lock(&c->bucket_lock);
- goto retry;
- }
+ list_for_each_entry_reverse(b, &c->btree_cache, list)
+ if (!mca_reap(b, btree_order(k), true))
+ return b;
- return ERR_PTR(ret);
+ return ERR_PTR(-ENOMEM);
}
/*
@@ -829,20 +875,21 @@ retry:
* cannibalize_bucket() will take. This means every time we unlock the root of
* the btree, we need to release this lock if we have it held.
*/
-void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl)
+static void bch_cannibalize_unlock(struct cache_set *c)
{
- if (c->try_harder == cl) {
+ if (c->try_harder == current) {
bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
c->try_harder = NULL;
- __closure_wake_up(&c->try_wait);
+ wake_up(&c->try_wait);
}
}
-static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,
- int level, struct closure *cl)
+static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
{
struct btree *b;
+ BUG_ON(current->bio_list);
+
lockdep_assert_held(&c->bucket_lock);
if (mca_find(c, k))
@@ -852,14 +899,14 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,
* the list. Check if there's any freed nodes there:
*/
list_for_each_entry(b, &c->btree_cache_freeable, list)
- if (!mca_reap(b, NULL, btree_order(k)))
+ if (!mca_reap(b, btree_order(k), false))
goto out;
/* We never free struct btree itself, just the memory that holds the on
* disk node. Check the freed list before allocating a new one:
*/
list_for_each_entry(b, &c->btree_cache_freed, list)
- if (!mca_reap(b, NULL, 0)) {
+ if (!mca_reap(b, 0, false)) {
mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
if (!b->sets[0].data)
goto err;
@@ -884,6 +931,7 @@ out:
lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
b->level = level;
+ b->parent = (void *) ~0UL;
mca_reinit(b);
@@ -892,7 +940,7 @@ err:
if (b)
rw_unlock(true, b);
- b = mca_cannibalize(c, k, level, cl);
+ b = mca_cannibalize(c, k);
if (!IS_ERR(b))
goto out;
@@ -903,17 +951,15 @@ err:
* bch_btree_node_get - find a btree node in the cache and lock it, reading it
* in from disk if necessary.
*
- * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
- * if that closure is in non blocking mode, will return -EAGAIN.
+ * If IO is necessary and running under generic_make_request, returns -EAGAIN.
*
* The btree node will have either a read or a write lock held, depending on
* level and op->lock.
*/
struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
- int level, struct btree_op *op)
+ int level, bool write)
{
int i = 0;
- bool write = level <= op->lock;
struct btree *b;
BUG_ON(level < 0);
@@ -925,7 +971,7 @@ retry:
return ERR_PTR(-EAGAIN);
mutex_lock(&c->bucket_lock);
- b = mca_alloc(c, k, level, &op->cl);
+ b = mca_alloc(c, k, level);
mutex_unlock(&c->bucket_lock);
if (!b)
@@ -971,7 +1017,7 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
struct btree *b;
mutex_lock(&c->bucket_lock);
- b = mca_alloc(c, k, level, NULL);
+ b = mca_alloc(c, k, level);
mutex_unlock(&c->bucket_lock);
if (!IS_ERR_OR_NULL(b)) {
@@ -982,17 +1028,12 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
/* Btree alloc */
-static void btree_node_free(struct btree *b, struct btree_op *op)
+static void btree_node_free(struct btree *b)
{
unsigned i;
trace_bcache_btree_node_free(b);
- /*
- * The BUG_ON() in btree_node_get() implies that we must have a write
- * lock on parent to free or even invalidate a node
- */
- BUG_ON(op->lock <= b->level);
BUG_ON(b == b->c->root);
if (btree_node_dirty(b))
@@ -1015,27 +1056,26 @@ static void btree_node_free(struct btree *b, struct btree_op *op)
mutex_unlock(&b->c->bucket_lock);
}
-struct btree *bch_btree_node_alloc(struct cache_set *c, int level,
- struct closure *cl)
+struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
{
BKEY_PADDED(key) k;
struct btree *b = ERR_PTR(-EAGAIN);
mutex_lock(&c->bucket_lock);
retry:
- if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, cl))
+ if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
goto err;
+ bkey_put(c, &k.key);
SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
- b = mca_alloc(c, &k.key, level, cl);
+ b = mca_alloc(c, &k.key, level);
if (IS_ERR(b))
goto err_free;
if (!b) {
cache_bug(c,
"Tried to allocate bucket that was in btree cache");
- __bkey_put(c, &k.key);
goto retry;
}
@@ -1048,7 +1088,6 @@ retry:
return b;
err_free:
bch_bucket_free(c, &k.key);
- __bkey_put(c, &k.key);
err:
mutex_unlock(&c->bucket_lock);
@@ -1056,16 +1095,31 @@ err:
return b;
}
-static struct btree *btree_node_alloc_replacement(struct btree *b,
- struct closure *cl)
+static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
{
- struct btree *n = bch_btree_node_alloc(b->c, b->level, cl);
+ struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
if (!IS_ERR_OR_NULL(n))
bch_btree_sort_into(b, n);
return n;
}
+static void make_btree_freeing_key(struct btree *b, struct bkey *k)
+{
+ unsigned i;
+
+ bkey_copy(k, &b->key);
+ bkey_copy_key(k, &ZERO_KEY);
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ uint8_t g = PTR_BUCKET(b->c, k, i)->gen + 1;
+
+ SET_PTR_GEN(k, i, g);
+ }
+
+ atomic_inc(&b->c->prio_blocked);
+}
+
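[editor's note] make_btree_freeing_key() packages up the key of a node that is about to be freed: it copies the node's key, clears the comparison part, bumps the generation stored in each pointer so stale copies of the old pointers are rejected, and takes a prio_blocked reference. A simplified model of the generation bump is sketched below; note that in the real code the new generation comes from the bucket (PTR_BUCKET(...)->gen + 1), whereas this toy version derives it from the key itself, and struct toy_key is not the real bkey layout.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PTRS 4

    struct toy_key {
        unsigned nr_ptrs;
        uint8_t  gen[MAX_PTRS];    /* generation carried in each pointer */
    };

    /*
     * Bumping the generation past the current one makes every old copy of the
     * pointer compare as stale, so readers reject it and the bucket can be reused.
     */
    void make_freeing_key(const struct toy_key *node_key, struct toy_key *out)
    {
        unsigned i;

        *out = *node_key;
        for (i = 0; i < out->nr_ptrs; i++)
            out->gen[i] = (uint8_t)(node_key->gen[i] + 1);
    }

    int main(void)
    {
        struct toy_key k = { .nr_ptrs = 2, .gen = { 7, 255 } };
        struct toy_key f;

        make_freeing_key(&k, &f);
        printf("gen: %u %u -> %u %u\n", k.gen[0], k.gen[1], f.gen[0], f.gen[1]);
        return 0;
    }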
/* Garbage collection */
uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
@@ -1119,12 +1173,10 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
-static int btree_gc_mark_node(struct btree *b, unsigned *keys,
- struct gc_stat *gc)
+static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
uint8_t stale = 0;
- unsigned last_dev = -1;
- struct bcache_device *d = NULL;
+ unsigned keys = 0, good_keys = 0;
struct bkey *k;
struct btree_iter iter;
struct bset_tree *t;
@@ -1132,27 +1184,17 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys,
gc->nodes++;
for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
- if (last_dev != KEY_INODE(k)) {
- last_dev = KEY_INODE(k);
-
- d = KEY_INODE(k) < b->c->nr_uuids
- ? b->c->devices[last_dev]
- : NULL;
- }
-
stale = max(stale, btree_mark_key(b, k));
+ keys++;
if (bch_ptr_bad(b, k))
continue;
- *keys += bkey_u64s(k);
-
gc->key_bytes += bkey_u64s(k);
gc->nkeys++;
+ good_keys++;
gc->data += KEY_SIZE(k);
- if (KEY_DIRTY(k))
- gc->dirty += KEY_SIZE(k);
}
for (t = b->sets; t <= &b->sets[b->nsets]; t++)
@@ -1161,78 +1203,74 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys,
bkey_cmp(&b->key, &t->end) < 0,
b, "found short btree key in gc");
- return stale;
-}
-
-static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k,
- struct btree_op *op)
-{
- /*
- * We block priorities from being written for the duration of garbage
- * collection, so we can't sleep in btree_alloc() ->
- * bch_bucket_alloc_set(), or we'd risk deadlock - so we don't pass it
- * our closure.
- */
- struct btree *n = btree_node_alloc_replacement(b, NULL);
+ if (b->c->gc_always_rewrite)
+ return true;
- if (!IS_ERR_OR_NULL(n)) {
- swap(b, n);
- __bkey_put(b->c, &b->key);
+ if (stale > 10)
+ return true;
- memcpy(k->ptr, b->key.ptr,
- sizeof(uint64_t) * KEY_PTRS(&b->key));
+ if ((keys - good_keys) * 2 > keys)
+ return true;
- btree_node_free(n, op);
- up_write(&n->lock);
- }
-
- return b;
+ return false;
}
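[editor's note] btree_gc_mark_node() now reports a single should-rewrite verdict instead of returning the stale count and key totals separately: rewrite when the cache set forces it, when a pointer is more than 10 generations stale, or when over half of the node's keys are bad. That decision is just the following predicate, with the inputs taken from the counters the function accumulates:

    #include <stdbool.h>

    bool should_rewrite_node(bool gc_always_rewrite, unsigned stale,
                             unsigned keys, unsigned good_keys)
    {
        if (gc_always_rewrite)
            return true;

        if (stale > 10)                 /* pointers far behind their buckets */
            return true;

        if ((keys - good_keys) * 2 > keys)    /* more than half the keys are bad */
            return true;

        return false;
    }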
-/*
- * Leaving this at 2 until we've got incremental garbage collection done; it
- * could be higher (and has been tested with 4) except that garbage collection
- * could take much longer, adversely affecting latency.
- */
-#define GC_MERGE_NODES 2U
+#define GC_MERGE_NODES 4U
struct gc_merge_info {
struct btree *b;
- struct bkey *k;
unsigned keys;
};
-static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
- struct gc_stat *gc, struct gc_merge_info *r)
+static int bch_btree_insert_node(struct btree *, struct btree_op *,
+ struct keylist *, atomic_t *, struct bkey *);
+
+static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ struct keylist *keylist, struct gc_stat *gc,
+ struct gc_merge_info *r)
{
- unsigned nodes = 0, keys = 0, blocks;
- int i;
+ unsigned i, nodes = 0, keys = 0, blocks;
+ struct btree *new_nodes[GC_MERGE_NODES];
+ struct closure cl;
+ struct bkey *k;
- while (nodes < GC_MERGE_NODES && r[nodes].b)
+ memset(new_nodes, 0, sizeof(new_nodes));
+ closure_init_stack(&cl);
+
+ while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
keys += r[nodes++].keys;
blocks = btree_default_blocks(b->c) * 2 / 3;
if (nodes < 2 ||
__set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
- return;
-
- for (i = nodes - 1; i >= 0; --i) {
- if (r[i].b->written)
- r[i].b = btree_gc_alloc(r[i].b, r[i].k, op);
+ return 0;
- if (r[i].b->written)
- return;
+ for (i = 0; i < nodes; i++) {
+ new_nodes[i] = btree_node_alloc_replacement(r[i].b, false);
+ if (IS_ERR_OR_NULL(new_nodes[i]))
+ goto out_nocoalesce;
}
for (i = nodes - 1; i > 0; --i) {
- struct bset *n1 = r[i].b->sets->data;
- struct bset *n2 = r[i - 1].b->sets->data;
+ struct bset *n1 = new_nodes[i]->sets->data;
+ struct bset *n2 = new_nodes[i - 1]->sets->data;
struct bkey *k, *last = NULL;
keys = 0;
- if (i == 1) {
+ if (i > 1) {
+ for (k = n2->start;
+ k < end(n2);
+ k = bkey_next(k)) {
+ if (__set_blocks(n1, n1->keys + keys +
+ bkey_u64s(k), b->c) > blocks)
+ break;
+
+ last = k;
+ keys += bkey_u64s(k);
+ }
+ } else {
/*
* Last node we're not getting rid of - we're getting
* rid of the node at r[0]. Have to try and fit all of
@@ -1241,37 +1279,27 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
* length keys (shouldn't be possible in practice,
* though)
*/
- if (__set_blocks(n1, n1->keys + r->keys,
- b->c) > btree_blocks(r[i].b))
- return;
+ if (__set_blocks(n1, n1->keys + n2->keys,
+ b->c) > btree_blocks(new_nodes[i]))
+ goto out_nocoalesce;
keys = n2->keys;
+ /* Take the key of the node we're getting rid of */
last = &r->b->key;
- } else
- for (k = n2->start;
- k < end(n2);
- k = bkey_next(k)) {
- if (__set_blocks(n1, n1->keys + keys +
- bkey_u64s(k), b->c) > blocks)
- break;
-
- last = k;
- keys += bkey_u64s(k);
- }
+ }
BUG_ON(__set_blocks(n1, n1->keys + keys,
- b->c) > btree_blocks(r[i].b));
+ b->c) > btree_blocks(new_nodes[i]));
- if (last) {
- bkey_copy_key(&r[i].b->key, last);
- bkey_copy_key(r[i].k, last);
- }
+ if (last)
+ bkey_copy_key(&new_nodes[i]->key, last);
memcpy(end(n1),
n2->start,
(void *) node(n2, keys) - (void *) n2->start);
n1->keys += keys;
+ r[i].keys = n1->keys;
memmove(n2->start,
node(n2, keys),
@@ -1279,95 +1307,176 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
n2->keys -= keys;
- r[i].keys = n1->keys;
- r[i - 1].keys = n2->keys;
+ if (bch_keylist_realloc(keylist,
+ KEY_PTRS(&new_nodes[i]->key), b->c))
+ goto out_nocoalesce;
+
+ bch_btree_node_write(new_nodes[i], &cl);
+ bch_keylist_add(keylist, &new_nodes[i]->key);
}
- btree_node_free(r->b, op);
- up_write(&r->b->lock);
+ for (i = 0; i < nodes; i++) {
+ if (bch_keylist_realloc(keylist, KEY_PTRS(&r[i].b->key), b->c))
+ goto out_nocoalesce;
- trace_bcache_btree_gc_coalesce(nodes);
+ make_btree_freeing_key(r[i].b, keylist->top);
+ bch_keylist_push(keylist);
+ }
+
+ /* We emptied out this node */
+ BUG_ON(new_nodes[0]->sets->data->keys);
+ btree_node_free(new_nodes[0]);
+ rw_unlock(true, new_nodes[0]);
+
+ closure_sync(&cl);
+
+ for (i = 0; i < nodes; i++) {
+ btree_node_free(r[i].b);
+ rw_unlock(true, r[i].b);
+
+ r[i].b = new_nodes[i];
+ }
+
+ bch_btree_insert_node(b, op, keylist, NULL, NULL);
+ BUG_ON(!bch_keylist_empty(keylist));
+ memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
+ r[nodes - 1].b = ERR_PTR(-EINTR);
+
+ trace_bcache_btree_gc_coalesce(nodes);
gc->nodes--;
- nodes--;
- memmove(&r[0], &r[1], sizeof(struct gc_merge_info) * nodes);
- memset(&r[nodes], 0, sizeof(struct gc_merge_info));
+ /* Invalidated our iterator */
+ return -EINTR;
+
+out_nocoalesce:
+ closure_sync(&cl);
+
+ while ((k = bch_keylist_pop(keylist)))
+ if (!bkey_cmp(k, &ZERO_KEY))
+ atomic_dec(&b->c->prio_blocked);
+
+ for (i = 0; i < nodes; i++)
+ if (!IS_ERR_OR_NULL(new_nodes[i])) {
+ btree_node_free(new_nodes[i]);
+ rw_unlock(true, new_nodes[i]);
+ }
+ return 0;
}
-static int btree_gc_recurse(struct btree *b, struct btree_op *op,
- struct closure *writes, struct gc_stat *gc)
+static unsigned btree_gc_count_keys(struct btree *b)
{
- void write(struct btree *r)
- {
- if (!r->written)
- bch_btree_node_write(r, &op->cl);
- else if (btree_node_dirty(r))
- bch_btree_node_write(r, writes);
+ struct bkey *k;
+ struct btree_iter iter;
+ unsigned ret = 0;
- up_write(&r->lock);
- }
+ for_each_key_filter(b, k, &iter, bch_ptr_bad)
+ ret += bkey_u64s(k);
+
+ return ret;
+}
- int ret = 0, stale;
+static int btree_gc_recurse(struct btree *b, struct btree_op *op,
+ struct closure *writes, struct gc_stat *gc)
+{
unsigned i;
+ int ret = 0;
+ bool should_rewrite;
+ struct btree *n;
+ struct bkey *k;
+ struct keylist keys;
+ struct btree_iter iter;
struct gc_merge_info r[GC_MERGE_NODES];
+ struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
- memset(r, 0, sizeof(r));
+ bch_keylist_init(&keys);
+ bch_btree_iter_init(b, &iter, &b->c->gc_done);
- while ((r->k = bch_next_recurse_key(b, &b->c->gc_done))) {
- r->b = bch_btree_node_get(b->c, r->k, b->level - 1, op);
+ for (i = 0; i < GC_MERGE_NODES; i++)
+ r[i].b = ERR_PTR(-EINTR);
- if (IS_ERR(r->b)) {
- ret = PTR_ERR(r->b);
- break;
+ while (1) {
+ k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+ if (k) {
+ r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
+ if (IS_ERR(r->b)) {
+ ret = PTR_ERR(r->b);
+ break;
+ }
+
+ r->keys = btree_gc_count_keys(r->b);
+
+ ret = btree_gc_coalesce(b, op, &keys, gc, r);
+ if (ret)
+ break;
}
- r->keys = 0;
- stale = btree_gc_mark_node(r->b, &r->keys, gc);
+ if (!last->b)
+ break;
- if (!b->written &&
- (r->b->level || stale > 10 ||
- b->c->gc_always_rewrite))
- r->b = btree_gc_alloc(r->b, r->k, op);
+ if (!IS_ERR(last->b)) {
+ should_rewrite = btree_gc_mark_node(last->b, gc);
+ if (should_rewrite) {
+ n = btree_node_alloc_replacement(last->b,
+ false);
- if (r->b->level)
- ret = btree_gc_recurse(r->b, op, writes, gc);
+ if (!IS_ERR_OR_NULL(n)) {
+ bch_btree_node_write_sync(n);
+ bch_keylist_add(&keys, &n->key);
- if (ret) {
- write(r->b);
- break;
- }
+ make_btree_freeing_key(last->b,
+ keys.top);
+ bch_keylist_push(&keys);
- bkey_copy_key(&b->c->gc_done, r->k);
+ btree_node_free(last->b);
- if (!b->written)
- btree_gc_coalesce(b, op, gc, r);
+ bch_btree_insert_node(b, op, &keys,
+ NULL, NULL);
+ BUG_ON(!bch_keylist_empty(&keys));
- if (r[GC_MERGE_NODES - 1].b)
- write(r[GC_MERGE_NODES - 1].b);
+ rw_unlock(true, last->b);
+ last->b = n;
- memmove(&r[1], &r[0],
- sizeof(struct gc_merge_info) * (GC_MERGE_NODES - 1));
+ /* Invalidated our iterator */
+ ret = -EINTR;
+ break;
+ }
+ }
+
+ if (last->b->level) {
+ ret = btree_gc_recurse(last->b, op, writes, gc);
+ if (ret)
+ break;
+ }
+
+ bkey_copy_key(&b->c->gc_done, &last->b->key);
+
+ /*
+ * Must flush leaf nodes before gc ends, since replace
+ * operations aren't journalled
+ */
+ if (btree_node_dirty(last->b))
+ bch_btree_node_write(last->b, writes);
+ rw_unlock(true, last->b);
+ }
+
+ memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
+ r->b = NULL;
- /* When we've got incremental GC working, we'll want to do
- * if (should_resched())
- * return -EAGAIN;
- */
- cond_resched();
-#if 0
if (need_resched()) {
ret = -EAGAIN;
break;
}
-#endif
}
- for (i = 1; i < GC_MERGE_NODES && r[i].b; i++)
- write(r[i].b);
+ for (i = 0; i < GC_MERGE_NODES; i++)
+ if (!IS_ERR_OR_NULL(r[i].b)) {
+ if (btree_node_dirty(r[i].b))
+ bch_btree_node_write(r[i].b, writes);
+ rw_unlock(true, r[i].b);
+ }
- /* Might have freed some children, must remove their keys */
- if (!b->written)
- bch_btree_sort(b);
+ bch_keylist_free(&keys);
return ret;
}
@@ -1376,29 +1485,31 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
struct closure *writes, struct gc_stat *gc)
{
struct btree *n = NULL;
- unsigned keys = 0;
- int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);
-
- if (b->level || stale > 10)
- n = btree_node_alloc_replacement(b, NULL);
+ int ret = 0;
+ bool should_rewrite;
- if (!IS_ERR_OR_NULL(n))
- swap(b, n);
+ should_rewrite = btree_gc_mark_node(b, gc);
+ if (should_rewrite) {
+ n = btree_node_alloc_replacement(b, false);
- if (b->level)
- ret = btree_gc_recurse(b, op, writes, gc);
+ if (!IS_ERR_OR_NULL(n)) {
+ bch_btree_node_write_sync(n);
+ bch_btree_set_root(n);
+ btree_node_free(b);
+ rw_unlock(true, n);
- if (!b->written || btree_node_dirty(b)) {
- bch_btree_node_write(b, n ? &op->cl : NULL);
+ return -EINTR;
+ }
}
- if (!IS_ERR_OR_NULL(n)) {
- closure_sync(&op->cl);
- bch_btree_set_root(b);
- btree_node_free(n, op);
- rw_unlock(true, b);
+ if (b->level) {
+ ret = btree_gc_recurse(b, op, writes, gc);
+ if (ret)
+ return ret;
}
+ bkey_copy_key(&b->c->gc_done, &b->key);
+
return ret;
}
@@ -1479,9 +1590,8 @@ size_t bch_btree_gc_finish(struct cache_set *c)
return available;
}
-static void bch_btree_gc(struct closure *cl)
+static void bch_btree_gc(struct cache_set *c)
{
- struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
int ret;
unsigned long available;
struct gc_stat stats;
@@ -1493,47 +1603,73 @@ static void bch_btree_gc(struct closure *cl)
memset(&stats, 0, sizeof(struct gc_stat));
closure_init_stack(&writes);
- bch_btree_op_init_stack(&op);
- op.lock = SHRT_MAX;
+ bch_btree_op_init(&op, SHRT_MAX);
btree_gc_start(c);
- atomic_inc(&c->prio_blocked);
-
- ret = btree_root(gc_root, c, &op, &writes, &stats);
- closure_sync(&op.cl);
- closure_sync(&writes);
-
- if (ret) {
- pr_warn("gc failed!");
- continue_at(cl, bch_btree_gc, bch_gc_wq);
- }
+ do {
+ ret = btree_root(gc_root, c, &op, &writes, &stats);
+ closure_sync(&writes);
- /* Possibly wait for new UUIDs or whatever to hit disk */
- bch_journal_meta(c, &op.cl);
- closure_sync(&op.cl);
+ if (ret && ret != -EAGAIN)
+ pr_warn("gc failed!");
+ } while (ret);
available = bch_btree_gc_finish(c);
-
- atomic_dec(&c->prio_blocked);
wake_up_allocators(c);
bch_time_stats_update(&c->btree_gc_time, start_time);
stats.key_bytes *= sizeof(uint64_t);
- stats.dirty <<= 9;
stats.data <<= 9;
stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
trace_bcache_gc_end(c);
- continue_at(cl, bch_moving_gc, bch_gc_wq);
+ bch_moving_gc(c);
+}
+
+static int bch_gc_thread(void *arg)
+{
+ struct cache_set *c = arg;
+ struct cache *ca;
+ unsigned i;
+
+ while (1) {
+again:
+ bch_btree_gc(c);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+
+ mutex_lock(&c->bucket_lock);
+
+ for_each_cache(ca, c, i)
+ if (ca->invalidate_needs_gc) {
+ mutex_unlock(&c->bucket_lock);
+ set_current_state(TASK_RUNNING);
+ goto again;
+ }
+
+ mutex_unlock(&c->bucket_lock);
+
+ try_to_freeze();
+ schedule();
+ }
+
+ return 0;
}
-void bch_queue_gc(struct cache_set *c)
+int bch_gc_thread_start(struct cache_set *c)
{
- closure_trylock_call(&c->gc.cl, bch_btree_gc, bch_gc_wq, &c->cl);
+ c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+ if (IS_ERR(c->gc_thread))
+ return PTR_ERR(c->gc_thread);
+
+ set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
+ return 0;
}
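[editor's note] Garbage collection becomes a dedicated kernel thread instead of a closure on a workqueue: the loop runs a pass, marks itself TASK_INTERRUPTIBLE before checking for a stop request or a cache that still needs invalidation, and only then schedules, so a wakeup cannot slip between the check and the sleep. A rough userspace analogue of this wake-on-demand worker is sketched below; it uses a condition variable, which avoids the lost-wakeup problem differently than task states do, and all names are illustrative.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
    static bool gc_requested, stopping;

    static void run_gc_pass(void)
    {
        printf("gc pass\n");
    }

    static void *gc_thread(void *unused)
    {
        (void)unused;
        pthread_mutex_lock(&lock);
        while (!stopping) {
            if (gc_requested) {
                gc_requested = false;
                pthread_mutex_unlock(&lock);
                run_gc_pass();          /* like bch_btree_gc(c) */
                pthread_mutex_lock(&lock);
                continue;
            }
            pthread_cond_wait(&wake, &lock);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    static void wake_up_gc(void)        /* like wake_up_process(c->gc_thread) */
    {
        pthread_mutex_lock(&lock);
        gc_requested = true;
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, gc_thread, NULL);
        wake_up_gc();

        pthread_mutex_lock(&lock);
        stopping = true;
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
    }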
/* Initial partial gc */
@@ -1541,9 +1677,9 @@ void bch_queue_gc(struct cache_set *c)
static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
unsigned long **seen)
{
- int ret;
+ int ret = 0;
unsigned i;
- struct bkey *k;
+ struct bkey *k, *p = NULL;
struct bucket *g;
struct btree_iter iter;
@@ -1570,31 +1706,32 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
}
if (b->level) {
- k = bch_next_recurse_key(b, &ZERO_KEY);
+ bch_btree_iter_init(b, &iter, NULL);
- while (k) {
- struct bkey *p = bch_next_recurse_key(b, k);
- if (p)
- btree_node_prefetch(b->c, p, b->level - 1);
+ do {
+ k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+ if (k)
+ btree_node_prefetch(b->c, k, b->level - 1);
- ret = btree(check_recurse, k, b, op, seen);
- if (ret)
- return ret;
+ if (p)
+ ret = btree(check_recurse, p, b, op, seen);
- k = p;
- }
+ p = k;
+ } while (p && !ret);
}
return 0;
}
-int bch_btree_check(struct cache_set *c, struct btree_op *op)
+int bch_btree_check(struct cache_set *c)
{
int ret = -ENOMEM;
unsigned i;
unsigned long *seen[MAX_CACHES_PER_SET];
+ struct btree_op op;
memset(seen, 0, sizeof(seen));
+ bch_btree_op_init(&op, SHRT_MAX);
for (i = 0; c->cache[i]; i++) {
size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
@@ -1606,7 +1743,7 @@ int bch_btree_check(struct cache_set *c, struct btree_op *op)
memset(seen[i], 0xFF, n);
}
- ret = btree_root(check_recurse, c, op, seen);
+ ret = btree_root(check_recurse, c, &op, seen);
err:
for (i = 0; i < MAX_CACHES_PER_SET; i++)
kfree(seen[i]);
@@ -1628,10 +1765,9 @@ static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
bch_bset_fix_lookup_table(b, where);
}
-static bool fix_overlapping_extents(struct btree *b,
- struct bkey *insert,
+static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
struct btree_iter *iter,
- struct btree_op *op)
+ struct bkey *replace_key)
{
void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
{
@@ -1659,39 +1795,38 @@ static bool fix_overlapping_extents(struct btree *b,
* We might overlap with 0 size extents; we can't skip these
* because if they're in the set we're inserting to we have to
* adjust them so they don't overlap with the key we're
- * inserting. But we don't want to check them for BTREE_REPLACE
+ * inserting. But we don't want to check them for replace
* operations.
*/
- if (op->type == BTREE_REPLACE &&
- KEY_SIZE(k)) {
+ if (replace_key && KEY_SIZE(k)) {
/*
* k might have been split since we inserted/found the
* key we're replacing
*/
unsigned i;
uint64_t offset = KEY_START(k) -
- KEY_START(&op->replace);
+ KEY_START(replace_key);
/* But it must be a subset of the replace key */
- if (KEY_START(k) < KEY_START(&op->replace) ||
- KEY_OFFSET(k) > KEY_OFFSET(&op->replace))
+ if (KEY_START(k) < KEY_START(replace_key) ||
+ KEY_OFFSET(k) > KEY_OFFSET(replace_key))
goto check_failed;
/* We didn't find a key that we were supposed to */
if (KEY_START(k) > KEY_START(insert) + sectors_found)
goto check_failed;
- if (KEY_PTRS(&op->replace) != KEY_PTRS(k))
+ if (KEY_PTRS(replace_key) != KEY_PTRS(k))
goto check_failed;
/* skip past gen */
offset <<= 8;
- BUG_ON(!KEY_PTRS(&op->replace));
+ BUG_ON(!KEY_PTRS(replace_key));
- for (i = 0; i < KEY_PTRS(&op->replace); i++)
- if (k->ptr[i] != op->replace.ptr[i] + offset)
+ for (i = 0; i < KEY_PTRS(replace_key); i++)
+ if (k->ptr[i] != replace_key->ptr[i] + offset)
goto check_failed;
sectors_found = KEY_OFFSET(k) - KEY_START(insert);
@@ -1759,9 +1894,8 @@ static bool fix_overlapping_extents(struct btree *b,
}
check_failed:
- if (op->type == BTREE_REPLACE) {
+ if (replace_key) {
if (!sectors_found) {
- op->insert_collision = true;
return true;
} else if (sectors_found < KEY_SIZE(insert)) {
SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
@@ -1774,7 +1908,7 @@ check_failed:
}
static bool btree_insert_key(struct btree *b, struct btree_op *op,
- struct bkey *k)
+ struct bkey *k, struct bkey *replace_key)
{
struct bset *i = b->sets[b->nsets].data;
struct bkey *m, *prev;
@@ -1786,22 +1920,19 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
if (!b->level) {
struct btree_iter iter;
- struct bkey search = KEY(KEY_INODE(k), KEY_START(k), 0);
/*
* bset_search() returns the first key that is strictly greater
* than the search key - but for back merging, we want to find
- * the first key that is greater than or equal to KEY_START(k) -
- * unless KEY_START(k) is 0.
+ * the previous key.
*/
- if (KEY_OFFSET(&search))
- SET_KEY_OFFSET(&search, KEY_OFFSET(&search) - 1);
-
prev = NULL;
- m = bch_btree_iter_init(b, &iter, &search);
+ m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));
- if (fix_overlapping_extents(b, k, &iter, op))
+ if (fix_overlapping_extents(b, k, &iter, replace_key)) {
+ op->insert_collision = true;
return false;
+ }
while (m != end(i) &&
bkey_cmp(k, &START_KEY(m)) > 0)
@@ -1825,8 +1956,10 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
if (m != end(i) &&
bch_bkey_try_merge(b, k, m))
goto copy;
- } else
+ } else {
+ BUG_ON(replace_key);
m = bch_bset_search(b, &b->sets[b->nsets], k);
+ }
insert: shift_keys(b, m, k);
copy: bkey_copy(m, k);
@@ -1835,74 +1968,72 @@ merged:
bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
KEY_START(k), KEY_SIZE(k));
- bch_check_keys(b, "%u for %s", status, op_type(op));
+ bch_check_keys(b, "%u for %s", status,
+ replace_key ? "replace" : "insert");
if (b->level && !KEY_OFFSET(k))
btree_current_write(b)->prio_blocked++;
- trace_bcache_btree_insert_key(b, k, op->type, status);
+ trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
return true;
}
-static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op)
+static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ struct bkey *replace_key)
{
bool ret = false;
- struct bkey *k;
- unsigned oldsize = bch_count_data(b);
+ int oldsize = bch_count_data(b);
- while ((k = bch_keylist_pop(&op->keys))) {
- bkey_put(b->c, k, b->level);
- ret |= btree_insert_key(b, op, k);
- }
+ while (!bch_keylist_empty(insert_keys)) {
+ struct bset *i = write_block(b);
+ struct bkey *k = insert_keys->keys;
- BUG_ON(bch_count_data(b) < oldsize);
- return ret;
-}
-
-bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
- struct bio *bio)
-{
- bool ret = false;
- uint64_t btree_ptr = b->key.ptr[0];
- unsigned long seq = b->seq;
- BKEY_PADDED(k) tmp;
-
- rw_unlock(false, b);
- rw_lock(true, b, b->level);
+ if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
+ > btree_blocks(b))
+ break;
- if (b->key.ptr[0] != btree_ptr ||
- b->seq != seq + 1 ||
- should_split(b))
- goto out;
+ if (bkey_cmp(k, &b->key) <= 0) {
+ if (!b->level)
+ bkey_put(b->c, k);
- op->replace = KEY(op->inode, bio_end_sector(bio), bio_sectors(bio));
+ ret |= btree_insert_key(b, op, k, replace_key);
+ bch_keylist_pop_front(insert_keys);
+ } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
+ BKEY_PADDED(key) temp;
+ bkey_copy(&temp.key, insert_keys->keys);
- SET_KEY_PTRS(&op->replace, 1);
- get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t));
+ bch_cut_back(&b->key, &temp.key);
+ bch_cut_front(&b->key, insert_keys->keys);
- SET_PTR_DEV(&op->replace, 0, PTR_CHECK_DEV);
+ ret |= btree_insert_key(b, op, &temp.key, replace_key);
+ break;
+ } else {
+ break;
+ }
+ }
- bkey_copy(&tmp.k, &op->replace);
+ BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
- BUG_ON(op->type != BTREE_INSERT);
- BUG_ON(!btree_insert_key(b, op, &tmp.k));
- ret = true;
-out:
- downgrade_write(&b->lock);
+ BUG_ON(bch_count_data(b) < oldsize);
return ret;
}
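[editor's note] bch_btree_insert_keys() now drains a keylist directly: keys that end at or before the node's end key are inserted and popped, and a key straddling the node boundary is split with bch_cut_back()/bch_cut_front() so the in-range piece is inserted now and the remainder stays queued for the next leaf. The split is easiest to see on plain half-open ranges; the sketch below uses [start, end) extents rather than the real bkey encoding, which addresses extents by their end offset.

    #include <assert.h>
    #include <stdint.h>

    struct extent {
        uint64_t start, end;    /* half-open range of sectors */
    };

    /* Keep only the part of @e that lies before @boundary ("cut back"). */
    static void cut_back(uint64_t boundary, struct extent *e)
    {
        if (e->end > boundary)
            e->end = boundary;
    }

    /* Keep only the part of @e that lies at or after @boundary ("cut front"). */
    static void cut_front(uint64_t boundary, struct extent *e)
    {
        if (e->start < boundary)
            e->start = boundary;
    }

    int main(void)
    {
        struct extent key = { 90, 130 };
        struct extent head = key;
        uint64_t node_end = 100;

        cut_back(node_end, &head);      /* [90, 100) goes into this node */
        cut_front(node_end, &key);      /* [100, 130) is left for the next node */

        assert(head.start == 90 && head.end == 100);
        assert(key.start == 100 && key.end == 130);
        return 0;
    }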
-static int btree_split(struct btree *b, struct btree_op *op)
+static int btree_split(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ struct bkey *replace_key)
{
- bool split, root = b == b->c->root;
+ bool split;
struct btree *n1, *n2 = NULL, *n3 = NULL;
uint64_t start_time = local_clock();
+ struct closure cl;
+ struct keylist parent_keys;
- if (b->level)
- set_closure_blocking(&op->cl);
+ closure_init_stack(&cl);
+ bch_keylist_init(&parent_keys);
- n1 = btree_node_alloc_replacement(b, &op->cl);
+ n1 = btree_node_alloc_replacement(b, true);
if (IS_ERR(n1))
goto err;
@@ -1913,19 +2044,20 @@ static int btree_split(struct btree *b, struct btree_op *op)
trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
- n2 = bch_btree_node_alloc(b->c, b->level, &op->cl);
+ n2 = bch_btree_node_alloc(b->c, b->level, true);
if (IS_ERR(n2))
goto err_free1;
- if (root) {
- n3 = bch_btree_node_alloc(b->c, b->level + 1, &op->cl);
+ if (!b->parent) {
+ n3 = bch_btree_node_alloc(b->c, b->level + 1, true);
if (IS_ERR(n3))
goto err_free2;
}
- bch_btree_insert_keys(n1, op);
+ bch_btree_insert_keys(n1, op, insert_keys, replace_key);
- /* Has to be a linear search because we don't have an auxiliary
+ /*
+ * Has to be a linear search because we don't have an auxiliary
* search tree yet
*/
@@ -1944,60 +2076,57 @@ static int btree_split(struct btree *b, struct btree_op *op)
bkey_copy_key(&n2->key, &b->key);
- bch_keylist_add(&op->keys, &n2->key);
- bch_btree_node_write(n2, &op->cl);
+ bch_keylist_add(&parent_keys, &n2->key);
+ bch_btree_node_write(n2, &cl);
rw_unlock(true, n2);
} else {
trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
- bch_btree_insert_keys(n1, op);
+ bch_btree_insert_keys(n1, op, insert_keys, replace_key);
}
- bch_keylist_add(&op->keys, &n1->key);
- bch_btree_node_write(n1, &op->cl);
+ bch_keylist_add(&parent_keys, &n1->key);
+ bch_btree_node_write(n1, &cl);
if (n3) {
+ /* Depth increases, make a new root */
bkey_copy_key(&n3->key, &MAX_KEY);
- bch_btree_insert_keys(n3, op);
- bch_btree_node_write(n3, &op->cl);
+ bch_btree_insert_keys(n3, op, &parent_keys, NULL);
+ bch_btree_node_write(n3, &cl);
- closure_sync(&op->cl);
+ closure_sync(&cl);
bch_btree_set_root(n3);
rw_unlock(true, n3);
- } else if (root) {
- op->keys.top = op->keys.bottom;
- closure_sync(&op->cl);
+
+ btree_node_free(b);
+ } else if (!b->parent) {
+ /* Root filled up but didn't need to be split */
+ closure_sync(&cl);
bch_btree_set_root(n1);
- } else {
- unsigned i;
- bkey_copy(op->keys.top, &b->key);
- bkey_copy_key(op->keys.top, &ZERO_KEY);
+ btree_node_free(b);
+ } else {
+ /* Split a non root node */
+ closure_sync(&cl);
+ make_btree_freeing_key(b, parent_keys.top);
+ bch_keylist_push(&parent_keys);
- for (i = 0; i < KEY_PTRS(&b->key); i++) {
- uint8_t g = PTR_BUCKET(b->c, &b->key, i)->gen + 1;
+ btree_node_free(b);
- SET_PTR_GEN(op->keys.top, i, g);
- }
-
- bch_keylist_push(&op->keys);
- closure_sync(&op->cl);
- atomic_inc(&b->c->prio_blocked);
+ bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
+ BUG_ON(!bch_keylist_empty(&parent_keys));
}
rw_unlock(true, n1);
- btree_node_free(b, op);
bch_time_stats_update(&b->c->btree_split_time, start_time);
return 0;
err_free2:
- __bkey_put(n2->c, &n2->key);
- btree_node_free(n2, op);
+ btree_node_free(n2);
rw_unlock(true, n2);
err_free1:
- __bkey_put(n1->c, &n1->key);
- btree_node_free(n1, op);
+ btree_node_free(n1);
rw_unlock(true, n1);
err:
if (n3 == ERR_PTR(-EAGAIN) ||
@@ -2009,116 +2138,126 @@ err:
return -ENOMEM;
}
-static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
- struct keylist *stack_keys)
+static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ atomic_t *journal_ref,
+ struct bkey *replace_key)
{
- if (b->level) {
- int ret;
- struct bkey *insert = op->keys.bottom;
- struct bkey *k = bch_next_recurse_key(b, &START_KEY(insert));
-
- if (!k) {
- btree_bug(b, "no key to recurse on at level %i/%i",
- b->level, b->c->root->level);
+ BUG_ON(b->level && replace_key);
- op->keys.top = op->keys.bottom;
- return -EIO;
+ if (should_split(b)) {
+ if (current->bio_list) {
+ op->lock = b->c->root->level + 1;
+ return -EAGAIN;
+ } else if (op->lock <= b->c->root->level) {
+ op->lock = b->c->root->level + 1;
+ return -EINTR;
+ } else {
+ /* Invalidated all iterators */
+ return btree_split(b, op, insert_keys, replace_key) ?:
+ -EINTR;
}
+ } else {
+ BUG_ON(write_block(b) != b->sets[b->nsets].data);
- if (bkey_cmp(insert, k) > 0) {
- unsigned i;
-
- if (op->type == BTREE_REPLACE) {
- __bkey_put(b->c, insert);
- op->keys.top = op->keys.bottom;
- op->insert_collision = true;
- return 0;
- }
+ if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
+ if (!b->level)
+ bch_btree_leaf_dirty(b, journal_ref);
+ else
+ bch_btree_node_write_sync(b);
+ }
- for (i = 0; i < KEY_PTRS(insert); i++)
- atomic_inc(&PTR_BUCKET(b->c, insert, i)->pin);
+ return 0;
+ }
+}
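[editor's note] bch_btree_insert_node() front-loads the awkward cases: a node that would overflow has to be split, but splitting may block on allocation, which is not allowed under generic_make_request() (-EAGAIN punts the request to a workqueue), and it needs write locks up to the parent (-EINTR makes the caller retake locks higher up and retry). The error-code convention can be summarized as a small decision function; a sketch under those assumptions, where insert_or_split and its parameters are illustrative names:

    #include <errno.h>
    #include <stdbool.h>

    /*
     * Decision made at the top of bch_btree_insert_node(): split if the node
     * would overflow, but never block under generic_make_request(), and only
     * split once write locks are held high enough up the tree.
     */
    int insert_or_split(bool node_would_overflow, bool under_generic_make_request,
                        int held_lock_level, int root_level)
    {
        if (!node_would_overflow)
            return 0;               /* just insert into this node */

        if (under_generic_make_request)
            return -EAGAIN;         /* punt to a workqueue and retry there */

        if (held_lock_level <= root_level)
            return -EINTR;          /* retake locks higher up, then retry */

        return 1;                   /* safe to split right here */
    }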
- bkey_copy(stack_keys->top, insert);
+int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+ struct bkey *check_key)
+{
+ int ret = -EINTR;
+ uint64_t btree_ptr = b->key.ptr[0];
+ unsigned long seq = b->seq;
+ struct keylist insert;
+ bool upgrade = op->lock == -1;
- bch_cut_back(k, insert);
- bch_cut_front(k, stack_keys->top);
+ bch_keylist_init(&insert);
- bch_keylist_push(stack_keys);
- }
+ if (upgrade) {
+ rw_unlock(false, b);
+ rw_lock(true, b, b->level);
- ret = btree(insert_recurse, k, b, op, stack_keys);
- if (ret)
- return ret;
+ if (b->key.ptr[0] != btree_ptr ||
+ b->seq != seq + 1)
+ goto out;
}
- if (!bch_keylist_empty(&op->keys)) {
- if (should_split(b)) {
- if (op->lock <= b->c->root->level) {
- BUG_ON(b->level);
- op->lock = b->c->root->level + 1;
- return -EINTR;
- }
- return btree_split(b, op);
- }
+ SET_KEY_PTRS(check_key, 1);
+ get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
- BUG_ON(write_block(b) != b->sets[b->nsets].data);
+ SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
- if (bch_btree_insert_keys(b, op)) {
- if (!b->level)
- bch_btree_leaf_dirty(b, op);
- else
- bch_btree_node_write(b, &op->cl);
- }
- }
+ bch_keylist_add(&insert, check_key);
- return 0;
+ ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
+
+ BUG_ON(!ret && !bch_keylist_empty(&insert));
+out:
+ if (upgrade)
+ downgrade_write(&b->lock);
+ return ret;
}
-int bch_btree_insert(struct btree_op *op, struct cache_set *c)
+struct btree_insert_op {
+ struct btree_op op;
+ struct keylist *keys;
+ atomic_t *journal_ref;
+ struct bkey *replace_key;
+};
+
+int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
- int ret = 0;
- struct keylist stack_keys;
+ struct btree_insert_op *op = container_of(b_op,
+ struct btree_insert_op, op);
- /*
- * Don't want to block with the btree locked unless we have to,
- * otherwise we get deadlocks with try_harder and between split/gc
- */
- clear_closure_blocking(&op->cl);
-
- BUG_ON(bch_keylist_empty(&op->keys));
- bch_keylist_copy(&stack_keys, &op->keys);
- bch_keylist_init(&op->keys);
-
- while (!bch_keylist_empty(&stack_keys) ||
- !bch_keylist_empty(&op->keys)) {
- if (bch_keylist_empty(&op->keys)) {
- bch_keylist_add(&op->keys,
- bch_keylist_pop(&stack_keys));
- op->lock = 0;
- }
+ int ret = bch_btree_insert_node(b, &op->op, op->keys,
+ op->journal_ref, op->replace_key);
+ if (ret && !bch_keylist_empty(op->keys))
+ return ret;
+ else
+ return MAP_DONE;
+}
- ret = btree_root(insert_recurse, c, op, &stack_keys);
+int bch_btree_insert(struct cache_set *c, struct keylist *keys,
+ atomic_t *journal_ref, struct bkey *replace_key)
+{
+ struct btree_insert_op op;
+ int ret = 0;
- if (ret == -EAGAIN) {
- ret = 0;
- closure_sync(&op->cl);
- } else if (ret) {
- struct bkey *k;
+ BUG_ON(current->bio_list);
+ BUG_ON(bch_keylist_empty(keys));
+
+ bch_btree_op_init(&op.op, 0);
+ op.keys = keys;
+ op.journal_ref = journal_ref;
+ op.replace_key = replace_key;
+
+ while (!ret && !bch_keylist_empty(keys)) {
+ op.op.lock = 0;
+ ret = bch_btree_map_leaf_nodes(&op.op, c,
+ &START_KEY(keys->keys),
+ btree_insert_fn);
+ }
- pr_err("error %i trying to insert key for %s",
- ret, op_type(op));
+ if (ret) {
+ struct bkey *k;
- while ((k = bch_keylist_pop(&stack_keys) ?:
- bch_keylist_pop(&op->keys)))
- bkey_put(c, k, 0);
- }
- }
+ pr_err("error %i", ret);
- bch_keylist_free(&stack_keys);
+ while ((k = bch_keylist_pop(keys)))
+ bkey_put(c, k);
+ } else if (op.op.insert_collision)
+ ret = -ESRCH;
- if (op->journal)
- atomic_dec_bug(op->journal);
- op->journal = NULL;
return ret;
}
@@ -2141,132 +2280,81 @@ void bch_btree_set_root(struct btree *b)
mutex_unlock(&b->c->bucket_lock);
b->c->root = b;
- __bkey_put(b->c, &b->key);
bch_journal_meta(b->c, &cl);
closure_sync(&cl);
}
-/* Cache lookup */
+/* Map across nodes or keys */
-static int submit_partial_cache_miss(struct btree *b, struct btree_op *op,
- struct bkey *k)
+static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
+ struct bkey *from,
+ btree_map_nodes_fn *fn, int flags)
{
- struct search *s = container_of(op, struct search, op);
- struct bio *bio = &s->bio.bio;
- int ret = 0;
+ int ret = MAP_CONTINUE;
- while (!ret &&
- !op->lookup_done) {
- unsigned sectors = INT_MAX;
+ if (b->level) {
+ struct bkey *k;
+ struct btree_iter iter;
- if (KEY_INODE(k) == op->inode) {
- if (KEY_START(k) <= bio->bi_sector)
- break;
+ bch_btree_iter_init(b, &iter, from);
- sectors = min_t(uint64_t, sectors,
- KEY_START(k) - bio->bi_sector);
- }
+ while ((k = bch_btree_iter_next_filter(&iter, b,
+ bch_ptr_bad))) {
+ ret = btree(map_nodes_recurse, k, b,
+ op, from, fn, flags);
+ from = NULL;
- ret = s->d->cache_miss(b, s, bio, sectors);
+ if (ret != MAP_CONTINUE)
+ return ret;
+ }
}
+ if (!b->level || flags == MAP_ALL_NODES)
+ ret = fn(op, b);
+
return ret;
}
-/*
- * Read from a single key, handling the initial cache miss if the key starts in
- * the middle of the bio
- */
-static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
- struct bkey *k)
+int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
- struct search *s = container_of(op, struct search, op);
- struct bio *bio = &s->bio.bio;
- unsigned ptr;
- struct bio *n;
-
- int ret = submit_partial_cache_miss(b, op, k);
- if (ret || op->lookup_done)
- return ret;
-
- /* XXX: figure out best pointer - for multiple cache devices */
- ptr = 0;
-
- PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
-
- while (!op->lookup_done &&
- KEY_INODE(k) == op->inode &&
- bio->bi_sector < KEY_OFFSET(k)) {
- struct bkey *bio_key;
- sector_t sector = PTR_OFFSET(k, ptr) +
- (bio->bi_sector - KEY_START(k));
- unsigned sectors = min_t(uint64_t, INT_MAX,
- KEY_OFFSET(k) - bio->bi_sector);
-
- n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
- if (n == bio)
- op->lookup_done = true;
-
- bio_key = &container_of(n, struct bbio, bio)->key;
-
- /*
- * The bucket we're reading from might be reused while our bio
- * is in flight, and we could then end up reading the wrong
- * data.
- *
- * We guard against this by checking (in cache_read_endio()) if
- * the pointer is stale again; if so, we treat it as an error
- * and reread from the backing device (but we don't pass that
- * error up anywhere).
- */
-
- bch_bkey_copy_single_ptr(bio_key, k, ptr);
- SET_PTR_OFFSET(bio_key, 0, sector);
-
- n->bi_end_io = bch_cache_read_endio;
- n->bi_private = &s->cl;
-
- __bch_submit_bbio(n, b->c);
- }
-
- return 0;
+ return btree_root(map_nodes_recurse, c, op, from, fn, flags);
}
-int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
+static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+ struct bkey *from, btree_map_keys_fn *fn,
+ int flags)
{
- struct search *s = container_of(op, struct search, op);
- struct bio *bio = &s->bio.bio;
-
- int ret = 0;
+ int ret = MAP_CONTINUE;
struct bkey *k;
struct btree_iter iter;
- bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0));
- do {
- k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
- if (!k) {
- /*
- * b->key would be exactly what we want, except that
- * pointers to btree nodes have nonzero size - we
- * wouldn't go far enough
- */
+ bch_btree_iter_init(b, &iter, from);
- ret = submit_partial_cache_miss(b, op,
- &KEY(KEY_INODE(&b->key),
- KEY_OFFSET(&b->key), 0));
- break;
- }
+ while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
+ ret = !b->level
+ ? fn(op, b, k)
+ : btree(map_keys_recurse, k, b, op, from, fn, flags);
+ from = NULL;
+
+ if (ret != MAP_CONTINUE)
+ return ret;
+ }
- ret = b->level
- ? btree(search_recurse, k, b, op)
- : submit_partial_cache_hit(b, op, k);
- } while (!ret &&
- !op->lookup_done);
+ if (!b->level && (flags & MAP_END_KEY))
+ ret = fn(op, b, &KEY(KEY_INODE(&b->key),
+ KEY_OFFSET(&b->key), 0));
return ret;
}
+int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_keys_fn *fn, int flags)
+{
+ return btree_root(map_keys_recurse, c, op, from, fn, flags);
+}
+
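[editor's note] The special-purpose recursions (cache lookup, insert recursion, keybuf refill) are replaced by two generic walkers, bch_btree_map_nodes() and bch_btree_map_keys(), which visit nodes or keys in order and call back into the caller; the callback returns MAP_CONTINUE to keep walking or MAP_DONE to stop. A minimal standalone illustration of that convention over an ordinary sorted array rather than a btree:

    #include <stdio.h>

    #define MAP_DONE        0
    #define MAP_CONTINUE    1

    typedef int (map_fn)(void *ctx, int key);

    /* Walk keys in order, stopping as soon as the callback says MAP_DONE. */
    static int map_keys(const int *keys, unsigned n, map_fn *fn, void *ctx)
    {
        unsigned i;
        int ret = MAP_CONTINUE;

        for (i = 0; i < n && ret == MAP_CONTINUE; i++)
            ret = fn(ctx, keys[i]);

        return ret;
    }

    /* Example callback: stop once we have seen a key >= the target. */
    static int find_first_ge(void *ctx, int key)
    {
        int target = *(int *)ctx;

        if (key >= target) {
            printf("found %d\n", key);
            return MAP_DONE;
        }
        return MAP_CONTINUE;
    }

    int main(void)
    {
        int keys[] = { 2, 5, 9, 14 };
        int target = 7;

        map_keys(keys, 4, find_first_ge, &target);
        return 0;
    }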
/* Keybuf code */
static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
@@ -2285,80 +2373,79 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}
-static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
- struct keybuf *buf, struct bkey *end,
- keybuf_pred_fn *pred)
-{
- struct btree_iter iter;
- bch_btree_iter_init(b, &iter, &buf->last_scanned);
-
- while (!array_freelist_empty(&buf->freelist)) {
- struct bkey *k = bch_btree_iter_next_filter(&iter, b,
- bch_ptr_bad);
-
- if (!b->level) {
- if (!k) {
- buf->last_scanned = b->key;
- break;
- }
+struct refill {
+ struct btree_op op;
+ unsigned nr_found;
+ struct keybuf *buf;
+ struct bkey *end;
+ keybuf_pred_fn *pred;
+};
- buf->last_scanned = *k;
- if (bkey_cmp(&buf->last_scanned, end) >= 0)
- break;
+static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
+ struct bkey *k)
+{
+ struct refill *refill = container_of(op, struct refill, op);
+ struct keybuf *buf = refill->buf;
+ int ret = MAP_CONTINUE;
- if (pred(buf, k)) {
- struct keybuf_key *w;
+ if (bkey_cmp(k, refill->end) >= 0) {
+ ret = MAP_DONE;
+ goto out;
+ }
- spin_lock(&buf->lock);
+ if (!KEY_SIZE(k)) /* end key */
+ goto out;
- w = array_alloc(&buf->freelist);
+ if (refill->pred(buf, k)) {
+ struct keybuf_key *w;
- w->private = NULL;
- bkey_copy(&w->key, k);
+ spin_lock(&buf->lock);
- if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
- array_free(&buf->freelist, w);
+ w = array_alloc(&buf->freelist);
+ if (!w) {
+ spin_unlock(&buf->lock);
+ return MAP_DONE;
+ }
- spin_unlock(&buf->lock);
- }
- } else {
- if (!k)
- break;
+ w->private = NULL;
+ bkey_copy(&w->key, k);
- btree(refill_keybuf, k, b, op, buf, end, pred);
- /*
- * Might get an error here, but can't really do anything
- * and it'll get logged elsewhere. Just read what we
- * can.
- */
+ if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
+ array_free(&buf->freelist, w);
+ else
+ refill->nr_found++;
- if (bkey_cmp(&buf->last_scanned, end) >= 0)
- break;
+ if (array_freelist_empty(&buf->freelist))
+ ret = MAP_DONE;
- cond_resched();
- }
+ spin_unlock(&buf->lock);
}
-
- return 0;
+out:
+ buf->last_scanned = *k;
+ return ret;
}
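[editor's note] Because the walkers only know about struct btree_op, each caller embeds the op in its own context struct - struct refill here, struct btree_insert_op earlier - and the callback recovers its context with container_of(). A self-contained sketch of that embedding pattern; the container_of macro is spelled out in its usual offsetof form since this is plain userspace C, and the field names merely mimic struct refill.

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct btree_op {           /* the part the generic walker knows about */
        short lock;
    };

    struct refill {             /* caller-private context wrapping the op */
        struct btree_op op;
        unsigned nr_found;
    };

    /* Generic code only ever sees the embedded btree_op... */
    static void callback(struct btree_op *op)
    {
        /* ...but the callback can get its private context back. */
        struct refill *refill = container_of(op, struct refill, op);

        refill->nr_found++;
    }

    int main(void)
    {
        struct refill r = { .op = { .lock = -1 }, .nr_found = 0 };

        callback(&r.op);
        printf("nr_found = %u\n", r.nr_found);
        return 0;
    }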
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
struct bkey *end, keybuf_pred_fn *pred)
{
struct bkey start = buf->last_scanned;
- struct btree_op op;
- bch_btree_op_init_stack(&op);
+ struct refill refill;
cond_resched();
- btree_root(refill_keybuf, c, &op, buf, end, pred);
- closure_sync(&op.cl);
+ bch_btree_op_init(&refill.op, -1);
+ refill.nr_found = 0;
+ refill.buf = buf;
+ refill.end = end;
+ refill.pred = pred;
+
+ bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
+ refill_keybuf_fn, MAP_END_KEY);
- pr_debug("found %s keys from %llu:%llu to %llu:%llu",
- RB_EMPTY_ROOT(&buf->keys) ? "no" :
- array_freelist_empty(&buf->freelist) ? "some" : "a few",
- KEY_INODE(&start), KEY_OFFSET(&start),
- KEY_INODE(&buf->last_scanned), KEY_OFFSET(&buf->last_scanned));
+ trace_bcache_keyscan(refill.nr_found,
+ KEY_INODE(&start), KEY_OFFSET(&start),
+ KEY_INODE(&buf->last_scanned),
+ KEY_OFFSET(&buf->last_scanned));
spin_lock(&buf->lock);
@@ -2436,9 +2523,9 @@ struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
}
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
- struct keybuf *buf,
- struct bkey *end,
- keybuf_pred_fn *pred)
+ struct keybuf *buf,
+ struct bkey *end,
+ keybuf_pred_fn *pred)
{
struct keybuf_key *ret;
@@ -2471,14 +2558,12 @@ void bch_btree_exit(void)
{
if (btree_io_wq)
destroy_workqueue(btree_io_wq);
- if (bch_gc_wq)
- destroy_workqueue(bch_gc_wq);
}
int __init bch_btree_init(void)
{
- if (!(bch_gc_wq = create_singlethread_workqueue("bch_btree_gc")) ||
- !(btree_io_wq = create_singlethread_workqueue("bch_btree_io")))
+ btree_io_wq = create_singlethread_workqueue("bch_btree_io");
+ if (!btree_io_wq)
return -ENOMEM;
return 0;
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 3333d3723633..767e75570896 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -125,6 +125,7 @@ struct btree {
unsigned long seq;
struct rw_semaphore lock;
struct cache_set *c;
+ struct btree *parent;
unsigned long flags;
uint16_t written; /* would be nice to kill */
@@ -200,12 +201,7 @@ static inline bool bkey_written(struct btree *b, struct bkey *k)
static inline void set_gc_sectors(struct cache_set *c)
{
- atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8);
-}
-
-static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
-{
- return __bch_ptr_invalid(b->c, b->level, k);
+ atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
}
static inline struct bkey *bch_btree_iter_init(struct btree *b,
@@ -215,6 +211,16 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
return __bch_btree_iter_init(b, iter, search, b->sets);
}
+static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
+{
+ if (b->level)
+ return bch_btree_ptr_invalid(b->c, k);
+ else
+ return bch_extent_ptr_invalid(b->c, k);
+}
+
+void bkey_put(struct cache_set *c, struct bkey *k);
+
/* Looping macros */
#define for_each_cached_btree(b, c, iter) \
@@ -234,51 +240,17 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
/* Recursing down the btree */
struct btree_op {
- struct closure cl;
- struct cache_set *c;
-
- /* Journal entry we have a refcount on */
- atomic_t *journal;
-
- /* Bio to be inserted into the cache */
- struct bio *cache_bio;
-
- unsigned inode;
-
- uint16_t write_prio;
-
/* Btree level at which we start taking write locks */
short lock;
- /* Btree insertion type */
- enum {
- BTREE_INSERT,
- BTREE_REPLACE
- } type:8;
-
- unsigned csum:1;
- unsigned skip:1;
- unsigned flush_journal:1;
-
- unsigned insert_data_done:1;
- unsigned lookup_done:1;
unsigned insert_collision:1;
-
- /* Anything after this point won't get zeroed in do_bio_hook() */
-
- /* Keys to be inserted */
- struct keylist keys;
- BKEY_PADDED(replace);
};
-enum {
- BTREE_INSERT_STATUS_INSERT,
- BTREE_INSERT_STATUS_BACK_MERGE,
- BTREE_INSERT_STATUS_OVERWROTE,
- BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
-void bch_btree_op_init_stack(struct btree_op *);
+static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+{
+ memset(op, 0, sizeof(struct btree_op));
+ op->lock = write_lock_level;
+}
static inline void rw_lock(bool w, struct btree *b, int level)
{
@@ -290,108 +262,71 @@ static inline void rw_lock(bool w, struct btree *b, int level)
static inline void rw_unlock(bool w, struct btree *b)
{
-#ifdef CONFIG_BCACHE_EDEBUG
- unsigned i;
-
- if (w && b->key.ptr[0])
- for (i = 0; i <= b->nsets; i++)
- bch_check_key_order(b, b->sets[i].data);
-#endif
-
if (w)
b->seq++;
(w ? up_write : up_read)(&b->lock);
}
-#define insert_lock(s, b) ((b)->level <= (s)->lock)
+void bch_btree_node_read(struct btree *);
+void bch_btree_node_write(struct btree *, struct closure *);
-/*
- * These macros are for recursing down the btree - they handle the details of
- * locking and looking up nodes in the cache for you. They're best treated as
- * mere syntax when reading code that uses them.
- *
- * op->lock determines whether we take a read or a write lock at a given depth.
- * If you've got a read lock and find that you need a write lock (i.e. you're
- * going to have to split), set op->lock and return -EINTR; btree_root() will
- * call you again and you'll have the correct lock.
- */
+void bch_btree_set_root(struct btree *);
+struct btree *bch_btree_node_alloc(struct cache_set *, int, bool);
+struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
-/**
- * btree - recurse down the btree on a specified key
- * @fn: function to call, which will be passed the child node
- * @key: key to recurse on
- * @b: parent btree node
- * @op: pointer to struct btree_op
- */
-#define btree(fn, key, b, op, ...) \
-({ \
- int _r, l = (b)->level - 1; \
- bool _w = l <= (op)->lock; \
- struct btree *_b = bch_btree_node_get((b)->c, key, l, op); \
- if (!IS_ERR(_b)) { \
- _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
- rw_unlock(_w, _b); \
- } else \
- _r = PTR_ERR(_b); \
- _r; \
-})
-
-/**
- * btree_root - call a function on the root of the btree
- * @fn: function to call, which will be passed the child node
- * @c: cache set
- * @op: pointer to struct btree_op
- */
-#define btree_root(fn, c, op, ...) \
-({ \
- int _r = -EINTR; \
- do { \
- struct btree *_b = (c)->root; \
- bool _w = insert_lock(op, _b); \
- rw_lock(_w, _b, _b->level); \
- if (_b == (c)->root && \
- _w == insert_lock(op, _b)) \
- _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
- rw_unlock(_w, _b); \
- bch_cannibalize_unlock(c, &(op)->cl); \
- } while (_r == -EINTR); \
- \
- _r; \
-})
+int bch_btree_insert_check_key(struct btree *, struct btree_op *,
+ struct bkey *);
+int bch_btree_insert(struct cache_set *, struct keylist *,
+ atomic_t *, struct bkey *);
+
+int bch_gc_thread_start(struct cache_set *);
+size_t bch_btree_gc_finish(struct cache_set *);
+void bch_moving_gc(struct cache_set *);
+int bch_btree_check(struct cache_set *);
+uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
-static inline bool should_split(struct btree *b)
+static inline void wake_up_gc(struct cache_set *c)
{
- struct bset *i = write_block(b);
- return b->written >= btree_blocks(b) ||
- (i->seq == b->sets[0].data->seq &&
- b->written + __set_blocks(i, i->keys + 15, b->c)
- > btree_blocks(b));
+ if (c->gc_thread)
+ wake_up_process(c->gc_thread);
}
-void bch_btree_node_read(struct btree *);
-void bch_btree_node_write(struct btree *, struct closure *);
+#define MAP_DONE 0
+#define MAP_CONTINUE 1
-void bch_cannibalize_unlock(struct cache_set *, struct closure *);
-void bch_btree_set_root(struct btree *);
-struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
-struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
- int, struct btree_op *);
+#define MAP_ALL_NODES 0
+#define MAP_LEAF_NODES 1
-bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
- struct bio *);
-int bch_btree_insert(struct btree_op *, struct cache_set *);
+#define MAP_END_KEY 1
-int bch_btree_search_recurse(struct btree *, struct btree_op *);
+typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
+int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
+ struct bkey *, btree_map_nodes_fn *, int);
-void bch_queue_gc(struct cache_set *);
-size_t bch_btree_gc_finish(struct cache_set *);
-void bch_moving_gc(struct closure *);
-int bch_btree_check(struct cache_set *, struct btree_op *);
-uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
+static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_nodes_fn *fn)
+{
+ return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
+}
+
+static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
+ struct cache_set *c,
+ struct bkey *from,
+ btree_map_nodes_fn *fn)
+{
+ return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
+}
+
+typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
+ struct bkey *);
+int bch_btree_map_keys(struct btree_op *, struct cache_set *,
+ struct bkey *, btree_map_keys_fn *, int);
+
+typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
void bch_keybuf_init(struct keybuf *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
- keybuf_pred_fn *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *,
+ struct bkey *, keybuf_pred_fn *);
bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
struct bkey *);
void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 9aba2017f0d1..dfff2410322e 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -11,17 +11,6 @@
#include "closure.h"
-void closure_queue(struct closure *cl)
-{
- struct workqueue_struct *wq = cl->wq;
- if (wq) {
- INIT_WORK(&cl->work, cl->work.func);
- BUG_ON(!queue_work(wq, &cl->work));
- } else
- cl->fn(cl);
-}
-EXPORT_SYMBOL_GPL(closure_queue);
-
#define CL_FIELD(type, field) \
case TYPE_ ## type: \
return &container_of(cl, struct type, cl)->field
@@ -30,17 +19,6 @@ static struct closure_waitlist *closure_waitlist(struct closure *cl)
{
switch (cl->type) {
CL_FIELD(closure_with_waitlist, wait);
- CL_FIELD(closure_with_waitlist_and_timer, wait);
- default:
- return NULL;
- }
-}
-
-static struct timer_list *closure_timer(struct closure *cl)
-{
- switch (cl->type) {
- CL_FIELD(closure_with_timer, timer);
- CL_FIELD(closure_with_waitlist_and_timer, timer);
default:
return NULL;
}
@@ -51,7 +29,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
int r = flags & CLOSURE_REMAINING_MASK;
BUG_ON(flags & CLOSURE_GUARD_MASK);
- BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING)));
+ BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
/* Must deliver precisely one wakeup */
if (r == 1 && (flags & CLOSURE_SLEEPING))
@@ -59,7 +37,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
if (!r) {
if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
- /* CLOSURE_BLOCKING might be set - clear it */
atomic_set(&cl->remaining,
CLOSURE_REMAINING_INITIALIZER);
closure_queue(cl);
@@ -90,13 +67,13 @@ void closure_sub(struct closure *cl, int v)
{
closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
-EXPORT_SYMBOL_GPL(closure_sub);
+EXPORT_SYMBOL(closure_sub);
void closure_put(struct closure *cl)
{
closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
-EXPORT_SYMBOL_GPL(closure_put);
+EXPORT_SYMBOL(closure_put);
static void set_waiting(struct closure *cl, unsigned long f)
{
@@ -133,7 +110,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
closure_sub(cl, CLOSURE_WAITING + 1);
}
}
-EXPORT_SYMBOL_GPL(__closure_wake_up);
+EXPORT_SYMBOL(__closure_wake_up);
bool closure_wait(struct closure_waitlist *list, struct closure *cl)
{
@@ -146,7 +123,7 @@ bool closure_wait(struct closure_waitlist *list, struct closure *cl)
return true;
}
-EXPORT_SYMBOL_GPL(closure_wait);
+EXPORT_SYMBOL(closure_wait);
/**
* closure_sync() - sleep until a closure has nothing left to wait on
@@ -169,7 +146,7 @@ void closure_sync(struct closure *cl)
__closure_end_sleep(cl);
}
-EXPORT_SYMBOL_GPL(closure_sync);
+EXPORT_SYMBOL(closure_sync);
/**
* closure_trylock() - try to acquire the closure, without waiting
@@ -183,17 +160,17 @@ bool closure_trylock(struct closure *cl, struct closure *parent)
CLOSURE_REMAINING_INITIALIZER) != -1)
return false;
- closure_set_ret_ip(cl);
-
smp_mb();
+
cl->parent = parent;
if (parent)
closure_get(parent);
+ closure_set_ret_ip(cl);
closure_debug_create(cl);
return true;
}
-EXPORT_SYMBOL_GPL(closure_trylock);
+EXPORT_SYMBOL(closure_trylock);
void __closure_lock(struct closure *cl, struct closure *parent,
struct closure_waitlist *wait_list)
@@ -205,57 +182,11 @@ void __closure_lock(struct closure *cl, struct closure *parent,
if (closure_trylock(cl, parent))
return;
- closure_wait_event_sync(wait_list, &wait,
- atomic_read(&cl->remaining) == -1);
+ closure_wait_event(wait_list, &wait,
+ atomic_read(&cl->remaining) == -1);
}
}
-EXPORT_SYMBOL_GPL(__closure_lock);
-
-static void closure_delay_timer_fn(unsigned long data)
-{
- struct closure *cl = (struct closure *) data;
- closure_sub(cl, CLOSURE_TIMER + 1);
-}
-
-void do_closure_timer_init(struct closure *cl)
-{
- struct timer_list *timer = closure_timer(cl);
-
- init_timer(timer);
- timer->data = (unsigned long) cl;
- timer->function = closure_delay_timer_fn;
-}
-EXPORT_SYMBOL_GPL(do_closure_timer_init);
-
-bool __closure_delay(struct closure *cl, unsigned long delay,
- struct timer_list *timer)
-{
- if (atomic_read(&cl->remaining) & CLOSURE_TIMER)
- return false;
-
- BUG_ON(timer_pending(timer));
-
- timer->expires = jiffies + delay;
-
- atomic_add(CLOSURE_TIMER + 1, &cl->remaining);
- add_timer(timer);
- return true;
-}
-EXPORT_SYMBOL_GPL(__closure_delay);
-
-void __closure_flush(struct closure *cl, struct timer_list *timer)
-{
- if (del_timer(timer))
- closure_sub(cl, CLOSURE_TIMER + 1);
-}
-EXPORT_SYMBOL_GPL(__closure_flush);
-
-void __closure_flush_sync(struct closure *cl, struct timer_list *timer)
-{
- if (del_timer_sync(timer))
- closure_sub(cl, CLOSURE_TIMER + 1);
-}
-EXPORT_SYMBOL_GPL(__closure_flush_sync);
+EXPORT_SYMBOL(__closure_lock);
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -273,7 +204,7 @@ void closure_debug_create(struct closure *cl)
list_add(&cl->all, &closure_list);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL_GPL(closure_debug_create);
+EXPORT_SYMBOL(closure_debug_create);
void closure_debug_destroy(struct closure *cl)
{
@@ -286,7 +217,7 @@ void closure_debug_destroy(struct closure *cl)
list_del(&cl->all);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL_GPL(closure_debug_destroy);
+EXPORT_SYMBOL(closure_debug_destroy);
static struct dentry *debug;
@@ -304,14 +235,12 @@ static int debug_seq_show(struct seq_file *f, void *data)
cl, (void *) cl->ip, cl->fn, cl->parent,
r & CLOSURE_REMAINING_MASK);
- seq_printf(f, "%s%s%s%s%s%s\n",
+ seq_printf(f, "%s%s%s%s\n",
test_bit(WORK_STRUCT_PENDING,
work_data_bits(&cl->work)) ? "Q" : "",
r & CLOSURE_RUNNING ? "R" : "",
- r & CLOSURE_BLOCKING ? "B" : "",
r & CLOSURE_STACK ? "S" : "",
- r & CLOSURE_SLEEPING ? "Sl" : "",
- r & CLOSURE_TIMER ? "T" : "");
+ r & CLOSURE_SLEEPING ? "Sl" : "");
if (r & CLOSURE_WAITING)
seq_printf(f, " W %pF\n",
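
closure_put_after_sub() above is the heart of this file: it drops a reference, and once the count in the low bits reaches zero it either re-queues cl->fn or takes the destructor path, depending on CLOSURE_DESTRUCTOR. Stripped of the flag bits and the workqueue plumbing, the put side is ordinary refcounting; a minimal userspace sketch, with invented obj_get()/obj_put() names and a release callback, looks like this:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int remaining;
	void (*release)(struct obj *);	/* runs when the last ref is dropped */
};

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->remaining, 1);
}

static void obj_put(struct obj *o)
{
	/* fetch_sub returns the old value; old value 1 means we hit zero */
	if (atomic_fetch_sub(&o->remaining, 1) == 1)
		o->release(o);
}

static void obj_free(struct obj *o)
{
	printf("last reference dropped, freeing\n");
	free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->remaining, 1);
	o->release = obj_free;

	obj_get(o);	/* a second user takes a reference */
	obj_put(o);	/* ... and drops it */
	obj_put(o);	/* original reference; triggers obj_free() */
	return 0;
}
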
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 00039924ea9d..9762f1be3304 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -155,21 +155,6 @@
* delayed_work embeds a work item and a timer_list. The important thing is, use
* it exactly like you would a regular closure and closure_put() will magically
* handle everything for you.
- *
- * We've got closures that embed timers, too. They're called, appropriately
- * enough:
- * struct closure_with_timer;
- *
- * This gives you access to closure_delay(). It takes a refcount for a specified
- * number of jiffies - you could then call closure_sync() (for a slightly
- * convoluted version of msleep()) or continue_at() - which gives you the same
- * effect as using a delayed work item, except you can reuse the work_struct
- * already embedded in struct closure.
- *
- * Lastly, there's struct closure_with_waitlist_and_timer. It does what you
- * probably expect, if you happen to need the features of both. (You don't
- * really want to know how all this is implemented, but if I've done my job
- * right you shouldn't have to care).
*/
struct closure;
@@ -182,16 +167,11 @@ struct closure_waitlist {
enum closure_type {
TYPE_closure = 0,
TYPE_closure_with_waitlist = 1,
- TYPE_closure_with_timer = 2,
- TYPE_closure_with_waitlist_and_timer = 3,
- MAX_CLOSURE_TYPE = 3,
+ MAX_CLOSURE_TYPE = 1,
};
enum closure_state {
/*
- * CLOSURE_BLOCKING: Causes closure_wait_event() to block, instead of
- * waiting asynchronously
- *
* CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
* the thread that owns the closure, and cleared by the thread that's
* waking up the closure.
@@ -200,10 +180,6 @@ enum closure_state {
* - indicates that cl->task is valid and closure_put() may wake it up.
* Only set or cleared by the thread that owns the closure.
*
- * CLOSURE_TIMER: Analogous to CLOSURE_WAITING, indicates that a closure
- * has an outstanding timer. Must be set by the thread that owns the
- * closure, and cleared by the timer function when the timer goes off.
- *
* The rest are for debugging and don't affect behaviour:
*
* CLOSURE_RUNNING: Set when a closure is running (i.e. by
@@ -218,19 +194,17 @@ enum closure_state {
* closure with this flag set
*/
- CLOSURE_BITS_START = (1 << 19),
- CLOSURE_DESTRUCTOR = (1 << 19),
- CLOSURE_BLOCKING = (1 << 21),
- CLOSURE_WAITING = (1 << 23),
- CLOSURE_SLEEPING = (1 << 25),
- CLOSURE_TIMER = (1 << 27),
+ CLOSURE_BITS_START = (1 << 23),
+ CLOSURE_DESTRUCTOR = (1 << 23),
+ CLOSURE_WAITING = (1 << 25),
+ CLOSURE_SLEEPING = (1 << 27),
CLOSURE_RUNNING = (1 << 29),
CLOSURE_STACK = (1 << 31),
};
#define CLOSURE_GUARD_MASK \
- ((CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING|CLOSURE_WAITING| \
- CLOSURE_SLEEPING|CLOSURE_TIMER|CLOSURE_RUNNING|CLOSURE_STACK) << 1)
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING| \
+ CLOSURE_RUNNING|CLOSURE_STACK) << 1)
#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
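
With the timer and blocking flags gone, the hunk above moves CLOSURE_BITS_START up to bit 23, so the low 23 bits of the single atomic hold the reference count (CLOSURE_REMAINING_MASK) and the bits above it hold state flags. A standalone sketch of that packing scheme, using made-up flag names and plain integer arithmetic rather than atomics:

#include <stdio.h>

#define BITS_START	(1u << 23)
#define REMAINING_MASK	(BITS_START - 1)	/* low 23 bits: refcount */
#define FLAG_WAITING	(1u << 25)
#define FLAG_RUNNING	(1u << 29)

int main(void)
{
	unsigned v = 1 | FLAG_RUNNING;		/* one ref, running */

	v += 1;					/* take another ref */
	v |= FLAG_WAITING;			/* mark it as waiting */

	printf("refs=%u running=%d waiting=%d\n",
	       v & REMAINING_MASK,
	       !!(v & FLAG_RUNNING),
	       !!(v & FLAG_WAITING));
	return 0;
}
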
@@ -268,17 +242,6 @@ struct closure_with_waitlist {
struct closure_waitlist wait;
};
-struct closure_with_timer {
- struct closure cl;
- struct timer_list timer;
-};
-
-struct closure_with_waitlist_and_timer {
- struct closure cl;
- struct closure_waitlist wait;
- struct timer_list timer;
-};
-
extern unsigned invalid_closure_type(void);
#define __CLOSURE_TYPE(cl, _t) \
@@ -289,14 +252,11 @@ extern unsigned invalid_closure_type(void);
( \
__CLOSURE_TYPE(cl, closure) \
__CLOSURE_TYPE(cl, closure_with_waitlist) \
- __CLOSURE_TYPE(cl, closure_with_timer) \
- __CLOSURE_TYPE(cl, closure_with_waitlist_and_timer) \
invalid_closure_type() \
)
void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
-void closure_queue(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
void closure_sync(struct closure *cl);
@@ -305,12 +265,6 @@ bool closure_trylock(struct closure *cl, struct closure *parent);
void __closure_lock(struct closure *cl, struct closure *parent,
struct closure_waitlist *wait_list);
-void do_closure_timer_init(struct closure *cl);
-bool __closure_delay(struct closure *cl, unsigned long delay,
- struct timer_list *timer);
-void __closure_flush(struct closure *cl, struct timer_list *timer);
-void __closure_flush_sync(struct closure *cl, struct timer_list *timer);
-
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
void closure_debug_init(void);
@@ -354,11 +308,6 @@ static inline void closure_set_stopped(struct closure *cl)
atomic_sub(CLOSURE_RUNNING, &cl->remaining);
}
-static inline bool closure_is_stopped(struct closure *cl)
-{
- return !(atomic_read(&cl->remaining) & CLOSURE_RUNNING);
-}
-
static inline bool closure_is_unlocked(struct closure *cl)
{
return atomic_read(&cl->remaining) == -1;
@@ -367,14 +316,6 @@ static inline bool closure_is_unlocked(struct closure *cl)
static inline void do_closure_init(struct closure *cl, struct closure *parent,
bool running)
{
- switch (cl->type) {
- case TYPE_closure_with_timer:
- case TYPE_closure_with_waitlist_and_timer:
- do_closure_timer_init(cl);
- default:
- break;
- }
-
cl->parent = parent;
if (parent)
closure_get(parent);
@@ -429,8 +370,7 @@ do { \
static inline void closure_init_stack(struct closure *cl)
{
memset(cl, 0, sizeof(struct closure));
- atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|
- CLOSURE_BLOCKING|CLOSURE_STACK);
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
}
/**
@@ -461,24 +401,6 @@ do { \
#define closure_lock(cl, parent) \
__closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
-/**
- * closure_delay() - delay some number of jiffies
- * @cl: the closure that will sleep
- * @delay: the delay in jiffies
- *
- * Takes a refcount on @cl which will be released after @delay jiffies; this may
- * be used to have a function run after a delay with continue_at(), or
- * closure_sync() may be used for a convoluted version of msleep().
- */
-#define closure_delay(cl, delay) \
- __closure_delay(__to_internal_closure(cl), delay, &(cl)->timer)
-
-#define closure_flush(cl) \
- __closure_flush(__to_internal_closure(cl), &(cl)->timer)
-
-#define closure_flush_sync(cl) \
- __closure_flush_sync(__to_internal_closure(cl), &(cl)->timer)
-
static inline void __closure_end_sleep(struct closure *cl)
{
__set_current_state(TASK_RUNNING);
@@ -498,40 +420,6 @@ static inline void __closure_start_sleep(struct closure *cl)
}
/**
- * closure_blocking() - returns true if the closure is in blocking mode.
- *
- * If a closure is in blocking mode, closure_wait_event() will sleep until the
- * condition is true instead of waiting asynchronously.
- */
-static inline bool closure_blocking(struct closure *cl)
-{
- return atomic_read(&cl->remaining) & CLOSURE_BLOCKING;
-}
-
-/**
- * set_closure_blocking() - put a closure in blocking mode.
- *
- * If a closure is in blocking mode, closure_wait_event() will sleep until the
- * condition is true instead of waiting asynchronously.
- *
- * Not thread safe - can only be called by the thread running the closure.
- */
-static inline void set_closure_blocking(struct closure *cl)
-{
- if (!closure_blocking(cl))
- atomic_add(CLOSURE_BLOCKING, &cl->remaining);
-}
-
-/*
- * Not thread safe - can only be called by the thread running the closure.
- */
-static inline void clear_closure_blocking(struct closure *cl)
-{
- if (closure_blocking(cl))
- atomic_sub(CLOSURE_BLOCKING, &cl->remaining);
-}
-
-/**
* closure_wake_up() - wake up all closures on a wait list.
*/
static inline void closure_wake_up(struct closure_waitlist *list)
@@ -561,63 +449,36 @@ static inline void closure_wake_up(struct closure_waitlist *list)
* refcount on our closure. If this was a stack allocated closure, that would be
* bad.
*/
-#define __closure_wait_event(list, cl, condition, _block) \
+#define closure_wait_event(list, cl, condition) \
({ \
- bool block = _block; \
typeof(condition) ret; \
\
while (1) { \
ret = (condition); \
if (ret) { \
__closure_wake_up(list); \
- if (block) \
- closure_sync(cl); \
- \
+ closure_sync(cl); \
break; \
} \
\
- if (block) \
- __closure_start_sleep(cl); \
- \
- if (!closure_wait(list, cl)) { \
- if (!block) \
- break; \
+ __closure_start_sleep(cl); \
\
+ if (!closure_wait(list, cl)) \
schedule(); \
- } \
} \
\
ret; \
})
-/**
- * closure_wait_event() - wait on a condition, synchronously or asynchronously.
- * @list: the wait list to wait on
- * @cl: the closure that is doing the waiting
- * @condition: a C expression for the event to wait for
- *
- * If the closure is in blocking mode, sleeps until the @condition evaluates to
- * true - exactly like wait_event().
- *
- * If the closure is not in blocking mode, waits asynchronously; if the
- * condition is currently false the @cl is put onto @list and returns. @list
- * owns a refcount on @cl; closure_sync() or continue_at() may be used later to
- * wait for another thread to wake up @list, which drops the refcount on @cl.
- *
- * Returns the value of @condition; @cl will be on @list iff @condition was
- * false.
- *
- * closure_wake_up(@list) must be called after changing any variable that could
- * cause @condition to become true.
- */
-#define closure_wait_event(list, cl, condition) \
- __closure_wait_event(list, cl, condition, closure_blocking(cl))
-
-#define closure_wait_event_async(list, cl, condition) \
- __closure_wait_event(list, cl, condition, false)
-
-#define closure_wait_event_sync(list, cl, condition) \
- __closure_wait_event(list, cl, condition, true)
+static inline void closure_queue(struct closure *cl)
+{
+ struct workqueue_struct *wq = cl->wq;
+ if (wq) {
+ INIT_WORK(&cl->work, cl->work.func);
+ BUG_ON(!queue_work(wq, &cl->work));
+ } else
+ cl->fn(cl);
+}
static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
struct workqueue_struct *wq)
@@ -642,7 +503,7 @@ do { \
#define continue_at_nobarrier(_cl, _fn, _wq) \
do { \
set_closure_fn(_cl, _fn, _wq); \
- closure_queue(cl); \
+ closure_queue(_cl); \
return; \
} while (0)
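
closure_queue(), now an inline in this header, dispatches cl->fn through the attached workqueue when cl->wq is set and calls it synchronously otherwise; continue_at_nobarrier() (fixed above to queue _cl rather than cl) builds directly on it. A toy userspace model of that decision follows; there is no real workqueue here, only a one-slot pending pointer, and every name is illustrative:

#include <stdio.h>

struct task;
typedef void (task_fn)(struct task *);

struct queue { struct task *pending; };

struct task {
	task_fn *fn;
	struct queue *wq;	/* NULL means "run synchronously" */
};

static void task_queue(struct task *t)
{
	if (t->wq)
		t->wq->pending = t;	/* a worker would pick this up later */
	else
		t->fn(t);		/* no queue attached: run right away */
}

static void say_hello(struct task *t)
{
	(void) t;
	printf("task ran\n");
}

int main(void)
{
	struct queue wq = { 0 };
	struct task deferred = { say_hello, &wq };
	struct task immediate = { say_hello, NULL };

	task_queue(&immediate);			/* prints immediately */
	task_queue(&deferred);			/* just parked on the queue */

	if (wq.pending)
		wq.pending->fn(wq.pending);	/* the "worker" drains it */
	return 0;
}
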
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 88e6411eab4f..0062824af38b 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -8,7 +8,6 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
-#include "request.h"
#include <linux/console.h>
#include <linux/debugfs.h>
@@ -77,29 +76,17 @@ int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
return out - buf;
}
-int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
-{
- return scnprintf(buf, size, "%zu level %i/%i",
- PTR_BUCKET_NR(b->c, &b->key, 0),
- b->level, b->c->root ? b->c->root->level : -1);
-}
-
-#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
-
-static bool skipped_backwards(struct btree *b, struct bkey *k)
-{
- return bkey_cmp(k, (!b->level)
- ? &START_KEY(bkey_next(k))
- : bkey_next(k)) > 0;
-}
+#ifdef CONFIG_BCACHE_DEBUG
static void dump_bset(struct btree *b, struct bset *i)
{
- struct bkey *k;
+ struct bkey *k, *next;
unsigned j;
char buf[80];
- for (k = i->start; k < end(i); k = bkey_next(k)) {
+ for (k = i->start; k < end(i); k = next) {
+ next = bkey_next(k);
+
bch_bkey_to_text(buf, sizeof(buf), k);
printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
(uint64_t *) k - i->d, i->keys, buf);
@@ -115,15 +102,21 @@ static void dump_bset(struct btree *b, struct bset *i)
printk(" %s\n", bch_ptr_status(b->c, k));
- if (bkey_next(k) < end(i) &&
- skipped_backwards(b, k))
+ if (next < end(i) &&
+ bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
printk(KERN_ERR "Key skipped backwards\n");
}
}
-#endif
+static void bch_dump_bucket(struct btree *b)
+{
+ unsigned i;
-#ifdef CONFIG_BCACHE_DEBUG
+ console_lock();
+ for (i = 0; i <= b->nsets; i++)
+ dump_bset(b, b->sets[i].data);
+ console_unlock();
+}
void bch_btree_verify(struct btree *b, struct bset *new)
{
@@ -176,66 +169,58 @@ void bch_btree_verify(struct btree *b, struct bset *new)
mutex_unlock(&b->c->verify_lock);
}
-static void data_verify_endio(struct bio *bio, int error)
-{
- struct closure *cl = bio->bi_private;
- closure_put(cl);
-}
-
-void bch_data_verify(struct search *s)
+void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
char name[BDEVNAME_SIZE];
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- struct closure *cl = &s->cl;
struct bio *check;
struct bio_vec *bv;
+ struct bvec_iter iter1, iter2;
int i;
- if (!s->unaligned_bvec)
- bio_for_each_segment(bv, s->orig_bio, i)
- bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
+ check = bio_alloc(GFP_NOIO,
+ DIV_ROUND_UP(bio->bi_iter.bi_size, PAGE_SIZE));
- check = bio_clone(s->orig_bio, GFP_NOIO);
- if (!check)
- return;
+ check->bi_bdev = bio->bi_bdev;
+ check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
+ check->bi_iter.bi_size = bio->bi_iter.bi_size;
+ bch_bio_map(check, NULL);
if (bio_alloc_pages(check, GFP_NOIO))
goto out_put;
- check->bi_rw = READ_SYNC;
- check->bi_private = cl;
- check->bi_end_io = data_verify_endio;
+ iter1 = bio->bi_iter;
+ iter2 = check->bi_iter;
+
+ submit_bio_wait(READ_SYNC, check);
- closure_bio_submit(check, cl, &dc->disk);
- closure_sync(cl);
+ while (iter1.bi_size) {
+ struct bio_vec bv1 = bio_iter_iovec(bio, iter1);
+ struct bio_vec bv2 = bio_iter_iovec(check, iter2);
+ void *p1 = kmap_atomic(bv1.bv_page);
+ void *p2 = page_address(bv2.bv_page);
+ unsigned bytes = min(bv1.bv_len, bv2.bv_len);
- bio_for_each_segment(bv, s->orig_bio, i) {
- void *p1 = kmap(bv->bv_page);
- void *p2 = kmap(check->bi_io_vec[i].bv_page);
+ cache_set_err_on(memcmp(p1 + bv1.bv_offset,
+ p2 + bv2.bv_offset,
+ bytes),
+ dc->disk.c,
+ "verify failed at dev %s sector %llu",
+ bdevname(dc->bdev, name),
+ (uint64_t) iter1.bi_sector);
- if (memcmp(p1 + bv->bv_offset,
- p2 + bv->bv_offset,
- bv->bv_len))
- printk(KERN_ERR
- "bcache (%s): verify failed at sector %llu\n",
- bdevname(dc->bdev, name),
- (uint64_t) s->orig_bio->bi_sector);
+ kunmap_atomic(p1);
- kunmap(bv->bv_page);
- kunmap(check->bi_io_vec[i].bv_page);
+ bio_advance_iter(bio, &iter1, bytes);
+ bio_advance_iter(check, &iter2, bytes);
}
- __bio_for_each_segment(bv, check, i, 0)
+ bio_for_each_segment_all(bv, check, i)
__free_page(bv->bv_page);
out_put:
bio_put(check);
}
-#endif
-
-#ifdef CONFIG_BCACHE_EDEBUG
-
-unsigned bch_count_data(struct btree *b)
+int __bch_count_data(struct btree *b)
{
unsigned ret = 0;
struct btree_iter iter;
@@ -247,72 +232,60 @@ unsigned bch_count_data(struct btree *b)
return ret;
}
-static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
- va_list args)
-{
- unsigned i;
- char buf[80];
-
- console_lock();
-
- for (i = 0; i <= b->nsets; i++)
- dump_bset(b, b->sets[i].data);
-
- vprintk(fmt, args);
-
- console_unlock();
-
- bch_btree_to_text(buf, sizeof(buf), b);
- panic("at %s\n", buf);
-}
-
-void bch_check_key_order_msg(struct btree *b, struct bset *i,
- const char *fmt, ...)
-{
- struct bkey *k;
-
- if (!i->keys)
- return;
-
- for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
- if (skipped_backwards(b, k)) {
- va_list args;
- va_start(args, fmt);
-
- vdump_bucket_and_panic(b, fmt, args);
- va_end(args);
- }
-}
-
-void bch_check_keys(struct btree *b, const char *fmt, ...)
+void __bch_check_keys(struct btree *b, const char *fmt, ...)
{
va_list args;
struct bkey *k, *p = NULL;
struct btree_iter iter;
-
- if (b->level)
- return;
+ const char *err;
for_each_key(b, k, &iter) {
- if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
- printk(KERN_ERR "Keys out of order:\n");
- goto bug;
- }
-
- if (bch_ptr_invalid(b, k))
- continue;
-
- if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
- printk(KERN_ERR "Overlapping keys:\n");
- goto bug;
+ if (!b->level) {
+ err = "Keys out of order";
+ if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+ goto bug;
+
+ if (bch_ptr_invalid(b, k))
+ continue;
+
+ err = "Overlapping keys";
+ if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+ goto bug;
+ } else {
+ if (bch_ptr_bad(b, k))
+ continue;
+
+ err = "Duplicate keys";
+ if (p && !bkey_cmp(p, k))
+ goto bug;
}
p = k;
}
+
+ err = "Key larger than btree node key";
+ if (p && bkey_cmp(p, &b->key) > 0)
+ goto bug;
+
return;
bug:
+ bch_dump_bucket(b);
+
va_start(args, fmt);
- vdump_bucket_and_panic(b, fmt, args);
+ vprintk(fmt, args);
va_end(args);
+
+ panic("bcache error: %s:\n", err);
+}
+
+void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+ struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+ if (next < iter->data->end &&
+ bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
+ bch_dump_bucket(iter->b);
+ panic("Key skipped backwards\n");
+ }
}
#endif
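
__bch_check_keys() above walks a node and panics on out-of-order, overlapping, or duplicate keys, and bch_btree_iter_next_check() catches a key that skips backwards during iteration. The core ordering/overlap test, reduced to a userspace sketch with start/end fields standing in for START_KEY() and the key offset:

#include <stdio.h>

struct key { unsigned long long start, end; };	/* end is exclusive */

/* Returns the index of the first offending key, or -1 if the set is sane. */
static int check_keys(const struct key *k, int nr)
{
	for (int i = 1; i < nr; i++) {
		if (k[i].start < k[i - 1].start)
			return i;	/* keys out of order */
		if (k[i].start < k[i - 1].end)
			return i;	/* overlapping keys */
	}
	return -1;
}

int main(void)
{
	struct key good[] = { { 0, 8 }, { 8, 16 }, { 32, 40 } };
	struct key bad[]  = { { 0, 8 }, { 4, 12 } };

	printf("good: %d\n", check_keys(good, 3));	/* -1 */
	printf("bad:  %d\n", check_keys(bad, 2));	/* 1  */
	return 0;
}
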
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
index 1c39b5a2489b..2ede60e31874 100644
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -4,40 +4,44 @@
/* Btree/bkey debug printing */
int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
-int bch_btree_to_text(char *buf, size_t size, const struct btree *b);
-
-#ifdef CONFIG_BCACHE_EDEBUG
-
-unsigned bch_count_data(struct btree *);
-void bch_check_key_order_msg(struct btree *, struct bset *, const char *, ...);
-void bch_check_keys(struct btree *, const char *, ...);
-
-#define bch_check_key_order(b, i) \
- bch_check_key_order_msg(b, i, "keys out of order")
-#define EBUG_ON(cond) BUG_ON(cond)
-
-#else /* EDEBUG */
-
-#define bch_count_data(b) 0
-#define bch_check_key_order(b, i) do {} while (0)
-#define bch_check_key_order_msg(b, i, ...) do {} while (0)
-#define bch_check_keys(b, ...) do {} while (0)
-#define EBUG_ON(cond) do {} while (0)
-
-#endif
#ifdef CONFIG_BCACHE_DEBUG
void bch_btree_verify(struct btree *, struct bset *);
-void bch_data_verify(struct search *);
+void bch_data_verify(struct cached_dev *, struct bio *);
+int __bch_count_data(struct btree *);
+void __bch_check_keys(struct btree *, const char *, ...);
+void bch_btree_iter_next_check(struct btree_iter *);
+
+#define EBUG_ON(cond) BUG_ON(cond)
+#define expensive_debug_checks(c) ((c)->expensive_debug_checks)
+#define key_merging_disabled(c) ((c)->key_merging_disabled)
+#define bypass_torture_test(d) ((d)->bypass_torture_test)
#else /* DEBUG */
static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
-static inline void bch_data_verify(struct search *s) {};
+static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
+static inline int __bch_count_data(struct btree *b) { return -1; }
+static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {}
+static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
+
+#define EBUG_ON(cond) do { if (cond); } while (0)
+#define expensive_debug_checks(c) 0
+#define key_merging_disabled(c) 0
+#define bypass_torture_test(d) 0
#endif
+#define bch_count_data(b) \
+ (expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1)
+
+#define bch_check_keys(b, ...) \
+do { \
+ if (expensive_debug_checks((b)->c)) \
+ __bch_check_keys(b, __VA_ARGS__); \
+} while (0)
+
#ifdef CONFIG_DEBUG_FS
void bch_debug_init_cache_set(struct cache_set *);
#else
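
The header rework above trades the compile-time CONFIG_BCACHE_EDEBUG switch for a runtime expensive_debug_checks() gate: bch_count_data() and bch_check_keys() are always built, but only do work when the cache set enables the checks. A generic userspace sketch of that always-compiled, conditionally-executed pattern, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct ctx { bool expensive_checks; };

static int count_items(struct ctx *c)
{
	(void) c;
	printf("running the expensive walk\n");
	return 42;
}

/* Compiled unconditionally; skipped cheaply unless the flag is set. */
#define checked_count(c) \
	((c)->expensive_checks ? count_items(c) : -1)

int main(void)
{
	struct ctx off = { false }, on = { true };

	printf("off: %d\n", checked_count(&off));	/* -1, no walk */
	printf("on:  %d\n", checked_count(&on));	/* 42 */
	return 0;
}
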
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9056632995b1..fa028fa82df4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -11,178 +11,40 @@
#include <linux/blkdev.h>
-static void bch_bi_idx_hack_endio(struct bio *bio, int error)
-{
- struct bio *p = bio->bi_private;
-
- bio_endio(p, error);
- bio_put(bio);
-}
-
-static void bch_generic_make_request_hack(struct bio *bio)
-{
- if (bio->bi_idx) {
- struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
-
- memcpy(clone->bi_io_vec,
- bio_iovec(bio),
- bio_segments(bio) * sizeof(struct bio_vec));
-
- clone->bi_sector = bio->bi_sector;
- clone->bi_bdev = bio->bi_bdev;
- clone->bi_rw = bio->bi_rw;
- clone->bi_vcnt = bio_segments(bio);
- clone->bi_size = bio->bi_size;
-
- clone->bi_private = bio;
- clone->bi_end_io = bch_bi_idx_hack_endio;
-
- bio = clone;
- }
-
- /*
- * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
- * bios might have had more than that (before we split them per device
- * limitations).
- *
- * To be taken out once immutable bvec stuff is in.
- */
- bio->bi_max_vecs = bio->bi_vcnt;
-
- generic_make_request(bio);
-}
-
-/**
- * bch_bio_split - split a bio
- * @bio: bio to split
- * @sectors: number of sectors to split from the front of @bio
- * @gfp: gfp mask
- * @bs: bio set to allocate from
- *
- * Allocates and returns a new bio which represents @sectors from the start of
- * @bio, and updates @bio to represent the remaining sectors.
- *
- * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
- * unchanged.
- *
- * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
- * bvec boundary; it is the caller's responsibility to ensure that @bio is not
- * freed before the split.
- */
-struct bio *bch_bio_split(struct bio *bio, int sectors,
- gfp_t gfp, struct bio_set *bs)
-{
- unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
- struct bio_vec *bv;
- struct bio *ret = NULL;
-
- BUG_ON(sectors <= 0);
-
- if (sectors >= bio_sectors(bio))
- return bio;
-
- if (bio->bi_rw & REQ_DISCARD) {
- ret = bio_alloc_bioset(gfp, 1, bs);
- if (!ret)
- return NULL;
- idx = 0;
- goto out;
- }
-
- bio_for_each_segment(bv, bio, idx) {
- vcnt = idx - bio->bi_idx;
-
- if (!nbytes) {
- ret = bio_alloc_bioset(gfp, vcnt, bs);
- if (!ret)
- return NULL;
-
- memcpy(ret->bi_io_vec, bio_iovec(bio),
- sizeof(struct bio_vec) * vcnt);
-
- break;
- } else if (nbytes < bv->bv_len) {
- ret = bio_alloc_bioset(gfp, ++vcnt, bs);
- if (!ret)
- return NULL;
-
- memcpy(ret->bi_io_vec, bio_iovec(bio),
- sizeof(struct bio_vec) * vcnt);
-
- ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
- bv->bv_offset += nbytes;
- bv->bv_len -= nbytes;
- break;
- }
-
- nbytes -= bv->bv_len;
- }
-out:
- ret->bi_bdev = bio->bi_bdev;
- ret->bi_sector = bio->bi_sector;
- ret->bi_size = sectors << 9;
- ret->bi_rw = bio->bi_rw;
- ret->bi_vcnt = vcnt;
- ret->bi_max_vecs = vcnt;
-
- bio->bi_sector += sectors;
- bio->bi_size -= sectors << 9;
- bio->bi_idx = idx;
-
- if (bio_integrity(bio)) {
- if (bio_integrity_clone(ret, bio, gfp)) {
- bio_put(ret);
- return NULL;
- }
-
- bio_integrity_trim(ret, 0, bio_sectors(ret));
- bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
- }
-
- return ret;
-}
-
static unsigned bch_bio_max_sectors(struct bio *bio)
{
- unsigned ret = bio_sectors(bio);
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
- unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
- queue_max_segments(q));
+ struct bio_vec bv;
+ struct bvec_iter iter;
+ unsigned ret = 0, seg = 0;
if (bio->bi_rw & REQ_DISCARD)
- return min(ret, q->limits.max_discard_sectors);
-
- if (bio_segments(bio) > max_segments ||
- q->merge_bvec_fn) {
- struct bio_vec *bv;
- int i, seg = 0;
-
- ret = 0;
-
- bio_for_each_segment(bv, bio, i) {
- struct bvec_merge_data bvm = {
- .bi_bdev = bio->bi_bdev,
- .bi_sector = bio->bi_sector,
- .bi_size = ret << 9,
- .bi_rw = bio->bi_rw,
- };
-
- if (seg == max_segments)
- break;
+ return min(bio_sectors(bio), q->limits.max_discard_sectors);
+
+ bio_for_each_segment(bv, bio, iter) {
+ struct bvec_merge_data bvm = {
+ .bi_bdev = bio->bi_bdev,
+ .bi_sector = bio->bi_iter.bi_sector,
+ .bi_size = ret << 9,
+ .bi_rw = bio->bi_rw,
+ };
+
+ if (seg == min_t(unsigned, BIO_MAX_PAGES,
+ queue_max_segments(q)))
+ break;
- if (q->merge_bvec_fn &&
- q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
- break;
+ if (q->merge_bvec_fn &&
+ q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+ break;
- seg++;
- ret += bv->bv_len >> 9;
- }
+ seg++;
+ ret += bv.bv_len >> 9;
}
ret = min(ret, queue_max_sectors(q));
WARN_ON(!ret);
- ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);
+ ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
return ret;
}
@@ -193,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl)
s->bio->bi_end_io = s->bi_end_io;
s->bio->bi_private = s->bi_private;
- bio_endio(s->bio, 0);
+ bio_endio_nodec(s->bio, 0);
closure_debug_destroy(&s->cl);
mempool_free(s, s->p->bio_split_hook);
@@ -232,19 +94,19 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
bio_get(bio);
do {
- n = bch_bio_split(bio, bch_bio_max_sectors(bio),
- GFP_NOIO, s->p->bio_split);
+ n = bio_next_split(bio, bch_bio_max_sectors(bio),
+ GFP_NOIO, s->p->bio_split);
n->bi_end_io = bch_bio_submit_split_endio;
n->bi_private = &s->cl;
closure_get(&s->cl);
- bch_generic_make_request_hack(n);
+ generic_make_request(n);
} while (n != bio);
continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
- bch_generic_make_request_hack(bio);
+ generic_make_request(bio);
}
/* Bios with headers */
@@ -272,8 +134,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
struct bbio *b = container_of(bio, struct bbio, bio);
- bio->bi_sector = PTR_OFFSET(&b->key, 0);
- bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
+ bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
+ bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
b->submit_time_us = local_clock_us();
closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
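
bch_bio_max_sectors() now walks the bio with the immutable-bvec iterator, adding up segment lengths until it hits either the segment limit or the queue's sector limit. The accumulation, stripped of the block-layer types and using an invented segment array, looks roughly like this userspace sketch:

#include <stdio.h>

/* Sum segment sizes (converted to 512-byte sectors) until a cap is hit. */
static unsigned max_sectors(const unsigned *seg_bytes, unsigned nr_segs,
			    unsigned max_segs, unsigned max_secs)
{
	unsigned ret = 0, seg = 0;

	for (unsigned i = 0; i < nr_segs; i++) {
		if (seg == max_segs)
			break;
		seg++;
		ret += seg_bytes[i] >> 9;
	}
	return ret < max_secs ? ret : max_secs;
}

int main(void)
{
	unsigned segs[] = { 4096, 4096, 8192, 4096 };

	printf("%u sectors\n", max_sectors(segs, 4, 3, 128));	/* 32 */
	printf("%u sectors\n", max_sectors(segs, 4, 8, 16));	/* 16 */
	return 0;
}
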
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 8435f81e5d85..7eafdf09a0ae 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -7,7 +7,6 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
-#include "request.h"
#include <trace/events/bcache.h>
@@ -31,17 +30,20 @@ static void journal_read_endio(struct bio *bio, int error)
}
static int journal_read_bucket(struct cache *ca, struct list_head *list,
- struct btree_op *op, unsigned bucket_index)
+ unsigned bucket_index)
{
struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio;
struct journal_replay *i;
struct jset *j, *data = ca->set->journal.w[0].data;
+ struct closure cl;
unsigned len, left, offset = 0;
int ret = 0;
sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
+ closure_init_stack(&cl);
+
pr_debug("reading %llu", (uint64_t) bucket);
while (offset < ca->sb.bucket_size) {
@@ -49,17 +51,17 @@ reread: left = ca->sb.bucket_size - offset;
len = min_t(unsigned, left, PAGE_SECTORS * 8);
bio_reset(bio);
- bio->bi_sector = bucket + offset;
+ bio->bi_iter.bi_sector = bucket + offset;
bio->bi_bdev = ca->bdev;
bio->bi_rw = READ;
- bio->bi_size = len << 9;
+ bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
- bio->bi_private = &op->cl;
+ bio->bi_private = &cl;
bch_bio_map(bio, data);
- closure_bio_submit(bio, &op->cl, ca);
- closure_sync(&op->cl);
+ closure_bio_submit(bio, &cl, ca);
+ closure_sync(&cl);
/* This function could be simpler now since we no longer write
* journal entries that overlap bucket boundaries; this means
@@ -72,7 +74,7 @@ reread: left = ca->sb.bucket_size - offset;
struct list_head *where;
size_t blocks, bytes = set_bytes(j);
- if (j->magic != jset_magic(ca->set))
+ if (j->magic != jset_magic(&ca->sb))
return ret;
if (bytes > left << 9)
@@ -129,12 +131,11 @@ next_set:
return ret;
}
-int bch_journal_read(struct cache_set *c, struct list_head *list,
- struct btree_op *op)
+int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b) \
({ \
- int ret = journal_read_bucket(ca, list, op, b); \
+ int ret = journal_read_bucket(ca, list, b); \
__set_bit(b, bitmap); \
if (ret < 0) \
return ret; \
@@ -292,8 +293,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
}
}
-int bch_journal_replay(struct cache_set *s, struct list_head *list,
- struct btree_op *op)
+int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
int ret = 0, keys = 0, entries = 0;
struct bkey *k;
@@ -301,31 +301,30 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
list_entry(list->prev, struct journal_replay, list);
uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
+ struct keylist keylist;
+
+ bch_keylist_init(&keylist);
list_for_each_entry(i, list, list) {
BUG_ON(i->pin && atomic_read(i->pin) != 1);
- if (n != i->j.seq)
- pr_err(
- "journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
- n, i->j.seq - 1, start, end);
+ cache_set_err_on(n != i->j.seq, s,
+"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
+ n, i->j.seq - 1, start, end);
for (k = i->j.start;
k < end(&i->j);
k = bkey_next(k)) {
trace_bcache_journal_replay_key(k);
- bkey_copy(op->keys.top, k);
- bch_keylist_push(&op->keys);
-
- op->journal = i->pin;
- atomic_inc(op->journal);
+ bkey_copy(keylist.top, k);
+ bch_keylist_push(&keylist);
- ret = bch_btree_insert(op, s);
+ ret = bch_btree_insert(s, &keylist, i->pin, NULL);
if (ret)
goto err;
- BUG_ON(!bch_keylist_empty(&op->keys));
+ BUG_ON(!bch_keylist_empty(&keylist));
keys++;
cond_resched();
@@ -339,14 +338,13 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
pr_info("journal replay done, %i keys in %i entries, seq %llu",
keys, entries, end);
-
+err:
while (!list_empty(list)) {
i = list_first_entry(list, struct journal_replay, list);
list_del(&i->list);
kfree(i);
}
-err:
- closure_sync(&op->cl);
+
return ret;
}
@@ -358,48 +356,35 @@ static void btree_flush_write(struct cache_set *c)
* Try to find the btree node that references the oldest journal
* entry, best is our current candidate and is locked if non NULL:
*/
- struct btree *b, *best = NULL;
- unsigned iter;
+ struct btree *b, *best;
+ unsigned i;
+retry:
+ best = NULL;
+
+ for_each_cached_btree(b, c, i)
+ if (btree_current_write(b)->journal) {
+ if (!best)
+ best = b;
+ else if (journal_pin_cmp(c,
+ btree_current_write(best)->journal,
+ btree_current_write(b)->journal)) {
+ best = b;
+ }
+ }
- for_each_cached_btree(b, c, iter) {
- if (!down_write_trylock(&b->lock))
- continue;
+ b = best;
+ if (b) {
+ rw_lock(true, b, b->level);
- if (!btree_node_dirty(b) ||
- !btree_current_write(b)->journal) {
+ if (!btree_current_write(b)->journal) {
rw_unlock(true, b);
- continue;
+ /* We raced */
+ goto retry;
}
- if (!best)
- best = b;
- else if (journal_pin_cmp(c,
- btree_current_write(best),
- btree_current_write(b))) {
- rw_unlock(true, best);
- best = b;
- } else
- rw_unlock(true, b);
+ bch_btree_node_write(b, NULL);
+ rw_unlock(true, b);
}
-
- if (best)
- goto out;
-
- /* We can't find the best btree node, just pick the first */
- list_for_each_entry(b, &c->btree_cache, list)
- if (!b->level && btree_node_dirty(b)) {
- best = b;
- rw_lock(true, best, best->level);
- goto found;
- }
-
-out:
- if (!best)
- return;
-found:
- if (btree_node_dirty(best))
- bch_btree_node_write(best, NULL);
- rw_unlock(true, best);
}
#define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1)
@@ -452,13 +437,13 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
bio_init(bio);
- bio->bi_sector = bucket_to_sector(ca->set,
+ bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
bio->bi_bdev = ca->bdev;
bio->bi_rw = REQ_WRITE|REQ_DISCARD;
bio->bi_max_vecs = 1;
bio->bi_io_vec = bio->bi_inline_vecs;
- bio->bi_size = bucket_bytes(ca);
+ bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
closure_get(&ca->set->cl);
@@ -495,7 +480,7 @@ static void journal_reclaim(struct cache_set *c)
do_journal_discard(ca);
if (c->journal.blocks_free)
- return;
+ goto out;
/*
* Allocate:
@@ -521,7 +506,7 @@ static void journal_reclaim(struct cache_set *c)
if (n)
c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-
+out:
if (!journal_full(&c->journal))
__closure_wake_up(&c->journal.wait);
}
@@ -554,32 +539,26 @@ static void journal_write_endio(struct bio *bio, int error)
struct journal_write *w = bio->bi_private;
cache_set_err_on(error, w->c, "journal io error");
- closure_put(&w->c->journal.io.cl);
+ closure_put(&w->c->journal.io);
}
static void journal_write(struct closure *);
static void journal_write_done(struct closure *cl)
{
- struct journal *j = container_of(cl, struct journal, io.cl);
- struct cache_set *c = container_of(j, struct cache_set, journal);
-
+ struct journal *j = container_of(cl, struct journal, io);
struct journal_write *w = (j->cur == j->w)
? &j->w[1]
: &j->w[0];
__closure_wake_up(&w->wait);
-
- if (c->journal_delay_ms)
- closure_delay(&j->io, msecs_to_jiffies(c->journal_delay_ms));
-
- continue_at(cl, journal_write, system_wq);
+ continue_at_nobarrier(cl, journal_write, system_wq);
}
static void journal_write_unlocked(struct closure *cl)
__releases(c->journal.lock)
{
- struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
struct cache *ca;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
@@ -617,7 +596,7 @@ static void journal_write_unlocked(struct closure *cl)
for_each_cache(ca, c, i)
w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
- w->data->magic = jset_magic(c);
+ w->data->magic = jset_magic(&c->sb);
w->data->version = BCACHE_JSET_VERSION;
w->data->last_seq = last_seq(&c->journal);
w->data->csum = csum_set(w->data);
@@ -629,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl)
atomic_long_add(sectors, &ca->meta_sectors_written);
bio_reset(bio);
- bio->bi_sector = PTR_OFFSET(k, i);
+ bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
bio->bi_bdev = ca->bdev;
bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
- bio->bi_size = sectors << 9;
+ bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
@@ -660,121 +639,134 @@ static void journal_write_unlocked(struct closure *cl)
static void journal_write(struct closure *cl)
{
- struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
spin_lock(&c->journal.lock);
journal_write_unlocked(cl);
}
-static void __journal_try_write(struct cache_set *c, bool noflush)
+static void journal_try_write(struct cache_set *c)
__releases(c->journal.lock)
{
- struct closure *cl = &c->journal.io.cl;
+ struct closure *cl = &c->journal.io;
+ struct journal_write *w = c->journal.cur;
- if (!closure_trylock(cl, &c->cl))
- spin_unlock(&c->journal.lock);
- else if (noflush && journal_full(&c->journal)) {
- spin_unlock(&c->journal.lock);
- continue_at(cl, journal_write, system_wq);
- } else
+ w->need_write = true;
+
+ if (closure_trylock(cl, &c->cl))
journal_write_unlocked(cl);
+ else
+ spin_unlock(&c->journal.lock);
}
-#define journal_try_write(c) __journal_try_write(c, false)
-
-void bch_journal_meta(struct cache_set *c, struct closure *cl)
+static struct journal_write *journal_wait_for_write(struct cache_set *c,
+ unsigned nkeys)
{
- struct journal_write *w;
+ size_t sectors;
+ struct closure cl;
- if (CACHE_SYNC(&c->sb)) {
- spin_lock(&c->journal.lock);
+ closure_init_stack(&cl);
+
+ spin_lock(&c->journal.lock);
- w = c->journal.cur;
- w->need_write = true;
+ while (1) {
+ struct journal_write *w = c->journal.cur;
- if (cl)
- BUG_ON(!closure_wait(&w->wait, cl));
+ sectors = __set_blocks(w->data, w->data->keys + nkeys,
+ c) * c->sb.block_size;
- closure_flush(&c->journal.io);
- __journal_try_write(c, true);
+ if (sectors <= min_t(size_t,
+ c->journal.blocks_free * c->sb.block_size,
+ PAGE_SECTORS << JSET_BITS))
+ return w;
+
+ /* XXX: tracepoint */
+ if (!journal_full(&c->journal)) {
+ trace_bcache_journal_entry_full(c);
+
+ /*
+ * XXX: If we were inserting so many keys that they
+ * won't fit in an _empty_ journal write, we'll
+ * deadlock. For now, handle this in
+ * bch_keylist_realloc() - but something to think about.
+ */
+ BUG_ON(!w->data->keys);
+
+ closure_wait(&w->wait, &cl);
+ journal_try_write(c); /* unlocks */
+ } else {
+ trace_bcache_journal_full(c);
+
+ closure_wait(&c->journal.wait, &cl);
+ journal_reclaim(c);
+ spin_unlock(&c->journal.lock);
+
+ btree_flush_write(c);
+ }
+
+ closure_sync(&cl);
+ spin_lock(&c->journal.lock);
}
}
+static void journal_write_work(struct work_struct *work)
+{
+ struct cache_set *c = container_of(to_delayed_work(work),
+ struct cache_set,
+ journal.work);
+ spin_lock(&c->journal.lock);
+ journal_try_write(c);
+}
+
/*
* Entry point to the journalling code - bio_insert() and btree_invalidate()
* pass bch_journal() a list of keys to be journalled, and then
* bch_journal() hands those same keys off to btree_insert_async()
*/
-void bch_journal(struct closure *cl)
+atomic_t *bch_journal(struct cache_set *c,
+ struct keylist *keys,
+ struct closure *parent)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct cache_set *c = op->c;
struct journal_write *w;
- size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list;
-
- if (op->type != BTREE_INSERT ||
- !CACHE_SYNC(&c->sb))
- goto out;
+ atomic_t *ret;
- /*
- * If we're looping because we errored, might already be waiting on
- * another journal write:
- */
- while (atomic_read(&cl->parent->remaining) & CLOSURE_WAITING)
- closure_sync(cl->parent);
+ if (!CACHE_SYNC(&c->sb))
+ return NULL;
- spin_lock(&c->journal.lock);
+ w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
- if (journal_full(&c->journal)) {
- trace_bcache_journal_full(c);
+ memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
+ w->data->keys += bch_keylist_nkeys(keys);
- closure_wait(&c->journal.wait, cl);
+ ret = &fifo_back(&c->journal.pin);
+ atomic_inc(ret);
- journal_reclaim(c);
+ if (parent) {
+ closure_wait(&w->wait, parent);
+ journal_try_write(c);
+ } else if (!w->need_write) {
+ schedule_delayed_work(&c->journal.work,
+ msecs_to_jiffies(c->journal_delay_ms));
+ spin_unlock(&c->journal.lock);
+ } else {
spin_unlock(&c->journal.lock);
-
- btree_flush_write(c);
- continue_at(cl, bch_journal, bcache_wq);
}
- w = c->journal.cur;
- w->need_write = true;
- b = __set_blocks(w->data, w->data->keys + n, c);
-
- if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
- b > c->journal.blocks_free) {
- trace_bcache_journal_entry_full(c);
-
- /*
- * XXX: If we were inserting so many keys that they won't fit in
- * an _empty_ journal write, we'll deadlock. For now, handle
- * this in bch_keylist_realloc() - but something to think about.
- */
- BUG_ON(!w->data->keys);
-
- BUG_ON(!closure_wait(&w->wait, cl));
-
- closure_flush(&c->journal.io);
- journal_try_write(c);
- continue_at(cl, bch_journal, bcache_wq);
- }
-
- memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t));
- w->data->keys += n;
+ return ret;
+}
- op->journal = &fifo_back(&c->journal.pin);
- atomic_inc(op->journal);
+void bch_journal_meta(struct cache_set *c, struct closure *cl)
+{
+ struct keylist keys;
+ atomic_t *ref;
- if (op->flush_journal) {
- closure_flush(&c->journal.io);
- closure_wait(&w->wait, cl->parent);
- }
+ bch_keylist_init(&keys);
- journal_try_write(c);
-out:
- bch_btree_insert_async(cl);
+ ref = bch_journal(c, &keys, cl);
+ if (ref)
+ atomic_dec_bug(ref);
}
void bch_journal_free(struct cache_set *c)
@@ -790,6 +782,7 @@ int bch_journal_alloc(struct cache_set *c)
closure_init_unlocked(&j->io);
spin_lock_init(&j->lock);
+ INIT_DELAYED_WORK(&j->work, journal_write_work);
c->journal_delay_ms = 100;
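
journal_wait_for_write() above loops until the pending keys fit in the current journal write, flushing the current entry (or reclaiming space and writing out dirty btree nodes) and sleeping when they do not, while journal_write_work() provides the delayed flush that replaces the old closure timer. A heavily reduced, single-threaded userspace model of the append-if-it-fits-otherwise-flush loop, with invented buffer sizes and names:

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 32

static char buf[BUF_SIZE];
static size_t used;

static void flush(void)
{
	printf("flushing %zu bytes\n", used);
	used = 0;
}

/* Append an entry, flushing the current "write" first if it would not fit. */
static void journal_add(const char *entry)
{
	size_t len = strlen(entry);

	if (len > BUF_SIZE)
		return;		/* would never fit; real code errors out here */
	if (used + len > BUF_SIZE)
		flush();

	memcpy(buf + used, entry, len);
	used += len;
}

int main(void)
{
	journal_add("key:1 ");
	journal_add("key:2 ");
	journal_add("a-much-longer-journal-entry ");
	flush();
	return 0;
}
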
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index 3d7851274b04..a6472fda94b2 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -75,43 +75,6 @@
* nodes that are pinning the oldest journal entries first.
*/
-#define BCACHE_JSET_VERSION_UUIDv1 1
-/* Always latest UUID format */
-#define BCACHE_JSET_VERSION_UUID 1
-#define BCACHE_JSET_VERSION 1
-
-/*
- * On disk format for a journal entry:
- * seq is monotonically increasing; every journal entry has its own unique
- * sequence number.
- *
- * last_seq is the oldest journal entry that still has keys the btree hasn't
- * flushed to disk yet.
- *
- * version is for on disk format changes.
- */
-struct jset {
- uint64_t csum;
- uint64_t magic;
- uint64_t seq;
- uint32_t version;
- uint32_t keys;
-
- uint64_t last_seq;
-
- BKEY_PADDED(uuid_bucket);
- BKEY_PADDED(btree_root);
- uint16_t btree_level;
- uint16_t pad[3];
-
- uint64_t prio_bucket[MAX_CACHES_PER_SET];
-
- union {
- struct bkey start[0];
- uint64_t d[0];
- };
-};
-
/*
* Only used for holding the journal entries we read in btree_journal_read()
* during cache_registration
@@ -140,7 +103,8 @@ struct journal {
spinlock_t lock;
/* used when waiting because the journal was full */
struct closure_waitlist wait;
- struct closure_with_timer io;
+ struct closure io;
+ struct delayed_work work;
/* Number of blocks free in the bucket(s) we're currently writing to */
unsigned blocks_free;
@@ -188,8 +152,7 @@ struct journal_device {
};
#define journal_pin_cmp(c, l, r) \
- (fifo_idx(&(c)->journal.pin, (l)->journal) > \
- fifo_idx(&(c)->journal.pin, (r)->journal))
+ (fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r)))
#define JOURNAL_PIN 20000
@@ -199,15 +162,14 @@ struct journal_device {
struct closure;
struct cache_set;
struct btree_op;
+struct keylist;
-void bch_journal(struct closure *);
+atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);
void bch_journal_next(struct journal *);
void bch_journal_mark(struct cache_set *, struct list_head *);
void bch_journal_meta(struct cache_set *, struct closure *);
-int bch_journal_read(struct cache_set *, struct list_head *,
- struct btree_op *);
-int bch_journal_replay(struct cache_set *, struct list_head *,
- struct btree_op *);
+int bch_journal_read(struct cache_set *, struct list_head *);
+int bch_journal_replay(struct cache_set *, struct list_head *);
void bch_journal_free(struct cache_set *);
int bch_journal_alloc(struct cache_set *);
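
journal_pin_cmp() now takes the pin pointers directly and compares their positions in the journal's pin FIFO to decide which btree node holds the older journal reference. A toy version that compares ages by index into a plain array (assuming, for the sketch only, that a lower index means an older entry):

#include <stdio.h>

#define PIN_SLOTS 8

static int pins[PIN_SLOTS];	/* index 0 is treated as the oldest entry */

/* Nonzero if l pins a newer entry than r, i.e. l sits later in the array. */
static int pin_cmp(const int *l, const int *r)
{
	return (l - pins) > (r - pins);
}

int main(void)
{
	const int *older = &pins[1], *newer = &pins[5];

	printf("newer > older: %d\n", pin_cmp(newer, older));	/* 1 */
	printf("older > newer: %d\n", pin_cmp(older, newer));	/* 0 */
	return 0;
}
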
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 1a3b4f4786c3..581f95df8265 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -12,8 +12,9 @@
#include <trace/events/bcache.h>
struct moving_io {
+ struct closure cl;
struct keybuf_key *w;
- struct search s;
+ struct data_insert_op op;
struct bbio bio;
};
@@ -38,13 +39,13 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
static void moving_io_destructor(struct closure *cl)
{
- struct moving_io *io = container_of(cl, struct moving_io, s.cl);
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
kfree(io);
}
static void write_moving_finish(struct closure *cl)
{
- struct moving_io *io = container_of(cl, struct moving_io, s.cl);
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
struct bio_vec *bv;
int i;
@@ -52,13 +53,12 @@ static void write_moving_finish(struct closure *cl)
bio_for_each_segment_all(bv, bio, i)
__free_page(bv->bv_page);
- if (io->s.op.insert_collision)
+ if (io->op.replace_collision)
trace_bcache_gc_copy_collision(&io->w->key);
- bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);
+ bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);
- atomic_dec_bug(&io->s.op.c->in_flight);
- closure_wake_up(&io->s.op.c->moving_gc_wait);
+ up(&io->op.c->moving_in_flight);
closure_return_with_destructor(cl, moving_io_destructor);
}
@@ -66,12 +66,12 @@ static void write_moving_finish(struct closure *cl)
static void read_moving_endio(struct bio *bio, int error)
{
struct moving_io *io = container_of(bio->bi_private,
- struct moving_io, s.cl);
+ struct moving_io, cl);
if (error)
- io->s.error = error;
+ io->op.error = error;
- bch_bbio_endio(io->s.op.c, bio, error, "reading data to move");
+ bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}
static void moving_init(struct moving_io *io)
@@ -82,57 +82,56 @@ static void moving_init(struct moving_io *io)
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
- bio->bi_size = KEY_SIZE(&io->w->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
PAGE_SECTORS);
- bio->bi_private = &io->s.cl;
+ bio->bi_private = &io->cl;
bio->bi_io_vec = bio->bi_inline_vecs;
bch_bio_map(bio, NULL);
}
static void write_moving(struct closure *cl)
{
- struct search *s = container_of(cl, struct search, cl);
- struct moving_io *io = container_of(s, struct moving_io, s);
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
+ struct data_insert_op *op = &io->op;
- if (!s->error) {
+ if (!op->error) {
moving_init(io);
- io->bio.bio.bi_sector = KEY_START(&io->w->key);
- s->op.lock = -1;
- s->op.write_prio = 1;
- s->op.cache_bio = &io->bio.bio;
+ io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
+ op->write_prio = 1;
+ op->bio = &io->bio.bio;
- s->writeback = KEY_DIRTY(&io->w->key);
- s->op.csum = KEY_CSUM(&io->w->key);
+ op->writeback = KEY_DIRTY(&io->w->key);
+ op->csum = KEY_CSUM(&io->w->key);
- s->op.type = BTREE_REPLACE;
- bkey_copy(&s->op.replace, &io->w->key);
+ bkey_copy(&op->replace_key, &io->w->key);
+ op->replace = true;
- closure_init(&s->op.cl, cl);
- bch_insert_data(&s->op.cl);
+ closure_call(&op->cl, bch_data_insert, NULL, cl);
}
- continue_at(cl, write_moving_finish, NULL);
+ continue_at(cl, write_moving_finish, system_wq);
}
static void read_moving_submit(struct closure *cl)
{
- struct search *s = container_of(cl, struct search, cl);
- struct moving_io *io = container_of(s, struct moving_io, s);
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
- bch_submit_bbio(bio, s->op.c, &io->w->key, 0);
+ bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
- continue_at(cl, write_moving, bch_gc_wq);
+ continue_at(cl, write_moving, system_wq);
}
-static void read_moving(struct closure *cl)
+static void read_moving(struct cache_set *c)
{
- struct cache_set *c = container_of(cl, struct cache_set, moving_gc);
struct keybuf_key *w;
struct moving_io *io;
struct bio *bio;
+ struct closure cl;
+
+ closure_init_stack(&cl);
/* XXX: if we error, background writeback could stall indefinitely */
@@ -150,8 +149,8 @@ static void read_moving(struct closure *cl)
w->private = io;
io->w = w;
- io->s.op.inode = KEY_INODE(&w->key);
- io->s.op.c = c;
+ io->op.inode = KEY_INODE(&w->key);
+ io->op.c = c;
moving_init(io);
bio = &io->bio.bio;
@@ -164,13 +163,8 @@ static void read_moving(struct closure *cl)
trace_bcache_gc_copy(&w->key);
- closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);
-
- if (atomic_inc_return(&c->in_flight) >= 64) {
- closure_wait_event(&c->moving_gc_wait, cl,
- atomic_read(&c->in_flight) < 64);
- continue_at(cl, read_moving, bch_gc_wq);
- }
+ down(&c->moving_in_flight);
+ closure_call(&io->cl, read_moving_submit, NULL, &cl);
}
if (0) {
@@ -180,7 +174,7 @@ err: if (!IS_ERR_OR_NULL(w->private))
bch_keybuf_del(&c->moving_gc_keys, w);
}
- closure_return(cl);
+ closure_sync(&cl);
}
static bool bucket_cmp(struct bucket *l, struct bucket *r)
@@ -193,15 +187,14 @@ static unsigned bucket_heap_top(struct cache *ca)
return GC_SECTORS_USED(heap_peek(&ca->heap));
}
-void bch_moving_gc(struct closure *cl)
+void bch_moving_gc(struct cache_set *c)
{
- struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
struct cache *ca;
struct bucket *b;
unsigned i;
if (!c->copy_gc_enabled)
- closure_return(cl);
+ return;
mutex_lock(&c->bucket_lock);
@@ -242,13 +235,11 @@ void bch_moving_gc(struct closure *cl)
c->moving_gc_keys.last_scanned = ZERO_KEY;
- closure_init(&c->moving_gc, cl);
- read_moving(&c->moving_gc);
-
- closure_return(cl);
+ read_moving(c);
}
void bch_moving_init_cache_set(struct cache_set *c)
{
bch_keybuf_init(&c->moving_gc_keys);
+ sema_init(&c->moving_in_flight, 64);
}
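
The moving-GC rework above replaces the hand-rolled in_flight counter and waitlist with a counting semaphore: sema_init(&c->moving_in_flight, 64), down() before each copy is issued and up() when it finishes. The same throttling idea in a userspace POSIX-semaphore sketch, with an invented thread body and a limit of 4 chosen just for the example:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t in_flight;

static void *do_copy(void *arg)
{
	/* ... copy one bucket's worth of data ... */
	usleep(1000);
	printf("copy %ld done\n", (long) arg);
	sem_post(&in_flight);		/* release our slot */
	return NULL;
}

int main(void)
{
	pthread_t threads[16];

	sem_init(&in_flight, 0, 4);	/* at most 4 copies in flight */

	for (long i = 0; i < 16; i++) {
		sem_wait(&in_flight);	/* blocks once 4 are outstanding */
		pthread_create(&threads[i], NULL, do_copy, (void *) i);
	}

	for (int i = 0; i < 16; i++)
		pthread_join(threads[i], NULL);

	sem_destroy(&in_flight);
	return 0;
}
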
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index b6a74bcbb08f..be49d0f8de0b 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -25,7 +25,7 @@
struct kmem_cache *bch_search_cache;
-static void check_should_skip(struct cached_dev *, struct search *);
+static void bch_data_insert_start(struct closure *);
/* Cgroup interface */
@@ -198,14 +198,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio)
static void bio_csum(struct bio *bio, struct bkey *k)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
+ struct bvec_iter iter;
uint64_t csum = 0;
- int i;
- bio_for_each_segment(bv, bio, i) {
- void *d = kmap(bv->bv_page) + bv->bv_offset;
- csum = bch_crc64_update(csum, d, bv->bv_len);
- kunmap(bv->bv_page);
+ bio_for_each_segment(bv, bio, iter) {
+ void *d = kmap(bv.bv_page) + bv.bv_offset;
+ csum = bch_crc64_update(csum, d, bv.bv_len);
+ kunmap(bv.bv_page);
}
k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
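
bio_csum() above now iterates bio_vec values with bio_for_each_segment(), folding each mapped segment into a running checksum before storing it in the key. The accumulation shape, with a trivial shift-and-xor step standing in for bch_crc64_update() and an invented segment list:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Placeholder for a real CRC64 step: just fold bytes into the accumulator. */
static uint64_t csum_update(uint64_t csum, const void *data, size_t len)
{
	const uint8_t *p = data;

	while (len--)
		csum = (csum << 1) ^ *p++;
	return csum;
}

int main(void)
{
	const char *segments[] = { "first segment", "second", "third one" };
	uint64_t csum = 0;

	for (int i = 0; i < 3; i++)
		csum = csum_update(csum, segments[i], strlen(segments[i]));

	printf("checksum: %llu\n", (unsigned long long) csum);
	return 0;
}
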
@@ -213,221 +213,80 @@ static void bio_csum(struct bio *bio, struct bkey *k)
/* Insert data into cache */
-static void bio_invalidate(struct closure *cl)
-{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct bio *bio = op->cache_bio;
-
- pr_debug("invalidating %i sectors from %llu",
- bio_sectors(bio), (uint64_t) bio->bi_sector);
-
- while (bio_sectors(bio)) {
- unsigned len = min(bio_sectors(bio), 1U << 14);
-
- if (bch_keylist_realloc(&op->keys, 0, op->c))
- goto out;
-
- bio->bi_sector += len;
- bio->bi_size -= len << 9;
-
- bch_keylist_add(&op->keys,
- &KEY(op->inode, bio->bi_sector, len));
- }
-
- op->insert_data_done = true;
- bio_put(bio);
-out:
- continue_at(cl, bch_journal, bcache_wq);
-}
-
-struct open_bucket {
- struct list_head list;
- struct task_struct *last;
- unsigned sectors_free;
- BKEY_PADDED(key);
-};
-
-void bch_open_buckets_free(struct cache_set *c)
-{
- struct open_bucket *b;
-
- while (!list_empty(&c->data_buckets)) {
- b = list_first_entry(&c->data_buckets,
- struct open_bucket, list);
- list_del(&b->list);
- kfree(b);
- }
-}
-
-int bch_open_buckets_alloc(struct cache_set *c)
-{
- int i;
-
- spin_lock_init(&c->data_bucket_lock);
-
- for (i = 0; i < 6; i++) {
- struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
- if (!b)
- return -ENOMEM;
-
- list_add(&b->list, &c->data_buckets);
- }
-
- return 0;
-}
-
-/*
- * We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
- *
- * The idea is that if you've got multiple tasks pulling data into the cache at the
- * same time, you'll get better cache utilization if you try to segregate their
- * data and preserve locality.
- *
- * For example, say you've started Firefox at the same time you're copying a
- * bunch of files. Firefox will likely end up being fairly hot and stay in the
- * cache awhile, but the data you copied might not be; if you wrote all that
- * data to the same buckets it'd get invalidated at the same time.
- *
- * Both of those tasks will be doing fairly random IO so we can't rely on
- * detecting sequential IO to segregate their data, but going off of the task
- * should be a sane heuristic.
- */
-static struct open_bucket *pick_data_bucket(struct cache_set *c,
- const struct bkey *search,
- struct task_struct *task,
- struct bkey *alloc)
+static void bch_data_insert_keys(struct closure *cl)
{
- struct open_bucket *ret, *ret_task = NULL;
-
- list_for_each_entry_reverse(ret, &c->data_buckets, list)
- if (!bkey_cmp(&ret->key, search))
- goto found;
- else if (ret->last == task)
- ret_task = ret;
-
- ret = ret_task ?: list_first_entry(&c->data_buckets,
- struct open_bucket, list);
-found:
- if (!ret->sectors_free && KEY_PTRS(alloc)) {
- ret->sectors_free = c->sb.bucket_size;
- bkey_copy(&ret->key, alloc);
- bkey_init(alloc);
- }
-
- if (!ret->sectors_free)
- ret = NULL;
-
- return ret;
-}
-
-/*
- * Allocates some space in the cache to write to, and k to point to the newly
- * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
- * end of the newly allocated space).
- *
- * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
- * sectors were actually allocated.
- *
- * If s->writeback is true, will not fail.
- */
-static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
- struct search *s)
-{
- struct cache_set *c = s->op.c;
- struct open_bucket *b;
- BKEY_PADDED(key) alloc;
- struct closure cl, *w = NULL;
- unsigned i;
-
- if (s->writeback) {
- closure_init_stack(&cl);
- w = &cl;
- }
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ atomic_t *journal_ref = NULL;
+ struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
+ int ret;
/*
- * We might have to allocate a new bucket, which we can't do with a
- * spinlock held. So if we have to allocate, we drop the lock, allocate
- * and then retry. KEY_PTRS() indicates whether alloc points to
- * allocated bucket(s).
+ * If we're looping, might already be waiting on
+ * another journal write - can't wait on more than one journal write at
+ * a time
+ *
+ * XXX: this looks wrong
*/
+#if 0
+ while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
+ closure_sync(&s->cl);
+#endif
- bkey_init(&alloc.key);
- spin_lock(&c->data_bucket_lock);
-
- while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
- unsigned watermark = s->op.write_prio
- ? WATERMARK_MOVINGGC
- : WATERMARK_NONE;
-
- spin_unlock(&c->data_bucket_lock);
-
- if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
- return false;
+ if (!op->replace)
+ journal_ref = bch_journal(op->c, &op->insert_keys,
+ op->flush_journal ? cl : NULL);
- spin_lock(&c->data_bucket_lock);
+ ret = bch_btree_insert(op->c, &op->insert_keys,
+ journal_ref, replace_key);
+ if (ret == -ESRCH) {
+ op->replace_collision = true;
+ } else if (ret) {
+ op->error = -ENOMEM;
+ op->insert_data_done = true;
}
- /*
- * If we had to allocate, we might race and not need to allocate the
- * second time we call find_data_bucket(). If we allocated a bucket but
- * didn't use it, drop the refcount bch_bucket_alloc_set() took:
- */
- if (KEY_PTRS(&alloc.key))
- __bkey_put(c, &alloc.key);
-
- for (i = 0; i < KEY_PTRS(&b->key); i++)
- EBUG_ON(ptr_stale(c, &b->key, i));
+ if (journal_ref)
+ atomic_dec_bug(journal_ref);
- /* Set up the pointer to the space we're allocating: */
+ if (!op->insert_data_done)
+ continue_at(cl, bch_data_insert_start, bcache_wq);
- for (i = 0; i < KEY_PTRS(&b->key); i++)
- k->ptr[i] = b->key.ptr[i];
+ bch_keylist_free(&op->insert_keys);
+ closure_return(cl);
+}
- sectors = min(sectors, b->sectors_free);
+static void bch_data_invalidate(struct closure *cl)
+{
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ struct bio *bio = op->bio;
- SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
- SET_KEY_SIZE(k, sectors);
- SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+ pr_debug("invalidating %i sectors from %llu",
+ bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
- /*
- * Move b to the end of the lru, and keep track of what this bucket was
- * last used for:
- */
- list_move_tail(&b->list, &c->data_buckets);
- bkey_copy_key(&b->key, k);
- b->last = s->task;
+ while (bio_sectors(bio)) {
+ unsigned sectors = min(bio_sectors(bio),
+ 1U << (KEY_SIZE_BITS - 1));
- b->sectors_free -= sectors;
+ if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
+ goto out;
- for (i = 0; i < KEY_PTRS(&b->key); i++) {
- SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+ bio->bi_iter.bi_sector += sectors;
+ bio->bi_iter.bi_size -= sectors << 9;
- atomic_long_add(sectors,
- &PTR_CACHE(c, &b->key, i)->sectors_written);
+ bch_keylist_add(&op->insert_keys,
+ &KEY(op->inode, bio->bi_iter.bi_sector,
+ sectors));
}
- if (b->sectors_free < c->sb.block_size)
- b->sectors_free = 0;
-
- /*
- * k takes refcounts on the buckets it points to until it's inserted
- * into the btree, but if we're done with this bucket we just transfer
- * get_data_bucket()'s refcount.
- */
- if (b->sectors_free)
- for (i = 0; i < KEY_PTRS(&b->key); i++)
- atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
-
- spin_unlock(&c->data_bucket_lock);
- return true;
+ op->insert_data_done = true;
+ bio_put(bio);
+out:
+ continue_at(cl, bch_data_insert_keys, bcache_wq);
}
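
For reference, the invalidate path above never touches data: it only queues keys with no pointers over the bypassed range, and inserting a zero-pointer key is what overwrites (and so invalidates) whatever the cache held there. A worked example, assuming inode 5 and a 6-sector bypass write starting at sector 1000:

	/*
	 * bch_data_invalidate() advances bi_sector to 1006 and queues
	 * KEY(5, 1006, 6) -- the offset is the end sector, KEY_PTRS() == 0 --
	 * then hands op->insert_keys to bch_data_insert_keys().
	 */
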
-static void bch_insert_data_error(struct closure *cl)
+static void bch_data_insert_error(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
/*
* Our data write just errored, which means we've got a bunch of keys to
@@ -438,35 +297,34 @@ static void bch_insert_data_error(struct closure *cl)
* from the keys we'll accomplish just that.
*/
- struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;
+ struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
- while (src != op->keys.top) {
+ while (src != op->insert_keys.top) {
struct bkey *n = bkey_next(src);
SET_KEY_PTRS(src, 0);
- bkey_copy(dst, src);
+ memmove(dst, src, bkey_bytes(src));
dst = bkey_next(dst);
src = n;
}
- op->keys.top = dst;
+ op->insert_keys.top = dst;
- bch_journal(cl);
+ bch_data_insert_keys(cl);
}
-static void bch_insert_data_endio(struct bio *bio, int error)
+static void bch_data_insert_endio(struct bio *bio, int error)
{
struct closure *cl = bio->bi_private;
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
if (error) {
/* TODO: We could try to recover from this. */
- if (s->writeback)
- s->error = error;
- else if (s->write)
- set_closure_fn(cl, bch_insert_data_error, bcache_wq);
+ if (op->writeback)
+ op->error = error;
+ else if (!op->replace)
+ set_closure_fn(cl, bch_data_insert_error, bcache_wq);
else
set_closure_fn(cl, NULL, NULL);
}
@@ -474,18 +332,17 @@ static void bch_insert_data_endio(struct bio *bio, int error)
bch_bbio_endio(op->c, bio, error, "writing data to cache");
}
-static void bch_insert_data_loop(struct closure *cl)
+static void bch_data_insert_start(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
- struct bio *bio = op->cache_bio, *n;
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ struct bio *bio = op->bio, *n;
- if (op->skip)
- return bio_invalidate(cl);
+ if (op->bypass)
+ return bch_data_invalidate(cl);
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
set_gc_sectors(op->c);
- bch_queue_gc(op->c);
+ wake_up_gc(op->c);
}
/*
@@ -497,29 +354,30 @@ static void bch_insert_data_loop(struct closure *cl)
do {
unsigned i;
struct bkey *k;
- struct bio_set *split = s->d
- ? s->d->bio_split : op->c->bio_split;
+ struct bio_set *split = op->c->bio_split;
/* 1 for the device pointer and 1 for the chksum */
- if (bch_keylist_realloc(&op->keys,
+ if (bch_keylist_realloc(&op->insert_keys,
1 + (op->csum ? 1 : 0),
op->c))
- continue_at(cl, bch_journal, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, bcache_wq);
- k = op->keys.top;
+ k = op->insert_keys.top;
bkey_init(k);
SET_KEY_INODE(k, op->inode);
- SET_KEY_OFFSET(k, bio->bi_sector);
+ SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
- if (!bch_alloc_sectors(k, bio_sectors(bio), s))
+ if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
+ op->write_point, op->write_prio,
+ op->writeback))
goto err;
- n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+ n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
- n->bi_end_io = bch_insert_data_endio;
+ n->bi_end_io = bch_data_insert_endio;
n->bi_private = cl;
- if (s->writeback) {
+ if (op->writeback) {
SET_KEY_DIRTY(k, true);
for (i = 0; i < KEY_PTRS(k); i++)
@@ -532,17 +390,17 @@ static void bch_insert_data_loop(struct closure *cl)
bio_csum(n, k);
trace_bcache_cache_insert(k);
- bch_keylist_push(&op->keys);
+ bch_keylist_push(&op->insert_keys);
n->bi_rw |= REQ_WRITE;
bch_submit_bbio(n, op->c, k, 0);
} while (n != bio);
op->insert_data_done = true;
- continue_at(cl, bch_journal, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, bcache_wq);
err:
/* bch_alloc_sectors() blocks if s->writeback = true */
- BUG_ON(s->writeback);
+ BUG_ON(op->writeback);
/*
* But if it's not a writeback write we'd rather just bail out if
@@ -550,15 +408,15 @@ err:
* we might be starving btree writes for gc or something.
*/
- if (s->write) {
+ if (!op->replace) {
/*
* Writethrough write: We can't complete the write until we've
* updated the index. But we don't want to delay the write while
* we wait for buckets to be freed up, so just invalidate the
* rest of the write.
*/
- op->skip = true;
- return bio_invalidate(cl);
+ op->bypass = true;
+ return bch_data_invalidate(cl);
} else {
/*
* From a cache miss, we can just insert the keys for the data
@@ -567,15 +425,15 @@ err:
op->insert_data_done = true;
bio_put(bio);
- if (!bch_keylist_empty(&op->keys))
- continue_at(cl, bch_journal, bcache_wq);
+ if (!bch_keylist_empty(&op->insert_keys))
+ continue_at(cl, bch_data_insert_keys, bcache_wq);
else
closure_return(cl);
}
}
/**
- * bch_insert_data - stick some data in the cache
+ * bch_data_insert - stick some data in the cache
*
* This is the starting point for any data to end up in a cache device; it could
* be from a normal write, or a writeback write, or a write to a flash only
@@ -587,56 +445,178 @@ err:
* data is written it calls bch_journal, and after the keys have been added to
* the next journal write they're inserted into the btree.
*
- * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
+ * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
* and op->inode is used for the key inode.
*
- * If op->skip is true, instead of inserting the data it invalidates the region
- * of the cache represented by op->cache_bio and op->inode.
+ * If s->bypass is true, instead of inserting the data it invalidates the
+ * region of the cache represented by s->cache_bio and op->inode.
*/
-void bch_insert_data(struct closure *cl)
+void bch_data_insert(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+
+ trace_bcache_write(op->bio, op->writeback, op->bypass);
- bch_keylist_init(&op->keys);
- bio_get(op->cache_bio);
- bch_insert_data_loop(cl);
+ bch_keylist_init(&op->insert_keys);
+ bio_get(op->bio);
+ bch_data_insert_start(cl);
}
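
For orientation, the renamed insert path is a small closure pipeline; the stages are the functions introduced above:

	/*
	 * bch_data_insert()
	 *   -> bch_data_insert_start()   allocate cache space, split and submit
	 *        the data bios (or bch_data_invalidate() when op->bypass is set)
	 *   -> bch_data_insert_keys()    journal the keys, bch_btree_insert()
	 *        them, then back to _start() until op->insert_data_done
	 */
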
-void bch_btree_insert_async(struct closure *cl)
+/* Congested? */
+
+unsigned bch_get_congested(struct cache_set *c)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
+ int i;
+ long rand;
- if (bch_btree_insert(op, op->c)) {
- s->error = -ENOMEM;
- op->insert_data_done = true;
- }
+ if (!c->congested_read_threshold_us &&
+ !c->congested_write_threshold_us)
+ return 0;
+
+ i = (local_clock_us() - c->congested_last_us) / 1024;
+ if (i < 0)
+ return 0;
+
+ i += atomic_read(&c->congested);
+ if (i >= 0)
+ return 0;
- if (op->insert_data_done) {
- bch_keylist_free(&op->keys);
- closure_return(cl);
- } else
- continue_at(cl, bch_insert_data_loop, bcache_wq);
+ i += CONGESTED_MAX;
+
+ if (i > 0)
+ i = fract_exp_two(i, 6);
+
+ rand = get_random_int();
+ i -= bitmap_weight(&rand, BITS_PER_LONG);
+
+ return i > 0 ? i : 1;
}
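
bch_get_congested() (moved here from further down in this file) turns the cache set's congestion state into a sector threshold: the deeper into congestion c->congested is, the smaller the returned value; time since the last congestion event earns credit back; and random jitter keeps many IOs from flipping to bypass at the same instant. A rough user-space model of the arithmetic, where fract_exp_two() is assumed to behave as sketched (it is not part of this patch):

	#include <stdlib.h>

	#define CONGESTED_MAX	1024

	/* Assumed stand-in for the kernel's fract_exp_two(): roughly 2^(x / 64). */
	static unsigned fract_exp_two(unsigned x, unsigned fract_bits)
	{
		unsigned fract = x & ~(~0U << fract_bits);

		x >>= fract_bits;
		x = 1 << x;
		x += (x * fract) >> fract_bits;
		return x;
	}

	/*
	 * congested stands in for atomic_read(&c->congested) (<= 0 while
	 * congested); elapsed_us for local_clock_us() - c->congested_last_us.
	 */
	static unsigned model_get_congested(int congested, int elapsed_us)
	{
		int i = elapsed_us / 1024;

		if (i < 0)
			return 0;

		i += congested;
		if (i >= 0)
			return 0;			/* congestion has decayed away */

		i += CONGESTED_MAX;
		if (i > 0)
			i = fract_exp_two(i, 6);

		i -= __builtin_popcountl(random());	/* jitter, like bitmap_weight() */

		return i > 0 ? i : 1;	/* sectors; compared in check_should_bypass() */
	}
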
-/* Common code for the make_request functions */
+static void add_sequential(struct task_struct *t)
+{
+ ewma_add(t->sequential_io_avg,
+ t->sequential_io, 8, 0);
-static void request_endio(struct bio *bio, int error)
+ t->sequential_io = 0;
+}
+
+static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
- struct closure *cl = bio->bi_private;
+ return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
+}
- if (error) {
- struct search *s = container_of(cl, struct search, cl);
- s->error = error;
- /* Only cache read errors are recoverable */
- s->recoverable = false;
+static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
+{
+ struct cache_set *c = dc->disk.c;
+ unsigned mode = cache_mode(dc, bio);
+ unsigned sectors, congested = bch_get_congested(c);
+ struct task_struct *task = current;
+ struct io *i;
+
+ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
+ c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
+ (bio->bi_rw & REQ_DISCARD))
+ goto skip;
+
+ if (mode == CACHE_MODE_NONE ||
+ (mode == CACHE_MODE_WRITEAROUND &&
+ (bio->bi_rw & REQ_WRITE)))
+ goto skip;
+
+ if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
+ bio_sectors(bio) & (c->sb.block_size - 1)) {
+ pr_debug("skipping unaligned io");
+ goto skip;
}
- bio_put(bio);
- closure_put(cl);
+ if (bypass_torture_test(dc)) {
+ if ((get_random_int() & 3) == 3)
+ goto skip;
+ else
+ goto rescale;
+ }
+
+ if (!congested && !dc->sequential_cutoff)
+ goto rescale;
+
+ if (!congested &&
+ mode == CACHE_MODE_WRITEBACK &&
+ (bio->bi_rw & REQ_WRITE) &&
+ (bio->bi_rw & REQ_SYNC))
+ goto rescale;
+
+ spin_lock(&dc->io_lock);
+
+ hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+ if (i->last == bio->bi_iter.bi_sector &&
+ time_before(jiffies, i->jiffies))
+ goto found;
+
+ i = list_first_entry(&dc->io_lru, struct io, lru);
+
+ add_sequential(task);
+ i->sequential = 0;
+found:
+ if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+ i->sequential += bio->bi_iter.bi_size;
+
+ i->last = bio_end_sector(bio);
+ i->jiffies = jiffies + msecs_to_jiffies(5000);
+ task->sequential_io = i->sequential;
+
+ hlist_del(&i->hash);
+ hlist_add_head(&i->hash, iohash(dc, i->last));
+ list_move_tail(&i->lru, &dc->io_lru);
+
+ spin_unlock(&dc->io_lock);
+
+ sectors = max(task->sequential_io,
+ task->sequential_io_avg) >> 9;
+
+ if (dc->sequential_cutoff &&
+ sectors >= dc->sequential_cutoff >> 9) {
+ trace_bcache_bypass_sequential(bio);
+ goto skip;
+ }
+
+ if (congested && sectors >= congested) {
+ trace_bcache_bypass_congested(bio);
+ goto skip;
+ }
+
+rescale:
+ bch_rescale_priorities(c, bio_sectors(bio));
+ return false;
+skip:
+ bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
+ return true;
}
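
The sequential-detection half of check_should_bypass() keys recent IOs by their last sector in dc->io_hash: a bio that picks up where a tracked IO left off within five seconds grows that entry's byte count, otherwise the least-recently-used entry is recycled and add_sequential() folds the task's finished run into sequential_io_avg as a weight-8 exponentially weighted moving average. Modulo the fixed-point details of the kernel's ewma_add(), that update amounts to:

	/* Weight-8 EWMA, as used for task->sequential_io_avg (sketch). */
	static unsigned long ewma8(unsigned long avg, unsigned long sample)
	{
		return (avg * 7 + sample) / 8;
	}

The larger of the task's in-progress run and this average, converted to sectors, is then checked against dc->sequential_cutoff and against the congestion threshold computed above.
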
-void bch_cache_read_endio(struct bio *bio, int error)
+/* Cache lookup */
+
+struct search {
+ /* Stack frame for bio_complete */
+ struct closure cl;
+
+ struct bcache_device *d;
+
+ struct bbio bio;
+ struct bio *orig_bio;
+ struct bio *cache_miss;
+
+ unsigned insert_bio_sectors;
+
+ unsigned recoverable:1;
+ unsigned write:1;
+ unsigned read_dirty_data:1;
+
+ unsigned long start_time;
+
+ struct btree_op op;
+ struct data_insert_op iop;
+};
+
+static void bch_cache_read_endio(struct bio *bio, int error)
{
struct bbio *b = container_of(bio, struct bbio, bio);
struct closure *cl = bio->bi_private;
@@ -650,13 +630,113 @@ void bch_cache_read_endio(struct bio *bio, int error)
*/
if (error)
- s->error = error;
- else if (ptr_stale(s->op.c, &b->key, 0)) {
- atomic_long_inc(&s->op.c->cache_read_races);
- s->error = -EINTR;
+ s->iop.error = error;
+ else if (ptr_stale(s->iop.c, &b->key, 0)) {
+ atomic_long_inc(&s->iop.c->cache_read_races);
+ s->iop.error = -EINTR;
}
- bch_bbio_endio(s->op.c, bio, error, "reading from cache");
+ bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
+}
+
+/*
+ * Read from a single key, handling the initial cache miss if the key starts in
+ * the middle of the bio
+ */
+static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
+{
+ struct search *s = container_of(op, struct search, op);
+ struct bio *n, *bio = &s->bio.bio;
+ struct bkey *bio_key;
+ unsigned ptr;
+
+ if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
+ return MAP_CONTINUE;
+
+ if (KEY_INODE(k) != s->iop.inode ||
+ KEY_START(k) > bio->bi_iter.bi_sector) {
+ unsigned bio_sectors = bio_sectors(bio);
+ unsigned sectors = KEY_INODE(k) == s->iop.inode
+ ? min_t(uint64_t, INT_MAX,
+ KEY_START(k) - bio->bi_iter.bi_sector)
+ : INT_MAX;
+
+ int ret = s->d->cache_miss(b, s, bio, sectors);
+ if (ret != MAP_CONTINUE)
+ return ret;
+
+ /* if this was a complete miss we shouldn't get here */
+ BUG_ON(bio_sectors <= sectors);
+ }
+
+ if (!KEY_SIZE(k))
+ return MAP_CONTINUE;
+
+ /* XXX: figure out best pointer - for multiple cache devices */
+ ptr = 0;
+
+ PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
+
+ if (KEY_DIRTY(k))
+ s->read_dirty_data = true;
+
+ n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+ KEY_OFFSET(k) - bio->bi_iter.bi_sector),
+ GFP_NOIO, s->d->bio_split);
+
+ bio_key = &container_of(n, struct bbio, bio)->key;
+ bch_bkey_copy_single_ptr(bio_key, k, ptr);
+
+ bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
+ bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
+
+ n->bi_end_io = bch_cache_read_endio;
+ n->bi_private = &s->cl;
+
+ /*
+ * The bucket we're reading from might be reused while our bio
+ * is in flight, and we could then end up reading the wrong
+ * data.
+ *
+ * We guard against this by checking (in cache_read_endio()) if
+ * the pointer is stale again; if so, we treat it as an error
+ * and reread from the backing device (but we don't pass that
+ * error up anywhere).
+ */
+
+ __bch_submit_bbio(n, b->c);
+ return n == bio ? MAP_DONE : MAP_CONTINUE;
+}
+
+static void cache_lookup(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, iop.cl);
+ struct bio *bio = &s->bio.bio;
+
+ int ret = bch_btree_map_keys(&s->op, s->iop.c,
+ &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
+ cache_lookup_fn, MAP_END_KEY);
+ if (ret == -EAGAIN)
+ continue_at(cl, cache_lookup, bcache_wq);
+
+ closure_return(cl);
+}
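
A note on the return codes that drive the lookup:

	/*
	 * cache_lookup_fn() returns MAP_CONTINUE to keep walking keys,
	 * MAP_DONE once the whole bio has been handled, or an error such as
	 * -EINTR after bch_btree_insert_check_key() has invalidated the btree
	 * iterator; -EAGAIN from bch_btree_map_keys() makes cache_lookup()
	 * reschedule itself on bcache_wq and retry.
	 */
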
+
+/* Common code for the make_request functions */
+
+static void request_endio(struct bio *bio, int error)
+{
+ struct closure *cl = bio->bi_private;
+
+ if (error) {
+ struct search *s = container_of(cl, struct search, cl);
+ s->iop.error = error;
+ /* Only cache read errors are recoverable */
+ s->recoverable = false;
+ }
+
+ bio_put(bio);
+ closure_put(cl);
}
static void bio_complete(struct search *s)
@@ -670,8 +750,8 @@ static void bio_complete(struct search *s)
part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
part_stat_unlock();
- trace_bcache_request_end(s, s->orig_bio);
- bio_endio(s->orig_bio, s->error);
+ trace_bcache_request_end(s->d, s->orig_bio);
+ bio_endio(s->orig_bio, s->iop.error);
s->orig_bio = NULL;
}
}
@@ -679,10 +759,12 @@ static void bio_complete(struct search *s)
static void do_bio_hook(struct search *s)
{
struct bio *bio = &s->bio.bio;
- memcpy(bio, s->orig_bio, sizeof(struct bio));
+ bio_init(bio);
+ __bio_clone(bio, s->orig_bio);
bio->bi_end_io = request_endio;
bio->bi_private = &s->cl;
+
atomic_set(&bio->bi_cnt, 3);
}
@@ -691,11 +773,8 @@ static void search_free(struct closure *cl)
struct search *s = container_of(cl, struct search, cl);
bio_complete(s);
- if (s->op.cache_bio)
- bio_put(s->op.cache_bio);
-
- if (s->unaligned_bvec)
- mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
+ if (s->iop.bio)
+ bio_put(s->iop.bio);
closure_debug_destroy(cl);
mempool_free(s, s->d->c->search);
@@ -703,49 +782,28 @@ static void search_free(struct closure *cl)
static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
- struct bio_vec *bv;
- struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
- memset(s, 0, offsetof(struct search, op.keys));
+ struct search *s;
+
+ s = mempool_alloc(d->c->search, GFP_NOIO);
+ memset(s, 0, offsetof(struct search, iop.insert_keys));
__closure_init(&s->cl, NULL);
- s->op.inode = d->id;
- s->op.c = d->c;
+ s->iop.inode = d->id;
+ s->iop.c = d->c;
s->d = d;
s->op.lock = -1;
- s->task = current;
+ s->iop.write_point = hash_long((unsigned long) current, 16);
s->orig_bio = bio;
s->write = (bio->bi_rw & REQ_WRITE) != 0;
- s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
- s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0;
+ s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
s->recoverable = 1;
s->start_time = jiffies;
do_bio_hook(s);
- if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
- bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
- memcpy(bv, bio_iovec(bio),
- sizeof(struct bio_vec) * bio_segments(bio));
-
- s->bio.bio.bi_io_vec = bv;
- s->unaligned_bvec = 1;
- }
-
return s;
}
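
One detail worth noting about the new write_point field set here:

	/*
	 * write_point = hash_long(current, 16) replaces the old s->task
	 * tracking: the removed pick_data_bucket() matched open buckets by
	 * task pointer, and bch_alloc_sectors() (outside this hunk) is
	 * presumably keyed on this 16-bit value instead, so writes from the
	 * same task still land in the same open bucket.
	 */
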
-static void btree_read_async(struct closure *cl)
-{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
-
- int ret = btree_root(search_recurse, op->c, op);
-
- if (ret == -EAGAIN)
- continue_at(cl, btree_read_async, bcache_wq);
-
- closure_return(cl);
-}
-
/* Cached devices */
static void cached_dev_bio_complete(struct closure *cl)
@@ -759,190 +817,180 @@ static void cached_dev_bio_complete(struct closure *cl)
/* Process reads */
-static void cached_dev_read_complete(struct closure *cl)
+static void cached_dev_cache_miss_done(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
- if (s->op.insert_collision)
- bch_mark_cache_miss_collision(s);
+ if (s->iop.replace_collision)
+ bch_mark_cache_miss_collision(s->iop.c, s->d);
- if (s->op.cache_bio) {
+ if (s->iop.bio) {
int i;
struct bio_vec *bv;
- __bio_for_each_segment(bv, s->op.cache_bio, i, 0)
+ bio_for_each_segment_all(bv, s->iop.bio, i)
__free_page(bv->bv_page);
}
cached_dev_bio_complete(cl);
}
-static void request_read_error(struct closure *cl)
+static void cached_dev_read_error(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
- struct bio_vec *bv;
- int i;
+ struct bio *bio = &s->bio.bio;
if (s->recoverable) {
/* Retry from the backing device: */
trace_bcache_read_retry(s->orig_bio);
- s->error = 0;
- bv = s->bio.bio.bi_io_vec;
+ s->iop.error = 0;
do_bio_hook(s);
- s->bio.bio.bi_io_vec = bv;
-
- if (!s->unaligned_bvec)
- bio_for_each_segment(bv, s->orig_bio, i)
- bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
- else
- memcpy(s->bio.bio.bi_io_vec,
- bio_iovec(s->orig_bio),
- sizeof(struct bio_vec) *
- bio_segments(s->orig_bio));
/* XXX: invalidate cache */
- closure_bio_submit(&s->bio.bio, &s->cl, s->d);
+ closure_bio_submit(bio, cl, s->d);
}
- continue_at(cl, cached_dev_read_complete, NULL);
+ continue_at(cl, cached_dev_cache_miss_done, NULL);
}
-static void request_read_done(struct closure *cl)
+static void cached_dev_read_done(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
/*
- * s->cache_bio != NULL implies that we had a cache miss; cache_bio now
- * contains data ready to be inserted into the cache.
+ * We had a cache miss; cache_bio now contains data ready to be inserted
+ * into the cache.
*
* First, we copy the data we just read from cache_bio's bounce buffers
* to the buffers the original bio pointed to:
*/
- if (s->op.cache_bio) {
- bio_reset(s->op.cache_bio);
- s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
- s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
- s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
- bch_bio_map(s->op.cache_bio, NULL);
+ if (s->iop.bio) {
+ bio_reset(s->iop.bio);
+ s->iop.bio->bi_iter.bi_sector =
+ s->cache_miss->bi_iter.bi_sector;
+ s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
+ s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
+ bch_bio_map(s->iop.bio, NULL);
- bio_copy_data(s->cache_miss, s->op.cache_bio);
+ bio_copy_data(s->cache_miss, s->iop.bio);
bio_put(s->cache_miss);
s->cache_miss = NULL;
}
- if (verify(dc, &s->bio.bio) && s->recoverable)
- bch_data_verify(s);
+ if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
+ bch_data_verify(dc, s->orig_bio);
bio_complete(s);
- if (s->op.cache_bio &&
- !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
- s->op.type = BTREE_REPLACE;
- closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+ if (s->iop.bio &&
+ !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
+ BUG_ON(!s->iop.replace);
+ closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
}
- continue_at(cl, cached_dev_read_complete, NULL);
+ continue_at(cl, cached_dev_cache_miss_done, NULL);
}
-static void request_read_done_bh(struct closure *cl)
+static void cached_dev_read_done_bh(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
- trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);
+ bch_mark_cache_accounting(s->iop.c, s->d,
+ !s->cache_miss, s->iop.bypass);
+ trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
- if (s->error)
- continue_at_nobarrier(cl, request_read_error, bcache_wq);
- else if (s->op.cache_bio || verify(dc, &s->bio.bio))
- continue_at_nobarrier(cl, request_read_done, bcache_wq);
+ if (s->iop.error)
+ continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
+ else if (s->iop.bio || verify(dc, &s->bio.bio))
+ continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
else
- continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
+ continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors)
{
- int ret = 0;
- unsigned reada;
+ int ret = MAP_CONTINUE;
+ unsigned reada = 0;
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- struct bio *miss;
+ struct bio *miss, *cache_bio;
- miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
- if (miss == bio)
- s->op.lookup_done = true;
-
- miss->bi_end_io = request_endio;
- miss->bi_private = &s->cl;
-
- if (s->cache_miss || s->op.skip)
+ if (s->cache_miss || s->iop.bypass) {
+ miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
goto out_submit;
-
- if (miss != bio ||
- (bio->bi_rw & REQ_RAHEAD) ||
- (bio->bi_rw & REQ_META) ||
- s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
- reada = 0;
- else {
- reada = min(dc->readahead >> 9,
- sectors - bio_sectors(miss));
-
- if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
- reada = bdev_sectors(miss->bi_bdev) -
- bio_end_sector(miss);
}
- s->cache_bio_sectors = bio_sectors(miss) + reada;
- s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
- DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
- dc->disk.bio_split);
+ if (!(bio->bi_rw & REQ_RAHEAD) &&
+ !(bio->bi_rw & REQ_META) &&
+ s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
+ reada = min_t(sector_t, dc->readahead >> 9,
+ bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
- if (!s->op.cache_bio)
- goto out_submit;
+ s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
- s->op.cache_bio->bi_sector = miss->bi_sector;
- s->op.cache_bio->bi_bdev = miss->bi_bdev;
- s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
+ s->iop.replace_key = KEY(s->iop.inode,
+ bio->bi_iter.bi_sector + s->insert_bio_sectors,
+ s->insert_bio_sectors);
- s->op.cache_bio->bi_end_io = request_endio;
- s->op.cache_bio->bi_private = &s->cl;
+ ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
+ if (ret)
+ return ret;
+
+ s->iop.replace = true;
+
+ miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
/* btree_search_recurse()'s btree iterator is no good anymore */
- ret = -EINTR;
- if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
- goto out_put;
+ ret = miss == bio ? MAP_DONE : -EINTR;
+
+ cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+ DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
+ dc->disk.bio_split);
+ if (!cache_bio)
+ goto out_submit;
- bch_bio_map(s->op.cache_bio, NULL);
- if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+ cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
+ cache_bio->bi_bdev = miss->bi_bdev;
+ cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
+
+ cache_bio->bi_end_io = request_endio;
+ cache_bio->bi_private = &s->cl;
+
+ bch_bio_map(cache_bio, NULL);
+ if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;
- s->cache_miss = miss;
- bio_get(s->op.cache_bio);
+ if (reada)
+ bch_mark_cache_readahead(s->iop.c, s->d);
- closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
+ s->cache_miss = miss;
+ s->iop.bio = cache_bio;
+ bio_get(cache_bio);
+ closure_bio_submit(cache_bio, &s->cl, s->d);
return ret;
out_put:
- bio_put(s->op.cache_bio);
- s->op.cache_bio = NULL;
+ bio_put(cache_bio);
out_submit:
+ miss->bi_end_io = request_endio;
+ miss->bi_private = &s->cl;
closure_bio_submit(miss, &s->cl, s->d);
return ret;
}
-static void request_read(struct cached_dev *dc, struct search *s)
+static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
struct closure *cl = &s->cl;
- check_should_skip(dc, s);
- closure_call(&s->op.cl, btree_read_async, NULL, cl);
-
- continue_at(cl, request_read_done_bh, NULL);
+ closure_call(&s->iop.cl, cache_lookup, NULL, cl);
+ continue_at(cl, cached_dev_read_done_bh, NULL);
}
/* Process writes */
@@ -956,51 +1004,56 @@ static void cached_dev_write_complete(struct closure *cl)
cached_dev_bio_complete(cl);
}
-static void request_write(struct cached_dev *dc, struct search *s)
+static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
struct closure *cl = &s->cl;
struct bio *bio = &s->bio.bio;
- struct bkey start, end;
- start = KEY(dc->disk.id, bio->bi_sector, 0);
- end = KEY(dc->disk.id, bio_end_sector(bio), 0);
+ struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
+ struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
- bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
+ bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
- check_should_skip(dc, s);
down_read_non_owner(&dc->writeback_lock);
-
if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
- s->op.skip = false;
- s->writeback = true;
+ /*
+ * We overlap with some dirty data undergoing background
+ * writeback, force this write to writeback
+ */
+ s->iop.bypass = false;
+ s->iop.writeback = true;
}
+ /*
+ * Discards aren't _required_ to do anything, so skipping if
+ * check_overlapping returned true is ok
+ *
+ * But check_overlapping drops dirty keys for which io hasn't started,
+ * so we still want to call it.
+ */
if (bio->bi_rw & REQ_DISCARD)
- goto skip;
+ s->iop.bypass = true;
if (should_writeback(dc, s->orig_bio,
cache_mode(dc, bio),
- s->op.skip)) {
- s->op.skip = false;
- s->writeback = true;
+ s->iop.bypass)) {
+ s->iop.bypass = false;
+ s->iop.writeback = true;
}
- if (s->op.skip)
- goto skip;
+ if (s->iop.bypass) {
+ s->iop.bio = s->orig_bio;
+ bio_get(s->iop.bio);
- trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);
-
- if (!s->writeback) {
- s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
- dc->disk.bio_split);
-
- closure_bio_submit(bio, cl, s->d);
- } else {
+ if (!(bio->bi_rw & REQ_DISCARD) ||
+ blk_queue_discard(bdev_get_queue(dc->bdev)))
+ closure_bio_submit(bio, cl, s->d);
+ } else if (s->iop.writeback) {
bch_writeback_add(dc);
- s->op.cache_bio = bio;
+ s->iop.bio = bio;
if (bio->bi_rw & REQ_FLUSH) {
/* Also need to send a flush to the backing device */
- struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
+ struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
dc->disk.bio_split);
flush->bi_rw = WRITE_FLUSH;
@@ -1010,36 +1063,26 @@ static void request_write(struct cached_dev *dc, struct search *s)
closure_bio_submit(flush, cl, s->d);
}
- }
-out:
- closure_call(&s->op.cl, bch_insert_data, NULL, cl);
- continue_at(cl, cached_dev_write_complete, NULL);
-skip:
- s->op.skip = true;
- s->op.cache_bio = s->orig_bio;
- bio_get(s->op.cache_bio);
+ } else {
+ s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
+ dc->disk.bio_split);
- if ((bio->bi_rw & REQ_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(dc->bdev)))
- goto out;
+ closure_bio_submit(bio, cl, s->d);
+ }
- closure_bio_submit(bio, cl, s->d);
- goto out;
+ closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
+ continue_at(cl, cached_dev_write_complete, NULL);
}
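
After the bypass/writeback flags are settled, the write path above reduces to a three-way policy; a compact sketch with illustrative names (not kernel code):

	#include <stdbool.h>

	enum write_policy { WP_BYPASS, WP_WRITEBACK, WP_WRITETHROUGH };

	/* Mirrors the branch structure of cached_dev_write() above. */
	static enum write_policy pick_write_policy(bool bypass, bool writeback)
	{
		if (bypass)
			return WP_BYPASS;	/* data to backing device only; cached range invalidated */
		if (writeback)
			return WP_WRITEBACK;	/* data to cache only, key marked dirty */
		return WP_WRITETHROUGH;		/* clone to cache, write also sent to backing device */
	}

In all three cases the function ends with closure_call(&s->iop.cl, bch_data_insert, NULL, cl): for bypass the insert op only carries the range to invalidate, for writeback it carries the data destined for the cache, and for writethrough it carries the bounce clone.
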
-static void request_nodata(struct cached_dev *dc, struct search *s)
+static void cached_dev_nodata(struct closure *cl)
{
- struct closure *cl = &s->cl;
+ struct search *s = container_of(cl, struct search, cl);
struct bio *bio = &s->bio.bio;
- if (bio->bi_rw & REQ_DISCARD) {
- request_write(dc, s);
- return;
- }
-
- if (s->op.flush_journal)
- bch_journal_meta(s->op.c, cl);
+ if (s->iop.flush_journal)
+ bch_journal_meta(s->iop.c, cl);
+ /* If it's a flush, we send the flush to the backing device too */
closure_bio_submit(bio, cl, s->d);
continue_at(cl, cached_dev_bio_complete, NULL);
@@ -1047,134 +1090,6 @@ static void request_nodata(struct cached_dev *dc, struct search *s)
/* Cached devices - read & write stuff */
-unsigned bch_get_congested(struct cache_set *c)
-{
- int i;
- long rand;
-
- if (!c->congested_read_threshold_us &&
- !c->congested_write_threshold_us)
- return 0;
-
- i = (local_clock_us() - c->congested_last_us) / 1024;
- if (i < 0)
- return 0;
-
- i += atomic_read(&c->congested);
- if (i >= 0)
- return 0;
-
- i += CONGESTED_MAX;
-
- if (i > 0)
- i = fract_exp_two(i, 6);
-
- rand = get_random_int();
- i -= bitmap_weight(&rand, BITS_PER_LONG);
-
- return i > 0 ? i : 1;
-}
-
-static void add_sequential(struct task_struct *t)
-{
- ewma_add(t->sequential_io_avg,
- t->sequential_io, 8, 0);
-
- t->sequential_io = 0;
-}
-
-static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
-{
- return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
-}
-
-static void check_should_skip(struct cached_dev *dc, struct search *s)
-{
- struct cache_set *c = s->op.c;
- struct bio *bio = &s->bio.bio;
- unsigned mode = cache_mode(dc, bio);
- unsigned sectors, congested = bch_get_congested(c);
-
- if (atomic_read(&dc->disk.detaching) ||
- c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
- (bio->bi_rw & REQ_DISCARD))
- goto skip;
-
- if (mode == CACHE_MODE_NONE ||
- (mode == CACHE_MODE_WRITEAROUND &&
- (bio->bi_rw & REQ_WRITE)))
- goto skip;
-
- if (bio->bi_sector & (c->sb.block_size - 1) ||
- bio_sectors(bio) & (c->sb.block_size - 1)) {
- pr_debug("skipping unaligned io");
- goto skip;
- }
-
- if (!congested && !dc->sequential_cutoff)
- goto rescale;
-
- if (!congested &&
- mode == CACHE_MODE_WRITEBACK &&
- (bio->bi_rw & REQ_WRITE) &&
- (bio->bi_rw & REQ_SYNC))
- goto rescale;
-
- if (dc->sequential_merge) {
- struct io *i;
-
- spin_lock(&dc->io_lock);
-
- hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
- if (i->last == bio->bi_sector &&
- time_before(jiffies, i->jiffies))
- goto found;
-
- i = list_first_entry(&dc->io_lru, struct io, lru);
-
- add_sequential(s->task);
- i->sequential = 0;
-found:
- if (i->sequential + bio->bi_size > i->sequential)
- i->sequential += bio->bi_size;
-
- i->last = bio_end_sector(bio);
- i->jiffies = jiffies + msecs_to_jiffies(5000);
- s->task->sequential_io = i->sequential;
-
- hlist_del(&i->hash);
- hlist_add_head(&i->hash, iohash(dc, i->last));
- list_move_tail(&i->lru, &dc->io_lru);
-
- spin_unlock(&dc->io_lock);
- } else {
- s->task->sequential_io = bio->bi_size;
-
- add_sequential(s->task);
- }
-
- sectors = max(s->task->sequential_io,
- s->task->sequential_io_avg) >> 9;
-
- if (dc->sequential_cutoff &&
- sectors >= dc->sequential_cutoff >> 9) {
- trace_bcache_bypass_sequential(s->orig_bio);
- goto skip;
- }
-
- if (congested && sectors >= congested) {
- trace_bcache_bypass_congested(s->orig_bio);
- goto skip;
- }
-
-rescale:
- bch_rescale_priorities(c, bio_sectors(bio));
- return;
-skip:
- bch_mark_sectors_bypassed(s, bio_sectors(bio));
- s->op.skip = true;
-}
-
static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
struct search *s;
@@ -1188,18 +1103,28 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
part_stat_unlock();
bio->bi_bdev = dc->bdev;
- bio->bi_sector += dc->sb.data_offset;
+ bio->bi_iter.bi_sector += dc->sb.data_offset;
if (cached_dev_get(dc)) {
s = search_alloc(bio, d);
- trace_bcache_request_start(s, bio);
-
- if (!bio_has_data(bio))
- request_nodata(dc, s);
- else if (rw)
- request_write(dc, s);
- else
- request_read(dc, s);
+ trace_bcache_request_start(s->d, bio);
+
+ if (!bio->bi_iter.bi_size) {
+ /*
+ * can't call bch_journal_meta from under
+ * generic_make_request
+ */
+ continue_at_nobarrier(&s->cl,
+ cached_dev_nodata,
+ bcache_wq);
+ } else {
+ s->iop.bypass = check_should_bypass(dc, bio);
+
+ if (rw)
+ cached_dev_write(dc, s);
+ else
+ cached_dev_read(dc, s);
+ }
} else {
if ((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(dc->bdev)))
@@ -1256,27 +1181,37 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
static int flash_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors)
{
- struct bio_vec *bv;
- int i;
+ struct bio_vec bv;
+ struct bvec_iter iter;
/* Zero fill bio */
- bio_for_each_segment(bv, bio, i) {
- unsigned j = min(bv->bv_len >> 9, sectors);
+ bio_for_each_segment(bv, bio, iter) {
+ unsigned j = min(bv.bv_len >> 9, sectors);
- void *p = kmap(bv->bv_page);
- memset(p + bv->bv_offset, 0, j << 9);
- kunmap(bv->bv_page);
+ void *p = kmap(bv.bv_page);
+ memset(p + bv.bv_offset, 0, j << 9);
+ kunmap(bv.bv_page);
sectors -= j;
}
- bio_advance(bio, min(sectors << 9, bio->bi_size));
+ bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
- if (!bio->bi_size)
- s->op.lookup_done = true;
+ if (!bio->bi_iter.bi_size)
+ return MAP_DONE;
- return 0;
+ return MAP_CONTINUE;
+}
+
+static void flash_dev_nodata(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+
+ if (s->iop.flush_journal)
+ bch_journal_meta(s->iop.c, cl);
+
+ continue_at(cl, search_free, NULL);
}
static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
@@ -1295,23 +1230,28 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
cl = &s->cl;
bio = &s->bio.bio;
- trace_bcache_request_start(s, bio);
+ trace_bcache_request_start(s->d, bio);
- if (bio_has_data(bio) && !rw) {
- closure_call(&s->op.cl, btree_read_async, NULL, cl);
- } else if (bio_has_data(bio) || s->op.skip) {
- bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
- &KEY(d->id, bio->bi_sector, 0),
+ if (!bio->bi_iter.bi_size) {
+ /*
+ * can't call bch_journal_meta from under
+ * generic_make_request
+ */
+ continue_at_nobarrier(&s->cl,
+ flash_dev_nodata,
+ bcache_wq);
+ } else if (rw) {
+ bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
+ &KEY(d->id, bio->bi_iter.bi_sector, 0),
&KEY(d->id, bio_end_sector(bio), 0));
- s->writeback = true;
- s->op.cache_bio = bio;
+ s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
+ s->iop.writeback = true;
+ s->iop.bio = bio;
- closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+ closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
} else {
- /* No data - probably a cache flush */
- if (s->op.flush_journal)
- bch_journal_meta(s->op.c, cl);
+ closure_call(&s->iop.cl, cache_lookup, NULL, cl);
}
continue_at(cl, search_free, NULL);
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 57dc4784f4f4..2cd65bf073c2 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -3,40 +3,33 @@
#include <linux/cgroup.h>
-struct search {
- /* Stack frame for bio_complete */
+struct data_insert_op {
struct closure cl;
+ struct cache_set *c;
+ struct bio *bio;
- struct bcache_device *d;
- struct task_struct *task;
-
- struct bbio bio;
- struct bio *orig_bio;
- struct bio *cache_miss;
- unsigned cache_bio_sectors;
-
- unsigned recoverable:1;
- unsigned unaligned_bvec:1;
+ unsigned inode;
+ uint16_t write_point;
+ uint16_t write_prio;
+ short error;
- unsigned write:1;
+ unsigned bypass:1;
unsigned writeback:1;
+ unsigned flush_journal:1;
+ unsigned csum:1;
- /* IO error returned to s->bio */
- short error;
- unsigned long start_time;
+ unsigned replace:1;
+ unsigned replace_collision:1;
+
+ unsigned insert_data_done:1;
- /* Anything past op->keys won't get zeroed in do_bio_hook */
- struct btree_op op;
+ /* Anything past this point won't get zeroed in search_alloc() */
+ struct keylist insert_keys;
+ BKEY_PADDED(replace_key);
};
-void bch_cache_read_endio(struct bio *, int);
unsigned bch_get_congested(struct cache_set *);
-void bch_insert_data(struct closure *cl);
-void bch_btree_insert_async(struct closure *);
-void bch_cache_read_endio(struct bio *, int);
-
-void bch_open_buckets_free(struct cache_set *);
-int bch_open_buckets_alloc(struct cache_set *);
+void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
void bch_flash_dev_request_init(struct bcache_device *d);
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index b8730e714d69..84d0782f702e 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -7,7 +7,6 @@
#include "bcache.h"
#include "stats.h"
#include "btree.h"
-#include "request.h"
#include "sysfs.h"
/*
@@ -196,35 +195,36 @@ static void mark_cache_stats(struct cache_stat_collector *stats,
atomic_inc(&stats->cache_bypass_misses);
}
-void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass)
+void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+ bool hit, bool bypass)
{
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
mark_cache_stats(&dc->accounting.collector, hit, bypass);
- mark_cache_stats(&s->op.c->accounting.collector, hit, bypass);
+ mark_cache_stats(&c->accounting.collector, hit, bypass);
#ifdef CONFIG_CGROUP_BCACHE
mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
#endif
}
-void bch_mark_cache_readahead(struct search *s)
+void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
{
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_readaheads);
- atomic_inc(&s->op.c->accounting.collector.cache_readaheads);
+ atomic_inc(&c->accounting.collector.cache_readaheads);
}
-void bch_mark_cache_miss_collision(struct search *s)
+void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_miss_collisions);
- atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions);
+ atomic_inc(&c->accounting.collector.cache_miss_collisions);
}
-void bch_mark_sectors_bypassed(struct search *s, int sectors)
+void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,
+ int sectors)
{
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
- atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
+ atomic_add(sectors, &c->accounting.collector.sectors_bypassed);
}
void bch_cache_accounting_init(struct cache_accounting *acc,
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
index c7c7a8fd29fe..adbff141c887 100644
--- a/drivers/md/bcache/stats.h
+++ b/drivers/md/bcache/stats.h
@@ -38,7 +38,9 @@ struct cache_accounting {
struct cache_stats day;
};
-struct search;
+struct cache_set;
+struct cached_dev;
+struct bcache_device;
void bch_cache_accounting_init(struct cache_accounting *acc,
struct closure *parent);
@@ -50,9 +52,10 @@ void bch_cache_accounting_clear(struct cache_accounting *acc);
void bch_cache_accounting_destroy(struct cache_accounting *acc);
-void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass);
-void bch_mark_cache_readahead(struct search *s);
-void bch_mark_cache_miss_collision(struct search *s);
-void bch_mark_sectors_bypassed(struct search *s, int sectors);
+void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *,
+ bool, bool);
+void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *);
+void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *);
+void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int);
#endif /* _BCACHE_STATS_H_ */
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 547c4c57b052..60fb6044b953 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -16,6 +16,7 @@
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
+#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
@@ -45,21 +46,13 @@ const char * const bch_cache_modes[] = {
NULL
};
-struct uuid_entry_v0 {
- uint8_t uuid[16];
- uint8_t label[32];
- uint32_t first_reg;
- uint32_t last_reg;
- uint32_t invalidated;
- uint32_t pad;
-};
-
static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);
-static int bcache_major, bcache_minor;
+static int bcache_major;
+static DEFINE_IDA(bcache_minor);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
@@ -240,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
unsigned i;
- bio->bi_sector = SB_SECTOR;
- bio->bi_rw = REQ_SYNC|REQ_META;
- bio->bi_size = SB_SIZE;
+ bio->bi_iter.bi_sector = SB_SECTOR;
+ bio->bi_rw = REQ_SYNC|REQ_META;
+ bio->bi_iter.bi_size = SB_SIZE;
bch_bio_map(bio, NULL);
out->offset = cpu_to_le64(sb->offset);
@@ -354,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
struct bio *bio = bch_bbio_alloc(c);
bio->bi_rw = REQ_SYNC|REQ_META|rw;
- bio->bi_size = KEY_SIZE(k) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
bio->bi_end_io = uuid_endio;
bio->bi_private = cl;
@@ -382,7 +375,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
struct bkey *k = &j->uuid_bucket;
- if (__bch_ptr_invalid(c, 1, k))
+ if (bch_btree_ptr_invalid(c, k))
return "bad uuid pointer";
bkey_copy(&c->uuid_bucket, k);
@@ -427,7 +420,7 @@ static int __uuid_write(struct cache_set *c)
lockdep_assert_held(&bch_register_lock);
- if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl))
+ if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
return 1;
SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@@ -435,7 +428,7 @@ static int __uuid_write(struct cache_set *c)
closure_sync(&cl);
bkey_copy(&c->uuid_bucket, &k.key);
- __bkey_put(c, &k.key);
+ bkey_put(c, &k.key);
return 0;
}
@@ -510,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
closure_init_stack(cl);
- bio->bi_sector = bucket * ca->sb.bucket_size;
- bio->bi_bdev = ca->bdev;
- bio->bi_rw = REQ_SYNC|REQ_META|rw;
- bio->bi_size = bucket_bytes(ca);
+ bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
+ bio->bi_bdev = ca->bdev;
+ bio->bi_rw = REQ_SYNC|REQ_META|rw;
+ bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = prio_endio;
bio->bi_private = ca;
@@ -562,10 +555,10 @@ void bch_prio_write(struct cache *ca)
}
p->next_bucket = ca->prio_buckets[i + 1];
- p->magic = pset_magic(ca);
+ p->magic = pset_magic(&ca->sb);
p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
- bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
+ bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
BUG_ON(bucket == -1);
mutex_unlock(&ca->set->bucket_lock);
@@ -613,7 +606,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
pr_warn("bad csum reading priorities");
- if (p->magic != pset_magic(ca))
+ if (p->magic != pset_magic(&ca->sb))
pr_warn("bad magic reading priorities");
bucket = p->next_bucket;
@@ -630,7 +623,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
static int open_dev(struct block_device *b, fmode_t mode)
{
struct bcache_device *d = b->bd_disk->private_data;
- if (atomic_read(&d->closing))
+ if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
return -ENXIO;
closure_get(&d->cl);
@@ -659,20 +652,24 @@ static const struct block_device_operations bcache_ops = {
void bcache_device_stop(struct bcache_device *d)
{
- if (!atomic_xchg(&d->closing, 1))
+ if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
closure_queue(&d->cl);
}
static void bcache_device_unlink(struct bcache_device *d)
{
- unsigned i;
- struct cache *ca;
+ lockdep_assert_held(&bch_register_lock);
- sysfs_remove_link(&d->c->kobj, d->name);
- sysfs_remove_link(&d->kobj, "cache");
+ if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
+ unsigned i;
+ struct cache *ca;
- for_each_cache(ca, d->c, i)
- bd_unlink_disk_holder(ca->bdev, d->disk);
+ sysfs_remove_link(&d->c->kobj, d->name);
+ sysfs_remove_link(&d->kobj, "cache");
+
+ for_each_cache(ca, d->c, i)
+ bd_unlink_disk_holder(ca->bdev, d->disk);
+ }
}
static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
@@ -696,19 +693,16 @@ static void bcache_device_detach(struct bcache_device *d)
{
lockdep_assert_held(&bch_register_lock);
- if (atomic_read(&d->detaching)) {
+ if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
struct uuid_entry *u = d->c->uuids + d->id;
SET_UUID_FLASH_ONLY(u, 0);
memcpy(u->uuid, invalid_uuid, 16);
u->invalidated = cpu_to_le32(get_seconds());
bch_uuid_write(d->c);
-
- atomic_set(&d->detaching, 0);
}
- if (!d->flush_done)
- bcache_device_unlink(d);
+ bcache_device_unlink(d);
d->c->devices[d->id] = NULL;
closure_put(&d->c->caching);
@@ -739,14 +733,18 @@ static void bcache_device_free(struct bcache_device *d)
del_gendisk(d->disk);
if (d->disk && d->disk->queue)
blk_cleanup_queue(d->disk->queue);
- if (d->disk)
+ if (d->disk) {
+ ida_simple_remove(&bcache_minor, d->disk->first_minor);
put_disk(d->disk);
+ }
bio_split_pool_free(&d->bio_split_hook);
- if (d->unaligned_bvec)
- mempool_destroy(d->unaligned_bvec);
if (d->bio_split)
bioset_free(d->bio_split);
+ if (is_vmalloc_addr(d->full_dirty_stripes))
+ vfree(d->full_dirty_stripes);
+ else
+ kfree(d->full_dirty_stripes);
if (is_vmalloc_addr(d->stripe_sectors_dirty))
vfree(d->stripe_sectors_dirty);
else
@@ -760,15 +758,19 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
{
struct request_queue *q;
size_t n;
+ int minor;
- if (!d->stripe_size_bits)
- d->stripe_size_bits = 31;
+ if (!d->stripe_size)
+ d->stripe_size = 1 << 31;
- d->nr_stripes = round_up(sectors, 1 << d->stripe_size_bits) >>
- d->stripe_size_bits;
+ d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
- if (!d->nr_stripes || d->nr_stripes > SIZE_MAX / sizeof(atomic_t))
+ if (!d->nr_stripes ||
+ d->nr_stripes > INT_MAX ||
+ d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
+ pr_err("nr_stripes too large");
return -ENOMEM;
+ }
n = d->nr_stripes * sizeof(atomic_t);
d->stripe_sectors_dirty = n < PAGE_SIZE << 6
@@ -777,22 +779,36 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (!d->stripe_sectors_dirty)
return -ENOMEM;
+ n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
+ d->full_dirty_stripes = n < PAGE_SIZE << 6
+ ? kzalloc(n, GFP_KERNEL)
+ : vzalloc(n);
+ if (!d->full_dirty_stripes)
+ return -ENOMEM;
+
+ minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
+ if (minor < 0)
+ return minor;
+
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
- !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
- sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
bio_split_pool_init(&d->bio_split_hook) ||
- !(d->disk = alloc_disk(1)) ||
- !(q = blk_alloc_queue(GFP_KERNEL)))
+ !(d->disk = alloc_disk(1))) {
+ ida_simple_remove(&bcache_minor, minor);
return -ENOMEM;
+ }
set_capacity(d->disk, sectors);
- snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
+ snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);
d->disk->major = bcache_major;
- d->disk->first_minor = bcache_minor++;
+ d->disk->first_minor = minor;
d->disk->fops = &bcache_ops;
d->disk->private_data = d;
+ q = blk_alloc_queue(GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
blk_queue_make_request(q, NULL);
d->disk->queue = q;
q->queuedata = d;
@@ -874,7 +890,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
struct closure cl;
closure_init_stack(&cl);
- BUG_ON(!atomic_read(&dc->disk.detaching));
+ BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
BUG_ON(atomic_read(&dc->count));
mutex_lock(&bch_register_lock);
@@ -888,6 +904,8 @@ static void cached_dev_detach_finish(struct work_struct *w)
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
+ clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
+
mutex_unlock(&bch_register_lock);
pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
@@ -900,10 +918,10 @@ void bch_cached_dev_detach(struct cached_dev *dc)
{
lockdep_assert_held(&bch_register_lock);
- if (atomic_read(&dc->disk.closing))
+ if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
return;
- if (atomic_xchg(&dc->disk.detaching, 1))
+ if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
return;
/*
@@ -1030,6 +1048,7 @@ static void cached_dev_free(struct closure *cl)
struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
cancel_delayed_work_sync(&dc->writeback_rate_update);
+ kthread_stop(dc->writeback_thread);
mutex_lock(&bch_register_lock);
@@ -1058,11 +1077,7 @@ static void cached_dev_flush(struct closure *cl)
struct bcache_device *d = &dc->disk;
mutex_lock(&bch_register_lock);
- d->flush_done = 1;
-
- if (d->c)
- bcache_device_unlink(d);
-
+ bcache_device_unlink(d);
mutex_unlock(&bch_register_lock);
bch_cache_accounting_destroy(&dc->accounting);
@@ -1088,7 +1103,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
spin_lock_init(&dc->io_lock);
bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
- dc->sequential_merge = true;
dc->sequential_cutoff = 4 << 20;
for (io = dc->io; io < dc->io + RECENT_IO; io++) {
@@ -1260,7 +1274,8 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
va_list args;
- if (test_bit(CACHE_SET_STOPPING, &c->flags))
+ if (c->on_error != ON_ERROR_PANIC &&
+ test_bit(CACHE_SET_STOPPING, &c->flags))
return false;
/* XXX: we can be called from atomic context
@@ -1275,6 +1290,9 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
printk(", disabling caching\n");
+ if (c->on_error == ON_ERROR_PANIC)
+ panic("panic forced after error\n");
+
bch_cache_set_unregister(c);
return true;
}
@@ -1339,6 +1357,9 @@ static void cache_set_flush(struct closure *cl)
kobject_put(&c->internal);
kobject_del(&c->kobj);
+ if (c->gc_thread)
+ kthread_stop(c->gc_thread);
+
if (!IS_ERR_OR_NULL(c->root))
list_add(&c->root->list, &c->btree_cache);
@@ -1433,12 +1454,19 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->sort_crit_factor = int_sqrt(c->btree_pages);
- mutex_init(&c->bucket_lock);
- mutex_init(&c->sort_lock);
- spin_lock_init(&c->sort_time_lock);
closure_init_unlocked(&c->sb_write);
+ mutex_init(&c->bucket_lock);
+ init_waitqueue_head(&c->try_wait);
+ init_waitqueue_head(&c->bucket_wait);
closure_init_unlocked(&c->uuid_write);
- spin_lock_init(&c->btree_read_time_lock);
+ mutex_init(&c->sort_lock);
+
+ spin_lock_init(&c->sort_time.lock);
+ spin_lock_init(&c->btree_gc_time.lock);
+ spin_lock_init(&c->btree_split_time.lock);
+ spin_lock_init(&c->btree_read_time.lock);
+ spin_lock_init(&c->try_harder_time.lock);
+
bch_moving_init_cache_set(c);
INIT_LIST_HEAD(&c->list);
@@ -1483,11 +1511,10 @@ static void run_cache_set(struct cache_set *c)
const char *err = "cannot allocate memory";
struct cached_dev *dc, *t;
struct cache *ca;
+ struct closure cl;
unsigned i;
- struct btree_op op;
- bch_btree_op_init_stack(&op);
- op.lock = SHRT_MAX;
+ closure_init_stack(&cl);
for_each_cache(ca, c, i)
c->nbuckets += ca->sb.nbuckets;
@@ -1498,7 +1525,7 @@ static void run_cache_set(struct cache_set *c)
struct jset *j;
err = "cannot allocate memory for journal";
- if (bch_journal_read(c, &journal, &op))
+ if (bch_journal_read(c, &journal))
goto err;
pr_debug("btree_journal_read() done");
@@ -1522,23 +1549,23 @@ static void run_cache_set(struct cache_set *c)
k = &j->btree_root;
err = "bad btree root";
- if (__bch_ptr_invalid(c, j->btree_level + 1, k))
+ if (bch_btree_ptr_invalid(c, k))
goto err;
err = "error reading btree root";
- c->root = bch_btree_node_get(c, k, j->btree_level, &op);
+ c->root = bch_btree_node_get(c, k, j->btree_level, true);
if (IS_ERR_OR_NULL(c->root))
goto err;
list_del_init(&c->root->list);
rw_unlock(true, c->root);
- err = uuid_read(c, j, &op.cl);
+ err = uuid_read(c, j, &cl);
if (err)
goto err;
err = "error in recovery";
- if (bch_btree_check(c, &op))
+ if (bch_btree_check(c))
goto err;
bch_journal_mark(c, &journal);
@@ -1570,11 +1597,9 @@ static void run_cache_set(struct cache_set *c)
if (j->version < BCACHE_JSET_VERSION_UUID)
__uuid_write(c);
- bch_journal_replay(c, &journal, &op);
+ bch_journal_replay(c, &journal);
} else {
pr_notice("invalidating existing data");
- /* Don't want invalidate_buckets() to queue a gc yet */
- closure_lock(&c->gc, NULL);
for_each_cache(ca, c, i) {
unsigned j;
@@ -1600,15 +1625,15 @@ static void run_cache_set(struct cache_set *c)
err = "cannot allocate new UUID bucket";
if (__uuid_write(c))
- goto err_unlock_gc;
+ goto err;
err = "cannot allocate new btree root";
- c->root = bch_btree_node_alloc(c, 0, &op.cl);
+ c->root = bch_btree_node_alloc(c, 0, true);
if (IS_ERR_OR_NULL(c->root))
- goto err_unlock_gc;
+ goto err;
bkey_copy_key(&c->root->key, &MAX_KEY);
- bch_btree_node_write(c->root, &op.cl);
+ bch_btree_node_write(c->root, &cl);
bch_btree_set_root(c->root);
rw_unlock(true, c->root);
@@ -1621,14 +1646,14 @@ static void run_cache_set(struct cache_set *c)
SET_CACHE_SYNC(&c->sb, true);
bch_journal_next(&c->journal);
- bch_journal_meta(c, &op.cl);
-
- /* Unlock */
- closure_set_stopped(&c->gc.cl);
- closure_put(&c->gc.cl);
+ bch_journal_meta(c, &cl);
}
- closure_sync(&op.cl);
+ err = "error starting gc thread";
+ if (bch_gc_thread_start(c))
+ goto err;
+
+ closure_sync(&cl);
c->sb.last_mount = get_seconds();
bcache_write_super(c);
@@ -1638,13 +1663,10 @@ static void run_cache_set(struct cache_set *c)
flash_devs_run(c);
return;
-err_unlock_gc:
- closure_set_stopped(&c->gc.cl);
- closure_put(&c->gc.cl);
err:
- closure_sync(&op.cl);
+ closure_sync(&cl);
/* XXX: test this, it's broken */
- bch_cache_set_error(c, err);
+ bch_cache_set_error(c, "%s", err);
}
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
@@ -1725,8 +1747,6 @@ void bch_cache_release(struct kobject *kobj)
if (ca->set)
ca->set->cache[ca->sb.nr_this_dev] = NULL;
- bch_cache_allocator_exit(ca);
-
bio_split_pool_free(&ca->bio_split_hook);
free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
@@ -1758,8 +1778,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- INIT_LIST_HEAD(&ca->discards);
-
bio_init(&ca->journal.bio);
ca->journal.bio.bi_max_vecs = 8;
ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
@@ -2006,7 +2024,6 @@ static struct notifier_block reboot = {
static void bcache_exit(void)
{
bch_debug_exit();
- bch_writeback_exit();
bch_request_exit();
bch_btree_exit();
if (bcache_kobj)
@@ -2039,7 +2056,6 @@ static int __init bcache_init(void)
sysfs_create_files(bcache_kobj, files) ||
bch_btree_init() ||
bch_request_init() ||
- bch_writeback_init() ||
bch_debug_init(bcache_kobj))
goto err;
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 924dcfdae111..80d4c2bee18a 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -21,6 +21,12 @@ static const char * const cache_replacement_policies[] = {
NULL
};
+static const char * const error_actions[] = {
+ "unregister",
+ "panic",
+ NULL
+};
+
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
@@ -66,7 +72,6 @@ rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);
rw_attribute(sequential_cutoff);
-rw_attribute(sequential_merge);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
@@ -90,11 +95,14 @@ rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
+rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
+rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
+rw_attribute(expensive_debug_checks);
rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
@@ -116,6 +124,7 @@ SHOW(__bch_cached_dev)
sysfs_printf(data_csum, "%i", dc->disk.data_csum);
var_printf(verify, "%i");
+ var_printf(bypass_torture_test, "%i");
var_printf(writeback_metadata, "%i");
var_printf(writeback_running, "%i");
var_print(writeback_delay);
@@ -150,10 +159,9 @@ SHOW(__bch_cached_dev)
sysfs_hprint(dirty_data,
bcache_dev_sectors_dirty(&dc->disk) << 9);
- sysfs_hprint(stripe_size, (1 << dc->disk.stripe_size_bits) << 9);
+ sysfs_hprint(stripe_size, dc->disk.stripe_size << 9);
var_printf(partial_stripes_expensive, "%u");
- var_printf(sequential_merge, "%i");
var_hprint(sequential_cutoff);
var_hprint(readahead);
@@ -185,6 +193,7 @@ STORE(__cached_dev)
sysfs_strtoul(data_csum, dc->disk.data_csum);
d_strtoul(verify);
+ d_strtoul(bypass_torture_test);
d_strtoul(writeback_metadata);
d_strtoul(writeback_running);
d_strtoul(writeback_delay);
@@ -199,7 +208,6 @@ STORE(__cached_dev)
dc->writeback_rate_p_term_inverse, 1, INT_MAX);
d_strtoul(writeback_rate_d_smooth);
- d_strtoul(sequential_merge);
d_strtoi_h(sequential_cutoff);
d_strtoi_h(readahead);
@@ -311,7 +319,6 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_stripe_size,
&sysfs_partial_stripes_expensive,
&sysfs_sequential_cutoff,
- &sysfs_sequential_merge,
&sysfs_clear_stats,
&sysfs_running,
&sysfs_state,
@@ -319,6 +326,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
&sysfs_verify,
+ &sysfs_bypass_torture_test,
#endif
NULL
};
@@ -366,7 +374,7 @@ STORE(__bch_flash_dev)
}
if (attr == &sysfs_unregister) {
- atomic_set(&d->detaching, 1);
+ set_bit(BCACHE_DEV_DETACHING, &d->flags);
bcache_device_stop(d);
}
@@ -481,7 +489,6 @@ lock_root:
sysfs_print(btree_used_percent, btree_used(c));
sysfs_print(btree_nodes, c->gc_stats.nodes);
- sysfs_hprint(dirty_data, c->gc_stats.dirty);
sysfs_hprint(average_key_size, average_key_size(c));
sysfs_print(cache_read_races,
@@ -492,6 +499,10 @@ lock_root:
sysfs_print(writeback_keys_failed,
atomic_long_read(&c->writeback_keys_failed));
+ if (attr == &sysfs_errors)
+ return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
+ c->on_error);
+
/* See count_io_errors for why 88 */
sysfs_print(io_error_halflife, c->error_decay * 88);
sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT);
@@ -506,6 +517,8 @@ lock_root:
sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
sysfs_printf(verify, "%i", c->verify);
sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
+ sysfs_printf(expensive_debug_checks,
+ "%i", c->expensive_debug_checks);
sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
@@ -555,7 +568,7 @@ STORE(__bch_cache_set)
}
if (attr == &sysfs_trigger_gc)
- bch_queue_gc(c);
+ wake_up_gc(c);
if (attr == &sysfs_prune_cache) {
struct shrink_control sc;
@@ -569,6 +582,15 @@ STORE(__bch_cache_set)
sysfs_strtoul(congested_write_threshold_us,
c->congested_write_threshold_us);
+ if (attr == &sysfs_errors) {
+ ssize_t v = bch_read_string_list(buf, error_actions);
+
+ if (v < 0)
+ return v;
+
+ c->on_error = v;
+ }
+
if (attr == &sysfs_io_error_limit)
c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;
@@ -579,6 +601,7 @@ STORE(__bch_cache_set)
sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
sysfs_strtoul(verify, c->verify);
sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
+ sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);
@@ -618,8 +641,8 @@ static struct attribute *bch_cache_set_files[] = {
&sysfs_cache_available_percent,
&sysfs_average_key_size,
- &sysfs_dirty_data,
+ &sysfs_errors,
&sysfs_io_error_limit,
&sysfs_io_error_halflife,
&sysfs_congested,
@@ -653,6 +676,7 @@ static struct attribute *bch_cache_set_internal_files[] = {
#ifdef CONFIG_BCACHE_DEBUG
&sysfs_verify,
&sysfs_key_merging_disabled,
+ &sysfs_expensive_debug_checks,
#endif
&sysfs_gc_always_rewrite,
&sysfs_btree_shrinker_disabled,
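
The new errors attribute above follows bcache's usual pattern for enumerated sysfs attributes: a NULL-terminated string table, bch_snprint_string_list() on the show side (which prints the whole table and marks the current selection) and bch_read_string_list() on the store side (which maps the written string back to an index). Stripped of the SHOW()/STORE() macro plumbing, the pattern reduces to roughly the sketch below; the demo_* names are illustrative and the code assumes bcache's internal headers ("bcache.h", "util.h"):

static const char * const demo_error_actions[] = {
	"unregister",
	"panic",
	NULL
};

static ssize_t demo_show_errors(struct cache_set *c, char *buf)
{
	/* e.g. "[unregister] panic\n" when c->on_error == 0 */
	return bch_snprint_string_list(buf, PAGE_SIZE, demo_error_actions,
				       c->on_error);
}

static ssize_t demo_store_errors(struct cache_set *c, const char *buf,
				 size_t size)
{
	ssize_t v = bch_read_string_list(buf, demo_error_actions);

	if (v < 0)
		return v;		/* unrecognized string */

	c->on_error = v;
	return size;
}
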
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
index f7b6c197f90f..adbc3df17a80 100644
--- a/drivers/md/bcache/trace.c
+++ b/drivers/md/bcache/trace.c
@@ -1,6 +1,5 @@
#include "bcache.h"
#include "btree.h"
-#include "request.h"
#include <linux/blktrace_api.h>
#include <linux/module.h>
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 420dad545c7d..c57621e49dc0 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -168,10 +168,14 @@ int bch_parse_uuid(const char *s, char *uuid)
void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
{
- uint64_t now = local_clock();
- uint64_t duration = time_after64(now, start_time)
+ uint64_t now, duration, last;
+
+ spin_lock(&stats->lock);
+
+ now = local_clock();
+ duration = time_after64(now, start_time)
? now - start_time : 0;
- uint64_t last = time_after64(now, stats->last)
+ last = time_after64(now, stats->last)
? now - stats->last : 0;
stats->max_duration = max(stats->max_duration, duration);
@@ -188,6 +192,8 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
}
stats->last = now ?: 1;
+
+ spin_unlock(&stats->lock);
}
/**
@@ -212,10 +218,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
void bch_bio_map(struct bio *bio, void *base)
{
- size_t size = bio->bi_size;
+ size_t size = bio->bi_iter.bi_size;
struct bio_vec *bv = bio->bi_io_vec;
- BUG_ON(!bio->bi_size);
+ BUG_ON(!bio->bi_iter.bi_size);
BUG_ON(bio->bi_vcnt);
bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
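
The change above serializes bch_time_stats_update() with a spinlock (the lock field itself is added to struct time_stats in the util.h hunk that follows), presumably because these stats can now be updated from concurrent contexts. Any structure embedding such a lock has to initialize it before the first update; a minimal sketch of that init/update pairing, using demo_* names that are not part of the patch:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Cut-down stand-in for struct time_stats; the real layout lives in util.h. */
struct demo_time_stats {
	spinlock_t	lock;
	uint64_t	max_duration;
	uint64_t	last;
};

static void demo_time_stats_init(struct demo_time_stats *stats)
{
	spin_lock_init(&stats->lock);	/* must run before the first update */
}

static void demo_time_stats_update(struct demo_time_stats *stats,
				   uint64_t now, uint64_t duration)
{
	/* Read-modify-write of shared fields happens entirely under the lock. */
	spin_lock(&stats->lock);
	if (duration > stats->max_duration)
		stats->max_duration = duration;
	stats->last = now ?: 1;
	spin_unlock(&stats->lock);
}
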
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index ea345c6896f4..362c4b3f8b4a 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -15,28 +15,18 @@
struct closure;
-#ifdef CONFIG_BCACHE_EDEBUG
+#ifdef CONFIG_BCACHE_DEBUG
#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i)
-#else /* EDEBUG */
+#else /* DEBUG */
#define atomic_dec_bug(v) atomic_dec(v)
#define atomic_inc_bug(v, i) atomic_inc(v)
#endif
-#define BITMASK(name, type, field, offset, size) \
-static inline uint64_t name(const type *k) \
-{ return (k->field >> offset) & ~(((uint64_t) ~0) << size); } \
- \
-static inline void SET_##name(type *k, uint64_t v) \
-{ \
- k->field &= ~(~((uint64_t) ~0 << size) << offset); \
- k->field |= v << offset; \
-}
-
#define DECLARE_HEAP(type, name) \
struct { \
size_t size, used; \
@@ -388,6 +378,7 @@ ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[
ssize_t bch_read_string_list(const char *buf, const char * const list[]);
struct time_stats {
+ spinlock_t lock;
/*
* all fields are in nanoseconds, averages are ewmas stored left shifted
* by 8
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index ba3ee48320f2..04657e93f4fd 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -11,18 +11,11 @@
#include "debug.h"
#include "writeback.h"
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
#include <trace/events/bcache.h>
-static struct workqueue_struct *dirty_wq;
-
-static void read_dirty(struct closure *);
-
-struct dirty_io {
- struct closure cl;
- struct cached_dev *dc;
- struct bio bio;
-};
-
/* Rate limiting */
static void __update_writeback_rate(struct cached_dev *dc)
@@ -72,9 +65,6 @@ out:
dc->writeback_rate_derivative = derivative;
dc->writeback_rate_change = change;
dc->writeback_rate_target = target;
-
- schedule_delayed_work(&dc->writeback_rate_update,
- dc->writeback_rate_update_seconds * HZ);
}
static void update_writeback_rate(struct work_struct *work)
@@ -90,13 +80,16 @@ static void update_writeback_rate(struct work_struct *work)
__update_writeback_rate(dc);
up_read(&dc->writeback_lock);
+
+ schedule_delayed_work(&dc->writeback_rate_update,
+ dc->writeback_rate_update_seconds * HZ);
}
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
uint64_t ret;
- if (atomic_read(&dc->disk.detaching) ||
+ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
!dc->writeback_percent)
return 0;
@@ -105,37 +98,11 @@ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
return min_t(uint64_t, ret, HZ);
}
-/* Background writeback */
-
-static bool dirty_pred(struct keybuf *buf, struct bkey *k)
-{
- return KEY_DIRTY(k);
-}
-
-static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
-{
- uint64_t stripe;
- unsigned nr_sectors = KEY_SIZE(k);
- struct cached_dev *dc = container_of(buf, struct cached_dev,
- writeback_keys);
- unsigned stripe_size = 1 << dc->disk.stripe_size_bits;
-
- if (!KEY_DIRTY(k))
- return false;
-
- stripe = KEY_START(k) >> dc->disk.stripe_size_bits;
- while (1) {
- if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) !=
- stripe_size)
- return false;
-
- if (nr_sectors <= stripe_size)
- return true;
-
- nr_sectors -= stripe_size;
- stripe++;
- }
-}
+struct dirty_io {
+ struct closure cl;
+ struct cached_dev *dc;
+ struct bio bio;
+};
static void dirty_init(struct keybuf_key *w)
{
@@ -146,138 +113,13 @@ static void dirty_init(struct keybuf_key *w)
if (!io->dc->writeback_percent)
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
- bio->bi_size = KEY_SIZE(&w->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
bio->bi_private = w;
bio->bi_io_vec = bio->bi_inline_vecs;
bch_bio_map(bio, NULL);
}
-static void refill_dirty(struct closure *cl)
-{
- struct cached_dev *dc = container_of(cl, struct cached_dev,
- writeback.cl);
- struct keybuf *buf = &dc->writeback_keys;
- bool searched_from_start = false;
- struct bkey end = MAX_KEY;
- SET_KEY_INODE(&end, dc->disk.id);
-
- if (!atomic_read(&dc->disk.detaching) &&
- !dc->writeback_running)
- closure_return(cl);
-
- down_write(&dc->writeback_lock);
-
- if (!atomic_read(&dc->has_dirty)) {
- SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
- bch_write_bdev_super(dc, NULL);
-
- up_write(&dc->writeback_lock);
- closure_return(cl);
- }
-
- if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
- buf->last_scanned = KEY(dc->disk.id, 0, 0);
- searched_from_start = true;
- }
-
- if (dc->partial_stripes_expensive) {
- uint64_t i;
-
- for (i = 0; i < dc->disk.nr_stripes; i++)
- if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
- 1 << dc->disk.stripe_size_bits)
- goto full_stripes;
-
- goto normal_refill;
-full_stripes:
- bch_refill_keybuf(dc->disk.c, buf, &end,
- dirty_full_stripe_pred);
- } else {
-normal_refill:
- bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
- }
-
- if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
- /* Searched the entire btree - delay awhile */
-
- if (RB_EMPTY_ROOT(&buf->keys)) {
- atomic_set(&dc->has_dirty, 0);
- cached_dev_put(dc);
- }
-
- if (!atomic_read(&dc->disk.detaching))
- closure_delay(&dc->writeback, dc->writeback_delay * HZ);
- }
-
- up_write(&dc->writeback_lock);
-
- bch_ratelimit_reset(&dc->writeback_rate);
-
- /* Punt to workqueue only so we don't recurse and blow the stack */
- continue_at(cl, read_dirty, dirty_wq);
-}
-
-void bch_writeback_queue(struct cached_dev *dc)
-{
- if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
- if (!atomic_read(&dc->disk.detaching))
- closure_delay(&dc->writeback, dc->writeback_delay * HZ);
-
- continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
- }
-}
-
-void bch_writeback_add(struct cached_dev *dc)
-{
- if (!atomic_read(&dc->has_dirty) &&
- !atomic_xchg(&dc->has_dirty, 1)) {
- atomic_inc(&dc->count);
-
- if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
- SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
- /* XXX: should do this synchronously */
- bch_write_bdev_super(dc, NULL);
- }
-
- bch_writeback_queue(dc);
-
- if (dc->writeback_percent)
- schedule_delayed_work(&dc->writeback_rate_update,
- dc->writeback_rate_update_seconds * HZ);
- }
-}
-
-void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
- uint64_t offset, int nr_sectors)
-{
- struct bcache_device *d = c->devices[inode];
- unsigned stripe_size, stripe_offset;
- uint64_t stripe;
-
- if (!d)
- return;
-
- stripe_size = 1 << d->stripe_size_bits;
- stripe = offset >> d->stripe_size_bits;
- stripe_offset = offset & (stripe_size - 1);
-
- while (nr_sectors) {
- int s = min_t(unsigned, abs(nr_sectors),
- stripe_size - stripe_offset);
-
- if (nr_sectors < 0)
- s = -s;
-
- atomic_add(s, d->stripe_sectors_dirty + stripe);
- nr_sectors -= s;
- stripe_offset = 0;
- stripe++;
- }
-}
-
-/* Background writeback - IO loop */
-
static void dirty_io_destructor(struct closure *cl)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
@@ -297,26 +139,25 @@ static void write_dirty_finish(struct closure *cl)
/* This is kind of a dumb way of signalling errors. */
if (KEY_DIRTY(&w->key)) {
+ int ret;
unsigned i;
- struct btree_op op;
- bch_btree_op_init_stack(&op);
+ struct keylist keys;
- op.type = BTREE_REPLACE;
- bkey_copy(&op.replace, &w->key);
+ bch_keylist_init(&keys);
- SET_KEY_DIRTY(&w->key, false);
- bch_keylist_add(&op.keys, &w->key);
+ bkey_copy(keys.top, &w->key);
+ SET_KEY_DIRTY(keys.top, false);
+ bch_keylist_push(&keys);
for (i = 0; i < KEY_PTRS(&w->key); i++)
atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
- bch_btree_insert(&op, dc->disk.c);
- closure_sync(&op.cl);
+ ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
- if (op.insert_collision)
+ if (ret)
trace_bcache_writeback_collision(&w->key);
- atomic_long_inc(op.insert_collision
+ atomic_long_inc(ret
? &dc->disk.c->writeback_keys_failed
: &dc->disk.c->writeback_keys_done);
}
@@ -345,7 +186,7 @@ static void write_dirty(struct closure *cl)
dirty_init(w);
io->bio.bi_rw = WRITE;
- io->bio.bi_sector = KEY_START(&w->key);
+ io->bio.bi_iter.bi_sector = KEY_START(&w->key);
io->bio.bi_bdev = io->dc->bdev;
io->bio.bi_end_io = dirty_endio;
@@ -374,30 +215,33 @@ static void read_dirty_submit(struct closure *cl)
continue_at(cl, write_dirty, system_wq);
}
-static void read_dirty(struct closure *cl)
+static void read_dirty(struct cached_dev *dc)
{
- struct cached_dev *dc = container_of(cl, struct cached_dev,
- writeback.cl);
- unsigned delay = writeback_delay(dc, 0);
+ unsigned delay = 0;
struct keybuf_key *w;
struct dirty_io *io;
+ struct closure cl;
+
+ closure_init_stack(&cl);
/*
* XXX: if we error, background writeback just spins. Should use some
* mempools.
*/
- while (1) {
+ while (!kthread_should_stop()) {
+ try_to_freeze();
+
w = bch_keybuf_next(&dc->writeback_keys);
if (!w)
break;
BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
- if (delay > 0 &&
- (KEY_START(&w->key) != dc->last_read ||
- jiffies_to_msecs(delay) > 50))
- delay = schedule_timeout_uninterruptible(delay);
+ if (KEY_START(&w->key) != dc->last_read ||
+ jiffies_to_msecs(delay) > 50)
+ while (!kthread_should_stop() && delay)
+ delay = schedule_timeout_interruptible(delay);
dc->last_read = KEY_OFFSET(&w->key);
@@ -411,7 +255,7 @@ static void read_dirty(struct closure *cl)
io->dc = dc;
dirty_init(w);
- io->bio.bi_sector = PTR_OFFSET(&w->key, 0);
+ io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
&w->key, 0)->bdev;
io->bio.bi_rw = READ;
@@ -423,7 +267,7 @@ static void read_dirty(struct closure *cl)
trace_bcache_writeback(&w->key);
down(&dc->in_flight);
- closure_call(&io->cl, read_dirty_submit, NULL, cl);
+ closure_call(&io->cl, read_dirty_submit, NULL, &cl);
delay = writeback_delay(dc, KEY_SIZE(&w->key));
}
@@ -439,52 +283,205 @@ err:
* Wait for outstanding writeback IOs to finish (and keybuf slots to be
* freed) before refilling again
*/
- continue_at(cl, refill_dirty, dirty_wq);
+ closure_sync(&cl);
}
-/* Init */
+/* Scan for dirty data */
+
+void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+ uint64_t offset, int nr_sectors)
+{
+ struct bcache_device *d = c->devices[inode];
+ unsigned stripe_offset, stripe, sectors_dirty;
+
+ if (!d)
+ return;
+
+ stripe = offset_to_stripe(d, offset);
+ stripe_offset = offset & (d->stripe_size - 1);
+
+ while (nr_sectors) {
+ int s = min_t(unsigned, abs(nr_sectors),
+ d->stripe_size - stripe_offset);
+
+ if (nr_sectors < 0)
+ s = -s;
+
+ if (stripe >= d->nr_stripes)
+ return;
+
+ sectors_dirty = atomic_add_return(s,
+ d->stripe_sectors_dirty + stripe);
+ if (sectors_dirty == d->stripe_size)
+ set_bit(stripe, d->full_dirty_stripes);
+ else
+ clear_bit(stripe, d->full_dirty_stripes);
+
+ nr_sectors -= s;
+ stripe_offset = 0;
+ stripe++;
+ }
+}
-static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
- struct cached_dev *dc)
+static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
- struct bkey *k;
- struct btree_iter iter;
-
- bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
- while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
- if (!b->level) {
- if (KEY_INODE(k) > dc->disk.id)
- break;
-
- if (KEY_DIRTY(k))
- bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
- KEY_START(k),
- KEY_SIZE(k));
- } else {
- btree(sectors_dirty_init, k, b, op, dc);
- if (KEY_INODE(k) > dc->disk.id)
- break;
-
- cond_resched();
+ return KEY_DIRTY(k);
+}
+
+static void refill_full_stripes(struct cached_dev *dc)
+{
+ struct keybuf *buf = &dc->writeback_keys;
+ unsigned start_stripe, stripe, next_stripe;
+ bool wrapped = false;
+
+ stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
+
+ if (stripe >= dc->disk.nr_stripes)
+ stripe = 0;
+
+ start_stripe = stripe;
+
+ while (1) {
+ stripe = find_next_bit(dc->disk.full_dirty_stripes,
+ dc->disk.nr_stripes, stripe);
+
+ if (stripe == dc->disk.nr_stripes)
+ goto next;
+
+ next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
+ dc->disk.nr_stripes, stripe);
+
+ buf->last_scanned = KEY(dc->disk.id,
+ stripe * dc->disk.stripe_size, 0);
+
+ bch_refill_keybuf(dc->disk.c, buf,
+ &KEY(dc->disk.id,
+ next_stripe * dc->disk.stripe_size, 0),
+ dirty_pred);
+
+ if (array_freelist_empty(&buf->freelist))
+ return;
+
+ stripe = next_stripe;
+next:
+ if (wrapped && stripe > start_stripe)
+ return;
+
+ if (stripe == dc->disk.nr_stripes) {
+ stripe = 0;
+ wrapped = true;
}
+ }
+}
+
+static bool refill_dirty(struct cached_dev *dc)
+{
+ struct keybuf *buf = &dc->writeback_keys;
+ struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
+ bool searched_from_start = false;
+
+ if (dc->partial_stripes_expensive) {
+ refill_full_stripes(dc);
+ if (array_freelist_empty(&buf->freelist))
+ return false;
+ }
+
+ if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+ buf->last_scanned = KEY(dc->disk.id, 0, 0);
+ searched_from_start = true;
+ }
+
+ bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+
+ return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
+}
+
+static int bch_writeback_thread(void *arg)
+{
+ struct cached_dev *dc = arg;
+ bool searched_full_index;
+
+ while (!kthread_should_stop()) {
+ down_write(&dc->writeback_lock);
+ if (!atomic_read(&dc->has_dirty) ||
+ (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
+ !dc->writeback_running)) {
+ up_write(&dc->writeback_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_stop())
+ return 0;
+
+ try_to_freeze();
+ schedule();
+ continue;
+ }
+
+ searched_full_index = refill_dirty(dc);
+
+ if (searched_full_index &&
+ RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
+ atomic_set(&dc->has_dirty, 0);
+ cached_dev_put(dc);
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+ bch_write_bdev_super(dc, NULL);
+ }
+
+ up_write(&dc->writeback_lock);
+
+ bch_ratelimit_reset(&dc->writeback_rate);
+ read_dirty(dc);
+
+ if (searched_full_index) {
+ unsigned delay = dc->writeback_delay * HZ;
+
+ while (delay &&
+ !kthread_should_stop() &&
+ !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+ delay = schedule_timeout_interruptible(delay);
+ }
+ }
return 0;
}
+/* Init */
+
+struct sectors_dirty_init {
+ struct btree_op op;
+ unsigned inode;
+};
+
+static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
+ struct bkey *k)
+{
+ struct sectors_dirty_init *op = container_of(_op,
+ struct sectors_dirty_init, op);
+ if (KEY_INODE(k) > op->inode)
+ return MAP_DONE;
+
+ if (KEY_DIRTY(k))
+ bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+ KEY_START(k), KEY_SIZE(k));
+
+ return MAP_CONTINUE;
+}
+
void bch_sectors_dirty_init(struct cached_dev *dc)
{
- struct btree_op op;
+ struct sectors_dirty_init op;
+
+ bch_btree_op_init(&op.op, -1);
+ op.inode = dc->disk.id;
- bch_btree_op_init_stack(&op);
- btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
+ bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
+ sectors_dirty_init_fn, 0);
}
-void bch_cached_dev_writeback_init(struct cached_dev *dc)
+int bch_cached_dev_writeback_init(struct cached_dev *dc)
{
sema_init(&dc->in_flight, 64);
- closure_init_unlocked(&dc->writeback);
init_rwsem(&dc->writeback_lock);
-
bch_keybuf_init(&dc->writeback_keys);
dc->writeback_metadata = true;
@@ -498,22 +495,16 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_rate_p_term_inverse = 64;
dc->writeback_rate_d_smooth = 8;
+ dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
+ "bcache_writeback");
+ if (IS_ERR(dc->writeback_thread))
+ return PTR_ERR(dc->writeback_thread);
+
+ set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
+
INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);
-}
-
-void bch_writeback_exit(void)
-{
- if (dirty_wq)
- destroy_workqueue(dirty_wq);
-}
-
-int __init bch_writeback_init(void)
-{
- dirty_wq = create_workqueue("bcache_writeback");
- if (!dirty_wq)
- return -ENOMEM;
return 0;
}
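
The writeback.c rewrite above replaces the closure/workqueue machinery with a dedicated kernel thread: kthread_create() leaves the thread sleeping until wake_up_process() (see bch_writeback_queue() in writeback.h below), and the loop parks itself with set_current_state(TASK_INTERRUPTIBLE) plus schedule() whenever there is no dirty data. A minimal, generic sketch of that lifecycle, with demo_* names that are illustrative rather than part of the patch:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *demo_task;

static int demo_thread_fn(void *arg)
{
	while (!kthread_should_stop()) {
		/* Park until someone calls wake_up_process() on us. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		try_to_freeze();
		schedule();

		/* ... one round of work goes here ... */

		/* Optional pacing that stays responsive to kthread_stop(). */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

static int demo_start(void)
{
	/* Created sleeping; it only runs once demo_kick() wakes it. */
	demo_task = kthread_create(demo_thread_fn, NULL, "demo_writeback");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void demo_kick(void)		/* mirrors bch_writeback_queue() */
{
	wake_up_process(demo_task);
}

static void demo_stop(void)
{
	kthread_stop(demo_task);
}
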
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c91f61bb95b6..e2f8598937ac 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -14,20 +14,27 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
-static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
+static inline unsigned offset_to_stripe(struct bcache_device *d,
+ uint64_t offset)
+{
+ do_div(offset, d->stripe_size);
+ return offset;
+}
+
+static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
uint64_t offset,
unsigned nr_sectors)
{
- uint64_t stripe = offset >> d->stripe_size_bits;
+ unsigned stripe = offset_to_stripe(&dc->disk, offset);
while (1) {
- if (atomic_read(d->stripe_sectors_dirty + stripe))
+ if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
return true;
- if (nr_sectors <= 1 << d->stripe_size_bits)
+ if (nr_sectors <= dc->disk.stripe_size)
return false;
- nr_sectors -= 1 << d->stripe_size_bits;
+ nr_sectors -= dc->disk.stripe_size;
stripe++;
}
}
@@ -38,12 +45,12 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
unsigned in_use = dc->disk.c->gc_stats.in_use;
if (cache_mode != CACHE_MODE_WRITEBACK ||
- atomic_read(&dc->disk.detaching) ||
+ test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
in_use > CUTOFF_WRITEBACK_SYNC)
return false;
if (dc->partial_stripes_expensive &&
- bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
+ bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
bio_sectors(bio)))
return true;
@@ -54,11 +61,30 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
in_use <= CUTOFF_WRITEBACK;
}
+static inline void bch_writeback_queue(struct cached_dev *dc)
+{
+ wake_up_process(dc->writeback_thread);
+}
+
+static inline void bch_writeback_add(struct cached_dev *dc)
+{
+ if (!atomic_read(&dc->has_dirty) &&
+ !atomic_xchg(&dc->has_dirty, 1)) {
+ atomic_inc(&dc->count);
+
+ if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
+ /* XXX: should do this synchronously */
+ bch_write_bdev_super(dc, NULL);
+ }
+
+ bch_writeback_queue(dc);
+ }
+}
+
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
-void bch_writeback_queue(struct cached_dev *);
-void bch_writeback_add(struct cached_dev *);
void bch_sectors_dirty_init(struct cached_dev *dc);
-void bch_cached_dev_writeback_init(struct cached_dev *);
+int bch_cached_dev_writeback_init(struct cached_dev *);
#endif
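
offset_to_stripe() above leans on a do_div() property that is easy to misread: the macro divides its 64-bit first argument in place, so after the call offset holds the quotient while the macro's own value is the remainder, which is why the helper can simply return offset. A small stand-alone sketch (demo_offset_to_stripe() is illustrative only):

#include <asm/div64.h>
#include <linux/types.h>

static unsigned demo_offset_to_stripe(uint64_t offset, unsigned stripe_size)
{
	uint32_t in_stripe;

	in_stripe = do_div(offset, stripe_size);	/* offset is now the quotient */
	(void) in_stripe;				/* sector offset within the stripe */

	return offset;					/* stripe index */
}
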
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index a7fd82133b12..12dc29ba7399 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1654,9 +1654,9 @@ int bitmap_create(struct mddev *mddev)
bitmap->mddev = mddev;
if (mddev->kobj.sd)
- bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
+ bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
if (bm) {
- bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
+ bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
sysfs_put(bm);
} else
bitmap->sysfs_can_clear = NULL;
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 3a8cfa2645c7..dd3646111561 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -17,55 +17,24 @@
* original bio state.
*/
-struct dm_bio_vec_details {
-#if PAGE_SIZE < 65536
- __u16 bv_len;
- __u16 bv_offset;
-#else
- unsigned bv_len;
- unsigned bv_offset;
-#endif
-};
-
struct dm_bio_details {
- sector_t bi_sector;
struct block_device *bi_bdev;
- unsigned int bi_size;
- unsigned short bi_idx;
unsigned long bi_flags;
- struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES];
+ struct bvec_iter bi_iter;
};
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
{
- unsigned i;
-
- bd->bi_sector = bio->bi_sector;
bd->bi_bdev = bio->bi_bdev;
- bd->bi_size = bio->bi_size;
- bd->bi_idx = bio->bi_idx;
bd->bi_flags = bio->bi_flags;
-
- for (i = 0; i < bio->bi_vcnt; i++) {
- bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len;
- bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset;
- }
+ bd->bi_iter = bio->bi_iter;
}
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
{
- unsigned i;
-
- bio->bi_sector = bd->bi_sector;
bio->bi_bdev = bd->bi_bdev;
- bio->bi_size = bd->bi_size;
- bio->bi_idx = bd->bi_idx;
bio->bi_flags = bd->bi_flags;
-
- for (i = 0; i < bio->bi_vcnt; i++) {
- bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len;
- bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset;
- }
+ bio->bi_iter = bd->bi_iter;
}
#endif
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 173cbb20d104..4113b6044b80 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -538,7 +538,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
bio_init(&b->bio);
b->bio.bi_io_vec = b->bio_vec;
b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
- b->bio.bi_sector = block << b->c->sectors_per_block_bits;
+ b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
b->bio.bi_bdev = b->c->bdev;
b->bio.bi_end_io = end_io;
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 1af7255bbffb..15f383211890 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -20,7 +20,13 @@
#define CACHE_SUPERBLOCK_MAGIC 06142003
#define CACHE_SUPERBLOCK_LOCATION 0
-#define CACHE_VERSION 1
+
+/*
+ * Defines the range of metadata versions that this module can handle.
+ */
+#define MIN_CACHE_VERSION 1
+#define MAX_CACHE_VERSION 1
+
#define CACHE_METADATA_CACHE_SIZE 64
/*
@@ -113,6 +119,7 @@ struct dm_cache_metadata {
char policy_name[CACHE_POLICY_NAME_SIZE];
unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
size_t policy_hint_size;
+ void *policy_hint_value_buffer;
struct dm_cache_statistics stats;
};
@@ -134,6 +141,18 @@ static void sb_prepare_for_write(struct dm_block_validator *v,
SUPERBLOCK_CSUM_XOR));
}
+static int check_metadata_version(struct cache_disk_superblock *disk_super)
+{
+ uint32_t metadata_version = le32_to_cpu(disk_super->version);
+ if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
+ DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
+ metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int sb_check(struct dm_block_validator *v,
struct dm_block *b,
size_t sb_block_size)
@@ -164,7 +183,7 @@ static int sb_check(struct dm_block_validator *v,
return -EILSEQ;
}
- return 0;
+ return check_metadata_version(disk_super);
}
static struct dm_block_validator sb_validator = {
@@ -198,7 +217,7 @@ static int superblock_lock(struct dm_cache_metadata *cmd,
/*----------------------------------------------------------------*/
-static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
+static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
int r;
unsigned i;
@@ -214,10 +233,10 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
return r;
data_le = dm_block_data(b);
- *result = 1;
+ *result = true;
for (i = 0; i < sb_block_size; i++) {
if (data_le[i] != zero) {
- *result = 0;
+ *result = false;
break;
}
}
@@ -225,7 +244,7 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
return dm_bm_unlock(b);
}
-static void __setup_mapping_info(struct dm_cache_metadata *cmd)
+static int __setup_mapping_info(struct dm_cache_metadata *cmd)
{
struct dm_btree_value_type vt;
@@ -237,9 +256,30 @@ static void __setup_mapping_info(struct dm_cache_metadata *cmd)
dm_array_info_init(&cmd->info, cmd->tm, &vt);
if (cmd->policy_hint_size) {
- vt.size = sizeof(__le32);
+ if (cmd->policy_hint_size > DM_CACHE_POLICY_MAX_HINT_SIZE ||
+ cmd->policy_hint_size % 4) {
+ DMERR("hint size not divisible by 4 or is larger than %d",
+ (int) DM_CACHE_POLICY_MAX_HINT_SIZE);
+ return -EINVAL;
+ }
+
+ vt.size = cmd->policy_hint_size;
dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
- }
+
+ cmd->policy_hint_value_buffer = kmalloc(cmd->policy_hint_size, GFP_KERNEL);
+ if (!cmd->policy_hint_value_buffer) {
+ DMERR("unable to allocate hint value buffer");
+ return -ENOMEM;
+ }
+ } else
+ cmd->policy_hint_value_buffer = NULL;
+
+ return 0;
+}
+
+static void __destroy_mapping_info(struct dm_cache_metadata *cmd)
+{
+ kfree(cmd->policy_hint_value_buffer);
}
static int __write_initial_superblock(struct dm_cache_metadata *cmd)
@@ -270,7 +310,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
disk_super->flags = 0;
memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
- disk_super->version = cpu_to_le32(CACHE_VERSION);
+ disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
disk_super->policy_hint_size = 0;
@@ -312,7 +352,9 @@ static int __format_metadata(struct dm_cache_metadata *cmd)
return r;
}
- __setup_mapping_info(cmd);
+ r = __setup_mapping_info(cmd);
+ if (r < 0)
+ goto bad_mapping_info;
r = dm_array_empty(&cmd->info, &cmd->root);
if (r < 0)
@@ -335,6 +377,8 @@ static int __format_metadata(struct dm_cache_metadata *cmd)
return 0;
bad:
+ __destroy_mapping_info(cmd);
+bad_mapping_info:
dm_tm_destroy(cmd->tm);
dm_sm_destroy(cmd->metadata_sm);
@@ -369,6 +413,12 @@ static int __check_incompat_features(struct cache_disk_superblock *disk_super,
return 0;
}
+static bool using_variable_size_hints(struct cache_disk_superblock *disk_super)
+{
+ unsigned long iflags = le32_to_cpu(disk_super->incompat_flags);
+ return test_bit(DM_CACHE_VARIABLE_HINT_SIZE, &iflags);
+}
+
static int __open_metadata(struct dm_cache_metadata *cmd)
{
int r;
@@ -397,7 +447,18 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
goto bad;
}
- __setup_mapping_info(cmd);
+ /*
+ * We need to set the hint size before calling __setup_mapping_info()
+ */
+ if (using_variable_size_hints(disk_super))
+ cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
+ else
+ cmd->policy_hint_size = DM_CACHE_POLICY_DEF_HINT_SIZE;
+
+ r = __setup_mapping_info(cmd);
+ if (r < 0)
+ goto bad;
+
dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
sb_flags = le32_to_cpu(disk_super->flags);
cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
@@ -411,7 +472,8 @@ bad:
static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
bool format_device)
{
- int r, unformatted;
+ int r;
+ bool unformatted = false;
r = __superblock_all_zeroes(cmd->bm, &unformatted);
if (r)
@@ -484,7 +546,16 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd,
cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
- cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
+
+ if (using_variable_size_hints(disk_super))
+ cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
+ else {
+ /*
+ * Must establish policy_hint_size because an older superblock
+ * wouldn't have it.
+ */
+ cmd->policy_hint_size = DM_CACHE_POLICY_DEF_HINT_SIZE;
+ }
cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
@@ -582,6 +653,15 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
+ if (cmd->policy_hint_size != DM_CACHE_POLICY_DEF_HINT_SIZE) {
+ unsigned long iflags = 0;
+ set_bit(DM_CACHE_VARIABLE_HINT_SIZE, &iflags);
+ disk_super->incompat_flags = cpu_to_le32(iflags);
+ } else
+ disk_super->incompat_flags = cpu_to_le32(0u);
+
+ disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
+
disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
@@ -647,6 +727,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
r = __create_persistent_data_objects(cmd, may_format_device);
if (r) {
+ __destroy_mapping_info(cmd);
kfree(cmd);
return ERR_PTR(r);
}
@@ -663,22 +744,88 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
{
__destroy_persistent_data_objects(cmd);
+ __destroy_mapping_info(cmd);
kfree(cmd);
}
+/*
+ * Checks that the given cache block is either unmapped or clean.
+ */
+static int block_unmapped_or_clean(struct dm_cache_metadata *cmd, dm_cblock_t b,
+ bool *result)
+{
+ int r;
+ __le64 value;
+ dm_oblock_t ob;
+ unsigned flags;
+
+ r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
+ if (r) {
+ DMERR("block_unmapped_or_clean failed");
+ return r;
+ }
+
+ unpack_value(value, &ob, &flags);
+ *result = !((flags & M_VALID) && (flags & M_DIRTY));
+
+ return 0;
+}
+
+static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
+ dm_cblock_t begin, dm_cblock_t end,
+ bool *result)
+{
+ int r;
+
+ while (begin != end) {
+ r = block_unmapped_or_clean(cmd, begin, result);
+ if (r)
+ return r;
+
+ if (!*result) {
+ DMERR("cache block %llu is dirty",
+ (unsigned long long) from_cblock(begin));
+ return 0;
+ }
+
+ begin = to_cblock(from_cblock(begin) + 1);
+ }
+
+ return 0;
+}
+
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
int r;
+ bool clean;
__le64 null_mapping = pack_value(0, 0);
down_write(&cmd->root_lock);
__dm_bless_for_disk(&null_mapping);
+
+ if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
+ r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean);
+ if (r) {
+ __dm_unbless_for_disk(&null_mapping);
+ goto out;
+ }
+
+ if (!clean) {
+ DMERR("unable to shrink cache due to dirty blocks");
+ r = -EINVAL;
+ __dm_unbless_for_disk(&null_mapping);
+ goto out;
+ }
+ }
+
r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
from_cblock(new_cache_size),
&null_mapping, &cmd->root);
if (!r)
cmd->cache_blocks = new_cache_size;
cmd->changed = true;
+
+out:
up_write(&cmd->root_lock);
return r;
@@ -908,7 +1055,6 @@ static int __load_mapping(void *context, uint64_t cblock, void *leaf)
int r = 0;
bool dirty;
__le64 value;
- __le32 hint_value = 0;
dm_oblock_t oblock;
unsigned flags;
struct thunk *thunk = context;
@@ -920,14 +1066,14 @@ static int __load_mapping(void *context, uint64_t cblock, void *leaf)
if (flags & M_VALID) {
if (thunk->hints_valid) {
r = dm_array_get_value(&cmd->hint_info, cmd->hint_root,
- cblock, &hint_value);
+ cblock, cmd->policy_hint_value_buffer);
if (r && r != -ENODATA)
return r;
}
dirty = thunk->respect_dirty_flags ? (flags & M_DIRTY) : true;
r = thunk->fn(thunk->context, oblock, to_cblock(cblock),
- dirty, le32_to_cpu(hint_value), thunk->hints_valid);
+ dirty, cmd->policy_hint_value_buffer, thunk->hints_valid);
}
return r;
@@ -1103,8 +1249,6 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
{
int r;
- __le32 value;
- size_t hint_size;
const char *policy_name = dm_cache_policy_get_name(policy);
const unsigned *policy_version = dm_cache_policy_get_version(policy);
@@ -1113,6 +1257,8 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
return -EINVAL;
if (!policy_unchanged(cmd, policy)) {
+ size_t hint_size;
+
strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
@@ -1131,11 +1277,11 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
if (r)
return r;
- value = cpu_to_le32(0);
+ memset(cmd->policy_hint_value_buffer, 0, hint_size);
__dm_bless_for_disk(&value);
r = dm_array_resize(&cmd->hint_info, cmd->hint_root, 0,
from_cblock(cmd->cache_blocks),
- &value, &cmd->hint_root);
+ cmd->policy_hint_value_buffer, &cmd->hint_root);
if (r)
return r;
}
@@ -1154,27 +1300,27 @@ int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
return r;
}
-static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
- uint32_t hint)
+static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, void *hint)
+ __dm_written_to_disk(hint)
{
int r;
- __le32 value = cpu_to_le32(hint);
- __dm_bless_for_disk(&value);
r = dm_array_set_value(&cmd->hint_info, cmd->hint_root,
- from_cblock(cblock), &value, &cmd->hint_root);
+ from_cblock(cblock), hint, &cmd->hint_root);
cmd->changed = true;
return r;
}
-int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
- uint32_t hint)
+int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, void *hint)
+ __dm_written_to_disk(hint)
{
int r;
- if (!hints_array_initialized(cmd))
+ if (!hints_array_initialized(cmd)) {
+ __dm_unbless_for_disk(hint);
return 0;
+ }
down_write(&cmd->root_lock);
r = save_hint(cmd, cblock, hint);
@@ -1182,3 +1328,8 @@ int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
return r;
}
+
+int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
+{
+ return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+}
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index f45cef21f3d0..4e5f435ef72b 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -49,7 +49,12 @@
*/
#define DM_CACHE_FEATURE_COMPAT_SUPP 0UL
#define DM_CACHE_FEATURE_COMPAT_RO_SUPP 0UL
-#define DM_CACHE_FEATURE_INCOMPAT_SUPP 0UL
+
+enum dm_cache_incompat_bits {
+ DM_CACHE_VARIABLE_HINT_SIZE = 0
+};
+
+#define DM_CACHE_FEATURE_INCOMPAT_SUPP (1 << DM_CACHE_VARIABLE_HINT_SIZE)
/*
* Reopens or creates a new, empty metadata volume.
@@ -87,7 +92,7 @@ int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);
typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
dm_cblock_t cblock, bool dirty,
- uint32_t hint, bool hint_valid);
+ void *hint, bool hint_valid);
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
struct dm_cache_policy *policy,
load_mapping_fn fn,
@@ -118,9 +123,10 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
void dm_cache_dump(struct dm_cache_metadata *cmd);
/*
- * The policy is invited to save a 32bit hint value for every cblock (eg,
- * for a hit count). These are stored against the policy name. If
- * policies are changed, then hints will be lost. If the machine crashes,
+ * The policy is invited to save a hint (void* sequence of bytes) for every
+ * cblock (eg, for a hit count) and is responsible for doing endianness conversions.
+ * These are stored against the policy name.
+ * If policies are changed, then hints will be lost. If the machine crashes,
* hints will be lost.
*
* The hints are indexed by the cblock, but many policies will not
@@ -132,10 +138,18 @@ void dm_cache_dump(struct dm_cache_metadata *cmd);
int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
/*
- * requests hints for every cblock and stores in the metadata device.
+ * Saves the hint for a given cblock in the metadata device. Policy
+ * modules must perform any endian conversions needed and bless the hints
+ * for disk.
*/
int dm_cache_save_hint(struct dm_cache_metadata *cmd,
- dm_cblock_t cblock, uint32_t hint);
+ dm_cblock_t cblock, void *hint)
+ __dm_written_to_disk(hint);
+
+/*
+ * Query method. Are all the blocks in the cache clean?
+ */
+int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
/*----------------------------------------------------------------*/
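
With variable-size hints, dm_cache_save_hint() now takes an opaque buffer and, per the comment above, the policy module owns the endian conversion and the __dm_bless_for_disk() annotation that save_hint() used to apply internally. For the common 32-bit hint, a call site would look roughly like the sketch below; demo_save_hit_count() is illustrative and assumes the declarations from dm-cache-metadata.h:

static int demo_save_hit_count(struct dm_cache_metadata *cmd,
			       dm_cblock_t cblock, uint32_t hit_count)
{
	__le32 value = cpu_to_le32(hit_count);	/* policy performs the endian swap */

	__dm_bless_for_disk(&value);		/* annotate the buffer as on-disk data */
	return dm_cache_save_hint(cmd, cblock, &value);
}
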
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
index b04d1f904d07..e6273bb282bb 100644
--- a/drivers/md/dm-cache-policy-cleaner.c
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -243,7 +243,7 @@ static void __set_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock, bo
}
}
-static void wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
+static int wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
struct policy *p = to_policy(pe);
unsigned long flags;
@@ -251,9 +251,11 @@ static void wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
spin_lock_irqsave(&p->lock, flags);
__set_clear_dirty(pe, oblock, true);
spin_unlock_irqrestore(&p->lock, flags);
+
+ return 0;
}
-static void wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
+static int wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
struct policy *p = to_policy(pe);
unsigned long flags;
@@ -261,6 +263,8 @@ static void wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
spin_lock_irqsave(&p->lock, flags);
__set_clear_dirty(pe, oblock, false);
spin_unlock_irqrestore(&p->lock, flags);
+
+ return 0;
}
static void add_cache_entry(struct policy *p, struct wb_cache_entry *e)
@@ -274,7 +278,7 @@ static void add_cache_entry(struct policy *p, struct wb_cache_entry *e)
static int wb_load_mapping(struct dm_cache_policy *pe,
dm_oblock_t oblock, dm_cblock_t cblock,
- uint32_t hint, bool hint_valid)
+ void *hint, bool hint_valid)
{
int r;
struct policy *p = to_policy(pe);
diff --git a/drivers/md/dm-cache-policy-era.c b/drivers/md/dm-cache-policy-era.c
new file mode 100644
index 000000000000..d4fc0b0a6cef
--- /dev/null
+++ b/drivers/md/dm-cache-policy-era.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright 2013 NetApp, Inc. All Rights Reserved, contribution by
+ * Morgan Mears.
+ *
+ * Copyright 2013 Red Hat, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details
+ *
+ */
+
+#include "dm-cache-policy.h"
+#include "dm-cache-policy-internal.h"
+#include "dm-cache-shim-utils.h"
+#include "dm.h"
+
+#include <linux/hash.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <linux/delay.h>
+
+#define DM_MSG_PREFIX "cache-policy-era"
+
+typedef uint32_t era_t;
+#define ERA_MAX_ERA UINT_MAX
+
+struct era_policy {
+ struct dm_cache_policy policy;
+
+ struct mutex lock; /* FIXME: spinlock? */
+
+ dm_cblock_t cache_size;
+
+ era_t *cb_to_era;
+
+ era_t era_counter;
+
+ /* Temporary store for unmap information during invalidation. */
+ struct {
+ unsigned long *bitset;
+ dm_oblock_t *oblocks;
+ unsigned long last_cblock;
+ } invalidate;
+};
+
+/*----------------------------------------------------------------*/
+
+static struct era_policy *to_era_policy(struct dm_cache_policy *p)
+{
+ return container_of(p, struct era_policy, policy);
+}
+
+static unsigned long *alloc_bitset(unsigned nr_entries)
+{
+ size_t s = sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
+ return vzalloc(s);
+}
+
+static void free_bitset(unsigned long *bits)
+{
+ vfree(bits);
+}
+
+static dm_oblock_t *alloc_oblocks(unsigned nr_entries)
+{
+ size_t s = sizeof(dm_oblock_t) * nr_entries;
+ return vmalloc(s);
+}
+
+static void free_oblocks(dm_oblock_t *blocks)
+{
+ vfree(blocks);
+}
+
+static void free_invalidate(struct era_policy *era)
+{
+ if (era->invalidate.oblocks) {
+ free_oblocks(era->invalidate.oblocks);
+ era->invalidate.oblocks = NULL;
+ }
+
+ if (era->invalidate.bitset) {
+ free_bitset(era->invalidate.bitset);
+ era->invalidate.bitset = NULL; /* Being checked for! */
+ }
+}
+
+static int alloc_invalidate(struct era_policy *era)
+{
+ /* FIXME: memory consumption! */
+ era->invalidate.oblocks = alloc_oblocks(from_cblock(era->cache_size));
+ if (!era->invalidate.oblocks) {
+ DMERR("failed to allocate original blocks unmap array");
+ goto err;
+ }
+
+ era->invalidate.bitset = alloc_bitset(from_cblock(era->cache_size));
+ if (!era->invalidate.bitset) {
+ DMERR("failed to allocate cache blocks unmap bitset");
+ goto err;
+ }
+
+ era->invalidate.last_cblock = 0;
+ return 0;
+
+err:
+ free_invalidate(era);
+ return -ENOMEM;
+}
+
+
+typedef int (*era_match_fn_t)(era_t, era_t);
+
+static int incr_era_counter(struct era_policy *era, const char *curr_era_str,
+ era_match_fn_t dummy)
+{
+ era_t curr_era_counter;
+ int r;
+
+ /*
+ * If the era counter value provided by the user matches the current
+ * counter value while under lock, increment the counter (intention
+ * is to prevent races). Rollover problems are avoided by capping
+ * the counter at a maximum value. The application must take
+ * appropriate action on this error to preserve correctness, but
+ * a properly behaved set of applications will never trigger it;
+ * the era counter is meant to increment less than once a second
+ * and is 32 bits.
+ */
+
+ if (kstrtou32(curr_era_str, 10, &curr_era_counter))
+ return -EINVAL;
+
+ smp_rmb();
+ if (era->era_counter != curr_era_counter)
+ r = -ECANCELED;
+ else if (era->era_counter >= ERA_MAX_ERA)
+ r = -EOVERFLOW;
+ else {
+ era->era_counter++;
+ smp_wmb();
+ r = 0;
+ }
+
+ return r;
+}
+
+static void *era_cblock_to_hint(struct shim_walk_map_ctx *ctx,
+ dm_cblock_t cblock, dm_oblock_t oblock)
+{
+ struct era_policy *era = to_era_policy(ctx->my_policy);
+ era_t era_val;
+ era_val = era->cb_to_era[from_cblock(cblock)];
+ ctx->le32_buf = cpu_to_le32(era_val);
+ return &ctx->le32_buf;
+}
+
+static int era_is_gt_value(era_t era, era_t value)
+{
+ return era > value;
+}
+
+static int era_is_gte_value(era_t era, era_t value)
+{
+ return era >= value;
+}
+
+static int era_is_lte_value(era_t era, era_t value)
+{
+ return era <= value;
+}
+
+static int era_is_lt_value(era_t era, era_t value)
+{
+ return era < value;
+}
+
+struct inval_oblocks_ctx {
+ struct era_policy *era;
+ era_match_fn_t era_match_fn;
+ era_t test_era;
+};
+
+static int era_inval_oblocks(void *context, dm_cblock_t cblock,
+ dm_oblock_t oblock, void *unused)
+{
+ struct inval_oblocks_ctx *ctx = (struct inval_oblocks_ctx *)context;
+ era_t act_era = ctx->era->cb_to_era[from_cblock(cblock)];
+
+ if (ctx->era_match_fn(act_era, ctx->test_era)) {
+ set_bit(from_cblock(cblock), ctx->era->invalidate.bitset);
+ ctx->era->invalidate.oblocks[from_cblock(cblock)] = oblock;
+ }
+
+ return 0;
+}
+
+static int cond_unmap_by_era(struct era_policy *era, const char *test_era_str,
+ era_match_fn_t era_match_fn)
+{
+ struct shim_walk_map_ctx ctx;
+ struct inval_oblocks_ctx io_ctx;
+ era_t test_era;
+ int r;
+
+ if (era->invalidate.bitset) {
+ DMERR("previous unmap request exists");
+ return -EPERM;
+ }
+
+ /*
+ * Unmap blocks with eras matching the given era, according to the
+ * given matching function.
+ */
+
+ if (kstrtou32(test_era_str, 10, &test_era))
+ return -EINVAL;
+
+ r = alloc_invalidate(era);
+ if (r)
+ return r;
+
+ io_ctx.era = era;
+ io_ctx.era_match_fn = era_match_fn;
+ io_ctx.test_era = test_era;
+
+ ctx.parent_ctx = &io_ctx;
+ ctx.parent_fn = era_inval_oblocks;
+ ctx.my_policy = &era->policy;
+ ctx.child_hint_buf = NULL;
+ ctx.cblock_to_hint_fn = NULL;
+
+ mutex_lock(&era->lock);
+ r = dm_cache_shim_utils_walk_map_with_ctx(&ctx);
+ mutex_unlock(&era->lock);
+
+ return r;
+}
+
+/*
+ * Public interface, via the policy struct. See dm-cache-policy.h for a
+ * description of these.
+ */
+
+static void era_destroy(struct dm_cache_policy *p)
+{
+ struct era_policy *era = to_era_policy(p);
+
+ free_invalidate(era);
+ vfree(era->cb_to_era);
+ kfree(era);
+}
+
+static int era_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result)
+{
+ struct era_policy *era = to_era_policy(p);
+ uint32_t cb_idx;
+ int r;
+
+ result->op = POLICY_MISS;
+
+ if (can_block)
+ mutex_lock(&era->lock);
+
+ else if (!mutex_trylock(&era->lock))
+ return -EWOULDBLOCK;
+
+ /* Check for a mapping */
+ r = policy_map(p->child, oblock, can_block, can_migrate,
+ discarded_oblock, bio, result);
+
+ /* If we got a hit and this is a write, update the era for the block */
+ if (!r && (bio_data_dir(bio) == WRITE) && (result->op == POLICY_HIT)) {
+ cb_idx = from_cblock(result->cblock);
+ BUG_ON(cb_idx >= from_cblock(era->cache_size));
+ smp_rmb();
+ era->cb_to_era[cb_idx] = era->era_counter;
+ }
+
+ mutex_unlock(&era->lock);
+
+ return r;
+}
+
+static int era_load_mapping(struct dm_cache_policy *p,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ void *hint, bool hint_valid)
+{
+ struct era_policy *era = to_era_policy(p);
+ struct dm_cache_policy *child;
+ __le32 *le32_hint;
+ era_t recovered_era;
+ int r;
+
+ child = era->policy.child;
+
+ le32_hint = (__le32 *)hint;
+ hint = &le32_hint[1];
+
+ r = policy_load_mapping(child, oblock, cblock, hint, hint_valid);
+
+ /* FIXME: is the recovered era valid on a reload called from the cache core invalidate mapping error path? */
+ if (!r && hint_valid &&
+ (from_cblock(cblock) < from_cblock(era->cache_size))) {
+ recovered_era = le32_to_cpu(*le32_hint);
+ era->cb_to_era[from_cblock(cblock)] = recovered_era;
+
+ /*
+ * Make sure the era counter starts higher than the highest
+ * persisted era.
+ */
+ smp_rmb();
+ if (recovered_era >= era->era_counter) {
+ era->era_counter = recovered_era;
+ if (era->era_counter < ERA_MAX_ERA)
+ era->era_counter++;
+ smp_wmb();
+ }
+ }
+
+ return r;
+}
+
+static int era_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
+ void *context)
+{
+ return dm_cache_shim_utils_walk_map(p, fn, context, era_cblock_to_hint);
+}
+
+static void era_force_mapping(struct dm_cache_policy *p, dm_oblock_t old_oblock,
+ dm_oblock_t new_oblock)
+{
+ struct era_policy *era = to_era_policy(p);
+ dm_cblock_t cblock;
+
+ mutex_lock(&era->lock);
+
+ if (!policy_lookup(p->child, old_oblock, &cblock)) {
+ smp_rmb();
+ era->cb_to_era[from_cblock(cblock)] = era->era_counter;
+ }
+
+ policy_force_mapping(p->child, old_oblock, new_oblock);
+
+ mutex_unlock(&era->lock);
+}
+
+/* Find next block to invalidate. */
+static int __find_invalidate_block(struct era_policy *era, dm_cblock_t *cblock)
+{
+ int bit = find_next_bit(era->invalidate.bitset, from_cblock(era->cache_size),
+ era->invalidate.last_cblock);
+
+ *cblock = to_cblock(bit);
+ era->invalidate.last_cblock = bit;
+ return bit < from_cblock(era->cache_size) ? 0 : -ENODATA;
+}
+
+static int era_invalidate_mapping(struct dm_cache_policy *p,
+ dm_oblock_t *oblock, dm_cblock_t *cblock)
+{
+ struct era_policy *era = to_era_policy(p);
+ int r;
+
+ if (!era->invalidate.bitset)
+ return -ENODATA;
+
+ r = __find_invalidate_block(era, cblock);
+ if (r < 0)
+ free_invalidate(era);
+ else {
+ BUG_ON(from_cblock(*cblock) >= from_cblock(era->cache_size));
+ BUG_ON(!test_bit(from_cblock(*cblock), era->invalidate.bitset));
+ clear_bit(from_cblock(*cblock), era->invalidate.bitset);
+ *oblock = era->invalidate.oblocks[from_cblock(*cblock)];
+ r = policy_invalidate_mapping(p->child, oblock, cblock);
+ }
+
+ return r;
+}
+
+struct config_value_handler {
+ const char *cmd;
+ int (*handler_fn)(struct era_policy *, const char *, era_match_fn_t);
+ era_match_fn_t match_fn;
+};
+
+/* FIXME: is a delete unmap request needed or is reloading the mapping sufficient to achieve it? */
+static int era_set_config_value(struct dm_cache_policy *p, const char *key,
+ const char *value)
+{
+ struct era_policy *era = to_era_policy(p);
+ struct config_value_handler *vh, value_handlers[] = {
+ { "increment_era_counter", incr_era_counter, NULL },
+ { "unmap_blocks_from_later_eras", cond_unmap_by_era, era_is_gt_value },
+ { "unmap_blocks_from_this_era_and_later", cond_unmap_by_era, era_is_gte_value },
+ { "unmap_blocks_from_this_era_and_earlier", cond_unmap_by_era, era_is_lte_value },
+ { "unmap_blocks_from_earlier_eras", cond_unmap_by_era, era_is_lt_value },
+ { NULL }
+ };
+
+ for (vh = value_handlers; vh->cmd; vh++) {
+ if (!strcasecmp(key, vh->cmd))
+ return vh->handler_fn(era, value, vh->match_fn);
+ }
+
+ return policy_set_config_value(p->child, key, value);
+}
+
+static int era_emit_config_values(struct dm_cache_policy *p, char *result,
+ unsigned maxlen)
+{
+ struct era_policy *era = to_era_policy(p);
+ ssize_t sz = 0;
+
+ smp_rmb();
+ DMEMIT("era_counter %u ", era->era_counter);
+ return policy_emit_config_values(p->child, result + sz, maxlen - sz);
+}
+
+/* Init the policy plugin interface function pointers. */
+static void init_policy_functions(struct era_policy *era)
+{
+ dm_cache_shim_utils_init_shim_policy(&era->policy);
+ era->policy.destroy = era_destroy;
+ era->policy.map = era_map;
+ era->policy.load_mapping = era_load_mapping;
+ era->policy.walk_mappings = era_walk_mappings;
+ era->policy.force_mapping = era_force_mapping;
+ era->policy.invalidate_mapping = era_invalidate_mapping;
+ era->policy.emit_config_values = era_emit_config_values;
+ era->policy.set_config_value = era_set_config_value;
+}
+
+static struct dm_cache_policy *era_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ struct era_policy *era = kzalloc(sizeof(*era), GFP_KERNEL);
+
+ if (!era)
+ return NULL;
+
+ init_policy_functions(era);
+ era->cache_size = cache_size;
+ mutex_init(&era->lock);
+
+ era->cb_to_era = vzalloc(from_cblock(era->cache_size) *
+ sizeof(*era->cb_to_era));
+ if (era->cb_to_era) {
+ era->era_counter = 1;
+ return &era->policy;
+ }
+
+ kfree(era);
+ return NULL;
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_cache_policy_type era_policy_type = {
+ .name = "era",
+ .version = {1, 0, 0},
+ .hint_size = 4,
+ .owner = THIS_MODULE,
+ .create = era_create,
+ .features = DM_CACHE_POLICY_SHIM
+};
+
+static int __init era_init(void)
+{
+ int r;
+
+ r = dm_cache_policy_register(&era_policy_type);
+ if (!r) {
+ DMINFO("version %u.%u.%u loaded",
+ era_policy_type.version[0],
+ era_policy_type.version[1],
+ era_policy_type.version[2]);
+ return 0;
+ }
+
+ DMERR("register failed %d", r);
+
+ dm_cache_policy_unregister(&era_policy_type);
+ return -ENOMEM;
+}
+
+static void __exit era_exit(void)
+{
+ dm_cache_policy_unregister(&era_policy_type);
+}
+
+module_init(era_init);
+module_exit(era_exit);
+
+MODULE_AUTHOR("Morgan Mears <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("era cache policy shim");
diff --git a/drivers/md/dm-cache-policy-hints.c b/drivers/md/dm-cache-policy-hints.c
new file mode 100644
index 000000000000..b706c8aa016d
--- /dev/null
+++ b/drivers/md/dm-cache-policy-hints.c
@@ -0,0 +1,774 @@
+/*
+ * Copyright (C) 2013 Red Hat.
+ *
+ * This file is released under the GPLv2.
+ *
+ * TESTING! NOT FOR PRODUCTION USE!
+ *
+ * "hints" policy to test variable hint size.
+ */
+
+#include "dm.h"
+#include "dm-cache-policy.h"
+#include "dm-cache-policy-internal.h"
+
+#include <linux/hash.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#define DM_MSG_PREFIX "cache-policy-hints"
+
+/*----------------------------------------------------------------*/
+
+static struct kmem_cache *hints_entry_cache;
+
+/*----------------------------------------------------------------*/
+
+static unsigned next_power(unsigned n, unsigned min)
+{
+ return roundup_pow_of_two(max(n, min));
+}
+
+struct hash {
+ struct hlist_head *table;
+ dm_block_t hash_bits;
+ unsigned nr_buckets;
+};
+
+struct entry {
+ struct hlist_node hlist;
+ struct list_head list;
+ dm_oblock_t oblock;
+ dm_cblock_t cblock;
+};
+
+#define DEFAULT_HINT_SIZE DM_CACHE_POLICY_MAX_HINT_SIZE
+struct policy {
+ struct dm_cache_policy policy;
+ struct mutex lock;
+
+ sector_t origin_size, block_size;
+
+ /* To optimize search in the allocation bitset */
+ unsigned find_free_nr_words, find_free_last_word;
+ unsigned long *allocation_bitset;
+
+ dm_cblock_t nr_cblocks_allocated;
+ dm_cblock_t cache_size;
+
+ struct {
+ struct list_head free; /* Free cache entry list */
+ struct list_head used; /* Used cache entry list */
+ } queues;
+
+ /* The cache hash */
+ struct hash chash;
+
+ void *hints_buffer;
+ unsigned hint_counter[4];
+
+ /* Flag to block (re)setting hint_size via the message interface */
+ bool hint_size_set;
+};
+
+/*----------------------------------------------------------------------------*/
+/* Low-level queue function. */
+static struct entry *queue_pop(struct list_head *q)
+{
+ if (!list_empty(q)) {
+ struct list_head *elt = q->next;
+
+ list_del(elt);
+ return list_entry(elt, struct entry, list);
+ }
+
+ return NULL;
+}
+/*----------------------------------------------------------------------------*/
+
+/* Allocate/free various resources. */
+static int alloc_hash(struct hash *hash, unsigned elts)
+{
+ hash->nr_buckets = next_power(elts >> 4, 16);
+ hash->hash_bits = ffs(hash->nr_buckets) - 1;
+ hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);
+
+ return hash->table ? 0 : -ENOMEM;
+}
+
+static void free_hash(struct hash *hash)
+{
+ vfree(hash->table);
+}
+
+/* Free/alloc basic cache entry structures. */
+static void __free_cache_entries(struct list_head *q)
+{
+ struct entry *e;
+
+ while ((e = queue_pop(q)))
+ kmem_cache_free(hints_entry_cache, e);
+}
+
+static void free_cache_entries(struct policy *p)
+{
+ __free_cache_entries(&p->queues.free);
+ __free_cache_entries(&p->queues.used);
+}
+
+static int alloc_cache_blocks_with_hash(struct policy *p, unsigned cache_size)
+{
+ int r = -ENOMEM;
+ unsigned u = cache_size;
+
+ p->nr_cblocks_allocated = to_cblock(0);
+
+ while (u--) {
+ struct entry *e = kmem_cache_zalloc(hints_entry_cache, GFP_KERNEL);
+
+ if (!e)
+ goto bad_cache_alloc;
+
+ list_add(&e->list, &p->queues.free);
+ }
+
+ /* Cache entries hash. */
+ r = alloc_hash(&p->chash, cache_size);
+ if (r)
+ goto bad_cache_alloc;
+
+ return 0;
+
+bad_cache_alloc:
+ free_cache_entries(p);
+
+ return r;
+}
+
+static void free_cache_blocks_and_hash(struct policy *p)
+{
+ free_hash(&p->chash);
+ free_cache_entries(p);
+}
+
+static void alloc_cblock(struct policy *p, dm_cblock_t cblock)
+{
+ BUG_ON(from_cblock(cblock) >= from_cblock(p->cache_size));
+ BUG_ON(test_bit(from_cblock(cblock), p->allocation_bitset));
+ set_bit(from_cblock(cblock), p->allocation_bitset);
+}
+
+static void free_cblock(struct policy *p, dm_cblock_t cblock)
+{
+ BUG_ON(from_cblock(cblock) >= from_cblock(p->cache_size));
+ BUG_ON(!test_bit(from_cblock(cblock), p->allocation_bitset));
+ clear_bit(from_cblock(cblock), p->allocation_bitset);
+}
+
+/*----------------------------------------------------------------------------*/
+/* Low-level functions. */
+static struct policy *to_policy(struct dm_cache_policy *p)
+{
+ return container_of(p, struct policy, policy);
+}
+
+/*----------------------------------------------------------------*/
+
+static unsigned bit_set_nr_words(unsigned long nr_cblocks)
+{
+ return dm_div_up(nr_cblocks, BITS_PER_LONG);
+}
+
+static unsigned long *alloc_bitset(unsigned nr_cblocks)
+{
+ return vzalloc(sizeof(unsigned long) * bit_set_nr_words(nr_cblocks));
+}
+
+static void free_bitset(unsigned long *bits)
+{
+ vfree(bits);
+}
+/*----------------------------------------------------------------------------*/
+
+/* Hash functions (lookup, insert, remove). */
+static struct entry *lookup_cache_entry(struct policy *p, dm_oblock_t oblock)
+{
+ struct hash *hash = &p->chash;
+ unsigned h = hash_64(from_oblock(oblock), hash->hash_bits);
+ struct entry *cur;
+ struct hlist_head *bucket = &hash->table[h];
+
+ hlist_for_each_entry(cur, bucket, hlist) {
+ if (cur->oblock == oblock) {
+ /* Move to the front of the bucket for faster access. */
+ hlist_del(&cur->hlist);
+ hlist_add_head(&cur->hlist, bucket);
+ return cur;
+ }
+ }
+
+ return NULL;
+}
+
+static void insert_cache_hash_entry(struct policy *p, struct entry *e)
+{
+ unsigned h = hash_64(from_oblock(e->oblock), p->chash.hash_bits);
+
+ hlist_add_head(&e->hlist, &p->chash.table[h]);
+}
+
+static void remove_cache_hash_entry(struct policy *p, struct entry *e)
+{
+ hlist_del(&e->hlist);
+}
+
+
+/*----------------------------------------------------------------------------*/
+/*
+ * This doesn't allocate the block.
+ */
+static int __find_free_cblock(struct policy *p, unsigned begin, unsigned end,
+ dm_cblock_t *result, unsigned *last_word)
+{
+ int r = -ENOSPC;
+ unsigned w;
+
+ for (w = begin; w < end; w++) {
+ /*
+ * ffz is undefined if no zero exists
+ */
+ if (p->allocation_bitset[w] != ULONG_MAX) {
+ *last_word = w;
+ *result = to_cblock((w * BITS_PER_LONG) + ffz(p->allocation_bitset[w]));
+ if (from_cblock(*result) < from_cblock(p->cache_size))
+ r = 0;
+
+ break;
+ }
+ }
+
+ return r;
+}
+
+static int find_free_cblock(struct policy *p, dm_cblock_t *result)
+{
+ int r = __find_free_cblock(p, p->find_free_last_word, p->find_free_nr_words, result, &p->find_free_last_word);
+
+ if (r == -ENOSPC && p->find_free_last_word)
+ r = __find_free_cblock(p, 0, p->find_free_last_word, result, &p->find_free_last_word);
+
+ return r;
+}
+
+static struct entry *alloc_cache_entry(struct policy *p)
+{
+ struct entry *e = queue_pop(&p->queues.free);
+
+ if (e) {
+ BUG_ON(from_cblock(p->nr_cblocks_allocated) >= from_cblock(p->cache_size));
+ p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) + 1);
+ }
+
+ return e;
+}
+
+static void alloc_cblock_and_insert_cache(struct policy *p, struct entry *e)
+{
+ alloc_cblock(p, e->cblock);
+ insert_cache_hash_entry(p, e);
+}
+
+static void add_cache_entry(struct policy *p, struct entry *e)
+{
+ list_add_tail(&e->list, &p->queues.used);
+ alloc_cblock_and_insert_cache(p, e);
+}
+
+static void remove_cache_entry(struct policy *p, struct entry *e)
+{
+ remove_cache_hash_entry(p, e);
+ free_cblock(p, e->cblock);
+}
+
+static struct entry *evict_cache_entry(struct policy *p)
+{
+ struct entry *e = queue_pop(&p->queues.used);
+
+ BUG_ON(!e);
+ remove_cache_entry(p, e);
+
+ return e;
+}
+
+static void get_cache_block(struct policy *p, dm_oblock_t oblock, struct bio *bio,
+ struct policy_result *result)
+{
+ struct entry *e = alloc_cache_entry(p);
+
+ if (e) {
+ int r = find_free_cblock(p, &e->cblock);
+
+ BUG_ON(r);
+ result->op = POLICY_NEW;
+
+ } else {
+ e = evict_cache_entry(p);
+ result->old_oblock = e->oblock;
+ result->op = POLICY_REPLACE;
+ }
+
+ result->cblock = e->cblock;
+ e->oblock = oblock;
+ add_cache_entry(p, e);
+}
+
+static bool in_cache(struct policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+ struct entry *e = lookup_cache_entry(p, oblock);
+
+ if (!e)
+ return false;
+
+ *cblock = e->cblock;
+ return true;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/* Public interface (see dm-cache-policy.h). */
+static int hints_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result)
+{
+ int r = 0;
+ struct policy *p = to_policy(pe);
+
+ result->op = POLICY_MISS;
+
+ if (can_block)
+ mutex_lock(&p->lock);
+
+ else if (!mutex_trylock(&p->lock))
+ return -EWOULDBLOCK;
+
+
+ if (in_cache(p, oblock, &result->cblock))
+ result->op = POLICY_HIT;
+
+ else if (!can_migrate)
+ r = -EWOULDBLOCK;
+
+ else
+ get_cache_block(p, oblock, bio, result);
+
+ mutex_unlock(&p->lock);
+
+ return r;
+}
+
+static int hints_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+ int r;
+ struct policy *p = to_policy(pe);
+
+ if (!mutex_trylock(&p->lock))
+ return -EWOULDBLOCK;
+
+ r = in_cache(p, oblock, cblock) ? 0 : -ENOENT;
+
+ mutex_unlock(&p->lock);
+
+ return r;
+}
+
+static void hints_destroy(struct dm_cache_policy *pe)
+{
+ struct policy *p = to_policy(pe);
+
+ free_bitset(p->allocation_bitset);
+ free_cache_blocks_and_hash(p);
+ kfree(p->hints_buffer);
+ kfree(p);
+}
+
+/*----------------------------------------------------------------------------*/
+
+/* Hints endianness conversions */
+#define __le8 uint8_t
+struct hints_ptrs {
+ __le64 *le64_hints;
+ __le32 *le32_hints;
+ __le16 *le16_hints;
+ __le8 *le8_hints;
+
+ uint64_t *u64_hints;
+ uint32_t *u32_hints;
+ uint16_t *u16_hints;
+ uint8_t *u8_hints;
+};
+
+typedef int (*hints_xfer_fn_t) (struct hints_ptrs*, unsigned, unsigned, bool);
+
+#define cpu_to_le8(x) (x)
+#define le8_to_cpu(x) (x)
+
+#define HINTS_XFER(width) \
+static int hints_ ## width ## _xfer(struct hints_ptrs *p, unsigned idx, unsigned val, bool to_disk) \
+{ \
+ if (to_disk) \
+ p->le ## width ## _hints[idx] = cpu_to_le ## width(val); \
+\
+ else { \
+ p->u ## width ## _hints[idx] = le ## width ## _to_cpu(p->le ## width ## _hints[idx]); \
+ if (p->u ## width ## _hints[idx] != val) { \
+ DMERR_LIMIT("%s -- hint value %llu != %u", __func__, \
+ (long long unsigned) p->u ## width ## _hints[idx], val); \
+ return -EINVAL; \
+ } \
+ } \
+\
+ return 0; \
+}
+
+HINTS_XFER(64)
+HINTS_XFER(32)
+HINTS_XFER(16)
+HINTS_XFER(8)
+
+static void calc_hint_value_counters(struct policy *p)
+{
+ unsigned div, rest = dm_cache_policy_get_hint_size(&p->policy), u;
+
+ for (u = 3, div = sizeof(uint64_t); rest; u--, div >>= 1) {
+ p->hint_counter[u] = rest / div;
+ rest -= p->hint_counter[u] * div;
+ }
+}
+
+/* Macro to set the hint ptrs for the LHS width from the RHS ptrs, whose width is LHS<<1 */
+#define DM_PTR_INC(lhs, rhs, c) \
+do { \
+ inc = 2 * p->hint_counter[c]; \
+ ptrs->le ## lhs ## _hints = (__le ## lhs *) ptrs->le ## rhs ## _hints + inc; \
+ ptrs->u ## lhs ## _hints = (uint ## lhs ## _t *) ptrs->u ## rhs ## _hints + inc; \
+} while (0)
+
+static void set_hints_ptrs(struct policy *p, struct hints_ptrs *ptrs)
+{
+ unsigned inc;
+
+ ptrs->le64_hints = p->hints_buffer;
+ ptrs->u64_hints = p->hints_buffer;
+
+ DM_PTR_INC(32, 64, 3);
+ DM_PTR_INC(16, 32, 2);
+ DM_PTR_INC(8, 16, 1);
+}
+
+static void __hints_xfer_disk(struct policy *p, bool to_disk)
+{
+ unsigned idx, u, val;
+ hints_xfer_fn_t hints_xfer_fns[] = {
+ hints_8_xfer,
+ hints_16_xfer,
+ hints_32_xfer,
+ hints_64_xfer
+ };
+
+ struct hints_ptrs hints_ptrs;
+
+ if (!p->hint_size_set) {
+ calc_hint_value_counters(p);
+ p->hint_size_set = true;
+ }
+
+ /* Must happen after calc_hint_value_counters()! */
+ set_hints_ptrs(p, &hints_ptrs);
+
+ val = 1;
+ u = ARRAY_SIZE(hints_xfer_fns);
+ while (u--) {
+ for (idx = 0; idx < p->hint_counter[u]; idx++) {
+ /*
+ * val is only suitable because of the 256 hint value limitation.
+ *
+ * A uint8_t maxes out at 255, so we could theoretically
+ * test hint sizes up to 2023 bytes with this limitation.
+ */
+ if (hints_xfer_fns[u](&hints_ptrs, idx, val, to_disk))
+ return;
+
+ val++;
+ }
+ }
+
+ return;
+}
+
+static void hints_preset_and_to_disk(struct policy *p)
+{
+ __hints_xfer_disk(p, true);
+}
+
+static void hints_from_disk_and_check(struct policy *p)
+{
+ __hints_xfer_disk(p, false);
+}
+
+static int hints_load_mapping(struct dm_cache_policy *pe,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ void *hint, bool hint_valid)
+{
+ struct policy *p = to_policy(pe);
+ struct entry *e;
+
+ e = alloc_cache_entry(p);
+ if (!e)
+ return -ENOMEM;
+
+ e->cblock = cblock;
+ e->oblock = oblock;
+
+ if (hint_valid) {
+ void *tmp = p->hints_buffer;
+
+ p->hints_buffer = hint;
+ hints_from_disk_and_check(p);
+ p->hints_buffer = tmp;
+ }
+
+ alloc_cblock_and_insert_cache(p, e);
+
+ return 0;
+}
+
+/* Walk mappings */
+static int hints_walk_mappings(struct dm_cache_policy *pe, policy_walk_fn fn, void *context)
+{
+ int r = 0;
+ struct policy *p = to_policy(pe);
+ struct entry *e;
+
+ hints_preset_and_to_disk(p);
+
+ mutex_lock(&p->lock);
+
+ list_for_each_entry(e, &p->queues.used, list) {
+ __dm_bless_for_disk(p->hints_buffer);
+ r = fn(context, e->cblock, e->oblock, (void *) p->hints_buffer);
+ if (r)
+ break;
+ }
+
+ mutex_unlock(&p->lock);
+
+ return r;
+}
+
+static struct entry *__hints_force_remove_mapping(struct policy *p, dm_oblock_t oblock)
+{
+ struct entry *e = lookup_cache_entry(p, oblock);
+
+ BUG_ON(!e);
+
+ list_del(&e->list);
+ remove_cache_entry(p, e);
+
+ return e;
+}
+
+static void hints_remove_mapping(struct dm_cache_policy *pe, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ struct entry *e;
+
+ mutex_lock(&p->lock);
+ e = __hints_force_remove_mapping(p, oblock);
+ list_add_tail(&e->list, &p->queues.free);
+
+ BUG_ON(!from_cblock(p->nr_cblocks_allocated));
+ p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) - 1);
+ mutex_unlock(&p->lock);
+}
+
+static void hints_force_mapping(struct dm_cache_policy *pe,
+ dm_oblock_t current_oblock, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ struct entry *e;
+
+ mutex_lock(&p->lock);
+
+ e = __hints_force_remove_mapping(p, current_oblock);
+ e->oblock = oblock;
+ add_cache_entry(p, e);
+
+ mutex_unlock(&p->lock);
+}
+
+static dm_cblock_t hints_residency(struct dm_cache_policy *pe)
+{
+ /* FIXME: lock mutex, not sure we can block here. */
+ return to_policy(pe)->nr_cblocks_allocated;
+}
+
+static int hints_set_config_value(struct dm_cache_policy *pe,
+ const char *key, const char *value)
+{
+ if (!strcasecmp(key, "hint_size")) {
+ struct policy *p = to_policy(pe);
+
+ if (p->hint_size_set)
+ return -EPERM;
+
+ else {
+ unsigned tmp;
+
+ if (kstrtou32(value, 10, &tmp))
+ return -EINVAL;
+
+ else {
+ int r = dm_cache_policy_set_hint_size(pe, tmp);
+
+ if (!r) {
+ calc_hint_value_counters(p);
+ p->hint_size_set = true;
+ }
+
+ return r;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int hints_emit_config_values(struct dm_cache_policy *pe, char *result, unsigned maxlen)
+{
+ ssize_t sz = 0;
+
+ DMEMIT("2 hint_size %llu", (long long unsigned) dm_cache_policy_get_hint_size(pe));
+ return 0;
+}
+
+/* Init the policy plugin interface function pointers. */
+static void init_policy_functions(struct policy *p)
+{
+ p->policy.destroy = hints_destroy;
+ p->policy.map = hints_map;
+ p->policy.lookup = hints_lookup;
+#if 0
+ p->policy.set_dirty = NULL;
+ p->policy.clear_dirty = NULL;
+#endif
+ p->policy.load_mapping = hints_load_mapping;
+ p->policy.walk_mappings = hints_walk_mappings;
+ p->policy.remove_mapping = hints_remove_mapping;
+ p->policy.writeback_work = NULL;
+ p->policy.force_mapping = hints_force_mapping;
+ p->policy.residency = hints_residency;
+ p->policy.tick = NULL;
+ p->policy.emit_config_values = hints_emit_config_values;
+ p->policy.set_config_value = hints_set_config_value;
+}
+
+static struct dm_cache_policy *hints_policy_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t block_size)
+{
+ int r;
+ struct policy *p;
+
+ BUILD_BUG_ON(DEFAULT_HINT_SIZE > 2023);
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ init_policy_functions(p);
+
+ p->cache_size = cache_size;
+ p->find_free_nr_words = bit_set_nr_words(from_cblock(cache_size));
+ p->find_free_last_word = 0;
+ p->block_size = block_size;
+ p->origin_size = origin_size;
+ mutex_init(&p->lock);
+ INIT_LIST_HEAD(&p->queues.free);
+ INIT_LIST_HEAD(&p->queues.used);
+
+ /* Allocate cache entry structs and add them to free list. */
+ r = alloc_cache_blocks_with_hash(p, from_cblock(cache_size));
+ if (r)
+ goto bad_free_policy;
+
+ /* Cache allocation bitset. */
+ p->allocation_bitset = alloc_bitset(from_cblock(cache_size));
+ if (!p->allocation_bitset)
+ goto bad_free_cache_blocks_and_hash;
+
+ p->hints_buffer = kzalloc(DM_CACHE_POLICY_MAX_HINT_SIZE, GFP_KERNEL);
+ if (!p->hints_buffer)
+ goto bad_free_allocation_bitset;
+
+ p->hint_size_set = false;
+
+ return &p->policy;
+
+bad_free_allocation_bitset:
+ free_bitset(p->allocation_bitset);
+bad_free_cache_blocks_and_hash:
+ free_cache_blocks_and_hash(p);
+bad_free_policy:
+ kfree(p);
+
+ return NULL;
+}
+
+/*----------------------------------------------------------------------------*/
+static struct dm_cache_policy_type hints_policy_type = {
+ .name = "hints",
+ .version = {1, 0, 0},
+ .hint_size = DEFAULT_HINT_SIZE,
+ .owner = THIS_MODULE,
+ .create = hints_policy_create
+};
+
+static int __init hints_init(void)
+{
+ int r = -ENOMEM;
+
+ hints_entry_cache = kmem_cache_create("dm_hints_policy_cache_entry",
+ sizeof(struct entry),
+ __alignof__(struct entry),
+ 0, NULL);
+ if (hints_entry_cache) {
+ r = dm_cache_policy_register(&hints_policy_type);
+ if (r)
+ kmem_cache_destroy(hints_entry_cache);
+
+ else {
+ DMINFO("version %u.%u.%u loaded",
+ hints_policy_type.version[0],
+ hints_policy_type.version[1],
+ hints_policy_type.version[2]);
+ }
+ }
+
+ return r;
+}
+
+static void __exit hints_exit(void)
+{
+ dm_cache_policy_unregister(&hints_policy_type);
+ kmem_cache_destroy(hints_entry_cache);
+}
+
+module_init(hints_init);
+module_exit(hints_exit);
+
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("hint size test cache policy");
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index 0928abdc49f0..9efcda10f575 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -27,21 +27,19 @@ static inline int policy_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, d
return p->lookup(p, oblock, cblock);
}
-static inline void policy_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+static inline int policy_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
- if (p->set_dirty)
- p->set_dirty(p, oblock);
+ return p->set_dirty ? p->set_dirty(p, oblock) : -EINVAL;
}
-static inline void policy_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+static inline int policy_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
- if (p->clear_dirty)
- p->clear_dirty(p, oblock);
+ return p->clear_dirty ? p->clear_dirty(p, oblock) : -EINVAL;
}
static inline int policy_load_mapping(struct dm_cache_policy *p,
dm_oblock_t oblock, dm_cblock_t cblock,
- uint32_t hint, bool hint_valid)
+ void *hint, bool hint_valid)
{
return p->load_mapping(p, oblock, cblock, hint, hint_valid);
}
@@ -61,7 +59,7 @@ static inline int policy_writeback_work(struct dm_cache_policy *p,
static inline void policy_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
- return p->remove_mapping(p, oblock);
+ p->remove_mapping(p, oblock);
}
static inline void policy_force_mapping(struct dm_cache_policy *p,
@@ -70,6 +68,12 @@ static inline void policy_force_mapping(struct dm_cache_policy *p,
return p->force_mapping(p, current_oblock, new_oblock);
}
+static inline int policy_invalidate_mapping(struct dm_cache_policy *p,
+ dm_oblock_t *oblock, dm_cblock_t *cblock)
+{
+ return p->invalidate_mapping ? p->invalidate_mapping(p, oblock, cblock) : -EINVAL;
+}
+
static inline dm_cblock_t policy_residency(struct dm_cache_policy *p)
{
return p->residency(p);
@@ -119,8 +123,17 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p);
+#define DM_CACHE_POLICY_DEF_HINT_SIZE 4U
+#define DM_CACHE_POLICY_MAX_HINT_SIZE 128U
+int dm_cache_policy_set_hint_size(struct dm_cache_policy *p, unsigned hint_size);
size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
+/*
+ * Returns true if the policy is only a shim
+ * layer in a policy stack.
+ */
+bool dm_cache_policy_is_shim(struct dm_cache_policy *p);
+
/*----------------------------------------------------------------*/
#endif /* DM_CACHE_POLICY_INTERNAL_H */
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 4296155090b2..4fc63c54dcf1 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -6,6 +6,7 @@
#include "dm-cache-policy.h"
#include "dm.h"
+#include "persistent-data/dm-btree.h"
#include <linux/hash.h>
#include <linux/module.h>
@@ -85,7 +86,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t)
static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
- if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+ if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
t->nr_seq_samples++;
else {
/*
@@ -100,7 +101,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio)
t->nr_rand_samples++;
}
- t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+ t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
}
static void iot_check_for_pattern_switch(struct io_tracker *t)
@@ -151,6 +152,21 @@ static void queue_init(struct queue *q)
}
/*
+ * Checks to see if the queue is empty.
+ * FIXME: reduce cpu usage.
+ */
+static bool queue_empty(struct queue *q)
+{
+ unsigned i;
+
+ for (i = 0; i < NR_QUEUE_LEVELS; i++)
+ if (!list_empty(q->qs + i))
+ return false;
+
+ return true;
+}
+
+/*
* Insert an entry to the back of the given level.
*/
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
@@ -224,6 +240,7 @@ struct entry {
* FIXME: pack these better
*/
bool in_cache:1;
+ bool dirty:1;
unsigned hit_count;
unsigned generation;
unsigned tick;
@@ -238,13 +255,15 @@ struct mq_policy {
struct io_tracker tracker;
/*
- * We maintain two queues of entries. The cache proper contains
- * the currently active mappings. Whereas the pre_cache tracks
- * blocks that are being hit frequently and potential candidates
- * for promotion to the cache.
+ * We maintain three queues of entries. The cache proper,
+ * consisting of a clean and dirty queue, contains the currently
+ * active mappings. Whereas the pre_cache tracks blocks that
+ * are being hit frequently and potential candidates for promotion
+ * to the cache.
*/
struct queue pre_cache;
- struct queue cache;
+ struct queue cache_clean;
+ struct queue cache_dirty;
/*
* Keeps track of time, incremented by the core. We use this to
@@ -311,7 +330,7 @@ struct mq_policy {
/*----------------------------------------------------------------*/
/* Free/alloc mq cache entry structures. */
-static void takeout_queue(struct list_head *lh, struct queue *q)
+static void concat_queue(struct list_head *lh, struct queue *q)
{
unsigned level;
@@ -323,8 +342,9 @@ static void free_entries(struct mq_policy *mq)
{
struct entry *e, *tmp;
- takeout_queue(&mq->free, &mq->pre_cache);
- takeout_queue(&mq->free, &mq->cache);
+ concat_queue(&mq->free, &mq->pre_cache);
+ concat_queue(&mq->free, &mq->cache_clean);
+ concat_queue(&mq->free, &mq->cache_dirty);
list_for_each_entry_safe(e, tmp, &mq->free, list)
kmem_cache_free(mq_entry_cache, e);
@@ -438,6 +458,11 @@ static bool any_free_cblocks(struct mq_policy *mq)
return mq->nr_cblocks_allocated < from_cblock(mq->cache_size);
}
+static bool any_clean_cblocks(struct mq_policy *mq)
+{
+ return !queue_empty(&mq->cache_clean);
+}
+
/*
* Fills result out with a cache block that isn't in use, or return
* -ENOSPC. This does _not_ mark the cblock as allocated, the caller is
@@ -508,7 +533,8 @@ static void push(struct mq_policy *mq, struct entry *e)
if (e->in_cache) {
alloc_cblock(mq, e->cblock);
- queue_push(&mq->cache, queue_level(e), &e->list);
+ queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
+ queue_level(e), &e->list);
} else
queue_push(&mq->pre_cache, queue_level(e), &e->list);
}
@@ -531,14 +557,16 @@ static void del(struct mq_policy *mq, struct entry *e)
*/
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
- struct entry *e = container_of(queue_pop(q), struct entry, list);
+ struct entry *e;
+ struct list_head *h = queue_pop(q);
- if (e) {
- hash_remove(e);
+ if (!h)
+ return NULL;
- if (e->in_cache)
- free_cblock(mq, e->cblock);
- }
+ e = container_of(h, struct entry, list);
+ hash_remove(e);
+ if (e->in_cache)
+ free_cblock(mq, e->cblock);
return e;
}
@@ -556,7 +584,8 @@ static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
* of the entries.
*
* At the moment the threshold is taken by averaging the hit counts of some
- * of the entries in the cache (the first 20 entries of the first level).
+ * of the entries in the cache (the first 20 entries across all levels in
+ * ascending order, giving preference to the clean entries at each level).
*
* We can be much cleverer than this though. For example, each promotion
* could bump up the threshold helping to prevent churn. Much more to do
@@ -578,7 +607,16 @@ static void check_generation(struct mq_policy *mq)
mq->generation++;
for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
- head = mq->cache.qs + level;
+ head = mq->cache_clean.qs + level;
+ list_for_each_entry(e, head, list) {
+ nr++;
+ total += e->hit_count;
+
+ if (++count >= MAX_TO_AVERAGE)
+ break;
+ }
+
+ head = mq->cache_dirty.qs + level;
list_for_each_entry(e, head, list) {
nr++;
total += e->hit_count;
@@ -631,19 +669,28 @@ static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
* - set the hit count to a hard coded value other than 1, eg, is it better
* if it goes in at level 2?
*/
-static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
+static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock, dm_cblock_t *cblock)
{
- dm_cblock_t result;
- struct entry *demoted = pop(mq, &mq->cache);
+ struct entry *demoted = pop(mq, &mq->cache_clean);
- BUG_ON(!demoted);
- result = demoted->cblock;
+ if (!demoted)
+ /*
+ * We could get a block from mq->cache_dirty, but that
+ * would add extra latency to the triggering bio as it
+ * waits for the writeback. Better to not promote this
+ * time and hope there's a clean block next time this block
+ * is hit.
+ */
+ return -ENOSPC;
+
+ *cblock = demoted->cblock;
*oblock = demoted->oblock;
demoted->in_cache = false;
+ demoted->dirty = false;
demoted->hit_count = 1;
push(mq, demoted);
- return result;
+ return 0;
}
/*
@@ -662,17 +709,18 @@ static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
bool discarded_oblock, int data_dir)
{
- if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE)
+ if (data_dir == READ)
+ return mq->promote_threshold + READ_PROMOTE_THRESHOLD;
+
+ if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
/*
* We don't need to do any copying at all, so give this a
- * very low threshold. In practice this only triggers
- * during initial population after a format.
+ * very low threshold.
*/
return DISCARDED_PROMOTE_THRESHOLD;
+ }
- return data_dir == READ ?
- (mq->promote_threshold + READ_PROMOTE_THRESHOLD) :
- (mq->promote_threshold + WRITE_PROMOTE_THRESHOLD);
+ return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD;
}
static bool should_promote(struct mq_policy *mq, struct entry *e,
@@ -697,17 +745,22 @@ static int cache_entry_found(struct mq_policy *mq,
}
/*
- * Moves and entry from the pre_cache to the cache. The main work is
+ * Moves an entry from the pre_cache to the cache. The main work is
* finding which cache block to use.
*/
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
struct policy_result *result)
{
+ int r;
dm_cblock_t cblock;
if (find_free_cblock(mq, &cblock) == -ENOSPC) {
result->op = POLICY_REPLACE;
- cblock = demote_cblock(mq, &result->old_oblock);
+ r = demote_cblock(mq, &result->old_oblock, &cblock);
+ if (r) {
+ result->op = POLICY_MISS;
+ return 0;
+ }
} else
result->op = POLICY_NEW;
@@ -715,6 +768,7 @@ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
del(mq, e);
e->in_cache = true;
+ e->dirty = false;
push(mq, e);
return 0;
@@ -740,6 +794,17 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
return r;
}
+static void insert_entry_in_pre_cache(struct mq_policy *mq,
+ struct entry *e, dm_oblock_t oblock)
+{
+ e->in_cache = false;
+ e->dirty = false;
+ e->oblock = oblock;
+ e->hit_count = 1;
+ e->generation = mq->generation;
+ push(mq, e);
+}
+
static void insert_in_pre_cache(struct mq_policy *mq,
dm_oblock_t oblock)
{
@@ -757,39 +822,51 @@ static void insert_in_pre_cache(struct mq_policy *mq,
return;
}
- e->in_cache = false;
- e->oblock = oblock;
- e->hit_count = 1;
- e->generation = mq->generation;
- push(mq, e);
+ insert_entry_in_pre_cache(mq, e, oblock);
}
static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
struct policy_result *result)
{
+ int r;
struct entry *e;
dm_cblock_t cblock;
if (find_free_cblock(mq, &cblock) == -ENOSPC) {
- result->op = POLICY_MISS;
- insert_in_pre_cache(mq, oblock);
- return;
- }
+ r = demote_cblock(mq, &result->old_oblock, &cblock);
+ if (unlikely(r)) {
+ result->op = POLICY_MISS;
+ insert_in_pre_cache(mq, oblock);
+ return;
+ }
- e = alloc_entry(mq);
- if (unlikely(!e)) {
- result->op = POLICY_MISS;
- return;
+ /*
+ * This will always succeed, since we've just demoted.
+ */
+ e = pop(mq, &mq->pre_cache);
+ result->op = POLICY_REPLACE;
+
+ } else {
+ e = alloc_entry(mq);
+ if (unlikely(!e))
+ e = pop(mq, &mq->pre_cache);
+
+ if (unlikely(!e)) {
+ result->op = POLICY_MISS;
+ return;
+ }
+
+ result->op = POLICY_NEW;
}
e->oblock = oblock;
e->cblock = cblock;
e->in_cache = true;
+ e->dirty = false;
e->hit_count = 1;
e->generation = mq->generation;
push(mq, e);
- result->op = POLICY_NEW;
result->cblock = e->cblock;
}
@@ -915,9 +992,47 @@ static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t
return r;
}
+/*
+ * FIXME: __mq_set_clear_dirty can block due to mutex.
+ * Ideally a policy should not block in functions called
+ * from the map() function. Explore using RCU.
+ */
+static int __mq_set_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock, bool set)
+{
+ int r = 0;
+ struct mq_policy *mq = to_mq_policy(p);
+ struct entry *e;
+
+ mutex_lock(&mq->lock);
+ e = hash_lookup(mq, oblock);
+ if (!e) {
+ r = -ENOENT;
+ DMWARN("__mq_set_clear_dirty called for a block that isn't in the cache");
+ } else {
+ BUG_ON(!e->in_cache);
+
+ del(mq, e);
+ e->dirty = set;
+ push(mq, e);
+ }
+ mutex_unlock(&mq->lock);
+
+ return r;
+}
+
+static int mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ return __mq_set_clear_dirty(p, oblock, true);
+}
+
+static int mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ return __mq_set_clear_dirty(p, oblock, false);
+}
+
static int mq_load_mapping(struct dm_cache_policy *p,
dm_oblock_t oblock, dm_cblock_t cblock,
- uint32_t hint, bool hint_valid)
+ void *hint, bool hint_valid)
{
struct mq_policy *mq = to_mq_policy(p);
struct entry *e;
@@ -929,64 +1044,127 @@ static int mq_load_mapping(struct dm_cache_policy *p,
e->cblock = cblock;
e->oblock = oblock;
e->in_cache = true;
- e->hit_count = hint_valid ? hint : 1;
+ e->dirty = false; /* this gets corrected in a minute */
+ e->hit_count = hint_valid ? le32_to_cpu(*((__le32 *) hint)) : 1;
e->generation = mq->generation;
push(mq, e);
return 0;
}
+static int mq_save_hints(struct mq_policy *mq, struct queue *q,
+ policy_walk_fn fn, void *context)
+{
+ int r;
+ unsigned level;
+ struct entry *e;
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ list_for_each_entry(e, q->qs + level, list) {
+ __le32 value = cpu_to_le32(e->hit_count);
+ __dm_bless_for_disk(&value);
+
+ r = fn(context, e->cblock, e->oblock, &value);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
void *context)
{
struct mq_policy *mq = to_mq_policy(p);
int r = 0;
- struct entry *e;
- unsigned level;
mutex_lock(&mq->lock);
- for (level = 0; level < NR_QUEUE_LEVELS; level++)
- list_for_each_entry(e, &mq->cache.qs[level], list) {
- r = fn(context, e->cblock, e->oblock, e->hit_count);
- if (r)
- goto out;
- }
+ r = mq_save_hints(mq, &mq->cache_clean, fn, context);
+ if (!r)
+ r = mq_save_hints(mq, &mq->cache_dirty, fn, context);
-out:
mutex_unlock(&mq->lock);
return r;
}
+static int __remove_mapping(struct mq_policy *mq,
+ dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+ struct entry *e;
+
+ e = hash_lookup(mq, oblock);
+
+ if (e && e->in_cache) {
+ del(mq, e);
+ e->in_cache = false;
+ e->dirty = false;
+
+ if (cblock) {
+ *cblock = e->cblock;
+ list_add(&e->list, &mq->free);
+ } else
+ push(mq, e);
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
+ int r;
struct mq_policy *mq = to_mq_policy(p);
- struct entry *e;
mutex_lock(&mq->lock);
+ r = __remove_mapping(mq, oblock, NULL);
+ mutex_unlock(&mq->lock);
- e = hash_lookup(mq, oblock);
+ BUG_ON(r);
+}
- BUG_ON(!e || !e->in_cache);
+static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
+ dm_cblock_t *cblock)
+{
+ struct entry *e = pop(mq, &mq->cache_dirty);
- del(mq, e);
- e->in_cache = false;
+ if (!e)
+ return -ENODATA;
+
+ *oblock = e->oblock;
+ *cblock = e->cblock;
+ e->dirty = false;
push(mq, e);
+ return 0;
+}
+
+static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
+ dm_cblock_t *cblock)
+{
+ int r;
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ r = __mq_writeback_work(mq, oblock, cblock);
mutex_unlock(&mq->lock);
+
+ return r;
}
-static void force_mapping(struct mq_policy *mq,
- dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+static void __force_mapping(struct mq_policy *mq,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
struct entry *e = hash_lookup(mq, current_oblock);
- BUG_ON(!e || !e->in_cache);
-
- del(mq, e);
- e->oblock = new_oblock;
- push(mq, e);
+ if (e && e->in_cache) {
+ del(mq, e);
+ e->oblock = new_oblock;
+ e->dirty = true;
+ push(mq, e);
+ }
}
static void mq_force_mapping(struct dm_cache_policy *p,
@@ -995,16 +1173,35 @@ static void mq_force_mapping(struct dm_cache_policy *p,
struct mq_policy *mq = to_mq_policy(p);
mutex_lock(&mq->lock);
- force_mapping(mq, current_oblock, new_oblock);
+ __force_mapping(mq, current_oblock, new_oblock);
+ mutex_unlock(&mq->lock);
+}
+
+static int mq_invalidate_mapping(struct dm_cache_policy *p,
+ dm_oblock_t *oblock, dm_cblock_t *cblock)
+{
+ int r;
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ r = __remove_mapping(mq, *oblock, cblock);
mutex_unlock(&mq->lock);
+
+ return r;
}
static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
+ dm_cblock_t r;
struct mq_policy *mq = to_mq_policy(p);
- /* FIXME: lock mutex, not sure we can block here */
- return to_cblock(mq->nr_cblocks_allocated);
+ might_sleep();
+
+ mutex_lock(&mq->lock);
+ r = to_cblock(mq->nr_cblocks_allocated);
+ mutex_unlock(&mq->lock);
+
+ return r;
}
static void mq_tick(struct dm_cache_policy *p)
@@ -1057,11 +1254,14 @@ static void init_policy_functions(struct mq_policy *mq)
mq->policy.destroy = mq_destroy;
mq->policy.map = mq_map;
mq->policy.lookup = mq_lookup;
+ mq->policy.set_dirty = mq_set_dirty;
+ mq->policy.clear_dirty = mq_clear_dirty;
mq->policy.load_mapping = mq_load_mapping;
mq->policy.walk_mappings = mq_walk_mappings;
mq->policy.remove_mapping = mq_remove_mapping;
- mq->policy.writeback_work = NULL;
+ mq->policy.writeback_work = mq_writeback_work;
mq->policy.force_mapping = mq_force_mapping;
+ mq->policy.invalidate_mapping = mq_invalidate_mapping;
mq->policy.residency = mq_residency;
mq->policy.tick = mq_tick;
mq->policy.emit_config_values = mq_emit_config_values;
@@ -1093,7 +1293,9 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
mq->find_free_last_word = 0;
queue_init(&mq->pre_cache);
- queue_init(&mq->cache);
+ queue_init(&mq->cache_clean);
+ queue_init(&mq->cache_dirty);
+
mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
mq->nr_entries = 2 * from_cblock(cache_size);
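The mq changes above split the single cache queue into cache_clean and cache_dirty, and adjusted_promote_threshold() now decides promotion cost by I/O direction: reads always pay the read threshold, a write to a discarded block is promoted almost for free whenever a free or clean block can be taken without triggering a writeback, and all other writes pay the write threshold. A compact userspace sketch of just that decision (the constants here are illustrative; the real values live in dm-cache-policy-mq.c):

#include <stdbool.h>
#include <stdio.h>

#define READ	0
#define WRITE	1

enum { READ_PROMOTE = 4, WRITE_PROMOTE = 8, DISCARDED_PROMOTE = 1 };

static unsigned adjusted_threshold(unsigned base, bool discarded,
				   bool free_or_clean_available, int data_dir)
{
	if (data_dir == READ)
		return base + READ_PROMOTE;

	/* A discarded block needs no copy, so promote it cheaply as long
	 * as a block is available without forcing a writeback first. */
	if (discarded && free_or_clean_available)
		return DISCARDED_PROMOTE;

	return base + WRITE_PROMOTE;
}

int main(void)
{
	printf("%u\n", adjusted_threshold(0, true, true, WRITE));	/* 1 */
	printf("%u\n", adjusted_threshold(0, false, true, WRITE));	/* 8 */
	return 0;
}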
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
index 21c03c570c06..48bdd5756013 100644
--- a/drivers/md/dm-cache-policy.c
+++ b/drivers/md/dm-cache-policy.c
@@ -5,6 +5,7 @@
*/
#include "dm-cache-policy-internal.h"
+#include "dm-cache-stack-utils.h"
#include "dm.h"
#include <linux/module.h>
@@ -54,6 +55,9 @@ static struct dm_cache_policy_type *get_policy_once(const char *name)
static struct dm_cache_policy_type *get_policy(const char *name)
{
struct dm_cache_policy_type *t;
+ char name_wo_delim[CACHE_POLICY_NAME_SIZE];
+ char *p_delim;
+ int n;
t = get_policy_once(name);
if (IS_ERR(t))
@@ -68,6 +72,28 @@ static struct dm_cache_policy_type *get_policy(const char *name)
if (IS_ERR(t))
return NULL;
+ if (t)
+ return t;
+
+ /*
+ * We also need to check for dm-cache-<@name> with no trailing
+ * DM_CACHE_POLICY_STACK_DELIM if @name has one, in order to
+ * support loadable policy shims.
+ */
+ n = strlcpy(name_wo_delim, name, sizeof(name_wo_delim));
+ if (n >= sizeof(name_wo_delim))
+ return NULL;
+ p_delim = strchr(name_wo_delim, DM_CACHE_POLICY_STACK_DELIM);
+ if (!p_delim || (p_delim[1] != '\0'))
+ return NULL;
+ p_delim[0] = '\0';
+
+ request_module("dm-cache-%s", name_wo_delim);
+
+ t = get_policy_once(name);
+ if (IS_ERR(t))
+ return NULL;
+
return t;
}
@@ -80,9 +106,10 @@ int dm_cache_policy_register(struct dm_cache_policy_type *type)
{
int r;
- /* One size fits all for now */
- if (type->hint_size != 0 && type->hint_size != 4) {
- DMWARN("hint size must be 0 or 4 but %llu supplied.", (unsigned long long) type->hint_size);
+ if (type->hint_size > DM_CACHE_POLICY_MAX_HINT_SIZE) {
+ DMWARN("hint size must be <= %llu but %llu was supplied.",
+ (unsigned long long) DM_CACHE_POLICY_MAX_HINT_SIZE,
+ (unsigned long long) type->hint_size);
return -EINVAL;
}
@@ -116,16 +143,21 @@ struct dm_cache_policy *dm_cache_policy_create(const char *name,
struct dm_cache_policy *p = NULL;
struct dm_cache_policy_type *type;
+ if (dm_cache_stack_utils_string_is_policy_stack(name))
+ return dm_cache_stack_utils_policy_stack_create(name, cache_size,
+ origin_size,
+ cache_block_size);
+
type = get_policy(name);
if (!type) {
DMWARN("unknown policy type");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
p = type->create(cache_size, origin_size, cache_block_size);
if (!p) {
put_policy(type);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
p->private = type;
@@ -137,8 +169,12 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p)
{
struct dm_cache_policy_type *t = p->private;
- p->destroy(p);
- put_policy(t);
+ if (dm_cache_stack_utils_string_is_policy_stack(t->name))
+ dm_cache_stack_utils_policy_stack_destroy(p);
+ else {
+ p->destroy(p);
+ put_policy(t);
+ }
}
EXPORT_SYMBOL_GPL(dm_cache_policy_destroy);
@@ -166,4 +202,24 @@ size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p)
}
EXPORT_SYMBOL_GPL(dm_cache_policy_get_hint_size);
+int dm_cache_policy_set_hint_size(struct dm_cache_policy *p, unsigned hint_size)
+{
+ struct dm_cache_policy_type *t = p->private;
+
+ if (hint_size > DM_CACHE_POLICY_MAX_HINT_SIZE)
+ return -EPERM;
+
+ t->hint_size = hint_size;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_set_hint_size);
+
+bool dm_cache_policy_is_shim(struct dm_cache_policy *p)
+{
+ struct dm_cache_policy_type *t = p->private;
+
+ return (t->features & DM_CACHE_POLICY_SHIM) ? true : false;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_is_shim);
+
/*----------------------------------------------------------------*/
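The get_policy() change above adds a second request_module() attempt so loadable shim policies resolve: a lone shim name carries a trailing stack delimiter ('+', per the comment in dm-cache-stack-utils.c below), which gets stripped before asking for the matching dm-cache-<name> module. A small userspace sketch of just that name handling (the helper name is invented for illustration):

#include <stdio.h>
#include <string.h>

#define STACK_DELIM '+'	/* assumed value of DM_CACHE_POLICY_STACK_DELIM */

/* Illustrative helper: derive the module request name for a lone shim
 * policy name such as "era+"; anything else is rejected. */
static int module_name_for_shim(const char *name, char *buf, size_t len)
{
	char *delim;

	if (strlen(name) >= len)
		return -1;
	strcpy(buf, name);

	delim = strchr(buf, STACK_DELIM);
	if (!delim || delim[1] != '\0')
		return -1;

	*delim = '\0';	/* the kernel then does request_module("dm-cache-%s", buf) */
	return 0;
}

int main(void)
{
	char buf[16];

	if (!module_name_for_shim("era+", buf, sizeof(buf)))
		printf("dm-cache-%s\n", buf);	/* prints dm-cache-era */
	return 0;
}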
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 33369ca9614f..84257c8b27e6 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -8,6 +8,7 @@
#define DM_CACHE_POLICY_H
#include "dm-cache-block-types.h"
+#include "persistent-data/dm-btree.h"
#include <linux/device-mapper.h>
@@ -79,7 +80,8 @@ struct policy_result {
};
typedef int (*policy_walk_fn)(void *context, dm_cblock_t cblock,
- dm_oblock_t oblock, uint32_t hint);
+ dm_oblock_t oblock, void *hint)
+ __dm_written_to_disk(hint);
/*
* The cache policy object. Just a bunch of methods. It is envisaged that
@@ -136,17 +138,28 @@ struct dm_cache_policy {
int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
/*
- * oblock must be a mapped block. Must not block.
+ * set/clear a block's dirty state.
+ *
+ * oblock is the block we want to change state for. Must not block.
+ *
+ * Returns:
+ *
+ * 0 if block is in cache _and_ set/clear respectively succeeded
+ *
+ * -EINVAL if block is in cache _but_ the block was already dirty
+ *	   on a set call / already clean on a clear call
+ *
+ * -ENOENT if block is not in cache
*/
- void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
- void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
+ int (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
+ int (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
/*
* Called when a cache target is first created. Used to load a
* mapping from the metadata device into the policy.
*/
int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
- dm_cblock_t cblock, uint32_t hint, bool hint_valid);
+ dm_cblock_t cblock, void *hint, bool hint_valid);
int (*walk_mappings)(struct dm_cache_policy *p, policy_walk_fn fn,
void *context);
@@ -159,8 +172,35 @@ struct dm_cache_policy {
void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,
dm_oblock_t new_oblock);
- int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
+ /*
+ * Invalidate mapping for an origin block.
+ *
+ * Returns:
+ *
+ * 0 and @cblock,@oblock: if mapped, the policy returns the cache block
+ * and optionally changes the original block (e.g. era)
+ *
+ * -EINVAL: invalidation not supported
+ *
+ * -ENOENT: no entry for @oblock in the cache
+ *
+ * -ENODATA: all possible invalidation requests processed
+ *
+ * May return a _different_ oblock than the requested one
+ * to allow the policy to decide which block to invalidate (e.g. era).
+ */
+ int (*invalidate_mapping)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
+ /*
+ * Provide a dirty block to be written back by the core target.
+ *
+ * Returns:
+ *
+ * 0 and @cblock,@oblock: block to write back provided
+ *
+ * -ENODATA: no dirty blocks available
+ */
+ int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
/*
* How full is the cache?
@@ -188,11 +228,26 @@ struct dm_cache_policy {
* Book keeping ptr for the policy register, not for general use.
*/
void *private;
+
+ /*
+ * Support for stackable policies. A policy stack consists of 0 or more
+ * "non-terminal" policies (which can intercept requests to provide
+ * additional functionality, but ultimately hand them down the stack)
+ * followed by one "terminal" policy which actually runs a caching
+ * algorithm. This is the pointer to the "next" policy in a
+ * non-terminal policy. It will always be NULL in a terminal policy.
+ */
+ struct dm_cache_policy *child;
};
/*----------------------------------------------------------------*/
/*
+ * Indicates that a policy is only a shim layer in a policy stack.
+ */
+#define DM_CACHE_POLICY_SHIM (1 << 0)
+
+/*
* We maintain a little register of the different policy types.
*/
#define CACHE_POLICY_NAME_SIZE 16
@@ -210,9 +265,9 @@ struct dm_cache_policy_type {
unsigned version[CACHE_POLICY_VERSION_SIZE];
/*
- * Policies may store a hint for each each cache block.
- * Currently the size of this hint must be 0 or 4 bytes but we
- * expect to relax this in future.
+ * Policies may store a hint for each cache block.
+ * Currently the size of this hint must be <=
+ * DM_CACHE_POLICY_MAX_HINT_SIZE bytes.
*/
size_t hint_size;
@@ -220,6 +275,8 @@ struct dm_cache_policy_type {
struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
sector_t origin_size,
sector_t block_size);
+
+ unsigned long features;
};
int dm_cache_policy_register(struct dm_cache_policy_type *type);
@@ -227,4 +284,4 @@ void dm_cache_policy_unregister(struct dm_cache_policy_type *type);
/*----------------------------------------------------------------*/
-#endif /* DM_CACHE_POLICY_H */
+#endif /* DM_CACHE_POLICY_H */
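The invalidate_mapping contract documented above is written so a caller can simply keep asking until the policy reports -ENODATA. A hypothetical caller sketch built only from that contract and the policy_invalidate_mapping() wrapper in dm-cache-policy-internal.h (this loop is not part of the patch; the real consumer is the cache core target):

static int drain_invalidations(struct dm_cache_policy *p)
{
	dm_oblock_t oblock;
	dm_cblock_t cblock;
	int r;

	do {
		r = policy_invalidate_mapping(p, &oblock, &cblock);
		if (!r) {
			/* ... write back / drop the mapping for cblock ... */
		}
	} while (!r);

	/* -ENODATA means every requested invalidation has been handed out;
	 * -EINVAL means the policy does not support invalidation at all. */
	return r == -ENODATA ? 0 : r;
}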
diff --git a/drivers/md/dm-cache-shim-utils.c b/drivers/md/dm-cache-shim-utils.c
new file mode 100644
index 000000000000..8b8d5d58865e
--- /dev/null
+++ b/drivers/md/dm-cache-shim-utils.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2013 NetApp, Inc. All Rights Reserved, contribution by
+ * Morgan Mears.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details
+ *
+ */
+
+#include "dm-cache-policy.h"
+#include "dm-cache-policy-internal.h"
+#include "dm-cache-shim-utils.h"
+#include "dm.h"
+
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX "cache-shim-utils"
+
+/*----------------------------------------------------------------*/
+
+static int shim_nested_walk_apply(void *context, dm_cblock_t cblock,
+ dm_oblock_t oblock, void *hint)
+{
+ struct shim_walk_map_ctx *ctx = context;
+ struct dm_cache_policy *p;
+ int child_hint_size;
+ void *my_hint;
+
+ /* Save off our child's hint */
+ if (ctx->child_hint_buf) {
+ p = ctx->my_policy;
+ child_hint_size = dm_cache_policy_get_hint_size(p->child);
+ if (child_hint_size && hint)
+ memcpy(&ctx->child_hint_buf[0], hint, child_hint_size);
+ }
+
+ /* Provide my hint or NULL up the stack */
+ my_hint = ctx->cblock_to_hint_fn ?
+ ctx->cblock_to_hint_fn(ctx, cblock, oblock) : NULL;
+
+ /* Reverse recurse, unless short-circuited */
+ return (ctx->parent_fn) ?
+ (*ctx->parent_fn)(ctx->parent_ctx, cblock, oblock, my_hint) : 0;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Public interface, via the policy struct. See dm-cache-policy.h for a
+ * description of these.
+ */
+
+static void shim_destroy(struct dm_cache_policy *p)
+{
+ kfree(p);
+}
+
+static int shim_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result)
+{
+ return policy_map(p->child, oblock, can_block, can_migrate,
+ discarded_oblock, bio, result);
+}
+
+static int shim_lookup(struct dm_cache_policy *p, dm_oblock_t oblock,
+ dm_cblock_t *cblock)
+{
+ return policy_lookup(p->child, oblock, cblock);
+}
+
+static int shim_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ return policy_set_dirty(p->child, oblock);
+}
+
+static int shim_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ return policy_clear_dirty(p->child, oblock);
+}
+
+static int shim_load_mapping(struct dm_cache_policy *p,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ void *hint, bool hint_valid)
+{
+ return policy_load_mapping(p->child, oblock, cblock, hint, hint_valid);
+}
+
+static int shim_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
+ void *context)
+{
+ struct shim_walk_map_ctx my_ctx, *parent_ctx;
+ int my_hint_size;
+
+ parent_ctx = (struct shim_walk_map_ctx *)context;
+ my_hint_size = dm_cache_policy_get_hint_size(p);
+
+ my_ctx.parent_ctx = parent_ctx;
+ my_ctx.parent_fn = fn;
+ my_ctx.my_policy = p;
+ my_ctx.child_hint_buf = (parent_ctx->child_hint_buf) ?
+ &parent_ctx->child_hint_buf[my_hint_size] : NULL;
+ my_ctx.cblock_to_hint_fn = NULL;
+
+ return policy_walk_mappings(p->child, shim_nested_walk_apply, &my_ctx);
+}
+
+static void shim_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ policy_remove_mapping(p->child, oblock);
+}
+
+static int shim_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
+ dm_cblock_t *cblock)
+{
+ return policy_writeback_work(p->child, oblock, cblock);
+}
+
+static void shim_force_mapping(struct dm_cache_policy *p,
+ dm_oblock_t current_oblock,
+ dm_oblock_t new_oblock)
+{
+ policy_force_mapping(p->child, current_oblock, new_oblock);
+}
+
+static int shim_invalidate_mapping(struct dm_cache_policy *p,
+ dm_oblock_t *oblock, dm_cblock_t *cblock)
+{
+ return policy_invalidate_mapping(p->child, oblock, cblock);
+}
+
+static dm_cblock_t shim_residency(struct dm_cache_policy *p)
+{
+ return policy_residency(p->child);
+}
+
+static void shim_tick(struct dm_cache_policy *p)
+{
+ policy_tick(p->child);
+}
+
+static int shim_set_config_value(struct dm_cache_policy *p,
+ const char *key, const char *value)
+{
+ return policy_set_config_value(p->child, key, value);
+}
+
+static int shim_emit_config_values(struct dm_cache_policy *p, char *result,
+ unsigned maxlen)
+{
+ return policy_emit_config_values(p->child, result, maxlen);
+}
+
+void dm_cache_shim_utils_init_shim_policy(struct dm_cache_policy *p)
+{
+ p->destroy = shim_destroy;
+ p->map = shim_map;
+ p->lookup = shim_lookup;
+ p->set_dirty = shim_set_dirty;
+ p->clear_dirty = shim_clear_dirty;
+ p->load_mapping = shim_load_mapping;
+ p->walk_mappings = shim_walk_mappings;
+ p->remove_mapping = shim_remove_mapping;
+ p->writeback_work = shim_writeback_work;
+ p->force_mapping = shim_force_mapping;
+ p->invalidate_mapping = shim_invalidate_mapping;
+ p->residency = shim_residency;
+ p->tick = shim_tick;
+ p->emit_config_values = shim_emit_config_values;
+ p->set_config_value = shim_set_config_value;
+}
+EXPORT_SYMBOL_GPL(dm_cache_shim_utils_init_shim_policy);
+
+int dm_cache_shim_utils_walk_map_with_ctx(struct shim_walk_map_ctx *ctx)
+{
+ struct dm_cache_policy *p = ctx->my_policy;
+
+ /*
+ * Used by the stack root policy in its walk_mappings implementation,
+ * to provide the top-level context that contains the buffer used to
+ * consolidate hint data from all of the shims and the terminal policy.
+ */
+ return policy_walk_mappings(p->child, shim_nested_walk_apply, ctx);
+}
+EXPORT_SYMBOL_GPL(dm_cache_shim_utils_walk_map_with_ctx);
+
+int dm_cache_shim_utils_walk_map(struct dm_cache_policy *p, policy_walk_fn fn,
+ void *context, cblock_to_hint_fn_t hint_fn)
+{
+ struct shim_walk_map_ctx my_ctx, *parent_ctx;
+ int my_hint_size;
+
+ /*
+ * Used by shim policies for their walk_mappings implementations.
+ * Handles packing up the hint data, in conjunction with
+ * shim_nested_walk_apply.
+ */
+ parent_ctx = (struct shim_walk_map_ctx *)context;
+ my_hint_size = dm_cache_policy_get_hint_size(p);
+
+ my_ctx.parent_ctx = parent_ctx;
+ my_ctx.parent_fn = fn;
+ my_ctx.my_policy = p;
+ my_ctx.child_hint_buf = (parent_ctx && parent_ctx->child_hint_buf) ?
+ &parent_ctx->child_hint_buf[my_hint_size] : NULL;
+ my_ctx.cblock_to_hint_fn = hint_fn;
+
+ return policy_walk_mappings(p->child, shim_nested_walk_apply, &my_ctx);
+}
+EXPORT_SYMBOL_GPL(dm_cache_shim_utils_walk_map);
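The net effect of shim_nested_walk_apply() and the child_hint_buf offsets above is that the stack root ends up with one flat buffer holding every level's hint, top of the stack first, each policy contributing exactly its own hint_size bytes; hintless shims contribute nothing, which is what lets them come and go without invalidating existing metadata. A tiny sketch of the resulting layout for a hypothetical "era+mq" stack with 4-byte hints at both levels:

#include <stdio.h>

int main(void)
{
	/* Hypothetical stack "era+mq": per-level hint sizes, top first. */
	const char *level[] = { "era", "mq" };
	const unsigned hint_size[] = { 4, 4 };
	unsigned i, offset = 0;

	for (i = 0; i < 2; i++) {
		printf("%-3s hint: offset %u, %u bytes\n",
		       level[i], offset, hint_size[i]);
		offset += hint_size[i];
	}
	printf("consolidated hint_size: %u bytes\n", offset);	/* 8 */
	return 0;
}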
diff --git a/drivers/md/dm-cache-shim-utils.h b/drivers/md/dm-cache-shim-utils.h
new file mode 100644
index 000000000000..92f2f2149212
--- /dev/null
+++ b/drivers/md/dm-cache-shim-utils.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2013 NetApp, Inc. All Rights Reserved, contribution by
+ * Morgan Mears.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details
+ *
+ */
+
+#ifndef DM_CACHE_SHIM_UTILS_H
+#define DM_CACHE_SHIM_UTILS_H
+
+#include "dm-cache-policy.h"
+
+struct shim_walk_map_ctx;
+
+typedef void* (*cblock_to_hint_fn_t)(struct shim_walk_map_ctx *,
+ dm_cblock_t,
+ dm_oblock_t);
+
+/*
+ * For walk_mappings to work with a policy stack, every non-terminal policy
+ * has to start its context with one of these. There are no requirements for
+ * the context used by the terminal policy.
+ */
+struct shim_walk_map_ctx {
+ void *parent_ctx;
+ policy_walk_fn parent_fn;
+ struct dm_cache_policy *my_policy;
+ char *child_hint_buf;
+ cblock_to_hint_fn_t cblock_to_hint_fn;
+ union {
+ __le64 le64_buf;
+ __le32 le32_buf;
+ __le16 le16_buf;
+ };
+};
+
+/*
+ * Populate a shim (non-terminal) policy structure with functions that just
+ * hand off to the child policy. Caller can then override just those
+ * functions of interest.
+ */
+void dm_cache_shim_utils_init_shim_policy(struct dm_cache_policy *p);
+
+/*
+ * Launch a "walk_mappings" leg using the context provided by our caller.
+ * Typically used at the bottom of a policy stack, so caller can provide
+ * the hint buffer.
+ */
+int dm_cache_shim_utils_walk_map_with_ctx(struct shim_walk_map_ctx *ctx);
+
+/*
+ * Initialize a context appropriately and launch a "walk_mappings" leg.
+ * Typically used to implement walk_mappings in shim policies. The
+ * framework will call hint_fn at the appropriate point, and it should
+ * return a pointer to the disk-ready hint for the given cblock. The
+ * leXX_bufs in the shim_walk_map_ctx structure can be used to store the
+ * disk-ready hint if it will fit.
+ */
+int dm_cache_shim_utils_walk_map(struct dm_cache_policy *p,
+ policy_walk_fn fn,
+ void *context,
+ cblock_to_hint_fn_t hint_fn);
+
+#endif /* DM_CACHE_SHIM_UTILS_H */
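To show how the helpers declared above are meant to be used, here is a minimal, hypothetical shim skeleton ("noop" and all of its callbacks are invented names, not part of this patch, and it has not been built or tested): it stores no hint of its own, forwards everything to its child via dm_cache_shim_utils_init_shim_policy(), and overrides only destroy and walk_mappings, the latter mainly to illustrate dm_cache_shim_utils_walk_map(). It mirrors the pattern the era policy follows earlier in the series.

#include "dm-cache-policy.h"
#include "dm-cache-shim-utils.h"

#include <linux/module.h>
#include <linux/slab.h>

struct noop_policy {
	struct dm_cache_policy policy;
};

static void noop_destroy(struct dm_cache_policy *p)
{
	kfree(container_of(p, struct noop_policy, policy));
}

static int noop_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			      void *context)
{
	/* hint_fn is NULL: this shim adds no hint data of its own. */
	return dm_cache_shim_utils_walk_map(p, fn, context, NULL);
}

static struct dm_cache_policy *noop_create(dm_cblock_t cache_size,
					   sector_t origin_size,
					   sector_t cache_block_size)
{
	struct noop_policy *np = kzalloc(sizeof(*np), GFP_KERNEL);

	if (!np)
		return NULL;

	dm_cache_shim_utils_init_shim_policy(&np->policy);
	np->policy.destroy = noop_destroy;
	np->policy.walk_mappings = noop_walk_mappings;

	return &np->policy;
}

static struct dm_cache_policy_type noop_policy_type = {
	.name = "noop",
	.version = {1, 0, 0},
	.hint_size = 0,
	.owner = THIS_MODULE,
	.create = noop_create,
	.features = DM_CACHE_POLICY_SHIM
};

static int __init noop_init(void)
{
	return dm_cache_policy_register(&noop_policy_type);
}

static void __exit noop_exit(void)
{
	dm_cache_policy_unregister(&noop_policy_type);
}

module_init(noop_init);
module_exit(noop_exit);
MODULE_LICENSE("GPL");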
diff --git a/drivers/md/dm-cache-stack-utils.c b/drivers/md/dm-cache-stack-utils.c
new file mode 100644
index 000000000000..14c718ad3fc6
--- /dev/null
+++ b/drivers/md/dm-cache-stack-utils.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2013 NetApp, Inc. All Rights Reserved, contribution by
+ * Morgan Mears.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details
+ *
+ */
+
+#include "dm-cache-policy-internal.h"
+#include "dm-cache-shim-utils.h"
+#include "dm-cache-stack-utils.h"
+#include "dm-cache-policy.h"
+#include "dm.h"
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#define DM_MSG_PREFIX "cache-stack-utils"
+
+struct stack_root_policy {
+ struct dm_cache_policy policy;
+ struct dm_cache_policy_type type;
+};
+
+/*----------------------------------------------------------------*/
+
+static void *stack_root_cblock_to_hint(struct shim_walk_map_ctx *ctx,
+ dm_cblock_t cblock, dm_oblock_t oblock)
+{
+ return ctx->child_hint_buf;
+}
+
+static int stack_root_walk_mappings(struct dm_cache_policy *p,
+ policy_walk_fn fn, void *context)
+{
+ struct shim_walk_map_ctx ctx;
+ size_t hint_size;
+ int r;
+
+ ctx.parent_ctx = context;
+ ctx.parent_fn = fn;
+ ctx.my_policy = p;
+ ctx.child_hint_buf = NULL;
+ ctx.cblock_to_hint_fn = stack_root_cblock_to_hint;
+
+ hint_size = dm_cache_policy_get_hint_size(p);
+ if (hint_size) {
+ ctx.child_hint_buf = kzalloc(hint_size, GFP_KERNEL);
+ if (!ctx.child_hint_buf)
+ return -ENOMEM;
+ }
+
+ r = dm_cache_shim_utils_walk_map_with_ctx(&ctx);
+
+ kfree(ctx.child_hint_buf);
+
+ return r;
+}
+
+static struct dm_cache_policy *stack_root_create(const char *policy_stack_str,
+ struct dm_cache_policy *head)
+{
+ struct stack_root_policy *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ struct dm_cache_policy *child;
+ struct dm_cache_policy_type *t;
+ const unsigned *version;
+ const char *seg_name;
+ size_t canonical_name_len, hint_size;
+ int i;
+
+ if (!p)
+ return NULL;
+
+ t = &p->type;
+ dm_cache_shim_utils_init_shim_policy(&p->policy);
+ p->policy.walk_mappings = stack_root_walk_mappings;
+ p->policy.child = head;
+
+ /*
+ * We compose the canonical name for this policy stack by removing
+ * any shim policies that do not have hint data. This is intended
+ * to allow for a class of shim policies that can be inserted into,
+ * or removed from, the policy stack without causing the in-flash
+ * metadata to be invalidated. The thought is to allow debug or
+ * tracing shims to be inserted or removed without dropping the cache.
+ * The composite version numbers of a policy stack do not include the
+ * versions of the hintless policies for the same reason.
+ */
+ canonical_name_len = 0;
+ for (child = head; child; child = child->child) {
+ hint_size = dm_cache_policy_get_hint_size(child);
+
+#if 0
+ /* FIXME: skipping hintless shims omits their name from t->name, leaving a non-destroyable stack. */
+ if (!hint_size && child->child)
+ continue;
+#endif
+
+ t->hint_size += hint_size;
+
+ seg_name = dm_cache_policy_get_name(child);
+ canonical_name_len += strlen(seg_name) + (dm_cache_policy_is_shim(child) ? 1 : 0);
+
+ if (canonical_name_len >= sizeof(t->name)) {
+ DMWARN("policy stack string '%s' is too long",
+ policy_stack_str);
+ kfree(p);
+ return NULL;
+ }
+
+ strcat(t->name, seg_name);
+
+ if (dm_cache_policy_is_shim(child)) {
+ t->name[canonical_name_len - 1] = DM_CACHE_POLICY_STACK_DELIM;
+ t->name[canonical_name_len] = '\0';
+ }
+
+ version = dm_cache_policy_get_version(child);
+
+ for (i = 0; i < CACHE_POLICY_VERSION_SIZE; i++)
+ t->version[i] += version[i];
+ }
+
+ p->policy.private = t;
+ return &p->policy;
+}
+
+static void stack_root_destroy(struct dm_cache_policy *p)
+{
+ kfree(p);
+}
+
+/*----------------------------------------------------------------*/
+
+int dm_cache_stack_utils_string_is_policy_stack(const char *string)
+{
+ const char *delim;
+
+ /*
+ * A string specifies a policy stack instead of a policy if it
+ * contains a policy delimiter (+) anywhere but at the end. The
+ * latter is needed to properly distinguish between policy stacks and
+ * individual shim policies, since this function is called on them
+ * when the policy stack is constructed from the specified string.
+ */
+ delim = strchr(string, DM_CACHE_POLICY_STACK_DELIM);
+ if (!delim || (delim[1] == '\0'))
+ return false;
+
+ return true;
+}
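To make the delimiter rule above concrete, a small user-space restatement (illustrative only; "trc" is an assumed shim policy name, not one this series registers):

#include <stdbool.h>
#include <string.h>

#define DELIM '+'

static bool is_policy_stack(const char *s)
{
	const char *d = strchr(s, DELIM);

	return d && d[1] != '\0';
}

/*
 * is_policy_stack("mq")     -> false  (a single, terminal policy)
 * is_policy_stack("trc+mq") -> true   (shim "trc" stacked on "mq")
 * is_policy_stack("trc+")   -> false  (trailing delimiter: treated as the
 *                                      name of an individual shim policy)
 */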
+
+static void __policy_destroy_stack(struct dm_cache_policy *head_p)
+{
+ struct dm_cache_policy *cur_p, *next_p;
+
+ for (cur_p = head_p; cur_p; cur_p = next_p) {
+ next_p = cur_p->child;
+ dm_cache_policy_destroy(cur_p);
+ }
+}
+
+struct dm_cache_policy *
+dm_cache_stack_utils_policy_stack_create(const char *policy_stack_str,
+ dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ char policy_name_buf[CACHE_POLICY_NAME_SIZE];
+ struct dm_cache_policy *p, *head_p, *next_p;
+ char *policy_name, *delim, uninitialized_var(saved_char);
+ int n, r = -EINVAL;
+
+ n = strlcpy(policy_name_buf, policy_stack_str, sizeof(policy_name_buf));
+ if (n >= sizeof(policy_name_buf)) {
+ DMWARN("policy stack string is too long");
+ return NULL;
+ }
+
+ policy_name = policy_name_buf;
+ p = head_p = next_p = NULL;
+
+ do {
+ delim = strchr(policy_name, DM_CACHE_POLICY_STACK_DELIM);
+ if (delim)
+ *delim = '\0';
+
+ next_p = dm_cache_policy_create(policy_name, cache_size,
+ origin_size, cache_block_size);
+ if (IS_ERR(next_p)) {
+ r = PTR_ERR(next_p);
+ goto cleanup;
+ }
+
+ next_p->child = NULL;
+ if (p)
+ p->child = next_p;
+ else
+ head_p = next_p;
+ p = next_p;
+
+ if (delim) {
+ if (!dm_cache_policy_is_shim(next_p)) {
+ DMERR("%s is not a shim policy", policy_name);
+ goto cleanup;
+ }
+
+ *delim = DM_CACHE_POLICY_STACK_DELIM;
+ policy_name = delim + 1;
+ }
+ } while (delim);
+
+ if (head_p->child) {
+ next_p = stack_root_create(policy_stack_str, head_p);
+ if (!next_p)
+ goto cleanup;
+
+ head_p = next_p;
+ }
+
+ return head_p;
+
+cleanup:
+ __policy_destroy_stack(head_p);
+ return ERR_PTR(r);
+}
+
+void dm_cache_stack_utils_policy_stack_destroy(struct dm_cache_policy *p)
+{
+ __policy_destroy_stack(p->child);
+ stack_root_destroy(p);
+}
diff --git a/drivers/md/dm-cache-stack-utils.h b/drivers/md/dm-cache-stack-utils.h
new file mode 100644
index 000000000000..54c767e1dc51
--- /dev/null
+++ b/drivers/md/dm-cache-stack-utils.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013 NetApp, Inc. All Rights Reserved, contribution by
+ * Morgan Mears.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details
+ *
+ */
+
+#ifndef DM_CACHE_STACK_UTILS_H
+#define DM_CACHE_STACK_UTILS_H
+
+#include "dm-cache-policy.h"
+
+#define DM_CACHE_POLICY_STACK_DELIM '+'
+
+int dm_cache_stack_utils_string_is_policy_stack(const char *string);
+
+struct dm_cache_policy *dm_cache_stack_utils_policy_stack_create(
+ const char *policy_stack_string,
+ dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size);
+
+void dm_cache_stack_utils_policy_stack_destroy(struct dm_cache_policy *p);
+
+#endif /* DM_CACHE_STACK_UTILS_H */
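A rough usage sketch of the two entry points above, roughly as the cache target might call them (error handling condensed; "trc+mq" assumes a hypothetical shim "trc" stacked on the stock "mq" policy, and cache_size/origin_sectors/sectors_per_block stand in for the target's fields):

	struct dm_cache_policy *p;

	p = dm_cache_stack_utils_policy_stack_create("trc+mq", cache_size,
						     origin_sectors,
						     sectors_per_block);
	/* create returns either NULL or ERR_PTR() on failure */
	if (IS_ERR_OR_NULL(p))
		return p ? PTR_ERR(p) : -EINVAL;

	/* ... use p like any other dm_cache_policy ... */

	dm_cache_stack_utils_policy_stack_destroy(p);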
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 29569768ffbf..5ce0f5ec6d74 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -61,6 +61,34 @@ static void free_bitset(unsigned long *bits)
/*----------------------------------------------------------------*/
+/*
+ * There are a couple of places where we let a bio run, but want to do some
+ * work before calling its endio function. We do this by temporarily
+ * changing the endio fn.
+ */
+struct dm_hook_info {
+ bio_end_io_t *bi_end_io;
+ void *bi_private;
+};
+
+static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
+ bio_end_io_t *bi_end_io, void *bi_private)
+{
+ h->bi_end_io = bio->bi_end_io;
+ h->bi_private = bio->bi_private;
+
+ bio->bi_end_io = bi_end_io;
+ bio->bi_private = bi_private;
+}
+
+static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
+{
+ bio->bi_end_io = h->bi_end_io;
+ bio->bi_private = h->bi_private;
+}
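As a reading aid, the intended lifecycle of a hooked bio, with a hypothetical my_endio()/struct my_private; this mirrors how writethrough_endio() and overwrite_endio() below use these helpers and is not itself part of the patch:

static void my_endio(struct bio *bio, int err)
{
	struct my_private *pv = bio->bi_private;	/* whatever was hooked in */

	dm_unhook_bio(&pv->hook_info, bio);		/* restore endio/private */

	/* ... do the extra work the bio was hooked for ... */

	/* bump bi_remaining so the restored bi_end_io actually runs */
	atomic_inc(&bio->bi_remaining);
	bio_endio(bio, err);
}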
+
+/*----------------------------------------------------------------*/
+
#define PRISON_CELLS 1024
#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
@@ -76,14 +104,37 @@ static void free_bitset(unsigned long *bits)
/*
* FIXME: the cache is read/write for the time being.
*/
-enum cache_mode {
+enum cache_metadata_mode {
CM_WRITE, /* metadata may be changed */
CM_READ_ONLY, /* metadata may not be changed */
};
+enum cache_io_mode {
+ /*
+ * Data is written to cached blocks only. These blocks are marked
+ * dirty. If you lose the cache device you will lose data.
+ * Potential performance increase for both reads and writes.
+ */
+ CM_IO_WRITEBACK,
+
+ /*
+ * Data is written to both cache and origin. Blocks are never
+ * dirty. Potential performance benefit for reads only.
+ */
+ CM_IO_WRITETHROUGH,
+
+ /*
+ * A degraded mode useful for various cache coherency situations
+ * (eg, rolling back snapshots). Reads and writes always go to the
+ * origin. If a write goes to a cached oblock, then the cache
+ * block is invalidated.
+ */
+ CM_IO_PASSTHROUGH
+};
+
struct cache_features {
- enum cache_mode mode;
- bool write_through:1;
+ enum cache_metadata_mode mode;
+ enum cache_io_mode io_mode;
};
struct cache_stats {
@@ -132,6 +183,12 @@ struct cache {
dm_cblock_t cache_size;
/*
+ * Origin block range [begin, end] whose mapped cache entries are to be invalidated.
+ */
+ dm_oblock_t begin_invalidate;
+ dm_oblock_t end_invalidate;
+
+ /*
* Fields for converting from sectors to blocks.
*/
uint32_t sectors_per_block;
@@ -148,6 +205,10 @@ struct cache {
wait_queue_head_t migration_wait;
atomic_t nr_migrations;
+ wait_queue_head_t quiescing_wait;
+ atomic_t quiescing;
+ atomic_t quiescing_ack;
+
/*
* cache_size entries, dirty if set
*/
@@ -186,7 +247,7 @@ struct cache {
bool need_tick_bio:1;
bool sized:1;
- bool quiescing:1;
+ bool invalidate:1;
bool commit_requested:1;
bool loaded_mappings:1;
bool loaded_discards:1;
@@ -211,7 +272,7 @@ struct per_bio_data {
*/
struct cache *cache;
dm_cblock_t cblock;
- bio_end_io_t *saved_bi_end_io;
+ struct dm_hook_info hook_info;
struct dm_bio_details bio_details;
};
@@ -228,6 +289,8 @@ struct dm_cache_migration {
bool writeback:1;
bool demote:1;
bool promote:1;
+ bool requeue_holder:1;
+ bool invalidate:1;
struct dm_bio_prison_cell *old_ocell;
struct dm_bio_prison_cell *new_ocell;
@@ -533,9 +596,24 @@ static void save_stats(struct cache *cache)
#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
+static bool writethrough_mode(struct cache_features *f)
+{
+ return f->io_mode == CM_IO_WRITETHROUGH;
+}
+
+static bool writeback_mode(struct cache_features *f)
+{
+ return f->io_mode == CM_IO_WRITEBACK;
+}
+
+static bool passthrough_mode(struct cache_features *f)
+{
+ return f->io_mode == CM_IO_PASSTHROUGH;
+}
+
static size_t get_per_bio_data_size(struct cache *cache)
{
- return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+ return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}
static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
@@ -567,15 +645,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)
static void remap_to_cache(struct cache *cache, struct bio *bio,
dm_cblock_t cblock)
{
- sector_t bi_sector = bio->bi_sector;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
bio->bi_bdev = cache->cache_dev->bdev;
if (!block_size_is_power_of_two(cache))
- bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
- sector_div(bi_sector, cache->sectors_per_block);
+ bio->bi_iter.bi_sector =
+ (from_cblock(cblock) * cache->sectors_per_block) +
+ sector_div(bi_sector, cache->sectors_per_block);
else
- bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
- (bi_sector & (cache->sectors_per_block - 1));
+ bio->bi_iter.bi_sector =
+ (from_cblock(cblock) << cache->sectors_per_block_shift) |
+ (bi_sector & (cache->sectors_per_block - 1));
}
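A quick user-space check of the remapping arithmetic above: both branches of remap_to_cache() compute the same cache-device sector (illustrative numbers only, not taken from the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t cblock = 5, bi_sector = 1234;
	uint64_t spb = 128;		/* sectors_per_block (power of two) */
	unsigned shift = 7;		/* sectors_per_block_shift */

	uint64_t generic = cblock * spb + bi_sector % spb;
	uint64_t pow2 = (cblock << shift) | (bi_sector & (spb - 1));

	assert(generic == pow2 && pow2 == 722);
	return 0;
}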
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
@@ -605,6 +685,7 @@ static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
+ check_if_tick_bio_needed(cache, bio);
remap_to_cache(cache, bio, cblock);
if (bio_data_dir(bio) == WRITE) {
set_dirty(cache, oblock, cblock);
@@ -614,7 +695,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
- sector_t block_nr = bio->bi_sector;
+ sector_t block_nr = bio->bi_iter.bi_sector;
if (!block_size_is_power_of_two(cache))
(void) sector_div(block_nr, cache->sectors_per_block);
@@ -662,7 +743,14 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
static void writethrough_endio(struct bio *bio, int err)
{
struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
- bio->bi_end_io = pb->saved_bi_end_io;
+
+ dm_unhook_bio(&pb->hook_info, bio);
+
+ /*
+ * Must bump bi_remaining to allow bio to complete with
+ * restored bi_end_io.
+ */
+ atomic_inc(&bio->bi_remaining);
if (err) {
bio_endio(bio, err);
@@ -693,9 +781,8 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
pb->cache = cache;
pb->cblock = cblock;
- pb->saved_bi_end_io = bio->bi_end_io;
+ dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
dm_bio_record(&pb->bio_details, bio);
- bio->bi_end_io = writethrough_endio;
remap_to_origin_clear_discard(pb->cache, bio, oblock);
}
@@ -748,8 +835,9 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
static void cleanup_migration(struct dm_cache_migration *mg)
{
- dec_nr_migrations(mg->cache);
+ struct cache *cache = mg->cache;
free_migration(mg);
+ dec_nr_migrations(cache);
}
static void migration_failure(struct dm_cache_migration *mg)
@@ -765,13 +853,13 @@ static void migration_failure(struct dm_cache_migration *mg)
DMWARN_LIMIT("demotion failed; couldn't copy block");
policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
- cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+ cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
if (mg->promote)
- cell_defer(cache, mg->new_ocell, 1);
+ cell_defer(cache, mg->new_ocell, true);
} else {
DMWARN_LIMIT("promotion failed; couldn't copy block");
policy_remove_mapping(cache->policy, mg->new_oblock);
- cell_defer(cache, mg->new_ocell, 1);
+ cell_defer(cache, mg->new_ocell, true);
}
cleanup_migration(mg);
@@ -798,6 +886,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
cleanup_migration(mg);
return;
}
+
} else {
if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
@@ -823,7 +912,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
return;
} else if (mg->demote) {
- cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+ cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
if (mg->promote) {
mg->demote = false;
@@ -832,11 +921,19 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
list_add_tail(&mg->list, &cache->quiesced_migrations);
spin_unlock_irqrestore(&cache->lock, flags);
- } else
+ } else {
+ if (mg->invalidate)
+ policy_remove_mapping(cache->policy, mg->old_oblock);
cleanup_migration(mg);
+ }
} else {
- cell_defer(cache, mg->new_ocell, true);
+ if (mg->requeue_holder)
+ cell_defer(cache, mg->new_ocell, true);
+ else {
+ bio_endio(mg->new_ocell->holder, 0);
+ cell_defer(cache, mg->new_ocell, false);
+ }
clear_dirty(cache, mg->new_oblock, mg->cblock);
cleanup_migration(mg);
}
@@ -881,8 +978,46 @@ static void issue_copy_real(struct dm_cache_migration *mg)
r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
}
- if (r < 0)
+ if (r < 0) {
+ DMERR_LIMIT("issuing migration failed");
migration_failure(mg);
+ }
+}
+
+static void overwrite_endio(struct bio *bio, int err)
+{
+ struct dm_cache_migration *mg = bio->bi_private;
+ struct cache *cache = mg->cache;
+ size_t pb_data_size = get_per_bio_data_size(cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ unsigned long flags;
+
+ if (err)
+ mg->err = true;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_add_tail(&mg->list, &cache->completed_migrations);
+ dm_unhook_bio(&pb->hook_info, bio);
+ mg->requeue_holder = false;
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
+{
+ size_t pb_data_size = get_per_bio_data_size(mg->cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+
+ dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
+ remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
+ generic_make_request(bio);
+}
+
+static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
+{
+ return (bio_data_dir(bio) == WRITE) &&
+ (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}
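For a sense of scale (numbers assumed, not taken from this patch): with 128-sector cache blocks, the overwrite path only triggers for bios carrying a full 64KiB of write data:

/* 128 sectors/block, SECTOR_SHIFT == 9 */
_Static_assert((128 << 9) == 65536, "full-block write size for 64KiB blocks");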
static void avoid_copy(struct dm_cache_migration *mg)
@@ -899,9 +1034,17 @@ static void issue_copy(struct dm_cache_migration *mg)
if (mg->writeback || mg->demote)
avoid = !is_dirty(cache, mg->cblock) ||
is_discarded_oblock(cache, mg->old_oblock);
- else
+ else {
+ struct bio *bio = mg->new_ocell->holder;
+
avoid = is_discarded_oblock(cache, mg->new_oblock);
+ if (!avoid && bio_writes_complete_block(cache, bio)) {
+ issue_overwrite(mg, bio);
+ return;
+ }
+ }
+
avoid ? avoid_copy(mg) : issue_copy_real(mg);
}
@@ -991,6 +1134,8 @@ static void promote(struct cache *cache, struct prealloc *structs,
mg->writeback = false;
mg->demote = false;
mg->promote = true;
+ mg->requeue_holder = true;
+ mg->invalidate = false;
mg->cache = cache;
mg->new_oblock = oblock;
mg->cblock = cblock;
@@ -1012,6 +1157,8 @@ static void writeback(struct cache *cache, struct prealloc *structs,
mg->writeback = true;
mg->demote = false;
mg->promote = false;
+ mg->requeue_holder = true;
+ mg->invalidate = false;
mg->cache = cache;
mg->old_oblock = oblock;
mg->cblock = cblock;
@@ -1035,6 +1182,8 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
mg->writeback = false;
mg->demote = true;
mg->promote = true;
+ mg->requeue_holder = true;
+ mg->invalidate = false;
mg->cache = cache;
mg->old_oblock = old_oblock;
mg->new_oblock = new_oblock;
@@ -1047,6 +1196,33 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
quiesce_migration(mg);
}
+/*
+ * Invalidate a cache entry. No writeback occurs; any changes in the cache
+ * block are thrown away.
+ */
+static void invalidate(struct cache *cache, struct prealloc *structs,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ struct dm_bio_prison_cell *cell)
+{
+ struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+ mg->err = false;
+ mg->writeback = false;
+ mg->demote = true;
+ mg->promote = false;
+ mg->requeue_holder = true;
+ mg->invalidate = true;
+ mg->cache = cache;
+ mg->old_oblock = oblock;
+ mg->cblock = cblock;
+ mg->old_ocell = cell;
+ mg->new_ocell = NULL;
+ mg->start_jiffies = jiffies;
+
+ inc_nr_migrations(cache);
+ quiesce_migration(mg);
+}
+
/*----------------------------------------------------------------
* bio processing
*--------------------------------------------------------------*/
@@ -1066,7 +1242,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
size_t pb_data_size = get_per_bio_data_size(cache);
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
- BUG_ON(bio->bi_size);
+ BUG_ON(bio->bi_iter.bi_size);
if (!pb->req_nr)
remap_to_origin(cache, bio);
else
@@ -1089,9 +1265,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
*/
static void process_discard_bio(struct cache *cache, struct bio *bio)
{
- dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+ dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
cache->discard_block_size);
- dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+ dm_block_t end_block = bio_end_sector(bio);
dm_block_t b;
end_block = block_div(end_block, cache->discard_block_size);
@@ -1109,13 +1285,6 @@ static bool spare_migration_bandwidth(struct cache *cache)
return current_volume < cache->migration_threshold;
}
-static bool is_writethrough_io(struct cache *cache, struct bio *bio,
- dm_cblock_t cblock)
-{
- return bio_data_dir(bio) == WRITE &&
- cache->features.write_through && !is_dirty(cache, cblock);
-}
-
static void inc_hit_counter(struct cache *cache, struct bio *bio)
{
atomic_inc(bio_data_dir(bio) == READ ?
@@ -1128,6 +1297,15 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
&cache->stats.read_miss : &cache->stats.write_miss);
}
+static void issue_cache_bio(struct cache *cache, struct bio *bio,
+ struct per_bio_data *pb,
+ dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ remap_to_cache_dirty(cache, bio, oblock, cblock);
+ issue(cache, bio);
+}
+
static void process_bio(struct cache *cache, struct prealloc *structs,
struct bio *bio)
{
@@ -1139,7 +1317,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
size_t pb_data_size = get_per_bio_data_size(cache);
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
bool discarded_block = is_discarded_oblock(cache, block);
- bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
+ bool passthrough = passthrough_mode(&cache->features);
+ bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
/*
* Check to see if that block is currently migrating.
@@ -1160,15 +1339,39 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
switch (lookup_result.op) {
case POLICY_HIT:
- inc_hit_counter(cache, bio);
- pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ if (passthrough) {
+ inc_miss_counter(cache, bio);
- if (is_writethrough_io(cache, bio, lookup_result.cblock))
- remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
- else
- remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+ /*
+ * Passthrough always maps to the origin,
+ * invalidating any cache blocks that are written
+ * to.
+ */
+
+ if (bio_data_dir(bio) == WRITE) {
+ atomic_inc(&cache->stats.demotion);
+ invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
+ release_cell = false;
+
+ } else {
+ /* FIXME: factor out issue_origin() */
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ remap_to_origin_clear_discard(cache, bio, block);
+ issue(cache, bio);
+ }
+ } else {
+ inc_hit_counter(cache, bio);
+
+ if (bio_data_dir(bio) == WRITE &&
+ writethrough_mode(&cache->features) &&
+ !is_dirty(cache, lookup_result.cblock)) {
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+ issue(cache, bio);
+ } else
+ issue_cache_bio(cache, bio, pb, block, lookup_result.cblock);
+ }
- issue(cache, bio);
break;
case POLICY_MISS:
@@ -1227,15 +1430,17 @@ static int need_commit_due_to_time(struct cache *cache)
static int commit_if_needed(struct cache *cache)
{
- if (dm_cache_changed_this_transaction(cache->cmd) &&
- (cache->commit_requested || need_commit_due_to_time(cache))) {
+ int r = 0;
+
+ if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
+ dm_cache_changed_this_transaction(cache->cmd)) {
atomic_inc(&cache->stats.commit_count);
- cache->last_commit_jiffies = jiffies;
cache->commit_requested = false;
- return dm_cache_commit(cache->cmd, false);
+ r = dm_cache_commit(cache->cmd, false);
+ cache->last_commit_jiffies = jiffies;
}
- return 0;
+ return r;
}
static void process_deferred_bios(struct cache *cache)
@@ -1346,34 +1551,34 @@ static void writeback_some_dirty_blocks(struct cache *cache)
/*----------------------------------------------------------------
* Main worker loop
*--------------------------------------------------------------*/
-static void start_quiescing(struct cache *cache)
+static bool is_quiescing(struct cache *cache)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
- cache->quiescing = 1;
- spin_unlock_irqrestore(&cache->lock, flags);
+ return atomic_read(&cache->quiescing);
}
-static void stop_quiescing(struct cache *cache)
+static void ack_quiescing(struct cache *cache)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
- cache->quiescing = 0;
- spin_unlock_irqrestore(&cache->lock, flags);
+ if (is_quiescing(cache)) {
+ atomic_inc(&cache->quiescing_ack);
+ wake_up(&cache->quiescing_wait);
+ }
}
-static bool is_quiescing(struct cache *cache)
+static void wait_for_quiescing_ack(struct cache *cache)
{
- int r;
- unsigned long flags;
+ wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
+}
- spin_lock_irqsave(&cache->lock, flags);
- r = cache->quiescing;
- spin_unlock_irqrestore(&cache->lock, flags);
+static void start_quiescing(struct cache *cache)
+{
+ atomic_inc(&cache->quiescing);
+ wait_for_quiescing_ack(cache);
+}
- return r;
+static void stop_quiescing(struct cache *cache)
+{
+ atomic_set(&cache->quiescing, 0);
+ atomic_set(&cache->quiescing_ack, 0);
}
static void wait_for_migrations(struct cache *cache)
@@ -1400,6 +1605,60 @@ static void requeue_deferred_io(struct cache *cache)
bio_endio(bio, DM_ENDIO_REQUEUE);
}
+static void invalidate_mappings(struct cache *cache)
+{
+ dm_oblock_t oblock, end;
+ unsigned long long count = 0;
+
+ smp_rmb();
+
+ if (!cache->invalidate)
+ return;
+
+ oblock = cache->begin_invalidate;
+ end = to_oblock(from_oblock(cache->end_invalidate) + 1);
+
+ while (oblock != end) {
+ int r;
+ dm_cblock_t cblock;
+ dm_oblock_t given_oblock = oblock;
+
+ r = policy_invalidate_mapping(cache->policy, &given_oblock, &cblock);
+ /*
+ * Policy either doesn't support invalidation (yet) or
+ * doesn't offer any more blocks to invalidate (e.g. era).
+ */
+ if (r == -EINVAL) {
+ DMWARN("policy doesn't support invalidation (yet).");
+ break;
+ }
+
+ if (r == -ENODATA)
+ break;
+
+ else if (!r) {
+ if (dm_cache_remove_mapping(cache->cmd, cblock)) {
+ DMWARN_LIMIT("invalidation failed; couldn't update on disk metadata");
+ r = policy_load_mapping(cache->policy, given_oblock, cblock, NULL, false);
+ BUG_ON(r);
+
+ } else {
+ /*
+ * FIXME: we are cautious and keep this even though all
+ * blocks _should_ be clean in passthrough mode.
+ */
+ clear_dirty(cache, given_oblock, cblock);
+ cache->commit_requested = true;
+ count++;
+ }
+ }
+
+ oblock = to_oblock(from_oblock(oblock) + 1);
+ }
+
+ cache->invalidate = false;
+}
+
static int more_work(struct cache *cache)
{
if (is_quiescing(cache))
@@ -1412,7 +1671,8 @@ static int more_work(struct cache *cache)
!bio_list_empty(&cache->deferred_writethrough_bios) ||
!list_empty(&cache->quiesced_migrations) ||
!list_empty(&cache->completed_migrations) ||
- !list_empty(&cache->need_commit_migrations);
+ !list_empty(&cache->need_commit_migrations) ||
+ cache->invalidate;
}
static void do_worker(struct work_struct *ws)
@@ -1420,15 +1680,16 @@ static void do_worker(struct work_struct *ws)
struct cache *cache = container_of(ws, struct cache, worker);
do {
- if (!is_quiescing(cache))
+ if (!is_quiescing(cache)) {
+ writeback_some_dirty_blocks(cache);
+ process_deferred_writethrough_bios(cache);
process_deferred_bios(cache);
+ }
process_migrations(cache, &cache->quiesced_migrations, issue_copy);
process_migrations(cache, &cache->completed_migrations, complete_migration);
- writeback_some_dirty_blocks(cache);
-
- process_deferred_writethrough_bios(cache);
+ invalidate_mappings(cache);
if (commit_if_needed(cache)) {
process_deferred_flush_bios(cache, false);
@@ -1442,6 +1703,9 @@ static void do_worker(struct work_struct *ws)
process_migrations(cache, &cache->need_commit_migrations,
migration_success_post_commit);
}
+
+ ack_quiescing(cache);
+
} while (more_work(cache));
}
@@ -1715,7 +1979,7 @@ static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
static void init_features(struct cache_features *cf)
{
cf->mode = CM_WRITE;
- cf->write_through = false;
+ cf->io_mode = CM_IO_WRITEBACK;
}
static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
@@ -1740,10 +2004,13 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
arg = dm_shift_arg(as);
if (!strcasecmp(arg, "writeback"))
- cf->write_through = false;
+ cf->io_mode = CM_IO_WRITEBACK;
else if (!strcasecmp(arg, "writethrough"))
- cf->write_through = true;
+ cf->io_mode = CM_IO_WRITETHROUGH;
+
+ else if (!strcasecmp(arg, "passthrough"))
+ cf->io_mode = CM_IO_PASSTHROUGH;
else {
*error = "Unrecognised cache feature requested";
@@ -1872,14 +2139,15 @@ static int set_config_values(struct cache *cache, int argc, const char **argv)
static int create_cache_policy(struct cache *cache, struct cache_args *ca,
char **error)
{
- cache->policy = dm_cache_policy_create(ca->policy_name,
- cache->cache_size,
- cache->origin_sectors,
- cache->sectors_per_block);
- if (!cache->policy) {
+ struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
+ cache->cache_size,
+ cache->origin_sectors,
+ cache->sectors_per_block);
+ if (IS_ERR(p)) {
*error = "Error creating cache's policy";
- return -ENOMEM;
+ return PTR_ERR(p);
}
+ cache->policy = p;
return 0;
}
@@ -1995,6 +2263,22 @@ static int cache_create(struct cache_args *ca, struct cache **result)
}
cache->cmd = cmd;
+ if (passthrough_mode(&cache->features)) {
+ bool all_clean;
+
+ r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
+ if (r) {
+ *error = "dm_cache_metadata_all_clean() failed";
+ goto bad;
+ }
+
+ if (!all_clean) {
+ *error = "Cannot enter passthrough mode unless all blocks are clean";
+ r = -EINVAL;
+ goto bad;
+ }
+ }
+
spin_lock_init(&cache->lock);
bio_list_init(&cache->deferred_bios);
bio_list_init(&cache->deferred_flush_bios);
@@ -2005,6 +2289,10 @@ static int cache_create(struct cache_args *ca, struct cache **result)
atomic_set(&cache->nr_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
+ init_waitqueue_head(&cache->quiescing_wait);
+ atomic_set(&cache->quiescing, 0);
+ atomic_set(&cache->quiescing_ack, 0);
+
r = -ENOMEM;
cache->nr_dirty = 0;
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
@@ -2064,7 +2352,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
cache->need_tick_bio = true;
cache->sized = false;
- cache->quiescing = false;
+ cache->invalidate = false;
cache->commit_requested = false;
cache->loaded_mappings = false;
cache->loaded_discards = false;
@@ -2207,17 +2495,39 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
+ r = DM_MAPIO_REMAPPED;
switch (lookup_result.op) {
case POLICY_HIT:
- inc_hit_counter(cache, bio);
- pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ if (passthrough_mode(&cache->features)) {
+ if (bio_data_dir(bio) == WRITE) {
+ /*
+ * We need to invalidate this block, so
+ * defer for the worker thread.
+ */
+ cell_defer(cache, cell, true);
+ r = DM_MAPIO_SUBMITTED;
+
+ } else {
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ inc_miss_counter(cache, bio);
+ remap_to_origin_clear_discard(cache, bio, block);
+
+ cell_defer(cache, cell, false);
+ }
- if (is_writethrough_io(cache, bio, lookup_result.cblock))
- remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
- else
- remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+ } else {
+ inc_hit_counter(cache, bio);
+
+ if (bio_data_dir(bio) == WRITE &&
+ writethrough_mode(&cache->features) &&
+ !is_dirty(cache, lookup_result.cblock))
+ remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+
+ else
+ remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
- cell_defer(cache, cell, false);
+ cell_defer(cache, cell, false);
+ }
break;
case POLICY_MISS:
@@ -2242,10 +2552,10 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
(unsigned) lookup_result.op);
bio_io_error(bio);
- return DM_MAPIO_SUBMITTED;
+ r = DM_MAPIO_SUBMITTED;
}
- return DM_MAPIO_REMAPPED;
+ return r;
}
static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
@@ -2304,9 +2614,11 @@ static int write_discard_bitset(struct cache *cache)
}
static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
- uint32_t hint)
+ void *hint)
{
struct cache *cache = context;
+
+ __dm_bless_for_disk(hint);
return dm_cache_save_hint(cache->cmd, cblock, hint);
}
@@ -2374,7 +2686,7 @@ static void cache_postsuspend(struct dm_target *ti)
}
static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
- bool dirty, uint32_t hint, bool hint_valid)
+ bool dirty, void *hint, bool hint_valid)
{
int r;
struct cache *cache = context;
@@ -2406,26 +2718,71 @@ static int load_discard(void *context, sector_t discard_block_size,
return 0;
}
+static dm_cblock_t get_cache_dev_size(struct cache *cache)
+{
+ sector_t size = get_dev_size(cache->cache_dev);
+ (void) sector_div(size, cache->sectors_per_block);
+ return to_cblock(size);
+}
+
+static bool can_resize(struct cache *cache, dm_cblock_t new_size)
+{
+ if (from_cblock(new_size) > from_cblock(cache->cache_size))
+ return true;
+
+ /*
+ * We can't drop a dirty block when shrinking the cache.
+ */
+ while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
+ new_size = to_cblock(from_cblock(new_size) + 1);
+ if (is_dirty(cache, new_size)) {
+ DMERR("unable to shrink cache; cache block %llu is dirty",
+ (unsigned long long) from_cblock(new_size));
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
+{
+ int r;
+
+ r = dm_cache_resize(cache->cmd, cache->cache_size);
+ if (r) {
+ DMERR("could not resize cache metadata");
+ return r;
+ }
+
+ cache->cache_size = new_size;
+
+ return 0;
+}
+
static int cache_preresume(struct dm_target *ti)
{
int r = 0;
struct cache *cache = ti->private;
- sector_t actual_cache_size = get_dev_size(cache->cache_dev);
- (void) sector_div(actual_cache_size, cache->sectors_per_block);
+ dm_cblock_t csize = get_cache_dev_size(cache);
/*
* Check to see if the cache has resized.
*/
- if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
- cache->cache_size = to_cblock(actual_cache_size);
-
- r = dm_cache_resize(cache->cmd, cache->cache_size);
- if (r) {
- DMERR("could not resize cache metadata");
+ if (!cache->sized) {
+ r = resize_cache_dev(cache, csize);
+ if (r)
return r;
- }
cache->sized = true;
+
+ } else if (csize != cache->cache_size) {
+ if (!can_resize(cache, csize))
+ return -EINVAL;
+
+ r = resize_cache_dev(cache, csize);
+ if (r)
+ return r;
}
if (!cache->loaded_mappings) {
@@ -2518,10 +2875,19 @@ static void cache_status(struct dm_target *ti, status_type_t type,
(unsigned long long) from_cblock(residency),
cache->nr_dirty);
- if (cache->features.write_through)
+ if (writethrough_mode(&cache->features))
DMEMIT("1 writethrough ");
- else
- DMEMIT("0 ");
+
+ else if (passthrough_mode(&cache->features))
+ DMEMIT("1 passthrough ");
+
+ else if (writeback_mode(&cache->features))
+ DMEMIT("1 writeback ");
+
+ else {
+ DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode);
+ goto err;
+ }
DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
if (sz < maxlen) {
@@ -2552,8 +2918,74 @@ err:
DMEMIT("Error");
}
+static int get_origin_block(struct cache *cache, const char *what,
+ char *arg, unsigned long long *val)
+{
+ unsigned long long last_block = from_oblock(cache->origin_blocks) - 1;
+
+ if (!strcmp(arg, "begin"))
+ *val = 0;
+
+ else if (!strcmp(arg, "end"))
+ *val = last_block;
+
+ else if (kstrtoull(arg, 10, val)) {
+ DMERR("%s origin block invalid", what);
+ return -EINVAL;
+
+ } else if (*val > last_block) {
+ *val = last_block;
+ DMERR("%s origin block adjusted to EOD=%llu", what, *val);
+ }
+
+ return 0;
+}
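A user-space sketch of the begin/end argument handling above, illustrative only; the kernel code uses kstrtoull() rather than strtoull(), and warns instead of failing when clamping:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int parse_origin_block(const char *arg, unsigned long long last_block,
			      unsigned long long *val)
{
	char *end;

	if (!strcmp(arg, "begin")) {
		*val = 0;
	} else if (!strcmp(arg, "end")) {
		*val = last_block;
	} else {
		errno = 0;
		*val = strtoull(arg, &end, 10);
		if (errno || end == arg || *end != '\0')
			return -EINVAL;
		if (*val > last_block)
			*val = last_block;	/* clamp to the last origin block */
	}

	return 0;
}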
+
+static int set_invalidate_mappings(struct cache *cache, char **argv)
+{
+ unsigned long long begin, end;
+
+ if (strcasecmp(argv[0], "invalidate_mappings"))
+ return -EINVAL;
+
+ if (!passthrough_mode(&cache->features)) {
+ DMERR("cache has to be in passthrough mode for invalidation!");
+ return -EPERM;
+ }
+
+ if (cache->invalidate) {
+ DMERR("cache is processing invalidation");
+ return -EPERM;
+ }
+
+ if (get_origin_block(cache, "begin", argv[1], &begin) ||
+ get_origin_block(cache, "end", argv[2], &end))
+ return -EINVAL;
+
+ if (begin > end) {
+ DMERR("begin origin block > end origin block");
+ return -EINVAL;
+ }
+
+ /*
+ * Pass begin and end origin blocks to the worker and wake it.
+ */
+ cache->begin_invalidate = to_oblock(begin);
+ cache->end_invalidate = to_oblock(end);
+ smp_wmb();
+ cache->invalidate = true;
+ smp_wmb();
+
+ wake_worker(cache);
+
+ return 0;
+}
+
/*
- * Supports <key> <value>.
+ * Supports
+ * "<key> <value>"
+ * and
+ * "invalidate_mappings <begin_origin_block> <end_origin_block>".
*
* The key migration_threshold is supported by the cache target core.
*/
@@ -2561,10 +2993,16 @@ static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
struct cache *cache = ti->private;
- if (argc != 2)
- return -EINVAL;
+ switch (argc) {
+ case 2:
+ return set_config_value(cache, argv[0], argv[1]);
- return set_config_value(cache, argv[0], argv[1]);
+ case 3:
+ return set_invalidate_mappings(cache, argv);
+
+ default:
+ return -EINVAL;
+ }
}
static int cache_iterate_devices(struct dm_target *ti,
@@ -2630,7 +3068,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 1, 1},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0fce0bc1a957..c47c8301e777 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2,6 +2,7 @@
* Copyright (C) 2003 Christophe Saout <christophe@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
* Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
*
* This file is released under the GPL.
*/
@@ -38,10 +39,8 @@ struct convert_context {
struct completion restart;
struct bio *bio_in;
struct bio *bio_out;
- unsigned int offset_in;
- unsigned int offset_out;
- unsigned int idx_in;
- unsigned int idx_out;
+ struct bvec_iter iter_in;
+ struct bvec_iter iter_out;
sector_t cc_sector;
atomic_t cc_pending;
};
@@ -98,6 +97,13 @@ struct iv_lmk_private {
u8 *seed;
};
+#define TCW_WHITENING_SIZE 16
+struct iv_tcw_private {
+ struct crypto_shash *crc32_tfm;
+ u8 *iv_seed;
+ u8 *whitening;
+};
+
/*
* Crypt: maps a linear range of a block device
* and encrypts / decrypts at the same time.
@@ -139,6 +145,7 @@ struct crypt_config {
struct iv_essiv_private essiv;
struct iv_benbi_private benbi;
struct iv_lmk_private lmk;
+ struct iv_tcw_private tcw;
} iv_gen_private;
sector_t iv_offset;
unsigned int iv_size;
@@ -171,7 +178,8 @@ struct crypt_config {
unsigned long flags;
unsigned int key_size;
- unsigned int key_parts;
+ unsigned int key_parts; /* independent parts in key buffer */
+ unsigned int key_extra_size; /* additional keys length */
u8 key[0];
};
@@ -230,6 +238,16 @@ static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
* version 3: the same as version 2 with additional IV seed
* (it uses 65 keys, last key is used as IV seed)
*
+ * tcw: Compatible implementation of the block chaining mode used
+ * by the TrueCrypt device encryption system (prior to version 4.1).
+ * For more info see: http://www.truecrypt.org
+ * It operates on full 512 byte sectors and uses CBC
+ * with an IV derived from initial key and the sector number.
+ * In addition, a whitening value is applied to every sector; the whitening
+ * is derived from the initial key and the sector number, mixed using CRC32.
+ * Note that this encryption scheme is vulnerable to watermarking attacks
+ * and should only be used to access old compatible containers.
+ *
* plumb: unimplemented, see:
* http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
*/
@@ -530,7 +548,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
char ctx[crypto_shash_descsize(lmk->hash_tfm)];
} sdesc;
struct md5_state md5state;
- u32 buf[4];
+ __le32 buf[4];
int i, r;
sdesc.desc.tfm = lmk->hash_tfm;
@@ -608,6 +626,153 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
return r;
}
+static void crypt_iv_tcw_dtr(struct crypt_config *cc)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+
+ kzfree(tcw->iv_seed);
+ tcw->iv_seed = NULL;
+ kzfree(tcw->whitening);
+ tcw->whitening = NULL;
+
+ if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
+ crypto_free_shash(tcw->crc32_tfm);
+ tcw->crc32_tfm = NULL;
+}
+
+static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
+ const char *opts)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+
+ if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
+ ti->error = "Wrong key size for TCW";
+ return -EINVAL;
+ }
+
+ tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
+ if (IS_ERR(tcw->crc32_tfm)) {
+ ti->error = "Error initializing CRC32 in TCW";
+ return PTR_ERR(tcw->crc32_tfm);
+ }
+
+ tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
+ tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
+ if (!tcw->iv_seed || !tcw->whitening) {
+ crypt_iv_tcw_dtr(cc);
+ ti->error = "Error allocating seed storage in TCW";
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int crypt_iv_tcw_init(struct crypt_config *cc)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+ int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
+
+ memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
+ memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
+ TCW_WHITENING_SIZE);
+
+ return 0;
+}
+
+static int crypt_iv_tcw_wipe(struct crypt_config *cc)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+
+ memset(tcw->iv_seed, 0, cc->iv_size);
+ memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
+
+ return 0;
+}
+
+static int crypt_iv_tcw_whitening(struct crypt_config *cc,
+ struct dm_crypt_request *dmreq,
+ u8 *data)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+ u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
+ u8 buf[TCW_WHITENING_SIZE];
+ struct {
+ struct shash_desc desc;
+ char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
+ } sdesc;
+ int i, r;
+
+ /* xor whitening with sector number */
+ memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
+ crypto_xor(buf, (u8 *)&sector, 8);
+ crypto_xor(&buf[8], (u8 *)&sector, 8);
+
+ /* calculate crc32 for every 32bit part and xor it */
+ sdesc.desc.tfm = tcw->crc32_tfm;
+ sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ for (i = 0; i < 4; i++) {
+ r = crypto_shash_init(&sdesc.desc);
+ if (r)
+ goto out;
+ r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
+ if (r)
+ goto out;
+ r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
+ if (r)
+ goto out;
+ }
+ crypto_xor(&buf[0], &buf[12], 4);
+ crypto_xor(&buf[4], &buf[8], 4);
+
+ /* apply whitening (8 bytes) to whole sector */
+ for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
+ crypto_xor(data + i * 8, buf, 8);
+out:
+ memset(buf, 0, sizeof(buf));
+ return r;
+}
+
+static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+ u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
+ u8 *src;
+ int r = 0;
+
+ /* Remove whitening from ciphertext */
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
+ src = kmap_atomic(sg_page(&dmreq->sg_in));
+ r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
+ kunmap_atomic(src);
+ }
+
+ /* Calculate IV */
+ memcpy(iv, tcw->iv_seed, cc->iv_size);
+ crypto_xor(iv, (u8 *)&sector, 8);
+ if (cc->iv_size > 8)
+ crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);
+
+ return r;
+}
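The IV derivation alone, restated as a small user-space sketch (whitening omitted; assumes 8 <= iv_size <= 16, which holds for the supported block ciphers):

#include <stdint.h>
#include <string.h>

static void tcw_iv(uint8_t *iv, const uint8_t *iv_seed, unsigned iv_size,
		   uint64_t sector)
{
	uint8_t s[8];
	unsigned i;

	/* little-endian encoding of the sector number, as cpu_to_le64() does */
	for (i = 0; i < 8; i++)
		s[i] = (uint8_t)(sector >> (8 * i));

	memcpy(iv, iv_seed, iv_size);
	for (i = 0; i < 8; i++)
		iv[i] ^= s[i];
	for (i = 8; i < iv_size; i++)	/* only reached when iv_size > 8 */
		iv[i] ^= s[i - 8];
}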
+
+static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ u8 *dst;
+ int r;
+
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
+ return 0;
+
+ /* Apply whitening on ciphertext */
+ dst = kmap_atomic(sg_page(&dmreq->sg_out));
+ r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
+ kunmap_atomic(dst);
+
+ return r;
+}
+
static struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
@@ -643,6 +808,15 @@ static struct crypt_iv_operations crypt_iv_lmk_ops = {
.post = crypt_iv_lmk_post
};
+static struct crypt_iv_operations crypt_iv_tcw_ops = {
+ .ctr = crypt_iv_tcw_ctr,
+ .dtr = crypt_iv_tcw_dtr,
+ .init = crypt_iv_tcw_init,
+ .wipe = crypt_iv_tcw_wipe,
+ .generator = crypt_iv_tcw_gen,
+ .post = crypt_iv_tcw_post
+};
+
static void crypt_convert_init(struct crypt_config *cc,
struct convert_context *ctx,
struct bio *bio_out, struct bio *bio_in,
@@ -650,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc,
{
ctx->bio_in = bio_in;
ctx->bio_out = bio_out;
- ctx->offset_in = 0;
- ctx->offset_out = 0;
- ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
- ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+ if (bio_in)
+ ctx->iter_in = bio_in->bi_iter;
+ if (bio_out)
+ ctx->iter_out = bio_out->bi_iter;
ctx->cc_sector = sector + cc->iv_offset;
init_completion(&ctx->restart);
}
@@ -681,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc,
struct convert_context *ctx,
struct ablkcipher_request *req)
{
- struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
- struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+ struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
+ struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
struct dm_crypt_request *dmreq;
u8 *iv;
int r;
@@ -693,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc,
dmreq->iv_sector = ctx->cc_sector;
dmreq->ctx = ctx;
sg_init_table(&dmreq->sg_in, 1);
- sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
- bv_in->bv_offset + ctx->offset_in);
+ sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
+ bv_in.bv_offset);
sg_init_table(&dmreq->sg_out, 1);
- sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
- bv_out->bv_offset + ctx->offset_out);
+ sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
+ bv_out.bv_offset);
- ctx->offset_in += 1 << SECTOR_SHIFT;
- if (ctx->offset_in >= bv_in->bv_len) {
- ctx->offset_in = 0;
- ctx->idx_in++;
- }
-
- ctx->offset_out += 1 << SECTOR_SHIFT;
- if (ctx->offset_out >= bv_out->bv_len) {
- ctx->offset_out = 0;
- ctx->idx_out++;
- }
+ bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
+ bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
if (cc->iv_gen_ops) {
r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -761,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc,
atomic_set(&ctx->cc_pending, 1);
- while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
- ctx->idx_out < ctx->bio_out->bi_vcnt) {
+ while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
crypt_alloc_req(cc, ctx);
@@ -845,7 +1009,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
size -= len;
}
- if (!clone->bi_size) {
+ if (!clone->bi_iter.bi_size) {
bio_put(clone);
return NULL;
}
@@ -985,7 +1149,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
crypt_inc_pending(io);
clone_init(io, clone);
- clone->bi_sector = cc->start + io->sector;
+ clone->bi_iter.bi_sector = cc->start + io->sector;
generic_make_request(clone);
return 0;
@@ -1031,9 +1195,9 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
}
/* crypt_convert should have filled the clone bio */
- BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+ BUG_ON(io->ctx.iter_out.bi_size);
- clone->bi_sector = cc->start + io->sector;
+ clone->bi_iter.bi_sector = cc->start + io->sector;
if (async)
kcryptd_queue_io(io);
@@ -1048,7 +1212,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
struct dm_crypt_io *new_io;
int crypt_finished;
unsigned out_of_pages = 0;
- unsigned remaining = io->base_bio->bi_size;
+ unsigned remaining = io->base_bio->bi_iter.bi_size;
sector_t sector = io->sector;
int r;
@@ -1070,9 +1234,9 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
}
io->ctx.bio_out = clone;
- io->ctx.idx_out = 0;
+ io->ctx.iter_out = clone->bi_iter;
- remaining -= clone->bi_size;
+ remaining -= clone->bi_iter.bi_size;
sector += bio_sectors(clone);
crypt_inc_pending(io);
@@ -1114,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
crypt_inc_pending(new_io);
crypt_convert_init(cc, &new_io->ctx, NULL,
io->base_bio, sector);
- new_io->ctx.idx_in = io->ctx.idx_in;
- new_io->ctx.offset_in = io->ctx.offset_in;
+ new_io->ctx.iter_in = io->ctx.iter_in;
/*
* Fragments after the first use the base_io
@@ -1274,9 +1437,12 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
static int crypt_setkey_allcpus(struct crypt_config *cc)
{
- unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
+ unsigned subkey_size;
int err = 0, i, r;
+ /* Ignore extra keys (which are used for IV etc) */
+ subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
+
for (i = 0; i < cc->tfms_count; i++) {
r = crypto_ablkcipher_setkey(cc->tfms[i],
cc->key + (i * subkey_size),
@@ -1409,6 +1575,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
return -EINVAL;
}
cc->key_parts = cc->tfms_count;
+ cc->key_extra_size = 0;
cc->cipher = kstrdup(cipher, GFP_KERNEL);
if (!cc->cipher)
@@ -1460,13 +1627,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
goto bad;
}
- /* Initialize and set key */
- ret = crypt_set_key(cc, key);
- if (ret < 0) {
- ti->error = "Error decoding and setting key";
- goto bad;
- }
-
/* Initialize IV */
cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
if (cc->iv_size)
@@ -1493,18 +1653,33 @@ static int crypt_ctr_cipher(struct dm_target *ti,
cc->iv_gen_ops = &crypt_iv_null_ops;
else if (strcmp(ivmode, "lmk") == 0) {
cc->iv_gen_ops = &crypt_iv_lmk_ops;
- /* Version 2 and 3 is recognised according
+ /*
+ * Version 2 and 3 is recognised according
* to length of provided multi-key string.
* If present (version 3), last key is used as IV seed.
+ * All keys (including IV seed) are always the same size.
*/
- if (cc->key_size % cc->key_parts)
+ if (cc->key_size % cc->key_parts) {
cc->key_parts++;
+ cc->key_extra_size = cc->key_size / cc->key_parts;
+ }
+ } else if (strcmp(ivmode, "tcw") == 0) {
+ cc->iv_gen_ops = &crypt_iv_tcw_ops;
+ cc->key_parts += 2; /* IV + whitening */
+ cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
} else {
ret = -EINVAL;
ti->error = "Invalid IV mode";
goto bad;
}
+ /* Initialize and set key */
+ ret = crypt_set_key(cc, key);
+ if (ret < 0) {
+ ti->error = "Error decoding and setting key";
+ goto bad;
+ }
+
/* Allocate IV */
if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
@@ -1681,11 +1856,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
bio->bi_bdev = cc->dev->bdev;
if (bio_sectors(bio))
- bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = cc->start +
+ dm_target_offset(ti, bio->bi_iter.bi_sector);
return DM_MAPIO_REMAPPED;
}
- io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
+ io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
if (bio_data_dir(io->base_bio) == READ) {
if (kcryptd_io_read(io, GFP_NOWAIT))
@@ -1817,7 +1993,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 12, 1},
+ .version = {1, 13, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 496d5f3646a5..84c860191a2e 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -281,14 +281,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
bio->bi_bdev = dc->dev_write->bdev;
if (bio_sectors(bio))
- bio->bi_sector = dc->start_write +
- dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = dc->start_write +
+ dm_target_offset(ti, bio->bi_iter.bi_sector);
return delay_bio(dc, dc->write_delay, bio);
}
bio->bi_bdev = dc->dev_read->bdev;
- bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = dc->start_read +
+ dm_target_offset(ti, bio->bi_iter.bi_sector);
return delay_bio(dc, dc->read_delay, bio);
}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c80a0ec5f126..b257e46876d3 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = fc->dev->bdev;
if (bio_sectors(bio))
- bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector =
+ flakey_map_sector(ti, bio->bi_iter.bi_sector);
}
static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
@@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
"(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
- (bio_data_dir(bio) == WRITE) ? 'w' : 'r',
- bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
+ (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+ (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
}
}
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2a20986a2fec..b2b8a10e8427 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -201,26 +201,29 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
/*
* Functions for getting the pages from a bvec.
*/
-static void bvec_get_page(struct dpages *dp,
+static void bio_get_page(struct dpages *dp,
struct page **p, unsigned long *len, unsigned *offset)
{
- struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
- *p = bvec->bv_page;
- *len = bvec->bv_len;
- *offset = bvec->bv_offset;
+ struct bio *bio = dp->context_ptr;
+ struct bio_vec bvec = bio_iovec(bio);
+ *p = bvec.bv_page;
+ *len = bvec.bv_len;
+ *offset = bvec.bv_offset;
}
-static void bvec_next_page(struct dpages *dp)
+static void bio_next_page(struct dpages *dp)
{
- struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
- dp->context_ptr = bvec + 1;
+ struct bio *bio = dp->context_ptr;
+ struct bio_vec bvec = bio_iovec(bio);
+
+ bio_advance(bio, bvec.bv_len);
}
-static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
+static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
- dp->get_page = bvec_get_page;
- dp->next_page = bvec_next_page;
- dp->context_ptr = bvec;
+ dp->get_page = bio_get_page;
+ dp->next_page = bio_next_page;
+ dp->context_ptr = bio;
}
/*
@@ -304,14 +307,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
- bio->bi_sector = where->sector + (where->count - remaining);
+ bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_end_io = endio;
store_io_and_region_in_bio(bio, io, region);
if (rw & REQ_DISCARD) {
num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
- bio->bi_size = num_sectors << SECTOR_SHIFT;
+ bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
remaining -= num_sectors;
} else if (rw & REQ_WRITE_SAME) {
/*
@@ -320,7 +323,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
dp->get_page(dp, &page, &len, &offset);
bio_add_page(bio, page, logical_block_size, offset);
num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
- bio->bi_size = num_sectors << SECTOR_SHIFT;
+ bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
offset = 0;
remaining -= num_sectors;
@@ -457,8 +460,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
break;
- case DM_IO_BVEC:
- bvec_dp_init(dp, io_req->mem.ptr.bvec);
+ case DM_IO_BIO:
+ bio_dp_init(dp, io_req->mem.ptr.bio);
break;
case DM_IO_VMA:
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index afe08146f73e..51521429fb59 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -57,7 +57,7 @@ struct vers_iter {
static struct list_head _name_buckets[NUM_BUCKETS];
static struct list_head _uuid_buckets[NUM_BUCKETS];
-static void dm_hash_remove_all(int keep_open_devices);
+static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);
/*
* Guards access to both hash tables.
@@ -86,7 +86,7 @@ static int dm_hash_init(void)
static void dm_hash_exit(void)
{
- dm_hash_remove_all(0);
+ dm_hash_remove_all(false, false, false);
}
/*-----------------------------------------------------------------
@@ -276,7 +276,7 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)
return table;
}
-static void dm_hash_remove_all(int keep_open_devices)
+static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)
{
int i, dev_skipped;
struct hash_cell *hc;
@@ -293,7 +293,8 @@ retry:
md = hc->md;
dm_get(md);
- if (keep_open_devices && dm_lock_for_deletion(md)) {
+ if (keep_open_devices &&
+ dm_lock_for_deletion(md, mark_deferred, only_deferred)) {
dm_put(md);
dev_skipped++;
continue;
@@ -450,6 +451,11 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
return md;
}
+void dm_deferred_remove(void)
+{
+ dm_hash_remove_all(true, false, true);
+}
+
/*-----------------------------------------------------------------
* Implementation of the ioctl commands
*---------------------------------------------------------------*/
@@ -461,7 +467,7 @@ typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);
static int remove_all(struct dm_ioctl *param, size_t param_size)
{
- dm_hash_remove_all(1);
+ dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false);
param->data_size = 0;
return 0;
}
@@ -683,6 +689,9 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
if (dm_suspended_md(md))
param->flags |= DM_SUSPEND_FLAG;
+ if (dm_test_deferred_remove_flag(md))
+ param->flags |= DM_DEFERRED_REMOVE;
+
param->dev = huge_encode_dev(disk_devt(disk));
/*
@@ -832,8 +841,13 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
/*
* Ensure the device is not open and nothing further can open it.
*/
- r = dm_lock_for_deletion(md);
+ r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false);
if (r) {
+ if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) {
+ up_write(&_hash_lock);
+ dm_put(md);
+ return 0;
+ }
DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
up_write(&_hash_lock);
dm_put(md);
@@ -848,6 +862,8 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
dm_table_destroy(t);
}
+ param->flags &= ~DM_DEFERRED_REMOVE;
+
if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
param->flags |= DM_UEVENT_GENERATED_FLAG;
@@ -1469,6 +1485,14 @@ static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
if (**argv != '@')
return 2; /* no '@' prefix, deliver to target */
+ if (!strcasecmp(argv[0], "@cancel_deferred_remove")) {
+ if (argc != 1) {
+ DMERR("Invalid arguments for @cancel_deferred_remove");
+ return -EINVAL;
+ }
+ return dm_cancel_deferred_remove(md);
+ }
+
r = dm_stats_message(md, argc, argv, result, maxlen);
if (r < 2)
return r;
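
The dm-ioctl hunks above thread two new booleans, mark_deferred and only_deferred, through dm_hash_remove_all() and dm_lock_for_deletion() to implement deferred removal: a busy device can be flagged for removal now and actually removed on its last close. A small userspace model of that decision logic, with a plain open count and flag word standing in for the kernel structures:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define DMF_DELETING            (1u << 0)
#define DMF_DEFERRED_REMOVE     (1u << 1)

static int lock_for_deletion(int open_count, unsigned *flags,
                             bool mark_deferred, bool only_deferred)
{
        if (open_count) {
                if (mark_deferred)
                        *flags |= DMF_DEFERRED_REMOVE;  /* remove later */
                return -EBUSY;
        }
        if (only_deferred && !(*flags & DMF_DEFERRED_REMOVE))
                return -EEXIST;                         /* deferred pass skips it */
        *flags |= DMF_DELETING;
        return 0;
}

int main(void)
{
        unsigned flags = 0;

        /* DM_DEV_REMOVE with DM_DEFERRED_REMOVE on an open device: busy,
         * but the dev_remove() hunk above treats this -EBUSY as success. */
        printf("%d\n", lock_for_deletion(1, &flags, true, false));
        /* The last close schedules dm_deferred_remove(), which retries with
         * only_deferred set; the marked device is now removed. */
        printf("%d\n", lock_for_deletion(0, &flags, false, true));
        return 0;
}
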
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 4f99d267340c..53e848c10939 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = lc->dev->bdev;
if (bio_sectors(bio))
- bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector =
+ linear_map_sector(ti, bio->bi_iter.bi_sector);
}
static int linear_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index de570a558764..8f8d27cd6f33 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -87,6 +87,7 @@ struct multipath {
unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
+ unsigned pg_init_disabled:1; /* pg_init is not currently allowed */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_count; /* Number of times pg_init called */
@@ -390,8 +391,12 @@ static int map_io(struct multipath *m, struct request *clone,
if (was_queued)
m->queue_size--;
- if ((pgpath && m->queue_io) ||
- (!pgpath && m->queue_if_no_path)) {
+ if (m->pg_init_required) {
+ if (!m->pg_init_in_progress)
+ queue_work(kmultipathd, &m->process_queued_ios);
+ r = DM_MAPIO_REQUEUE;
+ } else if ((pgpath && m->queue_io) ||
+ (!pgpath && m->queue_if_no_path)) {
/* Queue for the daemon to resubmit */
list_add_tail(&clone->queuelist, &m->queued_ios);
m->queue_size++;
@@ -497,7 +502,8 @@ static void process_queued_ios(struct work_struct *work)
(!pgpath && !m->queue_if_no_path))
must_queue = 0;
- if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
+ if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
+ !m->pg_init_disabled)
__pg_init_all_paths(m);
spin_unlock_irqrestore(&m->lock, flags);
@@ -942,10 +948,20 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m->lock, flags);
+ m->pg_init_disabled = 1;
+ spin_unlock_irqrestore(&m->lock, flags);
+
flush_workqueue(kmpath_handlerd);
multipath_wait_for_pg_init_completion(m);
flush_workqueue(kmultipathd);
flush_work(&m->trigger_event);
+
+ spin_lock_irqsave(&m->lock, flags);
+ m->pg_init_disabled = 0;
+ spin_unlock_irqrestore(&m->lock, flags);
}
static void multipath_dtr(struct dm_target *ti)
@@ -1164,7 +1180,7 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
spin_lock_irqsave(&m->lock, flags);
- if (m->pg_init_count <= m->pg_init_retries)
+ if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
m->pg_init_required = 1;
else
limit_reached = 1;
@@ -1665,6 +1681,11 @@ static int multipath_busy(struct dm_target *ti)
spin_lock_irqsave(&m->lock, flags);
+ /* pg_init in progress, requeue until done */
+ if (m->pg_init_in_progress) {
+ busy = 1;
+ goto out;
+ }
/* Guess which priority_group will be used at next mapping time */
if (unlikely(!m->current_pgpath && m->next_pg))
pg = m->next_pg;
@@ -1714,7 +1735,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 5, 1},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
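
flush_multipath_work() above brackets the workqueue flushes with a pg_init_disabled window, so no new path-group initialisation can be scheduled while outstanding work drains; pg_init_limit_reached() honours the same flag. A rough pthread-based model of that disable, flush, re-enable sequence, where the mutex and the empty flush_all_work() are stand-ins for the kernel spinlock and workqueue calls:

#include <pthread.h>
#include <stdbool.h>

struct mpath_model {
        pthread_mutex_t lock;
        bool pg_init_disabled;
};

static void flush_all_work(void)
{
        /* stand-in for the flush_workqueue()/flush_work() calls above */
}

static void flush_multipath_work_model(struct mpath_model *m)
{
        pthread_mutex_lock(&m->lock);
        m->pg_init_disabled = true;     /* new pg_init requests are refused */
        pthread_mutex_unlock(&m->lock);

        flush_all_work();               /* drain handlers and queued I/O */

        pthread_mutex_lock(&m->lock);
        m->pg_init_disabled = false;    /* normal operation resumes */
        pthread_mutex_unlock(&m->lock);
}

int main(void)
{
        struct mpath_model m = { PTHREAD_MUTEX_INITIALIZER, false };

        flush_multipath_work_model(&m);
        return 0;
}
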
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9584443c5614..f284e0bfb25f 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
region_t region = dm_rh_bio_to_region(ms->rh, bio);
if (log->type->in_sync(log, region, 0))
- return choose_mirror(ms, bio->bi_sector) ? 1 : 0;
+ return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;
return 0;
}
@@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
*/
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
- if (unlikely(!bio->bi_size))
+ if (unlikely(!bio->bi_iter.bi_size))
return 0;
- return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
+ return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}
static void map_bio(struct mirror *m, struct bio *bio)
{
bio->bi_bdev = m->dev->bdev;
- bio->bi_sector = map_sector(m, bio);
+ bio->bi_iter.bi_sector = map_sector(m, bio);
}
static void map_region(struct dm_io_region *io, struct mirror *m,
@@ -526,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
struct dm_io_region io;
struct dm_io_request io_req = {
.bi_rw = READ,
- .mem.type = DM_IO_BVEC,
- .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+ .mem.type = DM_IO_BIO,
+ .mem.ptr.bio = bio,
.notify.fn = read_callback,
.notify.context = bio,
.client = m->ms->io_client,
@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
* We can only read balance if the region is in sync.
*/
if (likely(region_in_sync(ms, region, 1)))
- m = choose_mirror(ms, bio->bi_sector);
+ m = choose_mirror(ms, bio->bi_iter.bi_sector);
else if (m && atomic_read(&m->error_count))
m = NULL;
@@ -629,8 +629,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
struct mirror *m;
struct dm_io_request io_req = {
.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
- .mem.type = DM_IO_BVEC,
- .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+ .mem.type = DM_IO_BIO,
+ .mem.ptr.bio = bio,
.notify.fn = write_callback,
.notify.context = bio,
.client = ms->io_client,
@@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
* The region is in-sync and we can perform reads directly.
* Store enough information so we can retry if it fails.
*/
- m = choose_mirror(ms, bio->bi_sector);
+ m = choose_mirror(ms, bio->bi_iter.bi_sector);
if (unlikely(!m))
return -EIO;
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 69732e03eb34..b929fd5f4984 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
- return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
+ return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
+ rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index aec57d76db5d..80b5cabbea29 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1415,6 +1415,7 @@ out:
if (full_bio) {
full_bio->bi_end_io = pe->full_bio_end_io;
full_bio->bi_private = pe->full_bio_private;
+ atomic_inc(&full_bio->bi_remaining);
}
free_pending_exception(pe);
@@ -1562,11 +1563,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
struct bio *bio, chunk_t chunk)
{
bio->bi_bdev = s->cow->bdev;
- bio->bi_sector = chunk_to_sector(s->store,
- dm_chunk_number(e->new_chunk) +
- (chunk - e->old_chunk)) +
- (bio->bi_sector &
- s->store->chunk_mask);
+ bio->bi_iter.bi_sector =
+ chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
+ (chunk - e->old_chunk)) +
+ (bio->bi_iter.bi_sector & s->store->chunk_mask);
}
static int snapshot_map(struct dm_target *ti, struct bio *bio)
@@ -1584,7 +1584,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
- chunk = sector_to_chunk(s->store, bio->bi_sector);
+ chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
/* Full snapshots are not usable */
/* To get here the table must be live so s->active is always set. */
@@ -1645,7 +1645,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
r = DM_MAPIO_SUBMITTED;
if (!pe->started &&
- bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+ bio->bi_iter.bi_size ==
+ (s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
up_write(&s->lock);
start_full_bio(pe, bio);
@@ -1701,7 +1702,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
- chunk = sector_to_chunk(s->store, bio->bi_sector);
+ chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
down_write(&s->lock);
@@ -2038,7 +2039,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
if (o)
- r = __origin_write(&o->snapshots, bio->bi_sector, bio);
+ r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
up_read(&_origins_lock);
return r;
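
remap_exception() above keeps the bio's offset within its chunk and only translates the chunk number onto the COW device. A worked example of that arithmetic with a hypothetical 8-sector chunk size and an exception mapping chunk 40 to chunk 7:

#include <stdio.h>

int main(void)
{
        unsigned long long chunk_size = 8, chunk_mask = chunk_size - 1;
        unsigned long long old_chunk = 40, new_chunk = 7;       /* exception: 40 -> 7 */
        unsigned long long bi_sector = 325;                     /* chunk 40, offset 5 */
        unsigned long long chunk = bi_sector / chunk_size;

        unsigned long long cow_sector =
                (new_chunk + (chunk - old_chunk)) * chunk_size +
                (bi_sector & chunk_mask);

        printf("remapped %llu -> %llu\n", bi_sector, cow_sector);       /* 325 -> 61 */
        return 0;
}
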
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 73c1712dad96..d1600d2aa2e2 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
{
sector_t begin, end;
- stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
+ stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
+ target_stripe, &begin);
stripe_map_range_sector(sc, bio_end_sector(bio),
target_stripe, &end);
if (begin < end) {
bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
- bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
- bio->bi_size = to_bytes(end - begin);
+ bio->bi_iter.bi_sector = begin +
+ sc->stripe[target_stripe].physical_start;
+ bio->bi_iter.bi_size = to_bytes(end - begin);
return DM_MAPIO_REMAPPED;
} else {
/* The range doesn't map to the target stripe */
@@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
return stripe_map_range(sc, bio, target_bio_nr);
}
- stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
+ stripe_map_sector(sc, bio->bi_iter.bi_sector,
+ &stripe, &bio->bi_iter.bi_sector);
- bio->bi_sector += sc->stripe[stripe].physical_start;
+ bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
bio->bi_bdev = sc->stripe[stripe].dev->bdev;
return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index ff9ac4be4721..09a688b3d48c 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -311,11 +311,11 @@ error:
static int switch_map(struct dm_target *ti, struct bio *bio)
{
struct switch_ctx *sctx = ti->private;
- sector_t offset = dm_target_offset(ti, bio->bi_sector);
+ sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
unsigned path_nr = switch_get_path_nr(sctx, offset);
bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
- bio->bi_sector = sctx->path_list[path_nr].start + offset;
+ bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
return DM_MAPIO_REMAPPED;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8f8783533ac7..465f08ca62b1 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -545,14 +545,28 @@ static int adjoin(struct dm_table *table, struct dm_target *ti)
/*
* Used to dynamically allocate the arg array.
+ *
+ * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
+ * process messages even if some device is suspended. These messages have a
+ * small fixed number of arguments.
+ *
+ * On the other hand, dm-switch needs to process bulk data using messages and
+ * excessive use of GFP_NOIO could cause trouble.
*/
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
char **argv;
unsigned new_size;
+ gfp_t gfp;
- new_size = *array_size ? *array_size * 2 : 64;
- argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
+ if (*array_size) {
+ new_size = *array_size * 2;
+ gfp = GFP_KERNEL;
+ } else {
+ new_size = 8;
+ gfp = GFP_NOIO;
+ }
+ argv = kmalloc(new_size * sizeof(*argv), gfp);
if (argv) {
memcpy(argv, old_argv, *array_size * sizeof(*argv));
*array_size = new_size;
@@ -1548,8 +1562,11 @@ int dm_table_resume_targets(struct dm_table *t)
continue;
r = ti->type->preresume(ti);
- if (r)
+ if (r) {
+ DMERR("%s: %s: preresume failed, error = %d",
+ dm_device_name(t->md), ti->type->name, r);
return r;
+ }
}
for (i = 0; i < t->num_targets; i++) {
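
The realloc_argv() comment above explains the split allocation policy: the first, small array may be allocated on a path that must not recurse into I/O, while later doublings handle bulk message data. A userspace sketch of that growth policy, with malloc() standing in for kmalloc() (so the GFP_NOIO versus GFP_KERNEL distinction survives only as a comment):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char **realloc_argv_model(unsigned *array_size, char **old_argv)
{
        unsigned new_size;
        char **argv;

        /* first call: 8 slots, "no I/O" allocation; later: double, normal allocation */
        new_size = *array_size ? *array_size * 2 : 8;
        argv = malloc(new_size * sizeof(*argv));
        if (argv) {
                if (*array_size)
                        memcpy(argv, old_argv, *array_size * sizeof(*argv));
                *array_size = new_size;
        }
        free(old_argv);
        return argv;
}

int main(void)
{
        unsigned size = 0;
        char **argv = realloc_argv_model(&size, NULL);  /* 8 slots */

        argv = realloc_argv_model(&size, argv);         /* 16 slots */
        printf("array_size=%u\n", size);
        free(argv);
        return 0;
}
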
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2c0cf511ec23..1abb4a24c338 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -413,7 +413,7 @@ static bool block_size_is_power_of_two(struct pool *pool)
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
- sector_t block_nr = bio->bi_sector;
+ sector_t block_nr = bio->bi_iter.bi_sector;
if (block_size_is_power_of_two(pool))
block_nr >>= pool->sectors_per_block_shift;
@@ -426,14 +426,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
struct pool *pool = tc->pool;
- sector_t bi_sector = bio->bi_sector;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
bio->bi_bdev = tc->pool_dev->bdev;
if (block_size_is_power_of_two(pool))
- bio->bi_sector = (block << pool->sectors_per_block_shift) |
- (bi_sector & (pool->sectors_per_block - 1));
+ bio->bi_iter.bi_sector =
+ (block << pool->sectors_per_block_shift) |
+ (bi_sector & (pool->sectors_per_block - 1));
else
- bio->bi_sector = (block * pool->sectors_per_block) +
+ bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
sector_div(bi_sector, pool->sectors_per_block);
}
@@ -610,8 +611,10 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
- if (m->bio)
+ if (m->bio) {
m->bio->bi_end_io = m->saved_bi_end_io;
+ atomic_inc(&m->bio->bi_remaining);
+ }
cell_error(m->tc->pool, m->cell);
list_del(&m->list);
mempool_free(m, m->tc->pool->mapping_pool);
@@ -625,8 +628,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
int r;
bio = m->bio;
- if (bio)
+ if (bio) {
bio->bi_end_io = m->saved_bi_end_io;
+ atomic_inc(&bio->bi_remaining);
+ }
if (m->err) {
cell_error(pool, m->cell);
@@ -721,7 +726,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
*/
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
- return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
+ return bio->bi_iter.bi_size ==
+ (pool->sectors_per_block << SECTOR_SHIFT);
}
static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -1130,7 +1136,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
if (bio_detain(pool, &key, bio, &cell))
return;
- if (bio_data_dir(bio) == WRITE && bio->bi_size)
+ if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
break_sharing(tc, bio, block, &key, lookup_result, cell);
else {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1153,7 +1159,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
/*
* Remap empty bios (flushes) immediately, without provisioning.
*/
- if (!bio->bi_size) {
+ if (!bio->bi_iter.bi_size) {
inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
@@ -1253,7 +1259,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
switch (r) {
case 0:
- if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
+ if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
bio_io_error(bio);
else {
inc_all_io_entry(tc->pool, bio);
@@ -2867,7 +2873,7 @@ out_unlock:
static int thin_map(struct dm_target *ti, struct bio *bio)
{
- bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
return thin_bio_map(ti, bio);
}
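
The thin-pool remap() above chooses between a shift-and-mask and a multiply-plus-remainder form depending on whether the block size is a power of two. A worked example with made-up numbers: 128-sector blocks and a hypothetical pool mapping of virtual block 7 to physical block 3:

#include <stdio.h>

int main(void)
{
        unsigned long long sectors_per_block = 128;     /* power of two */
        unsigned shift = 7;                             /* log2(128) */
        unsigned long long virt_sector = 1000;          /* virtual block 7, offset 104 */
        unsigned long long offset = virt_sector & (sectors_per_block - 1);
        unsigned long long phys_block = 3;              /* hypothetical mapping: 7 -> 3 */

        unsigned long long pow2 = (phys_block << shift) | offset;
        unsigned long long general = phys_block * sectors_per_block +
                                     (virt_sector % sectors_per_block);

        printf("pow2=%llu general=%llu\n", pow2, general);      /* both 488 */
        return 0;
}
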
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 4b7941db3aff..dc15857578ee 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -73,15 +73,10 @@ struct dm_verity_io {
sector_t block;
unsigned n_blocks;
- /* saved bio vector */
- struct bio_vec *io_vec;
- unsigned io_vec_size;
+ struct bvec_iter iter;
struct work_struct work;
- /* A space for short vectors; longer vectors are allocated separately. */
- struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
-
/*
* Three variably-size fields follow this struct:
*
@@ -284,9 +279,10 @@ release_ret_r:
static int verity_verify_io(struct dm_verity_io *io)
{
struct dm_verity *v = io->v;
+ struct bio *bio = dm_bio_from_per_bio_data(io,
+ v->ti->per_bio_data_size);
unsigned b;
int i;
- unsigned vector = 0, offset = 0;
for (b = 0; b < io->n_blocks; b++) {
struct shash_desc *desc;
@@ -336,31 +332,22 @@ test_block_hash:
}
todo = 1 << v->data_dev_block_bits;
- do {
- struct bio_vec *bv;
+ while (io->iter.bi_size) {
u8 *page;
- unsigned len;
-
- BUG_ON(vector >= io->io_vec_size);
- bv = &io->io_vec[vector];
- page = kmap_atomic(bv->bv_page);
- len = bv->bv_len - offset;
- if (likely(len >= todo))
- len = todo;
- r = crypto_shash_update(desc,
- page + bv->bv_offset + offset, len);
+ struct bio_vec bv = bio_iter_iovec(bio, io->iter);
+
+ page = kmap_atomic(bv.bv_page);
+ r = crypto_shash_update(desc, page + bv.bv_offset,
+ bv.bv_len);
kunmap_atomic(page);
+
if (r < 0) {
DMERR("crypto_shash_update failed: %d", r);
return r;
}
- offset += len;
- if (likely(offset == bv->bv_len)) {
- offset = 0;
- vector++;
- }
- todo -= len;
- } while (todo);
+
+ bio_advance_iter(bio, &io->iter, bv.bv_len);
+ }
if (!v->version) {
r = crypto_shash_update(desc, v->salt, v->salt_size);
@@ -383,8 +370,6 @@ test_block_hash:
return -EIO;
}
}
- BUG_ON(vector != io->io_vec_size);
- BUG_ON(offset);
return 0;
}
@@ -399,9 +384,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_private = io->orig_bi_private;
-
- if (io->io_vec != io->io_vec_inline)
- mempool_free(io->io_vec, v->vec_mempool);
+ atomic_inc(&bio->bi_remaining);
bio_endio(bio, error);
}
@@ -493,9 +476,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
struct dm_verity_io *io;
bio->bi_bdev = v->data_dev->bdev;
- bio->bi_sector = verity_map_sector(v, bio->bi_sector);
+ bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
- if (((unsigned)bio->bi_sector | bio_sectors(bio)) &
+ if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
DMERR_LIMIT("unaligned io");
return -EIO;
@@ -514,18 +497,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
io->v = v;
io->orig_bi_end_io = bio->bi_end_io;
io->orig_bi_private = bio->bi_private;
- io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
- io->n_blocks = bio->bi_size >> v->data_dev_block_bits;
+ io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
+ io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
bio->bi_end_io = verity_end_io;
bio->bi_private = io;
- io->io_vec_size = bio_segments(bio);
- if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
- io->io_vec = io->io_vec_inline;
- else
- io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
- memcpy(io->io_vec, bio_iovec(bio),
- io->io_vec_size * sizeof(struct bio_vec));
+ io->iter = bio->bi_iter;
verity_submit_prefetch(v, io);
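
The verity loop above drops the hand-maintained vector/offset bookkeeping and instead hashes whole segments as bio_iter_iovec() hands them out, advancing with bio_advance_iter(). A toy userspace model of the same loop shape, where an additive checksum stands in for the crypto shash API and a segment array stands in for the bio's iterator state:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct seg { const uint8_t *data; size_t len; };

/* toy stand-in for crypto_shash_update() */
static uint32_t toy_update(uint32_t h, const uint8_t *p, size_t len)
{
        while (len--)
                h = h * 31 + *p++;
        return h;
}

int main(void)
{
        uint8_t block[4096];
        struct seg segs[] = {                   /* one 4 KiB data block in three segments */
                { block,        1024 },
                { block + 1024, 2048 },
                { block + 3072, 1024 },
        };
        size_t todo = sizeof(block), i = 0;
        uint32_t h = 0;

        memset(block, 0xab, sizeof(block));

        while (todo) {                          /* mirrors: while (io->iter.bi_size) */
                h = toy_update(h, segs[i].data, segs[i].len);
                todo -= segs[i].len;            /* mirrors bio_advance_iter(..., bv.bv_len) */
                i++;
        }
        printf("digest=%08x\n", h);
        return 0;
}
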
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b3e26c7d1417..2d55d138b6d9 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -49,6 +49,11 @@ static unsigned int _major = 0;
static DEFINE_IDR(_minor_idr);
static DEFINE_SPINLOCK(_minor_lock);
+
+static void do_deferred_remove(struct work_struct *w);
+
+static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
+
/*
* For bio-based dm.
* One of these is allocated per bio.
@@ -116,6 +121,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6
+#define DMF_DEFERRED_REMOVE 7
/*
* A dummy definition to make RCU happy.
@@ -299,6 +305,8 @@ out_free_io_cache:
static void local_exit(void)
{
+ flush_scheduled_work();
+
kmem_cache_destroy(_rq_tio_cache);
kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
@@ -404,7 +412,10 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode)
spin_lock(&_minor_lock);
- atomic_dec(&md->open_count);
+ if (atomic_dec_and_test(&md->open_count) &&
+ (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
+ schedule_work(&deferred_remove_work);
+
dm_put(md);
spin_unlock(&_minor_lock);
@@ -418,14 +429,18 @@ int dm_open_count(struct mapped_device *md)
/*
* Guarantees nothing is using the device before it's deleted.
*/
-int dm_lock_for_deletion(struct mapped_device *md)
+int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
int r = 0;
spin_lock(&_minor_lock);
- if (dm_open_count(md))
+ if (dm_open_count(md)) {
r = -EBUSY;
+ if (mark_deferred)
+ set_bit(DMF_DEFERRED_REMOVE, &md->flags);
+ } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
+ r = -EEXIST;
else
set_bit(DMF_DELETING, &md->flags);
@@ -434,6 +449,27 @@ int dm_lock_for_deletion(struct mapped_device *md)
return r;
}
+int dm_cancel_deferred_remove(struct mapped_device *md)
+{
+ int r = 0;
+
+ spin_lock(&_minor_lock);
+
+ if (test_bit(DMF_DELETING, &md->flags))
+ r = -EBUSY;
+ else
+ clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
+
+ spin_unlock(&_minor_lock);
+
+ return r;
+}
+
+static void do_deferred_remove(struct work_struct *w)
+{
+ dm_deferred_remove();
+}
+
sector_t dm_get_size(struct mapped_device *md)
{
return get_capacity(md->disk);
@@ -539,7 +575,7 @@ static void start_io_acct(struct dm_io *io)
atomic_inc_return(&md->pending[rw]));
if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
bio_sectors(bio), false, 0, &io->stats_aux);
}
@@ -557,7 +593,7 @@ static void end_io_acct(struct dm_io *io)
part_stat_unlock();
if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
bio_sectors(bio), true, duration, &io->stats_aux);
/*
@@ -706,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error)
if (io_error == DM_ENDIO_REQUEUE)
return;
- if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
+ if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
/*
* Preflush done for flush with data, reissue
* without REQ_FLUSH.
@@ -761,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error)
struct dm_rq_clone_bio_info *info = clone->bi_private;
struct dm_rq_target_io *tio = info->tio;
struct bio *bio = info->orig;
- unsigned int nr_bytes = info->orig->bi_size;
+ unsigned int nr_bytes = info->orig->bi_iter.bi_size;
bio_put(clone);
@@ -1092,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio)
* this io.
*/
atomic_inc(&tio->io->io_count);
- sector = clone->bi_sector;
+ sector = clone->bi_iter.bi_sector;
r = ti->type->map(ti, clone);
if (r == DM_MAPIO_REMAPPED) {
/* the bio has been remapped so dispatch it */
@@ -1119,76 +1155,32 @@ struct clone_info {
struct dm_io *io;
sector_t sector;
sector_t sector_count;
- unsigned short idx;
};
static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
{
- bio->bi_sector = sector;
- bio->bi_size = to_bytes(len);
-}
-
-static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
-{
- bio->bi_idx = idx;
- bio->bi_vcnt = idx + bv_count;
- bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-}
-
-static void clone_bio_integrity(struct bio *bio, struct bio *clone,
- unsigned short idx, unsigned len, unsigned offset,
- unsigned trim)
-{
- if (!bio_integrity(bio))
- return;
-
- bio_integrity_clone(clone, bio, GFP_NOIO);
-
- if (trim)
- bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
-}
-
-/*
- * Creates a little bio that just does part of a bvec.
- */
-static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned short idx,
- unsigned offset, unsigned len)
-{
- struct bio *clone = &tio->clone;
- struct bio_vec *bv = bio->bi_io_vec + idx;
-
- *clone->bi_io_vec = *bv;
-
- bio_setup_sector(clone, sector, len);
-
- clone->bi_bdev = bio->bi_bdev;
- clone->bi_rw = bio->bi_rw;
- clone->bi_vcnt = 1;
- clone->bi_io_vec->bv_offset = offset;
- clone->bi_io_vec->bv_len = clone->bi_size;
- clone->bi_flags |= 1 << BIO_CLONED;
-
- clone_bio_integrity(bio, clone, idx, len, offset, 1);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_iter.bi_size = to_bytes(len);
}
/*
* Creates a bio that consists of range of complete bvecs.
*/
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned short idx,
- unsigned short bv_count, unsigned len)
+ sector_t sector, unsigned len)
{
struct bio *clone = &tio->clone;
- unsigned trim = 0;
__bio_clone(clone, bio);
- bio_setup_sector(clone, sector, len);
- bio_setup_bv(clone, idx, bv_count);
- if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
- trim = 1;
- clone_bio_integrity(bio, clone, idx, len, 0, trim);
+ if (bio_integrity(bio))
+ bio_integrity_clone(clone, bio, GFP_NOIO);
+
+ bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+ clone->bi_iter.bi_size = to_bytes(len);
+
+ if (bio_integrity(bio))
+ bio_integrity_trim(clone, 0, len);
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
@@ -1250,10 +1242,7 @@ static int __send_empty_flush(struct clone_info *ci)
}
static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
- sector_t sector, int nr_iovecs,
- unsigned short idx, unsigned short bv_count,
- unsigned offset, unsigned len,
- unsigned split_bvec)
+ sector_t sector, unsigned len)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
@@ -1267,11 +1256,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
num_target_bios = ti->num_write_bios(ti, bio);
for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
- tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
- if (split_bvec)
- clone_split_bio(tio, bio, sector, idx, offset, len);
- else
- clone_bio(tio, bio, sector, idx, bv_count, len);
+ tio = alloc_tio(ci, ti, 0, target_bio_nr);
+ clone_bio(tio, bio, sector, len);
__map_bio(tio);
}
}
@@ -1343,68 +1329,13 @@ static int __send_write_same(struct clone_info *ci)
}
/*
- * Find maximum number of sectors / bvecs we can process with a single bio.
- */
-static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
-{
- struct bio *bio = ci->bio;
- sector_t bv_len, total_len = 0;
-
- for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
- bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
-
- if (bv_len > max)
- break;
-
- max -= bv_len;
- total_len += bv_len;
- }
-
- return total_len;
-}
-
-static int __split_bvec_across_targets(struct clone_info *ci,
- struct dm_target *ti, sector_t max)
-{
- struct bio *bio = ci->bio;
- struct bio_vec *bv = bio->bi_io_vec + ci->idx;
- sector_t remaining = to_sector(bv->bv_len);
- unsigned offset = 0;
- sector_t len;
-
- do {
- if (offset) {
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
-
- max = max_io_len(ci->sector, ti);
- }
-
- len = min(remaining, max);
-
- __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
- bv->bv_offset + offset, len, 1);
-
- ci->sector += len;
- ci->sector_count -= len;
- offset += to_bytes(len);
- } while (remaining -= len);
-
- ci->idx++;
-
- return 0;
-}
-
-/*
* Select the correct strategy for processing a non-flush bio.
*/
static int __split_and_process_non_flush(struct clone_info *ci)
{
struct bio *bio = ci->bio;
struct dm_target *ti;
- sector_t len, max;
- int idx;
+ unsigned len;
if (unlikely(bio->bi_rw & REQ_DISCARD))
return __send_discard(ci);
@@ -1415,41 +1346,14 @@ static int __split_and_process_non_flush(struct clone_info *ci)
if (!dm_target_is_valid(ti))
return -EIO;
- max = max_io_len(ci->sector, ti);
-
- /*
- * Optimise for the simple case where we can do all of
- * the remaining io with a single clone.
- */
- if (ci->sector_count <= max) {
- __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
- ci->idx, bio->bi_vcnt - ci->idx, 0,
- ci->sector_count, 0);
- ci->sector_count = 0;
- return 0;
- }
-
- /*
- * There are some bvecs that don't span targets.
- * Do as many of these as possible.
- */
- if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
- len = __len_within_target(ci, max, &idx);
-
- __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
- ci->idx, idx - ci->idx, 0, len, 0);
+ len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
- ci->sector += len;
- ci->sector_count -= len;
- ci->idx = idx;
+ __clone_and_map_data_bio(ci, ti, ci->sector, len);
- return 0;
- }
+ ci->sector += len;
+ ci->sector_count -= len;
- /*
- * Handle a bvec that must be split between two or more targets.
- */
- return __split_bvec_across_targets(ci, ti, max);
+ return 0;
}
/*
@@ -1474,8 +1378,7 @@ static void __split_and_process_bio(struct mapped_device *md,
ci.io->bio = bio;
ci.io->md = md;
spin_lock_init(&ci.io->endio_lock);
- ci.sector = bio->bi_sector;
- ci.idx = bio->bi_idx;
+ ci.sector = bio->bi_iter.bi_sector;
start_io_acct(ci.io);
@@ -2894,6 +2797,11 @@ int dm_suspended_md(struct mapped_device *md)
return test_bit(DMF_SUSPENDED, &md->flags);
}
+int dm_test_deferred_remove_flag(struct mapped_device *md)
+{
+ return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
+}
+
int dm_suspended(struct dm_target *ti)
{
return dm_suspended_md(dm_table_get_md(ti->table));
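
clone_bio() above now narrows a full clone to the wanted range purely by advancing the clone's iterator and shrinking bi_iter.bi_size, instead of copying a bvec subrange and fixing up indices. A sketch of that sector arithmetic with a stand-in iterator struct rather than struct bio:

#include <stdio.h>

struct iter_model { unsigned long long sector; unsigned size; /* bytes */ };

/* models bio_advance(): move the start forward, shrink the size */
static void advance(struct iter_model *it, unsigned bytes)
{
        it->sector += bytes >> 9;
        it->size -= bytes;
}

int main(void)
{
        struct iter_model orig = { .sector = 100, .size = 64 * 512 };
        struct iter_model clone = orig;                 /* mirrors __bio_clone() */
        unsigned long long target_sector = 116;         /* start of the wanted range */
        unsigned len = 32;                              /* sectors to keep */

        advance(&clone, (unsigned)((target_sector - clone.sector) << 9));
        clone.size = len << 9;                          /* trim the tail, as bi_size is set above */

        printf("clone: sector=%llu size=%u\n", clone.sector, clone.size);
        return 0;
}
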
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 1d1ad7b7e527..c57ba550f69e 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -129,6 +129,16 @@ int dm_deleting_md(struct mapped_device *md);
int dm_suspended_md(struct mapped_device *md);
/*
+ * Test if the device is scheduled for deferred remove.
+ */
+int dm_test_deferred_remove_flag(struct mapped_device *md);
+
+/*
+ * Try to remove devices marked for deferred removal.
+ */
+void dm_deferred_remove(void);
+
+/*
* The device-mapper can be driven through one of two interfaces;
* ioctl or filesystem, depending which patch you have applied.
*/
@@ -158,7 +168,8 @@ void dm_stripe_exit(void);
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
-int dm_lock_for_deletion(struct mapped_device *md);
+int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
+int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
struct dm_stats *dm_get_stats(struct mapped_device *md);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 3193aefe982b..e8b4574956c7 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error)
{
struct bio *b = bio->bi_private;
- b->bi_size = bio->bi_size;
- b->bi_sector = bio->bi_sector;
+ b->bi_iter.bi_size = bio->bi_iter.bi_size;
+ b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
bio_put(bio);
@@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio)
return;
}
- if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
+ if (check_sector(conf, bio->bi_iter.bi_sector,
+ bio_end_sector(bio), WRITE))
failit = 1;
if (check_mode(conf, WritePersistent)) {
- add_sector(conf, bio->bi_sector, WritePersistent);
+ add_sector(conf, bio->bi_iter.bi_sector,
+ WritePersistent);
failit = 1;
}
if (check_mode(conf, WriteTransient))
failit = 1;
} else {
/* read request */
- if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
+ if (check_sector(conf, bio->bi_iter.bi_sector,
+ bio_end_sector(bio), READ))
failit = 1;
if (check_mode(conf, ReadTransient))
failit = 1;
if (check_mode(conf, ReadPersistent)) {
- add_sector(conf, bio->bi_sector, ReadPersistent);
+ add_sector(conf, bio->bi_iter.bi_sector,
+ ReadPersistent);
failit = 1;
}
if (check_mode(conf, ReadFixable)) {
- add_sector(conf, bio->bi_sector, ReadFixable);
+ add_sector(conf, bio->bi_iter.bi_sector,
+ ReadFixable);
failit = 1;
}
}
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index f03fabd2b37b..56f534b4a2d2 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -288,65 +288,65 @@ static int linear_stop (struct mddev *mddev)
static void linear_make_request(struct mddev *mddev, struct bio *bio)
{
+ char b[BDEVNAME_SIZE];
struct dev_info *tmp_dev;
- sector_t start_sector;
+ struct bio *split;
+ sector_t start_sector, end_sector, data_offset;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
return;
}
- rcu_read_lock();
- tmp_dev = which_dev(mddev, bio->bi_sector);
- start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
-
-
- if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
- || (bio->bi_sector < start_sector))) {
- char b[BDEVNAME_SIZE];
-
- printk(KERN_ERR
- "md/linear:%s: make_request: Sector %llu out of bounds on "
- "dev %s: %llu sectors, offset %llu\n",
- mdname(mddev),
- (unsigned long long)bio->bi_sector,
- bdevname(tmp_dev->rdev->bdev, b),
- (unsigned long long)tmp_dev->rdev->sectors,
- (unsigned long long)start_sector);
- rcu_read_unlock();
- bio_io_error(bio);
- return;
- }
- if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
- /* This bio crosses a device boundary, so we have to
- * split it.
- */
- struct bio_pair *bp;
- sector_t end_sector = tmp_dev->end_sector;
+ do {
+ rcu_read_lock();
- rcu_read_unlock();
-
- bp = bio_split(bio, end_sector - bio->bi_sector);
+ tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
+ start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+ end_sector = tmp_dev->end_sector;
+ data_offset = tmp_dev->rdev->data_offset;
+ bio->bi_bdev = tmp_dev->rdev->bdev;
- linear_make_request(mddev, &bp->bio1);
- linear_make_request(mddev, &bp->bio2);
- bio_pair_release(bp);
- return;
- }
-
- bio->bi_bdev = tmp_dev->rdev->bdev;
- bio->bi_sector = bio->bi_sector - start_sector
- + tmp_dev->rdev->data_offset;
- rcu_read_unlock();
+ rcu_read_unlock();
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
- /* Just ignore it */
- bio_endio(bio, 0);
- return;
- }
+ if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
+ bio->bi_iter.bi_sector < start_sector))
+ goto out_of_bounds;
+
+ if (unlikely(bio_end_sector(bio) > end_sector)) {
+ /* This bio crosses a device boundary, so we have to
+ * split it.
+ */
+ split = bio_split(bio, end_sector -
+ bio->bi_iter.bi_sector,
+ GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
- generic_make_request(bio);
+ split->bi_iter.bi_sector = split->bi_iter.bi_sector -
+ start_sector + data_offset;
+
+ if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+ /* Just ignore it */
+ bio_endio(split, 0);
+ } else
+ generic_make_request(split);
+ } while (split != bio);
+ return;
+
+out_of_bounds:
+ printk(KERN_ERR
+ "md/linear:%s: make_request: Sector %llu out of bounds on "
+ "dev %s: %llu sectors, offset %llu\n",
+ mdname(mddev),
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bdevname(tmp_dev->rdev->bdev, b),
+ (unsigned long long)tmp_dev->rdev->sectors,
+ (unsigned long long)start_sector);
+ bio_io_error(bio);
}
static void linear_status (struct seq_file *seq, struct mddev *mddev)
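
linear_make_request() above replaces the recursive bio_pair split with a loop: bio_split() peels off whatever fits in the current device, bio_chain() ties its completion to the parent, and the remainder goes around again until nothing crosses a boundary. The raid0 hunks further down use the same shape. A userspace model of that loop, with a hypothetical req type, a pretend 1024-sector device size and submit() as a stand-in for generic_make_request():

#include <stdio.h>

struct req { unsigned long long sector; unsigned sectors; };

/* pretend every backing device covers 1024 sectors */
static unsigned long long dev_end(unsigned long long sector)
{
        return (sector / 1024 + 1) * 1024;
}

static void submit(struct req r)
{
        printf("submit: sector=%llu sectors=%u\n", r.sector, r.sectors);
}

int main(void)
{
        struct req bio = { .sector = 1000, .sectors = 3000 };
        int more;

        do {
                unsigned fit = (unsigned)(dev_end(bio.sector) - bio.sector);
                struct req split = bio;

                more = fit < bio.sectors;
                if (more) {                     /* mirrors bio_split() + bio_chain() */
                        split.sectors = fit;
                        bio.sector += fit;      /* parent keeps the remainder */
                        bio.sectors -= fit;
                }
                submit(split);                  /* mirrors generic_make_request(split) */
        } while (more);                         /* mirrors: while (split != bio) */
        return 0;
}
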
diff --git a/drivers/md/md.c b/drivers/md/md.c
index adf4d7e1d5e1..046cec589538 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -112,7 +112,7 @@ static inline int speed_max(struct mddev *mddev)
static struct ctl_table_header *raid_table_header;
-static ctl_table raid_table[] = {
+static struct ctl_table raid_table[] = {
{
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
@@ -130,7 +130,7 @@ static ctl_table raid_table[] = {
{ }
};
-static ctl_table raid_dir_table[] = {
+static struct ctl_table raid_dir_table[] = {
{
.procname = "raid",
.maxlen = 0,
@@ -140,7 +140,7 @@ static ctl_table raid_dir_table[] = {
{ }
};
-static ctl_table raid_root_table[] = {
+static struct ctl_table raid_root_table[] = {
{
.procname = "dev",
.maxlen = 0,
@@ -183,46 +183,6 @@ struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
-void md_trim_bio(struct bio *bio, int offset, int size)
-{
- /* 'bio' is a cloned bio which we need to trim to match
- * the given offset and size.
- * This requires adjusting bi_sector, bi_size, and bi_io_vec
- */
- int i;
- struct bio_vec *bvec;
- int sofar = 0;
-
- size <<= 9;
- if (offset == 0 && size == bio->bi_size)
- return;
-
- clear_bit(BIO_SEG_VALID, &bio->bi_flags);
-
- bio_advance(bio, offset << 9);
-
- bio->bi_size = size;
-
- /* avoid any complications with bi_idx being non-zero*/
- if (bio->bi_idx) {
- memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
- (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
- bio->bi_vcnt -= bio->bi_idx;
- bio->bi_idx = 0;
- }
- /* Make sure vcnt and last bv are not too big */
- bio_for_each_segment(bvec, bio, i) {
- if (sofar + bvec->bv_len > size)
- bvec->bv_len = size - sofar;
- if (bvec->bv_len == 0) {
- bio->bi_vcnt = i;
- break;
- }
- sofar += bvec->bv_len;
- }
-}
-EXPORT_SYMBOL_GPL(md_trim_bio);
-
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
@@ -433,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws)
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct bio *bio = mddev->flush_bio;
- if (bio->bi_size == 0)
+ if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio, 0);
else {
@@ -786,7 +746,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
@@ -825,13 +785,13 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev;
if (metadata_op)
- bio->bi_sector = sector + rdev->sb_start;
+ bio->bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
(rdev->mddev->reshape_backwards ==
(sector >= rdev->mddev->reshape_position)))
- bio->bi_sector = sector + rdev->new_data_offset;
+ bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
else
- bio->bi_sector = sector + rdev->data_offset;
+ bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
init_completion(&event);
bio->bi_private = &event;
@@ -3555,7 +3515,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
- mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
+ mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
}
if (mddev->pers->sync_request != NULL &&
pers->sync_request == NULL) {
@@ -5331,20 +5291,31 @@ EXPORT_SYMBOL_GPL(md_stop);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
int err = 0;
+ int did_freeze = 0;
+
+ if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+ did_freeze = 1;
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
+ if (mddev->sync_thread)
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ mddev_unlock(mddev);
+ wait_event(resync_wait, mddev->sync_thread == NULL);
+ mddev_lock(mddev);
+
mutex_lock(&mddev->open_mutex);
- if (atomic_read(&mddev->openers) > !!bdev) {
+ if (atomic_read(&mddev->openers) > !!bdev ||
+ mddev->sync_thread ||
+ (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
+ if (did_freeze) {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
err = -EBUSY;
goto out;
}
- if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
- /* Someone opened the device since we flushed it
- * so page cache could be dirty and it is too late
- * to flush. So abort
- */
- mutex_unlock(&mddev->open_mutex);
- return -EBUSY;
- }
if (mddev->pers) {
__md_stop_writes(mddev);
@@ -5355,7 +5326,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_state);
- err = 0;
+ err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);
@@ -5371,20 +5342,30 @@ static int do_md_stop(struct mddev * mddev, int mode,
{
struct gendisk *disk = mddev->gendisk;
struct md_rdev *rdev;
+ int did_freeze = 0;
+
+ if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+ did_freeze = 1;
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
+ if (mddev->sync_thread)
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ mddev_unlock(mddev);
+ wait_event(resync_wait, mddev->sync_thread == NULL);
+ mddev_lock(mddev);
mutex_lock(&mddev->open_mutex);
if (atomic_read(&mddev->openers) > !!bdev ||
- mddev->sysfs_active) {
+ mddev->sysfs_active ||
+ mddev->sync_thread ||
+ (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
- return -EBUSY;
- }
- if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
- /* Someone opened the device since we flushed it
- * so page cache could be dirty and it is too late
- * to flush. So abort
- */
- mutex_unlock(&mddev->open_mutex);
+ if (did_freeze) {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
return -EBUSY;
}
if (mddev->pers) {
@@ -7934,6 +7915,7 @@ void md_reap_sync_thread(struct mddev *mddev)
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
+ wake_up(&resync_wait);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
@@ -8111,6 +8093,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
u64 *p;
int lo, hi;
int rv = 1;
+ unsigned long flags;
if (bb->shift < 0)
/* badblocks are disabled */
@@ -8125,7 +8108,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
sectors = next - s;
}
- write_seqlock_irq(&bb->lock);
+ write_seqlock_irqsave(&bb->lock, flags);
p = bb->page;
lo = 0;
@@ -8241,7 +8224,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
bb->changed = 1;
if (!acknowledged)
bb->unacked_exist = 1;
- write_sequnlock_irq(&bb->lock);
+ write_sequnlock_irqrestore(&bb->lock, flags);
return rv;
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 608050c43f17..2f5cc8a7ef3e 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -501,7 +501,7 @@ extern struct attribute_group md_bitmap_group;
static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
{
if (sd)
- return sysfs_get_dirent(sd, NULL, name);
+ return sysfs_get_dirent(sd, name);
return sd;
}
static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
@@ -617,7 +617,6 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev);
-extern void md_trim_bio(struct bio *bio, int offset, int size);
extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
static inline int mddev_check_plugged(struct mddev *mddev)
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1642eae75a33..849ad39f547b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error)
md_error (mp_bh->mddev, rdev);
printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
bdevname(rdev->bdev,b),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
multipath_end_bh_io(mp_bh, error);
@@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
multipath = conf->multipaths + mp_bh->path;
mp_bh->bio = *bio;
- mp_bh->bio.bi_sector += multipath->rdev->data_offset;
+ mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
@@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread)
spin_unlock_irqrestore(&conf->device_lock, flags);
bio = &mp_bh->bio;
- bio->bi_sector = mp_bh->master_bio->bi_sector;
+ bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
" error for block %llu\n",
bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, -EIO);
} else {
printk(KERN_ERR "multipath: %s: redirecting sector %llu"
" to another IO path\n",
bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio);
- bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
+ bio->bi_iter.bi_sector +=
+ conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index 172147eb1d40..af96e24ec328 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -509,15 +509,18 @@ static int grow_add_tail_block(struct resize *resize)
static int grow_needs_more_blocks(struct resize *resize)
{
int r;
+ unsigned old_nr_blocks = resize->old_nr_full_blocks;
if (resize->old_nr_entries_in_last_block > 0) {
+ old_nr_blocks++;
+
r = grow_extend_tail_block(resize, resize->max_entries);
if (r)
return r;
}
r = insert_full_ablocks(resize->info, resize->size_of_block,
- resize->old_nr_full_blocks,
+ old_nr_blocks,
resize->new_nr_full_blocks,
resize->max_entries, resize->value,
&resize->root);
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index e735a6d5a793..cfbf9617e465 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -140,26 +140,10 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
{
- int r;
- uint32_t old_count;
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
- r = sm_ll_dec(&smd->ll, b, &ev);
- if (!r && (ev == SM_FREE)) {
- /*
- * It's only free if it's also free in the last
- * transaction.
- */
- r = sm_ll_lookup(&smd->old_ll, b, &old_count);
- if (r)
- return r;
-
- if (!old_count)
- smd->nr_allocated_this_transaction--;
- }
-
- return r;
+ return sm_ll_dec(&smd->ll, b, &ev);
}
static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c4d420b7d2f4..407a99e46f69 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
unsigned int chunk_sects, struct bio *bio)
{
if (likely(is_power_of_2(chunk_sects))) {
- return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+ return chunk_sects >=
+ ((bio->bi_iter.bi_sector & (chunk_sects-1))
+ bio_sectors(bio));
} else{
- sector_t sector = bio->bi_sector;
+ sector_t sector = bio->bi_iter.bi_sector;
return chunk_sects >= (sector_div(sector, chunk_sects)
+ bio_sectors(bio));
}
@@ -512,64 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
- unsigned int chunk_sects;
- sector_t sector_offset;
struct strip_zone *zone;
struct md_rdev *tmp_dev;
+ struct bio *split;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
return;
}
- chunk_sects = mddev->chunk_sectors;
- if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
- sector_t sector = bio->bi_sector;
- struct bio_pair *bp;
- /* Sanity check -- queue functions should prevent this happening */
- if (bio_segments(bio) > 1)
- goto bad_map;
- /* This is a one page bio that upper layers
- * refuse to split for us, so we need to split it.
- */
- if (likely(is_power_of_2(chunk_sects)))
- bp = bio_split(bio, chunk_sects - (sector &
- (chunk_sects-1)));
- else
- bp = bio_split(bio, chunk_sects -
- sector_div(sector, chunk_sects));
- raid0_make_request(mddev, &bp->bio1);
- raid0_make_request(mddev, &bp->bio2);
- bio_pair_release(bp);
- return;
- }
+ do {
+ sector_t sector = bio->bi_iter.bi_sector;
+ unsigned chunk_sects = mddev->chunk_sectors;
- sector_offset = bio->bi_sector;
- zone = find_zone(mddev->private, &sector_offset);
- tmp_dev = map_sector(mddev, zone, bio->bi_sector,
- &sector_offset);
- bio->bi_bdev = tmp_dev->bdev;
- bio->bi_sector = sector_offset + zone->dev_start +
- tmp_dev->data_offset;
-
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
- /* Just ignore it */
- bio_endio(bio, 0);
- return;
- }
+ unsigned sectors = chunk_sects -
+ (likely(is_power_of_2(chunk_sects))
+ ? (sector & (chunk_sects-1))
+ : sector_div(sector, chunk_sects));
- generic_make_request(bio);
- return;
-
-bad_map:
- printk("md/raid0:%s: make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n",
- mdname(mddev), chunk_sects / 2,
- (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
+ if (sectors < bio_sectors(bio)) {
+ split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
- bio_io_error(bio);
- return;
+ zone = find_zone(mddev->private, &sector);
+ tmp_dev = map_sector(mddev, zone, sector, &sector);
+ split->bi_bdev = tmp_dev->bdev;
+ split->bi_iter.bi_sector = sector + zone->dev_start +
+ tmp_dev->data_offset;
+
+ if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+ /* Just ignore it */
+ bio_endio(split, 0);
+ } else
+ generic_make_request(split);
+ } while (split != bio);
}
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d60412c7f995..b172401c844b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -84,10 +84,12 @@ static void r1bio_pool_free(void *r1_bio, void *data)
}
#define RESYNC_BLOCK_SIZE (64*1024)
-//#define RESYNC_BLOCK_SIZE PAGE_SIZE
+#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-#define RESYNC_WINDOW (2048*1024)
+#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
+#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
+#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
@@ -255,9 +257,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
(bio_data_dir(bio) == WRITE) ? "write" : "read",
- (unsigned long long) bio->bi_sector,
- (unsigned long long) bio->bi_sector +
- bio_sectors(bio) - 1);
+ (unsigned long long) bio->bi_iter.bi_sector,
+ (unsigned long long) bio_end_sector(bio) - 1);
call_bio_endio(r1_bio);
}
@@ -456,9 +457,8 @@ static void raid1_end_write_request(struct bio *bio, int error)
struct bio *mbio = r1_bio->master_bio;
pr_debug("raid1: behind end write sectors"
" %llu-%llu\n",
- (unsigned long long) mbio->bi_sector,
- (unsigned long long) mbio->bi_sector +
- bio_sectors(mbio) - 1);
+ (unsigned long long) mbio->bi_iter.bi_sector,
+ (unsigned long long) bio_end_sector(mbio) - 1);
call_bio_endio(r1_bio);
}
}
@@ -814,8 +814,6 @@ static void flush_pending_writes(struct r1conf *conf)
* there is no normal IO happening. It must arrange to call
* lower_barrier when the particular background IO completes.
*/
-#define RESYNC_DEPTH 32
-
static void raise_barrier(struct r1conf *conf)
{
spin_lock_irq(&conf->resync_lock);
@@ -829,6 +827,7 @@ static void raise_barrier(struct r1conf *conf)
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
+ !conf->array_frozen &&
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
conf->resync_lock);
@@ -860,10 +859,11 @@ static void wait_barrier(struct r1conf *conf)
* count down.
*/
wait_event_lock_irq(conf->wait_barrier,
- !conf->barrier ||
+ !conf->array_frozen &&
+ (!conf->barrier ||
(conf->nr_pending &&
current->bio_list &&
- !bio_list_empty(current->bio_list)),
+ !bio_list_empty(current->bio_list))),
conf->resync_lock);
conf->nr_waiting--;
}
@@ -884,8 +884,7 @@ static void freeze_array(struct r1conf *conf, int extra)
{
/* stop syncio and normal IO and wait for everything to
* go quiet.
- * We increment barrier and nr_waiting, and then
- * wait until nr_pending match nr_queued+extra
+ * We wait until nr_pending match nr_queued+extra
* This is called in the context of one normal IO request
* that has failed. Thus any sync request that might be pending
* will be blocked by nr_pending, and we need to wait for
@@ -895,8 +894,7 @@ static void freeze_array(struct r1conf *conf, int extra)
* we continue.
*/
spin_lock_irq(&conf->resync_lock);
- conf->barrier++;
- conf->nr_waiting++;
+ conf->array_frozen = 1;
wait_event_lock_irq_cmd(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
@@ -907,8 +905,7 @@ static void unfreeze_array(struct r1conf *conf)
{
/* reverse the effect of the freeze */
spin_lock_irq(&conf->resync_lock);
- conf->barrier--;
- conf->nr_waiting--;
+ conf->array_frozen = 0;
wake_up(&conf->wait_barrier);
spin_unlock_irq(&conf->resync_lock);
}
@@ -945,7 +942,8 @@ do_sync_io:
if (bvecs[i].bv_page)
put_page(bvecs[i].bv_page);
kfree(bvecs);
- pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
+ pr_debug("%dB behind alloc failed, doing sync I/O\n",
+ bio->bi_iter.bi_size);
}
struct raid1_plug_cb {
@@ -1024,7 +1022,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
if (bio_data_dir(bio) == WRITE &&
bio_end_sector(bio) > mddev->suspend_lo &&
- bio->bi_sector < mddev->suspend_hi) {
+ bio->bi_iter.bi_sector < mddev->suspend_hi) {
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
* wait.
@@ -1035,7 +1033,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
prepare_to_wait(&conf->wait_barrier,
&w, TASK_INTERRUPTIBLE);
if (bio_end_sector(bio) <= mddev->suspend_lo ||
- bio->bi_sector >= mddev->suspend_hi)
+ bio->bi_iter.bi_sector >= mddev->suspend_hi)
break;
schedule();
}
@@ -1057,7 +1055,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
r1_bio->sectors = bio_sectors(bio);
r1_bio->state = 0;
r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector;
+ r1_bio->sector = bio->bi_iter.bi_sector;
/* We might need to issue multiple reads to different
* devices if there are bad blocks around, so we keep
@@ -1097,12 +1095,13 @@ read_again:
r1_bio->read_disk = rdisk;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
r1_bio->bios[rdisk] = read_bio;
- read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
+ read_bio->bi_iter.bi_sector = r1_bio->sector +
+ mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
read_bio->bi_rw = READ | do_sync;
@@ -1114,7 +1113,7 @@ read_again:
*/
sectors_handled = (r1_bio->sector + max_sectors
- - bio->bi_sector);
+ - bio->bi_iter.bi_sector);
r1_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (bio->bi_phys_segments == 0)
@@ -1135,7 +1134,8 @@ read_again:
r1_bio->sectors = bio_sectors(bio) - sectors_handled;
r1_bio->state = 0;
r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector + sectors_handled;
+ r1_bio->sector = bio->bi_iter.bi_sector +
+ sectors_handled;
goto read_again;
} else
generic_make_request(read_bio);
@@ -1254,7 +1254,7 @@ read_again:
bio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
}
- sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
+ sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
atomic_set(&r1_bio->remaining, 1);
atomic_set(&r1_bio->behind_remaining, 0);
@@ -1266,7 +1266,7 @@ read_again:
continue;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
if (first_clone) {
/* do behind I/O ?
@@ -1300,7 +1300,7 @@ read_again:
r1_bio->bios[i] = mbio;
- mbio->bi_sector = (r1_bio->sector +
+ mbio->bi_iter.bi_sector = (r1_bio->sector +
conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
@@ -1340,7 +1340,7 @@ read_again:
r1_bio->sectors = bio_sectors(bio) - sectors_handled;
r1_bio->state = 0;
r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector + sectors_handled;
+ r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
goto retry_write;
}
@@ -1479,6 +1479,7 @@ static int raid1_spare_active(struct mddev *mddev)
}
}
if (rdev
+ && rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &rdev->flags)
&& !test_and_set_bit(In_sync, &rdev->flags)) {
count++;
@@ -1860,14 +1861,14 @@ static int process_checks(struct r1bio *r1_bio)
/* fixup the bio for reuse */
bio_reset(b);
b->bi_vcnt = vcnt;
- b->bi_size = r1_bio->sectors << 9;
- b->bi_sector = r1_bio->sector +
+ b->bi_iter.bi_size = r1_bio->sectors << 9;
+ b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
b->bi_bdev = conf->mirrors[i].rdev->bdev;
b->bi_end_io = end_sync_read;
b->bi_private = r1_bio;
- size = b->bi_size;
+ size = b->bi_iter.bi_size;
for (j = 0; j < vcnt ; j++) {
struct bio_vec *bi;
bi = &b->bi_io_vec[j];
@@ -2122,11 +2123,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
}
wbio->bi_rw = WRITE;
- wbio->bi_sector = r1_bio->sector;
- wbio->bi_size = r1_bio->sectors << 9;
+ wbio->bi_iter.bi_sector = r1_bio->sector;
+ wbio->bi_iter.bi_size = r1_bio->sectors << 9;
- md_trim_bio(wbio, sector - r1_bio->sector, sectors);
- wbio->bi_sector += rdev->data_offset;
+ bio_trim(wbio, sector - r1_bio->sector, sectors);
+ wbio->bi_iter.bi_sector += rdev->data_offset;
wbio->bi_bdev = rdev->bdev;
if (submit_bio_wait(WRITE, wbio) == 0)
/* failure! */
@@ -2240,7 +2241,8 @@ read_more:
}
r1_bio->read_disk = disk;
bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
- md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
printk_ratelimited(KERN_ERR
@@ -2249,7 +2251,7 @@ read_more:
mdname(mddev),
(unsigned long long)r1_bio->sector,
bdevname(rdev->bdev, b));
- bio->bi_sector = r1_bio->sector + rdev->data_offset;
+ bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
bio->bi_rw = READ | do_sync;
@@ -2258,7 +2260,7 @@ read_more:
/* Drat - have to split this up more */
struct bio *mbio = r1_bio->master_bio;
int sectors_handled = (r1_bio->sector + max_sectors
- - mbio->bi_sector);
+ - mbio->bi_iter.bi_sector);
r1_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (mbio->bi_phys_segments == 0)
@@ -2276,7 +2278,8 @@ read_more:
r1_bio->state = 0;
set_bit(R1BIO_ReadError, &r1_bio->state);
r1_bio->mddev = mddev;
- r1_bio->sector = mbio->bi_sector + sectors_handled;
+ r1_bio->sector = mbio->bi_iter.bi_sector +
+ sectors_handled;
goto read_more;
} else
@@ -2500,7 +2503,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
}
if (bio->bi_end_io) {
atomic_inc(&rdev->nr_pending);
- bio->bi_sector = sector_nr + rdev->data_offset;
+ bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_private = r1_bio;
}
@@ -2600,7 +2603,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
continue;
/* remove last page from this bio */
bio->bi_vcnt--;
- bio->bi_size -= len;
+ bio->bi_iter.bi_size -= len;
bio->bi_flags &= ~(1<< BIO_SEG_VALID);
}
goto bio_full;
@@ -2870,8 +2873,8 @@ static int stop(struct mddev *mddev)
atomic_read(&bitmap->behind_writes) == 0);
}
- raise_barrier(conf);
- lower_barrier(conf);
+ freeze_array(conf, 0);
+ unfreeze_array(conf);
md_unregister_thread(&mddev->thread);
if (conf->r1bio_pool)
@@ -3030,10 +3033,10 @@ static void raid1_quiesce(struct mddev *mddev, int state)
wake_up(&conf->wait_barrier);
break;
case 1:
- raise_barrier(conf);
+ freeze_array(conf, 0);
break;
case 0:
- lower_barrier(conf);
+ unfreeze_array(conf);
break;
}
}
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 0ff3715fb7eb..331a98a231b4 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -65,6 +65,7 @@ struct r1conf {
int nr_waiting;
int nr_queued;
int barrier;
+ int array_frozen;
/* Set to 1 if a full sync is needed, (fresh device added).
* Cleared when a sync completes.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index df7b0a06b0ea..bba256d8ca81 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1152,14 +1152,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
kfree(plug);
}
-static void make_request(struct mddev *mddev, struct bio * bio)
+static void __make_request(struct mddev *mddev, struct bio *bio)
{
struct r10conf *conf = mddev->private;
struct r10bio *r10_bio;
struct bio *read_bio;
int i;
- sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
- int chunk_sects = chunk_mask + 1;
const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
@@ -1174,88 +1172,27 @@ static void make_request(struct mddev *mddev, struct bio * bio)
int max_sectors;
int sectors;
- if (unlikely(bio->bi_rw & REQ_FLUSH)) {
- md_flush_request(mddev, bio);
- return;
- }
-
- /* If this request crosses a chunk boundary, we need to
- * split it. This will only happen for 1 PAGE (or less) requests.
- */
- if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
- > chunk_sects
- && (conf->geo.near_copies < conf->geo.raid_disks
- || conf->prev.near_copies < conf->prev.raid_disks))) {
- struct bio_pair *bp;
- /* Sanity check -- queue functions should prevent this happening */
- if (bio_segments(bio) > 1)
- goto bad_map;
- /* This is a one page bio that upper layers
- * refuse to split for us, so we need to split it.
- */
- bp = bio_split(bio,
- chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
-
- /* Each of these 'make_request' calls will call 'wait_barrier'.
- * If the first succeeds but the second blocks due to the resync
- * thread raising the barrier, we will deadlock because the
- * IO to the underlying device will be queued in generic_make_request
- * and will never complete, so will never reduce nr_pending.
- * So increment nr_waiting here so no new raise_barriers will
- * succeed, and so the second wait_barrier cannot block.
- */
- spin_lock_irq(&conf->resync_lock);
- conf->nr_waiting++;
- spin_unlock_irq(&conf->resync_lock);
-
- make_request(mddev, &bp->bio1);
- make_request(mddev, &bp->bio2);
-
- spin_lock_irq(&conf->resync_lock);
- conf->nr_waiting--;
- wake_up(&conf->wait_barrier);
- spin_unlock_irq(&conf->resync_lock);
-
- bio_pair_release(bp);
- return;
- bad_map:
- printk("md/raid10:%s: make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
- (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
-
- bio_io_error(bio);
- return;
- }
-
- md_write_start(mddev, bio);
-
- /*
- * Register the new request and wait if the reconstruction
- * thread has put up a bar for new requests.
- * Continue immediately if no resync is active currently.
- */
- wait_barrier(conf);
-
sectors = bio_sectors(bio);
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- bio->bi_sector < conf->reshape_progress &&
- bio->bi_sector + sectors > conf->reshape_progress) {
+ bio->bi_iter.bi_sector < conf->reshape_progress &&
+ bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
/* IO spans the reshape position. Need to wait for
* reshape to pass
*/
allow_barrier(conf);
wait_event(conf->wait_barrier,
- conf->reshape_progress <= bio->bi_sector ||
- conf->reshape_progress >= bio->bi_sector + sectors);
+ conf->reshape_progress <= bio->bi_iter.bi_sector ||
+ conf->reshape_progress >= bio->bi_iter.bi_sector +
+ sectors);
wait_barrier(conf);
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
bio_data_dir(bio) == WRITE &&
(mddev->reshape_backwards
- ? (bio->bi_sector < conf->reshape_safe &&
- bio->bi_sector + sectors > conf->reshape_progress)
- : (bio->bi_sector + sectors > conf->reshape_safe &&
- bio->bi_sector < conf->reshape_progress))) {
+ ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
+ bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
+ : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
+ bio->bi_iter.bi_sector < conf->reshape_progress))) {
/* Need to update reshape_position in metadata */
mddev->reshape_position = conf->reshape_progress;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1273,7 +1210,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
r10_bio->sectors = sectors;
r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_sector;
+ r10_bio->sector = bio->bi_iter.bi_sector;
r10_bio->state = 0;
/* We might need to issue multiple reads to different
@@ -1302,13 +1239,13 @@ read_again:
slot = r10_bio->read_slot;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
r10_bio->devs[slot].bio = read_bio;
r10_bio->devs[slot].rdev = rdev;
- read_bio->bi_sector = r10_bio->devs[slot].addr +
+ read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
choose_data_offset(r10_bio, rdev);
read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
@@ -1320,7 +1257,7 @@ read_again:
* need another r10_bio.
*/
sectors_handled = (r10_bio->sectors + max_sectors
- - bio->bi_sector);
+ - bio->bi_iter.bi_sector);
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (bio->bi_phys_segments == 0)
@@ -1341,7 +1278,8 @@ read_again:
r10_bio->sectors = bio_sectors(bio) - sectors_handled;
r10_bio->state = 0;
r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_sector + sectors_handled;
+ r10_bio->sector = bio->bi_iter.bi_sector +
+ sectors_handled;
goto read_again;
} else
generic_make_request(read_bio);
@@ -1499,7 +1437,8 @@ retry_write:
bio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
}
- sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+ sectors_handled = r10_bio->sector + max_sectors -
+ bio->bi_iter.bi_sector;
atomic_set(&r10_bio->remaining, 1);
bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
@@ -1510,11 +1449,11 @@ retry_write:
if (r10_bio->devs[i].bio) {
struct md_rdev *rdev = conf->mirrors[d].rdev;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
r10_bio->devs[i].bio = mbio;
- mbio->bi_sector = (r10_bio->devs[i].addr+
+ mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
choose_data_offset(r10_bio,
rdev));
mbio->bi_bdev = rdev->bdev;
@@ -1553,11 +1492,11 @@ retry_write:
rdev = conf->mirrors[d].rdev;
}
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
r10_bio->devs[i].repl_bio = mbio;
- mbio->bi_sector = (r10_bio->devs[i].addr +
+ mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
choose_data_offset(
r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
@@ -1591,11 +1530,57 @@ retry_write:
r10_bio->sectors = bio_sectors(bio) - sectors_handled;
r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_sector + sectors_handled;
+ r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
r10_bio->state = 0;
goto retry_write;
}
one_write_done(r10_bio);
+}
+
+static void make_request(struct mddev *mddev, struct bio *bio)
+{
+ struct r10conf *conf = mddev->private;
+ sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
+ int chunk_sects = chunk_mask + 1;
+
+ struct bio *split;
+
+ if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+ md_flush_request(mddev, bio);
+ return;
+ }
+
+ md_write_start(mddev, bio);
+
+ /*
+ * Register the new request and wait if the reconstruction
+ * thread has put up a bar for new requests.
+ * Continue immediately if no resync is active currently.
+ */
+ wait_barrier(conf);
+
+ do {
+
+ /*
+ * If this request crosses a chunk boundary, we need to split
+ * it.
+ */
+ if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
+ bio_sectors(bio) > chunk_sects
+ && (conf->geo.near_copies < conf->geo.raid_disks
+ || conf->prev.near_copies <
+ conf->prev.raid_disks))) {
+ split = bio_split(bio, chunk_sects -
+ (bio->bi_iter.bi_sector &
+ (chunk_sects - 1)),
+ GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
+
+ __make_request(mddev, split);
+ } while (split != bio);
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
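Read as a whole, the chunk-boundary handling that replaces the old bio_pair logic is just this loop (an illustrative restatement of the code added above, not separate code):

        do {
                sector_t offset = bio->bi_iter.bi_sector & (chunk_sects - 1);

                if (offset + bio_sectors(bio) > chunk_sects &&
                    (conf->geo.near_copies < conf->geo.raid_disks ||
                     conf->prev.near_copies < conf->prev.raid_disks)) {
                        split = bio_split(bio, chunk_sects - offset,
                                          GFP_NOIO, fs_bio_set);
                        bio_chain(split, bio);  /* 'bio' completes only after
                                                 * 'split' has completed */
                } else {
                        split = bio;            /* last (or only) piece */
                }
                __make_request(mddev, split);
        } while (split != bio);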
@@ -1782,6 +1767,7 @@ static int raid10_spare_active(struct mddev *mddev)
}
sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
} else if (tmp->rdev
+ && tmp->rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
count++;
@@ -2123,10 +2109,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_reset(tbio);
tbio->bi_vcnt = vcnt;
- tbio->bi_size = r10_bio->sectors << 9;
+ tbio->bi_iter.bi_size = r10_bio->sectors << 9;
tbio->bi_rw = WRITE;
tbio->bi_private = r10_bio;
- tbio->bi_sector = r10_bio->devs[i].addr;
+ tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
for (j=0; j < vcnt ; j++) {
tbio->bi_io_vec[j].bv_offset = 0;
@@ -2143,7 +2129,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
- tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
+ tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
generic_make_request(tbio);
}
@@ -2613,8 +2599,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors' */
wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(wbio, sector - bio->bi_sector, sectors);
- wbio->bi_sector = (r10_bio->devs[i].addr+
+ bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
+ wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
choose_data_offset(r10_bio, rdev) +
(sector - r10_bio->sector));
wbio->bi_bdev = rdev->bdev;
@@ -2686,12 +2672,10 @@ read_more:
(unsigned long long)r10_bio->sector);
bio = bio_clone_mddev(r10_bio->master_bio,
GFP_NOIO, mddev);
- md_trim_bio(bio,
- r10_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
r10_bio->devs[slot].bio = bio;
r10_bio->devs[slot].rdev = rdev;
- bio->bi_sector = r10_bio->devs[slot].addr
+ bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
+ choose_data_offset(r10_bio, rdev);
bio->bi_bdev = rdev->bdev;
bio->bi_rw = READ | do_sync;
@@ -2702,7 +2686,7 @@ read_more:
struct bio *mbio = r10_bio->master_bio;
int sectors_handled =
r10_bio->sector + max_sectors
- - mbio->bi_sector;
+ - mbio->bi_iter.bi_sector;
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (mbio->bi_phys_segments == 0)
@@ -2720,7 +2704,7 @@ read_more:
set_bit(R10BIO_ReadError,
&r10_bio->state);
r10_bio->mddev = mddev;
- r10_bio->sector = mbio->bi_sector
+ r10_bio->sector = mbio->bi_iter.bi_sector
+ sectors_handled;
goto read_more;
@@ -3158,7 +3142,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
from_addr = r10_bio->devs[j].addr;
- bio->bi_sector = from_addr + rdev->data_offset;
+ bio->bi_iter.bi_sector = from_addr +
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
atomic_inc(&rdev->nr_pending);
/* and we write to 'i' (if not in_sync) */
@@ -3182,7 +3167,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = to_addr
+ bio->bi_iter.bi_sector = to_addr
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
atomic_inc(&r10_bio->remaining);
@@ -3211,7 +3196,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = to_addr + rdev->data_offset;
+ bio->bi_iter.bi_sector = to_addr +
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
atomic_inc(&r10_bio->remaining);
break;
@@ -3329,7 +3315,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
- bio->bi_sector = sector +
+ bio->bi_iter.bi_sector = sector +
conf->mirrors[d].rdev->data_offset;
bio->bi_bdev = conf->mirrors[d].rdev->bdev;
count++;
@@ -3351,7 +3337,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = sector +
+ bio->bi_iter.bi_sector = sector +
conf->mirrors[d].replacement->data_offset;
bio->bi_bdev = conf->mirrors[d].replacement->bdev;
count++;
@@ -3398,7 +3384,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio2 = bio2->bi_next) {
/* remove last page from this bio */
bio2->bi_vcnt--;
- bio2->bi_size -= len;
+ bio2->bi_iter.bi_size -= len;
bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
}
goto bio_full;
@@ -4414,7 +4400,7 @@ read_more:
read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
read_bio->bi_bdev = rdev->bdev;
- read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset);
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_sync_read;
@@ -4422,7 +4408,7 @@ read_more:
read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
read_bio->bi_flags |= 1 << BIO_UPTODATE;
read_bio->bi_vcnt = 0;
- read_bio->bi_size = 0;
+ read_bio->bi_iter.bi_size = 0;
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
@@ -4448,7 +4434,8 @@ read_more:
bio_reset(b);
b->bi_bdev = rdev2->bdev;
- b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
+ b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
+ rdev2->new_data_offset;
b->bi_private = r10_bio;
b->bi_end_io = end_reshape_write;
b->bi_rw = WRITE;
@@ -4475,7 +4462,7 @@ read_more:
bio2 = bio2->bi_next) {
/* Remove last page from this bio */
bio2->bi_vcnt--;
- bio2->bi_size -= len;
+ bio2->bi_iter.bi_size -= len;
bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
}
goto bio_full;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7ff4f252ca1a..0a180cdf8458 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -85,6 +85,42 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
return &conf->stripe_hashtbl[hash];
}
+static inline int stripe_hash_locks_hash(sector_t sect)
+{
+ return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
+}
+
+static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
+{
+ spin_lock_irq(conf->hash_locks + hash);
+ spin_lock(&conf->device_lock);
+}
+
+static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
+{
+ spin_unlock(&conf->device_lock);
+ spin_unlock_irq(conf->hash_locks + hash);
+}
+
+static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
+{
+ int i;
+ local_irq_disable();
+ spin_lock(conf->hash_locks);
+ for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
+ spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
+ spin_lock(&conf->device_lock);
+}
+
+static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
+{
+ int i;
+ spin_unlock(&conf->device_lock);
+ for (i = NR_STRIPE_HASH_LOCKS; i; i--)
+ spin_unlock(conf->hash_locks + i - 1);
+ local_irq_enable();
+}
+
/* bios attached to a stripe+device for I/O are linked together in bi_sector
* order without overlap. There may be several bios per stripe+device, and
* a bio could span several devices.
@@ -97,7 +133,7 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
int sectors = bio_sectors(bio);
- if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
+ if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
return bio->bi_next;
else
return NULL;
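The hash-lock helpers added above fix one lock order for the whole driver; a short sketch of that order and of a typical single-bucket user (the caller shown is an assumption modelled on drop_one_stripe()/resize_stripes() later in this diff):

        /* Lock order, innermost last:
         *   hash_locks[0] -> hash_locks[1] -> ... -> hash_locks[7] -> device_lock
         * lock_all_device_hash_locks_irq() walks the array in index order, so it
         * can never deadlock against a path that takes a single bucket lock.
         */
        int hash = stripe_hash_locks_hash(sector);
        struct stripe_head *sh;

        spin_lock_irq(conf->hash_locks + hash);   /* this bucket's hash list ... */
        sh = get_free_stripe(conf, hash);         /* ... and inactive_list[hash] */
        spin_unlock_irq(conf->hash_locks + hash);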
@@ -189,7 +225,7 @@ static void return_io(struct bio *return_bi)
return_bi = bi->bi_next;
bi->bi_next = NULL;
- bi->bi_size = 0;
+ bi->bi_iter.bi_size = 0;
trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
bi, 0);
bio_endio(bi, 0);
@@ -249,7 +285,8 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
}
}
-static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
+static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
+ struct list_head *temp_inactive_list)
{
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
@@ -278,19 +315,63 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
< IO_THRESHOLD)
md_wakeup_thread(conf->mddev->thread);
atomic_dec(&conf->active_stripes);
- if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
- list_add_tail(&sh->lru, &conf->inactive_list);
- wake_up(&conf->wait_for_stripe);
- if (conf->retry_read_aligned)
- md_wakeup_thread(conf->mddev->thread);
- }
+ if (!test_bit(STRIPE_EXPANDING, &sh->state))
+ list_add_tail(&sh->lru, temp_inactive_list);
}
}
-static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
+static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
+ struct list_head *temp_inactive_list)
{
if (atomic_dec_and_test(&sh->count))
- do_release_stripe(conf, sh);
+ do_release_stripe(conf, sh, temp_inactive_list);
+}
+
+/*
+ * @hash may be NR_STRIPE_HASH_LOCKS, in which case temp_inactive_list is an
+ * array of NR_STRIPE_HASH_LOCKS lists, one per hash bucket.
+ *
+ * Be careful: only one task may add/delete stripes from temp_inactive_list at
+ * any given time. Adding stripes only takes the device lock, while deleting
+ * stripes only takes the hash lock.
+ */
+static void release_inactive_stripe_list(struct r5conf *conf,
+ struct list_head *temp_inactive_list,
+ int hash)
+{
+ int size;
+ bool do_wakeup = false;
+ unsigned long flags;
+
+ if (hash == NR_STRIPE_HASH_LOCKS) {
+ size = NR_STRIPE_HASH_LOCKS;
+ hash = NR_STRIPE_HASH_LOCKS - 1;
+ } else
+ size = 1;
+ while (size) {
+ struct list_head *list = &temp_inactive_list[size - 1];
+
+ /*
+ * We don't hold any lock here yet; get_active_stripe() might
+ * remove stripes from the list
+ */
+ if (!list_empty_careful(list)) {
+ spin_lock_irqsave(conf->hash_locks + hash, flags);
+ if (list_empty(conf->inactive_list + hash) &&
+ !list_empty(list))
+ atomic_dec(&conf->empty_inactive_list_nr);
+ list_splice_tail_init(list, conf->inactive_list + hash);
+ do_wakeup = true;
+ spin_unlock_irqrestore(conf->hash_locks + hash, flags);
+ }
+ size--;
+ hash--;
+ }
+
+ if (do_wakeup) {
+ wake_up(&conf->wait_for_stripe);
+ if (conf->retry_read_aligned)
+ md_wakeup_thread(conf->mddev->thread);
+ }
}
static struct llist_node *llist_reverse_order(struct llist_node *head)
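Typical use of the new batching: queue freed stripes on a private list while holding device_lock, then splice them into the shared inactive lists under the hash lock alone. A hedged sketch, assuming the caller already holds a reference on 'sh' (modelled on the release_stripe()/raid5_unplug() changes later in this diff):

        LIST_HEAD(tmp);
        int hash = sh->hash_lock_index;

        spin_lock_irq(&conf->device_lock);
        __release_stripe(conf, sh, &tmp);       /* drop ref; park on private list */
        spin_unlock_irq(&conf->device_lock);
        release_inactive_stripe_list(conf, &tmp, hash); /* splice + wake-ups */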
@@ -308,7 +389,8 @@ static struct llist_node *llist_reverse_order(struct llist_node *head)
}
/* should hold conf->device_lock already */
-static int release_stripe_list(struct r5conf *conf)
+static int release_stripe_list(struct r5conf *conf,
+ struct list_head *temp_inactive_list)
{
struct stripe_head *sh;
int count = 0;
@@ -317,6 +399,8 @@ static int release_stripe_list(struct r5conf *conf)
head = llist_del_all(&conf->released_stripes);
head = llist_reverse_order(head);
while (head) {
+ int hash;
+
sh = llist_entry(head, struct stripe_head, release_list);
head = llist_next(head);
/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
@@ -327,7 +411,8 @@ static int release_stripe_list(struct r5conf *conf)
* again, the count is always > 1. This is true for
* STRIPE_ON_UNPLUG_LIST bit too.
*/
- __release_stripe(conf, sh);
+ hash = sh->hash_lock_index;
+ __release_stripe(conf, sh, &temp_inactive_list[hash]);
count++;
}
@@ -338,6 +423,8 @@ static void release_stripe(struct stripe_head *sh)
{
struct r5conf *conf = sh->raid_conf;
unsigned long flags;
+ struct list_head list;
+ int hash;
bool wakeup;
if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
@@ -350,8 +437,11 @@ slow_path:
local_irq_save(flags);
/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
- do_release_stripe(conf, sh);
+ INIT_LIST_HEAD(&list);
+ hash = sh->hash_lock_index;
+ do_release_stripe(conf, sh, &list);
spin_unlock(&conf->device_lock);
+ release_inactive_stripe_list(conf, &list, hash);
}
local_irq_restore(flags);
}
@@ -376,18 +466,21 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
/* find an idle stripe, make sure it is unhashed, and return it. */
-static struct stripe_head *get_free_stripe(struct r5conf *conf)
+static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
{
struct stripe_head *sh = NULL;
struct list_head *first;
- if (list_empty(&conf->inactive_list))
+ if (list_empty(conf->inactive_list + hash))
goto out;
- first = conf->inactive_list.next;
+ first = (conf->inactive_list + hash)->next;
sh = list_entry(first, struct stripe_head, lru);
list_del_init(first);
remove_hash(sh);
atomic_inc(&conf->active_stripes);
+ BUG_ON(hash != sh->hash_lock_index);
+ if (list_empty(conf->inactive_list + hash))
+ atomic_inc(&conf->empty_inactive_list_nr);
out:
return sh;
}
@@ -430,7 +523,7 @@ static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
struct r5conf *conf = sh->raid_conf;
- int i;
+ int i, seq;
BUG_ON(atomic_read(&sh->count) != 0);
BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
@@ -440,7 +533,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
(unsigned long long)sh->sector);
remove_hash(sh);
-
+retry:
+ seq = read_seqcount_begin(&conf->gen_lock);
sh->generation = conf->generation - previous;
sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
sh->sector = sector;
@@ -462,6 +556,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
dev->flags = 0;
raid5_build_block(sh, i, previous);
}
+ if (read_seqcount_retry(&conf->gen_lock, seq))
+ goto retry;
insert_hash(conf, sh);
sh->cpu = smp_processor_id();
}
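The retry added to init_stripe() is the usual seqcount read loop; written without the goto it is equivalent to the sketch below (the writer side brackets reshape commits with write_seqcount_begin/end on conf->gen_lock, as seen in the raid5_start_reshape hunk later in this diff):

        int seq;

        do {
                seq = read_seqcount_begin(&conf->gen_lock);
                sh->generation = conf->generation - previous;
                sh->disks = previous ? conf->previous_raid_disks
                                     : conf->raid_disks;
                sh->sector = sector;
                stripe_set_idx(sector, conf, previous, sh);
                /* ... per-device setup elided ... */
        } while (read_seqcount_retry(&conf->gen_lock, seq));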
@@ -566,29 +662,31 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
int previous, int noblock, int noquiesce)
{
struct stripe_head *sh;
+ int hash = stripe_hash_locks_hash(sector);
pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
- spin_lock_irq(&conf->device_lock);
+ spin_lock_irq(conf->hash_locks + hash);
do {
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0 || noquiesce,
- conf->device_lock);
+ *(conf->hash_locks + hash));
sh = __find_stripe(conf, sector, conf->generation - previous);
if (!sh) {
if (!conf->inactive_blocked)
- sh = get_free_stripe(conf);
+ sh = get_free_stripe(conf, hash);
if (noblock && sh == NULL)
break;
if (!sh) {
conf->inactive_blocked = 1;
- wait_event_lock_irq(conf->wait_for_stripe,
- !list_empty(&conf->inactive_list) &&
- (atomic_read(&conf->active_stripes)
- < (conf->max_nr_stripes *3/4)
- || !conf->inactive_blocked),
- conf->device_lock);
+ wait_event_lock_irq(
+ conf->wait_for_stripe,
+ !list_empty(conf->inactive_list + hash) &&
+ (atomic_read(&conf->active_stripes)
+ < (conf->max_nr_stripes * 3 / 4)
+ || !conf->inactive_blocked),
+ *(conf->hash_locks + hash));
conf->inactive_blocked = 0;
} else
init_stripe(sh, sector, previous);
@@ -599,6 +697,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
&& !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
&& !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
} else {
+ spin_lock(&conf->device_lock);
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
if (list_empty(&sh->lru) &&
@@ -609,6 +708,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
sh->group->stripes_cnt--;
sh->group = NULL;
}
+ spin_unlock(&conf->device_lock);
}
}
} while (sh == NULL);
@@ -616,7 +716,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
if (sh)
atomic_inc(&sh->count);
- spin_unlock_irq(&conf->device_lock);
+ spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
@@ -766,10 +866,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_rw, i);
atomic_inc(&sh->count);
if (use_new_offset(conf, sh))
- bi->bi_sector = (sh->sector
+ bi->bi_iter.bi_sector = (sh->sector
+ rdev->new_data_offset);
else
- bi->bi_sector = (sh->sector
+ bi->bi_iter.bi_sector = (sh->sector
+ rdev->data_offset);
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
bi->bi_rw |= REQ_FLUSH;
@@ -777,7 +877,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
- bi->bi_size = STRIPE_SIZE;
+ bi->bi_iter.bi_size = STRIPE_SIZE;
+ /*
+ * If this is a discard request, set bi_vcnt to 0. We don't
+ * want to confuse SCSI because SCSI will replace the payload
+ */
+ if (rw & REQ_DISCARD)
+ bi->bi_vcnt = 0;
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
@@ -807,15 +913,21 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rbi->bi_rw, i);
atomic_inc(&sh->count);
if (use_new_offset(conf, sh))
- rbi->bi_sector = (sh->sector
+ rbi->bi_iter.bi_sector = (sh->sector
+ rrdev->new_data_offset);
else
- rbi->bi_sector = (sh->sector
+ rbi->bi_iter.bi_sector = (sh->sector
+ rrdev->data_offset);
rbi->bi_vcnt = 1;
rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
rbi->bi_io_vec[0].bv_offset = 0;
- rbi->bi_size = STRIPE_SIZE;
+ rbi->bi_iter.bi_size = STRIPE_SIZE;
+ /*
+ * If this is a discard request, set bi_vcnt to 0. We don't
+ * want to confuse SCSI because SCSI will replace the payload
+ */
+ if (rw & REQ_DISCARD)
+ rbi->bi_vcnt = 0;
if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
rbi, disk_devt(conf->mddev->gendisk),
@@ -837,24 +949,24 @@ static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
sector_t sector, struct dma_async_tx_descriptor *tx)
{
- struct bio_vec *bvl;
+ struct bio_vec bvl;
+ struct bvec_iter iter;
struct page *bio_page;
- int i;
int page_offset;
struct async_submit_ctl submit;
enum async_tx_flags flags = 0;
- if (bio->bi_sector >= sector)
- page_offset = (signed)(bio->bi_sector - sector) * 512;
+ if (bio->bi_iter.bi_sector >= sector)
+ page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
else
- page_offset = (signed)(sector - bio->bi_sector) * -512;
+ page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
if (frombio)
flags |= ASYNC_TX_FENCE;
init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
- bio_for_each_segment(bvl, bio, i) {
- int len = bvl->bv_len;
+ bio_for_each_segment(bvl, bio, iter) {
+ int len = bvl.bv_len;
int clen;
int b_offset = 0;
@@ -870,8 +982,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
clen = len;
if (clen > 0) {
- b_offset += bvl->bv_offset;
- bio_page = bvl->bv_page;
+ b_offset += bvl.bv_offset;
+ bio_page = bvl.bv_page;
if (frombio)
tx = async_memcpy(page, bio_page, page_offset,
b_offset, clen, &submit);
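async_copy_data() above also shows the new iteration idiom: bio_for_each_segment() now yields a struct bio_vec by value and keeps its position in a struct bvec_iter, so the old integer segment index disappears. Minimal sketch (process_segment() is a made-up placeholder, not a kernel function):

        struct bio_vec bvl;
        struct bvec_iter iter;

        bio_for_each_segment(bvl, bio, iter) {
                /* each segment: page + offset + length */
                process_segment(bvl.bv_page, bvl.bv_offset, bvl.bv_len);
        }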
@@ -914,7 +1026,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
BUG_ON(!dev->read);
rbi = dev->read;
dev->read = NULL;
- while (rbi && rbi->bi_sector <
+ while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
rbi2 = r5_next_bio(rbi, dev->sector);
if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -950,7 +1062,7 @@ static void ops_run_biofill(struct stripe_head *sh)
dev->read = rbi = dev->toread;
dev->toread = NULL;
spin_unlock_irq(&sh->stripe_lock);
- while (rbi && rbi->bi_sector <
+ while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
tx = async_copy_data(0, rbi, dev->page,
dev->sector, tx);
@@ -1292,7 +1404,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
wbi = dev->written = chosen;
spin_unlock_irq(&sh->stripe_lock);
- while (wbi && wbi->bi_sector <
+ while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
if (wbi->bi_rw & REQ_FUA)
set_bit(R5_WantFUA, &dev->flags);
@@ -1584,7 +1696,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
put_cpu();
}
-static int grow_one_stripe(struct r5conf *conf)
+static int grow_one_stripe(struct r5conf *conf, int hash)
{
struct stripe_head *sh;
sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
@@ -1600,6 +1712,7 @@ static int grow_one_stripe(struct r5conf *conf)
kmem_cache_free(conf->slab_cache, sh);
return 0;
}
+ sh->hash_lock_index = hash;
/* we just created an active stripe so... */
atomic_set(&sh->count, 1);
atomic_inc(&conf->active_stripes);
@@ -1612,6 +1725,7 @@ static int grow_stripes(struct r5conf *conf, int num)
{
struct kmem_cache *sc;
int devs = max(conf->raid_disks, conf->previous_raid_disks);
+ int hash;
if (conf->mddev->gendisk)
sprintf(conf->cache_name[0],
@@ -1629,9 +1743,13 @@ static int grow_stripes(struct r5conf *conf, int num)
return 1;
conf->slab_cache = sc;
conf->pool_size = devs;
- while (num--)
- if (!grow_one_stripe(conf))
+ hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
+ while (num--) {
+ if (!grow_one_stripe(conf, hash))
return 1;
+ conf->max_nr_stripes++;
+ hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
+ }
return 0;
}
@@ -1689,6 +1807,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
int err;
struct kmem_cache *sc;
int i;
+ int hash, cnt;
if (newsize <= conf->pool_size)
return 0; /* never bother to shrink */
@@ -1728,19 +1847,29 @@ static int resize_stripes(struct r5conf *conf, int newsize)
* OK, we have enough stripes, start collecting inactive
* stripes and copying them over
*/
+ hash = 0;
+ cnt = 0;
list_for_each_entry(nsh, &newstripes, lru) {
- spin_lock_irq(&conf->device_lock);
- wait_event_lock_irq(conf->wait_for_stripe,
- !list_empty(&conf->inactive_list),
- conf->device_lock);
- osh = get_free_stripe(conf);
- spin_unlock_irq(&conf->device_lock);
+ lock_device_hash_lock(conf, hash);
+ wait_event_cmd(conf->wait_for_stripe,
+ !list_empty(conf->inactive_list + hash),
+ unlock_device_hash_lock(conf, hash),
+ lock_device_hash_lock(conf, hash));
+ osh = get_free_stripe(conf, hash);
+ unlock_device_hash_lock(conf, hash);
atomic_set(&nsh->count, 1);
for(i=0; i<conf->pool_size; i++)
nsh->dev[i].page = osh->dev[i].page;
for( ; i<newsize; i++)
nsh->dev[i].page = NULL;
+ nsh->hash_lock_index = hash;
kmem_cache_free(conf->slab_cache, osh);
+ cnt++;
+ if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
+ !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
+ hash++;
+ cnt = 0;
+ }
}
kmem_cache_destroy(conf->slab_cache);
@@ -1799,13 +1928,13 @@ static int resize_stripes(struct r5conf *conf, int newsize)
return err;
}
-static int drop_one_stripe(struct r5conf *conf)
+static int drop_one_stripe(struct r5conf *conf, int hash)
{
struct stripe_head *sh;
- spin_lock_irq(&conf->device_lock);
- sh = get_free_stripe(conf);
- spin_unlock_irq(&conf->device_lock);
+ spin_lock_irq(conf->hash_locks + hash);
+ sh = get_free_stripe(conf, hash);
+ spin_unlock_irq(conf->hash_locks + hash);
if (!sh)
return 0;
BUG_ON(atomic_read(&sh->count));
@@ -1817,8 +1946,10 @@ static int drop_one_stripe(struct r5conf *conf)
static void shrink_stripes(struct r5conf *conf)
{
- while (drop_one_stripe(conf))
- ;
+ int hash;
+ for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++)
+ while (drop_one_stripe(conf, hash))
+ ;
if (conf->slab_cache)
kmem_cache_destroy(conf->slab_cache);
@@ -1923,6 +2054,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
mdname(conf->mddev), bdn);
else
retry = 1;
+ if (set_bad && test_bit(In_sync, &rdev->flags)
+ && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+ retry = 1;
if (retry)
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
set_bit(R5_ReadError, &sh->dev[i].flags);
@@ -2494,7 +2628,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
int firstwrite=0;
pr_debug("adding bi b#%llu to stripe s#%llu\n",
- (unsigned long long)bi->bi_sector,
+ (unsigned long long)bi->bi_iter.bi_sector,
(unsigned long long)sh->sector);
/*
@@ -2512,12 +2646,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
firstwrite = 1;
} else
bip = &sh->dev[dd_idx].toread;
- while (*bip && (*bip)->bi_sector < bi->bi_sector) {
- if (bio_end_sector(*bip) > bi->bi_sector)
+ while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
+ if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
goto overlap;
bip = & (*bip)->bi_next;
}
- if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
+ if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
goto overlap;
BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2531,7 +2665,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
sector_t sector = sh->dev[dd_idx].sector;
for (bi=sh->dev[dd_idx].towrite;
sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
- bi && bi->bi_sector <= sector;
+ bi && bi->bi_iter.bi_sector <= sector;
bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
if (bio_end_sector(bi) >= sector)
sector = bio_end_sector(bi);
@@ -2541,7 +2675,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
}
pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
- (unsigned long long)(*bip)->bi_sector,
+ (unsigned long long)(*bip)->bi_iter.bi_sector,
(unsigned long long)sh->sector, dd_idx);
spin_unlock_irq(&sh->stripe_lock);
@@ -2616,7 +2750,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
- while (bi && bi->bi_sector <
+ while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2635,7 +2769,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bi = sh->dev[i].written;
sh->dev[i].written = NULL;
if (bi) bitmap_end = 1;
- while (bi && bi->bi_sector <
+ while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2659,7 +2793,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
spin_unlock_irq(&sh->stripe_lock);
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
- while (bi && bi->bi_sector <
+ while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector);
@@ -2883,7 +3017,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
clear_bit(R5_UPTODATE, &dev->flags);
wbi = dev->written;
dev->written = NULL;
- while (wbi && wbi->bi_sector <
+ while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
wbi2 = r5_next_bio(wbi, dev->sector);
if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -2910,6 +3044,14 @@ static void handle_stripe_clean_event(struct r5conf *conf,
}
/* now that discard is done we can proceed with any sync */
clear_bit(STRIPE_DISCARD, &sh->state);
+ /*
+ * SCSI discard will change some bio fields and the stripe has
+ * no updated data, so remove it from the hash list; the stripe
+ * will be reinitialized on next use
+ */
+ spin_lock_irq(&conf->device_lock);
+ remove_hash(sh);
+ spin_unlock_irq(&conf->device_lock);
if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
set_bit(STRIPE_HANDLE, &sh->state);
@@ -3894,7 +4036,8 @@ static void raid5_activate_delayed(struct r5conf *conf)
}
}
-static void activate_bit_delay(struct r5conf *conf)
+static void activate_bit_delay(struct r5conf *conf,
+ struct list_head *temp_inactive_list)
{
/* device_lock is held */
struct list_head head;
@@ -3902,9 +4045,11 @@ static void activate_bit_delay(struct r5conf *conf)
list_del_init(&conf->bitmap_list);
while (!list_empty(&head)) {
struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
+ int hash;
list_del_init(&sh->lru);
atomic_inc(&sh->count);
- __release_stripe(conf, sh);
+ hash = sh->hash_lock_index;
+ __release_stripe(conf, sh, &temp_inactive_list[hash]);
}
}
@@ -3920,7 +4065,7 @@ int md_raid5_congested(struct mddev *mddev, int bits)
return 1;
if (conf->quiesce)
return 1;
- if (list_empty_careful(&conf->inactive_list))
+ if (atomic_read(&conf->empty_inactive_list_nr))
return 1;
return 0;
@@ -3964,7 +4109,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
- sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+ sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio);
@@ -4101,9 +4246,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
/*
* compute position
*/
- align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
- 0,
- &dd_idx, NULL);
+ align_bi->bi_iter.bi_sector =
+ raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
+ 0, &dd_idx, NULL);
end_sector = bio_end_sector(align_bi);
rcu_read_lock();
@@ -4128,7 +4273,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
if (!bio_fits_rdev(align_bi) ||
- is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
+ is_badblock(rdev, align_bi->bi_iter.bi_sector,
+ bio_sectors(align_bi),
&first_bad, &bad_sectors)) {
/* too big in some way, or has a known bad block */
bio_put(align_bi);
@@ -4137,7 +4283,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
}
/* No reshape active, so we can trust rdev->data_offset */
- align_bi->bi_sector += rdev->data_offset;
+ align_bi->bi_iter.bi_sector += rdev->data_offset;
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
@@ -4149,7 +4295,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
align_bi, disk_devt(mddev->gendisk),
- raid_bio->bi_sector);
+ raid_bio->bi_iter.bi_sector);
generic_make_request(align_bi);
return 1;
} else {
@@ -4250,6 +4396,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
struct raid5_plug_cb {
struct blk_plug_cb cb;
struct list_head list;
+ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
};
static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
@@ -4260,6 +4407,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
struct mddev *mddev = cb->cb.data;
struct r5conf *conf = mddev->private;
int cnt = 0;
+ int hash;
if (cb->list.next && !list_empty(&cb->list)) {
spin_lock_irq(&conf->device_lock);
@@ -4277,11 +4425,14 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
* STRIPE_ON_RELEASE_LIST could be set here. In that
* case, the count is always > 1 here
*/
- __release_stripe(conf, sh);
+ hash = sh->hash_lock_index;
+ __release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
cnt++;
}
spin_unlock_irq(&conf->device_lock);
}
+ release_inactive_stripe_list(conf, cb->temp_inactive_list,
+ NR_STRIPE_HASH_LOCKS);
if (mddev->queue)
trace_block_unplug(mddev->queue, cnt, !from_schedule);
kfree(cb);
@@ -4302,8 +4453,12 @@ static void release_stripe_plug(struct mddev *mddev,
cb = container_of(blk_cb, struct raid5_plug_cb, cb);
- if (cb->list.next == NULL)
+ if (cb->list.next == NULL) {
+ int i;
INIT_LIST_HEAD(&cb->list);
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ INIT_LIST_HEAD(cb->temp_inactive_list + i);
+ }
if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
list_add_tail(&sh->lru, &cb->list);
@@ -4323,8 +4478,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
/* Skip discard while reshape is happening */
return;
- logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
- last_sector = bi->bi_sector + (bi->bi_size>>9);
+ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -4428,7 +4583,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
return;
}
- logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -4901,7 +5056,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
int remaining;
int handled = 0;
- logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ logical_sector = raid_bio->bi_iter.bi_sector &
+ ~((sector_t)STRIPE_SECTORS-1);
sector = raid5_compute_sector(conf, logical_sector,
0, &dd_idx, NULL);
last_sector = bio_end_sector(raid_bio);
@@ -4948,27 +5104,45 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
}
static int handle_active_stripes(struct r5conf *conf, int group,
- struct r5worker *worker)
+ struct r5worker *worker,
+ struct list_head *temp_inactive_list)
{
struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
- int i, batch_size = 0;
+ int i, batch_size = 0, hash;
+ bool release_inactive = false;
while (batch_size < MAX_STRIPE_BATCH &&
(sh = __get_priority_stripe(conf, group)) != NULL)
batch[batch_size++] = sh;
- if (batch_size == 0)
- return batch_size;
+ if (batch_size == 0) {
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ if (!list_empty(temp_inactive_list + i))
+ break;
+ if (i == NR_STRIPE_HASH_LOCKS)
+ return batch_size;
+ release_inactive = true;
+ }
spin_unlock_irq(&conf->device_lock);
+ release_inactive_stripe_list(conf, temp_inactive_list,
+ NR_STRIPE_HASH_LOCKS);
+
+ if (release_inactive) {
+ spin_lock_irq(&conf->device_lock);
+ return 0;
+ }
+
for (i = 0; i < batch_size; i++)
handle_stripe(batch[i]);
cond_resched();
spin_lock_irq(&conf->device_lock);
- for (i = 0; i < batch_size; i++)
- __release_stripe(conf, batch[i]);
+ for (i = 0; i < batch_size; i++) {
+ hash = batch[i]->hash_lock_index;
+ __release_stripe(conf, batch[i], &temp_inactive_list[hash]);
+ }
return batch_size;
}
@@ -4989,9 +5163,10 @@ static void raid5_do_work(struct work_struct *work)
while (1) {
int batch_size, released;
- released = release_stripe_list(conf);
+ released = release_stripe_list(conf, worker->temp_inactive_list);
- batch_size = handle_active_stripes(conf, group_id, worker);
+ batch_size = handle_active_stripes(conf, group_id, worker,
+ worker->temp_inactive_list);
worker->working = false;
if (!batch_size && !released)
break;
@@ -5030,7 +5205,7 @@ static void raid5d(struct md_thread *thread)
struct bio *bio;
int batch_size, released;
- released = release_stripe_list(conf);
+ released = release_stripe_list(conf, conf->temp_inactive_list);
if (
!list_empty(&conf->bitmap_list)) {
@@ -5040,7 +5215,7 @@ static void raid5d(struct md_thread *thread)
bitmap_unplug(mddev->bitmap);
spin_lock_irq(&conf->device_lock);
conf->seq_write = conf->seq_flush;
- activate_bit_delay(conf);
+ activate_bit_delay(conf, conf->temp_inactive_list);
}
raid5_activate_delayed(conf);
@@ -5054,7 +5229,8 @@ static void raid5d(struct md_thread *thread)
handled++;
}
- batch_size = handle_active_stripes(conf, ANY_GROUP, NULL);
+ batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
+ conf->temp_inactive_list);
if (!batch_size && !released)
break;
handled += batch_size;
@@ -5090,22 +5266,29 @@ raid5_set_cache_size(struct mddev *mddev, int size)
{
struct r5conf *conf = mddev->private;
int err;
+ int hash;
if (size <= 16 || size > 32768)
return -EINVAL;
+ hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
while (size < conf->max_nr_stripes) {
- if (drop_one_stripe(conf))
+ if (drop_one_stripe(conf, hash))
conf->max_nr_stripes--;
else
break;
+ hash--;
+ if (hash < 0)
+ hash = NR_STRIPE_HASH_LOCKS - 1;
}
err = md_allow_write(mddev);
if (err)
return err;
+ hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
while (size > conf->max_nr_stripes) {
- if (grow_one_stripe(conf))
+ if (grow_one_stripe(conf, hash))
conf->max_nr_stripes++;
else break;
+ hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
}
return 0;
}
@@ -5256,7 +5439,7 @@ static struct attribute_group raid5_attrs_group = {
static int alloc_thread_groups(struct r5conf *conf, int cnt)
{
- int i, j;
+ int i, j, k;
ssize_t size;
struct r5worker *workers;
@@ -5286,8 +5469,12 @@ static int alloc_thread_groups(struct r5conf *conf, int cnt)
group->workers = workers + i * cnt;
for (j = 0; j < cnt; j++) {
- group->workers[j].group = group;
- INIT_WORK(&group->workers[j].work, raid5_do_work);
+ struct r5worker *worker = group->workers + j;
+ worker->group = group;
+ INIT_WORK(&worker->work, raid5_do_work);
+
+ for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
+ INIT_LIST_HEAD(worker->temp_inactive_list + k);
}
}
@@ -5438,6 +5625,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
struct md_rdev *rdev;
struct disk_info *disk;
char pers_name[6];
+ int i;
if (mddev->new_level != 5
&& mddev->new_level != 4
@@ -5482,7 +5670,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
INIT_LIST_HEAD(&conf->hold_list);
INIT_LIST_HEAD(&conf->delayed_list);
INIT_LIST_HEAD(&conf->bitmap_list);
- INIT_LIST_HEAD(&conf->inactive_list);
init_llist_head(&conf->released_stripes);
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
@@ -5508,6 +5695,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
goto abort;
+ spin_lock_init(conf->hash_locks);
+ for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
+ spin_lock_init(conf->hash_locks + i);
+
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ INIT_LIST_HEAD(conf->inactive_list + i);
+
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ INIT_LIST_HEAD(conf->temp_inactive_list + i);
+
conf->level = mddev->new_level;
if (raid5_alloc_percpu(conf) != 0)
goto abort;
@@ -5548,7 +5745,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
else
conf->max_degraded = 1;
conf->algorithm = mddev->new_layout;
- conf->max_nr_stripes = NR_STRIPES;
conf->reshape_progress = mddev->reshape_position;
if (conf->reshape_progress != MaxSector) {
conf->prev_chunk_sectors = mddev->chunk_sectors;
@@ -5557,7 +5753,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
- if (grow_stripes(conf, conf->max_nr_stripes)) {
+ atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
+ if (grow_stripes(conf, NR_STRIPES)) {
printk(KERN_ERR
"md/raid:%s: couldn't allocate %dkB for buffers\n",
mdname(mddev), memory);
@@ -6363,12 +6560,18 @@ static int raid5_start_reshape(struct mddev *mddev)
if (!mddev->sync_thread) {
mddev->recovery = 0;
spin_lock_irq(&conf->device_lock);
+ write_seqcount_begin(&conf->gen_lock);
mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
+ mddev->new_chunk_sectors =
+ conf->chunk_sectors = conf->prev_chunk_sectors;
+ mddev->new_layout = conf->algorithm = conf->prev_algo;
rdev_for_each(rdev, mddev)
rdev->new_data_offset = rdev->data_offset;
smp_wmb();
+ conf->generation --;
conf->reshape_progress = MaxSector;
mddev->reshape_position = MaxSector;
+ write_seqcount_end(&conf->gen_lock);
spin_unlock_irq(&conf->device_lock);
return -EAGAIN;
}
@@ -6456,27 +6659,28 @@ static void raid5_quiesce(struct mddev *mddev, int state)
break;
case 1: /* stop all writes */
- spin_lock_irq(&conf->device_lock);
+ lock_all_device_hash_locks_irq(conf);
/* '2' tells resync/reshape to pause so that all
* active stripes can drain
*/
conf->quiesce = 2;
- wait_event_lock_irq(conf->wait_for_stripe,
+ wait_event_cmd(conf->wait_for_stripe,
atomic_read(&conf->active_stripes) == 0 &&
atomic_read(&conf->active_aligned_reads) == 0,
- conf->device_lock);
+ unlock_all_device_hash_locks_irq(conf),
+ lock_all_device_hash_locks_irq(conf));
conf->quiesce = 1;
- spin_unlock_irq(&conf->device_lock);
+ unlock_all_device_hash_locks_irq(conf);
/* allow reshape to continue */
wake_up(&conf->wait_for_overlap);
break;
case 0: /* re-enable writes */
- spin_lock_irq(&conf->device_lock);
+ lock_all_device_hash_locks_irq(conf);
conf->quiesce = 0;
wake_up(&conf->wait_for_stripe);
wake_up(&conf->wait_for_overlap);
- spin_unlock_irq(&conf->device_lock);
+ unlock_all_device_hash_locks_irq(conf);
break;
}
}
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2113ffa82c7a..01ad8ae8f578 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -49,7 +49,7 @@
* can't distinguish between a clean block that has been generated
* from parity calculations, and a clean block that has been
* successfully written to the spare ( or to parity when resyncing).
- * To distingush these states we have a stripe bit STRIPE_INSYNC that
+ * To distinguish these states we have a stripe bit STRIPE_INSYNC that
* is set whenever a write is scheduled to the spare, or to the parity
* disc if there is no spare. A sync request clears this bit, and
* when we find it set with no buffers locked, we know the sync is
@@ -205,6 +205,7 @@ struct stripe_head {
short pd_idx; /* parity disk index */
short qd_idx; /* 'Q' disk index for raid6 */
short ddf_layout;/* use DDF ordering to calculate Q */
+ short hash_lock_index;
unsigned long state; /* state flags */
atomic_t count; /* nr of active thread/requests */
int bm_seq; /* sequence number for bitmap flushes */
@@ -367,9 +368,18 @@ struct disk_info {
struct md_rdev *rdev, *replacement;
};
+/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
+ * This is because we sometimes take all the spinlocks
+ * and creating that much locking depth can cause
+ * problems.
+ */
+#define NR_STRIPE_HASH_LOCKS 8
+#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
+
struct r5worker {
struct work_struct work;
struct r5worker_group *group;
+ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
bool working;
};
@@ -382,6 +392,8 @@ struct r5worker_group {
struct r5conf {
struct hlist_head *stripe_hashtbl;
+ /* each lock protects only its corresponding hash list and inactive_list */
+ spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS];
struct mddev *mddev;
int chunk_sectors;
int level, algorithm;
@@ -462,7 +474,8 @@ struct r5conf {
* Free stripes pool
*/
atomic_t active_stripes;
- struct list_head inactive_list;
+ struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
+ atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
@@ -477,6 +490,7 @@ struct r5conf {
* the new thread here until we fully activate the array.
*/
struct md_thread *thread;
+ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
struct r5worker_group *worker_groups;
int group_cnt;
int worker_cnt_per_group;
diff --git a/drivers/media/common/b2c2/flexcop-sram.c b/drivers/media/common/b2c2/flexcop-sram.c
index f2199e43e803..185c285f70fc 100644
--- a/drivers/media/common/b2c2/flexcop-sram.c
+++ b/drivers/media/common/b2c2/flexcop-sram.c
@@ -85,7 +85,7 @@ static void flexcop_sram_write(struct adapter *adapter, u32 bank, u32 addr, u8 *
while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) {
mdelay(1);
retries--;
- };
+ }
if (retries == 0)
printk("%s: SRAM timeout\n", __func__);
@@ -110,7 +110,7 @@ static void flex_sram_read(struct adapter *adapter, u32 bank, u32 addr, u8 *buf,
while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) {
mdelay(1);
retries--;
- };
+ }
if (retries == 0)
printk("%s: SRAM timeout\n", __func__);
@@ -122,7 +122,7 @@ static void flex_sram_read(struct adapter *adapter, u32 bank, u32 addr, u8 *buf,
while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) {
mdelay(1);
retries--;
- };
+ }
if (retries == 0)
printk("%s: SRAM timeout\n", __func__);
diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
index bb6ee5191eb1..34b0d0ddeef3 100644
--- a/drivers/media/common/saa7146/saa7146_core.c
+++ b/drivers/media/common/saa7146/saa7146_core.c
@@ -411,7 +411,7 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
saa7146_write(dev, MC2, 0xf8000000);
/* request an interrupt for the saa7146 */
- err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED | IRQF_DISABLED,
+ err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED,
dev->name, dev);
if (err < 0) {
ERR("request_irq() failed\n");
@@ -524,8 +524,6 @@ static void saa7146_remove_one(struct pci_dev *pdev)
DEB_EE("dev:%p\n", dev);
dev->ext->detach(dev);
- /* Zero the PCI drvdata after use. */
- pci_set_drvdata(pdev, NULL);
/* shut down all video dma transfers */
saa7146_write(dev, MC1, 0x00ff0000);
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index a142f7942a01..050984c5b1e3 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -922,8 +922,8 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
u32 i, *ptr;
u8 *payload = firmware->payload;
int rc = 0;
- firmware->start_address = le32_to_cpu(firmware->start_address);
- firmware->length = le32_to_cpu(firmware->length);
+ firmware->start_address = le32_to_cpup((__le32 *)&firmware->start_address);
+ firmware->length = le32_to_cpup((__le32 *)&firmware->length);
mem_address = firmware->start_address;
@@ -982,7 +982,7 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
if (rc < 0)
goto exit_fw_download;
- sms_err("sending MSG_SMS_DATA_VALIDITY_REQ expecting 0x%x",
+ sms_debug("sending MSG_SMS_DATA_VALIDITY_REQ expecting 0x%x",
calc_checksum);
SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_DATA_VALIDITY_REQ,
sizeof(msg->x_msg_header) +
@@ -1562,7 +1562,7 @@ void smscore_onresponse(struct smscore_device_t *coredev,
{
struct sms_msg_data *validity = (struct sms_msg_data *) phdr;
- sms_err("MSG_SMS_DATA_VALIDITY_RES, checksum = 0x%x",
+ sms_debug("MSG_SMS_DATA_VALIDITY_RES, checksum = 0x%x",
validity->msg_data[0]);
complete(&coredev->data_validity_done);
break;
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index 63676a8b024c..85151efdd94c 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -44,14 +44,14 @@ module_param_named(debug, sms_dbg, int, 0644);
MODULE_PARM_DESC(debug, "set debug level (info=1, adv=2 (or-able))");
-u32 sms_to_guard_interval_table[] = {
+static u32 sms_to_guard_interval_table[] = {
[0] = GUARD_INTERVAL_1_32,
[1] = GUARD_INTERVAL_1_16,
[2] = GUARD_INTERVAL_1_8,
[3] = GUARD_INTERVAL_1_4,
};
-u32 sms_to_code_rate_table[] = {
+static u32 sms_to_code_rate_table[] = {
[0] = FEC_1_2,
[1] = FEC_2_3,
[2] = FEC_3_4,
@@ -60,14 +60,14 @@ u32 sms_to_code_rate_table[] = {
};
-u32 sms_to_hierarchy_table[] = {
+static u32 sms_to_hierarchy_table[] = {
[0] = HIERARCHY_NONE,
[1] = HIERARCHY_1,
[2] = HIERARCHY_2,
[3] = HIERARCHY_4,
};
-u32 sms_to_modulation_table[] = {
+static u32 sms_to_modulation_table[] = {
[0] = QPSK,
[1] = QAM_16,
[2] = QAM_64,
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 3485655fa082..58de4410c525 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -476,7 +476,9 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
size_t count)
{
- spin_lock(&demux->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&demux->lock, flags);
while (count--) {
if (buf[0] == 0x47)
@@ -484,7 +486,7 @@ void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
buf += 188;
}
- spin_unlock(&demux->lock);
+ spin_unlock_irqrestore(&demux->lock, flags);
}
EXPORT_SYMBOL(dvb_dmx_swfilter_packets);
@@ -519,8 +521,9 @@ static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf,
{
int p = 0, i, j;
const u8 *q;
+ unsigned long flags;
- spin_lock(&demux->lock);
+ spin_lock_irqsave(&demux->lock, flags);
if (demux->tsbufp) { /* tsbuf[0] is now 0x47. */
i = demux->tsbufp;
@@ -564,7 +567,7 @@ static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf,
}
bailout:
- spin_unlock(&demux->lock);
+ spin_unlock_irqrestore(&demux->lock, flags);
}
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count)
@@ -581,11 +584,13 @@ EXPORT_SYMBOL(dvb_dmx_swfilter_204);
void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count)
{
- spin_lock(&demux->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&demux->lock, flags);
demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts, DMX_OK);
- spin_unlock(&demux->lock);
+ spin_unlock_irqrestore(&demux->lock, flags);
}
EXPORT_SYMBOL(dvb_dmx_swfilter_raw);
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 0e2ec6f73b05..bddbab43a2df 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -200,6 +200,13 @@ config DVB_CX24116
help
A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
+config DVB_CX24117
+ tristate "Conexant CX24117 based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A Dual DVB-S/S2 tuner module. Say Y when you want to support this frontend.
+
config DVB_SI21XX
tristate "Silicon Labs SI21XX based"
depends on DVB_CORE && I2C
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index cebc0faffab5..f9cb43d9aed9 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_DVB_ATBM8830) += atbm8830.o
obj-$(CONFIG_DVB_DUMMY_FE) += dvb_dummy_fe.o
obj-$(CONFIG_DVB_AF9013) += af9013.o
obj-$(CONFIG_DVB_CX24116) += cx24116.o
+obj-$(CONFIG_DVB_CX24117) += cx24117.o
obj-$(CONFIG_DVB_SI21XX) += si21xx.o
obj-$(CONFIG_DVB_STV0288) += stv0288.o
obj-$(CONFIG_DVB_STB6000) += stb6000.o
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 0cd6927e654c..95b981cd7115 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -378,7 +378,7 @@ static int cx24110_set_voltage (struct dvb_frontend* fe, fe_sec_voltage_t voltag
return cx24110_writereg(state,0x76,(cx24110_readreg(state,0x76)&0x3b)|0x40);
default:
return -EINVAL;
- };
+ }
}
static int cx24110_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t burst)
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
new file mode 100644
index 000000000000..476b422ccf19
--- /dev/null
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -0,0 +1,1650 @@
+/*
+ Conexant cx24117/cx24132 - Dual DVBS/S2 Satellite demod/tuner driver
+
+ Copyright (C) 2013 Luis Alves <ljalvs@gmail.com>
+ July, 6th 2013
+ First release based on cx24116 driver by:
+ Steven Toth and Georg Acher, Darron Broad, Igor Liplianin
+ Cards currently supported:
+ TBS6980 - Dual DVBS/S2 PCIe card
+ TBS6981 - Dual DVBS/S2 PCIe card
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+
+#include "tuner-i2c.h"
+#include "dvb_frontend.h"
+#include "cx24117.h"
+
+
+#define CX24117_DEFAULT_FIRMWARE "dvb-fe-cx24117.fw"
+#define CX24117_SEARCH_RANGE_KHZ 5000
+
+/* known registers */
+#define CX24117_REG_COMMAND (0x00) /* command buffer */
+#define CX24117_REG_EXECUTE (0x1f) /* execute command */
+
+#define CX24117_REG_FREQ3_0 (0x34) /* frequency */
+#define CX24117_REG_FREQ2_0 (0x35)
+#define CX24117_REG_FREQ1_0 (0x36)
+#define CX24117_REG_STATE0 (0x39)
+#define CX24117_REG_SSTATUS0 (0x3a) /* demod0 signal high / status */
+#define CX24117_REG_SIGNAL0 (0x3b)
+#define CX24117_REG_FREQ5_0 (0x3c) /* +-freq */
+#define CX24117_REG_FREQ6_0 (0x3d)
+#define CX24117_REG_SRATE2_0 (0x3e) /* +- 1000 * srate */
+#define CX24117_REG_SRATE1_0 (0x3f)
+#define CX24117_REG_QUALITY2_0 (0x40)
+#define CX24117_REG_QUALITY1_0 (0x41)
+
+#define CX24117_REG_BER4_0 (0x47)
+#define CX24117_REG_BER3_0 (0x48)
+#define CX24117_REG_BER2_0 (0x49)
+#define CX24117_REG_BER1_0 (0x4a)
+#define CX24117_REG_DVBS_UCB2_0 (0x4b)
+#define CX24117_REG_DVBS_UCB1_0 (0x4c)
+#define CX24117_REG_DVBS2_UCB2_0 (0x50)
+#define CX24117_REG_DVBS2_UCB1_0 (0x51)
+#define CX24117_REG_QSTATUS0 (0x93)
+#define CX24117_REG_CLKDIV0 (0xe6)
+#define CX24117_REG_RATEDIV0 (0xf0)
+
+
+#define CX24117_REG_FREQ3_1 (0x55) /* frequency */
+#define CX24117_REG_FREQ2_1 (0x56)
+#define CX24117_REG_FREQ1_1 (0x57)
+#define CX24117_REG_STATE1 (0x5a)
+#define CX24117_REG_SSTATUS1 (0x5b) /* demod1 signal high / status */
+#define CX24117_REG_SIGNAL1 (0x5c)
+#define CX24117_REG_FREQ5_1 (0x5d) /* +- freq */
+#define CX24117_REG_FREQ4_1 (0x5e)
+#define CX24117_REG_SRATE2_1 (0x5f)
+#define CX24117_REG_SRATE1_1 (0x60)
+#define CX24117_REG_QUALITY2_1 (0x61)
+#define CX24117_REG_QUALITY1_1 (0x62)
+#define CX24117_REG_BER4_1 (0x68)
+#define CX24117_REG_BER3_1 (0x69)
+#define CX24117_REG_BER2_1 (0x6a)
+#define CX24117_REG_BER1_1 (0x6b)
+#define CX24117_REG_DVBS_UCB2_1 (0x6c)
+#define CX24117_REG_DVBS_UCB1_1 (0x6d)
+#define CX24117_REG_DVBS2_UCB2_1 (0x71)
+#define CX24117_REG_DVBS2_UCB1_1 (0x72)
+#define CX24117_REG_QSTATUS1 (0x9f)
+#define CX24117_REG_CLKDIV1 (0xe7)
+#define CX24117_REG_RATEDIV1 (0xf1)
+
+
+/* arg buffer size */
+#define CX24117_ARGLEN (0x1e)
+
+/* rolloff */
+#define CX24117_ROLLOFF_020 (0x00)
+#define CX24117_ROLLOFF_025 (0x01)
+#define CX24117_ROLLOFF_035 (0x02)
+
+/* pilot bit */
+#define CX24117_PILOT_OFF (0x00)
+#define CX24117_PILOT_ON (0x40)
+#define CX24117_PILOT_AUTO (0x80)
+
+/* signal status */
+#define CX24117_HAS_SIGNAL (0x01)
+#define CX24117_HAS_CARRIER (0x02)
+#define CX24117_HAS_VITERBI (0x04)
+#define CX24117_HAS_SYNCLOCK (0x08)
+#define CX24117_STATUS_MASK (0x0f)
+#define CX24117_SIGNAL_MASK (0xc0)
+
+
+/* arg offset for DiSEqC */
+#define CX24117_DISEQC_DEMOD (1)
+#define CX24117_DISEQC_BURST (2)
+#define CX24117_DISEQC_ARG3_2 (3) /* unknown value=2 */
+#define CX24117_DISEQC_ARG4_0 (4) /* unknown value=0 */
+#define CX24117_DISEQC_ARG5_0 (5) /* unknown value=0 */
+#define CX24117_DISEQC_MSGLEN (6)
+#define CX24117_DISEQC_MSGOFS (7)
+
+/* DiSEqC burst */
+#define CX24117_DISEQC_MINI_A (0)
+#define CX24117_DISEQC_MINI_B (1)
+
+
+#define CX24117_PNE (0) /* 0 disabled / 2 enabled */
+#define CX24117_OCC (1) /* 0 disabled / 1 enabled */
+
+
+enum cmds {
+ CMD_SET_VCO = 0x10,
+ CMD_TUNEREQUEST = 0x11,
+ CMD_MPEGCONFIG = 0x13,
+ CMD_TUNERINIT = 0x14,
+ CMD_LNBSEND = 0x21, /* Formerly CMD_SEND_DISEQC */
+ CMD_LNBDCLEVEL = 0x22,
+ CMD_SET_TONE = 0x23,
+ CMD_UPDFWVERS = 0x35,
+ CMD_TUNERSLEEP = 0x36,
+};
+
+static LIST_HEAD(hybrid_tuner_instance_list);
+static DEFINE_MUTEX(cx24117_list_mutex);
+
+/* The Demod/Tuner can't easily provide these, we cache them */
+struct cx24117_tuning {
+ u32 frequency;
+ u32 symbol_rate;
+ fe_spectral_inversion_t inversion;
+ fe_code_rate_t fec;
+
+ fe_delivery_system_t delsys;
+ fe_modulation_t modulation;
+ fe_pilot_t pilot;
+ fe_rolloff_t rolloff;
+
+ /* Demod values */
+ u8 fec_val;
+ u8 fec_mask;
+ u8 inversion_val;
+ u8 pilot_val;
+ u8 rolloff_val;
+};
+
+/* Basic commands that are sent to the firmware */
+struct cx24117_cmd {
+ u8 len;
+ u8 args[CX24117_ARGLEN];
+};
+
+/* common to both fe's */
+struct cx24117_priv {
+ u8 demod_address;
+ struct i2c_adapter *i2c;
+ u8 skip_fw_load;
+ struct mutex fe_lock;
+
+ /* Used for sharing this struct between demods */
+ struct tuner_i2c_props i2c_props;
+ struct list_head hybrid_tuner_instance_list;
+};
+
+/* one per each fe */
+struct cx24117_state {
+ struct cx24117_priv *priv;
+ struct dvb_frontend frontend;
+
+ struct cx24117_tuning dcur;
+ struct cx24117_tuning dnxt;
+ struct cx24117_cmd dsec_cmd;
+
+ int demod;
+};
+
+/* modfec (modulation and FEC) lookup table */
+/* Check cx24116.c for a detailed description of each field */
+static struct cx24117_modfec {
+ fe_delivery_system_t delivery_system;
+ fe_modulation_t modulation;
+ fe_code_rate_t fec;
+ u8 mask; /* In DVBS mode this is used to autodetect */
+ u8 val; /* Passed to the firmware to indicate mode selection */
+} cx24117_modfec_modes[] = {
+ /* QPSK. For unknown rates we set hardware to auto detect 0xfe 0x30 */
+
+ /*mod fec mask val */
+ { SYS_DVBS, QPSK, FEC_NONE, 0xfe, 0x30 },
+ { SYS_DVBS, QPSK, FEC_1_2, 0x02, 0x2e }, /* 00000010 00101110 */
+ { SYS_DVBS, QPSK, FEC_2_3, 0x04, 0x2f }, /* 00000100 00101111 */
+ { SYS_DVBS, QPSK, FEC_3_4, 0x08, 0x30 }, /* 00001000 00110000 */
+ { SYS_DVBS, QPSK, FEC_4_5, 0xfe, 0x30 }, /* 000?0000 ? */
+ { SYS_DVBS, QPSK, FEC_5_6, 0x20, 0x31 }, /* 00100000 00110001 */
+ { SYS_DVBS, QPSK, FEC_6_7, 0xfe, 0x30 }, /* 0?000000 ? */
+ { SYS_DVBS, QPSK, FEC_7_8, 0x80, 0x32 }, /* 10000000 00110010 */
+ { SYS_DVBS, QPSK, FEC_8_9, 0xfe, 0x30 }, /* 0000000? ? */
+ { SYS_DVBS, QPSK, FEC_AUTO, 0xfe, 0x30 },
+ /* NBC-QPSK */
+ { SYS_DVBS2, QPSK, FEC_NONE, 0x00, 0x00 },
+ { SYS_DVBS2, QPSK, FEC_1_2, 0x00, 0x04 },
+ { SYS_DVBS2, QPSK, FEC_3_5, 0x00, 0x05 },
+ { SYS_DVBS2, QPSK, FEC_2_3, 0x00, 0x06 },
+ { SYS_DVBS2, QPSK, FEC_3_4, 0x00, 0x07 },
+ { SYS_DVBS2, QPSK, FEC_4_5, 0x00, 0x08 },
+ { SYS_DVBS2, QPSK, FEC_5_6, 0x00, 0x09 },
+ { SYS_DVBS2, QPSK, FEC_8_9, 0x00, 0x0a },
+ { SYS_DVBS2, QPSK, FEC_9_10, 0x00, 0x0b },
+ { SYS_DVBS2, QPSK, FEC_AUTO, 0x00, 0x00 },
+ /* 8PSK */
+ { SYS_DVBS2, PSK_8, FEC_NONE, 0x00, 0x00 },
+ { SYS_DVBS2, PSK_8, FEC_3_5, 0x00, 0x0c },
+ { SYS_DVBS2, PSK_8, FEC_2_3, 0x00, 0x0d },
+ { SYS_DVBS2, PSK_8, FEC_3_4, 0x00, 0x0e },
+ { SYS_DVBS2, PSK_8, FEC_5_6, 0x00, 0x0f },
+ { SYS_DVBS2, PSK_8, FEC_8_9, 0x00, 0x10 },
+ { SYS_DVBS2, PSK_8, FEC_9_10, 0x00, 0x11 },
+ { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00, 0x00 },
+ /*
+ * 'val' can be found in the FECSTATUS register when tuning.
+ * FECSTATUS will give the actual FEC in use if tuning was successful.
+ */
+};
+
+
+static int cx24117_writereg(struct cx24117_state *state, u8 reg, u8 data)
+{
+ u8 buf[] = { reg, data };
+ struct i2c_msg msg = { .addr = state->priv->demod_address,
+ .flags = 0, .buf = buf, .len = 2 };
+ int ret;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d i2c wr @0x%02x=0x%02x\n",
+ __func__, state->demod, reg, data);
+
+ ret = i2c_transfer(state->priv->i2c, &msg, 1);
+ if (ret < 0) {
+ dev_warn(&state->priv->i2c->dev,
+ "%s: demod%d i2c wr err(%i) @0x%02x=0x%02x\n",
+ KBUILD_MODNAME, state->demod, ret, reg, data);
+ return ret;
+ }
+ return 0;
+}
+
+static int cx24117_writecmd(struct cx24117_state *state,
+ struct cx24117_cmd *cmd)
+{
+ struct i2c_msg msg;
+ u8 buf[CX24117_ARGLEN+1];
+ int ret;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d i2c wr cmd len=%d\n",
+ __func__, state->demod, cmd->len);
+
+ buf[0] = CX24117_REG_COMMAND;
+ memcpy(&buf[1], cmd->args, cmd->len);
+
+ msg.addr = state->priv->demod_address;
+ msg.flags = 0;
+ msg.len = cmd->len+1;
+ msg.buf = buf;
+ ret = i2c_transfer(state->priv->i2c, &msg, 1);
+ if (ret < 0) {
+ dev_warn(&state->priv->i2c->dev,
+ "%s: demod%d i2c wr cmd err(%i) len=%d\n",
+ KBUILD_MODNAME, state->demod, ret, cmd->len);
+ return ret;
+ }
+ return 0;
+}
+
+static int cx24117_readreg(struct cx24117_state *state, u8 reg)
+{
+ int ret;
+ u8 recv = 0;
+ struct i2c_msg msg[] = {
+ { .addr = state->priv->demod_address, .flags = 0,
+ .buf = &reg, .len = 1 },
+ { .addr = state->priv->demod_address, .flags = I2C_M_RD,
+ .buf = &recv, .len = 1 }
+ };
+
+ ret = i2c_transfer(state->priv->i2c, msg, 2);
+ if (ret < 0) {
+ dev_warn(&state->priv->i2c->dev,
+ "%s: demod%d i2c rd err(%d) @0x%x\n",
+ KBUILD_MODNAME, state->demod, ret, reg);
+ return ret;
+ }
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d i2c rd @0x%02x=0x%02x\n",
+ __func__, state->demod, reg, recv);
+
+ return recv;
+}
+
+static int cx24117_readregN(struct cx24117_state *state,
+ u8 reg, u8 *buf, int len)
+{
+ int ret;
+ struct i2c_msg msg[] = {
+ { .addr = state->priv->demod_address, .flags = 0,
+ .buf = &reg, .len = 1 },
+ { .addr = state->priv->demod_address, .flags = I2C_M_RD,
+ .buf = buf, .len = len }
+ };
+
+ ret = i2c_transfer(state->priv->i2c, msg, 2);
+ if (ret < 0) {
+ dev_warn(&state->priv->i2c->dev,
+ "%s: demod%d i2c rd err(%d) @0x%x\n",
+ KBUILD_MODNAME, state->demod, ret, reg);
+ return ret;
+ }
+ return 0;
+}
+
+static int cx24117_set_inversion(struct cx24117_state *state,
+ fe_spectral_inversion_t inversion)
+{
+ dev_dbg(&state->priv->i2c->dev, "%s(%d) demod%d\n",
+ __func__, inversion, state->demod);
+
+ switch (inversion) {
+ case INVERSION_OFF:
+ state->dnxt.inversion_val = 0x00;
+ break;
+ case INVERSION_ON:
+ state->dnxt.inversion_val = 0x04;
+ break;
+ case INVERSION_AUTO:
+ state->dnxt.inversion_val = 0x0C;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ state->dnxt.inversion = inversion;
+
+ return 0;
+}
+
+static int cx24117_lookup_fecmod(struct cx24117_state *state,
+ fe_delivery_system_t d, fe_modulation_t m, fe_code_rate_t f)
+{
+ int i, ret = -EINVAL;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s(demod(0x%02x,0x%02x) demod%d\n",
+ __func__, m, f, state->demod);
+
+ for (i = 0; i < ARRAY_SIZE(cx24117_modfec_modes); i++) {
+ if ((d == cx24117_modfec_modes[i].delivery_system) &&
+ (m == cx24117_modfec_modes[i].modulation) &&
+ (f == cx24117_modfec_modes[i].fec)) {
+ ret = i;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int cx24117_set_fec(struct cx24117_state *state,
+ fe_delivery_system_t delsys, fe_modulation_t mod, fe_code_rate_t fec)
+{
+ int ret;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s(0x%02x,0x%02x) demod%d\n",
+ __func__, mod, fec, state->demod);
+
+ ret = cx24117_lookup_fecmod(state, delsys, mod, fec);
+ if (ret < 0)
+ return ret;
+
+ state->dnxt.fec = fec;
+ state->dnxt.fec_val = cx24117_modfec_modes[ret].val;
+ state->dnxt.fec_mask = cx24117_modfec_modes[ret].mask;
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d mask/val = 0x%02x/0x%02x\n", __func__,
+ state->demod, state->dnxt.fec_mask, state->dnxt.fec_val);
+
+ return 0;
+}
+
+static int cx24117_set_symbolrate(struct cx24117_state *state, u32 rate)
+{
+ dev_dbg(&state->priv->i2c->dev, "%s(%d) demod%d\n",
+ __func__, rate, state->demod);
+
+ state->dnxt.symbol_rate = rate;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d symbol_rate = %d\n",
+ __func__, state->demod, rate);
+
+ return 0;
+}
+
+static int cx24117_load_firmware(struct dvb_frontend *fe,
+ const struct firmware *fw);
+
+static int cx24117_firmware_ondemand(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ const struct firmware *fw;
+ int ret = 0;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d skip_fw_load=%d\n",
+ __func__, state->demod, state->priv->skip_fw_load);
+
+ if (state->priv->skip_fw_load)
+ return 0;
+
+	/* check if firmware is already running */
+ if (cx24117_readreg(state, 0xeb) != 0xa) {
+ /* Load firmware */
+ /* request the firmware, this will block until loaded */
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: Waiting for firmware upload (%s)...\n",
+ __func__, CX24117_DEFAULT_FIRMWARE);
+ ret = request_firmware(&fw, CX24117_DEFAULT_FIRMWARE,
+ state->priv->i2c->dev.parent);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: Waiting for firmware upload(2)...\n", __func__);
+ if (ret) {
+ dev_err(&state->priv->i2c->dev,
+ "%s: No firmware uploaded "
+ "(timeout or file not found?)\n", __func__);
+ return ret;
+ }
+
+ /* Make sure we don't recurse back through here
+ * during loading */
+ state->priv->skip_fw_load = 1;
+
+ ret = cx24117_load_firmware(fe, fw);
+ if (ret)
+ dev_err(&state->priv->i2c->dev,
+ "%s: Writing firmware failed\n", __func__);
+ release_firmware(fw);
+
+ dev_info(&state->priv->i2c->dev,
+ "%s: Firmware upload %s\n", __func__,
+ ret == 0 ? "complete" : "failed");
+
+ /* Ensure firmware is always loaded if required */
+ state->priv->skip_fw_load = 0;
+ }
+
+ return ret;
+}
+
+/* Take a basic firmware command structure, format it
+ * and forward it for processing
+ */
+static int cx24117_cmd_execute_nolock(struct dvb_frontend *fe,
+ struct cx24117_cmd *cmd)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int i, ret;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n",
+ __func__, state->demod);
+
+ /* Load the firmware if required */
+ ret = cx24117_firmware_ondemand(fe);
+ if (ret != 0)
+ return ret;
+
+ /* Write the command */
+ cx24117_writecmd(state, cmd);
+
+ /* Start execution and wait for cmd to terminate */
+ cx24117_writereg(state, CX24117_REG_EXECUTE, 0x01);
+ i = 0;
+ while (cx24117_readreg(state, CX24117_REG_EXECUTE)) {
+ msleep(20);
+ if (i++ > 40) {
+ /* Avoid looping forever if the firmware does
+ not respond */
+ dev_warn(&state->priv->i2c->dev,
+ "%s() Firmware not responding\n", __func__);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int cx24117_cmd_execute(struct dvb_frontend *fe, struct cx24117_cmd *cmd)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int ret;
+
+ mutex_lock(&state->priv->fe_lock);
+ ret = cx24117_cmd_execute_nolock(fe, cmd);
+ mutex_unlock(&state->priv->fe_lock);
+
+ return ret;
+}
+
+static int cx24117_load_firmware(struct dvb_frontend *fe,
+ const struct firmware *fw)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct cx24117_cmd cmd;
+ int i, ret;
+ unsigned char vers[4];
+
+ struct i2c_msg msg;
+ u8 *buf;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d FW is %zu bytes (%02x %02x .. %02x %02x)\n",
+ __func__, state->demod, fw->size, fw->data[0], fw->data[1],
+ fw->data[fw->size - 2], fw->data[fw->size - 1]);
+
+ cx24117_writereg(state, 0xea, 0x00);
+ cx24117_writereg(state, 0xea, 0x01);
+ cx24117_writereg(state, 0xea, 0x00);
+
+ cx24117_writereg(state, 0xce, 0x92);
+
+ cx24117_writereg(state, 0xfb, 0x00);
+ cx24117_writereg(state, 0xfc, 0x00);
+
+ cx24117_writereg(state, 0xc3, 0x04);
+ cx24117_writereg(state, 0xc4, 0x04);
+
+ cx24117_writereg(state, 0xce, 0x00);
+ cx24117_writereg(state, 0xcf, 0x00);
+
+ cx24117_writereg(state, 0xea, 0x00);
+ cx24117_writereg(state, 0xeb, 0x0c);
+ cx24117_writereg(state, 0xec, 0x06);
+ cx24117_writereg(state, 0xed, 0x05);
+ cx24117_writereg(state, 0xee, 0x03);
+ cx24117_writereg(state, 0xef, 0x05);
+
+ cx24117_writereg(state, 0xf3, 0x03);
+ cx24117_writereg(state, 0xf4, 0x44);
+
+ cx24117_writereg(state, CX24117_REG_RATEDIV0, 0x04);
+ cx24117_writereg(state, CX24117_REG_CLKDIV0, 0x02);
+
+ cx24117_writereg(state, CX24117_REG_RATEDIV1, 0x04);
+ cx24117_writereg(state, CX24117_REG_CLKDIV1, 0x02);
+
+ cx24117_writereg(state, 0xf2, 0x04);
+ cx24117_writereg(state, 0xe8, 0x02);
+ cx24117_writereg(state, 0xea, 0x01);
+ cx24117_writereg(state, 0xc8, 0x00);
+ cx24117_writereg(state, 0xc9, 0x00);
+ cx24117_writereg(state, 0xca, 0x00);
+ cx24117_writereg(state, 0xcb, 0x00);
+ cx24117_writereg(state, 0xcc, 0x00);
+ cx24117_writereg(state, 0xcd, 0x00);
+ cx24117_writereg(state, 0xe4, 0x03);
+ cx24117_writereg(state, 0xeb, 0x0a);
+
+ cx24117_writereg(state, 0xfb, 0x00);
+ cx24117_writereg(state, 0xe0, 0x76);
+ cx24117_writereg(state, 0xf7, 0x81);
+ cx24117_writereg(state, 0xf8, 0x00);
+ cx24117_writereg(state, 0xf9, 0x00);
+
+ buf = kmalloc(fw->size + 1, GFP_KERNEL);
+ if (buf == NULL) {
+ state->priv->skip_fw_load = 0;
+ return -ENOMEM;
+ }
+
+ /* fw upload reg */
+ buf[0] = 0xfa;
+ memcpy(&buf[1], fw->data, fw->size);
+
+ /* prepare i2c message to send */
+ msg.addr = state->priv->demod_address;
+ msg.flags = 0;
+ msg.len = fw->size + 1;
+ msg.buf = buf;
+
+ /* send fw */
+	ret = i2c_transfer(state->priv->i2c, &msg, 1);
+	kfree(buf);
+	if (ret < 0)
+		return ret;
+
+ cx24117_writereg(state, 0xf7, 0x0c);
+ cx24117_writereg(state, 0xe0, 0x00);
+
+ /* CMD 1B */
+ cmd.args[0] = 0x1b;
+ cmd.args[1] = 0x00;
+ cmd.args[2] = 0x01;
+ cmd.args[3] = 0x00;
+ cmd.len = 4;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto error;
+
+ /* CMD 10 */
+ cmd.args[0] = CMD_SET_VCO;
+ cmd.args[1] = 0x06;
+ cmd.args[2] = 0x2b;
+ cmd.args[3] = 0xd8;
+ cmd.args[4] = 0xa5;
+ cmd.args[5] = 0xee;
+ cmd.args[6] = 0x03;
+ cmd.args[7] = 0x9d;
+ cmd.args[8] = 0xfc;
+ cmd.args[9] = 0x06;
+ cmd.args[10] = 0x02;
+ cmd.args[11] = 0x9d;
+ cmd.args[12] = 0xfc;
+ cmd.len = 13;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto error;
+
+ /* CMD 15 */
+ cmd.args[0] = 0x15;
+ cmd.args[1] = 0x00;
+ cmd.args[2] = 0x01;
+ cmd.args[3] = 0x00;
+ cmd.args[4] = 0x00;
+ cmd.args[5] = 0x01;
+ cmd.args[6] = 0x01;
+ cmd.args[7] = 0x01;
+ cmd.args[8] = 0x00;
+ cmd.args[9] = 0x05;
+ cmd.args[10] = 0x02;
+ cmd.args[11] = 0x02;
+ cmd.args[12] = 0x00;
+ cmd.len = 13;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto error;
+
+ /* CMD 13 */
+ cmd.args[0] = CMD_MPEGCONFIG;
+ cmd.args[1] = 0x00;
+ cmd.args[2] = 0x00;
+ cmd.args[3] = 0x00;
+ cmd.args[4] = 0x01;
+ cmd.args[5] = 0x00;
+ cmd.len = 6;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto error;
+
+ /* CMD 14 */
+ for (i = 0; i < 2; i++) {
+ cmd.args[0] = CMD_TUNERINIT;
+ cmd.args[1] = (u8) i;
+ cmd.args[2] = 0x00;
+ cmd.args[3] = 0x05;
+ cmd.args[4] = 0x00;
+ cmd.args[5] = 0x00;
+ cmd.args[6] = 0x55;
+ cmd.args[7] = 0x00;
+ cmd.len = 8;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto error;
+ }
+
+ cx24117_writereg(state, 0xce, 0xc0);
+ cx24117_writereg(state, 0xcf, 0x00);
+ cx24117_writereg(state, 0xe5, 0x04);
+
+ /* Firmware CMD 35: Get firmware version */
+ cmd.args[0] = CMD_UPDFWVERS;
+ cmd.len = 2;
+ for (i = 0; i < 4; i++) {
+ cmd.args[1] = i;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto error;
+ vers[i] = cx24117_readreg(state, 0x33);
+ }
+ dev_info(&state->priv->i2c->dev,
+ "%s: FW version %i.%i.%i.%i\n", __func__,
+ vers[0], vers[1], vers[2], vers[3]);
+ return 0;
+error:
+ state->priv->skip_fw_load = 0;
+ dev_err(&state->priv->i2c->dev, "%s() Error running FW.\n", __func__);
+ return ret;
+}
+
+static int cx24117_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int lock;
+
+ lock = cx24117_readreg(state,
+ (state->demod == 0) ? CX24117_REG_SSTATUS0 :
+ CX24117_REG_SSTATUS1) &
+ CX24117_STATUS_MASK;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d status = 0x%02x\n",
+ __func__, state->demod, lock);
+
+ *status = 0;
+
+ if (lock & CX24117_HAS_SIGNAL)
+ *status |= FE_HAS_SIGNAL;
+ if (lock & CX24117_HAS_CARRIER)
+ *status |= FE_HAS_CARRIER;
+ if (lock & CX24117_HAS_VITERBI)
+ *status |= FE_HAS_VITERBI;
+ if (lock & CX24117_HAS_SYNCLOCK)
+ *status |= FE_HAS_SYNC | FE_HAS_LOCK;
+
+ return 0;
+}
+
+static int cx24117_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int ret;
+ u8 buf[4];
+ u8 base_reg = (state->demod == 0) ?
+ CX24117_REG_BER4_0 :
+ CX24117_REG_BER4_1;
+
+ ret = cx24117_readregN(state, base_reg, buf, 4);
+ if (ret != 0)
+ return ret;
+
+	*ber = (buf[0] << 24) | (buf[1] << 16) |
+		(buf[2] << 8) | buf[3];
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d ber=0x%04x\n",
+ __func__, state->demod, *ber);
+
+ return 0;
+}
+
+static int cx24117_read_signal_strength(struct dvb_frontend *fe,
+ u16 *signal_strength)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct cx24117_cmd cmd;
+ int ret;
+ u16 sig_reading;
+ u8 buf[2];
+ u8 reg = (state->demod == 0) ?
+ CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1;
+
+ /* Firmware CMD 1A */
+ cmd.args[0] = 0x1a;
+ cmd.args[1] = (u8) state->demod;
+ cmd.len = 2;
+ ret = cx24117_cmd_execute(fe, &cmd);
+ if (ret != 0)
+ return ret;
+
+ ret = cx24117_readregN(state, reg, buf, 2);
+ if (ret != 0)
+ return ret;
+ sig_reading = ((buf[0] & CX24117_SIGNAL_MASK) << 2) | buf[1];
+
+ *signal_strength = -100 * sig_reading + 94324;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d raw / cooked = 0x%04x / 0x%04x\n",
+ __func__, state->demod, sig_reading, *signal_strength);
+
+ return 0;
+}
+
+static int cx24117_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int ret;
+ u8 buf[2];
+ u8 reg = (state->demod == 0) ?
+ CX24117_REG_QUALITY2_0 : CX24117_REG_QUALITY2_1;
+
+ ret = cx24117_readregN(state, reg, buf, 2);
+ if (ret != 0)
+ return ret;
+
+ *snr = (buf[0] << 8) | buf[1];
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d snr = 0x%04x\n",
+ __func__, state->demod, *snr);
+
+ return ret;
+}
+
+static int cx24117_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ fe_delivery_system_t delsys = fe->dtv_property_cache.delivery_system;
+ int ret;
+ u8 buf[2];
+ u8 reg = (state->demod == 0) ?
+ CX24117_REG_DVBS_UCB2_0 :
+ CX24117_REG_DVBS_UCB2_1;
+
+ switch (delsys) {
+ case SYS_DVBS:
+ break;
+ case SYS_DVBS2:
+ reg += (CX24117_REG_DVBS2_UCB2_0 - CX24117_REG_DVBS_UCB2_0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = cx24117_readregN(state, reg, buf, 2);
+ if (ret != 0)
+ return ret;
+ *ucblocks = (buf[0] << 8) | buf[1];
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d ucb=0x%04x\n",
+ __func__, state->demod, *ucblocks);
+
+ return 0;
+}
+
+/* Overwrite the current tuning params, we are about to tune */
+static void cx24117_clone_params(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ state->dcur = state->dnxt;
+}
+
+/* Wait for LNB */
+static int cx24117_wait_for_lnb(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int i;
+ u8 val, reg = (state->demod == 0) ? CX24117_REG_QSTATUS0 :
+ CX24117_REG_QSTATUS1;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d qstatus = 0x%02x\n",
+ __func__, state->demod, cx24117_readreg(state, reg));
+
+ /* Wait for up to 300 ms */
+ for (i = 0; i < 10; i++) {
+ val = cx24117_readreg(state, reg) & 0x01;
+ if (val != 0)
+ return 0;
+ msleep(30);
+ }
+
+ dev_warn(&state->priv->i2c->dev, "%s: demod%d LNB not ready\n",
+ KBUILD_MODNAME, state->demod);
+
+ return -ETIMEDOUT; /* -EBUSY ? */
+}
+
+static int cx24117_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct cx24117_cmd cmd;
+ int ret;
+ u8 reg = (state->demod == 0) ? 0x10 : 0x20;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d %s\n",
+ __func__, state->demod,
+ voltage == SEC_VOLTAGE_13 ? "SEC_VOLTAGE_13" :
+ voltage == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" :
+ "SEC_VOLTAGE_OFF");
+
+ /* CMD 32 */
+ cmd.args[0] = 0x32;
+ cmd.args[1] = reg;
+ cmd.args[2] = reg;
+ cmd.len = 3;
+ ret = cx24117_cmd_execute(fe, &cmd);
+ if (ret)
+ return ret;
+
+ if ((voltage == SEC_VOLTAGE_13) ||
+ (voltage == SEC_VOLTAGE_18)) {
+ /* CMD 33 */
+ cmd.args[0] = 0x33;
+ cmd.args[1] = reg;
+ cmd.args[2] = reg;
+ cmd.len = 3;
+ ret = cx24117_cmd_execute(fe, &cmd);
+ if (ret != 0)
+ return ret;
+
+ ret = cx24117_wait_for_lnb(fe);
+ if (ret != 0)
+ return ret;
+
+ /* Wait for voltage/min repeat delay */
+ msleep(100);
+
+ /* CMD 22 - CMD_LNBDCLEVEL */
+ cmd.args[0] = CMD_LNBDCLEVEL;
+ cmd.args[1] = state->demod ? 0 : 1;
+ cmd.args[2] = (voltage == SEC_VOLTAGE_18 ? 0x01 : 0x00);
+ cmd.len = 3;
+
+ /* Min delay time before DiSEqC send */
+ msleep(20);
+ } else {
+ cmd.args[0] = 0x33;
+ cmd.args[1] = 0x00;
+ cmd.args[2] = reg;
+ cmd.len = 3;
+ }
+
+ return cx24117_cmd_execute(fe, &cmd);
+}
+
+static int cx24117_set_tone(struct dvb_frontend *fe,
+ fe_sec_tone_mode_t tone)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct cx24117_cmd cmd;
+ int ret;
+
+ dev_dbg(&state->priv->i2c->dev, "%s(%d) demod%d\n",
+ __func__, state->demod, tone);
+ if ((tone != SEC_TONE_ON) && (tone != SEC_TONE_OFF)) {
+ dev_warn(&state->priv->i2c->dev, "%s: demod%d invalid tone=%d\n",
+ KBUILD_MODNAME, state->demod, tone);
+ return -EINVAL;
+ }
+
+ /* Wait for LNB ready */
+ ret = cx24117_wait_for_lnb(fe);
+ if (ret != 0)
+ return ret;
+
+ /* Min delay time after DiSEqC send */
+ msleep(20);
+
+ /* Set the tone */
+ /* CMD 23 - CMD_SET_TONE */
+ cmd.args[0] = CMD_SET_TONE;
+ cmd.args[1] = (state->demod ? 0 : 1);
+ cmd.args[2] = 0x00;
+ cmd.args[3] = 0x00;
+ cmd.len = 5;
+ switch (tone) {
+ case SEC_TONE_ON:
+ cmd.args[4] = 0x01;
+ break;
+ case SEC_TONE_OFF:
+ cmd.args[4] = 0x00;
+ break;
+ }
+
+ msleep(20);
+
+ return cx24117_cmd_execute(fe, &cmd);
+}
+
+/* Initialise DiSEqC */
+static int cx24117_diseqc_init(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+
+ /* Prepare a DiSEqC command */
+ state->dsec_cmd.args[0] = CMD_LNBSEND;
+
+ /* demod */
+ state->dsec_cmd.args[CX24117_DISEQC_DEMOD] = state->demod ? 0 : 1;
+
+ /* DiSEqC burst */
+ state->dsec_cmd.args[CX24117_DISEQC_BURST] = CX24117_DISEQC_MINI_A;
+
+ /* Unknown */
+ state->dsec_cmd.args[CX24117_DISEQC_ARG3_2] = 0x02;
+ state->dsec_cmd.args[CX24117_DISEQC_ARG4_0] = 0x00;
+
+ /* Continuation flag? */
+ state->dsec_cmd.args[CX24117_DISEQC_ARG5_0] = 0x00;
+
+ /* DiSEqC message length */
+ state->dsec_cmd.args[CX24117_DISEQC_MSGLEN] = 0x00;
+
+ /* Command length */
+ state->dsec_cmd.len = 7;
+
+ return 0;
+}
+
+/* Send DiSEqC message */
+static int cx24117_send_diseqc_msg(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *d)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ int i, ret;
+
+ /* Dump DiSEqC message */
+ dev_dbg(&state->priv->i2c->dev, "%s: demod %d (",
+ __func__, state->demod);
+ for (i = 0; i < d->msg_len; i++)
+ dev_dbg(&state->priv->i2c->dev, "0x%02x ", d->msg[i]);
+ dev_dbg(&state->priv->i2c->dev, ")\n");
+
+ /* Validate length */
+ if (d->msg_len > 15)
+ return -EINVAL;
+
+ /* DiSEqC message */
+ for (i = 0; i < d->msg_len; i++)
+ state->dsec_cmd.args[CX24117_DISEQC_MSGOFS + i] = d->msg[i];
+
+ /* DiSEqC message length */
+ state->dsec_cmd.args[CX24117_DISEQC_MSGLEN] = d->msg_len;
+
+ /* Command length */
+ state->dsec_cmd.len = CX24117_DISEQC_MSGOFS +
+ state->dsec_cmd.args[CX24117_DISEQC_MSGLEN];
+
+ /*
+	 * The message is sent with the burst value derived from it when
+	 * possible, otherwise the previously cached burst is used.
+ *
+ * WRITE PORT GROUP COMMAND 38
+ *
+ * 0/A/A: E0 10 38 F0..F3
+ * 1/B/B: E0 10 38 F4..F7
+ * 2/C/A: E0 10 38 F8..FB
+ * 3/D/B: E0 10 38 FC..FF
+ *
+ * databyte[3]= 8421:8421
+ * ABCD:WXYZ
+ * CLR :SET
+ *
+ * WX= PORT SELECT 0..3 (X=TONEBURST)
+ * Y = VOLTAGE (0=13V, 1=18V)
+ * Z = BAND (0=LOW, 1=HIGH(22K))
+ */
+ if (d->msg_len >= 4 && d->msg[2] == 0x38)
+ state->dsec_cmd.args[CX24117_DISEQC_BURST] =
+ ((d->msg[3] & 4) >> 2);
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d burst=%d\n",
+ __func__, state->demod,
+ state->dsec_cmd.args[CX24117_DISEQC_BURST]);
+
+ /* Wait for LNB ready */
+ ret = cx24117_wait_for_lnb(fe);
+ if (ret != 0)
+ return ret;
+
+ /* Wait for voltage/min repeat delay */
+ msleep(100);
+
+ /* Command */
+ ret = cx24117_cmd_execute(fe, &state->dsec_cmd);
+ if (ret != 0)
+ return ret;
+ /*
+ * Wait for send
+ *
+ * Eutelsat spec:
+ * >15ms delay + (XXX determine if FW does this, see set_tone)
+ * 13.5ms per byte +
+ * >15ms delay +
+ * 12.5ms burst +
+ * >15ms delay (XXX determine if FW does this, see set_tone)
+ */
+ msleep((state->dsec_cmd.args[CX24117_DISEQC_MSGLEN] << 4) + 60);
+
+ return 0;
+}
+
+/* Send DiSEqC burst */
+static int cx24117_diseqc_send_burst(struct dvb_frontend *fe,
+ fe_sec_mini_cmd_t burst)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+
+ dev_dbg(&state->priv->i2c->dev, "%s(%d) demod=%d\n",
+ __func__, burst, state->demod);
+
+ /* DiSEqC burst */
+ if (burst == SEC_MINI_A)
+ state->dsec_cmd.args[CX24117_DISEQC_BURST] =
+ CX24117_DISEQC_MINI_A;
+ else if (burst == SEC_MINI_B)
+ state->dsec_cmd.args[CX24117_DISEQC_BURST] =
+ CX24117_DISEQC_MINI_B;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cx24117_get_priv(struct cx24117_priv **priv,
+ struct i2c_adapter *i2c, u8 client_address)
+{
+ int ret;
+
+ mutex_lock(&cx24117_list_mutex);
+ ret = hybrid_tuner_request_state(struct cx24117_priv, (*priv),
+ hybrid_tuner_instance_list, i2c, client_address, "cx24117");
+ mutex_unlock(&cx24117_list_mutex);
+
+ return ret;
+}
+
+static void cx24117_release_priv(struct cx24117_priv *priv)
+{
+ mutex_lock(&cx24117_list_mutex);
+ if (priv != NULL)
+ hybrid_tuner_release_state(priv);
+ mutex_unlock(&cx24117_list_mutex);
+}
+
+static void cx24117_release(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ dev_dbg(&state->priv->i2c->dev, "%s demod%d\n",
+ __func__, state->demod);
+ cx24117_release_priv(state->priv);
+ kfree(state);
+}
+
+static struct dvb_frontend_ops cx24117_ops;
+
+struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct cx24117_state *state = NULL;
+ struct cx24117_priv *priv = NULL;
+ int demod = 0;
+
+ /* get the common data struct for both demods */
+ demod = cx24117_get_priv(&priv, i2c, config->demod_address);
+
+ switch (demod) {
+ case 0:
+		dev_err(&i2c->dev,
+ "%s: Error attaching frontend %d\n",
+ KBUILD_MODNAME, demod);
+ goto error1;
+ break;
+ case 1:
+ /* new priv instance */
+ priv->i2c = i2c;
+ priv->demod_address = config->demod_address;
+ mutex_init(&priv->fe_lock);
+ break;
+ default:
+ /* existing priv instance */
+ break;
+ }
+
+ /* allocate memory for the internal state */
+ state = kzalloc(sizeof(struct cx24117_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error2;
+
+ state->demod = demod - 1;
+ state->priv = priv;
+
+	/* test i2c bus for ack (first instance only) */
+	if (demod == 1) {
+ if (cx24117_readreg(state, 0x00) < 0)
+ goto error3;
+ }
+
+ dev_info(&state->priv->i2c->dev,
+ "%s: Attaching frontend %d\n",
+ KBUILD_MODNAME, state->demod);
+
+ /* create dvb_frontend */
+ memcpy(&state->frontend.ops, &cx24117_ops,
+ sizeof(struct dvb_frontend_ops));
+ state->frontend.demodulator_priv = state;
+ return &state->frontend;
+
+error3:
+ kfree(state);
+error2:
+ cx24117_release_priv(priv);
+error1:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cx24117_attach);
+
+/*
+ * Initialise or wake up device
+ *
+ * Power config will reset and load initial firmware if required
+ */
+static int cx24117_initfe(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct cx24117_cmd cmd;
+ int ret;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n",
+ __func__, state->demod);
+
+ mutex_lock(&state->priv->fe_lock);
+
+ /* Firmware CMD 36: Power config */
+ cmd.args[0] = CMD_TUNERSLEEP;
+ cmd.args[1] = (state->demod ? 1 : 0);
+ cmd.args[2] = 0;
+ cmd.len = 3;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto exit;
+
+ ret = cx24117_diseqc_init(fe);
+ if (ret != 0)
+ goto exit;
+
+ /* CMD 3C */
+ cmd.args[0] = 0x3c;
+ cmd.args[1] = (state->demod ? 1 : 0);
+ cmd.args[2] = 0x10;
+ cmd.args[3] = 0x10;
+ cmd.len = 4;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto exit;
+
+ /* CMD 34 */
+ cmd.args[0] = 0x34;
+ cmd.args[1] = (state->demod ? 1 : 0);
+ cmd.args[2] = CX24117_OCC;
+ cmd.len = 3;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
+
+exit:
+ mutex_unlock(&state->priv->fe_lock);
+
+ return ret;
+}
+
+/*
+ * Put device to sleep
+ */
+static int cx24117_sleep(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct cx24117_cmd cmd;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n",
+ __func__, state->demod);
+
+ /* Firmware CMD 36: Power config */
+ cmd.args[0] = CMD_TUNERSLEEP;
+ cmd.args[1] = (state->demod ? 1 : 0);
+ cmd.args[2] = 1;
+ cmd.len = 3;
+ return cx24117_cmd_execute(fe, &cmd);
+}
+
+/* dvb-core told us to tune, the tv property cache will be complete,
+ * it's safe for us to pull values and use them for tuning purposes.
+ */
+static int cx24117_set_frontend(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct cx24117_cmd cmd;
+ fe_status_t tunerstat;
+ int i, status, ret, retune = 1;
+ u8 reg_clkdiv, reg_ratediv;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n",
+ __func__, state->demod);
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d DVB-S\n",
+ __func__, state->demod);
+
+ /* Only QPSK is supported for DVB-S */
+ if (c->modulation != QPSK) {
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d unsupported modulation (%d)\n",
+ __func__, state->demod, c->modulation);
+ return -EINVAL;
+ }
+
+ /* Pilot doesn't exist in DVB-S, turn bit off */
+ state->dnxt.pilot_val = CX24117_PILOT_OFF;
+
+ /* DVB-S only supports 0.35 */
+ state->dnxt.rolloff_val = CX24117_ROLLOFF_035;
+ break;
+
+ case SYS_DVBS2:
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d DVB-S2\n",
+ __func__, state->demod);
+
+ /*
+ * NBC 8PSK/QPSK with DVB-S is supported for DVB-S2,
+ * but not hardware auto detection
+ */
+ if (c->modulation != PSK_8 && c->modulation != QPSK) {
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d unsupported modulation (%d)\n",
+ __func__, state->demod, c->modulation);
+ return -EOPNOTSUPP;
+ }
+
+ switch (c->pilot) {
+ case PILOT_AUTO:
+ state->dnxt.pilot_val = CX24117_PILOT_AUTO;
+ break;
+ case PILOT_OFF:
+ state->dnxt.pilot_val = CX24117_PILOT_OFF;
+ break;
+ case PILOT_ON:
+ state->dnxt.pilot_val = CX24117_PILOT_ON;
+ break;
+ default:
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d unsupported pilot mode (%d)\n",
+ __func__, state->demod, c->pilot);
+ return -EOPNOTSUPP;
+ }
+
+ switch (c->rolloff) {
+ case ROLLOFF_20:
+ state->dnxt.rolloff_val = CX24117_ROLLOFF_020;
+ break;
+ case ROLLOFF_25:
+ state->dnxt.rolloff_val = CX24117_ROLLOFF_025;
+ break;
+ case ROLLOFF_35:
+ state->dnxt.rolloff_val = CX24117_ROLLOFF_035;
+ break;
+ case ROLLOFF_AUTO:
+ state->dnxt.rolloff_val = CX24117_ROLLOFF_035;
+ /* soft-auto rolloff */
+ retune = 3;
+ break;
+ default:
+ dev_warn(&state->priv->i2c->dev,
+ "%s: demod%d unsupported rolloff (%d)\n",
+ KBUILD_MODNAME, state->demod, c->rolloff);
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ dev_warn(&state->priv->i2c->dev,
+ "%s: demod %d unsupported delivery system (%d)\n",
+ KBUILD_MODNAME, state->demod, c->delivery_system);
+ return -EINVAL;
+ }
+
+ state->dnxt.delsys = c->delivery_system;
+ state->dnxt.modulation = c->modulation;
+ state->dnxt.frequency = c->frequency;
+ state->dnxt.pilot = c->pilot;
+ state->dnxt.rolloff = c->rolloff;
+
+ ret = cx24117_set_inversion(state, c->inversion);
+ if (ret != 0)
+ return ret;
+
+ ret = cx24117_set_fec(state,
+ c->delivery_system, c->modulation, c->fec_inner);
+ if (ret != 0)
+ return ret;
+
+ ret = cx24117_set_symbolrate(state, c->symbol_rate);
+ if (ret != 0)
+ return ret;
+
+ /* discard the 'current' tuning parameters and prepare to tune */
+ cx24117_clone_params(fe);
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: delsys = %d\n", __func__, state->dcur.delsys);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: modulation = %d\n", __func__, state->dcur.modulation);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: frequency = %d\n", __func__, state->dcur.frequency);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: pilot = %d (val = 0x%02x)\n", __func__,
+ state->dcur.pilot, state->dcur.pilot_val);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: retune = %d\n", __func__, retune);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: rolloff = %d (val = 0x%02x)\n", __func__,
+ state->dcur.rolloff, state->dcur.rolloff_val);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: symbol_rate = %d\n", __func__, state->dcur.symbol_rate);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: FEC = %d (mask/val = 0x%02x/0x%02x)\n", __func__,
+ state->dcur.fec, state->dcur.fec_mask, state->dcur.fec_val);
+ dev_dbg(&state->priv->i2c->dev,
+ "%s: Inversion = %d (val = 0x%02x)\n", __func__,
+ state->dcur.inversion, state->dcur.inversion_val);
+
+ /* Prepare a tune request */
+ cmd.args[0] = CMD_TUNEREQUEST;
+
+ /* demod */
+ cmd.args[1] = state->demod;
+
+ /* Frequency */
+ cmd.args[2] = (state->dcur.frequency & 0xff0000) >> 16;
+ cmd.args[3] = (state->dcur.frequency & 0x00ff00) >> 8;
+ cmd.args[4] = (state->dcur.frequency & 0x0000ff);
+
+ /* Symbol Rate */
+ cmd.args[5] = ((state->dcur.symbol_rate / 1000) & 0xff00) >> 8;
+ cmd.args[6] = ((state->dcur.symbol_rate / 1000) & 0x00ff);
+
+ /* Automatic Inversion */
+ cmd.args[7] = state->dcur.inversion_val;
+
+ /* Modulation / FEC / Pilot */
+ cmd.args[8] = state->dcur.fec_val | state->dcur.pilot_val;
+
+ cmd.args[9] = CX24117_SEARCH_RANGE_KHZ >> 8;
+ cmd.args[10] = CX24117_SEARCH_RANGE_KHZ & 0xff;
+
+ cmd.args[11] = state->dcur.rolloff_val;
+ cmd.args[12] = state->dcur.fec_mask;
+
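+	/* select clock/rate divider values according to the symbol rate */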
+ if (state->dcur.symbol_rate > 30000000) {
+ reg_ratediv = 0x04;
+ reg_clkdiv = 0x02;
+ } else if (state->dcur.symbol_rate > 10000000) {
+ reg_ratediv = 0x06;
+ reg_clkdiv = 0x03;
+ } else {
+ reg_ratediv = 0x0a;
+ reg_clkdiv = 0x05;
+ }
+
+ cmd.args[13] = reg_ratediv;
+ cmd.args[14] = reg_clkdiv;
+
+ cx24117_writereg(state, (state->demod == 0) ?
+ CX24117_REG_CLKDIV0 : CX24117_REG_CLKDIV1, reg_clkdiv);
+ cx24117_writereg(state, (state->demod == 0) ?
+ CX24117_REG_RATEDIV0 : CX24117_REG_RATEDIV1, reg_ratediv);
+
+ cmd.args[15] = CX24117_PNE;
+ cmd.len = 16;
+
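+	/* tune, retrying with a lower rolloff value when rolloff is auto */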
+ do {
+ /* Reset status register */
+ status = cx24117_readreg(state, (state->demod == 0) ?
+ CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1) &
+ CX24117_SIGNAL_MASK;
+
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d status_setfe = %02x\n",
+ __func__, state->demod, status);
+
+ cx24117_writereg(state, (state->demod == 0) ?
+ CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1, status);
+
+ /* Tune */
+ ret = cx24117_cmd_execute(fe, &cmd);
+ if (ret != 0)
+ break;
+
+ /*
+ * Wait for up to 500 ms before retrying
+ *
+ * If we are able to tune then generally it occurs within 100ms.
+ * If it takes longer, try a different rolloff setting.
+ */
+ for (i = 0; i < 50; i++) {
+ cx24117_read_status(fe, &tunerstat);
+ status = tunerstat & (FE_HAS_SIGNAL | FE_HAS_SYNC);
+ if (status == (FE_HAS_SIGNAL | FE_HAS_SYNC)) {
+ dev_dbg(&state->priv->i2c->dev,
+ "%s() demod%d tuned\n",
+ __func__, state->demod);
+ return 0;
+ }
+ msleep(20);
+ }
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d not tuned\n",
+ __func__, state->demod);
+
+ /* try next rolloff value */
+		if (state->dcur.rolloff == ROLLOFF_AUTO)
+ cmd.args[11]--;
+
+ } while (--retune);
+ return -EINVAL;
+}
+
+static int cx24117_tune(struct dvb_frontend *fe, bool re_tune,
+ unsigned int mode_flags, unsigned int *delay, fe_status_t *status)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+
+ dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n",
+ __func__, state->demod);
+
+ *delay = HZ / 5;
+ if (re_tune) {
+ int ret = cx24117_set_frontend(fe);
+ if (ret)
+ return ret;
+ }
+ return cx24117_read_status(fe, status);
+}
+
+static int cx24117_get_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_HW;
+}
+
+static int cx24117_get_frontend(struct dvb_frontend *fe)
+{
+ struct cx24117_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct cx24117_cmd cmd;
+ u8 reg, st, inv;
+ int ret, idx;
+ unsigned int freq;
+ short srate_os, freq_os;
+
+ u8 buf[0x1f-4];
+
+ cmd.args[0] = 0x1c;
+ cmd.args[1] = (u8) state->demod;
+ cmd.len = 2;
+ ret = cx24117_cmd_execute(fe, &cmd);
+ if (ret != 0)
+ return ret;
+
+ /* read all required regs at once */
+ reg = (state->demod == 0) ? CX24117_REG_FREQ3_0 : CX24117_REG_FREQ3_1;
+ ret = cx24117_readregN(state, reg, buf, 0x1f-4);
+ if (ret != 0)
+ return ret;
+
+ st = buf[5];
+
+ /* get spectral inversion */
+ inv = (((state->demod == 0) ? ~st : st) >> 6) & 1;
+ if (inv == 0)
+ c->inversion = INVERSION_OFF;
+ else
+ c->inversion = INVERSION_ON;
+
+ /* modulation and fec */
+ idx = st & 0x3f;
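+	/* remap the raw value onto the DVB-S2 rows of cx24117_modfec_modes */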
+ if (c->delivery_system == SYS_DVBS2) {
+ if (idx > 11)
+ idx += 9;
+ else
+ idx += 7;
+ }
+
+ c->modulation = cx24117_modfec_modes[idx].modulation;
+ c->fec_inner = cx24117_modfec_modes[idx].fec;
+
+ /* frequency */
+ freq = (buf[0] << 16) | (buf[1] << 8) | buf[2];
+ freq_os = (buf[8] << 8) | buf[9];
+ c->frequency = freq + freq_os;
+
+ /* symbol rate */
+ srate_os = (buf[10] << 8) | buf[11];
+ c->symbol_rate = -1000 * srate_os + state->dcur.symbol_rate;
+ return 0;
+}
+
+static struct dvb_frontend_ops cx24117_ops = {
+ .delsys = { SYS_DVBS, SYS_DVBS2 },
+ .info = {
+ .name = "Conexant CX24117/CX24132",
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .frequency_stepsize = 1011, /* kHz for QPSK frontends */
+ .frequency_tolerance = 5000,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
+ FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_2G_MODULATION |
+ FE_CAN_QPSK | FE_CAN_RECOVER
+ },
+
+ .release = cx24117_release,
+
+ .init = cx24117_initfe,
+ .sleep = cx24117_sleep,
+ .read_status = cx24117_read_status,
+ .read_ber = cx24117_read_ber,
+ .read_signal_strength = cx24117_read_signal_strength,
+ .read_snr = cx24117_read_snr,
+ .read_ucblocks = cx24117_read_ucblocks,
+ .set_tone = cx24117_set_tone,
+ .set_voltage = cx24117_set_voltage,
+ .diseqc_send_master_cmd = cx24117_send_diseqc_msg,
+ .diseqc_send_burst = cx24117_diseqc_send_burst,
+ .get_frontend_algo = cx24117_get_algo,
+ .tune = cx24117_tune,
+
+ .set_frontend = cx24117_set_frontend,
+ .get_frontend = cx24117_get_frontend,
+};
+
+
+MODULE_DESCRIPTION("DVB Frontend module for Conexant cx24117/cx24132 hardware");
+MODULE_AUTHOR("Luis Alves (ljalvs@gmail.com)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.1");
+MODULE_FIRMWARE(CX24117_DEFAULT_FIRMWARE);
+
diff --git a/drivers/media/dvb-frontends/cx24117.h b/drivers/media/dvb-frontends/cx24117.h
new file mode 100644
index 000000000000..4e59e9574fa7
--- /dev/null
+++ b/drivers/media/dvb-frontends/cx24117.h
@@ -0,0 +1,47 @@
+/*
+ Conexant cx24117/cx24132 - Dual DVBS/S2 Satellite demod/tuner driver
+
+ Copyright (C) 2013 Luis Alves <ljalvs@gmail.com>
+ (based on cx24116.h by Steven Toth)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef CX24117_H
+#define CX24117_H
+
+#include <linux/kconfig.h>
+#include <linux/dvb/frontend.h>
+
+struct cx24117_config {
+ /* the demodulator's i2c address */
+ u8 demod_address;
+};
+
+#if IS_ENABLED(CONFIG_DVB_CX24117)
+extern struct dvb_frontend *cx24117_attach(
+ const struct cx24117_config *config,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *cx24117_attach(
+ const struct cx24117_config *config,
+ struct i2c_adapter *i2c)
+{
+ dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif /* CX24117_H */
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index a771da3e9f99..72fb5838cae0 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -739,7 +739,7 @@ static int cx24123_set_voltage(struct dvb_frontend *fe,
return 0;
default:
return -EINVAL;
- };
+ }
return 0;
}
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 7ca5c69dd200..d9eeeb1dfa96 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -31,7 +31,7 @@ static int cxd2820r_wr_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
{
.addr = i2c,
.flags = 0,
- .len = sizeof(buf),
+ .len = len + 1,
.buf = buf,
}
};
@@ -65,7 +65,7 @@ static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
}, {
.addr = i2c,
.flags = I2C_M_RD,
- .len = sizeof(buf),
+ .len = len,
.buf = buf,
}
};
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index 6201c59a78dd..e540cfb13bac 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -649,9 +649,9 @@ static int dib9000_risc_debug_buf(struct dib9000_state *state, u16 * data, u8 si
b[2 * (size - 2) - 1] = '\0'; /* Bullet proof the buffer */
if (*b == '~') {
b++;
- dprintk(b);
+ dprintk("%s", b);
} else
- dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<emtpy>");
+ dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<empty>");
return 1;
}
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 9a2134792cfa..959ae36403b8 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -46,10 +46,6 @@
#define DRX_I2C_MODEFLAGS 0xC0
#define DRX_I2C_FLAGS 0xF0
-#ifndef SIZEOF_ARRAY
-#define SIZEOF_ARRAY(array) (sizeof((array))/sizeof((array)[0]))
-#endif
-
#define DEFAULT_LOCK_TIMEOUT 1100
#define DRX_CHANNEL_AUTO 0
@@ -1018,7 +1014,7 @@ static int HI_CfgCommand(struct drxd_state *state)
status = Write16(state, HI_RA_RAM_SRV_CMD__A,
HI_RA_RAM_SRV_CMD_CONFIG, 0);
else
- status = HI_Command(state, HI_RA_RAM_SRV_CMD_CONFIG, 0);
+ status = HI_Command(state, HI_RA_RAM_SRV_CMD_CONFIG, NULL);
mutex_unlock(&state->mutex);
return status;
}
@@ -1039,7 +1035,7 @@ static int HI_ResetCommand(struct drxd_state *state)
status = Write16(state, HI_RA_RAM_SRV_RST_KEY__A,
HI_RA_RAM_SRV_RST_KEY_ACT, 0);
if (status == 0)
- status = HI_Command(state, HI_RA_RAM_SRV_CMD_RESET, 0);
+ status = HI_Command(state, HI_RA_RAM_SRV_CMD_RESET, NULL);
mutex_unlock(&state->mutex);
msleep(1);
return status;
@@ -2837,7 +2833,7 @@ static int drxd_init(struct dvb_frontend *fe)
int err = 0;
/* if (request_firmware(&state->fw, "drxd.fw", state->dev)<0) */
- return DRXD_init(state, 0, 0);
+ return DRXD_init(state, NULL, 0);
err = DRXD_init(state, state->fw->data, state->fw->size);
release_firmware(state->fw);
@@ -2973,7 +2969,7 @@ struct dvb_frontend *drxd_attach(const struct drxd_config *config,
mutex_init(&state->mutex);
- if (Read16(state, 0, 0, 0) < 0)
+ if (Read16(state, 0, NULL, 0) < 0)
goto error;
state->frontend.ops = drxd_ops;
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 082014de6875..d416c15691da 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -1083,7 +1083,7 @@ static int hi_cfg_command(struct drxk_state *state)
SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY);
if (status < 0)
goto error;
- status = hi_command(state, SIO_HI_RA_RAM_CMD_CONFIG, 0);
+ status = hi_command(state, SIO_HI_RA_RAM_CMD_CONFIG, NULL);
if (status < 0)
goto error;
@@ -2781,7 +2781,7 @@ static int ConfigureI2CBridge(struct drxk_state *state, bool b_enable_bridge)
goto error;
}
- status = hi_command(state, SIO_HI_RA_RAM_CMD_BRDCTRL, 0);
+ status = hi_command(state, SIO_HI_RA_RAM_CMD_BRDCTRL, NULL);
error:
if (status < 0)
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index facb84841518..a95dfe0a5ce3 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -489,6 +489,7 @@ static int rtl2832_init(struct dvb_frontend *fe)
init = rtl2832_tuner_init_e4000;
break;
case RTL2832_TUNER_R820T:
+ case RTL2832_TUNER_R828D:
len = ARRAY_SIZE(rtl2832_tuner_init_r820t);
init = rtl2832_tuner_init_r820t;
break;
diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h
index 91b2dcf5a6ea..2cfbb6a97061 100644
--- a/drivers/media/dvb-frontends/rtl2832.h
+++ b/drivers/media/dvb-frontends/rtl2832.h
@@ -53,6 +53,7 @@ struct rtl2832_config {
#define RTL2832_TUNER_E4000 0x27
#define RTL2832_TUNER_FC0013 0x29
#define RTL2832_TUNER_R820T 0x2a
+#define RTL2832_TUNER_R828D 0x2b
u8 tuner;
};
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 2521f7e23018..e79749cfec81 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -912,14 +912,8 @@ static int tda10071_init(struct dvb_frontend *fe)
{ 0xd5, 0x03, 0x03 },
};
- /* firmware status */
- ret = tda10071_rd_reg(priv, 0x51, &tmp);
- if (ret)
- goto error;
-
- if (!tmp) {
+ if (priv->warm) {
/* warm state - wake up device from sleep */
- priv->warm = 1;
for (i = 0; i < ARRAY_SIZE(tab); i++) {
ret = tda10071_wr_reg_mask(priv, tab[i].reg,
@@ -937,7 +931,6 @@ static int tda10071_init(struct dvb_frontend *fe)
goto error;
} else {
/* cold state - try to download firmware */
- priv->warm = 0;
/* request the firmware, this will block and timeout */
ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
index 9d08350fe4b0..69e62f42e2e1 100644
--- a/drivers/media/dvb-frontends/tda8083.c
+++ b/drivers/media/dvb-frontends/tda8083.c
@@ -189,7 +189,7 @@ static int tda8083_set_tone (struct tda8083_state* state, fe_sec_tone_mode_t ton
return tda8083_writereg (state, 0x29, 0x80);
default:
return -EINVAL;
- };
+ }
}
static int tda8083_set_voltage (struct tda8083_state* state, fe_sec_voltage_t voltage)
@@ -201,7 +201,7 @@ static int tda8083_set_voltage (struct tda8083_state* state, fe_sec_voltage_t vo
return tda8083_writereg (state, 0x20, 0x11);
default:
return -EINVAL;
- };
+ }
}
static int tda8083_send_diseqc_burst (struct tda8083_state* state, fe_sec_mini_cmd_t burst)
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index ad7ad857ab2a..9aba044dabed 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -31,6 +31,7 @@ struct ts2020_priv {
struct i2c_adapter *i2c;
u8 clk_out_div;
u32 frequency;
+ u32 frequency_div;
};
static int ts2020_release(struct dvb_frontend *fe)
@@ -193,7 +194,7 @@ static int ts2020_set_params(struct dvb_frontend *fe)
u8 lo = 0x01, div4 = 0x0;
/* Calculate frequency divider */
- if (frequency < 1060000) {
+ if (frequency < priv->frequency_div) {
lo |= 0x10;
div4 = 0x1;
ndiv = (frequency * 14 * 4) / TS2020_XTAL_FREQ;
@@ -340,8 +341,12 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
priv->i2c_address = config->tuner_address;
priv->i2c = i2c;
priv->clk_out_div = config->clk_out_div;
+ priv->frequency_div = config->frequency_div;
fe->tuner_priv = priv;
+ if (!priv->frequency_div)
+ priv->frequency_div = 1060000;
+
/* Wake Up the tuner */
if ((0x03 & ts2020_readreg(fe, 0x00)) == 0x00) {
ts2020_writereg(fe, 0x00, 0x01);
diff --git a/drivers/media/dvb-frontends/ts2020.h b/drivers/media/dvb-frontends/ts2020.h
index 5bcb9a71ca80..b2fe6bb3a38b 100644
--- a/drivers/media/dvb-frontends/ts2020.h
+++ b/drivers/media/dvb-frontends/ts2020.h
@@ -28,6 +28,7 @@
struct ts2020_config {
u8 tuner_address;
u8 clk_out_div;
+ u32 frequency_div;
};
#if IS_ENABLED(CONFIG_DVB_TS2020)
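The two ts2020 hunks above make the 1060000 kHz band-switch threshold configurable through the new frequency_div field, falling back to the old hard-coded constant when a board leaves it at zero. A minimal standalone sketch of that selection logic (illustration only, not driver code; the register writes and the TS2020_XTAL_FREQ arithmetic are omitted):

#include <stdio.h>

int main(void)
{
	unsigned int frequency = 950000;      /* example tuning frequency, kHz */
	unsigned int frequency_div = 0;       /* value taken from ts2020_config */
	unsigned char lo = 0x01, div4 = 0x0;

	if (!frequency_div)
		frequency_div = 1060000;      /* default applied in ts2020_attach() */

	if (frequency < frequency_div) {      /* low band: take the /4 path */
		lo |= 0x10;
		div4 = 0x1;
	}
	printf("lo=0x%02x div4=%u\n", lo, div4);
	return 0;
}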
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index d18be19c96cd..842654d33317 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -621,6 +621,15 @@ config VIDEO_AS3645A
This is a driver for the AS3645A and LM3555 flash controllers. It has
built-in control for flash, torch and indicator LEDs.
+config VIDEO_LM3560
+ tristate "LM3560 dual flash driver support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on MEDIA_CAMERA_SUPPORT
+ select REGMAP_I2C
+ ---help---
+ This is a driver for the LM3560 dual flash controller. It controls
+ the flash and torch LEDs.
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -646,7 +655,7 @@ config VIDEO_UPD64083
To compile this driver as a module, choose M here: the
module will be called upd64083.
-comment "Miscelaneous helper chips"
+comment "Miscellaneous helper chips"
config VIDEO_THS7303
tristate "THS7303/53 Video Amplifier"
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 9f462df77b4a..e03f1776f4f4 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o
obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3/
obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
obj-$(CONFIG_VIDEO_AS3645A) += as3645a.o
+obj-$(CONFIG_VIDEO_LM3560) += lm3560.o
obj-$(CONFIG_VIDEO_SMIAPP_PLL) += smiapp-pll.o
obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index bb0c99d7a4f1..b06a7e54ee0d 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -628,16 +628,13 @@ static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable)
static const struct v4l2_dv_timings_cap ad9389b_timings_cap = {
.type = V4L2_DV_BT_656_1120,
- .bt = {
- .max_width = 1920,
- .max_height = 1200,
- .min_pixelclock = 25000000,
- .max_pixelclock = 170000000,
- .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
- .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
- V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
- },
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+ V4L2_DV_BT_CAP_CUSTOM)
};
static int ad9389b_s_dv_timings(struct v4l2_subdev *sd,
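The ad9389b change above (and the matching adv7511, adv7842 and ths8200 hunks further down) replaces the open-coded .bt initializer with the V4L2_INIT_BT_TIMINGS() helper while keeping an explicit .reserved = { 0 } for older GCC. Read back from the removed lines, the result is roughly equivalent to the following sketch (field names taken from the removed code, not the macro's verbatim expansion):

static const struct v4l2_dv_timings_cap ad9389b_timings_cap_equiv = {
	.type = V4L2_DV_BT_656_1120,
	.bt = {
		.min_width      = 0,
		.max_width      = 1920,
		.min_height     = 0,
		.max_height     = 1200,
		.min_pixelclock = 25000000,
		.max_pixelclock = 170000000,
		.standards      = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
				  V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
		.capabilities   = V4L2_DV_BT_CAP_PROGRESSIVE |
				  V4L2_DV_BT_CAP_REDUCED_BLANKING |
				  V4L2_DV_BT_CAP_CUSTOM,
	},
};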
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index 6f738d8e3a8f..d45e0e3a781d 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -178,7 +178,7 @@ static int adv7183_log_status(struct v4l2_subdev *sd)
adv7183_read(sd, ADV7183_VS_FIELD_CTRL_1),
adv7183_read(sd, ADV7183_VS_FIELD_CTRL_2),
adv7183_read(sd, ADV7183_VS_FIELD_CTRL_3));
- v4l2_info(sd, "adv7183: Hsync positon control 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n",
+ v4l2_info(sd, "adv7183: Hsync position control 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n",
adv7183_read(sd, ADV7183_HS_POS_CTRL_1),
adv7183_read(sd, ADV7183_HS_POS_CTRL_2),
adv7183_read(sd, ADV7183_HS_POS_CTRL_3));
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index aeb56c53e39f..d4e15a617c3b 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/videodev2.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
#include <media/adv7343.h>
#include <media/v4l2-async.h>
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 7a576097471f..7c8d971f1f61 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -119,16 +119,14 @@ static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
.type = V4L2_DV_BT_656_1120,
- .bt = {
- .max_width = ADV7511_MAX_WIDTH,
- .max_height = ADV7511_MAX_HEIGHT,
- .min_pixelclock = ADV7511_MIN_PIXELCLOCK,
- .max_pixelclock = ADV7511_MAX_PIXELCLOCK,
- .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT,
+ ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
- .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
- V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
- },
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+ V4L2_DV_BT_CAP_CUSTOM)
};
static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
@@ -1126,6 +1124,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
state->i2c_edid = i2c_new_dummy(client->adapter, state->i2c_edid_addr >> 1);
if (state->i2c_edid == NULL) {
v4l2_err(sd, "failed to register edid i2c client\n");
+ err = -ENOMEM;
goto err_entity;
}
@@ -1133,6 +1132,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
state->work_queue = create_singlethread_workqueue(sd->name);
if (state->work_queue == NULL) {
v4l2_err(sd, "could not create workqueue\n");
+ err = -ENOMEM;
goto err_unreg_cec;
}
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index d1748901337c..22f729d66a96 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -546,30 +546,24 @@ static inline bool is_digital_input(struct v4l2_subdev *sd)
static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = {
.type = V4L2_DV_BT_656_1120,
- .bt = {
- .max_width = 1920,
- .max_height = 1200,
- .min_pixelclock = 25000000,
- .max_pixelclock = 170000000,
- .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
- .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
- V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
- },
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+ V4L2_DV_BT_CAP_CUSTOM)
};
static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = {
.type = V4L2_DV_BT_656_1120,
- .bt = {
- .max_width = 1920,
- .max_height = 1200,
- .min_pixelclock = 25000000,
- .max_pixelclock = 225000000,
- .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
- .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
- V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
- },
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+ V4L2_DV_BT_CAP_CUSTOM)
};
static inline const struct v4l2_dv_timings_cap *
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
new file mode 100644
index 000000000000..3317a9ae3961
--- /dev/null
+++ b/drivers/media/i2c/lm3560.c
@@ -0,0 +1,488 @@
+/*
+ * drivers/media/i2c/lm3560.c
+ * General device driver for TI lm3560, FLASH LED Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * Contact: Daniel Jeong <gshark.jeong@gmail.com>
+ * Ldd-Mlp <ldd-mlp@list.ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/videodev2.h>
+#include <media/lm3560.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+/* registers definitions */
+#define REG_ENABLE 0x10
+#define REG_TORCH_BR 0xa0
+#define REG_FLASH_BR 0xb0
+#define REG_FLASH_TOUT 0xc0
+#define REG_FLAG 0xd0
+#define REG_CONFIG1 0xe0
+
+/* Fault Mask */
+#define FAULT_TIMEOUT (1<<0)
+#define FAULT_OVERTEMP (1<<1)
+#define FAULT_SHORT_CIRCUIT (1<<2)
+
+enum led_enable {
+ MODE_SHDN = 0x0,
+ MODE_TORCH = 0x2,
+ MODE_FLASH = 0x3,
+};
+
+/* struct lm3560_flash
+ *
+ * @pdata: platform data
+ * @regmap: reg. map for i2c
+ * @lock: mutex for serial access.
+ * @led_mode: V4L2 LED mode
+ * @ctrls_led: V4L2 controls
+ * @subdev_led: V4L2 subdev
+ */
+struct lm3560_flash {
+ struct device *dev;
+ struct lm3560_platform_data *pdata;
+ struct regmap *regmap;
+ struct mutex lock;
+
+ enum v4l2_flash_led_mode led_mode;
+ struct v4l2_ctrl_handler ctrls_led[LM3560_LED_MAX];
+ struct v4l2_subdev subdev_led[LM3560_LED_MAX];
+};
+
+#define to_lm3560_flash(_ctrl, _no) \
+ container_of(_ctrl->handler, struct lm3560_flash, ctrls_led[_no])
+
+/* enable mode control */
+static int lm3560_mode_ctrl(struct lm3560_flash *flash)
+{
+ int rval = -EINVAL;
+
+ switch (flash->led_mode) {
+ case V4L2_FLASH_LED_MODE_NONE:
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x03, MODE_SHDN);
+ break;
+ case V4L2_FLASH_LED_MODE_TORCH:
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x03, MODE_TORCH);
+ break;
+ case V4L2_FLASH_LED_MODE_FLASH:
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x03, MODE_FLASH);
+ break;
+ }
+ return rval;
+}
+
+/* led1/2 enable/disable */
+static int lm3560_enable_ctrl(struct lm3560_flash *flash,
+ enum lm3560_led_id led_no, bool on)
+{
+ int rval;
+
+ if (led_no == LM3560_LED0) {
+ if (on == true)
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x08, 0x08);
+ else
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x08, 0x00);
+ } else {
+ if (on == true)
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x10, 0x10);
+ else
+ rval = regmap_update_bits(flash->regmap,
+ REG_ENABLE, 0x10, 0x00);
+ }
+ return rval;
+}
+
+/* torch1/2 brightness control */
+static int lm3560_torch_brt_ctrl(struct lm3560_flash *flash,
+ enum lm3560_led_id led_no, unsigned int brt)
+{
+ int rval;
+ u8 br_bits;
+
+ if (brt < LM3560_TORCH_BRT_MIN)
+ return lm3560_enable_ctrl(flash, led_no, false);
+ else
+ rval = lm3560_enable_ctrl(flash, led_no, true);
+
+ br_bits = LM3560_TORCH_BRT_uA_TO_REG(brt);
+ if (led_no == LM3560_LED0)
+ rval = regmap_update_bits(flash->regmap,
+ REG_TORCH_BR, 0x07, br_bits);
+ else
+ rval = regmap_update_bits(flash->regmap,
+ REG_TORCH_BR, 0x38, br_bits << 3);
+
+ return rval;
+}
+
+/* flash1/2 brightness control */
+static int lm3560_flash_brt_ctrl(struct lm3560_flash *flash,
+ enum lm3560_led_id led_no, unsigned int brt)
+{
+ int rval;
+ u8 br_bits;
+
+ if (brt < LM3560_FLASH_BRT_MIN)
+ return lm3560_enable_ctrl(flash, led_no, false);
+ else
+ rval = lm3560_enable_ctrl(flash, led_no, true);
+
+ br_bits = LM3560_FLASH_BRT_uA_TO_REG(brt);
+ if (led_no == LM3560_LED0)
+ rval = regmap_update_bits(flash->regmap,
+ REG_FLASH_BR, 0x0f, br_bits);
+ else
+ rval = regmap_update_bits(flash->regmap,
+ REG_FLASH_BR, 0xf0, br_bits << 4);
+
+ return rval;
+}
+
+/* V4L2 controls */
+static int lm3560_get_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
+{
+ struct lm3560_flash *flash = to_lm3560_flash(ctrl, led_no);
+
+ mutex_lock(&flash->lock);
+
+ if (ctrl->id == V4L2_CID_FLASH_FAULT) {
+ int rval;
+ s32 fault = 0;
+ unsigned int reg_val;
+ rval = regmap_read(flash->regmap, REG_FLAG, &reg_val);
+ if (rval < 0) {
+ mutex_unlock(&flash->lock);
+ return rval;
+ }
+ if (reg_val & FAULT_SHORT_CIRCUIT)
+ fault |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
+ if (reg_val & FAULT_OVERTEMP)
+ fault |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
+ if (reg_val & FAULT_TIMEOUT)
+ fault |= V4L2_FLASH_FAULT_TIMEOUT;
+ ctrl->cur.val = fault;
+ mutex_unlock(&flash->lock);
+ return 0;
+ }
+
+ mutex_unlock(&flash->lock);
+ return -EINVAL;
+}
+
+static int lm3560_set_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
+{
+ struct lm3560_flash *flash = to_lm3560_flash(ctrl, led_no);
+ u8 tout_bits;
+ int rval = -EINVAL;
+
+ mutex_lock(&flash->lock);
+
+ switch (ctrl->id) {
+ case V4L2_CID_FLASH_LED_MODE:
+ flash->led_mode = ctrl->val;
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
+ rval = lm3560_mode_ctrl(flash);
+ break;
+
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ rval = regmap_update_bits(flash->regmap,
+ REG_CONFIG1, 0x04, (ctrl->val) << 2);
+ if (rval < 0)
+ goto err_out;
+ break;
+
+ case V4L2_CID_FLASH_STROBE:
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) {
+ rval = -EBUSY;
+ goto err_out;
+ }
+ flash->led_mode = V4L2_FLASH_LED_MODE_FLASH;
+ rval = lm3560_mode_ctrl(flash);
+ break;
+
+ case V4L2_CID_FLASH_STROBE_STOP:
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) {
+ rval = -EBUSY;
+ goto err_out;
+ }
+ flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
+ rval = lm3560_mode_ctrl(flash);
+ break;
+
+ case V4L2_CID_FLASH_TIMEOUT:
+ tout_bits = LM3560_FLASH_TOUT_ms_TO_REG(ctrl->val);
+ rval = regmap_update_bits(flash->regmap,
+ REG_FLASH_TOUT, 0x1f, tout_bits);
+ break;
+
+ case V4L2_CID_FLASH_INTENSITY:
+ rval = lm3560_flash_brt_ctrl(flash, led_no, ctrl->val);
+ break;
+
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ rval = lm3560_torch_brt_ctrl(flash, led_no, ctrl->val);
+ break;
+ }
+
+err_out:
+ mutex_unlock(&flash->lock);
+ return rval;
+}
+
+static int lm3560_led1_get_ctrl(struct v4l2_ctrl *ctrl)
+{
+ return lm3560_get_ctrl(ctrl, LM3560_LED1);
+}
+
+static int lm3560_led1_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ return lm3560_set_ctrl(ctrl, LM3560_LED1);
+}
+
+static int lm3560_led0_get_ctrl(struct v4l2_ctrl *ctrl)
+{
+ return lm3560_get_ctrl(ctrl, LM3560_LED0);
+}
+
+static int lm3560_led0_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ return lm3560_set_ctrl(ctrl, LM3560_LED0);
+}
+
+static const struct v4l2_ctrl_ops lm3560_led_ctrl_ops[LM3560_LED_MAX] = {
+ [LM3560_LED0] = {
+ .g_volatile_ctrl = lm3560_led0_get_ctrl,
+ .s_ctrl = lm3560_led0_set_ctrl,
+ },
+ [LM3560_LED1] = {
+ .g_volatile_ctrl = lm3560_led1_get_ctrl,
+ .s_ctrl = lm3560_led1_set_ctrl,
+ }
+};
+
+static int lm3560_init_controls(struct lm3560_flash *flash,
+ enum lm3560_led_id led_no)
+{
+ struct v4l2_ctrl *fault;
+ u32 max_flash_brt = flash->pdata->max_flash_brt[led_no];
+ u32 max_torch_brt = flash->pdata->max_torch_brt[led_no];
+ struct v4l2_ctrl_handler *hdl = &flash->ctrls_led[led_no];
+ const struct v4l2_ctrl_ops *ops = &lm3560_led_ctrl_ops[led_no];
+
+ v4l2_ctrl_handler_init(hdl, 8);
+ /* flash mode */
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_FLASH_LED_MODE,
+ V4L2_FLASH_LED_MODE_TORCH, ~0x7,
+ V4L2_FLASH_LED_MODE_NONE);
+ flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
+
+ /* flash source */
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_FLASH_STROBE_SOURCE,
+ 0x1, ~0x3, V4L2_FLASH_STROBE_SOURCE_SOFTWARE);
+
+ /* flash strobe */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE, 0, 0, 0, 0);
+ /* flash strobe stop */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0);
+
+ /* flash strobe timeout */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_TIMEOUT,
+ LM3560_FLASH_TOUT_MIN,
+ flash->pdata->max_flash_timeout,
+ LM3560_FLASH_TOUT_STEP,
+ flash->pdata->max_flash_timeout);
+
+ /* flash brt */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_INTENSITY,
+ LM3560_FLASH_BRT_MIN, max_flash_brt,
+ LM3560_FLASH_BRT_STEP, max_flash_brt);
+
+ /* torch brt */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_TORCH_INTENSITY,
+ LM3560_TORCH_BRT_MIN, max_torch_brt,
+ LM3560_TORCH_BRT_STEP, max_torch_brt);
+
+ /* fault */
+ fault = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_FAULT, 0,
+ V4L2_FLASH_FAULT_OVER_VOLTAGE
+ | V4L2_FLASH_FAULT_OVER_TEMPERATURE
+ | V4L2_FLASH_FAULT_SHORT_CIRCUIT
+ | V4L2_FLASH_FAULT_TIMEOUT, 0, 0);
+ if (fault != NULL)
+ fault->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ if (hdl->error)
+ return hdl->error;
+
+ flash->subdev_led[led_no].ctrl_handler = hdl;
+ return 0;
+}
+
+/* initialize device */
+static const struct v4l2_subdev_ops lm3560_ops = {
+ .core = NULL,
+};
+
+static const struct regmap_config lm3560_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xFF,
+};
+
+static int lm3560_subdev_init(struct lm3560_flash *flash,
+ enum lm3560_led_id led_no, char *led_name)
+{
+ struct i2c_client *client = to_i2c_client(flash->dev);
+ int rval;
+
+ v4l2_i2c_subdev_init(&flash->subdev_led[led_no], client, &lm3560_ops);
+ flash->subdev_led[led_no].flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strcpy(flash->subdev_led[led_no].name, led_name);
+ rval = lm3560_init_controls(flash, led_no);
+ if (rval)
+ goto err_out;
+ rval = media_entity_init(&flash->subdev_led[led_no].entity, 0, NULL, 0);
+ if (rval < 0)
+ goto err_out;
+ flash->subdev_led[led_no].entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH;
+
+ return rval;
+
+err_out:
+ v4l2_ctrl_handler_free(&flash->ctrls_led[led_no]);
+ return rval;
+}
+
+static int lm3560_init_device(struct lm3560_flash *flash)
+{
+ int rval;
+ unsigned int reg_val;
+
+ /* set peak current */
+ rval = regmap_update_bits(flash->regmap,
+ REG_FLASH_TOUT, 0x60, flash->pdata->peak);
+ if (rval < 0)
+ return rval;
+ /* output disable */
+ flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
+ rval = lm3560_mode_ctrl(flash);
+ if (rval < 0)
+ return rval;
+ /* Reset faults */
+ rval = regmap_read(flash->regmap, REG_FLAG, &reg_val);
+ return rval;
+}
+
+static int lm3560_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct lm3560_flash *flash;
+ struct lm3560_platform_data *pdata = dev_get_platdata(&client->dev);
+ int rval;
+
+ flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL);
+ if (flash == NULL)
+ return -ENOMEM;
+
+ flash->regmap = devm_regmap_init_i2c(client, &lm3560_regmap);
+ if (IS_ERR(flash->regmap)) {
+ rval = PTR_ERR(flash->regmap);
+ return rval;
+ }
+
+ /* if there is no platform data, use chip default value */
+ if (pdata == NULL) {
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+ if (pdata == NULL)
+ return -ENOMEM;
+ pdata->peak = LM3560_PEAK_3600mA;
+ pdata->max_flash_timeout = LM3560_FLASH_TOUT_MAX;
+ /* led 1 */
+ pdata->max_flash_brt[LM3560_LED0] = LM3560_FLASH_BRT_MAX;
+ pdata->max_torch_brt[LM3560_LED0] = LM3560_TORCH_BRT_MAX;
+ /* led 2 */
+ pdata->max_flash_brt[LM3560_LED1] = LM3560_FLASH_BRT_MAX;
+ pdata->max_torch_brt[LM3560_LED1] = LM3560_TORCH_BRT_MAX;
+ }
+ flash->pdata = pdata;
+ flash->dev = &client->dev;
+ mutex_init(&flash->lock);
+
+ rval = lm3560_subdev_init(flash, LM3560_LED0, "lm3560-led0");
+ if (rval < 0)
+ return rval;
+
+ rval = lm3560_subdev_init(flash, LM3560_LED1, "lm3560-led1");
+ if (rval < 0)
+ return rval;
+
+ rval = lm3560_init_device(flash);
+ if (rval < 0)
+ return rval;
+
+ return 0;
+}
+
+static int lm3560_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct lm3560_flash *flash = container_of(subdev, struct lm3560_flash,
+ subdev_led[LM3560_LED_MAX]);
+ unsigned int i;
+
+ for (i = LM3560_LED0; i < LM3560_LED_MAX; i++) {
+ v4l2_device_unregister_subdev(&flash->subdev_led[i]);
+ v4l2_ctrl_handler_free(&flash->ctrls_led[i]);
+ media_entity_cleanup(&flash->subdev_led[i].entity);
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id lm3560_id_table[] = {
+ {LM3560_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lm3560_id_table);
+
+static struct i2c_driver lm3560_i2c_driver = {
+ .driver = {
+ .name = LM3560_NAME,
+ .pm = NULL,
+ },
+ .probe = lm3560_probe,
+ .remove = lm3560_remove,
+ .id_table = lm3560_id_table,
+};
+
+module_i2c_driver(lm3560_i2c_driver);
+
+MODULE_AUTHOR("Daniel Jeong <gshark.jeong@gmail.com>");
+MODULE_AUTHOR("Ldd Mlp <ldd-mlp@list.ti.com>");
+MODULE_DESCRIPTION("Texas Instruments LM3560 LED flash driver");
+MODULE_LICENSE("GPL");
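In the new driver above, lm3560_mode_ctrl() and lm3560_enable_ctrl() build the REG_ENABLE value from a 2-bit mode field plus one enable bit per LED (mask 0x08 for LED0, 0x10 for LED1). A standalone userspace sketch of that bit layout (the macro names below are illustrative; only the masks come from the driver):

#include <stdio.h>
#include <stdint.h>

#define MODE_MASK 0x03   /* bits 0-1: shutdown/torch/flash */
#define LED0_EN   0x08   /* bit 3: enable LED0 */
#define LED1_EN   0x10   /* bit 4: enable LED1 */

static uint8_t reg_enable_value(uint8_t mode, int led0_on, int led1_on)
{
	uint8_t reg = mode & MODE_MASK;

	if (led0_on)
		reg |= LED0_EN;
	if (led1_on)
		reg |= LED1_EN;
	return reg;
}

int main(void)
{
	/* MODE_FLASH (0x3) with both LEDs enabled -> prints 0x1b */
	printf("0x%02x\n", reg_enable_value(0x3, 1, 1));
	return 0;
}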
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index b76ec0e7e685..6fec9384d86e 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1581,7 +1581,7 @@ static int s5c73m3_probe(struct i2c_client *client,
oif_sd = &state->oif_sd;
v4l2_subdev_init(sd, &s5c73m3_subdev_ops);
- sd->owner = client->driver->driver.owner;
+ sd->owner = client->dev.driver->owner;
v4l2_set_subdevdata(sd, state);
strlcpy(sd->name, "S5C73M3", sizeof(sd->name));
@@ -1651,7 +1651,7 @@ static int s5c73m3_probe(struct i2c_client *client,
if (ret < 0)
goto out_err;
- v4l2_info(sd, "%s: completed succesfully\n", __func__);
+ v4l2_info(sd, "%s: completed successfully\n", __func__);
return 0;
out_err:
diff --git a/drivers/media/i2c/soc_camera/imx074.c b/drivers/media/i2c/soc_camera/imx074.c
index 1d384a371b41..5b915936c3f3 100644
--- a/drivers/media/i2c/soc_camera/imx074.c
+++ b/drivers/media/i2c/soc_camera/imx074.c
@@ -451,7 +451,9 @@ static int imx074_probe(struct i2c_client *client,
if (ret < 0)
goto eprobe;
- return v4l2_async_register_subdev(&priv->subdev);
+ ret = v4l2_async_register_subdev(&priv->subdev);
+ if (!ret)
+ return 0;
epwrinit:
eprobe:
diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c
index e968c3fdbd9e..bc74224503e7 100644
--- a/drivers/media/i2c/soc_camera/ov9640.c
+++ b/drivers/media/i2c/soc_camera/ov9640.c
@@ -371,7 +371,7 @@ static void ov9640_alter_regs(enum v4l2_mbus_pixelcode code,
alt->com13 = OV9640_COM13_RGB_AVG;
alt->com15 = OV9640_COM15_RGB_565;
break;
- };
+ }
}
/* Setup registers according to resolution and color encoding */
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index a58a8f663ffb..04139eec8c4e 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-dv-timings.h>
@@ -46,14 +47,10 @@ struct ths8200_state {
static const struct v4l2_dv_timings_cap ths8200_timings_cap = {
.type = V4L2_DV_BT_656_1120,
- .bt = {
- .max_width = 1920,
- .max_height = 1080,
- .min_pixelclock = 25000000,
- .max_pixelclock = 148500000,
- .standards = V4L2_DV_BT_STD_CEA861,
- .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE,
- },
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 148500000,
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE)
};
static inline struct ths8200_state *to_state(struct v4l2_subdev *sd)
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 91f3dd4cda1b..83d85df4853a 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -35,6 +35,7 @@
#include <linux/videodev2.h>
#include <linux/module.h>
#include <linux/v4l2-mediabus.h>
+#include <linux/of.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 24a08fa7e328..912e1cccdd1c 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/v4l2-dv-timings.h>
#include <media/tvp7002.h>
#include <media/v4l2-async.h>
diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c
index 447afbd904a4..8b5e0b3a92a0 100644
--- a/drivers/media/pci/b2c2/flexcop-pci.c
+++ b/drivers/media/pci/b2c2/flexcop-pci.c
@@ -319,7 +319,6 @@ static int flexcop_pci_init(struct flexcop_pci *fc_pci)
err_pci_iounmap:
pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
- pci_set_drvdata(fc_pci->pdev, NULL);
err_pci_release_regions:
pci_release_regions(fc_pci->pdev);
err_pci_disable_device:
@@ -332,7 +331,6 @@ static void flexcop_pci_exit(struct flexcop_pci *fc_pci)
if (fc_pci->init_state & FC_PCI_INIT) {
free_irq(fc_pci->pdev->irq, fc_pci);
pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
- pci_set_drvdata(fc_pci->pdev, NULL);
pci_release_regions(fc_pci->pdev);
pci_disable_device(fc_pci->pdev);
}
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index 66eb0baab0e9..d0c281f41a0a 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -488,8 +488,7 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
btwrite(0, BT848_INT_MASK);
result = request_irq(bt->irq, bt878_irq,
- IRQF_SHARED | IRQF_DISABLED, "bt878",
- (void *) bt);
+ IRQF_SHARED, "bt878", (void *) bt);
if (result == -EINVAL) {
printk(KERN_ERR "bt878(%d): Bad irq number or handler\n",
bt878_num);
@@ -563,7 +562,6 @@ static void bt878_remove(struct pci_dev *pci_dev)
bt->shutdown = 1;
bt878_mem_free(bt);
- pci_set_drvdata(pci_dev, NULL);
pci_disable_device(pci_dev);
return;
}
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index c6532de0eac7..a3b1ee9c00d7 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -4086,7 +4086,7 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
/* disable irqs, register irq handler */
btwrite(0, BT848_INT_MASK);
result = request_irq(btv->c.pci->irq, bttv_irq,
- IRQF_SHARED | IRQF_DISABLED, btv->c.v4l2_dev.name, (void *)btv);
+ IRQF_SHARED, btv->c.v4l2_dev.name, (void *)btv);
if (result < 0) {
pr_err("%d: can't get IRQ %d\n",
bttv_num, btv->c.pci->irq);
diff --git a/drivers/media/pci/cx18/Kconfig b/drivers/media/pci/cx18/Kconfig
index c675b83c43a9..10e6bc72c460 100644
--- a/drivers/media/pci/cx18/Kconfig
+++ b/drivers/media/pci/cx18/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_CX18
tristate "Conexant cx23418 MPEG encoder support"
depends on VIDEO_V4L2 && DVB_CORE && PCI && I2C
+ depends on !FRV
select I2C_ALGOBIT
select VIDEOBUF_VMALLOC
depends on RC_CORE
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 004d8ace5019..87f5bcf29e90 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -324,23 +324,24 @@ static void cx18_eeprom_dump(struct cx18 *cx, unsigned char *eedata, int len)
/* Hauppauge card? get values from tveeprom */
void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
{
- struct i2c_client c;
+ struct i2c_client *c;
u8 eedata[256];
- memset(&c, 0, sizeof(c));
- strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
- c.adapter = &cx->i2c_adap[0];
- c.addr = 0xA0 >> 1;
+ c = kzalloc(sizeof(*c), GFP_ATOMIC);
+ if (c == NULL) {
+ memset(tv, 0, sizeof(*tv));
+ return;
+ }
+
+ strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name));
+ c->adapter = &cx->i2c_adap[0];
+ c->addr = 0xa0 >> 1;
memset(tv, 0, sizeof(*tv));
- if (tveeprom_read(&c, eedata, sizeof(eedata)))
- return;
+ if (tveeprom_read(c, eedata, sizeof(eedata)))
+ goto ret;
switch (cx->card->type) {
case CX18_CARD_HVR_1600_ESMT:
case CX18_CARD_HVR_1600_SAMSUNG:
case CX18_CARD_HVR_1600_S5H1411:
- tveeprom_hauppauge_analog(&c, tv, eedata);
+ tveeprom_hauppauge_analog(c, tv, eedata);
break;
case CX18_CARD_YUAN_MPC718:
case CX18_CARD_GOTVIEW_PCI_DVD3:
@@ -354,6 +355,9 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
cx18_eeprom_dump(cx, eedata, sizeof(eedata));
break;
}
+
+ret:
+ kfree(c);
}
static void cx18_process_eeprom(struct cx18 *cx)
@@ -1031,8 +1035,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
/* Register IRQ */
retval = request_irq(cx->pci_dev->irq, cx18_irq_handler,
- IRQF_SHARED | IRQF_DISABLED,
- cx->v4l2_dev.name, (void *)cx);
+ IRQF_SHARED, cx->v4l2_dev.name, (void *)cx);
if (retval) {
CX18_ERR("Failed to register irq %d\n", retval);
goto free_i2c;
diff --git a/drivers/media/pci/cx23885/Kconfig b/drivers/media/pci/cx23885/Kconfig
index 5104c802f72f..d1dcb1d2e087 100644
--- a/drivers/media/pci/cx23885/Kconfig
+++ b/drivers/media/pci/cx23885/Kconfig
@@ -23,6 +23,7 @@ config VIDEO_CX23885
select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV6110 if MEDIA_SUBDRV_AUTOSELECT
select DVB_CX24116 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_CX24117 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 6a71a965e757..79f20c8c842e 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -223,6 +223,39 @@ struct cx23885_board cx23885_boards[] = {
.name = "Leadtek Winfast PxDVR3200 H",
.portc = CX23885_MPEG_DVB,
},
+ [CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200] = {
+ .name = "Leadtek Winfast PxPVR2200",
+ .porta = CX23885_ANALOG_VIDEO,
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0x61,
+ .tuner_bus = 1,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN2_CH1 |
+ CX25840_VIN5_CH2,
+ .amux = CX25840_AUDIO8,
+ .gpio0 = 0x704040,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_COMPOSITE1,
+ .amux = CX25840_AUDIO7,
+ .gpio0 = 0x704040,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_SVIDEO_LUMA3 |
+ CX25840_SVIDEO_CHROMA4,
+ .amux = CX25840_AUDIO7,
+ .gpio0 = 0x704040,
+ }, {
+ .type = CX23885_VMUX_COMPONENT,
+ .vmux = CX25840_VIN7_CH1 |
+ CX25840_VIN6_CH2 |
+ CX25840_VIN8_CH3 |
+ CX25840_COMPONENT_ON,
+ .amux = CX25840_AUDIO7,
+ .gpio0 = 0x704040,
+ } },
+ },
[CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000] = {
.name = "Leadtek Winfast PxDVR3200 H XC4000",
.porta = CX23885_ANALOG_VIDEO,
@@ -259,6 +292,16 @@ struct cx23885_board cx23885_boards[] = {
.name = "TurboSight TBS 6920",
.portb = CX23885_MPEG_DVB,
},
+ [CX23885_BOARD_TBS_6980] = {
+ .name = "TurboSight TBS 6980",
+ .portb = CX23885_MPEG_DVB,
+ .portc = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_TBS_6981] = {
+ .name = "TurboSight TBS 6981",
+ .portb = CX23885_MPEG_DVB,
+ .portc = CX23885_MPEG_DVB,
+ },
[CX23885_BOARD_TEVII_S470] = {
.name = "TeVii S470",
.portb = CX23885_MPEG_DVB,
@@ -688,6 +731,10 @@ struct cx23885_subid cx23885_subids[] = {
.card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H,
}, {
.subvendor = 0x107d,
+ .subdevice = 0x6f21,
+ .card = CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200,
+ }, {
+ .subvendor = 0x107d,
.subdevice = 0x6f39,
.card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000,
}, {
@@ -699,6 +746,14 @@ struct cx23885_subid cx23885_subids[] = {
.subdevice = 0x8888,
.card = CX23885_BOARD_TBS_6920,
}, {
+ .subvendor = 0x6980,
+ .subdevice = 0x8888,
+ .card = CX23885_BOARD_TBS_6980,
+ }, {
+ .subvendor = 0x6981,
+ .subdevice = 0x8888,
+ .card = CX23885_BOARD_TBS_6981,
+ }, {
.subvendor = 0xd470,
.subdevice = 0x9022,
.card = CX23885_BOARD_TEVII_S470,
@@ -1023,6 +1078,35 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
dev->name, tv.model);
}
+/* Some TBS cards require initializing a chip using a bitbanged SPI attached
+ to the cx23885 GPIOs. If this chip doesn't get initialized the demod
+ doesn't respond to any command. */
+static void tbs_card_init(struct cx23885_dev *dev)
+{
+ int i;
+ const u8 buf[] = {
+ 0xe0, 0x06, 0x66, 0x33, 0x65,
+ 0x01, 0x17, 0x06, 0xde};
+
+ switch (dev->board) {
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
+ cx_set(GP0_IO, 0x00070007);
+ usleep_range(1000, 10000);
+ cx_clear(GP0_IO, 2);
+ usleep_range(1000, 10000);
+ for (i = 0; i < 9 * 8; i++) {
+ cx_clear(GP0_IO, 7);
+ usleep_range(1000, 10000);
+ cx_set(GP0_IO,
+ ((buf[i >> 3] >> (7 - (i & 7))) & 1) | 4);
+ usleep_range(1000, 10000);
+ }
+ cx_set(GP0_IO, 7);
+ break;
+ }
+}
+
int cx23885_tuner_callback(void *priv, int component, int command, int arg)
{
struct cx23885_tsport *port = priv;
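The loop in tbs_card_init() above clocks the 9-byte init sequence out MSB first: byte i >> 3, bit 7 - (i & 7), with bit 2 of GP0_IO held high alongside the data bit. A standalone sketch of just the bit extraction (the GPIO writes and delays are naturally not reproduced here):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* init sequence copied from tbs_card_init() */
	const uint8_t buf[] = { 0xe0, 0x06, 0x66, 0x33, 0x65,
				0x01, 0x17, 0x06, 0xde };
	int i;

	for (i = 0; i < 9 * 8; i++) {
		/* same indexing as the driver: MSB of each byte goes first */
		int bit = (buf[i >> 3] >> (7 - (i & 7))) & 1;

		printf("%d", bit);
		if ((i & 7) == 7)
			printf(" ");
	}
	printf("\n");
	return 0;
}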
@@ -1043,6 +1127,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
@@ -1208,6 +1293,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_set(GP0_IO, 0x000f000f);
break;
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
@@ -1225,6 +1311,8 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_set(GP0_IO, 0x00040004);
break;
case CX23885_BOARD_TBS_6920:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
case CX23885_BOARD_PROF_8000:
cx_write(MC417_CTL, 0x00000036);
cx_write(MC417_OEN, 0x00001000);
@@ -1473,6 +1561,8 @@ int cx23885_ir_init(struct cx23885_dev *dev)
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_MYGICA_X8507:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
if (!enable_885_ir)
break;
dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
@@ -1516,6 +1606,8 @@ void cx23885_ir_fini(struct cx23885_dev *dev)
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_MYGICA_X8507:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
cx23885_irq_remove(dev, PCI_MSK_AV_CORE);
/* sd_ir is a duplicate pointer to the AV Core, just clear it */
dev->sd_ir = NULL;
@@ -1561,6 +1653,8 @@ void cx23885_ir_pci_int_enable(struct cx23885_dev *dev)
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_MYGICA_X8507:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
if (dev->sd_ir)
cx23885_irq_add_enable(dev, PCI_MSK_AV_CORE);
break;
@@ -1676,6 +1770,16 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
+ ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+ ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ tbs_card_init(dev);
+ break;
case CX23885_BOARD_MYGICA_X8506:
case CX23885_BOARD_MAGICPRO_PROHDTVE2:
case CX23885_BOARD_MYGICA_X8507:
@@ -1704,6 +1808,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1700:
case CX23885_BOARD_HAUPPAUGE_HVR1400:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_HAUPPAUGE_HVR1270:
@@ -1733,6 +1838,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
case CX23885_BOARD_HAUPPAUGE_HVR1700:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
@@ -1752,6 +1858,8 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_MYGICA_X8507:
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_AVERMEDIA_HC81R:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
"cx25840", 0x88 >> 1, NULL);
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 9f63d93239ec..edcd79db1e4e 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2129,7 +2129,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
}
err = request_irq(pci_dev->irq, cx23885_irq,
- IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+ IRQF_SHARED, dev->name, dev);
if (err < 0) {
printk(KERN_ERR "%s: can't get IRQ %d\n",
dev->name, pci_dev->irq);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 971e4ff1b87f..05492053b473 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -51,6 +51,7 @@
#include "stv6110.h"
#include "lnbh24.h"
#include "cx24116.h"
+#include "cx24117.h"
#include "cimax2.h"
#include "lgs8gxx.h"
#include "netup-eeprom.h"
@@ -461,6 +462,10 @@ static struct cx24116_config tbs_cx24116_config = {
.demod_address = 0x55,
};
+static struct cx24117_config tbs_cx24117_config = {
+ .demod_address = 0x55,
+};
+
static struct ds3000_config tevii_ds3000_config = {
.demod_address = 0x68,
};
@@ -1044,6 +1049,25 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend->ops.set_voltage = f300_set_voltage;
break;
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
+ i2c_bus = &dev->i2c_bus[1];
+
+ switch (port->nr) {
+ /* PORT B */
+ case 1:
+ fe0->dvb.frontend = dvb_attach(cx24117_attach,
+ &tbs_cx24117_config,
+ &i2c_bus->i2c_adap);
+ break;
+ /* PORT C */
+ case 2:
+ fe0->dvb.frontend = dvb_attach(cx24117_attach,
+ &tbs_cx24117_config,
+ &i2c_bus->i2c_adap);
+ break;
+ }
+ break;
case CX23885_BOARD_TEVII_S470:
i2c_bus = &dev->i2c_bus[1];
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 7875dfbe09ff..8a49e7c9eddd 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -90,6 +90,8 @@ void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events)
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_MYGICA_X8507:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
/*
* The only boards we handle right now. However other boards
* using the CX2388x integrated IR controller should be similar
@@ -168,6 +170,8 @@ static int cx23885_input_ir_start(struct cx23885_dev *dev)
break;
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
/*
* The IR controller on this board only returns pulse widths.
* Any other mode setting will fail to set up the device.
@@ -298,6 +302,14 @@ int cx23885_input_init(struct cx23885_dev *dev)
/* A guess at the remote */
rc_map = RC_MAP_TOTAL_MEDIA_IN_HAND_02;
break;
+ case CX23885_BOARD_TBS_6980:
+ case CX23885_BOARD_TBS_6981:
+ /* Integrated CX23885 IR controller */
+ driver_type = RC_DRIVER_IR_RAW;
+ allowed_protos = RC_BIT_ALL;
+ /* A guess at the remote */
+ rc_map = RC_MAP_TBS_NEC;
+ break;
default:
return -ENODEV;
}
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 161686832b20..7891f34157d1 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -1865,7 +1865,8 @@ int cx23885_video_register(struct cx23885_dev *dev)
v4l2_subdev_call(sd, tuner, s_type_addr, &tun_setup);
- if (dev->board == CX23885_BOARD_LEADTEK_WINFAST_PXTV1200) {
+ if ((dev->board == CX23885_BOARD_LEADTEK_WINFAST_PXTV1200) ||
+ (dev->board == CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200)) {
struct xc2028_ctrl ctrl = {
.fname = XC2028_DEFAULT_FIRMWARE,
.max_len = 64
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 038caf53908b..0fa4048ab872 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -93,6 +93,9 @@
#define CX23885_BOARD_PROF_8000 37
#define CX23885_BOARD_HAUPPAUGE_HVR4400 38
#define CX23885_BOARD_AVERMEDIA_HC81R 39
+#define CX23885_BOARD_TBS_6981 40
+#define CX23885_BOARD_TBS_6980 41
+#define CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200 42
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/pci/cx25821/cx25821-cards.c b/drivers/media/pci/cx25821/cx25821-cards.c
index 3b409feb03d8..f2ebc989b303 100644
--- a/drivers/media/pci/cx25821/cx25821-cards.c
+++ b/drivers/media/pci/cx25821/cx25821-cards.c
@@ -45,5 +45,3 @@ struct cx25821_board cx25821_boards[] = {
},
};
-
-const unsigned int cx25821_bcount = ARRAY_SIZE(cx25821_boards);
diff --git a/drivers/media/pci/cx25821/cx25821-medusa-video.c b/drivers/media/pci/cx25821/cx25821-medusa-video.c
index 22fa04415ccc..43bdfa4dfba1 100644
--- a/drivers/media/pci/cx25821/cx25821-medusa-video.c
+++ b/drivers/media/pci/cx25821/cx25821-medusa-video.c
@@ -438,7 +438,7 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
decoder_count = decoder_select + 1;
} else {
decoder = 0;
- decoder_count = _num_decoders;
+ decoder_count = dev->_max_num_decoders;
}
switch (width) {
@@ -506,8 +506,6 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
break;
}
- _display_field_cnt[decoder] = duration;
-
/* update hardware */
fld_cnt = cx25821_i2c_read(&dev->i2c_bus[0], disp_cnt_reg, &tmp);
@@ -667,8 +665,6 @@ int medusa_video_init(struct cx25821_dev *dev)
int ret_val = 0;
int i = 0;
- _num_decoders = dev->_max_num_decoders;
-
/* disable Auto source selection on all video decoders */
value = cx25821_i2c_read(&dev->i2c_bus[0], MON_A_CTRL, &tmp);
value &= 0xFFFFF0FF;
@@ -685,8 +681,14 @@ int medusa_video_init(struct cx25821_dev *dev)
if (ret_val < 0)
goto error;
- for (i = 0; i < _num_decoders; i++)
- medusa_set_decoderduration(dev, i, _display_field_cnt[i]);
+ /*
+ * FIXME: due to a coding bug the duration was always 0. It's
+ * likely that it really should be something else, but due to the
+ * lack of documentation I have no idea what it should be. For
+ * now just fill in 0 as the duration.
+ */
+ for (i = 0; i < dev->_max_num_decoders; i++)
+ medusa_set_decoderduration(dev, i, 0);
/* Select monitor as DENC A input, power up the DAC */
value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_AB_CTRL, &tmp);
@@ -717,7 +719,7 @@ int medusa_video_init(struct cx25821_dev *dev)
/* Turn on all of the data out and control output pins. */
value = cx25821_i2c_read(&dev->i2c_bus[0], PIN_OE_CTRL, &tmp);
value &= 0xFEF0FE00;
- if (_num_decoders == MAX_DECODERS) {
+ if (dev->_max_num_decoders == MAX_DECODERS) {
/*
* Note: The octal board does not support control pins(bit16-19)
* These bits are ignored in the octal board.
diff --git a/drivers/media/pci/cx25821/cx25821-medusa-video.h b/drivers/media/pci/cx25821/cx25821-medusa-video.h
index 6175e0961855..8bf602ff27b1 100644
--- a/drivers/media/pci/cx25821/cx25821-medusa-video.h
+++ b/drivers/media/pci/cx25821/cx25821-medusa-video.h
@@ -40,10 +40,4 @@
#define CONTRAST_DEFAULT 5000
#define HUE_DEFAULT 5000
-unsigned short _num_decoders;
-unsigned short _num_cameras;
-
-unsigned int _video_standard;
-int _display_field_cnt[MAX_DECODERS];
-
#endif
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.c b/drivers/media/pci/cx25821/cx25821-video-upstream.c
index 88ffef410c50..1f43be0b04c8 100644
--- a/drivers/media/pci/cx25821/cx25821-video-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-video-upstream.c
@@ -159,10 +159,10 @@ static __le32 *cx25821_risc_field_upstream(struct cx25821_channel *chan, __le32
* For the upstream video channel, the risc engine will enable
* the FIFO. */
if (fifo_enable && line == 3) {
- *(rp++) = RISC_WRITECR;
- *(rp++) = sram_ch->dma_ctl;
- *(rp++) = FLD_VID_FIFO_EN;
- *(rp++) = 0x00000001;
+ *(rp++) = cpu_to_le32(RISC_WRITECR);
+ *(rp++) = cpu_to_le32(sram_ch->dma_ctl);
+ *(rp++) = cpu_to_le32(FLD_VID_FIFO_EN);
+ *(rp++) = cpu_to_le32(0x00000001);
}
}
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index aba5b1c649e6..400eb1c42d3f 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -834,7 +834,7 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
/* get irq */
err = request_irq(chip->pci->irq, cx8801_irq,
- IRQF_SHARED | IRQF_DISABLED, chip->core->name, chip);
+ IRQF_SHARED, chip->core->name, chip);
if (err < 0) {
dprintk(0, "%s: can't get IRQ %d\n",
chip->core->name, chip->pci->irq);
@@ -935,8 +935,6 @@ static void cx88_audio_finidev(struct pci_dev *pci)
snd_card_free((void *)card);
- pci_set_drvdata(pci, NULL);
-
devno--;
}
@@ -951,27 +949,4 @@ static struct pci_driver cx88_audio_pci_driver = {
.remove = cx88_audio_finidev,
};
-/****************************************************************************
- LINUX MODULE INIT
- ****************************************************************************/
-
-/*
- * module init
- */
-static int __init cx88_audio_init(void)
-{
- printk(KERN_INFO "cx2388x alsa driver version %s loaded\n",
- CX88_VERSION);
- return pci_register_driver(&cx88_audio_pci_driver);
-}
-
-/*
- * module remove
- */
-static void __exit cx88_audio_fini(void)
-{
- pci_unregister_driver(&cx88_audio_pci_driver);
-}
-
-module_init(cx88_audio_init);
-module_exit(cx88_audio_fini);
+module_pci_driver(cx88_audio_pci_driver);
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 2d3507eb4897..74b7b8614c23 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -499,7 +499,7 @@ static int cx8802_init_common(struct cx8802_dev *dev)
/* get irq */
err = request_irq(dev->pci->irq, cx8802_irq,
- IRQF_SHARED | IRQF_DISABLED, dev->core->name, dev);
+ IRQF_SHARED, dev->core->name, dev);
if (err < 0) {
printk(KERN_ERR "%s: can't get IRQ %d\n",
dev->core->name, dev->pci->irq);
@@ -520,7 +520,6 @@ static void cx8802_fini_common(struct cx8802_dev *dev)
/* unregister stuff */
free_irq(dev->pci->irq, dev);
- pci_set_drvdata(dev->pci, NULL);
/* free memory */
btcx_riscmem_free(dev->pci,&dev->mpegq.stopper);
@@ -903,20 +902,8 @@ static struct pci_driver cx8802_pci_driver = {
.remove = cx8802_remove,
};
-static int __init cx8802_init(void)
-{
- printk(KERN_INFO "cx88/2: cx2388x MPEG-TS Driver Manager version %s loaded\n",
- CX88_VERSION);
- return pci_register_driver(&cx8802_pci_driver);
-}
-
-static void __exit cx8802_fini(void)
-{
- pci_unregister_driver(&cx8802_pci_driver);
-}
+module_pci_driver(cx8802_pci_driver);
-module_init(cx8802_init);
-module_exit(cx8802_fini);
EXPORT_SYMBOL(cx8802_buf_prepare);
EXPORT_SYMBOL(cx8802_buf_queue);
EXPORT_SYMBOL(cx8802_cancel_buffers);
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index ecf21d9f1f34..ed8cb9037b6f 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1738,7 +1738,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
/* get irq */
err = request_irq(pci_dev->irq, cx8800_irq,
- IRQF_SHARED | IRQF_DISABLED, core->name, dev);
+ IRQF_SHARED, core->name, dev);
if (err < 0) {
printk(KERN_ERR "%s/0: can't get IRQ %d\n",
core->name,pci_dev->irq);
@@ -1922,7 +1922,6 @@ static void cx8800_finidev(struct pci_dev *pci_dev)
free_irq(pci_dev->irq, dev);
cx8800_unregister_video(dev);
- pci_set_drvdata(pci_dev, NULL);
/* free memory */
btcx_riscmem_free(dev->pci,&dev->vidq.stopper);
@@ -2039,17 +2038,4 @@ static struct pci_driver cx8800_pci_driver = {
#endif
};
-static int __init cx8800_init(void)
-{
- printk(KERN_INFO "cx88/0: cx2388x v4l2 driver version %s loaded\n",
- CX88_VERSION);
- return pci_register_driver(&cx8800_pci_driver);
-}
-
-static void __exit cx8800_fini(void)
-{
- pci_unregister_driver(&cx8800_pci_driver);
-}
-
-module_init(cx8800_init);
-module_exit(cx8800_fini);
+module_pci_driver(cx8800_pci_driver);
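The three cx88 conversions above drop the hand-rolled init/exit pair (and with it the KERN_INFO version banner) in favour of module_pci_driver(). For cx8800_pci_driver the macro boils down to roughly the following (a sketch; the generated symbol names come from the module_driver() helper and may differ in detail):

static int __init cx8800_pci_driver_init(void)
{
	return pci_register_driver(&cx8800_pci_driver);
}
module_init(cx8800_pci_driver_init);

static void __exit cx8800_pci_driver_exit(void)
{
	pci_unregister_driver(&cx8800_pci_driver);
}
module_exit(cx8800_pci_driver_exit);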
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 36e34522b9a8..9375f30d9a81 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -1544,7 +1544,7 @@ static void ddb_unmap(struct ddb *dev)
static void ddb_remove(struct pci_dev *pdev)
{
- struct ddb *dev = (struct ddb *) pci_get_drvdata(pdev);
+ struct ddb *dev = pci_get_drvdata(pdev);
ddb_ports_detach(dev);
ddb_i2c_release(dev);
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index ab797fe466d2..e60ac35fc10c 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -1178,7 +1178,6 @@ err_pci_release_regions:
err_pci_disable_device:
pci_disable_device(pdev);
err_kfree:
- pci_set_drvdata(pdev, NULL);
kfree(dev);
return ret;
}
@@ -1202,8 +1201,7 @@ static void dm1105_remove(struct pci_dev *pdev)
dvb_dmxdev_release(&dev->dmxdev);
dvb_dmx_release(dvbdemux);
dvb_unregister_adapter(dvb_adapter);
- if (&dev->i2c_adap)
- i2c_del_adapter(&dev->i2c_adap);
+ i2c_del_adapter(&dev->i2c_adap);
dm1105_hw_exit(dev);
synchronize_irq(pdev->irq);
@@ -1211,7 +1209,6 @@ static void dm1105_remove(struct pci_dev *pdev)
pci_iounmap(pdev, dev->io_mem);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
dm1105_devcount--;
kfree(dev);
}
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index c08ae3eb9554..802642d26643 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -1261,7 +1261,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
/* Register IRQ */
retval = request_irq(itv->pdev->irq, ivtv_irq_handler,
- IRQF_SHARED | IRQF_DISABLED, itv->v4l2_dev.name, (void *)itv);
+ IRQF_SHARED, itv->v4l2_dev.name, (void *)itv);
if (retval) {
IVTV_ERR("Failed to register irq %d\n", retval);
goto free_i2c;
diff --git a/drivers/media/pci/mantis/mantis_pci.c b/drivers/media/pci/mantis/mantis_pci.c
index a846036ea022..9e89e045213a 100644
--- a/drivers/media/pci/mantis/mantis_pci.c
+++ b/drivers/media/pci/mantis/mantis_pci.c
@@ -143,7 +143,6 @@ fail1:
fail0:
dprintk(MANTIS_ERROR, 1, "ERROR: <%d> exiting", ret);
- pci_set_drvdata(pdev, NULL);
return ret;
}
EXPORT_SYMBOL_GPL(mantis_pci_init);
@@ -161,7 +160,6 @@ void mantis_pci_exit(struct mantis_pci *mantis)
}
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
EXPORT_SYMBOL_GPL(mantis_pci_exit);
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 2381b05432e6..54d5c821007c 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1698,7 +1698,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
meye.mchip_irq = pcidev->irq;
if (request_irq(meye.mchip_irq, meye_irq,
- IRQF_DISABLED | IRQF_SHARED, "meye", meye_irq)) {
+ IRQF_SHARED, "meye", meye_irq)) {
v4l2_err(v4l2_dev, "request_irq failed\n");
goto outreqirq;
}
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index 37ebc42392ad..970e83308525 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -1622,7 +1622,7 @@ static void ngene_unlink(struct ngene *dev)
void ngene_shutdown(struct pci_dev *pdev)
{
- struct ngene *dev = (struct ngene *)pci_get_drvdata(pdev);
+ struct ngene *dev = pci_get_drvdata(pdev);
if (!dev || !shutdown_workaround)
return;
@@ -1648,7 +1648,6 @@ void ngene_remove(struct pci_dev *pdev)
cxd_detach(dev);
ngene_stop(dev);
ngene_release_buffers(dev);
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
}
@@ -1702,6 +1701,5 @@ fail1:
ngene_release_buffers(dev);
fail0:
pci_disable_device(pci_dev);
- pci_set_drvdata(pci_dev, NULL);
return stat;
}
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index 493828500055..8164d74b46a4 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -736,7 +736,6 @@ err_pci_release_regions:
err_pci_disable_device:
pci_disable_device(pdev);
err_kfree:
- pci_set_drvdata(pdev, NULL);
kfree(pluto);
goto out;
}
@@ -765,7 +764,6 @@ static void pluto2_remove(struct pci_dev *pdev)
pci_iounmap(pdev, pluto->io_mem);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
kfree(pluto);
}
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index 75ce14229e03..db887b0c37b1 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -1076,7 +1076,6 @@ static void pt1_remove(struct pci_dev *pdev)
pt1_update_power(pt1);
pt1_cleanup_adapters(pt1);
i2c_del_adapter(&pt1->i2c_adap);
- pci_set_drvdata(pdev, NULL);
kfree(pt1);
pci_iounmap(pdev, regs);
pci_release_regions(pdev);
@@ -1198,7 +1197,6 @@ err_i2c_del_adapter:
err_pt1_cleanup_adapters:
pt1_cleanup_adapters(pt1);
err_kfree:
- pci_set_drvdata(pdev, NULL);
kfree(pt1);
err_pci_iounmap:
pci_iounmap(pdev, regs);
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index dbcdfbf8aed0..dd67c8a400cc 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -1096,7 +1096,7 @@ static int alsa_card_saa7134_create(struct saa7134_dev *dev, int devnum)
err = request_irq(dev->pci->irq, saa7134_alsa_irq,
- IRQF_SHARED | IRQF_DISABLED, dev->name,
+ IRQF_SHARED, dev->name,
(void*) &dev->dmasound);
if (err < 0) {
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 45f0aca597ae..27d7ee709c58 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -992,7 +992,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
/* get irq */
err = request_irq(pci_dev->irq, saa7134_irq,
- IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+ IRQF_SHARED, dev->name, dev);
if (err < 0) {
printk(KERN_ERR "%s: can't get IRQ %d\n",
dev->name,pci_dev->irq);
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index e12bbd8c3f0b..fb60da85bc2c 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1455,6 +1455,7 @@ static int video_release(struct file *file)
/* stop video capture */
if (res_check(fh, RESOURCE_VIDEO)) {
+ pm_qos_remove_request(&dev->qos_request);
videobuf_streamoff(&fh->cap);
res_free(dev,fh,RESOURCE_VIDEO);
}
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index d37ee37aaefe..57ef5456f1e8 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1232,7 +1232,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
}
err = request_irq(pci_dev->irq, saa7164_irq,
- IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+ IRQF_SHARED, dev->name, dev);
if (err < 0) {
printk(KERN_ERR "%s: can't get IRQ %d\n", dev->name,
pci_dev->irq);
@@ -1439,7 +1439,6 @@ static void saa7164_finidev(struct pci_dev *pci_dev)
/* unregister stuff */
free_irq(pci_dev->irq, dev);
- pci_set_drvdata(pci_dev, NULL);
mutex_lock(&devlist);
list_del(&dev->devlist);
diff --git a/drivers/media/pci/zoran/Kconfig b/drivers/media/pci/zoran/Kconfig
index 26ca8702e33f..39ec35bd21a5 100644
--- a/drivers/media/pci/zoran/Kconfig
+++ b/drivers/media/pci/zoran/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_ZORAN
tristate "Zoran ZR36057/36067 Video For Linux"
depends on PCI && I2C_ALGOBIT && VIDEO_V4L2 && VIRT_TO_BUS
+ depends on !ALPHA
help
Say Y for support for MJPEG capture cards based on the Zoran
36057/36067 PCI controller chipset. This includes the Iomega
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index 923d59a321f8..cec5b7553f28 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -1293,7 +1293,7 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
result = request_irq(zr->pci_dev->irq, zoran_irq,
- IRQF_SHARED | IRQF_DISABLED, ZR_DEVNAME(zr), zr);
+ IRQF_SHARED, ZR_DEVNAME(zr), zr);
if (result < 0) {
if (result == -EINVAL) {
dprintk(1,
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index c7caf94621b4..ce7ae1597549 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -112,7 +112,7 @@ config VIDEO_OMAP3_DEBUG
config VIDEO_S3C_CAMIF
tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
- depends on (PLAT_S3C64XX || PLAT_S3C24XX) && PM_RUNTIME
+ depends on (ARCH_S3C64XX || PLAT_S3C24XX) && PM_RUNTIME
select VIDEOBUF2_DMA_CONTIG
---help---
This is a v4l2 driver for s3c24xx and s3c64xx SoC series camera
@@ -143,6 +143,7 @@ if V4L_MEM2MEM_DRIVERS
config VIDEO_CODA
tristate "Chips&Media Coda multi-standard codec IP"
depends on VIDEO_DEV && VIDEO_V4L2 && ARCH_MXC
+ select SRAM
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
---help---
@@ -204,6 +205,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
config VIDEO_SH_VEU
tristate "SuperH VEU mem2mem video processing driver"
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ depends on !CRIS && !FRV
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
@@ -212,7 +214,8 @@ config VIDEO_SH_VEU
config VIDEO_RENESAS_VSP1
tristate "Renesas VSP1 Video Processing Engine"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
+ depends on !CRIS && !FRV
select VIDEOBUF2_DMA_CONTIG
---help---
This is a V4L2 driver for the Renesas VSP1 video processing engine.
@@ -220,6 +223,22 @@ config VIDEO_RENESAS_VSP1
To compile this driver as a module, choose M here: the module
will be called vsp1.
+config VIDEO_TI_VPE
+ tristate "TI VPE (Video Processing Engine) driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && SOC_DRA7XX
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ Support for the TI VPE (Video Processing Engine) block
+ found on the DRA7XX SoC.
+
+config VIDEO_TI_VPE_DEBUG
+ bool "VPE debug messages"
+ depends on VIDEO_TI_VPE
+ ---help---
+ Enable debug messages in the VPE driver.
+
endif # V4L_MEM2MEM_DRIVERS
menuconfig V4L_TEST_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 4e4da482c522..1348ba1faf92 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -22,6 +22,8 @@ obj-$(CONFIG_VIDEO_VIVI) += vivi.o
obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o
+obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
+
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
obj-$(CONFIG_VIDEO_CODA) += coda.o
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index 449d2fec9e87..ed4998c224f8 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -39,7 +39,7 @@
#define CODA_NAME "coda"
-#define CODA_MAX_INSTANCES 4
+#define CODADX6_MAX_INSTANCES 4
#define CODA_FMO_BUF_SIZE 32
#define CODADX6_WORK_BUF_SIZE (288 * 1024 + CODA_FMO_BUF_SIZE * 8 * 1024)
@@ -54,8 +54,6 @@
#define CODA_MAX_FRAMEBUFFERS 8
-#define MAX_W 8192
-#define MAX_H 8192
#define CODA_MAX_FRAME_SIZE 0x100000
#define FMO_SLICE_SAVE_BUF_SIZE (32)
#define CODA_DEFAULT_GAMMA 4096
@@ -394,14 +392,57 @@ static struct coda_codec *coda_find_codec(struct coda_dev *dev, int src_fourcc,
return &codecs[k];
}
+static void coda_get_max_dimensions(struct coda_dev *dev,
+ struct coda_codec *codec,
+ int *max_w, int *max_h)
+{
+ struct coda_codec *codecs = dev->devtype->codecs;
+ int num_codecs = dev->devtype->num_codecs;
+ unsigned int w, h;
+ int k;
+
+ if (codec) {
+ w = codec->max_w;
+ h = codec->max_h;
+ } else {
+ for (k = 0, w = 0, h = 0; k < num_codecs; k++) {
+ w = max(w, codecs[k].max_w);
+ h = max(h, codecs[k].max_h);
+ }
+ }
+
+ if (max_w)
+ *max_w = w;
+ if (max_h)
+ *max_h = h;
+}
+
+static char *coda_product_name(int product)
+{
+ static char buf[9];
+
+ switch (product) {
+ case CODA_DX6:
+ return "CodaDx6";
+ case CODA_7541:
+ return "CODA7541";
+ default:
+ snprintf(buf, sizeof(buf), "(0x%04x)", product);
+ return buf;
+ }
+}
+
/*
* V4L2 ioctl() operations.
*/
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
+static int coda_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
strlcpy(cap->driver, CODA_NAME, sizeof(cap->driver));
- strlcpy(cap->card, CODA_NAME, sizeof(cap->card));
+ strlcpy(cap->card, coda_product_name(ctx->dev->devtype->product),
+ sizeof(cap->card));
strlcpy(cap->bus_info, "platform:" CODA_NAME, sizeof(cap->bus_info));
/*
* This is only a mem-to-mem video device. The capture and output
@@ -457,6 +498,8 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
fmt = &formats[i];
strlcpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
+ if (!coda_format_is_yuv(fmt->fourcc))
+ f->flags |= V4L2_FMT_FLAG_COMPRESSED;
return 0;
}
@@ -464,8 +507,8 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
return -EINVAL;
}
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int coda_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
struct vb2_queue *src_vq;
@@ -483,13 +526,14 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0);
}
-static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int coda_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_OUTPUT, 0);
}
-static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+static int coda_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct vb2_queue *vq;
struct coda_q_data *q_data;
@@ -516,8 +560,11 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
return 0;
}
-static int vidioc_try_fmt(struct coda_codec *codec, struct v4l2_format *f)
+static int coda_try_fmt(struct coda_ctx *ctx, struct coda_codec *codec,
+ struct v4l2_format *f)
{
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data;
unsigned int max_w, max_h;
enum v4l2_field field;
@@ -531,32 +578,48 @@ static int vidioc_try_fmt(struct coda_codec *codec, struct v4l2_format *f)
* if any of the dimensions is unsupported */
f->fmt.pix.field = field;
- if (codec) {
- max_w = codec->max_w;
- max_h = codec->max_h;
- } else {
- max_w = MAX_W;
- max_h = MAX_H;
+ coda_get_max_dimensions(dev, codec, &max_w, &max_h);
+ v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w, W_ALIGN,
+ &f->fmt.pix.height, MIN_H, max_h, H_ALIGN,
+ S_ALIGN);
+
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_MPEG4:
+ case V4L2_PIX_FMT_JPEG:
+ break;
+ default:
+ q_data = get_q_data(ctx, f->type);
+ f->fmt.pix.pixelformat = q_data->fourcc;
}
- v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w,
- W_ALIGN, &f->fmt.pix.height,
- MIN_H, max_h, H_ALIGN, S_ALIGN);
- if (coda_format_is_yuv(f->fmt.pix.pixelformat)) {
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
/* Frame stride must be multiple of 8 */
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 8);
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
f->fmt.pix.height * 3 / 2;
- } else { /*encoded formats h.264/mpeg4 */
+ break;
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_MPEG4:
+ case V4L2_PIX_FMT_JPEG:
f->fmt.pix.bytesperline = 0;
f->fmt.pix.sizeimage = CODA_MAX_FRAME_SIZE;
+ break;
+ default:
+ BUG();
}
+ f->fmt.pix.priv = 0;
+
return 0;
}
-static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+static int coda_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
struct coda_codec *codec;
@@ -584,7 +647,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.colorspace = ctx->colorspace;
- ret = vidioc_try_fmt(codec, f);
+ ret = coda_try_fmt(ctx, codec, f);
if (ret < 0)
return ret;
@@ -600,8 +663,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_format *f)
+static int coda_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
struct coda_codec *codec;
@@ -613,10 +676,10 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
if (!f->fmt.pix.colorspace)
f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
- return vidioc_try_fmt(codec, f);
+ return coda_try_fmt(ctx, codec, f);
}
-static int vidioc_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
+static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
{
struct coda_q_data *q_data;
struct vb2_queue *vq;
@@ -646,61 +709,62 @@ static int vidioc_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
return 0;
}
-static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+static int coda_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
int ret;
- ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ ret = coda_try_fmt_vid_cap(file, priv, f);
if (ret)
return ret;
- return vidioc_s_fmt(ctx, f);
+ return coda_s_fmt(ctx, f);
}
-static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_format *f)
+static int coda_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
int ret;
- ret = vidioc_try_fmt_vid_out(file, priv, f);
+ ret = coda_try_fmt_vid_out(file, priv, f);
if (ret)
return ret;
- ret = vidioc_s_fmt(ctx, f);
+ ret = coda_s_fmt(ctx, f);
if (ret)
ctx->colorspace = f->fmt.pix.colorspace;
return ret;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
+static int coda_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
+static int coda_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+static int coda_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
-static int vidioc_expbuf(struct file *file, void *priv,
- struct v4l2_exportbuffer *eb)
+static int coda_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
@@ -718,7 +782,8 @@ static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
(buf->sequence == (ctx->qsequence - 1)));
}
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+static int coda_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
int ret;
@@ -738,24 +803,24 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return ret;
}
-static int vidioc_create_bufs(struct file *file, void *priv,
- struct v4l2_create_buffers *create)
+static int coda_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create);
}
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
+static int coda_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
+static int coda_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
int ret;
@@ -772,23 +837,34 @@ static int vidioc_streamoff(struct file *file, void *priv,
return ret;
}
-static int vidioc_decoder_cmd(struct file *file, void *fh,
- struct v4l2_decoder_cmd *dc)
+static int coda_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
-
if (dc->cmd != V4L2_DEC_CMD_STOP)
return -EINVAL;
- if ((dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK) ||
- (dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY))
+ if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
return -EINVAL;
- if (dc->stop.pts != 0)
+ if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0))
return -EINVAL;
+ return 0;
+}
+
+static int coda_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ int ret;
+
+ ret = coda_try_decoder_cmd(file, fh, dc);
+ if (ret < 0)
+ return ret;
+
+ /* Ignore decoder stop command silently in encoder context */
if (ctx->inst_type != CODA_INST_DECODER)
- return -EINVAL;
+ return 0;
/* Set the stream-end flag on this context */
ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
@@ -796,8 +872,8 @@ static int vidioc_decoder_cmd(struct file *file, void *fh,
return 0;
}
-static int vidioc_subscribe_event(struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub)
+static int coda_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
{
switch (sub->type) {
case V4L2_EVENT_EOS:
@@ -808,32 +884,33 @@ static int vidioc_subscribe_event(struct v4l2_fh *fh,
}
static const struct v4l2_ioctl_ops coda_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
+ .vidioc_querycap = coda_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = coda_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = coda_g_fmt,
+ .vidioc_try_fmt_vid_cap = coda_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = coda_s_fmt_vid_cap,
- .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
- .vidioc_g_fmt_vid_out = vidioc_g_fmt,
- .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
- .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+ .vidioc_enum_fmt_vid_out = coda_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = coda_g_fmt,
+ .vidioc_try_fmt_vid_out = coda_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = coda_s_fmt_vid_out,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_reqbufs = coda_reqbufs,
+ .vidioc_querybuf = coda_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_expbuf = vidioc_expbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_create_bufs = vidioc_create_bufs,
+ .vidioc_qbuf = coda_qbuf,
+ .vidioc_expbuf = coda_expbuf,
+ .vidioc_dqbuf = coda_dqbuf,
+ .vidioc_create_bufs = coda_create_bufs,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = coda_streamon,
+ .vidioc_streamoff = coda_streamoff,
- .vidioc_decoder_cmd = vidioc_decoder_cmd,
+ .vidioc_try_decoder_cmd = coda_try_decoder_cmd,
+ .vidioc_decoder_cmd = coda_decoder_cmd,
- .vidioc_subscribe_event = vidioc_subscribe_event,
+ .vidioc_subscribe_event = coda_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -1928,8 +2005,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
if (!(ctx->streamon_out & ctx->streamon_cap))
return 0;
- /* Allow device_run with no buffers queued and after streamoff */
- v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true);
+ /* Allow decoder device_run with no new buffers queued */
+ if (ctx->inst_type == CODA_INST_DECODER)
+ v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true);
ctx->gopcounter = ctx->params.gop_size - 1;
buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
@@ -2071,10 +2149,8 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
coda_setup_iram(ctx);
if (dst_fourcc == V4L2_PIX_FMT_H264) {
- value = (FMO_SLICE_SAVE_BUF_SIZE << 7);
- value |= (0 & CODA_FMOPARAM_TYPE_MASK) << CODA_FMOPARAM_TYPE_OFFSET;
- value |= 0 & CODA_FMOPARAM_SLICENUM_MASK;
if (dev->devtype->product == CODA_DX6) {
+ value = FMO_SLICE_SAVE_BUF_SIZE << 7;
coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO);
} else {
coda_write(dev, ctx->iram_info.search_ram_paddr,
@@ -2371,7 +2447,13 @@ static int coda_queue_init(void *priv, struct vb2_queue *src_vq,
static int coda_next_free_instance(struct coda_dev *dev)
{
- return ffz(dev->instance_mask);
+ int idx = ffz(dev->instance_mask);
+
+ if ((idx < 0) ||
+ (dev->devtype->product == CODA_DX6 && idx > CODADX6_MAX_INSTANCES))
+ return -EBUSY;
+
+ return idx;
}
static int coda_open(struct file *file)
@@ -2386,8 +2468,8 @@ static int coda_open(struct file *file)
return -ENOMEM;
idx = coda_next_free_instance(dev);
- if (idx >= CODA_MAX_INSTANCES) {
- ret = -EBUSY;
+ if (idx < 0) {
+ ret = idx;
goto err_coda_max;
}
set_bit(idx, &dev->instance_mask);
@@ -2719,7 +2801,6 @@ static void coda_finish_encode(struct coda_ctx *ctx)
dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
/* Get results from the coda */
- coda_read(dev, CODA_RET_ENC_PIC_TYPE);
start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START);
wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
@@ -2739,7 +2820,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM);
coda_read(dev, CODA_RET_ENC_PIC_FLAG);
- if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) {
+ if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) {
dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
} else {
@@ -2861,21 +2942,6 @@ static bool coda_firmware_supported(u32 vernum)
return false;
}
-static char *coda_product_name(int product)
-{
- static char buf[9];
-
- switch (product) {
- case CODA_DX6:
- return "CodaDx6";
- case CODA_7541:
- return "CODA7541";
- default:
- snprintf(buf, sizeof(buf), "(0x%04x)", product);
- return buf;
- }
-}
-
static int coda_hw_init(struct coda_dev *dev)
{
u16 product, major, minor, release;
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 04609cc6eba7..eac472b5ae83 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -1785,7 +1785,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
}
irq = res->start;
- err = devm_request_irq(&pdev->dev, irq, venc_isr, IRQF_DISABLED,
+ err = devm_request_irq(&pdev->dev, irq, venc_isr, 0,
VPBE_DISPLAY_DRIVER, disp_dev);
if (err) {
v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 93609091cb23..d762246eabf5 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -688,7 +688,7 @@ static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
frame_format = ccdc_dev->hw_ops.get_frame_format();
if (frame_format == CCDC_FRMFMT_PROGRESSIVE) {
return request_irq(vpfe_dev->ccdc_irq1, vdint1_isr,
- IRQF_DISABLED, "vpfe_capture1",
+ 0, "vpfe_capture1",
vpfe_dev);
}
return 0;
@@ -1863,7 +1863,7 @@ static int vpfe_probe(struct platform_device *pdev)
}
vpfe_dev->ccdc_irq1 = res1->start;
- ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, IRQF_DISABLED,
+ ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, 0,
"vpfe_capture0", vpfe_dev);
if (0 != ret) {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 1089834a4efe..52ac5e6c8625 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -2154,7 +2154,7 @@ static __init int vpif_probe(struct platform_device *pdev)
if (!vpif_obj.sd[i]) {
vpif_err("Error registering v4l2 subdevice\n");
- err = -ENOMEM;
+ err = -ENODEV;
goto probe_subdev_out;
}
v4l2_info(&vpif_obj.v4l2_dev,
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 76435d3bf62d..ef0a6564cef9 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -45,6 +45,7 @@
#define GSC_DST_FMT (1 << 2)
#define GSC_CTX_M2M (1 << 3)
#define GSC_CTX_STOP_REQ (1 << 6)
+#define GSC_CTX_ABORT (1 << 7)
enum gsc_dev_flags {
/* for global */
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index e576ff2de3de..810c3e13970c 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -46,6 +46,17 @@ static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx)
return ret == 0 ? -ETIMEDOUT : ret;
}
+static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
+{
+ int ret;
+
+ ret = gsc_m2m_ctx_stop_req(ctx);
+ if ((ret == -ETIMEDOUT) || (ctx->state & GSC_CTX_ABORT)) {
+ gsc_ctx_state_lock_clear(GSC_CTX_STOP_REQ | GSC_CTX_ABORT, ctx);
+ gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ }
+}
+
static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct gsc_ctx *ctx = q->drv_priv;
@@ -58,11 +69,8 @@ static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
static int gsc_m2m_stop_streaming(struct vb2_queue *q)
{
struct gsc_ctx *ctx = q->drv_priv;
- int ret;
- ret = gsc_m2m_ctx_stop_req(ctx);
- if (ret == -ETIMEDOUT)
- gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ __gsc_m2m_job_abort(ctx);
pm_runtime_put(&ctx->gsc_dev->pdev->dev);
@@ -91,15 +99,9 @@ void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
}
}
-
static void gsc_m2m_job_abort(void *priv)
{
- struct gsc_ctx *ctx = priv;
- int ret;
-
- ret = gsc_m2m_ctx_stop_req(ctx);
- if (ret == -ETIMEDOUT)
- gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ __gsc_m2m_job_abort((struct gsc_ctx *)priv);
}
static int gsc_get_bufs(struct gsc_ctx *ctx)
@@ -150,9 +152,10 @@ static void gsc_m2m_device_run(void *priv)
gsc->m2m.ctx = ctx;
}
- is_set = (ctx->state & GSC_CTX_STOP_REQ) ? 1 : 0;
- ctx->state &= ~GSC_CTX_STOP_REQ;
+ is_set = ctx->state & GSC_CTX_STOP_REQ;
if (is_set) {
+ ctx->state &= ~GSC_CTX_STOP_REQ;
+ ctx->state |= GSC_CTX_ABORT;
wake_up(&gsc->irq_queue);
goto put_device;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index d2e6cba3566d..f3c6136aa5b4 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -511,7 +511,7 @@ static int __ctrl_set_metering(struct fimc_is *is, unsigned int value)
break;
default:
return -EINVAL;
- };
+ }
__is_set_isp_metering(is, IS_METERING_CONFIG_CMD, val);
return 0;
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index a83511278317..7a4ee4c0449d 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -411,8 +411,8 @@ static int fimc_md_of_add_sensor(struct fimc_md *fmd,
device_lock(&client->dev);
- if (!client->driver ||
- !try_module_get(client->driver->driver.owner)) {
+ if (!client->dev.driver ||
+ !try_module_get(client->dev.driver->owner)) {
ret = -EPROBE_DEFER;
v4l2_info(&fmd->v4l2_dev, "No driver found for %s\n",
node->full_name);
@@ -442,7 +442,7 @@ static int fimc_md_of_add_sensor(struct fimc_md *fmd,
fmd->num_sensors++;
mod_put:
- module_put(client->driver->driver.owner);
+ module_put(client->dev.driver->owner);
dev_put:
device_unlock(&client->dev);
put_device(&client->dev);
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index fe9898ca3c84..6a232239ee8c 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -21,6 +21,8 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 540516ca872c..36513e896413 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -1084,8 +1084,7 @@ free_dev:
static int deinterlace_remove(struct platform_device *pdev)
{
- struct deinterlace_dev *pcdev =
- (struct deinterlace_dev *)platform_get_drvdata(pdev);
+ struct deinterlace_dev *pcdev = platform_get_drvdata(pdev);
v4l2_info(&pcdev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
v4l2_m2m_release(pcdev->m2m_dev);
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 5184887b155c..32fab30a9105 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1221,16 +1221,16 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
{
struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
- struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+ struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
struct mcam_dma_desc *desc = mvb->dma_desc;
struct scatterlist *sg;
int i;
- mvb->dma_desc_nent = dma_map_sg(cam->dev, sgd->sglist, sgd->num_pages,
- DMA_FROM_DEVICE);
+ mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl,
+ sg_table->nents, DMA_FROM_DEVICE);
if (mvb->dma_desc_nent <= 0)
return -EIO; /* Not sure what's right here */
- for_each_sg(sgd->sglist, sg, mvb->dma_desc_nent, i) {
+ for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) {
desc->dma_addr = sg_dma_address(sg);
desc->segment_len = sg_dma_len(sg);
desc++;
@@ -1241,9 +1241,11 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
{
struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
- struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+ struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
- dma_unmap_sg(cam->dev, sgd->sglist, sgd->num_pages, DMA_FROM_DEVICE);
+ if (sg_table)
+ dma_unmap_sg(cam->dev, sg_table->sgl,
+ sg_table->nents, DMA_FROM_DEVICE);
return 0;
}
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index b5a19af5c587..3458fa0e2fd5 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -481,7 +481,6 @@ static int mmpcam_remove(struct mmp_camera *cam)
struct mmp_camera_platform_data *pdata;
mmpcam_remove_device(cam);
- free_irq(cam->irq, mcam);
mccic_shutdown(mcam);
mmpcam_power_down(mcam);
pdata = cam->pdev->dev.platform_data;
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c
index 6a17676f9d72..8df5975b700a 100644
--- a/drivers/media/platform/mem2mem_testdev.c
+++ b/drivers/media/platform/mem2mem_testdev.c
@@ -1090,8 +1090,7 @@ unreg_dev:
static int m2mtest_remove(struct platform_device *pdev)
{
- struct m2mtest_dev *dev =
- (struct m2mtest_dev *)platform_get_drvdata(pdev);
+ struct m2mtest_dev *dev = platform_get_drvdata(pdev);
v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
v4l2_m2m_release(dev->m2m_dev);
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index df3a0ec7fd2c..1c3608039663 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2182,9 +2182,9 @@ static int isp_probe(struct platform_device *pdev)
isp->pdata = pdata;
isp->ref_count = 0;
- isp->raw_dmamask = DMA_BIT_MASK(32);
- isp->dev->dma_mask = &isp->raw_dmamask;
- isp->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, isp);
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index cd3eff45ae7d..ce65d3ae1aa7 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -152,7 +152,6 @@ struct isp_xclk {
* @mmio_base_phys: Array with physical L4 bus addresses for ISP register
* regions.
* @mmio_size: Array with ISP register regions size in bytes.
- * @raw_dmamask: Raw DMA mask
* @stat_lock: Spinlock for handling statistics
* @isp_mutex: Mutex for serializing requests to ISP.
* @crashed: Bitmask of crashed entities (indexed by entity ID)
@@ -190,8 +189,6 @@ struct isp_device {
unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
resource_size_t mmio_size[OMAP3_ISP_IOMEM_LAST];
- u64 raw_dmamask;
-
/* ISP Obj */
spinlock_t stat_lock; /* common lock for statistic drivers */
struct mutex isp_mutex; /* For handling ref_count field */
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index fd6289d60cde..0b2948376aee 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -840,7 +840,7 @@ put_clk:
static int g2d_remove(struct platform_device *pdev)
{
- struct g2d_dev *dev = (struct g2d_dev *)platform_get_drvdata(pdev);
+ struct g2d_dev *dev = platform_get_drvdata(pdev);
v4l2_info(&dev->v4l2_dev, "Removing " G2D_NAME);
v4l2_m2m_release(dev->m2m_dev);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 15d23968d1de..9b88a4601007 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1423,6 +1423,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
jpeg->vfd_decoder->release = video_device_release;
jpeg->vfd_decoder->lock = &jpeg->lock;
jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev;
+ jpeg->vfd_decoder->vfl_dir = VFL_DIR_M2M;
ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
if (ret) {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 084263dd126f..5f2c4ad6c2cb 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -404,7 +404,11 @@ leave_handle_frame:
if (test_and_clear_bit(0, &dev->hw_lock) == 0)
BUG();
s5p_mfc_clock_off();
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ /* If suspending, wake up the device and do not try_run again */
+ if (test_bit(0, &dev->enter_suspend))
+ wake_up_dev(dev, reason, err);
+ else
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
}
/* Error handling for interrupt */
@@ -1101,7 +1105,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
}
dev->irq = res->start;
ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
- IRQF_DISABLED, pdev->name, dev);
+ 0, pdev->name, dev);
if (ret) {
dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
goto err_res;
@@ -1286,9 +1290,7 @@ static int s5p_mfc_suspend(struct device *dev)
/* Try and lock the HW */
/* Wait on the interrupt waitqueue */
ret = wait_event_interruptible_timeout(m_dev->queue,
- m_dev->int_cond || m_dev->ctx[m_dev->curr_ctx]->int_cond,
- msecs_to_jiffies(MFC_INT_TIMEOUT));
-
+ m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT));
if (ret == 0) {
mfc_err("Waiting for hardware to finish timed out\n");
return -EIO;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
index ad4f1df0a18e..9a6efd6c1329 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
@@ -111,7 +111,7 @@ static int s5p_mfc_open_inst_cmd_v5(struct s5p_mfc_ctx *ctx)
break;
default:
h2r_args.arg[0] = S5P_FIMV_CODEC_NONE;
- };
+ }
h2r_args.arg[1] = 0; /* no crc & no pixelcache */
h2r_args.arg[2] = ctx->ctx.ofs;
h2r_args.arg[3] = ctx->ctx.size;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
index db796c8e7874..ec1a5947ed7d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
@@ -113,7 +113,7 @@ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
break;
default:
codec_type = S5P_FIMV_CODEC_NONE_V6;
- };
+ }
mfc_write(dev, codec_type, S5P_FIMV_CODEC_TYPE_V6);
mfc_write(dev, ctx->ctx.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6);
mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 41f5a3c10dbd..4ff3b6cd6842 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -113,7 +113,7 @@ static struct mfc_control controls[] = {
.minimum = 0,
.maximum = (1 << 16) - 1,
.step = 1,
- .default_value = 0,
+ .default_value = 12,
},
{
.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
@@ -356,7 +356,7 @@ static struct mfc_control controls[] = {
.minimum = 0,
.maximum = 51,
.step = 1,
- .default_value = 1,
+ .default_value = 51,
},
{
.id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP,
@@ -399,7 +399,7 @@ static struct mfc_control controls[] = {
.minimum = 1,
.maximum = 31,
.step = 1,
- .default_value = 1,
+ .default_value = 31,
},
{
.id = V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP,
@@ -444,7 +444,7 @@ static struct mfc_control controls[] = {
.minimum = 0,
.maximum = 51,
.step = 1,
- .default_value = 1,
+ .default_value = 51,
},
{
.id = V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 368582b091bf..58ec7bb26ebc 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1582,7 +1582,7 @@ static int s5p_mfc_get_int_reason_v5(struct s5p_mfc_dev *dev)
break;
default:
reason = S5P_MFC_R2H_CMD_EMPTY;
- };
+ }
return reason;
}
diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
index b93a21f5aa13..74344c764daa 100644
--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
@@ -226,7 +226,7 @@ static void mxr_graph_fix_geometry(struct mxr_layer *layer,
src->width + src->x_offset, 32767);
src->full_height = clamp_val(src->full_height,
src->height + src->y_offset, 2047);
- };
+ }
}
/* PUBLIC API */
diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
index 3d13a636877b..c9388c45ad75 100644
--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
@@ -197,7 +197,7 @@ static void mxr_vp_fix_geometry(struct mxr_layer *layer,
ALIGN(src->width + src->x_offset, 8), 8192U);
src->full_height = clamp(src->full_height,
src->height + src->y_offset, 8192U);
- };
+ }
}
/* PUBLIC API */
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 7a9c5e9329f2..4f30341dc2ab 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -776,7 +776,7 @@ static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
&pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
- for (i = 0; ARRAY_SIZE(vou_fmt); i++)
+ for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
if (vou_fmt[i].pfmt == pix->pixelformat)
return 0;
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index af39c4665554..df11f69aeba5 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -47,6 +47,7 @@ config VIDEO_PXA27x
config VIDEO_RCAR_VIN
tristate "R-Car Video Input (VIN) support"
depends on VIDEO_DEV && SOC_CAMERA
+ depends on !CRIS && !FRV
select VIDEOBUF2_DMA_CONTIG
select SOC_CAMERA_SCALE_CROP
---help---
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index 8f9f6211c52e..f975b7008692 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -266,7 +266,6 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
struct idmac_video_param *video = &ichan->params.video;
const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt;
- unsigned long flags;
dma_cookie_t cookie;
size_t new_size;
@@ -328,7 +327,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
#endif
- spin_lock_irqsave(&mx3_cam->lock, flags);
+ spin_lock_irq(&mx3_cam->lock);
list_add_tail(&buf->queue, &mx3_cam->capture);
if (!mx3_cam->active)
@@ -351,7 +350,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
if (mx3_cam->active == buf)
mx3_cam->active = NULL;
- spin_unlock_irqrestore(&mx3_cam->lock, flags);
+ spin_unlock_irq(&mx3_cam->lock);
error:
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index d02a7e0b773f..b21f777f55e7 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -105,6 +105,7 @@
#define VIN_MAX_HEIGHT 2048
enum chip_id {
+ RCAR_H2,
RCAR_H1,
RCAR_M1,
RCAR_E1,
@@ -300,7 +301,8 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
dmr = 0;
break;
case V4L2_PIX_FMT_RGB32:
- if (priv->chip == RCAR_H1 || priv->chip == RCAR_E1) {
+ if (priv->chip == RCAR_H2 || priv->chip == RCAR_H1 ||
+ priv->chip == RCAR_E1) {
dmr = VNDMR_EXRGB;
break;
}
@@ -1381,6 +1383,7 @@ static struct soc_camera_host_ops rcar_vin_host_ops = {
};
static struct platform_device_id rcar_vin_id_table[] = {
+ { "r8a7790-vin", RCAR_H2 },
{ "r8a7779-vin", RCAR_H1 },
{ "r8a7778-vin", RCAR_M1 },
{ "uPD35004-vin", RCAR_E1 },
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 8df22f779175..150bd4df413c 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1800,7 +1800,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
/* request irq */
err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq,
- IRQF_DISABLED, dev_name(&pdev->dev), pcdev);
+ 0, dev_name(&pdev->dev), pcdev);
if (err) {
dev_err(&pdev->dev, "Unable to register CEU interrupt.\n");
goto exit_release_mem;
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 387a232d95a4..4b8c024fc487 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -71,13 +71,23 @@ static int video_dev_create(struct soc_camera_device *icd);
int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd,
struct v4l2_clk *clk)
{
- int ret = clk ? v4l2_clk_enable(clk) : 0;
- if (ret < 0) {
- dev_err(dev, "Cannot enable clock: %d\n", ret);
- return ret;
+ int ret;
+ bool clock_toggle;
+
+ if (clk && (!ssdd->unbalanced_power ||
+ !test_and_set_bit(0, &ssdd->clock_state))) {
+ ret = v4l2_clk_enable(clk);
+ if (ret < 0) {
+ dev_err(dev, "Cannot enable clock: %d\n", ret);
+ return ret;
+ }
+ clock_toggle = true;
+ } else {
+ clock_toggle = false;
}
- ret = regulator_bulk_enable(ssdd->num_regulators,
- ssdd->regulators);
+
+ ret = regulator_bulk_enable(ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
if (ret < 0) {
dev_err(dev, "Cannot enable regulators\n");
goto eregenable;
@@ -95,10 +105,10 @@ int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd,
return 0;
epwron:
- regulator_bulk_disable(ssdd->num_regulators,
- ssdd->regulators);
+ regulator_bulk_disable(ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
eregenable:
- if (clk)
+ if (clock_toggle)
v4l2_clk_disable(clk);
return ret;
@@ -120,14 +130,14 @@ int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd
}
}
- err = regulator_bulk_disable(ssdd->num_regulators,
- ssdd->regulators);
+ err = regulator_bulk_disable(ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
if (err < 0) {
dev_err(dev, "Cannot disable regulators\n");
ret = ret ? : err;
}
- if (clk)
+ if (clk && (!ssdd->unbalanced_power || test_and_clear_bit(0, &ssdd->clock_state)))
v4l2_clk_disable(clk);
return ret;
@@ -137,8 +147,8 @@ EXPORT_SYMBOL(soc_camera_power_off);
int soc_camera_power_init(struct device *dev, struct soc_camera_subdev_desc *ssdd)
{
/* Should not have any effect in synchronous case */
- return devm_regulator_bulk_get(dev, ssdd->num_regulators,
- ssdd->regulators);
+ return devm_regulator_bulk_get(dev, ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
}
EXPORT_SYMBOL(soc_camera_power_init);
@@ -1346,8 +1356,8 @@ static int soc_camera_i2c_init(struct soc_camera_device *icd,
* soc_camera_pdrv_probe(), make sure the subdevice driver doesn't try
* to allocate them again.
*/
- ssdd->num_regulators = 0;
- ssdd->regulators = NULL;
+ ssdd->sd_pdata.num_regulators = 0;
+ ssdd->sd_pdata.regulators = NULL;
shd->board_info->platform_data = ssdd;
snprintf(clk_name, sizeof(clk_name), "%d-%04x",
@@ -2020,8 +2030,8 @@ static int soc_camera_pdrv_probe(struct platform_device *pdev)
* that case regulators are attached to the I2C device and not to the
* camera platform device.
*/
- ret = devm_regulator_bulk_get(&pdev->dev, ssdd->num_regulators,
- ssdd->regulators);
+ ret = devm_regulator_bulk_get(&pdev->dev, ssdd->sd_pdata.num_regulators,
+ ssdd->sd_pdata.regulators);
if (ret < 0)
return ret;
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile
new file mode 100644
index 000000000000..cbf0a806ba1d
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
+
+ti-vpe-y := vpe.o vpdma.o
+
+ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
new file mode 100644
index 000000000000..af0a5ffcaa98
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -0,0 +1,846 @@
+/*
+ * VPDMA helper library
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include "vpdma.h"
+#include "vpdma_priv.h"
+
+#define VPDMA_FIRMWARE "vpdma-1b8.bin"
+
+const struct vpdma_data_format vpdma_yuv_fmts[] = {
+ [VPDMA_DATA_FMT_Y444] = {
+ .data_type = DATA_TYPE_Y444,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_Y422] = {
+ .data_type = DATA_TYPE_Y422,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_Y420] = {
+ .data_type = DATA_TYPE_Y420,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_C444] = {
+ .data_type = DATA_TYPE_C444,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_C422] = {
+ .data_type = DATA_TYPE_C422,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_C420] = {
+ .data_type = DATA_TYPE_C420,
+ .depth = 4,
+ },
+ [VPDMA_DATA_FMT_YC422] = {
+ .data_type = DATA_TYPE_YC422,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_YC444] = {
+ .data_type = DATA_TYPE_YC444,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_CY422] = {
+ .data_type = DATA_TYPE_CY422,
+ .depth = 16,
+ },
+};
+
+const struct vpdma_data_format vpdma_rgb_fmts[] = {
+ [VPDMA_DATA_FMT_RGB565] = {
+ .data_type = DATA_TYPE_RGB16_565,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ARGB16_1555] = {
+ .data_type = DATA_TYPE_ARGB_1555,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ARGB16] = {
+ .data_type = DATA_TYPE_ARGB_4444,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_RGBA16_5551] = {
+ .data_type = DATA_TYPE_RGBA_5551,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_RGBA16] = {
+ .data_type = DATA_TYPE_RGBA_4444,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ARGB24] = {
+ .data_type = DATA_TYPE_ARGB24_6666,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_RGB24] = {
+ .data_type = DATA_TYPE_RGB24_888,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_ARGB32] = {
+ .data_type = DATA_TYPE_ARGB32_8888,
+ .depth = 32,
+ },
+ [VPDMA_DATA_FMT_RGBA24] = {
+ .data_type = DATA_TYPE_RGBA24_6666,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_RGBA32] = {
+ .data_type = DATA_TYPE_RGBA32_8888,
+ .depth = 32,
+ },
+ [VPDMA_DATA_FMT_BGR565] = {
+ .data_type = DATA_TYPE_BGR16_565,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ABGR16_1555] = {
+ .data_type = DATA_TYPE_ABGR_1555,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ABGR16] = {
+ .data_type = DATA_TYPE_ABGR_4444,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_BGRA16_5551] = {
+ .data_type = DATA_TYPE_BGRA_5551,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_BGRA16] = {
+ .data_type = DATA_TYPE_BGRA_4444,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_ABGR24] = {
+ .data_type = DATA_TYPE_ABGR24_6666,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_BGR24] = {
+ .data_type = DATA_TYPE_BGR24_888,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_ABGR32] = {
+ .data_type = DATA_TYPE_ABGR32_8888,
+ .depth = 32,
+ },
+ [VPDMA_DATA_FMT_BGRA24] = {
+ .data_type = DATA_TYPE_BGRA24_6666,
+ .depth = 24,
+ },
+ [VPDMA_DATA_FMT_BGRA32] = {
+ .data_type = DATA_TYPE_BGRA32_8888,
+ .depth = 32,
+ },
+};
+
+const struct vpdma_data_format vpdma_misc_fmts[] = {
+ [VPDMA_DATA_FMT_MV] = {
+ .data_type = DATA_TYPE_MV,
+ .depth = 4,
+ },
+};
+
+struct vpdma_channel_info {
+ int num; /* VPDMA channel number */
+ int cstat_offset; /* client CSTAT register offset */
+};
+
+static const struct vpdma_channel_info chan_info[] = {
+ [VPE_CHAN_LUMA1_IN] = {
+ .num = VPE_CHAN_NUM_LUMA1_IN,
+ .cstat_offset = VPDMA_DEI_LUMA1_CSTAT,
+ },
+ [VPE_CHAN_CHROMA1_IN] = {
+ .num = VPE_CHAN_NUM_CHROMA1_IN,
+ .cstat_offset = VPDMA_DEI_CHROMA1_CSTAT,
+ },
+ [VPE_CHAN_LUMA2_IN] = {
+ .num = VPE_CHAN_NUM_LUMA2_IN,
+ .cstat_offset = VPDMA_DEI_LUMA2_CSTAT,
+ },
+ [VPE_CHAN_CHROMA2_IN] = {
+ .num = VPE_CHAN_NUM_CHROMA2_IN,
+ .cstat_offset = VPDMA_DEI_CHROMA2_CSTAT,
+ },
+ [VPE_CHAN_LUMA3_IN] = {
+ .num = VPE_CHAN_NUM_LUMA3_IN,
+ .cstat_offset = VPDMA_DEI_LUMA3_CSTAT,
+ },
+ [VPE_CHAN_CHROMA3_IN] = {
+ .num = VPE_CHAN_NUM_CHROMA3_IN,
+ .cstat_offset = VPDMA_DEI_CHROMA3_CSTAT,
+ },
+ [VPE_CHAN_MV_IN] = {
+ .num = VPE_CHAN_NUM_MV_IN,
+ .cstat_offset = VPDMA_DEI_MV_IN_CSTAT,
+ },
+ [VPE_CHAN_MV_OUT] = {
+ .num = VPE_CHAN_NUM_MV_OUT,
+ .cstat_offset = VPDMA_DEI_MV_OUT_CSTAT,
+ },
+ [VPE_CHAN_LUMA_OUT] = {
+ .num = VPE_CHAN_NUM_LUMA_OUT,
+ .cstat_offset = VPDMA_VIP_UP_Y_CSTAT,
+ },
+ [VPE_CHAN_CHROMA_OUT] = {
+ .num = VPE_CHAN_NUM_CHROMA_OUT,
+ .cstat_offset = VPDMA_VIP_UP_UV_CSTAT,
+ },
+ [VPE_CHAN_RGB_OUT] = {
+ .num = VPE_CHAN_NUM_RGB_OUT,
+ .cstat_offset = VPDMA_VIP_UP_Y_CSTAT,
+ },
+};
+
+static u32 read_reg(struct vpdma_data *vpdma, int offset)
+{
+ return ioread32(vpdma->base + offset);
+}
+
+static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
+{
+ iowrite32(value, vpdma->base + offset);
+}
+
+static int read_field_reg(struct vpdma_data *vpdma, int offset,
+ u32 mask, int shift)
+{
+ return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
+}
+
+static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
+ u32 mask, int shift)
+{
+ u32 val = read_reg(vpdma, offset);
+
+ val &= ~(mask << shift);
+ val |= (field & mask) << shift;
+
+ write_reg(vpdma, offset, val);
+}
+
+void vpdma_dump_regs(struct vpdma_data *vpdma)
+{
+ struct device *dev = &vpdma->pdev->dev;
+
+#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))
+
+ dev_dbg(dev, "VPDMA Registers:\n");
+
+ DUMPREG(PID);
+ DUMPREG(LIST_ADDR);
+ DUMPREG(LIST_ATTR);
+ DUMPREG(LIST_STAT_SYNC);
+ DUMPREG(BG_RGB);
+ DUMPREG(BG_YUV);
+ DUMPREG(SETUP);
+ DUMPREG(MAX_SIZE1);
+ DUMPREG(MAX_SIZE2);
+ DUMPREG(MAX_SIZE3);
+
+ /*
+ * dump only the group0 and group3 registers, because the VPE channels
+ * lie within those groups
+ */
+ DUMPREG(INT_CHAN_STAT(0));
+ DUMPREG(INT_CHAN_MASK(0));
+ DUMPREG(INT_CHAN_STAT(3));
+ DUMPREG(INT_CHAN_MASK(3));
+ DUMPREG(INT_CLIENT0_STAT);
+ DUMPREG(INT_CLIENT0_MASK);
+ DUMPREG(INT_CLIENT1_STAT);
+ DUMPREG(INT_CLIENT1_MASK);
+ DUMPREG(INT_LIST0_STAT);
+ DUMPREG(INT_LIST0_MASK);
+
+ /*
+ * these registers are specific to VPE clients; this function could be
+ * made to dump either VPE or VIP client registers, depending on who is
+ * using it
+ */
+ DUMPREG(DEI_CHROMA1_CSTAT);
+ DUMPREG(DEI_LUMA1_CSTAT);
+ DUMPREG(DEI_CHROMA2_CSTAT);
+ DUMPREG(DEI_LUMA2_CSTAT);
+ DUMPREG(DEI_CHROMA3_CSTAT);
+ DUMPREG(DEI_LUMA3_CSTAT);
+ DUMPREG(DEI_MV_IN_CSTAT);
+ DUMPREG(DEI_MV_OUT_CSTAT);
+ DUMPREG(VIP_UP_Y_CSTAT);
+ DUMPREG(VIP_UP_UV_CSTAT);
+ DUMPREG(VPI_CTL_CSTAT);
+}
+
+/*
+ * Allocate a DMA buffer
+ */
+int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
+{
+ buf->size = size;
+ buf->mapped = false;
+ buf->addr = kzalloc(size, GFP_KERNEL);
+ if (!buf->addr)
+ return -ENOMEM;
+
+ WARN_ON((u32) buf->addr & VPDMA_DESC_ALIGN);
+
+ return 0;
+}
+
+void vpdma_free_desc_buf(struct vpdma_buf *buf)
+{
+ WARN_ON(buf->mapped);
+ kfree(buf->addr);
+ buf->addr = NULL;
+ buf->size = 0;
+}
+
+/*
+ * map descriptor/payload DMA buffer, enabling DMA access
+ */
+int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
+{
+ struct device *dev = &vpdma->pdev->dev;
+
+ WARN_ON(buf->mapped);
+ buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, buf->dma_addr)) {
+ dev_err(dev, "failed to map buffer\n");
+ return -EINVAL;
+ }
+
+ buf->mapped = true;
+
+ return 0;
+}
+
+/*
+ * unmap descriptor/payload DMA buffer, disabling DMA access and
+ * allowing the main processor to access the data
+ */
+void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
+{
+ struct device *dev = &vpdma->pdev->dev;
+
+ if (buf->mapped)
+ dma_unmap_single(dev, buf->dma_addr, buf->size, DMA_TO_DEVICE);
+
+ buf->mapped = false;
+}
+
+/*
+ * create a descriptor list; the user of this list appends configuration,
+ * control and data descriptors to it and then submits it to VPDMA.
+ * VPDMA's list parser goes through each descriptor and performs the
+ * required DMA operations
+ */
+int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
+{
+ int r;
+
+ r = vpdma_alloc_desc_buf(&list->buf, size);
+ if (r)
+ return r;
+
+ list->next = list->buf.addr;
+
+ list->type = type;
+
+ return 0;
+}
+
+/*
+ * once a descriptor list is parsed by VPDMA, we reset the list by emptying it,
+ * to allow new descriptors to be added to the list.
+ */
+void vpdma_reset_desc_list(struct vpdma_desc_list *list)
+{
+ list->next = list->buf.addr;
+}
+
+/*
+ * free the buffer allocated for the VPDMA descriptor list; this should be
+ * called when the user no longer needs VPDMA.
+ */
+void vpdma_free_desc_list(struct vpdma_desc_list *list)
+{
+ vpdma_free_desc_buf(&list->buf);
+
+ list->next = NULL;
+}
+
+static bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
+{
+ return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
+}
+
+/*
+ * submit a list of DMA descriptors to the VPE VPDMA; do not wait for completion
+ */
+int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list)
+{
+ /* we always use the first list */
+ int list_num = 0;
+ int list_size;
+
+ if (vpdma_list_busy(vpdma, list_num))
+ return -EBUSY;
+
+ /* 16-byte granularity */
+ list_size = (list->next - list->buf.addr) >> 4;
+
+ write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);
+
+ write_reg(vpdma, VPDMA_LIST_ATTR,
+ (list_num << VPDMA_LIST_NUM_SHFT) |
+ (list->type << VPDMA_LIST_TYPE_SHFT) |
+ list_size);
+
+ return 0;
+}
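
The comments above describe the intended flow: a driver creates a list, appends descriptors, maps the backing buffer, submits the list, and reuses it once VPDMA has parsed it. Below is a minimal sketch of that lifecycle using only the helpers defined in this file; the wrapper name, the 4096-byte list size and the list_type value are illustrative placeholders, and the struct vpdma_data handle is assumed to come from the driver using this library.

static int example_run_list(struct vpdma_data *vpdma, int list_type)
{
	struct vpdma_desc_list list;
	int ret;

	ret = vpdma_create_desc_list(&list, 4096, list_type);
	if (ret)
		return ret;

	/* append configuration, control and data descriptors here */

	ret = vpdma_map_desc_buf(vpdma, &list.buf);
	if (ret)
		goto free_list;

	/* returns -EBUSY if list 0 is still being parsed by VPDMA */
	ret = vpdma_submit_descs(vpdma, &list);

	/*
	 * after the list-complete interrupt: make the buffer CPU-visible
	 * again and empty the list so new descriptors can be appended
	 */
	vpdma_unmap_desc_buf(vpdma, &list.buf);
	vpdma_reset_desc_list(&list);

free_list:
	vpdma_free_desc_list(&list);
	return ret;
}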
+
+static void dump_cfd(struct vpdma_cfd *cfd)
+{
+ int class;
+
+ class = cfd_get_class(cfd);
+
+ pr_debug("config descriptor of payload class: %s\n",
+ class == CFD_CLS_BLOCK ? "simple block" :
+ "address data block");
+
+ if (class == CFD_CLS_BLOCK)
+ pr_debug("word0: dst_addr_offset = 0x%08x\n",
+ cfd->dest_addr_offset);
+
+ if (class == CFD_CLS_BLOCK)
+ pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);
+
+ pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);
+
+ pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, "
+ "payload_len = %d\n", cfd_get_pkt_type(cfd),
+ cfd_get_direct(cfd), class, cfd_get_dest(cfd),
+ cfd_get_payload_len(cfd));
+}
+
+/*
+ * append a configuration descriptor to the given descriptor list, where the
+ * payload is in the form of a simple data block specified in the descriptor
+ * header; this is used to upload scaler coefficients to the scaler module
+ */
+void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
+ struct vpdma_buf *blk, u32 dest_offset)
+{
+ struct vpdma_cfd *cfd;
+ int len = blk->size;
+
+ WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);
+
+ cfd = list->next;
+ WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
+
+ cfd->dest_addr_offset = dest_offset;
+ cfd->block_len = len;
+ cfd->payload_addr = (u32) blk->dma_addr;
+ cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
+ client, len >> 4);
+
+ list->next = cfd + 1;
+
+ dump_cfd(cfd);
+}
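
As a sketch of the simple-block use described above, the following shows a block of scaler coefficients being handed to vpdma_add_cfd_block(). The function name, the sc_client id and the dest_offset value are hypothetical placeholders; the caller owns blk and must keep it mapped until the list containing this descriptor has been submitted and parsed.

static int example_load_coeffs(struct vpdma_data *vpdma,
			       struct vpdma_desc_list *list,
			       struct vpdma_buf *blk,
			       const void *coeffs, size_t size,
			       int sc_client, u32 dest_offset)
{
	int ret;

	ret = vpdma_alloc_desc_buf(blk, size);
	if (ret)
		return ret;

	memcpy(blk->addr, coeffs, size);

	ret = vpdma_map_desc_buf(vpdma, blk);
	if (ret) {
		vpdma_free_desc_buf(blk);
		return ret;
	}

	/* the helper programs the payload length in 16-byte units */
	vpdma_add_cfd_block(list, sc_client, blk, dest_offset);

	return 0;
}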
+
+/*
+ * append a configuration descriptor to the given descriptor list, where the
+ * payload is in the address data block format; this is used to configure a
+ * discontiguous set of MMRs
+ */
+void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
+ struct vpdma_buf *adb)
+{
+ struct vpdma_cfd *cfd;
+ unsigned int len = adb->size;
+
+ WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
+ WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);
+
+ cfd = list->next;
+ BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
+
+ cfd->w0 = 0;
+ cfd->w1 = 0;
+ cfd->payload_addr = (u32) adb->dma_addr;
+ cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
+ client, len >> 4);
+
+ list->next = cfd + 1;
+
+ dump_cfd(cfd);
+}
+
+/*
+ * the control descriptor format changes based on the type of control
+ * descriptor; we only use 'sync on channel' control descriptors for now, so
+ * assume that type here
+ */
+static void dump_ctd(struct vpdma_ctd *ctd)
+{
+ pr_debug("control descriptor\n");
+
+ pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n",
+ ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd));
+}
+
+/*
+ * append a 'sync on channel' type control descriptor to the given descriptor
+ * list. This descriptor stalls the VPDMA list until DMA is completed
+ * on the specified channel
+ */
+void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
+ enum vpdma_channel chan)
+{
+ struct vpdma_ctd *ctd;
+
+ ctd = list->next;
+ WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
+
+ ctd->w0 = 0;
+ ctd->w1 = 0;
+ ctd->w2 = 0;
+ ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
+ CTD_TYPE_SYNC_ON_CHANNEL);
+
+ list->next = ctd + 1;
+
+ dump_ctd(ctd);
+}
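+
+/*
+ * As an illustration (not taken from this patch), a list can be made to
+ * stall until particular input transfers have finished before the following
+ * descriptors are processed:
+ *
+ *     vpdma_add_sync_on_channel_ctd(&list, VPE_CHAN_LUMA1_IN);
+ *     vpdma_add_sync_on_channel_ctd(&list, VPE_CHAN_CHROMA1_IN);
+ */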
+
+static void dump_dtd(struct vpdma_dtd *dtd)
+{
+ int dir, chan;
+
+ dir = dtd_get_dir(dtd);
+ chan = dtd_get_chan(dtd);
+
+ pr_debug("%s data transfer descriptor for channel %d\n",
+ dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);
+
+ pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, "
+ "even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
+ dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
+ dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
+ dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));
+
+ if (dir == DTD_DIR_IN)
+ pr_debug("word1: line_length = %d, xfer_height = %d\n",
+ dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));
+
+ pr_debug("word2: start_addr = 0x%08x\n", dtd->start_addr);
+
+ pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, "
+ "pri = %d, next_chan = %d\n", dtd_get_pkt_type(dtd),
+ dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
+ dtd_get_next_chan(dtd));
+
+ if (dir == DTD_DIR_IN)
+ pr_debug("word4: frame_width = %d, frame_height = %d\n",
+ dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
+ else
+ pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, "
+ "drp_data = %d, use_desc_reg = %d\n",
+ dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
+ dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));
+
+ if (dir == DTD_DIR_IN)
+ pr_debug("word5: hor_start = %d, ver_start = %d\n",
+ dtd_get_h_start(dtd), dtd_get_v_start(dtd));
+ else
+ pr_debug("word5: max_width %d, max_height %d\n",
+ dtd_get_max_width(dtd), dtd_get_max_height(dtd));
+
+ pr_debug("word6: client specfic attr0 = 0x%08x\n", dtd->client_attr0);
+ pr_debug("word7: client specfic attr1 = 0x%08x\n", dtd->client_attr1);
+}
+
+/*
+ * append an outbound data transfer descriptor to the given descriptor list;
+ * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel
+ */
+void vpdma_add_out_dtd(struct vpdma_desc_list *list, struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ enum vpdma_channel chan, u32 flags)
+{
+ int priority = 0;
+ int field = 0;
+ int notify = 1;
+ int channel, next_chan;
+ int depth = fmt->depth;
+ int stride;
+ struct vpdma_dtd *dtd;
+
+ channel = next_chan = chan_info[chan].num;
+
+ if (fmt->data_type == DATA_TYPE_C420)
+ depth = 8;
+
+ stride = (depth * c_rect->width) >> 3;
+ dma_addr += (c_rect->left * depth) >> 3;
+
+ dtd = list->next;
+ WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));
+
+ dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
+ notify,
+ field,
+ !!(flags & VPDMA_DATA_FRAME_1D),
+ !!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
+ !!(flags & VPDMA_DATA_ODD_LINE_SKIP),
+ stride);
+ dtd->w1 = 0;
+ dtd->start_addr = (u32) dma_addr;
+ dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
+ DTD_DIR_OUT, channel, priority, next_chan);
+ dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
+ dtd->max_width_height = dtd_max_width_height(MAX_OUT_WIDTH_1920,
+ MAX_OUT_HEIGHT_1080);
+ dtd->client_attr0 = 0;
+ dtd->client_attr1 = 0;
+
+ list->next = dtd + 1;
+
+ dump_dtd(dtd);
+}
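+
+/*
+ * A worked example of the stride/offset math above, assuming a packed 4:2:2
+ * format with depth = 16 bits per pixel: a crop rectangle of width 1280
+ * gives a line stride of (16 * 1280) >> 3 = 2560 bytes, and c_rect->left of
+ * 64 pixels advances the DMA address by (64 * 16) >> 3 = 128 bytes.
+ */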
+
+/*
+ * append an inbound data transfer descriptor to the given descriptor list;
+ * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel
+ */
+void vpdma_add_in_dtd(struct vpdma_desc_list *list, int frame_width,
+ int frame_height, struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ enum vpdma_channel chan, int field, u32 flags)
+{
+ int priority = 0;
+ int notify = 1;
+ int depth = fmt->depth;
+ int channel, next_chan;
+ int stride;
+ int height = c_rect->height;
+ struct vpdma_dtd *dtd;
+
+ channel = next_chan = chan_info[chan].num;
+
+ if (fmt->data_type == DATA_TYPE_C420) {
+ height >>= 1;
+ frame_height >>= 1;
+ depth = 8;
+ }
+
+ stride = (depth * c_rect->width) >> 3;
+ dma_addr += (c_rect->left * depth) >> 3;
+
+ dtd = list->next;
+ WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));
+
+ dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
+ notify,
+ field,
+ !!(flags & VPDMA_DATA_FRAME_1D),
+ !!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
+ !!(flags & VPDMA_DATA_ODD_LINE_SKIP),
+ stride);
+
+ dtd->xfer_length_height = dtd_xfer_length_height(c_rect->width, height);
+ dtd->start_addr = (u32) dma_addr;
+ dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
+ DTD_DIR_IN, channel, priority, next_chan);
+ dtd->frame_width_height = dtd_frame_width_height(frame_width,
+ frame_height);
+ dtd->start_h_v = dtd_start_h_v(c_rect->left, c_rect->top);
+ dtd->client_attr0 = 0;
+ dtd->client_attr1 = 0;
+
+ list->next = dtd + 1;
+
+ dump_dtd(dtd);
+}
+
+/* set or clear the mask for list complete interrupt */
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
+ bool enable)
+{
+ u32 val;
+
+ val = read_reg(vpdma, VPDMA_INT_LIST0_MASK);
+ if (enable)
+ val |= (1 << (list_num * 2));
+ else
+ val &= ~(1 << (list_num * 2));
+ write_reg(vpdma, VPDMA_INT_LIST0_MASK, val);
+}
+
+/* clear previously occurred list interrupts in the LIST_STAT register */
+void vpdma_clear_list_stat(struct vpdma_data *vpdma)
+{
+ write_reg(vpdma, VPDMA_INT_LIST0_STAT,
+ read_reg(vpdma, VPDMA_INT_LIST0_STAT));
+}
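+
+/*
+ * A hedged sketch of how a user might wire these up (the handler below and
+ * its surroundings are assumptions, not part of this file): enable the list
+ * complete interrupt for list 0, then acknowledge it in the interrupt
+ * handler before reusing the list:
+ *
+ *     vpdma_enable_list_complete_irq(vpdma, 0, true);
+ *     ...
+ *     // in the user's irq handler:
+ *     vpdma_clear_list_stat(vpdma);
+ *     vpdma_reset_desc_list(&list);
+ */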
+
+/*
+ * configures the output mode of the line buffer for the given client: the
+ * line buffer content can either be mirrored (each line repeated twice) or
+ * passed to the client as is
+ */
+void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
+ enum vpdma_channel chan)
+{
+ int client_cstat = chan_info[chan].cstat_offset;
+
+ write_field_reg(vpdma, client_cstat, line_mode,
+ VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
+}
+
+/*
+ * configures the event that should trigger a VPDMA transfer for the given
+ * client
+ */
+void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
+ enum vpdma_frame_start_event fs_event,
+ enum vpdma_channel chan)
+{
+ int client_cstat = chan_info[chan].cstat_offset;
+
+ write_field_reg(vpdma, client_cstat, fs_event,
+ VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
+}
+
+static void vpdma_firmware_cb(const struct firmware *f, void *context)
+{
+ struct vpdma_data *vpdma = context;
+ struct vpdma_buf fw_dma_buf;
+ int i, r;
+
+ dev_dbg(&vpdma->pdev->dev, "firmware callback\n");
+
+ if (!f || !f->data) {
+ dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
+ return;
+ }
+
+ /* already initialized */
+ if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
+ VPDMA_LIST_RDY_SHFT)) {
+ vpdma->ready = true;
+ return;
+ }
+
+ r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
+ if (r) {
+ dev_err(&vpdma->pdev->dev,
+ "failed to allocate dma buffer for firmware\n");
+ goto rel_fw;
+ }
+
+ memcpy(fw_dma_buf.addr, f->data, f->size);
+
+ vpdma_map_desc_buf(vpdma, &fw_dma_buf);
+
+ write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);
+
+ for (i = 0; i < 100; i++) { /* max 1 second */
+ msleep_interruptible(10);
+
+ if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
+ VPDMA_LIST_RDY_SHFT))
+ break;
+ }
+
+ if (i == 100) {
+ dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
+ goto free_buf;
+ }
+
+ vpdma->ready = true;
+
+free_buf:
+ vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);
+
+ vpdma_free_desc_buf(&fw_dma_buf);
+rel_fw:
+ release_firmware(f);
+}
+
+static int vpdma_load_firmware(struct vpdma_data *vpdma)
+{
+ int r;
+ struct device *dev = &vpdma->pdev->dev;
+
+ r = request_firmware_nowait(THIS_MODULE, 1,
+ (const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
+ vpdma_firmware_cb);
+ if (r) {
+ dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
+ return r;
+ }
+
+ dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);
+
+ return 0;
+}
+
+struct vpdma_data *vpdma_create(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct vpdma_data *vpdma;
+ int r;
+
+ dev_dbg(&pdev->dev, "vpdma_create\n");
+
+ vpdma = devm_kzalloc(&pdev->dev, sizeof(*vpdma), GFP_KERNEL);
+ if (!vpdma) {
+ dev_err(&pdev->dev, "couldn't alloc vpdma_dev\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vpdma->pdev = pdev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
+ if (res == NULL) {
+ dev_err(&pdev->dev, "missing platform resources data\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!vpdma->base) {
+ dev_err(&pdev->dev, "failed to ioremap\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ r = vpdma_load_firmware(vpdma);
+ if (r) {
+ pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
+ return ERR_PTR(r);
+ }
+
+ return vpdma;
+}
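+
+/*
+ * A minimal sketch of how a user such as the VPE driver might call this from
+ * its probe path (the surrounding names are assumptions):
+ *
+ *     dev->vpdma = vpdma_create(pdev);
+ *     if (IS_ERR(dev->vpdma))
+ *             return PTR_ERR(dev->vpdma);
+ */
+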
+MODULE_FIRMWARE(VPDMA_FIRMWARE);
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
new file mode 100644
index 000000000000..eaa2a71a5db9
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_VPDMA_H_
+#define __TI_VPDMA_H_
+
+/*
+ * A vpdma_buf tracks the size, DMA address and mapping status of each
+ * driver DMA area.
+ */
+struct vpdma_buf {
+ void *addr;
+ dma_addr_t dma_addr;
+ size_t size;
+ bool mapped;
+};
+
+struct vpdma_desc_list {
+ struct vpdma_buf buf;
+ void *next;
+ int type;
+};
+
+struct vpdma_data {
+ void __iomem *base;
+
+ struct platform_device *pdev;
+
+ /* tells whether vpdma firmware is loaded or not */
+ bool ready;
+};
+
+struct vpdma_data_format {
+ int data_type;
+ u8 depth;
+};
+
+#define VPDMA_DESC_ALIGN 16 /* 16-byte descriptor alignment */
+
+#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
+#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */
+
+#define VPDMA_LIST_TYPE_NORMAL 0
+#define VPDMA_LIST_TYPE_SELF_MODIFYING 1
+#define VPDMA_LIST_TYPE_DOORBELL 2
+
+enum vpdma_yuv_formats {
+ VPDMA_DATA_FMT_Y444 = 0,
+ VPDMA_DATA_FMT_Y422,
+ VPDMA_DATA_FMT_Y420,
+ VPDMA_DATA_FMT_C444,
+ VPDMA_DATA_FMT_C422,
+ VPDMA_DATA_FMT_C420,
+ VPDMA_DATA_FMT_YC422,
+ VPDMA_DATA_FMT_YC444,
+ VPDMA_DATA_FMT_CY422,
+};
+
+enum vpdma_rgb_formats {
+ VPDMA_DATA_FMT_RGB565 = 0,
+ VPDMA_DATA_FMT_ARGB16_1555,
+ VPDMA_DATA_FMT_ARGB16,
+ VPDMA_DATA_FMT_RGBA16_5551,
+ VPDMA_DATA_FMT_RGBA16,
+ VPDMA_DATA_FMT_ARGB24,
+ VPDMA_DATA_FMT_RGB24,
+ VPDMA_DATA_FMT_ARGB32,
+ VPDMA_DATA_FMT_RGBA24,
+ VPDMA_DATA_FMT_RGBA32,
+ VPDMA_DATA_FMT_BGR565,
+ VPDMA_DATA_FMT_ABGR16_1555,
+ VPDMA_DATA_FMT_ABGR16,
+ VPDMA_DATA_FMT_BGRA16_5551,
+ VPDMA_DATA_FMT_BGRA16,
+ VPDMA_DATA_FMT_ABGR24,
+ VPDMA_DATA_FMT_BGR24,
+ VPDMA_DATA_FMT_ABGR32,
+ VPDMA_DATA_FMT_BGRA24,
+ VPDMA_DATA_FMT_BGRA32,
+};
+
+enum vpdma_misc_formats {
+ VPDMA_DATA_FMT_MV = 0,
+};
+
+extern const struct vpdma_data_format vpdma_yuv_fmts[];
+extern const struct vpdma_data_format vpdma_rgb_fmts[];
+extern const struct vpdma_data_format vpdma_misc_fmts[];
+
+enum vpdma_frame_start_event {
+ VPDMA_FSEVENT_HDMI_FID = 0,
+ VPDMA_FSEVENT_DVO2_FID,
+ VPDMA_FSEVENT_HDCOMP_FID,
+ VPDMA_FSEVENT_SD_FID,
+ VPDMA_FSEVENT_LM_FID0,
+ VPDMA_FSEVENT_LM_FID1,
+ VPDMA_FSEVENT_LM_FID2,
+ VPDMA_FSEVENT_CHANNEL_ACTIVE,
+};
+
+/*
+ * VPDMA channel numbers
+ */
+enum vpdma_channel {
+ VPE_CHAN_LUMA1_IN,
+ VPE_CHAN_CHROMA1_IN,
+ VPE_CHAN_LUMA2_IN,
+ VPE_CHAN_CHROMA2_IN,
+ VPE_CHAN_LUMA3_IN,
+ VPE_CHAN_CHROMA3_IN,
+ VPE_CHAN_MV_IN,
+ VPE_CHAN_MV_OUT,
+ VPE_CHAN_LUMA_OUT,
+ VPE_CHAN_CHROMA_OUT,
+ VPE_CHAN_RGB_OUT,
+};
+
+/* flags for VPDMA data descriptors */
+#define VPDMA_DATA_ODD_LINE_SKIP (1 << 0)
+#define VPDMA_DATA_EVEN_LINE_SKIP (1 << 1)
+#define VPDMA_DATA_FRAME_1D (1 << 2)
+#define VPDMA_DATA_MODE_TILED (1 << 3)
+
+/*
+ * client identifiers used for configuration descriptors
+ */
+#define CFD_MMR_CLIENT 0
+#define CFD_SC_CLIENT 4
+
+/* Address data block header format */
+struct vpdma_adb_hdr {
+ u32 offset;
+ u32 nwords;
+ u32 reserved0;
+ u32 reserved1;
+};
+
+/* helpers for creating ADB headers for config descriptors with MMRs as client */
+#define ADB_ADDR(dma_buf, str, fld) ((dma_buf)->addr + offsetof(str, fld))
+#define MMR_ADB_ADDR(buf, str, fld) ADB_ADDR(&(buf), struct str, fld)
+
+#define VPDMA_SET_MMR_ADB_HDR(buf, str, hdr, regs, offset_a) \
+ do { \
+ struct vpdma_adb_hdr *h; \
+ struct str *adb = NULL; \
+ h = MMR_ADB_ADDR(buf, str, hdr); \
+ h->offset = (offset_a); \
+ h->nwords = sizeof(adb->regs) >> 2; \
+ } while (0)
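+
+/*
+ * Illustrative use of the helpers above (a sketch; the structure, field and
+ * register offset names are assumptions): a driver describes its shadow
+ * registers as header/register pairs and points each header at the MMR
+ * offset that the payload should be written to:
+ *
+ *     struct my_mmr_adb {
+ *             struct vpdma_adb_hdr    blk_hdr;
+ *             u32                     blk_regs[8];
+ *     };
+ *
+ *     VPDMA_SET_MMR_ADB_HDR(adb_buf, my_mmr_adb, blk_hdr, blk_regs,
+ *                           MY_BLOCK_REG_OFFSET);
+ */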
+
+/* vpdma descriptor buffer allocation and management */
+int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size);
+void vpdma_free_desc_buf(struct vpdma_buf *buf);
+int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf);
+void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf);
+
+/* vpdma descriptor list funcs */
+int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type);
+void vpdma_reset_desc_list(struct vpdma_desc_list *list);
+void vpdma_free_desc_list(struct vpdma_desc_list *list);
+int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list);
+
+/* helpers for creating vpdma descriptors */
+void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
+ struct vpdma_buf *blk, u32 dest_offset);
+void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
+ struct vpdma_buf *adb);
+void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
+ enum vpdma_channel chan);
+void vpdma_add_out_dtd(struct vpdma_desc_list *list, struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ enum vpdma_channel chan, u32 flags);
+void vpdma_add_in_dtd(struct vpdma_desc_list *list, int frame_width,
+ int frame_height, struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ enum vpdma_channel chan, int field, u32 flags);
+
+/* vpdma list interrupt management */
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
+ bool enable);
+void vpdma_clear_list_stat(struct vpdma_data *vpdma);
+
+/* vpdma client configuration */
+void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
+ enum vpdma_channel chan);
+void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
+ enum vpdma_frame_start_event fs_event, enum vpdma_channel chan);
+
+void vpdma_dump_regs(struct vpdma_data *vpdma);
+
+/* initialize VPDMA; called with VPE's platform device pointer */
+struct vpdma_data *vpdma_create(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
new file mode 100644
index 000000000000..f0e9a8038c1b
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -0,0 +1,641 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _TI_VPDMA_PRIV_H_
+#define _TI_VPDMA_PRIV_H_
+
+/*
+ * VPDMA Register offsets
+ */
+
+/* Top level */
+#define VPDMA_PID 0x00
+#define VPDMA_LIST_ADDR 0x04
+#define VPDMA_LIST_ATTR 0x08
+#define VPDMA_LIST_STAT_SYNC 0x0c
+#define VPDMA_BG_RGB 0x18
+#define VPDMA_BG_YUV 0x1c
+#define VPDMA_SETUP 0x30
+#define VPDMA_MAX_SIZE1 0x34
+#define VPDMA_MAX_SIZE2 0x38
+#define VPDMA_MAX_SIZE3 0x3c
+
+/* Interrupts */
+#define VPDMA_INT_CHAN_STAT(grp) (0x40 + (grp) * 8)
+#define VPDMA_INT_CHAN_MASK(grp) (VPDMA_INT_CHAN_STAT(grp) + 4)
+#define VPDMA_INT_CLIENT0_STAT 0x78
+#define VPDMA_INT_CLIENT0_MASK 0x7c
+#define VPDMA_INT_CLIENT1_STAT 0x80
+#define VPDMA_INT_CLIENT1_MASK 0x84
+#define VPDMA_INT_LIST0_STAT 0x88
+#define VPDMA_INT_LIST0_MASK 0x8c
+
+#define VPDMA_PERFMON(i) (0x200 + (i) * 4)
+
+/* VPE specific client registers */
+#define VPDMA_DEI_CHROMA1_CSTAT 0x0300
+#define VPDMA_DEI_LUMA1_CSTAT 0x0304
+#define VPDMA_DEI_LUMA2_CSTAT 0x0308
+#define VPDMA_DEI_CHROMA2_CSTAT 0x030c
+#define VPDMA_DEI_LUMA3_CSTAT 0x0310
+#define VPDMA_DEI_CHROMA3_CSTAT 0x0314
+#define VPDMA_DEI_MV_IN_CSTAT 0x0330
+#define VPDMA_DEI_MV_OUT_CSTAT 0x033c
+#define VPDMA_VIP_UP_Y_CSTAT 0x0390
+#define VPDMA_VIP_UP_UV_CSTAT 0x0394
+#define VPDMA_VPI_CTL_CSTAT 0x03d0
+
+/* Reg field info for VPDMA_CLIENT_CSTAT registers */
+#define VPDMA_CSTAT_LINE_MODE_MASK 0x03
+#define VPDMA_CSTAT_LINE_MODE_SHIFT 8
+#define VPDMA_CSTAT_FRAME_START_MASK 0xf
+#define VPDMA_CSTAT_FRAME_START_SHIFT 10
+
+#define VPDMA_LIST_NUM_MASK 0x07
+#define VPDMA_LIST_NUM_SHFT 24
+#define VPDMA_LIST_STOP_SHFT 20
+#define VPDMA_LIST_RDY_MASK 0x01
+#define VPDMA_LIST_RDY_SHFT 19
+#define VPDMA_LIST_TYPE_MASK 0x03
+#define VPDMA_LIST_TYPE_SHFT 16
+#define VPDMA_LIST_SIZE_MASK 0xffff
+
+/* VPDMA data type values for data formats */
+#define DATA_TYPE_Y444 0x0
+#define DATA_TYPE_Y422 0x1
+#define DATA_TYPE_Y420 0x2
+#define DATA_TYPE_C444 0x4
+#define DATA_TYPE_C422 0x5
+#define DATA_TYPE_C420 0x6
+#define DATA_TYPE_YC422 0x7
+#define DATA_TYPE_YC444 0x8
+#define DATA_TYPE_CY422 0x23
+
+#define DATA_TYPE_RGB16_565 0x0
+#define DATA_TYPE_ARGB_1555 0x1
+#define DATA_TYPE_ARGB_4444 0x2
+#define DATA_TYPE_RGBA_5551 0x3
+#define DATA_TYPE_RGBA_4444 0x4
+#define DATA_TYPE_ARGB24_6666 0x5
+#define DATA_TYPE_RGB24_888 0x6
+#define DATA_TYPE_ARGB32_8888 0x7
+#define DATA_TYPE_RGBA24_6666 0x8
+#define DATA_TYPE_RGBA32_8888 0x9
+#define DATA_TYPE_BGR16_565 0x10
+#define DATA_TYPE_ABGR_1555 0x11
+#define DATA_TYPE_ABGR_4444 0x12
+#define DATA_TYPE_BGRA_5551 0x13
+#define DATA_TYPE_BGRA_4444 0x14
+#define DATA_TYPE_ABGR24_6666 0x15
+#define DATA_TYPE_BGR24_888 0x16
+#define DATA_TYPE_ABGR32_8888 0x17
+#define DATA_TYPE_BGRA24_6666 0x18
+#define DATA_TYPE_BGRA32_8888 0x19
+
+#define DATA_TYPE_MV 0x3
+
+/* VPDMA channel numbers (only VPE channels for now) */
+#define VPE_CHAN_NUM_LUMA1_IN 0
+#define VPE_CHAN_NUM_CHROMA1_IN 1
+#define VPE_CHAN_NUM_LUMA2_IN 2
+#define VPE_CHAN_NUM_CHROMA2_IN 3
+#define VPE_CHAN_NUM_LUMA3_IN 4
+#define VPE_CHAN_NUM_CHROMA3_IN 5
+#define VPE_CHAN_NUM_MV_IN 12
+#define VPE_CHAN_NUM_MV_OUT 15
+#define VPE_CHAN_NUM_LUMA_OUT 102
+#define VPE_CHAN_NUM_CHROMA_OUT 103
+#define VPE_CHAN_NUM_RGB_OUT 106
+
+/*
+ * a VPDMA address data block payload for a configuration descriptor needs to
+ * have each sub block length as a multiple of 16 bytes. Therefore, the overall
+ * size of the payload also needs to be a multiple of 16 bytes. The VPDMA
+ * user must ensure that the sub block lengths are aligned accordingly.
+ */
+#define VPDMA_ADB_SIZE_ALIGN 0x0f
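+
+/*
+ * For example, masking a length with VPDMA_ADB_SIZE_ALIGN (0x0f) is non-zero
+ * only when the length is not a multiple of 16 bytes, which is how
+ * vpdma_add_cfd_adb() sanity checks the payload size:
+ *
+ *     WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
+ */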
+
+/*
+ * data transfer descriptor
+ */
+struct vpdma_dtd {
+ u32 type_ctl_stride;
+ union {
+ u32 xfer_length_height;
+ u32 w1;
+ };
+ dma_addr_t start_addr;
+ u32 pkt_ctl;
+ union {
+ u32 frame_width_height; /* inbound */
+ dma_addr_t desc_write_addr; /* outbound */
+ };
+ union {
+ u32 start_h_v; /* inbound */
+ u32 max_width_height; /* outbound */
+ };
+ u32 client_attr0;
+ u32 client_attr1;
+};
+
+/* Data Transfer Descriptor specifics */
+#define DTD_NO_NOTIFY 0
+#define DTD_NOTIFY 1
+
+#define DTD_PKT_TYPE 0xa
+#define DTD_DIR_IN 0
+#define DTD_DIR_OUT 1
+
+/* type_ctl_stride */
+#define DTD_DATA_TYPE_MASK 0x3f
+#define DTD_DATA_TYPE_SHFT 26
+#define DTD_NOTIFY_MASK 0x01
+#define DTD_NOTIFY_SHFT 25
+#define DTD_FIELD_MASK 0x01
+#define DTD_FIELD_SHFT 24
+#define DTD_1D_MASK 0x01
+#define DTD_1D_SHFT 23
+#define DTD_EVEN_LINE_SKIP_MASK 0x01
+#define DTD_EVEN_LINE_SKIP_SHFT 20
+#define DTD_ODD_LINE_SKIP_MASK 0x01
+#define DTD_ODD_LINE_SKIP_SHFT 16
+#define DTD_LINE_STRIDE_MASK 0xffff
+#define DTD_LINE_STRIDE_SHFT 0
+
+/* xfer_length_height */
+#define DTD_LINE_LENGTH_MASK 0xffff
+#define DTD_LINE_LENGTH_SHFT 16
+#define DTD_XFER_HEIGHT_MASK 0xffff
+#define DTD_XFER_HEIGHT_SHFT 0
+
+/* pkt_ctl */
+#define DTD_PKT_TYPE_MASK 0x1f
+#define DTD_PKT_TYPE_SHFT 27
+#define DTD_MODE_MASK 0x01
+#define DTD_MODE_SHFT 26
+#define DTD_DIR_MASK 0x01
+#define DTD_DIR_SHFT 25
+#define DTD_CHAN_MASK 0x01ff
+#define DTD_CHAN_SHFT 16
+#define DTD_PRI_MASK 0x0f
+#define DTD_PRI_SHFT 9
+#define DTD_NEXT_CHAN_MASK 0x01ff
+#define DTD_NEXT_CHAN_SHFT 0
+
+/* frame_width_height */
+#define DTD_FRAME_WIDTH_MASK 0xffff
+#define DTD_FRAME_WIDTH_SHFT 16
+#define DTD_FRAME_HEIGHT_MASK 0xffff
+#define DTD_FRAME_HEIGHT_SHFT 0
+
+/* start_h_v */
+#define DTD_H_START_MASK 0xffff
+#define DTD_H_START_SHFT 16
+#define DTD_V_START_MASK 0xffff
+#define DTD_V_START_SHFT 0
+
+#define DTD_DESC_START_SHIFT 5
+#define DTD_WRITE_DESC_MASK 0x01
+#define DTD_WRITE_DESC_SHIFT 2
+#define DTD_DROP_DATA_MASK 0x01
+#define DTD_DROP_DATA_SHIFT 1
+#define DTD_USE_DESC_MASK 0x01
+#define DTD_USE_DESC_SHIFT 0
+
+/* max_width_height */
+#define DTD_MAX_WIDTH_MASK 0x07
+#define DTD_MAX_WIDTH_SHFT 4
+#define DTD_MAX_HEIGHT_MASK 0x07
+#define DTD_MAX_HEIGHT_SHFT 0
+
+/* max width configurations */
+/* unlimited width */
+#define MAX_OUT_WIDTH_UNLIMITED 0
+/* as specified in max_size1 reg */
+#define MAX_OUT_WIDTH_REG1 1
+/* as specified in max_size2 reg */
+#define MAX_OUT_WIDTH_REG2 2
+/* as specified in max_size3 reg */
+#define MAX_OUT_WIDTH_REG3 3
+/* maximum of 352 pixels as width */
+#define MAX_OUT_WIDTH_352 4
+/* maximum of 768 pixels as width */
+#define MAX_OUT_WIDTH_768 5
+/* maximum of 1280 pixels width */
+#define MAX_OUT_WIDTH_1280 6
+/* maximum of 1920 pixels as width */
+#define MAX_OUT_WIDTH_1920 7
+
+/* max height configurations */
+/* unlimited height */
+#define MAX_OUT_HEIGHT_UNLIMITED 0
+/* as specified in max_size1 reg */
+#define MAX_OUT_HEIGHT_REG1 1
+/* as specified in max_size2 reg */
+#define MAX_OUT_HEIGHT_REG2 2
+/* as specified in max_size3 reg */
+#define MAX_OUT_HEIGHT_REG3 3
+/* maximum of 288 lines as height */
+#define MAX_OUT_HEIGHT_288 4
+/* maximum of 576 lines as height */
+#define MAX_OUT_HEIGHT_576 5
+/* maximum of 720 lines as height */
+#define MAX_OUT_HEIGHT_720 6
+/* maximum of 1080 lines as height */
+#define MAX_OUT_HEIGHT_1080 7
+
+static inline u32 dtd_type_ctl_stride(int type, bool notify, int field,
+ bool one_d, bool even_line_skip, bool odd_line_skip,
+ int line_stride)
+{
+ return (type << DTD_DATA_TYPE_SHFT) | (notify << DTD_NOTIFY_SHFT) |
+ (field << DTD_FIELD_SHFT) | (one_d << DTD_1D_SHFT) |
+ (even_line_skip << DTD_EVEN_LINE_SKIP_SHFT) |
+ (odd_line_skip << DTD_ODD_LINE_SKIP_SHFT) |
+ line_stride;
+}
+
+static inline u32 dtd_xfer_length_height(int line_length, int xfer_height)
+{
+ return (line_length << DTD_LINE_LENGTH_SHFT) | xfer_height;
+}
+
+static inline u32 dtd_pkt_ctl(bool mode, bool dir, int chan, int pri,
+ int next_chan)
+{
+ return (DTD_PKT_TYPE << DTD_PKT_TYPE_SHFT) | (mode << DTD_MODE_SHFT) |
+ (dir << DTD_DIR_SHFT) | (chan << DTD_CHAN_SHFT) |
+ (pri << DTD_PRI_SHFT) | next_chan;
+}
+
+static inline u32 dtd_frame_width_height(int width, int height)
+{
+ return (width << DTD_FRAME_WIDTH_SHFT) | height;
+}
+
+static inline u32 dtd_desc_write_addr(unsigned int addr, bool write_desc,
+ bool drop_data, bool use_desc)
+{
+ return (addr << DTD_DESC_START_SHIFT) |
+ (write_desc << DTD_WRITE_DESC_SHIFT) |
+ (drop_data << DTD_DROP_DATA_SHIFT) |
+ use_desc;
+}
+
+static inline u32 dtd_start_h_v(int h_start, int v_start)
+{
+ return (h_start << DTD_H_START_SHFT) | v_start;
+}
+
+static inline u32 dtd_max_width_height(int max_width, int max_height)
+{
+ return (max_width << DTD_MAX_WIDTH_SHFT) | max_height;
+}
+
+static inline int dtd_get_data_type(struct vpdma_dtd *dtd)
+{
+ return dtd->type_ctl_stride >> DTD_DATA_TYPE_SHFT;
+}
+
+static inline bool dtd_get_notify(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_NOTIFY_SHFT) & DTD_NOTIFY_MASK;
+}
+
+static inline int dtd_get_field(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_FIELD_SHFT) & DTD_FIELD_MASK;
+}
+
+static inline bool dtd_get_1d(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_1D_SHFT) & DTD_1D_MASK;
+}
+
+static inline bool dtd_get_even_line_skip(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_EVEN_LINE_SKIP_SHFT)
+ & DTD_EVEN_LINE_SKIP_MASK;
+}
+
+static inline bool dtd_get_odd_line_skip(struct vpdma_dtd *dtd)
+{
+ return (dtd->type_ctl_stride >> DTD_ODD_LINE_SKIP_SHFT)
+ & DTD_ODD_LINE_SKIP_MASK;
+}
+
+static inline int dtd_get_line_stride(struct vpdma_dtd *dtd)
+{
+ return dtd->type_ctl_stride & DTD_LINE_STRIDE_MASK;
+}
+
+static inline int dtd_get_line_length(struct vpdma_dtd *dtd)
+{
+ return dtd->xfer_length_height >> DTD_LINE_LENGTH_SHFT;
+}
+
+static inline int dtd_get_xfer_height(struct vpdma_dtd *dtd)
+{
+ return dtd->xfer_length_height & DTD_XFER_HEIGHT_MASK;
+}
+
+static inline int dtd_get_pkt_type(struct vpdma_dtd *dtd)
+{
+ return dtd->pkt_ctl >> DTD_PKT_TYPE_SHFT;
+}
+
+static inline bool dtd_get_mode(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_MODE_SHFT) & DTD_MODE_MASK;
+}
+
+static inline bool dtd_get_dir(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_DIR_SHFT) & DTD_DIR_MASK;
+}
+
+static inline int dtd_get_chan(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_CHAN_SHFT) & DTD_CHAN_MASK;
+}
+
+static inline int dtd_get_priority(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_PRI_SHFT) & DTD_PRI_MASK;
+}
+
+static inline int dtd_get_next_chan(struct vpdma_dtd *dtd)
+{
+ return (dtd->pkt_ctl >> DTD_NEXT_CHAN_SHFT) & DTD_NEXT_CHAN_MASK;
+}
+
+static inline int dtd_get_frame_width(struct vpdma_dtd *dtd)
+{
+ return dtd->frame_width_height >> DTD_FRAME_WIDTH_SHFT;
+}
+
+static inline int dtd_get_frame_height(struct vpdma_dtd *dtd)
+{
+ return dtd->frame_width_height & DTD_FRAME_HEIGHT_MASK;
+}
+
+static inline int dtd_get_desc_write_addr(struct vpdma_dtd *dtd)
+{
+ return dtd->desc_write_addr >> DTD_DESC_START_SHIFT;
+}
+
+static inline bool dtd_get_write_desc(struct vpdma_dtd *dtd)
+{
+ return (dtd->desc_write_addr >> DTD_WRITE_DESC_SHIFT) &
+ DTD_WRITE_DESC_MASK;
+}
+
+static inline bool dtd_get_drop_data(struct vpdma_dtd *dtd)
+{
+ return (dtd->desc_write_addr >> DTD_DROP_DATA_SHIFT) &
+ DTD_DROP_DATA_MASK;
+}
+
+static inline bool dtd_get_use_desc(struct vpdma_dtd *dtd)
+{
+ return dtd->desc_write_addr & DTD_USE_DESC_MASK;
+}
+
+static inline int dtd_get_h_start(struct vpdma_dtd *dtd)
+{
+ return dtd->start_h_v >> DTD_H_START_SHFT;
+}
+
+static inline int dtd_get_v_start(struct vpdma_dtd *dtd)
+{
+ return dtd->start_h_v & DTD_V_START_MASK;
+}
+
+static inline int dtd_get_max_width(struct vpdma_dtd *dtd)
+{
+ return (dtd->max_width_height >> DTD_MAX_WIDTH_SHFT) &
+ DTD_MAX_WIDTH_MASK;
+}
+
+static inline int dtd_get_max_height(struct vpdma_dtd *dtd)
+{
+ return (dtd->max_width_height >> DTD_MAX_HEIGHT_SHFT) &
+ DTD_MAX_HEIGHT_MASK;
+}
+
+/*
+ * configuration descriptor
+ */
+struct vpdma_cfd {
+ union {
+ u32 dest_addr_offset;
+ u32 w0;
+ };
+ union {
+ u32 block_len; /* in words */
+ u32 w1;
+ };
+ u32 payload_addr;
+ u32 ctl_payload_len; /* in words */
+};
+
+/* Configuration descriptor specifics */
+
+#define CFD_PKT_TYPE 0xb
+
+#define CFD_DIRECT 1
+#define CFD_INDIRECT 0
+#define CFD_CLS_ADB 0
+#define CFD_CLS_BLOCK 1
+
+/* block_len */
+#define CFD__BLOCK_LEN_MASK 0xffff
+#define CFD__BLOCK_LEN_SHFT 0
+
+/* ctl_payload_len */
+#define CFD_PKT_TYPE_MASK 0x1f
+#define CFD_PKT_TYPE_SHFT 27
+#define CFD_DIRECT_MASK 0x01
+#define CFD_DIRECT_SHFT 26
+#define CFD_CLASS_MASK 0x03
+#define CFD_CLASS_SHFT 24
+#define CFD_DEST_MASK 0xff
+#define CFD_DEST_SHFT 16
+#define CFD_PAYLOAD_LEN_MASK 0xffff
+#define CFD_PAYLOAD_LEN_SHFT 0
+
+static inline u32 cfd_pkt_payload_len(bool direct, int cls, int dest,
+ int payload_len)
+{
+ return (CFD_PKT_TYPE << CFD_PKT_TYPE_SHFT) |
+ (direct << CFD_DIRECT_SHFT) |
+ (cls << CFD_CLASS_SHFT) |
+ (dest << CFD_DEST_SHFT) |
+ payload_len;
+}
+
+static inline int cfd_get_pkt_type(struct vpdma_cfd *cfd)
+{
+ return cfd->ctl_payload_len >> CFD_PKT_TYPE_SHFT;
+}
+
+static inline bool cfd_get_direct(struct vpdma_cfd *cfd)
+{
+ return (cfd->ctl_payload_len >> CFD_DIRECT_SHFT) & CFD_DIRECT_MASK;
+}
+
+static inline bool cfd_get_class(struct vpdma_cfd *cfd)
+{
+ return (cfd->ctl_payload_len >> CFD_CLASS_SHFT) & CFD_CLASS_MASK;
+}
+
+static inline int cfd_get_dest(struct vpdma_cfd *cfd)
+{
+ return (cfd->ctl_payload_len >> CFD_DEST_SHFT) & CFD_DEST_MASK;
+}
+
+static inline int cfd_get_payload_len(struct vpdma_cfd *cfd)
+{
+ return cfd->ctl_payload_len & CFD_PAYLOAD_LEN_MASK;
+}
+
+/*
+ * control descriptor
+ */
+struct vpdma_ctd {
+ union {
+ u32 timer_value;
+ u32 list_addr;
+ u32 w0;
+ };
+ union {
+ u32 pixel_line_count;
+ u32 list_size;
+ u32 w1;
+ };
+ union {
+ u32 event;
+ u32 fid_ctl;
+ u32 w2;
+ };
+ u32 type_source_ctl;
+};
+
+/* control descriptor types */
+#define CTD_TYPE_SYNC_ON_CLIENT 0
+#define CTD_TYPE_SYNC_ON_LIST 1
+#define CTD_TYPE_SYNC_ON_EXT 2
+#define CTD_TYPE_SYNC_ON_LM_TIMER 3
+#define CTD_TYPE_SYNC_ON_CHANNEL 4
+#define CTD_TYPE_CHNG_CLIENT_IRQ 5
+#define CTD_TYPE_SEND_IRQ 6
+#define CTD_TYPE_RELOAD_LIST 7
+#define CTD_TYPE_ABORT_CHANNEL 8
+
+#define CTD_PKT_TYPE 0xc
+
+/* timer_value */
+#define CTD_TIMER_VALUE_MASK 0xffff
+#define CTD_TIMER_VALUE_SHFT 0
+
+/* pixel_line_count */
+#define CTD_PIXEL_COUNT_MASK 0xffff
+#define CTD_PIXEL_COUNT_SHFT 16
+#define CTD_LINE_COUNT_MASK 0xffff
+#define CTD_LINE_COUNT_SHFT 0
+
+/* list_size */
+#define CTD_LIST_SIZE_MASK 0xffff
+#define CTD_LIST_SIZE_SHFT 0
+
+/* event */
+#define CTD_EVENT_MASK 0x0f
+#define CTD_EVENT_SHFT 0
+
+/* fid_ctl */
+#define CTD_FID2_MASK 0x03
+#define CTD_FID2_SHFT 4
+#define CTD_FID1_MASK 0x03
+#define CTD_FID1_SHFT 2
+#define CTD_FID0_MASK 0x03
+#define CTD_FID0_SHFT 0
+
+/* type_source_ctl */
+#define CTD_PKT_TYPE_MASK 0x1f
+#define CTD_PKT_TYPE_SHFT 27
+#define CTD_SOURCE_MASK 0xff
+#define CTD_SOURCE_SHFT 16
+#define CTD_CONTROL_MASK 0x0f
+#define CTD_CONTROL_SHFT 0
+
+static inline u32 ctd_pixel_line_count(int pixel_count, int line_count)
+{
+ return (pixel_count << CTD_PIXEL_COUNT_SHFT) | line_count;
+}
+
+static inline u32 ctd_set_fid_ctl(int fid0, int fid1, int fid2)
+{
+ return (fid2 << CTD_FID2_SHFT) | (fid1 << CTD_FID1_SHFT) | fid0;
+}
+
+static inline u32 ctd_type_source_ctl(int source, int control)
+{
+ return (CTD_PKT_TYPE << CTD_PKT_TYPE_SHFT) |
+ (source << CTD_SOURCE_SHFT) | control;
+}
+
+static inline u32 ctd_get_pixel_count(struct vpdma_ctd *ctd)
+{
+ return ctd->pixel_line_count >> CTD_PIXEL_COUNT_SHFT;
+}
+
+static inline int ctd_get_line_count(struct vpdma_ctd *ctd)
+{
+ return ctd->pixel_line_count & CTD_LINE_COUNT_MASK;
+}
+
+static inline int ctd_get_event(struct vpdma_ctd *ctd)
+{
+ return ctd->event & CTD_EVENT_MASK;
+}
+
+static inline int ctd_get_fid2_ctl(struct vpdma_ctd *ctd)
+{
+ return (ctd->fid_ctl >> CTD_FID2_SHFT) & CTD_FID2_MASK;
+}
+
+static inline int ctd_get_fid1_ctl(struct vpdma_ctd *ctd)
+{
+ return (ctd->fid_ctl >> CTD_FID1_SHFT) & CTD_FID1_MASK;
+}
+
+static inline int ctd_get_fid0_ctl(struct vpdma_ctd *ctd)
+{
+ return ctd->fid_ctl & CTD_FID0_MASK;
+}
+
+static inline int ctd_get_pkt_type(struct vpdma_ctd *ctd)
+{
+ return ctd->type_source_ctl >> CTD_PKT_TYPE_SHFT;
+}
+
+static inline int ctd_get_source(struct vpdma_ctd *ctd)
+{
+ return (ctd->type_source_ctl >> CTD_SOURCE_SHFT) & CTD_SOURCE_MASK;
+}
+
+static inline int ctd_get_ctl(struct vpdma_ctd *ctd)
+{
+ return ctd->type_source_ctl & CTD_CONTROL_MASK;
+}
+
+#endif
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
new file mode 100644
index 000000000000..4e58069e24ff
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -0,0 +1,2099 @@
+/*
+ * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * Based on the virtual v4l2-mem2mem example device
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vpdma.h"
+#include "vpe_regs.h"
+
+#define VPE_MODULE_NAME "vpe"
+
+/* minimum and maximum frame sizes */
+#define MIN_W 128
+#define MIN_H 128
+#define MAX_W 1920
+#define MAX_H 1080
+
+/* required alignments */
+#define S_ALIGN 0 /* multiple of 1 */
+#define H_ALIGN 1 /* multiple of 2 */
+#define W_ALIGN 1 /* multiple of 2 */
+
+/* line stride must be a multiple of 128 bits (16 bytes) */
+#define L_ALIGN 4
+
+/* flags that indicate a format can be used for capture/output */
+#define VPE_FMT_TYPE_CAPTURE (1 << 0)
+#define VPE_FMT_TYPE_OUTPUT (1 << 1)
+
+/* used as plane indices */
+#define VPE_MAX_PLANES 2
+#define VPE_LUMA 0
+#define VPE_CHROMA 1
+
+/* per m2m context info */
+#define VPE_MAX_SRC_BUFS 3 /* need 3 src fields to de-interlace */
+
+#define VPE_DEF_BUFS_PER_JOB 1 /* default one buffer per batch job */
+
+/*
+ * each VPE context can need up to 3 config descriptors, 7 input descriptors,
+ * 3 output descriptors, and 10 control descriptors: that is, 10 data
+ * transfer descriptors plus 13 config/control descriptors, which the size
+ * below accounts for
+ */
+#define VPE_DESC_LIST_SIZE (10 * VPDMA_DTD_DESC_SIZE + \
+ 13 * VPDMA_CFD_CTD_DESC_SIZE)
+
+#define vpe_dbg(vpedev, fmt, arg...) \
+ dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
+#define vpe_err(vpedev, fmt, arg...) \
+ dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)
+
+struct vpe_us_coeffs {
+ unsigned short anchor_fid0_c0;
+ unsigned short anchor_fid0_c1;
+ unsigned short anchor_fid0_c2;
+ unsigned short anchor_fid0_c3;
+ unsigned short interp_fid0_c0;
+ unsigned short interp_fid0_c1;
+ unsigned short interp_fid0_c2;
+ unsigned short interp_fid0_c3;
+ unsigned short anchor_fid1_c0;
+ unsigned short anchor_fid1_c1;
+ unsigned short anchor_fid1_c2;
+ unsigned short anchor_fid1_c3;
+ unsigned short interp_fid1_c0;
+ unsigned short interp_fid1_c1;
+ unsigned short interp_fid1_c2;
+ unsigned short interp_fid1_c3;
+};
+
+/*
+ * Default upsampler coefficients
+ */
+static const struct vpe_us_coeffs us_coeffs[] = {
+ {
+ /* Coefficients for progressive input */
+ 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
+ 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
+ },
+ {
+ /* Coefficients for Top Field Interlaced input */
+ 0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3,
+ /* Coefficients for Bottom Field Interlaced input */
+ 0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9,
+ },
+};
+
+/*
+ * the following registers are for configuring some of the parameters of the
+ * motion and edge detection blocks inside DEI. These generally remain the
+ * same, but could be exposed to userspace later if someone needs to tweak
+ * them.
+ */
+struct vpe_dei_regs {
+ unsigned long mdt_spacial_freq_thr_reg; /* VPE_DEI_REG2 */
+ unsigned long edi_config_reg; /* VPE_DEI_REG3 */
+ unsigned long edi_lut_reg0; /* VPE_DEI_REG4 */
+ unsigned long edi_lut_reg1; /* VPE_DEI_REG5 */
+ unsigned long edi_lut_reg2; /* VPE_DEI_REG6 */
+ unsigned long edi_lut_reg3; /* VPE_DEI_REG7 */
+};
+
+/*
+ * default expert DEI register values, unlikely to be modified.
+ */
+static const struct vpe_dei_regs dei_regs = {
+ 0x020C0804u,
+ 0x0118100Fu,
+ 0x08040200u,
+ 0x1010100Cu,
+ 0x10101010u,
+ 0x10101010u,
+};
+
+/*
+ * The port_data structure contains per-port data.
+ */
+struct vpe_port_data {
+ enum vpdma_channel channel; /* VPDMA channel */
+ u8 vb_index; /* input frame f, f-1, f-2 index */
+ u8 vb_part; /* plane index for co-planar formats */
+};
+
+/*
+ * Define indices into the port_data tables
+ */
+#define VPE_PORT_LUMA1_IN 0
+#define VPE_PORT_CHROMA1_IN 1
+#define VPE_PORT_LUMA2_IN 2
+#define VPE_PORT_CHROMA2_IN 3
+#define VPE_PORT_LUMA3_IN 4
+#define VPE_PORT_CHROMA3_IN 5
+#define VPE_PORT_MV_IN 6
+#define VPE_PORT_MV_OUT 7
+#define VPE_PORT_LUMA_OUT 8
+#define VPE_PORT_CHROMA_OUT 9
+#define VPE_PORT_RGB_OUT 10
+
+static const struct vpe_port_data port_data[11] = {
+ [VPE_PORT_LUMA1_IN] = {
+ .channel = VPE_CHAN_LUMA1_IN,
+ .vb_index = 0,
+ .vb_part = VPE_LUMA,
+ },
+ [VPE_PORT_CHROMA1_IN] = {
+ .channel = VPE_CHAN_CHROMA1_IN,
+ .vb_index = 0,
+ .vb_part = VPE_CHROMA,
+ },
+ [VPE_PORT_LUMA2_IN] = {
+ .channel = VPE_CHAN_LUMA2_IN,
+ .vb_index = 1,
+ .vb_part = VPE_LUMA,
+ },
+ [VPE_PORT_CHROMA2_IN] = {
+ .channel = VPE_CHAN_CHROMA2_IN,
+ .vb_index = 1,
+ .vb_part = VPE_CHROMA,
+ },
+ [VPE_PORT_LUMA3_IN] = {
+ .channel = VPE_CHAN_LUMA3_IN,
+ .vb_index = 2,
+ .vb_part = VPE_LUMA,
+ },
+ [VPE_PORT_CHROMA3_IN] = {
+ .channel = VPE_CHAN_CHROMA3_IN,
+ .vb_index = 2,
+ .vb_part = VPE_CHROMA,
+ },
+ [VPE_PORT_MV_IN] = {
+ .channel = VPE_CHAN_MV_IN,
+ },
+ [VPE_PORT_MV_OUT] = {
+ .channel = VPE_CHAN_MV_OUT,
+ },
+ [VPE_PORT_LUMA_OUT] = {
+ .channel = VPE_CHAN_LUMA_OUT,
+ .vb_part = VPE_LUMA,
+ },
+ [VPE_PORT_CHROMA_OUT] = {
+ .channel = VPE_CHAN_CHROMA_OUT,
+ .vb_part = VPE_CHROMA,
+ },
+ [VPE_PORT_RGB_OUT] = {
+ .channel = VPE_CHAN_RGB_OUT,
+ .vb_part = VPE_LUMA,
+ },
+};
+
+/* driver info for each of the supported video formats */
+struct vpe_fmt {
+ char *name; /* human-readable name */
+ u32 fourcc; /* standard format identifier */
+ u8 types; /* CAPTURE and/or OUTPUT */
+ u8 coplanar; /* set for unpacked Luma and Chroma */
+ /* vpdma format info for each plane */
+ struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES];
+};
+
+static struct vpe_fmt vpe_formats[] = {
+ {
+ .name = "YUV 422 co-planar",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 1,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444],
+ &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444],
+ },
+ },
+ {
+ .name = "YUV 420 co-planar",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 1,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
+ &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
+ },
+ },
+ {
+ .name = "YUYV 422 packed",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422],
+ },
+ },
+ {
+ .name = "UYVY 422 packed",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
+ },
+ },
+};
+
+/*
+ * per-queue, driver-specific private data.
+ * there is one source queue and one destination queue for each m2m context.
+ */
+struct vpe_q_data {
+ unsigned int width; /* frame width */
+ unsigned int height; /* frame height */
+ unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */
+ enum v4l2_colorspace colorspace;
+ enum v4l2_field field; /* supported field value */
+ unsigned int flags;
+ unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */
+ struct v4l2_rect c_rect; /* crop/compose rectangle */
+ struct vpe_fmt *fmt; /* format info */
+};
+
+/* vpe_q_data flag bits */
+#define Q_DATA_FRAME_1D (1 << 0)
+#define Q_DATA_MODE_TILED (1 << 1)
+#define Q_DATA_INTERLACED (1 << 2)
+
+enum {
+ Q_DATA_SRC = 0,
+ Q_DATA_DST = 1,
+};
+
+/* find our format description corresponding to the passed v4l2_format */
+static struct vpe_fmt *find_format(struct v4l2_format *f)
+{
+ struct vpe_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
+ fmt = &vpe_formats[k];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+/*
+ * there is one vpe_dev structure in the driver, it is shared by
+ * all instances.
+ */
+struct vpe_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct v4l2_m2m_dev *m2m_dev;
+
+ atomic_t num_instances; /* count of driver instances */
+ dma_addr_t loaded_mmrs; /* shadow mmrs in device */
+ struct mutex dev_mutex;
+ spinlock_t lock;
+
+ int irq;
+ void __iomem *base;
+
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct vpdma_data *vpdma; /* vpdma data handle */
+};
+
+/*
+ * There is one vpe_ctx structure for each m2m context.
+ */
+struct vpe_ctx {
+ struct v4l2_fh fh;
+ struct vpe_dev *dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_ctrl_handler hdl;
+
+ unsigned int field; /* current field */
+ unsigned int sequence; /* current frame/field seq */
+ unsigned int aborting; /* abort after next irq */
+
+ unsigned int bufs_per_job; /* input buffers per batch */
+ unsigned int bufs_completed; /* bufs done in this batch */
+
+ struct vpe_q_data q_data[2]; /* src & dst queue data */
+ struct vb2_buffer *src_vbs[VPE_MAX_SRC_BUFS];
+ struct vb2_buffer *dst_vb;
+
+ dma_addr_t mv_buf_dma[2]; /* dma addrs of motion vector in/out bufs */
+ void *mv_buf[2]; /* virtual addrs of motion vector bufs */
+ size_t mv_buf_size; /* current motion vector buffer size */
+ struct vpdma_buf mmr_adb; /* shadow reg addr/data block */
+ struct vpdma_desc_list desc_list; /* DMA descriptor list */
+
+ bool deinterlacing; /* using de-interlacer */
+ bool load_mmrs; /* have new shadow reg values */
+
+ unsigned int src_mv_buf_selector;
+};
+
+/*
+ * M2M devices get 2 queues.
+ * Return the queue given the type.
+ */
+static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return &ctx->q_data[Q_DATA_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return &ctx->q_data[Q_DATA_DST];
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+static u32 read_reg(struct vpe_dev *dev, int offset)
+{
+ return ioread32(dev->base + offset);
+}
+
+static void write_reg(struct vpe_dev *dev, int offset, u32 value)
+{
+ iowrite32(value, dev->base + offset);
+}
+
+/* register field read/write helpers */
+static int get_field(u32 value, u32 mask, int shift)
+{
+ return (value & (mask << shift)) >> shift;
+}
+
+static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift)
+{
+ return get_field(read_reg(dev, offset), mask, shift);
+}
+
+static void write_field(u32 *valp, u32 field, u32 mask, int shift)
+{
+ u32 val = *valp;
+
+ val &= ~(mask << shift);
+ val |= (field & mask) << shift;
+ *valp = val;
+}
+
+static void write_field_reg(struct vpe_dev *dev, int offset, u32 field,
+ u32 mask, int shift)
+{
+ u32 val = read_reg(dev, offset);
+
+ write_field(&val, field, mask, shift);
+
+ write_reg(dev, offset, val);
+}
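+
+/*
+ * A small worked example of the field helpers above (values chosen
+ * arbitrarily): with mask = 0xf and shift = 10, write_field_reg(dev, offset,
+ * 0x3, 0xf, 10) reads the register, clears bits 13:10, ORs in (0x3 << 10)
+ * and writes the result back, leaving all other bits unchanged.
+ */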
+
+/*
+ * DMA address/data block for the shadow registers
+ */
+struct vpe_mmr_adb {
+ struct vpdma_adb_hdr out_fmt_hdr;
+ u32 out_fmt_reg[1];
+ u32 out_fmt_pad[3];
+ struct vpdma_adb_hdr us1_hdr;
+ u32 us1_regs[8];
+ struct vpdma_adb_hdr us2_hdr;
+ u32 us2_regs[8];
+ struct vpdma_adb_hdr us3_hdr;
+ u32 us3_regs[8];
+ struct vpdma_adb_hdr dei_hdr;
+ u32 dei_regs[8];
+ struct vpdma_adb_hdr sc_hdr;
+ u32 sc_regs[1];
+ u32 sc_pad[3];
+ struct vpdma_adb_hdr csc_hdr;
+ u32 csc_regs[6];
+ u32 csc_pad[2];
+};
+
+#define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \
+ VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
+/*
+ * Set the headers for all of the address/data block structures.
+ */
+static void init_adb_hdrs(struct vpe_ctx *ctx)
+{
+ VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT);
+ VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0);
+ VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
+ VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
+ VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
+ VPE_SET_MMR_ADB_HDR(ctx, sc_hdr, sc_regs, VPE_SC_MP_SC0);
+ VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs, VPE_CSC_CSC00);
+}
+
+/*
+ * Allocate or re-allocate the motion vector DMA buffers
+ * There are two buffers, one for input and one for output.
+ * However, the roles are reversed after each field is processed.
+ * In other words, after each field is processed, the previous
+ * output (dst) MV buffer becomes the new input (src) MV buffer.
+ */
+static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size)
+{
+ struct device *dev = ctx->dev->v4l2_dev.dev;
+
+ if (ctx->mv_buf_size == size)
+ return 0;
+
+ if (ctx->mv_buf[0])
+ dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0],
+ ctx->mv_buf_dma[0]);
+
+ if (ctx->mv_buf[1])
+ dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1],
+ ctx->mv_buf_dma[1]);
+
+ if (size == 0)
+ return 0;
+
+ ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0],
+ GFP_KERNEL);
+ if (!ctx->mv_buf[0]) {
+ vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
+ return -ENOMEM;
+ }
+
+ ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1],
+ GFP_KERNEL);
+ if (!ctx->mv_buf[1]) {
+ vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
+ dma_free_coherent(dev, size, ctx->mv_buf[0],
+ ctx->mv_buf_dma[0]);
+
+ return -ENOMEM;
+ }
+
+ ctx->mv_buf_size = size;
+ ctx->src_mv_buf_selector = 0;
+
+ return 0;
+}
+
+static void free_mv_buffers(struct vpe_ctx *ctx)
+{
+ realloc_mv_buffers(ctx, 0);
+}
+
+/*
+ * While de-interlacing, we keep the two most recent input buffers
+ * around. This function frees those two buffers when we have
+ * finished processing the current stream.
+ */
+static void free_vbs(struct vpe_ctx *ctx)
+{
+ struct vpe_dev *dev = ctx->dev;
+ unsigned long flags;
+
+ if (ctx->src_vbs[2] == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (ctx->src_vbs[2]) {
+ v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * Enable or disable the VPE clocks
+ */
+static void vpe_set_clock_enable(struct vpe_dev *dev, bool on)
+{
+ u32 val = 0;
+
+ if (on)
+ val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE;
+ write_reg(dev, VPE_CLK_ENABLE, val);
+}
+
+static void vpe_top_reset(struct vpe_dev *dev)
+{
+ write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
+ VPE_DATA_PATH_CLK_RESET_SHIFT);
+
+ usleep_range(100, 150);
+
+ write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
+ VPE_DATA_PATH_CLK_RESET_SHIFT);
+}
+
+static void vpe_top_vpdma_reset(struct vpe_dev *dev)
+{
+ write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
+ VPE_VPDMA_CLK_RESET_SHIFT);
+
+ usleep_range(100, 150);
+
+ write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
+ VPE_VPDMA_CLK_RESET_SHIFT);
+}
+
+/*
+ * Load the correct set of upsampler coefficients into the shadow MMRs
+ */
+static void set_us_coefficients(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+ u32 *us1_reg = &mmr_adb->us1_regs[0];
+ u32 *us2_reg = &mmr_adb->us2_regs[0];
+ u32 *us3_reg = &mmr_adb->us3_regs[0];
+ const unsigned short *cp, *end_cp;
+
+ cp = &us_coeffs[0].anchor_fid0_c0;
+
+ if (s_q_data->flags & Q_DATA_INTERLACED) /* interlaced */
+ cp += sizeof(us_coeffs[0]) / sizeof(*cp);
+
+ end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);
+
+ while (cp < end_cp) {
+ write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
+ write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
+ *us2_reg++ = *us1_reg;
+ *us3_reg++ = *us1_reg++;
+ }
+ ctx->load_mmrs = true;
+}
+
+/*
+ * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
+ */
+static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
+{
+ struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ u32 *us1_reg0 = &mmr_adb->us1_regs[0];
+ u32 *us2_reg0 = &mmr_adb->us2_regs[0];
+ u32 *us3_reg0 = &mmr_adb->us3_regs[0];
+ int line_mode = 1;
+ int cfg_mode = 1;
+
+ /*
+ * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
+ * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
+ */
+
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
+ cfg_mode = 0;
+ line_mode = 0; /* double lines to line buffer */
+ }
+
+ write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
+ write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
+ write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
+
+ /* set the line modes via direct register writes for now */
+ vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
+ vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
+ vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN);
+
+ /* frame start for input luma */
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_LUMA1_IN);
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_LUMA2_IN);
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_LUMA3_IN);
+
+ /* frame start for input chroma */
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_CHROMA1_IN);
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_CHROMA2_IN);
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_CHROMA3_IN);
+
+ /* frame start for MV in client */
+ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
+ VPE_CHAN_MV_IN);
+
+ ctx->load_mmrs = true;
+}
+
+/*
+ * Set the shadow registers that are modified when the source
+ * format changes.
+ */
+static void set_src_registers(struct vpe_ctx *ctx)
+{
+ set_us_coefficients(ctx);
+}
+
+/*
+ * Set the shadow registers that are modified when the destination
+ * format changes.
+ */
+static void set_dst_registers(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
+ u32 val = 0;
+
+ /* select RGB path when color space conversion is supported in future */
+ if (fmt->fourcc == V4L2_PIX_FMT_RGB24)
+ val |= VPE_RGB_OUT_SELECT | VPE_CSC_SRC_DEI_SCALER;
+ else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
+ val |= VPE_COLOR_SEPARATE_422;
+
+ /* The source of CHR_DS is always the scaler, whether it's used or not */
+ val |= VPE_DS_SRC_DEI_SCALER;
+
+ if (fmt->fourcc != V4L2_PIX_FMT_NV12)
+ val |= VPE_DS_BYPASS;
+
+ mmr_adb->out_fmt_reg[0] = val;
+
+ ctx->load_mmrs = true;
+}
+
+/*
+ * Set the de-interlacer shadow register values
+ */
+static void set_dei_regs(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+ unsigned int src_h = s_q_data->c_rect.height;
+ unsigned int src_w = s_q_data->c_rect.width;
+ u32 *dei_mmr0 = &mmr_adb->dei_regs[0];
+ bool deinterlace = true;
+ u32 val = 0;
+
+ /*
+ * according to the TRM, we should set DEI to progressive bypass mode when
+ * the input content is progressive. However, DEI is bypassed correctly for
+ * both progressive and interlaced content in interlace bypass mode, and it
+ * has been recommended not to use progressive bypass mode.
+ */
+ if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) ||
+ !(s_q_data->flags & Q_DATA_INTERLACED)) {
+ deinterlace = false;
+ val = VPE_DEI_INTERLACE_BYPASS;
+ }
+
+ src_h = deinterlace ? src_h * 2 : src_h;
+
+ val |= (src_h << VPE_DEI_HEIGHT_SHIFT) |
+ (src_w << VPE_DEI_WIDTH_SHIFT) |
+ VPE_DEI_FIELD_FLUSH;
+
+ *dei_mmr0 = val;
+
+ ctx->load_mmrs = true;
+}
+
+static void set_dei_shadow_registers(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ u32 *dei_mmr = &mmr_adb->dei_regs[0];
+ const struct vpe_dei_regs *cur = &dei_regs;
+
+ dei_mmr[2] = cur->mdt_spacial_freq_thr_reg;
+ dei_mmr[3] = cur->edi_config_reg;
+ dei_mmr[4] = cur->edi_lut_reg0;
+ dei_mmr[5] = cur->edi_lut_reg1;
+ dei_mmr[6] = cur->edi_lut_reg2;
+ dei_mmr[7] = cur->edi_lut_reg3;
+
+ ctx->load_mmrs = true;
+}
+
+static void set_csc_coeff_bypass(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ u32 *shadow_csc_reg5 = &mmr_adb->csc_regs[5];
+
+ *shadow_csc_reg5 |= VPE_CSC_BYPASS;
+
+ ctx->load_mmrs = true;
+}
+
+static void set_sc_regs_bypass(struct vpe_ctx *ctx)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ u32 *sc_reg0 = &mmr_adb->sc_regs[0];
+ u32 val = 0;
+
+ val |= VPE_SC_BYPASS;
+ *sc_reg0 = val;
+
+ ctx->load_mmrs = true;
+}
+
+/*
+ * Set the shadow registers whose values are modified when either the
+ * source or destination format is changed.
+ */
+static int set_srcdst_params(struct vpe_ctx *ctx)
+{
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+ struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+ size_t mv_buf_size;
+ int ret;
+
+ ctx->sequence = 0;
+ ctx->field = V4L2_FIELD_TOP;
+
+ if ((s_q_data->flags & Q_DATA_INTERLACED) &&
+ !(d_q_data->flags & Q_DATA_INTERLACED)) {
+ const struct vpdma_data_format *mv =
+ &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
+
+ ctx->deinterlacing = 1;
+ mv_buf_size =
+ (s_q_data->width * s_q_data->height * mv->depth) >> 3;
+ } else {
+ ctx->deinterlacing = 0;
+ mv_buf_size = 0;
+ }
+
+ free_vbs(ctx);
+
+ ret = realloc_mv_buffers(ctx, mv_buf_size);
+ if (ret)
+ return ret;
+
+ set_cfg_and_line_modes(ctx);
+ set_dei_regs(ctx);
+ set_csc_coeff_bypass(ctx);
+ set_sc_regs_bypass(ctx);
+
+ return 0;
+}
+
+/*
+ * Return the vpe_ctx structure for a given struct file
+ */
+static struct vpe_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct vpe_ctx, fh);
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/**
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int job_ready(void *priv)
+{
+ struct vpe_ctx *ctx = priv;
+ int needed = ctx->bufs_per_job;
+
+ if (ctx->deinterlacing && ctx->src_vbs[2] == NULL)
+ needed += 2; /* need additional two most recent fields */
+
+ if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed)
+ return 0;
+
+ return 1;
+}
+
+static void job_abort(void *priv)
+{
+ struct vpe_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+/*
+ * Lock access to the device
+ */
+static void vpe_lock(void *priv)
+{
+ struct vpe_ctx *ctx = priv;
+ struct vpe_dev *dev = ctx->dev;
+ mutex_lock(&dev->dev_mutex);
+}
+
+static void vpe_unlock(void *priv)
+{
+ struct vpe_ctx *ctx = priv;
+ struct vpe_dev *dev = ctx->dev;
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static void vpe_dump_regs(struct vpe_dev *dev)
+{
+#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))
+
+ vpe_dbg(dev, "VPE Registers:\n");
+
+ DUMPREG(PID);
+ DUMPREG(SYSCONFIG);
+ DUMPREG(INT0_STATUS0_RAW);
+ DUMPREG(INT0_STATUS0);
+ DUMPREG(INT0_ENABLE0);
+ DUMPREG(INT0_STATUS1_RAW);
+ DUMPREG(INT0_STATUS1);
+ DUMPREG(INT0_ENABLE1);
+ DUMPREG(CLK_ENABLE);
+ DUMPREG(CLK_RESET);
+ DUMPREG(CLK_FORMAT_SELECT);
+ DUMPREG(CLK_RANGE_MAP);
+ DUMPREG(US1_R0);
+ DUMPREG(US1_R1);
+ DUMPREG(US1_R2);
+ DUMPREG(US1_R3);
+ DUMPREG(US1_R4);
+ DUMPREG(US1_R5);
+ DUMPREG(US1_R6);
+ DUMPREG(US1_R7);
+ DUMPREG(US2_R0);
+ DUMPREG(US2_R1);
+ DUMPREG(US2_R2);
+ DUMPREG(US2_R3);
+ DUMPREG(US2_R4);
+ DUMPREG(US2_R5);
+ DUMPREG(US2_R6);
+ DUMPREG(US2_R7);
+ DUMPREG(US3_R0);
+ DUMPREG(US3_R1);
+ DUMPREG(US3_R2);
+ DUMPREG(US3_R3);
+ DUMPREG(US3_R4);
+ DUMPREG(US3_R5);
+ DUMPREG(US3_R6);
+ DUMPREG(US3_R7);
+ DUMPREG(DEI_FRAME_SIZE);
+ DUMPREG(MDT_BYPASS);
+ DUMPREG(MDT_SF_THRESHOLD);
+ DUMPREG(EDI_CONFIG);
+ DUMPREG(DEI_EDI_LUT_R0);
+ DUMPREG(DEI_EDI_LUT_R1);
+ DUMPREG(DEI_EDI_LUT_R2);
+ DUMPREG(DEI_EDI_LUT_R3);
+ DUMPREG(DEI_FMD_WINDOW_R0);
+ DUMPREG(DEI_FMD_WINDOW_R1);
+ DUMPREG(DEI_FMD_CONTROL_R0);
+ DUMPREG(DEI_FMD_CONTROL_R1);
+ DUMPREG(DEI_FMD_STATUS_R0);
+ DUMPREG(DEI_FMD_STATUS_R1);
+ DUMPREG(DEI_FMD_STATUS_R2);
+ DUMPREG(SC_MP_SC0);
+ DUMPREG(SC_MP_SC1);
+ DUMPREG(SC_MP_SC2);
+ DUMPREG(SC_MP_SC3);
+ DUMPREG(SC_MP_SC4);
+ DUMPREG(SC_MP_SC5);
+ DUMPREG(SC_MP_SC6);
+ DUMPREG(SC_MP_SC8);
+ DUMPREG(SC_MP_SC9);
+ DUMPREG(SC_MP_SC10);
+ DUMPREG(SC_MP_SC11);
+ DUMPREG(SC_MP_SC12);
+ DUMPREG(SC_MP_SC13);
+ DUMPREG(SC_MP_SC17);
+ DUMPREG(SC_MP_SC18);
+ DUMPREG(SC_MP_SC19);
+ DUMPREG(SC_MP_SC20);
+ DUMPREG(SC_MP_SC21);
+ DUMPREG(SC_MP_SC22);
+ DUMPREG(SC_MP_SC23);
+ DUMPREG(SC_MP_SC24);
+ DUMPREG(SC_MP_SC25);
+ DUMPREG(CSC_CSC00);
+ DUMPREG(CSC_CSC01);
+ DUMPREG(CSC_CSC02);
+ DUMPREG(CSC_CSC03);
+ DUMPREG(CSC_CSC04);
+ DUMPREG(CSC_CSC05);
+#undef DUMPREG
+}
+
+static void add_out_dtd(struct vpe_ctx *ctx, int port)
+{
+ struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
+ const struct vpe_port_data *p_data = &port_data[port];
+ struct vb2_buffer *vb = ctx->dst_vb;
+ struct v4l2_rect *c_rect = &q_data->c_rect;
+ struct vpe_fmt *fmt = q_data->fmt;
+ const struct vpdma_data_format *vpdma_fmt;
+ int mv_buf_selector = !ctx->src_mv_buf_selector;
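+ /* write to the MV buffer that the next job will read from */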
+ dma_addr_t dma_addr;
+ u32 flags = 0;
+
+ if (port == VPE_PORT_MV_OUT) {
+ vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
+ dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ } else {
+ /* coplanar formats use the plane tied to this port; interleaved formats always use plane 0 */
+ int plane = fmt->coplanar ? p_data->vb_part : 0;
+
+ vpdma_fmt = fmt->vpdma_fmt[plane];
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ if (!dma_addr) {
+ vpe_err(ctx->dev,
+ "acquiring output buffer(%d) dma_addr failed\n",
+ port);
+ return;
+ }
+ }
+
+ if (q_data->flags & Q_DATA_FRAME_1D)
+ flags |= VPDMA_DATA_FRAME_1D;
+ if (q_data->flags & Q_DATA_MODE_TILED)
+ flags |= VPDMA_DATA_MODE_TILED;
+
+ vpdma_add_out_dtd(&ctx->desc_list, c_rect, vpdma_fmt, dma_addr,
+ p_data->channel, flags);
+}
+
+static void add_in_dtd(struct vpe_ctx *ctx, int port)
+{
+ struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
+ const struct vpe_port_data *p_data = &port_data[port];
+ struct vb2_buffer *vb = ctx->src_vbs[p_data->vb_index];
+ struct v4l2_rect *c_rect = &q_data->c_rect;
+ struct vpe_fmt *fmt = q_data->fmt;
+ const struct vpdma_data_format *vpdma_fmt;
+ int mv_buf_selector = ctx->src_mv_buf_selector;
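+ /* the field flag passed to VPDMA is 0 for the top field and 1 for the bottom field */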
+ int field = vb->v4l2_buf.field == V4L2_FIELD_BOTTOM;
+ dma_addr_t dma_addr;
+ u32 flags = 0;
+
+ if (port == VPE_PORT_MV_IN) {
+ vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
+ dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ } else {
+ /* coplanar formats use the plane tied to this port; interleaved formats always use plane 0 */
+ int plane = fmt->coplanar ? p_data->vb_part : 0;
+
+ vpdma_fmt = fmt->vpdma_fmt[plane];
+
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ if (!dma_addr) {
+ vpe_err(ctx->dev,
+ "acquiring input buffer(%d) dma_addr failed\n",
+ port);
+ return;
+ }
+ }
+
+ if (q_data->flags & Q_DATA_FRAME_1D)
+ flags |= VPDMA_DATA_FRAME_1D;
+ if (q_data->flags & Q_DATA_MODE_TILED)
+ flags |= VPDMA_DATA_MODE_TILED;
+
+ vpdma_add_in_dtd(&ctx->desc_list, q_data->width, q_data->height,
+ c_rect, vpdma_fmt, dma_addr, p_data->channel, field, flags);
+}
+
+/*
+ * Enable the expected IRQ sources
+ */
+static void enable_irqs(struct vpe_ctx *ctx)
+{
+ write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
+ write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
+ VPE_DS1_UV_ERROR_INT);
+
+ vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
+}
+
+static void disable_irqs(struct vpe_ctx *ctx)
+{
+ write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
+ write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);
+
+ vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
+}
+
+/* device_run() - prepares and starts the device
+ *
+ * This function is only called when both the source and destination
+ * buffers are in place.
+ */
+static void device_run(void *priv)
+{
+ struct vpe_ctx *ctx = priv;
+ struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+
+ if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
+ ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ WARN_ON(ctx->src_vbs[2] == NULL);
+ ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ WARN_ON(ctx->src_vbs[1] == NULL);
+ }
+
+ ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ WARN_ON(ctx->src_vbs[0] == NULL);
+ ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ WARN_ON(ctx->dst_vb == NULL);
+
+ /* config descriptors */
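+ /* reload the shadow registers if this context changed them or if another context's registers are currently loaded */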
+ if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
+ vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
+ vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
+ ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
+ ctx->load_mmrs = false;
+ }
+
+ /* output data descriptors */
+ if (ctx->deinterlacing)
+ add_out_dtd(ctx, VPE_PORT_MV_OUT);
+
+ add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
+ if (d_q_data->fmt->coplanar)
+ add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
+
+ /* input data descriptors */
+ if (ctx->deinterlacing) {
+ add_in_dtd(ctx, VPE_PORT_LUMA3_IN);
+ add_in_dtd(ctx, VPE_PORT_CHROMA3_IN);
+
+ add_in_dtd(ctx, VPE_PORT_LUMA2_IN);
+ add_in_dtd(ctx, VPE_PORT_CHROMA2_IN);
+ }
+
+ add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
+ add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);
+
+ if (ctx->deinterlacing)
+ add_in_dtd(ctx, VPE_PORT_MV_IN);
+
+ /* sync on channel control descriptors for input ports */
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);
+
+ if (ctx->deinterlacing) {
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_LUMA2_IN);
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_CHROMA2_IN);
+
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_LUMA3_IN);
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_CHROMA3_IN);
+
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN);
+ }
+
+ /* sync on channel control descriptors for output ports */
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA_OUT);
+ if (d_q_data->fmt->coplanar)
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA_OUT);
+
+ if (ctx->deinterlacing)
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);
+
+ enable_irqs(ctx);
+
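+ /* map the descriptor list and hand it to VPDMA; completion raises the list 0 interrupt enabled above */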
+ vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
+ vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
+}
+
+static void dei_error(struct vpe_ctx *ctx)
+{
+ dev_warn(ctx->dev->v4l2_dev.dev,
+ "received DEI error interrupt\n");
+}
+
+static void ds1_uv_error(struct vpe_ctx *ctx)
+{
+ dev_warn(ctx->dev->v4l2_dev.dev,
+ "received downsampler error interrupt\n");
+}
+
+static irqreturn_t vpe_irq(int irq_vpe, void *data)
+{
+ struct vpe_dev *dev = (struct vpe_dev *)data;
+ struct vpe_ctx *ctx;
+ struct vpe_q_data *d_q_data;
+ struct vb2_buffer *s_vb, *d_vb;
+ struct v4l2_buffer *s_buf, *d_buf;
+ unsigned long flags;
+ u32 irqst0, irqst1;
+
+ irqst0 = read_reg(dev, VPE_INT0_STATUS0);
+ if (irqst0) {
+ write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
+ vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
+ }
+
+ irqst1 = read_reg(dev, VPE_INT0_STATUS1);
+ if (irqst1) {
+ write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
+ vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
+ }
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ vpe_err(dev, "instance released before end of transaction\n");
+ goto handled;
+ }
+
+ if (irqst1) {
+ if (irqst1 & VPE_DEI_ERROR_INT) {
+ irqst1 &= ~VPE_DEI_ERROR_INT;
+ dei_error(ctx);
+ }
+ if (irqst1 & VPE_DS1_UV_ERROR_INT) {
+ irqst1 &= ~VPE_DS1_UV_ERROR_INT;
+ ds1_uv_error(ctx);
+ }
+ }
+
+ if (irqst0) {
+ if (irqst0 & VPE_INT0_LIST0_COMPLETE)
+ vpdma_clear_list_stat(ctx->dev->vpdma);
+
+ irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
+ }
+
+ if (irqst0 | irqst1) {
+ dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: "
+ "INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
+ irqst0, irqst1);
+ }
+
+ disable_irqs(ctx);
+
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
+
+ vpdma_reset_desc_list(&ctx->desc_list);
+
+ /* the previous dst mv buffer becomes the next src mv buffer */
+ ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
+
+ if (ctx->aborting)
+ goto finished;
+
+ s_vb = ctx->src_vbs[0];
+ d_vb = ctx->dst_vb;
+ s_buf = &s_vb->v4l2_buf;
+ d_buf = &d_vb->v4l2_buf;
+
+ d_buf->timestamp = s_buf->timestamp;
+ if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) {
+ d_buf->flags |= V4L2_BUF_FLAG_TIMECODE;
+ d_buf->timecode = s_buf->timecode;
+ }
+ d_buf->sequence = ctx->sequence;
+ d_buf->field = ctx->field;
+
+ d_q_data = &ctx->q_data[Q_DATA_DST];
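+ /* for interlaced output a frame is complete only after both fields: bump the
+ * sequence on the bottom field and alternate the field for the next run */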
+ if (d_q_data->flags & Q_DATA_INTERLACED) {
+ if (ctx->field == V4L2_FIELD_BOTTOM) {
+ ctx->sequence++;
+ ctx->field = V4L2_FIELD_TOP;
+ } else {
+ WARN_ON(ctx->field != V4L2_FIELD_TOP);
+ ctx->field = V4L2_FIELD_BOTTOM;
+ }
+ } else {
+ ctx->sequence++;
+ }
+
+ if (ctx->deinterlacing)
+ s_vb = ctx->src_vbs[2];
+
+ spin_lock_irqsave(&dev->lock, flags);
+ v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (ctx->deinterlacing) {
+ ctx->src_vbs[2] = ctx->src_vbs[1];
+ ctx->src_vbs[1] = ctx->src_vbs[0];
+ }
+
+ ctx->bufs_completed++;
+ if (ctx->bufs_completed < ctx->bufs_per_job) {
+ device_run(ctx);
+ goto handled;
+ }
+
+finished:
+ vpe_dbg(ctx->dev, "finishing transaction\n");
+ ctx->bufs_completed = 0;
+ v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
+handled:
+ return IRQ_HANDLED;
+}
+
+/*
+ * video ioctls
+ */
+static int vpe_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
+ strlcpy(cap->bus_info, VPE_MODULE_NAME, sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, index;
+ struct vpe_fmt *fmt = NULL;
+
+ index = 0;
+ for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) {
+ if (vpe_formats[i].types & type) {
+ if (index == f->index) {
+ fmt = &vpe_formats[i];
+ break;
+ }
+ index++;
+ }
+ }
+
+ if (!fmt)
+ return -EINVAL;
+
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int vpe_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT);
+
+ return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE);
+}
+
+static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct vpe_ctx *ctx = file2ctx(file);
+ struct vb2_queue *vq;
+ struct vpe_q_data *q_data;
+ int i;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+
+ pix->width = q_data->width;
+ pix->height = q_data->height;
+ pix->pixelformat = q_data->fmt->fourcc;
+ pix->field = q_data->field;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ pix->colorspace = q_data->colorspace;
+ } else {
+ struct vpe_q_data *s_q_data;
+
+ /* get colorspace from the source queue */
+ s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ pix->colorspace = s_q_data->colorspace;
+ }
+
+ pix->num_planes = q_data->fmt->coplanar ? 2 : 1;
+
+ for (i = 0; i < pix->num_planes; i++) {
+ pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
+ pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ }
+
+ return 0;
+}
+
+static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
+ struct vpe_fmt *fmt, int type)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt;
+ int i;
+
+ if (!fmt || !(fmt->types & type)) {
+ vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
+ pix->field = V4L2_FIELD_NONE;
+
+ v4l_bound_align_image(&pix->width, MIN_W, MAX_W, W_ALIGN,
+ &pix->height, MIN_H, MAX_H, H_ALIGN,
+ S_ALIGN);
+
+ pix->num_planes = fmt->coplanar ? 2 : 1;
+ pix->pixelformat = fmt->fourcc;
+
+ if (type == VPE_FMT_TYPE_CAPTURE) {
+ struct vpe_q_data *s_q_data;
+
+ /* get colorspace from the source queue */
+ s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ pix->colorspace = s_q_data->colorspace;
+ } else {
+ if (!pix->colorspace)
+ pix->colorspace = V4L2_COLORSPACE_SMPTE240M;
+ }
+
+ for (i = 0; i < pix->num_planes; i++) {
+ int depth;
+
+ plane_fmt = &pix->plane_fmt[i];
+ depth = fmt->vpdma_fmt[i]->depth;
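+ /* only the luma stride needs byte alignment; the chroma plane stride is simply the pixel width */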
+
+ if (i == VPE_LUMA)
+ plane_fmt->bytesperline =
+ round_up((pix->width * depth) >> 3,
+ 1 << L_ALIGN);
+ else
+ plane_fmt->bytesperline = pix->width;
+
+ plane_fmt->sizeimage =
+ (pix->height * pix->width * depth) >> 3;
+ }
+
+ return 0;
+}
+
+static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_fmt *fmt = find_format(f);
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT);
+ else
+ return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE);
+}
+
+static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt;
+ struct vpe_q_data *q_data;
+ struct vb2_queue *vq;
+ int i;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ vpe_err(ctx->dev, "queue busy\n");
+ return -EBUSY;
+ }
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ q_data->fmt = find_format(f);
+ q_data->width = pix->width;
+ q_data->height = pix->height;
+ q_data->colorspace = pix->colorspace;
+ q_data->field = pix->field;
+
+ for (i = 0; i < pix->num_planes; i++) {
+ plane_fmt = &pix->plane_fmt[i];
+
+ q_data->bytesperline[i] = plane_fmt->bytesperline;
+ q_data->sizeimage[i] = plane_fmt->sizeimage;
+ }
+
+ q_data->c_rect.left = 0;
+ q_data->c_rect.top = 0;
+ q_data->c_rect.width = q_data->width;
+ q_data->c_rect.height = q_data->height;
+
+ if (q_data->field == V4L2_FIELD_ALTERNATE)
+ q_data->flags |= Q_DATA_INTERLACED;
+ else
+ q_data->flags &= ~Q_DATA_INTERLACED;
+
+ vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
+ f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
+ q_data->bytesperline[VPE_LUMA]);
+ if (q_data->fmt->coplanar)
+ vpe_dbg(ctx->dev, " bpl_uv %d\n",
+ q_data->bytesperline[VPE_CHROMA]);
+
+ return 0;
+}
+
+static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ int ret;
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ ret = vpe_try_fmt(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = __vpe_s_fmt(ctx, f);
+ if (ret)
+ return ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ set_src_registers(ctx);
+ else
+ set_dst_registers(ctx);
+
+ return set_srcdst_params(ctx);
+}
+
+static int vpe_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vpe_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vpe_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vpe_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vpe_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vpe_streamoff(struct file *file, void *priv, enum v4l2_buf_type type)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ vpe_dump_regs(ctx->dev);
+ vpdma_dump_regs(ctx->dev->vpdma);
+
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+/*
+ * Defines the number of buffers/frames a context can process with VPE before
+ * switching to a different context. The default value is 1 buffer per context.
+ */
+#define V4L2_CID_VPE_BUFS_PER_JOB (V4L2_CID_USER_TI_VPE_BASE + 0)
+
+static int vpe_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpe_ctx *ctx =
+ container_of(ctrl->handler, struct vpe_ctx, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VPE_BUFS_PER_JOB:
+ ctx->bufs_per_job = ctrl->val;
+ break;
+
+ default:
+ vpe_err(ctx->dev, "Invalid control\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
+ .s_ctrl = vpe_s_ctrl,
+};
+
+static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
+ .vidioc_querycap = vpe_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vpe_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vpe_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vpe_s_fmt,
+
+ .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vpe_g_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vpe_try_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vpe_s_fmt,
+
+ .vidioc_reqbufs = vpe_reqbufs,
+ .vidioc_querybuf = vpe_querybuf,
+
+ .vidioc_qbuf = vpe_qbuf,
+ .vidioc_dqbuf = vpe_dqbuf,
+
+ .vidioc_streamon = vpe_streamon,
+ .vidioc_streamoff = vpe_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+static int vpe_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ int i;
+ struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vpe_q_data *q_data;
+
+ q_data = get_q_data(ctx, vq->type);
+
+ *nplanes = q_data->fmt->coplanar ? 2 : 1;
+
+ for (i = 0; i < *nplanes; i++) {
+ sizes[i] = q_data->sizeimage[i];
+ alloc_ctxs[i] = ctx->dev->alloc_ctx;
+ }
+
+ vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
+ sizes[VPE_LUMA]);
+ if (q_data->fmt->coplanar)
+ vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
+
+ return 0;
+}
+
+static int vpe_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpe_q_data *q_data;
+ int i, num_planes;
+
+ vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+ num_planes = q_data->fmt->coplanar ? 2 : 1;
+
+ for (i = 0; i < num_planes; i++) {
+ if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ vpe_err(ctx->dev,
+ "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, i),
+ (long) q_data->sizeimage[i]);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < num_planes; i++)
+ vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
+
+ return 0;
+}
+
+static void vpe_buf_queue(struct vb2_buffer *vb)
+{
+ struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+static void vpe_wait_prepare(struct vb2_queue *q)
+{
+ struct vpe_ctx *ctx = vb2_get_drv_priv(q);
+ vpe_unlock(ctx);
+}
+
+static void vpe_wait_finish(struct vb2_queue *q)
+{
+ struct vpe_ctx *ctx = vb2_get_drv_priv(q);
+ vpe_lock(ctx);
+}
+
+static struct vb2_ops vpe_qops = {
+ .queue_setup = vpe_queue_setup,
+ .buf_prepare = vpe_buf_prepare,
+ .buf_queue = vpe_buf_queue,
+ .wait_prepare = vpe_wait_prepare,
+ .wait_finish = vpe_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct vpe_ctx *ctx = priv;
+ int ret;
+
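+ /* the OUTPUT queue is the mem2mem source: buffers queued here feed the VPE */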
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &vpe_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &vpe_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static const struct v4l2_ctrl_config vpe_bufs_per_job = {
+ .ops = &vpe_ctrl_ops,
+ .id = V4L2_CID_VPE_BUFS_PER_JOB,
+ .name = "Buffers Per Transaction",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = VPE_DEF_BUFS_PER_JOB,
+ .min = 1,
+ .max = VIDEO_MAX_FRAME,
+ .step = 1,
+};
+
+/*
+ * File operations
+ */
+static int vpe_open(struct file *file)
+{
+ struct vpe_dev *dev = video_drvdata(file);
+ struct vpe_ctx *ctx = NULL;
+ struct vpe_q_data *s_q_data;
+ struct v4l2_ctrl_handler *hdl;
+ int ret;
+
+ vpe_dbg(dev, "vpe_open\n");
+
+ if (!dev->vpdma->ready) {
+ vpe_err(dev, "vpdma firmware not loaded\n");
+ return -ENODEV;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex)) {
+ ret = -ERESTARTSYS;
+ goto free_ctx;
+ }
+
+ ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
+ VPDMA_LIST_TYPE_NORMAL);
+ if (ret != 0)
+ goto unlock;
+
+ ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
+ if (ret != 0)
+ goto free_desc_list;
+
+ init_adb_hdrs(ctx);
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+
+ hdl = &ctx->hdl;
+ v4l2_ctrl_handler_init(hdl, 1);
+ v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
+ if (hdl->error) {
+ ret = hdl->error;
+ goto exit_fh;
+ }
+ ctx->fh.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
+
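+ /* sane defaults for a new context: 1080p progressive frames on both queues */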
+ s_q_data = &ctx->q_data[Q_DATA_SRC];
+ s_q_data->fmt = &vpe_formats[2];
+ s_q_data->width = 1920;
+ s_q_data->height = 1080;
+ s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
+ s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
+ s_q_data->colorspace = V4L2_COLORSPACE_SMPTE240M;
+ s_q_data->field = V4L2_FIELD_NONE;
+ s_q_data->c_rect.left = 0;
+ s_q_data->c_rect.top = 0;
+ s_q_data->c_rect.width = s_q_data->width;
+ s_q_data->c_rect.height = s_q_data->height;
+ s_q_data->flags = 0;
+
+ ctx->q_data[Q_DATA_DST] = *s_q_data;
+
+ set_dei_shadow_registers(ctx);
+ set_src_registers(ctx);
+ set_dst_registers(ctx);
+ ret = set_srcdst_params(ctx);
+ if (ret)
+ goto exit_fh;
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+
+ if (IS_ERR(ctx->m2m_ctx)) {
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto exit_fh;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ /*
+ * For now, just report the creation of the first instance; we can later
+ * optimize the driver to enable or disable clocks when the first
+ * instance is created or the last instance is released.
+ */
+ if (atomic_inc_return(&dev->num_instances) == 1)
+ vpe_dbg(dev, "first instance created\n");
+
+ ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;
+
+ ctx->load_mmrs = true;
+
+ vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
+ ctx, ctx->m2m_ctx);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+exit_fh:
+ v4l2_ctrl_handler_free(hdl);
+ v4l2_fh_exit(&ctx->fh);
+ vpdma_free_desc_buf(&ctx->mmr_adb);
+free_desc_list:
+ vpdma_free_desc_list(&ctx->desc_list);
+unlock:
+ mutex_unlock(&dev->dev_mutex);
+free_ctx:
+ kfree(ctx);
+ return ret;
+}
+
+static int vpe_release(struct file *file)
+{
+ struct vpe_dev *dev = video_drvdata(file);
+ struct vpe_ctx *ctx = file2ctx(file);
+
+ vpe_dbg(dev, "releasing instance %p\n", ctx);
+
+ mutex_lock(&dev->dev_mutex);
+ free_vbs(ctx);
+ free_mv_buffers(ctx);
+ vpdma_free_desc_list(&ctx->desc_list);
+ vpdma_free_desc_buf(&ctx->mmr_adb);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+
+ kfree(ctx);
+
+ /*
+ * For now, just report the release of the last instance; we can later
+ * optimize the driver to enable or disable clocks when the first
+ * instance is created or the last instance is released.
+ */
+ if (atomic_dec_return(&dev->num_instances) == 0)
+ vpe_dbg(dev, "last instance released\n");
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+
+static unsigned int vpe_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_dev *dev = ctx->dev;
+ int ret;
+
+ mutex_lock(&dev->dev_mutex);
+ ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+ mutex_unlock(&dev->dev_mutex);
+ return ret;
+}
+
+static int vpe_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_dev *dev = ctx->dev;
+ int ret;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex))
+ return -ERESTARTSYS;
+ ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+ mutex_unlock(&dev->dev_mutex);
+ return ret;
+}
+
+static const struct v4l2_file_operations vpe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpe_open,
+ .release = vpe_release,
+ .poll = vpe_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vpe_mmap,
+};
+
+static struct video_device vpe_videodev = {
+ .name = VPE_MODULE_NAME,
+ .fops = &vpe_fops,
+ .ioctl_ops = &vpe_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_ready = job_ready,
+ .job_abort = job_abort,
+ .lock = vpe_lock,
+ .unlock = vpe_unlock,
+};
+
+static int vpe_runtime_get(struct platform_device *pdev)
+{
+ int r;
+
+ dev_dbg(&pdev->dev, "vpe_runtime_get\n");
+
+ r = pm_runtime_get_sync(&pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+static void vpe_runtime_put(struct platform_device *pdev)
+{
+
+ int r;
+
+ dev_dbg(&pdev->dev, "vpe_runtime_put\n");
+
+ r = pm_runtime_put_sync(&pdev->dev);
+ WARN_ON(r < 0 && r != -ENOSYS);
+}
+
+static int vpe_probe(struct platform_device *pdev)
+{
+ struct vpe_dev *dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret, irq, func;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ atomic_set(&dev->num_instances, 0);
+ mutex_init(&dev->dev_mutex);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpe_top");
+ /*
+ * HACK: we get resource info from the device tree as a list of VPE sub
+ * blocks. The driver currently uses only the base of vpe_top for register
+ * access; it should later be changed to access registers based on the
+ * sub block base addresses.
+ */
+ dev->base = devm_ioremap(&pdev->dev, res->start, SZ_32K);
+ if (!dev->base) {
+ ret = -ENOMEM;
+ goto v4l2_dev_unreg;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME,
+ dev);
+ if (ret)
+ goto v4l2_dev_unreg;
+
+ platform_set_drvdata(pdev, dev);
+
+ dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ vpe_err(dev, "Failed to alloc vb2 context\n");
+ ret = PTR_ERR(dev->alloc_ctx);
+ goto v4l2_dev_unreg;
+ }
+
+ dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ vpe_err(dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+ goto rel_ctx;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = vpe_runtime_get(pdev);
+ if (ret)
+ goto rel_m2m;
+
+ /* Perform clk enable followed by reset */
+ vpe_set_clock_enable(dev, 1);
+
+ vpe_top_reset(dev);
+
+ func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK,
+ VPE_PID_FUNC_SHIFT);
+ vpe_dbg(dev, "VPE PID function %x\n", func);
+
+ vpe_top_vpdma_reset(dev);
+
+ dev->vpdma = vpdma_create(pdev);
+ if (IS_ERR(dev->vpdma)) {
+ ret = PTR_ERR(dev->vpdma);
+ goto runtime_put;
+ }
+
+ vfd = &dev->vfd;
+ *vfd = vpe_videodev;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ vpe_err(dev, "Failed to register video device\n");
+ goto runtime_put;
+ }
+
+ video_set_drvdata(vfd, dev);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name);
+ dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
+ vfd->num);
+
+ return 0;
+
+runtime_put:
+ vpe_runtime_put(pdev);
+rel_m2m:
+ pm_runtime_disable(&pdev->dev);
+ v4l2_m2m_release(dev->m2m_dev);
+rel_ctx:
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+v4l2_dev_unreg:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int vpe_remove(struct platform_device *pdev)
+{
+ struct vpe_dev *dev =
+ (struct vpe_dev *) platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME);
+
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(&dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+
+ vpe_set_clock_enable(dev, 0);
+ vpe_runtime_put(pdev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id vpe_of_match[] = {
+ {
+ .compatible = "ti,vpe",
+ },
+ {},
+};
+#else
+#define vpe_of_match NULL
+#endif
+
+static struct platform_driver vpe_pdrv = {
+ .probe = vpe_probe,
+ .remove = vpe_remove,
+ .driver = {
+ .name = VPE_MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = vpe_of_match,
+ },
+};
+
+static void __exit vpe_exit(void)
+{
+ platform_driver_unregister(&vpe_pdrv);
+}
+
+static int __init vpe_init(void)
+{
+ return platform_driver_register(&vpe_pdrv);
+}
+
+module_init(vpe_init);
+module_exit(vpe_exit);
+
+MODULE_DESCRIPTION("TI VPE driver");
+MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/ti-vpe/vpe_regs.h b/drivers/media/platform/ti-vpe/vpe_regs.h
new file mode 100644
index 000000000000..ed214e828398
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpe_regs.h
@@ -0,0 +1,496 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_VPE_REGS_H
+#define __TI_VPE_REGS_H
+
+/* VPE register offsets and field selectors */
+
+/* VPE top level regs */
+#define VPE_PID 0x0000
+#define VPE_PID_MINOR_MASK 0x3f
+#define VPE_PID_MINOR_SHIFT 0
+#define VPE_PID_CUSTOM_MASK 0x03
+#define VPE_PID_CUSTOM_SHIFT 6
+#define VPE_PID_MAJOR_MASK 0x07
+#define VPE_PID_MAJOR_SHIFT 8
+#define VPE_PID_RTL_MASK 0x1f
+#define VPE_PID_RTL_SHIFT 11
+#define VPE_PID_FUNC_MASK 0xfff
+#define VPE_PID_FUNC_SHIFT 16
+#define VPE_PID_SCHEME_MASK 0x03
+#define VPE_PID_SCHEME_SHIFT 30
+
+#define VPE_SYSCONFIG 0x0010
+#define VPE_SYSCONFIG_IDLE_MASK 0x03
+#define VPE_SYSCONFIG_IDLE_SHIFT 2
+#define VPE_SYSCONFIG_STANDBY_MASK 0x03
+#define VPE_SYSCONFIG_STANDBY_SHIFT 4
+#define VPE_FORCE_IDLE_MODE 0
+#define VPE_NO_IDLE_MODE 1
+#define VPE_SMART_IDLE_MODE 2
+#define VPE_SMART_IDLE_WAKEUP_MODE 3
+#define VPE_FORCE_STANDBY_MODE 0
+#define VPE_NO_STANDBY_MODE 1
+#define VPE_SMART_STANDBY_MODE 2
+#define VPE_SMART_STANDBY_WAKEUP_MODE 3
+
+#define VPE_INT0_STATUS0_RAW_SET 0x0020
+#define VPE_INT0_STATUS0_RAW VPE_INT0_STATUS0_RAW_SET
+#define VPE_INT0_STATUS0_CLR 0x0028
+#define VPE_INT0_STATUS0 VPE_INT0_STATUS0_CLR
+#define VPE_INT0_ENABLE0_SET 0x0030
+#define VPE_INT0_ENABLE0 VPE_INT0_ENABLE0_SET
+#define VPE_INT0_ENABLE0_CLR 0x0038
+#define VPE_INT0_LIST0_COMPLETE (1 << 0)
+#define VPE_INT0_LIST0_NOTIFY (1 << 1)
+#define VPE_INT0_LIST1_COMPLETE (1 << 2)
+#define VPE_INT0_LIST1_NOTIFY (1 << 3)
+#define VPE_INT0_LIST2_COMPLETE (1 << 4)
+#define VPE_INT0_LIST2_NOTIFY (1 << 5)
+#define VPE_INT0_LIST3_COMPLETE (1 << 6)
+#define VPE_INT0_LIST3_NOTIFY (1 << 7)
+#define VPE_INT0_LIST4_COMPLETE (1 << 8)
+#define VPE_INT0_LIST4_NOTIFY (1 << 9)
+#define VPE_INT0_LIST5_COMPLETE (1 << 10)
+#define VPE_INT0_LIST5_NOTIFY (1 << 11)
+#define VPE_INT0_LIST6_COMPLETE (1 << 12)
+#define VPE_INT0_LIST6_NOTIFY (1 << 13)
+#define VPE_INT0_LIST7_COMPLETE (1 << 14)
+#define VPE_INT0_LIST7_NOTIFY (1 << 15)
+#define VPE_INT0_DESCRIPTOR (1 << 16)
+#define VPE_DEI_FMD_INT (1 << 18)
+
+#define VPE_INT0_STATUS1_RAW_SET 0x0024
+#define VPE_INT0_STATUS1_RAW VPE_INT0_STATUS1_RAW_SET
+#define VPE_INT0_STATUS1_CLR 0x002c
+#define VPE_INT0_STATUS1 VPE_INT0_STATUS1_CLR
+#define VPE_INT0_ENABLE1_SET 0x0034
+#define VPE_INT0_ENABLE1 VPE_INT0_ENABLE1_SET
+#define VPE_INT0_ENABLE1_CLR 0x003c
+#define VPE_INT0_CHANNEL_GROUP0 (1 << 0)
+#define VPE_INT0_CHANNEL_GROUP1 (1 << 1)
+#define VPE_INT0_CHANNEL_GROUP2 (1 << 2)
+#define VPE_INT0_CHANNEL_GROUP3 (1 << 3)
+#define VPE_INT0_CHANNEL_GROUP4 (1 << 4)
+#define VPE_INT0_CHANNEL_GROUP5 (1 << 5)
+#define VPE_INT0_CLIENT (1 << 7)
+#define VPE_DEI_ERROR_INT (1 << 16)
+#define VPE_DS1_UV_ERROR_INT (1 << 22)
+
+#define VPE_INTC_EOI 0x00a0
+
+#define VPE_CLK_ENABLE 0x0100
+#define VPE_VPEDMA_CLK_ENABLE (1 << 0)
+#define VPE_DATA_PATH_CLK_ENABLE (1 << 1)
+
+#define VPE_CLK_RESET 0x0104
+#define VPE_VPDMA_CLK_RESET_MASK 0x1
+#define VPE_VPDMA_CLK_RESET_SHIFT 0
+#define VPE_DATA_PATH_CLK_RESET_MASK 0x1
+#define VPE_DATA_PATH_CLK_RESET_SHIFT 1
+#define VPE_MAIN_RESET_MASK 0x1
+#define VPE_MAIN_RESET_SHIFT 31
+
+#define VPE_CLK_FORMAT_SELECT 0x010c
+#define VPE_CSC_SRC_SELECT_MASK 0x03
+#define VPE_CSC_SRC_SELECT_SHIFT 0
+#define VPE_RGB_OUT_SELECT (1 << 8)
+#define VPE_DS_SRC_SELECT_MASK 0x07
+#define VPE_DS_SRC_SELECT_SHIFT 9
+#define VPE_DS_BYPASS (1 << 16)
+#define VPE_COLOR_SEPARATE_422 (1 << 18)
+
+#define VPE_DS_SRC_DEI_SCALER (5 << VPE_DS_SRC_SELECT_SHIFT)
+#define VPE_CSC_SRC_DEI_SCALER (3 << VPE_CSC_SRC_SELECT_SHIFT)
+
+#define VPE_CLK_RANGE_MAP 0x011c
+#define VPE_RANGE_RANGE_MAP_Y_MASK 0x07
+#define VPE_RANGE_RANGE_MAP_Y_SHIFT 0
+#define VPE_RANGE_RANGE_MAP_UV_MASK 0x07
+#define VPE_RANGE_RANGE_MAP_UV_SHIFT 3
+#define VPE_RANGE_MAP_ON (1 << 6)
+#define VPE_RANGE_REDUCTION_ON (1 << 28)
+
+/* VPE chrominance upsampler regs */
+#define VPE_US1_R0 0x0304
+#define VPE_US2_R0 0x0404
+#define VPE_US3_R0 0x0504
+#define VPE_US_C1_MASK 0x3fff
+#define VPE_US_C1_SHIFT 2
+#define VPE_US_C0_MASK 0x3fff
+#define VPE_US_C0_SHIFT 18
+#define VPE_US_MODE_MASK 0x03
+#define VPE_US_MODE_SHIFT 16
+#define VPE_ANCHOR_FID0_C1_MASK 0x3fff
+#define VPE_ANCHOR_FID0_C1_SHIFT 2
+#define VPE_ANCHOR_FID0_C0_MASK 0x3fff
+#define VPE_ANCHOR_FID0_C0_SHIFT 18
+
+#define VPE_US1_R1 0x0308
+#define VPE_US2_R1 0x0408
+#define VPE_US3_R1 0x0508
+#define VPE_ANCHOR_FID0_C3_MASK 0x3fff
+#define VPE_ANCHOR_FID0_C3_SHIFT 2
+#define VPE_ANCHOR_FID0_C2_MASK 0x3fff
+#define VPE_ANCHOR_FID0_C2_SHIFT 18
+
+#define VPE_US1_R2 0x030c
+#define VPE_US2_R2 0x040c
+#define VPE_US3_R2 0x050c
+#define VPE_INTERP_FID0_C1_MASK 0x3fff
+#define VPE_INTERP_FID0_C1_SHIFT 2
+#define VPE_INTERP_FID0_C0_MASK 0x3fff
+#define VPE_INTERP_FID0_C0_SHIFT 18
+
+#define VPE_US1_R3 0x0310
+#define VPE_US2_R3 0x0410
+#define VPE_US3_R3 0x0510
+#define VPE_INTERP_FID0_C3_MASK 0x3fff
+#define VPE_INTERP_FID0_C3_SHIFT 2
+#define VPE_INTERP_FID0_C2_MASK 0x3fff
+#define VPE_INTERP_FID0_C2_SHIFT 18
+
+#define VPE_US1_R4 0x0314
+#define VPE_US2_R4 0x0414
+#define VPE_US3_R4 0x0514
+#define VPE_ANCHOR_FID1_C1_MASK 0x3fff
+#define VPE_ANCHOR_FID1_C1_SHIFT 2
+#define VPE_ANCHOR_FID1_C0_MASK 0x3fff
+#define VPE_ANCHOR_FID1_C0_SHIFT 18
+
+#define VPE_US1_R5 0x0318
+#define VPE_US2_R5 0x0418
+#define VPE_US3_R5 0x0518
+#define VPE_ANCHOR_FID1_C3_MASK 0x3fff
+#define VPE_ANCHOR_FID1_C3_SHIFT 2
+#define VPE_ANCHOR_FID1_C2_MASK 0x3fff
+#define VPE_ANCHOR_FID1_C2_SHIFT 18
+
+#define VPE_US1_R6 0x031c
+#define VPE_US2_R6 0x041c
+#define VPE_US3_R6 0x051c
+#define VPE_INTERP_FID1_C1_MASK 0x3fff
+#define VPE_INTERP_FID1_C1_SHIFT 2
+#define VPE_INTERP_FID1_C0_MASK 0x3fff
+#define VPE_INTERP_FID1_C0_SHIFT 18
+
+#define VPE_US1_R7 0x0320
+#define VPE_US2_R7 0x0420
+#define VPE_US3_R7 0x0520
+#define VPE_INTERP_FID0_C3_MASK 0x3fff
+#define VPE_INTERP_FID0_C3_SHIFT 2
+#define VPE_INTERP_FID0_C2_MASK 0x3fff
+#define VPE_INTERP_FID0_C2_SHIFT 18
+
+/* VPE de-interlacer regs */
+#define VPE_DEI_FRAME_SIZE 0x0600
+#define VPE_DEI_WIDTH_MASK 0x07ff
+#define VPE_DEI_WIDTH_SHIFT 0
+#define VPE_DEI_HEIGHT_MASK 0x07ff
+#define VPE_DEI_HEIGHT_SHIFT 16
+#define VPE_DEI_INTERLACE_BYPASS (1 << 29)
+#define VPE_DEI_FIELD_FLUSH (1 << 30)
+#define VPE_DEI_PROGRESSIVE (1 << 31)
+
+#define VPE_MDT_BYPASS 0x0604
+#define VPE_MDT_TEMPMAX_BYPASS (1 << 0)
+#define VPE_MDT_SPATMAX_BYPASS (1 << 1)
+
+#define VPE_MDT_SF_THRESHOLD 0x0608
+#define VPE_MDT_SF_SC_THR1_MASK 0xff
+#define VPE_MDT_SF_SC_THR1_SHIFT 0
+#define VPE_MDT_SF_SC_THR2_MASK 0xff
+#define VPE_MDT_SF_SC_THR2_SHIFT 0
+#define VPE_MDT_SF_SC_THR3_MASK 0xff
+#define VPE_MDT_SF_SC_THR3_SHIFT 0
+
+#define VPE_EDI_CONFIG 0x060c
+#define VPE_EDI_INP_MODE_MASK 0x03
+#define VPE_EDI_INP_MODE_SHIFT 0
+#define VPE_EDI_ENABLE_3D (1 << 2)
+#define VPE_EDI_ENABLE_CHROMA_3D (1 << 3)
+#define VPE_EDI_CHROMA3D_COR_THR_MASK 0xff
+#define VPE_EDI_CHROMA3D_COR_THR_SHIFT 8
+#define VPE_EDI_DIR_COR_LOWER_THR_MASK 0xff
+#define VPE_EDI_DIR_COR_LOWER_THR_SHIFT 16
+#define VPE_EDI_COR_SCALE_FACTOR_MASK 0xff
+#define VPE_EDI_COR_SCALE_FACTOR_SHIFT 23
+
+#define VPE_DEI_EDI_LUT_R0 0x0610
+#define VPE_EDI_LUT0_MASK 0x1f
+#define VPE_EDI_LUT0_SHIFT 0
+#define VPE_EDI_LUT1_MASK 0x1f
+#define VPE_EDI_LUT1_SHIFT 8
+#define VPE_EDI_LUT2_MASK 0x1f
+#define VPE_EDI_LUT2_SHIFT 16
+#define VPE_EDI_LUT3_MASK 0x1f
+#define VPE_EDI_LUT3_SHIFT 24
+
+#define VPE_DEI_EDI_LUT_R1 0x0614
+#define VPE_EDI_LUT0_MASK 0x1f
+#define VPE_EDI_LUT0_SHIFT 0
+#define VPE_EDI_LUT1_MASK 0x1f
+#define VPE_EDI_LUT1_SHIFT 8
+#define VPE_EDI_LUT2_MASK 0x1f
+#define VPE_EDI_LUT2_SHIFT 16
+#define VPE_EDI_LUT3_MASK 0x1f
+#define VPE_EDI_LUT3_SHIFT 24
+
+#define VPE_DEI_EDI_LUT_R2 0x0618
+#define VPE_EDI_LUT4_MASK 0x1f
+#define VPE_EDI_LUT4_SHIFT 0
+#define VPE_EDI_LUT5_MASK 0x1f
+#define VPE_EDI_LUT5_SHIFT 8
+#define VPE_EDI_LUT6_MASK 0x1f
+#define VPE_EDI_LUT6_SHIFT 16
+#define VPE_EDI_LUT7_MASK 0x1f
+#define VPE_EDI_LUT7_SHIFT 24
+
+#define VPE_DEI_EDI_LUT_R3 0x061c
+#define VPE_EDI_LUT8_MASK 0x1f
+#define VPE_EDI_LUT8_SHIFT 0
+#define VPE_EDI_LUT9_MASK 0x1f
+#define VPE_EDI_LUT9_SHIFT 8
+#define VPE_EDI_LUT10_MASK 0x1f
+#define VPE_EDI_LUT10_SHIFT 16
+#define VPE_EDI_LUT11_MASK 0x1f
+#define VPE_EDI_LUT11_SHIFT 24
+
+#define VPE_DEI_FMD_WINDOW_R0 0x0620
+#define VPE_FMD_WINDOW_MINX_MASK 0x07ff
+#define VPE_FMD_WINDOW_MINX_SHIFT 0
+#define VPE_FMD_WINDOW_MAXX_MASK 0x07ff
+#define VPE_FMD_WINDOW_MAXX_SHIFT 16
+#define VPE_FMD_WINDOW_ENABLE (1 << 31)
+
+#define VPE_DEI_FMD_WINDOW_R1 0x0624
+#define VPE_FMD_WINDOW_MINY_MASK 0x07ff
+#define VPE_FMD_WINDOW_MINY_SHIFT 0
+#define VPE_FMD_WINDOW_MAXY_MASK 0x07ff
+#define VPE_FMD_WINDOW_MAXY_SHIFT 16
+
+#define VPE_DEI_FMD_CONTROL_R0 0x0628
+#define VPE_FMD_ENABLE (1 << 0)
+#define VPE_FMD_LOCK (1 << 1)
+#define VPE_FMD_JAM_DIR (1 << 2)
+#define VPE_FMD_BED_ENABLE (1 << 3)
+#define VPE_FMD_CAF_FIELD_THR_MASK 0xff
+#define VPE_FMD_CAF_FIELD_THR_SHIFT 16
+#define VPE_FMD_CAF_LINE_THR_MASK 0xff
+#define VPE_FMD_CAF_LINE_THR_SHIFT 24
+
+#define VPE_DEI_FMD_CONTROL_R1 0x062c
+#define VPE_FMD_CAF_THR_MASK 0x000fffff
+#define VPE_FMD_CAF_THR_SHIFT 0
+
+#define VPE_DEI_FMD_STATUS_R0 0x0630
+#define VPE_FMD_CAF_MASK 0x000fffff
+#define VPE_FMD_CAF_SHIFT 0
+#define VPE_FMD_RESET (1 << 24)
+
+#define VPE_DEI_FMD_STATUS_R1 0x0634
+#define VPE_FMD_FIELD_DIFF_MASK 0x0fffffff
+#define VPE_FMD_FIELD_DIFF_SHIFT 0
+
+#define VPE_DEI_FMD_STATUS_R2 0x0638
+#define VPE_FMD_FRAME_DIFF_MASK 0x000fffff
+#define VPE_FMD_FRAME_DIFF_SHIFT 0
+
+/* VPE scaler regs */
+#define VPE_SC_MP_SC0 0x0700
+#define VPE_INTERLACE_O (1 << 0)
+#define VPE_LINEAR (1 << 1)
+#define VPE_SC_BYPASS (1 << 2)
+#define VPE_INVT_FID (1 << 3)
+#define VPE_USE_RAV (1 << 4)
+#define VPE_ENABLE_EV (1 << 5)
+#define VPE_AUTO_HS (1 << 6)
+#define VPE_DCM_2X (1 << 7)
+#define VPE_DCM_4X (1 << 8)
+#define VPE_HP_BYPASS (1 << 9)
+#define VPE_INTERLACE_I (1 << 10)
+#define VPE_ENABLE_SIN2_VER_INTP (1 << 11)
+#define VPE_Y_PK_EN (1 << 14)
+#define VPE_TRIM (1 << 15)
+#define VPE_SELFGEN_FID (1 << 16)
+
+#define VPE_SC_MP_SC1 0x0704
+#define VPE_ROW_ACC_INC_MASK 0x07ffffff
+#define VPE_ROW_ACC_INC_SHIFT 0
+
+#define VPE_SC_MP_SC2 0x0708
+#define VPE_ROW_ACC_OFFSET_MASK 0x0fffffff
+#define VPE_ROW_ACC_OFFSET_SHIFT 0
+
+#define VPE_SC_MP_SC3 0x070c
+#define VPE_ROW_ACC_OFFSET_B_MASK 0x0fffffff
+#define VPE_ROW_ACC_OFFSET_B_SHIFT 0
+
+#define VPE_SC_MP_SC4 0x0710
+#define VPE_TAR_H_MASK 0x07ff
+#define VPE_TAR_H_SHIFT 0
+#define VPE_TAR_W_MASK 0x07ff
+#define VPE_TAR_W_SHIFT 12
+#define VPE_LIN_ACC_INC_U_MASK 0x07
+#define VPE_LIN_ACC_INC_U_SHIFT 24
+#define VPE_NLIN_ACC_INIT_U_MASK 0x07
+#define VPE_NLIN_ACC_INIT_U_SHIFT 28
+
+#define VPE_SC_MP_SC5 0x0714
+#define VPE_SRC_H_MASK 0x07ff
+#define VPE_SRC_H_SHIFT 0
+#define VPE_SRC_W_MASK 0x07ff
+#define VPE_SRC_W_SHIFT 12
+#define VPE_NLIN_ACC_INC_U_MASK 0x07
+#define VPE_NLIN_ACC_INC_U_SHIFT 24
+
+#define VPE_SC_MP_SC6 0x0718
+#define VPE_ROW_ACC_INIT_RAV_MASK 0x03ff
+#define VPE_ROW_ACC_INIT_RAV_SHIFT 0
+#define VPE_ROW_ACC_INIT_RAV_B_MASK 0x03ff
+#define VPE_ROW_ACC_INIT_RAV_B_SHIFT 10
+
+#define VPE_SC_MP_SC8 0x0720
+#define VPE_NLIN_LEFT_MASK 0x07ff
+#define VPE_NLIN_LEFT_SHIFT 0
+#define VPE_NLIN_RIGHT_MASK 0x07ff
+#define VPE_NLIN_RIGHT_SHIFT 12
+
+#define VPE_SC_MP_SC9 0x0724
+#define VPE_LIN_ACC_INC VPE_SC_MP_SC9
+
+#define VPE_SC_MP_SC10 0x0728
+#define VPE_NLIN_ACC_INIT VPE_SC_MP_SC10
+
+#define VPE_SC_MP_SC11 0x072c
+#define VPE_NLIN_ACC_INC VPE_SC_MP_SC11
+
+#define VPE_SC_MP_SC12 0x0730
+#define VPE_COL_ACC_OFFSET_MASK 0x01ffffff
+#define VPE_COL_ACC_OFFSET_SHIFT 0
+
+#define VPE_SC_MP_SC13 0x0734
+#define VPE_SC_FACTOR_RAV_MASK 0x03ff
+#define VPE_SC_FACTOR_RAV_SHIFT 0
+#define VPE_CHROMA_INTP_THR_MASK 0x03ff
+#define VPE_CHROMA_INTP_THR_SHIFT 12
+#define VPE_DELTA_CHROMA_THR_MASK 0x0f
+#define VPE_DELTA_CHROMA_THR_SHIFT 24
+
+#define VPE_SC_MP_SC17 0x0744
+#define VPE_EV_THR_MASK 0x03ff
+#define VPE_EV_THR_SHIFT 12
+#define VPE_DELTA_LUMA_THR_MASK 0x0f
+#define VPE_DELTA_LUMA_THR_SHIFT 24
+#define VPE_DELTA_EV_THR_MASK 0x0f
+#define VPE_DELTA_EV_THR_SHIFT 28
+
+#define VPE_SC_MP_SC18 0x0748
+#define VPE_HS_FACTOR_MASK 0x03ff
+#define VPE_HS_FACTOR_SHIFT 0
+#define VPE_CONF_DEFAULT_MASK 0x01ff
+#define VPE_CONF_DEFAULT_SHIFT 16
+
+#define VPE_SC_MP_SC19 0x074c
+#define VPE_HPF_COEFF0_MASK 0xff
+#define VPE_HPF_COEFF0_SHIFT 0
+#define VPE_HPF_COEFF1_MASK 0xff
+#define VPE_HPF_COEFF1_SHIFT 8
+#define VPE_HPF_COEFF2_MASK 0xff
+#define VPE_HPF_COEFF2_SHIFT 16
+#define VPE_HPF_COEFF3_MASK 0xff
+#define VPE_HPF_COEFF3_SHIFT 23
+
+#define VPE_SC_MP_SC20 0x0750
+#define VPE_HPF_COEFF4_MASK 0xff
+#define VPE_HPF_COEFF4_SHIFT 0
+#define VPE_HPF_COEFF5_MASK 0xff
+#define VPE_HPF_COEFF5_SHIFT 8
+#define VPE_HPF_NORM_SHIFT_MASK 0x07
+#define VPE_HPF_NORM_SHIFT_SHIFT 16
+#define VPE_NL_LIMIT_MASK 0x1ff
+#define VPE_NL_LIMIT_SHIFT 20
+
+#define VPE_SC_MP_SC21 0x0754
+#define VPE_NL_LO_THR_MASK 0x01ff
+#define VPE_NL_LO_THR_SHIFT 0
+#define VPE_NL_LO_SLOPE_MASK 0xff
+#define VPE_NL_LO_SLOPE_SHIFT 16
+
+#define VPE_SC_MP_SC22 0x0758
+#define VPE_NL_HI_THR_MASK 0x01ff
+#define VPE_NL_HI_THR_SHIFT 0
+#define VPE_NL_HI_SLOPE_SH_MASK 0x07
+#define VPE_NL_HI_SLOPE_SH_SHIFT 16
+
+#define VPE_SC_MP_SC23 0x075c
+#define VPE_GRADIENT_THR_MASK 0x07ff
+#define VPE_GRADIENT_THR_SHIFT 0
+#define VPE_GRADIENT_THR_RANGE_MASK 0x0f
+#define VPE_GRADIENT_THR_RANGE_SHIFT 12
+#define VPE_MIN_GY_THR_MASK 0xff
+#define VPE_MIN_GY_THR_SHIFT 16
+#define VPE_MIN_GY_THR_RANGE_MASK 0x0f
+#define VPE_MIN_GY_THR_RANGE_SHIFT 28
+
+#define VPE_SC_MP_SC24 0x0760
+#define VPE_ORG_H_MASK 0x07ff
+#define VPE_ORG_H_SHIFT 0
+#define VPE_ORG_W_MASK 0x07ff
+#define VPE_ORG_W_SHIFT 16
+
+#define VPE_SC_MP_SC25 0x0764
+#define VPE_OFF_H_MASK 0x07ff
+#define VPE_OFF_H_SHIFT 0
+#define VPE_OFF_W_MASK 0x07ff
+#define VPE_OFF_W_SHIFT 16
+
+/* VPE color space converter regs */
+#define VPE_CSC_CSC00 0x5700
+#define VPE_CSC_A0_MASK 0x1fff
+#define VPE_CSC_A0_SHIFT 0
+#define VPE_CSC_B0_MASK 0x1fff
+#define VPE_CSC_B0_SHIFT 16
+
+#define VPE_CSC_CSC01 0x5704
+#define VPE_CSC_C0_MASK 0x1fff
+#define VPE_CSC_C0_SHIFT 0
+#define VPE_CSC_A1_MASK 0x1fff
+#define VPE_CSC_A1_SHIFT 16
+
+#define VPE_CSC_CSC02 0x5708
+#define VPE_CSC_B1_MASK 0x1fff
+#define VPE_CSC_B1_SHIFT 0
+#define VPE_CSC_C1_MASK 0x1fff
+#define VPE_CSC_C1_SHIFT 16
+
+#define VPE_CSC_CSC03 0x570c
+#define VPE_CSC_A2_MASK 0x1fff
+#define VPE_CSC_A2_SHIFT 0
+#define VPE_CSC_B2_MASK 0x1fff
+#define VPE_CSC_B2_SHIFT 16
+
+#define VPE_CSC_CSC04 0x5710
+#define VPE_CSC_C2_MASK 0x1fff
+#define VPE_CSC_C2_SHIFT 0
+#define VPE_CSC_D0_MASK 0x0fff
+#define VPE_CSC_D0_SHIFT 16
+
+#define VPE_CSC_CSC05 0x5714
+#define VPE_CSC_D1_MASK 0x0fff
+#define VPE_CSC_D1_SHIFT 0
+#define VPE_CSC_D2_MASK 0x0fff
+#define VPE_CSC_D2_SHIFT 16
+#define VPE_CSC_BYPASS (1 << 28)
+
+#endif
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
index b557caf5b1a4..6a74ce040d28 100644
--- a/drivers/media/platform/timblogiw.c
+++ b/drivers/media/platform/timblogiw.c
@@ -403,7 +403,7 @@ static int timblogiw_s_input(struct file *file, void *priv, unsigned int input)
return 0;
}
-static int timblogiw_streamon(struct file *file, void *priv, unsigned int type)
+static int timblogiw_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
@@ -420,7 +420,7 @@ static int timblogiw_streamon(struct file *file, void *priv, unsigned int type)
}
static int timblogiw_streamoff(struct file *file, void *priv,
- unsigned int type)
+ enum v4l2_buf_type type)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c
index 21db23b196be..fa3964022b96 100644
--- a/drivers/media/radio/radio-keene.c
+++ b/drivers/media/radio/radio-keene.c
@@ -123,7 +123,7 @@ static int keene_cmd_set(struct keene_device *radio)
/* If bit 0 is set, then transmit mono, otherwise stereo.
If bit 2 is set, then enable 75 us preemphasis, otherwise
it is 50 us. */
- radio->buffer[3] = (!radio->stereo) | (radio->preemph_75_us ? 4 : 0);
+ radio->buffer[3] = (radio->stereo ? 0 : 1) | (radio->preemph_75_us ? 4 : 0);
radio->buffer[4] = 0x00;
radio->buffer[5] = 0x00;
radio->buffer[6] = 0x00;
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index f1e3714b5f16..93d864eb8306 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -74,8 +74,8 @@ static u8 fmr2_tea575x_get_pins(struct snd_tea575x *tea)
struct fmr2 *fmr2 = tea->private_data;
u8 bits = inb(fmr2->io);
- return (bits & STR_DATA) ? TEA575X_DATA : 0 |
- (bits & STR_MOST) ? TEA575X_MOST : 0;
+ return ((bits & STR_DATA) ? TEA575X_DATA : 0) |
+ ((bits & STR_MOST) ? TEA575X_MOST : 0);
}
static void fmr2_tea575x_set_direction(struct snd_tea575x *tea, bool output)
@@ -295,7 +295,6 @@ static void fmr2_remove(struct fmr2 *fmr2)
static int fmr2_isa_remove(struct device *pdev, unsigned int ndev)
{
fmr2_remove(dev_get_drvdata(pdev));
- dev_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
index b91477212413..3db8a8cfe1a8 100644
--- a/drivers/media/radio/radio-shark.c
+++ b/drivers/media/radio/radio-shark.c
@@ -271,6 +271,7 @@ static void shark_unregister_leds(struct shark_device *shark)
cancel_work_sync(&shark->led_work);
}
+#ifdef CONFIG_PM
static void shark_resume_leds(struct shark_device *shark)
{
if (test_bit(BLUE_IS_PULSE, &shark->brightness_new))
@@ -280,6 +281,7 @@ static void shark_resume_leds(struct shark_device *shark)
set_bit(RED_LED, &shark->brightness_new);
schedule_work(&shark->led_work);
}
+#endif
#else
static int shark_register_leds(struct shark_device *shark, struct device *dev)
{
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
index 9fb669721e66..d86d90dab8bf 100644
--- a/drivers/media/radio/radio-shark2.c
+++ b/drivers/media/radio/radio-shark2.c
@@ -237,6 +237,7 @@ static void shark_unregister_leds(struct shark_device *shark)
cancel_work_sync(&shark->led_work);
}
+#ifdef CONFIG_PM
static void shark_resume_leds(struct shark_device *shark)
{
int i;
@@ -246,6 +247,7 @@ static void shark_resume_leds(struct shark_device *shark)
schedule_work(&shark->led_work);
}
+#endif
#else
static int shark_register_leds(struct shark_device *shark, struct device *dev)
{
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 5c57e5b0f949..4a7a1cc06c3a 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -254,7 +254,7 @@ static unsigned int si470x_get_step(struct si470x_device *radio)
/* 2: 50 kHz */
default:
return 50 * 16;
- };
+ }
}
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index e5fc9acd0c4f..2a497c80c77f 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -463,7 +463,7 @@ static int si470x_i2c_remove(struct i2c_client *client)
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/*
* si470x_i2c_suspend - suspend the device
*/
@@ -509,7 +509,7 @@ static struct i2c_driver si470x_i2c_driver = {
.driver = {
.name = "si470x",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.pm = &si470x_i2c_pm,
#endif
},
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c
index fe160882ee10..9ec48ccbcf0b 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713-i2c.c
@@ -1456,7 +1456,7 @@ static int si4713_probe(struct i2c_client *client,
if (client->irq) {
rval = request_irq(client->irq,
- si4713_handler, IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+ si4713_handler, IRQF_TRIGGER_FALLING,
client->name, sdev);
if (rval < 0) {
v4l2_err(&sdev->sd, "Could not request IRQ\n");
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 06ac69245ca1..69e3245a58a0 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -48,15 +48,15 @@
#define WM_SUB_TEST 0xF
/* Different modes of the MSA register */
-#define MODE_BUFFER 0x0
-#define MODE_PRESET 0x1
-#define MODE_SEARCH 0x2
-#define MODE_AF_UPDATE 0x3
-#define MODE_JUMP 0x4
-#define MODE_CHECK 0x5
-#define MODE_LOAD 0x6
-#define MODE_END 0x7
-#define MODE_SHIFT 5
+#define MSA_MODE_BUFFER 0x0
+#define MSA_MODE_PRESET 0x1
+#define MSA_MODE_SEARCH 0x2
+#define MSA_MODE_AF_UPDATE 0x3
+#define MSA_MODE_JUMP 0x4
+#define MSA_MODE_CHECK 0x5
+#define MSA_MODE_LOAD 0x6
+#define MSA_MODE_END 0x7
+#define MSA_MODE_SHIFT 5
struct tef6862_state {
struct v4l2_subdev sd;
@@ -114,7 +114,7 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequen
clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ);
pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL;
- i2cmsg[0] = (MODE_PRESET << MODE_SHIFT) | WM_SUB_PLLM;
+ i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM;
i2cmsg[1] = (pll >> 8) & 0xff;
i2cmsg[2] = pll & 0xff;
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 253f307f0b37..4b2e9e8298e1 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -175,7 +175,7 @@ static int_handler_prototype int_handler_table[] = {
fm_irq_handle_intmsk_cmd_resp
};
-long (*g_st_write) (struct sk_buff *skb);
+static long (*g_st_write) (struct sk_buff *skb);
static struct completion wait_for_fmdrv_reg_comp;
static inline void fm_irq_call(struct fmdev *fmdev)
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 11e84bcc23a1..904f11367c29 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -322,4 +322,14 @@ config IR_GPIO_CIR
To compile this driver as a module, choose M here: the module will
be called gpio-ir-recv.
+config RC_ST
+ tristate "ST remote control receiver"
+ depends on ARCH_STI && RC_CORE
+ help
+ Say Y here if you want support for the ST remote control driver,
+ which allows both IR and UHF RX.
+ The driver passes raw pulse and space information to the LIRC decoder.
+
+ If you're not sure, select N here.
+
endif #RC_DEVICES
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 56bacf07b361..f4eb32c0a455 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_RC_LOOPBACK) += rc-loopback.o
obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o
obj-$(CONFIG_IR_IGUANA) += iguanair.o
obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o
+obj-$(CONFIG_RC_ST) += st_rc.o
diff --git a/drivers/media/rc/fintek-cir.h b/drivers/media/rc/fintek-cir.h
index 82516a1d39b0..b698f3d2ced9 100644
--- a/drivers/media/rc/fintek-cir.h
+++ b/drivers/media/rc/fintek-cir.h
@@ -76,8 +76,8 @@ struct fintek_dev {
} tx;
/* Config register index/data port pair */
- u8 cr_ip;
- u8 cr_dp;
+ u32 cr_ip;
+ u32 cr_dp;
/* hardware I/O settings */
unsigned long cir_addr;
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 07aacfa5903d..80c611c2e8c2 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 19632b1c2190..67e5667db2eb 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -320,6 +320,7 @@ static int iguanair_set_tx_carrier(struct rc_dev *dev, uint32_t carrier)
sevens = 2;
break;
case 3:
+ default:
sevens = 1;
break;
}
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index 31b955bf7664..b1e19a26208d 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -201,8 +201,7 @@ static int lirc_rx51_init_port(struct lirc_rx51 *lirc_rx51)
lirc_rx51->irq_num = omap_dm_timer_get_irq(lirc_rx51->pulse_timer);
retval = request_irq(lirc_rx51->irq_num, lirc_rx51_interrupt_handler,
- IRQF_DISABLED | IRQF_SHARED,
- "lirc_pulse_timer", lirc_rx51);
+ IRQF_SHARED, "lirc_pulse_timer", lirc_rx51);
if (retval) {
dev_err(lirc_rx51->dev, ": Failed to request interrupt line\n");
goto err2;
diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c
index 4d13a7f2e5c3..492a05ade7e1 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-nec.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c
@@ -5,7 +5,7 @@
* TODO: This table is a real mess, as it merges RC codes from several
* devices into a big table. It also has both RC-5 and NEC codes inside.
* It should be broken into small tables, and the protocols should properly
- * be indentificated.
+ * be identified.
*
* The table were imported from dib0700_devices.c.
*
diff --git a/drivers/media/rc/keymaps/rc-dib0700-rc5.c b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
index ba81d9697cfc..454ea596a7ee 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-rc5.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
@@ -5,7 +5,7 @@
* TODO: This table is a real mess, as it merges RC codes from several
* devices into a big table. It also has both RC-5 and NEC codes inside.
* It should be broken into small tables, and the protocols should properly
- * be indentificated.
+ * be identified.
*
* The table were imported from dib0700_devices.c.
*
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 7c3674ff5ea2..07e83108df0f 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -84,8 +84,8 @@ struct nvt_dev {
} tx;
/* EFER Config register index/data pair */
- u8 cr_efir;
- u8 cr_efdr;
+ u32 cr_efir;
+ u32 cr_efdr;
/* hardware I/O settings */
unsigned long cir_addr;
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
new file mode 100644
index 000000000000..65120c2d47ad
--- /dev/null
+++ b/drivers/media/rc/st_rc.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2013 STMicroelectronics Limited
+ * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <media/rc-core.h>
+#include <linux/pinctrl/consumer.h>
+
+struct st_rc_device {
+ struct device *dev;
+ int irq;
+ int irq_wake;
+ struct clk *sys_clock;
+ void *base; /* Register base address */
+ void *rx_base; /* RX Register base address */
+ struct rc_dev *rdev;
+ bool overclocking;
+ int sample_mult;
+ int sample_div;
+ bool rxuhfmode;
+};
+
+/* Registers */
+#define IRB_SAMPLE_RATE_COMM 0x64 /* sample freq divisor */
+#define IRB_CLOCK_SEL 0x70 /* clock select */
+#define IRB_CLOCK_SEL_STATUS 0x74 /* clock status */
+/* IRB IR/UHF receiver registers */
+#define IRB_RX_ON 0x40 /* pulse time capture */
+#define IRB_RX_SYS 0x44 /* sym period capture */
+#define IRB_RX_INT_EN 0x48 /* IRQ enable (R/W) */
+#define IRB_RX_INT_STATUS 0x4c /* IRQ status (R/W) */
+#define IRB_RX_EN 0x50 /* Receive enable */
+#define IRB_MAX_SYM_PERIOD 0x54 /* max sym value */
+#define IRB_RX_INT_CLEAR 0x58 /* overrun status */
+#define IRB_RX_STATUS 0x6c /* receive status */
+#define IRB_RX_NOISE_SUPPR 0x5c /* noise suppression */
+#define IRB_RX_POLARITY_INV 0x68 /* polarity inverter */
+
+/*
+ * IRQ set: Enable full FIFO 1 -> bit 3;
+ * Enable overrun IRQ 1 -> bit 2;
+ * Enable last symbol IRQ 1 -> bit 1;
+ * Enable RX interrupt 1 -> bit 0;
+ */
+#define IRB_RX_INTS 0x0f
+#define IRB_RX_OVERRUN_INT 0x04
+ /* maximum symbol period (microsecs),timeout to detect end of symbol train */
+#define MAX_SYMB_TIME 0x5000
+#define IRB_SAMPLE_FREQ 10000000
+#define IRB_FIFO_NOT_EMPTY 0xff00
+#define IRB_OVERFLOW 0x4
+#define IRB_TIMEOUT 0xffff
+#define IR_ST_NAME "st-rc"
+
+static void st_rc_send_lirc_timeout(struct rc_dev *rdev)
+{
+ DEFINE_IR_RAW_EVENT(ev);
+ ev.timeout = true;
+ ir_raw_event_store(rdev, &ev);
+}
+
+/*
+ * RX graphical example to better understand the difference between ST IR block
+ * output and the standard definition used by LIRC (and most of the world!)
+ *
+ * mark mark
+ * |-IRB_RX_ON-| |-IRB_RX_ON-|
+ * ___ ___ ___ ___ ___ ___ _
+ * | | | | | | | | | | | | |
+ * | | | | | | space 0 | | | | | | space 1 |
+ * _____| |__| |__| |____________________________| |__| |__| |_____________|
+ *
+ * |--------------- IRB_RX_SYS -------------|------ IRB_RX_SYS -------|
+ *
+ * |------------- encoding bit 0 -----------|---- encoding bit 1 -----|
+ *
+ * ST hardware returns mark (IRB_RX_ON) and total symbol time (IRB_RX_SYS), so
+ * to convert to standard mark/space we have to calculate space = IRB_RX_SYS - mark.
+ * The mark time represents the amount of time the carrier (usually 36-40 kHz)
+ * is detected. The above example shows Pulse Width Modulation encoding, where
+ * bit 0 is represented by space > mark.
+ */
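+
+/*
+ * Worked example (illustrative numbers, not taken from any datasheet):
+ * if the hardware captures mark = 900us and IRB_RX_SYS = 2400us for one
+ * symbol, the space reported to rc-core is 2400 - 900 = 1500us, which is
+ * the standard mark/space pair stored in st_rc_rx_interrupt() below.
+ */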
+
+static irqreturn_t st_rc_rx_interrupt(int irq, void *data)
+{
+ unsigned int symbol, mark = 0;
+ struct st_rc_device *dev = data;
+ int last_symbol = 0;
+ u32 status;
+ DEFINE_IR_RAW_EVENT(ev);
+
+ if (dev->irq_wake)
+ pm_wakeup_event(dev->dev, 0);
+
+ status = readl(dev->rx_base + IRB_RX_STATUS);
+
+ while (status & (IRB_FIFO_NOT_EMPTY | IRB_OVERFLOW)) {
+ u32 int_status = readl(dev->rx_base + IRB_RX_INT_STATUS);
+ if (unlikely(int_status & IRB_RX_OVERRUN_INT)) {
+ /* discard the entire collection in case of errors! */
+ ir_raw_event_reset(dev->rdev);
+ dev_info(dev->dev, "IR RX overrun\n");
+ writel(IRB_RX_OVERRUN_INT,
+ dev->rx_base + IRB_RX_INT_CLEAR);
+ continue;
+ }
+
+ symbol = readl(dev->rx_base + IRB_RX_SYS);
+ mark = readl(dev->rx_base + IRB_RX_ON);
+
+ if (symbol == IRB_TIMEOUT)
+ last_symbol = 1;
+
+ /* Ignore any noise */
+ if ((mark > 2) && (symbol > 1)) {
+ symbol -= mark;
+ if (dev->overclocking) { /* adjustments to timings */
+ symbol *= dev->sample_mult;
+ symbol /= dev->sample_div;
+ mark *= dev->sample_mult;
+ mark /= dev->sample_div;
+ }
+
+ ev.duration = US_TO_NS(mark);
+ ev.pulse = true;
+ ir_raw_event_store(dev->rdev, &ev);
+
+ if (!last_symbol) {
+ ev.duration = US_TO_NS(symbol);
+ ev.pulse = false;
+ ir_raw_event_store(dev->rdev, &ev);
+ } else {
+ st_rc_send_lirc_timeout(dev->rdev);
+ }
+
+ }
+ last_symbol = 0;
+ status = readl(dev->rx_base + IRB_RX_STATUS);
+ }
+
+ writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_CLEAR);
+
+ /* Empty software fifo */
+ ir_raw_event_handle(dev->rdev);
+ return IRQ_HANDLED;
+}
+
+static void st_rc_hardware_init(struct st_rc_device *dev)
+{
+ int baseclock, freqdiff;
+ unsigned int rx_max_symbol_per = MAX_SYMB_TIME;
+ unsigned int rx_sampling_freq_div;
+
+ clk_prepare_enable(dev->sys_clock);
+ baseclock = clk_get_rate(dev->sys_clock);
+
+ /* IRB input pins are inverted internally from high to low. */
+ writel(1, dev->rx_base + IRB_RX_POLARITY_INV);
+
+ rx_sampling_freq_div = baseclock / IRB_SAMPLE_FREQ;
+ writel(rx_sampling_freq_div, dev->base + IRB_SAMPLE_RATE_COMM);
+
+ freqdiff = baseclock - (rx_sampling_freq_div * IRB_SAMPLE_FREQ);
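+ /*
+ * Example with an illustrative (hypothetical) clock: a 66MHz system
+ * clock gives a divisor of 6, i.e. the block really samples at 11MHz
+ * rather than the assumed 10MHz; sample_mult/sample_div (1000/1100)
+ * then rescale the captured values back to the 10MHz timebase in the
+ * interrupt handler.
+ */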
+ if (freqdiff) { /* over-clocking, work out the adjustment factors */
+ dev->overclocking = true;
+ dev->sample_mult = 1000;
+ dev->sample_div = baseclock / (10000 * rx_sampling_freq_div);
+ rx_max_symbol_per = (rx_max_symbol_per * 1000)/dev->sample_div;
+ }
+
+ writel(rx_max_symbol_per, dev->rx_base + IRB_MAX_SYM_PERIOD);
+}
+
+static int st_rc_remove(struct platform_device *pdev)
+{
+ struct st_rc_device *rc_dev = platform_get_drvdata(pdev);
+ clk_disable_unprepare(rc_dev->sys_clock);
+ rc_unregister_device(rc_dev->rdev);
+ return 0;
+}
+
+static int st_rc_open(struct rc_dev *rdev)
+{
+ struct st_rc_device *dev = rdev->priv;
+ unsigned long flags;
+ local_irq_save(flags);
+ /* enable interrupts and receiver */
+ writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_EN);
+ writel(0x01, dev->rx_base + IRB_RX_EN);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static void st_rc_close(struct rc_dev *rdev)
+{
+ struct st_rc_device *dev = rdev->priv;
+ /* disable interrupts and receiver */
+ writel(0x00, dev->rx_base + IRB_RX_EN);
+ writel(0x00, dev->rx_base + IRB_RX_INT_EN);
+}
+
+static int st_rc_probe(struct platform_device *pdev)
+{
+ int ret = -EINVAL;
+ struct rc_dev *rdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct st_rc_device *rc_dev;
+ struct device_node *np = pdev->dev.of_node;
+ const char *rx_mode;
+
+ rc_dev = devm_kzalloc(dev, sizeof(struct st_rc_device), GFP_KERNEL);
+
+ if (!rc_dev)
+ return -ENOMEM;
+
+ rdev = rc_allocate_device();
+
+ if (!rdev)
+ return -ENOMEM;
+
+ if (np && !of_property_read_string(np, "rx-mode", &rx_mode)) {
+
+ if (!strcmp(rx_mode, "uhf")) {
+ rc_dev->rxuhfmode = true;
+ } else if (!strcmp(rx_mode, "infrared")) {
+ rc_dev->rxuhfmode = false;
+ } else {
+ dev_err(dev, "Unsupported rx mode [%s]\n", rx_mode);
+ goto err;
+ }
+
+ } else {
+ goto err;
+ }
+
+ rc_dev->sys_clock = devm_clk_get(dev, NULL);
+ if (IS_ERR(rc_dev->sys_clock)) {
+ dev_err(dev, "System clock not found\n");
+ ret = PTR_ERR(rc_dev->sys_clock);
+ goto err;
+ }
+
+ rc_dev->irq = platform_get_irq(pdev, 0);
+ if (rc_dev->irq < 0) {
+ ret = rc_dev->irq;
+ goto err;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ rc_dev->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(rc_dev->base)) {
+ ret = PTR_ERR(rc_dev->base);
+ goto err;
+ }
+
+ if (rc_dev->rxuhfmode)
+ rc_dev->rx_base = rc_dev->base + 0x40;
+ else
+ rc_dev->rx_base = rc_dev->base;
+
+ rc_dev->dev = dev;
+ platform_set_drvdata(pdev, rc_dev);
+ st_rc_hardware_init(rc_dev);
+
+ rdev->driver_type = RC_DRIVER_IR_RAW;
+ rdev->allowed_protos = RC_BIT_ALL;
+ /* RX sampling rate is 10 MHz */
+ rdev->rx_resolution = 100;
+ rdev->timeout = US_TO_NS(MAX_SYMB_TIME);
+ rdev->priv = rc_dev;
+ rdev->open = st_rc_open;
+ rdev->close = st_rc_close;
+ rdev->driver_name = IR_ST_NAME;
+ rdev->map_name = RC_MAP_LIRC;
+ rdev->input_name = "ST Remote Control Receiver";
+
+ /* enable wake via this device */
+ device_set_wakeup_capable(dev, true);
+ device_set_wakeup_enable(dev, true);
+
+ ret = rc_register_device(rdev);
+ if (ret < 0)
+ goto clkerr;
+
+ rc_dev->rdev = rdev;
+ if (devm_request_irq(dev, rc_dev->irq, st_rc_rx_interrupt,
+ IRQF_NO_SUSPEND, IR_ST_NAME, rc_dev) < 0) {
+ dev_err(dev, "IRQ %d register failed\n", rc_dev->irq);
+ ret = -EINVAL;
+ goto rcerr;
+ }
+
+ /*
+ * For LIRC_MODE_MODE2, LIRC_MODE_PULSE or LIRC_MODE_RAW,
+ * lircd expects a long space before the signal train in order to sync.
+ */
+ st_rc_send_lirc_timeout(rdev);
+
+ dev_info(dev, "setup in %s mode\n", rc_dev->rxuhfmode ? "UHF" : "IR");
+
+ return ret;
+rcerr:
+ rc_unregister_device(rdev);
+ rdev = NULL;
+clkerr:
+ clk_disable_unprepare(rc_dev->sys_clock);
+err:
+ rc_free_device(rdev);
+ dev_err(dev, "Unable to register device (%d)\n", ret);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int st_rc_suspend(struct device *dev)
+{
+ struct st_rc_device *rc_dev = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev)) {
+ if (!enable_irq_wake(rc_dev->irq))
+ rc_dev->irq_wake = 1;
+ else
+ return -EINVAL;
+ } else {
+ pinctrl_pm_select_sleep_state(dev);
+ writel(0x00, rc_dev->rx_base + IRB_RX_EN);
+ writel(0x00, rc_dev->rx_base + IRB_RX_INT_EN);
+ clk_disable_unprepare(rc_dev->sys_clock);
+ }
+
+ return 0;
+}
+
+static int st_rc_resume(struct device *dev)
+{
+ struct st_rc_device *rc_dev = dev_get_drvdata(dev);
+ struct rc_dev *rdev = rc_dev->rdev;
+
+ if (rc_dev->irq_wake) {
+ disable_irq_wake(rc_dev->irq);
+ rc_dev->irq_wake = 0;
+ } else {
+ pinctrl_pm_select_default_state(dev);
+ st_rc_hardware_init(rc_dev);
+ if (rdev->users) {
+ writel(IRB_RX_INTS, rc_dev->rx_base + IRB_RX_INT_EN);
+ writel(0x01, rc_dev->rx_base + IRB_RX_EN);
+ }
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(st_rc_pm_ops, st_rc_suspend, st_rc_resume);
+#endif
+
+#ifdef CONFIG_OF
+static struct of_device_id st_rc_match[] = {
+ { .compatible = "st,comms-irb", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, st_rc_match);
+#endif
+
+static struct platform_driver st_rc_driver = {
+ .driver = {
+ .name = IR_ST_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(st_rc_match),
+#ifdef CONFIG_PM
+ .pm = &st_rc_pm_ops,
+#endif
+ },
+ .probe = st_rc_probe,
+ .remove = st_rc_remove,
+};
+
+module_platform_driver(st_rc_driver);
+
+MODULE_DESCRIPTION("RC Transceiver driver for STMicroelectronics platforms");
+MODULE_AUTHOR("STMicroelectronics (R&D) Ltd");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 98bd4960c75e..904baf4eec28 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -1110,7 +1110,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
}
err = request_irq(data->irq, wbcir_irq_handler,
- IRQF_DISABLED, DRVNAME, device);
+ 0, DRVNAME, device);
if (err) {
dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
err = -EBUSY;
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index ad9309da4a91..6c96e4898777 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -19,6 +19,7 @@
*/
#include "e4000_priv.h"
+#include <linux/math64.h>
/* write multiple registers */
static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
@@ -233,7 +234,7 @@ static int e4000_set_params(struct dvb_frontend *fe)
* or more.
*/
f_vco = c->frequency * e4000_pll_lut[i].mul;
- sigma_delta = 0x10000UL * (f_vco % priv->cfg->clock) / priv->cfg->clock;
+ sigma_delta = div_u64(0x10000ULL * (f_vco % priv->cfg->clock), priv->cfg->clock);
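+ /* the 64-bit multiply and div_u64 keep 0x10000 * (f_vco % clock) from overflowing 32 bits */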
buf[0] = f_vco / priv->cfg->clock;
buf[1] = (sigma_delta >> 0) & 0xff;
buf[2] = (sigma_delta >> 8) & 0xff;
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index f4d0e797a6cc..d74e92056810 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -139,7 +139,7 @@ static int fc0012_set_params(struct dvb_frontend *fe)
unsigned char reg[7], am, pm, multi, tmp;
unsigned long f_vco;
unsigned short xtal_freq_khz_2, xin, xdiv;
- int vco_select = false;
+ bool vco_select = false;
if (fe->callback) {
ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index bd8f0f1e8f3b..b4162315773d 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -233,7 +233,7 @@ static int fc0013_set_params(struct dvb_frontend *fe)
unsigned char reg[7], am, pm, multi, tmp;
unsigned long f_vco;
unsigned short xtal_freq_khz_2, xin, xdiv;
- int vco_select = false;
+ bool vco_select = false;
if (fe->callback) {
ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index 1c23666468cf..d9ee43fae62d 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -612,10 +612,19 @@ static int r820t_set_pll(struct r820t_priv *priv, enum v4l2_tuner_type type,
vco_fine_tune = (data[4] & 0x30) >> 4;
- if (vco_fine_tune > VCO_POWER_REF)
- div_num = div_num - 1;
- else if (vco_fine_tune < VCO_POWER_REF)
- div_num = div_num + 1;
+ tuner_dbg("mix_div=%d div_num=%d vco_fine_tune=%d\n",
+ mix_div, div_num, vco_fine_tune);
+
+ /*
+ * XXX: R828D/16MHz always seems to have vco_fine_tune=1.
+ * Due to that, this calculation goes wrong.
+ */
+ if (priv->cfg->rafael_chip != CHIP_R828D) {
+ if (vco_fine_tune > VCO_POWER_REF)
+ div_num = div_num - 1;
+ else if (vco_fine_tune < VCO_POWER_REF)
+ div_num = div_num + 1;
+ }
rc = r820t_write_reg_mask(priv, 0x10, div_num << 5, 0xe0);
if (rc < 0)
@@ -637,11 +646,6 @@ static int r820t_set_pll(struct r820t_priv *priv, enum v4l2_tuner_type type,
vco_fra = pll_ref * 129 / 128;
}
- if (nint > 63) {
- tuner_info("No valid PLL values for %u kHz!\n", freq);
- return -EINVAL;
- }
-
ni = (nint - 13) / 4;
si = nint - 4 * ni - 13;
diff --git a/drivers/media/tuners/tda9887.c b/drivers/media/tuners/tda9887.c
index 300005c535ba..9823248d743f 100644
--- a/drivers/media/tuners/tda9887.c
+++ b/drivers/media/tuners/tda9887.c
@@ -536,8 +536,8 @@ static int tda9887_status(struct dvb_frontend *fe)
unsigned char buf[1];
int rc;
- memset(buf,0,sizeof(buf));
- if (1 != (rc = tuner_i2c_xfer_recv(&priv->i2c_props,buf,1)))
+ rc = tuner_i2c_xfer_recv(&priv->i2c_props, buf, 1);
+ if (rc != 1)
tuner_info("i2c i/o error: rc == %d (should be 1)\n", rc);
dump_read_message(fe, buf);
return 0;
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 878d2c4d9e8e..e287a7417319 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -572,7 +572,7 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type,
return -EINVAL;
}
- size = le16_to_cpu(*(__u16 *) p);
+ size = le16_to_cpu(*(__le16 *) p);
p += sizeof(size);
if (size == 0xffff)
@@ -683,7 +683,7 @@ static int load_scode(struct dvb_frontend *fe, unsigned int type,
/* 16 SCODE entries per file; each SCODE entry is 12 bytes and
* has a 2-byte size header in the firmware format. */
if (priv->firm[pos].size != 14 * 16 || scode >= 16 ||
- le16_to_cpu(*(__u16 *)(p + 14 * scode)) != 12)
+ le16_to_cpu(*(__le16 *)(p + 14 * scode)) != 12)
return -EINVAL;
p += 14 * scode + 2;
}
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 8b6275f85908..0bd969063392 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -390,7 +390,7 @@ static void flexcop_usb_transfer_exit(struct flexcop_usb *fc_usb)
}
if (fc_usb->iso_buffer != NULL)
- pci_free_consistent(NULL,
+ usb_free_coherent(fc_usb->udev,
fc_usb->buffer_size, fc_usb->iso_buffer,
fc_usb->dma_addr);
}
@@ -407,8 +407,8 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
"each of %d bytes size = %d.\n", B2C2_USB_NUM_ISO_URB,
B2C2_USB_FRAMES_PER_ISO, frame_size, bufsize);
- fc_usb->iso_buffer = pci_alloc_consistent(NULL,
- bufsize, &fc_usb->dma_addr);
+ fc_usb->iso_buffer = usb_alloc_coherent(fc_usb->udev,
+ bufsize, GFP_KERNEL, &fc_usb->dma_addr);
if (fc_usb->iso_buffer == NULL)
return -ENOMEM;
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index be1719283609..351a78a84c3d 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -209,7 +209,7 @@ static void cpia2_usb_complete(struct urb *urb)
{
int i;
unsigned char *cdata;
- static int frame_ready = false;
+ static bool frame_ready = false;
struct camera_data *cam = (struct camera_data *) urb->context;
if (urb->status!=0) {
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index a384f80f595e..e9d017bea377 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -978,7 +978,6 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
int minor)
{
int retval = -ENOMEM;
- int errCode;
unsigned int maxh, maxw;
dev->udev = udev;
@@ -1014,8 +1013,8 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
/* Cx231xx pre card setup */
cx231xx_pre_card_setup(dev);
- errCode = cx231xx_config(dev);
- if (errCode) {
+ retval = cx231xx_config(dev);
+ if (retval) {
cx231xx_errdev("error configuring device\n");
return -ENOMEM;
}
@@ -1024,12 +1023,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
dev->norm = dev->board.norm;
/* register i2c bus */
- errCode = cx231xx_dev_init(dev);
- if (errCode < 0) {
- cx231xx_dev_uninit(dev);
+ retval = cx231xx_dev_init(dev);
+ if (retval) {
cx231xx_errdev("%s: cx231xx_i2c_register - errCode [%d]!\n",
- __func__, errCode);
- return errCode;
+ __func__, retval);
+ goto err_dev_init;
}
/* Do board specific init */
@@ -1047,11 +1045,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
dev->interlaced = 0;
dev->video_input = 0;
- errCode = cx231xx_config(dev);
- if (errCode < 0) {
+ retval = cx231xx_config(dev);
+ if (retval) {
cx231xx_errdev("%s: cx231xx_config - errCode [%d]!\n",
- __func__, errCode);
- return errCode;
+ __func__, retval);
+ goto err_dev_init;
}
/* init video dma queues */
@@ -1075,9 +1073,9 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
}
retval = cx231xx_register_analog_devices(dev);
- if (retval < 0) {
- cx231xx_release_resources(dev);
- return retval;
+ if (retval) {
+ cx231xx_release_analog_resources(dev);
+ goto err_analog;
}
cx231xx_ir_init(dev);
@@ -1085,6 +1083,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
cx231xx_init_extension(dev);
return 0;
+err_analog:
+ cx231xx_remove_from_devlist(dev);
+err_dev_init:
+ cx231xx_dev_uninit(dev);
+ return retval;
}
#if defined(CONFIG_MODULES) && defined(MODULE)
@@ -1132,7 +1135,6 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
char *speed;
struct usb_interface_assoc_descriptor *assoc_desc;
- udev = usb_get_dev(interface_to_usbdev(interface));
ifnum = interface->altsetting[0].desc.bInterfaceNumber;
/*
@@ -1161,6 +1163,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
return -ENOMEM;
}
+ udev = usb_get_dev(interface_to_usbdev(interface));
+
snprintf(dev->name, 29, "cx231xx #%d", nr);
dev->devno = nr;
dev->model = id->driver_info;
@@ -1223,10 +1227,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (assoc_desc->bFirstInterface != ifnum) {
cx231xx_err(DRIVER_NAME ": Not found "
"matching IAD interface\n");
- clear_bit(dev->devno, &cx231xx_devused);
- kfree(dev);
- dev = NULL;
- return -ENODEV;
+ retval = -ENODEV;
+ goto err_if;
}
cx231xx_info("registering interface %d\n", ifnum);
@@ -1242,22 +1244,13 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
if (retval) {
cx231xx_errdev("v4l2_device_register failed\n");
- clear_bit(dev->devno, &cx231xx_devused);
- kfree(dev);
- dev = NULL;
- return -EIO;
+ retval = -EIO;
+ goto err_v4l2;
}
/* allocate device struct */
retval = cx231xx_init_dev(dev, udev, nr);
- if (retval) {
- clear_bit(dev->devno, &cx231xx_devused);
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev);
- dev = NULL;
- usb_set_intfdata(interface, NULL);
-
- return retval;
- }
+ if (retval)
+ goto err_init;
/* compute alternate max packet sizes for video */
uif = udev->actconfig->interface[dev->current_pcb_config.
@@ -1275,11 +1268,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (dev->video_mode.alt_max_pkt_size == NULL) {
cx231xx_errdev("out of memory!\n");
- clear_bit(dev->devno, &cx231xx_devused);
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev);
- dev = NULL;
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err_video_alt;
}
for (i = 0; i < dev->video_mode.num_alt; i++) {
@@ -1309,11 +1299,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (dev->vbi_mode.alt_max_pkt_size == NULL) {
cx231xx_errdev("out of memory!\n");
- clear_bit(dev->devno, &cx231xx_devused);
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev);
- dev = NULL;
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err_vbi_alt;
}
for (i = 0; i < dev->vbi_mode.num_alt; i++) {
@@ -1344,11 +1331,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (dev->sliced_cc_mode.alt_max_pkt_size == NULL) {
cx231xx_errdev("out of memory!\n");
- clear_bit(dev->devno, &cx231xx_devused);
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev);
- dev = NULL;
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err_sliced_cc_alt;
}
for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) {
@@ -1380,11 +1364,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (dev->ts1_mode.alt_max_pkt_size == NULL) {
cx231xx_errdev("out of memory!\n");
- clear_bit(dev->devno, &cx231xx_devused);
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev);
- dev = NULL;
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err_ts1_alt;
}
for (i = 0; i < dev->ts1_mode.num_alt; i++) {
@@ -1411,6 +1392,29 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
request_modules(dev);
return 0;
+err_ts1_alt:
+ kfree(dev->sliced_cc_mode.alt_max_pkt_size);
+err_sliced_cc_alt:
+ kfree(dev->vbi_mode.alt_max_pkt_size);
+err_vbi_alt:
+ kfree(dev->video_mode.alt_max_pkt_size);
+err_video_alt:
+ /* cx231xx_uninit_dev: */
+ cx231xx_close_extension(dev);
+ cx231xx_ir_exit(dev);
+ cx231xx_release_analog_resources(dev);
+ cx231xx_417_unregister(dev);
+ cx231xx_remove_from_devlist(dev);
+ cx231xx_dev_uninit(dev);
+err_init:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_v4l2:
+ usb_set_intfdata(interface, NULL);
+err_if:
+ usb_put_dev(udev);
+ kfree(dev);
+ clear_bit(dev->devno, &cx231xx_devused);
+ return retval;
}
/*
diff --git a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c
index d7308ab7a90f..2a34ceee4802 100644
--- a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c
+++ b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c
@@ -28,7 +28,7 @@ MODULE_PARM_DESC(pcb_debug, "enable pcb config debug messages [video]");
/******************************************************************************/
-struct pcb_config cx231xx_Scenario[] = {
+static struct pcb_config cx231xx_Scenario[] = {
{
INDEX_SELFPOWER_DIGITAL_ONLY, /* index */
USB_SELF_POWER, /* power_type */
@@ -672,7 +672,7 @@ u32 initialize_cx231xx(struct cx231xx *dev)
pcb config it is related to */
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, data, 4);
- config_info = le32_to_cpu(*((u32 *) data));
+ config_info = le32_to_cpu(*((__le32 *)data));
usb_speed = (u8) (config_info & 0x1);
/* Verify this device belongs to Bus power or Self power device */
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index c0cd0848631b..ecca03667f98 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -377,6 +377,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf};
struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf};
struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf};
+ struct rtl28xxu_req req_r828d = {0x0074, CMD_I2C_RD, 1, buf};
dev_dbg(&d->udev->dev, "%s:\n", __func__);
@@ -489,6 +490,15 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
goto found;
}
+ /* check R828D ID register; reg=00 val=69 */
+ ret = rtl28xxu_ctrl_msg(d, &req_r828d);
+ if (ret == 0 && buf[0] == 0x69) {
+ priv->tuner = TUNER_RTL2832_R828D;
+ priv->tuner_name = "R828D";
+ goto found;
+ }
+
found:
dev_dbg(&d->udev->dev, "%s: tuner=%s\n", __func__, priv->tuner_name);
@@ -745,6 +755,7 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
rtl2832_config = &rtl28xxu_rtl2832_e4000_config;
break;
case TUNER_RTL2832_R820T:
+ case TUNER_RTL2832_R828D:
rtl2832_config = &rtl28xxu_rtl2832_r820t_config;
break;
default:
@@ -866,6 +877,13 @@ static const struct r820t_config rtl2832u_r820t_config = {
.rafael_chip = CHIP_R820T,
};
+static const struct r820t_config rtl2832u_r828d_config = {
+ .i2c_addr = 0x3a,
+ .xtal = 16000000,
+ .max_i2c_msg_len = 2,
+ .rafael_chip = CHIP_R828D,
+};
+
static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
{
int ret;
@@ -923,6 +941,27 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
adap->fe[0]->ops.read_signal_strength =
adap->fe[0]->ops.tuner_ops.get_rf_strength;
break;
+ case TUNER_RTL2832_R828D:
+ /* power off mn88472 demod on GPIO0 */
+ ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x01, 0x01);
+ if (ret)
+ goto err;
+
+ fe = dvb_attach(r820t_attach, adap->fe[0], &d->i2c_adap,
+ &rtl2832u_r828d_config);
+
+ /* Use tuner to get the signal strength */
+ adap->fe[0]->ops.read_signal_strength =
+ adap->fe[0]->ops.tuner_ops.get_rf_strength;
+ break;
default:
fe = NULL;
dev_err(&d->udev->dev, "%s: unknown tuner=%d\n", KBUILD_MODNAME,
@@ -1388,6 +1427,9 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl2832u_props, "Leadtek WinFast DTV Dongle mini", NULL) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_CPYTO_REDI_PC50A,
&rtl2832u_props, "Crypto ReDi PC 50 A", NULL) },
+
+ { DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
+ &rtl2832u_props, "Astrometa DVB-T2", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
index 729b3540c2f9..2142bcb41b41 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
@@ -83,6 +83,7 @@ enum rtl28xxu_tuner {
TUNER_RTL2832_TDA18272,
TUNER_RTL2832_FC0013,
TUNER_RTL2832_R820T,
+ TUNER_RTL2832_R828D,
};
struct rtl28xxu_req {
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index ea2d5ee86576..c11138ebf6fb 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -254,7 +254,7 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = {
-struct stb0899_config az6027_stb0899_config = {
+static struct stb0899_config az6027_stb0899_config = {
.init_dev = az6027_stb0899_s1_init_1,
.init_s2_demod = stb0899_s2_init_2,
.init_s1_demod = az6027_stb0899_s1_init_3,
@@ -291,7 +291,7 @@ struct stb0899_config az6027_stb0899_config = {
.tuner_set_rfsiggain = NULL,
};
-struct stb6100_config az6027_stb6100_config = {
+static struct stb6100_config az6027_stb6100_config = {
.tuner_address = 0xc0,
.refclock = 27000000,
};
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 6e237b6dd0a8..6136a2c7dbfd 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -955,9 +955,10 @@ static struct ds3000_config dw2104_ds3000_config = {
.demod_address = 0x68,
};
-static struct ts2020_config dw2104_ts2020_config = {
+static struct ts2020_config dw2104_ts2020_config = {
.tuner_address = 0x60,
.clk_out_div = 1,
+ .frequency_div = 1060000,
};
static struct ds3000_config s660_ds3000_config = {
@@ -966,6 +967,12 @@ static struct ds3000_config s660_ds3000_config = {
.set_lock_led = dw210x_led_ctrl,
};
+static struct ts2020_config s660_ts2020_config = {
+ .tuner_address = 0x60,
+ .clk_out_div = 1,
+ .frequency_div = 1146000,
+};
+
static struct stv0900_config dw2104a_stv0900_config = {
.demod_address = 0x6a,
.demod_mode = 0,
@@ -1205,7 +1212,7 @@ static int ds3000_frontend_attach(struct dvb_usb_adapter *d)
if (d->fe_adap[0].fe == NULL)
return -EIO;
- dvb_attach(ts2020_attach, d->fe_adap[0].fe, &dw2104_ts2020_config,
+ dvb_attach(ts2020_attach, d->fe_adap[0].fe, &s660_ts2020_config,
&d->dev->i2c_adap);
st->old_set_voltage = d->fe_adap[0].fe->ops.set_voltage;
@@ -1213,7 +1220,7 @@ static int ds3000_frontend_attach(struct dvb_usb_adapter *d)
dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG);
- info("Attached ds3000+ds2020!\n");
+ info("Attached ds3000+ts2020!\n");
return 0;
}
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index 73cc50afa5e1..d666741797d4 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -22,6 +22,7 @@
#include <linux/i2c.h>
#include <media/soc_camera.h>
#include <media/mt9v011.h>
+#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
#include "em28xx.h"
@@ -47,6 +48,7 @@ static struct soc_camera_link camlink = {
.bus_id = 0,
.flags = 0,
.module_name = "em28xx",
+ .unbalanced_power = true,
};
@@ -325,13 +327,24 @@ int em28xx_detect_sensor(struct em28xx *dev)
int em28xx_init_camera(struct em28xx *dev)
{
+ char clk_name[V4L2_SUBDEV_NAME_SIZE];
+ struct i2c_client *client = &dev->i2c_client[dev->def_i2c_bus];
+ struct i2c_adapter *adap = &dev->i2c_adap[dev->def_i2c_bus];
+ int ret = 0;
+
+ v4l2_clk_name_i2c(clk_name, sizeof(clk_name),
+ i2c_adapter_id(adap), client->addr);
+ dev->clk = v4l2_clk_register_fixed(clk_name, "mclk", -EINVAL);
+ if (IS_ERR(dev->clk))
+ return PTR_ERR(dev->clk);
+
switch (dev->em28xx_sensor) {
case EM28XX_MT9V011:
{
struct mt9v011_platform_data pdata;
struct i2c_board_info mt9v011_info = {
.type = "mt9v011",
- .addr = dev->i2c_client[dev->def_i2c_bus].addr,
+ .addr = client->addr,
.platform_data = &pdata,
};
@@ -352,10 +365,11 @@ int em28xx_init_camera(struct em28xx *dev)
dev->sensor_xtal = 4300000;
pdata.xtal = dev->sensor_xtal;
if (NULL ==
- v4l2_i2c_new_subdev_board(&dev->v4l2_dev,
- &dev->i2c_adap[dev->def_i2c_bus],
- &mt9v011_info, NULL))
- return -ENODEV;
+ v4l2_i2c_new_subdev_board(&dev->v4l2_dev, adap,
+ &mt9v011_info, NULL)) {
+ ret = -ENODEV;
+ break;
+ }
/* probably means GRGB 16 bit bayer */
dev->vinmode = 0x0d;
dev->vinctl = 0x00;
@@ -391,7 +405,7 @@ int em28xx_init_camera(struct em28xx *dev)
struct i2c_board_info ov2640_info = {
.type = "ov2640",
.flags = I2C_CLIENT_SCCB,
- .addr = dev->i2c_client[dev->def_i2c_bus].addr,
+ .addr = client->addr,
.platform_data = &camlink,
};
struct v4l2_mbus_framefmt fmt;
@@ -408,9 +422,12 @@ int em28xx_init_camera(struct em28xx *dev)
dev->sensor_yres = 480;
subdev =
- v4l2_i2c_new_subdev_board(&dev->v4l2_dev,
- &dev->i2c_adap[dev->def_i2c_bus],
+ v4l2_i2c_new_subdev_board(&dev->v4l2_dev, adap,
&ov2640_info, NULL);
+ if (NULL == subdev) {
+ ret = -ENODEV;
+ break;
+ }
fmt.code = V4L2_MBUS_FMT_YUYV8_2X8;
fmt.width = 640;
@@ -427,8 +444,13 @@ int em28xx_init_camera(struct em28xx *dev)
}
case EM28XX_NOSENSOR:
default:
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ if (ret < 0) {
+ v4l2_clk_unregister_fixed(dev->clk);
+ dev->clk = NULL;
+ }
+
+ return ret;
}
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index dc65742c4bbc..a5196697627f 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -36,6 +36,7 @@
#include <media/tvaudio.h>
#include <media/i2c-addr.h>
#include <media/tveeprom.h>
+#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
#include "em28xx.h"
@@ -95,8 +96,8 @@ static struct em28xx_reg_seq default_digital[] = {
/* Board Hauppauge WinTV HVR 900 analog */
static struct em28xx_reg_seq hauppauge_wintv_hvr_900_analog[] = {
{EM2820_R08_GPIO_CTRL, 0x2d, ~EM_GPIO_4, 10},
- {0x05, 0xff, 0x10, 10},
- { -1, -1, -1, -1},
+ { 0x05, 0xff, 0x10, 10},
+ { -1, -1, -1, -1},
};
/* Board Hauppauge WinTV HVR 900 digital */
@@ -104,20 +105,20 @@ static struct em28xx_reg_seq hauppauge_wintv_hvr_900_digital[] = {
{EM2820_R08_GPIO_CTRL, 0x2e, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x04, 0x0f, 10},
{EM2880_R04_GPO, 0x0c, 0x0f, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* Board Hauppauge WinTV HVR 900 (R2) digital */
static struct em28xx_reg_seq hauppauge_wintv_hvr_900R2_digital[] = {
{EM2820_R08_GPIO_CTRL, 0x2e, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x0c, 0x0f, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */
static struct em28xx_reg_seq em2880_msi_digivox_ad_analog[] = {
- {EM2820_R08_GPIO_CTRL, 0x69, ~EM_GPIO_4, 10},
- { -1, -1, -1, -1},
+ {EM2820_R08_GPIO_CTRL, 0x69, ~EM_GPIO_4, 10},
+ { -1, -1, -1, -1},
};
/* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */
@@ -132,7 +133,7 @@ static struct em28xx_reg_seq em2882_kworld_315u_digital[] = {
{EM2880_R04_GPO, 0x04, 0xff, 10},
{EM2880_R04_GPO, 0x0c, 0xff, 10},
{EM2820_R08_GPIO_CTRL, 0x7e, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq em2882_kworld_315u_tuner_gpio[] = {
@@ -140,19 +141,19 @@ static struct em28xx_reg_seq em2882_kworld_315u_tuner_gpio[] = {
{EM2880_R04_GPO, 0x0c, 0xff, 10},
{EM2880_R04_GPO, 0x08, 0xff, 10},
{EM2880_R04_GPO, 0x0c, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq kworld_330u_analog[] = {
{EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x00, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq kworld_330u_digital[] = {
{EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x08, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* Evga inDtube
@@ -170,11 +171,11 @@ static struct em28xx_reg_seq evga_indtube_digital[] = {
{EM2820_R08_GPIO_CTRL, 0x7a, 0xff, 1},
{EM2880_R04_GPO, 0x04, 0xff, 10},
{EM2880_R04_GPO, 0x0c, 0xff, 1},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/*
- * KWorld PlusTV 340U and UB435-Q (ATSC) GPIOs map:
+ * KWorld PlusTV 340U, UB435-Q and UB435-Q V2 (ATSC) GPIOs map:
* EM_GPIO_0 - currently unknown
* EM_GPIO_1 - LED disable/enable (1 = off, 0 = on)
* EM_GPIO_2 - currently unknown
@@ -185,8 +186,8 @@ static struct em28xx_reg_seq evga_indtube_digital[] = {
* EM_GPIO_7 - currently unknown
*/
static struct em28xx_reg_seq kworld_a340_digital[] = {
- {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10},
- { -1, -1, -1, -1},
+ {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10},
+ { -1, -1, -1, -1},
};
/* Pinnacle Hybrid Pro eb1a:2881 */
@@ -205,13 +206,13 @@ static struct em28xx_reg_seq pinnacle_hybrid_pro_digital[] = {
static struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_analog[] = {
{EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x00, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_digital[] = {
{EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x08, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* eb1a:2868 Reddo DVB-C USB TV Box
@@ -225,7 +226,7 @@ static struct em28xx_reg_seq reddo_dvb_c_usb_box[] = {
{EM2820_R08_GPIO_CTRL, 0x7f, 0xff, 10},
{EM2820_R08_GPIO_CTRL, 0x6f, 0xff, 10},
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
- {-1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* Callback for the most boards */
@@ -233,23 +234,23 @@ static struct em28xx_reg_seq default_tuner_gpio[] = {
{EM2820_R08_GPIO_CTRL, EM_GPIO_4, EM_GPIO_4, 10},
{EM2820_R08_GPIO_CTRL, 0, EM_GPIO_4, 10},
{EM2820_R08_GPIO_CTRL, EM_GPIO_4, EM_GPIO_4, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* Mute/unmute */
static struct em28xx_reg_seq compro_unmute_tv_gpio[] = {
- {EM2820_R08_GPIO_CTRL, 5, 7, 10},
- { -1, -1, -1, -1},
+ {EM2820_R08_GPIO_CTRL, 5, 7, 10},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq compro_unmute_svid_gpio[] = {
- {EM2820_R08_GPIO_CTRL, 4, 7, 10},
- { -1, -1, -1, -1},
+ {EM2820_R08_GPIO_CTRL, 4, 7, 10},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq compro_mute_gpio[] = {
- {EM2820_R08_GPIO_CTRL, 6, 7, 10},
- { -1, -1, -1, -1},
+ {EM2820_R08_GPIO_CTRL, 6, 7, 10},
+ { -1, -1, -1, -1},
};
/* Terratec AV350 */
@@ -279,21 +280,21 @@ static struct em28xx_reg_seq vc211a_enable[] = {
static struct em28xx_reg_seq dikom_dk300_digital[] = {
{EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x08, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* Reset for the most [digital] boards */
static struct em28xx_reg_seq leadership_digital[] = {
{EM2874_R80_GPIO_P0_CTRL, 0x70, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq leadership_reset[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xb0, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/* 2013:024f PCTV nanoStick T2 290e
@@ -304,7 +305,7 @@ static struct em28xx_reg_seq pctv_290e[] = {
{EM2874_R80_GPIO_P0_CTRL, 0x00, 0xff, 80},
{EM2874_R80_GPIO_P0_CTRL, 0x40, 0xff, 80}, /* GPIO_6 = 1 */
{EM2874_R80_GPIO_P0_CTRL, 0xc0, 0xff, 80}, /* GPIO_7 = 1 */
- {-1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
#if 0
@@ -313,14 +314,14 @@ static struct em28xx_reg_seq terratec_h5_gpio[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq terratec_h5_digital[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
#endif
@@ -335,12 +336,12 @@ static struct em28xx_reg_seq terratec_h5_digital[] = {
* GPIO_7 - LED (green LED)
*/
static struct em28xx_reg_seq pctv_460e[] = {
- {EM2874_R80_GPIO_P0_CTRL, 0x01, 0xff, 50},
- {0x0d, 0xff, 0xff, 50},
- {EM2874_R80_GPIO_P0_CTRL, 0x41, 0xff, 50}, /* GPIO_6=1 */
- {0x0d, 0x42, 0xff, 50},
- {EM2874_R80_GPIO_P0_CTRL, 0x61, 0xff, 50}, /* GPIO_5=1 */
- { -1, -1, -1, -1},
+ {EM2874_R80_GPIO_P0_CTRL, 0x01, 0xff, 50},
+ { 0x0d, 0xff, 0xff, 50},
+ {EM2874_R80_GPIO_P0_CTRL, 0x41, 0xff, 50}, /* GPIO_6=1 */
+ { 0x0d, 0x42, 0xff, 50},
+ {EM2874_R80_GPIO_P0_CTRL, 0x61, 0xff, 50}, /* GPIO_5=1 */
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq c3tech_digital_duo_digital[] = {
@@ -352,7 +353,7 @@ static struct em28xx_reg_seq c3tech_digital_duo_digital[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xbe, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 20},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
#if 0
@@ -361,14 +362,14 @@ static struct em28xx_reg_seq hauppauge_930c_gpio[] = {
{EM2874_R80_GPIO_P0_CTRL, 0x4f, 0xff, 10}, /* xc5000 reset */
{EM2874_R80_GPIO_P0_CTRL, 0x6f, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0x4f, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
static struct em28xx_reg_seq hauppauge_930c_digital[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 10},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
#endif
@@ -378,10 +379,10 @@ static struct em28xx_reg_seq hauppauge_930c_digital[] = {
* GPIO_7 - LED, 0=active
*/
static struct em28xx_reg_seq maxmedia_ub425_tc[] = {
- {EM2874_R80_GPIO_P0_CTRL, 0x83, 0xff, 100},
- {EM2874_R80_GPIO_P0_CTRL, 0xc3, 0xff, 100}, /* GPIO_6 = 1 */
- {EM2874_R80_GPIO_P0_CTRL, 0x43, 0xff, 000}, /* GPIO_7 = 0 */
- {-1, -1, -1, -1},
+ {EM2874_R80_GPIO_P0_CTRL, 0x83, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0xc3, 0xff, 100}, /* GPIO_6 = 1 */
+ {EM2874_R80_GPIO_P0_CTRL, 0x43, 0xff, 000}, /* GPIO_7 = 0 */
+ { -1, -1, -1, -1},
};
/* 2304:0242 PCTV QuatroStick (510e)
@@ -391,10 +392,10 @@ static struct em28xx_reg_seq maxmedia_ub425_tc[] = {
* GPIO_7: LED, 1=active
*/
static struct em28xx_reg_seq pctv_510e[] = {
- {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100},
- {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */
- {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */
- { -1, -1, -1, -1},
+ {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */
+ {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */
+ { -1, -1, -1, -1},
};
/* 2013:0251 PCTV QuatroStick nano (520e)
@@ -404,11 +405,11 @@ static struct em28xx_reg_seq pctv_510e[] = {
* GPIO_7: LED, 1=active
*/
static struct em28xx_reg_seq pctv_520e[] = {
- {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100},
- {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */
- {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */
- {EM2874_R80_GPIO_P0_CTRL, 0xd4, 0xff, 000}, /* GPIO_7 = 1 */
- { -1, -1, -1, -1},
+ {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */
+ {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */
+ {EM2874_R80_GPIO_P0_CTRL, 0xd4, 0xff, 000}, /* GPIO_7 = 1 */
+ { -1, -1, -1, -1},
};
/*
@@ -2030,6 +2031,18 @@ struct em28xx_board em28xx_boards[] = {
.i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
EM28XX_I2C_FREQ_400_KHZ,
},
+ /*
+ * 1b80:e346 KWorld USB ATSC TV Stick UB435-Q V2
+ * Empia EM2874B + LG DT3305 + NXP TDA18271HDC2
+ */
+ [EM2874_BOARD_KWORLD_UB435Q_V2] = {
+ .name = "KWorld USB ATSC TV Stick UB435-Q V2",
+ .tuner_type = TUNER_ABSENT,
+ .has_dvb = 1,
+ .dvb_gpio = kworld_a340_digital,
+ .tuner_gpio = default_tuner_gpio,
+ .def_i2c_bus = 1,
+ },
};
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -2173,6 +2186,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2860_BOARD_GADMEI_UTV330 },
{ USB_DEVICE(0x1b80, 0xa340),
.driver_info = EM2870_BOARD_KWORLD_A340 },
+ { USB_DEVICE(0x1b80, 0xe346),
+ .driver_info = EM2874_BOARD_KWORLD_UB435Q_V2 },
{ USB_DEVICE(0x2013, 0x024f),
.driver_info = EM28174_BOARD_PCTV_290E },
{ USB_DEVICE(0x2013, 0x024c),
@@ -2857,6 +2872,8 @@ void em28xx_release_resources(struct em28xx *dev)
if (dev->def_i2c_bus)
em28xx_i2c_unregister(dev, 1);
em28xx_i2c_unregister(dev, 0);
+ if (dev->clk)
+ v4l2_clk_unregister_fixed(dev->clk);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index bb1e8dca80cd..344042bb845c 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -298,6 +298,18 @@ static struct lgdt3305_config em2870_lgdt3304_dev = {
.qam_if_khz = 4000,
};
+static struct lgdt3305_config em2874_lgdt3305_dev = {
+ .i2c_addr = 0x0e,
+ .demod_chip = LGDT3305,
+ .spectral_inversion = 1,
+ .deny_i2c_rptr = 0,
+ .mpeg_mode = LGDT3305_MPEG_SERIAL,
+ .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE,
+ .tpvalid_polarity = LGDT3305_TP_VALID_HIGH,
+ .vsb_if_khz = 3250,
+ .qam_if_khz = 4000,
+};
+
static struct s921_config sharp_isdbt = {
.demod_address = 0x30 >> 1
};
@@ -329,6 +341,11 @@ static struct tda18271_config kworld_a340_config = {
.std_map = &kworld_a340_std_map,
};
+static struct tda18271_config kworld_ub435q_v2_config = {
+ .std_map = &kworld_a340_std_map,
+ .gate = TDA18271_GATE_DIGITAL,
+};
+
static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
@@ -384,7 +401,10 @@ static struct drxk_config maxmedia_ub425_tc_drxk = {
.adr = 0x29,
.single_master = 1,
.no_i2c_bridge = 1,
+ .microcode_name = "dvb-demod-drxk-01.fw",
+ .chunk_size = 62,
.load_firmware_sync = true,
+ .qam_demod_parameter_count = 2,
};
static struct drxk_config pctv_520e_drxk = {
@@ -424,7 +444,7 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0x65},
{EM2874_R80_GPIO_P0_CTRL, 0xfb, 0xff, 0x32},
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0xb8},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
struct em28xx_reg_seq hauppauge_hvr930c_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x01},
@@ -439,7 +459,7 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
{EM2874_R80_GPIO_P0_CTRL, 0xcf, 0xff, 0x0b},
{EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x65},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
struct {
@@ -491,13 +511,13 @@ static void terratec_h5_init(struct em28xx *dev)
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
struct em28xx_reg_seq terratec_h5_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
struct {
unsigned char r[4];
@@ -547,12 +567,12 @@ static void terratec_htc_stick_init(struct em28xx *dev)
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
struct em28xx_reg_seq terratec_htc_stick_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/*
@@ -594,13 +614,13 @@ static void terratec_htc_usb_xs_init(struct em28xx *dev)
{EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
- { -1, -1, -1, -1},
+ { -1, -1, -1, -1},
};
/*
@@ -1227,18 +1247,14 @@ static int em28xx_dvb_init(struct em28xx *dev)
dvb->fe[0]->ops.i2c_gate_ctrl = NULL;
/* attach tuner */
- if (!dvb_attach(tda18271c2dd_attach, dvb->fe[0],
- &dev->i2c_adap[dev->def_i2c_bus], 0x60)) {
+ if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
+ &dev->i2c_adap[dev->def_i2c_bus],
+ &em28xx_cxd2820r_tda18271_config)) {
dvb_frontend_detach(dvb->fe[0]);
result = -EINVAL;
goto out_free;
}
}
-
- /* TODO: we need drx-3913k firmware in order to support DVB-T */
- em28xx_info("MaxMedia UB425-TC/Delock 61959: only DVB-C " \
- "supported by that driver version\n");
-
break;
case EM2884_BOARD_PCTV_510E:
case EM2884_BOARD_PCTV_520E:
@@ -1297,6 +1313,23 @@ static int em28xx_dvb_init(struct em28xx *dev)
goto out_free;
}
break;
+ case EM2874_BOARD_KWORLD_UB435Q_V2:
+ dvb->fe[0] = dvb_attach(lgdt3305_attach,
+ &em2874_lgdt3305_dev,
+ &dev->i2c_adap[dev->def_i2c_bus]);
+ if (!dvb->fe[0]) {
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* Attach the tuner. */
+ if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
+ &dev->i2c_adap[dev->def_i2c_bus],
+ &kworld_ub435q_v2_config)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
default:
em28xx_errdev("/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n");
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 9d103344f34a..fc5d60efd4ab 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -638,7 +638,7 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
if (rc)
return rc;
- if (dev->streaming_users++ == 0) {
+ if (dev->streaming_users == 0) {
/* First active streaming user, so allocate all the URBs */
/* Allocate the USB bandwidth */
@@ -657,7 +657,7 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
dev->packet_multiplier,
em28xx_urb_data_copy);
if (rc < 0)
- goto fail;
+ return rc;
/*
* djh: it's not clear whether this code is still needed. I'm
@@ -675,7 +675,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
}
-fail:
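+ /* count the user only once the URBs have been set up successfully */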
+ dev->streaming_users++;
+
return rc;
}
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 205e9038b1c0..f8726ad5d0a8 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -131,6 +131,7 @@
#define EM2884_BOARD_TERRATEC_HTC_USB_XS 87
#define EM2884_BOARD_C3TECH_DIGITAL_DUO 88
#define EM2874_BOARD_DELOCK_61959 89
+#define EM2874_BOARD_KWORLD_UB435Q_V2 90
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -492,6 +493,7 @@ struct em28xx {
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_clk *clk;
struct em28xx_board board;
/* Webcam specific fields */
diff --git a/drivers/media/usb/gspca/conex.c b/drivers/media/usb/gspca/conex.c
index 38714df31ac4..2e15c80d6e3d 100644
--- a/drivers/media/usb/gspca/conex.c
+++ b/drivers/media/usb/gspca/conex.c
@@ -783,7 +783,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x22); /* JPEG 411 */
jpeg_set_qual(sd->jpeg_hdr, QUALITY);
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index 064b53043b15..f23df4a9d8c5 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -1553,9 +1553,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
sd->params.format.videoSize = VIDEOSIZE_CIF;
sd->params.roi.colEnd = sd->params.roi.colStart +
- (gspca_dev->width >> 3);
+ (gspca_dev->pixfmt.width >> 3);
sd->params.roi.rowEnd = sd->params.roi.rowStart +
- (gspca_dev->height >> 2);
+ (gspca_dev->pixfmt.height >> 2);
/* And now set the camera to a known state */
ret = do_command(gspca_dev, CPIA_COMMAND_SetGrabMode,
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 048507b27bb2..f3a7ace0fac9 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -504,8 +504,7 @@ static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file,
unsigned int frsz;
int i;
- i = gspca_dev->curr_mode;
- frsz = gspca_dev->cam.cam_mode[i].sizeimage;
+ frsz = gspca_dev->pixfmt.sizeimage;
PDEBUG(D_STREAM, "frame alloc frsz: %d", frsz);
frsz = PAGE_ALIGN(frsz);
if (count >= GSPCA_MAX_FRAMES)
@@ -627,16 +626,14 @@ static struct usb_host_endpoint *alt_xfer(struct usb_host_interface *alt,
static u32 which_bandwidth(struct gspca_dev *gspca_dev)
{
u32 bandwidth;
- int i;
/* get the (max) image size */
- i = gspca_dev->curr_mode;
- bandwidth = gspca_dev->cam.cam_mode[i].sizeimage;
+ bandwidth = gspca_dev->pixfmt.sizeimage;
/* if the image is compressed, estimate its mean size */
if (!gspca_dev->cam.needs_full_bandwidth &&
- bandwidth < gspca_dev->cam.cam_mode[i].width *
- gspca_dev->cam.cam_mode[i].height)
+ bandwidth < gspca_dev->pixfmt.width *
+ gspca_dev->pixfmt.height)
bandwidth = bandwidth * 3 / 8; /* 0.375 */
/* estimate the frame rate */
@@ -650,7 +647,7 @@ static u32 which_bandwidth(struct gspca_dev *gspca_dev)
/* don't hope more than 15 fps with USB 1.1 and
* image resolution >= 640x480 */
- if (gspca_dev->width >= 640
+ if (gspca_dev->pixfmt.width >= 640
&& gspca_dev->dev->speed == USB_SPEED_FULL)
bandwidth *= 15; /* 15 fps */
else
@@ -982,9 +979,7 @@ static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
i = gspca_dev->cam.nmodes - 1; /* take the highest mode */
gspca_dev->curr_mode = i;
- gspca_dev->width = gspca_dev->cam.cam_mode[i].width;
- gspca_dev->height = gspca_dev->cam.cam_mode[i].height;
- gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i].pixelformat;
+ gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i];
/* does nothing if ctrl_handler == NULL */
v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler);
@@ -1105,10 +1100,8 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- int mode;
- mode = gspca_dev->curr_mode;
- fmt->fmt.pix = gspca_dev->cam.cam_mode[mode];
+ fmt->fmt.pix = gspca_dev->pixfmt;
/* some drivers use priv internally, zero it before giving it to
userspace */
fmt->fmt.pix.priv = 0;
@@ -1140,6 +1133,12 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev,
mode = mode2;
}
fmt->fmt.pix = gspca_dev->cam.cam_mode[mode];
+ if (gspca_dev->sd_desc->try_fmt) {
+ /* pass original resolution to subdriver try_fmt */
+ fmt->fmt.pix.width = w;
+ fmt->fmt.pix.height = h;
+ gspca_dev->sd_desc->try_fmt(gspca_dev, fmt);
+ }
/* some drivers use priv internally, zero it before giving it to
userspace */
fmt->fmt.pix.priv = 0;
@@ -1178,19 +1177,16 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
goto out;
}
- if (ret == gspca_dev->curr_mode) {
- ret = 0;
- goto out; /* same mode */
- }
-
if (gspca_dev->streaming) {
ret = -EBUSY;
goto out;
}
- gspca_dev->width = fmt->fmt.pix.width;
- gspca_dev->height = fmt->fmt.pix.height;
- gspca_dev->pixfmt = fmt->fmt.pix.pixelformat;
gspca_dev->curr_mode = ret;
+ if (gspca_dev->sd_desc->try_fmt)
+ /* subdriver try_fmt can modify format parameters */
+ gspca_dev->pixfmt = fmt->fmt.pix;
+ else
+ gspca_dev->pixfmt = gspca_dev->cam.cam_mode[ret];
ret = 0;
out:
@@ -1205,6 +1201,9 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
int i;
__u32 index = 0;
+ if (gspca_dev->sd_desc->enum_framesizes)
+ return gspca_dev->sd_desc->enum_framesizes(gspca_dev, fsize);
+
for (i = 0; i < gspca_dev->cam.nmodes; i++) {
if (fsize->pixel_format !=
gspca_dev->cam.cam_mode[i].pixelformat)
@@ -1471,8 +1470,9 @@ static int vidioc_streamon(struct file *file, void *priv,
if (ret < 0)
goto out;
}
- PDEBUG_MODE(gspca_dev, D_STREAM, "stream on OK", gspca_dev->pixfmt,
- gspca_dev->width, gspca_dev->height);
+ PDEBUG_MODE(gspca_dev, D_STREAM, "stream on OK",
+ gspca_dev->pixfmt.pixelformat,
+ gspca_dev->pixfmt.width, gspca_dev->pixfmt.height);
ret = 0;
out:
mutex_unlock(&gspca_dev->queue_lock);
diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
index ac0b11f46f50..300642dc1a17 100644
--- a/drivers/media/usb/gspca/gspca.h
+++ b/drivers/media/usb/gspca/gspca.h
@@ -88,6 +88,10 @@ typedef void (*cam_pkt_op) (struct gspca_dev *gspca_dev,
typedef int (*cam_int_pkt_op) (struct gspca_dev *gspca_dev,
u8 *data,
int len);
+typedef void (*cam_format_op) (struct gspca_dev *gspca_dev,
+ struct v4l2_format *fmt);
+typedef int (*cam_frmsize_op) (struct gspca_dev *gspca_dev,
+ struct v4l2_frmsizeenum *fsize);
/* subdriver description */
struct sd_desc {
@@ -109,6 +113,8 @@ struct sd_desc {
cam_set_jpg_op set_jcomp;
cam_streamparm_op get_streamparm;
cam_streamparm_op set_streamparm;
+ cam_format_op try_fmt;
+ cam_frmsize_op enum_framesizes;
#ifdef CONFIG_VIDEO_ADV_DEBUG
cam_set_reg_op set_register;
cam_get_reg_op get_register;
@@ -183,9 +189,7 @@ struct gspca_dev {
__u8 streaming; /* protected by both mutexes (*) */
__u8 curr_mode; /* current camera mode */
- __u32 pixfmt; /* current mode parameters */
- __u16 width;
- __u16 height;
+ struct v4l2_pix_format pixfmt; /* current mode parameters */
__u32 sequence; /* frame sequence number */
wait_queue_head_t wq; /* wait queue */
diff --git a/drivers/media/usb/gspca/jeilinj.c b/drivers/media/usb/gspca/jeilinj.c
index 8da3dde38385..19736e237b37 100644
--- a/drivers/media/usb/gspca/jeilinj.c
+++ b/drivers/media/usb/gspca/jeilinj.c
@@ -378,11 +378,12 @@ static int sd_start(struct gspca_dev *gspca_dev)
struct sd *dev = (struct sd *) gspca_dev;
/* create the JPEG header */
- jpeg_define(dev->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(dev->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x21); /* JPEG 422 */
jpeg_set_qual(dev->jpeg_hdr, dev->quality);
PDEBUG(D_STREAM, "Start streaming at %dx%d",
- gspca_dev->height, gspca_dev->width);
+ gspca_dev->pixfmt.height, gspca_dev->pixfmt.width);
jlj_start(gspca_dev);
return gspca_dev->usb_err;
}
diff --git a/drivers/media/usb/gspca/jl2005bcd.c b/drivers/media/usb/gspca/jl2005bcd.c
index fdaeeb14453f..5b481fa43099 100644
--- a/drivers/media/usb/gspca/jl2005bcd.c
+++ b/drivers/media/usb/gspca/jl2005bcd.c
@@ -455,7 +455,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
sd->cap_mode = gspca_dev->cam.cam_mode;
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 640:
PDEBUG(D_STREAM, "Start streaming at vga resolution");
jl2005c_stream_start_vga_lg(gspca_dev);
diff --git a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
index cfa4663f8934..27fcef11aef4 100644
--- a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
@@ -266,7 +266,7 @@ static int mt9m111_set_hvflip(struct gspca_dev *gspca_dev)
return err;
data[0] = MT9M111_RMB_OVER_SIZED;
- if (gspca_dev->width == 640) {
+ if (gspca_dev->pixfmt.width == 640) {
data[1] = MT9M111_RMB_ROW_SKIP_2X |
MT9M111_RMB_COLUMN_SKIP_2X |
(hflip << 1) | vflip;
diff --git a/drivers/media/usb/gspca/mars.c b/drivers/media/usb/gspca/mars.c
index ff2c5abf115b..779a8785f421 100644
--- a/drivers/media/usb/gspca/mars.c
+++ b/drivers/media/usb/gspca/mars.c
@@ -254,7 +254,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
int i;
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x21); /* JPEG 422 */
jpeg_set_qual(sd->jpeg_hdr, QUALITY);
@@ -270,8 +271,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
data[0] = 0x00; /* address */
data[1] = 0x0c | 0x01; /* reg 0 */
data[2] = 0x01; /* reg 1 */
- data[3] = gspca_dev->width / 8; /* h_size , reg 2 */
- data[4] = gspca_dev->height / 8; /* v_size , reg 3 */
+ data[3] = gspca_dev->pixfmt.width / 8; /* h_size , reg 2 */
+ data[4] = gspca_dev->pixfmt.height / 8; /* v_size , reg 3 */
data[5] = 0x30; /* reg 4, MI, PAS5101 :
* 0x30 for 24mhz , 0x28 for 12mhz */
data[6] = 0x02; /* reg 5, H start - was 0x04 */
diff --git a/drivers/media/usb/gspca/mr97310a.c b/drivers/media/usb/gspca/mr97310a.c
index 68bb2f359666..f006e29ca019 100644
--- a/drivers/media/usb/gspca/mr97310a.c
+++ b/drivers/media/usb/gspca/mr97310a.c
@@ -521,7 +521,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
if (sd->sensor_type)
data[5] = 0xbb;
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160:
data[9] |= 0x04; /* reg 8, 2:1 scale down from 320 */
/* fall thru */
@@ -618,7 +618,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
data[10] = 0x18;
}
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160:
data[9] |= 0x0c; /* reg 8, 4:1 scale down */
/* fall thru */
@@ -847,7 +847,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 expo, s32 min_clockdiv)
u8 clockdiv = (60 * expo + 7999) / 8000;
/* Limit framerate to not exceed usb bandwidth */
- if (clockdiv < min_clockdiv && gspca_dev->width >= 320)
+ if (clockdiv < min_clockdiv && gspca_dev->pixfmt.width >= 320)
clockdiv = min_clockdiv;
else if (clockdiv < 2)
clockdiv = 2;
diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c
index 44c9964b1b3e..599f755e75b8 100644
--- a/drivers/media/usb/gspca/nw80x.c
+++ b/drivers/media/usb/gspca/nw80x.c
@@ -1708,7 +1708,7 @@ static void setautogain(struct gspca_dev *gspca_dev, s32 val)
reg_r(gspca_dev, 0x1004, 1);
if (gspca_dev->usb_buf[0] & 0x04) { /* if AE_FULL_FRM */
- sd->ae_res = gspca_dev->width * gspca_dev->height;
+ sd->ae_res = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
} else { /* get the AE window size */
reg_r(gspca_dev, 0x1011, 8);
w = (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0]
@@ -1717,7 +1717,8 @@ static void setautogain(struct gspca_dev *gspca_dev, s32 val)
- (gspca_dev->usb_buf[7] << 8) - gspca_dev->usb_buf[6];
sd->ae_res = h * w;
if (sd->ae_res == 0)
- sd->ae_res = gspca_dev->width * gspca_dev->height;
+ sd->ae_res = gspca_dev->pixfmt.width *
+ gspca_dev->pixfmt.height;
}
}
@@ -1856,21 +1857,21 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w_buf(gspca_dev, cmd);
switch (sd->webcam) {
case P35u:
- if (gspca_dev->width == 320)
+ if (gspca_dev->pixfmt.width == 320)
reg_w_buf(gspca_dev, nw801_start_qvga);
else
reg_w_buf(gspca_dev, nw801_start_vga);
reg_w_buf(gspca_dev, nw801_start_2);
break;
case Kr651us:
- if (gspca_dev->width == 320)
+ if (gspca_dev->pixfmt.width == 320)
reg_w_buf(gspca_dev, kr651_start_qvga);
else
reg_w_buf(gspca_dev, kr651_start_vga);
reg_w_buf(gspca_dev, kr651_start_2);
break;
case Proscope:
- if (gspca_dev->width == 320)
+ if (gspca_dev->pixfmt.width == 320)
reg_w_buf(gspca_dev, proscope_start_qvga);
else
reg_w_buf(gspca_dev, proscope_start_vga);
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index 8937d79fd176..c95f32a0c02b 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -3468,7 +3468,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
switch (sd->bridge) {
case BRIDGE_OVFX2:
- if (gspca_dev->width != 800)
+ if (gspca_dev->pixfmt.width != 800)
gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE;
else
gspca_dev->cam.bulk_size = 7 * 4096;
@@ -3507,8 +3507,8 @@ static void ov511_mode_init_regs(struct sd *sd)
/* Here I'm assuming that snapshot size == image size.
* I hope that's always true. --claudio
*/
- hsegs = (sd->gspca_dev.width >> 3) - 1;
- vsegs = (sd->gspca_dev.height >> 3) - 1;
+ hsegs = (sd->gspca_dev.pixfmt.width >> 3) - 1;
+ vsegs = (sd->gspca_dev.pixfmt.height >> 3) - 1;
reg_w(sd, R511_CAM_PXCNT, hsegs);
reg_w(sd, R511_CAM_LNCNT, vsegs);
@@ -3541,7 +3541,7 @@ static void ov511_mode_init_regs(struct sd *sd)
case SEN_OV7640:
case SEN_OV7648:
case SEN_OV76BE:
- if (sd->gspca_dev.width == 320)
+ if (sd->gspca_dev.pixfmt.width == 320)
interlaced = 1;
/* Fall through */
case SEN_OV6630:
@@ -3551,7 +3551,7 @@ static void ov511_mode_init_regs(struct sd *sd)
case 30:
case 25:
/* Not enough bandwidth to do 640x480 @ 30 fps */
- if (sd->gspca_dev.width != 640) {
+ if (sd->gspca_dev.pixfmt.width != 640) {
sd->clockdiv = 0;
break;
}
@@ -3584,7 +3584,8 @@ static void ov511_mode_init_regs(struct sd *sd)
/* Check if we have enough bandwidth to disable compression */
fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1;
- needed = fps * sd->gspca_dev.width * sd->gspca_dev.height * 3 / 2;
+ needed = fps * sd->gspca_dev.pixfmt.width *
+ sd->gspca_dev.pixfmt.height * 3 / 2;
/* 1000 isoc packets/sec */
if (needed > 1000 * packet_size) {
/* Enable Y and UV quantization and compression */
@@ -3646,8 +3647,8 @@ static void ov518_mode_init_regs(struct sd *sd)
reg_w(sd, 0x38, 0x80);
}
- hsegs = sd->gspca_dev.width / 16;
- vsegs = sd->gspca_dev.height / 4;
+ hsegs = sd->gspca_dev.pixfmt.width / 16;
+ vsegs = sd->gspca_dev.pixfmt.height / 4;
reg_w(sd, 0x29, hsegs);
reg_w(sd, 0x2a, vsegs);
@@ -3686,7 +3687,8 @@ static void ov518_mode_init_regs(struct sd *sd)
* happened to be with revision < 2 cams using an
* OV7620 and revision 2 cams using an OV7620AE.
*/
- if (sd->revision > 0 && sd->gspca_dev.width == 640) {
+ if (sd->revision > 0 &&
+ sd->gspca_dev.pixfmt.width == 640) {
reg_w(sd, 0x20, 0x60);
reg_w(sd, 0x21, 0x1f);
} else {
@@ -3812,8 +3814,8 @@ static void ov519_mode_init_regs(struct sd *sd)
break;
}
- reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.width >> 4);
- reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.height >> 3);
+ reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.pixfmt.width >> 4);
+ reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.pixfmt.height >> 3);
if (sd->sensor == SEN_OV7670 &&
sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv)
reg_w(sd, OV519_R12_X_OFFSETL, 0x04);
@@ -3947,14 +3949,16 @@ static void mode_init_ov_sensor_regs(struct sd *sd)
}
case SEN_OV3610:
if (qvga) {
- xstart = (1040 - gspca_dev->width) / 2 + (0x1f << 4);
- ystart = (776 - gspca_dev->height) / 2;
+ xstart = (1040 - gspca_dev->pixfmt.width) / 2 +
+ (0x1f << 4);
+ ystart = (776 - gspca_dev->pixfmt.height) / 2;
} else {
- xstart = (2076 - gspca_dev->width) / 2 + (0x10 << 4);
- ystart = (1544 - gspca_dev->height) / 2;
+ xstart = (2076 - gspca_dev->pixfmt.width) / 2 +
+ (0x10 << 4);
+ ystart = (1544 - gspca_dev->pixfmt.height) / 2;
}
- xend = xstart + gspca_dev->width;
- yend = ystart + gspca_dev->height;
+ xend = xstart + gspca_dev->pixfmt.width;
+ yend = ystart + gspca_dev->pixfmt.height;
/* Writing to the COMH register resets the other windowing regs
to their default values, so we must do this first. */
i2c_w_mask(sd, 0x12, qvga ? 0x40 : 0x00, 0xf0);
@@ -4229,8 +4233,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
/* Default for most bridges, allow bridge_mode_init_regs to override */
- sd->sensor_width = sd->gspca_dev.width;
- sd->sensor_height = sd->gspca_dev.height;
+ sd->sensor_width = sd->gspca_dev.pixfmt.width;
+ sd->sensor_height = sd->gspca_dev.pixfmt.height;
switch (sd->bridge) {
case BRIDGE_OV511:
@@ -4345,12 +4349,13 @@ static void ov511_pkt_scan(struct gspca_dev *gspca_dev,
ov51x_handle_button(gspca_dev, (in[8] >> 2) & 1);
if (in[8] & 0x80) {
/* Frame end */
- if ((in[9] + 1) * 8 != gspca_dev->width ||
- (in[10] + 1) * 8 != gspca_dev->height) {
+ if ((in[9] + 1) * 8 != gspca_dev->pixfmt.width ||
+ (in[10] + 1) * 8 != gspca_dev->pixfmt.height) {
PERR("Invalid frame size, got: %dx%d,"
" requested: %dx%d\n",
(in[9] + 1) * 8, (in[10] + 1) * 8,
- gspca_dev->width, gspca_dev->height);
+ gspca_dev->pixfmt.width,
+ gspca_dev->pixfmt.height);
gspca_dev->last_packet_type = DISCARD_PACKET;
return;
}
@@ -4470,7 +4475,8 @@ static void ovfx2_pkt_scan(struct gspca_dev *gspca_dev,
if (sd->first_frame) {
sd->first_frame--;
if (gspca_dev->image_len <
- sd->gspca_dev.width * sd->gspca_dev.height)
+ sd->gspca_dev.pixfmt.width *
+ sd->gspca_dev.pixfmt.height)
gspca_dev->last_packet_type = DISCARD_PACKET;
}
gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
index 03a33c46ca2c..90f0d637cd9d 100644
--- a/drivers/media/usb/gspca/ov534.c
+++ b/drivers/media/usb/gspca/ov534.c
@@ -1440,9 +1440,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* If this packet is marked as EOF, end the frame */
} else if (data[1] & UVC_STREAM_EOF) {
sd->last_pts = 0;
- if (gspca_dev->pixfmt == V4L2_PIX_FMT_YUYV
+ if (gspca_dev->pixfmt.pixelformat == V4L2_PIX_FMT_YUYV
&& gspca_dev->image_len + len - 12 !=
- gspca_dev->width * gspca_dev->height * 2) {
+ gspca_dev->pixfmt.width *
+ gspca_dev->pixfmt.height * 2) {
PDEBUG(D_PACK, "wrong sized frame");
goto discard;
}
diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c
index c4cd028fe0b4..47085cf2d723 100644
--- a/drivers/media/usb/gspca/ov534_9.c
+++ b/drivers/media/usb/gspca/ov534_9.c
@@ -59,6 +59,7 @@ enum sensors {
SENSOR_OV965x, /* ov9657 */
SENSOR_OV971x, /* ov9712 */
SENSOR_OV562x, /* ov5621 */
+ SENSOR_OV361x, /* ov3610 */
NSENSORS
};
@@ -106,6 +107,274 @@ static const struct v4l2_pix_format ov562x_mode[] = {
}
};
+enum ov361x {
+ ov361x_2048 = 0,
+ ov361x_1600,
+ ov361x_1024,
+ ov361x_640,
+ ov361x_320,
+ ov361x_160,
+ ov361x_last
+};
+
+static const struct v4l2_pix_format ov361x_mode[] = {
+ {0x800, 0x600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 0x800,
+ .sizeimage = 0x800 * 0x600,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {1600, 1200, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 1600,
+ .sizeimage = 1600 * 1200,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 1024,
+ .sizeimage = 1024 * 768,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120,
+ .colorspace = V4L2_COLORSPACE_SRGB}
+};
+
+static const u8 ov361x_start_2048[][2] = {
+ {0x12, 0x80},
+ {0x13, 0xcf},
+ {0x14, 0x40},
+ {0x15, 0x00},
+ {0x01, 0x80},
+ {0x02, 0x80},
+ {0x04, 0x70},
+ {0x0d, 0x40},
+ {0x0f, 0x47},
+ {0x11, 0x81},
+ {0x32, 0x36},
+ {0x33, 0x0c},
+ {0x34, 0x00},
+ {0x35, 0x90},
+ {0x12, 0x00},
+ {0x17, 0x10},
+ {0x18, 0x90},
+ {0x19, 0x00},
+ {0x1a, 0xc0},
+};
+static const u8 ov361x_bridge_start_2048[][2] = {
+ {0xf1, 0x60},
+ {0x88, 0x00},
+ {0x89, 0x08},
+ {0x8a, 0x00},
+ {0x8b, 0x06},
+ {0x8c, 0x01},
+ {0x8d, 0x10},
+ {0x1c, 0x00},
+ {0x1d, 0x48},
+ {0x1d, 0x00},
+ {0x1d, 0xff},
+ {0x1c, 0x0a},
+ {0x1d, 0x2e},
+ {0x1d, 0x1e},
+};
+
+static const u8 ov361x_start_1600[][2] = {
+ {0x12, 0x80},
+ {0x13, 0xcf},
+ {0x14, 0x40},
+ {0x15, 0x00},
+ {0x01, 0x80},
+ {0x02, 0x80},
+ {0x04, 0x70},
+ {0x0d, 0x40},
+ {0x0f, 0x47},
+ {0x11, 0x81},
+ {0x32, 0x36},
+ {0x33, 0x0C},
+ {0x34, 0x00},
+ {0x35, 0x90},
+ {0x12, 0x00},
+ {0x17, 0x10},
+ {0x18, 0x90},
+ {0x19, 0x00},
+ {0x1a, 0xc0},
+};
+static const u8 ov361x_bridge_start_1600[][2] = {
+ {0xf1, 0x60}, /* Hsize[7:0] */
+ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */
+ {0x89, 0x08}, /* Vsize[7:0] */
+ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */
+ {0x8b, 0x06}, /* for Iso */
+ {0x8c, 0x01}, /* RAW input */
+ {0x8d, 0x10},
+ {0x1c, 0x00}, /* RAW output, Iso transfer */
+ {0x1d, 0x48},
+ {0x1d, 0x00},
+ {0x1d, 0xff},
+ {0x1c, 0x0a}, /* turn off JPEG, Iso mode */
+ {0x1d, 0x2e}, /* for Iso */
+ {0x1d, 0x1e},
+};
+
+static const u8 ov361x_start_1024[][2] = {
+ {0x12, 0x80},
+ {0x13, 0xcf},
+ {0x14, 0x40},
+ {0x15, 0x00},
+ {0x01, 0x80},
+ {0x02, 0x80},
+ {0x04, 0x70},
+ {0x0d, 0x40},
+ {0x0f, 0x47},
+ {0x11, 0x81},
+ {0x32, 0x36},
+ {0x33, 0x0C},
+ {0x34, 0x00},
+ {0x35, 0x90},
+ {0x12, 0x40},
+ {0x17, 0x1f},
+ {0x18, 0x5f},
+ {0x19, 0x00},
+ {0x1a, 0x68},
+};
+static const u8 ov361x_bridge_start_1024[][2] = {
+ {0xf1, 0x60}, /* Hsize[7:0] */
+ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */
+ {0x89, 0x04}, /* Vsize[7:0] */
+ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */
+ {0x8b, 0x03}, /* for Iso */
+ {0x8c, 0x01}, /* RAW input */
+ {0x8d, 0x10},
+ {0x1c, 0x00}, /* RAW output, Iso transfer */
+ {0x1d, 0x48},
+ {0x1d, 0x00},
+ {0x1d, 0xff},
+ {0x1c, 0x0a}, /* turn off JPEG, Iso mode */
+ {0x1d, 0x2e}, /* for Iso */
+ {0x1d, 0x1e},
+};
+
+static const u8 ov361x_start_640[][2] = {
+ {0x12, 0x80},
+ {0x13, 0xcf},
+ {0x14, 0x40},
+ {0x15, 0x00},
+ {0x01, 0x80},
+ {0x02, 0x80},
+ {0x04, 0x70},
+ {0x0d, 0x40},
+ {0x0f, 0x47},
+ {0x11, 0x81},
+ {0x32, 0x36},
+ {0x33, 0x0C},
+ {0x34, 0x00},
+ {0x35, 0x90},
+ {0x12, 0x40},
+ {0x17, 0x1f},
+ {0x18, 0x5f},
+ {0x19, 0x00},
+ {0x1a, 0x68},
+};
+
+static const u8 ov361x_bridge_start_640[][2] = {
+ {0xf1, 0x60}, /* Hsize[7:0]*/
+ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */
+ {0x89, 0x04}, /* Vsize[7:0] */
+ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */
+ {0x8b, 0x03}, /* for Iso */
+ {0x8c, 0x01}, /* RAW input */
+ {0x8d, 0x10},
+ {0x1c, 0x00}, /* RAW output, Iso transfer */
+ {0x1d, 0x48},
+ {0x1d, 0x00},
+ {0x1d, 0xff},
+ {0x1c, 0x0a}, /* turn off JPEG, Iso mode */
+ {0x1d, 0x2e}, /* for Iso */
+ {0x1d, 0x1e},
+};
+
+static const u8 ov361x_start_320[][2] = {
+ {0x12, 0x80},
+ {0x13, 0xcf},
+ {0x14, 0x40},
+ {0x15, 0x00},
+ {0x01, 0x80},
+ {0x02, 0x80},
+ {0x04, 0x70},
+ {0x0d, 0x40},
+ {0x0f, 0x47},
+ {0x11, 0x81},
+ {0x32, 0x36},
+ {0x33, 0x0C},
+ {0x34, 0x00},
+ {0x35, 0x90},
+ {0x12, 0x40},
+ {0x17, 0x1f},
+ {0x18, 0x5f},
+ {0x19, 0x00},
+ {0x1a, 0x68},
+};
+
+static const u8 ov361x_bridge_start_320[][2] = {
+ {0xf1, 0x60}, /* Hsize[7:0] */
+ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */
+ {0x89, 0x04}, /* Vsize[7:0] */
+ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */
+ {0x8b, 0x03}, /* for Iso */
+ {0x8c, 0x01}, /* RAW input */
+ {0x8d, 0x10},
+ {0x1c, 0x00}, /* RAW output, Iso transfer */
+ {0x1d, 0x48},
+ {0x1d, 0x00},
+ {0x1d, 0xff},
+ {0x1c, 0x0a}, /* turn off JPEG, Iso mode */
+ {0x1d, 0x2e}, /* for Iso */
+ {0x1d, 0x1e},
+};
+
+static const u8 ov361x_start_160[][2] = {
+ {0x12, 0x80},
+ {0x13, 0xcf},
+ {0x14, 0x40},
+ {0x15, 0x00},
+ {0x01, 0x80},
+ {0x02, 0x80},
+ {0x04, 0x70},
+ {0x0d, 0x40},
+ {0x0f, 0x47},
+ {0x11, 0x81},
+ {0x32, 0x36},
+ {0x33, 0x0C},
+ {0x34, 0x00},
+ {0x35, 0x90},
+ {0x12, 0x40},
+ {0x17, 0x1f},
+ {0x18, 0x5f},
+ {0x19, 0x00},
+ {0x1a, 0x68},
+};
+
+static const u8 ov361x_bridge_start_160[][2] = {
+ {0xf1, 0x60}, /* Hsize[7:0] */
+ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */
+ {0x89, 0x04}, /* Vsize[7:0] */
+ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */
+ {0x8b, 0x03}, /* for Iso */
+ {0x8c, 0x01}, /* RAW input */
+ {0x8d, 0x10},
+ {0x1c, 0x00}, /* RAW output, Iso transfer */
+ {0x1d, 0x48},
+ {0x1d, 0x00},
+ {0x1d, 0xff},
+ {0x1c, 0x0a}, /* turn off JPEG, Iso mode */
+ {0x1d, 0x2e}, /* for Iso */
+ {0x1d, 0x1e},
+};
+
static const u8 bridge_init[][2] = {
{0x88, 0xf8},
{0x89, 0xff},
@@ -898,7 +1167,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
int i;
for (i = 0; i < 5; i++) {
- msleep(10);
+ msleep(20);
data = reg_r(gspca_dev, OV534_REG_STATUS);
switch (data) {
@@ -1221,6 +1490,13 @@ static int sd_init(struct gspca_dev *gspca_dev)
sccb_w_array(gspca_dev, ov562x_init_2,
ARRAY_SIZE(ov562x_init_2));
reg_w(gspca_dev, 0xe0, 0x00);
+ } else if ((sensor_id & 0xfff0) == 0x3610) {
+ sd->sensor = SENSOR_OV361x;
+ gspca_dev->cam.cam_mode = ov361x_mode;
+ gspca_dev->cam.nmodes = ARRAY_SIZE(ov361x_mode);
+ reg_w(gspca_dev, 0xe7, 0x3a);
+ reg_w(gspca_dev, 0xf1, 0x60);
+ sccb_write(gspca_dev, 0x12, 0x80);
} else {
pr_err("Unknown sensor %04x", sensor_id);
return -EINVAL;
@@ -1229,6 +1505,53 @@ static int sd_init(struct gspca_dev *gspca_dev)
return gspca_dev->usb_err;
}
+static int sd_start_ov361x(struct gspca_dev *gspca_dev)
+{
+ sccb_write(gspca_dev, 0x12, 0x80);
+ msleep(20);
+ switch (gspca_dev->curr_mode % (ov361x_last)) {
+ case ov361x_2048:
+ reg_w_array(gspca_dev, ov361x_bridge_start_2048,
+ ARRAY_SIZE(ov361x_bridge_start_2048));
+ sccb_w_array(gspca_dev, ov361x_start_2048,
+ ARRAY_SIZE(ov361x_start_2048));
+ break;
+ case ov361x_1600:
+ reg_w_array(gspca_dev, ov361x_bridge_start_1600,
+ ARRAY_SIZE(ov361x_bridge_start_1600));
+ sccb_w_array(gspca_dev, ov361x_start_1600,
+ ARRAY_SIZE(ov361x_start_1600));
+ break;
+ case ov361x_1024:
+ reg_w_array(gspca_dev, ov361x_bridge_start_1024,
+ ARRAY_SIZE(ov361x_bridge_start_1024));
+ sccb_w_array(gspca_dev, ov361x_start_1024,
+ ARRAY_SIZE(ov361x_start_1024));
+ break;
+ case ov361x_640:
+ reg_w_array(gspca_dev, ov361x_bridge_start_640,
+ ARRAY_SIZE(ov361x_bridge_start_640));
+ sccb_w_array(gspca_dev, ov361x_start_640,
+ ARRAY_SIZE(ov361x_start_640));
+ break;
+ case ov361x_320:
+ reg_w_array(gspca_dev, ov361x_bridge_start_320,
+ ARRAY_SIZE(ov361x_bridge_start_320));
+ sccb_w_array(gspca_dev, ov361x_start_320,
+ ARRAY_SIZE(ov361x_start_320));
+ break;
+ case ov361x_160:
+ reg_w_array(gspca_dev, ov361x_bridge_start_160,
+ ARRAY_SIZE(ov361x_bridge_start_160));
+ sccb_w_array(gspca_dev, ov361x_start_160,
+ ARRAY_SIZE(ov361x_start_160));
+ break;
+ }
+ reg_w(gspca_dev, 0xe0, 0x00); /* start transfer */
+
+ return gspca_dev->usb_err;
+}
+
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1237,6 +1560,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
return gspca_dev->usb_err;
if (sd->sensor == SENSOR_OV562x)
return gspca_dev->usb_err;
+ if (sd->sensor == SENSOR_OV361x)
+ return sd_start_ov361x(gspca_dev);
switch (gspca_dev->curr_mode) {
case QVGA_MODE: /* 320x240 */
@@ -1290,6 +1615,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
static void sd_stopN(struct gspca_dev *gspca_dev)
{
+ if (((struct sd *)gspca_dev)->sensor == SENSOR_OV361x) {
+ reg_w(gspca_dev, 0xe0, 0x01); /* stop transfer */
+ /* reg_w(gspca_dev, 0x31, 0x09); */
+ return;
+ }
reg_w(gspca_dev, 0xe0, 0x01);
set_led(gspca_dev, 0);
reg_w(gspca_dev, 0xe0, 0x00);
@@ -1425,6 +1755,8 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
if (sd->sensor == SENSOR_OV971x)
return 0;
+ if (sd->sensor == SENSOR_OV361x)
+ return 0;
gspca_dev->vdev.ctrl_handler = hdl;
v4l2_ctrl_handler_init(hdl, 7);
if (sd->sensor == SENSOR_OV562x) {
diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c
index 83519be94e58..cd79c180f67b 100644
--- a/drivers/media/usb/gspca/pac207.c
+++ b/drivers/media/usb/gspca/pac207.c
@@ -299,7 +299,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
pac207_write_regs(gspca_dev, 0x0042, pac207_sensor_init[3], 8);
/* Compression Balance */
- if (gspca_dev->width == 176)
+ if (gspca_dev->pixfmt.width == 176)
pac207_write_reg(gspca_dev, 0x4a, 0xff);
else
pac207_write_reg(gspca_dev, 0x4a, 0x30);
@@ -317,7 +317,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
mode = 0x00;
else
mode = 0x02;
- if (gspca_dev->width == 176) { /* 176x144 */
+ if (gspca_dev->pixfmt.width == 176) { /* 176x144 */
mode |= 0x01;
PDEBUG(D_STREAM, "pac207_start mode 176x144");
} else { /* 352x288 */
diff --git a/drivers/media/usb/gspca/pac7311.c b/drivers/media/usb/gspca/pac7311.c
index 1a5bdc853a80..25f86b1e74a8 100644
--- a/drivers/media/usb/gspca/pac7311.c
+++ b/drivers/media/usb/gspca/pac7311.c
@@ -326,7 +326,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val)
* 640x480 mode and page 4 reg 2 <= 3 then it must be 9
*/
reg_w(gspca_dev, 0xff, 0x01);
- if (gspca_dev->width != 640 && val <= 3)
+ if (gspca_dev->pixfmt.width != 640 && val <= 3)
reg_w(gspca_dev, 0x08, 0x09);
else
reg_w(gspca_dev, 0x08, 0x08);
@@ -337,7 +337,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val)
* camera to use higher compression or we may run out of
* bandwidth.
*/
- if (gspca_dev->width == 640 && val == 2)
+ if (gspca_dev->pixfmt.width == 640 && val == 2)
reg_w(gspca_dev, 0x80, 0x01);
else
reg_w(gspca_dev, 0x80, 0x1c);
@@ -615,7 +615,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* Start the new frame with the jpeg header */
pac_start_frame(gspca_dev,
- gspca_dev->height, gspca_dev->width);
+ gspca_dev->pixfmt.height, gspca_dev->pixfmt.width);
}
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c
index 5f729b8aa2bd..5102cea50471 100644
--- a/drivers/media/usb/gspca/se401.c
+++ b/drivers/media/usb/gspca/se401.c
@@ -354,9 +354,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* set size + mode */
se401_write_req(gspca_dev, SE401_REQ_SET_WIDTH,
- gspca_dev->width * mult, 0);
+ gspca_dev->pixfmt.width * mult, 0);
se401_write_req(gspca_dev, SE401_REQ_SET_HEIGHT,
- gspca_dev->height * mult, 0);
+ gspca_dev->pixfmt.height * mult, 0);
/*
* HDG: disabled this as it does not seem to do anything
* se401_write_req(gspca_dev, SE401_REQ_SET_OUTPUT_MODE,
@@ -480,7 +480,7 @@ static void sd_complete_frame(struct gspca_dev *gspca_dev, u8 *data, int len)
static void sd_pkt_scan_janggu(struct gspca_dev *gspca_dev, u8 *data, int len)
{
struct sd *sd = (struct sd *)gspca_dev;
- int imagesize = gspca_dev->width * gspca_dev->height;
+ int imagesize = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
int i, plen, bits, pixels, info, count;
if (sd->restart_stream)
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index f4453d52801b..2a38621cf718 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -1955,7 +1955,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
return 0;
}
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160: /* 160x120 */
gspca_dev->alt = 2;
break;
@@ -1985,8 +1985,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
- int width = gspca_dev->width;
- int height = gspca_dev->height;
+ int width = gspca_dev->pixfmt.width;
+ int height = gspca_dev->pixfmt.height;
u8 fmt, scale = 0;
jpeg_define(sd->jpeg_hdr, height, width,
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index d7ff3b9687c5..7277dbd2afcd 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -513,10 +513,7 @@ static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf)
if (gspca_dev->usb_buf[0] & 0x04) {
if (gspca_dev->usb_buf[0] & 0x08) {
dev_err(gspca_dev->v4l2_dev.dev,
- "i2c error writing %02x %02x %02x %02x"
- " %02x %02x %02x %02x\n",
- buf[0], buf[1], buf[2], buf[3],
- buf[4], buf[5], buf[6], buf[7]);
+ "i2c error writing %8ph\n", buf);
gspca_dev->usb_err = -EIO;
}
return;
@@ -753,7 +750,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
/* In 640x480, if the reg11 has less than 4, the image is
unstable (the bridge goes into a higher compression mode
which we have not reverse engineered yet). */
- if (gspca_dev->width == 640 && reg11 < 4)
+ if (gspca_dev->pixfmt.width == 640 && reg11 < 4)
reg11 = 4;
/* frame exposure time in ms = 1000 * reg11 / 30 ->
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index 3b5ccb1c4cdf..c69b45d7cfbf 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -2204,7 +2204,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
{ 0x14, 0xe7, 0x1e, 0xdd };
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x21); /* JPEG 422 */
/* initialize the bridge */
diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c
index 688592b289ea..f38fd8949609 100644
--- a/drivers/media/usb/gspca/spca1528.c
+++ b/drivers/media/usb/gspca/spca1528.c
@@ -255,7 +255,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
/* initialize the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x22); /* JPEG 411 */
/* the JPEG quality shall be 85% */
diff --git a/drivers/media/usb/gspca/spca500.c b/drivers/media/usb/gspca/spca500.c
index 9f8bf51fd64b..f011a309dd65 100644
--- a/drivers/media/usb/gspca/spca500.c
+++ b/drivers/media/usb/gspca/spca500.c
@@ -608,7 +608,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
__u8 xmult, ymult;
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x22); /* JPEG 411 */
jpeg_set_qual(sd->jpeg_hdr, QUALITY);
diff --git a/drivers/media/usb/gspca/sq905c.c b/drivers/media/usb/gspca/sq905c.c
index acb19fb9a3df..aa21edc9502d 100644
--- a/drivers/media/usb/gspca/sq905c.c
+++ b/drivers/media/usb/gspca/sq905c.c
@@ -272,7 +272,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
dev->cap_mode = gspca_dev->cam.cam_mode;
/* "Open the shutter" and set size, to start capture */
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 640:
PDEBUG(D_STREAM, "Start streaming at high resolution");
dev->cap_mode++;
diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c
index b10d0821111c..e274cf19a3ea 100644
--- a/drivers/media/usb/gspca/sq930x.c
+++ b/drivers/media/usb/gspca/sq930x.c
@@ -906,7 +906,8 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
gspca_dev->cam.bulk_nurbs = 1; /* there must be one URB only */
sd->do_ctrl = 0;
- gspca_dev->cam.bulk_size = gspca_dev->width * gspca_dev->height + 8;
+ gspca_dev->cam.bulk_size = gspca_dev->pixfmt.width *
+ gspca_dev->pixfmt.height + 8;
return 0;
}
diff --git a/drivers/media/usb/gspca/stk014.c b/drivers/media/usb/gspca/stk014.c
index 8c0982607f25..b0c70fea760b 100644
--- a/drivers/media/usb/gspca/stk014.c
+++ b/drivers/media/usb/gspca/stk014.c
@@ -250,7 +250,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
int ret, value;
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x22); /* JPEG 411 */
jpeg_set_qual(sd->jpeg_hdr, QUALITY);
@@ -261,7 +262,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
set_par(gspca_dev, 0x00000000);
set_par(gspca_dev, 0x8002e001);
set_par(gspca_dev, 0x14000000);
- if (gspca_dev->width > 320)
+ if (gspca_dev->pixfmt.width > 320)
value = 0x8002e001; /* 640x480 */
else
value = 0x4001f000; /* 320x240 */
diff --git a/drivers/media/usb/gspca/stk1135.c b/drivers/media/usb/gspca/stk1135.c
index 585868835ace..1fc80af2a189 100644
--- a/drivers/media/usb/gspca/stk1135.c
+++ b/drivers/media/usb/gspca/stk1135.c
@@ -48,42 +48,11 @@ struct sd {
};
static const struct v4l2_pix_format stk1135_modes[] = {
- {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 160,
- .sizeimage = 160 * 120,
- .colorspace = V4L2_COLORSPACE_SRGB},
- {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 176,
- .sizeimage = 176 * 144,
- .colorspace = V4L2_COLORSPACE_SRGB},
- {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 320,
- .sizeimage = 320 * 240,
- .colorspace = V4L2_COLORSPACE_SRGB},
- {352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 352,
- .sizeimage = 352 * 288,
- .colorspace = V4L2_COLORSPACE_SRGB},
+ /* default mode (this driver supports variable resolution) */
{640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480,
.colorspace = V4L2_COLORSPACE_SRGB},
- {720, 576, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 720,
- .sizeimage = 720 * 576,
- .colorspace = V4L2_COLORSPACE_SRGB},
- {800, 600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 800,
- .sizeimage = 800 * 600,
- .colorspace = V4L2_COLORSPACE_SRGB},
- {1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 1024,
- .sizeimage = 1024 * 768,
- .colorspace = V4L2_COLORSPACE_SRGB},
- {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 1280,
- .sizeimage = 1280 * 1024,
- .colorspace = V4L2_COLORSPACE_SRGB},
};
/* -- read a register -- */
@@ -347,16 +316,16 @@ static void stk1135_configure_mt9m112(struct gspca_dev *gspca_dev)
sensor_write(gspca_dev, cfg[i].reg, cfg[i].val);
/* set output size */
- width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width;
- height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height;
- if (width <= 640) { /* use context A (half readout speed by default) */
+ width = gspca_dev->pixfmt.width;
+ height = gspca_dev->pixfmt.height;
+ if (width <= 640 && height <= 512) { /* context A (half readout speed)*/
sensor_write(gspca_dev, 0x1a7, width);
sensor_write(gspca_dev, 0x1aa, height);
/* set read mode context A */
sensor_write(gspca_dev, 0x0c8, 0x0000);
/* set resize, read mode, vblank, hblank context A */
sensor_write(gspca_dev, 0x2c8, 0x0000);
- } else { /* use context B (full readout speed by default) */
+ } else { /* context B (full readout speed) */
sensor_write(gspca_dev, 0x1a1, width);
sensor_write(gspca_dev, 0x1a4, height);
/* set read mode context B */
@@ -484,8 +453,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, STK1135_REG_CISPO + 3, 0x00);
/* set capture end position */
- width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width;
- height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height;
+ width = gspca_dev->pixfmt.width;
+ height = gspca_dev->pixfmt.height;
reg_w(gspca_dev, STK1135_REG_CIEPO + 0, width & 0xff);
reg_w(gspca_dev, STK1135_REG_CIEPO + 1, width >> 8);
reg_w(gspca_dev, STK1135_REG_CIEPO + 2, height & 0xff);
@@ -643,6 +612,35 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
return 0;
}
+static void stk1135_try_fmt(struct gspca_dev *gspca_dev, struct v4l2_format *fmt)
+{
+ fmt->fmt.pix.width = clamp(fmt->fmt.pix.width, 32U, 1280U);
+ fmt->fmt.pix.height = clamp(fmt->fmt.pix.height, 32U, 1024U);
+ /* round up to even numbers */
+ fmt->fmt.pix.width += (fmt->fmt.pix.width & 1);
+ fmt->fmt.pix.height += (fmt->fmt.pix.height & 1);
+
+ fmt->fmt.pix.bytesperline = fmt->fmt.pix.width;
+ fmt->fmt.pix.sizeimage = fmt->fmt.pix.width * fmt->fmt.pix.height;
+}
+
+static int stk1135_enum_framesizes(struct gspca_dev *gspca_dev,
+ struct v4l2_frmsizeenum *fsize)
+{
+ if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_SBGGR8)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = 32;
+ fsize->stepwise.min_height = 32;
+ fsize->stepwise.max_width = 1280;
+ fsize->stepwise.max_height = 1024;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.step_height = 2;
+
+ return 0;
+}
+
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
@@ -653,6 +651,8 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
.dq_callback = stk1135_dq_callback,
+ .try_fmt = stk1135_try_fmt,
+ .enum_framesizes = stk1135_enum_framesizes,
};
/* -- module initialisation -- */
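With the stepwise enum_framesizes and try_fmt above, stk1135 now accepts any even capture size between 32x32 and 1280x1024 instead of the fixed mode list it previously advertised. A rough userspace sketch (the sizes are only examples, fd is assumed to be an already-open V4L2 capture node, error handling trimmed):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int request_custom_size(int fd)
{
	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };

	fmt.fmt.pix.width = 1000;	/* any even value in range */
	fmt.fmt.pix.height = 700;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
	fmt.fmt.pix.field = V4L2_FIELD_NONE;

	/* the driver's try_fmt clamps/rounds and fills bytesperline */
	return ioctl(fd, VIDIOC_S_FMT, &fmt);
}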
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
index 55ee7a61c67f..49d209bbf9ee 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
@@ -452,7 +452,7 @@ frame_data:
NULL, 0);
if (sd->bridge == BRIDGE_ST6422)
- sd->to_skip = gspca_dev->width * 4;
+ sd->to_skip = gspca_dev->pixfmt.width * 4;
if (chunk_len)
PERR("Chunk length is "
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
index 8206b7743300..8d785edcccf2 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
@@ -421,7 +421,7 @@ static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val)
/* Number of pixels counted by the sensor when subsampling the pixels.
* Slightly larger than the real value to avoid oscillation */
- totalpixels = gspca_dev->width * gspca_dev->height;
+ totalpixels = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
totalpixels = totalpixels/(8*8) + totalpixels/(64*64);
brightpixels = (totalpixels * val) >> 8;
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index af8767a9bd4c..a517d185febe 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -715,7 +715,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
int enable;
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x22); /* JPEG 411 */
jpeg_set_qual(sd->jpeg_hdr, QUALITY);
diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
index 4cb511ccc5f6..640c2fe760b3 100644
--- a/drivers/media/usb/gspca/topro.c
+++ b/drivers/media/usb/gspca/topro.c
@@ -3856,7 +3856,7 @@ static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
if (sd->bridge == BRIDGE_TP6800) {
val |= 0x08; /* grid compensation enable */
- if (gspca_dev->width == 640)
+ if (gspca_dev->pixfmt.width == 640)
reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */
else
val |= 0x04; /* scaling down enable */
@@ -3880,7 +3880,7 @@ static void set_resolution(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00);
- if (gspca_dev->width == 320) {
+ if (gspca_dev->pixfmt.width == 320) {
reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x06);
msleep(100);
i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01);
@@ -3924,7 +3924,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev)
/* 640x480 * 30 fps does not work */
if (i == 6 /* if 30 fps */
- && gspca_dev->width == 640)
+ && gspca_dev->pixfmt.width == 640)
i = 0x05; /* 15 fps */
} else {
for (i = 0; i < ARRAY_SIZE(rates_6810) - 1; i++) {
@@ -3935,7 +3935,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev)
/* 640x480 * 30 fps does not work */
if (i == 7 /* if 30 fps */
- && gspca_dev->width == 640)
+ && gspca_dev->pixfmt.width == 640)
i = 6; /* 15 fps */
i |= 0x80; /* clock * 1 */
}
@@ -4554,7 +4554,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width);
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width);
set_dqt(gspca_dev, sd->quality);
if (sd->bridge == BRIDGE_TP6800) {
if (sd->sensor == SENSOR_CX0342)
@@ -4737,7 +4738,7 @@ static void sd_dq_callback(struct gspca_dev *gspca_dev)
(gspca_dev->usb_buf[26] << 8) + gspca_dev->usb_buf[25] +
(gspca_dev->usb_buf[29] << 8) + gspca_dev->usb_buf[28])
/ 8;
- if (gspca_dev->width == 640)
+ if (gspca_dev->pixfmt.width == 640)
luma /= 4;
reg_w(gspca_dev, 0x7d, 0x00);
diff --git a/drivers/media/usb/gspca/tv8532.c b/drivers/media/usb/gspca/tv8532.c
index 8591324a53e1..d497ba38af0d 100644
--- a/drivers/media/usb/gspca/tv8532.c
+++ b/drivers/media/usb/gspca/tv8532.c
@@ -268,7 +268,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
packet_type0 = packet_type1 = INTER_PACKET;
if (gspca_dev->empty_packet) {
gspca_dev->empty_packet = 0;
- sd->packet = gspca_dev->height / 2;
+ sd->packet = gspca_dev->pixfmt.height / 2;
packet_type0 = FIRST_PACKET;
} else if (sd->packet == 0)
return; /* 2 more lines in 352x288 ! */
@@ -284,9 +284,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
* - 4 bytes
*/
gspca_frame_add(gspca_dev, packet_type0,
- data + 2, gspca_dev->width);
+ data + 2, gspca_dev->pixfmt.width);
gspca_frame_add(gspca_dev, packet_type1,
- data + gspca_dev->width + 5, gspca_dev->width);
+ data + gspca_dev->pixfmt.width + 5,
+ gspca_dev->pixfmt.width);
}
static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c
index a2275cfe0b81..103f6c4236b0 100644
--- a/drivers/media/usb/gspca/vicam.c
+++ b/drivers/media/usb/gspca/vicam.c
@@ -121,13 +121,13 @@ static int vicam_read_frame(struct gspca_dev *gspca_dev, u8 *data, int size)
memset(req_data, 0, 16);
req_data[0] = gain;
- if (gspca_dev->width == 256)
+ if (gspca_dev->pixfmt.width == 256)
req_data[1] |= 0x01; /* low nibble x-scale */
- if (gspca_dev->height <= 122) {
+ if (gspca_dev->pixfmt.height <= 122) {
req_data[1] |= 0x10; /* high nibble y-scale */
- unscaled_height = gspca_dev->height * 2;
+ unscaled_height = gspca_dev->pixfmt.height * 2;
} else
- unscaled_height = gspca_dev->height;
+ unscaled_height = gspca_dev->pixfmt.height;
req_data[2] = 0x90; /* unknown, does not seem to do anything */
if (unscaled_height <= 200)
req_data[3] = 0x06; /* vend? */
diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c
index 2165da0c7ce1..fb9fe2ef3a6f 100644
--- a/drivers/media/usb/gspca/w996Xcf.c
+++ b/drivers/media/usb/gspca/w996Xcf.c
@@ -430,11 +430,11 @@ static void w9968cf_set_crop_window(struct sd *sd)
#define SC(x) ((x) << 10)
/* Scaling factors */
- fw = SC(sd->gspca_dev.width) / max_width;
- fh = SC(sd->gspca_dev.height) / max_height;
+ fw = SC(sd->gspca_dev.pixfmt.width) / max_width;
+ fh = SC(sd->gspca_dev.pixfmt.height) / max_height;
- cw = (fw >= fh) ? max_width : SC(sd->gspca_dev.width) / fh;
- ch = (fw >= fh) ? SC(sd->gspca_dev.height) / fw : max_height;
+ cw = (fw >= fh) ? max_width : SC(sd->gspca_dev.pixfmt.width) / fh;
+ ch = (fw >= fh) ? SC(sd->gspca_dev.pixfmt.height) / fw : max_height;
sd->sensor_width = max_width;
sd->sensor_height = max_height;
@@ -454,34 +454,34 @@ static void w9968cf_mode_init_regs(struct sd *sd)
w9968cf_set_crop_window(sd);
- reg_w(sd, 0x14, sd->gspca_dev.width);
- reg_w(sd, 0x15, sd->gspca_dev.height);
+ reg_w(sd, 0x14, sd->gspca_dev.pixfmt.width);
+ reg_w(sd, 0x15, sd->gspca_dev.pixfmt.height);
/* JPEG width & height */
- reg_w(sd, 0x30, sd->gspca_dev.width);
- reg_w(sd, 0x31, sd->gspca_dev.height);
+ reg_w(sd, 0x30, sd->gspca_dev.pixfmt.width);
+ reg_w(sd, 0x31, sd->gspca_dev.pixfmt.height);
/* Y & UV frame buffer strides (in WORD) */
if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat ==
V4L2_PIX_FMT_JPEG) {
- reg_w(sd, 0x2c, sd->gspca_dev.width / 2);
- reg_w(sd, 0x2d, sd->gspca_dev.width / 4);
+ reg_w(sd, 0x2c, sd->gspca_dev.pixfmt.width / 2);
+ reg_w(sd, 0x2d, sd->gspca_dev.pixfmt.width / 4);
} else
- reg_w(sd, 0x2c, sd->gspca_dev.width);
+ reg_w(sd, 0x2c, sd->gspca_dev.pixfmt.width);
reg_w(sd, 0x00, 0xbf17); /* reset everything */
reg_w(sd, 0x00, 0xbf10); /* normal operation */
/* Transfer size in WORDS (for UYVY format only) */
- val = sd->gspca_dev.width * sd->gspca_dev.height;
+ val = sd->gspca_dev.pixfmt.width * sd->gspca_dev.pixfmt.height;
reg_w(sd, 0x3d, val & 0xffff); /* low bits */
reg_w(sd, 0x3e, val >> 16); /* high bits */
if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat ==
V4L2_PIX_FMT_JPEG) {
/* We may get called multiple times (usb isoc bw negotiat.) */
- jpeg_define(sd->jpeg_hdr, sd->gspca_dev.height,
- sd->gspca_dev.width, 0x22); /* JPEG 420 */
+ jpeg_define(sd->jpeg_hdr, sd->gspca_dev.pixfmt.height,
+ sd->gspca_dev.pixfmt.width, 0x22); /* JPEG 420 */
jpeg_set_qual(sd->jpeg_hdr, v4l2_ctrl_g_ctrl(sd->jpegqual));
w9968cf_upload_quantizationtables(sd);
v4l2_ctrl_grab(sd->jpegqual, true);
diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c
index 7eaf64eb867c..a41aa7817c54 100644
--- a/drivers/media/usb/gspca/xirlink_cit.c
+++ b/drivers/media/usb/gspca/xirlink_cit.c
@@ -1471,14 +1471,14 @@ static int cit_get_clock_div(struct gspca_dev *gspca_dev)
while (clock_div > 3 &&
1000 * packet_size >
- gspca_dev->width * gspca_dev->height *
+ gspca_dev->pixfmt.width * gspca_dev->pixfmt.height *
fps[clock_div - 1] * 3 / 2)
clock_div--;
PDEBUG(D_PROBE,
"PacketSize: %d, res: %dx%d -> using clockdiv: %d (%d fps)",
- packet_size, gspca_dev->width, gspca_dev->height, clock_div,
- fps[clock_div]);
+ packet_size, gspca_dev->pixfmt.width, gspca_dev->pixfmt.height,
+ clock_div, fps[clock_div]);
return clock_div;
}
@@ -1502,7 +1502,7 @@ static int cit_start_model0(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 0x0002, 0x0426);
cit_write_reg(gspca_dev, 0x0014, 0x0427);
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160: /* 160x120 */
cit_write_reg(gspca_dev, 0x0004, 0x010b);
cit_write_reg(gspca_dev, 0x0001, 0x010a);
@@ -1643,7 +1643,7 @@ static int cit_start_model1(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 0x00, 0x0101);
cit_write_reg(gspca_dev, 0x00, 0x010a);
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 128: /* 128x96 */
cit_write_reg(gspca_dev, 0x80, 0x0103);
cit_write_reg(gspca_dev, 0x60, 0x0105);
@@ -1700,7 +1700,7 @@ static int cit_start_model1(struct gspca_dev *gspca_dev)
}
/* Assorted init */
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 128: /* 128x96 */
cit_Packet_Format1(gspca_dev, 0x2b, 0x1e);
cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */
@@ -1753,7 +1753,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 0x0000, 0x0108);
cit_write_reg(gspca_dev, 0x0001, 0x0133);
cit_write_reg(gspca_dev, 0x0001, 0x0102);
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 176: /* 176x144 */
cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */
cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
@@ -1792,7 +1792,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 0x0000, 0x0100); /* LED on */
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 176: /* 176x144 */
cit_write_reg(gspca_dev, 0x0050, 0x0111);
cit_write_reg(gspca_dev, 0x00d0, 0x0111);
@@ -1840,7 +1840,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
* Magic control of CMOS sensor. Only lower values like
* 0-3 work, and picture shifts left or right. Don't change.
*/
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 176: /* 176x144 */
cit_model2_Packet1(gspca_dev, 0x0014, 0x0002);
cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */
@@ -1899,7 +1899,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
* does not allow arbitrary values and apparently is a bit mask, to
* be activated only at appropriate time. Don't change it randomly!
*/
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 176: /* 176x144 */
cit_model2_Packet1(gspca_dev, 0x0026, 0x00c2);
break;
@@ -2023,7 +2023,7 @@ static int cit_start_model3(struct gspca_dev *gspca_dev)
cit_model3_Packet1(gspca_dev, 0x009e, 0x0096);
cit_model3_Packet1(gspca_dev, 0x009f, 0x000a);
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160:
cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */
cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */
@@ -2134,7 +2134,7 @@ static int cit_start_model3(struct gspca_dev *gspca_dev)
like with the IBM netcam pro). */
cit_write_reg(gspca_dev, clock_div, 0x0111); /* Clock Divider */
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160:
cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */
cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */
@@ -2211,7 +2211,7 @@ static int cit_start_model4(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 0xfffa, 0x0124);
cit_model4_Packet1(gspca_dev, 0x0034, 0x0000);
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 128: /* 128x96 */
cit_write_reg(gspca_dev, 0x0070, 0x0119);
cit_write_reg(gspca_dev, 0x00d0, 0x0111);
@@ -2531,7 +2531,7 @@ static int cit_start_ibm_netcam_pro(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */
cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160: /* 160x120 */
cit_write_reg(gspca_dev, 0x0024, 0x010b);
cit_write_reg(gspca_dev, 0x0089, 0x0119);
@@ -2635,7 +2635,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
struct usb_host_interface *alt;
int max_packet_size;
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160:
max_packet_size = 450;
break;
@@ -2659,7 +2659,7 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev)
int ret, packet_size, min_packet_size;
struct usb_host_interface *alt;
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160:
min_packet_size = 200;
break;
@@ -2780,7 +2780,7 @@ static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len)
case CIT_MODEL1:
case CIT_MODEL3:
case CIT_IBM_NETCAM_PRO:
- switch (gspca_dev->width) {
+ switch (gspca_dev->pixfmt.width) {
case 160: /* 160x120 */
byte3 = 0x02;
byte4 = 0x0a;
@@ -2864,20 +2864,16 @@ static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len)
if (data[i] == 0xff) {
if (i >= 4)
PDEBUG(D_FRAM,
- "header found at offset: %d: %02x %02x 00 %02x %02x %02x\n",
+ "header found at offset: %d: %02x %02x 00 %3ph\n",
i - 1,
data[i - 4],
data[i - 3],
- data[i],
- data[i + 1],
- data[i + 2]);
+ &data[i]);
else
PDEBUG(D_FRAM,
- "header found at offset: %d: 00 %02x %02x %02x\n",
+ "header found at offset: %d: 00 %3ph\n",
i - 1,
- data[i],
- data[i + 1],
- data[i + 2]);
+ &data[i]);
return data + i + (sd->sof_len - 1);
}
break;
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index cbfc2f921427..7b95d8e88a20 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -6700,7 +6700,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
};
/* create the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+ gspca_dev->pixfmt.width,
0x21); /* JPEG 422 */
mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 6e5070774dc2..2f0c89cbac76 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -78,7 +78,8 @@ void hdpvr_delete(struct hdpvr_device *dev)
static void challenge(u8 *bytes)
{
- u64 *i64P, tmp64;
+ __le64 *i64P;
+ u64 tmp64;
uint i, idx;
for (idx = 0; idx < 32; ++idx) {
@@ -106,10 +107,10 @@ static void challenge(u8 *bytes)
for (i = 0; i < 3; i++)
bytes[1] *= bytes[6] + 1;
for (i = 0; i < 3; i++) {
- i64P = (u64 *)bytes;
+ i64P = (__le64 *)bytes;
tmp64 = le64_to_cpup(i64P);
- tmp64 <<= bytes[7] & 0x0f;
- *i64P += cpu_to_le64(tmp64);
+ tmp64 = tmp64 + (tmp64 << (bytes[7] & 0x0f));
+ *i64P = cpu_to_le64(tmp64);
}
break;
}
@@ -301,8 +302,6 @@ static int hdpvr_probe(struct usb_interface *interface,
goto error;
}
- dev->workqueue = 0;
-
/* init video transfer queues first of all */
/* to prevent oops in hdpvr_delete() on error paths */
INIT_LIST_HEAD(&dev->free_buff_list);
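The challenge() change above is an endianness fix: the old code added a cpu_to_le64() value directly into storage that is defined as little-endian, which only happens to work on little-endian CPUs and is not sparse-clean. The pattern it switches to, as a standalone sketch (helper name is illustrative; kernel byteorder helpers assumed):

#include <linux/types.h>
#include <asm/byteorder.h>

/* add (v << shift) to a little-endian 64-bit field, endian-safely */
static inline void le64_add_shifted(__le64 *p, unsigned int shift)
{
	u64 v = le64_to_cpu(*p);	/* LE storage -> CPU order */

	v += v << shift;		/* arithmetic in CPU order */
	*p = cpu_to_le64(v);		/* back to LE storage */
}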
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index c4d51d78f837..ea05f678b559 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -2868,7 +2868,7 @@ static void pvr2_subdev_set_control(struct pvr2_hdw *hdw, int id,
pvr2_subdev_set_control(hdw, id, #lab, (hdw)->lab##_val); \
}
-v4l2_std_id pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw)
+static v4l2_std_id pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw)
{
v4l2_std_id std;
std = (v4l2_std_id)hdw->std_mask_avail;
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 03761c6f472f..05bd91a60c09 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -209,8 +209,10 @@ static int smsusb_sendrequest(void *context, void *buffer, size_t size)
struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
int dummy;
- if (dev->state != SMSUSB_ACTIVE)
+ if (dev->state != SMSUSB_ACTIVE) {
+ sms_debug("Device not active yet");
return -ENOENT;
+ }
sms_debug("sending %s(%d) size: %d",
smscore_translate_msg(phdr->msg_type), phdr->msg_type,
@@ -243,6 +245,9 @@ static int smsusb1_load_firmware(struct usb_device *udev, int id, int board_id)
int rc, dummy;
char *fw_filename;
+ if (id < 0)
+ id = sms_get_board(board_id)->default_mode;
+
if (id < DEVICE_MODE_DVBT || id > DEVICE_MODE_DVBT_BDA) {
sms_err("invalid firmware id specified %d", id);
return -EINVAL;
@@ -445,14 +450,15 @@ static int smsusb_probe(struct usb_interface *intf,
char devpath[32];
int i, rc;
- sms_info("interface number %d",
+ sms_info("board id=%lu, interface number %d",
+ id->driver_info,
intf->cur_altsetting->desc.bInterfaceNumber);
if (sms_get_board(id->driver_info)->intf_num !=
intf->cur_altsetting->desc.bInterfaceNumber) {
- sms_err("interface number is %d expecting %d",
- sms_get_board(id->driver_info)->intf_num,
- intf->cur_altsetting->desc.bInterfaceNumber);
+ sms_debug("interface %d won't be used. Expecting interface %d to popup",
+ intf->cur_altsetting->desc.bInterfaceNumber,
+ sms_get_board(id->driver_info)->intf_num);
return -ENODEV;
}
@@ -483,22 +489,32 @@ static int smsusb_probe(struct usb_interface *intf,
}
if ((udev->actconfig->desc.bNumInterfaces == 2) &&
(intf->cur_altsetting->desc.bInterfaceNumber == 0)) {
- sms_err("rom interface 0 is not used");
+ sms_debug("rom interface 0 is not used");
return -ENODEV;
}
if (id->driver_info == SMS1XXX_BOARD_SIANO_STELLAR_ROM) {
- sms_info("stellar device was found.");
+ /* Detected an uninitialized Siano Stellar */
+
snprintf(devpath, sizeof(devpath), "usb\\%d-%s",
udev->bus->busnum, udev->devpath);
- sms_info("stellar device was found.");
- return smsusb1_load_firmware(
+ sms_info("stellar device in cold state was found at %s.", devpath);
+ rc = smsusb1_load_firmware(
udev, smscore_registry_getmode(devpath),
id->driver_info);
+
+ /* This device will reset and gain another USB ID */
+ if (!rc)
+ sms_info("stellar device now in warm state");
+ else
+ sms_err("Failed to put stellar in warm state. Error: %d", rc);
+
+ return rc;
+ } else {
+ rc = smsusb_init_device(intf, id->driver_info);
}
- rc = smsusb_init_device(intf, id->driver_info);
- sms_info("rc %d", rc);
+ sms_info("Device initialized with return code %d", rc);
sms_board_load_modules(id->driver_info);
return rc;
}
@@ -550,10 +566,13 @@ static int smsusb_resume(struct usb_interface *intf)
}
static const struct usb_device_id smsusb_id_table[] = {
+ /* This device is only present before firmware load */
{ USB_DEVICE(0x187f, 0x0010),
- .driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
+ .driver_info = SMS1XXX_BOARD_SIANO_STELLAR_ROM },
+ /* This device pops up after firmware load */
{ USB_DEVICE(0x187f, 0x0100),
.driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
+
{ USB_DEVICE(0x187f, 0x0200),
.driver_info = SMS1XXX_BOARD_SIANO_NOVA_A },
{ USB_DEVICE(0x187f, 0x0201),
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index c43c8d32be40..be77482c3070 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -111,6 +111,13 @@ static const struct dmi_system_id stk_upside_down_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "F3JC")
}
},
+ {
+ .ident = "T12Rg-H",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HCL Infosystems Limited"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T12Rg-H")
+ }
+ },
{}
};
diff --git a/drivers/media/usb/tlg2300/pd-main.c b/drivers/media/usb/tlg2300/pd-main.c
index 95f94e5aa66d..3316caa4733b 100644
--- a/drivers/media/usb/tlg2300/pd-main.c
+++ b/drivers/media/usb/tlg2300/pd-main.c
@@ -232,7 +232,7 @@ static int firmware_download(struct usb_device *udev)
goto out;
}
- max_packet_size = udev->ep_out[0x1]->desc.wMaxPacketSize;
+ max_packet_size = le16_to_cpu(udev->ep_out[0x1]->desc.wMaxPacketSize);
log("\t\t download size : %d", (int)max_packet_size);
for (offset = 0; offset < fwlength; offset += max_packet_size) {
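wMaxPacketSize is a little-endian field in the USB endpoint descriptor, hence the le16_to_cpu() above; without it the size is byte-swapped on big-endian hosts. Assuming the usb_endpoint_maxp() helper from <linux/usb/ch9.h> is available in this tree, an equivalent spelling would be:

	max_packet_size = usb_endpoint_maxp(&udev->ep_out[0x1]->desc);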
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index e52c3b97f304..29724af9b9ab 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -366,7 +366,7 @@ static int ttusb_dec_get_stb_state (struct ttusb_dec *dec, unsigned int *mode,
}
return 0;
} else {
- return -1;
+ return -ENOENT;
}
}
@@ -1241,6 +1241,8 @@ static void ttusb_dec_init_v_pes(struct ttusb_dec *dec)
static int ttusb_dec_init_usb(struct ttusb_dec *dec)
{
+ int result;
+
dprintk("%s\n", __func__);
mutex_init(&dec->usb_mutex);
@@ -1258,7 +1260,7 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec)
return -ENOMEM;
}
dec->irq_buffer = usb_alloc_coherent(dec->udev,IRQ_PACKET_SIZE,
- GFP_ATOMIC, &dec->irq_dma_handle);
+ GFP_KERNEL, &dec->irq_dma_handle);
if(!dec->irq_buffer) {
usb_free_urb(dec->irq_urb);
return -ENOMEM;
@@ -1270,7 +1272,13 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec)
dec->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
}
- return ttusb_dec_alloc_iso_urbs(dec);
+ result = ttusb_dec_alloc_iso_urbs(dec);
+ if (result) {
+ usb_free_urb(dec->irq_urb);
+ usb_free_coherent(dec->udev, IRQ_PACKET_SIZE,
+ dec->irq_buffer, dec->irq_dma_handle);
+ }
+ return result;
}
static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
@@ -1293,10 +1301,11 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
dprintk("%s\n", __func__);
- if (request_firmware(&fw_entry, dec->firmware_name, &dec->udev->dev)) {
+ result = request_firmware(&fw_entry, dec->firmware_name, &dec->udev->dev);
+ if (result) {
printk(KERN_ERR "%s: Firmware (%s) unavailable.\n",
__func__, dec->firmware_name);
- return 1;
+ return result;
}
firmware = fw_entry->data;
@@ -1306,7 +1315,7 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
printk("%s: firmware size too small for DSP code (%zu < 60).\n",
__func__, firmware_size);
release_firmware(fw_entry);
- return -1;
+ return -ENOENT;
}
/* a 32 bit checksum over the first 56 bytes of the DSP Code is stored
@@ -1320,7 +1329,7 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
"0x%08x != 0x%08x in file), file invalid.\n",
__func__, crc32_csum, crc32_check);
release_firmware(fw_entry);
- return -1;
+ return -ENOENT;
}
memcpy(idstring, &firmware[36], 20);
idstring[20] = '\0';
@@ -1389,55 +1398,48 @@ static int ttusb_dec_init_stb(struct ttusb_dec *dec)
dprintk("%s\n", __func__);
result = ttusb_dec_get_stb_state(dec, &mode, &model, &version);
+ if (result)
+ return result;
- if (!result) {
- if (!mode) {
- if (version == 0xABCDEFAB)
- printk(KERN_INFO "ttusb_dec: no version "
- "info in Firmware\n");
- else
- printk(KERN_INFO "ttusb_dec: Firmware "
- "%x.%02x%c%c\n",
- version >> 24, (version >> 16) & 0xff,
- (version >> 8) & 0xff, version & 0xff);
-
- result = ttusb_dec_boot_dsp(dec);
- if (result)
- return result;
- else
- return 1;
- } else {
- /* We can't trust the USB IDs that some firmwares
- give the box */
- switch (model) {
- case 0x00070001:
- case 0x00070008:
- case 0x0007000c:
- ttusb_dec_set_model(dec, TTUSB_DEC3000S);
- break;
- case 0x00070009:
- case 0x00070013:
- ttusb_dec_set_model(dec, TTUSB_DEC2000T);
- break;
- case 0x00070011:
- ttusb_dec_set_model(dec, TTUSB_DEC2540T);
- break;
- default:
- printk(KERN_ERR "%s: unknown model returned "
- "by firmware (%08x) - please report\n",
- __func__, model);
- return -1;
- break;
- }
+ if (!mode) {
+ if (version == 0xABCDEFAB)
+ printk(KERN_INFO "ttusb_dec: no version "
+ "info in Firmware\n");
+ else
+ printk(KERN_INFO "ttusb_dec: Firmware "
+ "%x.%02x%c%c\n",
+ version >> 24, (version >> 16) & 0xff,
+ (version >> 8) & 0xff, version & 0xff);
+ result = ttusb_dec_boot_dsp(dec);
+ if (result)
+ return result;
+ } else {
+ /* We can't trust the USB IDs that some firmwares
+ give the box */
+ switch (model) {
+ case 0x00070001:
+ case 0x00070008:
+ case 0x0007000c:
+ ttusb_dec_set_model(dec, TTUSB_DEC3000S);
+ break;
+ case 0x00070009:
+ case 0x00070013:
+ ttusb_dec_set_model(dec, TTUSB_DEC2000T);
+ break;
+ case 0x00070011:
+ ttusb_dec_set_model(dec, TTUSB_DEC2540T);
+ break;
+ default:
+ printk(KERN_ERR "%s: unknown model returned "
+ "by firmware (%08x) - please report\n",
+ __func__, model);
+ return -ENOENT;
+ }
if (version >= 0x01770000)
dec->can_playback = 1;
-
- return 0;
- }
}
- else
- return result;
+ return 0;
}
static int ttusb_dec_init_dvb(struct ttusb_dec *dec)
@@ -1539,19 +1541,7 @@ static void ttusb_dec_exit_dvb(struct ttusb_dec *dec)
static void ttusb_dec_exit_rc(struct ttusb_dec *dec)
{
-
dprintk("%s\n", __func__);
- /* we have to check whether the irq URB is already submitted.
- * As the irq is submitted after the interface is changed,
- * this is the best method i figured out.
- * Any others?*/
- if (dec->interface == TTUSB_DEC_INTERFACE_IN)
- usb_kill_urb(dec->irq_urb);
-
- usb_free_urb(dec->irq_urb);
-
- usb_free_coherent(dec->udev,IRQ_PACKET_SIZE,
- dec->irq_buffer, dec->irq_dma_handle);
if (dec->rc_input_dev) {
input_unregister_device(dec->rc_input_dev);
@@ -1566,6 +1556,20 @@ static void ttusb_dec_exit_usb(struct ttusb_dec *dec)
dprintk("%s\n", __func__);
+ if (enable_rc) {
+ /* we have to check whether the irq URB is already submitted.
+ * As the irq is submitted after the interface is changed,
+ * this is the best method i figured out.
+ * Any others?*/
+ if (dec->interface == TTUSB_DEC_INTERFACE_IN)
+ usb_kill_urb(dec->irq_urb);
+
+ usb_free_urb(dec->irq_urb);
+
+ usb_free_coherent(dec->udev, IRQ_PACKET_SIZE,
+ dec->irq_buffer, dec->irq_dma_handle);
+ }
+
dec->iso_stream_count = 0;
for (i = 0; i < ISO_BUF_COUNT; i++)
@@ -1623,6 +1627,7 @@ static int ttusb_dec_probe(struct usb_interface *intf,
{
struct usb_device *udev;
struct ttusb_dec *dec;
+ int result;
dprintk("%s\n", __func__);
@@ -1651,13 +1656,15 @@ static int ttusb_dec_probe(struct usb_interface *intf,
dec->udev = udev;
- if (ttusb_dec_init_usb(dec))
- return 0;
- if (ttusb_dec_init_stb(dec)) {
- ttusb_dec_exit_usb(dec);
- return 0;
- }
- ttusb_dec_init_dvb(dec);
+ result = ttusb_dec_init_usb(dec);
+ if (result)
+ goto err_usb;
+ result = ttusb_dec_init_stb(dec);
+ if (result)
+ goto err_stb;
+ result = ttusb_dec_init_dvb(dec);
+ if (result)
+ goto err_stb;
dec->adapter.priv = dec;
switch (id->idProduct) {
@@ -1696,6 +1703,11 @@ static int ttusb_dec_probe(struct usb_interface *intf,
ttusb_init_rc(dec);
return 0;
+err_stb:
+ ttusb_dec_exit_usb(dec);
+err_usb:
+ kfree(dec);
+ return result;
}
static void ttusb_dec_disconnect(struct usb_interface *intf)
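The probe rework above replaces the old silent "return 0" failure paths with the usual goto-based unwind, so that a failing ttusb_dec_init_stb() or ttusb_dec_init_dvb() releases what ttusb_dec_init_usb() allocated and the real error code reaches the USB core. A generic sketch of the idiom, with invented names rather than this driver's:

#include <linux/device.h>
#include <linux/slab.h>

struct example_state { void *hw };	/* hypothetical per-device state */
static int example_init_hw(struct example_state *st);
static int example_register(struct example_state *st);
static void example_exit_hw(struct example_state *st);

static int example_probe(struct device *dev)
{
	struct example_state *st;
	int ret;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = example_init_hw(st);	/* hypothetical step 1 */
	if (ret)
		goto err_free;

	ret = example_register(st);	/* hypothetical step 2 */
	if (ret)
		goto err_hw;

	dev_set_drvdata(dev, st);
	return 0;

err_hw:
	example_exit_hw(st);		/* undo step 1 only */
err_free:
	kfree(st);
	return ret;			/* propagate the real error code */
}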
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index a2f4501c23ca..0eb82106d2ff 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -664,7 +664,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.size = 32,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
- .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
+ .data_type = UVC_CTRL_DATA_TYPE_SIGNED,
},
{
.id = V4L2_CID_TILT_ABSOLUTE,
@@ -674,7 +674,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.size = 32,
.offset = 32,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
- .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
+ .data_type = UVC_CTRL_DATA_TYPE_SIGNED,
},
{
.id = V4L2_CID_PRIVACY,
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 81695d48c13e..c3bb2502225b 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2090,6 +2090,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_MINMAX },
+ /* Microsoft Lifecam NX-3000 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x045e,
+ .idProduct = 0x0721,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_PROBE_DEF },
/* Microsoft Lifecam VX-7000 */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2174,6 +2183,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_DEF },
+ /* Dell SP2008WFP Monitor */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x05a9,
+ .idProduct = 0x2641,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_PROBE_DEF },
/* Dell Alienware X51 */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 3394c3432011..27006811d866 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -680,7 +680,7 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
stream->dev->name,
sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
y, ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC,
- v4l2_buf->timestamp.tv_sec, v4l2_buf->timestamp.tv_usec,
+ v4l2_buf->timestamp.tv_sec, (unsigned long)v4l2_buf->timestamp.tv_usec,
x1, first->host_sof, first->dev_sof,
x2, last->host_sof, last->dev_sof, y1, y2);
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index ddc9379eb276..20c09229a08e 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -43,7 +43,7 @@
#define UNSET (-1U)
-#define PREFIX (t->i2c->driver->driver.name)
+#define PREFIX (t->i2c->dev.driver->name)
/*
* Driver modprobe parameters
@@ -247,7 +247,7 @@ static const struct analog_demod_ops tuner_analog_ops = {
/**
* set_type - Sets the tuner type for a given device
*
- * @c: i2c_client descriptoy
+ * @c: i2c_client descriptor
* @type: type of the tuner (e. g. tuner number)
* @new_mode_mask: Indicates if tuner supports TV and/or Radio
* @new_config: an optional parameter used by a few tuners to adjust
@@ -452,7 +452,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
}
tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
- c->adapter->name, c->driver->driver.name, c->addr << 1, type,
+ c->adapter->name, c->dev.driver->name, c->addr << 1, type,
t->mode_mask);
return;
@@ -556,7 +556,7 @@ static void tuner_lookup(struct i2c_adapter *adap,
int mode_mask;
if (pos->i2c->adapter != adap ||
- strcmp(pos->i2c->driver->driver.name, "tuner"))
+ strcmp(pos->i2c->dev.driver->name, "tuner"))
continue;
mode_mask = pos->mode_mask;
diff --git a/drivers/media/v4l2-core/v4l2-clk.c b/drivers/media/v4l2-core/v4l2-clk.c
index b67de8642b5a..e18cc0469cf8 100644
--- a/drivers/media/v4l2-core/v4l2-clk.c
+++ b/drivers/media/v4l2-core/v4l2-clk.c
@@ -240,3 +240,42 @@ void v4l2_clk_unregister(struct v4l2_clk *clk)
kfree(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister);
+
+struct v4l2_clk_fixed {
+ unsigned long rate;
+ struct v4l2_clk_ops ops;
+};
+
+static unsigned long fixed_get_rate(struct v4l2_clk *clk)
+{
+ struct v4l2_clk_fixed *priv = clk->priv;
+ return priv->rate;
+}
+
+struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
+ const char *id, unsigned long rate, struct module *owner)
+{
+ struct v4l2_clk *clk;
+ struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->rate = rate;
+ priv->ops.get_rate = fixed_get_rate;
+ priv->ops.owner = owner;
+
+ clk = v4l2_clk_register(&priv->ops, dev_id, id, priv);
+ if (IS_ERR(clk))
+ kfree(priv);
+
+ return clk;
+}
+EXPORT_SYMBOL(__v4l2_clk_register_fixed);
+
+void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
+{
+ kfree(clk->priv);
+ v4l2_clk_unregister(clk);
+}
+EXPORT_SYMBOL(v4l2_clk_unregister_fixed);
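The two helpers added above let a host or bridge driver publish a constant-rate clock that a sensor subdevice can then consume through the ordinary v4l2_clk_get()/v4l2_clk_get_rate() calls already provided by this file. A hedged usage sketch: the device name "1-003c" and the 24 MHz rate are placeholders, and the lookup is assumed to match on the consumer's device name as with other v4l2 clocks.

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <media/v4l2-clk.h>

static struct v4l2_clk *example_mclk;

/* Host/bridge side: register a fixed 24 MHz "mclk" for the sensor at
 * I2C bus 1, address 0x3c. */
static int example_register_sensor_clk(void)
{
	example_mclk = __v4l2_clk_register_fixed("1-003c", "mclk",
						 24000000, THIS_MODULE);
	if (IS_ERR(example_mclk))
		return PTR_ERR(example_mclk);
	return 0;
}

/* Sensor side: fetch the clock by name and query its rate. */
static int example_sensor_query_clk(struct i2c_client *client)
{
	struct v4l2_clk *clk = v4l2_clk_get(&client->dev, "mclk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	dev_info(&client->dev, "mclk runs at %lu Hz\n",
		 v4l2_clk_get_rate(clk));
	v4l2_clk_put(clk);
	return 0;
}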
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 037d7a55aa8c..433d6d77942e 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -236,14 +236,14 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
v4l2_subdev_init(sd, ops);
sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
/* the owner is the same as the i2c_client's driver owner */
- sd->owner = client->driver->driver.owner;
+ sd->owner = client->dev.driver->owner;
sd->dev = &client->dev;
/* i2c_client and v4l2_subdev point to one another */
v4l2_set_subdevdata(sd, client);
i2c_set_clientdata(client, sd);
/* initialize name */
snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
- client->driver->driver.name, i2c_adapter_id(client->adapter),
+ client->dev.driver->name, i2c_adapter_id(client->adapter),
client->addr);
}
EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
@@ -274,11 +274,11 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
loaded. This delay-load mechanism doesn't work if other drivers
want to use the i2c device, so explicitly loading the module
is the best alternative. */
- if (client == NULL || client->driver == NULL)
+ if (client == NULL || client->dev.driver == NULL)
goto error;
/* Lock the module so we can safely get the v4l2_subdev pointer */
- if (!try_module_get(client->driver->driver.owner))
+ if (!try_module_get(client->dev.driver->owner))
goto error;
sd = i2c_get_clientdata(client);
@@ -287,7 +287,7 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
if (v4l2_device_register_subdev(v4l2_dev, sd))
sd = NULL;
/* Decrease the module use count to match the first try_module_get. */
- module_put(client->driver->driver.owner);
+ module_put(client->dev.driver->owner);
error:
/* If we have a client but no subdev, then something went wrong and
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index c3f080388684..60dcc0f3b32e 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -565,13 +565,13 @@ EXPORT_SYMBOL(v4l2_ctrl_get_menu);
* Returns NULL or an s64 type array containing the menu for given
* control ID. The total number of the menu items is returned in @len.
*/
-const s64 const *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
+const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
{
- static const s64 const qmenu_int_vpx_num_partitions[] = {
+ static const s64 qmenu_int_vpx_num_partitions[] = {
1, 2, 4, 8,
};
- static const s64 const qmenu_int_vpx_num_ref_frames[] = {
+ static const s64 qmenu_int_vpx_num_ref_frames[] = {
1, 2, 3,
};
@@ -583,7 +583,7 @@ const s64 const *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
default:
*len = 0;
return NULL;
- };
+ }
}
EXPORT_SYMBOL(v4l2_ctrl_get_int_menu);
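For reference, the helper whose duplicate const qualifiers are dropped above is normally consumed when a driver sets up an integer-menu control; a minimal sketch using one of the VPX control IDs handled by its switch statement:

#include <linux/kernel.h>
#include <media/v4l2-ctrls.h>

/* Print the integer menu entries reported for a given control ID. */
static void example_dump_int_menu(u32 id)
{
	u32 len, i;
	const s64 *menu = v4l2_ctrl_get_int_menu(id, &len);

	if (!menu)
		return;
	for (i = 0; i < len; i++)
		pr_info("menu[%u] = %lld\n", i, (long long)menu[i]);
}

/* e.g. example_dump_int_menu(V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS); */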
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 7c4371288215..73035ee0f4de 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -41,6 +41,8 @@ module_param(debug, bool, 0644);
#define TRANS_QUEUED (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING (1 << 1)
+/* Instance is currently aborting */
+#define TRANS_ABORT (1 << 2)
/* Offset base for buffers on the destination queue - used to distinguish
@@ -221,6 +223,14 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
}
spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+
+ /* If the context is aborted then don't schedule it */
+ if (m2m_ctx->job_flags & TRANS_ABORT) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("Aborted context\n");
+ return;
+ }
+
if (m2m_ctx->job_flags & TRANS_QUEUED) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("On job queue already\n");
@@ -280,6 +290,8 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
m2m_dev = m2m_ctx->m2m_dev;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+
+ m2m_ctx->job_flags |= TRANS_ABORT;
if (m2m_ctx->job_flags & TRANS_RUNNING) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
@@ -480,13 +492,15 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
m2m_dev = m2m_ctx->m2m_dev;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
/* We should not be scheduled anymore, since we're dropping a queue. */
- INIT_LIST_HEAD(&m2m_ctx->queue);
+ if (m2m_ctx->job_flags & TRANS_QUEUED)
+ list_del(&m2m_ctx->queue);
m2m_ctx->job_flags = 0;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
/* Drop queue, since streamoff returns device to the same state as after
* calling reqbufs. */
INIT_LIST_HEAD(&q_ctx->rdy_queue);
+ q_ctx->num_rdy = 0;
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
if (m2m_dev->curr_ctx == m2m_ctx) {
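The new TRANS_ABORT flag makes sure a context that is being cancelled can no longer be picked up by v4l2_m2m_try_schedule(): the flag is set under job_spinlock in v4l2_m2m_cancel_job() and tested under the same lock before queueing. Stripped to its essentials, the idiom looks like this (names invented):

#include <linux/spinlock.h>
#include <linux/types.h>

#define EX_QUEUED	(1 << 0)
#define EX_RUNNING	(1 << 1)
#define EX_ABORT	(1 << 2)	/* mirrors TRANS_ABORT above */

struct example_ctx {
	spinlock_t	lock;
	unsigned long	flags;
};

static void example_cancel(struct example_ctx *ctx)
{
	spin_lock(&ctx->lock);
	ctx->flags |= EX_ABORT;		/* later schedule attempts now bail out */
	spin_unlock(&ctx->lock);
	/* ...then wait for any RUNNING job to drain, as the real code does. */
}

static bool example_try_schedule(struct example_ctx *ctx)
{
	bool queued = false;

	spin_lock(&ctx->lock);
	if (!(ctx->flags & (EX_ABORT | EX_QUEUED | EX_RUNNING))) {
		ctx->flags |= EX_QUEUED;
		queued = true;
	}
	spin_unlock(&ctx->lock);
	return queued;
}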
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 594c75eab5a5..b19b306c8f7f 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -241,7 +241,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
q->bufs[q->num_buffers + buffer] = vb;
}
- __setup_offsets(q, buffer);
+ if (memory == V4L2_MEMORY_MMAP)
+ __setup_offsets(q, buffer);
dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
buffer, num_planes);
@@ -353,7 +354,9 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
if (b->m.planes[plane].bytesused > length)
return -EINVAL;
- if (b->m.planes[plane].data_offset >=
+
+ if (b->m.planes[plane].data_offset > 0 &&
+ b->m.planes[plane].data_offset >=
b->m.planes[plane].bytesused)
return -EINVAL;
}
@@ -1013,6 +1016,10 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
/* Check if the provided plane buffer is large enough */
if (planes[plane].length < q->plane_sizes[plane]) {
+ dprintk(1, "qbuf: provided buffer size %u is less than "
+ "setup size %u for plane %d\n",
+ planes[plane].length,
+ q->plane_sizes[plane], plane);
ret = -EINVAL;
goto err;
}
@@ -1203,8 +1210,11 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
int ret;
ret = __verify_length(vb, b);
- if (ret < 0)
+ if (ret < 0) {
+ dprintk(1, "%s(): plane parameters verification failed: %d\n",
+ __func__, ret);
return ret;
+ }
switch (q->memory) {
case V4L2_MEMORY_MMAP:
@@ -2467,10 +2477,11 @@ size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
}
EXPORT_SYMBOL_GPL(vb2_read);
-size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
loff_t *ppos, int nonblocking)
{
- return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0);
+ return __vb2_perform_fileio(q, (char __user *) data, count,
+ ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);
@@ -2631,7 +2642,7 @@ int vb2_fop_release(struct file *file)
}
EXPORT_SYMBOL_GPL(vb2_fop_release);
-ssize_t vb2_fop_write(struct file *file, char __user *buf,
+ssize_t vb2_fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index fd56f2563201..646f08f4f504 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -423,6 +423,39 @@ static inline int vma_is_io(struct vm_area_struct *vma)
return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
+static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
+ struct vm_area_struct *vma, unsigned long *res)
+{
+ unsigned long pfn, start_pfn, prev_pfn;
+ unsigned int i;
+ int ret;
+
+ if (!vma_is_io(vma))
+ return -EFAULT;
+
+ ret = follow_pfn(vma, start, &pfn);
+ if (ret)
+ return ret;
+
+ start_pfn = pfn;
+ start += PAGE_SIZE;
+
+ for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
+ prev_pfn = pfn;
+ ret = follow_pfn(vma, start, &pfn);
+
+ if (ret) {
+ pr_err("no page for address %lu\n", start);
+ return ret;
+ }
+ if (pfn != prev_pfn + 1)
+ return -EINVAL;
+ }
+
+ *res = start_pfn;
+ return 0;
+}
+
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
int n_pages, struct vm_area_struct *vma, int write)
{
@@ -433,6 +466,9 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
unsigned long pfn;
int ret = follow_pfn(vma, start, &pfn);
+ if (!pfn_valid(pfn))
+ return -EINVAL;
+
if (ret) {
pr_err("no page for address %lu\n", start);
return ret;
@@ -468,16 +504,49 @@ static void vb2_dc_put_userptr(void *buf_priv)
struct vb2_dc_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
- dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
- if (!vma_is_io(buf->vma))
- vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+ if (sgt) {
+ dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+ if (!vma_is_io(buf->vma))
+ vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
- sg_free_table(sgt);
- kfree(sgt);
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
vb2_put_vma(buf->vma);
kfree(buf);
}
+/*
+ * For some kinds of reserved memory there might be no struct page available,
+ * so all that can be done to support such 'pages' is to try to convert the
+ * pfn to a dma address or, as a last resort, assume that dma address ==
+ * physical address (as earlier versions of videobuf2-dma-contig did).
+ */
+
+#ifdef __arch_pfn_to_dma
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+ return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
+}
+#elif defined(__pfn_to_bus)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+ return (dma_addr_t)__pfn_to_bus(pfn);
+}
+#elif defined(__pfn_to_phys)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+ return (dma_addr_t)__pfn_to_phys(pfn);
+}
+#else
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+ /* really, we cannot do anything better at this point */
+ return (dma_addr_t)(pfn) << PAGE_SHIFT;
+}
+#endif
+
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write)
{
@@ -548,6 +617,14 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
/* extract page list from userspace mapping */
ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
if (ret) {
+ unsigned long pfn;
+ if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
+ buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
+ buf->size = size;
+ kfree(pages);
+ return buf;
+ }
+
pr_err("failed to get user pages\n");
goto fail_vma;
}
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 16ae3dcc7e29..2f860543912c 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -35,17 +35,61 @@ struct vb2_dma_sg_buf {
struct page **pages;
int write;
int offset;
- struct vb2_dma_sg_desc sg_desc;
+ struct sg_table sg_table;
+ size_t size;
+ unsigned int num_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
};
static void vb2_dma_sg_put(void *buf_priv);
+static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
+ gfp_t gfp_flags)
+{
+ unsigned int last_page = 0;
+ int size = buf->size;
+
+ while (size > 0) {
+ struct page *pages;
+ int order;
+ int i;
+
+ order = get_order(size);
+ /* Don't over-allocate */
+ if ((PAGE_SIZE << order) > size)
+ order--;
+
+ pages = NULL;
+ while (!pages) {
+ pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
+ __GFP_NOWARN | gfp_flags, order);
+ if (pages)
+ break;
+
+ if (order == 0) {
+ while (last_page--)
+ __free_page(buf->pages[last_page]);
+ return -ENOMEM;
+ }
+ order--;
+ }
+
+ split_page(pages, order);
+ for (i = 0; i < (1 << order); i++)
+ buf->pages[last_page++] = &pages[i];
+
+ size -= PAGE_SIZE << order;
+ }
+
+ return 0;
+}
+
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
struct vb2_dma_sg_buf *buf;
- int i;
+ int ret;
+ int num_pages;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
@@ -54,29 +98,23 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
buf->vaddr = NULL;
buf->write = 0;
buf->offset = 0;
- buf->sg_desc.size = size;
+ buf->size = size;
/* size is already page aligned */
- buf->sg_desc.num_pages = size >> PAGE_SHIFT;
+ buf->num_pages = size >> PAGE_SHIFT;
- buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
- sizeof(*buf->sg_desc.sglist));
- if (!buf->sg_desc.sglist)
- goto fail_sglist_alloc;
- sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
-
- buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+ buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
GFP_KERNEL);
if (!buf->pages)
goto fail_pages_array_alloc;
- for (i = 0; i < buf->sg_desc.num_pages; ++i) {
- buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO |
- __GFP_NOWARN | gfp_flags);
- if (NULL == buf->pages[i])
- goto fail_pages_alloc;
- sg_set_page(&buf->sg_desc.sglist[i],
- buf->pages[i], PAGE_SIZE, 0);
- }
+ ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
+ if (ret)
+ goto fail_pages_alloc;
+
+ ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+ buf->num_pages, 0, size, gfp_flags);
+ if (ret)
+ goto fail_table_alloc;
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_dma_sg_put;
@@ -85,18 +123,16 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
atomic_inc(&buf->refcount);
dprintk(1, "%s: Allocated buffer of %d pages\n",
- __func__, buf->sg_desc.num_pages);
+ __func__, buf->num_pages);
return buf;
+fail_table_alloc:
+ num_pages = buf->num_pages;
+ while (num_pages--)
+ __free_page(buf->pages[num_pages]);
fail_pages_alloc:
- while (--i >= 0)
- __free_page(buf->pages[i]);
kfree(buf->pages);
-
fail_pages_array_alloc:
- vfree(buf->sg_desc.sglist);
-
-fail_sglist_alloc:
kfree(buf);
return NULL;
}
@@ -104,14 +140,14 @@ fail_sglist_alloc:
static void vb2_dma_sg_put(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- int i = buf->sg_desc.num_pages;
+ int i = buf->num_pages;
if (atomic_dec_and_test(&buf->refcount)) {
dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
- buf->sg_desc.num_pages);
+ buf->num_pages);
if (buf->vaddr)
- vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
- vfree(buf->sg_desc.sglist);
+ vm_unmap_ram(buf->vaddr, buf->num_pages);
+ sg_free_table(&buf->sg_table);
while (--i >= 0)
__free_page(buf->pages[i]);
kfree(buf->pages);
@@ -124,7 +160,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
{
struct vb2_dma_sg_buf *buf;
unsigned long first, last;
- int num_pages_from_user, i;
+ int num_pages_from_user;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
@@ -133,56 +169,41 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
buf->vaddr = NULL;
buf->write = write;
buf->offset = vaddr & ~PAGE_MASK;
- buf->sg_desc.size = size;
+ buf->size = size;
first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
- buf->sg_desc.num_pages = last - first + 1;
-
- buf->sg_desc.sglist = vzalloc(
- buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
- if (!buf->sg_desc.sglist)
- goto userptr_fail_sglist_alloc;
-
- sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
+ buf->num_pages = last - first + 1;
- buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+ buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
GFP_KERNEL);
if (!buf->pages)
- goto userptr_fail_pages_array_alloc;
+ return NULL;
num_pages_from_user = get_user_pages(current, current->mm,
vaddr & PAGE_MASK,
- buf->sg_desc.num_pages,
+ buf->num_pages,
write,
1, /* force */
buf->pages,
NULL);
- if (num_pages_from_user != buf->sg_desc.num_pages)
+ if (num_pages_from_user != buf->num_pages)
goto userptr_fail_get_user_pages;
- sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
- PAGE_SIZE - buf->offset, buf->offset);
- size -= PAGE_SIZE - buf->offset;
- for (i = 1; i < buf->sg_desc.num_pages; ++i) {
- sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
- min_t(size_t, PAGE_SIZE, size), 0);
- size -= min_t(size_t, PAGE_SIZE, size);
- }
+ if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+ buf->num_pages, buf->offset, size, 0))
+ goto userptr_fail_alloc_table_from_pages;
+
return buf;
+userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
dprintk(1, "get_user_pages requested/got: %d/%d]\n",
- num_pages_from_user, buf->sg_desc.num_pages);
+ num_pages_from_user, buf->num_pages);
while (--num_pages_from_user >= 0)
put_page(buf->pages[num_pages_from_user]);
kfree(buf->pages);
-
-userptr_fail_pages_array_alloc:
- vfree(buf->sg_desc.sglist);
-
-userptr_fail_sglist_alloc:
kfree(buf);
return NULL;
}
@@ -194,18 +215,18 @@ userptr_fail_sglist_alloc:
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- int i = buf->sg_desc.num_pages;
+ int i = buf->num_pages;
dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
- __func__, buf->sg_desc.num_pages);
+ __func__, buf->num_pages);
if (buf->vaddr)
- vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
+ vm_unmap_ram(buf->vaddr, buf->num_pages);
+ sg_free_table(&buf->sg_table);
while (--i >= 0) {
if (buf->write)
set_page_dirty_lock(buf->pages[i]);
put_page(buf->pages[i]);
}
- vfree(buf->sg_desc.sglist);
kfree(buf->pages);
kfree(buf);
}
@@ -218,7 +239,7 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
if (!buf->vaddr)
buf->vaddr = vm_map_ram(buf->pages,
- buf->sg_desc.num_pages,
+ buf->num_pages,
-1,
PAGE_KERNEL);
@@ -274,7 +295,7 @@ static void *vb2_dma_sg_cookie(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- return &buf->sg_desc;
+ return &buf->sg_table;
}
const struct vb2_mem_ops vb2_dma_sg_memops = {
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index ffcb10ac4341..bbf4aea1627d 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -153,24 +153,24 @@ static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \
struct memstick_dev *card = container_of(dev, struct memstick_dev, \
dev); \
return sprintf(buf, format, card->id.name); \
-}
+} \
+static DEVICE_ATTR_RO(name);
MEMSTICK_ATTR(type, "%02X");
MEMSTICK_ATTR(category, "%02X");
MEMSTICK_ATTR(class, "%02X");
-#define MEMSTICK_ATTR_RO(name) __ATTR(name, S_IRUGO, name##_show, NULL)
-
-static struct device_attribute memstick_dev_attrs[] = {
- MEMSTICK_ATTR_RO(type),
- MEMSTICK_ATTR_RO(category),
- MEMSTICK_ATTR_RO(class),
- __ATTR_NULL
+static struct attribute *memstick_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_category.attr,
+ &dev_attr_class.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(memstick_dev);
static struct bus_type memstick_bus_type = {
.name = "memstick",
- .dev_attrs = memstick_dev_attrs,
+ .dev_groups = memstick_dev_groups,
.match = memstick_bus_match,
.uevent = memstick_uevent,
.probe = memstick_device_probe,
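The memstick conversion above (and the i2o one a little further down) follow the tree-wide move from bus_type.dev_attrs arrays to attribute groups. The boilerplate generated by the two macros looks roughly like the following; the attribute name is a placeholder:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/* DEVICE_ATTR_RO(foo) pairs foo_show with a read-only (0444)
 * struct device_attribute named dev_attr_foo. */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "example\n");
}
static DEVICE_ATTR_RO(foo);

static struct attribute *example_dev_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};
/* ATTRIBUTE_GROUPS(example_dev) emits the example_dev_group/_groups
 * definitions expected by bus_type.dev_groups. */
ATTRIBUTE_GROUPS(example_dev);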
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index dd239bdbfcb4..00d339c361fc 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2235,10 +2235,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
/* do we need to support multiple segments? */
- if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
- printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
- ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req),
- bio_segments(rsp->bio), blk_rq_bytes(rsp));
+ if (bio_multiple_segments(req->bio) ||
+ bio_multiple_segments(rsp->bio)) {
+ printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
+ ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
return -EINVAL;
}
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
index cbe384fb848c..91614f11f89a 100644
--- a/drivers/message/i2o/core.h
+++ b/drivers/message/i2o/core.h
@@ -33,7 +33,7 @@ extern int __init i2o_pci_init(void);
extern void __exit i2o_pci_exit(void);
/* device */
-extern struct device_attribute i2o_device_attrs[];
+extern const struct attribute_group *i2o_device_groups[];
extern void i2o_device_remove(struct i2o_device *);
extern int i2o_device_parse_lct(struct i2o_controller *);
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 4547db99f7da..98348f420b52 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -138,45 +138,55 @@ static void i2o_device_release(struct device *dev)
}
/**
- * i2o_device_show_class_id - Displays class id of I2O device
+ * class_id_show - Displays class id of I2O device
* @dev: device of which the class id should be displayed
* @attr: pointer to device attribute
* @buf: buffer into which the class id should be printed
*
* Returns the number of bytes which are printed into the buffer.
*/
-static ssize_t i2o_device_show_class_id(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t class_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct i2o_device *i2o_dev = to_i2o_device(dev);
sprintf(buf, "0x%03x\n", i2o_dev->lct_data.class_id);
return strlen(buf) + 1;
}
+static DEVICE_ATTR_RO(class_id);
/**
- * i2o_device_show_tid - Displays TID of I2O device
+ * tid_show - Displays TID of I2O device
* @dev: device of which the TID should be displayed
* @attr: pointer to device attribute
* @buf: buffer into which the TID should be printed
*
* Returns the number of bytes which are printed into the buffer.
*/
-static ssize_t i2o_device_show_tid(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t tid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct i2o_device *i2o_dev = to_i2o_device(dev);
sprintf(buf, "0x%03x\n", i2o_dev->lct_data.tid);
return strlen(buf) + 1;
}
+static DEVICE_ATTR_RO(tid);
/* I2O device attributes */
-struct device_attribute i2o_device_attrs[] = {
- __ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL),
- __ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL),
- __ATTR_NULL
+static struct attribute *i2o_device_attrs[] = {
+ &dev_attr_class_id.attr,
+ &dev_attr_tid.attr,
+ NULL,
+};
+
+static const struct attribute_group i2o_device_group = {
+ .attrs = i2o_device_attrs,
+};
+
+const struct attribute_group *i2o_device_groups[] = {
+ &i2o_device_group,
+ NULL,
};
/**
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 813eaa33fa14..b6b92d760510 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -62,7 +62,7 @@ static int i2o_bus_match(struct device *dev, struct device_driver *drv)
struct bus_type i2o_bus_type = {
.name = "i2o",
.match = i2o_bus_match,
- .dev_attrs = i2o_device_attrs
+ .dev_groups = i2o_device_groups,
};
/**
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 7ebe9ef1eba6..c9b1f6422941 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1247,7 +1247,7 @@ static struct i2c_driver pm860x_driver = {
.name = "88PM860x",
.owner = THIS_MODULE,
.pm = &pm860x_pm_ops,
- .of_match_table = of_match_ptr(pm860x_dt_ids),
+ .of_match_table = pm860x_dt_ids,
},
.probe = pm860x_probe,
.remove = pm860x_remove,
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 914c3d142f78..62a60caa5d1f 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -27,6 +27,18 @@ config MFD_AS3711
help
Support for the AS3711 PMIC from AMS
+config MFD_AS3722
+ bool "ams AS3722 Power Management IC"
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ depends on I2C && OF
+ help
+ The ams AS3722 is a compact system PMU suitable for mobile phones,
+ tablets etc. It has 4 DC/DC step-down regulators, 3 DC/DC step-down
+ controllers, 11 LDOs, RTC, automatic battery, temperature and
+ over current monitoring, GPIOs, ADC and a watchdog.
+
config PMIC_ADP5520
bool "Analog Devices ADP5520/01 MFD PMIC Core Support"
depends on I2C=y
@@ -664,14 +676,14 @@ menu "STMicroelectronics STMPE Interface Drivers"
depends on MFD_STMPE
config STMPE_I2C
- bool "STMicroelectronics STMPE I2C Inteface"
+ bool "STMicroelectronics STMPE I2C Interface"
depends on I2C=y
default y
help
This is used to enable I2C interface of STMPE
config STMPE_SPI
- bool "STMicroelectronics STMPE SPI Inteface"
+ bool "STMicroelectronics STMPE SPI Interface"
depends on SPI_MASTER
help
This is used to enable SPI interface of STMPE
@@ -1151,6 +1163,16 @@ config MFD_WM8994
core support for the WM8994, in order to use the actual
functionaltiy of the device other drivers must be enabled.
+config MFD_STW481X
+ bool "Support for ST Microelectronics STw481x"
+ depends on I2C && ARCH_NOMADIK
+ select REGMAP_I2C
+ select MFD_CORE
+ help
+ Select this option to enable the STw481x chip driver used
+ in various ST Microelectronics and ST-Ericsson embedded
+ Nomadik series.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 15b905c6553c..8a28dc90fe78 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -162,3 +162,5 @@ obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o vexpress-sysreg.o
obj-$(CONFIG_MFD_RETU) += retu-mfd.o
obj-$(CONFIG_MFD_AS3711) += as3711.o
+obj-$(CONFIG_MFD_AS3722) += as3722.o
+obj-$(CONFIG_MFD_STW481X) += stw481x.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 6f68472e0ca6..14d9542a4eed 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -293,7 +293,7 @@ static ssize_t aat2870_reg_write_file(struct file *file,
unsigned long addr, val;
int ret;
- buf_size = min(count, (sizeof(buf)-1));
+ buf_size = min(count, (size_t)(sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size)) {
dev_err(aat2870->dev, "Failed to copy from user\n");
return -EFAULT;
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 5ac3aa48473b..75e180ceecf3 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -540,7 +540,7 @@ static int arizona_of_get_core_pdata(struct arizona *arizona)
for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
if (arizona->pdata.gpio_defaults[i] > 0xffff)
arizona->pdata.gpio_defaults[i] = 0;
- if (arizona->pdata.gpio_defaults[i] == 0)
+ else if (arizona->pdata.gpio_defaults[i] == 0)
arizona->pdata.gpio_defaults[i] = 0x10000;
}
} else {
@@ -569,13 +569,25 @@ static struct mfd_cell early_devs[] = {
{ .name = "arizona-ldo1" },
};
+static const char *wm5102_supplies[] = {
+ "DBVDD2",
+ "DBVDD3",
+ "CPVDD",
+ "SPKVDDL",
+ "SPKVDDR",
+};
+
static struct mfd_cell wm5102_devs[] = {
{ .name = "arizona-micsupp" },
{ .name = "arizona-extcon" },
{ .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
{ .name = "arizona-pwm" },
- { .name = "wm5102-codec" },
+ {
+ .name = "wm5102-codec",
+ .parent_supplies = wm5102_supplies,
+ .num_parent_supplies = ARRAY_SIZE(wm5102_supplies),
+ },
};
static struct mfd_cell wm5110_devs[] = {
@@ -584,7 +596,17 @@ static struct mfd_cell wm5110_devs[] = {
{ .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
{ .name = "arizona-pwm" },
- { .name = "wm5110-codec" },
+ {
+ .name = "wm5110-codec",
+ .parent_supplies = wm5102_supplies,
+ .num_parent_supplies = ARRAY_SIZE(wm5102_supplies),
+ },
+};
+
+static const char *wm8997_supplies[] = {
+ "DBVDD2",
+ "CPVDD",
+ "SPKVDD",
};
static struct mfd_cell wm8997_devs[] = {
@@ -593,7 +615,11 @@ static struct mfd_cell wm8997_devs[] = {
{ .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
{ .name = "arizona-pwm" },
- { .name = "wm8997-codec" },
+ {
+ .name = "wm8997-codec",
+ .parent_supplies = wm8997_supplies,
+ .num_parent_supplies = ARRAY_SIZE(wm8997_supplies),
+ },
};
int arizona_dev_init(struct arizona *arizona)
@@ -607,11 +633,11 @@ int arizona_dev_init(struct arizona *arizona)
dev_set_drvdata(arizona->dev, arizona);
mutex_init(&arizona->clk_lock);
- arizona_of_get_core_pdata(arizona);
-
if (dev_get_platdata(arizona->dev))
memcpy(&arizona->pdata, dev_get_platdata(arizona->dev),
sizeof(arizona->pdata));
+ else
+ arizona_of_get_core_pdata(arizona);
regcache_cache_only(arizona->regmap, true);
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index 51dbabf7c021..beccb790c9ba 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -17,6 +17,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/mfd/arizona/core.h>
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index 47be7b35b5c5..1ca554b18bef 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
+#include <linux/of.h>
#include <linux/mfd/arizona/core.h>
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index abd3ab7c0908..ec684fcedb42 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -17,6 +17,7 @@
#include <linux/mfd/as3711.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
new file mode 100644
index 000000000000..f161f2e00df7
--- /dev/null
+++ b/drivers/mfd/as3722.c
@@ -0,0 +1,449 @@
+/*
+ * Core driver for ams AS3722 PMICs
+ *
+ * Copyright (C) 2013 AMS AG
+ * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Author: Florian Lobmaier <florian.lobmaier@ams.com>
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/as3722.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define AS3722_DEVICE_ID 0x0C
+
+static const struct resource as3722_rtc_resource[] = {
+ {
+ .name = "as3722-rtc-alarm",
+ .start = AS3722_IRQ_RTC_ALARM,
+ .end = AS3722_IRQ_RTC_ALARM,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static const struct resource as3722_adc_resource[] = {
+ {
+ .name = "as3722-adc",
+ .start = AS3722_IRQ_ADC,
+ .end = AS3722_IRQ_ADC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell as3722_devs[] = {
+ {
+ .name = "as3722-pinctrl",
+ },
+ {
+ .name = "as3722-regulator",
+ },
+ {
+ .name = "as3722-rtc",
+ .num_resources = ARRAY_SIZE(as3722_rtc_resource),
+ .resources = as3722_rtc_resource,
+ },
+ {
+ .name = "as3722-adc",
+ .num_resources = ARRAY_SIZE(as3722_adc_resource),
+ .resources = as3722_adc_resource,
+ },
+ {
+ .name = "as3722-power-off",
+ },
+};
+
+static const struct regmap_irq as3722_irqs[] = {
+ /* INT1 IRQs */
+ [AS3722_IRQ_LID] = {
+ .mask = AS3722_INTERRUPT_MASK1_LID,
+ },
+ [AS3722_IRQ_ACOK] = {
+ .mask = AS3722_INTERRUPT_MASK1_ACOK,
+ },
+ [AS3722_IRQ_ENABLE1] = {
+ .mask = AS3722_INTERRUPT_MASK1_ENABLE1,
+ },
+ [AS3722_IRQ_OCCUR_ALARM_SD0] = {
+ .mask = AS3722_INTERRUPT_MASK1_OCURR_ALARM_SD0,
+ },
+ [AS3722_IRQ_ONKEY_LONG_PRESS] = {
+ .mask = AS3722_INTERRUPT_MASK1_ONKEY_LONG,
+ },
+ [AS3722_IRQ_ONKEY] = {
+ .mask = AS3722_INTERRUPT_MASK1_ONKEY,
+ },
+ [AS3722_IRQ_OVTMP] = {
+ .mask = AS3722_INTERRUPT_MASK1_OVTMP,
+ },
+ [AS3722_IRQ_LOWBAT] = {
+ .mask = AS3722_INTERRUPT_MASK1_LOWBAT,
+ },
+
+ /* INT2 IRQs */
+ [AS3722_IRQ_SD0_LV] = {
+ .mask = AS3722_INTERRUPT_MASK2_SD0_LV,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_SD1_LV] = {
+ .mask = AS3722_INTERRUPT_MASK2_SD1_LV,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_SD2_LV] = {
+ .mask = AS3722_INTERRUPT_MASK2_SD2345_LV,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_PWM1_OV_PROT] = {
+ .mask = AS3722_INTERRUPT_MASK2_PWM1_OV_PROT,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_PWM2_OV_PROT] = {
+ .mask = AS3722_INTERRUPT_MASK2_PWM2_OV_PROT,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_ENABLE2] = {
+ .mask = AS3722_INTERRUPT_MASK2_ENABLE2,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_SD6_LV] = {
+ .mask = AS3722_INTERRUPT_MASK2_SD6_LV,
+ .reg_offset = 1,
+ },
+ [AS3722_IRQ_RTC_REP] = {
+ .mask = AS3722_INTERRUPT_MASK2_RTC_REP,
+ .reg_offset = 1,
+ },
+
+ /* INT3 IRQs */
+ [AS3722_IRQ_RTC_ALARM] = {
+ .mask = AS3722_INTERRUPT_MASK3_RTC_ALARM,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_GPIO1] = {
+ .mask = AS3722_INTERRUPT_MASK3_GPIO1,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_GPIO2] = {
+ .mask = AS3722_INTERRUPT_MASK3_GPIO2,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_GPIO3] = {
+ .mask = AS3722_INTERRUPT_MASK3_GPIO3,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_GPIO4] = {
+ .mask = AS3722_INTERRUPT_MASK3_GPIO4,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_GPIO5] = {
+ .mask = AS3722_INTERRUPT_MASK3_GPIO5,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_WATCHDOG] = {
+ .mask = AS3722_INTERRUPT_MASK3_WATCHDOG,
+ .reg_offset = 2,
+ },
+ [AS3722_IRQ_ENABLE3] = {
+ .mask = AS3722_INTERRUPT_MASK3_ENABLE3,
+ .reg_offset = 2,
+ },
+
+ /* INT4 IRQs */
+ [AS3722_IRQ_TEMP_SD0_SHUTDOWN] = {
+ .mask = AS3722_INTERRUPT_MASK4_TEMP_SD0_SHUTDOWN,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_TEMP_SD1_SHUTDOWN] = {
+ .mask = AS3722_INTERRUPT_MASK4_TEMP_SD1_SHUTDOWN,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_TEMP_SD2_SHUTDOWN] = {
+ .mask = AS3722_INTERRUPT_MASK4_TEMP_SD6_SHUTDOWN,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_TEMP_SD0_ALARM] = {
+ .mask = AS3722_INTERRUPT_MASK4_TEMP_SD0_ALARM,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_TEMP_SD1_ALARM] = {
+ .mask = AS3722_INTERRUPT_MASK4_TEMP_SD1_ALARM,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_TEMP_SD6_ALARM] = {
+ .mask = AS3722_INTERRUPT_MASK4_TEMP_SD6_ALARM,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_OCCUR_ALARM_SD6] = {
+ .mask = AS3722_INTERRUPT_MASK4_OCCUR_ALARM_SD6,
+ .reg_offset = 3,
+ },
+ [AS3722_IRQ_ADC] = {
+ .mask = AS3722_INTERRUPT_MASK4_ADC,
+ .reg_offset = 3,
+ },
+};
+
+static const struct regmap_irq_chip as3722_irq_chip = {
+ .name = "as3722",
+ .irqs = as3722_irqs,
+ .num_irqs = ARRAY_SIZE(as3722_irqs),
+ .num_regs = 4,
+ .status_base = AS3722_INTERRUPT_STATUS1_REG,
+ .mask_base = AS3722_INTERRUPT_MASK1_REG,
+};
+
+static int as3722_check_device_id(struct as3722 *as3722)
+{
+ u32 val;
+ int ret;
+
+ /* Check that this is actually a AS3722 */
+ ret = as3722_read(as3722, AS3722_ASIC_ID1_REG, &val);
+ if (ret < 0) {
+ dev_err(as3722->dev, "ASIC_ID1 read failed: %d\n", ret);
+ return ret;
+ }
+
+ if (val != AS3722_DEVICE_ID) {
+ dev_err(as3722->dev, "Device is not AS3722, ID is 0x%x\n", val);
+ return -ENODEV;
+ }
+
+ ret = as3722_read(as3722, AS3722_ASIC_ID2_REG, &val);
+ if (ret < 0) {
+ dev_err(as3722->dev, "ASIC_ID2 read failed: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(as3722->dev, "AS3722 with revision 0x%x found\n", val);
+ return 0;
+}
+
+static int as3722_configure_pullups(struct as3722 *as3722)
+{
+ int ret;
+ u32 val = 0;
+
+ if (as3722->en_intern_int_pullup)
+ val |= AS3722_INT_PULL_UP;
+ if (as3722->en_intern_i2c_pullup)
+ val |= AS3722_I2C_PULL_UP;
+
+ ret = as3722_update_bits(as3722, AS3722_IOVOLTAGE_REG,
+ AS3722_INT_PULL_UP | AS3722_I2C_PULL_UP, val);
+ if (ret < 0)
+ dev_err(as3722->dev, "IOVOLTAGE_REG update failed: %d\n", ret);
+ return ret;
+}
+
+static const struct regmap_range as3722_readable_ranges[] = {
+ regmap_reg_range(AS3722_SD0_VOLTAGE_REG, AS3722_SD6_VOLTAGE_REG),
+ regmap_reg_range(AS3722_GPIO0_CONTROL_REG, AS3722_LDO7_VOLTAGE_REG),
+ regmap_reg_range(AS3722_LDO9_VOLTAGE_REG, AS3722_REG_SEQU_MOD3_REG),
+ regmap_reg_range(AS3722_SD_PHSW_CTRL_REG, AS3722_PWM_CONTROL_H_REG),
+ regmap_reg_range(AS3722_WATCHDOG_TIMER_REG, AS3722_WATCHDOG_TIMER_REG),
+ regmap_reg_range(AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG,
+ AS3722_BATTERY_VOLTAGE_MONITOR2_REG),
+ regmap_reg_range(AS3722_SD_CONTROL_REG, AS3722_PWM_VCONTROL4_REG),
+ regmap_reg_range(AS3722_BB_CHARGER_REG, AS3722_SRAM_REG),
+ regmap_reg_range(AS3722_RTC_ACCESS_REG, AS3722_RTC_ACCESS_REG),
+ regmap_reg_range(AS3722_RTC_STATUS_REG, AS3722_TEMP_STATUS_REG),
+ regmap_reg_range(AS3722_ADC0_CONTROL_REG, AS3722_ADC_CONFIGURATION_REG),
+ regmap_reg_range(AS3722_ASIC_ID1_REG, AS3722_ASIC_ID2_REG),
+ regmap_reg_range(AS3722_LOCK_REG, AS3722_LOCK_REG),
+};
+
+static const struct regmap_access_table as3722_readable_table = {
+ .yes_ranges = as3722_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(as3722_readable_ranges),
+};
+
+static const struct regmap_range as3722_writable_ranges[] = {
+ regmap_reg_range(AS3722_SD0_VOLTAGE_REG, AS3722_SD6_VOLTAGE_REG),
+ regmap_reg_range(AS3722_GPIO0_CONTROL_REG, AS3722_LDO7_VOLTAGE_REG),
+ regmap_reg_range(AS3722_LDO9_VOLTAGE_REG, AS3722_GPIO_SIGNAL_OUT_REG),
+ regmap_reg_range(AS3722_REG_SEQU_MOD1_REG, AS3722_REG_SEQU_MOD3_REG),
+ regmap_reg_range(AS3722_SD_PHSW_CTRL_REG, AS3722_PWM_CONTROL_H_REG),
+ regmap_reg_range(AS3722_WATCHDOG_TIMER_REG, AS3722_WATCHDOG_TIMER_REG),
+ regmap_reg_range(AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG,
+ AS3722_BATTERY_VOLTAGE_MONITOR2_REG),
+ regmap_reg_range(AS3722_SD_CONTROL_REG, AS3722_PWM_VCONTROL4_REG),
+ regmap_reg_range(AS3722_BB_CHARGER_REG, AS3722_SRAM_REG),
+ regmap_reg_range(AS3722_INTERRUPT_MASK1_REG, AS3722_TEMP_STATUS_REG),
+ regmap_reg_range(AS3722_ADC0_CONTROL_REG, AS3722_ADC1_CONTROL_REG),
+ regmap_reg_range(AS3722_ADC1_THRESHOLD_HI_MSB_REG,
+ AS3722_ADC_CONFIGURATION_REG),
+ regmap_reg_range(AS3722_LOCK_REG, AS3722_LOCK_REG),
+};
+
+static const struct regmap_access_table as3722_writable_table = {
+ .yes_ranges = as3722_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(as3722_writable_ranges),
+};
+
+static const struct regmap_range as3722_cacheable_ranges[] = {
+ regmap_reg_range(AS3722_SD0_VOLTAGE_REG, AS3722_LDO11_VOLTAGE_REG),
+ regmap_reg_range(AS3722_SD_CONTROL_REG, AS3722_LDOCONTROL1_REG),
+};
+
+static const struct regmap_access_table as3722_volatile_table = {
+ .no_ranges = as3722_cacheable_ranges,
+ .n_no_ranges = ARRAY_SIZE(as3722_cacheable_ranges),
+};
+
+static const struct regmap_config as3722_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AS3722_MAX_REGISTER,
+ .cache_type = REGCACHE_RBTREE,
+ .rd_table = &as3722_readable_table,
+ .wr_table = &as3722_writable_table,
+ .volatile_table = &as3722_volatile_table,
+};
+
+static int as3722_i2c_of_probe(struct i2c_client *i2c,
+ struct as3722 *as3722)
+{
+ struct device_node *np = i2c->dev.of_node;
+ struct irq_data *irq_data;
+
+ if (!np) {
+ dev_err(&i2c->dev, "Device Tree not found\n");
+ return -EINVAL;
+ }
+
+ irq_data = irq_get_irq_data(i2c->irq);
+ if (!irq_data) {
+ dev_err(&i2c->dev, "Invalid IRQ: %d\n", i2c->irq);
+ return -EINVAL;
+ }
+
+ as3722->en_intern_int_pullup = of_property_read_bool(np,
+ "ams,enable-internal-int-pullup");
+ as3722->en_intern_i2c_pullup = of_property_read_bool(np,
+ "ams,enable-internal-i2c-pullup");
+ as3722->irq_flags = irqd_get_trigger_type(irq_data);
+ dev_dbg(&i2c->dev, "IRQ flags are 0x%08lx\n", as3722->irq_flags);
+ return 0;
+}
+
+static int as3722_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct as3722 *as3722;
+ unsigned long irq_flags;
+ int ret;
+
+ as3722 = devm_kzalloc(&i2c->dev, sizeof(struct as3722), GFP_KERNEL);
+ if (!as3722)
+ return -ENOMEM;
+
+ as3722->dev = &i2c->dev;
+ as3722->chip_irq = i2c->irq;
+ i2c_set_clientdata(i2c, as3722);
+
+ ret = as3722_i2c_of_probe(i2c, as3722);
+ if (ret < 0)
+ return ret;
+
+ as3722->regmap = devm_regmap_init_i2c(i2c, &as3722_regmap_config);
+ if (IS_ERR(as3722->regmap)) {
+ ret = PTR_ERR(as3722->regmap);
+ dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = as3722_check_device_id(as3722);
+ if (ret < 0)
+ return ret;
+
+ irq_flags = as3722->irq_flags | IRQF_ONESHOT;
+ ret = regmap_add_irq_chip(as3722->regmap, as3722->chip_irq,
+ irq_flags, -1, &as3722_irq_chip,
+ &as3722->irq_data);
+ if (ret < 0) {
+ dev_err(as3722->dev, "Failed to add regmap irq: %d\n", ret);
+ return ret;
+ }
+
+ ret = as3722_configure_pullups(as3722);
+ if (ret < 0)
+ goto scrub;
+
+ ret = mfd_add_devices(&i2c->dev, -1, as3722_devs,
+ ARRAY_SIZE(as3722_devs), NULL, 0,
+ regmap_irq_get_domain(as3722->irq_data));
+ if (ret) {
+ dev_err(as3722->dev, "Failed to add MFD devices: %d\n", ret);
+ goto scrub;
+ }
+
+ dev_dbg(as3722->dev, "AS3722 core driver initialized successfully\n");
+ return 0;
+
+scrub:
+ regmap_del_irq_chip(as3722->chip_irq, as3722->irq_data);
+ return ret;
+}
+
+static int as3722_i2c_remove(struct i2c_client *i2c)
+{
+ struct as3722 *as3722 = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(as3722->dev);
+ regmap_del_irq_chip(as3722->chip_irq, as3722->irq_data);
+ return 0;
+}
+
+static const struct of_device_id as3722_of_match[] = {
+ { .compatible = "ams,as3722", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, as3722_of_match);
+
+static const struct i2c_device_id as3722_i2c_id[] = {
+ { "as3722", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, as3722_i2c_id);
+
+static struct i2c_driver as3722_i2c_driver = {
+ .driver = {
+ .name = "as3722",
+ .owner = THIS_MODULE,
+ .of_match_table = as3722_of_match,
+ },
+ .probe = as3722_i2c_probe,
+ .remove = as3722_i2c_remove,
+ .id_table = as3722_i2c_id,
+};
+
+module_i2c_driver(as3722_i2c_driver);
+
+MODULE_DESCRIPTION("I2C support for AS3722 PMICs");
+MODULE_AUTHOR("Florian Lobmaier <florian.lobmaier@ams.com>");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL");
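Since the new as3722 core registers its function drivers as MFD cells and hands them the regmap IRQ domain, each cell driver is expected to pick up the shared struct as3722 from its parent device and use the as3722_read()/as3722_update_bits() accessors used above. A hedged sketch of what a cell's probe might look like; the cell name matches one registered above, but the register access is only an example:

#include <linux/mfd/as3722.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_as3722_cell_probe(struct platform_device *pdev)
{
	struct as3722 *as3722 = dev_get_drvdata(pdev->dev.parent);
	u32 val;
	int ret;

	/* ASIC_ID1 is just a known-readable register from the ranges above. */
	ret = as3722_read(as3722, AS3722_ASIC_ID1_REG, &val);
	if (ret < 0)
		return ret;

	dev_dbg(&pdev->dev, "parent AS3722 reports ID 0x%x\n", val);
	return 0;
}

static struct platform_driver example_as3722_cell_driver = {
	.driver = {
		.name = "as3722-adc",	/* one of the mfd_cell names above */
		.owner = THIS_MODULE,
	},
	.probe = example_as3722_cell_probe,
};
module_platform_driver(example_as3722_cell_driver);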
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 6a9fec40d018..c319c4ef5d49 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -86,7 +86,11 @@ static int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
return 0;
}
-static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
+/*
+ * According to errata item 24, multiwrite mode should be avoided
+ * in order to prevent register data corruption after power-down.
+ */
+static int da9052_i2c_disable_multiwrite(struct da9052 *da9052)
{
int reg_val, ret;
@@ -94,8 +98,8 @@ static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
if (ret < 0)
return ret;
- if (reg_val & DA9052_CONTROL_B_WRITEMODE) {
- reg_val &= ~DA9052_CONTROL_B_WRITEMODE;
+ if (!(reg_val & DA9052_CONTROL_B_WRITEMODE)) {
+ reg_val |= DA9052_CONTROL_B_WRITEMODE;
ret = regmap_write(da9052->regmap, DA9052_CONTROL_B_REG,
reg_val);
if (ret < 0)
@@ -154,7 +158,7 @@ static int da9052_i2c_probe(struct i2c_client *client,
return ret;
}
- ret = da9052_i2c_enable_multiwrite(da9052);
+ ret = da9052_i2c_disable_multiwrite(da9052);
if (ret < 0)
return ret;
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 53f371dcbb6e..b9ce60c301de 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -480,7 +480,6 @@ static struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
CLK_MGT_ENTRY(PER6CLK, PLL_DIV, true),
CLK_MGT_ENTRY(PER7CLK, PLL_DIV, true),
CLK_MGT_ENTRY(LCDCLK, PLL_FIX, true),
- CLK_MGT_ENTRY(BML8580CLK, PLL_DIV, true),
CLK_MGT_ENTRY(BMLCLK, PLL_DIV, true),
CLK_MGT_ENTRY(HSITXCLK, PLL_DIV, true),
CLK_MGT_ENTRY(HSIRXCLK, PLL_DIV, true),
diff --git a/drivers/mfd/dbx500-prcmu-regs.h b/drivers/mfd/dbx500-prcmu-regs.h
index 4f6f0fa5d3b7..7cc32a8ff01c 100644
--- a/drivers/mfd/dbx500-prcmu-regs.h
+++ b/drivers/mfd/dbx500-prcmu-regs.h
@@ -32,7 +32,6 @@
#define PRCM_PER7CLK_MGT (0x040)
#define PRCM_LCDCLK_MGT (0x044)
#define PRCM_BMLCLK_MGT (0x04C)
-#define PRCM_BML8580CLK_MGT (0x108)
#define PRCM_HSITXCLK_MGT (0x050)
#define PRCM_HSIRXCLK_MGT (0x054)
#define PRCM_HDMICLK_MGT (0x058)
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 7245b0c5b794..2ed774e7d342 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -394,16 +394,12 @@ static int pcap_add_subdev(struct pcap_chip *pcap,
static int ezx_pcap_remove(struct spi_device *spi)
{
struct pcap_chip *pcap = spi_get_drvdata(spi);
- struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
- int i, adc_irq;
+ int i;
/* remove all registered subdevs */
device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
/* cleanup ADC */
- adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
- PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);
- devm_free_irq(&spi->dev, adc_irq, pcap);
mutex_lock(&pcap->adc_mutex);
for (i = 0; i < PCAP_ADC_MAXQ; i++)
kfree(pcap->adc_queue[i]);
@@ -509,8 +505,6 @@ static int ezx_pcap_probe(struct spi_device *spi)
remove_subdevs:
device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
-/* free_adc: */
- devm_free_irq(&spi->dev, adc_irq, pcap);
free_irqchip:
for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
irq_set_chip_and_handler(i, NULL, NULL);
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 9483bc8472a5..da1c6566d93d 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -53,6 +53,7 @@
* document number TBD : Wellsburg
* document number TBD : Avoton SoC
* document number TBD : Coleto Creek
+ * document number TBD : Wildcat Point-LP
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -211,6 +212,7 @@ enum lpc_chipsets {
LPC_WBG, /* Wellsburg */
LPC_AVN, /* Avoton SoC */
LPC_COLETO, /* Coleto Creek */
+ LPC_WPT_LP, /* Wildcat Point-LP */
};
static struct lpc_ich_info lpc_chipset_info[] = {
@@ -503,6 +505,10 @@ static struct lpc_ich_info lpc_chipset_info[] = {
.name = "Coleto Creek",
.iTCO_version = 2,
},
+ [LPC_WPT_LP] = {
+ .name = "Lynx Point_LP",
+ .iTCO_version = 2,
+ },
};
/*
@@ -721,6 +727,13 @@ static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
{ PCI_VDEVICE(INTEL, 0x1f3a), LPC_AVN},
{ PCI_VDEVICE(INTEL, 0x1f3b), LPC_AVN},
{ PCI_VDEVICE(INTEL, 0x2390), LPC_COLETO},
+ { PCI_VDEVICE(INTEL, 0x9cc1), LPC_WPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9cc2), LPC_WPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9cc3), LPC_WPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9cc5), LPC_WPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9cc6), LPC_WPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9cc7), LPC_WPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9cc9), LPC_WPT_LP},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
@@ -969,7 +982,6 @@ static int lpc_ich_probe(struct pci_dev *dev,
if (!cell_added) {
dev_warn(&dev->dev, "No MFD cells added\n");
lpc_ich_restore_config_space(dev);
- pci_set_drvdata(dev, NULL);
return -ENODEV;
}
@@ -980,7 +992,6 @@ static void lpc_ich_remove(struct pci_dev *dev)
{
mfd_remove_devices(&dev->dev);
lpc_ich_restore_config_space(dev);
- pci_set_drvdata(dev, NULL);
}
static struct pci_driver lpc_ich_driver = {
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index 8cc6aac27cb2..fbfbf0b7f97a 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -59,18 +59,21 @@ static struct mfd_cell isch_smbus_cell = {
.name = "isch_smbus",
.num_resources = 1,
.resources = &smbus_sch_resource,
+ .ignore_resource_conflicts = true,
};
static struct mfd_cell sch_gpio_cell = {
.name = "sch_gpio",
.num_resources = 1,
.resources = &gpio_sch_resource,
+ .ignore_resource_conflicts = true,
};
static struct mfd_cell wdt_sch_cell = {
.name = "ie6xx_wdt",
.num_resources = 1,
.resources = &wdt_sch_resource,
+ .ignore_resource_conflicts = true,
};
static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 522be67b2e68..34520cbe8afb 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -31,6 +31,7 @@
#include <linux/mfd/max77686.h>
#include <linux/mfd/max77686-private.h>
#include <linux/err.h>
+#include <linux/of.h>
#define I2C_ADDR_RTC (0x0C >> 1)
diff --git a/drivers/mfd/max77693-irq.c b/drivers/mfd/max77693-irq.c
index 1029d018c739..66b58fe77094 100644
--- a/drivers/mfd/max77693-irq.c
+++ b/drivers/mfd/max77693-irq.c
@@ -128,7 +128,8 @@ static void max77693_irq_sync_unlock(struct irq_data *data)
static const inline struct max77693_irq_data *
irq_to_max77693_irq(struct max77693_dev *max77693, int irq)
{
- return &max77693_irqs[irq];
+ struct irq_data *data = irq_get_irq_data(irq);
+ return &max77693_irqs[data->hwirq];
}
static void max77693_irq_mask(struct irq_data *data)
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index c04723efc707..9f92463f4f7e 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
@@ -110,15 +111,9 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct max77693_dev *max77693;
- struct max77693_platform_data *pdata = dev_get_platdata(&i2c->dev);
u8 reg_data;
int ret = 0;
- if (!pdata) {
- dev_err(&i2c->dev, "No platform data found.\n");
- return -EINVAL;
- }
-
max77693 = devm_kzalloc(&i2c->dev,
sizeof(struct max77693_dev), GFP_KERNEL);
if (max77693 == NULL)
@@ -138,8 +133,6 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
return ret;
}
- max77693->wakeup = pdata->wakeup;
-
ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2,
&reg_data);
if (ret < 0) {
@@ -179,8 +172,6 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
if (ret < 0)
goto err_mfd;
- device_init_wakeup(max77693->dev, pdata->wakeup);
-
return ret;
err_mfd:
@@ -235,11 +226,19 @@ static const struct dev_pm_ops max77693_pm = {
.resume = max77693_resume,
};
+#ifdef CONFIG_OF
+static struct of_device_id max77693_dt_match[] = {
+ { .compatible = "maxim,max77693" },
+ {},
+};
+#endif
+
static struct i2c_driver max77693_i2c_driver = {
.driver = {
.name = "max77693",
.owner = THIS_MODULE,
.pm = &max77693_pm,
+ .of_match_table = of_match_ptr(max77693_dt_match),
},
.probe = max77693_i2c_probe,
.remove = max77693_i2c_remove,
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index e9b1c93a3ade..3bbfedc07f41 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -17,6 +17,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/max8907.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index de7fb80a6052..176aa26fc787 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -238,7 +238,7 @@ static struct i2c_driver max8925_driver = {
.name = "max8925",
.owner = THIS_MODULE,
.pm = &max8925_pm_ops,
- .of_match_table = of_match_ptr(max8925_dt_ids),
+ .of_match_table = max8925_dt_ids,
},
.probe = max8925_probe,
.remove = max8925_remove,
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index cee098c0dae3..791aea3e96ce 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 2a9b100c4825..dbbf8ee3f592 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -158,8 +158,6 @@ int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
{
int ret;
- BUG_ON(!mutex_is_locked(&mc13xxx->lock));
-
if (offset > MC13XXX_NUMREGS)
return -EINVAL;
@@ -172,8 +170,6 @@ EXPORT_SYMBOL(mc13xxx_reg_read);
int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val)
{
- BUG_ON(!mutex_is_locked(&mc13xxx->lock));
-
dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x\n", offset, val);
if (offset > MC13XXX_NUMREGS || val > 0xffffff)
@@ -186,7 +182,6 @@ EXPORT_SYMBOL(mc13xxx_reg_write);
int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset,
u32 mask, u32 val)
{
- BUG_ON(!mutex_is_locked(&mc13xxx->lock));
BUG_ON(val & ~mask);
dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x (mask: 0x%06x)\n",
offset, val, mask);
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index f745e27ee874..898bd335cd8e 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -78,7 +78,6 @@ static int mc13xxx_i2c_probe(struct i2c_client *client,
ret = PTR_ERR(mc13xxx->regmap);
dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
ret);
- dev_set_drvdata(&client->dev, NULL);
return ret;
}
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 77189daadf1e..5f14ef6693c2 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -94,10 +94,15 @@ static int mc13xxx_spi_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
struct spi_device *spi = to_spi_device(dev);
+ const char *reg = data;
if (count != 4)
return -ENOTSUPP;
+ /* Errata fix for SPI audio problems: the audio codec and DAC
+ * registers are deliberately written twice. */
+ if (*reg == MC13783_AUDIO_CODEC || *reg == MC13783_AUDIO_DAC)
+ spi_write(spi, data, count);
+
return spi_write(spi, data, count);
}
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index f421586f29fb..adc8ea36e7c4 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
+#include <linux/regulator/consumer.h>
static struct device_type mfd_dev_type = {
.name = "mfd_device",
@@ -99,6 +100,13 @@ static int mfd_add_device(struct device *parent, int id,
pdev->dev.dma_mask = parent->dma_mask;
pdev->dev.dma_parms = parent->dma_parms;
+ ret = devm_regulator_bulk_register_supply_alias(
+ &pdev->dev, cell->parent_supplies,
+ parent, cell->parent_supplies,
+ cell->num_parent_supplies);
+ if (ret < 0)
+ goto fail_res;
+
if (parent->of_node && cell->of_compatible) {
for_each_child_of_node(parent->of_node, np) {
if (of_device_is_compatible(np, cell->of_compatible)) {
@@ -112,12 +120,12 @@ static int mfd_add_device(struct device *parent, int id,
ret = platform_device_add_data(pdev,
cell->platform_data, cell->pdata_size);
if (ret)
- goto fail_res;
+ goto fail_alias;
}
ret = mfd_platform_add_cell(pdev, cell);
if (ret)
- goto fail_res;
+ goto fail_alias;
for (r = 0; r < cell->num_resources; r++) {
res[r].name = cell->resources[r].name;
@@ -152,17 +160,17 @@ static int mfd_add_device(struct device *parent, int id,
if (!cell->ignore_resource_conflicts) {
ret = acpi_check_resource_conflict(&res[r]);
if (ret)
- goto fail_res;
+ goto fail_alias;
}
}
ret = platform_device_add_resources(pdev, res, cell->num_resources);
if (ret)
- goto fail_res;
+ goto fail_alias;
ret = platform_device_add(pdev);
if (ret)
- goto fail_res;
+ goto fail_alias;
if (cell->pm_runtime_no_callbacks)
pm_runtime_no_callbacks(&pdev->dev);
@@ -171,6 +179,10 @@ static int mfd_add_device(struct device *parent, int id,
return 0;
+fail_alias:
+ devm_regulator_bulk_unregister_supply_alias(&pdev->dev,
+ cell->parent_supplies,
+ cell->num_parent_supplies);
fail_res:
kfree(res);
fail_device:
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 29ee54d68512..142650fdc058 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -328,13 +328,13 @@ static int usbhs_runtime_resume(struct device *dev)
omap_tll_enable(pdata);
if (!IS_ERR(omap->ehci_logic_fck))
- clk_enable(omap->ehci_logic_fck);
+ clk_prepare_enable(omap->ehci_logic_fck);
for (i = 0; i < omap->nports; i++) {
switch (pdata->port_mode[i]) {
case OMAP_EHCI_PORT_MODE_HSIC:
if (!IS_ERR(omap->hsic60m_clk[i])) {
- r = clk_enable(omap->hsic60m_clk[i]);
+ r = clk_prepare_enable(omap->hsic60m_clk[i]);
if (r) {
dev_err(dev,
"Can't enable port %d hsic60m clk:%d\n",
@@ -343,7 +343,7 @@ static int usbhs_runtime_resume(struct device *dev)
}
if (!IS_ERR(omap->hsic480m_clk[i])) {
- r = clk_enable(omap->hsic480m_clk[i]);
+ r = clk_prepare_enable(omap->hsic480m_clk[i]);
if (r) {
dev_err(dev,
"Can't enable port %d hsic480m clk:%d\n",
@@ -354,7 +354,7 @@ static int usbhs_runtime_resume(struct device *dev)
case OMAP_EHCI_PORT_MODE_TLL:
if (!IS_ERR(omap->utmi_clk[i])) {
- r = clk_enable(omap->utmi_clk[i]);
+ r = clk_prepare_enable(omap->utmi_clk[i]);
if (r) {
dev_err(dev,
"Can't enable port %d clk : %d\n",
@@ -382,15 +382,15 @@ static int usbhs_runtime_suspend(struct device *dev)
switch (pdata->port_mode[i]) {
case OMAP_EHCI_PORT_MODE_HSIC:
if (!IS_ERR(omap->hsic60m_clk[i]))
- clk_disable(omap->hsic60m_clk[i]);
+ clk_disable_unprepare(omap->hsic60m_clk[i]);
if (!IS_ERR(omap->hsic480m_clk[i]))
- clk_disable(omap->hsic480m_clk[i]);
+ clk_disable_unprepare(omap->hsic480m_clk[i]);
/* Fall through as utmi_clks were used in HSIC mode */
case OMAP_EHCI_PORT_MODE_TLL:
if (!IS_ERR(omap->utmi_clk[i]))
- clk_disable(omap->utmi_clk[i]);
+ clk_disable_unprepare(omap->utmi_clk[i]);
break;
default:
break;
@@ -398,7 +398,7 @@ static int usbhs_runtime_suspend(struct device *dev)
}
if (!IS_ERR(omap->ehci_logic_fck))
- clk_disable(omap->ehci_logic_fck);
+ clk_disable_unprepare(omap->ehci_logic_fck);
omap_tll_disable(pdata);
@@ -893,7 +893,7 @@ static struct platform_driver usbhs_omap_driver = {
.name = (char *)usbhs_driver_name,
.owner = THIS_MODULE,
.pm = &usbhsomap_dev_pm_ops,
- .of_match_table = of_match_ptr(usbhs_omap_dt_ids),
+ .of_match_table = usbhs_omap_dt_ids,
},
.remove = usbhs_omap_remove,
};
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index e59ac4cbac96..0d946ae14453 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -320,7 +320,7 @@ static struct platform_driver usbtll_omap_driver = {
.driver = {
.name = (char *)usbtll_driver_name,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(usbtll_omap_dt_ids),
+ .of_match_table = usbtll_omap_dt_ids,
},
.probe = usbtll_omap_probe,
.remove = usbtll_omap_remove,
@@ -429,7 +429,7 @@ int omap_tll_enable(struct usbhs_omap_platform_data *pdata)
if (IS_ERR(tll->ch_clk[i]))
continue;
- r = clk_enable(tll->ch_clk[i]);
+ r = clk_prepare_enable(tll->ch_clk[i]);
if (r) {
dev_err(tll_dev,
"Error enabling ch %d clock: %d\n", i, r);
@@ -460,7 +460,7 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
for (i = 0; i < tll->nch; i++) {
if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
if (!IS_ERR(tll->ch_clk[i]))
- clk_disable(tll->ch_clk[i]);
+ clk_disable_unprepare(tll->ch_clk[i]);
}
}
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 135afabe4ae2..d280d789e55a 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -368,6 +368,7 @@ static const struct of_device_id of_palmas_match_tbl[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, of_palmas_match_tbl);
static int palmas_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
@@ -402,7 +403,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
palmas->dev = &i2c->dev;
palmas->irq = i2c->irq;
- match = of_match_device(of_match_ptr(of_palmas_match_tbl), &i2c->dev);
+ match = of_match_device(of_palmas_match_tbl, &i2c->dev);
if (!match)
return -ENODATA;
@@ -421,7 +422,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
dev_err(palmas->dev,
"can't attach client %d\n", i);
ret = -ENOMEM;
- goto err;
+ goto err_i2c;
}
palmas->i2c_clients[i]->dev.of_node = of_node_get(node);
}
@@ -432,7 +433,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
dev_err(palmas->dev,
"Failed to allocate regmap %d, err: %d\n",
i, ret);
- goto err;
+ goto err_i2c;
}
}
@@ -451,7 +452,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
reg);
if (ret < 0) {
dev_err(palmas->dev, "POLARITY_CTRL updat failed: %d\n", ret);
- goto err;
+ goto err_i2c;
}
/* Change IRQ into clear on read mode for efficiency */
@@ -465,7 +466,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
IRQF_ONESHOT | pdata->irq_flags, 0, &palmas_irq_chip,
&palmas->irq_data);
if (ret < 0)
- goto err;
+ goto err_i2c;
no_irq:
slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
@@ -551,7 +552,6 @@ no_irq:
} else if (pdata->pm_off && !pm_power_off) {
palmas_dev = palmas;
pm_power_off = palmas_power_off;
- return ret;
}
}
@@ -559,17 +559,31 @@ no_irq:
err_irq:
regmap_del_irq_chip(palmas->irq, palmas->irq_data);
-err:
+err_i2c:
+ for (i = 1; i < PALMAS_NUM_CLIENTS; i++) {
+ if (palmas->i2c_clients[i])
+ i2c_unregister_device(palmas->i2c_clients[i]);
+ }
return ret;
}
static int palmas_i2c_remove(struct i2c_client *i2c)
{
struct palmas *palmas = i2c_get_clientdata(i2c);
+ int i;
- mfd_remove_devices(palmas->dev);
regmap_del_irq_chip(palmas->irq, palmas->irq_data);
+ for (i = 1; i < PALMAS_NUM_CLIENTS; i++) {
+ if (palmas->i2c_clients[i])
+ i2c_unregister_device(palmas->i2c_clients[i]);
+ }
+
+ if (palmas == palmas_dev) {
+ pm_power_off = NULL;
+ palmas_dev = NULL;
+ }
+
return 0;
}
diff --git a/drivers/mfd/rts5249.c b/drivers/mfd/rts5249.c
index 3b835f593e35..573de7bfcced 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/mfd/rts5249.c
@@ -130,13 +130,57 @@ static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
{
int err;
- err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV, 0xFE46);
+ err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV,
+ PHY_REG_REV_RESV | PHY_REG_REV_RXIDLE_LATCHED |
+ PHY_REG_REV_P1_EN | PHY_REG_REV_RXIDLE_EN |
+ PHY_REG_REV_RX_PWST | PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 |
+ PHY_REG_REV_STOP_CLKRD | PHY_REG_REV_STOP_CLKWR);
if (err < 0)
return err;
msleep(1);
- return rtsx_pci_write_phy_register(pcr, PHY_BPCR, 0x05C0);
+ err = rtsx_pci_write_phy_register(pcr, PHY_BPCR,
+ PHY_BPCR_IBRXSEL | PHY_BPCR_IBTXSEL |
+ PHY_BPCR_IB_FILTER | PHY_BPCR_CMIRROR_EN);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, PHY_PCR,
+ PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 |
+ PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 |
+ PHY_PCR_RSSI_EN);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, PHY_RCR2,
+ PHY_RCR2_EMPHASE_EN | PHY_RCR2_NADJR |
+ PHY_RCR2_CDR_CP_10 | PHY_RCR2_CDR_SR_2 |
+ PHY_RCR2_FREQSEL_12 | PHY_RCR2_CPADJEN |
+ PHY_RCR2_CDR_SC_8 | PHY_RCR2_CALIB_LATE);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, PHY_FLD4,
+ PHY_FLD4_FLDEN_SEL | PHY_FLD4_REQ_REF |
+ PHY_FLD4_RXAMP_OFF | PHY_FLD4_REQ_ADDA |
+ PHY_FLD4_BER_COUNT | PHY_FLD4_BER_TIMER |
+ PHY_FLD4_BER_CHK_EN);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, PHY_RDR, PHY_RDR_RXDSEL_1_9);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, PHY_RCR1,
+ PHY_RCR1_ADP_TIME | PHY_RCR1_VCO_COARSE);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, PHY_FLD3,
+ PHY_FLD3_TIMER_4 | PHY_FLD3_TIMER_6 |
+ PHY_FLD3_RXDELINK);
+ if (err < 0)
+ return err;
+ return rtsx_pci_write_phy_register(pcr, PHY_TUNE,
+ PHY_TUNE_TUNEREF_1_0 | PHY_TUNE_VBGSEL_1252 |
+ PHY_TUNE_SDBUS_33 | PHY_TUNE_TUNED18 |
+ PHY_TUNE_TUNED12);
}
static int rts5249_turn_on_led(struct rtsx_pcr *pcr)
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index e6ae7720f9e1..11e20afbdcac 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -1149,7 +1149,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->remap_addr = ioremap_nocache(base, len);
if (!pcr->remap_addr) {
ret = -ENOMEM;
- goto free_host;
+ goto free_handle;
}
pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
@@ -1209,8 +1209,6 @@ disable_msi:
pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
iounmap(pcr->remap_addr);
-free_host:
- dev_set_drvdata(&pcidev->dev, NULL);
free_handle:
kfree(handle);
free_pcr:
@@ -1242,7 +1240,6 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
pci_disable_msi(pcr->pci);
iounmap(pcr->remap_addr);
- dev_set_drvdata(&pcidev->dev, NULL);
pci_release_regions(pcidev);
pci_disable_device(pcidev);
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index f530e4b73f19..34c18fb8c089 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 33f040c558d0..c2c8c91c6c7b 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1232,7 +1232,7 @@ static ssize_t sm501_dbg_regs(struct device *dev,
}
-static DEVICE_ATTR(dbg_regs, 0666, sm501_dbg_regs, NULL);
+static DEVICE_ATTR(dbg_regs, 0444, sm501_dbg_regs, NULL);
/* sm501_init_reg
*
@@ -1660,7 +1660,6 @@ static int sm501_pci_probe(struct pci_dev *dev,
err3:
pci_disable_device(dev);
err2:
- pci_set_drvdata(dev, NULL);
kfree(sm);
err1:
return err;
@@ -1695,7 +1694,6 @@ static void sm501_pci_remove(struct pci_dev *dev)
release_resource(sm->regs_claim);
kfree(sm->regs_claim);
- pci_set_drvdata(dev, NULL);
pci_disable_device(dev);
}
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
new file mode 100644
index 000000000000..1243d5c6a448
--- /dev/null
+++ b/drivers/mfd/stw481x.c
@@ -0,0 +1,250 @@
+/*
+ * Core driver for STw4810/STw4811
+ *
+ * Copyright (C) 2013 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/stw481x.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+/*
+ * This driver can only access the non-USB portions of the STw4811; the
+ * register range 0x00-0x10, which deals with USB, is bound to the two
+ * special I2C pins used for USB control.
+ */
+
+/* Registers inside the power control address space */
+#define STW_PC_VCORE_SEL 0x05U
+#define STW_PC_VAUX_SEL 0x06U
+#define STW_PC_VPLL_SEL 0x07U
+
+/**
+ * stw481x_get_pctl_reg() - get a power control register
+ * @stw481x: handle to the stw481x chip
+ * @reg: power control register to fetch
+ *
+ * The power control registers are a set of one-time-programmable registers
+ * in their own register space, accessed by writing address bits to these
+ * two registers: bits 7,6,5 of PCTL_REG_LO correspond to the 3 LSBs of
+ * the address and bits 8,9 of PCTL_REG_HI correspond to the 2 MSBs of
+ * the address, forming a 5-bit address space, i.e. the 32 registers
+ * 0x00 ... 0x1f can be addressed.
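+ *
+ * As a worked illustration (the register number 0x1a is chosen arbitrarily
+ * here, not taken from a datasheet): selecting register 0x1a (0b11010)
+ * means writing msb = (0x1a >> 3) & 0x03 = 0x03 to STW_PCTL_REG_HI and
+ * lsb = (0x1a << 5) & 0xe0 = 0x40 to STW_PCTL_REG_LO; the read-back check
+ * then reassembles (0x03 << 3) | (0x40 >> 5) = 0x1a.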
+ */
+static int stw481x_get_pctl_reg(struct stw481x *stw481x, u8 reg)
+{
+ u8 msb = (reg >> 3) & 0x03;
+ u8 lsb = (reg << 5) & 0xe0;
+ unsigned int val;
+ u8 vrfy;
+ int ret;
+
+ ret = regmap_write(stw481x->map, STW_PCTL_REG_HI, msb);
+ if (ret)
+ return ret;
+ ret = regmap_write(stw481x->map, STW_PCTL_REG_LO, lsb);
+ if (ret)
+ return ret;
+ ret = regmap_read(stw481x->map, STW_PCTL_REG_HI, &val);
+ if (ret)
+ return ret;
+ vrfy = (val & 0x03) << 3;
+ ret = regmap_read(stw481x->map, STW_PCTL_REG_LO, &val);
+ if (ret)
+ return ret;
+ vrfy |= ((val >> 5) & 0x07);
+ if (vrfy != reg)
+ return -EIO;
+ return (val >> 1) & 0x0f;
+}
+
+static int stw481x_startup(struct stw481x *stw481x)
+{
+ /* Voltages multiplied by 100 */
+ u8 vcore_val[] = { 100, 105, 110, 115, 120, 122, 124, 126, 128,
+ 130, 132, 134, 136, 138, 140, 145 };
+ u8 vpll_val[] = { 105, 120, 130, 180 };
+ u8 vaux_val[] = { 15, 18, 25, 28 };
+ u8 vcore;
+ u8 vcore_slp;
+ u8 vpll;
+ u8 vaux;
+ bool vaux_en;
+ bool it_warn;
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(stw481x->map, STW_CONF1, &val);
+ if (ret)
+ return ret;
+ vaux_en = !!(val & STW_CONF1_PDN_VAUX);
+ it_warn = !!(val & STW_CONF1_IT_WARN);
+
+ dev_info(&stw481x->client->dev, "voltages %s\n",
+ (val & STW_CONF1_V_MONITORING) ? "OK" : "LOW");
+ dev_info(&stw481x->client->dev, "MMC level shifter %s\n",
+ (val & STW_CONF1_MMC_LS_STATUS) ? "high impedance" : "ON");
+ dev_info(&stw481x->client->dev, "VMMC: %s\n",
+ (val & STW_CONF1_PDN_VMMC) ? "ON" : "disabled");
+
+ dev_info(&stw481x->client->dev, "STw481x power control registers:\n");
+
+ ret = stw481x_get_pctl_reg(stw481x, STW_PC_VCORE_SEL);
+ if (ret < 0)
+ return ret;
+ vcore = ret & 0x0f;
+
+ ret = stw481x_get_pctl_reg(stw481x, STW_PC_VAUX_SEL);
+ if (ret < 0)
+ return ret;
+ vaux = (ret >> 2) & 3;
+ vpll = (ret >> 4) & 1; /* Save bit 4 */
+
+ ret = stw481x_get_pctl_reg(stw481x, STW_PC_VPLL_SEL);
+ if (ret < 0)
+ return ret;
+ vpll |= (ret >> 1) & 2;
+
+ dev_info(&stw481x->client->dev, "VCORE: %u.%uV %s\n",
+ vcore_val[vcore] / 100, vcore_val[vcore] % 100,
+ (ret & 4) ? "ON" : "OFF");
+
+ dev_info(&stw481x->client->dev, "VPLL: %u.%uV %s\n",
+ vpll_val[vpll] / 100, vpll_val[vpll] % 100,
+ (ret & 0x10) ? "ON" : "OFF");
+
+ dev_info(&stw481x->client->dev, "VAUX: %u.%uV %s\n",
+ vaux_val[vaux] / 10, vaux_val[vaux] % 10,
+ vaux_en ? "ON" : "OFF");
+
+ ret = regmap_read(stw481x->map, STW_CONF2, &val);
+ if (ret)
+ return ret;
+
+ dev_info(&stw481x->client->dev, "TWARN: %s threshold, %s\n",
+ it_warn ? "below" : "above",
+ (val & STW_CONF2_MASK_TWARN) ?
+ "enabled" : "mask through VDDOK");
+ dev_info(&stw481x->client->dev, "VMMC: %s\n",
+ (val & STW_CONF2_VMMC_EXT) ? "internal" : "external");
+ dev_info(&stw481x->client->dev, "IT WAKE UP: %s\n",
+ (val & STW_CONF2_MASK_IT_WAKE_UP) ? "enabled" : "masked");
+ dev_info(&stw481x->client->dev, "GPO1: %s\n",
+ (val & STW_CONF2_GPO1) ? "low" : "high impedance");
+ dev_info(&stw481x->client->dev, "GPO2: %s\n",
+ (val & STW_CONF2_GPO2) ? "low" : "high impedance");
+
+ ret = regmap_read(stw481x->map, STW_VCORE_SLEEP, &val);
+ if (ret)
+ return ret;
+ vcore_slp = val & 0x0f;
+ dev_info(&stw481x->client->dev, "VCORE SLEEP: %u.%uV\n",
+ vcore_val[vcore_slp] / 100, vcore_val[vcore_slp] % 100);
+
+ return 0;
+}
+
+/*
+ * MFD cells - we have one cell which is selected by operation
+ * mode, and we always have a GPIO cell.
+ */
+static struct mfd_cell stw481x_cells[] = {
+ {
+ .of_compatible = "st,stw481x-vmmc",
+ .name = "stw481x-vmmc-regulator",
+ .id = -1,
+ },
+};
+
+const struct regmap_config stw481x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int stw481x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct stw481x *stw481x;
+ int ret;
+ int i;
+
+ stw481x = devm_kzalloc(&client->dev, sizeof(*stw481x), GFP_KERNEL);
+ if (!stw481x)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, stw481x);
+ stw481x->client = client;
+ stw481x->map = devm_regmap_init_i2c(client, &stw481x_regmap_config);
+ /* devm_regmap_init_i2c() returns an ERR_PTR on failure */
+ if (IS_ERR(stw481x->map))
+ return PTR_ERR(stw481x->map);
+
+ ret = stw481x_startup(stw481x);
+ if (ret) {
+ dev_err(&client->dev, "chip initialization failed\n");
+ return ret;
+ }
+
+ /* Set up and register the platform devices. */
+ for (i = 0; i < ARRAY_SIZE(stw481x_cells); i++) {
+ /* One state holder for all drivers, this is simple */
+ stw481x_cells[i].platform_data = stw481x;
+ stw481x_cells[i].pdata_size = sizeof(*stw481x);
+ }
+
+ ret = mfd_add_devices(&client->dev, 0, stw481x_cells,
+ ARRAY_SIZE(stw481x_cells), NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ dev_info(&client->dev, "initialized STw481x device\n");
+
+ return ret;
+}
+
+static int stw481x_remove(struct i2c_client *client)
+{
+ mfd_remove_devices(&client->dev);
+ return 0;
+}
+
+/*
+ * This ID table is completely unused, as this is a pure
+ * device-tree probed driver, but it has to be here due to
+ * the structure of the I2C core.
+ */
+static const struct i2c_device_id stw481x_id[] = {
+ { "stw481x", 0 },
+ { },
+};
+
+static const struct of_device_id stw481x_match[] = {
+ { .compatible = "st,stw4810", },
+ { .compatible = "st,stw4811", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, stw481x_match);
+
+static struct i2c_driver stw481x_driver = {
+ .driver = {
+ .name = "stw481x",
+ .of_match_table = stw481x_match,
+ },
+ .probe = stw481x_probe,
+ .remove = stw481x_remove,
+ .id_table = stw481x_id,
+};
+
+module_i2c_driver(stw481x_driver);
+
+MODULE_AUTHOR("Linus Walleij");
+MODULE_DESCRIPTION("STw481x PMIC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 70f4909fee13..87ea51dc6234 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -16,6 +16,19 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tc3589x.h>
+/**
+ * enum tc3589x_version - indicates the TC3589x version
+ */
+enum tc3589x_version {
+ TC3589X_TC35890,
+ TC3589X_TC35892,
+ TC3589X_TC35893,
+ TC3589X_TC35894,
+ TC3589X_TC35895,
+ TC3589X_TC35896,
+ TC3589X_UNKNOWN,
+};
+
#define TC3589x_CLKMODE_MODCTL_SLEEP 0x0
#define TC3589x_CLKMODE_MODCTL_OPERATION (1 << 0)
@@ -361,7 +374,21 @@ static int tc3589x_probe(struct i2c_client *i2c,
tc3589x->i2c = i2c;
tc3589x->pdata = pdata;
tc3589x->irq_base = pdata->irq_base;
- tc3589x->num_gpio = id->driver_data;
+
+ switch (id->driver_data) {
+ case TC3589X_TC35893:
+ case TC3589X_TC35895:
+ case TC3589X_TC35896:
+ tc3589x->num_gpio = 20;
+ break;
+ case TC3589X_TC35890:
+ case TC3589X_TC35892:
+ case TC3589X_TC35894:
+ case TC3589X_UNKNOWN:
+ default:
+ tc3589x->num_gpio = 24;
+ break;
+ }
i2c_set_clientdata(i2c, tc3589x);
@@ -432,7 +459,13 @@ static int tc3589x_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(tc3589x_dev_pm_ops, tc3589x_suspend, tc3589x_resume);
static const struct i2c_device_id tc3589x_id[] = {
- { "tc3589x", 24 },
+ { "tc35890", TC3589X_TC35890 },
+ { "tc35892", TC3589X_TC35892 },
+ { "tc35893", TC3589X_TC35893 },
+ { "tc35894", TC3589X_TC35894 },
+ { "tc35895", TC3589X_TC35895 },
+ { "tc35896", TC3589X_TC35896 },
+ { "tc3589x", TC3589X_UNKNOWN },
{ }
};
MODULE_DEVICE_TABLE(i2c, tc3589x_id);
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c
index 1c2b994e1f6c..71e3e0c5bf73 100644
--- a/drivers/mfd/ti-ssp.c
+++ b/drivers/mfd/ti-ssp.c
@@ -445,7 +445,6 @@ static int ti_ssp_remove(struct platform_device *pdev)
iounmap(ssp->regs);
release_mem_region(ssp->res->start, resource_size(ssp->res));
kfree(ssp);
- dev_set_drvdata(dev, NULL);
return 0;
}
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index baaf5a8123bb..88718abfb9ba 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -56,21 +56,25 @@ EXPORT_SYMBOL_GPL(am335x_tsc_se_update);
void am335x_tsc_se_set(struct ti_tscadc_dev *tsadc, u32 val)
{
- spin_lock(&tsadc->reg_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsadc->reg_lock, flags);
tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE);
tsadc->reg_se_cache |= val;
am335x_tsc_se_update(tsadc);
- spin_unlock(&tsadc->reg_lock);
+ spin_unlock_irqrestore(&tsadc->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_set);
void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val)
{
- spin_lock(&tsadc->reg_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsadc->reg_lock, flags);
tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE);
tsadc->reg_se_cache &= ~val;
am335x_tsc_se_update(tsadc);
- spin_unlock(&tsadc->reg_lock);
+ spin_unlock_irqrestore(&tsadc->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_clr);
@@ -95,7 +99,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
const __be32 *cur;
u32 val;
int err, ctrl;
- int clk_value, clock_rate;
+ int clock_rate;
int tsc_wires = 0, adc_channels = 0, total_channels;
int readouts = 0;
@@ -196,11 +200,11 @@ static int ti_tscadc_probe(struct platform_device *pdev)
}
clock_rate = clk_get_rate(clk);
clk_put(clk);
- clk_value = clock_rate / ADC_CLK;
+ tscadc->clk_div = clock_rate / ADC_CLK;
/* TSCADC_CLKDIV needs to be configured to the value minus 1 */
- clk_value = clk_value - 1;
- tscadc_writel(tscadc, REG_CLKDIV, clk_value);
+ tscadc->clk_div--;
+ tscadc_writel(tscadc, REG_CLKDIV, tscadc->clk_div);
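+ /*
+ * Worked example (values assumed for illustration, the functional clock
+ * rate is board-dependent): with a 24 MHz clock and an ADC_CLK of 3 MHz,
+ * clk_div computes to 8 and REG_CLKDIV is programmed with 7.
+ */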
/* Set the control register bits */
ctrl = CNTRLREG_STEPCONFIGWRT |
@@ -303,6 +307,8 @@ static int tscadc_resume(struct device *dev)
tscadc_writel(tscadc_dev, REG_CTRL,
(restore | CNTRLREG_TSCSSENB));
+ tscadc_writel(tscadc_dev, REG_CLKDIV, tscadc_dev->clk_div);
+
return 0;
}
@@ -326,7 +332,7 @@ static struct platform_driver ti_tscadc_driver = {
.name = "ti_am3359-tscadc",
.owner = THIS_MODULE,
.pm = TSCADC_PM_OPS,
- .of_match_table = of_match_ptr(ti_tscadc_dt_ids),
+ .of_match_table = ti_tscadc_dt_ids,
},
.probe = ti_tscadc_probe,
.remove = ti_tscadc_remove,
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index a6755ec7bd6a..dbb34f94e5e3 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -678,7 +678,7 @@ static int timb_probe(struct pci_dev *dev,
priv->ctl_mapbase = mapbase + CHIPCTLOFFSET;
if (!request_mem_region(priv->ctl_mapbase, CHIPCTLSIZE, "timb-ctl")) {
dev_err(&dev->dev, "Failed to request ctl mem\n");
- goto err_request;
+ goto err_start;
}
priv->ctl_membase = ioremap(priv->ctl_mapbase, CHIPCTLSIZE);
@@ -828,13 +828,10 @@ err_config:
iounmap(priv->ctl_membase);
err_ioremap:
release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
-err_request:
- pci_set_drvdata(dev, NULL);
err_start:
pci_disable_device(dev);
err_enable:
kfree(priv);
- pci_set_drvdata(dev, NULL);
return -ENODEV;
}
@@ -851,7 +848,6 @@ static void timb_remove(struct pci_dev *dev)
pci_disable_msix(dev);
pci_disable_device(dev);
- pci_set_drvdata(dev, NULL);
kfree(priv);
}
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 5ad4b772b097..a081b925d10b 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6507x.h>
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index b8f48647661e..b7be0b295575 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -245,7 +245,7 @@ static struct i2c_driver tps65217_driver = {
.driver = {
.name = "tps65217",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(tps65217_of_match),
+ .of_match_table = tps65217_of_match,
},
.id_table = tps65217_id_table,
.probe = tps65217_probe,
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index f54fe4d4f77b..ee61fd7c198d 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -26,6 +26,7 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/of.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6586x.h>
@@ -124,6 +125,7 @@ struct tps6586x {
struct i2c_client *client;
struct regmap *regmap;
+ int irq;
struct irq_chip irq_chip;
struct mutex irq_lock;
int irq_base;
@@ -261,12 +263,23 @@ static void tps6586x_irq_sync_unlock(struct irq_data *data)
mutex_unlock(&tps6586x->irq_lock);
}
+#ifdef CONFIG_PM_SLEEP
+static int tps6586x_irq_set_wake(struct irq_data *irq_data, unsigned int on)
+{
+ struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
+ return irq_set_irq_wake(tps6586x->irq, on);
+}
+#else
+#define tps6586x_irq_set_wake NULL
+#endif
+
static struct irq_chip tps6586x_irq_chip = {
.name = "tps6586x",
.irq_bus_lock = tps6586x_irq_lock,
.irq_bus_sync_unlock = tps6586x_irq_sync_unlock,
.irq_disable = tps6586x_irq_disable,
.irq_enable = tps6586x_irq_enable,
+ .irq_set_wake = tps6586x_irq_set_wake,
};
static int tps6586x_irq_map(struct irq_domain *h, unsigned int virq,
@@ -331,6 +344,8 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
int new_irq_base;
int irq_num = ARRAY_SIZE(tps6586x_irqs);
+ tps6586x->irq = irq;
+
mutex_init(&tps6586x->irq_lock);
for (i = 0; i < 5; i++) {
tps6586x->mask_reg[i] = 0xff;
@@ -360,10 +375,8 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
ret = request_threaded_irq(irq, NULL, tps6586x_irq, IRQF_ONESHOT,
"tps6586x", tps6586x);
- if (!ret) {
+ if (!ret)
device_init_wakeup(tps6586x->dev, 1);
- enable_irq_wake(irq);
- }
return ret;
}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index d79277204835..c0f608e3ca9e 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -25,6 +25,7 @@
#include <linux/mfd/core.h>
#include <linux/regmap.h>
#include <linux/mfd/tps65910.h>
+#include <linux/of.h>
#include <linux/of_device.h>
static struct resource rtc_resources[] = {
@@ -410,14 +411,10 @@ static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
ret = of_property_read_u32(np, "ti,vmbch-threshold", &prop);
if (!ret)
board_info->vmbch_threshold = prop;
- else if (*chip_id == TPS65911)
- dev_warn(&client->dev, "VMBCH-Threshold not specified");
ret = of_property_read_u32(np, "ti,vmbch2-threshold", &prop);
if (!ret)
board_info->vmbch2_threshold = prop;
- else if (*chip_id == TPS65911)
- dev_warn(&client->dev, "VMBCH2-Threshold not specified");
prop = of_property_read_bool(np, "ti,en-ck32k-xtal");
board_info->en_ck32k_xtal = prop;
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index daf66942071c..0779d5ab9ab1 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -565,13 +565,13 @@ static int twl6040_probe(struct i2c_client *client,
twl6040->supplies);
if (ret != 0) {
dev_err(&client->dev, "Failed to get supplies: %d\n", ret);
- goto regulator_get_err;
+ return ret;
}
ret = regulator_bulk_enable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
if (ret != 0) {
dev_err(&client->dev, "Failed to enable supplies: %d\n", ret);
- goto regulator_get_err;
+ return ret;
}
twl6040->dev = &client->dev;
@@ -619,7 +619,7 @@ static int twl6040_probe(struct i2c_client *client,
"twl6040_irq_th", twl6040);
if (ret) {
dev_err(twl6040->dev, "Thermal IRQ request failed: %d\n", ret);
- goto thirq_err;
+ goto readyirq_err;
}
/* dual-access registers controlled by I2C only */
@@ -659,21 +659,14 @@ static int twl6040_probe(struct i2c_client *client,
ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children,
NULL, 0, NULL);
if (ret)
- goto mfd_err;
+ goto readyirq_err;
return 0;
-mfd_err:
- devm_free_irq(&client->dev, twl6040->irq_th, twl6040);
-thirq_err:
- devm_free_irq(&client->dev, twl6040->irq_ready, twl6040);
readyirq_err:
regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
gpio_err:
regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
-regulator_get_err:
- i2c_set_clientdata(client, NULL);
-
return ret;
}
@@ -684,12 +677,9 @@ static int twl6040_remove(struct i2c_client *client)
if (twl6040->power_count)
twl6040_power(twl6040, 0);
- devm_free_irq(&client->dev, twl6040->irq_ready, twl6040);
- devm_free_irq(&client->dev, twl6040->irq_th, twl6040);
regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
mfd_remove_devices(&client->dev);
- i2c_set_clientdata(client, NULL);
regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index d5966e6b5a7d..0313f839e8fa 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -553,6 +553,7 @@ static int ucb1x00_probe(struct mcp *mcp)
if (ucb->irq_base < 0) {
dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
ucb->irq_base);
+ ret = ucb->irq_base;
goto err_irq_alloc;
}
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 802dd3cb18cf..1e9a4b2102f9 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -903,7 +903,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
{ 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
{ 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
- { 0x00000D50, 0x0000 }, /* R3408 - AOD wkup and trig */
{ 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
{ 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
{ 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 3113e39b318e..c5c116a7b5ff 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -592,7 +592,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x0000043E, 0x0080 }, /* R1086 - DAC Volume Limit 6R */
{ 0x0000043F, 0x0800 }, /* R1087 - Noise Gate Select 6R */
{ 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
- { 0x00000458, 0x0001 }, /* R1112 - Noise Gate Control */
+ { 0x00000458, 0x0000 }, /* R1112 - Noise Gate Control */
{ 0x00000480, 0x0040 }, /* R1152 - Class W ANC Threshold 1 */
{ 0x00000481, 0x0040 }, /* R1153 - Class W ANC Threshold 2 */
{ 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
@@ -1204,7 +1204,6 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
{ 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
{ 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
- { 0x00000D50, 0x0000 }, /* R3408 - AOD wkup and trig */
{ 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
{ 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
{ 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
@@ -2291,21 +2290,37 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
+ case ARIZONA_DSP1_SCRATCH_0:
+ case ARIZONA_DSP1_SCRATCH_1:
+ case ARIZONA_DSP1_SCRATCH_2:
+ case ARIZONA_DSP1_SCRATCH_3:
case ARIZONA_DSP2_CONTROL_1:
case ARIZONA_DSP2_CLOCKING_1:
case ARIZONA_DSP2_STATUS_1:
case ARIZONA_DSP2_STATUS_2:
case ARIZONA_DSP2_STATUS_3:
+ case ARIZONA_DSP2_SCRATCH_0:
+ case ARIZONA_DSP2_SCRATCH_1:
+ case ARIZONA_DSP2_SCRATCH_2:
+ case ARIZONA_DSP2_SCRATCH_3:
case ARIZONA_DSP3_CONTROL_1:
case ARIZONA_DSP3_CLOCKING_1:
case ARIZONA_DSP3_STATUS_1:
case ARIZONA_DSP3_STATUS_2:
case ARIZONA_DSP3_STATUS_3:
+ case ARIZONA_DSP3_SCRATCH_0:
+ case ARIZONA_DSP3_SCRATCH_1:
+ case ARIZONA_DSP3_SCRATCH_2:
+ case ARIZONA_DSP3_SCRATCH_3:
case ARIZONA_DSP4_CONTROL_1:
case ARIZONA_DSP4_CLOCKING_1:
case ARIZONA_DSP4_STATUS_1:
case ARIZONA_DSP4_STATUS_2:
case ARIZONA_DSP4_STATUS_3:
+ case ARIZONA_DSP4_SCRATCH_0:
+ case ARIZONA_DSP4_SCRATCH_1:
+ case ARIZONA_DSP4_SCRATCH_2:
+ case ARIZONA_DSP4_SCRATCH_3:
return true;
default:
return false;
@@ -2347,25 +2362,41 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_INTERRUPT_RAW_STATUS_7:
case ARIZONA_INTERRUPT_RAW_STATUS_8:
case ARIZONA_IRQ_PIN_STATUS:
+ case ARIZONA_AOD_WKUP_AND_TRIG:
case ARIZONA_AOD_IRQ1:
case ARIZONA_AOD_IRQ2:
+ case ARIZONA_AOD_IRQ_RAW_STATUS:
case ARIZONA_FX_CTRL2:
case ARIZONA_ASRC_STATUS:
case ARIZONA_DSP_STATUS:
- case ARIZONA_DSP1_CONTROL_1:
- case ARIZONA_DSP1_CLOCKING_1:
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
+ case ARIZONA_DSP1_SCRATCH_0:
+ case ARIZONA_DSP1_SCRATCH_1:
+ case ARIZONA_DSP1_SCRATCH_2:
+ case ARIZONA_DSP1_SCRATCH_3:
case ARIZONA_DSP2_STATUS_1:
case ARIZONA_DSP2_STATUS_2:
case ARIZONA_DSP2_STATUS_3:
+ case ARIZONA_DSP2_SCRATCH_0:
+ case ARIZONA_DSP2_SCRATCH_1:
+ case ARIZONA_DSP2_SCRATCH_2:
+ case ARIZONA_DSP2_SCRATCH_3:
case ARIZONA_DSP3_STATUS_1:
case ARIZONA_DSP3_STATUS_2:
case ARIZONA_DSP3_STATUS_3:
+ case ARIZONA_DSP3_SCRATCH_0:
+ case ARIZONA_DSP3_SCRATCH_1:
+ case ARIZONA_DSP3_SCRATCH_2:
+ case ARIZONA_DSP3_SCRATCH_3:
case ARIZONA_DSP4_STATUS_1:
case ARIZONA_DSP4_STATUS_2:
case ARIZONA_DSP4_STATUS_3:
+ case ARIZONA_DSP4_SCRATCH_0:
+ case ARIZONA_DSP4_SCRATCH_1:
+ case ARIZONA_DSP4_SCRATCH_2:
+ case ARIZONA_DSP4_SCRATCH_3:
return true;
default:
return false;
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index e1c283e6d4e5..030827511667 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -33,84 +33,6 @@
#include "wm8994.h"
-/**
- * wm8994_reg_read: Read a single WM8994 register.
- *
- * @wm8994: Device to read from.
- * @reg: Register to read.
- */
-int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg)
-{
- unsigned int val;
- int ret;
-
- ret = regmap_read(wm8994->regmap, reg, &val);
-
- if (ret < 0)
- return ret;
- else
- return val;
-}
-EXPORT_SYMBOL_GPL(wm8994_reg_read);
-
-/**
- * wm8994_bulk_read: Read multiple WM8994 registers
- *
- * @wm8994: Device to read from
- * @reg: First register
- * @count: Number of registers
- * @buf: Buffer to fill. The data will be returned big endian.
- */
-int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
- int count, u16 *buf)
-{
- return regmap_bulk_read(wm8994->regmap, reg, buf, count);
-}
-
-/**
- * wm8994_reg_write: Write a single WM8994 register.
- *
- * @wm8994: Device to write to.
- * @reg: Register to write to.
- * @val: Value to write.
- */
-int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
- unsigned short val)
-{
- return regmap_write(wm8994->regmap, reg, val);
-}
-EXPORT_SYMBOL_GPL(wm8994_reg_write);
-
-/**
- * wm8994_bulk_write: Write multiple WM8994 registers
- *
- * @wm8994: Device to write to
- * @reg: First register
- * @count: Number of registers
- * @buf: Buffer to write from. Data must be big-endian formatted.
- */
-int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
- int count, const u16 *buf)
-{
- return regmap_raw_write(wm8994->regmap, reg, buf, count * sizeof(u16));
-}
-EXPORT_SYMBOL_GPL(wm8994_bulk_write);
-
-/**
- * wm8994_set_bits: Set the value of a bitfield in a WM8994 register
- *
- * @wm8994: Device to write to.
- * @reg: Register to write to.
- * @mask: Mask of bits to set.
- * @val: Value to set (unshifted)
- */
-int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
- unsigned short mask, unsigned short val)
-{
- return regmap_update_bits(wm8994->regmap, reg, mask, val);
-}
-EXPORT_SYMBOL_GPL(wm8994_set_bits);
-
static struct mfd_cell wm8994_regulator_devs[] = {
{
.name = "wm8994-ldo",
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index c6bd7e84de24..7be89832db19 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -10,6 +10,8 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/completion.h>
#include <linux/miscdevice.h>
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 7b56563f8b74..08b18f3f5264 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -88,6 +88,8 @@
* interrupt source to the GPIO pin. Tada, we hid the interrupt. :)
*/
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 5d4fd69d04ca..4ef01ab67853 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -22,7 +22,7 @@
#include <linux/jiffies.h>
#include <linux/of.h>
#include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
/*
* I2C EEPROMs from most vendors are inexpensive and mostly interchangeable.
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 0ab7c922212c..a511b2a713b3 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -145,15 +145,17 @@ static ssize_t type_show(struct device *dev, struct device_attribute *attr,
struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
return sprintf(buf, "%x", sock->type);
}
+static DEVICE_ATTR_RO(type);
-static struct device_attribute tifm_dev_attrs[] = {
- __ATTR(type, S_IRUGO, type_show, NULL),
- __ATTR_NULL
+static struct attribute *tifm_dev_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(tifm_dev);
static struct bus_type tifm_bus_type = {
.name = "tifm",
- .dev_attrs = tifm_dev_attrs,
+ .dev_groups = tifm_dev_groups,
.match = tifm_bus_match,
.uevent = tifm_uevent,
.probe = tifm_device_probe,
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1a3163f1407e..29d5d988a51c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2448,7 +2448,6 @@ static int _mmc_blk_suspend(struct mmc_card *card)
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
- pm_runtime_get_sync(&card->dev);
mmc_queue_suspend(&md->queue);
list_for_each_entry(part_md, &md->part, part) {
mmc_queue_suspend(&part_md->queue);
@@ -2483,7 +2482,6 @@ static int mmc_blk_resume(struct mmc_card *card)
list_for_each_entry(part_md, &md->part, part) {
mmc_queue_resume(&part_md->queue);
}
- pm_runtime_put(&card->dev);
}
return 0;
}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fa9632eb63f1..357bbc54fe4b 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -15,6 +15,7 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -196,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = *mmc_dev(host)->dma_mask;
+ limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
mq->card = card;
mq->queue = blk_init_queue(mmc_request_fn, lock);
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 704bf66f5873..64145a32b917 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -27,7 +27,7 @@
#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
-static ssize_t mmc_type_show(struct device *dev,
+static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mmc_card *card = mmc_dev_to_card(dev);
@@ -45,11 +45,13 @@ static ssize_t mmc_type_show(struct device *dev,
return -EFAULT;
}
}
+static DEVICE_ATTR_RO(type);
-static struct device_attribute mmc_dev_attrs[] = {
- __ATTR(type, S_IRUGO, mmc_type_show, NULL),
- __ATTR_NULL,
+static struct attribute *mmc_dev_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(mmc_dev);
/*
* This currently matches any MMC driver to any MMC card - drivers
@@ -218,7 +220,7 @@ static const struct dev_pm_ops mmc_bus_pm_ops = {
static struct bus_type mmc_bus_type = {
.name = "mmc",
- .dev_attrs = mmc_dev_attrs,
+ .dev_groups = mmc_dev_groups,
.match = mmc_bus_match,
.uevent = mmc_bus_uevent,
.probe = mmc_bus_probe,
@@ -340,7 +342,7 @@ int mmc_add_card(struct mmc_card *card)
break;
}
- if (mmc_sd_card_uhs(card) &&
+ if (mmc_card_uhs(card) &&
(card->sd_bus_speed < ARRAY_SIZE(uhs_speeds)))
uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed];
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index bf18b6bfce48..57a2b403bf8e 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -23,6 +23,7 @@
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
@@ -301,7 +302,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
}
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal);
+ EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal, true);
if (err) {
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
@@ -918,31 +919,6 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
EXPORT_SYMBOL(__mmc_claim_host);
/**
- * mmc_try_claim_host - try exclusively to claim a host
- * @host: mmc host to claim
- *
- * Returns %1 if the host is claimed, %0 otherwise.
- */
-int mmc_try_claim_host(struct mmc_host *host)
-{
- int claimed_host = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
- if (!host->claimed || host->claimer == current) {
- host->claimed = 1;
- host->claimer = current;
- host->claim_cnt += 1;
- claimed_host = 1;
- }
- spin_unlock_irqrestore(&host->lock, flags);
- if (host->ops->enable && claimed_host && host->claim_cnt == 1)
- host->ops->enable(host);
- return claimed_host;
-}
-EXPORT_SYMBOL(mmc_try_claim_host);
-
-/**
* mmc_release_host - release a host
* @host: mmc host to release
*
@@ -1382,22 +1358,31 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
int bit;
- ocr &= host->ocr_avail;
+ /*
+ * Sanity check the voltages that the card claims to
+ * support.
+ */
+ if (ocr & 0x7F) {
+ dev_warn(mmc_dev(host),
+ "card claims to support voltages below defined range\n");
+ ocr &= ~0x7F;
+ }
- bit = ffs(ocr);
- if (bit) {
- bit -= 1;
+ ocr &= host->ocr_avail;
+ if (!ocr) {
+ dev_warn(mmc_dev(host), "no support for card's volts\n");
+ return 0;
+ }
+ if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
+ bit = ffs(ocr) - 1;
ocr &= 3 << bit;
-
- mmc_host_clk_hold(host);
- host->ios.vdd = bit;
- mmc_set_ios(host);
- mmc_host_clk_release(host);
+ mmc_power_cycle(host, ocr);
} else {
- pr_warning("%s: host doesn't support card's voltages\n",
- mmc_hostname(host));
- ocr = 0;
+ bit = fls(ocr) - 1;
+ ocr &= 3 << bit;
+ if (bit != host->ios.vdd)
+ dev_warn(mmc_dev(host), "exceeding card's volts\n");
}
return ocr;
@@ -1422,7 +1407,7 @@ int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
}
-int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
{
struct mmc_command cmd = {0};
int err = 0;
@@ -1504,7 +1489,7 @@ power_cycle:
if (err) {
pr_debug("%s: Signal voltage switch failed, "
"power cycling card\n", mmc_hostname(host));
- mmc_power_cycle(host);
+ mmc_power_cycle(host, ocr);
}
mmc_host_clk_release(host);
@@ -1545,22 +1530,14 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
* If a host does all the power sequencing itself, ignore the
* initial MMC_POWER_UP stage.
*/
-void mmc_power_up(struct mmc_host *host)
+void mmc_power_up(struct mmc_host *host, u32 ocr)
{
- int bit;
-
if (host->ios.power_mode == MMC_POWER_ON)
return;
mmc_host_clk_hold(host);
- /* If ocr is set, we use it */
- if (host->ocr)
- bit = ffs(host->ocr) - 1;
- else
- bit = fls(host->ocr_avail) - 1;
-
- host->ios.vdd = bit;
+ host->ios.vdd = fls(ocr) - 1;
if (mmc_host_is_spi(host))
host->ios.chip_select = MMC_CS_HIGH;
else
@@ -1604,13 +1581,6 @@ void mmc_power_off(struct mmc_host *host)
host->ios.clock = 0;
host->ios.vdd = 0;
-
- /*
- * Reset ocr mask to be the highest possible voltage supported for
- * this mmc host. This value will be used at next power up.
- */
- host->ocr = 1 << (fls(host->ocr_avail) - 1);
-
if (!mmc_host_is_spi(host)) {
host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
host->ios.chip_select = MMC_CS_DONTCARE;
@@ -1630,12 +1600,12 @@ void mmc_power_off(struct mmc_host *host)
mmc_host_clk_release(host);
}
-void mmc_power_cycle(struct mmc_host *host)
+void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
mmc_power_off(host);
/* Wait at least 1 ms according to SD spec */
mmc_delay(1);
- mmc_power_up(host);
+ mmc_power_up(host, ocr);
}
/*
@@ -1723,6 +1693,28 @@ void mmc_detach_bus(struct mmc_host *host)
mmc_bus_put(host);
}
+static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
+ bool cd_irq)
+{
+#ifdef CONFIG_MMC_DEBUG
+ unsigned long flags;
+ spin_lock_irqsave(&host->lock, flags);
+ WARN_ON(host->removed);
+ spin_unlock_irqrestore(&host->lock, flags);
+#endif
+
+ /*
+ * If the device is configured as a wakeup source, prevent a new sleep
+ * for 5 s to give user space time to consume the event.
+ */
+ if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
+ device_can_wakeup(mmc_dev(host)))
+ pm_wakeup_event(mmc_dev(host), 5000);
+
+ host->detect_change = 1;
+ mmc_schedule_delayed_work(&host->detect, delay);
+}
+
/**
* mmc_detect_change - process change of state on a MMC socket
* @host: host which changed state.
@@ -1735,16 +1727,8 @@ void mmc_detach_bus(struct mmc_host *host)
*/
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
-#ifdef CONFIG_MMC_DEBUG
- unsigned long flags;
- spin_lock_irqsave(&host->lock, flags);
- WARN_ON(host->removed);
- spin_unlock_irqrestore(&host->lock, flags);
-#endif
- host->detect_change = 1;
- mmc_schedule_delayed_work(&host->detect, delay);
+ _mmc_detect_change(host, delay, true);
}
-
EXPORT_SYMBOL(mmc_detect_change);
void mmc_init_erase(struct mmc_card *card)
@@ -2334,7 +2318,7 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
pr_info("%s: %s: trying to init card at %u Hz\n",
mmc_hostname(host), __func__, host->f_init);
#endif
- mmc_power_up(host);
+ mmc_power_up(host, host->ocr_avail);
/*
* Some eMMCs (with VCCQ always on) may not be reset after power up, so
@@ -2423,7 +2407,7 @@ int mmc_detect_card_removed(struct mmc_host *host)
* rescan handle the card removal.
*/
cancel_delayed_work(&host->detect);
- mmc_detect_change(host, 0);
+ _mmc_detect_change(host, 0, false);
}
}
@@ -2504,8 +2488,8 @@ void mmc_start_host(struct mmc_host *host)
if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
mmc_power_off(host);
else
- mmc_power_up(host);
- mmc_detect_change(host, 0);
+ mmc_power_up(host, host->ocr_avail);
+ _mmc_detect_change(host, 0, false);
}
void mmc_stop_host(struct mmc_host *host)
@@ -2583,7 +2567,7 @@ int mmc_power_restore_host(struct mmc_host *host)
return -EINVAL;
}
- mmc_power_up(host);
+ mmc_power_up(host, host->card->ocr);
ret = host->bus_ops->power_restore(host);
mmc_bus_put(host);
@@ -2657,28 +2641,6 @@ EXPORT_SYMBOL(mmc_cache_ctrl);
#ifdef CONFIG_PM
-/**
- * mmc_suspend_host - suspend a host
- * @host: mmc host
- */
-int mmc_suspend_host(struct mmc_host *host)
-{
- /* This function is deprecated */
- return 0;
-}
-EXPORT_SYMBOL(mmc_suspend_host);
-
-/**
- * mmc_resume_host - resume a previously suspended host
- * @host: mmc host
- */
-int mmc_resume_host(struct mmc_host *host)
-{
- /* This function is deprecated */
- return 0;
-}
-EXPORT_SYMBOL(mmc_resume_host);
-
/* Do the card removal on suspend if the card is assumed removable.
 * Do that in the pm notifier while userspace isn't yet frozen, so we will
 * be able to sync the card.
@@ -2724,7 +2686,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 0;
spin_unlock_irqrestore(&host->lock, flags);
- mmc_detect_change(host, 0);
+ _mmc_detect_change(host, 0, false);
}
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 5345d156493e..443a584660f0 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -42,13 +42,13 @@ void mmc_set_ungated(struct mmc_host *host);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
-int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr);
int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
-void mmc_power_up(struct mmc_host *host);
+void mmc_power_up(struct mmc_host *host, u32 ocr);
void mmc_power_off(struct mmc_host *host);
-void mmc_power_cycle(struct mmc_host *host);
+void mmc_power_cycle(struct mmc_host *host, u32 ocr);
static inline void mmc_delay(unsigned int ms)
{
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 6d02012a1d0b..f631f5a9bf79 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -934,6 +935,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
goto err;
}
+ card->ocr = ocr;
card->type = MMC_TYPE_MMC;
card->rca = 1;
memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
@@ -1404,9 +1406,9 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
if (notify_type == EXT_CSD_POWER_OFF_LONG)
timeout = card->ext_csd.power_off_longtime;
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_POWER_OFF_NOTIFICATION,
- notify_type, timeout);
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ notify_type, timeout, true, false);
if (err)
pr_err("%s: Power Off Notification timed out, %u\n",
mmc_hostname(card->host), timeout);
@@ -1477,6 +1479,9 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
mmc_claim_host(host);
+ if (mmc_card_suspended(host->card))
+ goto out;
+
if (mmc_card_doing_bkops(host->card)) {
err = mmc_stop_bkops(host->card);
if (err)
@@ -1496,51 +1501,93 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
err = mmc_deselect_cards(host);
host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
- if (!err)
+ if (!err) {
mmc_power_off(host);
+ mmc_card_set_suspended(host->card);
+ }
out:
mmc_release_host(host);
return err;
}
/*
- * Suspend callback from host.
+ * Suspend callback
*/
static int mmc_suspend(struct mmc_host *host)
{
- return _mmc_suspend(host, true);
-}
+ int err;
-/*
- * Shutdown callback
- */
-static int mmc_shutdown(struct mmc_host *host)
-{
- return _mmc_suspend(host, false);
+ err = _mmc_suspend(host, true);
+ if (!err) {
+ pm_runtime_disable(&host->card->dev);
+ pm_runtime_set_suspended(&host->card->dev);
+ }
+
+ return err;
}
/*
- * Resume callback from host.
- *
* This function tries to determine if the same card is still present
* and, if so, restore all state to it.
*/
-static int mmc_resume(struct mmc_host *host)
+static int _mmc_resume(struct mmc_host *host)
{
- int err;
+ int err = 0;
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
- mmc_power_up(host);
- mmc_select_voltage(host, host->ocr);
- err = mmc_init_card(host, host->ocr, host->card);
+
+ if (!mmc_card_suspended(host->card))
+ goto out;
+
+ mmc_power_up(host, host->card->ocr);
+ err = mmc_init_card(host, host->card->ocr, host->card);
+ mmc_card_clr_suspended(host->card);
+
+out:
mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Shutdown callback
+ */
+static int mmc_shutdown(struct mmc_host *host)
+{
+ int err = 0;
+
+ /*
+ * In a specific case for poweroff notify, we need to resume the card
+ * before we can shutdown it properly.
+ */
+ if (mmc_can_poweroff_notify(host->card) &&
+ !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
+ err = _mmc_resume(host);
+
+ if (!err)
+ err = _mmc_suspend(host, false);
return err;
}
+/*
+ * Callback for resume.
+ */
+static int mmc_resume(struct mmc_host *host)
+{
+ int err = 0;
+
+ if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
+ err = _mmc_resume(host);
+ pm_runtime_set_active(&host->card->dev);
+ pm_runtime_mark_last_busy(&host->card->dev);
+ }
+ pm_runtime_enable(&host->card->dev);
+
+ return err;
+}
/*
* Callback for runtime_suspend.
@@ -1552,18 +1599,11 @@ static int mmc_runtime_suspend(struct mmc_host *host)
if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
return 0;
- mmc_claim_host(host);
-
- err = mmc_suspend(host);
- if (err) {
+ err = _mmc_suspend(host, true);
+ if (err)
pr_err("%s: error %d doing aggessive suspend\n",
mmc_hostname(host), err);
- goto out;
- }
- mmc_power_off(host);
-out:
- mmc_release_host(host);
return err;
}
@@ -1574,18 +1614,14 @@ static int mmc_runtime_resume(struct mmc_host *host)
{
int err;
- if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
+ if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
return 0;
- mmc_claim_host(host);
-
- mmc_power_up(host);
- err = mmc_resume(host);
+ err = _mmc_resume(host);
if (err)
pr_err("%s: error %d doing aggessive resume\n",
mmc_hostname(host), err);
- mmc_release_host(host);
return 0;
}
@@ -1595,7 +1631,7 @@ static int mmc_power_restore(struct mmc_host *host)
host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
mmc_claim_host(host);
- ret = mmc_init_card(host, host->ocr, host->card);
+ ret = mmc_init_card(host, host->card->ocr, host->card);
mmc_release_host(host);
return ret;
@@ -1640,7 +1676,7 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
int mmc_attach_mmc(struct mmc_host *host)
{
int err;
- u32 ocr;
+ u32 ocr, rocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
@@ -1666,23 +1702,12 @@ int mmc_attach_mmc(struct mmc_host *host)
goto err;
}
- /*
- * Sanity check the voltages that the card claims to
- * support.
- */
- if (ocr & 0x7F) {
- pr_warning("%s: card claims to support voltages "
- "below the defined range. These will be ignored.\n",
- mmc_hostname(host));
- ocr &= ~0x7F;
- }
-
- host->ocr = mmc_select_voltage(host, ocr);
+ rocr = mmc_select_voltage(host, ocr);
/*
* Can we support the voltage of the card?
*/
- if (!host->ocr) {
+ if (!rocr) {
err = -EINVAL;
goto err;
}
@@ -1690,7 +1715,7 @@ int mmc_attach_mmc(struct mmc_host *host)
/*
* Detect and init the card.
*/
- err = mmc_init_card(host, host->ocr, NULL);
+ err = mmc_init_card(host, rocr, NULL);
if (err)
goto err;
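
With the reorganised bus ops above, a system resume only re-initialises the card immediately when the host does not advertise MMC_CAP_RUNTIME_RESUME; otherwise the card stays suspended and is brought back by mmc_runtime_resume() on first use. A minimal sketch of how a host driver might opt in; the foo_* names are illustrative only and the usual host setup (mmc->ops, mmc->ocr_avail, ...) is elided:

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/mmc/host.h>

	struct foo_host {
		int id;			/* illustrative private state */
	};

	static int foo_mmc_probe(struct platform_device *pdev)
	{
		struct mmc_host *mmc;

		mmc = mmc_alloc_host(sizeof(struct foo_host), &pdev->dev);
		if (!mmc)
			return -ENOMEM;

		/* ... normal host setup (mmc->ops, mmc->ocr_avail, ...) elided ... */

		/* Defer card re-initialisation from system resume to runtime resume */
		mmc->caps |= MMC_CAP_RUNTIME_RESUME;
		/* Optionally also power the card off while runtime suspended */
		mmc->caps |= MMC_CAP_AGGRESSIVE_PM;

		return mmc_add_host(mmc);
	}
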
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index ef183483d5b6..aae8d8b45549 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -23,6 +23,40 @@
#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
+ bool ignore_crc)
+{
+ int err;
+ struct mmc_command cmd = {0};
+
+ BUG_ON(!card);
+ BUG_ON(!card->host);
+
+ cmd.opcode = MMC_SEND_STATUS;
+ if (!mmc_host_is_spi(card->host))
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+ if (ignore_crc)
+ cmd.flags &= ~MMC_RSP_CRC;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ /* NOTE: callers are required to understand the difference
+ * between "native" and SPI format status words!
+ */
+ if (status)
+ *status = cmd.resp[0];
+
+ return 0;
+}
+
+int mmc_send_status(struct mmc_card *card, u32 *status)
+{
+ return __mmc_send_status(card, status, false);
+}
+
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
int err;
@@ -370,16 +404,18 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
* @timeout_ms: timeout (ms) for operation performed by register write,
* timeout of zero implies maximum possible timeout
* @use_busy_signal: use the busy signal as response type
+ * @send_status: send status cmd to poll for busy
*
* Modifies the EXT_CSD register for selected card.
*/
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
- unsigned int timeout_ms, bool use_busy_signal)
+ unsigned int timeout_ms, bool use_busy_signal, bool send_status)
{
int err;
struct mmc_command cmd = {0};
unsigned long timeout;
u32 status;
+ bool ignore_crc = false;
BUG_ON(!card);
BUG_ON(!card->host);
@@ -408,17 +444,37 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (!use_busy_signal)
return 0;
- /* Must check status to be sure of no errors */
+ /*
+	 * Must check status to be sure of no errors.
+	 * If CMD13 is used to poll for busy completion of the timing change,
+	 * disable the CRC error check.
+ */
+ if (index == EXT_CSD_HS_TIMING &&
+ !(card->host->caps & MMC_CAP_WAIT_WHILE_BUSY))
+ ignore_crc = true;
+
timeout = jiffies + msecs_to_jiffies(MMC_OPS_TIMEOUT_MS);
do {
- err = mmc_send_status(card, &status);
- if (err)
- return err;
+ if (send_status) {
+ err = __mmc_send_status(card, &status, ignore_crc);
+ if (err)
+ return err;
+ }
if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
break;
if (mmc_host_is_spi(card->host))
break;
+ /*
+		 * If we are not allowed to issue a status command and the host
+		 * doesn't support MMC_CAP_WAIT_WHILE_BUSY, we can only rely on
+		 * waiting for the stated timeout to be sufficient.
+ */
+ if (!send_status) {
+ mmc_delay(timeout_ms);
+ return 0;
+ }
+
/* Timeout if the device never leaves the program state. */
if (time_after(jiffies, timeout)) {
pr_err("%s: Card stuck in programming state! %s\n",
@@ -445,36 +501,10 @@ EXPORT_SYMBOL_GPL(__mmc_switch);
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms)
{
- return __mmc_switch(card, set, index, value, timeout_ms, true);
+ return __mmc_switch(card, set, index, value, timeout_ms, true, true);
}
EXPORT_SYMBOL_GPL(mmc_switch);
-int mmc_send_status(struct mmc_card *card, u32 *status)
-{
- int err;
- struct mmc_command cmd = {0};
-
- BUG_ON(!card);
- BUG_ON(!card->host);
-
- cmd.opcode = MMC_SEND_STATUS;
- if (!mmc_host_is_spi(card->host))
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
-
- err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
- if (err)
- return err;
-
- /* NOTE: callers are required to understand the difference
- * between "native" and SPI format status words!
- */
- if (status)
- *status = cmd.resp[0];
-
- return 0;
-}
-
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
u8 len)
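
The reworked __mmc_switch() above lets callers choose whether busy completion is polled with CMD13 (send_status); when polling is skipped and the host lacks MMC_CAP_WAIT_WHILE_BUSY, the core simply waits out timeout_ms. A rough sketch of the two call styles implied by this patch; it assumes a fully initialised struct mmc_card *card is in scope, error handling is trimmed, and the EXT_CSD bus-width constants come from linux/mmc/mmc.h rather than this hunk:

	int err;

	/* Ordinary EXT_CSD write: CMD13 is polled until the card leaves busy */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_4, card->ext_csd.generic_cmd6_time);

	/* Power-off notification, as in mmc_poweroff_notify() above:
	 * keep the busy signal handling but skip the CMD13 polling */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_POWER_OFF_NOTIFICATION,
			   EXT_CSD_POWER_OFF_LONG,
			   card->ext_csd.power_off_longtime,
			   true, false);
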
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 5e8823dc3ef6..6f42050b7ccc 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -721,6 +722,7 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
int err;
u32 max_current;
int retries = 10;
+ u32 pocr = ocr;
try_again:
if (!retries) {
@@ -773,7 +775,8 @@ try_again:
*/
if (!mmc_host_is_spi(host) && rocr &&
((*rocr & 0x41000000) == 0x41000000)) {
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
+ pocr);
if (err == -EAGAIN) {
retries--;
goto try_again;
@@ -935,6 +938,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (IS_ERR(card))
return PTR_ERR(card);
+ card->ocr = ocr;
card->type = MMC_TYPE_SD;
memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
}
@@ -1064,10 +1068,7 @@ static void mmc_sd_detect(struct mmc_host *host)
}
}
-/*
- * Suspend callback from host.
- */
-static int mmc_sd_suspend(struct mmc_host *host)
+static int _mmc_sd_suspend(struct mmc_host *host)
{
int err = 0;
@@ -1075,34 +1076,77 @@ static int mmc_sd_suspend(struct mmc_host *host)
BUG_ON(!host->card);
mmc_claim_host(host);
+
+ if (mmc_card_suspended(host->card))
+ goto out;
+
if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
host->card->state &= ~MMC_STATE_HIGHSPEED;
- if (!err)
+ if (!err) {
mmc_power_off(host);
+ mmc_card_set_suspended(host->card);
+ }
+
+out:
mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Callback for suspend
+ */
+static int mmc_sd_suspend(struct mmc_host *host)
+{
+ int err;
+
+ err = _mmc_sd_suspend(host);
+ if (!err) {
+ pm_runtime_disable(&host->card->dev);
+ pm_runtime_set_suspended(&host->card->dev);
+ }
return err;
}
/*
- * Resume callback from host.
- *
* This function tries to determine if the same card is still present
* and, if so, restore all state to it.
*/
-static int mmc_sd_resume(struct mmc_host *host)
+static int _mmc_sd_resume(struct mmc_host *host)
{
- int err;
+ int err = 0;
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
- mmc_power_up(host);
- mmc_select_voltage(host, host->ocr);
- err = mmc_sd_init_card(host, host->ocr, host->card);
+
+ if (!mmc_card_suspended(host->card))
+ goto out;
+
+ mmc_power_up(host, host->card->ocr);
+ err = mmc_sd_init_card(host, host->card->ocr, host->card);
+ mmc_card_clr_suspended(host->card);
+
+out:
mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Callback for resume
+ */
+static int mmc_sd_resume(struct mmc_host *host)
+{
+ int err = 0;
+
+ if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
+ err = _mmc_sd_resume(host);
+ pm_runtime_set_active(&host->card->dev);
+ pm_runtime_mark_last_busy(&host->card->dev);
+ }
+ pm_runtime_enable(&host->card->dev);
return err;
}
@@ -1117,18 +1161,11 @@ static int mmc_sd_runtime_suspend(struct mmc_host *host)
if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
return 0;
- mmc_claim_host(host);
-
- err = mmc_sd_suspend(host);
- if (err) {
+ err = _mmc_sd_suspend(host);
+ if (err)
pr_err("%s: error %d doing aggessive suspend\n",
mmc_hostname(host), err);
- goto out;
- }
- mmc_power_off(host);
-out:
- mmc_release_host(host);
return err;
}
@@ -1139,18 +1176,14 @@ static int mmc_sd_runtime_resume(struct mmc_host *host)
{
int err;
- if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
+ if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
return 0;
- mmc_claim_host(host);
-
- mmc_power_up(host);
- err = mmc_sd_resume(host);
+ err = _mmc_sd_resume(host);
if (err)
pr_err("%s: error %d doing aggessive resume\n",
mmc_hostname(host), err);
- mmc_release_host(host);
return 0;
}
@@ -1160,7 +1193,7 @@ static int mmc_sd_power_restore(struct mmc_host *host)
host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_claim_host(host);
- ret = mmc_sd_init_card(host, host->ocr, host->card);
+ ret = mmc_sd_init_card(host, host->card->ocr, host->card);
mmc_release_host(host);
return ret;
@@ -1205,7 +1238,7 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
int mmc_attach_sd(struct mmc_host *host)
{
int err;
- u32 ocr;
+ u32 ocr, rocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
@@ -1229,31 +1262,12 @@ int mmc_attach_sd(struct mmc_host *host)
goto err;
}
- /*
- * Sanity check the voltages that the card claims to
- * support.
- */
- if (ocr & 0x7F) {
- pr_warning("%s: card claims to support voltages "
- "below the defined range. These will be ignored.\n",
- mmc_hostname(host));
- ocr &= ~0x7F;
- }
-
- if ((ocr & MMC_VDD_165_195) &&
- !(host->ocr_avail_sd & MMC_VDD_165_195)) {
- pr_warning("%s: SD card claims to support the "
- "incompletely defined 'low voltage range'. This "
- "will be ignored.\n", mmc_hostname(host));
- ocr &= ~MMC_VDD_165_195;
- }
-
- host->ocr = mmc_select_voltage(host, ocr);
+ rocr = mmc_select_voltage(host, ocr);
/*
* Can we support the voltage(s) of the card(s)?
*/
- if (!host->ocr) {
+ if (!rocr) {
err = -EINVAL;
goto err;
}
@@ -1261,7 +1275,7 @@ int mmc_attach_sd(struct mmc_host *host)
/*
* Detect and init the card.
*/
- err = mmc_sd_init_card(host, host->ocr, NULL);
+ err = mmc_sd_init_card(host, rocr, NULL);
if (err)
goto err;
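
mmc_set_signal_voltage() now takes the power-on OCR (pocr above) in addition to the target signal voltage; judging from the new mmc_power_cycle(host, ocr) prototype earlier in this diff, this is presumably so a failed 1.8V switch can be recovered by power-cycling the card at the OCR it was originally started with. An illustrative fragment mirroring the sd.c retry loop, with pocr, retries and the try_again label assumed in scope:

	/* pocr caches the OCR the card was powered up with */
	err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, pocr);
	if (err == -EAGAIN) {
		/* switch rejected by the card: retry the init sequence */
		retries--;
		goto try_again;
	}
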
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 80d89cff7306..4d721c6e2af0 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -593,23 +593,28 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
struct mmc_card *card;
int err;
int retries = 10;
+ u32 rocr = 0;
+ u32 ocr_card = ocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
+ /* to query card if 1.8V signalling is supported */
+ if (mmc_host_uhs(host))
+ ocr |= R4_18V_PRESENT;
+
try_again:
if (!retries) {
pr_warning("%s: Skipping voltage switch\n",
mmc_hostname(host));
ocr &= ~R4_18V_PRESENT;
- host->ocr &= ~R4_18V_PRESENT;
}
/*
* Inform the card of the voltage
*/
if (!powered_resume) {
- err = mmc_send_io_op_cond(host, host->ocr, &ocr);
+ err = mmc_send_io_op_cond(host, ocr, &rocr);
if (err)
goto err;
}
@@ -632,8 +637,8 @@ try_again:
goto err;
}
- if ((ocr & R4_MEMORY_PRESENT) &&
- mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid, NULL) == 0) {
+ if ((rocr & R4_MEMORY_PRESENT) &&
+ mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) {
card->type = MMC_TYPE_SD_COMBO;
if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
@@ -663,8 +668,9 @@ try_again:
* systems that claim 1.8v signalling in fact do not support
* it.
*/
- if (!powered_resume && (ocr & R4_18V_PRESENT) && mmc_host_uhs(host)) {
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
+ ocr);
if (err == -EAGAIN) {
sdio_reset(host);
mmc_go_idle(host);
@@ -674,12 +680,10 @@ try_again:
goto try_again;
} else if (err) {
ocr &= ~R4_18V_PRESENT;
- host->ocr &= ~R4_18V_PRESENT;
}
err = 0;
} else {
ocr &= ~R4_18V_PRESENT;
- host->ocr &= ~R4_18V_PRESENT;
}
/*
@@ -759,6 +763,7 @@ try_again:
card = oldcard;
}
+ card->ocr = ocr_card;
mmc_fixup_device(card, NULL);
if (card->type == MMC_TYPE_SD_COMBO) {
@@ -981,8 +986,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
/* Restore power if needed */
if (!mmc_card_keep_power(host)) {
- mmc_power_up(host);
- mmc_select_voltage(host, host->ocr);
+ mmc_power_up(host, host->card->ocr);
/*
* Tell runtime PM core we just powered up the card,
* since it still believes the card is powered off.
@@ -1000,7 +1004,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) {
sdio_reset(host);
mmc_go_idle(host);
- err = mmc_sdio_init_card(host, host->ocr, host->card,
+ err = mmc_sdio_init_card(host, host->card->ocr, host->card,
mmc_card_keep_power(host));
} else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
/* We may have switched to 1-bit mode during suspend */
@@ -1040,7 +1044,6 @@ static int mmc_sdio_resume(struct mmc_host *host)
static int mmc_sdio_power_restore(struct mmc_host *host)
{
int ret;
- u32 ocr;
BUG_ON(!host);
BUG_ON(!host->card);
@@ -1062,32 +1065,17 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
* for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
* harmless in other situations.
*
- * With these steps taken, mmc_select_voltage() is also required to
- * restore the correct voltage setting of the card.
*/
sdio_reset(host);
mmc_go_idle(host);
mmc_send_if_cond(host, host->ocr_avail);
- ret = mmc_send_io_op_cond(host, 0, &ocr);
+ ret = mmc_send_io_op_cond(host, 0, NULL);
if (ret)
goto out;
- if (host->ocr_avail_sdio)
- host->ocr_avail = host->ocr_avail_sdio;
-
- host->ocr = mmc_select_voltage(host, ocr & ~0x7F);
- if (!host->ocr) {
- ret = -EINVAL;
- goto out;
- }
-
- if (mmc_host_uhs(host))
- /* to query card if 1.8V signalling is supported */
- host->ocr |= R4_18V_PRESENT;
-
- ret = mmc_sdio_init_card(host, host->ocr, host->card,
+ ret = mmc_sdio_init_card(host, host->card->ocr, host->card,
mmc_card_keep_power(host));
if (!ret && host->sdio_irqs)
mmc_signal_sdio_irq(host);
@@ -1108,7 +1096,7 @@ static int mmc_sdio_runtime_suspend(struct mmc_host *host)
static int mmc_sdio_runtime_resume(struct mmc_host *host)
{
/* Restore power and re-initialize. */
- mmc_power_up(host);
+ mmc_power_up(host, host->card->ocr);
return mmc_sdio_power_restore(host);
}
@@ -1131,7 +1119,7 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
int mmc_attach_sdio(struct mmc_host *host)
{
int err, i, funcs;
- u32 ocr;
+ u32 ocr, rocr;
struct mmc_card *card;
BUG_ON(!host);
@@ -1145,23 +1133,13 @@ int mmc_attach_sdio(struct mmc_host *host)
if (host->ocr_avail_sdio)
host->ocr_avail = host->ocr_avail_sdio;
- /*
- * Sanity check the voltages that the card claims to
- * support.
- */
- if (ocr & 0x7F) {
- pr_warning("%s: card claims to support voltages "
- "below the defined range. These will be ignored.\n",
- mmc_hostname(host));
- ocr &= ~0x7F;
- }
- host->ocr = mmc_select_voltage(host, ocr);
+ rocr = mmc_select_voltage(host, ocr);
/*
* Can we support the voltage(s) of the card(s)?
*/
- if (!host->ocr) {
+ if (!rocr) {
err = -EINVAL;
goto err;
}
@@ -1169,22 +1147,10 @@ int mmc_attach_sdio(struct mmc_host *host)
/*
* Detect and init the card.
*/
- if (mmc_host_uhs(host))
- /* to query card if 1.8V signalling is supported */
- host->ocr |= R4_18V_PRESENT;
+ err = mmc_sdio_init_card(host, rocr, NULL, 0);
+ if (err)
+ goto err;
- err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
- if (err) {
- if (err == -EAGAIN) {
- /*
- * Retry initialization with S18R set to 0.
- */
- host->ocr &= ~R4_18V_PRESENT;
- err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
- }
- if (err)
- goto err;
- }
card = host->card;
/*
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 6d67492a9247..ef8956568c3a 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -34,7 +34,8 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
\
func = dev_to_sdio_func (dev); \
return sprintf (buf, format_string, func->field); \
-}
+} \
+static DEVICE_ATTR_RO(field)
sdio_config_attr(class, "0x%02x\n");
sdio_config_attr(vendor, "0x%04x\n");
@@ -47,14 +48,16 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "sdio:c%02Xv%04Xd%04X\n",
func->class, func->vendor, func->device);
}
-
-static struct device_attribute sdio_dev_attrs[] = {
- __ATTR_RO(class),
- __ATTR_RO(vendor),
- __ATTR_RO(device),
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *sdio_dev_attrs[] = {
+ &dev_attr_class.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(sdio_dev);
static const struct sdio_device_id *sdio_match_one(struct sdio_func *func,
const struct sdio_device_id *id)
@@ -225,7 +228,7 @@ static const struct dev_pm_ops sdio_bus_pm_ops = {
static struct bus_type sdio_bus_type = {
.name = "sdio",
- .dev_attrs = sdio_dev_attrs,
+ .dev_groups = sdio_dev_groups,
.match = sdio_bus_match,
.uevent = sdio_bus_uevent,
.probe = sdio_bus_probe,
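
The sysfs attributes move from the deprecated bus_type.dev_attrs array to dev_groups. DEVICE_ATTR_RO(field) relies on the field##_show() naming used by the sdio_config_attr() macro, and ATTRIBUTE_GROUPS(sdio_dev) supplies the group wrapper. Roughly, the two macros expand to something like the following (taken from the driver-core/sysfs headers, not part of this patch, and simplified):

	/* DEVICE_ATTR_RO(class), approximately: */
	struct device_attribute dev_attr_class = {
		.attr = { .name = "class", .mode = 0444 },
		.show = class_show,
	};

	/* ATTRIBUTE_GROUPS(sdio_dev), approximately: */
	static const struct attribute_group sdio_dev_group = {
		.attrs = sdio_dev_attrs,
	};
	static const struct attribute_group *sdio_dev_groups[] = {
		&sdio_dev_group,
		NULL,
	};
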
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 69e438ee043e..2cbb4516d353 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -255,7 +255,6 @@ struct atmel_mci_slot {
#define ATMCI_CARD_PRESENT 0
#define ATMCI_CARD_NEED_INIT 1
#define ATMCI_SHUTDOWN 2
-#define ATMCI_SUSPENDED 3
int detect_pin;
int wp_pin;
@@ -589,6 +588,13 @@ static void atmci_timeout_timer(unsigned long data)
if (host->mrq->cmd->data) {
host->mrq->cmd->data->error = -ETIMEDOUT;
host->data = NULL;
+ /*
+			 * With some SDIO modules, DMA transfers sometimes hang.
+			 * If stop_transfer() is not called, the DMA request is
+			 * not removed and following ones queue up unprocessed.
+ */
+ if (host->state == STATE_DATA_XFER)
+ host->stop_transfer(host);
} else {
host->mrq->cmd->error = -ETIMEDOUT;
host->cmd = NULL;
@@ -1803,12 +1809,14 @@ static void atmci_tasklet_func(unsigned long priv)
if (unlikely(status)) {
host->stop_transfer(host);
host->data = NULL;
- if (status & ATMCI_DTOE) {
- data->error = -ETIMEDOUT;
- } else if (status & ATMCI_DCRCE) {
- data->error = -EILSEQ;
- } else {
- data->error = -EIO;
+ if (data) {
+ if (status & ATMCI_DTOE) {
+ data->error = -ETIMEDOUT;
+ } else if (status & ATMCI_DCRCE) {
+ data->error = -EILSEQ;
+ } else {
+ data->error = -EIO;
+ }
}
}
@@ -2520,70 +2528,10 @@ static int __exit atmci_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int atmci_suspend(struct device *dev)
-{
- struct atmel_mci *host = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
- struct atmel_mci_slot *slot = host->slot[i];
- int ret;
-
- if (!slot)
- continue;
- ret = mmc_suspend_host(slot->mmc);
- if (ret < 0) {
- while (--i >= 0) {
- slot = host->slot[i];
- if (slot
- && test_bit(ATMCI_SUSPENDED, &slot->flags)) {
- mmc_resume_host(host->slot[i]->mmc);
- clear_bit(ATMCI_SUSPENDED, &slot->flags);
- }
- }
- return ret;
- } else {
- set_bit(ATMCI_SUSPENDED, &slot->flags);
- }
- }
-
- return 0;
-}
-
-static int atmci_resume(struct device *dev)
-{
- struct atmel_mci *host = dev_get_drvdata(dev);
- int i;
- int ret = 0;
-
- for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
- struct atmel_mci_slot *slot = host->slot[i];
- int err;
-
- slot = host->slot[i];
- if (!slot)
- continue;
- if (!test_bit(ATMCI_SUSPENDED, &slot->flags))
- continue;
- err = mmc_resume_host(slot->mmc);
- if (err < 0)
- ret = err;
- else
- clear_bit(ATMCI_SUSPENDED, &slot->flags);
- }
-
- return ret;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume);
-
static struct platform_driver atmci_driver = {
.remove = __exit_p(atmci_remove),
.driver = {
.name = "atmel_mci",
- .pm = &atmci_pm,
.of_match_table = of_match_ptr(atmci_dt_ids),
},
};
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index df9becdd2e99..f5443a6c4915 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1157,11 +1157,6 @@ static int au1xmmc_remove(struct platform_device *pdev)
static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
- int ret;
-
- ret = mmc_suspend_host(host->mmc);
- if (ret)
- return ret;
au_writel(0, HOST_CONFIG2(host));
au_writel(0, HOST_CONFIG(host));
@@ -1178,7 +1173,7 @@ static int au1xmmc_resume(struct platform_device *pdev)
au1xmmc_reset_controller(host);
- return mmc_resume_host(host->mmc);
+ return 0;
}
#else
#define au1xmmc_suspend NULL
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 94fae2f1baaf..2b7f37e82ca9 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -391,6 +391,7 @@ static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* Disable 4 bit SDIO */
cfg &= ~SD4E;
}
+ bfin_write_SDH_CFG(cfg);
host->power_mode = ios->power_mode;
#ifndef RSI_BLKSZ
@@ -415,7 +416,6 @@ static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
cfg &= ~SD_CMD_OD;
# endif
-
if (ios->power_mode != MMC_POWER_OFF)
cfg |= PWR_ON;
else
@@ -433,7 +433,6 @@ static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
clk_ctl |= CLK_E;
host->clk_div = clk_div;
bfin_write_SDH_CLK_CTL(clk_ctl);
-
} else
sdh_stop_clock(host);
@@ -640,21 +639,15 @@ static int sdh_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int sdh_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mmc_host *mmc = platform_get_drvdata(dev);
struct bfin_sd_host *drv_data = get_sdh_data(dev);
- int ret = 0;
-
- if (mmc)
- ret = mmc_suspend_host(mmc);
peripheral_free_list(drv_data->pin_req);
- return ret;
+ return 0;
}
static int sdh_resume(struct platform_device *dev)
{
- struct mmc_host *mmc = platform_get_drvdata(dev);
struct bfin_sd_host *drv_data = get_sdh_data(dev);
int ret = 0;
@@ -665,10 +658,6 @@ static int sdh_resume(struct platform_device *dev)
}
sdh_reset();
-
- if (mmc)
- ret = mmc_resume_host(mmc);
-
return ret;
}
#else
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 9d6e2b844404..1087b4c79cd6 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -667,12 +667,6 @@ static const struct mmc_host_ops cb710_mmc_host = {
static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
{
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
- struct mmc_host *mmc = cb710_slot_to_mmc(slot);
- int err;
-
- err = mmc_suspend_host(mmc);
- if (err)
- return err;
cb710_mmc_enable_irq(slot, 0, ~0);
return 0;
@@ -681,11 +675,9 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
static int cb710_mmc_resume(struct platform_device *pdev)
{
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
- struct mmc_host *mmc = cb710_slot_to_mmc(slot);
cb710_mmc_enable_irq(slot, 0, ~0);
-
- return mmc_resume_host(mmc);
+ return 0;
}
#endif /* CONFIG_PM */
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index e9fa87df909c..d6153740b77f 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -193,7 +193,6 @@ struct mmc_davinci_host {
#define DAVINCI_MMC_DATADIR_READ 1
#define DAVINCI_MMC_DATADIR_WRITE 2
unsigned char data_dir;
- unsigned char suspended;
/* buffer is used during PIO of one scatterlist segment, and
* is updated along with buffer_bytes_left. bytes_left applies
@@ -1435,38 +1434,23 @@ static int davinci_mmcsd_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
- int ret;
- ret = mmc_suspend_host(host->mmc);
- if (!ret) {
- writel(0, host->base + DAVINCI_MMCIM);
- mmc_davinci_reset_ctrl(host, 1);
- clk_disable(host->clk);
- host->suspended = 1;
- } else {
- host->suspended = 0;
- }
+ writel(0, host->base + DAVINCI_MMCIM);
+ mmc_davinci_reset_ctrl(host, 1);
+ clk_disable(host->clk);
- return ret;
+ return 0;
}
static int davinci_mmcsd_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
- int ret;
-
- if (!host->suspended)
- return 0;
clk_enable(host->clk);
-
mmc_davinci_reset_ctrl(host, 0);
- ret = mmc_resume_host(host->mmc);
- if (!ret)
- host->suspended = 0;
- return ret;
+ return 0;
}
static const struct dev_pm_ops davinci_mmcsd_pm = {
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 6a1fa2110a05..d42e664e78bc 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -14,8 +14,10 @@
#include <linux/clk.h>
#include <linux/mmc/host.h>
#include <linux/mmc/dw_mmc.h>
+#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/slab.h>
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
@@ -30,16 +32,39 @@
#define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \
SDMMC_CLKSEL_CCLK_DRIVE(y) | \
SDMMC_CLKSEL_CCLK_DIVIDER(z))
+#define SDMMC_CLKSEL_WAKEUP_INT BIT(11)
#define EXYNOS4210_FIXED_CIU_CLK_DIV 2
#define EXYNOS4412_FIXED_CIU_CLK_DIV 4
+/* Block number in eMMC */
+#define DWMCI_BLOCK_NUM 0xFFFFFFFF
+
+#define SDMMC_EMMCP_BASE 0x1000
+#define SDMMC_MPSECURITY (SDMMC_EMMCP_BASE + 0x0010)
+#define SDMMC_MPSBEGIN0 (SDMMC_EMMCP_BASE + 0x0200)
+#define SDMMC_MPSEND0 (SDMMC_EMMCP_BASE + 0x0204)
+#define SDMMC_MPSCTRL0 (SDMMC_EMMCP_BASE + 0x020C)
+
+/* SMU control bits */
+#define DWMCI_MPSCTRL_SECURE_READ_BIT BIT(7)
+#define DWMCI_MPSCTRL_SECURE_WRITE_BIT BIT(6)
+#define DWMCI_MPSCTRL_NON_SECURE_READ_BIT BIT(5)
+#define DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT BIT(4)
+#define DWMCI_MPSCTRL_USE_FUSE_KEY BIT(3)
+#define DWMCI_MPSCTRL_ECB_MODE BIT(2)
+#define DWMCI_MPSCTRL_ENCRYPTION BIT(1)
+#define DWMCI_MPSCTRL_VALID BIT(0)
+
+#define EXYNOS_CCLKIN_MIN 50000000 /* unit: HZ */
+
/* Variations in Exynos specific dw-mshc controller */
enum dw_mci_exynos_type {
DW_MCI_TYPE_EXYNOS4210,
DW_MCI_TYPE_EXYNOS4412,
DW_MCI_TYPE_EXYNOS5250,
DW_MCI_TYPE_EXYNOS5420,
+ DW_MCI_TYPE_EXYNOS5420_SMU,
};
/* Exynos implementation specific driver private data */
@@ -48,6 +73,7 @@ struct dw_mci_exynos_priv_data {
u8 ciu_div;
u32 sdr_timing;
u32 ddr_timing;
+ u32 cur_speed;
};
static struct dw_mci_exynos_compatible {
@@ -66,44 +92,80 @@ static struct dw_mci_exynos_compatible {
}, {
.compatible = "samsung,exynos5420-dw-mshc",
.ctrl_type = DW_MCI_TYPE_EXYNOS5420,
+ }, {
+ .compatible = "samsung,exynos5420-dw-mshc-smu",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS5420_SMU,
},
};
static int dw_mci_exynos_priv_init(struct dw_mci *host)
{
- struct dw_mci_exynos_priv_data *priv;
- int idx;
-
- priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(host->dev, "mem alloc failed for private data\n");
- return -ENOMEM;
- }
+ struct dw_mci_exynos_priv_data *priv = host->priv;
- for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) {
- if (of_device_is_compatible(host->dev->of_node,
- exynos_compat[idx].compatible))
- priv->ctrl_type = exynos_compat[idx].ctrl_type;
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU) {
+ mci_writel(host, MPSBEGIN0, 0);
+ mci_writel(host, MPSEND0, DWMCI_BLOCK_NUM);
+ mci_writel(host, MPSCTRL0, DWMCI_MPSCTRL_SECURE_WRITE_BIT |
+ DWMCI_MPSCTRL_NON_SECURE_READ_BIT |
+ DWMCI_MPSCTRL_VALID |
+ DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT);
}
- host->priv = priv;
return 0;
}
static int dw_mci_exynos_setup_clock(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
+ unsigned long rate = clk_get_rate(host->ciu_clk);
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420)
- host->bus_hz /= (priv->ciu_div + 1);
- else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
- host->bus_hz /= EXYNOS4412_FIXED_CIU_CLK_DIV;
- else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
- host->bus_hz /= EXYNOS4210_FIXED_CIU_CLK_DIV;
+ host->bus_hz = rate / (priv->ciu_div + 1);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dw_mci_exynos_suspend(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ return dw_mci_suspend(host);
+}
+
+static int dw_mci_exynos_resume(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ dw_mci_exynos_priv_init(host);
+ return dw_mci_resume(host);
+}
+
+/**
+ * dw_mci_exynos_resume_noirq - Exynos-specific resume code
+ *
+ * On exynos5420 there is a silicon erratum that can leave the WAKEUP_INT
+ * bit in the CLKSEL register asserted. The bit reads as 1 when the wakeup
+ * interrupt has fired, and writing a 1 back clears it. Clear it here to
+ * prevent the interrupt from firing constantly.
+ *
+ * We run this code on all exynos variants because it doesn't hurt.
+ */
+
+static int dw_mci_exynos_resume_noirq(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+ u32 clksel;
+
+ clksel = mci_readl(host, CLKSEL);
+ if (clksel & SDMMC_CLKSEL_WAKEUP_INT)
+ mci_writel(host, CLKSEL, clksel);
return 0;
}
+#else
+#define dw_mci_exynos_suspend NULL
+#define dw_mci_exynos_resume NULL
+#define dw_mci_exynos_resume_noirq NULL
+#endif /* CONFIG_PM_SLEEP */
static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr)
{
@@ -121,40 +183,206 @@ static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr)
static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
+ unsigned int wanted = ios->clock;
+ unsigned long actual;
+ u8 div = priv->ciu_div + 1;
- if (ios->timing == MMC_TIMING_UHS_DDR50)
+ if (ios->timing == MMC_TIMING_UHS_DDR50) {
mci_writel(host, CLKSEL, priv->ddr_timing);
- else
+ /* Should be double rate for DDR mode */
+ if (ios->bus_width == MMC_BUS_WIDTH_8)
+ wanted <<= 1;
+ } else {
mci_writel(host, CLKSEL, priv->sdr_timing);
+ }
+
+ /* Don't care if wanted clock is zero */
+ if (!wanted)
+ return;
+
+ /* Guaranteed minimum frequency for cclkin */
+ if (wanted < EXYNOS_CCLKIN_MIN)
+ wanted = EXYNOS_CCLKIN_MIN;
+
+ if (wanted != priv->cur_speed) {
+ int ret = clk_set_rate(host->ciu_clk, wanted * div);
+ if (ret)
+ dev_warn(host->dev,
+ "failed to set clk-rate %u error: %d\n",
+ wanted * div, ret);
+ actual = clk_get_rate(host->ciu_clk);
+ host->bus_hz = actual / div;
+ priv->cur_speed = wanted;
+ host->current_speed = 0;
+ }
}
static int dw_mci_exynos_parse_dt(struct dw_mci *host)
{
- struct dw_mci_exynos_priv_data *priv = host->priv;
+ struct dw_mci_exynos_priv_data *priv;
struct device_node *np = host->dev->of_node;
u32 timing[2];
u32 div = 0;
+ int idx;
int ret;
- of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div);
- priv->ciu_div = div;
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(host->dev, "mem alloc failed for private data\n");
+ return -ENOMEM;
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) {
+ if (of_device_is_compatible(np, exynos_compat[idx].compatible))
+ priv->ctrl_type = exynos_compat[idx].ctrl_type;
+ }
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
+ priv->ciu_div = EXYNOS4412_FIXED_CIU_CLK_DIV - 1;
+ else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
+ priv->ciu_div = EXYNOS4210_FIXED_CIU_CLK_DIV - 1;
+ else {
+ of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div);
+ priv->ciu_div = div;
+ }
ret = of_property_read_u32_array(np,
"samsung,dw-mshc-sdr-timing", timing, 2);
if (ret)
return ret;
- priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
-
ret = of_property_read_u32_array(np,
"samsung,dw-mshc-ddr-timing", timing, 2);
if (ret)
return ret;
+ priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
+ host->priv = priv;
return 0;
}
+static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
+{
+ return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL));
+}
+
+static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
+{
+ u32 clksel;
+ clksel = mci_readl(host, CLKSEL);
+ clksel = (clksel & ~0x7) | SDMMC_CLKSEL_CCLK_SAMPLE(sample);
+ mci_writel(host, CLKSEL, clksel);
+}
+
+static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
+{
+ u32 clksel;
+ u8 sample;
+
+ clksel = mci_readl(host, CLKSEL);
+ sample = (clksel + 1) & 0x7;
+ clksel = (clksel & ~0x7) | sample;
+ mci_writel(host, CLKSEL, clksel);
+ return sample;
+}
+
+static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates)
+{
+ const u8 iter = 8;
+ u8 __c;
+ s8 i, loc = -1;
+
+ for (i = 0; i < iter; i++) {
+ __c = ror8(candiates, i);
+ if ((__c & 0xc7) == 0xc7) {
+ loc = i;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < iter; i++) {
+ __c = ror8(candiates, i);
+ if ((__c & 0x83) == 0x83) {
+ loc = i;
+ goto out;
+ }
+ }
+
+out:
+ return loc;
+}
+
+static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode,
+ struct dw_mci_tuning_data *tuning_data)
+{
+ struct dw_mci *host = slot->host;
+ struct mmc_host *mmc = slot->mmc;
+ const u8 *blk_pattern = tuning_data->blk_pattern;
+ u8 *blk_test;
+ unsigned int blksz = tuning_data->blksz;
+ u8 start_smpl, smpl, candiates = 0;
+ s8 found = -1;
+ int ret = 0;
+
+ blk_test = kmalloc(blksz, GFP_KERNEL);
+ if (!blk_test)
+ return -ENOMEM;
+
+ start_smpl = dw_mci_exynos_get_clksmpl(host);
+
+ do {
+ struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {0};
+ struct mmc_command stop = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg;
+
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.arg = 0;
+ stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+ data.blksz = blksz;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, blk_test, blksz);
+ mrq.cmd = &cmd;
+ mrq.stop = &stop;
+ mrq.data = &data;
+ host->mrq = &mrq;
+
+ mci_writel(host, TMOUT, ~0);
+ smpl = dw_mci_exynos_move_next_clksmpl(host);
+
+ mmc_wait_for_req(mmc, &mrq);
+
+ if (!cmd.error && !data.error) {
+ if (!memcmp(blk_pattern, blk_test, blksz))
+ candiates |= (1 << smpl);
+ } else {
+ dev_dbg(host->dev,
+ "Tuning error: cmd.error:%d, data.error:%d\n",
+ cmd.error, data.error);
+ }
+ } while (start_smpl != smpl);
+
+ found = dw_mci_exynos_get_best_clksmpl(candiates);
+ if (found >= 0)
+ dw_mci_exynos_set_clksmpl(host, found);
+ else
+ ret = -EIO;
+
+ kfree(blk_test);
+ return ret;
+}
+
/* Common capabilities of Exynos4/Exynos5 SoC */
static unsigned long exynos_dwmmc_caps[4] = {
MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR |
@@ -171,6 +399,7 @@ static const struct dw_mci_drv_data exynos_drv_data = {
.prepare_command = dw_mci_exynos_prepare_command,
.set_ios = dw_mci_exynos_set_ios,
.parse_dt = dw_mci_exynos_parse_dt,
+ .execute_tuning = dw_mci_exynos_execute_tuning,
};
static const struct of_device_id dw_mci_exynos_match[] = {
@@ -180,6 +409,8 @@ static const struct of_device_id dw_mci_exynos_match[] = {
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos5420-dw-mshc",
.data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos5420-dw-mshc-smu",
+ .data = &exynos_drv_data, },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
@@ -194,13 +425,20 @@ static int dw_mci_exynos_probe(struct platform_device *pdev)
return dw_mci_pltfm_register(pdev, drv_data);
}
+const struct dev_pm_ops dw_mci_exynos_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend, dw_mci_exynos_resume)
+ .resume_noirq = dw_mci_exynos_resume_noirq,
+ .thaw_noirq = dw_mci_exynos_resume_noirq,
+ .restore_noirq = dw_mci_exynos_resume_noirq,
+};
+
static struct platform_driver dw_mci_exynos_pltfm_driver = {
.probe = dw_mci_exynos_probe,
.remove = __exit_p(dw_mci_pltfm_remove),
.driver = {
.name = "dwmmc_exynos",
.of_match_table = dw_mci_exynos_match,
- .pm = &dw_mci_pltfm_pmops,
+ .pm = &dw_mci_exynos_pmops,
},
};
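
The HS200 tuning hook added above builds an 8-bit bitmap of CLKSEL sample phases that read the tuning block back correctly, and dw_mci_exynos_get_best_clksmpl() then picks a phase whose neighbours also passed. A worked example of that selection, for illustration only:

	/*
	 * Suppose phases 2..6 passed, so candiates = 0x7c (0b01111100).
	 * For i = 4: ror8(0x7c, 4) = 0xc7 and 0xc7 & 0xc7 == 0xc7, so
	 * phase 4 is returned. The 0xc7 mask tests rotated bits 0,1,2,6,7,
	 * i.e. original phases i-2..i+2 (mod 8): a phase with two passing
	 * neighbours on each side. If no such phase exists, the second
	 * loop settles for a +/-1 window (mask 0x83); if that also fails,
	 * dw_mci_exynos_execute_tuning() returns -EIO.
	 */
	found = dw_mci_exynos_get_best_clksmpl(0x7c);	/* -> 4 */
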
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 20897529ea5e..5c4965655297 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -39,7 +39,6 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
{
struct dw_mci *host;
struct resource *regs;
- int ret;
host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL);
if (!host)
@@ -59,12 +58,6 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
- if (drv_data && drv_data->init) {
- ret = drv_data->init(host);
- if (ret)
- return ret;
- }
-
platform_set_drvdata(pdev, host);
return dw_mci_probe(host);
}
diff --git a/drivers/mmc/host/dw_mmc-socfpga.c b/drivers/mmc/host/dw_mmc-socfpga.c
index 14b5961a851c..3e8e53ae3302 100644
--- a/drivers/mmc/host/dw_mmc-socfpga.c
+++ b/drivers/mmc/host/dw_mmc-socfpga.c
@@ -38,21 +38,6 @@ struct dw_mci_socfpga_priv_data {
static int dw_mci_socfpga_priv_init(struct dw_mci *host)
{
- struct dw_mci_socfpga_priv_data *priv;
-
- priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(host->dev, "mem alloc failed for private data\n");
- return -ENOMEM;
- }
-
- priv->sysreg = syscon_regmap_lookup_by_compatible("altr,sys-mgr");
- if (IS_ERR(priv->sysreg)) {
- dev_err(host->dev, "regmap for altr,sys-mgr lookup failed.\n");
- return PTR_ERR(priv->sysreg);
- }
- host->priv = priv;
-
return 0;
}
@@ -79,12 +64,24 @@ static void dw_mci_socfpga_prepare_command(struct dw_mci *host, u32 *cmdr)
static int dw_mci_socfpga_parse_dt(struct dw_mci *host)
{
- struct dw_mci_socfpga_priv_data *priv = host->priv;
+ struct dw_mci_socfpga_priv_data *priv;
struct device_node *np = host->dev->of_node;
u32 timing[2];
u32 div = 0;
int ret;
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(host->dev, "mem alloc failed for private data\n");
+ return -ENOMEM;
+ }
+
+ priv->sysreg = syscon_regmap_lookup_by_compatible("altr,sys-mgr");
+ if (IS_ERR(priv->sysreg)) {
+ dev_err(host->dev, "regmap for altr,sys-mgr lookup failed.\n");
+ return PTR_ERR(priv->sysreg);
+ }
+
ret = of_property_read_u32(np, "altr,dw-mshc-ciu-div", &div);
if (ret)
dev_info(host->dev, "No dw-mshc-ciu-div specified, assuming 1");
@@ -96,6 +93,7 @@ static int dw_mci_socfpga_parse_dt(struct dw_mci *host)
return ret;
priv->hs_timing = SYSMGR_SDMMC_CTRL_SET(timing[0], timing[1]);
+ host->priv = priv;
return 0;
}
@@ -113,7 +111,7 @@ static const struct of_device_id dw_mci_socfpga_match[] = {
};
MODULE_DEVICE_TABLE(of, dw_mci_socfpga_match);
-int dw_mci_socfpga_probe(struct platform_device *pdev)
+static int dw_mci_socfpga_probe(struct platform_device *pdev)
{
const struct dw_mci_drv_data *drv_data;
const struct of_device_id *match;
@@ -128,7 +126,7 @@ static struct platform_driver dw_mci_socfpga_pltfm_driver = {
.remove = __exit_p(dw_mci_pltfm_remove),
.driver = {
.name = "dwmmc_socfpga",
- .of_match_table = of_match_ptr(dw_mci_socfpga_match),
+ .of_match_table = dw_mci_socfpga_match,
.pm = &dw_mci_pltfm_pmops,
},
};
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 018f365e5ae4..4bce0deec362 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -29,6 +29,7 @@
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
@@ -50,6 +51,9 @@
#define DW_MCI_RECV_STATUS 2
#define DW_MCI_DMA_THRESHOLD 16
+#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
+#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
+
#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
@@ -76,42 +80,39 @@ struct idmac_desc {
};
#endif /* CONFIG_MMC_DW_IDMAC */
-/**
- * struct dw_mci_slot - MMC slot state
- * @mmc: The mmc_host representing this slot.
- * @host: The MMC controller this slot is using.
- * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX)
- * @wp_gpio: If gpio_is_valid() we'll use this to read write protect.
- * @ctype: Card type for this slot.
- * @mrq: mmc_request currently being processed or waiting to be
- * processed, or NULL when the slot is idle.
- * @queue_node: List node for placing this node in the @queue list of
- * &struct dw_mci.
- * @clock: Clock rate configured by set_ios(). Protected by host->lock.
- * @flags: Random state bits associated with the slot.
- * @id: Number of this slot.
- * @last_detect_state: Most recently observed card detect state.
- */
-struct dw_mci_slot {
- struct mmc_host *mmc;
- struct dw_mci *host;
-
- int quirks;
- int wp_gpio;
-
- u32 ctype;
-
- struct mmc_request *mrq;
- struct list_head queue_node;
+static const u8 tuning_blk_pattern_4bit[] = {
+ 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
+ 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
+ 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
+ 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
+ 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
+ 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
+ 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
+ 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
+};
- unsigned int clock;
- unsigned long flags;
-#define DW_MMC_CARD_PRESENT 0
-#define DW_MMC_CARD_NEED_INIT 1
- int id;
- int last_detect_state;
+static const u8 tuning_blk_pattern_8bit[] = {
+ 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
+ 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+ 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+ 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+ 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+ 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+ 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
+ 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
+ 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+ 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+ 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+ 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+ 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
+static inline bool dw_mci_fifo_reset(struct dw_mci *host);
+static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
+
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
@@ -249,10 +250,15 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
cmdr = cmd->opcode;
- if (cmdr == MMC_STOP_TRANSMISSION)
+ if (cmd->opcode == MMC_STOP_TRANSMISSION ||
+ cmd->opcode == MMC_GO_IDLE_STATE ||
+ cmd->opcode == MMC_GO_INACTIVE_STATE ||
+ (cmd->opcode == SD_IO_RW_DIRECT &&
+ ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
cmdr |= SDMMC_CMD_STOP;
else
- cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
+ if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
+ cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
if (cmd->flags & MMC_RSP_PRESENT) {
/* We expect a response, so set this bit */
@@ -279,6 +285,40 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
return cmdr;
}
+static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
+{
+ struct mmc_command *stop;
+ u32 cmdr;
+
+ if (!cmd->data)
+ return 0;
+
+ stop = &host->stop_abort;
+ cmdr = cmd->opcode;
+ memset(stop, 0, sizeof(struct mmc_command));
+
+ if (cmdr == MMC_READ_SINGLE_BLOCK ||
+ cmdr == MMC_READ_MULTIPLE_BLOCK ||
+ cmdr == MMC_WRITE_BLOCK ||
+ cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
+ stop->opcode = MMC_STOP_TRANSMISSION;
+ stop->arg = 0;
+ stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
+ } else if (cmdr == SD_IO_RW_EXTENDED) {
+ stop->opcode = SD_IO_RW_DIRECT;
+ stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
+ ((cmd->arg >> 28) & 0x7);
+ stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
+ } else {
+ return 0;
+ }
+
+ cmdr = stop->opcode | SDMMC_CMD_STOP |
+ SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
+
+ return cmdr;
+}
+
static void dw_mci_start_command(struct dw_mci *host,
struct mmc_command *cmd, u32 cmd_flags)
{
@@ -293,9 +333,10 @@ static void dw_mci_start_command(struct dw_mci *host,
mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
-static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
+static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
- dw_mci_start_command(host, data->stop, host->stop_cmdr);
+ struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
+ dw_mci_start_command(host, stop, host->stop_cmdr);
}
/* DMA interface functions */
@@ -304,10 +345,10 @@ static void dw_mci_stop_dma(struct dw_mci *host)
if (host->using_dma) {
host->dma_ops->stop(host);
host->dma_ops->cleanup(host);
- } else {
- /* Data transfer was stopped by the interrupt handler */
- set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
+
+ /* Data transfer was stopped by the interrupt handler */
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static int dw_mci_get_dma_dir(struct mmc_data *data)
@@ -331,6 +372,14 @@ static void dw_mci_dma_cleanup(struct dw_mci *host)
dw_mci_get_dma_dir(data));
}
+static void dw_mci_idmac_reset(struct dw_mci *host)
+{
+ u32 bmod = mci_readl(host, BMOD);
+ /* Software reset of DMA */
+ bmod |= SDMMC_IDMAC_SWRESET;
+ mci_writel(host, BMOD, bmod);
+}
+
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
u32 temp;
@@ -344,6 +393,7 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host)
/* Stop the IDMAC running */
temp = mci_readl(host, BMOD);
temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
+ temp |= SDMMC_IDMAC_SWRESET;
mci_writel(host, BMOD, temp);
}
@@ -435,7 +485,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
p->des3 = host->sg_dma;
p->des0 = IDMAC_DES0_ER;
- mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
+ dw_mci_idmac_reset(host);
/* Mask out interrupts - get Tx & Rx complete only */
mci_writel(host, IDSTS, IDMAC_INT_CLR);
@@ -532,6 +582,78 @@ static void dw_mci_post_req(struct mmc_host *mmc,
data->host_cookie = 0;
}
+static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
+{
+#ifdef CONFIG_MMC_DW_IDMAC
+ unsigned int blksz = data->blksz;
+ const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ u32 fifo_width = 1 << host->data_shift;
+ u32 blksz_depth = blksz / fifo_width, fifoth_val;
+ u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
+ int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
+
+ tx_wmark = (host->fifo_depth) / 2;
+ tx_wmark_invers = host->fifo_depth - tx_wmark;
+
+ /*
+	 * MSIZE is '1' if blksz is not a
+	 * multiple of the FIFO width.
+ */
+ if (blksz % fifo_width) {
+ msize = 0;
+ rx_wmark = 1;
+ goto done;
+ }
+
+ do {
+ if (!((blksz_depth % mszs[idx]) ||
+ (tx_wmark_invers % mszs[idx]))) {
+ msize = idx;
+ rx_wmark = mszs[idx] - 1;
+ break;
+ }
+ } while (--idx > 0);
+ /*
+ * If idx is '0', it won't be tried
+	 * If idx is '0', it won't be tried.
+	 * Thus, the initial values are used.
+done:
+ fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
+ mci_writel(host, FIFOTH, fifoth_val);
+#endif
+}
+
+static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
+{
+ unsigned int blksz = data->blksz;
+ u32 blksz_depth, fifo_depth;
+ u16 thld_size;
+
+ WARN_ON(!(data->flags & MMC_DATA_READ));
+
+ if (host->timing != MMC_TIMING_MMC_HS200 &&
+ host->timing != MMC_TIMING_UHS_SDR104)
+ goto disable;
+
+ blksz_depth = blksz / (1 << host->data_shift);
+ fifo_depth = host->fifo_depth;
+
+ if (blksz_depth > fifo_depth)
+ goto disable;
+
+ /*
+	 * If (blksz_depth) >= (fifo_depth >> 1), thld_size should be <= blksz.
+	 * If (blksz_depth) < (fifo_depth >> 1), thld_size should be = blksz.
+ * Currently just choose blksz.
+ */
+ thld_size = blksz;
+ mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
+ return;
+
+disable:
+ mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
+}
+
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
int sg_len;
@@ -556,6 +678,14 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
(unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
sg_len);
+ /*
+ * Decide the MSIZE and RX/TX Watermark.
+	 * If the current block size is the same as the previous one,
+	 * there is no need to update fifoth.
+ */
+ if (host->prev_blksz != data->blksz)
+ dw_mci_adjust_fifoth(host, data);
+
/* Enable the DMA interface */
temp = mci_readl(host, CTRL);
temp |= SDMMC_CTRL_DMA_ENABLE;
@@ -581,10 +711,12 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
host->sg = NULL;
host->data = data;
- if (data->flags & MMC_DATA_READ)
+ if (data->flags & MMC_DATA_READ) {
host->dir_status = DW_MCI_RECV_STATUS;
- else
+ dw_mci_ctrl_rd_thld(host, data);
+ } else {
host->dir_status = DW_MCI_SEND_STATUS;
+ }
if (dw_mci_submit_data_dma(host, data)) {
int flags = SG_MITER_ATOMIC;
@@ -606,6 +738,21 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
temp = mci_readl(host, CTRL);
temp &= ~SDMMC_CTRL_DMA_ENABLE;
mci_writel(host, CTRL, temp);
+
+ /*
+ * Use the initial fifoth_val for PIO mode.
+ * If next issued data may be transfered by DMA mode,
+		 * Because the next transfer may be done in DMA mode,
+		 * prev_blksz should be invalidated.
+ mci_writel(host, FIFOTH, host->fifoth_val);
+ host->prev_blksz = 0;
+ } else {
+ /*
+ * Keep the current block size.
+ * It will be used to decide whether to update
+ * fifoth register next time.
+ */
+ host->prev_blksz = data->blksz;
}
}
@@ -632,24 +779,31 @@ static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
struct dw_mci *host = slot->host;
+ unsigned int clock = slot->clock;
u32 div;
u32 clk_en_a;
- if (slot->clock != host->current_speed || force_clkinit) {
- div = host->bus_hz / slot->clock;
- if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
+ if (!clock) {
+ mci_writel(host, CLKENA, 0);
+ mci_send_cmd(slot,
+ SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+ } else if (clock != host->current_speed || force_clkinit) {
+ div = host->bus_hz / clock;
+ if (host->bus_hz % clock && host->bus_hz > clock)
/*
* move the + 1 after the divide to prevent
* over-clocking the card.
*/
div += 1;
- div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;
+ div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
- dev_info(&slot->mmc->class_dev,
- "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
- " div = %d)\n", slot->id, host->bus_hz, slot->clock,
- div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
+ if ((clock << div) != slot->__clk_old || force_clkinit)
+ dev_info(&slot->mmc->class_dev,
+				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
+ slot->id, host->bus_hz, clock,
+ div ? ((host->bus_hz / div) >> 1) :
+ host->bus_hz, div);
/* disable clock */
mci_writel(host, CLKENA, 0);
@@ -676,9 +830,12 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
mci_send_cmd(slot,
SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
- host->current_speed = slot->clock;
+		/* keep the clock value, reflecting the clock divider */
+ slot->__clk_old = clock << div;
}
+ host->current_speed = clock;
+
/* Set the current slot bus width */
mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
@@ -700,7 +857,9 @@ static void __dw_mci_start_request(struct dw_mci *host,
host->pending_events = 0;
host->completed_events = 0;
+ host->cmd_status = 0;
host->data_status = 0;
+ host->dir_status = 0;
data = cmd->data;
if (data) {
@@ -724,6 +883,8 @@ static void __dw_mci_start_request(struct dw_mci *host,
if (mrq->stop)
host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
+ else
+ host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
static void dw_mci_start_request(struct dw_mci *host,
@@ -806,14 +967,13 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
regs &= ~((0x1 << slot->id) << 16);
mci_writel(slot->host, UHS_REG, regs);
+ slot->host->timing = ios->timing;
- if (ios->clock) {
- /*
- * Use mirror of ios->clock to prevent race with mmc
- * core ios update when finding the minimum.
- */
- slot->clock = ios->clock;
- }
+ /*
+ * Use mirror of ios->clock to prevent race with mmc
+ * core ios update when finding the minimum.
+ */
+ slot->clock = ios->clock;
if (drv_data && drv_data->set_ios)
drv_data->set_ios(slot->host, ios);
@@ -939,6 +1099,38 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
}
}
+static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+ struct dw_mci_tuning_data tuning_data;
+ int err = -ENOSYS;
+
+ if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
+ tuning_data.blk_pattern = tuning_blk_pattern_8bit;
+ tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
+ } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
+ tuning_data.blk_pattern = tuning_blk_pattern_4bit;
+ tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
+ } else {
+ return -EINVAL;
+ }
+ } else if (opcode == MMC_SEND_TUNING_BLOCK) {
+ tuning_data.blk_pattern = tuning_blk_pattern_4bit;
+ tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
+ } else {
+ dev_err(host->dev,
+ "Undefined command(%d) for tuning\n", opcode);
+ return -EINVAL;
+ }
+
+ if (drv_data && drv_data->execute_tuning)
+ err = drv_data->execute_tuning(slot, opcode, &tuning_data);
+ return err;
+}
+
static const struct mmc_host_ops dw_mci_ops = {
.request = dw_mci_request,
.pre_req = dw_mci_pre_req,
@@ -947,6 +1139,7 @@ static const struct mmc_host_ops dw_mci_ops = {
.get_ro = dw_mci_get_ro,
.get_cd = dw_mci_get_cd,
.enable_sdio_irq = dw_mci_enable_sdio_irq,
+ .execute_tuning = dw_mci_execute_tuning,
};
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
@@ -978,7 +1171,7 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
spin_lock(&host->lock);
}
-static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
+static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
u32 status = host->cmd_status;
@@ -1012,12 +1205,52 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd
/* newer ip versions need a delay between retries */
if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
mdelay(20);
+ }
- if (cmd->data) {
- dw_mci_stop_dma(host);
- host->data = NULL;
+ return cmd->error;
+}
+
+static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
+{
+ u32 status = host->data_status;
+
+ if (status & DW_MCI_DATA_ERROR_FLAGS) {
+ if (status & SDMMC_INT_DRTO) {
+ data->error = -ETIMEDOUT;
+ } else if (status & SDMMC_INT_DCRC) {
+ data->error = -EILSEQ;
+ } else if (status & SDMMC_INT_EBE) {
+ if (host->dir_status ==
+ DW_MCI_SEND_STATUS) {
+ /*
+ * No data CRC status was returned.
+ * The number of bytes transferred
+ * will be exaggerated in PIO mode.
+ */
+ data->bytes_xfered = 0;
+ data->error = -ETIMEDOUT;
+ } else if (host->dir_status ==
+ DW_MCI_RECV_STATUS) {
+ data->error = -EIO;
+ }
+ } else {
+ /* SDMMC_INT_SBE is included */
+ data->error = -EIO;
}
+
+ dev_err(host->dev, "data error, status 0x%08x\n", status);
+
+ /*
+ * After an error, there may be data lingering
+ * in the FIFO
+ */
+ dw_mci_fifo_reset(host);
+ } else {
+ data->bytes_xfered = data->blocks * data->blksz;
+ data->error = 0;
}
+
+ return data->error;
}
static void dw_mci_tasklet_func(unsigned long priv)
@@ -1025,14 +1258,16 @@ static void dw_mci_tasklet_func(unsigned long priv)
struct dw_mci *host = (struct dw_mci *)priv;
struct mmc_data *data;
struct mmc_command *cmd;
+ struct mmc_request *mrq;
enum dw_mci_state state;
enum dw_mci_state prev_state;
- u32 status, ctrl;
+ unsigned int err;
spin_lock(&host->lock);
state = host->state;
data = host->data;
+ mrq = host->mrq;
do {
prev_state = state;
@@ -1049,16 +1284,23 @@ static void dw_mci_tasklet_func(unsigned long priv)
cmd = host->cmd;
host->cmd = NULL;
set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
- dw_mci_command_complete(host, cmd);
- if (cmd == host->mrq->sbc && !cmd->error) {
+ err = dw_mci_command_complete(host, cmd);
+ if (cmd == mrq->sbc && !err) {
prev_state = state = STATE_SENDING_CMD;
__dw_mci_start_request(host, host->cur_slot,
- host->mrq->cmd);
+ mrq->cmd);
goto unlock;
}
- if (!host->mrq->data || cmd->error) {
- dw_mci_request_end(host, host->mrq);
+ if (cmd->data && err) {
+ dw_mci_stop_dma(host);
+ send_stop_abort(host, data);
+ state = STATE_SENDING_STOP;
+ break;
+ }
+
+ if (!cmd->data || err) {
+ dw_mci_request_end(host, mrq);
goto unlock;
}
@@ -1069,8 +1311,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
- if (data->stop)
- send_stop_cmd(host, data);
+ send_stop_abort(host, data);
state = STATE_DATA_ERROR;
break;
}
@@ -1090,60 +1331,27 @@ static void dw_mci_tasklet_func(unsigned long priv)
host->data = NULL;
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
- status = host->data_status;
-
- if (status & DW_MCI_DATA_ERROR_FLAGS) {
- if (status & SDMMC_INT_DRTO) {
- data->error = -ETIMEDOUT;
- } else if (status & SDMMC_INT_DCRC) {
- data->error = -EILSEQ;
- } else if (status & SDMMC_INT_EBE &&
- host->dir_status ==
- DW_MCI_SEND_STATUS) {
- /*
- * No data CRC status was returned.
- * The number of bytes transferred will
- * be exaggerated in PIO mode.
- */
- data->bytes_xfered = 0;
- data->error = -ETIMEDOUT;
- } else {
- dev_err(host->dev,
- "data FIFO error "
- "(status=%08x)\n",
- status);
- data->error = -EIO;
- }
- /*
- * After an error, there may be data lingering
- * in the FIFO, so reset it - doing so
- * generates a block interrupt, hence setting
- * the scatter-gather pointer to NULL.
- */
- sg_miter_stop(&host->sg_miter);
- host->sg = NULL;
- ctrl = mci_readl(host, CTRL);
- ctrl |= SDMMC_CTRL_FIFO_RESET;
- mci_writel(host, CTRL, ctrl);
- } else {
- data->bytes_xfered = data->blocks * data->blksz;
- data->error = 0;
- }
+ err = dw_mci_data_complete(host, data);
- if (!data->stop) {
- dw_mci_request_end(host, host->mrq);
- goto unlock;
- }
+ if (!err) {
+ if (!data->stop || mrq->sbc) {
+ if (mrq->sbc)
+ data->stop->error = 0;
+ dw_mci_request_end(host, mrq);
+ goto unlock;
+ }
- if (host->mrq->sbc && !data->error) {
- data->stop->error = 0;
- dw_mci_request_end(host, host->mrq);
- goto unlock;
+ /* stop command for open-ended transfer */
+ if (data->stop)
+ send_stop_abort(host, data);
}
+ /*
+ * If err is non-zero, a stop-abort command
+ * has already been issued.
+ */
prev_state = state = STATE_SENDING_STOP;
- if (!data->error)
- send_stop_cmd(host, data);
+
/* fall through */
case STATE_SENDING_STOP:
@@ -1151,9 +1359,19 @@ static void dw_mci_tasklet_func(unsigned long priv)
&host->pending_events))
break;
+ /* CMD error in data command */
+ if (mrq->cmd->error && mrq->data)
+ dw_mci_fifo_reset(host);
+
host->cmd = NULL;
- dw_mci_command_complete(host, host->mrq->stop);
- dw_mci_request_end(host, host->mrq);
+ host->data = NULL;
+
+ if (mrq->stop)
+ dw_mci_command_complete(host, mrq->stop);
+ else
+ host->cmd_status = 0;
+
+ dw_mci_request_end(host, mrq);
goto unlock;
case STATE_DATA_ERROR:
@@ -1697,7 +1915,6 @@ static void dw_mci_work_routine_card(struct work_struct *work)
struct mmc_host *mmc = slot->mmc;
struct mmc_request *mrq;
int present;
- u32 ctrl;
present = dw_mci_get_cd(mmc);
while (present != slot->last_detect_state) {
@@ -1736,11 +1953,10 @@ static void dw_mci_work_routine_card(struct work_struct *work)
case STATE_DATA_ERROR:
if (mrq->data->error == -EINPROGRESS)
mrq->data->error = -ENOMEDIUM;
- if (!mrq->stop)
- break;
/* fall through */
case STATE_SENDING_STOP:
- mrq->stop->error = -ENOMEDIUM;
+ if (mrq->stop)
+ mrq->stop->error = -ENOMEDIUM;
break;
}
@@ -1763,23 +1979,10 @@ static void dw_mci_work_routine_card(struct work_struct *work)
if (present == 0) {
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
- /*
- * Clear down the FIFO - doing so generates a
- * block interrupt, hence setting the
- * scatter-gather pointer to NULL.
- */
- sg_miter_stop(&host->sg_miter);
- host->sg = NULL;
-
- ctrl = mci_readl(host, CTRL);
- ctrl |= SDMMC_CTRL_FIFO_RESET;
- mci_writel(host, CTRL, ctrl);
-
+ /* Clear down the FIFO */
+ dw_mci_fifo_reset(host);
#ifdef CONFIG_MMC_DW_IDMAC
- ctrl = mci_readl(host, BMOD);
- /* Software reset of DMA */
- ctrl |= SDMMC_IDMAC_SWRESET;
- mci_writel(host, BMOD, ctrl);
+ dw_mci_idmac_reset(host);
#endif
}
@@ -1901,6 +2104,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
struct dw_mci_slot *slot;
const struct dw_mci_drv_data *drv_data = host->drv_data;
int ctrl_id, ret;
+ u32 freq[2];
u8 bus_width;
mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
@@ -1916,8 +2120,14 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
mmc->ops = &dw_mci_ops;
- mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
- mmc->f_max = host->bus_hz;
+ if (of_property_read_u32_array(host->dev->of_node,
+ "clock-freq-min-max", freq, 2)) {
+ mmc->f_min = DW_MCI_FREQ_MIN;
+ mmc->f_max = DW_MCI_FREQ_MAX;
+ } else {
+ mmc->f_min = freq[0];
+ mmc->f_max = freq[1];
+ }
if (host->pdata->get_ocr)
mmc->ocr_avail = host->pdata->get_ocr(id);
@@ -1964,9 +2174,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
mmc->caps |= MMC_CAP_4_BIT_DATA;
}
- if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
- mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
-
if (host->pdata->blk_settings) {
mmc->max_segs = host->pdata->blk_settings->max_segs;
mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
@@ -2008,12 +2215,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
/* Card initially undetected */
slot->last_detect_state = 0;
- /*
- * Card may have been plugged in prior to boot so we
- * need to run the detect tasklet
- */
- queue_work(host->card_workqueue, &host->card_work);
-
return 0;
err_setup_bus:
@@ -2074,36 +2275,57 @@ no_dma:
return;
}
-static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
+static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
unsigned long timeout = jiffies + msecs_to_jiffies(500);
- unsigned int ctrl;
+ u32 ctrl;
- mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
- SDMMC_CTRL_DMA_RESET));
+ ctrl = mci_readl(host, CTRL);
+ ctrl |= reset;
+ mci_writel(host, CTRL, ctrl);
/* wait till resets clear */
do {
ctrl = mci_readl(host, CTRL);
- if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
- SDMMC_CTRL_DMA_RESET)))
+ if (!(ctrl & reset))
return true;
} while (time_before(jiffies, timeout));
- dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
+ dev_err(host->dev,
+ "Timeout resetting block (ctrl reset %#x)\n",
+ ctrl & reset);
return false;
}
+static inline bool dw_mci_fifo_reset(struct dw_mci *host)
+{
+ /*
+ * Resetting generates a block interrupt, hence setting
+ * the scatter-gather pointer to NULL.
+ */
+ if (host->sg) {
+ sg_miter_stop(&host->sg_miter);
+ host->sg = NULL;
+ }
+
+ return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
+}
+
+static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
+{
+ return dw_mci_ctrl_reset(host,
+ SDMMC_CTRL_FIFO_RESET |
+ SDMMC_CTRL_RESET |
+ SDMMC_CTRL_DMA_RESET);
+}
+
#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
char *quirk;
int id;
} of_quirks[] = {
{
- .quirk = "supports-highspeed",
- .id = DW_MCI_QUIRK_HIGHSPEED,
- }, {
.quirk = "broken-cd",
.id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
},
@@ -2158,6 +2380,15 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
if (of_find_property(np, "enable-sdio-wakeup", NULL))
pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+ if (of_find_property(np, "supports-highspeed", NULL))
+ pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
+
+ if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
+ pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+
+ if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
+ pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+
return pdata;
}
@@ -2221,6 +2452,15 @@ int dw_mci_probe(struct dw_mci *host)
host->bus_hz = clk_get_rate(host->ciu_clk);
}
+ if (drv_data && drv_data->init) {
+ ret = drv_data->init(host);
+ if (ret) {
+ dev_err(host->dev,
+ "implementation specific init failed\n");
+ goto err_clk_ciu;
+ }
+ }
+
if (drv_data && drv_data->setup_clock) {
ret = drv_data->setup_clock(host);
if (ret) {
@@ -2287,7 +2527,7 @@ int dw_mci_probe(struct dw_mci *host)
}
/* Reset all blocks */
- if (!mci_wait_reset(host->dev, host))
+ if (!dw_mci_ctrl_all_reset(host))
return -ENODEV;
host->dma_ops = host->pdata->dma_ops;
@@ -2317,8 +2557,8 @@ int dw_mci_probe(struct dw_mci *host)
fifo_size = host->pdata->fifo_depth;
}
host->fifo_depth = fifo_size;
- host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
- ((fifo_size/2) << 0));
+ host->fifoth_val =
+ SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
mci_writel(host, FIFOTH, host->fifoth_val);
/* disable clock to CIU */
@@ -2456,23 +2696,6 @@ EXPORT_SYMBOL(dw_mci_remove);
*/
int dw_mci_suspend(struct dw_mci *host)
{
- int i, ret = 0;
-
- for (i = 0; i < host->num_slots; i++) {
- struct dw_mci_slot *slot = host->slot[i];
- if (!slot)
- continue;
- ret = mmc_suspend_host(slot->mmc);
- if (ret < 0) {
- while (--i >= 0) {
- slot = host->slot[i];
- if (slot)
- mmc_resume_host(host->slot[i]->mmc);
- }
- return ret;
- }
- }
-
if (host->vmmc)
regulator_disable(host->vmmc);
@@ -2493,7 +2716,7 @@ int dw_mci_resume(struct dw_mci *host)
}
}
- if (!mci_wait_reset(host->dev, host)) {
+ if (!dw_mci_ctrl_all_reset(host)) {
ret = -ENODEV;
return ret;
}
@@ -2501,8 +2724,15 @@ int dw_mci_resume(struct dw_mci *host)
if (host->use_dma && host->dma_ops->init)
host->dma_ops->init(host);
- /* Restore the old value at FIFOTH register */
+ /*
+ * Restore the initial value of the FIFOTH register
+ * and invalidate prev_blksz by resetting it to zero.
+ */
mci_writel(host, FIFOTH, host->fifoth_val);
+ host->prev_blksz = 0;
+
+ /* Put in max timeout */
+ mci_writel(host, TMOUT, 0xFFFFFFFF);
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
@@ -2518,10 +2748,6 @@ int dw_mci_resume(struct dw_mci *host)
dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
dw_mci_setup_bus(slot, true);
}
-
- ret = mmc_resume_host(host->slot[i]->mmc);
- if (ret < 0)
- return ret;
}
return 0;
}
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 81b29941c5b9..6bf24ab917e6 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -53,6 +53,7 @@
#define SDMMC_IDINTEN 0x090
#define SDMMC_DSCADDR 0x094
#define SDMMC_BUFADDR 0x098
+#define SDMMC_CDTHRCTL 0x100
#define SDMMC_DATA(x) (x)
/*
@@ -128,6 +129,10 @@
#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
/* Status register defines */
#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF)
+/* FIFOTH register defines */
+#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \
+ ((r) & 0xFFF) << 16 | \
+ ((t) & 0xFFF))
/* Internal DMAC interrupt defines */
#define SDMMC_IDMAC_INT_AI BIT(9)
#define SDMMC_IDMAC_INT_NI BIT(8)
@@ -142,6 +147,8 @@
#define SDMMC_IDMAC_SWRESET BIT(0)
/* Version ID register define */
#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
+/* Card read threshold */
+#define SDMMC_SET_RD_THLD(v, x) (((v) & 0x1FFF) << 16 | (x))
/* Register access macros */
#define mci_readl(dev, reg) \
@@ -184,6 +191,52 @@ extern int dw_mci_resume(struct dw_mci *host);
#endif
/**
+ * struct dw_mci_slot - MMC slot state
+ * @mmc: The mmc_host representing this slot.
+ * @host: The MMC controller this slot is using.
+ * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX)
+ * @wp_gpio: If gpio_is_valid(), this GPIO is used to read the write-protect state.
+ * @ctype: Card type for this slot.
+ * @mrq: mmc_request currently being processed or waiting to be
+ * processed, or NULL when the slot is idle.
+ * @queue_node: List node for placing this node in the @queue list of
+ * &struct dw_mci.
+ * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @__clk_old: The last clock value actually set, reflecting the clock
+ * divider. Keeping track of this helps us avoid spamming the console
+ * with CONFIG_MMC_CLKGATE.
+ * @flags: Random state bits associated with the slot.
+ * @id: Number of this slot.
+ * @last_detect_state: Most recently observed card detect state.
+ */
+struct dw_mci_slot {
+ struct mmc_host *mmc;
+ struct dw_mci *host;
+
+ int quirks;
+ int wp_gpio;
+
+ u32 ctype;
+
+ struct mmc_request *mrq;
+ struct list_head queue_node;
+
+ unsigned int clock;
+ unsigned int __clk_old;
+
+ unsigned long flags;
+#define DW_MMC_CARD_PRESENT 0
+#define DW_MMC_CARD_NEED_INIT 1
+ int id;
+ int last_detect_state;
+};
+
+struct dw_mci_tuning_data {
+ const u8 *blk_pattern;
+ unsigned int blksz;
+};
+
+/**
* dw_mci driver data - dw-mshc implementation specific driver data.
* @caps: mmc subsystem specified capabilities of the controller(s).
* @init: early implementation specific initialization.
@@ -203,5 +256,7 @@ struct dw_mci_drv_data {
void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
int (*parse_dt)(struct dw_mci *host);
+ int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
+ struct dw_mci_tuning_data *tuning_data);
};
#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 66516339e3a0..de2139cf3444 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -880,8 +880,6 @@ static int jz4740_mmc_suspend(struct device *dev)
{
struct jz4740_mmc_host *host = dev_get_drvdata(dev);
- mmc_suspend_host(host->mmc);
-
jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
return 0;
@@ -893,8 +891,6 @@ static int jz4740_mmc_resume(struct device *dev)
jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
- mmc_resume_host(host->mmc);
-
return 0;
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index c3785edc0e92..f32057972dd7 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -62,6 +62,7 @@ static unsigned int fmax = 515633;
* @signal_direction: input/out direction of bus signals can be indicated
* @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
* @busy_detect: true if busy detection on dat0 is supported
+ * @pwrreg_nopower: bits in MMCIPOWER don't control the external power supply
*/
struct variant_data {
unsigned int clkreg;
@@ -76,6 +77,7 @@ struct variant_data {
bool signal_direction;
bool pwrreg_clkgate;
bool busy_detect;
+ bool pwrreg_nopower;
};
static struct variant_data variant_arm = {
@@ -109,6 +111,7 @@ static struct variant_data variant_u300 = {
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
.pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
};
static struct variant_data variant_nomadik = {
@@ -121,6 +124,7 @@ static struct variant_data variant_nomadik = {
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
.pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
};
static struct variant_data variant_ux500 = {
@@ -135,6 +139,7 @@ static struct variant_data variant_ux500 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.busy_detect = true,
+ .pwrreg_nopower = true,
};
static struct variant_data variant_ux500v2 = {
@@ -150,6 +155,7 @@ static struct variant_data variant_ux500v2 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.busy_detect = true,
+ .pwrreg_nopower = true,
};
static int mmci_card_busy(struct mmc_host *mmc)
@@ -189,6 +195,21 @@ static int mmci_validate_data(struct mmci_host *host,
return 0;
}
+static void mmci_reg_delay(struct mmci_host *host)
+{
+ /*
+ * According to the spec, at least three feedback clock cycles
+ * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
+ * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
+ * Worst delay time during card init is at 100 kHz => 30 us.
+ * Worst delay time when up and running is at 25 MHz => 120 ns.
+ */
+ if (host->cclk < 25000000)
+ udelay(30);
+ else
+ ndelay(120);
+}
+
/*
* This must be called with host->lock held
*/
@@ -1264,6 +1285,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
mmci_set_clkreg(host, ios->clock);
mmci_write_pwrreg(host, pwr);
+ mmci_reg_delay(host);
spin_unlock_irqrestore(&host->lock, flags);
@@ -1510,23 +1532,6 @@ static int mmci_probe(struct amba_device *dev,
mmc->f_max = min(host->mclk, fmax);
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
- host->pinctrl = devm_pinctrl_get(&dev->dev);
- if (IS_ERR(host->pinctrl)) {
- ret = PTR_ERR(host->pinctrl);
- goto clk_disable;
- }
-
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
-
- /* enable pins to be muxed in and configured */
- if (!IS_ERR(host->pins_default)) {
- ret = pinctrl_select_state(host->pinctrl, host->pins_default);
- if (ret)
- dev_warn(&dev->dev, "could not set default pins\n");
- } else
- dev_warn(&dev->dev, "could not get default pinstate\n");
-
/* Get regulators and the supported OCR mask */
mmc_regulator_get_supply(mmc);
if (!mmc->ocr_avail)
@@ -1725,41 +1730,67 @@ static int mmci_suspend(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
struct mmc_host *mmc = amba_get_drvdata(adev);
- int ret = 0;
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
-
- ret = mmc_suspend_host(mmc);
- if (ret == 0) {
- pm_runtime_get_sync(dev);
- writel(0, host->base + MMCIMASK0);
- }
+ pm_runtime_get_sync(dev);
+ writel(0, host->base + MMCIMASK0);
}
- return ret;
+ return 0;
}
static int mmci_resume(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
struct mmc_host *mmc = amba_get_drvdata(adev);
- int ret = 0;
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
-
writel(MCI_IRQENABLE, host->base + MMCIMASK0);
pm_runtime_put(dev);
-
- ret = mmc_resume_host(mmc);
}
- return ret;
+ return 0;
}
#endif
#ifdef CONFIG_PM_RUNTIME
+static void mmci_save(struct mmci_host *host)
+{
+ unsigned long flags;
+
+ if (host->variant->pwrreg_nopower) {
+ spin_lock_irqsave(&host->lock, flags);
+
+ writel(0, host->base + MMCIMASK0);
+ writel(0, host->base + MMCIDATACTRL);
+ writel(0, host->base + MMCIPOWER);
+ writel(0, host->base + MMCICLOCK);
+ mmci_reg_delay(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
+}
+
+static void mmci_restore(struct mmci_host *host)
+{
+ unsigned long flags;
+
+ if (host->variant->pwrreg_nopower) {
+ spin_lock_irqsave(&host->lock, flags);
+
+ writel(host->clk_reg, host->base + MMCICLOCK);
+ writel(host->datactrl_reg, host->base + MMCIDATACTRL);
+ writel(host->pwr_reg, host->base + MMCIPOWER);
+ writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ mmci_reg_delay(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+}
+
static int mmci_runtime_suspend(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
@@ -1767,6 +1798,8 @@ static int mmci_runtime_suspend(struct device *dev)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
+ pinctrl_pm_select_sleep_state(dev);
+ mmci_save(host);
clk_disable_unprepare(host->clk);
}
@@ -1781,6 +1814,8 @@ static int mmci_runtime_resume(struct device *dev)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
clk_prepare_enable(host->clk);
+ mmci_restore(host);
+ pinctrl_pm_select_default_state(dev);
}
return 0;
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 69080fab6375..168bc72f7a94 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -200,10 +200,6 @@ struct mmci_host {
struct sg_mapping_iter sg_miter;
unsigned int size;
- /* pinctrl handles */
- struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
-
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
struct dma_chan *dma_current;
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index b900de4e7e94..9405ecdaf6cf 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1416,28 +1416,10 @@ ioremap_free:
}
#ifdef CONFIG_PM
-#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
-static void
-do_resume_work(struct work_struct *work)
-{
- struct msmsdcc_host *host =
- container_of(work, struct msmsdcc_host, resume_task);
- struct mmc_host *mmc = host->mmc;
-
- if (mmc) {
- mmc_resume_host(mmc);
- if (host->stat_irq)
- enable_irq(host->stat_irq);
- }
-}
-#endif
-
-
static int
msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
{
struct mmc_host *mmc = mmc_get_drvdata(dev);
- int rc = 0;
if (mmc) {
struct msmsdcc_host *host = mmc_priv(mmc);
@@ -1445,14 +1427,11 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
if (host->stat_irq)
disable_irq(host->stat_irq);
- if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
- rc = mmc_suspend_host(mmc);
- if (!rc)
- msmsdcc_writel(host, 0, MMCIMASK0);
+ msmsdcc_writel(host, 0, MMCIMASK0);
if (host->clks_on)
msmsdcc_disable_clocks(host, 0);
}
- return rc;
+ return 0;
}
static int
@@ -1467,8 +1446,6 @@ msmsdcc_resume(struct platform_device *dev)
msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0);
- if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
- mmc_resume_host(mmc);
if (host->stat_irq)
enable_irq(host->stat_irq);
#if BUSCLK_PWRSAVE
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 06c5b0b28ebc..45aa2206741d 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -655,7 +655,7 @@ static const struct mmc_host_ops mvsd_ops = {
.enable_sdio_irq = mvsd_enable_sdio_irq,
};
-static void __init
+static void
mv_conf_mbus_windows(struct mvsd_host *host,
const struct mbus_dram_target_info *dram)
{
@@ -677,7 +677,7 @@ mv_conf_mbus_windows(struct mvsd_host *host,
}
}
-static int __init mvsd_probe(struct platform_device *pdev)
+static int mvsd_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mmc_host *mmc = NULL;
@@ -775,9 +775,9 @@ static int __init mvsd_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
- host->base = devm_request_and_ioremap(&pdev->dev, r);
- if (!host->base) {
- ret = -ENOMEM;
+ host->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
goto out;
}
@@ -819,7 +819,7 @@ out:
return ret;
}
-static int __exit mvsd_remove(struct platform_device *pdev)
+static int mvsd_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
@@ -838,33 +838,6 @@ static int __exit mvsd_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int mvsd_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct mmc_host *mmc = platform_get_drvdata(dev);
- int ret = 0;
-
- if (mmc)
- ret = mmc_suspend_host(mmc);
-
- return ret;
-}
-
-static int mvsd_resume(struct platform_device *dev)
-{
- struct mmc_host *mmc = platform_get_drvdata(dev);
- int ret = 0;
-
- if (mmc)
- ret = mmc_resume_host(mmc);
-
- return ret;
-}
-#else
-#define mvsd_suspend NULL
-#define mvsd_resume NULL
-#endif
-
static const struct of_device_id mvsdio_dt_ids[] = {
{ .compatible = "marvell,orion-sdio" },
{ /* sentinel */ }
@@ -872,16 +845,15 @@ static const struct of_device_id mvsdio_dt_ids[] = {
MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);
static struct platform_driver mvsd_driver = {
- .remove = __exit_p(mvsd_remove),
- .suspend = mvsd_suspend,
- .resume = mvsd_resume,
+ .probe = mvsd_probe,
+ .remove = mvsd_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = mvsdio_dt_ids,
},
};
-module_platform_driver_probe(mvsd_driver, mvsd_probe);
+module_platform_driver(mvsd_driver);
/* maximum card clock frequency (default 50MHz) */
module_param(maxfreq, int, 0);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index c174c6a0d224..f7199c83f5cf 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1250,28 +1250,20 @@ static int mxcmci_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct mxcmci_host *host = mmc_priv(mmc);
- int ret = 0;
- if (mmc)
- ret = mmc_suspend_host(mmc);
clk_disable_unprepare(host->clk_per);
clk_disable_unprepare(host->clk_ipg);
-
- return ret;
+ return 0;
}
static int mxcmci_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct mxcmci_host *host = mmc_priv(mmc);
- int ret = 0;
clk_prepare_enable(host->clk_per);
clk_prepare_enable(host->clk_ipg);
- if (mmc)
- ret = mmc_resume_host(mmc);
-
- return ret;
+ return 0;
}
static const struct dev_pm_ops mxcmci_pm_ops = {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index e1fa3ef735e0..50fc9df791b2 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -724,13 +724,9 @@ static int mxs_mmc_suspend(struct device *dev)
struct mmc_host *mmc = dev_get_drvdata(dev);
struct mxs_mmc_host *host = mmc_priv(mmc);
struct mxs_ssp *ssp = &host->ssp;
- int ret = 0;
-
- ret = mmc_suspend_host(mmc);
clk_disable_unprepare(ssp->clk);
-
- return ret;
+ return 0;
}
static int mxs_mmc_resume(struct device *dev)
@@ -738,13 +734,9 @@ static int mxs_mmc_resume(struct device *dev)
struct mmc_host *mmc = dev_get_drvdata(dev);
struct mxs_mmc_host *host = mmc_priv(mmc);
struct mxs_ssp *ssp = &host->ssp;
- int ret = 0;
clk_prepare_enable(ssp->clk);
-
- ret = mmc_resume_host(mmc);
-
- return ret;
+ return 0;
}
static const struct dev_pm_ops mxs_mmc_pm_ops = {
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index b94f38ec2a83..0b10a9030f4e 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -128,7 +128,6 @@ struct mmc_omap_slot {
struct mmc_omap_host {
int initialized;
- int suspended;
struct mmc_request * mrq;
struct mmc_command * cmd;
struct mmc_data * data;
@@ -1513,61 +1512,9 @@ static int mmc_omap_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
- int i, ret = 0;
- struct mmc_omap_host *host = platform_get_drvdata(pdev);
-
- if (host == NULL || host->suspended)
- return 0;
-
- for (i = 0; i < host->nr_slots; i++) {
- struct mmc_omap_slot *slot;
-
- slot = host->slots[i];
- ret = mmc_suspend_host(slot->mmc);
- if (ret < 0) {
- while (--i >= 0) {
- slot = host->slots[i];
- mmc_resume_host(slot->mmc);
- }
- return ret;
- }
- }
- host->suspended = 1;
- return 0;
-}
-
-static int mmc_omap_resume(struct platform_device *pdev)
-{
- int i, ret = 0;
- struct mmc_omap_host *host = platform_get_drvdata(pdev);
-
- if (host == NULL || !host->suspended)
- return 0;
-
- for (i = 0; i < host->nr_slots; i++) {
- struct mmc_omap_slot *slot;
- slot = host->slots[i];
- ret = mmc_resume_host(slot->mmc);
- if (ret < 0)
- return ret;
-
- host->suspended = 0;
- }
- return 0;
-}
-#else
-#define mmc_omap_suspend NULL
-#define mmc_omap_resume NULL
-#endif
-
static struct platform_driver mmc_omap_driver = {
.probe = mmc_omap_probe,
.remove = mmc_omap_remove,
- .suspend = mmc_omap_suspend,
- .resume = mmc_omap_resume,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 6ac63df645c4..dbd32ad3b749 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -75,6 +75,7 @@
#define ICE 0x1
#define ICS 0x2
#define CEN (1 << 2)
+#define CLKD_MAX 0x3FF /* max clock divisor: 1023 */
#define CLKD_MASK 0x0000FFC0
#define CLKD_SHIFT 6
#define DTO_MASK 0x000F0000
@@ -119,7 +120,8 @@
BRR_EN | BWR_EN | TC_EN | CC_EN)
#define MMC_AUTOSUSPEND_DELAY 100
-#define MMC_TIMEOUT_MS 20
+#define MMC_TIMEOUT_MS 20 /* 20 msec */
+#define MMC_TIMEOUT_US 20000 /* 20000 usec */
#define OMAP_MMC_MIN_CLOCK 400000
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
@@ -171,6 +173,10 @@ struct omap_hsmmc_host {
unsigned char bus_mode;
unsigned char power_mode;
int suspended;
+ u32 con;
+ u32 hctl;
+ u32 sysctl;
+ u32 capa;
int irq;
int use_dma, dma_ch;
struct dma_chan *tx_chan;
@@ -183,7 +189,6 @@ struct omap_hsmmc_host {
int use_reg;
int req_in_progress;
struct omap_hsmmc_next next_data;
-
struct omap_mmc_platform_data *pdata;
};
@@ -493,8 +498,8 @@ static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
if (ios->clock) {
dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
- if (dsor > 250)
- dsor = 250;
+ if (dsor > CLKD_MAX)
+ dsor = CLKD_MAX;
}
return dsor;
@@ -597,25 +602,20 @@ static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
struct mmc_ios *ios = &host->mmc->ios;
- struct omap_mmc_platform_data *pdata = host->pdata;
- int context_loss = 0;
u32 hctl, capa;
unsigned long timeout;
- if (pdata->get_context_loss_count) {
- context_loss = pdata->get_context_loss_count(host->dev);
- if (context_loss < 0)
- return 1;
- }
-
- dev_dbg(mmc_dev(host->mmc), "context was %slost\n",
- context_loss == host->context_loss ? "not " : "");
- if (host->context_loss == context_loss)
- return 1;
-
if (!OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE)
return 1;
+ if (host->con == OMAP_HSMMC_READ(host->base, CON) &&
+ host->hctl == OMAP_HSMMC_READ(host->base, HCTL) &&
+ host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) &&
+ host->capa == OMAP_HSMMC_READ(host->base, CAPA))
+ return 0;
+
+ host->context_loss++;
+
if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
if (host->power_mode != MMC_POWER_OFF &&
(1 << ios->vdd) <= MMC_VDD_23_24)
@@ -655,9 +655,8 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
omap_hsmmc_set_bus_mode(host);
out:
- host->context_loss = context_loss;
-
- dev_dbg(mmc_dev(host->mmc), "context is restored\n");
+ dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n",
+ host->context_loss);
return 0;
}
@@ -666,15 +665,10 @@ out:
*/
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
- struct omap_mmc_platform_data *pdata = host->pdata;
- int context_loss;
-
- if (pdata->get_context_loss_count) {
- context_loss = pdata->get_context_loss_count(host->dev);
- if (context_loss < 0)
- return;
- host->context_loss = context_loss;
- }
+ host->con = OMAP_HSMMC_READ(host->base, CON);
+ host->hctl = OMAP_HSMMC_READ(host->base, HCTL);
+ host->sysctl = OMAP_HSMMC_READ(host->base, SYSCTL);
+ host->capa = OMAP_HSMMC_READ(host->base, CAPA);
}
#else
@@ -975,8 +969,7 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
unsigned long bit)
{
unsigned long i = 0;
- unsigned long limit = (loops_per_jiffy *
- msecs_to_jiffies(MMC_TIMEOUT_MS));
+ unsigned long limit = MMC_TIMEOUT_US;
OMAP_HSMMC_WRITE(host->base, SYSCTL,
OMAP_HSMMC_READ(host->base, SYSCTL) | bit);
@@ -988,13 +981,13 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) {
while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
&& (i++ < limit))
- cpu_relax();
+ udelay(1);
}
i = 0;
while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
(i++ < limit))
- cpu_relax();
+ udelay(1);
if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)
dev_err(mmc_dev(host->mmc),
@@ -1178,9 +1171,6 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
struct omap_mmc_slot_data *slot = &mmc_slot(host);
int carddetect;
- if (host->suspended)
- return IRQ_HANDLED;
-
sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
if (slot->card_detect)
@@ -1635,18 +1625,9 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
{
struct mmc_host *mmc = s->private;
struct omap_hsmmc_host *host = mmc_priv(mmc);
- int context_loss = 0;
-
- if (host->pdata->get_context_loss_count)
- context_loss = host->pdata->get_context_loss_count(host->dev);
- seq_printf(s, "mmc%d:\n ctx_loss:\t%d:%d\n\nregs:\n",
- mmc->index, host->context_loss, context_loss);
-
- if (host->suspended) {
- seq_printf(s, "host suspended, can't read registers\n");
- return 0;
- }
+ seq_printf(s, "mmc%d:\n ctx_loss:\t%d\n\nregs:\n",
+ mmc->index, host->context_loss);
pm_runtime_get_sync(host->dev);
@@ -1838,13 +1819,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
mmc->ops = &omap_hsmmc_ops;
- /*
- * If regulator_disable can only put vcc_aux to sleep then there is
- * no off state.
- */
- if (mmc_slot(host).vcc_aux_disable_is_sleep)
- mmc_slot(host).no_off = 1;
-
mmc->f_min = OMAP_MMC_MIN_CLOCK;
if (pdata->max_freq > 0)
@@ -1874,7 +1848,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_context_save(host);
/* This can be removed once we support PBIAS with DT */
- if (host->dev->of_node && host->mapbase == 0x4809c000)
+ if (host->dev->of_node && res->start == 0x4809c000)
host->pbias_disable = 1;
host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
@@ -2119,23 +2093,12 @@ static void omap_hsmmc_complete(struct device *dev)
static int omap_hsmmc_suspend(struct device *dev)
{
- int ret = 0;
struct omap_hsmmc_host *host = dev_get_drvdata(dev);
if (!host)
return 0;
- if (host && host->suspended)
- return 0;
-
pm_runtime_get_sync(host->dev);
- host->suspended = 1;
- ret = mmc_suspend_host(host->mmc);
-
- if (ret) {
- host->suspended = 0;
- goto err;
- }
if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
omap_hsmmc_disable_irq(host);
@@ -2145,23 +2108,19 @@ static int omap_hsmmc_suspend(struct device *dev)
if (host->dbclk)
clk_disable_unprepare(host->dbclk);
-err:
+
pm_runtime_put_sync(host->dev);
- return ret;
+ return 0;
}
/* Routine to resume the MMC device */
static int omap_hsmmc_resume(struct device *dev)
{
- int ret = 0;
struct omap_hsmmc_host *host = dev_get_drvdata(dev);
if (!host)
return 0;
- if (host && !host->suspended)
- return 0;
-
pm_runtime_get_sync(host->dev);
if (host->dbclk)
@@ -2172,16 +2131,9 @@ static int omap_hsmmc_resume(struct device *dev)
omap_hsmmc_protect_card(host);
- /* Notify the core to resume the host */
- ret = mmc_resume_host(host->mmc);
- if (ret == 0)
- host->suspended = 0;
-
pm_runtime_mark_last_busy(host->dev);
pm_runtime_put_autosuspend(host->dev);
-
- return ret;
-
+ return 0;
}
#else
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 1956a3df7cf3..32fe11323f39 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -880,35 +880,6 @@ static int pxamci_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int pxamci_suspend(struct device *dev)
-{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- int ret = 0;
-
- if (mmc)
- ret = mmc_suspend_host(mmc);
-
- return ret;
-}
-
-static int pxamci_resume(struct device *dev)
-{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- int ret = 0;
-
- if (mmc)
- ret = mmc_resume_host(mmc);
-
- return ret;
-}
-
-static const struct dev_pm_ops pxamci_pm_ops = {
- .suspend = pxamci_suspend,
- .resume = pxamci_resume,
-};
-#endif
-
static struct platform_driver pxamci_driver = {
.probe = pxamci_probe,
.remove = pxamci_remove,
@@ -916,9 +887,6 @@ static struct platform_driver pxamci_driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(pxa_mmc_dt_ids),
-#ifdef CONFIG_PM
- .pm = &pxamci_pm_ops,
-#endif
},
};
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 375a880e0c5f..c46feda07d56 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -364,7 +364,7 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
struct mmc_host *mmc = host->mmc;
struct mmc_card *card = mmc->card;
struct mmc_data *data = mrq->data;
- int uhs = mmc_sd_card_uhs(card);
+ int uhs = mmc_card_uhs(card);
int read = (data->flags & MMC_DATA_READ) ? 1 : 0;
u8 cfg2, trans_mode;
int err;
@@ -1197,37 +1197,6 @@ static const struct mmc_host_ops realtek_pci_sdmmc_ops = {
.execute_tuning = sdmmc_execute_tuning,
};
-#ifdef CONFIG_PM
-static int rtsx_pci_sdmmc_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
- struct mmc_host *mmc = host->mmc;
- int err;
-
- dev_dbg(sdmmc_dev(host), "--> %s\n", __func__);
-
- err = mmc_suspend_host(mmc);
- if (err)
- return err;
-
- return 0;
-}
-
-static int rtsx_pci_sdmmc_resume(struct platform_device *pdev)
-{
- struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
- struct mmc_host *mmc = host->mmc;
-
- dev_dbg(sdmmc_dev(host), "--> %s\n", __func__);
-
- return mmc_resume_host(mmc);
-}
-#else /* CONFIG_PM */
-#define rtsx_pci_sdmmc_suspend NULL
-#define rtsx_pci_sdmmc_resume NULL
-#endif /* CONFIG_PM */
-
static void init_extra_caps(struct realtek_pci_sdmmc *host)
{
struct mmc_host *mmc = host->mmc;
@@ -1367,8 +1336,6 @@ static struct platform_driver rtsx_pci_sdmmc_driver = {
.probe = rtsx_pci_sdmmc_drv_probe,
.remove = rtsx_pci_sdmmc_drv_remove,
.id_table = rtsx_pci_sdmmc_ids,
- .suspend = rtsx_pci_sdmmc_suspend,
- .resume = rtsx_pci_sdmmc_resume,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME_RTSX_PCI_SDMMC,
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 8d6794cdf899..2fce5ea5eb39 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1949,39 +1949,10 @@ static struct platform_device_id s3cmci_driver_ids[] = {
MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
-
-#ifdef CONFIG_PM
-
-static int s3cmci_suspend(struct device *dev)
-{
- struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
-
- return mmc_suspend_host(mmc);
-}
-
-static int s3cmci_resume(struct device *dev)
-{
- struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
-
- return mmc_resume_host(mmc);
-}
-
-static const struct dev_pm_ops s3cmci_pm = {
- .suspend = s3cmci_suspend,
- .resume = s3cmci_resume,
-};
-
-#define s3cmci_pm_ops &s3cmci_pm
-#else /* CONFIG_PM */
-#define s3cmci_pm_ops NULL
-#endif /* CONFIG_PM */
-
-
static struct platform_driver s3cmci_driver = {
.driver = {
.name = "s3c-sdi",
.owner = THIS_MODULE,
- .pm = s3cmci_pm_ops,
},
.id_table = s3cmci_driver_ids,
.probe = s3cmci_probe,
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index cdd4ce0d7c90..ef19874fcd1f 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -310,8 +310,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
dma_mask = DMA_BIT_MASK(32);
}
- dev->dma_mask = &dev->coherent_dma_mask;
- dev->coherent_dma_mask = dma_mask;
+ err = dma_coerce_mask_and_coherent(dev, dma_mask);
+ if (err)
+ goto err_free;
}
if (c->slot) {
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index 85472d3fd37f..7a190fe4dff1 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -316,19 +316,7 @@ err_pltfm_free:
static int __exit sdhci_bcm_kona_remove(struct platform_device *pdev)
{
- struct sdhci_host *host = platform_get_drvdata(pdev);
- int dead;
- u32 scratch;
-
- dead = 0;
- scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
- if (scratch == (u32)-1)
- dead = 1;
- sdhci_remove_host(host, dead);
-
- sdhci_free_host(host);
-
- return 0;
+ return sdhci_pltfm_unregister(pdev);
}
static struct platform_driver sdhci_bcm_kona_driver = {
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
index 36fa2df04660..f6d8d67c545f 100644
--- a/drivers/mmc/host/sdhci-bcm2835.c
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -178,13 +178,7 @@ err:
static int bcm2835_sdhci_remove(struct platform_device *pdev)
{
- struct sdhci_host *host = platform_get_drvdata(pdev);
- int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
-
- sdhci_remove_host(host, dead);
- sdhci_pltfm_free(pdev);
-
- return 0;
+ return sdhci_pltfm_unregister(pdev);
}
static const struct of_device_id bcm2835_sdhci_of_match[] = {
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index abc8cf01e6e3..461a4c3f4ef7 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -34,12 +34,40 @@
/* VENDOR SPEC register */
#define ESDHC_VENDOR_SPEC 0xc0
#define ESDHC_VENDOR_SPEC_SDIO_QUIRK (1 << 1)
+#define ESDHC_VENDOR_SPEC_VSELECT (1 << 1)
+#define ESDHC_VENDOR_SPEC_FRC_SDCLK_ON (1 << 8)
#define ESDHC_WTMK_LVL 0x44
#define ESDHC_MIX_CTRL 0x48
+#define ESDHC_MIX_CTRL_DDREN (1 << 3)
#define ESDHC_MIX_CTRL_AC23EN (1 << 7)
+#define ESDHC_MIX_CTRL_EXE_TUNE (1 << 22)
+#define ESDHC_MIX_CTRL_SMPCLK_SEL (1 << 23)
+#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25)
/* Bits 3 and 6 are not SDHCI standard definitions */
#define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7
+/* dll control register */
+#define ESDHC_DLL_CTRL 0x60
+#define ESDHC_DLL_OVERRIDE_VAL_SHIFT 9
+#define ESDHC_DLL_OVERRIDE_EN_SHIFT 8
+
+/* tune control register */
+#define ESDHC_TUNE_CTRL_STATUS 0x68
+#define ESDHC_TUNE_CTRL_STEP 1
+#define ESDHC_TUNE_CTRL_MIN 0
+#define ESDHC_TUNE_CTRL_MAX ((1 << 7) - 1)
+
+#define ESDHC_TUNING_CTRL 0xcc
+#define ESDHC_STD_TUNING_EN (1 << 24)
+/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
+#define ESDHC_TUNING_START_TAP 0x1
+
+#define ESDHC_TUNING_BLOCK_PATTERN_LEN 64
+
+/* pinctrl state */
+#define ESDHC_PINCTRL_STATE_100MHZ "state_100mhz"
+#define ESDHC_PINCTRL_STATE_200MHZ "state_200mhz"
+
/*
* Our interpretation of the SDHCI_HOST_CONTROL register
*/
@@ -66,21 +94,60 @@
* As a result, the TC flag is not asserted and SW receives a timeout
* exception. Bit1 of the Vendor Spec register is used to fix it.
*/
-#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1)
-
-enum imx_esdhc_type {
- IMX25_ESDHC,
- IMX35_ESDHC,
- IMX51_ESDHC,
- IMX53_ESDHC,
- IMX6Q_USDHC,
+#define ESDHC_FLAG_MULTIBLK_NO_INT BIT(1)
+/*
+ * The flag enables the workaround for ESDHC errata ENGcm07207 which
+ * affects i.MX25 and i.MX35.
+ */
+#define ESDHC_FLAG_ENGCM07207 BIT(2)
+/*
+ * The flag indicates that the ESDHC controller is a USDHC block
+ * integrated on the i.MX6 series.
+ */
+#define ESDHC_FLAG_USDHC BIT(3)
+/* The IP supports manual tuning process */
+#define ESDHC_FLAG_MAN_TUNING BIT(4)
+/* The IP supports standard tuning process */
+#define ESDHC_FLAG_STD_TUNING BIT(5)
+/* The IP has SDHCI_CAPABILITIES_1 register */
+#define ESDHC_FLAG_HAVE_CAP1 BIT(6)
+
+struct esdhc_soc_data {
+ u32 flags;
+};
+
+static struct esdhc_soc_data esdhc_imx25_data = {
+ .flags = ESDHC_FLAG_ENGCM07207,
+};
+
+static struct esdhc_soc_data esdhc_imx35_data = {
+ .flags = ESDHC_FLAG_ENGCM07207,
+};
+
+static struct esdhc_soc_data esdhc_imx51_data = {
+ .flags = 0,
+};
+
+static struct esdhc_soc_data esdhc_imx53_data = {
+ .flags = ESDHC_FLAG_MULTIBLK_NO_INT,
+};
+
+static struct esdhc_soc_data usdhc_imx6q_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING,
+};
+
+static struct esdhc_soc_data usdhc_imx6sl_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1,
};
struct pltfm_imx_data {
- int flags;
u32 scratchpad;
- enum imx_esdhc_type devtype;
struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_100mhz;
+ struct pinctrl_state *pins_200mhz;
+ const struct esdhc_soc_data *socdata;
struct esdhc_platform_data boarddata;
struct clk *clk_ipg;
struct clk *clk_ahb;
@@ -90,25 +157,20 @@ struct pltfm_imx_data {
MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
WAIT_FOR_INT, /* sent CMD12, waiting for response INT */
} multiblock_status;
-
+ u32 uhs_mode;
+ u32 is_ddr;
};
static struct platform_device_id imx_esdhc_devtype[] = {
{
.name = "sdhci-esdhc-imx25",
- .driver_data = IMX25_ESDHC,
+ .driver_data = (kernel_ulong_t) &esdhc_imx25_data,
}, {
.name = "sdhci-esdhc-imx35",
- .driver_data = IMX35_ESDHC,
+ .driver_data = (kernel_ulong_t) &esdhc_imx35_data,
}, {
.name = "sdhci-esdhc-imx51",
- .driver_data = IMX51_ESDHC,
- }, {
- .name = "sdhci-esdhc-imx53",
- .driver_data = IMX53_ESDHC,
- }, {
- .name = "sdhci-usdhc-imx6q",
- .driver_data = IMX6Q_USDHC,
+ .driver_data = (kernel_ulong_t) &esdhc_imx51_data,
}, {
/* sentinel */
}
@@ -116,38 +178,34 @@ static struct platform_device_id imx_esdhc_devtype[] = {
MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype);
static const struct of_device_id imx_esdhc_dt_ids[] = {
- { .compatible = "fsl,imx25-esdhc", .data = &imx_esdhc_devtype[IMX25_ESDHC], },
- { .compatible = "fsl,imx35-esdhc", .data = &imx_esdhc_devtype[IMX35_ESDHC], },
- { .compatible = "fsl,imx51-esdhc", .data = &imx_esdhc_devtype[IMX51_ESDHC], },
- { .compatible = "fsl,imx53-esdhc", .data = &imx_esdhc_devtype[IMX53_ESDHC], },
- { .compatible = "fsl,imx6q-usdhc", .data = &imx_esdhc_devtype[IMX6Q_USDHC], },
+ { .compatible = "fsl,imx25-esdhc", .data = &esdhc_imx25_data, },
+ { .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, },
+ { .compatible = "fsl,imx51-esdhc", .data = &esdhc_imx51_data, },
+ { .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, },
+ { .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, },
+ { .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
static inline int is_imx25_esdhc(struct pltfm_imx_data *data)
{
- return data->devtype == IMX25_ESDHC;
-}
-
-static inline int is_imx35_esdhc(struct pltfm_imx_data *data)
-{
- return data->devtype == IMX35_ESDHC;
+ return data->socdata == &esdhc_imx25_data;
}
-static inline int is_imx51_esdhc(struct pltfm_imx_data *data)
+static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
{
- return data->devtype == IMX51_ESDHC;
+ return data->socdata == &esdhc_imx53_data;
}
-static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
+static inline int is_imx6q_usdhc(struct pltfm_imx_data *data)
{
- return data->devtype == IMX53_ESDHC;
+ return data->socdata == &usdhc_imx6q_data;
}
-static inline int is_imx6q_usdhc(struct pltfm_imx_data *data)
+static inline int esdhc_is_usdhc(struct pltfm_imx_data *data)
{
- return data->devtype == IMX6Q_USDHC;
+ return !!(data->socdata->flags & ESDHC_FLAG_USDHC);
}
static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
@@ -164,7 +222,21 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
struct pltfm_imx_data *imx_data = pltfm_host->priv;
u32 val = readl(host->ioaddr + reg);
+ if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+ u32 fsl_prss = val;
+ /* keep the lowest 20 bits */
+ val = fsl_prss & 0x000FFFFF;
+ /* move dat[0-3] bits */
+ val |= (fsl_prss & 0x0F000000) >> 4;
+ /* move cmd line bit */
+ val |= (fsl_prss & 0x00800000) << 1;
+ }
+
if (unlikely(reg == SDHCI_CAPABILITIES)) {
+ /* ignore bits [0-15] as they store the cap_1 register value for mx6sl */
+ if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1)
+ val &= 0xffff0000;
+
/* In FSL esdhc IC module, only bit20 is used to indicate the
* ADMA2 capability of esdhc, but this bit is messed up on
* some SOCs (e.g. on MX25, MX35 this bit is set, but they
@@ -178,6 +250,25 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
}
}
+ if (unlikely(reg == SDHCI_CAPABILITIES_1)) {
+ if (esdhc_is_usdhc(imx_data)) {
+ if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1)
+ val = readl(host->ioaddr + SDHCI_CAPABILITIES) & 0xFFFF;
+ else
+ /* imx6q/dl does not have cap_1 register, fake one */
+ val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104
+ | SDHCI_SUPPORT_SDR50
+ | SDHCI_USE_SDR50_TUNING;
+ }
+ }
+
+ if (unlikely(reg == SDHCI_MAX_CURRENT) && esdhc_is_usdhc(imx_data)) {
+ val = 0;
+ val |= 0xFF << SDHCI_MAX_CURRENT_330_SHIFT;
+ val |= 0xFF << SDHCI_MAX_CURRENT_300_SHIFT;
+ val |= 0xFF << SDHCI_MAX_CURRENT_180_SHIFT;
+ }
+
if (unlikely(reg == SDHCI_INT_STATUS)) {
if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) {
val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
@@ -224,7 +315,7 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
}
}
- if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
+ if (unlikely((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
&& (reg == SDHCI_INT_STATUS)
&& (val & SDHCI_INT_DATA_END))) {
u32 v;
@@ -256,10 +347,12 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ u16 ret = 0;
+ u32 val;
if (unlikely(reg == SDHCI_HOST_VERSION)) {
reg ^= 2;
- if (is_imx6q_usdhc(imx_data)) {
+ if (esdhc_is_usdhc(imx_data)) {
/*
* The usdhc register returns a wrong host version.
* Correct it here.
@@ -268,6 +361,30 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
}
}
+ if (unlikely(reg == SDHCI_HOST_CONTROL2)) {
+ val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ if (val & ESDHC_VENDOR_SPEC_VSELECT)
+ ret |= SDHCI_CTRL_VDD_180;
+
+ if (esdhc_is_usdhc(imx_data)) {
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
+ val = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
+ /* the std tuning bits are in ACMD12_ERR for imx6sl */
+ val = readl(host->ioaddr + SDHCI_ACMD12_ERR);
+ }
+
+ if (val & ESDHC_MIX_CTRL_EXE_TUNE)
+ ret |= SDHCI_CTRL_EXEC_TUNING;
+ if (val & ESDHC_MIX_CTRL_SMPCLK_SEL)
+ ret |= SDHCI_CTRL_TUNED_CLK;
+
+ ret |= (imx_data->uhs_mode & SDHCI_CTRL_UHS_MASK);
+ ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
+
+ return ret;
+ }
+
return readw(host->ioaddr + reg);
}
@@ -275,10 +392,59 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ u32 new_val = 0;
switch (reg) {
+ case SDHCI_CLOCK_CONTROL:
+ new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ if (val & SDHCI_CLOCK_CARD_EN)
+ new_val |= ESDHC_VENDOR_SPEC_FRC_SDCLK_ON;
+ else
+ new_val &= ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON;
+ writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
+ return;
+ case SDHCI_HOST_CONTROL2:
+ new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ if (val & SDHCI_CTRL_VDD_180)
+ new_val |= ESDHC_VENDOR_SPEC_VSELECT;
+ else
+ new_val &= ~ESDHC_VENDOR_SPEC_VSELECT;
+ writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
+ imx_data->uhs_mode = val & SDHCI_CTRL_UHS_MASK;
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
+ new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ if (val & SDHCI_CTRL_TUNED_CLK)
+ new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL;
+ else
+ new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+ writel(new_val , host->ioaddr + ESDHC_MIX_CTRL);
+ } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+ u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
+ u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ new_val = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ if (val & SDHCI_CTRL_EXEC_TUNING) {
+ new_val |= ESDHC_STD_TUNING_EN |
+ ESDHC_TUNING_START_TAP;
+ v |= ESDHC_MIX_CTRL_EXE_TUNE;
+ m |= ESDHC_MIX_CTRL_FBCLK_SEL;
+ } else {
+ new_val &= ~ESDHC_STD_TUNING_EN;
+ v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+ m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
+ }
+
+ if (val & SDHCI_CTRL_TUNED_CLK)
+ v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
+ else
+ v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+
+ writel(new_val, host->ioaddr + ESDHC_TUNING_CTRL);
+ writel(v, host->ioaddr + SDHCI_ACMD12_ERR);
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+ }
+ return;
case SDHCI_TRANSFER_MODE:
- if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
+ if ((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
&& (host->cmd->opcode == SD_IO_RW_EXTENDED)
&& (host->cmd->data->blocks > 1)
&& (host->cmd->data->flags & MMC_DATA_READ)) {
@@ -288,7 +454,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
}
- if (is_imx6q_usdhc(imx_data)) {
+ if (esdhc_is_usdhc(imx_data)) {
u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
/* Swap AC23 bit */
if (val & SDHCI_TRNS_AUTO_CMD23) {
@@ -310,10 +476,10 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
val |= SDHCI_CMD_ABORTCMD;
if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
- (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+ (imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
imx_data->multiblock_status = MULTIBLK_IN_PROCESS;
- if (is_imx6q_usdhc(imx_data))
+ if (esdhc_is_usdhc(imx_data))
writel(val << 16,
host->ioaddr + SDHCI_TRANSFER_MODE);
else
@@ -379,8 +545,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
* The reset on usdhc fails to clear MIX_CTRL register.
* Do it manually here.
*/
- if (is_imx6q_usdhc(imx_data))
+ if (esdhc_is_usdhc(imx_data)) {
writel(0, host->ioaddr + ESDHC_MIX_CTRL);
+ imx_data->is_ddr = 0;
+ }
}
}
@@ -409,8 +577,60 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ unsigned int host_clock = clk_get_rate(pltfm_host->clk);
+ int pre_div = 2;
+ int div = 1;
+ u32 temp, val;
+
+ if (clock == 0) {
+ if (esdhc_is_usdhc(imx_data)) {
+ val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+ host->ioaddr + ESDHC_VENDOR_SPEC);
+ }
+ goto out;
+ }
+
+ if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr)
+ pre_div = 1;
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | ESDHC_CLOCK_MASK);
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+ while (host_clock / pre_div / 16 > clock && pre_div < 256)
+ pre_div *= 2;
+
+ while (host_clock / pre_div / div > clock && div < 16)
+ div++;
- esdhc_set_clock(host, clock, clk_get_rate(pltfm_host->clk));
+ host->mmc->actual_clock = host_clock / pre_div / div;
+ dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+ clock, host->mmc->actual_clock);
+
+ if (imx_data->is_ddr)
+ pre_div >>= 2;
+ else
+ pre_div >>= 1;
+ div--;
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | (div << ESDHC_DIVIDER_SHIFT)
+ | (pre_div << ESDHC_PREDIV_SHIFT));
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+ if (esdhc_is_usdhc(imx_data)) {
+ val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+ host->ioaddr + ESDHC_VENDOR_SPEC);
+ }
+
+ mdelay(1);
+out:
+ host->clock = clock;
}
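To make the divider search above concrete (assuming an illustrative 198 MHz root clock and a requested 50 MHz card clock on a non-DDR USDHC): pre_div stays at 1 because 198 MHz / 1 / 16 is already below the target, div steps up to 4 since 198 MHz / 1 / 4 = 49.5 MHz is the first value not above the target, and SYSTEM_CONTROL is then programmed with pre_div >> 1 = 0 and div - 1 = 3, giving an actual clock of 49.5 MHz.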
static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
@@ -454,7 +674,192 @@ static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
return 0;
}
-static const struct sdhci_ops sdhci_esdhc_ops = {
+static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
+{
+ u32 reg;
+
+ /* FIXME: delay a bit so the card is ready for the next tuning step after errors */
+ mdelay(1);
+
+ reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
+ ESDHC_MIX_CTRL_FBCLK_SEL;
+ writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
+ writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ dev_dbg(mmc_dev(host->mmc),
+ "tunning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n",
+ val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS));
+}
+
+static void esdhc_request_done(struct mmc_request *mrq)
+{
+ complete(&mrq->completion);
+}
+
+static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode)
+{
+ struct mmc_command cmd = {0};
+ struct mmc_request mrq = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg;
+ char tuning_pattern[ESDHC_TUNING_BLOCK_PATTERN_LEN];
+
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, tuning_pattern, sizeof(tuning_pattern));
+
+ mrq.cmd = &cmd;
+ mrq.cmd->mrq = &mrq;
+ mrq.data = &data;
+ mrq.data->mrq = &mrq;
+ mrq.cmd->data = mrq.data;
+
+ mrq.done = esdhc_request_done;
+ init_completion(&(mrq.completion));
+
+ disable_irq(host->irq);
+ spin_lock(&host->lock);
+ host->mrq = &mrq;
+
+ sdhci_send_command(host, mrq.cmd);
+
+ spin_unlock(&host->lock);
+ enable_irq(host->irq);
+
+ wait_for_completion(&mrq.completion);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
+
+static void esdhc_post_tuning(struct sdhci_host *host)
+{
+ u32 reg;
+
+ reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ reg &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+ writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
+}
+
+static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
+{
+ int min, max, avg, ret;
+
+ /* first find the minimum delay which can pass tuning */
+ min = ESDHC_TUNE_CTRL_MIN;
+ while (min < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, min);
+ if (!esdhc_send_tuning_cmd(host, opcode))
+ break;
+ min += ESDHC_TUNE_CTRL_STEP;
+ }
+
+ /* then find the maximum delay which still passes tuning */
+ max = min + ESDHC_TUNE_CTRL_STEP;
+ while (max < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, max);
+ if (esdhc_send_tuning_cmd(host, opcode)) {
+ max -= ESDHC_TUNE_CTRL_STEP;
+ break;
+ }
+ max += ESDHC_TUNE_CTRL_STEP;
+ }
+
+ /* use average delay to get the best timing */
+ avg = (min + max) / 2;
+ esdhc_prepare_tuning(host, avg);
+ ret = esdhc_send_tuning_cmd(host, opcode);
+ esdhc_post_tuning(host);
+
+ dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n",
+ ret ? "failed" : "passed", avg, ret);
+
+ return ret;
+}
+
+static int esdhc_change_pinstate(struct sdhci_host *host,
+ unsigned int uhs)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pinctrl_state *pinctrl;
+
+ dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);
+
+ if (IS_ERR(imx_data->pinctrl) ||
+ IS_ERR(imx_data->pins_default) ||
+ IS_ERR(imx_data->pins_100mhz) ||
+ IS_ERR(imx_data->pins_200mhz))
+ return -EINVAL;
+
+ switch (uhs) {
+ case MMC_TIMING_UHS_SDR50:
+ pinctrl = imx_data->pins_100mhz;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ pinctrl = imx_data->pins_200mhz;
+ break;
+ default:
+ /* fall back to the default state for other legacy timings */
+ pinctrl = imx_data->pins_default;
+ }
+
+ return pinctrl_select_state(imx_data->pinctrl, pinctrl);
+}
+
+static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+
+ switch (uhs) {
+ case MMC_TIMING_UHS_SDR12:
+ imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR12;
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR25;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR50;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR104;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ imx_data->uhs_mode = SDHCI_CTRL_UHS_DDR50;
+ writel(readl(host->ioaddr + ESDHC_MIX_CTRL) |
+ ESDHC_MIX_CTRL_DDREN,
+ host->ioaddr + ESDHC_MIX_CTRL);
+ imx_data->is_ddr = 1;
+ if (boarddata->delay_line) {
+ u32 v;
+ v = boarddata->delay_line <<
+ ESDHC_DLL_OVERRIDE_VAL_SHIFT |
+ (1 << ESDHC_DLL_OVERRIDE_EN_SHIFT);
+ if (is_imx53_esdhc(imx_data))
+ v <<= 1;
+ writel(v, host->ioaddr + ESDHC_DLL_CTRL);
+ }
+ break;
+ }
+
+ return esdhc_change_pinstate(host, uhs);
+}
+
+static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
.write_l = esdhc_writel_le,
@@ -465,6 +870,7 @@ static const struct sdhci_ops sdhci_esdhc_ops = {
.get_min_clock = esdhc_pltfm_get_min_clock,
.get_ro = esdhc_pltfm_get_ro,
.platform_bus_width = esdhc_pltfm_bus_width,
+ .set_uhs_signaling = esdhc_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
@@ -506,6 +912,14 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
of_property_read_u32(np, "max-frequency", &boarddata->f_max);
+ if (of_find_property(np, "no-1-8-v", NULL))
+ boarddata->support_vsel = false;
+ else
+ boarddata->support_vsel = true;
+
+ if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line))
+ boarddata->delay_line = 0;
+
return 0;
}
#else
@@ -539,9 +953,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
goto free_sdhci;
}
- if (of_id)
- pdev->id_entry = of_id->data;
- imx_data->devtype = pdev->id_entry->driver_data;
+ imx_data->socdata = of_id ? of_id->data : (struct esdhc_soc_data *)
+ pdev->id_entry->driver_data;
pltfm_host->priv = imx_data;
imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
@@ -568,29 +981,39 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
clk_prepare_enable(imx_data->clk_ipg);
clk_prepare_enable(imx_data->clk_ahb);
- imx_data->pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ imx_data->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(imx_data->pinctrl)) {
err = PTR_ERR(imx_data->pinctrl);
goto disable_clk;
}
+ imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(imx_data->pins_default)) {
+ err = PTR_ERR(imx_data->pins_default);
+ dev_err(mmc_dev(host->mmc), "could not get default state\n");
+ goto disable_clk;
+ }
+
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
- if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data))
+ if (imx_data->socdata->flags & ESDHC_FLAG_ENGCM07207)
/* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK
| SDHCI_QUIRK_BROKEN_ADMA;
- if (is_imx53_esdhc(imx_data))
- imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
-
/*
* The imx6q ROM code will change the default watermark level setting
* to something insane. Change it back here.
*/
- if (is_imx6q_usdhc(imx_data))
+ if (esdhc_is_usdhc(imx_data)) {
writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL);
+ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+ }
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
+ sdhci_esdhc_ops.platform_execute_tuning =
+ esdhc_executing_tuning;
boarddata = &imx_data->boarddata;
if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
if (!host->mmc->parent->platform_data) {
@@ -650,6 +1073,23 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
break;
}
+ /* SDR50 and SDR104 need to work at 1.8V signal voltage */
+ if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data)) {
+ imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_100MHZ);
+ imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_200MHZ);
+ if (IS_ERR(imx_data->pins_100mhz) ||
+ IS_ERR(imx_data->pins_200mhz)) {
+ dev_warn(mmc_dev(host->mmc),
+ "could not get ultra high speed state, work on normal mode\n");
+ /* fall back to no UHS support by setting the no-1.8V quirk */
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+ } else {
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+
err = sdhci_add_host(host);
if (err)
goto disable_clk;
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index a2a06420e463..a7d9f95a7b03 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -49,41 +49,4 @@
#define ESDHC_HOST_CONTROL_RES 0x05
-static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock,
- unsigned int host_clock)
-{
- int pre_div = 2;
- int div = 1;
- u32 temp;
-
- if (clock == 0)
- goto out;
-
- temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
- | ESDHC_CLOCK_MASK);
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
-
- while (host_clock / pre_div / 16 > clock && pre_div < 256)
- pre_div *= 2;
-
- while (host_clock / pre_div / div > clock && div < 16)
- div++;
-
- dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
- clock, host_clock / pre_div / div);
-
- pre_div >>= 1;
- div--;
-
- temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
- | (div << ESDHC_DIVIDER_SHIFT)
- | (pre_div << ESDHC_PREDIV_SHIFT));
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
- mdelay(1);
-out:
- host->clock = clock;
-}
-
#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index e328252ebf2a..0b249970b119 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -199,6 +199,14 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
+
+ int pre_div = 2;
+ int div = 1;
+ u32 temp;
+
+ if (clock == 0)
+ goto out;
+
/* Workaround to reduce the clock frequency for p1010 esdhc */
if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
if (clock > 20000000)
@@ -207,8 +215,31 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
clock -= 5000000;
}
- /* Set the clock */
- esdhc_set_clock(host, clock, host->max_clk);
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | ESDHC_CLOCK_MASK);
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+ while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ pre_div *= 2;
+
+ while (host->max_clk / pre_div / div > clock && div < 16)
+ div++;
+
+ dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+ clock, host->max_clk / pre_div / div);
+
+ pre_div >>= 1;
+ div--;
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | (div << ESDHC_DIVIDER_SHIFT)
+ | (pre_div << ESDHC_PREDIV_SHIFT));
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ mdelay(1);
+out:
+ host->clock = clock;
}
#ifdef CONFIG_PM
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index d7d6bc8968d2..8f753811fc7a 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -37,6 +37,12 @@
#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15
#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16
#define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50
+#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
/*
* PCI registers
@@ -356,6 +362,28 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
.allow_runtime_pm = true,
};
+/* Define Host controllers for Intel Merrifield platform */
+#define INTEL_MRFL_EMMC_0 0
+#define INTEL_MRFL_EMMC_1 1
+
+static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_0) &&
+ (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_1))
+ /* SD support is not ready yet */
+ return -ENODEV;
+
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
+ MMC_CAP_1_8V_DDR;
+
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .probe_slot = intel_mrfl_mmc_probe_slot,
+};
+
/* O2Micro extra registers */
#define O2_SD_LOCK_WP 0xD3
#define O2_SD_MULTI_VCC3V 0xEE
@@ -939,6 +967,54 @@ static const struct pci_device_id pci_ids[] = {
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
},
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_EMMC0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_EMMC1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MRFL_MMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
+ },
{
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8120,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 7a7fb4f0d5a4..6785fb1dc5c6 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -49,7 +49,6 @@ static unsigned int debug_quirks2;
static void sdhci_finish_data(struct sdhci_host *);
-static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
@@ -981,7 +980,7 @@ static void sdhci_finish_data(struct sdhci_host *host)
tasklet_schedule(&host->finish_tasklet);
}
-static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
int flags;
u32 mask;
@@ -1053,6 +1052,7 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
+EXPORT_SYMBOL_GPL(sdhci_send_command);
static void sdhci_finish_command(struct sdhci_host *host)
{
@@ -1435,7 +1435,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
}
if (host->version >= SDHCI_SPEC_300 &&
- (ios->power_mode == MMC_POWER_UP))
+ (ios->power_mode == MMC_POWER_UP) &&
+ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
sdhci_enable_preset_value(host, false);
sdhci_set_clock(host, ios->clock);
@@ -1875,6 +1876,14 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
return 0;
}
+ if (host->ops->platform_execute_tuning) {
+ spin_unlock(&host->lock);
+ enable_irq(host->irq);
+ err = host->ops->platform_execute_tuning(host, opcode);
+ sdhci_runtime_pm_put(host);
+ return err;
+ }
+
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
/*
@@ -1981,6 +1990,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (!tuning_loop_counter || !timeout) {
ctrl &= ~SDHCI_CTRL_TUNED_CLK;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ err = -EIO;
} else {
if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
pr_info(DRIVER_NAME ": Tuning procedure"
@@ -2546,8 +2556,6 @@ EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
int sdhci_suspend_host(struct sdhci_host *host)
{
- int ret;
-
if (host->ops->platform_suspend)
host->ops->platform_suspend(host);
@@ -2559,19 +2567,6 @@ int sdhci_suspend_host(struct sdhci_host *host)
host->flags &= ~SDHCI_NEEDS_RETUNING;
}
- ret = mmc_suspend_host(host->mmc);
- if (ret) {
- if (host->flags & SDHCI_USING_RETUNING_TIMER) {
- host->flags |= SDHCI_NEEDS_RETUNING;
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
- }
-
- sdhci_enable_card_detection(host);
-
- return ret;
- }
-
if (!device_may_wakeup(mmc_dev(host->mmc))) {
sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
free_irq(host->irq, host);
@@ -2579,14 +2574,14 @@ int sdhci_suspend_host(struct sdhci_host *host)
sdhci_enable_irq_wakeups(host);
enable_irq_wake(host->irq);
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(sdhci_suspend_host);
int sdhci_resume_host(struct sdhci_host *host)
{
- int ret;
+ int ret = 0;
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
@@ -2615,7 +2610,6 @@ int sdhci_resume_host(struct sdhci_host *host)
mmiowb();
}
- ret = mmc_resume_host(host->mmc);
sdhci_enable_card_detection(host);
if (host->ops->platform_resume)
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index b037f188fe44..0a3ed01887db 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -288,6 +288,7 @@ struct sdhci_ops {
unsigned int (*get_ro)(struct sdhci_host *host);
void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
+ int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
void (*hw_reset)(struct sdhci_host *host);
void (*platform_suspend)(struct sdhci_host *host);
@@ -393,6 +394,8 @@ static inline void *sdhci_priv(struct sdhci_host *host)
extern void sdhci_card_detect(struct sdhci_host *host);
extern int sdhci_add_host(struct sdhci_host *host);
extern void sdhci_remove_host(struct sdhci_host *host, int dead);
+extern void sdhci_send_command(struct sdhci_host *host,
+ struct mmc_command *cmd);
#ifdef CONFIG_PM
extern int sdhci_suspend_host(struct sdhci_host *host);
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index 50adbd155f35..b7e305775314 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -516,9 +516,7 @@ static void sdricoh_pcmcia_detach(struct pcmcia_device *link)
#ifdef CONFIG_PM
static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
{
- struct mmc_host *mmc = link->priv;
dev_dbg(&link->dev, "suspend\n");
- mmc_suspend_host(mmc);
return 0;
}
@@ -527,7 +525,6 @@ static int sdricoh_pcmcia_resume(struct pcmcia_device *link)
struct mmc_host *mmc = link->priv;
dev_dbg(&link->dev, "resume\n");
sdricoh_reset(mmc_priv(mmc));
- mmc_resume_host(mmc);
return 0;
}
#else
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 36629a024aa1..6bffebe6f57a 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1542,19 +1542,15 @@ static int sh_mmcif_remove(struct platform_device *pdev)
static int sh_mmcif_suspend(struct device *dev)
{
struct sh_mmcif_host *host = dev_get_drvdata(dev);
- int ret = mmc_suspend_host(host->mmc);
- if (!ret)
- sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
- return ret;
+ return 0;
}
static int sh_mmcif_resume(struct device *dev)
{
- struct sh_mmcif_host *host = dev_get_drvdata(dev);
-
- return mmc_resume_host(host->mmc);
+ return 0;
}
#else
#define sh_mmcif_suspend NULL
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 43d962829f8e..d1760ebcac03 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -1030,7 +1030,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
{
- return mmc_suspend_host(tifm_get_drvdata(sock));
+ return 0;
}
static int tifm_sd_resume(struct tifm_dev *sock)
@@ -1044,8 +1044,6 @@ static int tifm_sd_resume(struct tifm_dev *sock)
if (rc)
host->eject = 1;
- else
- rc = mmc_resume_host(mmc);
return rc;
}
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index b3802256f954..f3b2d8ca1eca 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1145,12 +1145,9 @@ int tmio_mmc_host_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct tmio_mmc_host *host = mmc_priv(mmc);
- int ret = mmc_suspend_host(mmc);
- if (!ret)
- tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
-
- return ret;
+ tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
+ return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_suspend);
@@ -1163,7 +1160,7 @@ int tmio_mmc_host_resume(struct device *dev)
/* The MMC core will perform the complete set up */
host->resuming = true;
- return mmc_resume_host(mmc);
+ return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_resume);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 4f84586c6e9e..63fac78b3d46 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1269,21 +1269,18 @@ static void via_init_sdc_pm(struct via_crdr_mmc_host *host)
static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
{
struct via_crdr_mmc_host *host;
- int ret = 0;
host = pci_get_drvdata(pcidev);
via_save_pcictrlreg(host);
via_save_sdcreg(host);
- ret = mmc_suspend_host(host->mmc);
-
pci_save_state(pcidev);
pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
pci_disable_device(pcidev);
pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
- return ret;
+ return 0;
}
static int via_sd_resume(struct pci_dev *pcidev)
@@ -1316,8 +1313,6 @@ static int via_sd_resume(struct pci_dev *pcidev)
via_restore_pcictrlreg(sdhost);
via_init_sdc_pm(sdhost);
- ret = mmc_resume_host(sdhost->mmc);
-
return ret;
}
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index e9028ad05ffb..4262296c12fa 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2392,26 +2392,12 @@ static void vub300_disconnect(struct usb_interface *interface)
#ifdef CONFIG_PM
static int vub300_suspend(struct usb_interface *intf, pm_message_t message)
{
- struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
- if (!vub300 || !vub300->mmc) {
- return 0;
- } else {
- struct mmc_host *mmc = vub300->mmc;
- mmc_suspend_host(mmc);
- return 0;
- }
+ return 0;
}
static int vub300_resume(struct usb_interface *intf)
{
- struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
- if (!vub300 || !vub300->mmc) {
- return 0;
- } else {
- struct mmc_host *mmc = vub300->mmc;
- mmc_resume_host(mmc);
- return 0;
- }
+ return 0;
}
#else
#define vub300_suspend NULL
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index e954b7758876..024c82414996 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1814,28 +1814,11 @@ static void wbsd_pnp_remove(struct pnp_dev *dev)
#ifdef CONFIG_PM
-static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
-{
- BUG_ON(host == NULL);
-
- return mmc_suspend_host(host->mmc);
-}
-
-static int wbsd_resume(struct wbsd_host *host)
-{
- BUG_ON(host == NULL);
-
- wbsd_init_device(host);
-
- return mmc_resume_host(host->mmc);
-}
-
static int wbsd_platform_suspend(struct platform_device *dev,
pm_message_t state)
{
struct mmc_host *mmc = platform_get_drvdata(dev);
struct wbsd_host *host;
- int ret;
if (mmc == NULL)
return 0;
@@ -1844,12 +1827,7 @@ static int wbsd_platform_suspend(struct platform_device *dev,
host = mmc_priv(mmc);
- ret = wbsd_suspend(host, state);
- if (ret)
- return ret;
-
wbsd_chip_poweroff(host);
-
return 0;
}
@@ -1872,7 +1850,8 @@ static int wbsd_platform_resume(struct platform_device *dev)
*/
mdelay(5);
- return wbsd_resume(host);
+ wbsd_init_device(host);
+ return 0;
}
#ifdef CONFIG_PNP
@@ -1886,10 +1865,7 @@ static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
return 0;
DBGF("Suspending...\n");
-
- host = mmc_priv(mmc);
-
- return wbsd_suspend(host, state);
+ return 0;
}
static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
@@ -1922,7 +1898,8 @@ static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
*/
mdelay(5);
- return wbsd_resume(host);
+ wbsd_init_device(host);
+ return 0;
}
#endif /* CONFIG_PNP */
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 34231d5168fc..e902ed7846b0 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -212,28 +212,14 @@ struct wmt_mci_priv {
static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable)
{
- u32 reg_tmp;
- if (enable) {
- if (priv->power_inverted) {
- reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
- writeb(reg_tmp | BM_SD_OFF,
- priv->sdmmc_base + SDMMC_BUSMODE);
- } else {
- reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
- writeb(reg_tmp & (~BM_SD_OFF),
- priv->sdmmc_base + SDMMC_BUSMODE);
- }
- } else {
- if (priv->power_inverted) {
- reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
- writeb(reg_tmp & (~BM_SD_OFF),
- priv->sdmmc_base + SDMMC_BUSMODE);
- } else {
- reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
- writeb(reg_tmp | BM_SD_OFF,
- priv->sdmmc_base + SDMMC_BUSMODE);
- }
- }
+ u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
+
+ if (enable ^ priv->power_inverted)
+ reg_tmp &= ~BM_SD_OFF;
+ else
+ reg_tmp |= BM_SD_OFF;
+
+ writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE);
}
static void wmt_mci_read_response(struct mmc_host *mmc)
@@ -939,28 +925,23 @@ static int wmt_mci_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct wmt_mci_priv *priv;
- int ret;
if (!mmc)
return 0;
priv = mmc_priv(mmc);
- ret = mmc_suspend_host(mmc);
-
- if (!ret) {
- reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
- writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
- SDMMC_BUSMODE);
+ reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
+ writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
+ SDMMC_BUSMODE);
- reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
- writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);
+ reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
+ writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);
- writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
- writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
- clk_disable(priv->clk_sdmmc);
- }
- return ret;
+ clk_disable(priv->clk_sdmmc);
+ return 0;
}
static int wmt_mci_resume(struct device *dev)
@@ -969,7 +950,6 @@ static int wmt_mci_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct wmt_mci_priv *priv;
- int ret = 0;
if (mmc) {
priv = mmc_priv(mmc);
@@ -987,10 +967,9 @@ static int wmt_mci_resume(struct device *dev)
writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base +
SDMMC_INTMASK0);
- ret = mmc_resume_host(mmc);
}
- return ret;
+ return 0;
}
static const struct dev_pm_ops wmt_mci_pm = {
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 9279a9174f84..7a6384b0962a 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -27,11 +27,13 @@
/* Magics */
#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
+#define FACTORY_MAGIC 0x59544346 /* FCTY */
#define POT_MAGIC1 0x54544f50 /* POTT */
#define POT_MAGIC2 0x504f /* OP */
#define ML_MAGIC1 0x39685a42
#define ML_MAGIC2 0x26594131
#define TRX_MAGIC 0x30524448
+#define SQSH_MAGIC 0x71736873 /* shsq */
struct trx_header {
uint32_t magic;
@@ -71,7 +73,14 @@ static int bcm47xxpart_parse(struct mtd_info *master,
/* Alloc */
parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
+ if (!buf) {
+ kfree(parts);
+ return -ENOMEM;
+ }
/* Parse block by block looking for magics */
for (offset = 0; offset <= master->size - blocksize;
@@ -110,6 +119,13 @@ static int bcm47xxpart_parse(struct mtd_info *master,
continue;
}
+ /* Found on Huawei E970 */
+ if (buf[0x000 / 4] == FACTORY_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "factory",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+
/* POT(TOP) */
if (buf[0x000 / 4] == POT_MAGIC1 &&
(buf[0x004 / 4] & 0xFFFF) == POT_MAGIC2) {
@@ -167,6 +183,13 @@ static int bcm47xxpart_parse(struct mtd_info *master,
offset = rounddown(offset + trx->length, blocksize);
continue;
}
+
+ /* Squashfs on devices not using TRX */
+ if (buf[0x000 / 4] == SQSH_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
+ offset, 0);
+ continue;
+ }
}
/* Look for NVRAM at the end of the last block. */
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 74ab4b7e523e..01281382180b 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -95,13 +95,6 @@ config MTD_M25P80
if you want to specify device partitioning or to use a device which
doesn't support the JEDEC ID instruction.
-config M25PXX_USE_FAST_READ
- bool "Use FAST_READ OPCode allowing SPI CLK >= 50MHz"
- depends on MTD_M25P80
- default y
- help
- This option enables FAST_READ access supported by ST M25Pxx.
-
config MTD_SPEAR_SMI
tristate "SPEAR MTD NOR Support through SMI controller"
depends on PLAT_SPEAR
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 5cb4c04726b2..d9fd87a4c8dc 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -20,6 +20,7 @@
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
+#include <linux/major.h>
/* Info for the block device */
struct block2mtd_dev {
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 3e1b0a0ef4db..4f091c1a9981 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -2097,7 +2097,7 @@ notfound:
ret = -ENODEV;
dev_info(dev, "No supported DiskOnChip found\n");
err_probe:
- kfree(cascade->bch);
+ free_bch(cascade->bch);
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
if (cascade->floors[floor])
doc_release_device(cascade->floors[floor]);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 6bc9618af094..7eda71dbc183 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -78,7 +78,7 @@
/* Define max times to check status register before we give up. */
#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
-#define MAX_CMD_SIZE 5
+#define MAX_CMD_SIZE 6
#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
@@ -367,10 +367,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
spi_message_init(&m);
memset(t, 0, (sizeof t));
- /* NOTE:
- * OPCODE_FAST_READ (if available) is faster.
- * Should add 1 byte DUMMY_BYTE.
- */
t[0].tx_buf = flash->command;
t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0);
spi_message_add_tail(&t[0], &m);
@@ -388,11 +384,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
return 1;
}
- /* FIXME switch to OPCODE_FAST_READ. It's required for higher
- * clocks; and at this writing, every chip this driver handles
- * supports that opcode.
- */
-
/* Set up the write data buffer. */
opcode = flash->read_opcode;
flash->command[0] = opcode;
@@ -749,16 +740,19 @@ static const struct spi_device_id m25p_ids[] = {
{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
/* EON -- en25xxx */
- { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
- { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
- { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
- { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
- { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
- { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+
+ /* ESMT */
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
/* Everspin */
- { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) },
- { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) },
+ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) },
+ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) },
/* GigaDevice */
{ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
@@ -775,6 +769,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
{ "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
@@ -783,15 +778,16 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, 0) },
/* Micron */
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
/* PMC */
- { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
- { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
- { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
+ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
@@ -940,12 +936,7 @@ static int m25p_probe(struct spi_device *spi)
struct flash_info *info;
unsigned i;
struct mtd_part_parser_data ppdata;
- struct device_node __maybe_unused *np = spi->dev.of_node;
-
-#ifdef CONFIG_MTD_OF_PARTS
- if (!of_device_is_available(np))
- return -ENODEV;
-#endif
+ struct device_node *np = spi->dev.of_node;
/* Platform data helps sort out which chip type we have, as
* well as how this board partitions it. If we don't have
@@ -992,15 +983,13 @@ static int m25p_probe(struct spi_device *spi)
}
}
- flash = kzalloc(sizeof *flash, GFP_KERNEL);
+ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
if (!flash)
return -ENOMEM;
- flash->command = kmalloc(MAX_CMD_SIZE + (flash->fast_read ? 1 : 0),
- GFP_KERNEL);
- if (!flash->command) {
- kfree(flash);
+
+ flash->command = devm_kzalloc(&spi->dev, MAX_CMD_SIZE, GFP_KERNEL);
+ if (!flash->command)
return -ENOMEM;
- }
flash->spi = spi;
mutex_init(&flash->lock);
@@ -1062,13 +1051,14 @@ static int m25p_probe(struct spi_device *spi)
flash->page_size = info->page_size;
flash->mtd.writebufsize = flash->page_size;
- flash->fast_read = false;
- if (np && of_property_read_bool(np, "m25p,fast-read"))
+ if (np)
+ /* If we were instantiated by DT, use it */
+ flash->fast_read = of_property_read_bool(np, "m25p,fast-read");
+ else
+ /* If we weren't instantiated by DT, default to fast-read */
flash->fast_read = true;
-#ifdef CONFIG_M25PXX_USE_FAST_READ
- flash->fast_read = true;
-#endif
+ /* Some devices cannot do fast-read, no matter what DT tells us */
if (info->flags & M25P_NO_FR)
flash->fast_read = false;
@@ -1133,15 +1123,9 @@ static int m25p_probe(struct spi_device *spi)
static int m25p_remove(struct spi_device *spi)
{
struct m25p *flash = spi_get_drvdata(spi);
- int status;
/* Clean up MTD stuff. */
- status = mtd_device_unregister(&flash->mtd);
- if (status == 0) {
- kfree(flash->command);
- kfree(flash);
- }
- return 0;
+ return mtd_device_unregister(&flash->mtd);
}
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 0e8cbfeba11e..1cfbfcfb6e19 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -88,8 +88,6 @@ struct dataflash {
uint8_t command[4];
char name[24];
- unsigned partitioned:1;
-
unsigned short page_offset; /* offset in flash address */
unsigned int page_size; /* of bytes per page */
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 67823de68db6..e1f2aebaa489 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -94,7 +94,7 @@ static void unregister_devices(void)
}
}
-static int register_device(char *name, unsigned long start, unsigned long len)
+static int register_device(char *name, phys_addr_t start, size_t len)
{
struct phram_mtd_list *new;
int ret = -ENOMEM;
@@ -141,35 +141,35 @@ out0:
return ret;
}
-static int ustrtoul(const char *cp, char **endp, unsigned int base)
+static int parse_num64(uint64_t *num64, char *token)
{
- unsigned long result = simple_strtoul(cp, endp, base);
-
- switch (**endp) {
- case 'G':
- result *= 1024;
- case 'M':
- result *= 1024;
- case 'k':
- result *= 1024;
+ size_t len;
+ int shift = 0;
+ int ret;
+
+ len = strlen(token);
/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
- if ((*endp)[1] == 'i')
- (*endp) += 2;
+ if (len > 2) {
+ if (token[len - 1] == 'i') {
+ switch (token[len - 2]) {
+ case 'G':
+ shift += 10;
+ case 'M':
+ shift += 10;
+ case 'k':
+ shift += 10;
+ token[len - 2] = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
}
- return result;
-}
-static int parse_num32(uint32_t *num32, const char *token)
-{
- char *endp;
- unsigned long n;
+ ret = kstrtou64(token, 0, num64);
+ *num64 <<= shift;
- n = ustrtoul(token, &endp, 0);
- if (*endp)
- return -EINVAL;
-
- *num32 = n;
- return 0;
+ return ret;
}
static int parse_name(char **pname, const char *token)
@@ -209,19 +209,19 @@ static inline void kill_final_newline(char *str)
* This shall contain the module parameter if any. It is of the form:
* - phram=<device>,<address>,<size> for module case
* - phram.phram=<device>,<address>,<size> for built-in case
- * We leave 64 bytes for the device name, 12 for the address and 12 for the
+ * We leave 64 bytes for the device name, 20 for the address and 20 for the
* size.
* Example: phram.phram=rootfs,0xa0000000,512Mi
*/
-static __initdata char phram_paramline[64+12+12];
+static __initdata char phram_paramline[64 + 20 + 20];
static int __init phram_setup(const char *val)
{
- char buf[64+12+12], *str = buf;
+ char buf[64 + 20 + 20], *str = buf;
char *token[3];
char *name;
- uint32_t start;
- uint32_t len;
+ uint64_t start;
+ uint64_t len;
int i, ret;
if (strnlen(val, sizeof(buf)) >= sizeof(buf))
@@ -243,13 +243,13 @@ static int __init phram_setup(const char *val)
if (ret)
return ret;
- ret = parse_num32(&start, token[1]);
+ ret = parse_num64(&start, token[1]);
if (ret) {
kfree(name);
parse_err("illegal start address\n");
}
- ret = parse_num32(&len, token[2]);
+ ret = parse_num64(&len, token[2]);
if (ret) {
kfree(name);
parse_err("illegal device length\n");
@@ -257,7 +257,7 @@ static int __init phram_setup(const char *val)
ret = register_device(name, start, len);
if (!ret)
- pr_info("%s device: %#x at %#x\n", name, len, start);
+ pr_info("%s device: %#llx at %#llx\n", name, len, start);
else
kfree(name);
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index a42f1f0e7281..687bf27ec850 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -364,7 +364,7 @@ static int sst25l_probe(struct spi_device *spi)
if (!flash_info)
return -ENODEV;
- flash = kzalloc(sizeof(struct sst25l_flash), GFP_KERNEL);
+ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
if (!flash)
return -ENOMEM;
@@ -402,11 +402,8 @@ static int sst25l_probe(struct spi_device *spi)
ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
data ? data->parts : NULL,
data ? data->nr_parts : 0);
- if (ret) {
- kfree(flash);
- spi_set_drvdata(spi, NULL);
+ if (ret)
return -ENODEV;
- }
return 0;
}
@@ -414,12 +411,8 @@ static int sst25l_probe(struct spi_device *spi)
static int sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = spi_get_drvdata(spi);
- int ret;
- ret = mtd_device_unregister(&flash->mtd);
- if (ret == 0)
- kfree(flash);
- return ret;
+ return mtd_device_unregister(&flash->mtd);
}
static struct spi_driver sst25l_driver = {
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 3af351484098..b66b541877f0 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -50,7 +50,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
struct INFTLrecord *inftl;
unsigned long temp;
- if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
+ if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
return;
/* OK, this is moderately ugly. But probably safe. Alternatives? */
if (memcmp(mtd->name, "DiskOnChip", 10))
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index d3cfe26beeaa..2ef19aa0086b 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -703,7 +703,7 @@ static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
#define DO_XXLOCK_LOCK 1
#define DO_XXLOCK_UNLOCK 2
-int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
+static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
{
int ret = 0;
struct map_info *map = mtd->priv;
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index f581ac1cf022..46d195fca942 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -180,7 +180,6 @@ static void vr_nor_pci_remove(struct pci_dev *dev)
{
struct vr_nor_mtd *p = pci_get_drvdata(dev);
- pci_set_drvdata(dev, NULL);
vr_nor_destroy_partitions(p);
vr_nor_destroy_mtd_setup(p);
vr_nor_destroy_maps(p);
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index c2604f8b2a5e..36da518915b5 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -316,7 +316,6 @@ static void mtd_pci_remove(struct pci_dev *dev)
map->exit(dev, map);
kfree(map);
- pci_set_drvdata(dev, NULL);
pci_release_regions(dev);
}
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 676271659b37..10196f5a897d 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -55,7 +55,7 @@ struct platram_info {
static inline struct platram_info *to_platram_info(struct platform_device *dev)
{
- return (struct platram_info *)platform_get_drvdata(dev);
+ return platform_get_drvdata(dev);
}
/* platram_setrw
@@ -257,21 +257,7 @@ static struct platform_driver platram_driver = {
},
};
-/* module init/exit */
-
-static int __init platram_init(void)
-{
- printk("Generic platform RAM MTD, (c) 2004 Simtec Electronics\n");
- return platform_driver_register(&platram_driver);
-}
-
-static void __exit platram_exit(void)
-{
- platform_driver_unregister(&platram_driver);
-}
-
-module_init(platram_init);
-module_exit(platram_exit);
+module_platform_driver(platram_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index c77b68c9412f..3051c4c36240 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -212,7 +212,6 @@ static void scb2_flash_remove(struct pci_dev *dev)
if (!region_fail)
release_mem_region(SCB2_ADDR, SCB2_WINDOW);
- pci_set_drvdata(dev, NULL);
}
static struct pci_device_id scb2_flash_pci_ids[] = {
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 2aef5dda522b..485ea751c7f9 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -32,6 +32,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/mutex.h>
+#include <linux/major.h>
struct mtdblk_dev {
@@ -373,7 +374,7 @@ static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
static struct mtd_blktrans_ops mtdblock_tr = {
.name = "mtdblock",
- .major = 31,
+ .major = MTD_BLOCK_MAJOR,
.part_bits = 0,
.blksize = 512,
.open = mtdblock_open,
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 92759a9d2985..fb5dc89369de 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -24,6 +24,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/module.h>
+#include <linux/major.h>
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
@@ -70,7 +71,7 @@ static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
static struct mtd_blktrans_ops mtdblock_tr = {
.name = "mtdblock",
- .major = 31,
+ .major = MTD_BLOCK_MAJOR,
.part_bits = 0,
.blksize = 512,
.readsect = mtdblock_readsect,
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 684bfa39e4ee..9aa0c5e49c1d 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -32,6 +32,7 @@
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
+#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5e14d540ba2f..92311a56939f 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -157,6 +157,9 @@ static ssize_t mtd_type_show(struct device *dev,
case MTD_UBIVOLUME:
type = "ubi";
break;
+ case MTD_MLCNANDFLASH:
+ type = "mlc-nand";
+ break;
default:
type = "unknown";
}
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 334da5f583c0..20c02a3b7417 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -17,6 +17,7 @@
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/slab.h>
+#include <linux/major.h>
/*
* compare superblocks to see if they're equivalent
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index d88529841d3f..93ae6a6d94f7 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -96,43 +96,15 @@ config MTD_NAND_OMAP2
config MTD_NAND_OMAP_BCH
depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
- tristate "Enable support for hardware BCH error correction"
+ tristate "Support hardware based BCH error correction"
default n
select BCH
- select BCH_CONST_PARAMS
help
- Support for hardware BCH error correction.
-
-choice
- prompt "BCH error correction capability"
- depends on MTD_NAND_OMAP_BCH
-
-config MTD_NAND_OMAP_BCH8
- bool "8 bits / 512 bytes (recommended)"
- help
- Support correcting up to 8 bitflips per 512-byte block.
- This will use 13 bytes of spare area per 512 bytes of page data.
- This is the recommended mode, as 4-bit mode does not work
- on some OMAP3 revisions, due to a hardware bug.
-
-config MTD_NAND_OMAP_BCH4
- bool "4 bits / 512 bytes"
- help
- Support correcting up to 4 bitflips per 512-byte block.
- This will use 7 bytes of spare area per 512 bytes of page data.
- Note that this mode does not work on some OMAP3 revisions, due to a
- hardware bug. Please check your OMAP datasheet before selecting this
- mode.
-
-endchoice
-
-if MTD_NAND_OMAP_BCH
-config BCH_CONST_M
- default 13
-config BCH_CONST_T
- default 4 if MTD_NAND_OMAP_BCH4
- default 8 if MTD_NAND_OMAP_BCH8
-endif
+ This config enables the ELM hardware engine, which can be used to
+ locate and correct errors when using the BCH ECC scheme. This
+ offloads the CPU from ECC error searching and correction. However,
+ some legacy OMAP families such as OMAP2xxx and OMAP3xxx do not have
+ the ELM engine, so they should not enable this config symbol.
config MTD_NAND_IDS
tristate
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 060feeaf6b3e..2dbd9133c395 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -1062,56 +1062,28 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
}
/*
- * Get ECC requirement in ONFI parameters, returns -1 if ONFI
- * parameters is not supported.
- * return 0 if success to get the ECC requirement.
- */
-static int get_onfi_ecc_param(struct nand_chip *chip,
- int *ecc_bits, int *sector_size)
-{
- *ecc_bits = *sector_size = 0;
-
- if (chip->onfi_params.ecc_bits == 0xff)
- /* TODO: the sector_size and ecc_bits need to be find in
- * extended ecc parameter, currently we don't support it.
- */
- return -1;
-
- *ecc_bits = chip->onfi_params.ecc_bits;
-
- /* The default sector size (ecc codeword size) is 512 */
- *sector_size = 512;
-
- return 0;
-}
-
-/*
- * Get ecc requirement from ONFI parameters ecc requirement.
+ * Get minimum ecc requirements from NAND.
* If pmecc-cap, pmecc-sector-size in DTS are not specified, this function
- * will set them according to ONFI ecc requirement. Otherwise, use the
+ * will set them according to minimum ecc requirement. Otherwise, use the
* value in DTS file.
* return 0 if success. otherwise return error code.
*/
static int pmecc_choose_ecc(struct atmel_nand_host *host,
int *cap, int *sector_size)
{
- /* Get ECC requirement from ONFI parameters */
- *cap = *sector_size = 0;
- if (host->nand_chip.onfi_version) {
- if (!get_onfi_ecc_param(&host->nand_chip, cap, sector_size))
- dev_info(host->dev, "ONFI params, minimum required ECC: %d bits in %d bytes\n",
+ /* Get minimum ECC requirements */
+ if (host->nand_chip.ecc_strength_ds) {
+ *cap = host->nand_chip.ecc_strength_ds;
+ *sector_size = host->nand_chip.ecc_step_ds;
+ dev_info(host->dev, "minimum ECC: %d bits in %d bytes\n",
*cap, *sector_size);
- else
- dev_info(host->dev, "NAND chip ECC reqirement is in Extended ONFI parameter, we don't support yet.\n");
} else {
- dev_info(host->dev, "NAND chip is not ONFI compliant, assume ecc_bits is 2 in 512 bytes");
- }
- if (*cap == 0 && *sector_size == 0) {
*cap = 2;
*sector_size = 512;
+ dev_info(host->dev, "can't detect min. ECC, assume 2 bits in 512 bytes\n");
}
- /* If dts file doesn't specify then use the one in ONFI parameters */
+ /* If device tree doesn't specify, use NAND's minimum ECC parameters */
if (host->pmecc_corr_cap == 0) {
/* use the most fitable ecc bits (the near bigger one ) */
if (*cap <= 2)
@@ -1139,7 +1111,7 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host,
return 0;
}
-static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
+static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
struct atmel_nand_host *host)
{
struct mtd_info *mtd = &host->mtd;
@@ -1449,7 +1421,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
}
-#if defined(CONFIG_OF)
static int atmel_of_init_port(struct atmel_nand_host *host,
struct device_node *np)
{
@@ -1457,7 +1428,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
u32 offset[2];
int ecc_mode;
struct atmel_nand_data *board = &host->board;
- enum of_gpio_flags flags;
+ enum of_gpio_flags flags = 0;
if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
if (val >= 32) {
@@ -1540,15 +1511,8 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
return 0;
}
-#else
-static int atmel_of_init_port(struct atmel_nand_host *host,
- struct device_node *np)
-{
- return -EINVAL;
-}
-#endif
-static int __init atmel_hw_nand_init_params(struct platform_device *pdev,
+static int atmel_hw_nand_init_params(struct platform_device *pdev,
struct atmel_nand_host *host)
{
struct mtd_info *mtd = &host->mtd;
@@ -1987,7 +1951,7 @@ static struct platform_driver atmel_nand_nfc_driver;
/*
* Probe for the NAND device.
*/
-static int __init atmel_nand_probe(struct platform_device *pdev)
+static int atmel_nand_probe(struct platform_device *pdev)
{
struct atmel_nand_host *host;
struct mtd_info *mtd;
@@ -2019,7 +1983,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
mtd = &host->mtd;
nand_chip = &host->nand_chip;
host->dev = &pdev->dev;
- if (pdev->dev.of_node) {
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+ /* of_node can only be parsed when CONFIG_OF is enabled */
res = atmel_of_init_port(host, pdev->dev.of_node);
if (res)
goto err_nand_ioremap;
@@ -2184,7 +2149,7 @@ err_nand_ioremap:
/*
* Remove a NAND device.
*/
-static int __exit atmel_nand_remove(struct platform_device *pdev)
+static int atmel_nand_remove(struct platform_device *pdev)
{
struct atmel_nand_host *host = platform_get_drvdata(pdev);
struct mtd_info *mtd = &host->mtd;
@@ -2207,14 +2172,12 @@ static int __exit atmel_nand_remove(struct platform_device *pdev)
return 0;
}
-#if defined(CONFIG_OF)
static const struct of_device_id atmel_nand_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-nand" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
-#endif
static int atmel_nand_nfc_probe(struct platform_device *pdev)
{
@@ -2253,12 +2216,11 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
return 0;
}
-#if defined(CONFIG_OF)
-static struct of_device_id atmel_nand_nfc_match[] = {
+static const struct of_device_id atmel_nand_nfc_match[] = {
{ .compatible = "atmel,sama5d3-nfc" },
{ /* sentinel */ }
};
-#endif
+MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
static struct platform_driver atmel_nand_nfc_driver = {
.driver = {
@@ -2270,7 +2232,8 @@ static struct platform_driver atmel_nand_nfc_driver = {
};
static struct platform_driver atmel_nand_driver = {
- .remove = __exit_p(atmel_nand_remove),
+ .probe = atmel_nand_probe,
+ .remove = atmel_nand_remove,
.driver = {
.name = "atmel_nand",
.owner = THIS_MODULE,
@@ -2278,7 +2241,7 @@ static struct platform_driver atmel_nand_driver = {
},
};
-module_platform_driver_probe(atmel_nand_driver, atmel_nand_probe);
+module_platform_driver(atmel_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rick Bronson");
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
index 7bae569fdc79..107445911315 100644
--- a/drivers/mtd/nand/bcm47xxnflash/main.c
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -29,11 +29,9 @@ static int bcm47xxnflash_probe(struct platform_device *pdev)
struct bcm47xxnflash *b47n;
int err = 0;
- b47n = kzalloc(sizeof(*b47n), GFP_KERNEL);
- if (!b47n) {
- err = -ENOMEM;
- goto out;
- }
+ b47n = devm_kzalloc(&pdev->dev, sizeof(*b47n), GFP_KERNEL);
+ if (!b47n)
+ return -ENOMEM;
b47n->nand_chip.priv = b47n;
b47n->mtd.owner = THIS_MODULE;
@@ -48,22 +46,16 @@ static int bcm47xxnflash_probe(struct platform_device *pdev)
}
if (err) {
pr_err("Initialization failed: %d\n", err);
- goto err_init;
+ return err;
}
err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0);
if (err) {
pr_err("Failed to register MTD device: %d\n", err);
- goto err_dev_reg;
+ return err;
}
return 0;
-
-err_dev_reg:
-err_init:
- kfree(b47n);
-out:
- return err;
}
static int bcm47xxnflash_remove(struct platform_device *pdev)
@@ -85,22 +77,4 @@ static struct platform_driver bcm47xxnflash_driver = {
},
};
-static int __init bcm47xxnflash_init(void)
-{
- int err;
-
- err = platform_driver_register(&bcm47xxnflash_driver);
- if (err)
- pr_err("Failed to register bcm47xx nand flash driver: %d\n",
- err);
-
- return err;
-}
-
-static void __exit bcm47xxnflash_exit(void)
-{
- platform_driver_unregister(&bcm47xxnflash_driver);
-}
-
-module_init(bcm47xxnflash_init);
-module_exit(bcm47xxnflash_exit);
+module_platform_driver(bcm47xxnflash_driver);
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 2ed2bb33a6e7..370b9dd7a278 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1394,7 +1394,7 @@ static struct nand_bbt_descr bbt_mirror_descr = {
};
/* initialize driver data structures */
-void denali_drv_init(struct denali_nand_info *denali)
+static void denali_drv_init(struct denali_nand_info *denali)
{
denali->idx = 0;
@@ -1520,7 +1520,7 @@ int denali_init(struct denali_nand_info *denali)
* so just let controller do 15bit ECC for MLC and 8bit ECC for
* SLC if possible.
* */
- if (denali->nand.cellinfo & NAND_CI_CELLTYPE_MSK &&
+ if (!nand_is_slc(&denali->nand) &&
(denali->mtd.oobsize > (denali->bbtskipbytes +
ECC_15BITS * (denali->mtd.writesize /
ECC_SECTOR_SIZE)))) {
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index e3e46623b2b4..033f177a6369 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -119,7 +119,6 @@ static void denali_pci_remove(struct pci_dev *dev)
iounmap(denali->flash_mem);
pci_release_regions(dev);
pci_disable_device(dev);
- pci_set_drvdata(dev, NULL);
kfree(denali);
}
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index eaa3c29ad860..b68a4959f700 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -38,7 +38,7 @@
#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
#endif
-static unsigned long __initdata doc_locations[] = {
+static unsigned long doc_locations[] __initdata = {
#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
#ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 548db2389fab..1b0265e85a06 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -44,6 +44,7 @@
#include <linux/mtd/nand.h>
#include <linux/bch.h>
#include <linux/bitrev.h>
+#include <linux/jiffies.h>
/*
* In "reliable mode" consecutive 2k pages are used in parallel (in some
@@ -269,7 +270,7 @@ static int poll_status(struct docg4_priv *doc)
*/
uint16_t flash_status;
- unsigned int timeo;
+ unsigned long timeo;
void __iomem *docptr = doc->virtadr;
dev_dbg(doc->dev, "%s...\n", __func__);
@@ -277,22 +278,18 @@ static int poll_status(struct docg4_priv *doc)
/* hardware quirk requires reading twice initially */
flash_status = readw(docptr + DOC_FLASHCONTROL);
- timeo = 1000;
+ timeo = jiffies + msecs_to_jiffies(200); /* generous timeout */
do {
cpu_relax();
flash_status = readb(docptr + DOC_FLASHCONTROL);
- } while (!(flash_status & DOC_CTRL_FLASHREADY) && --timeo);
+ } while (!(flash_status & DOC_CTRL_FLASHREADY) &&
+ time_before(jiffies, timeo));
-
- if (!timeo) {
+ if (unlikely(!(flash_status & DOC_CTRL_FLASHREADY))) {
dev_err(doc->dev, "%s: timed out!\n", __func__);
return NAND_STATUS_FAIL;
}
- if (unlikely(timeo < 50))
- dev_warn(doc->dev, "%s: nearly timed out; %d remaining\n",
- __func__, timeo);
-
return 0;
}
@@ -494,7 +491,7 @@ static uint8_t docg4_read_byte(struct mtd_info *mtd)
return status;
}
- dev_warn(doc->dev, "unexpectd call to read_byte()\n");
+ dev_warn(doc->dev, "unexpected call to read_byte()\n");
return 0;
}
@@ -1239,7 +1236,6 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
nand->block_markbad = docg4_block_markbad;
nand->read_buf = docg4_read_buf;
nand->write_buf = docg4_write_buf16;
- nand->scan_bbt = nand_default_bbt;
nand->erase_cmd = docg4_erase_block;
nand->ecc.read_page = docg4_read_page;
nand->ecc.write_page = docg4_write_page;
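
The docg4 hunk above replaces a fixed iteration count with a wall-clock
timeout. A minimal sketch of the idiom, assuming hypothetical names
(my_poll_ready, MY_READY) and a 200 ms budget rather than the driver's
actual register layout:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <asm/processor.h>

#define MY_READY	(1 << 0)	/* hypothetical ready bit */

/* Bound the poll by elapsed jiffies rather than by a loop counter,
 * whose duration would otherwise scale with CPU speed. */
static int my_poll_ready(void __iomem *reg)
{
	unsigned long timeo = jiffies + msecs_to_jiffies(200);
	u8 status;

	do {
		cpu_relax();
		status = readb(reg);
	} while (!(status & MY_READY) && time_before(jiffies, timeo));

	return (status & MY_READY) ? 0 : -ETIMEDOUT;
}
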
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 20657209a472..c966fc7474ce 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -650,8 +651,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
chip->page_shift);
dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
chip->phys_erase_shift);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->ecclayout = %p\n",
- chip->ecclayout);
dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
chip->ecc.mode);
dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 317a771f1587..43355779cff5 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -135,6 +136,69 @@ static struct nand_ecclayout oob_4096_ecc8 = {
.oobfree = { {2, 6}, {136, 82} },
};
+/* 8192-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_8192_ecc4 = {
+ .eccbytes = 128,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ },
+ .oobfree = { {2, 6}, {136, 208} },
+};
+
+/* 8192-byte page size with 8-bit ECC -- requires 218-byte OOB */
+static struct nand_ecclayout oob_8192_ecc8 = {
+ .eccbytes = 256,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 258, 259, 260, 261, 262, 263,
+ },
+ .oobfree = { {2, 6}, {264, 80} },
+};
/*
* Generic flash bbt descriptors
@@ -441,20 +505,29 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
if (mtd->writesize > 512) {
nand_fcr0 =
(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
- (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT);
+ (NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
iowrite32be(
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT),
- &ifc->ifc_nand.nand_fir0);
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_RDSTAT <<
+ IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
} else {
nand_fcr0 = ((NAND_CMD_PAGEPROG <<
IFC_NAND_FCR0_CMD1_SHIFT) |
(NAND_CMD_SEQIN <<
- IFC_NAND_FCR0_CMD2_SHIFT));
+ IFC_NAND_FCR0_CMD2_SHIFT) |
+ (NAND_CMD_STATUS <<
+ IFC_NAND_FCR0_CMD3_SHIFT));
iowrite32be(
(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
@@ -463,8 +536,13 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
&ifc->ifc_nand.nand_fir0);
- iowrite32be(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT,
- &ifc->ifc_nand.nand_fir1);
+ iowrite32be(
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_RDSTAT <<
+ IFC_NAND_FIR1_OP7_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
if (column >= mtd->writesize)
nand_fcr0 |=
@@ -718,8 +796,6 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
chip->page_shift);
dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
chip->phys_erase_shift);
- dev_dbg(priv->dev, "%s: nand->ecclayout = %p\n", __func__,
- chip->ecclayout);
dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
chip->ecc.mode);
dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
@@ -872,11 +948,25 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
} else {
layout = &oob_4096_ecc8;
chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
}
priv->bufnum_mask = 1;
break;
+ case CSOR_NAND_PGS_8K:
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
+ CSOR_NAND_ECC_MODE_4) {
+ layout = &oob_8192_ecc4;
+ } else {
+ layout = &oob_8192_ecc8;
+ chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
+ }
+
+ priv->bufnum_mask = 0;
+ break;
+
default:
dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
return -ENODEV;
@@ -907,7 +997,6 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
iounmap(priv->vbase);
ifc_nand_ctrl->chips[priv->bank] = NULL;
- dev_set_drvdata(priv->dev, NULL);
return 0;
}
@@ -1082,25 +1171,7 @@ static struct platform_driver fsl_ifc_nand_driver = {
.remove = fsl_ifc_nand_remove,
};
-static int __init fsl_ifc_nand_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&fsl_ifc_nand_driver);
- if (ret)
- printk(KERN_ERR "fsl-ifc: Failed to register platform"
- "driver\n");
-
- return ret;
-}
-
-static void __exit fsl_ifc_nand_exit(void)
-{
- platform_driver_unregister(&fsl_ifc_nand_driver);
-}
-
-module_init(fsl_ifc_nand_init);
-module_exit(fsl_ifc_nand_exit);
+module_platform_driver(fsl_ifc_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale");
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 04e07252d74b..4d203e84e8ca 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -18,6 +18,7 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/mtd.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/io.h>
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 4f8857fa48a7..aaced29727fb 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -187,6 +187,12 @@ int gpmi_init(struct gpmi_nand_data *this)
/* Select BCH ECC. */
writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+ /*
+ * Decouple the chip select from dma channel. We use dma0 for all
+ * the chips.
+ */
+ writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
gpmi_disable_clk(this);
return 0;
err_out:
@@ -1073,6 +1079,13 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
} else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
+ /*
+ * In the imx6, all the ready/busy pins are bound
+ * together. So we only need to check chip 0.
+ */
+ if (GPMI_IS_MX6Q(this))
+ chip = 0;
+
/* MX28 shares the same R/B register as MX6Q. */
mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
reg = readl(r->gpmi_regs + HW_GPMI_STAT);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 59ab0692f0b9..7ac22802e4dc 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -45,7 +45,10 @@ static struct nand_bbt_descr gpmi_bbt_descr = {
.pattern = scan_ff_pattern
};
-/* We will use all the (page + OOB). */
+/*
+ * We may change the layout if we can get the ECC info from the datasheet,
+ * else we will use all the (page + OOB).
+ */
static struct nand_ecclayout gpmi_hw_ecclayout = {
.eccbytes = 0,
.eccpos = { 0, },
@@ -349,14 +352,13 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
- return set_geometry_by_ecc_info(this) ? 0 : legacy_set_geometry(this);
+ return legacy_set_geometry(this);
}
struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
{
- int chipnr = this->current_chip;
-
- return this->dma_chans[chipnr];
+ /* We use the DMA channel 0 to access all the nand chips. */
+ return this->dma_chans[0];
}
/* Can we use the upper's buffer directly for DMA? */
@@ -1263,14 +1265,22 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
static int
gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
- /*
- * The BCH will use all the (page + oob).
- * Our gpmi_hw_ecclayout can only prohibit the JFFS2 to write the oob.
- * But it can not stop some ioctls such MEMWRITEOOB which uses
- * MTD_OPS_PLACE_OOB. So We have to implement this function to prohibit
- * these ioctls too.
- */
- return -EPERM;
+ struct nand_oobfree *of = mtd->ecclayout->oobfree;
+ int status = 0;
+
+ /* Do we have available oob area? */
+ if (!of->length)
+ return -EPERM;
+
+ if (!nand_is_slc(chip))
+ return -EPERM;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of->offset, page);
+ chip->write_buf(mtd, chip->oob_poi + of->offset, of->length);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
}
static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -1664,7 +1674,7 @@ static int gpmi_nfc_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
- ret = nand_scan_ident(mtd, 1, NULL);
+ ret = nand_scan_ident(mtd, 2, NULL);
if (ret)
goto err_out;
@@ -1691,19 +1701,19 @@ static const struct platform_device_id gpmi_ids[] = {
{ .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
{ .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
{ .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
- {},
+ {}
};
static const struct of_device_id gpmi_nand_id_table[] = {
{
.compatible = "fsl,imx23-gpmi-nand",
- .data = (void *)&gpmi_ids[IS_MX23]
+ .data = (void *)&gpmi_ids[IS_MX23],
}, {
.compatible = "fsl,imx28-gpmi-nand",
- .data = (void *)&gpmi_ids[IS_MX28]
+ .data = (void *)&gpmi_ids[IS_MX28],
}, {
.compatible = "fsl,imx6q-gpmi-nand",
- .data = (void *)&gpmi_ids[IS_MX6Q]
+ .data = (void *)&gpmi_ids[IS_MX6Q],
}, {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
index 53397cc290fc..82114cdc8330 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
@@ -108,6 +108,9 @@
#define HW_GPMI_CTRL1_CLR 0x00000068
#define HW_GPMI_CTRL1_TOG 0x0000006c
+#define BP_GPMI_CTRL1_DECOUPLE_CS 24
+#define BM_GPMI_CTRL1_DECOUPLE_CS (1 << BP_GPMI_CTRL1_DECOUPLE_CS)
+
#define BP_GPMI_CTRL1_WRN_DLY_SEL 22
#define BM_GPMI_CTRL1_WRN_DLY_SEL (0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)
#define BF_GPMI_CTRL1_WRN_DLY_SEL(v) \
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index f4dd2a887ea5..327d96c03505 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -905,7 +905,7 @@ static struct platform_driver lpc32xx_nand_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(lpc32xx_nand_match),
+ .of_match_table = lpc32xx_nand_match,
},
};
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index add75709d415..23e6974ccd20 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -893,7 +893,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
/* Avoid extra scan if using BBT, setup BBT support */
if (host->ncfg->use_bbt) {
- chip->options |= NAND_SKIP_BBTSCAN;
chip->bbt_options |= NAND_BBT_USE_FLASH;
/*
@@ -915,13 +914,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
goto err_exit3;
}
- /* Standard layout in FLASH for bad block tables */
- if (host->ncfg->use_bbt) {
- if (nand_default_bbt(mtd) < 0)
- dev_err(&pdev->dev,
- "Error initializing default bad block tables\n");
- }
-
mtd->name = "nxp_lpc3220_slc";
ppdata.of_node = pdev->dev.of_node;
res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
@@ -1023,7 +1015,7 @@ static struct platform_driver lpc32xx_nand_driver = {
.driver = {
.name = LPC32XX_MODNAME,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(lpc32xx_nand_match),
+ .of_match_table = lpc32xx_nand_match,
},
};
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 3c60a000b426..439bc3896418 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -36,7 +36,9 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/mpc5121.h>
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index ce8242b6c3e7..103775525c53 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -32,6 +32,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/completion.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index d340b2f198c6..ec1db1e19c05 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2912,12 +2912,13 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
/* get the info we want. */
ecc = (struct onfi_ext_ecc_info *)cursor;
- if (ecc->codeword_size) {
- chip->ecc_strength_ds = ecc->ecc_bits;
- chip->ecc_step_ds = 1 << ecc->codeword_size;
+ if (!ecc->codeword_size) {
+ pr_debug("Invalid codeword size\n");
+ goto ext_out;
}
- pr_info("ONFI extended param page detected.\n");
+ chip->ecc_strength_ds = ecc->ecc_bits;
+ chip->ecc_step_ds = 1 << ecc->codeword_size;
ret = 0;
ext_out:
@@ -2935,29 +2936,34 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
int i;
int val;
- /* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */
- if (chip->options & NAND_BUSWIDTH_16) {
- pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
- return 0;
- }
/* Try ONFI for unknown chip or LP */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
return 0;
+ /*
+ * ONFI must be probed in 8-bit mode or with NAND_BUSWIDTH_AUTO, not
+ * with NAND_BUSWIDTH_16
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ pr_err("ONFI cannot be probed in 16-bit mode; aborting\n");
+ return 0;
+ }
+
chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
for (i = 0; i < 3; i++) {
chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
le16_to_cpu(p->crc)) {
- pr_info("ONFI param page %d valid\n", i);
break;
}
}
- if (i == 3)
+ if (i == 3) {
+ pr_err("Could not find valid ONFI parameter page; aborting\n");
return 0;
+ }
/* Check version */
val = le16_to_cpu(p->revision);
@@ -2981,11 +2987,23 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
sanitize_string(p->model, sizeof(p->model));
if (!mtd->name)
mtd->name = p->model;
+
mtd->writesize = le32_to_cpu(p->byte_per_page);
- mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
+
+ /*
+ * pages_per_block and blocks_per_lun may not be a power-of-2 size
+ * (don't ask me who thought of this...). MTD assumes that these
+ * dimensions will be power-of-2, so just truncate the remaining area.
+ */
+ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize *= mtd->writesize;
+
mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
- chip->chipsize = le32_to_cpu(p->blocks_per_lun);
+
+ /* See erasesize comment */
+ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+ chip->bits_per_cell = p->bits_per_cell;
if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
*busw = NAND_BUSWIDTH_16;
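
The fls()-based truncation in the hunk above rounds an ONFI dimension down
to the nearest power of two. A small sketch, assuming a hypothetical helper
name:

#include <linux/bitops.h>
#include <linux/types.h>

/* fls() returns the 1-based index of the highest set bit, so for
 * x = 96 (0b1100000) we get fls(x) = 7 and 1 << 6 = 64: the odd-sized
 * dimension is rounded down to a power of two, as done above for
 * pages_per_block and blocks_per_lun. */
static inline u32 round_down_pow2(u32 x)
{
	return x ? 1u << (fls(x) - 1) : 0;
}
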
@@ -3009,10 +3027,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
/* The Extended Parameter Page is supported since ONFI 2.1. */
if (nand_flash_detect_ext_param_page(mtd, chip, p))
- pr_info("Failed to detect the extended param page.\n");
+ pr_warn("Failed to detect ONFI extended param page\n");
+ } else {
+ pr_warn("Could not retrieve ONFI ECC requirements\n");
}
- pr_info("ONFI flash detected\n");
return 1;
}
@@ -3075,6 +3094,16 @@ static int nand_id_len(u8 *id_data, int arrlen)
return arrlen;
}
+/* Extract the bits of per cell from the 3rd byte of the extended ID */
+static int nand_get_bits_per_cell(u8 cellinfo)
+{
+ int bits;
+
+ bits = cellinfo & NAND_CI_CELLTYPE_MSK;
+ bits >>= NAND_CI_CELLTYPE_SHIFT;
+ return bits + 1;
+}
+
/*
* Many new NAND share similar device ID codes, which represent the size of the
* chip. The rest of the parameters must be decoded according to generic or
@@ -3085,7 +3114,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
{
int extid, id_len;
/* The 3rd id byte holds MLC / multichip data */
- chip->cellinfo = id_data[2];
+ chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
/* The 4th id byte is the important one */
extid = id_data[3];
@@ -3101,8 +3130,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
* ID to decide what to do.
*/
if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
- (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
- id_data[5] != 0x00) {
+ !nand_is_slc(chip) && id_data[5] != 0x00) {
/* Calc pagesize */
mtd->writesize = 2048 << (extid & 0x03);
extid >>= 2;
@@ -3134,7 +3162,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
(((extid >> 1) & 0x04) | (extid & 0x03));
*busw = 0;
} else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
- (chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
+ !nand_is_slc(chip)) {
unsigned int tmp;
/* Calc pagesize */
@@ -3197,7 +3225,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
* - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
*/
if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
- !(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ nand_is_slc(chip) &&
(id_data[5] & 0x7) == 0x6 /* 24nm */ &&
!(id_data[4] & 0x80) /* !BENAND */) {
mtd->oobsize = 32 * mtd->writesize >> 9;
@@ -3222,6 +3250,9 @@ static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize = mtd->writesize / 32;
*busw = type->options & NAND_BUSWIDTH_16;
+ /* All legacy ID NAND are small-page, SLC */
+ chip->bits_per_cell = 1;
+
/*
* Check for Spansion/AMD ID + repeating 5th, 6th byte since
* some Spansion chips have erasesize that conflicts with size
@@ -3258,11 +3289,11 @@ static void nand_decode_bbm_options(struct mtd_info *mtd,
* Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
* AMD/Spansion, and Macronix. All others scan only the first page.
*/
- if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ if (!nand_is_slc(chip) &&
(maf_id == NAND_MFR_SAMSUNG ||
maf_id == NAND_MFR_HYNIX))
chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
- else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ else if ((nand_is_slc(chip) &&
(maf_id == NAND_MFR_SAMSUNG ||
maf_id == NAND_MFR_HYNIX ||
maf_id == NAND_MFR_TOSHIBA ||
@@ -3286,7 +3317,7 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
mtd->erasesize = type->erasesize;
mtd->oobsize = type->oobsize;
- chip->cellinfo = id_data[2];
+ chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
chip->chipsize = (uint64_t)type->chipsize << 20;
chip->options |= type->options;
chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
@@ -3441,11 +3472,13 @@ ident_done:
if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
chip->cmdfunc = nand_command_lp;
- pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
- " %dMiB, page size: %d, OOB size: %d\n",
+ pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s)\n",
*maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
- chip->onfi_version ? chip->onfi_params.model : type->name,
- (int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize);
+ chip->onfi_version ? chip->onfi_params.model : type->name);
+
+ pr_info("NAND device: %dMiB, %s, page size: %d, OOB size: %d\n",
+ (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
+ mtd->writesize, mtd->oobsize);
return type;
}
@@ -3738,8 +3771,7 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
- if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
- !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
switch (chip->ecc.steps) {
case 2:
mtd->subpage_sft = 1;
@@ -3764,7 +3796,7 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->options |= NAND_SUBPAGE_READ;
/* Fill in remaining MTD driver data */
- mtd->type = MTD_NANDFLASH;
+ mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
MTD_CAP_NANDFLASH;
mtd->_erase = nand_erase;
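
The nand_is_slc() checks introduced throughout nand_base.c boil down to the
new bits_per_cell field: the cell-type bits of the 3rd ID byte encode
bits-per-cell minus one, so a value of 1 means SLC and anything larger is
treated as MLC. A sketch of those semantics (the real helper is provided by
include/linux/mtd/nand.h; my_nand_is_slc is an illustrative name):

#include <linux/types.h>
#include <linux/mtd/nand.h>

static inline bool my_nand_is_slc(const struct nand_chip *chip)
{
	return chip->bits_per_cell == 1;	/* 1 bit per cell == SLC */
}
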
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index bc06196d5739..c0615d1526f9 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -412,25 +412,6 @@ static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
}
}
-/* Scan a given block full */
-static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
- loff_t offs, uint8_t *buf, size_t readlen,
- int scanlen, int numpages)
-{
- int ret, j;
-
- ret = scan_read_oob(mtd, buf, offs, readlen);
- /* Ignore ECC errors when checking for BBM */
- if (ret && !mtd_is_bitflip_or_eccerr(ret))
- return ret;
-
- for (j = 0; j < numpages; j++, buf += scanlen) {
- if (check_pattern(buf, scanlen, mtd->writesize, bd))
- return 1;
- }
- return 0;
-}
-
/* Scan a given block partially */
static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
loff_t offs, uint8_t *buf, int numpages)
@@ -477,24 +458,17 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *bd, int chip)
{
struct nand_chip *this = mtd->priv;
- int i, numblocks, numpages, scanlen;
+ int i, numblocks, numpages;
int startblock;
loff_t from;
- size_t readlen;
pr_info("Scanning device for bad blocks\n");
- if (bd->options & NAND_BBT_SCANALLPAGES)
- numpages = 1 << (this->bbt_erase_shift - this->page_shift);
- else if (bd->options & NAND_BBT_SCAN2NDPAGE)
+ if (bd->options & NAND_BBT_SCAN2NDPAGE)
numpages = 2;
else
numpages = 1;
- /* We need only read few bytes from the OOB area */
- scanlen = 0;
- readlen = bd->len;
-
if (chip == -1) {
numblocks = mtd->size >> this->bbt_erase_shift;
startblock = 0;
@@ -519,12 +493,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
BUG_ON(bd->options & NAND_BBT_NO_OOB);
- if (bd->options & NAND_BBT_SCANALLPAGES)
- ret = scan_block_full(mtd, bd, from, buf, readlen,
- scanlen, numpages);
- else
- ret = scan_block_fast(mtd, bd, from, buf, numpages);
-
+ ret = scan_block_fast(mtd, bd, from, buf, numpages);
if (ret < 0)
return ret;
@@ -1392,4 +1361,3 @@ int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
}
EXPORT_SYMBOL(nand_scan_bbt);
-EXPORT_SYMBOL(nand_default_bbt);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index bdc1d15369f8..3c6d5c601ade 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -575,12 +575,12 @@ static int alloc_device(struct nandsim *ns)
cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
if (IS_ERR(cfile))
return PTR_ERR(cfile);
- if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
+ if (!file_readable(cfile)) {
NS_ERR("alloc_device: cache file not readable\n");
err = -EINVAL;
goto err_close;
}
- if (!cfile->f_op->write && !cfile->f_op->aio_write) {
+ if (!file_writable(cfile)) {
NS_ERR("alloc_device: cache file not writeable\n");
err = -EINVAL;
goto err_close;
@@ -2372,7 +2372,7 @@ static int __init ns_init_module(void)
if ((retval = init_nandsim(nsmtd)) != 0)
goto err_exit;
- if ((retval = nand_default_bbt(nsmtd)) != 0)
+ if ((retval = chip->scan_bbt(nsmtd)) != 0)
goto err_exit;
if ((retval = parse_badblocks(nand, nsmtd)) != 0)
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 8e148f1478fd..69eaba690a99 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -30,6 +30,7 @@
#include <linux/mtd/ndfc.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/io.h>
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 4ecf0e5fd484..ec40b8d10201 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -25,10 +25,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
-#include <linux/bch.h>
+#include <linux/mtd/nand_bch.h>
#include <linux/platform_data/elm.h>
-#endif
#include <linux/platform_data/mtd-nand-omap2.h>
@@ -141,6 +139,8 @@
#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */
+#define BADBLOCK_MARKER_LENGTH 2
+
#ifdef CONFIG_MTD_NAND_OMAP_BCH
static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
0xac, 0x6b, 0xff, 0x99, 0x7b};
@@ -149,17 +149,6 @@ static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
-/* Define some generic bad / good block scan pattern which are used
- * while scanning a device for factory marked good / bad blocks
- */
-static uint8_t scan_ff_pattern[] = { 0xff };
-static struct nand_bbt_descr bb_descrip_flashbased = {
- .options = NAND_BBT_SCANALLPAGES,
- .offs = 0,
- .len = 1,
- .pattern = scan_ff_pattern,
-};
-
struct omap_nand_info {
struct nand_hw_control controller;
@@ -182,14 +171,10 @@ struct omap_nand_info {
u_char *buf;
int buf_len;
struct gpmc_nand_regs reg;
-
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
- struct bch_control *bch;
- struct nand_ecclayout ecclayout;
+ /* fields specific for BCHx_HW ECC scheme */
bool is_elm_used;
struct device *elm_dev;
struct device_node *of_node;
-#endif
};
/**
@@ -1058,8 +1043,7 @@ static int omap_dev_ready(struct mtd_info *mtd)
}
}
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
-
+#if defined(CONFIG_MTD_NAND_ECC_BCH) || defined(CONFIG_MTD_NAND_OMAP_BCH)
/**
* omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
* @mtd: MTD device structure
@@ -1140,7 +1124,9 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
/* Clear ecc and enable bits */
writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
}
+#endif
+#ifdef CONFIG_MTD_NAND_ECC_BCH
/**
* omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
* @mtd: MTD device structure
@@ -1225,7 +1211,9 @@ static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
return 0;
}
+#endif /* CONFIG_MTD_NAND_ECC_BCH */
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
/**
* omap3_calculate_ecc_bch - Generate bytes of ECC bytes
* @mtd: MTD device structure
@@ -1519,38 +1507,6 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
}
/**
- * omap3_correct_data_bch - Decode received data and correct errors
- * @mtd: MTD device structure
- * @data: page data
- * @read_ecc: ecc read from nand flash
- * @calc_ecc: ecc read from HW ECC registers
- */
-static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
- u_char *read_ecc, u_char *calc_ecc)
-{
- int i, count;
- /* cannot correct more than 8 errors */
- unsigned int errloc[8];
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
-
- count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
- errloc);
- if (count > 0) {
- /* correct errors */
- for (i = 0; i < count; i++) {
- /* correct data only, not ecc bytes */
- if (errloc[i] < 8*512)
- data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
- pr_debug("corrected bitflip %u\n", errloc[i]);
- }
- } else if (count < 0) {
- pr_err("ecc unrecoverable error\n");
- }
- return count;
-}
-
-/**
* omap_write_page_bch - BCH ecc based write page function for entire page
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -1637,197 +1593,46 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * omap3_free_bch - Release BCH ecc resources
- * @mtd: MTD device structure
- */
-static void omap3_free_bch(struct mtd_info *mtd)
-{
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
- if (info->bch) {
- free_bch(info->bch);
- info->bch = NULL;
- }
-}
-
-/**
- * omap3_init_bch - Initialize BCH ECC
- * @mtd: MTD device structure
- * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
- */
-static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
-{
- int max_errors;
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
-#ifdef CONFIG_MTD_NAND_OMAP_BCH8
- const int hw_errors = BCH8_MAX_ERROR;
-#else
- const int hw_errors = BCH4_MAX_ERROR;
-#endif
- enum bch_ecc bch_type;
- const __be32 *parp;
- int lenp;
- struct device_node *elm_node;
-
- info->bch = NULL;
-
- max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ?
- BCH8_MAX_ERROR : BCH4_MAX_ERROR;
- if (max_errors != hw_errors) {
- pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
- max_errors, hw_errors);
- goto fail;
- }
-
- info->nand.ecc.size = 512;
- info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
- info->nand.ecc.mode = NAND_ECC_HW;
- info->nand.ecc.strength = max_errors;
-
- if (hw_errors == BCH8_MAX_ERROR)
- bch_type = BCH8_ECC;
- else
- bch_type = BCH4_ECC;
-
- /* Detect availability of ELM module */
- parp = of_get_property(info->of_node, "elm_id", &lenp);
- if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
- pr_err("Missing elm_id property, fall back to Software BCH\n");
- info->is_elm_used = false;
- } else {
- struct platform_device *pdev;
-
- elm_node = of_find_node_by_phandle(be32_to_cpup(parp));
- pdev = of_find_device_by_node(elm_node);
- info->elm_dev = &pdev->dev;
-
- if (elm_config(info->elm_dev, bch_type) == 0)
- info->is_elm_used = true;
- }
-
- if (info->is_elm_used && (mtd->writesize <= 4096)) {
-
- if (hw_errors == BCH8_MAX_ERROR)
- info->nand.ecc.bytes = BCH8_SIZE;
- else
- info->nand.ecc.bytes = BCH4_SIZE;
-
- info->nand.ecc.correct = omap_elm_correct_data;
- info->nand.ecc.calculate = omap3_calculate_ecc_bch;
- info->nand.ecc.read_page = omap_read_page_bch;
- info->nand.ecc.write_page = omap_write_page_bch;
- } else {
- /*
- * software bch library is only used to detect and
- * locate errors
- */
- info->bch = init_bch(13, max_errors,
- 0x201b /* hw polynomial */);
- if (!info->bch)
- goto fail;
-
- info->nand.ecc.correct = omap3_correct_data_bch;
-
- /*
- * The number of corrected errors in an ecc block that will
- * trigger block scrubbing defaults to the ecc strength (4 or 8)
- * Set mtd->bitflip_threshold here to define a custom threshold.
- */
-
- if (max_errors == 8) {
- info->nand.ecc.bytes = 13;
- info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
- } else {
- info->nand.ecc.bytes = 7;
- info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
- }
- }
-
- pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
- return 0;
-fail:
- omap3_free_bch(mtd);
- return -1;
-}
-
-/**
- * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
- * @mtd: MTD device structure
+ * is_elm_present - checks for presence of ELM module by scanning DT nodes
+ * @omap_nand_info: NAND device structure containing platform data
+ * @bch_type: 0x0=BCH4, 0x1=BCH8, 0x2=BCH16
*/
-static int omap3_init_bch_tail(struct mtd_info *mtd)
+static int is_elm_present(struct omap_nand_info *info,
+ struct device_node *elm_node, enum bch_ecc bch_type)
{
- int i, steps, offset;
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
- struct nand_ecclayout *layout = &info->ecclayout;
-
- /* build oob layout */
- steps = mtd->writesize/info->nand.ecc.size;
- layout->eccbytes = steps*info->nand.ecc.bytes;
-
- /* do not bother creating special oob layouts for small page devices */
- if (mtd->oobsize < 64) {
- pr_err("BCH ecc is not supported on small page devices\n");
- goto fail;
+ struct platform_device *pdev;
+ info->is_elm_used = false;
+ /* check whether elm-id is passed via DT */
+ if (!elm_node) {
+ pr_err("nand: error: ELM DT node not found\n");
+ return -ENODEV;
}
-
- /* reserve 2 bytes for bad block marker */
- if (layout->eccbytes+2 > mtd->oobsize) {
- pr_err("no oob layout available for oobsize %d eccbytes %u\n",
- mtd->oobsize, layout->eccbytes);
- goto fail;
+ pdev = of_find_device_by_node(elm_node);
+ /* check whether ELM device is registered */
+ if (!pdev) {
+ pr_err("nand: error: ELM device not found\n");
+ return -ENODEV;
}
-
- /* ECC layout compatible with RBL for BCH8 */
- if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
- offset = 2;
- else
- offset = mtd->oobsize - layout->eccbytes;
-
- /* put ecc bytes at oob tail */
- for (i = 0; i < layout->eccbytes; i++)
- layout->eccpos[i] = offset + i;
-
- if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
- layout->oobfree[0].offset = 2 + layout->eccbytes * steps;
- else
- layout->oobfree[0].offset = 2;
-
- layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
- info->nand.ecc.layout = layout;
-
- if (!(info->nand.options & NAND_BUSWIDTH_16))
- info->nand.badblock_pattern = &bb_descrip_flashbased;
+ /* ELM module available, now configure it */
+ info->elm_dev = &pdev->dev;
+ if (elm_config(info->elm_dev, bch_type))
+ return -ENODEV;
+ info->is_elm_used = true;
return 0;
-fail:
- omap3_free_bch(mtd);
- return -1;
-}
-
-#else
-static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
-{
- pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
- return -1;
-}
-static int omap3_init_bch_tail(struct mtd_info *mtd)
-{
- return -1;
-}
-static void omap3_free_bch(struct mtd_info *mtd)
-{
}
-#endif /* CONFIG_MTD_NAND_OMAP_BCH */
+#endif /* CONFIG_MTD_NAND_ECC_BCH */
static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
struct omap_nand_platform_data *pdata;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ struct nand_ecclayout *ecclayout;
int err;
- int i, offset;
- dma_cap_mask_t mask;
- unsigned sig;
+ int i;
+ dma_cap_mask_t mask;
+ unsigned sig;
struct resource *res;
struct mtd_part_parser_data ppdata = {};
@@ -1837,7 +1642,8 @@ static int omap_nand_probe(struct platform_device *pdev)
return -ENODEV;
}
- info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
+ GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -1846,47 +1652,45 @@ static int omap_nand_probe(struct platform_device *pdev)
spin_lock_init(&info->controller.lock);
init_waitqueue_head(&info->controller.wq);
- info->pdev = pdev;
-
+ info->pdev = pdev;
info->gpmc_cs = pdata->cs;
info->reg = pdata->reg;
-
- info->mtd.priv = &info->nand;
- info->mtd.name = dev_name(&pdev->dev);
- info->mtd.owner = THIS_MODULE;
-
- info->nand.options = pdata->devsize;
- info->nand.options |= NAND_SKIP_BBTSCAN;
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
info->of_node = pdata->of_node;
-#endif
+ mtd = &info->mtd;
+ mtd->priv = &info->nand;
+ mtd->name = dev_name(&pdev->dev);
+ mtd->owner = THIS_MODULE;
+ nand_chip = &info->nand;
+ nand_chip->ecc.priv = NULL;
+ nand_chip->options |= NAND_SKIP_BBTSCAN;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
err = -EINVAL;
dev_err(&pdev->dev, "error getting memory resource\n");
- goto out_free_info;
+ goto return_error;
}
info->phys_base = res->start;
info->mem_size = resource_size(res);
- if (!request_mem_region(info->phys_base, info->mem_size,
- pdev->dev.driver->name)) {
+ if (!devm_request_mem_region(&pdev->dev, info->phys_base,
+ info->mem_size, pdev->dev.driver->name)) {
err = -EBUSY;
- goto out_free_info;
+ goto return_error;
}
- info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
- if (!info->nand.IO_ADDR_R) {
+ nand_chip->IO_ADDR_R = devm_ioremap(&pdev->dev, info->phys_base,
+ info->mem_size);
+ if (!nand_chip->IO_ADDR_R) {
err = -ENOMEM;
- goto out_release_mem_region;
+ goto return_error;
}
- info->nand.controller = &info->controller;
+ nand_chip->controller = &info->controller;
- info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
- info->nand.cmd_ctrl = omap_hwcontrol;
+ nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
+ nand_chip->cmd_ctrl = omap_hwcontrol;
/*
* If RDY/BSY line is connected to OMAP then use the omap ready
@@ -1896,26 +1700,42 @@ static int omap_nand_probe(struct platform_device *pdev)
* device and read status register until you get a failure or success
*/
if (pdata->dev_ready) {
- info->nand.dev_ready = omap_dev_ready;
- info->nand.chip_delay = 0;
+ nand_chip->dev_ready = omap_dev_ready;
+ nand_chip->chip_delay = 0;
} else {
- info->nand.waitfunc = omap_wait;
- info->nand.chip_delay = 50;
+ nand_chip->waitfunc = omap_wait;
+ nand_chip->chip_delay = 50;
}
+ /* scan NAND device connected to chip controller */
+ nand_chip->options |= pdata->devsize & NAND_BUSWIDTH_16;
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ pr_err("nand device scan failed, may be bus-width mismatch\n");
+ err = -ENXIO;
+ goto return_error;
+ }
+
+ /* check for small page devices */
+ if ((mtd->oobsize < 64) && (pdata->ecc_opt != OMAP_ECC_HAM1_CODE_HW)) {
+ pr_err("small page devices are not supported\n");
+ err = -EINVAL;
+ goto return_error;
+ }
+
+ /* re-populate low-level callbacks based on xfer modes */
switch (pdata->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
- info->nand.read_buf = omap_read_buf_pref;
- info->nand.write_buf = omap_write_buf_pref;
+ nand_chip->read_buf = omap_read_buf_pref;
+ nand_chip->write_buf = omap_write_buf_pref;
break;
case NAND_OMAP_POLLED:
- if (info->nand.options & NAND_BUSWIDTH_16) {
- info->nand.read_buf = omap_read_buf16;
- info->nand.write_buf = omap_write_buf16;
+ if (nand_chip->options & NAND_BUSWIDTH_16) {
+ nand_chip->read_buf = omap_read_buf16;
+ nand_chip->write_buf = omap_write_buf16;
} else {
- info->nand.read_buf = omap_read_buf8;
- info->nand.write_buf = omap_write_buf8;
+ nand_chip->read_buf = omap_read_buf8;
+ nand_chip->write_buf = omap_write_buf8;
}
break;
@@ -1927,7 +1747,7 @@ static int omap_nand_probe(struct platform_device *pdev)
if (!info->dma) {
dev_err(&pdev->dev, "DMA engine request failed\n");
err = -ENXIO;
- goto out_release_mem_region;
+ goto return_error;
} else {
struct dma_slave_config cfg;
@@ -1942,10 +1762,10 @@ static int omap_nand_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
err);
- goto out_release_mem_region;
+ goto return_error;
}
- info->nand.read_buf = omap_read_buf_dma_pref;
- info->nand.write_buf = omap_write_buf_dma_pref;
+ nand_chip->read_buf = omap_read_buf_dma_pref;
+ nand_chip->write_buf = omap_write_buf_dma_pref;
}
break;
@@ -1954,34 +1774,36 @@ static int omap_nand_probe(struct platform_device *pdev)
if (info->gpmc_irq_fifo <= 0) {
dev_err(&pdev->dev, "error getting fifo irq\n");
err = -ENODEV;
- goto out_release_mem_region;
+ goto return_error;
}
- err = request_irq(info->gpmc_irq_fifo, omap_nand_irq,
- IRQF_SHARED, "gpmc-nand-fifo", info);
+ err = devm_request_irq(&pdev->dev, info->gpmc_irq_fifo,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-fifo", info);
if (err) {
dev_err(&pdev->dev, "requesting irq(%d) error:%d",
info->gpmc_irq_fifo, err);
info->gpmc_irq_fifo = 0;
- goto out_release_mem_region;
+ goto return_error;
}
info->gpmc_irq_count = platform_get_irq(pdev, 1);
if (info->gpmc_irq_count <= 0) {
dev_err(&pdev->dev, "error getting count irq\n");
err = -ENODEV;
- goto out_release_mem_region;
+ goto return_error;
}
- err = request_irq(info->gpmc_irq_count, omap_nand_irq,
- IRQF_SHARED, "gpmc-nand-count", info);
+ err = devm_request_irq(&pdev->dev, info->gpmc_irq_count,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-count", info);
if (err) {
dev_err(&pdev->dev, "requesting irq(%d) error:%d",
info->gpmc_irq_count, err);
info->gpmc_irq_count = 0;
- goto out_release_mem_region;
+ goto return_error;
}
- info->nand.read_buf = omap_read_buf_irq_pref;
- info->nand.write_buf = omap_write_buf_irq_pref;
+ nand_chip->read_buf = omap_read_buf_irq_pref;
+ nand_chip->write_buf = omap_write_buf_irq_pref;
break;
@@ -1989,117 +1811,222 @@ static int omap_nand_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"xfer_type(%d) not supported!\n", pdata->xfer_type);
err = -EINVAL;
- goto out_release_mem_region;
+ goto return_error;
}
- /* select the ecc type */
- if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
- info->nand.ecc.mode = NAND_ECC_SOFT;
- else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
- (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
- info->nand.ecc.bytes = 3;
- info->nand.ecc.size = 512;
- info->nand.ecc.strength = 1;
- info->nand.ecc.calculate = omap_calculate_ecc;
- info->nand.ecc.hwctl = omap_enable_hwecc;
- info->nand.ecc.correct = omap_correct_data;
- info->nand.ecc.mode = NAND_ECC_HW;
- } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
- (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
- err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
- if (err) {
+ /* populate MTD interface based on ECC scheme */
+ nand_chip->ecc.layout = &omap_oobinfo;
+ ecclayout = &omap_oobinfo;
+ switch (pdata->ecc_opt) {
+ case OMAP_ECC_HAM1_CODE_HW:
+ pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.bytes = 3;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.strength = 1;
+ nand_chip->ecc.calculate = omap_calculate_ecc;
+ nand_chip->ecc.hwctl = omap_enable_hwecc;
+ nand_chip->ecc.correct = omap_correct_data;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ if (nand_chip->options & NAND_BUSWIDTH_16)
+ ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ else
+ ecclayout->eccpos[0] = 1;
+ ecclayout->oobfree->offset = ecclayout->eccpos[0] +
+ ecclayout->eccbytes;
+ break;
+
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+#ifdef CONFIG_MTD_NAND_ECC_BCH
+ pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 7;
+ nand_chip->ecc.strength = 4;
+ nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
+ nand_chip->ecc.correct = nand_bch_correct_data;
+ nand_chip->ecc.calculate = omap3_calculate_ecc_bch4;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree->offset = ecclayout->eccpos[0] +
+ ecclayout->eccbytes;
+ /* software bch library is used for locating errors */
+ nand_chip->ecc.priv = nand_bch_init(mtd,
+ nand_chip->ecc.size,
+ nand_chip->ecc.bytes,
+ &nand_chip->ecc.layout);
+ if (!nand_chip->ecc.priv) {
+ pr_err("nand: error: unable to use s/w BCH library\n");
err = -EINVAL;
- goto out_release_mem_region;
}
- }
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
- /* DIP switches on some boards change between 8 and 16 bit
- * bus widths for flash. Try the other width if the first try fails.
- */
- if (nand_scan_ident(&info->mtd, 1, NULL)) {
- info->nand.options ^= NAND_BUSWIDTH_16;
- if (nand_scan_ident(&info->mtd, 1, NULL)) {
- err = -ENXIO;
- goto out_release_mem_region;
+ case OMAP_ECC_BCH4_CODE_HW:
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+ pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ /* 14th bit is kept reserved for ROM-code compatibility */
+ nand_chip->ecc.bytes = 7 + 1;
+ nand_chip->ecc.strength = 4;
+ nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
+ nand_chip->ecc.correct = omap_elm_correct_data;
+ nand_chip->ecc.calculate = omap3_calculate_ecc_bch;
+ nand_chip->ecc.read_page = omap_read_page_bch;
+ nand_chip->ecc.write_page = omap_write_page_bch;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree->offset = ecclayout->eccpos[0] +
+ ecclayout->eccbytes;
+ /* This ECC scheme requires ELM H/W block */
+ if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) {
+ pr_err("nand: error: could not initialize ELM\n");
+ err = -ENODEV;
+ goto return_error;
}
- }
-
- /* rom code layout */
- if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
- if (info->nand.options & NAND_BUSWIDTH_16)
- offset = 2;
- else {
- offset = 1;
- info->nand.badblock_pattern = &bb_descrip_flashbased;
- }
- omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
- for (i = 0; i < omap_oobinfo.eccbytes; i++)
- omap_oobinfo.eccpos[i] = i+offset;
-
- omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
- omap_oobinfo.oobfree->length = info->mtd.oobsize -
- (offset + omap_oobinfo.eccbytes);
-
- info->nand.ecc.layout = &omap_oobinfo;
- } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
- (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
- /* build OOB layout for BCH ECC correction */
- err = omap3_init_bch_tail(&info->mtd);
- if (err) {
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+#ifdef CONFIG_MTD_NAND_ECC_BCH
+ pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 13;
+ nand_chip->ecc.strength = 8;
+ nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
+ nand_chip->ecc.correct = nand_bch_correct_data;
+ nand_chip->ecc.calculate = omap3_calculate_ecc_bch8;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree->offset = ecclayout->eccpos[0] +
+ ecclayout->eccbytes;
+ /* software bch library is used for locating errors */
+ nand_chip->ecc.priv = nand_bch_init(mtd,
+ nand_chip->ecc.size,
+ nand_chip->ecc.bytes,
+ &nand_chip->ecc.layout);
+ if (!nand_chip->ecc.priv) {
+ pr_err("nand: error: unable to use s/w BCH library\n");
err = -EINVAL;
- goto out_release_mem_region;
+ goto return_error;
+ }
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
+
+ case OMAP_ECC_BCH8_CODE_HW:
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+ pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ /* 14th bit is kept reserved for ROM-code compatibility */
+ nand_chip->ecc.bytes = 13 + 1;
+ nand_chip->ecc.strength = 8;
+ nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
+ nand_chip->ecc.correct = omap_elm_correct_data;
+ nand_chip->ecc.calculate = omap3_calculate_ecc_bch;
+ nand_chip->ecc.read_page = omap_read_page_bch;
+ nand_chip->ecc.write_page = omap_write_page_bch;
+ /* This ECC scheme requires ELM H/W block */
+ if (is_elm_present(info, pdata->elm_of_node, BCH8_ECC) < 0) {
+ pr_err("nand: error: could not initialize ELM\n");
+ goto return_error;
}
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree->offset = ecclayout->eccpos[0] +
+ ecclayout->eccbytes;
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
+
+ default:
+ pr_err("nand: error: invalid or unsupported ECC scheme\n");
+ err = -EINVAL;
+ goto return_error;
+ }
+
+ /* populate remaining ECC layout data */
+ ecclayout->oobfree->length = mtd->oobsize - (BADBLOCK_MARKER_LENGTH +
+ ecclayout->eccbytes);
+ for (i = 1; i < ecclayout->eccbytes; i++)
+ ecclayout->eccpos[i] = ecclayout->eccpos[0] + i;
+ /* check if NAND device's OOB is enough to store ECC signatures */
+ if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
+ pr_err("not enough OOB bytes required = %d, available=%d\n",
+ ecclayout->eccbytes, mtd->oobsize);
+ err = -EINVAL;
+ goto return_error;
}
/* second phase scan */
- if (nand_scan_tail(&info->mtd)) {
+ if (nand_scan_tail(mtd)) {
err = -ENXIO;
- goto out_release_mem_region;
+ goto return_error;
}
ppdata.of_node = pdata->of_node;
- mtd_device_parse_register(&info->mtd, NULL, &ppdata, pdata->parts,
+ mtd_device_parse_register(mtd, NULL, &ppdata, pdata->parts,
pdata->nr_parts);
- platform_set_drvdata(pdev, &info->mtd);
+ platform_set_drvdata(pdev, mtd);
return 0;
-out_release_mem_region:
+return_error:
if (info->dma)
dma_release_channel(info->dma);
- if (info->gpmc_irq_count > 0)
- free_irq(info->gpmc_irq_count, info);
- if (info->gpmc_irq_fifo > 0)
- free_irq(info->gpmc_irq_fifo, info);
- release_mem_region(info->phys_base, info->mem_size);
-out_free_info:
- kfree(info);
-
+ if (nand_chip->ecc.priv) {
+ nand_bch_free(nand_chip->ecc.priv);
+ nand_chip->ecc.priv = NULL;
+ }
return err;
}
static int omap_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct nand_chip *nand_chip = mtd->priv;
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- omap3_free_bch(&info->mtd);
-
+ if (nand_chip->ecc.priv) {
+ nand_bch_free(nand_chip->ecc.priv);
+ nand_chip->ecc.priv = NULL;
+ }
if (info->dma)
dma_release_channel(info->dma);
-
- if (info->gpmc_irq_count > 0)
- free_irq(info->gpmc_irq_count, info);
- if (info->gpmc_irq_fifo > 0)
- free_irq(info->gpmc_irq_fifo, info);
-
- /* Release NAND device, its internal structures and partitions */
- nand_release(&info->mtd);
- iounmap(info->nand.IO_ADDR_R);
- release_mem_region(info->phys_base, info->mem_size);
- kfree(info);
+ nand_release(mtd);
return 0;
}
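
The omap2 probe now derives the OOB layout from the selected ECC scheme. A
hypothetical walk-through of that arithmetic for OMAP_ECC_BCH8_CODE_HW on a
2048-byte page with a 64-byte OOB (ecc.size = 512, ecc.bytes = 13 + 1);
example_bch8_layout() and the literal geometry are illustrative only:

#include <linux/mtd/mtd.h>

#define BADBLOCK_MARKER_LENGTH	2

static void example_bch8_layout(struct nand_ecclayout *lay)
{
	int i;
	int steps = 2048 / 512;				/* 4 ECC steps          */

	lay->eccbytes = 14 * steps;			/* 56 ECC bytes         */
	lay->eccpos[0] = BADBLOCK_MARKER_LENGTH;	/* ECC at offsets 2..57 */
	for (i = 1; i < lay->eccbytes; i++)
		lay->eccpos[i] = lay->eccpos[0] + i;
	lay->oobfree->offset = lay->eccpos[0] + lay->eccbytes;	/* 58           */
	lay->oobfree->length = 64 -
		(BADBLOCK_MARKER_LENGTH + lay->eccbytes);	/* 6 bytes free */
}
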
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 5a67082c07ee..4d174366a0f0 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -28,6 +28,8 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index dd03dfdfb0d6..64c258ec0170 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -39,6 +39,13 @@
#define NAND_STOP_DELAY (2 * HZ/50)
#define PAGE_CHUNK_SIZE (2048)
+/*
+ * Define a buffer size for the initial command that detects the flash device:
+ * STATUS, READID and PARAM. The largest of these is the PARAM command,
+ * needing 256 bytes.
+ */
+#define INIT_BUFFER_SIZE 256
+
/* registers and bit definitions */
#define NDCR (0x00) /* Control register */
#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
@@ -164,6 +171,7 @@ struct pxa3xx_nand_info {
unsigned int buf_start;
unsigned int buf_count;
+ unsigned int buf_size;
/* DMA information */
int drcmr_dat;
@@ -540,7 +548,6 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
info->oob_size = 0;
info->use_ecc = 0;
info->use_spare = 1;
- info->use_dma = (use_dma) ? 1 : 0;
info->is_ready = 0;
info->retcode = ERR_NONE;
if (info->cs != 0)
@@ -912,26 +919,20 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
return 0;
}
-/* the maximum possible buffer size for large page with OOB data
- * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
- * data buffer and the DMA descriptor
- */
-#define MAX_BUFF_SIZE PAGE_SIZE
-
#ifdef ARCH_HAS_DMA
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
struct platform_device *pdev = info->pdev;
- int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);
+ int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
if (use_dma == 0) {
- info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
if (info->data_buff == NULL)
return -ENOMEM;
return 0;
}
- info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
+ info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
&info->data_buff_phys, GFP_KERNEL);
if (info->data_buff == NULL) {
dev_err(&pdev->dev, "failed to allocate dma buffer\n");
@@ -945,11 +946,16 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
pxa3xx_nand_data_dma_irq, info);
if (info->data_dma_ch < 0) {
dev_err(&pdev->dev, "failed to request data dma\n");
- dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
+ dma_free_coherent(&pdev->dev, info->buf_size,
info->data_buff, info->data_buff_phys);
return info->data_dma_ch;
}
+ /*
+ * Now that DMA buffers are allocated we turn on
+ * DMA proper for I/O operations.
+ */
+ info->use_dma = 1;
return 0;
}
@@ -958,7 +964,7 @@ static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
struct platform_device *pdev = info->pdev;
if (use_dma) {
pxa_free_dma(info->data_dma_ch);
- dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
+ dma_free_coherent(&pdev->dev, info->buf_size,
info->data_buff, info->data_buff_phys);
} else {
kfree(info->data_buff);
@@ -967,7 +973,7 @@ static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
#else
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
- info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
if (info->data_buff == NULL)
return -ENOMEM;
return 0;
@@ -1081,7 +1087,16 @@ KEEP_CONFIG:
else
host->col_addr_cycles = 1;
+ /* release the initial buffer */
+ kfree(info->data_buff);
+
+ /* allocate the real data + oob buffer */
+ info->buf_size = mtd->writesize + mtd->oobsize;
+ ret = pxa3xx_nand_init_buff(info);
+ if (ret)
+ return ret;
info->oob_buff = info->data_buff + mtd->writesize;
+
if ((mtd->size >> chip->page_shift) > 65536)
host->row_addr_cycles = 3;
else
@@ -1187,9 +1202,13 @@ static int alloc_nand_resource(struct platform_device *pdev)
}
info->mmio_phys = r->start;
- ret = pxa3xx_nand_init_buff(info);
- if (ret)
+ /* Allocate a buffer to allow flash detection */
+ info->buf_size = INIT_BUFFER_SIZE;
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+ if (info->data_buff == NULL) {
+ ret = -ENOMEM;
goto fail_disable_clk;
+ }
/* initialize all interrupts to be disabled */
disable_int(info, NDSR_MASK);
@@ -1207,7 +1226,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
fail_free_buf:
free_irq(irq, info);
- pxa3xx_nand_free_buff(info);
+ kfree(info->data_buff);
fail_disable_clk:
clk_disable_unprepare(info->clk);
return ret;
@@ -1320,7 +1339,12 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
for (cs = 0; cs < pdata->num_cs; cs++) {
struct mtd_info *mtd = info->host[cs]->mtd;
- mtd->name = pdev->name;
+ /*
+ * The mtd name matches the one used in 'mtdparts' kernel
+ * parameter. This name cannot be changed or otherwise
+ * user's mtd partitions configuration would get broken.
+ */
+ mtd->name = "pxa3xx_nand-0";
info->cs = cs;
ret = pxa3xx_nand_scan(mtd);
if (ret) {
@@ -1407,7 +1431,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
static struct platform_driver pxa3xx_nand_driver = {
.driver = {
.name = "pxa3xx-nand",
- .of_match_table = of_match_ptr(pxa3xx_nand_dt_ids),
+ .of_match_table = pxa3xx_nand_dt_ids,
},
.probe = pxa3xx_nand_probe,
.remove = pxa3xx_nand_remove,
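The pxa3xx_nand hunks above replace the fixed MAX_BUFF_SIZE buffer with a two-stage scheme: a small INIT_BUFFER_SIZE kmalloc() is enough for flash identification, and once the page/OOB geometry is known the buffer is freed and reallocated at writesize + oobsize (DMA-coherent when DMA is enabled). A minimal sketch of the same pattern; my_nand_info, the 2 KiB probe size and the detection placeholder are illustrative assumptions, not taken from the patch:

	#include <linux/slab.h>
	#include <linux/mtd/mtd.h>

	#define PROBE_BUF_SIZE 2048	/* illustrative: enough to read IDs/ONFI data */

	struct my_nand_info {		/* hypothetical, stands in for pxa3xx_nand_info */
		unsigned char *data_buff;
		unsigned char *oob_buff;
		unsigned int buf_size;
	};

	static int my_nand_alloc_buffers(struct my_nand_info *info, struct mtd_info *mtd)
	{
		/* stage 1: small buffer, only used while identifying the chip */
		info->buf_size = PROBE_BUF_SIZE;
		info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
		if (!info->data_buff)
			return -ENOMEM;

		/* ... run flash detection here; it fills mtd->writesize/oobsize ... */

		/* stage 2: swap in the real data + OOB buffer */
		kfree(info->data_buff);
		info->buf_size = mtd->writesize + mtd->oobsize;
		info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
		if (!info->data_buff)
			return -ENOMEM;
		info->oob_buff = info->data_buff + mtd->writesize;
		return 0;
	}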
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index 09dde7d27178..9a9fa4949b4f 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -149,17 +149,13 @@ static int socrates_nand_probe(struct platform_device *ofdev)
struct mtd_part_parser_data ppdata;
/* Allocate memory for the device structure (and zero it) */
- host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
- if (!host) {
- printk(KERN_ERR
- "socrates_nand: failed to allocate device structure.\n");
+ host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
return -ENOMEM;
- }
host->io_base = of_iomap(ofdev->dev.of_node, 0);
if (host->io_base == NULL) {
- printk(KERN_ERR "socrates_nand: ioremap failed\n");
- kfree(host);
+ dev_err(&ofdev->dev, "ioremap failed\n");
return -EIO;
}
@@ -211,9 +207,7 @@ static int socrates_nand_probe(struct platform_device *ofdev)
nand_release(mtd);
out:
- dev_set_drvdata(&ofdev->dev, NULL);
iounmap(host->io_base);
- kfree(host);
return res;
}
@@ -227,9 +221,7 @@ static int socrates_nand_remove(struct platform_device *ofdev)
nand_release(mtd);
- dev_set_drvdata(&ofdev->dev, NULL);
iounmap(host->io_base);
- kfree(host);
return 0;
}
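The socrates_nand conversion above swaps kzalloc()/kfree() for a device-managed allocation, which is why the error paths and the remove handler lose their kfree() calls: devm memory is released automatically when probe fails or the device is unbound. The general shape, with a hypothetical host structure standing in for socrates_nand_host:

	#include <linux/device.h>
	#include <linux/platform_device.h>

	struct example_host {		/* hypothetical */
		void __iomem *io_base;
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct example_host *host;

		/* freed automatically on probe failure or device removal */
		host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
		if (!host)
			return -ENOMEM;

		platform_set_drvdata(pdev, host);
		return 0;		/* no kfree() needed anywhere */
	}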
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index c5f4ebf4b384..46f27de018c3 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -50,7 +50,7 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
struct NFTLrecord *nftl;
unsigned long temp;
- if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
+ if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
return;
/* OK, this is moderately ugly. But probably safe. Alternatives? */
if (memcmp(mtd->name, "DiskOnChip", 10))
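Here and in the ssfdc and mtd test modules below, the open-coded mtd->type != MTD_NANDFLASH comparison becomes mtd_type_is_nand(). The helper lives in include/linux/mtd/mtd.h and, as far as I recall for trees of this era, is roughly of this shape; check the exact tree the diff applies to:

	static inline int mtd_type_is_nand(const struct mtd_info *mtd)
	{
		/* both SLC and MLC NAND count as "NAND" for these callers */
		return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
	}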
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 558071bf92de..2362909d20c0 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -573,28 +573,6 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
static struct platform_driver omap2_onenand_driver;
-static int __adjust_timing(struct device *dev, void *data)
-{
- int ret = 0;
- struct omap2_onenand *c;
-
- c = dev_get_drvdata(dev);
-
- BUG_ON(c->setup == NULL);
-
- /* DMA is not in use so this is all that is needed */
- /* Revisit for OMAP3! */
- ret = c->setup(c->onenand.base, &c->freq);
-
- return ret;
-}
-
-int omap2_onenand_rephase(void)
-{
- return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
- NULL, __adjust_timing);
-}
-
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index b3f41f200622..1de33b5d3903 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -2556,10 +2556,6 @@ static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
int ret;
- /* Check for invalid offset */
- if (ofs > mtd->size)
- return -EINVAL;
-
onenand_get_device(mtd, FL_READING);
ret = onenand_block_isbad_nolock(mtd, ofs, 0);
onenand_release_device(mtd);
@@ -3529,7 +3525,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
unsigned die, bdry;
- int ret, syscfg, locked;
+ int syscfg, locked;
/* Disable ECC */
syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
@@ -3540,7 +3536,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
this->wait(mtd, FL_SYNCING);
this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
- ret = this->wait(mtd, FL_READING);
+ this->wait(mtd, FL_READING);
bdry = this->read_word(this->base + ONENAND_DATARAM);
if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
@@ -3550,7 +3546,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- ret = this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETING);
printk(KERN_INFO "Die %d boundary: %d%s\n", die,
this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
@@ -3734,7 +3730,7 @@ static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
/* Check if the boundary is locked */
this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
- ret = this->wait(mtd, FL_READING);
+ this->wait(mtd, FL_READING);
thisboundary = this->read_word(this->base + ONENAND_DATARAM);
if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
@@ -3835,7 +3831,7 @@ static int onenand_chip_probe(struct mtd_info *mtd)
static int onenand_probe(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
- int maf_id, dev_id, ver_id;
+ int dev_id, ver_id;
int density;
int ret;
@@ -3843,8 +3839,7 @@ static int onenand_probe(struct mtd_info *mtd)
if (ret)
return ret;
- /* Read manufacturer and device IDs from Register */
- maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
+ /* Device and version IDs from Register */
dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index ab2a52a039c3..daf82ba7aba0 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -290,7 +290,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
int cis_sector;
/* Check for small page NAND flash */
- if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE ||
+ if (!mtd_type_is_nand(mtd) || mtd->oobsize != OOB_SIZE ||
mtd->size > UINT_MAX)
return;
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 3cd3aabbe1cd..6f976159611f 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -349,7 +349,7 @@ static int __init mtd_nandbiterrs_init(void)
goto exit_mtddev;
}
- if (mtd->type != MTD_NANDFLASH) {
+ if (!mtd_type_is_nand(mtd)) {
pr_info("this test requires NAND flash\n");
err = -ENODEV;
goto exit_nand;
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index ff35c465bfee..2e9e2d11f204 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -289,7 +289,7 @@ static int __init mtd_oobtest_init(void)
return err;
}
- if (mtd->type != MTD_NANDFLASH) {
+ if (!mtd_type_is_nand(mtd)) {
pr_info("this test requires NAND flash\n");
goto out;
}
diff --git a/drivers/mtd/tests/pagetest.c b/drivers/mtd/tests/pagetest.c
index 44b96e999ad4..ed2d3f656fd2 100644
--- a/drivers/mtd/tests/pagetest.c
+++ b/drivers/mtd/tests/pagetest.c
@@ -353,7 +353,7 @@ static int __init mtd_pagetest_init(void)
return err;
}
- if (mtd->type != MTD_NANDFLASH) {
+ if (!mtd_type_is_nand(mtd)) {
pr_info("this test requires NAND flash\n");
goto out;
}
diff --git a/drivers/mtd/tests/subpagetest.c b/drivers/mtd/tests/subpagetest.c
index e2c0adf24cfc..a876371ad410 100644
--- a/drivers/mtd/tests/subpagetest.c
+++ b/drivers/mtd/tests/subpagetest.c
@@ -299,7 +299,7 @@ static int __init mtd_subpagetest_init(void)
return err;
}
- if (mtd->type != MTD_NANDFLASH) {
+ if (!mtd_type_is_nand(mtd)) {
pr_info("this test requires NAND flash\n");
goto out;
}
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index c071d410488f..33bb1f2b63e4 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -900,10 +900,9 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
* number.
*/
image_seq = be32_to_cpu(ech->image_seq);
- if (!ubi->image_seq && image_seq)
+ if (!ubi->image_seq)
ubi->image_seq = image_seq;
- if (ubi->image_seq && image_seq &&
- ubi->image_seq != image_seq) {
+ if (image_seq && ubi->image_seq != image_seq) {
ubi_err("bad image sequence number %d in PEB %d, expected %d",
image_seq, pnum, ubi->image_seq);
ubi_dump_ec_hdr(ech);
@@ -1417,9 +1416,11 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
ai = alloc_ai("ubi_aeb_slab_cache2");
if (!ai)
return -ENOMEM;
- }
- err = scan_all(ubi, ai, UBI_FM_MAX_START);
+ err = scan_all(ubi, ai, 0);
+ } else {
+ err = scan_all(ubi, ai, UBI_FM_MAX_START);
+ }
}
}
#else
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 315dcc6ec1f5..e05dc6298c1d 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -41,6 +41,7 @@
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/major.h>
#include "ubi.h"
/* Maximum length of the 'mtd=' parameter */
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index f5aa4b02cfa6..ead861307b3c 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -407,6 +407,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
*/
for (i = 0; i < pool_size; i++) {
int scrub = 0;
+ int image_seq;
pnum = be32_to_cpu(pebs[i]);
@@ -425,10 +426,16 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
} else if (ret == UBI_IO_BITFLIPS)
scrub = 1;
- if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+ /*
+ * Older UBI implementations have image_seq set to zero, so
+ * we shouldn't fail if image_seq == 0.
+ */
+ image_seq = be32_to_cpu(ech->image_seq);
+
+ if (image_seq && (image_seq != ubi->image_seq)) {
ubi_err("bad image seq: 0x%x, expected: 0x%x",
be32_to_cpu(ech->image_seq), ubi->image_seq);
- err = UBI_BAD_FASTMAP;
+ ret = UBI_BAD_FASTMAP;
goto out;
}
@@ -819,6 +826,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
list_move_tail(&tmp_aeb->u.list, &ai->free);
+ ubi_assert(list_empty(&used));
+ ubi_assert(list_empty(&eba_orphans));
+ ubi_assert(list_empty(&free));
+
/*
* If fastmap is leaking PEBs (must not happen), raise a
* fat warning and fall back to scanning mode.
@@ -834,6 +845,19 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
fail_bad:
ret = UBI_BAD_FASTMAP;
fail:
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ list_del(&tmp_aeb->u.list);
+ }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ list_del(&tmp_aeb->u.list);
+ }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ list_del(&tmp_aeb->u.list);
+ }
+
return ret;
}
@@ -923,6 +947,8 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
for (i = 0; i < used_blocks; i++) {
+ int image_seq;
+
pnum = be32_to_cpu(fmsb->block_loc[i]);
if (ubi_io_is_bad(ubi, pnum)) {
@@ -940,10 +966,17 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
} else if (ret == UBI_IO_BITFLIPS)
fm->to_be_tortured[i] = 1;
+ image_seq = be32_to_cpu(ech->image_seq);
if (!ubi->image_seq)
- ubi->image_seq = be32_to_cpu(ech->image_seq);
+ ubi->image_seq = image_seq;
- if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+ /*
+ * Older UBI implementations have image_seq set to zero, so
+ * we shouldn't fail if image_seq == 0.
+ */
+ if (image_seq && (image_seq != ubi->image_seq)) {
+ ubi_err("wrong image seq:%d instead of %d",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
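Both fastmap call sites above apply the same relaxed rule: an erase-counter header whose image_seq is zero (as written by older UBI implementations) is always accepted, and only a non-zero mismatch marks the fastmap as bad. Folded into one place, the check looks like this; the helper name is illustrative, the patch open-codes it at each site:

	#include <linux/types.h>

	/* Illustrative helper: accept image_seq == 0 from old UBI images. */
	static bool ubi_image_seq_ok(u32 found, u32 expected)
	{
		return found == 0 || found == expected;
	}

	/*
	 * Usage matching the hunks above:
	 *
	 *	image_seq = be32_to_cpu(ech->image_seq);
	 *	if (!ubi_image_seq_ok(image_seq, ubi->image_seq)) {
	 *		ret = UBI_BAD_FASTMAP;
	 *		goto out;
	 *	}
	 */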
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index c95bfb183c62..02317c1c0238 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -599,10 +599,6 @@ static void refill_wl_user_pool(struct ubi_device *ubi)
return_unused_pool_pebs(ubi, pool);
for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
- if (!ubi->free.rb_node ||
- (ubi->free_count - ubi->beb_rsvd_pebs < 1))
- break;
-
pool->pebs[pool->size] = __wl_get_peb(ubi);
if (pool->pebs[pool->size] < 0)
break;
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 3a8c7532ee0d..a7271e093845 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -102,8 +102,7 @@ static struct devprobe2 isa_probes[] __initdata = {
#ifdef CONFIG_WD80x3
{wd_probe, 0},
#endif
-#if defined(CONFIG_NE2000) || \
- defined(CONFIG_NE_H8300) /* ISA (use ne2k-pci for PCI cards) */
+#if defined(CONFIG_NE2000) /* ISA (use ne2k-pci for PCI cards) */
{ne_probe, 0},
#endif
#ifdef CONFIG_LANCE /* ISA/VLB (use pcnet32 for PCI cards) */
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 4c21bf6b8b2f..5a5d720da929 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_BONDING) += bonding.o
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o bond_netlink.o bond_options.o
proc-$(CONFIG_PROC_FS) += bond_procfs.o
bonding-objs += $(proc-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 0d8f427ade93..187b1b7772ef 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -136,41 +136,6 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
}
/**
- * __get_first_port - get the first port in the bond
- * @bond: the bond we're looking at
- *
- * Return the port of the first slave in @bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_first_port(struct bonding *bond)
-{
- struct slave *first_slave = bond_first_slave(bond);
-
- return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL;
-}
-
-/**
- * __get_next_port - get the next port in the bond
- * @port: the port we're looking at
- *
- * Return the port of the slave that is next in line of @port's slave in the
- * bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_next_port(struct port *port)
-{
- struct bonding *bond = __get_bond_by_port(port);
- struct slave *slave = port->slave, *slave_next;
-
- // If there's no bond for this port, or this is the last slave
- if (bond == NULL)
- return NULL;
- slave_next = bond_next_slave(bond, slave);
- if (!slave_next || bond_is_first_slave(bond, slave_next))
- return NULL;
-
- return &(SLAVE_AD_INFO(slave_next).port);
-}
-
-/**
* __get_first_agg - get the first aggregator in the bond
* @bond: the bond we're looking at
*
@@ -190,28 +155,6 @@ static inline struct aggregator *__get_first_agg(struct port *port)
return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
}
-/**
- * __get_next_agg - get the next aggregator in the bond
- * @aggregator: the aggregator we're looking at
- *
- * Return the aggregator of the slave that is next in line of @aggregator's
- * slave in the bond, or %NULL if it can't be found.
- */
-static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
-{
- struct slave *slave = aggregator->slave, *slave_next;
- struct bonding *bond = bond_get_bond_by_slave(slave);
-
- // If there's no bond for this aggregator, or this is the last slave
- if (bond == NULL)
- return NULL;
- slave_next = bond_next_slave(bond, slave);
- if (!slave_next || bond_is_first_slave(bond, slave_next))
- return NULL;
-
- return &(SLAVE_AD_INFO(slave_next).aggregator);
-}
-
/*
* __agg_has_partner
*
@@ -755,16 +698,15 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
*/
static struct aggregator *__get_active_agg(struct aggregator *aggregator)
{
- struct aggregator *retval = NULL;
+ struct bonding *bond = aggregator->slave->bond;
+ struct list_head *iter;
+ struct slave *slave;
- for (; aggregator; aggregator = __get_next_agg(aggregator)) {
- if (aggregator->is_active) {
- retval = aggregator;
- break;
- }
- }
+ bond_for_each_slave(bond, slave, iter)
+ if (SLAVE_AD_INFO(slave).aggregator.is_active)
+ return &(SLAVE_AD_INFO(slave).aggregator);
- return retval;
+ return NULL;
}
/**
@@ -1274,12 +1216,17 @@ static void ad_port_selection_logic(struct port *port)
{
struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator;
struct port *last_port = NULL, *curr_port;
+ struct list_head *iter;
+ struct bonding *bond;
+ struct slave *slave;
int found = 0;
// if the port is already Selected, do nothing
if (port->sm_vars & AD_PORT_SELECTED)
return;
+ bond = __get_bond_by_port(port);
+
// if the port is connected to other aggregator, detach it
if (port->aggregator) {
// detach the port from its former aggregator
@@ -1320,8 +1267,8 @@ static void ad_port_selection_logic(struct port *port)
}
}
// search on all aggregators for a suitable aggregator for this port
- for (aggregator = __get_first_agg(port); aggregator;
- aggregator = __get_next_agg(aggregator)) {
+ bond_for_each_slave(bond, slave, iter) {
+ aggregator = &(SLAVE_AD_INFO(slave).aggregator);
// keep a free aggregator for later use(if needed)
if (!aggregator->lag_ports) {
@@ -1515,19 +1462,23 @@ static int agg_device_up(const struct aggregator *agg)
static void ad_agg_selection_logic(struct aggregator *agg)
{
struct aggregator *best, *active, *origin;
+ struct bonding *bond = agg->slave->bond;
+ struct list_head *iter;
+ struct slave *slave;
struct port *port;
origin = agg;
active = __get_active_agg(agg);
best = (active && agg_device_up(active)) ? active : NULL;
- do {
+ bond_for_each_slave(bond, slave, iter) {
+ agg = &(SLAVE_AD_INFO(slave).aggregator);
+
agg->is_active = 0;
if (agg->num_of_ports && agg_device_up(agg))
best = ad_agg_selection_test(best, agg);
-
- } while ((agg = __get_next_agg(agg)));
+ }
if (best &&
__get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
@@ -1565,8 +1516,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best->lag_ports, best->slave,
best->slave ? best->slave->dev->name : "NULL");
- for (agg = __get_first_agg(best->lag_ports); agg;
- agg = __get_next_agg(agg)) {
+ bond_for_each_slave(bond, slave, iter) {
+ agg = &(SLAVE_AD_INFO(slave).aggregator);
pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
agg->aggregator_identifier, agg->num_of_ports,
@@ -1614,13 +1565,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
}
}
- if (origin->slave) {
- struct bonding *bond;
-
- bond = bond_get_bond_by_slave(origin->slave);
- if (bond)
- bond_3ad_set_carrier(bond);
- }
+ bond_3ad_set_carrier(bond);
}
/**
@@ -1969,6 +1914,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
struct port *port, *prev_port, *temp_port;
struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
int select_new_active_agg = 0;
+ struct bonding *bond = slave->bond;
+ struct slave *slave_iter;
+ struct list_head *iter;
// find the aggregator related to this slave
aggregator = &(SLAVE_AD_INFO(slave).aggregator);
@@ -1998,14 +1946,16 @@ void bond_3ad_unbind_slave(struct slave *slave)
// reason to search for new aggregator, and that we will find one
if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) {
// find new aggregator for the related port(s)
- new_aggregator = __get_first_agg(port);
- for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) {
+ bond_for_each_slave(bond, slave_iter, iter) {
+ new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
// if the new aggregator is empty, or it is connected to our port only
if (!new_aggregator->lag_ports
|| ((new_aggregator->lag_ports == port)
&& !new_aggregator->lag_ports->next_port_in_aggregator))
break;
}
+ if (!slave_iter)
+ new_aggregator = NULL;
// if new aggregator found, copy the aggregator's parameters
// and connect the related lag_ports to the new aggregator
if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
@@ -2056,15 +2006,17 @@ void bond_3ad_unbind_slave(struct slave *slave)
pr_info("%s: Removing an active aggregator\n",
slave->bond->dev->name);
// select new active aggregator
- ad_agg_selection_logic(__get_first_agg(port));
+ temp_aggregator = __get_first_agg(port);
+ if (temp_aggregator)
+ ad_agg_selection_logic(temp_aggregator);
}
}
}
pr_debug("Unbinding port %d\n", port->actor_port_number);
// find the aggregator that this port is connected to
- temp_aggregator = __get_first_agg(port);
- for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) {
+ bond_for_each_slave(bond, slave_iter, iter) {
+ temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
prev_port = NULL;
// search the port in the aggregator's related ports
for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2111,19 +2063,24 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
ad_work.work);
- struct port *port;
struct aggregator *aggregator;
+ struct list_head *iter;
+ struct slave *slave;
+ struct port *port;
read_lock(&bond->lock);
//check if there are any slaves
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto re_arm;
// check if agg_select_timer timer after initialize is timed out
if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ slave = bond_first_slave(bond);
+ port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
+
// select the active aggregator for the bond
- if ((port = __get_first_port(bond))) {
+ if (port) {
if (!port->slave) {
pr_warning("%s: Warning: bond's first port is uninitialized\n",
bond->dev->name);
@@ -2137,7 +2094,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
// for each port run the state machines
- for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+ bond_for_each_slave(bond, slave, iter) {
+ port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
pr_warning("%s: Warning: Found an uninitialized port\n",
bond->dev->name);
@@ -2382,9 +2340,12 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
struct ad_info *ad_info)
{
struct aggregator *aggregator = NULL;
+ struct list_head *iter;
+ struct slave *slave;
struct port *port;
- for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ port = &(SLAVE_AD_INFO(slave).port);
if (port->aggregator && port->aggregator->is_active) {
aggregator = port->aggregator;
break;
@@ -2408,25 +2369,25 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
{
int ret;
- read_lock(&bond->lock);
+ rcu_read_lock();
ret = __bond_3ad_get_active_agg_info(bond, ad_info);
- read_unlock(&bond->lock);
+ rcu_read_unlock();
return ret;
}
int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
{
- struct slave *slave, *start_at;
struct bonding *bond = netdev_priv(dev);
- int slave_agg_no;
- int slaves_in_agg;
- int agg_id;
- int i;
+ struct slave *slave, *first_ok_slave;
+ struct aggregator *agg;
struct ad_info ad_info;
+ struct list_head *iter;
+ int slaves_in_agg;
+ int slave_agg_no;
int res = 1;
+ int agg_id;
- read_lock(&bond->lock);
if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
dev->name);
@@ -2437,20 +2398,28 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
agg_id = ad_info.aggregator_id;
if (slaves_in_agg == 0) {
- /*the aggregator is empty*/
pr_debug("%s: Error: active aggregator is empty\n", dev->name);
goto out;
}
- slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
+ slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+ first_ok_slave = NULL;
- bond_for_each_slave(bond, slave) {
- struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ agg = SLAVE_AD_INFO(slave).port.aggregator;
+ if (!agg || agg->aggregator_identifier != agg_id)
+ continue;
- if (agg && (agg->aggregator_identifier == agg_id)) {
+ if (slave_agg_no >= 0) {
+ if (!first_ok_slave && SLAVE_IS_OK(slave))
+ first_ok_slave = slave;
slave_agg_no--;
- if (slave_agg_no < 0)
- break;
+ continue;
+ }
+
+ if (SLAVE_IS_OK(slave)) {
+ res = bond_dev_queue_xmit(bond, skb, slave->dev);
+ goto out;
}
}
@@ -2460,23 +2429,12 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
goto out;
}
- start_at = slave;
-
- bond_for_each_slave_from(bond, slave, i, start_at) {
- int slave_agg_id = 0;
- struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
-
- if (agg)
- slave_agg_id = agg->aggregator_identifier;
-
- if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) {
- res = bond_dev_queue_xmit(bond, skb, slave->dev);
- break;
- }
- }
+	/* We couldn't find a suitable slave after the agg_no'th one, so use
+	 * the first suitable slave found, if any. */
+ if (first_ok_slave)
+ res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
out:
- read_unlock(&bond->lock);
if (res) {
/* no suitable interface, frame not sent */
kfree_skb(skb);
@@ -2515,11 +2473,12 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
void bond_3ad_update_lacp_rate(struct bonding *bond)
{
struct port *port = NULL;
+ struct list_head *iter;
struct slave *slave;
int lacp_fast;
lacp_fast = bond->params.lacp_fast;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave).port);
__get_state_machine_lock(port);
if (lacp_fast)
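The bond_3ad.c rework above drops the __get_first_port()/__get_next_port() and __get_next_agg() walkers in favour of the list-based bond_for_each_slave() / bond_for_each_slave_rcu() macros, fetching each slave's port or aggregator from SLAVE_AD_INFO(). The resulting loop shape is the same everywhere; a condensed sketch (the function name is illustrative, and the caller is assumed to hold the lock or RCU read side that each converted site already takes):

	/* Sketch of the per-slave walk that replaces the removed port/agg iterators. */
	static void example_walk_ports(struct bonding *bond)
	{
		struct list_head *iter;
		struct slave *slave;
		struct port *port;

		bond_for_each_slave(bond, slave, iter) {
			port = &(SLAVE_AD_INFO(slave).port);
			if (!port->slave)
				continue;	/* uninitialized port, as warned above */
			/* per-port work (state machines, selection logic, ...) */
		}
	}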
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f428ef574372..02872405d35d 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -223,13 +223,14 @@ static long long compute_gap(struct slave *slave)
static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{
struct slave *slave, *least_loaded;
+ struct list_head *iter;
long long max_gap;
least_loaded = NULL;
max_gap = LLONG_MIN;
/* Find the slave with the largest gap */
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (SLAVE_IS_OK(slave)) {
long long gap = compute_gap(slave);
@@ -382,30 +383,64 @@ out:
static struct slave *rlb_next_rx_slave(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- struct slave *rx_slave, *slave, *start_at;
- int i = 0;
+ struct slave *before = NULL, *rx_slave = NULL, *slave;
+ struct list_head *iter;
+ bool found = false;
- if (bond_info->next_rx_slave)
- start_at = bond_info->next_rx_slave;
- else
- start_at = bond_first_slave(bond);
+ bond_for_each_slave(bond, slave, iter) {
+ if (!SLAVE_IS_OK(slave))
+ continue;
+ if (!found) {
+ if (!before || before->speed < slave->speed)
+ before = slave;
+ } else {
+ if (!rx_slave || rx_slave->speed < slave->speed)
+ rx_slave = slave;
+ }
+ if (slave == bond_info->rx_slave)
+ found = true;
+ }
+	/* We found nothing usable after the current rx_slave, or something
+	 * faster exists before and up to it, so fall back to that one.
+ */
+ if (!rx_slave || (before && rx_slave->speed < before->speed))
+ rx_slave = before;
- rx_slave = NULL;
+ if (rx_slave)
+ bond_info->rx_slave = rx_slave;
- bond_for_each_slave_from(bond, slave, i, start_at) {
- if (SLAVE_IS_OK(slave)) {
- if (!rx_slave) {
- rx_slave = slave;
- } else if (slave->speed > rx_slave->speed) {
+ return rx_slave;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static struct slave *__rlb_next_rx_slave(struct bonding *bond)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct slave *before = NULL, *rx_slave = NULL, *slave;
+ struct list_head *iter;
+ bool found = false;
+
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (!SLAVE_IS_OK(slave))
+ continue;
+ if (!found) {
+ if (!before || before->speed < slave->speed)
+ before = slave;
+ } else {
+ if (!rx_slave || rx_slave->speed < slave->speed)
rx_slave = slave;
- }
}
+ if (slave == bond_info->rx_slave)
+ found = true;
}
+	/* We found nothing usable after the current rx_slave, or something
+	 * faster exists before and up to it, so fall back to that one.
+ */
+ if (!rx_slave || (before && rx_slave->speed < before->speed))
+ rx_slave = before;
- if (rx_slave) {
- slave = bond_next_slave(bond, rx_slave);
- bond_info->next_rx_slave = slave;
- }
+ if (rx_slave)
+ bond_info->rx_slave = rx_slave;
return rx_slave;
}
@@ -626,12 +661,14 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct arp_pkt *arp = arp_pkt(skb);
- struct slave *assigned_slave;
+ struct slave *assigned_slave, *curr_active_slave;
struct rlb_client_info *client_info;
u32 hash_index = 0;
_lock_rx_hashtbl(bond);
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
+
hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
client_info = &(bond_info->rx_hashtbl[hash_index]);
@@ -656,14 +693,14 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
* that the new client can be assigned to this entry.
*/
if (bond->curr_active_slave &&
- client_info->slave != bond->curr_active_slave) {
- client_info->slave = bond->curr_active_slave;
+ client_info->slave != curr_active_slave) {
+ client_info->slave = curr_active_slave;
rlb_update_client(client_info);
}
}
}
/* assign a new slave */
- assigned_slave = rlb_next_rx_slave(bond);
+ assigned_slave = __rlb_next_rx_slave(bond);
if (assigned_slave) {
if (!(client_info->assigned &&
@@ -726,7 +763,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
/* Don't modify or load balance ARPs that do not originate locally
* (e.g.,arrive via a bridge).
*/
- if (!bond_slave_has_mac(bond, arp->mac_src))
+ if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
return NULL;
if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -1019,7 +1056,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
/* loop through vlans and send one packet for each */
rcu_read_lock();
- netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
if (upper->priv_flags & IFF_802_1Q_VLAN)
alb_send_lp_vid(slave, mac_addr,
vlan_dev_vlan_id(upper));
@@ -1172,10 +1209,11 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
*/
static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
{
- struct slave *tmp_slave1, *free_mac_slave = NULL;
struct slave *has_bond_addr = bond->curr_active_slave;
+ struct slave *tmp_slave1, *free_mac_slave = NULL;
+ struct list_head *iter;
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
/* this is the first slave */
return 0;
}
@@ -1196,7 +1234,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
/* The slave's address is equal to the address of the bond.
* Search for a spare address in the bond for this slave.
*/
- bond_for_each_slave(bond, tmp_slave1) {
+ bond_for_each_slave(bond, tmp_slave1, iter) {
if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
/* no slave has tmp_slave1's perm addr
* as its curr addr
@@ -1246,15 +1284,16 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
*/
static int alb_set_mac_address(struct bonding *bond, void *addr)
{
- char tmp_addr[ETH_ALEN];
- struct slave *slave;
+ struct slave *slave, *rollback_slave;
+ struct list_head *iter;
struct sockaddr sa;
+ char tmp_addr[ETH_ALEN];
int res;
if (bond->alb_info.rlb_enabled)
return 0;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
/* save net_device's current hw address */
memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
@@ -1274,10 +1313,12 @@ unwind:
sa.sa_family = bond->dev->type;
/* unwind from head to the slave that failed */
- bond_for_each_slave_continue_reverse(bond, slave) {
- memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
- dev_set_mac_address(slave->dev, &sa);
- memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ bond_for_each_slave(bond, rollback_slave, iter) {
+ if (rollback_slave == slave)
+ break;
+ memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
+ dev_set_mac_address(rollback_slave->dev, &sa);
+ memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
}
return res;
@@ -1337,11 +1378,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
- /* make sure that the curr_active_slave do not change during tx
- */
- read_lock(&bond->lock);
- read_lock(&bond->curr_slave_lock);
-
switch (ntohs(skb->protocol)) {
case ETH_P_IP: {
const struct iphdr *iph = ip_hdr(skb);
@@ -1423,12 +1459,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
if (!tx_slave) {
/* unbalanced or unassigned, send through primary */
- tx_slave = bond->curr_active_slave;
+ tx_slave = rcu_dereference(bond->curr_active_slave);
bond_info->unbalanced_load += skb->len;
}
if (tx_slave && SLAVE_IS_OK(tx_slave)) {
- if (tx_slave != bond->curr_active_slave) {
+ if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
memcpy(eth_data->h_source,
tx_slave->dev->dev_addr,
ETH_ALEN);
@@ -1443,8 +1479,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
}
}
- read_unlock(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
if (res) {
/* no suitable interface, frame not sent */
kfree_skb(skb);
@@ -1458,11 +1492,12 @@ void bond_alb_monitor(struct work_struct *work)
struct bonding *bond = container_of(work, struct bonding,
alb_work.work);
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct list_head *iter;
struct slave *slave;
read_lock(&bond->lock);
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
bond_info->tx_rebalance_counter = 0;
bond_info->lp_counter = 0;
goto re_arm;
@@ -1480,7 +1515,7 @@ void bond_alb_monitor(struct work_struct *work)
*/
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
alb_send_learning_packets(slave, slave->dev->dev_addr);
read_unlock(&bond->curr_slave_lock);
@@ -1493,7 +1528,7 @@ void bond_alb_monitor(struct work_struct *work)
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == bond->curr_active_slave) {
SLAVE_TLB_INFO(slave).load =
@@ -1599,13 +1634,13 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
*/
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
{
- if (!list_empty(&bond->slave_list))
+ if (bond_has_slaves(bond))
alb_change_hw_addr_on_detach(bond, slave);
tlb_clear_slave(bond, slave, 0);
if (bond->alb_info.rlb_enabled) {
- bond->alb_info.next_rx_slave = NULL;
+ bond->alb_info.rx_slave = NULL;
rlb_clear_slave(bond, slave);
}
}
@@ -1669,7 +1704,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
swap_slave = bond->curr_active_slave;
rcu_assign_pointer(bond->curr_active_slave, new_slave);
- if (!new_slave || list_empty(&bond->slave_list))
+ if (!new_slave || !bond_has_slaves(bond))
return;
/* set the new curr_active_slave to the bonds mac address
@@ -1692,6 +1727,23 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
ASSERT_RTNL();
+	/* In TLB mode the slave might flip down/up with its old dev_addr and
+	 * thus filter out bond->dev_addr's packets, so force the bond's MAC
+ */
+ if (bond->params.mode == BOND_MODE_TLB) {
+ struct sockaddr sa;
+ u8 tmp_addr[ETH_ALEN];
+
+ memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN);
+
+ memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
+ sa.sa_family = bond->dev->type;
+		/* We don't care if the slave can't change its MAC; best effort. */
+ dev_set_mac_address(new_slave->dev, &sa);
+
+ memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ }
+
/* curr_active_slave must be set before calling alb_swap_mac_addr */
if (swap_slave) {
/* swap mac address */
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index c5eff5dafdfe..4226044efd08 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -154,9 +154,7 @@ struct alb_bond_info {
u8 rx_ntt; /* flag - need to transmit
* to all rx clients
*/
- struct slave *next_rx_slave;/* next slave to be assigned
- * to a new rx client for
- */
+ struct slave *rx_slave;/* last slave to xmit from */
u8 primary_is_promisc; /* boolean */
u32 rlb_promisc_timeout_counter;/* counts primary
* promiscuity time
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e883bfe2e727..a141f406cb98 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -78,6 +78,7 @@
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
+#include <net/flow_keys.h>
#include "bonding.h"
#include "bond_3ad.h"
#include "bond_alb.h"
@@ -159,7 +160,8 @@ MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on
module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
"0 for layer 2 (default), 1 for layer 3+4, "
- "2 for layer 2+3");
+ "2 for layer 2+3, 3 for encap layer 2+3, "
+ "4 for encap layer 3+4");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
@@ -217,6 +219,8 @@ const struct bond_parm_tbl xmit_hashtype_tbl[] = {
{ "layer2", BOND_XMIT_POLICY_LAYER2},
{ "layer3+4", BOND_XMIT_POLICY_LAYER34},
{ "layer2+3", BOND_XMIT_POLICY_LAYER23},
+{ "encap2+3", BOND_XMIT_POLICY_ENCAP23},
+{ "encap3+4", BOND_XMIT_POLICY_ENCAP34},
{ NULL, -1},
};
@@ -332,10 +336,11 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
__be16 proto, u16 vid)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
+ struct slave *slave, *rollback_slave;
+ struct list_head *iter;
int res;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
res = vlan_vid_add(slave->dev, proto, vid);
if (res)
goto unwind;
@@ -344,9 +349,13 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
return 0;
unwind:
- /* unwind from the slave that failed */
- bond_for_each_slave_continue_reverse(bond, slave)
- vlan_vid_del(slave->dev, proto, vid);
+ /* unwind to the slave that failed */
+ bond_for_each_slave(bond, rollback_slave, iter) {
+ if (rollback_slave == slave)
+ break;
+
+ vlan_vid_del(rollback_slave->dev, proto, vid);
+ }
return res;
}
@@ -360,9 +369,10 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
__be16 proto, u16 vid)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
vlan_vid_del(slave->dev, proto, vid);
if (bond_is_lb(bond))
@@ -382,15 +392,16 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
*/
static int bond_set_carrier(struct bonding *bond)
{
+ struct list_head *iter;
struct slave *slave;
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto down;
if (bond->params.mode == BOND_MODE_8023AD)
return bond_3ad_set_carrier(bond);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (slave->link == BOND_LINK_UP) {
if (!netif_carrier_ok(bond->dev)) {
netif_carrier_on(bond->dev);
@@ -522,7 +533,9 @@ static int bond_check_dev_link(struct bonding *bond,
*/
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
+ struct list_head *iter;
int err = 0;
+
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave) {
@@ -532,7 +545,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
} else {
struct slave *slave;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
err = dev_set_promiscuity(slave->dev, inc);
if (err)
return err;
@@ -546,7 +559,9 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
*/
static int bond_set_allmulti(struct bonding *bond, int inc)
{
+ struct list_head *iter;
int err = 0;
+
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave) {
@@ -556,7 +571,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
} else {
struct slave *slave;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
err = dev_set_allmulti(slave->dev, inc);
if (err)
return err;
@@ -774,43 +789,24 @@ static bool bond_should_change_active(struct bonding *bond)
/**
* find_best_interface - select the best available slave to be the active one
* @bond: our bonding struct
- *
- * Warning: Caller must hold curr_slave_lock for writing.
*/
static struct slave *bond_find_best_slave(struct bonding *bond)
{
- struct slave *new_active, *old_active;
- struct slave *bestslave = NULL;
+ struct slave *slave, *bestslave = NULL;
+ struct list_head *iter;
int mintime = bond->params.updelay;
- int i;
-
- new_active = bond->curr_active_slave;
- if (!new_active) { /* there were no active slaves left */
- new_active = bond_first_slave(bond);
- if (!new_active)
- return NULL; /* still no slave, return NULL */
- }
-
- if ((bond->primary_slave) &&
- bond->primary_slave->link == BOND_LINK_UP &&
- bond_should_change_active(bond)) {
- new_active = bond->primary_slave;
- }
-
- /* remember where to stop iterating over the slaves */
- old_active = new_active;
-
- bond_for_each_slave_from(bond, new_active, i, old_active) {
- if (new_active->link == BOND_LINK_UP) {
- return new_active;
- } else if (new_active->link == BOND_LINK_BACK &&
- IS_UP(new_active->dev)) {
- /* link up, but waiting for stabilization */
- if (new_active->delay < mintime) {
- mintime = new_active->delay;
- bestslave = new_active;
- }
+ if (bond->primary_slave && bond->primary_slave->link == BOND_LINK_UP &&
+ bond_should_change_active(bond))
+ return bond->primary_slave;
+
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave->link == BOND_LINK_UP)
+ return slave;
+ if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
+ slave->delay < mintime) {
+ mintime = slave->delay;
+ bestslave = slave;
}
}
@@ -971,35 +967,6 @@ void bond_select_active_slave(struct bonding *bond)
}
}
-/*--------------------------- slave list handling ---------------------------*/
-
-/*
- * This function attaches the slave to the end of list.
- *
- * bond->lock held for writing by caller.
- */
-static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
-{
- list_add_tail_rcu(&new_slave->list, &bond->slave_list);
- bond->slave_cnt++;
-}
-
-/*
- * This function detaches the slave from the list.
- * WARNING: no check is made to verify if the slave effectively
- * belongs to <bond>.
- * Nothing is freed on return, structures are just unchained.
- * If any slave pointer in bond was pointing to <slave>,
- * it should be changed by the calling function.
- *
- * bond->lock held for writing by caller.
- */
-static void bond_detach_slave(struct bonding *bond, struct slave *slave)
-{
- list_del_rcu(&slave->list);
- bond->slave_cnt--;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
@@ -1046,9 +1013,10 @@ static void bond_poll_controller(struct net_device *bond_dev)
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
if (IS_UP(slave->dev))
slave_disable_netpoll(slave);
}
@@ -1056,10 +1024,11 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
{
struct bonding *bond = netdev_priv(dev);
+ struct list_head *iter;
struct slave *slave;
int err = 0;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
err = slave_enable_netpoll(slave);
if (err) {
bond_netpoll_cleanup(dev);
@@ -1087,10 +1056,11 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct bonding *bond = netdev_priv(dev);
+ struct list_head *iter;
netdev_features_t mask;
struct slave *slave;
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
/* Disable adding VLANs to empty bond. But why? --mq */
features |= NETIF_F_VLAN_CHALLENGED;
return features;
@@ -1100,7 +1070,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
features = netdev_increment_features(features,
slave->dev->features,
mask);
@@ -1118,16 +1088,17 @@ static void bond_compute_features(struct bonding *bond)
{
unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
+ struct net_device *bond_dev = bond->dev;
+ struct list_head *iter;
+ struct slave *slave;
unsigned short max_hard_header_len = ETH_HLEN;
unsigned int gso_max_size = GSO_MAX_SIZE;
- struct net_device *bond_dev = bond->dev;
u16 gso_max_segs = GSO_MAX_SEGS;
- struct slave *slave;
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto done;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
vlan_features = netdev_increment_features(vlan_features,
slave->dev->vlan_features, BOND_VLAN_FEATURES);
@@ -1233,15 +1204,16 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
}
static int bond_master_upper_dev_link(struct net_device *bond_dev,
- struct net_device *slave_dev)
+ struct net_device *slave_dev,
+ struct slave *slave)
{
int err;
- err = netdev_master_upper_dev_link(slave_dev, bond_dev);
+ err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
if (err)
return err;
slave_dev->flags |= IFF_SLAVE;
- rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
+ rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
return 0;
}
@@ -1250,7 +1222,7 @@ static void bond_upper_dev_unlink(struct net_device *bond_dev,
{
netdev_upper_dev_unlink(slave_dev, bond_dev);
slave_dev->flags &= ~IFF_SLAVE;
- rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
+ rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
}
/* enslave device <slave> to bond device <master> */
@@ -1258,7 +1230,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
- struct slave *new_slave = NULL;
+ struct slave *new_slave = NULL, *prev_slave;
struct sockaddr addr;
int link_reporting;
int res = 0, i;
@@ -1313,7 +1285,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* bond ether type mutual exclusion - don't allow slaves of dissimilar
* ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
*/
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
if (bond_dev->type != slave_dev->type) {
pr_debug("%s: change device type from %d to %d\n",
bond_dev->name,
@@ -1352,7 +1324,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
if (slave_ops->ndo_set_mac_address == NULL) {
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
bond_dev->name);
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
@@ -1368,7 +1340,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (list_empty(&bond->slave_list) &&
+ if (!bond_has_slaves(bond) &&
bond->dev->addr_assign_type == NET_ADDR_RANDOM)
bond_set_dev_addr(bond->dev, slave_dev);
@@ -1377,7 +1349,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
res = -ENOMEM;
goto err_undo_flags;
}
- INIT_LIST_HEAD(&new_slave->list);
/*
* Set the new_slave's queue_id to be zero. Queue ID mapping
* is set via sysfs or module option if desired.
@@ -1413,17 +1384,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
- res = bond_master_upper_dev_link(bond_dev, slave_dev);
- if (res) {
- pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
- goto err_restore_mac;
- }
-
/* open the slave since the application closed it */
res = dev_open(slave_dev);
if (res) {
pr_debug("Opening slave %s failed\n", slave_dev->name);
- goto err_unset_master;
+ goto err_restore_mac;
}
new_slave->bond = bond;
@@ -1479,21 +1444,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_close;
}
- write_lock_bh(&bond->lock);
-
- bond_attach_slave(bond, new_slave);
+ prev_slave = bond_last_slave(bond);
new_slave->delay = 0;
new_slave->link_failure_count = 0;
- write_unlock_bh(&bond->lock);
-
- bond_compute_features(bond);
-
bond_update_speed_duplex(new_slave);
- read_lock(&bond->lock);
-
new_slave->last_arp_rx = jiffies -
(msecs_to_jiffies(bond->params.arp_interval) + 1);
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
@@ -1554,12 +1511,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
- write_lock_bh(&bond->curr_slave_lock);
-
switch (bond->params.mode) {
case BOND_MODE_ACTIVEBACKUP:
bond_set_slave_inactive_flags(new_slave);
- bond_select_active_slave(bond);
break;
case BOND_MODE_8023AD:
/* in 802.3ad mode, the internal mechanism
@@ -1568,16 +1522,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
*/
bond_set_slave_inactive_flags(new_slave);
/* if this is the first slave */
- if (bond_first_slave(bond) == new_slave) {
+ if (!prev_slave) {
SLAVE_AD_INFO(new_slave).id = 1;
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
} else {
- struct slave *prev_slave;
-
- prev_slave = bond_prev_slave(bond, new_slave);
SLAVE_AD_INFO(new_slave).id =
SLAVE_AD_INFO(prev_slave).id + 1;
}
@@ -1588,7 +1539,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
case BOND_MODE_ALB:
bond_set_active_slave(new_slave);
bond_set_slave_inactive_flags(new_slave);
- bond_select_active_slave(bond);
break;
default:
pr_debug("This slave is always active in trunk mode\n");
@@ -1606,10 +1556,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
break;
} /* switch(bond_mode) */
- write_unlock_bh(&bond->curr_slave_lock);
-
- bond_set_carrier(bond);
-
#ifdef CONFIG_NET_POLL_CONTROLLER
slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
@@ -1624,17 +1570,29 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
#endif
- read_unlock(&bond->lock);
-
- res = bond_create_slave_symlinks(bond_dev, slave_dev);
- if (res)
- goto err_detach;
-
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
if (res) {
pr_debug("Error %d calling netdev_rx_handler_register\n", res);
- goto err_dest_symlinks;
+ goto err_detach;
+ }
+
+ res = bond_master_upper_dev_link(bond_dev, slave_dev, new_slave);
+ if (res) {
+ pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
+ goto err_unregister;
+ }
+
+ bond->slave_cnt++;
+ bond_compute_features(bond);
+ bond_set_carrier(bond);
+
+ if (USES_PRIMARY(bond->params.mode)) {
+ read_lock(&bond->lock);
+ write_lock_bh(&bond->curr_slave_lock);
+ bond_select_active_slave(bond);
+ write_unlock_bh(&bond->curr_slave_lock);
+ read_unlock(&bond->lock);
}
pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1646,8 +1604,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return 0;
/* Undo stages on error */
-err_dest_symlinks:
- bond_destroy_slave_symlinks(bond_dev, slave_dev);
+err_unregister:
+ netdev_rx_handler_unregister(slave_dev);
err_detach:
if (!USES_PRIMARY(bond->params.mode))
@@ -1655,7 +1613,6 @@ err_detach:
vlan_vids_del_by_dev(slave_dev, bond_dev);
write_lock_bh(&bond->lock);
- bond_detach_slave(bond, new_slave);
if (bond->primary_slave == new_slave)
bond->primary_slave = NULL;
if (bond->curr_active_slave == new_slave) {
@@ -1675,9 +1632,6 @@ err_close:
slave_dev->priv_flags &= ~IFF_BONDING;
dev_close(slave_dev);
-err_unset_master:
- bond_upper_dev_unlink(bond_dev, slave_dev);
-
err_restore_mac:
if (!bond->params.fail_over_mac) {
/* XXX TODO - fom follow mode needs to change master's
@@ -1696,9 +1650,8 @@ err_free:
kfree(new_slave);
err_undo_flags:
- bond_compute_features(bond);
/* Enslave of first slave has failed and we need to fix master's mac */
- if (list_empty(&bond->slave_list) &&
+ if (!bond_has_slaves(bond) &&
ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
eth_hw_addr_random(bond_dev);
@@ -1749,6 +1702,11 @@ static int __bond_release_one(struct net_device *bond_dev,
}
write_unlock_bh(&bond->lock);
+
+ /* release the slave from its bond */
+ bond->slave_cnt--;
+
+ bond_upper_dev_unlink(bond_dev, slave_dev);
/* unregister rx_handler early so bond_handle_frame wouldn't be called
* for this slave anymore.
*/
@@ -1772,12 +1730,9 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->current_arp_slave = NULL;
- /* release the slave from its bond */
- bond_detach_slave(bond, slave);
-
if (!all && !bond->params.fail_over_mac) {
if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
- !list_empty(&bond->slave_list))
+ bond_has_slaves(bond))
pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
bond_dev->name, slave_dev->name,
slave->perm_hwaddr,
@@ -1820,7 +1775,7 @@ static int __bond_release_one(struct net_device *bond_dev,
write_lock_bh(&bond->lock);
}
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
bond_set_carrier(bond);
eth_hw_addr_random(bond_dev);
@@ -1836,7 +1791,7 @@ static int __bond_release_one(struct net_device *bond_dev,
unblock_netpoll_tx();
synchronize_rcu();
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
}
@@ -1848,8 +1803,6 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_dev->name, slave_dev->name, bond_dev->name);
/* must do this from outside any spinlocks */
- bond_destroy_slave_symlinks(bond_dev, slave_dev);
-
vlan_vids_del_by_dev(slave_dev, bond_dev);
/* If the mode USES_PRIMARY, then this cases was handled above by
@@ -1873,8 +1826,6 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_hw_addr_flush(bond_dev, slave_dev);
}
- bond_upper_dev_unlink(bond_dev, slave_dev);
-
slave_disable_netpoll(slave);
/* close slave before restoring its mac address */
@@ -1913,7 +1864,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
int ret;
ret = bond_release(bond_dev, slave_dev);
- if (ret == 0 && list_empty(&bond->slave_list)) {
+ if (ret == 0 && !bond_has_slaves(bond)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
pr_info("%s: destroying bond %s.\n",
bond_dev->name, bond_dev->name);
@@ -1922,61 +1873,6 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
return ret;
}
-/*
- * This function changes the active slave to slave <slave_dev>.
- * It returns -EINVAL in the following cases.
- * - <slave_dev> is not found in the list.
- * - There is not active slave now.
- * - <slave_dev> is already active.
- * - The link state of <slave_dev> is not BOND_LINK_UP.
- * - <slave_dev> is not running.
- * In these cases, this function does nothing.
- * In the other cases, current_slave pointer is changed and 0 is returned.
- */
-static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_device *slave_dev)
-{
- struct bonding *bond = netdev_priv(bond_dev);
- struct slave *old_active = NULL;
- struct slave *new_active = NULL;
- int res = 0;
-
- if (!USES_PRIMARY(bond->params.mode))
- return -EINVAL;
-
- /* Verify that bond_dev is indeed the master of slave_dev */
- if (!(slave_dev->flags & IFF_SLAVE) ||
- !netdev_has_upper_dev(slave_dev, bond_dev))
- return -EINVAL;
-
- read_lock(&bond->lock);
-
- old_active = bond->curr_active_slave;
- new_active = bond_get_slave_by_dev(bond, slave_dev);
- /*
- * Changing to the current active: do nothing; return success.
- */
- if (new_active && new_active == old_active) {
- read_unlock(&bond->lock);
- return 0;
- }
-
- if (new_active &&
- old_active &&
- new_active->link == BOND_LINK_UP &&
- IS_UP(new_active->dev)) {
- block_netpoll_tx();
- write_lock_bh(&bond->curr_slave_lock);
- bond_change_active_slave(bond, new_active);
- write_unlock_bh(&bond->curr_slave_lock);
- unblock_netpoll_tx();
- } else
- res = -EINVAL;
-
- read_unlock(&bond->lock);
-
- return res;
-}
-
static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
struct bonding *bond = netdev_priv(bond_dev);
@@ -1994,11 +1890,12 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
int i = 0, res = -ENODEV;
struct slave *slave;
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (i++ == (int)info->slave_id) {
res = 0;
strcpy(info->slave_name, slave->dev->name);
@@ -2019,12 +1916,13 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
static int bond_miimon_inspect(struct bonding *bond)
{
int link_state, commit = 0;
+ struct list_head *iter;
struct slave *slave;
bool ignore_updelay;
ignore_updelay = !bond->curr_active_slave ? true : false;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2118,9 +2016,10 @@ static int bond_miimon_inspect(struct bonding *bond)
static void bond_miimon_commit(struct bonding *bond)
{
+ struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
continue;
@@ -2225,7 +2124,7 @@ void bond_mii_monitor(struct work_struct *work)
delay = msecs_to_jiffies(bond->params.miimon);
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto re_arm;
should_notify_peers = bond_should_notify_peers(bond);
@@ -2274,7 +2173,7 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
return true;
rcu_read_lock();
- netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
if (ip == bond_confirm_addr(upper, 0, ip)) {
ret = true;
break;
@@ -2349,10 +2248,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
*
* TODO: QinQ?
*/
- netdev_for_each_upper_dev_rcu(bond->dev, vlan_upper, vlan_iter) {
+ netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
+ vlan_iter) {
if (!is_vlan_dev(vlan_upper))
continue;
- netdev_for_each_upper_dev_rcu(vlan_upper, upper, iter) {
+ netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
+ iter) {
if (upper == rt->dst.dev) {
vlan_id = vlan_dev_vlan_id(vlan_upper);
rcu_read_unlock();
@@ -2365,7 +2266,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
* our upper vlans, then just search for any dev that
* matches, and in case it's a vlan - save the id
*/
- netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
if (upper == rt->dst.dev) {
/* if it's a vlan - get its VID */
if (is_vlan_dev(upper))
@@ -2512,11 +2413,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
struct slave *slave, *oldcurrent;
+ struct list_head *iter;
int do_failover = 0;
read_lock(&bond->lock);
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto re_arm;
oldcurrent = bond->curr_active_slave;
@@ -2528,7 +2430,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev);
if (slave->link != BOND_LINK_UP) {
@@ -2619,10 +2521,11 @@ re_arm:
static int bond_ab_arp_inspect(struct bonding *bond)
{
unsigned long trans_start, last_rx;
+ struct list_head *iter;
struct slave *slave;
int commit = 0;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
last_rx = slave_last_rx(bond, slave);
@@ -2689,9 +2592,10 @@ static int bond_ab_arp_inspect(struct bonding *bond)
static void bond_ab_arp_commit(struct bonding *bond)
{
unsigned long trans_start;
+ struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
continue;
@@ -2762,8 +2666,9 @@ do_failover:
*/
static void bond_ab_arp_probe(struct bonding *bond)
{
- struct slave *slave, *next_slave;
- int i;
+ struct slave *slave, *before = NULL, *new_slave = NULL;
+ struct list_head *iter;
+ bool found = false;
read_lock(&bond->curr_slave_lock);
@@ -2793,18 +2698,12 @@ static void bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(bond->current_arp_slave);
- /* search for next candidate */
- next_slave = bond_next_slave(bond, bond->current_arp_slave);
- bond_for_each_slave_from(bond, slave, i, next_slave) {
- if (IS_UP(slave->dev)) {
- slave->link = BOND_LINK_BACK;
- bond_set_slave_active_flags(slave);
- bond_arp_send_all(bond, slave);
- slave->jiffies = jiffies;
- bond->current_arp_slave = slave;
- break;
- }
+ bond_for_each_slave(bond, slave, iter) {
+ if (!found && !before && IS_UP(slave->dev))
+ before = slave;
+ if (found && !new_slave && IS_UP(slave->dev))
+ new_slave = slave;
/* if the link state is up at this point, we
* mark it down - this can happen if we have
* simultaneous link failures and
@@ -2812,7 +2711,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
* one the current slave so it is still marked
* up when it is actually down
*/
- if (slave->link == BOND_LINK_UP) {
+ if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
slave->link = BOND_LINK_DOWN;
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
@@ -2822,7 +2721,22 @@ static void bond_ab_arp_probe(struct bonding *bond)
pr_info("%s: backup interface %s is now down.\n",
bond->dev->name, slave->dev->name);
}
+ if (slave == bond->current_arp_slave)
+ found = true;
}
+
+ if (!new_slave && before)
+ new_slave = before;
+
+ if (!new_slave)
+ return;
+
+ new_slave->link = BOND_LINK_BACK;
+ bond_set_slave_active_flags(new_slave);
+ bond_arp_send_all(bond, new_slave);
+ new_slave->jiffies = jiffies;
+ bond->current_arp_slave = new_slave;
+
}
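
The removed bond_for_each_slave_from() helper walked the ring starting at an arbitrary slave; its replacement makes a single linear pass, remembering the first usable slave seen before current_arp_slave ("before") and the first usable one after it ("new_slave"), then falling back to "before" to get the wrap-around. The same "found" idiom is reused by bond_info_seq_next() in bond_procfs.c below. A rough standalone illustration of the idiom, with an array and a usable() flag standing in for the slave list and IS_UP(), might look like this (not part of the patch):

#include <stdbool.h>
#include <stdio.h>

struct item { int id; bool usable; };

/* pick the next usable element after cur, wrapping to the start */
static struct item *next_usable(struct item *arr, int n, struct item *cur)
{
	struct item *before = NULL, *next = NULL;
	bool found = false;
	int i;

	for (i = 0; i < n; i++) {
		if (!found && !before && arr[i].usable)
			before = &arr[i];	/* candidate before cur */
		if (found && !next && arr[i].usable)
			next = &arr[i];		/* first candidate after cur */
		if (&arr[i] == cur)
			found = true;
	}
	return next ? next : before;	/* NULL if nothing is usable */
}

int main(void)
{
	struct item slaves[] = {
		{ 0, true }, { 1, false }, { 2, true }, { 3, false },
	};
	struct item *picked = next_usable(slaves, 4, &slaves[2]);

	printf("picked: %d\n", picked ? picked->id : -1);	/* wraps to 0 */
	return 0;
}
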
void bond_activebackup_arp_mon(struct work_struct *work)
@@ -2836,7 +2750,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto re_arm;
should_notify_peers = bond_should_notify_peers(bond);
@@ -3033,99 +2947,85 @@ static struct notifier_block bond_netdev_notifier = {
/*---------------------------- Hashing Policies -----------------------------*/
-/*
- * Hash for the output device based upon layer 2 data
- */
-static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+/* L2 hash helper */
+static inline u32 bond_eth_hash(struct sk_buff *skb)
{
struct ethhdr *data = (struct ethhdr *)skb->data;
if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
- return (data->h_dest[5] ^ data->h_source[5]) % count;
+ return data->h_dest[5] ^ data->h_source[5];
return 0;
}
-/*
- * Hash for the output device based upon layer 2 and layer 3 data. If
- * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
- */
-static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
+/* Extract the appropriate headers based on bond's xmit policy */
+static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
+ struct flow_keys *fk)
{
- const struct ethhdr *data;
+ const struct ipv6hdr *iph6;
const struct iphdr *iph;
- const struct ipv6hdr *ipv6h;
- u32 v6hash;
- const __be32 *s, *d;
+ int noff, proto = -1;
- if (skb->protocol == htons(ETH_P_IP) &&
- pskb_network_may_pull(skb, sizeof(*iph))) {
+ if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
+ return skb_flow_dissect(skb, fk);
+
+ fk->ports = 0;
+ noff = skb_network_offset(skb);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (!pskb_may_pull(skb, noff + sizeof(*iph)))
+ return false;
iph = ip_hdr(skb);
- data = (struct ethhdr *)skb->data;
- return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
- (data->h_dest[5] ^ data->h_source[5])) % count;
- } else if (skb->protocol == htons(ETH_P_IPV6) &&
- pskb_network_may_pull(skb, sizeof(*ipv6h))) {
- ipv6h = ipv6_hdr(skb);
- data = (struct ethhdr *)skb->data;
- s = &ipv6h->saddr.s6_addr32[0];
- d = &ipv6h->daddr.s6_addr32[0];
- v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
- v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
- return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
- }
-
- return bond_xmit_hash_policy_l2(skb, count);
+ fk->src = iph->saddr;
+ fk->dst = iph->daddr;
+ noff += iph->ihl << 2;
+ if (!ip_is_fragment(iph))
+ proto = iph->protocol;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
+ return false;
+ iph6 = ipv6_hdr(skb);
+ fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
+ fk->dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
+ noff += sizeof(*iph6);
+ proto = iph6->nexthdr;
+ } else {
+ return false;
+ }
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
+ fk->ports = skb_flow_get_ports(skb, noff, proto);
+
+ return true;
}
-/*
- * Hash for the output device based upon layer 3 and layer 4 data. If
- * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
- * altogether not IP, fall back on bond_xmit_hash_policy_l2()
+/**
+ * bond_xmit_hash - generate a hash value based on the xmit policy
+ * @bond: bonding device
+ * @skb: buffer to use for headers
+ * @count: modulo value
+ *
+ * This function will extract the necessary headers from the skb buffer and use
+ * them to generate a hash based on the xmit_policy set in the bonding device
+ * which will be reduced modulo count before returning.
*/
-static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
{
- u32 layer4_xor = 0;
- const struct iphdr *iph;
- const struct ipv6hdr *ipv6h;
- const __be32 *s, *d;
- const __be16 *l4 = NULL;
- __be16 _l4[2];
- int noff = skb_network_offset(skb);
- int poff;
-
- if (skb->protocol == htons(ETH_P_IP) &&
- pskb_may_pull(skb, noff + sizeof(*iph))) {
- iph = ip_hdr(skb);
- poff = proto_ports_offset(iph->protocol);
+ struct flow_keys flow;
+ u32 hash;
- if (!ip_is_fragment(iph) && poff >= 0) {
- l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
- sizeof(_l4), &_l4);
- if (l4)
- layer4_xor = ntohs(l4[0] ^ l4[1]);
- }
- return (layer4_xor ^
- ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
- } else if (skb->protocol == htons(ETH_P_IPV6) &&
- pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
- ipv6h = ipv6_hdr(skb);
- poff = proto_ports_offset(ipv6h->nexthdr);
- if (poff >= 0) {
- l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
- sizeof(_l4), &_l4);
- if (l4)
- layer4_xor = ntohs(l4[0] ^ l4[1]);
- }
- s = &ipv6h->saddr.s6_addr32[0];
- d = &ipv6h->daddr.s6_addr32[0];
- layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
- layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
- (layer4_xor >> 8);
- return layer4_xor % count;
- }
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
+ !bond_flow_dissect(bond, skb, &flow))
+ return bond_eth_hash(skb) % count;
+
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
+ bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
+ hash = bond_eth_hash(skb);
+ else
+ hash = (__force u32)flow.ports;
+ hash ^= (__force u32)flow.dst ^ (__force u32)flow.src;
+ hash ^= (hash >> 16);
+ hash ^= (hash >> 8);
- return bond_xmit_hash_policy_l2(skb, count);
+ return hash % count;
}
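
With this change there is no longer one hash function per policy: bond_flow_dissect() extracts the relevant fields and bond_xmit_hash() folds them down to a slave index. A standalone sketch of just the folding step, applied to hypothetical pre-extracted fields (the addresses and ports below are made up), would look like this; it is not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* reproduce the XOR-and-fold reduction used by bond_xmit_hash() */
static uint32_t fold_hash(uint32_t src, uint32_t dst, uint32_t ports,
			  int count)
{
	uint32_t hash = ports;	/* layer2+3 policies start from the L2 hash */

	hash ^= dst ^ src;
	hash ^= hash >> 16;
	hash ^= hash >> 8;
	return hash % count;
}

int main(void)
{
	/* hypothetical IPv4 address pair and TCP port pair */
	uint32_t src = 0xc0a80001, dst = 0xc0a80002;
	uint32_t ports = (443 << 16) | 51515;
	int slaves = 3;

	printf("selected slave index: %u\n",
	       fold_hash(src, dst, ports, slaves));
	return 0;
}

For the layer2 and layer2+3 policies the starting value is bond_eth_hash() rather than the port pair, but the fold itself is identical.
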
/*-------------------------- Device entry points ----------------------------*/
@@ -3155,13 +3055,14 @@ static void bond_work_cancel_all(struct bonding *bond)
static int bond_open(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
struct slave *slave;
/* reset slave->backup and slave->inactive */
read_lock(&bond->lock);
- if (!list_empty(&bond->slave_list)) {
+ if (bond_has_slaves(bond)) {
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
&& (slave != bond->curr_active_slave)) {
bond_set_slave_inactive_flags(slave);
@@ -3221,12 +3122,13 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
{
struct bonding *bond = netdev_priv(bond_dev);
struct rtnl_link_stats64 temp;
+ struct list_head *iter;
struct slave *slave;
memset(stats, 0, sizeof(*stats));
read_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
const struct rtnl_link_stats64 *sstats =
dev_get_stats(slave->dev, &temp);
@@ -3263,6 +3165,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
{
+ struct bonding *bond = netdev_priv(bond_dev);
struct net_device *slave_dev = NULL;
struct ifbond k_binfo;
struct ifbond __user *u_binfo = NULL;
@@ -3293,7 +3196,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
if (mii->reg_num == 1) {
- struct bonding *bond = netdev_priv(bond_dev);
mii->val_out = 0;
read_lock(&bond->lock);
read_lock(&bond->curr_slave_lock);
@@ -3365,7 +3267,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
break;
case BOND_CHANGE_ACTIVE_OLD:
case SIOCBONDCHANGEACTIVE:
- res = bond_ioctl_change_active(bond_dev, slave_dev);
+ res = bond_option_active_slave_set(bond, slave_dev);
break;
default:
res = -EOPNOTSUPP;
@@ -3393,22 +3295,24 @@ static void bond_change_rx_flags(struct net_device *bond_dev, int change)
static void bond_set_rx_mode(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
struct slave *slave;
- ASSERT_RTNL();
+ rcu_read_lock();
if (USES_PRIMARY(bond->params.mode)) {
- slave = rtnl_dereference(bond->curr_active_slave);
+ slave = rcu_dereference(bond->curr_active_slave);
if (slave) {
dev_uc_sync(slave->dev, bond_dev);
dev_mc_sync(slave->dev, bond_dev);
}
} else {
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
dev_uc_sync_multiple(slave->dev, bond_dev);
dev_mc_sync_multiple(slave->dev, bond_dev);
}
}
+ rcu_read_unlock();
}
static int bond_neigh_init(struct neighbour *n)
@@ -3471,7 +3375,8 @@ static int bond_neigh_setup(struct net_device *dev,
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
+ struct slave *slave, *rollback_slave;
+ struct list_head *iter;
int res = 0;
pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
@@ -3492,10 +3397,9 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
* call to the base driver.
*/
- bond_for_each_slave(bond, slave) {
- pr_debug("s %p s->p %p c_m %p\n",
+ bond_for_each_slave(bond, slave, iter) {
+ pr_debug("s %p c_m %p\n",
slave,
- bond_prev_slave(bond, slave),
slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
@@ -3520,13 +3424,16 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
unwind:
/* unwind from head to the slave that failed */
- bond_for_each_slave_continue_reverse(bond, slave) {
+ bond_for_each_slave(bond, rollback_slave, iter) {
int tmp_res;
- tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
+ if (rollback_slave == slave)
+ break;
+
+ tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
if (tmp_res) {
pr_debug("unwind err %d dev %s\n",
- tmp_res, slave->dev->name);
+ tmp_res, rollback_slave->dev->name);
}
}
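
The unwind path here (and, just below, in bond_set_mac_address()) drops the old reverse-continue iterator: it simply restarts from the first slave and stops at the one that failed. A rough userspace sketch of that rollback shape, with apply() as a hypothetical stand-in for dev_set_mtu(), is shown below; it is not part of the patch:

#include <stdio.h>

static int apply(int idx, int val)
{
	(void)val;
	return idx == 2 ? -1 : 0;	/* pretend the third element rejects it */
}

static int set_all(int *vals, int n, int new_val, int old_val)
{
	int i, failed = -1;

	for (i = 0; i < n; i++) {
		if (apply(i, new_val) < 0) {
			failed = i;
			goto unwind;
		}
		vals[i] = new_val;
	}
	return 0;

unwind:
	/* walk from the head and stop at the element that failed */
	for (i = 0; i < n; i++) {
		if (i == failed)
			break;
		vals[i] = old_val;	/* best-effort rollback */
		printf("rolled back element %d to %d\n", i, old_val);
	}
	return -1;
}

int main(void)
{
	int mtus[4] = { 1500, 1500, 1500, 1500 };

	return set_all(mtus, 4, 9000, 1500) ? 1 : 0;
}
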
@@ -3543,8 +3450,9 @@ unwind:
static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave, *rollback_slave;
struct sockaddr *sa = addr, tmp_sa;
- struct slave *slave;
+ struct list_head *iter;
int res = 0;
if (bond->params.mode == BOND_MODE_ALB)
@@ -3578,7 +3486,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
* call to the base driver.
*/
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
pr_debug("slave %p %s\n", slave, slave->dev->name);
@@ -3610,13 +3518,16 @@ unwind:
tmp_sa.sa_family = bond_dev->type;
/* unwind from head to the slave that failed */
- bond_for_each_slave_continue_reverse(bond, slave) {
+ bond_for_each_slave(bond, rollback_slave, iter) {
int tmp_res;
- tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
+ if (rollback_slave == slave)
+ break;
+
+ tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
if (tmp_res) {
pr_debug("unwind err %d dev %s\n",
- tmp_res, slave->dev->name);
+ tmp_res, rollback_slave->dev->name);
}
}
@@ -3635,11 +3546,12 @@ unwind:
*/
void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
{
+ struct list_head *iter;
struct slave *slave;
int i = slave_id;
/* Here we start from the slave with slave_id */
- bond_for_each_slave_rcu(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0) {
if (slave_can_tx(slave)) {
bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -3650,7 +3562,7 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
/* Here we start from the first slave up to slave_id */
i = slave_id;
- bond_for_each_slave_rcu(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0)
break;
if (slave_can_tx(slave)) {
@@ -3707,8 +3619,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
return NETDEV_TX_OK;
}
-/*
- * In bond_xmit_xor() , we determine the output device by using a pre-
+/* In bond_xmit_xor() , we determine the output device by using a pre-
* determined xmit_hash_policy(), If the selected device is not enabled,
* find the next active slave.
*/
@@ -3716,8 +3627,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- bond_xmit_slave_id(bond, skb,
- bond->xmit_hash_policy(skb, bond->slave_cnt));
+ bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
return NETDEV_TX_OK;
}
@@ -3727,8 +3637,9 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave = NULL;
+ struct list_head *iter;
- bond_for_each_slave_rcu(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (bond_is_last_slave(bond, slave))
break;
if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
@@ -3753,22 +3664,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
/*------------------------- Device initialization ---------------------------*/
-static void bond_set_xmit_hash_policy(struct bonding *bond)
-{
- switch (bond->params.xmit_policy) {
- case BOND_XMIT_POLICY_LAYER23:
- bond->xmit_hash_policy = bond_xmit_hash_policy_l23;
- break;
- case BOND_XMIT_POLICY_LAYER34:
- bond->xmit_hash_policy = bond_xmit_hash_policy_l34;
- break;
- case BOND_XMIT_POLICY_LAYER2:
- default:
- bond->xmit_hash_policy = bond_xmit_hash_policy_l2;
- break;
- }
-}
-
/*
* Lookup the slave that corresponds to a qid
*/
@@ -3777,13 +3672,14 @@ static inline int bond_slave_override(struct bonding *bond,
{
struct slave *slave = NULL;
struct slave *check_slave;
+ struct list_head *iter;
int res = 1;
if (!skb->queue_mapping)
return 1;
/* Find out if any slaves have the same mapping as this skb. */
- bond_for_each_slave_rcu(bond, check_slave) {
+ bond_for_each_slave_rcu(bond, check_slave, iter) {
if (check_slave->queue_id == skb->queue_mapping) {
slave = check_slave;
break;
@@ -3869,7 +3765,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
rcu_read_lock();
- if (!list_empty(&bond->slave_list))
+ if (bond_has_slaves(bond))
ret = __bond_start_xmit(skb, dev);
else
kfree_skb(skb);
@@ -3878,43 +3774,12 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
-/*
- * set bond mode specific net device operations
- */
-void bond_set_mode_ops(struct bonding *bond, int mode)
-{
- struct net_device *bond_dev = bond->dev;
-
- switch (mode) {
- case BOND_MODE_ROUNDROBIN:
- break;
- case BOND_MODE_ACTIVEBACKUP:
- break;
- case BOND_MODE_XOR:
- bond_set_xmit_hash_policy(bond);
- break;
- case BOND_MODE_BROADCAST:
- break;
- case BOND_MODE_8023AD:
- bond_set_xmit_hash_policy(bond);
- break;
- case BOND_MODE_ALB:
- /* FALLTHRU */
- case BOND_MODE_TLB:
- break;
- default:
- /* Should never happen, mode already checked */
- pr_err("%s: Error: Unknown bonding mode %d\n",
- bond_dev->name, mode);
- break;
- }
-}
-
static int bond_ethtool_get_settings(struct net_device *bond_dev,
struct ethtool_cmd *ecmd)
{
struct bonding *bond = netdev_priv(bond_dev);
unsigned long speed = 0;
+ struct list_head *iter;
struct slave *slave;
ecmd->duplex = DUPLEX_UNKNOWN;
@@ -3926,7 +3791,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
* this is an accurate maximum.
*/
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (SLAVE_IS_OK(slave)) {
if (slave->speed != SPEED_UNKNOWN)
speed += slave->speed;
@@ -3994,14 +3859,13 @@ static void bond_destructor(struct net_device *bond_dev)
free_netdev(bond_dev);
}
-static void bond_setup(struct net_device *bond_dev)
+void bond_setup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
/* initialize rwlocks */
rwlock_init(&bond->lock);
rwlock_init(&bond->curr_slave_lock);
- INIT_LIST_HEAD(&bond->slave_list);
bond->params = bonding_defaults;
/* Initialize pointers */
@@ -4011,7 +3875,6 @@ static void bond_setup(struct net_device *bond_dev)
ether_setup(bond_dev);
bond_dev->netdev_ops = &bond_netdev_ops;
bond_dev->ethtool_ops = &bond_ethtool_ops;
- bond_set_mode_ops(bond, bond->params.mode);
bond_dev->destructor = bond_destructor;
@@ -4057,12 +3920,13 @@ static void bond_setup(struct net_device *bond_dev)
static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave, *tmp_slave;
+ struct list_head *iter;
+ struct slave *slave;
bond_netpoll_cleanup(bond_dev);
/* Release the bonded slaves */
- list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list)
+ bond_for_each_slave(bond, slave, iter)
__bond_release_one(bond_dev, slave->dev, true);
pr_info("%s: released all slaves\n", bond_dev->name);
@@ -4495,32 +4359,11 @@ static int bond_init(struct net_device *bond_dev)
return 0;
}
-static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
-{
- if (tb[IFLA_ADDRESS]) {
- if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
- return -EINVAL;
- if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
- return -EADDRNOTAVAIL;
- }
- return 0;
-}
-
-static unsigned int bond_get_num_tx_queues(void)
+unsigned int bond_get_num_tx_queues(void)
{
return tx_queues;
}
-static struct rtnl_link_ops bond_link_ops __read_mostly = {
- .kind = "bond",
- .priv_size = sizeof(struct bonding),
- .setup = bond_setup,
- .validate = bond_validate,
- .get_num_tx_queues = bond_get_num_tx_queues,
- .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
- as for TX queues */
-};
-
/* Create a new bond based on the specified name and bonding parameters.
* If name is NULL, obtain a suitable "bond%d" name for us.
* Caller must NOT hold rtnl_lock; we need to release it here before we
@@ -4607,7 +4450,7 @@ static int __init bonding_init(void)
if (res)
goto out;
- res = rtnl_link_register(&bond_link_ops);
+ res = bond_netlink_init();
if (res)
goto err_link;
@@ -4623,7 +4466,7 @@ static int __init bonding_init(void)
out:
return res;
err:
- rtnl_link_unregister(&bond_link_ops);
+ bond_netlink_fini();
err_link:
unregister_pernet_subsys(&bond_net_ops);
goto out;
@@ -4636,7 +4479,7 @@ static void __exit bonding_exit(void)
bond_destroy_debugfs();
- rtnl_link_unregister(&bond_link_ops);
+ bond_netlink_fini();
unregister_pernet_subsys(&bond_net_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4653,4 +4496,3 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
-MODULE_ALIAS_RTNL_LINK("bond");
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
new file mode 100644
index 000000000000..40e7b1cb4aea
--- /dev/null
+++ b/drivers/net/bonding/bond_netlink.c
@@ -0,0 +1,131 @@
+/*
+ * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_link.h>
+#include <linux/if_ether.h>
+#include <net/netlink.h>
+#include <net/rtnetlink.h>
+#include "bonding.h"
+
+static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
+ [IFLA_BOND_MODE] = { .type = NLA_U8 },
+ [IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
+};
+
+static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static int bond_changelink(struct net_device *bond_dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ int err;
+
+ if (data && data[IFLA_BOND_MODE]) {
+ int mode = nla_get_u8(data[IFLA_BOND_MODE]);
+
+ err = bond_option_mode_set(bond, mode);
+ if (err)
+ return err;
+ }
+ if (data && data[IFLA_BOND_ACTIVE_SLAVE]) {
+ int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
+ struct net_device *slave_dev;
+
+ if (ifindex == 0) {
+ slave_dev = NULL;
+ } else {
+ slave_dev = __dev_get_by_index(dev_net(bond_dev),
+ ifindex);
+ if (!slave_dev)
+ return -ENODEV;
+ }
+ err = bond_option_active_slave_set(bond, slave_dev);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ int err;
+
+ err = bond_changelink(bond_dev, tb, data);
+ if (err < 0)
+ return err;
+
+ return register_netdevice(bond_dev);
+}
+
+static size_t bond_get_size(const struct net_device *bond_dev)
+{
+ return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
+ nla_total_size(sizeof(u32)); /* IFLA_BOND_ACTIVE_SLAVE */
+}
+
+static int bond_fill_info(struct sk_buff *skb,
+ const struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct net_device *slave_dev = bond_option_active_slave_get(bond);
+
+ if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode) ||
+ (slave_dev &&
+ nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+struct rtnl_link_ops bond_link_ops __read_mostly = {
+ .kind = "bond",
+ .priv_size = sizeof(struct bonding),
+ .setup = bond_setup,
+ .maxtype = IFLA_BOND_MAX,
+ .policy = bond_policy,
+ .validate = bond_validate,
+ .newlink = bond_newlink,
+ .changelink = bond_changelink,
+ .get_size = bond_get_size,
+ .fill_info = bond_fill_info,
+ .get_num_tx_queues = bond_get_num_tx_queues,
+ .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
+ as for TX queues */
+};
+
+int __init bond_netlink_init(void)
+{
+ return rtnl_link_register(&bond_link_ops);
+}
+
+void bond_netlink_fini(void)
+{
+ rtnl_link_unregister(&bond_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("bond");
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
new file mode 100644
index 000000000000..9a5223c7b4d1
--- /dev/null
+++ b/drivers/net/bonding/bond_options.c
@@ -0,0 +1,142 @@
+/*
+ * drivers/net/bond/bond_options.c - bonding options
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/rwlock.h>
+#include <linux/rcupdate.h>
+#include "bonding.h"
+
+static bool bond_mode_is_valid(int mode)
+{
+ int i;
+
+ for (i = 0; bond_mode_tbl[i].modename; i++);
+
+ return mode >= 0 && mode < i;
+}
+
+int bond_option_mode_set(struct bonding *bond, int mode)
+{
+ if (!bond_mode_is_valid(mode)) {
+ pr_err("invalid mode value %d.\n", mode);
+ return -EINVAL;
+ }
+
+ if (bond->dev->flags & IFF_UP) {
+ pr_err("%s: unable to update mode because interface is up.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (bond_has_slaves(bond)) {
+ pr_err("%s: unable to update mode because bond has slaves.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) {
+ pr_err("%s: %s mode is incompatible with arp monitoring.\n",
+ bond->dev->name, bond_mode_tbl[mode].modename);
+ return -EINVAL;
+ }
+
+ /* don't cache arp_validate between modes */
+ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+ bond->params.mode = mode;
+ return 0;
+}
+
+static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
+ struct slave *slave)
+{
+ return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL;
+}
+
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
+{
+ struct slave *slave = rcu_dereference(bond->curr_active_slave);
+
+ return __bond_option_active_slave_get(bond, slave);
+}
+
+struct net_device *bond_option_active_slave_get(struct bonding *bond)
+{
+ return __bond_option_active_slave_get(bond, bond->curr_active_slave);
+}
+
+int bond_option_active_slave_set(struct bonding *bond,
+ struct net_device *slave_dev)
+{
+ int ret = 0;
+
+ if (slave_dev) {
+ if (!netif_is_bond_slave(slave_dev)) {
+ pr_err("Device %s is not bonding slave.\n",
+ slave_dev->name);
+ return -EINVAL;
+ }
+
+ if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
+ pr_err("%s: Device %s is not our slave.\n",
+ bond->dev->name, slave_dev->name);
+ return -EINVAL;
+ }
+ }
+
+ if (!USES_PRIMARY(bond->params.mode)) {
+ pr_err("%s: Unable to change active slave; %s is in mode %d\n",
+ bond->dev->name, bond->dev->name, bond->params.mode);
+ return -EINVAL;
+ }
+
+ block_netpoll_tx();
+ read_lock(&bond->lock);
+ write_lock_bh(&bond->curr_slave_lock);
+
+ /* check to see if we are clearing active */
+ if (!slave_dev) {
+ pr_info("%s: Clearing current active slave.\n",
+ bond->dev->name);
+ rcu_assign_pointer(bond->curr_active_slave, NULL);
+ bond_select_active_slave(bond);
+ } else {
+ struct slave *old_active = bond->curr_active_slave;
+ struct slave *new_active = bond_slave_get_rtnl(slave_dev);
+
+ BUG_ON(!new_active);
+
+ if (new_active == old_active) {
+ /* do nothing */
+ pr_info("%s: %s is already the current active slave.\n",
+ bond->dev->name, new_active->dev->name);
+ } else {
+ if (old_active && (new_active->link == BOND_LINK_UP) &&
+ IS_UP(new_active->dev)) {
+ pr_info("%s: Setting %s as active slave.\n",
+ bond->dev->name, new_active->dev->name);
+ bond_change_active_slave(bond, new_active);
+ } else {
+ pr_err("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
+ bond->dev->name, new_active->dev->name,
+ new_active->dev->name);
+ ret = -EINVAL;
+ }
+ }
+ }
+
+ write_unlock_bh(&bond->curr_slave_lock);
+ read_unlock(&bond->lock);
+ unblock_netpoll_tx();
+ return ret;
+}
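
The mode validation above relies on bond_mode_tbl being NULL-terminated: bond_mode_is_valid() walks the table only to count its entries and then range-checks the requested mode. A standalone sketch of the same sentinel-counting check, using a hypothetical table rather than the real bond_mode_tbl, is:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct parm_tbl { const char *modename; int mode; };

/* hypothetical table; the real one lives in bond_main.c */
static const struct parm_tbl tbl[] = {
	{ "balance-rr",    0 },
	{ "active-backup", 1 },
	{ "balance-xor",   2 },
	{ NULL,           -1 },	/* sentinel */
};

static bool mode_is_valid(int mode)
{
	int i;

	for (i = 0; tbl[i].modename; i++)
		;			/* count entries up to the sentinel */
	return mode >= 0 && mode < i;
}

int main(void)
{
	printf("%d %d\n", mode_is_valid(1), mode_is_valid(7));	/* 1 0 */
	return 0;
}
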
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 20a6ee25bb63..fb868d6c22da 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -10,8 +10,9 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(&bond->lock)
{
struct bonding *bond = seq->private;
- loff_t off = 0;
+ struct list_head *iter;
struct slave *slave;
+ loff_t off = 0;
/* make sure the bond won't be taken away */
rcu_read_lock();
@@ -20,7 +21,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
if (*pos == 0)
return SEQ_START_TOKEN;
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
if (++off == *pos)
return slave;
@@ -30,17 +31,25 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct bonding *bond = seq->private;
- struct slave *slave = v;
+ struct list_head *iter;
+ struct slave *slave;
+ bool found = false;
++*pos;
if (v == SEQ_START_TOKEN)
return bond_first_slave(bond);
- if (bond_is_last_slave(bond, slave))
+ if (bond_is_last_slave(bond, v))
return NULL;
- slave = bond_next_slave(bond, slave);
- return slave;
+ bond_for_each_slave(bond, slave, iter) {
+ if (found)
+ return slave;
+ if (slave == v)
+ found = true;
+ }
+
+ return NULL;
}
static void bond_info_seq_stop(struct seq_file *seq, void *v)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index c29b836749b6..b9d8f1175ff5 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -149,14 +149,6 @@ err_no_cmd:
return -EPERM;
}
-static const void *bonding_namespace(struct class *cls,
- const struct class_attribute *attr)
-{
- const struct bond_net *bn =
- container_of(attr, struct bond_net, class_attr_bonding_masters);
- return bn->net;
-}
-
/* class attribute for bond_masters file. This ends up in /sys/class/net */
static const struct class_attribute class_attr_bonding_masters = {
.attr = {
@@ -165,44 +157,8 @@ static const struct class_attribute class_attr_bonding_masters = {
},
.show = bonding_show_bonds,
.store = bonding_store_bonds,
- .namespace = bonding_namespace,
};
-int bond_create_slave_symlinks(struct net_device *master,
- struct net_device *slave)
-{
- char linkname[IFNAMSIZ+7];
- int ret = 0;
-
- /* first, create a link from the slave back to the master */
- ret = sysfs_create_link(&(slave->dev.kobj), &(master->dev.kobj),
- "master");
- if (ret)
- return ret;
- /* next, create a link from the master to the slave */
- sprintf(linkname, "slave_%s", slave->name);
- ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
- linkname);
-
- /* free the master link created earlier in case of error */
- if (ret)
- sysfs_remove_link(&(slave->dev.kobj), "master");
-
- return ret;
-
-}
-
-void bond_destroy_slave_symlinks(struct net_device *master,
- struct net_device *slave)
-{
- char linkname[IFNAMSIZ+7];
-
- sysfs_remove_link(&(slave->dev.kobj), "master");
- sprintf(linkname, "slave_%s", slave->name);
- sysfs_remove_link(&(master->dev.kobj), linkname);
-}
-
-
/*
* Show the slaves in the current bond.
*/
@@ -210,11 +166,14 @@ static ssize_t bonding_show_slaves(struct device *d,
struct device_attribute *attr, char *buf)
{
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
struct slave *slave;
int res = 0;
- read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ bond_for_each_slave(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
@@ -224,7 +183,9 @@ static ssize_t bonding_show_slaves(struct device *d,
}
res += sprintf(buf + res, "%s ", slave->dev->name);
}
- read_unlock(&bond->lock);
+
+ rtnl_unlock();
+
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -313,50 +274,26 @@ static ssize_t bonding_store_mode(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
- if (!rtnl_trylock())
- return restart_syscall();
-
- if (bond->dev->flags & IFF_UP) {
- pr_err("unable to update mode of %s because interface is up.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
- if (!list_empty(&bond->slave_list)) {
- pr_err("unable to update mode of %s because it has slaves.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
new_value = bond_parse_parm(buf, bond_mode_tbl);
if (new_value < 0) {
pr_err("%s: Ignoring invalid mode value %.*s.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if ((new_value == BOND_MODE_ALB ||
- new_value == BOND_MODE_TLB) &&
- bond->params.arp_interval) {
- pr_err("%s: %s mode is incompatible with arp monitoring.\n",
- bond->dev->name, bond_mode_tbl[new_value].modename);
- ret = -EINVAL;
- goto out;
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_mode_set(bond, new_value);
+ if (!ret) {
+ pr_info("%s: setting mode to %s (%d).\n",
+ bond->dev->name, bond_mode_tbl[new_value].modename,
+ new_value);
+ ret = count;
}
- /* don't cache arp_validate between modes */
- bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
- bond->params.mode = new_value;
- bond_set_mode_ops(bond, bond->params.mode);
- pr_info("%s: setting mode to %s (%d).\n",
- bond->dev->name, bond_mode_tbl[new_value].modename,
- new_value);
-out:
rtnl_unlock();
return ret;
}
@@ -392,7 +329,6 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
ret = -EINVAL;
} else {
bond->params.xmit_policy = new_value;
- bond_set_mode_ops(bond, bond->params.mode);
pr_info("%s: setting xmit hash policy to %s (%d).\n",
bond->dev->name,
xmit_hashtype_tbl[new_value].modename, new_value);
@@ -522,7 +458,7 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
if (!rtnl_trylock())
return restart_syscall();
- if (!list_empty(&bond->slave_list)) {
+ if (bond_has_slaves(bond)) {
pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
bond->dev->name);
ret = -EPERM;
@@ -656,11 +592,15 @@ static ssize_t bonding_store_arp_targets(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
struct slave *slave;
__be32 newtarget, *targets;
unsigned long *targets_rx;
int ind, i, j, ret = -EINVAL;
+ if (!rtnl_trylock())
+ return restart_syscall();
+
targets = bond->params.arp_targets;
newtarget = in_aton(buf + 1);
/* look for adds */
@@ -688,7 +628,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
&newtarget);
/* not to race with bond_arp_rcv */
write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
slave->target_last_arp_rx[ind] = jiffies;
targets[ind] = newtarget;
write_unlock_bh(&bond->lock);
@@ -714,7 +654,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
&newtarget);
write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
targets_rx = slave->target_last_arp_rx;
j = ind;
for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
@@ -734,6 +674,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
ret = count;
out:
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
@@ -1111,6 +1052,7 @@ static ssize_t bonding_store_primary(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
char ifname[IFNAMSIZ];
struct slave *slave;
@@ -1138,7 +1080,7 @@ static ssize_t bonding_store_primary(struct device *d,
goto out;
}
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
pr_info("%s: Setting %s as primary slave.\n",
bond->dev->name, slave->dev->name);
@@ -1268,13 +1210,13 @@ static ssize_t bonding_show_active_slave(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct slave *curr;
+ struct net_device *slave_dev;
int count = 0;
rcu_read_lock();
- curr = rcu_dereference(bond->curr_active_slave);
- if (USES_PRIMARY(bond->params.mode) && curr)
- count = sprintf(buf, "%s\n", curr->dev->name);
+ slave_dev = bond_option_active_slave_get_rcu(bond);
+ if (slave_dev)
+ count = sprintf(buf, "%s\n", slave_dev->name);
rcu_read_unlock();
return count;
@@ -1284,80 +1226,33 @@ static ssize_t bonding_store_active_slave(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct slave *slave, *old_active, *new_active;
+ int ret;
struct bonding *bond = to_bond(d);
char ifname[IFNAMSIZ];
+ struct net_device *dev;
if (!rtnl_trylock())
return restart_syscall();
- old_active = new_active = NULL;
- block_netpoll_tx();
- read_lock(&bond->lock);
- write_lock_bh(&bond->curr_slave_lock);
-
- if (!USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: Unable to change active slave; %s is in mode %d\n",
- bond->dev->name, bond->dev->name, bond->params.mode);
- goto out;
- }
-
sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
-
- /* check to see if we are clearing active */
if (!strlen(ifname) || buf[0] == '\n') {
- pr_info("%s: Clearing current active slave.\n",
- bond->dev->name);
- rcu_assign_pointer(bond->curr_active_slave, NULL);
- bond_select_active_slave(bond);
- goto out;
- }
-
- bond_for_each_slave(bond, slave) {
- if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
- old_active = bond->curr_active_slave;
- new_active = slave;
- if (new_active == old_active) {
- /* do nothing */
- pr_info("%s: %s is already the current"
- " active slave.\n",
- bond->dev->name,
- slave->dev->name);
- goto out;
- } else {
- if ((new_active) &&
- (old_active) &&
- (new_active->link == BOND_LINK_UP) &&
- IS_UP(new_active->dev)) {
- pr_info("%s: Setting %s as active"
- " slave.\n",
- bond->dev->name,
- slave->dev->name);
- bond_change_active_slave(bond,
- new_active);
- } else {
- pr_info("%s: Could not set %s as"
- " active slave; either %s is"
- " down or the link is down.\n",
- bond->dev->name,
- slave->dev->name,
- slave->dev->name);
- }
- goto out;
- }
+ dev = NULL;
+ } else {
+ dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+ if (!dev) {
+ ret = -ENODEV;
+ goto out;
}
}
- pr_info("%s: Unable to set %.*s as active slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
- out:
- write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- unblock_netpoll_tx();
+ ret = bond_option_active_slave_set(bond, dev);
+ if (!ret)
+ ret = count;
+ out:
rtnl_unlock();
- return count;
+ return ret;
}
static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
@@ -1493,14 +1388,14 @@ static ssize_t bonding_show_queue_id(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
struct slave *slave;
int res = 0;
if (!rtnl_trylock())
return restart_syscall();
- read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
@@ -1511,9 +1406,9 @@ static ssize_t bonding_show_queue_id(struct device *d,
res += sprintf(buf + res, "%s:%d ",
slave->dev->name, slave->queue_id);
}
- read_unlock(&bond->lock);
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
+
rtnl_unlock();
return res;
@@ -1529,6 +1424,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
{
struct slave *slave, *update_slave;
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
u16 qid;
int ret = count;
char *delim;
@@ -1561,11 +1457,9 @@ static ssize_t bonding_store_queue_id(struct device *d,
if (!sdev)
goto err_no_cmd;
- read_lock(&bond->lock);
-
/* Search for the slave and check for duplicate qids */
update_slave = NULL;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (sdev == slave->dev)
/*
* We don't need to check the matching
@@ -1573,23 +1467,20 @@ static ssize_t bonding_store_queue_id(struct device *d,
*/
update_slave = slave;
else if (qid && qid == slave->queue_id) {
- goto err_no_cmd_unlock;
+ goto err_no_cmd;
}
}
if (!update_slave)
- goto err_no_cmd_unlock;
+ goto err_no_cmd;
/* Actually set the qids for the slave */
update_slave->queue_id = qid;
- read_unlock(&bond->lock);
out:
rtnl_unlock();
return ret;
-err_no_cmd_unlock:
- read_unlock(&bond->lock);
err_no_cmd:
pr_info("invalid input for queue_id set for %s.\n",
bond->dev->name);
@@ -1619,8 +1510,12 @@ static ssize_t bonding_store_slaves_active(struct device *d,
{
struct bonding *bond = to_bond(d);
int new_value, ret = count;
+ struct list_head *iter;
struct slave *slave;
+ if (!rtnl_trylock())
+ return restart_syscall();
+
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no all_slaves_active value specified.\n",
bond->dev->name);
@@ -1640,8 +1535,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
goto out;
}
- read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (!bond_is_active_slave(slave)) {
if (new_value)
slave->inactive = 0;
@@ -1649,8 +1543,8 @@ static ssize_t bonding_store_slaves_active(struct device *d,
slave->inactive = 1;
}
}
- read_unlock(&bond->lock);
out:
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
@@ -1787,7 +1681,8 @@ int bond_create_sysfs(struct bond_net *bn)
bn->class_attr_bonding_masters = class_attr_bonding_masters;
sysfs_attr_init(&bn->class_attr_bonding_masters.attr);
- ret = netdev_class_create_file(&bn->class_attr_bonding_masters);
+ ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters,
+ bn->net);
/*
* Permit multiple loads of the module by ignoring failures to
* create the bonding_masters sysfs file. Bonding devices
@@ -1817,7 +1712,7 @@ int bond_create_sysfs(struct bond_net *bn)
*/
void bond_destroy_sysfs(struct bond_net *bn)
{
- netdev_class_remove_file(&bn->class_attr_bonding_masters);
+ netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
}
/*
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 03cf3fd14490..046a60535e04 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -58,6 +58,11 @@
#define TX_QUEUE_OVERRIDE(mode) \
(((mode) == BOND_MODE_ACTIVEBACKUP) || \
((mode) == BOND_MODE_ROUNDROBIN))
+
+#define BOND_MODE_IS_LB(mode) \
+ (((mode) == BOND_MODE_TLB) || \
+ ((mode) == BOND_MODE_ALB))
+
/*
* Less bad way to call ioctl from within the kernel; this needs to be
* done some other way to get the call out of interrupt context.
@@ -72,63 +77,37 @@
res; })
/* slave list primitives */
-#define bond_to_slave(ptr) list_entry(ptr, struct slave, list)
+#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
+
+#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond))
/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
#define bond_first_slave(bond) \
- list_first_entry_or_null(&(bond)->slave_list, struct slave, list)
+ (bond_has_slaves(bond) ? \
+ netdev_adjacent_get_private(bond_slave_list(bond)->next) : \
+ NULL)
#define bond_last_slave(bond) \
- (list_empty(&(bond)->slave_list) ? NULL : \
- bond_to_slave((bond)->slave_list.prev))
+ (bond_has_slaves(bond) ? \
+ netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
+ NULL)
-#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list)
-#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list)
-
-/* Since bond_first/last_slave can return NULL, these can return NULL too */
-#define bond_next_slave(bond, pos) \
- (bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \
- bond_to_slave((pos)->list.next))
-
-#define bond_prev_slave(bond, pos) \
- (bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \
- bond_to_slave((pos)->list.prev))
-
-/**
- * bond_for_each_slave_from - iterate the slaves list from a starting point
- * @bond: the bond holding this list.
- * @pos: current slave.
- * @cnt: counter for max number of moves
- * @start: starting point.
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_from(bond, pos, cnt, start) \
- for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \
- cnt++, pos = bond_next_slave(bond, pos))
+#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
+#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
/**
* bond_for_each_slave - iterate over all slaves
* @bond: the bond holding this list
* @pos: current slave
+ * @iter: list_head * iterator
*
* Caller must hold bond->lock
*/
-#define bond_for_each_slave(bond, pos) \
- list_for_each_entry(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave(bond, pos, iter) \
+ netdev_for_each_lower_private((bond)->dev, pos, iter)
/* Caller must have rcu_read_lock */
-#define bond_for_each_slave_rcu(bond, pos) \
- list_for_each_entry_rcu(pos, &(bond)->slave_list, list)
-
-/**
- * bond_for_each_slave_reverse - iterate in reverse from a given position
- * @bond: the bond holding this list
- * @pos: slave to continue from
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_continue_reverse(bond, pos) \
- list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave_rcu(bond, pos, iter) \
+ netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;
@@ -188,7 +167,6 @@ struct bond_parm_tbl {
struct slave {
struct net_device *dev; /* first - useful for panic debug */
- struct list_head list;
struct bonding *bond; /* our master */
int delay;
unsigned long jiffies;
@@ -228,7 +206,6 @@ struct slave {
*/
struct bonding {
struct net_device *dev; /* first - useful for panic debug */
- struct list_head slave_list;
struct slave *curr_active_slave;
struct slave *current_arp_slave;
struct slave *primary_slave;
@@ -245,7 +222,6 @@ struct bonding {
char proc_file_name[IFNAMSIZ];
#endif /* CONFIG_PROC_FS */
struct list_head bond_list;
- int (*xmit_hash_policy)(struct sk_buff *, int);
u16 rr_tx_counter;
struct ad_bond_info ad_info;
struct alb_bond_info alb_info;
@@ -276,13 +252,7 @@ struct bonding {
static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
struct net_device *slave_dev)
{
- struct slave *slave = NULL;
-
- bond_for_each_slave(bond, slave)
- if (slave->dev == slave_dev)
- return slave;
-
- return NULL;
+ return netdev_lower_dev_get_private(bond->dev, slave_dev);
}
static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -294,8 +264,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
static inline bool bond_is_lb(const struct bonding *bond)
{
- return (bond->params.mode == BOND_MODE_TLB ||
- bond->params.mode == BOND_MODE_ALB);
+ return BOND_MODE_IS_LB(bond->params.mode);
}
static inline void bond_set_active_slave(struct slave *slave)
@@ -432,21 +401,18 @@ static inline bool slave_can_tx(struct slave *slave)
struct bond_net;
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);
void bond_prepare_sysfs_group(struct bonding *bond);
-int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
-void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
void bond_mii_monitor(struct work_struct *);
void bond_loadbalance_arp_mon(struct work_struct *);
void bond_activebackup_arp_mon(struct work_struct *);
-void bond_set_mode_ops(struct bonding *bond, int mode);
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
@@ -456,6 +422,14 @@ void bond_debug_register(struct bonding *bond);
void bond_debug_unregister(struct bonding *bond);
void bond_debug_reregister(struct bonding *bond);
const char *bond_mode_name(int mode);
+void bond_setup(struct net_device *bond_dev);
+unsigned int bond_get_num_tx_queues(void);
+int bond_netlink_init(void);
+void bond_netlink_fini(void);
+int bond_option_mode_set(struct bonding *bond, int mode);
+int bond_option_active_slave_set(struct bonding *bond, struct net_device *slave_dev);
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
+struct net_device *bond_option_active_slave_get(struct bonding *bond);
struct bond_net {
struct net * net; /* Associated network namespace */
@@ -492,9 +466,24 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
static inline struct slave *bond_slave_has_mac(struct bonding *bond,
const u8 *mac)
{
+ struct list_head *iter;
struct slave *tmp;
- bond_for_each_slave(bond, tmp)
+ bond_for_each_slave(bond, tmp, iter)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return tmp;
+
+ return NULL;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
+ const u8 *mac)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave_rcu(bond, tmp, iter)
if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
return tmp;
@@ -528,4 +517,7 @@ extern const struct bond_parm_tbl fail_over_mac_tbl[];
extern const struct bond_parm_tbl pri_reselect_tbl[];
extern struct bond_parm_tbl ad_select_tbl[];
+/* exported from bond_netlink.c */
+extern struct rtnl_link_ops bond_link_ops;
+
#endif /* _LINUX_BONDING_H */
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index b9ed1288ce2d..985608634f8c 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -686,18 +686,19 @@ static int cfv_probe(struct virtio_device *vdev)
goto err;
/* Get the CAIF configuration from virtio config space, if available */
-#define GET_VIRTIO_CONFIG_OPS(_v, _var, _f) \
- ((_v)->config->get(_v, offsetof(struct virtio_caif_transf_config, _f), \
- &_var, \
- FIELD_SIZEOF(struct virtio_caif_transf_config, _f)))
-
if (vdev->config->get) {
- GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_hr, headroom);
- GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_hr, headroom);
- GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_tr, tailroom);
- GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_tr, tailroom);
- GET_VIRTIO_CONFIG_OPS(vdev, cfv->mtu, mtu);
- GET_VIRTIO_CONFIG_OPS(vdev, cfv->mru, mtu);
+ virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
+ &cfv->tx_hr);
+ virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
+ &cfv->rx_hr);
+ virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
+ &cfv->tx_tr);
+ virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
+ &cfv->rx_tr);
+ virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
+ &cfv->mtu);
+ virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
+ &cfv->mru);
} else {
cfv->tx_hr = CFV_DEF_HEADROOM;
cfv->rx_hr = CFV_DEF_HEADROOM;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 3b1ff6148702..cf0f63e14e53 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1347,7 +1347,7 @@ static int at91_can_probe(struct platform_device *pdev)
priv->reg_base = addr;
priv->devtype_data = *devtype_data;
priv->clk = clk;
- priv->pdata = pdev->dev.platform_data;
+ priv->pdata = dev_get_platdata(&pdev->dev);
priv->mb0_id = 0x7ff;
netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
@@ -1405,10 +1405,10 @@ static int at91_can_remove(struct platform_device *pdev)
static const struct platform_device_id at91_can_id_table[] = {
{
- .name = "at91_can",
+ .name = "at91sam9x5_can",
.driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
}, {
- .name = "at91sam9x5_can",
+ .name = "at91_can",
.driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
}, {
/* sentinel */
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index a2700d25ff0e..8a0b515b33ea 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -539,7 +539,7 @@ static int bfin_can_probe(struct platform_device *pdev)
struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
unsigned short *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "No platform data provided!\n");
err = -EINVAL;
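Many hunks in this series replace direct pdev->dev.platform_data dereferences with dev_get_platdata(). The accessor is equivalent but keeps drivers from reaching into struct device internals. A minimal sketch of the pattern (driver and pdata names are made up):

#include <linux/platform_device.h>

struct demo_pdata {		/* hypothetical board-supplied data */
	unsigned int osc_freq;
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_pdata *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata)
		return -EINVAL;	/* board code did not provide platform data */

	dev_info(&pdev->dev, "osc_freq=%u\n", pdata->osc_freq);
	return 0;
}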
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index a668cd491cb3..e3fc07cf2f62 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -814,9 +814,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
msg_ctrl_save = priv->read_reg(priv,
C_CAN_IFACE(MSGCTRL_REG, 0));
- if (msg_ctrl_save & IF_MCONT_EOB)
- return num_rx_pkts;
-
if (msg_ctrl_save & IF_MCONT_MSGLST) {
c_can_handle_lost_msg_obj(dev, 0, msg_obj);
num_rx_pkts++;
@@ -824,6 +821,9 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
continue;
}
+ if (msg_ctrl_save & IF_MCONT_EOB)
+ return num_rx_pkts;
+
if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
continue;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index b374be7891a2..bce0be54c2f5 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -160,7 +160,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
return 0;
out_free_c_can:
- pci_set_drvdata(pdev, NULL);
free_c_can_dev(dev);
out_iounmap:
pci_iounmap(pdev, addr);
@@ -181,7 +180,6 @@ static void c_can_pci_remove(struct pci_dev *pdev)
unregister_c_can_dev(dev);
- pci_set_drvdata(pdev, NULL);
free_c_can_dev(dev);
pci_iounmap(pdev, priv->base);
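The pci_set_drvdata(pdev, NULL) calls dropped here (and in several later hunks) are redundant: the driver core clears the drvdata pointer itself after ->remove() or on probe failure, so the teardown path only has to undo what the driver set up. Sketch of a remove routine written that way (names are illustrative):

#include <linux/pci.h>

static void demo_pci_remove(struct pci_dev *pdev)
{
	void __iomem *base = pci_get_drvdata(pdev);	/* whatever probe stored */

	pci_iounmap(pdev, base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* no pci_set_drvdata(pdev, NULL): the core clears it for us */
}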
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 294ced3cc227..d66ac265269c 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -322,7 +322,7 @@ static struct platform_driver c_can_plat_driver = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(c_can_of_table),
+ .of_match_table = c_can_of_table,
},
.probe = c_can_plat_probe,
.remove = c_can_plat_remove,
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 034bdd816a60..ad76734b3ecc 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -152,7 +152,7 @@ static int cc770_get_platform_data(struct platform_device *pdev,
struct cc770_priv *priv)
{
- struct cc770_platform_data *pdata = pdev->dev.platform_data;
+ struct cc770_platform_data *pdata = dev_get_platdata(&pdev->dev);
priv->can.clock.freq = pdata->osc_freq;
if (priv->cpu_interface & CPUIF_DSC)
@@ -203,7 +203,7 @@ static int cc770_platform_probe(struct platform_device *pdev)
if (pdev->dev.of_node)
err = cc770_get_of_node_data(pdev, priv);
- else if (pdev->dev.platform_data)
+ else if (dev_get_platdata(&pdev->dev))
err = cc770_get_platform_data(pdev, priv);
else
err = -ENODEV;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f9cba4123c66..1870c4731a57 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -705,14 +705,14 @@ static size_t can_get_size(const struct net_device *dev)
size_t size;
size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
- size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */
+ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
- size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
- size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */
+ size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
+ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
- size += sizeof(struct can_berr_counter);
+ size += nla_total_size(sizeof(struct can_berr_counter));
if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
- size += sizeof(struct can_bittiming_const);
+ size += nla_total_size(sizeof(struct can_bittiming_const));
return size;
}
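nla_total_size(), used in the corrected size accounting above, returns the payload size rounded up to netlink alignment plus room for the attribute header; summing bare sizeof() values, as the old code did, undercounts the buffer that the rtnl get_size() callback must reserve. A small sketch with a hypothetical attribute struct:

#include <net/netlink.h>

struct demo_attr {
	__u32 a;
	__u32 b;
};

static size_t demo_get_size(void)
{
	size_t size = 0;

	size += nla_total_size(sizeof(u32));			/* e.g. a state word */
	size += nla_total_size(sizeof(struct demo_attr));	/* header + padded payload */

	return size;
}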
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 3f21142138b7..ae08cf129ebb 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -62,7 +62,7 @@
#define FLEXCAN_MCR_BCC BIT(16)
#define FLEXCAN_MCR_LPRIO_EN BIT(13)
#define FLEXCAN_MCR_AEN BIT(12)
-#define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf)
+#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f)
#define FLEXCAN_MCR_IDAM_A (0 << 8)
#define FLEXCAN_MCR_IDAM_B (1 << 8)
#define FLEXCAN_MCR_IDAM_C (2 << 8)
@@ -735,9 +735,11 @@ static int flexcan_chip_start(struct net_device *dev)
*
*/
reg_mcr = flexcan_read(&regs->mcr);
+ reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
- FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS;
+ FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS |
+ FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
flexcan_write(reg_mcr, &regs->mcr);
@@ -771,6 +773,10 @@ static int flexcan_chip_start(struct net_device *dev)
netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
flexcan_write(reg_ctrl, &regs->ctrl);
+ /* Abort any pending TX, mark Mailbox as INACTIVE */
+ flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
+ &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+
/* acceptance mask/acceptance code (accept everything) */
flexcan_write(0x0, &regs->rxgmask);
flexcan_write(0x0, &regs->rx14mask);
@@ -979,9 +985,9 @@ static void unregister_flexcandev(struct net_device *dev)
}
static const struct of_device_id flexcan_of_match[] = {
- { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
- { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+ { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+ { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -1062,7 +1068,7 @@ static int flexcan_probe(struct platform_device *pdev)
priv->dev = dev;
priv->clk_ipg = clk_ipg;
priv->clk_per = clk_per;
- priv->pdata = pdev->dev.platform_data;
+ priv->pdata = dev_get_platdata(&pdev->dev);
priv->devtype_data = devtype_data;
priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
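The flexcan hunks widen the MAXMB mask so mailbox indices above 15 can be programmed, clear the whole field before OR-ing in the TX buffer id, and park the TX mailbox in the INACTIVE state. The part worth generalising is the read-modify-write of a register field; a sketch with made-up names:

#include <linux/io.h>

#define DEMO_FIELD(x)	((x) & 0x1f)	/* 5-bit field, mirrors FLEXCAN_MCR_MAXMB() */

/* Clear the whole field first, then OR in the new value, so stale bits
 * left by reset defaults or a bootloader cannot survive.
 * (demo_* names are illustrative.)
 */
static void demo_set_field(void __iomem *reg, u32 val)
{
	u32 tmp = readl(reg);

	tmp &= ~DEMO_FIELD(0xff);	/* clear every bit of the field */
	tmp |= DEMO_FIELD(val);		/* program the requested value */
	writel(tmp, reg);
}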
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 6aa737a24393..ab506d6cab37 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -34,10 +34,7 @@
#include <linux/io.h>
#include <linux/can/dev.h>
#include <linux/spinlock.h>
-
#include <linux/of_platform.h>
-#include <asm/prom.h>
-
#include <linux/of_irq.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 36bd6fa1c7f3..ab5909a7bae9 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1769,7 +1769,7 @@ static int ican3_probe(struct platform_device *pdev)
struct device *dev;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata)
return -ENXIO;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index fe7dd696957e..08ac401e0214 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -999,7 +999,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
{
struct net_device *net;
struct mcp251x_priv *priv;
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+ struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
int ret = -ENODEV;
if (!pdata)
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 9c24d60a23b1..e98abb97a050 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -297,8 +297,8 @@ struct mscan_priv {
struct napi_struct napi;
};
-extern struct net_device *alloc_mscandev(void);
-extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
-extern void unregister_mscandev(struct net_device *dev);
+struct net_device *alloc_mscandev(void);
+int register_mscandev(struct net_device *dev, int mscan_clksrc);
+void unregister_mscandev(struct net_device *dev);
#endif /* __MSCAN_H__ */
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 5c314a961970..5f0e9b3bfa7b 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -964,7 +964,6 @@ static void pch_can_remove(struct pci_dev *pdev)
pci_disable_msi(priv->dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
pch_can_reset(priv);
pci_iounmap(pdev, priv->regs);
free_candev(priv->ndev);
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 3752342a678a..835921388e7b 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -207,7 +207,6 @@ static void ems_pci_del_card(struct pci_dev *pdev)
kfree(card);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static void ems_pci_card_reset(struct ems_pci_card *card)
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 217585b97cd3..087b13bd300e 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -387,7 +387,6 @@ static void kvaser_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static struct pci_driver kvaser_pci_driver = {
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 6b6f0ad75090..065ca49eb45e 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -744,8 +744,6 @@ static void peak_pci_remove(struct pci_dev *pdev)
pci_iounmap(pdev, cfg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
-
- pci_set_drvdata(pdev, NULL);
}
static struct pci_driver peak_pci_driver = {
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index c52c1e96bf90..f9b4f81cd86a 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -477,7 +477,6 @@ static void plx_pci_del_card(struct pci_dev *pdev)
kfree(card);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
/*
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 31ad33911167..047accd4ede5 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -44,7 +44,6 @@
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <asm/prom.h>
#include "sja1000.h"
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 8e259c541036..29f9b6321187 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -76,7 +76,7 @@ static int sp_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq;
struct sja1000_platform_data *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "No platform data provided!\n");
err = -ENODEV;
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
index afd7d85b6915..35f062282dbd 100644
--- a/drivers/net/can/softing/softing.h
+++ b/drivers/net/can/softing/softing.h
@@ -71,34 +71,34 @@ struct softing {
} id;
};
-extern int softing_default_output(struct net_device *netdev);
+int softing_default_output(struct net_device *netdev);
-extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
+ktime_t softing_raw2ktime(struct softing *card, u32 raw);
-extern int softing_chip_poweron(struct softing *card);
+int softing_chip_poweron(struct softing *card);
-extern int softing_bootloader_command(struct softing *card, int16_t cmd,
- const char *msg);
+int softing_bootloader_command(struct softing *card, int16_t cmd,
+ const char *msg);
/* Load firmware after reset */
-extern int softing_load_fw(const char *file, struct softing *card,
- __iomem uint8_t *virt, unsigned int size, int offset);
+int softing_load_fw(const char *file, struct softing *card,
+ __iomem uint8_t *virt, unsigned int size, int offset);
/* Load final application firmware after bootloader */
-extern int softing_load_app_fw(const char *file, struct softing *card);
+int softing_load_app_fw(const char *file, struct softing *card);
/*
* enable or disable irq
* only called with fw.lock locked
*/
-extern int softing_enable_irq(struct softing *card, int enable);
+int softing_enable_irq(struct softing *card, int enable);
/* start/stop 1 bus on card */
-extern int softing_startstop(struct net_device *netdev, int up);
+int softing_startstop(struct net_device *netdev, int up);
/* netif_rx() */
-extern int softing_netdev_rx(struct net_device *netdev,
- const struct can_frame *msg, ktime_t ktime);
+int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
+ ktime_t ktime);
/* SOFTING DPRAM mappings */
#define DPRAM_RX 0x0000
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 65eef1eea2e2..6cd5c01b624d 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -768,7 +768,7 @@ static int softing_pdev_remove(struct platform_device *pdev)
static int softing_pdev_probe(struct platform_device *pdev)
{
- const struct softing_platform_data *pdat = pdev->dev.platform_data;
+ const struct softing_platform_data *pdat = dev_get_platdata(&pdev->dev);
struct softing *card;
struct net_device *netdev;
struct softing_priv *priv;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 3a349a22d5bc..beb5ef834f0f 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -894,7 +894,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
void __iomem *addr;
int err = -ENODEV;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "No platform data\n");
goto probe_exit;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 3b9546588240..4b2d5ed62b11 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1544,9 +1544,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
return 0;
}
-static void kvaser_usb_get_endpoints(const struct usb_interface *intf,
- struct usb_endpoint_descriptor **in,
- struct usb_endpoint_descriptor **out)
+static int kvaser_usb_get_endpoints(const struct usb_interface *intf,
+ struct usb_endpoint_descriptor **in,
+ struct usb_endpoint_descriptor **out)
{
const struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
@@ -1557,12 +1557,18 @@ static void kvaser_usb_get_endpoints(const struct usb_interface *intf,
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
- if (usb_endpoint_is_bulk_in(endpoint))
+ if (!*in && usb_endpoint_is_bulk_in(endpoint))
*in = endpoint;
- if (usb_endpoint_is_bulk_out(endpoint))
+ if (!*out && usb_endpoint_is_bulk_out(endpoint))
*out = endpoint;
+
+ /* use first bulk endpoint for in and out */
+ if (*in && *out)
+ return 0;
}
+
+ return -ENODEV;
}
static int kvaser_usb_probe(struct usb_interface *intf,
@@ -1576,8 +1582,8 @@ static int kvaser_usb_probe(struct usb_interface *intf,
if (!dev)
return -ENOMEM;
- kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
- if (!dev->bulk_in || !dev->bulk_out) {
+ err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
+ if (err) {
dev_err(&intf->dev, "Cannot get usb endpoint(s)");
return err;
}
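kvaser_usb_get_endpoints() now reports -ENODEV itself instead of leaving the caller to test the pointers, and it settles on the first bulk-in/bulk-out pair it finds. Walking an interface's current altsetting this way is a common USB-driver pattern; a minimal sketch (the demo_* name is made up, the driver's helper does the same walk):

#include <linux/usb.h>

static int demo_find_bulk_endpoints(struct usb_interface *intf,
				    struct usb_endpoint_descriptor **in,
				    struct usb_endpoint_descriptor **out)
{
	const struct usb_host_interface *alt = intf->cur_altsetting;
	int i;

	*in = *out = NULL;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		struct usb_endpoint_descriptor *ep = &alt->endpoint[i].desc;

		if (!*in && usb_endpoint_is_bulk_in(ep))
			*in = ep;
		if (!*out && usb_endpoint_is_bulk_out(ep))
			*out = ep;
		/* stop at the first usable pair */
		if (*in && *out)
			return 0;
	}
	return -ENODEV;
}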
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index f00c76377b44..65b735d4a6ad 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -35,7 +35,7 @@ config EL3
config 3C515
tristate "3c515 ISA \"Fast EtherLink\""
- depends on (ISA || EISA) && ISA_DMA_API
+ depends on ISA && ISA_DMA_API
---help---
If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
network card, say Y and read the Ethernet-HOWTO, available from
@@ -70,7 +70,7 @@ config VORTEX
select MII
---help---
This option enables driver support for a large number of 10Mbps and
- 10/100Mbps EISA, PCI and PCMCIA 3Com network cards:
+ 10/100Mbps EISA, PCI and Cardbus 3Com network cards:
"Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
"Boomerang" (EtherLink XL 3c900 or 3c905) PCI
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 144942f6372b..465cc7108d8a 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2525,7 +2525,6 @@ typhoon_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_clear_mwi(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index ef325ffa1b5a..2923c51bb351 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -28,42 +28,42 @@ extern int ei_debug;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern void ei_poll(struct net_device *dev);
-extern void eip_poll(struct net_device *dev);
+void ei_poll(struct net_device *dev);
+void eip_poll(struct net_device *dev);
#endif
/* Without I/O delay - non ISA or later chips */
-extern void NS8390_init(struct net_device *dev, int startp);
-extern int ei_open(struct net_device *dev);
-extern int ei_close(struct net_device *dev);
-extern irqreturn_t ei_interrupt(int irq, void *dev_id);
-extern void ei_tx_timeout(struct net_device *dev);
-extern netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void ei_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *ei_get_stats(struct net_device *dev);
+void NS8390_init(struct net_device *dev, int startp);
+int ei_open(struct net_device *dev);
+int ei_close(struct net_device *dev);
+irqreturn_t ei_interrupt(int irq, void *dev_id);
+void ei_tx_timeout(struct net_device *dev);
+netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void ei_set_multicast_list(struct net_device *dev);
+struct net_device_stats *ei_get_stats(struct net_device *dev);
extern const struct net_device_ops ei_netdev_ops;
-extern struct net_device *__alloc_ei_netdev(int size);
+struct net_device *__alloc_ei_netdev(int size);
static inline struct net_device *alloc_ei_netdev(void)
{
return __alloc_ei_netdev(0);
}
/* With I/O delay form */
-extern void NS8390p_init(struct net_device *dev, int startp);
-extern int eip_open(struct net_device *dev);
-extern int eip_close(struct net_device *dev);
-extern irqreturn_t eip_interrupt(int irq, void *dev_id);
-extern void eip_tx_timeout(struct net_device *dev);
-extern netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void eip_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *eip_get_stats(struct net_device *dev);
+void NS8390p_init(struct net_device *dev, int startp);
+int eip_open(struct net_device *dev);
+int eip_close(struct net_device *dev);
+irqreturn_t eip_interrupt(int irq, void *dev_id);
+void eip_tx_timeout(struct net_device *dev);
+netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void eip_set_multicast_list(struct net_device *dev);
+struct net_device_stats *eip_get_stats(struct net_device *dev);
extern const struct net_device_ops eip_netdev_ops;
-extern struct net_device *__alloc_eip_netdev(int size);
+struct net_device *__alloc_eip_netdev(int size);
static inline struct net_device *alloc_eip_netdev(void)
{
return __alloc_eip_netdev(0);
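Dropping `extern` from function prototypes, as done throughout these headers, changes nothing semantically: file-scope function declarations have external linkage by default, so the two forms below are equivalent and the shorter one is simply the preferred kernel style (the name is illustrative).

extern int demo_probe(void);	/* old style */
int demo_probe(void);		/* preferred style, same linkage */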
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index becef25fa194..0988811f4e40 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -146,13 +146,6 @@ config PCMCIA_PCNET
To compile this driver as a module, choose M here: the module will be
called pcnet_cs. If unsure, say N.
-config NE_H8300
- tristate "NE2000 compatible support for H8/300"
- depends on H8300H_AKI3068NET || H8300H_H8MAX
- ---help---
- Say Y here if you want to use the NE2000 compatible
- controller on the Renesas H8/300 processor.
-
config STNIC
tristate "National DP83902AV support"
depends on SUPERH
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index 588954a79b2a..ff3b31894188 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_HYDRA) += hydra.o 8390.o
obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
obj-$(CONFIG_NE2000) += ne.o 8390p.o
obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
-obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o
obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
obj-$(CONFIG_STNIC) += stnic.o 8390.o
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index f92f001551da..36fa577970bb 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -702,7 +702,7 @@ static int ax_init_dev(struct net_device *dev)
for (i = 0; i < 16; i++)
SA_prom[i] = SA_prom[i+i];
- memcpy(dev->dev_addr, SA_prom, 6);
+ memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
}
#ifdef CONFIG_AX88796_93CX6
diff --git a/drivers/net/ethernet/8390/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c
deleted file mode 100644
index 7fc28f2d28a6..000000000000
--- a/drivers/net/ethernet/8390/ne-h8300.c
+++ /dev/null
@@ -1,684 +0,0 @@
-/* ne-h8300.c: A NE2000 clone on H8/300 driver for linux. */
-/*
- original ne.c
- Written 1992-94 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
-
- H8/300 modified
- Yoshinori Sato <ysato@users.sourceforge.jp>
-*/
-
-static const char version1[] =
-"ne-h8300.c:v1.00 2004/04/11 ysato\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#define EI_SHIFT(x) (ei_local->reg_offset[x])
-
-#include "8390.h"
-
-#define DRV_NAME "ne-h8300"
-
-/* Some defines that people can play with if so inclined. */
-
-/* Do we perform extra sanity checks on stuff ? */
-/* #define NE_SANITY_CHECK */
-
-/* Do we implement the read before write bugfix ? */
-/* #define NE_RW_BUGFIX */
-
-/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
-/* #define PACKETBUF_MEMSIZE 0x40 */
-
-/* A zero-terminated list of I/O addresses to be probed at boot. */
-
-/* ---- No user-serviceable parts below ---- */
-
-static const char version[] =
- "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include "lib8390.c"
-
-#define NE_BASE (dev->base_addr)
-#define NE_CMD 0x00
-#define NE_DATAPORT (ei_status.word16?0x20:0x10) /* NatSemi-defined port window offset. */
-#define NE_RESET (ei_status.word16?0x3f:0x1f) /* Issue a read to reset, a write to clear. */
-#define NE_IO_EXTENT (ei_status.word16?0x40:0x20)
-
-#define NESM_START_PG 0x40 /* First page of TX buffer */
-#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
-
-static int ne_probe1(struct net_device *dev, int ioaddr);
-
-static int ne_open(struct net_device *dev);
-static int ne_close(struct net_device *dev);
-
-static void ne_reset_8390(struct net_device *dev);
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static void ne_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void ne_block_output(struct net_device *dev, const int count,
- const unsigned char *buf, const int start_page);
-
-
-static u32 reg_offset[16];
-
-static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
-{
- struct ei_device *ei_local = netdev_priv(dev);
- int i;
- unsigned char bus_width;
-
- bus_width = *(volatile unsigned char *)ABWCR;
- bus_width &= 1 << ((base_addr >> 21) & 7);
-
- for (i = 0; i < ARRAY_SIZE(reg_offset); i++)
- if (bus_width == 0)
- reg_offset[i] = i * 2 + 1;
- else
- reg_offset[i] = i;
-
- ei_local->reg_offset = reg_offset;
- return 0;
-}
-
-static int __initdata h8300_ne_count = 0;
-#ifdef CONFIG_H8300H_H8MAX
-static unsigned long __initdata h8300_ne_base[] = { 0x800600 };
-static int h8300_ne_irq[] = {EXT_IRQ4};
-#endif
-#ifdef CONFIG_H8300H_AKI3068NET
-static unsigned long __initdata h8300_ne_base[] = { 0x200000 };
-static int h8300_ne_irq[] = {EXT_IRQ5};
-#endif
-
-static inline int init_dev(struct net_device *dev)
-{
- if (h8300_ne_count < ARRAY_SIZE(h8300_ne_base)) {
- dev->base_addr = h8300_ne_base[h8300_ne_count];
- dev->irq = h8300_ne_irq[h8300_ne_count];
- h8300_ne_count++;
- return 0;
- } else
- return -ENODEV;
-}
-
-/* Probe for various non-shared-memory ethercards.
-
- NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
- buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
- the SAPROM, while other supposed NE2000 clones must be detected by their
- SA prefix.
-
- Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
- mode results in doubled values, which can be detected and compensated for.
-
- The probe is also responsible for initializing the card and filling
- in the 'dev' and 'ei_status' structures.
-
- We use the minimum memory size for some ethercard product lines, iff we can't
- distinguish models. You can increase the packet buffer size by setting
- PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are:
- E1010 starts at 0x100 and ends at 0x2000.
- E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
- E2010 starts at 0x100 and ends at 0x4000.
- E2010-x starts at 0x100 and ends at 0xffff. */
-
-static int __init do_ne_probe(struct net_device *dev)
-{
- unsigned int base_addr = dev->base_addr;
-
- /* First check any supplied i/o locations. User knows best. <cough> */
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return ne_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- return -ENODEV;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, NE_IO_EXTENT);
-}
-
-#ifndef MODULE
-struct net_device * __init ne_probe(int unit)
-{
- struct net_device *dev = ____alloc_ei_netdev(0);
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (init_dev(dev))
- return ERR_PTR(-ENODEV);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = init_reg_offset(dev, dev->base_addr);
- if (err)
- goto out;
-
- err = do_ne_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops ne_netdev_ops = {
- .ndo_open = ne_open,
- .ndo_stop = ne_close,
-
- .ndo_start_xmit = __ei_start_xmit,
- .ndo_tx_timeout = __ei_tx_timeout,
- .ndo_get_stats = __ei_get_stats,
- .ndo_set_rx_mode = __ei_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = __ei_poll,
-#endif
-};
-
-static int __init ne_probe1(struct net_device *dev, int ioaddr)
-{
- int i;
- unsigned char SA_prom[16];
- int wordlength = 2;
- const char *name = NULL;
- int start_page, stop_page;
- int reg0, ret;
- static unsigned version_printed;
- struct ei_device *ei_local = netdev_priv(dev);
- unsigned char bus_width;
-
- if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- reg0 = inb_p(ioaddr);
- if (reg0 == 0xFF) {
- ret = -ENODEV;
- goto err_out;
- }
-
- /* Do a preliminary verification that we have a 8390. */
- {
- int regd;
- outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
- regd = inb_p(ioaddr + EI_SHIFT(0x0d));
- outb_p(0xff, ioaddr + EI_SHIFT(0x0d));
- outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
- inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
- if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
- outb_p(reg0, ioaddr + EI_SHIFT(0));
- outb_p(regd, ioaddr + EI_SHIFT(0x0d)); /* Restore the old values. */
- ret = -ENODEV;
- goto err_out;
- }
- }
-
- if (ei_debug && version_printed++ == 0)
- printk(KERN_INFO "%s", version1);
-
- printk(KERN_INFO "NE*000 ethercard probe at %08x:", ioaddr);
-
- /* Read the 16 bytes of station address PROM.
- We must first initialize registers, similar to NS8390_init(eifdev, 0).
- We can't reliably read the SAPROM address without this.
- (I learned the hard way!). */
- {
- struct {unsigned char value, offset; } program_seq[] =
- {
- {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
- {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
- {0x00, EN0_RCNTLO}, /* Clear the count regs. */
- {0x00, EN0_RCNTHI},
- {0x00, EN0_IMR}, /* Mask completion irq. */
- {0xFF, EN0_ISR},
- {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
- {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
- {32, EN0_RCNTLO},
- {0x00, EN0_RCNTHI},
- {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
- {0x00, EN0_RSARHI},
- {E8390_RREAD+E8390_START, E8390_CMD},
- };
-
- for (i = 0; i < ARRAY_SIZE(program_seq); i++)
- outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
-
- }
- bus_width = *(volatile unsigned char *)ABWCR;
- bus_width &= 1 << ((ioaddr >> 21) & 7);
- ei_status.word16 = (bus_width == 0); /* temporary setting */
- for(i = 0; i < 16 /*sizeof(SA_prom)*/; i++) {
- SA_prom[i] = inb_p(ioaddr + NE_DATAPORT);
- inb_p(ioaddr + NE_DATAPORT); /* dummy read */
- }
-
- start_page = NESM_START_PG;
- stop_page = NESM_STOP_PG;
-
- if (bus_width)
- wordlength = 1;
- else
- outb_p(0x49, ioaddr + EN0_DCFG);
-
- /* Set up the rest of the parameters. */
- name = (wordlength == 2) ? "NE2000" : "NE1000";
-
- if (! dev->irq) {
- printk(" failed to detect IRQ line.\n");
- ret = -EAGAIN;
- goto err_out;
- }
-
- /* Snarf the interrupt now. There's no point in waiting since we cannot
- share and the board will usually be enabled. */
- ret = request_irq(dev->irq, __ei_interrupt, 0, name, dev);
- if (ret) {
- printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
- goto err_out;
- }
-
- dev->base_addr = ioaddr;
-
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
- printk(" %pM\n", dev->dev_addr);
-
- printk("%s: %s found at %#x, using IRQ %d.\n",
- dev->name, name, ioaddr, dev->irq);
-
- ei_status.name = name;
- ei_status.tx_start_page = start_page;
- ei_status.stop_page = stop_page;
- ei_status.word16 = (wordlength == 2);
-
- ei_status.rx_start_page = start_page + TX_PAGES;
-#ifdef PACKETBUF_MEMSIZE
- /* Allow the packet buffer size to be overridden by know-it-alls. */
- ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
-#endif
-
- ei_status.reset_8390 = &ne_reset_8390;
- ei_status.block_input = &ne_block_input;
- ei_status.block_output = &ne_block_output;
- ei_status.get_8390_hdr = &ne_get_8390_hdr;
- ei_status.priv = 0;
-
- dev->netdev_ops = &ne_netdev_ops;
-
- __NS8390_init(dev, 0);
-
- ret = register_netdev(dev);
- if (ret)
- goto out_irq;
- return 0;
-out_irq:
- free_irq(dev->irq, dev);
-err_out:
- release_region(ioaddr, NE_IO_EXTENT);
- return ret;
-}
-
-static int ne_open(struct net_device *dev)
-{
- __ei_open(dev);
- return 0;
-}
-
-static int ne_close(struct net_device *dev)
-{
- if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
- __ei_close(dev);
- return 0;
-}
-
-/* Hard reset the card. This used to pause for the same period that a
- 8390 reset command required, but that shouldn't be necessary. */
-
-static void ne_reset_8390(struct net_device *dev)
-{
- unsigned long reset_start_time = jiffies;
- struct ei_device *ei_local = netdev_priv(dev);
-
- if (ei_debug > 1)
- printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
-
- /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
- outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
-
- ei_status.txing = 0;
- ei_status.dmaing = 0;
-
- /* This check _should_not_ be necessary, omit eventually. */
- while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
- if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
- break;
- }
- outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
- we don't need to be concerned with ring wrap as the header will be at
- the start of a page, so we optimize accordingly. */
-
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- struct ei_device *ei_local = netdev_priv(dev);
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
-
- if (ei_status.dmaing)
- {
- printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
- return;
- }
-
- ei_status.dmaing |= 0x01;
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
- outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO);
- outb_p(0, NE_BASE + EN0_RCNTHI);
- outb_p(0, NE_BASE + EN0_RSARLO); /* On page boundary */
- outb_p(ring_page, NE_BASE + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
-
- if (ei_status.word16) {
- int len;
- unsigned short *p = (unsigned short *)hdr;
- for (len = sizeof(struct e8390_pkt_hdr)>>1; len > 0; len--)
- *p++ = inw(NE_BASE + NE_DATAPORT);
- } else
- insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
-
- outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
-
- le16_to_cpus(&hdr->count);
-}
-
-/* Block input and output, similar to the Crynwr packet driver. If you
- are porting to a new ethercard, look at the packet driver source for hints.
- The NEx000 doesn't share the on-board packet memory -- you have to put
- the packet out through the "remote DMA" dataport using outb. */
-
-static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- struct ei_device *ei_local = netdev_priv(dev);
-#ifdef NE_SANITY_CHECK
- int xfer_count = count;
-#endif
- char *buf = skb->data;
-
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
- if (ei_status.dmaing)
- {
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
- return;
- }
- ei_status.dmaing |= 0x01;
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
- outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
- outb_p(count >> 8, NE_BASE + EN0_RCNTHI);
- outb_p(ring_offset & 0xff, NE_BASE + EN0_RSARLO);
- outb_p(ring_offset >> 8, NE_BASE + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
- if (ei_status.word16)
- {
- int len;
- unsigned short *p = (unsigned short *)buf;
- for (len = count>>1; len > 0; len--)
- *p++ = inw(NE_BASE + NE_DATAPORT);
- if (count & 0x01)
- {
- buf[count-1] = inb(NE_BASE + NE_DATAPORT);
-#ifdef NE_SANITY_CHECK
- xfer_count++;
-#endif
- }
- } else {
- insb(NE_BASE + NE_DATAPORT, buf, count);
- }
-
-#ifdef NE_SANITY_CHECK
- /* This was for the ALPHA version only, but enough people have
- been encountering problems so it is still here. If you see
- this message you either 1) have a slightly incompatible clone
- or 2) have noise/speed problems with your bus. */
-
- if (ei_debug > 1)
- {
- /* DMA termination address check... */
- int addr, tries = 20;
- do {
- /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
- -- it's broken for Rx on some cards! */
- int high = inb_p(NE_BASE + EN0_RSARHI);
- int low = inb_p(NE_BASE + EN0_RSARLO);
- addr = (high << 8) + low;
- if (((ring_offset + xfer_count) & 0xff) == low)
- break;
- } while (--tries > 0);
- if (tries <= 0)
- printk(KERN_WARNING "%s: RX transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, ring_offset + xfer_count, addr);
- }
-#endif
- outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
-}
-
-static void ne_block_output(struct net_device *dev, int count,
- const unsigned char *buf, const int start_page)
-{
- struct ei_device *ei_local = netdev_priv(dev);
- unsigned long dma_start;
-#ifdef NE_SANITY_CHECK
- int retries = 0;
-#endif
-
- /* Round the count up for word writes. Do we need to do this?
- What effect will an odd byte count have on the 8390?
- I should check someday. */
-
- if (ei_status.word16 && (count & 0x01))
- count++;
-
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
- if (ei_status.dmaing)
- {
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
- "[DMAstat:%d][irqlock:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
- return;
- }
- ei_status.dmaing |= 0x01;
- /* We should already be in page 0, but to be safe... */
- outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, NE_BASE + NE_CMD);
-
-#ifdef NE_SANITY_CHECK
-retry:
-#endif
-
-#ifdef NE8390_RW_BUGFIX
- /* Handle the read-before-write bug the same way as the
- Crynwr packet driver -- the NatSemi method doesn't work.
- Actually this doesn't always work either, but if you have
- problems with your NEx000 this is better than nothing! */
-
- outb_p(0x42, NE_BASE + EN0_RCNTLO);
- outb_p(0x00, NE_BASE + EN0_RCNTHI);
- outb_p(0x42, NE_BASE + EN0_RSARLO);
- outb_p(0x00, NE_BASE + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
- /* Make certain that the dummy read has occurred. */
- udelay(6);
-#endif
-
- outb_p(ENISR_RDC, NE_BASE + EN0_ISR);
-
- /* Now the normal output. */
- outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
- outb_p(count >> 8, NE_BASE + EN0_RCNTHI);
- outb_p(0x00, NE_BASE + EN0_RSARLO);
- outb_p(start_page, NE_BASE + EN0_RSARHI);
-
- outb_p(E8390_RWRITE+E8390_START, NE_BASE + NE_CMD);
- if (ei_status.word16) {
- int len;
- unsigned short *p = (unsigned short *)buf;
- for (len = count>>1; len > 0; len--)
- outw(*p++, NE_BASE + NE_DATAPORT);
- } else {
- outsb(NE_BASE + NE_DATAPORT, buf, count);
- }
-
- dma_start = jiffies;
-
-#ifdef NE_SANITY_CHECK
- /* This was for the ALPHA version only, but enough people have
- been encountering problems so it is still here. */
-
- if (ei_debug > 1)
- {
- /* DMA termination address check... */
- int addr, tries = 20;
- do {
- int high = inb_p(NE_BASE + EN0_RSARHI);
- int low = inb_p(NE_BASE + EN0_RSARLO);
- addr = (high << 8) + low;
- if ((start_page << 8) + count == addr)
- break;
- } while (--tries > 0);
-
- if (tries <= 0)
- {
- printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, (start_page << 8) + count, addr);
- if (retries++ == 0)
- goto retry;
- }
- }
-#endif
-
- while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0)
- if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
- ne_reset_8390(dev);
- __NS8390_init(dev,1);
- break;
- }
-
- outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
-}
-
-
-#ifdef MODULE
-#define MAX_NE_CARDS 1 /* Max number of NE cards per module */
-static struct net_device *dev_ne[MAX_NE_CARDS];
-static int io[MAX_NE_CARDS];
-static int irq[MAX_NE_CARDS];
-static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(bad, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_DESCRIPTION("H8/300 NE2000 Ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that no ISA autoprobe takes place. We can't guarantee
-that the ne2k probe is the last 8390 based probe to take place (as it
-is at boot) and so the probe will get confused by any other 8390 cards.
-ISA device autoprobes on a running machine are not recommended anyway. */
-
-int init_module(void)
-{
- int this_dev, found = 0;
- int err;
-
- for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
- struct net_device *dev = ____alloc_ei_netdev(0);
- if (!dev)
- break;
- if (io[this_dev]) {
- dev->irq = irq[this_dev];
- dev->mem_end = bad[this_dev];
- dev->base_addr = io[this_dev];
- } else {
- dev->base_addr = h8300_ne_base[this_dev];
- dev->irq = h8300_ne_irq[this_dev];
- }
- err = init_reg_offset(dev, dev->base_addr);
- if (!err) {
- if (do_ne_probe(dev) == 0) {
- dev_ne[found++] = dev;
- continue;
- }
- }
- free_netdev(dev);
- if (found)
- break;
- if (io[this_dev] != 0)
- printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", dev->base_addr);
- else
- printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n");
- return -ENXIO;
- }
- if (found)
- return 0;
- return -ENODEV;
-}
-
-void cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
- struct net_device *dev = dev_ne[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 92201080e07a..fc14a85e4d5f 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -389,9 +389,7 @@ err_out_free_netdev:
free_netdev (dev);
err_out_free_res:
release_region (ioaddr, NE_IO_EXTENT);
- pci_set_drvdata (pdev, NULL);
return -ENODEV;
-
}
/*
@@ -655,7 +653,6 @@ static void ne2k_pci_remove_one(struct pci_dev *pdev)
release_region(dev->base_addr, NE_IO_EXTENT);
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 8b04bfc20cfb..171d73c1d3c2 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -835,7 +835,6 @@ static int starfire_init_one(struct pci_dev *pdev,
return 0;
err_out_cleardev:
- pci_set_drvdata(pdev, NULL);
iounmap(base);
err_out_free_res:
pci_release_regions (pdev);
@@ -2012,7 +2011,6 @@ static void starfire_remove_one(struct pci_dev *pdev)
iounmap(np->base);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev); /* Will also free np!! */
}
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 7a07ee07906b..6dec86ac97cd 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -104,6 +104,6 @@ struct bfin_mac_local {
#endif
};
-extern int bfin_get_ether_addr(char *addr);
+int bfin_get_ether_addr(char *addr);
#endif
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index 0a5837b96421..ae33a99bf476 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -242,13 +242,13 @@ struct lance_private
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
/* Now the prototypes we export */
-extern int lance_open(struct net_device *dev);
-extern int lance_close (struct net_device *dev);
-extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-extern void lance_set_multicast (struct net_device *dev);
-extern void lance_tx_timeout(struct net_device *dev);
+int lance_open(struct net_device *dev);
+int lance_close (struct net_device *dev);
+int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast (struct net_device *dev);
+void lance_tx_timeout(struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern void lance_poll(struct net_device *dev);
+void lance_poll(struct net_device *dev);
#endif
#endif /* ndef _7990_H */
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 1b1429d5d5c2..d042511bdc13 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1711,7 +1711,6 @@ static void amd8111e_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
static void amd8111e_config_ipg(struct net_device* dev)
@@ -1967,7 +1966,6 @@ err_free_reg:
err_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 10ceca523fc0..e07ce5ff2d48 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -586,10 +586,10 @@ static unsigned long __init lance_probe1( struct net_device *dev,
switch( lp->cardtype ) {
case OLD_RIEBL:
/* No ethernet address! (Set some default address) */
- memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 );
+ memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
break;
case NEW_RIEBL:
- lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 );
+ lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
break;
case PAM_CARD:
i = IO->eeprom;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 91d52b495848..427c148bb643 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1138,7 +1138,7 @@ static int au1000_probe(struct platform_device *pdev)
aup->phy1_search_mac0 = 1;
} else {
if (is_valid_ether_addr(pd->mac)) {
- memcpy(dev->dev_addr, pd->mac, 6);
+ memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
} else {
/* Set a random MAC since no valid provided by platform_data. */
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 94edc9c6fbbf..57397295887c 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -344,8 +344,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
}
clen = len & 1;
- rtp = tp;
- rfp = fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
}
@@ -372,8 +372,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
* do the rest, if any.
*/
clen = len & 15;
- rtp = (unsigned char *) tp;
- rfp = (unsigned char *) fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
}
@@ -403,8 +403,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
clen = len & 1;
- rtp = tp;
- rfp = fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
@@ -433,8 +433,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
* do the rest, if any.
*/
clen = len & 15;
- rtp = (unsigned char *) tp;
- rfp = (unsigned char *) fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
}
@@ -725,7 +725,6 @@ static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
- clear_ioasic_dma_irq(irq);
printk(KERN_ERR "%s: DMA error\n", dev->name);
return IRQ_HANDLED;
}
@@ -812,7 +811,7 @@ static int lance_open(struct net_device *dev)
if (lp->dma_irq >= 0) {
unsigned long flags;
- if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
+ if (request_irq(lp->dma_irq, lance_dma_merr_int, IRQF_ONESHOT,
"lance error", dev)) {
free_irq(dev->irq, dev);
printk("%s: Can't get DMA IRQ %d\n", dev->name,
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 5c728436b85e..256f590f6bb1 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -754,7 +754,7 @@ lance_open(struct net_device *dev)
int i;
if (dev->irq == 0 ||
- request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
+ request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
return -EAGAIN;
}
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 2d8e28819779..38492e0b704e 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1675,7 +1675,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
pr_cont(" warning: CSR address invalid,\n");
pr_info(" using instead PROM address of");
}
- memcpy(dev->dev_addr, promaddr, 6);
+ memcpy(dev->dev_addr, promaddr, ETH_ALEN);
}
}
@@ -2818,7 +2818,6 @@ static void pcnet32_remove_one(struct pci_dev *pdev)
lp->init_block, lp->init_dma_addr);
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index a597b766f080..daae0e016253 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1220,8 +1220,8 @@ static void bmac_reset_and_enable(struct net_device *dev)
if (skb != NULL) {
data = skb_put(skb, ETHERMINPACKET);
memset(data, 0, ETHERMINPACKET);
- memcpy(data, dev->dev_addr, 6);
- memcpy(data+6, dev->dev_addr, 6);
+ memcpy(data, dev->dev_addr, ETH_ALEN);
+ memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
bmac_transmit_packet(skb, dev);
}
spin_unlock_irqrestore(&bp->lock, flags);
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 9e1601487263..70ad8adaa12c 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -628,12 +628,12 @@ static const struct net_device_ops arc_emac_netdev_ops = {
static int arc_emac_probe(struct platform_device *pdev)
{
- struct resource res_regs, res_irq;
+ struct resource res_regs;
struct device_node *phy_node;
struct arc_emac_priv *priv;
struct net_device *ndev;
const char *mac_addr;
- unsigned int id, clock_frequency;
+ unsigned int id, clock_frequency, irq;
int err;
if (!pdev->dev.of_node)
@@ -661,8 +661,8 @@ static int arc_emac_probe(struct platform_device *pdev)
}
/* Get IRQ from device tree */
- err = of_irq_to_resource(pdev->dev.of_node, 0, &res_irq);
- if (!err) {
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!irq) {
dev_err(&pdev->dev, "failed to retrieve <irq> value from device tree\n");
return -ENODEV;
}
@@ -711,7 +711,7 @@ static int arc_emac_probe(struct platform_device *pdev)
goto out;
}
- ndev->irq = res_irq.start;
+ ndev->irq = irq;
dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);
/* Register interrupt handler for device */
@@ -725,10 +725,10 @@ static int arc_emac_probe(struct platform_device *pdev)
/* Get MAC address from device tree */
mac_addr = of_get_mac_address(pdev->dev.of_node);
- if (!mac_addr || !is_valid_ether_addr(mac_addr))
- eth_hw_addr_random(ndev);
- else
+ if (mac_addr)
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ else
+ eth_hw_addr_random(ndev);
dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
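irq_of_parse_and_map(), which the arc-emac hunk switches to, maps a device-tree interrupt specifier straight to a Linux IRQ number and returns 0 on failure, which is simpler than going through a struct resource. Sketch of the lookup in a hypothetical probe:

#include <linux/of_irq.h>
#include <linux/platform_device.h>

static int demo_get_irq(struct platform_device *pdev)
{
	unsigned int irq;

	/* index 0: first entry of the node's "interrupts" property */
	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no usable IRQ in device tree\n");
		return -ENODEV;
	}

	return irq;
}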
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index fc95b235e210..5aa5e8146496 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1367,7 +1367,6 @@ static void alx_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(alx->dev);
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 0f0556526ba9..7f9369a3b378 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -600,7 +600,7 @@ struct atl1c_adapter {
extern char atl1c_driver_name[];
extern char atl1c_driver_version[];
-extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
-extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
-extern void atl1c_set_ethtool_ops(struct net_device *netdev);
+void atl1c_reinit_locked(struct atl1c_adapter *adapter);
+s32 atl1c_reset_hw(struct atl1c_hw *hw);
+void atl1c_set_ethtool_ops(struct net_device *netdev);
#endif /* _ATL1C_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 3ef7092e3f1c..1cda49a28f7f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -153,7 +153,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value)
{
int i;
- int ret = false;
+ bool ret = false;
u32 otp_ctrl_data;
u32 control;
u32 data;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index b5fd934585e9..1b0fe2d04a0e 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -499,10 +499,10 @@ struct atl1e_adapter {
extern char atl1e_driver_name[];
extern char atl1e_driver_version[];
-extern void atl1e_check_options(struct atl1e_adapter *adapter);
-extern int atl1e_up(struct atl1e_adapter *adapter);
-extern void atl1e_down(struct atl1e_adapter *adapter);
-extern void atl1e_reinit_locked(struct atl1e_adapter *adapter);
-extern s32 atl1e_reset_hw(struct atl1e_hw *hw);
-extern void atl1e_set_ethtool_ops(struct net_device *netdev);
+void atl1e_check_options(struct atl1e_adapter *adapter);
+int atl1e_up(struct atl1e_adapter *adapter);
+void atl1e_down(struct atl1e_adapter *adapter);
+void atl1e_reinit_locked(struct atl1e_adapter *adapter);
+s32 atl1e_reset_hw(struct atl1e_hw *hw);
+void atl1e_set_ethtool_ops(struct net_device *netdev);
#endif /* _ATL1_E_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 1966444590f6..7a73f3a9fcb5 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -313,6 +313,34 @@ static void atl1e_set_multi(struct net_device *netdev)
}
}
+static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data)
+{
+
+ if (features & NETIF_F_RXALL) {
+ /* enable RX of ALL frames */
+ *mac_ctrl_data |= MAC_CTRL_DBG;
+ } else {
+ /* disable RX of ALL frames */
+ *mac_ctrl_data &= ~MAC_CTRL_DBG;
+ }
+}
+
+static void atl1e_rx_mode(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ u32 mac_ctrl_data = 0;
+
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
+
+ atl1e_irq_disable(adapter);
+ mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
+ __atl1e_rx_mode(features, &mac_ctrl_data);
+ AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+ atl1e_irq_enable(adapter);
+}
+
+
static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
if (features & NETIF_F_HW_VLAN_CTAG_RX) {
@@ -394,6 +422,10 @@ static int atl1e_set_features(struct net_device *netdev,
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
atl1e_vlan_mode(netdev, features);
+ if (changed & NETIF_F_RXALL)
+ atl1e_rx_mode(netdev, features);
+
+
return 0;
}
@@ -1057,7 +1089,8 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
value |= MAC_CTRL_PROMIS_EN;
if (netdev->flags & IFF_ALLMULTI)
value |= MAC_CTRL_MC_ALL_EN;
-
+ if (netdev->features & NETIF_F_RXALL)
+ value |= MAC_CTRL_DBG;
AT_WRITE_REG(hw, REG_MAC_CTRL, value);
}
@@ -1405,7 +1438,8 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
rx_page_desc[que].rx_nxseq++;
/* error packet */
- if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
+ if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) &&
+ !(netdev->features & NETIF_F_RXALL)) {
if (prrs->err_flag & (RRS_ERR_BAD_CRC |
RRS_ERR_DRIBBLE | RRS_ERR_CODE |
RRS_ERR_TRUNC)) {
@@ -1418,7 +1452,10 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
}
packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
- RRS_PKT_SIZE_MASK) - 4; /* CRC */
+ RRS_PKT_SIZE_MASK);
+ if (likely(!(netdev->features & NETIF_F_RXFCS)))
+ packet_size -= 4; /* CRC */
+
skb = netdev_alloc_skb_ip_align(netdev, packet_size);
if (skb == NULL)
goto skip_pkt;
@@ -2245,7 +2282,8 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
NETIF_F_HW_VLAN_CTAG_RX;
netdev->features = netdev->hw_features | NETIF_F_LLTX |
NETIF_F_HW_VLAN_CTAG_TX;
-
+ /* not enabled by default */
+ netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
return 0;
}
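The atl1e changes wire up NETIF_F_RXALL (pass frames the MAC flagged as bad up the stack) and NETIF_F_RXFCS (hand the trailing CRC to the stack instead of stripping it). The length handling is the piece that is easy to get wrong; a sketch of the usual receive-side logic, with hypothetical names:

#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Decide how many bytes of a received frame to copy into the skb.
 * @hw_len is the length reported by the MAC, CRC included.
 */
static unsigned int demo_rx_len(struct net_device *ndev, unsigned int hw_len)
{
	unsigned int len = hw_len;

	/* Keep the trailing FCS only when the user asked for it. */
	if (!(ndev->features & NETIF_F_RXFCS))
		len -= ETH_FCS_LEN;

	return len;
}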
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h
index 3ebe19f7242b..2f27d4c4c3ad 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.h
+++ b/drivers/net/ethernet/atheros/atlx/atl2.h
@@ -42,7 +42,7 @@
#include "atlx.h"
#ifdef ETHTOOL_OPS_COMPAT
-extern int ethtool_ioctl(struct ifreq *ifr);
+int ethtool_ioctl(struct ifreq *ifr);
#endif
#define PCI_COMMAND_REGISTER PCI_COMMAND
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 9b017d9c58e9..90e54d5488dc 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -596,6 +596,7 @@ static void b44_timer(unsigned long __opaque)
static void b44_tx(struct b44 *bp)
{
u32 cur, cons;
+ unsigned bytes_compl = 0, pkts_compl = 0;
cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
cur /= sizeof(struct dma_desc);
@@ -612,9 +613,14 @@ static void b44_tx(struct b44 *bp)
skb->len,
DMA_TO_DEVICE);
rp->skb = NULL;
+
+ bytes_compl += skb->len;
+ pkts_compl++;
+
dev_kfree_skb_irq(skb);
}
+ netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
bp->tx_cons = cons;
if (netif_queue_stopped(bp->dev) &&
TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
@@ -1018,6 +1024,8 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (bp->flags & B44_FLAG_REORDER_BUG)
br32(bp, B44_DMATX_PTR);
+ netdev_sent_queue(dev, skb->len);
+
if (TX_BUFFS_AVAIL(bp) < 1)
netif_stop_queue(dev);
@@ -1416,6 +1424,8 @@ static void b44_init_hw(struct b44 *bp, int reset_kind)
val = br32(bp, B44_ENET_CTRL);
bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+
+ netdev_reset_queue(bp->dev);
}
static int b44_open(struct net_device *dev)
@@ -2101,7 +2111,7 @@ static int b44_get_invariants(struct b44 *bp)
* valid PHY address. */
bp->phy_addr &= 0x1F;
- memcpy(bp->dev->dev_addr, addr, 6);
+ memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
pr_err("Invalid MAC address found in EEPROM\n");
@@ -2183,8 +2193,7 @@ static int b44_init_one(struct ssb_device *sdev,
goto err_out_free_dev;
}
- if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
- dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
+ if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
dev_err(sdev->dev,
"Required 30BIT DMA mask unsupported by the system\n");
goto err_out_powerdown;
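The b44 hunks above (and the bgmac hunks below) wire the driver into byte queue limits. The API is a balanced triplet: netdev_sent_queue() when a packet is handed to hardware, netdev_completed_queue() when descriptors are reclaimed, and netdev_reset_queue() whenever the TX ring is reinitialised; an unbalanced pairing leaves the accounting skewed and can stall the queue. A condensed sketch of the pairing, with drv_* helpers as placeholders:

/* BQL call pairing; drv_* helpers are placeholders for the driver's own code. */
static netdev_tx_t drv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	drv_queue_to_hw(dev, skb);
	netdev_sent_queue(dev, skb->len);	/* bytes handed to HW */
	return NETDEV_TX_OK;
}

static void drv_tx_clean(struct net_device *dev)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct sk_buff *skb;

	while ((skb = drv_reclaim_from_hw(dev)) != NULL) {
		bytes_compl += skb->len;
		pkts_compl++;
		dev_kfree_skb(skb);
	}
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void drv_ring_init(struct net_device *dev)
{
	netdev_reset_queue(dev);	/* ring flushed, clear BQL state too */
}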
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 249468f95365..e2aa09ce6af7 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
dma_desc->ctl0 = cpu_to_le32(ctl0);
dma_desc->ctl1 = cpu_to_le32(ctl1);
+ netdev_sent_queue(net_dev, skb->len);
+
wmb();
/* Increase ring->end to point empty slot. We tell hardware the first
@@ -178,6 +180,7 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
struct device *dma_dev = bgmac->core->dma_dev;
int empty_slot;
bool freed = false;
+ unsigned bytes_compl = 0, pkts_compl = 0;
/* The last slot that hardware didn't consume yet */
empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
@@ -195,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
slot->skb->len, DMA_TO_DEVICE);
slot->dma_addr = 0;
+ bytes_compl += slot->skb->len;
+ pkts_compl++;
+
/* Free memory! :) */
dev_kfree_skb(slot->skb);
slot->skb = NULL;
@@ -208,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
freed = true;
}
+ netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
+
if (freed && netif_queue_stopped(bgmac->net_dev))
netif_wake_queue(bgmac->net_dev);
}
@@ -244,31 +252,59 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
struct bgmac_slot_info *slot)
{
struct device *dma_dev = bgmac->core->dma_dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
struct bgmac_rx_header *rx;
/* Alloc skb */
- slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
- if (!slot->skb)
+ skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
+ if (!skb)
return -ENOMEM;
/* Poison - if everything goes fine, hardware will overwrite it */
- rx = (struct bgmac_rx_header *)slot->skb->data;
+ rx = (struct bgmac_rx_header *)skb->data;
rx->len = cpu_to_le16(0xdead);
rx->flags = cpu_to_le16(0xbeef);
/* Map skb for the DMA */
- slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
- BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(dma_dev, slot->dma_addr)) {
+ dma_addr = dma_map_single(dma_dev, skb->data,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev, dma_addr)) {
bgmac_err(bgmac, "DMA mapping error\n");
+ dev_kfree_skb(skb);
return -ENOMEM;
}
+
+ /* Update the slot */
+ slot->skb = skb;
+ slot->dma_addr = dma_addr;
+
if (slot->dma_addr & 0xC0000000)
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
return 0;
}
+static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring, int desc_idx)
+{
+ struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
+ u32 ctl0 = 0, ctl1 = 0;
+
+ if (desc_idx == ring->num_slots - 1)
+ ctl0 |= BGMAC_DESC_CTL0_EOT;
+ ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
+ /* Is there any BGMAC device that requires extension? */
+ /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
+ * B43_DMA64_DCTL1_ADDREXT_MASK;
+ */
+
+ dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
+ dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
+ dma_desc->ctl0 = cpu_to_le32(ctl0);
+ dma_desc->ctl1 = cpu_to_le32(ctl1);
+}
+
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
int weight)
{
@@ -287,7 +323,6 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
struct device *dma_dev = bgmac->core->dma_dev;
struct bgmac_slot_info *slot = &ring->slots[ring->start];
struct sk_buff *skb = slot->skb;
- struct sk_buff *new_skb;
struct bgmac_rx_header *rx;
u16 len, flags;
@@ -300,38 +335,51 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
len = le16_to_cpu(rx->len);
flags = le16_to_cpu(rx->flags);
- /* Check for poison and drop or pass the packet */
- if (len == 0xdead && flags == 0xbeef) {
- bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
- ring->start);
- } else {
+ do {
+ dma_addr_t old_dma_addr = slot->dma_addr;
+ int err;
+
+ /* Check for poison and drop or pass the packet */
+ if (len == 0xdead && flags == 0xbeef) {
+ bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
+ ring->start);
+ dma_sync_single_for_device(dma_dev,
+ slot->dma_addr,
+ BGMAC_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ break;
+ }
+
/* Omit CRC. */
len -= ETH_FCS_LEN;
- new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
- if (new_skb) {
- skb_put(new_skb, len);
- skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
- new_skb->data,
- len);
- skb_checksum_none_assert(skb);
- new_skb->protocol =
- eth_type_trans(new_skb, bgmac->net_dev);
- netif_receive_skb(new_skb);
- handled++;
- } else {
- bgmac->net_dev->stats.rx_dropped++;
- bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
+ /* Prepare new skb as replacement */
+ err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
+ if (err) {
+ /* Poison the old skb */
+ rx->len = cpu_to_le16(0xdead);
+ rx->flags = cpu_to_le16(0xbeef);
+
+ dma_sync_single_for_device(dma_dev,
+ slot->dma_addr,
+ BGMAC_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ break;
}
+ bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
- /* Poison the old skb */
- rx->len = cpu_to_le16(0xdead);
- rx->flags = cpu_to_le16(0xbeef);
- }
+ /* Unmap old skb, we'll pass it to the network stack */
+ dma_unmap_single(dma_dev, old_dma_addr,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
+ skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
- /* Make it back accessible to the hardware */
- dma_sync_single_for_device(dma_dev, slot->dma_addr,
- BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+ netif_receive_skb(skb);
+ handled++;
+ } while (0);
if (++ring->start >= BGMAC_RX_RING_SLOTS)
ring->start = 0;
@@ -495,8 +543,6 @@ err_dma_free:
static void bgmac_dma_init(struct bgmac *bgmac)
{
struct bgmac_dma_ring *ring;
- struct bgmac_dma_desc *dma_desc;
- u32 ctl0, ctl1;
int i;
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
@@ -529,23 +575,8 @@ static void bgmac_dma_init(struct bgmac *bgmac)
if (ring->unaligned)
bgmac_dma_rx_enable(bgmac, ring);
- for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
- j++, dma_desc++) {
- ctl0 = ctl1 = 0;
-
- if (j == ring->num_slots - 1)
- ctl0 |= BGMAC_DESC_CTL0_EOT;
- ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
- /* Is there any BGMAC device that requires extension? */
- /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
- * B43_DMA64_DCTL1_ADDREXT_MASK;
- */
-
- dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
- dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
- dma_desc->ctl0 = cpu_to_le32(ctl0);
- dma_desc->ctl1 = cpu_to_le32(ctl1);
- }
+ for (j = 0; j < ring->num_slots; j++)
+ bgmac_dma_rx_setup_desc(bgmac, ring, j);
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
ring->index_base +
@@ -988,6 +1019,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
bgmac_miiconfig(bgmac);
bgmac_phy_init(bgmac);
+ netdev_reset_queue(bgmac->net_dev);
+
bgmac->int_status = 0;
}
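The reworked bgmac RX path above replaces copy-on-receive with refill-before-receive: a fresh buffer is allocated and DMA-mapped first, and only on success is the old buffer unmapped and passed up the stack; on failure the old buffer is re-poisoned and synced back to the device so the ring slot is never lost. The control flow reduces to roughly this shape (struct rx_slot, RX_BUF_SIZE and the rx_* helpers are placeholders):

/* Refill-before-receive sketch; rx_* helpers and types are placeholders. */
static void rx_handle_slot(struct net_device *dev, struct rx_slot *slot)
{
	struct sk_buff *old_skb = slot->skb;
	dma_addr_t old_dma = slot->dma_addr;

	if (rx_alloc_and_map_new_buffer(dev, slot)) {
		/* Could not refill: give the old buffer back to the hardware. */
		rx_repoison_and_sync_for_device(dev, slot);
		return;
	}

	dma_unmap_single(dev->dev.parent, old_dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
	rx_pass_up(dev, old_skb);
}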
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e838a3f74b69..d9980ad00b4b 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5761,8 +5761,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
if (!skb)
return -ENOMEM;
packet = skb_put(skb, pkt_size);
- memcpy(packet, bp->dev->dev_addr, 6);
- memset(packet + 6, 0x0, 8);
+ memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+ memset(packet + ETH_ALEN, 0x0, 8);
for (i = 14; i < pkt_size; i++)
packet[i] = (unsigned char) (i & 0xff);
@@ -8413,7 +8413,6 @@ err_out_release:
err_out_disable:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return rc;
@@ -8514,7 +8513,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- memcpy(dev->dev_addr, bp->mac_addr, 6);
+ memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN |
@@ -8546,7 +8545,6 @@ error:
pci_iounmap(pdev, bp->regview);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_free:
free_netdev(dev);
return rc;
@@ -8578,7 +8576,6 @@ bnx2_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 97b3d32a98bd..4e01c57d8c8d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1197,8 +1197,9 @@ union cdu_context {
/* TM (timers) host DB constants */
#define TM_ILT_PAGE_SZ_HW 0
#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
-/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
-#define TM_CONN_NUM 1024
+#define TM_CONN_NUM (BNX2X_FIRST_VF_CID + \
+ BNX2X_VF_CIDS + \
+ CNIC_ISCSI_CID_MAX)
#define TM_ILT_SZ (8 * TM_CONN_NUM)
#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
@@ -1527,7 +1528,6 @@ struct bnx2x {
#define PCI_32BIT_FLAG (1 << 1)
#define ONE_PORT_FLAG (1 << 2)
#define NO_WOL_FLAG (1 << 3)
-#define USING_DAC_FLAG (1 << 4)
#define USING_MSIX_FLAG (1 << 5)
#define USING_MSI_FLAG (1 << 6)
#define DISABLE_MSI_FLAG (1 << 7)
@@ -1546,6 +1546,7 @@ struct bnx2x {
#define IS_VF_FLAG (1 << 22)
#define INTERRUPTS_ENABLED_FLAG (1 << 23)
#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
+#define HAS_PHYS_PORT_ID (1 << 25)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
@@ -1621,7 +1622,7 @@ struct bnx2x {
u16 rx_ticks_int;
u16 rx_ticks;
/* Maximal coalescing timeout in us */
-#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
+#define BNX2X_MAX_COALESCE_TOUT (0xff*BNX2X_BTR)
u32 lin_cnt;
@@ -1876,6 +1877,8 @@ struct bnx2x {
u32 dump_preset_idx;
bool stats_started;
struct semaphore stats_sema;
+
+ u8 phys_port_id[ETH_ALEN];
};
/* Tx queues may be less or equal to Rx queues */
@@ -2072,7 +2075,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
u8 src_type, u8 dst_type);
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u32 *comp);
/* FLR related routines */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
@@ -2231,7 +2235,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define BNX2X_NUM_TESTS_SF 7
#define BNX2X_NUM_TESTS_MF 3
#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
- BNX2X_NUM_TESTS_SF)
+ IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
#define BNX2X_PHY_LOOPBACK 0
#define BNX2X_MAC_LOOPBACK 1
@@ -2491,11 +2495,9 @@ enum {
#define NUM_MACS 8
-enum bnx2x_pci_bus_speed {
- BNX2X_PCI_LINK_SPEED_2500 = 2500,
- BNX2X_PCI_LINK_SPEED_5000 = 5000,
- BNX2X_PCI_LINK_SPEED_8000 = 8000
-};
-
void bnx2x_set_local_cmng(struct bnx2x *bp);
+
+#define MCPR_SCRATCH_BASE(bp) \
+ (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e66beff2704d..dcafbda3e5be 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -681,6 +681,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
}
#endif
+ skb_record_rx_queue(skb, fp->rx_queue);
napi_gro_receive(&fp->napi, skb);
}
@@ -2544,10 +2545,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}
- /* Allocated memory for FW statistics */
- if (bnx2x_alloc_fw_stats_mem(bp))
- LOAD_ERROR_EXIT(bp, load_error0);
-
/* need to be done after alloc mem, since it's self adjusting to amount
* of memory available for RSS queues
*/
@@ -2557,6 +2554,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error0);
}
+ /* Allocated memory for FW statistics */
+ if (bnx2x_alloc_fw_stats_mem(bp))
+ LOAD_ERROR_EXIT(bp, load_error0);
+
/* request pf to initialize status blocks */
if (IS_VF(bp)) {
rc = bnx2x_vfpf_init(bp);
@@ -2811,8 +2812,8 @@ load_error1:
if (IS_PF(bp))
bnx2x_clear_pf_load(bp);
load_error0:
- bnx2x_free_fp_mem(bp);
bnx2x_free_fw_stats_mem(bp);
+ bnx2x_free_fp_mem(bp);
bnx2x_free_mem(bp);
return rc;
@@ -3255,14 +3256,16 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
if (prot == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
- if (skb_is_gso_v6(skb)) {
- rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
- if (rc & XMIT_CSUM_ENC)
- rc |= XMIT_GSO_ENC_V6;
- } else if (skb_is_gso(skb)) {
- rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
- if (rc & XMIT_CSUM_ENC)
- rc |= XMIT_GSO_ENC_V4;
+ if (skb_is_gso(skb)) {
+ if (skb_is_gso_v6(skb)) {
+ rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V6;
+ } else {
+ rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V4;
+ }
}
return rc;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 324de5f05332..32d0f1435fb4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -639,6 +639,9 @@ static int bnx2x_get_regs_len(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
int regdump_len = 0;
+ if (IS_VF(bp))
+ return 0;
+
regdump_len = __bnx2x_get_regs_len(bp);
regdump_len *= 4;
regdump_len += sizeof(struct dump_header);
@@ -891,17 +894,8 @@ static void bnx2x_get_regs(struct net_device *dev,
* will re-enable parity attentions right after the dump.
*/
- /* Disable parity on path 0 */
- bnx2x_pretend_func(bp, 0);
bnx2x_disable_blocks_parity(bp);
- /* Disable parity on path 1 */
- bnx2x_pretend_func(bp, 1);
- bnx2x_disable_blocks_parity(bp);
-
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
dump_hdr.preset = DUMP_ALL_PRESETS;
dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -928,18 +922,9 @@ static void bnx2x_get_regs(struct net_device *dev,
/* Actually read the registers */
__bnx2x_get_regs(bp, p);
- /* Re-enable parity attentions on path 0 */
- bnx2x_pretend_func(bp, 0);
+ /* Re-enable parity attentions */
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
-
- /* Re-enable parity attentions on path 1 */
- bnx2x_pretend_func(bp, 1);
- bnx2x_clear_blocks_parity(bp);
- bnx2x_enable_blocks_parity(bp);
-
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
@@ -993,17 +978,8 @@ static int bnx2x_get_dump_data(struct net_device *dev,
* will re-enable parity attentions right after the dump.
*/
- /* Disable parity on path 0 */
- bnx2x_pretend_func(bp, 0);
bnx2x_disable_blocks_parity(bp);
- /* Disable parity on path 1 */
- bnx2x_pretend_func(bp, 1);
- bnx2x_disable_blocks_parity(bp);
-
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
dump_hdr.preset = bp->dump_preset_idx;
dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -1032,19 +1008,10 @@ static int bnx2x_get_dump_data(struct net_device *dev,
/* Actually read the registers */
__bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
- /* Re-enable parity attentions on path 0 */
- bnx2x_pretend_func(bp, 0);
- bnx2x_clear_blocks_parity(bp);
- bnx2x_enable_blocks_parity(bp);
-
- /* Re-enable parity attentions on path 1 */
- bnx2x_pretend_func(bp, 1);
+ /* Re-enable parity attentions */
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
return 0;
}
@@ -2900,9 +2867,16 @@ static void bnx2x_self_test(struct net_device *dev,
memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
+ if (bnx2x_test_nvram(bp) != 0) {
+ if (!IS_MF(bp))
+ buf[4] = 1;
+ else
+ buf[0] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
if (!netif_running(dev)) {
- DP(BNX2X_MSG_ETHTOOL,
- "Can't perform self-test when interface is down\n");
+ DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
return;
}
@@ -2964,13 +2938,7 @@ static void bnx2x_self_test(struct net_device *dev,
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up, is_serdes);
}
- if (bnx2x_test_nvram(bp) != 0) {
- if (!IS_MF(bp))
- buf[4] = 1;
- else
- buf[0] = 1;
- etest->flags |= ETH_TEST_FL_FAILED;
- }
+
if (bnx2x_test_intr(bp) != 0) {
if (!IS_MF(bp))
buf[5] = 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 32767f6aa33f..cf1df8b62e2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -172,6 +172,7 @@ struct shared_hw_cfg { /* NVRAM Offset */
#define SHARED_HW_CFG_LED_MAC4 0x000c0000
#define SHARED_HW_CFG_LED_PHY8 0x000d0000
#define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
+ #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000
#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 76df015f486a..c2dfea7968f4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -640,23 +640,35 @@ static const struct {
* [30] MCP Latched ump_tx_parity
* [31] MCP Latched scpad_parity
*/
-#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \
(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
- AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+ (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
/* Below registers control the MCP parity attention output. When
* MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
* enabled, when cleared - disabled.
*/
-static const u32 mcp_attn_ctl_regs[] = {
- MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
- MISC_REG_AEU_ENABLE4_NIG_0,
- MISC_REG_AEU_ENABLE4_PXP_0,
- MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
- MISC_REG_AEU_ENABLE4_NIG_1,
- MISC_REG_AEU_ENABLE4_PXP_1
+static const struct {
+ u32 addr;
+ u32 bits;
+} mcp_attn_ctl_regs[] = {
+ { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+ MISC_AEU_ENABLE_MCP_PRTY_BITS },
+ { MISC_REG_AEU_ENABLE4_NIG_0,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_PXP_0,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+ MISC_AEU_ENABLE_MCP_PRTY_BITS },
+ { MISC_REG_AEU_ENABLE4_NIG_1,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_PXP_1,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
};
static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
@@ -665,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
u32 reg_val;
for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
- reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+ reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr);
if (enable)
- reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ reg_val |= mcp_attn_ctl_regs[i].bits;
else
- reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ reg_val &= ~mcp_attn_ctl_regs[i].bits;
- REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+ REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 51468227bf3b..20dcc02431ca 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3122,7 +3122,7 @@ static void bnx2x_bsc_module_sel(struct link_params *params)
}
static int bnx2x_bsc_read(struct link_params *params,
- struct bnx2x_phy *phy,
+ struct bnx2x *bp,
u8 sl_devid,
u16 sl_addr,
u8 lc_addr,
@@ -3131,7 +3131,6 @@ static int bnx2x_bsc_read(struct link_params *params,
{
u32 val, i;
int rc = 0;
- struct bnx2x *bp = params->bp;
if (xfer_cnt > 16) {
DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
@@ -6371,9 +6370,15 @@ int bnx2x_set_led(struct link_params *params,
* intended override.
*/
break;
- } else
+ } else {
+ u32 nig_led_mode = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ?
+ (SHARED_HW_CFG_LED_PHY1 >>
+ SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- hw_led_mode);
+ nig_led_mode);
+ }
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
/* Set blinking rate to ~15.9Hz */
@@ -7917,7 +7922,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
usleep_range(1000, 2000);
bnx2x_warpcore_power_module(params, 1);
}
- rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
+ rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt,
data_array);
} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -10653,10 +10658,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
0x40);
} else {
+ /* EXTPHY2 LED mode indicate that the 100M/1G/10G LED
+ * sources are all wired through LED1, rather than only
+ * 10G in other modes.
+ */
+ val = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80;
+
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
- 0x80);
+ val);
/* Tell LED3 to blink on source */
bnx2x_cl45_read(bp, phy,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 82b658d8c04c..e622cc1f96ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -503,9 +503,9 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
}
/* issue a dmae command over the init-channel and wait for completion */
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u32 *comp)
{
- u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
int rc = 0;
@@ -518,14 +518,14 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
spin_lock_bh(&bp->dmae_lock);
/* reset completion */
- *wb_comp = 0;
+ *comp = 0;
/* post the command on the channel used for initializations */
bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
/* wait for completion */
udelay(5);
- while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+ while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
if (!cnt ||
(bp->recovery_state != BNX2X_RECOVERY_DONE &&
@@ -537,7 +537,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
cnt--;
udelay(50);
}
- if (*wb_comp & DMAE_PCI_ERR_FLAG) {
+ if (*comp & DMAE_PCI_ERR_FLAG) {
BNX2X_ERR("DMAE PCI error!\n");
rc = DMAE_PCI_ERROR;
}
@@ -574,7 +574,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
dmae.len = len32;
/* issue the command and wait for completion */
- rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+ rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
if (rc) {
BNX2X_ERR("DMAE returned failure %d\n", rc);
bnx2x_panic();
@@ -611,7 +611,7 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
dmae.len = len32;
/* issue the command and wait for completion */
- rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+ rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
if (rc) {
BNX2X_ERR("DMAE returned failure %d\n", rc);
bnx2x_panic();
@@ -751,6 +751,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
return rc;
}
+#define MCPR_TRACE_BUFFER_SIZE (0x800)
+#define SCRATCH_BUFFER_SIZE(bp) \
+ (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
+
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
u32 addr, val;
@@ -775,7 +779,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
trace_shmem_base = bp->common.shmem_base;
else
trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
- addr = trace_shmem_base - 0x800;
+
+ /* sanity */
+ if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
+ trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
+ SCRATCH_BUFFER_SIZE(bp)) {
+ BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
+ trace_shmem_base);
+ return;
+ }
+
+ addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
/* validate TRCB signature */
mark = REG_RD(bp, addr);
@@ -787,14 +801,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
/* read cyclic buffer pointer */
addr += 4;
mark = REG_RD(bp, addr);
- mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
- + ((mark + 0x3) & ~0x3) - 0x08000000;
+ mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
+ if (mark >= trace_shmem_base || mark < addr + 4) {
+ BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
+ return;
+ }
printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
printk("%s", lvl);
/* dump buffer after the mark */
- for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
+ for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
@@ -4280,65 +4297,60 @@ static void _print_next_block(int idx, const char *blk)
pr_cont("%s%s", idx ? ", " : "", blk);
}
-static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
- int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- switch (cur_bit) {
- case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "BRB");
+ res |= true; /* Each bit is a real error! */
+
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+ _print_next_block((*par_num)++, "BRB");
_print_parity(bp,
BRB1_REG_BRB1_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "PARSER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PARSER");
_print_parity(bp, PRS_REG_PRS_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "TSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+ _print_next_block((*par_num)++, "TSDM");
_print_parity(bp,
TSDM_REG_TSDM_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++,
+ break;
+ case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+ _print_next_block((*par_num)++,
"SEARCHER");
_print_parity(bp, SRC_REG_SRC_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "TCM");
- _print_parity(bp,
- TCM_REG_TCM_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "TSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+ _print_next_block((*par_num)++, "TCM");
+ _print_parity(bp, TCM_REG_TCM_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "TSEMI");
_print_parity(bp,
TSEM_REG_TSEM_PRTY_STS_0);
_print_parity(bp,
TSEM_REG_TSEM_PRTY_STS_1);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "XPB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ _print_next_block((*par_num)++, "XPB");
_print_parity(bp, GRCBASE_XPB +
PB_REG_PB_PRTY_STS);
+ break;
}
- break;
}
/* Clear the bit */
@@ -4346,53 +4358,59 @@ static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
- int par_num, bool *global,
+static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
+ int *par_num, bool *global,
bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
+ res |= true; /* Each bit is a real error! */
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "PBF");
+ _print_next_block((*par_num)++, "PBF");
_print_parity(bp, PBF_REG_PBF_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "QM");
+ _print_next_block((*par_num)++, "QM");
_print_parity(bp, QM_REG_QM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "TM");
+ _print_next_block((*par_num)++, "TM");
_print_parity(bp, TM_REG_TM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "XSDM");
+ _print_next_block((*par_num)++, "XSDM");
_print_parity(bp,
XSDM_REG_XSDM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "XCM");
+ _print_next_block((*par_num)++, "XCM");
_print_parity(bp, XCM_REG_XCM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "XSEMI");
+ _print_next_block((*par_num)++,
+ "XSEMI");
_print_parity(bp,
XSEM_REG_XSEM_PRTY_STS_0);
_print_parity(bp,
@@ -4401,7 +4419,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
break;
case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"DOORBELLQ");
_print_parity(bp,
DORQ_REG_DORQ_PRTY_STS);
@@ -4409,7 +4427,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
break;
case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "NIG");
+ _print_next_block((*par_num)++, "NIG");
if (CHIP_IS_E1x(bp)) {
_print_parity(bp,
NIG_REG_NIG_PRTY_STS);
@@ -4423,32 +4441,34 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
break;
case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"VAUX PCI CORE");
*global = true;
break;
case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "DEBUG");
+ _print_next_block((*par_num)++,
+ "DEBUG");
_print_parity(bp, DBG_REG_DBG_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "USDM");
+ _print_next_block((*par_num)++, "USDM");
_print_parity(bp,
USDM_REG_USDM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "UCM");
+ _print_next_block((*par_num)++, "UCM");
_print_parity(bp, UCM_REG_UCM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "USEMI");
+ _print_next_block((*par_num)++,
+ "USEMI");
_print_parity(bp,
USEM_REG_USEM_PRTY_STS_0);
_print_parity(bp,
@@ -4457,21 +4477,21 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
break;
case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "UPB");
+ _print_next_block((*par_num)++, "UPB");
_print_parity(bp, GRCBASE_UPB +
PB_REG_PB_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "CSDM");
+ _print_next_block((*par_num)++, "CSDM");
_print_parity(bp,
CSDM_REG_CSDM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "CCM");
+ _print_next_block((*par_num)++, "CCM");
_print_parity(bp, CCM_REG_CCM_PRTY_STS);
}
break;
@@ -4482,80 +4502,73 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
- int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- switch (cur_bit) {
- case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "CSEMI");
+ res |= true; /* Each bit is a real error! */
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "CSEMI");
_print_parity(bp,
CSEM_REG_CSEM_PRTY_STS_0);
_print_parity(bp,
CSEM_REG_CSEM_PRTY_STS_1);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "PXP");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+ _print_next_block((*par_num)++, "PXP");
_print_parity(bp, PXP_REG_PXP_PRTY_STS);
_print_parity(bp,
PXP2_REG_PXP2_PRTY_STS_0);
_print_parity(bp,
PXP2_REG_PXP2_PRTY_STS_1);
- }
- break;
- case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++,
- "PXPPCICLOCKCLIENT");
- break;
- case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "CFC");
+ break;
+ case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PXPPCICLOCKCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "CFC");
_print_parity(bp,
CFC_REG_CFC_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "CDU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+ _print_next_block((*par_num)++, "CDU");
_print_parity(bp, CDU_REG_CDU_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "DMAE");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+ _print_next_block((*par_num)++, "DMAE");
_print_parity(bp,
DMAE_REG_DMAE_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "IGU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+ _print_next_block((*par_num)++, "IGU");
if (CHIP_IS_E1x(bp))
_print_parity(bp,
HC_REG_HC_PRTY_STS);
else
_print_parity(bp,
IGU_REG_IGU_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "MISC");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "MISC");
_print_parity(bp,
MISC_REG_MISC_PRTY_STS);
+ break;
}
- break;
}
/* Clear the bit */
@@ -4563,40 +4576,49 @@ static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
- bool *global, bool print)
+static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
+ int *par_num, bool *global,
+ bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ bool res = false;
+ u32 cur_bit;
+ int i;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
if (print)
- _print_next_block(par_num++, "MCP ROM");
+ _print_next_block((*par_num)++,
+ "MCP ROM");
*global = true;
+ res |= true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"MCP UMP RX");
*global = true;
+ res |= true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"MCP UMP TX");
*global = true;
+ res |= true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"MCP SCPAD");
- *global = true;
+ /* clear latched SCPAD PARITY from MCP */
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
+ 1UL << 10);
break;
}
@@ -4605,45 +4627,50 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
- int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- switch (cur_bit) {
- case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "PGLUE_B");
+ res |= true; /* Each bit is a real error! */
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PGLUE_B");
_print_parity(bp,
- PGLUE_B_REG_PGLUE_B_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "ATC");
+ PGLUE_B_REG_PGLUE_B_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "ATC");
_print_parity(bp,
ATC_REG_ATC_PRTY_STS);
+ break;
}
- break;
}
-
/* Clear the bit */
sig &= ~cur_bit;
}
}
- return par_num;
+ return res;
}
static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
u32 *sig)
{
+ bool res = false;
+
if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
(sig[1] & HW_PRTY_ASSERT_SET_1) ||
(sig[2] & HW_PRTY_ASSERT_SET_2) ||
@@ -4660,23 +4687,22 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
if (print)
netdev_err(bp->dev,
"Parity errors detected in blocks: ");
- par_num = bnx2x_check_blocks_with_parity0(bp,
- sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
- par_num = bnx2x_check_blocks_with_parity1(bp,
- sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
- par_num = bnx2x_check_blocks_with_parity2(bp,
- sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
- par_num = bnx2x_check_blocks_with_parity3(
- sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
- par_num = bnx2x_check_blocks_with_parity4(bp,
- sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
+ res |= bnx2x_check_blocks_with_parity0(bp,
+ sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
+ res |= bnx2x_check_blocks_with_parity1(bp,
+ sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
+ res |= bnx2x_check_blocks_with_parity2(bp,
+ sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
+ res |= bnx2x_check_blocks_with_parity3(bp,
+ sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
+ res |= bnx2x_check_blocks_with_parity4(bp,
+ sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
if (print)
pr_cont("\n");
+ }
- return true;
- } else
- return false;
+ return res;
}
/**
@@ -7126,7 +7152,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
int port = BP_PORT(bp);
int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
u32 low, high;
- u32 val;
+ u32 val, reg;
DP(NETIF_MSG_HW, "starting port init port %d\n", port);
@@ -7271,6 +7297,17 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
val |= CHIP_IS_E1(bp) ? 0 : 0x10;
REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
+ /* SCPAD_PARITY should NOT trigger close the gates */
+ reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
+ REG_WR(bp, reg,
+ REG_RD(bp, reg) &
+ ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
+ reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
+ REG_WR(bp, reg,
+ REG_RD(bp, reg) &
+ ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
bnx2x_init_block(bp, BLOCK_NIG, init_phase);
if (!CHIP_IS_E1x(bp)) {
@@ -9879,7 +9916,7 @@ static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
{
struct bnx2x_prev_path_list *tmp_list;
- int rc = false;
+ bool rc = false;
if (down_trylock(&bnx2x_prev_sem))
return false;
@@ -11149,6 +11186,14 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
bnx2x_get_cnic_mac_hwinfo(bp);
}
+ if (!BP_NOMCP(bp)) {
+ /* Read physical port identifier from shmem */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
+ bp->flags |= HAS_PHYS_PORT_ID;
+ }
+
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
@@ -11685,9 +11730,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
static int bnx2x_open(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- bool global = false;
- int other_engine = BP_PATH(bp) ? 0 : 1;
- bool other_load_status, load_status;
int rc;
bp->stats_init = true;
@@ -11703,6 +11745,10 @@ static int bnx2x_open(struct net_device *dev)
* Parity recovery is only relevant for PF driver.
*/
if (IS_PF(bp)) {
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ bool other_load_status, load_status;
+ bool global = false;
+
other_load_status = bnx2x_get_load_status(bp, other_engine);
load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
@@ -11746,7 +11792,7 @@ static int bnx2x_open(struct net_device *dev)
rc = bnx2x_nic_load(bp, LOAD_OPEN);
if (rc)
return rc;
- return bnx2x_open_epilog(bp);
+ return 0;
}
/* called with rtnl_lock */
@@ -12044,6 +12090,20 @@ static int bnx2x_validate_addr(struct net_device *dev)
return 0;
}
+static int bnx2x_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ if (!(bp->flags & HAS_PHYS_PORT_ID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(bp->phys_port_id);
+ memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -12073,19 +12133,15 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = bnx2x_low_latency_recv,
#endif
+ .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
{
struct device *dev = &bp->pdev->dev;
- if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
- bp->flags |= USING_DAC_FLAG;
- if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
- dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
- return -EIO;
- }
- } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
dev_err(dev, "System does not support DMA, aborting\n");
return -EIO;
}
@@ -12237,10 +12293,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!CHIP_IS_E1x(bp)) {
- dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_GSO_IPIP |
+ NETIF_F_GSO_SIT |
NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
}
@@ -12248,8 +12307,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
- if (bp->flags & USING_DAC_FLAG)
- dev->features |= NETIF_F_HIGHDMA;
+ dev->features |= NETIF_F_HIGHDMA;
/* Add Loopback capability to the device */
dev->hw_features |= NETIF_F_LOOPBACK;
@@ -12274,34 +12332,11 @@ err_out_release:
err_out_disable:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return rc;
}
-static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
- enum bnx2x_pci_bus_speed *speed)
-{
- u32 link_speed, val = 0;
-
- pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
- *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
-
- link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
-
- switch (link_speed) {
- case 3:
- *speed = BNX2X_PCI_LINK_SPEED_8000;
- break;
- case 2:
- *speed = BNX2X_PCI_LINK_SPEED_5000;
- break;
- default:
- *speed = BNX2X_PCI_LINK_SPEED_2500;
- }
-}
-
static int bnx2x_check_firmware(struct bnx2x *bp)
{
const struct firmware *firmware = bp->firmware;
@@ -12612,24 +12647,24 @@ static int set_max_cos_est(int chip_id)
return BNX2X_MULTI_TX_COS_E1X;
case BCM57712:
case BCM57712_MF:
- case BCM57712_VF:
return BNX2X_MULTI_TX_COS_E2_E3A0;
case BCM57800:
case BCM57800_MF:
- case BCM57800_VF:
case BCM57810:
case BCM57810_MF:
case BCM57840_4_10:
case BCM57840_2_20:
case BCM57840_O:
case BCM57840_MFO:
- case BCM57810_VF:
case BCM57840_MF:
- case BCM57840_VF:
case BCM57811:
case BCM57811_MF:
- case BCM57811_VF:
return BNX2X_MULTI_TX_COS_E3B0;
+ case BCM57712_VF:
+ case BCM57800_VF:
+ case BCM57810_VF:
+ case BCM57840_VF:
+ case BCM57811_VF:
return 1;
default:
pr_err("Unknown board_type (%d), aborting\n", chip_id);
@@ -12658,8 +12693,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
{
struct net_device *dev = NULL;
struct bnx2x *bp;
- int pcie_width;
- enum bnx2x_pci_bus_speed pcie_speed;
+ enum pcie_link_width pcie_width;
+ enum pci_bus_speed pcie_speed;
int rc, max_non_def_sbs;
int rx_count, tx_count, rss_count, doorbell_size;
int max_cos_est;
@@ -12808,18 +12843,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
}
-
- bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
- BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
- pcie_width, pcie_speed);
-
- BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+ if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
+ pcie_speed == PCI_SPEED_UNKNOWN ||
+ pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
+ BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
+ else
+ BNX2X_DEV_INFO(
+ "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
board_info[ent->driver_data].name,
(CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
pcie_width,
- pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
- pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
- pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
+ pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
+ pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
+ pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
"Unknown",
dev->base_addr, bp->pdev->irq, dev->dev_addr);
@@ -12838,7 +12874,6 @@ init_one_exit:
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return rc;
}
@@ -12921,7 +12956,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static void bnx2x_remove_one(struct pci_dev *pdev)
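Several probe paths in this series (b44 earlier and bnx2x_main.c above) are converted to dma_set_mask_and_coherent(), which replaces the separate dma_set_mask()/dma_set_coherent_mask() pair with a single call per mask. The conventional 64-bit-then-32-bit fallback then reads as below; this is a generic sketch, not code from either driver.

/* Generic DMA-mask fallback using the combined helper (sketch). */
static int drv_set_dma_masks(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;			/* full 64-bit DMA */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return 0;			/* 32-bit fallback */

	dev_err(dev, "no usable DMA configuration\n");
	return -EIO;
}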
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 9ad012bdd915..0216d592d0ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -470,10 +470,10 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
bnx2x_vfop_qdtor, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
cmd->block);
+ } else {
+ BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
+ return -ENOMEM;
}
- DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
- vf->abs_vfid, vfop->rc);
- return -ENOMEM;
}
static void
@@ -2018,6 +2018,8 @@ failed:
void bnx2x_iov_remove_one(struct bnx2x *bp)
{
+ int vf_idx;
+
/* if SRIOV is not enabled there's nothing to do */
if (!IS_SRIOV(bp))
return;
@@ -2026,6 +2028,18 @@ void bnx2x_iov_remove_one(struct bnx2x *bp)
pci_disable_sriov(bp->pdev);
DP(BNX2X_MSG_IOV, "sriov disabled\n");
+ /* disable access to all VFs */
+ for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
+ bnx2x_pretend_func(bp,
+ HW_VF_HANDLE(bp,
+ bp->vfdb->sriov.first_vf_in_pf +
+ vf_idx));
+ DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
+ bp->vfdb->sriov.first_vf_in_pf + vf_idx);
+ bnx2x_vf_enable_internal(bp, 0);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+ }
+
/* free vf database */
__bnx2x_iov_free_vfdb(bp);
}
@@ -2802,7 +2816,7 @@ struct set_vf_state_cookie {
u8 state;
};
-void bnx2x_set_vf_state(void *cookie)
+static void bnx2x_set_vf_state(void *cookie)
{
struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
@@ -3197,7 +3211,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
* the "acquire" messages to appear on the VF PF channel.
*/
DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
- pci_disable_sriov(bp->pdev);
+ bnx2x_disable_sriov(bp);
rc = pci_enable_sriov(bp->pdev, req_vfs);
if (rc) {
BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
@@ -3225,8 +3239,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
pci_disable_sriov(bp->pdev);
}
-int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
- struct pf_vf_bulletin_content **bulletin)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin)
{
if (bp->state != BNX2X_STATE_OPEN) {
BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3390,14 +3405,16 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete eth macs\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
/* remove existing uc list macs */
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete uc_list macs\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
/* configure the new mac to device */
@@ -3405,6 +3422,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
BNX2X_ETH_MAC, &ramrod_flags);
+out:
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}
@@ -3467,7 +3485,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
&ramrod_flags);
if (rc) {
BNX2X_ERR("failed to delete vlans\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
/* send queue update ramrod to configure default vlan and silent
@@ -3501,7 +3520,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
if (rc) {
BNX2X_ERR("failed to configure vlan\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
/* configure default vlan to vf queue and set silent
@@ -3519,18 +3539,18 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
rc = bnx2x_queue_state_change(bp, &q_params);
if (rc) {
BNX2X_ERR("Failed to configure default VLAN\n");
- return rc;
+ goto out;
}
/* clear the flag indicating that this VF needs its vlan
- * (will only be set if the HV configured th Vlan before vf was
- * and we were called because the VF came up later
+ * (will only be set if the HV configured the Vlan before vf was
+ * up and we were called because the VF came up later)
*/
+out:
vf->cfg_flags &= ~VF_CFG_VLAN;
-
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
}
- return 0;
+ return rc;
}
/* crc is the first field in the bulletin board. Compute the crc over the
@@ -3637,29 +3657,6 @@ alloc_mem_err:
return -ENOMEM;
}
-int bnx2x_open_epilog(struct bnx2x *bp)
-{
- /* Enable sriov via delayed work. This must be done via delayed work
- * because it causes the probe of the vf devices to be run, which invoke
- * register_netdevice which must have rtnl lock taken. As we are holding
- * the lock right now, that could only work if the probe would not take
- * the lock. However, as the probe of the vf may be called from other
- * contexts as well (such as passthrough to vm fails) it can't assume
- * the lock is being held for it. Using delayed work here allows the
- * probe code to simply take the lock (i.e. wait for it to be released
- * if it is being held). We only want to do this if the number of VFs
- * was set before PF driver was loaded.
- */
- if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
- }
-
- return 0;
-}
-
void bnx2x_iov_channel_down(struct bnx2x *bp)
{
int vf_idx;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 059f0d460af2..1ff6a9366629 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -782,7 +782,6 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp)
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
void bnx2x_iov_channel_down(struct bnx2x *bp);
-int bnx2x_open_epilog(struct bnx2x *bp);
#else /* CONFIG_BNX2X_SRIOV */
@@ -842,7 +841,6 @@ static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
-static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 86436c77af03..3b75070411aa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -196,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
} else if (bp->func_stx) {
*stats_comp = 0;
- bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+ bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index da16953eb2ec..9199adf32d33 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -60,6 +60,30 @@ void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
mutex_unlock(&bp->vf2pf_mutex);
}
+/* Finds a TLV by type in a TLV buffer; if found, returns a pointer to the TLV */
+static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
+ enum channel_tlvs req_tlv)
+{
+ struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+ do {
+ if (tlv->type == req_tlv)
+ return tlv;
+
+ if (!tlv->length) {
+ BNX2X_ERR("Found TLV with length 0\n");
+ return NULL;
+ }
+
+ tlvs_list += tlv->length;
+ tlv = (struct channel_tlv *)tlvs_list;
+ } while (tlv->type != CHANNEL_TLV_LIST_END);
+
+ DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
+
+ return NULL;
+}
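/* Editor's sketch, illustrative only: the layout such a length-chained TLV
 * buffer assumes — each entry records its own length, entries follow one
 * another back to back, and a LIST_END type terminates the chain. The types
 * below (struct my_tlv, the 0xffff end marker) are hypothetical stand-ins for
 * the driver's channel TLVs.
 */
#include <linux/types.h>

struct my_tlv {
	u16 type;
	u16 length;	/* length of this entry, header included */
};

static void *my_find_tlv(void *buf, u16 want)
{
	struct my_tlv *tlv = buf;

	while (tlv->type != 0xffff /* hypothetical LIST_END marker */) {
		if (tlv->type == want)
			return tlv;
		if (!tlv->length)	/* malformed chain, refuse to loop */
			return NULL;
		tlv = (struct my_tlv *)((u8 *)tlv + tlv->length);
	}
	return NULL;
}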
+
/* list the types and lengths of the tlvs on the buffer */
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
@@ -196,6 +220,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
int rc = 0, attempts = 0;
struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+ struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
u32 vf_id;
bool resources_acquired = false;
@@ -219,8 +244,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* pf 2 vf bulletin board address */
req->bulletin_addr = bp->pf2vf_bulletin_mapping;
+ /* Request physical port identifier */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
+ CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
+
/* add list termination tlv */
- bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ bnx2x_add_tlv(bp, req,
+ req->first_tlv.tl.length + sizeof(struct channel_tlv),
+ CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* output tlvs list */
@@ -287,6 +318,15 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
}
}
+ /* Retrieve physical port id (if possible) */
+ phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
+ bnx2x_search_tlv_list(bp, resp,
+ CHANNEL_TLV_PHYS_PORT_ID);
+ if (phys_port_resp) {
+ memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
+ bp->flags |= HAS_PHYS_PORT_ID;
+ }
+
/* get HW info */
bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
bp->link_params.chip_id = bp->common.chip_id;
@@ -980,56 +1020,62 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
dmae.len = len32;
/* issue the command and wait for completion */
- return bnx2x_issue_dmae_with_comp(bp, &dmae);
+ return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}
-static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
{
struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
- u64 vf_addr;
- dma_addr_t pf_addr;
u16 length, type;
- int rc;
- struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
/* prepare response */
type = mbx->first_tlv.tl.type;
length = type == CHANNEL_TLV_ACQUIRE ?
sizeof(struct pfvf_acquire_resp_tlv) :
sizeof(struct pfvf_general_resp_tlv);
- bnx2x_add_tlv(bp, resp, 0, type, length);
- resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
- bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
+ bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
+}
+
+static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+ struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+ dma_addr_t pf_addr;
+ u64 vf_addr;
+ int rc;
+
bnx2x_dp_tlv_list(bp, resp);
DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+ resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
+
/* send response */
vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
mbx->first_tlv.resp_msg_offset;
pf_addr = mbx->msg_mapping +
offsetof(struct bnx2x_vf_mbx_msg, resp);
- /* copy the response body, if there is one, before the header, as the vf
- * is sensitive to the header being written
+ /* Copy the response buffer. The first u64 is written afterwards, as
+ * the vf is sensitive to the header being written
*/
- if (resp->hdr.tl.length > sizeof(u64)) {
- length = resp->hdr.tl.length - sizeof(u64);
- vf_addr += sizeof(u64);
- pf_addr += sizeof(u64);
- rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
- U64_HI(vf_addr),
- U64_LO(vf_addr),
- length/4);
- if (rc) {
- BNX2X_ERR("Failed to copy response body to VF %d\n",
- vf->abs_vfid);
- goto mbx_error;
- }
- vf_addr -= sizeof(u64);
- pf_addr -= sizeof(u64);
+ vf_addr += sizeof(u64);
+ pf_addr += sizeof(u64);
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy response body to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
}
+ vf_addr -= sizeof(u64);
+ pf_addr -= sizeof(u64);
/* ack the FW */
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
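/* Editor's sketch of the ordering the comment above enforces: the body of the
 * response is copied first and the leading u64 (the header the VF watches) is
 * written last, so the VF can never observe a header announcing a
 * half-written message. In the driver the copies are DMA transfers; plain
 * memcpy() is used here only for illustration, and my_send_resp is a
 * hypothetical name.
 */
#include <linux/types.h>
#include <linux/string.h>

static void my_send_resp(void *vf_dst, const void *resp, size_t len)
{
	/* body first: everything past the leading u64 */
	memcpy((u8 *)vf_dst + sizeof(u64), (const u8 *)resp + sizeof(u64),
	       len - sizeof(u64));

	/* header last: writing the first u64 publishes the message */
	memcpy(vf_dst, resp, sizeof(u64));
}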
@@ -1060,6 +1106,36 @@ mbx_error:
bnx2x_vf_release(bp, vf, false); /* non blocking */
}
+static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ bnx2x_vf_mbx_resp_single_tlv(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf);
+}
+
+static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ void *buffer,
+ u16 *offset)
+{
+ struct vfpf_port_phys_id_resp_tlv *port_id;
+
+ if (!(bp->flags & HAS_PHYS_PORT_ID))
+ return;
+
+ bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
+ sizeof(struct vfpf_port_phys_id_resp_tlv));
+
+ port_id = (struct vfpf_port_phys_id_resp_tlv *)
+ (((u8 *)buffer) + *offset);
+ memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
+
+ /* The offset must keep pointing at the tail of the TLV data, since the
+ * caller continues appending TLVs after this function returns
+ */
+ *offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
+}
+
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx, int vfop_status)
{
@@ -1067,6 +1143,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
struct pf_vf_resc *resc = &resp->resc;
u8 status = bnx2x_pfvf_status_codes(vfop_status);
+ u16 length;
memset(resp, 0, sizeof(*resp));
@@ -1140,9 +1217,24 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
resc->hw_sbs[i].sb_qid);
DP_CONT(BNX2X_MSG_IOV, "]\n");
+ /* prepare response */
+ length = sizeof(struct pfvf_acquire_resp_tlv);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
+
+ /* Handle possible VF requests for physical port identifiers.
+ * 'length' should continue to indicate the offset of the first empty
+ * place in the buffer (i.e., where the next TLV should be inserted)
+ */
+ if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+ CHANNEL_TLV_PHYS_PORT_ID))
+ bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
+
+ bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
/* send the response */
vf->op_rc = vfop_status;
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf);
}
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1874,6 +1966,9 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
/* process the VF message header */
mbx->first_tlv = mbx->msg->req.first_tlv;
+ /* Clear the response buffer so stale data is not mistaken for a TLV chain */
+ memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
/* dispatch the request (will prepare the response) */
bnx2x_vf_mbx_request(bp, vf, mbx);
goto mbx_done;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 1179fe06d0c7..208568bc7a71 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -188,6 +188,12 @@ struct pfvf_acquire_resp_tlv {
} resc;
};
+struct vfpf_port_phys_id_resp_tlv {
+ struct channel_tlv tl;
+ u8 id[ETH_ALEN];
+ u8 padding[2];
+};
+
#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues
* stats will be coalesced on
* the leading RSS queue
@@ -398,6 +404,7 @@ enum channel_tlvs {
CHANNEL_TLV_PF_SET_MAC,
CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_UPDATE_RSS,
+ CHANNEL_TLV_PHYS_PORT_ID,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 99394bd49a13..f58a8b80302d 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -393,7 +393,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
csk->vlan_id = path_resp->vlan_id;
- memcpy(csk->ha, path_resp->mac_addr, 6);
+ memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
if (test_bit(SK_F_IPV6, &csk->flags))
memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
sizeof(struct in6_addr));
@@ -5572,7 +5572,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
- memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+ memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
cp->cnic_ops = &cnic_bnx2x_ops;
cp->start_hw = cnic_start_bnx2x_hw;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 0658b43e148c..ebbfe25acaa6 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -353,8 +353,8 @@ struct cnic_ulp_ops {
atomic_t ref_count;
};
-extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
-extern int cnic_unregister_driver(int ulp_type);
+int cnic_unregister_driver(int ulp_type);
#endif
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 12d961c4ebca..819d87c281bf 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 133
+#define TG3_MIN_NUM 134
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "Jul 29, 2013"
+#define DRV_MODULE_RELDATE "Sep 16, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -337,6 +337,11 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -1326,6 +1331,12 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
return err;
}
+static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
+{
+ return tg3_writephy(tp, MII_TG3_MISC_SHDW,
+ reg | val | MII_TG3_MISC_SHDW_WREN);
+}
+
static int tg3_bmcr_reset(struct tg3 *tp)
{
u32 phy_control;
@@ -1364,7 +1375,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
spin_lock_bh(&tp->lock);
- if (tg3_readphy(tp, reg, &val))
+ if (__tg3_readphy(tp, mii_id, reg, &val))
val = -EIO;
spin_unlock_bh(&tp->lock);
@@ -1379,7 +1390,7 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
spin_lock_bh(&tp->lock);
- if (tg3_writephy(tp, reg, val))
+ if (__tg3_writephy(tp, mii_id, reg, val))
ret = -EIO;
spin_unlock_bh(&tp->lock);
@@ -1397,7 +1408,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
u32 val;
struct phy_device *phydev;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
case PHY_ID_BCM50610:
case PHY_ID_BCM50610M:
@@ -1502,6 +1513,13 @@ static int tg3_mdio_init(struct tg3 *tp)
TG3_CPMU_PHY_STRAP_IS_SERDES;
if (is_serdes)
tp->phy_addr += 7;
+ } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
+ int addr;
+
+ addr = ssb_gige_get_phyaddr(tp->pdev);
+ if (addr < 0)
+ return addr;
+ tp->phy_addr = addr;
} else
tp->phy_addr = TG3_PHY_MII_ADDR;
@@ -1522,7 +1540,7 @@ static int tg3_mdio_init(struct tg3 *tp)
tp->mdio_bus->read = &tg3_mdio_read;
tp->mdio_bus->write = &tg3_mdio_write;
tp->mdio_bus->reset = &tg3_mdio_reset;
- tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
+ tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
tp->mdio_bus->irq = &tp->mdio_irq[0];
for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1543,7 +1561,7 @@ static int tg3_mdio_init(struct tg3 *tp)
return i;
}
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
if (!phydev || !phydev->drv) {
dev_warn(&tp->pdev->dev, "No PHY devices\n");
@@ -1953,7 +1971,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
u32 old_tx_mode = tp->tx_mode;
if (tg3_flag(tp, USE_PHYLIB))
- autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
+ autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
else
autoneg = tp->link_config.autoneg;
@@ -1989,7 +2007,7 @@ static void tg3_adjust_link(struct net_device *dev)
u8 oldflowctrl, linkmesg = 0;
u32 mac_mode, lcl_adv, rmt_adv;
struct tg3 *tp = netdev_priv(dev);
- struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
spin_lock_bh(&tp->lock);
@@ -2078,7 +2096,7 @@ static int tg3_phy_init(struct tg3 *tp)
/* Bring the PHY back to a known state. */
tg3_bmcr_reset(tp);
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
/* Attach the MAC to the PHY. */
phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
@@ -2105,7 +2123,7 @@ static int tg3_phy_init(struct tg3 *tp)
SUPPORTED_Asym_Pause);
break;
default:
- phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
return -EINVAL;
}
@@ -2123,7 +2141,7 @@ static void tg3_phy_start(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
@@ -2143,13 +2161,13 @@ static void tg3_phy_stop(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return;
- phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
}
static void tg3_phy_fini(struct tg3 *tp)
{
if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
- phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
}
}
@@ -2218,25 +2236,21 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
return;
}
- reg = MII_TG3_MISC_SHDW_WREN |
- MII_TG3_MISC_SHDW_SCR5_SEL |
- MII_TG3_MISC_SHDW_SCR5_LPED |
+ reg = MII_TG3_MISC_SHDW_SCR5_LPED |
MII_TG3_MISC_SHDW_SCR5_DLPTLM |
MII_TG3_MISC_SHDW_SCR5_SDTL |
MII_TG3_MISC_SHDW_SCR5_C125OE;
if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
- tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+ tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
- reg = MII_TG3_MISC_SHDW_WREN |
- MII_TG3_MISC_SHDW_APD_SEL |
- MII_TG3_MISC_SHDW_APD_WKTM_84MS;
+ reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
if (enable)
reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
- tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+ tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
@@ -4027,7 +4041,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
struct phy_device *phydev;
u32 phyid, advertising;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
@@ -9196,10 +9210,7 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
}
- if (err)
- return err;
-
- return 0;
+ return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
@@ -11035,7 +11046,18 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)
name = tp->dev->name;
else {
name = &tnapi->irq_lbl[0];
- snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
+ if (tnapi->tx_buffers && tnapi->rx_rcb)
+ snprintf(name, IFNAMSIZ,
+ "%s-txrx-%d", tp->dev->name, irq_num);
+ else if (tnapi->tx_buffers)
+ snprintf(name, IFNAMSIZ,
+ "%s-tx-%d", tp->dev->name, irq_num);
+ else if (tnapi->rx_rcb)
+ snprintf(name, IFNAMSIZ,
+ "%s-rx-%d", tp->dev->name, irq_num);
+ else
+ snprintf(name, IFNAMSIZ,
+ "%s-%d", tp->dev->name, irq_num);
name[IFNAMSIZ-1] = 0;
}
@@ -11907,7 +11929,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
return phy_ethtool_gset(phydev, cmd);
}
@@ -11974,7 +11996,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
return phy_ethtool_sset(phydev, cmd);
}
@@ -12093,12 +12115,10 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
- spin_lock_bh(&tp->lock);
if (device_may_wakeup(dp))
tg3_flag_set(tp, WOL_ENABLE);
else
tg3_flag_clear(tp, WOL_ENABLE);
- spin_unlock_bh(&tp->lock);
return 0;
}
@@ -12131,7 +12151,7 @@ static int tg3_nway_reset(struct net_device *dev)
if (tg3_flag(tp, USE_PHYLIB)) {
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
- r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
} else {
u32 bmcr;
@@ -12247,7 +12267,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
u32 newadv;
struct phy_device *phydev;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
if (!(phydev->supported & SUPPORTED_Pause) ||
(!(phydev->supported & SUPPORTED_Asym_Pause) &&
@@ -13194,8 +13214,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
return -ENOMEM;
tx_data = skb_put(skb, tx_len);
- memcpy(tx_data, tp->dev->dev_addr, 6);
- memset(tx_data + 6, 0x0, 8);
+ memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
+ memset(tx_data + ETH_ALEN, 0x0, 8);
tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
@@ -13683,7 +13703,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
return phy_mii_ioctl(phydev, ifr, cmd);
}
@@ -14921,6 +14941,12 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
LED_CTRL_MODE_PHY_2);
+
+ if (tg3_flag(tp, 5717_PLUS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
+ tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
+ LED_CTRL_BLINK_RATE_MASK;
+
break;
case SHASTA_EXT_LED_MAC:
@@ -15759,9 +15785,12 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
reg = TG3PCI_GEN2_PRODID_ASICREV;
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
@@ -16632,8 +16661,8 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp)
int len;
addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == 6) {
- memcpy(dev->dev_addr, addr, 6);
+ if (addr && len == ETH_ALEN) {
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
return 0;
}
return -ENODEV;
@@ -16643,7 +16672,7 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
struct net_device *dev = tp->dev;
- memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
return 0;
}
#endif
@@ -17052,10 +17081,6 @@ static int tg3_test_dma(struct tg3 *tp)
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
-#if 0
- /* Unneeded, already done by tg3_get_invariants. */
- tg3_switch_clocks(tp);
-#endif
if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
tg3_asic_rev(tp) != ASIC_REV_5701)
@@ -17083,20 +17108,6 @@ static int tg3_test_dma(struct tg3 *tp)
break;
}
-#if 0
- /* validate data reached card RAM correctly. */
- for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
- u32 val;
- tg3_read_mem(tp, 0x2100 + (i*4), &val);
- if (le32_to_cpu(val) != p[i]) {
- dev_err(&tp->pdev->dev,
- "%s: Buffer corrupted on device! "
- "(%d != %d)\n", __func__, val, i);
- /* ret = -ENODEV here? */
- }
- p[i] = 0;
- }
-#endif
/* Now read it back. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
if (ret) {
@@ -17362,8 +17373,10 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_flag_set(tp, FLUSH_POSTED_WRITES);
if (ssb_gige_one_dma_at_once(pdev))
tg3_flag_set(tp, ONE_DMA_AT_ONCE);
- if (ssb_gige_have_roboswitch(pdev))
+ if (ssb_gige_have_roboswitch(pdev)) {
+ tg3_flag_set(tp, USE_PHYLIB);
tg3_flag_set(tp, ROBOSWITCH);
+ }
if (ssb_gige_is_rgmii(pdev))
tg3_flag_set(tp, RGMII_MODE);
}
@@ -17409,9 +17422,12 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
tg3_flag_set(tp, ENABLE_APE);
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
@@ -17628,7 +17644,7 @@ static int tg3_init_one(struct pci_dev *pdev,
if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
struct phy_device *phydev;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
netdev_info(dev,
"attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
phydev->drv->name, dev_name(&phydev->dev));
@@ -17685,7 +17701,6 @@ err_out_free_res:
err_out_disable_pdev:
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -17717,7 +17732,6 @@ static void tg3_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 70257808aa37..5c3835aa1e1b 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -68,6 +68,9 @@
#define TG3PCI_DEVICE_TIGON3_5762 0x1687
#define TG3PCI_DEVICE_TIGON3_5725 0x1643
#define TG3PCI_DEVICE_TIGON3_5727 0x16f3
+#define TG3PCI_DEVICE_TIGON3_57764 0x1642
+#define TG3PCI_DEVICE_TIGON3_57767 0x1683
+#define TG3PCI_DEVICE_TIGON3_57787 0x1641
/* 0x04 --> 0x2c unused */
#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index b78e69e0e52a..248bc37cb41b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3212,7 +3212,6 @@ bnad_init(struct bnad *bnad,
bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
if (!bnad->bar0) {
dev_err(&pdev->dev, "ioremap for bar0 failed\n");
- pci_set_drvdata(pdev, NULL);
return -ENOMEM;
}
pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
@@ -3300,17 +3299,12 @@ bnad_pci_init(struct bnad *bnad,
err = pci_request_regions(pdev, BNAD_NAME);
if (err)
goto disable_device;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
*using_dac = true;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err)
- goto release_regions;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto release_regions;
*using_dac = false;
}
pci_set_master(pdev);
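/* Editor's sketch: the DMA-mask idiom the hunk above converts to.
 * dma_set_mask_and_coherent() sets the streaming and coherent masks in one
 * call; if the 64-bit mask is rejected the driver falls back to 32 bits.
 * my_set_dma_masks is a hypothetical wrapper, not a driver function.
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int my_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
		return 0;
	}

	*using_dac = false;
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}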
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index aefee77523f2..f7e033f8a00e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -372,38 +372,37 @@ extern u32 bnad_rxqs_per_cq;
/*
* EXTERN PROTOTYPES
*/
-extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
+u32 *cna_get_firmware_buf(struct pci_dev *pdev);
/* Netdev entry point prototypes */
-extern void bnad_set_rx_mode(struct net_device *netdev);
-extern struct net_device_stats *bnad_get_netdev_stats(
- struct net_device *netdev);
-extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
-extern int bnad_enable_default_bcast(struct bnad *bnad);
-extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
-extern void bnad_set_ethtool_ops(struct net_device *netdev);
-extern void bnad_cb_completion(void *arg, enum bfa_status status);
+void bnad_set_rx_mode(struct net_device *netdev);
+struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
+int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
+int bnad_enable_default_bcast(struct bnad *bnad);
+void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_cb_completion(void *arg, enum bfa_status status);
/* Configuration & setup */
-extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
-extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
+void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
+void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
-extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
-extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
+int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
+int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
+void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
+void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
/* Timer start/stop protos */
-extern void bnad_dim_timer_start(struct bnad *bnad);
+void bnad_dim_timer_start(struct bnad *bnad);
/* Statistics */
-extern void bnad_netdev_qstats_fill(struct bnad *bnad,
- struct rtnl_link_stats64 *stats);
-extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
- struct rtnl_link_stats64 *stats);
+void bnad_netdev_qstats_fill(struct bnad *bnad,
+ struct rtnl_link_stats64 *stats);
+void bnad_netdev_hwstats_fill(struct bnad *bnad,
+ struct rtnl_link_stats64 *stats);
/* Debugfs */
-void bnad_debugfs_init(struct bnad *bnad);
-void bnad_debugfs_uninit(struct bnad *bnad);
+void bnad_debugfs_init(struct bnad *bnad);
+void bnad_debugfs_uninit(struct bnad *bnad);
/* MACROS */
/* To set & get the stats counters */
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 78d6d6b970e1..48f52882a22b 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -106,7 +106,6 @@
#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */
#define XGMAC_ADDR_AE 0x80000000
-#define XGMAC_MAX_FILTER_ADDR 31
/* PMT Control and Status */
#define XGMAC_PMT_POINTER_RESET 0x80000000
@@ -384,6 +383,7 @@ struct xgmac_priv {
struct device *device;
struct napi_struct napi;
+ int max_macs;
struct xgmac_extra_stats xstats;
spinlock_t stats_lock;
@@ -1291,14 +1291,12 @@ static void xgmac_set_rx_mode(struct net_device *dev)
netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
netdev_mc_count(dev), netdev_uc_count(dev));
- if (dev->flags & IFF_PROMISC) {
- writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
- return;
- }
+ if (dev->flags & IFF_PROMISC)
+ value |= XGMAC_FRAME_FILTER_PR;
memset(hash_filter, 0, sizeof(hash_filter));
- if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
+ if (netdev_uc_count(dev) > priv->max_macs) {
use_hash = true;
value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
}
@@ -1321,7 +1319,7 @@ static void xgmac_set_rx_mode(struct net_device *dev)
goto out;
}
- if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
+ if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
use_hash = true;
value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
} else {
@@ -1342,8 +1340,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
}
out:
- for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
- xgmac_set_mac_addr(ioaddr, NULL, reg);
+ for (i = reg; i <= priv->max_macs; i++)
+ xgmac_set_mac_addr(ioaddr, NULL, i);
for (i = 0; i < XGMAC_NUM_HASH; i++)
writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
@@ -1761,6 +1759,13 @@ static int xgmac_probe(struct platform_device *pdev)
uid = readl(priv->base + XGMAC_VERSION);
netdev_info(ndev, "h/w version is 0x%x\n", uid);
+ /* Figure out how many valid mac address filter registers we have */
+ writel(1, priv->base + XGMAC_ADDR_HIGH(31));
+ if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
+ priv->max_macs = 31;
+ else
+ priv->max_macs = 7;
+
writel(0, priv->base + XGMAC_DMA_INTR_ENA);
ndev->irq = platform_get_irq(pdev, 0);
if (ndev->irq == -ENXIO) {
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 5ccbed1784d2..8abb46b39032 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -324,30 +324,30 @@ static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
return board_info(adap)->clock_core / 1000000;
}
-extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
-extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
-extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
-extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
-
-extern void t1_interrupts_enable(adapter_t *adapter);
-extern void t1_interrupts_disable(adapter_t *adapter);
-extern void t1_interrupts_clear(adapter_t *adapter);
-extern int t1_elmer0_ext_intr_handler(adapter_t *adapter);
-extern void t1_elmer0_ext_intr(adapter_t *adapter);
-extern int t1_slow_intr_handler(adapter_t *adapter);
-
-extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
-extern const struct board_info *t1_get_board_info(unsigned int board_id);
-extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
+int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
+int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
+
+void t1_interrupts_enable(adapter_t *adapter);
+void t1_interrupts_disable(adapter_t *adapter);
+void t1_interrupts_clear(adapter_t *adapter);
+int t1_elmer0_ext_intr_handler(adapter_t *adapter);
+void t1_elmer0_ext_intr(adapter_t *adapter);
+int t1_slow_intr_handler(adapter_t *adapter);
+
+int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+const struct board_info *t1_get_board_info(unsigned int board_id);
+const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
unsigned short ssid);
-extern int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
-extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
+int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
struct adapter_params *p);
-extern int t1_init_hw_modules(adapter_t *adapter);
-extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
-extern void t1_free_sw_modules(adapter_t *adapter);
-extern void t1_fatal_err(adapter_t *adapter);
-extern void t1_link_changed(adapter_t *adapter, int port_id);
-extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
+int t1_init_hw_modules(adapter_t *adapter);
+int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
+void t1_free_sw_modules(adapter_t *adapter);
+void t1_fatal_err(adapter_t *adapter);
+void t1_link_changed(adapter_t *adapter, int port_id);
+void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
int speed, int duplex, int pause);
#endif /* _CXGB_COMMON_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d7048db9863d..1d021059f097 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -1168,7 +1168,6 @@ out_free_dev:
pci_release_regions(pdev);
out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -1347,7 +1346,6 @@ static void remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
t1_sw_reset(pdev);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index 40c7b93ababc..eb33a31b08a0 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -499,7 +499,7 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
{
- memcpy(mac_addr, cmac->instance->mac_addr, 6);
+ memcpy(mac_addr, cmac->instance->mac_addr, ETH_ALEN);
return 0;
}
@@ -526,7 +526,7 @@ static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
*/
/* Store local copy */
- memcpy(cmac->instance->mac_addr, ma, 6);
+ memcpy(cmac->instance->mac_addr, ma, ETH_ALEN);
lo = ((u32) ma[1] << 8) | (u32) ma[0];
mid = ((u32) ma[3] << 8) | (u32) ma[2];
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index b650951791dd..45d77334d7d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3374,7 +3374,6 @@ out_release_regions:
pci_release_regions(pdev);
out_disable_device:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
out:
return err;
}
@@ -3415,7 +3414,6 @@ static void remove_one(struct pci_dev *pdev)
kfree(adapter);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/regs.h b/drivers/net/ethernet/chelsio/cxgb3/regs.h
index 6990f6c65221..81029b872bdd 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/regs.h
@@ -685,10 +685,6 @@
#define V_BUSY(x) ((x) << S_BUSY)
#define F_BUSY V_BUSY(1U)
-#define S_BUSY 31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY V_BUSY(1U)
-
#define A_MC7_EXT_MODE1 0x108
#define A_MC7_EXT_MODE2 0x10c
@@ -749,14 +745,6 @@
#define A_MC7_CAL 0x128
-#define S_BUSY 31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY V_BUSY(1U)
-
-#define S_BUSY 31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY V_BUSY(1U)
-
#define S_CAL_FAULT 30
#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
#define F_CAL_FAULT V_CAL_FAULT(1U)
@@ -815,9 +803,6 @@
#define V_OP(x) ((x) << S_OP)
#define F_OP V_OP(1U)
-#define F_OP V_OP(1U)
-#define A_SF_OP 0x6dc
-
#define A_MC7_BIST_ADDR_BEG 0x168
#define A_MC7_BIST_ADDR_END 0x16c
@@ -830,8 +815,6 @@
#define V_CONT(x) ((x) << S_CONT)
#define F_CONT V_CONT(1U)
-#define F_CONT V_CONT(1U)
-
#define A_MC7_INT_ENABLE 0x178
#define S_AE 17
@@ -1017,8 +1000,6 @@
#define V_NICMODE(x) ((x) << S_NICMODE)
#define F_NICMODE V_NICMODE(1U)
-#define F_NICMODE V_NICMODE(1U)
-
#define S_IPV6ENABLE 15
#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
#define F_IPV6ENABLE V_IPV6ENABLE(1U)
@@ -1562,27 +1543,15 @@
#define A_ULPRX_STAG_ULIMIT 0x530
#define A_ULPRX_RQ_LLIMIT 0x534
-#define A_ULPRX_RQ_LLIMIT 0x534
#define A_ULPRX_RQ_ULIMIT 0x538
-#define A_ULPRX_RQ_ULIMIT 0x538
#define A_ULPRX_PBL_LLIMIT 0x53c
#define A_ULPRX_PBL_ULIMIT 0x540
-#define A_ULPRX_PBL_ULIMIT 0x540
#define A_ULPRX_TDDP_TAGMASK 0x524
-#define A_ULPRX_RQ_LLIMIT 0x534
-#define A_ULPRX_RQ_LLIMIT 0x534
-
-#define A_ULPRX_RQ_ULIMIT 0x538
-#define A_ULPRX_RQ_ULIMIT 0x538
-
-#define A_ULPRX_PBL_ULIMIT 0x540
-#define A_ULPRX_PBL_ULIMIT 0x540
-
#define A_ULPTX_CONFIG 0x580
#define S_CFG_CQE_SOP_MASK 1
@@ -2053,8 +2022,6 @@
#define V_TMMODE(x) ((x) << S_TMMODE)
#define F_TMMODE V_TMMODE(1U)
-#define F_TMMODE V_TMMODE(1U)
-
#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
#define A_MC5_DB_FILTER_TABLE 0x710
@@ -2454,8 +2421,6 @@
#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
#define F_TXACTENABLE V_TXACTENABLE(1U)
-#define A_XGM_SERDES_CTRL0 0x8e0
-
#define S_RESET3 23
#define V_RESET3(x) ((x) << S_RESET3)
#define F_RESET3 V_RESET3(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 9c89dc8fe105..632b318eb38a 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1599,7 +1599,8 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
flits = skb_transport_offset(skb) / 8;
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
- skb->tail - skb->transport_header,
+ skb_tail_pointer(skb) -
+ skb_transport_header(skb),
adap->pdev);
if (need_skb_unmap()) {
setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index dfd1e36f5753..ecd2fb3ef695 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -48,7 +48,6 @@
#include <linux/vmalloc.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
-#include "t4_hw.h"
#define FW_VERSION_MAJOR 1
#define FW_VERSION_MINOR 4
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c73cabdbd4c0..8b929eeecd2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3983,6 +3983,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
struct net_device *event_dev;
int ret = NOTIFY_DONE;
struct bonding *bond = netdev_priv(ifa->idev->dev);
+ struct list_head *iter;
struct slave *slave;
struct pci_dev *first_pdev = NULL;
@@ -3995,7 +3996,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
* in all of them only once.
*/
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (!first_pdev) {
ret = clip_add(slave->dev, ifa, event);
/* If clip_add is success then only initialize
@@ -6074,7 +6075,6 @@ sriov:
pci_disable_device(pdev);
out_release_regions:
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -6122,7 +6122,6 @@ static void remove_one(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
} else
pci_release_regions(pdev);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 40c22e7de15c..5f90ec5f7519 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2782,11 +2782,9 @@ err_unmap_bar:
err_free_adapter:
kfree(adapter);
- pci_set_drvdata(pdev, NULL);
err_release_regions:
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
pci_clear_master(pdev);
err_disable_device:
@@ -2851,7 +2849,6 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
}
iounmap(adapter->regs);
kfree(adapter);
- pci_set_drvdata(pdev, NULL);
}
/*
@@ -2908,7 +2905,7 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
#define CH_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
-static struct pci_device_id cxgb4vf_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
CH_DEVICE(0xb000, 0), /* PE10K FPGA */
CH_DEVICE(0x4800, 0), /* T440-dbg */
CH_DEVICE(0x4801, 0), /* T420-cr */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index df296af20bd5..8475c4cda9e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1396,8 +1396,9 @@ static inline void copy_frags(struct sk_buff *skb,
* Builds an sk_buff from the given packet gather list. Returns the
* sk_buff or %NULL if sk_buff allocation failed.
*/
-struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
- unsigned int skb_len, unsigned int pull_len)
+static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len,
+ unsigned int pull_len)
{
struct sk_buff *skb;
@@ -1443,7 +1444,7 @@ out:
* Releases the pages of a packet gather list. We do not own the last
* page on the list and do not free it.
*/
-void t4vf_pktgl_free(const struct pkt_gl *gl)
+static void t4vf_pktgl_free(const struct pkt_gl *gl)
{
int frag;
@@ -1640,7 +1641,7 @@ static inline void rspq_next(struct sge_rspq *rspq)
* on this queue. If the system is under memory shortage use a fairly
* long delay to help recovery.
*/
-int process_responses(struct sge_rspq *rspq, int budget)
+static int process_responses(struct sge_rspq *rspq, int budget)
{
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
int budget_left = budget;
@@ -1893,7 +1894,7 @@ static unsigned int process_intrq(struct adapter *adapter)
* The MSI interrupt handler handles data events from SGE response queues as
* well as error and other async events as they all use the same MSI vector.
*/
-irqreturn_t t4vf_intr_msi(int irq, void *cookie)
+static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
{
struct adapter *adapter = cookie;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 7b756cf9474a..ff78dfaec508 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2309,7 +2309,6 @@ err_out_release_regions:
err_out_disable_device:
pci_disable_device(pdev);
err_out_free_netdev:
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
@@ -2338,7 +2337,6 @@ static void enic_remove(struct pci_dev *pdev)
enic_iounmap(enic);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
}
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 5f5896e522d2..7080ad6c4014 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -158,18 +158,6 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev)
/* DM9000 network board routine ---------------------------- */
-static void
-dm9000_reset(board_info_t * db)
-{
- dev_dbg(db->dev, "resetting device\n");
-
- /* RESET device */
- writeb(DM9000_NCR, db->io_addr);
- udelay(200);
- writeb(NCR_RST, db->io_data);
- udelay(200);
-}
-
/*
* Read a byte from I/O port
*/
@@ -191,6 +179,27 @@ iow(board_info_t * db, int reg, int value)
writeb(value, db->io_data);
}
+static void
+dm9000_reset(board_info_t *db)
+{
+ dev_dbg(db->dev, "resetting device\n");
+
+ /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
+ * The essential point is that we have to do a double reset, and the
+ * instruction is to set LBK into MAC internal loopback mode.
+ */
+ iow(db, DM9000_NCR, 0x03);
+ udelay(100); /* Application note says at least 20 us */
+ if (ior(db, DM9000_NCR) & 1)
+ dev_err(db->dev, "dm9000 did not respond to first reset\n");
+
+ iow(db, DM9000_NCR, 0);
+ iow(db, DM9000_NCR, 0x03);
+ udelay(100);
+ if (ior(db, DM9000_NCR) & 1)
+ dev_err(db->dev, "dm9000 did not respond to second reset\n");
+}
+
/* routines for sending block to chip */
static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
@@ -744,15 +753,20 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
static void dm9000_show_carrier(board_info_t *db,
unsigned carrier, unsigned nsr)
{
+ int lpa;
struct net_device *ndev = db->ndev;
+ struct mii_if_info *mii = &db->mii;
unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
- if (carrier)
- dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
+ if (carrier) {
+ lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
+ dev_info(db->dev,
+ "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
- (ncr & NCR_FDX) ? "full" : "half");
- else
+ (ncr & NCR_FDX) ? "full" : "half", lpa);
+ } else {
dev_info(db->dev, "%s: link down\n", ndev->name);
+ }
}
static void
@@ -890,9 +904,15 @@ dm9000_init_dm9000(struct net_device *dev)
(dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
+ iow(db, DM9000_GPR, 0);
- dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
- dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
+ /* If we are dealing with DM9000B, some extra steps are required: a
+ * manual phy reset, and setting init params.
+ */
+ if (db->type == TYPE_DM9000B) {
+ dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
+ dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
+ }
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
@@ -1603,7 +1623,7 @@ dm9000_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
mac_src = "platform data";
- memcpy(ndev->dev_addr, pdata->dev_addr, 6);
+ memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index eaab73cf27ca..38148b0e3a95 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -2110,7 +2110,6 @@ static void de_remove_one(struct pci_dev *pdev)
iounmap(de->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 263b92c00cbf..c05b66dfcc30 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2328,7 +2328,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev)
pci_disable_device (pdev);
}
-static struct pci_device_id de4x5_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(de4x5_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 83139307861c..5ad9e3e3c0b8 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -523,7 +523,6 @@ err_out_res:
err_out_disable:
pci_disable_device(pdev);
err_out_free:
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
return err;
@@ -548,8 +547,6 @@ static void dmfe_remove_one(struct pci_dev *pdev)
db->buf_pool_ptr, db->buf_pool_dma_ptr);
pci_release_regions(pdev);
free_netdev(dev); /* free board information */
-
- pci_set_drvdata(pdev, NULL);
}
DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 4e8cfa2ac803..add05f14b38b 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1939,7 +1939,6 @@ static void tulip_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, tp->base_addr);
free_netdev (dev);
pci_release_regions (pdev);
- pci_set_drvdata (pdev, NULL);
/* pci_power_off (pdev, -1); */
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 93845afe1cea..a5397b130724 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -429,7 +429,6 @@ err_out_release:
err_out_disable:
pci_disable_device(pdev);
err_out_free:
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
return err;
@@ -450,7 +449,6 @@ static void uli526x_remove_one(struct pci_dev *pdev)
db->buf_pool_ptr, db->buf_pool_dma_ptr);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index c7b04ecf5b49..62fe512bb216 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -468,7 +468,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_cleardev:
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
@@ -1542,8 +1541,6 @@ static void w840_remove1(struct pci_dev *pdev)
pci_iounmap(pdev, np->base_addr);
free_netdev(dev);
}
-
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 9b84cb04fe5f..ab7ebac6fbea 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -289,7 +289,6 @@ out:
err_unmap:
pci_iounmap(pdev, private->ioaddr);
reg_fail:
- pci_set_drvdata(pdev, NULL);
dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle);
tx_buf_fail:
dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle);
@@ -317,7 +316,6 @@ static void xircom_remove(struct pci_dev *pdev)
unregister_netdev(dev);
pci_iounmap(pdev, card->ioaddr);
- pci_set_drvdata(pdev, NULL);
dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle);
dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle);
free_netdev(dev);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index afa8e3af2c4d..4fb756d219f7 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1746,7 +1746,6 @@ rio_remove1 (struct pci_dev *pdev)
pci_release_regions (pdev);
pci_disable_device (pdev);
}
- pci_set_drvdata (pdev, NULL);
}
static struct pci_driver rio_driver = {
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index bf3bf6f22c99..113cd799a131 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -703,7 +703,6 @@ err_out_unmap_tx:
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_res:
pci_release_regions(pdev);
@@ -1941,7 +1940,6 @@ static void sundance_remove1(struct pci_dev *pdev)
pci_iounmap(pdev, np->base);
pci_release_regions(pdev);
free_netdev(dev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index db020230bd0b..f4825db5d179 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.9.134.0u"
+#define DRV_VER "4.9.224.0u"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -89,7 +89,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE_NUM_VLANS_SUPPORTED 64
#define BE_UMC_NUM_VLANS_SUPPORTED 15
-#define BE_MAX_EQD 96u
+#define BE_MAX_EQD 128u
#define BE_MAX_TX_FRAG_COUNT 30
#define EVNT_Q_LEN 1024
@@ -199,8 +199,37 @@ struct be_eq_obj {
u16 spurious_intr;
struct napi_struct napi;
struct be_adapter *adapter;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#define BE_EQ_IDLE 0
+#define BE_EQ_NAPI 1 /* napi owns this EQ */
+#define BE_EQ_POLL 2 /* poll owns this EQ */
+#define BE_EQ_LOCKED (BE_EQ_NAPI | BE_EQ_POLL)
+#define BE_EQ_NAPI_YIELD 4 /* napi yielded this EQ */
+#define BE_EQ_POLL_YIELD 8 /* poll yielded this EQ */
+#define BE_EQ_YIELD (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
+#define BE_EQ_USER_PEND (BE_EQ_POLL | BE_EQ_POLL_YIELD)
+ unsigned int state;
+ spinlock_t lock; /* lock to serialize napi and busy-poll */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
} ____cacheline_aligned_in_smp;
+struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
+ bool enable;
+ u32 min_eqd; /* in usecs */
+ u32 max_eqd; /* in usecs */
+ u32 prev_eqd; /* in usecs */
+ u32 et_eqd; /* configured val when aic is off */
+ ulong jiffies;
+ u64 rx_pkts_prev; /* Used to calculate RX pps */
+ u64 tx_reqs_prev; /* Used to calculate TX pps */
+};
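/* Editor's sketch (an assumption about typical use, not code from this
 * patch): how the bookkeeping fields of an adaptive-coalescing object like
 * be_aic_obj are usually consumed — sample the packet rate since the last
 * update and clamp a derived delay into [min_eqd, max_eqd]. my_aic_update and
 * the pps-to-delay scaling below are hypothetical.
 */
#include <linux/jiffies.h>
#include <linux/math64.h>

static u32 my_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_reqs)
{
	ulong now = jiffies;
	u64 pkts = (rx_pkts - aic->rx_pkts_prev) +
		   (tx_reqs - aic->tx_reqs_prev);
	u32 pps, eqd;

	if (!aic->enable)
		return aic->et_eqd;		/* fixed delay when AIC is off */

	if (!time_after(now, aic->jiffies) || !pkts)
		return aic->prev_eqd;

	pps = (u32)div64_u64(pkts * HZ, now - aic->jiffies);

	/* crude linear mapping: busier EQ -> longer delay, clamped */
	eqd = clamp_t(u32, pps / 100000, aic->min_eqd, aic->max_eqd);

	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_reqs;
	aic->jiffies = now;
	aic->prev_eqd = eqd;

	return eqd;
}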
+
+enum {
+ NAPI_POLLING,
+ BUSY_POLLING
+};
+
struct be_mcc_obj {
struct be_queue_info q;
struct be_queue_info cq;
@@ -215,6 +244,7 @@ struct be_tx_stats {
u64 tx_compl;
ulong tx_jiffies;
u32 tx_stops;
+ u32 tx_drv_drops; /* pkts dropped by driver */
struct u64_stats_sync sync;
struct u64_stats_sync sync_compl;
};
@@ -239,15 +269,12 @@ struct be_rx_page_info {
struct be_rx_stats {
u64 rx_bytes;
u64 rx_pkts;
- u64 rx_pkts_prev;
- ulong rx_jiffies;
u32 rx_drops_no_skbs; /* skb allocation errors */
u32 rx_drops_no_frags; /* HW has no fetched frags */
u32 rx_post_fail; /* page post alloc failures */
u32 rx_compl;
u32 rx_mcast_pkts;
u32 rx_compl_err; /* completions with err set */
- u32 rx_pps; /* pkts per second */
struct u64_stats_sync sync;
};
@@ -316,6 +343,11 @@ struct be_drv_stats {
u32 rx_input_fifo_overflow_drop;
u32 pmem_fifo_overflow_drop;
u32 jabber_events;
+ u32 rx_roce_bytes_lsd;
+ u32 rx_roce_bytes_msd;
+ u32 rx_roce_frames;
+ u32 roce_drops_payload_len;
+ u32 roce_drops_crc;
};
struct be_vf_cfg {
@@ -405,6 +437,7 @@ struct be_adapter {
u32 big_page_size; /* Compounded page size shared by rx wrbs */
struct be_drv_stats drv_stats;
+ struct be_aic_obj aic_obj[MAX_EVT_QS];
u16 vlans_added;
u8 vlan_tag[VLAN_N_VID];
u8 vlan_prio_bmap; /* Available Priority BitMap */
@@ -437,7 +470,6 @@ struct be_adapter {
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
bool stats_cmd_sent;
- u32 if_type;
struct {
u32 size;
u32 total_size;
@@ -472,8 +504,8 @@ struct be_adapter {
#define be_physfn(adapter) (!adapter->virtfn)
#define sriov_enabled(adapter) (adapter->num_vfs > 0)
-#define sriov_want(adapter) (be_max_vfs(adapter) && num_vfs && \
- be_physfn(adapter))
+#define sriov_want(adapter) (be_physfn(adapter) && \
+ (num_vfs || pci_num_vf(adapter->pdev)))
#define for_all_vfs(adapter, vf_cfg, i) \
for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
i++, vf_cfg++)
@@ -546,6 +578,10 @@ extern const struct ethtool_ops be_ethtool_ops;
for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
i++, eqo++)
+#define for_all_rx_queues_on_eq(adapter, eqo, rxo, i) \
+ for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\
+ i += adapter->num_evt_qs, rxo += adapter->num_evt_qs)
+
#define is_mcc_eqo(eqo) (eqo->idx == 0)
#define mcc_eqo(adapter) (&adapter->eq_obj[0])
@@ -696,27 +732,137 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
-extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
- u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
-extern void be_parse_stats(struct be_adapter *adapter);
-extern int be_load_fw(struct be_adapter *adapter, u8 *func);
-extern bool be_is_wol_supported(struct be_adapter *adapter);
-extern bool be_pause_supported(struct be_adapter *adapter);
-extern u32 be_get_fw_log_level(struct be_adapter *adapter);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline bool be_lock_napi(struct be_eq_obj *eqo)
+{
+ bool status = true;
+
+ spin_lock(&eqo->lock); /* BH is already disabled */
+ if (eqo->state & BE_EQ_LOCKED) {
+ WARN_ON(eqo->state & BE_EQ_NAPI);
+ eqo->state |= BE_EQ_NAPI_YIELD;
+ status = false;
+ } else {
+ eqo->state = BE_EQ_NAPI;
+ }
+ spin_unlock(&eqo->lock);
+ return status;
+}
+
+static inline void be_unlock_napi(struct be_eq_obj *eqo)
+{
+ spin_lock(&eqo->lock); /* BH is already disabled */
+
+ WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
+ eqo->state = BE_EQ_IDLE;
+
+ spin_unlock(&eqo->lock);
+}
+
+static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
+{
+ bool status = true;
+
+ spin_lock_bh(&eqo->lock);
+ if (eqo->state & BE_EQ_LOCKED) {
+ eqo->state |= BE_EQ_POLL_YIELD;
+ status = false;
+ } else {
+ eqo->state |= BE_EQ_POLL;
+ }
+ spin_unlock_bh(&eqo->lock);
+ return status;
+}
+
+static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
+{
+ spin_lock_bh(&eqo->lock);
+
+ WARN_ON(eqo->state & (BE_EQ_NAPI));
+ eqo->state = BE_EQ_IDLE;
+
+ spin_unlock_bh(&eqo->lock);
+}
+
+static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
+{
+ spin_lock_init(&eqo->lock);
+ eqo->state = BE_EQ_IDLE;
+}
+
+static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
+{
+ local_bh_disable();
+
+ /* It's enough to just acquire napi lock on the eqo to stop
+ * be_busy_poll() from processing any queues.
+ */
+ while (!be_lock_napi(eqo))
+ mdelay(1);
+
+ local_bh_enable();
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+
+static inline bool be_lock_napi(struct be_eq_obj *eqo)
+{
+ return true;
+}
+
+static inline void be_unlock_napi(struct be_eq_obj *eqo)
+{
+}
+
+static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
+{
+ return false;
+}
+
+static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
+{
+}
+
+static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
+{
+}
+
+static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
+{
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
+ u16 num_popped);
+void be_link_status_update(struct be_adapter *adapter, u8 link_status);
+void be_parse_stats(struct be_adapter *adapter);
+int be_load_fw(struct be_adapter *adapter, u8 *func);
+bool be_is_wol_supported(struct be_adapter *adapter);
+bool be_pause_supported(struct be_adapter *adapter);
+u32 be_get_fw_log_level(struct be_adapter *adapter);
+
+static inline int fw_major_num(const char *fw_ver)
+{
+ int fw_major = 0;
+
+ sscanf(fw_ver, "%d.", &fw_major);
+
+ return fw_major;
+}
+
int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);
/*
* internal function to initialize-cleanup roce device.
*/
-extern void be_roce_dev_add(struct be_adapter *);
-extern void be_roce_dev_remove(struct be_adapter *);
+void be_roce_dev_add(struct be_adapter *);
+void be_roce_dev_remove(struct be_adapter *);
/*
* internal function to open-close roce device during ifup-ifdown.
*/
-extern void be_roce_dev_open(struct be_adapter *);
-extern void be_roce_dev_close(struct be_adapter *);
+void be_roce_dev_open(struct be_adapter *);
+void be_roce_dev_close(struct be_adapter *);
#endif /* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index bd0e0c0bbcd8..7fb0edfe3d24 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -522,7 +522,7 @@ static u16 be_POST_stage_get(struct be_adapter *adapter)
return sem & POST_STAGE_MASK;
}
-int lancer_wait_ready(struct be_adapter *adapter)
+static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
u32 sliport_status;
@@ -1198,7 +1198,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
if (lancer_chip(adapter)) {
req->hdr.version = 1;
- req->if_id = cpu_to_le16(adapter->if_handle);
} else if (BEx_chip(adapter)) {
if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
req->hdr.version = 2;
@@ -1206,6 +1205,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
req->hdr.version = 2;
}
+ if (req->hdr.version > 0)
+ req->if_id = cpu_to_le16(adapter->if_handle);
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
req->ulp_num = BE_ULP1_NUM;
req->type = BE_ETH_TX_RING_TYPE_STANDARD;
@@ -1435,8 +1436,12 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
/* version 1 of the cmd is not supported only by BE2 */
- if (!BE2_chip(adapter))
+ if (BE2_chip(adapter))
+ hdr->version = 0;
+ if (BE3_chip(adapter) || lancer_chip(adapter))
hdr->version = 1;
+ else
+ hdr->version = 2;
be_mcc_notify(adapter);
adapter->stats_cmd_sent = true;
@@ -1718,11 +1723,12 @@ err:
/* set the EQ delay interval of an EQ to specified value
* Uses async mcc
*/
-int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
+ int num)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_modify_eq_delay *req;
- int status = 0;
+ int status = 0, i;
spin_lock_bh(&adapter->mcc_lock);
@@ -1736,13 +1742,15 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
- req->num_eq = cpu_to_le32(1);
- req->delay[0].eq_id = cpu_to_le32(eq_id);
- req->delay[0].phase = 0;
- req->delay[0].delay_multiplier = cpu_to_le32(eqd);
+ req->num_eq = cpu_to_le32(num);
+ for (i = 0; i < num; i++) {
+ req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+ req->set_eqd[i].phase = 0;
+ req->set_eqd[i].delay_multiplier =
+ cpu_to_le32(set_eqd[i].delay_multiplier);
+ }
be_mcc_notify(adapter);
-
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -3519,7 +3527,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
struct be_cmd_enable_disable_vf *req;
int status;
- if (!lancer_chip(adapter))
+ if (BEx_chip(adapter))
return 0;
spin_lock_bh(&adapter->mcc_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 108ca8abf0af..edf3e8a0ff83 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1057,14 +1057,16 @@ struct be_cmd_resp_get_flow_control {
} __packed;
/******************** Modify EQ Delay *******************/
+struct be_set_eqd {
+ u32 eq_id;
+ u32 phase;
+ u32 delay_multiplier;
+};
+
struct be_cmd_req_modify_eq_delay {
struct be_cmd_req_hdr hdr;
u32 num_eq;
- struct {
- u32 eq_id;
- u32 phase;
- u32 delay_multiplier;
- } delay[8];
+ struct be_set_eqd set_eqd[MAX_EVT_QS];
} __packed;
struct be_cmd_resp_modify_eq_delay {
@@ -1660,6 +1662,67 @@ struct be_erx_stats_v1 {
u32 rsvd[4];
};
+struct be_port_rxf_stats_v2 {
+ u32 rsvd0[10];
+ u32 roce_bytes_received_lsd;
+ u32 roce_bytes_received_msd;
+ u32 rsvd1[5];
+ u32 roce_frames_received;
+ u32 rx_crc_errors;
+ u32 rx_alignment_symbol_errors;
+ u32 rx_pause_frames;
+ u32 rx_priority_pause_frames;
+ u32 rx_control_frames;
+ u32 rx_in_range_errors;
+ u32 rx_out_range_errors;
+ u32 rx_frame_too_long;
+ u32 rx_address_filtered;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rsvd2[10];
+ u32 rx_ip_checksum_errs;
+ u32 rx_tcp_checksum_errs;
+ u32 rx_udp_checksum_errs;
+ u32 rsvd3[7];
+ u32 rx_switched_unicast_packets;
+ u32 rx_switched_multicast_packets;
+ u32 rx_switched_broadcast_packets;
+ u32 rsvd4[3];
+ u32 tx_pauseframes;
+ u32 tx_priority_pauseframes;
+ u32 tx_controlframes;
+ u32 rsvd5[10];
+ u32 rxpp_fifo_overflow_drop;
+ u32 rx_input_fifo_overflow_drop;
+ u32 pmem_fifo_overflow_drop;
+ u32 jabber_events;
+ u32 rsvd6[3];
+ u32 rx_drops_payload_size;
+ u32 rx_drops_clipped_header;
+ u32 rx_drops_crc;
+ u32 roce_drops_payload_len;
+ u32 roce_drops_crc;
+ u32 rsvd7[19];
+};
+
+struct be_rxf_stats_v2 {
+ struct be_port_rxf_stats_v2 port[4];
+ u32 rsvd0[2];
+ u32 rx_drops_no_pbuf;
+ u32 rx_drops_no_txpb;
+ u32 rx_drops_no_erx_descr;
+ u32 rx_drops_no_tpre_descr;
+ u32 rsvd1[6];
+ u32 rx_drops_too_many_frags;
+ u32 rx_drops_invalid_ring;
+ u32 forwarded_packets;
+ u32 rx_drops_mtu;
+ u32 rsvd2[35];
+};
+
struct be_hw_stats_v1 {
struct be_rxf_stats_v1 rxf;
u32 rsvd0[BE_TXP_SW_SZ];
@@ -1678,6 +1741,29 @@ struct be_cmd_resp_get_stats_v1 {
struct be_hw_stats_v1 hw_stats;
};
+struct be_erx_stats_v2 {
+ u32 rx_drops_no_fragments[136]; /* dwords 0 to 135 */
+ u32 rsvd[3];
+};
+
+struct be_hw_stats_v2 {
+ struct be_rxf_stats_v2 rxf;
+ u32 rsvd0[BE_TXP_SW_SZ];
+ struct be_erx_stats_v2 erx;
+ struct be_pmem_stats pmem;
+ u32 rsvd1[18];
+};
+
+struct be_cmd_req_get_stats_v2 {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[sizeof(struct be_hw_stats_v2)];
+};
+
+struct be_cmd_resp_get_stats_v2 {
+ struct be_cmd_resp_hdr hdr;
+ struct be_hw_stats_v2 hw_stats;
+};
+
/************** get fat capabilites *******************/
#define MAX_MODULES 27
#define MAX_MODES 4
@@ -1865,137 +1951,119 @@ struct be_cmd_resp_get_iface_list {
struct be_if_desc if_desc;
};
-extern int be_pci_fnum_get(struct be_adapter *adapter);
-extern int be_fw_wait_ready(struct be_adapter *adapter);
-extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- bool permanent, u32 if_handle, u32 pmac_id);
-extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id, u32 domain);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
- int pmac_id, u32 domain);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
- u32 en_flags, u32 *if_handle, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
- u32 domain);
-extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
-extern int be_cmd_cq_create(struct be_adapter *adapter,
- struct be_queue_info *cq, struct be_queue_info *eq,
- bool no_delay, int num_cqe_dma_coalesce);
-extern int be_cmd_mccq_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq);
-extern int be_cmd_txq_create(struct be_adapter *adapter,
- struct be_tx_obj *txo);
-extern int be_cmd_rxq_create(struct be_adapter *adapter,
- struct be_queue_info *rxq, u16 cq_id,
- u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
-extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
- int type);
-extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
- struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
- u8 *link_status, u32 dom);
-extern int be_cmd_reset(struct be_adapter *adapter);
-extern int be_cmd_get_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd);
-extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
- char *fw_on_flash);
-
-extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
-extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
- u16 *vtag_array, u32 num, bool untagged,
- bool promiscuous);
-extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
-extern int be_cmd_set_flow_control(struct be_adapter *adapter,
- u32 tx_fc, u32 rx_fc);
-extern int be_cmd_get_flow_control(struct be_adapter *adapter,
- u32 *tx_fc, u32 *rx_fc);
-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
+int be_pci_fnum_get(struct be_adapter *adapter);
+int be_fw_wait_ready(struct be_adapter *adapter);
+int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+ bool permanent, u32 if_handle, u32 pmac_id);
+int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id,
+ u32 *pmac_id, u32 domain);
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id,
+ u32 domain);
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+ u32 *if_handle, u32 domain);
+int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain);
+int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
+int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
+ struct be_queue_info *eq, bool no_delay,
+ int num_cqe_dma_coalesce);
+int be_cmd_mccq_create(struct be_adapter *adapter, struct be_queue_info *mccq,
+ struct be_queue_info *cq);
+int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo);
+int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq,
+ u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
+int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
+ int type);
+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
+int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+ u8 *link_status, u32 dom);
+int be_cmd_reset(struct be_adapter *adapter);
+int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
+int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
+ char *fw_on_flash);
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
+int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
+ u32 num, bool untagged, bool promiscuous);
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
+int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
+int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
u32 *function_mode, u32 *function_caps, u16 *asic_rev);
-extern int be_cmd_reset_function(struct be_adapter *adapter);
-extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u32 rss_hash_opts, u16 table_size);
-extern int be_process_mcc(struct be_adapter *adapter);
-extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
- u8 port_num, u8 beacon, u8 status, u8 state);
-extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
- u8 port_num, u32 *state);
-extern int be_cmd_write_flashrom(struct be_adapter *adapter,
- struct be_dma_mem *cmd, u32 flash_oper,
- u32 flash_opcode, u32 buf_size);
-extern int lancer_cmd_write_object(struct be_adapter *adapter,
- struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset,
- const char *obj_name,
- u32 *data_written, u8 *change_status,
- u8 *addn_status);
+int be_cmd_reset_function(struct be_adapter *adapter);
+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+ u32 rss_hash_opts, u16 table_size);
+int be_process_mcc(struct be_adapter *adapter);
+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
+ u8 status, u8 state);
+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
+ u32 *state);
+int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 flash_oper, u32 flash_opcode, u32 buf_size);
+int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset,
+ const char *obj_name, u32 *data_written,
+ u8 *change_status, u8 *addn_status);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset, const char *obj_name,
- u32 *data_read, u32 *eof, u8 *addn_status);
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status);
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- int offset);
-extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
- struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_fw_init(struct be_adapter *adapter);
-extern int be_cmd_fw_clean(struct be_adapter *adapter);
-extern void be_async_mcc_enable(struct be_adapter *adapter);
-extern void be_async_mcc_disable(struct be_adapter *adapter);
-extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
- u32 loopback_type, u32 pkt_size,
- u32 num_pkts, u64 pattern);
-extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
- u32 byte_cnt, struct be_dma_mem *cmd);
-extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
- u8 loopback_type, u8 enable);
-extern int be_cmd_get_phy_info(struct be_adapter *adapter);
-extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_detect_error(struct be_adapter *adapter);
-extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
-extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
-extern int be_cmd_req_native_mode(struct be_adapter *adapter);
-extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
-extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
-extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
- u32 *privilege, u32 domain);
-extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
- u32 privileges, u32 vf_num);
-extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
- bool *pmac_id_active, u32 *pmac_id,
- u8 domain);
-extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id,
- u8 *mac);
-extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
-extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
- u8 mac_count, u32 domain);
-extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id,
- u32 dom);
-extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
- u32 domain, u16 intf_id, u16 hsw_mode);
-extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
- u32 domain, u16 intf_id, u8 *mode);
-extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
-extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
- struct be_dma_mem *cmd);
-extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
- struct be_dma_mem *cmd,
- struct be_fat_conf_params *cfgs);
-extern int lancer_wait_ready(struct be_adapter *adapter);
-extern int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
-extern int lancer_initiate_dump(struct be_adapter *adapter);
-extern bool dump_present(struct be_adapter *adapter);
-extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
-extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
+ int offset);
+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+ struct be_dma_mem *nonemb_cmd);
+int be_cmd_fw_init(struct be_adapter *adapter);
+int be_cmd_fw_clean(struct be_adapter *adapter);
+void be_async_mcc_enable(struct be_adapter *adapter);
+void be_async_mcc_disable(struct be_adapter *adapter);
+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+ u32 loopback_type, u32 pkt_size, u32 num_pkts,
+ u64 pattern);
+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, u32 byte_cnt,
+ struct be_dma_mem *cmd);
+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable);
+int be_cmd_get_phy_info(struct be_adapter *adapter);
+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
+void be_detect_error(struct be_adapter *adapter);
+int be_cmd_get_die_temperature(struct be_adapter *adapter);
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
+int be_cmd_req_native_mode(struct be_adapter *adapter);
+int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
+void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
+ u32 domain);
+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+ u32 vf_num);
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ bool *pmac_id_active, u32 *pmac_id, u8 domain);
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac);
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
+ u32 domain);
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom);
+int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
+ u16 intf_id, u16 hsw_mode);
+int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
+ u16 intf_id, u8 *mode);
+int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
+int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
+ struct be_dma_mem *cmd);
+int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
+ struct be_dma_mem *cmd,
+ struct be_fat_conf_params *cfgs);
+int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
+int lancer_initiate_dump(struct be_adapter *adapter);
+bool dump_present(struct be_adapter *adapter);
+int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
+int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
int be_cmd_get_profile_config(struct be_adapter *adapter,
struct be_resources *res, u8 domain);
-extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
- u8 domain);
-extern int be_cmd_get_if_id(struct be_adapter *adapter,
- struct be_vf_cfg *vf_cfg, int vf_num);
-extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
-extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
+int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
+ int vf_num);
+int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
+int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index b440a1fac77b..08330034d9ef 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -116,7 +116,12 @@ static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(rx_drops_mtu)},
/* Number of packets dropped due to random early drop function */
{DRVSTAT_INFO(eth_red_drops)},
- {DRVSTAT_INFO(be_on_die_temperature)}
+ {DRVSTAT_INFO(be_on_die_temperature)},
+ {DRVSTAT_INFO(rx_roce_bytes_lsd)},
+ {DRVSTAT_INFO(rx_roce_bytes_msd)},
+ {DRVSTAT_INFO(rx_roce_frames)},
+ {DRVSTAT_INFO(roce_drops_payload_len)},
+ {DRVSTAT_INFO(roce_drops_crc)}
};
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
@@ -155,7 +160,9 @@ static const struct be_ethtool_stat et_tx_stats[] = {
/* Number of times the TX queue was stopped due to lack
* of spaces in the TXQ.
*/
- {DRVSTAT_TX_INFO(tx_stops)}
+ {DRVSTAT_TX_INFO(tx_stops)},
+ /* Pkts dropped in the driver's transmit path */
+ {DRVSTAT_TX_INFO(tx_drv_drops)}
};
#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
@@ -290,19 +297,19 @@ static int be_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *eqo = &adapter->eq_obj[0];
+ struct be_aic_obj *aic = &adapter->aic_obj[0];
- et->rx_coalesce_usecs = eqo->cur_eqd;
- et->rx_coalesce_usecs_high = eqo->max_eqd;
- et->rx_coalesce_usecs_low = eqo->min_eqd;
+ et->rx_coalesce_usecs = aic->prev_eqd;
+ et->rx_coalesce_usecs_high = aic->max_eqd;
+ et->rx_coalesce_usecs_low = aic->min_eqd;
- et->tx_coalesce_usecs = eqo->cur_eqd;
- et->tx_coalesce_usecs_high = eqo->max_eqd;
- et->tx_coalesce_usecs_low = eqo->min_eqd;
+ et->tx_coalesce_usecs = aic->prev_eqd;
+ et->tx_coalesce_usecs_high = aic->max_eqd;
+ et->tx_coalesce_usecs_low = aic->min_eqd;
- et->use_adaptive_rx_coalesce = eqo->enable_aic;
- et->use_adaptive_tx_coalesce = eqo->enable_aic;
+ et->use_adaptive_rx_coalesce = aic->enable;
+ et->use_adaptive_tx_coalesce = aic->enable;
return 0;
}
@@ -314,14 +321,17 @@ static int be_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_aic_obj *aic = &adapter->aic_obj[0];
struct be_eq_obj *eqo;
int i;
for_all_evt_queues(adapter, eqo, i) {
- eqo->enable_aic = et->use_adaptive_rx_coalesce;
- eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
- eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
- eqo->eqd = et->rx_coalesce_usecs;
+ aic->enable = et->use_adaptive_rx_coalesce;
+ aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
+ aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
+ aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
+ aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
+ aic++;
}
return 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 2c38cc402119..cb2bb6fccbc8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -22,6 +22,7 @@
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
+#include <net/busy_poll.h>
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -306,10 +307,14 @@ static void *hw_stats_from_cmd(struct be_adapter *adapter)
struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
return &cmd->hw_stats;
- } else {
+ } else if (BE3_chip(adapter)) {
struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
return &cmd->hw_stats;
+ } else {
+ struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
}
}
@@ -320,10 +325,14 @@ static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
return &hw_stats->erx;
- } else {
+ } else if (BE3_chip(adapter)) {
struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
return &hw_stats->erx;
+ } else {
+ struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
}
}
@@ -422,6 +431,60 @@ static void populate_be_v1_stats(struct be_adapter *adapter)
adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
+static void populate_be_v2_stats(struct be_adapter *adapter)
+{
+ struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+ struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+ struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
+ struct be_port_rxf_stats_v2 *port_stats =
+ &rxf_stats->port[adapter->port_num];
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+
+ be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
+ drvs->rx_control_frames = port_stats->rx_control_frames;
+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+ drvs->rx_dropped_header_too_small =
+ port_stats->rx_dropped_header_too_small;
+ drvs->rx_input_fifo_overflow_drop =
+ port_stats->rx_input_fifo_overflow_drop;
+ drvs->rx_address_filtered = port_stats->rx_address_filtered;
+ drvs->rx_alignment_symbol_errors =
+ port_stats->rx_alignment_symbol_errors;
+ drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
+ drvs->tx_controlframes = port_stats->tx_controlframes;
+ drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
+ drvs->jabber_events = port_stats->jabber_events;
+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+ drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+ drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+ if (be_roce_supported(adapter)) {
+ drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
+ drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
+ drvs->rx_roce_frames = port_stats->roce_frames_received;
+ drvs->roce_drops_crc = port_stats->roce_drops_crc;
+ drvs->roce_drops_payload_len =
+ port_stats->roce_drops_payload_len;
+ }
+}
+
static void populate_lancer_stats(struct be_adapter *adapter)
{
@@ -489,7 +552,7 @@ static void populate_erx_stats(struct be_adapter *adapter,
void be_parse_stats(struct be_adapter *adapter)
{
- struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
+ struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
struct be_rx_obj *rxo;
int i;
u32 erx_stat;
@@ -499,11 +562,13 @@ void be_parse_stats(struct be_adapter *adapter)
} else {
if (BE2_chip(adapter))
populate_be_v0_stats(adapter);
- else
- /* for BE3 and Skyhawk */
+ else if (BE3_chip(adapter))
+ /* for BE3 */
populate_be_v1_stats(adapter);
+ else
+ populate_be_v2_stats(adapter);
- /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
+ /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
for_all_rx_queues(adapter, rxo, i) {
erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
populate_erx_stats(adapter, rxo, erx_stat);
@@ -935,8 +1000,10 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
u32 start = txq->head;
skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
- if (!skb)
+ if (!skb) {
+ tx_stats(txo)->tx_drv_drops++;
return NETDEV_TX_OK;
+ }
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
@@ -965,6 +1032,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
} else {
txq->head = start;
+ tx_stats(txo)->tx_drv_drops++;
dev_kfree_skb_any(skb);
}
return NETDEV_TX_OK;
@@ -1275,53 +1343,79 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
return status;
}
-static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
+static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
+ ulong now)
{
- struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
- ulong now = jiffies;
- ulong delta = now - stats->rx_jiffies;
- u64 pkts;
- unsigned int start, eqd;
+ aic->rx_pkts_prev = rx_pkts;
+ aic->tx_reqs_prev = tx_pkts;
+ aic->jiffies = now;
+}
- if (!eqo->enable_aic) {
- eqd = eqo->eqd;
- goto modify_eqd;
- }
+static void be_eqd_update(struct be_adapter *adapter)
+{
+ struct be_set_eqd set_eqd[MAX_EVT_QS];
+ int eqd, i, num = 0, start;
+ struct be_aic_obj *aic;
+ struct be_eq_obj *eqo;
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ u64 rx_pkts, tx_pkts;
+ ulong now;
+ u32 pps, delta;
- if (eqo->idx >= adapter->num_rx_qs)
- return;
+ for_all_evt_queues(adapter, eqo, i) {
+ aic = &adapter->aic_obj[eqo->idx];
+ if (!aic->enable) {
+ if (aic->jiffies)
+ aic->jiffies = 0;
+ eqd = aic->et_eqd;
+ goto modify_eqd;
+ }
- stats = rx_stats(&adapter->rx_obj[eqo->idx]);
+ rxo = &adapter->rx_obj[eqo->idx];
+ do {
+ start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
+ rx_pkts = rxo->stats.rx_pkts;
+ } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
- /* Wrapped around */
- if (time_before(now, stats->rx_jiffies)) {
- stats->rx_jiffies = now;
- return;
- }
+ txo = &adapter->tx_obj[eqo->idx];
+ do {
+ start = u64_stats_fetch_begin_bh(&txo->stats.sync);
+ tx_pkts = txo->stats.tx_reqs;
+ } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
- /* Update once a second */
- if (delta < HZ)
- return;
- do {
- start = u64_stats_fetch_begin_bh(&stats->sync);
- pkts = stats->rx_pkts;
- } while (u64_stats_fetch_retry_bh(&stats->sync, start));
-
- stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
- stats->rx_pkts_prev = pkts;
- stats->rx_jiffies = now;
- eqd = (stats->rx_pps / 110000) << 3;
- eqd = min(eqd, eqo->max_eqd);
- eqd = max(eqd, eqo->min_eqd);
- if (eqd < 10)
- eqd = 0;
+ /* Skip, if wrapped around or first calculation */
+ now = jiffies;
+ if (!aic->jiffies || time_before(now, aic->jiffies) ||
+ rx_pkts < aic->rx_pkts_prev ||
+ tx_pkts < aic->tx_reqs_prev) {
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
+ continue;
+ }
+
+ delta = jiffies_to_msecs(now - aic->jiffies);
+ pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+ (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+ eqd = (pps / 15000) << 2;
+
+ if (eqd < 8)
+ eqd = 0;
+ eqd = min_t(u32, eqd, aic->max_eqd);
+ eqd = max_t(u32, eqd, aic->min_eqd);
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
- if (eqd != eqo->cur_eqd) {
- be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
- eqo->cur_eqd = eqd;
+ if (eqd != aic->prev_eqd) {
+ set_eqd[num].delay_multiplier = (eqd * 65)/100;
+ set_eqd[num].eq_id = eqo->q.id;
+ aic->prev_eqd = eqd;
+ num++;
+ }
}
+
+ if (num)
+ be_cmd_modify_eqd(adapter, set_eqd, num);
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
@@ -1463,7 +1557,7 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
-static void be_rx_compl_process(struct be_rx_obj *rxo,
+static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
struct be_rx_compl_info *rxcp)
{
struct be_adapter *adapter = rxo->adapter;
@@ -1488,7 +1582,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (netdev->features & NETIF_F_RXHASH)
skb->rxhash = rxcp->rss_hash;
-
+ skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
@@ -1546,6 +1640,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (adapter->netdev->features & NETIF_F_RXHASH)
skb->rxhash = rxcp->rss_hash;
+ skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
@@ -1726,6 +1821,8 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
if (posted) {
atomic_add(posted, &rxq->used);
+ if (rxo->rx_post_starved)
+ rxo->rx_post_starved = false;
be_rxq_notify(adapter, rxq->id, posted);
} else if (atomic_read(&rxq->used) == 0) {
/* Let be_worker replenish when memory is available */
@@ -1928,6 +2025,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
if (eqo->q.created) {
be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi);
}
be_queue_free(adapter, &eqo->q);
@@ -1938,6 +2036,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
{
struct be_queue_info *eq;
struct be_eq_obj *eqo;
+ struct be_aic_obj *aic;
int i, rc;
adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
@@ -1946,11 +2045,13 @@ static int be_evt_queues_create(struct be_adapter *adapter)
for_all_evt_queues(adapter, eqo, i) {
netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
BE_NAPI_WEIGHT);
+ napi_hash_add(&eqo->napi);
+ aic = &adapter->aic_obj[i];
eqo->adapter = adapter;
eqo->tx_budget = BE_TX_BUDGET;
eqo->idx = i;
- eqo->max_eqd = BE_MAX_EQD;
- eqo->enable_aic = true;
+ aic->max_eqd = BE_MAX_EQD;
+ aic->enable = true;
eq = &eqo->q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
@@ -2167,7 +2268,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
}
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
- int budget)
+ int budget, int polling)
{
struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq;
@@ -2198,10 +2299,12 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
goto loop_continue;
}
- if (do_gro(rxcp))
+ /* Don't do gro when we're busy_polling */
+ if (do_gro(rxcp) && polling != BUSY_POLLING)
be_rx_compl_process_gro(rxo, napi, rxcp);
else
- be_rx_compl_process(rxo, rxcp);
+ be_rx_compl_process(rxo, napi, rxcp);
+
loop_continue:
be_rx_stats_update(rxo, rxcp);
}
@@ -2209,7 +2312,11 @@ loop_continue:
if (work_done) {
be_cq_notify(adapter, rx_cq->id, true, work_done);
- if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
+ /* When an rx-obj gets into post_starved state, just
+ * let be_worker do the posting.
+ */
+ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
+ !rxo->rx_post_starved)
be_post_rx_frags(rxo, GFP_ATOMIC);
}
@@ -2254,6 +2361,7 @@ int be_poll(struct napi_struct *napi, int budget)
struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
struct be_adapter *adapter = eqo->adapter;
int max_work = 0, work, i, num_evts;
+ struct be_rx_obj *rxo;
bool tx_done;
num_evts = events_get(eqo);
@@ -2266,13 +2374,18 @@ int be_poll(struct napi_struct *napi, int budget)
max_work = budget;
}
- /* This loop will iterate twice for EQ0 in which
- * completions of the last RXQ (default one) are also processed
- * For other EQs the loop iterates only once
- */
- for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
- work = be_process_rx(&adapter->rx_obj[i], napi, budget);
- max_work = max(work, max_work);
+ if (be_lock_napi(eqo)) {
+ /* This loop will iterate twice for EQ0 in which
+ * completions of the last RXQ (default one) are also processed
+ * For other EQs the loop iterates only once
+ */
+ for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+ work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
+ max_work = max(work, max_work);
+ }
+ be_unlock_napi(eqo);
+ } else {
+ max_work = budget;
}
if (is_mcc_eqo(eqo))
@@ -2288,6 +2401,28 @@ int be_poll(struct napi_struct *napi, int budget)
return max_work;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int be_busy_poll(struct napi_struct *napi)
+{
+ struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter = eqo->adapter;
+ struct be_rx_obj *rxo;
+ int i, work = 0;
+
+ if (!be_lock_busy_poll(eqo))
+ return LL_FLUSH_BUSY;
+
+ for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+ work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
+ if (work)
+ break;
+ }
+
+ be_unlock_busy_poll(eqo);
+ return work;
+}
+#endif
+
void be_detect_error(struct be_adapter *adapter)
{
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
@@ -2519,9 +2654,11 @@ static int be_close(struct net_device *netdev)
be_roce_dev_close(adapter);
- if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
- for_all_evt_queues(adapter, eqo, i)
+ for_all_evt_queues(adapter, eqo, i) {
+ if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
napi_disable(&eqo->napi);
+ be_disable_busy_poll(eqo);
+ }
adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
}
@@ -2632,6 +2769,7 @@ static int be_open(struct net_device *netdev)
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
+ be_enable_busy_poll(eqo);
be_eq_notify(adapter, eqo->q.id, true, false, 0);
}
adapter->flags |= BE_FLAGS_NAPI_ENABLED;
@@ -2937,7 +3075,8 @@ static int be_vf_setup(struct be_adapter *adapter)
goto err;
vf_cfg->def_vid = def_vlan;
- be_cmd_enable_vf(adapter, vf + 1);
+ if (!old_vfs)
+ be_cmd_enable_vf(adapter, vf + 1);
}
if (!old_vfs) {
@@ -2962,12 +3101,12 @@ static void BEx_get_resources(struct be_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
bool use_sriov = false;
- if (BE3_chip(adapter) && be_physfn(adapter)) {
+ if (BE3_chip(adapter) && sriov_want(adapter)) {
int max_vfs;
max_vfs = pci_sriov_get_totalvfs(pdev);
res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
- use_sriov = res->max_vfs && num_vfs;
+ use_sriov = res->max_vfs;
}
if (be_physfn(adapter))
@@ -2983,8 +3122,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
res->max_vlans = BE_NUM_VLANS_SUPPORTED;
res->max_mcast_mac = BE_MAX_MC;
+ /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
- !be_physfn(adapter))
+ !be_physfn(adapter) || (adapter->port_num > 1))
res->max_tx_qs = 1;
else
res->max_tx_qs = BE3_MAX_TX_QS;
@@ -3026,14 +3166,6 @@ static int be_get_resources(struct be_adapter *adapter)
adapter->res = res;
}
- /* For BE3 only check if FW suggests a different max-txqs value */
- if (BE3_chip(adapter)) {
- status = be_cmd_get_profile_config(adapter, &res, 0);
- if (!status && res.max_tx_qs)
- adapter->res.max_tx_qs =
- min(adapter->res.max_tx_qs, res.max_tx_qs);
- }
-
/* For Lancer, SH etc read per-function resource limits from FW.
* GET_FUNC_CONFIG returns per function guaranteed limits.
* GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
@@ -3247,6 +3379,12 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
+ if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
+ dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
+ adapter->fw_ver);
+ dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
+ }
+
if (adapter->vlans_added)
be_vid_config(adapter);
@@ -3258,7 +3396,7 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- if (be_physfn(adapter) && num_vfs) {
+ if (sriov_want(adapter)) {
if (be_max_vfs(adapter))
be_vf_setup(adapter);
else
@@ -3900,6 +4038,9 @@ static const struct net_device_ops be_netdev_ops = {
#endif
.ndo_bridge_setlink = be_ndo_bridge_setlink,
.ndo_bridge_getlink = be_ndo_bridge_getlink,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = be_busy_poll
+#endif
};
static void be_netdev_init(struct net_device *netdev)
@@ -3960,11 +4101,6 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter)
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
- u32 sli_intf;
-
- pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
- adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
- SLI_INTF_IF_TYPE_SHIFT;
if (BEx_chip(adapter) && be_physfn(adapter)) {
adapter->csr = pci_iomap(adapter->pdev, 2, 0);
@@ -4077,9 +4213,11 @@ static int be_stats_init(struct be_adapter *adapter)
cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
else if (BE2_chip(adapter))
cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
- else
- /* BE3 and Skyhawk */
+ else if (BE3_chip(adapter))
cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+ else
+ /* ALL non-BE ASICs */
+ cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
GFP_KERNEL);
@@ -4113,7 +4251,6 @@ static void be_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
- pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -4262,7 +4399,6 @@ static void be_worker(struct work_struct *work)
struct be_adapter *adapter =
container_of(work, struct be_adapter, work.work);
struct be_rx_obj *rxo;
- struct be_eq_obj *eqo;
int i;
/* when interrupts are not yet enabled, just reap any pending
@@ -4287,14 +4423,14 @@ static void be_worker(struct work_struct *work)
be_cmd_get_die_temperature(adapter);
for_all_rx_queues(adapter, rxo, i) {
- if (rxo->rx_post_starved) {
- rxo->rx_post_starved = false;
+ /* Replenish RX-queues starved due to memory
+ * allocation failures.
+ */
+ if (rxo->rx_post_starved)
be_post_rx_frags(rxo, GFP_KERNEL);
- }
}
- for_all_evt_queues(adapter, eqo, i)
- be_eqd_update(adapter, eqo);
+ be_eqd_update(adapter);
reschedule:
adapter->work_counter++;
@@ -4351,28 +4487,22 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
adapter->netdev = netdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
- status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!status) {
- status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (status < 0) {
- dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
- goto free_netdev;
- }
netdev->features |= NETIF_F_HIGHDMA;
} else {
- status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (!status)
- status = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
+ status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (status) {
dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
goto free_netdev;
}
}
- status = pci_enable_pcie_error_reporting(pdev);
- if (status)
- dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
+ if (be_physfn(adapter)) {
+ status = pci_enable_pcie_error_reporting(pdev);
+ if (!status)
+ dev_info(&pdev->dev, "PCIe error reporting enabled\n");
+ }
status = be_ctrl_init(adapter);
if (status)
@@ -4443,7 +4573,6 @@ ctrl_clean:
be_ctrl_cleanup(adapter);
free_netdev:
free_netdev(netdev);
- pci_set_drvdata(pdev, NULL);
rel_reg:
pci_release_regions(pdev);
disable_dev:
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index c706b7a9397e..4b22a9579f85 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -699,7 +699,6 @@ static void fealnx_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, np->mem);
free_netdev(dev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
} else
printk(KERN_ERR "fealnx: remove for unknown device\n");
}
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 6b60582ce8cf..56f2f608a9f4 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1083,7 +1083,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
mac_addr = of_get_mac_address(ofdev->dev.of_node);
if (mac_addr)
- memcpy(ndev->dev_addr, mac_addr, 6);
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
ret = fep->ops->allocate_bd(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 7583a9572bcc..f8b92864fc52 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -32,7 +32,9 @@
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/gfp.h>
#include <asm/immap_cpm2.h>
@@ -88,7 +90,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
struct fs_platform_info *fpi = fep->fpi;
int ret = -EINVAL;
- fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
+ fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (fep->interrupt == NO_IRQ)
goto out;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 9ae6cdbcac2e..a9a00f39521a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -31,7 +31,9 @@
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/gfp.h>
#include <asm/irq.h>
@@ -98,7 +100,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
{
struct platform_device *ofdev = to_platform_device(fep->dev);
- fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
+ fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (fep->interrupt == NO_IRQ)
return -EINVAL;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 22a02a767069..d37cd4ebac65 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -31,6 +31,8 @@
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/irq.h>
@@ -98,7 +100,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
{
struct platform_device *ofdev = to_platform_device(fep->dev);
- fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
+ fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (fep->interrupt == NO_IRQ)
return -EINVAL;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 844ecfa84d17..67caaacd19ec 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -22,6 +22,7 @@
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
+#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 2f1c46a12f05..ac5d447ff8c4 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -31,6 +31,7 @@
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/pgtable.h>
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index c4eaadeb572f..b14d7904a075 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -78,6 +78,8 @@
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
@@ -88,6 +90,7 @@
#include <asm/io.h>
#include <asm/reg.h>
+#include <asm/mpc85xx.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
@@ -939,9 +942,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
}
}
-static void gfar_detect_errata(struct gfar_private *priv)
+static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
- struct device *dev = &priv->ofdev->dev;
unsigned int pvr = mfspr(SPRN_PVR);
unsigned int svr = mfspr(SPRN_SVR);
unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
@@ -957,15 +959,33 @@ static void gfar_detect_errata(struct gfar_private *priv)
(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_76;
- /* MPC8313 and MPC837x all rev */
- if ((pvr == 0x80850010 && mod == 0x80b0) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
- priv->errata |= GFAR_ERRATA_A002;
+ /* MPC8313 Rev < 2.0 */
+ if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
+ priv->errata |= GFAR_ERRATA_12;
+}
+
+static void __gfar_detect_errata_85xx(struct gfar_private *priv)
+{
+ unsigned int svr = mfspr(SPRN_SVR);
- /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
- if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
- (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+ if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
priv->errata |= GFAR_ERRATA_12;
+ if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
+ ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
+ priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
+}
+
+static void gfar_detect_errata(struct gfar_private *priv)
+{
+ struct device *dev = &priv->ofdev->dev;
+
+ /* no plans to fix */
+ priv->errata |= GFAR_ERRATA_A002;
+
+ if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
+ __gfar_detect_errata_85xx(priv);
+ else /* non-mpc85xx parts, i.e. e300 core based */
+ __gfar_detect_errata_83xx(priv);
if (priv->errata)
dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
@@ -1599,7 +1619,7 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
/* Normally TSEC should not hang on GRS commands, so we should
* actually wait for IEVENT_GRSC flag.
*/
- if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
+ if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
return 0;
/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
@@ -2900,7 +2920,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
struct gfar_priv_rx_q *rx_queue = NULL;
int work_done = 0, work_done_per_q = 0;
int i, budget_per_q = 0;
- int has_tx_work;
+ int has_tx_work = 0;
unsigned long rstat_rxf;
int num_act_queues;
@@ -2915,62 +2935,51 @@ static int gfar_poll(struct napi_struct *napi, int budget)
if (num_act_queues)
budget_per_q = budget/num_act_queues;
- while (1) {
- has_tx_work = 0;
- for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
- tx_queue = priv->tx_queue[i];
- /* run Tx cleanup to completion */
- if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
- gfar_clean_tx_ring(tx_queue);
- has_tx_work = 1;
- }
+ for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+ tx_queue = priv->tx_queue[i];
+ /* run Tx cleanup to completion */
+ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+ gfar_clean_tx_ring(tx_queue);
+ has_tx_work = 1;
}
+ }
- for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
- /* skip queue if not active */
- if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
- continue;
-
- rx_queue = priv->rx_queue[i];
- work_done_per_q =
- gfar_clean_rx_ring(rx_queue, budget_per_q);
- work_done += work_done_per_q;
-
- /* finished processing this queue */
- if (work_done_per_q < budget_per_q) {
- /* clear active queue hw indication */
- gfar_write(&regs->rstat,
- RSTAT_CLEAR_RXF0 >> i);
- rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i);
- num_act_queues--;
-
- if (!num_act_queues)
- break;
- /* recompute budget per Rx queue */
- budget_per_q =
- (budget - work_done) / num_act_queues;
- }
- }
+ for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+ /* skip queue if not active */
+ if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
+ continue;
- if (work_done >= budget)
- break;
+ rx_queue = priv->rx_queue[i];
+ work_done_per_q =
+ gfar_clean_rx_ring(rx_queue, budget_per_q);
+ work_done += work_done_per_q;
+
+ /* finished processing this queue */
+ if (work_done_per_q < budget_per_q) {
+ /* clear active queue hw indication */
+ gfar_write(&regs->rstat,
+ RSTAT_CLEAR_RXF0 >> i);
+ num_act_queues--;
+
+ if (!num_act_queues)
+ break;
+ }
+ }
- if (!num_act_queues && !has_tx_work) {
+ if (!num_act_queues && !has_tx_work) {
- napi_complete(napi);
+ napi_complete(napi);
- /* Clear the halt bit in RSTAT */
- gfar_write(&regs->rstat, gfargrp->rstat);
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
- gfar_write(&regs->imask, IMASK_DEFAULT);
+ gfar_write(&regs->imask, IMASK_DEFAULT);
- /* If we are coalescing interrupts, update the timer
- * Otherwise, clear it
- */
- gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
- gfargrp->tx_bit_map);
- break;
- }
+ /* If we are coalescing interrupts, update the timer
+ * Otherwise, clear it
+ */
+ gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+ gfargrp->tx_bit_map);
}
return work_done;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 04112b98ff5d..114c58f9d8d2 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1177,21 +1177,21 @@ static inline void gfar_read_filer(struct gfar_private *priv,
*fpr = gfar_read(&regs->rqfpr);
}
-extern void lock_rx_qs(struct gfar_private *priv);
-extern void lock_tx_qs(struct gfar_private *priv);
-extern void unlock_rx_qs(struct gfar_private *priv);
-extern void unlock_tx_qs(struct gfar_private *priv);
-extern irqreturn_t gfar_receive(int irq, void *dev_id);
-extern int startup_gfar(struct net_device *dev);
-extern void stop_gfar(struct net_device *dev);
-extern void gfar_halt(struct net_device *dev);
-extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
- int enable, u32 regnum, u32 read);
-extern void gfar_configure_coalescing_all(struct gfar_private *priv);
+void lock_rx_qs(struct gfar_private *priv);
+void lock_tx_qs(struct gfar_private *priv);
+void unlock_rx_qs(struct gfar_private *priv);
+void unlock_tx_qs(struct gfar_private *priv);
+irqreturn_t gfar_receive(int irq, void *dev_id);
+int startup_gfar(struct net_device *dev);
+void stop_gfar(struct net_device *dev);
+void gfar_halt(struct net_device *dev);
+void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
+ u32 regnum, u32 read);
+void gfar_configure_coalescing_all(struct gfar_private *priv);
void gfar_init_sysfs(struct net_device *dev);
int gfar_set_features(struct net_device *dev, netdev_features_t features);
-extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
-extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
+void gfar_check_rx_parser_mode(struct gfar_private *priv);
+void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 5930c39672db..5548b6d00c31 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -31,6 +31,8 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
@@ -3899,7 +3901,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
mac_addr = of_get_mac_address(np);
if (mac_addr)
- memcpy(dev->dev_addr, mac_addr, 6);
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
ugeth->ug_info = ug_info;
ugeth->dev = device;
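
Replacing the literal 6 with ETH_ALEN (from <linux/if_ether.h>) states that the copy is a MAC address and keeps the length defined in one place. A small sketch of the same idiom, with a hypothetical helper name:

#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/types.h>

/* copy a hardware address using the named constant, not a magic number */
static inline void copy_hwaddr(u8 *dst, const u8 *src)
{
        memcpy(dst, src, ETH_ALEN);     /* ETH_ALEN == 6 octets */
}
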
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index c1b6e7e31aac..d449fcb90199 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/mdio.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
index 6231bc02b964..1085257385d2 100644
--- a/drivers/net/ethernet/fujitsu/Kconfig
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_FUJITSU
bool "Fujitsu devices"
default y
- depends on ISA || PCMCIA
+ depends on PCMCIA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 91227d03274e..37860096f744 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1098,7 +1098,7 @@ static int hp100_open(struct net_device *dev)
if (request_irq(dev->irq, hp100_interrupt,
lp->bus == HP100_BUS_PCI || lp->bus ==
HP100_BUS_EISA ? IRQF_SHARED : 0,
- "hp100", dev)) {
+ dev->name, dev)) {
printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
return -EAGAIN;
}
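
Passing dev->name instead of the fixed "hp100" string makes /proc/interrupts show the actual interface name (eth0, eth1, ...), which helps when several boards use the same driver. A minimal sketch of the call site, assuming the handler and flags are already known:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* hypothetical open path: label the IRQ line with the netdev's own name */
static int my_request_irq(struct net_device *dev, irq_handler_t handler,
                          unsigned long flags)
{
        int err = request_irq(dev->irq, handler, flags, dev->name, dev);

        if (err)
                netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
        return err;
}
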
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index e38816145395..a15877affc9b 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -711,7 +711,7 @@ static int init_i596_mem(struct net_device *dev)
i596_add_cmd(dev, &lp->cf_cmd.cmd);
DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
- memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
+ memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
lp->sa_cmd.cmd.command = CmdSASetup;
i596_add_cmd(dev, &lp->sa_cmd.cmd);
@@ -1155,7 +1155,7 @@ struct net_device * __init i82596_probe(int unit)
err = -ENODEV;
goto out;
}
- memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */
+ memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! Get addr from NOVRAM */
dev->base_addr = MVME_I596_BASE;
dev->irq = (unsigned) MVME16x_IRQ_I596;
goto found;
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index d653bac4cfc4..861fa15e1e81 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -607,7 +607,7 @@ static int init_i596_mem(struct net_device *dev)
i596_add_cmd(dev, &dma->cf_cmd.cmd);
DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
- memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
+ memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
i596_add_cmd(dev, &dma->sa_cmd.cmd);
@@ -1396,13 +1396,13 @@ static void set_multicast_list(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
if (!cnt--)
break;
- memcpy(cp, ha->addr, 6);
+ memcpy(cp, ha->addr, ETH_ALEN);
if (i596_debug > 1)
DEB(DEB_MULTI,
printk(KERN_DEBUG
"%s: Adding address %pM\n",
dev->name, cp));
- cp += 6;
+ cp += ETH_ALEN;
}
DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
i596_add_cmd(dev, &cmd->cmd);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 6b5c7222342c..ae342fdb42c8 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -39,6 +39,8 @@
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/slab.h>
@@ -2676,7 +2678,7 @@ static int emac_init_config(struct emac_instance *dev)
np->full_name);
return -ENXIO;
}
- memcpy(dev->ndev->dev_addr, p, 6);
+ memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h
index 59a92d5870b5..9c45efe4c8fe 100644
--- a/drivers/net/ethernet/ibm/emac/debug.h
+++ b/drivers/net/ethernet/ibm/emac/debug.h
@@ -29,13 +29,13 @@
struct emac_instance;
struct mal_instance;
-extern void emac_dbg_register(struct emac_instance *dev);
-extern void emac_dbg_unregister(struct emac_instance *dev);
-extern void mal_dbg_register(struct mal_instance *mal);
-extern void mal_dbg_unregister(struct mal_instance *mal);
-extern int emac_init_debug(void) __init;
-extern void emac_fini_debug(void) __exit;
-extern void emac_dbg_dump_all(void);
+void emac_dbg_register(struct emac_instance *dev);
+void emac_dbg_unregister(struct emac_instance *dev);
+void mal_dbg_register(struct mal_instance *mal);
+void mal_dbg_unregister(struct mal_instance *mal);
+int emac_init_debug(void) __init;
+void emac_fini_debug(void) __exit;
+void emac_dbg_dump_all(void);
# define DBG_LEVEL 1
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index dac564c25440..9d75fef6396f 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/of_irq.h>
#include "core.h"
#include <asm/dcr-regs.h>
@@ -263,7 +264,9 @@ static inline void mal_schedule_poll(struct mal_instance *mal)
{
if (likely(napi_schedule_prep(&mal->napi))) {
MAL_DBG2(mal, "schedule_poll" NL);
+ spin_lock(&mal->lock);
mal_disable_eob_irq(mal);
+ spin_unlock(&mal->lock);
__napi_schedule(&mal->napi);
} else
MAL_DBG2(mal, "already in poll" NL);
@@ -442,15 +445,13 @@ static int mal_poll(struct napi_struct *napi, int budget)
if (unlikely(mc->ops->peek_rx(mc->dev) ||
test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
MAL_DBG2(mal, "rotting packet" NL);
- if (napi_reschedule(napi))
- mal_disable_eob_irq(mal);
- else
- MAL_DBG2(mal, "already in poll list" NL);
-
- if (budget > 0)
- goto again;
- else
+ if (!napi_reschedule(napi))
goto more_work;
+
+ spin_lock_irqsave(&mal->lock, flags);
+ mal_disable_eob_irq(mal);
+ spin_unlock_irqrestore(&mal->lock, flags);
+ goto again;
}
mc->ops->poll_tx(mc->dev);
}
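
The mal.c change takes the instance lock around mal_disable_eob_irq() both when scheduling NAPI from the interrupt path and when rescheduling inside mal_poll(), so the EOB interrupt mask is never updated concurrently. The general napi_schedule_prep()/__napi_schedule() pattern looks roughly like this; device_mask_rx_irq() is a stand-in for the driver-specific masking:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct my_priv {
        struct napi_struct napi;
        spinlock_t lock;
};

/* only mask device interrupts if we actually won the right to poll */
static void my_schedule_poll(struct my_priv *priv)
{
        if (likely(napi_schedule_prep(&priv->napi))) {
                spin_lock(&priv->lock);
                device_mask_rx_irq(priv);       /* driver specific */
                spin_unlock(&priv->lock);
                __napi_schedule(&priv->napi);
        }
        /* else: a poll is already pending, nothing to do */
}
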
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index c47e23d6eeaa..4fb2f96da23b 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ethtool.h>
+#include <linux/of_address.h>
#include <asm/io.h>
#include "emac.h"
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.h b/drivers/net/ethernet/ibm/emac/rgmii.h
index 668bceeff4a2..d4f1374d1900 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.h
+++ b/drivers/net/ethernet/ibm/emac/rgmii.h
@@ -56,15 +56,15 @@ struct rgmii_instance {
#ifdef CONFIG_IBM_EMAC_RGMII
-extern int rgmii_init(void);
-extern void rgmii_exit(void);
-extern int rgmii_attach(struct platform_device *ofdev, int input, int mode);
-extern void rgmii_detach(struct platform_device *ofdev, int input);
-extern void rgmii_get_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_put_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int rgmii_get_regs_len(struct platform_device *ofdev);
-extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
+int rgmii_init(void);
+void rgmii_exit(void);
+int rgmii_attach(struct platform_device *ofdev, int input, int mode);
+void rgmii_detach(struct platform_device *ofdev, int input);
+void rgmii_get_mdio(struct platform_device *ofdev, int input);
+void rgmii_put_mdio(struct platform_device *ofdev, int input);
+void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int rgmii_get_regs_len(struct platform_device *ofdev);
+void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
#else
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index c231a4a32c4d..9f24769ed826 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -18,6 +18,7 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
+#include <linux/of_address.h>
#include <asm/io.h>
#include "emac.h"
diff --git a/drivers/net/ethernet/ibm/emac/tah.h b/drivers/net/ethernet/ibm/emac/tah.h
index 350b7096a041..4d5f336f07b3 100644
--- a/drivers/net/ethernet/ibm/emac/tah.h
+++ b/drivers/net/ethernet/ibm/emac/tah.h
@@ -72,13 +72,13 @@ struct tah_instance {
#ifdef CONFIG_IBM_EMAC_TAH
-extern int tah_init(void);
-extern void tah_exit(void);
-extern int tah_attach(struct platform_device *ofdev, int channel);
-extern void tah_detach(struct platform_device *ofdev, int channel);
-extern void tah_reset(struct platform_device *ofdev);
-extern int tah_get_regs_len(struct platform_device *ofdev);
-extern void *tah_dump_regs(struct platform_device *ofdev, void *buf);
+int tah_init(void);
+void tah_exit(void);
+int tah_attach(struct platform_device *ofdev, int channel);
+void tah_detach(struct platform_device *ofdev, int channel);
+void tah_reset(struct platform_device *ofdev);
+int tah_get_regs_len(struct platform_device *ofdev);
+void *tah_dump_regs(struct platform_device *ofdev, void *buf);
#else
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 4cdf286f7ee3..9ca67a38c062 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ethtool.h>
+#include <linux/of_address.h>
#include <asm/io.h>
#include "emac.h"
diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
index 455bfb085493..0959c55b1459 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.h
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -53,15 +53,15 @@ struct zmii_instance {
#ifdef CONFIG_IBM_EMAC_ZMII
-extern int zmii_init(void);
-extern void zmii_exit(void);
-extern int zmii_attach(struct platform_device *ofdev, int input, int *mode);
-extern void zmii_detach(struct platform_device *ofdev, int input);
-extern void zmii_get_mdio(struct platform_device *ofdev, int input);
-extern void zmii_put_mdio(struct platform_device *ofdev, int input);
-extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int zmii_get_regs_len(struct platform_device *ocpdev);
-extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
+int zmii_init(void);
+void zmii_exit(void);
+int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+void zmii_detach(struct platform_device *ofdev, int input);
+void zmii_get_mdio(struct platform_device *ofdev, int input);
+void zmii_put_mdio(struct platform_device *ofdev, int input);
+void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int zmii_get_regs_len(struct platform_device *ocpdev);
+void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
#else
# define zmii_init() 0
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 5d41aee69d16..952d795230a4 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1185,7 +1185,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
netdev_for_each_mc_addr(ha, netdev) {
/* add the multicast address to the filter table */
unsigned long mcast_addr = 0;
- memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
+ memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1370,7 +1370,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
adapter->mac_addr = 0;
- memcpy(&adapter->mac_addr, mac_addr_p, 6);
+ memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmveth_netdev_ops;
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index bdf5023724e7..25045ae07171 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2183,7 +2183,6 @@ static void ipg_remove(struct pci_dev *pdev)
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static const struct net_device_ops ipg_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ada6e210279f..cbaba4442d4b 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2985,7 +2985,6 @@ err_out_free_res:
err_out_disable_pdev:
pci_disable_device(pdev);
err_out_free_dev:
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
}
@@ -3003,7 +3002,6 @@ static void e100_remove(struct pci_dev *pdev)
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
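
The pci_set_drvdata(pdev, NULL) calls in ipg and e100 are removed because the driver core clears a device's drvdata once it is unbound (or when probe fails), so resetting it by hand is redundant. A hedged sketch of a remove path written without the call; the teardown steps are illustrative:

#include <linux/netdevice.h>
#include <linux/pci.h>

static void my_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);

        unregister_netdev(netdev);
        free_netdev(netdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        /* no pci_set_drvdata(pdev, NULL) needed; the core handles it */
}
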
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 26d9cd59ec75..58c147271a36 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -325,7 +325,7 @@ enum e1000_state_t {
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
#define e_dbg(format, arg...) \
netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
#define e_err(msglvl, format, arg...) \
@@ -346,20 +346,20 @@ extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
-extern int e1000_up(struct e1000_adapter *adapter);
-extern void e1000_down(struct e1000_adapter *adapter);
-extern void e1000_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
-extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
-extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
-extern void e1000_power_up_phy(struct e1000_adapter *);
-extern void e1000_set_ethtool_ops(struct net_device *netdev);
-extern void e1000_check_options(struct e1000_adapter *adapter);
-extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_update_stats(struct e1000_adapter *adapter);
+bool e1000_has_link(struct e1000_adapter *adapter);
+void e1000_power_up_phy(struct e1000_adapter *);
+void e1000_set_ethtool_ops(struct net_device *netdev);
+void e1000_check_options(struct e1000_adapter *adapter);
+char *e1000_get_hw_dev_name(struct e1000_hw *hw);
#endif /* _E1000_H_ */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 59ad007dd5aa..e38622825fa7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1018,19 +1018,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
pci_using_dac = 0;
if ((hw->bus_type == e1000_bus_type_pcix) &&
- !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /* according to DMA-API-HOWTO, coherent calls will always
- * succeed if the set call did
- */
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
pr_err("No usable DMA config, aborting\n");
goto err_dma;
}
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}
netdev->netdev_ops = &e1000_netdev_ops;
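
dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, which is why the separate dma_set_mask()/dma_set_coherent_mask() pairs above collapse into a simple 64-bit-then-32-bit attempt. A hedged sketch of that probe fragment with an illustrative helper:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* prefer 64-bit DMA, fall back to 32-bit, report failure otherwise */
static int my_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
        int err;

        *using_dac = true;
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                *using_dac = false;
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err)
                        dev_err(&pdev->dev, "No usable DMA configuration\n");
        }
        return err;
}
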
@@ -3917,8 +3912,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
" next_to_watch <%x>\n"
" jiffies <%lx>\n"
" next_to_watch.status <%x>\n",
- (unsigned long)((tx_ring - adapter->tx_ring) /
- sizeof(struct e1000_tx_ring)),
+ (unsigned long)(tx_ring - adapter->tx_ring),
readl(hw->hw_addr + tx_ring->tdh),
readl(hw->hw_addr + tx_ring->tdt),
tx_ring->next_to_use,
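
The hung-queue message fix above hinges on C pointer arithmetic: subtracting two pointers of the same struct type already yields a count of elements, so the extra division by sizeof(struct e1000_tx_ring) turned a valid ring index into (almost always) zero. A tiny standalone illustration:

#include <stddef.h>
#include <stdio.h>

struct ring { char pad[192]; };

int main(void)
{
        struct ring rings[4];
        struct ring *p = &rings[2];
        ptrdiff_t idx = p - rings;      /* element count: 2 */

        /* dividing idx by sizeof(*p) again would wrongly give 0 */
        printf("index = %td\n", idx);
        return 0;
}
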
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ad0edd11015d..0150f7fc893d 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -472,26 +472,25 @@ enum latency_range {
extern char e1000e_driver_name[];
extern const char e1000e_driver_version[];
-extern void e1000e_check_options(struct e1000_adapter *adapter);
-extern void e1000e_set_ethtool_ops(struct net_device *netdev);
-
-extern int e1000e_up(struct e1000_adapter *adapter);
-extern void e1000e_down(struct e1000_adapter *adapter);
-extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000e_reset(struct e1000_adapter *adapter);
-extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
-extern int e1000e_setup_rx_resources(struct e1000_ring *ring);
-extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
-extern void e1000e_free_rx_resources(struct e1000_ring *ring);
-extern void e1000e_free_tx_resources(struct e1000_ring *ring);
-extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64
- *stats);
-extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
+void e1000e_check_options(struct e1000_adapter *adapter);
+void e1000e_set_ethtool_ops(struct net_device *netdev);
+
+int e1000e_up(struct e1000_adapter *adapter);
+void e1000e_down(struct e1000_adapter *adapter);
+void e1000e_reinit_locked(struct e1000_adapter *adapter);
+void e1000e_reset(struct e1000_adapter *adapter);
+void e1000e_power_up_phy(struct e1000_adapter *adapter);
+int e1000e_setup_rx_resources(struct e1000_ring *ring);
+int e1000e_setup_tx_resources(struct e1000_ring *ring);
+void e1000e_free_rx_resources(struct e1000_ring *ring);
+void e1000e_free_tx_resources(struct e1000_ring *ring);
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_get_hw_control(struct e1000_adapter *adapter);
+void e1000e_release_hw_control(struct e1000_adapter *adapter);
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
extern unsigned int copybreak;
@@ -508,8 +507,8 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_es2_info;
-extern void e1000e_ptp_init(struct e1000_adapter *adapter);
-extern void e1000e_ptp_remove(struct e1000_adapter *adapter);
+void e1000e_ptp_init(struct e1000_adapter *adapter);
+void e1000e_ptp_remove(struct e1000_adapter *adapter);
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
@@ -536,7 +535,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
return hw->phy.ops.write_reg_locked(hw, offset, data);
}
-extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
+void e1000e_reload_nvm_generic(struct e1000_hw *hw);
static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 4ef786775acb..aedd5736a87d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6553,21 +6553,15 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index b5252eb8a6c7..1ca9834cdfda 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -46,7 +46,6 @@
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
-#include <linux/version.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
@@ -347,9 +346,9 @@ struct i40e_vsi {
u32 rx_buf_failed;
u32 rx_page_failed;
- /* These are arrays of rings, allocated at run-time */
- struct i40e_ring *rx_rings;
- struct i40e_ring *tx_rings;
+ /* These are containers of ring pointers, allocated at run-time */
+ struct i40e_ring **rx_rings;
+ struct i40e_ring **tx_rings;
u16 work_limit;
/* high bit set means dynamic, use accessor routines to read/write.
@@ -366,7 +365,7 @@ struct i40e_vsi {
u8 dtype;
/* List of q_vectors allocated to this VSI */
- struct i40e_q_vector *q_vectors;
+ struct i40e_q_vector **q_vectors;
int num_q_vectors;
int base_vector;
@@ -422,8 +421,9 @@ struct i40e_q_vector {
u8 num_ringpairs; /* total number of ring pairs in vector */
- char name[IFNAMSIZ + 9];
cpumask_t affinity_mask;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+ char name[IFNAMSIZ + 9];
} ____cacheline_internodealigned_in_smp;
/* lan device */
@@ -544,6 +544,7 @@ static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 8dbd91f64b74..ef4cb1cf31f2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -151,9 +151,7 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
- char dump_request_buf[16];
bool seid_found = false;
- int bytes_not_copied;
long seid = -1;
int buflen = 0;
int i, ret;
@@ -163,21 +161,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
/* don't allow partial writes */
if (*ppos != 0)
return 0;
- if (count >= sizeof(dump_request_buf))
- return -ENOSPC;
-
- bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
- if (bytes_not_copied < 0)
- return bytes_not_copied;
- if (bytes_not_copied > 0)
- count -= bytes_not_copied;
- dump_request_buf[count] = '\0';
/* decode the SEID given to be dumped */
- ret = kstrtol(dump_request_buf, 0, &seid);
- if (ret < 0) {
- dev_info(&pf->pdev->dev, "bad seid value '%s'\n",
- dump_request_buf);
+ ret = kstrtol_from_user(buffer, count, 0, &seid);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev, "bad seid value\n");
} else if (seid == 0) {
seid_found = true;
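
kstrtol_from_user() copies from user space and parses the integer in one step, which lets the hand-rolled bounce buffer, copy_from_user() and kstrtol() sequence above be deleted. A minimal debugfs-style write handler using it; the names are illustrative:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* parse a single signed integer written to a debugfs file */
static ssize_t my_dbg_write(struct file *filp, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        long val;
        int ret;

        if (*ppos != 0)         /* no partial writes */
                return 0;

        ret = kstrtol_from_user(buffer, count, 0, &val);
        if (ret)
                return ret;

        pr_info("debugfs wrote %ld\n", val);
        return count;
}
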
@@ -245,26 +234,33 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
memcpy(p, vsi, len);
p += len;
- len = (sizeof(struct i40e_q_vector)
- * vsi->num_q_vectors);
- memcpy(p, vsi->q_vectors, len);
- p += len;
-
- len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
- memcpy(p, vsi->tx_rings, len);
- p += len;
- memcpy(p, vsi->rx_rings, len);
- p += len;
+ if (vsi->num_q_vectors) {
+ len = (sizeof(struct i40e_q_vector)
+ * vsi->num_q_vectors);
+ memcpy(p, vsi->q_vectors, len);
+ p += len;
+ }
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- len = sizeof(struct i40e_tx_buffer);
- memcpy(p, vsi->tx_rings[i].tx_bi, len);
+ if (vsi->num_queue_pairs) {
+ len = (sizeof(struct i40e_ring) *
+ vsi->num_queue_pairs);
+ memcpy(p, vsi->tx_rings, len);
+ p += len;
+ memcpy(p, vsi->rx_rings, len);
p += len;
}
- for (i = 0; i < vsi->num_queue_pairs; i++) {
+
+ if (vsi->tx_rings[0]) {
+ len = sizeof(struct i40e_tx_buffer);
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ memcpy(p, vsi->tx_rings[i]->tx_bi, len);
+ p += len;
+ }
len = sizeof(struct i40e_rx_buffer);
- memcpy(p, vsi->rx_rings[i].rx_bi, len);
- p += len;
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ memcpy(p, vsi->rx_rings[i]->rx_bi, len);
+ p += len;
+ }
}
/* macvlan filter list */
@@ -484,100 +480,104 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed);
- if (vsi->rx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: desc = %p\n",
- i, vsi->rx_rings[i].desc);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
- i, vsi->rx_rings[i].dev,
- vsi->rx_rings[i].netdev,
- vsi->rx_rings[i].rx_bi);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, vsi->rx_rings[i].state,
- vsi->rx_rings[i].queue_index,
- vsi->rx_rings[i].reg_idx);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
- i, vsi->rx_rings[i].rx_hdr_len,
- vsi->rx_rings[i].rx_buf_len,
- vsi->rx_rings[i].dtype);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, vsi->rx_rings[i].hsplit,
- vsi->rx_rings[i].next_to_use,
- vsi->rx_rings[i].next_to_clean,
- vsi->rx_rings[i].ring_active);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
- i, vsi->rx_rings[i].rx_stats.packets,
- vsi->rx_rings[i].rx_stats.bytes,
- vsi->rx_rings[i].rx_stats.non_eop_descs);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
- i,
- vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
- vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, vsi->rx_rings[i].size,
- (long unsigned int)vsi->rx_rings[i].dma);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, vsi->rx_rings[i].vsi,
- vsi->rx_rings[i].q_vector);
- }
+ rcu_read_lock();
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+ if (!rx_ring)
+ continue;
+
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: desc = %p\n",
+ i, rx_ring->desc);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+ i, rx_ring->dev,
+ rx_ring->netdev,
+ rx_ring->rx_bi);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, rx_ring->state,
+ rx_ring->queue_index,
+ rx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+ i, rx_ring->rx_hdr_len,
+ rx_ring->rx_buf_len,
+ rx_ring->dtype);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, rx_ring->hsplit,
+ rx_ring->next_to_use,
+ rx_ring->next_to_clean,
+ rx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+ i, rx_ring->stats.packets,
+ rx_ring->stats.bytes,
+ rx_ring->rx_stats.non_eop_descs);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+ i,
+ rx_ring->rx_stats.alloc_rx_page_failed,
+ rx_ring->rx_stats.alloc_rx_buff_failed);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, rx_ring->size,
+ (long unsigned int)rx_ring->dma);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, rx_ring->vsi,
+ rx_ring->q_vector);
}
- if (vsi->tx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: desc = %p\n",
- i, vsi->tx_rings[i].desc);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
- i, vsi->tx_rings[i].dev,
- vsi->tx_rings[i].netdev,
- vsi->tx_rings[i].tx_bi);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, vsi->tx_rings[i].state,
- vsi->tx_rings[i].queue_index,
- vsi->tx_rings[i].reg_idx);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dtype = %d\n",
- i, vsi->tx_rings[i].dtype);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, vsi->tx_rings[i].hsplit,
- vsi->tx_rings[i].next_to_use,
- vsi->tx_rings[i].next_to_clean,
- vsi->tx_rings[i].ring_active);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
- i, vsi->tx_rings[i].tx_stats.packets,
- vsi->tx_rings[i].tx_stats.bytes,
- vsi->tx_rings[i].tx_stats.restart_queue);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
- i,
- vsi->tx_rings[i].tx_stats.tx_busy,
- vsi->tx_rings[i].tx_stats.completed,
- vsi->tx_rings[i].tx_stats.tx_done_old);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, vsi->tx_rings[i].size,
- (long unsigned int)vsi->tx_rings[i].dma);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, vsi->tx_rings[i].vsi,
- vsi->tx_rings[i].q_vector);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: DCB tc = %d\n",
- i, vsi->tx_rings[i].dcb_tc);
- }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ if (!tx_ring)
+ continue;
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: desc = %p\n",
+ i, tx_ring->desc);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+ i, tx_ring->dev,
+ tx_ring->netdev,
+ tx_ring->tx_bi);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, tx_ring->state,
+ tx_ring->queue_index,
+ tx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dtype = %d\n",
+ i, tx_ring->dtype);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, tx_ring->hsplit,
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+ i, tx_ring->stats.packets,
+ tx_ring->stats.bytes,
+ tx_ring->tx_stats.restart_queue);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+ i,
+ tx_ring->tx_stats.tx_busy,
+ tx_ring->tx_stats.tx_done_old);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, tx_ring->size,
+ (long unsigned int)tx_ring->dma);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, tx_ring->vsi,
+ tx_ring->q_vector);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: DCB tc = %d\n",
+ i, tx_ring->dcb_tc);
}
+ rcu_read_unlock();
dev_info(&pf->pdev->dev,
" work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
vsi->work_limit, vsi->rx_itr_setting,
@@ -587,15 +587,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
- if (vsi->q_vectors) {
- for (i = 0; i < vsi->num_q_vectors; i++) {
- dev_info(&pf->pdev->dev,
- " q_vectors[%i]: base index = %ld\n",
- i, ((long int)*vsi->q_vectors[i].rx.ring-
- (long int)*vsi->q_vectors[0].rx.ring)/
- sizeof(struct i40e_ring));
- }
- }
dev_info(&pf->pdev->dev,
" num_q_vectors = %i, base_vector = %i\n",
vsi->num_q_vectors, vsi->base_vector);
@@ -792,9 +783,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
return;
}
if (is_rx_ring)
- ring = vsi->rx_rings[ring_id];
+ ring = *vsi->rx_rings[ring_id];
else
- ring = vsi->tx_rings[ring_id];
+ ring = *vsi->tx_rings[ring_id];
if (cnt == 2) {
dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
@@ -1028,11 +1019,11 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp;
int bytes_not_copied;
struct i40e_vsi *vsi;
u8 *print_buf_start;
u8 *print_buf;
- char *cmd_buf;
int vsi_seid;
int veb_seid;
int cnt;
@@ -1051,6 +1042,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
count -= bytes_not_copied;
cmd_buf[count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
if (!print_buf_start)
goto command_write_done;
@@ -1157,9 +1154,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
i40e_veb_release(pf->veb[i]);
} else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
- u8 ma[6];
- int vlan = 0;
struct i40e_mac_filter *f;
+ int vlan = 0;
+ u8 ma[6];
int ret;
cnt = sscanf(&cmd_buf[11],
@@ -1195,8 +1192,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ma, vlan, vsi_seid, f, ret);
} else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
- u8 ma[6];
int vlan = 0;
+ u8 ma[6];
int ret;
cnt = sscanf(&cmd_buf[11],
@@ -1232,9 +1229,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ma, vlan, vsi_seid, ret);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
- int v;
- u16 vid;
i40e_status ret;
+ u16 vid;
+ int v;
cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
if (cnt != 2) {
@@ -1545,10 +1542,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
(strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
struct i40e_fdir_data fd_data;
- int ret;
u16 packet_len, i, j = 0;
char *asc_packet;
bool add = false;
+ int ret;
asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
GFP_KERNEL);
@@ -1636,9 +1633,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(&cmd_buf[5],
"get local", 9) == 0) {
+ u16 llen, rlen;
int ret, i;
u8 *buff;
- u16 llen, rlen;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1669,9 +1666,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
kfree(buff);
buff = NULL;
} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
+ u16 llen, rlen;
int ret, i;
u8 *buff;
- u16 llen, rlen;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1747,11 +1744,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- /* Read at least 512 words */
- if (buffer_len == 0)
- buffer_len = 512;
+ /* set the max length */
+ buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
bytes = 2 * buffer_len;
+
+ /* read at least 1k bytes, no more than 4kB */
+ bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
buff = kzalloc(bytes, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1903,6 +1902,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
struct i40e_pf *pf = filp->private_data;
int bytes_not_copied;
struct i40e_vsi *vsi;
+ char *buf_tmp;
int vsi_seid;
int i, cnt;
@@ -1921,6 +1921,12 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
count -= bytes_not_copied;
i40e_dbg_netdev_ops_buf[count] = '\0';
+ buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
+ if (buf_tmp) {
+ *buf_tmp = '\0';
+ count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
+ }
+
if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
if (cnt != 1) {
@@ -1996,7 +2002,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
goto netdev_ops_write_done;
}
for (i = 0; i < vsi->num_q_vectors; i++)
- napi_schedule(&vsi->q_vectors[i].napi);
+ napi_schedule(&vsi->q_vectors[i]->napi);
dev_info(&pf->pdev->dev, "napi called\n");
} else {
dev_info(&pf->pdev->dev, "unknown command '%s'\n",
@@ -2024,21 +2030,35 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
**/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
- struct dentry *pfile __attribute__((unused));
+ struct dentry *pfile;
const char *name = pci_name(pf->pdev);
+ const struct device *dev = &pf->pdev->dev;
pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
- if (pf->i40e_dbg_pf) {
- pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf,
- pf, &i40e_dbg_command_fops);
- pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
- &i40e_dbg_dump_fops);
- pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf,
- pf, &i40e_dbg_netdev_ops_fops);
- } else {
- dev_info(&pf->pdev->dev,
- "debugfs entry for %s failed\n", name);
- }
+ if (!pf->i40e_dbg_pf)
+ return;
+
+ pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_command_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_dump_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_netdev_ops_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+
+create_failed:
+ dev_info(dev, "debugfs dir/file for %s failed\n", name);
+ debugfs_remove_recursive(pf->i40e_dbg_pf);
+ return;
}
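
The reworked i40e_dbg_pf_init() checks every debugfs_create_file() return and tears the whole directory down with debugfs_remove_recursive() on failure instead of silently ignoring errors. The same goto-based shape in isolation; the file names and fops are placeholders:

#include <linux/debugfs.h>
#include <linux/errno.h>

static struct dentry *my_dbg_dir;

/* create a debugfs dir with two files; undo everything on any failure */
static int my_dbg_init(const char *name, const struct file_operations *fops,
                       void *priv)
{
        my_dbg_dir = debugfs_create_dir(name, NULL);
        if (!my_dbg_dir)
                return -ENOMEM;

        if (!debugfs_create_file("command", 0600, my_dbg_dir, priv, fops))
                goto create_failed;
        if (!debugfs_create_file("dump", 0600, my_dbg_dir, priv, fops))
                goto create_failed;

        return 0;

create_failed:
        debugfs_remove_recursive(my_dbg_dir);
        my_dbg_dir = NULL;
        return -ENOMEM;
}
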
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9a76b8cec76c..1b86138fa9e1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -399,8 +399,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = vsi->rx_rings[0].count;
- ring->tx_pending = vsi->tx_rings[0].count;
+ ring->rx_pending = vsi->rx_rings[0]->count;
+ ring->tx_pending = vsi->tx_rings[0]->count;
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
}
@@ -429,8 +429,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
- if ((new_tx_count == vsi->tx_rings[0].count) &&
- (new_rx_count == vsi->rx_rings[0].count))
+ if ((new_tx_count == vsi->tx_rings[0]->count) &&
+ (new_rx_count == vsi->rx_rings[0]->count))
return 0;
while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
@@ -439,8 +439,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (!netif_running(vsi->netdev)) {
/* simple case - set for the next time the netdev is started */
for (i = 0; i < vsi->num_queue_pairs; i++) {
- vsi->tx_rings[i].count = new_tx_count;
- vsi->rx_rings[i].count = new_rx_count;
+ vsi->tx_rings[i]->count = new_tx_count;
+ vsi->rx_rings[i]->count = new_rx_count;
}
goto done;
}
@@ -451,10 +451,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/
/* alloc updated Tx resources */
- if (new_tx_count != vsi->tx_rings[0].count) {
+ if (new_tx_count != vsi->tx_rings[0]->count) {
netdev_info(netdev,
"Changing Tx descriptor count from %d to %d.\n",
- vsi->tx_rings[0].count, new_tx_count);
+ vsi->tx_rings[0]->count, new_tx_count);
tx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!tx_rings) {
@@ -464,7 +464,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */
- tx_rings[i] = vsi->tx_rings[i];
+ tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count;
err = i40e_setup_tx_descriptors(&tx_rings[i]);
if (err) {
@@ -481,10 +481,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
}
/* alloc updated Rx resources */
- if (new_rx_count != vsi->rx_rings[0].count) {
+ if (new_rx_count != vsi->rx_rings[0]->count) {
netdev_info(netdev,
"Changing Rx descriptor count from %d to %d\n",
- vsi->rx_rings[0].count, new_rx_count);
+ vsi->rx_rings[0]->count, new_rx_count);
rx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!rx_rings) {
@@ -494,7 +494,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */
- rx_rings[i] = vsi->rx_rings[i];
+ rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err) {
@@ -517,8 +517,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (tx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_tx_resources(&vsi->tx_rings[i]);
- vsi->tx_rings[i] = tx_rings[i];
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ *vsi->tx_rings[i] = tx_rings[i];
}
kfree(tx_rings);
tx_rings = NULL;
@@ -526,8 +526,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (rx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_rx_resources(&vsi->rx_rings[i]);
- vsi->rx_rings[i] = rx_rings[i];
+ i40e_free_rx_resources(vsi->rx_rings[i]);
+ *vsi->rx_rings[i] = rx_rings[i];
}
kfree(rx_rings);
rx_rings = NULL;
@@ -579,6 +579,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
char *p;
int j;
struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
+ unsigned int start;
i40e_update_stats(vsi);
@@ -587,14 +588,30 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < vsi->num_queue_pairs; j++) {
- data[i++] = vsi->tx_rings[j].tx_stats.packets;
- data[i++] = vsi->tx_rings[j].tx_stats.bytes;
- }
- for (j = 0; j < vsi->num_queue_pairs; j++) {
- data[i++] = vsi->rx_rings[j].rx_stats.packets;
- data[i++] = vsi->rx_rings[j].rx_stats.bytes;
+ rcu_read_lock();
+ for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
+ struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+ struct i40e_ring *rx_ring;
+
+ if (!tx_ring)
+ continue;
+
+ /* process Tx ring statistics */
+ do {
+ start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+ data[i] = tx_ring->stats.packets;
+ data[i + 1] = tx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+ /* Rx ring is the 2nd half of the queue pair */
+ rx_ring = &tx_ring[1];
+ do {
+ start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+ data[i + 2] = rx_ring->stats.packets;
+ data[i + 3] = rx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
}
+ rcu_read_unlock();
if (vsi == pf->vsi[pf->lan_vsi]) {
for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
@@ -641,8 +658,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < vsi->num_queue_pairs; i++) {
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
@@ -910,8 +925,8 @@ static int i40e_set_coalesce(struct net_device *netdev,
}
vector = vsi->base_vector;
- q_vector = vsi->q_vectors;
- for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ q_vector = vsi->q_vectors[i];
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 221aa4795017..be15938ba213 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,7 +36,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 9
+#define DRV_VERSION_BUILD 11
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -347,14 +347,53 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
**/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct net_device *netdev,
- struct rtnl_link_stats64 *storage)
+ struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring, *rx_ring;
+ u64 bytes, packets;
+ unsigned int start;
+
+ tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ if (!tx_ring)
+ continue;
- *storage = *i40e_get_vsi_stats_struct(vsi);
+ do {
+ start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+ packets = tx_ring->stats.packets;
+ bytes = tx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ rx_ring = &tx_ring[1];
- return storage;
+ do {
+ start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+ packets = rx_ring->stats.packets;
+ bytes = rx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
+ rcu_read_unlock();
+
+ /* following stats updated by the i40e watchdog task */
+ stats->multicast = vsi_stats->multicast;
+ stats->tx_errors = vsi_stats->tx_errors;
+ stats->tx_dropped = vsi_stats->tx_dropped;
+ stats->rx_errors = vsi_stats->rx_errors;
+ stats->rx_crc_errors = vsi_stats->rx_crc_errors;
+ stats->rx_length_errors = vsi_stats->rx_length_errors;
+
+ return stats;
}
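
With the rings now reached through pointer arrays protected by RCU and their byte/packet counters kept under a u64_stats_sync, readers hold rcu_read_lock(), snapshot the ring pointer once (ACCESS_ONCE above), and loop on the sequence counter until a consistent pair is read. A condensed sketch of the per-ring reader, using the same _bh fetch helpers as the patch and a simplified stats struct:

#include <linux/u64_stats_sync.h>
#include <linux/types.h>

struct ring_stats {
        struct u64_stats_sync syncp;
        u64 packets;
        u64 bytes;
};

/* read one ring's counters consistently, also on 32-bit kernels */
static void read_ring_stats(struct ring_stats *ring, u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_bh(&ring->syncp);
                *packets = ring->packets;
                *bytes = ring->bytes;
        } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
}
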
/**
@@ -376,10 +415,14 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
if (vsi->rx_rings)
for (i = 0; i < vsi->num_queue_pairs; i++) {
- memset(&vsi->rx_rings[i].rx_stats, 0 ,
- sizeof(vsi->rx_rings[i].rx_stats));
- memset(&vsi->tx_rings[i].tx_stats, 0,
- sizeof(vsi->tx_rings[i].tx_stats));
+ memset(&vsi->rx_rings[i]->stats, 0 ,
+ sizeof(vsi->rx_rings[i]->stats));
+ memset(&vsi->rx_rings[i]->rx_stats, 0 ,
+ sizeof(vsi->rx_rings[i]->rx_stats));
+ memset(&vsi->tx_rings[i]->stats, 0 ,
+ sizeof(vsi->tx_rings[i]->stats));
+ memset(&vsi->tx_rings[i]->tx_stats, 0,
+ sizeof(vsi->tx_rings[i]->tx_stats));
}
vsi->stat_offsets_loaded = false;
}
@@ -598,7 +641,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = &vsi->tx_rings[i];
+ struct i40e_ring *ring = vsi->tx_rings[i];
clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
}
}
@@ -652,7 +695,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = &vsi->tx_rings[i];
+ struct i40e_ring *ring = vsi->tx_rings[i];
tc = ring->dcb_tc;
if (xoff[tc])
@@ -704,21 +747,38 @@ void i40e_update_stats(struct i40e_vsi *vsi)
tx_restart = tx_busy = 0;
rx_page = 0;
rx_buf = 0;
+ rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
struct i40e_ring *p;
+ u64 bytes, packets;
+ unsigned int start;
- p = &vsi->rx_rings[q];
- rx_b += p->rx_stats.bytes;
- rx_p += p->rx_stats.packets;
- rx_buf += p->rx_stats.alloc_rx_buff_failed;
- rx_page += p->rx_stats.alloc_rx_page_failed;
+ /* locate Tx ring */
+ p = ACCESS_ONCE(vsi->tx_rings[q]);
- p = &vsi->tx_rings[q];
- tx_b += p->tx_stats.bytes;
- tx_p += p->tx_stats.packets;
+ do {
+ start = u64_stats_fetch_begin_bh(&p->syncp);
+ packets = p->stats.packets;
+ bytes = p->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ tx_b += bytes;
+ tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
tx_busy += p->tx_stats.tx_busy;
+
+ /* Rx queue is part of the same block as Tx queue */
+ p = &p[1];
+ do {
+ start = u64_stats_fetch_begin_bh(&p->syncp);
+ packets = p->stats.packets;
+ bytes = p->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ rx_b += bytes;
+ rx_p += packets;
+ rx_buf += p->rx_stats.alloc_rx_buff_failed;
+ rx_page += p->rx_stats.alloc_rx_page_failed;
}
+ rcu_read_unlock();
vsi->tx_restart = tx_restart;
vsi->tx_busy = tx_busy;
vsi->rx_page_failed = rx_page;
@@ -1988,7 +2048,7 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
int i, err = 0;
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+ err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
return err;
}
@@ -2004,8 +2064,8 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i].desc)
- i40e_free_tx_resources(&vsi->tx_rings[i]);
+ if (vsi->tx_rings[i]->desc)
+ i40e_free_tx_resources(vsi->tx_rings[i]);
}
/**
@@ -2023,7 +2083,7 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
int i, err = 0;
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+ err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
return err;
}
@@ -2038,8 +2098,8 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->rx_rings[i].desc)
- i40e_free_rx_resources(&vsi->rx_rings[i]);
+ if (vsi->rx_rings[i]->desc)
+ i40e_free_rx_resources(vsi->rx_rings[i]);
}
/**
@@ -2114,8 +2174,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
/* Now associate this queue with this PCI function */
qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
- qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
- & I40E_QTX_CTL_PF_INDX_MASK);
+ qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
i40e_flush(hw);
@@ -2223,8 +2283,8 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
int err = 0;
u16 i;
- for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
- err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+ for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+ err = i40e_configure_tx_ring(vsi->tx_rings[i]);
return err;
}
@@ -2274,7 +2334,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
/* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+ err = i40e_configure_rx_ring(vsi->rx_rings[i]);
return err;
}
@@ -2298,8 +2358,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
qoffset = vsi->tc_config.tc_info[n].qoffset;
qcount = vsi->tc_config.tc_info[n].qcount;
for (i = qoffset; i < (qoffset + qcount); i++) {
- struct i40e_ring *rx_ring = &vsi->rx_rings[i];
- struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+ struct i40e_ring *rx_ring = vsi->rx_rings[i];
+ struct i40e_ring *tx_ring = vsi->tx_rings[i];
rx_ring->dcb_tc = n;
tx_ring->dcb_tc = n;
}
@@ -2354,8 +2414,8 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
*/
qp = vsi->base_queue;
vector = vsi->base_vector;
- q_vector = vsi->q_vectors;
- for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ q_vector = vsi->q_vectors[i];
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2435,7 +2495,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
**/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
- struct i40e_q_vector *q_vector = vsi->q_vectors;
+ struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u32 val;
@@ -2472,7 +2532,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure
**/
-static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 val;
@@ -2500,7 +2560,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
- i40e_flush(hw);
+ /* skip the flush */
}
/**
@@ -2512,7 +2572,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
struct i40e_q_vector *q_vector = data;
- if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
napi_schedule(&q_vector->napi);
@@ -2529,7 +2589,7 @@ static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
{
struct i40e_q_vector *q_vector = data;
- if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
pr_info("fdir ring cleaning needed\n");
@@ -2554,16 +2614,16 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
int vector, err;
for (vector = 0; vector < q_vectors; vector++) {
- struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+ struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
- if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+ if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "TxRx", rx_int_idx++);
tx_int_idx++;
- } else if (q_vector->rx.ring[0]) {
+ } else if (q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "rx", rx_int_idx++);
- } else if (q_vector->tx.ring[0]) {
+ } else if (q_vector->tx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "tx", tx_int_idx++);
} else {
@@ -2611,8 +2671,8 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
- wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+ wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
+ wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
}
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -2649,6 +2709,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
i40e_irq_dynamic_enable_icr0(pf);
}
+ i40e_flush(&pf->hw);
return 0;
}
@@ -2681,14 +2742,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
icr0 = rd32(hw, I40E_PFINT_ICR0);
- /* if sharing a legacy IRQ, we might get called w/o an intr pending */
- if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
- return IRQ_NONE;
-
val = rd32(hw, I40E_PFINT_DYN_CTL0);
val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
wr32(hw, I40E_PFINT_DYN_CTL0, val);
+ /* if sharing a legacy IRQ, we might get called w/o an intr pending */
+ if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+ return IRQ_NONE;
+
ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -2702,10 +2763,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
qval = rd32(hw, I40E_QINT_TQCTL(0));
qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
wr32(hw, I40E_QINT_TQCTL(0), qval);
- i40e_flush(hw);
if (!test_bit(__I40E_DOWN, &pf->state))
- napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+ napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
}
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -2764,7 +2824,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
- i40e_flush(hw);
if (!test_bit(__I40E_DOWN, &pf->state)) {
i40e_service_event_schedule(pf);
i40e_irq_dynamic_enable_icr0(pf);
@@ -2774,40 +2833,26 @@ static irqreturn_t i40e_intr(int irq, void *data)
}
/**
- * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector
+ * i40e_map_vector_to_qp - Assigns the queue pair to the vector
* @vsi: the VSI being configured
* @v_idx: vector index
- * @r_idx: rx queue index
+ * @qp_idx: queue pair index
**/
-static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
+static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
- struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
- struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
-
- rx_ring->q_vector = q_vector;
- q_vector->rx.ring[q_vector->rx.count] = rx_ring;
- q_vector->rx.count++;
- q_vector->rx.latency_range = I40E_LOW_LATENCY;
- q_vector->vsi = vsi;
-}
-
-/**
- * i40e_map_vector_to_txq - Assigns the Tx queue to the vector
- * @vsi: the VSI being configured
- * @v_idx: vector index
- * @t_idx: tx queue index
- **/
-static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
-{
- struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
- struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
+ struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
tx_ring->q_vector = q_vector;
- q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
- q_vector->tx.latency_range = I40E_LOW_LATENCY;
- q_vector->num_ringpairs++;
- q_vector->vsi = vsi;
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ q_vector->rx.count++;
}
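Editorial aside: the ring containers lose their fixed ring[] arrays here; map_vector_to_qp() pushes each Tx/Rx ring onto the head of a singly linked list threaded through the new ring->next pointer, and the count field only records how many rings are attached. A minimal standalone sketch of that push-and-walk pattern (the struct names below are illustrative, not the driver's real definitions):

	struct ring {
		struct ring *next;	/* next ring attached to the same vector */
		int id;
	};

	struct ring_container {
		struct ring *head;	/* most recently attached ring */
		int count;
	};

	/* push a ring onto the container, newest first, as map_vector_to_qp() does */
	static void container_add(struct ring_container *rc, struct ring *r)
	{
		r->next = rc->head;
		rc->head = r;
		rc->count++;
	}

	/* walk every attached ring, the way i40e_for_each_ring() does later in this patch */
	static int container_sum(const struct ring_container *rc)
	{
		const struct ring *r;
		int sum = 0;

		for (r = rc->head; r != NULL; r = r->next)
			sum += r->id;
		return sum;
	}

Rings therefore end up on the list in reverse qp_idx order, which is harmless because the NAPI poll and teardown paths do not depend on ordering.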
/**
@@ -2823,7 +2868,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
int qp_remaining = vsi->num_queue_pairs;
int q_vectors = vsi->num_q_vectors;
- int qp_per_vector;
+ int num_ringpairs;
int v_start = 0;
int qp_idx = 0;
@@ -2831,11 +2876,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
* group them so there are multiple queues per vector.
*/
for (; v_start < q_vectors && qp_remaining; v_start++) {
- qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
- for (; qp_per_vector;
- qp_per_vector--, qp_idx++, qp_remaining--) {
- map_vector_to_rxq(vsi, v_start, qp_idx);
- map_vector_to_txq(vsi, v_start, qp_idx);
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
+
+ num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+
+ q_vector->num_ringpairs = num_ringpairs;
+
+ q_vector->rx.count = 0;
+ q_vector->tx.count = 0;
+ q_vector->rx.ring = NULL;
+ q_vector->tx.ring = NULL;
+
+ while (num_ringpairs--) {
+ map_vector_to_qp(vsi, v_start, qp_idx);
+ qp_idx++;
+ qp_remaining--;
}
}
}
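Editorial aside: the rewritten loop recomputes DIV_ROUND_UP(qp_remaining, vectors_left) on every vector, so the split stays as even as possible when the counts do not divide cleanly. A tiny userspace sketch of the same arithmetic (values picked purely for illustration):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		int qp_remaining = 10;		/* queue pairs to distribute   */
		int q_vectors = 4;		/* interrupt vectors available */

		for (int v = 0; v < q_vectors && qp_remaining; v++) {
			int num_ringpairs = DIV_ROUND_UP(qp_remaining,
							 q_vectors - v);

			printf("vector %d -> %d queue pairs\n", v, num_ringpairs);
			qp_remaining -= num_ringpairs;
		}
		return 0;			/* prints 3, 3, 2, 2 */
	}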
@@ -2887,7 +2942,7 @@ static void i40e_netpoll(struct net_device *netdev)
pf->flags |= I40E_FLAG_IN_NETPOLL;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
for (i = 0; i < vsi->num_q_vectors; i++)
- i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+ i40e_msix_clean_rings(0, vsi->q_vectors[i]);
} else {
i40e_intr(pf->pdev->irq, netdev);
}
@@ -3073,14 +3128,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
u16 vector = i + base;
/* free only the irqs that were actually requested */
- if (vsi->q_vectors[i].num_ringpairs == 0)
+ if (vsi->q_vectors[i]->num_ringpairs == 0)
continue;
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(pf->msix_entries[vector].vector,
NULL);
free_irq(pf->msix_entries[vector].vector,
- &vsi->q_vectors[i]);
+ vsi->q_vectors[i]);
/* Tear down the interrupt queue link list
*
@@ -3164,6 +3219,39 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
}
/**
+ * i40e_free_q_vector - Free memory allocated for specific interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition, if
+ * NAPI is enabled, it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct i40e_ring *ring;
+
+ if (!q_vector)
+ return;
+
+ /* disassociate q_vector from rings */
+ i40e_for_each_ring(ring, q_vector->tx)
+ ring->q_vector = NULL;
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ ring->q_vector = NULL;
+
+ /* only VSI w/ an associated netdev is set up w/ NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+
+ vsi->q_vectors[v_idx] = NULL;
+
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
* i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
* @vsi: the VSI being un-configured
*
@@ -3174,24 +3262,8 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
{
int v_idx;
- for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
- struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
- int r_idx;
-
- if (!q_vector)
- continue;
-
- /* disassociate q_vector from rings */
- for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
- q_vector->tx.ring[r_idx]->q_vector = NULL;
- for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
- q_vector->rx.ring[r_idx]->q_vector = NULL;
-
- /* only VSI w/ an associated netdev is set up w/ NAPI */
- if (vsi->netdev)
- netif_napi_del(&q_vector->napi);
- }
- kfree(vsi->q_vectors);
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ i40e_free_q_vector(vsi, v_idx);
}
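Editorial aside: i40e_free_q_vector() ends in kfree_rcu() rather than kfree(); the q_vector (like the rings later in this patch) gains a struct rcu_head so the memory is only recycled after an RCU grace period, protecting lock-free readers that may still hold the pointer. A generic sketch of the idiom, not the driver's exact structure:

	#include <linux/slab.h>
	#include <linux/rcupdate.h>

	struct obj {
		int value;
		struct rcu_head rcu;	/* storage kfree_rcu() needs */
	};

	static void obj_release(struct obj *o)
	{
		/*
		 * Readers that looked the object up under rcu_read_lock() may
		 * still be using it; defer the actual kfree() past a grace period.
		 */
		kfree_rcu(o, rcu);
	}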
/**
@@ -3241,7 +3313,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_enable(&vsi->q_vectors[q_idx].napi);
+ napi_enable(&vsi->q_vectors[q_idx]->napi);
}
/**
@@ -3256,7 +3328,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_disable(&vsi->q_vectors[q_idx].napi);
+ napi_disable(&vsi->q_vectors[q_idx]->napi);
}
/**
@@ -3703,8 +3775,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
(vsi->netdev)) {
+ netdev_info(vsi->netdev, "NIC Link is Up\n");
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
+ } else if (vsi->netdev) {
+ netdev_info(vsi->netdev, "NIC Link is Down\n");
}
i40e_service_event_schedule(pf);
@@ -3772,8 +3847,8 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_napi_disable_all(vsi);
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_clean_tx_ring(&vsi->tx_rings[i]);
- i40e_clean_rx_ring(&vsi->rx_rings[i]);
+ i40e_clean_tx_ring(vsi->tx_rings[i]);
+ i40e_clean_rx_ring(vsi->rx_rings[i]);
}
}
@@ -4153,8 +4228,9 @@ static void i40e_link_event(struct i40e_pf *pf)
if (new_link == old_link)
return;
- netdev_info(pf->vsi[pf->lan_vsi]->netdev,
- "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+ if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
+ netdev_info(pf->vsi[pf->lan_vsi]->netdev,
+ "NIC Link is %s\n", (new_link ? "Up" : "Down"));
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
@@ -4199,9 +4275,9 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- set_check_for_tx_hang(&vsi->tx_rings[i]);
+ set_check_for_tx_hang(vsi->tx_rings[i]);
if (test_bit(__I40E_HANG_CHECK_ARMED,
- &vsi->tx_rings[i].state))
+ &vsi->tx_rings[i]->state))
armed++;
}
@@ -4537,7 +4613,8 @@ static void i40e_fdir_setup(struct i40e_pf *pf)
bool new_vsi = false;
int err, i;
- if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
+ if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
+ I40E_FLAG_FDIR_ATR_ENABLED)))
return;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
@@ -4937,6 +5014,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
int ret = -ENODEV;
struct i40e_vsi *vsi;
+ int sz_vectors;
+ int sz_rings;
int vsi_idx;
int i;
@@ -4962,14 +5041,14 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi_idx = i; /* Found one! */
} else {
ret = -ENODEV;
- goto err_alloc_vsi; /* out of VSI slots! */
+ goto unlock_pf; /* out of VSI slots! */
}
pf->next_vsi = ++i;
vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
if (!vsi) {
ret = -ENOMEM;
- goto err_alloc_vsi;
+ goto unlock_pf;
}
vsi->type = type;
vsi->back = pf;
@@ -4982,14 +5061,40 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list);
- i40e_set_num_rings_in_vsi(vsi);
+ ret = i40e_set_num_rings_in_vsi(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* allocate memory for ring pointers */
+ sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
+ if (!vsi->tx_rings) {
+ ret = -ENOMEM;
+ goto err_rings;
+ }
+ vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+ /* allocate memory for q_vector pointers */
+ sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+ vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
+ if (!vsi->q_vectors) {
+ ret = -ENOMEM;
+ goto err_vectors;
+ }
/* Setup default MSIX irq handler for VSI */
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
pf->vsi[vsi_idx] = vsi;
ret = vsi_idx;
-err_alloc_vsi:
+ goto unlock_pf;
+
+err_vectors:
+ kfree(vsi->tx_rings);
+err_rings:
+ pf->next_vsi = i - 1;
+ kfree(vsi);
+unlock_pf:
mutex_unlock(&pf->switch_mutex);
return ret;
}
@@ -5030,6 +5135,10 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+ /* free the ring and vector containers */
+ kfree(vsi->q_vectors);
+ kfree(vsi->tx_rings);
+
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi)
pf->next_vsi = vsi->idx;
@@ -5043,34 +5152,40 @@ free_vsi:
}
/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+ int i;
+
+ if (vsi->tx_rings[0])
+ for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+ kfree_rcu(vsi->tx_rings[i], rcu);
+ vsi->tx_rings[i] = NULL;
+ vsi->rx_rings[i] = NULL;
+ }
+
+ return 0;
+}
+
+/**
* i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
* @vsi: the VSI being configured
**/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int ret = 0;
int i;
- vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
- sizeof(struct i40e_ring), GFP_KERNEL);
- if (!vsi->rx_rings) {
- ret = -ENOMEM;
- goto err_alloc_rings;
- }
-
- vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
- sizeof(struct i40e_ring), GFP_KERNEL);
- if (!vsi->tx_rings) {
- ret = -ENOMEM;
- kfree(vsi->rx_rings);
- goto err_alloc_rings;
- }
-
/* Set basic values in the rings to be used later during open() */
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
- struct i40e_ring *rx_ring = &vsi->rx_rings[i];
- struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+ struct i40e_ring *tx_ring;
+ struct i40e_ring *rx_ring;
+
+ tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+ if (!tx_ring)
+ goto err_out;
tx_ring->queue_index = i;
tx_ring->reg_idx = vsi->base_queue + i;
@@ -5081,7 +5196,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
tx_ring->count = vsi->num_desc;
tx_ring->size = 0;
tx_ring->dcb_tc = 0;
+ vsi->tx_rings[i] = tx_ring;
+ rx_ring = &tx_ring[1];
rx_ring->queue_index = i;
rx_ring->reg_idx = vsi->base_queue + i;
rx_ring->ring_active = false;
@@ -5095,24 +5212,14 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
set_ring_16byte_desc_enabled(rx_ring);
else
clear_ring_16byte_desc_enabled(rx_ring);
- }
-
-err_alloc_rings:
- return ret;
-}
-
-/**
- * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
- * @vsi: the VSI being cleaned
- **/
-static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
-{
- if (vsi) {
- kfree(vsi->rx_rings);
- kfree(vsi->tx_rings);
+ vsi->rx_rings[i] = rx_ring;
}
return 0;
+
+err_out:
+ i40e_vsi_clear_rings(vsi);
+ return -ENOMEM;
}
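Editorial aside: two allocation tricks are folded into the new i40e_vsi_mem_alloc()/i40e_alloc_rings(): the Tx and Rx pointer arrays come from one kzalloc (rx_rings simply points at the second half of tx_rings), and each queue pair's two ring structs share a single allocation with rx_ring = &tx_ring[1]. A reduced sketch of that layout with a hypothetical element type:

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct elem { int id; };

	struct pair_table {
		struct elem **tx;	/* n pointers...                     */
		struct elem **rx;	/* ...aliasing the second half of tx */
	};

	static int pair_table_init(struct pair_table *t, int n)
	{
		int i;

		/* one allocation backs both pointer arrays */
		t->tx = kcalloc(2 * n, sizeof(struct elem *), GFP_KERNEL);
		if (!t->tx)
			return -ENOMEM;
		t->rx = &t->tx[n];

		for (i = 0; i < n; i++) {
			/* one allocation backs both halves of the pair */
			struct elem *e = kzalloc(2 * sizeof(*e), GFP_KERNEL);

			if (!e)
				return -ENOMEM;	/* caller unwinds, as the driver does */
			t->tx[i] = e;
			t->rx[i] = &e[1];
		}
		return 0;
	}

Freeing only the Tx side then releases both halves, which is why i40e_vsi_clear_rings() above touches just tx_rings[i] and why i40e_vsi_clear() frees only vsi->tx_rings.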
/**
@@ -5249,6 +5356,38 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
/**
+ * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+ struct i40e_q_vector *q_vector;
+
+ /* allocate q_vector */
+ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ q_vector->vsi = vsi;
+ q_vector->v_idx = v_idx;
+ cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &q_vector->napi,
+ i40e_napi_poll, vsi->work_limit);
+
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+
+ /* tie q_vector and vsi together */
+ vsi->q_vectors[v_idx] = q_vector;
+
+ return 0;
+}
+
+/**
* i40e_alloc_q_vectors - Allocate memory for interrupt vectors
* @vsi: the VSI being configured
*
@@ -5259,6 +5398,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int v_idx, num_q_vectors;
+ int err;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -5268,22 +5408,19 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;
- vsi->q_vectors = kcalloc(num_q_vectors,
- sizeof(struct i40e_q_vector),
- GFP_KERNEL);
- if (!vsi->q_vectors)
- return -ENOMEM;
-
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- vsi->q_vectors[v_idx].vsi = vsi;
- vsi->q_vectors[v_idx].v_idx = v_idx;
- cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
- if (vsi->netdev)
- netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
- i40e_napi_poll, vsi->work_limit);
+ err = i40e_alloc_q_vector(vsi, v_idx);
+ if (err)
+ goto err_out;
}
return 0;
+
+err_out:
+ while (v_idx--)
+ i40e_free_q_vector(vsi, v_idx);
+
+ return err;
}
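Editorial aside: i40e_alloc_q_vectors() now allocates the vectors one at a time and, on failure, unwinds only the ones already created via while (v_idx--). A compact sketch of that allocate-then-roll-back idiom (placeholder type and names):

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct thing { int dummy; };

	static int alloc_all(struct thing **slots, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			slots[i] = kzalloc(sizeof(*slots[i]), GFP_KERNEL);
			if (!slots[i])
				goto err_out;
		}
		return 0;

	err_out:
		/* free only what was actually allocated, newest first */
		while (i--) {
			kfree(slots[i]);
			slots[i] = NULL;
		}
		return -ENOMEM;
	}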
/**
@@ -5297,7 +5434,8 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
err = i40e_init_msix(pf);
if (err) {
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_RSS_ENABLED |
I40E_FLAG_MQ_ENABLED |
I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
@@ -5312,14 +5450,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
(pf->flags & I40E_FLAG_MSI_ENABLED)) {
+ dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
err = pci_enable_msi(pf->pdev);
if (err) {
- dev_info(&pf->pdev->dev,
- "MSI init failed (%d), trying legacy.\n", err);
+ dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
pf->flags &= ~I40E_FLAG_MSI_ENABLED;
}
}
+ if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+ dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+
/* track first vector for misc interrupts */
err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
}
@@ -5950,7 +6091,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
int ret = -ENOENT;
struct i40e_pf *pf = vsi->back;
- if (vsi->q_vectors) {
+ if (vsi->q_vectors[0]) {
dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
vsi->seid);
return -EEXIST;
@@ -5972,8 +6113,9 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
goto vector_setup_out;
}
- vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
- vsi->num_q_vectors, vsi->idx);
+ if (vsi->num_q_vectors)
+ vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+ vsi->num_q_vectors, vsi->idx);
if (vsi->base_vector < 0) {
dev_info(&pf->pdev->dev,
"failed to get q tracking for VSI %d, err=%d\n",
@@ -7062,8 +7204,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
pf->vsi = kzalloc(len, GFP_KERNEL);
- if (!pf->vsi)
+ if (!pf->vsi) {
+ err = -ENOMEM;
goto err_switch_setup;
+ }
err = i40e_setup_pf_switch(pf);
if (err) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 49d2cfa9b0cc..f1f03bc5c729 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -37,6 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
* i40e_program_fdir_filter - Program a Flow Director filter
* @fdir_input: Packet data that will be filter parameters
@@ -50,6 +51,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_desc;
struct i40e_ring *tx_ring;
+ unsigned int fpt, dcc;
struct i40e_vsi *vsi;
struct device *dev;
dma_addr_t dma;
@@ -64,93 +66,78 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
if (!vsi)
return -ENOENT;
- tx_ring = &vsi->tx_rings[0];
+ tx_ring = vsi->tx_rings[0];
dev = tx_ring->dev;
dma = dma_map_single(dev, fdir_data->raw_packet,
- I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+ I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto dma_fail;
/* grab the next descriptor */
- fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
- tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+ tx_buf = &tx_ring->tx_bi[i];
+
+ tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
- fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
- << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
- & I40E_TXD_FLTR_QW0_QINDEX_MASK);
+ fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK;
- fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
- << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
- & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+ fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+ I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
- fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
- << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
- & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+ fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+ I40E_TXD_FLTR_QW0_PCTYPE_MASK;
/* Use LAN VSI Id if not programmed by user */
if (fdir_data->dest_vsi == 0)
- fdir_desc->qindex_flex_ptype_vsi |=
- cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
- << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+ fpt |= (pf->vsi[pf->lan_vsi]->id) <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
else
- fdir_desc->qindex_flex_ptype_vsi |=
- cpu_to_le32((fdir_data->dest_vsi
- << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
- & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+ fpt |= ((u32)fdir_data->dest_vsi <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+ I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
- fdir_desc->dtype_cmd_cntindex =
- cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
+
+ dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
if (add)
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
- I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
- << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
else
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
- I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
- << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
- << I40E_TXD_FLTR_QW1_DEST_SHIFT)
- & I40E_TXD_FLTR_QW1_DEST_MASK);
+ dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+ I40E_TXD_FLTR_QW1_DEST_MASK;
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
- (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
- & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+ dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+ I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
if (fdir_data->cnt_index != 0) {
- fdir_desc->dtype_cmd_cntindex |=
- cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
- fdir_desc->dtype_cmd_cntindex |=
- cpu_to_le32((fdir_data->cnt_index
- << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
- & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+ dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dcc |= ((u32)fdir_data->cnt_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
}
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
/* Now program a dummy descriptor */
- tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
- tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ i = tx_ring->next_to_use;
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+
+ tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
tx_desc->buffer_addr = cpu_to_le64(dma);
- td_cmd = I40E_TX_DESC_CMD_EOP |
- I40E_TX_DESC_CMD_RS |
- I40E_TX_DESC_CMD_DUMMY;
+ td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
- /* Mark the data descriptor to be watched */
- tx_buf->next_to_watch = tx_desc;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -158,6 +145,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
*/
wmb();
+ /* Mark the data descriptor to be watched */
+ tx_buf->next_to_watch = tx_desc;
+
writel(tx_ring->next_to_use, tx_ring->tail);
return 0;
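Editorial aside: both here and in i40e_tx_map() further down, next_to_watch is now written only after the wmb(); the cleanup side treats a non-NULL next_to_watch as "this descriptor chain is complete", so the flag must not become visible before the descriptor writes it guards. A condensed sketch of the producer side of that handshake (the helper and its argument list are invented for illustration; the fields are the driver's):

	/* caller has fully written the descriptor chain ending at 'desc' */
	static void publish_tx_work(struct i40e_ring *tx_ring,
				    struct i40e_tx_buffer *tx_buf,
				    struct i40e_tx_desc *desc, u16 next_to_use)
	{
		wmb();				/* descriptors before the marker... */
		tx_buf->next_to_watch = desc;	/* ...then publish "work pending"   */
		writel(next_to_use, tx_ring->tail);	/* finally notify the hardware */
	}

The matching consumer in i40e_clean_tx_irq() reads next_to_watch first, issues read_barrier_depends() before trusting the descriptor contents, and clears the pointer once the chain has been reclaimed.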
@@ -188,27 +178,30 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
}
/**
- * i40e_unmap_tx_resource - Release a Tx buffer
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
* @ring: the ring that owns the buffer
* @tx_buffer: the buffer to free
**/
-static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
- struct i40e_tx_buffer *tx_buffer)
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+ struct i40e_tx_buffer *tx_buffer)
{
- if (tx_buffer->dma) {
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE)
- dma_unmap_page(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
- DMA_TO_DEVICE);
- else
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
}
- tx_buffer->dma = 0;
- tx_buffer->time_stamp = 0;
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
}
/**
@@ -217,7 +210,6 @@ static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
**/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
- struct i40e_tx_buffer *tx_buffer;
unsigned long bi_size;
u16 i;
@@ -226,13 +218,8 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* Free all the Tx ring sk_buffs */
- for (i = 0; i < tx_ring->count; i++) {
- tx_buffer = &tx_ring->tx_bi[i];
- i40e_unmap_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer->skb)
- dev_kfree_skb_any(tx_buffer->skb);
- tx_buffer->skb = NULL;
- }
+ for (i = 0; i < tx_ring->count; i++)
+ i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_bi, 0, bi_size);
@@ -242,6 +229,13 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+
+ if (!tx_ring->netdev)
+ return;
+
+ /* cleanup Tx queue statistics */
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index));
}
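Editorial aside: the three netdev_tx_*_queue() calls added in this file hook the ring into byte queue limits (BQL): i40e_tx_map() reports queued bytes with netdev_tx_sent_queue(), i40e_clean_tx_irq() reports completions with netdev_tx_completed_queue(), and the reset here keeps the accounting consistent across a ring clean. A minimal sketch of how the three pair up (helper names are made up):

	#include <linux/netdevice.h>

	/* transmit path, after the descriptors are posted */
	static void ring_bql_sent(struct net_device *dev, u16 qidx,
				  unsigned int bytes)
	{
		netdev_tx_sent_queue(netdev_get_tx_queue(dev, qidx), bytes);
	}

	/* cleanup path, once per poll */
	static void ring_bql_done(struct net_device *dev, u16 qidx,
				  unsigned int pkts, unsigned int bytes)
	{
		netdev_tx_completed_queue(netdev_get_tx_queue(dev, qidx),
					  pkts, bytes);
	}

	/* ring teardown, so stale in-flight byte counts do not stall the queue */
	static void ring_bql_reset(struct net_device *dev, u16 qidx)
	{
		netdev_tx_reset_queue(netdev_get_tx_queue(dev, qidx));
	}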
/**
@@ -300,14 +294,14 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
+ if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
} else {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
@@ -331,62 +325,88 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
- for (; budget; budget--) {
- struct i40e_tx_desc *eop_desc;
-
- eop_desc = tx_buf->next_to_watch;
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
break;
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz &
cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
break;
- /* count the packet as being completed */
- tx_ring->tx_stats.completed++;
+ /* clear next_to_watch to prevent false hangs */
tx_buf->next_to_watch = NULL;
- tx_buf->time_stamp = 0;
-
- /* set memory barrier before eop_desc is verified */
- rmb();
- do {
- i40e_unmap_tx_resource(tx_ring, tx_buf);
+ /* update the statistics for this packet */
+ total_bytes += tx_buf->bytecount;
+ total_packets += tx_buf->gso_segs;
- /* clear dtype status */
- tx_desc->cmd_type_offset_bsz &=
- ~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);
+ /* free the skb */
+ dev_kfree_skb_any(tx_buf->skb);
- if (likely(tx_desc == eop_desc)) {
- eop_desc = NULL;
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
- dev_kfree_skb_any(tx_buf->skb);
- tx_buf->skb = NULL;
+ /* clear tx_buffer data */
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
- total_bytes += tx_buf->bytecount;
- total_packets += tx_buf->gso_segs;
- }
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
tx_buf++;
tx_desc++;
i++;
- if (unlikely(i == tx_ring->count)) {
- i = 0;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
tx_buf = tx_ring->tx_bi;
tx_desc = I40E_TX_DESC(tx_ring, 0);
}
- } while (eop_desc);
- }
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
tx_ring->next_to_clean = i;
- tx_ring->tx_stats.bytes += total_bytes;
- tx_ring->tx_stats.packets += total_packets;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
+
if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -414,6 +434,10 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
return true;
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ total_packets, total_bytes);
+
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -524,8 +548,6 @@ static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
i40e_set_new_dynamic_itr(&q_vector->tx);
if (old_itr != q_vector->tx.itr)
wr32(hw, reg_addr, q_vector->tx.itr);
-
- i40e_flush(hw);
}
/**
@@ -1042,8 +1064,10 @@ next_desc:
}
rx_ring->next_to_clean = i;
- rx_ring->rx_stats.packets += total_rx_packets;
- rx_ring->rx_stats.bytes += total_rx_bytes;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
@@ -1067,27 +1091,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
struct i40e_q_vector *q_vector =
container_of(napi, struct i40e_q_vector, napi);
struct i40e_vsi *vsi = q_vector->vsi;
+ struct i40e_ring *ring;
bool clean_complete = true;
int budget_per_ring;
- int i;
if (test_bit(__I40E_DOWN, &vsi->state)) {
napi_complete(napi);
return 0;
}
+ /* Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ i40e_for_each_ring(ring, q_vector->tx)
+ clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
- * Since the actual Tx work is minimal, we can give the Tx a larger
- * budget and be more aggressive about cleaning up the Tx descriptors.
*/
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
- for (i = 0; i < q_vector->num_ringpairs; i++) {
- clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
- vsi->work_limit);
- clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
- budget_per_ring);
- }
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
/* If work not completed, return budget and polling will return */
if (!clean_complete)
@@ -1117,7 +1142,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
qval = rd32(hw, I40E_QINT_TQCTL(0));
qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
wr32(hw, I40E_QINT_TQCTL(0), qval);
- i40e_flush(hw);
+
+ i40e_irq_dynamic_enable_icr0(vsi->back);
}
}
@@ -1144,6 +1170,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct tcphdr *th;
unsigned int hlen;
u32 flex_ptype, dtype_cmd;
+ u16 i;
/* make sure ATR is enabled */
if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
@@ -1183,10 +1210,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->atr_count = 0;
/* grab the next descriptor */
- fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
I40E_TXD_FLTR_QW0_QINDEX_MASK;
@@ -1216,7 +1244,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
}
-#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
* i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
* @skb: send buffer
@@ -1276,27 +1303,6 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
}
/**
- * i40e_tx_csum - is checksum offload requested
- * @tx_ring: ptr to the ring to send
- * @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
- *
- * Returns true if checksum offload is requested
- **/
-static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol)
-{
- if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
- !(tx_flags & I40E_TX_FLAGS_TXSW)) {
- if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
- return false;
- }
-
- return skb->ip_summed == CHECKSUM_PARTIAL;
-}
-
-/**
* i40e_tso - set up the tso context descriptor
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
@@ -1482,15 +1488,16 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
const u32 cd_tunneling, const u32 cd_l2tag2)
{
struct i40e_tx_context_desc *context_desc;
+ int i = tx_ring->next_to_use;
if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
return;
/* grab the next descriptor */
- context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
/* cpu_to_le32 and assign to struct fields */
context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
@@ -1512,68 +1519,71 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
- struct device *dev = tx_ring->dev;
- u32 paylen = skb->len - hdr_len;
- u16 i = tx_ring->next_to_use;
+ struct skb_frag_struct *frag;
struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc;
- u32 buf_offset = 0;
+ u16 i = tx_ring->next_to_use;
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
- goto dma_error;
-
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
I40E_TX_FLAGS_VLAN_SHIFT;
}
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+ gso_segs = skb_shinfo(skb)->gso_segs;
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
+ first->gso_segs = gso_segs;
+ first->skb = skb;
+ first->tx_flags = tx_flags;
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
tx_desc = I40E_TX_DESC(tx_ring, i);
- for (;;) {
- while (size > I40E_MAX_DATA_PER_TXD) {
- tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+ tx_bi = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+
+ while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset,
I40E_MAX_DATA_PER_TXD, td_tag);
- buf_offset += I40E_MAX_DATA_PER_TXD;
- size -= I40E_MAX_DATA_PER_TXD;
-
tx_desc++;
i++;
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
i = 0;
}
- }
- tx_bi = &tx_ring->tx_bi[i];
- tx_bi->length = buf_offset + size;
- tx_bi->tx_flags = tx_flags;
- tx_bi->dma = dma;
+ dma += I40E_MAX_DATA_PER_TXD;
+ size -= I40E_MAX_DATA_PER_TXD;
- tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
- tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
- size, td_tag);
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ }
if (likely(!data_len))
break;
- size = skb_frag_size(frag);
- data_len -= size;
- buf_offset = 0;
- tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;
-
- dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
- goto dma_error;
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
tx_desc++;
i++;
@@ -1582,31 +1592,25 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i = 0;
}
- frag++;
- }
-
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+ size = skb_frag_size(frag);
+ data_len -= size;
- i++;
- if (i == tx_ring->count)
- i = 0;
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
- tx_ring->next_to_use = i;
+ tx_bi = &tx_ring->tx_bi[i];
+ }
- if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
- gso_segs = skb_shinfo(skb)->gso_segs;
- else
- gso_segs = 1;
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
- /* multiply data chunks by size of headers */
- tx_bi->bytecount = paylen + (gso_segs * hdr_len);
- tx_bi->gso_segs = gso_segs;
- tx_bi->skb = skb;
+ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ first->bytecount);
- /* set the timestamp and next to watch values */
+ /* set the timestamp */
first->time_stamp = jiffies;
- first->next_to_watch = tx_desc;
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -1615,16 +1619,27 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
*/
wmb();
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* notify HW of packet */
writel(i, tx_ring->tail);
+
return;
dma_error:
- dev_info(dev, "TX DMA map failed\n");
+ dev_info(tx_ring->dev, "TX DMA map failed\n");
/* clear dma mappings for failed tx_bi map */
for (;;) {
tx_bi = &tx_ring->tx_bi[i];
- i40e_unmap_tx_resource(tx_ring, tx_bi);
+ i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
if (tx_bi == first)
break;
if (i == 0)
@@ -1632,8 +1647,6 @@ dma_error:
i--;
}
- dev_kfree_skb_any(skb);
-
tx_ring->next_to_use = i;
}
@@ -1758,16 +1771,16 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
+ /* always enable CRC insertion offload */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
/* Always offload the checksum, since it's in the data descriptor */
- if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol))
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_flags |= I40E_TX_FLAGS_CSUM;
- /* always enable offload insertion */
- td_cmd |= I40E_TX_DESC_CMD_ICRC;
-
- if (tx_flags & I40E_TX_FLAGS_CSUM)
i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
+ }
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
@@ -1801,7 +1814,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
+ struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
/* hardware can't handle really short frames, hardware padding works
* beyond this point
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index b1d7722d98a7..db55d9947f15 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -102,23 +102,20 @@
#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
-#define I40E_TX_FLAGS_TXSW (u32)(1 << 8)
-#define I40E_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 9)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16
struct i40e_tx_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
- unsigned long time_stamp;
- u16 length;
- u32 tx_flags;
struct i40e_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
unsigned int bytecount;
- u16 gso_segs;
- u8 mapped_as_page;
+ unsigned short gso_segs;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
};
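Editorial aside: DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() declare fields that exist only when the architecture needs the DMA address and length again at unmap time (CONFIG_NEED_DMA_MAP_STATE); elsewhere they compile away, which is part of how this struct shrinks. A small usage sketch with a hypothetical buffer struct:

	#include <linux/dma-mapping.h>

	struct buf_info {
		DEFINE_DMA_UNMAP_ADDR(dma);
		DEFINE_DMA_UNMAP_LEN(len);
	};

	static void buf_save_mapping(struct buf_info *bi, dma_addr_t addr,
				     size_t len)
	{
		dma_unmap_addr_set(bi, dma, addr);
		dma_unmap_len_set(bi, len, len);
	}

	static void buf_unmap(struct device *dev, struct buf_info *bi)
	{
		if (dma_unmap_len(bi, len)) {
			dma_unmap_single(dev, dma_unmap_addr(bi, dma),
					 dma_unmap_len(bi, len), DMA_TO_DEVICE);
			dma_unmap_len_set(bi, len, 0);	/* mark as unmapped */
		}
	}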
struct i40e_rx_buffer {
@@ -129,18 +126,18 @@ struct i40e_rx_buffer {
unsigned int page_offset;
};
-struct i40e_tx_queue_stats {
+struct i40e_queue_stats {
u64 packets;
u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
- u64 completed;
u64 tx_done_old;
};
struct i40e_rx_queue_stats {
- u64 packets;
- u64 bytes;
u64 non_eop_descs;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
@@ -183,6 +180,7 @@ enum i40e_ring_state_t {
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
+ struct i40e_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
@@ -219,6 +217,8 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
/* stats structs */
+ struct i40e_queue_stats stats;
+ struct u64_stats_sync syncp;
union {
struct i40e_tx_queue_stats tx_stats;
struct i40e_rx_queue_stats rx_stats;
@@ -229,6 +229,8 @@ struct i40e_ring {
struct i40e_vsi *vsi; /* Backreference to associated VSI */
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+ struct rcu_head rcu; /* to avoid race on free */
} ____cacheline_internodealigned_in_smp;
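Editorial aside: packets/bytes move out of the per-direction stats into a common i40e_queue_stats guarded by a struct u64_stats_sync, so the hot path can bump 64-bit counters under u64_stats_update_begin()/end() (as in the i40e_txrx.c hunks above) while readers retry instead of taking a lock; on 64-bit machines the sync object costs nothing, on 32-bit it closes the torn-read window. A sketch of the reader side (the helper name is hypothetical):

	#include <linux/u64_stats_sync.h>

	static void ring_read_stats(const struct i40e_ring *ring,
				    u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			*packets = ring->stats.packets;
			*bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
	}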
enum i40e_latency_range {
@@ -238,9 +240,8 @@ enum i40e_latency_range {
};
struct i40e_ring_container {
-#define I40E_MAX_RINGPAIR_PER_VECTOR 8
/* array of pointers to rings */
- struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+ struct i40e_ring *ring;
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 count;
@@ -248,6 +249,10 @@ struct i40e_ring_container {
u16 itr;
};
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8967e58e2408..07596982a477 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -251,7 +251,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
else
reg_idx = I40E_VPINT_LNKLSTN(
- ((pf->hw.func_caps.num_msix_vectors_vf - 1)
+ (pf->hw.func_caps.num_msix_vectors_vf
* vf->vf_id) + (vector_id - 1));
if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
@@ -383,7 +383,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
/* associate this queue with the PCI VF function */
qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
- qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+ qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
& I40E_QTX_CTL_PF_INDX_MASK);
qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
<< I40E_QTX_CTL_VFVM_INDX_SHIFT)
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 74a1506b4235..8c2437722aad 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -28,14 +28,14 @@
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
-extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
-extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
-extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
-extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
+void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 *data);
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 data);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_DEF1_DEF2 << 8) | \
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 37a9c06a6c68..2e166b22d52b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -562,11 +562,11 @@ struct e1000_hw {
u8 revision_id;
};
-extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
#define hw_dbg(format, arg...) \
netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
/* These functions must be implemented by drivers */
-s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index dde3c4b7ea99..2d913716573a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -28,26 +28,24 @@
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
-extern s32 igb_update_flash_i210(struct e1000_hw *hw);
-extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
- u16 words, u16 *data);
-extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
- u16 words, u16 *data);
-extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-extern void igb_release_nvm_i210(struct e1000_hw *hw);
-extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-extern s32 igb_read_invm_version(struct e1000_hw *hw,
- struct e1000_fw_version *invm_ver);
-extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
- u16 *data);
-extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
- u16 data);
-extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
-extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_update_flash_i210(struct e1000_hw *hw);
+s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
+void igb_release_nvm_i210(struct e1000_hw *hw);
+s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+s32 igb_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
+s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
+s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+bool igb_get_flash_presence_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 5e13e83cc608..e4cbe8ef67b3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -86,6 +86,6 @@ enum e1000_mng_mode {
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
-extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+void e1000_init_function_pointers_82575(struct e1000_hw *hw);
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index e7266759a10b..c4c4fe332c7e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -708,11 +708,6 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
hw_dbg("Error committing the PHY changes\n");
goto out;
}
- if (phy->type == e1000_phy_i210) {
- ret_val = igb_set_master_slave_mode(hw);
- if (ret_val)
- return ret_val;
- }
out:
return ret_val;
@@ -806,6 +801,9 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
hw_dbg("Error committing the PHY changes\n");
return ret_val;
}
+ ret_val = igb_set_master_slave_mode(hw);
+ if (ret_val)
+ return ret_val;
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 6807b098edae..5e9ed89403aa 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -483,40 +483,38 @@ enum igb_boards {
extern char igb_driver_name[];
extern char igb_driver_version[];
-extern int igb_up(struct igb_adapter *);
-extern void igb_down(struct igb_adapter *);
-extern void igb_reinit_locked(struct igb_adapter *);
-extern void igb_reset(struct igb_adapter *);
-extern void igb_write_rss_indir_tbl(struct igb_adapter *);
-extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
-extern int igb_setup_tx_resources(struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_ring *);
-extern void igb_free_tx_resources(struct igb_ring *);
-extern void igb_free_rx_resources(struct igb_ring *);
-extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_setup_tctl(struct igb_adapter *);
-extern void igb_setup_rctl(struct igb_adapter *);
-extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
-extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
- struct igb_tx_buffer *);
-extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
-extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
-extern bool igb_has_link(struct igb_adapter *adapter);
-extern void igb_set_ethtool_ops(struct net_device *);
-extern void igb_power_up_link(struct igb_adapter *);
-extern void igb_set_fw_version(struct igb_adapter *);
-extern void igb_ptp_init(struct igb_adapter *adapter);
-extern void igb_ptp_stop(struct igb_adapter *adapter);
-extern void igb_ptp_reset(struct igb_adapter *adapter);
-extern void igb_ptp_tx_work(struct work_struct *work);
-extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
-extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
- struct sk_buff *skb);
-extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
- unsigned char *va,
- struct sk_buff *skb);
+int igb_up(struct igb_adapter *);
+void igb_down(struct igb_adapter *);
+void igb_reinit_locked(struct igb_adapter *);
+void igb_reset(struct igb_adapter *);
+int igb_reinit_queues(struct igb_adapter *);
+void igb_write_rss_indir_tbl(struct igb_adapter *);
+int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+int igb_setup_tx_resources(struct igb_ring *);
+int igb_setup_rx_resources(struct igb_ring *);
+void igb_free_tx_resources(struct igb_ring *);
+void igb_free_rx_resources(struct igb_ring *);
+void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_setup_tctl(struct igb_adapter *);
+void igb_setup_rctl(struct igb_adapter *);
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
+void igb_alloc_rx_buffers(struct igb_ring *, u16);
+void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+bool igb_has_link(struct igb_adapter *adapter);
+void igb_set_ethtool_ops(struct net_device *);
+void igb_power_up_link(struct igb_adapter *);
+void igb_set_fw_version(struct igb_adapter *);
+void igb_ptp_init(struct igb_adapter *adapter);
+void igb_ptp_stop(struct igb_adapter *adapter);
+void igb_ptp_reset(struct igb_adapter *adapter);
+void igb_ptp_tx_work(struct work_struct *work);
+void igb_ptp_rx_hang(struct igb_adapter *adapter);
+void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+ struct sk_buff *skb);
static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -531,11 +529,11 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd);
+int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd);
#ifdef CONFIG_IGB_HWMON
-extern void igb_sysfs_exit(struct igb_adapter *adapter);
-extern int igb_sysfs_init(struct igb_adapter *adapter);
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
#endif
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 86d51429a189..b918ba3640f9 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -771,8 +771,10 @@ static int igb_set_eeprom(struct net_device *netdev,
if (eeprom->len == 0)
return -EOPNOTSUPP;
- if (hw->mac.type == e1000_i211)
+ if ((hw->mac.type >= e1000_i210) &&
+ !igb_get_flash_presence_i210(hw)) {
return -EOPNOTSUPP;
+ }
if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EFAULT;
@@ -1659,7 +1661,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+ (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
/* Enable DH89xxCC MPHY for near end loopback */
reg = rd32(E1000_MPHY_ADDR_CTL);
@@ -1725,7 +1728,8 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+ (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
u32 reg;
/* Disable near end loopback on DH89xxCC */
@@ -2655,6 +2659,8 @@ static int igb_set_eee(struct net_device *netdev,
(hw->phy.media_type != e1000_media_type_copper))
return -EOPNOTSUPP;
+ memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+
ret_val = igb_get_eee(netdev, &eee_curr);
if (ret_val)
return ret_val;
@@ -2875,6 +2881,88 @@ static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
return 0;
}
+static unsigned int igb_max_channels(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned int max_combined = 0;
+
+ switch (hw->mac.type) {
+ case e1000_i211:
+ max_combined = IGB_MAX_RX_QUEUES_I211;
+ break;
+ case e1000_82575:
+ case e1000_i210:
+ max_combined = IGB_MAX_RX_QUEUES_82575;
+ break;
+ case e1000_i350:
+ if (!!adapter->vfs_allocated_count) {
+ max_combined = 1;
+ break;
+ }
+ /* fall through */
+ case e1000_82576:
+ if (!!adapter->vfs_allocated_count) {
+ max_combined = 2;
+ break;
+ }
+ /* fall through */
+ case e1000_82580:
+ case e1000_i354:
+ default:
+ max_combined = IGB_MAX_RX_QUEUES;
+ break;
+ }
+
+ return max_combined;
+}
+
+static void igb_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ /* Report maximum channels */
+ ch->max_combined = igb_max_channels(adapter);
+
+ /* Report info for other vector */
+ if (adapter->msix_entries) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
+ }
+
+ ch->combined_count = adapter->rss_queues;
+}
+
+static int igb_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ unsigned int count = ch->combined_count;
+
+ /* Verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* Verify other_count is valid and has not been changed */
+ if (ch->other_count != NON_Q_VECTORS)
+ return -EINVAL;
+
+ /* Verify the number of channels doesn't exceed hw limits */
+ if (count > igb_max_channels(adapter))
+ return -EINVAL;
+
+ if (count != adapter->rss_queues) {
+ adapter->rss_queues = count;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match the new configuration.
+ */
+ return igb_reinit_queues(adapter);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2911,6 +2999,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_rxfh_indir_size = igb_get_rxfh_indir_size,
.get_rxfh_indir = igb_get_rxfh_indir,
.set_rxfh_indir = igb_set_rxfh_indir,
+ .get_channels = igb_get_channels,
+ .set_channels = igb_set_channels,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8cf44f2a8ccd..2ac14bdd5fbb 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -182,6 +182,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
+static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif
#ifdef CONFIG_PM
@@ -2034,21 +2035,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
}
@@ -2429,7 +2424,7 @@ err_dma:
}
#ifdef CONFIG_PCI_IOV
-static int igb_disable_sriov(struct pci_dev *pdev)
+static int igb_disable_sriov(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2470,27 +2465,19 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
int err = 0;
int i;
- if (!adapter->msix_entries) {
+ if (!adapter->msix_entries || num_vfs > 7) {
err = -EPERM;
goto out;
}
-
if (!num_vfs)
goto out;
- else if (old_vfs && old_vfs == num_vfs)
- goto out;
- else if (old_vfs && old_vfs != num_vfs)
- err = igb_disable_sriov(pdev);
-
- if (err)
- goto out;
-
- if (num_vfs > 7) {
- err = -EPERM;
- goto out;
- }
- adapter->vfs_allocated_count = num_vfs;
+ if (old_vfs) {
+ dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
+ old_vfs, max_vfs);
+ adapter->vfs_allocated_count = old_vfs;
+ } else
+ adapter->vfs_allocated_count = num_vfs;
adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
sizeof(struct vf_data_storage), GFP_KERNEL);
@@ -2504,10 +2491,12 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
goto out;
}
- err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
- if (err)
- goto err_out;
-
+ /* only call pci_enable_sriov() if no VFs are allocated already */
+ if (!old_vfs) {
+ err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+ if (err)
+ goto err_out;
+ }
dev_info(&pdev->dev, "%d VFs allocated\n",
adapter->vfs_allocated_count);
for (i = 0; i < adapter->vfs_allocated_count; i++)
@@ -2623,7 +2612,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
return;
pci_sriov_set_totalvfs(pdev, 7);
- igb_enable_sriov(pdev, max_vfs);
+ igb_pci_enable_sriov(pdev, max_vfs);
#endif /* CONFIG_PCI_IOV */
}
@@ -5708,7 +5697,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
/* reply to reset with ack and vf mac address */
msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
- memcpy(addr, vf_mac, 6);
+ memcpy(addr, vf_mac, ETH_ALEN);
igb_write_mbx(hw, msgbuf, 3, vf);
}
@@ -7838,4 +7827,26 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
return E1000_SUCCESS;
}
+
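+/* rebuild the queue/interrupt layout after a channel count change (called from igb_set_channels) */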
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+
+ igb_clear_interrupt_scheme(adapter);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ err = igb_open(netdev);
+
+ return err;
+}
/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index a1463e3d14c0..7d6a25c8f889 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -312,17 +312,17 @@ enum igbvf_state_t {
extern char igbvf_driver_name[];
extern const char igbvf_driver_version[];
-extern void igbvf_check_options(struct igbvf_adapter *);
-extern void igbvf_set_ethtool_ops(struct net_device *);
-
-extern int igbvf_up(struct igbvf_adapter *);
-extern void igbvf_down(struct igbvf_adapter *);
-extern void igbvf_reinit_locked(struct igbvf_adapter *);
-extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern void igbvf_free_rx_resources(struct igbvf_ring *);
-extern void igbvf_free_tx_resources(struct igbvf_ring *);
-extern void igbvf_update_stats(struct igbvf_adapter *);
+void igbvf_check_options(struct igbvf_adapter *);
+void igbvf_set_ethtool_ops(struct net_device *);
+
+int igbvf_up(struct igbvf_adapter *);
+void igbvf_down(struct igbvf_adapter *);
+void igbvf_reinit_locked(struct igbvf_adapter *);
+int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+void igbvf_free_rx_resources(struct igbvf_ring *);
+void igbvf_free_tx_resources(struct igbvf_ring *);
+void igbvf_update_stats(struct igbvf_adapter *);
extern unsigned int copybreak;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 93eb7ee06d3e..04bf22e5ee31 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2343,10 +2343,9 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
struct igbvf_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
+ if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
+ max_frame > MAX_JUMBO_FRAME_SIZE)
return -EINVAL;
- }
#define MAX_STD_JUMBO_FRAME_SIZE 9234
if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
@@ -2638,21 +2637,15 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
}
}
@@ -2699,7 +2692,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ei->get_variants) {
err = ei->get_variants(adapter);
if (err)
- goto err_ioremap;
+ goto err_get_variants;
}
/* setup adapter struct */
@@ -2796,6 +2789,7 @@ err_hw_init:
kfree(adapter->rx_ring);
err_sw_init:
igbvf_reset_interrupt_capability(adapter);
+err_get_variants:
iounmap(adapter->hw.hw_addr);
err_ioremap:
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index eea0e10ce12f..955ad8c2c534 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -154,7 +154,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
- memcpy(hw->mac.perm_addr, addr, 6);
+ memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
else
ret_val = -E1000_ERR_MAC_INIT;
}
@@ -314,7 +314,7 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
memset(msgbuf, 0, 12);
msgbuf[0] = E1000_VF_SET_MAC_ADDR;
- memcpy(msg_addr, addr, 6);
+ memcpy(msg_addr, addr, ETH_ALEN);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 4d2ae97ff1b3..2224cc2edf13 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -187,21 +187,21 @@ enum ixgb_state_t {
};
/* Exported from other modules */
-extern void ixgb_check_options(struct ixgb_adapter *adapter);
-extern void ixgb_set_ethtool_ops(struct net_device *netdev);
+void ixgb_check_options(struct ixgb_adapter *adapter);
+void ixgb_set_ethtool_ops(struct net_device *netdev);
extern char ixgb_driver_name[];
extern const char ixgb_driver_version[];
-extern void ixgb_set_speed_duplex(struct net_device *netdev);
+void ixgb_set_speed_duplex(struct net_device *netdev);
-extern int ixgb_up(struct ixgb_adapter *adapter);
-extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
-extern void ixgb_reset(struct ixgb_adapter *adapter);
-extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_update_stats(struct ixgb_adapter *adapter);
+int ixgb_up(struct ixgb_adapter *adapter);
+void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
+void ixgb_reset(struct ixgb_adapter *adapter);
+int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
+int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_update_stats(struct ixgb_adapter *adapter);
#endif /* _IXGB_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 2a99a35c33aa..0bd5d72e1af5 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -759,27 +759,20 @@ struct ixgb_hw_stats {
};
/* Function Prototypes */
-extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
-extern bool ixgb_init_hw(struct ixgb_hw *hw);
-extern bool ixgb_adapter_start(struct ixgb_hw *hw);
-extern void ixgb_check_for_link(struct ixgb_hw *hw);
-extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
-
-extern void ixgb_rar_set(struct ixgb_hw *hw,
- u8 *addr,
- u32 index);
+bool ixgb_adapter_stop(struct ixgb_hw *hw);
+bool ixgb_init_hw(struct ixgb_hw *hw);
+bool ixgb_adapter_start(struct ixgb_hw *hw);
+void ixgb_check_for_link(struct ixgb_hw *hw);
+bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
+void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index);
/* Filters (multicast, vlan, receive) */
-extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 pad);
+void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, u32 pad);
/* Vfta functions */
-extern void ixgb_write_vfta(struct ixgb_hw *hw,
- u32 offset,
- u32 value);
+void ixgb_write_vfta(struct ixgb_hw *hw, u32 offset, u32 value);
/* Access functions to eeprom data */
void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 9f6b236828e6..57e390cbe6d0 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -408,20 +408,14 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- pr_err("No usable DMA configuration, aborting\n");
- goto err_dma_mask;
- }
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma_mask;
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 0ac6b11c6e4e..09149143ee0f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -55,7 +55,7 @@
#include <net/busy_poll.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
-#define LL_EXTENDED_STATS
+#define BP_EXTENDED_STATS
#endif
/* common prefix used by pr_<> macros */
#undef pr_fmt
@@ -67,7 +67,11 @@
#define IXGBE_MAX_TXD 4096
#define IXGBE_MIN_TXD 64
+#if (PAGE_SIZE < 8192)
#define IXGBE_DEFAULT_RXD 512
+#else
+#define IXGBE_DEFAULT_RXD 128
+#endif
#define IXGBE_MAX_RXD 4096
#define IXGBE_MIN_RXD 64
@@ -187,11 +191,11 @@ struct ixgbe_rx_buffer {
struct ixgbe_queue_stats {
u64 packets;
u64 bytes;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
u64 yields;
u64 misses;
u64 cleaned;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
};
struct ixgbe_tx_queue_stats {
@@ -369,11 +373,13 @@ struct ixgbe_q_vector {
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define IXGBE_QV_STATE_IDLE 0
-#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
-#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
-#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
-#define IXGBE_QV_STATE_NAPI_YIELD 4 /* NAPI yielded this QV */
-#define IXGBE_QV_STATE_POLL_YIELD 8 /* poll yielded this QV */
+#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
+#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
+#define IXGBE_QV_STATE_DISABLED 4 /* QV is disabled */
+#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
+#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED)
+#define IXGBE_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
+#define IXGBE_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
spinlock_t lock;
@@ -394,18 +400,18 @@ static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
{
int rc = true;
- spin_lock(&q_vector->lock);
+ spin_lock_bh(&q_vector->lock);
if (q_vector->state & IXGBE_QV_LOCKED) {
WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
q_vector->tx.ring->stats.yields++;
#endif
} else
/* we don't care if someone yielded */
q_vector->state = IXGBE_QV_STATE_NAPI;
- spin_unlock(&q_vector->lock);
+ spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -413,14 +419,15 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
{
int rc = false;
- spin_lock(&q_vector->lock);
+ spin_lock_bh(&q_vector->lock);
WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
IXGBE_QV_STATE_NAPI_YIELD));
if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
rc = true;
- q_vector->state = IXGBE_QV_STATE_IDLE;
- spin_unlock(&q_vector->lock);
+ /* will reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBE_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -432,7 +439,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
if ((q_vector->state & IXGBE_QV_LOCKED)) {
q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
q_vector->rx.ring->stats.yields++;
#endif
} else
@@ -451,17 +458,32 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
rc = true;
- q_vector->state = IXGBE_QV_STATE_IDLE;
+ /* will reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBE_QV_STATE_DISABLED;
spin_unlock_bh(&q_vector->lock);
return rc;
}
/* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
- WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
+ WARN_ON(!(q_vector->state & IXGBE_QV_OWNED));
return q_vector->state & IXGBE_QV_USER_PEND;
}
+
+/* false if QV is currently owned */
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBE_QV_OWNED)
+ rc = false;
+ q_vector->state |= IXGBE_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+
+ return rc;
+}
+
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
@@ -487,10 +509,16 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
return false;
}
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
return false;
}
+
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+ return true;
+}
+
#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_IXGBE_HWMON
@@ -786,93 +814,89 @@ extern const char ixgbe_driver_version[];
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */
-extern void ixgbe_up(struct ixgbe_adapter *adapter);
-extern void ixgbe_down(struct ixgbe_adapter *adapter);
-extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
-extern void ixgbe_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *);
-extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
-extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+void ixgbe_up(struct ixgbe_adapter *adapter);
+void ixgbe_down(struct ixgbe_adapter *adapter);
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
+void ixgbe_reset(struct ixgbe_adapter *adapter);
+void ixgbe_set_ethtool_ops(struct net_device *netdev);
+int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+void ixgbe_free_rx_resources(struct ixgbe_ring *);
+void ixgbe_free_tx_resources(struct ixgbe_ring *);
+void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
-extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
- struct ixgbe_adapter *,
- struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
- struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
-extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
-extern int ixgbe_poll(struct napi_struct *napi, int budget);
-extern int ethtool_ioctl(struct ifreq *ifr);
-extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common,
- u8 queue);
-extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input_mask);
-extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- u16 soft_id, u8 queue);
-extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- u16 soft_id);
-extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
- union ixgbe_atr_input *mask);
-extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-extern void ixgbe_set_rx_mode(struct net_device *netdev);
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
+ struct ixgbe_ring *);
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
+ struct ixgbe_tx_buffer *);
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
+void ixgbe_write_eitr(struct ixgbe_q_vector *);
+int ixgbe_poll(struct napi_struct *napi, int budget);
+int ethtool_ioctl(struct ifreq *ifr);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
-extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
+void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
-extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
-extern void ixgbe_do_reset(struct net_device *netdev);
+int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
-extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
-extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
+void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
+int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
-extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
- struct ixgbe_tx_buffer *first,
- u8 *hdr_len);
-extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb);
-extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
- struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
- struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
-extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern int ixgbe_fcoe_enable(struct net_device *netdev);
-extern int ixgbe_fcoe_disable(struct net_device *netdev);
+void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
+ u8 *hdr_len);
+int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_enable(struct net_device *netdev);
+int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
-extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
-extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
+u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
+u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
-extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
-extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
- struct netdev_fcoe_hbainfo *info);
-extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+ struct netdev_fcoe_hbainfo *info);
+u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
#ifdef CONFIG_DEBUG_FS
-extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_init(void);
-extern void ixgbe_dbg_exit(void);
+void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_init(void);
+void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
@@ -884,12 +908,12 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
-extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb);
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
+void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb);
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -906,11 +930,11 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
- struct ifreq *ifr, int cmd);
-extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
+int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr,
+ int cmd);
+void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index e8649abf97c0..4e7c9b098b58 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -442,7 +442,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
static int ixgbe_get_regs_len(struct net_device *netdev)
{
-#define IXGBE_REGS_LEN 1129
+#define IXGBE_REGS_LEN 1139
return IXGBE_REGS_LEN * sizeof(u32);
}
@@ -602,22 +602,53 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
/* DCB */
- regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
- regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
- regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
- regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
- for (i = 0; i < 8; i++)
- regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
- for (i = 0; i < 8; i++)
- regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
- for (i = 0; i < 8; i++)
- regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
- for (i = 0; i < 8; i++)
- regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+ regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */
+ regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+ regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+ for (i = 0; i < 8; i++)
+ regs_buff[833 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[841 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[849 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[857 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
+ for (i = 0; i < 8; i++)
+ regs_buff[833 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[841 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[849 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[857 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < 8; i++)
- regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
+ regs_buff[865 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
for (i = 0; i < 8; i++)
- regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
+ regs_buff[873 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
/* Statistics */
regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
@@ -757,6 +788,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
/* 82599 X540 specific registers */
regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+ /* 82599 X540 specific DCB registers */
+ regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+ regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
+ for (i = 0; i < 4; i++)
+ regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
+ regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
+ /* same as RTTQCNRM */
+ regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
+ /* same as RTTQCNRR */
+
+ /* X540 specific DCB registers */
+ regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
+ regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
@@ -1072,7 +1117,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = 0;
data[i+1] = 0;
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = 0;
data[i+1] = 0;
data[i+2] = 0;
@@ -1087,7 +1132,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i+1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = ring->stats.yields;
data[i+1] = ring->stats.misses;
data[i+2] = ring->stats.cleaned;
@@ -1100,7 +1145,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = 0;
data[i+1] = 0;
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = 0;
data[i+1] = 0;
data[i+2] = 0;
@@ -1115,7 +1160,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i+1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = ring->stats.yields;
data[i+1] = ring->stats.misses;
data[i+2] = ring->stats.cleaned;
@@ -1157,28 +1202,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
- sprintf(p, "tx_queue_%u_ll_napi_yield", i);
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "tx_queue_%u_bp_napi_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_ll_misses", i);
+ sprintf(p, "tx_queue_%u_bp_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_ll_cleaned", i);
+ sprintf(p, "tx_queue_%u_bp_cleaned", i);
p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
- sprintf(p, "rx_queue_%u_ll_poll_yield", i);
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "rx_queue_%u_bp_poll_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_ll_misses", i);
+ sprintf(p, "rx_queue_%u_bp_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_ll_cleaned", i);
+ sprintf(p, "rx_queue_%u_bp_cleaned", i);
p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
sprintf(p, "tx_pb_%u_pxon", i);
@@ -2212,13 +2257,13 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
#if IS_ENABLED(CONFIG_BQL)
/* detect ITR changes that require update of TXDCTL.WTHRESH */
- if ((adapter->tx_itr_setting > 1) &&
+ if ((adapter->tx_itr_setting != 1) &&
(adapter->tx_itr_setting < IXGBE_100K_ITR)) {
if ((tx_itr_prev == 1) ||
- (tx_itr_prev > IXGBE_100K_ITR))
+ (tx_itr_prev >= IXGBE_100K_ITR))
need_reset = true;
} else {
- if ((tx_itr_prev > 1) &&
+ if ((tx_itr_prev != 1) &&
(tx_itr_prev < IXGBE_100K_ITR))
need_reset = true;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0ade0cd5ef53..276b5d0d6c45 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -153,7 +153,6 @@ MODULE_VERSION(DRV_VERSION);
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
{
- int pos = 0;
struct pci_dev *parent_dev;
struct pci_bus *parent_bus;
@@ -165,11 +164,10 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
if (!parent_dev)
return -1;
- pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP);
- if (!pos)
+ if (!pci_is_pcie(parent_dev))
return -1;
- pci_read_config_word(parent_dev, pos + reg, value);
+ pcie_capability_read_word(parent_dev, reg, value);
return 0;
}
@@ -247,7 +245,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
max_gts = 4 * width;
break;
case PCIE_SPEED_8_0GT:
- /* 128b/130b encoding only reduces throughput by 1% */
+ /* 128b/130b encoding reduces throughput by less than 2% */
max_gts = 8 * width;
break;
default:
@@ -265,7 +263,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
width,
(speed == PCIE_SPEED_2_5GT ? "20%" :
speed == PCIE_SPEED_5_0GT ? "20%" :
- speed == PCIE_SPEED_8_0GT ? "N/a" :
+ speed == PCIE_SPEED_8_0GT ? "<2%" :
"Unknown"));
if (max_gts < expected_gts) {
@@ -1585,7 +1583,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
{
struct ixgbe_adapter *adapter = q_vector->adapter;
- if (ixgbe_qv_ll_polling(q_vector))
+ if (ixgbe_qv_busy_polling(q_vector))
netif_receive_skb(skb);
else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
napi_gro_receive(&q_vector->napi, skb);
@@ -2097,7 +2095,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
ixgbe_for_each_ring(ring, q_vector->rx) {
found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
if (found)
ring->stats.cleaned += found;
else
@@ -3825,14 +3823,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
- } else {
- /*
- * Write addresses to the MTA, if the attempt fails
- * then we should just turn on promiscuous mode so
- * that we can at least receive multicast traffic
- */
- hw->mac.ops.update_mc_addr_list(hw, netdev);
- vmolr |= IXGBE_VMOLR_ROMPE;
}
ixgbe_vlan_filter_enable(adapter);
hw->addr_ctrl.user_set_promisc = false;
@@ -3849,6 +3839,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
vmolr |= IXGBE_VMOLR_ROPE;
}
+ /* Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+ vmolr |= IXGBE_VMOLR_ROMPE;
+
if (adapter->num_vfs)
ixgbe_restore_vf_multicasts(adapter);
@@ -3893,15 +3890,13 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- local_bh_disable(); /* for ixgbe_qv_lock_napi() */
for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
napi_disable(&adapter->q_vector[q_idx]->napi);
- while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
+ while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
pr_info("QV %d locked\n", q_idx);
- mdelay(1);
+ usleep_range(1000, 20000);
}
}
- local_bh_enable();
}
#ifdef CONFIG_IXGBE_DCB
@@ -7362,19 +7357,16 @@ static const struct net_device_ops ixgbe_netdev_ops = {
**/
static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
struct list_head *entry;
int physfns = 0;
- /* Some cards can not use the generic count PCIe functions method, and
- * so must be hardcoded to the correct value.
+ /* Some cards can not use the generic count PCIe functions method,
+ * because they are behind a parent switch, so we hardcode these with
+ * the correct number of functions.
*/
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82599_SFP_SF_QP:
- case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ if (ixgbe_pcie_from_parent(&adapter->hw)) {
physfns = 4;
- break;
- default:
+ } else {
list_for_each(entry, &adapter->pdev->bus_list) {
struct pci_dev *pdev =
list_entry(entry, struct pci_dev, bus_list);
@@ -7490,19 +7482,14 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
pci_using_dac = 0;
}
@@ -7759,29 +7746,6 @@ skip_sriov:
if (ixgbe_pcie_from_parent(hw))
ixgbe_get_parent_bus_info(adapter);
- /* print bus type/speed/width info */
- e_dev_info("(PCI Express:%s:%s) %pM\n",
- (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
- hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
- hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
- "Unknown"),
- (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
- hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
- hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
- "Unknown"),
- netdev->dev_addr);
-
- err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
- if (err)
- strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
- if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
- e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
- hw->mac.type, hw->phy.type, hw->phy.sfp_type,
- part_str);
- else
- e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
- hw->mac.type, hw->phy.type, part_str);
-
/* calculate the expected PCIe bandwidth required for optimal
* performance. Note that some older parts will never have enough
* bandwidth due to being older generation PCIe parts. We clamp these
@@ -7797,6 +7761,19 @@ skip_sriov:
}
ixgbe_check_minimum_link(adapter, expected_gts);
+ err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+ if (err)
+ strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
+ if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+ e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, hw->phy.sfp_type,
+ part_str);
+ else
+ e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, part_str);
+
+ e_dev_info("%pM\n", netdev->dev_addr);
+
/* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw);
if (err == IXGBE_ERR_EEPROM_VERSION) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 24af12e3719e..aae900a256da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -57,28 +57,28 @@
#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
/* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
-#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
-#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
-#define IXGBE_SFF_1GBASET_CAPABLE 0x8
-#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
-#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
-#define IXGBE_SFF_ADDRESSING_MODE 0x4
-#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
-#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
+#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
+#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
+#define IXGBE_SFF_ADDRESSING_MODE 0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
-#define IXGBE_I2C_EEPROM_READ_MASK 0x100
-#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
-#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
-#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
-#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
-#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+#define IXGBE_I2C_EEPROM_READ_MASK 0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 276d7b135332..1fe7cb0142e1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -558,7 +558,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
- memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
return 0;
@@ -621,16 +621,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
- unsigned char vf_mac_addr[6];
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
unsigned int vfn = (event_mask & 0x3f);
bool enable = ((event_mask & 0x10000000U) != 0);
- if (enable) {
- eth_zero_addr(vf_mac_addr);
- memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
- }
+ if (enable)
+ eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 10775cb9b6d8..7c19e969576f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -561,6 +561,10 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RTTDQSEL 0x04904
#define IXGBE_RTTDT1C 0x04908
#define IXGBE_RTTDT1S 0x0490C
+#define IXGBE_RTTQCNCR 0x08B00
+#define IXGBE_RTTQCNTG 0x04A90
+#define IXGBE_RTTBCNRD 0x0498C
+#define IXGBE_RTTQCNRR 0x0498C
#define IXGBE_RTTDTECC 0x04990
#define IXGBE_RTTDTECC_NO_BCN 0x00000100
#define IXGBE_RTTBCNRC 0x04984
@@ -570,6 +574,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RTTBCNRC_RF_INT_MASK \
(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
#define IXGBE_RTTBCNRM 0x04980
+#define IXGBE_RTTQCNRM 0x04980
/* FCoE DMA Context Registers */
#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 389324f5929a..24b80a6cfca4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -32,12 +32,12 @@
#include "ixgbe.h"
#include "ixgbe_phy.h"
-#define IXGBE_X540_MAX_TX_QUEUES 128
-#define IXGBE_X540_MAX_RX_QUEUES 128
-#define IXGBE_X540_RAR_ENTRIES 128
-#define IXGBE_X540_MC_TBL_SIZE 128
-#define IXGBE_X540_VFT_TBL_SIZE 128
-#define IXGBE_X540_RX_PB_SIZE 384
+#define IXGBE_X540_MAX_TX_QUEUES 128
+#define IXGBE_X540_MAX_RX_QUEUES 128
+#define IXGBE_X540_RAR_ENTRIES 128
+#define IXGBE_X540_MC_TBL_SIZE 128
+#define IXGBE_X540_VFT_TBL_SIZE 128
+#define IXGBE_X540_RX_PB_SIZE 384
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index c9d0c12d6f04..54d9acef9c4e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -45,16 +45,27 @@
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
- int sizeof_stat;
- int stat_offset;
- int base_stat_offset;
- int saved_reset_offset;
+ struct {
+ int sizeof_stat;
+ int stat_offset;
+ int base_stat_offset;
+ int saved_reset_offset;
+ };
};
-#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \
- offsetof(struct ixgbevf_adapter, m), \
- offsetof(struct ixgbevf_adapter, b), \
- offsetof(struct ixgbevf_adapter, r)
+#define IXGBEVF_STAT(m, b, r) { \
+ .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+ .stat_offset = offsetof(struct ixgbevf_adapter, m), \
+ .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
+ .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+}
+
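+/* stats with no base/reset snapshot: the negative offsets tell the readout code to report the raw counter */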
+#define IXGBEVF_ZSTAT(m) { \
+ .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+ .stat_offset = offsetof(struct ixgbevf_adapter, m), \
+ .base_stat_offset = -1, \
+ .saved_reset_offset = -1 \
+}
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
@@ -65,15 +76,20 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
stats.saved_reset_vfgorc)},
{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
stats.saved_reset_vfgotc)},
- {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
+ {"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
stats.saved_reset_vfmprc)},
- {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
- zero_base)},
- {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
- zero_base)},
- {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
- zero_base)},
+ {"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
+ {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
+ {"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
+#ifdef BP_EXTENDED_STATS
+ {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
+ {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
+ {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
+ {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
+ {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
+ {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
+#endif
};
#define IXGBE_QUEUE_STATS_LEN 0
@@ -140,58 +156,10 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
-static char *ixgbevf_reg_names[] = {
- "IXGBE_VFCTRL",
- "IXGBE_VFSTATUS",
- "IXGBE_VFLINKS",
- "IXGBE_VFRXMEMWRAP",
- "IXGBE_VFFRTIMER",
- "IXGBE_VTEICR",
- "IXGBE_VTEICS",
- "IXGBE_VTEIMS",
- "IXGBE_VTEIMC",
- "IXGBE_VTEIAC",
- "IXGBE_VTEIAM",
- "IXGBE_VTEITR",
- "IXGBE_VTIVAR",
- "IXGBE_VTIVAR_MISC",
- "IXGBE_VFRDBAL0",
- "IXGBE_VFRDBAL1",
- "IXGBE_VFRDBAH0",
- "IXGBE_VFRDBAH1",
- "IXGBE_VFRDLEN0",
- "IXGBE_VFRDLEN1",
- "IXGBE_VFRDH0",
- "IXGBE_VFRDH1",
- "IXGBE_VFRDT0",
- "IXGBE_VFRDT1",
- "IXGBE_VFRXDCTL0",
- "IXGBE_VFRXDCTL1",
- "IXGBE_VFSRRCTL0",
- "IXGBE_VFSRRCTL1",
- "IXGBE_VFPSRTYPE",
- "IXGBE_VFTDBAL0",
- "IXGBE_VFTDBAL1",
- "IXGBE_VFTDBAH0",
- "IXGBE_VFTDBAH1",
- "IXGBE_VFTDLEN0",
- "IXGBE_VFTDLEN1",
- "IXGBE_VFTDH0",
- "IXGBE_VFTDH1",
- "IXGBE_VFTDT0",
- "IXGBE_VFTDT1",
- "IXGBE_VFTXDCTL0",
- "IXGBE_VFTXDCTL1",
- "IXGBE_VFTDWBAL0",
- "IXGBE_VFTDWBAL1",
- "IXGBE_VFTDWBAH0",
- "IXGBE_VFTDWBAH1"
-};
-
-
static int ixgbevf_get_regs_len(struct net_device *netdev)
{
- return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+#define IXGBE_REGS_LEN 45
+ return IXGBE_REGS_LEN * sizeof(u32);
}
static void ixgbevf_get_regs(struct net_device *netdev,
@@ -264,9 +232,6 @@ static void ixgbevf_get_regs(struct net_device *netdev,
regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
for (i = 0; i < 2; i++)
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
-
- for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
- hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
}
static void ixgbevf_get_drvinfo(struct net_device *netdev,
@@ -441,22 +406,50 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ char *base = (char *) adapter;
int i;
+#ifdef BP_EXTENDED_STATS
+ u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
+ tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+
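+ /* fold the per-ring busy-poll counters into adapter-wide totals for reporting */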
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_yields += adapter->rx_ring[i].bp_yields;
+ rx_cleaned += adapter->rx_ring[i].bp_cleaned;
+ rx_missed += adapter->rx_ring[i].bp_misses;
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_yields += adapter->tx_ring[i].bp_yields;
+ tx_cleaned += adapter->tx_ring[i].bp_cleaned;
+ tx_missed += adapter->tx_ring[i].bp_misses;
+ }
+
+ adapter->bp_rx_yields = rx_yields;
+ adapter->bp_rx_cleaned = rx_cleaned;
+ adapter->bp_rx_missed = rx_missed;
+
+ adapter->bp_tx_yields = tx_yields;
+ adapter->bp_tx_cleaned = tx_cleaned;
+ adapter->bp_tx_missed = tx_missed;
+#endif
ixgbevf_update_stats(adapter);
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- char *p = (char *)adapter +
- ixgbe_gstrings_stats[i].stat_offset;
- char *b = (char *)adapter +
- ixgbe_gstrings_stats[i].base_stat_offset;
- char *r = (char *)adapter +
- ixgbe_gstrings_stats[i].saved_reset_offset;
- data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
- ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
- ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
+ char *p = base + ixgbe_gstrings_stats[i].stat_offset;
+ char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
+ char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
+
+ if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
+ if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+ data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
+ else
+ data[i] = *(u64 *)p;
+ } else {
+ if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+ data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
+ else
+ data[i] = *(u32 *)p;
+ }
}
}
@@ -685,6 +678,85 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
return 0;
}
+static int ixgbevf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* only valid if in constant ITR mode */
+ if (adapter->rx_itr_setting <= 1)
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+ else
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+ /* if in mixed tx/rx queues per vector mode, report only rx settings */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+ return 0;
+
+ /* only valid if in constant ITR mode */
+ if (adapter->tx_itr_setting <= 1)
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+
+ return 0;
+}
+
+static int ixgbevf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_q_vector *q_vector;
+ int num_vectors, i;
+ u16 tx_itr_param, rx_itr_param;
+
+ /* don't accept tx specific changes if we've got mixed RxTx vectors */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
+ && ec->tx_coalesce_usecs)
+ return -EINVAL;
+
+ if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
+ (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
+ return -EINVAL;
+
+ if (ec->rx_coalesce_usecs > 1)
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+ else
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+
+ if (adapter->rx_itr_setting == 1)
+ rx_itr_param = IXGBE_20K_ITR;
+ else
+ rx_itr_param = adapter->rx_itr_setting;
+
+ if (ec->tx_coalesce_usecs > 1)
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+ else
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (adapter->tx_itr_setting == 1)
+ tx_itr_param = IXGBE_10K_ITR;
+ else
+ tx_itr_param = adapter->tx_itr_setting;
+
+ num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (i = 0; i < num_vectors; i++) {
+ q_vector = adapter->q_vector[i];
+ if (q_vector->tx.count && !q_vector->rx.count)
+ /* tx only */
+ q_vector->itr = tx_itr_param;
+ else
+ /* rx only or mixed */
+ q_vector->itr = rx_itr_param;
+ ixgbevf_write_eitr(q_vector);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_settings = ixgbevf_get_settings,
.get_drvinfo = ixgbevf_get_drvinfo,
@@ -700,6 +772,8 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_sset_count = ixgbevf_get_sset_count,
.get_strings = ixgbevf_get_strings,
.get_ethtool_stats = ixgbevf_get_ethtool_stats,
+ .get_coalesce = ixgbevf_get_coalesce,
+ .set_coalesce = ixgbevf_set_coalesce,
};
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index fff0d9867529..8971e2d0a984 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -38,6 +38,11 @@
#include "vf.h"
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#define BP_EXTENDED_STATS
+#endif
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbevf_tx_buffer {
@@ -76,6 +81,11 @@ struct ixgbevf_ring {
struct u64_stats_sync syncp;
u64 hw_csum_rx_error;
u64 hw_csum_rx_good;
+#ifdef BP_EXTENDED_STATS
+ u64 bp_yields;
+ u64 bp_misses;
+ u64 bp_cleaned;
+#endif
u16 head;
u16 tail;
@@ -145,7 +155,118 @@ struct ixgbevf_q_vector {
struct napi_struct napi;
struct ixgbevf_ring_container rx, tx;
char name[IFNAMSIZ + 9];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int state;
+#define IXGBEVF_QV_STATE_IDLE 0
+#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */
+#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */
+#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */
+#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
+#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
+#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
+#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
+#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD)
+ spinlock_t lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
};
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
+{
+ spin_lock_init(&q_vector->lock);
+ q_vector->state = IXGBEVF_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBEVF_QV_LOCKED) {
+ WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
+ q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
+ rc = false;
+#ifdef BP_EXTENDED_STATS
+ q_vector->tx.ring->bp_yields++;
+#endif
+ } else {
+ /* we don't care if someone yielded */
+ q_vector->state = IXGBEVF_QV_STATE_NAPI;
+ }
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = false;
+ spin_lock_bh(&q_vector->lock);
+ WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
+ IXGBEVF_QV_STATE_NAPI_YIELD));
+
+ if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+ rc = true;
+ /* reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* called from ixgbevf_low_latency_poll() */
+static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
+ q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
+ rc = false;
+#ifdef BP_EXTENDED_STATS
+ q_vector->rx.ring->bp_yields++;
+#endif
+ } else {
+ /* preserve yield marks */
+ q_vector->state |= IXGBEVF_QV_STATE_POLL;
+ }
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the qv while it was locked */
+static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = false;
+ spin_lock_bh(&q_vector->lock);
+ WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
+
+ if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+ rc = true;
+ /* reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
+{
+ WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
+ return q_vector->state & IXGBEVF_QV_USER_PEND;
+}
+
+/* false if QV is currently owned */
+static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBEVF_QV_OWNED)
+ rc = false;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
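The helpers above implement a small lock-protected ownership state machine for each queue vector. The sketch below is a user-space, single-threaded illustration of the same bit transitions (same state values, but a plain variable instead of spin_lock_bh()); it only shows who wins when NAPI and a busy-polling socket race for the vector.

#include <stdio.h>
#include <stdbool.h>

#define QV_IDLE        0
#define QV_NAPI        1
#define QV_POLL        2
#define QV_DISABLED    4
#define QV_OWNED       (QV_NAPI | QV_POLL)
#define QV_LOCKED      (QV_OWNED | QV_DISABLED)
#define QV_NAPI_YIELD  8
#define QV_POLL_YIELD  16

static unsigned int state = QV_IDLE;   /* the driver guards this with a spinlock */

static bool lock_napi(void)
{
	if (state & QV_LOCKED) {       /* someone else owns it: record the yield */
		state |= QV_NAPI_YIELD;
		return false;
	}
	state = QV_NAPI;               /* we don't care if someone yielded earlier */
	return true;
}

static bool lock_poll(void)
{
	if (state & QV_LOCKED) {
		state |= QV_POLL_YIELD;
		return false;
	}
	state |= QV_POLL;              /* preserve yield marks */
	return true;
}

int main(void)
{
	printf("napi lock: %d (state=%u)\n", lock_napi(), state); /* 1, NAPI owns it */
	printf("poll lock: %d (state=%u)\n", lock_poll(), state); /* 0, poll yields */
	return 0;
}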
/*
* microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -165,9 +286,13 @@ struct ixgbevf_q_vector {
((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
-#define IXGBE_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
+static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
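A quick worked example of the ring arithmetic in ixgbevf_desc_unused() (numbers are made up): the number of free descriptors is always one less than the distance from next_to_use forward around the ring to next_to_clean.

#include <stdio.h>
#include <stdint.h>

static uint16_t desc_unused(uint16_t ntc, uint16_t ntu, uint16_t count)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	/* no wrap: clean=10, use=200 on a 512-entry ring -> 512 + 10 - 200 - 1 = 321 */
	printf("%u\n", desc_unused(10, 200, 512));
	/* wrapped: clean=300, use=20 -> 300 - 20 - 1 = 279 */
	printf("%u\n", desc_unused(300, 20, 512));
	return 0;
}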
#define IXGBEVF_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
@@ -240,7 +365,6 @@ struct ixgbevf_adapter {
struct ixgbe_hw hw;
u16 msg_enable;
struct ixgbevf_hw_stats stats;
- u64 zero_base;
/* Interrupt Throttle Rate */
u32 eitr_param;
@@ -249,6 +373,16 @@ struct ixgbevf_adapter {
unsigned int tx_ring_count;
unsigned int rx_ring_count;
+#ifdef BP_EXTENDED_STATS
+ u64 bp_rx_yields;
+ u64 bp_rx_cleaned;
+ u64 bp_rx_missed;
+
+ u64 bp_tx_yields;
+ u64 bp_tx_cleaned;
+ u64 bp_tx_missed;
+#endif
+
u32 link_speed;
bool link_up;
@@ -281,27 +415,25 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];
-extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-extern int ethtool_ioctl(struct ifreq *ifr);
-
-extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
-extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+void ixgbevf_up(struct ixgbevf_adapter *adapter);
+void ixgbevf_down(struct ixgbevf_adapter *adapter);
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+int ethtool_ioctl(struct ifreq *ifr);
+
+extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
+
+void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
#ifdef DEBUG
-extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
#else
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 59a62bbfb371..92ef4cb5a8e8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.7.12-k"
+#define DRV_VERSION "2.11.3-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -251,7 +251,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
- (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -300,6 +300,30 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
}
/**
+ * ixgbevf_rx_skb - Helper function to determine proper Rx method
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
+ struct sk_buff *skb, u8 status,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ skb_mark_napi_id(skb, &q_vector->napi);
+
+ if (ixgbevf_qv_busy_polling(q_vector)) {
+ netif_receive_skb(skb);
+ /* exit early if we busy polled */
+ return;
+ }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+ ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
+}
+
+/**
* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
* @ring: pointer to Rx descriptor ring structure
* @status_err: hardware indication of status of receive
@@ -396,9 +420,9 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
-static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
- struct ixgbevf_ring *rx_ring,
- int budget)
+static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring *rx_ring,
+ int budget)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct pci_dev *pdev = adapter->pdev;
@@ -473,15 +497,6 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
total_rx_bytes += skb->len;
total_rx_packets++;
- /*
- * Work around issue of some types of VM to VM loop back
- * packets not getting split correctly
- */
- if (staterr & IXGBE_RXD_STAT_LB) {
- u32 header_fixup_len = skb_headlen(skb);
- if (header_fixup_len < 14)
- skb_push(skb, header_fixup_len);
- }
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
/* Workaround hardware that can't do proper VEPA multicast
@@ -494,7 +509,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
goto next_desc;
}
- ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
+ ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -514,7 +529,7 @@ next_desc:
}
rx_ring->next_to_clean = i;
- cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+ cleaned_count = ixgbevf_desc_unused(rx_ring);
if (cleaned_count)
ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
@@ -526,7 +541,7 @@ next_desc:
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
- return !!budget;
+ return total_rx_packets;
}
/**
@@ -549,6 +564,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
ixgbevf_for_each_ring(ring, q_vector->tx)
clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ if (!ixgbevf_qv_lock_napi(q_vector))
+ return budget;
+#endif
+
/* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling */
if (q_vector->rx.count > 1)
@@ -558,10 +578,15 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
ixgbevf_for_each_ring(ring, q_vector->rx)
- clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
- per_ring_budget);
+ clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
+ per_ring_budget)
+ < per_ring_budget);
adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ ixgbevf_qv_unlock_napi(q_vector);
+#endif
+
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
@@ -580,7 +605,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
* ixgbevf_write_eitr - write VTEITR register in hardware specific way
* @q_vector: structure containing interrupt and ring information
*/
-static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
+void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct ixgbe_hw *hw = &adapter->hw;
@@ -596,6 +621,40 @@ static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *ring;
+ int found = 0;
+
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ return LL_FLUSH_FAILED;
+
+ if (!ixgbevf_qv_lock_poll(q_vector))
+ return LL_FLUSH_BUSY;
+
+ ixgbevf_for_each_ring(ring, q_vector->rx) {
+ found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
+#ifdef BP_EXTENDED_STATS
+ if (found)
+ ring->bp_cleaned += found;
+ else
+ ring->bp_misses++;
+#endif
+ if (found)
+ break;
+ }
+
+ ixgbevf_qv_unlock_poll(q_vector);
+
+ return found;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
/**
* ixgbevf_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -756,37 +815,12 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
struct ixgbevf_adapter *adapter = data;
- struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 msg;
- bool got_ack = false;
hw->mac.get_link_status = 1;
- if (!hw->mbx.ops.check_for_ack(hw))
- got_ack = true;
-
- if (!hw->mbx.ops.check_for_msg(hw)) {
- hw->mbx.ops.read(hw, &msg, 1);
-
- if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 1));
- adapter->link_up = false;
- }
- if (msg & IXGBE_VT_MSGTYPE_NACK)
- dev_info(&pdev->dev,
- "Last Request of type %2.2x to PF Nacked\n",
- msg & 0xFF);
- hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
- }
-
- /* checking for the ack clears the PFACK bit. Place
- * it back in the v2p_mailbox cache so that anyone
- * polling for an ack will not miss it
- */
- if (got_ack)
- hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -1107,6 +1141,21 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
+static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* PSRTYPE must be initialized in 82599 */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
+
+ if (adapter->num_rx_queues > 1)
+ psrtype |= 1 << 29;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+}
+
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1154,8 +1203,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
int i, j;
u32 rdlen;
- /* PSRTYPE must be initialized in 82599 */
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ ixgbevf_setup_psrtype(adapter);
/* set_rx_buffer_len must be called before ring initialization */
ixgbevf_set_rx_buffer_len(adapter);
@@ -1293,6 +1341,9 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
+#endif
napi_enable(&q_vector->napi);
}
}
@@ -1306,6 +1357,12 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
napi_disable(&q_vector->napi);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
+ pr_info("QV %d locked\n", q_idx);
+ usleep_range(1000, 20000);
+ }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
}
}
@@ -1323,31 +1380,55 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbevf_ring *ring = &adapter->rx_ring[i];
ixgbevf_alloc_rx_buffers(adapter, ring,
- IXGBE_DESC_UNUSED(ring));
+ ixgbevf_desc_unused(ring));
}
}
-#define IXGBE_MAX_RX_DESC_POLL 10
-static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
- int rxr)
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+ int rxr)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
int j = adapter->rx_ring[rxr].reg_idx;
- int k;
- for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
- if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
- break;
- else
- msleep(1);
- }
- if (k >= IXGBE_MAX_RX_DESC_POLL) {
- hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
- "not set within the polling period\n", rxr);
- }
+ do {
+ usleep_range(1000, 2000);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
- ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
- adapter->rx_ring[rxr].count - 1);
+ if (!wait_loop)
+ hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
+ rxr);
+
+ ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr].count - 1));
+}
+
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ do {
+ udelay(10);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop)
+ hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
+ reg_idx);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1545,8 +1626,6 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- ixgbevf_negotiate_api(adapter);
-
ixgbevf_reset_queues(adapter);
ixgbevf_configure(adapter);
@@ -1679,7 +1758,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
/* signal that we are down to the interrupt handler */
set_bit(__IXGBEVF_DOWN, &adapter->state);
- /* disable receives */
+
+ /* disable all enabled rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
netif_tx_disable(netdev);
@@ -1733,10 +1815,12 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- if (hw->mac.ops.reset_hw(hw))
+ if (hw->mac.ops.reset_hw(hw)) {
hw_dbg(hw, "PF still resetting\n");
- else
+ } else {
hw->mac.ops.init_hw(hw);
+ ixgbevf_negotiate_api(adapter);
+ }
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -1929,6 +2013,9 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbevf_poll, 64);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ napi_hash_add(&q_vector->napi);
+#endif
adapter->q_vector[q_idx] = q_vector;
}
@@ -1938,6 +2025,9 @@ err_out:
while (q_idx) {
q_idx--;
q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ napi_hash_del(&q_vector->napi);
+#endif
netif_napi_del(&q_vector->napi);
kfree(q_vector);
adapter->q_vector[q_idx] = NULL;
@@ -1961,6 +2051,9 @@ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
adapter->q_vector[q_idx] = NULL;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ napi_hash_del(&q_vector->napi);
+#endif
netif_napi_del(&q_vector->napi);
kfree(q_vector);
}
@@ -2072,6 +2165,9 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
hw->mac.max_tx_queues = 2;
hw->mac.max_rx_queues = 2;
+ /* lock to protect mailbox accesses */
+ spin_lock_init(&adapter->mbx_lock);
+
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
@@ -2082,6 +2178,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
pr_err("init_shared_code failed: %d\n", err);
goto out;
}
+ ixgbevf_negotiate_api(adapter);
err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
if (err)
dev_info(&pdev->dev, "Error reading MAC address\n");
@@ -2097,9 +2194,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
}
- /* lock to protect mailbox accesses */
- spin_lock_init(&adapter->mbx_lock);
-
/* Enable dynamic interrupt throttling rates */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
@@ -2620,8 +2714,6 @@ static int ixgbevf_open(struct net_device *netdev)
}
}
- ixgbevf_negotiate_api(adapter);
-
/* setup queue reg_idx and Rx queue count */
err = ixgbevf_setup_queues(adapter);
if (err)
@@ -3010,7 +3102,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
/* We need to check again in a case another CPU has just
* made room available. */
- if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ if (likely(ixgbevf_desc_unused(tx_ring) < size))
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -3021,7 +3113,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
- if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ if (likely(ixgbevf_desc_unused(tx_ring) >= size))
return 0;
return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
@@ -3216,6 +3308,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
}
pci_set_master(pdev);
+ ixgbevf_reset(adapter);
+
rtnl_lock();
err = ixgbevf_init_interrupt_scheme(adapter);
rtnl_unlock();
@@ -3224,8 +3318,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
return err;
}
- ixgbevf_reset(adapter);
-
if (netif_running(netdev)) {
err = ixgbevf_open(netdev);
if (err)
@@ -3293,6 +3385,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = ixgbevf_busy_poll_recv,
+#endif
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3326,19 +3421,14 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
}
pci_using_dac = 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 387b52635bc0..4d44d64ae387 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -242,7 +242,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
if (addr)
- memcpy(msg_addr, addr, 6);
+ memcpy(msg_addr, addr, ETH_ALEN);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
@@ -275,7 +275,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
memset(msgbuf, 0, sizeof(msgbuf));
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
- memcpy(msg_addr, addr, 6);
+ memcpy(msg_addr, addr, ETH_ALEN);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 23de82a9da82..f5685c0d0579 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -309,7 +309,7 @@ static void
jme_load_macaddr(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
- unsigned char macaddr[6];
+ unsigned char macaddr[ETH_ALEN];
u32 val;
spin_lock_bh(&jme->macaddr_lock);
@@ -321,7 +321,7 @@ jme_load_macaddr(struct net_device *netdev)
val = jread32(jme, JME_RXUMA_HI);
macaddr[4] = (val >> 0) & 0xFF;
macaddr[5] = (val >> 8) & 0xFF;
- memcpy(netdev->dev_addr, macaddr, 6);
+ memcpy(netdev->dev_addr, macaddr, ETH_ALEN);
spin_unlock_bh(&jme->macaddr_lock);
}
@@ -3192,7 +3192,6 @@ jme_init_one(struct pci_dev *pdev,
err_out_unmap:
iounmap(jme->regs);
err_out_free_netdev:
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
err_out_release_regions:
pci_release_regions(pdev);
@@ -3210,7 +3209,6 @@ jme_remove_one(struct pci_dev *pdev)
unregister_netdev(netdev);
iounmap(jme->regs);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index a36fa80968eb..4a5e3b0f712e 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1110,7 +1110,7 @@ static int korina_probe(struct platform_device *pdev)
lp = netdev_priv(dev);
bif->dev = dev;
- memcpy(dev->dev_addr, bif->mac, 6);
+ memcpy(dev->dev_addr, bif->mac, ETH_ALEN);
lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 7fb5677451f9..4cfae6c9a63f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1131,15 +1131,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
spin_unlock_bh(&mp->mib_counters_lock);
-
- mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
static void mib_counters_timer_wrapper(unsigned long _mp)
{
struct mv643xx_eth_private *mp = (void *)_mp;
-
mib_counters_update(mp);
+ mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
@@ -2237,6 +2235,7 @@ static int mv643xx_eth_open(struct net_device *dev)
mp->int_mask |= INT_TX_END_0 << i;
}
+ add_timer(&mp->mib_counters_timer);
port_start(mp);
wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2514,7 +2513,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
mac_addr = of_get_mac_address(pnp);
if (mac_addr)
- memcpy(ppd.mac_addr, mac_addr, 6);
+ memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2534,6 +2533,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
if (!ppdev)
return -ENOMEM;
ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ppdev->dev.of_node = pnp;
ret = platform_device_add_resources(ppdev, &res, 1);
if (ret)
@@ -2696,7 +2696,7 @@ static void set_params(struct mv643xx_eth_private *mp,
struct net_device *dev = mp->dev;
if (is_valid_ether_addr(pd->mac_addr))
- memcpy(dev->dev_addr, pd->mac_addr, 6);
+ memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
else
uc_addr_get(mp, dev->dev_addr);
@@ -2916,7 +2916,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mp->mib_counters_timer.data = (unsigned long)mp;
mp->mib_counters_timer.function = mib_counters_timer_wrapper;
mp->mib_counters_timer.expires = jiffies + 30 * HZ;
- add_timer(&mp->mib_counters_timer);
spin_lock_init(&mp->mib_counters_lock);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index e2f662660313..7354960b583b 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -4,11 +4,9 @@
* Since the MDIO interface of Marvell network interfaces is shared
* between all network interfaces, having a single driver allows to
* handle concurrent accesses properly (you may have four Ethernet
- * ports, but they in fact share the same SMI interface to access the
- * MDIO bus). Moreover, this MDIO interface code is similar between
- * the mv643xx_eth driver and the mvneta driver. For now, it is only
- * used by the mvneta driver, but it could later be used by the
- * mv643xx_eth driver as well.
+ * ports, but they in fact share the same SMI interface to access
+ * the MDIO bus). This driver is currently used by the mvneta and
+ * mv643xx_eth drivers.
*
* Copyright (C) 2012 Marvell
*
@@ -44,6 +42,15 @@
#define MVMDIO_ERR_INT_SMI_DONE 0x00000010
#define MVMDIO_ERR_INT_MASK 0x0080
+/*
+ * SMI Timeout measurements:
+ * - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt)
+ * - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled)
+ */
+#define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */
+#define MVMDIO_SMI_POLL_INTERVAL_MIN 45
+#define MVMDIO_SMI_POLL_INTERVAL_MAX 55
+
struct orion_mdio_dev {
struct mutex lock;
void __iomem *regs;
@@ -68,77 +75,68 @@ static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
static int orion_mdio_wait_ready(struct mii_bus *bus)
{
struct orion_mdio_dev *dev = bus->priv;
- int count;
-
- if (dev->err_interrupt <= 0) {
- count = 0;
- while (1) {
- if (orion_mdio_smi_is_done(dev))
- break;
-
- if (count > 100) {
- dev_err(bus->parent,
- "Timeout: SMI busy for too long\n");
- return -ETIMEDOUT;
- }
-
- udelay(10);
- count++;
- }
- } else {
- if (!orion_mdio_smi_is_done(dev)) {
+ unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT);
+ unsigned long end = jiffies + timeout;
+ int timedout = 0;
+
+ while (1) {
+ if (orion_mdio_smi_is_done(dev))
+ return 0;
+ else if (timedout)
+ break;
+
+ if (dev->err_interrupt <= 0) {
+ usleep_range(MVMDIO_SMI_POLL_INTERVAL_MIN,
+ MVMDIO_SMI_POLL_INTERVAL_MAX);
+
+ if (time_is_before_jiffies(end))
+ ++timedout;
+ } else {
wait_event_timeout(dev->smi_busy_wait,
- orion_mdio_smi_is_done(dev),
- msecs_to_jiffies(100));
- if (!orion_mdio_smi_is_done(dev))
- return -ETIMEDOUT;
- }
+ orion_mdio_smi_is_done(dev),
+ timeout);
+
+ ++timedout;
+ }
}
- return 0;
+ dev_err(bus->parent, "Timeout: SMI busy for too long\n");
+ return -ETIMEDOUT;
}
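The reworked wait loop above bounds the total wait with a jiffies deadline instead of a fixed iteration count, and always makes one final readiness check after the deadline expires. A rough user-space analogue of that structure (device_ready() is a hypothetical stand-in for the SMI-done check, and the interval/deadline values are arbitrary) looks like this:

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

/* Hypothetical stand-in for the "SMI done" register check. */
static bool device_ready(void)
{
	static int polls;
	return ++polls > 3;
}

static int wait_ready(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 }; /* ~50us poll */
	time_t end = time(NULL) + 1;                                /* 1s deadline */
	bool timedout = false;

	while (1) {
		if (device_ready())
			return 0;
		if (timedout)
			break;
		nanosleep(&ts, NULL);
		if (time(NULL) > end)   /* one more check after the deadline */
			timedout = true;
	}
	fprintf(stderr, "Timeout: device busy for too long\n");
	return -1;
}

int main(void)
{
	return wait_ready() ? 1 : 0;
}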
static int orion_mdio_read(struct mii_bus *bus, int mii_id,
int regnum)
{
struct orion_mdio_dev *dev = bus->priv;
- int count;
u32 val;
int ret;
mutex_lock(&dev->lock);
ret = orion_mdio_wait_ready(bus);
- if (ret < 0) {
- mutex_unlock(&dev->lock);
- return ret;
- }
+ if (ret < 0)
+ goto out;
writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
MVMDIO_SMI_READ_OPERATION),
dev->regs);
- /* Wait for the value to become available */
- count = 0;
- while (1) {
- val = readl(dev->regs);
- if (val & MVMDIO_SMI_READ_VALID)
- break;
-
- if (count > 100) {
- dev_err(bus->parent, "Timeout when reading PHY\n");
- mutex_unlock(&dev->lock);
- return -ETIMEDOUT;
- }
+ ret = orion_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
- udelay(10);
- count++;
+ val = readl(dev->regs);
+ if (!(val & MVMDIO_SMI_READ_VALID)) {
+ dev_err(bus->parent, "SMI bus read not valid\n");
+ ret = -ENODEV;
+ goto out;
}
+ ret = val & 0xFFFF;
+out:
mutex_unlock(&dev->lock);
-
- return val & 0xFFFF;
+ return ret;
}
static int orion_mdio_write(struct mii_bus *bus, int mii_id,
@@ -150,10 +148,8 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id,
mutex_lock(&dev->lock);
ret = orion_mdio_wait_ready(bus);
- if (ret < 0) {
- mutex_unlock(&dev->lock);
- return ret;
- }
+ if (ret < 0)
+ goto out;
writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
@@ -161,9 +157,9 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id,
(value << MVMDIO_SMI_DATA_SHIFT)),
dev->regs);
+out:
mutex_unlock(&dev->lock);
-
- return 0;
+ return ret;
}
static int orion_mdio_reset(struct mii_bus *bus)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e35bac7cfdf1..7d99e695a110 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2811,7 +2811,7 @@ static int mvneta_probe(struct platform_device *pdev)
}
dt_mac_addr = of_get_mac_address(dn);
- if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
+ if (dt_mac_addr) {
mac_from = "device tree";
memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
} else {
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index ecc7f7b696b8..597846193869 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4046,7 +4046,6 @@ err_out_free_regions:
pci_release_regions(pdev);
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return err;
}
@@ -4090,7 +4089,6 @@ static void skge_remove(struct pci_dev *pdev)
iounmap(hw->regs);
kfree(hw);
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index e09a8c6f8536..a7df981d2123 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5081,7 +5081,6 @@ err_out_free_regions:
err_out_disable:
pci_disable_device(pdev);
err_out:
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -5124,8 +5123,6 @@ static void sky2_remove(struct pci_dev *pdev)
iounmap(hw->regs);
kfree(hw);
-
- pci_set_drvdata(pdev, NULL);
}
static int sky2_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index ea20182c6969..65d41b76fa2c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1687,11 +1687,11 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (NO_INDX != vp_oper->vlan_idx) {
__mlx4_unregister_vlan(&priv->dev,
- port, vp_oper->vlan_idx);
+ port, vp_oper->state.default_vlan);
vp_oper->vlan_idx = NO_INDX;
}
if (NO_INDX != vp_oper->mac_idx) {
- __mlx4_unregister_mac(&priv->dev, port, vp_oper->mac_idx);
+ __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
vp_oper->mac_idx = NO_INDX;
}
}
@@ -1718,6 +1718,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
if (cmd == MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Received reset from slave:%d\n", slave);
slave_state[slave].active = false;
+ slave_state[slave].old_vlan_api = false;
mlx4_master_deactivate_admin_state(priv, slave);
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
slave_state[slave].event_eq[i].eqn = -1;
@@ -2253,7 +2254,6 @@ EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_vport_oper_state *vf_oper;
struct mlx4_vport_state *vf_admin;
int slave;
@@ -2269,7 +2269,6 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
return -EINVAL;
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
- vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if ((0 == vlan) && (0 == qos))
vf_admin->default_vlan = MLX4_VGT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fa37b7a61213..b5554121aca4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -417,7 +417,6 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
- int idx;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
@@ -425,10 +424,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
/* Remove VID from port VLAN filter */
mutex_lock(&mdev->state_lock);
- if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
- mlx4_unregister_vlan(mdev->dev, priv->port, idx);
- else
- en_dbg(HW, priv, "could not find vid %d in cache\n", vid);
+ mlx4_unregister_vlan(mdev->dev, priv->port, vid);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
@@ -1733,7 +1729,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
/* Unregister Mac address for the port */
mlx4_en_put_qp(priv);
- if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
mdev->mac_removed[priv->port] = 1;
/* Free RX Rings */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index dec455c8f627..afe2efa69c86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
put_page(page);
return -ENOMEM;
}
- page_alloc->size = PAGE_SIZE << order;
+ page_alloc->page_size = PAGE_SIZE << order;
page_alloc->page = page;
page_alloc->dma = dma;
- page_alloc->offset = frag_info->frag_align;
+ page_alloc->page_offset = frag_info->frag_align;
/* Not doing get_page() for each frag is a big win
 * on asymmetric workloads.
*/
- atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride);
+ atomic_set(&page->_count,
+ page_alloc->page_size / frag_info->frag_stride);
return 0;
}
@@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
for (i = 0; i < priv->num_frags; i++) {
frag_info = &priv->frag_info[i];
page_alloc[i] = ring_alloc[i];
- page_alloc[i].offset += frag_info->frag_stride;
- if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size)
+ page_alloc[i].page_offset += frag_info->frag_stride;
+
+ if (page_alloc[i].page_offset + frag_info->frag_stride <=
+ ring_alloc[i].page_size)
continue;
+
if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
goto out;
}
for (i = 0; i < priv->num_frags; i++) {
frags[i] = ring_alloc[i];
- dma = ring_alloc[i].dma + ring_alloc[i].offset;
+ dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
ring_alloc[i] = page_alloc[i];
rx_desc->data[i].addr = cpu_to_be64(dma);
}
@@ -117,7 +121,7 @@ out:
frag_info = &priv->frag_info[i];
if (page_alloc[i].page != ring_alloc[i].page) {
dma_unmap_page(priv->ddev, page_alloc[i].dma,
- page_alloc[i].size, PCI_DMA_FROMDEVICE);
+ page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
page = page_alloc[i].page;
atomic_set(&page->_count, 1);
put_page(page);
@@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
int i)
{
const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+ u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
+
- if (frags[i].offset + frag_info->frag_stride > frags[i].size)
- dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size,
- PCI_DMA_FROMDEVICE);
+ if (next_frag_end > frags[i].page_size)
+ dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
+ PCI_DMA_FROMDEVICE);
if (frags[i].page)
put_page(frags[i].page);
@@ -161,7 +167,7 @@ out:
page_alloc = &ring->page_alloc[i];
dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->size, PCI_DMA_FROMDEVICE);
+ page_alloc->page_size, PCI_DMA_FROMDEVICE);
page = page_alloc->page;
atomic_set(&page->_count, 1);
put_page(page);
@@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
i, page_count(page_alloc->page));
dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->size, PCI_DMA_FROMDEVICE);
- while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) {
+ page_alloc->page_size, PCI_DMA_FROMDEVICE);
+ while (page_alloc->page_offset + frag_info->frag_stride <
+ page_alloc->page_size) {
put_page(page_alloc->page);
- page_alloc->offset += frag_info->frag_stride;
+ page_alloc->page_offset += frag_info->frag_stride;
}
page_alloc->page = NULL;
}
@@ -478,7 +485,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
/* Save page reference in skb */
__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
- skb_frags_rx[nr].page_offset = frags[nr].offset;
+ skb_frags_rx[nr].page_offset = frags[nr].page_offset;
skb->truesize += frag_info->frag_stride;
frags[nr].page = NULL;
}
@@ -517,7 +524,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
/* Get pointer to first fragment so we could copy the headers into the
* (linear part of the) skb */
- va = page_address(frags[0].page) + frags[0].offset;
+ va = page_address(frags[0].page) + frags[0].page_offset;
if (length <= SMALL_PACKET_SIZE) {
/* We are copying all relevant data to the skb - temporarily
@@ -645,7 +652,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
ethh = (struct ethhdr *)(page_address(frags[0].page) +
- frags[0].offset);
+ frags[0].page_offset);
if (is_multicast_ether_addr(ethh->h_dest)) {
struct mlx4_mac_entry *entry;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 0d63daa2f422..7d2e24bc674c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -177,6 +177,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
u8 field;
u32 size;
int err = 0;
@@ -185,18 +186,26 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
#define QUERY_FUNC_CAP_FMR_OFFSET 0x8
-#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
-#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
-#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18
-#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20
-#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24
-#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68
+
#define QUERY_FUNC_CAP_FMR_FLAG 0x80
#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
#define QUERY_FUNC_CAP_FLAG_ETH 0x80
+#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
@@ -237,8 +246,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
} else if (vhcr->op_modifier == 0) {
- /* enable rdma and ethernet interfaces */
- field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA);
+ /* enable rdma and ethernet interfaces, and new quota locations */
+ field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
+ QUERY_FUNC_CAP_FLAG_QUOTAS);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
field = dev->caps.num_ports;
@@ -250,14 +260,20 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = 0; /* protected FMR support not available as yet */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
- size = dev->caps.num_qps;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+ size = dev->caps.num_qps;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
- size = dev->caps.num_srqs;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+ size = dev->caps.num_srqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
- size = dev->caps.num_cqs;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+ size = dev->caps.num_cqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
size = dev->caps.num_eqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
@@ -265,14 +281,19 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = dev->caps.reserved_eqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
- size = dev->caps.num_mpts;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+ size = dev->caps.num_mpts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
- size = dev->caps.num_mtts;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+ size = dev->caps.num_mtts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
size = dev->caps.num_mgms + dev->caps.num_amgms;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
} else
err = -EINVAL;
@@ -287,7 +308,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
u32 *outbox;
u8 field, op_modifier;
u32 size;
- int err = 0;
+ int err = 0, quotas = 0;
op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
@@ -311,6 +332,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
goto out;
}
func_cap->flags = field;
+ quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
func_cap->num_ports = field;
@@ -318,29 +340,50 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
func_cap->pf_context_behaviour = size;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
- func_cap->qp_quota = size & 0xFFFFFF;
+ if (quotas) {
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+ func_cap->qp_quota = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
- func_cap->srq_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+ func_cap->srq_quota = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
- func_cap->cq_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+ func_cap->cq_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+ func_cap->mpt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+ func_cap->mtt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+ func_cap->mcg_quota = size & 0xFFFFFF;
+
+ } else {
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
+ func_cap->qp_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
+ func_cap->srq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
+ func_cap->cq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
+ func_cap->mpt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
+ func_cap->mtt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
+ func_cap->mcg_quota = size & 0xFFFFFF;
+ }
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
func_cap->max_eq = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
func_cap->reserved_eq = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
- func_cap->mpt_quota = size & 0xFFFFFF;
-
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
- func_cap->mtt_quota = size & 0xFFFFFF;
-
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
- func_cap->mcg_quota = size & 0xFFFFFF;
goto out;
}
@@ -652,7 +695,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6)
- dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN;
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -1713,7 +1756,6 @@ void mlx4_opreq_action(struct work_struct *work)
u32 *outbox;
u32 modifier;
u16 token;
- u16 type_m;
u16 type;
int err;
u32 num_qps;
@@ -1739,14 +1781,13 @@ void mlx4_opreq_action(struct work_struct *work)
MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err) {
- mlx4_err(dev, "Failed to retreive required operation: %d\n",
+ mlx4_err(dev, "Failed to retrieve required operation: %d\n",
err);
return;
}
MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
- type_m = type >> 12;
type &= 0xfff;
switch (type) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 60c9f4f103fc..7d2628dfdc29 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -42,6 +42,7 @@
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/kmod.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -561,13 +562,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
}
dev->caps.num_ports = func_cap.num_ports;
- dev->caps.num_qps = func_cap.qp_quota;
- dev->caps.num_srqs = func_cap.srq_quota;
- dev->caps.num_cqs = func_cap.cq_quota;
- dev->caps.num_eqs = func_cap.max_eq;
- dev->caps.reserved_eqs = func_cap.reserved_eq;
- dev->caps.num_mpts = func_cap.mpt_quota;
- dev->caps.num_mtts = func_cap.mtt_quota;
+ dev->quotas.qp = func_cap.qp_quota;
+ dev->quotas.srq = func_cap.srq_quota;
+ dev->quotas.cq = func_cap.cq_quota;
+ dev->quotas.mpt = func_cap.mpt_quota;
+ dev->quotas.mtt = func_cap.mtt_quota;
+ dev->caps.num_qps = 1 << hca_param.log_num_qps;
+ dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
+ dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
+ dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
+ dev->caps.num_eqs = func_cap.max_eq;
+ dev->caps.reserved_eqs = func_cap.reserved_eq;
dev->caps.num_pds = MLX4_NUM_PDS;
dev->caps.num_mgms = 0;
dev->caps.num_amgms = 0;
@@ -650,6 +655,27 @@ err_mem:
return err;
}
+static void mlx4_request_modules(struct mlx4_dev *dev)
+{
+ int port;
+ int has_ib_port = false;
+ int has_eth_port = false;
+#define EN_DRV_NAME "mlx4_en"
+#define IB_DRV_NAME "mlx4_ib"
+
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
+ has_ib_port = true;
+ else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+ has_eth_port = true;
+ }
+
+ if (has_ib_port)
+ request_module_nowait(IB_DRV_NAME);
+ if (has_eth_port)
+ request_module_nowait(EN_DRV_NAME);
+}
+
/*
* Change the port configuration of the device.
* Every user of this function must hold the port mutex.
@@ -681,6 +707,11 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
}
mlx4_set_port_mask(dev);
err = mlx4_register_device(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to register device\n");
+ goto out;
+ }
+ mlx4_request_modules(dev);
}
out:
@@ -2075,9 +2106,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
"aborting.\n");
return err;
}
- if (num_vfs > MLX4_MAX_NUM_VF) {
- printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n",
- num_vfs, MLX4_MAX_NUM_VF);
+
+ /* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs
+ * per port, we must limit the number of VFs to 63 (since there are
+ * 128 MACs)
+ */
+ if (num_vfs >= MLX4_MAX_NUM_VF) {
+ dev_err(&pdev->dev,
+ "Requested more VF's (%d) than allowed (%d)\n",
+ num_vfs, MLX4_MAX_NUM_VF - 1);
return -EINVAL;
}
@@ -2295,6 +2332,8 @@ slave_start:
if (err)
goto err_steer;
+ mlx4_init_quotas(dev);
+
for (port = 1; port <= dev->caps.num_ports; port++) {
err = mlx4_init_port_info(dev, port);
if (err)
@@ -2305,6 +2344,8 @@ slave_start:
if (err)
goto err_port;
+ mlx4_request_modules(dev);
+
mlx4_sense_init(dev);
mlx4_start_sense(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 55f6245efb6c..70f0213d68c4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -645,7 +645,7 @@ static const u8 __promisc_mode[] = {
int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
enum mlx4_net_trans_promisc_mode flow_type)
{
- if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
+ if (flow_type >= MLX4_FS_MODE_NUM) {
mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
return -EINVAL;
}
@@ -681,7 +681,7 @@ const u16 __sw_id_hw[] = {
int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id)
{
- if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+ if (id >= MLX4_NET_TRANS_RULE_NUM) {
mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
return -EINVAL;
}
@@ -706,7 +706,7 @@ static const int __rule_hw_sz[] = {
int mlx4_hw_rule_sz(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id)
{
- if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+ if (id >= MLX4_NET_TRANS_RULE_NUM) {
mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 348bb8c7d9a7..e582a41a802b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -455,6 +455,7 @@ struct mlx4_slave_state {
u8 last_cmd;
u8 init_port_mask;
bool active;
+ bool old_vlan_api;
u8 function;
dma_addr_t vhcr_dma;
u16 mtu[MLX4_MAX_PORTS + 1];
@@ -503,12 +504,28 @@ struct slave_list {
struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
};
+struct resource_allocator {
+ spinlock_t alloc_lock; /* protect quotas */
+ union {
+ int res_reserved;
+ int res_port_rsvd[MLX4_MAX_PORTS];
+ };
+ union {
+ int res_free;
+ int res_port_free[MLX4_MAX_PORTS];
+ };
+ int *quota;
+ int *allocated;
+ int *guaranteed;
+};
+
struct mlx4_resource_tracker {
spinlock_t lock;
/* tree for each resources */
struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
/* num_of_slave's lists, one per slave */
struct slave_list *slave_list;
+ struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE];
};
#define SLAVE_EVENT_EQ_SIZE 128
@@ -1111,7 +1128,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
-void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
@@ -1252,4 +1269,6 @@ static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
+void mlx4_init_quotas(struct mlx4_dev *dev);
+
#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5e0aa569306a..bf06e3610d27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -237,8 +237,8 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
dma_addr_t dma;
- u32 offset;
- u32 size;
+ u32 page_offset;
+ u32 page_size;
};
struct mlx4_en_tx_ring {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index f91719a08cba..63391a1a7f8c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -755,14 +755,14 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
struct mlx4_mr_table *mr_table = &priv->mr_table;
int err;
- if (!is_power_of_2(dev->caps.num_mpts))
- return -EINVAL;
-
/* Nothing to do for slaves - all MR handling is forwarded
* to the master */
if (mlx4_is_slave(dev))
return 0;
+ if (!is_power_of_2(dev->caps.num_mpts))
+ return -EINVAL;
+
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
~0, dev->caps.reserved_mrws, 0);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 946e0af5faef..caaa15470395 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -178,13 +178,24 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac);
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
u64 out_param = 0;
- int err;
+ int err = -EINVAL;
if (mlx4_is_mfunc(dev)) {
- set_param_l(&out_param, port);
- err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
- RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
- MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
+ err = mlx4_cmd_imm(dev, mac, &out_param,
+ ((u32) port) << 8 | (u32) RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ }
+ if (err && err == -EINVAL && mlx4_is_slave(dev)) {
+ /* retry using old REG_MAC format */
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ dev->flags |= MLX4_FLAG_OLD_REG_MAC;
+ }
if (err)
return err;
@@ -231,10 +242,18 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
u64 out_param = 0;
if (mlx4_is_mfunc(dev)) {
- set_param_l(&out_param, port);
- (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
- RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
- MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
+ (void) mlx4_cmd_imm(dev, mac, &out_param,
+ ((u32) port) << 8 | (u32) RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ } else {
+ /* use old unregister mac format */
+ set_param_l(&out_param, port);
+ (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ }
return;
}
__mlx4_unregister_mac(dev, port, mac);
@@ -284,7 +303,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -370,9 +389,12 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
u64 out_param = 0;
int err;
+ if (vlan > 4095)
+ return -EINVAL;
+
if (mlx4_is_mfunc(dev)) {
- set_param_l(&out_param, port);
- err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
+ err = mlx4_cmd_imm(dev, vlan, &out_param,
+ ((u32) port) << 8 | (u32) RES_VLAN,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
@@ -384,23 +406,26 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
-void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+ int index;
- if (index < MLX4_VLAN_REGULAR) {
- mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
- return;
+ mutex_lock(&table->mutex);
+ if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
+ mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
+ goto out;
}
- mutex_lock(&table->mutex);
- if (!table->refs[index]) {
- mlx4_warn(dev, "No vlan entry for index %d\n", index);
+ if (index < MLX4_VLAN_REGULAR) {
+ mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
goto out;
}
+
if (--table->refs[index]) {
- mlx4_dbg(dev, "Have more references for index %d,"
- "no need to modify vlan table\n", index);
+ mlx4_dbg(dev, "Have %d more references for index %d,"
+ "no need to modify vlan table\n", table->refs[index],
+ index);
goto out;
}
table->entries[index] = 0;
@@ -410,23 +435,19 @@ out:
mutex_unlock(&table->mutex);
}
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
- u64 in_param = 0;
- int err;
+ u64 out_param = 0;
if (mlx4_is_mfunc(dev)) {
- set_param_l(&in_param, port);
- err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
- MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_WRAPPED);
- if (!err)
- mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
- index);
-
+ (void) mlx4_cmd_imm(dev, vlan, &out_param,
+ ((u32) port) << 8 | (u32) RES_VLAN,
+ RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
return;
}
- __mlx4_unregister_vlan(dev, port, index);
+ __mlx4_unregister_vlan(dev, port, vlan);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index e891b058c1be..2715e61dbb74 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -480,8 +480,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
*/
err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
- (1 << 23) - 1, dev->phys_caps.base_sqpn + 8 +
- 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev),
+ (1 << 23) - 1, mlx4_num_reserved_sqps(dev),
reserved_from_top);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index dd6876321116..b1603e2287a7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -55,6 +55,14 @@ struct mac_res {
u8 port;
};
+struct vlan_res {
+ struct list_head list;
+ u16 vlan;
+ int ref_count;
+ int vlan_index;
+ u8 port;
+};
+
struct res_common {
struct list_head list;
struct rb_node node;
@@ -266,6 +274,7 @@ static const char *ResourceType(enum mlx4_resource rt)
case RES_MPT: return "RES_MPT";
case RES_MTT: return "RES_MTT";
case RES_MAC: return "RES_MAC";
+ case RES_VLAN: return "RES_VLAN";
case RES_EQ: return "RES_EQ";
case RES_COUNTER: return "RES_COUNTER";
case RES_FS_RULE: return "RES_FS_RULE";
@@ -274,10 +283,139 @@ static const char *ResourceType(enum mlx4_resource rt)
};
}
+static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
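+/* Check a slave's allocation request against its quota. The request is
+ * served from the slave's guaranteed share first; anything beyond that comes
+ * from the shared free pool, but only while the pool stays above the total
+ * amount reserved as guarantees. On success the count is charged to the
+ * slave and taken off the (per-port or global) free pool.
+ */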
+static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource res_type, int count,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct resource_allocator *res_alloc =
+ &priv->mfunc.master.res_tracker.res_alloc[res_type];
+ int err = -EINVAL;
+ int allocated, free, reserved, guaranteed, from_free;
+
+ if (slave > dev->num_vfs)
+ return -EINVAL;
+
+ spin_lock(&res_alloc->alloc_lock);
+ allocated = (port > 0) ?
+ res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+ res_alloc->allocated[slave];
+ free = (port > 0) ? res_alloc->res_port_free[port - 1] :
+ res_alloc->res_free;
+ reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
+ res_alloc->res_reserved;
+ guaranteed = res_alloc->guaranteed[slave];
+
+ if (allocated + count > res_alloc->quota[slave])
+ goto out;
+
+ if (allocated + count <= guaranteed) {
+ err = 0;
+ } else {
+ /* a portion may need to be obtained from the free area */
+ if (guaranteed - allocated > 0)
+ from_free = count - (guaranteed - allocated);
+ else
+ from_free = count;
+
+ if (free - from_free > reserved)
+ err = 0;
+ }
+
+ if (!err) {
+ /* grant the request */
+ if (port > 0) {
+ res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
+ res_alloc->res_port_free[port - 1] -= count;
+ } else {
+ res_alloc->allocated[slave] += count;
+ res_alloc->res_free -= count;
+ }
+ }
+
+out:
+ spin_unlock(&res_alloc->alloc_lock);
+ return err;
+}
+
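+/* Undo a grant: uncharge the slave's allocated count and return the
+ * instances to the (per-port or global) free pool.
+ */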
+static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource res_type, int count,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct resource_allocator *res_alloc =
+ &priv->mfunc.master.res_tracker.res_alloc[res_type];
+
+ if (slave > dev->num_vfs)
+ return;
+
+ spin_lock(&res_alloc->alloc_lock);
+ if (port > 0) {
+ res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
+ res_alloc->res_port_free[port - 1] += count;
+ } else {
+ res_alloc->allocated[slave] -= count;
+ res_alloc->res_free += count;
+ }
+
+ spin_unlock(&res_alloc->alloc_lock);
+ return;
+}
+
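+/* Split num_instances between functions: each function is guaranteed
+ * 1 / (2 * (num_vfs + 1)) of the instances and may allocate up to half of
+ * them plus its guarantee. The PF's free pool covers all instances, and for
+ * MTTs the reserved MTTs are added on top of the PF's guarantee and quota.
+ */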
+static inline void initialize_res_quotas(struct mlx4_dev *dev,
+ struct resource_allocator *res_alloc,
+ enum mlx4_resource res_type,
+ int vf, int num_instances)
+{
+ res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
+ res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
+ if (vf == mlx4_master_func_num(dev)) {
+ res_alloc->res_free = num_instances;
+ if (res_type == RES_MTT) {
+ /* reserved mtts will be taken out of the PF allocation */
+ res_alloc->res_free += dev->caps.reserved_mtts;
+ res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
+ res_alloc->quota[vf] += dev->caps.reserved_mtts;
+ }
+ }
+}
+
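+/* Fill dev->quotas with what this function may hand out to its consumers:
+ * without SR-IOV this is simply the device capability minus the reserved
+ * resources; on a multi-function master it is the PF's share as computed by
+ * the resource tracker.
+ */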
+void mlx4_init_quotas(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int pf;
+
+ /* quotas for VFs are initialized in mlx4_slave_cap */
+ if (mlx4_is_slave(dev))
+ return;
+
+ if (!mlx4_is_mfunc(dev)) {
+ dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
+ mlx4_num_reserved_sqps(dev);
+ dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
+ dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
+ dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
+ dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
+ return;
+ }
+
+ pf = mlx4_master_func_num(dev);
+ dev->quotas.qp =
+ priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
+ dev->quotas.cq =
+ priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
+ dev->quotas.srq =
+ priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
+ dev->quotas.mtt =
+ priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
+ dev->quotas.mpt =
+ priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
+}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int i;
+ int i, j;
int t;
priv->mfunc.master.res_tracker.slave_list =
@@ -298,8 +436,105 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
+ for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+ struct resource_allocator *res_alloc =
+ &priv->mfunc.master.res_tracker.res_alloc[i];
+ res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+ res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+ if (i == RES_MAC || i == RES_VLAN)
+ res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
+ (dev->num_vfs + 1) * sizeof(int),
+ GFP_KERNEL);
+ else
+ res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+
+ if (!res_alloc->quota || !res_alloc->guaranteed ||
+ !res_alloc->allocated)
+ goto no_mem_err;
+
+ spin_lock_init(&res_alloc->alloc_lock);
+ for (t = 0; t < dev->num_vfs + 1; t++) {
+ switch (i) {
+ case RES_QP:
+ initialize_res_quotas(dev, res_alloc, RES_QP,
+ t, dev->caps.num_qps -
+ dev->caps.reserved_qps -
+ mlx4_num_reserved_sqps(dev));
+ break;
+ case RES_CQ:
+ initialize_res_quotas(dev, res_alloc, RES_CQ,
+ t, dev->caps.num_cqs -
+ dev->caps.reserved_cqs);
+ break;
+ case RES_SRQ:
+ initialize_res_quotas(dev, res_alloc, RES_SRQ,
+ t, dev->caps.num_srqs -
+ dev->caps.reserved_srqs);
+ break;
+ case RES_MPT:
+ initialize_res_quotas(dev, res_alloc, RES_MPT,
+ t, dev->caps.num_mpts -
+ dev->caps.reserved_mrws);
+ break;
+ case RES_MTT:
+ initialize_res_quotas(dev, res_alloc, RES_MTT,
+ t, dev->caps.num_mtts -
+ dev->caps.reserved_mtts);
+ break;
+ case RES_MAC:
+ if (t == mlx4_master_func_num(dev)) {
+ res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
+ res_alloc->guaranteed[t] = 2;
+ for (j = 0; j < MLX4_MAX_PORTS; j++)
+ res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
+ } else {
+ res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
+ res_alloc->guaranteed[t] = 2;
+ }
+ break;
+ case RES_VLAN:
+ if (t == mlx4_master_func_num(dev)) {
+ res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
+ res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
+ for (j = 0; j < MLX4_MAX_PORTS; j++)
+ res_alloc->res_port_free[j] =
+ res_alloc->quota[t];
+ } else {
+ res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
+ res_alloc->guaranteed[t] = 0;
+ }
+ break;
+ case RES_COUNTER:
+ res_alloc->quota[t] = dev->caps.max_counters;
+ res_alloc->guaranteed[t] = 0;
+ if (t == mlx4_master_func_num(dev))
+ res_alloc->res_free = res_alloc->quota[t];
+ break;
+ default:
+ break;
+ }
+ if (i == RES_MAC || i == RES_VLAN) {
+ for (j = 0; j < MLX4_MAX_PORTS; j++)
+ res_alloc->res_port_rsvd[j] +=
+ res_alloc->guaranteed[t];
+ } else {
+ res_alloc->res_reserved += res_alloc->guaranteed[t];
+ }
+ }
+ }
spin_lock_init(&priv->mfunc.master.res_tracker.lock);
- return 0 ;
+ return 0;
+
+no_mem_err:
+ for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
+ priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
+ priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
+ priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
+ }
+ return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
@@ -309,13 +544,28 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
int i;
if (priv->mfunc.master.res_tracker.slave_list) {
- if (type != RES_TR_FREE_STRUCTS_ONLY)
- for (i = 0 ; i < dev->num_slaves; i++)
+ if (type != RES_TR_FREE_STRUCTS_ONLY) {
+ for (i = 0; i < dev->num_slaves; i++) {
if (type == RES_TR_FREE_ALL ||
dev->caps.function != i)
mlx4_delete_all_resources_for_slave(dev, i);
+ }
+ /* free master's vlans */
+ i = dev->caps.function;
+ mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+ rem_slave_vlans(dev, i);
+ mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+ }
if (type != RES_TR_FREE_SLAVES_ONLY) {
+ for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
+ priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
+ priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
+ priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
+ }
kfree(priv->mfunc.master.res_tracker.slave_list);
priv->mfunc.master.res_tracker.slave_list = NULL;
}
@@ -1229,12 +1479,19 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
case RES_OP_RESERVE:
count = get_param_l(&in_param);
align = get_param_h(&in_param);
- err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
if (err)
return err;
+ err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_QP, count, 0);
+ return err;
+ }
+
err = add_res_range(dev, slave, base, count, RES_QP, 0);
if (err) {
+ mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);
return err;
}
@@ -1282,15 +1539,24 @@ static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
order = get_param_l(&in_param);
+
+ err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
+ if (err)
+ return err;
+
base = __mlx4_alloc_mtt_range(dev, order);
- if (base == -1)
+ if (base == -1) {
+ mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
return -ENOMEM;
+ }
err = add_res_range(dev, slave, base, 1, RES_MTT, order);
- if (err)
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);
- else
+ } else {
set_param_l(out_param, base);
+ }
return err;
}
@@ -1305,13 +1571,20 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE:
+ err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
+ if (err)
+ break;
+
index = __mlx4_mpt_reserve(dev);
- if (index == -1)
+ if (index == -1) {
+ mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
break;
+ }
id = index & mpt_mask(dev);
err = add_res_range(dev, slave, id, 1, RES_MPT, index);
if (err) {
+ mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);
break;
}
@@ -1345,12 +1618,19 @@ static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE_AND_MAP:
- err = __mlx4_cq_alloc_icm(dev, &cqn);
+ err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
if (err)
break;
+ err = __mlx4_cq_alloc_icm(dev, &cqn);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
+ break;
+ }
+
err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
if (err) {
+ mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);
break;
}
@@ -1373,12 +1653,19 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE_AND_MAP:
- err = __mlx4_srq_alloc_icm(dev, &srqn);
+ err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
if (err)
break;
+ err = __mlx4_srq_alloc_icm(dev, &srqn);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
+ break;
+ }
+
err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
if (err) {
+ mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);
break;
}
@@ -1399,9 +1686,13 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct mac_res *res;
+ if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
+ return -EINVAL;
res = kzalloc(sizeof *res, GFP_KERNEL);
- if (!res)
+ if (!res) {
+ mlx4_release_resource(dev, slave, RES_MAC, 1, port);
return -ENOMEM;
+ }
res->mac = mac;
res->port = (u8) port;
list_add_tail(&res->list,
@@ -1421,6 +1712,7 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->mac == mac && res->port == (u8) port) {
list_del(&res->list);
+ mlx4_release_resource(dev, slave, RES_MAC, 1, port);
kfree(res);
break;
}
@@ -1438,12 +1730,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave)
list_for_each_entry_safe(res, tmp, mac_list, list) {
list_del(&res->list);
__mlx4_unregister_mac(dev, res->port, res->mac);
+ mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
kfree(res);
}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int in_port)
{
int err = -EINVAL;
int port;
@@ -1452,7 +1745,7 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (op != RES_OP_RESERVE_AND_MAP)
return err;
- port = get_param_l(out_param);
+ port = !in_port ? get_param_l(out_param) : in_port;
mac = in_param;
err = __mlx4_register_mac(dev, port, mac);
@@ -1469,12 +1762,114 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
}
-static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
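+/* Track a vlan registration for a slave: if the slave already registered
+ * this vlan on the port only the reference count is bumped, otherwise one
+ * RES_VLAN instance is charged against the slave's quota and a new tracking
+ * entry is added to its list.
+ */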
+static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
+ int port, int vlan_index)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *vlan_list =
+ &tracker->slave_list[slave].res_list[RES_VLAN];
+ struct vlan_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, vlan_list, list) {
+ if (res->vlan == vlan && res->port == (u8) port) {
+ /* vlan found. update ref count */
+ ++res->ref_count;
+ return 0;
+ }
+ }
+
+ if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
+ return -EINVAL;
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
+ return -ENOMEM;
+ }
+ res->vlan = vlan;
+ res->port = (u8) port;
+ res->vlan_index = vlan_index;
+ res->ref_count = 1;
+ list_add_tail(&res->list,
+ &tracker->slave_list[slave].res_list[RES_VLAN]);
return 0;
}
+
+static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *vlan_list =
+ &tracker->slave_list[slave].res_list[RES_VLAN];
+ struct vlan_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, vlan_list, list) {
+ if (res->vlan == vlan && res->port == (u8) port) {
+ if (!--res->ref_count) {
+ list_del(&res->list);
+ mlx4_release_resource(dev, slave, RES_VLAN,
+ 1, port);
+ kfree(res);
+ }
+ break;
+ }
+ }
+}
+
+static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *vlan_list =
+ &tracker->slave_list[slave].res_list[RES_VLAN];
+ struct vlan_res *res, *tmp;
+ int i;
+
+ list_for_each_entry_safe(res, tmp, vlan_list, list) {
+ list_del(&res->list);
+ /* dereference the vlan the number of times the slave referenced it */
+ for (i = 0; i < res->ref_count; i++)
+ __mlx4_unregister_vlan(dev, res->port, res->vlan);
+ mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
+ kfree(res);
+ }
+}
+
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param, int in_port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ int err;
+ u16 vlan;
+ int vlan_index;
+ int port;
+
+ port = !in_port ? get_param_l(out_param) : in_port;
+
+ if (!port || op != RES_OP_RESERVE_AND_MAP)
+ return -EINVAL;
+
+ /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
+ if (!in_port && port > 0 && port <= dev->caps.num_ports) {
+ slave_state[slave].old_vlan_api = true;
+ return 0;
+ }
+
+ vlan = (u16) in_param;
+
+ err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
+ if (!err) {
+ set_param_l(out_param, (u32) vlan_index);
+ err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
+ if (err)
+ __mlx4_unregister_vlan(dev, port, vlan);
+ }
+ return err;
+}
+
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
@@ -1484,15 +1879,23 @@ static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (op != RES_OP_RESERVE)
return -EINVAL;
- err = __mlx4_counter_alloc(dev, &index);
+ err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
if (err)
return err;
+ err = __mlx4_counter_alloc(dev, &index);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+ return err;
+ }
+
err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
- if (err)
+ if (err) {
__mlx4_counter_free(dev, index);
- else
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+ } else {
set_param_l(out_param, index);
+ }
return err;
}
@@ -1528,7 +1931,7 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
int err;
int alop = vhcr->op_modifier;
- switch (vhcr->in_modifier) {
+ switch (vhcr->in_modifier & 0xFF) {
case RES_QP:
err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
@@ -1556,12 +1959,14 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_MAC:
err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_VLAN:
err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_COUNTER:
@@ -1597,6 +2002,7 @@ static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
err = rem_res_range(dev, slave, base, count, RES_QP, 0);
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);
break;
case RES_OP_MAP_ICM:
@@ -1634,8 +2040,10 @@ static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
base = get_param_l(&in_param);
order = get_param_h(&in_param);
err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
- if (!err)
+ if (!err) {
+ mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);
+ }
return err;
}
@@ -1660,6 +2068,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);
break;
case RES_OP_MAP_ICM:
@@ -1694,6 +2103,7 @@ static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);
break;
@@ -1718,6 +2128,7 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);
break;
@@ -1730,14 +2141,14 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int in_port)
{
int port;
int err = 0;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
- port = get_param_l(out_param);
+ port = !in_port ? get_param_l(out_param) : in_port;
mac_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_mac(dev, port, in_param);
break;
@@ -1751,9 +2162,27 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int port)
{
- return 0;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ int err = 0;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ if (slave_state[slave].old_vlan_api)
+ return 0;
+ if (!port)
+ return -EINVAL;
+ vlan_del_from_slave(dev, slave, in_param, port);
+ __mlx4_unregister_vlan(dev, port, in_param);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
@@ -1771,6 +2200,7 @@ static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
__mlx4_counter_free(dev, index);
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
return err;
}
@@ -1803,7 +2233,7 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
int err = -EINVAL;
int alop = vhcr->op_modifier;
- switch (vhcr->in_modifier) {
+ switch (vhcr->in_modifier & 0xFF) {
case RES_QP:
err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param);
@@ -1831,12 +2261,14 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_MAC:
err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_VLAN:
err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_COUNTER:
@@ -3498,6 +3930,11 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_QP]);
list_del(&qp->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ if (!valid_reserved(dev, slave, qpn)) {
+ __mlx4_qp_release_range(dev, qpn, 1);
+ mlx4_release_resource(dev, slave,
+ RES_QP, 1, 0);
+ }
kfree(qp);
state = 0;
break;
@@ -3569,6 +4006,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_SRQ]);
list_del(&srq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave,
+ RES_SRQ, 1, 0);
kfree(srq);
state = 0;
break;
@@ -3635,6 +4074,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_CQ]);
list_del(&cq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave,
+ RES_CQ, 1, 0);
kfree(cq);
state = 0;
break;
@@ -3698,6 +4139,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_MPT]);
list_del(&mpt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave,
+ RES_MPT, 1, 0);
kfree(mpt);
state = 0;
break;
@@ -3767,6 +4210,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_MTT]);
list_del(&mtt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave, RES_MTT,
+ 1 << mtt->order, 0);
kfree(mtt);
state = 0;
break;
@@ -3925,6 +4370,7 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
list_del(&counter->com.list);
kfree(counter);
__mlx4_counter_free(dev, index);
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
}
}
spin_unlock_irq(mlx4_tlock(dev));
@@ -3964,7 +4410,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
struct mlx4_priv *priv = mlx4_priv(dev);
mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
- /*VLAN*/
+ rem_slave_vlans(dev, slave);
rem_slave_macs(dev, slave);
rem_slave_fs_rule(dev, slave);
rem_slave_qps(dev, slave);
@@ -4081,7 +4527,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
NO_INDX != work->orig_vlan_ix)
__mlx4_unregister_vlan(&work->priv->dev, work->port,
- work->orig_vlan_ix);
+ work->orig_vlan_id);
out:
kfree(work);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 79fd269e2c54..9e08e35ce351 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -34,6 +34,7 @@
#include <linux/init.h>
#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/srq.h>
#include <linux/export.h>
#include <linux/gfp.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 6ca30739625f..8675d26a678b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -98,6 +98,7 @@ enum {
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out,
+ void *uout, int uout_size,
mlx5_cmd_cbk_t cbk,
void *context, int page_queue)
{
@@ -110,6 +111,8 @@ static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
ent->in = in;
ent->out = out;
+ ent->uout = uout;
+ ent->uout_size = uout_size;
ent->callback = cbk;
ent->context = context;
ent->cmd = cmd;
@@ -534,6 +537,7 @@ static void cmd_work_handler(struct work_struct *work)
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
+ ent->op = be32_to_cpu(lay->in[0]) >> 16;
if (ent->in->next)
lay->in_ptr = cpu_to_be64(ent->in->next->dma);
lay->inlen = cpu_to_be32(ent->in->len);
@@ -628,7 +632,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
* 2. page queue commands do not support asynchronous completion
*/
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
- struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback,
+ struct mlx5_cmd_msg *out, void *uout, int uout_size,
+ mlx5_cmd_cbk_t callback,
void *context, int page_queue, u8 *status)
{
struct mlx5_cmd *cmd = &dev->cmd;
@@ -642,7 +647,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (callback && page_queue)
return -EINVAL;
- ent = alloc_cmd(cmd, in, out, callback, context, page_queue);
+ ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
+ page_queue);
if (IS_ERR(ent))
return PTR_ERR(ent);
@@ -670,10 +676,10 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
if (op < ARRAY_SIZE(cmd->stats)) {
stats = &cmd->stats[op];
- spin_lock(&stats->lock);
+ spin_lock_irq(&stats->lock);
stats->sum += ds;
++stats->n;
- spin_unlock(&stats->lock);
+ spin_unlock_irq(&stats->lock);
}
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
"fw exec time for %s is %lld nsec\n",
@@ -826,7 +832,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
int n;
int i;
- msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ msg = kzalloc(sizeof(*msg), flags);
if (!msg)
return ERR_PTR(-ENOMEM);
@@ -1109,6 +1115,19 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
up(&cmd->sem);
}
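+/* Return a command mailbox. Cached messages go back to their cache bucket
+ * (under an irqsave lock, since this is now also called from the completion
+ * handler); non-cached messages are freed outright.
+ */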
+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
+{
+ unsigned long flags;
+
+ if (msg->cache) {
+ spin_lock_irqsave(&msg->cache->lock, flags);
+ list_add_tail(&msg->list, &msg->cache->head);
+ spin_unlock_irqrestore(&msg->cache->lock, flags);
+ } else {
+ mlx5_free_cmd_msg(dev, msg);
+ }
+}
+
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
{
struct mlx5_cmd *cmd = &dev->cmd;
@@ -1117,6 +1136,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
void *context;
int err;
int i;
+ ktime_t t1, t2, delta;
+ s64 ds;
+ struct mlx5_cmd_stats *stats;
+ unsigned long flags;
for (i = 0; i < (1 << cmd->log_sz); i++) {
if (test_bit(i, &vector)) {
@@ -1141,9 +1164,29 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
}
free_ent(cmd, ent->idx);
if (ent->callback) {
+ t1 = timespec_to_ktime(ent->ts1);
+ t2 = timespec_to_ktime(ent->ts2);
+ delta = ktime_sub(t2, t1);
+ ds = ktime_to_ns(delta);
+ if (ent->op < ARRAY_SIZE(cmd->stats)) {
+ stats = &cmd->stats[ent->op];
+ spin_lock_irqsave(&stats->lock, flags);
+ stats->sum += ds;
+ ++stats->n;
+ spin_unlock_irqrestore(&stats->lock, flags);
+ }
+
callback = ent->callback;
context = ent->context;
err = ent->ret;
+ if (!err)
+ err = mlx5_copy_from_msg(ent->uout,
+ ent->out,
+ ent->uout_size);
+
+ mlx5_free_cmd_msg(dev, ent->out);
+ free_msg(dev, ent->in);
+
free_cmd(ent);
callback(err, context);
} else {
@@ -1160,7 +1203,8 @@ static int status_to_err(u8 status)
return status ? -1 : 0; /* TBD more meaningful codes */
}
-static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
+static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
+ gfp_t gfp)
{
struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
struct mlx5_cmd *cmd = &dev->cmd;
@@ -1172,7 +1216,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
ent = &cmd->cache.med;
if (ent) {
- spin_lock(&ent->lock);
+ spin_lock_irq(&ent->lock);
if (!list_empty(&ent->head)) {
msg = list_entry(ent->head.next, typeof(*msg), list);
/* For cached lists, we must explicitly state what is
@@ -1181,43 +1225,34 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
msg->len = in_size;
list_del(&msg->list);
}
- spin_unlock(&ent->lock);
+ spin_unlock_irq(&ent->lock);
}
if (IS_ERR(msg))
- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size);
+ msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
return msg;
}
-static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
-{
- if (msg->cache) {
- spin_lock(&msg->cache->lock);
- list_add_tail(&msg->list, &msg->cache->head);
- spin_unlock(&msg->cache->lock);
- } else {
- mlx5_free_cmd_msg(dev, msg);
- }
-}
-
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
-int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
- int out_size)
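+/* Common path for blocking and asynchronous command execution. With a
+ * callback the mailboxes are allocated GFP_ATOMIC and later released by the
+ * completion handler; without one they are allocated GFP_KERNEL and freed
+ * here once the command completes.
+ */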
+static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ int out_size, mlx5_cmd_cbk_t callback, void *context)
{
struct mlx5_cmd_msg *inb;
struct mlx5_cmd_msg *outb;
int pages_queue;
+ gfp_t gfp;
int err;
u8 status = 0;
pages_queue = is_manage_pages(in);
+ gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
- inb = alloc_msg(dev, in_size);
+ inb = alloc_msg(dev, in_size, gfp);
if (IS_ERR(inb)) {
err = PTR_ERR(inb);
return err;
@@ -1229,13 +1264,14 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
goto out_in;
}
- outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size);
+ outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
if (IS_ERR(outb)) {
err = PTR_ERR(outb);
goto out_in;
}
- err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status);
+ err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
+ pages_queue, &status);
if (err)
goto out_out;
@@ -1248,14 +1284,30 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
err = mlx5_copy_from_msg(out, outb, out_size);
out_out:
- mlx5_free_cmd_msg(dev, outb);
+ if (!callback)
+ mlx5_free_cmd_msg(dev, outb);
out_in:
- free_msg(dev, inb);
+ if (!callback)
+ free_msg(dev, inb);
return err;
}
+
+int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ int out_size)
+{
+ return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+}
EXPORT_SYMBOL(mlx5_cmd_exec);
+int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size, mlx5_cmd_cbk_t callback,
+ void *context)
+{
+ return cmd_exec(dev, in, in_size, out, out_size, callback, context);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec_cb);
+
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 9c7194b26ee2..80f6d127257a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -154,10 +154,10 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
return 0;
stats = filp->private_data;
- spin_lock(&stats->lock);
+ spin_lock_irq(&stats->lock);
if (stats->n)
field = div64_u64(stats->sum, stats->n);
- spin_unlock(&stats->lock);
+ spin_unlock_irq(&stats->lock);
ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
if (ret > 0) {
if (copy_to_user(buf, tbuf, ret))
@@ -175,10 +175,10 @@ static ssize_t average_write(struct file *filp, const char __user *buf,
struct mlx5_cmd_stats *stats;
stats = filp->private_data;
- spin_lock(&stats->lock);
+ spin_lock_irq(&stats->lock);
stats->sum = 0;
stats->n = 0;
- spin_unlock(&stats->lock);
+ spin_unlock_irq(&stats->lock);
*pos += count;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 2231d93cc7ad..64a61b286b2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -354,7 +354,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
in->ctx.intr = vecidx;
- in->ctx.log_page_size = PAGE_SHIFT - 12;
+ in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->events_mask = cpu_to_be64(mask);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index bc0f5fb66e24..40a9f5ed814d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -159,6 +159,36 @@ struct mlx5_reg_host_endianess {
u8 rsvd[15];
};
+
+#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
+
+enum {
+ MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
+ CAP_MASK(MLX5_CAP_OFF_DCT, 1),
+};
+
+/* selectively copy writable fields clearing any reserved area
+ */
+static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_hca_cap *from)
+{
+ u64 v64;
+
+ to->log_max_qp = from->log_max_qp & 0x1f;
+ to->log_max_ra_req_dc = from->log_max_ra_req_dc & 0x3f;
+ to->log_max_ra_res_dc = from->log_max_ra_res_dc & 0x3f;
+ to->log_max_ra_req_qp = from->log_max_ra_req_qp & 0x3f;
+ to->log_max_ra_res_qp = from->log_max_ra_res_qp & 0x3f;
+ to->log_max_atomic_size_qp = from->log_max_atomic_size_qp;
+ to->log_max_atomic_size_dc = from->log_max_atomic_size_dc;
+ v64 = be64_to_cpu(from->flags) & MLX5_CAP_BITS_RW_MASK;
+ to->flags = cpu_to_be64(v64);
+}
+
+enum {
+ HCA_CAP_OPMOD_GET_MAX = 0,
+ HCA_CAP_OPMOD_GET_CUR = 1,
+};
+
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL;
@@ -180,7 +210,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
}
query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
- query_ctx.hdr.opmod = cpu_to_be16(0x1);
+ query_ctx.hdr.opmod = cpu_to_be16(HCA_CAP_OPMOD_GET_CUR);
err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx),
query_out, sizeof(*query_out));
if (err)
@@ -192,8 +222,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
goto query_ex;
}
- memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
- sizeof(set_ctx->hca_cap));
+ copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap);
if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 5b44e2e46daf..35e514dc7b7d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -37,31 +37,41 @@
#include "mlx5_core.h"
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
- struct mlx5_create_mkey_mbox_in *in, int inlen)
+ struct mlx5_create_mkey_mbox_in *in, int inlen,
+ mlx5_cmd_cbk_t callback, void *context,
+ struct mlx5_create_mkey_mbox_out *out)
{
- struct mlx5_create_mkey_mbox_out out;
+ struct mlx5_create_mkey_mbox_out lout;
int err;
u8 key;
- memset(&out, 0, sizeof(out));
- spin_lock(&dev->priv.mkey_lock);
+ memset(&lout, 0, sizeof(lout));
+ spin_lock_irq(&dev->priv.mkey_lock);
key = dev->priv.mkey_key++;
- spin_unlock(&dev->priv.mkey_lock);
+ spin_unlock_irq(&dev->priv.mkey_lock);
in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ if (callback) {
+ err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out),
+ callback, context);
+ return err;
+ } else {
+ err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout));
+ }
+
if (err) {
mlx5_core_dbg(dev, "cmd exec faile %d\n", err);
return err;
}
- if (out.hdr.status) {
- mlx5_core_dbg(dev, "status %d\n", out.hdr.status);
- return mlx5_cmd_status_to_err(&out.hdr);
+ if (lout.hdr.status) {
+ mlx5_core_dbg(dev, "status %d\n", lout.hdr.status);
+ return mlx5_cmd_status_to_err(&lout.hdr);
}
- mr->key = mlx5_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key;
- mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key);
+ mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
+ mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
+ be32_to_cpu(lout.mkey), key, mr->key);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 7b12acf210f8..ba816c25c5c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -57,10 +57,13 @@ struct mlx5_pages_req {
};
struct fw_page {
- struct rb_node rb_node;
- u64 addr;
- struct page *page;
- u16 func_id;
+ struct rb_node rb_node;
+ u64 addr;
+ struct page *page;
+ u16 func_id;
+ unsigned long bitmask;
+ struct list_head list;
+ unsigned free_count;
};
struct mlx5_query_pages_inbox {
@@ -94,6 +97,11 @@ enum {
MAX_RECLAIM_TIME_MSECS = 5000,
};
+enum {
+ MLX5_MAX_RECLAIM_TIME_MILI = 5000,
+ MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / 4096,
+};
+
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
struct rb_root *root = &dev->priv.page_root;
@@ -101,6 +109,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
struct rb_node *parent = NULL;
struct fw_page *nfp;
struct fw_page *tfp;
+ int i;
while (*new) {
parent = *new;
@@ -113,25 +122,29 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
return -EEXIST;
}
- nfp = kmalloc(sizeof(*nfp), GFP_KERNEL);
+ nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
if (!nfp)
return -ENOMEM;
nfp->addr = addr;
nfp->page = page;
nfp->func_id = func_id;
+ nfp->free_count = MLX5_NUM_4K_IN_PAGE;
+ for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
+ set_bit(i, &nfp->bitmask);
rb_link_node(&nfp->rb_node, parent, new);
rb_insert_color(&nfp->rb_node, root);
+ list_add(&nfp->list, &dev->priv.free_list);
return 0;
}
-static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
+static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
struct rb_root *root = &dev->priv.page_root;
struct rb_node *tmp = root->rb_node;
- struct page *result = NULL;
+ struct fw_page *result = NULL;
struct fw_page *tfp;
while (tmp) {
@@ -141,9 +154,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
} else if (tfp->addr > addr) {
tmp = tmp->rb_right;
} else {
- rb_erase(&tfp->rb_node, root);
- result = tfp->page;
- kfree(tfp);
+ result = tfp;
break;
}
}
@@ -176,12 +187,97 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
return err;
}
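+/* Hand one 4K firmware chunk out of a partially free host page on the free
+ * list; returns -ENOMEM when no host page has a free chunk left, in which
+ * case the caller allocates a fresh system page.
+ */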
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
+{
+ struct fw_page *fp;
+ unsigned n;
+
+ if (list_empty(&dev->priv.free_list)) {
+ mlx5_core_warn(dev, "\n");
+ return -ENOMEM;
+ }
+
+ fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
+ n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
+ if (n >= MLX5_NUM_4K_IN_PAGE) {
+ mlx5_core_warn(dev, "alloc 4k bug\n");
+ return -ENOENT;
+ }
+ clear_bit(n, &fp->bitmask);
+ fp->free_count--;
+ if (!fp->free_count)
+ list_del(&fp->list);
+
+ *addr = fp->addr + n * 4096;
+
+ return 0;
+}
+
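+/* Return a 4K chunk to its host page: mark its bit free again and, once all
+ * chunks of the page are free, unmap and release the whole host page.
+ */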
+static void free_4k(struct mlx5_core_dev *dev, u64 addr)
+{
+ struct fw_page *fwp;
+ int n;
+
+ fwp = find_fw_page(dev, addr & PAGE_MASK);
+ if (!fwp) {
+ mlx5_core_warn(dev, "page not found\n");
+ return;
+ }
+
+ n = (addr & ~PAGE_MASK) % 4096;
+ fwp->free_count++;
+ set_bit(n, &fwp->bitmask);
+ if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
+ rb_erase(&fwp->rb_node, &dev->priv.page_root);
+ list_del(&fwp->list);
+ dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(fwp->page);
+ kfree(fwp);
+ } else if (fwp->free_count == 1) {
+ list_add(&fwp->list, &dev->priv.free_list);
+ }
+}
+
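+/* Allocate and DMA-map a full host page and insert it into the page tree
+ * and the free list so alloc_4k() can carve firmware chunks out of it.
+ */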
+static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+{
+ struct page *page;
+ u64 addr;
+ int err;
+
+ page = alloc_page(GFP_HIGHUSER);
+ if (!page) {
+ mlx5_core_warn(dev, "failed to allocate page\n");
+ return -ENOMEM;
+ }
+ addr = dma_map_page(&dev->pdev->dev, page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&dev->pdev->dev, addr)) {
+ mlx5_core_warn(dev, "failed dma mapping page\n");
+ err = -ENOMEM;
+ goto out_alloc;
+ }
+ err = insert_page(dev, addr, page, func_id);
+ if (err) {
+ mlx5_core_err(dev, "failed to track allocated page\n");
+ goto out_mapping;
+ }
+
+ return 0;
+
+out_mapping:
+ dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+out_alloc:
+ __free_page(page);
+
+ return err;
+}
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail)
{
struct mlx5_manage_pages_inbox *in;
struct mlx5_manage_pages_outbox out;
- struct page *page;
+ struct mlx5_manage_pages_inbox *nin;
int inlen;
u64 addr;
int err;
@@ -196,27 +292,15 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
memset(&out, 0, sizeof(out));
for (i = 0; i < npages; i++) {
- page = alloc_page(GFP_HIGHUSER);
- if (!page) {
- err = -ENOMEM;
- mlx5_core_warn(dev, "failed to allocate page\n");
- goto out_alloc;
- }
- addr = dma_map_page(&dev->pdev->dev, page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&dev->pdev->dev, addr)) {
- mlx5_core_warn(dev, "failed dma mapping page\n");
- __free_page(page);
- err = -ENOMEM;
- goto out_alloc;
- }
- err = insert_page(dev, addr, page, func_id);
+retry:
+ err = alloc_4k(dev, &addr);
if (err) {
- mlx5_core_err(dev, "failed to track allocated page\n");
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(page);
- err = -ENOMEM;
- goto out_alloc;
+ if (err == -ENOMEM)
+ err = alloc_system_page(dev, func_id);
+ if (err)
+ goto out_4k;
+
+ goto retry;
}
in->pas[i] = cpu_to_be64(addr);
}
@@ -226,7 +310,6 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
in->func_id = cpu_to_be16(func_id);
in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
- mlx5_core_dbg(dev, "err %d\n", err);
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
goto out_alloc;
@@ -247,25 +330,22 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
out_alloc:
if (notify_fail) {
- memset(in, 0, inlen);
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
- if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)))
- mlx5_core_warn(dev, "\n");
- }
- for (i--; i >= 0; i--) {
- addr = be64_to_cpu(in->pas[i]);
- page = remove_page(dev, addr);
- if (!page) {
- mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n",
- addr);
- continue;
+ nin = kzalloc(sizeof(*nin), GFP_KERNEL);
+ if (!nin) {
+ mlx5_core_warn(dev, "allocation failed\n");
+ goto out_4k;
}
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(page);
+ memset(&out, 0, sizeof(out));
+ nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
+ nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
+ if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
+ mlx5_core_warn(dev, "page notify failed\n");
+ kfree(nin);
}
+out_4k:
+ for (i--; i >= 0; i--)
+ free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
mlx5_vfree(in);
return err;
@@ -276,7 +356,6 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
{
struct mlx5_manage_pages_inbox in;
struct mlx5_manage_pages_outbox *out;
- struct page *page;
int num_claimed;
int outlen;
u64 addr;
@@ -315,13 +394,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
- page = remove_page(dev, addr);
- if (!page) {
- mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n", addr);
- } else {
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(page);
- }
+ free_4k(dev, addr);
}
out_free:
@@ -381,14 +454,19 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
return give_pages(dev, func_id, npages, 0);
}
+enum {
+ MLX5_BLKS_FOR_RECLAIM_PAGES = 12
+};
+
static int optimal_reclaimed_pages(void)
{
struct mlx5_cmd_prot_block *block;
struct mlx5_cmd_layout *lay;
int ret;
- ret = (sizeof(lay->in) + sizeof(block->data) -
- sizeof(struct mlx5_manage_pages_outbox)) / 8;
+ ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
+ sizeof(struct mlx5_manage_pages_outbox)) /
+ FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);
return ret;
}
@@ -427,6 +505,7 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
dev->priv.page_root = RB_ROOT;
+ INIT_LIST_HEAD(&dev->priv.free_list);
}
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 075f4e21d33d..c83d16dc7cd5 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1248,7 +1248,7 @@ static void ks_set_mac(struct ks_net *ks, u8 *data)
w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
ks_wrreg16(ks, KS_MARL, w);
- memcpy(ks->mac_addr, data, 6);
+ memcpy(ks->mac_addr, data, ETH_ALEN);
if (ks->enabled)
ks_start_rx(ks);
@@ -1651,7 +1651,7 @@ static int ks8851_probe(struct platform_device *pdev)
}
netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
- memcpy(netdev->dev_addr, ks->mac_addr, 6);
+ memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
ks_set_mac(ks, netdev->dev_addr);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 8ebc352bcbe6..ddd252a3da9c 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -7150,8 +7150,6 @@ static void pcidev_exit(struct pci_dev *pdev)
struct platform_info *info = pci_get_drvdata(pdev);
struct dev_info *hw_priv = &info->dev_info;
- pci_set_drvdata(pdev, NULL);
-
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
for (i = 0; i < hw_priv->hw.dev_count; i++) {
@@ -7227,7 +7225,7 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
static char pcidev_name[] = "ksz884xp";
-static struct pci_device_id pcidev_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcidev_table) = {
{ PCI_VENDOR_ID_MICREL_KS, 0x8841,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_MICREL_KS, 0x8842,
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index bd1a2d2bc2ae..cbd013379252 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -26,7 +26,6 @@
#include <linux/of_irq.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
-#include <linux/dma-mapping.h>
#include "moxart_ether.h"
@@ -448,7 +447,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
netdev_err(ndev, "irq_of_parse_and_map failed\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto irq_map_fail;
}
priv = netdev_priv(ndev);
@@ -472,24 +472,32 @@ static int moxart_mac_probe(struct platform_device *pdev)
priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
- if (priv->tx_desc_base == NULL)
+ if (priv->tx_desc_base == NULL) {
+ ret = -ENOMEM;
goto init_fail;
+ }
priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
- if (priv->rx_desc_base == NULL)
+ if (priv->rx_desc_base == NULL) {
+ ret = -ENOMEM;
goto init_fail;
+ }
priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
GFP_ATOMIC);
- if (!priv->tx_buf_base)
+ if (!priv->tx_buf_base) {
+ ret = -ENOMEM;
goto init_fail;
+ }
priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
GFP_ATOMIC);
- if (!priv->rx_buf_base)
+ if (!priv->rx_buf_base) {
+ ret = -ENOMEM;
goto init_fail;
+ }
platform_set_drvdata(pdev, ndev);
@@ -522,7 +530,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
init_fail:
netdev_err(ndev, "init failed\n");
moxart_mac_free_memory(ndev);
-
+irq_map_fail:
+ free_netdev(ndev);
return ret;
}
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 149355b52ad0..68026f7e8ba3 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -934,7 +934,7 @@ static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
{
- int rc = true;
+ bool rc = true;
spin_lock(&ss->lock);
if ((ss->state & SLICE_LOCKED)) {
WARN_ON((ss->state & SLICE_STATE_NAPI));
@@ -957,7 +957,7 @@ static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
{
- int rc = true;
+ bool rc = true;
spin_lock_bh(&ss->lock);
if ((ss->state & SLICE_LOCKED)) {
ss->state |= SLICE_STATE_POLL_YIELD;
@@ -3164,7 +3164,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
/* Walk the multicast list, and add each address */
netdev_for_each_mc_addr(ha, dev) {
- memcpy(data, &ha->addr, 6);
+ memcpy(data, &ha->addr, ETH_ALEN);
cmd.data0 = ntohl(data[0]);
cmd.data1 = ntohl(data[1]);
err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3207,7 +3207,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
}
/* change the dev structure */
- memcpy(dev->dev_addr, sa->sa_data, 6);
+ memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
return 0;
}
@@ -4208,7 +4208,6 @@ static void myri10ge_remove(struct pci_dev *pdev)
set_fw_name(mgp, NULL, false);
free_netdev(netdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 7a5e295588b0..64ec2a437f46 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -970,7 +970,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
err_ioremap:
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
err_pci_request_regions:
free_netdev(dev);
@@ -3220,7 +3219,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
pci_release_regions (pdev);
iounmap(ioaddr);
free_netdev (dev);
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 51b00941302c..9eeddbd0b2c7 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8185,7 +8185,6 @@ mem_alloc_failed:
free_shared_mem(sp);
pci_disable_device(pdev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
return ret;
@@ -8221,7 +8220,6 @@ static void s2io_rem_nic(struct pci_dev *pdev)
iounmap(sp->bar0);
iounmap(sp->bar1);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 5a20eaf903dd..8614eeb7de81 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -4739,7 +4739,6 @@ _exit6:
_exit5:
vxge_device_unregister(hldev);
_exit4:
- pci_set_drvdata(pdev, NULL);
vxge_hw_device_terminate(hldev);
pci_disable_sriov(pdev);
_exit3:
@@ -4782,7 +4781,6 @@ static void vxge_remove(struct pci_dev *pdev)
vxge_free_mac_add_list(&vdev->vpaths[i]);
vxge_device_unregister(hldev);
- pci_set_drvdata(pdev, NULL);
/* Do not call pci_disable_sriov here, as it will break child devices */
vxge_hw_device_terminate(hldev);
iounmap(vdev->bar0);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index a061b93efe66..ba3ca18611f7 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1399,8 +1399,10 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
}
if (pldat->dma_buff_base_v == 0) {
- pldat->pdev->dev.coherent_dma_mask = 0xFFFFFFFF;
- pldat->pdev->dev.dma_mask = &pldat->pdev->dev.coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_out_free_irq;
+
pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
/* Allocate a chunk of memory for the DMA ethernet buffers
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 622aa75904c4..7dc3e9b06d75 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1545,15 +1545,16 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac && is_valid_ether_addr(mac))
+ if (mac)
memcpy(netdev->dev_addr, mac, ETH_ALEN);
else
eth_hw_addr_random(netdev);
p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (result)
+ goto err;
netif_carrier_off(netdev);
result = register_netdev(netdev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 6797b1075874..2a9003071d51 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -653,38 +653,38 @@ struct pch_gbe_adapter {
extern const char pch_driver_version[];
/* pch_gbe_main.c */
-extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_down(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reset(struct pch_gbe_adapter *adapter);
-extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
- struct pch_gbe_tx_ring *txdr);
-extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
- struct pch_gbe_rx_ring *rxdr);
-extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
- struct pch_gbe_tx_ring *tx_ring);
-extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
- struct pch_gbe_rx_ring *rx_ring);
-extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
-extern u32 pch_ch_control_read(struct pci_dev *pdev);
-extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_ch_event_read(struct pci_dev *pdev);
-extern void pch_ch_event_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
-extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
-extern u64 pch_rx_snap_read(struct pci_dev *pdev);
-extern u64 pch_tx_snap_read(struct pci_dev *pdev);
-extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
+int pch_gbe_up(struct pch_gbe_adapter *adapter);
+void pch_gbe_down(struct pch_gbe_adapter *adapter);
+void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
+void pch_gbe_reset(struct pch_gbe_adapter *adapter);
+int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *txdr);
+int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rxdr);
+void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring);
+void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring);
+void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
+u32 pch_ch_control_read(struct pci_dev *pdev);
+void pch_ch_control_write(struct pci_dev *pdev, u32 val);
+u32 pch_ch_event_read(struct pci_dev *pdev);
+void pch_ch_event_write(struct pci_dev *pdev, u32 val);
+u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
+u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
+u64 pch_rx_snap_read(struct pci_dev *pdev);
+u64 pch_tx_snap_read(struct pci_dev *pdev);
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
/* pch_gbe_param.c */
-extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
+void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
/* pch_gbe_ethtool.c */
-extern void pch_gbe_set_ethtool_ops(struct net_device *netdev);
+void pch_gbe_set_ethtool_ops(struct net_device *netdev);
/* pch_gbe_mac.c */
-extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
-extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
-extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw,
- u32 addr, u32 dir, u32 reg, u16 data);
+s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
+s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
+u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
+ u16 data);
#endif /* _PCH_GBE_H_ */
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index cac33e5f9bc2..b6bdeb3c1971 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1910,7 +1910,6 @@ static void hamachi_remove_one(struct pci_dev *pdev)
iounmap(hmp->base);
free_netdev(dev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index d28593b1fc3e..07a890eb72ad 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -513,7 +513,6 @@ err_out_unmap_rx:
err_out_unmap_tx:
pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
@@ -1392,7 +1391,6 @@ static void yellowfin_remove_one(struct pci_dev *pdev)
pci_release_regions (pdev);
free_netdev (dev);
- pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 5b65356e7568..dbaa49e58b0c 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1870,7 +1870,6 @@ static void pasemi_mac_remove(struct pci_dev *pdev)
pasemi_dma_free_chan(&mac->tx->chan);
pasemi_dma_free_chan(&mac->rx->chan);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 32675e16021e..9adcdbb49476 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 81
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.81"
+#define _NETXEN_NIC_LINUX_SUBVERSION 82
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.82"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -1883,9 +1883,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
-extern void netxen_change_ringparam(struct netxen_adapter *adapter);
-extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
- int *valp);
+void netxen_change_ringparam(struct netxen_adapter *adapter);
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
extern const struct ethtool_ops netxen_nic_ethtool_ops;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index 32c790659f9c..0c64c82b9acf 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -958,6 +958,7 @@ enum {
#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac))
#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138))
#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140))
+#define NETXEN_ULA_KEY (NETXEN_CAM_RAM(0x178))
/* MiniDIMM related macros */
#define NETXEN_DIMM_CAPABILITY (NETXEN_CAM_RAM(0x258))
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 8375cbde9969..67efe754367d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -648,7 +648,7 @@ nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
mac_req = (nx_mac_req_t *)&req.words[0];
mac_req->op = op;
- memcpy(mac_req->mac_addr, addr, 6);
+ memcpy(mac_req->mac_addr, addr, ETH_ALEN);
return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index cbd75f97ffb3..3bec8cfebf99 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1415,6 +1415,32 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
return 0;
}
+#define NETXEN_ULA_ADAPTER_KEY (0xdaddad01)
+#define NETXEN_NON_ULA_ADAPTER_KEY (0xdaddad00)
+
+static void netxen_read_ula_info(struct netxen_adapter *adapter)
+{
+ u32 temp;
+
+ /* Print ULA info only once per adapter */
+ if (adapter->portnum != 0)
+ return;
+
+ temp = NXRD32(adapter, NETXEN_ULA_KEY);
+ switch (temp) {
+ case NETXEN_ULA_ADAPTER_KEY:
+ dev_info(&adapter->pdev->dev, "ULA adapter");
+ break;
+ case NETXEN_NON_ULA_ADAPTER_KEY:
+ dev_info(&adapter->pdev->dev, "non ULA adapter");
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
#ifdef CONFIG_PCIEAER
static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
{
@@ -1561,6 +1587,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_msi;
}
+ netxen_read_ula_info(adapter);
+
err = netxen_setup_netdev(adapter, netdev);
if (err)
goto err_out_disable_msi;
@@ -1602,7 +1630,6 @@ err_out_free_res:
pci_release_regions(pdev);
err_out_disable_pdev:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
}
@@ -1661,7 +1688,6 @@ static void netxen_nic_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 91a8fcd6c246..0758b9435358 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3916,7 +3916,6 @@ err_out_free_regions:
pci_release_regions(pdev);
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return err;
}
@@ -3939,7 +3938,6 @@ static void ql3xxx_remove(struct pci_dev *pdev)
iounmap(qdev->mem_map_registers);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 81bf83604c4f..631ea0ac1cd8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 50
-#define QLCNIC_LINUX_VERSIONID "5.3.50"
+#define _QLCNIC_LINUX_SUBVERSION 52
+#define QLCNIC_LINUX_VERSIONID "5.3.52"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -98,8 +98,22 @@
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
-#define QLCNIC_MAX_TX_RINGS 8
-#define QLCNIC_MAX_SDS_RINGS 8
+
+/* Driver will use 1 Tx ring in INT-x/MSI/SRIOV mode. */
+#define QLCNIC_SINGLE_RING 1
+#define QLCNIC_DEF_SDS_RINGS 4
+#define QLCNIC_DEF_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_SDS_RINGS 4
+
+enum qlcnic_queue_type {
+ QLCNIC_TX_QUEUE = 1,
+ QLCNIC_RX_QUEUE,
+};
+
+/* Operational mode for driver */
+#define QLCNIC_VNIC_MODE 0xFF
+#define QLCNIC_DEFAULT_MODE 0x0
/*
* Following are the states of the Phantom. Phantom will set them and
@@ -533,6 +547,14 @@ struct qlcnic_host_sds_ring {
char name[IFNAMSIZ + 12];
} ____cacheline_internodealigned_in_smp;
+struct qlcnic_tx_queue_stats {
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 xmit_called;
+ u64 xmit_finished;
+ u64 tx_bytes;
+};
+
struct qlcnic_host_tx_ring {
int irq;
void __iomem *crb_intr_mask;
@@ -544,10 +566,7 @@ struct qlcnic_host_tx_ring {
u32 sw_consumer;
u32 num_desc;
- u64 xmit_on;
- u64 xmit_off;
- u64 xmit_called;
- u64 xmit_finished;
+ struct qlcnic_tx_queue_stats tx_stats;
void __iomem *crb_cmd_producer;
struct cmd_desc_type0 *desc_head;
@@ -940,8 +959,6 @@ struct qlcnic_ipaddr {
#define QLCNIC_BEACON_EANBLE 0xC
#define QLCNIC_BEACON_DISABLE 0xD
-#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
-#define QLCNIC_DEF_NUM_TX_RINGS 4
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
@@ -961,8 +978,7 @@ struct qlcnic_ipaddr {
#define __QLCNIC_SRIOV_CAPABLE 11
#define __QLCNIC_MBX_POLL_ENABLE 12
#define __QLCNIC_DIAG_MODE 13
-#define __QLCNIC_DCB_STATE 14
-#define __QLCNIC_DCB_IN_AEN 15
+#define __QLCNIC_MAINTENANCE_MODE 16
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -1013,7 +1029,6 @@ struct qlcnic_adapter {
unsigned long state;
u32 flags;
- int max_drv_tx_rings;
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
@@ -1021,7 +1036,13 @@ struct qlcnic_adapter {
u16 max_jumbo_rxd;
u8 max_rds_rings;
- u8 max_sds_rings;
+
+ u8 max_sds_rings; /* max sds rings supported by adapter */
+ u8 max_tx_rings; /* max tx rings supported by adapter */
+
+ u8 drv_tx_rings; /* max tx rings supported by driver */
+ u8 drv_sds_rings; /* max sds rings supported by driver */
+
u8 rx_csum;
u8 portnum;
@@ -1199,6 +1220,7 @@ struct qlcnic_npar_info {
u8 promisc_mode;
u8 offload_flags;
u8 pci_func;
+ u8 mac[ETH_ALEN];
};
struct qlcnic_eswitch {
@@ -1543,12 +1565,13 @@ int qlcnic_loopback_test(struct net_device *, u8);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
-void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
-int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
-netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int);
-int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
-int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, u32 txq);
+void qlcnic_diag_free_res(struct net_device *netdev, int);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *);
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8);
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8);
+int qlcnic_setup_rings(struct qlcnic_adapter *, u8, u8);
+int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
@@ -1641,19 +1664,18 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
struct net_device *netdev)
{
- int err, tx_q;
-
- tx_q = adapter->max_drv_tx_rings;
+ int err;
- netdev->num_tx_queues = tx_q;
- netdev->real_num_tx_queues = tx_q;
+ netdev->num_tx_queues = adapter->drv_tx_rings;
+ netdev->real_num_tx_queues = adapter->drv_tx_rings;
- err = netif_set_real_num_tx_queues(netdev, tx_q);
+ err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
if (err)
dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
- tx_q);
+ adapter->drv_tx_rings);
else
- dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q);
+ dev_info(&adapter->pdev->dev, "Set %d Tx queues\n",
+ adapter->drv_tx_rings);
return err;
}
@@ -1695,7 +1717,7 @@ struct qlcnic_hardware_ops {
int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
void (*get_ocm_win) (struct qlcnic_hardware_context *);
int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
- int (*setup_intr) (struct qlcnic_adapter *, u8, int);
+ int (*setup_intr) (struct qlcnic_adapter *);
int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -1766,10 +1788,9 @@ static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
}
-static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter,
- u8 num_intr, int txq)
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
- return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq);
+ return adapter->ahw->hw_ops->setup_intr(adapter);
}
static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -2005,7 +2026,7 @@ static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
{
test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
- adapter->max_drv_tx_rings = 1;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
}
/* When operating in a multi tx mode, driver needs to write 0x1
@@ -2115,98 +2136,4 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
return status;
}
-
-static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->get_hw_capability)
- return dcb->ops->get_hw_capability(adapter);
-
- return 0;
-}
-
-static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->free)
- dcb->ops->free(adapter);
-}
-
-static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->attach)
- return dcb->ops->attach(adapter);
-
- return 0;
-}
-
-static inline int
-qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->query_hw_capability)
- return dcb->ops->query_hw_capability(adapter, buf);
-
- return 0;
-}
-
-static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->get_info)
- dcb->ops->get_info(adapter);
-}
-
-static inline int
-qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->query_cee_param)
- return dcb->ops->query_cee_param(adapter, buf, type);
-
- return 0;
-}
-
-static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->get_cee_cfg)
- return dcb->ops->get_cee_cfg(adapter);
-
- return 0;
-}
-
-static inline void
-qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->register_aen)
- dcb->ops->register_aen(adapter, flag);
-}
-
-static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter,
- void *msg)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->handle_aen)
- dcb->ops->handle_aen(adapter, msg);
-}
-
-static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->init_dcbnl_ops)
- dcb->ops->init_dcbnl_ops(adapter);
-}
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 3ca00e05f23d..3e7b6177cd24 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/aer.h>
-#define QLCNIC_MAX_TX_QUEUES 1
#define RSS_HASHTYPE_IP_TCP 0x3
#define QLC_83XX_FW_MBX_CMD 0
@@ -268,20 +267,18 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
}
}
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
{
int err, i, num_msix;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- if (!num_intr)
- num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
- num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
- num_intr));
+ num_msix = adapter->drv_sds_rings;
+
/* account for the AEN interrupt when using MSI-X based interrupts */
num_msix += 1;
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
- num_msix += adapter->max_drv_tx_rings;
+ num_msix += adapter->drv_tx_rings;
err = qlcnic_enable_msix(adapter, num_msix);
if (err == -ENOMEM)
@@ -325,7 +322,8 @@ inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
{
- writel(1, adapter->tgt_mask_reg);
+ if (adapter->tgt_mask_reg)
+ writel(1, adapter->tgt_mask_reg);
}
/* Enable MSI-x and INT-x interrupts */
@@ -498,8 +496,11 @@ void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
num_msix = 0;
msleep(20);
- synchronize_irq(adapter->msix_entries[num_msix].vector);
- free_irq(adapter->msix_entries[num_msix].vector, adapter);
+
+ if (adapter->msix_entries) {
+ synchronize_irq(adapter->msix_entries[num_msix].vector);
+ free_irq(adapter->msix_entries[num_msix].vector, adapter);
+ }
}
int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
@@ -760,6 +761,9 @@ int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
int cmd_type, err, opcode;
unsigned long timeout;
+ if (!mbx)
+ return -EIO;
+
opcode = LSW(cmd->req.arg[0]);
cmd_type = cmd->type;
err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
@@ -902,7 +906,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
QLCNIC_MBX_RSP(event[0]));
break;
case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
- qlcnic_dcb_handle_aen(adapter, (void *)&event[1]);
+ qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]);
break;
default:
dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
@@ -979,14 +983,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
context_id = recv_ctx->context_id;
- num_sds = (adapter->max_sds_rings - QLCNIC_MAX_RING_SETS);
+ num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_ADD_RCV_RINGS);
cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
/* set up status rings, mbx 2-81 */
index = 2;
- for (i = 8; i < adapter->max_sds_rings; i++) {
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
memset(&sds_mbx, 0, sds_mbx_size);
sds = &recv_ctx->sds_rings[i];
sds->consumer = 0;
@@ -1021,7 +1025,7 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1];
index = 0;
/* status descriptor ring */
- for (i = 8; i < adapter->max_sds_rings; i++) {
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
sds = &recv_ctx->sds_rings[i];
sds->crb_sts_consumer = ahw->pci_base0 +
mbx_out->host_csmr[index];
@@ -1079,10 +1083,10 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
struct qlcnic_hardware_context *ahw = adapter->ahw;
num_rds = adapter->max_rds_rings;
- if (adapter->max_sds_rings <= QLCNIC_MAX_RING_SETS)
- num_sds = adapter->max_sds_rings;
+ if (adapter->drv_sds_rings <= QLCNIC_MAX_SDS_RINGS)
+ num_sds = adapter->drv_sds_rings;
else
- num_sds = QLCNIC_MAX_RING_SETS;
+ num_sds = QLCNIC_MAX_SDS_RINGS;
sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
rds_mbx_size = sizeof(struct qlcnic_rds_mbx);
@@ -1183,7 +1187,7 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
- if (adapter->max_sds_rings > QLCNIC_MAX_RING_SETS)
+ if (adapter->drv_sds_rings > QLCNIC_MAX_SDS_RINGS)
err = qlcnic_83xx_add_rings(adapter);
out:
qlcnic_free_mbx_args(&cmd);
@@ -1239,9 +1243,9 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
mbx.size = tx->num_desc;
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
- msix_vector = adapter->max_sds_rings + ring;
+ msix_vector = adapter->drv_sds_rings + ring;
else
- msix_vector = adapter->max_sds_rings - 1;
+ msix_vector = adapter->drv_sds_rings - 1;
msix_id = ahw->intr_tbl[msix_vector].id;
} else {
msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
@@ -1264,7 +1268,8 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
- cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES | temp;
+ cmd.req.arg[5] = QLCNIC_SINGLE_RING | temp;
+
buf = &cmd.req.arg[6];
memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
/* send the mailbox command */
@@ -1279,7 +1284,7 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
tx->ctx_id = mbx_out->ctx_id;
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src;
+ intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n",
@@ -1290,7 +1295,7 @@ out:
}
static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
- int num_sds_ring)
+ u8 num_sds_ring)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
@@ -1306,7 +1311,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
qlcnic_detach(adapter);
- adapter->max_sds_rings = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
@@ -1320,7 +1325,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
if (ret) {
qlcnic_detach(adapter);
if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
- adapter->max_sds_rings = num_sds_ring;
+ adapter->drv_sds_rings = num_sds_ring;
qlcnic_attach(adapter);
}
netif_device_attach(netdev);
@@ -1333,7 +1338,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
}
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_83xx_enable_intr(adapter, sds_ring);
}
@@ -1354,7 +1359,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
}
static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
- int max_sds_rings)
+ u8 drv_sds_rings)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
@@ -1362,7 +1367,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_83xx_disable_intr(adapter, sds_ring);
if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
@@ -1386,7 +1391,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
}
}
adapter->ahw->diag_test = 0;
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
if (qlcnic_attach(adapter))
goto out;
@@ -1648,7 +1653,9 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
- int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings;
+ u8 drv_sds_rings = adapter->drv_sds_rings;
+ u8 drv_tx_rings = adapter->drv_tx_rings;
+ int ret = 0, loop = 0;
if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(netdev,
@@ -1670,7 +1677,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
mode == QLCNIC_ILB_MODE ? "internal" : "external");
ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
- max_sds_rings);
+ drv_sds_rings);
if (ret)
goto fail_diag_alloc;
@@ -1708,10 +1715,11 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
qlcnic_83xx_clear_lb_mode(adapter, mode);
free_diag_res:
- qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
fail_diag_alloc:
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
qlcnic_release_diag_lock(adapter);
return ret;
}
@@ -1722,7 +1730,7 @@ static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
struct qlcnic_hardware_context *ahw = adapter->ahw;
int temp;
- netdev_info(adapter->netdev, "Recieved loopback IDC time extend event for 0x%x seconds\n",
+ netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n",
ahw->extend_lb_time);
temp = ahw->extend_lb_time * 1000;
*max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
@@ -2276,9 +2284,9 @@ int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17;
npar_info->max_linkspeed_reg_offset = temp;
}
- if (npar_info->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS)
- memcpy(ahw->extra_capability, &cmd.rsp.arg[16],
- sizeof(ahw->extra_capability));
+
+ memcpy(ahw->extra_capability, &cmd.rsp.arg[16],
+ sizeof(ahw->extra_capability));
out:
qlcnic_free_mbx_args(&cmd);
@@ -2321,19 +2329,7 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
i++;
memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
i = i + 3;
- if (ahw->op_mode == QLCNIC_MGMT_FUNC)
- dev_info(dev, "id = %d active = %d type = %d\n"
- "\tport = %d min bw = %d max bw = %d\n"
- "\tmac_addr = %pM\n", pci_info->id,
- pci_info->active, pci_info->type,
- pci_info->default_port,
- pci_info->tx_min_bw,
- pci_info->tx_max_bw, pci_info->mac);
}
- if (ahw->op_mode == QLCNIC_MGMT_FUNC)
- dev_info(dev, "Max functions = %d, active functions = %d\n",
- ahw->max_pci_func, ahw->act_pci_func);
-
} else {
dev_err(dev, "Failed to get PCI Info, error = %d\n", err);
err = -EIO;
@@ -3061,11 +3057,14 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
int status = 0;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- /* Get port configuration info */
- status = qlcnic_83xx_get_port_info(adapter);
- /* Get Link Status related info */
- config = qlcnic_83xx_test_link(adapter);
- ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+ if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ /* Get port configuration info */
+ status = qlcnic_83xx_get_port_info(adapter);
+ /* Get Link Status related info */
+ config = qlcnic_83xx_test_link(adapter);
+ ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+ }
+
/* hard code until there is a way to get it from flash */
ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
@@ -3279,12 +3278,12 @@ int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
+inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
{
return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
- sizeof(adapter->ahw->ext_reg_tbl)) +
- (ARRAY_SIZE(qlcnic_83xx_reg_tbl) +
- sizeof(adapter->ahw->reg_tbl));
+ sizeof(*adapter->ahw->ext_reg_tbl)) +
+ (ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
+ sizeof(*adapter->ahw->reg_tbl));
}
int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
@@ -3305,10 +3304,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
+ u8 val, drv_sds_rings = adapter->drv_sds_rings;
+ u8 drv_tx_rings = adapter->drv_tx_rings;
u32 data;
u16 intrpt_id, id;
- u8 val;
- int ret, max_sds_rings = adapter->max_sds_rings;
+ int ret;
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev, "Device is resetting\n");
@@ -3321,7 +3321,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
}
ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
- max_sds_rings);
+ drv_sds_rings);
if (ret)
goto fail_diag_irq;
@@ -3358,10 +3358,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
done:
qlcnic_free_mbx_args(&cmd);
- qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
fail_diag_irq:
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
qlcnic_release_diag_lock(adapter);
return ret;
}
@@ -3381,10 +3382,21 @@ void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter,
}
config = ahw->port_config;
if (config & QLC_83XX_CFG_STD_PAUSE) {
- if (config & QLC_83XX_CFG_STD_TX_PAUSE)
+ switch (MSW(config)) {
+ case QLC_83XX_TX_PAUSE:
+ pause->tx_pause = 1;
+ break;
+ case QLC_83XX_RX_PAUSE:
+ pause->rx_pause = 1;
+ break;
+ case QLC_83XX_TX_RX_PAUSE:
+ default:
+ /* Backward compatibility for existing
+ * flash definitions
+ */
pause->tx_pause = 1;
- if (config & QLC_83XX_CFG_STD_RX_PAUSE)
pause->rx_pause = 1;
+ }
}
if (QLC_83XX_AUTONEG(config))
@@ -3427,7 +3439,8 @@ int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
} else if (!pause->rx_pause && !pause->tx_pause) {
- ahw->port_config &= ~QLC_83XX_CFG_STD_TX_RX_PAUSE;
+ ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE |
+ QLC_83XX_CFG_STD_PAUSE);
}
status = qlcnic_83xx_set_port_config(adapter);
if (status) {
@@ -3503,7 +3516,7 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
if (err)
return err;
- if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) {
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE) {
if (ahw->op_mode == QLCNIC_MGMT_FUNC) {
qlcnic_83xx_set_vnic_opmode(adapter);
} else {
@@ -3530,6 +3543,9 @@ void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
{
+ if (!mbx)
+ return;
+
destroy_workqueue(mbx->work_q);
kfree(mbx);
}
@@ -3650,6 +3666,9 @@ void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
{
struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ if (!mbx)
+ return;
+
clear_bit(QLC_83XX_MBX_READY, &mbx->status);
complete(&mbx->completion);
cancel_work_sync(&mbx->work);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 533e150503af..4cae6caa6bfa 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -61,7 +61,6 @@
#define QLC_83XX_HOST_SDS_MBX_IDX 8
#define QLCNIC_HOST_RDS_MBX_IDX 88
-#define QLCNIC_MAX_RING_SETS 8
/* Pause control registers */
#define QLC_83XX_SRE_SHIM_REG 0x0D200284
@@ -183,8 +182,8 @@ struct qlcnic_rcv_mbx_out {
u8 num_pci_func;
u8 state;
#endif
- u32 host_csmr[QLCNIC_MAX_RING_SETS];
- struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
} __packed;
struct qlcnic_add_rings_mbx_out {
@@ -197,8 +196,8 @@ struct qlcnic_add_rings_mbx_out {
u8 sts_num;
u8 rcv_num;
#endif
- u32 host_csmr[QLCNIC_MAX_RING_SETS];
- struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
} __packed;
/* Transmit context mailbox inbox registers
@@ -363,6 +362,9 @@ enum qlcnic_83xx_states {
#define QLC_83XX_LINK_EEE(data) ((data) & BIT_13)
#define QLC_83XX_DCBX(data) (((data) >> 28) & 7)
#define QLC_83XX_AUTONEG(data) ((data) & BIT_15)
+#define QLC_83XX_TX_PAUSE 0x10
+#define QLC_83XX_RX_PAUSE 0x20
+#define QLC_83XX_TX_RX_PAUSE 0x30
#define QLC_83XX_CFG_STD_PAUSE (1 << 5)
#define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20)
#define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20)
@@ -412,8 +414,6 @@ enum qlcnic_83xx_states {
#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
#define QLC_83XX_ESWITCH_CAPABILITY BIT_23
-#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
-#define QLC_83XX_DEFAULT_MODE 0x0
#define QLC_83XX_SRIOV_MODE 0x1
#define QLCNIC_BRDTYPE_83XX_10G 0x0083
@@ -521,7 +521,7 @@ enum qlc_83xx_ext_regs {
/* 83xx functions */
int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *);
void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
@@ -626,7 +626,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
struct qlcnic_info *, u8);
int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
-int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int);
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index f09e787af0b2..89208e5b25d6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -636,7 +636,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
if (adapter->portnum == 0)
qlcnic_set_drv_version(adapter);
- qlcnic_dcb_get_info(adapter);
+ qlcnic_dcb_get_info(adapter->dcb);
qlcnic_83xx_idc_attach_driver(adapter);
return 0;
@@ -818,6 +818,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_mailbox *mbx = ahw->mailbox;
int ret = 0;
+ u32 owner;
u32 val;
/* Perform NIC configuration based ready state entry actions */
@@ -846,6 +847,10 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
clear_bit(QLC_83XX_MBX_READY, &mbx->status);
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ } else {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner)
+ qlcnic_dump_fw(adapter);
}
return -EIO;
}
@@ -897,7 +902,7 @@ static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
set_bit(__QLCNIC_RESETTING, &adapter->state);
clear_bit(QLC_83XX_MBX_READY, &mbx->status);
- if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ if (adapter->ahw->nic_mode == QLCNIC_VNIC_MODE)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
if (qlcnic_check_diag_status(adapter)) {
@@ -1058,6 +1063,12 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
qlcnic_83xx_periodic_tasks(adapter);
+ /* Do not reschedule if firmware is in a hung state and auto
+ * recovery is disabled
+ */
+ if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
+ return;
+
/* Re-schedule the function */
if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -2022,6 +2033,8 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
ahw->max_mac_filters = nic_info.max_mac_filters;
ahw->max_mtu = nic_info.max_mtu;
+ adapter->max_tx_rings = ahw->max_tx_ques;
+ adapter->max_sds_rings = ahw->max_rx_ques;
/* eSwitch capability indicates vNIC mode.
* vNIC and SRIOV are mutually exclusive operational modes.
* If SR-IOV capability is detected, SR-IOV physical function
@@ -2034,7 +2047,7 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
return QLC_83XX_DEFAULT_OPMODE;
if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
- return QLC_83XX_VIRTUAL_NIC_MODE;
+ return QLCNIC_VNIC_MODE;
return QLC_83XX_DEFAULT_OPMODE;
}
@@ -2048,15 +2061,20 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
if (ret == -EIO)
return -EIO;
- if (ret == QLC_83XX_VIRTUAL_NIC_MODE) {
- ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
+ if (ret == QLCNIC_VNIC_MODE) {
+ ahw->nic_mode = QLCNIC_VNIC_MODE;
+
if (qlcnic_83xx_config_vnic_opmode(adapter))
return -EIO;
+ adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+ adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
} else if (ret == QLC_83XX_DEFAULT_OPMODE) {
- ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+ ahw->nic_mode = QLCNIC_DEFAULT_MODE;
adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ adapter->max_sds_rings = ahw->max_rx_ques;
+ adapter->max_tx_rings = ahw->max_tx_ques;
} else {
return -EIO;
}
@@ -2159,13 +2177,34 @@ static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
return err;
}
+static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
+{
+ u8 rx_cnt = QLCNIC_DEF_SDS_RINGS;
+ u8 tx_cnt = QLCNIC_DEF_TX_RINGS;
+
+ adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+
+ if (!adapter->ahw->msix_supported) {
+ rx_cnt = QLCNIC_SINGLE_RING;
+ tx_cnt = QLCNIC_SINGLE_RING;
+ }
+
+ /* compute and set driver Tx and SDS ring counts */
+ qlcnic_set_tx_ring_count(adapter, tx_cnt);
+ qlcnic_set_sds_ring_count(adapter, rx_cnt);
+}
int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_dcb *dcb;
int err = 0;
ahw->msix_supported = !!qlcnic_use_msi_x;
+
+ qlcnic_83xx_init_rings(adapter);
+
err = qlcnic_83xx_init_mailbox_work(adapter);
if (err)
goto exit;
@@ -2178,22 +2217,26 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
return err;
}
+ if (qlcnic_83xx_read_flash_descriptor_table(adapter) ||
+ qlcnic_83xx_read_flash_mfg_id(adapter)) {
+ dev_err(&adapter->pdev->dev, "Failed reading flash mfg id\n");
+ err = -ENOTRECOVERABLE;
+ goto detach_mbx;
+ }
+
err = qlcnic_83xx_check_hw_status(adapter);
if (err)
goto detach_mbx;
- if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
- qlcnic_83xx_read_flash_mfg_id(adapter);
-
err = qlcnic_83xx_get_fw_info(adapter);
if (err)
goto detach_mbx;
err = qlcnic_83xx_idc_init(adapter);
if (err)
- goto clear_fw_info;
+ goto detach_mbx;
- err = qlcnic_setup_intr(adapter, 0, 0);
+ err = qlcnic_setup_intr(adapter);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
goto disable_intr;
@@ -2215,13 +2258,16 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
if (err)
goto disable_mbx_intr;
+
/* Perform operating mode specific initialization */
err = adapter->nic_ops->init_driver(adapter);
if (err)
goto disable_mbx_intr;
- if (adapter->dcb && qlcnic_dcb_attach(adapter))
- qlcnic_clear_dcb_ops(adapter);
+ dcb = adapter->dcb;
+
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
/* Periodically monitor device status */
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
@@ -2233,12 +2279,10 @@ disable_mbx_intr:
disable_intr:
qlcnic_teardown_intr(adapter);
-clear_fw_info:
- kfree(ahw->fw_info);
-
detach_mbx:
qlcnic_83xx_detach_mailbox_work(adapter);
qlcnic_83xx_free_mailbox(ahw->mailbox);
+ ahw->mailbox = NULL;
exit:
return err;
}
@@ -2251,7 +2295,7 @@ void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
clear_bit(QLC_83XX_MBX_READY, &idc->status);
cancel_delayed_work_sync(&adapter->fw_work);
- if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
qlcnic_83xx_idc_detach_driver(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 0248a4c2f5dd..734d28602ac3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -94,13 +94,29 @@ qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter)
**/
static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
{
- int err = -EIO;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_npar_info *npar;
+ int i, err = -EIO;
qlcnic_83xx_get_minidump_template(adapter);
+
if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
if (qlcnic_init_pci_info(adapter))
return err;
+ npar = adapter->npars;
+
+ for (i = 0; i < ahw->act_pci_func; i++, npar++) {
+ dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
+ npar->pci_func, npar->active, npar->type,
+ npar->phy_port, npar->min_bw, npar->max_bw,
+ npar->mac);
+ }
+
+ dev_info(dev, "Max functions = %d, active functions = %d\n",
+ ahw->max_pci_func, ahw->act_pci_func);
+
if (qlcnic_83xx_set_vnic_opmode(adapter))
return err;
@@ -115,12 +131,12 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
return err;
qlcnic_83xx_config_vnic_buff_descriptors(adapter);
- adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ ahw->msix_supported = qlcnic_use_msi_x ? 1 : 0;
adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
qlcnic_83xx_enable_vnic_mode(adapter, 1);
- dev_info(&adapter->pdev->dev, "HAL Version: %d, Management function\n",
- adapter->ahw->fw_hal_version);
+ dev_info(dev, "HAL Version: %d, Management function\n",
+ ahw->fw_hal_version);
return 0;
}
@@ -240,8 +256,8 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
- int func, int *port_id)
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter,
+ int func, int *port_id)
{
struct qlcnic_info nic_info;
int err = 0;
@@ -257,23 +273,8 @@ static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
else
err = -EIO;
- return err;
-}
-
-int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func)
-{
- int id, err = 0;
-
- err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id);
- if (err)
- return err;
-
- if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
- if (!qlcnic_enable_eswitch(adapter, id, 1))
- adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE;
- else
- err = -EIO;
- }
+ if (!err)
+ adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE;
return err;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 86850dd633a1..859cb161fc63 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -270,7 +270,7 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
int err;
nrds_rings = adapter->max_rds_rings;
- nsds_rings = adapter->max_sds_rings;
+ nsds_rings = adapter->drv_sds_rings;
rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
nsds_rings);
@@ -475,7 +475,7 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test) {
- temp_nsds_rings = adapter->max_sds_rings;
+ temp_nsds_rings = adapter->drv_sds_rings;
index = temp_nsds_rings + ring;
msix_id = ahw->intr_tbl[index].id;
prq->msi_index = cpu_to_le16(msix_id);
@@ -512,7 +512,7 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- index = adapter->max_sds_rings + ring;
+ index = adapter->drv_sds_rings + ring;
intr_mask = ahw->intr_tbl[index].src;
tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
@@ -582,7 +582,7 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
recv_ctx = adapter->recv_ctx;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
&tx_ring->hw_cons_phys_addr,
@@ -616,7 +616,7 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
addr = dma_alloc_coherent(&adapter->pdev->dev,
@@ -664,7 +664,7 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
if (err)
goto err_out;
- for (ring = 0; ring < dev->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < dev->drv_tx_rings; ring++) {
err = qlcnic_fw_cmd_create_tx_ctx(dev,
&dev->tx_ring[ring],
ring);
@@ -703,7 +703,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
qlcnic_fw_cmd_del_rx_ctx(adapter);
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
qlcnic_fw_cmd_del_tx_ctx(adapter,
&adapter->tx_ring[ring]);
@@ -733,7 +733,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
recv_ctx = adapter->recv_ctx;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
if (tx_ring->hw_consumer != NULL) {
dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
@@ -764,7 +764,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (sds_ring->desc_head != NULL) {
@@ -895,6 +895,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
+ adapter->max_tx_rings = npar_info->max_tx_ques;
+ adapter->max_sds_rings = npar_info->max_rx_ques;
}
qlcnic_free_mbx_args(&cmd);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index d62d5ce432ec..86bca7c14f99 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -57,22 +57,22 @@ static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
static void qlcnic_dcb_aen_work(struct work_struct *);
static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
-static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *);
-static void __qlcnic_dcb_free(struct qlcnic_adapter *);
-static int __qlcnic_dcb_attach(struct qlcnic_adapter *);
-static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *);
-static void __qlcnic_dcb_get_info(struct qlcnic_adapter *);
-
-static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *);
-static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
-static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
-static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
-
-static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *);
-static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
-static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool);
-static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *);
+static void __qlcnic_dcb_free(struct qlcnic_dcb *);
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *);
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *);
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *);
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *, bool);
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
struct qlcnic_dcb_capability {
bool tsa_capability;
@@ -180,7 +180,7 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
.query_cee_param = qlcnic_83xx_dcb_query_cee_param,
.get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
.register_aen = qlcnic_83xx_dcb_register_aen,
- .handle_aen = qlcnic_83xx_dcb_handle_aen,
+ .aen_handler = qlcnic_83xx_dcb_aen_handler,
};
static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
@@ -193,7 +193,7 @@ static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
.get_hw_capability = qlcnic_82xx_dcb_get_hw_capability,
.query_cee_param = qlcnic_82xx_dcb_query_cee_param,
.get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg,
- .handle_aen = qlcnic_82xx_dcb_handle_aen,
+ .aen_handler = qlcnic_82xx_dcb_aen_handler,
};
static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
@@ -242,10 +242,10 @@ static int qlcnic_dcb_prio_count(u8 up_tc_map)
return j;
}
-static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter)
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb)
{
- if (test_bit(__QLCNIC_DCB_STATE, &adapter->state))
- adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
+ if (test_bit(QLCNIC_DCB_STATE, &dcb->state))
+ dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
}
static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
@@ -256,7 +256,7 @@ static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
}
-int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
{
struct qlcnic_dcb *dcb;
@@ -267,20 +267,22 @@ int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
adapter->dcb = dcb;
dcb->adapter = adapter;
qlcnic_set_dcb_ops(adapter);
+ dcb->state = 0;
return 0;
}
-static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
+static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
+ struct qlcnic_adapter *adapter;
if (!dcb)
return;
- qlcnic_dcb_register_aen(adapter, 0);
+ adapter = dcb->adapter;
+ qlcnic_dcb_register_aen(dcb, 0);
- while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
usleep_range(10000, 11000);
cancel_delayed_work_sync(&dcb->aen_work);
@@ -298,23 +300,22 @@ static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
adapter->dcb = NULL;
}
-static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
{
- qlcnic_dcb_get_hw_capability(adapter);
- qlcnic_dcb_get_cee_cfg(adapter);
- qlcnic_dcb_register_aen(adapter, 1);
+ qlcnic_dcb_get_hw_capability(dcb);
+ qlcnic_dcb_get_cee_cfg(dcb);
+ qlcnic_dcb_register_aen(dcb, 1);
}
-static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
int err = 0;
INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
if (!dcb->wq) {
- dev_err(&adapter->pdev->dev,
+ dev_err(&dcb->adapter->pdev->dev,
"DCB workqueue allocation failed. DCB will be disabled\n");
return -1;
}
@@ -331,7 +332,7 @@ static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
goto out_free_cfg;
}
- qlcnic_dcb_get_info(adapter);
+ qlcnic_dcb_get_info(dcb);
return 0;
out_free_cfg:
@@ -345,9 +346,9 @@ out_free_wq:
return err;
}
-static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
- char *buf)
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
{
+ struct qlcnic_adapter *adapter = dcb->adapter;
struct qlcnic_cmd_args cmd;
u32 mbx_out;
int err;
@@ -371,15 +372,15 @@ static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
return err;
}
-static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
+static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val)
{
- struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+ struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
u32 mbx_out;
int err;
memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
- err = qlcnic_dcb_query_hw_capability(adapter, (char *)val);
+ err = qlcnic_dcb_query_hw_capability(dcb, (char *)val);
if (err)
return err;
@@ -397,21 +398,21 @@ static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
if (cap->max_num_tc > QLC_DCB_MAX_TC ||
cap->max_ets_tc > cap->max_num_tc ||
cap->max_pfc_tc > cap->max_num_tc) {
- dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n");
+ dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n");
return -EINVAL;
}
return err;
}
-static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_cfg *cfg = dcb->cfg;
struct qlcnic_dcb_capability *cap;
u32 mbx_out;
int err;
- err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+ err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
if (err)
return err;
@@ -419,15 +420,16 @@ static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
- set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
return err;
}
-static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
char *buf, u8 type)
{
u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
+ struct qlcnic_adapter *adapter = dcb->adapter;
struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
struct device *dev = &adapter->pdev->dev;
dma_addr_t cardrsp_phys_addr;
@@ -447,8 +449,7 @@ static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
return -EINVAL;
}
- addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr,
- GFP_KERNEL);
+ addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
@@ -488,72 +489,67 @@ out:
qlcnic_free_mbx_args(&cmd);
out_free_rsp:
- dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr);
+ dma_free_coherent(dev, size, addr, cardrsp_phys_addr);
return err;
}
-static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
{
struct qlcnic_dcb_mbx_params *mbx;
int err;
- mbx = adapter->dcb->param;
+ mbx = dcb->param;
if (!mbx)
return 0;
- err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0],
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0],
QLC_DCB_LOCAL_PARAM_FWID);
if (err)
return err;
- err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1],
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1],
QLC_DCB_OPER_PARAM_FWID);
if (err)
return err;
- err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2],
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2],
QLC_DCB_PEER_PARAM_FWID);
if (err)
return err;
mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
- qlcnic_dcb_data_cee_param_map(adapter);
+ qlcnic_dcb_data_cee_param_map(dcb->adapter);
return err;
}
static void qlcnic_dcb_aen_work(struct work_struct *work)
{
- struct qlcnic_adapter *adapter;
struct qlcnic_dcb *dcb;
dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
- adapter = dcb->adapter;
- qlcnic_dcb_get_cee_cfg(adapter);
- clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state);
+ qlcnic_dcb_get_cee_cfg(dcb);
+ clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state);
}
-static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
- void *data)
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
return;
queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
}
-static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+ struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
u32 mbx_out;
int err;
- err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+ err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
if (err)
return err;
@@ -565,14 +561,15 @@ static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
- set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
return err;
}
-static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
char *buf, u8 idx)
{
+ struct qlcnic_adapter *adapter = dcb->adapter;
struct qlcnic_dcb_mbx_params mbx_out;
int err, i, j, k, max_app, size;
struct qlcnic_dcb_param *each;
@@ -632,24 +629,23 @@ out:
return err;
}
-static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
int err;
- err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0);
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0);
if (err)
return err;
- qlcnic_dcb_data_cee_param_map(adapter);
+ qlcnic_dcb_data_cee_param_map(dcb->adapter);
return err;
}
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
- bool flag)
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *dcb, bool flag)
{
u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
+ struct qlcnic_adapter *adapter = dcb->adapter;
struct qlcnic_cmd_args cmd;
int err;
@@ -669,19 +665,17 @@ static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
return err;
}
-static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
- void *data)
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
u32 *val = data;
- if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
return;
if (*val & BIT_8)
- set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
else
- clear_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ clear_bit(QLCNIC_DCB_STATE, &dcb->state);
queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
}
@@ -814,12 +808,12 @@ static u8 qlcnic_dcb_get_state(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- return test_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state);
}
static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
{
- memcpy(addr, netdev->dev_addr, netdev->addr_len);
+ memcpy(addr, netdev->perm_addr, netdev->addr_len);
}
static void
@@ -834,7 +828,7 @@ qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
*prio = *pgid = *bw_per = *up_tc_map = 0;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
!type->tc_param_valid)
return;
@@ -870,7 +864,7 @@ static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
*bw_pct = 0;
type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
!type->tc_param_valid)
return;
@@ -896,7 +890,7 @@ static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
*setting = 0;
type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
!type->pfc_mode_enable)
return;
@@ -915,7 +909,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
switch (capid) {
@@ -944,7 +938,7 @@ static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return -EINVAL;
switch (attr) {
@@ -967,7 +961,7 @@ static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
.protocol = id,
};
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
return dcb_getapp(netdev, &app);
@@ -978,7 +972,7 @@ static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_dcb *dcb = adapter->dcb;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &dcb->state))
return 0;
return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
@@ -989,7 +983,7 @@ static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
return cfg->capability.dcb_capability;
@@ -1000,7 +994,7 @@ static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_dcb_cee *type;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 1;
type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
@@ -1055,7 +1049,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
*app_count = 0;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1076,7 +1070,7 @@ static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
struct qlcnic_dcb_app *app;
int i, j;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1101,7 +1095,7 @@ static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
struct qlcnic_dcb_cee *peer;
u8 i, j, k, map;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1136,7 +1130,7 @@ static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
pfc->pfc_en = 0;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
peer = &cfg->type[QLC_DCB_PEER_IDX];
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index b87ce9fb503e..c04ae0cdc108 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -8,26 +8,29 @@
#ifndef __QLCNIC_DCBX_H
#define __QLCNIC_DCBX_H
-void qlcnic_clear_dcb_ops(struct qlcnic_adapter *);
+#define QLCNIC_DCB_STATE 0
+#define QLCNIC_DCB_AEN_MODE 1
#ifdef CONFIG_QLCNIC_DCB
-int __qlcnic_register_dcb(struct qlcnic_adapter *);
+int qlcnic_register_dcb(struct qlcnic_adapter *);
#else
-static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+static inline int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
{ return 0; }
#endif
+struct qlcnic_dcb;
+
struct qlcnic_dcb_ops {
- void (*init_dcbnl_ops) (struct qlcnic_adapter *);
- void (*free) (struct qlcnic_adapter *);
- int (*attach) (struct qlcnic_adapter *);
- int (*query_hw_capability) (struct qlcnic_adapter *, char *);
- int (*get_hw_capability) (struct qlcnic_adapter *);
- void (*get_info) (struct qlcnic_adapter *);
- int (*query_cee_param) (struct qlcnic_adapter *, char *, u8);
- int (*get_cee_cfg) (struct qlcnic_adapter *);
- int (*register_aen) (struct qlcnic_adapter *, bool);
- void (*handle_aen) (struct qlcnic_adapter *, void *);
+ int (*query_hw_capability) (struct qlcnic_dcb *, char *);
+ int (*get_hw_capability) (struct qlcnic_dcb *);
+ int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
+ void (*init_dcbnl_ops) (struct qlcnic_dcb *);
+ int (*register_aen) (struct qlcnic_dcb *, bool);
+ void (*aen_handler) (struct qlcnic_dcb *, void *);
+ int (*get_cee_cfg) (struct qlcnic_dcb *);
+ void (*get_info) (struct qlcnic_dcb *);
+ int (*attach) (struct qlcnic_dcb *);
+ void (*free) (struct qlcnic_dcb *);
};
struct qlcnic_dcb {
@@ -37,5 +40,85 @@ struct qlcnic_dcb {
struct workqueue_struct *wq;
struct qlcnic_dcb_ops *ops;
struct qlcnic_dcb_cfg *cfg;
+ unsigned long state;
};
+
+static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
+{
+ kfree(dcb);
+ dcb = NULL;
+}
+
+static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_hw_capability)
+ return dcb->ops->get_hw_capability(dcb);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->free)
+ dcb->ops->free(dcb);
+}
+
+static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->attach)
+ return dcb->ops->attach(dcb);
+
+ return 0;
+}
+
+static inline int
+qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+ if (dcb && dcb->ops->query_hw_capability)
+ return dcb->ops->query_hw_capability(dcb, buf);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_info)
+ dcb->ops->get_info(dcb);
+}
+
+static inline int
+qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
+{
+ if (dcb && dcb->ops->query_cee_param)
+ return dcb->ops->query_cee_param(dcb, buf, type);
+
+ return 0;
+}
+
+static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_cee_cfg)
+ return dcb->ops->get_cee_cfg(dcb);
+
+ return 0;
+}
+
+static inline void
+qlcnic_dcb_register_aen(struct qlcnic_dcb *dcb, u8 flag)
+{
+ if (dcb && dcb->ops->register_aen)
+ dcb->ops->register_aen(dcb, flag);
+}
+
+static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
+{
+ if (dcb && dcb->ops->aen_handler)
+ dcb->ops->aen_handler(dcb, msg);
+}
+
+static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->init_dcbnl_ops)
+ dcb->ops->init_dcbnl_ops(dcb);
+}
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index ebe4c86e5230..b36c02fafcfd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -27,43 +27,36 @@ static const u32 qlcnic_fw_dump_level[] = {
};
static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+ {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
+ {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"xmit_called", QLC_SIZEOF(stats.xmitcalled),
- QLC_OFF(stats.xmitcalled)},
+ QLC_OFF(stats.xmitcalled)},
{"xmit_finished", QLC_SIZEOF(stats.xmitfinished),
- QLC_OFF(stats.xmitfinished)},
- {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ QLC_OFF(stats.xmitfinished)},
+ {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
+ QLC_OFF(stats.tx_dma_map_error)},
+ {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
{"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
- {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+ QLC_OFF(stats.rx_dma_map_error)},
{"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
- {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
{"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
- {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+ {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
{"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
{"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
- {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
- {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
QLC_OFF(stats.skb_alloc_failure)},
- {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
- {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
- QLC_OFF(stats.rx_dma_map_error)},
- {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
- QLC_OFF(stats.tx_dma_map_error)},
{"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
- QLC_OFF(stats.mac_filter_limit_overrun)},
+ QLC_OFF(stats.mac_filter_limit_overrun)},
{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
QLC_OFF(stats.spurious_intr)},
};
static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
- "rx unicast frames",
- "rx multicast frames",
- "rx broadcast frames",
- "rx dropped frames",
- "rx errors",
- "rx local frames",
- "rx numbytes",
"tx unicast frames",
"tx multicast frames",
"tx broadcast frames",
@@ -71,6 +64,13 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx errors",
"tx local frames",
"tx numbytes",
+ "rx unicast frames",
+ "rx multicast frames",
+ "rx broadcast frames",
+ "rx dropped frames",
+ "rx errors",
+ "rx local frames",
+ "rx numbytes",
};
static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = {
@@ -126,13 +126,16 @@ static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
-static const char qlcnic_tx_ring_stats_strings[][ETH_GSTRING_LEN] = {
+static const char qlcnic_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
"xmit_on",
"xmit_off",
"xmit_called",
"xmit_finished",
+ "tx_bytes",
};
+#define QLCNIC_TX_STATS_LEN ARRAY_SIZE(qlcnic_tx_queue_stats_strings)
+
static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
"ctx_rx_bytes",
"ctx_rx_pkts",
@@ -187,8 +190,8 @@ static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
return -1;
}
-#define QLCNIC_RING_REGS_COUNT 20
-#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
+#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412
+
#define QLCNIC_MAX_EEPROM_LEN 1024
static const u32 diag_registers[] = {
@@ -219,7 +222,15 @@ static const u32 ext_diag_registers[] = {
};
#define QLCNIC_MGMT_API_VERSION 2
-#define QLCNIC_ETHTOOL_REGS_VER 3
+#define QLCNIC_ETHTOOL_REGS_VER 4
+
+static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
+{
+ int ring_regs_cnt = (adapter->drv_tx_rings * 5) +
+ (adapter->max_rds_rings * 2) +
+ (adapter->drv_sds_rings * 3) + 5;
+ return ring_regs_cnt * sizeof(u32);
+}
static int qlcnic_get_regs_len(struct net_device *dev)
{
@@ -231,7 +242,9 @@ static int qlcnic_get_regs_len(struct net_device *dev)
else
len = sizeof(ext_diag_registers) + sizeof(diag_registers);
- return QLCNIC_RING_REGS_LEN + len + QLCNIC_DEV_INFO_SIZE + 1;
+ len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32));
+ len += qlcnic_get_ring_regs_len(adapter);
+ return len;
}
static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -493,6 +506,8 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
struct qlcnic_adapter *adapter = netdev_priv(dev);
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_tx_ring *tx_ring;
u32 *regs_buff = p;
int ring, i = 0;
@@ -512,21 +527,35 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
- regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
-
- regs_buff[i++] = 1; /* No. of tx ring */
- regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
- regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
-
- regs_buff[i++] = 2; /* No. of rx ring */
- regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
- regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
+ /* Marker between regs and TX ring count */
+ regs_buff[i++] = 0xFFEFCDAB;
+
+ regs_buff[i++] = adapter->drv_tx_rings; /* No. of TX rings */
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer));
+ regs_buff[i++] = tx_ring->sw_consumer;
+ regs_buff[i++] = readl(tx_ring->crb_cmd_producer);
+ regs_buff[i++] = tx_ring->producer;
+ if (tx_ring->crb_intr_mask)
+ regs_buff[i++] = readl(tx_ring->crb_intr_mask);
+ else
+ regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED;
+ }
- regs_buff[i++] = adapter->max_sds_rings;
+ regs_buff[i++] = adapter->max_rds_rings; /* No. of RX rings */
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_rings = &recv_ctx->rds_rings[ring];
+ regs_buff[i++] = readl(rds_rings->crb_rcv_producer);
+ regs_buff[i++] = rds_rings->producer;
+ }
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ regs_buff[i++] = adapter->drv_sds_rings; /* No. of SDS rings */
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &(recv_ctx->sds_rings[ring]);
regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+ regs_buff[i++] = sds_ring->consumer;
+ regs_buff[i++] = readl(sds_ring->crb_intr_mask);
}
}
@@ -635,46 +664,88 @@ qlcnic_set_ringparam(struct net_device *dev,
return qlcnic_reset_context(adapter);
}
+static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
+ u8 rx_ring, u8 tx_ring)
+{
+ if (rx_ring != 0) {
+ if (rx_ring > adapter->max_sds_rings) {
+ netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
+ rx_ring, adapter->max_sds_rings);
+ return -EINVAL;
+ }
+ }
+
+ if (tx_ring != 0) {
+ if (qlcnic_82xx_check(adapter) &&
+ (tx_ring > adapter->max_tx_rings)) {
+ netdev_err(adapter->netdev,
+ "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
+ tx_ring, adapter->max_tx_rings);
+ return -EINVAL;
+ }
+
+ if (qlcnic_83xx_check(adapter) &&
+ (tx_ring > QLCNIC_SINGLE_RING)) {
+ netdev_err(adapter->netdev,
+ "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n",
+ tx_ring, QLCNIC_SINGLE_RING);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void qlcnic_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- int min;
-
- min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
- channel->max_rx = rounddown_pow_of_two(min);
- channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus());
- channel->rx_count = adapter->max_sds_rings;
- channel->tx_count = adapter->max_drv_tx_rings;
+ channel->max_rx = adapter->max_sds_rings;
+ channel->max_tx = adapter->max_tx_rings;
+ channel->rx_count = adapter->drv_sds_rings;
+ channel->tx_count = adapter->drv_tx_rings;
}
static int qlcnic_set_channels(struct net_device *dev,
- struct ethtool_channels *channel)
+ struct ethtool_channels *channel)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err;
- int txq = 0;
if (channel->other_count || channel->combined_count)
return -EINVAL;
+ err = qlcnic_validate_ring_count(adapter, channel->rx_count,
+ channel->tx_count);
+ if (err)
+ return err;
+
if (channel->rx_count) {
- err = qlcnic_validate_max_rss(adapter, channel->rx_count);
- if (err)
+ err = qlcnic_validate_rings(adapter, channel->rx_count,
+ QLCNIC_RX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u SDS rings\n",
+ channel->rx_count);
return err;
+ }
}
if (channel->tx_count) {
- err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
- if (err)
+ err = qlcnic_validate_rings(adapter, channel->tx_count,
+ QLCNIC_TX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u Tx rings\n",
+ channel->tx_count);
return err;
- txq = channel->tx_count;
+ }
}
- err = qlcnic_set_max_rss(adapter, channel->rx_count, txq);
- netdev_info(dev, "allocated 0x%x sds rings and 0x%x tx rings\n",
- adapter->max_sds_rings, adapter->max_drv_tx_rings);
+ err = qlcnic_setup_rings(adapter, channel->rx_count,
+ channel->tx_count);
+ netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n",
+ adapter->drv_sds_rings, adapter->drv_tx_rings);
+
return err;
}
@@ -876,7 +947,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
- int ret, max_sds_rings = adapter->max_sds_rings;
+ int ret, drv_sds_rings = adapter->drv_sds_rings;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_interrupt_test(netdev);
@@ -905,10 +976,10 @@ done:
qlcnic_free_mbx_args(&cmd);
free_diag_res:
- qlcnic_diag_free_res(netdev, max_sds_rings);
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
clear_diag_irq:
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
@@ -984,8 +1055,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int max_drv_tx_rings = adapter->max_drv_tx_rings;
- int max_sds_rings = adapter->max_sds_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
+ int drv_sds_rings = adapter->drv_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_hardware_context *ahw = adapter->ahw;
int loop = 0;
@@ -1040,11 +1111,11 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
qlcnic_clear_lb_mode(adapter, mode);
free_res:
- qlcnic_diag_free_res(netdev, max_sds_rings);
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
clear_it:
- adapter->max_sds_rings = max_sds_rings;
- adapter->max_drv_tx_rings = max_drv_tx_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
}
@@ -1097,11 +1168,11 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- num_stats = ARRAY_SIZE(qlcnic_tx_ring_stats_strings);
- for (i = 0; i < adapter->max_drv_tx_rings; i++) {
+ num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings);
+ for (i = 0; i < adapter->drv_tx_rings; i++) {
for (index = 0; index < num_stats; index++) {
- sprintf(data, "tx_ring_%d %s", i,
- qlcnic_tx_ring_stats_strings[index]);
+ sprintf(data, "tx_queue_%d %s", i,
+ qlcnic_tx_queue_stats_strings[index]);
data += ETH_GSTRING_LEN;
}
}
@@ -1199,6 +1270,36 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
return data;
}
+static void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ adapter->stats.xmit_on += tx_ring->tx_stats.xmit_on;
+ adapter->stats.xmit_off += tx_ring->tx_stats.xmit_off;
+ adapter->stats.xmitcalled += tx_ring->tx_stats.xmit_called;
+ adapter->stats.xmitfinished += tx_ring->tx_stats.xmit_finished;
+ adapter->stats.txbytes += tx_ring->tx_stats.tx_bytes;
+ }
+}
+
+static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ tx_ring = (struct qlcnic_host_tx_ring *)stats;
+
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_on);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_off);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_called);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_finished);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.tx_bytes);
+
+ return data;
+}
+
static void qlcnic_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -1206,19 +1307,20 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
- int index, ret, length, size, ring;
+ int index, ret, length, size, tx_size, ring;
char *p;
- memset(data, 0, adapter->max_drv_tx_rings * 4 * sizeof(u64));
- for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN;
+
+ memset(data, 0, tx_size * sizeof(u64));
+ for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
tx_ring = &adapter->tx_ring[ring];
- *data++ = tx_ring->xmit_on;
- *data++ = tx_ring->xmit_off;
- *data++ = tx_ring->xmit_called;
- *data++ = tx_ring->xmit_finished;
+ data = qlcnic_fill_tx_queue_stats(data, tx_ring);
+ qlcnic_update_stats(adapter);
}
}
+
memset(data, 0, stats->n_stats * sizeof(u64));
length = QLCNIC_STATS_LEN;
for (index = 0; index < length; index++) {
@@ -1260,7 +1362,7 @@ static int qlcnic_set_led(struct net_device *dev,
enum ethtool_phys_id_state state)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- int max_sds_rings = adapter->max_sds_rings;
+ int drv_sds_rings = adapter->drv_sds_rings;
int err = -EIO, active = 1;
if (qlcnic_83xx_check(adapter))
@@ -1318,7 +1420,7 @@ static int qlcnic_set_led(struct net_device *dev,
}
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
- qlcnic_diag_free_res(dev, max_sds_rings);
+ qlcnic_diag_free_res(dev, drv_sds_rings);
if (!active || err)
clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
@@ -1659,7 +1761,6 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
bool valid_mask = false;
int i, ret = 0;
- u32 state;
switch (val->flag) {
case QLCNIC_FORCE_FW_DUMP_KEY:
@@ -1712,9 +1813,8 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
case QLCNIC_SET_QUIESCENT:
case QLCNIC_RESET_QUIESCENT:
- state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
- netdev_info(netdev, "Device in FAILED state\n");
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+ netdev_info(netdev, "Device is in non-operational state\n");
break;
default:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index f8adc7b01f1f..6f7f60c09f07 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -445,7 +445,7 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
mac_req = (struct qlcnic_mac_req *)&req.words[0];
mac_req->op = op;
- memcpy(mac_req->mac_addr, addr, 6);
+ memcpy(mac_req->mac_addr, addr, ETH_ALEN);
vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
vlan_req->vlan_id = cpu_to_le16(vlan_id);
@@ -785,8 +785,6 @@ void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
#define QLCNIC_ENABLE_IPV4_LRO 1
#define QLCNIC_ENABLE_IPV6_LRO 2
-#define QLCNIC_NO_DEST_IPV4_CHECK (1 << 8)
-#define QLCNIC_NO_DEST_IPV6_CHECK (2 << 8)
int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
{
@@ -806,11 +804,10 @@ int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
word = 0;
if (enable) {
- word = QLCNIC_ENABLE_IPV4_LRO | QLCNIC_NO_DEST_IPV4_CHECK;
+ word = QLCNIC_ENABLE_IPV4_LRO;
if (adapter->ahw->extra_capability[0] &
QLCNIC_FW_CAP2_HW_LRO_IPV6)
- word |= QLCNIC_ENABLE_IPV6_LRO |
- QLCNIC_NO_DEST_IPV6_CHECK;
+ word |= QLCNIC_ENABLE_IPV6_LRO;
}
req.words[0] = cpu_to_le64(word);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 272c356cf9b2..13303e7d1ed7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -146,6 +146,12 @@ struct qlcnic_mailbox_metadata {
#define QLCNIC_MBX_PORT_RSP_OK 0x1a
#define QLCNIC_MBX_ASYNC_EVENT BIT_15
+/* Set HW Tx ring limit for 82xx adapter. */
+#define QLCNIC_MAX_HW_TX_RINGS 8
+#define QLCNIC_MAX_HW_VNIC_TX_RINGS 4
+#define QLCNIC_MAX_TX_RINGS 8
+#define QLCNIC_MAX_SDS_RINGS 8
+
struct qlcnic_pci_info;
struct qlcnic_info;
struct qlcnic_cmd_args;
@@ -176,7 +182,7 @@ int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 66c26cf7a2b8..e9c21e5d0ca9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -236,7 +236,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
spin_lock_init(&rds_ring->lock);
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
sds_ring->irq = adapter->msix_entries[ring].vector;
sds_ring->adapter = adapter;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 11b4bb83b930..0149c9495347 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -581,10 +581,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
goto drop_packet;
}
- if (qlcnic_check_multi_tx(adapter))
- tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
- else
- tx_ring = &adapter->tx_ring[0];
+ tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
num_txd = tx_ring->num_desc;
frag_count = skb_shinfo(skb)->nr_frags + 1;
@@ -607,8 +604,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_tx_start_queue(tx_ring->txq);
} else {
- adapter->stats.xmit_off++;
- tx_ring->xmit_off++;
+ tx_ring->tx_stats.xmit_off++;
return NETDEV_TX_BUSY;
}
}
@@ -669,9 +665,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (adapter->drv_mac_learn)
qlcnic_send_filter(adapter, first_desc, skb);
- adapter->stats.txbytes += skb->len;
- adapter->stats.xmitcalled++;
- tx_ring->xmit_called++;
+ tx_ring->tx_stats.tx_bytes += skb->len;
+ tx_ring->tx_stats.xmit_called++;
qlcnic_update_cmd_producer(tx_ring);
@@ -789,6 +784,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
+ if (!spin_trylock(&adapter->tx_clean_lock))
+ return 1;
+
sw_consumer = tx_ring->sw_consumer;
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
@@ -805,8 +803,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
PCI_DMA_TODEVICE);
frag->dma = 0ULL;
}
- adapter->stats.xmitfinished++;
- tx_ring->xmit_finished++;
+ tx_ring->tx_stats.xmit_finished++;
dev_kfree_skb_any(buffer->skb);
buffer->skb = NULL;
}
@@ -823,8 +820,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
netif_carrier_ok(netdev)) {
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_tx_wake_queue(tx_ring->txq);
- adapter->stats.xmit_on++;
- tx_ring->xmit_on++;
+ tx_ring->tx_stats.xmit_on++;
}
}
adapter->tx_timeo_cnt = 0;
@@ -844,6 +840,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
+ spin_unlock(&adapter->tx_clean_lock);
return done;
}
@@ -1011,7 +1008,7 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
}
break;
case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
- qlcnic_dcb_handle_aen(adapter, (void *)&msg);
+ qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
break;
default:
break;
@@ -1463,18 +1460,18 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_tx_ring *tx_ring;
- if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
return -ENOMEM;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
- (adapter->max_drv_tx_rings > 1)) {
+ (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
NAPI_POLL_WEIGHT);
} else {
- if (ring == (adapter->max_sds_rings - 1))
+ if (ring == (adapter->drv_sds_rings - 1))
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_poll,
NAPI_POLL_WEIGHT);
@@ -1491,7 +1488,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
}
if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
NAPI_POLL_WEIGHT);
@@ -1508,7 +1505,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_tx_ring *tx_ring;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_del(&sds_ring->napi);
}
@@ -1516,7 +1513,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
qlcnic_free_sds_rings(adapter->recv_ctx);
if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_del(&tx_ring->napi);
}
@@ -1535,7 +1532,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
qlcnic_enable_int(sds_ring);
@@ -1544,8 +1541,8 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
if (qlcnic_check_multi_tx(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED) &&
!adapter->ahw->diag_test &&
- (adapter->max_drv_tx_rings > 1)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
qlcnic_enable_tx_intr(adapter, tx_ring);
@@ -1563,7 +1560,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
napi_synchronize(&sds_ring->napi);
@@ -1573,7 +1570,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!adapter->ahw->diag_test &&
qlcnic_check_multi_tx(adapter)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
qlcnic_disable_tx_int(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
@@ -1911,7 +1908,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -1920,7 +1917,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
@@ -1938,7 +1935,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED)
qlcnic_83xx_disable_intr(adapter, sds_ring);
@@ -1948,7 +1945,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
@@ -1965,10 +1962,10 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
return -ENOMEM;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
@@ -1994,7 +1991,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_add(netdev, &tx_ring->napi,
qlcnic_83xx_msix_tx_poll,
@@ -2012,7 +2009,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_tx_ring *tx_ring;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_del(&sds_ring->napi);
}
@@ -2021,7 +2018,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_del(&tx_ring->napi);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 21d00a0449a1..05c1eef8df13 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -548,36 +548,75 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.io_resume = qlcnic_82xx_io_resume,
};
-static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- int num_tx_q;
- if (ahw->msix_supported &&
+ if (qlcnic_82xx_check(adapter) &&
(ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
- num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS,
- num_online_cpus());
- if (num_tx_q > 1) {
- test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE,
- &adapter->state);
- adapter->max_drv_tx_rings = num_tx_q;
- }
+ test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ return 0;
} else {
- adapter->max_drv_tx_rings = 1;
+ return 1;
}
}
+static int qlcnic_max_rings(struct qlcnic_adapter *adapter, u8 ring_cnt,
+ int queue_type)
+{
+ int num_rings, max_rings = QLCNIC_MAX_SDS_RINGS;
+
+ if (queue_type == QLCNIC_RX_QUEUE)
+ max_rings = adapter->max_sds_rings;
+ else if (queue_type == QLCNIC_TX_QUEUE)
+ max_rings = adapter->max_tx_rings;
+
+ num_rings = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+ max_rings));
+
+ if (ring_cnt > num_rings)
+ return num_rings;
+ else
+ return ring_cnt;
+}
+
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt)
+{
+ /* 83xx adapter does not have max_tx_rings initialized in probe */
+ if (adapter->max_tx_rings)
+ adapter->drv_tx_rings = qlcnic_max_rings(adapter, tx_cnt,
+ QLCNIC_TX_QUEUE);
+ else
+ adapter->drv_tx_rings = tx_cnt;
+
+ dev_info(&adapter->pdev->dev, "Set %d Tx rings\n",
+ adapter->drv_tx_rings);
+}
+
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
+{
+ /* 83xx adapter does not have max_sds_rings initialized in probe */
+ if (adapter->max_sds_rings)
+ adapter->drv_sds_rings = qlcnic_max_rings(adapter, rx_cnt,
+ QLCNIC_RX_QUEUE);
+ else
+ adapter->drv_sds_rings = rx_cnt;
+
+ dev_info(&adapter->pdev->dev, "Set %d SDS rings\n",
+ adapter->drv_sds_rings);
+}
+
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
- int max_tx_rings, max_sds_rings, tx_vector;
+ int drv_tx_rings, drv_sds_rings, tx_vector;
int err = -1, i;
if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
- max_tx_rings = 0;
+ drv_tx_rings = 0;
tx_vector = 0;
} else {
- max_tx_rings = adapter->max_drv_tx_rings;
+ drv_tx_rings = adapter->drv_tx_rings;
tx_vector = 1;
}
@@ -589,7 +628,7 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
return -ENOMEM;
}
- adapter->max_sds_rings = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
if (adapter->ahw->msix_supported) {
@@ -602,18 +641,18 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
if (qlcnic_83xx_check(adapter)) {
adapter->ahw->num_msix = num_msix;
/* subtract mail box and tx ring vectors */
- adapter->max_sds_rings = num_msix -
- max_tx_rings - 1;
+ adapter->drv_sds_rings = num_msix -
+ drv_tx_rings - 1;
} else {
adapter->ahw->num_msix = num_msix;
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
- (adapter->max_drv_tx_rings > 1))
- max_sds_rings = num_msix - max_tx_rings;
+ (adapter->drv_tx_rings > 1))
+ drv_sds_rings = num_msix - drv_tx_rings;
else
- max_sds_rings = num_msix;
+ drv_sds_rings = num_msix;
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
}
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
@@ -624,13 +663,13 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
if (qlcnic_83xx_check(adapter)) {
if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector))
return err;
- err -= (max_tx_rings + 1);
+ err -= drv_tx_rings + 1;
num_msix = rounddown_pow_of_two(err);
- num_msix += (max_tx_rings + 1);
+ num_msix += drv_tx_rings + 1;
} else {
num_msix = rounddown_pow_of_two(err);
if (qlcnic_check_multi_tx(adapter))
- num_msix += max_tx_rings;
+ num_msix += drv_tx_rings;
}
if (num_msix) {
@@ -683,25 +722,14 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
{
- struct qlcnic_hardware_context *ahw = adapter->ahw;
int num_msix, err = 0;
- if (!num_intr)
- num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
+ num_msix = adapter->drv_sds_rings;
- if (ahw->msix_supported) {
- num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
- num_intr));
- if (qlcnic_check_multi_tx(adapter)) {
- if (txq)
- adapter->max_drv_tx_rings = txq;
- num_msix += adapter->max_drv_tx_rings;
- }
- } else {
- num_msix = 1;
- }
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += adapter->drv_tx_rings;
err = qlcnic_enable_msix(adapter, num_msix);
if (err == -ENOMEM)
@@ -819,7 +847,7 @@ static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
struct qlcnic_pci_info *pci_info;
- int i, ret = 0, j = 0;
+ int i, id = 0, ret = 0, j = 0;
u16 act_pci_func;
u8 pfn;
@@ -860,7 +888,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
continue;
if (qlcnic_port_eswitch_cfg_capability(adapter)) {
- if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn))
+ if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn,
+ &id))
adapter->npars[j].eswitch_status = true;
else
continue;
@@ -875,15 +904,16 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+ memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN);
j++;
}
- if (qlcnic_82xx_check(adapter)) {
+ /* Update eSwitch status for adapters without per port eSwitch
+ * configuration capability
+ */
+ if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
- } else if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
- qlcnic_enable_eswitch(adapter, i, 1);
}
kfree(pci_info);
@@ -1131,18 +1161,25 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
if (err == -EIO)
return err;
adapter->ahw->extra_capability[0] = temp;
+ } else {
+ adapter->ahw->extra_capability[0] = 0;
}
+
adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
adapter->ahw->max_mtu = nic_info.max_mtu;
- /* Disable NPAR for 83XX */
- if (qlcnic_83xx_check(adapter))
- return err;
-
- if (adapter->ahw->capabilities & BIT_6)
+ if (adapter->ahw->capabilities & BIT_6) {
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
- else
+ adapter->ahw->nic_mode = QLCNIC_VNIC_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_VNIC_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+
+ dev_info(&adapter->pdev->dev, "vNIC mode enabled.\n");
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ }
return err;
}
@@ -1290,6 +1327,8 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
"HAL Version: %d, Privileged function\n",
adapter->ahw->fw_hal_version);
}
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
}
adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
@@ -1549,7 +1588,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
if (qlcnic_82xx_check(adapter) ||
(qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED))) {
- num_sds_rings = adapter->max_sds_rings;
+ num_sds_rings = adapter->drv_sds_rings;
for (ring = 0; ring < num_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_82xx_check(adapter) &&
@@ -1583,7 +1622,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
(adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
handler = qlcnic_msix_tx_intr;
- for (ring = 0; ring < adapter->max_drv_tx_rings;
+ for (ring = 0; ring < adapter->drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
snprintf(tx_ring->name, sizeof(tx_ring->name),
@@ -1611,7 +1650,7 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
if (qlcnic_82xx_check(adapter) ||
(qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED))) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
free_irq(sds_ring->irq, sds_ring);
}
@@ -1620,7 +1659,7 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
(qlcnic_82xx_check(adapter) &&
qlcnic_check_multi_tx(adapter))) {
- for (ring = 0; ring < adapter->max_drv_tx_rings;
+ for (ring = 0; ring < adapter->drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
if (tx_ring->irq)
@@ -1674,7 +1713,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
adapter->ahw->linkup = 0;
- if (adapter->max_sds_rings > 1)
+ if (adapter->drv_sds_rings > 1)
qlcnic_config_rss(adapter, 1);
qlcnic_config_intr_coalesce(adapter);
@@ -1716,6 +1755,7 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_sriov_vf_check(adapter))
qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
+ spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
@@ -1734,8 +1774,9 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_reset_rx_buffers_list(adapter);
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
+ spin_unlock(&adapter->tx_clean_lock);
}
/* Usage: During suspend and firmware recovery module */
@@ -1811,16 +1852,16 @@ void qlcnic_detach(struct qlcnic_adapter *adapter)
adapter->is_up = 0;
}
-void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
+void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
- int max_tx_rings = adapter->max_drv_tx_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
int ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
}
@@ -1831,8 +1872,8 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
qlcnic_detach(adapter);
adapter->ahw->diag_test = 0;
- adapter->max_sds_rings = max_sds_rings;
- adapter->max_drv_tx_rings = max_tx_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
if (qlcnic_attach(adapter))
goto out;
@@ -1898,10 +1939,10 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
qlcnic_detach(adapter);
- adapter->max_sds_rings = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
- adapter->max_drv_tx_rings = 1;
ret = qlcnic_attach(adapter);
if (ret) {
@@ -1922,7 +1963,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
}
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_enable_int(sds_ring);
}
@@ -2069,7 +2110,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
return err;
}
- qlcnic_dcb_init_dcbnl_ops(adapter);
+ qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
return 0;
}
@@ -2095,7 +2136,7 @@ void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
int ring;
struct qlcnic_host_tx_ring *tx_ring;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
if (tx_ring && tx_ring->cmd_buf_arr != NULL) {
vfree(tx_ring->cmd_buf_arr);
@@ -2113,14 +2154,14 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_cmd_buffer *cmd_buf_arr;
- tx_ring = kcalloc(adapter->max_drv_tx_rings,
+ tx_ring = kcalloc(adapter->drv_tx_rings,
sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL);
if (tx_ring == NULL)
return -ENOMEM;
adapter->tx_ring = tx_ring;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, ring);
@@ -2135,11 +2176,11 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
if (qlcnic_83xx_check(adapter) ||
(qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
tx_ring->adapter = adapter;
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
- index = adapter->max_sds_rings + ring;
+ index = adapter->drv_sds_rings + ring;
vector = adapter->msix_entries[index].vector;
tx_ring->irq = vector;
}
@@ -2159,22 +2200,10 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
else if (qlcnic_83xx_check(adapter))
fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER;
- if ((ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) &&
- (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER))
+ if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER)
qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
}
-static int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
-{
- return __qlcnic_register_dcb(adapter);
-}
-
-void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter)
-{
- kfree(adapter->dcb);
- adapter->dcb = NULL;
-}
-
static int
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -2183,6 +2212,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct qlcnic_hardware_context *ahw;
int err, pci_using_dac = -1;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
+ struct qlcnic_dcb *dcb;
if (pdev->is_virtfn)
return -ENODEV;
@@ -2257,7 +2287,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = qlcnic_alloc_adapter_resources(adapter);
if (err)
- goto err_out_free_netdev;
+ goto err_out_free_wq;
adapter->dev_rst_time = jiffies;
adapter->ahw->revision_id = pdev->revision;
@@ -2269,6 +2299,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
+ spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
qlcnic_register_dcb(adapter);
@@ -2283,38 +2314,51 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_maintenance_mode;
}
- qlcnic_get_multiq_capability(adapter);
-
- if ((adapter->ahw->act_pci_func > 2) &&
- qlcnic_check_multi_tx(adapter)) {
- adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS;
- dev_info(&adapter->pdev->dev,
- "vNIC mode enabled, Set max TX rings = %d\n",
- adapter->max_drv_tx_rings);
+ /* compute and set default and max tx/sds rings */
+ if (adapter->ahw->msix_supported) {
+ if (qlcnic_check_multi_tx_capability(adapter) == 1)
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_SINGLE_RING);
+ else
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_DEF_TX_RINGS);
+ qlcnic_set_sds_ring_count(adapter,
+ QLCNIC_DEF_SDS_RINGS);
+ } else {
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
}
- if (!qlcnic_check_multi_tx(adapter)) {
- clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
- adapter->max_drv_tx_rings = 1;
- }
err = qlcnic_setup_idc_param(adapter);
if (err)
goto err_out_free_hw;
adapter->flags |= QLCNIC_NEED_FLR;
- if (adapter->dcb && qlcnic_dcb_attach(adapter))
- qlcnic_clear_dcb_ops(adapter);
+ dcb = adapter->dcb;
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
} else if (qlcnic_83xx_check(adapter)) {
- adapter->max_drv_tx_rings = 1;
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
err = qlcnic_83xx_init(adapter, pci_using_dac);
if (err) {
- dev_err(&pdev->dev, "%s: failed\n", __func__);
- goto err_out_free_hw;
+ switch (err) {
+ case -ENOTRECOVERABLE:
+ dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware. Please reboot\n");
+ dev_err(&pdev->dev, "If reboot doesn't help, please replace the adapter with new one and return the faulty adapter for repair\n");
+ goto err_out_free_hw;
+ case -ENOMEM:
+ dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
+ goto err_out_free_hw;
+ default:
+ dev_err(&pdev->dev, "Adapter initialization failed. A reboot may be required to recover from this failure\n");
+ dev_err(&pdev->dev, "If reboot does not help to recover from this failure, try a flash update of the adapter\n");
+ goto err_out_maintenance_mode;
+ }
}
+
if (qlcnic_sriov_vf_check(adapter))
return 0;
} else {
@@ -2342,7 +2386,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Device does not support MSI interrupts\n");
if (qlcnic_82xx_check(adapter)) {
- err = qlcnic_setup_intr(adapter, 0, 0);
+ err = qlcnic_setup_intr(adapter);
if (err) {
dev_err(&pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
@@ -2396,6 +2440,9 @@ err_out_disable_msi:
err_out_free_hw:
qlcnic_free_adapter_resources(adapter);
+err_out_free_wq:
+ destroy_workqueue(adapter->qlcnic_wq);
+
err_out_free_netdev:
free_netdev(netdev);
@@ -2409,13 +2456,20 @@ err_out_free_res:
pci_release_regions(pdev);
err_out_disable_pdev:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
err_out_maintenance_mode:
+ set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
netdev->netdev_ops = &qlcnic_netdev_failed_ops;
SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+ ahw->port_type = QLCNIC_XGBE;
+
+ if (qlcnic_83xx_check(adapter))
+ adapter->tgt_status_reg = NULL;
+ else
+ ahw->board_type = QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS;
+
err = register_netdev(netdev);
if (err) {
@@ -2446,7 +2500,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_cancel_idc_work(adapter);
ahw = adapter->ahw;
- qlcnic_dcb_free(adapter);
+ qlcnic_dcb_free(adapter->dcb);
unregister_netdev(netdev);
qlcnic_sriov_cleanup(adapter);
@@ -2485,7 +2539,6 @@ static void qlcnic_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
if (adapter->qlcnic_wq) {
destroy_workqueue(adapter->qlcnic_wq);
@@ -2538,12 +2591,11 @@ static int qlcnic_resume(struct pci_dev *pdev)
static int qlcnic_open(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u32 state;
int err;
- state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
- netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ netdev_err(netdev, "%s: Device is in non-operational state\n",
+ __func__);
return -EIO;
}
@@ -2705,24 +2757,21 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
QLCNIC_FORCE_FW_DUMP_KEY);
} else {
netdev_info(netdev, "Tx timeout, reset adapter context.\n");
- if (qlcnic_82xx_check(adapter)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings;
- ring++) {
- tx_ring = &adapter->tx_ring[ring];
- dev_info(&netdev->dev, "ring=%d\n", ring);
- dev_info(&netdev->dev, "crb_intr_mask=%d\n",
- readl(tx_ring->crb_intr_mask));
- dev_info(&netdev->dev, "producer=%d\n",
- readl(tx_ring->crb_cmd_producer));
- dev_info(&netdev->dev, "sw_consumer = %d\n",
- tx_ring->sw_consumer);
- dev_info(&netdev->dev, "hw_consumer = %d\n",
- le32_to_cpu(*(tx_ring->hw_consumer)));
- dev_info(&netdev->dev, "xmit-on=%llu\n",
- tx_ring->xmit_on);
- dev_info(&netdev->dev, "xmit-off=%llu\n",
- tx_ring->xmit_off);
- }
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netdev_info(netdev, "Tx ring=%d\n", ring);
+ netdev_info(netdev,
+ "crb_intr_mask=%d, producer=%d, sw_consumer=%d, hw_consumer=%d\n",
+ readl(tx_ring->crb_intr_mask),
+ readl(tx_ring->crb_cmd_producer),
+ tx_ring->sw_consumer,
+ le32_to_cpu(*(tx_ring->hw_consumer)));
+ netdev_info(netdev,
+ "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
+ tx_ring->tx_stats.xmit_finished,
+ tx_ring->tx_stats.xmit_called,
+ tx_ring->tx_stats.xmit_on,
+ tx_ring->tx_stats.xmit_off);
}
adapter->ahw->reset_context = 1;
}
@@ -2836,7 +2885,7 @@ static void qlcnic_poll_controller(struct net_device *netdev)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
disable_irq(adapter->irq);
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
qlcnic_intr(adapter->irq, sds_ring);
}
@@ -3256,8 +3305,9 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
return;
state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
- netdev_err(adapter->netdev, "%s: Device is in FAILED state\n",
+
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ netdev_err(adapter->netdev, "%s: Device is in non-operational state\n",
__func__);
qlcnic_api_unlock(adapter);
@@ -3324,7 +3374,7 @@ qlcnic_attach_work(struct work_struct *work)
return;
}
attach:
- qlcnic_dcb_get_info(adapter);
+ qlcnic_dcb_get_info(adapter->dcb);
if (netif_running(netdev)) {
if (qlcnic_up(adapter, netdev))
@@ -3349,6 +3399,8 @@ done:
static int
qlcnic_check_health(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
u32 state = 0, heartbeat;
u32 peg_status;
int err = 0;
@@ -3373,7 +3425,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
if (adapter->need_fw_reset)
goto detach;
- if (adapter->ahw->reset_context && qlcnic_auto_fw_reset)
+ if (ahw->reset_context && qlcnic_auto_fw_reset)
qlcnic_reset_hw_context(adapter);
return 0;
@@ -3416,6 +3468,9 @@ detach:
qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+ } else if (!qlcnic_auto_fw_reset && fw_dump->enable &&
+ adapter->flags & QLCNIC_FW_RESET_OWNER) {
+ qlcnic_dump_fw(adapter);
}
return 1;
@@ -3497,7 +3552,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
qlcnic_clr_drv_state(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- err = qlcnic_setup_intr(adapter, 0, 0);
+ err = qlcnic_setup_intr(adapter);
if (err) {
kfree(adapter->msix_entries);
@@ -3642,136 +3697,90 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
+int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
+ int queue_type)
{
struct net_device *netdev = adapter->netdev;
- u8 max_hw = QLCNIC_MAX_TX_RINGS;
- u32 max_allowed;
+ u8 max_hw_rings = 0;
+ char buf[8];
+ int cur_rings;
- if (!qlcnic_82xx_check(adapter)) {
- netdev_err(netdev, "No Multi TX-Q support\n");
- return -EINVAL;
+ if (queue_type == QLCNIC_RX_QUEUE) {
+ max_hw_rings = adapter->max_sds_rings;
+ cur_rings = adapter->drv_sds_rings;
+ strcpy(buf, "SDS");
+ } else if (queue_type == QLCNIC_TX_QUEUE) {
+ max_hw_rings = adapter->max_tx_rings;
+ cur_rings = adapter->drv_tx_rings;
+ strcpy(buf, "Tx");
}
if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
- netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
+ netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
return -EINVAL;
}
- if (!qlcnic_check_multi_tx(adapter)) {
- netdev_err(netdev, "No Multi TX-Q support\n");
+ if (adapter->flags & QLCNIC_MSI_ENABLED) {
+ netdev_err(netdev, "No RSS/TSS support in MSI mode\n");
return -EINVAL;
}
- if (txq > QLCNIC_MAX_TX_RINGS) {
- netdev_err(netdev, "Invalid ring count\n");
+ if (ring_cnt < 2) {
+ netdev_err(netdev,
+ "%s rings value should not be lower than 2\n", buf);
return -EINVAL;
}
- max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
- num_online_cpus()));
- if ((txq > max_allowed) || !is_power_of_2(txq)) {
- if (!is_power_of_2(txq))
- netdev_err(netdev,
- "TX queue should be a power of 2\n");
- if (txq > num_online_cpus())
- netdev_err(netdev,
- "Tx queue should not be higher than [%u], number of online CPUs in the system\n",
- num_online_cpus());
- netdev_err(netdev, "Unable to configure %u Tx rings\n", txq);
+ if (!is_power_of_2(ring_cnt)) {
+ netdev_err(netdev, "%s rings value should be a power of 2\n",
+ buf);
return -EINVAL;
}
- return 0;
-}
-
-int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
- __u32 val)
-{
- struct net_device *netdev = adapter->netdev;
- u8 max_hw = adapter->ahw->max_rx_ques;
- u32 max_allowed;
-
- if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
- !qlcnic_use_msi) {
- netdev_err(netdev, "No RSS support in INT-x mode\n");
- return -EINVAL;
+ if (qlcnic_82xx_check(adapter) && (queue_type == QLCNIC_TX_QUEUE) &&
+ !qlcnic_check_multi_tx(adapter)) {
+ netdev_err(netdev, "No Multi Tx queue support\n");
+ return -EINVAL;
}
- if (val > QLCNIC_MAX_SDS_RINGS) {
- netdev_err(netdev, "RSS value should not be higher than %u\n",
- QLCNIC_MAX_SDS_RINGS);
+ if (ring_cnt > num_online_cpus()) {
+ netdev_err(netdev,
+ "%s value[%u] should not be higher than, number of online CPUs\n",
+ buf, num_online_cpus());
return -EINVAL;
}
- max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
- num_online_cpus()));
- if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) {
- if (!is_power_of_2(val))
- netdev_err(netdev, "RSS value should be a power of 2\n");
-
- if (val < 2)
- netdev_err(netdev, "RSS value should not be lower than 2\n");
-
- if (val > max_hw)
- netdev_err(netdev,
- "RSS value should not be higher than[%u], the max RSS rings supported by the adapter\n",
- max_hw);
-
- if (val > num_online_cpus())
- netdev_err(netdev,
- "RSS value should not be higher than[%u], number of online CPUs in the system\n",
- num_online_cpus());
-
- netdev_err(netdev, "Unable to configure %u RSS rings\n", val);
-
- return -EINVAL;
- }
return 0;
}
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq)
+int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
{
- int err;
struct net_device *netdev = adapter->netdev;
- int num_msix;
+ int err;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
- if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
- !qlcnic_use_msi) {
- netdev_err(netdev, "No RSS support in INT-x mode\n");
- return -EINVAL;
- }
-
netif_device_detach(netdev);
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
qlcnic_detach(adapter);
- if (qlcnic_82xx_check(adapter)) {
- if (txq != 0)
- adapter->max_drv_tx_rings = txq;
-
- if (qlcnic_check_multi_tx(adapter) &&
- (txq > adapter->max_drv_tx_rings))
- num_msix = adapter->max_drv_tx_rings;
- else
- num_msix = data;
- }
-
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_enable_mbx_poll(adapter);
}
- netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings);
-
qlcnic_teardown_intr(adapter);
- err = qlcnic_setup_intr(adapter, data, txq);
+ /* compute and set default and max tx/sds rings */
+ qlcnic_set_tx_ring_count(adapter, tx_cnt);
+ qlcnic_set_sds_ring_count(adapter, rx_cnt);
+
+ netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+
+ err = qlcnic_setup_intr(adapter);
if (err) {
kfree(adapter->msix_entries);
netdev_err(netdev, "failed to setup interrupt\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 15513608d480..7763962e2ec4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1187,41 +1187,38 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
}
if (ops_index == ops_cnt) {
- dev_info(&adapter->pdev->dev,
- "Invalid entry type %d, exiting dump\n",
+ dev_info(dev, "Skipping unknown entry opcode %d\n",
entry->hdr.type);
- goto error;
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
}
/* Collect dump for this entry */
dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
- if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
+ if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
+ }
+
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
buffer = fw_dump->data + buf_offset;
}
- if (dump_size != buf_offset) {
- dev_info(&adapter->pdev->dev,
- "Captured(%d) and expected size(%d) do not match\n",
- buf_offset, dump_size);
- goto error;
- } else {
- fw_dump->clr = 1;
- snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
- adapter->netdev->name);
- dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
- adapter->netdev->name, fw_dump->size);
- /* Send a udev event to notify availability of FW dump */
- kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
- return 0;
- }
-error:
+
+ fw_dump->clr = 1;
+ snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
+ dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n",
+ adapter->netdev->name, fw_dump->size, tmpl_hdr->size);
+ /* Send a udev event to notify availability of FW dump */
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
+
if (fw_dump->use_pex_dma)
dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
fw_dump->dma_buffer, fw_dump->phys_addr);
- vfree(fw_dump->data);
- return -EINVAL;
+
+ return 0;
}
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
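
The qlcnic_dump_fw() change above stops aborting the whole firmware dump when an entry has an unknown opcode or fails validation; the entry is flagged with QLCNIC_DUMP_SKIP and the walk advances to the next header offset. A simplified, self-contained sketch of that walk, using made-up structures rather than the driver's template format:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DUMP_SKIP 0x80u

struct dump_entry_hdr {
	uint32_t type;      /* opcode selecting a capture handler       */
	uint32_t offset;    /* distance from this header to the next    */
	uint32_t cap_size;  /* bytes this entry adds to the dump buffer */
	uint32_t flags;
};

/* Pretend handler lookup: only opcodes below 16 are implemented here. */
static int handler_known(uint32_t type)
{
	return type < 16;
}

static size_t walk_template(uint8_t *tmpl, size_t tmpl_size)
{
	size_t entry_offset = 0, buf_offset = 0;

	while (entry_offset + sizeof(struct dump_entry_hdr) <= tmpl_size) {
		struct dump_entry_hdr *e =
			(struct dump_entry_hdr *)(tmpl + entry_offset);

		if (e->offset == 0)
			break;			/* malformed template, stop */
		if (!handler_known(e->type)) {
			e->flags |= DUMP_SKIP;	/* skip it, don't abort     */
			entry_offset += e->offset;
			continue;
		}
		buf_offset += e->cap_size;	/* data actually captured   */
		entry_offset += e->offset;
	}
	return buf_offset;
}

int main(void)
{
	struct dump_entry_hdr t[3] = {
		{ .type = 1,  .offset = sizeof(t[0]), .cap_size = 64  },
		{ .type = 99, .offset = sizeof(t[0]), .cap_size = 0   },
		{ .type = 2,  .offset = sizeof(t[0]), .cap_size = 128 },
	};

	printf("captured %zu bytes\n", walk_template((uint8_t *)t, sizeof(t)));
	return 0;
}
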
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 392b9bd12b4f..21a4b274d2e4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -500,6 +500,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
int pci_using_dac)
{
+ struct qlcnic_dcb *dcb;
int err;
INIT_LIST_HEAD(&adapter->vf_mc_list);
@@ -507,7 +508,11 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
dev_warn(&adapter->pdev->dev,
"Device does not support MSI interrupts\n");
- err = qlcnic_setup_intr(adapter, 1, 0);
+ /* compute and set default and max tx/sds rings */
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+
+ err = qlcnic_setup_intr(adapter);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
@@ -533,8 +538,10 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
if (err)
goto err_out_send_channel_term;
- if (adapter->dcb && qlcnic_dcb_attach(adapter))
- qlcnic_clear_dcb_ops(adapter);
+ dcb = adapter->dcb;
+
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
if (err)
@@ -1577,7 +1584,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
if (err)
goto err_out_term_channel;
- qlcnic_dcb_get_info(adapter);
+ qlcnic_dcb_get_info(adapter->dcb);
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 019f4377307f..1a9f8a400e50 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -156,7 +156,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
const char *buf, size_t len)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- int err, max_sds_rings = adapter->max_sds_rings;
+ int err, drv_sds_rings = adapter->drv_sds_rings;
u16 beacon;
u8 h_beacon_state, b_state, b_rate;
@@ -211,7 +211,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
}
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
- qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
+ qlcnic_diag_free_res(adapter->netdev, drv_sds_rings);
out:
if (!ahw->beacon_state)
@@ -1272,7 +1272,6 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state;
if (device_create_bin_file(dev, &bin_attr_port_stats))
dev_info(dev, "failed to create port stats sysfs entry");
@@ -1286,8 +1285,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
- state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
return;
if (device_create_bin_file(dev, &bin_attr_pci_config))
@@ -1313,7 +1311,6 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state;
device_remove_bin_file(dev, &bin_attr_port_stats);
@@ -1323,8 +1320,7 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
- state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
return;
device_remove_bin_file(dev, &bin_attr_pci_config);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 899433778466..0c9c4e895595 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.32"
+#define DRV_VERSION "1.00.00.33"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -2206,14 +2206,14 @@ extern char qlge_driver_name[];
extern const char qlge_driver_version[];
extern const struct ethtool_ops qlge_ethtool_ops;
-extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
-extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
-extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
- u32 *value);
-extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
-extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
- u16 q_id);
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+ u32 *value);
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+ u16 q_id);
void ql_queue_fw_error(struct ql_adapter *qdev);
void ql_mpi_work(struct work_struct *work);
void ql_mpi_reset_work(struct work_struct *work);
@@ -2233,10 +2233,9 @@ int ql_unpause_mpi_risc(struct ql_adapter *qdev);
int ql_pause_mpi_risc(struct ql_adapter *qdev);
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
- u32 ram_addr, int word_count);
-int ql_core_dump(struct ql_adapter *qdev,
- struct ql_mpi_coredump *mpi_coredump);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
+ int word_count);
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
@@ -2249,8 +2248,7 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
void ql_get_dump(struct ql_adapter *qdev, void *buff);
-void ql_gen_reg_dump(struct ql_adapter *qdev,
- struct ql_reg_dump *mpi_coredump);
+void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
@@ -2264,9 +2262,9 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
/* #define QL_OB_DUMP */
#ifdef QL_REG_DUMP
-extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
-extern void ql_dump_routing_entries(struct ql_adapter *qdev);
-extern void ql_dump_regs(struct ql_adapter *qdev);
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+void ql_dump_routing_entries(struct ql_adapter *qdev);
+void ql_dump_regs(struct ql_adapter *qdev);
#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
@@ -2277,26 +2275,26 @@ extern void ql_dump_regs(struct ql_adapter *qdev);
#endif
#ifdef QL_STAT_DUMP
-extern void ql_dump_stat(struct ql_adapter *qdev);
+void ql_dump_stat(struct ql_adapter *qdev);
#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
#else
#define QL_DUMP_STAT(qdev)
#endif
#ifdef QL_DEV_DUMP
-extern void ql_dump_qdev(struct ql_adapter *qdev);
+void ql_dump_qdev(struct ql_adapter *qdev);
#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
#else
#define QL_DUMP_QDEV(qdev)
#endif
#ifdef QL_CB_DUMP
-extern void ql_dump_wqicb(struct wqicb *wqicb);
-extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
-extern void ql_dump_ricb(struct ricb *ricb);
-extern void ql_dump_cqicb(struct cqicb *cqicb);
-extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
-extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+void ql_dump_wqicb(struct wqicb *wqicb);
+void ql_dump_tx_ring(struct tx_ring *tx_ring);
+void ql_dump_ricb(struct ricb *ricb);
+void ql_dump_cqicb(struct cqicb *cqicb);
+void ql_dump_rx_ring(struct rx_ring *rx_ring);
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
@@ -2314,9 +2312,9 @@ extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#endif
#ifdef QL_OB_DUMP
-extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
-extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
-extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
#else
@@ -2325,14 +2323,14 @@ extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
#endif
#ifdef QL_IB_DUMP
-extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
#else
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
#endif
#ifdef QL_ALL_DUMP
-extern void ql_dump_all(struct ql_adapter *qdev);
+void ql_dump_all(struct ql_adapter *qdev);
#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
#else
#define QL_DUMP_ALL(qdev)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 2553cf4503b9..a245dc18d769 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -96,8 +96,10 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
-static int ql_wol(struct ql_adapter *qdev);
-static void qlge_set_multicast_list(struct net_device *ndev);
+static int ql_wol(struct ql_adapter *);
+static void qlge_set_multicast_list(struct net_device *);
+static int ql_adapter_down(struct ql_adapter *);
+static int ql_adapter_up(struct ql_adapter *);
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
@@ -1464,6 +1466,29 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
}
}
+/**
+ * ql_update_mac_hdr_len - helper routine to update the mac header length
+ * based on vlan tags if present
+ */
+static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ void *page, size_t *len)
+{
+ u16 *tags;
+
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ return;
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+ tags = (u16 *)page;
+ /* Look for stacked vlan tags in ethertype field */
+ if (tags[6] == ETH_P_8021Q &&
+ tags[8] == ETH_P_8021Q)
+ *len += 2 * VLAN_HLEN;
+ else
+ *len += VLAN_HLEN;
+ }
+}
+
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
@@ -1523,6 +1548,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
void *addr;
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
+ size_t hlen = ETH_HLEN;
skb = netdev_alloc_skb(ndev, length);
if (!skb) {
@@ -1540,25 +1566,28 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
goto err_out;
}
+ /* Update the MAC header length*/
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
- if (skb->len > ndev->mtu + ETH_HLEN) {
+ if (skb->len > ndev->mtu + hlen) {
netif_err(qdev, drv, qdev->ndev,
"Segment too small, dropping.\n");
rx_ring->rx_dropped++;
goto err_out;
}
- memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+ memcpy(skb_put(skb, hlen), addr, hlen);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset+ETH_HLEN,
- length-ETH_HLEN);
- skb->len += length-ETH_HLEN;
- skb->data_len += length-ETH_HLEN;
- skb->truesize += length-ETH_HLEN;
+ lbq_desc->p.pg_chunk.offset + hlen,
+ length - hlen);
+ skb->len += length - hlen;
+ skb->data_len += length - hlen;
+ skb->truesize += length - hlen;
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
@@ -1576,7 +1605,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph =
- (struct iphdr *) ((u8 *)addr + ETH_HLEN);
+ (struct iphdr *)((u8 *)addr + hlen);
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1726,7 +1755,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
struct bq_desc *sbq_desc;
struct sk_buff *skb = NULL;
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ size_t hlen = ETH_HLEN;
/*
* Handle the header buffer if present.
@@ -1853,9 +1883,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb->data_len += length;
skb->truesize += length;
length -= length;
- __pskb_pull_tail(skb,
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
- VLAN_ETH_HLEN : ETH_HLEN);
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp,
+ lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
}
} else {
/*
@@ -1910,8 +1941,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
length -= size;
i++;
}
- __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
- VLAN_ETH_HLEN : ETH_HLEN);
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
}
return skb;
}
@@ -2003,7 +2035,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb_record_rx_queue(skb, rx_ring->cq_id);
- if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+ if (vlan_id != 0xffff)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb);
@@ -2017,7 +2049,8 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
struct ib_mac_iocb_rsp *ib_mac_rsp)
{
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+ u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+ (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
((le16_to_cpu(ib_mac_rsp->vlan_id) &
IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
@@ -2310,9 +2343,39 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
}
}
+/**
+ * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
+ * based on the features to enable/disable hardware vlan accel
+ */
+static int qlge_update_hw_vlan_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status = 0;
+
+ status = ql_adapter_down(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring down the adapter\n");
+ return status;
+ }
+
+ /* update the features with resent change */
+ ndev->features = features;
+
+ status = ql_adapter_up(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring up the adapter\n");
+ return status;
+ }
+ return status;
+}
+
static netdev_features_t qlge_fix_features(struct net_device *ndev,
netdev_features_t features)
{
+ int err;
/*
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
@@ -2322,6 +2385,11 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
else
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+ /* Update the behavior of vlan accel in the adapter */
+ err = qlge_update_hw_vlan_features(ndev, features);
+ if (err)
+ return err;
+
return features;
}
@@ -3704,8 +3772,12 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
ql_write32(qdev, SYS, mask | value);
/* Set the default queue, and VLAN behavior. */
- value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
- mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
+ value = NIC_RCV_CFG_DFQ;
+ mask = NIC_RCV_CFG_DFQ_MASK;
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ value |= NIC_RCV_CFG_RV;
+ mask |= (NIC_RCV_CFG_RV << 16);
+ }
ql_write32(qdev, NIC_RCV_CFG, (mask | value));
/* Set the MPI interrupt to enabled. */
@@ -4505,7 +4577,6 @@ static void ql_release_all(struct pci_dev *pdev)
iounmap(qdev->doorbell_area);
vfree(qdev->mpi_coredump);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
@@ -4692,11 +4763,15 @@ static int qlge_probe(struct pci_dev *pdev,
qdev = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO | NETIF_F_TSO_ECN |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
- ndev->features = ndev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_RXCSUM;
+ ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
if (test_bit(QL_DMA64, &qdev->flags))
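
The ql_update_mac_hdr_len() helper above only widens the copied header when hardware VLAN stripping is disabled, adding 4 bytes for a single 802.1Q tag and 8 bytes for a stacked pair found in the ethertype fields. A standalone sketch of the same length computation on a raw frame (no driver types; ntohs() is used here for clarity):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohs() */

#define ETH_HLEN    14
#define VLAN_HLEN    4
#define ETH_P_8021Q 0x8100

/* MAC header length of 'frame', including any VLAN tag(s) present. */
static size_t mac_hdr_len(const uint8_t *frame)
{
	uint16_t outer, inner;

	memcpy(&outer, frame + 12, sizeof(outer));	/* outer ethertype */
	if (ntohs(outer) != ETH_P_8021Q)
		return ETH_HLEN;			/* untagged        */

	memcpy(&inner, frame + 16, sizeof(inner));	/* ethertype after */
	if (ntohs(inner) == ETH_P_8021Q)		/* the first tag   */
		return ETH_HLEN + 2 * VLAN_HLEN;	/* stacked (QinQ)  */
	return ETH_HLEN + VLAN_HLEN;			/* single tag      */
}

int main(void)
{
	uint8_t untagged[32] = { 0 };
	uint8_t tagged[32] = { 0 };

	tagged[12] = 0x81;				/* 802.1Q TPID     */
	tagged[13] = 0x00;
	printf("untagged: %zu, tagged: %zu\n",
	       mac_hdr_len(untagged), mac_hdr_len(tagged));
	return 0;
}
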
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index e9dc84943cfc..1e49ec5b2232 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1231,7 +1231,6 @@ err_out_mdio:
mdiobus_free(lp->mii_bus);
err_out_unmap:
netif_napi_del(&lp->napi);
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
@@ -1257,7 +1256,6 @@ static void r6040_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index d2e591955bdd..f2a2128165dd 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -2052,7 +2052,6 @@ static void cp_remove_one (struct pci_dev *pdev)
pci_release_regions(pdev);
pci_clear_mwi(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 3ccedeb8aba0..50a92104dd0a 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -727,7 +727,6 @@ static void __rtl8139_cleanup_dev (struct net_device *dev)
pci_release_regions (pdev);
free_netdev(dev);
- pci_set_drvdata (pdev, NULL);
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3397cee89777..799387570766 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6811,7 +6811,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
rtl_disable_msi(pdev, tp);
rtl8169_release_board(pdev, dev, tp->mmio_addr);
- pci_set_drvdata(pdev, NULL);
}
static const struct net_device_ops rtl_netdev_ops = {
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5cd831ebfa83..d256ce19d4de 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -483,7 +483,7 @@ static struct sh_eth_cpu_data sh7757_data = {
.register_type = SH_ETH_REG_FAST_SH4,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
- .rmcr_value = 0x00000001,
+ .rmcr_value = RMCR_RNC,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -561,7 +561,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000072f,
- .rmcr_value = 0x00000001,
+ .rmcr_value = RMCR_RNC,
.irq_flags = IRQF_SHARED,
.apr = 1,
@@ -688,12 +688,16 @@ static struct sh_eth_cpu_data r8a7740_data = {
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
+ .fdr_value = 0x0000070f,
+ .rmcr_value = RMCR_RNC,
.apr = 1,
.mpr = 1,
.tpauser = 1,
.bculr = 1,
.hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.tsu = 1,
@@ -868,7 +872,7 @@ static void update_mac_address(struct net_device *ndev)
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
- memcpy(ndev->dev_addr, mac, 6);
+ memcpy(ndev->dev_addr, mac, ETH_ALEN);
} else {
ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
@@ -2659,6 +2663,12 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
+ if (!pd) {
+ dev_err(&pdev->dev, "no platform data\n");
+ ret = -EINVAL;
+ goto out_release;
+ }
+
/* get PHY ID */
mdp->phy_id = pd->phy;
mdp->phy_interface = pd->phy_interface;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a0db02c63b11..f32c1692d310 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -321,6 +321,9 @@ enum TD_STS_BIT {
#define TD_TFP (TD_TFP1|TD_TFP0)
/* RMCR */
+enum RMCR_BIT {
+ RMCR_RNC = 0x00000001,
+};
#define DEFAULT_RMCR_VALUE 0x00000000
/* ECMR */
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 9f18ae984f9e..676c3c057bfb 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -285,6 +285,181 @@ static int efx_ef10_free_vis(struct efx_nic *efx)
return rc;
}
+#ifdef EFX_USE_PIO
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
+ unsigned int i;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
+
+ for (i = 0; i < nic_data->n_piobufs; i++) {
+ MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
+ nic_data->piobuf_handle[i]);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ WARN_ON(rc);
+ }
+
+ nic_data->n_piobufs = 0;
+}
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
+ unsigned int i;
+ size_t outlen;
+ int rc = 0;
+
+ BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
+
+ for (i = 0; i < n; i++) {
+ rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ break;
+ if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+ rc = -EIO;
+ break;
+ }
+ nic_data->piobuf_handle[i] =
+ MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocated PIO buffer %u handle %x\n", i,
+ nic_data->piobuf_handle[i]);
+ }
+
+ nic_data->n_piobufs = i;
+ if (rc)
+ efx_ef10_free_piobufs(efx);
+ return rc;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf,
+ max(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_IN_LEN));
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ unsigned int offset, index;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
+ BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
+
+ /* Link a buffer to each VI in the write-combining mapping */
+ for (index = 0; index < nic_data->n_piobufs; ++index) {
+ MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
+ nic_data->piobuf_handle[index]);
+ MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
+ nic_data->pio_write_vi_base + index);
+ rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+ inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+ NULL, 0, NULL);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to link VI %u to PIO buffer %u (%d)\n",
+ nic_data->pio_write_vi_base + index, index,
+ rc);
+ goto fail;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "linked VI %u to PIO buffer %u\n",
+ nic_data->pio_write_vi_base + index, index);
+ }
+
+ /* Link a buffer to each TX queue */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ /* We assign the PIO buffers to queues in
+ * reverse order to allow for the following
+ * special case.
+ */
+ offset = ((efx->tx_channel_offset + efx->n_tx_channels -
+ tx_queue->channel->channel - 1) *
+ efx_piobuf_size);
+ index = offset / ER_DZ_TX_PIOBUF_SIZE;
+ offset = offset % ER_DZ_TX_PIOBUF_SIZE;
+
+ /* When the host page size is 4K, the first
+ * host page in the WC mapping may be within
+ * the same VI page as the last TX queue. We
+ * can only link one buffer to each VI.
+ */
+ if (tx_queue->queue == nic_data->pio_write_vi_base) {
+ BUG_ON(index != 0);
+ rc = 0;
+ } else {
+ MCDI_SET_DWORD(inbuf,
+ LINK_PIOBUF_IN_PIOBUF_HANDLE,
+ nic_data->piobuf_handle[index]);
+ MCDI_SET_DWORD(inbuf,
+ LINK_PIOBUF_IN_TXQ_INSTANCE,
+ tx_queue->queue);
+ rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+ inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+ NULL, 0, NULL);
+ }
+
+ if (rc) {
+ /* This is non-fatal; the TX path just
+ * won't use PIO for this queue
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "failed to link VI %u to PIO buffer %u (%d)\n",
+ tx_queue->queue, index, rc);
+ tx_queue->piobuf = NULL;
+ } else {
+ tx_queue->piobuf =
+ nic_data->pio_write_base +
+ index * EFX_VI_PAGE_SIZE + offset;
+ tx_queue->piobuf_offset = offset;
+ netif_dbg(efx, probe, efx->net_dev,
+ "linked VI %u to PIO buffer %u offset %x addr %p\n",
+ tx_queue->queue, index,
+ tx_queue->piobuf_offset,
+ tx_queue->piobuf);
+ }
+ }
+ }
+
+ return 0;
+
+fail:
+ while (index--) {
+ MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
+ nic_data->pio_write_vi_base + index);
+ efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
+ inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
+ NULL, 0, NULL);
+ }
+ return rc;
+}
+
+#else /* !EFX_USE_PIO */
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+ return n == 0 ? 0 : -ENOBUFS;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+ return 0;
+}
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+}
+
+#endif /* EFX_USE_PIO */
+
static void efx_ef10_remove(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -295,9 +470,15 @@ static void efx_ef10_remove(struct efx_nic *efx)
/* This needs to be after efx_ptp_remove_channel() with no filters */
efx_ef10_rx_free_indir_table(efx);
+ if (nic_data->wc_membase)
+ iounmap(nic_data->wc_membase);
+
rc = efx_ef10_free_vis(efx);
WARN_ON(rc != 0);
+ if (!nic_data->must_restore_piobufs)
+ efx_ef10_free_piobufs(efx);
+
efx_mcdi_fini(efx);
efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
kfree(nic_data);
@@ -330,12 +511,126 @@ static int efx_ef10_alloc_vis(struct efx_nic *efx,
return 0;
}
+/* Note that the failure path of this function does not free
+ * resources, as this will be done by efx_ef10_remove().
+ */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
- unsigned int n_vis =
- max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int uc_mem_map_size, wc_mem_map_size;
+ unsigned int min_vis, pio_write_vi_base, max_vis;
+ void __iomem *membase;
+ int rc;
+
+ min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef EFX_USE_PIO
+ /* Try to allocate PIO buffers if wanted and if the full
+ * number of PIO buffers would be sufficient to allocate one
+ * copy-buffer per TX channel. Failure is non-fatal, as there
+ * are only a small number of PIO buffers shared between all
+ * functions of the controller.
+ */
+ if (efx_piobuf_size != 0 &&
+ ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
+ efx->n_tx_channels) {
+ unsigned int n_piobufs =
+ DIV_ROUND_UP(efx->n_tx_channels,
+ ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
+
+ rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
+ if (rc)
+ netif_err(efx, probe, efx->net_dev,
+ "failed to allocate PIO buffers (%d)\n", rc);
+ else
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocated %u PIO buffers\n", n_piobufs);
+ }
+#else
+ nic_data->n_piobufs = 0;
+#endif
+
+ /* PIO buffers should be mapped with write-combining enabled,
+ * and we want to make single UC and WC mappings rather than
+ * several of each (in fact that's the only option if host
+ * page size is >4K). So we may allocate some extra VIs just
+ * for writing PIO buffers through.
+ */
+ uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+ ER_DZ_TX_PIOBUF);
+ if (nic_data->n_piobufs) {
+ pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+ wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
+ nic_data->n_piobufs) *
+ EFX_VI_PAGE_SIZE) -
+ uc_mem_map_size);
+ max_vis = pio_write_vi_base + nic_data->n_piobufs;
+ } else {
+ pio_write_vi_base = 0;
+ wc_mem_map_size = 0;
+ max_vis = min_vis;
+ }
+
+ /* In case the last attached driver failed to free VIs, do it now */
+ rc = efx_ef10_free_vis(efx);
+ if (rc != 0)
+ return rc;
+
+ rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
+ if (rc != 0)
+ return rc;
+
+ /* If we didn't get enough VIs to map all the PIO buffers, free the
+ * PIO buffers
+ */
+ if (nic_data->n_piobufs &&
+ nic_data->n_allocated_vis <
+ pio_write_vi_base + nic_data->n_piobufs) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%u VIs are not sufficient to map %u PIO buffers\n",
+ nic_data->n_allocated_vis, nic_data->n_piobufs);
+ efx_ef10_free_piobufs(efx);
+ }
+
+ /* Shrink the original UC mapping of the memory BAR */
+ membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
+ if (!membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not shrink memory BAR to %x\n",
+ uc_mem_map_size);
+ return -ENOMEM;
+ }
+ iounmap(efx->membase);
+ efx->membase = membase;
+
+ /* Set up the WC mapping if needed */
+ if (wc_mem_map_size) {
+ nic_data->wc_membase = ioremap_wc(efx->membase_phys +
+ uc_mem_map_size,
+ wc_mem_map_size);
+ if (!nic_data->wc_membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not allocate WC mapping of size %x\n",
+ wc_mem_map_size);
+ return -ENOMEM;
+ }
+ nic_data->pio_write_vi_base = pio_write_vi_base;
+ nic_data->pio_write_base =
+ nic_data->wc_membase +
+ (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+ uc_mem_map_size);
- return efx_ef10_alloc_vis(efx, n_vis, n_vis);
+ rc = efx_ef10_link_piobufs(efx);
+ if (rc)
+ efx_ef10_free_piobufs(efx);
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
+ &efx->membase_phys, efx->membase, uc_mem_map_size,
+ nic_data->wc_membase, wc_mem_map_size);
+
+ return 0;
}
static int efx_ef10_init_nic(struct efx_nic *efx)
@@ -359,6 +654,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_realloc_vis = false;
}
+ if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
+ rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
+ if (rc == 0) {
+ rc = efx_ef10_link_piobufs(efx);
+ if (rc)
+ efx_ef10_free_piobufs(efx);
+ }
+
+ /* Log an error on failure, but this is non-fatal */
+ if (rc)
+ netif_err(efx, drv, efx->net_dev,
+ "failed to restore PIO buffers (%d)\n", rc);
+ nic_data->must_restore_piobufs = false;
+ }
+
efx_ef10_rx_push_indir_table(efx);
return 0;
}
@@ -444,6 +754,18 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+ EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+ EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+ EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+ EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+ EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
+ EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
+ EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+ EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+ EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+ EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+ EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+ EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
};
#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
@@ -498,44 +820,72 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
(1ULL << EF10_STAT_rx_length_error))
-#if BITS_PER_LONG == 64
-#define STAT_MASK_BITMAP(bits) (bits)
-#else
-#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
-#endif
-
-static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
-{
- static const unsigned long hunt_40g_stat_mask[] = {
- STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
- HUNT_40G_EXTRA_STAT_MASK)
- };
- static const unsigned long hunt_10g_only_stat_mask[] = {
- STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
- HUNT_10G_ONLY_STAT_MASK)
- };
+/* These statistics are only provided if the firmware supports the
+ * capability PM_AND_RXDP_COUNTERS.
+ */
+#define HUNT_PM_AND_RXDP_STAT_MASK ( \
+ (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \
+ (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \
+ (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \
+ (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \
+ (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \
+ (1ULL << EF10_STAT_rx_pm_discard_qbb) | \
+ (1ULL << EF10_STAT_rx_pm_discard_mapping) | \
+ (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
+ (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
+ (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
+ (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \
+ (1ULL << EF10_STAT_rx_dp_emerg_wait))
+
+static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
+{
+ u64 raw_mask = HUNT_COMMON_STAT_MASK;
u32 port_caps = efx_mcdi_phy_get_caps(efx);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- return hunt_40g_stat_mask;
+ raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
else
- return hunt_10g_only_stat_mask;
+ raw_mask |= HUNT_10G_ONLY_STAT_MASK;
+
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
+ raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
+
+ return raw_mask;
+}
+
+static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
+{
+ u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+
+#if BITS_PER_LONG == 64
+ mask[0] = raw_mask;
+#else
+ mask[0] = raw_mask & 0xffffffff;
+ mask[1] = raw_mask >> 32;
+#endif
}
static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
{
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+
+ efx_ef10_get_stat_mask(efx, mask);
return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
- efx_ef10_stat_mask(efx), names);
+ mask, names);
}
static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
__le64 generation_start, generation_end;
u64 *stats = nic_data->stats;
__le64 *dma_stats;
+ efx_ef10_get_stat_mask(efx, mask);
+
dma_stats = efx->stats_buffer.addr;
nic_data = efx->nic_data;
@@ -543,8 +893,9 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
- efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
+ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
stats, efx->stats_buffer.addr, false);
+ rmb();
generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
if (generation_end != generation_start)
return -EAGAIN;
@@ -563,12 +914,14 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats)
{
- const unsigned long *mask = efx_ef10_stat_mask(efx);
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
u64 *stats = nic_data->stats;
size_t stats_count = 0, index;
int retry;
+ efx_ef10_get_stat_mask(efx, mask);
+
/* If we're unlucky enough to read statistics during the DMA, wait
* up to 10ms for it to finish (typically takes <500us)
*/
@@ -716,6 +1069,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
/* All our allocations have been reset */
nic_data->must_realloc_vis = true;
nic_data->must_restore_filters = true;
+ nic_data->must_restore_piobufs = true;
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
/* The datapath firmware might have been changed */
@@ -2137,7 +2491,7 @@ out_unlock:
return rc;
}
-void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
+static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
{
/* no need to do anything here on EF10 */
}
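
The EF10 statistics rework above drops the precomputed mask arrays and instead builds a raw 64-bit mask at runtime, splitting it into an unsigned long bitmap so 32-bit and 64-bit builds share one code path. A hedged, generic illustration of that split (names are not the sfc ones):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Expand a raw 64-bit statistics mask into an unsigned long bitmap.
 * On 64-bit builds one word is enough; on 32-bit builds the mask is
 * split across two words, mirroring the BITS_PER_LONG handling above. */
static void mask_to_bitmap(uint64_t raw_mask, unsigned long *bitmap)
{
#if ULONG_MAX == 0xffffffffffffffffUL
	bitmap[0] = (unsigned long)raw_mask;
#else
	bitmap[0] = (unsigned long)(raw_mask & 0xffffffffUL);
	bitmap[1] = (unsigned long)(raw_mask >> 32);
#endif
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };	/* room for 64 bits either way */
	uint64_t raw = (1ULL << 3) | (1ULL << 40);

	mask_to_bitmap(raw, bitmap);
	printf("bitmap words: %#lx %#lx\n", bitmap[0], bitmap[1]);
	return 0;
}
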
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index b3f4e3755fd9..207ac9a1e3de 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -315,6 +315,7 @@
#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
#define ESF_DZ_TX_PIO_OPT_LBN 60
#define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_PIO 1
#define ESF_DZ_TX_PIO_CONT_LBN 59
#define ESF_DZ_TX_PIO_CONT_WIDTH 1
#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 07c9bc4c61bc..2e27837ce6a2 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1121,7 +1121,7 @@ static int efx_init_io(struct efx_nic *efx)
*/
while (dma_mask > 0x7fffffffUL) {
if (dma_supported(&pci_dev->dev, dma_mask)) {
- rc = dma_set_mask(&pci_dev->dev, dma_mask);
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
if (rc == 0)
break;
}
@@ -1134,16 +1134,6 @@ static int efx_init_io(struct efx_nic *efx)
}
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
- rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
- if (rc) {
- /* dma_set_coherent_mask() is not *allowed* to
- * fail with a mask that dma_set_mask() accepted,
- * but just in case...
- */
- netif_err(efx, probe, efx->net_dev,
- "failed to set consistent DMA mask\n");
- goto fail2;
- }
efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
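
The efx.c hunk above folds the separate coherent-mask call into a single dma_set_mask_and_coherent(), so the "just in case" fallback block can go. A hedged sketch of the same narrowing loop in isolation (example_set_dma_mask is an illustrative name, not a driver function):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Try progressively smaller DMA masks; one call now covers both the
 * streaming and coherent masks.
 */
static int example_set_dma_mask(struct pci_dev *pci_dev, u64 dma_mask)
{
        while (dma_mask > 0x7fffffffUL) {
                if (dma_supported(&pci_dev->dev, dma_mask) &&
                    dma_set_mask_and_coherent(&pci_dev->dev, dma_mask) == 0)
                        return 0;
                dma_mask >>= 1;
        }
        return -EIO;
}
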
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 34d00f5771fe..b8235ee5d7d7 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -18,37 +18,36 @@
#define EFX_MEM_BAR 2
/* TX */
-extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern netdev_tx_t
-efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
-extern netdev_tx_t
-efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
-extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
-extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev);
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+extern unsigned int efx_piobuf_size;
/* RX */
-extern void efx_rx_config_page_split(struct efx_nic *efx);
-extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_slow_fill(unsigned long context);
-extern void __efx_rx_packet(struct efx_channel *channel);
-extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
- unsigned int index, unsigned int n_frags,
- unsigned int len, u16 flags);
+void efx_rx_config_page_split(struct efx_nic *efx);
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_rx_slow_fill(unsigned long context);
+void __efx_rx_packet(struct efx_channel *channel);
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+ unsigned int n_frags, unsigned int len, u16 flags);
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
if (channel->rx_pkt_n_frags)
__efx_rx_packet(channel);
}
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
@@ -162,9 +161,9 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
#ifdef CONFIG_RFS_ACCEL
-extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
- u16 rxq_index, u32 flow_id);
-extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
static inline void efx_filter_rfs_expire(struct efx_channel *channel)
{
if (channel->rfs_filters_added >= 60 &&
@@ -176,50 +175,48 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
#define efx_filter_rfs_enabled() 0
#endif
-extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
/* Channels */
-extern int efx_channel_dummy_op_int(struct efx_channel *channel);
-extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern int
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+int efx_channel_dummy_op_int(struct efx_channel *channel);
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
/* Ports */
-extern int efx_reconfigure_port(struct efx_nic *efx);
-extern int __efx_reconfigure_port(struct efx_nic *efx);
+int efx_reconfigure_port(struct efx_nic *efx);
+int __efx_reconfigure_port(struct efx_nic *efx);
/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;
/* Reset handling */
-extern int efx_reset(struct efx_nic *efx, enum reset_type method);
-extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-extern int efx_try_recovery(struct efx_nic *efx);
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_try_recovery(struct efx_nic *efx);
/* Global */
-extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
-extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
- unsigned int rx_usecs, bool rx_adaptive,
- bool rx_may_override_tx);
-extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
- unsigned int *rx_usecs, bool *rx_adaptive);
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx);
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive);
/* Dummy PHY ops for PHY drivers */
-extern int efx_port_dummy_op_int(struct efx_nic *efx);
-extern void efx_port_dummy_op_void(struct efx_nic *efx);
-
+int efx_port_dummy_op_int(struct efx_nic *efx);
+void efx_port_dummy_op_void(struct efx_nic *efx);
/* MTD */
#ifdef CONFIG_SFC_MTD
-extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
- size_t n_parts, size_t sizeof_part);
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
return efx->type->mtd_probe(efx);
}
-extern void efx_mtd_rename(struct efx_nic *efx);
-extern void efx_mtd_remove(struct efx_nic *efx);
+void efx_mtd_rename(struct efx_nic *efx);
+void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
@@ -241,9 +238,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
efx_schedule_channel(channel);
}
-extern void efx_link_status_changed(struct efx_nic *efx);
-extern void efx_link_set_advertising(struct efx_nic *efx, u32);
-extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+void efx_link_status_changed(struct efx_nic *efx);
+void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
static inline void efx_device_detach_sync(struct efx_nic *efx)
{
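
Most of the header churn in this series, starting with efx.h above, is a mechanical sweep dropping the redundant extern keyword from function declarations. A trivial user-space illustration of why the two forms are interchangeable (the prototypes here are made up):

#include <stdio.h>

/* 'extern' is the default storage class for a function declaration, so
 * removing it changes nothing for the compiler.
 */
extern int answer(void);        /* old style */
int answer(void);               /* new style, same meaning */

int answer(void)
{
        return 42;
}

int main(void)
{
        printf("%d\n", answer());
        return 0;
}
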
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 5b471cf5c323..1f529fa2edb1 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -70,6 +70,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -1035,8 +1036,8 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
return 0;
}
-int efx_ethtool_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *ts_info)
+static int efx_ethtool_get_ts_info(struct net_device *net_dev,
+ struct ethtool_ts_info *ts_info)
{
struct efx_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 96ce507d8602..4d3f119b67b3 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -66,6 +66,11 @@
#define EFX_USE_QWORD_IO 1
#endif
+/* PIO is a win only if write-combining is possible */
+#ifdef ARCH_HAS_IOREMAP_WC
+#define EFX_USE_PIO 1
+#endif
+
#ifdef EFX_USE_QWORD_IO
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
unsigned int reg)
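
The io.h hunk above only defines EFX_USE_PIO when the architecture advertises ARCH_HAS_IOREMAP_WC, since PIO writes are only worthwhile through a write-combining mapping. A hedged kernel-style sketch of what such a gate typically protects (example_map_pio is a made-up helper, not driver code):

#include <linux/io.h>

/* Map a slice of a PCI BAR write-combined where possible, otherwise fall
 * back to a normal uncached mapping.
 */
static void __iomem *example_map_pio(resource_size_t bar_start,
                                     size_t offset, size_t len)
{
#ifdef ARCH_HAS_IOREMAP_WC
        return ioremap_wc(bar_start + offset, len);
#else
        return ioremap(bar_start + offset, len);
#endif
}
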
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index c082562dbf4e..366c8e3e3784 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -963,7 +963,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
size_t outlen;
int rc;
@@ -981,6 +981,22 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
goto fail;
}
+ /* We currently assume we have control of the external link
+ * and are completely trusted by firmware. Abort probing
+ * if that's not true for this function.
+ */
+ if (driver_operating &&
+ outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
+ (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
+ netif_err(efx, probe, efx->net_dev,
+ "This driver version only supports one function per port\n");
+ return -ENODEV;
+ }
+
if (was_attached != NULL)
*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
return 0;
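
The mcdi.c hunk above aborts probing unless the firmware reports both the LINKCTRL and TRUSTED flags for this PCI function. The condition is the usual "all required bits set" test; a standalone sketch with illustrative bit positions (the real values come from MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_*):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_LINKCTRL (1u << 0)     /* illustrative bit numbers */
#define FLAG_TRUSTED  (1u << 1)

/* Accept the function only if every required flag is present. */
static bool may_drive_port(uint32_t func_flags)
{
        const uint32_t required = FLAG_LINKCTRL | FLAG_TRUSTED;

        return (func_flags & required) == required;
}

int main(void)
{
        printf("%d\n", may_drive_port(FLAG_LINKCTRL | FLAG_TRUSTED)); /* 1 */
        printf("%d\n", may_drive_port(FLAG_LINKCTRL));                /* 0 */
        return 0;
}
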
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index c34d0d4e10ee..656a3277c2b2 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -108,38 +108,35 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
}
#endif
-extern int efx_mcdi_init(struct efx_nic *efx);
-extern void efx_mcdi_fini(struct efx_nic *efx);
+int efx_mcdi_init(struct efx_nic *efx);
+void efx_mcdi_fini(struct efx_nic *efx);
-extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
- const efx_dword_t *inbuf, size_t inlen,
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
+ size_t inlen, efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
+
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen);
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
-extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
- const efx_dword_t *inbuf, size_t inlen);
-extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
- efx_dword_t *outbuf, size_t outlen,
- size_t *outlen_actual);
-
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
unsigned long cookie, int rc,
efx_dword_t *outbuf,
size_t outlen_actual);
-extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen,
- size_t outlen,
- efx_mcdi_async_completer *complete,
- unsigned long cookie);
+int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
-extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
-extern void efx_mcdi_mode_poll(struct efx_nic *efx);
-extern void efx_mcdi_mode_event(struct efx_nic *efx);
-extern void efx_mcdi_flush_async(struct efx_nic *efx);
+int efx_mcdi_poll_reboot(struct efx_nic *efx);
+void efx_mcdi_mode_poll(struct efx_nic *efx);
+void efx_mcdi_mode_event(struct efx_nic *efx);
+void efx_mcdi_flush_async(struct efx_nic *efx);
-extern void efx_mcdi_process_event(struct efx_channel *channel,
- efx_qword_t *event);
-extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
/* We expect that 16- and 32-bit fields in MCDI requests and responses
* are appropriately aligned, but 64-bit fields are only
@@ -275,55 +272,54 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
-extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
- u16 *fw_subtype_list, u32 *capabilities);
-extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
- u32 dest_evq);
-extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
-extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
- size_t *size_out, size_t *erase_size_out,
- bool *protected_out);
-extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
-extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
-extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
-extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
- const u8 *mac, int *id_out);
-extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
-extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
-extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
-extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
-extern int efx_mcdi_port_probe(struct efx_nic *efx);
-extern void efx_mcdi_port_remove(struct efx_nic *efx);
-extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
-extern int efx_mcdi_port_get_number(struct efx_nic *efx);
-extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
-extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
-extern int efx_mcdi_set_mac(struct efx_nic *efx);
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ u16 *fw_subtype_list, u32 *capabilities);
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+ size_t *size_out, size_t *erase_size_out,
+ bool *protected_out);
+int efx_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_mcdi_handle_assertion(struct efx_nic *efx);
+void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
+ int *id_out);
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
+int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+int efx_mcdi_port_probe(struct efx_nic *efx);
+void efx_mcdi_port_remove(struct efx_nic *efx);
+int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+int efx_mcdi_set_mac(struct efx_nic *efx);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
-extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
-extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
-extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
-extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
-extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
#ifdef CONFIG_SFC_MCDI_MON
-extern int efx_mcdi_mon_probe(struct efx_nic *efx);
-extern void efx_mcdi_mon_remove(struct efx_nic *efx);
+int efx_mcdi_mon_probe(struct efx_nic *efx);
+void efx_mcdi_mon_remove(struct efx_nic *efx);
#else
static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
#ifdef CONFIG_SFC_MTD
-extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, u8 *buffer);
-extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
-extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer);
-extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
-extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, u8 *buffer);
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, const u8 *buffer);
+int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
#endif
#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index b5cf62492f8e..e0a63ddb7a6c 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -2574,8 +2574,58 @@
#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
-#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */
-#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an emergency descriptor fetch was
+ * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
#define MC_CMD_MAC_NSTATS 0x61 /* enum */
@@ -5065,6 +5115,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
/* RxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
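
The new PM_AND_RXDP_COUNTERS capability above is described, like other MCDI fields, by an LBN/WIDTH pair. A user-space sketch of testing such a 1-bit field (constants copied from the hunk, helper name illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PM_AND_RXDP_COUNTERS_LBN   27
#define PM_AND_RXDP_COUNTERS_WIDTH 1

/* Extract a WIDTH-bit field starting at bit LBN of the capabilities word. */
static bool has_pm_and_rxdp_counters(uint32_t caps)
{
        return (caps >> PM_AND_RXDP_COUNTERS_LBN) &
               ((1u << PM_AND_RXDP_COUNTERS_WIDTH) - 1);
}

int main(void)
{
        printf("%d\n", has_pm_and_rxdp_counters(1u << 27)); /* 1 */
        printf("%d\n", has_pm_and_rxdp_counters(0));        /* 0 */
        return 0;
}
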
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index 16824fecc5ee..4a2dc4c281b7 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -20,7 +20,7 @@
static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
-extern unsigned efx_mdio_id_oui(u32 id);
+unsigned efx_mdio_id_oui(u32 id);
static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
{
@@ -56,7 +56,7 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
return sync;
}
-extern const char *efx_mdio_mmd_name(int mmd);
+const char *efx_mdio_mmd_name(int mmd);
/*
* Reset a specific MMD and wait for reset to clear.
@@ -64,30 +64,29 @@ extern const char *efx_mdio_mmd_name(int mmd);
*
* This function will sleep
*/
-extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
- int spins, int spintime);
+int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime);
/* As efx_mdio_check_mmd but for multiple MMDs */
int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
/* Check the link status of specified mmds in bit mask */
-extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
+bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
/* Generic transmit disable support through PMAPMD */
-extern void efx_mdio_transmit_disable(struct efx_nic *efx);
+void efx_mdio_transmit_disable(struct efx_nic *efx);
/* Generic part of reconfigure: set/clear loopback bits */
-extern void efx_mdio_phy_reconfigure(struct efx_nic *efx);
+void efx_mdio_phy_reconfigure(struct efx_nic *efx);
/* Set the power state of the specified MMDs */
-extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
- int low_power, unsigned int mmd_mask);
+void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power,
+ unsigned int mmd_mask);
/* Set (some of) the PHY settings over MDIO */
-extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
/* Push advertising flags and restart autonegotiation */
-extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
+void efx_mdio_an_reconfigure(struct efx_nic *efx);
/* Get pause parameters from AN if available (otherwise return
* requested pause parameters)
@@ -95,8 +94,7 @@ extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
u8 efx_mdio_get_pause(struct efx_nic *efx);
/* Wait for specified MMDs to exit reset within a timeout */
-extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx,
- unsigned int mmd_mask);
+int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask);
/* Set or clear flag, debouncing */
static inline void
@@ -107,6 +105,6 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
}
/* Liveness self-test for MDIO PHYs */
-extern int efx_mdio_test_alive(struct efx_nic *efx);
+int efx_mdio_test_alive(struct efx_nic *efx);
#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b172ed133055..b14a717ac3e8 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -141,6 +141,8 @@ struct efx_special_buffer {
* @len: Length of this fragment.
* This field is zero when the queue slot is empty.
* @unmap_len: Length of this fragment to unmap
+ * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
+ * Only valid if @unmap_len != 0.
*/
struct efx_tx_buffer {
union {
@@ -154,6 +156,7 @@ struct efx_tx_buffer {
unsigned short flags;
unsigned short len;
unsigned short unmap_len;
+ unsigned short dma_offset;
};
#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
@@ -182,6 +185,9 @@ struct efx_tx_buffer {
* @tsoh_page: Array of pages of TSO header buffers
* @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
+ * @piobuf: PIO buffer region for this TX queue (shared with its partner).
+ * Size of the region is efx_piobuf_size.
+ * @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised?
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
@@ -209,6 +215,7 @@ struct efx_tx_buffer {
* blocks
* @tso_packets: Number of packets via the TSO xmit path
* @pushes: Number of times the TX push feature has been used
+ * @pio_packets: Number of times the TX PIO feature has been used
* @empty_read_count: If the completion path has seen the queue as empty
* and the transmission path has not yet checked this, the value of
* @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -223,6 +230,8 @@ struct efx_tx_queue {
struct efx_buffer *tsoh_page;
struct efx_special_buffer txd;
unsigned int ptr_mask;
+ void __iomem *piobuf;
+ unsigned int piobuf_offset;
bool initialised;
/* Members used mainly on the completion path */
@@ -238,6 +247,7 @@ struct efx_tx_queue {
unsigned int tso_long_headers;
unsigned int tso_packets;
unsigned int pushes;
+ unsigned int pio_packets;
/* Members shared between paths and sometimes updated */
unsigned int empty_read_count ____cacheline_aligned_in_smp;
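
net_driver.h above adds @dma_offset so the TX unmap address can be recovered directly rather than recomputed from the buffer and unmap lengths (the matching change to efx_dequeue_buffer() appears in the tx.c diff further down). A minimal user-space sketch of that bookkeeping (the struct and values are illustrative):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

struct tx_buffer {
        dma_addr_t dma_addr;        /* start of this fragment */
        unsigned short dma_offset;  /* fragment start minus mapping start */
        unsigned short unmap_len;   /* length of the whole mapping */
};

/* With the offset stored at map time, unmapping is a single subtraction. */
static dma_addr_t unmap_addr(const struct tx_buffer *b)
{
        return b->dma_addr - b->dma_offset;
}

int main(void)
{
        struct tx_buffer b = { .dma_addr = 0x1040, .dma_offset = 0x40,
                               .unmap_len = 0x1000 };

        printf("%#llx\n", (unsigned long long)unmap_addr(&b)); /* 0x1000 */
        return 0;
}
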
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index e7dbd2dd202e..9c90bf56090f 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -19,6 +19,7 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
+#include "ef10_regs.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
@@ -166,26 +167,30 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
/* Register dump */
-#define REGISTER_REVISION_A 1
-#define REGISTER_REVISION_B 2
-#define REGISTER_REVISION_C 3
-#define REGISTER_REVISION_Z 3 /* latest revision */
+#define REGISTER_REVISION_FA 1
+#define REGISTER_REVISION_FB 2
+#define REGISTER_REVISION_FC 3
+#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */
+#define REGISTER_REVISION_ED 4
+#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */
struct efx_nic_reg {
u32 offset:24;
- u32 min_revision:2, max_revision:2;
+ u32 min_revision:3, max_revision:3;
};
-#define REGISTER(name, min_rev, max_rev) { \
- FR_ ## min_rev ## max_rev ## _ ## name, \
- REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
+#define REGISTER(name, arch, min_rev, max_rev) { \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name, \
+ REGISTER_REVISION_ ## arch ## min_rev, \
+ REGISTER_REVISION_ ## arch ## max_rev \
}
-#define REGISTER_AA(name) REGISTER(name, A, A)
-#define REGISTER_AB(name) REGISTER(name, A, B)
-#define REGISTER_AZ(name) REGISTER(name, A, Z)
-#define REGISTER_BB(name) REGISTER(name, B, B)
-#define REGISTER_BZ(name) REGISTER(name, B, Z)
-#define REGISTER_CZ(name) REGISTER(name, C, Z)
+#define REGISTER_AA(name) REGISTER(name, F, A, A)
+#define REGISTER_AB(name) REGISTER(name, F, A, B)
+#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
+#define REGISTER_BB(name) REGISTER(name, F, B, B)
+#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
+#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
static const struct efx_nic_reg efx_nic_regs[] = {
REGISTER_AZ(ADR_REGION),
@@ -292,37 +297,42 @@ static const struct efx_nic_reg efx_nic_regs[] = {
REGISTER_AB(XX_TXDRV_CTL),
/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
/* XX_CORE_STAT is partly RC */
+ REGISTER_DZ(BIU_HW_REV_ID),
+ REGISTER_DZ(MC_DB_LWRD),
+ REGISTER_DZ(MC_DB_HWRD),
};
struct efx_nic_reg_table {
u32 offset:24;
- u32 min_revision:2, max_revision:2;
+ u32 min_revision:3, max_revision:3;
u32 step:6, rows:21;
};
-#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
+#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
offset, \
- REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
+ REGISTER_REVISION_ ## arch ## min_rev, \
+ REGISTER_REVISION_ ## arch ## max_rev, \
step, rows \
}
-#define REGISTER_TABLE(name, min_rev, max_rev) \
+#define REGISTER_TABLE(name, arch, min_rev, max_rev) \
REGISTER_TABLE_DIMENSIONS( \
- name, FR_ ## min_rev ## max_rev ## _ ## name, \
- min_rev, max_rev, \
- FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
- FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
-#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
-#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
-#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
-#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
+ name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \
+ arch, min_rev, max_rev, \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
#define REGISTER_TABLE_BB_CZ(name) \
- REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
FR_BZ_ ## name ## _STEP, \
FR_BB_ ## name ## _ROWS), \
- REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
FR_BZ_ ## name ## _STEP, \
FR_CZ_ ## name ## _ROWS)
-#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
+#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
/* DRIVER is not used */
@@ -340,9 +350,9 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
* 1K entries allows for some expansion of queue count and
* size before we need to change the version. */
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
- A, A, 8, 1024),
+ F, A, A, 8, 1024),
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
- B, Z, 8, 1024),
+ F, B, Z, 8, 1024),
REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
REGISTER_TABLE_BB_CZ(TIMER_TBL),
REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -353,6 +363,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
/* MSIX_PBA_TABLE is not mapped */
/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
REGISTER_TABLE_BZ(RX_FILTER_TBL0),
+ REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
};
size_t efx_nic_get_regs_len(struct efx_nic *efx)
@@ -469,8 +480,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
* @count: Length of the @desc array
* @mask: Bitmask of which elements of @desc are enabled
* @stats: Buffer to update with the converted statistics. The length
- * of this array must be at least the number of set bits in the
- * first @count bits of @mask.
+ * of this array must be at least @count.
* @dma_buf: DMA buffer containing hardware statistics
* @accumulate: If set, the converted values will be added rather than
* directly stored to the corresponding elements of @stats
@@ -503,11 +513,9 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
}
if (accumulate)
- *stats += val;
+ stats[index] += val;
else
- *stats = val;
+ stats[index] = val;
}
-
- ++stats;
}
}
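
The nic.c fix above writes each converted counter to its own slot, stats[index], and the kernel-doc now requires the destination array to be @count entries long. A simplified user-space sketch of the corrected behaviour (the real function also honours per-stat DMA widths and offsets, omitted here):

#include <stdint.h>
#include <stdio.h>

#define N_STATS 8

/* Copy or accumulate each enabled counter into its own slot. */
static void update_stats(uint64_t mask, const uint64_t *dma_buf,
                         uint64_t *stats, int accumulate)
{
        size_t index;

        for (index = 0; index < N_STATS; index++) {
                if (!(mask & (1ULL << index)))
                        continue;
                if (accumulate)
                        stats[index] += dma_buf[index];
                else
                        stats[index] = dma_buf[index];
        }
}

int main(void)
{
        uint64_t dma_buf[N_STATS] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint64_t stats[N_STATS] = { 0 };

        update_stats(0x5, dma_buf, stats, 1);   /* enables stats 0 and 2 */
        printf("%llu %llu %llu\n", (unsigned long long)stats[0],
               (unsigned long long)stats[1], (unsigned long long)stats[2]);
        return 0;
}
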
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index fda29d39032f..11b6112d9249 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -30,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
return efx->type->revision;
}
-extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
+u32 efx_farch_fpga_ver(struct efx_nic *efx);
/* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -71,6 +71,26 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
}
+/* Report whether the NIC considers this TX queue empty, given the
+ * write_count used for the last doorbell push. May return a false
+ * negative.
+ */
+static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
+ unsigned int write_count)
+{
+ unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+ if (empty_read_count == 0)
+ return false;
+
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
+{
+ return __efx_nic_tx_is_empty(tx_queue, tx_queue->write_count);
+}
+
/* Decide whether to push a TX descriptor to the NIC vs merely writing
* the doorbell. This can reduce latency when we are adding a single
* descriptor to an empty queue, but is otherwise pointless. Further,
@@ -80,14 +100,10 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
unsigned int write_count)
{
- unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
-
- if (empty_read_count == 0)
- return false;
+ bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);
tx_queue->empty_read_count = 0;
- return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
- && tx_queue->write_count - write_count == 1;
+ return was_empty && tx_queue->write_count - write_count == 1;
}
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
@@ -386,9 +402,27 @@ enum {
EF10_STAT_rx_align_error,
EF10_STAT_rx_length_error,
EF10_STAT_rx_nodesc_drops,
+ EF10_STAT_rx_pm_trunc_bb_overflow,
+ EF10_STAT_rx_pm_discard_bb_overflow,
+ EF10_STAT_rx_pm_trunc_vfifo_full,
+ EF10_STAT_rx_pm_discard_vfifo_full,
+ EF10_STAT_rx_pm_trunc_qbb,
+ EF10_STAT_rx_pm_discard_qbb,
+ EF10_STAT_rx_pm_discard_mapping,
+ EF10_STAT_rx_dp_q_disabled_packets,
+ EF10_STAT_rx_dp_di_dropped_packets,
+ EF10_STAT_rx_dp_streaming_packets,
+ EF10_STAT_rx_dp_emerg_fetch,
+ EF10_STAT_rx_dp_emerg_wait,
EF10_STAT_COUNT
};
+/* Maximum number of TX PIO buffers we may allocate to a function.
+ * This matches the total number of buffers on each SFC9100-family
+ * controller.
+ */
+#define EF10_TX_PIOBUF_COUNT 16
+
/**
* struct efx_ef10_nic_data - EF10 architecture NIC state
* @mcdi_buf: DMA buffer for MCDI
@@ -397,6 +431,13 @@ enum {
* @n_allocated_vis: Number of VIs allocated to this function
* @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
* @must_restore_filters: Flag: filters have yet to be restored after MC reboot
+ * @n_piobufs: Number of PIO buffers allocated to this function
+ * @wc_membase: Base address of write-combining mapping of the memory BAR
+ * @pio_write_base: Base address for writing PIO buffers
+ * @pio_write_vi_base: Relative VI number for @pio_write_base
+ * @piobuf_handle: Handle of each PIO buffer allocated
+ * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
+ * reboot
* @rx_rss_context: Firmware handle for our RSS context
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
@@ -412,6 +453,11 @@ struct efx_ef10_nic_data {
unsigned int n_allocated_vis;
bool must_realloc_vis;
bool must_restore_filters;
+ unsigned int n_piobufs;
+ void __iomem *wc_membase, *pio_write_base;
+ unsigned int pio_write_vi_base;
+ unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+ bool must_restore_piobufs;
u32 rx_rss_context;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
@@ -463,18 +509,18 @@ static inline unsigned int efx_vf_size(struct efx_nic *efx)
return 1 << efx->vi_scale;
}
-extern int efx_init_sriov(void);
-extern void efx_sriov_probe(struct efx_nic *efx);
-extern int efx_sriov_init(struct efx_nic *efx);
-extern void efx_sriov_mac_address_changed(struct efx_nic *efx);
-extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
-extern void efx_sriov_reset(struct efx_nic *efx);
-extern void efx_sriov_fini(struct efx_nic *efx);
-extern void efx_fini_sriov(void);
+int efx_init_sriov(void);
+void efx_sriov_probe(struct efx_nic *efx);
+int efx_sriov_init(struct efx_nic *efx);
+void efx_sriov_mac_address_changed(struct efx_nic *efx);
+void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
+void efx_sriov_reset(struct efx_nic *efx);
+void efx_sriov_fini(struct efx_nic *efx);
+void efx_fini_sriov(void);
#else
@@ -500,22 +546,20 @@ static inline void efx_fini_sriov(void) {}
#endif
-extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf,
- u16 vlan, u8 qos);
-extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
- struct ifla_vf_info *ivf);
-extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
- bool spoofchk);
+int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos);
+int efx_sriov_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivf);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
+ bool spoofchk);
struct ethtool_ts_info;
-extern void efx_ptp_probe(struct efx_nic *efx);
-extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
-extern void efx_ptp_get_ts_info(struct efx_nic *efx,
- struct ethtool_ts_info *ts_info);
-extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_ptp_probe(struct efx_nic *efx);
+int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
@@ -529,7 +573,7 @@ extern const struct efx_nic_type efx_hunt_a0_nic_type;
**************************************************************************
*/
-extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */
static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
@@ -597,58 +641,58 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
channel->efx->type->ev_read_ack(channel);
}
-extern void efx_nic_event_test_start(struct efx_channel *channel);
+void efx_nic_event_test_start(struct efx_channel *channel);
/* Falcon/Siena queue operations */
-extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
-extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
-extern int efx_farch_ev_probe(struct efx_channel *channel);
-extern int efx_farch_ev_init(struct efx_channel *channel);
-extern void efx_farch_ev_fini(struct efx_channel *channel);
-extern void efx_farch_ev_remove(struct efx_channel *channel);
-extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
-extern void efx_farch_ev_read_ack(struct efx_channel *channel);
-extern void efx_farch_ev_test_generate(struct efx_channel *channel);
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+int efx_farch_ev_probe(struct efx_channel *channel);
+int efx_farch_ev_init(struct efx_channel *channel);
+void efx_farch_ev_fini(struct efx_channel *channel);
+void efx_farch_ev_remove(struct efx_channel *channel);
+int efx_farch_ev_process(struct efx_channel *channel, int quota);
+void efx_farch_ev_read_ack(struct efx_channel *channel);
+void efx_farch_ev_test_generate(struct efx_channel *channel);
/* Falcon/Siena filter operations */
-extern int efx_farch_filter_table_probe(struct efx_nic *efx);
-extern void efx_farch_filter_table_restore(struct efx_nic *efx);
-extern void efx_farch_filter_table_remove(struct efx_nic *efx);
-extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
-extern s32 efx_farch_filter_insert(struct efx_nic *efx,
- struct efx_filter_spec *spec, bool replace);
-extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id);
-extern int efx_farch_filter_get_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *);
-extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
-extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size);
+int efx_farch_filter_table_probe(struct efx_nic *efx);
+void efx_farch_filter_table_restore(struct efx_nic *efx);
+void efx_farch_filter_table_remove(struct efx_nic *efx);
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+ bool replace);
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority, u32 filter_id,
+ struct efx_filter_spec *);
+void efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority, u32 *buf,
+ u32 size);
#ifdef CONFIG_RFS_ACCEL
-extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
- struct efx_filter_spec *spec);
-extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
- unsigned int index);
+s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
#endif
-extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
-extern bool efx_nic_event_present(struct efx_channel *channel);
+bool efx_nic_event_present(struct efx_channel *channel);
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
@@ -669,17 +713,17 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
}
/* Interrupts */
-extern int efx_nic_init_interrupt(struct efx_nic *efx);
-extern void efx_nic_irq_test_start(struct efx_nic *efx);
-extern void efx_nic_fini_interrupt(struct efx_nic *efx);
+int efx_nic_init_interrupt(struct efx_nic *efx);
+void efx_nic_irq_test_start(struct efx_nic *efx);
+void efx_nic_fini_interrupt(struct efx_nic *efx);
/* Falcon/Siena interrupts */
-extern void efx_farch_irq_enable_master(struct efx_nic *efx);
-extern void efx_farch_irq_test_generate(struct efx_nic *efx);
-extern void efx_farch_irq_disable_master(struct efx_nic *efx);
-extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
+void efx_farch_irq_enable_master(struct efx_nic *efx);
+void efx_farch_irq_test_generate(struct efx_nic *efx);
+void efx_farch_irq_disable_master(struct efx_nic *efx);
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
@@ -691,21 +735,21 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
}
/* Global Resources */
-extern int efx_nic_flush_queues(struct efx_nic *efx);
-extern void siena_prepare_flush(struct efx_nic *efx);
-extern int efx_farch_fini_dmaq(struct efx_nic *efx);
-extern void siena_finish_flush(struct efx_nic *efx);
-extern void falcon_start_nic_stats(struct efx_nic *efx);
-extern void falcon_stop_nic_stats(struct efx_nic *efx);
-extern int falcon_reset_xaui(struct efx_nic *efx);
-extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
-extern void efx_farch_init_common(struct efx_nic *efx);
-extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
+int efx_nic_flush_queues(struct efx_nic *efx);
+void siena_prepare_flush(struct efx_nic *efx);
+int efx_farch_fini_dmaq(struct efx_nic *efx);
+void siena_finish_flush(struct efx_nic *efx);
+void falcon_start_nic_stats(struct efx_nic *efx);
+void falcon_stop_nic_stats(struct efx_nic *efx);
+int falcon_reset_xaui(struct efx_nic *efx);
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+void efx_farch_init_common(struct efx_nic *efx);
+void efx_ef10_handle_drain_event(struct efx_nic *efx);
static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
efx->type->rx_push_indir_table(efx);
}
-extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
+void efx_farch_rx_push_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len, gfp_t gfp_flags);
@@ -716,24 +760,22 @@ struct efx_farch_register_test {
unsigned address;
efx_oword_t mask;
};
-extern int efx_farch_test_registers(struct efx_nic *efx,
- const struct efx_farch_register_test *regs,
- size_t n_regs);
+int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs);
-extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
-extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+size_t efx_nic_get_regs_len(struct efx_nic *efx);
+void efx_nic_get_regs(struct efx_nic *efx, void *buf);
-extern size_t
-efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names);
-extern void
-efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask,
- u64 *stats, const void *dma_buf, bool accumulate);
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names);
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u64 *stats,
+ const void *dma_buf, bool accumulate);
#define EFX_MAX_FLUSH_TIME 5000
-extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
- efx_qword_t *event);
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event);
#endif /* EFX_NIC_H */
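
nic.h above splits the empty-queue test out of efx_nic_may_push_tx_desc() so other callers can reuse it; the idea is that empty_read_count stores the read count tagged with a valid bit. A standalone sketch of the check (EMPTY_COUNT_VALID is an illustrative stand-in for EFX_EMPTY_COUNT_VALID):

#include <stdbool.h>
#include <stdio.h>

#define EMPTY_COUNT_VALID 0x80000000u

/* The queue looked empty iff the tagged read count matches the write
 * count used for the last doorbell push; zero means "no record".
 */
static bool tx_was_empty(unsigned int empty_read_count,
                         unsigned int write_count)
{
        if (empty_read_count == 0)
                return false;
        return ((empty_read_count ^ write_count) & ~EMPTY_COUNT_VALID) == 0;
}

int main(void)
{
        printf("%d\n", tx_was_empty(5 | EMPTY_COUNT_VALID, 5)); /* 1 */
        printf("%d\n", tx_was_empty(5 | EMPTY_COUNT_VALID, 6)); /* 0 */
        return 0;
}
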
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 45eeb7075156..803bf445c08e 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -15,7 +15,7 @@
*/
extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
-extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
/****************************************************************************
* AMCC/Quake QT202x PHYs
@@ -34,7 +34,7 @@ extern const struct efx_phy_operations falcon_qt202x_phy_ops;
#define QUAKE_LED_TXLINK (0)
#define QUAKE_LED_RXLINK (8)
-extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
+void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
/****************************************************************************
* Transwitch CX4 retimer
@@ -44,7 +44,7 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
#define TXC_GPIO_DIR_INPUT 0
#define TXC_GPIO_DIR_OUTPUT 1
-extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
-extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
+void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
+void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
#endif
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 4a596725023f..8f09e686fc23 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -12,6 +12,7 @@
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
@@ -818,44 +819,70 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_filter_spec spec;
- const struct iphdr *ip;
const __be16 *ports;
+ __be16 ether_type;
int nhoff;
int rc;
- nhoff = skb_network_offset(skb);
+ /* The core RPS/RFS code has already parsed and validated
+ * VLAN, IP and transport headers. We assume they are in the
+ * header area.
+ */
if (skb->protocol == htons(ETH_P_8021Q)) {
- EFX_BUG_ON_PARANOID(skb_headlen(skb) <
- nhoff + sizeof(struct vlan_hdr));
- if (((const struct vlan_hdr *)skb->data + nhoff)->
- h_vlan_encapsulated_proto != htons(ETH_P_IP))
- return -EPROTONOSUPPORT;
+ const struct vlan_hdr *vh =
+ (const struct vlan_hdr *)skb->data;
- /* This is IP over 802.1q VLAN. We can't filter on the
- * IP 5-tuple and the vlan together, so just strip the
- * vlan header and filter on the IP part.
+ /* We can't filter on the IP 5-tuple and the vlan
+ * together, so just strip the vlan header and filter
+ * on the IP part.
*/
- nhoff += sizeof(struct vlan_hdr);
- } else if (skb->protocol != htons(ETH_P_IP)) {
- return -EPROTONOSUPPORT;
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
+ ether_type = vh->h_vlan_encapsulated_proto;
+ nhoff = sizeof(struct vlan_hdr);
+ } else {
+ ether_type = skb->protocol;
+ nhoff = 0;
}
- /* RFS must validate the IP header length before calling us */
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
- ip = (const struct iphdr *)(skb->data + nhoff);
- if (ip_is_fragment(ip))
+ if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
- ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
rxq_index);
- rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
- ip->daddr, ports[1], ip->saddr, ports[0]);
- if (rc)
- return rc;
+ spec.match_flags =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec.ether_type = ether_type;
+
+ if (ether_type == htons(ETH_P_IP)) {
+ const struct iphdr *ip =
+ (const struct iphdr *)(skb->data + nhoff);
+
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
+ if (ip_is_fragment(ip))
+ return -EPROTONOSUPPORT;
+ spec.ip_proto = ip->protocol;
+ spec.rem_host[0] = ip->saddr;
+ spec.loc_host[0] = ip->daddr;
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+ } else {
+ const struct ipv6hdr *ip6 =
+ (const struct ipv6hdr *)(skb->data + nhoff);
+
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) <
+ nhoff + sizeof(*ip6) + 4);
+ spec.ip_proto = ip6->nexthdr;
+ memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
+ memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
+ ports = (const __be16 *)(ip6 + 1);
+ }
+
+ spec.rem_port = ports[0];
+ spec.loc_port = ports[1];
rc = efx->type->filter_rfs_insert(efx, &spec);
if (rc < 0)
@@ -866,11 +893,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
channel = efx_get_channel(efx, skb_get_rx_queue(skb));
++channel->rfs_filters_added;
- netif_info(efx, rx_status, efx->net_dev,
- "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
- (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
- &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
- rxq_index, flow_id, rc);
+ if (ether_type == htons(ETH_P_IP))
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ spec.rem_host, ntohs(ports[0]), spec.loc_host,
+ ntohs(ports[1]), rxq_index, flow_id, rc);
+ else
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
+ (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ spec.rem_host, ntohs(ports[0]), spec.loc_host,
+ ntohs(ports[1]), rxq_index, flow_id, rc);
return rc;
}
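
For reference: the efx_filter_rfs() hunk above now resolves the ether type and header offset first (optionally stripping an 802.1Q tag) and only then branches on IPv4 versus IPv6 to fill in the 5-tuple of the filter spec. The userspace sketch below mirrors that parsing order; the five_tuple struct and the parse_5tuple()/read_be16() helpers are invented for illustration and are not part of the sfc driver.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohs() */
#include <netinet/ip.h>         /* struct iphdr */
#include <netinet/ip6.h>        /* struct ip6_hdr */

#define ETH_P_IP    0x0800
#define ETH_P_IPV6  0x86DD
#define ETH_P_8021Q 0x8100

struct five_tuple {
        uint16_t ether_type;            /* host order */
        uint8_t  ip_proto;
        uint8_t  addr_len;              /* 4 or 16 bytes */
        uint8_t  src[16], dst[16];
        uint16_t src_port, dst_port;    /* host order */
};

static uint16_t read_be16(const uint8_t *p)
{
        uint16_t v;

        memcpy(&v, p, sizeof(v));       /* avoid unaligned loads */
        return ntohs(v);
}

/* Parse the network-layer part of a frame the way the hunk above does:
 * strip one VLAN tag if present, reject anything that is not IPv4/IPv6
 * (or is an IPv4 fragment), then read the addresses and the first two
 * L4 ports.  Returns 0 on success, -1 if the frame cannot be steered.
 */
int parse_5tuple(const uint8_t *l3, size_t len, uint16_t proto,
                 struct five_tuple *t)
{
        size_t nhoff = 0;

        if (proto == ETH_P_8021Q) {
                if (len < 4)
                        return -1;
                proto = read_be16(l3 + 2);      /* encapsulated protocol */
                nhoff = 4;                      /* skip the VLAN header */
        }
        if (proto != ETH_P_IP && proto != ETH_P_IPV6)
                return -1;
        t->ether_type = proto;

        if (proto == ETH_P_IP) {
                const struct iphdr *ip = (const struct iphdr *)(l3 + nhoff);
                size_t ihl;

                if (len < nhoff + sizeof(*ip))
                        return -1;
                ihl = ip->ihl * 4u;
                if (len < nhoff + ihl + 4)
                        return -1;
                if (ntohs(ip->frag_off) & 0x3fff)  /* fragment: no L4 ports */
                        return -1;
                t->ip_proto = ip->protocol;
                t->addr_len = 4;
                memcpy(t->src, &ip->saddr, 4);
                memcpy(t->dst, &ip->daddr, 4);
                nhoff += ihl;
        } else {
                const struct ip6_hdr *ip6 =
                        (const struct ip6_hdr *)(l3 + nhoff);

                if (len < nhoff + sizeof(*ip6) + 4)
                        return -1;
                t->ip_proto = ip6->ip6_nxt;
                t->addr_len = 16;
                memcpy(t->src, &ip6->ip6_src, 16);
                memcpy(t->dst, &ip6->ip6_dst, 16);
                nhoff += sizeof(*ip6);
        }

        /* TCP and UDP both place source then destination port first */
        t->src_port = read_be16(l3 + nhoff);
        t->dst_port = read_be16(l3 + nhoff + 2);
        return 0;
}

In the driver the same information then lands in struct efx_filter_spec (rem_host/loc_host, rem_port/loc_port) before efx->type->filter_rfs_insert() is called.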
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index 87698ae0bf75..a2f4a06ffa4e 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -43,13 +43,12 @@ struct efx_self_tests {
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};
-extern void efx_loopback_rx_packet(struct efx_nic *efx,
- const char *buf_ptr, int pkt_len);
-extern int efx_selftest(struct efx_nic *efx,
- struct efx_self_tests *tests,
- unsigned flags);
-extern void efx_selftest_async_start(struct efx_nic *efx);
-extern void efx_selftest_async_cancel(struct efx_nic *efx);
-extern void efx_selftest_async_work(struct work_struct *data);
+void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
+ int pkt_len);
+int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned flags);
+void efx_selftest_async_start(struct efx_nic *efx);
+void efx_selftest_async_cancel(struct efx_nic *efx);
+void efx_selftest_async_work(struct work_struct *data);
#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 2ac91c5b5eea..c49d1fb16965 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -17,10 +17,46 @@
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
+#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
+#include "io.h"
#include "nic.h"
#include "workarounds.h"
+#include "ef10_regs.h"
+
+#ifdef EFX_USE_PIO
+
+#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
+#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
+unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
+
+#endif /* EFX_USE_PIO */
+
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+ return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+ return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer =
+ __efx_tx_queue_get_insert_buffer(tx_queue);
+
+ EFX_BUG_ON_PARANOID(buffer->len);
+ EFX_BUG_ON_PARANOID(buffer->flags);
+ EFX_BUG_ON_PARANOID(buffer->unmap_len);
+
+ return buffer;
+}
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
@@ -29,8 +65,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
- dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
- buffer->unmap_len);
+ dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE);
@@ -83,8 +118,10 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
*/
unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
- /* Possibly one more per segment for the alignment workaround */
- if (EFX_WORKAROUND_5391(efx))
+ /* Possibly one more per segment for the alignment workaround,
+ * or for option descriptors
+ */
+ if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
max_descs += EFX_TSO_MAX_SEGS;
/* Possibly more for PCIe page boundaries within input fragments */
@@ -145,6 +182,145 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
}
}
+#ifdef EFX_USE_PIO
+
+struct efx_short_copy_buffer {
+ int used;
+ u8 buf[L1_CACHE_BYTES];
+};
+
+/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
+ u8 *data, int len,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ int block_len = len & ~(sizeof(copy_buf->buf) - 1);
+
+ memcpy_toio(*piobuf, data, block_len);
+ *piobuf += block_len;
+ len -= block_len;
+
+ if (len) {
+ data += block_len;
+ BUG_ON(copy_buf->used);
+ BUG_ON(len > sizeof(copy_buf->buf));
+ memcpy(copy_buf->buf, data, len);
+ copy_buf->used = len;
+ }
+}
+
+/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
+ u8 *data, int len,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ if (copy_buf->used) {
+ /* if the copy buffer is partially full, fill it up and write */
+ int copy_to_buf =
+ min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
+
+ memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
+ copy_buf->used += copy_to_buf;
+
+ /* if we didn't fill it up then we're done for now */
+ if (copy_buf->used < sizeof(copy_buf->buf))
+ return;
+
+ memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+ *piobuf += sizeof(copy_buf->buf);
+ data += copy_to_buf;
+ len -= copy_to_buf;
+ copy_buf->used = 0;
+ }
+
+ efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
+}
+
+static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ /* if there's anything in it, write the whole buffer, including junk */
+ if (copy_buf->used)
+ memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+}
+
+/* Traverse skb structure and copy fragments into PIO buffer.
+ * Advances piobuf pointer.
+ */
+static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
+ u8 __iomem **piobuf,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ int i;
+
+ efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
+ copy_buf);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ u8 *vaddr;
+
+ vaddr = kmap_atomic(skb_frag_page(f));
+
+ efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
+ skb_frag_size(f), copy_buf);
+ kunmap_atomic(vaddr);
+ }
+
+ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
+}
+
+static struct efx_tx_buffer *
+efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ struct efx_tx_buffer *buffer =
+ efx_tx_queue_get_insert_buffer(tx_queue);
+ u8 __iomem *piobuf = tx_queue->piobuf;
+
+ /* Copy to PIO buffer. Ensure the writes are padded to the end
+ * of a cache line, as this is required for write-combining to be
+ * effective on at least x86.
+ */
+
+ if (skb_shinfo(skb)->nr_frags) {
+ /* The size of the copy buffer will ensure all writes
+ * are the size of a cache line.
+ */
+ struct efx_short_copy_buffer copy_buf;
+
+ copy_buf.used = 0;
+
+ efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
+ &piobuf, &copy_buf);
+ efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
+ } else {
+ /* Pad the write to the size of a cache line.
+ * We can do this because we know the skb_shared_info struct is
+ * after the source, and the destination buffer is big enough.
+ */
+ BUILD_BUG_ON(L1_CACHE_BYTES >
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ memcpy_toio(tx_queue->piobuf, skb->data,
+ ALIGN(skb->len, L1_CACHE_BYTES));
+ }
+
+ EFX_POPULATE_QWORD_5(buffer->option,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
+ ESF_DZ_TX_PIO_CONT, 0,
+ ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
+ ESF_DZ_TX_PIO_BUF_ADDR,
+ tx_queue->piobuf_offset);
+ ++tx_queue->pio_packets;
+ ++tx_queue->insert_count;
+ return buffer;
+}
+#endif /* EFX_USE_PIO */
+
/*
* Add a socket buffer to a TX queue
*
@@ -167,7 +343,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
struct device *dma_dev = &efx->pci_dev->dev;
struct efx_tx_buffer *buffer;
skb_frag_t *fragment;
- unsigned int len, unmap_len = 0, insert_ptr;
+ unsigned int len, unmap_len = 0;
dma_addr_t dma_addr, unmap_addr = 0;
unsigned int dma_len;
unsigned short dma_flags;
@@ -189,6 +365,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return NETDEV_TX_OK;
}
+ /* Consider using PIO for short packets */
+#ifdef EFX_USE_PIO
+ if (skb->len <= efx_piobuf_size && tx_queue->piobuf &&
+ efx_nic_tx_is_empty(tx_queue) &&
+ efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) {
+ buffer = efx_enqueue_skb_pio(tx_queue, skb);
+ dma_flags = EFX_TX_BUF_OPTION;
+ goto finish_packet;
+ }
+#endif
+
/* Map for DMA. Use dma_map_single rather than dma_map_page
* since this is more efficient on machines with sparse
* memory.
@@ -208,11 +395,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Add to TX queue, splitting across DMA boundaries */
do {
- insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
- buffer = &tx_queue->buffer[insert_ptr];
- EFX_BUG_ON_PARANOID(buffer->flags);
- EFX_BUG_ON_PARANOID(buffer->len);
- EFX_BUG_ON_PARANOID(buffer->unmap_len);
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
dma_len = efx_max_tx_len(efx, dma_addr);
if (likely(dma_len >= len))
@@ -230,6 +413,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Transfer ownership of the unmapping to the final buffer */
buffer->flags = EFX_TX_BUF_CONT | dma_flags;
buffer->unmap_len = unmap_len;
+ buffer->dma_offset = buffer->dma_addr - unmap_addr;
unmap_len = 0;
/* Get address and size of next fragment */
@@ -245,6 +429,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
}
/* Transfer ownership of the skb to the final buffer */
+finish_packet:
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB | dma_flags;
@@ -270,8 +455,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
while (tx_queue->insert_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0;
--tx_queue->insert_count;
- insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
- buffer = &tx_queue->buffer[insert_ptr];
+ buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
}
@@ -628,6 +812,9 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
* @tcp_off: Offset of TCP header
* @header_len: Number of bytes of header
* @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
+ * @header_dma_addr: Header DMA address, when using option descriptors
+ * @header_unmap_len: Header DMA mapped length, or 0 if not using option
+ * descriptors
*
* The state used during segmentation. It is put into this data structure
* just to make it easy to pass into inline functions.
@@ -636,7 +823,7 @@ struct tso_state {
/* Output position */
unsigned out_len;
unsigned seqnum;
- unsigned ipv4_id;
+ u16 ipv4_id;
unsigned packet_space;
/* Input position */
@@ -651,6 +838,8 @@ struct tso_state {
unsigned int tcp_off;
unsigned header_len;
unsigned int ip_base_len;
+ dma_addr_t header_dma_addr;
+ unsigned int header_unmap_len;
};
@@ -737,23 +926,18 @@ static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
{
struct efx_tx_buffer *buffer;
struct efx_nic *efx = tx_queue->efx;
- unsigned dma_len, insert_ptr;
+ unsigned dma_len;
EFX_BUG_ON_PARANOID(len <= 0);
while (1) {
- insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
- buffer = &tx_queue->buffer[insert_ptr];
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
++tx_queue->insert_count;
EFX_BUG_ON_PARANOID(tx_queue->insert_count -
tx_queue->read_count >=
efx->txq_entries);
- EFX_BUG_ON_PARANOID(buffer->len);
- EFX_BUG_ON_PARANOID(buffer->unmap_len);
- EFX_BUG_ON_PARANOID(buffer->flags);
-
buffer->dma_addr = dma_addr;
dma_len = efx_max_tx_len(efx, dma_addr);
@@ -796,6 +980,7 @@ static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
return -ENOMEM;
}
buffer->unmap_len = buffer->len;
+ buffer->dma_offset = 0;
buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
}
@@ -814,19 +999,27 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
--tx_queue->insert_count;
- buffer = &tx_queue->buffer[tx_queue->insert_count &
- tx_queue->ptr_mask];
+ buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
}
}
/* Parse the SKB header and initialise state. */
-static void tso_start(struct tso_state *st, const struct sk_buff *skb)
+static int tso_start(struct tso_state *st, struct efx_nic *efx,
+ const struct sk_buff *skb)
{
+ bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+ struct device *dma_dev = &efx->pci_dev->dev;
+ unsigned int header_len, in_len;
+ dma_addr_t dma_addr;
+
st->ip_off = skb_network_header(skb) - skb->data;
st->tcp_off = skb_transport_header(skb) - skb->data;
- st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+ header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+ in_len = skb_headlen(skb) - header_len;
+ st->header_len = header_len;
+ st->in_len = in_len;
if (st->protocol == htons(ETH_P_IP)) {
st->ip_base_len = st->header_len - st->ip_off;
st->ipv4_id = ntohs(ip_hdr(skb)->id);
@@ -840,9 +1033,34 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
- st->out_len = skb->len - st->header_len;
- st->unmap_len = 0;
- st->dma_flags = 0;
+ st->out_len = skb->len - header_len;
+
+ if (!use_options) {
+ st->header_unmap_len = 0;
+
+ if (likely(in_len == 0)) {
+ st->dma_flags = 0;
+ st->unmap_len = 0;
+ return 0;
+ }
+
+ dma_addr = dma_map_single(dma_dev, skb->data + header_len,
+ in_len, DMA_TO_DEVICE);
+ st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
+ st->dma_addr = dma_addr;
+ st->unmap_addr = dma_addr;
+ st->unmap_len = in_len;
+ } else {
+ dma_addr = dma_map_single(dma_dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ st->header_dma_addr = dma_addr;
+ st->header_unmap_len = skb_headlen(skb);
+ st->dma_flags = 0;
+ st->dma_addr = dma_addr + header_len;
+ st->unmap_len = 0;
+ }
+
+ return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -860,24 +1078,6 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
return -ENOMEM;
}
-static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
- const struct sk_buff *skb)
-{
- int hl = st->header_len;
- int len = skb_headlen(skb) - hl;
-
- st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
- len, DMA_TO_DEVICE);
- if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
- st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
- st->unmap_len = len;
- st->in_len = len;
- st->dma_addr = st->unmap_addr;
- return 0;
- }
- return -ENOMEM;
-}
-
/**
* tso_fill_packet_with_fragment - form descriptors for the current fragment
@@ -922,6 +1122,7 @@ static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
if (st->in_len == 0) {
/* Transfer ownership of the DMA mapping */
buffer->unmap_len = st->unmap_len;
+ buffer->dma_offset = buffer->unmap_len - buffer->len;
buffer->flags |= st->dma_flags;
st->unmap_len = 0;
}
@@ -944,55 +1145,98 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
struct tso_state *st)
{
struct efx_tx_buffer *buffer =
- &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
- struct tcphdr *tsoh_th;
- unsigned ip_length;
- u8 *header;
- int rc;
+ efx_tx_queue_get_insert_buffer(tx_queue);
+ bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
+ u8 tcp_flags_clear;
- /* Allocate and insert a DMA-mapped header buffer. */
- header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
- if (!header)
- return -ENOMEM;
-
- tsoh_th = (struct tcphdr *)(header + st->tcp_off);
-
- /* Copy and update the headers. */
- memcpy(header, skb->data, st->header_len);
-
- tsoh_th->seq = htonl(st->seqnum);
- st->seqnum += skb_shinfo(skb)->gso_size;
- if (st->out_len > skb_shinfo(skb)->gso_size) {
- /* This packet will not finish the TSO burst. */
+ if (!is_last) {
st->packet_space = skb_shinfo(skb)->gso_size;
- tsoh_th->fin = 0;
- tsoh_th->psh = 0;
+ tcp_flags_clear = 0x09; /* mask out FIN and PSH */
} else {
- /* This packet will be the last in the TSO burst. */
st->packet_space = st->out_len;
- tsoh_th->fin = tcp_hdr(skb)->fin;
- tsoh_th->psh = tcp_hdr(skb)->psh;
+ tcp_flags_clear = 0x00;
}
- ip_length = st->ip_base_len + st->packet_space;
- if (st->protocol == htons(ETH_P_IP)) {
- struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
+ if (!st->header_unmap_len) {
+ /* Allocate and insert a DMA-mapped header buffer. */
+ struct tcphdr *tsoh_th;
+ unsigned ip_length;
+ u8 *header;
+ int rc;
+
+ header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
+ if (!header)
+ return -ENOMEM;
- tsoh_iph->tot_len = htons(ip_length);
+ tsoh_th = (struct tcphdr *)(header + st->tcp_off);
+
+ /* Copy and update the headers. */
+ memcpy(header, skb->data, st->header_len);
+
+ tsoh_th->seq = htonl(st->seqnum);
+ ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
+
+ ip_length = st->ip_base_len + st->packet_space;
+
+ if (st->protocol == htons(ETH_P_IP)) {
+ struct iphdr *tsoh_iph =
+ (struct iphdr *)(header + st->ip_off);
+
+ tsoh_iph->tot_len = htons(ip_length);
+ tsoh_iph->id = htons(st->ipv4_id);
+ } else {
+ struct ipv6hdr *tsoh_iph =
+ (struct ipv6hdr *)(header + st->ip_off);
+
+ tsoh_iph->payload_len = htons(ip_length);
+ }
- /* Linux leaves suitable gaps in the IP ID space for us to fill. */
- tsoh_iph->id = htons(st->ipv4_id);
- st->ipv4_id++;
+ rc = efx_tso_put_header(tx_queue, buffer, header);
+ if (unlikely(rc))
+ return rc;
} else {
- struct ipv6hdr *tsoh_iph =
- (struct ipv6hdr *)(header + st->ip_off);
+ /* Send the original headers with a TSO option descriptor
+ * in front
+ */
+ u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
- tsoh_iph->payload_len = htons(ip_length);
+ buffer->flags = EFX_TX_BUF_OPTION;
+ buffer->len = 0;
+ buffer->unmap_len = 0;
+ EFX_POPULATE_QWORD_5(buffer->option,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+ ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
+ ++tx_queue->insert_count;
+
+ /* We mapped the headers in tso_start(). Unmap them
+ * when the last segment is completed.
+ */
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ buffer->dma_addr = st->header_dma_addr;
+ buffer->len = st->header_len;
+ if (is_last) {
+ buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
+ buffer->unmap_len = st->header_unmap_len;
+ buffer->dma_offset = 0;
+ /* Ensure we only unmap them once in case of a
+ * later DMA mapping error and rollback
+ */
+ st->header_unmap_len = 0;
+ } else {
+ buffer->flags = EFX_TX_BUF_CONT;
+ buffer->unmap_len = 0;
+ }
+ ++tx_queue->insert_count;
}
- rc = efx_tso_put_header(tx_queue, buffer, header);
- if (unlikely(rc))
- return rc;
+ st->seqnum += skb_shinfo(skb)->gso_size;
+
+ /* Linux leaves suitable gaps in the IP ID space for us to fill. */
+ ++st->ipv4_id;
++tx_queue->tso_packets;
@@ -1023,12 +1267,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
- tso_start(&state, skb);
+ rc = tso_start(&state, efx, skb);
+ if (rc)
+ goto mem_err;
- /* Assume that skb header area contains exactly the headers, and
- * all payload is in the frag list.
- */
- if (skb_headlen(skb) == state.header_len) {
+ if (likely(state.in_len == 0)) {
/* Grab the first payload fragment. */
EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
frag_i = 0;
@@ -1037,9 +1280,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
if (rc)
goto mem_err;
} else {
- rc = tso_get_head_fragment(&state, efx, skb);
- if (rc)
- goto mem_err;
+ /* Payload starts in the header area. */
frag_i = -1;
}
@@ -1091,6 +1332,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
state.unmap_len, DMA_TO_DEVICE);
}
+ /* Free the header DMA mapping, if using option descriptors */
+ if (state.header_unmap_len)
+ dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
+ state.header_unmap_len, DMA_TO_DEVICE);
+
efx_enqueue_unwind(tx_queue);
return NETDEV_TX_OK;
}
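
The PIO transmit path added to tx.c above only ever writes whole cache lines to the PIO aperture: full blocks are copied straight through, any tail shorter than a cache line is parked in a small bounce buffer, and that buffer is flushed in full (trailing junk included) once the packet is done, which is what keeps write-combining effective. The snippet below is a plain-memory analogue of efx_memcpy_toio_aligned()/efx_memcpy_toio_aligned_cb()/efx_flush_copy_buffer(); memcpy_toio and the real device aperture are replaced by ordinary memcpy, and the 64-byte block size is only an assumption for illustration.

#include <assert.h>
#include <stddef.h>
#include <string.h>

#define BLOCK_BYTES 64          /* stand-in for L1_CACHE_BYTES */

struct short_copy_buffer {
        size_t used;
        unsigned char buf[BLOCK_BYTES];
};

/* Copy only whole BLOCK_BYTES-sized chunks of @data to @dst, advancing
 * *dst; anything left over is stashed in @cb for a later, padded flush.
 */
void copy_aligned(unsigned char **dst, const unsigned char *data,
                  size_t len, struct short_copy_buffer *cb)
{
        size_t block_len = len & ~(size_t)(BLOCK_BYTES - 1);

        memcpy(*dst, data, block_len);
        *dst += block_len;
        len -= block_len;

        if (len) {
                assert(cb->used == 0);
                memcpy(cb->buf, data + block_len, len);
                cb->used = len;
        }
}

/* Like copy_aligned(), but first top up and drain a partially filled
 * bounce buffer, so the destination only ever sees full blocks.
 */
void copy_aligned_cb(unsigned char **dst, const unsigned char *data,
                     size_t len, struct short_copy_buffer *cb)
{
        if (cb->used) {
                size_t take = sizeof(cb->buf) - cb->used;

                if (take > len)
                        take = len;
                memcpy(cb->buf + cb->used, data, take);
                cb->used += take;
                if (cb->used < sizeof(cb->buf))
                        return;         /* still not a full block */

                memcpy(*dst, cb->buf, sizeof(cb->buf));
                *dst += sizeof(cb->buf);
                data += take;
                len -= take;
                cb->used = 0;
        }
        copy_aligned(dst, data, len, cb);
}

/* Final flush: write the whole block, junk padding and all, so even the
 * last write is block sized.
 */
void flush_copy_buffer(unsigned char *dst, struct short_copy_buffer *cb)
{
        if (cb->used)
                memcpy(dst, cb->buf, sizeof(cb->buf));
}

Writing the padding is harmless because the PIO option descriptor built in efx_enqueue_skb_pio() carries the real byte count (ESF_DZ_TX_PIO_BYTE_CNT is set to skb->len), so the padding never becomes part of the packet.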
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 770036bc2d87..513ed8b1ba58 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -839,7 +839,7 @@ static int meth_probe(struct platform_device *pdev)
dev->watchdog_timeo = timeout;
dev->irq = MACE_ETHERNET_IRQ;
dev->base_addr = (unsigned long)&mace->eth;
- memcpy(dev->dev_addr, o2meth_eaddr, 6);
+ memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
priv = netdev_priv(dev);
spin_lock_init(&priv->meth_lock);
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index ee18e6f7b4fe..acbbe48a519c 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1921,7 +1921,6 @@ static void sis190_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->phy_task);
unregister_netdev(dev);
sis190_release_board(pdev);
- pci_set_drvdata(pdev, NULL);
}
static struct pci_driver sis190_pci_driver = {
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 03b256af7ed5..8ae1f8a7bf38 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -91,9 +91,9 @@ static int rx_copybreak;
/* These identify the driver base version and may not be removed. */
static char version[] =
-DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
+DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
static char version2[] =
-" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
+" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
@@ -332,9 +332,7 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(KERN_INFO "%s%s", version, version2);
+ pr_info_once("%s%s\n", version, version2);
#endif
card_idx++;
@@ -423,9 +421,9 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
if (debug > 2) {
- dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
+ dev_dbg(&pdev->dev, "EEPROM contents:\n");
for (i = 0; i < 64; i++)
- printk(" %4.4x%s", read_eeprom(ep, i),
+ pr_cont(" %4.4x%s", read_eeprom(ep, i),
i % 16 == 15 ? "\n" : "");
}
@@ -490,10 +488,10 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto err_out_unmap_rx;
- printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n",
- dev->name, pci_id_tbl[chip_idx].name,
- (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
- dev->dev_addr);
+ netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
+ pci_id_tbl[chip_idx].name,
+ (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
+ dev->dev_addr);
out:
return ret;
@@ -703,9 +701,8 @@ static int epic_open(struct net_device *dev)
mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
if (dev->if_port == 1) {
if (debug > 1)
- printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
- "status %4.4x.\n",
- dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
+ netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
+ mdio_read(dev, ep->phys[0], MII_BMSR));
}
} else {
int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
@@ -715,10 +712,10 @@ static int epic_open(struct net_device *dev)
else if (! (mii_lpa & LPA_LPACK))
mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
if (debug > 1)
- printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
- " register read of %4.4x.\n", dev->name,
- ep->mii.full_duplex ? "full" : "half",
- ep->phys[0], mii_lpa);
+ netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
+ ep->mii.full_duplex ? "full"
+ : "half",
+ ep->phys[0], mii_lpa);
}
}
@@ -738,10 +735,9 @@ static int epic_open(struct net_device *dev)
TxUnderrun);
if (debug > 1) {
- printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d "
- "status %4.4x %s-duplex.\n",
- dev->name, ioaddr, irq, er32(GENCTL),
- ep->mii.full_duplex ? "full" : "half");
+ netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
+ ioaddr, irq, er32(GENCTL),
+ ep->mii.full_duplex ? "full" : "half");
}
/* Set the timer to switch to check for link beat and perhaps switch
@@ -790,8 +786,8 @@ static void epic_restart(struct net_device *dev)
/* Soft reset the chip. */
ew32(GENCTL, 0x4001);
- printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
- dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
+ netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
+ ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
udelay(1);
/* This magic is documented in SMSC app note 7.15 */
@@ -827,9 +823,8 @@ static void epic_restart(struct net_device *dev)
((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
TxUnderrun);
- printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
- " interrupt %4.4x.\n",
- dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT));
+ netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
+ er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}
static void check_media(struct net_device *dev)
@@ -846,9 +841,9 @@ static void check_media(struct net_device *dev)
return;
if (ep->mii.full_duplex != duplex) {
ep->mii.full_duplex = duplex;
- printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
- " partner capability of %4.4x.\n", dev->name,
- ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
+ netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
+ ep->mii.full_duplex ? "full" : "half",
+ ep->phys[0], mii_lpa);
ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
}
}
@@ -861,11 +856,10 @@ static void epic_timer(unsigned long data)
int next_tick = 5*HZ;
if (debug > 3) {
- printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
- dev->name, er32(TxSTAT));
- printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
- "IntStatus %4.4x RxStatus %4.4x.\n", dev->name,
- er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
+ netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
+ er32(TxSTAT));
+ netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
+ er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
}
check_media(dev);
@@ -880,11 +874,11 @@ static void epic_tx_timeout(struct net_device *dev)
void __iomem *ioaddr = ep->ioaddr;
if (debug > 0) {
- printk(KERN_WARNING "%s: Transmit timeout using MII device, "
- "Tx status %4.4x.\n", dev->name, er16(TxSTAT));
+ netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
+ er16(TxSTAT));
if (debug > 1) {
- printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
- dev->name, ep->dirty_tx, ep->cur_tx);
+ netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
+ ep->dirty_tx, ep->cur_tx);
}
}
if (er16(TxSTAT) & 0x10) { /* Tx FIFO underflow. */
@@ -994,9 +988,8 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
ew32(COMMAND, TxQueued);
if (debug > 4)
- printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
- "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,
- entry, ctrl_word, er32(TxSTAT));
+ netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
+ skb->len, entry, ctrl_word, er32(TxSTAT));
return NETDEV_TX_OK;
}
@@ -1009,8 +1002,8 @@ static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
#ifndef final_version
/* There was an major error, log it. */
if (debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
- dev->name, status);
+ netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
+ status);
#endif
stats->tx_errors++;
if (status & 0x1050)
@@ -1057,9 +1050,8 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
#ifndef final_version
if (cur_tx - dirty_tx > TX_RING_SIZE) {
- printk(KERN_WARNING
- "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
- dev->name, dirty_tx, cur_tx, ep->tx_full);
+ netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dirty_tx, cur_tx, ep->tx_full);
dirty_tx += TX_RING_SIZE;
}
#endif
@@ -1086,8 +1078,8 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
ew32(INTSTAT, status & EpicNormalEvent);
if (debug > 4) {
- printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
- "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT));
+ netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
+ status, er32(INTSTAT));
}
if ((status & IntrSummary) == 0)
@@ -1125,8 +1117,8 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
ew32(COMMAND, RestartTx);
}
if (status & PCIBusErr170) {
- printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
- dev->name, status);
+ netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
+ status);
epic_pause(dev);
epic_restart(dev);
}
@@ -1136,8 +1128,8 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
out:
if (debug > 3) {
- printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
- dev->name, status);
+ netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
+ status);
}
return IRQ_RETVAL(handled);
@@ -1151,7 +1143,7 @@ static int epic_rx(struct net_device *dev, int budget)
int work_done = 0;
if (debug > 4)
- printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
+ netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
ep->rx_ring[entry].rxstatus);
if (rx_work_limit > budget)
@@ -1162,16 +1154,17 @@ static int epic_rx(struct net_device *dev, int budget)
int status = ep->rx_ring[entry].rxstatus;
if (debug > 4)
- printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
+ netdev_dbg(dev, " epic_rx() status was %8.8x.\n",
+ status);
if (--rx_work_limit < 0)
break;
if (status & 0x2006) {
if (debug > 2)
- printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
- dev->name, status);
+ netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
+ status);
if (status & 0x2000) {
- printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
- "multiple buffers, status %4.4x!\n", dev->name, status);
+ netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
+ status);
dev->stats.rx_length_errors++;
} else if (status & 0x0006)
/* Rx Frame errors are counted in hardware. */
@@ -1183,9 +1176,8 @@ static int epic_rx(struct net_device *dev, int budget)
struct sk_buff *skb;
if (pkt_len > PKT_BUF_SZ - 4) {
- printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
- "%d bytes.\n",
- dev->name, status, pkt_len);
+ netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
+ status, pkt_len);
pkt_len = 1514;
}
/* Check if the packet is long enough to accept without copying
@@ -1305,8 +1297,8 @@ static int epic_close(struct net_device *dev)
napi_disable(&ep->napi);
if (debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, er32(INTSTAT));
+ netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
+ er32(INTSTAT));
del_timer_sync(&ep->timer);
@@ -1324,7 +1316,7 @@ static int epic_close(struct net_device *dev)
ep->rx_ring[i].buflength = 0;
if (skb) {
pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
- ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
}
ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
@@ -1535,7 +1527,6 @@ static void epic_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
/* pci_power_off(pdev, -1); */
}
@@ -1588,8 +1579,7 @@ static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
- printk (KERN_INFO "%s%s",
- version, version2);
+ pr_info("%s%s\n", version, version2);
#endif
return pci_register_driver(&epic_driver);
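
The epic100 changes above replace the hand-rolled "print the version banner once" code and the dev->name-prefixed printk()s with pr_info_once() and the netdev_*()/dev_dbg() helpers. As a rough userspace analogue of those two patterns, here is a minimal sketch; the log_once() and dev_dbg_lvl() macro names are invented for illustration and are not kernel APIs.

#include <stdio.h>

static int debug_level = 2;     /* stands in for the module's "debug" parameter */

/* Emit the message only on the first call, like pr_info_once(): the static
 * counter lives inside the macro expansion, one per call site.
 */
#define log_once(fmt, ...)                                      \
        do {                                                    \
                static int printed;                             \
                if (!printed++)                                 \
                        fprintf(stderr, fmt, ##__VA_ARGS__);    \
        } while (0)

/* Gate on a debug level and prefix with the device name, like the
 * "if (debug > n) netdev_dbg(dev, ...)" pattern above.  The ## form of
 * __VA_ARGS__ is the GNU extension the kernel itself relies on.
 */
#define dev_dbg_lvl(lvl, name, fmt, ...)                                \
        do {                                                            \
                if (debug_level > (lvl))                                \
                        fprintf(stderr, "%s: " fmt, (name), ##__VA_ARGS__); \
        } while (0)

int main(void)
{
        log_once("epic100.c:v1.11 loaded\n");
        log_once("epic100.c:v1.11 loaded\n");   /* suppressed */
        dev_dbg_lvl(1, "eth0", "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
                    3, 1, 5, 4);
        return 0;
}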
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index afe01c4088a3..0f096a890059 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -106,16 +106,16 @@ MODULE_ALIAS("platform:smc911x");
#define POWER_DOWN 1
#if SMC_DEBUG > 0
-#define DBG(n, args...) \
+#define DBG(n, dev, args...) \
do { \
if (SMC_DEBUG & (n)) \
- printk(args); \
+ netdev_dbg(dev, args); \
} while (0)
-#define PRINTK(args...) printk(args)
+#define PRINTK(dev, args...) netdev_info(dev, args)
#else
-#define DBG(n, args...) do { } while (0)
-#define PRINTK(args...) printk(KERN_DEBUG args)
+#define DBG(n, dev, args...) do { } while (0)
+#define PRINTK(dev, args...) netdev_dbg(dev, args)
#endif
#if SMC_DEBUG_PKTS > 0
@@ -130,21 +130,23 @@ static void PRINT_PKT(u_char *buf, int length)
for (i = 0; i < lines ; i ++) {
int cur;
+ printk(KERN_DEBUG);
for (cur = 0; cur < 8; cur++) {
u_char a, b;
a = *buf++;
b = *buf++;
- printk("%02x%02x ", a, b);
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
+ printk(KERN_DEBUG);
for (i = 0; i < remainder/2 ; i++) {
u_char a, b;
a = *buf++;
b = *buf++;
- printk("%02x%02x ", a, b);
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
#else
#define PRINT_PKT(x...) do { } while (0)
@@ -176,7 +178,7 @@ static void smc911x_reset(struct net_device *dev)
unsigned int reg, timeout=0, resets=1, irq_cfg;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
/* Take out of PM setting first */
if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
@@ -188,7 +190,7 @@ static void smc911x_reset(struct net_device *dev)
reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_;
} while (--timeout && !reg);
if (timeout == 0) {
- PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name);
+ PRINTK(dev, "smc911x_reset timeout waiting for PM restore\n");
return;
}
}
@@ -206,14 +208,14 @@ static void smc911x_reset(struct net_device *dev)
reg = SMC_GET_HW_CFG(lp);
/* If chip indicates reset timeout then try again */
if (reg & HW_CFG_SRST_TO_) {
- PRINTK("%s: chip reset timeout, retrying...\n", dev->name);
+ PRINTK(dev, "chip reset timeout, retrying...\n");
resets++;
break;
}
} while (--timeout && (reg & HW_CFG_SRST_));
}
if (timeout == 0) {
- PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name);
+ PRINTK(dev, "smc911x_reset timeout waiting for reset\n");
return;
}
@@ -223,7 +225,7 @@ static void smc911x_reset(struct net_device *dev)
udelay(10);
if (timeout == 0){
- PRINTK("%s: smc911x_reset timeout waiting for EEPROM busy\n", dev->name);
+ PRINTK(dev, "smc911x_reset timeout waiting for EEPROM busy\n");
return;
}
@@ -270,7 +272,7 @@ static void smc911x_enable(struct net_device *dev)
unsigned mask, cfg, cr;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
@@ -296,7 +298,7 @@ static void smc911x_enable(struct net_device *dev)
/* Turn on receiver and enable RX */
if (cr & MAC_CR_RXEN_)
- DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
+ DBG(SMC_DEBUG_RX, dev, "Receiver already enabled\n");
SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);
@@ -327,7 +329,7 @@ static void smc911x_shutdown(struct net_device *dev)
unsigned cr;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "%s: --> %s\n", CARDNAME, __func__);
/* Disable IRQ's */
SMC_SET_INT_EN(lp, 0);
@@ -346,7 +348,8 @@ static inline void smc911x_drop_pkt(struct net_device *dev)
struct smc911x_local *lp = netdev_priv(dev);
unsigned int fifo_count, timeout, reg;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "%s: --> %s\n",
+ CARDNAME, __func__);
fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
if (fifo_count <= 4) {
/* Manually dump the packet data */
@@ -361,7 +364,7 @@ static inline void smc911x_drop_pkt(struct net_device *dev)
reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_;
} while (--timeout && reg);
if (timeout == 0) {
- PRINTK("%s: timeout waiting for RX fast forward\n", dev->name);
+ PRINTK(dev, "timeout waiting for RX fast forward\n");
}
}
}
@@ -379,11 +382,11 @@ static inline void smc911x_rcv(struct net_device *dev)
struct sk_buff *skb;
unsigned char *data;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
- dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "--> %s\n",
+ __func__);
status = SMC_GET_RX_STS_FIFO(lp);
- DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n",
- dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
+ DBG(SMC_DEBUG_RX, dev, "Rx pkt len %d status 0x%08x\n",
+ (status & 0x3fff0000) >> 16, status & 0xc000ffff);
pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
if (status & RX_STS_ES_) {
/* Deal with a bad packet */
@@ -403,8 +406,7 @@ static inline void smc911x_rcv(struct net_device *dev)
/* Alloc a buffer with extra room for DMA alignment */
skb = netdev_alloc_skb(dev, pkt_len+32);
if (unlikely(skb == NULL)) {
- PRINTK( "%s: Low memory, rcvd packet dropped.\n",
- dev->name);
+ PRINTK(dev, "Low memory, rcvd packet dropped.\n");
dev->stats.rx_dropped++;
smc911x_drop_pkt(dev);
return;
@@ -422,8 +424,8 @@ static inline void smc911x_rcv(struct net_device *dev)
/* Lower the FIFO threshold if possible */
fifo = SMC_GET_FIFO_INT(lp);
if (fifo & 0xFF) fifo--;
- DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n",
- dev->name, fifo & 0xff);
+ DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n",
+ fifo & 0xff);
SMC_SET_FIFO_INT(lp, fifo);
/* Setup RX DMA */
SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
@@ -436,7 +438,7 @@ static inline void smc911x_rcv(struct net_device *dev)
SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
SMC_PULL_DATA(lp, data, pkt_len+2+3);
- DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
+ DBG(SMC_DEBUG_PKTS, dev, "Received packet\n");
PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -456,7 +458,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
unsigned int cmdA, cmdB, len;
unsigned char *buf;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__);
BUG_ON(lp->pending_tx_skb == NULL);
skb = lp->pending_tx_skb;
@@ -481,12 +483,12 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
/* tag is packet length so we can use this in stats update later */
cmdB = (skb->len << 16) | (skb->len & 0x7FF);
- DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
- dev->name, len, len, buf, cmdA, cmdB);
+ DBG(SMC_DEBUG_TX, dev, "TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
+ len, len, buf, cmdA, cmdB);
SMC_SET_TX_FIFO(lp, cmdA);
SMC_SET_TX_FIFO(lp, cmdB);
- DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name);
+ DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n");
PRINT_PKT(buf, len <= 64 ? len : 64);
/* Send pkt via PIO or DMA */
@@ -517,20 +519,20 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int free;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
- dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
+ __func__);
spin_lock_irqsave(&lp->lock, flags);
BUG_ON(lp->pending_tx_skb != NULL);
free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
- DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free);
+ DBG(SMC_DEBUG_TX, dev, "TX free space %d\n", free);
/* Turn off the flow when running out of space in FIFO */
if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
- DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n",
- dev->name, free);
+ DBG(SMC_DEBUG_TX, dev, "Disabling data flow due to low FIFO space (%d)\n",
+ free);
/* Reenable when at least 1 packet of size MTU present */
SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
lp->tx_throttle = 1;
@@ -545,8 +547,8 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
* End padding 15 bytes
*/
if (unlikely(free < (skb->len + 8 + 15 + 15))) {
- printk("%s: No Tx free space %d < %d\n",
- dev->name, free, skb->len);
+ netdev_warn(dev, "No Tx free space %d < %d\n",
+ free, skb->len);
lp->pending_tx_skb = NULL;
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
@@ -561,13 +563,13 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
* the DMA IRQ starts it
*/
if (lp->txdma_active) {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Tx DMA running, deferring packet\n");
lp->pending_tx_skb = skb;
netif_stop_queue(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
} else {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Activating Tx DMA\n");
lp->txdma_active = 1;
}
}
@@ -589,20 +591,19 @@ static void smc911x_tx(struct net_device *dev)
struct smc911x_local *lp = netdev_priv(dev);
unsigned int tx_status;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
- dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
+ __func__);
/* Collect the TX status */
while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
- DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n",
- dev->name,
- (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
+ DBG(SMC_DEBUG_TX, dev, "Tx stat FIFO used 0x%04x\n",
+ (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
tx_status = SMC_GET_TX_STS_FIFO(lp);
dev->stats.tx_packets++;
dev->stats.tx_bytes+=tx_status>>16;
- DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n",
- dev->name, (tx_status & 0xffff0000) >> 16,
- tx_status & 0x0000ffff);
+ DBG(SMC_DEBUG_TX, dev, "Tx FIFO tag 0x%04x status 0x%04x\n",
+ (tx_status & 0xffff0000) >> 16,
+ tx_status & 0x0000ffff);
/* count Tx errors, but ignore lost carrier errors when in
* full-duplex mode */
if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
@@ -640,8 +641,8 @@ static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
SMC_GET_MII(lp, phyreg, phyaddr, phydata);
- DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
- __func__, phyaddr, phyreg, phydata);
+ DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
+ __func__, phyaddr, phyreg, phydata);
return phydata;
}
@@ -654,8 +655,8 @@ static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
{
struct smc911x_local *lp = netdev_priv(dev);
- DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
- __func__, phyaddr, phyreg, phydata);
+ DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __func__, phyaddr, phyreg, phydata);
SMC_SET_MII(lp, phyreg, phyaddr, phydata);
}
@@ -670,7 +671,7 @@ static void smc911x_phy_detect(struct net_device *dev)
int phyaddr;
unsigned int cfg, id1, id2;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
lp->phy_type = 0;
@@ -731,8 +732,8 @@ static void smc911x_phy_detect(struct net_device *dev)
lp->phy_type = id1 << 16 | id2;
}
- DBG(SMC_DEBUG_MISC, "%s: phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
- dev->name, id1, id2, lp->mii.phy_id);
+ DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
+ id1, id2, lp->mii.phy_id);
}
/*
@@ -745,7 +746,7 @@ static int smc911x_phy_fixed(struct net_device *dev)
int phyaddr = lp->mii.phy_id;
int bmcr;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
/* Enter Link Disable state */
SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
@@ -792,7 +793,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
unsigned long flags;
unsigned int reg;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
reg = SMC_GET_PMT_CTRL(lp);
@@ -851,18 +852,18 @@ static void smc911x_phy_check_media(struct net_device *dev, int init)
int phyaddr = lp->mii.phy_id;
unsigned int bmcr, cr;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
/* duplex state has changed */
SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
SMC_GET_MAC_CR(lp, cr);
if (lp->mii.full_duplex) {
- DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "Configuring for full-duplex mode\n");
bmcr |= BMCR_FULLDPLX;
cr |= MAC_CR_RCVOWN_;
} else {
- DBG(SMC_DEBUG_MISC, "%s: Configuring for half-duplex mode\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "Configuring for half-duplex mode\n");
bmcr &= ~BMCR_FULLDPLX;
cr &= ~MAC_CR_RCVOWN_;
}
@@ -891,7 +892,7 @@ static void smc911x_phy_configure(struct work_struct *work)
int status;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
/*
* We should not be called if phy_type is zero.
@@ -900,7 +901,7 @@ static void smc911x_phy_configure(struct work_struct *work)
return;
if (smc911x_phy_reset(dev, phyaddr)) {
- printk("%s: PHY reset timed out\n", dev->name);
+ netdev_info(dev, "PHY reset timed out\n");
return;
}
spin_lock_irqsave(&lp->lock, flags);
@@ -922,7 +923,7 @@ static void smc911x_phy_configure(struct work_struct *work)
/* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps);
if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
- printk(KERN_INFO "Auto negotiation NOT supported\n");
+ netdev_info(dev, "Auto negotiation NOT supported\n");
smc911x_phy_fixed(dev);
goto smc911x_phy_configure_exit;
}
@@ -960,8 +961,8 @@ static void smc911x_phy_configure(struct work_struct *work)
udelay(10);
SMC_GET_PHY_MII_ADV(lp, phyaddr, status);
- DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps);
- DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps);
+ DBG(SMC_DEBUG_MISC, dev, "phy caps=0x%04x\n", my_phy_caps);
+ DBG(SMC_DEBUG_MISC, dev, "phy advertised caps=0x%04x\n", my_ad_caps);
/* Restart auto-negotiation process in order to advertise my caps */
SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);
@@ -984,7 +985,7 @@ static void smc911x_phy_interrupt(struct net_device *dev)
int phyaddr = lp->mii.phy_id;
int status;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
if (lp->phy_type == 0)
return;
@@ -992,10 +993,10 @@ static void smc911x_phy_interrupt(struct net_device *dev)
smc911x_phy_check_media(dev, 0);
/* read to clear status bits */
SMC_GET_PHY_INT_SRC(lp, phyaddr,status);
- DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n",
- dev->name, status & 0xffff);
- DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n",
- dev->name, SMC_GET_AFC_CFG(lp));
+ DBG(SMC_DEBUG_MISC, dev, "PHY interrupt status 0x%04x\n",
+ status & 0xffff);
+ DBG(SMC_DEBUG_MISC, dev, "AFC_CFG 0x%08x\n",
+ SMC_GET_AFC_CFG(lp));
}
/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
@@ -1012,7 +1013,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
unsigned int rx_overrun=0, cr, pkts;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
@@ -1033,8 +1034,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
do {
status = SMC_GET_INT(lp);
- DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
- dev->name, status, mask, status & ~mask);
+ DBG(SMC_DEBUG_MISC, dev, "INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
+ status, mask, status & ~mask);
status &= mask;
if (!status)
@@ -1066,7 +1067,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
SMC_GET_MAC_CR(lp, cr);
cr &= ~MAC_CR_RXEN_;
SMC_SET_MAC_CR(lp, cr);
- DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+ DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
dev->stats.rx_errors++;
dev->stats.rx_fifo_errors++;
}
@@ -1078,7 +1079,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
cr &= ~MAC_CR_RXEN_;
SMC_SET_MAC_CR(lp, cr);
rx_overrun=1;
- DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+ DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
dev->stats.rx_errors++;
dev->stats.rx_fifo_errors++;
}
@@ -1087,23 +1088,23 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
/* Handle receive condition */
if ((status & INT_STS_RSFL_) || rx_overrun) {
unsigned int fifo;
- DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name);
+ DBG(SMC_DEBUG_RX, dev, "RX irq\n");
fifo = SMC_GET_RX_FIFO_INF(lp);
pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
- DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n",
- dev->name, pkts, fifo & 0xFFFF );
+ DBG(SMC_DEBUG_RX, dev, "Rx FIFO pkts %d, bytes %d\n",
+ pkts, fifo & 0xFFFF);
if (pkts != 0) {
#ifdef SMC_USE_DMA
unsigned int fifo;
if (lp->rxdma_active){
- DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
- "%s: RX DMA active\n", dev->name);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
+ "RX DMA active\n");
/* The DMA is already running so up the IRQ threshold */
fifo = SMC_GET_FIFO_INT(lp) & ~0xFF;
fifo |= pkts & 0xFF;
- DBG(SMC_DEBUG_RX,
- "%s: Setting RX stat FIFO threshold to %d\n",
- dev->name, fifo & 0xff);
+ DBG(SMC_DEBUG_RX, dev,
+ "Setting RX stat FIFO threshold to %d\n",
+ fifo & 0xff);
SMC_SET_FIFO_INT(lp, fifo);
} else
#endif
@@ -1113,7 +1114,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
}
/* Handle transmit FIFO available */
if (status & INT_STS_TDFA_) {
- DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name);
+ DBG(SMC_DEBUG_TX, dev, "TX data FIFO space available irq\n");
SMC_SET_FIFO_TDA(lp, 0xFF);
lp->tx_throttle = 0;
#ifdef SMC_USE_DMA
@@ -1125,9 +1126,9 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
/* Handle transmit done condition */
#if 1
if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC,
- "%s: Tx stat FIFO limit (%d) /GPT irq\n",
- dev->name, (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, dev,
+ "Tx stat FIFO limit (%d) /GPT irq\n",
+ (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
smc911x_tx(dev);
SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
SMC_ACK_INT(lp, INT_STS_TSFL_);
@@ -1135,23 +1136,20 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
}
#else
if (status & INT_STS_TSFL_) {
- DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, );
+ DBG(SMC_DEBUG_TX, dev, "TX status FIFO limit (%d) irq\n", ?);
smc911x_tx(dev);
SMC_ACK_INT(lp, INT_STS_TSFL_);
}
if (status & INT_STS_GPT_INT_) {
- DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
- dev->name,
- SMC_GET_IRQ_CFG(lp),
- SMC_GET_FIFO_INT(lp),
- SMC_GET_RX_CFG(lp));
- DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x "
- "Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
- dev->name,
- (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
- SMC_GET_RX_FIFO_INF(lp) & 0xffff,
- SMC_GET_RX_STS_FIFO_PEEK(lp));
+ DBG(SMC_DEBUG_RX, dev, "IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
+ SMC_GET_IRQ_CFG(lp),
+ SMC_GET_FIFO_INT(lp),
+ SMC_GET_RX_CFG(lp));
+ DBG(SMC_DEBUG_RX, dev, "Rx Stat FIFO Used 0x%02x Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
+ (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
+ SMC_GET_RX_FIFO_INF(lp) & 0xffff,
+ SMC_GET_RX_STS_FIFO_PEEK(lp));
SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
SMC_ACK_INT(lp, INT_STS_GPT_INT_);
}
@@ -1159,7 +1157,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
/* Handle PHY interrupt condition */
if (status & INT_STS_PHY_INT_) {
- DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "PHY irq\n");
smc911x_phy_interrupt(dev);
SMC_ACK_INT(lp, INT_STS_PHY_INT_);
}
@@ -1168,8 +1166,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
/* restore mask state */
SMC_SET_INT_EN(lp, mask);
- DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n",
- dev->name, 8-timeout);
+ DBG(SMC_DEBUG_MISC, dev, "Interrupt done (%d loops)\n",
+ 8-timeout);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -1185,9 +1183,9 @@ smc911x_tx_dma_irq(int dma, void *data)
struct sk_buff *skb = lp->current_tx_skb;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
/* Clear the DMA interrupt sources */
SMC_DMA_ACK_IRQ(dev, dma);
BUG_ON(skb == NULL);
@@ -1198,8 +1196,8 @@ smc911x_tx_dma_irq(int dma, void *data)
if (lp->pending_tx_skb != NULL)
smc911x_hardware_send_pkt(dev);
else {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
- "%s: No pending Tx packets. DMA disabled\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
+ "No pending Tx packets. DMA disabled\n");
spin_lock_irqsave(&lp->lock, flags);
lp->txdma_active = 0;
if (!lp->tx_throttle) {
@@ -1208,8 +1206,8 @@ smc911x_tx_dma_irq(int dma, void *data)
spin_unlock_irqrestore(&lp->lock, flags);
}
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
- "%s: TX DMA irq completed\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
+ "TX DMA irq completed\n");
}
static void
smc911x_rx_dma_irq(int dma, void *data)
@@ -1221,8 +1219,8 @@ smc911x_rx_dma_irq(int dma, void *data)
unsigned long flags;
unsigned int pkts;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
- DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n");
/* Clear the DMA interrupt sources */
SMC_DMA_ACK_IRQ(dev, dma);
dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
@@ -1242,9 +1240,9 @@ smc911x_rx_dma_irq(int dma, void *data)
lp->rxdma_active = 0;
}
spin_unlock_irqrestore(&lp->lock, flags);
- DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
- "%s: RX DMA irq completed. DMA RX FIFO PKTS %d\n",
- dev->name, pkts);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
+ "RX DMA irq completed. DMA RX FIFO PKTS %d\n",
+ pkts);
}
#endif /* SMC_USE_DMA */
@@ -1268,14 +1266,14 @@ static void smc911x_timeout(struct net_device *dev)
int status, mask;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
status = SMC_GET_INT(lp);
mask = SMC_GET_INT_EN(lp);
spin_unlock_irqrestore(&lp->lock, flags);
- DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n",
- dev->name, status, mask);
+ DBG(SMC_DEBUG_MISC, dev, "INT 0x%02x MASK 0x%02x\n",
+ status, mask);
/* Dump the current TX FIFO contents and restart */
mask = SMC_GET_TX_CFG(lp);
@@ -1306,7 +1304,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
unsigned int mcr, update_multicast = 0;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
SMC_GET_MAC_CR(lp, mcr);
@@ -1314,7 +1312,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
- DBG(SMC_DEBUG_MISC, "%s: RCR_PRMS\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "RCR_PRMS\n");
mcr |= MAC_CR_PRMS_;
}
/*
@@ -1323,7 +1321,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* checked before the table is
*/
else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
- DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "RCR_ALMUL\n");
mcr |= MAC_CR_MCPAS_;
}
@@ -1363,8 +1361,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
/* now, the table can be loaded into the chipset */
update_multicast = 1;
} else {
- DBG(SMC_DEBUG_MISC, "%s: ~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n",
- dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n");
mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
/*
@@ -1378,9 +1375,9 @@ static void smc911x_set_multicast_list(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
SMC_SET_MAC_CR(lp, mcr);
if (update_multicast) {
- DBG(SMC_DEBUG_MISC,
- "%s: update mcast hash table 0x%08x 0x%08x\n",
- dev->name, multicast_table[0], multicast_table[1]);
+ DBG(SMC_DEBUG_MISC, dev,
+ "update mcast hash table 0x%08x 0x%08x\n",
+ multicast_table[0], multicast_table[1]);
SMC_SET_HASHL(lp, multicast_table[0]);
SMC_SET_HASHH(lp, multicast_table[1]);
}
@@ -1398,7 +1395,7 @@ smc911x_open(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
/* reset the hardware */
smc911x_reset(dev);
@@ -1425,7 +1422,7 @@ static int smc911x_close(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
netif_stop_queue(dev);
netif_carrier_off(dev);
@@ -1459,7 +1456,7 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
int ret, status;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
cmd->maxtxpkt = 1;
cmd->maxrxpkt = 1;
@@ -1597,16 +1594,16 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
e2p_cmd = SMC_GET_E2P_CMD(lp);
for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
- PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
- dev->name, __func__);
+ PRINTK(dev, "%s timeout waiting for EEPROM to respond\n",
+ __func__);
return -EFAULT;
}
mdelay(1);
e2p_cmd = SMC_GET_E2P_CMD(lp);
}
if (timeout == 0) {
- PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
- dev->name, __func__);
+ PRINTK(dev, "%s timeout waiting for EEPROM CMD not busy\n",
+ __func__);
return -ETIMEDOUT;
}
return 0;
@@ -1719,7 +1716,7 @@ static int smc911x_findirq(struct net_device *dev)
int timeout = 20;
unsigned long cookie;
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
cookie = probe_irq_on();
@@ -1799,13 +1796,14 @@ static int smc911x_probe(struct net_device *dev)
const char *version_string;
unsigned long irq_flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
/* First, see if the endian word is recognized */
val = SMC_GET_BYTE_TEST(lp);
- DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
+ DBG(SMC_DEBUG_MISC, dev, "%s: endian probe returned 0x%04x\n",
+ CARDNAME, val);
if (val != 0x87654321) {
- printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
+ netdev_err(dev, "Invalid chip endian 0x%08x\n", val);
retval = -ENODEV;
goto err_out;
}
@@ -1816,26 +1814,29 @@ static int smc911x_probe(struct net_device *dev)
* as future revisions could be added.
*/
chip_id = SMC_GET_PN(lp);
- DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id);
+ DBG(SMC_DEBUG_MISC, dev, "%s: id probe returned 0x%04x\n",
+ CARDNAME, chip_id);
for(i=0;chip_ids[i].id != 0; i++) {
if (chip_ids[i].id == chip_id) break;
}
if (!chip_ids[i].id) {
- printk(KERN_ERR "Unknown chip ID %04x\n", chip_id);
+ netdev_err(dev, "Unknown chip ID %04x\n", chip_id);
retval = -ENODEV;
goto err_out;
}
version_string = chip_ids[i].name;
revision = SMC_GET_REV(lp);
- DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision);
+ DBG(SMC_DEBUG_MISC, dev, "%s: revision = 0x%04x\n", CARDNAME, revision);
/* At this point I'll assume that the chip is an SMC911x. */
- DBG(SMC_DEBUG_MISC, "%s: Found a %s\n", CARDNAME, chip_ids[i].name);
+ DBG(SMC_DEBUG_MISC, dev, "%s: Found a %s\n",
+ CARDNAME, chip_ids[i].name);
/* Validate the TX FIFO size requested */
if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
- printk(KERN_ERR "Invalid TX FIFO size requested %d\n", tx_fifo_kb);
+ netdev_err(dev, "Invalid TX FIFO size requested %d\n",
+ tx_fifo_kb);
retval = -EINVAL;
goto err_out;
}
@@ -1887,14 +1888,13 @@ static int smc911x_probe(struct net_device *dev)
case 14:/* 1920 Rx Data Fifo Size */
lp->afc_cfg=0x0006032F;break;
default:
- PRINTK("%s: ERROR -- no AFC_CFG setting found",
- dev->name);
+ PRINTK(dev, "ERROR -- no AFC_CFG setting found");
break;
}
- DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX,
- "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
- lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
+ DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, dev,
+ "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
+ lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
spin_lock_init(&lp->lock);
@@ -1924,8 +1924,7 @@ static int smc911x_probe(struct net_device *dev)
}
}
if (dev->irq == 0) {
- printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
- dev->name);
+ netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
retval = -ENODEV;
goto err_out;
}
@@ -1980,33 +1979,32 @@ static int smc911x_probe(struct net_device *dev)
retval = register_netdev(dev);
if (retval == 0) {
/* now, print out the card info, in a short format.. */
- printk("%s: %s (rev %d) at %#lx IRQ %d",
- dev->name, version_string, lp->revision,
- dev->base_addr, dev->irq);
+ netdev_info(dev, "%s (rev %d) at %#lx IRQ %d",
+ version_string, lp->revision,
+ dev->base_addr, dev->irq);
#ifdef SMC_USE_DMA
if (lp->rxdma != -1)
- printk(" RXDMA %d ", lp->rxdma);
+ pr_cont(" RXDMA %d", lp->rxdma);
if (lp->txdma != -1)
- printk("TXDMA %d", lp->txdma);
+ pr_cont(" TXDMA %d", lp->txdma);
#endif
- printk("\n");
+ pr_cont("\n");
if (!is_valid_ether_addr(dev->dev_addr)) {
- printk("%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", dev->name);
+ netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
} else {
/* Print the Ethernet address */
- printk("%s: Ethernet addr: %pM\n",
- dev->name, dev->dev_addr);
+ netdev_info(dev, "Ethernet addr: %pM\n",
+ dev->dev_addr);
}
if (lp->phy_type == 0) {
- PRINTK("%s: No PHY found\n", dev->name);
+ PRINTK(dev, "No PHY found\n");
} else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
- PRINTK("%s: LAN911x Internal PHY\n", dev->name);
+ PRINTK(dev, "LAN911x Internal PHY\n");
} else {
- PRINTK("%s: External PHY 0x%08x\n", dev->name, lp->phy_type);
+ PRINTK(dev, "External PHY 0x%08x\n", lp->phy_type);
}
}
@@ -2025,7 +2023,7 @@ err_out:
}
/*
- * smc911x_init(void)
+ * smc911x_drv_probe(void)
*
* Output:
* 0 --> there is a device
@@ -2039,6 +2037,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
void __iomem *addr;
int ret;
+ /* ndev is not valid yet, so avoid passing it in. */
DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -2093,7 +2092,7 @@ release_both:
release_1:
release_mem_region(res->start, SMC911X_IO_EXTENT);
out:
- printk("%s: not found (%d).\n", CARDNAME, ret);
+ pr_info("%s: not found (%d).\n", CARDNAME, ret);
}
#ifdef SMC_USE_DMA
else {
@@ -2111,7 +2110,7 @@ static int smc911x_drv_remove(struct platform_device *pdev)
struct smc911x_local *lp = netdev_priv(ndev);
struct resource *res;
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
unregister_netdev(ndev);
@@ -2140,7 +2139,7 @@ static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
struct net_device *ndev = platform_get_drvdata(dev);
struct smc911x_local *lp = netdev_priv(ndev);
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
if (ndev) {
if (netif_running(ndev)) {
netif_device_detach(ndev);
@@ -2158,7 +2157,7 @@ static int smc911x_drv_resume(struct platform_device *dev)
{
struct net_device *ndev = platform_get_drvdata(dev);
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
if (ndev) {
struct smc911x_local *lp = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
index d51261ba4642..9965da39281b 100644
--- a/drivers/net/ethernet/smsc/smc911x.h
+++ b/drivers/net/ethernet/smsc/smc911x.h
@@ -227,7 +227,7 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
#define SMC_DMA_ACK_IRQ(dev, dma) \
{ \
if (DCSR(dma) & DCSR_BUSERR) { \
- printk("%s: DMA %d bus error!\n", dev->name, dma); \
+ netdev_err(dev, "DMA %d bus error!\n", dma); \
} \
DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \
}
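The pattern in this and the following SMSC hunks is uniform: drop the hand-rolled "%s: "/dev->name prefix and let the netdev_<level>() helpers supply it. A rough sketch of what the converted line above emits (the device and interface names are illustrative, not taken from the patch):

	/* netdev_err() prefixes the driver name, the parent device name and
	 * the interface name automatically, roughly:
	 *   smc911x smc911x.0 eth0: DMA 3 bus error!
	 * which is why the old printk("%s: ...", dev->name, ...) form
	 * carried a redundant prefix. */
	netdev_err(dev, "DMA %d bus error!\n", dma);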
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index e85c2e7e8246..6f3491f67c42 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -55,7 +55,7 @@
----------------------------------------------------------------------------*/
static const char version[] =
- "smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)\n";
+ "smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)";
#include <linux/module.h>
#include <linux/kernel.h>
@@ -95,14 +95,6 @@ static const char version[] =
#define USE_32_BIT 1
#endif
-#if defined(__H8300H__) || defined(__H8300S__)
-#define NO_AUTOPROBE
-#undef insl
-#undef outsl
-#define insl(a,b,l) io_insl_noswap(a,b,l)
-#define outsl(a,b,l) io_outsl_noswap(a,b,l)
-#endif
-
/*
.the SMC9194 can be at any of the following port addresses. To change,
.for a slightly different card, you can add it to the array. Keep in
@@ -114,12 +106,6 @@ struct devlist {
unsigned int irq;
};
-#if defined(CONFIG_H8S_EDOSK2674)
-static struct devlist smc_devlist[] __initdata = {
- {.port = 0xf80000, .irq = 16},
- {.port = 0, .irq = 0 },
-};
-#else
static struct devlist smc_devlist[] __initdata = {
{.port = 0x200, .irq = 0},
{.port = 0x220, .irq = 0},
@@ -139,7 +125,6 @@ static struct devlist smc_devlist[] __initdata = {
{.port = 0x3E0, .irq = 0},
{.port = 0, .irq = 0},
};
-#endif
/*
. Wait time for memory to be free. This probably shouldn't be
. tuned that much, as waiting for this means nothing else happens
@@ -612,7 +597,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
packet_no = inb( ioaddr + PNR_ARR + 1 );
if ( packet_no & 0x80 ) {
/* or isn't there? BAD CHIP! */
- printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n");
+ netdev_dbg(dev, CARDNAME": Memory allocation failed.\n");
dev_kfree_skb_any(skb);
lp->saved_skb = NULL;
netif_wake_queue(dev);
@@ -625,7 +610,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
/* point to the beginning of the packet */
outw( PTR_AUTOINC , ioaddr + POINTER );
- PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length ));
+ PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length));
#if SMC_DEBUG > 2
print_packet( buf, length );
#endif
@@ -651,11 +636,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
#ifdef USE_32_BIT
if ( length & 0x2 ) {
outsl(ioaddr + DATA_1, buf, length >> 2 );
-#if !defined(__H8300H__) && !defined(__H8300S__)
outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
-#else
- ctrl_outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
-#endif
}
else
outsl(ioaddr + DATA_1, buf, length >> 2 );
@@ -865,7 +846,6 @@ static const struct net_device_ops smc_netdev_ops = {
static int __init smc_probe(struct net_device *dev, int ioaddr)
{
int i, memory, retval;
- static unsigned version_printed;
unsigned int bank;
const char *version_string;
@@ -899,7 +879,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
retval = -ENODEV;
goto err_out;
}
-#if !defined(CONFIG_H8S_EDOSK2674)
/* well, we've already written once, so hopefully another time won't
hurt. This time, I need to switch the bank register to bank 1,
so I can access the base address register */
@@ -914,10 +893,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
retval = -ENODEV;
goto err_out;
}
-#else
- (void)base_address_register; /* Warning suppression */
-#endif
-
/* check if the revision register is something that I recognize.
These might need to be added to later, as future revisions
@@ -937,8 +912,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
It might be prudent to check a listing of MAC addresses
against the hardware address, or do some other tests. */
- if (version_printed++ == 0)
- printk("%s", version);
+ pr_info_once("%s\n", version);
/* fill in some of the fields */
dev->base_addr = ioaddr;
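The pr_info_once() above replaces the driver's static version_printed counter. A simplified sketch of the once-only semantics it relies on (an illustration of the printk_once()-style expansion, not the kernel's exact macro):

	#define info_once_sketch(fmt, ...)			\
	({							\
		static bool __already_printed;			\
		if (!__already_printed) {			\
			__already_printed = true;		\
			pr_info(fmt, ##__VA_ARGS__);		\
		}						\
	})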
@@ -1027,21 +1001,21 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
/* now, print out the card info, in a short format.. */
- printk("%s: %s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ", dev->name,
- version_string, revision_register & 0xF, ioaddr, dev->irq,
- if_string, memory );
+ netdev_info(dev, "%s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ",
+ version_string, revision_register & 0xF, ioaddr, dev->irq,
+ if_string, memory);
/*
. Print the Ethernet address
*/
- printk("ADDR: %pM\n", dev->dev_addr);
+ netdev_info(dev, "ADDR: %pM\n", dev->dev_addr);
/* Grab the IRQ */
- retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
- if (retval) {
- printk("%s: unable to get IRQ %d (irqval=%d).\n", DRV_NAME,
- dev->irq, retval);
- goto err_out;
- }
+ retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
+ if (retval) {
+ netdev_warn(dev, "%s: unable to get IRQ %d (irqval=%d).\n",
+ DRV_NAME, dev->irq, retval);
+ goto err_out;
+ }
dev->netdev_ops = &smc_netdev_ops;
dev->watchdog_timeo = HZ/20;
@@ -1061,30 +1035,32 @@ static void print_packet( byte * buf, int length )
int remainder;
int lines;
- printk("Packet of length %d\n", length);
+ pr_debug("Packet of length %d\n", length);
lines = length / 16;
remainder = length % 16;
for ( i = 0; i < lines ; i ++ ) {
int cur;
+ printk(KERN_DEBUG);
for ( cur = 0; cur < 8; cur ++ ) {
byte a, b;
a = *(buf ++ );
b = *(buf ++ );
- printk("%02x%02x ", a, b );
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
+ printk(KERN_DEBUG);
for ( i = 0; i < remainder/2 ; i++ ) {
byte a, b;
a = *(buf ++ );
b = *(buf ++ );
- printk("%02x%02x ", a, b );
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
#endif
}
#endif
@@ -1151,9 +1127,8 @@ static void smc_timeout(struct net_device *dev)
{
/* If we get here, some higher level has decided we are broken.
There should really be a "kick me" function call instead. */
- printk(KERN_WARNING CARDNAME": transmit timed out, %s?\n",
- tx_done(dev) ? "IRQ conflict" :
- "network cable problem");
+ netdev_warn(dev, CARDNAME": transmit timed out, %s?\n",
+ tx_done(dev) ? "IRQ conflict" : "network cable problem");
/* "kick" the adaptor */
smc_reset( dev->base_addr );
smc_enable( dev->base_addr );
@@ -1323,8 +1298,7 @@ static void smc_tx( struct net_device * dev )
dev->stats.tx_errors++;
if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++;
if ( tx_status & TS_LATCOL ) {
- printk(KERN_DEBUG CARDNAME
- ": Late collision occurred on last xmit.\n");
+ netdev_dbg(dev, CARDNAME": Late collision occurred on last xmit.\n");
dev->stats.tx_window_errors++;
}
#if 0
@@ -1332,7 +1306,7 @@ static void smc_tx( struct net_device * dev )
#endif
if ( tx_status & TS_SUCCESS ) {
- printk(CARDNAME": Successful packet caused interrupt\n");
+ netdev_info(dev, CARDNAME": Successful packet caused interrupt\n");
}
/* re-enable transmit */
SMC_SELECT_BANK( 0 );
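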
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 656d2e2ebfc9..8ef70d9c20c1 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -740,7 +740,7 @@ static int smc91c92_resume(struct pcmcia_device *link)
(smc->cardid == PRODID_PSION_NET100))) {
i = osi_load_firmware(link);
if (i) {
- pr_err("smc91c92_cs: Failed to load firmware\n");
+ netdev_err(dev, "Failed to load firmware\n");
return i;
}
}
@@ -793,7 +793,7 @@ static int check_sig(struct pcmcia_device *link)
}
if (width) {
- pr_info("using 8-bit IO window\n");
+ netdev_info(dev, "using 8-bit IO window\n");
smc91c92_suspend(link);
pcmcia_fixup_iowidth(link);
@@ -1036,7 +1036,7 @@ static void smc_dump(struct net_device *dev)
save = inw(ioaddr + BANK_SELECT);
for (w = 0; w < 4; w++) {
SMC_SELECT_BANK(w);
- netdev_printk(KERN_DEBUG, dev, "bank %d: ", w);
+ netdev_dbg(dev, "bank %d: ", w);
for (i = 0; i < 14; i += 2)
pr_cont(" %04x", inw(ioaddr + i));
pr_cont("\n");
@@ -1213,8 +1213,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
if (smc->saved_skb) {
/* THIS SHOULD NEVER HAPPEN. */
dev->stats.tx_aborted_errors++;
- netdev_printk(KERN_DEBUG, dev,
- "Internal error -- sent packet while busy\n");
+ netdev_dbg(dev, "Internal error -- sent packet while busy\n");
return NETDEV_TX_BUSY;
}
smc->saved_skb = skb;
@@ -1254,7 +1253,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
}
/* Otherwise defer until the Tx-space-allocated interrupt. */
- pr_debug("%s: memory allocation deferred.\n", dev->name);
+ netdev_dbg(dev, "memory allocation deferred.\n");
outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT);
spin_unlock_irqrestore(&smc->lock, flags);
@@ -1317,8 +1316,8 @@ static void smc_eph_irq(struct net_device *dev)
SMC_SELECT_BANK(0);
ephs = inw(ioaddr + EPH);
- pr_debug("%s: Ethernet protocol handler interrupt, status"
- " %4.4x.\n", dev->name, ephs);
+ netdev_dbg(dev, "Ethernet protocol handler interrupt, status %4.4x.\n",
+ ephs);
/* Could be a counter roll-over warning: update stats. */
card_stats = inw(ioaddr + COUNTER);
/* single collisions */
@@ -1357,8 +1356,8 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
ioaddr = dev->base_addr;
- pr_debug("%s: SMC91c92 interrupt %d at %#x.\n", dev->name,
- irq, ioaddr);
+ netdev_dbg(dev, "SMC91c92 interrupt %d at %#x.\n",
+ irq, ioaddr);
spin_lock(&smc->lock);
smc->watchdog = 0;
@@ -1366,8 +1365,8 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
if ((saved_bank & 0xff00) != 0x3300) {
/* The device does not exist -- the card could be off-line, or
maybe it has been ejected. */
- pr_debug("%s: SMC91c92 interrupt %d for non-existent"
- "/ejected device.\n", dev->name, irq);
+ netdev_dbg(dev, "SMC91c92 interrupt %d for non-existent/ejected device.\n",
+ irq);
handled = 0;
goto irq_done;
}
@@ -1380,8 +1379,8 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
do { /* read the status flag, and mask it */
status = inw(ioaddr + INTERRUPT) & 0xff;
- pr_debug("%s: Status is %#2.2x (mask %#2.2x).\n", dev->name,
- status, mask);
+ netdev_dbg(dev, "Status is %#2.2x (mask %#2.2x).\n",
+ status, mask);
if ((status & mask) == 0) {
if (bogus_cnt == INTR_WORK)
handled = 0;
@@ -1425,15 +1424,15 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
smc_eph_irq(dev);
} while (--bogus_cnt);
- pr_debug(" Restoring saved registers mask %2.2x bank %4.4x"
- " pointer %4.4x.\n", mask, saved_bank, saved_pointer);
+ netdev_dbg(dev, " Restoring saved registers mask %2.2x bank %4.4x pointer %4.4x.\n",
+ mask, saved_bank, saved_pointer);
/* restore state register */
outw((mask<<8), ioaddr + INTERRUPT);
outw(saved_pointer, ioaddr + POINTER);
SMC_SELECT_BANK(saved_bank);
- pr_debug("%s: Exiting interrupt IRQ%d.\n", dev->name, irq);
+ netdev_dbg(dev, "Exiting interrupt IRQ%d.\n", irq);
irq_done:
@@ -1491,10 +1490,10 @@ static void smc_rx(struct net_device *dev)
rx_status = inw(ioaddr + DATA_1);
packet_length = inw(ioaddr + DATA_1) & 0x07ff;
- pr_debug("%s: Receive status %4.4x length %d.\n",
- dev->name, rx_status, packet_length);
+ netdev_dbg(dev, "Receive status %4.4x length %d.\n",
+ rx_status, packet_length);
- if (!(rx_status & RS_ERRORS)) {
+ if (!(rx_status & RS_ERRORS)) {
/* do stuff to make a new packet */
struct sk_buff *skb;
@@ -1502,7 +1501,7 @@ static void smc_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, packet_length+2);
if (skb == NULL) {
- pr_debug("%s: Low memory, packet dropped.\n", dev->name);
+ netdev_dbg(dev, "Low memory, packet dropped.\n");
dev->stats.rx_dropped++;
outw(MC_RELEASE, ioaddr + MMU_CMD);
return;
@@ -1643,7 +1642,7 @@ static void smc_reset(struct net_device *dev)
struct smc_private *smc = netdev_priv(dev);
int i;
- pr_debug("%s: smc91c92 reset called.\n", dev->name);
+ netdev_dbg(dev, "smc91c92 reset called.\n");
/* The first interaction must be a write to bring the chip out
of sleep mode. */
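All of the pr_debug()/netdev_printk(KERN_DEBUG, ...) sites in this file become netdev_dbg(). As a general kernel property (not something this patch changes), netdev_dbg() compiles away unless DEBUG or CONFIG_DYNAMIC_DEBUG is set; with dynamic debug the prints can be enabled at run time. A hedged usage note, with the module name assumed:

	/* With CONFIG_DYNAMIC_DEBUG the line below can be switched on via:
	 *   echo 'module smc91c92_cs +p' > /sys/kernel/debug/dynamic_debug/control
	 * Without DEBUG or dynamic debug it compiles to nothing. */
	netdev_dbg(dev, "smc91c92 reset called.\n");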
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 73be7f3982e6..0c9b5d94154f 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -58,7 +58,7 @@
* 22/09/04 Nicolas Pitre big update (see commit log for details)
*/
static const char version[] =
- "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>\n";
+ "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>";
/* Debugging level */
#ifndef SMC_DEBUG
@@ -149,16 +149,16 @@ MODULE_ALIAS("platform:smc91x");
#define MII_DELAY 1
#if SMC_DEBUG > 0
-#define DBG(n, args...) \
+#define DBG(n, dev, args...) \
do { \
if (SMC_DEBUG >= (n)) \
- printk(args); \
+ netdev_dbg(dev, args); \
} while (0)
-#define PRINTK(args...) printk(args)
+#define PRINTK(dev, args...) netdev_info(dev, args)
#else
-#define DBG(n, args...) do { } while(0)
-#define PRINTK(args...) printk(KERN_DEBUG args)
+#define DBG(n, dev, args...) do { } while (0)
+#define PRINTK(dev, args...) netdev_dbg(dev, args)
#endif
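With the rewritten macros above, every debug site now passes the net_device explicitly. A usage sketch taken from the smc_hardware_send_pkt() hunk further down:

	/* Emitted only when the driver is built with SMC_DEBUG >= 2; it
	 * expands to netdev_dbg(dev, ...), so dynamic-debug control applies
	 * on top. With SMC_DEBUG == 0 the whole statement compiles out. */
	DBG(2, dev, "TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
	    packet_no, len, len, buf);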
#if SMC_DEBUG > 3
@@ -173,24 +173,26 @@ static void PRINT_PKT(u_char *buf, int length)
for (i = 0; i < lines ; i ++) {
int cur;
+ printk(KERN_DEBUG);
for (cur = 0; cur < 8; cur++) {
u_char a, b;
a = *buf++;
b = *buf++;
- printk("%02x%02x ", a, b);
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
+ printk(KERN_DEBUG);
for (i = 0; i < remainder/2 ; i++) {
u_char a, b;
a = *buf++;
b = *buf++;
- printk("%02x%02x ", a, b);
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
#else
-#define PRINT_PKT(x...) do { } while(0)
+#define PRINT_PKT(x...) do { } while (0)
#endif
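PRINT_PKT() now opens each line with a bare printk(KERN_DEBUG) and appends hex pairs with pr_cont(). An alternative that some drivers use for the same job is the generic hex-dump helper; a hedged sketch, not what this patch does:

	/* 16 bytes per row, grouped as 16-bit words, no ASCII column */
	print_hex_dump(KERN_DEBUG, "smc91x pkt: ", DUMP_PREFIX_OFFSET,
		       16, 2, buf, length, false);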
@@ -226,8 +228,8 @@ static void PRINT_PKT(u_char *buf, int length)
unsigned long timeout = jiffies + 2; \
while (SMC_GET_MMU_CMD(lp) & MC_BUSY) { \
if (time_after(jiffies, timeout)) { \
- printk("%s: timeout %s line %d\n", \
- dev->name, __FILE__, __LINE__); \
+ netdev_dbg(dev, "timeout %s line %d\n", \
+ __FILE__, __LINE__); \
break; \
} \
cpu_relax(); \
@@ -246,7 +248,7 @@ static void smc_reset(struct net_device *dev)
unsigned int ctl, cfg;
struct sk_buff *pending_skb;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
/* Disable all interrupts, block TX tasklet */
spin_lock_irq(&lp->lock);
@@ -339,7 +341,7 @@ static void smc_enable(struct net_device *dev)
void __iomem *ioaddr = lp->base;
int mask;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
/* see the header file for options in TCR/RCR DEFAULT */
SMC_SELECT_BANK(lp, 0);
@@ -373,7 +375,7 @@ static void smc_shutdown(struct net_device *dev)
void __iomem *ioaddr = lp->base;
struct sk_buff *pending_skb;
- DBG(2, "%s: %s\n", CARDNAME, __func__);
+ DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
/* no more interrupts for me */
spin_lock_irq(&lp->lock);
@@ -406,11 +408,11 @@ static inline void smc_rcv(struct net_device *dev)
void __iomem *ioaddr = lp->base;
unsigned int packet_number, status, packet_len;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
packet_number = SMC_GET_RXFIFO(lp);
if (unlikely(packet_number & RXFIFO_REMPTY)) {
- PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name);
+ PRINTK(dev, "smc_rcv with nothing on FIFO.\n");
return;
}
@@ -420,9 +422,8 @@ static inline void smc_rcv(struct net_device *dev)
/* First two words are status and packet length */
SMC_GET_PKT_HDR(lp, status, packet_len);
packet_len &= 0x07ff; /* mask off top bits */
- DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
- dev->name, packet_number, status,
- packet_len, packet_len);
+ DBG(2, dev, "RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
+ packet_number, status, packet_len, packet_len);
back:
if (unlikely(packet_len < 6 || status & RS_ERRORS)) {
@@ -433,8 +434,8 @@ static inline void smc_rcv(struct net_device *dev)
}
if (packet_len < 6) {
/* bloody hardware */
- printk(KERN_ERR "%s: fubar (rxlen %u status %x\n",
- dev->name, packet_len, status);
+ netdev_err(dev, "fubar (rxlen %u status %x\n",
+ packet_len, status);
status |= RS_TOOSHORT;
}
SMC_WAIT_MMU_BUSY(lp);
@@ -551,7 +552,7 @@ static void smc_hardware_send_pkt(unsigned long data)
unsigned char *buf;
unsigned long flags;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
if (!smc_special_trylock(&lp->lock, flags)) {
netif_stop_queue(dev);
@@ -568,7 +569,7 @@ static void smc_hardware_send_pkt(unsigned long data)
packet_no = SMC_GET_AR(lp);
if (unlikely(packet_no & AR_FAILED)) {
- printk("%s: Memory allocation failed.\n", dev->name);
+ netdev_err(dev, "Memory allocation failed.\n");
dev->stats.tx_errors++;
dev->stats.tx_fifo_errors++;
smc_special_unlock(&lp->lock, flags);
@@ -581,8 +582,8 @@ static void smc_hardware_send_pkt(unsigned long data)
buf = skb->data;
len = skb->len;
- DBG(2, "%s: TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
- dev->name, packet_no, len, len, buf);
+ DBG(2, dev, "TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
+ packet_no, len, len, buf);
PRINT_PKT(buf, len);
/*
@@ -637,7 +638,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int numPages, poll_count, status;
unsigned long flags;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
BUG_ON(lp->pending_tx_skb != NULL);
@@ -654,7 +655,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
numPages = ((skb->len & ~1) + (6 - 1)) >> 8;
if (unlikely(numPages > 7)) {
- printk("%s: Far too big packet error.\n", dev->name);
+ netdev_warn(dev, "Far too big packet error.\n");
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
@@ -685,7 +686,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!poll_count) {
/* oh well, wait until the chip finds memory later */
netif_stop_queue(dev);
- DBG(2, "%s: TX memory allocation deferred.\n", dev->name);
+ DBG(2, dev, "TX memory allocation deferred.\n");
SMC_ENABLE_INT(lp, IM_ALLOC_INT);
} else {
/*
@@ -709,12 +710,12 @@ static void smc_tx(struct net_device *dev)
void __iomem *ioaddr = lp->base;
unsigned int saved_packet, packet_no, tx_status, pkt_len;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
/* If the TX FIFO is empty then nothing to do */
packet_no = SMC_GET_TXFIFO(lp);
if (unlikely(packet_no & TXFIFO_TEMPTY)) {
- PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name);
+ PRINTK(dev, "smc_tx with nothing on FIFO.\n");
return;
}
@@ -725,8 +726,8 @@ static void smc_tx(struct net_device *dev)
/* read the first word (status word) from this packet */
SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ);
SMC_GET_PKT_HDR(lp, tx_status, pkt_len);
- DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n",
- dev->name, tx_status, packet_no);
+ DBG(2, dev, "TX STATUS 0x%04x PNR 0x%02x\n",
+ tx_status, packet_no);
if (!(tx_status & ES_TX_SUC))
dev->stats.tx_errors++;
@@ -735,14 +736,12 @@ static void smc_tx(struct net_device *dev)
dev->stats.tx_carrier_errors++;
if (tx_status & (ES_LATCOL | ES_16COL)) {
- PRINTK("%s: %s occurred on last xmit\n", dev->name,
+ PRINTK(dev, "%s occurred on last xmit\n",
(tx_status & ES_LATCOL) ?
"late collision" : "too many collisions");
dev->stats.tx_window_errors++;
if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) {
- printk(KERN_INFO "%s: unexpectedly large number of "
- "bad collisions. Please check duplex "
- "setting.\n", dev->name);
+ netdev_info(dev, "unexpectedly large number of bad collisions. Please check duplex setting.\n");
}
}
@@ -830,8 +829,8 @@ static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
/* Return to idle state */
SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
- DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
- __func__, phyaddr, phyreg, phydata);
+ DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __func__, phyaddr, phyreg, phydata);
SMC_SELECT_BANK(lp, 2);
return phydata;
@@ -857,8 +856,8 @@ static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
/* Return to idle state */
SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
- DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
- __func__, phyaddr, phyreg, phydata);
+ DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __func__, phyaddr, phyreg, phydata);
SMC_SELECT_BANK(lp, 2);
}
@@ -871,7 +870,7 @@ static void smc_phy_detect(struct net_device *dev)
struct smc_local *lp = netdev_priv(dev);
int phyaddr;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
lp->phy_type = 0;
@@ -886,8 +885,8 @@ static void smc_phy_detect(struct net_device *dev)
id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1);
id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2);
- DBG(3, "%s: phy_id1=0x%x, phy_id2=0x%x\n",
- dev->name, id1, id2);
+ DBG(3, dev, "phy_id1=0x%x, phy_id2=0x%x\n",
+ id1, id2);
/* Make sure it is a valid identifier */
if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 &&
@@ -910,7 +909,7 @@ static int smc_phy_fixed(struct net_device *dev)
int phyaddr = lp->mii.phy_id;
int bmcr, cfg1;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
/* Enter Link Disable state */
cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
@@ -1044,7 +1043,7 @@ static void smc_phy_configure(struct work_struct *work)
int my_ad_caps; /* My Advertised capabilities */
int status;
- DBG(3, "%s:smc_program_phy()\n", dev->name);
+ DBG(3, dev, "smc_program_phy()\n");
spin_lock_irq(&lp->lock);
@@ -1055,7 +1054,7 @@ static void smc_phy_configure(struct work_struct *work)
goto smc_phy_configure_exit;
if (smc_phy_reset(dev, phyaddr)) {
- printk("%s: PHY reset timed out\n", dev->name);
+ netdev_info(dev, "PHY reset timed out\n");
goto smc_phy_configure_exit;
}
@@ -1082,7 +1081,7 @@ static void smc_phy_configure(struct work_struct *work)
my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR);
if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
- printk(KERN_INFO "Auto negotiation NOT supported\n");
+ netdev_info(dev, "Auto negotiation NOT supported\n");
smc_phy_fixed(dev);
goto smc_phy_configure_exit;
}
@@ -1118,8 +1117,8 @@ static void smc_phy_configure(struct work_struct *work)
*/
status = smc_phy_read(dev, phyaddr, MII_ADVERTISE);
- DBG(2, "%s: phy caps=%x\n", dev->name, my_phy_caps);
- DBG(2, "%s: phy advertised caps=%x\n", dev->name, my_ad_caps);
+ DBG(2, dev, "phy caps=%x\n", my_phy_caps);
+ DBG(2, dev, "phy advertised caps=%x\n", my_ad_caps);
/* Restart auto-negotiation process in order to advertise my caps */
smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
@@ -1143,7 +1142,7 @@ static void smc_phy_interrupt(struct net_device *dev)
int phyaddr = lp->mii.phy_id;
int phy18;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
if (lp->phy_type == 0)
return;
@@ -1179,8 +1178,8 @@ static void smc_10bt_check_media(struct net_device *dev, int init)
netif_carrier_on(dev);
}
if (netif_msg_link(lp))
- printk(KERN_INFO "%s: link %s\n", dev->name,
- new_carrier ? "up" : "down");
+ netdev_info(dev, "link %s\n",
+ new_carrier ? "up" : "down");
}
}
@@ -1211,7 +1210,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
int status, mask, timeout, card_stats;
int saved_pointer;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
spin_lock(&lp->lock);
@@ -1230,12 +1229,12 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
do {
status = SMC_GET_INT(lp);
- DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
- dev->name, status, mask,
- ({ int meminfo; SMC_SELECT_BANK(lp, 0);
- meminfo = SMC_GET_MIR(lp);
- SMC_SELECT_BANK(lp, 2); meminfo; }),
- SMC_GET_FIFO(lp));
+ DBG(2, dev, "INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
+ status, mask,
+ ({ int meminfo; SMC_SELECT_BANK(lp, 0);
+ meminfo = SMC_GET_MIR(lp);
+ SMC_SELECT_BANK(lp, 2); meminfo; }),
+ SMC_GET_FIFO(lp));
status &= mask;
if (!status)
@@ -1243,20 +1242,20 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
if (status & IM_TX_INT) {
/* do this before RX as it will free memory quickly */
- DBG(3, "%s: TX int\n", dev->name);
+ DBG(3, dev, "TX int\n");
smc_tx(dev);
SMC_ACK_INT(lp, IM_TX_INT);
if (THROTTLE_TX_PKTS)
netif_wake_queue(dev);
} else if (status & IM_RCV_INT) {
- DBG(3, "%s: RX irq\n", dev->name);
+ DBG(3, dev, "RX irq\n");
smc_rcv(dev);
} else if (status & IM_ALLOC_INT) {
- DBG(3, "%s: Allocation irq\n", dev->name);
+ DBG(3, dev, "Allocation irq\n");
tasklet_hi_schedule(&lp->tx_task);
mask &= ~IM_ALLOC_INT;
} else if (status & IM_TX_EMPTY_INT) {
- DBG(3, "%s: TX empty\n", dev->name);
+ DBG(3, dev, "TX empty\n");
mask &= ~IM_TX_EMPTY_INT;
/* update stats */
@@ -1271,10 +1270,10 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
/* multiple collisions */
dev->stats.collisions += card_stats & 0xF;
} else if (status & IM_RX_OVRN_INT) {
- DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name,
- ({ int eph_st; SMC_SELECT_BANK(lp, 0);
- eph_st = SMC_GET_EPH_STATUS(lp);
- SMC_SELECT_BANK(lp, 2); eph_st; }));
+ DBG(1, dev, "RX overrun (EPH_ST 0x%04x)\n",
+ ({ int eph_st; SMC_SELECT_BANK(lp, 0);
+ eph_st = SMC_GET_EPH_STATUS(lp);
+ SMC_SELECT_BANK(lp, 2); eph_st; }));
SMC_ACK_INT(lp, IM_RX_OVRN_INT);
dev->stats.rx_errors++;
dev->stats.rx_fifo_errors++;
@@ -1285,7 +1284,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
smc_phy_interrupt(dev);
} else if (status & IM_ERCV_INT) {
SMC_ACK_INT(lp, IM_ERCV_INT);
- PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
+ PRINTK(dev, "UNSUPPORTED: ERCV INTERRUPT\n");
}
} while (--timeout);
@@ -1296,11 +1295,11 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
#ifndef CONFIG_NET_POLL_CONTROLLER
if (timeout == MAX_IRQ_LOOPS)
- PRINTK("%s: spurious interrupt (mask = 0x%02x)\n",
- dev->name, mask);
+ PRINTK(dev, "spurious interrupt (mask = 0x%02x)\n",
+ mask);
#endif
- DBG(3, "%s: Interrupt done (%d loops)\n",
- dev->name, MAX_IRQ_LOOPS - timeout);
+ DBG(3, dev, "Interrupt done (%d loops)\n",
+ MAX_IRQ_LOOPS - timeout);
/*
* We return IRQ_HANDLED unconditionally here even if there was
@@ -1333,7 +1332,7 @@ static void smc_timeout(struct net_device *dev)
void __iomem *ioaddr = lp->base;
int status, mask, eph_st, meminfo, fifo;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
spin_lock_irq(&lp->lock);
status = SMC_GET_INT(lp);
@@ -1344,9 +1343,8 @@ static void smc_timeout(struct net_device *dev)
meminfo = SMC_GET_MIR(lp);
SMC_SELECT_BANK(lp, 2);
spin_unlock_irq(&lp->lock);
- PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x "
- "MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
- dev->name, status, mask, meminfo, fifo, eph_st );
+ PRINTK(dev, "TX timeout (INT 0x%02x INTMASK 0x%02x MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
+ status, mask, meminfo, fifo, eph_st);
smc_reset(dev);
smc_enable(dev);
@@ -1377,10 +1375,10 @@ static void smc_set_multicast_list(struct net_device *dev)
unsigned char multicast_table[8];
int update_multicast = 0;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
if (dev->flags & IFF_PROMISC) {
- DBG(2, "%s: RCR_PRMS\n", dev->name);
+ DBG(2, dev, "RCR_PRMS\n");
lp->rcr_cur_mode |= RCR_PRMS;
}
@@ -1395,7 +1393,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* checked before the table is
*/
else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
- DBG(2, "%s: RCR_ALMUL\n", dev->name);
+ DBG(2, dev, "RCR_ALMUL\n");
lp->rcr_cur_mode |= RCR_ALMUL;
}
@@ -1437,7 +1435,7 @@ static void smc_set_multicast_list(struct net_device *dev)
/* now, the table can be loaded into the chipset */
update_multicast = 1;
} else {
- DBG(2, "%s: ~(RCR_PRMS|RCR_ALMUL)\n", dev->name);
+ DBG(2, dev, "~(RCR_PRMS|RCR_ALMUL)\n");
lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
/*
@@ -1470,7 +1468,7 @@ smc_open(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
/* Setup the default Register Modes */
lp->tcr_cur_mode = TCR_DEFAULT;
@@ -1514,7 +1512,7 @@ static int smc_close(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
netif_stop_queue(dev);
netif_carrier_off(dev);
@@ -1694,7 +1692,7 @@ static int smc_ethtool_geteeprom(struct net_device *dev,
int i;
int imax;
- DBG(1, "Reading %d bytes at %d(0x%x)\n",
+ DBG(1, dev, "Reading %d bytes at %d(0x%x)\n",
eeprom->len, eeprom->offset, eeprom->offset);
imax = smc_ethtool_geteeprom_len(dev);
for (i = 0; i < eeprom->len; i += 2) {
@@ -1706,7 +1704,7 @@ static int smc_ethtool_geteeprom(struct net_device *dev,
ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf);
if (ret != 0)
return ret;
- DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
+ DBG(2, dev, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
data[i] = (wbuf >> 8) & 0xff;
data[i+1] = wbuf & 0xff;
}
@@ -1719,8 +1717,8 @@ static int smc_ethtool_seteeprom(struct net_device *dev,
int i;
int imax;
- DBG(1, "Writing %d bytes to %d(0x%x)\n",
- eeprom->len, eeprom->offset, eeprom->offset);
+ DBG(1, dev, "Writing %d bytes to %d(0x%x)\n",
+ eeprom->len, eeprom->offset, eeprom->offset);
imax = smc_ethtool_geteeprom_len(dev);
for (i = 0; i < eeprom->len; i += 2) {
int ret;
@@ -1729,7 +1727,7 @@ static int smc_ethtool_seteeprom(struct net_device *dev,
if (offset > imax)
break;
wbuf = (data[i] << 8) | data[i + 1];
- DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
+ DBG(2, dev, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
ret = smc_write_eeprom_word(dev, offset >> 1, wbuf);
if (ret != 0)
return ret;
@@ -1784,7 +1782,7 @@ static int smc_findirq(struct smc_local *lp)
int timeout = 20;
unsigned long cookie;
- DBG(2, "%s: %s\n", CARDNAME, __func__);
+ DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
cookie = probe_irq_on();
@@ -1856,21 +1854,21 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
unsigned long irq_flags)
{
struct smc_local *lp = netdev_priv(dev);
- static int version_printed = 0;
int retval;
unsigned int val, revision_register;
const char *version_string;
- DBG(2, "%s: %s\n", CARDNAME, __func__);
+ DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
/* First, see if the high byte is 0x33 */
val = SMC_CURRENT_BANK(lp);
- DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val);
+ DBG(2, dev, "%s: bank signature probe returned 0x%04x\n",
+ CARDNAME, val);
if ((val & 0xFF00) != 0x3300) {
if ((val & 0xFF) == 0x33) {
- printk(KERN_WARNING
- "%s: Detected possible byte-swapped interface"
- " at IOADDR %p\n", CARDNAME, ioaddr);
+ netdev_warn(dev,
+ "%s: Detected possible byte-swapped interface at IOADDR %p\n",
+ CARDNAME, ioaddr);
}
retval = -ENODEV;
goto err_out;
@@ -1897,8 +1895,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
val = SMC_GET_BASE(lp);
val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
- printk("%s: IOADDR %p doesn't match configuration (%x).\n",
- CARDNAME, ioaddr, val);
+ netdev_warn(dev, "%s: IOADDR %p doesn't match configuration (%x).\n",
+ CARDNAME, ioaddr, val);
}
/*
@@ -1908,21 +1906,19 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
*/
SMC_SELECT_BANK(lp, 3);
revision_register = SMC_GET_REV(lp);
- DBG(2, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
+ DBG(2, dev, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
version_string = chip_ids[ (revision_register >> 4) & 0xF];
if (!version_string || (revision_register & 0xff00) != 0x3300) {
/* I don't recognize this chip, so... */
- printk("%s: IO %p: Unrecognized revision register 0x%04x"
- ", Contact author.\n", CARDNAME,
- ioaddr, revision_register);
+ netdev_warn(dev, "%s: IO %p: Unrecognized revision register 0x%04x, Contact author.\n",
+ CARDNAME, ioaddr, revision_register);
retval = -ENODEV;
goto err_out;
}
/* At this point I'll assume that the chip is an SMC91x. */
- if (version_printed++ == 0)
- printk("%s", version);
+ pr_info_once("%s\n", version);
/* fill in some of the fields */
dev->base_addr = (unsigned long)ioaddr;
@@ -1940,7 +1936,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
/*
* If dev->irq is 0, then the device has to be banged on to see
* what the IRQ is.
- *
+ *
* This banging doesn't always detect the IRQ, for unknown reasons.
* a workaround is to reset the chip and try again.
*
@@ -1965,8 +1961,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
}
}
if (dev->irq == 0) {
- printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
- dev->name);
+ netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
retval = -ENODEV;
goto err_out;
}
@@ -2030,32 +2025,31 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
retval = register_netdev(dev);
if (retval == 0) {
/* now, print out the card info, in a short format.. */
- printk("%s: %s (rev %d) at %p IRQ %d",
- dev->name, version_string, revision_register & 0x0f,
- lp->base, dev->irq);
+ netdev_info(dev, "%s (rev %d) at %p IRQ %d",
+ version_string, revision_register & 0x0f,
+ lp->base, dev->irq);
if (dev->dma != (unsigned char)-1)
- printk(" DMA %d", dev->dma);
+ pr_cont(" DMA %d", dev->dma);
- printk("%s%s\n",
+ pr_cont("%s%s\n",
lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
THROTTLE_TX_PKTS ? " [throttle_tx]" : "");
if (!is_valid_ether_addr(dev->dev_addr)) {
- printk("%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", dev->name);
+ netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
} else {
/* Print the Ethernet address */
- printk("%s: Ethernet addr: %pM\n",
- dev->name, dev->dev_addr);
+ netdev_info(dev, "Ethernet addr: %pM\n",
+ dev->dev_addr);
}
if (lp->phy_type == 0) {
- PRINTK("%s: No PHY found\n", dev->name);
+ PRINTK(dev, "No PHY found\n");
} else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) {
- PRINTK("%s: PHY LAN83C183 (LAN91C111 Internal)\n", dev->name);
+ PRINTK(dev, "PHY LAN83C183 (LAN91C111 Internal)\n");
} else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) {
- PRINTK("%s: PHY LAN83C180\n", dev->name);
+ PRINTK(dev, "PHY LAN83C180\n");
}
}
@@ -2165,7 +2159,8 @@ static inline void smc_request_datacs(struct platform_device *pdev, struct net_d
return;
if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
- printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
+ netdev_info(ndev, "%s: failed to request datacs memory region.\n",
+ CARDNAME);
return;
}
@@ -2307,7 +2302,7 @@ static int smc_drv_probe(struct platform_device *pdev)
out_free_netdev:
free_netdev(ndev);
out:
- printk("%s: not found (%d).\n", CARDNAME, ret);
+ pr_info("%s: not found (%d).\n", CARDNAME, ret);
return ret;
}
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 5730fe2445a6..c9d4c872e81d 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -907,8 +907,8 @@ static const char * chip_ids[ 16 ] = {
({ \
int __b = SMC_CURRENT_BANK(lp); \
if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \
- printk( "%s: bank reg screwed (0x%04x)\n", \
- CARDNAME, __b ); \
+ pr_err("%s: bank reg screwed (0x%04x)\n", \
+ CARDNAME, __b); \
BUG(); \
} \
reg<<SMC_IO_SHIFT; \
@@ -1124,8 +1124,7 @@ static const char * chip_ids[ 16 ] = {
void __iomem *__ioaddr = ioaddr; \
if (__len >= 2 && (unsigned long)__ptr & 2) { \
__len -= 2; \
- SMC_outw(*(u16 *)__ptr, ioaddr, \
- DATA_REG(lp)); \
+ SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
__ptr += 2; \
} \
if (SMC_CAN_USE_DATACS && lp->datacs) \
@@ -1133,8 +1132,7 @@ static const char * chip_ids[ 16 ] = {
SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
if (__len & 2) { \
__ptr += (__len & ~3); \
- SMC_outw(*((u16 *)__ptr), ioaddr, \
- DATA_REG(lp)); \
+ SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
} \
} else if (SMC_16BIT(lp)) \
SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 5fdbc2686eb3..8564f23a6796 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2167,7 +2167,7 @@ static int smsc911x_init(struct net_device *dev)
udelay(1000);
if (to == 0) {
- pr_err("Device not READY in 100ms aborting\n");
+ netdev_err(dev, "Device not READY in 100ms aborting\n");
return -ENODEV;
}
@@ -2502,7 +2502,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
SMSC_TRACE(pdata, probe,
"MAC Address is specified by configuration");
} else if (is_valid_ether_addr(pdata->config.mac)) {
- memcpy(dev->dev_addr, pdata->config.mac, 6);
+ memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN);
SMSC_TRACE(pdata, probe,
"MAC Address specified by platform data");
} else {
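The smsc911x change above and several Sun driver hunks below make the same substitution: the magic 6 in MAC-address copies becomes ETH_ALEN (defined as 6 in <linux/if_ether.h>). A self-contained sketch of the idiom, with a hypothetical helper name:

	#include <linux/etherdevice.h>	/* ETH_ALEN, is_valid_ether_addr() */
	#include <linux/netdevice.h>

	/* Illustrative helper only; not part of any of these patches. */
	static int example_set_mac(struct net_device *dev, const u8 *mac)
	{
		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;
		memcpy(dev->dev_addr, mac, ETH_ALEN);	/* was a literal 6 */
		return 0;
	}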
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 5f9e79f7f2df..059bcafc5e62 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -99,17 +99,17 @@ MODULE_PARM_DESC(debug, "debug level");
#define smsc_dbg(TYPE, f, a...) \
do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_DEBUG PFX f "\n", ## a); \
+ netdev_dbg((pd)->dev, PFX f "\n", ## a); \
} while (0)
#define smsc_info(TYPE, f, a...) \
do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_INFO PFX f "\n", ## a); \
+ netdev_info((pd)->dev, PFX f "\n", ## a); \
} while (0)
#define smsc_warn(TYPE, f, a...) \
do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_WARNING PFX f "\n", ## a); \
+ netdev_warn((pd)->dev, PFX f "\n", ## a); \
} while (0)
static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
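The smsc9420 macros above keep the driver's msg_enable gating but route the output through netdev_<level>(). For reference, the netif_<level>() helpers in <linux/netdevice.h> combine both steps already; a hedged, made-up example of the equivalent call shape:

	/* Checks pd->msg_enable against NETIF_MSG_RX_ERR, then prints via
	 * netdev_dbg(pd->dev, ...). The message text here is invented. */
	netif_dbg(pd, rx_err, pd->dev, "RX descriptor error\n");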
@@ -1168,7 +1168,7 @@ static int smsc9420_mii_probe(struct net_device *dev)
/* Device only supports internal PHY at address 1 */
if (!pd->mii_bus->phy_map[1]) {
- pr_err("%s: no PHY found at address 1\n", dev->name);
+ netdev_err(dev, "no PHY found at address 1\n");
return -ENODEV;
}
@@ -1180,12 +1180,12 @@ static int smsc9420_mii_probe(struct net_device *dev)
smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
- pr_err("%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
- pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
- dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
/* mask with MAC supported features */
phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
@@ -1582,12 +1582,12 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int result = 0;
u32 id_rev;
- printk(KERN_INFO DRV_DESCRIPTION " version " DRV_VERSION "\n");
+ pr_info(DRV_DESCRIPTION " version " DRV_VERSION "\n");
/* First do the PCI initialisation */
result = pci_enable_device(pdev);
if (unlikely(result)) {
- printk(KERN_ERR "Cannot enable smsc9420\n");
+ pr_err("Cannot enable smsc9420\n");
goto out_0;
}
@@ -1600,24 +1600,24 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
SET_NETDEV_DEV(dev, &pdev->dev);
if (!(pci_resource_flags(pdev, SMSC_BAR) & IORESOURCE_MEM)) {
- printk(KERN_ERR "Cannot find PCI device base address\n");
+ netdev_err(dev, "Cannot find PCI device base address\n");
goto out_free_netdev_2;
}
if ((pci_request_regions(pdev, DRV_NAME))) {
- printk(KERN_ERR "Cannot obtain PCI resources, aborting.\n");
+ netdev_err(dev, "Cannot obtain PCI resources, aborting.\n");
goto out_free_netdev_2;
}
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_ERR "No usable DMA configuration, aborting.\n");
+ netdev_err(dev, "No usable DMA configuration, aborting.\n");
goto out_free_regions_3;
}
virt_addr = ioremap(pci_resource_start(pdev, SMSC_BAR),
pci_resource_len(pdev, SMSC_BAR));
if (!virt_addr) {
- printk(KERN_ERR "Cannot map device registers, aborting.\n");
+ netdev_err(dev, "Cannot map device registers, aborting.\n");
goto out_free_regions_3;
}
@@ -1707,8 +1707,6 @@ static void smsc9420_remove(struct pci_dev *pdev)
if (!dev)
return;
- pci_set_drvdata(pdev, NULL);
-
pd = netdev_priv(dev);
unregister_netdev(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7eb8babed2cb..fc94f202a43e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -451,14 +451,14 @@ struct mac_device_info {
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
-extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
- unsigned int high, unsigned int low);
-extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
- unsigned int high, unsigned int low);
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
-extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+void stmmac_set_mac(void __iomem *ioaddr, bool enable);
-extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_ring_mode_ops ring_mode_ops;
extern const struct stmmac_chain_mode_ops chain_mode_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 8e5662ce488b..def266da55db 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -104,14 +104,13 @@
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
-extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_dma_start_tx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
-extern void dwmac_dma_start_rx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
-extern int dwmac_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x);
+void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+void dwmac_enable_dma_irq(void __iomem *ioaddr);
+void dwmac_disable_dma_irq(void __iomem *ioaddr);
+void dwmac_dma_start_tx(void __iomem *ioaddr);
+void dwmac_dma_stop_tx(void __iomem *ioaddr);
+void dwmac_dma_start_rx(void __iomem *ioaddr);
+void dwmac_dma_stop_rx(void __iomem *ioaddr);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
#endif /* __DWMAC_DMA_H__ */
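The stmmac header edits here and in the next few files are purely cosmetic: function declarations have external linkage by default, so dropping the extern keyword changes nothing for the compiler or linker. Both lines below declare the same symbol:

	extern void dwmac_dma_start_tx(void __iomem *ioaddr);	/* old style */
	void dwmac_dma_start_tx(void __iomem *ioaddr);		/* new style */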
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 48ec001566b5..8607488cbcfc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -128,8 +128,8 @@ struct stmmac_counters {
unsigned int mmc_rx_icmp_err_octets;
};
-extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
-extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
-extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
+void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
+void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f16a9bdf45bb..22f89ffdfd95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -110,14 +110,14 @@ struct stmmac_priv {
extern int phyaddr;
-extern int stmmac_mdio_unregister(struct net_device *ndev);
-extern int stmmac_mdio_register(struct net_device *ndev);
-extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+int stmmac_mdio_unregister(struct net_device *ndev);
+int stmmac_mdio_register(struct net_device *ndev);
+void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
extern const struct stmmac_hwtimestamp stmmac_ptp;
-extern int stmmac_ptp_register(struct stmmac_priv *priv);
-extern void stmmac_ptp_unregister(struct stmmac_priv *priv);
+int stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_freeze(struct net_device *ndev);
int stmmac_restore(struct net_device *ndev);
int stmmac_resume(struct net_device *ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 023b7c29cb2f..644d80ece067 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -138,7 +138,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(ndev);
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, priv->ioaddr);
pci_release_regions(pdev);
pci_disable_device(pdev);
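This hunk and the cassini/niu/sungem/sunhme/tehuti hunks below all delete the explicit pci_set_drvdata(pdev, NULL) from the remove path; the underlying assumption is the driver-core behaviour of clearing drvdata once no driver is bound, which makes the manual reset dead code. A minimal remove-path sketch under that assumption (names are illustrative):

	#include <linux/pci.h>
	#include <linux/netdevice.h>

	static void example_pci_remove(struct pci_dev *pdev)
	{
		struct net_device *dev = pci_get_drvdata(pdev);

		unregister_netdev(dev);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		/* no pci_set_drvdata(pdev, NULL): the core resets it */
	}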
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 759441b29e53..b4d50d74ba18 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3354,7 +3354,7 @@ use_random_mac_addr:
#if defined(CONFIG_SPARC)
addr = of_get_property(cp->of_node, "local-mac-address", NULL);
if (addr != NULL) {
- memcpy(dev_addr, addr, 6);
+ memcpy(dev_addr, addr, ETH_ALEN);
goto done;
}
#endif
@@ -5168,7 +5168,6 @@ err_out_free_netdev:
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return -ENODEV;
}
@@ -5206,7 +5205,6 @@ static void cas_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index f28460ce24a7..388540fcb977 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9875,7 +9875,6 @@ err_out_free_res:
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -9900,7 +9899,6 @@ static void niu_pci_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index e62df2b81302..b5655b79bd3b 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2779,7 +2779,7 @@ static int gem_get_device_address(struct gem *gp)
return -1;
#endif
}
- memcpy(dev->dev_addr, addr, 6);
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
#else
get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
#endif
@@ -2806,8 +2806,6 @@ static void gem_remove_one(struct pci_dev *pdev)
iounmap(gp->regs);
pci_release_regions(pdev);
free_netdev(dev);
-
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index e37b587b3860..0dbf46f08ed5 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2675,10 +2675,10 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
addr = of_get_property(dp, "local-mac-address", &len);
- if (qfe_slot != -1 && addr && len == 6)
- memcpy(dev->dev_addr, addr, 6);
+ if (qfe_slot != -1 && addr && len == ETH_ALEN)
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
else
- memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
}
hp = netdev_priv(dev);
@@ -3024,9 +3024,9 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
(addr = of_get_property(dp, "local-mac-address", &len))
!= NULL &&
len == 6) {
- memcpy(dev->dev_addr, addr, 6);
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
} else {
- memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
}
#else
get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
@@ -3170,8 +3170,6 @@ static void happy_meal_pci_remove(struct pci_dev *pdev)
pci_release_regions(hp->happy_dev);
free_netdev(net_dev);
-
- pci_set_drvdata(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index b072f4dba033..5695ae2411de 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -843,7 +843,7 @@ static int qec_ether_init(struct platform_device *op)
if (!dev)
return -ENOMEM;
- memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
qe = netdev_priv(dev);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 571452e786d5..dd0dd6279b4e 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2447,7 +2447,6 @@ static void bdx_remove(struct pci_dev *pdev)
iounmap(nic->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
vfree(nic);
RET();
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index de71b1ec4625..53150c25a96b 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -49,11 +49,19 @@ config TI_DAVINCI_CPDMA
To compile this driver as a module, choose M here: the module
will be called davinci_cpdma. This is recommended.
+config TI_CPSW_PHY_SEL
+ boolean "TI CPSW Switch Phy sel Support"
+ depends on TI_CPSW
+ ---help---
+ This driver supports configuring the PHY mode of the PHYs connected to
+ the CPSW.
+
config TI_CPSW
tristate "TI CPSW Switch Support"
depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
+ select TI_CPSW_PHY_SEL
---help---
This driver supports TI's CPSW Ethernet Switch.
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index c65148e8aa1d..9cfaab8152be 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_CPMAC) += cpmac.o
obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
+obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
new file mode 100644
index 000000000000..148da9ae8366
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -0,0 +1,161 @@
+/* Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "cpsw.h"
+
+/* AM33xx SoC specific definitions for the CONTROL port */
+#define AM33XX_GMII_SEL_MODE_MII 0
+#define AM33XX_GMII_SEL_MODE_RMII 1
+#define AM33XX_GMII_SEL_MODE_RGMII 2
+
+#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
+#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
+
+struct cpsw_phy_sel_priv {
+ struct device *dev;
+ u32 __iomem *gmii_sel;
+ bool rmii_clock_external;
+ void (*cpsw_phy_sel)(struct cpsw_phy_sel_priv *priv,
+ phy_interface_t phy_mode, int slave);
+};
+
+
+static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
+ phy_interface_t phy_mode, int slave)
+{
+ u32 reg;
+ u32 mask;
+ u32 mode = 0;
+
+ reg = readl(priv->gmii_sel);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = AM33XX_GMII_SEL_MODE_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ default:
+ mode = AM33XX_GMII_SEL_MODE_MII;
+ break;
+ }
+
+ mask = 0x3 << (slave * 2) | BIT(slave + 6);
+ mode <<= slave * 2;
+
+ if (priv->rmii_clock_external) {
+ if (slave == 0)
+ mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
+ else
+ mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
+ }
+
+ reg &= ~mask;
+ reg |= mode;
+
+ writel(reg, priv->gmii_sel);
+}
+
+static struct platform_driver cpsw_phy_sel_driver;
+static int match(struct device *dev, void *data)
+{
+ struct device_node *node = (struct device_node *)data;
+ return dev->of_node == node &&
+ dev->driver == &cpsw_phy_sel_driver.driver;
+}
+
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
+{
+ struct device_node *node;
+ struct cpsw_phy_sel_priv *priv;
+
+ node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+ if (!node) {
+ dev_err(dev, "Phy mode driver DT not found\n");
+ return;
+ }
+
+ dev = bus_find_device(&platform_bus_type, NULL, node, match);
+ priv = dev_get_drvdata(dev);
+
+ priv->cpsw_phy_sel(priv, phy_mode, slave);
+}
+EXPORT_SYMBOL_GPL(cpsw_phy_sel);
+
+static const struct of_device_id cpsw_phy_sel_id_table[] = {
+ {
+ .compatible = "ti,am3352-cpsw-phy-sel",
+ .data = &cpsw_gmii_sel_am3352,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
+
+static int cpsw_phy_sel_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ const struct of_device_id *of_id;
+ struct cpsw_phy_sel_priv *priv;
+
+ of_id = of_match_node(cpsw_phy_sel_id_table, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "unable to alloc memory for cpsw phy sel\n");
+ return -ENOMEM;
+ }
+
+ priv->cpsw_phy_sel = of_id->data;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
+ priv->gmii_sel = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->gmii_sel))
+ return PTR_ERR(priv->gmii_sel);
+
+ if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL))
+ priv->rmii_clock_external = true;
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ return 0;
+}
+
+static struct platform_driver cpsw_phy_sel_driver = {
+ .probe = cpsw_phy_sel_probe,
+ .driver = {
+ .name = "cpsw-phy-sel",
+ .owner = THIS_MODULE,
+ .of_match_table = cpsw_phy_sel_id_table,
+ },
+};
+
+module_platform_driver(cpsw_phy_sel_driver);
+MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
+MODULE_LICENSE("GPL v2");
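The mask/mode arithmetic in cpsw_gmii_sel_am3352() above packs a two-bit mode field per slave plus one RMII clock-source bit into the GMII_SEL register. A minimal stand-alone sketch of the same packing, reusing the register defines from this file but with a hypothetical helper name and a register value assumed to start at zero:

#include <stdio.h>

#define BIT(n)                          (1u << (n))
#define AM33XX_GMII_SEL_MODE_RGMII      2u
#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)

static unsigned int gmii_sel_update(unsigned int reg, int slave,
                                    unsigned int mode, int rmii_clock_external)
{
        /* two mode bits per slave, plus one RMII clock-source bit */
        unsigned int mask = (0x3u << (slave * 2)) | BIT(slave + 6);

        mode <<= slave * 2;
        if (rmii_clock_external)
                mode |= slave ? AM33XX_GMII_SEL_RMII2_IO_CLK_EN :
                                AM33XX_GMII_SEL_RMII1_IO_CLK_EN;

        return (reg & ~mask) | mode;
}

int main(void)
{
        /* program slave 1 as RGMII with the internal RMII clock */
        printf("gmii_sel = 0x%08x\n",
               gmii_sel_update(0, 1, AM33XX_GMII_SEL_MODE_RGMII, 0));
        return 0;
}

With those inputs the sketch prints gmii_sel = 0x00000008, i.e. mode 2 shifted into the slave-1 field.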
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 79974e31187a..90d41d26ec6d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -367,8 +367,6 @@ struct cpsw_priv {
spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
- struct resource *cpsw_res;
- struct resource *cpsw_wr_res;
struct napi_struct napi;
struct device *dev;
struct cpsw_platform_data data;
@@ -639,13 +637,6 @@ void cpsw_rx_handler(void *token, int len, int status)
static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
{
struct cpsw_priv *priv = dev_id;
- u32 rx, tx, rx_thresh;
-
- rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
- rx = __raw_readl(&priv->wr_regs->rx_stat);
- tx = __raw_readl(&priv->wr_regs->tx_stat);
- if (!rx_thresh && !rx && !tx)
- return IRQ_NONE;
cpsw_intr_disable(priv);
if (priv->irq_enabled == true) {
@@ -1023,6 +1014,10 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
dev_info(priv->dev, "phy found : id is : 0x%x\n",
slave->phy->phy_id);
phy_start(slave->phy);
+
+ /* Configure GMII_SEL register */
+ cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
+ slave->slave_num);
}
}
@@ -1169,9 +1164,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
}
}
+ napi_enable(&priv->napi);
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
- napi_enable(&priv->napi);
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
@@ -1712,67 +1707,60 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (of_property_read_u32(node, "active_slave", &prop)) {
pr_err("Missing active_slave property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->active_slave = prop;
if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
pr_err("Missing cpts_clock_mult property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->cpts_clock_mult = prop;
if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
pr_err("Missing cpts_clock_shift property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->cpts_clock_shift = prop;
- data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data),
- GFP_KERNEL);
+ data->slave_data = devm_kzalloc(&pdev->dev, data->slaves
+ * sizeof(struct cpsw_slave_data),
+ GFP_KERNEL);
if (!data->slave_data)
- return -EINVAL;
+ return -ENOMEM;
if (of_property_read_u32(node, "cpdma_channels", &prop)) {
pr_err("Missing cpdma_channels property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->channels = prop;
if (of_property_read_u32(node, "ale_entries", &prop)) {
pr_err("Missing ale_entries property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->ale_entries = prop;
if (of_property_read_u32(node, "bd_ram_size", &prop)) {
pr_err("Missing bd_ram_size property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->bd_ram_size = prop;
if (of_property_read_u32(node, "rx_descs", &prop)) {
pr_err("Missing rx_descs property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->rx_descs = prop;
if (of_property_read_u32(node, "mac_control", &prop)) {
pr_err("Missing mac_control property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->mac_control = prop;
- if (!of_property_read_u32(node, "dual_emac", &prop))
- data->dual_emac = prop;
+ if (of_property_read_bool(node, "dual_emac"))
+ data->dual_emac = 1;
/*
* Populate all the child nodes here...
@@ -1782,7 +1770,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (ret)
pr_warn("Doesn't have any child node\n");
- for_each_node_by_name(slave_node, "slave") {
+ for_each_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = data->slave_data + i;
const void *mac_addr = NULL;
u32 phyid;
@@ -1791,11 +1779,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
struct device_node *mdio_node;
struct platform_device *mdio;
+ /* This is not a slave child node, continue */
+ if (strcmp(slave_node->name, "slave"))
+ continue;
+
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
pr_err("Missing slave[%d] phy_id property\n", i);
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
@@ -1825,10 +1816,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
return 0;
-
-error_ret:
- kfree(data->slave_data);
- return ret;
}
static int cpsw_probe_dual_emac(struct platform_device *pdev,
@@ -1870,7 +1857,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->coal_intvl = 0;
priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
- priv_sl2->cpsw_res = priv->cpsw_res;
priv_sl2->regs = priv->regs;
priv_sl2->host_port = priv->host_port;
priv_sl2->host_port_regs = priv->host_port_regs;
@@ -1914,8 +1900,8 @@ static int cpsw_probe(struct platform_device *pdev)
struct cpsw_priv *priv;
struct cpdma_params dma_params;
struct cpsw_ale_params ale_params;
- void __iomem *ss_regs, *wr_regs;
- struct resource *res;
+ void __iomem *ss_regs;
+ struct resource *res, *ss_res;
u32 slave_offset, sliver_offset, slave_size;
int ret = 0, i, k = 0;
@@ -1951,7 +1937,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (cpsw_probe_dt(&priv->data, pdev)) {
pr_err("cpsw: platform data missing\n");
ret = -ENODEV;
- goto clean_ndev_ret;
+ goto clean_runtime_disable_ret;
}
data = &priv->data;
@@ -1965,11 +1951,12 @@ static int cpsw_probe(struct platform_device *pdev)
memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
- priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
- GFP_KERNEL);
+ priv->slaves = devm_kzalloc(&pdev->dev,
+ sizeof(struct cpsw_slave) * data->slaves,
+ GFP_KERNEL);
if (!priv->slaves) {
- ret = -EBUSY;
- goto clean_ndev_ret;
+ ret = -ENOMEM;
+ goto clean_runtime_disable_ret;
}
for (i = 0; i < data->slaves; i++)
priv->slaves[i].slave_num = i;
@@ -1977,55 +1964,31 @@ static int cpsw_probe(struct platform_device *pdev)
priv->slaves[0].ndev = ndev;
priv->emac_port = 0;
- priv->clk = clk_get(&pdev->dev, "fck");
+ priv->clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "fck is not found\n");
+ dev_err(priv->dev, "fck is not found\n");
ret = -ENODEV;
- goto clean_slave_ret;
+ goto clean_runtime_disable_ret;
}
priv->coal_intvl = 0;
priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
- priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!priv->cpsw_res) {
- dev_err(priv->dev, "error getting i/o resource\n");
- ret = -ENOENT;
- goto clean_clk_ret;
- }
- if (!request_mem_region(priv->cpsw_res->start,
- resource_size(priv->cpsw_res), ndev->name)) {
- dev_err(priv->dev, "failed request i/o region\n");
- ret = -ENXIO;
- goto clean_clk_ret;
- }
- ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
- if (!ss_regs) {
- dev_err(priv->dev, "unable to map i/o region\n");
- goto clean_cpsw_iores_ret;
+ ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
+ if (IS_ERR(ss_regs)) {
+ ret = PTR_ERR(ss_regs);
+ goto clean_runtime_disable_ret;
}
priv->regs = ss_regs;
priv->version = __raw_readl(&priv->regs->id_ver);
priv->host_port = HOST_PORT_NUM;
- priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!priv->cpsw_wr_res) {
- dev_err(priv->dev, "error getting i/o resource\n");
- ret = -ENOENT;
- goto clean_iomap_ret;
- }
- if (!request_mem_region(priv->cpsw_wr_res->start,
- resource_size(priv->cpsw_wr_res), ndev->name)) {
- dev_err(priv->dev, "failed request i/o region\n");
- ret = -ENXIO;
- goto clean_iomap_ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->wr_regs)) {
+ ret = PTR_ERR(priv->wr_regs);
+ goto clean_runtime_disable_ret;
}
- wr_regs = ioremap(priv->cpsw_wr_res->start,
- resource_size(priv->cpsw_wr_res));
- if (!wr_regs) {
- dev_err(priv->dev, "unable to map i/o region\n");
- goto clean_cpsw_wr_iores_ret;
- }
- priv->wr_regs = wr_regs;
memset(&dma_params, 0, sizeof(dma_params));
memset(&ale_params, 0, sizeof(ale_params));
@@ -2056,12 +2019,12 @@ static int cpsw_probe(struct platform_device *pdev)
slave_size = CPSW2_SLAVE_SIZE;
sliver_offset = CPSW2_SLIVER_OFFSET;
dma_params.desc_mem_phys =
- (u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET;
+ (u32 __force) ss_res->start + CPSW2_BD_OFFSET;
break;
default:
dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
ret = -ENODEV;
- goto clean_cpsw_wr_iores_ret;
+ goto clean_runtime_disable_ret;
}
for (i = 0; i < priv->data.slaves; i++) {
struct cpsw_slave *slave = &priv->slaves[i];
@@ -2089,7 +2052,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (!priv->dma) {
dev_err(priv->dev, "error initializing dma\n");
ret = -ENOMEM;
- goto clean_wr_iomap_ret;
+ goto clean_runtime_disable_ret;
}
priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
@@ -2124,8 +2087,8 @@ static int cpsw_probe(struct platform_device *pdev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
- if (request_irq(i, cpsw_interrupt, 0,
- dev_name(&pdev->dev), priv)) {
+ if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
+ dev_name(priv->dev), priv)) {
dev_err(priv->dev, "error attaching irq\n");
goto clean_ale_ret;
}
@@ -2147,7 +2110,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (ret) {
dev_err(priv->dev, "error registering net device\n");
ret = -ENODEV;
- goto clean_irq_ret;
+ goto clean_ale_ret;
}
if (cpts_register(&pdev->dev, priv->cpts,
@@ -2155,44 +2118,27 @@ static int cpsw_probe(struct platform_device *pdev)
dev_err(priv->dev, "error registering cpts device\n");
cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
- priv->cpsw_res->start, ndev->irq);
+ ss_res->start, ndev->irq);
if (priv->data.dual_emac) {
ret = cpsw_probe_dual_emac(pdev, priv);
if (ret) {
cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
- goto clean_irq_ret;
+ goto clean_ale_ret;
}
}
return 0;
-clean_irq_ret:
- for (i = 0; i < priv->num_irqs; i++)
- free_irq(priv->irqs_table[i], priv);
clean_ale_ret:
cpsw_ale_destroy(priv->ale);
clean_dma_ret:
cpdma_chan_destroy(priv->txch);
cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
-clean_wr_iomap_ret:
- iounmap(priv->wr_regs);
-clean_cpsw_wr_iores_ret:
- release_mem_region(priv->cpsw_wr_res->start,
- resource_size(priv->cpsw_wr_res));
-clean_iomap_ret:
- iounmap(priv->regs);
-clean_cpsw_iores_ret:
- release_mem_region(priv->cpsw_res->start,
- resource_size(priv->cpsw_res));
-clean_clk_ret:
- clk_put(priv->clk);
-clean_slave_ret:
+clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
- kfree(priv->slaves);
clean_ndev_ret:
- kfree(priv->data.slave_data);
free_netdev(priv->ndev);
return ret;
}
@@ -2201,30 +2147,18 @@ static int cpsw_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
- int i;
if (priv->data.dual_emac)
unregister_netdev(cpsw_get_slave_ndev(priv, 1));
unregister_netdev(ndev);
cpts_unregister(priv->cpts);
- for (i = 0; i < priv->num_irqs; i++)
- free_irq(priv->irqs_table[i], priv);
cpsw_ale_destroy(priv->ale);
cpdma_chan_destroy(priv->txch);
cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
- iounmap(priv->regs);
- release_mem_region(priv->cpsw_res->start,
- resource_size(priv->cpsw_res));
- iounmap(priv->wr_regs);
- release_mem_region(priv->cpsw_wr_res->start,
- resource_size(priv->cpsw_wr_res));
pm_runtime_disable(&pdev->dev);
- clk_put(priv->clk);
- kfree(priv->slaves);
- kfree(priv->data.slave_data);
if (priv->data.dual_emac)
free_netdev(cpsw_get_slave_ndev(priv, 1));
free_netdev(ndev);
@@ -2280,7 +2214,7 @@ static struct platform_driver cpsw_driver = {
.name = "cpsw",
.owner = THIS_MODULE,
.pm = &cpsw_pm_ops,
- .of_match_table = of_match_ptr(cpsw_of_mtable),
+ .of_match_table = cpsw_of_mtable,
},
.probe = cpsw_probe,
.remove = cpsw_remove,
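The cpsw_probe() rework above leans on managed (devm_*) resources: anything obtained through devm_clk_get(), devm_ioremap_resource() or devm_request_irq() is released by the driver core when probe fails or the device is removed, which is why the iounmap()/release_mem_region()/clk_put()/free_irq() unwind labels disappear. A minimal sketch of that pattern, with hypothetical device and resource names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *regs;
        struct clk *fck;

        fck = devm_clk_get(&pdev->dev, "fck");
        if (IS_ERR(fck))
                return PTR_ERR(fck);            /* nothing to undo by hand */

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(regs))
                return PTR_ERR(regs);           /* clock released automatically */

        /* stash regs/fck in driver state here; no error-unwind labels needed */
        return 0;
}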
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index eb3e101ec048..574f49da693f 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -39,4 +39,6 @@ struct cpsw_platform_data {
bool dual_emac; /* Enable Dual EMAC mode */
};
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
+
#endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index fe993cdd7e23..1a581ef7eee8 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -127,8 +127,8 @@ struct cpts {
};
#ifdef CONFIG_TI_CPTS
-extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
-extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
#else
static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
@@ -138,8 +138,7 @@ static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
}
#endif
-extern int cpts_register(struct device *dev, struct cpts *cpts,
- u32 mult, u32 shift);
-extern void cpts_unregister(struct cpts *cpts);
+int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift);
+void cpts_unregister(struct cpts *cpts);
#endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 67df09ea9d04..41ba974bf37c 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -876,8 +876,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
- }
- if (!netdev_mc_empty(ndev)) {
+ } else if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
@@ -1853,7 +1852,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
}
/* MAC addr and PHY mask , RMII enable info from platform_data */
- memcpy(priv->mac_addr, pdata->mac_addr, 6);
+ memcpy(priv->mac_addr, pdata->mac_addr, ETH_ALEN);
priv->phy_id = pdata->phy_id;
priv->rmii_en = pdata->rmii_en;
priv->version = pdata->version;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 591437e59b90..62b19be5183d 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -319,7 +319,6 @@ static void tlan_remove_one(struct pci_dev *pdev)
free_netdev(dev);
- pci_set_drvdata(pdev, NULL);
cancel_work_sync(&priv->tlan_tqueue);
}
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 13e6fff8ca23..628b736e5ae7 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2230,7 +2230,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
nz_addr |= mac[i];
if (nz_addr) {
- memcpy(dev->dev_addr, mac, 6);
+ memcpy(dev->dev_addr, mac, ETH_ALEN);
dev->addr_len = 6;
} else {
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 309abb472aa2..8505196be9f5 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -359,27 +359,26 @@ static inline void *port_priv(struct gelic_port *port)
}
#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
-extern void udbg_shutdown_ps3gelic(void);
+void udbg_shutdown_ps3gelic(void);
#else
static inline void udbg_shutdown_ps3gelic(void) {}
#endif
-extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
+int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
/* shared netdev ops */
-extern void gelic_card_up(struct gelic_card *card);
-extern void gelic_card_down(struct gelic_card *card);
-extern int gelic_net_open(struct net_device *netdev);
-extern int gelic_net_stop(struct net_device *netdev);
-extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
-extern void gelic_net_set_multi(struct net_device *netdev);
-extern void gelic_net_tx_timeout(struct net_device *netdev);
-extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
-extern int gelic_net_setup_netdev(struct net_device *netdev,
- struct gelic_card *card);
+void gelic_card_up(struct gelic_card *card);
+void gelic_card_down(struct gelic_card *card);
+int gelic_net_open(struct net_device *netdev);
+int gelic_net_stop(struct net_device *netdev);
+int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+void gelic_net_set_multi(struct net_device *netdev);
+void gelic_net_tx_timeout(struct net_device *netdev);
+int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
+int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
/* shared ethtool ops */
-extern void gelic_net_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *info);
-extern void gelic_net_poll_controller(struct net_device *netdev);
+void gelic_net_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info);
+void gelic_net_poll_controller(struct net_device *netdev);
#endif /* _GELIC_NET_H */
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
index f7e51b7d7049..11f443d8e4ea 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
@@ -320,7 +320,7 @@ struct gelic_eurus_cmd {
#define GELIC_WL_PRIV_SET_PSK (SIOCIWFIRSTPRIV + 0)
#define GELIC_WL_PRIV_GET_PSK (SIOCIWFIRSTPRIV + 1)
-extern int gelic_wl_driver_probe(struct gelic_card *card);
-extern int gelic_wl_driver_remove(struct gelic_card *card);
-extern void gelic_wl_interrupt(struct net_device *netdev, u64 status);
+int gelic_wl_driver_probe(struct gelic_card *card);
+int gelic_wl_driver_remove(struct gelic_card *card);
+void gelic_wl_interrupt(struct net_device *netdev, u64 status);
#endif /* _GELIC_WIRELESS_H */
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 5734480c1ecf..3f4a32e39d27 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2478,7 +2478,6 @@ out_release_regions:
pci_release_regions(pdev);
out_disable_dev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return NULL;
}
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
index 4ba2135474d1..9b6af0845a11 100644
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ b/drivers/net/ethernet/toshiba/spider_net.h
@@ -29,8 +29,8 @@
#include <linux/sungem_phy.h>
-extern int spider_net_stop(struct net_device *netdev);
-extern int spider_net_open(struct net_device *netdev);
+int spider_net_stop(struct net_device *netdev);
+int spider_net_open(struct net_device *netdev);
extern const struct ethtool_ops spider_net_ethtool_ops;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index a971b9cca564..1322546d92ac 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -887,7 +887,6 @@ static void tc35815_remove_one(struct pci_dev *pdev)
mdiobus_free(lp->mii_bus);
unregister_netdev(dev);
free_netdev(dev);
- pci_set_drvdata(pdev, NULL);
}
static int
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index bdf697b184ae..4a7293ed95e9 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2292,7 +2292,6 @@ static void rhine_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static void rhine_shutdown (struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 0029148077a9..1f2364126323 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -36,6 +36,7 @@
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 4c619ea5189f..74234a51c851 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -31,7 +31,7 @@
#define DRIVER_NAME "xilinx_emaclite"
/* Register offsets for the EmacLite Core */
-#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
+#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */
#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */
#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */
@@ -63,13 +63,13 @@
#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */
/* Global Interrupt Enable Register (GIER) Bit Masks */
-#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
+#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
/* Transmit Status Register (TSR) Bit Masks */
-#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */
-#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */
-#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
-#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
+#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */
+#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */
+#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
+#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
* only. This is not documented
* in the HW spec */
@@ -77,21 +77,21 @@
#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
/* Receive Status Register (RSR) */
-#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */
-#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */
+#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */
+#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */
/* Transmit Packet Length Register (TPLR) */
-#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */
+#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */
/* Receive Packet Length Register (RPLR) */
-#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */
+#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */
-#define XEL_HEADER_OFFSET 12 /* Offset to length field */
-#define XEL_HEADER_SHIFT 16 /* Shift value for length */
+#define XEL_HEADER_OFFSET 12 /* Offset to length field */
+#define XEL_HEADER_SHIFT 16 /* Shift value for length */
/* General Ethernet Definitions */
-#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
-#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
+#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
+#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
@@ -1075,14 +1075,9 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
 * This function unmaps the IO region of the Emaclite device and frees the net
* device.
*/
-static void xemaclite_remove_ndev(struct net_device *ndev,
- struct platform_device *pdev)
+static void xemaclite_remove_ndev(struct net_device *ndev)
{
if (ndev) {
- struct net_local *lp = netdev_priv(ndev);
-
- if (lp->base_addr)
- devm_iounmap(&pdev->dev, lp->base_addr);
free_netdev(ndev);
}
}
@@ -1177,7 +1172,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
if (mac_address)
/* Set the MAC address. */
- memcpy(ndev->dev_addr, mac_address, 6);
+ memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
else
dev_warn(dev, "No MAC address found\n");
@@ -1214,7 +1209,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
return 0;
error:
- xemaclite_remove_ndev(ndev, ofdev);
+ xemaclite_remove_ndev(ndev);
return rc;
}
@@ -1248,7 +1243,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
of_node_put(lp->phy_node);
lp->phy_node = NULL;
- xemaclite_remove_ndev(ndev, of_dev);
+ xemaclite_remove_ndev(ndev);
return 0;
}
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index a20ed1a98099..f83993590174 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -453,7 +453,7 @@ static void directed_beacon(struct s_smc *smc)
*/
* (char *) a = (char) ((long)DBEACON_INFO<<24L) ;
a[1] = 0 ;
- memcpy((char *)a+1,(char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr,6) ;
+ memcpy((char *)a+1, (char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr, ETH_ALEN);
CHECK_NPP() ;
/* set memory address reg for writes */
diff --git a/drivers/net/fddi/skfp/h/smc.h b/drivers/net/fddi/skfp/h/smc.h
index 3ca308b28214..bd1166bf8f61 100644
--- a/drivers/net/fddi/skfp/h/smc.h
+++ b/drivers/net/fddi/skfp/h/smc.h
@@ -469,20 +469,20 @@ struct s_smc {
extern const struct fddi_addr fddi_broadcast;
-extern void all_selection_criteria(struct s_smc *smc);
-extern void card_stop(struct s_smc *smc);
-extern void init_board(struct s_smc *smc, u_char *mac_addr);
-extern int init_fplus(struct s_smc *smc);
-extern void init_plc(struct s_smc *smc);
-extern int init_smt(struct s_smc *smc, u_char * mac_addr);
-extern void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
-extern void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
-extern void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
-extern int pcm_status_twisted(struct s_smc *smc);
-extern void plc1_irq(struct s_smc *smc);
-extern void plc2_irq(struct s_smc *smc);
-extern void read_address(struct s_smc *smc, u_char * mac_addr);
-extern void timer_irq(struct s_smc *smc);
+void all_selection_criteria(struct s_smc *smc);
+void card_stop(struct s_smc *smc);
+void init_board(struct s_smc *smc, u_char *mac_addr);
+int init_fplus(struct s_smc *smc);
+void init_plc(struct s_smc *smc);
+int init_smt(struct s_smc *smc, u_char *mac_addr);
+void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
+void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
+void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
+int pcm_status_twisted(struct s_smc *smc);
+void plc1_irq(struct s_smc *smc);
+void plc2_irq(struct s_smc *smc);
+void read_address(struct s_smc *smc, u_char *mac_addr);
+void timer_irq(struct s_smc *smc);
#endif /* _SCMECM_ */
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index f5d7305a5784..713d303a06a9 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -436,7 +436,7 @@ static int skfp_driver_init(struct net_device *dev)
}
read_address(smc, NULL);
pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
- memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+ memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
smt_reset_defaults(smc, 0);
@@ -503,7 +503,7 @@ static int skfp_open(struct net_device *dev)
* address.
*/
read_address(smc, NULL);
- memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+ memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
init_smt(smc, NULL);
smt_online(smc, 1);
@@ -1213,7 +1213,7 @@ static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
if ((unsigned short) frame[1 + 10] != 0)
return;
SRBit = frame[1 + 6] & 0x01;
- memcpy(&frame[1 + 6], hw_addr, 6);
+ memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
frame[8] |= SRBit;
} // CheckSourceAddress
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index a974727dd9a2..636b65c66d49 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -445,7 +445,7 @@ static int ser12_open(struct net_device *dev)
outb(0, FCR(dev->base_addr)); /* disable FIFOs */
outb(0x0d, MCR(dev->base_addr));
outb(0, IER(dev->base_addr));
- if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
"baycom_ser_fdx", dev)) {
release_region(dev->base_addr, SER12_EXTENT);
return -EBUSY;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index e349d867449b..f9a8976195ba 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -490,7 +490,7 @@ static int ser12_open(struct net_device *dev)
outb(0, FCR(dev->base_addr)); /* disable FIFOs */
outb(0x0d, MCR(dev->base_addr));
outb(0, IER(dev->base_addr));
- if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
"baycom_ser12", dev)) {
release_region(dev->base_addr, SER12_EXTENT);
return -EBUSY;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index bc1d52170389..4bc6ee8e7987 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1734,7 +1734,7 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!Ivec[hwcfg.irq].used && hwcfg.irq)
{
if (request_irq(hwcfg.irq, scc_isr,
- IRQF_DISABLED, "AX.25 SCC",
+ 0, "AX.25 SCC",
(void *)(long) hwcfg.irq))
printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
else
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 0721e72f9299..1971411574db 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -888,7 +888,7 @@ static int yam_open(struct net_device *dev)
goto out_release_base;
}
outb(0, IER(dev->base_addr));
- if (request_irq(dev->irq, yam_interrupt, IRQF_DISABLED | IRQF_SHARED, dev->name, dev)) {
+ if (request_irq(dev->irq, yam_interrupt, IRQF_SHARED, dev->name, dev)) {
printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq);
ret = -EBUSY;
goto out_release_base;
@@ -975,7 +975,6 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EINVAL; /* Cannot change this parameter when up */
if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
return -ENOBUFS;
- ym->bitrate = 9600;
if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
kfree(ym);
return -EFAULT;
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 42e6deee6db5..0632d34905c7 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -82,7 +82,6 @@ struct mrf24j40 {
struct mutex buffer_mutex; /* only used to protect buf */
struct completion tx_complete;
- struct work_struct irqwork;
u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
};
@@ -344,6 +343,8 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
if (ret)
goto err;
+ INIT_COMPLETION(devrec->tx_complete);
+
/* Set TXNTRIG bit of TXNCON to send packet */
ret = read_short_reg(devrec, REG_TXNCON, &val);
if (ret)
@@ -354,8 +355,6 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
val |= 0x4;
write_short_reg(devrec, REG_TXNCON, val);
- INIT_COMPLETION(devrec->tx_complete);
-
/* Wait for the device to send the TX complete interrupt. */
ret = wait_for_completion_interruptible_timeout(
&devrec->tx_complete,
@@ -590,17 +589,6 @@ static struct ieee802154_ops mrf24j40_ops = {
static irqreturn_t mrf24j40_isr(int irq, void *data)
{
struct mrf24j40 *devrec = data;
-
- disable_irq_nosync(irq);
-
- schedule_work(&devrec->irqwork);
-
- return IRQ_HANDLED;
-}
-
-static void mrf24j40_isrwork(struct work_struct *work)
-{
- struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
u8 intstat;
int ret;
@@ -618,7 +606,7 @@ static void mrf24j40_isrwork(struct work_struct *work)
mrf24j40_handle_rx(devrec);
out:
- enable_irq(devrec->spi->irq);
+ return IRQ_HANDLED;
}
static int mrf24j40_probe(struct spi_device *spi)
@@ -642,7 +630,6 @@ static int mrf24j40_probe(struct spi_device *spi)
mutex_init(&devrec->buffer_mutex);
init_completion(&devrec->tx_complete);
- INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
devrec->spi = spi;
spi_set_drvdata(spi, devrec);
@@ -688,11 +675,12 @@ static int mrf24j40_probe(struct spi_device *spi)
val &= ~0x3; /* Clear RX mode (normal) */
write_short_reg(devrec, REG_RXMCR, val);
- ret = request_irq(spi->irq,
- mrf24j40_isr,
- IRQF_TRIGGER_FALLING,
- dev_name(&spi->dev),
- devrec);
+ ret = request_threaded_irq(spi->irq,
+ NULL,
+ mrf24j40_isr,
+ IRQF_TRIGGER_LOW|IRQF_ONESHOT,
+ dev_name(&spi->dev),
+ devrec);
if (ret) {
dev_err(printdev(devrec), "Unable to get IRQ");
@@ -721,7 +709,6 @@ static int mrf24j40_remove(struct spi_device *spi)
dev_dbg(printdev(devrec), "remove\n");
free_irq(spi->irq, devrec);
- flush_work(&devrec->irqwork); /* TODO: Is this the right call? */
ieee802154_unregister_device(devrec->dev);
ieee802154_free_device(devrec->dev);
/* TODO: Will ieee802154_free_device() wait until ->xmit() is
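The mrf24j40 change above folds the old hard-IRQ handler plus workqueue into one threaded handler: the hard handler is NULL, so IRQF_ONESHOT keeps the level-triggered line masked until the thread function returns, and the thread context is free to sleep on SPI transfers. A minimal sketch of that pattern, with hypothetical names:

#include <linux/interrupt.h>

struct demo_dev {
        int irq;
};

static irqreturn_t demo_thread_fn(int irq, void *data)
{
        /* runs in process context and may sleep, e.g. for SPI I/O */
        return IRQ_HANDLED;
}

static int demo_attach_irq(struct demo_dev *dev)
{
        /* a NULL primary handler requires IRQF_ONESHOT so the level IRQ
         * cannot retrigger before the thread has serviced the device */
        return request_threaded_irq(dev->irq, NULL, demo_thread_fn,
                                    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                    "demo", dev);
}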
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 7bbd318bc93e..befa45f809c3 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -627,7 +627,7 @@ static int ali_ircc_setup(chipio_t *info)
/*
* Function ali_ircc_read_dongle_id (int index, info)
*
- * Try to read dongle indentification. This procedure needs to be executed
+ * Try to read dongle identification. This procedure needs to be executed
* once after power-on/reset. It also needs to be used whenever you suspect
* that the user may have plugged/unplugged the IrDA Dongle.
*/
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index c74f384c87d5..303c4bd26e17 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -411,12 +411,12 @@ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
#else
- if (request_irq(port->irq, bfin_sir_rx_int, IRQF_DISABLED, "BFIN_SIR_RX", dev)) {
+ if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) {
dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
return -EBUSY;
}
- if (request_irq(port->irq+1, bfin_sir_tx_int, IRQF_DISABLED, "BFIN_SIR_TX", dev)) {
+ if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) {
dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
free_irq(port->irq, dev);
return -EBUSY;
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 31bcb98ef356..768dfe9a9315 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1352,7 +1352,7 @@ toshoboe_net_open (struct net_device *dev)
return 0;
rc = request_irq (self->io.irq, toshoboe_interrupt,
- IRQF_SHARED | IRQF_DISABLED, dev->name, self);
+ IRQF_SHARED, dev->name, self);
if (rc)
return rc;
@@ -1559,7 +1559,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
self->io.fir_base = self->base;
self->io.fir_ext = OBOE_IO_EXTENT;
self->io.irq = pci_dev->irq;
- self->io.irqflags = IRQF_SHARED | IRQF_DISABLED;
+ self->io.irqflags = IRQF_SHARED;
self->speed = self->io.speed = 9600;
self->async = 0;
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index ceeb53737f86..66bc03bdb138 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1035,7 +1035,7 @@ static int nsc_ircc_setup(chipio_t *info)
/*
* Function nsc_ircc_read_dongle_id (void)
*
- * Try to read dongle indentification. This procedure needs to be executed
+ * Try to read dongle identification. This procedure needs to be executed
* once after power-on/reset. It also needs to be used whenever you suspect
* that the user may have plugged/unplugged the IrDA Dongle.
*/
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4455425f1c77..ff45cd0d60e8 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -804,7 +804,7 @@ static int sh_irda_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
- err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
+ err = request_irq(irq, sh_irda_irq, 0, "sh_irda", self);
if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
goto err_mem_4;
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 89682b49900f..8d9ae5a086d5 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -761,7 +761,7 @@ static int sh_sir_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
- err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self);
+ err = request_irq(irq, sh_sir_irq, 0, "sh_sir", self);
if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
goto err_mem_4;
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index 6d5b1e2b1289..f50b9c1c0639 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -102,28 +102,29 @@ struct sir_driver {
/* exported */
-extern int irda_register_dongle(struct dongle_driver *new);
-extern int irda_unregister_dongle(struct dongle_driver *drv);
+int irda_register_dongle(struct dongle_driver *new);
+int irda_unregister_dongle(struct dongle_driver *drv);
-extern struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name);
-extern int sirdev_put_instance(struct sir_dev *self);
+struct sir_dev *sirdev_get_instance(const struct sir_driver *drv,
+ const char *name);
+int sirdev_put_instance(struct sir_dev *self);
-extern int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
-extern void sirdev_write_complete(struct sir_dev *dev);
-extern int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
+int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
+void sirdev_write_complete(struct sir_dev *dev);
+int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
/* low level helpers for SIR device/dongle setup */
-extern int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
-extern int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
-extern int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
+int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
+int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
+int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
/* not exported */
-extern int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
-extern int sirdev_put_dongle(struct sir_dev *self);
+int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
+int sirdev_put_dongle(struct sir_dev *self);
-extern void sirdev_enable_rx(struct sir_dev *dev);
-extern int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
+void sirdev_enable_rx(struct sir_dev *dev);
+int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
/* inline helpers */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 9bf46bd19b87..cc9845ec91c1 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -828,22 +828,21 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
eth_hw_addr_inherit(dev, lowerdev);
}
+ port->count += 1;
+ err = register_netdevice(dev);
+ if (err < 0)
+ goto destroy_port;
+
err = netdev_upper_dev_link(lowerdev, dev);
if (err)
goto destroy_port;
- port->count += 1;
- err = register_netdevice(dev);
- if (err < 0)
- goto upper_dev_unlink;
list_add_tail_rcu(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
-upper_dev_unlink:
- netdev_upper_dev_unlink(lowerdev, dev);
destroy_port:
port->count -= 1;
if (!port->count)
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index adeee615dd19..ba2f5e710af1 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -34,6 +34,8 @@
*
****************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -310,6 +312,7 @@ static ssize_t store_enabled(struct netconsole_target *nt,
const char *buf,
size_t count)
{
+ unsigned long flags;
int enabled;
int err;
@@ -319,14 +322,12 @@ static ssize_t store_enabled(struct netconsole_target *nt,
if (enabled < 0 || enabled > 1)
return -EINVAL;
if (enabled == nt->enabled) {
- printk(KERN_INFO "netconsole: network logging has already %s\n",
- nt->enabled ? "started" : "stopped");
+ pr_info("network logging has already %s\n",
+ nt->enabled ? "started" : "stopped");
return -EINVAL;
}
- mutex_lock(&nt->mutex);
if (enabled) { /* 1 */
-
/*
* Skip netpoll_parse_options() -- all the attributes are
* already configured via configfs. Just print them out.
@@ -334,19 +335,22 @@ static ssize_t store_enabled(struct netconsole_target *nt,
netpoll_print_options(&nt->np);
err = netpoll_setup(&nt->np);
- if (err) {
- mutex_unlock(&nt->mutex);
+ if (err)
return err;
- }
-
- printk(KERN_INFO "netconsole: network logging started\n");
+ pr_info("netconsole: network logging started\n");
} else { /* 0 */
+ /* We need to disable the netconsole before cleaning it up
+ * otherwise we might end up in write_msg() with
+ * nt->np.dev == NULL and nt->enabled == 1
+ */
+ spin_lock_irqsave(&target_list_lock, flags);
+ nt->enabled = 0;
+ spin_unlock_irqrestore(&target_list_lock, flags);
netpoll_cleanup(&nt->np);
}
nt->enabled = enabled;
- mutex_unlock(&nt->mutex);
return strnlen(buf, count);
}
@@ -358,9 +362,8 @@ static ssize_t store_dev_name(struct netconsole_target *nt,
size_t len;
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -381,9 +384,8 @@ static ssize_t store_local_port(struct netconsole_target *nt,
int rv;
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -400,9 +402,8 @@ static ssize_t store_remote_port(struct netconsole_target *nt,
int rv;
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -417,9 +418,8 @@ static ssize_t store_local_ip(struct netconsole_target *nt,
size_t count)
{
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -427,7 +427,7 @@ static ssize_t store_local_ip(struct netconsole_target *nt,
const char *end;
if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
- printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end);
+ pr_err("invalid IPv6 address at: <%c>\n", *end);
return -EINVAL;
}
nt->np.ipv6 = true;
@@ -448,9 +448,8 @@ static ssize_t store_remote_ip(struct netconsole_target *nt,
size_t count)
{
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -458,7 +457,7 @@ static ssize_t store_remote_ip(struct netconsole_target *nt,
const char *end;
if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
- printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end);
+ pr_err("invalid IPv6 address at: <%c>\n", *end);
return -EINVAL;
}
nt->np.ipv6 = true;
@@ -481,9 +480,8 @@ static ssize_t store_remote_mac(struct netconsole_target *nt,
u8 remote_mac[ETH_ALEN];
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -563,8 +561,10 @@ static ssize_t netconsole_target_attr_store(struct config_item *item,
struct netconsole_target_attr *na =
container_of(attr, struct netconsole_target_attr, attr);
+ mutex_lock(&nt->mutex);
if (na->store)
ret = na->store(nt, buf, count);
+ mutex_unlock(&nt->mutex);
return ret;
}
@@ -704,19 +704,20 @@ restart:
}
spin_unlock_irqrestore(&target_list_lock, flags);
if (stopped) {
- printk(KERN_INFO "netconsole: network logging stopped on "
- "interface %s as it ", dev->name);
+ const char *msg = "had an event";
switch (event) {
case NETDEV_UNREGISTER:
- printk(KERN_CONT "unregistered\n");
+ msg = "unregistered";
break;
case NETDEV_RELEASE:
- printk(KERN_CONT "released slaves\n");
+ msg = "released slaves";
break;
case NETDEV_JOIN:
- printk(KERN_CONT "is joining a master device\n");
+ msg = "is joining a master device";
break;
}
+ pr_info("network logging stopped on interface %s as it %s\n",
+ dev->name, msg);
}
done:
@@ -802,7 +803,7 @@ static int __init init_netconsole(void)
goto undonotifier;
register_console(&netconsole);
- printk(KERN_INFO "netconsole: network logging started\n");
+ pr_info("network logging started\n");
return err;
@@ -810,7 +811,7 @@ undonotifier:
unregister_netdevice_notifier(&netconsole_netdev_notifier);
fail:
- printk(KERN_ERR "netconsole: cleaning up\n");
+ pr_err("cleaning up\n");
/*
* Remove all targets and destroy them (only targets created
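The comment added to store_enabled() above carries the key ordering rule: clear nt->enabled under target_list_lock, which the console write path (write_msg()) also takes, before calling netpoll_cleanup(), so no message can race against a half-torn-down target. A sketch of just that ordering, reusing the file-local netconsole_target and target_list_lock with a hypothetical wrapper name:

static void demo_disable_target(struct netconsole_target *nt)
{
        unsigned long flags;

        spin_lock_irqsave(&target_list_lock, flags);
        nt->enabled = 0;                /* write_msg() now skips this target */
        spin_unlock_irqrestore(&target_list_lock, flags);

        netpoll_cleanup(&nt->np);       /* no writer can still reach nt->np */
}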
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index ac22283aaf23..bc71947b1ec3 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -100,6 +100,45 @@ static void at803x_get_wol(struct phy_device *phydev,
wol->wolopts |= WAKE_MAGIC;
}
+static int at803x_suspend(struct phy_device *phydev)
+{
+ int value;
+ int wol_enabled;
+
+ mutex_lock(&phydev->lock);
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+ wol_enabled = value & AT803X_WOL_ENABLE;
+
+ value = phy_read(phydev, MII_BMCR);
+
+ if (wol_enabled)
+ value |= BMCR_ISOLATE;
+ else
+ value |= BMCR_PDOWN;
+
+ phy_write(phydev, MII_BMCR, value);
+
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+}
+
+static int at803x_resume(struct phy_device *phydev)
+{
+ int value;
+
+ mutex_lock(&phydev->lock);
+
+ value = phy_read(phydev, MII_BMCR);
+ value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
+ phy_write(phydev, MII_BMCR, value);
+
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+}
+
static int at803x_config_init(struct phy_device *phydev)
{
int val;
@@ -161,10 +200,12 @@ static struct phy_driver at803x_driver[] = {
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
.driver = {
.owner = THIS_MODULE,
},
@@ -176,10 +217,12 @@ static struct phy_driver at803x_driver[] = {
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
.driver = {
.owner = THIS_MODULE,
},
@@ -191,10 +234,12 @@ static struct phy_driver at803x_driver[] = {
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
.driver = {
.owner = THIS_MODULE,
},
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2e91477362d4..2e3c778ea9bf 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -34,9 +34,9 @@
#include <linux/marvell_phy.h>
#include <linux/of.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define MII_MARVELL_PHY_PAGE 22
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index dc920974204e..56178761ce93 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -438,17 +438,19 @@ phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
}
+static DEVICE_ATTR_RO(phy_id);
-static struct device_attribute mdio_dev_attrs[] = {
- __ATTR_RO(phy_id),
- __ATTR_NULL
+static struct attribute *mdio_dev_attrs[] = {
+ &dev_attr_phy_id.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(mdio_dev);
struct bus_type mdio_bus_type = {
.name = "mdio_bus",
.match = mdio_bus_match,
.pm = MDIO_BUS_PM_OPS,
- .dev_attrs = mdio_dev_attrs,
+ .dev_groups = mdio_dev_groups,
};
EXPORT_SYMBOL(mdio_bus_type);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c31aad0004cb..3ae28f420868 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -287,6 +287,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ks8737_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8021,
@@ -300,6 +302,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8031,
@@ -313,6 +317,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8041,
@@ -326,6 +332,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8051,
@@ -339,6 +347,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8001,
@@ -351,6 +361,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8081,
@@ -363,6 +375,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8061,
@@ -375,6 +389,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ9021,
@@ -387,6 +403,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ksz9021_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
}, {
.phy_id = PHY_ID_KSZ9031,
@@ -400,6 +418,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ksz9021_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
}, {
.phy_id = PHY_ID_KSZ8873MLL,
@@ -410,6 +430,8 @@ static struct phy_driver ksphy_driver[] = {
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
}, {
.phy_id = PHY_ID_KSZ886X,
@@ -420,6 +442,8 @@ static struct phy_driver ksphy_driver[] = {
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
} };
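
The micrel hunks wire every driver entry up to the generic PHY power-management helpers. Roughly, genphy_suspend() sets the IEEE-standard power-down bit in BMCR and genphy_resume() clears it; a hedged sketch of that idea (not the in-tree implementation) is:

/* Sketch only: toggle the BMCR power-down bit, which is what the
 * generic suspend/resume helpers boil down to.
 */
static int example_phy_set_powerdown(struct phy_device *phydev, bool down)
{
	int bmcr = phy_read(phydev, MII_BMCR);

	if (bmcr < 0)
		return bmcr;
	if (down)
		bmcr |= BMCR_PDOWN;
	else
		bmcr &= ~BMCR_PDOWN;
	return phy_write(phydev, MII_BMCR, bmcr);
}
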
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 1f7bef90b467..7b4ff35c8bf7 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1002,7 +1002,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
/* Any address will do - we take the first */
const struct in_ifaddr *ifa = in_dev->ifa_list;
if (ifa) {
- memcpy(eth->h_source, dev->dev_addr, 6);
+ memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
memset(eth->h_dest, 0xfc, 2);
memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 807815fc9968..7cb105c103fe 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1293,7 +1293,8 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
if (unlikely(!noblock))
add_wait_queue(&tfile->wq.wait, &wait);
while (len) {
- current->state = TASK_INTERRUPTIBLE;
+ if (unlikely(!noblock))
+ current->state = TASK_INTERRUPTIBLE;
/* Read frames from the queue */
if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
@@ -1320,9 +1321,10 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
break;
}
- current->state = TASK_RUNNING;
- if (unlikely(!noblock))
+ if (unlikely(!noblock)) {
+ current->state = TASK_RUNNING;
remove_wait_queue(&tfile->wq.wait, &wait);
+ }
return ret;
}
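
The tun hunks make the read loop touch the task state only when the caller is actually allowed to block. A minimal sketch of the resulting pattern, with hypothetical queue and wait-head parameters:

/* Sketch: set TASK_INTERRUPTIBLE before re-checking the queue, and only
 * when blocking is permitted, so non-blocking readers never sleep and
 * never leave the task state dirty on return.
 */
static struct sk_buff *example_dequeue(struct sk_buff_head *q,
				       wait_queue_head_t *wq, bool noblock)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sk_buff *skb;

	if (!noblock)
		add_wait_queue(wq, &wait);
	for (;;) {
		if (!noblock)
			set_current_state(TASK_INTERRUPTIBLE);
		skb = skb_dequeue(q);
		if (skb || noblock || signal_pending(current))
			break;
		schedule();
	}
	if (!noblock) {
		__set_current_state(TASK_RUNNING);
		remove_wait_queue(wq, &wait);
	}
	return skb;
}
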
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 3569293df872..8e8d0fcd4979 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -36,8 +36,8 @@
#define AX_RXHDR_L4_TYPE_TCP 16
#define AX_RXHDR_L3CSUM_ERR 2
#define AX_RXHDR_L4CSUM_ERR 1
-#define AX_RXHDR_CRC_ERR ((u32)BIT(31))
-#define AX_RXHDR_DROP_ERR ((u32)BIT(30))
+#define AX_RXHDR_CRC_ERR ((u32)BIT(29))
+#define AX_RXHDR_DROP_ERR ((u32)BIT(31))
#define AX_ACCESS_MAC 0x01
#define AX_ACCESS_PHY 0x02
#define AX_ACCESS_EEPROM 0x04
@@ -78,7 +78,6 @@
#define AX_MEDIUM_STATUS_MODE 0x22
#define AX_MEDIUM_GIGAMODE 0x01
#define AX_MEDIUM_FULL_DUPLEX 0x02
- #define AX_MEDIUM_ALWAYS_ONE 0x04
#define AX_MEDIUM_EN_125MHZ 0x08
#define AX_MEDIUM_RXFLOW_CTRLEN 0x10
#define AX_MEDIUM_TXFLOW_CTRLEN 0x20
@@ -1065,8 +1064,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
/* Configure default medium type => giga */
*tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
- AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
- AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
+ AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_FULL_DUPLEX |
+ AX_MEDIUM_GIGAMODE;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
2, 2, tmp16);
@@ -1225,7 +1224,7 @@ static int ax88179_link_reset(struct usbnet *dev)
}
mode = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
- AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE;
+ AX_MEDIUM_RXFLOW_CTRLEN;
ax88179_read_cmd(dev, AX_ACCESS_MAC, PHYSICAL_LINK_STATUS,
1, 1, &link_sts);
@@ -1339,8 +1338,8 @@ static int ax88179_reset(struct usbnet *dev)
/* Configure default medium type => giga */
*tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
- AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
- AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
+ AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_FULL_DUPLEX |
+ AX_MEDIUM_GIGAMODE;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
2, 2, tmp16);
@@ -1406,6 +1405,19 @@ static const struct driver_info sitecom_info = {
.tx_fixup = ax88179_tx_fixup,
};
+static const struct driver_info samsung_info = {
+ .description = "Samsung USB Ethernet Adapter",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
static const struct usb_device_id products[] = {
{
/* ASIX AX88179 10/100/1000 */
@@ -1418,7 +1430,11 @@ static const struct usb_device_id products[] = {
}, {
/* Sitecom USB 3.0 to Gigabit Adapter */
USB_DEVICE(0x0df6, 0x0072),
- .driver_info = (unsigned long) &sitecom_info,
+ .driver_info = (unsigned long)&sitecom_info,
+}, {
+ /* Samsung USB Ethernet Adapter */
+ USB_DEVICE(0x04e8, 0xa100),
+ .driver_info = (unsigned long)&samsung_info,
},
{ },
};
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 8d5cac2d8e33..df507e6dbb9c 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -640,10 +640,10 @@ static void catc_set_multicast_list(struct net_device *netdev)
{
struct catc *catc = netdev_priv(netdev);
struct netdev_hw_addr *ha;
- u8 broadcast[6];
+ u8 broadcast[ETH_ALEN];
u8 rx = RxEnable | RxPolarity | RxMultiCast;
- memset(broadcast, 0xff, 6);
+ memset(broadcast, 0xff, ETH_ALEN);
memset(catc->multicast, 0, 64);
catc_multicast(broadcast, catc->multicast);
@@ -778,7 +778,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
struct usb_device *usbdev = interface_to_usbdev(intf);
struct net_device *netdev;
struct catc *catc;
- u8 broadcast[6];
+ u8 broadcast[ETH_ALEN];
int i, pktsz;
if (usb_set_interface(usbdev,
@@ -882,7 +882,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
dev_dbg(dev, "Filling the multicast list.\n");
- memset(broadcast, 0xff, 6);
+ memset(broadcast, 0xff, ETH_ALEN);
catc_multicast(broadcast, catc->multicast);
catc_multicast(netdev->dev_addr, catc->multicast);
catc_write_mem(catc, 0xfa80, catc->multicast, 64);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 7d78669000d7..6358d420e185 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -328,7 +328,7 @@ MODULE_DEVICE_TABLE(usb, usbpn_ids);
static struct usb_driver usbpn_driver;
-int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
static const char ifname[] = "usbpn%d";
const struct usb_cdc_union_desc *union_header = NULL;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 25ba7eca9a13..c9f3281506af 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -21,6 +21,8 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc-wdm.h>
#include <linux/usb/cdc_ncm.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
/* driver specific data - must match cdc_ncm usage */
struct cdc_mbim_state {
@@ -42,13 +44,11 @@ static int cdc_mbim_manage_power(struct usbnet *dev, int on)
if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
/* need autopm_get/put here to ensure the usbcore sees the new value */
rv = usb_autopm_get_interface(dev->intf);
- if (rv < 0)
- goto err;
dev->intf->needs_remote_wakeup = on;
- usb_autopm_put_interface(dev->intf);
+ if (!rv)
+ usb_autopm_put_interface(dev->intf);
}
-err:
- return rv;
+ return 0;
}
static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
@@ -173,7 +173,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
}
spin_lock_bh(&ctx->mtx);
- skb_out = cdc_ncm_fill_tx_frame(ctx, skb, sign);
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, sign);
spin_unlock_bh(&ctx->mtx);
return skb_out;
@@ -184,6 +184,60 @@ error:
return NULL;
}
+/* Some devices are known to send Neighbor Solicitation messages and
+ * require Neighbor Advertisement replies. The IPv6 core will not
+ * respond since IFF_NOARP is set, so we must handle them ourselves.
+ */
+static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
+{
+ struct ipv6hdr *iph = (void *)buf;
+ struct nd_msg *msg = (void *)(iph + 1);
+ struct net_device *netdev;
+ struct inet6_dev *in6_dev;
+ bool is_router;
+
+ /* we'll only respond to requests from unicast addresses to
+ * our solicited node addresses.
+ */
+ if (!ipv6_addr_is_solict_mult(&iph->daddr) ||
+ !(ipv6_addr_type(&iph->saddr) & IPV6_ADDR_UNICAST))
+ return;
+
+ /* need to send the NA on the VLAN dev, if any */
+ if (tci)
+ netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
+ tci);
+ else
+ netdev = dev->net;
+ if (!netdev)
+ return;
+
+ in6_dev = in6_dev_get(netdev);
+ if (!in6_dev)
+ return;
+ is_router = !!in6_dev->cnf.forwarding;
+ in6_dev_put(in6_dev);
+
+ /* ipv6_stub != NULL if in6_dev_get returned an inet6_dev */
+ ipv6_stub->ndisc_send_na(netdev, NULL, &iph->saddr, &msg->target,
+ is_router /* router */,
+ true /* solicited */,
+ false /* override */,
+ true /* inc_opt */);
+}
+
+static bool is_neigh_solicit(u8 *buf, size_t len)
+{
+ struct ipv6hdr *iph = (void *)buf;
+ struct nd_msg *msg = (void *)(iph + 1);
+
+ return (len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
+ iph->nexthdr == IPPROTO_ICMPV6 &&
+ msg->icmph.icmp6_code == 0 &&
+ msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION);
+}
+
+
static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_t len, u16 tci)
{
__be16 proto = htons(ETH_P_802_3);
@@ -198,6 +252,8 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
proto = htons(ETH_P_IP);
break;
case 0x60:
+ if (is_neigh_solicit(buf, len))
+ do_neigh_solicit(dev, buf, tci);
proto = htons(ETH_P_IPV6);
break;
default:
@@ -313,15 +369,13 @@ error:
static int cdc_mbim_suspend(struct usb_interface *intf, pm_message_t message)
{
- int ret = 0;
+ int ret = -ENODEV;
struct usbnet *dev = usb_get_intfdata(intf);
struct cdc_mbim_state *info = (void *)&dev->data;
struct cdc_ncm_ctx *ctx = info->ctx;
- if (ctx == NULL) {
- ret = -1;
+ if (!ctx)
goto error;
- }
/*
* Both usbnet_suspend() and subdriver->suspend() MUST return 0
@@ -354,7 +408,7 @@ static int cdc_mbim_resume(struct usb_interface *intf)
if (ret < 0)
goto err;
ret = usbnet_resume(intf);
- if (ret < 0 && callsub && info->subdriver->suspend)
+ if (ret < 0 && callsub)
info->subdriver->suspend(intf, PMSG_SUSPEND);
err:
return ret;
@@ -371,9 +425,18 @@ static const struct driver_info cdc_mbim_info = {
};
/* MBIM and NCM devices should not need a ZLP after NTBs with
- * dwNtbOutMaxSize length. This driver_info is for the exceptional
- * devices requiring it anyway, allowing them to be supported without
- * forcing the performance penalty on all the sane devices.
+ * dwNtbOutMaxSize length. Nevertheless, a number of devices from
+ * different vendor IDs will fail unless we send ZLPs, forcing us
+ * to make this the default.
+ *
+ * This default may cause a performance penalty for spec conforming
+ * devices wanting to take advantage of optimizations possible without
+ * ZLPs. A whitelist is added in an attempt to avoid this for devices
+ * known to conform to the MBIM specification.
+ *
+ * All known devices supporting NCM compatibility mode are also
+ * conforming to the NCM and MBIM specifications. For this reason, the
+ * NCM subclass entry is also in the ZLP whitelist.
*/
static const struct driver_info cdc_mbim_info_zlp = {
.description = "CDC MBIM",
@@ -396,16 +459,13 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
- /* Sierra Wireless MC7710 need ZLPs */
- { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long)&cdc_mbim_info_zlp,
- },
- /* HP hs2434 Mobile Broadband Module needs ZLPs */
- { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+ /* ZLP conformance whitelist: All Ericsson MBIM devices */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info,
},
+ /* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long)&cdc_mbim_info,
+ .driver_info = (unsigned long)&cdc_mbim_info_zlp,
},
{
},
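
Pieced together from the hunk above (the same change is made to qmi_wwan_manage_power() further down), the power-management helper now works like this: the first "on" and the last "off" flip needs_remote_wakeup, the autopm get/put pair exists only so usbcore notices the change, and a failed wakeup is no longer treated as a fatal error. A sketch reconstructed from the diff, not copied from the tree:

/* Reconstructed, post-patch shape of the manage_power helpers: always
 * succeed, and only balance the autopm get with a put when the get
 * itself succeeded.
 */
static int example_manage_power(struct usbnet *dev, atomic_t *pmcount, int on)
{
	int rv;

	if ((on && atomic_add_return(1, pmcount) == 1) ||
	    (!on && atomic_dec_and_test(pmcount))) {
		/* make sure usbcore sees the new needs_remote_wakeup value */
		rv = usb_autopm_get_interface(dev->intf);
		dev->intf->needs_remote_wakeup = on;
		if (!rv)
			usb_autopm_put_interface(dev->intf);
	}
	return 0;
}
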
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 43afde8f48d2..11c703337577 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -53,8 +53,6 @@
#include <linux/usb/cdc.h>
#include <linux/usb/cdc_ncm.h>
-#define DRIVER_VERSION "14-Mar-2012"
-
#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
static bool prefer_mbim = true;
#else
@@ -68,71 +66,67 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
static struct usb_driver cdc_ncm_driver;
-static void
-cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
-{
- struct usbnet *dev = netdev_priv(net);
-
- strlcpy(info->driver, dev->driver_name, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, dev->driver_info->description,
- sizeof(info->fw_version));
- usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
-}
-
-static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
+static u8 cdc_ncm_setup(struct usbnet *dev)
{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ struct usb_cdc_ncm_ntb_parameters ncm_parm;
u32 val;
u8 flags;
u8 iface_no;
int err;
int eth_hlen;
u16 ntb_fmt_supported;
- u32 min_dgram_size;
- u32 min_hdr_size;
- struct usbnet *dev = netdev_priv(ctx->netdev);
+ __le16 max_datagram_size;
iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
USB_TYPE_CLASS | USB_DIR_IN
|USB_RECIP_INTERFACE,
- 0, iface_no, &ctx->ncm_parm,
- sizeof(ctx->ncm_parm));
+ 0, iface_no, &ncm_parm,
+ sizeof(ncm_parm));
if (err < 0) {
- pr_debug("failed GET_NTB_PARAMETERS\n");
- return 1;
+ dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
+ return err; /* GET_NTB_PARAMETERS is required */
}
/* read correct set of parameters according to device mode */
- ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
- ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
- ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
- ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
- ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+ ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize);
+ ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize);
+ ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder);
+ ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor);
+ ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment);
/* devices prior to NCM Errata shall set this field to zero */
- ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
- ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
+ ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams);
+ ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported);
- eth_hlen = ETH_HLEN;
- min_dgram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
- min_hdr_size = CDC_NCM_MIN_HDR_SIZE;
- if (ctx->mbim_desc != NULL) {
- flags = ctx->mbim_desc->bmNetworkCapabilities;
+ /* there are some minor differences in NCM and MBIM defaults */
+ if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
+ if (!ctx->mbim_desc)
+ return -EINVAL;
eth_hlen = 0;
- min_dgram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
- min_hdr_size = 0;
- } else if (ctx->func_desc != NULL) {
- flags = ctx->func_desc->bmNetworkCapabilities;
+ flags = ctx->mbim_desc->bmNetworkCapabilities;
+ ctx->max_datagram_size = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
+ if (ctx->max_datagram_size < CDC_MBIM_MIN_DATAGRAM_SIZE)
+ ctx->max_datagram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
} else {
- flags = 0;
+ if (!ctx->func_desc)
+ return -EINVAL;
+ eth_hlen = ETH_HLEN;
+ flags = ctx->func_desc->bmNetworkCapabilities;
+ ctx->max_datagram_size = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+ if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
+ ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
}
- pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
- "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
- "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
- ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
- ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
+ /* common absolute max for NCM and MBIM */
+ if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
+ ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
+
+ dev_dbg(&dev->intf->dev,
+ "dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
+ ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
+ ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
/* max count of tx datagrams */
if ((ctx->tx_max_datagrams == 0) ||
@@ -141,19 +135,19 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* verify maximum size of received NTB in bytes */
if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
- pr_debug("Using min receive length=%d\n",
- USB_CDC_NCM_NTB_MIN_IN_SIZE);
+ dev_dbg(&dev->intf->dev, "Using min receive length=%d\n",
+ USB_CDC_NCM_NTB_MIN_IN_SIZE);
ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
}
if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
- pr_debug("Using default maximum receive length=%d\n",
- CDC_NCM_NTB_MAX_SIZE_RX);
+ dev_dbg(&dev->intf->dev, "Using default maximum receive length=%d\n",
+ CDC_NCM_NTB_MAX_SIZE_RX);
ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
}
/* inform device about NTB input size changes */
- if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+ if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) {
__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
@@ -161,16 +155,22 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
| USB_RECIP_INTERFACE,
0, iface_no, &dwNtbInMaxSize, 4);
if (err < 0)
- pr_debug("Setting NTB Input Size failed\n");
+ dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
}
/* verify maximum size of transmitted NTB in bytes */
- if ((ctx->tx_max <
- (min_hdr_size + min_dgram_size)) ||
- (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
- pr_debug("Using default maximum transmit length=%d\n",
- CDC_NCM_NTB_MAX_SIZE_TX);
+ if (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX) {
+ dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
+ CDC_NCM_NTB_MAX_SIZE_TX);
ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+
+ /* Adding a pad byte here simplifies the handling in
+ * cdc_ncm_fill_tx_frame, by making tx_max always
+ * represent the real skb max size.
+ */
+ if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+ ctx->tx_max++;
+
}
/*
@@ -183,7 +183,7 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
(val != ((-val) & val)) || (val >= ctx->tx_max)) {
- pr_debug("Using default alignment: 4 bytes\n");
+ dev_dbg(&dev->intf->dev, "Using default alignment: 4 bytes\n");
ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
}
@@ -197,13 +197,13 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
(val != ((-val) & val)) || (val >= ctx->tx_max)) {
- pr_debug("Using default transmit modulus: 4 bytes\n");
+ dev_dbg(&dev->intf->dev, "Using default transmit modulus: 4 bytes\n");
ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
}
/* verify the payload remainder */
if (ctx->tx_remainder >= ctx->tx_modulus) {
- pr_debug("Using default transmit remainder: 0 bytes\n");
+ dev_dbg(&dev->intf->dev, "Using default transmit remainder: 0 bytes\n");
ctx->tx_remainder = 0;
}
@@ -221,7 +221,7 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
USB_CDC_NCM_CRC_NOT_APPENDED,
iface_no, NULL, 0);
if (err < 0)
- pr_debug("Setting CRC mode off failed\n");
+ dev_dbg(&dev->intf->dev, "Setting CRC mode off failed\n");
}
/* set NTB format, if both formats are supported */
@@ -232,69 +232,43 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
USB_CDC_NCM_NTB16_FORMAT,
iface_no, NULL, 0);
if (err < 0)
- pr_debug("Setting NTB format to 16-bit failed\n");
+ dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit failed\n");
}
- ctx->max_datagram_size = min_dgram_size;
+ /* inform the device about the selected Max Datagram Size */
+ if (!(flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
+ goto out;
- /* set Max Datagram Size (MTU) */
- if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
- __le16 max_datagram_size;
- u16 eth_max_sz;
- if (ctx->ether_desc != NULL)
- eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
- else if (ctx->mbim_desc != NULL)
- eth_max_sz = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
- else
- goto max_dgram_err;
-
- err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
- USB_TYPE_CLASS | USB_DIR_IN
- | USB_RECIP_INTERFACE,
- 0, iface_no, &max_datagram_size, 2);
- if (err < 0) {
- pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
- min_dgram_size);
- } else {
- ctx->max_datagram_size =
- le16_to_cpu(max_datagram_size);
- /* Check Eth descriptor value */
- if (ctx->max_datagram_size > eth_max_sz)
- ctx->max_datagram_size = eth_max_sz;
-
- if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
- ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
-
- if (ctx->max_datagram_size < min_dgram_size)
- ctx->max_datagram_size = min_dgram_size;
-
- /* if value changed, update device */
- if (ctx->max_datagram_size !=
- le16_to_cpu(max_datagram_size)) {
- err = usbnet_write_cmd(dev,
- USB_CDC_SET_MAX_DATAGRAM_SIZE,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- 0,
- iface_no, &max_datagram_size,
- 2);
- if (err < 0)
- pr_debug("SET_MAX_DGRAM_SIZE failed\n");
- }
- }
+ /* read current mtu value from device */
+ err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+ 0, iface_no, &max_datagram_size, 2);
+ if (err < 0) {
+ dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
+ goto out;
}
-max_dgram_err:
- if (ctx->netdev->mtu != (ctx->max_datagram_size - eth_hlen))
- ctx->netdev->mtu = ctx->max_datagram_size - eth_hlen;
+ if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
+ goto out;
+ max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+ err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
+ 0, iface_no, &max_datagram_size, 2);
+ if (err < 0)
+ dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
+
+out:
+ /* set MTU to max supported by the device if necessary */
+ if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
+ dev->net->mtu = ctx->max_datagram_size - eth_hlen;
return 0;
}
static void
-cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
+cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
- struct usb_host_endpoint *e;
+ struct usb_host_endpoint *e, *in = NULL, *out = NULL;
u8 ep;
for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
@@ -303,18 +277,18 @@ cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_INT:
if (usb_endpoint_dir_in(&e->desc)) {
- if (ctx->status_ep == NULL)
- ctx->status_ep = e;
+ if (!dev->status)
+ dev->status = e;
}
break;
case USB_ENDPOINT_XFER_BULK:
if (usb_endpoint_dir_in(&e->desc)) {
- if (ctx->in_ep == NULL)
- ctx->in_ep = e;
+ if (!in)
+ in = e;
} else {
- if (ctx->out_ep == NULL)
- ctx->out_ep = e;
+ if (!out)
+ out = e;
}
break;
@@ -322,6 +296,14 @@ cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
break;
}
}
+ if (in && !dev->in)
+ dev->in = usb_rcvbulkpipe(dev->udev,
+ in->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
+ if (out && !dev->out)
+ dev->out = usb_sndbulkpipe(dev->udev,
+ out->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
}
static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
@@ -342,18 +324,9 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
kfree(ctx);
}
-static const struct ethtool_ops cdc_ncm_ethtool_ops = {
- .get_drvinfo = cdc_ncm_get_drvinfo,
- .get_link = usbnet_get_link,
- .get_msglevel = usbnet_get_msglevel,
- .set_msglevel = usbnet_set_msglevel,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
- .nway_reset = usbnet_nway_reset,
-};
-
int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
{
+ const struct usb_cdc_union_desc *union_desc = NULL;
struct cdc_ncm_ctx *ctx;
struct usb_driver *driver;
u8 *buf;
@@ -367,23 +340,22 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
- ctx->bh.data = (unsigned long)ctx;
+ ctx->bh.data = (unsigned long)dev;
ctx->bh.func = cdc_ncm_txpath_bh;
atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
- ctx->netdev = dev->net;
/* store ctx pointer in device data field */
dev->data[0] = (unsigned long)ctx;
+ /* only the control interface can be successfully probed */
+ ctx->control = intf;
+
/* get some pointers */
driver = driver_of(intf);
buf = intf->cur_altsetting->extra;
len = intf->cur_altsetting->extralen;
- ctx->udev = dev->udev;
- ctx->intf = intf;
-
/* parse through descriptors associated with control interface */
while ((len > 0) && (buf[0] > 2) && (buf[0] <= len)) {
@@ -392,16 +364,18 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
switch (buf[2]) {
case USB_CDC_UNION_TYPE:
- if (buf[0] < sizeof(*(ctx->union_desc)))
+ if (buf[0] < sizeof(*union_desc))
break;
- ctx->union_desc =
- (const struct usb_cdc_union_desc *)buf;
-
- ctx->control = usb_ifnum_to_if(dev->udev,
- ctx->union_desc->bMasterInterface0);
+ union_desc = (const struct usb_cdc_union_desc *)buf;
+ /* the master must be the interface we are probing */
+ if (intf->cur_altsetting->desc.bInterfaceNumber !=
+ union_desc->bMasterInterface0) {
+ dev_dbg(&intf->dev, "bogus CDC Union\n");
+ goto error;
+ }
ctx->data = usb_ifnum_to_if(dev->udev,
- ctx->union_desc->bSlaveInterface0);
+ union_desc->bSlaveInterface0);
break;
case USB_CDC_ETHERNET_TYPE:
@@ -410,13 +384,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
ctx->ether_desc =
(const struct usb_cdc_ether_desc *)buf;
- dev->hard_mtu =
- le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
-
- if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
- dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE;
- else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
- dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE;
break;
case USB_CDC_NCM_TYPE:
@@ -444,69 +411,71 @@ advance:
}
/* some buggy devices have an IAD but no CDC Union */
- if (!ctx->union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
- ctx->control = intf;
+ if (!union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
}
/* check if we got everything */
- if ((ctx->control == NULL) || (ctx->data == NULL) ||
- ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
+ if (!ctx->data || (!ctx->mbim_desc && !ctx->ether_desc)) {
+ dev_dbg(&intf->dev, "CDC descriptors missing\n");
goto error;
+ }
/* claim data interface, if different from control */
if (ctx->data != ctx->control) {
temp = usb_driver_claim_interface(driver, ctx->data, dev);
- if (temp)
+ if (temp) {
+ dev_dbg(&intf->dev, "failed to claim data intf\n");
goto error;
+ }
}
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
/* reset data interface */
temp = usb_set_interface(dev->udev, iface_no, 0);
- if (temp)
- goto error2;
-
- /* initialize data interface */
- if (cdc_ncm_setup(ctx))
+ if (temp) {
+ dev_dbg(&intf->dev, "set interface failed\n");
goto error2;
+ }
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
- if (temp)
+ if (temp) {
+ dev_dbg(&intf->dev, "set interface failed\n");
goto error2;
+ }
- cdc_ncm_find_endpoints(ctx, ctx->data);
- cdc_ncm_find_endpoints(ctx, ctx->control);
-
- if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) ||
- (ctx->status_ep == NULL))
+ cdc_ncm_find_endpoints(dev, ctx->data);
+ cdc_ncm_find_endpoints(dev, ctx->control);
+ if (!dev->in || !dev->out || !dev->status) {
+ dev_dbg(&intf->dev, "failed to collect endpoints\n");
goto error2;
+ }
- dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
+ /* initialize data interface */
+ if (cdc_ncm_setup(dev)) {
+ dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
+ goto error2;
+ }
usb_set_intfdata(ctx->data, dev);
usb_set_intfdata(ctx->control, dev);
- usb_set_intfdata(ctx->intf, dev);
if (ctx->ether_desc) {
temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
- if (temp)
+ if (temp) {
+ dev_dbg(&intf->dev, "failed to get mac address\n");
goto error2;
- dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
+ }
+ dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
}
-
- dev->in = usb_rcvbulkpipe(dev->udev,
- ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
- dev->out = usb_sndbulkpipe(dev->udev,
- ctx->out_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
- dev->status = ctx->status_ep;
+ /* usbnet use these values for sizing tx/rx queues */
+ dev->hard_mtu = ctx->tx_max;
dev->rx_urb_size = ctx->rx_max;
- ctx->tx_speed = ctx->rx_speed = 0;
return 0;
error2:
@@ -517,7 +486,7 @@ error2:
error:
cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
dev->data[0] = 0;
- dev_info(&dev->udev->dev, "bind() failure\n");
+ dev_info(&intf->dev, "bind() failure\n");
return -ENODEV;
}
EXPORT_SYMBOL_GPL(cdc_ncm_bind_common);
@@ -553,7 +522,7 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
ctx->control = NULL;
}
- usb_set_intfdata(ctx->intf, NULL);
+ usb_set_intfdata(intf, NULL);
cdc_ncm_free(ctx);
}
EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
@@ -662,8 +631,9 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
}
struct sk_buff *
-cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
+cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
struct usb_cdc_ncm_nth16 *nth16;
struct usb_cdc_ncm_ndp16 *ndp16;
struct sk_buff *skb_out;
@@ -683,11 +653,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
/* allocate a new OUT skb */
if (!skb_out) {
- skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
+ skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
if (skb_out == NULL) {
if (skb != NULL) {
dev_kfree_skb_any(skb);
- ctx->netdev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
}
goto exit_no_skb;
}
@@ -725,12 +695,12 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
/* won't fit, MTU problem? */
dev_kfree_skb_any(skb);
skb = NULL;
- ctx->netdev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
} else {
/* no room for skb - store for later */
if (ctx->tx_rem_skb != NULL) {
dev_kfree_skb_any(ctx->tx_rem_skb);
- ctx->netdev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
}
ctx->tx_rem_skb = skb;
ctx->tx_rem_sign = sign;
@@ -763,7 +733,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
if (skb != NULL) {
dev_kfree_skb_any(skb);
skb = NULL;
- ctx->netdev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
}
ctx->tx_curr_frame_num = n;
@@ -788,19 +758,20 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
/* variables will be reset at next call */
}
- /*
- * If collected data size is less or equal CDC_NCM_MIN_TX_PKT bytes,
- * we send buffers as it is. If we get more data, it would be more
- * efficient for USB HS mobile device with DMA engine to receive a full
- * size NTB, than canceling DMA transfer and receiving a short packet.
+ /* If collected data size is less or equal CDC_NCM_MIN_TX_PKT
+ * bytes, we send buffers as it is. If we get more data, it
+ * would be more efficient for USB HS mobile device with DMA
+ * engine to receive a full size NTB, than canceling DMA
+ * transfer and receiving a short packet.
+ *
+ * This optimization support is pointless if we end up sending
+ * a ZLP after full sized NTBs.
*/
- if (skb_out->len > CDC_NCM_MIN_TX_PKT)
- /* final zero padding */
- memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, ctx->tx_max - skb_out->len);
-
- /* do we need to prevent a ZLP? */
- if (((skb_out->len % le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0) &&
- (skb_out->len < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)) && skb_tailroom(skb_out))
+ if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
+ skb_out->len > CDC_NCM_MIN_TX_PKT)
+ memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
+ ctx->tx_max - skb_out->len);
+ else if ((skb_out->len % dev->maxpacket) == 0)
*skb_put(skb_out, 1) = 0; /* force short packet */
/* set final frame length */
@@ -809,7 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
/* return skb */
ctx->tx_curr_skb = NULL;
- ctx->netdev->stats.tx_packets += ctx->tx_curr_frame_num;
+ dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
return skb_out;
exit_no_skb:
@@ -841,18 +812,19 @@ static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *timer)
static void cdc_ncm_txpath_bh(unsigned long param)
{
- struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)param;
+ struct usbnet *dev = (struct usbnet *)param;
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
spin_lock_bh(&ctx->mtx);
if (ctx->tx_timer_pending != 0) {
ctx->tx_timer_pending--;
cdc_ncm_tx_timeout_start(ctx);
spin_unlock_bh(&ctx->mtx);
- } else if (ctx->netdev != NULL) {
+ } else if (dev->net != NULL) {
spin_unlock_bh(&ctx->mtx);
- netif_tx_lock_bh(ctx->netdev);
- usbnet_start_xmit(NULL, ctx->netdev);
- netif_tx_unlock_bh(ctx->netdev);
+ netif_tx_lock_bh(dev->net);
+ usbnet_start_xmit(NULL, dev->net);
+ netif_tx_unlock_bh(dev->net);
} else {
spin_unlock_bh(&ctx->mtx);
}
@@ -875,7 +847,7 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
goto error;
spin_lock_bh(&ctx->mtx);
- skb_out = cdc_ncm_fill_tx_frame(ctx, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
spin_unlock_bh(&ctx->mtx);
return skb_out;
@@ -889,6 +861,7 @@ error:
/* verify NTB header and return offset of first NDP, or negative error */
int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
struct usb_cdc_ncm_nth16 *nth16;
int len;
int ret = -EINVAL;
@@ -898,30 +871,33 @@ int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth16) +
sizeof(struct usb_cdc_ncm_ndp16))) {
- pr_debug("frame too short\n");
+ netif_dbg(dev, rx_err, dev->net, "frame too short\n");
goto error;
}
nth16 = (struct usb_cdc_ncm_nth16 *)skb_in->data;
- if (le32_to_cpu(nth16->dwSignature) != USB_CDC_NCM_NTH16_SIGN) {
- pr_debug("invalid NTH16 signature <%u>\n",
- le32_to_cpu(nth16->dwSignature));
+ if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid NTH16 signature <%#010x>\n",
+ le32_to_cpu(nth16->dwSignature));
goto error;
}
len = le16_to_cpu(nth16->wBlockLength);
if (len > ctx->rx_max) {
- pr_debug("unsupported NTB block length %u/%u\n", len,
- ctx->rx_max);
+ netif_dbg(dev, rx_err, dev->net,
+ "unsupported NTB block length %u/%u\n", len,
+ ctx->rx_max);
goto error;
}
if ((ctx->rx_seq + 1) != le16_to_cpu(nth16->wSequence) &&
- (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&
- !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence))) {
- pr_debug("sequence number glitch prev=%d curr=%d\n",
- ctx->rx_seq, le16_to_cpu(nth16->wSequence));
+ (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&
+ !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence))) {
+ netif_dbg(dev, rx_err, dev->net,
+ "sequence number glitch prev=%d curr=%d\n",
+ ctx->rx_seq, le16_to_cpu(nth16->wSequence));
}
ctx->rx_seq = le16_to_cpu(nth16->wSequence);
@@ -934,18 +910,20 @@ EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth16);
/* verify NDP header and return number of datagrams, or negative error */
int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
struct usb_cdc_ncm_ndp16 *ndp16;
int ret = -EINVAL;
if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
- pr_debug("invalid NDP offset <%u>\n", ndpoffset);
+ netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n",
+ ndpoffset);
goto error;
}
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
- pr_debug("invalid DPT16 length <%u>\n",
- le32_to_cpu(ndp16->dwSignature));
+ netif_dbg(dev, rx_err, dev->net, "invalid DPT16 length <%u>\n",
+ le16_to_cpu(ndp16->wLength));
goto error;
}
@@ -954,9 +932,9 @@ int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
sizeof(struct usb_cdc_ncm_dpe16));
ret--; /* we process NDP entries except for the last one */
- if ((sizeof(struct usb_cdc_ncm_ndp16) + ret * (sizeof(struct usb_cdc_ncm_dpe16))) >
- skb_in->len) {
- pr_debug("Invalid nframes = %d\n", ret);
+ if ((sizeof(struct usb_cdc_ncm_ndp16) +
+ ret * (sizeof(struct usb_cdc_ncm_dpe16))) > skb_in->len) {
+ netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret);
ret = -EINVAL;
}
@@ -989,9 +967,10 @@ next_ndp:
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
- if (le32_to_cpu(ndp16->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN) {
- pr_debug("invalid DPT16 signature <%u>\n",
- le32_to_cpu(ndp16->dwSignature));
+ if (ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid DPT16 signature <%#010x>\n",
+ le32_to_cpu(ndp16->dwSignature));
goto err_ndp;
}
dpe16 = ndp16->dpe16;
@@ -1013,9 +992,9 @@ next_ndp:
/* sanity checking */
if (((offset + len) > skb_in->len) ||
(len > ctx->rx_max) || (len < ETH_HLEN)) {
- pr_debug("invalid frame detected (ignored)"
- "offset[%u]=%u, length=%u, skb=%p\n",
- x, offset, len, skb_in);
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
+ x, offset, len, skb_in);
if (!x)
goto err_ndp;
break;
@@ -1042,7 +1021,7 @@ error:
}
static void
-cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
+cdc_ncm_speed_change(struct usbnet *dev,
struct usb_cdc_speed_change *data)
{
uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
@@ -1052,25 +1031,16 @@ cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
* Currently the USB-NET API does not support reporting the actual
* device speed. Do print it instead.
*/
- if ((tx_speed != ctx->tx_speed) || (rx_speed != ctx->rx_speed)) {
- ctx->tx_speed = tx_speed;
- ctx->rx_speed = rx_speed;
-
- if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
- printk(KERN_INFO KBUILD_MODNAME
- ": %s: %u mbit/s downlink "
- "%u mbit/s uplink\n",
- ctx->netdev->name,
- (unsigned int)(rx_speed / 1000000U),
- (unsigned int)(tx_speed / 1000000U));
- } else {
- printk(KERN_INFO KBUILD_MODNAME
- ": %s: %u kbit/s downlink "
- "%u kbit/s uplink\n",
- ctx->netdev->name,
- (unsigned int)(rx_speed / 1000U),
- (unsigned int)(tx_speed / 1000U));
- }
+ if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
+ netif_info(dev, link, dev->net,
+ "%u mbit/s downlink %u mbit/s uplink\n",
+ (unsigned int)(rx_speed / 1000000U),
+ (unsigned int)(tx_speed / 1000000U));
+ } else {
+ netif_info(dev, link, dev->net,
+ "%u kbit/s downlink %u kbit/s uplink\n",
+ (unsigned int)(rx_speed / 1000U),
+ (unsigned int)(tx_speed / 1000U));
}
}
@@ -1086,7 +1056,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
/* test for split data in 8-byte chunks */
if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
- cdc_ncm_speed_change(ctx,
+ cdc_ncm_speed_change(dev,
(struct usb_cdc_speed_change *)urb->transfer_buffer);
return;
}
@@ -1101,14 +1071,10 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
ctx->connected = le16_to_cpu(event->wValue);
-
- printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
- " %sconnected\n",
- ctx->netdev->name, ctx->connected ? "" : "dis");
-
+ netif_info(dev, link, dev->net,
+ "network connection: %sconnected\n",
+ ctx->connected ? "" : "dis");
usbnet_link_change(dev, ctx->connected, 0);
- if (!ctx->connected)
- ctx->tx_speed = ctx->rx_speed = 0;
break;
case USB_CDC_NOTIFY_SPEED_CHANGE:
@@ -1116,8 +1082,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
sizeof(struct usb_cdc_speed_change)))
set_bit(EVENT_STS_SPLIT, &dev->flags);
else
- cdc_ncm_speed_change(ctx,
- (struct usb_cdc_speed_change *) &event[1]);
+ cdc_ncm_speed_change(dev,
+ (struct usb_cdc_speed_change *)&event[1]);
break;
default:
@@ -1139,22 +1105,6 @@ static int cdc_ncm_check_connect(struct usbnet *dev)
return !ctx->connected;
}
-static int
-cdc_ncm_probe(struct usb_interface *udev, const struct usb_device_id *prod)
-{
- return usbnet_probe(udev, prod);
-}
-
-static void cdc_ncm_disconnect(struct usb_interface *intf)
-{
- struct usbnet *dev = usb_get_intfdata(intf);
-
- if (dev == NULL)
- return; /* already disconnected */
-
- usbnet_disconnect(intf);
-}
-
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
@@ -1265,8 +1215,8 @@ MODULE_DEVICE_TABLE(usb, cdc_devs);
static struct usb_driver cdc_ncm_driver = {
.name = "cdc_ncm",
.id_table = cdc_devs,
- .probe = cdc_ncm_probe,
- .disconnect = cdc_ncm_disconnect,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
.reset_resume = usbnet_resume,
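
One detail of the cdc_ncm_setup() rework above is worth spelling out: when the negotiated tx_max is an exact multiple of the bulk-out wMaxPacketSize, a single pad byte is added so a full NTB ends on a short packet instead of needing a ZLP. For example, with a 512-byte high-speed bulk endpoint and a 16384-byte NTB, 16384 % 512 == 0, so tx_max becomes 16385. A hedged one-liner of the rule:

/* Sketch of the pad-byte rule: bump tx_max by one when it would
 * otherwise end exactly on a wMaxPacketSize boundary, so the last USB
 * packet of a full NTB is short and no ZLP is required.
 */
static u32 example_pad_tx_max(u32 tx_max, unsigned int maxpacket)
{
	if (maxpacket && tx_max % maxpacket == 0)
		tx_max++;	/* e.g. 16384 -> 16385 for maxpacket 512 */
	return tx_max;
}
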
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3d6aaf79d8b2..23bdd5b9274d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -143,24 +143,28 @@ static const struct net_device_ops qmi_wwan_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-/* using a counter to merge subdriver requests with our own into a combined state */
+/* using a counter to merge subdriver requests with our own into a
+ * combined state
+ */
static int qmi_wwan_manage_power(struct usbnet *dev, int on)
{
struct qmi_wwan_state *info = (void *)&dev->data;
- int rv = 0;
+ int rv;
- dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
+ dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__,
+ atomic_read(&info->pmcount), on);
- if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
- /* need autopm_get/put here to ensure the usbcore sees the new value */
+ if ((on && atomic_add_return(1, &info->pmcount) == 1) ||
+ (!on && atomic_dec_and_test(&info->pmcount))) {
+ /* need autopm_get/put here to ensure the usbcore sees
+ * the new value
+ */
rv = usb_autopm_get_interface(dev->intf);
- if (rv < 0)
- goto err;
dev->intf->needs_remote_wakeup = on;
- usb_autopm_put_interface(dev->intf);
+ if (!rv)
+ usb_autopm_put_interface(dev->intf);
}
-err:
- return rv;
+ return 0;
}
static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
@@ -199,7 +203,8 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
atomic_set(&info->pmcount, 0);
/* register subdriver */
- subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power);
+ subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc,
+ 4096, &qmi_wwan_cdc_wdm_manage_power);
if (IS_ERR(subdriver)) {
dev_err(&info->control->dev, "subdriver registration failed\n");
rv = PTR_ERR(subdriver);
@@ -228,7 +233,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
struct usb_driver *driver = driver_of(intf);
struct qmi_wwan_state *info = (void *)&dev->data;
- BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
+ BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) <
+ sizeof(struct qmi_wwan_state)));
/* set up initial state */
info->control = intf;
@@ -250,7 +256,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_header_desc)) {
- dev_dbg(&intf->dev, "CDC header len %u\n", h->bLength);
+ dev_dbg(&intf->dev, "CDC header len %u\n",
+ h->bLength);
goto err;
}
break;
@@ -260,7 +267,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_union_desc)) {
- dev_dbg(&intf->dev, "CDC union len %u\n", h->bLength);
+ dev_dbg(&intf->dev, "CDC union len %u\n",
+ h->bLength);
goto err;
}
cdc_union = (struct usb_cdc_union_desc *)buf;
@@ -271,15 +279,15 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_ether_desc)) {
- dev_dbg(&intf->dev, "CDC ether len %u\n", h->bLength);
+ dev_dbg(&intf->dev, "CDC ether len %u\n",
+ h->bLength);
goto err;
}
cdc_ether = (struct usb_cdc_ether_desc *)buf;
break;
}
- /*
- * Remember which CDC functional descriptors we've seen. Works
+ /* Remember which CDC functional descriptors we've seen. Works
* for all types we care about, of which USB_CDC_ETHERNET_TYPE
* (0x0f) is the highest numbered
*/
@@ -293,10 +301,14 @@ next_desc:
/* Use separate control and data interfaces if we found a CDC Union */
if (cdc_union) {
- info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
- if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) {
- dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n",
- cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0);
+ info->data = usb_ifnum_to_if(dev->udev,
+ cdc_union->bSlaveInterface0);
+ if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 ||
+ !info->data) {
+ dev_err(&intf->dev,
+ "bogus CDC Union: master=%u, slave=%u\n",
+ cdc_union->bMasterInterface0,
+ cdc_union->bSlaveInterface0);
goto err;
}
}
@@ -374,8 +386,7 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
struct qmi_wwan_state *info = (void *)&dev->data;
int ret;
- /*
- * Both usbnet_suspend() and subdriver->suspend() MUST return 0
+ /* Both usbnet_suspend() and subdriver->suspend() MUST return 0
* in system sleep context, otherwise, the resume callback has
* to recover device from previous suspend failure.
*/
@@ -383,7 +394,8 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
if (ret < 0)
goto err;
- if (intf == info->control && info->subdriver && info->subdriver->suspend)
+ if (intf == info->control && info->subdriver &&
+ info->subdriver->suspend)
ret = info->subdriver->suspend(intf, message);
if (ret < 0)
usbnet_resume(intf);
@@ -396,14 +408,15 @@ static int qmi_wwan_resume(struct usb_interface *intf)
struct usbnet *dev = usb_get_intfdata(intf);
struct qmi_wwan_state *info = (void *)&dev->data;
int ret = 0;
- bool callsub = (intf == info->control && info->subdriver && info->subdriver->resume);
+ bool callsub = (intf == info->control && info->subdriver &&
+ info->subdriver->resume);
if (callsub)
ret = info->subdriver->resume(intf);
if (ret < 0)
goto err;
ret = usbnet_resume(intf);
- if (ret < 0 && callsub && info->subdriver->suspend)
+ if (ret < 0 && callsub)
info->subdriver->suspend(intf, PMSG_SUSPEND);
err:
return ret;
@@ -714,6 +727,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
/* 4. Gobi 1000 devices */
@@ -776,7 +791,8 @@ static const struct usb_device_id products[] = {
};
MODULE_DEVICE_TABLE(usb, products);
-static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod)
+static int qmi_wwan_probe(struct usb_interface *intf,
+ const struct usb_device_id *prod)
{
struct usb_device_id *id = (struct usb_device_id *)prod;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index bf94e10a37c8..90a429b7ebad 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1688,8 +1688,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
!(info->flags & FLAG_MULTI_PACKET)) {
dev->padding_pkt = kzalloc(1, GFP_KERNEL);
- if (!dev->padding_pkt)
+ if (!dev->padding_pkt) {
+ status = -ENOMEM;
goto out4;
+ }
}
status = register_netdev (net);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index eee1f19ef1e9..b24db7acbf12 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -188,6 +188,11 @@ static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
return tot;
}
+/* fake multicast ability */
+static void veth_set_multicast_list(struct net_device *dev)
+{
+}
+
static int veth_open(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
@@ -250,11 +255,14 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_start_xmit = veth_xmit,
.ndo_change_mtu = veth_change_mtu,
.ndo_get_stats64 = veth_get_stats64,
+ .ndo_set_rx_mode = veth_set_multicast_list,
.ndo_set_mac_address = eth_mac_addr,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
+ NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | NETIF_F_UFO | \
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
@@ -273,6 +281,7 @@ static void veth_setup(struct net_device *dev)
dev->destructor = veth_dev_free;
dev->hw_features = VETH_FEATURES;
+ dev->hw_enc_features = VETH_FEATURES;
}
/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index defec2b3c5a4..a18131b35284 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -124,6 +124,11 @@ struct virtnet_info {
/* Lock for config space updates */
struct mutex config_lock;
+ /* Page_frag for GFP_KERNEL packet buffer allocation when we run
+ * low on memory.
+ */
+ struct page_frag alloc_frag;
+
/* Does the affinity hint is set for virtqueues? */
bool affinity_hint_set;
@@ -217,33 +222,18 @@ static void skb_xmit_done(struct virtqueue *vq)
netif_wake_subqueue(vi->dev, vq2txq(vq));
}
-static void set_skb_frag(struct sk_buff *skb, struct page *page,
- unsigned int offset, unsigned int *len)
-{
- int size = min((unsigned)PAGE_SIZE - offset, *len);
- int i = skb_shinfo(skb)->nr_frags;
-
- __skb_fill_page_desc(skb, i, page, offset, size);
-
- skb->data_len += size;
- skb->len += size;
- skb->truesize += PAGE_SIZE;
- skb_shinfo(skb)->nr_frags++;
- skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
- *len -= size;
-}
-
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
- struct page *page, unsigned int len)
+ struct page *page, unsigned int offset,
+ unsigned int len, unsigned int truesize)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
struct sk_buff *skb;
struct skb_vnet_hdr *hdr;
- unsigned int copy, hdr_len, offset;
+ unsigned int copy, hdr_len, hdr_padded_len;
char *p;
- p = page_address(page);
+ p = page_address(page) + offset;
/* copy small packet so we can reuse these pages for small data */
skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
@@ -254,16 +244,17 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
if (vi->mergeable_rx_bufs) {
hdr_len = sizeof hdr->mhdr;
- offset = hdr_len;
+ hdr_padded_len = sizeof hdr->mhdr;
} else {
hdr_len = sizeof hdr->hdr;
- offset = sizeof(struct padded_vnet_hdr);
+ hdr_padded_len = sizeof(struct padded_vnet_hdr);
}
memcpy(hdr, p, hdr_len);
len -= hdr_len;
- p += offset;
+ offset += hdr_padded_len;
+ p += hdr_padded_len;
copy = len;
if (copy > skb_tailroom(skb))
@@ -273,6 +264,14 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
len -= copy;
offset += copy;
+ if (vi->mergeable_rx_bufs) {
+ if (len)
+ skb_add_rx_frag(skb, 0, page, offset, len, truesize);
+ else
+ put_page(page);
+ return skb;
+ }
+
/*
* Verify that we can indeed put this data into a skb.
* This is here to handle cases when the device erroneously
@@ -284,9 +283,12 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
dev_kfree_skb(skb);
return NULL;
}
-
+ BUG_ON(offset >= PAGE_SIZE);
while (len) {
- set_skb_frag(skb, page, offset, &len);
+ unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
+ frag_size, truesize);
+ len -= frag_size;
page = (struct page *)page->private;
offset = 0;
}
@@ -297,33 +299,52 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
return skb;
}
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
+static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
{
- struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+ struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
+ struct sk_buff *curr_skb = head_skb;
+ char *buf;
struct page *page;
- int num_buf, i, len;
+ int num_buf, len;
num_buf = hdr->mhdr.num_buffers;
while (--num_buf) {
- i = skb_shinfo(skb)->nr_frags;
- if (i >= MAX_SKB_FRAGS) {
- pr_debug("%s: packet too long\n", skb->dev->name);
- skb->dev->stats.rx_length_errors++;
- return -EINVAL;
- }
- page = virtqueue_get_buf(rq->vq, &len);
- if (!page) {
+ int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+ buf = virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
- skb->dev->name, hdr->mhdr.num_buffers);
- skb->dev->stats.rx_length_errors++;
+ head_skb->dev->name, hdr->mhdr.num_buffers);
+ head_skb->dev->stats.rx_length_errors++;
return -EINVAL;
}
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
-
- set_skb_frag(skb, page, 0, &len);
-
+ if (unlikely(len > MAX_PACKET_LEN)) {
+ pr_debug("%s: rx error: merge buffer too long\n",
+ head_skb->dev->name);
+ len = MAX_PACKET_LEN;
+ }
+ if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
+ struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
+ if (unlikely(!nskb)) {
+ head_skb->dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+ if (curr_skb == head_skb)
+ skb_shinfo(curr_skb)->frag_list = nskb;
+ else
+ curr_skb->next = nskb;
+ curr_skb = nskb;
+ head_skb->truesize += nskb->truesize;
+ num_skb_frags = 0;
+ }
+ if (curr_skb != head_skb) {
+ head_skb->data_len += len;
+ head_skb->len += len;
+ head_skb->truesize += MAX_PACKET_LEN;
+ }
+ page = virt_to_head_page(buf);
+ skb_add_rx_frag(curr_skb, num_skb_frags, page,
+ buf - (char *)page_address(page), len,
+ MAX_PACKET_LEN);
--rq->num;
}
return 0;
@@ -341,8 +362,10 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
- if (vi->mergeable_rx_bufs || vi->big_packets)
+ if (vi->big_packets)
give_pages(rq, buf);
+ else if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
else
dev_kfree_skb(buf);
return;
@@ -352,19 +375,28 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
skb = buf;
len -= sizeof(struct virtio_net_hdr);
skb_trim(skb, len);
+ } else if (vi->mergeable_rx_bufs) {
+ struct page *page = virt_to_head_page(buf);
+ skb = page_to_skb(rq, page,
+ (char *)buf - (char *)page_address(page),
+ len, MAX_PACKET_LEN);
+ if (unlikely(!skb)) {
+ dev->stats.rx_dropped++;
+ put_page(page);
+ return;
+ }
+ if (receive_mergeable(rq, skb)) {
+ dev_kfree_skb(skb);
+ return;
+ }
} else {
page = buf;
- skb = page_to_skb(rq, page, len);
+ skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
give_pages(rq, page);
return;
}
- if (vi->mergeable_rx_bufs)
- if (receive_mergeable(rq, skb)) {
- dev_kfree_skb(skb);
- return;
- }
}
hdr = skb_vnet_hdr(skb);
@@ -501,18 +533,28 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
- struct page *page;
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ char *buf = NULL;
int err;
- page = get_a_page(rq, gfp);
- if (!page)
+ if (gfp & __GFP_WAIT) {
+ if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag,
+ gfp)) {
+ buf = (char *)page_address(vi->alloc_frag.page) +
+ vi->alloc_frag.offset;
+ get_page(vi->alloc_frag.page);
+ vi->alloc_frag.offset += MAX_PACKET_LEN;
+ }
+ } else {
+ buf = netdev_alloc_frag(MAX_PACKET_LEN);
+ }
+ if (!buf)
return -ENOMEM;
- sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
-
- err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
+ sg_init_one(rq->sg, buf, MAX_PACKET_LEN);
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
if (err < 0)
- give_pages(rq, page);
+ put_page(virt_to_head_page(buf));
return err;
}
@@ -545,7 +587,8 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
} while (rq->vq->num_free);
if (unlikely(rq->num > rq->max))
rq->max = rq->num;
- virtqueue_kick(rq->vq);
+ if (unlikely(!virtqueue_kick(rq->vq)))
+ return false;
return !oom;
}
@@ -751,7 +794,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
err = xmit_skb(sq, skb);
/* This should not happen! */
- if (unlikely(err)) {
+ if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
dev->stats.tx_fifo_errors++;
if (net_ratelimit())
dev_warn(&dev->dev,
@@ -760,7 +803,6 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
kfree_skb(skb);
return NETDEV_TX_OK;
}
- virtqueue_kick(sq->vq);
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
@@ -819,12 +861,14 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
< 0);
- virtqueue_kick(vi->cvq);
+ if (unlikely(!virtqueue_kick(vi->cvq)))
+ return status == VIRTIO_NET_OK;
/* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
*/
- while (!virtqueue_get_buf(vi->cvq, &tmp))
+ while (!virtqueue_get_buf(vi->cvq, &tmp) &&
+ !virtqueue_is_broken(vi->cvq))
cpu_relax();
return status == VIRTIO_NET_OK;
@@ -852,8 +896,13 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
return -EINVAL;
}
} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
- vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
- addr->sa_data, dev->addr_len);
+ unsigned int i;
+
+ /* Naturally, this has an atomicity problem. */
+ for (i = 0; i < dev->addr_len; i++)
+ virtio_cwrite8(vdev,
+ offsetof(struct virtio_net_config, mac) +
+ i, addr->sa_data[i]);
}
eth_commit_mac_addr_change(dev, p);
@@ -938,7 +987,9 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
return -EINVAL;
} else {
vi->curr_queue_pairs = queue_pairs;
- schedule_delayed_work(&vi->refill, 0);
+ /* virtnet_open() will refill when the device is brought up. */
+ if (dev->flags & IFF_UP)
+ schedule_delayed_work(&vi->refill, 0);
}
return 0;
@@ -1128,6 +1179,7 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
default:
break;
}
+
return NOTIFY_OK;
}
@@ -1266,9 +1318,8 @@ static void virtnet_config_changed_work(struct work_struct *work)
if (!vi->config_enable)
goto done;
- if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
- offsetof(struct virtio_net_config, status),
- &v) < 0)
+ if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
+ struct virtio_net_config, status, &v) < 0)
goto done;
if (v & VIRTIO_NET_S_ANNOUNCE) {
@@ -1333,8 +1384,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
struct virtqueue *vq = vi->rq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs || vi->big_packets)
+ if (vi->big_packets)
give_pages(&vi->rq[i], buf);
+ else if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
else
dev_kfree_skb(buf);
--vi->rq[i].num;
@@ -1490,9 +1543,9 @@ static int virtnet_probe(struct virtio_device *vdev)
u16 max_queue_pairs;
/* Find if host supports multiqueue virtio_net device */
- err = virtio_config_val(vdev, VIRTIO_NET_F_MQ,
- offsetof(struct virtio_net_config,
- max_virtqueue_pairs), &max_queue_pairs);
+ err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
+ struct virtio_net_config,
+ max_virtqueue_pairs, &max_queue_pairs);
/* We need at least 2 queues */
if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
@@ -1544,9 +1597,11 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->vlan_features = dev->features;
/* Configuration may specify what MAC to use. Otherwise random. */
- if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
- offsetof(struct virtio_net_config, mac),
- dev->dev_addr, dev->addr_len) < 0)
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
+ virtio_cread_bytes(vdev,
+ offsetof(struct virtio_net_config, mac),
+ dev->dev_addr, dev->addr_len);
+ else
eth_hw_addr_random(dev);
/* Set up our device-specific information */
@@ -1640,6 +1695,8 @@ free_recv_bufs:
free_vqs:
cancel_delayed_work_sync(&vi->refill);
virtnet_del_vqs(vi);
+ if (vi->alloc_frag.page)
+ put_page(vi->alloc_frag.page);
free_index:
free_percpu(vi->vq_index);
free_stats:
@@ -1675,6 +1732,8 @@ static void virtnet_remove(struct virtio_device *vdev)
unregister_netdev(vi->dev);
remove_vq_common(vi);
+ if (vi->alloc_frag.page)
+ put_page(vi->alloc_frag.page);
flush_work(&vi->config_work);
@@ -1683,12 +1742,14 @@ static void virtnet_remove(struct virtio_device *vdev)
free_netdev(vi->dev);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtnet_freeze(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
int i;
+ unregister_hotcpu_notifier(&vi->nb);
+
/* Prevent config work handler from accessing the device */
mutex_lock(&vi->config_lock);
vi->config_enable = false;
@@ -1733,7 +1794,13 @@ static int virtnet_restore(struct virtio_device *vdev)
vi->config_enable = true;
mutex_unlock(&vi->config_lock);
+ rtnl_lock();
virtnet_set_queues(vi, vi->curr_queue_pairs);
+ rtnl_unlock();
+
+ err = register_hotcpu_notifier(&vi->nb);
+ if (err)
+ return err;
return 0;
}
@@ -1766,7 +1833,7 @@ static struct virtio_driver virtio_net_driver = {
.probe = virtnet_probe,
.remove = virtnet_remove,
.config_changed = virtnet_config_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtnet_freeze,
.restore = virtnet_restore,
#endif
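
The virtio_net hunks above move mergeable receive buffers from whole pages to MAX_PACKET_LEN page fragments and, in receive_mergeable(), spill extra merge buffers into a frag_list chain once the head skb runs out of frag slots. The sketch below is a minimal standalone model of that chaining decision only; the pkt struct, add_buf() and MAX_FRAGS are toy stand-ins, not the kernel's sk_buff API.

#include <stdio.h>
#include <stdlib.h>

#define MAX_FRAGS 4   /* stand-in for MAX_SKB_FRAGS */

/* toy stand-in for struct sk_buff: a frag array plus frag_list/next links */
struct pkt {
	int nr_frags;
	int frags[MAX_FRAGS];
	struct pkt *frag_list;   /* first overflow segment hangs off the head */
	struct pkt *next;        /* later overflow segments chain linearly */
};

/* mirrors the shape of receive_mergeable(): append one merge buffer,
 * allocating a new segment when the current one is out of frag slots */
static struct pkt *add_buf(struct pkt *head, struct pkt *curr, int len)
{
	if (curr->nr_frags == MAX_FRAGS) {
		struct pkt *nskb = calloc(1, sizeof(*nskb));
		if (!nskb)
			return NULL;
		if (curr == head)
			curr->frag_list = nskb;
		else
			curr->next = nskb;
		curr = nskb;
	}
	curr->frags[curr->nr_frags++] = len;
	return curr;
}

int main(void)
{
	struct pkt head = { 0 };
	struct pkt *curr = &head;
	int i;

	for (i = 0; i < 10 && curr; i++)
		curr = add_buf(&head, curr, 100 + i);

	printf("head frags: %d, overflowed: %s\n",
	       head.nr_frags, head.frag_list ? "yes" : "no");
	return 0;
}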
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index a03f358fd58b..12040a35d95d 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -410,9 +410,9 @@ int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
-extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
+void vmxnet3_set_ethtool_ops(struct net_device *netdev);
-extern struct rtnl_link_stats64 *
+struct rtnl_link_stats64 *
vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
extern char vmxnet3_driver_name[];
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 2ef5b6219f3f..24260ced86d2 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -60,10 +60,6 @@
#define VXLAN_N_VID (1u << 24)
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
-/* IP header + UDP + VXLAN + Ethernet header */
-#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
-/* IPv6 header + UDP + VXLAN + Ethernet header */
-#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
@@ -2087,7 +2083,7 @@ static void vxlan_setup(struct net_device *dev)
vxlan->age_timer.function = vxlan_cleanup;
vxlan->age_timer.data = (unsigned long) vxlan;
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(dev_net(dev), &low, &high);
vxlan->port_min = low;
vxlan->port_max = high;
vxlan->dst_port = htons(vxlan_port);
@@ -2180,7 +2176,7 @@ static void vxlan_del_work(struct work_struct *work)
* could be used for both IPv4 and IPv6 communications, but
* users may set bindv6only=1.
*/
-static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
+static struct socket *create_v6_sock(struct net *net, __be16 port)
{
struct sock *sk;
struct socket *sock;
@@ -2193,7 +2189,7 @@ static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (rc < 0) {
pr_debug("UDPv6 socket create failed\n");
- return rc;
+ return ERR_PTR(rc);
}
/* Put in proper namespace */
@@ -2208,28 +2204,27 @@ static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n",
&vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc);
sk_release_kernel(sk);
- return rc;
+ return ERR_PTR(rc);
}
/* At this point, IPv6 module should have been loaded in
* sock_create_kern().
*/
BUG_ON(!ipv6_stub);
- *psock = sock;
/* Disable multicast loopback */
inet_sk(sk)->mc_loop = 0;
- return 0;
+ return sock;
}
#else
-static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
+static struct socket *create_v6_sock(struct net *net, __be16 port)
{
- return -EPFNOSUPPORT;
+ return ERR_PTR(-EPFNOSUPPORT);
}
#endif
-static int create_v4_sock(struct net *net, __be16 port, struct socket **psock)
+static struct socket *create_v4_sock(struct net *net, __be16 port)
{
struct sock *sk;
struct socket *sock;
@@ -2244,7 +2239,7 @@ static int create_v4_sock(struct net *net, __be16 port, struct socket **psock)
rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (rc < 0) {
pr_debug("UDP socket create failed\n");
- return rc;
+ return ERR_PTR(rc);
}
/* Put in proper namespace */
@@ -2257,13 +2252,12 @@ static int create_v4_sock(struct net *net, __be16 port, struct socket **psock)
pr_debug("bind for UDP socket %pI4:%u (%d)\n",
&vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
sk_release_kernel(sk);
- return rc;
+ return ERR_PTR(rc);
}
- *psock = sock;
/* Disable multicast loopback */
inet_sk(sk)->mc_loop = 0;
- return 0;
+ return sock;
}
/* Create new listen socket if needed */
@@ -2274,7 +2268,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
struct vxlan_sock *vs;
struct socket *sock;
struct sock *sk;
- int rc = 0;
unsigned int h;
vs = kmalloc(sizeof(*vs), GFP_KERNEL);
@@ -2287,12 +2280,12 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
INIT_WORK(&vs->del_work, vxlan_del_work);
if (ipv6)
- rc = create_v6_sock(net, port, &sock);
+ sock = create_v6_sock(net, port);
else
- rc = create_v4_sock(net, port, &sock);
- if (rc < 0) {
+ sock = create_v4_sock(net, port);
+ if (IS_ERR(sock)) {
kfree(vs);
- return ERR_PTR(rc);
+ return ERR_PTR(PTR_ERR(sock));
}
vs->sock = sock;
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 3f0c4f268751..bcfff0d62de4 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -1972,6 +1972,7 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
}
i = port->index;
+ memset(&sync, 0, sizeof(sync));
sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
/* Lucky card and linux use same encoding here */
sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
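
The farsync change (and the wanxl one further below) adds a memset() so the settings structure is fully zeroed before its fields are filled in and copied back to userspace, closing a leak of uninitialized stack bytes through struct padding and unset members. A simplified userspace illustration, assuming a hypothetical line_settings struct and memcpy() in place of the real sync_serial_settings ioctl path:

#include <stdio.h>
#include <string.h>

struct line_settings {
	unsigned char clock_type;   /* padding bytes typically follow here */
	unsigned int clock_rate;
	unsigned short loopback;    /* trailing padding is likely as well */
};

/* Without the memset, the padding bytes (and any member the function
 * forgets to set) keep stale stack contents and reach the caller. */
static void fill_settings(struct line_settings *out)
{
	struct line_settings s;

	memset(&s, 0, sizeof(s));   /* the fix: zero everything first */
	s.clock_type = 1;
	s.clock_rate = 64000;
	memcpy(out, &s, sizeof(s)); /* stands in for copy_to_user() */
}

int main(void)
{
	struct line_settings s;

	fill_settings(&s);
	printf("%u %u %u\n", (unsigned)s.clock_type, s.clock_rate,
	       (unsigned)s.loopback);
	return 0;
}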
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 3d80e4267de8..3d741663fd67 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -220,7 +220,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
/* We want a fast IRQ for this device. Actually we'd like an even faster
IRQ ;) - This is one driver RtLinux is made for */
- if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+ if (request_irq(irq, z8530_interrupt, 0,
"Hostess SV11", sv) < 0) {
pr_warn("IRQ %d already in use\n", irq);
goto err_irq;
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 5bbcb5e3ee0c..388ddf60a66d 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -148,10 +148,6 @@ static int enslave( struct net_device *, struct net_device * );
static int emancipate( struct net_device * );
#endif
-#ifdef __i386__
-#define ASM_CRC 1
-#endif
-
static const char version[] =
"Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";
@@ -1551,88 +1547,6 @@ __setup( "sbni=", sbni_setup );
/* -------------------------------------------------------------------------- */
-#ifdef ASM_CRC
-
-static u32
-calc_crc32( u32 crc, u8 *p, u32 len )
-{
- register u32 _crc;
- _crc = crc;
-
- __asm__ __volatile__ (
- "xorl %%ebx, %%ebx\n"
- "movl %2, %%esi\n"
- "movl %3, %%ecx\n"
- "movl $crc32tab, %%edi\n"
- "shrl $2, %%ecx\n"
- "jz 1f\n"
-
- ".align 4\n"
- "0:\n"
- "movb %%al, %%bl\n"
- "movl (%%esi), %%edx\n"
- "shrl $8, %%eax\n"
- "xorb %%dl, %%bl\n"
- "shrl $8, %%edx\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
-
- "movb %%al, %%bl\n"
- "shrl $8, %%eax\n"
- "xorb %%dl, %%bl\n"
- "shrl $8, %%edx\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
-
- "movb %%al, %%bl\n"
- "shrl $8, %%eax\n"
- "xorb %%dl, %%bl\n"
- "movb %%dh, %%dl\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
-
- "movb %%al, %%bl\n"
- "shrl $8, %%eax\n"
- "xorb %%dl, %%bl\n"
- "addl $4, %%esi\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
-
- "decl %%ecx\n"
- "jnz 0b\n"
-
- "1:\n"
- "movl %3, %%ecx\n"
- "andl $3, %%ecx\n"
- "jz 2f\n"
-
- "movb %%al, %%bl\n"
- "shrl $8, %%eax\n"
- "xorb (%%esi), %%bl\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
-
- "decl %%ecx\n"
- "jz 2f\n"
-
- "movb %%al, %%bl\n"
- "shrl $8, %%eax\n"
- "xorb 1(%%esi), %%bl\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
-
- "decl %%ecx\n"
- "jz 2f\n"
-
- "movb %%al, %%bl\n"
- "shrl $8, %%eax\n"
- "xorb 2(%%esi), %%bl\n"
- "xorl (%%edi,%%ebx,4), %%eax\n"
- "2:\n"
- : "=a" (_crc)
- : "0" (_crc), "g" (p), "g" (len)
- : "bx", "cx", "dx", "si", "di"
- );
-
- return _crc;
-}
-
-#else /* ASM_CRC */
-
static u32
calc_crc32( u32 crc, u8 *p, u32 len )
{
@@ -1642,9 +1556,6 @@ calc_crc32( u32 crc, u8 *p, u32 len )
return crc;
}
-#endif /* ASM_CRC */
-
-
static u32 crc32tab[] __attribute__ ((aligned(8))) = {
0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37,
0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E,
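
The sbni hunks drop the hand-written i386 assembly CRC routine and keep only the portable C version, which walks the buffer through a 256-entry lookup table. The sketch below shows the general table-driven technique with the standard reflected CRC-32 polynomial; the driver uses its own precomputed crc32tab and CRC32() macro, so this illustrates the approach rather than the driver's exact arithmetic.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc_table[256];

/* build the byte-at-a-time table for the reflected polynomial 0xEDB88320 */
static void crc32_init(void)
{
	uint32_t i, c;
	int k;

	for (i = 0; i < 256; i++) {
		c = i;
		for (k = 0; k < 8; k++)
			c = (c & 1) ? 0xEDB88320u ^ (c >> 1) : c >> 1;
		crc_table[i] = c;
	}
}

/* one table lookup per input byte, same shape as the driver's C loop */
static uint32_t crc32_update(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--)
		crc = crc_table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
	return crc;
}

int main(void)
{
	const char *msg = "123456789";
	uint32_t crc;

	crc32_init();
	crc = crc32_update(0xFFFFFFFFu, (const uint8_t *)msg, strlen(msg));
	crc ^= 0xFFFFFFFFu;
	printf("%08x\n", (unsigned)crc); /* standard check value: cbf43926 */
	return 0;
}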
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 4f7748478984..27860b4f5908 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -266,7 +266,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
/* We want a fast IRQ for this device. Actually we'd like an even faster
IRQ ;) - This is one driver RtLinux is made for */
- if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+ if (request_irq(irq, z8530_interrupt, 0,
"SeaLevel", dev) < 0) {
pr_warn("IRQ %d already in use\n", irq);
goto err_request_irq;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 6a24a5a70cc7..4c0a69779b89 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -355,6 +355,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
ifr->ifr_settings.size = size; /* data size wanted */
return -ENOBUFS;
}
+ memset(&line, 0, sizeof(line));
line.clock_type = get_status(port)->clocking;
line.clock_rate = 0;
line.loopback = 0;
diff --git a/drivers/net/wan/x25_asy.h b/drivers/net/wan/x25_asy.h
index 8f0fc2e57e2b..f57ee67836ae 100644
--- a/drivers/net/wan/x25_asy.h
+++ b/drivers/net/wan/x25_asy.h
@@ -41,6 +41,6 @@ struct x25_asy {
#define X25_ASY_MAGIC 0x5303
-extern int x25_asy_init(struct net_device *dev);
+int x25_asy_init(struct net_device *dev);
#endif /* _LINUX_X25_ASY.H */
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index f29d554fc07d..2416a9d60bd6 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -395,20 +395,19 @@ struct z8530_dev
extern u8 z8530_dead_port[];
extern u8 z8530_hdlc_kilostream_85230[];
extern u8 z8530_hdlc_kilostream[];
-extern irqreturn_t z8530_interrupt(int, void *);
-extern void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
-extern int z8530_init(struct z8530_dev *);
-extern int z8530_shutdown(struct z8530_dev *);
-extern int z8530_sync_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_channel_load(struct z8530_channel *, u8 *);
-extern netdev_tx_t z8530_queue_xmit(struct z8530_channel *c,
- struct sk_buff *skb);
-extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
+irqreturn_t z8530_interrupt(int, void *);
+void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
+int z8530_init(struct z8530_dev *);
+int z8530_shutdown(struct z8530_dev *);
+int z8530_sync_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
+int z8530_channel_load(struct z8530_channel *, u8 *);
+netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
+void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
/*
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 9f1e947f3557..649ecad6844c 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -256,21 +256,20 @@ void i2400mu_init(struct i2400mu *i2400mu)
i2400mu->rx_size_auto_shrink = 1;
}
-extern int i2400mu_notification_setup(struct i2400mu *);
-extern void i2400mu_notification_release(struct i2400mu *);
+int i2400mu_notification_setup(struct i2400mu *);
+void i2400mu_notification_release(struct i2400mu *);
-extern int i2400mu_rx_setup(struct i2400mu *);
-extern void i2400mu_rx_release(struct i2400mu *);
-extern void i2400mu_rx_kick(struct i2400mu *);
+int i2400mu_rx_setup(struct i2400mu *);
+void i2400mu_rx_release(struct i2400mu *);
+void i2400mu_rx_kick(struct i2400mu *);
-extern int i2400mu_tx_setup(struct i2400mu *);
-extern void i2400mu_tx_release(struct i2400mu *);
-extern void i2400mu_bus_tx_kick(struct i2400m *);
+int i2400mu_tx_setup(struct i2400mu *);
+void i2400mu_tx_release(struct i2400mu *);
+void i2400mu_bus_tx_kick(struct i2400m *);
-extern ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
- const struct i2400m_bootrom_header *,
- size_t, int);
-extern ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
- struct i2400m_bootrom_header *,
- size_t);
+ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
+ const struct i2400m_bootrom_header *, size_t,
+ int);
+ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
+ struct i2400m_bootrom_header *, size_t);
#endif /* #ifndef __I2400M_USB_H__ */
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 79c6505b5c20..5a34e72bab9a 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -710,18 +710,18 @@ enum i2400m_bri {
I2400M_BRI_MAC_REINIT = 1 << 3,
};
-extern void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
-extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
-extern int i2400m_read_mac_addr(struct i2400m *);
-extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
-extern int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
+void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
+int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
+int i2400m_read_mac_addr(struct i2400m *);
+int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
+int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
static inline
int i2400m_is_d2h_barker(const void *buf)
{
const __le32 *barker = buf;
return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER;
}
-extern void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
+void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
/* Make/grok boot-rom header commands */
@@ -789,32 +789,31 @@ unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr)
/*
* Driver / device setup and internal functions
*/
-extern void i2400m_init(struct i2400m *);
-extern int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
-extern void i2400m_netdev_setup(struct net_device *net_dev);
-extern int i2400m_sysfs_setup(struct device_driver *);
-extern void i2400m_sysfs_release(struct device_driver *);
-extern int i2400m_tx_setup(struct i2400m *);
-extern void i2400m_wake_tx_work(struct work_struct *);
-extern void i2400m_tx_release(struct i2400m *);
-
-extern int i2400m_rx_setup(struct i2400m *);
-extern void i2400m_rx_release(struct i2400m *);
-
-extern void i2400m_fw_cache(struct i2400m *);
-extern void i2400m_fw_uncache(struct i2400m *);
-
-extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
- const void *, int);
-extern void i2400m_net_erx(struct i2400m *, struct sk_buff *,
- enum i2400m_cs);
-extern void i2400m_net_wake_stop(struct i2400m *);
+void i2400m_init(struct i2400m *);
+int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
+void i2400m_netdev_setup(struct net_device *net_dev);
+int i2400m_sysfs_setup(struct device_driver *);
+void i2400m_sysfs_release(struct device_driver *);
+int i2400m_tx_setup(struct i2400m *);
+void i2400m_wake_tx_work(struct work_struct *);
+void i2400m_tx_release(struct i2400m *);
+
+int i2400m_rx_setup(struct i2400m *);
+void i2400m_rx_release(struct i2400m *);
+
+void i2400m_fw_cache(struct i2400m *);
+void i2400m_fw_uncache(struct i2400m *);
+
+void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned, const void *,
+ int);
+void i2400m_net_erx(struct i2400m *, struct sk_buff *, enum i2400m_cs);
+void i2400m_net_wake_stop(struct i2400m *);
enum i2400m_pt;
-extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
+int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
#ifdef CONFIG_DEBUG_FS
-extern int i2400m_debugfs_add(struct i2400m *);
-extern void i2400m_debugfs_rm(struct i2400m *);
+int i2400m_debugfs_add(struct i2400m *);
+void i2400m_debugfs_rm(struct i2400m *);
#else
static inline int i2400m_debugfs_add(struct i2400m *i2400m)
{
@@ -824,8 +823,8 @@ static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
#endif
/* Initialize/shutdown the device */
-extern int i2400m_dev_initialize(struct i2400m *);
-extern void i2400m_dev_shutdown(struct i2400m *);
+int i2400m_dev_initialize(struct i2400m *);
+void i2400m_dev_shutdown(struct i2400m *);
extern struct attribute_group i2400m_dev_attr_group;
@@ -873,21 +872,21 @@ void i2400m_put(struct i2400m *i2400m)
dev_put(i2400m->wimax_dev.net_dev);
}
-extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
-extern int i2400m_pre_reset(struct i2400m *);
-extern int i2400m_post_reset(struct i2400m *);
-extern void i2400m_error_recovery(struct i2400m *);
+int i2400m_dev_reset_handle(struct i2400m *, const char *);
+int i2400m_pre_reset(struct i2400m *);
+int i2400m_post_reset(struct i2400m *);
+void i2400m_error_recovery(struct i2400m *);
/*
* _setup()/_release() are called by the probe/disconnect functions of
* the bus-specific drivers.
*/
-extern int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
-extern void i2400m_release(struct i2400m *);
+int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
+void i2400m_release(struct i2400m *);
-extern int i2400m_rx(struct i2400m *, struct sk_buff *);
-extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
-extern void i2400m_tx_msg_sent(struct i2400m *);
+int i2400m_rx(struct i2400m *, struct sk_buff *);
+struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
+void i2400m_tx_msg_sent(struct i2400m *);
/*
@@ -900,20 +899,19 @@ struct device *i2400m_dev(struct i2400m *i2400m)
return i2400m->wimax_dev.net_dev->dev.parent;
}
-extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
- char *, size_t);
-extern int i2400m_msg_size_check(struct i2400m *,
- const struct i2400m_l3l4_hdr *, size_t);
-extern struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
-extern void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
-extern void i2400m_report_hook(struct i2400m *,
- const struct i2400m_l3l4_hdr *, size_t);
-extern void i2400m_report_hook_work(struct work_struct *);
-extern int i2400m_cmd_enter_powersave(struct i2400m *);
-extern int i2400m_cmd_exit_idle(struct i2400m *);
-extern struct sk_buff *i2400m_get_device_info(struct i2400m *);
-extern int i2400m_firmware_check(struct i2400m *);
-extern int i2400m_set_idle_timeout(struct i2400m *, unsigned);
+int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, char *, size_t);
+int i2400m_msg_size_check(struct i2400m *, const struct i2400m_l3l4_hdr *,
+ size_t);
+struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
+void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
+void i2400m_report_hook(struct i2400m *, const struct i2400m_l3l4_hdr *,
+ size_t);
+void i2400m_report_hook_work(struct work_struct *);
+int i2400m_cmd_enter_powersave(struct i2400m *);
+int i2400m_cmd_exit_idle(struct i2400m *);
+struct sk_buff *i2400m_get_device_info(struct i2400m *);
+int i2400m_firmware_check(struct i2400m *);
+int i2400m_set_idle_timeout(struct i2400m *, unsigned);
static inline
struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
@@ -921,10 +919,9 @@ struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
return &iface->cur_altsetting->endpoint[ep].desc;
}
-extern int i2400m_op_rfkill_sw_toggle(struct wimax_dev *,
- enum wimax_rf_state);
-extern void i2400m_report_tlv_rf_switches_status(
- struct i2400m *, const struct i2400m_tlv_rf_switches_status *);
+int i2400m_op_rfkill_sw_toggle(struct wimax_dev *, enum wimax_rf_state);
+void i2400m_report_tlv_rf_switches_status(struct i2400m *,
+ const struct i2400m_tlv_rf_switches_status *);
/*
* Helpers for firmware backwards compatibility
@@ -968,8 +965,8 @@ void __i2400m_msleep(unsigned ms)
/* module initialization helpers */
-extern int i2400m_barker_db_init(const char *);
-extern void i2400m_barker_db_exit(void);
+int i2400m_barker_db_init(const char *);
+void i2400m_barker_db_exit(void);
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f9a24e599dee..cfce83e1f273 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1924,7 +1924,6 @@ static int adm8211_probe(struct pci_dev *pdev,
pci_iounmap(pdev, priv->map);
err_free_dev:
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(dev);
err_free_reg:
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 7fe19648f10e..edf4b57c4aaa 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5570,7 +5570,6 @@ static void airo_pci_remove(struct pci_dev *pdev)
airo_print_info(dev->name, "Unregistering...");
stop_airo_card(dev, 1);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 1abf1d421173..c63d1159db5c 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -25,6 +25,23 @@ config ATH_DEBUG
Say Y, if you want to debug atheros wireless drivers.
Right now only ath9k makes use of this.
+config ATH_REG_DYNAMIC_USER_REG_HINTS
+ bool "Atheros dynamic user regulatory hints"
+ depends on CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled in countries where
+ this feature is explicitly allowed and only on cards that
+ specifically have been tested for this.
+
+config ATH_REG_DYNAMIC_USER_CERT_TESTING
+ bool "Atheros dynamic user regulatory testing"
+ depends on ATH_REG_DYNAMIC_USER_REG_HINTS && CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled on systems
+ undergoing certification testing.
+
source "drivers/net/wireless/ath/ath5k/Kconfig"
source "drivers/net/wireless/ath/ath9k/Kconfig"
source "drivers/net/wireless/ath/carl9170/Kconfig"
@@ -32,5 +49,6 @@ source "drivers/net/wireless/ath/ath6kl/Kconfig"
source "drivers/net/wireless/ath/ar5523/Kconfig"
source "drivers/net/wireless/ath/wil6210/Kconfig"
source "drivers/net/wireless/ath/ath10k/Kconfig"
+source "drivers/net/wireless/ath/wcn36xx/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index fb05cfd19361..7d023b0f13b4 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -5,13 +5,16 @@ obj-$(CONFIG_ATH6KL) += ath6kl/
obj-$(CONFIG_AR5523) += ar5523/
obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH10K) += ath10k/
+obj-$(CONFIG_WCN36XX) += wcn36xx/
obj-$(CONFIG_ATH_COMMON) += ath.o
ath-objs := main.o \
regd.o \
hw.o \
- key.o
+ key.o \
+ dfs_pattern_detector.o \
+ dfs_pri_detector.o
ath-$(CONFIG_ATH_DEBUG) += debug.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 17d7fece35d2..280fc3d53a36 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1762,6 +1762,7 @@ static struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UX(0x2001, 0x3a00), /* Dlink / DWLAG132 */
AR5523_DEVICE_UG(0x2001, 0x3a02), /* Dlink / DWLG132 */
AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */
+ AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 744da6d1c405..a1f099628850 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -22,7 +22,8 @@
void ath10k_bmi_start(struct ath10k *ar)
{
- ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
+
ar->bmi.done_sent = false;
}
@@ -32,8 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
+
if (ar->bmi.done_sent) {
- ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
return 0;
}
@@ -46,7 +49,6 @@ int ath10k_bmi_done(struct ath10k *ar)
return ret;
}
- ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
return 0;
}
@@ -59,6 +61,8 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
u32 resplen = sizeof(resp.get_target_info);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
+
if (ar->bmi.done_sent) {
ath10k_warn("BMI Get Target Info Command disallowed\n");
return -EBUSY;
@@ -80,6 +84,7 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
target_info->version = __le32_to_cpu(resp.get_target_info.version);
target_info->type = __le32_to_cpu(resp.get_target_info.type);
+
return 0;
}
@@ -92,15 +97,14 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
u32 rxlen;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
+ address, length);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
- __func__, ar, address, length);
-
while (length) {
rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
@@ -133,15 +137,14 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
u32 txlen;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
+ address, length);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
- __func__, ar, address, length);
-
while (length) {
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
@@ -180,15 +183,14 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
u32 resplen = sizeof(resp.execute);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
+ address, *param);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
- __func__, ar, address, *param);
-
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
cmd.execute.param = __cpu_to_le32(*param);
@@ -216,6 +218,9 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+ buffer, length);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
@@ -250,6 +255,9 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
+ address);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
@@ -275,6 +283,10 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
u32 trailer_len = length - head_len;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI,
+ "bmi fast download address 0x%x buffer 0x%p length %d\n",
+ address, buffer, length);
+
ret = ath10k_bmi_lz_stream_start(ar, address);
if (ret)
return ret;
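
The ath10k/bmi.c hunks move BMI tracing onto a dedicated ATH10K_DBG_BMI debug level and log the request parameters up front. The underlying pattern is a plain bitmask-gated logger; below is a hypothetical userspace version, with the DBG_* bits and dbg() standing in for the ath10k debug-mask machinery.

#include <stdarg.h>
#include <stdio.h>

/* hypothetical debug mask bits, mirroring the ATH10K_DBG_* idea */
enum dbg_mask {
	DBG_CORE = 1u << 0,
	DBG_BMI  = 1u << 1,
};

/* set from a module parameter in the real driver */
static unsigned int debug_mask = DBG_BMI;

static void dbg(enum dbg_mask mask, const char *fmt, ...)
{
	va_list ap;

	if (!(debug_mask & mask))
		return;            /* cheap filter: message classes toggle independently */
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	dbg(DBG_BMI, "bmi read address 0x%x length %d\n", 0x1000, 64);
	dbg(DBG_CORE, "suppressed: core debugging is off\n");
	return 0;
}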
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f8b969f518f8..e46951b8fb92 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -76,36 +76,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *indicator_addr;
-
- if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
- ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
- return;
- }
-
- /* workaround for QCA988x_1.0 HW CE */
- indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;
-
- if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
- iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
- } else {
- unsigned long irq_flags;
- local_irq_save(irq_flags);
- iowrite32(1, indicator_addr);
-
- /*
- * PCIE write waits for ACK in IPQ8K, there is no
- * need to read back value.
- */
- (void)ioread32(indicator_addr);
- (void)ioread32(indicator_addr); /* conservative */
-
- ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
-
- iowrite32(0, indicator_addr);
- local_irq_restore(irq_flags);
- }
+ ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}
static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
@@ -285,7 +256,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
* ath10k_ce_sendlist_send.
* The caller takes responsibility for any needed locking.
*/
-static int ath10k_ce_send_nolock(struct ce_state *ce_state,
+static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
@@ -293,7 +264,7 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
unsigned int flags)
{
struct ath10k *ar = ce_state->ar;
- struct ce_ring_state *src_ring = ce_state->src_ring;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
struct ce_desc *desc, *sdesc;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
@@ -306,11 +277,13 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
__func__, nbytes, ce_state->src_sz_max);
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return ret;
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) <= 0)) {
- ret = -EIO;
+ ret = -ENOSR;
goto exit;
}
@@ -346,7 +319,7 @@ exit:
return ret;
}
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
@@ -365,77 +338,26 @@ int ath10k_ce_send(struct ce_state *ce_state,
return ret;
}
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
- unsigned int nbytes, u32 flags)
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
- unsigned int num_items = sendlist->num_items;
- struct ce_sendlist_item *item;
-
- item = &sendlist->item[num_items];
- item->data = buffer;
- item->u.nbytes = nbytes;
- item->flags = flags;
- sendlist->num_items++;
-}
-
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
- void *per_transfer_context,
- struct ce_sendlist *sendlist,
- unsigned int transfer_id)
-{
- struct ce_ring_state *src_ring = ce_state->src_ring;
- struct ce_sendlist_item *item;
- struct ath10k *ar = ce_state->ar;
+ struct ath10k *ar = pipe->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int num_items = sendlist->num_items;
- unsigned int sw_index;
- unsigned int write_index;
- int i, delta, ret = -ENOMEM;
+ int delta;
spin_lock_bh(&ar_pci->ce_lock);
-
- sw_index = src_ring->sw_index;
- write_index = src_ring->write_index;
-
- delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
-
- if (delta >= num_items) {
- /*
- * Handle all but the last item uniformly.
- */
- for (i = 0; i < num_items - 1; i++) {
- item = &sendlist->item[i];
- ret = ath10k_ce_send_nolock(ce_state,
- CE_SENDLIST_ITEM_CTXT,
- (u32) item->data,
- item->u.nbytes, transfer_id,
- item->flags |
- CE_SEND_FLAG_GATHER);
- if (ret)
- ath10k_warn("CE send failed for item: %d\n", i);
- }
- /*
- * Provide valid context pointer for final item.
- */
- item = &sendlist->item[i];
- ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
- (u32) item->data, item->u.nbytes,
- transfer_id, item->flags);
- if (ret)
- ath10k_warn("CE send failed for last item: %d\n", i);
- }
-
+ delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
+ pipe->src_ring->write_index,
+ pipe->src_ring->sw_index - 1);
spin_unlock_bh(&ar_pci->ce_lock);
- return ret;
+ return delta;
}
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_recv_context,
u32 buffer)
{
- struct ce_ring_state *dest_ring = ce_state->dest_ring;
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -448,7 +370,9 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
write_index = dest_ring->write_index;
sw_index = dest_ring->sw_index;
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ goto out;
if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -470,6 +394,8 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
ret = -EIO;
}
ath10k_pci_sleep(ar);
+
+out:
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
@@ -479,14 +405,14 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
* Guts of ath10k_ce_completed_recv_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp)
{
- struct ce_ring_state *dest_ring = ce_state->dest_ring;
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int sw_index = dest_ring->sw_index;
@@ -535,7 +461,7 @@ static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
return 0;
}
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -556,11 +482,11 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
return ret;
}
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp)
{
- struct ce_ring_state *dest_ring;
+ struct ath10k_ce_ring *dest_ring;
unsigned int nentries_mask;
unsigned int sw_index;
unsigned int write_index;
@@ -612,19 +538,20 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
- struct ce_ring_state *src_ring = ce_state->src_ring;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
+ struct ce_desc *sdesc, *sbase;
unsigned int read_index;
- int ret = -EIO;
+ int ret;
if (src_ring->hw_index == sw_index) {
/*
@@ -634,48 +561,54 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
* the SW has really caught up to the HW, or if the cached
* value of the HW index has become stale.
*/
- ath10k_pci_wake(ar);
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return ret;
+
src_ring->hw_index =
ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->hw_index &= nentries_mask;
+
ath10k_pci_sleep(ar);
}
+
read_index = src_ring->hw_index;
- if ((read_index != sw_index) && (read_index != 0xffffffff)) {
- struct ce_desc *sbase = src_ring->shadow_base;
- struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
+ if ((read_index == sw_index) || (read_index == 0xffffffff))
+ return -EIO;
- /* Return data from completed source descriptor */
- *bufferp = __le32_to_cpu(sdesc->addr);
- *nbytesp = __le16_to_cpu(sdesc->nbytes);
- *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
- CE_DESC_FLAGS_META_DATA);
+ sbase = src_ring->shadow_base;
+ sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
- if (per_transfer_contextp)
- *per_transfer_contextp =
- src_ring->per_transfer_context[sw_index];
+ /* Return data from completed source descriptor */
+ *bufferp = __le32_to_cpu(sdesc->addr);
+ *nbytesp = __le16_to_cpu(sdesc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
+ CE_DESC_FLAGS_META_DATA);
- /* sanity */
- src_ring->per_transfer_context[sw_index] = NULL;
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
- /* Update sw_index */
- sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
- src_ring->sw_index = sw_index;
- ret = 0;
- }
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
- return ret;
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+
+ return 0;
}
/* NB: Modeled after ath10k_ce_completed_send_next */
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
- struct ce_ring_state *src_ring;
+ struct ath10k_ce_ring *src_ring;
unsigned int nentries_mask;
unsigned int sw_index;
unsigned int write_index;
@@ -727,7 +660,7 @@ int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
return ret;
}
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -756,53 +689,29 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
- void *transfer_context;
- u32 buf;
- unsigned int nbytes;
- unsigned int id;
- unsigned int flags;
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
- ath10k_pci_wake(ar);
spin_lock_bh(&ar_pci->ce_lock);
/* Clear the copy-complete interrupts that will be handled here. */
ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
HOST_IS_COPY_COMPLETE_MASK);
- if (ce_state->recv_cb) {
- /*
- * Pop completed recv buffers and call the registered
- * recv callback for each
- */
- while (ath10k_ce_completed_recv_next_nolock(ce_state,
- &transfer_context,
- &buf, &nbytes,
- &id, &flags) == 0) {
- spin_unlock_bh(&ar_pci->ce_lock);
- ce_state->recv_cb(ce_state, transfer_context, buf,
- nbytes, id, flags);
- spin_lock_bh(&ar_pci->ce_lock);
- }
- }
+ spin_unlock_bh(&ar_pci->ce_lock);
- if (ce_state->send_cb) {
- /*
- * Pop completed send buffers and call the registered
- * send callback for each
- */
- while (ath10k_ce_completed_send_next_nolock(ce_state,
- &transfer_context,
- &buf,
- &nbytes,
- &id) == 0) {
- spin_unlock_bh(&ar_pci->ce_lock);
- ce_state->send_cb(ce_state, transfer_context,
- buf, nbytes, id);
- spin_lock_bh(&ar_pci->ce_lock);
- }
- }
+ if (ce_state->recv_cb)
+ ce_state->recv_cb(ce_state);
+
+ if (ce_state->send_cb)
+ ce_state->send_cb(ce_state);
+
+ spin_lock_bh(&ar_pci->ce_lock);
/*
* Misc CE interrupts are not being handled, but still need
@@ -823,10 +732,13 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id;
+ int ce_id, ret;
u32 intr_summary;
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
+
intr_summary = CE_INTERRUPT_SUMMARY(ar);
for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
@@ -849,13 +761,16 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
*
* Called with ce_lock held.
*/
-static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
int disable_copy_compl_intr)
{
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
+ int ret;
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
if ((!disable_copy_compl_intr) &&
(ce_state->send_cb || ce_state->recv_cb))
@@ -871,11 +786,14 @@ static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id;
+ int ce_id, ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
- ath10k_pci_wake(ar);
for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
- struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
@@ -883,12 +801,8 @@ void ath10k_ce_disable_interrupts(struct ath10k *ar)
ath10k_pci_sleep(ar);
}
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
- void (*send_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts)
{
struct ath10k *ar = ce_state->ar;
@@ -900,13 +814,8 @@ void ath10k_ce_send_cb_register(struct ce_state *ce_state,
spin_unlock_bh(&ar_pci->ce_lock);
}
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
- void (*recv_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags))
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*recv_cb)(struct ath10k_ce_pipe *))
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -919,11 +828,11 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ce_state *ce_state,
+ struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_ring_state *src_ring;
+ struct ath10k_ce_ring *src_ring;
unsigned int nentries = attr->src_nentries;
unsigned int ce_nbytes;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -937,19 +846,18 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
return 0;
}
- ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+ ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
- ce_state->src_ring = (struct ce_ring_state *)ptr;
+ ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
src_ring = ce_state->src_ring;
- ptr += sizeof(struct ce_ring_state);
+ ptr += sizeof(struct ath10k_ce_ring);
src_ring->nentries = nentries;
src_ring->nentries_mask = nentries - 1;
- ath10k_pci_wake(ar);
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
src_ring->hw_index = src_ring->sw_index;
@@ -957,7 +865,6 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
src_ring->write_index =
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
- ath10k_pci_sleep(ar);
src_ring->per_transfer_context = (void **)ptr;
@@ -970,6 +877,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr);
+ if (!src_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_state->src_ring);
+ ce_state->src_ring = NULL;
+ return -ENOMEM;
+ }
+
src_ring->base_addr_ce_space_unaligned = base_addr;
src_ring->base_addr_owner_space = PTR_ALIGN(
@@ -986,12 +899,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
src_ring->shadow_base_unaligned =
kmalloc((nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), GFP_KERNEL);
+ if (!src_ring->shadow_base_unaligned) {
+ pci_free_consistent(ar_pci->pdev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space,
+ src_ring->base_addr_ce_space);
+ kfree(ce_state->src_ring);
+ ce_state->src_ring = NULL;
+ return -ENOMEM;
+ }
src_ring->shadow_base = PTR_ALIGN(
src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_pci_wake(ar);
ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
src_ring->base_addr_ce_space);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
@@ -999,18 +921,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
- ath10k_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot ce src ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
}
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ce_state *ce_state,
+ struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_ring_state *dest_ring;
+ struct ath10k_ce_ring *dest_ring;
unsigned int nentries = attr->dest_nentries;
unsigned int ce_nbytes;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -1024,25 +949,23 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
return 0;
}
- ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+ ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
- ce_state->dest_ring = (struct ce_ring_state *)ptr;
+ ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
dest_ring = ce_state->dest_ring;
- ptr += sizeof(struct ce_ring_state);
+ ptr += sizeof(struct ath10k_ce_ring);
dest_ring->nentries = nentries;
dest_ring->nentries_mask = nentries - 1;
- ath10k_pci_wake(ar);
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
dest_ring->write_index =
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
dest_ring->write_index &= dest_ring->nentries_mask;
- ath10k_pci_sleep(ar);
dest_ring->per_transfer_context = (void **)ptr;
@@ -1055,6 +978,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr);
+ if (!dest_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_state->dest_ring);
+ ce_state->dest_ring = NULL;
+ return -ENOMEM;
+ }
+
dest_ring->base_addr_ce_space_unaligned = base_addr;
/*
@@ -1071,44 +1000,35 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_pci_wake(ar);
ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
dest_ring->base_addr_ce_space);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
- ath10k_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot ce dest ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
}
-static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
+static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = NULL;
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
spin_lock_bh(&ar_pci->ce_lock);
- if (!ar_pci->ce_id_to_state[ce_id]) {
- ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
- if (ce_state == NULL) {
- spin_unlock_bh(&ar_pci->ce_lock);
- return NULL;
- }
-
- ar_pci->ce_id_to_state[ce_id] = ce_state;
- ce_state->ar = ar;
- ce_state->id = ce_id;
- ce_state->ctrl_addr = ctrl_addr;
- ce_state->state = CE_RUNNING;
- /* Save attribute flags */
- ce_state->attr_flags = attr->flags;
- ce_state->src_sz_max = attr->src_sz_max;
- }
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ctrl_addr;
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
spin_unlock_bh(&ar_pci->ce_lock);
@@ -1122,12 +1042,17 @@ static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
* initialization. It may be that only one side or the other is
* initialized by software/firmware.
*/
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
- struct ce_state *ce_state;
+ struct ath10k_ce_pipe *ce_state;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return NULL;
ce_state = ath10k_ce_init_state(ar, ce_id, attr);
if (!ce_state) {
@@ -1136,40 +1061,38 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
}
if (attr->src_nentries) {
- if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
- ath10k_err("Failed to initialize CE src ring for ID: %d\n",
- ce_id);
+ ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+ if (ret) {
+ ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
+ ce_id, ret);
ath10k_ce_deinit(ce_state);
return NULL;
}
}
if (attr->dest_nentries) {
- if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
- ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
- ce_id);
+ ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+ if (ret) {
+ ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
+ ce_id, ret);
ath10k_ce_deinit(ce_state);
return NULL;
}
}
/* Enable CE error interrupts */
- ath10k_pci_wake(ar);
ath10k_ce_error_intr_enable(ar, ctrl_addr);
+
ath10k_pci_sleep(ar);
return ce_state;
}
-void ath10k_ce_deinit(struct ce_state *ce_state)
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
{
- unsigned int ce_id = ce_state->id;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ce_state->state = CE_UNUSED;
- ar_pci->ce_id_to_state[ce_id] = NULL;
-
if (ce_state->src_ring) {
kfree(ce_state->src_ring->shadow_base_unaligned);
pci_free_consistent(ar_pci->pdev,
@@ -1190,5 +1113,7 @@ void ath10k_ce_deinit(struct ce_state *ce_state)
ce_state->dest_ring->base_addr_ce_space);
kfree(ce_state->dest_ring);
}
- kfree(ce_state);
+
+ ce_state->src_ring = NULL;
+ ce_state->dest_ring = NULL;
}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index c17f07c026f4..15d45b5b7615 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -27,7 +27,6 @@
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
-#define CE_SENDLIST_ITEMS_MAX 12
#define CE_SEND_FLAG_GATHER 0x00010000
/*
@@ -36,16 +35,9 @@
* how to use copy engines.
*/
-struct ce_state;
+struct ath10k_ce_pipe;
-/* Copy Engine operational state */
-enum ce_op_state {
- CE_UNUSED,
- CE_PAUSED,
- CE_RUNNING,
-};
-
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
@@ -57,8 +49,7 @@ struct ce_desc {
__le16 flags; /* %CE_DESC_FLAGS_ */
};
-/* Copy Engine Ring internal state */
-struct ce_ring_state {
+struct ath10k_ce_ring {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
unsigned int nentries_mask;
@@ -116,49 +107,20 @@ struct ce_ring_state {
void **per_transfer_context;
};
-/* Copy Engine internal state */
-struct ce_state {
+struct ath10k_ce_pipe {
struct ath10k *ar;
unsigned int id;
unsigned int attr_flags;
u32 ctrl_addr;
- enum ce_op_state state;
-
- void (*send_cb) (struct ce_state *ce_state,
- void *per_transfer_send_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id);
- void (*recv_cb) (struct ce_state *ce_state,
- void *per_transfer_recv_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags);
-
- unsigned int src_sz_max;
- struct ce_ring_state *src_ring;
- struct ce_ring_state *dest_ring;
-};
-struct ce_sendlist_item {
- /* e.g. buffer or desc list */
- dma_addr_t data;
- union {
- /* simple buffer */
- unsigned int nbytes;
- /* Rx descriptor list */
- unsigned int ndesc;
- } u;
- /* externally-specified flags; OR-ed with internal flags */
- u32 flags;
-};
+ void (*send_cb)(struct ath10k_ce_pipe *);
+ void (*recv_cb)(struct ath10k_ce_pipe *);
-struct ce_sendlist {
- unsigned int num_items;
- struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
+ unsigned int src_sz_max;
+ struct ath10k_ce_ring *src_ring;
+ struct ath10k_ce_ring *dest_ring;
};
/* Copy Engine settable attributes */
@@ -182,7 +144,7 @@ struct ce_attr;
*
* Implementation note: pushes 1 buffer to Source ring
*/
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
@@ -190,36 +152,11 @@ int ath10k_ce_send(struct ce_state *ce_state,
unsigned int transfer_id,
unsigned int flags);
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
- void (*send_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
-/* Append a simple buffer (address/length) to a sendlist. */
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
- u32 buffer,
- unsigned int nbytes,
- /* OR-ed with internal flags */
- u32 flags);
-
-/*
- * Queue a "sendlist" of buffers to be sent using gather to a single
- * anonymous destination buffer
- * ce - which copy engine to use
- * sendlist - list of simple buffers to send using gather
- * transfer_id - arbitrary ID; reflected to destination
- * Returns 0 on success; otherwise an error status.
- *
- * Implemenation note: Pushes multiple buffers with Gather to Source ring.
- */
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
- void *per_transfer_send_context,
- struct ce_sendlist *sendlist,
- /* 14 bits */
- unsigned int transfer_id);
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
/*==================Recv=======================*/
@@ -233,17 +170,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
*
* Implementation note: Pushes a buffer to Dest ring.
*/
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_transfer_recv_context,
u32 buffer);
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
- void (*recv_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags));
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*recv_cb)(struct ath10k_ce_pipe *));
/* recv flags */
/* Data is byte-swapped */
@@ -253,7 +185,7 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
* Supply data for the next completed unprocessed receive descriptor.
* Pops buffer from Dest ring.
*/
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -263,7 +195,7 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
*/
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -272,7 +204,7 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
/*==================CE Engine Initialization=======================*/
/* Initialize an instance of a CE */
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr);
@@ -282,7 +214,7 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
* receive buffers. Target DMA must be stopped before using
* this API.
*/
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
@@ -291,13 +223,13 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
* pending sends. Target DMA must be stopped before using
* this API.
*/
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
-void ath10k_ce_deinit(struct ce_state *ce_state);
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
@@ -322,9 +254,6 @@ struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
- /* currently not in use */
- unsigned int priority;
-
/* #entries in source ring - Must be a power of 2 */
unsigned int src_nentries;
@@ -336,21 +265,8 @@ struct ce_attr {
/* #entries in destination ring - Must be a power of 2 */
unsigned int dest_nentries;
-
- /* Future use */
- void *reserved;
};
-/*
- * When using sendlist_send to transfer multiple buffer fragments, the
- * transfer context of each fragment, except last one, will be filled
- * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
- * each fragment done with send and the transfer context would be
- * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
- * status of a send completion.
- */
-#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
-
#define SR_BA_ADDRESS 0x0000
#define SR_SIZE_ADDRESS 0x0004
#define DR_BA_ADDRESS 0x0008
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 7226c23b9569..1129994fb105 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -39,17 +39,6 @@ MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
- .id = QCA988X_HW_1_0_VERSION,
- .name = "qca988x hw1.0",
- .patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
- .fw = {
- .dir = QCA988X_HW_1_0_FW_DIR,
- .fw = QCA988X_HW_1_0_FW_FILE,
- .otp = QCA988X_HW_1_0_OTP_FILE,
- .board = QCA988X_HW_1_0_BOARD_DATA_FILE,
- },
- },
- {
.id = QCA988X_HW_2_0_VERSION,
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
@@ -64,33 +53,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
- ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
ar->is_target_paused = true;
wake_up(&ar->event_queue);
}
-static int ath10k_check_fw_version(struct ath10k *ar)
-{
- char version[32];
-
- if (ar->fw_version_major >= SUPPORTED_FW_MAJOR &&
- ar->fw_version_minor >= SUPPORTED_FW_MINOR &&
- ar->fw_version_release >= SUPPORTED_FW_RELEASE &&
- ar->fw_version_build >= SUPPORTED_FW_BUILD)
- return 0;
-
- snprintf(version, sizeof(version), "%u.%u.%u.%u",
- SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR,
- SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD);
-
- ath10k_warn("WARNING: Firmware version %s is not officially supported.\n",
- ar->hw->wiphy->fw_version);
- ath10k_warn("Please upgrade to version %s (or newer)\n", version);
-
- return 0;
-}
-
static int ath10k_init_connect_htc(struct ath10k *ar)
{
int status;
@@ -112,7 +80,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
goto timeout;
}
- ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
return 0;
timeout:
@@ -200,8 +168,7 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
return fw;
}
-static int ath10k_push_board_ext_data(struct ath10k *ar,
- const struct firmware *fw)
+static int ath10k_push_board_ext_data(struct ath10k *ar)
{
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
@@ -214,21 +181,21 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
return ret;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "ath10k: Board extended Data download addr: 0x%x\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot push board extended data addr 0x%x\n",
board_ext_data_addr);
if (board_ext_data_addr == 0)
return 0;
- if (fw->size != (board_data_size + board_ext_data_size)) {
+ if (ar->board_len != (board_data_size + board_ext_data_size)) {
ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
- fw->size, board_data_size, board_ext_data_size);
+ ar->board_len, board_data_size, board_ext_data_size);
return -EINVAL;
}
ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
- fw->data + board_data_size,
+ ar->board_data + board_data_size,
board_ext_data_size);
if (ret) {
ath10k_err("could not write board ext data (%d)\n", ret);
@@ -247,12 +214,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
static int ath10k_download_board_data(struct ath10k *ar)
{
- const struct firmware *fw = ar->board_data;
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 address;
int ret;
- ret = ath10k_push_board_ext_data(ar, fw);
+ ret = ath10k_push_board_ext_data(ar);
if (ret) {
ath10k_err("could not push board ext data (%d)\n", ret);
goto exit;
@@ -264,8 +230,9 @@ static int ath10k_download_board_data(struct ath10k *ar)
goto exit;
}
- ret = ath10k_bmi_write_memory(ar, address, fw->data,
- min_t(u32, board_data_size, fw->size));
+ ret = ath10k_bmi_write_memory(ar, address, ar->board_data,
+ min_t(u32, board_data_size,
+ ar->board_len));
if (ret) {
ath10k_err("could not write board data (%d)\n", ret);
goto exit;
@@ -283,17 +250,16 @@ exit:
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
- const struct firmware *fw = ar->otp;
u32 address = ar->hw_params.patch_load_addr;
u32 exec_param;
int ret;
/* OTP is optional */
- if (!ar->otp)
+ if (!ar->otp_data || !ar->otp_len)
return 0;
- ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+ ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
if (ret) {
ath10k_err("could not write otp (%d)\n", ret);
goto exit;
@@ -312,13 +278,13 @@ exit:
static int ath10k_download_fw(struct ath10k *ar)
{
- const struct firmware *fw = ar->firmware;
u32 address;
int ret;
address = ar->hw_params.patch_load_addr;
- ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+ ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data,
+ ar->firmware_len);
if (ret) {
ath10k_err("could not write fw (%d)\n", ret);
goto exit;
@@ -330,8 +296,8 @@ exit:
static void ath10k_core_free_firmware_files(struct ath10k *ar)
{
- if (ar->board_data && !IS_ERR(ar->board_data))
- release_firmware(ar->board_data);
+ if (ar->board && !IS_ERR(ar->board))
+ release_firmware(ar->board);
if (ar->otp && !IS_ERR(ar->otp))
release_firmware(ar->otp);
@@ -339,12 +305,20 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (ar->firmware && !IS_ERR(ar->firmware))
release_firmware(ar->firmware);
+ ar->board = NULL;
ar->board_data = NULL;
+ ar->board_len = 0;
+
ar->otp = NULL;
+ ar->otp_data = NULL;
+ ar->otp_len = 0;
+
ar->firmware = NULL;
+ ar->firmware_data = NULL;
+ ar->firmware_len = 0;
}
-static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
{
int ret = 0;
@@ -358,15 +332,18 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
return -EINVAL;
}
- ar->board_data = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
- if (IS_ERR(ar->board_data)) {
- ret = PTR_ERR(ar->board_data);
+ ar->board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->board)) {
+ ret = PTR_ERR(ar->board);
ath10k_err("could not fetch board data (%d)\n", ret);
goto err;
}
+ ar->board_data = ar->board->data;
+ ar->board_len = ar->board->size;
+
ar->firmware = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.fw);
@@ -376,6 +353,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
goto err;
}
+ ar->firmware_data = ar->firmware->data;
+ ar->firmware_len = ar->firmware->size;
+
/* OTP may be undefined. If so, don't fetch it at all */
if (ar->hw_params.fw.otp == NULL)
return 0;
@@ -389,6 +369,172 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
goto err;
}
+ ar->otp_data = ar->otp->data;
+ ar->otp_len = ar->otp->size;
+
+ return 0;
+
+err:
+ ath10k_core_free_firmware_files(ar);
+ return ret;
+}
+
+static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
+{
+ size_t magic_len, len, ie_len;
+ int ie_id, i, index, bit, ret;
+ struct ath10k_fw_ie *hdr;
+ const u8 *data;
+ __le32 *timestamp;
+
+ /* first fetch the firmware file (firmware-*.bin) */
+ ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
+ if (IS_ERR(ar->firmware)) {
+ ath10k_err("Could not fetch firmware file '%s': %ld\n",
+ name, PTR_ERR(ar->firmware));
+ return PTR_ERR(ar->firmware);
+ }
+
+ data = ar->firmware->data;
+ len = ar->firmware->size;
+
+ /* magic also includes the null byte, check that as well */
+ magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
+
+ if (len < magic_len) {
+ ath10k_err("firmware image too small to contain magic: %zu\n",
+ len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
+ ath10k_err("Invalid firmware magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* jump over the padding */
+ magic_len = ALIGN(magic_len, 4);
+
+ len -= magic_len;
+ data += magic_len;
+
+ /* loop elements */
+ while (len > sizeof(struct ath10k_fw_ie)) {
+ hdr = (struct ath10k_fw_ie *)data;
+
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data += sizeof(*hdr);
+
+ if (len < ie_len) {
+ ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n",
+ ie_id, len, ie_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH10K_FW_IE_FW_VERSION:
+ if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
+ break;
+
+ memcpy(ar->hw->wiphy->fw_version, data, ie_len);
+ ar->hw->wiphy->fw_version[ie_len] = '\0';
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "found fw version %s\n",
+ ar->hw->wiphy->fw_version);
+ break;
+ case ATH10K_FW_IE_TIMESTAMP:
+ if (ie_len != sizeof(u32))
+ break;
+
+ timestamp = (__le32 *)data;
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n",
+ le32_to_cpup(timestamp));
+ break;
+ case ATH10K_FW_IE_FEATURES:
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "found firmware features ie (%zd B)\n",
+ ie_len);
+
+ for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index == ie_len)
+ break;
+
+ if (data[index] & (1 << bit))
+ __set_bit(i, ar->fw_features);
+ }
+
+ ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
+ ar->fw_features,
+ sizeof(ar->fw_features));
+ break;
+ case ATH10K_FW_IE_FW_IMAGE:
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "found fw image ie (%zd B)\n",
+ ie_len);
+
+ ar->firmware_data = data;
+ ar->firmware_len = ie_len;
+
+ break;
+ case ATH10K_FW_IE_OTP_IMAGE:
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "found otp image ie (%zd B)\n",
+ ie_len);
+
+ ar->otp_data = data;
+ ar->otp_len = ie_len;
+
+ break;
+ default:
+ ath10k_warn("Unknown FW IE: %u\n",
+ le32_to_cpu(hdr->id));
+ break;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+ if (!ar->firmware_data || !ar->firmware_len) {
+ ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n",
+ name);
+ ret = -ENOMEDIUM;
+ goto err;
+ }
+
+ /* now fetch the board file */
+ if (ar->hw_params.fw.board == NULL) {
+ ath10k_err("board data file not defined");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ar->board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->board)) {
+ ret = PTR_ERR(ar->board);
+ ath10k_err("could not fetch board data (%d)\n", ret);
+ goto err;
+ }
+
+ ar->board_data = ar->board->data;
+ ar->board_len = ar->board->size;
+
return 0;
err:
@@ -396,6 +542,28 @@ err:
return ret;
}
+static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
+ if (ret == 0) {
+ ar->fw_api = 2;
+ goto out;
+ }
+
+ ret = ath10k_core_fetch_firmware_api_1(ar);
+ if (ret)
+ return ret;
+
+ ar->fw_api = 1;
+
+out:
+ ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+ return 0;
+}
+
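For readers new to the FW API 2 container parsed above, here is a minimal sketch of the assumed on-disk layout of firmware-2.bin: the NUL-terminated ATH10K_FIRMWARE_MAGIC string padded to a 4-byte boundary, followed by a sequence of 4-byte-aligned IEs. The struct below is inferred from how the parser dereferences hdr->id, hdr->len and sizeof(*hdr); it is illustrative, not quoted from the ath10k headers.

/* Illustrative only -- layout inferred from the parsing loop above. */
struct example_fw_ie {
        __le32 id;      /* one of the ATH10K_FW_IE_* values */
        __le32 len;     /* payload length, excluding this header */
        u8 data[];      /* payload, padded to a 4-byte boundary */
};

/*
 * firmware-2.bin (assumed layout):
 *
 *   ATH10K_FIRMWARE_MAGIC '\0' <pad to 4 bytes>
 *   [ id | len | data ... pad ]   e.g. FW_VERSION, TIMESTAMP, FEATURES
 *   [ id | len | data ... pad ]   FW_IMAGE (required by the parser)
 *   [ id | len | data ... pad ]   OTP_IMAGE (optional)
 */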
static int ath10k_init_download_firmware(struct ath10k *ar)
{
int ret;
@@ -446,6 +614,13 @@ static int ath10k_init_uart(struct ath10k *ar)
return ret;
}
+ /* Set the UART baud rate to 19200. */
+ ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
+ if (ret) {
+ ath10k_warn("could not set the baud rate (%d)\n", ret);
+ return ret;
+ }
+
ath10k_info("UART prints enabled\n");
return 0;
}
@@ -545,6 +720,9 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
skb_queue_head_init(&ar->offchan_tx_queue);
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
init_waitqueue_head(&ar->event_queue);
INIT_WORK(&ar->restart_work, ath10k_core_restart);
@@ -559,6 +737,8 @@ EXPORT_SYMBOL(ath10k_core_create);
void ath10k_core_destroy(struct ath10k *ar)
{
+ ath10k_debug_destroy(ar);
+
flush_workqueue(ar->workqueue);
destroy_workqueue(ar->workqueue);
@@ -570,6 +750,8 @@ int ath10k_core_start(struct ath10k *ar)
{
int status;
+ lockdep_assert_held(&ar->conf_mutex);
+
ath10k_bmi_start(ar);
if (ath10k_init_configure_target(ar)) {
@@ -620,10 +802,6 @@ int ath10k_core_start(struct ath10k *ar)
ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
- status = ath10k_check_fw_version(ar);
- if (status)
- goto err_disconnect_htc;
-
status = ath10k_wmi_cmd_init(ar);
if (status) {
ath10k_err("could not send WMI init command (%d)\n", status);
@@ -641,7 +819,12 @@ int ath10k_core_start(struct ath10k *ar)
if (status)
goto err_disconnect_htc;
+ status = ath10k_debug_start(ar);
+ if (status)
+ goto err_disconnect_htc;
+
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+ INIT_LIST_HEAD(&ar->arvifs);
return 0;
@@ -658,6 +841,9 @@ EXPORT_SYMBOL(ath10k_core_start);
void ath10k_core_stop(struct ath10k *ar)
{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
ath10k_htt_detach(&ar->htt);
ath10k_wmi_detach(ar);
@@ -704,23 +890,65 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return ret;
}
+ mutex_lock(&ar->conf_mutex);
+
ret = ath10k_core_start(ar);
if (ret) {
ath10k_err("could not init core (%d)\n", ret);
ath10k_core_free_firmware_files(ar);
ath10k_hif_power_down(ar);
+ mutex_unlock(&ar->conf_mutex);
return ret;
}
ath10k_core_stop(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
ath10k_hif_power_down(ar);
return 0;
}
-int ath10k_core_register(struct ath10k *ar)
+static int ath10k_core_check_chip_id(struct ath10k *ar)
+{
+ u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
+ ar->chip_id, hw_revision);
+
+	/* Check that we are not using hw1.0 (some of them have the same pci
+	 * id as hw2.0) before doing anything else as ath10k crashes horribly
+	 * due to missing hw1.0 workarounds. */
+ switch (hw_revision) {
+ case QCA988X_HW_1_0_CHIP_ID_REV:
+ ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
+ return -EOPNOTSUPP;
+
+ case QCA988X_HW_2_0_CHIP_ID_REV:
+ /* known hardware revision, continue normally */
+ return 0;
+
+ default:
+ ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
+ ar->chip_id);
+ return 0;
+ }
+
+ return 0;
+}
+
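As an aside, MS() used in ath10k_core_check_chip_id() is the usual ath-style mask/shift field extractor. A stand-alone equivalent is sketched below; the mask and shift values are placeholders, the real SOC_CHIP_ID_REV definitions live in the ath10k register headers.

/* Sketch only: mimics MS(chip_id, SOC_CHIP_ID_REV) with made-up values. */
#define EXAMPLE_CHIP_ID_REV_MASK 0x00000f00
#define EXAMPLE_CHIP_ID_REV_LSB  8

static inline u32 example_chip_id_rev(u32 chip_id)
{
        return (chip_id & EXAMPLE_CHIP_ID_REV_MASK) >> EXAMPLE_CHIP_ID_REV_LSB;
}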
+int ath10k_core_register(struct ath10k *ar, u32 chip_id)
{
int status;
+ ar->chip_id = chip_id;
+
+ status = ath10k_core_check_chip_id(ar);
+ if (status) {
+ ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
+ return status;
+ }
+
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err("could not probe fw (%d)\n", status);
@@ -755,6 +983,7 @@ void ath10k_core_unregister(struct ath10k *ar)
* Otherwise we will fail to submit commands to FW and mac80211 will be
* unhappy about callback failures. */
ath10k_mac_unregister(ar);
+
ath10k_core_free_firmware_files(ar);
}
EXPORT_SYMBOL(ath10k_core_unregister);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index e4bba563ed42..0934f7633de3 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -43,27 +43,23 @@
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
+#define ATH10K_MAX_NUM_MGMT_PENDING 16
+
struct ath10k;
struct ath10k_skb_cb {
dma_addr_t paddr;
bool is_mapped;
bool is_aborted;
+ u8 vdev_id;
struct {
- u8 vdev_id;
- u16 msdu_id;
u8 tid;
bool is_offchan;
- bool is_conf;
- bool discard;
- bool no_ack;
- u8 refcount;
- struct sk_buff *txfrag;
- struct sk_buff *msdu;
- } __packed htt;
- /* 4 bytes left on 64bit arch */
+ u8 frag_len;
+ u8 pad_len;
+ } __packed htt;
} __packed;
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -108,15 +104,26 @@ struct ath10k_bmi {
bool done_sent;
};
+#define ATH10K_MAX_MEM_REQS 16
+
+struct ath10k_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
- atomic_t pending_tx_count;
- wait_queue_head_t wq;
+ wait_queue_head_t tx_credits_wq;
+ struct wmi_cmd_map *cmd;
+ struct wmi_vdev_param_map *vdev_param;
+ struct wmi_pdev_param_map *pdev_param;
- struct sk_buff_head wmi_event_list;
- struct work_struct wmi_event_work;
+ u32 num_mem_chunks;
+ struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
};
struct ath10k_peer_stat {
@@ -198,17 +205,22 @@ struct ath10k_peer {
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
struct ath10k_vif {
+ struct list_head list;
+
u32 vdev_id;
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
+ struct sk_buff *beacon;
struct ath10k *ar;
struct ieee80211_vif *vif;
+ struct work_struct wep_key_work;
struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
- u8 def_wep_key_index;
+ u8 def_wep_key_idx;
+ u8 def_wep_key_newidx;
u16 tx_seq_no;
@@ -246,6 +258,9 @@ struct ath10k_debug {
u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
struct completion event_stats_compl;
+
+ unsigned long htt_stats_mask;
+ struct delayed_work htt_stats_dwork;
};
enum ath10k_state {
@@ -270,12 +285,27 @@ enum ath10k_state {
ATH10K_STATE_WEDGED,
};
+enum ath10k_fw_features {
+ /* wmi_mgmt_rx_hdr contains extra RSSI information */
+ ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
+
+ /* firmware from 10X branch */
+ ATH10K_FW_FEATURE_WMI_10X = 1,
+
+	/* firmware supports management frame tx over WMI, otherwise it's HTT */
+ ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
+
+ /* keep last */
+ ATH10K_FW_FEATURE_COUNT,
+};
+
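To make the FEATURES IE decoding in core.c concrete: the firmware advertises features as a byte array used as a bitmap, one bit per ath10k_fw_features value, least-significant bit first within each byte. For example, a one-byte FEATURES IE of 0x05 (a value chosen purely for illustration) sets bits 0 and 2, i.e. EXT_WMI_MGMT_RX and HAS_WMI_MGMT_TX but not WMI_10X. After parsing, code can test the bitmap with test_bit(); a hedged sketch:

/* Hypothetical helper, not part of the driver: checks whether management
 * frames should go out over WMI instead of HTT. */
static bool example_use_wmi_mgmt_tx(struct ath10k *ar)
{
        return test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features);
}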
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
struct device *dev;
u8 mac_addr[ETH_ALEN];
+ u32 chip_id;
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
@@ -288,6 +318,8 @@ struct ath10k {
u32 vht_cap_info;
u32 num_rf_chains;
+ DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
struct targetdef *targetdef;
struct hostdef *hostdef;
@@ -319,9 +351,19 @@ struct ath10k {
} fw;
} hw_params;
- const struct firmware *board_data;
+ const struct firmware *board;
+ const void *board_data;
+ size_t board_len;
+
const struct firmware *otp;
+ const void *otp_data;
+ size_t otp_len;
+
const struct firmware *firmware;
+ const void *firmware_data;
+ size_t firmware_len;
+
+ int fw_api;
struct {
struct completion started;
@@ -364,6 +406,7 @@ struct ath10k {
/* protects shared structure data */
spinlock_t data_lock;
+ struct list_head arvifs;
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
@@ -372,6 +415,9 @@ struct ath10k {
struct completion offchan_tx_completed;
struct sk_buff *offchan_tx_skb;
+ struct work_struct wmi_mgmt_tx_work;
+ struct sk_buff_head wmi_mgmt_tx_queue;
+
enum ath10k_state state;
struct work_struct restart_work;
@@ -393,7 +439,7 @@ void ath10k_core_destroy(struct ath10k *ar);
int ath10k_core_start(struct ath10k *ar);
void ath10k_core_stop(struct ath10k *ar);
-int ath10k_core_register(struct ath10k *ar);
+int ath10k_core_register(struct ath10k *ar, u32 chip_id);
void ath10k_core_unregister(struct ath10k *ar);
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 3d65594fa098..760ff2289e3c 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -21,6 +21,9 @@
#include "core.h"
#include "debug.h"
+/* ms */
+#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+
static int ath10k_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
@@ -260,7 +263,6 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
}
spin_unlock_bh(&ar->data_lock);
- mutex_unlock(&ar->conf_mutex);
complete(&ar->debug.event_stats_compl);
}
@@ -499,6 +501,144 @@ static const struct file_operations fops_simulate_fw_crash = {
.llseek = default_llseek,
};
+static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned int len;
+ char buf[50];
+
+ len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_chip_id = {
+ .read = ath10k_read_chip_id,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath10k_debug_htt_stats_req(struct ath10k *ar)
+{
+ u64 cookie;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->debug.htt_stats_mask == 0)
+ /* htt stats are disabled */
+ return 0;
+
+ if (ar->state != ATH10K_STATE_ON)
+ return 0;
+
+ cookie = get_jiffies_64();
+
+ ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
+ cookie);
+ if (ret) {
+ ath10k_warn("failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+
+ queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
+ msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
+
+ return 0;
+}
+
+static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ debug.htt_stats_dwork.work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_debug_htt_stats_req(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static ssize_t ath10k_read_htt_stats_mask(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_stats_mask(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long mask;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+ if (ret)
+ return ret;
+
+ /* max 8 bit masks (for now) */
+ if (mask > 0xff)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.htt_stats_mask = mask;
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ goto out;
+
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_stats_mask = {
+ .read = ath10k_read_htt_stats_mask,
+ .write = ath10k_write_htt_stats_mask,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
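The mask written to the htt_stats_mask debugfs file selects which HTT stats groups the target reports every ATH10K_DEBUG_HTT_STATS_INTERVAL milliseconds; the bit positions correspond to the htt_dbg_stats_type values in htt.h, and the write handler above caps the mask at 0xff. A hedged sketch of setting it from within the driver (the helper name is invented):

/* Hypothetical helper, not part of the driver: request WAL pdev TX/RX and
 * RX reorder statistics once per interval. Caller must hold ar->conf_mutex,
 * mirroring ath10k_write_htt_stats_mask(). */
static int example_enable_basic_htt_stats(struct ath10k *ar)
{
        ar->debug.htt_stats_mask = HTT_DBG_STATS_WAL_PDEV_TXRX |
                                   HTT_DBG_STATS_RX_REORDER;

        return ath10k_debug_htt_stats_req(ar);
}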
+int ath10k_debug_start(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ /* continue normally anyway, this isn't serious */
+ ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
+
+ return 0;
+}
+
+void ath10k_debug_stop(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+	/* Must not use _sync here to avoid deadlock; the _sync variant is
+	 * used in ath10k_debug_destroy() instead. The check for
+	 * htt_stats_mask is to avoid a warning from del_timer(). */
+ if (ar->debug.htt_stats_mask != 0)
+ cancel_delayed_work(&ar->debug.htt_stats_dwork);
+}
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -507,6 +647,9 @@ int ath10k_debug_create(struct ath10k *ar)
if (!ar->debug.debugfs_phy)
return -ENOMEM;
+ INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
+ ath10k_debug_htt_stats_dwork);
+
init_completion(&ar->debug.event_stats_compl);
debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
@@ -518,8 +661,20 @@ int ath10k_debug_create(struct ath10k *ar)
debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_simulate_fw_crash);
+ debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_chip_id);
+
+ debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_htt_stats_mask);
+
return 0;
}
+
+void ath10k_debug_destroy(struct ath10k *ar)
+{
+ cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
+}
+
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 168140c54028..3cfe3ee90dbe 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -27,22 +27,26 @@ enum ath10k_debug_mask {
ATH10K_DBG_HTC = 0x00000004,
ATH10K_DBG_HTT = 0x00000008,
ATH10K_DBG_MAC = 0x00000010,
- ATH10K_DBG_CORE = 0x00000020,
+ ATH10K_DBG_BOOT = 0x00000020,
ATH10K_DBG_PCI_DUMP = 0x00000040,
ATH10K_DBG_HTT_DUMP = 0x00000080,
ATH10K_DBG_MGMT = 0x00000100,
ATH10K_DBG_DATA = 0x00000200,
+ ATH10K_DBG_BMI = 0x00000400,
ATH10K_DBG_ANY = 0xffffffff,
};
extern unsigned int ath10k_debug_mask;
-extern __printf(1, 2) int ath10k_info(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
+__printf(1, 2) int ath10k_info(const char *fmt, ...);
+__printf(1, 2) int ath10k_err(const char *fmt, ...);
+__printf(1, 2) int ath10k_warn(const char *fmt, ...);
#ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_start(struct ath10k *ar);
+void ath10k_debug_stop(struct ath10k *ar);
int ath10k_debug_create(struct ath10k *ar);
+void ath10k_debug_destroy(struct ath10k *ar);
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size);
@@ -50,11 +54,24 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
#else
+static inline int ath10k_debug_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_stop(struct ath10k *ar)
+{
+}
+
static inline int ath10k_debug_create(struct ath10k *ar)
{
return 0;
}
+static inline void ath10k_debug_destroy(struct ath10k *ar)
+{
+}
+
static inline void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size)
@@ -68,7 +85,7 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
-extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
+__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
const char *fmt, ...);
void ath10k_dbg_dump(enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index ef3329ef52f3..3118d7506734 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -103,10 +103,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct ath10k_htc_hdr *hdr;
hdr = (struct ath10k_htc_hdr *)skb->data;
- memset(hdr, 0, sizeof(*hdr));
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
+ hdr->flags = 0;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
@@ -117,134 +117,13 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
spin_unlock_bh(&ep->htc->tx_lock);
}
-static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
- struct ath10k_htc_ep *ep,
- struct sk_buff *skb,
- u8 credits)
-{
- struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
- int ret;
-
- ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
- ep->eid, skb);
-
- ath10k_htc_prepare_tx_skb(ep, skb);
-
- ret = ath10k_skb_map(htc->ar->dev, skb);
- if (ret)
- goto err;
-
- ret = ath10k_hif_send_head(htc->ar,
- ep->ul_pipe_id,
- ep->eid,
- skb->len,
- skb);
- if (unlikely(ret))
- goto err;
-
- return 0;
-err:
- ath10k_warn("HTC issue failed: %d\n", ret);
-
- spin_lock_bh(&htc->tx_lock);
- ep->tx_credits += credits;
- spin_unlock_bh(&htc->tx_lock);
-
- /* this is the simplest way to handle out-of-resources for non-credit
- * based endpoints. credit based endpoints can still get -ENOSR, but
- * this is highly unlikely as credit reservation should prevent that */
- if (ret == -ENOSR) {
- spin_lock_bh(&htc->tx_lock);
- __skb_queue_head(&ep->tx_queue, skb);
- spin_unlock_bh(&htc->tx_lock);
-
- return ret;
- }
-
- skb_cb->is_aborted = true;
- ath10k_htc_notify_tx_completion(ep, skb);
-
- return ret;
-}
-
-static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
- struct ath10k_htc_ep *ep,
- u8 *credits)
-{
- struct sk_buff *skb;
- struct ath10k_skb_cb *skb_cb;
- int credits_required;
- int remainder;
- unsigned int transfer_len;
-
- lockdep_assert_held(&htc->tx_lock);
-
- skb = __skb_dequeue(&ep->tx_queue);
- if (!skb)
- return NULL;
-
- skb_cb = ATH10K_SKB_CB(skb);
- transfer_len = skb->len;
-
- if (likely(transfer_len <= htc->target_credit_size)) {
- credits_required = 1;
- } else {
- /* figure out how many credits this message requires */
- credits_required = transfer_len / htc->target_credit_size;
- remainder = transfer_len % htc->target_credit_size;
-
- if (remainder)
- credits_required++;
- }
-
- ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
- credits_required, ep->tx_credits);
-
- if (ep->tx_credits < credits_required) {
- __skb_queue_head(&ep->tx_queue, skb);
- return NULL;
- }
-
- ep->tx_credits -= credits_required;
- *credits = credits_required;
- return skb;
-}
-
-static void ath10k_htc_send_work(struct work_struct *work)
-{
- struct ath10k_htc_ep *ep = container_of(work,
- struct ath10k_htc_ep, send_work);
- struct ath10k_htc *htc = ep->htc;
- struct sk_buff *skb;
- u8 credits = 0;
- int ret;
-
- while (true) {
- if (ep->ul_is_polled)
- ath10k_htc_send_complete_check(ep, 0);
-
- spin_lock_bh(&htc->tx_lock);
- if (ep->tx_credit_flow_enabled)
- skb = ath10k_htc_get_skb_credit_based(htc, ep,
- &credits);
- else
- skb = __skb_dequeue(&ep->tx_queue);
- spin_unlock_bh(&htc->tx_lock);
-
- if (!skb)
- break;
-
- ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
- if (ret == -ENOSR)
- break;
- }
-}
-
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ int credits = 0;
+ int ret;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
@@ -254,18 +133,55 @@ int ath10k_htc_send(struct ath10k_htc *htc,
return -ENOENT;
}
+ /* FIXME: This looks ugly, can we fix it? */
spin_lock_bh(&htc->tx_lock);
if (htc->stopped) {
spin_unlock_bh(&htc->tx_lock);
return -ESHUTDOWN;
}
+ spin_unlock_bh(&htc->tx_lock);
- __skb_queue_tail(&ep->tx_queue, skb);
skb_push(skb, sizeof(struct ath10k_htc_hdr));
- spin_unlock_bh(&htc->tx_lock);
- queue_work(htc->ar->workqueue, &ep->send_work);
+ if (ep->tx_credit_flow_enabled) {
+ credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credits < credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ret = -EAGAIN;
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
+ ath10k_htc_prepare_tx_skb(ep, skb);
+
+ ret = ath10k_skb_map(htc->ar->dev, skb);
+ if (ret)
+ goto err_credits;
+
+ ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
+ skb->len, skb);
+ if (ret)
+ goto err_unmap;
+
return 0;
+
+err_unmap:
+ ath10k_skb_unmap(htc->ar->dev, skb);
+err_credits:
+ if (ep->tx_credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ }
+err_pull:
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ return ret;
}
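For context on the credit accounting now done inline in ath10k_htc_send(): every started target_credit_size chunk of the HTC packet costs one credit, and the credits are refunded on any failure after they were charged. A minimal stand-alone sketch of the cost calculation follows (the helper name is invented; for example, with an assumed credit size of 1536 bytes a 200-byte command costs 1 credit and a 2000-byte command costs 2).

/* Hypothetical helper, not part of the driver: the per-packet credit cost
 * charged when ep->tx_credit_flow_enabled is set. */
static int example_htc_credit_cost(struct sk_buff *skb, struct ath10k_htc *htc)
{
        return DIV_ROUND_UP(skb->len, htc->target_credit_size);
}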
static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
@@ -278,39 +194,9 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
- /* note: when using TX credit flow, the re-checking of queues happens
- * when credits flow back from the target. in the non-TX credit case,
- * we recheck after the packet completes */
- spin_lock_bh(&htc->tx_lock);
- if (!ep->tx_credit_flow_enabled && !htc->stopped)
- queue_work(ar->workqueue, &ep->send_work);
- spin_unlock_bh(&htc->tx_lock);
-
return 0;
}
-/* flush endpoint TX queue */
-static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
- struct ath10k_htc_ep *ep)
-{
- struct sk_buff *skb;
- struct ath10k_skb_cb *skb_cb;
-
- spin_lock_bh(&htc->tx_lock);
- for (;;) {
- skb = __skb_dequeue(&ep->tx_queue);
- if (!skb)
- break;
-
- skb_cb = ATH10K_SKB_CB(skb);
- skb_cb->is_aborted = true;
- ath10k_htc_notify_tx_completion(ep, skb);
- }
- spin_unlock_bh(&htc->tx_lock);
-
- cancel_work_sync(&ep->send_work);
-}
-
/***********/
/* Receive */
/***********/
@@ -340,8 +226,11 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
- if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
- queue_work(htc->ar->workqueue, &ep->send_work);
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ spin_lock_bh(&htc->tx_lock);
+ }
}
spin_unlock_bh(&htc->tx_lock);
}
@@ -599,10 +488,8 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
ep->max_ep_message_len = 0;
ep->max_tx_queue_depth = 0;
ep->eid = i;
- skb_queue_head_init(&ep->tx_queue);
ep->htc = htc;
ep->tx_credit_flow_enabled = true;
- INIT_WORK(&ep->send_work, ath10k_htc_send_work);
}
}
@@ -752,8 +639,8 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
- ath10k_dbg(ATH10K_DBG_HTC,
- "HTC Service %s does not allocate target credits\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
@@ -772,16 +659,16 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
- req_msg = &msg->connect_service;
- req_msg->flags = __cpu_to_le16(flags);
- req_msg->service_id = __cpu_to_le16(conn_req->service_id);
-
/* Only enable credit flow control for WMI ctrl service */
if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
+ req_msg = &msg->connect_service;
+ req_msg->flags = __cpu_to_le16(flags);
+ req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
INIT_COMPLETION(htc->ctl_resp);
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
@@ -873,19 +760,19 @@ setup:
if (status)
return status;
- ath10k_dbg(ATH10K_DBG_HTC,
- "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
- ath10k_dbg(ATH10K_DBG_HTC,
- "EP %d UL polled: %d, DL polled: %d\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc ep %d ul polled %d dl polled %d\n",
ep->eid, ep->ul_is_polled, ep->dl_is_polled);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
- ath10k_dbg(ATH10K_DBG_HTC,
- "HTC service: %s eid: %d TX flow control disabled\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
@@ -945,18 +832,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
*/
void ath10k_htc_stop(struct ath10k_htc *htc)
{
- int i;
- struct ath10k_htc_ep *ep;
-
spin_lock_bh(&htc->tx_lock);
htc->stopped = true;
spin_unlock_bh(&htc->tx_lock);
- for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
- ep = &htc->endpoint[i];
- ath10k_htc_flush_endpoint_tx(htc, ep);
- }
-
ath10k_hif_stop(htc->ar);
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index e1dd8c761853..4716d331e6b6 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -276,6 +276,7 @@ struct ath10k_htc_ops {
struct ath10k_htc_ep_ops {
void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+ void (*ep_tx_credits)(struct ath10k *);
};
/* service connection information */
@@ -315,15 +316,11 @@ struct ath10k_htc_ep {
int ul_is_polled; /* call HIF to get tx completions */
int dl_is_polled; /* call HIF to fetch rx (not implemented) */
- struct sk_buff_head tx_queue;
-
u8 seq_no; /* for debugging */
int tx_credits;
int tx_credit_size;
int tx_credits_per_max_message;
bool tx_credit_flow_enabled;
-
- struct work_struct send_work;
};
struct ath10k_htc_svc_tx_credits {
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 39342c5cfcb2..5f7eeebc5432 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -104,21 +104,16 @@ err_htc_attach:
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt target version %d.%d; host version %d.%d\n",
- htt->target_version_major,
- htt->target_version_minor,
- HTT_CURRENT_VERSION_MAJOR,
- HTT_CURRENT_VERSION_MINOR);
-
- if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
- ath10k_err("htt major versions are incompatible!\n");
+ ath10k_info("htt target version %d.%d\n",
+ htt->target_version_major, htt->target_version_minor);
+
+ if (htt->target_version_major != 2 &&
+ htt->target_version_major != 3) {
+ ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n",
+ htt->target_version_major);
return -ENOTSUPP;
}
- if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
- ath10k_warn("htt minor version differ but still compatible\n");
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 318be4629cde..1a337e93b7e9 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -19,13 +19,11 @@
#define _HTT_H_
#include <linux/bug.h>
+#include <linux/interrupt.h>
#include "htc.h"
#include "rx_desc.h"
-#define HTT_CURRENT_VERSION_MAJOR 2
-#define HTT_CURRENT_VERSION_MINOR 1
-
enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
HTT_DBG_STATS_RX_REORDER = 1 << 1,
@@ -45,6 +43,9 @@ enum htt_h2t_msg_type { /* host-to-target */
HTT_H2T_MSG_TYPE_SYNC = 4,
HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+
+ /* This command is used for sending management frames in HTT < 3.0.
+ * HTT >= 3.0 uses TX_FRM for everything. */
HTT_H2T_MSG_TYPE_MGMT_TX = 7,
HTT_H2T_NUM_MSGS /* keep this last */
@@ -1268,6 +1269,7 @@ struct ath10k_htt {
/* set if host-fw communication goes haywire
* used to avoid further failures */
bool rx_confused;
+ struct tasklet_struct rx_replenish_task;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1308,6 +1310,10 @@ struct htt_rx_desc {
#define HTT_RX_BUF_SIZE 1920
#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
+ * aggregated traffic more nicely. */
+#define ATH10K_HTT_MAX_NUM_REFILL 16
+
/*
* DMA_MAP expects the buffer to be an integral number of cache lines.
* Rather than checking the actual cache line size, this code makes a
@@ -1327,6 +1333,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index e784c40b904b..90d4f74c28d7 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -20,6 +20,7 @@
#include "htt.h"
#include "txrx.h"
#include "debug.h"
+#include "trace.h"
#include <linux/log2.h>
@@ -40,6 +41,10 @@
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
+
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+
+
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
int size;
@@ -177,10 +182,27 @@ static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
- int ret, num_to_fill;
+ int ret, num_deficit, num_to_fill;
+	/* Refilling the whole RX ring buffer proves to be a bad idea. The
+	 * reason is RX may take up a significant amount of CPU cycles and
+	 * starve other tasks, e.g. TX on an ethernet device while acting as
+	 * a bridge with the ath10k wlan interface. This ended up with very
+	 * poor performance once the host CPU was overwhelmed with RX on
+	 * ath10k.
+	 *
+	 * By limiting the number of refills per round the replenishing
+	 * occurs progressively. This in turn makes use of the fact that
+	 * tasklets are processed in FIFO order. This means actual RX
+	 * processing can starve out refilling. If there aren't enough
+	 * buffers on the RX ring the FW will not report RX until it is
+	 * refilled with enough buffers. This automatically balances load
+	 * with respect to CPU power.
+	 *
+	 * This probably comes at the cost of lower maximum throughput but
+	 * improves the average and stability. */
spin_lock_bh(&htt->rx_ring.lock);
- num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+ num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+ num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
+ num_deficit -= num_to_fill;
ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
if (ret == -ENOMEM) {
/*
@@ -191,6 +213,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
*/
mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+ } else if (num_deficit > 0) {
+ tasklet_schedule(&htt->rx_replenish_task);
}
spin_unlock_bh(&htt->rx_ring.lock);
}
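A worked example of the bounded refill above, assuming a deficit of 40 buffers: the first tasklet run fills min(ATH10K_HTT_MAX_NUM_REFILL, 40) = 16 buffers and reschedules itself, the second fills 16 more, and the third fills the remaining 8, letting RX processing interleave between rounds. A compact sketch of that arithmetic:

/* Illustrative only: how a refill deficit drains in steps of
 * ATH10K_HTT_MAX_NUM_REFILL across successive tasklet runs. */
static int example_refill_rounds(int deficit)
{
        int rounds = 0;

        while (deficit > 0) {
                deficit -= min(ATH10K_HTT_MAX_NUM_REFILL, deficit);
                rounds++;       /* rounds after the first are driven by
                                 * tasklet_schedule() */
        }

        return rounds;  /* 3 rounds for a deficit of 40 */
}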
@@ -212,6 +236,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
del_timer_sync(&htt->rx_ring.refill_retry_timer);
+ tasklet_kill(&htt->rx_replenish_task);
while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
struct sk_buff *skb =
@@ -441,6 +466,12 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
return msdu_chaining;
}
+static void ath10k_htt_rx_replenish_task(unsigned long ptr)
+{
+ struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
dma_addr_t paddr;
@@ -501,7 +532,10 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
goto err_fill_ring;
- ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
+ tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
+ (unsigned long)htt);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
@@ -590,134 +624,144 @@ static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
return false;
}
-static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
- struct htt_rx_info *info)
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+struct amsdu_subframe_hdr {
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+} __packed;
+
+static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
+ struct htt_rx_info *info)
{
struct htt_rx_desc *rxd;
- struct sk_buff *amsdu;
struct sk_buff *first;
- struct ieee80211_hdr *hdr;
struct sk_buff *skb = info->skb;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
+ struct ieee80211_hdr *hdr;
+ u8 hdr_buf[64], addr[ETH_ALEN], *qos;
unsigned int hdr_len;
- int crypto_len;
rxd = (void *)skb->data - sizeof(*rxd);
- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- /* FIXME: No idea what assumptions are safe here. Need logs */
- if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
- (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
- ath10k_htt_rx_free_msdu_chain(skb->next);
- skb->next = NULL;
- return -ENOTSUPP;
- }
+ hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(hdr_buf, hdr, hdr_len);
+ hdr = (struct ieee80211_hdr *)hdr_buf;
- /* A-MSDU max is a little less than 8K */
- amsdu = dev_alloc_skb(8*1024);
- if (!amsdu) {
- ath10k_warn("A-MSDU allocation failed\n");
- ath10k_htt_rx_free_msdu_chain(skb->next);
- skb->next = NULL;
- return -ENOMEM;
- }
-
- if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
- int hdrlen;
-
- hdr = (void *)rxd->rx_hdr_status;
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
- memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
- }
+ /* FIXME: Hopefully this is a temporary measure.
+ *
+ * Reporting individual A-MSDU subframes means each reported frame
+ * shares the same sequence number.
+ *
+ * mac80211 drops frames it recognizes as duplicates, i.e.
+ * retransmission flag is set and sequence number matches sequence
+ * number from a previous frame (as per IEEE 802.11-2012: 9.3.2.10
+ * "Duplicate detection and recovery")
+ *
+	 * To avoid frames being dropped, clear the retransmission flag for
+	 * all received A-MSDUs.
+ *
+ * Worst case: actual duplicate frames will be reported but this should
+ * still be handled gracefully by other OSI/ISO layers. */
+ hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY);
first = skb;
while (skb) {
void *decap_hdr;
- int decap_len = 0;
+ int len;
rxd = (void *)skb->data - sizeof(*rxd);
fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
decap_hdr = (void *)rxd->rx_hdr_status;
- if (skb == first) {
- /* We receive linked A-MSDU subframe skbuffs. The
- * first one contains the original 802.11 header (and
- * possible crypto param) in the RX descriptor. The
- * A-MSDU subframe header follows that. Each part is
- * aligned to 4 byte boundary. */
-
- hdr = (void *)amsdu->data;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- crypto_len = ath10k_htt_rx_crypto_param_len(enctype);
-
- decap_hdr += roundup(hdr_len, 4);
- decap_hdr += roundup(crypto_len, 4);
- }
+ skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
- if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
- /* Ethernet2 decap inserts ethernet header in place of
- * A-MSDU subframe header. */
- skb_pull(skb, 6 + 6 + 2);
-
- /* A-MSDU subframe header length */
- decap_len += 6 + 6 + 2;
-
- /* Ethernet2 decap also strips the LLC/SNAP so we need
- * to re-insert it. The LLC/SNAP follows A-MSDU
- * subframe header. */
- /* FIXME: Not all LLCs are 8 bytes long */
- decap_len += 8;
-
- memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+ /* First frame in an A-MSDU chain has more decapped data. */
+ if (skb == first) {
+ len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+ len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
+ 4);
+ decap_hdr += len;
}
- if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
- /* Native Wifi decap inserts regular 802.11 header
- * in place of A-MSDU subframe header. */
+ switch (fmt) {
+ case RX_MSDU_DECAP_RAW:
+ /* remove trailing FCS */
+ skb_trim(skb, skb->len - FCS_LEN);
+ break;
+ case RX_MSDU_DECAP_NATIVE_WIFI:
+ /* pull decapped header and copy DA */
hdr = (struct ieee80211_hdr *)skb->data;
- skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
+ skb_pull(skb, hdr_len);
- /* A-MSDU subframe header length */
- decap_len += 6 + 6 + 2;
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)hdr_buf;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
- memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
- }
+			/* the original A-MSDU header has the A-MSDU-present
+			 * bit set but we're not including the A-MSDU
+			 * subframe header */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
- if (fmt == RX_MSDU_DECAP_RAW)
- skb_trim(skb, skb->len - 4); /* remove FCS */
+ /* original 802.11 header has a different DA */
+ memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
+ break;
+ case RX_MSDU_DECAP_ETHERNET2_DIX:
+ /* strip ethernet header and insert decapped 802.11
+ * header, amsdu subframe header and rfc1042 header */
- memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);
+ len = 0;
+ len += sizeof(struct rfc1042_hdr);
+ len += sizeof(struct amsdu_subframe_hdr);
- /* A-MSDU subframes are padded to 4bytes
- * but relative to first subframe, not the whole MPDU */
- if (skb->next && ((decap_len + skb->len) & 3)) {
- int padlen = 4 - ((decap_len + skb->len) & 3);
- memset(skb_put(amsdu, padlen), 0, padlen);
+ skb_pull(skb, sizeof(struct ethhdr));
+ memcpy(skb_push(skb, len), decap_hdr, len);
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
+ case RX_MSDU_DECAP_8023_SNAP_LLC:
+ /* insert decapped 802.11 header making a single
+ * A-MSDU */
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
}
+ info->skb = skb;
+ info->encrypt_type = enctype;
skb = skb->next;
- }
+ info->skb->next = NULL;
- info->skb = amsdu;
- info->encrypt_type = enctype;
-
- ath10k_htt_rx_free_msdu_chain(first);
+ ath10k_process_rx(htt->ar, info);
+ }
- return 0;
+ /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
+ * monitor interface active for sniffing purposes. */
}
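
A minimal standalone sketch of the 4-byte round-up arithmetic used above to locate the decapped header region behind the 802.11 header and crypto parameters; the header and crypto lengths below are illustrative values, not taken from the RX descriptor.

#include <stdio.h>

static unsigned int round_up4(unsigned int v)
{
	return (v + 3) & ~3u;
}

int main(void)
{
	unsigned int hdr_len = 26;	/* e.g. a QoS data 802.11 header */
	unsigned int crypto_len = 8;	/* e.g. a CCMP parameter block */

	/* rx_hdr_status holds the 802.11 header, crypto params and decap
	 * header, each padded to a 4-byte boundary */
	unsigned int offset = round_up4(hdr_len) + round_up4(crypto_len);

	printf("decap header starts %u bytes into rx_hdr_status\n", offset);
	return 0;
}
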
-static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
struct sk_buff *skb = info->skb;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
+ int hdr_len;
+ void *rfc1042;
/* This shouldn't happen. If it does then it may be a FW bug. */
if (skb->next) {
@@ -731,49 +775,53 @@ static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
RX_MSDU_START_INFO1_DECAP_FORMAT);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+ hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
switch (fmt) {
case RX_MSDU_DECAP_RAW:
/* remove trailing FCS */
- skb_trim(skb, skb->len - 4);
+ skb_trim(skb, skb->len - FCS_LEN);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
- /* nothing to do here */
+ /* Pull decapped header */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ skb_pull(skb, hdr_len);
+
+ /* Push original header */
+ hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
- /* macaddr[6] + macaddr[6] + ethertype[2] */
- skb_pull(skb, 6 + 6 + 2);
- break;
- case RX_MSDU_DECAP_8023_SNAP_LLC:
- /* macaddr[6] + macaddr[6] + len[2] */
- /* we don't need this for non-A-MSDU */
- skb_pull(skb, 6 + 6 + 2);
- break;
- }
+ /* strip ethernet header and insert decapped 802.11 header and
+ * rfc1042 header */
- if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
- void *llc;
- int llclen;
+ rfc1042 = hdr;
+ rfc1042 += roundup(hdr_len, 4);
+ rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
- llclen = 8;
- llc = hdr;
- llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
- llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
-
- skb_push(skb, llclen);
- memcpy(skb->data, llc, llclen);
- }
+ skb_pull(skb, sizeof(struct ethhdr));
+ memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
+ rfc1042, sizeof(struct rfc1042_hdr));
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
+ case RX_MSDU_DECAP_8023_SNAP_LLC:
+ /* remove A-MSDU subframe header and insert
+ * decapped 802.11 header. rfc1042 header is already there */
- if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
- int len = ieee80211_hdrlen(hdr->frame_control);
- skb_push(skb, len);
- memcpy(skb->data, hdr, len);
+ skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
}
info->skb = skb;
info->encrypt_type = enctype;
- return 0;
+
+ ath10k_process_rx(htt->ar, info);
}
static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
@@ -845,8 +893,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
int fw_desc_len;
u8 *fw_desc;
int i, j;
- int ret;
- int ip_summed;
memset(&info, 0, sizeof(info));
@@ -921,11 +967,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
continue;
}
- /* The skb is not yet processed and it may be
- * reallocated. Since the offload is in the original
- * skb extract the checksum now and assign it later */
- ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
-
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
@@ -938,28 +979,13 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
if (ath10k_htt_rx_hdr_is_amsdu(hdr))
- ret = ath10k_htt_rx_amsdu(htt, &info);
+ ath10k_htt_rx_amsdu(htt, &info);
else
- ret = ath10k_htt_rx_msdu(htt, &info);
-
- if (ret && !info.fcs_err) {
- ath10k_warn("error processing msdus %d\n", ret);
- dev_kfree_skb_any(info.skb);
- continue;
- }
-
- if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
- ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
-
- info.skb->ip_summed = ip_summed;
-
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
- info.skb->data, info.skb->len);
- ath10k_process_rx(htt->ar, &info);
+ ath10k_htt_rx_msdu(htt, &info);
}
}
- ath10k_htt_rx_msdu_buff_replenish(htt);
+ tasklet_schedule(&htt->rx_replenish_task);
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
@@ -1131,7 +1157,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
- ath10k_txrx_tx_completed(htt, &tx_done);
+ ath10k_txrx_tx_unref(htt, &tx_done);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
@@ -1165,7 +1191,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
msdu_id = resp->data_tx_completion.msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
- ath10k_txrx_tx_completed(htt, &tx_done);
+ ath10k_txrx_tx_unref(htt, &tx_done);
}
break;
}
@@ -1190,8 +1216,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_TEST:
/* FIX THIS */
break;
- case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
case HTT_T2H_MSG_TYPE_STATS_CONF:
+ trace_ath10k_htt_stats(skb->data, skb->len);
+ break;
+ case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
case HTT_T2H_MSG_TYPE_RX_ADDBA:
case HTT_T2H_MSG_TYPE_RX_DELBA:
case HTT_T2H_MSG_TYPE_RX_FLUSH:
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 656c2546b294..d9335e9d0d04 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -96,7 +96,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
pipe);
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
+ ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
@@ -117,7 +117,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
- struct sk_buff *txdesc;
+ struct htt_tx_done tx_done = {0};
int msdu_id;
/* No locks needed. Called after communication with the device has
@@ -127,18 +127,13 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
- txdesc = htt->pending_tx[msdu_id];
- if (!txdesc)
- continue;
-
ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
msdu_id);
- if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
- ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
+ tx_done.discard = 1;
+ tx_done.msdu_id = msdu_id;
- ATH10K_SKB_CB(txdesc)->htt.discard = true;
- ath10k_txrx_tx_unref(htt, txdesc);
+ ath10k_txrx_tx_unref(htt, &tx_done);
}
}
@@ -152,26 +147,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
- struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
- struct ath10k_htt *htt = &ar->htt;
-
- if (skb_cb->htt.is_conf) {
- dev_kfree_skb_any(skb);
- return;
- }
-
- if (skb_cb->is_aborted) {
- skb_cb->htt.discard = true;
-
- /* if the skbuff is aborted we need to make sure we'll free up
- * the tx resources, we can't simply run tx_unref() 2 times
- * because if htt tx completion came in earlier we'd access
- * unallocated memory */
- if (skb_cb->htt.refcount > 1)
- skb_cb->htt.refcount = 1;
- }
-
- ath10k_txrx_tx_unref(htt, skb);
+ dev_kfree_skb_any(skb);
}
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
@@ -192,10 +168,48 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
- ATH10K_SKB_CB(skb)->htt.is_conf = true;
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
+{
+ struct htt_stats_req *req;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len = 0, ret;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->stats_req);
+
+ skb = ath10k_htc_alloc_skb(len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
+
+ req = &cmd->stats_req;
+
+ memset(req, 0, sizeof(*req));
+
+ /* currently we support only 8-bit masks so there is no need to
+ * worry about endianness */
+ req->upload_types[0] = mask;
+ req->reset_types[0] = mask;
+ req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
+ req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
+ req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
+ ath10k_warn("failed to send htt type stats request: %d", ret);
dev_kfree_skb_any(skb);
return ret;
}
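
A standalone sketch of how the 64-bit stats cookie above is split across the two 32-bit cookie_lsb/cookie_msb fields; the cookie value is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cookie = 0x1122334455667788ULL;	/* hypothetical cookie */
	uint32_t lsb = (uint32_t)(cookie & 0xffffffff);
	uint32_t msb = (uint32_t)(cookie >> 32);

	printf("cookie_lsb=0x%08x cookie_msb=0x%08x\n",
	       (unsigned)lsb, (unsigned)msb);
	return 0;
}
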
@@ -279,8 +293,6 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
#undef desc_offset
- ATH10K_SKB_CB(skb)->htt.is_conf = true;
-
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
@@ -293,10 +305,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct device *dev = htt->ar->dev;
- struct ath10k_skb_cb *skb_cb;
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
- u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ u8 vdev_id = skb_cb->vdev_id;
int len = 0;
int msdu_id = -1;
int res;
@@ -304,30 +316,30 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_inc_pending(htt);
if (res)
- return res;
+ goto err;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
- txdesc = ath10k_htc_alloc_skb(len);
- if (!txdesc) {
- res = -ENOMEM;
- goto err;
- }
-
spin_lock_bh(&htt->tx_lock);
- msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
- if (msdu_id < 0) {
+ res = ath10k_htt_tx_alloc_msdu_id(htt);
+ if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
- res = msdu_id;
- goto err;
+ goto err_tx_dec;
}
- htt->pending_tx[msdu_id] = txdesc;
+ msdu_id = res;
+ htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
+ txdesc = ath10k_htc_alloc_skb(len);
+ if (!txdesc) {
+ res = -ENOMEM;
+ goto err_free_msdu_id;
+ }
+
res = ath10k_skb_map(dev, msdu);
if (res)
- goto err;
+ goto err_free_txdesc;
skb_put(txdesc, len);
cmd = (struct htt_cmd *)txdesc->data;
@@ -339,31 +351,27 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
- /* refcount is decremented by HTC and HTT completions until it reaches
- * zero and is freed */
- skb_cb = ATH10K_SKB_CB(txdesc);
- skb_cb->htt.msdu_id = msdu_id;
- skb_cb->htt.refcount = 2;
- skb_cb->htt.msdu = msdu;
+ skb_cb->htt.frag_len = 0;
+ skb_cb->htt.pad_len = 0;
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
- goto err;
+ goto err_unmap_msdu;
return 0;
-err:
+err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
-
- if (txdesc)
- dev_kfree_skb_any(txdesc);
- if (msdu_id >= 0) {
- spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[msdu_id] = NULL;
- ath10k_htt_tx_free_msdu_id(htt, msdu_id);
- spin_unlock_bh(&htt->tx_lock);
- }
+err_free_txdesc:
+ dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ htt->pending_tx[msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
ath10k_htt_tx_dec_pending(htt);
+err:
return res;
}
@@ -373,13 +381,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
struct htt_cmd *cmd;
struct htt_data_tx_desc_frag *tx_frags;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
- struct ath10k_skb_cb *skb_cb;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct sk_buff *txdesc = NULL;
- struct sk_buff *txfrag = NULL;
- u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+ bool use_frags;
+ u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
u8 tid;
- int prefetch_len, desc_len, frag_len;
- dma_addr_t frags_paddr;
+ int prefetch_len, desc_len;
int msdu_id = -1;
int res;
u8 flags0;
@@ -387,69 +394,82 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_inc_pending(htt);
if (res)
- return res;
+ goto err;
+
+ spin_lock_bh(&htt->tx_lock);
+ res = ath10k_htt_tx_alloc_msdu_id(htt);
+ if (res < 0) {
+ spin_unlock_bh(&htt->tx_lock);
+ goto err_tx_dec;
+ }
+ msdu_id = res;
+ htt->pending_tx[msdu_id] = msdu;
+ spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
- frag_len = sizeof(*tx_frags) * 2;
txdesc = ath10k_htc_alloc_skb(desc_len);
if (!txdesc) {
res = -ENOMEM;
- goto err;
+ goto err_free_msdu_id;
}
- txfrag = dev_alloc_skb(frag_len);
- if (!txfrag) {
- res = -ENOMEM;
- goto err;
- }
+ /* Since HTT 3.0 there is no separate mgmt tx command. However, for
+ * mgmt tx sent via TX_FRM there is no tx fragment list. Instead of a
+ * fragment list the host driver passes the frame pointer directly. */
+ use_frags = htt->target_version_major < 3 ||
+ !ieee80211_is_mgmt(hdr->frame_control);
if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
ath10k_warn("htt alignment check failed. dropping packet.\n");
res = -EIO;
- goto err;
+ goto err_free_txdesc;
}
- spin_lock_bh(&htt->tx_lock);
- msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
- if (msdu_id < 0) {
- spin_unlock_bh(&htt->tx_lock);
- res = msdu_id;
- goto err;
+ if (use_frags) {
+ skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
+ skb_cb->htt.pad_len = (unsigned long)msdu->data -
+ round_down((unsigned long)msdu->data, 4);
+
+ skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+ } else {
+ skb_cb->htt.frag_len = 0;
+ skb_cb->htt.pad_len = 0;
}
- htt->pending_tx[msdu_id] = txdesc;
- spin_unlock_bh(&htt->tx_lock);
res = ath10k_skb_map(dev, msdu);
if (res)
- goto err;
-
- /* tx fragment list must be terminated with zero-entry */
- skb_put(txfrag, frag_len);
- tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
- tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
- tx_frags[0].len = __cpu_to_le32(msdu->len);
- tx_frags[1].paddr = __cpu_to_le32(0);
- tx_frags[1].len = __cpu_to_le32(0);
-
- res = ath10k_skb_map(dev, txfrag);
- if (res)
- goto err;
+ goto err_pull_txfrag;
+
+ if (use_frags) {
+ dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ /* tx fragment list must be terminated with zero-entry */
+ tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
+ tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
+ skb_cb->htt.frag_len +
+ skb_cb->htt.pad_len);
+ tx_frags[0].len = __cpu_to_le32(msdu->len -
+ skb_cb->htt.frag_len -
+ skb_cb->htt.pad_len);
+ tx_frags[1].paddr = __cpu_to_le32(0);
+ tx_frags[1].len = __cpu_to_le32(0);
+
+ dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
+ DMA_TO_DEVICE);
+ }
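
A standalone sketch of the in-band fragment list layout set up above: the list plus alignment padding is pushed in front of the frame, so the first descriptor points frag_len + pad_len past the mapped address and covers only the original frame. The struct and values below are illustrative stand-ins, not the driver's htt_data_tx_desc_frag.

#include <stdint.h>
#include <stdio.h>

struct frag {		/* stand-in for a {paddr, len} fragment descriptor */
	uint32_t paddr;
	uint32_t len;
};

int main(void)
{
	uint32_t paddr = 0x10000;	/* hypothetical DMA address of the pushed buffer */
	unsigned int frame_len = 1524;	/* original 802.11 frame length */
	unsigned int frag_len = 2 * sizeof(struct frag); /* list + zero terminator */
	unsigned int pad_len = 2;	/* hypothetical misalignment of skb->data */

	/* the buffer now starts with the fragment list itself, so the first
	 * (and only) fragment points past the list and padding and covers
	 * just the original frame */
	struct frag frags[2] = {
		{ paddr + frag_len + pad_len, frame_len },
		{ 0, 0 },	/* zero entry terminates the list */
	};

	printf("frag0: paddr=0x%x len=%u\n",
	       (unsigned)frags[0].paddr, (unsigned)frags[0].len);
	printf("frag1: paddr=0x%x len=%u\n",
	       (unsigned)frags[1].paddr, (unsigned)frags[1].len);
	return 0;
}
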
- ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
- (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
+ ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
(unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
- txfrag->data, frag_len);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
msdu->data, msdu->len);
skb_put(txdesc, desc_len);
cmd = (struct htt_cmd *)txdesc->data;
- memset(cmd, 0, desc_len);
tid = ATH10K_SKB_CB(msdu)->htt.tid;
@@ -459,8 +479,13 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
if (!ieee80211_has_protected(hdr->frame_control))
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
- HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+
+ if (use_frags)
+ flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ else
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags1 = 0;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
@@ -468,45 +493,37 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
- frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
-
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
cmd->data_tx.flags0 = flags0;
cmd->data_tx.flags1 = __cpu_to_le16(flags1);
- cmd->data_tx.len = __cpu_to_le16(msdu->len);
+ cmd->data_tx.len = __cpu_to_le16(msdu->len -
+ skb_cb->htt.frag_len -
+ skb_cb->htt.pad_len);
cmd->data_tx.id = __cpu_to_le16(msdu_id);
- cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+ cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
- memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
-
- /* refcount is decremented by HTC and HTT completions until it reaches
- * zero and is freed */
- skb_cb = ATH10K_SKB_CB(txdesc);
- skb_cb->htt.msdu_id = msdu_id;
- skb_cb->htt.refcount = 2;
- skb_cb->htt.txfrag = txfrag;
- skb_cb->htt.msdu = msdu;
+ memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
- goto err;
+ goto err_unmap_msdu;
return 0;
-err:
- if (txfrag)
- ath10k_skb_unmap(dev, txfrag);
- if (txdesc)
- dev_kfree_skb_any(txdesc);
- if (txfrag)
- dev_kfree_skb_any(txfrag);
- if (msdu_id >= 0) {
- spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[msdu_id] = NULL;
- ath10k_htt_tx_free_msdu_id(htt, msdu_id);
- spin_unlock_bh(&htt->tx_lock);
- }
- ath10k_htt_tx_dec_pending(htt);
+
+err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
+err_pull_txfrag:
+ skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+err_free_txdesc:
+ dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ htt->pending_tx[msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
+ ath10k_htt_tx_dec_pending(htt);
+err:
return res;
}
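
Both tx paths above now unwind with a goto ladder that releases resources in reverse order of acquisition. A compilable toy sketch of that shape follows; every step_*/undo_* function is a stand-in, not a driver call.

#include <stdio.h>

static int step_inc_pending(void)  { return 0; }
static void undo_inc_pending(void) { puts("undo: dec pending"); }
static int step_alloc_id(void)     { return 0; }
static void undo_free_id(void)     { puts("undo: free msdu id"); }
static int step_map_dma(void)      { return -1; }	/* force a failure */

static int example_tx(void)
{
	int res;

	res = step_inc_pending();
	if (res)
		goto err;

	res = step_alloc_id();
	if (res)
		goto err_tx_dec;

	res = step_map_dma();
	if (res)
		goto err_free_id;

	return 0;

err_free_id:
	undo_free_id();
err_tx_dec:
	undo_inc_pending();
err:
	return res;
}

int main(void)
{
	printf("example_tx() = %d\n", example_tx());
	return 0;
}
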
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 44ed5af0a204..8aeb46d9b534 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -20,28 +20,37 @@
#include "targaddrs.h"
-/* Supported FW version */
-#define SUPPORTED_FW_MAJOR 1
-#define SUPPORTED_FW_MINOR 0
-#define SUPPORTED_FW_RELEASE 0
-#define SUPPORTED_FW_BUILD 629
-
-/* QCA988X 1.0 definitions */
-#define QCA988X_HW_1_0_VERSION 0x4000002c
-#define QCA988X_HW_1_0_FW_DIR "ath10k/QCA988X/hw1.0"
-#define QCA988X_HW_1_0_FW_FILE "firmware.bin"
-#define QCA988X_HW_1_0_OTP_FILE "otp.bin"
-#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin"
-#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234
+/* QCA988X 1.0 definitions (unsupported) */
+#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
/* QCA988X 2.0 definitions */
#define QCA988X_HW_2_0_VERSION 0x4100016c
+#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
+#define ATH10K_FW_API2_FILE "firmware-2.bin"
+
+/* the magic also includes the terminating null byte */
+#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
+
+struct ath10k_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[0];
+};
+
+enum ath10k_fw_ie_type {
+ ATH10K_FW_IE_FW_VERSION = 0,
+ ATH10K_FW_IE_TIMESTAMP = 1,
+ ATH10K_FW_IE_FEATURES = 2,
+ ATH10K_FW_IE_FW_IMAGE = 3,
+ ATH10K_FW_IE_OTP_IMAGE = 4,
+};
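
A standalone sketch of walking an {id, len, data} container such as ATH10K_FW_API2_FILE; the 4-byte padding between entries and the skipped byte-swapping are simplifying assumptions here, not something this header spells out.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fw_ie {		/* mirrors the shape of struct ath10k_fw_ie */
	uint32_t id;	/* assumed little-endian host; no byte-swap for brevity */
	uint32_t len;
	uint8_t data[];
};

static void walk(const uint8_t *buf, size_t buf_len)
{
	size_t off = 0;

	while (off + sizeof(struct fw_ie) <= buf_len) {
		struct fw_ie ie;

		memcpy(&ie, buf + off, sizeof(ie));
		if (off + sizeof(ie) + ie.len > buf_len)
			break;	/* truncated entry */

		printf("ie id=%u len=%u\n", (unsigned)ie.id, (unsigned)ie.len);
		/* assume entries are padded to 4 bytes */
		off += sizeof(ie) + ((ie.len + 3) & ~3u);
	}
}

int main(void)
{
	/* two hand-built entries: FW_VERSION ("1.0") and FEATURES (1 byte) */
	uint8_t blob[] = {
		0, 0, 0, 0,  3, 0, 0, 0,  '1', '.', '0', 0,
		2, 0, 0, 0,  1, 0, 0, 0,  0x01, 0, 0, 0,
	};

	walk(blob, sizeof(blob));
	return 0;
}
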
+
/* Known peculiarities:
* - current FW doesn't support raw rx mode (last tested v599)
* - current FW dumps upon raw tx mode (last tested v599)
@@ -53,6 +62,9 @@ enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
ATH10K_HW_TXRX_NATIVE_WIFI = 1,
ATH10K_HW_TXRX_ETHERNET = 2,
+
+ /* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
+ ATH10K_HW_TXRX_MGMT = 3,
};
enum ath10k_mcast2ucast_mode {
@@ -60,6 +72,7 @@ enum ath10k_mcast2ucast_mode {
ATH10K_MCAST2UCAST_ENABLED = 1,
};
+/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
#define TARGET_NUM_WDS_ENTRIES 32
@@ -75,7 +88,11 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
-#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET
+
+/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
+ * avoid a very expensive re-alignment in mac80211. */
+#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
+
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
@@ -90,6 +107,36 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_NUM_MSDU_DESC (1024 + 400)
#define TARGET_MAX_FRAG_ENTRIES 0
+/* Target specific defines for 10.X firmware */
+#define TARGET_10X_NUM_VDEVS 16
+#define TARGET_10X_NUM_PEER_AST 2
+#define TARGET_10X_NUM_WDS_ENTRIES 32
+#define TARGET_10X_DMA_BURST_SIZE 0
+#define TARGET_10X_MAC_AGGR_DELIM 0
+#define TARGET_10X_AST_SKID_LIMIT 16
+#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_NUM_OFFLOAD_PEERS 0
+#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
+#define TARGET_10X_NUM_PEER_KEYS 2
+#define TARGET_10X_NUM_TIDS 256
+#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
+#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
+#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
+#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_10X_NUM_MCAST_GROUPS 0
+#define TARGET_10X_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_10X_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_10X_TX_DBG_LOG_SIZE 1024
+#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10X_VOW_CONFIG 0
+#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_10X_MAX_FRAG_ENTRIES 0
/* Number of Copy Engines supported */
#define CE_COUNT 8
@@ -169,6 +216,10 @@ enum ath10k_mcast2ucast_mode {
#define SOC_LPO_CAL_ENABLE_LSB 20
#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
+#define SOC_CHIP_ID_ADDRESS 0x000000ec
+#define SOC_CHIP_ID_REV_LSB 8
+#define SOC_CHIP_ID_REV_MASK 0x00000f00
+
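
A standalone sketch of extracting the revision field with the new mask/LSB pair; the register value is made up.

#include <stdint.h>
#include <stdio.h>

#define SOC_CHIP_ID_REV_LSB	8
#define SOC_CHIP_ID_REV_MASK	0x00000f00

int main(void)
{
	uint32_t chip_id = 0x043202ff;	/* hypothetical SOC_CHIP_ID readback */
	uint32_t rev = (chip_id & SOC_CHIP_ID_REV_MASK) >> SOC_CHIP_ID_REV_LSB;

	printf("chip id rev 0x%x\n", (unsigned)rev);	/* 0x2 matches QCA988X hw2.0 */
	return 0;
}
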
#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index cf2ba4d850c9..0b1cc516e778 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -334,25 +334,29 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
if (value != 0xFFFFFFFF)
value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
ATH10K_RTS_MAX);
- return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
- WMI_VDEV_PARAM_RTS_THRESHOLD,
- value);
+ vdev_param = ar->wmi.vdev_param->rts_threshold;
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
if (value != 0xFFFFFFFF)
value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
ATH10K_FRAGMT_THRESHOLD_MIN,
ATH10K_FRAGMT_THRESHOLD_MAX);
- return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
- WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
- value);
+ vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
@@ -460,6 +464,11 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
arg.ssid_len = arvif->vif->bss_conf.ssid_len;
}
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath10k_wmi_phymode_str(arg.channel.mode));
+
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
ath10k_warn("WMI vdev start failed: ret %d\n", ret);
@@ -503,13 +512,10 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
{
struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
struct wmi_vdev_start_request_arg arg = {};
- enum nl80211_channel_type type;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- type = cfg80211_get_chandef_type(&ar->hw->conf.chandef);
-
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
@@ -560,12 +566,9 @@ static int ath10k_monitor_stop(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
- /* For some reasons, ath10k_wmi_vdev_down() here couse
- * often ath10k_wmi_vdev_stop() to fail. Next we could
- * not run monitor vdev and driver reload
- * required. Don't see such problems we skip
- * ath10k_wmi_vdev_down() here.
- */
+ ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn("Monitor vdev down failed: %d\n", ret);
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
@@ -607,7 +610,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
goto vdev_fail;
}
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
ar->monitor_present = true;
@@ -639,7 +642,7 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
ar->monitor_present = false;
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
return ret;
}
@@ -668,13 +671,14 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
arvif->vdev_id);
return;
}
- ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id);
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
static void ath10k_control_ibss(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info,
const u8 self_peer[ETH_ALEN])
{
+ u32 vdev_param;
int ret = 0;
lockdep_assert_held(&arvif->ar->conf_mutex);
@@ -708,8 +712,8 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
return;
}
- ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
- WMI_VDEV_PARAM_ATIM_WINDOW,
+ vdev_param = arvif->ar->wmi.vdev_param->atim_window;
+ ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
ATH10K_DEFAULT_ATIM);
if (ret)
ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
@@ -719,47 +723,45 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
/*
* Review this when mac80211 gains per-interface powersave support.
*/
-static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
{
- struct ath10k_generic_iter *ar_iter = data;
- struct ieee80211_conf *conf = &ar_iter->ar->hw->conf;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_conf *conf = &ar->hw->conf;
enum wmi_sta_powersave_param param;
enum wmi_sta_ps_mode psmode;
int ret;
lockdep_assert_held(&arvif->ar->conf_mutex);
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
+ if (arvif->vif->type != NL80211_IFTYPE_STATION)
+ return 0;
if (conf->flags & IEEE80211_CONF_PS) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
- ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar,
- arvif->vdev_id,
- param,
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
conf->dynamic_ps_timeout);
if (ret) {
ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
arvif->vdev_id);
- return;
+ return ret;
}
-
- ar_iter->ret = ret;
} else {
psmode = WMI_STA_PS_MODE_DISABLED;
}
- ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
- psmode);
- if (ar_iter->ret)
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
+ if (ret) {
ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
psmode, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n",
- psmode, arvif->vdev_id);
+ return ret;
+ }
+
+ return 0;
}
/**********************/
@@ -949,7 +951,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_ht_rates.num_rates = n;
arg->peer_num_spatial_streams = max((n+7) / 8, 1);
- ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ arg->addr,
arg->peer_ht_rates.num_rates,
arg->peer_num_spatial_streams);
}
@@ -969,11 +972,11 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
arg->peer_flags |= WMI_PEER_QOS;
if (sta->wme && sta->uapsd_queues) {
- ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
sta->uapsd_queues, sta->max_sp);
arg->peer_flags |= WMI_PEER_APSD;
- arg->peer_flags |= WMI_RC_UAPSD_FLAG;
+ arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
@@ -1028,14 +1031,27 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ u8 ampdu_factor;
if (!vht_cap->vht_supported)
return;
arg->peer_flags |= WMI_PEER_VHT;
-
arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+ * zero in VHT IE. Using it would result in degraded throughput.
+ * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+ * it if VHT max_mpdu is smaller. */
+ arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+ (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1);
+
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->peer_flags |= WMI_PEER_80MHZ;
@@ -1048,7 +1064,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
arg->peer_vht_rates.tx_mcs_set =
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
- ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n");
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags);
}
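
The workaround above relies on the maximum A-MPDU length being 2^(13 + exponent) - 1 bytes; a standalone worked example with an illustrative exponent of 7:

#include <stdio.h>

#define HT_MAX_AMPDU_FACTOR	13	/* baseline exponent, as in mac80211 */

int main(void)
{
	unsigned int ampdu_factor = 7;	/* exponent advertised in the VHT caps */
	unsigned int max_ampdu_len =
		(1U << (HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1;

	printf("max a-mpdu length: %u bytes\n", max_ampdu_len);	/* 1048575 */
	return 0;
}
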
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
@@ -1076,8 +1093,6 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
{
enum wmi_phy_mode phymode = MODE_UNKNOWN;
- /* FIXME: add VHT */
-
switch (ar->hw->conf.chandef.chan->band) {
case IEEE80211_BAND_2GHZ:
if (sta->ht_cap.ht_supported) {
@@ -1091,7 +1106,17 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
break;
case IEEE80211_BAND_5GHZ:
- if (sta->ht_cap.ht_supported) {
+ /*
+ * Check VHT first.
+ */
+ if (sta->vht_cap.vht_supported) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ phymode = MODE_11AC_VHT80;
+ else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->ht_cap.ht_supported) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
@@ -1105,30 +1130,32 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
break;
}
+ ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
+ sta->addr, ath10k_wmi_phymode_str(phymode));
+
arg->peer_phymode = phymode;
WARN_ON(phymode == MODE_UNKNOWN);
}
-static int ath10k_peer_assoc(struct ath10k *ar,
- struct ath10k_vif *arvif,
- struct ieee80211_sta *sta,
- struct ieee80211_bss_conf *bss_conf)
+static int ath10k_peer_assoc_prepare(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *bss_conf,
+ struct wmi_peer_assoc_complete_arg *arg)
{
- struct wmi_peer_assoc_complete_arg arg;
-
lockdep_assert_held(&ar->conf_mutex);
- memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
+ memset(arg, 0, sizeof(*arg));
- ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
- ath10k_peer_assoc_h_crypto(ar, arvif, &arg);
- ath10k_peer_assoc_h_rates(ar, sta, &arg);
- ath10k_peer_assoc_h_ht(ar, sta, &arg);
- ath10k_peer_assoc_h_vht(ar, sta, &arg);
- ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg);
- ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg);
+ ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg);
+ ath10k_peer_assoc_h_crypto(ar, arvif, arg);
+ ath10k_peer_assoc_h_rates(ar, sta, arg);
+ ath10k_peer_assoc_h_ht(ar, sta, arg);
+ ath10k_peer_assoc_h_vht(ar, sta, arg);
+ ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg);
+ ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg);
- return ath10k_wmi_peer_assoc(ar, &arg);
+ return 0;
}
/* can be called only in mac80211 callbacks due to `key_count` usage */
@@ -1138,6 +1165,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct wmi_peer_assoc_complete_arg peer_arg;
struct ieee80211_sta *ap_sta;
int ret;
@@ -1153,24 +1181,33 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
return;
}
- ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf);
+ ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
+ bss_conf, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid);
+ ath10k_warn("Peer assoc prepare failed for %pM\n: %d",
+ bss_conf->bssid, ret);
rcu_read_unlock();
return;
}
rcu_read_unlock();
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+ if (ret) {
+ ath10k_warn("Peer assoc failed for %pM\n: %d",
+ bss_conf->bssid, ret);
+ return;
+ }
+
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
bss_conf->bssid);
if (ret)
ath10k_warn("VDEV: %d up failed: ret %d\n",
arvif->vdev_id, ret);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "VDEV: %d associated, BSSID: %pM, AID: %d\n",
- arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
}
/*
@@ -1191,10 +1228,11 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
* No idea why this happens, even though VDEV-DOWN is supposed
* to be analogous to link down, so just stop the VDEV.
*/
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n",
+ arvif->vdev_id);
+
+ /* FIXME: check return value */
ret = ath10k_vdev_stop(arvif);
- if (!ret)
- ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n",
- arvif->vdev_id);
/*
* If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
@@ -1203,26 +1241,33 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
* interfaces as it expects there is no rx when no interface is
* running.
*/
- ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
- if (ret)
- ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n",
- arvif->vdev_id, ret);
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
- ath10k_wmi_flush_tx(ar);
+ /* FIXME: why don't we print an error if the wmi call fails? */
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
- arvif->def_wep_key_index = 0;
+ arvif->def_wep_key_idx = 0;
}
static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
struct ieee80211_sta *sta)
{
+ struct wmi_peer_assoc_complete_arg peer_arg;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
+ ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
+ if (ret) {
+ ath10k_warn("WMI peer assoc prepare failed for %pM\n",
+ sta->addr);
+ return ret;
+ }
+
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
+ ath10k_warn("Peer assoc failed for STA %pM\n: %d",
+ sta->addr, ret);
return ret;
}
@@ -1333,8 +1378,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
continue;
ath10k_dbg(ATH10K_DBG_WMI,
- "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
- __func__, ch - arg.channels, arg.n_channels,
+ "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ ch - arg.channels, arg.n_channels,
ch->freq, ch->max_power, ch->max_reg_power,
ch->max_antenna_gain, ch->mode);
@@ -1391,6 +1436,33 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
/* TX handlers */
/***************/
+static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
+{
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ return HTT_DATA_TX_EXT_TID_MGMT;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+ if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
+ return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+ return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
+ struct ieee80211_tx_info *info)
+{
+ if (info->control.vif)
+ return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
+
+ if (ar->monitor_enabled)
+ return ar->monitor_vdev_id;
+
+ ath10k_warn("could not resolve vdev id\n");
+ return 0;
+}
+
/*
* Frames sent to the FW have to be in "Native Wifi" format.
* Strip the QoS field from the 802.11 header.
@@ -1411,6 +1483,30 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
skb_pull(skb, IEEE80211_QOS_CTL_LEN);
}
+static void ath10k_tx_wep_key_work(struct work_struct *work)
+{
+ struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+ wep_key_work);
+ int ret, keyidx = arvif->def_wep_key_newidx;
+
+ if (arvif->def_wep_key_idx == keyidx)
+ return;
+
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
+ arvif->vdev_id, keyidx);
+
+ ret = ath10k_wmi_vdev_set_param(arvif->ar,
+ arvif->vdev_id,
+ arvif->ar->wmi.vdev_param->def_keyid,
+ keyidx);
+ if (ret) {
+ ath10k_warn("could not update wep keyidx (%d)\n", ret);
+ return;
+ }
+
+ arvif->def_wep_key_idx = keyidx;
+}
+
static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1419,11 +1515,6 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
struct ath10k *ar = arvif->ar;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
- int ret;
-
- /* TODO AP mode should be implemented */
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
if (!ieee80211_has_protected(hdr->frame_control))
return;
@@ -1435,20 +1526,14 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
key->cipher != WLAN_CIPHER_SUITE_WEP104)
return;
- if (key->keyidx == arvif->def_wep_key_index)
+ if (key->keyidx == arvif->def_wep_key_idx)
return;
- ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx);
-
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_DEF_KEYID,
- key->keyidx);
- if (ret) {
- ath10k_warn("could not update wep keyidx (%d)\n", ret);
- return;
- }
-
- arvif->def_wep_key_index = key->keyidx;
+ /* FIXME: Most likely a few frames will be TXed with an old key. Simply
+ * queueing frames until the key index is updated is not an option
+ * because an sk_buff may need more processing, e.g. offchannel frames */
+ arvif->def_wep_key_newidx = key->keyidx;
+ ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
}
static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
@@ -1478,19 +1563,42 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int ret;
+ int ret = 0;
- if (ieee80211_is_mgmt(hdr->frame_control))
- ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
- else if (ieee80211_is_nullfunc(hdr->frame_control))
+ if (ar->htt.target_version_major >= 3) {
+ /* Since HTT 3.0 there is no separate mgmt tx command */
+ ret = ath10k_htt_tx(&ar->htt, skb);
+ goto exit;
+ }
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->fw_features)) {
+ if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
+ ATH10K_MAX_NUM_MGMT_PENDING) {
+ ath10k_warn("wmi mgmt_tx queue limit reached\n");
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+ } else {
+ ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
+ }
+ } else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->fw_features) &&
+ ieee80211_is_nullfunc(hdr->frame_control)) {
/* FW does not report tx status properly for NullFunc frames
* unless they are sent through mgmt tx path. mac80211 sends
- * those frames when it detects link/beacon loss and depends on
- * the tx status to be correct. */
+ * those frames when it detects link/beacon loss and depends
+ * on the tx status to be correct. */
ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
- else
+ } else {
ret = ath10k_htt_tx(&ar->htt, skb);
+ }
+exit:
if (ret) {
ath10k_warn("tx failed (%d). dropping packet.\n", ret);
ieee80211_free_txskb(ar->hw, skb);
@@ -1534,18 +1642,19 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n",
skb);
hdr = (struct ieee80211_hdr *)skb->data;
peer_addr = ieee80211_get_DA(hdr);
- vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id;
+ vdev_id = ATH10K_SKB_CB(skb)->vdev_id;
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, vdev_id, peer_addr);
spin_unlock_bh(&ar->data_lock);
if (peer)
+ /* FIXME: should this use ath10k_warn()? */
ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
peer_addr, vdev_id);
@@ -1580,6 +1689,36 @@ void ath10k_offchan_tx_work(struct work_struct *work)
}
}
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+ if (!skb)
+ break;
+
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
+ struct sk_buff *skb;
+ int ret;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+ if (!skb)
+ break;
+
+ ret = ath10k_wmi_mgmt_tx(ar, skb);
+ if (ret)
+ ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
+ }
+}
+
/************/
/* Scanning */
/************/
@@ -1643,8 +1782,6 @@ static int ath10k_abort_scan(struct ath10k *ar)
return -EIO;
}
- ath10k_wmi_flush_tx(ar);
-
ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
if (ret == 0)
ath10k_warn("timed out while waiting for scan to stop\n");
@@ -1678,10 +1815,6 @@ static int ath10k_start_scan(struct ath10k *ar,
if (ret)
return ret;
- /* make sure we submit the command so the completion
- * timeout makes sense */
- ath10k_wmi_flush_tx(ar);
-
ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
if (ret == 0) {
ath10k_abort_scan(ar);
@@ -1709,16 +1842,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = NULL;
- u32 vdev_id = 0;
- u8 tid;
-
- if (info->control.vif) {
- arvif = ath10k_vif_to_arvif(info->control.vif);
- vdev_id = arvif->vdev_id;
- } else if (ar->monitor_enabled) {
- vdev_id = ar->monitor_vdev_id;
- }
+ u8 tid, vdev_id;
/* We should disable CCK RATE due to P2P */
if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
@@ -1726,12 +1850,8 @@ static void ath10k_tx(struct ieee80211_hw *hw,
/* we must calculate tid before we apply qos workaround
* as we'd lose the qos control field */
- tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
- if (ieee80211_is_data_qos(hdr->frame_control) &&
- is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- }
+ tid = ath10k_tx_h_get_tid(hdr);
+ vdev_id = ath10k_tx_h_get_vdev_id(ar, info);
/* it makes no sense to process injected frames like that */
if (info->control.vif &&
@@ -1742,14 +1862,14 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ath10k_tx_h_seq_no(skb);
}
- memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
- ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
+ ATH10K_SKB_CB(skb)->vdev_id = vdev_id;
+ ATH10K_SKB_CB(skb)->htt.is_offchan = false;
ATH10K_SKB_CB(skb)->htt.tid = tid;
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
spin_lock_bh(&ar->data_lock);
ATH10K_SKB_CB(skb)->htt.is_offchan = true;
- ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id;
+ ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
spin_unlock_bh(&ar->data_lock);
ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);
@@ -1771,6 +1891,7 @@ void ath10k_halt(struct ath10k *ar)
del_timer_sync(&ar->scan.timeout);
ath10k_offchan_tx_purge(ar);
+ ath10k_mgmt_over_wmi_tx_purge(ar);
ath10k_peer_cleanup_all(ar);
ath10k_core_stop(ar);
ath10k_hif_power_down(ar);
@@ -1817,12 +1938,12 @@ static int ath10k_start(struct ieee80211_hw *hw)
else if (ar->state == ATH10K_STATE_RESTARTING)
ar->state = ATH10K_STATE_RESTARTED;
- ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
if (ret)
ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
ret);
- ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0);
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 0);
if (ret)
ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
ret);
@@ -1847,32 +1968,29 @@ static void ath10k_stop(struct ieee80211_hw *hw)
ar->state = ATH10K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
+ ath10k_mgmt_over_wmi_tx_purge(ar);
+
cancel_work_sync(&ar->offchan_tx_work);
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
cancel_work_sync(&ar->restart_work);
}
-static void ath10k_config_ps(struct ath10k *ar)
+static int ath10k_config_ps(struct ath10k *ar)
{
- struct ath10k_generic_iter ar_iter;
+ struct ath10k_vif *arvif;
+ int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- /* During HW reconfiguration mac80211 reports all interfaces that were
- * running until reconfiguration was started. Since FW doesn't have any
- * vdevs at this point we must not iterate over this interface list.
- * This setting will be updated upon add_interface(). */
- if (ar->state == ATH10K_STATE_RESTARTED)
- return;
-
- memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
- ar_iter.ar = ar;
-
- ieee80211_iterate_active_interfaces_atomic(
- ar->hw, IEEE80211_IFACE_ITER_NORMAL,
- ath10k_ps_iter, &ar_iter);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_mac_vif_setup_ps(arvif);
+ if (ret) {
+ ath10k_warn("could not setup powersave (%d)\n", ret);
+ break;
+ }
+ }
- if (ar_iter.ret)
- ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret);
+ return ret;
}
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1884,7 +2002,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&ar->conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n",
conf->chandef.chan->center_freq);
spin_lock_bh(&ar->data_lock);
ar->rx_channel = conf->chandef.chan;
@@ -1901,7 +2019,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ret = ath10k_monitor_destroy(ar);
}
- ath10k_wmi_flush_tx(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -1922,6 +2039,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
int ret = 0;
u32 value;
int bit;
+ u32 vdev_param;
mutex_lock(&ar->conf_mutex);
@@ -1930,21 +2048,22 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->ar = ar;
arvif->vif = vif;
+ INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
+
if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
ath10k_warn("Only one monitor interface allowed\n");
ret = -EBUSY;
- goto exit;
+ goto err;
}
bit = ffs(ar->free_vdev_map);
if (bit == 0) {
ret = -EBUSY;
- goto exit;
+ goto err;
}
arvif->vdev_id = bit - 1;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
- ar->free_vdev_map &= ~(1 << arvif->vdev_id);
if (ar->p2p)
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
@@ -1973,32 +2092,41 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
break;
}
- ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
arvif->vdev_subtype, vif->addr);
if (ret) {
ath10k_warn("WMI vdev create failed: ret %d\n", ret);
- goto exit;
+ goto err;
}
- ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID,
- arvif->def_wep_key_index);
- if (ret)
+ ar->free_vdev_map &= ~BIT(arvif->vdev_id);
+ list_add(&arvif->list, &ar->arvifs);
+
+ vdev_param = ar->wmi.vdev_param->def_keyid;
+ ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
+ arvif->def_wep_key_idx);
+ if (ret) {
ath10k_warn("Failed to set default keyid: %d\n", ret);
+ goto err_vdev_delete;
+ }
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ vdev_param = ar->wmi.vdev_param->tx_encap_type;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
ATH10K_HW_TXRX_NATIVE_WIFI);
- if (ret)
+ /* 10.X firmware does not support this VDEV parameter. Do not warn */
+ if (ret && ret != -EOPNOTSUPP) {
ath10k_warn("Failed to set TX encap: %d\n", ret);
+ goto err_vdev_delete;
+ }
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
if (ret) {
ath10k_warn("Failed to create peer for AP: %d\n", ret);
- goto exit;
+ goto err_vdev_delete;
}
}
@@ -2007,39 +2135,62 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
- if (ret)
+ if (ret) {
ath10k_warn("Failed to set RX wake policy: %d\n", ret);
+ goto err_peer_delete;
+ }
param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
- if (ret)
+ if (ret) {
ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
+ goto err_peer_delete;
+ }
param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
- if (ret)
+ if (ret) {
ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
+ goto err_peer_delete;
+ }
}
ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
- if (ret)
+ if (ret) {
ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
- if (ret)
+ if (ret) {
ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ar->monitor_present = true;
-exit:
mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_peer_delete:
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
+
+err_vdev_delete:
+ ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+ ar->free_vdev_map &= ~BIT(arvif->vdev_id);
+ list_del(&arvif->list);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -2052,9 +2203,17 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id);
+ cancel_work_sync(&arvif->wep_key_work);
+
+ spin_lock_bh(&ar->data_lock);
+ if (arvif->beacon) {
+ dev_kfree_skb_any(arvif->beacon);
+ arvif->beacon = NULL;
+ }
+ spin_unlock_bh(&ar->data_lock);
ar->free_vdev_map |= 1 << (arvif->vdev_id);
+ list_del(&arvif->list);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
@@ -2064,6 +2223,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
kfree(arvif->u.ap.noa_data);
}
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
+ arvif->vdev_id);
+
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
ath10k_warn("WMI vdev delete failed: %d\n", ret);
@@ -2105,18 +2267,20 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
!ar->monitor_enabled) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
+ ar->monitor_vdev_id);
+
ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn("Unable to start monitor mode\n");
- else
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n");
} else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
ar->monitor_enabled) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
+ ar->monitor_vdev_id);
+
ret = ath10k_monitor_stop(ar);
if (ret)
ath10k_warn("Unable to stop monitor mode\n");
- else
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n");
}
mutex_unlock(&ar->conf_mutex);
@@ -2130,6 +2294,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret = 0;
+ u32 vdev_param, pdev_param;
mutex_lock(&ar->conf_mutex);
@@ -2138,44 +2303,44 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON_INT) {
arvif->beacon_interval = info->beacon_int;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_BEACON_INTERVAL,
+ vdev_param = ar->wmi.vdev_param->beacon_interval;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->beacon_interval);
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d beacon_interval %d\n",
+ arvif->vdev_id, arvif->beacon_interval);
+
if (ret)
ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Beacon interval: %d set for VDEV: %d\n",
- arvif->beacon_interval, arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON) {
- ret = ath10k_wmi_pdev_set_param(ar,
- WMI_PDEV_PARAM_BEACON_TX_MODE,
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "vdev %d set beacon tx mode to staggered\n",
+ arvif->vdev_id);
+
+ pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
+ ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
WMI_BEACON_STAGGERED_MODE);
if (ret)
ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set staggered beacon mode for VDEV: %d\n",
- arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON_INFO) {
arvif->dtim_period = info->dtim_period;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_DTIM_PERIOD,
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d dtim_period %d\n",
+ arvif->vdev_id, arvif->dtim_period);
+
+ vdev_param = ar->wmi.vdev_param->dtim_period;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->dtim_period);
if (ret)
ath10k_warn("Failed to set dtim period for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set dtim period: %d for VDEV: %d\n",
- arvif->dtim_period, arvif->vdev_id);
}
if (changed & BSS_CHANGED_SSID &&
@@ -2188,16 +2353,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
if (!is_zero_ether_addr(info->bssid)) {
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d create peer %pM\n",
+ arvif->vdev_id, info->bssid);
+
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
info->bssid, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Added peer: %pM for VDEV: %d\n",
- info->bssid, arvif->vdev_id);
-
if (vif->type == NL80211_IFTYPE_STATION) {
/*
@@ -2207,11 +2371,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
memcpy(arvif->u.sta.bssid, info->bssid,
ETH_ALEN);
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d start %pM\n",
+ arvif->vdev_id, info->bssid);
+
+ /* FIXME: check return value */
ret = ath10k_vdev_start(arvif);
- if (!ret)
- ath10k_dbg(ATH10K_DBG_MAC,
- "VDEV: %d started with BSSID: %pM\n",
- arvif->vdev_id, info->bssid);
}
/*
@@ -2235,16 +2400,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
cts_prot = 0;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
+ arvif->vdev_id, cts_prot);
+
+ vdev_param = ar->wmi.vdev_param->enable_rtscts;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
cts_prot);
if (ret)
ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set CTS prot: %d for VDEV: %d\n",
- cts_prot, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2255,16 +2419,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_SLOT_TIME,
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
+ arvif->vdev_id, slottime);
+
+ vdev_param = ar->wmi.vdev_param->slot_time;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
slottime);
if (ret)
ath10k_warn("Failed to set erp slot for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set slottime: %d for VDEV: %d\n",
- slottime, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -2274,16 +2437,16 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
preamble = WMI_VDEV_PREAMBLE_LONG;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_PREAMBLE,
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d preamble %dn",
+ arvif->vdev_id, preamble);
+
+ vdev_param = ar->wmi.vdev_param->preamble;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
preamble);
if (ret)
ath10k_warn("Failed to set preamble for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set preamble: %d for VDEV: %d\n",
- preamble, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ASSOC) {
@@ -2474,27 +2637,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New station addition.
*/
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d peer create %pM (new sta)\n",
+ arvif->vdev_id, sta->addr);
+
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Added peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
/*
* Existing station deletion.
*/
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d peer delete %pM (sta gone)\n",
+ arvif->vdev_id, sta->addr);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
if (vif->type == NL80211_IFTYPE_STATION)
ath10k_bss_disassoc(hw, vif);
@@ -2505,14 +2667,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New association.
*/
+ ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
+ sta->addr);
+
ret = ath10k_station_assoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to associate station: %pM\n",
sta->addr);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Station %pM moved to assoc state\n",
- sta->addr);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
@@ -2520,14 +2681,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* Disassociation.
*/
+ ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
+ sta->addr);
+
ret = ath10k_station_disassoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to disassociate station: %pM\n",
sta->addr);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Station %pM moved to disassociated state\n",
- sta->addr);
}
mutex_unlock(&ar->conf_mutex);
@@ -2732,88 +2892,51 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
* Both RTS and Fragmentation threshold are interface-specific
* in ath10k, but device-specific in mac80211.
*/
-static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath10k_generic_iter *ar_iter = data;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
- u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
-
- lockdep_assert_held(&arvif->ar->conf_mutex);
-
- /* During HW reconfiguration mac80211 reports all interfaces that were
- * running until reconfiguration was started. Since FW doesn't have any
- * vdevs at this point we must not iterate over this interface list.
- * This setting will be updated upon add_interface(). */
- if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
- return;
-
- ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
- if (ar_iter->ret)
- ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
- arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set RTS threshold: %d for VDEV: %d\n",
- rts, arvif->vdev_id);
-}
static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- struct ath10k_generic_iter ar_iter;
struct ath10k *ar = hw->priv;
-
- memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
- ar_iter.ar = ar;
+ struct ath10k_vif *arvif;
+ int ret = 0;
mutex_lock(&ar->conf_mutex);
- ieee80211_iterate_active_interfaces_atomic(
- hw, IEEE80211_IFACE_ITER_NORMAL,
- ath10k_set_rts_iter, &ar_iter);
- mutex_unlock(&ar->conf_mutex);
-
- return ar_iter.ret;
-}
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
+ arvif->vdev_id, value);
-static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath10k_generic_iter *ar_iter = data;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
- u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
-
- lockdep_assert_held(&arvif->ar->conf_mutex);
-
- /* During HW reconfiguration mac80211 reports all interfaces that were
- * running until reconfiguration was started. Since FW doesn't have any
- * vdevs at this point we must not iterate over this interface list.
- * This setting will be updated upon add_interface(). */
- if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
- return;
+ ret = ath10k_mac_set_rts(arvif, value);
+ if (ret) {
+ ath10k_warn("could not set rts threshold for vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&ar->conf_mutex);
- ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
- if (ar_iter->ret)
- ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
- arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set frag threshold: %d for VDEV: %d\n",
- frag, arvif->vdev_id);
+ return ret;
}
static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
- struct ath10k_generic_iter ar_iter;
struct ath10k *ar = hw->priv;
-
- memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
- ar_iter.ar = ar;
+ struct ath10k_vif *arvif;
+ int ret = 0;
mutex_lock(&ar->conf_mutex);
- ieee80211_iterate_active_interfaces_atomic(
- hw, IEEE80211_IFACE_ITER_NORMAL,
- ath10k_set_frag_iter, &ar_iter);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
+ arvif->vdev_id, value);
+
+ ret = ath10k_mac_set_frag(arvif, value);
+ if (ret) {
+ ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+ break;
+ }
+ }
mutex_unlock(&ar->conf_mutex);
- return ar_iter.ret;
+ return ret;
}
static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
@@ -2836,8 +2959,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
bool empty;
spin_lock_bh(&ar->htt.tx_lock);
- empty = bitmap_empty(ar->htt.used_msdu_ids,
- ar->htt.max_num_pending_tx);
+ empty = (ar->htt.num_pending_tx == 0);
spin_unlock_bh(&ar->htt.tx_lock);
skip = (ar->state == ATH10K_STATE_WEDGED);
@@ -3326,6 +3448,10 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_AP_LINK_PS;
+ /* MSDU can have an HTT TX fragment descriptor pushed in front. The additional 4
+ * bytes are used for padding/alignment if necessary. */
+ ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
+
if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 6fce9bfb19a5..ba1021997b8f 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -34,6 +34,8 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
void ath10k_reset_scan(unsigned long ptr);
void ath10k_offchan_tx_purge(struct ath10k *ar);
void ath10k_offchan_tx_work(struct work_struct *work);
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
void ath10k_halt(struct ath10k *ar);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index e2f9ef50b1bd..9e86a811086f 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -36,11 +36,9 @@ static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
-#define QCA988X_1_0_DEVICE_ID (0xabcd)
#define QCA988X_2_0_DEVICE_ID (0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
- { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{0}
};
@@ -50,9 +48,9 @@ static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num);
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
@@ -60,43 +58,145 @@ static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
- /* host->target HTC control and raw streams */
- { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
- /* could be moved to share CE3 */
- /* target->host HTT + HTC control */
- { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
- /* target->host WMI */
- { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
- /* host->target WMI */
- { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
- /* host->target HTT */
- { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
- CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
- /* unused */
- { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
- /* Target autonomous hif_memcpy */
- { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
- /* ce_diag, the Diagnostic Window */
- { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 512,
+ .dest_nentries = 512,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 32,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: unused */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: ce_diag, the Diagnostic Window */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 2,
+ .src_sz_max = DIAG_TRANSFER_LIMIT,
+ .dest_nentries = 2,
+ },
};
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
- /* host->target HTC control and raw streams */
- { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
- /* target->host HTT + HTC control */
- { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
- /* target->host WMI */
- { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
- /* host->target WMI */
- { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
- /* host->target HTT */
- { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = 0,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 32,
+ .nbytes_max = 256,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = 1,
+ .pipedir = PIPEDIR_IN,
+ .nentries = 32,
+ .nbytes_max = 512,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = 2,
+ .pipedir = PIPEDIR_IN,
+ .nentries = 32,
+ .nbytes_max = 2048,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = 3,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 32,
+ .nbytes_max = 2048,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = 4,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 256,
+ .nbytes_max = 256,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
/* NB: 50% of src nentries, since tx has 2 frags */
- /* unused */
- { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
- /* Reserved for target autonomous hif_memcpy */
- { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
+
+ /* CE5: unused */
+ {
+ .pipenum = 5,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 32,
+ .nbytes_max = 2048,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = 6,
+ .pipedir = PIPEDIR_INOUT,
+ .nentries = 32,
+ .nbytes_max = 4096,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
/* CE7 used only by Host */
};
@@ -114,7 +214,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
/* Host buffer address in CE space */
u32 ce_data;
dma_addr_t ce_data_base = 0;
@@ -278,7 +378,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
u32 ce_data; /* Host buffer address in CE space */
dma_addr_t ce_data_base = 0;
@@ -437,7 +537,7 @@ static void ath10k_pci_wait(struct ath10k *ar)
ath10k_warn("Unable to wakeup target\n");
}
-void ath10k_do_pci_wake(struct ath10k *ar)
+int ath10k_do_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *pci_addr = ar_pci->mem;
@@ -453,18 +553,19 @@ void ath10k_do_pci_wake(struct ath10k *ar)
atomic_inc(&ar_pci->keep_awake_count);
if (ar_pci->verified_awake)
- return;
+ return 0;
for (;;) {
if (ath10k_pci_target_is_awake(ar)) {
ar_pci->verified_awake = true;
- break;
+ return 0;
}
if (tot_delay > PCIE_WAKE_TIMEOUT) {
- ath10k_warn("target takes too long to wake up (awake count %d)\n",
+ ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
+ PCIE_WAKE_TIMEOUT,
atomic_read(&ar_pci->keep_awake_count));
- break;
+ return -ETIMEDOUT;
}
udelay(curr_delay);
@@ -493,7 +594,7 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
* FIXME: Handle OOM properly.
*/
static inline
-struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
+struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k_pci_compl *compl = NULL;
@@ -511,39 +612,28 @@ exit:
}
/* Called by lower (CE) layer when a send to Target completes. */
-static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
- void *transfer_context,
- u32 ce_data,
- unsigned int nbytes,
- unsigned int transfer_id)
+static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
- bool process = false;
-
- do {
- /*
- * For the send completion of an item in sendlist, just
- * increment num_sends_allowed. The upper layer callback will
- * be triggered when last fragment is done with send.
- */
- if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
- spin_lock_bh(&pipe_info->pipe_lock);
- pipe_info->num_sends_allowed++;
- spin_unlock_bh(&pipe_info->pipe_lock);
- continue;
- }
+ void *transfer_context;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
+ &ce_data, &nbytes,
+ &transfer_id) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
- compl->send_or_recv = HIF_CE_COMPLETE_SEND;
+ compl->state = ATH10K_PCI_COMPL_SEND;
compl->ce_state = ce_state;
compl->pipe_info = pipe_info;
- compl->transfer_context = transfer_context;
+ compl->skb = transfer_context;
compl->nbytes = nbytes;
compl->transfer_id = transfer_id;
compl->flags = 0;
@@ -554,46 +644,36 @@ static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
-
- process = true;
- } while (ath10k_ce_completed_send_next(ce_state,
- &transfer_context,
- &ce_data, &nbytes,
- &transfer_id) == 0);
-
- /*
- * If only some of the items within a sendlist have completed,
- * don't invoke completion processing until the entire sendlist
- * has been sent.
- */
- if (!process)
- return;
+ }
ath10k_pci_process_ce(ar);
}
/* Called by lower (CE) layer when data is received from the Target. */
-static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
- void *transfer_context, u32 ce_data,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags)
+static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
struct sk_buff *skb;
+ void *transfer_context;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ unsigned int flags;
- do {
+ while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+ &ce_data, &nbytes, &transfer_id,
+ &flags) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
- compl->send_or_recv = HIF_CE_COMPLETE_RECV;
+ compl->state = ATH10K_PCI_COMPL_RECV;
compl->ce_state = ce_state;
compl->pipe_info = pipe_info;
- compl->transfer_context = transfer_context;
+ compl->skb = transfer_context;
compl->nbytes = nbytes;
compl->transfer_id = transfer_id;
compl->flags = flags;
@@ -608,12 +688,7 @@ static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
-
- } while (ath10k_ce_completed_recv_next(ce_state,
- &transfer_context,
- &ce_data, &nbytes,
- &transfer_id,
- &flags) == 0);
+ }
ath10k_pci_process_ce(ar);
}
@@ -625,15 +700,12 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
- struct ce_state *ce_hdl = pipe_info->ce_hdl;
- struct ce_sendlist sendlist;
+ struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
+ struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
unsigned int len;
u32 flags = 0;
int ret;
- memset(&sendlist, 0, sizeof(struct ce_sendlist));
-
len = min(bytes, nbuf->len);
bytes -= len;
@@ -648,19 +720,8 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
"ath10k tx: data: ",
nbuf->data, nbuf->len);
- ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
-
- /* Make sure we have resources to handle this request */
- spin_lock_bh(&pipe_info->pipe_lock);
- if (!pipe_info->num_sends_allowed) {
- ath10k_warn("Pipe: %d is full\n", pipe_id);
- spin_unlock_bh(&pipe_info->pipe_lock);
- return -ENOSR;
- }
- pipe_info->num_sends_allowed--;
- spin_unlock_bh(&pipe_info->pipe_lock);
-
- ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
+ ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
+ flags);
if (ret)
ath10k_warn("CE send failed: %p\n", nbuf);
@@ -670,14 +731,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
- int ret;
-
- spin_lock_bh(&pipe_info->pipe_lock);
- ret = pipe_info->num_sends_allowed;
- spin_unlock_bh(&pipe_info->pipe_lock);
-
- return ret;
+ return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
@@ -764,9 +818,9 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
static int ath10k_pci_start_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_diag = ar_pci->ce_diag;
+ struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
const struct ce_attr *attr;
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
struct ath10k_pci_compl *compl;
int i, pipe_num, completions, disable_interrupts;
@@ -792,7 +846,6 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
ath10k_pci_ce_send_done,
disable_interrupts);
completions += attr->src_nentries;
- pipe_info->num_sends_allowed = attr->src_nentries - 1;
}
if (attr->dest_nentries) {
@@ -805,15 +858,14 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
continue;
for (i = 0; i < completions; i++) {
- compl = kmalloc(sizeof(struct ath10k_pci_compl),
- GFP_KERNEL);
+ compl = kmalloc(sizeof(*compl), GFP_KERNEL);
if (!compl) {
ath10k_warn("No memory for completion state\n");
ath10k_pci_stop_ce(ar);
return -ENOMEM;
}
- compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+ compl->state = ATH10K_PCI_COMPL_FREE;
list_add_tail(&compl->list, &pipe_info->compl_free);
}
}
@@ -840,7 +892,7 @@ static void ath10k_pci_stop_ce(struct ath10k *ar)
* their associated resources */
spin_lock_bh(&ar_pci->compl_lock);
list_for_each_entry(compl, &ar_pci->compl_process, list) {
- skb = (struct sk_buff *)compl->transfer_context;
+ skb = compl->skb;
ATH10K_SKB_CB(skb)->is_aborted = true;
}
spin_unlock_bh(&ar_pci->compl_lock);
@@ -850,7 +902,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_compl *compl, *tmp;
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
struct sk_buff *netbuf;
int pipe_num;
@@ -861,7 +913,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
list_del(&compl->list);
- netbuf = (struct sk_buff *)compl->transfer_context;
+ netbuf = compl->skb;
dev_kfree_skb_any(netbuf);
kfree(compl);
}
@@ -912,12 +964,14 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
list_del(&compl->list);
spin_unlock_bh(&ar_pci->compl_lock);
- if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
+ switch (compl->state) {
+ case ATH10K_PCI_COMPL_SEND:
cb->tx_completion(ar,
- compl->transfer_context,
+ compl->skb,
compl->transfer_id);
send_done = 1;
- } else {
+ break;
+ case ATH10K_PCI_COMPL_RECV:
ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
if (ret) {
ath10k_warn("Unable to post recv buffer for pipe: %d\n",
@@ -925,7 +979,7 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
break;
}
- skb = (struct sk_buff *)compl->transfer_context;
+ skb = compl->skb;
nbytes = compl->nbytes;
ath10k_dbg(ATH10K_DBG_PCI,
@@ -944,16 +998,23 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
nbytes,
skb->len + skb_tailroom(skb));
}
+ break;
+ case ATH10K_PCI_COMPL_FREE:
+ ath10k_warn("free completion cannot be processed\n");
+ break;
+ default:
+ ath10k_warn("invalid completion state (%d)\n",
+ compl->state);
+ break;
}
- compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+ compl->state = ATH10K_PCI_COMPL_FREE;
/*
* Add completion back to the pipe's free list.
*/
spin_lock_bh(&compl->pipe_info->pipe_lock);
list_add_tail(&compl->list, &compl->pipe_info->compl_free);
- compl->pipe_info->num_sends_allowed += send_done;
spin_unlock_bh(&compl->pipe_info->pipe_lock);
}
@@ -1037,12 +1098,12 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
&dl_is_polled);
}
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num)
{
struct ath10k *ar = pipe_info->hif_ce_state;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = pipe_info->ce_hdl;
+ struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
struct sk_buff *skb;
dma_addr_t ce_data;
int i, ret = 0;
@@ -1097,7 +1158,7 @@ err:
static int ath10k_pci_post_rx(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
int pipe_num, ret = 0;
@@ -1147,11 +1208,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
u32 buf_sz;
struct sk_buff *netbuf;
u32 ce_data;
@@ -1179,11 +1240,11 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
}
}
-static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
struct sk_buff *netbuf;
u32 ce_data;
unsigned int nbytes;
@@ -1206,15 +1267,14 @@ static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
&ce_data, &nbytes, &id) == 0) {
- if (netbuf != CE_SENDLIST_ITEM_CTXT)
- /*
- * Indicate the completion to higer layer to free
- * the buffer
- */
- ATH10K_SKB_CB(netbuf)->is_aborted = true;
- ar_pci->msg_callbacks_current.tx_completion(ar,
- netbuf,
- id);
+ /*
+ * Indicate the completion to the higher layer to free
+ * the buffer
+ */
+ ATH10K_SKB_CB(netbuf)->is_aborted = true;
+ ar_pci->msg_callbacks_current.tx_completion(ar,
+ netbuf,
+ id);
}
}
@@ -1232,7 +1292,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
int pipe_num;
for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
pipe_info = &ar_pci->pipe_info[pipe_num];
ath10k_pci_rx_pipe_cleanup(pipe_info);
@@ -1243,7 +1303,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
int pipe_num;
for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
@@ -1293,8 +1353,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
void *resp, u32 *resp_len)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
- struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
+ struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+ struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+ struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+ struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
dma_addr_t req_paddr = 0;
dma_addr_t resp_paddr = 0;
struct bmi_xfer xfer = {};
@@ -1378,13 +1440,16 @@ err_dma:
return ret;
}
-static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
- void *transfer_context,
- u32 data,
- unsigned int nbytes,
- unsigned int transfer_id)
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
- struct bmi_xfer *xfer = transfer_context;
+ struct bmi_xfer *xfer;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+
+ if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
+ &nbytes, &transfer_id))
+ return;
if (xfer->wait_for_resp)
return;
@@ -1392,14 +1457,17 @@ static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
complete(&xfer->done);
}
-static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
- void *transfer_context,
- u32 data,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags)
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
- struct bmi_xfer *xfer = transfer_context;
+ struct bmi_xfer *xfer;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ unsigned int flags;
+
+ if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
+ &nbytes, &transfer_id, &flags))
+ return;
if (!xfer->wait_for_resp) {
ath10k_warn("unexpected: BMI data received; ignoring\n");
@@ -1679,7 +1747,7 @@ static int ath10k_pci_init_config(struct ath10k *ar)
static int ath10k_pci_ce_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
int pipe_num;
@@ -1895,7 +1963,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
- struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
+ struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
struct ath10k_pci *ar_pci = pipe->ar_pci;
ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
@@ -2212,18 +2280,13 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
static void ath10k_pci_device_reset(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *mem = ar_pci->mem;
int i;
u32 val;
if (!SOC_GLOBAL_RESET_ADDRESS)
return;
- if (!mem)
- return;
-
- ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
+ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
PCIE_SOC_WAKE_V_MASK);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
if (ath10k_pci_target_is_awake(ar))
@@ -2232,12 +2295,12 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
}
/* Put Target, including PCIe, into RESET. */
- val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
+ val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
val |= 1;
- ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+ if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
RTC_STATE_COLD_RESET_MASK)
break;
msleep(1);
@@ -2245,16 +2308,16 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
/* Pull Target, including PCIe, out of RESET. */
val &= ~1;
- ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+ if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
RTC_STATE_COLD_RESET_MASK))
break;
msleep(1);
}
- ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@@ -2267,13 +2330,10 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
switch (i) {
case ATH10K_PCI_FEATURE_MSI_X:
- ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
- break;
- case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
- ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
break;
case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
- ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
break;
}
}
@@ -2286,7 +2346,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
int ret = 0;
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- u32 lcr_val;
+ u32 lcr_val, chip_id;
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
@@ -2298,15 +2358,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->dev = &pdev->dev;
switch (pci_dev->device) {
- case QCA988X_1_0_DEVICE_ID:
- set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
- break;
case QCA988X_2_0_DEVICE_ID:
set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
break;
default:
ret = -ENODEV;
- ath10k_err("Unkown device ID: %d\n", pci_dev->device);
+ ath10k_err("Unknown device ID: %d\n", pci_dev->device);
goto err_ar_pci;
}
@@ -2322,10 +2379,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_ar_pci;
}
- /* Enable QCA988X_1.0 HW workarounds */
- if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
- spin_lock_init(&ar_pci->hw_v1_workaround_lock);
-
ar_pci->ar = ar;
ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
atomic_set(&ar_pci->keep_awake_count, 0);
@@ -2395,9 +2448,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
spin_lock_init(&ar_pci->ce_lock);
- ar_pci->cacheline_sz = dma_get_cache_alignment();
+ ret = ath10k_do_pci_wake(ar);
+ if (ret) {
+ ath10k_err("Failed to get chip id: %d\n", ret);
+ goto err_iomap;
+ }
+
+ chip_id = ath10k_pci_read32(ar,
+ RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
+
+ ath10k_do_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
- ret = ath10k_core_register(ar);
+ ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err("could not register driver core (%d)\n", ret);
goto err_iomap;
@@ -2414,7 +2478,6 @@ err_region:
err_device:
pci_disable_device(pdev);
err_ar:
- pci_set_drvdata(pdev, NULL);
ath10k_core_destroy(ar);
err_ar_pci:
/* call HIF PCI free here */
@@ -2442,7 +2505,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_core_unregister(ar);
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ar_pci->mem);
pci_release_region(pdev, BAR_NUM);
pci_clear_master(pdev);
@@ -2483,9 +2545,6 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 871bb339d56d..52fb7b973571 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -43,22 +43,23 @@ struct bmi_xfer {
u32 resp_len;
};
+enum ath10k_pci_compl_state {
+ ATH10K_PCI_COMPL_FREE = 0,
+ ATH10K_PCI_COMPL_SEND,
+ ATH10K_PCI_COMPL_RECV,
+};
+
struct ath10k_pci_compl {
struct list_head list;
- int send_or_recv;
- struct ce_state *ce_state;
- struct hif_ce_pipe_info *pipe_info;
- void *transfer_context;
+ enum ath10k_pci_compl_state state;
+ struct ath10k_ce_pipe *ce_state;
+ struct ath10k_pci_pipe *pipe_info;
+ struct sk_buff *skb;
unsigned int nbytes;
unsigned int transfer_id;
unsigned int flags;
};
-/* compl_state.send_or_recv */
-#define HIF_CE_COMPLETE_FREE 0
-#define HIF_CE_COMPLETE_SEND 1
-#define HIF_CE_COMPLETE_RECV 2
-
/*
* PCI-specific Target state
*
@@ -152,17 +153,16 @@ struct service_to_pipe {
enum ath10k_pci_features {
ATH10K_PCI_FEATURE_MSI_X = 0,
- ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1,
- ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2,
+ ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1,
/* keep last */
ATH10K_PCI_FEATURE_COUNT
};
/* Per-pipe state. */
-struct hif_ce_pipe_info {
+struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
/* Our pipe number; facilitiates use of pipe_info ptrs. */
u8 pipe_num;
@@ -178,9 +178,6 @@ struct hif_ce_pipe_info {
/* List of free CE completion slots */
struct list_head compl_free;
- /* Limit the number of outstanding send requests. */
- int num_sends_allowed;
-
struct ath10k_pci *ar_pci;
struct tasklet_struct intr;
};
@@ -190,7 +187,6 @@ struct ath10k_pci {
struct device *dev;
struct ath10k *ar;
void __iomem *mem;
- int cacheline_sz;
DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
@@ -219,7 +215,7 @@ struct ath10k_pci {
bool compl_processing;
- struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];
+ struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
struct ath10k_hif_cb msg_callbacks_current;
@@ -227,16 +223,13 @@ struct ath10k_pci {
u32 fw_indicator_address;
/* Copy Engine used for Diagnostic Accesses */
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
/* FIXME: document what this really protects */
spinlock_t ce_lock;
/* Map CE id to ce_state */
- struct ce_state *ce_id_to_state[CE_COUNT_MAX];
-
- /* makes sure that dummy reads are atomic */
- spinlock_t hw_v1_workaround_lock;
+ struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -244,14 +237,18 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
return ar->hif.priv;
}
-static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
+static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
- return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
-static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
+static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
- iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
@@ -310,23 +307,8 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *addr = ar_pci->mem;
-
- if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
- unsigned long irq_flags;
- spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
-
- ioread32(addr+offset+4); /* 3rd read prior to write */
- ioread32(addr+offset+4); /* 2nd read prior to write */
- ioread32(addr+offset+4); /* 1st read prior to write */
- iowrite32(value, addr+offset);
-
- spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
- irq_flags);
- } else {
- iowrite32(value, addr+offset);
- }
+ iowrite32(value, ar_pci->mem + offset);
}
static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
@@ -336,15 +318,17 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
return ioread32(ar_pci->mem + offset);
}
-void ath10k_do_pci_wake(struct ath10k *ar);
+int ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
-static inline void ath10k_pci_wake(struct ath10k *ar)
+static inline int ath10k_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
- ath10k_do_pci_wake(ar);
+ return ath10k_do_pci_wake(ar);
+
+ return 0;
}
static inline void ath10k_pci_sleep(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index bfec6c8f2ecb..1c584c4b019c 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -422,10 +422,30 @@ struct rx_mpdu_end {
#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
+/* The decapped header (rx_hdr_status) contains the following:
+ * a) 802.11 header
+ * [padding to 4 bytes]
+ * b) HW crypto parameter
+ * - 0 bytes for no security
+ * - 4 bytes for WEP
+ * - 8 bytes for TKIP, AES
+ * [padding to 4 bytes]
+ * c) A-MSDU subframe header (14 bytes) if applicable
+ * d) LLC/SNAP (RFC1042, 8 bytes)
+ *
+ * In case of A-MSDU, only the first frame in the sequence contains (a) and (b). */
enum rx_msdu_decap_format {
- RX_MSDU_DECAP_RAW = 0,
- RX_MSDU_DECAP_NATIVE_WIFI = 1,
+ RX_MSDU_DECAP_RAW = 0,
+
+ /* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
+ * htt_rx_desc contains the original decapped 802.11 header. */
+ RX_MSDU_DECAP_NATIVE_WIFI = 1,
+
+ /* Payload contains an ethernet header (struct ethhdr). */
RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+
+ /* Payload contains two 48-bit addresses and 2-byte length (14 bytes
+ * total), followed by an RFC1042 header (8 bytes). */
RX_MSDU_DECAP_8023_SNAP_LLC = 3
};
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 85e806bf7257..90817ddc92ba 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -111,26 +111,29 @@ TRACE_EVENT(ath10k_log_dbg_dump,
);
TRACE_EVENT(ath10k_wmi_cmd,
- TP_PROTO(int id, void *buf, size_t buf_len),
+ TP_PROTO(int id, void *buf, size_t buf_len, int ret),
- TP_ARGS(id, buf, buf_len),
+ TP_ARGS(id, buf, buf_len, ret),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
+ __field(int, ret)
),
TP_fast_assign(
__entry->id = id;
__entry->buf_len = buf_len;
+ __entry->ret = ret;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
- "id %d len %zu",
+ "id %d len %zu ret %d",
__entry->id,
- __entry->buf_len
+ __entry->buf_len,
+ __entry->ret
)
);
@@ -158,6 +161,27 @@ TRACE_EVENT(ath10k_wmi_event,
)
);
+TRACE_EVENT(ath10k_htt_stats,
+ TP_PROTO(void *buf, size_t buf_len),
+
+ TP_ARGS(buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "len %zu",
+ __entry->buf_len
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 68b6faefd1d8..5ae373a1e294 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -44,40 +44,39 @@ out:
spin_unlock_bh(&ar->data_lock);
}
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done)
{
struct device *dev = htt->ar->dev;
struct ieee80211_tx_info *info;
- struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
- struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
+ struct ath10k_skb_cb *skb_cb;
+ struct sk_buff *msdu;
int ret;
- if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
- return;
-
- ATH10K_SKB_CB(txdesc)->htt.refcount--;
+ ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
+ tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
- if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
+ if (tx_done->msdu_id >= htt->max_num_pending_tx) {
+ ath10k_warn("warning: msdu_id %d too big, ignoring\n",
+ tx_done->msdu_id);
return;
-
- if (txfrag) {
- ret = ath10k_skb_unmap(dev, txfrag);
- if (ret)
- ath10k_warn("txfrag unmap failed (%d)\n", ret);
-
- dev_kfree_skb_any(txfrag);
}
+ msdu = htt->pending_tx[tx_done->msdu_id];
+ skb_cb = ATH10K_SKB_CB(msdu);
+
ret = ath10k_skb_unmap(dev, msdu);
if (ret)
ath10k_warn("data skb unmap failed (%d)\n", ret);
+ if (skb_cb->htt.frag_len)
+ skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
- memset(&info->status, 0, sizeof(info->status));
- if (ATH10K_SKB_CB(txdesc)->htt.discard) {
+ if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
goto exit;
}
@@ -85,7 +84,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
+ if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status(htt->ar->hw, msdu);
@@ -93,36 +92,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
exit:
spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
- ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
+ htt->pending_tx[tx_done->msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
- if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
+ if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
-
- dev_kfree_skb_any(txdesc);
-}
-
-void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done)
-{
- struct sk_buff *txdesc;
-
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
- tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
-
- if (tx_done->msdu_id >= htt->max_num_pending_tx) {
- ath10k_warn("warning: msdu_id %d too big, ignoring\n",
- tx_done->msdu_id);
- return;
- }
-
- txdesc = htt->pending_tx[tx_done->msdu_id];
-
- ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
- ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
-
- ath10k_txrx_tx_unref(htt, txdesc);
}
static const u8 rx_legacy_rate_idx[] = {
@@ -293,6 +268,8 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
status->vht_nss,
status->freq,
status->band);
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+ info->skb->data, info->skb->len);
ieee80211_rx(ar->hw, info->skb);
}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index e78632a76df7..356dc9c04c9e 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -19,9 +19,8 @@
#include "htt.h"
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
-void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done);
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done);
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 55f90c761868..ccf3597fd9e2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -23,29 +23,470 @@
#include "wmi.h"
#include "mac.h"
-void ath10k_wmi_flush_tx(struct ath10k *ar)
-{
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- if (ar->state == ATH10K_STATE_WEDGED) {
- ath10k_warn("wmi flush skipped - device is wedged anyway\n");
- return;
- }
-
- ret = wait_event_timeout(ar->wmi.wq,
- atomic_read(&ar->wmi.pending_tx_count) == 0,
- 5*HZ);
- if (atomic_read(&ar->wmi.pending_tx_count) == 0)
- return;
-
- if (ret == 0)
- ret = -ETIMEDOUT;
-
- if (ret < 0)
- ath10k_warn("wmi flush failed (%d)\n", ret);
-}
+/* MAIN WMI cmd track */
+static struct wmi_cmd_map wmi_cmd_map = {
+ .init_cmdid = WMI_INIT_CMDID,
+ .start_scan_cmdid = WMI_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
+ .network_list_offload_config_cmdid =
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+ .gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
+ .peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
+ .peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
+ .sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
+ .sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ .sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
+ .echo_cmdid = WMI_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
+ .vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
+};
+
+/* 10.X WMI cmd track */
+static struct wmi_cmd_map wmi_10x_cmd_map = {
+ .init_cmdid = WMI_10X_INIT_CMDID,
+ .start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10X_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+ .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
+};
+
+/* MAIN WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_vdev_param_map = {
+ .rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_VDEV_PARAM_WDS,
+ .atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_VDEV_PARAM_SGI,
+ .ldpc = WMI_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+/* 10.X WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
+ .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10X_VDEV_PARAM_WDS,
+ .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10X_VDEV_PARAM_SGI,
+ .ldpc = WMI_10X_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10X_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+ .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+ .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+ .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
+
+static struct wmi_pdev_param_map wmi_pdev_param_map = {
+ .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .arpdhcp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dcs = WMI_PDEV_PARAM_DCS,
+ .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
+ .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
+ .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arpdhcp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ .dcs = WMI_10X_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+ .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+ .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+ .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+};
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
@@ -85,18 +526,14 @@ static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
dev_kfree_skb(skb);
-
- if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
- wake_up(&ar->wmi.wq);
}
-/* WMI command API */
-static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
- enum wmi_cmd_id cmd_id)
+static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct wmi_cmd_hdr *cmd_hdr;
- int status;
+ int ret;
u32 cmd = 0;
if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
@@ -107,25 +544,146 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
cmd_hdr->cmd_id = __cpu_to_le32(cmd);
- if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
- WMI_MAX_PENDING_TX_COUNT) {
- /* avoid using up memory when FW hangs */
- atomic_dec(&ar->wmi.pending_tx_count);
- return -EBUSY;
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
+ trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);
+
+ if (ret)
+ goto err_pull;
+
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
+static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
+{
+ struct wmi_bcn_tx_arg arg = {0};
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->data_lock);
+
+ if (arvif->beacon == NULL)
+ return;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.tx_rate = 0;
+ arg.tx_power = 0;
+ arg.bcn = arvif->beacon->data;
+ arg.bcn_len = arvif->beacon->len;
+
+ ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg);
+ if (ret)
+ return;
+
+ dev_kfree_skb_any(arvif->beacon);
+ arvif->beacon = NULL;
+}
+
+static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ ath10k_wmi_tx_beacon_nowait(arvif);
+}
+
+static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_wmi_tx_beacons_iter,
+ NULL);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
+{
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
+
+ wake_up(&ar->wmi.tx_credits_wq);
+}
+
+static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ if (cmd_id == WMI_CMD_UNSUPPORTED) {
+ ath10k_warn("wmi command %d is not supported by firmware\n",
+ cmd_id);
+ return ret;
}
- memset(skb_cb, 0, sizeof(*skb_cb));
+ wait_event_timeout(ar->wmi.tx_credits_wq, ({
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
- trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
+ (ret != -EAGAIN);
+ }), 3*HZ);
- status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
- if (status) {
+ if (ret)
dev_kfree_skb_any(skb);
- atomic_dec(&ar->wmi.pending_tx_count);
- return status;
+
+ return ret;
+}
+
+int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+ int ret = 0;
+ struct wmi_mgmt_tx_cmd *cmd;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *wmi_skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int len;
+ u16 fc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+ return -EINVAL;
+
+ len = sizeof(cmd->hdr) + skb->len;
+ len = round_up(len, 4);
+
+ wmi_skb = ath10k_wmi_alloc_skb(len);
+ if (!wmi_skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;
+
+ cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
+ cmd->hdr.tx_rate = 0;
+ cmd->hdr.tx_power = 0;
+ cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len));
+
+ memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
+ memcpy(cmd->buf, skb->data, skb->len);
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+ wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
+ fc & IEEE80211_FCTL_STYPE);
+
+ /* Send the management frame buffer to the target */
+ ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
}
- return 0;
+ /* TODO: report tx status to mac80211 - temporary just ACK */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status_irqsafe(ar->hw, skb);
+
+ return ret;
}
static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
@@ -315,7 +873,9 @@ static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
- struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
+ struct wmi_mgmt_rx_event_v1 *ev_v1;
+ struct wmi_mgmt_rx_event_v2 *ev_v2;
+ struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
u32 rx_status;
@@ -325,13 +885,24 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
u32 rate;
u32 buf_len;
u16 fc;
+ int pull_len;
+
+ if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+ ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
+ ev_hdr = &ev_v2->hdr.v1;
+ pull_len = sizeof(*ev_v2);
+ } else {
+ ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
+ ev_hdr = &ev_v1->hdr;
+ pull_len = sizeof(*ev_v1);
+ }
- channel = __le32_to_cpu(event->hdr.channel);
- buf_len = __le32_to_cpu(event->hdr.buf_len);
- rx_status = __le32_to_cpu(event->hdr.status);
- snr = __le32_to_cpu(event->hdr.snr);
- phy_mode = __le32_to_cpu(event->hdr.phy_mode);
- rate = __le32_to_cpu(event->hdr.rate);
+ channel = __le32_to_cpu(ev_hdr->channel);
+ buf_len = __le32_to_cpu(ev_hdr->buf_len);
+ rx_status = __le32_to_cpu(ev_hdr->status);
+ snr = __le32_to_cpu(ev_hdr->snr);
+ phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
+ rate = __le32_to_cpu(ev_hdr->rate);
memset(status, 0, sizeof(*status));
@@ -358,7 +929,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
status->rate_idx = get_rate_idx(rate, status->band);
- skb_pull(skb, sizeof(event->hdr));
+ skb_pull(skb, pull_len);
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
@@ -734,10 +1305,8 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
int i = -1;
struct wmi_bcn_info *bcn_info;
struct ath10k_vif *arvif;
- struct wmi_bcn_tx_arg arg;
struct sk_buff *bcn;
int vdev_id = 0;
- int ret;
ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
@@ -794,17 +1363,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
- arg.vdev_id = arvif->vdev_id;
- arg.tx_rate = 0;
- arg.tx_power = 0;
- arg.bcn = bcn->data;
- arg.bcn_len = bcn->len;
+ spin_lock_bh(&ar->data_lock);
+ if (arvif->beacon) {
+ ath10k_warn("SWBA overrun on vdev %d\n",
+ arvif->vdev_id);
+ dev_kfree_skb_any(arvif->beacon);
+ }
- ret = ath10k_wmi_beacon_send(ar, &arg);
- if (ret)
- ath10k_warn("could not send beacon (%d)\n", ret);
+ arvif->beacon = bcn;
- dev_kfree_skb_any(bcn);
+ ath10k_wmi_tx_beacon_nowait(arvif);
+ spin_unlock_bh(&ar->data_lock);
}
}
@@ -919,6 +1488,55 @@ static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}
+static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
+}
+
+static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
+}
+
+static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
+}
+
+static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+ u32 num_units, u32 unit_len)
+{
+ dma_addr_t paddr;
+ u32 pool_size;
+ int idx = ar->wmi.num_mem_chunks;
+
+ pool_size = num_units * round_up(unit_len, 4);
+
+ if (!pool_size)
+ return -EINVAL;
+
+ ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
+ pool_size,
+ &paddr,
+ GFP_ATOMIC);
+ if (!ar->wmi.mem_chunks[idx].vaddr) {
+ ath10k_warn("failed to allocate memory chunk\n");
+ return -ENOMEM;
+ }
+
+ memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
+
+ ar->wmi.mem_chunks[idx].paddr = paddr;
+ ar->wmi.mem_chunks[idx].len = pool_size;
+ ar->wmi.mem_chunks[idx].req_id = req_id;
+ ar->wmi.num_mem_chunks++;
+
+ return 0;
+}
+
static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
struct sk_buff *skb)
{
@@ -943,6 +1561,10 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
ar->phy_capability = __le32_to_cpu(ev->phy_capability);
ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+ /* only manually set fw features when not using FW IE format */
+ if (ar->fw_api == 1 && ar->fw_version_build > 636)
+ set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
+
if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
@@ -987,6 +1609,108 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
complete(&ar->wmi.service_ready);
}
+static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
+ int ret;
+ struct wmi_service_ready_event_10x *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev)) {
+ ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
+ skb->len, sizeof(*ev));
+ return;
+ }
+
+ ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
+ ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
+ ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
+ ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
+ ar->fw_version_major =
+ (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
+ ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
+ ar->phy_capability = __le32_to_cpu(ev->phy_capability);
+ ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+
+ if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+ ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
+ ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
+ ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+ }
+
+ ar->ath_common.regulatory.current_rd =
+ __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
+
+ ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
+ sizeof(ev->wmi_service_bitmap));
+
+ if (strlen(ar->hw->wiphy->fw_version) == 0) {
+ snprintf(ar->hw->wiphy->fw_version,
+ sizeof(ar->hw->wiphy->fw_version),
+ "%u.%u",
+ ar->fw_version_major,
+ ar->fw_version_minor);
+ }
+
+ num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
+
+ if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
+ ath10k_warn("requested memory chunks number (%d) exceeds the limit\n",
+ num_mem_reqs);
+ return;
+ }
+
+ if (!num_mem_reqs)
+ goto exit;
+
+ ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
+ num_mem_reqs);
+
+ for (i = 0; i < num_mem_reqs; ++i) {
+ req_id = __le32_to_cpu(ev->mem_reqs[i].req_id);
+ num_units = __le32_to_cpu(ev->mem_reqs[i].num_units);
+ unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size);
+ num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info);
+
+ if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
+ /* number of units to allocate is number of
+ * peers, 1 extra for self peer on target */
+			/* this needs to be kept in sync, otherwise
+			 * host and target can diverge */
+ num_units = TARGET_10X_NUM_PEERS + 1;
+ else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
+ num_units = TARGET_10X_NUM_VDEVS + 1;
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
+ req_id,
+ __le32_to_cpu(ev->mem_reqs[i].num_units),
+ num_unit_info,
+ unit_size,
+ num_units);
+
+ ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
+ unit_size);
+ if (ret)
+ return;
+ }
+
+exit:
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
+ __le32_to_cpu(ev->sw_version),
+ __le32_to_cpu(ev->abi_version),
+ __le32_to_cpu(ev->phy_capability),
+ __le32_to_cpu(ev->ht_cap_info),
+ __le32_to_cpu(ev->vht_cap_info),
+ __le32_to_cpu(ev->vht_supp_mcs),
+ __le32_to_cpu(ev->sys_cap_info),
+ __le32_to_cpu(ev->num_mem_reqs),
+ __le32_to_cpu(ev->num_rf_chains));
+
+ complete(&ar->wmi.service_ready);
+}
+
static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
@@ -1007,7 +1731,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
-static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_event_id id;
@@ -1126,64 +1850,158 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb(skb);
}
-static void ath10k_wmi_event_work(struct work_struct *work)
+static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
- struct ath10k *ar = container_of(work, struct ath10k,
- wmi.wmi_event_work);
- struct sk_buff *skb;
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10x_event_id id;
+ u16 len;
- for (;;) {
- skb = skb_dequeue(&ar->wmi.wmi_event_list);
- if (!skb)
- break;
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
- ath10k_wmi_event_process(ar, skb);
- }
-}
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return;
-static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
-{
- struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
- enum wmi_event_id event_id;
+ len = skb->len;
- event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+ trace_ath10k_wmi_event(id, skb->data, skb->len);
- /* some events require to be handled ASAP
- * thus can't be defered to a worker thread */
- switch (event_id) {
- case WMI_HOST_SWBA_EVENTID:
- case WMI_MGMT_RX_EVENTID:
- ath10k_wmi_event_process(ar, skb);
+ switch (id) {
+ case WMI_10X_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
return;
+ case WMI_10X_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ break;
+ case WMI_10X_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_10X_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_10X_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ break;
+ case WMI_10X_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_10X_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ break;
+ case WMI_10X_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ break;
+ case WMI_10X_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_10X_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_10X_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_10X_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ break;
+ case WMI_10X_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_10X_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ break;
+ case WMI_10X_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_10X_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_10X_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_10X_INST_RSSI_STATS_EVENTID:
+ ath10k_wmi_event_inst_rssi_stats(ar, skb);
+ break;
+ case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
+ ath10k_wmi_event_vdev_standby_req(ar, skb);
+ break;
+ case WMI_10X_VDEV_RESUME_REQ_EVENTID:
+ ath10k_wmi_event_vdev_resume_req(ar, skb);
+ break;
+ case WMI_10X_SERVICE_READY_EVENTID:
+ ath10k_wmi_10x_service_ready_event_rx(ar, skb);
+ break;
+ case WMI_10X_READY_EVENTID:
+ ath10k_wmi_ready_event_rx(ar, skb);
+ break;
default:
+ ath10k_warn("Unknown eventid: %d\n", id);
break;
}
- skb_queue_tail(&ar->wmi.wmi_event_list, skb);
- queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
+ dev_kfree_skb(skb);
+}
+
+
+static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ ath10k_wmi_10x_process_rx(ar, skb);
+ else
+ ath10k_wmi_main_process_rx(ar, skb);
}
/* WMI Initialization functions */
int ath10k_wmi_attach(struct ath10k *ar)
{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ ar->wmi.cmd = &wmi_10x_cmd_map;
+ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ } else {
+ ar->wmi.cmd = &wmi_cmd_map;
+ ar->wmi.vdev_param = &wmi_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_pdev_param_map;
+ }
+
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
- init_waitqueue_head(&ar->wmi.wq);
-
- skb_queue_head_init(&ar->wmi.wmi_event_list);
- INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
+ init_waitqueue_head(&ar->wmi.tx_credits_wq);
return 0;
}
void ath10k_wmi_detach(struct ath10k *ar)
{
- /* HTC should've drained the packets already */
- if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
- ath10k_warn("there are still pending packets\n");
+ int i;
+
+ /* free the host memory chunks requested by firmware */
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ dma_free_coherent(ar->dev,
+ ar->wmi.mem_chunks[i].len,
+ ar->wmi.mem_chunks[i].vaddr,
+ ar->wmi.mem_chunks[i].paddr);
+ }
- cancel_work_sync(&ar->wmi.wmi_event_work);
- skb_queue_purge(&ar->wmi.wmi_event_list);
+ ar->wmi.num_mem_chunks = 0;
}
int ath10k_wmi_connect_htc_service(struct ath10k *ar)
@@ -1198,6 +2016,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
/* these fields are the same for all service endpoints */
conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
+ conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
@@ -1234,7 +2053,8 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
"wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
rd, rd2g, rd5g, ctl2g, ctl5g);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@@ -1264,7 +2084,8 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
"wmi set channel mode %d freq %d\n",
arg->mode, arg->freq);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_channel_cmdid);
}
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
@@ -1279,7 +2100,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
cmd->suspend_opt = WMI_PDEV_SUSPEND;
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}
int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
@@ -1290,15 +2111,19 @@ int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
if (skb == NULL)
return -ENOMEM;
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}
-int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
- u32 value)
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
struct wmi_pdev_set_param_cmd *cmd;
struct sk_buff *skb;
+ if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
+ ath10k_warn("pdev param %d not supported by firmware\n", id);
+ return -EOPNOTSUPP;
+ }
+
skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
if (!skb)
return -ENOMEM;
@@ -1309,15 +2134,16 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
id, value);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}
-int ath10k_wmi_cmd_init(struct ath10k *ar)
+static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
{
struct wmi_init_cmd *cmd;
struct sk_buff *buf;
struct wmi_resource_config config = {};
- u32 val;
+ u32 len, val;
+ int i;
config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
@@ -1370,23 +2196,158 @@ int ath10k_wmi_cmd_init(struct ath10k *ar)
config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
- buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(len);
if (!buf)
return -ENOMEM;
cmd = (struct wmi_init_cmd *)buf->data;
- cmd->num_host_mem_chunks = 0;
+
+ if (ar->wmi.num_mem_chunks == 0) {
+ cmd->num_host_mem_chunks = 0;
+ goto out;
+ }
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
+ __cpu_to_le32(ar->wmi.num_mem_chunks));
+
+ cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ cmd->host_mem_chunks[i].ptr =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+ cmd->host_mem_chunks[i].size =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+ cmd->host_mem_chunks[i].req_id =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi chunk %d len %d requested, addr 0x%x\n",
+ i,
+ cmd->host_mem_chunks[i].size,
+ cmd->host_mem_chunks[i].ptr);
+ }
+out:
memcpy(&cmd->resource_config, &config, sizeof(config));
ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
- return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
+ return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
-static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
+static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd_10x *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10x config = {};
+ u32 len, val;
+ int i;
+
+ config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+ config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
+ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
+ config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+
+ config.scan_max_pending_reqs =
+ __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
+
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+ config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
+
+ val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+ config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
+
+ config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(len);
+ if (!buf)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_cmd_10x *)buf->data;
+
+ if (ar->wmi.num_mem_chunks == 0) {
+ cmd->num_host_mem_chunks = 0;
+ goto out;
+ }
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
+ __cpu_to_le32(ar->wmi.num_mem_chunks));
+
+ cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ cmd->host_mem_chunks[i].ptr =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+ cmd->host_mem_chunks[i].size =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+ cmd->host_mem_chunks[i].req_id =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi chunk %d len %d requested, addr 0x%x\n",
+ i,
+ cmd->host_mem_chunks[i].size,
+ cmd->host_mem_chunks[i].ptr);
+ }
+out:
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n");
+ return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
+}
+
+int ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+ int ret;
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ ret = ath10k_wmi_10x_cmd_init(ar);
+ else
+ ret = ath10k_wmi_main_cmd_init(ar);
+
+ return ret;
+}
+
+static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
{
int len;
- len = sizeof(struct wmi_start_scan_cmd);
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ len = sizeof(struct wmi_start_scan_cmd_10x);
+ else
+ len = sizeof(struct wmi_start_scan_cmd);
if (arg->ie_len) {
if (!arg->ie)
@@ -1446,7 +2407,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
int len = 0;
int i;
- len = ath10k_wmi_start_scan_calc_len(arg);
+ len = ath10k_wmi_start_scan_calc_len(ar, arg);
if (len < 0)
return len; /* len contains error code here */
@@ -1478,7 +2439,14 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
/* TLV list starts after fields included in the struct */
- off = sizeof(*cmd);
+	/* There's just one field that differs between the two start_scan
+	 * structures - burst_duration, which we are not using anyway;
+	 * no point in splitting the code here, just shift the buffer to
+	 * fit the given FW */
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ off = sizeof(struct wmi_start_scan_cmd_10x);
+ else
+ off = sizeof(struct wmi_start_scan_cmd);
if (arg->n_channels) {
channels = (void *)skb->data + off;
@@ -1540,7 +2508,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
}
ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
- return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}
void ath10k_wmi_start_scan_init(struct ath10k *ar,
@@ -1556,7 +2524,7 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
arg->repeat_probe_time = 0;
arg->probe_spacing_time = 0;
arg->idle_time = 0;
- arg->max_scan_time = 5000;
+ arg->max_scan_time = 20000;
arg->probe_delay = 5;
arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
| WMI_SCAN_EVENT_COMPLETED
@@ -1600,7 +2568,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
ath10k_dbg(ATH10K_DBG_WMI,
"wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
arg->req_id, arg->req_type, arg->u.scan_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}
int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
@@ -1625,7 +2593,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
"WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
vdev_id, type, subtype, macaddr);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}
int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
@@ -1643,20 +2611,20 @@ int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
ath10k_dbg(ATH10K_DBG_WMI,
"WMI vdev delete id %d\n", vdev_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}
static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg,
- enum wmi_cmd_id cmd_id)
+ u32 cmd_id)
{
struct wmi_vdev_start_request_cmd *cmd;
struct sk_buff *skb;
const char *cmdname;
u32 flags = 0;
- if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
- cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
+ if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
+ cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
return -EINVAL;
if (WARN_ON(arg->ssid && arg->ssid_len == 0))
return -EINVAL;
@@ -1665,9 +2633,9 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
return -EINVAL;
- if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
+ if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
cmdname = "start";
- else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
+ else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
cmdname = "restart";
else
return -EINVAL; /* should not happen, we already check cmd_id */
@@ -1718,15 +2686,17 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
int ath10k_wmi_vdev_start(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg)
{
- return ath10k_wmi_vdev_start_restart(ar, arg,
- WMI_VDEV_START_REQUEST_CMDID);
+ u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
+
+ return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
}
int ath10k_wmi_vdev_restart(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg)
{
- return ath10k_wmi_vdev_start_restart(ar, arg,
- WMI_VDEV_RESTART_REQUEST_CMDID);
+ u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
+
+ return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
}
int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
@@ -1743,7 +2713,7 @@ int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}
int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
@@ -1758,13 +2728,13 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
cmd = (struct wmi_vdev_up_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->vdev_assoc_id = __cpu_to_le32(aid);
- memcpy(&cmd->vdev_bssid.addr, bssid, 6);
+ memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
ath10k_dbg(ATH10K_DBG_WMI,
"wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
vdev_id, aid, bssid);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}
int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
@@ -1782,15 +2752,22 @@ int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
ath10k_dbg(ATH10K_DBG_WMI,
"wmi mgmt vdev down id 0x%x\n", vdev_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}
int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
- enum wmi_vdev_param param_id, u32 param_value)
+ u32 param_id, u32 param_value)
{
struct wmi_vdev_set_param_cmd *cmd;
struct sk_buff *skb;
+ if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "vdev param %d not supported by firmware\n",
+ param_id);
+ return -EOPNOTSUPP;
+ }
+
skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
if (!skb)
return -ENOMEM;
@@ -1804,7 +2781,7 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
"wmi vdev id 0x%x set param %d value %d\n",
vdev_id, param_id, param_value);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}
int ath10k_wmi_vdev_install_key(struct ath10k *ar,
@@ -1839,7 +2816,8 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi vdev install key idx %d cipher %d len %d\n",
arg->key_idx, arg->key_cipher, arg->key_len);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_install_key_cmdid);
}
int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
@@ -1859,7 +2837,7 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer create vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}
int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
@@ -1879,7 +2857,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer delete vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}
int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
@@ -1900,7 +2878,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
vdev_id, peer_addr, tid_bitmap);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}
int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
@@ -1918,13 +2896,13 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->param_id = __cpu_to_le32(param_id);
cmd->param_value = __cpu_to_le32(param_value);
- memcpy(&cmd->peer_macaddr.addr, peer_addr, 6);
+ memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
ath10k_dbg(ATH10K_DBG_WMI,
"wmi vdev %d peer 0x%pM set param %d value %d\n",
vdev_id, peer_addr, param_id, param_value);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}
int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
@@ -1945,7 +2923,8 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
"wmi set powersave id 0x%x mode %d\n",
vdev_id, psmode);
- return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_mode_cmdid);
}
int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
@@ -1967,7 +2946,8 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi sta ps param vdev_id 0x%x param %d value %d\n",
vdev_id, param_id, value);
- return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_param_cmdid);
}
int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
@@ -1993,7 +2973,8 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
"wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
vdev_id, param_id, value, mac);
- return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->ap_ps_peer_param_cmdid);
}
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
@@ -2046,7 +3027,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
ci->flags |= __cpu_to_le32(flags);
}
- return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}
int ath10k_wmi_peer_assoc(struct ath10k *ar,
@@ -2105,10 +3086,11 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer assoc vdev %d addr %pM\n",
arg->vdev_id, arg->addr);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
-int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
+int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
+ const struct wmi_bcn_tx_arg *arg)
{
struct wmi_bcn_tx_cmd *cmd;
struct sk_buff *skb;
@@ -2124,7 +3106,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
- return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
+ return ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
}
static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
@@ -2155,7 +3137,8 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
@@ -2171,7 +3154,7 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
cmd->stats_id = __cpu_to_le32(stats_id);
ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}
int ath10k_wmi_force_fw_hang(struct ath10k *ar,
@@ -2190,5 +3173,5 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
type, delay_ms);
- return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 2c5a4f8daf2e..78c991aec7f9 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -208,6 +208,118 @@ struct wmi_mac_addr {
(c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
} while (0)
+struct wmi_cmd_map {
+ u32 init_cmdid;
+ u32 start_scan_cmdid;
+ u32 stop_scan_cmdid;
+ u32 scan_chan_list_cmdid;
+ u32 scan_sch_prio_tbl_cmdid;
+ u32 pdev_set_regdomain_cmdid;
+ u32 pdev_set_channel_cmdid;
+ u32 pdev_set_param_cmdid;
+ u32 pdev_pktlog_enable_cmdid;
+ u32 pdev_pktlog_disable_cmdid;
+ u32 pdev_set_wmm_params_cmdid;
+ u32 pdev_set_ht_cap_ie_cmdid;
+ u32 pdev_set_vht_cap_ie_cmdid;
+ u32 pdev_set_dscp_tid_map_cmdid;
+ u32 pdev_set_quiet_mode_cmdid;
+ u32 pdev_green_ap_ps_enable_cmdid;
+ u32 pdev_get_tpc_config_cmdid;
+ u32 pdev_set_base_macaddr_cmdid;
+ u32 vdev_create_cmdid;
+ u32 vdev_delete_cmdid;
+ u32 vdev_start_request_cmdid;
+ u32 vdev_restart_request_cmdid;
+ u32 vdev_up_cmdid;
+ u32 vdev_stop_cmdid;
+ u32 vdev_down_cmdid;
+ u32 vdev_set_param_cmdid;
+ u32 vdev_install_key_cmdid;
+ u32 peer_create_cmdid;
+ u32 peer_delete_cmdid;
+ u32 peer_flush_tids_cmdid;
+ u32 peer_set_param_cmdid;
+ u32 peer_assoc_cmdid;
+ u32 peer_add_wds_entry_cmdid;
+ u32 peer_remove_wds_entry_cmdid;
+ u32 peer_mcast_group_cmdid;
+ u32 bcn_tx_cmdid;
+ u32 pdev_send_bcn_cmdid;
+ u32 bcn_tmpl_cmdid;
+ u32 bcn_filter_rx_cmdid;
+ u32 prb_req_filter_rx_cmdid;
+ u32 mgmt_tx_cmdid;
+ u32 prb_tmpl_cmdid;
+ u32 addba_clear_resp_cmdid;
+ u32 addba_send_cmdid;
+ u32 addba_status_cmdid;
+ u32 delba_send_cmdid;
+ u32 addba_set_resp_cmdid;
+ u32 send_singleamsdu_cmdid;
+ u32 sta_powersave_mode_cmdid;
+ u32 sta_powersave_param_cmdid;
+ u32 sta_mimo_ps_mode_cmdid;
+ u32 pdev_dfs_enable_cmdid;
+ u32 pdev_dfs_disable_cmdid;
+ u32 roam_scan_mode;
+ u32 roam_scan_rssi_threshold;
+ u32 roam_scan_period;
+ u32 roam_scan_rssi_change_threshold;
+ u32 roam_ap_profile;
+ u32 ofl_scan_add_ap_profile;
+ u32 ofl_scan_remove_ap_profile;
+ u32 ofl_scan_period;
+ u32 p2p_dev_set_device_info;
+ u32 p2p_dev_set_discoverability;
+ u32 p2p_go_set_beacon_ie;
+ u32 p2p_go_set_probe_resp_ie;
+ u32 p2p_set_vendor_ie_data_cmdid;
+ u32 ap_ps_peer_param_cmdid;
+ u32 ap_ps_peer_uapsd_coex_cmdid;
+ u32 peer_rate_retry_sched_cmdid;
+ u32 wlan_profile_trigger_cmdid;
+ u32 wlan_profile_set_hist_intvl_cmdid;
+ u32 wlan_profile_get_profile_data_cmdid;
+ u32 wlan_profile_enable_profile_id_cmdid;
+ u32 wlan_profile_list_profile_id_cmdid;
+ u32 pdev_suspend_cmdid;
+ u32 pdev_resume_cmdid;
+ u32 add_bcn_filter_cmdid;
+ u32 rmv_bcn_filter_cmdid;
+ u32 wow_add_wake_pattern_cmdid;
+ u32 wow_del_wake_pattern_cmdid;
+ u32 wow_enable_disable_wake_event_cmdid;
+ u32 wow_enable_cmdid;
+ u32 wow_hostwakeup_from_sleep_cmdid;
+ u32 rtt_measreq_cmdid;
+ u32 rtt_tsf_cmdid;
+ u32 vdev_spectral_scan_configure_cmdid;
+ u32 vdev_spectral_scan_enable_cmdid;
+ u32 request_stats_cmdid;
+ u32 set_arp_ns_offload_cmdid;
+ u32 network_list_offload_config_cmdid;
+ u32 gtk_offload_cmdid;
+ u32 csa_offload_enable_cmdid;
+ u32 csa_offload_chanswitch_cmdid;
+ u32 chatter_set_mode_cmdid;
+ u32 peer_tid_addba_cmdid;
+ u32 peer_tid_delba_cmdid;
+ u32 sta_dtim_ps_method_cmdid;
+ u32 sta_uapsd_auto_trig_cmdid;
+ u32 sta_keepalive_cmd;
+ u32 echo_cmdid;
+ u32 pdev_utf_cmdid;
+ u32 dbglog_cfg_cmdid;
+ u32 pdev_qvit_cmdid;
+ u32 pdev_ftm_intg_cmdid;
+ u32 vdev_set_keepalive_cmdid;
+ u32 vdev_get_keepalive_cmdid;
+ u32 force_fw_hang_cmdid;
+ u32 gpio_config_cmdid;
+ u32 gpio_output_cmdid;
+};
+
/*
* wmi command groups.
*/
@@ -247,7 +359,9 @@ enum wmi_cmd_group {
#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
-/* Command IDs and commande events. */
+#define WMI_CMD_UNSUPPORTED 0
+
+/* Command IDs and command events for MAIN FW. */
enum wmi_cmd_id {
WMI_INIT_CMDID = 0x1,
@@ -488,6 +602,217 @@ enum wmi_event_id {
WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
};
+/* Command IDs and command events for 10.X firmware */
+enum wmi_10x_cmd_id {
+ WMI_10X_START_CMDID = 0x9000,
+ WMI_10X_END_CMDID = 0x9FFF,
+
+ /* initialize the wlan sub system */
+ WMI_10X_INIT_CMDID,
+
+ /* Scan specific commands */
+
+ WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID,
+ WMI_10X_STOP_SCAN_CMDID,
+ WMI_10X_SCAN_CHAN_LIST_CMDID,
+ WMI_10X_ECHO_CMDID,
+
+ /* PDEV(physical device) specific commands */
+ WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+ WMI_10X_PDEV_SET_CHANNEL_CMDID,
+ WMI_10X_PDEV_SET_PARAM_CMDID,
+ WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+
+ /* VDEV(virtual device) specific commands */
+ WMI_10X_VDEV_CREATE_CMDID,
+ WMI_10X_VDEV_DELETE_CMDID,
+ WMI_10X_VDEV_START_REQUEST_CMDID,
+ WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+ WMI_10X_VDEV_UP_CMDID,
+ WMI_10X_VDEV_STOP_CMDID,
+ WMI_10X_VDEV_DOWN_CMDID,
+ WMI_10X_VDEV_STANDBY_RESPONSE_CMDID,
+ WMI_10X_VDEV_RESUME_RESPONSE_CMDID,
+ WMI_10X_VDEV_SET_PARAM_CMDID,
+ WMI_10X_VDEV_INSTALL_KEY_CMDID,
+
+ /* peer specific commands */
+ WMI_10X_PEER_CREATE_CMDID,
+ WMI_10X_PEER_DELETE_CMDID,
+ WMI_10X_PEER_FLUSH_TIDS_CMDID,
+ WMI_10X_PEER_SET_PARAM_CMDID,
+ WMI_10X_PEER_ASSOC_CMDID,
+ WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_10X_PEER_MCAST_GROUP_CMDID,
+
+ /* beacon/management specific commands */
+
+ WMI_10X_BCN_TX_CMDID,
+ WMI_10X_BCN_PRB_TMPL_CMDID,
+ WMI_10X_BCN_FILTER_RX_CMDID,
+ WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+ WMI_10X_MGMT_TX_CMDID,
+
+ /* commands to directly control BA negotiation from the host. */
+ WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+ WMI_10X_ADDBA_SEND_CMDID,
+ WMI_10X_ADDBA_STATUS_CMDID,
+ WMI_10X_DELBA_SEND_CMDID,
+ WMI_10X_ADDBA_SET_RESP_CMDID,
+ WMI_10X_SEND_SINGLEAMSDU_CMDID,
+
+ /* Station power save specific config */
+ WMI_10X_STA_POWERSAVE_MODE_CMDID,
+ WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+ WMI_10X_STA_MIMO_PS_MODE_CMDID,
+
+ /* set debug log config */
+ WMI_10X_DBGLOG_CFG_CMDID,
+
+ /* DFS-specific commands */
+ WMI_10X_PDEV_DFS_ENABLE_CMDID,
+ WMI_10X_PDEV_DFS_DISABLE_CMDID,
+
+ /* QVIT specific command id */
+ WMI_10X_PDEV_QVIT_CMDID,
+
+ /* Offload Scan and Roaming related commands */
+ WMI_10X_ROAM_SCAN_MODE,
+ WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_10X_ROAM_SCAN_PERIOD,
+ WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_10X_ROAM_AP_PROFILE,
+ WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+ WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_10X_OFL_SCAN_PERIOD,
+
+ /* P2P specific commands */
+ WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+ WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_10X_P2P_GO_SET_BEACON_IE,
+ WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+
+ /* AP power save specific config */
+ WMI_10X_AP_PS_PEER_PARAM_CMDID,
+ WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+ /* Rate-control specific commands */
+ WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+
+ /* WLAN Profiling commands. */
+ WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+ WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+ /* Suspend resume command Ids */
+ WMI_10X_PDEV_SUSPEND_CMDID,
+ WMI_10X_PDEV_RESUME_CMDID,
+
+ /* Beacon filter commands */
+ WMI_10X_ADD_BCN_FILTER_CMDID,
+ WMI_10X_RMV_BCN_FILTER_CMDID,
+
+ /* WOW specific WMI commands */
+ WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+ WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_10X_WOW_ENABLE_CMDID,
+ WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+ /* RTT measurement related cmd */
+ WMI_10X_RTT_MEASREQ_CMDID,
+ WMI_10X_RTT_TSF_CMDID,
+
+ /* transmit beacon by value */
+ WMI_10X_PDEV_SEND_BCN_CMDID,
+
+ /* F/W stats */
+ WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_10X_REQUEST_STATS_CMDID,
+
+ /* GPIO Configuration */
+ WMI_10X_GPIO_CONFIG_CMDID,
+ WMI_10X_GPIO_OUTPUT_CMDID,
+
+ WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1,
+};
+
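/*
 * Editor's sketch (not part of the patch): the wmi_cmd_map above lets the
 * driver refer to commands symbolically while each firmware branch supplies
 * its own numeric IDs. A minimal illustration, assuming one map per branch
 * is filled at attach time and that commands a branch lacks stay at
 * WMI_CMD_UNSUPPORTED (0); example_cmd_map_10x and example_wmi_send() are
 * hypothetical names, not the driver's actual code.
 */
static const struct wmi_cmd_map example_cmd_map_10x = {
        .init_cmdid = WMI_10X_INIT_CMDID,
        .start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
        .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
        /* entries not listed here are left at 0 (WMI_CMD_UNSUPPORTED)
         * by the designated initializer */
};

static int example_wmi_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
{
        /* refuse to send a command this firmware branch does not know */
        if (cmd_id == WMI_CMD_UNSUPPORTED) {
                dev_kfree_skb_any(skb);
                return -EOPNOTSUPP;
        }

        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}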
+enum wmi_10x_event_id {
+ WMI_10X_SERVICE_READY_EVENTID = 0x8000,
+ WMI_10X_READY_EVENTID,
+ WMI_10X_START_EVENTID = 0x9000,
+ WMI_10X_END_EVENTID = 0x9FFF,
+
+ /* Scan specific events */
+ WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID,
+ WMI_10X_ECHO_EVENTID,
+ WMI_10X_DEBUG_MESG_EVENTID,
+ WMI_10X_UPDATE_STATS_EVENTID,
+
+ /* Instantaneous RSSI event */
+ WMI_10X_INST_RSSI_STATS_EVENTID,
+
+ /* VDEV specific events */
+ WMI_10X_VDEV_START_RESP_EVENTID,
+ WMI_10X_VDEV_STANDBY_REQ_EVENTID,
+ WMI_10X_VDEV_RESUME_REQ_EVENTID,
+ WMI_10X_VDEV_STOPPED_EVENTID,
+
+ /* peer specific events */
+ WMI_10X_PEER_STA_KICKOUT_EVENTID,
+
+ /* beacon/mgmt specific events */
+ WMI_10X_HOST_SWBA_EVENTID,
+ WMI_10X_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_10X_MGMT_RX_EVENTID,
+
+ /* Channel stats event */
+ WMI_10X_CHAN_INFO_EVENTID,
+
+ /* PHY Error specific WMI event */
+ WMI_10X_PHYERR_EVENTID,
+
+ /* Roam event to trigger roaming on host */
+ WMI_10X_ROAM_EVENTID,
+
+ /* matching AP found from list of profiles */
+ WMI_10X_PROFILE_MATCH,
+
+ /* debug print message used for tracing FW code while debugging */
+ WMI_10X_DEBUG_PRINT_EVENTID,
+ /* VI specific event */
+ WMI_10X_PDEV_QVIT_EVENTID,
+ /* FW code profile data in response to profile request */
+ WMI_10X_WLAN_PROFILE_DATA_EVENTID,
+
+ /* RTT related event IDs */
+ WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID,
+ WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_10X_RTT_ERROR_REPORT_EVENTID,
+
+ WMI_10X_WOW_WAKEUP_HOST_EVENTID,
+ WMI_10X_DCS_INTERFERENCE_EVENTID,
+
+ /* TPC config for the current operating channel */
+ WMI_10X_PDEV_TPC_CONFIG_EVENTID,
+
+ WMI_10X_GPIO_INPUT_EVENTID,
+ WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
+};
+
enum wmi_phy_mode {
MODE_11A = 0, /* 11a Mode */
MODE_11G = 1, /* 11b/g Mode */
@@ -508,6 +833,48 @@ enum wmi_phy_mode {
MODE_MAX = 14
};
+static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled */
+ };
+
+ return "<unknown>";
+}
+
#define WMI_CHAN_LIST_TAG 0x1
#define WMI_SSID_LIST_TAG 0x2
#define WMI_BSSID_LIST_TAG 0x3
@@ -763,13 +1130,45 @@ struct wmi_service_ready_event {
struct wlan_host_mem_req mem_reqs[1];
} __packed;
-/*
- * status consists of upper 16 bits fo int status and lower 16 bits of
- * module ID that retuned status
- */
-#define WLAN_INIT_STATUS_SUCCESS 0x0
-#define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff)
-#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
+/* This is the definition from 10.X firmware branch */
+struct wmi_service_ready_event_10x {
+ __le32 sw_version;
+ __le32 abi_version;
+
+ /* WMI_PHY_CAPABILITY */
+ __le32 phy_capability;
+
+ /* Maximum number of frag table entries that SW will populate less 1 */
+ __le32 max_frag_entry;
+ __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
+ __le32 num_rf_chains;
+
+ /*
+ * The following field is only valid for service type
+ * WMI_SERVICE_11AC
+ */
+ __le32 ht_cap_info; /* WMI HT Capability */
+ __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+ __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+
+ struct hal_reg_capabilities hal_reg_capabilities;
+
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+
+ /*
+ * request to the host to allocate a chunk of memory and pass it down to
+ * FW via WMI_INIT. FW uses this as FW extension memory for saving its
+ * data structures. Only valid for low-latency interfaces like PCIe,
+ * where FW can access this memory directly or by DMA.
+ */
+ __le32 num_mem_reqs;
+
+ struct wlan_host_mem_req mem_reqs[1];
+} __packed;
+
#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
@@ -978,6 +1377,192 @@ struct wmi_resource_config {
__le32 max_frag_entries;
} __packed;
+struct wmi_resource_config_10x {
+ /* number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* number of peer nodes to support */
+ __le32 num_peers;
+
+ /* number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /*
+ * max skid for resolving hash collisions
+ *
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /*
+ * the nominal chain mask for transmit
+ *
+ * The chain mask may be modified dynamically, e.g. to operate AP
+ * tx with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /*
+ * the nominal chain mask for receive
+ *
+ * The chain mask may be modified dynamically, e.g. for a client
+ * to use a reduced number of chains for receive if the traffic to
+ * the client is low enough that it doesn't require downlink MIMO
+ * or antenna diversity.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /*
+ * what rx reorder timeout (ms) to use for the AC
+ *
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have
+ * already been received.
+ * This parameter specifies the timeout in milliseconds for each
+ * class.
+ */
+ __le32 rx_timeout_pri_vi;
+ __le32 rx_timeout_pri_vo;
+ __le32 rx_timeout_pri_be;
+ __le32 rx_timeout_pri_bk;
+
+ /*
+ * what mode the rx should decap packets to
+ *
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types
+ * This setting also determines the default TX behavior; however, TX
+ * behavior can be modified on a per-VAP basis during VAP init.
+ */
+ __le32 rx_decap_mode;
+
+ /* maximum number of scan requests that can be queued */
+ __le32 scan_max_pending_reqs;
+
+ /* maximum number of VDEVs that can use BMISS offload */
+ __le32 bmiss_offload_max_vdev;
+
+ /* maximum number of VDEVs that can use offloaded roaming */
+ __le32 roam_offload_max_vdev;
+
+ /* maximum number of AP profiles pushed down for offloaded roaming */
+ __le32 roam_offload_max_ap_profiles;
+
+ /*
+ * how many groups to use for mcast->ucast conversion
+ *
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each
+ * peer within the multicast group.
+ * This num_mcast_groups configuration parameter tells the target how
+ * many multicast groups to provide storage for within its multicast
+ * group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /*
+ * size to alloc for the mcast membership table
+ *
+ * This num_mcast_table_elems configuration parameter tells the
+ * target how many peer elements it needs to provide storage for in
+ * its multicast group membership table.
+ * These multicast group membership table elements are shared by the
+ * multicast groups stored within the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /*
+ * whether/how to do multicast->unicast conversion
+ *
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group
+ * membership table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * drop the frame.
+ * 2 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * transmit the frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /*
+ * how much memory to allocate for a tx PPDU dbg log
+ *
+ * This parameter controls how much memory the target will allocate
+ * to store a log of tx PPDU meta-information (how large the PPDU
+ * was, when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* how many AST entries to be allocated for WDS */
+ __le32 num_wds_entries;
+
+ /*
+ * MAC DMA burst size. For a PCI target the valid values are, e.g.,
+ * 0 - default, 1 - 256 bytes
+ */
+ __le32 dma_burst_size;
+
+ /*
+ * Fixed delimiters to be inserted after every MPDU to
+ * account for interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /*
+ * determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments.
+ *
+ * A-MPDU reordering is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /*
+ * Configuration for VoW :
+ * No of Video Nodes to be supported
+ * and Max no of descriptors for each Video link (node).
+ */
+ __le32 vow_config;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /*
+ * Max. number of Tx fragments per MSDU
+ * This parameter controls the max number of Tx fragments per MSDU.
+ * This is sent by the target as part of the WMI_SERVICE_READY event
+ * and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+} __packed;
+
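/*
 * Editor's sketch (illustrative only, not the patch's defaults): the 10.X
 * resource config is handed to the target as little-endian words, so each
 * field is wrapped in __cpu_to_le32(). example_fill_config_10x() and the
 * numbers below are hypothetical example values.
 */
static void example_fill_config_10x(struct wmi_resource_config_10x *cfg)
{
        cfg->num_vdevs = __cpu_to_le32(8);
        cfg->num_peers = __cpu_to_le32(16);
        /* per the field comment above: 2 means "convert multicast to
         * unicast when the group is known, otherwise send as multicast" */
        cfg->mcast2ucast_mode = __cpu_to_le32(2);
        /* 0: let the target handle frag timeout and duplicate detection */
        cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
}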
+
+#define NUM_UNITS_IS_NUM_VDEVS 0x1
+#define NUM_UNITS_IS_NUM_PEERS 0x2
+
/* strucutre describing host memory chunk. */
struct host_memory_chunk {
/* id of the request that is passed up in service ready */
@@ -999,6 +1584,18 @@ struct wmi_init_cmd {
struct host_memory_chunk host_mem_chunks[1];
} __packed;
+/* The _10x structure is from the 10.X FW API */
+struct wmi_init_cmd_10x {
+ struct wmi_resource_config_10x resource_config;
+ __le32 num_host_mem_chunks;
+
+ /*
+ * variable number of host memory chunks.
+ * This should be the last element in the structure
+ */
+ struct host_memory_chunk host_mem_chunks[1];
+} __packed;
+
/* TLV for channel list */
struct wmi_chan_list {
__le32 tag; /* WMI_CHAN_LIST_TAG */
@@ -1118,6 +1715,88 @@ struct wmi_start_scan_cmd {
*/
} __packed;
+/* This is the definition from 10.X firmware branch */
+struct wmi_start_scan_cmd_10x {
+ /* Scan ID */
+ __le32 scan_id;
+
+ /* Scan requestor ID */
+ __le32 scan_req_id;
+
+ /* VDEV id(interface) that is requesting scan */
+ __le32 vdev_id;
+
+ /* Scan Priority, input to scan scheduler */
+ __le32 scan_priority;
+
+ /* Scan events subscription */
+ __le32 notify_scan_events;
+
+ /* dwell time in msec on active channels */
+ __le32 dwell_time_active;
+
+ /* dwell time in msec on passive channels */
+ __le32 dwell_time_passive;
+
+ /*
+ * min time in msec on the BSS channel, only valid if at least one
+ * VDEV is active
+ */
+ __le32 min_rest_time;
+
+ /*
+ * max rest time in msec on the BSS channel, only valid if at least
+ * one VDEV is active
+ */
+ /*
+ * The scanner will rest on the BSS channel for at least min_rest_time.
+ * After min_rest_time the scanner will start checking for tx/rx
+ * activity on all VDEVs. If there is no activity the scanner will
+ * switch to an off channel. If there is activity the scanner will keep
+ * the radio on the BSS channel until max_rest_time expires. At
+ * max_rest_time the scanner will switch to an off channel irrespective
+ * of activity. Activity is determined by the idle_time parameter.
+ */
+ __le32 max_rest_time;
+
+ /*
+ * time before sending next set of probe requests.
+ * The scanner keeps repeating probe request transmission with the
+ * period specified by repeat_probe_time.
+ * The number of probe requests sent depends on the ssid_list
+ * and bssid_list.
+ */
+ __le32 repeat_probe_time;
+
+ /* time in msec between 2 consecutive probe requests within a set. */
+ __le32 probe_spacing_time;
+
+ /*
+ * data inactivity time in msec on bss channel that will be used by
+ * scanner for measuring the inactivity.
+ */
+ __le32 idle_time;
+
+ /* maximum time in msec allowed for scan */
+ __le32 max_scan_time;
+
+ /*
+ * delay in msec before sending first probe request after switching
+ * to a channel
+ */
+ __le32 probe_delay;
+
+ /* Scan control flags */
+ __le32 scan_ctrl_flags;
+
+ /*
+ * TLV (tag length value) parameters follow the scan_cmd structure.
+ * The TLVs can contain a channel list, bssid list, ssid list and
+ * IEs. The TLV tags are defined above.
+ */
+} __packed;
+
+
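/*
 * Editor's sketch of how the scan timing knobs above fit together: the
 * scanner stays on the BSS channel between min_rest_time and max_rest_time
 * depending on traffic, and idle_time defines what counts as "no activity".
 * example_fill_scan_timing() and the millisecond values are hypothetical,
 * not the driver's defaults.
 */
static void example_fill_scan_timing(struct wmi_start_scan_cmd_10x *cmd)
{
        cmd->dwell_time_active = __cpu_to_le32(50);   /* ms on active channels */
        cmd->dwell_time_passive = __cpu_to_le32(150); /* ms on passive channels */
        cmd->min_rest_time = __cpu_to_le32(50);       /* ms back on the BSS channel */
        cmd->max_rest_time = __cpu_to_le32(500);      /* hard cap, traffic or not */
        cmd->idle_time = __cpu_to_le32(25);           /* "no activity" threshold, ms */
        cmd->probe_delay = __cpu_to_le32(5);          /* ms after a channel switch */
}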
struct wmi_ssid_arg {
int len;
const u8 *ssid;
@@ -1268,7 +1947,7 @@ struct wmi_scan_event {
* good idea to pass all the fields in the RX status
* descriptor up to the host.
*/
-struct wmi_mgmt_rx_hdr {
+struct wmi_mgmt_rx_hdr_v1 {
__le32 channel;
__le32 snr;
__le32 rate;
@@ -1277,8 +1956,18 @@ struct wmi_mgmt_rx_hdr {
__le32 status; /* %WMI_RX_STATUS_ */
} __packed;
-struct wmi_mgmt_rx_event {
- struct wmi_mgmt_rx_hdr hdr;
+struct wmi_mgmt_rx_hdr_v2 {
+ struct wmi_mgmt_rx_hdr_v1 v1;
+ __le32 rssi_ctl[4];
+} __packed;
+
+struct wmi_mgmt_rx_event_v1 {
+ struct wmi_mgmt_rx_hdr_v1 hdr;
+ u8 buf[0];
+} __packed;
+
+struct wmi_mgmt_rx_event_v2 {
+ struct wmi_mgmt_rx_hdr_v2 hdr;
u8 buf[0];
} __packed;
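/*
 * Editor's sketch: the v2 header only appends per-chain rssi_ctl[] values
 * after the v1 fields, so one receive path can handle both layouts by
 * copying the embedded v1 part. The "use_v2" decision and
 * example_pull_mgmt_rx_hdr() are hypothetical, not the patch's code.
 */
static u8 *example_pull_mgmt_rx_hdr(struct sk_buff *skb, bool use_v2,
                                    struct wmi_mgmt_rx_hdr_v1 *hdr)
{
        if (use_v2) {
                struct wmi_mgmt_rx_event_v2 *ev = (void *)skb->data;

                *hdr = ev->hdr.v1; /* the common fields live in the embedded v1 */
                return ev->buf;
        } else {
                struct wmi_mgmt_rx_event_v1 *ev = (void *)skb->data;

                *hdr = ev->hdr;
                return ev->buf;
        }
}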
@@ -1465,6 +2154,60 @@ struct wmi_csa_event {
#define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500
#define PEER_DEFAULT_STATS_UPDATE_PERIOD 500
+struct wmi_pdev_param_map {
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 txpower_limit2g;
+ u32 txpower_limit5g;
+ u32 txpower_scale;
+ u32 beacon_gen_mode;
+ u32 beacon_tx_mode;
+ u32 resmgr_offchan_mode;
+ u32 protection_mode;
+ u32 dynamic_bw;
+ u32 non_agg_sw_retry_th;
+ u32 agg_sw_retry_th;
+ u32 sta_kickout_th;
+ u32 ac_aggrsize_scaling;
+ u32 ltr_enable;
+ u32 ltr_ac_latency_be;
+ u32 ltr_ac_latency_bk;
+ u32 ltr_ac_latency_vi;
+ u32 ltr_ac_latency_vo;
+ u32 ltr_ac_latency_timeout;
+ u32 ltr_sleep_override;
+ u32 ltr_rx_override;
+ u32 ltr_tx_activity_timeout;
+ u32 l1ss_enable;
+ u32 dsleep_enable;
+ u32 pcielp_txbuf_flush;
+ u32 pcielp_txbuf_watermark;
+ u32 pcielp_txbuf_tmo_en;
+ u32 pcielp_txbuf_tmo_value;
+ u32 pdev_stats_update_period;
+ u32 vdev_stats_update_period;
+ u32 peer_stats_update_period;
+ u32 bcnflt_stats_update_period;
+ u32 pmf_qos;
+ u32 arp_ac_override;
+ u32 arpdhcp_ac_override;
+ u32 dcs;
+ u32 ani_enable;
+ u32 ani_poll_period;
+ u32 ani_listen_period;
+ u32 ani_ofdm_level;
+ u32 ani_cck_level;
+ u32 dyntxchain;
+ u32 proxy_sta;
+ u32 idle_ps_config;
+ u32 power_gating_sleep;
+ u32 fast_channel_reset;
+ u32 burst_dur;
+ u32 burst_enable;
+};
+
+#define WMI_PDEV_PARAM_UNSUPPORTED 0
+
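/*
 * Editor's sketch: with pdev parameters also resolved through a per-firmware
 * map, the value 0 (WMI_PDEV_PARAM_UNSUPPORTED) doubles as "this firmware
 * has no such parameter", so callers can guard like the hypothetical helper
 * below (illustrative, not the patch's implementation).
 */
static int example_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
        if (id == WMI_PDEV_PARAM_UNSUPPORTED)
                return -EOPNOTSUPP;

        return ath10k_wmi_pdev_set_param(ar, id, value);
}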
enum wmi_pdev_param {
/* TX chian mask */
WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
@@ -1564,6 +2307,97 @@ enum wmi_pdev_param {
WMI_PDEV_PARAM_POWER_GATING_SLEEP,
};
+enum wmi_10x_pdev_param {
+ /* TX chain mask */
+ WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ /* RX chain mask */
+ WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ /* TX power limit for 2G Radio */
+ WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ /* TX power limit for 5G Radio */
+ WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ /* TX power scale */
+ WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ /* Beacon generation mode. 0: host, 1: target */
+ WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ /* Beacon transmission mode. 0: staggered, 1: bursted */
+ WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ /*
+ * Resource manager off-channel mode.
+ * 0: turn off off-channel mode, 1: turn on off-channel mode
+ */
+ WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ /*
+ * Protection mode:
+ * 0: no protection, 1: use CTS-to-self, 2: use RTS/CTS
+ */
+ WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ /* Dynamic bandwidth 0: disable 1: enable */
+ WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ /* Non-aggregate / 11g sw retry threshold. 0 - disable */
+ WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ /* Aggregate sw retry threshold. 0 - disable */
+ WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ /* Station kickout threshold (number of consecutive failures). 0 - disable */
+ WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ /* Aggregate size scaling configuration per AC */
+ WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ /* LTR enable */
+ WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ /* LTR latency for BE, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ /* LTR latency for BK, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ /* LTR latency for VI, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ /* LTR latency for VO, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ /* LTR AC latency timeout, in ms */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ /* LTR platform latency override, in us */
+ WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ /* LTR-RX override, in us */
+ WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ /* Tx activity timeout for LTR, in us */
+ WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ /* L1SS state machine enable */
+ WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ /* Deep sleep state machine enable */
+ WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ /* pdev level stats update period in ms */
+ WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ /* vdev level stats update period in ms */
+ WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ /* peer level stats update period in ms */
+ WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ /* beacon filter status update period */
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+ WMI_10X_PDEV_PARAM_PMF_QOS,
+ /* Access category on which ARP and DHCP frames are sent */
+ WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ /* DCS configuration */
+ WMI_10X_PDEV_PARAM_DCS,
+ /* Enable/Disable ANI on target */
+ WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ /* configure the ANI polling period */
+ WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ /* configure the ANI listening period */
+ WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ /* configure OFDM immunity level */
+ WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ /* configure CCK immunity level */
+ WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ /* Enable/Disable CDD for 1x1 STAs in rate control module */
+ WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ /* Enable/Disable fast channel reset */
+ WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ /* Set bursting duration */
+ WMI_10X_PDEV_PARAM_BURST_DUR,
+ /* Enable/Disable bursting */
+ WMI_10X_PDEV_PARAM_BURST_ENABLE,
+};
+
struct wmi_pdev_set_param_cmd {
__le32 param_id;
__le32 param_value;
@@ -2088,6 +2922,61 @@ enum wmi_rate_preamble {
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
+struct wmi_vdev_param_map {
+ u32 rts_threshold;
+ u32 fragmentation_threshold;
+ u32 beacon_interval;
+ u32 listen_interval;
+ u32 multicast_rate;
+ u32 mgmt_tx_rate;
+ u32 slot_time;
+ u32 preamble;
+ u32 swba_time;
+ u32 wmi_vdev_stats_update_period;
+ u32 wmi_vdev_pwrsave_ageout_time;
+ u32 wmi_vdev_host_swba_interval;
+ u32 dtim_period;
+ u32 wmi_vdev_oc_scheduler_air_time_limit;
+ u32 wds;
+ u32 atim_window;
+ u32 bmiss_count_max;
+ u32 bmiss_first_bcnt;
+ u32 bmiss_final_bcnt;
+ u32 feature_wmm;
+ u32 chwidth;
+ u32 chextoffset;
+ u32 disable_htprotection;
+ u32 sta_quickkickout;
+ u32 mgmt_rate;
+ u32 protection_mode;
+ u32 fixed_rate;
+ u32 sgi;
+ u32 ldpc;
+ u32 tx_stbc;
+ u32 rx_stbc;
+ u32 intra_bss_fwd;
+ u32 def_keyid;
+ u32 nss;
+ u32 bcast_data_rate;
+ u32 mcast_data_rate;
+ u32 mcast_indicate;
+ u32 dhcp_indicate;
+ u32 unknown_dest_indicate;
+ u32 ap_keepalive_min_idle_inactive_time_secs;
+ u32 ap_keepalive_max_idle_inactive_time_secs;
+ u32 ap_keepalive_max_unresponsive_time_secs;
+ u32 ap_enable_nawds;
+ u32 mcast2ucast_set;
+ u32 enable_rtscts;
+ u32 txbf;
+ u32 packet_powersave;
+ u32 drop_unencry;
+ u32 tx_encap_type;
+ u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+};
+
+#define WMI_VDEV_PARAM_UNSUPPORTED 0
+
/* the definition of different VDEV parameters */
enum wmi_vdev_param {
/* RTS Threshold */
@@ -2219,6 +3108,121 @@ enum wmi_vdev_param {
WMI_VDEV_PARAM_TX_ENCAP_TYPE,
};
+/* the definition of different VDEV parameters */
+enum wmi_10x_vdev_param {
+ /* RTS Threshold */
+ WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ /* Fragmentation threshold */
+ WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ /* beacon interval in TUs */
+ WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ /* Listen interval in TUs */
+ WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ /* multicast rate in Mbps */
+ WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ /* management frame rate in Mbps */
+ WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ /* slot time (long vs short) */
+ WMI_10X_VDEV_PARAM_SLOT_TIME,
+ /* preamble (long vs short) */
+ WMI_10X_VDEV_PARAM_PREAMBLE,
+ /* SWBA time (time before tbtt in msec) */
+ WMI_10X_VDEV_PARAM_SWBA_TIME,
+ /* time period for updating VDEV stats */
+ WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ /* age out time in msec for frames queued for station in power save */
+ WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ /*
+ * Host SWBA interval (time in msec before tbtt for SWBA event
+ * generation).
+ */
+ WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ /* DTIM period (specified in units of num beacon intervals) */
+ WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ /*
+ * scheduler air time limit for this VDEV. used by off chan
+ * scheduler.
+ */
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ /* enable/disable WDS for this VDEV */
+ WMI_10X_VDEV_PARAM_WDS,
+ /* ATIM Window */
+ WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ /* BMISS max */
+ WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ /* WMM enable/disable */
+ WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ /* Channel width */
+ WMI_10X_VDEV_PARAM_CHWIDTH,
+ /* Channel Offset */
+ WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ /* Disable HT Protection */
+ WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ /* Quick STA Kickout */
+ WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ /* Rate to be used with Management frames */
+ WMI_10X_VDEV_PARAM_MGMT_RATE,
+ /* Protection Mode */
+ WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ /* Fixed rate setting */
+ WMI_10X_VDEV_PARAM_FIXED_RATE,
+ /* Short GI Enable/Disable */
+ WMI_10X_VDEV_PARAM_SGI,
+ /* Enable LDPC */
+ WMI_10X_VDEV_PARAM_LDPC,
+ /* Enable Tx STBC */
+ WMI_10X_VDEV_PARAM_TX_STBC,
+ /* Enable Rx STBC */
+ WMI_10X_VDEV_PARAM_RX_STBC,
+ /* Intra BSS forwarding */
+ WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ /* Setting Default xmit key for Vdev */
+ WMI_10X_VDEV_PARAM_DEF_KEYID,
+ /* NSS width */
+ WMI_10X_VDEV_PARAM_NSS,
+ /* Set the custom rate for the broadcast data frames */
+ WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ /* Set the custom rate (rate-code) for multicast data frames */
+ WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ /* Tx multicast packet indicate Enable/Disable */
+ WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ /* Tx DHCP packet indicate Enable/Disable */
+ WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ /* Enable host inspection of Tx unicast packet to unknown destination */
+ WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+ /* The minimum idle time after which the AP begins to consider a STA inactive */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered inactive when there is no recent
+ * TX/RX activity and no downlink frames are buffered for it. Once a
+ * STA exceeds the maximum idle inactive time, the AP will send an
+ * 802.11 data-null as a keep alive to verify the STA is still
+ * associated. If the STA does not ACK the data-null, or if the data-null
+ * is buffered and the STA does not retrieve it, the STA will be
+ * considered unresponsive
+ * (see WMI_10X_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+ */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered unresponsive if there is no recent
+ * TX/RX activity and downlink frames are buffered for it. Once a STA
+ * exceeds the maximum unresponsive time, the AP will send a
+ * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted. */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+ /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+ WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+
+ WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ /* Enable/Disable RTS-CTS */
+ WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
+
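/*
 * Editor's sketch: the AP keepalive behaviour described above is driven by
 * the vdev parameters in wmi_vdev_param_map. A hypothetical helper with
 * example timeouts (the map pointer and the 60/120 second values are
 * assumptions, not values taken from the patch):
 */
static int example_set_ap_keepalive(struct ath10k *ar, u32 vdev_id,
                                    const struct wmi_vdev_param_map *map)
{
        int ret;

        /* treat a STA as idle after 60 s without traffic or buffered frames */
        ret = ath10k_wmi_vdev_set_param(ar, vdev_id,
                        map->ap_keepalive_min_idle_inactive_time_secs, 60);
        if (ret)
                return ret;

        /* declare it unresponsive (kickout event) after 120 s of silence */
        return ath10k_wmi_vdev_set_param(ar, vdev_id,
                        map->ap_keepalive_max_unresponsive_time_secs, 120);
}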
/* slot time long */
#define WMI_VDEV_SLOT_TIME_LONG 0x1
/* slot time short */
@@ -3000,7 +4004,6 @@ struct wmi_force_fw_hang_cmd {
#define WMI_MAX_EVENT 0x1000
/* Maximum number of pending TXed WMI packets */
-#define WMI_MAX_PENDING_TX_COUNT 128
#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
/* By default disable power save for IBSS */
@@ -3013,7 +4016,6 @@ int ath10k_wmi_attach(struct ath10k *ar);
void ath10k_wmi_detach(struct ath10k *ar);
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
-void ath10k_wmi_flush_tx(struct ath10k *ar);
int ath10k_wmi_connect_htc_service(struct ath10k *ar);
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@@ -3022,8 +4024,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
u16 rd5g, u16 ctl2g, u16 ctl5g);
-int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
- u32 value);
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
int ath10k_wmi_cmd_init(struct ath10k *ar);
int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
@@ -3043,7 +4044,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
const u8 *bssid);
int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
- enum wmi_vdev_param param_id, u32 param_value);
+ u32 param_id, u32 param_value);
int ath10k_wmi_vdev_install_key(struct ath10k *ar,
const struct wmi_vdev_install_key_arg *arg);
int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
@@ -3066,11 +4067,13 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
enum wmi_ap_ps_peer_param param_id, u32 value);
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg);
-int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
+ const struct wmi_bcn_tx_arg *arg);
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg);
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
int ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms);
+int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index e9bc9e616b69..79bffe165cab 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -37,12 +37,9 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath5k_hw *ah = common->priv;
struct platform_device *pdev = to_platform_device(ah->dev);
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u16 *eeprom, *eeprom_end;
-
-
- bcfg = pdev->dev.platform_data;
eeprom = (u16 *) bcfg->radio;
eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
@@ -57,7 +54,7 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
struct platform_device *pdev = to_platform_device(ah->dev);
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
ah->ah_mac_srev = bcfg->devid;
return 0;
}
@@ -65,7 +62,7 @@ int ath5k_hw_read_srev(struct ath5k_hw *ah)
static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
struct platform_device *pdev = to_platform_device(ah->dev);
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u8 *cfg_mac;
if (to_platform_device(ah->dev)->id == 0)
@@ -87,7 +84,7 @@ static const struct ath_bus_ops ath_ahb_bus_ops = {
/*Initialization*/
static int ath_ahb_probe(struct platform_device *pdev)
{
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ath5k_hw *ah;
struct ieee80211_hw *hw;
struct resource *res;
@@ -96,7 +93,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
int ret = 0;
u32 reg;
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data specified\n");
ret = -EINVAL;
goto err_out;
@@ -193,7 +190,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
static int ath_ahb_remove(struct platform_device *pdev)
{
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
struct ath5k_hw *ah;
u32 reg;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 48161edec8de..69f58b073e85 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1663,15 +1663,15 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
ah->stats.tx_bytes_count += skb->len;
info = IEEE80211_SKB_CB(skb);
+ size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
+ memcpy(info->status.rates, bf->rates, size);
+
tries[0] = info->status.rates[0].count;
tries[1] = info->status.rates[1].count;
tries[2] = info->status.rates[2].count;
ieee80211_tx_info_clear_status(info);
- size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
- memcpy(info->status.rates, bf->rates, size);
-
for (i = 0; i < ts->ts_final_idx; i++) {
struct ieee80211_tx_rate *r =
&info->status.rates[i];
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index ce86f158423b..ba200b24be64 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -661,7 +661,7 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
AR5K_SISR1_QCU_TXEOL);
- /* Currently this is not much usefull since we treat
+ /* Currently this is not much useful since we treat
* all queues the same way if we get a TXURN (update
* tx trigger level) but we might need it later on*/
if (pisr & AR5K_ISR_TXURN)
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index 98a886154d9c..05debf700a84 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -22,8 +22,7 @@
#define ATH6KL_MAX_IE 256
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
/*
* Reflects the version of binary interface exposed by ATH6KL target
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 74369de00fb5..ca9ba005f287 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -50,11 +50,10 @@ enum ATH6K_DEBUG_MASK {
};
extern unsigned int debug_mask;
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_info(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_err(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_warn(const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(1, 2) int ath6kl_info(const char *fmt, ...);
+__printf(1, 2) int ath6kl_err(const char *fmt, ...);
+__printf(1, 2) int ath6kl_warn(const char *fmt, ...);
enum ath6kl_war {
ATH6KL_WAR_INVALID_RATE,
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index a2c8ff809793..14cab1403dd6 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -60,7 +60,7 @@
/* disable credit flow control on a specific service */
#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL (1 << 3)
#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT 8
-#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00
+#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00U
/* connect response status codes */
#define HTC_SERVICE_SUCCESS 0
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7944c25c9a43..32f139e2e897 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -84,6 +84,26 @@ config ATH9K_DFS_CERTIFIED
developed. At this point enabling this option won't do anything
except increase code size.
+config ATH9K_TX99
+ bool "Atheros ath9k TX99 testing support"
+ depends on CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled on systems undergoing
+ certification testing and evaluation in a controlled environment.
+ Enabling this will only enable TX99 support, all other modes of
+ operation will be disabled.
+
+ TX99 support enables Specific Absorption Rate (SAR) testing.
+ SAR is the unit of measurement for the amount of radio frequency (RF)
+ energy absorbed by the body when using a wireless device. The RF
+ exposure limits used are expressed in terms of SAR, which is a measure
+ of the electric and magnetic field strength and power density for
+ transmitters operating at frequencies from 300 kHz to 100 GHz.
+ Regulatory bodies around the world require that wireless devices
+ be evaluated to meet the RF exposure limits set forth in the
+ governmental SAR regulations.
+
config ATH9K_LEGACY_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 75ee9e7704ce..6205ef5a9321 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -14,9 +14,7 @@ ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
- dfs.o \
- dfs_pattern_detector.o \
- dfs_pri_detector.o
+ dfs.o
ath9k-$(CONFIG_PM_SLEEP) += wow.o
obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 072e4b531067..2dff2765769b 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -54,7 +54,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
struct platform_device *pdev = to_platform_device(sc->dev);
struct ath9k_platform_data *pdata;
- pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
ath_err(common,
"%s: flash read failed, offset %08x is out of range\n",
@@ -84,7 +84,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
struct ath_hw *ah;
char hw_name[64];
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data specified\n");
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index be466b0ef7a7..d28923b7435b 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -338,10 +338,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
aniState->cckNoiseImmunityLevel !=
ATH9K_ANI_CCK_DEF_LEVEL) {
ath_dbg(common, ANI,
- "Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+ "Restore defaults: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
- chan->channelFlags,
is_scanning,
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
@@ -354,10 +353,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
* restore historical levels for this channel
*/
ath_dbg(common, ANI,
- "Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+ "Restore history: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
- chan->channelFlags,
is_scanning,
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index dd1cc73d7946..bd048cc69a33 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -332,7 +332,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
}
if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
- ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
+ div_ant_conf->lna1_lna2_switch_delta)
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
else
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
@@ -554,42 +554,22 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
break;
case 0x10: /* LNA2 A-B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x1;
- }
+ ant_conf->fast_div_bias = 0x2;
break;
case 0x12: /* LNA2 LNA1 */
- ant_conf->fast_div_bias = 0x39;
+ ant_conf->fast_div_bias = 0x3f;
break;
case 0x13: /* LNA2 A+B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x1;
- }
+ ant_conf->fast_div_bias = 0x2;
break;
case 0x20: /* LNA1 A-B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x4;
- }
+ ant_conf->fast_div_bias = 0x3;
break;
case 0x21: /* LNA1 LNA2 */
- ant_conf->fast_div_bias = 0x6;
+ ant_conf->fast_div_bias = 0x3;
break;
case 0x23: /* LNA1 A+B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x6;
- }
+ ant_conf->fast_div_bias = 0x3;
break;
case 0x30: /* A+B A-B */
ant_conf->fast_div_bias = 0x1;
@@ -638,7 +618,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
antcomb->rssi_sub = alt_rssi_avg;
antcomb->scan = false;
if (antcomb->rssi_lna2 >
- (antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
+ (antcomb->rssi_lna1 + conf->lna1_lna2_switch_delta)) {
/* use LNA2 as main LNA */
if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
(antcomb->rssi_add > antcomb->rssi_sub)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 08656473c63e..ff415e863ee9 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -626,12 +626,11 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
if (AR_SREV_9287_11_OR_LATER(ah))
val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
+ val |= AR_PCU_MISC_MODE2_CFP_IGNORE;
+
REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
}
- REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
-
if (AR_SREV_9280_20_OR_LATER(ah))
return;
/*
@@ -667,14 +666,13 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
if (IS_CHAN_HT40(chan)) {
phymode |= AR_PHY_FC_DYN2040_EN;
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS))
+ if (IS_CHAN_HT40PLUS(chan))
phymode |= AR_PHY_FC_DYN2040_PRI_CH;
}
REG_WRITE(ah, AR_PHY_TURBO, phymode);
- ath9k_hw_set11nmac2040(ah);
+ ath9k_hw_set11nmac2040(ah, chan);
ENABLE_REGWRITE_BUFFER(ah);
@@ -692,31 +690,12 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
int i, regWrites = 0;
u32 modesIndex, freqIndex;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- freqIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
+ if (IS_CHAN_5GHZ(chan)) {
freqIndex = 1;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- freqIndex = 2;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ } else {
freqIndex = 2;
- break;
-
- default:
- return -EINVAL;
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
}
/*
@@ -815,8 +794,10 @@ static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
if (chan == NULL)
return;
- rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
- ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+ if (IS_CHAN_2GHZ(chan))
+ rfMode |= AR_PHY_MODE_DYNAMIC;
+ else
+ rfMode |= AR_PHY_MODE_OFDM;
if (!AR_SREV_9280_20_OR_LATER(ah))
rfMode |= (IS_CHAN_5GHZ(chan)) ?
@@ -1219,12 +1200,11 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
iniDef = &aniState->iniDef;
- ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
- chan->channel,
- chan->channelFlags);
+ chan->channel);
val = REG_READ(ah, AR_PHY_SFCORR);
iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 9f589744a9f9..cdc74005650c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -33,15 +33,12 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
bool supported = false;
switch (ah->supp_cals & cal_type) {
case IQ_MISMATCH_CAL:
- /* Run IQ Mismatch for non-CCK only */
- if (!IS_CHAN_B(chan))
- supported = true;
+ supported = true;
break;
case ADC_GAIN_CAL:
case ADC_DC_CAL:
/* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
- if (!IS_CHAN_B(chan) &&
- !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
+ if (!((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
IS_CHAN_HT20(chan)))
supported = true;
break;
@@ -671,7 +668,7 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
if (ah->caldata)
- nfcal_pending = ah->caldata->nfcal_pending;
+ nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
if (currCal && !nfcal &&
(currCal->calState == CAL_RUNNING ||
@@ -861,7 +858,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
ar9002_hw_pa_cal(ah, true);
if (ah->caldata)
- ah->caldata->nfcal_pending = true;
+ set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index fb61b081d172..5c95fd9e9c9e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -419,28 +419,10 @@ void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan)
u32 modesIndex;
int i;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- break;
-
- default:
- return;
- }
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
ENABLE_REGWRITE_BUFFER(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 1fc1fa955d44..f087117b2e6b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -485,7 +485,7 @@ static void ar9002_hw_do_getnf(struct ath_hw *ah,
if (IS_CHAN_HT40(ah->curchan))
nfarray[3] = sign_extend32(nf, 8);
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ if (!(ah->rxchainmask & BIT(1)))
return;
nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
@@ -532,6 +532,7 @@ static void ar9002_hw_antdiv_comb_conf_get(struct ath_hw *ah,
AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;
antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >>
AR_PHY_9285_FAST_DIV_BIAS_S;
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -3;
antconf->div_group = 0;
}
@@ -679,6 +680,26 @@ static void ar9002_hw_spectral_scan_wait(struct ath_hw *ah)
}
}
+static void ar9002_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+ REG_SET_BIT(ah, 0x9864, 0x7f000);
+ REG_SET_BIT(ah, 0x9924, 0x7f00fe);
+ REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+ REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20);
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
+ REG_WRITE(ah, AR_D_FPCTL, 0x10|qnum);
+ REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
+ REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
+ REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+}
+
+static void ar9002_hw_tx99_stop(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+}
+
void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -700,6 +721,8 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity;
#endif
+ ops->tx99_start = ar9002_hw_tx99_start;
+ ops->tx99_stop = ar9002_hw_tx99_stop;
ar9002_hw_set_nf_limits(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 6988e1d081f2..22934d3ca544 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -727,8 +727,12 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
- if (caldata)
- caldata->done_txiqcal_once = is_reusable;
+ if (caldata) {
+ if (is_reusable)
+ set_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ else
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ }
return;
}
@@ -961,18 +965,44 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
}
static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
- struct ath9k_channel *chan)
+ struct ath9k_channel *chan,
+ bool run_rtt_cal)
{
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
int i;
if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
return;
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
+ return;
+
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->rxchainmask & (1 << i)))
continue;
ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan));
}
+
+ if (caldata)
+ set_bit(SW_PKDET_DONE, &caldata->cal_flags);
+
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && caldata) {
+ if (IS_CHAN_2GHZ(chan)) {
+ caldata->caldac[0] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+ caldata->caldac[1] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+ } else {
+ caldata->caldac[0] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+ caldata->caldac[1] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+ }
+ }
}
static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
@@ -990,7 +1020,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
AR_PHY_AGC_CONTROL_CLC_SUCCESS);
- if (caldata->done_txclcal_once) {
+ if (test_bit(TXCLCAL_DONE, &caldata->cal_flags)) {
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->txchainmask & (1 << i)))
continue;
@@ -1006,7 +1036,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
caldata->tx_clcal[i][j] =
REG_READ(ah, CL_TAB_ENTRY(cl_idx[i]));
}
- caldata->done_txclcal_once = true;
+ set_bit(TXCLCAL_DONE, &caldata->cal_flags);
}
}
@@ -1019,6 +1049,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
bool is_reusable = true, status = true;
bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+ u32 rx_delay = 0;
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -1042,17 +1073,22 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
ar9003_hw_rtt_clear_hist(ah);
}
- if (rtt && !run_rtt_cal) {
- agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
- agc_supp_cals &= agc_ctrl;
- agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
- AR_PHY_AGC_CONTROL_FLTR_CAL |
- AR_PHY_AGC_CONTROL_PKDET_CAL);
- REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ if (rtt) {
+ if (!run_rtt_cal) {
+ agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
+ agc_supp_cals &= agc_ctrl;
+ agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
+ AR_PHY_AGC_CONTROL_FLTR_CAL |
+ AR_PHY_AGC_CONTROL_PKDET_CAL);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ } else {
+ if (ah->ah_flags & AH_FASTCC)
+ run_agc_cal = true;
+ }
}
if (ah->enabled_cals & TX_CL_CAL) {
- if (caldata && caldata->done_txclcal_once)
+ if (caldata && test_bit(TXCLCAL_DONE, &caldata->cal_flags))
REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
AR_PHY_CL_CAL_ENABLE);
else {
@@ -1076,14 +1112,14 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
* AGC calibration
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
- if (caldata && !caldata->done_txiqcal_once)
+ if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags))
REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
else
REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
txiqcal_done = run_agc_cal = true;
- } else if (caldata && !caldata->done_txiqcal_once) {
+ } else if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags)) {
run_agc_cal = true;
sep_iq_cal = true;
}
@@ -1099,6 +1135,15 @@ skip_tx_iqcal:
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
}
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+ rx_delay = REG_READ(ah, AR_PHY_RX_DELAY);
+ /* Disable BB_active */
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+ udelay(5);
+ REG_WRITE(ah, AR_PHY_RX_DELAY, AR_PHY_RX_DELAY_DELAY);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+ }
+
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
/* Calibrate the AGC */
REG_WRITE(ah, AR_PHY_AGC_CONTROL,
@@ -1110,7 +1155,12 @@ skip_tx_iqcal:
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
- ar9003_hw_do_manual_peak_cal(ah, chan);
+ ar9003_hw_do_manual_peak_cal(ah, chan, run_rtt_cal);
+ }
+
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+ REG_WRITE(ah, AR_PHY_RX_DELAY, rx_delay);
+ udelay(5);
}
if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
@@ -1133,19 +1183,23 @@ skip_tx_iqcal:
if (txiqcal_done)
ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
- else if (caldata && caldata->done_txiqcal_once)
+ else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
ar9003_hw_tx_iq_cal_reload(ah);
ar9003_hw_cl_cal_post_proc(ah, is_reusable);
if (run_rtt_cal && caldata) {
if (is_reusable) {
- if (!ath9k_hw_rfbus_req(ah))
+ if (!ath9k_hw_rfbus_req(ah)) {
ath_err(ath9k_hw_common(ah),
"Could not stop baseband\n");
- else
+ } else {
ar9003_hw_rtt_fill_hist(ah);
+ if (test_bit(SW_PKDET_DONE, &caldata->cal_flags))
+ ar9003_hw_rtt_load_hist(ah);
+ }
+
ath9k_hw_rfbus_done(ah);
}
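
The calibration hunks above replace the one-off booleans (done_txiqcal_once, done_txclcal_once, rtt_done) with a single cal_flags bitmap driven by set_bit/clear_bit/test_bit. A minimal userspace sketch of that pattern follows; the flag names mirror the driver's enum (which is defined outside this diff), while flag_set/flag_clear/flag_test are plain-C stand-ins for the kernel bitops, so treat it as an illustration rather than driver code.

#include <stdio.h>

/* Userspace stand-in for the cal_flags bitmap introduced above.  The flag
 * names mirror the driver's enum; the helpers emulate set_bit/clear_bit/
 * test_bit with plain shifts. */
enum cal_flag {
	TXIQCAL_DONE,
	TXCLCAL_DONE,
	RTT_DONE,
	SW_PKDET_DONE,
};

static void flag_set(unsigned long *flags, int bit)
{
	*flags |= 1UL << bit;
}

static void flag_clear(unsigned long *flags, int bit)
{
	*flags &= ~(1UL << bit);
}

static int flag_test(unsigned long flags, int bit)
{
	return !!(flags & (1UL << bit));
}

int main(void)
{
	unsigned long cal_flags = 0;

	flag_set(&cal_flags, TXIQCAL_DONE);   /* was: done_txiqcal_once = true  */
	flag_set(&cal_flags, SW_PKDET_DONE);
	flag_clear(&cal_flags, TXIQCAL_DONE); /* was: done_txiqcal_once = false */

	printf("txiq done: %d, pkdet done: %d\n",
	       flag_test(cal_flags, TXIQCAL_DONE),
	       flag_test(cal_flags, SW_PKDET_DONE));
	return 0;
}
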
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index f4864807e15b..1ec52356b5a1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -2991,7 +2991,10 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
case EEP_CHAIN_MASK_REDUCE:
return (pBase->miscConfiguration >> 0x3) & 0x1;
case EEP_ANT_DIV_CTL1:
- return eep->base_ext1.ant_div_control;
+ if (AR_SREV_9565(ah))
+ return AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE;
+ else
+ return eep->base_ext1.ant_div_control;
case EEP_ANTENNA_GAIN_5G:
return eep->modalHeader5G.antennaGain;
case EEP_ANTENNA_GAIN_2G:
@@ -3424,12 +3427,12 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct ar9300_base_eep_hdr *pBase;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader2G);
- len += snprintf(buf + len, size - len,
- "%20s :\n", "5GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "5GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader5G);
goto out;
@@ -3479,8 +3482,8 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Rx Gain", pBase->txrxgain & 0xf);
PR_EEP("SW Reg", le32_to_cpu(pBase->swreg));
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- ah->eeprom.ar9300_eep.macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ ah->eeprom.ar9300_eep.macAddr);
out:
if (len > size)
len = size;
@@ -3656,9 +3659,23 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9565(ah)) {
if (common->bt_ant_diversity) {
regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+ REG_SET_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+
+ /* Force WLAN LNA diversity ON */
+ REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
} else {
regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ (1 << AR_PHY_ANT_SW_RX_PROT_S));
+
+ /* Force WLAN LNA diversity OFF */
+ REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
}
}
@@ -3669,7 +3686,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
regval &= (~AR_FAST_DIV_ENABLE);
regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
- if (AR_SREV_9485(ah) && common->bt_ant_diversity)
+ if ((AR_SREV_9485(ah) || AR_SREV_9565(ah))
+ && common->bt_ant_diversity)
regval |= AR_FAST_DIV_ENABLE;
REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
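
Several hunks in this file (and in debug.c further down) switch buffer dumps from snprintf() to scnprintf(). The distinction matters when the return value is accumulated into an offset: snprintf() reports the length the output would have had, so len can run past size and make the next size - len underflow, while scnprintf() reports only what was actually stored. The userspace approximation below (my_scnprintf is just a demo helper, not the kernel function) is there only to show that behaviour.

#include <stdarg.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's scnprintf(): returns the number of
 * characters actually placed in buf (excluding the trailing NUL), never
 * more than size - 1, so "len += ..." can never run past the buffer. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int ret;

	if (size == 0)
		return 0;
	va_start(ap, fmt);
	ret = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (ret < 0)
		return 0;
	return (size_t)ret >= size ? (int)(size - 1) : ret;
}

int main(void)
{
	char buf[16];
	size_t len = 0;

	/* snprintf() would return 23 here and push len past the buffer;
	 * the scnprintf semantics cap the increment at the space left. */
	len += my_scnprintf(buf + len, sizeof(buf) - len,
			    "%20s :\n", "2GHz modal Header");
	len += my_scnprintf(buf + len, sizeof(buf) - len,
			    "%20s :\n", "5GHz modal Header");
	printf("wrote %zu bytes: \"%s\"\n", len, buf);
	return 0;
}
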
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 75d4fb41962f..0e5daa58a4fc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -52,6 +52,8 @@
#define AR9300_PAPRD_SCALE_2 0x70000000
#define AR9300_PAPRD_SCALE_2_S 28
+#define AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE 0xc9
+
/* Delta from which to start power to pdadc table */
/* This offset is used in both open loop and closed loop power control
* schemes. In open loop power control, it is not really needed, but for
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 608bb4824e2a..b07f164d65cf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -364,6 +364,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9565_1p0_modes_fast_clock);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9565_1p0_baseband_core_txfir_coeff_japan_2484);
} else {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -628,6 +630,9 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_common_rx_gain_table_2p0);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p0_Common_rx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9300Common_rx_gain_table_2p2);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 8dd069259e7b..7b94a6c7db3d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -753,9 +753,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
if (caldata) {
- caldata->done_txiqcal_once = false;
- caldata->done_txclcal_once = false;
- caldata->rtt_done = false;
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
+ clear_bit(RTT_DONE, &caldata->cal_flags);
}
if (!ath9k_hw_init_cal(ah, chan))
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index e897648d3233..11f53589a3f3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -551,8 +551,7 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
if (IS_CHAN_HT40(chan)) {
phymode |= AR_PHY_GC_DYN2040_EN;
/* Configure control (primary) channel at +-10MHz */
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS))
+ if (IS_CHAN_HT40PLUS(chan))
phymode |= AR_PHY_GC_DYN2040_PRI_CH;
}
@@ -565,7 +564,7 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);
/* Configure MAC for 20/40 operation */
- ath9k_hw_set11nmac2040(ah);
+ ath9k_hw_set11nmac2040(ah, chan);
/* global transmit timeout (25 TUs default)*/
REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
@@ -627,11 +626,10 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
* MAC addr only will fail.
*/
val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
- REG_WRITE(ah, AR_PCU_MISC_MODE2,
- val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
-
- REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+ val |= AR_AGG_WEP_ENABLE_FIX |
+ AR_AGG_WEP_ENABLE |
+ AR_PCU_MISC_MODE2_CFP_IGNORE;
+ REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
@@ -683,41 +681,22 @@ static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
{
int ret;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- if (chan->channel <= 5350)
- ret = 1;
- else if ((chan->channel > 5350) && (chan->channel <= 5600))
- ret = 3;
- else
- ret = 5;
- break;
-
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- if (chan->channel <= 5350)
- ret = 2;
- else if ((chan->channel > 5350) && (chan->channel <= 5600))
- ret = 4;
+ if (IS_CHAN_2GHZ(chan)) {
+ if (IS_CHAN_HT40(chan))
+ return 7;
else
- ret = 6;
- break;
-
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- ret = 8;
- break;
+ return 8;
+ }
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- ret = 7;
- break;
+ if (chan->channel <= 5350)
+ ret = 1;
+ else if ((chan->channel > 5350) && (chan->channel <= 5600))
+ ret = 3;
+ else
+ ret = 5;
- default:
- ret = -EINVAL;
- }
+ if (IS_CHAN_HT40(chan))
+ ret++;
return ret;
}
@@ -728,28 +707,10 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
unsigned int regWrites = 0, i;
u32 modesIndex;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- break;
-
- default:
- return -EINVAL;
- }
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
/*
* SOC, MAC, BB, RADIO initvals.
@@ -847,8 +808,10 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
if (chan == NULL)
return;
- rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
- ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+ if (IS_CHAN_2GHZ(chan))
+ rfMode |= AR_PHY_MODE_DYNAMIC;
+ else
+ rfMode |= AR_PHY_MODE_OFDM;
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
@@ -1274,12 +1237,11 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
aniState = &ah->ani;
iniDef = &aniState->iniDef;
- ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
- chan->channel,
- chan->channelFlags);
+ chan->channel);
val = REG_READ(ah, AR_PHY_SFCORR);
iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
@@ -1375,15 +1337,19 @@ static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
AR_PHY_ANT_FAST_DIV_BIAS_S;
if (AR_SREV_9330_11(ah)) {
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -9;
antconf->div_group = 1;
} else if (AR_SREV_9485(ah)) {
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -9;
antconf->div_group = 2;
} else if (AR_SREV_9565(ah)) {
- antconf->lna1_lna2_delta = -3;
+ antconf->lna1_lna2_switch_delta = 3;
+ antconf->lna1_lna2_delta = -9;
antconf->div_group = 3;
} else {
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -3;
antconf->div_group = 0;
}
@@ -1489,17 +1455,24 @@ static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
} else if (AR_SREV_9565(ah)) {
if (enable) {
REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ AR_ANT_DIV_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
(1 << AR_PHY_ANT_SW_RX_PROT_S));
- if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
- REG_SET_BIT(ah, AR_PHY_RESTART,
- AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+ REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
+ AR_FAST_DIV_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
AR_BTCOEX_WL_LNADIV_FORCE_ON);
} else {
- REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ AR_ANT_DIV_ENABLE);
REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
(1 << AR_PHY_ANT_SW_RX_PROT_S));
- REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_CCK_DETECT,
+ AR_FAST_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
AR_BTCOEX_WL_LNADIV_FORCE_ON);
@@ -1526,28 +1499,10 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
unsigned int regWrites = 0;
u32 modesIndex;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- break;
-
- default:
- return -EINVAL;
- }
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
if (modesIndex == ah->modes_index) {
*ini_reloaded = false;
@@ -1662,6 +1617,98 @@ static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah)
}
}
+static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+ REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+ REG_SET_BIT(ah, 0x9864, 0x7f000);
+ REG_SET_BIT(ah, 0x9924, 0x7f00fe);
+ REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+ REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20); /* 50 OK */
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
+ REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
+ REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
+ REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+}
+
+static void ar9003_hw_tx99_stop(struct ath_hw *ah)
+{
+ REG_CLR_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+}
+
+static void ar9003_hw_tx99_set_txpower(struct ath_hw *ah, u8 txpower)
+{
+ static s16 p_pwr_array[ar9300RateSize] = { 0 };
+ unsigned int i;
+
+ if (txpower <= MAX_RATE_POWER) {
+ for (i = 0; i < ar9300RateSize; i++)
+ p_pwr_array[i] = txpower;
+ } else {
+ for (i = 0; i < ar9300RateSize; i++)
+ p_pwr_array[i] = MAX_RATE_POWER;
+ }
+
+ REG_WRITE(ah, 0xa458, 0);
+
+ REG_WRITE(ah, 0xa3c0,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
+ REG_WRITE(ah, 0xa3c4,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_54], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_48], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_36], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
+ REG_WRITE(ah, 0xa3c8,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
+ REG_WRITE(ah, 0xa3cc,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11S], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11L], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_5S], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
+ REG_WRITE(ah, 0xa3d0,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_5], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_4], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_1_3_9_11_17_19], 8)|
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_0_8_16], 0));
+ REG_WRITE(ah, 0xa3d4,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_13], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_12], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_7], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_6], 0));
+ REG_WRITE(ah, 0xa3e4,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_21], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_20], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_15], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_14], 0));
+ REG_WRITE(ah, 0xa3e8,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_23], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_22], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_23], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_22], 0));
+ REG_WRITE(ah, 0xa3d8,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_5], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_4], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_0_8_16], 0));
+ REG_WRITE(ah, 0xa3dc,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_13], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_12], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_7], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_6], 0));
+ REG_WRITE(ah, 0xa3ec,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_21], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_20], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_15], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_14], 0));
+}
+
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1701,6 +1748,9 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity;
#endif
+ ops->tx99_start = ar9003_hw_tx99_start;
+ ops->tx99_stop = ar9003_hw_tx99_stop;
+ ops->tx99_set_txpower = ar9003_hw_tx99_set_txpower;
ar9003_hw_set_nf_limits(ah);
ar9003_hw_set_radar_conf(ah);
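
Both ar9003_hw_process_ini() and ar9003_hw_fast_chan_change() above collapse the old CHANNEL_* switch into two predicates, and the resulting index selects a column of the five-column initvals tables (Addr, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20) that appear later in this diff. A standalone sketch of that mapping follows; modes_index is a demo helper and the sample row is borrowed from an initvals row touched elsewhere in this diff, so only the column choice mirrors the driver.

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the modesIndex selection used twice in the patch above. */
static unsigned int modes_index(bool is_5ghz, bool is_ht40)
{
	if (is_5ghz)
		return is_ht40 ? 2 : 1;  /* 5G_HT40 : 5G_HT20 column */
	return is_ht40 ? 3 : 4;          /* 2G_HT40 : 2G_HT20 column */
}

int main(void)
{
	/* Addr, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20 */
	const unsigned int row[5] = { 0x9e20, 0x3b5, 0x3b5, 0x3a4, 0x3a4 };
	unsigned int idx = modes_index(false, true); /* 2GHz HT40 */

	printf("reg 0x%04x <- 0x%03x (column %u)\n", row[0], row[idx], idx);
	return 0;
}
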
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 6fd752321e36..fca624322dc8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -343,8 +343,12 @@
#define AR_PHY_CCA_NOM_VAL_9462_2GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_2GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ -95
#define AR_PHY_CCA_NOM_VAL_9462_5GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_5GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ -100
#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 74de3539c2c8..934418872e8e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -118,6 +118,27 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
}
}
+static void ar9003_hw_patch_rtt(struct ath_hw *ah, int index, int chain)
+{
+ int agc, caldac;
+
+ if (!test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+ return;
+
+ if ((index != 5) || (chain >= 2))
+ return;
+
+ agc = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE);
+ if (!agc)
+ return;
+
+ caldac = ah->caldata->caldac[chain];
+ ah->caldata->rtt_table[chain][index] &= 0xFFFF05FF;
+ caldac = (caldac & 0x20) | ((caldac & 0x1F) << 7);
+ ah->caldata->rtt_table[chain][index] |= (caldac << 4);
+}
+
static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
{
u32 val;
@@ -155,13 +176,16 @@ void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
ah->caldata->rtt_table[chain][i] =
ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
+
+ ar9003_hw_patch_rtt(ah, i, chain);
+
ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"RTT value at idx %d, chain %d is: 0x%x\n",
i, chain, ah->caldata->rtt_table[chain][i]);
}
}
- ah->caldata->rtt_done = true;
+ set_bit(RTT_DONE, &ah->caldata->cal_flags);
}
void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
@@ -176,7 +200,7 @@ void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
}
if (ah->caldata)
- ah->caldata->rtt_done = false;
+ clear_bit(RTT_DONE, &ah->caldata->cal_flags);
}
bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
@@ -186,11 +210,37 @@ bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
if (!ah->caldata)
return false;
- if (!ah->caldata->rtt_done)
+ if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags)) {
+ if (IS_CHAN_2GHZ(chan)) {
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+ ah->caldata->caldac[0]);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+ ah->caldata->caldac[1]);
+ } else {
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+ ah->caldata->caldac[0]);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+ ah->caldata->caldac[1]);
+ }
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+ }
+
+ if (!test_bit(RTT_DONE, &ah->caldata->cal_flags))
return false;
ar9003_hw_rtt_enable(ah);
- ar9003_hw_rtt_set_mask(ah, 0x10);
+
+ if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+ ar9003_hw_rtt_set_mask(ah, 0x30);
+ else
+ ar9003_hw_rtt_set_mask(ah, 0x10);
if (!ath9k_hw_rfbus_req(ah)) {
ath_err(ath9k_hw_common(ah), "Could not stop baseband\n");
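
ar9003_hw_patch_rtt() above splices a saved caldac value into an RTT history word: the mask 0xFFFF05FF clears bit 9 and bits 11-15, and (caldac & 0x20) | ((caldac & 0x1F) << 7), shifted left by 4, lands the caldac's top bit in bit 9 and its low five bits in bits 11-15. The register field layout is not visible in this patch, so that reading of the bit positions is only what the arithmetic implies; the sketch below (patch_entry is a demo helper) just reproduces the computation so it can be checked in isolation.

#include <stdint.h>
#include <stdio.h>

/* Reproduces the caldac insertion from ar9003_hw_patch_rtt() above so the
 * bit positions can be inspected.  Input values are arbitrary. */
static uint32_t patch_entry(uint32_t entry, uint32_t caldac)
{
	uint32_t packed = (caldac & 0x20) | ((caldac & 0x1F) << 7);

	entry &= 0xFFFF05FF;   /* clear bit 9 and bits 11-15 */
	entry |= packed << 4;  /* caldac bit 5 -> bit 9, bits 0-4 -> bits 11-15 */
	return entry;
}

int main(void)
{
	uint32_t before = 0xFFFFFFFF;
	uint32_t after = patch_entry(before, 0x2A); /* caldac = 0b101010 */

	printf("before 0x%08X after 0x%08X\n", before, after);
	return 0;
}
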
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 88ff1d7b53ab..6f899c692647 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -20,7 +20,17 @@
/* AR9485 1.1 */
-#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
+static const u32 ar9485_1_1_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
/* Addr allmodes */
@@ -34,6 +44,7 @@ static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
{0x00009e00, 0x037216a0},
{0x00009e04, 0x00182020},
{0x00009e18, 0x00000000},
+ {0x00009e20, 0x000003a8},
{0x00009e2c, 0x00004121},
{0x00009e44, 0x02282324},
{0x0000a000, 0x00060005},
@@ -174,7 +185,7 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@@ -200,14 +211,14 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -263,6 +274,11 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
@@ -297,6 +313,22 @@ static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -341,6 +373,100 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
{0x0000a2e0, 0x00000000, 0x00000000, 0xffc63a84, 0xffc63a84},
{0x0000a2e4, 0x00000000, 0x00000000, 0xfe0fc000, 0xfe0fc000},
{0x0000a2e8, 0x00000000, 0x00000000, 0xfff00000, 0xfff00000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
@@ -427,7 +553,7 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
-static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
@@ -521,12 +647,15 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
-#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
-
static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xffad452a, 0xffad452a},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xffc98634, 0xffc98634},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfff60780, 0xfff60780},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfffff800, 0xfffff800},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
{0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
@@ -543,23 +672,39 @@ static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
{0x0000a530, 0x48023ec6, 0x48023ec6, 0x310006e0, 0x310006e0},
{0x0000a534, 0x4d023f01, 0x4d023f01, 0x330006e0, 0x330006e0},
{0x0000a538, 0x53023f4b, 0x53023f4b, 0x3e0008e3, 0x3e0008e3},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x410008e5, 0x410008e5},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x430008e6, 0x430008e6},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4a0008ec, 0x4a0008ec},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4e0008f1, 0x4e0008f1},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x520008f3, 0x520008f3},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x54000eed, 0x54000eed},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x58000ef1, 0x58000ef1},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5c000ef3, 0x5c000ef3},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x60000ef5, 0x60000ef5},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x62000ef6, 0x62000ef6},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x430008e6, 0x430008e6},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4a0008ec, 0x4a0008ec},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4e0008f1, 0x4e0008f1},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x520008f3, 0x520008f3},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x54000eed, 0x54000eed},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x58000ef1, 0x58000ef1},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5c000ef3, 0x5c000ef3},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001ff0, 0x66001ff0},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x68001ff6, 0x68001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x01804000, 0x01804000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a594, 0x00000000, 0x00000000, 0x0340ca02, 0x0340ca02},
+ {0x0000a598, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x06415304, 0x06415304},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x04c11905, 0x04c11905},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
{0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -823,6 +968,7 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
{0x00009e00, 0x03721b20},
{0x00009e04, 0x00082020},
{0x00009e18, 0x0300501e},
+ {0x00009e20, 0x000003ba},
{0x00009e2c, 0x00002e21},
{0x00009e44, 0x02182324},
{0x0000a000, 0x00060005},
@@ -1001,7 +1147,6 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
{0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1020,7 +1165,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1206,6 +1351,11 @@ static const u32 ar9485_1_1_mac_core[][2] = {
{0x000083d0, 0x000301ff},
};
-#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
#endif /* INITVALS_9485_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index e85a8b076c22..a8c757b6124f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -272,9 +272,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
{0x0000a398, 0x001f0e0f},
{0x0000a39c, 0x0075393f},
{0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x00000000},
- {0x0000a3a8, 0xaaaaaaaa},
- {0x0000a3ac, 0x3c466478},
+ {0x0000a3a4, 0x00000011},
+ {0x0000a3a8, 0xaaaaaa6e},
+ {0x0000a3ac, 0x3c466455},
{0x0000a3c0, 0x20202020},
{0x0000a3c4, 0x22222220},
{0x0000a3c8, 0x20200020},
@@ -295,11 +295,11 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
{0x0000a404, 0x00000000},
{0x0000a408, 0x0e79e5c6},
{0x0000a40c, 0x00820820},
- {0x0000a414, 0x1ce739ce},
+ {0x0000a414, 0x1ce739c5},
{0x0000a418, 0x2d001dce},
- {0x0000a41c, 0x1ce739ce},
+ {0x0000a41c, 0x1ce739c5},
{0x0000a420, 0x000001ce},
- {0x0000a424, 0x1ce739ce},
+ {0x0000a424, 0x1ce739c5},
{0x0000a428, 0x000001ce},
{0x0000a42c, 0x1ce739ce},
{0x0000a430, 0x1ce739ce},
@@ -351,9 +351,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -452,6 +452,7 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
/* Addr allmodes */
{0x00004050, 0x00300300},
{0x0000406c, 0x00100000},
+ {0x00009e20, 0x000003b6},
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
{0x0000a008, 0x00050004},
@@ -1230,4 +1231,11 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
+static const u32 ar9565_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
#endif /* INITVALS_9565_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 2ee35f677c0e..e7a38d844a6a 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -64,7 +64,6 @@ struct ath_node;
struct ath_config {
u16 txpowlimit;
- u8 cabqReadytime;
};
/*************************/
@@ -207,6 +206,14 @@ struct ath_frame_info {
u8 baw_tracked : 1;
};
+struct ath_rxbuf {
+ struct list_head list;
+ struct sk_buff *bf_mpdu;
+ void *bf_desc;
+ dma_addr_t bf_daddr;
+ dma_addr_t bf_buf_addr;
+};
+
struct ath_buf_state {
u8 bf_type;
u8 bfs_paprd;
@@ -307,7 +314,7 @@ struct ath_rx {
struct ath_descdma rxdma;
struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
- struct ath_buf *buf_hold;
+ struct ath_rxbuf *buf_hold;
struct sk_buff *frag;
u32 ampdu_ref;
@@ -459,8 +466,8 @@ void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
#define ATH_DUMP_BTCOEX(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, \
- "%20s : %10d\n", _s, (_val)); \
+ len += scnprintf(buf + len, size - len, \
+ "%20s : %10d\n", _s, (_val)); \
} while (0)
enum bt_op_flags {
@@ -581,7 +588,6 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50
-#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
@@ -626,12 +632,15 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
/* Main driver core */
/********************/
-#define ATH9K_PCI_CUS198 0x0001
-#define ATH9K_PCI_CUS230 0x0002
-#define ATH9K_PCI_CUS217 0x0004
-#define ATH9K_PCI_WOW 0x0008
-#define ATH9K_PCI_BT_ANT_DIV 0x0010
-#define ATH9K_PCI_D3_L1_WAR 0x0020
+#define ATH9K_PCI_CUS198 0x0001
+#define ATH9K_PCI_CUS230 0x0002
+#define ATH9K_PCI_CUS217 0x0004
+#define ATH9K_PCI_CUS252 0x0008
+#define ATH9K_PCI_WOW 0x0010
+#define ATH9K_PCI_BT_ANT_DIV 0x0020
+#define ATH9K_PCI_D3_L1_WAR 0x0040
+#define ATH9K_PCI_AR9565_1ANT 0x0080
+#define ATH9K_PCI_AR9565_2ANT 0x0100
/*
* Default cache line size, in bytes.
@@ -769,6 +778,11 @@ struct ath_softc {
enum spectral_mode spectral_mode;
struct ath_spec_scan spec_config;
+ struct ieee80211_vif *tx99_vif;
+ struct sk_buff *tx99_skb;
+ bool tx99_state;
+ s16 tx99_power;
+
#ifdef CONFIG_PM_SLEEP
atomic_t wow_got_bmiss_intr;
atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */
@@ -877,6 +891,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
*/
enum ath_fft_sample_type {
ATH_FFT_SAMPLE_HT20 = 1,
+ ATH_FFT_SAMPLE_HT20_40,
};
struct fft_sample_tlv {
@@ -903,6 +918,39 @@ struct fft_sample_ht20 {
u8 data[SPECTRAL_HT20_NUM_BINS];
} __packed;
+struct fft_sample_ht20_40 {
+ struct fft_sample_tlv tlv;
+
+ u8 channel_type;
+ __be16 freq;
+
+ s8 lower_rssi;
+ s8 upper_rssi;
+
+ __be64 tsf;
+
+ s8 lower_noise;
+ s8 upper_noise;
+
+ __be16 lower_max_magnitude;
+ __be16 upper_max_magnitude;
+
+ u8 lower_max_index;
+ u8 upper_max_index;
+
+ u8 lower_bitmap_weight;
+ u8 upper_bitmap_weight;
+
+ u8 max_exp;
+
+ u8 data[SPECTRAL_HT20_40_NUM_BINS];
+} __packed;
+
+int ath9k_tx99_init(struct ath_softc *sc);
+void ath9k_tx99_deinit(struct ath_softc *sc);
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_tx_control *txctl);
+
void ath9k_tasklet(unsigned long data);
int ath_cabq_update(struct ath_softc *);
@@ -924,7 +972,6 @@ void ath9k_deinit_device(struct ath_softc *sc);
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_reload_chainmask_settings(struct ath_softc *sc);
-bool ath9k_uses_beacons(int type);
void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
enum spectral_mode spectral_mode);
@@ -952,7 +999,7 @@ void ath9k_ps_restore(struct ath_softc *sc);
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
void ath_start_rfkill_poll(struct ath_softc *sc);
-extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ath9k_vif_iter_data *iter_data);
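
The new fft_sample_ht20_40 record above stores its multi-byte fields as __be16/__be64, i.e. big-endian regardless of host order, like the existing HT20 sample. The userspace sketch below uses a trimmed, invented field set only to show the byte-order handling a producer of such records needs; the glibc htobe16/htobe64 helpers stand in for the kernel's cpu_to_be*() conversions, and none of this is the driver's actual fill path.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Trimmed-down, big-endian-on-the-wire record in the spirit of
 * fft_sample_ht20_40; values and layout are illustrative only. */
struct sample {
	uint8_t  channel_type;
	uint16_t freq;  /* __be16 in the driver */
	uint64_t tsf;   /* __be64 in the driver */
	int8_t   lower_rssi;
	int8_t   upper_rssi;
} __attribute__((packed));

int main(void)
{
	struct sample s = {
		.channel_type = 1,
		.freq = htobe16(5745),           /* store big-endian */
		.tsf = htobe64(0x123456789abcULL),
		.lower_rssi = -62,
		.upper_rssi = -59,
	};

	printf("freq raw 0x%04x, host value %u\n", s.freq, be16toh(s.freq));
	return 0;
}
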
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b5c16b3a37b9..17be35392bb4 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -334,6 +334,8 @@ void ath9k_beacon_tasklet(unsigned long data)
if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
sc->beacon.bmisscnt++;
+ ath9k_hw_check_nav(ah);
+
if (!ath9k_hw_check_alive(ah))
ieee80211_queue_work(sc->hw, &sc->hw_check_work);
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 5e8219a91e25..278365b8a895 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -63,13 +63,13 @@ static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
return ath9k_hw_get_nf_limits(ah, chan)->nominal;
}
-s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
+ s16 nf)
{
s8 noise = ATH_DEFAULT_NOISE_FLOOR;
- if (chan && chan->noisefloor) {
- s8 delta = chan->noisefloor -
- ATH9K_NF_CAL_NOISE_THRESH -
+ if (nf) {
+ s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH -
ath9k_hw_get_default_nf(ah, chan);
if (delta > 0)
noise += delta;
@@ -119,7 +119,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
ath_dbg(common, CALIBRATE,
"NFmid[%d] (%d) > MAX (%d), %s\n",
i, h[i].privNF, limit->max,
- (cal->nfcal_interference ?
+ (test_bit(NFCAL_INTF, &cal->cal_flags) ?
"not corrected (due to interference)" :
"correcting to MAX"));
@@ -130,7 +130,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
* we bypass this limit here in order to better deal
* with our environment.
*/
- if (!cal->nfcal_interference)
+ if (!test_bit(NFCAL_INTF, &cal->cal_flags))
h[i].privNF = limit->max;
}
}
@@ -141,7 +141,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
* Re-enable the enforcement of the NF maximum again.
*/
if (!high_nf_mid)
- cal->nfcal_interference = false;
+ clear_bit(NFCAL_INTF, &cal->cal_flags);
}
static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
@@ -186,7 +186,6 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah,
bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
struct ath9k_cal_list *currCal = ah->cal_list_curr;
if (!ah->caldata)
@@ -208,7 +207,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
return true;
ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
- currCal->calData->calType, conf->chandef.chan->center_freq);
+ currCal->calData->calType, ah->curchan->chan->center_freq);
ah->caldata->CalValid &= ~currCal->calData->calType;
currCal->calState = CAL_WAITING;
@@ -220,7 +219,7 @@ EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
{
if (ah->caldata)
- ah->caldata->nfcal_pending = true;
+ set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_ENABLE_NF);
@@ -242,7 +241,6 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
int32_t val;
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
if (ah->caldata)
@@ -252,7 +250,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
if (chainmask & (1 << i)) {
s16 nfval;
- if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
+ if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
continue;
if (h)
@@ -314,7 +312,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
ENABLE_REGWRITE_BUFFER(ah);
for (i = 0; i < NUM_NF_READINGS; i++) {
if (chainmask & (1 << i)) {
- if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
+ if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
continue;
val = REG_READ(ah, ah->nf_regs[i]);
@@ -391,10 +389,10 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
}
h = caldata->nfCalHist;
- caldata->nfcal_pending = false;
+ clear_bit(NFCAL_PENDING, &caldata->cal_flags);
ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
chan->noisefloor = h[0].privNF;
- ah->noise = ath9k_hw_getchan_noise(ah, chan);
+ ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
return true;
}
EXPORT_SYMBOL(ath9k_hw_getnf);
@@ -408,7 +406,6 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
ah->caldata->channel = chan->channel;
ah->caldata->channelFlags = chan->channelFlags;
- ah->caldata->chanmode = chan->chanmode;
h = ah->caldata->nfCalHist;
default_nf = ath9k_hw_get_default_nf(ah, chan);
for (i = 0; i < NUM_NF_READINGS; i++) {
@@ -437,12 +434,12 @@ void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
* the baseband update the internal NF value itself, similar to
* what is being done after a full reset.
*/
- if (!caldata->nfcal_pending)
+ if (!test_bit(NFCAL_PENDING, &caldata->cal_flags))
ath9k_hw_start_nfcal(ah, true);
else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
ath9k_hw_getnf(ah, ah->curchan);
- caldata->nfcal_interference = true;
+ set_bit(NFCAL_INTF, &caldata->cal_flags);
}
EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);
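
ath9k_hw_getchan_noise() now takes the noise-floor reading as an explicit parameter instead of reading chan->noisefloor, but the arithmetic is unchanged: start from the default noise floor and add only the positive part of (nf - threshold - nominal nf). The sketch below restates that with placeholder constants; -95 dBm and a 5 dB threshold are assumptions, since the real ATH_DEFAULT_NOISE_FLOOR and ATH9K_NF_CAL_NOISE_THRESH values live in headers outside this diff.

#include <stdio.h>

/* Sketch of the noise computation in ath9k_hw_getchan_noise() above.
 * Both constants are placeholders, not taken from the driver headers. */
#define DEFAULT_NOISE_FLOOR  (-95) /* assumed ATH_DEFAULT_NOISE_FLOOR */
#define NF_CAL_NOISE_THRESH  5     /* assumed ATH9K_NF_CAL_NOISE_THRESH */

static int chan_noise(int nf, int nominal_nf)
{
	int noise = DEFAULT_NOISE_FLOOR;

	if (nf) {
		int delta = nf - NF_CAL_NOISE_THRESH - nominal_nf;

		if (delta > 0)
			noise += delta; /* only a worse-than-nominal NF raises the estimate */
	}
	return noise;
}

int main(void)
{
	/* measured NF of -105 dBm against a nominal of -118 dBm -> -87 dBm */
	printf("noise: %d dBm\n", chan_noise(-105, -118));
	return 0;
}
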
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 3d70b8c2bcdd..b8ed95e9a335 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -116,7 +116,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
void ath9k_hw_bstuck_nfcal(struct ath_hw *ah);
void ath9k_hw_reset_calibration(struct ath_hw *ah,
struct ath9k_cal_list *currCal);
-s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
+ s16 nf);
#endif /* CALIB_H */
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index d3063c21e16c..a7e5a05b2eff 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -49,103 +49,64 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
}
EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
-static u32 ath9k_get_extchanmode(struct cfg80211_chan_def *chandef)
-{
- u32 chanmode = 0;
-
- switch (chandef->chan->band) {
- case IEEE80211_BAND_2GHZ:
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- chanmode = CHANNEL_G_HT20;
- break;
- case NL80211_CHAN_WIDTH_40:
- if (chandef->center_freq1 > chandef->chan->center_freq)
- chanmode = CHANNEL_G_HT40PLUS;
- else
- chanmode = CHANNEL_G_HT40MINUS;
- break;
- default:
- break;
- }
- break;
- case IEEE80211_BAND_5GHZ:
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- chanmode = CHANNEL_A_HT20;
- break;
- case NL80211_CHAN_WIDTH_40:
- if (chandef->center_freq1 > chandef->chan->center_freq)
- chanmode = CHANNEL_A_HT40PLUS;
- else
- chanmode = CHANNEL_A_HT40MINUS;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
-
- return chanmode;
-}
-
/*
* Update internal channel flags.
*/
-void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
- struct cfg80211_chan_def *chandef)
+static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
+ struct cfg80211_chan_def *chandef)
{
- ichan->channel = chandef->chan->center_freq;
- ichan->chan = chandef->chan;
-
- if (chandef->chan->band == IEEE80211_BAND_2GHZ) {
- ichan->chanmode = CHANNEL_G;
- ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
- } else {
- ichan->chanmode = CHANNEL_A;
- ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
- }
+ struct ieee80211_channel *chan = chandef->chan;
+ u16 flags = 0;
+
+ ichan->channel = chan->center_freq;
+ ichan->chan = chan;
+
+ if (chan->band == IEEE80211_BAND_5GHZ)
+ flags |= CHANNEL_5GHZ;
switch (chandef->width) {
case NL80211_CHAN_WIDTH_5:
- ichan->channelFlags |= CHANNEL_QUARTER;
+ flags |= CHANNEL_QUARTER;
break;
case NL80211_CHAN_WIDTH_10:
- ichan->channelFlags |= CHANNEL_HALF;
+ flags |= CHANNEL_HALF;
break;
case NL80211_CHAN_WIDTH_20_NOHT:
break;
case NL80211_CHAN_WIDTH_20:
+ flags |= CHANNEL_HT;
+ break;
case NL80211_CHAN_WIDTH_40:
- ichan->chanmode = ath9k_get_extchanmode(chandef);
+ if (chandef->center_freq1 > chandef->chan->center_freq)
+ flags |= CHANNEL_HT40PLUS | CHANNEL_HT;
+ else
+ flags |= CHANNEL_HT40MINUS | CHANNEL_HT;
break;
default:
WARN_ON(1);
}
+
+ ichan->channelFlags = flags;
}
-EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
/*
* Get the internal channel reference.
*/
-struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
- struct ath_hw *ah)
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+ struct ath_hw *ah,
+ struct cfg80211_chan_def *chandef)
{
- struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+ struct ieee80211_channel *curchan = chandef->chan;
struct ath9k_channel *channel;
u8 chan_idx;
chan_idx = curchan->hw_value;
channel = &ah->channels[chan_idx];
- ath9k_cmn_update_ichannel(channel, &hw->conf.chandef);
+ ath9k_cmn_update_ichannel(channel, chandef);
return channel;
}
-EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
+EXPORT_SYMBOL(ath9k_cmn_get_channel);
int ath9k_cmn_count_streams(unsigned int chainmask, int max)
{
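
The rewritten ath9k_cmn_update_ichannel() above derives channelFlags directly from the cfg80211 band and width instead of picking a CHANNEL_* chanmode. A standalone sketch of that mapping follows; the CH_* flag values and the chan_flags helper are invented for the demo (the real CHANNEL_* bits are defined in the hw headers), only the decision logic mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the flag derivation in ath9k_cmn_update_ichannel() above.
 * Flag values are made up; only the mapping logic follows the driver. */
#define CH_5GHZ      0x0001
#define CH_HALF      0x0002
#define CH_QUARTER   0x0004
#define CH_HT        0x0008
#define CH_HT40PLUS  0x0010
#define CH_HT40MINUS 0x0020

enum width { W5, W10, W20_NOHT, W20, W40 };

static unsigned int chan_flags(bool is_5ghz, enum width w, bool plus)
{
	unsigned int flags = is_5ghz ? CH_5GHZ : 0;

	switch (w) {
	case W5:       flags |= CH_QUARTER; break;
	case W10:      flags |= CH_HALF; break;
	case W20_NOHT: break;
	case W20:      flags |= CH_HT; break;
	case W40:      flags |= CH_HT | (plus ? CH_HT40PLUS : CH_HT40MINUS); break;
	}
	return flags;
}

int main(void)
{
	printf("5GHz HT40+ -> 0x%02x\n", chan_flags(true, W40, true));
	printf("2GHz HT20  -> 0x%02x\n", chan_flags(false, W20, false));
	return 0;
}
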
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index e039bcbfbd79..eb85e1bdca88 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -43,10 +43,9 @@
(((x) + ((mul)/2)) / (mul))
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
-void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
- struct cfg80211_chan_def *chandef);
-struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
- struct ath_hw *ah);
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+ struct ath_hw *ah,
+ struct cfg80211_chan_def *chandef);
int ath9k_cmn_count_streams(unsigned int chainmask, int max);
void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
enum ath_stomp_type stomp_type);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index c088744a6bfb..83a2c59f680b 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -104,37 +104,37 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
return -ENOMEM;
if (common->disable_ani) {
- len += snprintf(buf + len, size - len, "%s: %s\n",
- "ANI", "DISABLED");
+ len += scnprintf(buf + len, size - len, "%s: %s\n",
+ "ANI", "DISABLED");
goto exit;
}
- len += snprintf(buf + len, size - len, "%15s: %s\n",
- "ANI", "ENABLED");
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "ANI RESET", ah->stats.ast_ani_reset);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "SPUR UP", ah->stats.ast_ani_spurup);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "SPUR DOWN", ah->stats.ast_ani_spurup);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "MRC-CCK ON", ah->stats.ast_ani_ccklow);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "FIR-STEP UP", ah->stats.ast_ani_stepup);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "CCK ERRORS", ah->stats.ast_ani_cckerrs);
+ len += scnprintf(buf + len, size - len, "%15s: %s\n",
+ "ANI", "ENABLED");
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "ANI RESET", ah->stats.ast_ani_reset);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "SPUR UP", ah->stats.ast_ani_spurup);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "SPUR DOWN", ah->stats.ast_ani_spurup);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "MRC-CCK ON", ah->stats.ast_ani_ccklow);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "FIR-STEP UP", ah->stats.ast_ani_stepup);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "CCK ERRORS", ah->stats.ast_ani_cckerrs);
exit:
if (len > size)
len = size;
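
The snprintf() to scnprintf() conversion that runs through this and the following hunks matters for the `len += ...(buf + len, size - len, ...)` accumulation pattern: snprintf() returns the length the output *would* have had, so once the buffer fills up, len can grow past size and the next `size - len` underflows; scnprintf() returns only the bytes actually stored, keeping len bounded by the buffer. A user-space illustration follows, with the kernel's scnprintf() stood in by a small local wrapper.

/* User-space illustration of why the accumulation pattern wants scnprintf
 * semantics; scnprintf() here is a local stand-in for the kernel helper. */
#include <stdarg.h>
#include <stdio.h>

static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (ret < 0)
		return 0;
	if ((size_t)ret < size)
		return ret;
	/* Output was truncated: report only what was stored (minus NUL). */
	return size ? (int)size - 1 : 0;
}

int main(void)
{
	char buf[16];
	size_t size = sizeof(buf);
	unsigned int len = 0;

	/* With snprintf, len could overshoot size and size - len would wrap. */
	len += scnprintf(buf + len, size - len, "%s", "0123456789");
	len += scnprintf(buf + len, size - len, "%s", "abcdefghij");
	printf("len=%u buf=\"%s\"\n", len, buf);	/* len stays <= 15 */
	return 0;
}
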
@@ -280,70 +280,70 @@ static ssize_t read_file_antenna_diversity(struct file *file,
return -ENOMEM;
if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
- len += snprintf(buf + len, size - len, "%s\n",
- "Antenna Diversity Combining is disabled");
+ len += scnprintf(buf + len, size - len, "%s\n",
+ "Antenna Diversity Combining is disabled");
goto exit;
}
ath9k_ps_wakeup(sc);
ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
- len += snprintf(buf + len, size - len, "Current MAIN config : %s\n",
- lna_conf_str[div_ant_conf.main_lna_conf]);
- len += snprintf(buf + len, size - len, "Current ALT config : %s\n",
- lna_conf_str[div_ant_conf.alt_lna_conf]);
- len += snprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
- as_main->rssi_avg);
- len += snprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
- as_alt->rssi_avg);
+ len += scnprintf(buf + len, size - len, "Current MAIN config : %s\n",
+ lna_conf_str[div_ant_conf.main_lna_conf]);
+ len += scnprintf(buf + len, size - len, "Current ALT config : %s\n",
+ lna_conf_str[div_ant_conf.alt_lna_conf]);
+ len += scnprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
+ as_main->rssi_avg);
+ len += scnprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
+ as_alt->rssi_avg);
ath9k_ps_restore(sc);
- len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n");
- len += snprintf(buf + len, size - len, "-------------------\n");
-
- len += snprintf(buf + len, size - len, "%30s%15s\n",
- "MAIN", "ALT");
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "TOTAL COUNT",
- as_main->recv_cnt,
- as_alt->recv_cnt);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA2",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 + LNA2",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 - LNA2",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
-
- len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
- len += snprintf(buf + len, size - len, "--------------------\n");
-
- len += snprintf(buf + len, size - len, "%30s%15s\n",
- "MAIN", "ALT");
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA2",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 + LNA2",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 - LNA2",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "Packet Receive Cnt:\n");
+ len += scnprintf(buf + len, size - len, "-------------------\n");
+
+ len += scnprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "TOTAL COUNT",
+ as_main->recv_cnt,
+ as_alt->recv_cnt);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+
+ len += scnprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
+ len += scnprintf(buf + len, size - len, "--------------------\n");
+
+ len += scnprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
exit:
if (len > size)
@@ -385,21 +385,21 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(AR_MACMISC_MISC_OBS_BUS_1 <<
AR_MACMISC_MISC_OBS_BUS_MSB_S)));
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "Raw DMA Debug values:\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "Raw DMA Debug values:\n");
for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
if (i % 4 == 0)
- len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
- len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
- i, val[i]);
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
+ i, val[i]);
}
- len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
if (i == 8) {
@@ -412,39 +412,39 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
dcuBase++;
}
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "%2d %2x %1x %2x %2x\n",
- i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
- (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
- val[2] & (0x7 << (i * 3)) >> (i * 3),
- (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "%2d %2x %1x %2x %2x\n",
+ i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
+ (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
+ val[2] & (0x7 << (i * 3)) >> (i * 3),
+ (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
}
- len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"qcu_stitch state: %2x qcu_fetch state: %2x\n",
(val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"qcu_complete state: %2x dcu_complete state: %2x\n",
(val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"dcu_arb state: %2x dcu_fp state: %2x\n",
(val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
(val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
(val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
- len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
- REG_READ_D(ah, AR_OBS_BUS_1));
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
+ REG_READ_D(ah, AR_OBS_BUS_1));
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
@@ -530,9 +530,9 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
#define PR_IS(a, s) \
do { \
- len += snprintf(buf + len, mxlen - len, \
- "%21s: %10u\n", a, \
- sc->debug.stats.istats.s); \
+ len += scnprintf(buf + len, mxlen - len, \
+ "%21s: %10u\n", a, \
+ sc->debug.stats.istats.s); \
} while (0)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@@ -563,8 +563,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
PR_IS("GENTIMER", gen_timer);
PR_IS("TOTAL", total);
- len += snprintf(buf + len, mxlen - len,
- "SYNC_CAUSE stats:\n");
+ len += scnprintf(buf + len, mxlen - len,
+ "SYNC_CAUSE stats:\n");
PR_IS("Sync-All", sync_cause_all);
PR_IS("RTC-IRQ", sync_rtc_irq);
@@ -655,16 +655,16 @@ static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
ath_txq_lock(sc, txq);
- len += snprintf(buf + len, size - len, "%s: %d ",
- "qnum", txq->axq_qnum);
- len += snprintf(buf + len, size - len, "%s: %2d ",
- "qdepth", txq->axq_depth);
- len += snprintf(buf + len, size - len, "%s: %2d ",
- "ampdu-depth", txq->axq_ampdu_depth);
- len += snprintf(buf + len, size - len, "%s: %3d ",
- "pending", txq->pending_frames);
- len += snprintf(buf + len, size - len, "%s: %d\n",
- "stopped", txq->stopped);
+ len += scnprintf(buf + len, size - len, "%s: %d ",
+ "qnum", txq->axq_qnum);
+ len += scnprintf(buf + len, size - len, "%s: %2d ",
+ "qdepth", txq->axq_depth);
+ len += scnprintf(buf + len, size - len, "%s: %2d ",
+ "ampdu-depth", txq->axq_ampdu_depth);
+ len += scnprintf(buf + len, size - len, "%s: %3d ",
+ "pending", txq->pending_frames);
+ len += scnprintf(buf + len, size - len, "%s: %d\n",
+ "stopped", txq->stopped);
ath_txq_unlock(sc, txq);
return len;
@@ -687,11 +687,11 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = sc->tx.txq_map[i];
- len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
+ len += scnprintf(buf + len, size - len, "(%s): ", qname[i]);
len += print_queue(sc, txq, buf + len, size - len);
}
- len += snprintf(buf + len, size - len, "(CAB): ");
+ len += scnprintf(buf + len, size - len, "(CAB): ");
len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
if (len > size)
@@ -716,80 +716,82 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
unsigned int reg;
u32 rxfilter;
- len += snprintf(buf + len, sizeof(buf) - len,
- "BSSID: %pM\n", common->curbssid);
- len += snprintf(buf + len, sizeof(buf) - len,
- "BSSID-MASK: %pM\n", common->bssidmask);
- len += snprintf(buf + len, sizeof(buf) - len,
- "OPMODE: %s\n", ath_opmode_to_string(sc->sc_ah->opmode));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "BSSID: %pM\n", common->curbssid);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "BSSID-MASK: %pM\n", common->bssidmask);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "OPMODE: %s\n",
+ ath_opmode_to_string(sc->sc_ah->opmode));
ath9k_ps_wakeup(sc);
rxfilter = ath9k_hw_getrxfilter(sc->sc_ah);
ath9k_ps_restore(sc);
- len += snprintf(buf + len, sizeof(buf) - len,
- "RXFILTER: 0x%x", rxfilter);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "RXFILTER: 0x%x", rxfilter);
if (rxfilter & ATH9K_RX_FILTER_UCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " UCAST");
if (rxfilter & ATH9K_RX_FILTER_MCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MCAST");
if (rxfilter & ATH9K_RX_FILTER_BCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BCAST");
if (rxfilter & ATH9K_RX_FILTER_CONTROL)
- len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL");
if (rxfilter & ATH9K_RX_FILTER_BEACON)
- len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BEACON");
if (rxfilter & ATH9K_RX_FILTER_PROM)
- len += snprintf(buf + len, sizeof(buf) - len, " PROM");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PROM");
if (rxfilter & ATH9K_RX_FILTER_PROBEREQ)
- len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
if (rxfilter & ATH9K_RX_FILTER_PHYERR)
- len += snprintf(buf + len, sizeof(buf) - len, " PHYERR");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PHYERR");
if (rxfilter & ATH9K_RX_FILTER_MYBEACON)
- len += snprintf(buf + len, sizeof(buf) - len, " MYBEACON");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MYBEACON");
if (rxfilter & ATH9K_RX_FILTER_COMP_BAR)
- len += snprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
+ len += scnprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
if (rxfilter & ATH9K_RX_FILTER_PSPOLL)
- len += snprintf(buf + len, sizeof(buf) - len, " PSPOLL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PSPOLL");
if (rxfilter & ATH9K_RX_FILTER_PHYRADAR)
- len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
if (rxfilter & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
- len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
if (rxfilter & ATH9K_RX_FILTER_CONTROL_WRAPPER)
- len += snprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\n");
reg = sc->sc_ah->imask;
- len += snprintf(buf + len, sizeof(buf) - len, "INTERRUPT-MASK: 0x%x", reg);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "INTERRUPT-MASK: 0x%x", reg);
if (reg & ATH9K_INT_SWBA)
- len += snprintf(buf + len, sizeof(buf) - len, " SWBA");
+ len += scnprintf(buf + len, sizeof(buf) - len, " SWBA");
if (reg & ATH9K_INT_BMISS)
- len += snprintf(buf + len, sizeof(buf) - len, " BMISS");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BMISS");
if (reg & ATH9K_INT_CST)
- len += snprintf(buf + len, sizeof(buf) - len, " CST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CST");
if (reg & ATH9K_INT_RX)
- len += snprintf(buf + len, sizeof(buf) - len, " RX");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RX");
if (reg & ATH9K_INT_RXHP)
- len += snprintf(buf + len, sizeof(buf) - len, " RXHP");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RXHP");
if (reg & ATH9K_INT_RXLP)
- len += snprintf(buf + len, sizeof(buf) - len, " RXLP");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RXLP");
if (reg & ATH9K_INT_BB_WATCHDOG)
- len += snprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\n");
ath9k_calculate_iter_data(hw, NULL, &iter_data);
- len += snprintf(buf + len, sizeof(buf) - len,
- "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
- " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
- iter_data.naps, iter_data.nstations, iter_data.nmeshes,
- iter_data.nwds, iter_data.nadhocs,
- sc->nvifs, sc->nbcnvifs);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
+ " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
+ iter_data.naps, iter_data.nstations, iter_data.nmeshes,
+ iter_data.nwds, iter_data.nadhocs,
+ sc->nvifs, sc->nbcnvifs);
if (len > sizeof(buf))
len = sizeof(buf);
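
read_file_misc() ends, like the other readers here, by clamping the running length to the stack buffer before handing it to the copy-out helper; with scnprintf() the clamp can no longer trigger, but it remains a cheap belt-and-braces guard. Below is a user-space model of the clamp-then-copy step, with simple_read_from_buffer() replaced by a plain bounded copy and invented contents.

/* Stand-in for simple_read_from_buffer(): copy at most count bytes of the
 * available data starting at *ppos, and advance the position. */
#include <stdio.h>
#include <string.h>

static size_t copy_out(char *dst, size_t count, size_t *ppos,
		       const char *src, size_t available)
{
	size_t pos = *ppos, n;

	if (pos >= available)
		return 0;
	n = available - pos;
	if (n > count)
		n = count;
	memcpy(dst, src + pos, n);
	*ppos = pos + n;
	return n;
}

int main(void)
{
	char buf[16] = "RXFILTER: 0x1\n";
	size_t size = sizeof(buf);
	size_t len = 20;	/* pretend the formatting total overshot */
	size_t pos = 0;
	char out[32];
	size_t n;

	if (len > size)		/* the clamp from the hunk above */
		len = size;

	n = copy_out(out, sizeof(out), &pos, buf, len);
	printf("copied %zu bytes\n", n);
	return 0;
}
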
@@ -805,27 +807,27 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Baseband Hang",
- sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Baseband Watchdog",
- sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Fatal HW Error",
- sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "TX HW error",
- sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "TX Path Hang",
- sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "PLL RX Hang",
- sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "MCI Reset",
- sc->debug.stats.reset[RESET_TYPE_MCI]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Baseband Hang",
+ sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Baseband Watchdog",
+ sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Fatal HW Error",
+ sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "TX HW error",
+ sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "TX Path Hang",
+ sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "PLL RX Hang",
+ sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "MCI Reset",
+ sc->debug.stats.reset[RESET_TYPE_MCI]);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -902,14 +904,14 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
#define PHY_ERR(s, p) \
- len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \
- sc->debug.stats.rxstats.phy_err_stats[p]);
+ len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
+ sc->debug.stats.rxstats.phy_err_stats[p]);
#define RXS_ERR(s, e) \
do { \
- len += snprintf(buf + len, size - len, \
- "%22s : %10u\n", s, \
- sc->debug.stats.rxstats.e); \
+ len += scnprintf(buf + len, size - len, \
+ "%22s : %10u\n", s, \
+ sc->debug.stats.rxstats.e);\
} while (0)
struct ath_softc *sc = file->private_data;
@@ -1048,6 +1050,9 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
char buf[32];
ssize_t len;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return -EOPNOTSUPP;
+
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
@@ -1439,22 +1444,22 @@ static ssize_t read_file_dump_nfcal(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "Channel Noise Floor : %d\n", ah->noise);
- len += snprintf(buf + len, size - len,
- "Chain | privNF | # Readings | NF Readings\n");
+ len += scnprintf(buf + len, size - len,
+ "Channel Noise Floor : %d\n", ah->noise);
+ len += scnprintf(buf + len, size - len,
+ "Chain | privNF | # Readings | NF Readings\n");
for (i = 0; i < NUM_NF_READINGS; i++) {
if (!(chainmask & (1 << i)) ||
((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
continue;
nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount;
- len += snprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
- i, h[i].privNF, nread);
+ len += scnprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
+ i, h[i].privNF, nread);
for (j = 0; j < nread; j++)
- len += snprintf(buf + len, size - len,
- " %d", h[i].nfCalBuffer[j]);
- len += snprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " %d", h[i].nfCalBuffer[j]);
+ len += scnprintf(buf + len, size - len, "\n");
}
if (len > size)
@@ -1543,8 +1548,8 @@ static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
return -ENOMEM;
if (!sc->sc_ah->common.btcoex_enabled) {
- len = snprintf(buf, size, "%s\n",
- "BTCOEX is disabled");
+ len = scnprintf(buf, size, "%s\n",
+ "BTCOEX is disabled");
goto exit;
}
@@ -1582,43 +1587,43 @@ static ssize_t read_file_node_stat(struct file *file, char __user *user_buf,
return -ENOMEM;
if (!an->sta->ht_cap.ht_supported) {
- len = snprintf(buf, size, "%s\n",
- "HT not supported");
+ len = scnprintf(buf, size, "%s\n",
+ "HT not supported");
goto exit;
}
- len = snprintf(buf, size, "Max-AMPDU: %d\n",
- an->maxampdu);
- len += snprintf(buf + len, size - len, "MPDU Density: %d\n\n",
- an->mpdudensity);
+ len = scnprintf(buf, size, "Max-AMPDU: %d\n",
+ an->maxampdu);
+ len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
+ an->mpdudensity);
- len += snprintf(buf + len, size - len,
- "%2s%7s\n", "AC", "SCHED");
+ len += scnprintf(buf + len, size - len,
+ "%2s%7s\n", "AC", "SCHED");
for (acno = 0, ac = &an->ac[acno];
acno < IEEE80211_NUM_ACS; acno++, ac++) {
txq = ac->txq;
ath_txq_lock(sc, txq);
- len += snprintf(buf + len, size - len,
- "%2d%7d\n",
- acno, ac->sched);
+ len += scnprintf(buf + len, size - len,
+ "%2d%7d\n",
+ acno, ac->sched);
ath_txq_unlock(sc, txq);
}
- len += snprintf(buf + len, size - len,
- "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
- "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
- "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
+ len += scnprintf(buf + len, size - len,
+ "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
+ "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
+ "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
txq = tid->ac->txq;
ath_txq_lock(sc, txq);
- len += snprintf(buf + len, size - len,
- "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
- tid->tidno, tid->seq_start, tid->seq_next,
- tid->baw_size, tid->baw_head, tid->baw_tail,
- tid->bar_index, tid->sched, tid->paused);
+ len += scnprintf(buf + len, size - len,
+ "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+ tid->tidno, tid->seq_start, tid->seq_next,
+ tid->baw_size, tid->baw_head, tid->baw_tail,
+ tid->bar_index, tid->sched, tid->paused);
ath_txq_unlock(sc, txq);
}
exit:
@@ -1773,6 +1778,111 @@ void ath9k_deinit_debug(struct ath_softc *sc)
}
}
+static ssize_t read_file_tx99(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[3];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->tx99_state);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ bool start;
+ ssize_t len;
+ int r;
+
+ if (sc->nvifs > 1)
+ return -EOPNOTSUPP;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ if (strtobool(buf, &start))
+ return -EINVAL;
+
+ if (start == sc->tx99_state) {
+ if (!start)
+ return count;
+ ath_dbg(common, XMIT, "Resetting TX99\n");
+ ath9k_tx99_deinit(sc);
+ }
+
+ if (!start) {
+ ath9k_tx99_deinit(sc);
+ return count;
+ }
+
+ r = ath9k_tx99_init(sc);
+ if (r)
+ return r;
+
+ return count;
+}
+
+static const struct file_operations fops_tx99 = {
+ .read = read_file_tx99,
+ .write = write_file_tx99,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
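
The tx99 file pair follows the standard debugfs pattern: a small read handler that formats the state through simple_read_from_buffer(), and a write handler that copies the user buffer, parses it with strtobool() and flips the state. The following is a self-contained, module-style sketch of the same shape under invented names (directory, file, state variable); note that the sketch explicitly NUL-terminates the copied buffer before parsing.

/* Hedged sketch of a debugfs on/off file in the style of the tx99 handlers
 * above. Names are invented and error handling is trimmed to essentials. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/string.h>

static struct dentry *demo_dir;
static bool demo_state;

static ssize_t demo_read(struct file *file, char __user *user_buf,
			 size_t count, loff_t *ppos)
{
	char buf[3];
	unsigned int len = sprintf(buf, "%d\n", demo_state);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t demo_write(struct file *file, const char __user *user_buf,
			  size_t count, loff_t *ppos)
{
	char buf[32];
	size_t len = min(count, sizeof(buf) - 1);

	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;
	buf[len] = '\0';

	if (strtobool(buf, &demo_state))
		return -EINVAL;

	return count;
}

static const struct file_operations demo_fops = {
	.read = demo_read,
	.write = demo_write,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo_tx99", NULL);
	debugfs_create_file("enable", S_IRUSR | S_IWUSR, demo_dir, NULL,
			    &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
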
+
+static ssize_t read_file_tx99_power(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d (%d dBm)\n",
+ sc->tx99_power,
+ sc->tx99_power / 2);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99_power(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ int r;
+ u8 tx_power;
+
+ r = kstrtou8_from_user(user_buf, count, 0, &tx_power);
+ if (r)
+ return r;
+
+ if (tx_power > MAX_RATE_POWER)
+ return -EINVAL;
+
+ sc->tx99_power = tx_power;
+
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_tx99_set_txpower(sc->sc_ah, sc->tx99_power);
+ ath9k_ps_restore(sc);
+
+ return count;
+}
+
+static const struct file_operations fops_tx99_power = {
+ .read = read_file_tx99_power,
+ .write = write_file_tx99_power,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
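
For the numeric tx99_power file, the write path above parses the value straight out of the user buffer with kstrtou8_from_user() and range-checks it before applying. A fragment-level sketch of that shape follows; the cap value and names are invented, and MAX_RATE_POWER (the driver's real limit) is not reproduced here.

/* Fragment sketch of a numeric debugfs write path, assuming invented names. */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_write_u8(struct file *file, const char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	u8 val;
	int r;

	/* Parses decimal (or 0x-prefixed, since base is 0) input directly
	 * from user space, so no local bounce buffer is needed. */
	r = kstrtou8_from_user(user_buf, count, 0, &val);
	if (r)
		return r;

	if (val > 63)		/* illustrative cap, not MAX_RATE_POWER */
		return -EINVAL;

	/* ...apply val to the hardware here... */
	return count;
}
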
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1864,5 +1974,15 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_btcoex);
#endif
+ if (config_enabled(CONFIG_ATH9K_TX99) &&
+ AR_SREV_9300_20_OR_LATER(ah)) {
+ debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_tx99);
+ debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_tx99_power);
+ }
+
return 0;
}
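
Registration of the two tx99 files above is gated both on the Kconfig option (config_enabled(CONFIG_ATH9K_TX99), the compile-time Kconfig test of this era) and on the hardware generation (AR_SREV_9300_20_OR_LATER). A hedged sketch of the same gating follows, using the generic IS_ENABLED() helper and invented names.

/* Minimal illustration of conditional debugfs registration keyed off a
 * Kconfig symbol; CONFIG_DEMO_FEATURE and the file name are invented. */
#include <linux/kernel.h>
#include <linux/debugfs.h>

static void demo_register_optional(struct dentry *parent, void *priv,
				   const struct file_operations *fops)
{
	/* IS_ENABLED() folds to 0 or 1 at compile time, so the branch and
	 * the code it guards are dropped entirely when the option is off. */
	if (IS_ENABLED(CONFIG_DEMO_FEATURE))
		debugfs_create_file("demo", S_IRUSR | S_IWUSR, parent, priv,
				    fops);
}
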
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 6e1556fa2f3e..d6e3fa4299a4 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -193,12 +193,12 @@ struct ath_tx_stats {
#define TXSTATS sc->debug.stats.txstats
#define PR(str, elem) \
do { \
- len += snprintf(buf + len, size - len, \
- "%s%13u%11u%10u%10u\n", str, \
- TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem, \
- TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem, \
- TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem, \
- TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
+ len += scnprintf(buf + len, size - len, \
+ "%s%13u%11u%10u%10u\n", str, \
+ TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
} while(0)
#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
index 3c839f06a06a..c6fa3d5b5d74 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.h
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -17,7 +17,7 @@
#ifndef ATH9K_DFS_H
#define ATH9K_DFS_H
-#include "dfs_pattern_detector.h"
+#include "../dfs_pattern_detector.h"
#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
/**
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 3c6e4138a95d..90b8342d1ed4 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -20,16 +20,16 @@
#include "ath9k.h"
#include "dfs_debug.h"
+#include "../dfs_pattern_detector.h"
-
-struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 };
+static struct ath_dfs_pool_stats dfs_pool_stats = { 0 };
#define ATH9K_DFS_STAT(s, p) \
- len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
- sc->debug.stats.dfs_stats.p);
+ len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ sc->debug.stats.dfs_stats.p);
#define ATH9K_DFS_POOL_STAT(s, p) \
- len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
- global_dfs_pool_stats.p);
+ len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ dfs_pool_stats.p);
static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -44,12 +44,15 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len, "DFS support for "
- "macVersion = 0x%x, macRev = 0x%x: %s\n",
- hw_ver->macVersion, hw_ver->macRev,
- (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
+ if (sc->dfs_detector)
+ dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector);
+
+ len += scnprintf(buf + len, size - len, "DFS support for "
+ "macVersion = 0x%x, macRev = 0x%x: %s\n",
+ hw_ver->macVersion, hw_ver->macRev,
+ (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
"enabled" : "disabled");
- len += snprintf(buf + len, size - len, "Pulse detector statistics:\n");
+ len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
ATH9K_DFS_STAT("pulse events reported ", pulses_total);
ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs);
ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
@@ -59,11 +62,12 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
- len += snprintf(buf + len, size - len, "Radar detector statistics "
- "(current DFS region: %d)\n", sc->dfs_detector->region);
+ len += scnprintf(buf + len, size - len, "Radar detector statistics "
+ "(current DFS region: %d)\n",
+ sc->dfs_detector->region);
ATH9K_DFS_STAT("Pulse events processed ", pulses_processed);
ATH9K_DFS_STAT("Radars detected ", radar_detected);
- len += snprintf(buf + len, size - len, "Global Pool statistics:\n");
+ len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
ATH9K_DFS_POOL_STAT("Pool references ", pool_reference);
ATH9K_DFS_POOL_STAT("Pulses allocated ", pulse_allocated);
ATH9K_DFS_POOL_STAT("Pulses alloc error ", pulse_alloc_error);
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
index e36810a4b585..0a7ddf4c88c9 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -51,25 +51,11 @@ struct ath_dfs_stats {
u32 radar_detected;
};
-/**
- * struct ath_dfs_pool_stats - DFS Statistics for global pools
- */
-struct ath_dfs_pool_stats {
- u32 pool_reference;
- u32 pulse_allocated;
- u32 pulse_alloc_error;
- u32 pulse_used;
- u32 pseq_allocated;
- u32 pseq_alloc_error;
- u32 pseq_used;
-};
#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
void ath9k_dfs_init_debug(struct ath_softc *sc);
-#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
-#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
extern struct ath_dfs_pool_stats global_dfs_pool_stats;
#else
@@ -77,8 +63,6 @@ extern struct ath_dfs_pool_stats global_dfs_pool_stats;
#define DFS_STAT_INC(sc, c) do { } while (0)
static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
-#define DFS_POOL_STAT_INC(c) do { } while (0)
-#define DFS_POOL_STAT_DEC(c) do { } while (0)
#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
#endif /* ATH9K_DFS_DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9ea8e4b779c9..b4091716e9b3 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -129,10 +129,10 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_header_4k *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ath9k_dump_4k_modal_eeprom(buf, len, size,
- &eep->modalHeader);
+ &eep->modalHeader);
goto out;
}
@@ -160,8 +160,8 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
PR_EEP("TX Gain type", pBase->txGainType);
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
out:
if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 3ae1f3df0637..e1d0c217c104 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -125,8 +125,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ar9287_dump_modal_eeprom(buf, len, size,
&eep->modalHeader);
goto out;
@@ -157,8 +157,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Power Table Offset", pBase->pwrTableOffset);
PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
out:
if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 1c25368b3836..39107e31e79a 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -205,12 +205,12 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_header *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[0]);
- len += snprintf(buf + len, size - len,
- "%20s :\n", "5GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "5GHz modal Header");
len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[1]);
goto out;
@@ -240,8 +240,8 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
out:
if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 4b412aaf4f36..c34f21241da9 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -522,22 +522,22 @@ static int ath9k_dump_mci_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx);
ATH_DUMP_BTCOEX("Concurrent RSSI cnt", btcoex->rssi_count);
- len += snprintf(buf + len, size - len, "BT Weights: ");
+ len += scnprintf(buf + len, size - len, "BT Weights: ");
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
- len += snprintf(buf + len, size - len, "%08x ",
- btcoex_hw->bt_weight[i]);
- len += snprintf(buf + len, size - len, "\n");
- len += snprintf(buf + len, size - len, "WLAN Weights: ");
+ len += scnprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->bt_weight[i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "WLAN Weights: ");
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
- len += snprintf(buf + len, size - len, "%08x ",
- btcoex_hw->wlan_weight[i]);
- len += snprintf(buf + len, size - len, "\n");
- len += snprintf(buf + len, size - len, "Tx Priorities: ");
+ len += scnprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->wlan_weight[i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "Tx Priorities: ");
for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
- len += snprintf(buf + len, size - len, "%08x ",
+ len += scnprintf(buf + len, size - len, "%08x ",
btcoex_hw->tx_prio[i]);
- len += snprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "\n");
return len;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index c1b45e2f8481..fb071ee4fcfb 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -37,29 +37,29 @@ static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "RX",
- be32_to_cpu(cmd_rsp.rx));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RX",
+ be32_to_cpu(cmd_rsp.rx));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "RXORN",
- be32_to_cpu(cmd_rsp.rxorn));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RXORN",
+ be32_to_cpu(cmd_rsp.rxorn));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "RXEOL",
- be32_to_cpu(cmd_rsp.rxeol));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RXEOL",
+ be32_to_cpu(cmd_rsp.rxeol));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "TXURN",
- be32_to_cpu(cmd_rsp.txurn));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TXURN",
+ be32_to_cpu(cmd_rsp.txurn));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "TXTO",
- be32_to_cpu(cmd_rsp.txto));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TXTO",
+ be32_to_cpu(cmd_rsp.txto));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "CST",
- be32_to_cpu(cmd_rsp.cst));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "CST",
+ be32_to_cpu(cmd_rsp.cst));
if (len > sizeof(buf))
len = sizeof(buf);
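
The target statistics above arrive from the firmware as big-endian 32-bit counters, hence the be32_to_cpu() around every field before printing. A portable stand-alone sketch of the conversion follows, with the kernel helper modelled by explicit byte shifts.

/* Minimal illustration of the be32_to_cpu() conversions above; the kernel
 * helper is modelled with a portable byte reassembly. */
#include <stdio.h>
#include <stdint.h>

static uint32_t be32_to_host(const uint8_t b[4])
{
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
	/* 0x0000002a as it arrives on the wire (big-endian). */
	const uint8_t wire[4] = { 0x00, 0x00, 0x00, 0x2a };

	printf("%20s : %10u\n", "RX", (unsigned int)be32_to_host(wire));
	return 0;
}
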
@@ -95,41 +95,41 @@ static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Xretries",
- be32_to_cpu(cmd_rsp.xretries));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Xretries",
+ be32_to_cpu(cmd_rsp.xretries));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "FifoErr",
- be32_to_cpu(cmd_rsp.fifoerr));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "FifoErr",
+ be32_to_cpu(cmd_rsp.fifoerr));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Filtered",
- be32_to_cpu(cmd_rsp.filtered));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Filtered",
+ be32_to_cpu(cmd_rsp.filtered));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "TimerExp",
- be32_to_cpu(cmd_rsp.timer_exp));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TimerExp",
+ be32_to_cpu(cmd_rsp.timer_exp));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "ShortRetries",
- be32_to_cpu(cmd_rsp.shortretries));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "ShortRetries",
+ be32_to_cpu(cmd_rsp.shortretries));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "LongRetries",
- be32_to_cpu(cmd_rsp.longretries));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "LongRetries",
+ be32_to_cpu(cmd_rsp.longretries));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "QueueNull",
- be32_to_cpu(cmd_rsp.qnull));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "QueueNull",
+ be32_to_cpu(cmd_rsp.qnull));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "EncapFail",
- be32_to_cpu(cmd_rsp.encap_fail));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "EncapFail",
+ be32_to_cpu(cmd_rsp.encap_fail));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "NoBuf",
- be32_to_cpu(cmd_rsp.nobuf));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "NoBuf",
+ be32_to_cpu(cmd_rsp.nobuf));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -165,17 +165,17 @@ static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "NoBuf",
- be32_to_cpu(cmd_rsp.nobuf));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "NoBuf",
+ be32_to_cpu(cmd_rsp.nobuf));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "HostSend",
- be32_to_cpu(cmd_rsp.host_send));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "HostSend",
+ be32_to_cpu(cmd_rsp.host_send));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "HostDone",
- be32_to_cpu(cmd_rsp.host_done));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "HostDone",
+ be32_to_cpu(cmd_rsp.host_done));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -197,37 +197,37 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Buffers queued",
- priv->debug.tx_stats.buf_queued);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Buffers completed",
- priv->debug.tx_stats.buf_completed);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "SKBs queued",
- priv->debug.tx_stats.skb_queued);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "SKBs success",
- priv->debug.tx_stats.skb_success);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "SKBs failed",
- priv->debug.tx_stats.skb_failed);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "CAB queued",
- priv->debug.tx_stats.cab_queued);
-
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "BE queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "BK queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "VI queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "VO queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers queued",
+ priv->debug.tx_stats.buf_queued);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers completed",
+ priv->debug.tx_stats.buf_completed);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs queued",
+ priv->debug.tx_stats.skb_queued);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs success",
+ priv->debug.tx_stats.skb_success);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs failed",
+ priv->debug.tx_stats.skb_failed);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "CAB queued",
+ priv->debug.tx_stats.cab_queued);
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "BE queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "BK queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "VI queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "VO queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -273,8 +273,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
#define PHY_ERR(s, p) \
- len += snprintf(buf + len, size - len, "%20s : %10u\n", s, \
- priv->debug.rx_stats.err_phy_stats[p]);
+ len += scnprintf(buf + len, size - len, "%20s : %10u\n", s, \
+ priv->debug.rx_stats.err_phy_stats[p]);
struct ath9k_htc_priv *priv = file->private_data;
char *buf;
@@ -285,37 +285,37 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "SKBs allocated",
- priv->debug.rx_stats.skb_allocated);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "SKBs completed",
- priv->debug.rx_stats.skb_completed);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "SKBs Dropped",
- priv->debug.rx_stats.skb_dropped);
-
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "CRC ERR",
- priv->debug.rx_stats.err_crc);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "DECRYPT CRC ERR",
- priv->debug.rx_stats.err_decrypt_crc);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "MIC ERR",
- priv->debug.rx_stats.err_mic);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "PRE-DELIM CRC ERR",
- priv->debug.rx_stats.err_pre_delim);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "POST-DELIM CRC ERR",
- priv->debug.rx_stats.err_post_delim);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "DECRYPT BUSY ERR",
- priv->debug.rx_stats.err_decrypt_busy);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "TOTAL PHY ERR",
- priv->debug.rx_stats.err_phy);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs allocated",
+ priv->debug.rx_stats.skb_allocated);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs completed",
+ priv->debug.rx_stats.skb_completed);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs Dropped",
+ priv->debug.rx_stats.skb_dropped);
+
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "CRC ERR",
+ priv->debug.rx_stats.err_crc);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "DECRYPT CRC ERR",
+ priv->debug.rx_stats.err_decrypt_crc);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "MIC ERR",
+ priv->debug.rx_stats.err_mic);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "PRE-DELIM CRC ERR",
+ priv->debug.rx_stats.err_pre_delim);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "POST-DELIM CRC ERR",
+ priv->debug.rx_stats.err_post_delim);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "DECRYPT BUSY ERR",
+ priv->debug.rx_stats.err_decrypt_busy);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "TOTAL PHY ERR",
+ priv->debug.rx_stats.err_phy);
PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
@@ -372,16 +372,16 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf,
spin_lock_bh(&priv->tx.tx_lock);
- len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
+ len += scnprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
len += bitmap_scnprintf(buf + len, sizeof(buf) - len,
priv->tx.tx_slot, MAX_TX_BUF_NUM);
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\n");
- len += snprintf(buf + len, sizeof(buf) - len,
- "Used slots : %d\n",
- bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "Used slots : %d\n",
+ bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
spin_unlock_bh(&priv->tx.tx_lock);
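
read_file_slot() above prints the TX slot bitmap with bitmap_scnprintf() and counts the busy slots with bitmap_weight(). The same idea in a stand-alone sketch follows, with the kernel bitmap helpers replaced by plain loops and an invented slot count.

/* User-space sketch of the slot-bitmap reporting above; MAX_SLOTS and the
 * example bitmap are invented. */
#include <stdio.h>

#define MAX_SLOTS 32

int main(void)
{
	unsigned long tx_slot = 0xA5;	/* slots 0, 2, 5 and 7 in use */
	int used = 0;

	for (int i = 0; i < MAX_SLOTS; i++)
		if (tx_slot & (1UL << i))
			used++;

	printf("TX slot bitmap : %08lx\n", tx_slot);
	printf("Used slots     : %d\n", used);
	return 0;
}
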
@@ -405,30 +405,30 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Failed queue", skb_queue_len(&priv->tx.tx_failed));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Failed queue", skb_queue_len(&priv->tx.tx_failed));
spin_lock_bh(&priv->tx.tx_lock);
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Queued count", priv->tx.queued_cnt);
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Queued count", priv->tx.queued_cnt);
spin_unlock_bh(&priv->tx.tx_lock);
if (len > sizeof(buf))
@@ -507,70 +507,70 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Major Version",
- pBase->version >> 12);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Minor Version",
- pBase->version & 0xFFF);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Checksum",
- pBase->checksum);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Length",
- pBase->length);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "RegDomain1",
- pBase->regDmn[0]);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "RegDomain2",
- pBase->regDmn[1]);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "TX Mask", pBase->txMask);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "RX Mask", pBase->rxMask);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Allow 5GHz",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Allow 2GHz",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 2GHz HT20",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 2GHz HT40",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 5Ghz HT20",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 5Ghz HT40",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Big Endian",
- !!(pBase->eepMisc & 0x01));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Major Ver",
- (pBase->binBuildNumber >> 24) & 0xFF);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Minor Ver",
- (pBase->binBuildNumber >> 16) & 0xFF);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Build",
- (pBase->binBuildNumber >> 8) & 0xFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Major Version",
+ pBase->version >> 12);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Minor Version",
+ pBase->version & 0xFFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Checksum",
+ pBase->checksum);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Length",
+ pBase->length);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "RegDomain1",
+ pBase->regDmn[0]);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "RegDomain2",
+ pBase->regDmn[1]);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "TX Mask", pBase->txMask);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "RX Mask", pBase->rxMask);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Allow 5GHz",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Allow 2GHz",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 2GHz HT20",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 2GHz HT40",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 5Ghz HT20",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 5Ghz HT40",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Big Endian",
+ !!(pBase->eepMisc & 0x01));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Cal Bin Major Ver",
+ (pBase->binBuildNumber >> 24) & 0xFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Cal Bin Minor Ver",
+ (pBase->binBuildNumber >> 16) & 0xFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Cal Bin Build",
+ (pBase->binBuildNumber >> 8) & 0xFF);
/*
* UB91 specific data.
@@ -579,10 +579,10 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
struct base_eep_header_4k *pBase4k =
&priv->ah->eeprom.map4k.baseEepHeader;
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "TX Gain type",
- pBase4k->txGainType);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "TX Gain type",
+ pBase4k->txGainType);
}
/*
@@ -592,19 +592,19 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
struct base_eep_ar9287_header *pBase9287 =
&priv->ah->eeprom.map9287.baseEepHeader;
- len += snprintf(buf + len, size - len,
- "%20s : %10ddB\n",
- "Power Table Offset",
- pBase9287->pwrTableOffset);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10ddB\n",
+ "Power Table Offset",
+ pBase9287->pwrTableOffset);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "OpenLoop Power Ctrl",
- pBase9287->openLoopPwrCntl);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "OpenLoop Power Ctrl",
+ pBase9287->openLoopPwrCntl);
}
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
if (len > size)
len = size;
@@ -627,8 +627,8 @@ static ssize_t read_4k_modal_eeprom(struct file *file,
{
#define PR_EEP(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, "%20s : %10d\n", \
- _s, (_val)); \
+ len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+ _s, (_val)); \
} while (0)
struct ath9k_htc_priv *priv = file->private_data;
@@ -708,12 +708,12 @@ static ssize_t read_def_modal_eeprom(struct file *file,
do { \
if (pBase->opCapFlags & AR5416_OPFLAGS_11G) { \
pModal = &priv->ah->eeprom.def.modalHeader[1]; \
- len += snprintf(buf + len, size - len, "%20s : %8d%7s", \
- _s, (_val), "|"); \
+ len += scnprintf(buf + len, size - len, "%20s : %8d%7s", \
+ _s, (_val), "|"); \
} \
if (pBase->opCapFlags & AR5416_OPFLAGS_11A) { \
pModal = &priv->ah->eeprom.def.modalHeader[0]; \
- len += snprintf(buf + len, size - len, "%9d\n", \
+ len += scnprintf(buf + len, size - len, "%9d\n",\
(_val)); \
} \
} while (0)
@@ -729,10 +729,10 @@ static ssize_t read_def_modal_eeprom(struct file *file,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "%31s %15s\n", "2G", "5G");
- len += snprintf(buf + len, size - len,
- "%32s %16s\n", "====", "====\n");
+ len += scnprintf(buf + len, size - len,
+ "%31s %15s\n", "2G", "5G");
+ len += scnprintf(buf + len, size - len,
+ "%32s %16s\n", "====", "====\n");
PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
@@ -814,8 +814,8 @@ static ssize_t read_9287_modal_eeprom(struct file *file,
{
#define PR_EEP(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, "%20s : %10d\n", \
- _s, (_val)); \
+ len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+ _s, (_val)); \
} while (0)
struct ath9k_htc_priv *priv = file->private_data;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index d44258172c0f..9a2657fdd9cc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -24,30 +24,10 @@
static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
struct ath9k_channel *ichan)
{
- enum htc_phymode mode;
-
- mode = -EINVAL;
-
- switch (ichan->chanmode) {
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- mode = HTC_MODE_11NG;
- break;
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- mode = HTC_MODE_11NA;
- break;
- default:
- break;
- }
+ if (IS_CHAN_5GHZ(ichan))
+ return HTC_MODE_11NA;
- WARN_ON(mode < 0);
-
- return mode;
+ return HTC_MODE_11NG;
}
bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
@@ -926,7 +906,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
WMI_CMD(WMI_FLUSH_RECV_CMDID);
/* setup initial channel */
- init_channel = ath9k_cmn_get_curchannel(hw, ah);
+ init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (ret) {
@@ -1208,9 +1188,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
curchan->center_freq);
- ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
- &hw->conf.chandef);
-
+ ath9k_cmn_get_channel(hw, priv->ah, &hw->conf.chandef);
if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
ath_err(common, "Unable to set channel\n");
ret = -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 83f4927aeaca..4f9378ddf07f 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -78,6 +78,22 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
}
+static inline void ath9k_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+ ath9k_hw_ops(ah)->tx99_start(ah, qnum);
+}
+
+static inline void ath9k_hw_tx99_stop(struct ath_hw *ah)
+{
+ ath9k_hw_ops(ah)->tx99_stop(ah);
+}
+
+static inline void ath9k_hw_tx99_set_txpower(struct ath_hw *ah, u8 power)
+{
+ if (ath9k_hw_ops(ah)->tx99_set_txpower)
+ ath9k_hw_ops(ah)->tx99_set_txpower(ah, power);
+}
+
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ecc6ec4a1edb..54b04155e43b 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -130,29 +130,29 @@ void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause)
static void ath9k_hw_set_clockrate(struct ath_hw *ah)
{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
unsigned int clockrate;
/* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */
if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah))
clockrate = 117;
- else if (!ah->curchan) /* should really check for CCK instead */
+ else if (!chan) /* should really check for CCK instead */
clockrate = ATH9K_CLOCK_RATE_CCK;
- else if (conf->chandef.chan->band == IEEE80211_BAND_2GHZ)
+ else if (IS_CHAN_2GHZ(chan))
clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
else
clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
- if (conf_is_ht40(conf))
+ if (IS_CHAN_HT40(chan))
clockrate *= 2;
if (ah->curchan) {
- if (IS_CHAN_HALF_RATE(ah->curchan))
+ if (IS_CHAN_HALF_RATE(chan))
clockrate /= 2;
- if (IS_CHAN_QUARTER_RATE(ah->curchan))
+ if (IS_CHAN_QUARTER_RATE(chan))
clockrate /= 4;
}
@@ -190,10 +190,7 @@ EXPORT_SYMBOL(ath9k_hw_wait);
void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
int hw_delay)
{
- if (IS_CHAN_B(chan))
- hw_delay = (4 * hw_delay) / 22;
- else
- hw_delay /= 10;
+ hw_delay /= 10;
if (IS_CHAN_HALF_RATE(chan))
hw_delay *= 2;
@@ -294,8 +291,7 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
return;
}
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS)) {
+ if (IS_CHAN_HT40PLUS(chan)) {
centers->synth_center =
chan->channel + HT40_CHANNEL_CENTER_SHIFT;
extoff = 1;
@@ -549,6 +545,18 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ath9k_hw_ani_init(ah);
+ /*
+ * EEPROM needs to be initialized before we do this.
+ * This is required for regulatory compliance.
+ */
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ u16 regdmn = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
+ if ((regdmn & 0xF0) == CTL_FCC) {
+ ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ;
+ ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ;
+ }
+ }
+
return 0;
}
@@ -1030,7 +1038,6 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
void ath9k_hw_init_global_settings(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
const struct ath9k_channel *chan = ah->curchan;
int acktimeout, ctstimeout, ack_offset = 0;
int slottime;
@@ -1105,8 +1112,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
* BA frames in some implementations, but it has been found to fix ACK
* timeout issues in other cases as well.
*/
- if (conf->chandef.chan &&
- conf->chandef.chan->band == IEEE80211_BAND_2GHZ &&
+ if (IS_CHAN_2GHZ(chan) &&
!IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) {
acktimeout += 64 - sifstime - ah->slottime;
ctstimeout += 48 - sifstime - ah->slottime;
@@ -1148,9 +1154,7 @@ u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
{
u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
- if (IS_CHAN_B(chan))
- ctl |= CTL_11B;
- else if (IS_CHAN_G(chan))
+ if (IS_CHAN_2GHZ(chan))
ctl |= CTL_11G;
else
ctl |= CTL_11A;
@@ -1498,10 +1502,8 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
int r;
if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
- u32 cur = ah->curchan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
- u32 new = chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
- band_switch = (cur != new);
- mode_diff = (chan->chanmode != ah->curchan->chanmode);
+ band_switch = IS_CHAN_5GHZ(ah->curchan) != IS_CHAN_5GHZ(chan);
+ mode_diff = (chan->channelFlags != ah->curchan->channelFlags);
}
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1540,9 +1542,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
ath9k_hw_set_clockrate(ah);
ath9k_hw_apply_txpower(ah, chan, false);
- if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
- ath9k_hw_set_delta_slope(ah, chan);
-
+ ath9k_hw_set_delta_slope(ah, chan);
ath9k_hw_spur_mitigate_freq(ah, chan);
if (band_switch || ini_reloaded)
@@ -1644,6 +1644,19 @@ hang_check_iter:
return true;
}
+void ath9k_hw_check_nav(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 val;
+
+ val = REG_READ(ah, AR_NAV);
+ if (val != 0xdeadbeef && val > 0x7fff) {
+ ath_dbg(common, BSTUCK, "Abnormal NAV: 0x%x\n", val);
+ REG_WRITE(ah, AR_NAV, 0);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_check_nav);
+
bool ath9k_hw_check_alive(struct ath_hw *ah)
{
int count = 50;
@@ -1799,20 +1812,11 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
goto fail;
/*
- * If cross-band fcc is not supoprted, bail out if
- * either channelFlags or chanmode differ.
- *
- * chanmode will be different if the HT operating mode
- * changes because of CSA.
+ * If cross-band fcc is not supported, bail out if channelFlags differ.
*/
- if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH)) {
- if ((chan->channelFlags & CHANNEL_ALL) !=
- (ah->curchan->channelFlags & CHANNEL_ALL))
- goto fail;
-
- if (chan->chanmode != ah->curchan->chanmode)
- goto fail;
- }
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) &&
+ chan->channelFlags != ah->curchan->channelFlags)
+ goto fail;
if (!ath9k_hw_check_alive(ah))
goto fail;
@@ -1822,9 +1826,9 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
* re-using are present.
*/
if (AR_SREV_9462(ah) && (ah->caldata &&
- (!ah->caldata->done_txiqcal_once ||
- !ah->caldata->done_txclcal_once ||
- !ah->caldata->rtt_done)))
+ (!test_bit(TXIQCAL_DONE, &ah->caldata->cal_flags) ||
+ !test_bit(TXCLCAL_DONE, &ah->caldata->cal_flags) ||
+ !test_bit(RTT_DONE, &ah->caldata->cal_flags))))
goto fail;
ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
@@ -1874,15 +1878,14 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ah->caldata = caldata;
if (caldata && (chan->channel != caldata->channel ||
- chan->channelFlags != caldata->channelFlags ||
- chan->chanmode != caldata->chanmode)) {
+ chan->channelFlags != caldata->channelFlags)) {
/* Operating channel changed, reset channel calibration data */
memset(caldata, 0, sizeof(*caldata));
ath9k_init_nfcal_hist_buffer(ah, chan);
} else if (caldata) {
- caldata->paprd_packet_sent = false;
+ clear_bit(PAPRD_PACKET_SENT, &caldata->cal_flags);
}
- ah->noise = ath9k_hw_getchan_noise(ah, chan);
+ ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
if (fastcc) {
r = ath9k_hw_do_fastcc(ah, chan);
@@ -1964,9 +1967,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_mfp(ah);
- if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
- ath9k_hw_set_delta_slope(ah, chan);
-
+ ath9k_hw_set_delta_slope(ah, chan);
ath9k_hw_spur_mitigate_freq(ah, chan);
ah->eep_ops->set_board_values(ah, chan);
@@ -2017,8 +2018,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_bb(ah, chan);
if (caldata) {
- caldata->done_txiqcal_once = false;
- caldata->done_txclcal_once = false;
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
}
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
@@ -2943,12 +2944,11 @@ void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set)
}
EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
-void ath9k_hw_set11nmac2040(struct ath_hw *ah)
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan)
{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
u32 macmode;
- if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca)
+ if (IS_CHAN_HT40(chan) && !ah->config.cwm_ignore_extcca)
macmode = AR_2040_JOINED_RX_CLEAR;
else
macmode = 0;
@@ -3240,19 +3240,19 @@ void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
/* chipsets >= AR9280 are single-chip */
if (AR_SREV_9280_20_OR_LATER(ah)) {
- used = snprintf(hw_name, len,
- "Atheros AR%s Rev:%x",
- ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
- ah->hw_version.macRev);
+ used = scnprintf(hw_name, len,
+ "Atheros AR%s Rev:%x",
+ ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+ ah->hw_version.macRev);
}
else {
- used = snprintf(hw_name, len,
- "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
- ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
- ah->hw_version.macRev,
- ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
- AR_RADIO_SREV_MAJOR)),
- ah->hw_version.phyRev);
+ used = scnprintf(hw_name, len,
+ "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
+ ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+ ah->hw_version.macRev,
+ ath9k_hw_rf_name((ah->hw_version.analog5GhzRev
+ & AR_RADIO_SREV_MAJOR)),
+ ah->hw_version.phyRev);
}
hw_name[used] = '\0';
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 69a907b55a73..9ea24f1cba73 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -98,8 +98,8 @@
#define PR_EEP(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, "%20s : %10d\n", \
- _s, (_val)); \
+ len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+ _s, (_val)); \
} while (0)
#define SM(_v, _f) (((_v) << _f##_S) & _f)
@@ -369,55 +369,30 @@ enum ath9k_int {
ATH9K_INT_NOCARD = 0xffffffff
};
-#define CHANNEL_CCK 0x00020
-#define CHANNEL_OFDM 0x00040
-#define CHANNEL_2GHZ 0x00080
-#define CHANNEL_5GHZ 0x00100
-#define CHANNEL_PASSIVE 0x00200
-#define CHANNEL_DYN 0x00400
-#define CHANNEL_HALF 0x04000
-#define CHANNEL_QUARTER 0x08000
-#define CHANNEL_HT20 0x10000
-#define CHANNEL_HT40PLUS 0x20000
-#define CHANNEL_HT40MINUS 0x40000
-
-#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
-#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
-#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
-#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20)
-#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20)
-#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
-#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
-#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
-#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
-#define CHANNEL_ALL \
- (CHANNEL_OFDM| \
- CHANNEL_CCK| \
- CHANNEL_2GHZ | \
- CHANNEL_5GHZ | \
- CHANNEL_HT20 | \
- CHANNEL_HT40PLUS | \
- CHANNEL_HT40MINUS)
-
#define MAX_RTT_TABLE_ENTRY 6
#define MAX_IQCAL_MEASUREMENT 8
#define MAX_CL_TAB_ENTRY 16
#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j))
+enum ath9k_cal_flags {
+ RTT_DONE,
+ PAPRD_PACKET_SENT,
+ PAPRD_DONE,
+ NFCAL_PENDING,
+ NFCAL_INTF,
+ TXIQCAL_DONE,
+ TXCLCAL_DONE,
+ SW_PKDET_DONE,
+};
+
struct ath9k_hw_cal_data {
u16 channel;
- u32 channelFlags;
- u32 chanmode;
+ u16 channelFlags;
+ unsigned long cal_flags;
int32_t CalValid;
int8_t iCoff;
int8_t qCoff;
- bool rtt_done;
- bool paprd_packet_sent;
- bool paprd_done;
- bool nfcal_pending;
- bool nfcal_interference;
- bool done_txiqcal_once;
- bool done_txclcal_once;
+ u8 caldac[2];
u16 small_signal_gain[AR9300_MAX_CHAINS];
u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
u32 num_measures[AR9300_MAX_CHAINS];
@@ -430,33 +405,34 @@ struct ath9k_hw_cal_data {
struct ath9k_channel {
struct ieee80211_channel *chan;
u16 channel;
- u32 channelFlags;
- u32 chanmode;
+ u16 channelFlags;
s16 noisefloor;
};
-#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
- (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
- (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
- (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
-#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
-#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
-#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
-#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
-#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
+#define CHANNEL_5GHZ BIT(0)
+#define CHANNEL_HALF BIT(1)
+#define CHANNEL_QUARTER BIT(2)
+#define CHANNEL_HT BIT(3)
+#define CHANNEL_HT40PLUS BIT(4)
+#define CHANNEL_HT40MINUS BIT(5)
+
+#define IS_CHAN_5GHZ(_c) (!!((_c)->channelFlags & CHANNEL_5GHZ))
+#define IS_CHAN_2GHZ(_c) (!IS_CHAN_5GHZ(_c))
+
+#define IS_CHAN_HALF_RATE(_c) (!!((_c)->channelFlags & CHANNEL_HALF))
+#define IS_CHAN_QUARTER_RATE(_c) (!!((_c)->channelFlags & CHANNEL_QUARTER))
#define IS_CHAN_A_FAST_CLOCK(_ah, _c) \
- ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
- ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
-
-/* These macros check chanmode and not channelFlags */
-#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
-#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \
- ((_c)->chanmode == CHANNEL_G_HT20))
-#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \
- ((_c)->chanmode == CHANNEL_A_HT40MINUS) || \
- ((_c)->chanmode == CHANNEL_G_HT40PLUS) || \
- ((_c)->chanmode == CHANNEL_G_HT40MINUS))
-#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
+ (IS_CHAN_5GHZ(_c) && ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
+
+#define IS_CHAN_HT(_c) ((_c)->channelFlags & CHANNEL_HT)
+
+#define IS_CHAN_HT20(_c) (IS_CHAN_HT(_c) && !IS_CHAN_HT40(_c))
+
+#define IS_CHAN_HT40(_c) \
+ (!!((_c)->channelFlags & (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)))
+
+#define IS_CHAN_HT40PLUS(_c) ((_c)->channelFlags & CHANNEL_HT40PLUS)
+#define IS_CHAN_HT40MINUS(_c) ((_c)->channelFlags & CHANNEL_HT40MINUS)
enum ath9k_power_mode {
ATH9K_PM_AWAKE = 0,
@@ -558,6 +534,7 @@ struct ath_hw_antcomb_conf {
u8 main_gaintb;
u8 alt_gaintb;
int lna1_lna2_delta;
+ int lna1_lna2_switch_delta;
u8 div_group;
};
@@ -726,6 +703,10 @@ struct ath_hw_ops {
void (*spectral_scan_trigger)(struct ath_hw *ah);
void (*spectral_scan_wait)(struct ath_hw *ah);
+ void (*tx99_start)(struct ath_hw *ah, u32 qnum);
+ void (*tx99_stop)(struct ath_hw *ah);
+ void (*tx99_set_txpower)(struct ath_hw *ah, u8 power);
+
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
#endif
@@ -1026,10 +1007,11 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
-void ath9k_hw_set11nmac2040(struct ath_hw *ah);
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
const struct ath9k_beacon_state *bs);
+void ath9k_hw_check_nav(struct ath_hw *ah);
bool ath9k_hw_check_alive(struct ath_hw *ah);
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 9a1f349f9260..e89db64532f5 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -347,7 +347,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
u8 *ds;
- struct ath_buf *bf;
int i, bsize, desc_len;
ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
@@ -399,33 +398,68 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
/* allocate buffers */
- bsize = sizeof(struct ath_buf) * nbuf;
- bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
- if (!bf)
- return -ENOMEM;
+ if (is_tx) {
+ struct ath_buf *bf;
+
+ bsize = sizeof(struct ath_buf) * nbuf;
+ bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
+
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += (desc_len * ndesc);
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
+ }
+ list_add_tail(&bf->list, head);
+ }
+ } else {
+ struct ath_rxbuf *bf;
- for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
-
- if (!(sc->sc_ah->caps.hw_caps &
- ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- /*
- * Skip descriptor addresses which can cause 4KB
- * boundary crossing (addr + length) with a 32 dword
- * descriptor fetch.
- */
- while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
- BUG_ON((caddr_t) bf->bf_desc >=
- ((caddr_t) dd->dd_desc +
- dd->dd_desc_len));
-
- ds += (desc_len * ndesc);
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
+ bsize = sizeof(struct ath_rxbuf) * nbuf;
+ bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
+
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += (desc_len * ndesc);
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
}
+ list_add_tail(&bf->list, head);
}
- list_add_tail(&bf->list, head);
}
return 0;
}
@@ -437,7 +471,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
- sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
@@ -547,6 +580,26 @@ static void ath9k_init_platform(struct ath_softc *sc)
if (sc->driver_data & ATH9K_PCI_CUS217)
ath_info(common, "CUS217 card detected\n");
+ if (sc->driver_data & ATH9K_PCI_CUS252)
+ ath_info(common, "CUS252 card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_AR9565_1ANT)
+ ath_info(common, "WB335 1-ANT card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_AR9565_2ANT)
+ ath_info(common, "WB335 2-ANT card detected\n");
+
+ /*
+ * Some WB335 cards do not support antenna diversity. Since
+ * we use a hardcoded value for AR9565 instead of using the
+ * EEPROM/OTP data, remove the combining feature from
+ * the HW capabilities bitmap.
+ */
+ if (sc->driver_data & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
+ if (!(sc->driver_data & ATH9K_PCI_BT_ANT_DIV))
+ pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
+ }
+
if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
ath_info(common, "Set BT/WLAN RX diversity capability\n");
@@ -627,7 +680,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
sc->sc_ah = ah;
pCap = &ah->caps;
- sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET);
+ common = ath9k_hw_common(ah);
+ sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
+ sc->tx99_power = MAX_RATE_POWER + 1;
if (!pdata) {
ah->ah_flags |= AH_USE_EEPROM;
@@ -641,7 +696,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->external_reset = pdata->external_reset;
}
- common = ath9k_hw_common(ah);
common->ops = &ah->reg_ops;
common->bus_ops = bus_ops;
common->ah = ah;
@@ -732,6 +786,7 @@ err_queues:
ath9k_hw_deinit(ah);
err_hw:
ath9k_eeprom_release(sc);
+ dev_kfree_skb_any(sc->tx99_skb);
return ret;
}
@@ -748,7 +803,7 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
chan = &sband->channels[i];
ah->curchan = &ah->channels[chan->hw_value];
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
- ath9k_cmn_update_ichannel(ah->curchan, &chandef);
+ ath9k_cmn_get_channel(sc->hw, ah, &chandef);
ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
}
}
@@ -789,7 +844,6 @@ static const struct ieee80211_iface_limit if_limits[] = {
BIT(NL80211_IFTYPE_P2P_GO) },
};
-
static const struct ieee80211_iface_limit if_dfs_limits[] = {
{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
};
@@ -850,17 +904,18 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
- hw->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_WDS) |
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT);
-
- hw->wiphy->iface_combinations = if_comb;
- hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ if (!config_enabled(CONFIG_ATH9K_TX99)) {
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_WDS) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+ hw->wiphy->iface_combinations = if_comb;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ }
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 2f831db396ac..aed7e29dc50f 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -28,6 +28,13 @@ void ath_tx_complete_poll_work(struct work_struct *work)
int i;
bool needreset = false;
+
+ if (sc->tx99_state) {
+ ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+ "skip tx hung detection on tx99\n");
+ return;
+ }
+
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = sc->tx.txq_map[i];
@@ -70,7 +77,7 @@ void ath_hw_check(struct work_struct *work)
ath9k_ps_wakeup(sc);
is_alive = ath9k_hw_check_alive(sc->sc_ah);
- if (is_alive && !AR_SREV_9300(sc->sc_ah))
+ if ((is_alive && !AR_SREV_9300(sc->sc_ah)) || sc->tx99_state)
goto out;
else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
ath_dbg(common, RESET,
@@ -141,6 +148,9 @@ void ath_hw_pll_work(struct work_struct *work)
if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
return;
+ if (sc->tx99_state)
+ return;
+
ath9k_ps_wakeup(sc);
pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
ath9k_ps_restore(sc);
@@ -184,7 +194,7 @@ static void ath_paprd_activate(struct ath_softc *sc)
struct ath9k_hw_cal_data *caldata = ah->caldata;
int chain;
- if (!caldata || !caldata->paprd_done) {
+ if (!caldata || !test_bit(PAPRD_DONE, &caldata->cal_flags)) {
ath_dbg(common, CALIBRATE, "Failed to activate PAPRD\n");
return;
}
@@ -256,7 +266,9 @@ void ath_paprd_calibrate(struct work_struct *work)
int len = 1800;
int ret;
- if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done) {
+ if (!caldata ||
+ !test_bit(PAPRD_PACKET_SENT, &caldata->cal_flags) ||
+ test_bit(PAPRD_DONE, &caldata->cal_flags)) {
ath_dbg(common, CALIBRATE, "Skipping PAPRD calibration\n");
return;
}
@@ -316,7 +328,7 @@ void ath_paprd_calibrate(struct work_struct *work)
kfree_skb(skb);
if (chain_ok) {
- caldata->paprd_done = true;
+ set_bit(PAPRD_DONE, &caldata->cal_flags);
ath_paprd_activate(sc);
}
@@ -343,7 +355,7 @@ void ath_ani_calibrate(unsigned long data)
u32 cal_interval, short_cal_interval, long_cal_interval;
unsigned long flags;
- if (ah->caldata && ah->caldata->nfcal_interference)
+ if (ah->caldata && test_bit(NFCAL_INTF, &ah->caldata->cal_flags))
long_cal_interval = ATH_LONG_CALINTERVAL_INT;
else
long_cal_interval = ATH_LONG_CALINTERVAL;
@@ -432,7 +444,7 @@ set_timer:
mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
if (ar9003_is_paprd_enabled(ah) && ah->caldata) {
- if (!ah->caldata->paprd_done) {
+ if (!test_bit(PAPRD_DONE, &ah->caldata->cal_flags)) {
ieee80211_queue_work(sc->hw, &sc->paprd_work);
} else if (!ah->paprd_table_write_done) {
ath9k_ps_wakeup(sc);
@@ -516,7 +528,8 @@ void ath_update_survey_nf(struct ath_softc *sc, int channel)
if (chan->noisefloor) {
survey->filled |= SURVEY_INFO_NOISE_DBM;
- survey->noise = ath9k_hw_getchan_noise(ah, chan);
+ survey->noise = ath9k_hw_getchan_noise(ah, chan,
+ chan->noisefloor);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index a3eff0986a3f..6a18f9d3e9cc 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -374,7 +374,6 @@ EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_channel *chan = ah->curchan;
struct ath9k_tx_queue_info *qi;
u32 cwMin, chanCwMin, value;
@@ -387,10 +386,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
- if (chan && IS_CHAN_B(chan))
- chanCwMin = INIT_CWMIN_11B;
- else
- chanCwMin = INIT_CWMIN;
+ chanCwMin = INIT_CWMIN;
for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
} else
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index bfccaceed44e..e3eed81f2439 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -603,8 +603,6 @@ enum ath9k_tx_queue_flags {
#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
#define ATH9K_DECOMP_MASK_SIZE 128
-#define ATH9K_READY_TIME_LO_BOUND 50
-#define ATH9K_READY_TIME_HI_BOUND 96
enum ath9k_pkt_type {
ATH9K_PKT_TYPE_NORMAL = 0,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index e4f65900132d..74f452c7b166 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -208,6 +208,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
unsigned long flags;
+ int i;
if (ath_startrecv(sc) != 0) {
ath_err(common, "Unable to restart recv logic\n");
@@ -235,6 +236,15 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
}
work:
ath_restart_work(sc);
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ spin_lock_bh(&sc->tx.txq[i].axq_lock);
+ ath_txq_schedule(sc, &sc->tx.txq[i]);
+ spin_unlock_bh(&sc->tx.txq[i].axq_lock);
+ }
}
ieee80211_wake_queues(sc->hw);
@@ -302,17 +312,91 @@ out:
* by reseting the chip. To accomplish this we must first cleanup any pending
* DMA, then restart stuff.
*/
-static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
- struct ath9k_channel *hchan)
+static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chandef)
{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath9k_channel *hchan;
+ struct ieee80211_channel *chan = chandef->chan;
+ unsigned long flags;
+ bool offchannel;
+ int pos = chan->hw_value;
+ int old_pos = -1;
int r;
if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return -EIO;
+ offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
+
+ if (ah->curchan)
+ old_pos = ah->curchan - &ah->channels[0];
+
+ ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
+ chan->center_freq, chandef->width);
+
+ /* update survey stats for the old channel before switching */
+ spin_lock_irqsave(&common->cc_lock, flags);
+ ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+
+ ath9k_cmn_get_channel(hw, ah, chandef);
+
+ /*
+ * If the operating channel changes, change the survey in-use flags
+ * along with it.
+ * Reset the survey data for the new channel, unless we're switching
+ * back to the operating channel from an off-channel operation.
+ */
+ if (!offchannel && sc->cur_survey != &sc->survey[pos]) {
+ if (sc->cur_survey)
+ sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
+
+ sc->cur_survey = &sc->survey[pos];
+
+ memset(sc->cur_survey, 0, sizeof(struct survey_info));
+ sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
+ } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
+ memset(&sc->survey[pos], 0, sizeof(struct survey_info));
+ }
+
+ hchan = &sc->sc_ah->channels[pos];
r = ath_reset_internal(sc, hchan);
+ if (r)
+ return r;
- return r;
+ /*
+ * The most recent snapshot of channel->noisefloor for the old
+ * channel is only available after the hardware reset. Copy it to
+ * the survey stats now.
+ */
+ if (old_pos >= 0)
+ ath_update_survey_nf(sc, old_pos);
+
+ /*
+ * Enable radar pulse detection if on a DFS channel. Spectral
+ * scanning and radar detection cannot be used concurrently.
+ */
+ if (hw->conf.radar_enabled) {
+ u32 rxfilter;
+
+ /* set HW specific DFS configuration */
+ ath9k_hw_set_radar_params(ah);
+ rxfilter = ath9k_hw_getrxfilter(ah);
+ rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
+ ATH9K_RX_FILTER_PHYERR;
+ ath9k_hw_setrxfilter(ah, rxfilter);
+ ath_dbg(common, DFS, "DFS enabled at freq %d\n",
+ chan->center_freq);
+ } else {
+ /* perform spectral scan if requested. */
+ if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
+ sc->spectral_mode == SPECTRAL_CHANSCAN)
+ ath9k_spectral_scan_trigger(hw);
+ }
+
+ return 0;
}
static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -362,6 +446,13 @@ void ath9k_tasklet(unsigned long data)
type = RESET_TYPE_BB_WATCHDOG;
ath9k_queue_reset(sc, type);
+
+ /*
+ * Increment the ref. counter here so that
+ * interrupts are enabled in the reset routine.
+ */
+ atomic_inc(&ah->intr_ref_cnt);
+ ath_dbg(common, ANY, "FATAL: Skipping interrupts\n");
goto out;
}
@@ -400,10 +491,9 @@ void ath9k_tasklet(unsigned long data)
ath9k_btcoex_handle_interrupt(sc, status);
-out:
/* re-enable hardware interrupt */
ath9k_hw_enable_interrupts(ah);
-
+out:
spin_unlock(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
}
@@ -539,21 +629,10 @@ chip_reset:
static int ath_reset(struct ath_softc *sc)
{
- int i, r;
+ int r;
ath9k_ps_wakeup(sc);
-
r = ath_reset_internal(sc, NULL);
-
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (!ATH_TXQ_SETUP(sc, i))
- continue;
-
- spin_lock_bh(&sc->tx.txq[i].axq_lock);
- ath_txq_schedule(sc, &sc->tx.txq[i]);
- spin_unlock_bh(&sc->tx.txq[i].axq_lock);
- }
-
ath9k_ps_restore(sc);
return r;
@@ -595,7 +674,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
- init_channel = ath9k_cmn_get_curchannel(hw, ah);
+ init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
/* Reset SERDES registers */
ath9k_hw_configpcipowersave(ah, false);
@@ -798,7 +877,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
}
if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+ ah->curchan = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
ath9k_hw_phy_disable(ah);
@@ -817,7 +896,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath_dbg(common, CONFIG, "Driver halt\n");
}
-bool ath9k_uses_beacons(int type)
+static bool ath9k_uses_beacons(int type)
{
switch (type) {
case NL80211_IFTYPE_AP:
@@ -967,6 +1046,14 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
+ if (config_enabled(CONFIG_ATH9K_TX99)) {
+ if (sc->nvifs >= 1) {
+ mutex_unlock(&sc->mutex);
+ return -EOPNOTSUPP;
+ }
+ sc->tx99_vif = vif;
+ }
+
ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
sc->nvifs++;
@@ -995,9 +1082,15 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- ath_dbg(common, CONFIG, "Change Interface\n");
mutex_lock(&sc->mutex);
+ if (config_enabled(CONFIG_ATH9K_TX99)) {
+ mutex_unlock(&sc->mutex);
+ return -EOPNOTSUPP;
+ }
+
+ ath_dbg(common, CONFIG, "Change Interface\n");
+
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
@@ -1027,6 +1120,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
sc->nvifs--;
+ sc->tx99_vif = NULL;
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
@@ -1048,6 +1142,9 @@ static void ath9k_enable_ps(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
sc->ps_enabled = true;
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
@@ -1064,6 +1161,9 @@ static void ath9k_disable_ps(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
sc->ps_enabled = false;
ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
@@ -1087,6 +1187,9 @@ void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw)
struct ath_common *common = ath9k_hw_common(ah);
u32 rxfilter;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
ath_err(common, "spectrum analyzer not implemented on this hardware\n");
return;
@@ -1202,81 +1305,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
- struct ieee80211_channel *curchan = hw->conf.chandef.chan;
- int pos = curchan->hw_value;
- int old_pos = -1;
- unsigned long flags;
-
- if (ah->curchan)
- old_pos = ah->curchan - &ah->channels[0];
-
- ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
- curchan->center_freq, hw->conf.chandef.width);
-
- /* update survey stats for the old channel before switching */
- spin_lock_irqsave(&common->cc_lock, flags);
- ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
-
- ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
- &conf->chandef);
-
- /*
- * If the operating channel changes, change the survey in-use flags
- * along with it.
- * Reset the survey data for the new channel, unless we're switching
- * back to the operating channel from an off-channel operation.
- */
- if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) &&
- sc->cur_survey != &sc->survey[pos]) {
-
- if (sc->cur_survey)
- sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
-
- sc->cur_survey = &sc->survey[pos];
-
- memset(sc->cur_survey, 0, sizeof(struct survey_info));
- sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
- } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
- memset(&sc->survey[pos], 0, sizeof(struct survey_info));
- }
-
- if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
+ if (ath_set_channel(sc, &hw->conf.chandef) < 0) {
ath_err(common, "Unable to set channel\n");
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
return -EINVAL;
}
-
- /*
- * The most recent snapshot of channel->noisefloor for the old
- * channel is only available after the hardware reset. Copy it to
- * the survey stats now.
- */
- if (old_pos >= 0)
- ath_update_survey_nf(sc, old_pos);
-
- /*
- * Enable radar pulse detection if on a DFS channel. Spectral
- * scanning and radar detection can not be used concurrently.
- */
- if (hw->conf.radar_enabled) {
- u32 rxfilter;
-
- /* set HW specific DFS configuration */
- ath9k_hw_set_radar_params(ah);
- rxfilter = ath9k_hw_getrxfilter(ah);
- rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
- ATH9K_RX_FILTER_PHYERR;
- ath9k_hw_setrxfilter(ah, rxfilter);
- ath_dbg(common, DFS, "DFS enabled at freq %d\n",
- curchan->center_freq);
- } else {
- /* perform spectral scan if requested. */
- if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
- sc->spectral_mode == SPECTRAL_CHANSCAN)
- ath9k_spectral_scan_trigger(hw);
- }
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
@@ -1735,6 +1769,9 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
unsigned long flags;
int pos;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return -EOPNOTSUPP;
+
spin_lock_irqsave(&common->cc_lock, flags);
if (idx == 0)
ath_update_survey_stats(sc);
@@ -1767,6 +1804,9 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
mutex_lock(&sc->mutex);
ah->coverage_class = coverage_class;
@@ -2333,6 +2373,134 @@ static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
sc->csa_vif = vif;
}
+static void ath9k_tx99_stop(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_drain_all_txq(sc);
+ ath_startrecv(sc);
+
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ ieee80211_wake_queues(sc->hw);
+
+ kfree_skb(sc->tx99_skb);
+ sc->tx99_skb = NULL;
+ sc->tx99_state = false;
+
+ ath9k_hw_tx99_stop(sc->sc_ah);
+ ath_dbg(common, XMIT, "TX99 stopped\n");
+}
+
+static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
+{
+ static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
+ 0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
+ 0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
+ 0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
+ 0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
+ 0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
+ 0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
+ 0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
+ u32 len = 1200;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, len);
+
+ memset(skb->data, 0, len);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
+ hdr->duration_id = 0;
+
+ memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+ hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
+
+ tx_info = IEEE80211_SKB_CB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->band = hw->conf.chandef.chan->band;
+ tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
+ tx_info->control.vif = sc->tx99_vif;
+
+ memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
+
+ return skb;
+}
+
+void ath9k_tx99_deinit(struct ath_softc *sc)
+{
+ ath_reset(sc);
+
+ ath9k_ps_wakeup(sc);
+ ath9k_tx99_stop(sc);
+ ath9k_ps_restore(sc);
+}
+
+int ath9k_tx99_init(struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_tx_control txctl;
+ int r;
+
+ if (sc->sc_flags & SC_OP_INVALID) {
+ ath_err(common,
+ "driver is in invalid state unable to use TX99");
+ return -EINVAL;
+ }
+
+ sc->tx99_skb = ath9k_build_tx99_skb(sc);
+ if (!sc->tx99_skb)
+ return -ENOMEM;
+
+ memset(&txctl, 0, sizeof(txctl));
+ txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
+
+ ath_reset(sc);
+
+ ath9k_ps_wakeup(sc);
+
+ ath9k_hw_disable_interrupts(ah);
+ atomic_set(&ah->intr_ref_cnt, -1);
+ ath_drain_all_txq(sc);
+ ath_stoprecv(sc);
+
+ sc->tx99_state = true;
+
+ ieee80211_stop_queues(hw);
+
+ if (sc->tx99_power == MAX_RATE_POWER + 1)
+ sc->tx99_power = MAX_RATE_POWER;
+
+ ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
+ r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
+ if (r) {
+ ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
+ return r;
+ }
+
+ ath_dbg(common, XMIT, "TX99 xmit started using %d ( %ddBm)\n",
+ sc->tx99_power,
+ sc->tx99_power / 2);
+
+ /* We leave the hardware awake as it will be chugging on */
+
+ return 0;
+}
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 815bee21c19a..0ac1b5f04256 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -661,9 +661,9 @@ void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
chan_start = wlan_chan - 10;
chan_end = wlan_chan + 10;
- if (chan->chanmode == CHANNEL_G_HT40PLUS)
+ if (IS_CHAN_HT40PLUS(chan))
chan_end += 20;
- else if (chan->chanmode == CHANNEL_G_HT40MINUS)
+ else if (IS_CHAN_HT40MINUS(chan))
chan_start -= 20;
/* adjust side band */
@@ -707,11 +707,11 @@ void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
if (setchannel) {
struct ath9k_hw_cal_data *caldata = &sc->caldata;
- if ((caldata->chanmode == CHANNEL_G_HT40PLUS) &&
+ if (IS_CHAN_HT40PLUS(ah->curchan) &&
(ah->curchan->channel > caldata->channel) &&
(ah->curchan->channel <= caldata->channel + 20))
return;
- if ((caldata->chanmode == CHANNEL_G_HT40MINUS) &&
+ if (IS_CHAN_HT40MINUS(ah->curchan) &&
(ah->curchan->channel < caldata->channel) &&
(ah->curchan->channel >= caldata->channel - 20))
return;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index d089a7cf01c4..7e4c2524b630 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -269,7 +269,200 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
- { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */
+
+ /* CUS252 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3028),
+ .driver_data = ATH9K_PCI_CUS252 |
+ ATH9K_PCI_AR9565_2ANT |
+ ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2176),
+ .driver_data = ATH9K_PCI_CUS252 |
+ ATH9K_PCI_AR9565_2ANT |
+ ATH9K_PCI_BT_ANT_DIV },
+
+ /* WB335 1-ANT */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE068),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0xA119),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0632),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x6671),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2811),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2812),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+
+ /* WB335 1-ANT / Antenna Diversity */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3025),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3026),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x302B),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE069),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0x3028),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0622),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0672),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0662),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213A),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3026),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x18E3),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x217F),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_DELL,
+ 0x020E),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+
+ /* WB335 2-ANT */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411A),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411B),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411C),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411D),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411E),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+
+ /* WB335 2-ANT / Antenna-Diversity */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3027),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x302C),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0642),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0652),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0612),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2130),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x144F, /* ASKEY */
+ 0x7202),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2810),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0x3027),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+
+ /* PCI-E AR9565 (WB335) */
+ { PCI_VDEVICE(ATHEROS, 0x0036),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+
{ 0 }
};
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index d3d7c51fa6c8..d829bb62a3fc 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1387,31 +1387,31 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
int used_mcs = 0, used_htmode = 0;
if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
- used_mcs = snprintf(mcs, 5, "%d",
- rc->rate_table->info[i].ratecode);
+ used_mcs = scnprintf(mcs, 5, "%d",
+ rc->rate_table->info[i].ratecode);
if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
- used_htmode = snprintf(htmode, 5, "HT40");
+ used_htmode = scnprintf(htmode, 5, "HT40");
else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
- used_htmode = snprintf(htmode, 5, "HT20");
+ used_htmode = scnprintf(htmode, 5, "HT20");
else
- used_htmode = snprintf(htmode, 5, "????");
+ used_htmode = scnprintf(htmode, 5, "????");
}
mcs[used_mcs] = '\0';
htmode[used_htmode] = '\0';
- len += snprintf(buf + len, max - len,
- "%6s %6s %3u.%d: "
- "%10u %10u %10u %10u\n",
- htmode,
- mcs,
- ratekbps / 1000,
- (ratekbps % 1000) / 100,
- stats->success,
- stats->retries,
- stats->xretries,
- stats->per);
+ len += scnprintf(buf + len, max - len,
+ "%6s %6s %3u.%d: "
+ "%10u %10u %10u %10u\n",
+ htmode,
+ mcs,
+ ratekbps / 1000,
+ (ratekbps % 1000) / 100,
+ stats->success,
+ stats->retries,
+ stats->xretries,
+ stats->per);
}
if (len > max)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ab9e3a8410bc..95ddca5495d4 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -19,7 +19,7 @@
#include "ath9k.h"
#include "ar9003_mac.h"
-#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
+#define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb))
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
@@ -35,7 +35,7 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
* buffer (or rx fifo). This can incorrectly acknowledge packets
* to a sender if last desc is self-linked.
*/
-static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
+static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -68,7 +68,7 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
sc->rx.rxlink = &ds->ds_link;
}
-static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf)
{
if (sc->rx.buf_hold)
ath_rx_buf_link(sc, sc->rx.buf_hold);
@@ -112,13 +112,13 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
struct ath_hw *ah = sc->sc_ah;
struct ath_rx_edma *rx_edma;
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
rx_edma = &sc->rx.rx_edma[qtype];
if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
return false;
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
list_del_init(&bf->list);
skb = bf->bf_mpdu;
@@ -138,7 +138,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
enum ath9k_rx_qtype qtype)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_buf *bf, *tbf;
+ struct ath_rxbuf *bf, *tbf;
if (list_empty(&sc->rx.rxbuf)) {
ath_dbg(common, QUEUE, "No free rx buf available\n");
@@ -154,7 +154,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
static void ath_rx_remove_buffer(struct ath_softc *sc,
enum ath9k_rx_qtype qtype)
{
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
struct ath_rx_edma *rx_edma;
struct sk_buff *skb;
@@ -171,7 +171,7 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
@@ -199,7 +199,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int error = 0, i;
u32 size;
@@ -211,7 +211,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
ah->caps.rx_hp_qdepth);
- size = sizeof(struct ath_buf) * nbufs;
+ size = sizeof(struct ath_rxbuf) * nbufs;
bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
if (!bf)
return -ENOMEM;
@@ -271,7 +271,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int error = 0;
spin_lock_init(&sc->sc_pcu_lock);
@@ -332,7 +332,7 @@ void ath_rx_cleanup(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
ath_rx_edma_cleanup(sc);
@@ -375,6 +375,9 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
{
u32 rfilt;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return 0;
+
rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
| ATH9K_RX_FILTER_MCAST;
@@ -427,7 +430,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
int ath_startrecv(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- struct ath_buf *bf, *tbf;
+ struct ath_rxbuf *bf, *tbf;
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
ath_edma_start_recv(sc);
@@ -447,7 +450,7 @@ int ath_startrecv(struct ath_softc *sc)
if (list_empty(&sc->rx.rxbuf))
goto start_recv;
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
ath9k_hw_putrxbuf(ah, bf->bf_daddr);
ath9k_hw_rxena(ah);
@@ -603,13 +606,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
static bool ath_edma_get_buffers(struct ath_softc *sc,
enum ath9k_rx_qtype qtype,
struct ath_rx_status *rs,
- struct ath_buf **dest)
+ struct ath_rxbuf **dest)
{
struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int ret;
skb = skb_peek(&rx_edma->rx_fifo);
@@ -653,11 +656,11 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
return true;
}
-static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
struct ath_rx_status *rs,
enum ath9k_rx_qtype qtype)
{
- struct ath_buf *bf = NULL;
+ struct ath_rxbuf *bf = NULL;
while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
if (!bf)
@@ -668,13 +671,13 @@ static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
-static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
struct ath_rx_status *rs)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_desc *ds;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int ret;
if (list_empty(&sc->rx.rxbuf)) {
@@ -682,7 +685,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
if (bf == sc->rx.buf_hold)
return NULL;
@@ -702,7 +705,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
ret = ath9k_hw_rxprocdesc(ah, ds, rs);
if (ret == -EINPROGRESS) {
struct ath_rx_status trs;
- struct ath_buf *tbf;
+ struct ath_rxbuf *tbf;
struct ath_desc *tds;
memset(&trs, 0, sizeof(trs));
@@ -711,7 +714,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
- tbf = list_entry(bf->list.next, struct ath_buf, list);
+ tbf = list_entry(bf->list.next, struct ath_rxbuf, list);
/*
* On some hardware the descriptor status words could
@@ -972,14 +975,15 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
{
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath_hw *ah = sc->sc_ah;
- u8 bins[SPECTRAL_HT20_NUM_BINS];
- u8 *vdata = (u8 *)hdr;
- struct fft_sample_ht20 fft_sample;
+ u8 num_bins, *bins, *vdata = (u8 *)hdr;
+ struct fft_sample_ht20 fft_sample_20;
+ struct fft_sample_ht20_40 fft_sample_40;
+ struct fft_sample_tlv *tlv;
struct ath_radar_info *radar_info;
- struct ath_ht20_mag_info *mag_info;
int len = rs->rs_datalen;
int dc_pos;
- u16 length, max_magnitude;
+ u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+ enum nl80211_channel_type chan_type;
/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
* via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
@@ -997,45 +1001,44 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
return 0;
- /* Variation in the data length is possible and will be fixed later.
- * Note that we only support HT20 for now.
- *
- * TODO: add HT20_40 support as well.
- */
- if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) ||
- (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1))
- return 1;
-
- fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20;
- length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
- fft_sample.tlv.length = __cpu_to_be16(length);
+ chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
+ if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+ (chan_type == NL80211_CHAN_HT40PLUS)) {
+ fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_40_NUM_BINS;
+ bins = (u8 *)fft_sample_40.data;
+ } else {
+ fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_NUM_BINS;
+ bins = (u8 *)fft_sample_20.data;
+ }
- fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq);
- fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
- fft_sample.noise = ah->noise;
+ /* Variation in the data length is possible and will be fixed later */
+ if ((len > fft_len + 2) || (len < fft_len - 1))
+ return 1;
- switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) {
+ switch (len - fft_len) {
case 0:
/* length correct, nothing to do. */
- memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS);
+ memcpy(bins, vdata, num_bins);
break;
case -1:
/* first byte missing, duplicate it. */
- memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1);
+ memcpy(&bins[1], vdata, num_bins - 1);
bins[0] = vdata[0];
break;
case 2:
/* MAC added 2 extra bytes at bin 30 and 32, remove them. */
memcpy(bins, vdata, 30);
bins[30] = vdata[31];
- memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31);
+ memcpy(&bins[31], &vdata[33], num_bins - 31);
break;
case 1:
/* MAC added 2 extra bytes AND first byte is missing. */
bins[0] = vdata[0];
- memcpy(&bins[0], vdata, 30);
+ memcpy(&bins[1], vdata, 30);
bins[31] = vdata[31];
- memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32);
+ memcpy(&bins[32], &vdata[33], num_bins - 32);
break;
default:
return 1;
@@ -1044,23 +1047,93 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
/* DC value (value in the middle) is the blind spot of the spectral
* sample and invalid, interpolate it.
*/
- dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
+ dc_pos = num_bins / 2;
bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
- /* mag data is at the end of the frame, in front of radar_info */
- mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+ if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+ (chan_type == NL80211_CHAN_HT40PLUS)) {
+ s8 lower_rssi, upper_rssi;
+ s16 ext_nf;
+ u8 lower_max_index, upper_max_index;
+ u8 lower_bitmap_w, upper_bitmap_w;
+ u16 lower_mag, upper_mag;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_ht20_40_mag_info *mag_info;
+
+ if (caldata)
+ ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+ caldata->nfCalHist[3].privNF);
+ else
+ ext_nf = ATH_DEFAULT_NOISE_FLOOR;
+
+ length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+ fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+ fft_sample_40.tlv.length = __cpu_to_be16(length);
+ fft_sample_40.freq = __cpu_to_be16(freq);
+ fft_sample_40.channel_type = chan_type;
+
+ if (chan_type == NL80211_CHAN_HT40PLUS) {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
- /* copy raw bins without scaling them */
- memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS);
- fft_sample.max_exp = mag_info->max_exp & 0xf;
+ fft_sample_40.lower_noise = ah->noise;
+ fft_sample_40.upper_noise = ext_nf;
+ } else {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
- max_magnitude = spectral_max_magnitude(mag_info->all_bins);
- fft_sample.max_magnitude = __cpu_to_be16(max_magnitude);
- fft_sample.max_index = spectral_max_index(mag_info->all_bins);
- fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins);
- fft_sample.tsf = __cpu_to_be64(tsf);
+ fft_sample_40.lower_noise = ext_nf;
+ fft_sample_40.upper_noise = ah->noise;
+ }
+ fft_sample_40.lower_rssi = lower_rssi;
+ fft_sample_40.upper_rssi = upper_rssi;
+
+ mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
+ lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+ upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+ fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+ fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+ lower_max_index = spectral_max_index(mag_info->lower_bins);
+ upper_max_index = spectral_max_index(mag_info->upper_bins);
+ fft_sample_40.lower_max_index = lower_max_index;
+ fft_sample_40.upper_max_index = upper_max_index;
+ lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+ upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+ fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+ fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+ fft_sample_40.max_exp = mag_info->max_exp & 0xf;
+
+ fft_sample_40.tsf = __cpu_to_be64(tsf);
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_40;
+ } else {
+ u8 max_index, bitmap_w;
+ u16 magnitude;
+ struct ath_ht20_mag_info *mag_info;
+
+ length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+ fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+ fft_sample_20.tlv.length = __cpu_to_be16(length);
+ fft_sample_20.freq = __cpu_to_be16(freq);
+
+ fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
+ fft_sample_20.noise = ah->noise;
+
+ mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+ magnitude = spectral_max_magnitude(mag_info->all_bins);
+ fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+ max_index = spectral_max_index(mag_info->all_bins);
+ fft_sample_20.max_index = max_index;
+ bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+ fft_sample_20.bitmap_weight = bitmap_w;
+ fft_sample_20.max_exp = mag_info->max_exp & 0xf;
+
+ fft_sample_20.tsf = __cpu_to_be64(tsf);
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_20;
+ }
- ath_debug_send_fft_sample(sc, &fft_sample.tlv);
+ ath_debug_send_fft_sample(sc, tlv);
return 1;
#else
return 0;
@@ -1308,7 +1381,7 @@ static void ath9k_apply_ampdu_details(struct ath_softc *sc,
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
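For readers following the spectral-scan rework above, here is a standalone user-space sketch of the bin normalisation and DC-bin interpolation that ath_process_fft() now performs for both HT20 and HT20/40 samples. The function name and parameters are hypothetical (expected_len maps to fft_len, num_bins to SPECTRAL_HT20{,_40}_NUM_BINS); it is a model of the switch above, not part of the patch.

#include <stdint.h>
#include <string.h>

static int normalize_fft_bins(uint8_t *bins, const uint8_t *vdata,
			      int len, int expected_len, int num_bins)
{
	int dc_pos;

	switch (len - expected_len) {
	case 0:		/* length correct, copy as-is */
		memcpy(bins, vdata, num_bins);
		break;
	case -1:	/* first byte missing, duplicate it */
		memcpy(&bins[1], vdata, num_bins - 1);
		bins[0] = vdata[0];
		break;
	case 2:		/* MAC added 2 extra bytes at bin 30 and 32, drop them */
		memcpy(bins, vdata, 30);
		bins[30] = vdata[31];
		memcpy(&bins[31], &vdata[33], num_bins - 31);
		break;
	case 1:		/* 2 extra bytes AND first byte missing */
		bins[0] = vdata[0];
		memcpy(&bins[1], vdata, 30);
		bins[31] = vdata[31];
		memcpy(&bins[32], &vdata[33], num_bins - 32);
		break;
	default:
		return -1;	/* length cannot be repaired */
	}

	/* The DC bin is a blind spot of the sample; interpolate it. */
	dc_pos = num_bins / 2;
	bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
	return 0;
}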
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index fde6da619f30..0db37f230018 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -39,7 +39,7 @@ struct wmi_fw_version {
struct wmi_event_swba {
__be64 tsf;
u8 beacon_pending;
-};
+} __packed;
/*
* 64 - HTC header - WMI header - 1 / txstatus
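A short aside on the one-line wmi.h change above (sketch, not from the patch): wmi_event_swba is parsed straight out of a firmware event buffer, so its in-memory layout has to match the 9 bytes on the wire. Without __packed the compiler appends trailing padding after beacon_pending (sizeof() is typically 16 on 64-bit builds), and anything that relies on the struct size reads the wrong offsets. Illustrative type names below are hypothetical:

struct swba_unpacked {			/* sizeof() is typically 16: trailing */
	__be64 tsf;			/* padding after beacon_pending */
	u8 beacon_pending;
};

struct swba_packed {			/* sizeof() == 9, matches the wire format */
	__be64 tsf;
	u8 beacon_pending;
} __packed;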
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 5ac713d2ff5d..09cdbcd09739 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1241,12 +1241,13 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
if (bf->bf_next)
info.link = bf->bf_next->bf_daddr;
else
- info.link = 0;
+ info.link = (sc->tx99_state) ? bf->bf_daddr : 0;
if (!bf_first) {
bf_first = bf;
- info.flags = ATH9K_TXDESC_INTREQ;
+ if (!sc->tx99_state)
+ info.flags = ATH9K_TXDESC_INTREQ;
if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
txq == sc->tx.uapsdq)
info.flags |= ATH9K_TXDESC_CLRDMASK;
@@ -1704,16 +1705,9 @@ int ath_cabq_update(struct ath_softc *sc)
int qnum = sc->beacon.cabq->axq_qnum;
ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
- /*
- * Ensure the readytime % is within the bounds.
- */
- if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
- sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
- else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
- sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
qi.tqi_readyTime = (cur_conf->beacon_interval *
- sc->config.cabqReadytime) / 100;
+ ATH_CABQ_READY_TIME) / 100;
ath_txq_update(sc, qnum, &qi);
return 0;
@@ -1948,7 +1942,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
}
- if (!edma) {
+ if (!edma || sc->tx99_state) {
TX_STAT_INC(txq->axq_qnum, txstart);
ath9k_hw_txstart(ah, txq->axq_qnum);
}
@@ -1969,15 +1963,18 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct sk_buff *skb)
{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_frame_info *fi = get_frame_info(skb);
struct list_head bf_head;
- struct ath_buf *bf;
-
- bf = fi->bf;
+ struct ath_buf *bf = fi->bf;
INIT_LIST_HEAD(&bf_head);
list_add_tail(&bf->list, &bf_head);
bf->bf_state.bf_type = 0;
+ if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
+ bf->bf_state.bf_type = BUF_AMPDU;
+ ath_tx_addto_baw(sc, tid, bf);
+ }
bf->bf_next = NULL;
bf->bf_lastbf = bf;
@@ -2024,6 +2021,9 @@ static void setup_frame_info(struct ieee80211_hw *hw,
fi->keyix = ATH9K_TXKEYIX_INVALID;
fi->keytype = keytype;
fi->framelen = framelen;
+
+ if (!rate)
+ return;
fi->rtscts_rate = rate->hw_value;
if (short_preamble)
fi->rtscts_rate |= rate->hw_value_short;
@@ -2034,8 +2034,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
struct ath_hw *ah = sc->sc_ah;
struct ath9k_channel *curchan = ah->curchan;
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
- (curchan->channelFlags & CHANNEL_5GHZ) &&
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(curchan) &&
(chainmask == 0x7) && (rate < 0x90))
return 0x3;
else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
@@ -2326,7 +2325,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
if (sc->sc_ah->caldata)
- sc->sc_ah->caldata->paprd_packet_sent = true;
+ set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
if (!(tx_flags & ATH_TX_ERROR))
/* Frame was ACKed */
@@ -2376,6 +2375,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
bf->bf_buf_addr = 0;
+ if (sc->tx99_state)
+ goto skip_tx_complete;
if (bf->bf_state.bfs_paprd) {
if (time_after(jiffies,
@@ -2388,6 +2389,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
ath_tx_complete(sc, skb, tx_flags, txq);
}
+skip_tx_complete:
/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
* accidentally reference it later.
*/
@@ -2746,3 +2748,46 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
ath_txq_unlock(sc, txq);
}
}
+
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_tx_control *txctl)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ath_frame_info *fi = get_frame_info(skb);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_buf *bf;
+ int padpos, padsize;
+
+ padpos = ieee80211_hdrlen(hdr->frame_control);
+ padsize = padpos & 3;
+
+ if (padsize && skb->len > padpos) {
+ if (skb_headroom(skb) < padsize) {
+ ath_dbg(common, XMIT,
+ "tx99 padding failed\n");
+ return -EINVAL;
+ }
+
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, padpos);
+ }
+
+ fi->keyix = ATH9K_TXKEYIX_INVALID;
+ fi->framelen = skb->len + FCS_LEN;
+ fi->keytype = ATH9K_KEY_TYPE_CLEAR;
+
+ bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
+ if (!bf) {
+ ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
+ return -EINVAL;
+ }
+
+ ath_set_rates(sc->tx99_vif, NULL, bf);
+
+ ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
+ ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);
+
+ ath_tx_send_normal(sc, txctl->txq, NULL, skb);
+
+ return 0;
+}
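The header-padding arithmetic in ath9k_tx99_send() above follows the usual ath9k convention: the frame body must stay 4-byte aligned, so padsize = hdrlen & 3 bytes are inserted between the 802.11 header and the payload (this works because 802.11 header lengths are always even). A minimal user-space model with hypothetical names, assuming 'headroom' bytes are free in front of the buffer:

#include <stddef.h>
#include <string.h>

/* buf holds hdrlen header bytes followed by the payload; returns the new
 * start of the frame after padding, or buf unchanged if nothing to do. */
static unsigned char *tx_apply_padding(unsigned char *buf, size_t hdrlen,
				       size_t headroom)
{
	size_t padsize = hdrlen & 3;	/* bytes needed for 4-byte alignment */

	if (!padsize || headroom < padsize)
		return buf;

	/* Shift only the header forward, leaving 'padsize' unused bytes
	 * between the header and the payload. */
	memmove(buf - padsize, buf, hdrlen);
	return buf - padsize;
}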
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 491305c81fce..a1a69c5db409 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -19,7 +19,7 @@
#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"
-#include "ath9k.h"
+#include "ath.h"
/*
* tolerated deviation of radar time stamp in usecs on both sides
@@ -143,7 +143,6 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
{
u32 sz, i;
struct channel_detector *cd;
- struct ath_common *common = ath9k_hw_common(dpd->ah);
cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
if (cd == NULL)
@@ -167,7 +166,7 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
return cd;
fail:
- ath_dbg(common, DFS,
+ ath_dbg(dpd->common, DFS,
"failed to allocate channel_detector for freq=%d\n", freq);
channel_detector_exit(dpd, cd);
return NULL;
@@ -242,7 +241,7 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
struct pri_detector *pd = cd->detectors[i];
struct pri_sequence *ps = pd->add_pulse(pd, event);
if (ps != NULL) {
- ath_dbg(ath9k_hw_common(dpd->ah), DFS,
+ ath_dbg(dpd->common, DFS,
"DFS: radar found on freq=%d: id=%d, pri=%d, "
"count=%d, count_false=%d\n",
event->freq, pd->rs->type_id,
@@ -254,6 +253,12 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
return false;
}
+static struct ath_dfs_pool_stats
+dpd_get_stats(struct dfs_pattern_detector *dpd)
+{
+ return global_dfs_pool_stats;
+}
+
static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
enum nl80211_dfs_regions region)
{
@@ -284,14 +289,18 @@ static struct dfs_pattern_detector default_dpd = {
.exit = dpd_exit,
.set_dfs_domain = dpd_set_domain,
.add_pulse = dpd_add_pulse,
+ .get_stats = dpd_get_stats,
.region = NL80211_DFS_UNSET,
};
struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
+dfs_pattern_detector_init(struct ath_common *common,
+ enum nl80211_dfs_regions region)
{
struct dfs_pattern_detector *dpd;
- struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!config_enabled(CONFIG_CFG80211_CERTIFICATION_ONUS))
+ return NULL;
dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
if (dpd == NULL)
@@ -300,7 +309,7 @@ dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
*dpd = default_dpd;
INIT_LIST_HEAD(&dpd->channel_detectors);
- dpd->ah = ah;
+ dpd->common = common;
if (dpd->set_dfs_domain(dpd, region))
return dpd;
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h b/drivers/net/wireless/ath/dfs_pattern_detector.h
index 90a5abcc4265..dde2652b787c 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.h
@@ -22,6 +22,19 @@
#include <linux/nl80211.h>
/**
+ * struct ath_dfs_pool_stats - DFS Statistics for global pools
+ */
+struct ath_dfs_pool_stats {
+ u32 pool_reference;
+ u32 pulse_allocated;
+ u32 pulse_alloc_error;
+ u32 pulse_used;
+ u32 pseq_allocated;
+ u32 pseq_alloc_error;
+ u32 pseq_used;
+};
+
+/**
* struct pulse_event - describing pulses reported by PHY
* @ts: pulse time stamp in us
* @freq: channel frequency in MHz
@@ -77,11 +90,12 @@ struct dfs_pattern_detector {
bool (*add_pulse)(struct dfs_pattern_detector *dpd,
struct pulse_event *pe);
+ struct ath_dfs_pool_stats (*get_stats)(struct dfs_pattern_detector *dpd);
enum nl80211_dfs_regions region;
u8 num_radar_types;
u64 last_pulse_ts;
/* needed for ath_dbg() */
- struct ath_hw *ah;
+ struct ath_common *common;
const struct radar_detector_specs *radar_spec;
struct list_head channel_detectors;
@@ -92,15 +106,7 @@ struct dfs_pattern_detector {
* @param region: DFS domain to be used, can be NL80211_DFS_UNSET at creation
* @return instance pointer on success, NULL otherwise
*/
-#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
extern struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region);
-#else
-static inline struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
-{
- return NULL;
-}
-#endif /* CONFIG_ATH9K_DFS_CERTIFIED */
-
+dfs_pattern_detector_init(struct ath_common *common,
+ enum nl80211_dfs_regions region);
#endif /* DFS_PATTERN_DETECTOR_H */
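With the get_stats() op and ath_dfs_pool_stats now living in the shared header, a caller (for example driver debugfs code; the function name here is hypothetical) can read the global pool counters like this:

static void dfs_print_pool_stats(struct dfs_pattern_detector *dpd)
{
	struct ath_dfs_pool_stats st = dpd->get_stats(dpd);

	pr_debug("DFS pools: pulses allocated=%u used=%u alloc errors=%u\n",
		 st.pulse_allocated, st.pulse_used, st.pulse_alloc_error);
	pr_debug("DFS pools: sequences allocated=%u used=%u alloc errors=%u\n",
		 st.pseq_allocated, st.pseq_used, st.pseq_alloc_error);
}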
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
index 5ba4b6fe37c0..43b608178884 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -17,10 +17,14 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include "ath9k.h"
+#include "ath.h"
#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"
-#include "dfs_debug.h"
+
+struct ath_dfs_pool_stats global_dfs_pool_stats = {};
+
+#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
+#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
/**
* struct pulse_elem - elements in pulse queue
@@ -392,7 +396,7 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
pri_detector_reset(de, ts);
- return false;
+ return NULL;
}
ps = pseq_handler_check_detection(de);
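The DFS_POOL_STAT_INC/DEC macros defined above are meant to bracket the pulse/sequence pool allocations in this file. An illustrative, hypothetical allocation helper (not from the patch) would look like:

static struct pulse_elem *pool_get_pulse_elem(void)
{
	struct pulse_elem *p = kzalloc(sizeof(*p), GFP_ATOMIC);

	if (!p) {
		DFS_POOL_STAT_INC(pulse_alloc_error);
		return NULL;
	}
	DFS_POOL_STAT_INC(pulse_allocated);
	DFS_POOL_STAT_INC(pulse_used);
	return p;
}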
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h b/drivers/net/wireless/ath/dfs_pri_detector.h
index 723962d1abc6..79f0fff4d1e6 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
+++ b/drivers/net/wireless/ath/dfs_pri_detector.h
@@ -19,6 +19,8 @@
#include <linux/list.h>
+extern struct ath_dfs_pool_stats global_dfs_pool_stats;
+
/**
* struct pri_sequence - sequence of pulses matching one PRI
* @head: list_head
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 7d077c752dd5..c00687e05688 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -356,14 +356,131 @@ static u16 ath_regd_find_country_by_name(char *alpha2)
return -1;
}
+static int __ath_reg_dyn_country(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ struct regulatory_request *request)
+{
+ u16 country_code;
+
+ if (!ath_is_world_regd(reg))
+ return -EINVAL;
+
+ country_code = ath_regd_find_country_by_name(request->alpha2);
+ if (country_code == (u16) -1)
+ return -EINVAL;
+
+ reg->current_rd = COUNTRY_ERD_FLAG;
+ reg->current_rd |= country_code;
+
+ __ath_regd_init(reg);
+
+ ath_reg_apply_world_flags(wiphy, request->initiator, reg);
+
+ return 0;
+}
+
+static void ath_reg_dyn_country(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ struct regulatory_request *request)
+{
+ if (__ath_reg_dyn_country(wiphy, reg, request))
+ return;
+
+ printk(KERN_DEBUG "ath: regdomain 0x%0x "
+ "dynamically updated by %s\n",
+ reg->current_rd,
+ reg_initiator_name(request->initiator));
+}
+
+static bool dynamic_country_user_possible(struct ath_regulatory *reg)
+{
+ if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
+ return true;
+
+ switch (reg->country_code) {
+ case CTRY_UNITED_STATES:
+ case CTRY_JAPAN1:
+ case CTRY_JAPAN2:
+ case CTRY_JAPAN3:
+ case CTRY_JAPAN4:
+ case CTRY_JAPAN5:
+ case CTRY_JAPAN6:
+ case CTRY_JAPAN7:
+ case CTRY_JAPAN8:
+ case CTRY_JAPAN9:
+ case CTRY_JAPAN10:
+ case CTRY_JAPAN11:
+ case CTRY_JAPAN12:
+ case CTRY_JAPAN13:
+ case CTRY_JAPAN14:
+ case CTRY_JAPAN15:
+ case CTRY_JAPAN16:
+ case CTRY_JAPAN17:
+ case CTRY_JAPAN18:
+ case CTRY_JAPAN19:
+ case CTRY_JAPAN20:
+ case CTRY_JAPAN21:
+ case CTRY_JAPAN22:
+ case CTRY_JAPAN23:
+ case CTRY_JAPAN24:
+ case CTRY_JAPAN25:
+ case CTRY_JAPAN26:
+ case CTRY_JAPAN27:
+ case CTRY_JAPAN28:
+ case CTRY_JAPAN29:
+ case CTRY_JAPAN30:
+ case CTRY_JAPAN31:
+ case CTRY_JAPAN32:
+ case CTRY_JAPAN33:
+ case CTRY_JAPAN34:
+ case CTRY_JAPAN35:
+ case CTRY_JAPAN36:
+ case CTRY_JAPAN37:
+ case CTRY_JAPAN38:
+ case CTRY_JAPAN39:
+ case CTRY_JAPAN40:
+ case CTRY_JAPAN41:
+ case CTRY_JAPAN42:
+ case CTRY_JAPAN43:
+ case CTRY_JAPAN44:
+ case CTRY_JAPAN45:
+ case CTRY_JAPAN46:
+ case CTRY_JAPAN47:
+ case CTRY_JAPAN48:
+ case CTRY_JAPAN49:
+ case CTRY_JAPAN50:
+ case CTRY_JAPAN51:
+ case CTRY_JAPAN52:
+ case CTRY_JAPAN53:
+ case CTRY_JAPAN54:
+ case CTRY_JAPAN55:
+ case CTRY_JAPAN56:
+ case CTRY_JAPAN57:
+ case CTRY_JAPAN58:
+ case CTRY_JAPAN59:
+ return false;
+ }
+
+ return true;
+}
+
+static void ath_reg_dyn_country_user(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ struct regulatory_request *request)
+{
+ if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
+ return;
+ if (!dynamic_country_user_possible(reg))
+ return;
+ ath_reg_dyn_country(wiphy, reg, request);
+}
+
void ath_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct ath_regulatory *reg)
{
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
- u16 country_code;
-
/* We always apply this */
ath_reg_apply_radar_flags(wiphy);
@@ -388,25 +505,12 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
sizeof(struct ath_regulatory));
break;
case NL80211_REGDOM_SET_BY_DRIVER:
+ break;
case NL80211_REGDOM_SET_BY_USER:
+ ath_reg_dyn_country_user(wiphy, reg, request);
break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
- if (!ath_is_world_regd(reg))
- break;
-
- country_code = ath_regd_find_country_by_name(request->alpha2);
- if (country_code == (u16) -1)
- break;
-
- reg->current_rd = COUNTRY_ERD_FLAG;
- reg->current_rd |= country_code;
-
- printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
- reg->current_rd);
- __ath_regd_init(reg);
-
- ath_reg_apply_world_flags(wiphy, request->initiator, reg);
-
+ ath_reg_dyn_country(wiphy, reg, request);
break;
}
}
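For context on the reg->current_rd assignment factored into __ath_reg_dyn_country() above: setting COUNTRY_ERD_FLAG marks the low bits of current_rd as an ISO country code rather than a raw regdomain id (the flag value itself lives in regd.h and is not shown in this hunk). Two hypothetical helpers, added only as a sketch, make the encoding explicit:

static inline bool rd_is_country_code(u16 rd)
{
	return rd & COUNTRY_ERD_FLAG;	/* low bits carry a country code */
}

static inline u16 rd_country_code(u16 rd)
{
	return rd & ~COUNTRY_ERD_FLAG;	/* strip the flag to get the code */
}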
diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig
new file mode 100644
index 000000000000..591ebaea8265
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Kconfig
@@ -0,0 +1,16 @@
+config WCN36XX
+ tristate "Qualcomm Atheros WCN3660/3680 support"
+ depends on MAC80211 && HAS_DMA
+ ---help---
+ This module adds support for wireless adapters based on
+ Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets.
+
+ If you choose to build a module, it'll be called wcn36xx.
+
+config WCN36XX_DEBUGFS
+ bool "WCN36XX debugfs support"
+ depends on WCN36XX
+ ---help---
+ Enable debugfs support
+
+ If unsure, say Y to make it easier to debug problems.
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
new file mode 100644
index 000000000000..50c43b4382ba
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_WCN36XX) := wcn36xx.o
+wcn36xx-y += main.o \
+ dxe.o \
+ txrx.o \
+ smd.o \
+ pmc.o \
+ debug.o
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
new file mode 100644
index 000000000000..5b84f7ae0b1e
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include "wcn36xx.h"
+#include "debug.h"
+#include "pmc.h"
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+
+static ssize_t read_file_bool_bmps(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+ char buf[3];
+
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type) {
+ if (vif_priv->pw_state == WCN36XX_BMPS)
+ buf[0] = '1';
+ else
+ buf[0] = '0';
+ break;
+ }
+ }
+ buf[1] = '\n';
+ buf[2] = 0x00;
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_file_bool_bmps(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+
+ char buf[32];
+ int buf_size;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ switch (buf[0]) {
+ case 'y':
+ case 'Y':
+ case '1':
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type) {
+ wcn36xx_enable_keep_alive_null_packet(wcn, vif);
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ }
+ }
+ break;
+ case 'n':
+ case 'N':
+ case '0':
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type)
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ break;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_wcn36xx_bmps = {
+ .open = simple_open,
+ .read = read_file_bool_bmps,
+ .write = write_file_bool_bmps,
+};
+
+static ssize_t write_file_dump(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ char buf[255], *tmp;
+ int buf_size;
+ u32 arg[WCN36xx_MAX_DUMP_ARGS];
+ int i;
+
+ memset(buf, 0, sizeof(buf));
+ memset(arg, 0, sizeof(arg));
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ tmp = buf;
+
+ for (i = 0; i < WCN36xx_MAX_DUMP_ARGS; i++) {
+ char *begin;
+ begin = strsep(&tmp, " ");
+ if (begin == NULL)
+ break;
+
+ if (kstrtoul(begin, 0, (unsigned long *)(arg + i)) != 0)
+ break;
+ }
+
+ wcn36xx_info("DUMP args is %d %d %d %d %d\n", arg[0], arg[1], arg[2],
+ arg[3], arg[4]);
+ wcn36xx_smd_dump_cmd_req(wcn, arg[0], arg[1], arg[2], arg[3], arg[4]);
+
+ return count;
+}
+
+static const struct file_operations fops_wcn36xx_dump = {
+ .open = simple_open,
+ .write = write_file_dump,
+};
+
+#define ADD_FILE(name, mode, fop, priv_data) \
+ do { \
+ struct dentry *d; \
+ d = debugfs_create_file(__stringify(name), \
+ mode, dfs->rootdir, \
+ priv_data, fop); \
+ dfs->file_##name.dentry = d; \
+ if (IS_ERR(d)) { \
+ wcn36xx_warn("Create the debugfs entry failed");\
+ dfs->file_##name.dentry = NULL; \
+ } \
+ } while (0)
+
+
+void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+ struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+
+ dfs->rootdir = debugfs_create_dir(KBUILD_MODNAME,
+ wcn->hw->wiphy->debugfsdir);
+ if (IS_ERR(dfs->rootdir)) {
+ wcn36xx_warn("Create the debugfs failed\n");
+ dfs->rootdir = NULL;
+ }
+
+ ADD_FILE(bmps_switcher, S_IRUSR | S_IWUSR,
+ &fops_wcn36xx_bmps, wcn);
+ ADD_FILE(dump, S_IWUSR, &fops_wcn36xx_dump, wcn);
+}
+
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+ struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+ debugfs_remove_recursive(dfs->rootdir);
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
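For clarity, this is roughly what ADD_FILE(dump, S_IWUSR, &fops_wcn36xx_dump, wcn) above expands to (hand-expanded sketch; __stringify(dump) yields the literal "dump", and stashing the dentry lets debugfs_remove_recursive() tear everything down later):

do {
	struct dentry *d;

	d = debugfs_create_file("dump", S_IWUSR, dfs->rootdir,
				wcn, &fops_wcn36xx_dump);
	dfs->file_dump.dentry = d;
	if (IS_ERR(d)) {
		wcn36xx_warn("Create the debugfs entry failed");
		dfs->file_dump.dentry = NULL;
	}
} while (0);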
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.h b/drivers/net/wireless/ath/wcn36xx/debug.h
new file mode 100644
index 000000000000..46307aa562d3
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_DEBUG_H_
+#define _WCN36XX_DEBUG_H_
+
+#include <linux/kernel.h>
+
+#define WCN36xx_MAX_DUMP_ARGS 5
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+struct wcn36xx_dfs_file {
+ struct dentry *dentry;
+ u32 value;
+};
+
+struct wcn36xx_dfs_entry {
+ struct dentry *rootdir;
+ struct wcn36xx_dfs_file file_bmps_switcher;
+ struct wcn36xx_dfs_file file_dump;
+};
+
+void wcn36xx_debugfs_init(struct wcn36xx *wcn);
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn);
+
+#else
+static inline void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+}
+static inline void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+#endif /* _WCN36XX_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
new file mode 100644
index 000000000000..ee25786b4447
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* DXE - DMA transfer engine
+ * we have 2 channels (high prio and low prio) for TX and 2 channels for RX.
+ * Data packets are transferred through the low-priority channels and
+ * management packets through the high-priority ones.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include "wcn36xx.h"
+#include "txrx.h"
+
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
+{
+ struct wcn36xx_dxe_ch *ch = is_low ?
+ &wcn->dxe_tx_l_ch :
+ &wcn->dxe_tx_h_ch;
+
+ return ch->head_blk_ctl->bd_cpu_addr;
+}
+
+static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
+{
+ wcn36xx_dbg(WCN36XX_DBG_DXE,
+ "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
+ addr, data);
+
+ writel(data, wcn->mmio + addr);
+}
+
+static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
+{
+ *data = readl(wcn->mmio + addr);
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE,
+ "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
+ addr, *data);
+}
+
+static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
+ int i;
+
+ for (i = 0; i < ch->desc_num && ctl; i++) {
+ next = ctl->next;
+ kfree(ctl);
+ ctl = next;
+ }
+}
+
+static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *prev_ctl = NULL;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+ int i;
+
+ for (i = 0; i < ch->desc_num; i++) {
+ cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
+ if (!cur_ctl)
+ goto out_fail;
+
+ cur_ctl->ctl_blk_order = i;
+ if (i == 0) {
+ ch->head_blk_ctl = cur_ctl;
+ ch->tail_blk_ctl = cur_ctl;
+ } else if (ch->desc_num - 1 == i) {
+ prev_ctl->next = cur_ctl;
+ cur_ctl->next = ch->head_blk_ctl;
+ } else {
+ prev_ctl->next = cur_ctl;
+ }
+ prev_ctl = cur_ctl;
+ }
+
+ return 0;
+
+out_fail:
+ wcn36xx_dxe_free_ctl_block(ch);
+ return -ENOMEM;
+}
+
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
+{
+ int ret;
+
+ wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
+ wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
+ wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
+ wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
+
+ wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+ wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+ wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
+ wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
+
+ wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
+ wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;
+
+ wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
+ wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
+
+ wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
+ wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
+
+ wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
+ wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
+
+ wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
+ wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
+
+ /* DXE control block allocation */
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
+ if (ret)
+ goto out_err;
+
+ /* Initialize SMSM state Clear TX Enable RING EMPTY STATE */
+ ret = wcn->ctrl_ops->smsm_change_state(
+ WCN36XX_SMSM_WLAN_TX_ENABLE,
+ WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
+
+ return 0;
+
+out_err:
+ wcn36xx_err("Failed to allocate DXE control blocks\n");
+ wcn36xx_dxe_free_ctl_blks(wcn);
+ return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
+{
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
+}
+
+static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
+{
+ struct wcn36xx_dxe_desc *cur_dxe = NULL;
+ struct wcn36xx_dxe_desc *prev_dxe = NULL;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+ size_t size;
+ int i;
+
+ size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
+ wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
+ GFP_KERNEL);
+ if (!wcn_ch->cpu_addr)
+ return -ENOMEM;
+
+ memset(wcn_ch->cpu_addr, 0, size);
+
+ cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
+ cur_ctl = wcn_ch->head_blk_ctl;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ cur_ctl->desc = cur_dxe;
+ cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
+ i * sizeof(struct wcn36xx_dxe_desc);
+
+ switch (wcn_ch->ch_type) {
+ case WCN36XX_DXE_CH_TX_L:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
+ cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
+ break;
+ case WCN36XX_DXE_CH_TX_H:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
+ cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
+ break;
+ case WCN36XX_DXE_CH_RX_L:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+ cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
+ break;
+ case WCN36XX_DXE_CH_RX_H:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+ cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
+ break;
+ }
+ if (0 == i) {
+ cur_dxe->phy_next_l = 0;
+ } else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
+ prev_dxe->phy_next_l =
+ cur_ctl->desc_phy_addr;
+ } else if (i == (wcn_ch->desc_num - 1)) {
+ prev_dxe->phy_next_l =
+ cur_ctl->desc_phy_addr;
+ cur_dxe->phy_next_l =
+ wcn_ch->head_blk_ctl->desc_phy_addr;
+ }
+ cur_ctl = cur_ctl->next;
+ prev_dxe = cur_dxe;
+ cur_dxe++;
+ }
+
+ return 0;
+}
+
+static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
+ struct wcn36xx_dxe_mem_pool *pool)
+{
+ int i, chunk_size = pool->chunk_size;
+ dma_addr_t bd_phy_addr = pool->phy_addr;
+ void *bd_cpu_addr = pool->virt_addr;
+ struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
+
+ for (i = 0; i < ch->desc_num; i++) {
+ /* Only every second dxe needs a bd pointer;
+ the others point to the skb data */
+ if (!(i & 1)) {
+ cur->bd_phy_addr = bd_phy_addr;
+ cur->bd_cpu_addr = bd_cpu_addr;
+ bd_phy_addr += chunk_size;
+ bd_cpu_addr += chunk_size;
+ } else {
+ cur->bd_phy_addr = 0;
+ cur->bd_cpu_addr = NULL;
+ }
+ cur = cur->next;
+ }
+}
+
+static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
+{
+ int reg_data = 0;
+
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_INT_MASK_REG,
+ &reg_data);
+
+ reg_data |= wcn_ch;
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_INT_MASK_REG,
+ (int)reg_data);
+ return 0;
+}
+
+static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
+{
+ struct wcn36xx_dxe_desc *dxe = ctl->desc;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ dxe->dst_addr_l = dma_map_single(NULL,
+ skb_tail_pointer(skb),
+ WCN36XX_PKT_SIZE,
+ DMA_FROM_DEVICE);
+ ctl->skb = skb;
+
+ return 0;
+}
+
+static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *wcn_ch)
+{
+ int i;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+
+ cur_ctl = wcn_ch->head_blk_ctl;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ wcn36xx_dxe_fill_skb(cur_ctl);
+ cur_ctl = cur_ctl->next;
+ }
+
+ return 0;
+}
+
+static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *wcn_ch)
+{
+ struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
+ int i;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ kfree_skb(cur->skb);
+ cur = cur->next;
+ }
+}
+
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
+{
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wcn->dxe_lock, flags);
+ skb = wcn->tx_ack_skb;
+ wcn->tx_ack_skb = NULL;
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+ if (!skb) {
+ wcn36xx_warn("Spurious TX complete indication\n");
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+
+ if (status == 1)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
+
+ ieee80211_tx_status_irqsafe(wcn->hw, skb);
+ ieee80211_wake_queues(wcn->hw);
+}
+
+static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
+ struct ieee80211_tx_info *info;
+ unsigned long flags;
+
+ /*
+ * Loop at least once (hence do-while): when the ring is completely
+ * full, head and tail point to the same element and a plain while
+ * loop would not run at all.
+ */
+ do {
+ if (ctl->skb) {
+ dma_unmap_single(NULL, ctl->desc->src_addr_l,
+ ctl->skb->len, DMA_TO_DEVICE);
+ info = IEEE80211_SKB_CB(ctl->skb);
+ if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
+ /* Keep frame until TX status comes */
+ ieee80211_free_txskb(wcn->hw, ctl->skb);
+ }
+ spin_lock_irqsave(&ctl->skb_lock, flags);
+ if (wcn->queues_stopped) {
+ wcn->queues_stopped = false;
+ ieee80211_wake_queues(wcn->hw);
+ }
+ spin_unlock_irqrestore(&ctl->skb_lock, flags);
+
+ ctl->skb = NULL;
+ }
+ ctl = ctl->next;
+ } while (ctl != ch->head_blk_ctl &&
+ !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));
+
+ ch->tail_blk_ctl = ctl;
+}
+
+static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
+{
+ struct wcn36xx *wcn = (struct wcn36xx *)dev;
+ int int_src, int_reason;
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+ if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
+ &int_reason);
+
+ /* TODO: Check int_reason */
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_H);
+
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_H);
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
+ reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
+ }
+
+ if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
+ &int_reason);
+ /* TODO: Check int_reason */
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_L);
+
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_L);
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
+ reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
+{
+ struct wcn36xx *wcn = (struct wcn36xx *)dev;
+
+ disable_irq_nosync(wcn->rx_irq);
+ wcn36xx_dxe_rx_frame(wcn);
+ enable_irq(wcn->rx_irq);
+ return IRQ_HANDLED;
+}
+
+static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
+{
+ int ret;
+
+ ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
+ IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
+ if (ret) {
+ wcn36xx_err("failed to alloc tx irq\n");
+ goto out_err;
+ }
+
+ ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
+ "wcn36xx_rx", wcn);
+ if (ret) {
+ wcn36xx_err("failed to alloc rx irq\n");
+ goto out_txirq;
+ }
+
+ enable_irq_wake(wcn->rx_irq);
+
+ return 0;
+
+out_txirq:
+ free_irq(wcn->tx_irq, wcn);
+out_err:
+ return ret;
+
+}
+
+static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
+ struct wcn36xx_dxe_desc *dxe = ctl->desc;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+
+ while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
+ skb = ctl->skb;
+ dma_addr = dxe->dst_addr_l;
+ wcn36xx_dxe_fill_skb(ctl);
+
+ switch (ch->ch_type) {
+ case WCN36XX_DXE_CH_RX_L:
+ dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+ WCN36XX_DXE_INT_CH1_MASK);
+ break;
+ case WCN36XX_DXE_CH_RX_H:
+ dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+ WCN36XX_DXE_INT_CH3_MASK);
+ break;
+ default:
+ wcn36xx_warn("Unknown channel\n");
+ }
+
+ dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
+ DMA_FROM_DEVICE);
+ wcn36xx_rx_skb(wcn, skb);
+ ctl = ctl->next;
+ dxe = ctl->desc;
+ }
+
+ ch->head_blk_ctl = ctl;
+
+ return 0;
+}
+
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
+{
+ int int_src;
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+ /* RX_LOW_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_DXE_INT_CH1_MASK);
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
+ }
+
+ /* RX_HIGH_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
+ /* Clean up all the INT within this channel */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_DXE_INT_CH3_MASK);
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
+ }
+
+ if (!int_src)
+ wcn36xx_warn("No DXE interrupt pending\n");
+}
+
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
+{
+ size_t s;
+ void *cpu_addr;
+
+ /* Allocate BD headers for MGMT frames */
+
+ /* Where this comes from, ask QC */
+ wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
+ 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
+
+ s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+ cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
+ GFP_KERNEL);
+ if (!cpu_addr)
+ goto out_err;
+
+ wcn->mgmt_mem_pool.virt_addr = cpu_addr;
+ memset(cpu_addr, 0, s);
+
+ /* Allocate BD headers for DATA frames */
+
+ /* Where this comes from, ask QC */
+ wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
+ 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
+
+ s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+ cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
+ GFP_KERNEL);
+ if (!cpu_addr)
+ goto out_err;
+
+ wcn->data_mem_pool.virt_addr = cpu_addr;
+ memset(cpu_addr, 0, s);
+
+ return 0;
+
+out_err:
+ wcn36xx_dxe_free_mem_pools(wcn);
+ wcn36xx_err("Failed to allocate BD mempool\n");
+ return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
+{
+ if (wcn->mgmt_mem_pool.virt_addr)
+ dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
+ WCN36XX_DXE_CH_DESC_NUMB_TX_H,
+ wcn->mgmt_mem_pool.virt_addr,
+ wcn->mgmt_mem_pool.phy_addr);
+
+ if (wcn->data_mem_pool.virt_addr) {
+ dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
+ WCN36XX_DXE_CH_DESC_NUMB_TX_L,
+ wcn->data_mem_pool.virt_addr,
+ wcn->data_mem_pool.phy_addr);
+ }
+}
+
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+ struct wcn36xx_vif *vif_priv,
+ struct sk_buff *skb,
+ bool is_low)
+{
+ struct wcn36xx_dxe_ctl *ctl = NULL;
+ struct wcn36xx_dxe_desc *desc = NULL;
+ struct wcn36xx_dxe_ch *ch = NULL;
+ unsigned long flags;
+
+ ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
+
+ ctl = ch->head_blk_ctl;
+
+ spin_lock_irqsave(&ctl->next->skb_lock, flags);
+
+ /*
+ * If the skb is not NULL, we have reached the tail of the ring and the
+ * ring is full. Stop the queues to let mac80211 back off until the ring
+ * has an empty slot again.
+ */
+ if (NULL != ctl->next->skb) {
+ ieee80211_stop_queues(wcn->hw);
+ wcn->queues_stopped = true;
+ spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+
+ ctl->skb = NULL;
+ desc = ctl->desc;
+
+ /* Set source address of the BD we send */
+ desc->src_addr_l = ctl->bd_phy_addr;
+
+ desc->dst_addr_l = ch->dxe_wq;
+ desc->fr_len = sizeof(struct wcn36xx_tx_bd);
+ desc->ctrl = ch->ctrl_bd;
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
+ (char *)desc, sizeof(*desc));
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
+ "BD >>> ", (char *)ctl->bd_cpu_addr,
+ sizeof(struct wcn36xx_tx_bd));
+
+ /* Set source address of the SKB we send */
+ ctl = ctl->next;
+ ctl->skb = skb;
+ desc = ctl->desc;
+ if (ctl->bd_cpu_addr) {
+ wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
+ return -EINVAL;
+ }
+
+ desc->src_addr_l = dma_map_single(NULL,
+ ctl->skb->data,
+ ctl->skb->len,
+ DMA_TO_DEVICE);
+
+ desc->dst_addr_l = ch->dxe_wq;
+ desc->fr_len = ctl->skb->len;
+
+ /* set dxe descriptor to VALID */
+ desc->ctrl = ch->ctrl_skb;
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
+ (char *)desc, sizeof(*desc));
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
+ (char *)ctl->skb->data, ctl->skb->len);
+
+ /* Move the head of the ring to the next empty descriptor */
+ ch->head_blk_ctl = ctl->next;
+
+ /*
+ * When connected and trying to send a data frame, the chip can be in
+ * sleep mode, where a register write will not wake it up. Instead,
+ * notify the chip about the new frame through the SMSM bus.
+ */
+ if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
+ wcn->ctrl_ops->smsm_change_state(
+ 0,
+ WCN36XX_SMSM_WLAN_TX_ENABLE);
+ } else {
+ /* indicate End Of Packet and generate interrupt on descriptor
+ * done.
+ */
+ wcn36xx_dxe_write_register(wcn,
+ ch->reg_ctrl, ch->def_ctrl);
+ }
+
+ return 0;
+}
+
+int wcn36xx_dxe_init(struct wcn36xx *wcn)
+{
+ int reg_data = 0, ret;
+
+ reg_data = WCN36XX_DXE_REG_RESET;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
+
+ /* Setting interrupt path */
+ reg_data = WCN36XX_DXE_CCU_INT;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
+
+ /***************************************/
+ /* Init descriptors for TX LOW channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
+ wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
+ wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Program DMA destination addr for TX LOW */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_TX_L,
+ WCN36XX_DXE_WQ_TX_L);
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
+
+ /***************************************/
+ /* Init descriptors for TX HIGH channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
+ wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
+ wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Program DMA destination addr for TX HIGH */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_TX_H,
+ WCN36XX_DXE_WQ_TX_H);
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
+
+ /***************************************/
+ /* Init descriptors for RX LOW channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);
+
+ /* For RX we need to preallocate buffers */
+ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
+ wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Write DMA source address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_SRC_ADDR_RX_L,
+ WCN36XX_DXE_WQ_RX_L);
+
+ /* Program preallocated destination address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_RX_L,
+ wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
+
+ /* Enable default control registers */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_REG_CTL_RX_L,
+ WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
+
+ /***************************************/
+ /* Init descriptors for RX HIGH channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);
+
+ /* For RX we need to preallocate buffers */
+ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
+ wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Write DMA source address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_SRC_ADDR_RX_H,
+ WCN36XX_DXE_WQ_RX_H);
+
+ /* Program preallocated destination address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_RX_H,
+ wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
+
+ /* Enable default control registers */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_REG_CTL_RX_H,
+ WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
+
+ ret = wcn36xx_dxe_request_irqs(wcn);
+ if (ret < 0)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ return ret;
+}
+
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
+{
+ free_irq(wcn->tx_irq, wcn);
+ free_irq(wcn->rx_irq, wcn);
+
+ if (wcn->tx_ack_skb) {
+ ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
+ wcn->tx_ack_skb = NULL;
+ }
+
+ wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
+ wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
+}
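Finally, a small user-space model (not driver code) of the circular control-block ring that wcn36xx_dxe_allocate_ctl_block() builds and that reap_tx_dxes() walks: desc_num nodes linked into a singly linked ring, with the head advancing as frames are queued and the tail advancing as completions are reaped.

#include <stdlib.h>

struct node {
	struct node *next;
	void *payload;
};

/* Allocate n nodes and close them into a ring; returns the head or NULL. */
static struct node *ring_alloc(int n)
{
	struct node *head = NULL, *prev = NULL;
	int i;

	if (n <= 0)
		return NULL;

	for (i = 0; i < n; i++) {
		struct node *cur = calloc(1, sizeof(*cur));

		if (!cur)
			return NULL;	/* cleanup of partial ring omitted */
		if (!head)
			head = cur;
		else
			prev->next = cur;
		prev = cur;
	}
	prev->next = head;		/* last node points back to the first */
	return head;
}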
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
new file mode 100644
index 000000000000..c88562f85de1
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DXE_H_
+#define _DXE_H_
+
+#include "wcn36xx.h"
+
+/*
+TX_LOW = DMA0
+TX_HIGH = DMA4
+RX_LOW = DMA1
+RX_HIGH = DMA3
+H2H_TEST_RX_TX = DMA2
+*/
+
+/* DXE registers */
+#define WCN36XX_DXE_MEM_BASE 0x03000000
+#define WCN36XX_DXE_MEM_REG 0x202000
+
+#define WCN36XX_DXE_CCU_INT 0xA0011
+#define WCN36XX_DXE_REG_CCU_INT 0x200b10
+
+/* TODO: this must be calculated properly, not hardcoded */
+#define WCN36XX_DXE_CTRL_TX_L 0x328a44
+#define WCN36XX_DXE_CTRL_TX_H 0x32ce44
+#define WCN36XX_DXE_CTRL_RX_L 0x12ad2f
+#define WCN36XX_DXE_CTRL_RX_H 0x12d12f
+#define WCN36XX_DXE_CTRL_TX_H_BD 0x30ce45
+#define WCN36XX_DXE_CTRL_TX_H_SKB 0x32ce4d
+#define WCN36XX_DXE_CTRL_TX_L_BD 0x308a45
+#define WCN36XX_DXE_CTRL_TX_L_SKB 0x328a4d
+
+/* TODO: this must be calculated properly, not hardcoded */
+#define WCN36XX_DXE_WQ_TX_L 0x17
+#define WCN36XX_DXE_WQ_TX_H 0x17
+#define WCN36XX_DXE_WQ_RX_L 0xB
+#define WCN36XX_DXE_WQ_RX_H 0x4
+
+/* DXE descriptor control field */
+#define WCN36XX_DXE_CTRL_VALID_MASK (0x00000001)
+
+/* TODO: this must be calculated properly, not hardcoded */
+/* DXE default control register values */
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_L 0x847EAD2F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_H 0x84FED12F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_H 0x853ECF4D
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_L 0x843e8b4d
+
+/* Common DXE registers */
+#define WCN36XX_DXE_MEM_CSR (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_REG_CSR_RESET (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_ENCH_ADDR (WCN36XX_DXE_MEM_REG + 0x04)
+#define WCN36XX_DXE_REG_CH_EN (WCN36XX_DXE_MEM_REG + 0x08)
+#define WCN36XX_DXE_REG_CH_DONE (WCN36XX_DXE_MEM_REG + 0x0C)
+#define WCN36XX_DXE_REG_CH_ERR (WCN36XX_DXE_MEM_REG + 0x10)
+#define WCN36XX_DXE_INT_MASK_REG (WCN36XX_DXE_MEM_REG + 0x18)
+#define WCN36XX_DXE_INT_SRC_RAW_REG (WCN36XX_DXE_MEM_REG + 0x20)
+ /* #define WCN36XX_DXE_INT_CH6_MASK 0x00000040 */
+ /* #define WCN36XX_DXE_INT_CH5_MASK 0x00000020 */
+ #define WCN36XX_DXE_INT_CH4_MASK 0x00000010
+ #define WCN36XX_DXE_INT_CH3_MASK 0x00000008
+ /* #define WCN36XX_DXE_INT_CH2_MASK 0x00000004 */
+ #define WCN36XX_DXE_INT_CH1_MASK 0x00000002
+ #define WCN36XX_DXE_INT_CH0_MASK 0x00000001
+#define WCN36XX_DXE_0_INT_CLR (WCN36XX_DXE_MEM_REG + 0x30)
+#define WCN36XX_DXE_0_INT_ED_CLR (WCN36XX_DXE_MEM_REG + 0x34)
+#define WCN36XX_DXE_0_INT_DONE_CLR (WCN36XX_DXE_MEM_REG + 0x38)
+#define WCN36XX_DXE_0_INT_ERR_CLR (WCN36XX_DXE_MEM_REG + 0x3C)
+
+#define WCN36XX_DXE_0_CH0_STATUS (WCN36XX_DXE_MEM_REG + 0x404)
+#define WCN36XX_DXE_0_CH1_STATUS (WCN36XX_DXE_MEM_REG + 0x444)
+#define WCN36XX_DXE_0_CH2_STATUS (WCN36XX_DXE_MEM_REG + 0x484)
+#define WCN36XX_DXE_0_CH3_STATUS (WCN36XX_DXE_MEM_REG + 0x4C4)
+#define WCN36XX_DXE_0_CH4_STATUS (WCN36XX_DXE_MEM_REG + 0x504)
+
+#define WCN36XX_DXE_REG_RESET 0x5c89
+
+/* Temporary BMU Workqueue 4 */
+#define WCN36XX_DXE_BMU_WQ_RX_LOW 0xB
+#define WCN36XX_DXE_BMU_WQ_RX_HIGH 0x4
+/* DMA channel offset */
+#define WCN36XX_DXE_TX_LOW_OFFSET 0x400
+#define WCN36XX_DXE_TX_HIGH_OFFSET 0x500
+#define WCN36XX_DXE_RX_LOW_OFFSET 0x440
+#define WCN36XX_DXE_RX_HIGH_OFFSET 0x4C0
+
+/* Address of the next DXE descriptor */
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR 0x001C
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
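+
+/*
+ * Editor's sketch (not part of the original register map documentation):
+ * every per-channel register address above is composed as
+ * WCN36XX_DXE_MEM_REG + <channel offset> + <register offset>; for example
+ * the RX-low "next descriptor" register resolves to 0x202000 + 0x440 + 0x1C.
+ * A hypothetical helper expressing the same composition could look like:
+ *
+ *	static inline u32 wcn36xx_dxe_ch_reg(u32 ch_offset, u32 reg_offset)
+ *	{
+ *		return WCN36XX_DXE_MEM_REG + ch_offset + reg_offset;
+ *	}
+ *
+ *	wcn36xx_dxe_ch_reg(WCN36XX_DXE_RX_LOW_OFFSET,
+ *			   WCN36XX_DXE_CH_NEXT_DESC_ADDR);
+ */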
+
+/* DXE Descriptor source address */
+#define WCN36XX_DXE_CH_SRC_ADDR 0x000C
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_SRC_ADDR)
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_SRC_ADDR)
+
+/* DXE Descriptor address destination address */
+#define WCN36XX_DXE_CH_DEST_ADDR 0x0014
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+
+/* Interrupt status */
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR 0x0004
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+
+
+/* DXE default control register */
+#define WCN36XX_DXE_REG_CTL_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET)
+#define WCN36XX_DXE_REG_CTL_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET)
+
+#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
+
+
+/* Interrupt control channel mask */
+#define WCN36XX_INT_MASK_CHAN_TX_L 0x00000001
+#define WCN36XX_INT_MASK_CHAN_RX_L 0x00000002
+#define WCN36XX_INT_MASK_CHAN_RX_H 0x00000008
+#define WCN36XX_INT_MASK_CHAN_TX_H 0x00000010
+
+#define WCN36XX_BD_CHUNK_SIZE 128
+
+#define WCN36XX_PKT_SIZE 0xF20
+enum wcn36xx_dxe_ch_type {
+ WCN36XX_DXE_CH_TX_L,
+ WCN36XX_DXE_CH_TX_H,
+ WCN36XX_DXE_CH_RX_L,
+ WCN36XX_DXE_CH_RX_H
+};
+
+/* number of descriptors per channel */
+enum wcn36xx_dxe_ch_desc_num {
+ WCN36XX_DXE_CH_DESC_NUMB_TX_L = 128,
+ WCN36XX_DXE_CH_DESC_NUMB_TX_H = 10,
+ WCN36XX_DXE_CH_DESC_NUMB_RX_L = 512,
+ WCN36XX_DXE_CH_DESC_NUMB_RX_H = 40
+};
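+
+/*
+ * Editor's note (illustrative arithmetic only): combined with struct
+ * wcn36xx_dxe_desc below (eight u32 fields, i.e. 32 bytes when packed),
+ * the TX-low ring would need 128 * 32 = 4096 bytes of descriptor memory
+ * and the RX-low ring 512 * 32 = 16384 bytes.
+ */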
+
+/**
+ * struct wcn36xx_dxe_desc - describes descriptor of one DXE buffer
+ *
+ * @ctrl: is a union that consists of the following bits:
+ * union {
+ * u32 valid :1; //0 = DMA stop, 1 = DMA continue with this
+ * //descriptor
+ * u32 transfer_type :2; //0 = Host to Host space
+ * u32 eop :1; //End of Packet
+ * u32 bd_handling :1; //if transferType = Host to BMU, then 0
+ * // means first 128 bytes contain BD, and 1
+ * // means create new empty BD
+ * u32 siq :1; // SIQ
+ * u32 diq :1; // DIQ
+ * u32 pdu_rel :1; //0 = don't release BD and PDUs when done,
+ * // 1 = release them
+ * u32 bthld_sel :4; //BMU Threshold Select
+ * u32 prio :3; //Specifies the priority level to use for
+ * // the transfer
+ * u32 stop_channel :1; //1 = DMA stops processing further, channel
+ * //requires re-enabling after this
+ * u32 intr :1; //Interrupt on Descriptor Done
+ * u32 rsvd :1; //reserved
+ * u32 size :14;//14 bits used - ignored for BMU transfers,
+ * //only used for host to host transfers?
+ * } ctrl;
+ */
+struct wcn36xx_dxe_desc {
+ u32 ctrl;
+ u32 fr_len;
+
+ u32 src_addr_l;
+ u32 dst_addr_l;
+ u32 phy_next_l;
+ u32 src_addr_h;
+ u32 dst_addr_h;
+ u32 phy_next_h;
+} __packed;
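+
+/*
+ * Editor's sketch (an assumption about usage, not taken from the original
+ * patch): the descriptors are chained into a ring through phy_next_l, and
+ * the valid bit in ctrl gates whether the DMA engine may consume a
+ * descriptor. Linking two descriptors located at physical addresses pa[0]
+ * and pa[1] might look like:
+ *
+ *	desc[0].phy_next_l = pa[1];
+ *	desc[1].phy_next_l = pa[0];	// close the ring
+ *	desc[0].ctrl |= WCN36XX_DXE_CTRL_VALID_MASK;
+ */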
+
+/* DXE Control block */
+struct wcn36xx_dxe_ctl {
+ struct wcn36xx_dxe_ctl *next;
+ struct wcn36xx_dxe_desc *desc;
+ unsigned int desc_phy_addr;
+ int ctl_blk_order;
+ struct sk_buff *skb;
+ spinlock_t skb_lock;
+ void *bd_cpu_addr;
+ dma_addr_t bd_phy_addr;
+};
+
+struct wcn36xx_dxe_ch {
+ enum wcn36xx_dxe_ch_type ch_type;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ enum wcn36xx_dxe_ch_desc_num desc_num;
+ /* DXE control block ring */
+ struct wcn36xx_dxe_ctl *head_blk_ctl;
+ struct wcn36xx_dxe_ctl *tail_blk_ctl;
+
+ /* DXE channel specific configs */
+ u32 dxe_wq;
+ u32 ctrl_bd;
+ u32 ctrl_skb;
+ u32 reg_ctrl;
+ u32 def_ctrl;
+};
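+
+/*
+ * Editor's sketch, assuming the per-channel config fields map onto the
+ * macros above in the same way wcn36xx_dxe_init() programs the RX-high
+ * channel (work queue, control register and default control value):
+ *
+ *	struct wcn36xx_dxe_ch ch = {
+ *		.ch_type  = WCN36XX_DXE_CH_RX_H,
+ *		.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H,
+ *		.dxe_wq   = WCN36XX_DXE_WQ_RX_H,
+ *		.reg_ctrl = WCN36XX_DXE_REG_CTL_RX_H,
+ *		.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_RX_H,
+ *	};
+ */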
+
+/* Memory Pool for BD headers */
+struct wcn36xx_dxe_mem_pool {
+ int chunk_size;
+ void *virt_addr;
+ dma_addr_t phy_addr;
+};
+
+struct wcn36xx_vif;
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn);
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn);
+int wcn36xx_dxe_init(struct wcn36xx *wcn);
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn);
+int wcn36xx_dxe_init_channels(struct wcn36xx *wcn);
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+ struct wcn36xx_vif *vif_priv,
+ struct sk_buff *skb,
+ bool is_low);
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status);
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low);
+#endif /* _DXE_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
new file mode 100644
index 000000000000..c02dbc618724
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -0,0 +1,4657 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HAL_H_
+#define _HAL_H_
+
+/*---------------------------------------------------------------------------
+ API VERSIONING INFORMATION
+
+ The RIVA API is versioned as MAJOR.MINOR.VERSION.REVISION
+ The MAJOR is incremented for major product/architecture changes
+ (and then MINOR/VERSION/REVISION are zeroed)
+ The MINOR is incremented for minor product/architecture changes
+ (and then VERSION/REVISION are zeroed)
+ The VERSION is incremented if a significant API change occurs
+ (and then REVISION is zeroed)
+ The REVISION is incremented if an insignificant API change occurs
+ or if a new API is added
+ All values are in the range 0..255 (ie they are 8-bit values)
+ ---------------------------------------------------------------------------*/
+#define WCN36XX_HAL_VER_MAJOR 1
+#define WCN36XX_HAL_VER_MINOR 4
+#define WCN36XX_HAL_VER_VERSION 1
+#define WCN36XX_HAL_VER_REVISION 2
+
+/* This is to force the compiler to use the maximum size of an int (4 bytes) */
+#define WCN36XX_HAL_MAX_ENUM_SIZE 0x7FFFFFFF
+#define WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE 0x7FFF
+
+/* Max no. of transmit categories */
+#define STACFG_MAX_TC 8
+
+/* The maximum value of access category */
+#define WCN36XX_HAL_MAX_AC 4
+
+#define WCN36XX_HAL_IPV4_ADDR_LEN 4
+
+#define WALN_HAL_STA_INVALID_IDX 0xFF
+#define WCN36XX_HAL_BSS_INVALID_IDX 0xFF
+
+/* Default Beacon template size */
+#define BEACON_TEMPLATE_SIZE 0x180
+
+/* Param Change Bitmap sent to HAL */
+#define PARAM_BCN_INTERVAL_CHANGED (1 << 0)
+#define PARAM_SHORT_PREAMBLE_CHANGED (1 << 1)
+#define PARAM_SHORT_SLOT_TIME_CHANGED (1 << 2)
+#define PARAM_llACOEXIST_CHANGED (1 << 3)
+#define PARAM_llBCOEXIST_CHANGED (1 << 4)
+#define PARAM_llGCOEXIST_CHANGED (1 << 5)
+#define PARAM_HT20MHZCOEXIST_CHANGED (1<<6)
+#define PARAM_NON_GF_DEVICES_PRESENT_CHANGED (1<<7)
+#define PARAM_RIFS_MODE_CHANGED (1<<8)
+#define PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED (1<<9)
+#define PARAM_OBSS_MODE_CHANGED (1<<10)
+#define PARAM_BEACON_UPDATE_MASK \
+ (PARAM_BCN_INTERVAL_CHANGED | \
+ PARAM_SHORT_PREAMBLE_CHANGED | \
+ PARAM_SHORT_SLOT_TIME_CHANGED | \
+ PARAM_llACOEXIST_CHANGED | \
+ PARAM_llBCOEXIST_CHANGED | \
+ PARAM_llGCOEXIST_CHANGED | \
+ PARAM_HT20MHZCOEXIST_CHANGED | \
+ PARAM_NON_GF_DEVICES_PRESENT_CHANGED | \
+ PARAM_RIFS_MODE_CHANGED | \
+ PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED | \
+ PARAM_OBSS_MODE_CHANGED)
+
+/* dump command response Buffer size */
+#define DUMPCMD_RSP_BUFFER 100
+
+/* version string max length (including NULL) */
+#define WCN36XX_HAL_VERSION_LENGTH 64
+
+/* message types for messages exchanged between WDI and HAL */
+enum wcn36xx_hal_host_msg_type {
+ /* Init/De-Init */
+ WCN36XX_HAL_START_REQ = 0,
+ WCN36XX_HAL_START_RSP = 1,
+ WCN36XX_HAL_STOP_REQ = 2,
+ WCN36XX_HAL_STOP_RSP = 3,
+
+ /* Scan */
+ WCN36XX_HAL_INIT_SCAN_REQ = 4,
+ WCN36XX_HAL_INIT_SCAN_RSP = 5,
+ WCN36XX_HAL_START_SCAN_REQ = 6,
+ WCN36XX_HAL_START_SCAN_RSP = 7,
+ WCN36XX_HAL_END_SCAN_REQ = 8,
+ WCN36XX_HAL_END_SCAN_RSP = 9,
+ WCN36XX_HAL_FINISH_SCAN_REQ = 10,
+ WCN36XX_HAL_FINISH_SCAN_RSP = 11,
+
+ /* HW STA configuration/deconfiguration */
+ WCN36XX_HAL_CONFIG_STA_REQ = 12,
+ WCN36XX_HAL_CONFIG_STA_RSP = 13,
+ WCN36XX_HAL_DELETE_STA_REQ = 14,
+ WCN36XX_HAL_DELETE_STA_RSP = 15,
+ WCN36XX_HAL_CONFIG_BSS_REQ = 16,
+ WCN36XX_HAL_CONFIG_BSS_RSP = 17,
+ WCN36XX_HAL_DELETE_BSS_REQ = 18,
+ WCN36XX_HAL_DELETE_BSS_RSP = 19,
+
+	/* Infra STA association */
+ WCN36XX_HAL_JOIN_REQ = 20,
+ WCN36XX_HAL_JOIN_RSP = 21,
+ WCN36XX_HAL_POST_ASSOC_REQ = 22,
+ WCN36XX_HAL_POST_ASSOC_RSP = 23,
+
+ /* Security */
+ WCN36XX_HAL_SET_BSSKEY_REQ = 24,
+ WCN36XX_HAL_SET_BSSKEY_RSP = 25,
+ WCN36XX_HAL_SET_STAKEY_REQ = 26,
+ WCN36XX_HAL_SET_STAKEY_RSP = 27,
+ WCN36XX_HAL_RMV_BSSKEY_REQ = 28,
+ WCN36XX_HAL_RMV_BSSKEY_RSP = 29,
+ WCN36XX_HAL_RMV_STAKEY_REQ = 30,
+ WCN36XX_HAL_RMV_STAKEY_RSP = 31,
+
+ /* Qos Related */
+ WCN36XX_HAL_ADD_TS_REQ = 32,
+ WCN36XX_HAL_ADD_TS_RSP = 33,
+ WCN36XX_HAL_DEL_TS_REQ = 34,
+ WCN36XX_HAL_DEL_TS_RSP = 35,
+ WCN36XX_HAL_UPD_EDCA_PARAMS_REQ = 36,
+ WCN36XX_HAL_UPD_EDCA_PARAMS_RSP = 37,
+ WCN36XX_HAL_ADD_BA_REQ = 38,
+ WCN36XX_HAL_ADD_BA_RSP = 39,
+ WCN36XX_HAL_DEL_BA_REQ = 40,
+ WCN36XX_HAL_DEL_BA_RSP = 41,
+
+ WCN36XX_HAL_CH_SWITCH_REQ = 42,
+ WCN36XX_HAL_CH_SWITCH_RSP = 43,
+ WCN36XX_HAL_SET_LINK_ST_REQ = 44,
+ WCN36XX_HAL_SET_LINK_ST_RSP = 45,
+ WCN36XX_HAL_GET_STATS_REQ = 46,
+ WCN36XX_HAL_GET_STATS_RSP = 47,
+ WCN36XX_HAL_UPDATE_CFG_REQ = 48,
+ WCN36XX_HAL_UPDATE_CFG_RSP = 49,
+
+ WCN36XX_HAL_MISSED_BEACON_IND = 50,
+ WCN36XX_HAL_UNKNOWN_ADDR2_FRAME_RX_IND = 51,
+ WCN36XX_HAL_MIC_FAILURE_IND = 52,
+ WCN36XX_HAL_FATAL_ERROR_IND = 53,
+ WCN36XX_HAL_SET_KEYDONE_MSG = 54,
+
+ /* NV Interface */
+ WCN36XX_HAL_DOWNLOAD_NV_REQ = 55,
+ WCN36XX_HAL_DOWNLOAD_NV_RSP = 56,
+
+ WCN36XX_HAL_ADD_BA_SESSION_REQ = 57,
+ WCN36XX_HAL_ADD_BA_SESSION_RSP = 58,
+ WCN36XX_HAL_TRIGGER_BA_REQ = 59,
+ WCN36XX_HAL_TRIGGER_BA_RSP = 60,
+ WCN36XX_HAL_UPDATE_BEACON_REQ = 61,
+ WCN36XX_HAL_UPDATE_BEACON_RSP = 62,
+ WCN36XX_HAL_SEND_BEACON_REQ = 63,
+ WCN36XX_HAL_SEND_BEACON_RSP = 64,
+
+ WCN36XX_HAL_SET_BCASTKEY_REQ = 65,
+ WCN36XX_HAL_SET_BCASTKEY_RSP = 66,
+ WCN36XX_HAL_DELETE_STA_CONTEXT_IND = 67,
+ WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ = 68,
+ WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP = 69,
+
+ /* PTT interface support */
+ WCN36XX_HAL_PROCESS_PTT_REQ = 70,
+ WCN36XX_HAL_PROCESS_PTT_RSP = 71,
+
+ /* BTAMP related events */
+ WCN36XX_HAL_SIGNAL_BTAMP_EVENT_REQ = 72,
+ WCN36XX_HAL_SIGNAL_BTAMP_EVENT_RSP = 73,
+ WCN36XX_HAL_TL_HAL_FLUSH_AC_REQ = 74,
+ WCN36XX_HAL_TL_HAL_FLUSH_AC_RSP = 75,
+
+ WCN36XX_HAL_ENTER_IMPS_REQ = 76,
+ WCN36XX_HAL_EXIT_IMPS_REQ = 77,
+ WCN36XX_HAL_ENTER_BMPS_REQ = 78,
+ WCN36XX_HAL_EXIT_BMPS_REQ = 79,
+ WCN36XX_HAL_ENTER_UAPSD_REQ = 80,
+ WCN36XX_HAL_EXIT_UAPSD_REQ = 81,
+ WCN36XX_HAL_UPDATE_UAPSD_PARAM_REQ = 82,
+ WCN36XX_HAL_CONFIGURE_RXP_FILTER_REQ = 83,
+ WCN36XX_HAL_ADD_BCN_FILTER_REQ = 84,
+ WCN36XX_HAL_REM_BCN_FILTER_REQ = 85,
+ WCN36XX_HAL_ADD_WOWL_BCAST_PTRN = 86,
+ WCN36XX_HAL_DEL_WOWL_BCAST_PTRN = 87,
+ WCN36XX_HAL_ENTER_WOWL_REQ = 88,
+ WCN36XX_HAL_EXIT_WOWL_REQ = 89,
+ WCN36XX_HAL_HOST_OFFLOAD_REQ = 90,
+ WCN36XX_HAL_SET_RSSI_THRESH_REQ = 91,
+ WCN36XX_HAL_GET_RSSI_REQ = 92,
+ WCN36XX_HAL_SET_UAPSD_AC_PARAMS_REQ = 93,
+ WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_REQ = 94,
+
+ WCN36XX_HAL_ENTER_IMPS_RSP = 95,
+ WCN36XX_HAL_EXIT_IMPS_RSP = 96,
+ WCN36XX_HAL_ENTER_BMPS_RSP = 97,
+ WCN36XX_HAL_EXIT_BMPS_RSP = 98,
+ WCN36XX_HAL_ENTER_UAPSD_RSP = 99,
+ WCN36XX_HAL_EXIT_UAPSD_RSP = 100,
+ WCN36XX_HAL_SET_UAPSD_AC_PARAMS_RSP = 101,
+ WCN36XX_HAL_UPDATE_UAPSD_PARAM_RSP = 102,
+ WCN36XX_HAL_CONFIGURE_RXP_FILTER_RSP = 103,
+ WCN36XX_HAL_ADD_BCN_FILTER_RSP = 104,
+ WCN36XX_HAL_REM_BCN_FILTER_RSP = 105,
+ WCN36XX_HAL_SET_RSSI_THRESH_RSP = 106,
+ WCN36XX_HAL_HOST_OFFLOAD_RSP = 107,
+ WCN36XX_HAL_ADD_WOWL_BCAST_PTRN_RSP = 108,
+ WCN36XX_HAL_DEL_WOWL_BCAST_PTRN_RSP = 109,
+ WCN36XX_HAL_ENTER_WOWL_RSP = 110,
+ WCN36XX_HAL_EXIT_WOWL_RSP = 111,
+ WCN36XX_HAL_RSSI_NOTIFICATION_IND = 112,
+ WCN36XX_HAL_GET_RSSI_RSP = 113,
+ WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_RSP = 114,
+
+ /* 11k related events */
+ WCN36XX_HAL_SET_MAX_TX_POWER_REQ = 115,
+ WCN36XX_HAL_SET_MAX_TX_POWER_RSP = 116,
+
+ /* 11R related msgs */
+ WCN36XX_HAL_AGGR_ADD_TS_REQ = 117,
+ WCN36XX_HAL_AGGR_ADD_TS_RSP = 118,
+
+ /* P2P WLAN_FEATURE_P2P */
+ WCN36XX_HAL_SET_P2P_GONOA_REQ = 119,
+ WCN36XX_HAL_SET_P2P_GONOA_RSP = 120,
+
+ /* WLAN Dump commands */
+ WCN36XX_HAL_DUMP_COMMAND_REQ = 121,
+ WCN36XX_HAL_DUMP_COMMAND_RSP = 122,
+
+ /* OEM_DATA FEATURE SUPPORT */
+ WCN36XX_HAL_START_OEM_DATA_REQ = 123,
+ WCN36XX_HAL_START_OEM_DATA_RSP = 124,
+
+ /* ADD SELF STA REQ and RSP */
+ WCN36XX_HAL_ADD_STA_SELF_REQ = 125,
+ WCN36XX_HAL_ADD_STA_SELF_RSP = 126,
+
+ /* DEL SELF STA SUPPORT */
+ WCN36XX_HAL_DEL_STA_SELF_REQ = 127,
+ WCN36XX_HAL_DEL_STA_SELF_RSP = 128,
+
+ /* Coex Indication */
+ WCN36XX_HAL_COEX_IND = 129,
+
+ /* Tx Complete Indication */
+ WCN36XX_HAL_OTA_TX_COMPL_IND = 130,
+
+ /* Host Suspend/resume messages */
+ WCN36XX_HAL_HOST_SUSPEND_IND = 131,
+ WCN36XX_HAL_HOST_RESUME_REQ = 132,
+ WCN36XX_HAL_HOST_RESUME_RSP = 133,
+
+ WCN36XX_HAL_SET_TX_POWER_REQ = 134,
+ WCN36XX_HAL_SET_TX_POWER_RSP = 135,
+ WCN36XX_HAL_GET_TX_POWER_REQ = 136,
+ WCN36XX_HAL_GET_TX_POWER_RSP = 137,
+
+ WCN36XX_HAL_P2P_NOA_ATTR_IND = 138,
+
+ WCN36XX_HAL_ENABLE_RADAR_DETECT_REQ = 139,
+ WCN36XX_HAL_ENABLE_RADAR_DETECT_RSP = 140,
+ WCN36XX_HAL_GET_TPC_REPORT_REQ = 141,
+ WCN36XX_HAL_GET_TPC_REPORT_RSP = 142,
+ WCN36XX_HAL_RADAR_DETECT_IND = 143,
+ WCN36XX_HAL_RADAR_DETECT_INTR_IND = 144,
+ WCN36XX_HAL_KEEP_ALIVE_REQ = 145,
+ WCN36XX_HAL_KEEP_ALIVE_RSP = 146,
+
+ /* PNO messages */
+ WCN36XX_HAL_SET_PREF_NETWORK_REQ = 147,
+ WCN36XX_HAL_SET_PREF_NETWORK_RSP = 148,
+ WCN36XX_HAL_SET_RSSI_FILTER_REQ = 149,
+ WCN36XX_HAL_SET_RSSI_FILTER_RSP = 150,
+ WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ = 151,
+ WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP = 152,
+ WCN36XX_HAL_PREF_NETW_FOUND_IND = 153,
+
+ WCN36XX_HAL_SET_TX_PER_TRACKING_REQ = 154,
+ WCN36XX_HAL_SET_TX_PER_TRACKING_RSP = 155,
+ WCN36XX_HAL_TX_PER_HIT_IND = 156,
+
+ WCN36XX_HAL_8023_MULTICAST_LIST_REQ = 157,
+ WCN36XX_HAL_8023_MULTICAST_LIST_RSP = 158,
+
+ WCN36XX_HAL_SET_PACKET_FILTER_REQ = 159,
+ WCN36XX_HAL_SET_PACKET_FILTER_RSP = 160,
+ WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_REQ = 161,
+ WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_RSP = 162,
+ WCN36XX_HAL_CLEAR_PACKET_FILTER_REQ = 163,
+ WCN36XX_HAL_CLEAR_PACKET_FILTER_RSP = 164,
+
+ /*
+	 * This is a temporary fix. Should be removed once the Host and Riva
+	 * code are in sync.
+ */
+ WCN36XX_HAL_INIT_SCAN_CON_REQ = 165,
+
+ WCN36XX_HAL_SET_POWER_PARAMS_REQ = 166,
+ WCN36XX_HAL_SET_POWER_PARAMS_RSP = 167,
+
+ WCN36XX_HAL_TSM_STATS_REQ = 168,
+ WCN36XX_HAL_TSM_STATS_RSP = 169,
+
+ /* wake reason indication (WOW) */
+ WCN36XX_HAL_WAKE_REASON_IND = 170,
+
+ /* GTK offload support */
+ WCN36XX_HAL_GTK_OFFLOAD_REQ = 171,
+ WCN36XX_HAL_GTK_OFFLOAD_RSP = 172,
+ WCN36XX_HAL_GTK_OFFLOAD_GETINFO_REQ = 173,
+ WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP = 174,
+
+ WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ = 175,
+ WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP = 176,
+ WCN36XX_HAL_EXCLUDE_UNENCRYPTED_IND = 177,
+
+ WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ = 178,
+ WCN36XX_HAL_SET_THERMAL_MITIGATION_RSP = 179,
+
+ WCN36XX_HAL_UPDATE_VHT_OP_MODE_REQ = 182,
+ WCN36XX_HAL_UPDATE_VHT_OP_MODE_RSP = 183,
+
+ WCN36XX_HAL_P2P_NOA_START_IND = 184,
+
+ WCN36XX_HAL_GET_ROAM_RSSI_REQ = 185,
+ WCN36XX_HAL_GET_ROAM_RSSI_RSP = 186,
+
+ WCN36XX_HAL_CLASS_B_STATS_IND = 187,
+ WCN36XX_HAL_DEL_BA_IND = 188,
+ WCN36XX_HAL_DHCP_START_IND = 189,
+ WCN36XX_HAL_DHCP_STOP_IND = 190,
+
+ WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE
+};
+
+/* Enumeration for Version */
+enum wcn36xx_hal_host_msg_version {
+ WCN36XX_HAL_MSG_VERSION0 = 0,
+ WCN36XX_HAL_MSG_VERSION1 = 1,
+ /* define as 2 bytes data */
+ WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION = 0x7FFF,
+ WCN36XX_HAL_MSG_VERSION_MAX_FIELD = WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION
+};
+
+enum driver_type {
+ DRIVER_TYPE_PRODUCTION = 0,
+ DRIVER_TYPE_MFG = 1,
+ DRIVER_TYPE_DVT = 2,
+ DRIVER_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stop_type {
+ HAL_STOP_TYPE_SYS_RESET,
+ HAL_STOP_TYPE_SYS_DEEP_SLEEP,
+ HAL_STOP_TYPE_RF_KILL,
+ HAL_STOP_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_sys_mode {
+ HAL_SYS_MODE_NORMAL,
+ HAL_SYS_MODE_LEARN,
+ HAL_SYS_MODE_SCAN,
+ HAL_SYS_MODE_PROMISC,
+ HAL_SYS_MODE_SUSPEND_LINK,
+ HAL_SYS_MODE_ROAM_SCAN,
+ HAL_SYS_MODE_ROAM_SUSPEND_LINK,
+ HAL_SYS_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum phy_chan_bond_state {
+ /* 20MHz IF bandwidth centered on IF carrier */
+ PHY_SINGLE_CHANNEL_CENTERED = 0,
+
+ /* 40MHz IF bandwidth with lower 20MHz supporting the primary channel */
+ PHY_DOUBLE_CHANNEL_LOW_PRIMARY = 1,
+
+ /* 40MHz IF bandwidth centered on IF carrier */
+ PHY_DOUBLE_CHANNEL_CENTERED = 2,
+
+ /* 40MHz IF bandwidth with higher 20MHz supporting the primary ch */
+ PHY_DOUBLE_CHANNEL_HIGH_PRIMARY = 3,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_CENTERED = 4,
+
+ /* 20/40MHZ offset CENTERED 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_CENTERED_40MHZ_CENTERED = 5,
+
+ /* 20/40MHZ offset HIGH 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_CENTERED = 6,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset LOW */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW = 7,
+
+ /* 20/40MHZ offset HIGH 40/80MHZ offset LOW */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW = 8,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset HIGH */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH = 9,
+
+ /* 20/40MHZ offset-HIGH 40/80MHZ offset HIGH */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH = 10,
+
+ PHY_CHANNEL_BONDING_STATE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Spatial Multiplexing(SM) Power Save mode */
+enum wcn36xx_hal_ht_mimo_state {
+ /* Static SM Power Save mode */
+ WCN36XX_HAL_HT_MIMO_PS_STATIC = 0,
+
+ /* Dynamic SM Power Save mode */
+ WCN36XX_HAL_HT_MIMO_PS_DYNAMIC = 1,
+
+ /* reserved */
+ WCN36XX_HAL_HT_MIMO_PS_NA = 2,
+
+ /* SM Power Save disabled */
+ WCN36XX_HAL_HT_MIMO_PS_NO_LIMIT = 3,
+
+ WCN36XX_HAL_HT_MIMO_PS_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* each station added has a rate mode which specifies the sta attributes */
+enum sta_rate_mode {
+ STA_TAURUS = 0,
+ STA_TITAN,
+ STA_POLARIS,
+ STA_11b,
+ STA_11bg,
+ STA_11a,
+ STA_11n,
+ STA_11ac,
+ STA_INVALID_RATE_MODE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* 1,2,5.5,11 */
+#define WCN36XX_HAL_NUM_DSSS_RATES 4
+
+/* 6,9,12,18,24,36,48,54 */
+#define WCN36XX_HAL_NUM_OFDM_RATES 8
+
+/* 72,96,108 */
+#define WCN36XX_HAL_NUM_POLARIS_RATES 3
+
+#define WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET 16
+
+enum wcn36xx_hal_bss_type {
+ WCN36XX_HAL_INFRASTRUCTURE_MODE,
+
+ /* Added for softAP support */
+ WCN36XX_HAL_INFRA_AP_MODE,
+
+ WCN36XX_HAL_IBSS_MODE,
+
+ /* Added for BT-AMP support */
+ WCN36XX_HAL_BTAMP_STA_MODE,
+
+ /* Added for BT-AMP support */
+ WCN36XX_HAL_BTAMP_AP_MODE,
+
+ WCN36XX_HAL_AUTO_MODE,
+
+ WCN36XX_HAL_DONOT_USE_BSS_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_nw_type {
+ WCN36XX_HAL_11A_NW_TYPE,
+ WCN36XX_HAL_11B_NW_TYPE,
+ WCN36XX_HAL_11G_NW_TYPE,
+ WCN36XX_HAL_11N_NW_TYPE,
+ WCN36XX_HAL_DONOT_USE_NW_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WCN36XX_HAL_MAC_RATESET_EID_MAX 12
+
+enum wcn36xx_hal_ht_operating_mode {
+ /* No Protection */
+ WCN36XX_HAL_HT_OP_MODE_PURE,
+
+ /* Overlap Legacy device present, protection is optional */
+ WCN36XX_HAL_HT_OP_MODE_OVERLAP_LEGACY,
+
+ /* No legacy device, but 20 MHz HT present */
+ WCN36XX_HAL_HT_OP_MODE_NO_LEGACY_20MHZ_HT,
+
+ /* Protection is required */
+ WCN36XX_HAL_HT_OP_MODE_MIXED,
+
+ WCN36XX_HAL_HT_OP_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type enum used with peer */
+enum ani_ed_type {
+ WCN36XX_HAL_ED_NONE,
+ WCN36XX_HAL_ED_WEP40,
+ WCN36XX_HAL_ED_WEP104,
+ WCN36XX_HAL_ED_TKIP,
+ WCN36XX_HAL_ED_CCMP,
+ WCN36XX_HAL_ED_WPI,
+ WCN36XX_HAL_ED_AES_128_CMAC,
+ WCN36XX_HAL_ED_NOT_IMPLEMENTED = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WLAN_MAX_KEY_RSC_LEN 16
+#define WLAN_WAPI_KEY_RSC_LEN 16
+
+/* MAX key length when ULA is used */
+#define WCN36XX_HAL_MAC_MAX_KEY_LENGTH 32
+#define WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS 4
+
+/*
+ * Enum to specify whether key is used for TX only, RX only or both.
+ */
+enum ani_key_direction {
+ WCN36XX_HAL_TX_ONLY,
+ WCN36XX_HAL_RX_ONLY,
+ WCN36XX_HAL_TX_RX,
+ WCN36XX_HAL_TX_DEFAULT,
+ WCN36XX_HAL_DONOT_USE_KEY_DIRECTION = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum ani_wep_type {
+ WCN36XX_HAL_WEP_STATIC,
+ WCN36XX_HAL_WEP_DYNAMIC,
+ WCN36XX_HAL_WEP_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_link_state {
+
+ WCN36XX_HAL_LINK_IDLE_STATE = 0,
+ WCN36XX_HAL_LINK_PREASSOC_STATE = 1,
+ WCN36XX_HAL_LINK_POSTASSOC_STATE = 2,
+ WCN36XX_HAL_LINK_AP_STATE = 3,
+ WCN36XX_HAL_LINK_IBSS_STATE = 4,
+
+ /* BT-AMP Case */
+ WCN36XX_HAL_LINK_BTAMP_PREASSOC_STATE = 5,
+ WCN36XX_HAL_LINK_BTAMP_POSTASSOC_STATE = 6,
+ WCN36XX_HAL_LINK_BTAMP_AP_STATE = 7,
+ WCN36XX_HAL_LINK_BTAMP_STA_STATE = 8,
+
+ /* Reserved for HAL Internal Use */
+ WCN36XX_HAL_LINK_LEARN_STATE = 9,
+ WCN36XX_HAL_LINK_SCAN_STATE = 10,
+ WCN36XX_HAL_LINK_FINISH_SCAN_STATE = 11,
+ WCN36XX_HAL_LINK_INIT_CAL_STATE = 12,
+ WCN36XX_HAL_LINK_FINISH_CAL_STATE = 13,
+ WCN36XX_HAL_LINK_LISTEN_STATE = 14,
+
+ WCN36XX_HAL_LINK_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stats_mask {
+ HAL_SUMMARY_STATS_INFO = 0x00000001,
+ HAL_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+ HAL_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+ HAL_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+ HAL_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+ HAL_PER_STA_STATS_INFO = 0x00000020
+};
+
+/* BT-AMP events type */
+enum bt_amp_event_type {
+ BTAMP_EVENT_CONNECTION_START,
+ BTAMP_EVENT_CONNECTION_STOP,
+ BTAMP_EVENT_CONNECTION_TERMINATED,
+
+ /* This and beyond are invalid values */
+ BTAMP_EVENT_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+/* PE Statistics */
+enum pe_stats_mask {
+ PE_SUMMARY_STATS_INFO = 0x00000001,
+ PE_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+ PE_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+ PE_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+ PE_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+ PE_PER_STA_STATS_INFO = 0x00000020,
+
+ /* This and beyond are invalid values */
+ PE_STATS_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/*
+ * Configuration Parameter IDs
+ */
+#define WCN36XX_HAL_CFG_STA_ID 0
+#define WCN36XX_HAL_CFG_CURRENT_TX_ANTENNA 1
+#define WCN36XX_HAL_CFG_CURRENT_RX_ANTENNA 2
+#define WCN36XX_HAL_CFG_LOW_GAIN_OVERRIDE 3
+#define WCN36XX_HAL_CFG_POWER_STATE_PER_CHAIN 4
+#define WCN36XX_HAL_CFG_CAL_PERIOD 5
+#define WCN36XX_HAL_CFG_CAL_CONTROL 6
+#define WCN36XX_HAL_CFG_PROXIMITY 7
+#define WCN36XX_HAL_CFG_NETWORK_DENSITY 8
+#define WCN36XX_HAL_CFG_MAX_MEDIUM_TIME 9
+#define WCN36XX_HAL_CFG_MAX_MPDUS_IN_AMPDU 10
+#define WCN36XX_HAL_CFG_RTS_THRESHOLD 11
+#define WCN36XX_HAL_CFG_SHORT_RETRY_LIMIT 12
+#define WCN36XX_HAL_CFG_LONG_RETRY_LIMIT 13
+#define WCN36XX_HAL_CFG_FRAGMENTATION_THRESHOLD 14
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ZERO 15
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ONE 16
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_TWO 17
+#define WCN36XX_HAL_CFG_FIXED_RATE 18
+#define WCN36XX_HAL_CFG_RETRYRATE_POLICY 19
+#define WCN36XX_HAL_CFG_RETRYRATE_SECONDARY 20
+#define WCN36XX_HAL_CFG_RETRYRATE_TERTIARY 21
+#define WCN36XX_HAL_CFG_FORCE_POLICY_PROTECTION 22
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_24GHZ 23
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_5GHZ 24
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_24GHZ 25
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_5GHZ 26
+#define WCN36XX_HAL_CFG_MAX_BA_SESSIONS 27
+#define WCN36XX_HAL_CFG_PS_DATA_INACTIVITY_TIMEOUT 28
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_FILTER 29
+#define WCN36XX_HAL_CFG_PS_ENABLE_RSSI_MONITOR 30
+#define WCN36XX_HAL_CFG_NUM_BEACON_PER_RSSI_AVERAGE 31
+#define WCN36XX_HAL_CFG_STATS_PERIOD 32
+#define WCN36XX_HAL_CFG_CFP_MAX_DURATION 33
+#define WCN36XX_HAL_CFG_FRAME_TRANS_ENABLED 34
+#define WCN36XX_HAL_CFG_DTIM_PERIOD 35
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBK 36
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBE 37
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVO 38
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVI 39
+#define WCN36XX_HAL_CFG_BA_THRESHOLD_HIGH 40
+#define WCN36XX_HAL_CFG_MAX_BA_BUFFERS 41
+#define WCN36XX_HAL_CFG_RPE_POLLING_THRESHOLD 42
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC0_REG 43
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC1_REG 44
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC2_REG 45
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC3_REG 46
+#define WCN36XX_HAL_CFG_NO_OF_ONCHIP_REORDER_SESSIONS 47
+#define WCN36XX_HAL_CFG_PS_LISTEN_INTERVAL 48
+#define WCN36XX_HAL_CFG_PS_HEART_BEAT_THRESHOLD 49
+#define WCN36XX_HAL_CFG_PS_NTH_BEACON_FILTER 50
+#define WCN36XX_HAL_CFG_PS_MAX_PS_POLL 51
+#define WCN36XX_HAL_CFG_PS_MIN_RSSI_THRESHOLD 52
+#define WCN36XX_HAL_CFG_PS_RSSI_FILTER_PERIOD 53
+#define WCN36XX_HAL_CFG_PS_BROADCAST_FRAME_FILTER_ENABLE 54
+#define WCN36XX_HAL_CFG_PS_IGNORE_DTIM 55
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_EARLY_TERM 56
+#define WCN36XX_HAL_CFG_DYNAMIC_PS_POLL_VALUE 57
+#define WCN36XX_HAL_CFG_PS_NULLDATA_AP_RESP_TIMEOUT 58
+#define WCN36XX_HAL_CFG_TELE_BCN_WAKEUP_EN 59
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI 60
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI_IDLE_BCNS 61
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI 62
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI_IDLE_BCNS 63
+#define WCN36XX_HAL_CFG_TX_PWR_CTRL_ENABLE 64
+#define WCN36XX_HAL_CFG_VALID_RADAR_CHANNEL_LIST 65
+#define WCN36XX_HAL_CFG_TX_POWER_24_20 66
+#define WCN36XX_HAL_CFG_TX_POWER_24_40 67
+#define WCN36XX_HAL_CFG_TX_POWER_50_20 68
+#define WCN36XX_HAL_CFG_TX_POWER_50_40 69
+#define WCN36XX_HAL_CFG_MCAST_BCAST_FILTER_SETTING 70
+#define WCN36XX_HAL_CFG_BCN_EARLY_TERM_WAKEUP_INTERVAL 71
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_2_4 72
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_5 73
+#define WCN36XX_HAL_CFG_INFRA_STA_KEEP_ALIVE_PERIOD 74
+#define WCN36XX_HAL_CFG_ENABLE_CLOSE_LOOP 75
+#define WCN36XX_HAL_CFG_BTC_EXECUTION_MODE 76
+#define WCN36XX_HAL_CFG_BTC_DHCP_BT_SLOTS_TO_BLOCK 77
+#define WCN36XX_HAL_CFG_BTC_A2DP_DHCP_BT_SUB_INTERVALS 78
+#define WCN36XX_HAL_CFG_PS_TX_INACTIVITY_TIMEOUT 79
+#define WCN36XX_HAL_CFG_WCNSS_API_VERSION 80
+#define WCN36XX_HAL_CFG_AP_KEEPALIVE_TIMEOUT 81
+#define WCN36XX_HAL_CFG_GO_KEEPALIVE_TIMEOUT 82
+#define WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST 83
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_BT 84
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_BT 85
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_BT 86
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_BT 87
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_WLAN 88
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_WLAN 89
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_WLAN 90
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_WLAN 91
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_BT 92
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_WLAN 93
+#define WCN36XX_HAL_CFG_BTC_MAX_SCO_BLOCK_PERC 94
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_A2DP 95
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_SCO 96
+#define WCN36XX_HAL_CFG_ENABLE_UNICAST_FILTER 97
+#define WCN36XX_HAL_CFG_MAX_ASSOC_LIMIT 98
+#define WCN36XX_HAL_CFG_ENABLE_LPWR_IMG_TRANSITION 99
+#define WCN36XX_HAL_CFG_ENABLE_MCC_ADAPTIVE_SCHEDULER 100
+#define WCN36XX_HAL_CFG_ENABLE_DETECT_PS_SUPPORT 101
+#define WCN36XX_HAL_CFG_AP_LINK_MONITOR_TIMEOUT 102
+#define WCN36XX_HAL_CFG_BTC_DWELL_TIME_MULTIPLIER 103
+#define WCN36XX_HAL_CFG_ENABLE_TDLS_OXYGEN_MODE 104
+#define WCN36XX_HAL_CFG_MAX_PARAMS 105
+
+/* Message definitions - All the messages below need to be packed */
+
+/* Definition for HAL API Version. */
+struct wcnss_wlan_version {
+ u8 revision;
+ u8 version;
+ u8 minor;
+ u8 major;
+} __packed;
+
+/* Definition for Encryption Keys */
+struct wcn36xx_hal_keys {
+ u8 id;
+
+ /* 0 for multicast */
+ u8 unicast;
+
+ enum ani_key_direction direction;
+
+ /* Usage is unknown */
+ u8 rsc[WLAN_MAX_KEY_RSC_LEN];
+
+ /* =1 for authenticator,=0 for supplicant */
+ u8 pae_role;
+
+ u16 length;
+ u8 key[WCN36XX_HAL_MAC_MAX_KEY_LENGTH];
+} __packed;
+
+/*
+ * set_sta_key_params Moving here since it is shared by
+ * configbss/setstakey msgs
+ */
+struct wcn36xx_hal_set_sta_key_params {
+ /* STA Index */
+ u16 sta_index;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* STATIC/DYNAMIC - valid only for WEP */
+ enum ani_wep_type wep_type;
+
+	/* Default WEP key, valid only for static WEP, must be between 0 and 3. */
+ u8 def_wep_idx;
+
+	/* valid only for non-static WEP encryptions */
+ struct wcn36xx_hal_keys key[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+ /*
+	 * Control for Replay Count: 1 = single TID based replay count on TX,
+	 * 0 = per TID based replay count on TX
+ */
+ u8 single_tid_rc;
+
+} __packed;
+
+/* 4-byte control message header used by HAL*/
+struct wcn36xx_hal_msg_header {
+ enum wcn36xx_hal_host_msg_type msg_type:16;
+ enum wcn36xx_hal_host_msg_version msg_version:16;
+ u32 len;
+} __packed;
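+
+/*
+ * Editor's sketch of filling the header for a request (hypothetical
+ * usage, not the driver's actual helper): msg_type and msg_version are
+ * 16-bit fields and, per the update-cfg note further below, len covers
+ * the whole message including this header.
+ *
+ *	struct wcn36xx_hal_mac_stop_req_msg msg;
+ *
+ *	msg.header.msg_type = WCN36XX_HAL_STOP_REQ;
+ *	msg.header.msg_version = WCN36XX_HAL_MSG_VERSION0;
+ *	msg.header.len = sizeof(msg);
+ */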
+
+/* Config format required by HAL for each CFG item*/
+struct wcn36xx_hal_cfg {
+ /* Cfg Id. The Id required by HAL is exported by HAL
+ * in shared header file between UMAC and HAL.*/
+ u16 id;
+
+ /* Length of the Cfg. This parameter is used to go to next cfg
+ * in the TLV format.*/
+ u16 len;
+
+	/* Padding bytes for unaligned addresses */
+ u16 pad_bytes;
+
+ /* Reserve bytes for making cfgVal to align address */
+ u16 reserve;
+
+	/* Following the uCfgLen field there should be 'uCfgLen' bytes
+	 * containing the uCfgValue; u8 uCfgValue[uCfgLen] */
+} __packed;
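+
+/*
+ * Editor's sketch of walking the TLV config buffer described above
+ * (illustrative only; the handling of pad_bytes between items is an
+ * assumption here): each item is a struct wcn36xx_hal_cfg header
+ * followed by 'len' value bytes, and 'len' is what advances the cursor
+ * to the next item.
+ *
+ *	u8 *pos = buf;
+ *
+ *	while (pos < buf + buf_len) {
+ *		struct wcn36xx_hal_cfg *cfg = (struct wcn36xx_hal_cfg *)pos;
+ *
+ *		// cfg->id selects the WCN36XX_HAL_CFG_* item, the value
+ *		// bytes follow the fixed header
+ *		pos += sizeof(*cfg) + cfg->len + cfg->pad_bytes;
+ *	}
+ */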
+
+struct wcn36xx_hal_mac_start_parameters {
+	/* Driver type - Production or FTM etc. */
+ enum driver_type type;
+
+ /* Length of the config buffer */
+ u32 len;
+
+ /* Following this there is a TLV formatted buffer of length
+ * "len" bytes containing all config values.
+ * The TLV is expected to be formatted like this:
+ * 0 15 31 31+CFG_LEN-1 length-1
+ * | CFG_ID | CFG_LEN | CFG_BODY | CFG_ID |......|
+ */
+} __packed;
+
+struct wcn36xx_hal_mac_start_req_msg {
+ /* config buffer must start in TLV format just here */
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_start_parameters params;
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_params {
+ /* success or failure */
+ u16 status;
+
+ /* Max number of STA supported by the device */
+ u8 stations;
+
+ /* Max number of BSS supported by the device */
+ u8 bssids;
+
+ /* API Version */
+ struct wcnss_wlan_version version;
+
+ /* CRM build information */
+ u8 crm_version[WCN36XX_HAL_VERSION_LENGTH];
+
+ /* hardware/chipset/misc version information */
+ u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH];
+
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_start_rsp_params start_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_params {
+ /* The reason for which the device is being stopped */
+ enum wcn36xx_hal_stop_type reason;
+
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_stop_req_params stop_req_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_update_cfg_req_msg {
+ /*
+ * Note: The length specified in tHalUpdateCfgReqMsg messages should be
+ * header.msgLen = sizeof(tHalUpdateCfgReqMsg) + uConfigBufferLen
+ */
+ struct wcn36xx_hal_msg_header header;
+
+ /* Length of the config buffer. Allows UMAC to update multiple CFGs */
+ u32 len;
+
+ /*
+ * Following this there is a TLV formatted buffer of length
+ * "uConfigBufferLen" bytes containing all config values.
+ * The TLV is expected to be formatted like this:
+ * 0 15 31 31+CFG_LEN-1 length-1
+ * | CFG_ID | CFG_LEN | CFG_BODY | CFG_ID |......|
+ */
+
+} __packed;
+
+struct wcn36xx_hal_update_cfg_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+/* Frame control field format (2 bytes) */
+struct wcn36xx_hal_mac_frame_ctl {
+
+#ifndef ANI_LITTLE_BIT_ENDIAN
+
+ u8 subType:4;
+ u8 type:2;
+ u8 protVer:2;
+
+ u8 order:1;
+ u8 wep:1;
+ u8 moreData:1;
+ u8 powerMgmt:1;
+ u8 retry:1;
+ u8 moreFrag:1;
+ u8 fromDS:1;
+ u8 toDS:1;
+
+#else
+
+ u8 protVer:2;
+ u8 type:2;
+ u8 subType:4;
+
+ u8 toDS:1;
+ u8 fromDS:1;
+ u8 moreFrag:1;
+ u8 retry:1;
+ u8 powerMgmt:1;
+ u8 moreData:1;
+ u8 wep:1;
+ u8 order:1;
+
+#endif
+
+};
+
+/* Sequence control field */
+struct wcn36xx_hal_mac_seq_ctl {
+ u8 fragNum:4;
+ u8 seqNumLo:4;
+ u8 seqNumHi:8;
+};
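+
+/*
+ * Editor's sketch (hypothetical helper): the 12-bit 802.11 sequence
+ * number is split across seqNumLo/seqNumHi, so reassembling it would be
+ *
+ *	static inline u16 wcn36xx_hal_seq_num(struct wcn36xx_hal_mac_seq_ctl *sc)
+ *	{
+ *		return (sc->seqNumHi << 4) | sc->seqNumLo;
+ *	}
+ */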
+
+/* Management header format */
+struct wcn36xx_hal_mac_mgmt_hdr {
+ struct wcn36xx_hal_mac_frame_ctl fc;
+ u8 durationLo;
+ u8 durationHi;
+ u8 da[6];
+ u8 sa[6];
+ u8 bssId[6];
+ struct wcn36xx_hal_mac_seq_ctl seqControl;
+};
+
+/* FIXME: pronto v1 apparently has 4 */
+#define WCN36XX_HAL_NUM_BSSID 2
+
+/* Scan Entry to hold active BSS idx's */
+struct wcn36xx_hal_scan_entry {
+ u8 bss_index[WCN36XX_HAL_NUM_BSSID];
+ u8 active_bss_count;
+};
+
+struct wcn36xx_hal_init_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* LEARN - AP Role
+ SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_len;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+};
+
+struct wcn36xx_hal_init_scan_con_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* LEARN - AP Role
+ SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_length;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+
+ /* Single NoA usage in Scanning */
+ u8 use_noa;
+
+ /* Indicates the scan duration (in ms) */
+ u16 scan_duration;
+
+};
+
+struct wcn36xx_hal_init_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+struct wcn36xx_hal_start_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Indicates the channel to scan */
+ u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_start_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u32 start_tsf[2];
+ u8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_end_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+	/* Indicates the channel to stop scanning on. Not really used, but
+	 * retained for symmetry with the "start Scan" message. It can also
+	 * help with error checking if needed. */
+ u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_end_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_finish_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Identifies the operational state of the AP/STA
+ * LEARN - AP Role SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* Operating channel to tune to. */
+ u8 oper_channel;
+
+ /* Channel Bonding state If 20/40 MHz is operational, this will
+ * indicate the 40 MHz extension channel in combination with the
+ * control channel */
+ enum phy_chan_bond_state cb_state;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_length;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+
+} __packed;
+
+struct wcn36xx_hal_finish_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+enum wcn36xx_hal_rate_index {
+ HW_RATE_INDEX_1MBPS = 0x82,
+ HW_RATE_INDEX_2MBPS = 0x84,
+ HW_RATE_INDEX_5_5MBPS = 0x8B,
+ HW_RATE_INDEX_6MBPS = 0x0C,
+ HW_RATE_INDEX_9MBPS = 0x12,
+ HW_RATE_INDEX_11MBPS = 0x96,
+ HW_RATE_INDEX_12MBPS = 0x18,
+ HW_RATE_INDEX_18MBPS = 0x24,
+ HW_RATE_INDEX_24MBPS = 0x30,
+ HW_RATE_INDEX_36MBPS = 0x48,
+ HW_RATE_INDEX_48MBPS = 0x60,
+ HW_RATE_INDEX_54MBPS = 0x6C
+};
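+
+/*
+ * Editor's note (an observation, not stated in the original header): the
+ * values appear to encode the rate in 500 kbps units, with bit 7 set for
+ * the DSSS/CCK rates, e.g.:
+ *
+ *	HW_RATE_INDEX_6MBPS  == 6 * 2           == 0x0C
+ *	HW_RATE_INDEX_11MBPS == (11 * 2) | 0x80 == 0x96
+ */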
+
+struct wcn36xx_hal_supported_rates {
+ /*
+ * For Self STA Entry: this represents Self Mode.
+ * For Peer Stations, this represents the mode of the peer.
+ * On Station:
+ *
+ * --this mode is updated when PE adds the Self Entry.
+ *
+ * -- OR when PE sends 'ADD_BSS' message and station context in BSS
+ * is used to indicate the mode of the AP.
+ *
+ * ON AP:
+ *
+ * -- this mode is updated when PE sends 'ADD_BSS' and Sta entry
+ * for that BSS is used to indicate the self mode of the AP.
+ *
+ * -- OR when a station is associated, PE sends 'ADD_STA' message
+ * with this mode updated.
+ */
+
+ enum sta_rate_mode op_rate_mode;
+
+	/* 11b, 11a and aniLegacyRates are IE rates which give the rate in
+	 * units of 500 kbps */
+ u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES];
+ u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES];
+ u16 legacy_rates[WCN36XX_HAL_NUM_POLARIS_RATES];
+ u16 reserved;
+
+	/* Taurus only supports 26 Titan rates (no ESF/concat rates will be
+	 * supported). The first 26 bits are reserved for those Titan rates,
+	 * the last 4 bits (bit 28-31) for Taurus, and 2 bits (bit 26-27) are
+	 * reserved. */
+ /* Titan and Taurus Rates */
+ u32 enhanced_rate_bitmap;
+
+ /*
+ * 0-76 bits used, remaining reserved
+ * bits 0-15 and 32 should be set.
+ */
+ u8 supported_mcs_set[WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET];
+
+ /*
+ * RX Highest Supported Data Rate defines the highest data
+	 * rate that the STA is able to receive, in units of 1 Mbps.
+ * This value is derived from "Supported MCS Set field" inside
+ * the HT capability element.
+ */
+ u16 rx_highest_data_rate;
+
+} __packed;
+
+struct wcn36xx_hal_config_sta_params {
+ /* BSSID of STA */
+ u8 bssid[ETH_ALEN];
+
+ /* ASSOC ID, as assigned by UMAC */
+ u16 aid;
+
+ /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+ u8 type;
+
+ /* Short Preamble Supported. */
+ u8 short_preamble_supported;
+
+ /* MAC Address of STA */
+ u8 mac[ETH_ALEN];
+
+ /* Listen interval of the STA */
+ u16 listen_interval;
+
+ /* Support for 11e/WMM */
+ u8 wmm_enabled;
+
+ /* 11n HT capable STA */
+ u8 ht_capable;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* RIFS mode 0 - NA, 1 - Allowed */
+ u8 rifs_mode;
+
+ /* L-SIG TXOP Protection mechanism
+ 0 - No Support, 1 - Supported
+ SG - there is global field */
+ u8 lsig_txop_protection;
+
+ /* Max Ampdu Size supported by STA. TPE programming.
+ 0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
+ u8 max_ampdu_size;
+
+ /* Max Ampdu density. Used by RA. 3 : 0~7 : 2^(11nAMPDUdensity -4) */
+ u8 max_ampdu_density;
+
+ /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+ u8 max_amsdu_size;
+
+ /* Short GI support for 40Mhz packets */
+ u8 sgi_40mhz;
+
+ /* Short GI support for 20Mhz packets */
+ u8 sgi_20Mhz;
+
+ /* TODO move this parameter to the end for 3680 */
+ /* These rates are the intersection of peer and self capabilities. */
+ struct wcn36xx_hal_supported_rates supported_rates;
+
+ /* Robust Management Frame (RMF) enabled/disabled */
+ u8 rmf;
+
+ /* The unicast encryption type in the association */
+ u32 encrypt_type;
+
+ /* HAL should update the existing STA entry, if this flag is set. UMAC
+ will set this flag in case of RE-ASSOC, where we want to reuse the
+ old STA ID. 0 = Add, 1 = Update */
+ u8 action;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* Max SP Length */
+ u8 max_sp_len;
+
+ /* 11n Green Field preamble support
+ 0 - Not supported, 1 - Supported */
+ u8 green_field_capable;
+
+ /* MIMO Power Save mode */
+ enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+ /* Delayed BA Support */
+ u8 delayed_ba_support;
+
+ /* Max AMPDU duration in 32us */
+ u8 max_ampdu_duration;
+
+ /* HT STA should set it to 1 if it is enabled in BSS. HT STA should
+ * set it to 0 if AP does not support it. This indication is sent
+	 * to HAL and HAL uses this flag to pick up the appropriate 40 MHz
+ * rates. */
+ u8 dsss_cck_mode_40mhz;
+
+ /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+	 * Retained for backward compatibility with existing HAL code */
+ u8 sta_index;
+
+ /* BSSID of BSS to which station is associated. Set to 0xFF when
+	 * invalid. Retained for backward compatibility with existing HAL
+ * code */
+ u8 bssid_index;
+
+ u8 p2p;
+
+ /* TODO add this parameter for 3680. */
+ /* Reserved to align next field on a dword boundary */
+ /* u8 reserved; */
+} __packed;
+
+struct wcn36xx_hal_config_sta_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_sta_params sta_params;
+} __packed;
+
+struct wcn36xx_hal_config_sta_params_v1 {
+ /* BSSID of STA */
+ u8 bssid[ETH_ALEN];
+
+ /* ASSOC ID, as assigned by UMAC */
+ u16 aid;
+
+ /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+ u8 type;
+
+ /* Short Preamble Supported. */
+ u8 short_preamble_supported;
+
+ /* MAC Address of STA */
+ u8 mac[ETH_ALEN];
+
+ /* Listen interval of the STA */
+ u16 listen_interval;
+
+ /* Support for 11e/WMM */
+ u8 wmm_enabled;
+
+ /* 11n HT capable STA */
+ u8 ht_capable;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* RIFS mode 0 - NA, 1 - Allowed */
+ u8 rifs_mode;
+
+ /* L-SIG TXOP Protection mechanism
+ 0 - No Support, 1 - Supported
+ SG - there is global field */
+ u8 lsig_txop_protection;
+
+ /* Max Ampdu Size supported by STA. TPE programming.
+ 0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
+ u8 max_ampdu_size;
+
+ /* Max Ampdu density. Used by RA. 3 : 0~7 : 2^(11nAMPDUdensity -4) */
+ u8 max_ampdu_density;
+
+ /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+ u8 max_amsdu_size;
+
+ /* Short GI support for 40Mhz packets */
+ u8 sgi_40mhz;
+
+ /* Short GI support for 20Mhz packets */
+ u8 sgi_20Mhz;
+
+ /* Robust Management Frame (RMF) enabled/disabled */
+ u8 rmf;
+
+ /* The unicast encryption type in the association */
+ u32 encrypt_type;
+
+ /* HAL should update the existing STA entry, if this flag is set. UMAC
+ will set this flag in case of RE-ASSOC, where we want to reuse the
+ old STA ID. 0 = Add, 1 = Update */
+ u8 action;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* Max SP Length */
+ u8 max_sp_len;
+
+ /* 11n Green Field preamble support
+ 0 - Not supported, 1 - Supported */
+ u8 green_field_capable;
+
+ /* MIMO Power Save mode */
+ enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+ /* Delayed BA Support */
+ u8 delayed_ba_support;
+
+ /* Max AMPDU duration in 32us */
+ u8 max_ampdu_duration;
+
+ /* HT STA should set it to 1 if it is enabled in BSS. HT STA should
+ * set it to 0 if AP does not support it. This indication is sent
+	 * to HAL and HAL uses this flag to pick up the appropriate 40 MHz
+ * rates. */
+ u8 dsss_cck_mode_40mhz;
+
+ /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+	 * Retained for backward compatibility with existing HAL code */
+ u8 sta_index;
+
+ /* BSSID of BSS to which station is associated. Set to 0xFF when
+	 * invalid. Retained for backward compatibility with existing HAL
+ * code */
+ u8 bssid_index;
+
+ u8 p2p;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* These rates are the intersection of peer and self capabilities. */
+ struct wcn36xx_hal_supported_rates supported_rates;
+} __packed;
+
+struct wcn36xx_hal_config_sta_req_msg_v1 {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_sta_params_v1 sta_params;
+} __packed;
+
+struct config_sta_rsp_params {
+ /* success or failure */
+ u32 status;
+
+	/* Station index; valid only when the 'status' field value is SUCCESS */
+ u8 sta_index;
+
+ /* BSSID Index of BSS to which the station is associated */
+ u8 bssid_index;
+
+ /* DPU Index for PTK */
+ u8 dpu_index;
+
+ /* DPU Index for GTK */
+ u8 bcast_dpu_index;
+
+ /* DPU Index for IGTK */
+ u8 bcast_mgmt_dpu_idx;
+
+ /* PTK DPU signature */
+ u8 uc_ucast_sig;
+
+	/* GTK DPU signature */
+ u8 uc_bcast_sig;
+
+ /* IGTK DPU signature */
+ u8 uc_mgmt_sig;
+
+ u8 p2p;
+
+} __packed;
+
+struct wcn36xx_hal_config_sta_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ struct config_sta_rsp_params params;
+} __packed;
+
+/* Delete STA Request message */
+struct wcn36xx_hal_delete_sta_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Index of STA to delete */
+ u8 sta_index;
+
+} __packed;
+
+/* Delete STA Response message */
+struct wcn36xx_hal_delete_sta_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Index of STA deleted */
+ u8 sta_id;
+} __packed;
+
+/* 12 Bytes long because this structure can be used to represent rate and
+ * extended rate set IEs. The parser assumes this to be at least 12 */
+struct wcn36xx_hal_rate_set {
+ u8 num_rates;
+ u8 rate[WCN36XX_HAL_MAC_RATESET_EID_MAX];
+} __packed;
+
+/* access category record */
+struct wcn36xx_hal_aci_aifsn {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 rsvd:1;
+ u8 aci:2;
+ u8 acm:1;
+ u8 aifsn:4;
+#else
+ u8 aifsn:4;
+ u8 acm:1;
+ u8 aci:2;
+ u8 rsvd:1;
+#endif
+} __packed;
+
+/* contention window size */
+struct wcn36xx_hal_mac_cw {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 max:4;
+ u8 min:4;
+#else
+ u8 min:4;
+ u8 max:4;
+#endif
+} __packed;
+
+struct wcn36xx_hal_edca_param_record {
+ struct wcn36xx_hal_aci_aifsn aci;
+ struct wcn36xx_hal_mac_cw cw;
+ u16 txop_limit;
+} __packed;
+
+struct wcn36xx_hal_mac_ssid {
+ u8 length;
+ u8 ssid[32];
+} __packed;
+
+/* Concurrency role. These are generic IDs that identify the various roles
+ * in the software system. */
+enum wcn36xx_hal_con_mode {
+ WCN36XX_HAL_STA_MODE = 0,
+
+	/* to support softAp mode. This is misleading;
+	   it means AP MODE only. */
+ WCN36XX_HAL_STA_SAP_MODE = 1,
+
+ WCN36XX_HAL_P2P_CLIENT_MODE,
+ WCN36XX_HAL_P2P_GO_MODE,
+ WCN36XX_HAL_MONITOR_MODE,
+};
+
+/* This is a bit pattern to be set for each mode
+ * bit 0 - sta mode
+ * bit 1 - ap mode
+ * bit 2 - p2p client mode
+ * bit 3 - p2p go mode */
+enum wcn36xx_hal_concurrency_mode {
+ HAL_STA = 1,
+ HAL_SAP = 2,
+
+	/* to support sta, softAp mode. This means STA+AP mode */
+ HAL_STA_SAP = 3,
+
+ HAL_P2P_CLIENT = 4,
+ HAL_P2P_GO = 8,
+ HAL_MAX_CONCURRENCY_PERSONA = 4
+};
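+
+/*
+ * Editor's note: because each role is one bit, concurrent personas are
+ * expressed by OR-ing the values, e.g. a STA running alongside a P2P GO
+ * would be HAL_STA | HAL_P2P_GO == 9, while HAL_STA_SAP == 3 is the
+ * pre-defined STA+AP combination.
+ */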
+
+struct wcn36xx_hal_config_bss_params {
+ /* BSSID */
+ u8 bssid[ETH_ALEN];
+
+ /* Self Mac Address */
+ u8 self_mac_addr[ETH_ALEN];
+
+ /* BSS type */
+ enum wcn36xx_hal_bss_type bss_type;
+
+ /* Operational Mode: AP =0, STA = 1 */
+ u8 oper_mode;
+
+ /* Network Type */
+ enum wcn36xx_hal_nw_type nw_type;
+
+ /* Used to classify PURE_11G/11G_MIXED to program MTU */
+ u8 short_slot_time_supported;
+
+ /* Co-exist with 11a STA */
+ u8 lla_coexist;
+
+ /* Co-exist with 11b STA */
+ u8 llb_coexist;
+
+ /* Co-exist with 11g STA */
+ u8 llg_coexist;
+
+ /* Coexistence with 11n STA */
+ u8 ht20_coexist;
+
+ /* Non GF coexist flag */
+ u8 lln_non_gf_coexist;
+
+ /* TXOP protection support */
+ u8 lsig_tx_op_protection_full_support;
+
+ /* RIFS mode */
+ u8 rifs_mode;
+
+ /* Beacon Interval in TU */
+ u16 beacon_interval;
+
+ /* DTIM period */
+ u8 dtim_period;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* Operating channel */
+ u8 oper_channel;
+
+ /* Extension channel for channel bonding */
+ u8 ext_channel;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* TODO move sta to the end for 3680 */
+ /* Context of the station being added in HW
+ * Add a STA entry for "itself" -
+ *
+ * On AP - Add the AP itself in an "STA context"
+ *
+ * On STA - Add the AP to which this STA is joining in an
+ * "STA context"
+ */
+ struct wcn36xx_hal_config_sta_params sta;
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* HAL should update the existing BSS entry, if this flag is set.
+ * UMAC will set this flag in case of reassoc, where we want to
+	 * reuse the old BSSID and still return success. 0 = Add, 1 =
+ * Update */
+ u8 action;
+
+ /* MAC Rate Set */
+ struct wcn36xx_hal_rate_set rateset;
+
+ /* Enable/Disable HT capabilities of the BSS */
+ u8 ht;
+
+ /* Enable/Disable OBSS protection */
+ u8 obss_prot_enabled;
+
+ /* RMF enabled/disabled */
+ u8 rmf;
+
+	/* HT operating mode of the 802.11n STA */
+ enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+ /* Dual CTS Protection: 0 - Unused, 1 - Used */
+ u8 dual_cts_protection;
+
+ /* Probe Response Max retries */
+ u8 max_probe_resp_retry_limit;
+
+ /* To Enable Hidden ssid */
+ u8 hidden_ssid;
+
+ /* To Enable Disable FW Proxy Probe Resp */
+ u8 proxy_probe_resp;
+
+ /* Boolean to indicate if EDCA params are valid. UMAC might not
+ * have valid EDCA params or might not desire to apply EDCA params
+ * during config BSS. 0 implies Not Valid ; Non-Zero implies
+ * valid */
+ u8 edca_params_valid;
+
+ /* EDCA Parameters for Best Effort Access Category */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+	/* EDCA Parameters for Background Access Category */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* EDCA Parameters for Video Access Category */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* EDCA Parameters for Voice Access Category */
+ struct wcn36xx_hal_edca_param_record acvo;
+
+ /* Ext Bss Config Msg if set */
+ u8 ext_set_sta_key_param_valid;
+
+ /* SetStaKeyParams for ext bss msg */
+ struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+ /* Persona for the BSS can be STA,AP,GO,CLIENT value same as enum
+ * wcn36xx_hal_con_mode */
+ u8 wcn36xx_hal_persona;
+
+ u8 spectrum_mgt_enable;
+
+ /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+ s8 tx_mgmt_power;
+
+ /* maxTxPower has max power to be used after applying the power
+ * constraint if any */
+ s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_params bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_params_v1 {
+ /* BSSID */
+ u8 bssid[ETH_ALEN];
+
+ /* Self Mac Address */
+ u8 self_mac_addr[ETH_ALEN];
+
+ /* BSS type */
+ enum wcn36xx_hal_bss_type bss_type;
+
+ /* Operational Mode: AP =0, STA = 1 */
+ u8 oper_mode;
+
+ /* Network Type */
+ enum wcn36xx_hal_nw_type nw_type;
+
+ /* Used to classify PURE_11G/11G_MIXED to program MTU */
+ u8 short_slot_time_supported;
+
+ /* Co-exist with 11a STA */
+ u8 lla_coexist;
+
+ /* Co-exist with 11b STA */
+ u8 llb_coexist;
+
+ /* Co-exist with 11g STA */
+ u8 llg_coexist;
+
+ /* Coexistence with 11n STA */
+ u8 ht20_coexist;
+
+ /* Non GF coexist flag */
+ u8 lln_non_gf_coexist;
+
+ /* TXOP protection support */
+ u8 lsig_tx_op_protection_full_support;
+
+ /* RIFS mode */
+ u8 rifs_mode;
+
+ /* Beacon Interval in TU */
+ u16 beacon_interval;
+
+ /* DTIM period */
+ u8 dtim_period;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* Operating channel */
+ u8 oper_channel;
+
+ /* Extension channel for channel bonding */
+ u8 ext_channel;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* HAL should update the existing BSS entry, if this flag is set.
+ * UMAC will set this flag in case of reassoc, where we want to
+	 * reuse the old BSSID and still return success. 0 = Add, 1 =
+ * Update */
+ u8 action;
+
+ /* MAC Rate Set */
+ struct wcn36xx_hal_rate_set rateset;
+
+ /* Enable/Disable HT capabilities of the BSS */
+ u8 ht;
+
+ /* Enable/Disable OBSS protection */
+ u8 obss_prot_enabled;
+
+ /* RMF enabled/disabled */
+ u8 rmf;
+
+ /* HT Operating Mode of the 802.11n STA */
+ enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+ /* Dual CTS Protection: 0 - Unused, 1 - Used */
+ u8 dual_cts_protection;
+
+ /* Probe Response Max retries */
+ u8 max_probe_resp_retry_limit;
+
+ /* To Enable Hidden ssid */
+ u8 hidden_ssid;
+
+ /* To Enable/Disable FW Proxy Probe Resp */
+ u8 proxy_probe_resp;
+
+ /* Boolean to indicate if EDCA params are valid. UMAC might not
+ * have valid EDCA params or might not desire to apply EDCA params
+ * during config BSS. 0 implies Not Valid ; Non-Zero implies
+ * valid */
+ u8 edca_params_valid;
+
+ /* EDCA Parameters for Best Effort Access Category */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+ /* EDCA Parameters for Background Access Category */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* EDCA Parameters for Video Access Category */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* EDCA Parameters for Voice Access Category */
+ struct wcn36xx_hal_edca_param_record acvo;
+
+ /* Ext Bss Config Msg if set */
+ u8 ext_set_sta_key_param_valid;
+
+ /* SetStaKeyParams for ext bss msg */
+ struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+ /* Persona for the BSS can be STA,AP,GO,CLIENT value same as enum
+ * wcn36xx_hal_con_mode */
+ u8 wcn36xx_hal_persona;
+
+ u8 spectrum_mgt_enable;
+
+ /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+ s8 tx_mgmt_power;
+
+ /* maxTxPower has max power to be used after applying the power
+ * constraint if any */
+ s8 max_tx_power;
+
+ /* Context of the station being added in HW
+ * Add a STA entry for "itself" -
+ *
+ * On AP - Add the AP itself in an "STA context"
+ *
+ * On STA - Add the AP to which this STA is joining in an
+ * "STA context"
+ */
+ struct wcn36xx_hal_config_sta_params_v1 sta;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg_v1 {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_params_v1 bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_params {
+ /* Success or Failure */
+ u32 status;
+
+ /* BSS index allocated by HAL */
+ u8 bss_index;
+
+ /* DPU descriptor index for PTK */
+ u8 dpu_desc_index;
+
+ /* PTK DPU signature */
+ u8 ucast_dpu_signature;
+
+ /* DPU descriptor index for GTK */
+ u8 bcast_dpu_desc_indx;
+
+ /* GTK DPU signature */
+ u8 bcast_dpu_signature;
+
+ /* DPU descriptor for IGTK */
+ u8 mgmt_dpu_desc_index;
+
+ /* IGTK DPU signature */
+ u8 mgmt_dpu_signature;
+
+ /* Station Index for BSS entry */
+ u8 bss_sta_index;
+
+ /* Self station index for this BSS */
+ u8 bss_self_sta_index;
+
+ /* Bcast station for buffering bcast frames in AP role */
+ u8 bss_bcast_sta_idx;
+
+ /* MAC Address of STA(PEER/SELF) in staContext of configBSSReq */
+ u8 mac[ETH_ALEN];
+
+ /* HAL fills in the tx power used for mgmt frames in this field. */
+ s8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_delete_bss_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS index to be deleted */
+ u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_delete_bss_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ /* BSS index that has been deleted */
+ u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_join_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Indicates the BSSID to which STA is going to associate */
+ u8 bssid[ETH_ALEN];
+
+ /* Indicates the channel to switch to. */
+ u8 channel;
+
+ /* Self STA MAC */
+ u8 self_sta_mac_addr[ETH_ALEN];
+
+ /* Local power constraint */
+ u8 local_power_constraint;
+
+ /* Secondary channel offset */
+ enum phy_chan_bond_state secondary_channel_offset;
+
+ /* link State */
+ enum wcn36xx_hal_link_state link_state;
+
+ /* Max TX power */
+ s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_join_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* HAL fills in the tx power used for mgmt frames in this field */
+ u8 tx_mgmt_power;
+} __packed;
+
+struct post_assoc_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ struct wcn36xx_hal_config_sta_params sta_params;
+ struct wcn36xx_hal_config_bss_params bss_params;
+};
+
+struct post_assoc_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct config_sta_rsp_params sta_rsp_params;
+ struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+};
+
+/* This is used to create a set of WEP keys for a given BSS. */
+struct wcn36xx_hal_set_bss_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS Index of the BSS */
+ u8 bss_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Number of keys */
+ u8 num_keys;
+
+ /* Array of keys. */
+ struct wcn36xx_hal_keys keys[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+ /* Control for Replay Count: 1 = single TID based replay count on TX,
+ * 0 = per TID based replay count on TX */
+ u8 single_tid_rc;
+} __packed;
+
+/* tagged version of set bss key */
+struct wcn36xx_hal_set_bss_key_req_msg_tagged {
+ struct wcn36xx_hal_set_bss_key_req_msg Msg;
+ u32 tag;
+} __packed;
+
+struct wcn36xx_hal_set_bss_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+/*
+ * This is used to configure the key information on a given station.
+ * When the sec_type is WEP40 or WEP104, the def_wep_idx is used to locate
+ * a preconfigured key from the BSS the station is associated with; otherwise
+ * a new key descriptor is created based on the key field.
+ */
+struct wcn36xx_hal_set_sta_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_set_sta_key_params set_sta_key_params;
+} __packed;
+
+struct wcn36xx_hal_set_sta_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS Index of the BSS */
+ u8 bss_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Key Id */
+ u8 key_id;
+
+ /* STATIC/DYNAMIC. Used in Nullifying in Key Descriptors for
+ * Static/Dynamic keys */
+ enum ani_wep_type wep_type;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+/*
+ * This is used by PE to Remove the key information on a given station.
+ */
+struct wcn36xx_hal_remove_sta_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* STA Index */
+ u16 sta_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Key Id */
+ u8 key_id;
+
+ /* Whether to invalidate the Broadcast key or Unicast key. In case
+ * of WEP, the same key is used for both broadcast and unicast. */
+ u8 unicast;
+
+} __packed;
+
+struct wcn36xx_hal_remove_sta_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*success or failure */
+ u32 status;
+
+} __packed;
+
+#ifdef FEATURE_OEM_DATA_SUPPORT
+
+#ifndef OEM_DATA_REQ_SIZE
+#define OEM_DATA_REQ_SIZE 134
+#endif
+
+#ifndef OEM_DATA_RSP_SIZE
+#define OEM_DATA_RSP_SIZE 1968
+#endif
+
+struct start_oem_data_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+ tSirMacAddr self_mac_addr;
+ u8 oem_data_req[OEM_DATA_REQ_SIZE];
+
+};
+
+struct start_oem_data_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 oem_data_rsp[OEM_DATA_RSP_SIZE];
+};
+
+#endif
+
+struct wcn36xx_hal_switch_channel_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Channel number */
+ u8 channel_number;
+
+ /* Local power constraint */
+ u8 local_power_constraint;
+
+ /* Secondary channel offset */
+ enum phy_chan_bond_state secondary_channel_offset;
+
+ /* HAL fills in the tx power used for mgmt frames in this field. */
+ u8 tx_mgmt_power;
+
+ /* Max TX power */
+ u8 max_tx_power;
+
+ /* Self STA MAC */
+ u8 self_sta_mac_addr[ETH_ALEN];
+
+ /* VO WIFI comment: BSSID needed to identify session. As the
+ * request has power constraints, this should be applied only to
+ * that session. Since MTU timing and EDCA are sessionized, this
+ * struct needs to be sessionized and the bssid needs to be out of
+ * the VOWifi feature flag. V IMP: Keep the bssId field at the end
+ * of this msg. It is used to maintain backward compatibility by
+ * way of being ignored when using new host/old FW or old host/new
+ * FW, since it is at the end of this struct.
+ */
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_switch_channel_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Status */
+ u32 status;
+
+ /* Channel number - same as in request */
+ u8 channel_number;
+
+ /* HAL fills in the tx power used for mgmt frames in this field */
+ u8 tx_mgmt_power;
+
+ /* BSSID needed to identify session - same as in request */
+ u8 bssid[ETH_ALEN];
+
+} __packed;
+
+struct update_edca_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*BSS Index */
+ u16 bss_index;
+
+ /* Best Effort */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+ /* Background */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* Video */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* Voice */
+ struct wcn36xx_hal_edca_param_record acvo;
+};
+
+struct update_edca_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct dpu_stats_params {
+ /* Index of the STA to which the statistics belong */
+ u16 sta_index;
+
+ /* Encryption mode */
+ u8 enc_mode;
+
+ /* status */
+ u32 status;
+
+ /* Statistics */
+ u32 send_blocks;
+ u32 recv_blocks;
+ u32 replays;
+ u8 mic_error_cnt;
+ u32 prot_excl_cnt;
+ u16 format_err_cnt;
+ u16 un_decryptable_cnt;
+ u32 decrypt_err_cnt;
+ u32 decrypt_ok_cnt;
+};
+
+struct wcn36xx_hal_stats_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Valid STA Idx for per STA stats request */
+ u32 sta_id;
+
+ /* Categories of stats requested as specified in eHalStatsMask */
+ u32 stats_mask;
+};
+
+struct ani_summary_stats_info {
+ /* Total number of packets(per AC) that were successfully
+ * transmitted with retries */
+ u32 retry_cnt[4];
+
+ /* The number of MSDU packets and MMPDU frames per AC that the
+ * 802.11 station successfully transmitted after more than one
+ * retransmission attempt */
+ u32 multiple_retry_cnt[4];
+
+ /* Total number of packets(per AC) that were successfully
+ * transmitted (with and without retries, including multi-cast,
+ * broadcast) */
+ u32 tx_frm_cnt[4];
+
+ /* Total number of packets that were successfully received (after
+ * appropriate filter rules including multi-cast, broadcast) */
+ u32 rx_frm_cnt;
+
+ /* Total number of duplicate frames received successfully */
+ u32 frm_dup_cnt;
+
+ /* Total number of packets (per AC) that failed to transmit */
+ u32 fail_cnt[4];
+
+ /* Total number of RTS/CTS sequence failures for transmission of a
+ * packet */
+ u32 rts_fail_cnt;
+
+ /* Total number of packets that failed to transmit because of no
+ * ACK from the remote entity */
+ u32 ack_fail_cnt;
+
+ /* Total number of RTS/CTS sequence success for transmission of a
+ * packet */
+ u32 rts_succ_cnt;
+
+ /* The sum of the receive error count and dropped-receive-buffer
+ * error count. HAL will provide this as a sum of (FCS error) +
+ * (Fail get BD/PDU in HW) */
+ u32 rx_discard_cnt;
+
+ /*
+ * The receive error count. HAL will provide the RxP FCS error
+ * global counter. */
+ u32 rx_error_cnt;
+
+ /* The sum of the transmit-directed byte count, transmit-multicast
+ * byte count and transmit-broadcast byte count. HAL will sum TPE
+ * UC/MC/BCAST global counters to provide this. */
+ u32 tx_byte_cnt;
+};
+
+/* defines tx_rate_flags */
+enum tx_rate_info {
+ /* Legacy rates */
+ HAL_TX_RATE_LEGACY = 0x1,
+
+ /* HT20 rates */
+ HAL_TX_RATE_HT20 = 0x2,
+
+ /* HT40 rates */
+ HAL_TX_RATE_HT40 = 0x4,
+
+ /* Rate with Short guard interval */
+ HAL_TX_RATE_SGI = 0x8,
+
+ /* Rate with Long guard interval */
+ HAL_TX_RATE_LGI = 0x10
+};
+
+struct ani_global_class_a_stats_info {
+ /* The number of MPDU frames received by the 802.11 station for
+ * MSDU packets or MMPDU frames */
+ u32 rx_frag_cnt;
+
+ /* The number of MPDU frames received by the 802.11 station for
+ * MSDU packets or MMPDU frames when a promiscuous packet filter
+ * was enabled */
+ u32 promiscuous_rx_frag_cnt;
+
+ /* The receiver input sensitivity referenced to a FER of 8% at an
+ * MPDU length of 1024 bytes at the antenna connector. Each element
+ * of the array shall correspond to a supported rate and the order
+ * shall be the same as the supportedRates parameter. */
+ u32 rx_input_sensitivity;
+
+ /* The maximum transmit power in dBm, up to one decimal place. For
+ * example, if it is 10.5 dBm, the value would be 105 */
+ u32 max_pwr;
+
+ /* Number of times the receiver failed to synchronize with the
+ * incoming signal after detecting the sync in the preamble of the
+ * transmitted PLCP protocol data unit. */
+ u32 sync_fail_cnt;
+
+ /* Legacy transmit rate, in units of 500 kbit/sec, for the most
+ * recently transmitted frame */
+ u32 tx_rate;
+
+ /* mcs index for HT20 and HT40 rates */
+ u32 mcs_index;
+
+ /* to differentiate between HT20 and HT40 rates; short and long
+ * guard interval */
+ u32 tx_rate_flags;
+};
+
+struct ani_global_security_stats {
+ /* The number of unencrypted received MPDU frames that the MAC
+ * layer discarded when the IEEE 802.11 dot11ExcludeUnencrypted
+ * management information base (MIB) object is enabled */
+ u32 rx_wep_unencrypted_frm_cnt;
+
+ /* The number of received MSDU packets that the 802.11 station
+ * discarded because of MIC failures */
+ u32 rx_mic_fail_cnt;
+
+ /* The number of encrypted MPDU frames that the 802.11 station
+ * failed to decrypt because of a TKIP ICV error */
+ u32 tkip_icv_err;
+
+ /* The number of received MPDU frames that the 802.11 station discarded
+ * because of an invalid AES-CCMP format */
+ u32 aes_ccmp_format_err;
+
+ /* The number of received MPDU frames that the 802.11 station
+ * discarded because of the AES-CCMP replay protection procedure */
+ u32 aes_ccmp_replay_cnt;
+
+ /* The number of received MPDU frames that the 802.11 station
+ * discarded because of errors detected by the AES-CCMP decryption
+ * algorithm */
+ u32 aes_ccmp_decrpt_err;
+
+ /* The number of encrypted MPDU frames received for which a WEP
+ * decryption key was not available on the 802.11 station */
+ u32 wep_undecryptable_cnt;
+
+ /* The number of encrypted MPDU frames that the 802.11 station
+ * failed to decrypt because of a WEP ICV error */
+ u32 wep_icv_err;
+
+ /* The number of received encrypted packets that the 802.11 station
+ * successfully decrypted */
+ u32 rx_decrypt_succ_cnt;
+
+ /* The number of encrypted packets that the 802.11 station failed
+ * to decrypt */
+ u32 rx_decrypt_fail_cnt;
+};
+
+struct ani_global_class_b_stats_info {
+ struct ani_global_security_stats uc_stats;
+ struct ani_global_security_stats mc_bc_stats;
+};
+
+struct ani_global_class_c_stats_info {
+ /* This counter shall be incremented for a received A-MSDU frame
+ * with the station's MAC address in the address 1 field or an
+ * A-MSDU frame with a group address in the address 1 field */
+ u32 rx_amsdu_cnt;
+
+ /* This counter shall be incremented when the MAC receives an AMPDU
+ * from the PHY */
+ u32 rx_ampdu_cnt;
+
+ /* This counter shall be incremented when a Frame is transmitted
+ * only on the primary channel */
+ u32 tx_20_frm_cnt;
+
+ /* This counter shall be incremented when a Frame is received only
+ * on the primary channel */
+ u32 rx_20_frm_cnt;
+
+ /* This counter shall be incremented by the number of MPDUs
+ * received in the A-MPDU when an A-MPDU is received */
+ u32 rx_mpdu_in_ampdu_cnt;
+
+ /* This counter shall be incremented when an MPDU delimiter has a
+ * CRC error when this is the first CRC error in the received AMPDU
+ * or when the previous delimiter has been decoded correctly */
+ u32 ampdu_delimiter_crc_err;
+};
+
+struct ani_per_sta_stats_info {
+ /* The number of MPDU frames that the 802.11 station transmitted
+ * and acknowledged through a received 802.11 ACK frame */
+ u32 tx_frag_cnt[4];
+
+ /* This counter shall be incremented when an A-MPDU is transmitted */
+ u32 tx_ampdu_cnt;
+
+ /* This counter shall increment by the number of MPDUs in the AMPDU
+ * when an A-MPDU is transmitted */
+ u32 tx_mpdu_in_ampdu_cnt;
+};
+
+struct wcn36xx_hal_stats_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ /* STA Idx */
+ u32 sta_index;
+
+ /* Categories of STATS being returned as per eHalStatsMask */
+ u32 stats_mask;
+
+ /* message type is same as the request type */
+ u16 msg_type;
+
+ /* length of the entire request, includes the pStatsBuf length too */
+ u16 msg_len;
+};
+
+struct wcn36xx_hal_set_link_state_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+ enum wcn36xx_hal_link_state state;
+ u8 self_mac_addr[ETH_ALEN];
+
+} __packed;
+
+struct set_link_state_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/* TSPEC Params */
+struct wcn36xx_hal_ts_info_tfc {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u16 ackPolicy:2;
+ u16 userPrio:3;
+ u16 psb:1;
+ u16 aggregation:1;
+ u16 accessPolicy:2;
+ u16 direction:2;
+ u16 tsid:4;
+ u16 trafficType:1;
+#else
+ u16 trafficType:1;
+ u16 tsid:4;
+ u16 direction:2;
+ u16 accessPolicy:2;
+ u16 aggregation:1;
+ u16 psb:1;
+ u16 userPrio:3;
+ u16 ackPolicy:2;
+#endif
+};
+
+/* Flag to schedule the traffic type */
+struct wcn36xx_hal_ts_info_sch {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 rsvd:7;
+ u8 schedule:1;
+#else
+ u8 schedule:1;
+ u8 rsvd:7;
+#endif
+};
+
+/* Traffic and scheduling info */
+struct wcn36xx_hal_ts_info {
+ struct wcn36xx_hal_ts_info_tfc traffic;
+ struct wcn36xx_hal_ts_info_sch schedule;
+};
+
+/* Information elements */
+struct wcn36xx_hal_tspec_ie {
+ u8 type;
+ u8 length;
+ struct wcn36xx_hal_ts_info ts_info;
+ u16 nom_msdu_size;
+ u16 max_msdu_size;
+ u32 min_svc_interval;
+ u32 max_svc_interval;
+ u32 inact_interval;
+ u32 suspend_interval;
+ u32 svc_start_time;
+ u32 min_data_rate;
+ u32 mean_data_rate;
+ u32 peak_data_rate;
+ u32 max_burst_sz;
+ u32 delay_bound;
+ u32 min_phy_rate;
+ u16 surplus_bw;
+ u16 medium_time;
+};
+
+struct add_ts_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS */
+ u16 tspec_index;
+
+ /* To program TPE with required parameters */
+ struct wcn36xx_hal_tspec_ie tspec;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* These parameters are for all the access categories */
+
+ /* Service Interval */
+ u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Suspend Interval */
+ u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Delay Interval */
+ u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
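+
+/* Illustrative sketch, not part of the firmware interface: one way a
+ * caller might build the 'uapsd' bitmap above, assuming the bit layout
+ * described in the comment (b3 = BE, b2 = BK, b1 = VI, b0 = VO). The
+ * helper name is hypothetical.
+ */
+static inline u8 wcn36xx_hal_build_uapsd_mask(u8 be, u8 bk, u8 vi, u8 vo)
+{
+ return (be ? (1 << 3) : 0) | (bk ? (1 << 2) : 0) |
+        (vi ? (1 << 1) : 0) | (vo ? (1 << 0) : 0);
+}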
+
+struct add_rs_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct del_ts_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TSPEC identifier uniquely identifying a TSPEC for a STA in a BSS */
+ u16 tspec_index;
+
+ /* To lookup station id using the mac address */
+ u8 bssid[ETH_ALEN];
+};
+
+struct del_ts_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/* End of TSpec Parameters */
+
+/* Start of BLOCK ACK related Parameters */
+
+struct wcn36xx_hal_add_ba_session_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* Peer MAC Address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* ADDBA Action Frame dialog token
+ HAL will not interpret this object */
+ u8 dialog_token;
+
+ /* TID for which the BA is being setup
+ This identifies the TC or TS of interest */
+ u8 tid;
+
+ /* 0 - Delayed BA (Not supported)
+ 1 - Immediate BA */
+ u8 policy;
+
+ /* Indicates the number of buffers for this TID (baTID)
+ NOTE - This is the requested buffer size. When this
+ is processed by HAL and subsequently by HDD, it is
+ possible that HDD may change this buffer size. Any
+ change in the buffer size should be noted by PE and
+ advertised appropriately in the ADDBA response */
+ u16 buffer_size;
+
+ /* BA timeout in TUs. 0 means no timeout will occur */
+ u16 timeout;
+
+ /* b0..b3 - Fragment Number - Always set to 0
+ b4..b15 - Starting Sequence Number of first MSDU
+ for which this BA is setup */
+ u16 ssn;
+
+ /* ADDBA direction
+ 1 - Originator
+ 0 - Recipient */
+ u8 direction;
+} __packed;
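+
+/* Illustrative sketch, not part of the firmware interface: packing the
+ * 'ssn' field above from a 12-bit starting sequence number, leaving the
+ * fragment number bits (b0..b3) at 0 as required. The helper name is
+ * hypothetical.
+ */
+static inline u16 wcn36xx_hal_pack_ba_ssn(u16 start_seq_num)
+{
+ return (start_seq_num & 0x0fff) << 4;
+}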
+
+struct wcn36xx_hal_add_ba_session_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Dialog token */
+ u8 dialog_token;
+
+ /* TID for which the BA session has been setup */
+ u8 ba_tid;
+
+ /* BA Buffer Size allocated for the current BA session */
+ u8 ba_buffer_size;
+
+ u8 ba_session_id;
+
+ /* Reordering Window buffer */
+ u8 win_size;
+
+ /* Station Index to id the sta */
+ u8 sta_index;
+
+ /* Starting Sequence Number */
+ u16 ssn;
+} __packed;
+
+struct wcn36xx_hal_add_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Session Id */
+ u8 session_id;
+
+ /* Reorder Window Size */
+ u8 win_size;
+/* Old FW 1.2.2.4 does not support this*/
+#ifdef FEATURE_ON_CHIP_REORDERING
+ u8 reordering_done_on_chip;
+#endif
+} __packed;
+
+struct wcn36xx_hal_add_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Dialog token */
+ u8 dialog_token;
+} __packed;
+
+struct add_ba_info {
+ u16 ba_enable:1;
+ u16 starting_seq_num:12;
+ u16 reserved:3;
+};
+
+struct wcn36xx_hal_trigger_ba_rsp_candidate {
+ u8 sta_addr[ETH_ALEN];
+ struct add_ba_info ba_info[STACFG_MAX_TC];
+} __packed;
+
+struct wcn36xx_hal_trigget_ba_req_candidate {
+ u8 sta_index;
+ u8 tid_bitmap;
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Session Id */
+ u8 session_id;
+
+ /* baCandidateCnt is followed by trigger BA
+ * Candidate List(tTriggerBaCandidate)
+ */
+ u16 candidate_cnt;
+
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+
+ /* success or failure */
+ u32 status;
+
+ /* baCandidateCnt is followed by trigger BA
+ * Rsp Candidate List(tTriggerRspBaCandidate)
+ */
+ u16 candidate_cnt;
+} __packed;
+
+struct wcn36xx_hal_del_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TID for which the BA session is being deleted */
+ u8 tid;
+
+ /* DELBA direction
+ 1 - Originator
+ 0 - Recipient */
+ u8 direction;
+} __packed;
+
+struct wcn36xx_hal_del_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct tsm_stats_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Traffic Id */
+ u8 tid;
+
+ u8 bssid[ETH_ALEN];
+};
+
+struct tsm_stats_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*success or failure */
+ u32 status;
+
+ /* Uplink Packet Queue delay */
+ u16 uplink_pkt_queue_delay;
+
+ /* Uplink Packet Queue delay histogram */
+ u16 uplink_pkt_queue_delay_hist[4];
+
+ /* Uplink Packet Transmit delay */
+ u32 uplink_pkt_tx_delay;
+
+ /* Uplink Packet loss */
+ u16 uplink_pkt_loss;
+
+ /* Uplink Packet count */
+ u16 uplink_pkt_count;
+
+ /* Roaming count */
+ u8 roaming_count;
+
+ /* Roaming Delay */
+ u16 roaming_delay;
+};
+
+struct set_key_done_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*bssid of the keys */
+ u8 bssidx;
+ u8 enc_type;
+};
+
+struct wcn36xx_hal_nv_img_download_req_msg {
+ /* Note: The length specified in wcn36xx_hal_nv_img_download_req_msg
+ * messages should be
+ * header.len = sizeof(wcn36xx_hal_nv_img_download_req_msg) +
+ * nv_img_buffer_size */
+ struct wcn36xx_hal_msg_header header;
+
+ /* Fragment sequence number of the NV Image. Note that NV Image
+ * might not fit into one message due to size limitation of the SMD
+ * channel FIFO. UMAC can hence choose to chop the NV blob into
+ * multiple fragments starting with sequence number 0, 1, 2 etc.
+ * The last fragment MUST be indicated by marking the
+ * isLastFragment field to 1. Note that all the NV blobs would be
+ * concatenated together by HAL without any padding bytes in
+ * between.*/
+ u16 frag_number;
+
+ /* Is this the last fragment? When set to 1 it indicates that no
+ * more fragments will be sent by UMAC and HAL can concatenate all
+ * the NV blobs rcvd & proceed with the parsing. HAL would generate
+ * a WCN36XX_HAL_DOWNLOAD_NV_RSP to the WCN36XX_HAL_DOWNLOAD_NV_REQ
+ * after it receives each fragment */
+ u16 last_fragment;
+
+ /* NV Image size (number of bytes) */
+ u32 nv_img_buffer_size;
+
+ /* Following the 'nv_img_buffer_size', there should be
+ * nv_img_buffer_size bytes of NV Image i.e.
+ * u8[nv_img_buffer_size] */
+} __packed;
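+
+/* Illustrative sketch, not part of the firmware interface: the header
+ * length rule from the comment above (struct size plus the number of NV
+ * bytes carried by this fragment) expressed as a helper. The helper name
+ * is hypothetical.
+ */
+static inline u32 wcn36xx_hal_nv_frag_msg_len(u32 frag_bytes)
+{
+ return sizeof(struct wcn36xx_hal_nv_img_download_req_msg) + frag_bytes;
+}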
+
+struct wcn36xx_hal_nv_img_download_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure. HAL would generate a
+ * WCN36XX_HAL_DOWNLOAD_NV_RSP after each fragment */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_nv_store_ind {
+ /* Note: The length specified in tHalNvStoreInd messages should be
+ * header.msgLen = sizeof(tHalNvStoreInd) + nvBlobSize */
+ struct wcn36xx_hal_msg_header header;
+
+ /* NV Item */
+ u32 table_id;
+
+ /* Size of NV Blob */
+ u32 nv_blob_size;
+
+ /* Following the 'nvBlobSize', there should be nvBlobSize bytes of
+ * NV blob i.e. u8[nvBlobSize] */
+};
+
+/* End of Block Ack Related Parameters */
+
+#define WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE 6
+
+/* Definition for MIC failure indication. MAC reports this each time a MIC
+ * failure occurs on an Rx TKIP packet
+ */
+struct mic_failure_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+
+ /* address used to compute MIC */
+ u8 src_addr[ETH_ALEN];
+
+ /* transmitter address */
+ u8 ta_addr[ETH_ALEN];
+
+ u8 dst_addr[ETH_ALEN];
+
+ u8 multicast;
+
+ /* first byte of IV */
+ u8 iv1;
+
+ /* second byte of IV */
+ u8 key_id;
+
+ /* sequence number */
+ u8 tsc[WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE];
+
+ /* receive address */
+ u8 rx_addr[ETH_ALEN];
+};
+
+struct update_vht_op_mode_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 op_mode;
+ u16 sta_id;
+};
+
+struct update_vht_op_mode_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+};
+
+struct update_beacon_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+
+ /* shortPreamble mode. HAL should update all the STA rates when it
+ * receives this message */
+ u8 short_preamble;
+
+ /* short Slot time. */
+ u8 short_slot_time;
+
+ /* Beacon Interval */
+ u16 beacon_interval;
+
+ /* Protection related */
+ u8 lla_coexist;
+ u8 llb_coexist;
+ u8 llg_coexist;
+ u8 ht20_coexist;
+ u8 lln_non_gf_coexist;
+ u8 lsig_tx_op_protection_full_support;
+ u8 rifs_mode;
+
+ u16 param_change_bitmap;
+};
+
+struct update_beacon_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+};
+
+struct wcn36xx_hal_send_beacon_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* length of the template. */
+ u32 beacon_length;
+
+ /* Beacon data. */
+ u8 beacon[BEACON_TEMPLATE_SIZE];
+
+ u8 bssid[ETH_ALEN];
+
+ /* TIM IE offset from the beginning of the template. */
+ u32 tim_ie_offset;
+
+ /* P2P IE offset from the beginning of the template */
+ u16 p2p_ie_offset;
+} __packed;
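+
+/* Illustrative sketch, not part of the firmware interface: minimal
+ * population of a send-beacon request from a raw template, assuming the
+ * caller has already located the TIM IE offset and that <linux/string.h>
+ * and <linux/errno.h> are available. The helper name is hypothetical.
+ */
+static inline int
+wcn36xx_hal_fill_send_beacon(struct wcn36xx_hal_send_beacon_req_msg *msg,
+                             const u8 *tmpl, u32 tmpl_len, u32 tim_offset,
+                             const u8 *bssid)
+{
+ if (tmpl_len > BEACON_TEMPLATE_SIZE)
+  return -EINVAL;
+
+ msg->beacon_length = tmpl_len;
+ memcpy(msg->beacon, tmpl, tmpl_len);
+ memcpy(msg->bssid, bssid, ETH_ALEN);
+ msg->tim_ie_offset = tim_offset;
+ msg->p2p_ie_offset = 0; /* no P2P IE in this sketch */
+ return 0;
+}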
+
+struct send_beacon_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+} __packed;
+
+struct enable_radar_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+ u8 channel;
+};
+
+struct enable_radar_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Link Parameters */
+ u8 bssid[ETH_ALEN];
+
+ /* success or failure */
+ u32 status;
+};
+
+struct radar_detect_intr_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 radar_det_channel;
+};
+
+struct radar_detect_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* channel number in which the RADAR detected */
+ u8 channel_number;
+
+ /* RADAR pulse width in microseconds */
+ u16 radar_pulse_width;
+
+ /* Number of RADAR pulses */
+ u16 num_radar_pulse;
+};
+
+struct wcn36xx_hal_get_tpc_report_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 sta[ETH_ALEN];
+ u8 dialog_token;
+ u8 txpower;
+};
+
+struct wcn36xx_hal_get_tpc_report_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_send_probe_resp_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 probe_resp_template[BEACON_TEMPLATE_SIZE];
+ u32 probe_resp_template_len;
+ u32 proxy_probe_req_valid_ie_bmap[8];
+ u8 bssid[ETH_ALEN];
+};
+
+struct send_probe_resp_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct send_unknown_frame_rx_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_delete_sta_context_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 aid;
+ u16 sta_id;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+
+ /* HAL copies bssid from the sta table. */
+ u8 addr2[ETH_ALEN];
+
+ /* To unify the keepalive / unknown A2 / tim-based disa */
+ u16 reason_code;
+} __packed;
+
+struct indicate_del_sta {
+ struct wcn36xx_hal_msg_header header;
+ u8 aid;
+ u8 sta_index;
+ u8 bss_index;
+ u8 reason_code;
+ u32 status;
+};
+
+struct bt_amp_event_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ enum bt_amp_event_type btAmpEventType;
+};
+
+struct bt_amp_event_rsp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct tl_hal_flush_ac_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index. originates from HAL */
+ u8 sta_id;
+
+ /* TID for which the transmit queue is being flushed */
+ u8 tid;
+};
+
+struct tl_hal_flush_ac_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index. originates from HAL */
+ u8 sta_id;
+
+ /* TID for which the transmit queue is being flushed */
+ u8 tid;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_enter_imps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_exit_imps_req {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_enter_bmps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+
+ /* TBTT value derived from the last beacon */
+#ifndef BUILD_QWPTTSTATIC
+ u64 tbtt;
+#endif
+ u8 dtim_count;
+
+ /* DTIM period given to HAL during association may not be valid, if
+ * association is based on ProbeRsp instead of beacon. */
+ u8 dtim_period;
+
+ /* For CCX and 11R Roaming */
+ u32 rssi_filter_period;
+
+ u32 num_beacon_per_rssi_average;
+ u8 rssi_filter_enable;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 send_data_null;
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_missed_beacon_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+} __packed;
+
+/* Beacon Filtering data structures */
+
+/* The above structure would be followed by multiple instances of the
+ * structure below
+ */
+struct beacon_filter_ie {
+ u8 element_id;
+ u8 check_ie_presence;
+ u8 offset;
+ u8 value;
+ u8 bitmask;
+ u8 ref;
+};
+
+struct wcn36xx_hal_add_bcn_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 capability_info;
+ u16 capability_mask;
+ u16 beacon_interval;
+ u16 ie_num;
+ u8 bss_index;
+ u8 reserved;
+};
+
+struct wcn36xx_hal_rem_bcn_filter_req {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 ie_Count;
+ u8 rem_ie_id[1];
+};
+
+#define WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD 0
+#define WCN36XX_HAL_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD 1
+#define WCN36XX_HAL_IPV6_NS_OFFLOAD 2
+#define WCN36XX_HAL_IPV6_ADDR_LEN 16
+#define WCN36XX_HAL_OFFLOAD_DISABLE 0
+#define WCN36XX_HAL_OFFLOAD_ENABLE 1
+#define WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE 0x2
+#define WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE \
+ (WCN36XX_HAL_OFFLOAD_ENABLE | WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE)
+
+struct wcn36xx_hal_ns_offload_params {
+ u8 src_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+ u8 self_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+ /* Only support 2 possible Network Advertisement IPv6 address */
+ u8 target_ipv6_addr1[WCN36XX_HAL_IPV6_ADDR_LEN];
+ u8 target_ipv6_addr2[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+ u8 self_addr[ETH_ALEN];
+ u8 src_ipv6_addr_valid:1;
+ u8 target_ipv6_addr1_valid:1;
+ u8 target_ipv6_addr2_valid:1;
+ u8 reserved1:5;
+
+ /* make it DWORD aligned */
+ u8 reserved2;
+
+ /* slot index for this offload */
+ u32 slot_index;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_req {
+ u8 offload_Type;
+
+ /* enable or disable */
+ u8 enable;
+
+ union {
+ u8 host_ipv4_addr[4];
+ u8 host_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+ } u;
+};
+
+struct wcn36xx_hal_host_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_host_offload_req host_offload_params;
+ struct wcn36xx_hal_ns_offload_params ns_offload_params;
+};
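+
+/* Illustrative sketch, not part of the firmware interface: enabling the
+ * IPv4 ARP-reply offload for a given host address, using the offload
+ * type and enable macros defined above; <linux/string.h> is assumed to
+ * be available. The helper name is hypothetical.
+ */
+static inline void
+wcn36xx_hal_fill_arp_offload(struct wcn36xx_hal_host_offload_req *req,
+                             const u8 *host_ipv4)
+{
+ req->offload_Type = WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD;
+ req->enable = WCN36XX_HAL_OFFLOAD_ENABLE;
+ memcpy(req->u.host_ipv4_addr, host_ipv4, 4);
+}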
+
+/* Packet Types. */
+#define WCN36XX_HAL_KEEP_ALIVE_NULL_PKT 1
+#define WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP 2
+
+/* Enable or disable keep alive */
+#define WCN36XX_HAL_KEEP_ALIVE_DISABLE 0
+#define WCN36XX_HAL_KEEP_ALIVE_ENABLE 1
+#define WCN36XX_KEEP_ALIVE_TIME_PERIOD 30 /* unit: s */
+
+/* Keep Alive request. */
+struct wcn36xx_hal_keep_alive_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 packet_type;
+ u32 time_period;
+ u8 host_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+ u8 dest_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+ u8 dest_addr[ETH_ALEN];
+ u8 bss_index;
+} __packed;
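+
+/* Illustrative sketch, not part of the firmware interface: enabling a
+ * periodic NULL-frame keep-alive with the default period defined above.
+ * The helper name is hypothetical; the caller would still fill in the
+ * message header and address fields.
+ */
+static inline void
+wcn36xx_hal_fill_null_keep_alive(struct wcn36xx_hal_keep_alive_req_msg *msg,
+                                 u8 bss_index)
+{
+ msg->packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
+ msg->time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;
+ msg->bss_index = bss_index;
+}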
+
+struct wcn36xx_hal_rssi_threshold_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ s8 threshold1:8;
+ s8 threshold2:8;
+ s8 threshold3:8;
+ u8 thres1_pos_notify:1;
+ u8 thres1_neg_notify:1;
+ u8 thres2_pos_notify:1;
+ u8 thres2_neg_notify:1;
+ u8 thres3_pos_notify:1;
+ u8 thres3_neg_notify:1;
+ u8 reserved10:2;
+};
+
+struct wcn36xx_hal_enter_uapsd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bk_delivery:1;
+ u8 be_delivery:1;
+ u8 vi_delivery:1;
+ u8 vo_delivery:1;
+ u8 bk_trigger:1;
+ u8 be_trigger:1;
+ u8 vi_trigger:1;
+ u8 vo_trigger:1;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ u8 bss_index;
+};
+
+#define WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE 128
+#define WCN36XX_HAL_WOWL_BCAST_MAX_NUM_PATTERNS 16
+
+struct wcn36xx_hal_wowl_add_bcast_ptrn_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Pattern ID */
+ u8 id;
+
+ /* Pattern byte offset from beginning of the 802.11 packet to start
+ * of the wake-up pattern */
+ u8 byte_Offset;
+
+ /* Non-Zero Pattern size */
+ u8 size;
+
+ /* Pattern */
+ u8 pattern[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Non-zero pattern mask size */
+ u8 mask_size;
+
+ /* Pattern mask */
+ u8 mask[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Extra pattern */
+ u8 extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Extra pattern mask */
+ u8 mask_extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ u8 bss_index;
+};
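+
+/* Illustrative sketch, not part of the firmware interface: filling a
+ * broadcast wake-up pattern entry. The helper name is hypothetical, the
+ * mask is assumed to be the same length as the pattern, and
+ * <linux/string.h> / <linux/errno.h> are assumed to be available.
+ */
+static inline int
+wcn36xx_hal_fill_wow_pattern(struct wcn36xx_hal_wowl_add_bcast_ptrn_req_msg *msg,
+                             u8 id, u8 offset, const u8 *pattern,
+                             const u8 *mask, u8 len, u8 bss_index)
+{
+ if (len > WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE)
+  return -EINVAL;
+
+ msg->id = id;
+ msg->byte_Offset = offset;
+ msg->size = len;
+ memcpy(msg->pattern, pattern, len);
+ msg->mask_size = len;
+ memcpy(msg->mask, mask, len);
+ msg->bss_index = bss_index;
+ return 0;
+}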
+
+struct wcn36xx_hal_wow_del_bcast_ptrn_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Pattern ID of the wakeup pattern to be deleted */
+ u8 id;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_enter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enables/disables magic packet filtering */
+ u8 magic_packet_enable;
+
+ /* Magic pattern */
+ u8 magic_pattern[ETH_ALEN];
+
+ /* Enables/disables packet pattern filtering in firmware. Enabling
+ * this flag enables broadcast pattern matching in Firmware. If
+ * unicast pattern matching is also desired,
+ * ucUcastPatternFilteringEnable flag must be set to true as well
+ */
+ u8 pattern_filtering_enable;
+
+ /* Enables/disables unicast packet pattern filtering. This flag
+ * specifies whether we want to do pattern match on unicast packets
+ * as well and not just broadcast packets. This flag has no effect
+ * if the ucPatternFilteringEnable (main controlling flag) is set
+ * to false
+ */
+ u8 ucast_pattern_filtering_enable;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the Channel Switch
+ * Action Frame.
+ */
+ u8 wow_channel_switch_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the
+ * Deauthentication Frame.
+ */
+ u8 wow_deauth_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the Disassociation
+ * Frame.
+ */
+ u8 wow_disassoc_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it has missed consecutive
+ * beacons. This is a hardware register configuration (NOT a
+ * firmware configuration).
+ */
+ u8 wow_max_missed_beacons;
+
+ /* This configuration is valid only when magicPktEnable=1. This is
+ * a timeout value in units of microseconds. It requests hardware to
+ * unconditionally wake up after it has stayed in WoWLAN mode for
+ * some time. Set 0 to disable this feature.
+ */
+ u8 wow_max_sleep;
+
+ /* This configuration directs the WoW packet filtering to look for
+ * EAP-ID requests embedded in EAPOL frames and use this as a wake
+ * source.
+ */
+ u8 wow_eap_id_request_enable;
+
+ /* This configuration directs the WoW packet filtering to look for
+ * EAPOL-4WAY requests and use this as a wake source.
+ */
+ u8 wow_eapol_4way_enable;
+
+ /* This configuration allows a host wakeup on a network scan
+ * offload match.
+ */
+ u8 wow_net_scan_offload_match;
+
+ /* This configuration allows a host wakeup on any GTK rekeying
+ * error.
+ */
+ u8 wow_gtk_rekey_error;
+
+ /* This configuration allows a host wakeup on BSS connection loss.
+ */
+ u8 wow_bss_connection_loss;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_get_rssi_req_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_get_roam_rssi_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Valid STA Idx for per STA stats request */
+ u32 sta_id;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* STA index */
+ u8 sta_idx;
+
+ /* Access Category */
+ u8 ac;
+
+ /* User Priority */
+ u8 up;
+
+ /* Service Interval */
+ u32 service_interval;
+
+ /* Suspend Interval */
+ u32 suspend_interval;
+
+ /* Delay Interval */
+ u32 delay_interval;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 set_mcst_bcst_filter_setting;
+ u8 set_mcst_bcst_filter;
+};
+
+struct wcn36xx_hal_enter_imps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_exit_imps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_enter_bmps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_enter_uapsd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rssi_notification_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 rssi_thres1_pos_cross:1;
+ u32 rssi_thres1_neg_cross:1;
+ u32 rssi_thres2_pos_cross:1;
+ u32 rssi_thres2_neg_cross:1;
+ u32 rssi_thres3_pos_cross:1;
+ u32 rssi_thres3_neg_cross:1;
+ u32 avg_rssi:8;
+ u32 reserved:18;
+
+};
+
+struct wcn36xx_hal_get_rssio_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ s8 rssi;
+
+};
+
+struct wcn36xx_hal_get_roam_rssi_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 sta_id;
+ s8 rssi;
+};
+
+struct wcn36xx_hal_wowl_enter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_add_bcn_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_rem_bcn_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_add_wowl_bcast_ptrn_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_del_wowl_bcast_ptrn_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_keep_alive_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_set_rssi_thresh_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct set_max_tx_pwr_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSSID is needed to identify which session issued this request.
+ * As the request has power constraints, this should be applied
+ * only to that session */
+ u8 bssid[ETH_ALEN];
+
+ u8 self_addr[ETH_ALEN];
+
+ /* In request, power == MaxTx power to be used. */
+ u8 power;
+};
+
+struct set_max_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* power == tx power used for management frames */
+ u8 power;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct set_tx_pwr_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* TX Power in milliwatts */
+ u32 tx_power;
+
+ u8 bss_index;
+};
+
+struct set_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct get_tx_pwr_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 sta_id;
+};
+
+struct get_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* TX Power in milliwatts */
+ u32 tx_power;
+};
+
+struct set_p2p_gonoa_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 opp_ps;
+ u32 ct_window;
+ u8 count;
+ u32 duration;
+ u32 interval;
+ u32 single_noa_duration;
+ u8 ps_selection;
+};
+
+struct set_p2p_gonoa_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_add_sta_self_req {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 self_addr[ETH_ALEN];
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_add_sta_self_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Self STA Index */
+ u8 self_sta_index;
+
+ /* DPU Index (IGTK, PTK, GTK all same) */
+ u8 dpu_index;
+
+ /* DPU Signature */
+ u8 dpu_signature;
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*success or failure */
+ u32 status;
+
+ u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct aggr_add_ts_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_idx;
+
+ /* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS.
+ * This will carry the bitmap with the bit positions representing
+ * different ACs */
+ u16 tspec_index;
+
+ /* Tspec info per AC To program TPE with required parameters */
+ struct wcn36xx_hal_tspec_ie tspec[WCN36XX_HAL_MAX_AC];
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* These parameters are for all the access categories */
+
+ /* Service Interval */
+ u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Suspend Interval */
+ u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Delay Interval */
+ u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
+
+struct aggr_add_ts_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status0;
+
+ /* FIXME PRIMA for future use for 11R */
+ u32 status1;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 is_apps_cpu_awake;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_dump_cmd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+ u32 arg4;
+ u32 arg5;
+} __packed;
+
+struct wcn36xx_hal_dump_cmd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Length of the response message */
+ u32 rsp_length;
+
+ /* FIXME: Currently assuming the response will be less than
+ * 100 bytes */
+ u8 rsp_buffer[DUMPCMD_RSP_BUFFER];
+} __packed;
+
+#define WLAN_COEX_IND_DATA_SIZE (4)
+#define WLAN_COEX_IND_TYPE_DISABLE_HB_MONITOR (0)
+#define WLAN_COEX_IND_TYPE_ENABLE_HB_MONITOR (1)
+
+struct coex_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Coex Indication Type */
+ u32 type;
+
+ /* Coex Indication Data */
+ u32 data[WLAN_COEX_IND_DATA_SIZE];
+};
+
+struct wcn36xx_hal_tx_compl_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Tx Complete Indication Success or Failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_wlan_host_suspend_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 configured_mcst_bcst_filter_setting;
+ u32 active_session_count;
+};
+
+struct wcn36xx_hal_wlan_exclude_unencrpted_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 dot11_exclude_unencrypted;
+ u8 bssid[ETH_ALEN];
+};
+
+struct noa_attr_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 index;
+ u8 opp_ps_flag;
+ u16 ctwin;
+
+ u16 noa1_interval_count;
+ u16 bss_index;
+ u32 noa1_duration;
+ u32 noa1_interval;
+ u32 noa1_starttime;
+
+ u16 noa2_interval_count;
+ u16 reserved2;
+ u32 noa2_duration;
+ u32 noa2_interval;
+ u32 noa2_start_time;
+
+ u32 status;
+};
+
+struct noa_start_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+ u32 bss_index;
+};
+
+struct wcn36xx_hal_wlan_host_resume_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 configured_mcst_bcst_filter_setting;
+};
+
+struct wcn36xx_hal_host_resume_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_del_ba_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 sta_idx;
+
+ /* Peer MAC Address, whose BA session has timed out */
+ u8 peer_addr[ETH_ALEN];
+
+ /* TID for which a BA session timeout is being triggered */
+ u8 ba_tid;
+
+ /* DELBA direction
+ * 1 - Originator
+ * 0 - Recipient
+ */
+ u8 direction;
+
+ u32 reason_code;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+};
+
+/* PNO Messages */
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS 26
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX 60
+
+/* Maximum numbers of networks supported by PNO */
+#define WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS 16
+
+/* The number of scan time intervals that can be programmed into PNO */
+#define WCN36XX_HAL_PNO_MAX_SCAN_TIMERS 10
+
+/* Maximum size of the probe template */
+#define WCN36XX_HAL_PNO_MAX_PROBE_SIZE 450
+
+/* Type of PNO enabling:
+ *
+ * Immediate - scanning will start immediately and PNO procedure will be
+ * repeated based on timer
+ *
+ * Suspend - scanning will start at suspend
+ *
+ * Resume - scanning will start on system resume
+ */
+enum pno_mode {
+ PNO_MODE_IMMEDIATE,
+ PNO_MODE_ON_SUSPEND,
+ PNO_MODE_ON_RESUME,
+ PNO_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Authentication type */
+enum auth_type {
+ AUTH_TYPE_ANY = 0,
+ AUTH_TYPE_OPEN_SYSTEM = 1,
+
+ /* Upper layer authentication types */
+ AUTH_TYPE_WPA = 2,
+ AUTH_TYPE_WPA_PSK = 3,
+
+ AUTH_TYPE_RSN = 4,
+ AUTH_TYPE_RSN_PSK = 5,
+ AUTH_TYPE_FT_RSN = 6,
+ AUTH_TYPE_FT_RSN_PSK = 7,
+ AUTH_TYPE_WAPI_WAI_CERTIFICATE = 8,
+ AUTH_TYPE_WAPI_WAI_PSK = 9,
+
+ AUTH_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type */
+enum ed_type {
+ ED_ANY = 0,
+ ED_NONE = 1,
+ ED_WEP = 2,
+ ED_TKIP = 3,
+ ED_CCMP = 4,
+ ED_WPI = 5,
+
+ ED_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* SSID broadcast type */
+enum ssid_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+
+ BCAST_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* The network description for which PNO will have to look for */
+struct network_type {
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Authentication type for the network */
+ enum auth_type authentication;
+
+ /* Encryption type for the network */
+ enum ed_type encryption;
+
+ /* Indicates the channels on which the network can be found. 0 -
+ * all channels */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Indicates the RSSI threshold for the network to be considered */
+ u8 rssi_threshold;
+};
+
+struct scan_timer {
+ /* How long it should wait */
+ u32 value;
+
+ /* How many times it should repeat that wait value. 0 - keep using
+ * this timer until PNO is disabled */
+ u32 repeat;
+
+ /* e.g. 2 3 4 0 - it will wait 2s between consecutive scans 3
+ * times - after that it will wait 4s between consecutive scans
+ * until disabled */
+};
+
+/* The network parameters to be sent to the PNO algorithm */
+struct scan_timers_type {
+ /* set to 0 if you wish for PNO to use its default telescopic timer */
+ u8 count;
+
+ /* A set value represents the amount of time that PNO will wait
+ * between two consecutive scan procedures. If a uniform timer that
+ * always fires at the exact same interval is desired, one single
+ * value is to be set. If a more complex, telescopic-like timer is
+ * desired, multiple values can be set - once PNO reaches the end
+ * of the array it will continue scanning at intervals given by the
+ * last value */
+ struct scan_timer values[WCN36XX_HAL_PNO_MAX_SCAN_TIMERS];
+};
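+
+/* Illustrative sketch, not part of the firmware interface: the
+ * telescopic timer example from the comments above ("2 3 4 0") expressed
+ * as initialisation data - wait 2 s between scans three times, then keep
+ * waiting 4 s until PNO is disabled (repeat == 0 on the last entry). The
+ * variable name is hypothetical and the values are purely illustrative.
+ */
+static const struct scan_timers_type wcn36xx_example_pno_timers = {
+ .count = 2,
+ .values = {
+  { .value = 2, .repeat = 3 },
+  { .value = 4, .repeat = 0 },
+ },
+};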
+
+/* Preferred network list request */
+struct set_pref_netw_list_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enable PNO */
+ u32 enable;
+
+ /* Immediate, On Suspend, On Resume */
+ enum pno_mode mode;
+
+ /* Number of networks sent for PNO */
+ u32 networks_count;
+
+ /* The networks that PNO needs to look for */
+ struct network_type networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+ /* The scan timers required for PNO */
+ struct scan_timers_type scan_timers;
+
+ /* Probe template for 2.4GHz band */
+ u16 band_24g_probe_size;
+ u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+ /* Probe template for 5GHz band */
+ u16 band_5g_probe_size;
+ u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
+
+/* The network description for which PNO will have to look for */
+struct network_type_new {
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Authentication type for the network */
+ enum auth_type authentication;
+
+ /* Encryption type for the network */
+ enum ed_type encryption;
+
+ /* SSID broadcast type, normal, hidden or unknown */
+ enum ssid_bcast_type bcast_network_type;
+
+ /* Indicates the channels on which the network can be found. 0 -
+ * all channels */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Indicates the RSSI threshold for the network to be considered */
+ u8 rssi_threshold;
+};
+
+/* Preferred network list request new */
+struct set_pref_netw_list_req_new {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enable PNO */
+ u32 enable;
+
+ /* Immediate, On Suspend, On Resume */
+ enum pno_mode mode;
+
+ /* Number of networks sent for PNO */
+ u32 networks_count;
+
+ /* The networks that PNO needs to look for */
+ struct network_type_new networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+ /* The scan timers required for PNO */
+ struct scan_timers_type scan_timers;
+
+ /* Probe template for 2.4GHz band */
+ u16 band_24g_probe_size;
+ u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+ /* Probe template for 5GHz band */
+ u16 band_5g_probe_size;
+ u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
+
+/* Preferred network list response */
+struct set_pref_netw_list_resp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request - just to indicate that PNO has
+ * acknowledged the request and will start scanning */
+ u32 status;
+};
+
+/* Preferred network found indication */
+struct pref_netw_found_ind {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Network that was found with the highest RSSI */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Indicates the RSSI */
+ u8 rssi;
+};
+
+/* RSSI Filter request */
+struct set_rssi_filter_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* RSSI Threshold */
+ u8 rssi_threshold;
+};
+
+/* Set RSSI filter resp */
+struct set_rssi_filter_resp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+};
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct wcn36xx_hal_update_scan_params_req {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Host setting for 11d */
+ u8 dot11d_enabled;
+
+ /* Lets PNO know that host has determined the regulatory domain */
+ u8 dot11d_resolved;
+
+ /* Channels on which PNO is allowed to scan */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Minimum channel time */
+ u16 active_min_ch_time;
+
+ /* Maximum channel time */
+ u16 active_max_ch_time;
+
+ /* Minimum channel time */
+ u16 passive_min_ch_time;
+
+ /* Maximum channel time */
+ u16 passive_max_ch_time;
+
+ /* Cb State */
+ enum phy_chan_bond_state state;
+} __packed;
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct update_scan_params_req_ex {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Host setting for 11d */
+ u8 dot11d_enabled;
+
+ /* Lets PNO know that host has determined the regulatory domain */
+ u8 dot11d_resolved;
+
+ /* Channels on which PNO is allowed to scan */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
+
+ /* Minimum channel time */
+ u16 active_min_ch_time;
+
+ /* Maximum channel time */
+ u16 active_max_ch_time;
+
+ /* Minimum channel time */
+ u16 passive_min_ch_time;
+
+ /* Maximum channel time */
+ u16 passive_max_ch_time;
+
+ /* Cb State */
+ enum phy_chan_bond_state state;
+};
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct wcn36xx_hal_update_scan_params_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_set_tx_per_tracking_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* 0: disable, 1:enable */
+ u8 tx_per_tracking_enable;
+
+ /* Check period, unit is sec. */
+ u8 tx_per_tracking_period;
+
+ /* (Fail TX packet)/(Total TX packet) ratio, the unit is 10%. */
+ u8 tx_per_tracking_ratio;
+
+ /* A watermark of the check number; once the number of tx packets
+ * exceeds this watermark, we do the check. Default is 5 */
+ u32 tx_per_tracking_watermark;
+};
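+
+/* Illustrative sketch, not part of the firmware interface: a TX PER
+ * tracking configuration following the comments above - check once per
+ * 5 s, treat a 30% failure ratio as a hit, and only run the check after
+ * the default watermark of 5 packets. The variable name and the period /
+ * ratio values are hypothetical.
+ */
+static const struct wcn36xx_hal_set_tx_per_tracking_req_msg
+wcn36xx_example_per_tracking = {
+ .tx_per_tracking_enable = 1,    /* 1: enable */
+ .tx_per_tracking_period = 5,    /* check period, in seconds */
+ .tx_per_tracking_ratio = 3,     /* unit is 10%, i.e. 30% */
+ .tx_per_tracking_watermark = 5, /* default per the comment above */
+};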
+
+struct wcn36xx_hal_set_tx_per_tracking_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+};
+
+struct tx_per_hit_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+/* Packet Filtering Definitions Begin */
+#define WCN36XX_HAL_PROTOCOL_DATA_LEN 8
+#define WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS 240
+#define WCN36XX_HAL_MAX_NUM_FILTERS 20
+#define WCN36XX_HAL_MAX_CMP_PER_FILTER 10
+
+enum wcn36xx_hal_receive_packet_filter_type {
+ HAL_RCV_FILTER_TYPE_INVALID,
+ HAL_RCV_FILTER_TYPE_FILTER_PKT,
+ HAL_RCV_FILTER_TYPE_BUFFER_PKT,
+ HAL_RCV_FILTER_TYPE_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_protocol_type {
+ HAL_FILTER_PROTO_TYPE_INVALID,
+ HAL_FILTER_PROTO_TYPE_MAC,
+ HAL_FILTER_PROTO_TYPE_ARP,
+ HAL_FILTER_PROTO_TYPE_IPV4,
+ HAL_FILTER_PROTO_TYPE_IPV6,
+ HAL_FILTER_PROTO_TYPE_UDP,
+ HAL_FILTER_PROTO_TYPE_MAX
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_cmp_flag_type {
+ HAL_FILTER_CMP_TYPE_INVALID,
+ HAL_FILTER_CMP_TYPE_EQUAL,
+ HAL_FILTER_CMP_TYPE_MASK_EQUAL,
+ HAL_FILTER_CMP_TYPE_NOT_EQUAL,
+ HAL_FILTER_CMP_TYPE_MAX
+};
+
+struct wcn36xx_hal_rcv_pkt_filter_params {
+ u8 protocol_layer;
+ u8 cmp_flag;
+
+ /* Length of the data to compare */
+ u16 data_length;
+
+ /* from start of the respective frame header */
+ u8 data_offset;
+
+ /* Reserved field */
+ u8 reserved;
+
+ /* Data to compare */
+ u8 compare_data[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+
+ /* Mask to be applied on the received packet data before compare */
+ u8 data_mask[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+};
+
+struct wcn36xx_hal_sessionized_rcv_pkt_filter_cfg_type {
+ u8 id;
+ u8 type;
+ u8 params_count;
+ u32 coleasce_time;
+ u8 bss_index;
+ struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
+
+struct wcn36xx_hal_set_rcv_pkt_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 id;
+ u8 type;
+ u8 params_count;
+ u32 coalesce_time;
+ struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
+
+struct wcn36xx_hal_rcv_flt_mc_addr_list_type {
+ /* from start of the respective frame header */
+ u8 data_offset;
+
+ u32 mc_addr_count;
+ u8 mc_addr[ETH_ALEN][WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS];
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_set_pkt_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt {
+ u8 id;
+ u32 match_cnt;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ u32 match_count;
+ struct wcn36xx_hal_rcv_flt_pkt_match_cnt
+ matches[WCN36XX_HAL_MAX_NUM_FILTERS];
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_param {
+ /* only valid for response message */
+ u32 status;
+ u8 id;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_mc_addr_list_type mc_addr_list;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+ u8 bss_index;
+};
+
+/* Packet Filtering Definitions End */
+
+struct wcn36xx_hal_set_power_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Ignore DTIM */
+ u32 ignore_dtim;
+
+ /* DTIM Period */
+ u32 dtim_period;
+
+ /* Listen Interval */
+ u32 listen_interval;
+
+ /* Broadcast Multicast Filter */
+ u32 bcast_mcast_filter;
+
+ /* Beacon Early Termination */
+ u32 enable_bet;
+
+ /* Beacon Early Termination Interval */
+ u32 bet_interval;
+} __packed;
+
+struct wcn36xx_hal_set_power_params_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+} __packed;
+
+/* Capability bitmap exchange definitions and macros starts */
+
+enum place_holder_in_cap_bitmap {
+ MCC = 0,
+ P2P = 1,
+ DOT11AC = 2,
+ SLM_SESSIONIZATION = 3,
+ DOT11AC_OPMODE = 4,
+ SAP32STA = 5,
+ TDLS = 6,
+ P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
+ WLANACTIVE_OFFLOAD = 8,
+ BEACON_OFFLOAD = 9,
+ SCAN_OFFLOAD = 10,
+ ROAM_OFFLOAD = 11,
+ BCN_MISS_OFFLOAD = 12,
+ STA_POWERSAVE = 13,
+ STA_ADVANCED_PWRSAVE = 14,
+ AP_UAPSD = 15,
+ AP_DFS = 16,
+ BLOCKACK = 17,
+ PHY_ERR = 18,
+ BCN_FILTER = 19,
+ RTT = 20,
+ RATECTRL = 21,
+ WOW = 22,
+ MAX_FEATURE_SUPPORTED = 128,
+};
+
+struct wcn36xx_hal_feat_caps_msg {
+
+ struct wcn36xx_hal_msg_header header;
+
+ u32 feat_caps[4];
+} __packed;
+
+/* status codes to help debug rekey failures */
+enum gtk_rekey_status {
+ WCN36XX_HAL_GTK_REKEY_STATUS_SUCCESS = 0,
+
+ /* rekey detected, but not handled */
+ WCN36XX_HAL_GTK_REKEY_STATUS_NOT_HANDLED = 1,
+
+ /* MIC check error on M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MIC_ERROR = 2,
+
+ /* decryption error on M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_DECRYPT_ERROR = 3,
+
+ /* M1 replay detected */
+ WCN36XX_HAL_GTK_REKEY_STATUS_REPLAY_ERROR = 4,
+
+ /* missing GTK key descriptor in M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_KDE = 5,
+
+ /* missing iGTK key descriptor in M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_IGTK_KDE = 6,
+
+ /* key installation error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_INSTALL_ERROR = 7,
+
+ /* iGTK key installation error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_IGTK_INSTALL_ERROR = 8,
+
+ /* GTK rekey M2 response TX error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_RESP_TX_ERROR = 9,
+
+ /* non-specific general error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_GEN_ERROR = 255
+};
+
+/* wake reason types */
+enum wake_reason_type {
+ WCN36XX_HAL_WAKE_REASON_NONE = 0,
+
+ /* magic packet match */
+ WCN36XX_HAL_WAKE_REASON_MAGIC_PACKET = 1,
+
+ /* host defined pattern match */
+ WCN36XX_HAL_WAKE_REASON_PATTERN_MATCH = 2,
+
+ /* EAP-ID frame detected */
+ WCN36XX_HAL_WAKE_REASON_EAPID_PACKET = 3,
+
+ /* start of EAPOL 4-way handshake detected */
+ WCN36XX_HAL_WAKE_REASON_EAPOL4WAY_PACKET = 4,
+
+ /* network scan offload match */
+ WCN36XX_HAL_WAKE_REASON_NETSCAN_OFFL_MATCH = 5,
+
+ /* GTK rekey status wakeup (see status) */
+ WCN36XX_HAL_WAKE_REASON_GTK_REKEY_STATUS = 6,
+
+ /* BSS connection lost */
+ WCN36XX_HAL_WAKE_REASON_BSS_CONN_LOST = 7,
+};
+
+/*
+ Wake packet which is saved at tWakeReasonParams.DataStart
+ This data is sent for any wake reason that involves a packet-based wakeup:
+
+ WCN36XX_HAL_WAKE_REASON_MAGIC_PACKET
+ WCN36XX_HAL_WAKE_REASON_PATTERN_MATCH
+ WCN36XX_HAL_WAKE_REASON_EAPID_PACKET
+ WCN36XX_HAL_WAKE_REASON_EAPOL4WAY_PACKET
+ WCN36XX_HAL_WAKE_REASON_GTK_REKEY_STATUS
+
+ The information is provided to the host for auditing and debug purposes.
+*/
+
+/* Wake reason indication */
+struct wcn36xx_hal_wake_reason_ind {
+ struct wcn36xx_hal_msg_header header;
+
+ /* see tWakeReasonType */
+ u32 reason;
+
+ /* argument specific to the reason type */
+ u32 reason_arg;
+
+ /* length of optional data stored in this message, in case HAL
+ * truncates the data (i.e. data packets) this length will be less
+ * than the actual length */
+ u32 stored_data_len;
+
+ /* actual length of data */
+ u32 actual_data_len;
+
+ /* variable length start of data (length == storedDataLen) see
+ * specific wake type */
+ u8 data_start[1];
+
+ u32 bss_index:8;
+ u32 reserved:24;
+};
+
+#define WCN36XX_HAL_GTK_KEK_BYTES 16
+#define WCN36XX_HAL_GTK_KCK_BYTES 16
+
+#define WCN36XX_HAL_GTK_OFFLOAD_FLAGS_DISABLE (1 << 0)
+
+#define GTK_SET_BSS_KEY_TAG 0x1234AA55
+
+struct wcn36xx_hal_gtk_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* optional flags */
+ u32 flags;
+
+ /* Key confirmation key */
+ u8 kck[WCN36XX_HAL_GTK_KCK_BYTES];
+
+ /* key encryption key */
+ u8 kek[WCN36XX_HAL_GTK_KEK_BYTES];
+
+ /* replay counter */
+ u64 key_replay_counter;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* last rekey status when the rekey was offloaded */
+ u32 last_rekey_status;
+
+ /* current replay counter value */
+ u64 key_replay_counter;
+
+ /* total rekey attempts */
+ u32 total_rekey_count;
+
+ /* successful GTK rekeys */
+ u32 gtk_rekey_count;
+
+ /* successful iGTK rekeys */
+ u32 igtk_rekey_count;
+
+ u8 bss_index;
+};
+
+struct dhcp_info {
+ /* Device mode in which the DHCP activity is taking place */
+ u8 device_mode;
+
+ u8 addr[ETH_ALEN];
+};
+
+struct dhcp_ind_status {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/*
+ * Thermal mitigation mode of operation.
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_0 - Mitigate by disabling AMPDU
+ * aggregation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_1 - Mitigate by disabling AMPDU
+ * aggregation and reducing transmit power
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_2 - Not supported */
+enum wcn36xx_hal_thermal_mitigation_mode_type {
+ HAL_THERMAL_MITIGATION_MODE_INVALID = -1,
+ HAL_THERMAL_MITIGATION_MODE_0,
+ HAL_THERMAL_MITIGATION_MODE_1,
+ HAL_THERMAL_MITIGATION_MODE_2,
+ HAL_THERMAL_MITIGATION_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+
+/*
+ * Thermal Mitigation level.
+ * Note the levels are incremental, i.e. WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 =
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 +
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 - lowest level of thermal mitigation.
+ * This level indicates normal mode of operation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1 - 1st level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 - 2nd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_3 - 3rd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_4 - 4th level of thermal mitigation
+ */
+enum wcn36xx_hal_thermal_mitigation_level_type {
+ HAL_THERMAL_MITIGATION_LEVEL_INVALID = -1,
+ HAL_THERMAL_MITIGATION_LEVEL_0,
+ HAL_THERMAL_MITIGATION_LEVEL_1,
+ HAL_THERMAL_MITIGATION_LEVEL_2,
+ HAL_THERMAL_MITIGATION_LEVEL_3,
+ HAL_THERMAL_MITIGATION_LEVEL_4,
+ HAL_THERMAL_MITIGATION_LEVEL_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+
+/* WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ */
+struct set_thermal_mitigation_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Thermal Mitigation Operation Mode */
+ enum wcn36xx_hal_thermal_mitigation_mode_type mode;
+
+ /* Thermal Mitigation Level */
+ enum wcn36xx_hal_thermal_mitigation_level_type level;
+};
+
+struct set_thermal_mitigation_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+};
+
+/* Per STA Class B Statistics. Class B statistics are STA TX/RX stats
+ * provided to FW from Host via periodic messages */
+struct stats_class_b_ind {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Duration over which these stats were collected */
+ u32 duration;
+
+ /* Per STA Stats */
+
+ /* TX stats */
+ u32 tx_bytes_pushed;
+ u32 tx_packets_pushed;
+
+ /* RX stats */
+ u32 rx_bytes_rcvd;
+ u32 rx_packets_rcvd;
+ u32 rx_time_total;
+};
+
+#endif /* _HAL_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
new file mode 100644
index 000000000000..7839b31e4826
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -0,0 +1,1036 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "wcn36xx.h"
+
+unsigned int wcn36xx_dbg_mask;
+module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+#define CHAN2G(_freq, _idx) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 25, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 25, \
+}
+
+/* The wcn firmware expects channel values to match
+ * their mnemonic values. So use these for .hw_value. */
+static struct ieee80211_channel wcn_2ghz_channels[] = {
+ CHAN2G(2412, 1), /* Channel 1 */
+ CHAN2G(2417, 2), /* Channel 2 */
+ CHAN2G(2422, 3), /* Channel 3 */
+ CHAN2G(2427, 4), /* Channel 4 */
+ CHAN2G(2432, 5), /* Channel 5 */
+ CHAN2G(2437, 6), /* Channel 6 */
+ CHAN2G(2442, 7), /* Channel 7 */
+ CHAN2G(2447, 8), /* Channel 8 */
+ CHAN2G(2452, 9), /* Channel 9 */
+ CHAN2G(2457, 10), /* Channel 10 */
+ CHAN2G(2462, 11), /* Channel 11 */
+ CHAN2G(2467, 12), /* Channel 12 */
+ CHAN2G(2472, 13), /* Channel 13 */
+ CHAN2G(2484, 14) /* Channel 14 */
+
+};
+
+static struct ieee80211_channel wcn_5ghz_channels[] = {
+ CHAN5G(5180, 36),
+ CHAN5G(5200, 40),
+ CHAN5G(5220, 44),
+ CHAN5G(5240, 48),
+ CHAN5G(5260, 52),
+ CHAN5G(5280, 56),
+ CHAN5G(5300, 60),
+ CHAN5G(5320, 64),
+ CHAN5G(5500, 100),
+ CHAN5G(5520, 104),
+ CHAN5G(5540, 108),
+ CHAN5G(5560, 112),
+ CHAN5G(5580, 116),
+ CHAN5G(5600, 120),
+ CHAN5G(5620, 124),
+ CHAN5G(5640, 128),
+ CHAN5G(5660, 132),
+ CHAN5G(5700, 140),
+ CHAN5G(5745, 149),
+ CHAN5G(5765, 153),
+ CHAN5G(5785, 157),
+ CHAN5G(5805, 161),
+ CHAN5G(5825, 165)
+};
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (_hw_rate) \
+}
+
+static struct ieee80211_rate wcn_2ghz_rates[] = {
+ RATE(10, HW_RATE_INDEX_1MBPS, 0),
+ RATE(20, HW_RATE_INDEX_2MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, HW_RATE_INDEX_5_5MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, HW_RATE_INDEX_11MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, HW_RATE_INDEX_6MBPS, 0),
+ RATE(90, HW_RATE_INDEX_9MBPS, 0),
+ RATE(120, HW_RATE_INDEX_12MBPS, 0),
+ RATE(180, HW_RATE_INDEX_18MBPS, 0),
+ RATE(240, HW_RATE_INDEX_24MBPS, 0),
+ RATE(360, HW_RATE_INDEX_36MBPS, 0),
+ RATE(480, HW_RATE_INDEX_48MBPS, 0),
+ RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_rate wcn_5ghz_rates[] = {
+ RATE(60, HW_RATE_INDEX_6MBPS, 0),
+ RATE(90, HW_RATE_INDEX_9MBPS, 0),
+ RATE(120, HW_RATE_INDEX_12MBPS, 0),
+ RATE(180, HW_RATE_INDEX_18MBPS, 0),
+ RATE(240, HW_RATE_INDEX_24MBPS, 0),
+ RATE(360, HW_RATE_INDEX_36MBPS, 0),
+ RATE(480, HW_RATE_INDEX_48MBPS, 0),
+ RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_supported_band wcn_band_2ghz = {
+ .channels = wcn_2ghz_channels,
+ .n_channels = ARRAY_SIZE(wcn_2ghz_channels),
+ .bitrates = wcn_2ghz_rates,
+ .n_bitrates = ARRAY_SIZE(wcn_2ghz_rates),
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(72),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ }
+ }
+};
+
+static struct ieee80211_supported_band wcn_band_5ghz = {
+ .channels = wcn_5ghz_channels,
+ .n_channels = ARRAY_SIZE(wcn_5ghz_channels),
+ .bitrates = wcn_5ghz_rates,
+ .n_bitrates = ARRAY_SIZE(wcn_5ghz_rates),
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(72),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ }
+ }
+};
+
+#ifdef CONFIG_PM
+
+static const struct wiphy_wowlan_support wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY
+};
+
+#endif
+
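+/* In STA mode the peer (the AP) is addressed through the BSS STA index;
+ * for all other interface types the per-station index is used. */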
+static inline u8 get_sta_index(struct ieee80211_vif *vif,
+ struct wcn36xx_sta *sta_priv)
+{
+ return NL80211_IFTYPE_STATION == vif->type ?
+ sta_priv->bss_sta_index :
+ sta_priv->sta_index;
+}
+
+static int wcn36xx_start(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+ int ret;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac start\n");
+
+ /* SMD initialization */
+ ret = wcn36xx_smd_open(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to open smd channel: %d\n", ret);
+ goto out_err;
+ }
+
+ /* Allocate memory pools for Mgmt BD headers and Data BD headers */
+ ret = wcn36xx_dxe_allocate_mem_pools(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to alloc DXE mempool: %d\n", ret);
+ goto out_smd_close;
+ }
+
+ ret = wcn36xx_dxe_alloc_ctl_blks(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to alloc DXE ctl blocks: %d\n", ret);
+ goto out_free_dxe_pool;
+ }
+
+ wcn->hal_buf = kmalloc(WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
+ if (!wcn->hal_buf) {
+ wcn36xx_err("Failed to allocate smd buf\n");
+ ret = -ENOMEM;
+ goto out_free_dxe_ctl;
+ }
+
+ ret = wcn36xx_smd_load_nv(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to push NV to chip\n");
+ goto out_free_smd_buf;
+ }
+
+ ret = wcn36xx_smd_start(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to start chip\n");
+ goto out_free_smd_buf;
+ }
+
+ /* DMA channel initialization */
+ ret = wcn36xx_dxe_init(wcn);
+ if (ret) {
+ wcn36xx_err("DXE init failed\n");
+ goto out_smd_stop;
+ }
+
+ wcn36xx_debugfs_init(wcn);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_feature_caps_exchange(wcn);
+ if (ret)
+ wcn36xx_warn("Exchange feature caps failed\n");
+ }
+ INIT_LIST_HEAD(&wcn->vif_list);
+ return 0;
+
+out_smd_stop:
+ wcn36xx_smd_stop(wcn);
+out_free_smd_buf:
+ kfree(wcn->hal_buf);
+out_free_dxe_pool:
+ wcn36xx_dxe_free_mem_pools(wcn);
+out_free_dxe_ctl:
+ wcn36xx_dxe_free_ctl_blks(wcn);
+out_smd_close:
+ wcn36xx_smd_close(wcn);
+out_err:
+ return ret;
+}
+
+static void wcn36xx_stop(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n");
+
+ wcn36xx_debugfs_exit(wcn);
+ wcn36xx_smd_stop(wcn);
+ wcn36xx_dxe_deinit(wcn);
+ wcn36xx_smd_close(wcn);
+
+ wcn36xx_dxe_free_mem_pools(wcn);
+ wcn36xx_dxe_free_ctl_blks(wcn);
+
+ kfree(wcn->hal_buf);
+}
+
+static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *tmp;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ int ch = WCN36XX_HW_CHANNEL(wcn);
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
+ ch);
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ wcn36xx_smd_switch_channel(wcn, vif, ch);
+ }
+ }
+
+ return 0;
+}
+
+#define WCN36XX_SUPPORTED_FILTERS (0)
+
+static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed,
+ unsigned int *total, u64 multicast)
+{
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
+
+ *total &= WCN36XX_SUPPORTED_FILTERS;
+}
+
+static void wcn36xx_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_sta *sta_priv = NULL;
+
+ if (control->sta)
+ sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv;
+
+ if (wcn36xx_start_tx(wcn, sta_priv, skb))
+ ieee80211_free_txskb(wcn->hw, skb);
+}
+
+static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = vif_priv->sta;
+ int ret = 0;
+ u8 key[WLAN_MAX_KEY_LEN];
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 set key\n");
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "Key: cmd=0x%x algo:0x%x, id:%d, len:%d flags 0x%x\n",
+ cmd, key_conf->cipher, key_conf->keyidx,
+ key_conf->keylen, key_conf->flags);
+ wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "KEY: ",
+ key_conf->key,
+ key_conf->keylen);
+
+ switch (key_conf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_TKIP;
+ break;
+ default:
+ wcn36xx_err("Unsupported key type 0x%x\n",
+ key_conf->cipher);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ if (WCN36XX_HAL_ED_TKIP == vif_priv->encrypt_type) {
+ /*
+ * Supplicant is sending key in the wrong order:
+ * Temporal Key (16 b) - TX MIC (8 b) - RX MIC (8 b)
+ * but HW expects it to be in the order as described in
+ * IEEE 802.11 spec (see chapter 11.7) like this:
+ * Temporal Key (16 b) - RX MIC (8 b) - TX MIC (8 b)
+ */
+ memcpy(key, key_conf->key, 16);
+ memcpy(key + 16, key_conf->key + 24, 8);
+ memcpy(key + 24, key_conf->key + 16, 8);
+ } else {
+ memcpy(key, key_conf->key, key_conf->keylen);
+ }
+
+ if (IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags) {
+ sta_priv->is_data_encrypted = true;
+ /* Reconfigure bss with encrypt_type */
+ if (NL80211_IFTYPE_STATION == vif->type)
+ wcn36xx_smd_config_bss(wcn,
+ vif,
+ sta,
+ sta->addr,
+ true);
+
+ wcn36xx_smd_set_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key,
+ get_sta_index(vif, sta_priv));
+ } else {
+ wcn36xx_smd_set_bsskey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key);
+ if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
+ (WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
+ sta_priv->is_data_encrypted = true;
+ wcn36xx_smd_set_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key,
+ get_sta_index(vif, sta_priv));
+ }
+ }
+ break;
+ case DISABLE_KEY:
+ if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
+ wcn36xx_smd_remove_bsskey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx);
+ } else {
+ sta_priv->is_data_encrypted = false;
+ /* do not remove key if disassociated */
+ if (sta_priv->aid)
+ wcn36xx_smd_remove_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ get_sta_index(vif, sta_priv));
+ }
+ break;
+ default:
+ wcn36xx_err("Unsupported key cmd 0x%x\n", cmd);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
+ wcn36xx_smd_start_scan(wcn);
+}
+
+static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_smd_end_scan(wcn);
+ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
+}
+
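+/* Convert the mac80211 legacy rate bitmap and HT MCS mask of a station
+ * into the DSSS/OFDM rate tables kept in the per-station driver data. */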
+static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
+ enum ieee80211_band band)
+{
+ int i, size;
+ u16 *rates_table;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ u32 rates = sta->supp_rates[band];
+
+ memset(&sta_priv->supported_rates, 0,
+ sizeof(sta_priv->supported_rates));
+ sta_priv->supported_rates.op_rate_mode = STA_11n;
+
+ size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
+ rates_table = sta_priv->supported_rates.dsss_rates;
+ if (band == IEEE80211_BAND_2GHZ) {
+ for (i = 0; i < size; i++) {
+ if (rates & 0x01) {
+ rates_table[i] = wcn_2ghz_rates[i].hw_value;
+ rates = rates >> 1;
+ }
+ }
+ }
+
+ size = ARRAY_SIZE(sta_priv->supported_rates.ofdm_rates);
+ rates_table = sta_priv->supported_rates.ofdm_rates;
+ for (i = 0; i < size; i++) {
+ if (rates & 0x01) {
+ rates_table[i] = wcn_5ghz_rates[i].hw_value;
+ rates = rates >> 1;
+ }
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ BUILD_BUG_ON(sizeof(sta->ht_cap.mcs.rx_mask) >
+ sizeof(sta_priv->supported_rates.supported_mcs_set));
+ memcpy(sta_priv->supported_rates.supported_mcs_set,
+ sta->ht_cap.mcs.rx_mask,
+ sizeof(sta->ht_cap.mcs.rx_mask));
+ }
+}
+
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates)
+{
+ u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES] = {
+ HW_RATE_INDEX_6MBPS,
+ HW_RATE_INDEX_9MBPS,
+ HW_RATE_INDEX_12MBPS,
+ HW_RATE_INDEX_18MBPS,
+ HW_RATE_INDEX_24MBPS,
+ HW_RATE_INDEX_36MBPS,
+ HW_RATE_INDEX_48MBPS,
+ HW_RATE_INDEX_54MBPS
+ };
+ u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES] = {
+ HW_RATE_INDEX_1MBPS,
+ HW_RATE_INDEX_2MBPS,
+ HW_RATE_INDEX_5_5MBPS,
+ HW_RATE_INDEX_11MBPS
+ };
+
+ rates->op_rate_mode = STA_11n;
+ memcpy(rates->dsss_rates, dsss_rates,
+ sizeof(*dsss_rates) * WCN36XX_HAL_NUM_DSSS_RATES);
+ memcpy(rates->ofdm_rates, ofdm_rates,
+ sizeof(*ofdm_rates) * WCN36XX_HAL_NUM_OFDM_RATES);
+ rates->supported_mcs_set[0] = 0xFF;
+}
+
+static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct sk_buff *skb = NULL;
+ u16 tim_off, tim_len;
+ enum wcn36xx_hal_link_state link_state;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
+ vif, changed);
+
+ if (changed & BSS_CHANGED_BEACON_INFO) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed dtim period %d\n",
+ bss_conf->dtim_period);
+
+ vif_priv->dtim_period = bss_conf->dtim_period;
+ }
+
+ if (changed & BSS_CHANGED_PS) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss PS set %d\n",
+ bss_conf->ps);
+ if (bss_conf->ps) {
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ } else {
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
+ bss_conf->bssid);
+
+ if (!is_zero_ether_addr(bss_conf->bssid)) {
+ vif_priv->is_joining = true;
+ vif_priv->bss_index = 0xff;
+ wcn36xx_smd_join(wcn, bss_conf->bssid,
+ vif->addr, WCN36XX_HW_CHANNEL(wcn));
+ wcn36xx_smd_config_bss(wcn, vif, NULL,
+ bss_conf->bssid, false);
+ } else {
+ vif_priv->is_joining = false;
+ wcn36xx_smd_delete_bss(wcn, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_SSID) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed ssid\n");
+ wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "ssid ",
+ bss_conf->ssid, bss_conf->ssid_len);
+
+ vif_priv->ssid.length = bss_conf->ssid_len;
+ memcpy(&vif_priv->ssid.ssid,
+ bss_conf->ssid,
+ bss_conf->ssid_len);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ vif_priv->is_joining = false;
+ if (bss_conf->assoc) {
+ struct ieee80211_sta *sta;
+ struct wcn36xx_sta *sta_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac assoc bss %pM vif %pM AID=%d\n",
+ bss_conf->bssid,
+ vif->addr,
+ bss_conf->aid);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!sta) {
+ wcn36xx_err("sta %pM is not found\n",
+ bss_conf->bssid);
+ rcu_read_unlock();
+ goto out;
+ }
+ sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+
+ wcn36xx_smd_set_link_st(wcn, bss_conf->bssid,
+ vif->addr,
+ WCN36XX_HAL_LINK_POSTASSOC_STATE);
+ wcn36xx_smd_config_bss(wcn, vif, sta,
+ bss_conf->bssid,
+ true);
+ sta_priv->aid = bss_conf->aid;
+ /*
+ * config_sta must be called from here because this is the
+ * place where the AID is available.
+ */
+ wcn36xx_smd_config_sta(wcn, vif, sta);
+ rcu_read_unlock();
+ } else {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "disassociated bss %pM vif %pM AID=%d\n",
+ bss_conf->bssid,
+ vif->addr,
+ bss_conf->aid);
+ wcn36xx_smd_set_link_st(wcn,
+ bss_conf->bssid,
+ vif->addr,
+ WCN36XX_HAL_LINK_IDLE_STATE);
+ }
+ }
+
+ if (changed & BSS_CHANGED_AP_PROBE_RESP) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed ap probe resp\n");
+ skb = ieee80211_proberesp_get(hw, vif);
+ if (!skb) {
+ wcn36xx_err("failed to alloc probereq skb\n");
+ goto out;
+ }
+
+ wcn36xx_smd_update_proberesp_tmpl(wcn, vif, skb);
+ dev_kfree_skb(skb);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed beacon enabled %d\n",
+ bss_conf->enable_beacon);
+
+ if (bss_conf->enable_beacon) {
+ vif_priv->bss_index = 0xff;
+ wcn36xx_smd_config_bss(wcn, vif, NULL,
+ vif->addr, false);
+ skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
+ &tim_len);
+ if (!skb) {
+ wcn36xx_err("failed to alloc beacon skb\n");
+ goto out;
+ }
+ wcn36xx_smd_send_beacon(wcn, vif, skb, tim_off, 0);
+ dev_kfree_skb(skb);
+
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT)
+ link_state = WCN36XX_HAL_LINK_IBSS_STATE;
+ else
+ link_state = WCN36XX_HAL_LINK_AP_STATE;
+
+ wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+ link_state);
+ } else {
+ wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+ WCN36XX_HAL_LINK_IDLE_STATE);
+ wcn36xx_smd_delete_bss(wcn, vif);
+ }
+ }
+out:
+ return;
+}
+
+/* this is required when using IEEE80211_HW_HAS_RATE_CONTROL */
+static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct wcn36xx *wcn = hw->priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value);
+
+ wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value);
+ return 0;
+}
+
+static void wcn36xx_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
+
+ list_del(&vif_priv->list);
+ wcn36xx_smd_delete_sta_self(wcn, vif->addr);
+}
+
+static int wcn36xx_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n",
+ vif, vif->type);
+
+ if (!(NL80211_IFTYPE_STATION == vif->type ||
+ NL80211_IFTYPE_AP == vif->type ||
+ NL80211_IFTYPE_ADHOC == vif->type ||
+ NL80211_IFTYPE_MESH_POINT == vif->type)) {
+ wcn36xx_warn("Unsupported interface type requested: %d\n",
+ vif->type);
+ return -EOPNOTSUPP;
+ }
+
+ list_add(&vif_priv->list, &wcn->vif_list);
+ wcn36xx_smd_add_sta_self(wcn, vif);
+
+ return 0;
+}
+
+static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
+ vif, sta->addr);
+
+ vif_priv->sta = sta_priv;
+ sta_priv->vif = vif_priv;
+ /*
+ * For STA mode HW will be configured on BSS_CHANGED_ASSOC because
+ * at this stage AID is not available yet.
+ */
+ if (NL80211_IFTYPE_STATION != vif->type) {
+ wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+ sta_priv->aid = sta->aid;
+ wcn36xx_smd_config_sta(wcn, vif, sta);
+ }
+ return 0;
+}
+
+static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
+ vif, sta->addr, sta_priv->sta_index);
+
+ wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
+ vif_priv->sta = NULL;
+ sta_priv->vif = NULL;
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac suspend\n");
+
+ flush_workqueue(wcn->hal_ind_wq);
+ wcn36xx_smd_set_power_params(wcn, true);
+ return 0;
+}
+
+static int wcn36xx_resume(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac resume\n");
+
+ flush_workqueue(wcn->hal_ind_wq);
+ wcn36xx_smd_set_power_params(wcn, false);
+ return 0;
+}
+
+#endif
+
+static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_sta *sta_priv = NULL;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
+ action, tid);
+
+ sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ sta_priv->tid = tid;
+ wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
+ get_sta_index(vif, sta_priv));
+ wcn36xx_smd_add_ba(wcn);
+ wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv));
+ ieee80211_start_tx_ba_session(sta, tid, 0);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
+ get_sta_index(vif, sta_priv));
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ default:
+ wcn36xx_err("Unknown AMPDU action\n");
+ }
+
+ return 0;
+}
+
+static const struct ieee80211_ops wcn36xx_ops = {
+ .start = wcn36xx_start,
+ .stop = wcn36xx_stop,
+ .add_interface = wcn36xx_add_interface,
+ .remove_interface = wcn36xx_remove_interface,
+#ifdef CONFIG_PM
+ .suspend = wcn36xx_suspend,
+ .resume = wcn36xx_resume,
+#endif
+ .config = wcn36xx_config,
+ .configure_filter = wcn36xx_configure_filter,
+ .tx = wcn36xx_tx,
+ .set_key = wcn36xx_set_key,
+ .sw_scan_start = wcn36xx_sw_scan_start,
+ .sw_scan_complete = wcn36xx_sw_scan_complete,
+ .bss_info_changed = wcn36xx_bss_info_changed,
+ .set_rts_threshold = wcn36xx_set_rts_threshold,
+ .sta_add = wcn36xx_sta_add,
+ .sta_remove = wcn36xx_sta_remove,
+ .ampdu_action = wcn36xx_ampdu_action,
+};
+
+static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
+{
+ int ret = 0;
+
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ };
+
+ wcn->hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_TIMING_BEACON_ONLY;
+
+ wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz;
+ wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz;
+
+ wcn->hw->wiphy->cipher_suites = cipher_suites;
+ wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ wcn->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+#ifdef CONFIG_PM
+ wcn->hw->wiphy->wowlan = &wowlan_support;
+#endif
+
+ wcn->hw->max_listen_interval = 200;
+
+ wcn->hw->queues = 4;
+
+ SET_IEEE80211_DEV(wcn->hw, wcn->dev);
+
+ wcn->hw->sta_data_size = sizeof(struct wcn36xx_sta);
+ wcn->hw->vif_data_size = sizeof(struct wcn36xx_vif);
+
+ return ret;
+}
+
+static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ /* Set TX IRQ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "wcnss_wlantx_irq");
+ if (!res) {
+ wcn36xx_err("failed to get tx_irq\n");
+ return -ENOENT;
+ }
+ wcn->tx_irq = res->start;
+
+ /* Set RX IRQ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "wcnss_wlanrx_irq");
+ if (!res) {
+ wcn36xx_err("failed to get rx_irq\n");
+ return -ENOENT;
+ }
+ wcn->rx_irq = res->start;
+
+ /* Map the memory */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "wcnss_mmio");
+ if (!res) {
+ wcn36xx_err("failed to get mmio\n");
+ return -ENOENT;
+ }
+ wcn->mmio = ioremap(res->start, resource_size(res));
+ if (!wcn->mmio) {
+ wcn36xx_err("failed to map io memory\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int wcn36xx_probe(struct platform_device *pdev)
+{
+ struct ieee80211_hw *hw;
+ struct wcn36xx *wcn;
+ int ret;
+ u8 addr[ETH_ALEN];
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
+
+ hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops);
+ if (!hw) {
+ wcn36xx_err("failed to alloc hw\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ platform_set_drvdata(pdev, hw);
+ wcn = hw->priv;
+ wcn->hw = hw;
+ wcn->dev = &pdev->dev;
+ wcn->ctrl_ops = pdev->dev.platform_data;
+
+ mutex_init(&wcn->hal_mutex);
+
+ if (!wcn->ctrl_ops->get_hw_mac(addr)) {
+ wcn36xx_info("mac address: %pM\n", addr);
+ SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
+ }
+
+ ret = wcn36xx_platform_get_resources(wcn, pdev);
+ if (ret)
+ goto out_wq;
+
+ wcn36xx_init_ieee80211(wcn);
+ ret = ieee80211_register_hw(wcn->hw);
+ if (ret)
+ goto out_unmap;
+
+ return 0;
+
+out_unmap:
+ iounmap(wcn->mmio);
+out_wq:
+ ieee80211_free_hw(hw);
+out_err:
+ return ret;
+}
+
+static int wcn36xx_remove(struct platform_device *pdev)
+{
+ struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+ struct wcn36xx *wcn = hw->priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
+
+ mutex_destroy(&wcn->hal_mutex);
+
+ ieee80211_unregister_hw(hw);
+ iounmap(wcn->mmio);
+ ieee80211_free_hw(hw);
+
+ return 0;
+}
+
+static const struct platform_device_id wcn36xx_platform_id_table[] = {
+ {
+ .name = "wcn36xx",
+ .driver_data = 0
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, wcn36xx_platform_id_table);
+
+static struct platform_driver wcn36xx_driver = {
+ .probe = wcn36xx_probe,
+ .remove = wcn36xx_remove,
+ .driver = {
+ .name = "wcn36xx",
+ .owner = THIS_MODULE,
+ },
+ .id_table = wcn36xx_platform_id_table,
+};
+
+static int __init wcn36xx_init(void)
+{
+ platform_driver_register(&wcn36xx_driver);
+ return 0;
+}
+module_init(wcn36xx_init);
+
+static void __exit wcn36xx_exit(void)
+{
+ platform_driver_unregister(&wcn36xx_driver);
+}
+module_exit(wcn36xx_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
+MODULE_FIRMWARE(WLAN_NV_FILE);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
new file mode 100644
index 000000000000..28b515c81b0e
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "wcn36xx.h"
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ int ret = 0;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ /* TODO: Make sure the TX chain clean */
+ ret = wcn36xx_smd_enter_bmps(wcn, vif);
+ if (!ret) {
+ wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
+ vif_priv->pw_state = WCN36XX_BMPS;
+ } else {
+ /*
+ * One of the reasons why HW will not enter BMPS is that the
+ * driver is trying to enter BMPS before the first beacon is
+ * received just after authentication completes.
+ */
+ wcn36xx_err("Can not enter BMPS!\n");
+ }
+ return ret;
+}
+
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (WCN36XX_BMPS != vif_priv->pw_state) {
+ wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
+ return -EINVAL;
+ }
+ wcn36xx_smd_exit_bmps(wcn, vif);
+ vif_priv->pw_state = WCN36XX_FULL_POWER;
+ return 0;
+}
+
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ wcn36xx_dbg(WCN36XX_DBG_PMC, "%s\n", __func__);
+ return wcn36xx_smd_keep_alive_req(wcn, vif,
+ WCN36XX_HAL_KEEP_ALIVE_NULL_PKT);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.h b/drivers/net/wireless/ath/wcn36xx/pmc.h
new file mode 100644
index 000000000000..f72ed68b5a07
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_PMC_H_
+#define _WCN36XX_PMC_H_
+
+struct wcn36xx;
+
+enum wcn36xx_power_state {
+ WCN36XX_FULL_POWER,
+ WCN36XX_BMPS
+};
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+#endif /* _WCN36XX_PMC_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
new file mode 100644
index 000000000000..f8c3a10510c2
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -0,0 +1,2126 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/bitops.h>
+#include "smd.h"
+
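+/* Append a u32 configuration TLV entry to hal_buf at offset *len, checking
+ * that it still fits within WCN36XX_HAL_BUF_SIZE. */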
+static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value)
+{
+ struct wcn36xx_hal_cfg *entry;
+ u32 *val;
+
+ if (*len + sizeof(*entry) + sizeof(u32) >= WCN36XX_HAL_BUF_SIZE) {
+ wcn36xx_err("Not enough room for TLV entry\n");
+ return -ENOMEM;
+ }
+
+ entry = (struct wcn36xx_hal_cfg *) (wcn->hal_buf + *len);
+ entry->id = id;
+ entry->len = sizeof(u32);
+ entry->pad_bytes = 0;
+ entry->reserve = 0;
+
+ val = (u32 *) (entry + 1);
+ *val = value;
+
+ *len += sizeof(*entry) + sizeof(u32);
+
+ return 0;
+}
+
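+/* Pick the HAL network type (11a/11n/11g/11b) from the current band and
+ * the peer station's capabilities. */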
+static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_bss_params *bss_params)
+{
+ if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn))
+ bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
+ else if (sta && sta->ht_cap.ht_supported)
+ bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+ else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f))
+ bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
+ else
+ bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
+}
+
+static inline u8 is_cap_supported(unsigned long caps, unsigned long flag)
+{
+ return caps & flag ? 1 : 0;
+}
+
+static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_bss_params *bss_params)
+{
+ if (sta && sta->ht_cap.ht_supported) {
+ unsigned long caps = sta->ht_cap.cap;
+ bss_params->ht = sta->ht_cap.ht_supported;
+ bss_params->tx_channel_width_set = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ bss_params->lsig_tx_op_protection_full_support =
+ is_cap_supported(caps,
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+ bss_params->ht_oper_mode = vif->bss_conf.ht_operation_mode;
+ bss_params->lln_non_gf_coexist =
+ !!(vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ /* IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT */
+ bss_params->dual_cts_protection = 0;
+ /* IEEE80211_HT_OP_MODE_PROTECTION_20MHZ */
+ bss_params->ht20_coexist = 0;
+ }
+}
+
+static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params *sta_params)
+{
+ if (sta->ht_cap.ht_supported) {
+ unsigned long caps = sta->ht_cap.cap;
+ sta_params->ht_capable = sta->ht_cap.ht_supported;
+ sta_params->tx_channel_width_set = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ sta_params->lsig_txop_protection = is_cap_supported(caps,
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+ sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor;
+ sta_params->max_ampdu_density = sta->ht_cap.ampdu_density;
+ sta_params->max_amsdu_size = is_cap_supported(caps,
+ IEEE80211_HT_CAP_MAX_AMSDU);
+ sta_params->sgi_20Mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SGI_20);
+ sta_params->sgi_40mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SGI_40);
+ sta_params->green_field_capable = is_cap_supported(caps,
+ IEEE80211_HT_CAP_GRN_FLD);
+ sta_params->delayed_ba_support = is_cap_supported(caps,
+ IEEE80211_HT_CAP_DELAY_BA);
+ sta_params->dsss_cck_mode_40mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_DSSSCCK40);
+ }
+}
+
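+/* Fill the HAL station parameters from the vif and, when available, the
+ * associated ieee80211_sta; without a station the default rate set is used. */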
+static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params *sta_params)
+{
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *priv_sta = NULL;
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ sta_params->type = 1;
+ sta_params->sta_index = 0xFF;
+ } else {
+ sta_params->type = 0;
+ sta_params->sta_index = 1;
+ }
+
+ sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+ /*
+ * In STA mode ieee80211_sta contains bssid and ieee80211_vif
+ * contains our mac address. In AP mode we are bssid so vif
+ * contains bssid and ieee80211_sta contains mac.
+ */
+ if (NL80211_IFTYPE_STATION == vif->type)
+ memcpy(&sta_params->mac, vif->addr, ETH_ALEN);
+ else
+ memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
+
+ sta_params->encrypt_type = priv_vif->encrypt_type;
+ sta_params->short_preamble_supported =
+ !(WCN36XX_FLAGS(wcn) &
+ IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE);
+
+ sta_params->rifs_mode = 0;
+ sta_params->rmf = 0;
+ sta_params->action = 0;
+ sta_params->uapsd = 0;
+ sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC;
+ sta_params->max_ampdu_duration = 0;
+ sta_params->bssid_index = priv_vif->bss_index;
+ sta_params->p2p = 0;
+
+ if (sta) {
+ priv_sta = (struct wcn36xx_sta *)sta->drv_priv;
+ if (NL80211_IFTYPE_STATION == vif->type)
+ memcpy(&sta_params->bssid, sta->addr, ETH_ALEN);
+ else
+ memcpy(&sta_params->mac, sta->addr, ETH_ALEN);
+ sta_params->wmm_enabled = sta->wme;
+ sta_params->max_sp_len = sta->max_sp;
+ sta_params->aid = priv_sta->aid;
+ wcn36xx_smd_set_sta_ht_params(sta, sta_params);
+ memcpy(&sta_params->supported_rates, &priv_sta->supported_rates,
+ sizeof(priv_sta->supported_rates));
+ } else {
+ wcn36xx_set_default_rates(&sta_params->supported_rates);
+ }
+}
+
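+/* Send the prepared message in hal_buf over SMD and block until the
+ * firmware response arrives or HAL_MSG_TIMEOUT expires. */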
+static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
+{
+ int ret = 0;
+ wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
+
+ init_completion(&wcn->hal_rsp_compl);
+ ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
+ if (ret) {
+ wcn36xx_err("HAL TX failed\n");
+ goto out;
+ }
+ if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
+ msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
+ wcn36xx_err("Timeout while waiting SMD response\n");
+ ret = -ETIME;
+ goto out;
+ }
+out:
+ return ret;
+}
+
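+/* INIT_HAL_MSG zeroes a message body and fills in its header (type, version,
+ * length); PREPARE_HAL_BUF copies the finished message into hal_buf. */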
+#define INIT_HAL_MSG(msg_body, type) \
+ do { \
+ memset(&msg_body, 0, sizeof(msg_body)); \
+ msg_body.header.msg_type = type; \
+ msg_body.header.msg_version = WCN36XX_HAL_MSG_VERSION0; \
+ msg_body.header.len = sizeof(msg_body); \
+ } while (0) \
+
+#define PREPARE_HAL_BUF(send_buf, msg_body) \
+ do { \
+ memset(send_buf, 0, msg_body.header.len); \
+ memcpy(send_buf, &msg_body, sizeof(msg_body)); \
+ } while (0) \
+
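+/* Generic response check: the status word follows the HAL message header
+ * and anything other than success is returned to the caller. */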
+static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
+{
+ struct wcn36xx_fw_msg_status_rsp *rsp;
+
+ if (len < sizeof(struct wcn36xx_hal_msg_header) +
+ sizeof(struct wcn36xx_fw_msg_status_rsp))
+ return -EIO;
+
+ rsp = (struct wcn36xx_fw_msg_status_rsp *)
+ (buf + sizeof(struct wcn36xx_hal_msg_header));
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
+ return rsp->status;
+
+ return 0;
+}
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
+{
+ const struct firmware *nv;
+ struct nv_data *nv_d;
+ struct wcn36xx_hal_nv_img_download_req_msg msg_body;
+ int fw_bytes_left;
+ int ret;
+ u16 fm_offset = 0;
+
+ ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev);
+ if (ret) {
+ wcn36xx_err("Failed to load nv file %s: %d\n",
+ WLAN_NV_FILE, ret);
+ goto out_free_nv;
+ }
+
+ nv_d = (struct nv_data *)nv->data;
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
+
+ msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
+
+ msg_body.frag_number = 0;
+ /* hal_buf must be protected with mutex */
+ mutex_lock(&wcn->hal_mutex);
+
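+ /* The NV image is sent in WCN36XX_NV_FRAGMENT_SIZE chunks; the last
+ * fragment carries the remaining bytes and has last_fragment set. */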
+ do {
+ fw_bytes_left = nv->size - fm_offset - 4;
+ if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
+ msg_body.last_fragment = 0;
+ msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
+ } else {
+ msg_body.last_fragment = 1;
+ msg_body.nv_img_buffer_size = fw_bytes_left;
+
+ /* Do not forget update general message len */
+ msg_body.header.len = sizeof(msg_body) + fw_bytes_left;
+
+ }
+
+ /* Add load NV request message header */
+ memcpy(wcn->hal_buf, &msg_body, sizeof(msg_body));
+
+ /* Add NV body itself */
+ memcpy(wcn->hal_buf + sizeof(msg_body),
+ &nv_d->table + fm_offset,
+ msg_body.nv_img_buffer_size);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret)
+ goto out_unlock;
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_load_nv response failed err=%d\n",
+ ret);
+ goto out_unlock;
+ }
+ msg_body.frag_number++;
+ fm_offset += WCN36XX_NV_FRAGMENT_SIZE;
+
+ } while (msg_body.last_fragment != 1);
+
+out_unlock:
+ mutex_unlock(&wcn->hal_mutex);
+out_free_nv:
+ release_firmware(nv);
+
+ return ret;
+}
+
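+/* Parse the firmware start response and record the reported CRM/WLAN
+ * version strings and firmware version numbers. */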
+static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_mac_start_rsp_msg *rsp;
+
+ if (len < sizeof(*rsp))
+ return -EIO;
+
+ rsp = (struct wcn36xx_hal_mac_start_rsp_msg *)buf;
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->start_rsp_params.status)
+ return -EIO;
+
+ memcpy(wcn->crm_version, rsp->start_rsp_params.crm_version,
+ WCN36XX_HAL_VERSION_LENGTH);
+ memcpy(wcn->wlan_version, rsp->start_rsp_params.wlan_version,
+ WCN36XX_HAL_VERSION_LENGTH);
+
+ /* null terminate the strings, just in case */
+ wcn->crm_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+ wcn->wlan_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+
+ wcn->fw_revision = rsp->start_rsp_params.version.revision;
+ wcn->fw_version = rsp->start_rsp_params.version.version;
+ wcn->fw_minor = rsp->start_rsp_params.version.minor;
+ wcn->fw_major = rsp->start_rsp_params.version.major;
+
+ wcn36xx_info("firmware WLAN version '%s' and CRM version '%s'\n",
+ wcn->wlan_version, wcn->crm_version);
+
+ wcn36xx_info("firmware API %u.%u.%u.%u, %u stations, %u bssids\n",
+ wcn->fw_major, wcn->fw_minor,
+ wcn->fw_version, wcn->fw_revision,
+ rsp->start_rsp_params.stations,
+ rsp->start_rsp_params.bssids);
+
+ return 0;
+}
+
+int wcn36xx_smd_start(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_mac_start_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
+
+ msg_body.params.type = DRIVER_TYPE_PRODUCTION;
+ msg_body.params.len = 0;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start type %d\n",
+ msg_body.params.type);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_start failed\n");
+ goto out;
+ }
+
+ ret = wcn36xx_smd_start_rsp(wcn, wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_start response failed err=%d\n", ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_stop(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_mac_stop_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
+
+ msg_body.stop_req_params.reason = HAL_STOP_TYPE_RF_KILL;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_stop failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_stop response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
+{
+ struct wcn36xx_hal_init_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
+
+ msg_body.mode = mode;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal init scan mode %d\n", msg_body.mode);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_init_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_init_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_start_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
+
+ msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start scan channel %d\n",
+ msg_body.scan_channel);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_start_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_start_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_end_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
+
+ msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal end scan channel %d\n",
+ msg_body.scan_channel);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_end_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_end_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+ enum wcn36xx_hal_sys_mode mode)
+{
+ struct wcn36xx_hal_finish_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
+
+ msg_body.mode = mode;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal finish scan mode %d\n",
+ msg_body.mode);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_finish_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_finish_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
+ int ret = 0;
+
+ ret = wcn36xx_smd_rsp_status_check(buf, len);
+ if (ret)
+ return ret;
+ rsp = (struct wcn36xx_hal_switch_channel_rsp_msg *)buf;
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "channel switched to: %d, status: %d\n",
+ rsp->channel_number, rsp->status);
+ return ret;
+}
+
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif, int ch)
+{
+ struct wcn36xx_hal_switch_channel_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
+
+ msg_body.channel_number = (u8)ch;
+ msg_body.tx_mgmt_power = 0xbf;
+ msg_body.max_tx_power = 0xbf;
+ memcpy(msg_body.self_sta_mac_addr, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_switch_channel failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_switch_channel_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_switch_channel response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_update_scan_params_resp *rsp;
+
+ rsp = (struct wcn36xx_hal_update_scan_params_resp *)buf;
+
+ /* Remove the PNO version bit */
+ rsp->status &= (~(WCN36XX_FW_MSG_PNO_VERSION_MASK));
+
+ if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("error response from update scan\n");
+ return rsp->status;
+ }
+
+ return 0;
+}
+
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_update_scan_params_req msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
+
+ msg_body.dot11d_enabled = 0;
+ msg_body.dot11d_resolved = 0;
+ msg_body.channel_count = 26;
+ msg_body.active_min_ch_time = 60;
+ msg_body.active_max_ch_time = 120;
+ msg_body.passive_min_ch_time = 60;
+ msg_body.passive_max_ch_time = 110;
+ msg_body.state = 0;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal update scan params channel_count %d\n",
+ msg_body.channel_count);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_scan_params failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_update_scan_params_rsp(wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_scan_params response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_add_sta_self_rsp_msg *rsp;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_add_sta_self_rsp_msg *)buf;
+
+ if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal add sta self failure: %d\n",
+ rsp->status);
+ return rsp->status;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal add sta self status %d self_sta_index %d dpu_index %d\n",
+ rsp->status, rsp->self_sta_index, rsp->dpu_index);
+
+ priv_vif->self_sta_index = rsp->self_sta_index;
+ priv_vif->self_dpu_desc_index = rsp->dpu_index;
+
+ return 0;
+}
+
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_add_sta_self_req msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
+
+ memcpy(&msg_body.self_addr, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal add sta self self_addr %pM status %d\n",
+ msg_body.self_addr, msg_body.status);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_sta_self failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_add_sta_self_rsp(wcn,
+ vif,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_sta_self response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
+{
+ struct wcn36xx_hal_del_sta_self_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
+
+ memcpy(&msg_body.self_addr, addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_sta_self failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_sta_self response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
+{
+ struct wcn36xx_hal_delete_sta_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
+
+ msg_body.sta_index = sta_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal delete sta sta_index %d\n",
+ msg_body.sta_index);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_sta failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_sta response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_join_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_join_rsp_msg *rsp;
+
+ if (wcn36xx_smd_rsp_status_check(buf, len))
+ return -EIO;
+
+ rsp = (struct wcn36xx_hal_join_rsp_msg *)buf;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal rsp join status %d tx_mgmt_power %d\n",
+ rsp->status, rsp->tx_mgmt_power);
+
+ return 0;
+}
+
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
+{
+ struct wcn36xx_hal_join_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
+
+ memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+ memcpy(&msg_body.self_sta_mac_addr, vif, ETH_ALEN);
+ msg_body.channel = ch;
+
+ if (conf_is_ht40_minus(&wcn->hw->conf))
+ msg_body.secondary_channel_offset =
+ PHY_DOUBLE_CHANNEL_HIGH_PRIMARY;
+ else if (conf_is_ht40_plus(&wcn->hw->conf))
+ msg_body.secondary_channel_offset =
+ PHY_DOUBLE_CHANNEL_LOW_PRIMARY;
+ else
+ msg_body.secondary_channel_offset =
+ PHY_SINGLE_CHANNEL_CENTERED;
+
+ msg_body.link_state = WCN36XX_HAL_LINK_PREASSOC_STATE;
+
+ msg_body.max_tx_power = 0xbf;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal join req bssid %pM self_sta_mac_addr %pM channel %d link_state %d\n",
+ msg_body.bssid, msg_body.self_sta_mac_addr,
+ msg_body.channel, msg_body.link_state);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_join failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_join_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_join response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+ const u8 *sta_mac,
+ enum wcn36xx_hal_link_state state)
+{
+ struct wcn36xx_hal_set_link_state_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
+
+ memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+ memcpy(&msg_body.self_mac_addr, sta_mac, ETH_ALEN);
+ msg_body.state = state;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal set link state bssid %pM self_mac_addr %pM state %d\n",
+ msg_body.bssid, msg_body.self_mac_addr, msg_body.state);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_link_st failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_link_st response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
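+/*
+ * Copy the STA configuration into the v1 layout expected by firmware
+ * builds other than 1.2.2.24 (see wcn36xx_smd_config_sta()).
+ */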
+static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_sta_params *orig,
+ struct wcn36xx_hal_config_sta_params_v1 *v1)
+{
+ /* convert orig to v1 format */
+ memcpy(&v1->bssid, orig->bssid, ETH_ALEN);
+ memcpy(&v1->mac, orig->mac, ETH_ALEN);
+ v1->aid = orig->aid;
+ v1->type = orig->type;
+ v1->listen_interval = orig->listen_interval;
+ v1->ht_capable = orig->ht_capable;
+
+ v1->max_ampdu_size = orig->max_ampdu_size;
+ v1->max_ampdu_density = orig->max_ampdu_density;
+ v1->sgi_40mhz = orig->sgi_40mhz;
+ v1->sgi_20Mhz = orig->sgi_20Mhz;
+
+ memcpy(&v1->supported_rates, &orig->supported_rates,
+ sizeof(orig->supported_rates));
+ v1->sta_index = orig->sta_index;
+}
+
+static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_config_sta_rsp_msg *rsp;
+ struct config_sta_rsp_params *params;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_config_sta_rsp_msg *)buf;
+ params = &rsp->params;
+
+ if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal config sta response failure: %d\n",
+ params->status);
+ return -EIO;
+ }
+
+ sta_priv->sta_index = params->sta_index;
+ sta_priv->dpu_desc_index = params->dpu_index;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n",
+ params->status, params->sta_index, params->bssid_index,
+ params->p2p);
+
+ return 0;
+}
+
+static int wcn36xx_smd_config_sta_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_sta_req_msg *orig)
+{
+ struct wcn36xx_hal_config_sta_req_msg_v1 msg_body;
+ struct wcn36xx_hal_config_sta_params_v1 *sta = &msg_body.sta_params;
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
+
+ wcn36xx_smd_convert_sta_to_v1(wcn, &orig->sta_params,
+ &msg_body.sta_params);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta v1 action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+ sta->action, sta->sta_index, sta->bssid_index,
+ sta->bssid, sta->type, sta->mac, sta->aid);
+
+ return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx_hal_config_sta_req_msg msg;
+ struct wcn36xx_hal_config_sta_params *sta_params;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
+
+ sta_params = &msg.sta_params;
+
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_config_sta_v1(wcn, &msg);
+ } else {
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+ sta_params->action, sta_params->sta_index,
+ sta_params->bssid_index, sta_params->bssid,
+ sta_params->type, sta_params->mac, sta_params->aid);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ }
+ if (ret) {
+ wcn36xx_err("Sending hal_config_sta failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_config_sta_rsp(wcn,
+ sta,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_config_sta response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_bss_req_msg *orig)
+{
+ struct wcn36xx_hal_config_bss_req_msg_v1 msg_body;
+ struct wcn36xx_hal_config_bss_params_v1 *bss = &msg_body.bss_params;
+ struct wcn36xx_hal_config_sta_params_v1 *sta = &bss->sta;
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+ /* convert orig to v1 */
+ memcpy(&msg_body.bss_params.bssid,
+ &orig->bss_params.bssid, ETH_ALEN);
+ memcpy(&msg_body.bss_params.self_mac_addr,
+ &orig->bss_params.self_mac_addr, ETH_ALEN);
+
+ msg_body.bss_params.bss_type = orig->bss_params.bss_type;
+ msg_body.bss_params.oper_mode = orig->bss_params.oper_mode;
+ msg_body.bss_params.nw_type = orig->bss_params.nw_type;
+
+ msg_body.bss_params.short_slot_time_supported =
+ orig->bss_params.short_slot_time_supported;
+ msg_body.bss_params.lla_coexist = orig->bss_params.lla_coexist;
+ msg_body.bss_params.llb_coexist = orig->bss_params.llb_coexist;
+ msg_body.bss_params.llg_coexist = orig->bss_params.llg_coexist;
+ msg_body.bss_params.ht20_coexist = orig->bss_params.ht20_coexist;
+ msg_body.bss_params.lln_non_gf_coexist =
+ orig->bss_params.lln_non_gf_coexist;
+
+ msg_body.bss_params.lsig_tx_op_protection_full_support =
+ orig->bss_params.lsig_tx_op_protection_full_support;
+ msg_body.bss_params.rifs_mode = orig->bss_params.rifs_mode;
+ msg_body.bss_params.beacon_interval = orig->bss_params.beacon_interval;
+ msg_body.bss_params.dtim_period = orig->bss_params.dtim_period;
+ msg_body.bss_params.tx_channel_width_set =
+ orig->bss_params.tx_channel_width_set;
+ msg_body.bss_params.oper_channel = orig->bss_params.oper_channel;
+ msg_body.bss_params.ext_channel = orig->bss_params.ext_channel;
+
+ msg_body.bss_params.reserved = orig->bss_params.reserved;
+
+ memcpy(&msg_body.bss_params.ssid,
+ &orig->bss_params.ssid,
+ sizeof(orig->bss_params.ssid));
+
+ msg_body.bss_params.action = orig->bss_params.action;
+ msg_body.bss_params.rateset = orig->bss_params.rateset;
+ msg_body.bss_params.ht = orig->bss_params.ht;
+ msg_body.bss_params.obss_prot_enabled =
+ orig->bss_params.obss_prot_enabled;
+ msg_body.bss_params.rmf = orig->bss_params.rmf;
+ msg_body.bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode;
+ msg_body.bss_params.dual_cts_protection =
+ orig->bss_params.dual_cts_protection;
+
+ msg_body.bss_params.max_probe_resp_retry_limit =
+ orig->bss_params.max_probe_resp_retry_limit;
+ msg_body.bss_params.hidden_ssid = orig->bss_params.hidden_ssid;
+ msg_body.bss_params.proxy_probe_resp =
+ orig->bss_params.proxy_probe_resp;
+ msg_body.bss_params.edca_params_valid =
+ orig->bss_params.edca_params_valid;
+
+ memcpy(&msg_body.bss_params.acbe,
+ &orig->bss_params.acbe,
+ sizeof(orig->bss_params.acbe));
+ memcpy(&msg_body.bss_params.acbk,
+ &orig->bss_params.acbk,
+ sizeof(orig->bss_params.acbk));
+ memcpy(&msg_body.bss_params.acvi,
+ &orig->bss_params.acvi,
+ sizeof(orig->bss_params.acvi));
+ memcpy(&msg_body.bss_params.acvo,
+ &orig->bss_params.acvo,
+ sizeof(orig->bss_params.acvo));
+
+ msg_body.bss_params.ext_set_sta_key_param_valid =
+ orig->bss_params.ext_set_sta_key_param_valid;
+
+ memcpy(&msg_body.bss_params.ext_set_sta_key_param,
+ &orig->bss_params.ext_set_sta_key_param,
+ sizeof(orig->bss_params.acvo));
+
+ msg_body.bss_params.wcn36xx_hal_persona =
+ orig->bss_params.wcn36xx_hal_persona;
+ msg_body.bss_params.spectrum_mgt_enable =
+ orig->bss_params.spectrum_mgt_enable;
+ msg_body.bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power;
+ msg_body.bss_params.max_tx_power = orig->bss_params.max_tx_power;
+
+ wcn36xx_smd_convert_sta_to_v1(wcn, &orig->bss_params.sta,
+ &msg_body.bss_params.sta);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+ bss->bssid, bss->self_mac_addr, bss->bss_type,
+ bss->oper_mode, bss->nw_type);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+ sta->bssid, sta->action, sta->sta_index,
+ sta->bssid_index, sta->aid, sta->type, sta->mac);
+
+ return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+
+static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_config_bss_rsp_msg *rsp;
+ struct wcn36xx_hal_config_bss_rsp_params *params;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf;
+ params = &rsp->bss_rsp_params;
+
+ if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal config bss response failure: %d\n",
+ params->status);
+ return -EIO;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss rsp status %d bss_idx %d dpu_desc_index %d"
+ " sta_idx %d self_idx %d bcast_idx %d mac %pM"
+ " power %d ucast_dpu_signature %d\n",
+ params->status, params->bss_index, params->dpu_desc_index,
+ params->bss_sta_index, params->bss_self_sta_index,
+ params->bss_bcast_sta_idx, params->mac,
+ params->tx_mgmt_power, params->ucast_dpu_signature);
+
+ priv_vif->bss_index = params->bss_index;
+
+ if (priv_vif->sta) {
+ priv_vif->sta->bss_sta_index = params->bss_sta_index;
+ priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
+ }
+
+ priv_vif->ucast_dpu_signature = params->ucast_dpu_signature;
+
+ return 0;
+}
+
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, const u8 *bssid,
+ bool update)
+{
+ struct wcn36xx_hal_config_bss_req_msg msg;
+ struct wcn36xx_hal_config_bss_params *bss;
+ struct wcn36xx_hal_config_sta_params *sta_params;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+ bss = &msg.bss_params;
+ sta_params = &bss->sta;
+
+ WARN_ON(is_zero_ether_addr(bssid));
+
+ memcpy(&bss->bssid, bssid, ETH_ALEN);
+
+ memcpy(bss->self_mac_addr, vif->addr, ETH_ALEN);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bss->bss_type = WCN36XX_HAL_INFRASTRUCTURE_MODE;
+
+ /* STA */
+ bss->oper_mode = 1;
+ bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_MODE;
+ } else if (vif->type == NL80211_IFTYPE_AP) {
+ bss->bss_type = WCN36XX_HAL_INFRA_AP_MODE;
+
+ /* AP */
+ bss->oper_mode = 0;
+ bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_SAP_MODE;
+ } else if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ bss->bss_type = WCN36XX_HAL_IBSS_MODE;
+
+ /* STA */
+ bss->oper_mode = 1;
+ } else {
+ wcn36xx_warn("Unknown type for bss config: %d\n", vif->type);
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ wcn36xx_smd_set_bss_nw_type(wcn, sta, bss);
+ else
+ bss->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+
+ bss->short_slot_time_supported = vif->bss_conf.use_short_slot;
+ bss->lla_coexist = 0;
+ bss->llb_coexist = 0;
+ bss->llg_coexist = 0;
+ bss->rifs_mode = 0;
+ bss->beacon_interval = vif->bss_conf.beacon_int;
+ bss->dtim_period = vif_priv->dtim_period;
+
+ wcn36xx_smd_set_bss_ht_params(vif, sta, bss);
+
+ bss->oper_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ if (conf_is_ht40_minus(&wcn->hw->conf))
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ else if (conf_is_ht40_plus(&wcn->hw->conf))
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ else
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+
+ bss->reserved = 0;
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
+ /* wcn->ssid is only valid in AP and IBSS mode */
+ bss->ssid.length = vif_priv->ssid.length;
+ memcpy(bss->ssid.ssid, vif_priv->ssid.ssid, vif_priv->ssid.length);
+
+ bss->obss_prot_enabled = 0;
+ bss->rmf = 0;
+ bss->max_probe_resp_retry_limit = 0;
+ bss->hidden_ssid = vif->bss_conf.hidden_ssid;
+ bss->proxy_probe_resp = 0;
+ bss->edca_params_valid = 0;
+
+ /* FIXME: set acbe, acbk, acvi and acvo */
+
+ bss->ext_set_sta_key_param_valid = 0;
+
+ /* FIXME: set ext_set_sta_key_param */
+
+ bss->spectrum_mgt_enable = 0;
+ bss->tx_mgmt_power = 0;
+ bss->max_tx_power = WCN36XX_MAX_POWER(wcn);
+
+ bss->action = update;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+ bss->bssid, bss->self_mac_addr, bss->bss_type,
+ bss->oper_mode, bss->nw_type);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+ sta_params->bssid, sta_params->action,
+ sta_params->sta_index, sta_params->bssid_index,
+ sta_params->aid, sta_params->type,
+ sta_params->mac);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_config_bss_v1(wcn, &msg);
+ } else {
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ }
+ if (ret) {
+ wcn36xx_err("Sending hal_config_bss failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_config_bss_rsp(wcn,
+ vif,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_config_bss response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_delete_bss_req_msg msg_body;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
+
+ msg_body.bss_index = priv_vif->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal delete bss %d\n", msg_body.bss_index);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_bss failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_bss response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct sk_buff *skb_beacon, u16 tim_off,
+ u16 p2p_off)
+{
+ struct wcn36xx_hal_send_beacon_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
+
+ /* TODO: find out why the 6 extra bytes are needed here */
+ msg_body.beacon_length = skb_beacon->len + 6;
+
+ if (BEACON_TEMPLATE_SIZE > msg_body.beacon_length) {
+ memcpy(&msg_body.beacon, &skb_beacon->len, sizeof(u32));
+ memcpy(&(msg_body.beacon[4]), skb_beacon->data,
+ skb_beacon->len);
+ } else {
+ wcn36xx_err("Beacon is too big: beacon size=%d\n",
+ msg_body.beacon_length);
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
+
+ /* TODO: find out why the extra 4 bytes are needed here */
+ msg_body.tim_ie_offset = tim_off + 4;
+ msg_body.p2p_ie_offset = p2p_off;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal send beacon beacon_length %d\n",
+ msg_body.beacon_length);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_send_beacon failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_send_beacon response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct wcn36xx_hal_send_probe_resp_req_msg msg;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
+
+ if (skb->len > BEACON_TEMPLATE_SIZE) {
+ wcn36xx_warn("probe response template is too big: %d\n",
+ skb->len);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ msg.probe_resp_template_len = skb->len;
+ memcpy(&msg.probe_resp_template, skb->data, skb->len);
+
+ memcpy(msg.bssid, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal update probe rsp len %d bssid %pM\n",
+ msg.probe_resp_template_len, msg.bssid);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_proberesp_tmpl failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_proberesp_tmpl response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_set_sta_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
+
+ msg_body.set_sta_key_params.sta_index = sta_index;
+ msg_body.set_sta_key_params.enc_type = enc_type;
+
+ msg_body.set_sta_key_params.key[0].id = keyidx;
+ msg_body.set_sta_key_params.key[0].unicast = 1;
+ msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
+ msg_body.set_sta_key_params.key[0].pae_role = 0;
+ msg_body.set_sta_key_params.key[0].length = keylen;
+ memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+ msg_body.set_sta_key_params.single_tid_rc = 1;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_stakey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_stakey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key)
+{
+ struct wcn36xx_hal_set_bss_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
+ msg_body.bss_idx = 0;
+ msg_body.enc_type = enc_type;
+ msg_body.num_keys = 1;
+ msg_body.keys[0].id = keyidx;
+ msg_body.keys[0].unicast = 0;
+ msg_body.keys[0].direction = WCN36XX_HAL_RX_ONLY;
+ msg_body.keys[0].pae_role = 0;
+ msg_body.keys[0].length = keylen;
+ memcpy(msg_body.keys[0].key, key, keylen);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_bsskey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_bsskey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
+
+ msg_body.sta_idx = sta_index;
+ msg_body.enc_type = enc_type;
+ msg_body.key_id = keyidx;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_remove_stakey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_remove_stakey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx)
+{
+ struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
+ msg_body.bss_idx = 0;
+ msg_body.enc_type = enc_type;
+ msg_body.key_id = keyidx;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_remove_bsskey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_enter_bmps_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
+
+ msg_body.bss_index = vif_priv->bss_index;
+ msg_body.tbtt = vif->bss_conf.sync_tsf;
+ msg_body.dtim_period = vif_priv->dtim_period;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_enter_bmps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_enter_bmps response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_enter_bmps_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
+
+ msg_body.bss_index = vif_priv->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_exit_bmps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
+{
+ struct wcn36xx_hal_set_power_params_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
+
+ /*
+ * When the host is down, ignore every second DTIM
+ */
+ if (ignore_dtim) {
+ msg_body.ignore_dtim = 1;
+ msg_body.dtim_period = 2;
+ }
+ msg_body.listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_power_params failed\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+/* Note: this function should only be called after association; otherwise
+ * the keep-alive request is invalid.
+ */
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ int packet_type)
+{
+ struct wcn36xx_hal_keep_alive_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
+
+ if (packet_type == WCN36XX_HAL_KEEP_ALIVE_NULL_PKT) {
+ msg_body.bss_index = vif_priv->bss_index;
+ msg_body.packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
+ msg_body.time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;
+ } else if (packet_type == WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP) {
+ /* TODO: the firmware also supports the ARP response type */
+ } else {
+ wcn36xx_warn("unknown keep alive packet type %d\n", packet_type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_exit_bmps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4, u32 arg5)
+{
+ struct wcn36xx_hal_dump_cmd_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
+
+ msg_body.arg1 = arg1;
+ msg_body.arg2 = arg2;
+ msg_body.arg3 = arg3;
+ msg_body.arg4 = arg4;
+ msg_body.arg5 = arg5;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_dump_cmd failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_dump_cmd response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
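+/*
+ * Helpers for the feature capability bitmap exchanged with the firmware:
+ * 128 bits stored in four u32 words, indexed by
+ * enum place_holder_in_cap_bitmap.
+ */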
+static inline void set_feat_caps(u32 *bitmap,
+ enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] |= (1 << bit_idx);
+}
+
+static inline int get_feat_caps(u32 *bitmap,
+ enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+ int ret = 0;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return -EINVAL;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
+ return ret;
+}
+
+static inline void clear_feat_caps(u32 *bitmap,
+ enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] &= ~(1 << bit_idx);
+}
+
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_feat_caps_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
+
+ set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ u16 tid,
+ u16 *ssn,
+ u8 direction,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_add_ba_session_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
+
+ msg_body.sta_index = sta_index;
+ memcpy(&msg_body.mac_addr, sta->addr, ETH_ALEN);
+ msg_body.dialog_token = 0x10;
+ msg_body.tid = tid;
+
+ /* Immediate BA because Delayed BA is not supported */
+ msg_body.policy = 1;
+ msg_body.buffer_size = WCN36XX_AGGR_BUFFER_SIZE;
+ msg_body.timeout = 0;
+ if (ssn)
+ msg_body.ssn = *ssn;
+ msg_body.direction = direction;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_ba_session failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_add_ba_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
+
+ msg_body.session_id = 0;
+ msg_body.win_size = WCN36XX_AGGR_BUFFER_SIZE;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
+{
+ struct wcn36xx_hal_del_ba_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
+
+ msg_body.sta_index = sta_index;
+ msg_body.tid = tid;
+ msg_body.direction = 0;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_del_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_del_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
+{
+ struct wcn36xx_hal_trigger_ba_req_msg msg_body;
+ struct wcn36xx_hal_trigget_ba_req_candidate *candidate;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
+
+ msg_body.session_id = 0;
+ msg_body.candidate_cnt = 1;
+ msg_body.header.len += sizeof(*candidate);
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ candidate = (struct wcn36xx_hal_trigget_ba_req_candidate *)
+ (wcn->hal_buf + sizeof(msg_body));
+ candidate->sta_index = sta_index;
+ candidate->tid_bitmap = 1;
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_trigger_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_tx_compl_ind_msg *rsp = buf;
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Bad TX complete indication\n");
+ return -EIO;
+ }
+
+ wcn36xx_dxe_tx_ack_ind(wcn, rsp->status);
+
+ return 0;
+}
+
+static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_missed_beacon_ind_msg *rsp = buf;
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *tmp;
+
+ /* Old FW does not have bss index */
+ if (wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+ tmp->bss_index);
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ ieee80211_connection_loss(vif);
+ }
+ return 0;
+ }
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Corrupted missed beacon indication\n");
+ return -EIO;
+ }
+
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ if (tmp->bss_index == rsp->bss_index) {
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+ rsp->bss_index);
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ ieee80211_connection_loss(vif);
+ return 0;
+ }
+ }
+
+ wcn36xx_warn("BSS index %d not found\n", rsp->bss_index);
+ return -ENOENT;
+}
+
+static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
+ struct wcn36xx_vif *tmp;
+ struct ieee80211_sta *sta = NULL;
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Corrupted delete sta indication\n");
+ return -EIO;
+ }
+
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ if (tmp->sta && (tmp->sta->sta_index == rsp->sta_id)) {
+ sta = container_of((void *)tmp->sta,
+ struct ieee80211_sta,
+ drv_priv);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "delete station indication %pM index %d\n",
+ rsp->addr2,
+ rsp->sta_id);
+ ieee80211_report_low_ack(sta, 0);
+ return 0;
+ }
+ }
+
+ wcn36xx_warn("STA with addr %pM and index %d not found\n",
+ rsp->addr2,
+ rsp->sta_id);
+ return -ENOENT;
+}
+
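+/* Send a single TLV-encoded configuration value (cfg_id, value) to firmware. */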
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
+{
+ struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
+ size_t len;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ body = (struct wcn36xx_hal_update_cfg_req_msg *) wcn->hal_buf;
+ len = msg_body.header.len;
+
+ put_cfg_tlv_u32(wcn, &len, cfg_id, value);
+ body->header.len = len;
+ body->len = len - sizeof(*body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_cfg failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_cfg response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
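+
+/*
+ * Dispatch a message received from the firmware: responses are copied into
+ * hal_buf and complete the waiting caller, while indications are queued for
+ * wcn36xx_ind_smd_work().
+ */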
+static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_msg_header *msg_header = buf;
+ struct wcn36xx_hal_ind_msg *msg_ind;
+ wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len);
+
+ switch (msg_header->msg_type) {
+ case WCN36XX_HAL_START_RSP:
+ case WCN36XX_HAL_CONFIG_STA_RSP:
+ case WCN36XX_HAL_CONFIG_BSS_RSP:
+ case WCN36XX_HAL_ADD_STA_SELF_RSP:
+ case WCN36XX_HAL_STOP_RSP:
+ case WCN36XX_HAL_DEL_STA_SELF_RSP:
+ case WCN36XX_HAL_DELETE_STA_RSP:
+ case WCN36XX_HAL_INIT_SCAN_RSP:
+ case WCN36XX_HAL_START_SCAN_RSP:
+ case WCN36XX_HAL_END_SCAN_RSP:
+ case WCN36XX_HAL_FINISH_SCAN_RSP:
+ case WCN36XX_HAL_DOWNLOAD_NV_RSP:
+ case WCN36XX_HAL_DELETE_BSS_RSP:
+ case WCN36XX_HAL_SEND_BEACON_RSP:
+ case WCN36XX_HAL_SET_LINK_ST_RSP:
+ case WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP:
+ case WCN36XX_HAL_SET_BSSKEY_RSP:
+ case WCN36XX_HAL_SET_STAKEY_RSP:
+ case WCN36XX_HAL_RMV_STAKEY_RSP:
+ case WCN36XX_HAL_RMV_BSSKEY_RSP:
+ case WCN36XX_HAL_ENTER_BMPS_RSP:
+ case WCN36XX_HAL_SET_POWER_PARAMS_RSP:
+ case WCN36XX_HAL_EXIT_BMPS_RSP:
+ case WCN36XX_HAL_KEEP_ALIVE_RSP:
+ case WCN36XX_HAL_DUMP_COMMAND_RSP:
+ case WCN36XX_HAL_ADD_BA_SESSION_RSP:
+ case WCN36XX_HAL_ADD_BA_RSP:
+ case WCN36XX_HAL_DEL_BA_RSP:
+ case WCN36XX_HAL_TRIGGER_BA_RSP:
+ case WCN36XX_HAL_UPDATE_CFG_RSP:
+ case WCN36XX_HAL_JOIN_RSP:
+ case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP:
+ case WCN36XX_HAL_CH_SWITCH_RSP:
+ case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
+ memcpy(wcn->hal_buf, buf, len);
+ wcn->hal_rsp_len = len;
+ complete(&wcn->hal_rsp_compl);
+ break;
+
+ case WCN36XX_HAL_OTA_TX_COMPL_IND:
+ case WCN36XX_HAL_MISSED_BEACON_IND:
+ case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+ msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
+ if (!msg_ind) {
+ wcn36xx_err("failed to allocate indication message\n");
+ break;
+ }
+ msg_ind->msg_len = len;
+ msg_ind->msg = kmalloc(len, GFP_KERNEL);
+ if (!msg_ind->msg) {
+ kfree(msg_ind);
+ wcn36xx_err("failed to allocate indication message\n");
+ break;
+ }
+ memcpy(msg_ind->msg, buf, len);
+ mutex_lock(&wcn->hal_ind_mutex);
+ list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
+ queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+ mutex_unlock(&wcn->hal_ind_mutex);
+ break;
+ default:
+ wcn36xx_err("SMD_EVENT (%d) not supported\n",
+ msg_header->msg_type);
+ }
+}
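+
+/* Work handler that processes queued firmware indications one at a time. */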
+static void wcn36xx_ind_smd_work(struct work_struct *work)
+{
+ struct wcn36xx *wcn =
+ container_of(work, struct wcn36xx, hal_ind_work);
+ struct wcn36xx_hal_msg_header *msg_header;
+ struct wcn36xx_hal_ind_msg *hal_ind_msg;
+
+ mutex_lock(&wcn->hal_ind_mutex);
+
+ hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
+ struct wcn36xx_hal_ind_msg,
+ list);
+
+ msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
+
+ switch (msg_header->msg_type) {
+ case WCN36XX_HAL_OTA_TX_COMPL_IND:
+ wcn36xx_smd_tx_compl_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ case WCN36XX_HAL_MISSED_BEACON_IND:
+ wcn36xx_smd_missed_beacon_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+ wcn36xx_smd_delete_sta_context_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ default:
+ wcn36xx_err("SMD_EVENT (%d) not supported\n",
+ msg_header->msg_type);
+ }
+ list_del(wcn->hal_ind_queue.next);
+ kfree(hal_ind_msg->msg);
+ kfree(hal_ind_msg);
+ mutex_unlock(&wcn->hal_ind_mutex);
+}
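+
+/* Set up the indication work queue and open the control channel to firmware. */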
+int wcn36xx_smd_open(struct wcn36xx *wcn)
+{
+ int ret = 0;
+ wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind");
+ if (!wcn->hal_ind_wq) {
+ wcn36xx_err("failed to allocate wq\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_WORK(&wcn->hal_ind_work, wcn36xx_ind_smd_work);
+ INIT_LIST_HEAD(&wcn->hal_ind_queue);
+ mutex_init(&wcn->hal_ind_mutex);
+
+ ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process);
+ if (ret) {
+ wcn36xx_err("failed to open control channel\n");
+ goto free_wq;
+ }
+
+ return ret;
+
+free_wq:
+ destroy_workqueue(wcn->hal_ind_wq);
+out:
+ return ret;
+}
+
+void wcn36xx_smd_close(struct wcn36xx *wcn)
+{
+ wcn->ctrl_ops->close();
+ destroy_workqueue(wcn->hal_ind_wq);
+ mutex_destroy(&wcn->hal_ind_mutex);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
new file mode 100644
index 000000000000..e7c39019c6f1
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SMD_H_
+#define _SMD_H_
+
+#include "wcn36xx.h"
+
+/* Max shared size is 4k but we take less. */
+#define WCN36XX_NV_FRAGMENT_SIZE 3072
+
+#define WCN36XX_HAL_BUF_SIZE 4096
+
+#define HAL_MSG_TIMEOUT 200
+#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
+/* The PNO version info is contained in the rsp msg */
+#define WCN36XX_FW_MSG_PNO_VERSION_MASK 0x8000
+
+enum wcn36xx_fw_msg_result {
+ WCN36XX_FW_MSG_RESULT_SUCCESS = 0,
+ WCN36XX_FW_MSG_RESULT_SUCCESS_SYNC = 1,
+
+ WCN36XX_FW_MSG_RESULT_MEM_FAIL = 5,
+};
+
+/******************************/
+/* SMD requests and responses */
+/******************************/
+struct wcn36xx_fw_msg_status_rsp {
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_ind_msg {
+ struct list_head list;
+ u8 *msg;
+ size_t msg_len;
+};
+
+struct wcn36xx;
+
+int wcn36xx_smd_open(struct wcn36xx *wcn);
+void wcn36xx_smd_close(struct wcn36xx *wcn);
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn);
+int wcn36xx_smd_start(struct wcn36xx *wcn);
+int wcn36xx_smd_stop(struct wcn36xx *wcn);
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+ enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn);
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch);
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+ const u8 *sta_mac,
+ enum wcn36xx_hal_link_state state);
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, const u8 *bssid,
+ bool update);
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct sk_buff *skb_beacon, u16 tim_off,
+ u16 p2p_off);
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif, int ch);
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb);
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key,
+ u8 sta_index);
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key);
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 sta_index);
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx);
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim);
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ int packet_type);
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4, u32 arg5);
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ u16 tid,
+ u16 *ssn,
+ u8 direction,
+ u8 sta_index);
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn);
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
+
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
+#endif /* _SMD_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
new file mode 100644
index 000000000000..b2b60e30caaf
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "txrx.h"
+
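+/* RSSI is derived from the top byte of phy_stat0. */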
+static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
+{
+ return 100 - ((bd->phy_stat0 >> 24) & 0xff);
+}
+
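+/*
+ * Strip the RX buffer descriptor from the skb, fill in the mac80211 RX
+ * status and hand the frame to mac80211.
+ */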
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
+{
+ struct ieee80211_rx_status status;
+ struct ieee80211_hdr *hdr;
+ struct wcn36xx_rx_bd *bd;
+ u16 fc, sn;
+
+ /*
+ * All fields must be zeroed, otherwise stale values can lead to
+ * unexpected behaviour.
+ */
+ memset(&status, 0, sizeof(status));
+
+ bd = (struct wcn36xx_rx_bd *)skb->data;
+ buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+ wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP,
+ "BD <<< ", (char *)bd,
+ sizeof(struct wcn36xx_rx_bd));
+
+ skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
+ skb_pull(skb, bd->pdu.mpdu_header_off);
+
+ status.mactime = 10;
+ status.freq = WCN36XX_CENTER_FREQ(wcn);
+ status.band = WCN36XX_BAND(wcn);
+ status.signal = -get_rssi0(bd);
+ status.antenna = 1;
+ status.rate_idx = 1;
+ status.flag = 0;
+ status.rx_flags = 0;
+ status.flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_DECRYPTED;
+
+ wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x status->vendor_radiotap_len=%x\n",
+ status.flag, status.vendor_radiotap_len);
+
+ memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = __le16_to_cpu(hdr->frame_control);
+ sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
+
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n",
+ skb, skb->len, fc, sn);
+ wcn36xx_dbg_dump(WCN36XX_DBG_BEACON_DUMP, "SKB <<< ",
+ (char *)skb->data, skb->len);
+ } else {
+ wcn36xx_dbg(WCN36XX_DBG_RX, "rx skb %p len %d fc %04x sn %d\n",
+ skb, skb->len, fc, sn);
+ wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "SKB <<< ",
+ (char *)skb->data, skb->len);
+ }
+
+ ieee80211_rx_irqsafe(wcn->hw, skb);
+
+ return 0;
+}
+
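+/* Fill the PDU part of the TX BD: header/data offsets, MPDU length and TID. */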
+static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
+ u32 mpdu_header_len,
+ u32 len,
+ u16 tid)
+{
+ bd->pdu.mpdu_header_len = mpdu_header_len;
+ bd->pdu.mpdu_header_off = sizeof(*bd);
+ bd->pdu.mpdu_data_off = bd->pdu.mpdu_header_len +
+ bd->pdu.mpdu_header_off;
+ bd->pdu.mpdu_len = len;
+ bd->pdu.tid = tid;
+}
+
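+/* Find the driver-private vif whose MAC address matches addr. */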
+static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
+ u8 *addr)
+{
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (memcmp(vif->addr, addr, ETH_ALEN) == 0)
+ return vif_priv;
+ }
+ wcn36xx_warn("vif %pM not found\n", addr);
+ return NULL;
+}
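+
+/* Fill the TX BD fields that are specific to data frames. */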
+static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
+ struct wcn36xx *wcn,
+ struct wcn36xx_vif **vif_priv,
+ struct wcn36xx_sta *sta_priv,
+ struct ieee80211_hdr *hdr,
+ bool bcast)
+{
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *__vif_priv = NULL;
+ bd->bd_rate = WCN36XX_BD_RATE_DATA;
+
+ /*
+ * For non-unicast frames mac80211 does not set the sta pointer, so use
+ * self_sta_index instead.
+ */
+ if (sta_priv) {
+ __vif_priv = sta_priv->vif;
+ vif = container_of((void *)__vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bd->sta_index = sta_priv->bss_sta_index;
+ bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
+ } else if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ bd->sta_index = sta_priv->sta_index;
+ bd->dpu_desc_idx = sta_priv->dpu_desc_index;
+ }
+ } else {
+ __vif_priv = get_vif_by_addr(wcn, hdr->addr2);
+ bd->sta_index = __vif_priv->self_sta_index;
+ bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ }
+
+ bd->dpu_sign = __vif_priv->ucast_dpu_signature;
+
+ if (ieee80211_is_nullfunc(hdr->frame_control) ||
+ (sta_priv && !sta_priv->is_data_encrypted))
+ bd->dpu_ne = 1;
+
+ if (bcast) {
+ bd->ub = 1;
+ bd->ack_policy = 1;
+ }
+ *vif_priv = __vif_priv;
+}
+
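+/* Fill the TX BD fields that are specific to management and control frames. */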
+static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
+ struct wcn36xx *wcn,
+ struct wcn36xx_vif **vif_priv,
+ struct ieee80211_hdr *hdr,
+ bool bcast)
+{
+ struct wcn36xx_vif *__vif_priv =
+ get_vif_by_addr(wcn, hdr->addr2);
+ bd->sta_index = __vif_priv->self_sta_index;
+ bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ bd->dpu_ne = 1;
+
+ /* default rate for unicast */
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ?
+ WCN36XX_BD_RATE_CTRL :
+ WCN36XX_BD_RATE_MGMT;
+ else if (ieee80211_is_ctl(hdr->frame_control))
+ bd->bd_rate = WCN36XX_BD_RATE_CTRL;
+ else
+ wcn36xx_warn("frame control type unknown\n");
+
+ /*
+ * While joining, trick the hardware into treating the probe request as
+ * unicast even if the address is broadcast.
+ */
+ if (__vif_priv->is_joining &&
+ ieee80211_is_probe_req(hdr->frame_control))
+ bcast = false;
+
+ if (bcast) {
+ /* broadcast */
+ bd->ub = 1;
+ /* No ack needed for non-unicast frames */
+ bd->ack_policy = 1;
+ bd->queue_id = WCN36XX_TX_B_WQ_ID;
+ } else
+ bd->queue_id = WCN36XX_TX_U_WQ_ID;
+ *vif_priv = __vif_priv;
+}
+
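+/*
+ * Build the TX buffer descriptor for skb and pass it to the DXE layer.
+ * Data frames go over the "low" DXE channel, management and control
+ * frames over the "high" one.
+ */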
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+ struct wcn36xx_sta *sta_priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ unsigned long flags;
+ bool is_low = ieee80211_is_data(hdr->frame_control);
+ bool bcast = is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1);
+ struct wcn36xx_tx_bd *bd = wcn36xx_dxe_get_next_bd(wcn, is_low);
+
+ if (!bd) {
+ /*
+ * TX DXE descriptors are used in pairs: one for the BD and one
+ * for the actual frame. The BD DXE has a preallocated buffer
+ * while the skb one does not. If this is not the case, something
+ * is really weird. TODO: recover from this situation
+ */
+
+ wcn36xx_err("bd address may not be NULL for BD DXE\n");
+ return -EINVAL;
+ }
+
+ memset(bd, 0, sizeof(*bd));
+
+ wcn36xx_dbg(WCN36XX_DBG_TX,
+ "tx skb %p len %d fc %04x sn %d %s %s\n",
+ skb, skb->len, __le16_to_cpu(hdr->frame_control),
+ IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
+ is_low ? "low" : "high", bcast ? "bcast" : "ucast");
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_TX_DUMP, "", skb->data, skb->len);
+
+ bd->dpu_rf = WCN36XX_BMU_WQ_TX;
+
+ bd->tx_comp = info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS;
+ if (bd->tx_comp) {
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
+ spin_lock_irqsave(&wcn->dxe_lock, flags);
+ if (wcn->tx_ack_skb) {
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+ wcn36xx_warn("tx_ack_skb already set\n");
+ return -EINVAL;
+ }
+
+ wcn->tx_ack_skb = skb;
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+ /* Only one ack request at a time is supported by the firmware.
+ * Stop the TX queues until the ack status gets back.
+ *
+ * TODO: Add watchdog in case FW does not answer
+ */
+ ieee80211_stop_queues(wcn->hw);
+ }
+
+ /* Data frames are served first */
+ if (is_low) {
+ wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, hdr, bcast);
+ wcn36xx_set_tx_pdu(bd,
+ ieee80211_is_data_qos(hdr->frame_control) ?
+ sizeof(struct ieee80211_qos_hdr) :
+ sizeof(struct ieee80211_hdr_3addr),
+ skb->len, sta_priv ? sta_priv->tid : 0);
+ } else {
+ /* MGMT and CTRL frames are handled here */
+ wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, hdr, bcast);
+ wcn36xx_set_tx_pdu(bd,
+ ieee80211_is_data_qos(hdr->frame_control) ?
+ sizeof(struct ieee80211_qos_hdr) :
+ sizeof(struct ieee80211_hdr_3addr),
+ skb->len, WCN36XX_TID);
+ }
+
+ buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+ bd->tx_bd_sign = 0xbdbdbdbd;
+
+ return wcn36xx_dxe_tx_frame(wcn, vif_priv, skb, is_low);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h
new file mode 100644
index 000000000000..bbfbcf808c77
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include <linux/etherdevice.h>
+#include "wcn36xx.h"
+
+/* TODO describe all properties */
+#define WCN36XX_802_11_HEADER_LEN 24
+#define WCN36XX_BMU_WQ_TX 25
+#define WCN36XX_TID 7
+/* broadcast wq ID */
+#define WCN36XX_TX_B_WQ_ID 0xA
+#define WCN36XX_TX_U_WQ_ID 0x9
+/* bd_rate */
+#define WCN36XX_BD_RATE_DATA 0
+#define WCN36XX_BD_RATE_MGMT 2
+#define WCN36XX_BD_RATE_CTRL 3
+
+struct wcn36xx_pdu {
+ u32 dpu_fb:8;
+ u32 adu_fb:8;
+ u32 pdu_id:16;
+
+ /* 0x04*/
+ u32 tail_pdu_idx:16;
+ u32 head_pdu_idx:16;
+
+ /* 0x08*/
+ u32 pdu_count:7;
+ u32 mpdu_data_off:9;
+ u32 mpdu_header_off:8;
+ u32 mpdu_header_len:8;
+
+ /* 0x0c*/
+ u32 reserved4:8;
+ u32 tid:4;
+ u32 reserved3:4;
+ u32 mpdu_len:16;
+};
+
+struct wcn36xx_rx_bd {
+ u32 bdt:2;
+ u32 ft:1;
+ u32 dpu_ne:1;
+ u32 rx_key_id:3;
+ u32 ub:1;
+ u32 rmf:1;
+ u32 uma_bypass:1;
+ u32 csr11:1;
+ u32 reserved0:1;
+ u32 scan_learn:1;
+ u32 rx_ch:4;
+ u32 rtsf:1;
+ u32 bsf:1;
+ u32 a2hf:1;
+ u32 st_auf:1;
+ u32 dpu_sign:3;
+ u32 dpu_rf:8;
+
+ struct wcn36xx_pdu pdu;
+
+ /* 0x14*/
+ u32 addr3:8;
+ u32 addr2:8;
+ u32 addr1:8;
+ u32 dpu_desc_idx:8;
+
+ /* 0x18*/
+ u32 rxp_flags:23;
+ u32 rate_id:9;
+
+ u32 phy_stat0;
+ u32 phy_stat1;
+
+ /* 0x24 */
+ u32 rx_times;
+
+ u32 pmi_cmd[6];
+
+ /* 0x40 */
+ u32 reserved7:4;
+ u32 reorder_slot_id:6;
+ u32 reorder_fwd_id:6;
+ u32 reserved6:12;
+ u32 reorder_code:4;
+
+ /* 0x44 */
+ u32 exp_seq_num:12;
+ u32 cur_seq_num:12;
+ u32 fr_type_subtype:8;
+
+ /* 0x48 */
+ u32 msdu_size:16;
+ u32 sub_fr_id:4;
+ u32 proc_order:4;
+ u32 reserved9:4;
+ u32 aef:1;
+ u32 lsf:1;
+ u32 esf:1;
+ u32 asf:1;
+};
+
+struct wcn36xx_tx_bd {
+ u32 bdt:2;
+ u32 ft:1;
+ u32 dpu_ne:1;
+ u32 fw_tx_comp:1;
+ u32 tx_comp:1;
+ u32 reserved1:1;
+ u32 ub:1;
+ u32 rmf:1;
+ u32 reserved0:12;
+ u32 dpu_sign:3;
+ u32 dpu_rf:8;
+
+ struct wcn36xx_pdu pdu;
+
+ /* 0x14*/
+ u32 reserved5:7;
+ u32 queue_id:5;
+ u32 bd_rate:2;
+ u32 ack_policy:2;
+ u32 sta_index:8;
+ u32 dpu_desc_idx:8;
+
+ u32 tx_bd_sign;
+ u32 reserved6;
+ u32 dxe_start_time;
+ u32 dxe_end_time;
+
+ /*u32 tcp_udp_start_off:10;
+ u32 header_cks:16;
+ u32 reserved7:6;*/
+};
+
+struct wcn36xx_sta;
+struct wcn36xx;
+
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb);
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+ struct wcn36xx_sta *sta_priv,
+ struct sk_buff *skb);
+
+#endif /* _TXRX_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
new file mode 100644
index 000000000000..58b63833e8e7
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_H_
+#define _WCN36XX_H_
+
+#include <linux/completion.h>
+#include <linux/printk.h>
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+
+#include "hal.h"
+#include "smd.h"
+#include "txrx.h"
+#include "dxe.h"
+#include "pmc.h"
+#include "debug.h"
+
+#define WLAN_NV_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
+#define WCN36XX_AGGR_BUFFER_SIZE 64
+
+extern unsigned int wcn36xx_dbg_mask;
+
+enum wcn36xx_debug_mask {
+ WCN36XX_DBG_DXE = 0x00000001,
+ WCN36XX_DBG_DXE_DUMP = 0x00000002,
+ WCN36XX_DBG_SMD = 0x00000004,
+ WCN36XX_DBG_SMD_DUMP = 0x00000008,
+ WCN36XX_DBG_RX = 0x00000010,
+ WCN36XX_DBG_RX_DUMP = 0x00000020,
+ WCN36XX_DBG_TX = 0x00000040,
+ WCN36XX_DBG_TX_DUMP = 0x00000080,
+ WCN36XX_DBG_HAL = 0x00000100,
+ WCN36XX_DBG_HAL_DUMP = 0x00000200,
+ WCN36XX_DBG_MAC = 0x00000400,
+ WCN36XX_DBG_BEACON = 0x00000800,
+ WCN36XX_DBG_BEACON_DUMP = 0x00001000,
+ WCN36XX_DBG_PMC = 0x00002000,
+ WCN36XX_DBG_PMC_DUMP = 0x00004000,
+ WCN36XX_DBG_ANY = 0xffffffff,
+};
+
+#define wcn36xx_err(fmt, arg...) \
+ printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg);
+
+#define wcn36xx_warn(fmt, arg...) \
+ printk(KERN_WARNING pr_fmt("WARNING " fmt), ##arg)
+
+#define wcn36xx_info(fmt, arg...) \
+ printk(KERN_INFO pr_fmt(fmt), ##arg)
+
+#define wcn36xx_dbg(mask, fmt, arg...) do { \
+ if (wcn36xx_dbg_mask & mask) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##arg); \
+} while (0)
+
+#define wcn36xx_dbg_dump(mask, prefix_str, buf, len) do { \
+ if (wcn36xx_dbg_mask & mask) \
+ print_hex_dump(KERN_DEBUG, pr_fmt(prefix_str), \
+ DUMP_PREFIX_OFFSET, 32, 1, \
+ buf, len, false); \
+} while (0)
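
The debug macros above are gated by the module-wide wcn36xx_dbg_mask; a minimal sketch of how the mask filters call sites (the mask value is chosen purely for illustration):

	/* enable TX and DXE traces only (illustrative mask value) */
	wcn36xx_dbg_mask = WCN36XX_DBG_TX | WCN36XX_DBG_DXE;

	wcn36xx_dbg(WCN36XX_DBG_TX, "tx path entered\n");	/* printed */
	wcn36xx_dbg(WCN36XX_DBG_SMD, "filtered out\n");		/* suppressed */
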
+
+#define WCN36XX_HW_CHANNEL(__wcn) (__wcn->hw->conf.chandef.chan->hw_value)
+#define WCN36XX_BAND(__wcn) (__wcn->hw->conf.chandef.chan->band)
+#define WCN36XX_CENTER_FREQ(__wcn) (__wcn->hw->conf.chandef.chan->center_freq)
+#define WCN36XX_LISTEN_INTERVAL(__wcn) (__wcn->hw->conf.listen_interval)
+#define WCN36XX_FLAGS(__wcn) (__wcn->hw->flags)
+#define WCN36XX_MAX_POWER(__wcn) (__wcn->hw->conf.chandef.chan->max_power)
+
+static inline void buff_to_be(u32 *buf, size_t len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ buf[i] = cpu_to_be32(buf[i]);
+}
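
buff_to_be() converts a buffer to big endian in place, one 32-bit word at a time. A minimal usage sketch, mirroring the call already made on the TX BD in wcn36xx_start_tx():

	struct wcn36xx_tx_bd bd;

	memset(&bd, 0, sizeof(bd));
	/* ... fill in the descriptor fields ... */

	/* byte-swap every 32-bit word of the BD before handing it to DMA */
	buff_to_be((u32 *)&bd, sizeof(bd) / sizeof(u32));
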
+
+struct nv_data {
+ int is_valid;
+ u8 table;
+};
+
+/* Interface for the platform control path
+ *
+ * @open: hook called when wcn36xx wants to open the control channel.
+ * @tx: sends a buffer.
+ */
+struct wcn36xx_platform_ctrl_ops {
+ int (*open)(void *drv_priv, void *rsp_cb);
+ void (*close)(void);
+ int (*tx)(char *buf, size_t len);
+ int (*get_hw_mac)(u8 *addr);
+ int (*smsm_change_state)(u32 clear_mask, u32 set_mask);
+};
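
This ops table is the hook-up point for the platform glue that owns the control channel. A hypothetical sketch of a provider filling it in; every wcn36xx_plat_* name below is invented for illustration and is not part of this patch:

	/* All function names here are hypothetical placeholders. */
	static int wcn36xx_plat_open(void *drv_priv, void *rsp_cb) { return 0; }
	static void wcn36xx_plat_close(void) { }
	static int wcn36xx_plat_tx(char *buf, size_t len) { return 0; }
	static int wcn36xx_plat_get_hw_mac(u8 *addr) { return -EINVAL; }
	static int wcn36xx_plat_smsm_change_state(u32 clear, u32 set) { return 0; }

	static struct wcn36xx_platform_ctrl_ops wcn36xx_plat_ops = {
		.open			= wcn36xx_plat_open,
		.close			= wcn36xx_plat_close,
		.tx			= wcn36xx_plat_tx,
		.get_hw_mac		= wcn36xx_plat_get_hw_mac,
		.smsm_change_state	= wcn36xx_plat_smsm_change_state,
	};
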
+
+/**
+ * struct wcn36xx_vif - holds VIF related fields
+ *
+ * @bss_index: initially set to 0xFF; received from HW after the first
+ * config_bss call and must be used in delete_bss and enter/exit_bmps.
+ */
+struct wcn36xx_vif {
+ struct list_head list;
+ struct wcn36xx_sta *sta;
+ u8 dtim_period;
+ enum ani_ed_type encrypt_type;
+ bool is_joining;
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Power management */
+ enum wcn36xx_power_state pw_state;
+
+ u8 bss_index;
+ u8 ucast_dpu_signature;
+ /* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
+ u8 self_sta_index;
+ u8 self_dpu_desc_index;
+};
+
+/**
+ * struct wcn36xx_sta - holds STA related fields
+ *
+ * @tid: traffic ID that is used during AMPDU and in TX BD.
+ * @sta_index: STA index is returned from HW after config_sta call and is
+ * used in both SMD channel and TX BD.
+ * @dpu_desc_index: DPU descriptor index is returned from HW after config_sta
+ * call and is used in TX BD.
+ * @bss_sta_index: STA index is returned from HW after config_bss call and is
+ * used in both SMD channel and TX BD. See the table below for when it is used.
+ * @bss_dpu_desc_index: DPU descriptor index is returned from HW after
+ * config_bss call and is used in TX BD.
+ * ______________________________________________
+ * | | STA | AP |
+ * |______________|_____________|_______________|
+ * | TX BD |bss_sta_index| sta_index |
+ * |______________|_____________|_______________|
+ * |all SMD calls |bss_sta_index| sta_index |
+ * |______________|_____________|_______________|
+ * |smd_delete_sta| sta_index | sta_index |
+ * |______________|_____________|_______________|
+ */
+struct wcn36xx_sta {
+ struct wcn36xx_vif *vif;
+ u16 aid;
+ u16 tid;
+ u8 sta_index;
+ u8 dpu_desc_index;
+ u8 bss_sta_index;
+ u8 bss_dpu_desc_index;
+ bool is_data_encrypted;
+ /* Rates */
+ struct wcn36xx_hal_supported_rates supported_rates;
+};
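
A condensed restatement of the index selection the table above describes, taken from wcn36xx_set_tx_data() earlier in this patch; the helper name is hypothetical:

	static void wcn36xx_fill_bd_indices(struct wcn36xx_tx_bd *bd,
					    struct wcn36xx_sta *sta_priv,
					    enum nl80211_iftype type)
	{
		if (type == NL80211_IFTYPE_STATION) {
			/* STA mode: use the indices from config_bss */
			bd->sta_index = sta_priv->bss_sta_index;
			bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
		} else {
			/* AP, IBSS or mesh point: use the per-STA indices */
			bd->sta_index = sta_priv->sta_index;
			bd->dpu_desc_idx = sta_priv->dpu_desc_index;
		}
	}
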
+struct wcn36xx_dxe_ch;
+struct wcn36xx {
+ struct ieee80211_hw *hw;
+ struct device *dev;
+ struct list_head vif_list;
+
+ u8 fw_revision;
+ u8 fw_version;
+ u8 fw_minor;
+ u8 fw_major;
+
+ /* extra byte for the NULL termination */
+ u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+ u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+
+ /* IRQs */
+ int tx_irq;
+ int rx_irq;
+ void __iomem *mmio;
+
+ struct wcn36xx_platform_ctrl_ops *ctrl_ops;
+ /*
+ * hal_buf must be protected with hal_mutex to guarantee
+ * that all messages are sent one after another
+ */
+ u8 *hal_buf;
+ size_t hal_rsp_len;
+ struct mutex hal_mutex;
+ struct completion hal_rsp_compl;
+ struct workqueue_struct *hal_ind_wq;
+ struct work_struct hal_ind_work;
+ struct mutex hal_ind_mutex;
+ struct list_head hal_ind_queue;
+
+ /* DXE channels */
+ struct wcn36xx_dxe_ch dxe_tx_l_ch; /* TX low */
+ struct wcn36xx_dxe_ch dxe_tx_h_ch; /* TX high */
+ struct wcn36xx_dxe_ch dxe_rx_l_ch; /* RX low */
+ struct wcn36xx_dxe_ch dxe_rx_h_ch; /* RX high */
+
+ /* For synchronization of DXE resources from BH, IRQ and WQ contexts */
+ spinlock_t dxe_lock;
+ bool queues_stopped;
+
+ /* Memory pools */
+ struct wcn36xx_dxe_mem_pool mgmt_mem_pool;
+ struct wcn36xx_dxe_mem_pool data_mem_pool;
+
+ struct sk_buff *tx_ack_skb;
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+ /* Debug file system entry */
+ struct wcn36xx_dfs_entry dfs;
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+};
+
+static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
+ u8 major,
+ u8 minor,
+ u8 version,
+ u8 revision)
+{
+ return (wcn->fw_major == major &&
+ wcn->fw_minor == minor &&
+ wcn->fw_version == version &&
+ wcn->fw_revision == revision);
+}
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates);
+
+#endif /* _WCN36XX_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 61c302a6bdea..5b340769d5bb 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -316,8 +316,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
}
conn.channel = ch - 1;
- memcpy(conn.bssid, bss->bssid, 6);
- memcpy(conn.dst_mac, bss->bssid, 6);
+ memcpy(conn.bssid, bss->bssid, ETH_ALEN);
+ memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
/*
* FW don't support scan after connection attempt
*/
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index eb1dc7ad80fb..eeceab39cda2 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -197,7 +197,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_iounmap(pdev, wil->csr);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index b827d51c30a3..0d950f209dae 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -844,18 +844,18 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
if (priv->wep_is_on)
frame_ctl |= IEEE80211_FCTL_PROTECTED;
if (priv->operating_mode == IW_MODE_ADHOC) {
- skb_copy_from_linear_data(skb, &header.addr1, 6);
- memcpy(&header.addr2, dev->dev_addr, 6);
- memcpy(&header.addr3, priv->BSSID, 6);
+ skb_copy_from_linear_data(skb, &header.addr1, ETH_ALEN);
+ memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+ memcpy(&header.addr3, priv->BSSID, ETH_ALEN);
} else {
frame_ctl |= IEEE80211_FCTL_TODS;
- memcpy(&header.addr1, priv->CurrentBSSID, 6);
- memcpy(&header.addr2, dev->dev_addr, 6);
- skb_copy_from_linear_data(skb, &header.addr3, 6);
+ memcpy(&header.addr1, priv->CurrentBSSID, ETH_ALEN);
+ memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+ skb_copy_from_linear_data(skb, &header.addr3, ETH_ALEN);
}
if (priv->use_wpa)
- memcpy(&header.addr4, SNAP_RFC1024, 6);
+ memcpy(&header.addr4, SNAP_RFC1024, ETH_ALEN);
header.frame_control = cpu_to_le16(frame_ctl);
/* Copy the wireless header into the card */
@@ -929,11 +929,11 @@ static void fast_rx_path(struct atmel_private *priv,
}
}
- memcpy(skbp, header->addr1, 6); /* destination address */
+ memcpy(skbp, header->addr1, ETH_ALEN); /* destination address */
if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
- memcpy(&skbp[6], header->addr3, 6);
+ memcpy(&skbp[ETH_ALEN], header->addr3, ETH_ALEN);
else
- memcpy(&skbp[6], header->addr2, 6); /* source address */
+ memcpy(&skbp[ETH_ALEN], header->addr2, ETH_ALEN); /* source address */
skb->protocol = eth_type_trans(skb, priv->dev);
skb->ip_summed = CHECKSUM_NONE;
@@ -969,14 +969,14 @@ static void frag_rx_path(struct atmel_private *priv,
u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no,
u8 frag_no, int more_frags)
{
- u8 mac4[6];
- u8 source[6];
+ u8 mac4[ETH_ALEN];
+ u8 source[ETH_ALEN];
struct sk_buff *skb;
if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
- memcpy(source, header->addr3, 6);
+ memcpy(source, header->addr3, ETH_ALEN);
else
- memcpy(source, header->addr2, 6);
+ memcpy(source, header->addr2, ETH_ALEN);
rx_packet_loc += 24; /* skip header */
@@ -984,9 +984,9 @@ static void frag_rx_path(struct atmel_private *priv,
msdu_size -= 4;
if (frag_no == 0) { /* first fragment */
- atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, 6);
- msdu_size -= 6;
- rx_packet_loc += 6;
+ atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, ETH_ALEN);
+ msdu_size -= ETH_ALEN;
+ rx_packet_loc += ETH_ALEN;
if (priv->do_rx_crc)
crc = crc32_le(crc, mac4, 6);
@@ -994,9 +994,9 @@ static void frag_rx_path(struct atmel_private *priv,
priv->frag_seq = seq_no;
priv->frag_no = 1;
priv->frag_len = msdu_size;
- memcpy(priv->frag_source, source, 6);
- memcpy(&priv->rx_buf[6], source, 6);
- memcpy(priv->rx_buf, header->addr1, 6);
+ memcpy(priv->frag_source, source, ETH_ALEN);
+ memcpy(&priv->rx_buf[ETH_ALEN], source, ETH_ALEN);
+ memcpy(priv->rx_buf, header->addr1, ETH_ALEN);
atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size);
@@ -1006,13 +1006,13 @@ static void frag_rx_path(struct atmel_private *priv,
atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
if ((crc ^ 0xffffffff) != netcrc) {
priv->dev->stats.rx_crc_errors++;
- memset(priv->frag_source, 0xff, 6);
+ memset(priv->frag_source, 0xff, ETH_ALEN);
}
}
} else if (priv->frag_no == frag_no &&
priv->frag_seq == seq_no &&
- memcmp(priv->frag_source, source, 6) == 0) {
+ memcmp(priv->frag_source, source, ETH_ALEN) == 0) {
atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len],
rx_packet_loc, msdu_size);
@@ -1024,7 +1024,7 @@ static void frag_rx_path(struct atmel_private *priv,
atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
if ((crc ^ 0xffffffff) != netcrc) {
priv->dev->stats.rx_crc_errors++;
- memset(priv->frag_source, 0xff, 6);
+ memset(priv->frag_source, 0xff, ETH_ALEN);
more_frags = 1; /* don't send broken assembly */
}
}
@@ -1033,7 +1033,7 @@ static void frag_rx_path(struct atmel_private *priv,
priv->frag_no++;
if (!more_frags) { /* last one */
- memset(priv->frag_source, 0xff, 6);
+ memset(priv->frag_source, 0xff, ETH_ALEN);
if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
priv->dev->stats.rx_dropped++;
} else {
@@ -1129,7 +1129,7 @@ static void rx_done_irq(struct atmel_private *priv)
atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
/* we use the same buffer for frag reassembly and control packets */
- memset(priv->frag_source, 0xff, 6);
+ memset(priv->frag_source, 0xff, ETH_ALEN);
if (priv->do_rx_crc) {
/* last 4 octets is crc */
@@ -1557,7 +1557,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
priv->last_qual = jiffies;
priv->last_beacon_timestamp = 0;
memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
- memset(priv->BSSID, 0, 6);
+ memset(priv->BSSID, 0, ETH_ALEN);
priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
priv->station_was_associated = 0;
@@ -1718,7 +1718,7 @@ static int atmel_get_wap(struct net_device *dev,
char *extra)
{
struct atmel_private *priv = netdev_priv(dev);
- memcpy(awrq->sa_data, priv->CurrentBSSID, 6);
+ memcpy(awrq->sa_data, priv->CurrentBSSID, ETH_ALEN);
awrq->sa_family = ARPHRD_ETHER;
return 0;
@@ -2356,7 +2356,7 @@ static int atmel_get_scan(struct net_device *dev,
for (i = 0; i < priv->BSS_list_entries; i++) {
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, 6);
+ memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, ETH_ALEN);
current_ev = iwe_stream_add_event(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, IW_EV_ADDR_LEN);
@@ -2760,7 +2760,7 @@ static void atmel_enter_state(struct atmel_private *priv, int new_state)
static void atmel_scan(struct atmel_private *priv, int specific_ssid)
{
struct {
- u8 BSSID[6];
+ u8 BSSID[ETH_ALEN];
u8 SSID[MAX_SSID_LENGTH];
u8 scan_type;
u8 channel;
@@ -2771,7 +2771,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
u8 SSID_size;
} cmd;
- memset(cmd.BSSID, 0xff, 6);
+ memset(cmd.BSSID, 0xff, ETH_ALEN);
if (priv->fast_scan) {
cmd.SSID_size = priv->SSID_size;
@@ -2816,7 +2816,7 @@ static void join(struct atmel_private *priv, int type)
cmd.SSID_size = priv->SSID_size;
memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
- memcpy(cmd.BSSID, priv->CurrentBSSID, 6);
+ memcpy(cmd.BSSID, priv->CurrentBSSID, ETH_ALEN);
cmd.channel = (priv->channel & 0x7f);
cmd.BSS_type = type;
cmd.timeout = cpu_to_le16(2000);
@@ -2837,7 +2837,7 @@ static void start(struct atmel_private *priv, int type)
cmd.SSID_size = priv->SSID_size;
memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
- memcpy(cmd.BSSID, priv->BSSID, 6);
+ memcpy(cmd.BSSID, priv->BSSID, ETH_ALEN);
cmd.BSS_type = type;
cmd.channel = (priv->channel & 0x7f);
@@ -2883,9 +2883,9 @@ static void send_authentication_request(struct atmel_private *priv, u16 system,
header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
header.duration_id = cpu_to_le16(0x8000);
header.seq_ctrl = 0;
- memcpy(header.addr1, priv->CurrentBSSID, 6);
- memcpy(header.addr2, priv->dev->dev_addr, 6);
- memcpy(header.addr3, priv->CurrentBSSID, 6);
+ memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+ memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+ memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1)
/* no WEP for authentication frames with TrSeqNo 1 */
@@ -2916,7 +2916,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
struct ass_req_format {
__le16 capability;
__le16 listen_interval;
- u8 ap[6]; /* nothing after here directly accessible */
+ u8 ap[ETH_ALEN]; /* nothing after here directly accessible */
u8 ssid_el_id;
u8 ssid_len;
u8 ssid[MAX_SSID_LENGTH];
@@ -2930,9 +2930,9 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
header.duration_id = cpu_to_le16(0x8000);
header.seq_ctrl = 0;
- memcpy(header.addr1, priv->CurrentBSSID, 6);
- memcpy(header.addr2, priv->dev->dev_addr, 6);
- memcpy(header.addr3, priv->CurrentBSSID, 6);
+ memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+ memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+ memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
if (priv->wep_is_on)
@@ -2944,7 +2944,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
/* current AP address - only in reassoc frame */
if (is_reassoc) {
- memcpy(body.ap, priv->CurrentBSSID, 6);
+ memcpy(body.ap, priv->CurrentBSSID, ETH_ALEN);
ssid_el_p = &body.ssid_el_id;
bodysize = 18 + priv->SSID_size;
} else {
@@ -3021,7 +3021,7 @@ static void store_bss_info(struct atmel_private *priv,
int i, index;
for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
- if (memcmp(bss, priv->BSSinfo[i].BSSID, 6) == 0)
+ if (memcmp(bss, priv->BSSinfo[i].BSSID, ETH_ALEN) == 0)
index = i;
/* If we process a probe and an entry from this BSS exists
@@ -3032,7 +3032,7 @@ static void store_bss_info(struct atmel_private *priv,
if (priv->BSS_list_entries == MAX_BSS_ENTRIES)
return;
index = priv->BSS_list_entries++;
- memcpy(priv->BSSinfo[index].BSSID, bss, 6);
+ memcpy(priv->BSSinfo[index].BSSID, bss, ETH_ALEN);
priv->BSSinfo[index].RSSI = rssi;
} else {
if (rssi > priv->BSSinfo[index].RSSI)
@@ -3212,7 +3212,7 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
if (subtype == IEEE80211_STYPE_REASSOC_RESP &&
status != WLAN_STATUS_ASSOC_DENIED_RATES &&
status != WLAN_STATUS_CAPS_UNSUPPORTED &&
- priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
+ priv->ReAssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
priv->ReAssociationRequestRetryCnt++;
send_association_request(priv, 1);
@@ -3235,7 +3235,7 @@ static void atmel_join_bss(struct atmel_private *priv, int bss_index)
{
struct bss_info *bss = &priv->BSSinfo[bss_index];
- memcpy(priv->CurrentBSSID, bss->BSSID, 6);
+ memcpy(priv->CurrentBSSID, bss->BSSID, ETH_ALEN);
memcpy(priv->SSID, bss->SSID, priv->SSID_size = bss->SSIDsize);
/* The WPA stuff cares about the current AP address */
@@ -3767,7 +3767,7 @@ static int probe_atmel_card(struct net_device *dev)
0x00, 0x04, 0x25, 0x00, 0x00, 0x00
};
printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
- memcpy(dev->dev_addr, default_mac, 6);
+ memcpy(dev->dev_addr, default_mac, ETH_ALEN);
}
}
@@ -3819,7 +3819,7 @@ static void build_wpa_mib(struct atmel_private *priv)
struct { /* NB this is matched to the hardware, don't change. */
u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
- u8 receiver_address[6];
+ u8 receiver_address[ETH_ALEN];
u8 wep_is_on;
u8 default_key; /* 0..3 */
u8 group_key;
@@ -3837,7 +3837,7 @@ static void build_wpa_mib(struct atmel_private *priv)
mib.wep_is_on = priv->wep_is_on;
mib.exclude_unencrypted = priv->exclude_unencrypted;
- memcpy(mib.receiver_address, priv->CurrentBSSID, 6);
+ memcpy(mib.receiver_address, priv->CurrentBSSID, ETH_ALEN);
/* zero all the keys before adding in valid ones. */
memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value));
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index c51d2dc489e4..1d7982afc0ad 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1065,12 +1065,9 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
/* Try to set the DMA mask. If it fails, try falling back to a
* lower mask, as we can always also support a lower one. */
while (1) {
- err = dma_set_mask(dev->dev->dma_dev, mask);
- if (!err) {
- err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
- if (!err)
- break;
- }
+ err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
+ if (!err)
+ break;
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
fallback = true;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 7c970d3ae358..05ee7f10cc8f 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -164,7 +164,8 @@ static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field,
}
en_addr = en_addrs[override][i];
- val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
+ if (e)
+ val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
if (off) {
b43_phy_mask(dev, en_addr, ~en_mask);
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 8cb206a89083..4ae63f4ddfb2 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -278,7 +278,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
else
txhdr->phy_rate = b43_plcp_get_ratecode_cck(rate);
txhdr->mac_frame_ctl = wlhdr->frame_control;
- memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+ memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
/* Calculate duration for fallback rate */
if ((rate_fb == rate) ||
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 42eb26c99e11..b2ed1795130b 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -806,12 +806,9 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
/* Try to set the DMA mask. If it fails, try falling back to a
* lower mask, as we can always also support a lower one. */
while (1) {
- err = dma_set_mask(dev->dev->dma_dev, mask);
- if (!err) {
- err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
- if (!err)
- break;
- }
+ err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
+ if (!err)
+ break;
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
fallback = true;
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 849a28c80302..86588c9ff0f2 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -215,7 +215,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value);
txhdr->mac_frame_ctl = wlhdr->frame_control;
- memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+ memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
/* Calculate duration for fallback rate */
if ((rate_fb->hw_value == rate) ||
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e13b1a65c65f..3e10b801eee8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -26,7 +26,6 @@
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
#include <linux/platform_data/brcmfmac-sdio.h>
#include <defs.h>
@@ -239,7 +238,9 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
func_num = SDIO_FUNC_1;
reg_size = 4;
- brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
+ ret = brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
+ if (ret)
+ goto done;
}
do {
@@ -255,6 +256,7 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
func_num, addr, data, 4);
} while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
+done:
if (ret != 0)
brcmf_err("failed with %d\n", ret);
@@ -315,8 +317,36 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
*ret = retval;
}
+static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
+ bool write, u32 addr, struct sk_buff *pkt)
+{
+ unsigned int req_sz;
+
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+ if (brcmf_pm_resume_error(sdiodev))
+ return -EIO;
+
+ /* Single skb use the standard mmc interface */
+ req_sz = pkt->len + 3;
+ req_sz &= (uint)~3;
+
+ if (write)
+ return sdio_memcpy_toio(sdiodev->func[fn], addr,
+ ((u8 *)(pkt->data)),
+ req_sz);
+ else if (fn == 1)
+ return sdio_memcpy_fromio(sdiodev->func[fn],
+ ((u8 *)(pkt->data)),
+ addr, req_sz);
+ else
+ /* function 2 read is FIFO operation */
+ return sdio_readsb(sdiodev->func[fn],
+ ((u8 *)(pkt->data)), addr,
+ req_sz);
+}
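
The single-skb path above rounds the transfer length up to a multiple of four bytes before calling into the mmc core; a few worked values:

	/* pkt->len = 62:   req_sz = (62 + 3) & ~3   = 64
	 * pkt->len = 64:   req_sz = (64 + 3) & ~3   = 64  (already aligned)
	 * pkt->len = 1514: req_sz = (1514 + 3) & ~3 = 1516
	 */
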
+
/**
- * brcmf_sdio_buffrw - SDIO interface function for block data access
+ * brcmf_sdio_sglist_rw - SDIO interface function for block data access
* @sdiodev: brcmfmac sdio device
* @fn: SDIO function number
* @write: direction flag
@@ -327,12 +357,13 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
* stack for block data access. It assumes that the skb passed down by the
* caller has already been padded and aligned.
*/
-static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
- bool write, u32 addr, struct sk_buff_head *pktlist)
+static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+ bool write, u32 addr,
+ struct sk_buff_head *pktlist)
{
unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
- unsigned int max_blks, max_req_sz, orig_offset, dst_offset;
- unsigned short max_seg_sz, seg_sz;
+ unsigned int max_req_sz, orig_offset, dst_offset;
+ unsigned short max_seg_cnt, seg_sz;
unsigned char *pkt_data, *orig_data, *dst_data;
struct sk_buff *pkt_next = NULL, *local_pkt_next;
struct sk_buff_head local_list, *target_list;
@@ -341,7 +372,6 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
struct mmc_data mmc_dat;
struct sg_table st;
struct scatterlist *sgl;
- struct mmc_host *host;
int ret = 0;
if (!pktlist->qlen)
@@ -351,27 +381,6 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
- /* Single skb use the standard mmc interface */
- if (pktlist->qlen == 1) {
- pkt_next = pktlist->next;
- req_sz = pkt_next->len + 3;
- req_sz &= (uint)~3;
-
- if (write)
- return sdio_memcpy_toio(sdiodev->func[fn], addr,
- ((u8 *)(pkt_next->data)),
- req_sz);
- else if (fn == 1)
- return sdio_memcpy_fromio(sdiodev->func[fn],
- ((u8 *)(pkt_next->data)),
- addr, req_sz);
- else
- /* function 2 read is FIFO operation */
- return sdio_readsb(sdiodev->func[fn],
- ((u8 *)(pkt_next->data)), addr,
- req_sz);
- }
-
target_list = pktlist;
/* for host with broken sg support, prepare a page aligned list */
__skb_queue_head_init(&local_list);
@@ -398,38 +407,46 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
target_list = &local_list;
}
- host = sdiodev->func[fn]->card->host;
func_blk_sz = sdiodev->func[fn]->cur_blksize;
- /* Blocks per command is limited by host count, host transfer
- * size and the maximum for IO_RW_EXTENDED of 511 blocks.
- */
- max_blks = min_t(unsigned int, host->max_blk_count, 511u);
- max_req_sz = min_t(unsigned int, host->max_req_size,
- max_blks * func_blk_sz);
- max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC);
- max_seg_sz = min_t(unsigned short, max_seg_sz, target_list->qlen);
+ max_req_sz = sdiodev->max_request_size;
+ max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
+ target_list->qlen);
seg_sz = target_list->qlen;
pkt_offset = 0;
pkt_next = target_list->next;
- if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) {
+ if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
ret = -ENOMEM;
goto exit;
}
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+ memset(&mmc_dat, 0, sizeof(struct mmc_data));
+
+ mmc_dat.sg = st.sgl;
+ mmc_dat.blksz = func_blk_sz;
+ mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+ mmc_cmd.opcode = SD_IO_RW_EXTENDED;
+ mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
+ mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
+ mmc_cmd.arg |= 1<<27; /* block mode */
+ /* for function 1 the addr will be incremented */
+ mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
+ mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+ mmc_req.cmd = &mmc_cmd;
+ mmc_req.data = &mmc_dat;
+
while (seg_sz) {
req_sz = 0;
sg_cnt = 0;
- memset(&mmc_req, 0, sizeof(struct mmc_request));
- memset(&mmc_cmd, 0, sizeof(struct mmc_command));
- memset(&mmc_dat, 0, sizeof(struct mmc_data));
sgl = st.sgl;
/* prep sg table */
while (pkt_next != (struct sk_buff *)target_list) {
pkt_data = pkt_next->data + pkt_offset;
sg_data_sz = pkt_next->len - pkt_offset;
- if (sg_data_sz > host->max_seg_size)
- sg_data_sz = host->max_seg_size;
+ if (sg_data_sz > sdiodev->max_segment_size)
+ sg_data_sz = sdiodev->max_segment_size;
if (sg_data_sz > max_req_sz - req_sz)
sg_data_sz = max_req_sz - req_sz;
@@ -444,7 +461,7 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
pkt_next = pkt_next->next;
}
- if (req_sz >= max_req_sz || sg_cnt >= max_seg_sz)
+ if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
break;
}
seg_sz -= sg_cnt;
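
For reference, the CMD53 (SD_IO_RW_EXTENDED) argument assembled in the hunk above packs its fields as follows; this only restates the bit manipulation already in the code, with the per-request address and count bits OR-ed in last:

	/* CMD53 argument layout as built above:
	 *   bit  31       R/W flag         write ? 1 : 0
	 *   bits 30..28   function number  (fn & 0x7) << 28
	 *   bit  27       block mode       1 << 27
	 *   bit  26       OP code          (fn == 1) ? 1 << 26 : 0 (increment addr)
	 *   bits 25..9    register address (addr & 0x1FFFF) << 9
	 *   bits 8..0     block count      blocks & 0x1FF
	 */
	u32 arg = (write ? 1 << 31 : 0) | ((fn & 0x7) << 28) | (1 << 27) |
		  ((fn == 1) ? 1 << 26 : 0) | ((addr & 0x1FFFF) << 9) |
		  (blocks & 0x1FF);
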
@@ -455,27 +472,17 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
ret = -ENOTBLK;
goto exit;
}
- mmc_dat.sg = st.sgl;
+
mmc_dat.sg_len = sg_cnt;
- mmc_dat.blksz = func_blk_sz;
mmc_dat.blocks = req_sz / func_blk_sz;
- mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
- mmc_cmd.opcode = SD_IO_RW_EXTENDED;
- mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
- mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
- mmc_cmd.arg |= 1<<27; /* block mode */
- /* incrementing addr for function 1 */
- mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
- mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
- mmc_req.cmd = &mmc_cmd;
- mmc_req.data = &mmc_dat;
+ /* incrementing addr for function 1 */
if (fn == 1)
addr += req_sz;
mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
- mmc_wait_for_req(host, &mmc_req);
+ mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
if (ret != 0) {
@@ -546,7 +553,6 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
{
uint width;
int err = 0;
- struct sk_buff_head pkt_list;
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pkt->len);
@@ -556,19 +562,17 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
if (err)
goto done;
- skb_queue_head_init(&pkt_list);
- skb_queue_tail(&pkt_list, pkt);
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, &pkt_list);
- skb_dequeue_tail(&pkt_list);
+ err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pkt);
done:
return err;
}
int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq)
+ uint flags, struct sk_buff_head *pktq, uint totlen)
{
- uint incr_fix;
+ struct sk_buff *glom_skb;
+ struct sk_buff *skb;
uint width;
int err = 0;
@@ -580,8 +584,22 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
if (err)
goto done;
- incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq);
+ if (pktq->qlen == 1)
+ err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq->next);
+ else if (!sdiodev->sg_support) {
+ glom_skb = brcmu_pkt_buf_get_skb(totlen);
+ if (!glom_skb)
+ return -ENOMEM;
+ err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, glom_skb);
+ if (err)
+ goto done;
+
+ skb_queue_walk(pktq, skb) {
+ memcpy(skb->data, glom_skb->data, skb->len);
+ skb_pull(glom_skb, skb->len);
+ }
+ } else
+ err = brcmf_sdio_sglist_rw(sdiodev, fn, false, addr, pktq);
done:
return err;
@@ -592,7 +610,7 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
- struct sk_buff_head pktq;
+ uint width;
int err;
mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -603,10 +621,12 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
}
memcpy(mypkt->data, buf, nbytes);
- __skb_queue_head_init(&pktq);
- __skb_queue_tail(&pktq, mypkt);
- err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
- __skb_dequeue_tail(&pktq);
+
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+
+ if (!err)
+ err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, mypkt);
brcmu_pkt_buf_free_skb(mypkt);
return err;
@@ -617,16 +637,26 @@ int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff_head *pktq)
{
+ struct sk_buff *skb;
uint width;
- int err = 0;
+ int err;
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pktq->qlen);
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- brcmf_sdio_addrprep(sdiodev, width, &addr);
+ err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+ if (err)
+ return err;
- err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq);
+ if (pktq->qlen == 1 || !sdiodev->sg_support)
+ skb_queue_walk(pktq, skb) {
+ err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, skb);
+ if (err)
+ break;
+ }
+ else
+ err = brcmf_sdio_sglist_rw(sdiodev, fn, true, addr, pktq);
return err;
}
@@ -639,7 +669,6 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
struct sk_buff *pkt;
u32 sdaddr;
uint dsize;
- struct sk_buff_head pkt_list;
dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
pkt = dev_alloc_skb(dsize);
@@ -648,7 +677,6 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
return -EIO;
}
pkt->priority = 0;
- skb_queue_head_init(&pkt_list);
/* Determine initial transfer parameters */
sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
@@ -676,10 +704,8 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
skb_put(pkt, dsize);
if (write)
memcpy(pkt->data, data, dsize);
- skb_queue_tail(&pkt_list, pkt);
bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
- sdaddr, &pkt_list);
- skb_dequeue_tail(&pkt_list);
+ sdaddr, pkt);
if (bcmerror) {
brcmf_err("membytes transfer failed\n");
break;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index c3462b75bd08..905704e335d7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -21,6 +21,7 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/sched.h> /* request_irq() */
@@ -34,6 +35,7 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include "sdio_host.h"
+#include "sdio_chip.h"
#include "dhd_dbg.h"
#include "dhd_bus.h"
@@ -41,13 +43,6 @@
#define DMA_ALIGN_MASK 0x03
-#define SDIO_DEVICE_ID_BROADCOM_43143 43143
-#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
-#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
-#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
-#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
-#define SDIO_DEVICE_ID_BROADCOM_4335 0x4335
-
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
@@ -58,7 +53,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4335)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
+ SDIO_DEVICE_ID_BROADCOM_4335_4339)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -320,6 +316,8 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
int err;
struct brcmf_sdio_dev *sdiodev;
struct brcmf_bus *bus_if;
+ struct mmc_host *host;
+ uint max_blocks;
brcmf_dbg(SDIO, "Enter\n");
brcmf_dbg(SDIO, "Class=%x\n", func->class);
@@ -366,6 +364,20 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_err("F2 error, probe failed %d...\n", err);
goto fail;
}
+
+ /*
+ * Determine host-related variables after brcmf_sdio_probe(), as
+ * func->cur_blksize is properly set and F2 init has completed
+ * successfully.
+ */
+ host = func->card->host;
+ sdiodev->sg_support = host->max_segs > 1;
+ max_blocks = min_t(uint, host->max_blk_count, 511u);
+ sdiodev->max_request_size = min_t(uint, host->max_req_size,
+ max_blocks * func->cur_blksize);
+ sdiodev->max_segment_count = min_t(uint, host->max_segs,
+ SG_MAX_SINGLE_ALLOC);
+ sdiodev->max_segment_size = host->max_seg_size;
brcmf_dbg(SDIO, "F2 init completed...\n");
return 0;
@@ -466,7 +478,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
{
brcmf_dbg(SDIO, "Enter\n");
- brcmfmac_sdio_pdata = pdev->dev.platform_data;
+ brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
if (brcmfmac_sdio_pdata->power_on)
brcmfmac_sdio_pdata->power_on();
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 2eb9e642c9bf..899a2ada5b82 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -97,8 +97,6 @@
#define WLC_PHY_TYPE_LCN 8
#define WLC_PHY_TYPE_NULL 0xf
-#define BRCMF_EVENTING_MASK_LEN 16
-
#define TOE_TX_CSUM_OL 0x00000001
#define TOE_RX_CSUM_OL 0x00000002
@@ -632,29 +630,29 @@ struct brcmf_skb_reorder_data {
u8 *reorder;
};
-extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
+int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
/* Return pointer to interface name */
-extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
+char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
/* Query dongle */
-extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len);
-extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len);
+int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len);
+int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len);
/* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
- struct sk_buff *rxp);
+int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+ struct sk_buff *rxp);
-extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
-extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx,
- s32 ifidx, char *name, u8 *mac_addr);
-extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
+int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
+ char *name, u8 *mac_addr);
+void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
-extern u32 brcmf_get_chip_info(struct brcmf_if *ifp);
-extern void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
- bool success);
+u32 brcmf_get_chip_info(struct brcmf_if *ifp);
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+ bool success);
#endif /* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 74156f84180c..a6eb09e5d46f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -132,35 +132,34 @@ struct pktq *brcmf_bus_gettxq(struct brcmf_bus *bus)
* interface functions from common layer
*/
-extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
- struct sk_buff *pkt, int prec);
+bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
+ int prec);
/* Receive frame for delivery to OS. Callee disposes of rxp. */
-extern void brcmf_rx_frames(struct device *dev, struct sk_buff_head *rxlist);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
/* Indication from bus module regarding presence/insertion of dongle. */
-extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
+int brcmf_attach(uint bus_hdrlen, struct device *dev);
/* Indication from bus module regarding removal/absence of dongle */
-extern void brcmf_detach(struct device *dev);
+void brcmf_detach(struct device *dev);
/* Indication from bus module that dongle should be reset */
-extern void brcmf_dev_reset(struct device *dev);
+void brcmf_dev_reset(struct device *dev);
/* Indication from bus module to change flow-control state */
-extern void brcmf_txflowblock(struct device *dev, bool state);
+void brcmf_txflowblock(struct device *dev, bool state);
/* Notify the bus has transferred the tx packet to firmware */
-extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
- bool success);
+void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
-extern int brcmf_bus_start(struct device *dev);
+int brcmf_bus_start(struct device *dev);
#ifdef CONFIG_BRCMFMAC_SDIO
-extern void brcmf_sdio_exit(void);
-extern void brcmf_sdio_init(void);
-extern void brcmf_sdio_register(void);
+void brcmf_sdio_exit(void);
+void brcmf_sdio_init(void);
+void brcmf_sdio_register(void);
#endif
#ifdef CONFIG_BRCMFMAC_USB
-extern void brcmf_usb_exit(void);
-extern void brcmf_usb_register(void);
+void brcmf_usb_exit(void);
+void brcmf_usb_register(void);
#endif
#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 40e7f854e10f..64e9cff241b9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -509,9 +509,8 @@ netif_rx:
}
}
-void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
{
- struct sk_buff *skb, *pnext;
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
@@ -519,29 +518,24 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
u8 ifidx;
int ret;
- brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
- skb_queue_len(skb_list));
+ brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
- skb_queue_walk_safe(skb_list, skb, pnext) {
- skb_unlink(skb, skb_list);
-
- /* process and remove protocol-specific header */
- ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
- ifp = drvr->iflist[ifidx];
-
- if (ret || !ifp || !ifp->ndev) {
- if ((ret != -ENODATA) && ifp)
- ifp->stats.rx_errors++;
- brcmu_pkt_buf_free_skb(skb);
- continue;
- }
+ /* process and remove protocol-specific header */
+ ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
+ ifp = drvr->iflist[ifidx];
- rd = (struct brcmf_skb_reorder_data *)skb->cb;
- if (rd->reorder)
- brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
- else
- brcmf_netif_rx(ifp, skb);
+ if (ret || !ifp || !ifp->ndev) {
+ if ((ret != -ENODATA) && ifp)
+ ifp->stats.rx_errors++;
+ brcmu_pkt_buf_free_skb(skb);
+ return;
}
+
+ rd = (struct brcmf_skb_reorder_data *)skb->cb;
+ if (rd->reorder)
+ brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
+ else
+ brcmf_netif_rx(ifp, skb);
}
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index ef9179883748..53c6e710f2cb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -22,21 +22,21 @@
*/
/* Linkage, sets prot link and updates hdrlen in pub */
-extern int brcmf_proto_attach(struct brcmf_pub *drvr);
+int brcmf_proto_attach(struct brcmf_pub *drvr);
/* Unlink, frees allocated protocol memory (including brcmf_proto) */
-extern void brcmf_proto_detach(struct brcmf_pub *drvr);
+void brcmf_proto_detach(struct brcmf_pub *drvr);
/* Stop protocol: sync w/dongle state. */
-extern void brcmf_proto_stop(struct brcmf_pub *drvr);
+void brcmf_proto_stop(struct brcmf_pub *drvr);
/* Add any protocol-specific data header.
* Caller must reserve prot_hdrlen prepend space.
*/
-extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
- struct sk_buff *txp);
+void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
+ struct sk_buff *txp);
/* Sets dongle media info (drv_version, mac address). */
-extern int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
#endif /* _BRCMF_PROTO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 1aa75d5951b8..b02953c4ade7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -275,11 +275,6 @@ struct rte_console {
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
-#define BRCMF_SDIO_FW_NAME "brcm/brcmfmac-sdio.bin"
-#define BRCMF_SDIO_NV_NAME "brcm/brcmfmac-sdio.txt"
-MODULE_FIRMWARE(BRCMF_SDIO_FW_NAME);
-MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);
-
#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
* when idle
@@ -454,9 +449,6 @@ struct brcmf_sdio {
struct work_struct datawork;
atomic_t dpc_tskcnt;
- const struct firmware *firmware;
- u32 fw_ptr;
-
bool txoff; /* Transmit flow-controlled */
struct brcmf_sdio_count sdcnt;
bool sr_enabled; /* SaveRestore enabled */
@@ -493,6 +485,100 @@ enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_SUB,
};
+#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
+#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
+#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
+#define BCM43241B0_NVRAM_NAME "brcm/brcmfmac43241b0-sdio.txt"
+#define BCM43241B4_FIRMWARE_NAME "brcm/brcmfmac43241b4-sdio.bin"
+#define BCM43241B4_NVRAM_NAME "brcm/brcmfmac43241b4-sdio.txt"
+#define BCM4329_FIRMWARE_NAME "brcm/brcmfmac4329-sdio.bin"
+#define BCM4329_NVRAM_NAME "brcm/brcmfmac4329-sdio.txt"
+#define BCM4330_FIRMWARE_NAME "brcm/brcmfmac4330-sdio.bin"
+#define BCM4330_NVRAM_NAME "brcm/brcmfmac4330-sdio.txt"
+#define BCM4334_FIRMWARE_NAME "brcm/brcmfmac4334-sdio.bin"
+#define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt"
+#define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin"
+#define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt"
+
+MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
+
+struct brcmf_firmware_names {
+ u32 chipid;
+ u32 revmsk;
+ const char *bin;
+ const char *nv;
+};
+
+enum brcmf_firmware_type {
+ BRCMF_FIRMWARE_BIN,
+ BRCMF_FIRMWARE_NVRAM
+};
+
+#define BRCMF_FIRMWARE_NVRAM(name) \
+ name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
+
+static const struct brcmf_firmware_names brcmf_fwname_data[] = {
+ { BCM43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
+ { BCM43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
+ { BCM43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+ { BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
+ { BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
+ { BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
+ { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }
+};
+
+
+static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
+ enum brcmf_firmware_type type)
+{
+ const struct firmware *fw;
+ const char *name;
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
+ if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
+ brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
+ switch (type) {
+ case BRCMF_FIRMWARE_BIN:
+ name = brcmf_fwname_data[i].bin;
+ break;
+ case BRCMF_FIRMWARE_NVRAM:
+ name = brcmf_fwname_data[i].nv;
+ break;
+ default:
+ brcmf_err("invalid firmware type (%d)\n", type);
+ return NULL;
+ }
+ goto found;
+ }
+ }
+ brcmf_err("Unknown chipid %d [%d]\n",
+ bus->ci->chip, bus->ci->chiprev);
+ return NULL;
+
+found:
+ err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
+ if ((err) || (!fw)) {
+ brcmf_err("fail to request firmware %s (%d)\n", name, err);
+ return NULL;
+ }
+
+ return fw;
+}
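
revmsk is a bitmask of chip revisions: a table entry matches when BIT(chiprev) is set in it. A sketch of the lookup for the two BCM43241 entries, with a hypothetical caller around brcmf_sdbrcm_get_fw():

	const struct firmware *fw;

	/* BCM43241 rev 2: BIT(2) = 0x04, 0x04 & 0x0000001F != 0
	 *   -> brcmfmac43241b0-sdio.bin
	 * BCM43241 rev 5: BIT(5) = 0x20, 0x20 & 0xFFFFFFE0 != 0
	 *   -> brcmfmac43241b4-sdio.bin
	 */
	fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
	if (!fw)
		return -ENOENT;
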
+
static void pkt_align(struct sk_buff *p, int len, int align)
{
uint datalign;
@@ -1061,6 +1147,8 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
u8 rx_seq, fc, tx_seq_max;
u32 swheader;
+ trace_brcmf_sdpcm_hdr(false, header);
+
/* hw header */
len = get_unaligned_le16(header);
checksum = get_unaligned_le16(header + sizeof(u16));
@@ -1183,6 +1271,7 @@ static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
SDPCM_DOFFSET_MASK;
*(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
*(((__le32 *)header) + 2) = 0;
+ trace_brcmf_sdpcm_hdr(true, header);
}
static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
@@ -1303,7 +1392,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
sdio_claim_host(bus->sdiodev->func[1]);
errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, &bus->glom);
+ SDIO_FUNC_2, F2SYNC, &bus->glom, dlen);
sdio_release_host(bus->sdiodev->func[1]);
bus->sdcnt.f2rxdata++;
@@ -1406,13 +1495,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
bus->glom.qlen, pfirst, pfirst->data,
pfirst->len, pfirst->next,
pfirst->prev);
+ skb_unlink(pfirst, &bus->glom);
+ brcmf_rx_frame(bus->sdiodev->dev, pfirst);
+ bus->sdcnt.rxglompkts++;
}
- /* sent any remaining packets up */
- if (bus->glom.qlen)
- brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);
bus->sdcnt.rxglomframes++;
- bus->sdcnt.rxglompkts += bus->glom.qlen;
}
return num;
}
@@ -1557,7 +1645,6 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt; /* Packet for event or data frames */
- struct sk_buff_head pktlist; /* needed for bus interface */
u16 pad; /* Number of pad bytes to read */
uint rxleft = 0; /* Remaining number of frames allowed */
int ret; /* Return code from calls */
@@ -1759,9 +1846,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
continue;
}
- skb_queue_head_init(&pktlist);
- skb_queue_tail(&pktlist, pkt);
- brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
+ brcmf_rx_frame(bus->sdiodev->dev, pkt);
}
rxcount = maxframes - rxleft;
@@ -1786,10 +1871,65 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
return;
}
+/**
+ * struct brcmf_skbuff_cb reserves the first two bytes in sk_buff::cb
+ * for bus layer usage.
+ */
/* flag marking a dummy skb added for DMA alignment requirement */
-#define DUMMY_SKB_FLAG 0x10000
+#define ALIGN_SKB_FLAG 0x8000
/* bit mask of data length chopped from the previous packet */
-#define DUMMY_SKB_CHOP_LEN_MASK 0xffff
+#define ALIGN_SKB_CHOP_LEN_MASK 0x7fff
+
+static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio_dev *sdiodev,
+ struct sk_buff_head *pktq,
+ struct sk_buff *pkt, uint chan)
+{
+ struct sk_buff *pkt_pad;
+ u16 tail_pad, tail_chop, sg_align;
+ unsigned int blksize;
+ u8 *dat_buf;
+ int ntail;
+
+ blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+ sg_align = 4;
+ if (sdiodev->pdata && sdiodev->pdata->sd_sgentry_align > 4)
+ sg_align = sdiodev->pdata->sd_sgentry_align;
+ /* sg entry alignment should be a divisor of block size */
+ WARN_ON(blksize % sg_align);
+
+ /* Check tail padding */
+ pkt_pad = NULL;
+ tail_chop = pkt->len % sg_align;
+ tail_pad = sg_align - tail_chop;
+ tail_pad += blksize - (pkt->len + tail_pad) % blksize;
+ if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
+ pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+ if (pkt_pad == NULL)
+ return -ENOMEM;
+ memcpy(pkt_pad->data,
+ pkt->data + pkt->len - tail_chop,
+ tail_chop);
+ *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
+ skb_trim(pkt, pkt->len - tail_chop);
+ __skb_queue_after(pktq, pkt, pkt_pad);
+ } else {
+ ntail = pkt->data_len + tail_pad -
+ (pkt->end - pkt->tail);
+ if (skb_cloned(pkt) || ntail > 0)
+ if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
+ return -ENOMEM;
+ if (skb_linearize(pkt))
+ return -ENOMEM;
+ dat_buf = (u8 *)(pkt->data);
+ __skb_put(pkt, tail_pad);
+ }
+
+ if (pkt_pad)
+ return pkt->len + tail_chop;
+ else
+ return pkt->len - tail_pad;
+}
+
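The tail-padding arithmetic in brcmf_sdio_txpkt_prep_sg() above rounds the packet up to the scatter-gather entry alignment and then to a whole SDIO block. The following standalone C mirrors just that two-step rounding so it can be checked by hand; the example lengths are arbitrary:

#include <stdio.h>

int main(void)
{
	unsigned int len = 1500, sg_align = 8, blksize = 512;
	unsigned int tail_chop = len % sg_align;
	unsigned int tail_pad = sg_align - tail_chop;	/* first round to sg alignment */

	tail_pad += blksize - (len + tail_pad) % blksize;	/* then to a block boundary */
	printf("chop %u bytes, pad %u bytes, padded len %u\n",
	       tail_chop, tail_pad, len + tail_pad);
	return 0;
}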
/**
* brcmf_sdio_txpkt_prep - packet preparation for transmit
* @bus: brcmf_sdio structure pointer
@@ -1806,24 +1946,16 @@ static int
brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
uint chan)
{
- u16 head_pad, tail_pad, tail_chop, head_align, sg_align;
- int ntail;
- struct sk_buff *pkt_next, *pkt_new;
+ u16 head_pad, head_align;
+ struct sk_buff *pkt_next;
u8 *dat_buf;
- unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+ int err;
struct brcmf_sdio_hdrinfo hd_info = {0};
/* SDIO ADMA requires at least 32 bit alignment */
head_align = 4;
- sg_align = 4;
- if (bus->sdiodev->pdata) {
- head_align = bus->sdiodev->pdata->sd_head_align > 4 ?
- bus->sdiodev->pdata->sd_head_align : 4;
- sg_align = bus->sdiodev->pdata->sd_sgentry_align > 4 ?
- bus->sdiodev->pdata->sd_sgentry_align : 4;
- }
- /* sg entry alignment should be a divisor of block size */
- WARN_ON(blksize % sg_align);
+ if (bus->sdiodev->pdata && bus->sdiodev->pdata->sd_head_align > 4)
+ head_align = bus->sdiodev->pdata->sd_head_align;
pkt_next = pktq->next;
dat_buf = (u8 *)(pkt_next->data);
@@ -1842,40 +1974,20 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
}
- /* Check tail padding */
- pkt_new = NULL;
- tail_chop = pkt_next->len % sg_align;
- tail_pad = sg_align - tail_chop;
- tail_pad += blksize - (pkt_next->len + tail_pad) % blksize;
- if (skb_tailroom(pkt_next) < tail_pad && pkt_next->len > blksize) {
- pkt_new = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
- if (pkt_new == NULL)
- return -ENOMEM;
- memcpy(pkt_new->data,
- pkt_next->data + pkt_next->len - tail_chop,
- tail_chop);
- *(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop;
- skb_trim(pkt_next, pkt_next->len - tail_chop);
- __skb_queue_after(pktq, pkt_next, pkt_new);
+ if (bus->sdiodev->sg_support && pktq->qlen > 1) {
+ err = brcmf_sdio_txpkt_prep_sg(bus->sdiodev, pktq,
+ pkt_next, chan);
+ if (err < 0)
+ return err;
+ hd_info.len = (u16)err;
} else {
- ntail = pkt_next->data_len + tail_pad -
- (pkt_next->end - pkt_next->tail);
- if (skb_cloned(pkt_next) || ntail > 0)
- if (pskb_expand_head(pkt_next, 0, ntail, GFP_ATOMIC))
- return -ENOMEM;
- if (skb_linearize(pkt_next))
- return -ENOMEM;
- dat_buf = (u8 *)(pkt_next->data);
- __skb_put(pkt_next, tail_pad);
+ hd_info.len = pkt_next->len;
}
- /* Now prep the header */
- if (pkt_new)
- hd_info.len = pkt_next->len + tail_chop;
- else
- hd_info.len = pkt_next->len - tail_pad;
hd_info.channel = chan;
hd_info.dat_offset = head_pad + bus->tx_hdrlen;
+
+ /* Now fill the header */
brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
if (BRCMF_BYTES_ON() &&
@@ -1908,8 +2020,8 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
skb_queue_walk_safe(pktq, pkt_next, tmp) {
dummy_flags = *(u32 *)(pkt_next->cb);
- if (dummy_flags & DUMMY_SKB_FLAG) {
- chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK;
+ if (dummy_flags & ALIGN_SKB_FLAG) {
+ chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
if (chop_len) {
pkt_prev = pkt_next->prev;
memcpy(pkt_prev->data + pkt_prev->len,
@@ -3037,69 +3149,43 @@ static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
return true;
}
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
-{
- if (bus->firmware->size < bus->fw_ptr + len)
- len = bus->firmware->size - bus->fw_ptr;
-
- memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
- bus->fw_ptr += len;
- return len;
-}
-
static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
{
+ const struct firmware *fw;
+ int err;
int offset;
- uint len;
- u8 *memblock = NULL, *memptr;
- int ret;
- u8 idx;
-
- brcmf_dbg(INFO, "Enter\n");
-
- ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
- &bus->sdiodev->func[2]->dev);
- if (ret) {
- brcmf_err("Fail to request firmware %d\n", ret);
- return ret;
- }
- bus->fw_ptr = 0;
-
- memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
- if (memblock == NULL) {
- ret = -ENOMEM;
- goto err;
- }
- if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
- memptr += (BRCMF_SDALIGN -
- ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
-
- offset = bus->ci->rambase;
-
- /* Download image */
- len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4);
- if (BRCMF_MAX_CORENUM != idx)
- memcpy(&bus->ci->rst_vec, memptr, sizeof(bus->ci->rst_vec));
- while (len) {
- ret = brcmf_sdio_ramrw(bus->sdiodev, true, offset, memptr, len);
- if (ret) {
+ int address;
+ int len;
+
+ fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
+ if (fw == NULL)
+ return -ENOENT;
+
+ if (brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4) !=
+ BRCMF_MAX_CORENUM)
+ memcpy(&bus->ci->rst_vec, fw->data, sizeof(bus->ci->rst_vec));
+
+ err = 0;
+ offset = 0;
+ address = bus->ci->rambase;
+ while (offset < fw->size) {
+ len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
+ fw->size - offset;
+ err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
+ (u8 *)&fw->data[offset], len);
+ if (err) {
brcmf_err("error %d on writing %d membytes at 0x%08x\n",
- ret, MEMBLOCK, offset);
- goto err;
+ err, len, address);
+ goto failure;
}
-
- offset += MEMBLOCK;
- len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
+ offset += len;
+ address += len;
}
-err:
- kfree(memblock);
-
- release_firmware(bus->firmware);
- bus->fw_ptr = 0;
+failure:
+ release_firmware(fw);
- return ret;
+ return err;
}
/*
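With the old brcmf_sdbrcm_get_image() staging buffer gone, the download loop above walks the firmware image directly in MEMBLOCK-sized chunks. A runnable stand-in for that loop; ram_write() is a placeholder for brcmf_sdio_ramrw() and the MEMBLOCK value is an assumption made for the sketch:

#include <stdio.h>
#include <string.h>

#define MEMBLOCK 2048	/* assumption: chunk size used for the transfer */

/* stand-in for brcmf_sdio_ramrw(): just copies into a flat RAM image */
static int ram_write(unsigned char *ram, int address,
		     const unsigned char *data, int len)
{
	memcpy(ram + address, data, len);
	return 0;
}

int main(void)
{
	static unsigned char fw_data[5000];	/* pretend firmware image */
	static unsigned char ram[8192];		/* pretend device RAM */
	int fw_size = sizeof(fw_data), rambase = 0;
	int offset = 0, err = 0;

	while (offset < fw_size) {
		int len = (offset + MEMBLOCK < fw_size) ? MEMBLOCK
							: fw_size - offset;

		err = ram_write(ram, rambase + offset, &fw_data[offset], len);
		if (err)
			break;
		offset += len;
	}
	printf("wrote %d of %d bytes, err=%d\n", offset, fw_size, err);
	return err;
}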
@@ -3111,7 +3197,8 @@ err:
* by two NULs.
*/
-static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
+static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
+ const struct firmware *nv)
{
char *varbuf;
char *dp;
@@ -3120,12 +3207,12 @@ static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
int ret = 0;
uint buf_len, n, len;
- len = bus->firmware->size;
+ len = nv->size;
varbuf = vmalloc(len);
if (!varbuf)
return -ENOMEM;
- memcpy(varbuf, bus->firmware->data, len);
+ memcpy(varbuf, nv->data, len);
dp = varbuf;
findNewline = false;
@@ -3177,18 +3264,16 @@ err:
static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
+ const struct firmware *nv;
int ret;
- ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
- &bus->sdiodev->func[2]->dev);
- if (ret) {
- brcmf_err("Fail to request nvram %d\n", ret);
- return ret;
- }
+ nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
+ if (nv == NULL)
+ return -ENOENT;
- ret = brcmf_process_nvram_vars(bus);
+ ret = brcmf_process_nvram_vars(bus, nv);
- release_firmware(bus->firmware);
+ release_firmware(nv);
return ret;
}
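As the comment above the parser says, the firmware expects the NVRAM variables as name=value pairs separated by NULs and terminated by two NULs. A standalone sketch of that flattening step; the extra filtering brcmf_process_nvram_vars() performs is omitted and the sample variables are made up:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "boardtype=0x0598\nmacaddr=00:90:4c:c5:12:38\n";
	size_t i, n = strlen(buf);

	for (i = 0; i < n; i++)
		if (buf[i] == '\n')
			buf[i] = '\0';	/* each pair now ends in a NUL */
	buf[n] = '\0';			/* second NUL terminates the list */

	for (i = 0; i < n; i += strlen(&buf[i]) + 1)
		printf("var: %s\n", &buf[i]);
	return 0;
}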
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index e679214b3c98..14bc24dc5bae 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -102,7 +102,8 @@ struct brcmf_event;
BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
- BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
+ BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
+ BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128)
#define BRCMF_ENUM_DEF(id, val) \
BRCMF_E_##id = (val),
@@ -114,6 +115,8 @@ enum brcmf_fweh_event_code {
};
#undef BRCMF_ENUM_DEF
+#define BRCMF_EVENTING_MASK_LEN DIV_ROUND_UP(BRCMF_E_LAST, 8)
+
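BRCMF_EVENTING_MASK_LEN above is just the number of bytes needed to hold one bit per firmware event code. The kernel's DIV_ROUND_UP macro expands as in this small example; the event count used here is illustrative only:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int n_events = 129;	/* e.g. one past the new PSTA_PRIMARY_INTF_IND code */
	printf("event mask needs %d bytes\n", DIV_ROUND_UP(n_events, 8));
	return 0;
}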
/* flags field values in struct brcmf_event_msg */
#define BRCMF_EVENT_MSG_LINK 0x01
#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 82f9140f3d35..d0cd0bf95c5a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -168,6 +168,7 @@ enum brcmf_fws_skb_state {
/**
* struct brcmf_skbuff_cb - control buffer associated with skbuff.
*
+ * @bus_flags: 2 bytes reserved for bus specific parameters
* @if_flags: holds interface index and packet related flags.
* @htod: host to device packet identifier (used in PKTTAG tlv).
* @state: transmit state of the packet.
@@ -177,6 +178,7 @@ enum brcmf_fws_skb_state {
* provides 48 bytes of storage so this structure should not exceed that.
*/
struct brcmf_skbuff_cb {
+ u16 bus_flags;
u16 if_flags;
u32 htod;
enum brcmf_fws_skb_state state;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index ca72177388b9..2096a14ef1fb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/bcma/bcma.h>
@@ -136,6 +137,8 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return false;
regdata = brcmf_sdio_regrl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
@@ -154,6 +157,8 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
bool ret;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return false;
regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
NULL);
@@ -261,6 +266,8 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
u32 regdata;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
/* if core is already in reset, just return */
regdata = brcmf_sdio_regrl(sdiodev,
@@ -304,6 +311,8 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
/*
* Must do the disable sequence first to work for
@@ -368,6 +377,8 @@ brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
u32 regdata;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
/* must disable first to work for arbitrary current core state */
brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits);
@@ -444,6 +455,9 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
NULL);
ci->chip = regdata & CID_ID_MASK;
ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+ if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+ ci->chiprev >= 2)
+ ci->chip = BCM4339_CHIP_ID;
ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
@@ -541,6 +555,20 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
ci->ramsize = 0xc0000;
ci->rambase = 0x180000;
break;
+ case BCM4339_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x2e084411;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18005000;
+ ci->c_inf[1].wrapbase = 0x18105000;
+ ci->c_inf[1].cib = 0x15004211;
+ ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+ ci->c_inf[2].base = 0x18002000;
+ ci->c_inf[2].wrapbase = 0x18102000;
+ ci->c_inf[2].cib = 0x04084411;
+ ci->ramsize = 0xc0000;
+ ci->rambase = 0x180000;
+ break;
default:
brcmf_err("chipid 0x%x is not supported\n", ci->chip);
return -ENODEV;
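The recognition hunks above key off the shared SDIO device id 0x4335 and promote the chip to BCM4339 when the chip revision is 2 or higher. A tiny standalone version of that disambiguation; the two *_CHIP_ID values here are assumptions made for the sketch, not taken from the driver headers:

#include <stdio.h>

#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
#define BCM4335_CHIP_ID 0x4335	/* assumed value for the sketch */
#define BCM4339_CHIP_ID 0x4339	/* assumed value for the sketch */

static int resolve_chip(int sdio_device, int chiprev)
{
	/* 4335 and 4339 report the same SDIO device id; rev >= 2 means 4339 */
	if (sdio_device == SDIO_DEVICE_ID_BROADCOM_4335_4339 && chiprev >= 2)
		return BCM4339_CHIP_ID;
	return sdio_device;
}

int main(void)
{
	printf("chip 0x%x\n", resolve_chip(0x4335, 2));
	return 0;
}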
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
index 83c041f1bf4a..507c61c991fa 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -54,6 +54,14 @@
#define BRCMF_MAX_CORENUM 6
+/* SDIO device ID */
+#define SDIO_DEVICE_ID_BROADCOM_43143 43143
+#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
+#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
+#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
+#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
+
struct chip_core_info {
u16 id;
u16 rev;
@@ -215,17 +223,16 @@ struct sdpcmd_regs {
u16 PAD[0x80];
};
-extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct chip_info **ci_ptr, u32 regs);
-extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
-extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci,
- u32 drivestrength);
-extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
-extern void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci);
-extern bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, char *nvram_dat,
- uint nvram_sz);
+int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info **ci_ptr, u32 regs);
+void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u32 drivestrength);
+u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci);
+bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, char *nvram_dat,
+ uint nvram_sz);
#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
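The remaining hunks in this header, and the brcmsmac headers further down, are a mechanical cleanup: extern is redundant on function declarations in C, so dropping it and re-wrapping the arguments changes nothing for callers or the generated code. A short demonstration that both declaration forms are accepted and equivalent, using a toy function rather than the driver's:

#include <stdio.h>

extern int add(int a, int b);	/* 'extern' is already the default here... */
int add(int a, int b);		/* ...so this redeclaration is identical */

int add(int a, int b)
{
	return a + b;
}

int main(void)
{
	printf("%d\n", add(2, 3));
	return 0;
}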
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 2b5407f002e5..bfadcb836b6d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -178,21 +178,25 @@ struct brcmf_sdio_dev {
bool irq_en; /* irq enable flags */
spinlock_t irq_en_lock;
bool irq_wake; /* irq wake enable flags */
+ bool sg_support;
+ uint max_request_size;
+ ushort max_segment_count;
+ uint max_segment_size;
};
/* Register/deregister interrupt handler. */
-extern int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
/* sdio device register access interface */
-extern u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
- u8 data, int *ret);
-extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
- u32 data, int *ret);
-extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
- void *data, bool write);
+u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
+ int *ret);
+void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+ int *ret);
+int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ void *data, bool write);
/* Buffer transfer to/from device (client) core via cmd53.
* fn: function number
@@ -206,22 +210,18 @@ extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
* Returns 0 or error code.
* NOTE: Async operation is not currently supported.
*/
-extern int
-brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq);
-extern int
-brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes);
-
-extern int
-brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff *pkt);
-extern int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes);
-extern int
-brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq);
+int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq);
+int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes);
+
+int brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt);
+int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes);
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq,
+ uint totlen);
/* Flags bits */
@@ -237,46 +237,43 @@ brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
* nbytes: number of bytes to transfer to/from buf
* Returns 0 or error code.
*/
-extern int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw,
- u32 addr, u8 *buf, uint nbytes);
-extern int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write,
- u32 address, u8 *data, uint size);
+int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
+ u8 *buf, uint nbytes);
+int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+ u8 *data, uint size);
/* Issue an abort to the specified function */
-extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
/* platform specific/high level functions */
-extern int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
/* attach, return handler on success, NULL if failed.
* The handler shall be provided by all subsequent calls. No local cache
* cfghdl points to the starting address of pci device mapped memory
*/
-extern int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
/* read or write one byte using cmd52 */
-extern int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw,
- uint fnc, uint addr, u8 *byte);
+int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+ uint addr, u8 *byte);
/* read or write 2/4 bytes using cmd53 */
-extern int
-brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
- uint rw, uint fnc, uint addr,
- u32 *word, uint nbyte);
+int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+ uint addr, u32 *word, uint nbyte);
/* Watchdog timer interface for pm ops */
-extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
- bool enable);
+void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable);
-extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdbrcm_disconnect(void *ptr);
-extern void brcmf_sdbrcm_isr(void *arg);
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdbrcm_disconnect(void *ptr);
+void brcmf_sdbrcm_isr(void *arg);
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
-extern void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
- wait_queue_head_t *wq);
-extern bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
+void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
+ wait_queue_head_t *wq);
+bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
#endif /* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
index bc2917112899..3c67529b9074 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -78,13 +78,15 @@ TRACE_EVENT(brcmf_hexdump,
TP_ARGS(data, len),
TP_STRUCT__entry(
__field(unsigned long, len)
+ __field(unsigned long, addr)
__dynamic_array(u8, hdata, len)
),
TP_fast_assign(
__entry->len = len;
+ __entry->addr = (unsigned long)data;
memcpy(__get_dynamic_array(hdata), data, len);
),
- TP_printk("hexdump [length=%lu]", __entry->len)
+ TP_printk("hexdump [addr=%lx, length=%lu]", __entry->addr, __entry->len)
);
TRACE_EVENT(brcmf_bdchdr,
@@ -108,6 +110,23 @@ TRACE_EVENT(brcmf_bdchdr,
TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
);
+TRACE_EVENT(brcmf_sdpcm_hdr,
+ TP_PROTO(bool tx, void *data),
+ TP_ARGS(tx, data),
+ TP_STRUCT__entry(
+ __field(u8, tx)
+ __field(u16, len)
+ __array(u8, hdr, 12)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->hdr, data, 12);
+ __entry->len = __entry->hdr[0] | (__entry->hdr[1] << 8);
+ __entry->tx = tx ? 1 : 0;
+ ),
+ TP_printk("sdpcm: %s len %u, seq %d", __entry->tx ? "TX" : "RX",
+ __entry->len, __entry->hdr[4])
+);
+
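The new tracepoint above snapshots the first 12 header bytes and decodes the frame length from the first two bytes (little-endian) and the sequence number from byte 4, matching what brcmf_sdio_hdparse() reads earlier in this patch. A standalone decode of a made-up header:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 12 header bytes as the tracepoint would capture them */
	uint8_t hdr[12] = { 0x34, 0x12,		/* frame length 0x1234, little-endian */
			    0xcb, 0xed,		/* inverted-length checksum, as read by hdparse */
			    0x07, 0, 0, 0, 0, 0, 0, 0 };	/* byte 4: sequence number */
	unsigned int len = hdr[0] | (hdr[1] << 8);

	printf("sdpcm: len %u, seq %d\n", len, hdr[4]);
	return 0;
}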
#ifdef CONFIG_BRCM_TRACING
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index f4aea47e0730..422f44c63175 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -435,7 +435,6 @@ static void brcmf_usb_rx_complete(struct urb *urb)
struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
struct brcmf_usbdev_info *devinfo = req->devinfo;
struct sk_buff *skb;
- struct sk_buff_head skbq;
brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
brcmf_usb_del_fromq(devinfo, req);
@@ -450,10 +449,8 @@ static void brcmf_usb_rx_complete(struct urb *urb)
}
if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
- skb_queue_head_init(&skbq);
- skb_queue_tail(&skbq, skb);
skb_put(skb, urb->actual_length);
- brcmf_rx_frames(devinfo->dev, &skbq);
+ brcmf_rx_frame(devinfo->dev, skb);
brcmf_usb_rx_refill(devinfo, req);
} else {
brcmu_pkt_buf_free_skb(skb);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index a8a267b5b87a..2d08c155c23b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -172,19 +172,19 @@ struct si_info {
/* AMBA Interconnect exported externs */
-extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
+u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
/* === exported functions === */
-extern struct si_pub *ai_attach(struct bcma_bus *pbus);
-extern void ai_detach(struct si_pub *sih);
-extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
-extern void ai_clkctl_init(struct si_pub *sih);
-extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
-extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
-extern bool ai_deviceremoved(struct si_pub *sih);
+struct si_pub *ai_attach(struct bcma_bus *pbus);
+void ai_detach(struct si_pub *sih);
+uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
+void ai_clkctl_init(struct si_pub *sih);
+u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
+bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
+bool ai_deviceremoved(struct si_pub *sih);
/* Enable Ex-PA for 4313 */
-extern void ai_epa_4313war(struct si_pub *sih);
+void ai_epa_4313war(struct si_pub *sih);
static inline u32 ai_get_cccaps(struct si_pub *sih)
{
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
index 73d01e586109..03bdcf29bd50 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
@@ -37,17 +37,17 @@ struct brcms_ampdu_session {
u16 dma_len;
};
-extern void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
- struct brcms_c_info *wlc);
-extern int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
- struct sk_buff *p);
-extern void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
+void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
+ struct brcms_c_info *wlc);
+int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
+ struct sk_buff *p);
+void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
-extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
-extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
- struct sk_buff *p, struct tx_status *txs);
-extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
+struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
+void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
+void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+ struct sk_buff *p, struct tx_status *txs);
+void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
+void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
#endif /* _BRCM_AMPDU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/antsel.h b/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
index 97ea3881a8ec..a3d487ab1964 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
@@ -17,13 +17,11 @@
#ifndef _BRCM_ANTSEL_H_
#define _BRCM_ANTSEL_H_
-extern struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
-extern void brcms_c_antsel_detach(struct antsel_info *asi);
-extern void brcms_c_antsel_init(struct antsel_info *asi);
-extern void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef,
- bool sel,
- u8 id, u8 fbid, u8 *antcfg,
- u8 *fbantcfg);
-extern u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
+struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
+void brcms_c_antsel_detach(struct antsel_info *asi);
+void brcms_c_antsel_init(struct antsel_info *asi);
+void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
+ u8 id, u8 fbid, u8 *antcfg, u8 *fbantcfg);
+u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
#endif /* _BRCM_ANTSEL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.h b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
index 006483a0abe6..39dd3a5b2979 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
@@ -32,20 +32,16 @@
#define BRCMS_DFS_EU (BRCMS_DFS_TPC | BRCMS_RADAR_TYPE_EU) /* Flag for DFS EU */
-extern struct brcms_cm_info *
-brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
+struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
-extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
+void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
-extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
- u16 chspec);
+bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec);
-extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
- u16 chanspec,
- struct txpwr_limits *txpwr);
-extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
- u16 chanspec,
- u8 local_constraint_qdbm);
-extern void brcms_c_regd_init(struct brcms_c_info *wlc);
+void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
+ struct txpwr_limits *txpwr);
+void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
+ u8 local_constraint_qdbm);
+void brcms_c_regd_init(struct brcms_c_info *wlc);
#endif /* _WLC_CHANNEL_H */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 4090032e81a2..198053dfc310 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -88,26 +88,26 @@ struct brcms_info {
};
/* misc callbacks */
-extern void brcms_init(struct brcms_info *wl);
-extern uint brcms_reset(struct brcms_info *wl);
-extern void brcms_intrson(struct brcms_info *wl);
-extern u32 brcms_intrsoff(struct brcms_info *wl);
-extern void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
-extern int brcms_up(struct brcms_info *wl);
-extern void brcms_down(struct brcms_info *wl);
-extern void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
- bool state, int prio);
-extern bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
+void brcms_init(struct brcms_info *wl);
+uint brcms_reset(struct brcms_info *wl);
+void brcms_intrson(struct brcms_info *wl);
+u32 brcms_intrsoff(struct brcms_info *wl);
+void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
+int brcms_up(struct brcms_info *wl);
+void brcms_down(struct brcms_info *wl);
+void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
+ bool state, int prio);
+bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
/* timer functions */
-extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
- void (*fn) (void *arg), void *arg,
- const char *name);
-extern void brcms_free_timer(struct brcms_timer *timer);
-extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
-extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_dpc(unsigned long data);
-extern void brcms_timer(struct brcms_timer *t);
-extern void brcms_fatal_error(struct brcms_info *wl);
+struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
+ void (*fn) (void *arg), void *arg,
+ const char *name);
+void brcms_free_timer(struct brcms_timer *timer);
+void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
+bool brcms_del_timer(struct brcms_timer *timer);
+void brcms_dpc(unsigned long data);
+void brcms_timer(struct brcms_timer *t);
+void brcms_fatal_error(struct brcms_info *wl);
#endif /* _BRCM_MAC80211_IF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 4608e0eb1493..8138f1cff4e5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -1906,14 +1906,14 @@ static void brcms_c_get_macaddr(struct brcms_hardware *wlc_hw, u8 etheraddr[ETH_
/* If macaddr exists, use it (Sromrev4, CIS, ...). */
if (!is_zero_ether_addr(sprom->il0mac)) {
- memcpy(etheraddr, sprom->il0mac, 6);
+ memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
return;
}
if (wlc_hw->_nbands > 1)
- memcpy(etheraddr, sprom->et1mac, 6);
+ memcpy(etheraddr, sprom->et1mac, ETH_ALEN);
else
- memcpy(etheraddr, sprom->il0mac, 6);
+ memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
}
/* power both the pll and external oscillator on/off */
@@ -5695,7 +5695,7 @@ static bool brcms_c_chipmatch_pci(struct bcma_device *core)
return true;
if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID))
return true;
- if (device == BCM4313_D11N2G_ID)
+ if (device == BCM4313_D11N2G_ID || device == BCM4313_CHIP_ID)
return true;
if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
return true;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index b5d7a38b53fe..c4d135cff04a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -616,66 +616,54 @@ struct brcms_bss_cfg {
struct brcms_bss_info *current_bss;
};
-extern int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
- struct sk_buff *p);
-extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
- uint *blocks);
-
-extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
-extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
- uint mac_len);
-extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
- u32 rspec,
- bool use_rspec, u16 mimo_ctlchbw);
-extern u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
- u32 rts_rate,
- u32 frame_rate,
- u8 rts_preamble_type,
- u8 frame_preamble_type, uint frame_len,
- bool ba);
-extern void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
- struct ieee80211_sta *sta,
- void (*dma_callback_fn));
-extern void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
-extern int brcms_c_set_nmode(struct brcms_c_info *wlc);
-extern void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
- u32 bcn_rate);
-extern void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw,
- u8 antsel_type);
-extern void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw,
- u16 chanspec,
- bool mute, struct txpwr_limits *txpwr);
-extern void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset,
- u16 v);
-extern u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
-extern void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask,
- u16 val, int bands);
-extern void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
-extern void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
-extern void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
-extern void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
- u32 override_bit);
-extern void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
- u32 override_bit);
-extern void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw,
- int offset, int len, void *buf);
-extern u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
-extern void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw,
- uint offset, const void *buf, int len,
- u32 sel);
-extern void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
- void *buf, int len, u32 sel);
-extern void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
-extern u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
-extern void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
-extern void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
-extern void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw,
- u8 stf_mode);
-extern void brcms_c_init_scb(struct scb *scb);
+int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p);
+int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
+ uint *blocks);
+
+int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
+u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec, uint mac_len);
+u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, u32 rspec,
+ bool use_rspec, u16 mimo_ctlchbw);
+u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
+ u32 rts_rate, u32 frame_rate,
+ u8 rts_preamble_type, u8 frame_preamble_type,
+ uint frame_len, bool ba);
+void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
+ struct ieee80211_sta *sta, void (*dma_callback_fn));
+void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
+int brcms_c_set_nmode(struct brcms_c_info *wlc);
+void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc, u32 bcn_rate);
+void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type);
+void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
+ bool mute, struct txpwr_limits *txpwr);
+void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, u16 v);
+u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
+void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, u16 val,
+ int bands);
+void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
+void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
+void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
+void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
+void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
+void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
+ u32 override_bit);
+void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
+ u32 override_bit);
+void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset,
+ int len, void *buf);
+u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
+void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, uint offset,
+ const void *buf, int len, u32 sel);
+void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
+ void *buf, int len, u32 sel);
+void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
+u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
+void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
+void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
+void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode);
+void brcms_c_init_scb(struct scb *scb);
#endif /* _BRCM_MAIN_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
index e34a71e7d242..4d3734f48d9c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
@@ -179,121 +179,106 @@ struct shared_phy_params {
};
-extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
-extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
- struct bcma_device *d11core,
- int bandtype, struct wiphy *wiphy);
-extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
-
-extern bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
- u16 *phyrev, u16 *radioid,
- u16 *radiover);
-extern bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
-extern u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
-extern void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
-extern int wlc_phy_down(struct brcms_phy_pub *ppi);
-extern u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
-extern void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
-extern void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
-
-extern void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi,
- u16 chanspec);
-extern u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi,
- u16 newch);
-extern u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
-
-extern int wlc_phy_rssi_compute(struct brcms_phy_pub *pih,
- struct d11rxhdr *rxh);
-extern void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
-extern void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
-
-extern void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
-
-extern void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
-extern void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
-
-
-extern void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
-
-extern void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
- bool wide_filter);
-extern void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
- struct brcms_chanvec *channels);
-extern u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi,
- uint band);
-
-extern void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan,
- u8 *_min_, u8 *_max_, int rate);
-extern void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi,
- uint chan, u8 *_max_, u8 *_min_);
-extern void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi,
- uint band, s32 *, s32 *, u32 *);
-extern void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi,
- struct txpwr_limits *,
- u16 chanspec);
-extern int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm,
- bool *override);
-extern int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm,
- bool override);
-extern void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
- struct txpwr_limits *);
-extern bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi,
- bool hwpwrctrl);
-extern u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
-extern u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain,
- u8 rxchain);
-extern void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain,
- u8 rxchain);
-extern void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain,
- u8 *rxchain);
-extern u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
-extern s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih,
- u16 chanspec);
-extern void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
-
-extern void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
-extern void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
-extern void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
-extern void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
-
-extern void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
-extern void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
-extern void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
-extern void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
-
-extern void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
-
-extern void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
- struct tx_power *power, uint channel);
-
-extern void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
-extern bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi,
- u8 txpwr_percent);
-extern void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
-extern void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih,
- bool bf_preempt);
-extern void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
-
-extern void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
-
-extern void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
-extern void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
-
-extern const u8 *wlc_phy_get_ofdm_rate_lookup(void);
-
-extern s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
- u8 mcs_offset);
-extern s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
+struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
+struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
+ struct bcma_device *d11core, int bandtype,
+ struct wiphy *wiphy);
+void wlc_phy_detach(struct brcms_phy_pub *ppi);
+
+bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
+ u16 *phyrev, u16 *radioid, u16 *radiover);
+bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
+u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
+
+void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
+void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
+int wlc_phy_down(struct brcms_phy_pub *ppi);
+u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
+void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
+void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
+
+void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, u16 chanspec);
+u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
+void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, u16 newch);
+u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
+void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
+
+int wlc_phy_rssi_compute(struct brcms_phy_pub *pih, struct d11rxhdr *rxh);
+void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
+void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
+bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
+
+void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
+
+void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
+void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
+
+
+void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
+
+void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
+ bool wide_filter);
+void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
+ struct brcms_chanvec *channels);
+u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band);
+
+void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan, u8 *_min_,
+ u8 *_max_, int rate);
+void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, uint chan,
+ u8 *_max_, u8 *_min_);
+void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi, uint band,
+ s32 *, s32 *, u32 *);
+void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *,
+ u16 chanspec);
+int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override);
+int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override);
+void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
+ struct txpwr_limits *);
+bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
+void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl);
+u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
+u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
+bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
+
+void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, u8 *rxchain);
+u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
+s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, u16 chanspec);
+void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
+
+void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
+void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
+void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
+void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
+
+void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
+void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
+void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
+void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
+
+void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
+
+void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
+ struct tx_power *power, uint channel);
+
+void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
+bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
+void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi, u8 txpwr_percent);
+void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
+void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih, bool bf_preempt);
+void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
+
+void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
+
+void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
+void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
+
+const u8 *wlc_phy_get_ofdm_rate_lookup(void);
+
+s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
+ u8 mcs_offset);
+s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
#endif /* _BRCM_PHY_HAL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
index 1dc767c31653..4960f7d26804 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
@@ -910,113 +910,103 @@ struct lcnphy_radio_regs {
u8 do_init_g;
};
-extern u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
-extern void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
-
-extern u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
-extern void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask,
- u16 val);
-extern void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
-
-extern void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-
-extern void wlc_phyreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_phyreg_exit(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_exit(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_read_table(struct brcms_phy *pi,
- const struct phytbl_info *ptbl_info,
- u16 tblAddr, u16 tblDataHi,
- u16 tblDatalo);
-extern void wlc_phy_write_table(struct brcms_phy *pi,
- const struct phytbl_info *ptbl_info,
- u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
-extern void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id,
- uint tbl_offset, u16 tblAddr, u16 tblDataHi,
- u16 tblDataLo);
-extern void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
-
-extern void write_phy_channel_reg(struct brcms_phy *pi, uint val);
-extern void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
-
-extern u8 wlc_phy_nbits(s32 value);
-extern void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
-
-extern uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
- struct radio_20xx_regs *radioregs);
-extern uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
- const struct radio_regs *radioregs,
- u16 core_offset);
-
-extern void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
-
-extern void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
-extern void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real,
- s32 *eps_imag);
-
-extern void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
-extern void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
-
-extern bool wlc_phy_attach_nphy(struct brcms_phy *pi);
-extern bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_init_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi,
- u16 chanspec);
-extern void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi,
- u16 chanspec);
-extern void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi,
- u16 chanspec);
-extern int wlc_phy_channel2freq(uint channel);
-extern int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
-extern int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
-
-extern void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
-extern s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
-
-extern void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
-extern void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
-extern void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
-extern void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
-extern void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz,
- u16 max_val, bool iqcalmode);
-
-extern void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
- u8 *max_pwr, u8 rate_id);
-extern void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
- u8 rate_mcs_end,
- u8 rate_ofdm_start);
-extern void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power,
- u8 rate_ofdm_start,
- u8 rate_ofdm_end,
- u8 rate_mcs_start);
-
-extern u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
-extern s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
-extern void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
-extern void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
-extern void wlc_2064_vco_cal(struct brcms_phy *pi);
-
-extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
+u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
+void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+
+u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
+void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
+
+void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+
+void wlc_phyreg_enter(struct brcms_phy_pub *pih);
+void wlc_phyreg_exit(struct brcms_phy_pub *pih);
+void wlc_radioreg_enter(struct brcms_phy_pub *pih);
+void wlc_radioreg_exit(struct brcms_phy_pub *pih);
+
+void wlc_phy_read_table(struct brcms_phy *pi,
+ const struct phytbl_info *ptbl_info,
+ u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_write_table(struct brcms_phy *pi,
+ const struct phytbl_info *ptbl_info,
+ u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
+ u16 tblAddr, u16 tblDataHi, u16 tblDataLo);
+void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
+
+void write_phy_channel_reg(struct brcms_phy *pi, uint val);
+void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
+
+u8 wlc_phy_nbits(s32 value);
+void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
+
+uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
+ struct radio_20xx_regs *radioregs);
+uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
+ const struct radio_regs *radioregs,
+ u16 core_offset);
+
+void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
+
+void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
+void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag);
+
+void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
+void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
+
+bool wlc_phy_attach_nphy(struct brcms_phy *pi);
+bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_init_nphy(struct brcms_phy *pi);
+void wlc_phy_init_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi, u16 chanspec);
+int wlc_phy_channel2freq(uint channel);
+int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
+int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
+
+void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
+s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
+
+void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
+void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
+void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
+
+void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
+void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
+void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
+void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,
+ bool iqcalmode);
+
+void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
+ u8 *max_pwr, u8 rate_id);
+void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
+ u8 rate_mcs_end, u8 rate_ofdm_start);
+void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power, u8 rate_ofdm_start,
+ u8 rate_ofdm_end, u8 rate_mcs_start);
+
+u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
+s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
+void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
+void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
+void wlc_2064_vco_cal(struct brcms_phy *pi);
+
+void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
#define LCNPHY_TBL_ID_PAPDCOMPDELTATBL 0x18
#define LCNPHY_TX_POWER_TABLE_SIZE 128
@@ -1030,26 +1020,24 @@ extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
#define LCNPHY_TX_PWR_CTRL_TEMPBASED 0xE001
-extern void wlc_lcnphy_write_table(struct brcms_phy *pi,
- const struct phytbl_info *pti);
-extern void wlc_lcnphy_read_table(struct brcms_phy *pi,
- struct phytbl_info *pti);
-extern void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
-extern void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
-extern void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
-extern u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
-extern void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0,
- u8 *eq0, u8 *fi0, u8 *fq0);
-extern void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
-extern void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
-extern bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
-extern s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
-extern void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr,
- s8 *cck_pwr);
-extern void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
-
-extern s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
+void wlc_lcnphy_write_table(struct brcms_phy *pi,
+ const struct phytbl_info *pti);
+void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti);
+void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
+void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
+void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
+u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
+void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0, u8 *eq0, u8 *fi0,
+ u8 *fq0);
+void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
+void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
+bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
+s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
+void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr, s8 *cck_pwr);
+void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
+
+s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
#define NPHY_MAX_HPVGA1_INDEX 10
#define NPHY_DEF_HPVGA1_INDEXLIMIT 7
@@ -1060,9 +1048,8 @@ struct phy_iq_est {
u32 q_pwr;
};
-extern void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi,
- bool enable);
-extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
+void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi, bool enable);
+void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
#define wlc_phy_write_table_nphy(pi, pti) \
wlc_phy_write_table(pi, pti, 0x72, 0x74, 0x73)
@@ -1076,10 +1063,10 @@ extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
#define wlc_nphy_table_data_write(pi, w, v) \
wlc_phy_table_data_write((pi), (w), (v))
-extern void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o,
- u32 w, void *d);
-extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
- u32, const void *);
+void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o, u32 w,
+ void *d);
+void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32, u32,
+ const void *);
#define PHY_IPA(pi) \
((pi->ipa2g_on && CHSPEC_IS2G(pi->radio_chanspec)) || \
@@ -1089,73 +1076,67 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
(void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
-extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
-extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
-extern void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
-
-extern u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
-extern void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
-
-extern void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
-
-extern void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
-extern s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
-
-extern u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
-
-extern void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
- u16 num_samps, u8 wait_time,
- u8 wait_for_crs);
-
-extern void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
- struct nphy_iq_comp *comp);
-extern void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
-
-extern void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih,
- u8 rxcore_bitmask);
-extern u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
-extern void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
-extern u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
-
-extern struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
-extern int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
- struct nphy_txgains target_gain,
- bool full, bool m);
-extern int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi,
- struct nphy_txgains target_gain,
- u8 type, bool d);
-extern void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
- s8 txpwrindex, bool res);
-extern void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
-extern int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
- s32 *rssi_buf, u8 nsamps);
-extern void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
-extern int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi,
- s32 dBm_targetpower, bool debug);
-extern int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
- u8 mode, u8, bool);
-extern void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
-extern void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
- u8 num_samps);
-extern void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
-
-extern int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi,
- struct d11rxhdr *rxh);
+void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
+void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
+void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
+
+u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
+void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
+
+void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
+
+void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
+s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
+
+u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
+
+void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
+ u16 num_samps, u8 wait_time, u8 wait_for_crs);
+
+void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
+ struct nphy_iq_comp *comp);
+void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
+
+void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask);
+u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
+
+void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
+void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
+u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
+
+struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
+int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
+ struct nphy_txgains target_gain, bool full, bool m);
+int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
+ u8 type, bool d);
+void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
+ s8 txpwrindex, bool res);
+void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
+int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
+ s32 *rssi_buf, u8 nsamps);
+void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
+int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi, s32 dBm_targetpower,
+ bool debug);
+int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val, u8 mode,
+ u8, bool);
+void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
+void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
+ u8 num_samps);
+void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
+
+int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh);
#define NPHY_TESTPATTERN_BPHY_EVM 0
#define NPHY_TESTPATTERN_BPHY_RFCS 1
-extern void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
+void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
void wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset,
s8 *ofdmoffset);
-extern s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi,
- u16 chanspec);
+s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, u16 chanspec);
-extern bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
+bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
#endif /* _BRCM_PHY_INT_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
index 2c5b66b75970..dd8774717ade 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
@@ -124,56 +124,49 @@
struct brcms_phy;
-extern struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
- struct brcms_info *wl,
- struct brcms_c_info *wlc);
-extern void wlc_phy_shim_detach(struct phy_shim_info *physhim);
+struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
+ struct brcms_info *wl,
+ struct brcms_c_info *wlc);
+void wlc_phy_shim_detach(struct phy_shim_info *physhim);
/* PHY to WL utility functions */
-extern struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn) (struct brcms_phy *pi),
- void *arg, const char *name);
-extern void wlapi_free_timer(struct wlapi_timer *t);
-extern void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
-extern bool wlapi_del_timer(struct wlapi_timer *t);
-extern void wlapi_intrson(struct phy_shim_info *physhim);
-extern u32 wlapi_intrsoff(struct phy_shim_info *physhim);
-extern void wlapi_intrsrestore(struct phy_shim_info *physhim,
- u32 macintmask);
-
-extern void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset,
- u16 v);
-extern u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
-extern void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx,
- u16 mask, u16 val, int bands);
-extern void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
-extern void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
-extern void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
-extern void wlapi_enable_mac(struct phy_shim_info *physhim);
-extern void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask,
- u32 val);
-extern void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
-extern void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
-extern void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *
- physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *
- physhim);
-extern void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
- int len, void *buf);
-extern u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim,
- u8 rate);
-extern void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
-extern void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint,
- void *buf, int, u32 sel);
-extern void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint,
- const void *buf, int, u32);
-
-extern void wlapi_high_update_phy_mode(struct phy_shim_info *physhim,
- u32 phy_mode);
-extern u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
+struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+ void (*fn)(struct brcms_phy *pi),
+ void *arg, const char *name);
+void wlapi_free_timer(struct wlapi_timer *t);
+void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
+bool wlapi_del_timer(struct wlapi_timer *t);
+void wlapi_intrson(struct phy_shim_info *physhim);
+u32 wlapi_intrsoff(struct phy_shim_info *physhim);
+void wlapi_intrsrestore(struct phy_shim_info *physhim, u32 macintmask);
+
+void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset, u16 v);
+u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
+void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx, u16 mask, u16 val,
+ int bands);
+void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
+void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
+void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
+void wlapi_enable_mac(struct phy_shim_info *physhim);
+void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask, u32 val);
+void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
+void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
+void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *physhim);
+void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
+ int len, void *buf);
+u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim, u8 rate);
+void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
+void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint, void *buf,
+ int, u32 sel);
+void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint, const void *buf,
+ int, u32);
+
+void wlapi_high_update_phy_mode(struct phy_shim_info *physhim, u32 phy_mode);
+u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
#endif /* _BRCM_PHY_SHIM_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 20e2012d5a3a..a014bbc4f935 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -20,7 +20,7 @@
#include "types.h"
-extern u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
-extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
+u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
+u32 si_pmu_measure_alpclk(struct si_pub *sih);
#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index d36ea5e1cc49..4da38cb4f318 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -266,83 +266,76 @@ struct brcms_antselcfg {
};
/* common functions for every port */
-extern struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
- bool piomode, uint *perr);
-extern uint brcms_c_detach(struct brcms_c_info *wlc);
-extern int brcms_c_up(struct brcms_c_info *wlc);
-extern uint brcms_c_down(struct brcms_c_info *wlc);
-
-extern bool brcms_c_chipmatch(struct bcma_device *core);
-extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
-extern void brcms_c_reset(struct brcms_c_info *wlc);
-
-extern void brcms_c_intrson(struct brcms_c_info *wlc);
-extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
-extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
-extern bool brcms_c_intrsupd(struct brcms_c_info *wlc);
-extern bool brcms_c_isr(struct brcms_c_info *wlc);
-extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
-extern bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc,
- struct sk_buff *sdu,
- struct ieee80211_hw *hw);
-extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
-extern void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx,
- int val);
-extern int brcms_c_get_header_len(void);
-extern void brcms_c_set_addrmatch(struct brcms_c_info *wlc,
- int match_reg_offset,
- const u8 *addr);
-extern void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
- const struct ieee80211_tx_queue_params *arg,
- bool suspend);
-extern struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
- struct ieee80211_sta *sta, u16 tid);
-extern void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
- u8 ba_wsize, uint max_rx_ampdu_bytes);
-extern int brcms_c_module_register(struct brcms_pub *pub,
- const char *name, struct brcms_info *hdl,
- int (*down_fn)(void *handle));
-extern int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
- struct brcms_info *hdl);
-extern void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
-extern void brcms_c_enable_mac(struct brcms_c_info *wlc);
-extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
-extern void brcms_c_scan_start(struct brcms_c_info *wlc);
-extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
-extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
-extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
-extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
+struct brcms_c_info *brcms_c_attach(struct brcms_info *wl,
+ struct bcma_device *core, uint unit,
+ bool piomode, uint *perr);
+uint brcms_c_detach(struct brcms_c_info *wlc);
+int brcms_c_up(struct brcms_c_info *wlc);
+uint brcms_c_down(struct brcms_c_info *wlc);
+
+bool brcms_c_chipmatch(struct bcma_device *core);
+void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
+void brcms_c_reset(struct brcms_c_info *wlc);
+
+void brcms_c_intrson(struct brcms_c_info *wlc);
+u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
+void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
+bool brcms_c_intrsupd(struct brcms_c_info *wlc);
+bool brcms_c_isr(struct brcms_c_info *wlc);
+bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
+bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
+ struct ieee80211_hw *hw);
+bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
+void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val);
+int brcms_c_get_header_len(void);
+void brcms_c_set_addrmatch(struct brcms_c_info *wlc, int match_reg_offset,
+ const u8 *addr);
+void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
+ const struct ieee80211_tx_queue_params *arg,
+ bool suspend);
+struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
+void brcms_c_ampdu_flush(struct brcms_c_info *wlc, struct ieee80211_sta *sta,
+ u16 tid);
+void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
+ u8 ba_wsize, uint max_rx_ampdu_bytes);
+int brcms_c_module_register(struct brcms_pub *pub, const char *name,
+ struct brcms_info *hdl,
+ int (*down_fn)(void *handle));
+int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
+ struct brcms_info *hdl);
+void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
+void brcms_c_enable_mac(struct brcms_c_info *wlc);
+void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
+void brcms_c_scan_start(struct brcms_c_info *wlc);
+void brcms_c_scan_stop(struct brcms_c_info *wlc);
+int brcms_c_get_curband(struct brcms_c_info *wlc);
+int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
+int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
+void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
struct brcm_rateset *currs);
-extern int brcms_c_set_rateset(struct brcms_c_info *wlc,
- struct brcm_rateset *rs);
-extern int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
-extern u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
-extern void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
+int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs);
+int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
+u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
+void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
s8 sslot_override);
-extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc,
- u8 interval);
-extern u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
-extern void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
-extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
-extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
-extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
-extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
-extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
-extern void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr,
- const u8 *bssid, u8 *ssid, size_t ssid_len);
-extern void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_update_beacon(struct brcms_c_info *wlc);
-extern void brcms_c_set_new_beacon(struct brcms_c_info *wlc,
- struct sk_buff *beacon, u16 tim_offset,
- u16 dtim_period);
-extern void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
- struct sk_buff *probe_resp);
-extern void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
-extern void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid,
- size_t ssid_len);
+void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval);
+u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
+void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
+int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
+int brcms_c_get_tx_power(struct brcms_c_info *wlc);
+bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
+void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr, const u8 *bssid,
+ u8 *ssid, size_t ssid_len);
+void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_update_beacon(struct brcms_c_info *wlc);
+void brcms_c_set_new_beacon(struct brcms_c_info *wlc, struct sk_buff *beacon,
+ u16 tim_offset, u16 dtim_period);
+void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
+ struct sk_buff *probe_resp);
+void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
+void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid, size_t ssid_len);
#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.h b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
index 980d578825cc..5bb88b78ed64 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/rate.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
@@ -216,34 +216,30 @@ static inline u8 cck_phy2mac_rate(u8 signal)
/* sanitize, and sort a rateset with the basic bit(s) preserved, validate
* rateset */
-extern bool
-brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
- const struct brcms_c_rateset *hw_rs,
- bool check_brate, u8 txstreams);
+bool brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
+ const struct brcms_c_rateset *hw_rs,
+ bool check_brate, u8 txstreams);
/* copy rateset src to dst as-is (no masking or sorting) */
-extern void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
- struct brcms_c_rateset *dst);
+void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
+ struct brcms_c_rateset *dst);
/* would be nice to have these documented ... */
-extern u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
-
-extern void brcms_c_rateset_filter(struct brcms_c_rateset *src,
- struct brcms_c_rateset *dst, bool basic_only, u8 rates, uint xmask,
- bool mcsallow);
-
-extern void
-brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
- const struct brcms_c_rateset *rs_hw, uint phy_type,
- int bandtype, bool cck_only, uint rate_mask,
- bool mcsallow, u8 bw, u8 txstreams);
-
-extern s16 brcms_c_rate_legacy_phyctl(uint rate);
-
-extern void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
-extern void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
-extern void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset,
- u8 txstreams);
-extern void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset,
- u8 bw);
+u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
+
+void brcms_c_rateset_filter(struct brcms_c_rateset *src,
+ struct brcms_c_rateset *dst, bool basic_only,
+ u8 rates, uint xmask, bool mcsallow);
+
+void brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
+ const struct brcms_c_rateset *rs_hw, uint phy_type,
+ int bandtype, bool cck_only, uint rate_mask,
+ bool mcsallow, u8 bw, u8 txstreams);
+
+s16 brcms_c_rate_legacy_phyctl(uint rate);
+
+void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
+void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
+void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset, u8 txstreams);
+void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset, u8 bw);
#endif /* _BRCM_RATE_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.h b/drivers/net/wireless/brcm80211/brcmsmac/stf.h
index 19f6580f69be..ba9493009a33 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/stf.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.h
@@ -19,24 +19,19 @@
#include "types.h"
-extern int brcms_c_stf_attach(struct brcms_c_info *wlc);
-extern void brcms_c_stf_detach(struct brcms_c_info *wlc);
+int brcms_c_stf_attach(struct brcms_c_info *wlc);
+void brcms_c_stf_detach(struct brcms_c_info *wlc);
-extern void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
- u16 *ss_algo_channel,
- u16 chanspec);
-extern int brcms_c_stf_ss_update(struct brcms_c_info *wlc,
- struct brcms_band *band);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val,
- bool force);
-extern bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
-extern u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
- u32 rspec);
-extern u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc,
- u32 rspec);
+void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
+ u16 *ss_algo_channel, u16 chanspec);
+int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force);
+bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
+u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc, u32 rspec);
+u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc, u32 rspec);
#endif /* _BRCM_STF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
index 18750a814b4f..c87dd89bcb78 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
@@ -43,16 +43,14 @@ struct brcms_ucode {
u32 *bcm43xx_bomminor;
};
-extern int
-brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
+int brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
-extern void brcms_ucode_data_free(struct brcms_ucode *ucode);
+void brcms_ucode_data_free(struct brcms_ucode *ucode);
-extern int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf,
- unsigned int idx);
-extern int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
- unsigned int idx);
-extern void brcms_ucode_free_buf(void *);
-extern int brcms_check_firmwares(struct brcms_info *wl);
+int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, unsigned int idx);
+int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
+ unsigned int idx);
+void brcms_ucode_free_buf(void *);
+int brcms_check_firmwares(struct brcms_info *wl);
#endif /* _BRCM_UCODE_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index c1fe245bb07e..84113ea16f84 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -41,5 +41,6 @@
#define BCM4331_CHIP_ID 0x4331
#define BCM4334_CHIP_ID 0x4334
#define BCM4335_CHIP_ID 0x4335
+#define BCM4339_CHIP_ID 0x4339
#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
index 92623f02b1c0..8660a2cba098 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
@@ -140,6 +140,6 @@ struct brcmu_d11inf {
void (*decchspec)(struct brcmu_chan *ch);
};
-extern void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
+void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
#endif /* _BRCMU_CHANNELS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 898cacb8d01d..8ba445b3fd72 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -114,31 +114,29 @@ static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
return skb_peek_tail(&pq->q[prec].skblist);
}
-extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
- struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
- struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
- bool (*match_fn)(struct sk_buff *p,
- void *arg),
- void *arg);
+struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p);
+struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
+ struct sk_buff *p);
+struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
+ bool (*match_fn)(struct sk_buff *p,
+ void *arg),
+ void *arg);
/* packet primitives */
-extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
-extern void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
+struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
+void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
/* Empty the queue at particular precedence level */
/* callback function fn(pkt, arg) returns true if pkt belongs to if */
-extern void brcmu_pktq_pflush(struct pktq *pq, int prec,
- bool dir, bool (*fn)(struct sk_buff *, void *), void *arg);
+void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
+ bool (*fn)(struct sk_buff *, void *), void *arg);
/* operations on a set of precedences in packet queue */
-extern int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
-extern struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
- int *prec_out);
+int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
+struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
/* operations on packet queue as a whole */
@@ -167,11 +165,11 @@ static inline bool pktq_empty(struct pktq *pq)
return pq->len == 0;
}
-extern void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
+void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
/* prec_out may be NULL if caller is not interested in return value */
-extern struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
-extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
- bool (*fn)(struct sk_buff *, void *), void *arg);
+struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
+void brcmu_pktq_flush(struct pktq *pq, bool dir,
+ bool (*fn)(struct sk_buff *, void *), void *arg);
/* externs */
/* ip address */
@@ -204,13 +202,13 @@ static inline u16 brcmu_maskget16(u16 var, u16 mask, u8 shift)
/* externs */
/* format/print */
#ifdef DEBUG
-extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
+void brcmu_prpkt(const char *msg, struct sk_buff *p0);
#else
#define brcmu_prpkt(a, b)
#endif /* DEBUG */
#ifdef DEBUG
-extern __printf(3, 4)
+__printf(3, 4)
void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...);
#else
__printf(3, 4)
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 899cad34ccd3..40078f5f932e 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -237,7 +237,9 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
struct hwbus_priv *self = dev_id;
if (self->core) {
+ cw1200_spi_lock(self);
cw1200_irq_handler(self->core);
+ cw1200_spi_unlock(self);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
@@ -363,7 +365,7 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
static int cw1200_spi_probe(struct spi_device *func)
{
const struct cw1200_platform_data_spi *plat_data =
- func->dev.platform_data;
+ dev_get_platdata(&func->dev);
struct hwbus_priv *self;
int status;
@@ -441,7 +443,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
}
kfree(self);
}
- cw1200_spi_off(func->dev.platform_data);
+ cw1200_spi_off(dev_get_platdata(&func->dev));
return 0;
}
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 970a48baaf80..de7c4ffec309 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -217,7 +217,7 @@ static void prism2_host_roaming(local_info_t *local)
}
}
- memcpy(req.bssid, selected->bssid, 6);
+ memcpy(req.bssid, selected->bssid, ETH_ALEN);
req.channel = selected->chid;
spin_unlock_irqrestore(&local->lock, flags);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 6b823a1ab789..81903e33d5b1 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2698,7 +2698,7 @@ static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
/* data's copy of the eeprom data */
static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
{
- memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
+ memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
}
static void ipw_read_eeprom(struct ipw_priv *priv)
@@ -11885,7 +11885,6 @@ static int ipw_pci_probe(struct pci_dev *pdev,
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
out_free_libipw:
free_libipw(priv->net_dev, 0);
out:
@@ -11966,7 +11965,6 @@ static void ipw_pci_remove(struct pci_dev *pdev)
iounmap(priv->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
/* wiphy_unregister needs to be here, before free_libipw */
wiphy_unregister(priv->ieee->wdev.wiphy);
kfree(priv->ieee->a_band.channels);
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 6eede52ad8c0..5ce2f59d3378 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -950,66 +950,55 @@ static inline int libipw_is_cck_rate(u8 rate)
}
/* libipw.c */
-extern void free_libipw(struct net_device *dev, int monitor);
-extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
-extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
+void free_libipw(struct net_device *dev, int monitor);
+struct net_device *alloc_libipw(int sizeof_priv, int monitor);
+int libipw_change_mtu(struct net_device *dev, int new_mtu);
-extern void libipw_networks_age(struct libipw_device *ieee,
- unsigned long age_secs);
+void libipw_networks_age(struct libipw_device *ieee, unsigned long age_secs);
-extern int libipw_set_encryption(struct libipw_device *ieee);
+int libipw_set_encryption(struct libipw_device *ieee);
/* libipw_tx.c */
-extern netdev_tx_t libipw_xmit(struct sk_buff *skb,
- struct net_device *dev);
-extern void libipw_txb_free(struct libipw_txb *);
+netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev);
+void libipw_txb_free(struct libipw_txb *);
/* libipw_rx.c */
-extern void libipw_rx_any(struct libipw_device *ieee,
- struct sk_buff *skb, struct libipw_rx_stats *stats);
-extern int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
- struct libipw_rx_stats *rx_stats);
+void libipw_rx_any(struct libipw_device *ieee, struct sk_buff *skb,
+ struct libipw_rx_stats *stats);
+int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
+ struct libipw_rx_stats *rx_stats);
/* make sure to set stats->len */
-extern void libipw_rx_mgt(struct libipw_device *ieee,
- struct libipw_hdr_4addr *header,
- struct libipw_rx_stats *stats);
-extern void libipw_network_reset(struct libipw_network *network);
+void libipw_rx_mgt(struct libipw_device *ieee, struct libipw_hdr_4addr *header,
+ struct libipw_rx_stats *stats);
+void libipw_network_reset(struct libipw_network *network);
/* libipw_geo.c */
-extern const struct libipw_geo *libipw_get_geo(struct libipw_device
- *ieee);
-extern void libipw_set_geo(struct libipw_device *ieee,
- const struct libipw_geo *geo);
-
-extern int libipw_is_valid_channel(struct libipw_device *ieee,
- u8 channel);
-extern int libipw_channel_to_index(struct libipw_device *ieee,
- u8 channel);
-extern u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
-extern u8 libipw_get_channel_flags(struct libipw_device *ieee,
- u8 channel);
-extern const struct libipw_channel *libipw_get_channel(struct
- libipw_device
- *ieee, u8 channel);
-extern u32 libipw_channel_to_freq(struct libipw_device * ieee,
- u8 channel);
+const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee);
+void libipw_set_geo(struct libipw_device *ieee, const struct libipw_geo *geo);
+
+int libipw_is_valid_channel(struct libipw_device *ieee, u8 channel);
+int libipw_channel_to_index(struct libipw_device *ieee, u8 channel);
+u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
+u8 libipw_get_channel_flags(struct libipw_device *ieee, u8 channel);
+const struct libipw_channel *libipw_get_channel(struct libipw_device *ieee,
+ u8 channel);
+u32 libipw_channel_to_freq(struct libipw_device *ieee, u8 channel);
/* libipw_wx.c */
-extern int libipw_wx_get_scan(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encode(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int libipw_wx_get_encode(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encodeext(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-extern int libipw_wx_get_encodeext(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_scan(struct libipw_device *ieee, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *key);
+int libipw_wx_set_encode(struct libipw_device *ieee,
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *key);
+int libipw_wx_get_encode(struct libipw_device *ieee,
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *key);
+int libipw_wx_set_encodeext(struct libipw_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_encodeext(struct libipw_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
static inline void libipw_increment_scans(struct libipw_device *ieee)
{
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 9581d07a4242..dea3b50d68b9 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3811,7 +3811,6 @@ out_iounmap:
out_pci_release_regions:
pci_release_regions(pdev);
out_pci_disable_device:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
out_ieee80211_free_hw:
ieee80211_free_hw(il->hw);
@@ -3888,7 +3887,6 @@ il3945_pci_remove(struct pci_dev *pdev)
iounmap(il->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
il_free_channel_map(il);
il_free_geos(il);
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
index 9a8703def0ba..00030d43a194 100644
--- a/drivers/net/wireless/iwlegacy/3945.h
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -189,15 +189,14 @@ struct il3945_ibss_seq {
* for use by iwl-*.c
*
*****************************************************************************/
-extern int il3945_calc_db_from_ratio(int sig_ratio);
-extern void il3945_rx_replenish(void *data);
-extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
-extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
- struct ieee80211_hdr *hdr,
- int left);
-extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
- char **buf, bool display);
-extern void il3945_dump_nic_error_log(struct il_priv *il);
+int il3945_calc_db_from_ratio(int sig_ratio);
+void il3945_rx_replenish(void *data);
+void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+ struct ieee80211_hdr *hdr, int left);
+int il3945_dump_nic_event_log(struct il_priv *il, bool full_log, char **buf,
+ bool display);
+void il3945_dump_nic_error_log(struct il_priv *il);
/******************************************************************************
*
@@ -215,39 +214,36 @@ extern void il3945_dump_nic_error_log(struct il_priv *il);
* il3945_mac_ <-- mac80211 callback
*
****************************************************************************/
-extern void il3945_hw_handler_setup(struct il_priv *il);
-extern void il3945_hw_setup_deferred_work(struct il_priv *il);
-extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
-extern int il3945_hw_rxq_stop(struct il_priv *il);
-extern int il3945_hw_set_hw_params(struct il_priv *il);
-extern int il3945_hw_nic_init(struct il_priv *il);
-extern int il3945_hw_nic_stop_master(struct il_priv *il);
-extern void il3945_hw_txq_ctx_free(struct il_priv *il);
-extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
-extern int il3945_hw_nic_reset(struct il_priv *il);
-extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
- struct il_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset,
- u8 pad);
-extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
-extern int il3945_hw_get_temperature(struct il_priv *il);
-extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
-extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
- struct il3945_frame *frame,
- u8 rate);
+void il3945_hw_handler_setup(struct il_priv *il);
+void il3945_hw_setup_deferred_work(struct il_priv *il);
+void il3945_hw_cancel_deferred_work(struct il_priv *il);
+int il3945_hw_rxq_stop(struct il_priv *il);
+int il3945_hw_set_hw_params(struct il_priv *il);
+int il3945_hw_nic_init(struct il_priv *il);
+int il3945_hw_nic_stop_master(struct il_priv *il);
+void il3945_hw_txq_ctx_free(struct il_priv *il);
+void il3945_hw_txq_ctx_stop(struct il_priv *il);
+int il3945_hw_nic_reset(struct il_priv *il);
+int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad);
+void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il3945_hw_get_temperature(struct il_priv *il);
+int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+ struct il3945_frame *frame, u8 rate);
void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
struct ieee80211_tx_info *info,
struct ieee80211_hdr *hdr, int sta_id);
-extern int il3945_hw_reg_send_txpower(struct il_priv *il);
-extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
-extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+int il3945_hw_reg_send_txpower(struct il_priv *il);
+int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
-extern void il3945_disable_events(struct il_priv *il);
-extern int il4965_get_temperature(const struct il_priv *il);
-extern void il3945_post_associate(struct il_priv *il);
-extern void il3945_config_ap(struct il_priv *il);
+void il3945_disable_events(struct il_priv *il);
+int il4965_get_temperature(const struct il_priv *il);
+void il3945_post_associate(struct il_priv *il);
+void il3945_config_ap(struct il_priv *il);
-extern int il3945_commit_rxon(struct il_priv *il);
+int il3945_commit_rxon(struct il_priv *il);
/**
* il3945_hw_find_station - Find station id for a given BSSID
@@ -257,14 +253,14 @@ extern int il3945_commit_rxon(struct il_priv *il);
* not yet been merged into a single common layer for managing the
* station tables.
*/
-extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+u8 il3945_hw_find_station(struct il_priv *il, const u8 *bssid);
-extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
-extern int il3945_init_hw_rate_table(struct il_priv *il);
-extern void il3945_reg_txpower_periodic(struct il_priv *il);
-extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+__le32 il3945_get_antenna_flags(const struct il_priv *il);
+int il3945_init_hw_rate_table(struct il_priv *il);
+void il3945_reg_txpower_periodic(struct il_priv *il);
+int il3945_txpower_set_from_eeprom(struct il_priv *il);
-extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+int il3945_rs_next_rate(struct il_priv *il, int rate);
/* scanning */
int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 5ab50a5b48b1..3982ab76f375 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -6706,7 +6706,6 @@ out_free_eeprom:
out_iounmap:
iounmap(il->hw_base);
out_pci_release_regions:
- pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
@@ -6787,7 +6786,6 @@ il4965_pci_remove(struct pci_dev *pdev)
iounmap(il->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
il4965_uninit_drv(il);
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 1b15b0b2292b..337dfcf3bbde 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -272,7 +272,7 @@ il4965_hw_valid_rtc_data_addr(u32 addr)
((t) < IL_TX_POWER_TEMPERATURE_MIN || \
(t) > IL_TX_POWER_TEMPERATURE_MAX)
-extern void il4965_temperature_calib(struct il_priv *il);
+void il4965_temperature_calib(struct il_priv *il);
/********************* END TEMPERATURE ***************************************/
/********************* START TXPOWER *****************************************/
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 83f8ed8a5528..ad123d66ab6c 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -858,9 +858,9 @@ struct il_hw_params {
* il4965_mac_ <-- mac80211 callback
*
****************************************************************************/
-extern void il4965_update_chain_flags(struct il_priv *il);
+void il4965_update_chain_flags(struct il_priv *il);
extern const u8 il_bcast_addr[ETH_ALEN];
-extern int il_queue_space(const struct il_queue *q);
+int il_queue_space(const struct il_queue *q);
static inline int
il_queue_used(const struct il_queue *q, int i)
{
@@ -1727,7 +1727,7 @@ int il_alloc_txq_mem(struct il_priv *il);
void il_free_txq_mem(struct il_priv *il);
#ifdef CONFIG_IWLEGACY_DEBUGFS
-extern void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
#else
static inline void
il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
@@ -1760,12 +1760,12 @@ void il_chswitch_done(struct il_priv *il, bool is_success);
/*****************************************************
* TX
******************************************************/
-extern void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
-extern int il_tx_queue_init(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_unmap(struct il_priv *il, int txq_id);
-extern void il_tx_queue_free(struct il_priv *il, int txq_id);
-extern void il_setup_watchdog(struct il_priv *il);
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
/*****************************************************
* TX power
****************************************************/
@@ -1931,10 +1931,10 @@ il_is_ready_rf(struct il_priv *il)
return il_is_ready(il);
}
-extern void il_send_bt_config(struct il_priv *il);
-extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
-extern void il_apm_stop(struct il_priv *il);
-extern void _il_apm_stop(struct il_priv *il);
+void il_send_bt_config(struct il_priv *il);
+int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+void _il_apm_stop(struct il_priv *il);
int il_apm_init(struct il_priv *il);
@@ -1968,15 +1968,15 @@ void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
irqreturn_t il_isr(int irq, void *data);
-extern void il_set_bit(struct il_priv *p, u32 r, u32 m);
-extern void il_clear_bit(struct il_priv *p, u32 r, u32 m);
-extern bool _il_grab_nic_access(struct il_priv *il);
-extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
-extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
-extern u32 il_rd_prph(struct il_priv *il, u32 reg);
-extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
-extern u32 il_read_targ_mem(struct il_priv *il, u32 addr);
-extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+void il_set_bit(struct il_priv *p, u32 r, u32 m);
+void il_clear_bit(struct il_priv *p, u32 r, u32 m);
+bool _il_grab_nic_access(struct il_priv *il);
+int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
+int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
+u32 il_rd_prph(struct il_priv *il, u32 reg);
+void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
+u32 il_read_targ_mem(struct il_priv *il, u32 addr);
+void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
static inline void
_il_write8(struct il_priv *il, u32 ofs, u8 val)
@@ -2868,13 +2868,13 @@ il4965_first_antenna(u8 mask)
* The specific throughput table used is based on the type of network
* the associated with, including A, B, G, and G w/ TGG protection
*/
-extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
/* Initialize station's rate scaling information after adding station */
-extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
- u8 sta_id);
-extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
- u8 sta_id);
+void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
+void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
/**
* il_rate_control_register - Register the rate control algorithm callbacks
@@ -2886,8 +2886,8 @@ extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
* ieee80211_register_hw
*
*/
-extern int il4965_rate_control_register(void);
-extern int il3945_rate_control_register(void);
+int il4965_rate_control_register(void);
+int il3945_rate_control_register(void);
/**
* il_rate_control_unregister - Unregister the rate control callbacks
@@ -2895,11 +2895,11 @@ extern int il3945_rate_control_register(void);
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
*/
-extern void il4965_rate_control_unregister(void);
-extern void il3945_rate_control_unregister(void);
+void il4965_rate_control_unregister(void);
+void il3945_rate_control_unregister(void);
-extern int il_power_update_mode(struct il_priv *il, bool force);
-extern void il_power_initialize(struct il_priv *il);
+int il_power_update_mode(struct il_priv *il, bool force);
+void il_power_initialize(struct il_priv *il);
extern u32 il_debug_level;
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index f2a86ffc3b4c..23d5f0275ce9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -397,7 +397,7 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
return cpu_to_le32(flags|(u32)rate);
}
-extern int iwl_alive_start(struct iwl_priv *priv);
+int iwl_alive_start(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index a79fdd137f95..7434d9edf3b7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -270,7 +270,7 @@ struct iwl_sensitivity_ranges {
* iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
*
****************************************************************************/
-extern void iwl_update_chain_flags(struct iwl_priv *priv);
+void iwl_update_chain_flags(struct iwl_priv *priv);
extern const u8 iwl_bcast_addr[ETH_ALEN];
#define IWL_OPERATION_MODE_AUTO 0
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index 5d83cab22d62..26fc550cd68c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -407,8 +407,8 @@ static inline u8 first_antenna(u8 mask)
/* Initialize station's rate scaling information after adding station */
-extern void iwl_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta, u8 sta_id);
+void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ u8 sta_id);
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -420,7 +420,7 @@ extern void iwl_rs_rate_init(struct iwl_priv *priv,
* ieee80211_register_hw
*
*/
-extern int iwlagn_rate_control_register(void);
+int iwlagn_rate_control_register(void);
/**
* iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -428,6 +428,6 @@ extern int iwlagn_rate_control_register(void);
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
*/
-extern void iwlagn_rate_control_unregister(void);
+void iwlagn_rate_control_unregister(void);
#endif /* __iwl_agn__rs__ */
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index da442b81370a..1fef5240e6ad 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -433,27 +433,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdr_len);
+ txq_id = info->hw_queue;
+
if (is_agg)
txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
/*
- * Send this frame after DTIM -- there's a special queue
- * reserved for this for contexts that support AP mode.
- */
- txq_id = ctx->mcast_queue;
-
- /*
* The microcode will clear the more data
* bit in the last frame it transmits.
*/
hdr->frame_control |=
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
- txq_id = IWL_AUX_QUEUE;
- else
- txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+ }
- WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
WARN_ON_ONCE(is_agg &&
priv->queue_to_mac80211[txq_id] != info->hw_queue);
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 30d45e2fc193..8ac305be68f4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -240,6 +240,12 @@ const struct iwl_cfg iwl6035_2agn_cfg = {
.ht_params = &iwl6000_ht_params,
};
+const struct iwl_cfg iwl6035_2agn_sff_cfg = {
+ .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
+ IWL_DEVICE_6035,
+ .ht_params = &iwl6000_ht_params,
+};
+
const struct iwl_cfg iwl1030_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
IWL_DEVICE_6030,
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 76e14c046d94..85879dbaa402 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -83,6 +83,8 @@
#define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL3160_NVM_VERSION 0x709
#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */
+#define IWL7265_NVM_VERSION 0x0a1d
+#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7260_FW_PRE "iwlwifi-7260-"
#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
@@ -90,6 +92,9 @@
#define IWL3160_FW_PRE "iwlwifi-3160-"
#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
+#define IWL7265_FW_PRE "iwlwifi-7265-"
+#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -182,5 +187,14 @@ const struct iwl_cfg iwl3160_n_cfg = {
.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
};
+const struct iwl_cfg iwl7265_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 7265",
+ .fw_name_pre = IWL7265_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7265_NVM_VERSION,
+ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+};
+
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index e4d370bff306..18f232e8e812 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -280,6 +280,7 @@ extern const struct iwl_cfg iwl2000_2bgn_cfg;
extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
extern const struct iwl_cfg iwl2030_2bgn_cfg;
extern const struct iwl_cfg iwl6035_2agn_cfg;
+extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
extern const struct iwl_cfg iwl105_bgn_cfg;
extern const struct iwl_cfg iwl105_bgn_d_cfg;
extern const struct iwl_cfg iwl135_bgn_cfg;
@@ -292,6 +293,7 @@ extern const struct iwl_cfg iwl7260_n_cfg;
extern const struct iwl_cfg iwl3160_2ac_cfg;
extern const struct iwl_cfg iwl3160_2n_cfg;
extern const struct iwl_cfg iwl3160_n_cfg;
+extern const struct iwl_cfg iwl7265_2ac_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index a276af476e2d..54a4fdc631b7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -394,6 +394,38 @@
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
+/* SECURE boot registers */
+#define CSR_SECURE_BOOT_CONFIG_ADDR (0x100)
+enum secure_boot_config_reg {
+ CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
+ CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
+};
+
+#define CSR_SECURE_BOOT_CPU1_STATUS_ADDR (0x100)
+#define CSR_SECURE_BOOT_CPU2_STATUS_ADDR (0x100)
+enum secure_boot_status_reg {
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000003,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
+ CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
+};
+
+#define CSR_UCODE_LOAD_STATUS_ADDR (0x100)
+enum secure_load_status_reg {
+ CSR_CPU_STATUS_LOADING_STARTED = 0x00000001,
+ CSR_CPU_STATUS_LOADING_COMPLETED = 0x00000002,
+ CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
+ CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
+};
+
+#define CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
+#define CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
+
+#define CSR_SECURE_TIME_OUT (100)
+
+#define FH_TCSR_0_REG0 (0x1D00)
+
/*
* HBUS (Host-side Bus)
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 99e1da3123c9..ff570027e9dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -483,6 +483,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
const u8 *tlv_data;
char buildstr[25];
u32 build;
+ int num_of_cpus;
if (len < sizeof(*ucode)) {
IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -692,6 +693,42 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
goto invalid_tlv_len;
drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
break;
+ case IWL_UCODE_TLV_SECURE_SEC_RT:
+ iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
+ tlv_len);
+ drv->fw.mvm_fw = true;
+ drv->fw.img[IWL_UCODE_REGULAR].is_secure = true;
+ break;
+ case IWL_UCODE_TLV_SECURE_SEC_INIT:
+ iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
+ tlv_len);
+ drv->fw.mvm_fw = true;
+ drv->fw.img[IWL_UCODE_INIT].is_secure = true;
+ break;
+ case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
+ iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
+ tlv_len);
+ drv->fw.mvm_fw = true;
+ drv->fw.img[IWL_UCODE_WOWLAN].is_secure = true;
+ break;
+ case IWL_UCODE_TLV_NUM_OF_CPU:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ num_of_cpus =
+ le32_to_cpup((__le32 *)tlv_data);
+
+ if (num_of_cpus == 2) {
+ drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
+ true;
+ drv->fw.img[IWL_UCODE_INIT].is_dual_cpus =
+ true;
+ drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus =
+ true;
+ } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
+ IWL_ERR(drv, "Driver support upto 2 CPUs\n");
+ return -EINVAL;
+ }
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 8b6c6fd95ed0..6c6c35c5228c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -121,6 +121,10 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_SEC_WOWLAN = 21,
IWL_UCODE_TLV_DEF_CALIB = 22,
IWL_UCODE_TLV_PHY_SKU = 23,
+ IWL_UCODE_TLV_SECURE_SEC_RT = 24,
+ IWL_UCODE_TLV_SECURE_SEC_INIT = 25,
+ IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
+ IWL_UCODE_TLV_NUM_OF_CPU = 27,
};
struct iwl_ucode_tlv {
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index a1223680bc70..87b66a821ec8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -75,11 +75,23 @@
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
* @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
* @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
+ * offload profile config command.
* @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
* @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
* @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
* (rather than two) IPv6 addresses
* @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
+ * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
+ * from the probe request template.
+ * @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
+ * connection when going back to D0
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
+ * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
+ * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
+ * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: supports the device-wide power command
+ * containing CAM (Continuous Active Mode) indication.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -87,11 +99,20 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
+ IWL_UCODE_TLV_FLAGS_NEWBT_COEX = BIT(5),
IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6),
+ IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
+ IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
+ IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API = BIT(14),
+ IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
+ IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
+ IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
+ IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
+ IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
};
/* The default calibrate table size if not specified by firmware file */
@@ -133,7 +154,8 @@ enum iwl_ucode_sec {
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
-#define IWL_UCODE_SECTION_MAX 4
+#define IWL_UCODE_SECTION_MAX 6
+#define IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU (IWL_UCODE_SECTION_MAX/2)
struct iwl_ucode_capabilities {
u32 max_probe_length;
@@ -150,6 +172,8 @@ struct fw_desc {
struct fw_img {
struct fw_desc sec[IWL_UCODE_SECTION_MAX];
+ bool is_secure;
+ bool is_dual_cpus;
};
/* uCode version contains 4 values: Major/Minor/API/Serial */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index ff8cc75c189d..a70c7b9d9bad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -97,6 +97,8 @@
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+#define APMG_RTC_INT_STT_RFKILL (0x10000000)
+
/* Device system time */
#define DEVICE_SYSTEM_TIME_REG 0xA0206C
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index dd57a36ecb10..c6bac7c90b00 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -601,8 +601,10 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
{
int ret;
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+ return -EIO;
+ }
if (!(cmd->flags & CMD_ASYNC))
lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
@@ -638,8 +640,8 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int queue)
{
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->tx(trans, skb, dev_cmd, queue);
}
@@ -647,16 +649,16 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
int ssn, struct sk_buff_head *skbs)
{
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
trans->ops->reclaim(trans, queue, ssn, skbs);
}
static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
{
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
trans->ops->txq_disable(trans, queue);
}
@@ -667,8 +669,8 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
{
might_sleep();
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
frame_limit, ssn);
@@ -683,8 +685,8 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
{
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->wait_tx_queue_empty(trans);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 0fad98b85f60..5b630f12bbff 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -98,126 +98,258 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
#undef EVENT_PRIO_ANT
-/* BT Antenna Coupling Threshold (dB) */
-#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
-#define IWL_BT_LOAD_FORCE_SISO_THRESHOLD (3)
-
#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD (-62)
#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65)
-#define BT_REDUCED_TX_POWER_BIT BIT(7)
-
-static inline bool is_loose_coex(void)
-{
- return iwlwifi_mod_params.ant_coupling >
- IWL_BT_ANTENNA_COUPLING_THRESHOLD;
-}
+#define BT_ANTENNA_COUPLING_THRESHOLD (30)
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
{
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+ return 0;
+
return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(struct iwl_bt_coex_prio_tbl_cmd),
&iwl_bt_prio_tbl);
}
-static int iwl_send_bt_env(struct iwl_mvm *mvm, u8 action, u8 type)
-{
- struct iwl_bt_coex_prot_env_cmd env_cmd;
- int ret;
-
- env_cmd.action = action;
- env_cmd.type = type;
- ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PROT_ENV, CMD_SYNC,
- sizeof(env_cmd), &env_cmd);
- if (ret)
- IWL_ERR(mvm, "failed to send BT env command\n");
- return ret;
-}
-
-enum iwl_bt_kill_msk {
- BT_KILL_MSK_DEFAULT,
- BT_KILL_MSK_SCO_HID_A2DP,
- BT_KILL_MSK_REDUCED_TXPOW,
- BT_KILL_MSK_MAX,
-};
-
-static const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
+const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
[BT_KILL_MSK_DEFAULT] = 0xffff0000,
[BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
[BT_KILL_MSK_REDUCED_TXPOW] = 0,
};
-static const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
+const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
[BT_KILL_MSK_DEFAULT] = 0xffff0000,
[BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
[BT_KILL_MSK_REDUCED_TXPOW] = 0,
};
-#define IWL_BT_DEFAULT_BOOST (0xf0f0f0f0)
-
-/* Tight Coex */
-static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = {
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaeaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
+static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
+ cpu_to_le32(0xf0f0f0f0),
+ cpu_to_le32(0xc0c0c0c0),
+ cpu_to_le32(0xfcfcfcfc),
+ cpu_to_le32(0xff00ff00),
};
-/* Loose Coex */
-static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = {
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
+static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+ {
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ },
+ {
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ },
+ {
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ },
};
-/* Full concurrency */
-static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = {
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
+static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+ {
+ /* Tight */
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaeaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xcc00ff28),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xcc00aaaa),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xf0005000),
+ },
+ {
+ /* Loose */
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xcc00ff28),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xcc00aaaa),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xf0005000),
+ },
+ {
+ /* Tx Tx disabled */
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xcc00ff28),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xcc00aaaa),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xf0005000),
+ },
};
-/* single shared antenna */
-static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xC0004000),
- cpu_to_le32(0xF0005000),
- cpu_to_le32(0xC0004000),
- cpu_to_le32(0xF0005000),
+/* 20MHz / 40MHz below / 40MHz above */
+static const __le64 iwl_ci_mask[][3] = {
+ /* dummy entry for channel 0 */
+ {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
+ {
+ cpu_to_le64(0x0000001FFFULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x00007FFFFFULL),
+ },
+ {
+ cpu_to_le64(0x000000FFFFULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x0003FFFFFFULL),
+ },
+ {
+ cpu_to_le64(0x000003FFFCULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x000FFFFFFCULL),
+ },
+ {
+ cpu_to_le64(0x00001FFFE0ULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x007FFFFFE0ULL),
+ },
+ {
+ cpu_to_le64(0x00007FFF80ULL),
+ cpu_to_le64(0x00007FFFFFULL),
+ cpu_to_le64(0x01FFFFFF80ULL),
+ },
+ {
+ cpu_to_le64(0x0003FFFC00ULL),
+ cpu_to_le64(0x0003FFFFFFULL),
+ cpu_to_le64(0x0FFFFFFC00ULL),
+ },
+ {
+ cpu_to_le64(0x000FFFF000ULL),
+ cpu_to_le64(0x000FFFFFFCULL),
+ cpu_to_le64(0x3FFFFFF000ULL),
+ },
+ {
+ cpu_to_le64(0x007FFF8000ULL),
+ cpu_to_le64(0x007FFFFFE0ULL),
+ cpu_to_le64(0xFFFFFF8000ULL),
+ },
+ {
+ cpu_to_le64(0x01FFFE0000ULL),
+ cpu_to_le64(0x01FFFFFF80ULL),
+ cpu_to_le64(0xFFFFFE0000ULL),
+ },
+ {
+ cpu_to_le64(0x0FFFF00000ULL),
+ cpu_to_le64(0x0FFFFFFC00ULL),
+ cpu_to_le64(0x0ULL),
+ },
+ {
+ cpu_to_le64(0x3FFFC00000ULL),
+ cpu_to_le64(0x3FFFFFF000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFFFE000000ULL),
+ cpu_to_le64(0xFFFFFF8000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFFF8000000ULL),
+ cpu_to_le64(0xFFFFFE0000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFE00000000ULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x0)
+ },
};
+static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
+ cpu_to_le32(0x22002200),
+ cpu_to_le32(0x33113311),
+};
+
+static enum iwl_bt_coex_lut_type
+iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ enum iwl_bt_coex_lut_type ret;
+ u16 phy_ctx_id;
+
+ /*
+ * Checking that we hold mvm->mutex is a good idea, but the rate
+ * control can't acquire the mutex since it runs in the Tx path.
+ * So this is racy in that case, but in the worst case the AMPDU
+ * size limit will be wrong for a short time, which is not a big
+ * issue.
+ */
+
+ rcu_read_lock();
+
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+ if (!chanctx_conf ||
+ chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+ rcu_read_unlock();
+ return BT_COEX_LOOSE_LUT;
+ }
+
+ ret = BT_COEX_TX_DIS_LUT;
+
+ if (mvm->cfg->bt_shared_single_ant) {
+ rcu_read_unlock();
+ return ret;
+ }
+
+ phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
+
+ if (mvm->last_bt_ci_cmd.primary_ch_phy_id == phy_ctx_id)
+ ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
+ else if (mvm->last_bt_ci_cmd.secondary_ch_phy_id == phy_ctx_id)
+ ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
+ /* else - default = TX TX disallowed */
+
+ rcu_read_unlock();
+
+ return ret;
+}
+
int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
{
struct iwl_bt_coex_cmd *bt_cmd;
@@ -228,17 +360,10 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
.flags = CMD_SYNC,
};
int ret;
+ u32 flags;
- /* go to CALIB state in internal BT-Coex state machine */
- ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
- BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
- if (ret)
- return ret;
-
- ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
- BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
- if (ret)
- return ret;
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+ return 0;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
@@ -246,40 +371,52 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
cmd.data[0] = bt_cmd;
bt_cmd->max_kill = 5;
- bt_cmd->bt3_time_t7_value = 1;
- bt_cmd->bt3_prio_sample_time = 2;
- bt_cmd->bt3_timer_t2_value = 0xc;
+ bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+ bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
+ bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
+ bt_cmd->bt4_tx_rx_max_freq0 = 15;
- bt_cmd->flags = iwlwifi_mod_params.bt_coex_active ?
+ flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
- bt_cmd->flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
+ flags |= BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN | BT_SYNC_2_BT_DISABLE;
+ bt_cmd->flags = cpu_to_le32(flags);
- bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
+ bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
BT_VALID_BT_PRIO_BOOST |
BT_VALID_MAX_KILL |
BT_VALID_3W_TMRS |
BT_VALID_KILL_ACK |
BT_VALID_KILL_CTS |
BT_VALID_REDUCED_TX_POWER |
- BT_VALID_LUT);
+ BT_VALID_LUT |
+ BT_VALID_WIFI_RX_SW_PRIO_BOOST |
+ BT_VALID_WIFI_TX_SW_PRIO_BOOST |
+ BT_VALID_MULTI_PRIO_LUT |
+ BT_VALID_CORUN_LUT_20 |
+ BT_VALID_CORUN_LUT_40 |
+ BT_VALID_ANT_ISOLATION |
+ BT_VALID_ANT_ISOLATION_THRS |
+ BT_VALID_TXTX_DELTA_FREQ_THRS |
+ BT_VALID_TXRX_MAX_FREQ_0);
if (mvm->cfg->bt_shared_single_ant)
- memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant_lookup,
- sizeof(iwl_single_shared_ant_lookup));
- else if (is_loose_coex())
- memcpy(&bt_cmd->decision_lut, iwl_loose_lookup,
- sizeof(iwl_tight_lookup));
+ memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
+ sizeof(iwl_single_shared_ant));
else
- memcpy(&bt_cmd->decision_lut, iwl_tight_lookup,
- sizeof(iwl_tight_lookup));
+ memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
+ sizeof(iwl_combined_lookup));
- bt_cmd->bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
+ memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
+ sizeof(iwl_bt_prio_boost));
+ memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
+ sizeof(iwl_bt_mprio_lut));
bt_cmd->kill_ack_msk =
cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
bt_cmd->kill_cts_msk =
cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+ memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
ret = iwl_mvm_send_cmd(mvm, &cmd);
@@ -334,13 +471,17 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
if (!bt_cmd)
return -ENOMEM;
cmd.data[0] = bt_cmd;
+ bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
- bt_cmd->valid_bit_msk =
- cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
+ bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
+ BT_VALID_KILL_ACK |
+ BT_VALID_KILL_CTS);
- IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk);
+ IWL_DEBUG_COEX(mvm, "ACK Kill msk = 0x%08x, CTS Kill msk = 0x%08x\n",
+ iwl_bt_ack_kill_msk[bt_kill_msk],
+ iwl_bt_cts_kill_msk[bt_kill_msk]);
ret = iwl_mvm_send_cmd(mvm, &cmd);
@@ -380,8 +521,10 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
if (!bt_cmd)
return -ENOMEM;
cmd.data[0] = bt_cmd;
+ bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
- bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER),
+ bt_cmd->valid_bit_msk =
+ cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
bt_cmd->bt_reduced_tx_power = sta_id;
if (enable)
@@ -403,8 +546,25 @@ struct iwl_bt_iterator_data {
struct iwl_mvm *mvm;
u32 num_bss_ifaces;
bool reduced_tx_power;
+ struct ieee80211_chanctx_conf *primary;
+ struct ieee80211_chanctx_conf *secondary;
};
+static inline
+void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, int rssi)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->bf_data.last_bt_coex_event = rssi;
+ mvmvif->bf_data.bt_coex_max_thold =
+ enable ? BT_ENABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+ mvmvif->bf_data.bt_coex_min_thold =
+ enable ? BT_DISABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+}
+
+/* must be called under rcu_read_lock */
static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -413,65 +573,94 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct iwl_mvm *mvm = data->mvm;
struct ieee80211_chanctx_conf *chanctx_conf;
enum ieee80211_smps_mode smps_mode;
- enum ieee80211_band band;
int ave_rssi;
lockdep_assert_held(&mvm->mutex);
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- if (chanctx_conf && chanctx_conf->def.chan)
- band = chanctx_conf->def.chan->band;
- else
- band = -1;
- rcu_read_unlock();
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return;
smps_mode = IEEE80211_SMPS_AUTOMATIC;
- /* non associated BSSes aren't to be considered */
- if (!vif->bss_conf.assoc)
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+ /* If channel context is invalid or not on 2.4GHz .. */
+ if (!chanctx_conf ||
+ chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+ /* ... and it is an associated STATION, relax constraints */
+ if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc)
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
+ }
+
+ /* SoftAP / GO will always be primary */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (!mvmvif->ap_ibss_active)
+ return;
+
+ /* the Ack / Cts kill mask must be default if AP / GO */
+ data->reduced_tx_power = false;
- if (band != IEEE80211_BAND_2GHZ) {
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
- smps_mode);
+ if (chanctx_conf == data->primary)
+ return;
+
+ /* downgrade the current primary no matter what its type is */
+ data->secondary = data->primary;
+ data->primary = chanctx_conf;
return;
}
- if (data->notif->bt_status)
- smps_mode = IEEE80211_SMPS_DYNAMIC;
+ data->num_bss_ifaces++;
+
+ /* we are now a STA / P2P Client, and take associated ones only */
+ if (!vif->bss_conf.assoc)
+ return;
+
+ /* STA / P2P Client, try to be primary if first vif */
+ if (!data->primary || data->primary == chanctx_conf)
+ data->primary = chanctx_conf;
+ else if (!data->secondary)
+ /* if secondary is not NULL, it might be a GO */
+ data->secondary = chanctx_conf;
- if (data->notif->bt_traffic_load >= IWL_BT_LOAD_FORCE_SISO_THRESHOLD)
+ if (le32_to_cpu(data->notif->bt_activity_grading) >= BT_HIGH_TRAFFIC)
smps_mode = IEEE80211_SMPS_STATIC;
+ else if (le32_to_cpu(data->notif->bt_activity_grading) >=
+ BT_LOW_TRAFFIC)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
IWL_DEBUG_COEX(data->mvm,
- "mac %d: bt_status %d traffic_load %d smps_req %d\n",
+ "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
mvmvif->id, data->notif->bt_status,
- data->notif->bt_traffic_load, smps_mode);
+ data->notif->bt_activity_grading, smps_mode);
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
/* don't reduce the Tx power if in loose scheme */
- if (is_loose_coex())
+ if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
+ mvm->cfg->bt_shared_single_ant) {
+ data->reduced_tx_power = false;
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
+ }
- data->num_bss_ifaces++;
-
- /* reduced Txpower only if there are open BT connections, so ...*/
- if (!BT_MBOX_MSG(data->notif, 3, OPEN_CON_2)) {
+ /* reduced Txpower only if BT is on, so ...*/
+ if (!data->notif->bt_status) {
/* ... cancel reduced Tx power ... */
if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
data->reduced_tx_power = false;
/* ... and there is no need to get reports on RSSI any more. */
- ieee80211_disable_rssi_reports(vif);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
}
- ave_rssi = ieee80211_ave_rssi(vif);
+ /* try to get the avg rssi from fw */
+ ave_rssi = mvmvif->bf_data.ave_beacon_signal;
/* if the RSSI isn't valid, fake it is very low */
if (!ave_rssi)
@@ -499,8 +688,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
}
/* Begin to monitor the RSSI: it may influence the reduced Tx power */
- ieee80211_enable_rssi_reports(vif, BT_DISABLE_REDUCED_TXPOWER_THRESHOLD,
- BT_ENABLE_REDUCED_TXPOWER_THRESHOLD);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
}
static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
@@ -510,11 +698,72 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
.notif = &mvm->last_bt_notif,
.reduced_tx_power = true,
};
+ struct iwl_bt_coex_ci_cmd cmd = {};
+ u8 ci_bw_idx;
+ rcu_read_lock();
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_notif_iterator, &data);
+ if (data.primary) {
+ struct ieee80211_chanctx_conf *chan = data.primary;
+ if (WARN_ON(!chan->def.chan)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+ ci_bw_idx = 0;
+ cmd.co_run_bw_primary = 0;
+ } else {
+ cmd.co_run_bw_primary = 1;
+ if (chan->def.center_freq1 >
+ chan->def.chan->center_freq)
+ ci_bw_idx = 2;
+ else
+ ci_bw_idx = 1;
+ }
+
+ cmd.bt_primary_ci =
+ iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+ cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
+ }
+
+ if (data.secondary) {
+ struct ieee80211_chanctx_conf *chan = data.secondary;
+ if (WARN_ON(!data.secondary->def.chan)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+ ci_bw_idx = 0;
+ cmd.co_run_bw_secondary = 0;
+ } else {
+ cmd.co_run_bw_secondary = 1;
+ if (chan->def.center_freq1 >
+ chan->def.chan->center_freq)
+ ci_bw_idx = 2;
+ else
+ ci_bw_idx = 1;
+ }
+
+ cmd.bt_secondary_ci =
+ iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+ cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
+ }
+
+ rcu_read_unlock();
+
+ /* Don't spam the fw with the same command over and over */
+ if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
+ if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, CMD_SYNC,
+ sizeof(cmd), &cmd))
+ IWL_ERR(mvm, "Failed to send BT_CI cmd");
+ memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
+ }
+
/*
* If there are no BSS / P2P client interfaces, reduced Tx Power is
* irrelevant since it is based on the RSSI coming from the beacon.
@@ -536,12 +785,18 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
- IWL_DEBUG_COEX(mvm, "\tBT %salive\n", notif->bt_status ? "" : "not ");
+ IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
+ notif->bt_status ? "ON" : "OFF");
IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
- IWL_DEBUG_COEX(mvm, "\tBT traffic load %d\n", notif->bt_traffic_load);
+ IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
+ IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
+ le32_to_cpu(notif->primary_ch_lut));
+ IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
+ le32_to_cpu(notif->secondary_ch_lut));
+ IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
+ le32_to_cpu(notif->bt_activity_grading));
IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
notif->bt_agg_traffic_load);
- IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
/* remember this notification for future use: rssi fluctuations */
memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
@@ -565,6 +820,18 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ /* If channel context is invalid or not on 2.4GHz - don't count it */
+ if (!chanctx_conf ||
+ chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
if (vif->type != NL80211_IFTYPE_STATION ||
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
return;
@@ -594,15 +861,15 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
};
int ret;
- mutex_lock(&mvm->mutex);
+ lockdep_assert_held(&mvm->mutex);
/* Rssi update while not associated ?! */
if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
- goto out_unlock;
+ return;
- /* No open connection - reports should be disabled */
- if (!BT_MBOX_MSG(&mvm->last_bt_notif, 3, OPEN_CON_2))
- goto out_unlock;
+ /* No BT - reports should be disabled */
+ if (!mvm->last_bt_notif.bt_status)
+ return;
IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
@@ -611,7 +878,8 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* Check if rssi is good enough for reduced Tx power, but not in loose
* scheme.
*/
- if (rssi_event == RSSI_EVENT_LOW || is_loose_coex())
+ if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
+ iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
false);
else
@@ -633,12 +901,52 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
+}
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
+#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
- out_unlock:
- mutex_unlock(&mvm->mutex);
+u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ enum iwl_bt_coex_lut_type lut_type;
+
+ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_LOW_TRAFFIC)
+ return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+ lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+
+ if (lut_type == BT_COEX_LOOSE_LUT)
+ return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+ /* tight coex, high bt traffic, reduce AGG time limit */
+ return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
+}
+
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+
+ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_HIGH_TRAFFIC)
+ return true;
+
+ /*
+ * In Tight, BT can't Rx while we Tx, so use both antennas since BT is
+ * already killed.
+ * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while we
+ * Tx.
+ */
+ return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
}
-void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+ return;
+
iwl_mvm_bt_coex_notif_handle(mvm);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 2bf29f7992ee..4b6d670c3509 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -70,7 +70,9 @@
#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
-#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 20
+#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 8
+#define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30
+#define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20
#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50
#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50
#define IWL_MVM_PS_SNOOZE_INTERVAL 25
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 417639f77b01..6f45966817bb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -67,6 +67,7 @@
#include <net/cfg80211.h>
#include <net/ipv6.h>
#include <net/tcp.h>
+#include <net/addrconf.h>
#include "iwl-modparams.h"
#include "fw-api.h"
#include "mvm.h"
@@ -381,14 +382,74 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
union {
struct iwl_proto_offload_cmd_v1 v1;
struct iwl_proto_offload_cmd_v2 v2;
+ struct iwl_proto_offload_cmd_v3_small v3s;
+ struct iwl_proto_offload_cmd_v3_large v3l;
} cmd = {};
+ struct iwl_host_cmd hcmd = {
+ .id = PROT_OFFLOAD_CONFIG_CMD,
+ .flags = CMD_SYNC,
+ .data[0] = &cmd,
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ };
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
+ u32 capa_flags = mvm->fw->ucode_capa.flags;
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int i;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
+ capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ struct iwl_ns_config *nsc;
+ struct iwl_targ_addr *addrs;
+ int n_nsc, n_addrs;
+ int c;
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ nsc = cmd.v3s.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
+ addrs = cmd.v3s.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
+ } else {
+ nsc = cmd.v3l.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+ addrs = cmd.v3l.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+ }
+
+ if (mvmvif->num_target_ipv6_addrs)
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+
+ /*
+ * For each address we have (and that will fit), fill a target
+ * address struct and group addresses that share a solicited-node
+ * address into a single NS offload config.
+ */
+ for (i = 0, c = 0;
+ i < mvmvif->num_target_ipv6_addrs &&
+ i < n_addrs && c < n_nsc; i++) {
+ struct in6_addr solicited_addr;
+ int j;
+
+ addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
+ &solicited_addr);
+ for (j = 0; j < c; j++)
+ if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+ &solicited_addr) == 0)
+ break;
+ if (j == c)
+ c++;
+ addrs[i].addr = mvmvif->target_ipv6_addrs[i];
+ addrs[i].config_num = cpu_to_le32(j);
+ nsc[j].dest_ipv6_addr = solicited_addr;
+ memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
+ cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
+ else
+ cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
if (mvmvif->num_target_ipv6_addrs) {
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
@@ -419,7 +480,13 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
}
#endif
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ common = &cmd.v3s.common;
+ size = sizeof(cmd.v3s);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ common = &cmd.v3l.common;
+ size = sizeof(cmd.v3l);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
common = &cmd.v2.common;
size = sizeof(cmd.v2);
} else {
@@ -438,8 +505,8 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
common->enabled = cpu_to_le32(enabled);
- return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
- size, &cmd);
+ hcmd.len[0] = size;
+ return iwl_mvm_send_cmd(mvm, &hcmd);
}
enum iwl_mvm_tcp_packet_type {
@@ -793,6 +860,74 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
+static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_nonqos_seq_query_cmd query_cmd = {
+ .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
+ .mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = NON_QOS_TX_COUNTER_CMD,
+ .flags = CMD_SYNC | CMD_WANT_SKB,
+ };
+ int err;
+ u32 size;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
+ cmd.data[0] = &query_cmd;
+ cmd.len[0] = sizeof(query_cmd);
+ }
+
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+ if (err)
+ return err;
+
+ size = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ size -= sizeof(cmd.resp_pkt->hdr);
+ if (size < sizeof(__le16)) {
+ err = -EINVAL;
+ } else {
+ err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
+ /* new API returns next, not last-used seqno */
+ if (mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
+ err -= 0x10;
+ }
+
+ iwl_free_resp(&cmd);
+ return err;
+}
+
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_nonqos_seq_query_cmd query_cmd = {
+ .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
+ .mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .value = cpu_to_le16(mvmvif->seqno),
+ };
+
+ /* return if called during restart, not resume from D3 */
+ if (!mvmvif->seqno_valid)
+ return;
+
+ mvmvif->seqno_valid = false;
+
+ if (!(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
+ return;
+
+ if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
+ sizeof(query_cmd), &query_cmd))
+ IWL_ERR(mvm, "failed to set non-QoS seqno\n");
+}
+
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan,
bool test)
@@ -829,7 +964,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
};
int ret, i;
int len __maybe_unused;
- u16 seq;
u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
if (!wowlan) {
@@ -872,26 +1006,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
- /*
- * The D3 firmware still hardcodes the AP station ID for the
- * BSS we're associated with as 0. Store the real STA ID here
- * and assign 0. When we leave this function, we'll restore
- * the original value for the resume code.
- */
- old_ap_sta_id = mvm_ap_sta->sta_id;
- mvm_ap_sta->sta_id = 0;
- mvmvif->ap_sta_id = 0;
-
/* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
- /*
- * We know the last used seqno, and the uCode expects to know that
- * one, it will increment before TX.
- */
- seq = mvm_ap_sta->last_seq_ctl & IEEE80211_SCTL_SEQ;
- wowlan_config_cmd.non_qos_seq = cpu_to_le16(seq);
+ /* Query the last used seqno and set it */
+ ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
+ if (ret < 0)
+ goto out_noreset;
+ wowlan_config_cmd.non_qos_seq = cpu_to_le16(ret);
/*
* For QoS counters, we store the one to use next, so subtract 0x10
@@ -899,7 +1022,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
* increment after using the value (i.e. store the next value to use).
*/
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
- seq = mvm_ap_sta->tid_data[i].seq_number;
+ u16 seq = mvm_ap_sta->tid_data[i].seq_number;
seq -= 0x10;
wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
}
@@ -945,6 +1068,16 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_stop_device(mvm->trans);
/*
+ * The D3 firmware still hardcodes the AP station ID for the
+ * BSS we're associated with as 0. Store the real STA ID here
+ * and assign 0. When we leave this function, we'll restore
+ * the original value for the resume code.
+ */
+ old_ap_sta_id = mvm_ap_sta->sta_id;
+ mvm_ap_sta->sta_id = 0;
+ mvmvif->ap_sta_id = 0;
+
+ /*
* Set the HW restart bit -- this is mostly true as we're
* going to load new firmware and reprogram that, though
* the reprogramming is going to be manual to avoid adding
@@ -1059,6 +1192,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
+ ret = iwl_mvm_power_update_device_mode(mvm);
+ if (ret)
+ goto out;
+
ret = iwl_mvm_power_update_mode(mvm, vif);
if (ret)
goto out;
@@ -1109,16 +1246,26 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
return __iwl_mvm_suspend(hw, wowlan, false);
}
+/* converted data from the different status responses */
+struct iwl_wowlan_status_data {
+ u16 pattern_number;
+ u16 qos_seq_ctr[8];
+ u32 wakeup_reasons;
+ u32 wake_packet_length;
+ u32 wake_packet_bufsize;
+ const u8 *wake_packet;
+};
+
static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_wowlan_status *status)
+ struct iwl_wowlan_status_data *status)
{
struct sk_buff *pkt = NULL;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
- u32 reasons = le32_to_cpu(status->wakeup_reasons);
+ u32 reasons = status->wakeup_reasons;
if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
wakeup_report = NULL;
@@ -1130,7 +1277,7 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
wakeup.pattern_idx =
- le16_to_cpu(status->pattern_number);
+ status->pattern_number;
if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
@@ -1158,8 +1305,8 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
wakeup.tcp_match = true;
if (status->wake_packet_bufsize) {
- int pktsize = le32_to_cpu(status->wake_packet_bufsize);
- int pktlen = le32_to_cpu(status->wake_packet_length);
+ int pktsize = status->wake_packet_bufsize;
+ int pktlen = status->wake_packet_length;
const u8 *pktdata = status->wake_packet;
struct ieee80211_hdr *hdr = (void *)pktdata;
int truncated = pktlen - pktsize;
@@ -1239,8 +1386,229 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
kfree_skb(pkt);
}
+static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
+ struct ieee80211_key_seq *seq)
+{
+ u64 pn;
+
+ pn = le64_to_cpu(sc->pn);
+ seq->ccmp.pn[0] = pn >> 40;
+ seq->ccmp.pn[1] = pn >> 32;
+ seq->ccmp.pn[2] = pn >> 24;
+ seq->ccmp.pn[3] = pn >> 16;
+ seq->ccmp.pn[4] = pn >> 8;
+ seq->ccmp.pn[5] = pn;
+}
+
+static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
+ struct ieee80211_key_seq *seq)
+{
+ seq->tkip.iv32 = le32_to_cpu(sc->iv32);
+ seq->tkip.iv16 = le16_to_cpu(sc->iv16);
+}
+
+static void iwl_mvm_set_aes_rx_seq(struct aes_sc *scs,
+ struct ieee80211_key_conf *key)
+{
+ int tid;
+
+ BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+ for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+ struct ieee80211_key_seq seq = {};
+
+ iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
+ ieee80211_set_key_rx_seq(key, tid, &seq);
+ }
+}
+
+static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
+ struct ieee80211_key_conf *key)
+{
+ int tid;
+
+ BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+ for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+ struct ieee80211_key_seq seq = {};
+
+ iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
+ ieee80211_set_key_rx_seq(key, tid, &seq);
+ }
+}
+
+static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
+ struct iwl_wowlan_status_v6 *status)
+{
+ union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ iwl_mvm_set_aes_rx_seq(rsc->aes.multicast_rsc, key);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+struct iwl_mvm_d3_gtk_iter_data {
+ struct iwl_wowlan_status_v6 *status;
+ void *last_gtk;
+ u32 cipher;
+ bool find_phase, unhandled_cipher;
+ int num_keys;
+};
+
+static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mvm_d3_gtk_iter_data *data = _data;
+
+ if (data->unhandled_cipher)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /* ignore WEP completely, nothing to do */
+ return;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_TKIP:
+ /* we support these */
+ break;
+ default:
+ /* everything else (even CMAC for MFP) - disconnect from AP */
+ data->unhandled_cipher = true;
+ return;
+ }
+
+ data->num_keys++;
+
+ /*
+ * pairwise key - update sequence counters only;
+ * note that this assumes no TDLS sessions are active
+ */
+ if (sta) {
+ struct ieee80211_key_seq seq = {};
+ union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
+
+ if (data->find_phase)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq);
+ iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
+ iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
+ break;
+ }
+ ieee80211_set_key_tx_seq(key, &seq);
+
+ /* that's it for this key */
+ return;
+ }
+
+ if (data->find_phase) {
+ data->last_gtk = key;
+ data->cipher = key->cipher;
+ return;
+ }
+
+ if (data->status->num_of_gtk_rekeys)
+ ieee80211_remove_key(key);
+ else if (data->last_gtk == key)
+ iwl_mvm_set_key_rx_seq(key, data->status);
+}
+
+static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status_v6 *status)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_d3_gtk_iter_data gtkdata = {
+ .status = status,
+ };
+
+ if (!status || !vif->bss_conf.bssid)
+ return false;
+
+ /* find last GTK that we used initially, if any */
+ gtkdata.find_phase = true;
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_d3_update_gtks, &gtkdata);
+ /* not trying to keep connections with MFP/unhandled ciphers */
+ if (gtkdata.unhandled_cipher)
+ return false;
+ if (!gtkdata.num_keys)
+ return true;
+ if (!gtkdata.last_gtk)
+ return false;
+
+ /*
+ * invalidate all other GTKs that might still exist and update
+ * the one that we used
+ */
+ gtkdata.find_phase = false;
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_d3_update_gtks, &gtkdata);
+
+ if (status->num_of_gtk_rekeys) {
+ struct ieee80211_key_conf *key;
+ struct {
+ struct ieee80211_key_conf conf;
+ u8 key[32];
+ } conf = {
+ .conf.cipher = gtkdata.cipher,
+ .conf.keyidx = status->gtk.key_index,
+ };
+
+ switch (gtkdata.cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+ memcpy(conf.conf.key, status->gtk.decrypt_key,
+ WLAN_KEY_LEN_CCMP);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ conf.conf.keylen = WLAN_KEY_LEN_TKIP;
+ memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
+ /* leave TX MIC key zeroed, we don't use it anyway */
+ memcpy(conf.conf.key +
+ NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ status->gtk.tkip_mic_key, 8);
+ break;
+ }
+
+ key = ieee80211_gtk_rekey_add(vif, &conf.conf);
+ if (IS_ERR(key))
+ return false;
+ iwl_mvm_set_key_rx_seq(key, status);
+ }
+
+ if (status->num_of_gtk_rekeys) {
+ __be64 replay_ctr =
+ cpu_to_be64(le64_to_cpu(status->replay_ctr));
+ ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
+ (void *)&replay_ctr, GFP_KERNEL);
+ }
+
+ mvmvif->seqno_valid = true;
+ /* +0x10 because the set API expects next-to-use, not last-used */
+ mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+
+ return true;
+}
+
/* releases the MVM mutex */
-static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
u32 base = mvm->error_event_table;
@@ -1253,8 +1621,12 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
.id = WOWLAN_GET_STATUSES,
.flags = CMD_SYNC | CMD_WANT_SKB,
};
- struct iwl_wowlan_status *status;
- int ret, len;
+ struct iwl_wowlan_status_data status;
+ struct iwl_wowlan_status_v6 *status_v6;
+ int ret, len, status_size, i;
+ bool keep;
+ struct ieee80211_sta *ap_sta;
+ struct iwl_mvm_sta *mvm_ap_sta;
iwl_trans_read_mem_bytes(mvm->trans, base,
&err_info, sizeof(err_info));
@@ -1287,32 +1659,83 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
if (!cmd.resp_pkt)
goto out_unlock;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
+ status_size = sizeof(struct iwl_wowlan_status_v6);
+ else
+ status_size = sizeof(struct iwl_wowlan_status_v4);
+
len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
+ if (len - sizeof(struct iwl_cmd_header) < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out_free_resp;
}
- status = (void *)cmd.resp_pkt->data;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
+ status_v6 = (void *)cmd.resp_pkt->data;
+
+ status.pattern_number = le16_to_cpu(status_v6->pattern_number);
+ for (i = 0; i < 8; i++)
+ status.qos_seq_ctr[i] =
+ le16_to_cpu(status_v6->qos_seq_ctr[i]);
+ status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
+ status.wake_packet_length =
+ le32_to_cpu(status_v6->wake_packet_length);
+ status.wake_packet_bufsize =
+ le32_to_cpu(status_v6->wake_packet_bufsize);
+ status.wake_packet = status_v6->wake_packet;
+ } else {
+ struct iwl_wowlan_status_v4 *status_v4;
+ status_v6 = NULL;
+ status_v4 = (void *)cmd.resp_pkt->data;
+
+ status.pattern_number = le16_to_cpu(status_v4->pattern_number);
+ for (i = 0; i < 8; i++)
+ status.qos_seq_ctr[i] =
+ le16_to_cpu(status_v4->qos_seq_ctr[i]);
+ status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
+ status.wake_packet_length =
+ le32_to_cpu(status_v4->wake_packet_length);
+ status.wake_packet_bufsize =
+ le32_to_cpu(status_v4->wake_packet_bufsize);
+ status.wake_packet = status_v4->wake_packet;
+ }
if (len - sizeof(struct iwl_cmd_header) !=
- sizeof(*status) +
- ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
+ status_size + ALIGN(status.wake_packet_bufsize, 4)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out_free_resp;
}
+ /* still at hard-coded place 0 for D3 image */
+ ap_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[0],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(ap_sta))
+ goto out_free_resp;
+
+ mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = status.qos_seq_ctr[i];
+ /* firmware stores last-used value, we store next value */
+ seq += 0x10;
+ mvm_ap_sta->tid_data[i].seq_number = seq;
+ }
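The + 0x10 here (and again in iwl_mvm_setup_connection_keep()) relies on the 802.11 sequence-control layout: bits 0-3 carry the fragment number and bits 4-15 carry the sequence number, so adding 0x10 advances the sequence number by exactly one while keeping the fragment number at zero. A worked example:

        last_used   = 0x03a0                   /* sequence number 0x3a, fragment 0 */
        next_to_use = 0x03a0 + 0x10 = 0x03b0   /* sequence number 0x3b, fragment 0 */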
+
/* now we have all the data we need, unlock to avoid mac80211 issues */
mutex_unlock(&mvm->mutex);
- iwl_mvm_report_wakeup_reasons(mvm, vif, status);
+ iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
+
+ keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);
+
iwl_free_resp(&cmd);
- return;
+ return keep;
out_free_resp:
iwl_free_resp(&cmd);
out_unlock:
mutex_unlock(&mvm->mutex);
+ return false;
}
static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
@@ -1335,6 +1758,17 @@ static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
#endif
}
+static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ /* skip the one we keep connection on */
+ if (data == vif)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_resume_disconnect(vif);
+}
+
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
struct iwl_d3_iter_data resume_iter_data = {
@@ -1343,6 +1777,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
struct ieee80211_vif *vif = NULL;
int ret;
enum iwl_d3_status d3_status;
+ bool keep = false;
mutex_lock(&mvm->mutex);
@@ -1368,7 +1803,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* query SRAM first in case we want event logging */
iwl_mvm_read_d3_sram(mvm);
- iwl_mvm_query_wakeup_reasons(mvm, vif);
+ keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
/* has unlocked the mutex, so skip that */
goto out;
@@ -1376,8 +1811,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_unlock(&mvm->mutex);
out:
- if (!test && vif)
- ieee80211_resume_disconnect(vif);
+ if (!test)
+ ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
/* return 1 to reconfigure the device */
set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index aac81b8984b0..0675f0c8ef93 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -246,58 +246,56 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_power_down_allow_write(struct file *file,
- const char __user *user_buf,
+static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
+ char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
- char buf[8] = {};
- int allow;
-
- if (!mvm->ucode_loaded)
- return -EIO;
-
- if (copy_from_user(buf, user_buf, sizeof(buf)))
- return -EFAULT;
-
- if (sscanf(buf, "%d", &allow) != 1)
- return -EINVAL;
-
- IWL_DEBUG_POWER(mvm, "%s device power down\n",
- allow ? "allow" : "prevent");
+ char buf[64];
+ int bufsz = sizeof(buf);
+ int pos = 0;
- /*
- * TODO: Send REPLY_DEBUG_CMD (0xf0) when FW support it
- */
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n",
+ mvm->disable_power_off);
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n",
+ mvm->disable_power_off_d3);
- return count;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
- char buf[8] = {};
- int allow;
+ char buf[64] = {};
+ int ret;
+ int val;
- if (copy_from_user(buf, user_buf, sizeof(buf)))
+ if (!mvm->ucode_loaded)
+ return -EIO;
+
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, count))
return -EFAULT;
- if (sscanf(buf, "%d", &allow) != 1)
+ if (!strncmp("disable_power_off_d0=", buf, 21)) {
+ if (sscanf(buf + 21, "%d", &val) != 1)
+ return -EINVAL;
+ mvm->disable_power_off = val;
+ } else if (!strncmp("disable_power_off_d3=", buf, 21)) {
+ if (sscanf(buf + 21, "%d", &val) != 1)
+ return -EINVAL;
+ mvm->disable_power_off_d3 = val;
+ } else {
return -EINVAL;
+ }
- IWL_DEBUG_POWER(mvm, "%s device power down in d3\n",
- allow ? "allow" : "prevent");
-
- /*
- * TODO: When WoWLAN FW alive notification happens, driver will send
- * REPLY_DEBUG_CMD setting power_down_allow flag according to
- * mvm->prevent_power_down_d3
- */
- mvm->prevent_power_down_d3 = !allow;
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_power_update_device_mode(mvm);
+ mutex_unlock(&mvm->mutex);
- return count;
+ return ret ?: count;
}
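For completeness, a hedged user-space sketch of driving the new key=value interface; the debugfs path below is an assumption (it depends on the PCI address and on where debugfs is mounted) and is purely illustrative:

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                /* hypothetical path - adjust to the actual device directory */
                const char *path =
                        "/sys/kernel/debug/iwlwifi/0000:01:00.0/iwlmvm/disable_power_off";
                const char msg[] = "disable_power_off_d3=1";
                int fd = open(path, O_WRONLY);

                if (fd < 0) {
                        perror("open");
                        return 1;
                }
                if (write(fd, msg, strlen(msg)) < 0)
                        perror("write");
                close(fd);
                return 0;
        }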
static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -371,7 +369,8 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
int val;
int ret;
- if (copy_from_user(buf, user_buf, sizeof(buf)))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, count))
return -EFAULT;
if (!strncmp("keep_alive=", buf, 11)) {
@@ -394,7 +393,9 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
if (sscanf(buf + 16, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
- } else if (!strncmp("disable_power_off=", buf, 18)) {
+ } else if (!strncmp("disable_power_off=", buf, 18) &&
+ !(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
if (sscanf(buf + 18, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
@@ -581,15 +582,21 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
pos += scnprintf(buf+pos, bufsz-pos, "bt_status = %d\n",
- notif->bt_status);
+ notif->bt_status);
pos += scnprintf(buf+pos, bufsz-pos, "bt_open_conn = %d\n",
- notif->bt_open_conn);
+ notif->bt_open_conn);
pos += scnprintf(buf+pos, bufsz-pos, "bt_traffic_load = %d\n",
- notif->bt_traffic_load);
+ notif->bt_traffic_load);
pos += scnprintf(buf+pos, bufsz-pos, "bt_agg_traffic_load = %d\n",
- notif->bt_agg_traffic_load);
+ notif->bt_agg_traffic_load);
pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
- notif->bt_ci_compliance);
+ notif->bt_ci_compliance);
+ pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
+ le32_to_cpu(notif->primary_ch_lut));
+ pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
+ le32_to_cpu(notif->secondary_ch_lut));
+ pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n",
+ le32_to_cpu(notif->bt_activity_grading));
mutex_unlock(&mvm->mutex);
@@ -600,6 +607,38 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
}
#undef BT_MBOX_PRINT
+static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
+ char buf[256];
+ int bufsz = sizeof(buf);
+ int pos = 0;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "Channel inhibition CMD\n");
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "\tPrimary Channel Bitmap 0x%016llx Fat: %d\n",
+ le64_to_cpu(cmd->bt_primary_ci),
+ !!cmd->co_run_bw_primary);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "\tSecondary Channel Bitmap 0x%016llx Fat: %d\n",
+ le64_to_cpu(cmd->bt_secondary_ci),
+ !!cmd->co_run_bw_secondary);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
+ pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
+ iwl_bt_ack_kill_msk[mvm->bt_kill_msk]);
+ pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
+ iwl_bt_cts_kill_msk[mvm->bt_kill_msk]);
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
#define PRINT_STATS_LE32(_str, _val) \
pos += scnprintf(buf + pos, bufsz - pos, \
fmt_table, _str, \
@@ -615,9 +654,11 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
int pos = 0;
char *buf;
int ret;
- int bufsz = sizeof(struct mvm_statistics_rx_phy) * 20 +
- sizeof(struct mvm_statistics_rx_non_phy) * 10 +
- sizeof(struct mvm_statistics_rx_ht_phy) * 10 + 200;
+ /* 43 is the size of each data line, 33 is the size of each header */
+ size_t bufsz =
+ ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) +
+ (4 * 33) + 1;
+
struct mvm_statistics_rx_phy *ofdm;
struct mvm_statistics_rx_phy *cck;
struct mvm_statistics_rx_non_phy *general;
@@ -712,6 +753,7 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
+ PRINT_STATS_LE32("mac_id", general->mac_id);
PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
@@ -757,6 +799,59 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
return count;
}
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int pos = 0;
+ char buf[32];
+ const size_t bufsz = sizeof(buf);
+
+ /* print which antennas were set for the scan command by the user */
+ pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: ");
+ if (mvm->scan_rx_ant & ANT_A)
+ pos += scnprintf(buf + pos, bufsz - pos, "A");
+ if (mvm->scan_rx_ant & ANT_B)
+ pos += scnprintf(buf + pos, bufsz - pos, "B");
+ if (mvm->scan_rx_ant & ANT_C)
+ pos += scnprintf(buf + pos, bufsz - pos, "C");
+ pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[8];
+ int buf_size;
+ u8 scan_rx_ant;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+
+ /* get the argument from the user and check if it is valid */
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
+ return -EINVAL;
+ if (scan_rx_ant > ANT_ABC)
+ return -EINVAL;
+ if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw))
+ return -EINVAL;
+
+ /* change the rx antennas for scan command */
+ mvm->scan_rx_ant = scan_rx_ant;
+
+ return count;
+}
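A short worked example of the accepted values, assuming the usual iwlwifi convention that ANT_A, ANT_B and ANT_C are BIT(0), BIT(1) and BIT(2) (so ANT_ABC is 0x7): writing the hex value 3 selects antennas A and B, 7 selects all three, and any bit outside the firmware's valid RX antenna mask is rejected with -EINVAL. A kernel-style fragment, illustrative only:

        u8 valid = iwl_fw_valid_rx_ant(mvm->fw);        /* antennas the firmware supports */
        u8 scan_rx_ant = 0x3;                           /* request antennas A + B */
        bool accepted = scan_rx_ant <= 0x7 &&           /* within ANT_ABC (assumed 0x7) */
                        !(scan_rx_ant & ~valid);        /* no unsupported antenna bits */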
+
+
static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
enum iwl_dbgfs_bf_mask param, int value)
{
@@ -968,7 +1063,8 @@ static ssize_t iwl_dbgfs_d3_sram_write(struct file *file,
char buf[8] = {};
int store;
- if (copy_from_user(buf, user_buf, sizeof(buf)))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, count))
return -EFAULT;
if (sscanf(buf, "%d", &store) != 1)
@@ -1063,10 +1159,12 @@ MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
-MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
-MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
+MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain);
+
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
#endif
@@ -1087,10 +1185,14 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
- MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
- MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
+ MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
+ S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
+ S_IWUSR | S_IRUSR);
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
index 05c61d6f384e..4ea5e24ca92d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
@@ -82,6 +82,8 @@
* @BT_USE_DEFAULTS:
* @BT_SYNC_2_BT_DISABLE:
* @BT_COEX_CORUNNING_TBL_EN:
+ *
+ * The COEX_MODE must be set for each command, even if it is not changed.
*/
enum iwl_bt_coex_flags {
BT_CH_PRIMARY_EN = BIT(0),
@@ -95,14 +97,16 @@ enum iwl_bt_coex_flags {
BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
BT_USE_DEFAULTS = BIT(6),
BT_SYNC_2_BT_DISABLE = BIT(7),
- /*
- * For future use - when the flags will be enlarged
- * BT_COEX_CORUNNING_TBL_EN = BIT(8),
- */
+ BT_COEX_CORUNNING_TBL_EN = BIT(8),
+ BT_COEX_MPLUT_TBL_EN = BIT(9),
+ /* Bit 10 is reserved */
+ BT_COEX_WF_PRIO_BOOST_CHECK_EN = BIT(11),
};
/*
* indicates what has changed in the BT_COEX command.
+ * BT_VALID_ENABLE must be set for each command. Commands without this bit will
+ * be discarded by the firmware.
*/
enum iwl_bt_coex_valid_bit_msk {
BT_VALID_ENABLE = BIT(0),
@@ -121,11 +125,8 @@ enum iwl_bt_coex_valid_bit_msk {
BT_VALID_CORUN_LUT_40 = BIT(13),
BT_VALID_ANT_ISOLATION = BIT(14),
BT_VALID_ANT_ISOLATION_THRS = BIT(15),
- /*
- * For future use - when the valid flags will be enlarged
- * BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
- * BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
- */
+ BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
+ BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
};
/**
@@ -142,48 +143,88 @@ enum iwl_bt_reduced_tx_power {
BT_REDUCED_TX_POWER_DATA = BIT(1),
};
+enum iwl_bt_coex_lut_type {
+ BT_COEX_TIGHT_LUT = 0,
+ BT_COEX_LOOSE_LUT,
+ BT_COEX_TX_DIS_LUT,
+
+ BT_COEX_MAX_LUT,
+};
+
#define BT_COEX_LUT_SIZE (12)
+#define BT_COEX_CORUN_LUT_SIZE (32)
+#define BT_COEX_MULTI_PRIO_LUT_SIZE (2)
+#define BT_COEX_BOOST_SIZE (4)
+#define BT_REDUCED_TX_POWER_BIT BIT(7)
/**
* struct iwl_bt_coex_cmd - bt coex configuration command
* @flags:&enum iwl_bt_coex_flags
- * @lead_time:
* @max_kill:
- * @bt3_time_t7_value:
- * @kill_ack_msk:
- * @kill_cts_msk:
- * @bt3_prio_sample_time:
- * @bt3_timer_t2_value:
- * @bt4_reaction_time:
- * @decision_lut[12]:
* @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
- * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
- * @bt_prio_boost: values for PTA boost register
+ * @bt4_antenna_isolation:
+ * @bt4_antenna_isolation_thr:
+ * @bt4_tx_tx_delta_freq_thr:
+ * @bt4_tx_rx_max_freq0:
+ * @bt_prio_boost:
* @wifi_tx_prio_boost: SW boost of wifi tx priority
* @wifi_rx_prio_boost: SW boost of wifi rx priority
+ * @kill_ack_msk:
+ * @kill_cts_msk:
+ * @decision_lut:
+ * @bt4_multiprio_lut:
+ * @bt4_corun_lut20:
+ * @bt4_corun_lut40:
+ * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
*
* The structure is used for the BT_COEX command.
*/
struct iwl_bt_coex_cmd {
- u8 flags;
- u8 lead_time;
+ __le32 flags;
u8 max_kill;
- u8 bt3_time_t7_value;
+ u8 bt_reduced_tx_power;
+ u8 reserved[2];
+
+ u8 bt4_antenna_isolation;
+ u8 bt4_antenna_isolation_thr;
+ u8 bt4_tx_tx_delta_freq_thr;
+ u8 bt4_tx_rx_max_freq0;
+
+ __le32 bt_prio_boost[BT_COEX_BOOST_SIZE];
+ __le32 wifi_tx_prio_boost;
+ __le32 wifi_rx_prio_boost;
__le32 kill_ack_msk;
__le32 kill_cts_msk;
- u8 bt3_prio_sample_time;
- u8 bt3_timer_t2_value;
- __le16 bt4_reaction_time;
- __le32 decision_lut[BT_COEX_LUT_SIZE];
- u8 bt_reduced_tx_power;
- u8 reserved;
- __le16 valid_bit_msk;
- __le32 bt_prio_boost;
- u8 reserved2;
- u8 wifi_tx_prio_boost;
- __le16 wifi_rx_prio_boost;
+
+ __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
+ __le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
+ __le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE];
+ __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
+
+ __le32 valid_bit_msk;
} __packed; /* BT_COEX_CMD_API_S_VER_3 */
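A hedged sketch (not taken from the patch) of how a caller might populate the reworked command while honouring the two rules documented above: a coex mode must be present in flags on every command, and valid_bit_msk must always contain BT_VALID_ENABLE or the firmware discards the command. The antenna-isolation value is a hypothetical input:

        struct iwl_bt_coex_cmd cmd = {};

        cmd.flags = cpu_to_le32(BT_COEX_NW | BT_CH_PRIMARY_EN);
        cmd.bt4_antenna_isolation = antenna_isolation;  /* hypothetical input */
        cmd.valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
                                        BT_VALID_ANT_ISOLATION);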
+/**
+ * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
+ * @bt_primary_ci:
+ * @bt_secondary_ci:
+ * @co_run_bw_primary:
+ * @co_run_bw_secondary:
+ * @primary_ch_phy_id:
+ * @secondary_ch_phy_id:
+ *
+ * Used for BT_COEX_CI command
+ */
+struct iwl_bt_coex_ci_cmd {
+ __le64 bt_primary_ci;
+ __le64 bt_secondary_ci;
+
+ u8 co_run_bw_primary;
+ u8 co_run_bw_secondary;
+ u8 primary_ch_phy_id;
+ u8 secondary_ch_phy_id;
+} __packed; /* BT_CI_MSG_API_S_VER_1 */
+
#define BT_MBOX(n_dw, _msg, _pos, _nbits) \
BT_MBOX##n_dw##_##_msg##_POS = (_pos), \
BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
@@ -244,23 +285,39 @@ enum iwl_bt_mxbox_dw3 {
((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
>> BT_MBOX##_num##_##_field##_POS)
+enum iwl_bt_activity_grading {
+ BT_OFF = 0,
+ BT_ON_NO_CONNECTION = 1,
+ BT_LOW_TRAFFIC = 2,
+ BT_HIGH_TRAFFIC = 3,
+};
+
/**
* struct iwl_bt_coex_profile_notif - notification about BT coex
* @mbox_msg: message from BT to WiFi
- * @:bt_status: 0 - off, 1 - on
- * @:bt_open_conn: number of BT connections open
- * @:bt_traffic_load: load of BT traffic
- * @:bt_agg_traffic_load: aggregated load of BT traffic
- * @:bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ * @msg_idx: the index of the message
+ * @bt_status: 0 - off, 1 - on
+ * @bt_open_conn: number of BT connections open
+ * @bt_traffic_load: load of BT traffic
+ * @bt_agg_traffic_load: aggregated load of BT traffic
+ * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ * @primary_ch_lut: LUT used for primary channel
+ * @secondary_ch_lut: LUT used for secondary channel
+ * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
*/
struct iwl_bt_coex_profile_notif {
__le32 mbox_msg[4];
+ __le32 msg_idx;
u8 bt_status;
u8 bt_open_conn;
u8 bt_traffic_load;
u8 bt_agg_traffic_load;
u8 bt_ci_compliance;
u8 reserved[3];
+
+ __le32 primary_ch_lut;
+ __le32 secondary_ch_lut;
+ __le32 bt_activity_grading;
} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
enum iwl_bt_coex_prio_table_event {
@@ -300,20 +357,4 @@ struct iwl_bt_coex_prio_tbl_cmd {
u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
} __packed;
-enum iwl_bt_coex_env_action {
- BT_COEX_ENV_CLOSE = 0,
- BT_COEX_ENV_OPEN = 1,
-}; /* BT_COEX_PROT_ENV_ACTION_API_E_VER_1 */
-
-/**
- * struct iwl_bt_coex_prot_env_cmd - BT Protection Envelope
- * @action: enum %iwl_bt_coex_env_action
- * @type: enum %iwl_bt_coex_prio_table_event
- */
-struct iwl_bt_coex_prot_env_cmd {
- u8 action; /* 0 = closed, 1 = open */
- u8 type; /* 0 .. 15 */
- u8 reserved[2];
-} __packed;
-
#endif /* __fw_api_bt_coex_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index df72fcdf8170..4e7dd8cf87dc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -100,7 +100,12 @@ enum iwl_proto_offloads {
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6
-#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 6
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L 12
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S 4
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 12
+
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L 4
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S 2
/**
* struct iwl_proto_offload_cmd_common - ARP/NS offload common part
@@ -155,6 +160,43 @@ struct iwl_proto_offload_cmd_v2 {
u8 reserved2[3];
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
+struct iwl_ns_config {
+ struct in6_addr source_ipv6_addr;
+ struct in6_addr dest_ipv6_addr;
+ u8 target_mac_addr[ETH_ALEN];
+ __le16 reserved;
+} __packed; /* NS_OFFLOAD_CONFIG */
+
+struct iwl_targ_addr {
+ struct in6_addr addr;
+ __le32 config_num;
+} __packed; /* TARGET_IPV6_ADDRESS */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_small {
+ struct iwl_proto_offload_cmd_common common;
+ __le32 num_valid_ipv6_addrs;
+ struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S];
+ struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_large {
+ struct iwl_proto_offload_cmd_common common;
+ __le32 num_valid_ipv6_addrs;
+ struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
+ struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
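Purely as a hedged illustration of the new layout (the variant selection and population logic is not part of this hunk), filling the small variant might look roughly like the fragment below; addrs, num_addrs and the config_num value are assumptions:

        struct iwl_proto_offload_cmd_v3_small cmd = {};
        int i, n = min_t(int, num_addrs, IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S);

        cmd.num_valid_ipv6_addrs = cpu_to_le32(n);
        for (i = 0; i < n; i++) {
                cmd.targ_addrs[i].addr = addrs[i];              /* struct in6_addr */
                cmd.targ_addrs[i].config_num = cpu_to_le32(0);  /* assumed NS config index */
        }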
/*
* WOWLAN_PATTERNS
@@ -293,7 +335,7 @@ enum iwl_wowlan_wakeup_reason {
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
-struct iwl_wowlan_status {
+struct iwl_wowlan_status_v4 {
__le64 replay_ctr;
__le16 pattern_number;
__le16 non_qos_seq_ctr;
@@ -308,6 +350,29 @@ struct iwl_wowlan_status {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
+struct iwl_wowlan_gtk_status {
+ u8 key_index;
+ u8 reserved[3];
+ u8 decrypt_key[16];
+ u8 tkip_mic_key[8];
+ struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+} __packed;
+
+struct iwl_wowlan_status_v6 {
+ struct iwl_wowlan_gtk_status gtk;
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
+
#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64
#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128
#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index 98b1feb43d38..39c3148bdfa8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -170,12 +170,14 @@ struct iwl_mac_data_ap {
* @beacon_tsf: beacon transmit time in TSF
* @bi: beacon interval in TU
* @bi_reciprocal: 2^32 / bi
+ * @beacon_template: beacon template ID
*/
struct iwl_mac_data_ibss {
__le32 beacon_time;
__le64 beacon_tsf;
__le32 bi;
__le32 bi_reciprocal;
+ __le32 beacon_template;
} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
/**
@@ -372,4 +374,13 @@ static inline u32 iwl_mvm_reciprocal(u32 v)
return 0xFFFFFFFF / v;
}
+#define IWL_NONQOS_SEQ_GET 0x1
+#define IWL_NONQOS_SEQ_SET 0x2
+struct iwl_nonqos_seq_query_cmd {
+ __le32 get_set_flag;
+ __le32 mac_id_n_color;
+ __le16 value;
+ __le16 reserved;
+} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */
+
#endif /* __fw_api_mac_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 8e7ab41079ca..5cb93ae5cd2f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -132,6 +132,33 @@ struct iwl_powertable_cmd {
} __packed;
/**
+ * enum iwl_device_power_flags - masks for device power command flags
+ * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow saving power by turning off
+ * the receiver and transmitter. '0' Do not allow. This flag should always be
+ * set to '1' unless one needs to disable actual power down for debug
+ * purposes.
+ * @DEVICE_POWER_FLAGS_CAM_MSK: '1' CAM (Continuous Active Mode) is set, meaning
+ * that power management is disabled. '0' Power management is enabled and one
+ * of the power schemes is applied.
+*/
+enum iwl_device_power_flags {
+ DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
+ DEVICE_POWER_FLAGS_CAM_MSK = BIT(13),
+};
+
+/**
+ * struct iwl_device_power_cmd - device wide power command.
+ * DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags: Power table command flags from DEVICE_POWER_FLAGS_*
+ */
+struct iwl_device_power_cmd {
+ /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+ __le16 flags;
+ __le16 reserved;
+} __packed;
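As an illustrative sketch (not from the patch), a device-wide power command that allows power save, with an optional CAM override that effectively disables power management; force_cam is a hypothetical condition such as a debug knob:

        struct iwl_device_power_cmd cmd = {
                .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
        };

        if (force_cam)
                cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);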
+
+/**
* struct iwl_mac_power_cmd - New power command containing uAPSD support
* MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
* @id_and_color: MAC contex identifier
@@ -290,7 +317,7 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_ESCAPE_TIMER_MIN 0
#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
-#define IWL_BA_ESCAPE_TIMER_D3 6
+#define IWL_BA_ESCAPE_TIMER_D3 9
#define IWL_BA_ESCAPE_TIMER_MAX 1024
#define IWL_BA_ESCAPE_TIMER_MIN 0
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index fdd33bc0a594..538f1c7a5966 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -68,6 +68,7 @@
/*
* These serve as indexes into
* struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
+ * TODO: avoid overlap between legacy and HT rates
*/
enum {
IWL_RATE_1M_INDEX = 0,
@@ -78,18 +79,31 @@ enum {
IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
IWL_RATE_6M_INDEX,
IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+ IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX,
+ IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX,
+ IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX,
IWL_RATE_9M_INDEX,
IWL_RATE_12M_INDEX,
+ IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX,
IWL_RATE_18M_INDEX,
+ IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX,
IWL_RATE_24M_INDEX,
+ IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX,
IWL_RATE_36M_INDEX,
+ IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX,
IWL_RATE_48M_INDEX,
+ IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX,
IWL_RATE_54M_INDEX,
+ IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX,
IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
IWL_RATE_60M_INDEX,
- IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
+ IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX,
+ IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX,
+ IWL_RATE_MCS_8_INDEX,
+ IWL_RATE_MCS_9_INDEX,
+ IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
- IWL_RATE_COUNT,
+ IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
};
#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
@@ -108,6 +122,7 @@ enum {
IWL_RATE_2M_PLCP = 20,
IWL_RATE_5M_PLCP = 55,
IWL_RATE_11M_PLCP = 110,
+ IWL_RATE_INVM_PLCP = -1,
};
/*
@@ -164,6 +179,8 @@ enum {
* which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
*/
#define RATE_HT_MCS_RATE_CODE_MSK 0x7
+#define RATE_HT_MCS_NSS_POS 3
+#define RATE_HT_MCS_NSS_MSK (3 << RATE_HT_MCS_NSS_POS)
/* Bit 10: (1) Use Green Field preamble */
#define RATE_HT_MCS_GF_POS 10
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 83cb9b992ea4..c3782b48ded1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -356,6 +356,7 @@ struct iwl_scan_complete_notif {
/* scan offload */
#define IWL_MAX_SCAN_CHANNELS 40
#define IWL_SCAN_MAX_BLACKLIST_LEN 64
+#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
#define IWL_SCAN_MAX_PROFILES 11
#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512
@@ -368,6 +369,12 @@ struct iwl_scan_complete_notif {
#define IWL_FULL_SCAN_MULTIPLIER 5
#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
+enum scan_framework_client {
+ SCAN_CLIENT_SCHED_SCAN = BIT(0),
+ SCAN_CLIENT_NETDETECT = BIT(1),
+ SCAN_CLIENT_ASSET_TRACKING = BIT(2),
+};
+
/**
* struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
* @scan_flags: see enum iwl_scan_flags
@@ -449,11 +456,12 @@ struct iwl_scan_offload_cfg {
* iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
* @ssid: MAC address to filter out
* @reported_rssi: AP rssi reported to the host
+ * @client_bitmap: clients ignore this entry - enum scan_framework_client
*/
struct iwl_scan_offload_blacklist {
u8 ssid[ETH_ALEN];
u8 reported_rssi;
- u8 reserved;
+ u8 client_bitmap;
} __packed;
enum iwl_scan_offload_network_type {
@@ -475,6 +483,7 @@ enum iwl_scan_offload_band_selection {
* @auth_alg: authentication algorithm to match - bitmap
* @network_type: enum iwl_scan_offload_network_type
* @band_selection: enum iwl_scan_offload_band_selection
+ * @client_bitmap: clients waiting for match - enum scan_framework_client
*/
struct iwl_scan_offload_profile {
u8 ssid_index;
@@ -482,7 +491,8 @@ struct iwl_scan_offload_profile {
u8 auth_alg;
u8 network_type;
u8 band_selection;
- u8 reserved[3];
+ u8 client_bitmap;
+ u8 reserved[2];
} __packed;
/**
@@ -491,13 +501,18 @@ struct iwl_scan_offload_profile {
* @profiles: profiles to search for match
* @blacklist_len: length of blacklist
* @num_profiles: num of profiles in the list
+ * @match_notify: clients waiting for match found notification
+ * @pass_match: clients waiting for the results
+ * @active_clients: active clients bitmap - enum scan_framework_client
*/
struct iwl_scan_offload_profile_cfg {
- struct iwl_scan_offload_blacklist blacklist[IWL_SCAN_MAX_BLACKLIST_LEN];
struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
u8 blacklist_len;
u8 num_profiles;
- u8 reserved[2];
+ u8 match_notify;
+ u8 pass_match;
+ u8 active_clients;
+ u8 reserved[3];
} __packed;
/**
@@ -560,4 +575,15 @@ struct iwl_scan_offload_complete {
u8 reserved;
} __packed;
+/**
+ * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
+ * @ssid_bitmap: SSID indexes found in this iteration
+ * @client_bitmap: clients that are active and wait for this notification
+ */
+struct iwl_sched_scan_results {
+ __le16 ssid_bitmap;
+ u8 client_bitmap;
+ u8 reserved;
+};
+
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index a30691a8a85b..4aca5933a65d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -247,7 +247,7 @@ struct iwl_mvm_keyinfo {
} __packed;
/**
- * struct iwl_mvm_add_sta_cmd - Add / modify a station in the fw's station table
+ * struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
* ( REPLY_ADD_STA = 0x18 )
* @add_modify: 1: modify existing, 0: add new station
* @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
@@ -286,7 +286,7 @@ struct iwl_mvm_keyinfo {
* ADD_STA sets up the table entry for one station, either creating a new
* entry, or modifying a pre-existing one.
*/
-struct iwl_mvm_add_sta_cmd {
+struct iwl_mvm_add_sta_cmd_v5 {
u8 add_modify;
u8 unicast_tx_key_id;
u8 multicast_tx_key_id;
@@ -313,6 +313,57 @@ struct iwl_mvm_add_sta_cmd {
} __packed; /* ADD_STA_CMD_API_S_VER_5 */
/**
+ * struct iwl_mvm_add_sta_cmd_v6 - Add / modify a station
+ * VER_6 of this command is quite similar to VER_5, except that it excludes
+ * all fields related to the security key installation.
+ */
+struct iwl_mvm_add_sta_cmd_v6 {
+ u8 add_modify;
+ u8 reserved1;
+ __le16 tid_disable_tx;
+ __le32 mac_id_n_color;
+ u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+ __le16 reserved2;
+ u8 sta_id;
+ u8 modify_mask;
+ __le16 reserved3;
+ __le32 station_flags;
+ __le32 station_flags_msk;
+ u8 add_immediate_ba_tid;
+ u8 remove_immediate_ba_tid;
+ __le16 add_immediate_ba_ssn;
+ __le16 sleep_tx_count;
+ __le16 sleep_state_flags;
+ __le16 assoc_id;
+ __le16 beamform_flags;
+ __le32 tfd_queue_msk;
+} __packed; /* ADD_STA_CMD_API_S_VER_6 */
+
+/**
+ * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * ( REPLY_ADD_STA_KEY = 0x17 )
+ * @sta_id: index of station in uCode's station table
+ * @key_offset: key offset in key storage
+ * @key_flags: type %iwl_sta_key_flag
+ * @key: key material data
+ * @key2: key material data
+ * @rx_secur_seq_cnt: RX security sequence counter for the key
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwl_mvm_add_sta_key_cmd {
+ u8 sta_id;
+ u8 key_offset;
+ __le16 key_flags;
+ u8 key[16];
+ u8 key2[16];
+ u8 rx_secur_seq_cnt[16];
+ u8 tkip_rx_tsc_byte2;
+ u8 reserved;
+ __le16 tkip_rx_ttak[5];
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
+
+/**
* enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
* @ADD_STA_SUCCESS: operation was executed successfully
* @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 66264cc5a016..bad5a552dd8d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -72,17 +72,17 @@
#include "fw-api-d3.h"
#include "fw-api-bt-coex.h"
-/* queue and FIFO numbers by usage */
+/* maximal number of Tx queues in any platform */
+#define IWL_MVM_MAX_QUEUES 20
+
+/* Tx queue numbers */
enum {
IWL_MVM_OFFCHANNEL_QUEUE = 8,
IWL_MVM_CMD_QUEUE = 9,
- IWL_MVM_AUX_QUEUE = 15,
- IWL_MVM_FIRST_AGG_QUEUE = 16,
- IWL_MVM_NUM_QUEUES = 20,
- IWL_MVM_LAST_AGG_QUEUE = IWL_MVM_NUM_QUEUES - 1,
- IWL_MVM_CMD_FIFO = 7
};
+#define IWL_MVM_CMD_FIFO 7
+
#define IWL_MVM_STATION_COUNT 16
/* commands */
@@ -97,6 +97,7 @@ enum {
DBG_CFG = 0x9,
/* station table */
+ ADD_STA_KEY = 0x17,
ADD_STA = 0x18,
REMOVE_STA = 0x19,
@@ -114,6 +115,7 @@ enum {
TIME_EVENT_NOTIFICATION = 0x2a,
BINDING_CONTEXT_CMD = 0x2b,
TIME_QUOTA_CMD = 0x2c,
+ NON_QOS_TX_COUNTER_CMD = 0x2d,
LQ_CMD = 0x4e,
@@ -130,6 +132,7 @@ enum {
SCAN_OFFLOAD_COMPLETE = 0x6D,
SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
+ MATCH_FOUND_NOTIFICATION = 0xd9,
/* Phy */
PHY_CONFIGURATION_CMD = 0x6a,
@@ -178,6 +181,7 @@ enum {
BT_COEX_PRIO_TABLE = 0xcc,
BT_COEX_PROT_ENV = 0xcd,
BT_PROFILE_NOTIFICATION = 0xce,
+ BT_COEX_CI = 0x5d,
REPLY_BEACON_FILTERING_CMD = 0xd2,
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index c76299a3a1e0..83fc5ca04433 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -199,7 +199,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
- if (i < IWL_MVM_FIRST_AGG_QUEUE && i != IWL_MVM_CMD_QUEUE)
+ if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE)
mvm->queue_to_mac80211[i] = i;
else
mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
@@ -243,7 +243,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
lockdep_assert_held(&mvm->mutex);
- if (mvm->init_ucode_run)
+ if (mvm->init_ucode_complete)
return 0;
iwl_init_notification_wait(&mvm->notif_wait,
@@ -264,6 +264,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
if (ret)
goto error;
+ /* Read the NVM only at driver load time, no need to do this twice */
if (read_nvm) {
/* Read nvm */
ret = iwl_nvm_init(mvm);
@@ -273,6 +274,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
}
}
+ /* In case we read the NVM from an external file, load it to the NIC */
+ if (iwlwifi_mod_params.nvm_file)
+ iwl_mvm_load_nvm_to_nic(mvm);
+
ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
WARN_ON(ret);
@@ -310,7 +315,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
MVM_UCODE_CALIB_TIMEOUT);
if (!ret)
- mvm->init_ucode_run = true;
+ mvm->init_ucode_complete = true;
goto out;
error:
@@ -353,8 +358,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
return ret;
- /* If we were in RFKILL during module loading, load init ucode now */
- if (!mvm->init_ucode_run) {
+ /*
+ * If we haven't completed the run of the init ucode during
+ * module loading, load init ucode now
+ * (for example, if we were in RFKILL)
+ */
+ if (!mvm->init_ucode_complete) {
ret = iwl_run_init_mvm_ucode(mvm, false);
if (ret && !iwlmvm_mod_params.init_dbg) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
@@ -424,6 +433,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
+ ret = iwl_mvm_power_update_device_mode(mvm);
+ if (ret)
+ goto error;
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 5fe23a5ea9b6..ab5a7ac90dcd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -80,7 +80,7 @@ struct iwl_mvm_mac_iface_iterator_data {
struct ieee80211_vif *vif;
unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
- unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_FIRST_AGG_QUEUE)];
+ unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_MAX_QUEUES)];
enum iwl_tsf_id preferred_tsf;
bool found_vif;
};
@@ -218,7 +218,7 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
.preferred_tsf = NUM_TSF_IDS,
.used_hw_queues = {
BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
- BIT(IWL_MVM_AUX_QUEUE) |
+ BIT(mvm->aux_queue) |
BIT(IWL_MVM_CMD_QUEUE)
},
.found_vif = false,
@@ -242,9 +242,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
* that we should share it with another interface.
*/
- /* Currently, MAC ID 0 should be used only for the managed vif */
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */
+ switch (vif->type) {
+ case NL80211_IFTYPE_ADHOC:
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!vif->p2p)
+ break;
+ /* fall through */
+ default:
__clear_bit(0, data.available_mac_ids);
+ }
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -302,9 +310,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
/* Find available queues, and allocate them to the ACs */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
u8 queue = find_first_zero_bit(data.used_hw_queues,
- IWL_MVM_FIRST_AGG_QUEUE);
+ mvm->first_agg_queue);
- if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+ if (queue >= mvm->first_agg_queue) {
IWL_ERR(mvm, "Failed to allocate queue\n");
ret = -EIO;
goto exit_fail;
@@ -317,9 +325,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
/* Allocate the CAB queue for softAP and GO interfaces */
if (vif->type == NL80211_IFTYPE_AP) {
u8 queue = find_first_zero_bit(data.used_hw_queues,
- IWL_MVM_FIRST_AGG_QUEUE);
+ mvm->first_agg_queue);
- if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+ if (queue >= mvm->first_agg_queue) {
IWL_ERR(mvm, "Failed to allocate cab queue\n");
ret = -EIO;
goto exit_fail;
@@ -559,8 +567,12 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
/* Don't use cts to self as the fw doesn't support it currently. */
- if (vif->bss_conf.use_cts_prot)
+ if (vif->bss_conf.use_cts_prot) {
cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+ if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
+ cmd->protection_flags |=
+ cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
+ }
/*
* I think that we should enable these 2 flags regardless the HT PROT
@@ -712,6 +724,31 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
+static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
+
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
+ MAC_FILTER_IN_PROBE_REQUEST);
+
+ /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
+ cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
+ cmd.ibss.bi_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+
+ /* TODO: Assumes that the beacon id == mac context id */
+ cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
struct iwl_mvm_go_iterator_data {
bool go_active;
};
@@ -721,7 +758,8 @@ static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
struct iwl_mvm_go_iterator_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_active)
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ mvmvif->ap_ibss_active)
data->go_active = true;
}
@@ -833,9 +871,10 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
/* Set up TX beacon command fields */
- iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
- beacon->data,
- beacon_skb_len);
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
+ beacon->data,
+ beacon_skb_len);
/* Submit command */
cmd.len[0] = sizeof(beacon_cmd);
@@ -848,14 +887,15 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
return iwl_mvm_send_cmd(mvm, &cmd);
}
-/* The beacon template for the AP/GO context has changed and needs update */
+/* The beacon template for the AP/GO/IBSS has changed and needs update */
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct sk_buff *beacon;
int ret;
- WARN_ON(vif->type != NL80211_IFTYPE_AP);
+ WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC);
beacon = ieee80211_beacon_get(mvm->hw, vif);
if (!beacon)
@@ -1018,6 +1058,8 @@ static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
case NL80211_IFTYPE_P2P_DEVICE:
return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
+ case NL80211_IFTYPE_ADHOC:
+ return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
default:
break;
}
@@ -1038,6 +1080,9 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (ret)
return ret;
+ /* will only do anything at resume from D3 time */
+ iwl_mvm_set_last_nonqos_seq(mvm, vif);
+
mvmvif->uploaded = true;
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 9833cdf6177c..f40685c3764e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -77,6 +77,7 @@
#include "iwl-eeprom-parse.h"
#include "fw-api-scan.h"
#include "iwl-phy-db.h"
+#include "testmode.h"
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
{
@@ -138,6 +139,14 @@ static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
}
}
+static int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
+{
+ /* we create the 802.11 header and SSID element */
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID)
+ return mvm->fw->ucode_capa.max_probe_length - 24 - 2;
+ return mvm->fw->ucode_capa.max_probe_length - 24 - 34;
+}
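The constants are plain 802.11 arithmetic: 24 bytes for the management frame header, plus 2 bytes for a zero-length SSID element when IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID is set, or 34 bytes (2-byte element header plus a 32-byte maximum-length SSID) otherwise. In other words:

        max_scan_ie_len = max_probe_length - 24 - (2 + ssid_len),
        where ssid_len is 0 with the NO_BASIC_SSID capability and 32 without it.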
+
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
@@ -158,7 +167,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IEEE80211_HW_SUPPORTS_STATIC_SMPS |
IEEE80211_HW_SUPPORTS_UAPSD;
- hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
+ hw->queues = mvm->first_agg_queue;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
hw->rate_control_algorithm = "iwl-mvm-rs";
@@ -181,6 +190,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_DEVICE);
+ /* IBSS has bugs in older versions */
+ if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
@@ -212,9 +225,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
- /* we create the 802.11 header and a max-length SSID element */
- hw->wiphy->max_scan_ie_len =
- mvm->fw->ucode_capa.max_probe_length - 24 - 34;
+ hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
+
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
@@ -231,6 +243,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+ /* we create the 802.11 header and zero length SSID IE. */
+ hw->wiphy->max_sched_scan_ie_len =
+ SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+ }
+
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_P2P_GO_OPPPS;
@@ -548,7 +569,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
* In short: there's not much we can do at this point, other than
* allocating resources :)
*/
- if (vif->type == NL80211_IFTYPE_AP) {
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
qmask);
@@ -698,7 +720,14 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
* For AP/GO interface, the tear down of the resources allocated to the
* interface is handled as part of the stop_ap flow.
*/
- if (vif->type == NL80211_IFTYPE_AP) {
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+#ifdef CONFIG_NL80211_TESTMODE
+ if (vif == mvm->noa_vif) {
+ mvm->noa_vif = NULL;
+ mvm->noa_duration = 0;
+ }
+#endif
iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
goto out_release;
}
@@ -796,6 +825,27 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
return;
}
iwl_mvm_configure_mcast_filter(mvm, vif);
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status)) {
+ /*
+ * If we're restarting then the firmware will
+ * obviously have lost synchronisation with
+ * the AP. It will attempt to synchronise by
+ * itself, but we can make it more reliable by
+ * scheduling a session protection time event.
+ *
+ * The firmware needs to receive a beacon to
+ * catch up with synchronisation, use 110% of
+ * the beacon interval.
+ *
+ * Set a large maximum delay to allow for more
+ * than a single interface.
+ */
+ u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
+ iwl_mvm_protect_session(mvm, vif, dur, dur,
+ 5 * dur);
+ }
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
/* remove AP station now that the MAC is unassoc */
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
@@ -819,7 +869,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
- iwl_mvm_bt_coex_vif_assoc(mvm, vif);
+ iwl_mvm_bt_coex_vif_change(mvm);
} else if (changes & BSS_CHANGED_BEACON_INFO) {
/*
* We received a beacon _after_ association so
@@ -848,7 +898,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
}
-static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -871,7 +922,7 @@ static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (ret)
goto out_remove;
- mvmvif->ap_active = true;
+ mvmvif->ap_ibss_active = true;
/* Send the bcast station. At this stage the TBTT and DTIM time events
* are added and applied to the scheduler */
@@ -883,10 +934,12 @@ static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (ret)
goto out_rm_bcast;
- /* Need to update the P2P Device MAC */
+ /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+ iwl_mvm_bt_coex_vif_change(mvm);
+
mutex_unlock(&mvm->mutex);
return 0;
@@ -901,7 +954,8 @@ out_unlock:
return ret;
}
-static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -910,9 +964,11 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&mvm->mutex);
- mvmvif->ap_active = false;
+ mvmvif->ap_ibss_active = false;
+
+ iwl_mvm_bt_coex_vif_change(mvm);
- /* Need to update the P2P Device MAC */
+ /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
@@ -924,10 +980,11 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_unlock(&mvm->mutex);
}
-static void iwl_mvm_bss_info_changed_ap(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+static void
+iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
{
/* Need to send a new beacon template to the FW */
if (changes & BSS_CHANGED_BEACON) {
@@ -950,7 +1007,8 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
break;
case NL80211_IFTYPE_AP:
- iwl_mvm_bss_info_changed_ap(mvm, vif, bss_conf, changes);
+ case NL80211_IFTYPE_ADHOC:
+ iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
break;
default:
/* shouldn't happen */
@@ -1163,7 +1221,54 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
/* Try really hard to protect the session and hear a beacon */
- iwl_mvm_protect_session(mvm, vif, duration, min_duration);
+ iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500);
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
+ IWL_DEBUG_SCAN(mvm,
+ "SCHED SCAN request during internal scan - abort\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mvm->scan_status = IWL_MVM_SCAN_SCHED;
+
+ ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
+ if (ret)
+ goto err;
+
+ ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ if (ret)
+ goto err;
+
+ ret = iwl_mvm_sched_scan_start(mvm, req);
+ if (!ret)
+ goto out;
+err:
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static void iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_sched_scan_stop(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -1207,8 +1312,13 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
switch (cmd) {
case SET_KEY:
- if (vif->type == NL80211_IFTYPE_AP && !sta) {
- /* GTK on AP interface is a TX-only key, return 0 */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP) && !sta) {
+ /*
+ * GTK on AP interface is a TX-only key, return 0;
+ * on IBSS they're per-station and because we're lazy
+ * we don't support them for RX, so do the same.
+ */
ret = 0;
key->hw_key_idx = STA_KEY_IDX_INVALID;
break;
@@ -1252,6 +1362,9 @@ static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+ return;
+
iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
}
@@ -1445,6 +1558,7 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
+ iwl_mvm_bt_coex_vif_change(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -1464,14 +1578,14 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
/*
* The AP binding flow is handled as part of the start_ap flow
- * (in bss_info_changed).
+ * (in bss_info_changed), similarly for IBSS.
*/
ret = 0;
goto out_unlock;
case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MONITOR:
break;
default:
@@ -1517,10 +1631,10 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
- if (vif->type == NL80211_IFTYPE_AP)
- goto out_unlock;
-
switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ goto out_unlock;
case NL80211_IFTYPE_MONITOR:
mvmvif->monitor_active = false;
iwl_mvm_update_quotas(mvm, NULL);
@@ -1550,14 +1664,72 @@ static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
}
-static void iwl_mvm_mac_rssi_callback(struct ieee80211_hw *hw,
+#ifdef CONFIG_NL80211_TESTMODE
+static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
+ [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
+ [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
+};
+
+static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- enum ieee80211_rssi_event rssi_event)
+ void *data, int len)
+{
+ struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
+ int err;
+ u32 noa_duration;
+
+ err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
+ if (err)
+ return err;
+
+ if (!tb[IWL_MVM_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
+ case IWL_MVM_TM_CMD_SET_NOA:
+ if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
+ !vif->bss_conf.enable_beacon ||
+ !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
+ return -EINVAL;
+
+ noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
+ if (noa_duration >= vif->bss_conf.beacon_int)
+ return -EINVAL;
+
+ mvm->noa_duration = noa_duration;
+ mvm->noa_vif = vif;
+
+ return iwl_mvm_update_quotas(mvm, NULL);
+ case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
+ /* must be associated client vif - ignore authorized */
+ if (!vif || vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
+ !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
+ return -EINVAL;
+
+ if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
+ return iwl_mvm_enable_beacon_filter(mvm, vif);
+ return iwl_mvm_disable_beacon_filter(mvm, vif);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int err;
- iwl_mvm_bt_rssi_event(mvm, vif, rssi_event);
+ mutex_lock(&mvm->mutex);
+ err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
+ mutex_unlock(&mvm->mutex);
+
+ return err;
}
+#endif
struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
@@ -1578,23 +1750,27 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
.conf_tx = iwl_mvm_mac_conf_tx,
.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+ .sched_scan_start = iwl_mvm_mac_sched_scan_start,
+ .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
.set_key = iwl_mvm_mac_set_key,
.update_tkip_key = iwl_mvm_mac_update_tkip_key,
.remain_on_channel = iwl_mvm_roc,
.cancel_remain_on_channel = iwl_mvm_cancel_roc,
- .rssi_callback = iwl_mvm_mac_rssi_callback,
-
.add_chanctx = iwl_mvm_add_chanctx,
.remove_chanctx = iwl_mvm_remove_chanctx,
.change_chanctx = iwl_mvm_change_chanctx,
.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
- .start_ap = iwl_mvm_start_ap,
- .stop_ap = iwl_mvm_stop_ap,
+ .start_ap = iwl_mvm_start_ap_ibss,
+ .stop_ap = iwl_mvm_stop_ap_ibss,
+ .join_ibss = iwl_mvm_start_ap_ibss,
+ .leave_ibss = iwl_mvm_stop_ap_ibss,
.set_tim = iwl_mvm_set_tim,
+ CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
+
#ifdef CONFIG_PM_SLEEP
/* look at d3.c */
.suspend = iwl_mvm_suspend,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index b0389279cc1e..6235cb729f5c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -162,6 +162,7 @@ enum iwl_power_scheme {
struct iwl_mvm_power_ops {
int (*power_update_mode)(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
+ int (*power_update_device_mode)(struct iwl_mvm *mvm);
int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -241,12 +242,18 @@ enum iwl_mvm_smps_type_request {
* @last_beacon_signal: last beacon rssi signal in dbm
* @ave_beacon_signal: average beacon signal
* @last_cqm_event: rssi of the last cqm event
+* @bt_coex_min_thold: minimum threshold for BT coex
+* @bt_coex_max_thold: maximum threshold for BT coex
+* @last_bt_coex_event: rssi of the last BT coex event
*/
struct iwl_mvm_vif_bf_data {
bool bf_enabled;
bool ba_enabled;
s8 ave_beacon_signal;
s8 last_cqm_event;
+ s8 bt_coex_min_thold;
+ s8 bt_coex_max_thold;
+ s8 last_bt_coex_event;
};
/**
@@ -255,8 +262,8 @@ struct iwl_mvm_vif_bf_data {
* @color: to solve races upon MAC addition and removal
* @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
* @uploaded: indicates the MAC context has been added to the device
- * @ap_active: indicates that ap context is configured, and that the interface
- * should get quota etc.
+ * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
+ * should get quota etc.
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
* @queue_params: QoS params for this MAC
@@ -272,7 +279,7 @@ struct iwl_mvm_vif {
u8 ap_sta_id;
bool uploaded;
- bool ap_active;
+ bool ap_ibss_active;
bool monitor_active;
struct iwl_mvm_vif_bf_data bf_data;
@@ -306,6 +313,9 @@ struct iwl_mvm_vif {
int tx_key_idx;
+ bool seqno_valid;
+ u16 seqno;
+
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 addresses for WoWLAN */
struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
@@ -333,6 +343,7 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
enum iwl_scan_status {
IWL_MVM_SCAN_NONE,
IWL_MVM_SCAN_OS,
+ IWL_MVM_SCAN_SCHED,
};
/**
@@ -434,7 +445,7 @@ struct iwl_mvm {
enum iwl_ucode_type cur_ucode;
bool ucode_loaded;
- bool init_ucode_run;
+ bool init_ucode_complete;
u32 error_event_table;
u32 log_event_table;
@@ -470,6 +481,9 @@ struct iwl_mvm {
enum iwl_scan_status scan_status;
struct iwl_scan_cmd *scan_cmd;
+ /* rx chain antennas set through debugfs for the scan command */
+ u8 scan_rx_ant;
+
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
@@ -479,7 +493,8 @@ struct iwl_mvm {
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct dentry *debugfs_dir;
u32 dbgfs_sram_offset, dbgfs_sram_len;
- bool prevent_power_down_d3;
+ bool disable_power_off;
+ bool disable_power_off_d3;
#endif
struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -523,12 +538,23 @@ struct iwl_mvm {
/* BT-Coex */
u8 bt_kill_msk;
struct iwl_bt_coex_profile_notif last_bt_notif;
+ struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
/* Thermal Throttling and CTkill */
struct iwl_mvm_tt_mgmt thermal_throttle;
s32 temperature; /* Celsius */
const struct iwl_mvm_power_ops *pm_ops;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ u32 noa_duration;
+ struct ieee80211_vif *noa_vif;
+#endif
+
+ /* Tx queues */
+ u8 aux_queue;
+ u8 first_agg_queue;
+ u8 last_agg_queue;
};
/* Extract MVM priv from op_mode and _hw */
@@ -570,6 +596,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
/* Utils */
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
enum ieee80211_band band);
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
@@ -608,6 +637,7 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
/* NVM */
int iwl_nvm_init(struct iwl_mvm *mvm);
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
@@ -682,6 +712,23 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
+/* Scheduled scan */
+int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies);
+int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req);
+void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
+int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
@@ -720,6 +767,13 @@ static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
return mvm->pm_ops->power_disable(mvm, vif);
}
+static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
+{
+ if (mvm->pm_ops->power_update_device_mode)
+ return mvm->pm_ops->power_update_device_mode(mvm);
+ return 0;
+}
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -745,6 +799,15 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int idx);
extern const struct file_operations iwl_dbgfs_d3_test_ops;
+#ifdef CONFIG_PM_SLEEP
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+#else
+static inline void
+iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+#endif
/* BT Coex */
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@@ -754,7 +817,20 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd);
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event);
-void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
+u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+
+enum iwl_bt_kill_msk {
+ BT_KILL_MSK_DEFAULT,
+ BT_KILL_MSK_SCO_HID_A2DP,
+ BT_KILL_MSK_REDUCED_TXPOW,
+ BT_KILL_MSK_MAX,
+};
+extern const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX];
+extern const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX];
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index edb94ea31654..2beffd028b67 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -77,7 +77,7 @@ static const int nvm_to_read[] = {
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
-#define IWL_MAX_NVM_SECTION_SIZE 6000
+#define IWL_MAX_NVM_SECTION_SIZE 7000
#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0
@@ -259,6 +259,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
#define MAX_NVM_FILE_LEN 16384
/*
+ * Reads external NVM from a file into mvm->nvm_sections
+ *
* HOW TO CREATE THE NVM FILE FORMAT:
* ------------------------------
* 1. create hex file, format:
@@ -277,20 +279,23 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
*
* 4. save as "iNVM_xxx.bin" under /lib/firmware
*/
-static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
+static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
{
- int ret, section_id, section_size;
+ int ret, section_size;
+ u16 section_id;
const struct firmware *fw_entry;
const struct {
__le16 word1;
__le16 word2;
u8 data[];
} *file_sec;
- const u8 *eof;
+ const u8 *eof, *temp;
#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
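/*
 * For illustration (values chosen arbitrarily): a section header whose decoded
 * word1 is 0x0002 and word2 is 0x1000 yields
 * NVM_WORD1_LEN(0x0002) = 8 * 2 = 16 bytes of payload, belonging to section
 * id NVM_WORD2_ID(0x1000) = 1.
 */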
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
+
/*
* Obtain NVM image via request_firmware. Since we already used
* request_firmware_nowait() for the firmware binary load and only
@@ -362,12 +367,18 @@ static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
break;
}
- ret = iwl_nvm_write_section(mvm, section_id, file_sec->data,
- section_size);
- if (ret < 0) {
- IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+ temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
+ if (!temp) {
+ ret = -ENOMEM;
+ break;
+ }
+ if (WARN_ON(section_id >= NVM_NUM_OF_SECTIONS)) {
+ IWL_ERR(mvm, "Invalid NVM section ID\n");
+ ret = -EINVAL;
break;
}
+ mvm->nvm_sections[section_id].data = temp;
+ mvm->nvm_sections[section_id].length = section_size;
/* advance to the next section */
file_sec = (void *)(file_sec->data + section_size);
@@ -377,6 +388,28 @@ out:
return ret;
}
+/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
+{
+ int i, ret;
+ u16 section_id;
+ struct iwl_nvm_section *sections = mvm->nvm_sections;
+
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");
+
+ for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+ section_id = nvm_to_read[i];
+ ret = iwl_nvm_write_section(mvm, section_id,
+ sections[section_id].data,
+ sections[section_id].length);
+ if (ret < 0) {
+ IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+ break;
+ }
+ }
+ return ret;
+}
+
int iwl_nvm_init(struct iwl_mvm *mvm)
{
int ret, i, section;
@@ -385,36 +418,36 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
/* load external NVM if configured */
if (iwlwifi_mod_params.nvm_file) {
/* move to External NVM flow */
- ret = iwl_mvm_load_external_nvm(mvm);
+ ret = iwl_mvm_read_external_nvm(mvm);
if (ret)
return ret;
- }
-
- /* Read From FW NVM */
- IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
-
- /* TODO: find correct NVM max size for a section */
- nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
- GFP_KERNEL);
- if (!nvm_buffer)
- return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
- section = nvm_to_read[i];
- /* we override the constness for initial read */
- ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
- if (ret < 0)
- break;
- temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
- if (!temp) {
- ret = -ENOMEM;
- break;
+ } else {
+ /* Read From FW NVM */
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
+
+ /* TODO: find correct NVM max size for a section */
+ nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
+ GFP_KERNEL);
+ if (!nvm_buffer)
+ return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+ section = nvm_to_read[i];
+ /* we override the constness for initial read */
+ ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
+ if (ret < 0)
+ break;
+ temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
+ if (!temp) {
+ ret = -ENOMEM;
+ break;
+ }
+ mvm->nvm_sections[section].data = temp;
+ mvm->nvm_sections[section].length = ret;
}
- mvm->nvm_sections[section].data = temp;
- mvm->nvm_sections[section].length = ret;
+ kfree(nvm_buffer);
+ if (ret < 0)
+ return ret;
}
- kfree(nvm_buffer);
- if (ret < 0)
- return ret;
mvm->nvm_data = iwl_parse_nvm_sections(mvm);
if (!mvm->nvm_data)
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 2fcc8ef88a68..59b7cb3c6134 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -224,6 +224,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
+ RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
+ iwl_mvm_rx_scan_offload_complete_notif, false),
+ RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
+ false),
RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
@@ -249,6 +253,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(TIME_EVENT_NOTIFICATION),
CMD(BINDING_CONTEXT_CMD),
CMD(TIME_QUOTA_CMD),
+ CMD(NON_QOS_TX_COUNTER_CMD),
CMD(RADIO_VERSION_NOTIFICATION),
CMD(SCAN_REQUEST_CMD),
CMD(SCAN_ABORT_CMD),
@@ -260,10 +265,12 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(CALIB_RES_NOTIF_PHY_DB),
CMD(SET_CALIB_DEFAULT_CMD),
CMD(CALIBRATION_COMPLETE_NOTIFICATION),
+ CMD(ADD_STA_KEY),
CMD(ADD_STA),
CMD(REMOVE_STA),
CMD(LQ_CMD),
CMD(SCAN_OFFLOAD_CONFIG_CMD),
+ CMD(MATCH_FOUND_NOTIFICATION),
CMD(SCAN_OFFLOAD_REQUEST_CMD),
CMD(SCAN_OFFLOAD_ABORT_CMD),
CMD(SCAN_OFFLOAD_COMPLETE),
@@ -303,6 +310,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(REPLY_BEACON_FILTERING_CMD),
CMD(REPLY_THERMAL_MNG_BACKOFF),
CMD(MAC_PM_POWER_TABLE),
+ CMD(BT_COEX_CI),
};
#undef CMD
@@ -344,6 +352,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
+ mvm->aux_queue = 15;
+ mvm->first_agg_queue = 16;
+ mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
+ if (mvm->cfg->base_params->num_of_queues == 16) {
+ mvm->aux_queue = 11;
+ mvm->first_agg_queue = 12;
+ }
+
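/*
 * Rough illustration of the queue layout set up above (queue counts depend on
 * the device configuration): e.g. with a 31-queue device the aux queue is 15
 * and aggregation uses queues 16..30, while on a 16-queue device the aux
 * queue moves to 11 and aggregation uses queues 12..15.
 */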
mutex_init(&mvm->mutex);
spin_lock_init(&mvm->async_handlers_lock);
INIT_LIST_HEAD(&mvm->time_event_list);
@@ -401,24 +417,32 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
mvm->cfg->name, mvm->trans->hw_rev);
- err = iwl_trans_start_hw(mvm->trans);
- if (err)
- goto out_free;
-
iwl_mvm_tt_initialize(mvm);
- mutex_lock(&mvm->mutex);
- err = iwl_run_init_mvm_ucode(mvm, true);
- mutex_unlock(&mvm->mutex);
- /* returns 0 if successful, 1 if success but in rfkill */
- if (err < 0 && !iwlmvm_mod_params.init_dbg) {
- IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
- goto out_free;
- }
+ /*
+ * If the NVM exists in an external file,
+ * there is no need to power up the NIC at driver load
+ */
+ if (iwlwifi_mod_params.nvm_file) {
+ iwl_nvm_init(mvm);
+ } else {
+ err = iwl_trans_start_hw(mvm->trans);
+ if (err)
+ goto out_free;
+
+ mutex_lock(&mvm->mutex);
+ err = iwl_run_init_mvm_ucode(mvm, true);
+ mutex_unlock(&mvm->mutex);
+ /* returns 0 if successful, 1 if success but in rfkill */
+ if (err < 0 && !iwlmvm_mod_params.init_dbg) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+ goto out_free;
+ }
- /* Stop the hw after the ALIVE and NVM has been read */
- if (!iwlmvm_mod_params.init_dbg)
- iwl_trans_stop_hw(mvm->trans, false);
+ /* Stop the hw after the ALIVE and NVM has been read */
+ if (!iwlmvm_mod_params.init_dbg)
+ iwl_trans_stop_hw(mvm->trans, false);
+ }
scan_size = sizeof(struct iwl_scan_cmd) +
mvm->fw->ucode_capa.max_probe_length +
@@ -449,7 +473,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
out_free:
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
- iwl_trans_stop_hw(trans, true);
+ if (!iwlwifi_mod_params.nvm_file)
+ iwl_trans_stop_hw(trans, true);
ieee80211_free_hw(mvm->hw);
return NULL;
}
@@ -715,6 +740,9 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
case IWL_MVM_SCAN_OS:
ieee80211_scan_completed(mvm->hw, true);
break;
+ case IWL_MVM_SCAN_SCHED:
+ ieee80211_sched_scan_stopped(mvm->hw);
+ break;
}
if (mvm->restart_fw > 0)
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 21407a353a3b..550824aa84ea 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -273,7 +273,10 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
if (!mvmvif->queue_params[ac].uapsd)
continue;
- cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+ if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+
cmd->uapsd_ac_flags |= BIT(ac);
/* QNDP TID - the highest TID with no admission control */
@@ -297,11 +300,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
}
if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
- cmd->rx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
- cmd->tx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
-
if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
BIT(IEEE80211_AC_VI) |
BIT(IEEE80211_AC_BE) |
@@ -316,10 +314,31 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
}
cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
- cmd->heavy_tx_thld_packets =
- IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
- cmd->heavy_rx_thld_packets =
- IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+
+ if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
+ cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+ } else {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+ }
+
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
+ } else {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+ }
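/*
 * Net effect of the branches above (constant names as used in this file):
 * WoWLAN runs and snooze-enabled configurations use the
 * IWL_MVM_WOWLAN_PS_*_DATA_TIMEOUT values, everything else keeps the
 * IWL_MVM_UAPSD_*_DATA_TIMEOUT defaults, and the heavy-traffic packet
 * thresholds switch to their *_SNOOZE_* variants only when snooze is enabled.
 */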
cmd->heavy_tx_thld_percentage =
IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
cmd->heavy_rx_thld_percentage =
@@ -427,6 +446,32 @@ static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
sizeof(cmd), &cmd);
}
+static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+{
+ struct iwl_device_power_cmd cmd = {
+ .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
+ };
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
+ return 0;
+
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 :
+ mvm->disable_power_off)
+ cmd.flags &=
+ cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+ IWL_DEBUG_POWER(mvm,
+ "Sending device power command with flags = 0x%X\n",
+ cmd.flags);
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, sizeof(cmd),
+ &cmd);
+}
+
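/*
 * Illustrative outcomes of the flag logic above (flag names as in this file):
 *   power_scheme != CAM, debugfs knob off -> DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK
 *   power_scheme == CAM, debugfs knob off -> POWER_SAVE_ENA | DEVICE_POWER_FLAGS_CAM_MSK
 *   disable_power_off(_d3) set in debugfs -> POWER_SAVE_ENA cleared again
 */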
#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, char *buf,
@@ -437,10 +482,11 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
- pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
- 0 : 1);
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+ 0 : 1);
pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
iwlmvm_mod_params.power_scheme);
pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -606,6 +652,7 @@ int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
const struct iwl_mvm_power_ops pm_mac_ops = {
.power_update_mode = iwl_mvm_power_mac_update_mode,
+ .power_update_device_mode = iwl_mvm_power_update_device,
.power_disable = iwl_mvm_power_mac_disable,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 5c6ae16ec52b..17e2bc827f9a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -110,7 +110,8 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
data->n_interfaces[id]++;
break;
case NL80211_IFTYPE_AP:
- if (mvmvif->ap_active)
+ case NL80211_IFTYPE_ADHOC:
+ if (mvmvif->ap_ibss_active)
data->n_interfaces[id]++;
break;
case NL80211_IFTYPE_MONITOR:
@@ -119,16 +120,45 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
break;
case NL80211_IFTYPE_P2P_DEVICE:
break;
- case NL80211_IFTYPE_ADHOC:
- if (vif->bss_conf.ibss_joined)
- data->n_interfaces[id]++;
- break;
default:
WARN_ON_ONCE(1);
break;
}
}
+static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
+ struct iwl_time_quota_cmd *cmd)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+ struct iwl_mvm_vif *mvmvif;
+ int i, phy_id = -1, beacon_int = 0;
+
+ if (!mvm->noa_duration || !mvm->noa_vif)
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
+ if (!mvmvif->ap_ibss_active)
+ return;
+
+ phy_id = mvmvif->phy_ctxt->id;
+ beacon_int = mvm->noa_vif->bss_conf.beacon_int;
+
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ u32 id_n_c = le32_to_cpu(cmd->quotas[i].id_and_color);
+ u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
+ u32 quota = le32_to_cpu(cmd->quotas[i].quota);
+
+ if (id != phy_id)
+ continue;
+
+ quota *= (beacon_int - mvm->noa_duration);
+ quota /= beacon_int;
+
+ cmd->quotas[i].quota = cpu_to_le32(quota);
+ }
+#endif
+}
+
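/*
 * Worked example for the NoA adjustment above (illustrative numbers): with
 * beacon_int = 100 TU, noa_duration = 25 TU and a computed quota of 128 on
 * the matching PHY, the binding's quota becomes 128 * (100 - 25) / 100 = 96,
 * i.e. airtime is reduced in proportion to the advertised absence period.
 */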
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
{
struct iwl_time_quota_cmd cmd = {};
@@ -196,6 +226,8 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
/* Give the remainder of the session to the first binding */
le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
+ iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
+
ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 4ffaa3fa153f..a0b4cc8d9c3b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -82,13 +82,24 @@ static const u8 ant_toggle_lookup[] = {
[ANT_ABC] = ANT_ABC,
};
-#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
- [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
- IWL_RATE_SISO_##s##M_PLCP, \
- IWL_RATE_MIMO2_##s##M_PLCP,\
- IWL_RATE_##rp##M_INDEX, \
+#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
+ [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
+ IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\
+ IWL_RATE_##rp##M_INDEX, \
IWL_RATE_##rn##M_INDEX }
+#define IWL_DECLARE_MCS_RATE(s) \
+ [IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP, \
+ IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \
+ IWL_RATE_INVM_INDEX, \
+ IWL_RATE_INVM_INDEX }
+
/*
* Parameter order:
* rate, ht rate, prev rate, next rate
@@ -102,16 +113,17 @@ static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */
IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /*5.5mbps */
IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */
- IWL_DECLARE_RATE_INFO(6, 6, 5, 11), /* 6mbps */
- IWL_DECLARE_RATE_INFO(9, 6, 6, 11), /* 9mbps */
- IWL_DECLARE_RATE_INFO(12, 12, 11, 18), /* 12mbps */
- IWL_DECLARE_RATE_INFO(18, 18, 12, 24), /* 18mbps */
- IWL_DECLARE_RATE_INFO(24, 24, 18, 36), /* 24mbps */
- IWL_DECLARE_RATE_INFO(36, 36, 24, 48), /* 36mbps */
- IWL_DECLARE_RATE_INFO(48, 48, 36, 54), /* 48mbps */
- IWL_DECLARE_RATE_INFO(54, 54, 48, INV), /* 54mbps */
- IWL_DECLARE_RATE_INFO(60, 60, 48, INV), /* 60mbps */
- /* FIXME:RS: ^^ should be INV (legacy) */
+ IWL_DECLARE_RATE_INFO(6, 0, 5, 11), /* 6mbps ; MCS 0 */
+ IWL_DECLARE_RATE_INFO(9, INV, 6, 11), /* 9mbps */
+ IWL_DECLARE_RATE_INFO(12, 1, 11, 18), /* 12mbps ; MCS 1 */
+ IWL_DECLARE_RATE_INFO(18, 2, 12, 24), /* 18mbps ; MCS 2 */
+ IWL_DECLARE_RATE_INFO(24, 3, 18, 36), /* 24mbps ; MCS 3 */
+ IWL_DECLARE_RATE_INFO(36, 4, 24, 48), /* 36mbps ; MCS 4 */
+ IWL_DECLARE_RATE_INFO(48, 5, 36, 54), /* 48mbps ; MCS 5 */
+ IWL_DECLARE_RATE_INFO(54, 6, 48, INV), /* 54mbps ; MCS 6 */
+ IWL_DECLARE_MCS_RATE(7), /* MCS 7 */
+ IWL_DECLARE_MCS_RATE(8), /* MCS 8 */
+ IWL_DECLARE_MCS_RATE(9), /* MCS 9 */
};
static inline u8 rs_extract_rate(u32 rate_n_flags)
@@ -124,26 +136,30 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
int idx = 0;
- /* HT rate format */
if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = rs_extract_rate(rate_n_flags);
-
- WARN_ON_ONCE(idx >= IWL_RATE_MIMO3_6M_PLCP);
- if (idx >= IWL_RATE_MIMO2_6M_PLCP)
- idx = idx - IWL_RATE_MIMO2_6M_PLCP;
+ idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK;
+ idx += IWL_RATE_MCS_0_INDEX;
- idx += IWL_FIRST_OFDM_RATE;
- /* skip 9M not supported in ht*/
+ /* skip 9M not supported in HT */
if (idx >= IWL_RATE_9M_INDEX)
idx += 1;
- if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
+ if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
return idx;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ idx += IWL_RATE_MCS_0_INDEX;
- /* legacy rate format, search for match in table */
+ /* skip 9M not supported in VHT */
+ if (idx >= IWL_RATE_9M_INDEX)
+ idx++;
+ if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
+ return idx;
} else {
+ /* legacy rate format, search for match in table */
+
+ u8 legacy_rate = rs_extract_rate(rate_n_flags);
for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
- if (iwl_rates[idx].plcp ==
- rs_extract_rate(rate_n_flags))
+ if (iwl_rates[idx].plcp == legacy_rate)
return idx;
}
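/*
 * Rough example of the HT branch above, assuming IWL_RATE_MCS_0_INDEX aliases
 * the 6 Mbps entry and IWL_RATE_9M_INDEX is the entry right after it (as the
 * iwl_rates[] table below suggests): HT MCS 0 maps straight to the 6 Mbps
 * slot, while HT MCS 1 lands on the 9 Mbps slot and is bumped by one, ending
 * up on the 12 Mbps / MCS 1 entry.
 */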
@@ -155,6 +171,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta);
static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
@@ -180,35 +197,52 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
*/
static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
- 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+ 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
};
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
- {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
- {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
- {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
+/* Expected TpT tables. 4 indexes:
+ * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
+ */
+static s32 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0},
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0},
+ {0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0},
+ {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
};
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
- {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
- {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
- {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
+static s32 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275},
+ {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280},
+ {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173},
+ {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
+};
+
+static s32 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308},
+ {0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312},
+ {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
+ {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
};
static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
- {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
- {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
- {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
+ {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
+ {0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
+ {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
};
static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
- {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
- {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
- {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
+ {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300},
+ {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303},
+ {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
+ {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
+};
+
+static s32 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319},
+ {0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320},
+ {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
+ {0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
};
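/*
 * Illustrative lookup into the tables above (row meaning taken from the
 * "4 indexes" comment): a SISO, 80 MHz, SGI, aggregated link reads
 * expected_tpt_siso_80MHz[3], so e.g. the MCS 9 column predicts 2691, in the
 * same relative units as the rest of the tables.
 */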
/* mbps, mcs */
@@ -263,7 +297,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
if (lq_sta->dbg_fixed_rate) {
- rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+ rs_fill_link_cmd(NULL, NULL, lq_sta, lq_sta->dbg_fixed_rate);
iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
}
}
@@ -275,17 +309,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
{
int ret = -EAGAIN;
- /*
- * Don't create TX aggregation sessions when in high
- * BT traffic, as they would just be disrupted by BT.
- */
- if (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) {
- IWL_DEBUG_COEX(mvm, "BT traffic (%d), no aggregation allowed\n",
- BT_MBOX_MSG(&mvm->last_bt_notif,
- 3, TRAFFIC_LOAD));
- return ret;
- }
-
IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
@@ -416,49 +439,54 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
*/
/* FIXME:RS:remove this function and put the flags statically in the table */
static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
- struct iwl_scale_tbl_info *tbl,
- int index, u8 use_green)
+ struct iwl_scale_tbl_info *tbl, int index)
{
u32 rate_n_flags = 0;
+ rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+ RATE_MCS_ANT_ABC_MSK);
+
if (is_legacy(tbl->lq_type)) {
- rate_n_flags = iwl_rates[index].plcp;
+ rate_n_flags |= iwl_rates[index].plcp;
if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
rate_n_flags |= RATE_MCS_CCK_MSK;
- } else if (is_Ht(tbl->lq_type)) {
- if (index > IWL_LAST_OFDM_RATE) {
+ return rate_n_flags;
+ }
+
+ if (is_ht(tbl->lq_type)) {
+ if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
- index = IWL_LAST_OFDM_RATE;
+ index = IWL_LAST_HT_RATE;
}
- rate_n_flags = RATE_MCS_HT_MSK;
+ rate_n_flags |= RATE_MCS_HT_MSK;
- if (is_siso(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_siso;
- else if (is_mimo2(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_mimo2;
+ if (is_ht_siso(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_ht_siso;
+ else if (is_ht_mimo2(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_ht_mimo2;
else
WARN_ON_ONCE(1);
+ } else if (is_vht(tbl->lq_type)) {
+ if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
+ IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
+ index = IWL_LAST_VHT_RATE;
+ }
+ rate_n_flags |= RATE_MCS_VHT_MSK;
+ if (is_vht_siso(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_vht_siso;
+ else if (is_vht_mimo2(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_vht_mimo2;
+ else
+ WARN_ON_ONCE(1);
+
} else {
IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
}
- rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
- RATE_MCS_ANT_ABC_MSK);
-
- if (is_Ht(tbl->lq_type)) {
- if (tbl->is_ht40)
- rate_n_flags |= RATE_MCS_CHAN_WIDTH_40;
- if (tbl->is_SGI)
- rate_n_flags |= RATE_MCS_SGI_MSK;
-
- if (use_green) {
- rate_n_flags |= RATE_HT_MCS_GF_MSK;
- if (is_siso(tbl->lq_type) && tbl->is_SGI) {
- rate_n_flags &= ~RATE_MCS_SGI_MSK;
- IWL_ERR(mvm, "GF was set with SGI:SISO\n");
- }
- }
- }
+ rate_n_flags |= tbl->bw;
+ if (tbl->is_SGI)
+ rate_n_flags |= RATE_MCS_SGI_MSK;
+
return rate_n_flags;
}
@@ -473,7 +501,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
{
u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
- u8 mcs;
+ u8 nss;
memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
*rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
@@ -483,41 +511,62 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
return -EINVAL;
}
tbl->is_SGI = 0; /* default legacy setup */
- tbl->is_ht40 = 0;
+ tbl->bw = 0;
tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
tbl->lq_type = LQ_NONE;
tbl->max_search = IWL_MAX_SEARCH;
- /* legacy rate format */
- if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+ /* Legacy */
+ if (!(rate_n_flags & RATE_MCS_HT_MSK) &&
+ !(rate_n_flags & RATE_MCS_VHT_MSK)) {
if (num_of_ant == 1) {
if (band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
+ tbl->lq_type = LQ_LEGACY_A;
else
- tbl->lq_type = LQ_G;
+ tbl->lq_type = LQ_LEGACY_G;
}
- /* HT rate format */
- } else {
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- tbl->is_SGI = 1;
-
- if (rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
- tbl->is_ht40 = 1;
-
- mcs = rs_extract_rate(rate_n_flags);
-
- /* SISO */
- if (mcs <= IWL_RATE_SISO_60M_PLCP) {
- if (num_of_ant == 1)
- tbl->lq_type = LQ_SISO; /*else NONE*/
- /* MIMO2 */
- } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
- if (num_of_ant == 2)
- tbl->lq_type = LQ_MIMO2;
+
+ return 0;
+ }
+
+ /* HT or VHT */
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ tbl->is_SGI = 1;
+
+ tbl->bw = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ nss = ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
+ RATE_HT_MCS_NSS_POS) + 1;
+
+ if (nss == 1) {
+ tbl->lq_type = LQ_HT_SISO;
+ WARN_ON_ONCE(num_of_ant != 1);
+ } else if (nss == 2) {
+ tbl->lq_type = LQ_HT_MIMO2;
+ WARN_ON_ONCE(num_of_ant != 2);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+
+ if (nss == 1) {
+ tbl->lq_type = LQ_VHT_SISO;
+ WARN_ON_ONCE(num_of_ant != 1);
+ } else if (nss == 2) {
+ tbl->lq_type = LQ_VHT_MIMO2;
+ WARN_ON_ONCE(num_of_ant != 2);
} else {
- WARN_ON_ONCE(num_of_ant == 3);
+ WARN_ON_ONCE(1);
}
}
+
+ WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_160);
+ WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_80 &&
+ !is_vht(tbl->lq_type));
+
return 0;
}
@@ -550,22 +599,6 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
}
/**
- * Green-field mode is valid if the station supports it and
- * there are no non-GF stations present in the BSS.
- */
-static bool rs_use_green(struct ieee80211_sta *sta)
-{
- /*
- * There's a bug somewhere in this code that causes the
- * scaling to get stuck because GF+SGI can't be combined
- * in SISO rates. Until we find that bug, disable GF, it
- * has only limited benefit and we still interoperate with
- * GF APs since we can always receive GF transmissions.
- */
- return false;
-}
-
-/**
* rs_get_supported_rates - get the available rates
*
* if management frame or broadcast frame only return
@@ -576,16 +609,15 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
struct ieee80211_hdr *hdr,
enum iwl_table_type rate_type)
{
- if (is_legacy(rate_type)) {
+ if (is_legacy(rate_type))
return lq_sta->active_legacy_rate;
- } else {
- if (is_siso(rate_type))
- return lq_sta->active_siso_rate;
- else {
- WARN_ON_ONCE(!is_mimo2(rate_type));
- return lq_sta->active_mimo2_rate;
- }
- }
+ else if (is_siso(rate_type))
+ return lq_sta->active_siso_rate;
+ else if (is_mimo2(rate_type))
+ return lq_sta->active_mimo2_rate;
+
+ WARN_ON_ONCE(1);
+ return 0;
}
static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
@@ -652,7 +684,6 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
u16 rate_mask;
u16 high_low;
u8 switch_to_legacy = 0;
- u8 is_green = lq_sta->is_green;
struct iwl_mvm *mvm = lq_sta->drv;
/* check if we need to switch from HT to legacy rates.
@@ -662,15 +693,15 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
switch_to_legacy = 1;
scale_index = rs_ht_to_legacy[scale_index];
if (lq_sta->band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
+ tbl->lq_type = LQ_LEGACY_A;
else
- tbl->lq_type = LQ_G;
+ tbl->lq_type = LQ_LEGACY_G;
if (num_of_ant(tbl->ant_type) > 1)
tbl->ant_type =
first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
- tbl->is_ht40 = 0;
+ tbl->bw = 0;
tbl->is_SGI = 0;
tbl->max_search = IWL_MAX_SEARCH;
}
@@ -701,7 +732,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
low = scale_index;
out:
- return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+ return rate_n_flags_from_tbl(lq_sta->drv, tbl, low);
}
/*
@@ -714,6 +745,18 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a,
(a->is_SGI == b->is_SGI);
}
+static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
+{
+ if (flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ return RATE_MCS_CHAN_WIDTH_40;
+ else if (flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ return RATE_MCS_CHAN_WIDTH_80;
+ else if (flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ return RATE_MCS_CHAN_WIDTH_160;
+
+ return RATE_MCS_CHAN_WIDTH_20;
+}
+
/*
* mac80211 sends us Tx status
*/
@@ -783,16 +826,23 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
*/
if (info->band == IEEE80211_BAND_2GHZ)
mac_index += IWL_FIRST_OFDM_RATE;
+ } else if (mac_flags & IEEE80211_TX_RC_VHT_MCS) {
+ mac_index &= RATE_VHT_MCS_RATE_CODE_MSK;
+ if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
+ mac_index++;
}
+
/* Here we actually compare this rate to the latest LQ command */
if ((mac_index < 0) ||
(tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
- (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
+ (tbl_type.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
(tbl_type.ant_type != info->status.antenna) ||
(!!(tx_rate & RATE_MCS_HT_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+ !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+ (!!(tx_rate & RATE_MCS_VHT_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
(!!(tx_rate & RATE_HT_MCS_GF_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
+ !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
(rs_index != mac_index)) {
IWL_DEBUG_RATE(mvm,
"initial rate %d does not match %d (0x%x)\n",
@@ -947,7 +997,8 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
/* Check for invalid LQ type */
- if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+ if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_ht(tbl->lq_type) &&
+ !(is_vht(tbl->lq_type)))) {
tbl->expected_tpt = expected_tpt_legacy;
return;
}
@@ -958,18 +1009,40 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
return;
}
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
/* Choose among many HT tables depending on number of streams
- * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+ * (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation
* status */
- if (is_siso(tbl->lq_type) && !tbl->is_ht40)
- ht_tbl_pointer = expected_tpt_siso20MHz;
- else if (is_siso(tbl->lq_type))
- ht_tbl_pointer = expected_tpt_siso40MHz;
- else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
- ht_tbl_pointer = expected_tpt_mimo2_20MHz;
- else {
- WARN_ON_ONCE(!is_mimo2(tbl->lq_type));
- ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+ if (is_siso(tbl->lq_type)) {
+ switch (tbl->bw) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ ht_tbl_pointer = expected_tpt_siso_20MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ ht_tbl_pointer = expected_tpt_siso_40MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ ht_tbl_pointer = expected_tpt_siso_80MHz;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ } else if (is_mimo2(tbl->lq_type)) {
+ switch (tbl->bw) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ ht_tbl_pointer = expected_tpt_mimo2_80MHz;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ } else {
+ WARN_ON_ONCE(1);
}
if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
@@ -1084,9 +1157,47 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
return new_rate;
}
-static bool iwl_is_ht40_tx_allowed(struct ieee80211_sta *sta)
+/* Move to the next action and wrap around to the first action in case
+ * we're at the last action. Assumes actions start at 0.
+ */
+static inline void rs_move_next_action(struct iwl_scale_tbl_info *tbl,
+ u8 last_action)
+{
+ BUILD_BUG_ON(IWL_LEGACY_FIRST_ACTION != 0);
+ BUILD_BUG_ON(IWL_SISO_FIRST_ACTION != 0);
+ BUILD_BUG_ON(IWL_MIMO2_FIRST_ACTION != 0);
+
+ tbl->action = (tbl->action + 1) % (last_action + 1);
+}
+
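/*
 * Tiny example of the wrap-around above: with last_action = 2 the action
 * sequence cycles 0 -> 1 -> 2 -> 0, since (2 + 1) % (2 + 1) == 0.
 */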
+static void rs_set_bw_from_sta(struct iwl_scale_tbl_info *tbl,
+ struct ieee80211_sta *sta)
+{
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
+ tbl->bw = RATE_MCS_CHAN_WIDTH_80;
+ else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+ tbl->bw = RATE_MCS_CHAN_WIDTH_40;
+ else
+ tbl->bw = RATE_MCS_CHAN_WIDTH_20;
+}
+
+static bool rs_sgi_allowed(struct iwl_scale_tbl_info *tbl,
+ struct ieee80211_sta *sta)
{
- return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+ if (is_ht20(tbl) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ return true;
+ if (is_ht40(tbl) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ return true;
+ if (is_ht80(tbl) && (vht_cap->cap &
+ IEEE80211_VHT_CAP_SHORT_GI_80))
+ return true;
+
+ return false;
}
/*
@@ -1099,7 +1210,6 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
{
u16 rate_mask;
s32 rate;
- s8 is_green = lq_sta->is_green;
if (!sta->ht_cap.ht_supported)
return -1;
@@ -1113,16 +1223,12 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
- tbl->lq_type = LQ_MIMO2;
+ tbl->lq_type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
tbl->action = 0;
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_mimo2_rate;
- if (iwl_is_ht40_tx_allowed(sta))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
+ rs_set_bw_from_sta(tbl, sta);
rs_set_expected_tpt_table(lq_sta, tbl);
rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
@@ -1134,10 +1240,10 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
rate, rate_mask);
return -1;
}
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
- IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
+ IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
+ tbl->current_rate);
return 0;
}
@@ -1150,7 +1256,6 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
struct iwl_scale_tbl_info *tbl, int index)
{
u16 rate_mask;
- u8 is_green = lq_sta->is_green;
s32 rate;
if (!sta->ht_cap.ht_supported)
@@ -1158,19 +1263,12 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n");
- tbl->lq_type = LQ_SISO;
+ tbl->lq_type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
tbl->action = 0;
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_siso_rate;
- if (iwl_is_ht40_tx_allowed(sta))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
- if (is_green)
- tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
-
+ rs_set_bw_from_sta(tbl, sta);
rs_set_expected_tpt_table(lq_sta, tbl);
rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
@@ -1181,9 +1279,9 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
rate, rate_mask);
return -1;
}
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
- IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
+ IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
+ tbl->current_rate);
return 0;
}
@@ -1211,14 +1309,10 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
while (1) {
lq_sta->action_counter++;
switch (tbl->action) {
- case IWL_LEGACY_SWITCH_ANTENNA1:
- case IWL_LEGACY_SWITCH_ANTENNA2:
+ case IWL_LEGACY_SWITCH_ANTENNA:
IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n");
- if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
+ if (tx_chains_num <= 1)
break;
/* Don't change antenna if success has been great */
@@ -1273,9 +1367,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
default:
WARN_ON_ONCE(1);
}
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
if (tbl->action == start_action)
break;
@@ -1285,9 +1377,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
return 0;
@@ -1300,12 +1390,10 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct ieee80211_sta *sta, int index)
{
- u8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
@@ -1314,40 +1402,17 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
u8 update_search_tbl_counter = 0;
int ret;
- switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
- case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
- /* nothing */
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
- /* avoid antenna B unless MIMO */
- if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
- tbl->action = IWL_SISO_SWITCH_MIMO2;
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
- case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
- /* avoid antenna B and MIMO */
- valid_tx_ant =
- first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
- if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
- break;
- default:
- IWL_ERR(mvm, "Invalid BT load %d",
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
- break;
- }
+ if (tbl->action == IWL_SISO_SWITCH_MIMO2 &&
+ !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+ tbl->action = IWL_SISO_SWITCH_ANTENNA;
start_action = tbl->action;
while (1) {
lq_sta->action_counter++;
switch (tbl->action) {
- case IWL_SISO_SWITCH_ANTENNA1:
- case IWL_SISO_SWITCH_ANTENNA2:
+ case IWL_SISO_SWITCH_ANTENNA:
IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n");
- if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
+ if (tx_chains_num <= 1)
break;
if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
@@ -1380,23 +1445,12 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
goto out;
break;
case IWL_SISO_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
+ if (!rs_sgi_allowed(tbl, sta))
break;
IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n");
memcpy(search_tbl, tbl, sz);
- if (is_green) {
- if (!tbl->is_SGI)
- break;
- else
- IWL_ERR(mvm,
- "SGI was set in GF+SISO\n");
- }
search_tbl->is_SGI = !tbl->is_SGI;
rs_set_expected_tpt_table(lq_sta, search_tbl);
if (tbl->is_SGI) {
@@ -1405,16 +1459,13 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
break;
}
search_tbl->current_rate =
- rate_n_flags_from_tbl(mvm, search_tbl,
- index, is_green);
+ rate_n_flags_from_tbl(mvm, search_tbl, index);
update_search_tbl_counter = 1;
goto out;
default:
WARN_ON_ONCE(1);
}
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
if (tbl->action == start_action)
break;
@@ -1424,9 +1475,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@@ -1440,63 +1489,20 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct ieee80211_sta *sta, int index)
{
- s8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- u8 tx_chains_num = num_of_ant(valid_tx_ant);
u8 update_search_tbl_counter = 0;
int ret;
- switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
- case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
- /* nothing */
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
- case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
- /* avoid antenna B and MIMO */
- if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
- tbl->action = IWL_MIMO2_SWITCH_SISO_A;
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
- /* avoid antenna B unless MIMO */
- if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
- tbl->action = IWL_MIMO2_SWITCH_SISO_A;
- break;
- default:
- IWL_ERR(mvm, "Invalid BT load %d",
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
- break;
- }
-
start_action = tbl->action;
while (1) {
lq_sta->action_counter++;
switch (tbl->action) {
- case IWL_MIMO2_SWITCH_ANTENNA1:
- case IWL_MIMO2_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle Antennas\n");
-
- if (tx_chains_num <= 2)
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate,
- search_tbl)) {
- update_search_tbl_counter = 1;
- goto out;
- }
- break;
case IWL_MIMO2_SWITCH_SISO_A:
case IWL_MIMO2_SWITCH_SISO_B:
IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
@@ -1521,11 +1527,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break;
case IWL_MIMO2_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
+ if (!rs_sgi_allowed(tbl, sta))
break;
IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n");
@@ -1546,16 +1548,13 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break;
}
search_tbl->current_rate =
- rate_n_flags_from_tbl(mvm, search_tbl,
- index, is_green);
+ rate_n_flags_from_tbl(mvm, search_tbl, index);
update_search_tbl_counter = 1;
goto out;
default:
WARN_ON_ONCE(1);
}
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
if (tbl->action == start_action)
break;
@@ -1564,9 +1563,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
return 0;
out:
lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@@ -1660,15 +1657,16 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
* setup rate table in uCode
*/
static void rs_update_rate_tbl(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl,
- int index, u8 is_green)
+ int index)
{
u32 rate;
/* Update uCode's rate table. */
- rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
- rs_fill_link_cmd(mvm, lq_sta, rate);
+ rate = rate_n_flags_from_tbl(mvm, tbl, index);
+ rs_fill_link_cmd(mvm, sta, lq_sta, rate);
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
}
@@ -1712,7 +1710,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
u8 update_lq = 0;
struct iwl_scale_tbl_info *tbl, *tbl1;
u16 rate_scale_index_msk = 0;
- u8 is_green = 0;
u8 active_tbl = 0;
u8 done_search = 0;
u16 high_low;
@@ -1754,11 +1751,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
active_tbl = 1 - lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
- if (is_legacy(tbl->lq_type))
- lq_sta->is_green = 0;
- else
- lq_sta->is_green = rs_use_green(sta);
- is_green = lq_sta->is_green;
/* current tx rate */
index = lq_sta->last_txrate_idx;
@@ -1797,7 +1789,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
/* get "active" rate info */
index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
- rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+ rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
}
return;
}
@@ -1978,24 +1970,24 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
(current_tpt > (100 * tbl->expected_tpt[low]))))
scale_action = 0;
- if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+ if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
if (lq_sta->last_bt_traffic >
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+ le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
/*
* don't set scale_action, don't want to scale up if
* the rate scale doesn't otherwise think that is a
* good idea.
*/
} else if (lq_sta->last_bt_traffic <=
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+ le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
scale_action = -1;
}
}
lq_sta->last_bt_traffic =
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
+ le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
- if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+ if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
/* search for a new modulation */
rs_stay_in_table(lq_sta, true);
@@ -2032,7 +2024,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
lq_update:
/* Replace uCode's rate table for the destination station. */
if (update_lq)
- rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+ rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
rs_stay_in_table(lq_sta, false);
@@ -2071,7 +2063,7 @@ lq_update:
IWL_DEBUG_RATE(mvm,
"Switch current mcs: %X index: %d\n",
tbl->current_rate, index);
- rs_fill_link_cmd(mvm, lq_sta, tbl->current_rate);
+ rs_fill_link_cmd(mvm, sta, lq_sta, tbl->current_rate);
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
} else {
done_search = 1;
@@ -2113,7 +2105,7 @@ lq_update:
}
out:
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index);
lq_sta->last_txrate_idx = index;
}
@@ -2140,7 +2132,6 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
int rate_idx;
int i;
u32 rate;
- u8 use_green = rs_use_green(sta);
u8 active_tbl = 0;
u8 valid_tx_ant;
@@ -2172,10 +2163,10 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
rs_toggle_antenna(valid_tx_ant, &rate, tbl);
- rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx, use_green);
+ rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx);
tbl->current_rate = rate;
rs_set_expected_tpt_table(lq_sta, tbl);
- rs_fill_link_cmd(NULL, lq_sta, rate);
+ rs_fill_link_cmd(NULL, NULL, lq_sta, rate);
/* TODO restore station should remember the lq cmd */
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true);
}
@@ -2190,7 +2181,6 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = mvm_sta;
- int rate_idx;
IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n");
@@ -2215,36 +2205,9 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
if (rate_control_send_low(sta, mvm_sta, txrc))
return;
- rate_idx = lq_sta->last_txrate_idx;
-
- if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
- rate_idx -= IWL_FIRST_OFDM_RATE;
- /* 6M and 9M shared same MCS index */
- rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
- WARN_ON_ONCE(rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO3_6M_PLCP);
- if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO2_6M_PLCP)
- rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
- info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
- if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
- info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
- if (lq_sta->last_rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
- info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (lq_sta->last_rate_n_flags & RATE_HT_MCS_GF_MSK)
- info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
- } else {
- /* Check for invalid rates */
- if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
- ((sband->band == IEEE80211_BAND_5GHZ) &&
- (rate_idx < IWL_FIRST_OFDM_RATE)))
- rate_idx = rate_lowest_index(sband, sta);
- /* On valid 5 GHz rate, adjust index */
- else if (sband->band == IEEE80211_BAND_5GHZ)
- rate_idx -= IWL_FIRST_OFDM_RATE;
- info->control.rates[0].flags = 0;
- }
- info->control.rates[0].idx = rate_idx;
+ iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
+ info->band, &info->control.rates[0]);
+
info->control.rates[0].count = 1;
}
@@ -2261,6 +2224,24 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
return &sta_priv->lq_sta;
}
+static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
+ int nss)
+{
+ u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+ (0x3 << (2 * (nss - 1)));
+ rx_mcs >>= (2 * (nss - 1));
+
+ if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7)
+ return IWL_RATE_MCS_7_INDEX;
+ else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8)
+ return IWL_RATE_MCS_8_INDEX;
+ else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9)
+ return IWL_RATE_MCS_9_INDEX;
+
+ WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED);
+ return -1;
+}
+
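The helper above reads two bits per spatial stream out of the VHT rx_mcs_map: 0 means MCS 0-7 are supported, 1 means MCS 0-8, 2 means MCS 0-9 and 3 means the stream is not supported. A minimal stand-alone sketch of that decoding (plain user-space C; numeric MCS values stand in for the IWL_RATE_MCS_*_INDEX constants and the map value is made up):

#include <stdio.h>

/* mirrors rs_vht_highest_rx_mcs_index(): two bits per stream in rx_mcs_map */
static int highest_rx_mcs(unsigned short rx_mcs_map, int nss)
{
        unsigned int field = (rx_mcs_map >> (2 * (nss - 1))) & 0x3;

        switch (field) {
        case 0: return 7;       /* IEEE80211_VHT_MCS_SUPPORT_0_7 */
        case 1: return 8;       /* IEEE80211_VHT_MCS_SUPPORT_0_8 */
        case 2: return 9;       /* IEEE80211_VHT_MCS_SUPPORT_0_9 */
        default: return -1;     /* IEEE80211_VHT_MCS_NOT_SUPPORTED */
        }
}

int main(void)
{
        /* hypothetical map: stream 1 supports MCS 0-9, stream 2 MCS 0-8, the rest nothing */
        unsigned short map = 0xfff6;

        printf("NSS1 -> MCS %d, NSS2 -> MCS %d, NSS3 -> MCS %d\n",
               highest_rx_mcs(map, 1), highest_rx_mcs(map, 2),
               highest_rx_mcs(map, 3));
        return 0;
}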
/*
* Called after adding a new station to initialize rate scaling
*/
@@ -2270,6 +2251,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int i, j;
struct ieee80211_hw *hw = mvm->hw;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
struct iwl_mvm_sta *sta_priv;
struct iwl_lq_sta *lq_sta;
struct ieee80211_supported_band *sband;
@@ -2298,7 +2280,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->max_rate_idx = -1;
lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
- lq_sta->is_green = rs_use_green(sta);
lq_sta->band = sband->band;
/*
* active legacy rates as per supported rates bitmap
@@ -2308,25 +2289,54 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
for_each_set_bit(i, &supp, BITS_PER_LONG)
lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
- /*
- * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
- * supp_rates[] does not; shift to convert format, force 9 MBits off.
- */
- lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
- lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
- lq_sta->active_siso_rate &= ~((u16)0x2);
- lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+ /* TODO: should probably account for rx_highest for both HT/VHT */
+ if (!vht_cap || !vht_cap->vht_supported) {
+ /* active_siso_rate mask includes 9 MBits (bit 5),
+ * and CCK (bits 0-3), supp_rates[] does not;
+ * shift to convert format, force 9 MBits off.
+ */
+ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+ lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+ lq_sta->active_siso_rate &= ~((u16)0x2);
+ lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+
+ /* Same here */
+ lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+ lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+ lq_sta->active_mimo2_rate &= ~((u16)0x2);
+ lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+ lq_sta->is_vht = false;
+ } else {
+ int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
+ if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+ for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+ if (i == IWL_RATE_9M_INDEX)
+ continue;
+
+ lq_sta->active_siso_rate |= BIT(i);
+ }
+ }
+
+ highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
+ if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+ for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+ if (i == IWL_RATE_9M_INDEX)
+ continue;
- /* Same here */
- lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
- lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
- lq_sta->active_mimo2_rate &= ~((u16)0x2);
- lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+ lq_sta->active_mimo2_rate |= BIT(i);
+ }
+ }
+
+ /* TODO: avoid MCS9 in 20MHz, which isn't valid for 11ac */
+ lq_sta->is_vht = true;
+ }
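The HT branch above turns mac80211's ht_cap->mcs.rx_mask[0] into the driver's active_siso_rate bitmap, whose low slots are reserved for the four CCK rates and the legacy 9 Mbps rate. A stand-alone sketch of that bit manipulation, assuming IWL_FIRST_OFDM_RATE is 4 (consistent with the comment's "9 MBits (bit 5)"):

#include <stdio.h>

#define FIRST_OFDM_RATE 4       /* assumed stand-in for IWL_FIRST_OFDM_RATE */

static unsigned int ht_rx_mask_to_rate_mask(unsigned char rx_mask0)
{
        unsigned int rate;

        rate = rx_mask0 << 1;           /* MCS1..7 move up one slot, past 9 Mbps */
        rate |= rx_mask0 & 0x1;         /* MCS0 stays in the lowest OFDM slot (6 Mbps) */
        rate &= ~0x2u;                  /* the 9 Mbps slot has no HT MCS: force it off */
        return rate << FIRST_OFDM_RATE; /* leave room for the four CCK rates below */
}

int main(void)
{
        /* hypothetical station advertising HT MCS 0-7 on one stream */
        printf("active_siso_rate = 0x%x\n", ht_rx_mask_to_rate_mask(0xff));
        return 0;
}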
IWL_DEBUG_RATE(mvm,
- "SISO-RATE=%X MIMO2-RATE=%X\n",
+ "SISO-RATE=%X MIMO2-RATE=%X VHT=%d\n",
lq_sta->active_siso_rate,
- lq_sta->active_mimo2_rate);
+ lq_sta->active_mimo2_rate,
+ lq_sta->is_vht);
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
@@ -2358,6 +2368,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta, u32 new_rate)
{
struct iwl_scale_tbl_info tbl_type;
@@ -2429,7 +2440,6 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
&rate_idx);
-
/* Indicate to uCode which entries might be MIMO.
* If initial rate was MIMO, this will finally end up
* as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
@@ -2455,7 +2465,9 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
}
/* Don't allow HT rates after next pass.
- * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+ * rs_get_lower_rate() will change type to LQ_LEGACY_A
+ * or LQ_LEGACY_G.
+ */
use_ht_possible = 0;
/* Override next rate if needed for debug purposes */
@@ -2474,12 +2486,9 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
lq_cmd->agg_time_limit =
cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
- /*
- * overwrite if needed, pass aggregation time limit
- * to uCode in uSec - This is racy - but heh, at least it helps...
- */
- if (mvm && BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2)
- lq_cmd->agg_time_limit = cpu_to_le16(1200);
+ if (sta)
+ lq_cmd->agg_time_limit =
+ cpu_to_le16(iwl_mvm_bt_coex_agg_time_limit(mvm, sta));
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -2586,16 +2595,18 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
(iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
- (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
- if (is_Ht(tbl->lq_type)) {
+ (is_legacy(tbl->lq_type)) ? "legacy" :
+ is_vht(tbl->lq_type) ? "VHT" : "HT");
+ if (is_ht(tbl->lq_type)) {
desc += sprintf(buff+desc, " %s",
(is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
desc += sprintf(buff+desc, " %s",
- (tbl->is_ht40) ? "40MHz" : "20MHz");
- desc += sprintf(buff+desc, " %s %s %s\n",
+ (is_ht20(tbl)) ? "20MHz" :
+ (is_ht40(tbl)) ? "40MHz" :
+ (is_ht80(tbl)) ? "80Mhz" : "BAD BW");
+ desc += sprintf(buff+desc, " %s %s\n",
(tbl->is_SGI) ? "SGI" : "",
- (lq_sta->is_green) ? "GF enabled" : "",
- (lq_sta->is_agg) ? "AGG on" : "");
+ (lq_sta->is_agg) ? "AGG on" : "");
}
desc += sprintf(buff+desc, "last tx rate=0x%X\n",
lq_sta->last_rate_n_flags);
@@ -2653,7 +2664,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
int desc = 0;
int i, j;
ssize_t ret;
-
+ struct iwl_scale_tbl_info *tbl;
struct iwl_lq_sta *lq_sta = file->private_data;
buff = kmalloc(1024, GFP_KERNEL);
@@ -2661,21 +2672,23 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
return -ENOMEM;
for (i = 0; i < LQ_SIZE; i++) {
+ tbl = &(lq_sta->lq_info[i]);
desc += sprintf(buff+desc,
- "%s type=%d SGI=%d HT40=%d DUP=0 GF=%d\n"
+ "%s type=%d SGI=%d BW=%s DUP=0\n"
"rate=0x%X\n",
lq_sta->active_tbl == i ? "*" : "x",
- lq_sta->lq_info[i].lq_type,
- lq_sta->lq_info[i].is_SGI,
- lq_sta->lq_info[i].is_ht40,
- lq_sta->is_green,
- lq_sta->lq_info[i].current_rate);
+ tbl->lq_type,
+ tbl->is_SGI,
+ is_ht20(tbl) ? "20Mhz" :
+ is_ht40(tbl) ? "40Mhz" :
+ is_ht80(tbl) ? "80Mhz" : "ERR",
+ tbl->current_rate);
for (j = 0; j < IWL_RATE_COUNT; j++) {
desc += sprintf(buff+desc,
"counter=%d success=%d %%=%d\n",
- lq_sta->lq_info[i].win[j].counter,
- lq_sta->lq_info[i].win[j].success_counter,
- lq_sta->lq_info[i].win[j].success_ratio);
+ tbl->win[j].counter,
+ tbl->win[j].success_counter,
+ tbl->win[j].success_ratio);
}
}
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 335cf1682902..5d5344f7070b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -35,9 +35,11 @@
#include "iwl-trans.h"
struct iwl_rs_rate_info {
- u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
- u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
- u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 plcp_ht_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_ht_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 plcp_vht_siso;
+ u8 plcp_vht_mimo2;
u8 prev_rs; /* previous rate used in rs algo */
u8 next_rs; /* next rate used in rs algo */
};
@@ -83,35 +85,52 @@ enum {
#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
-/* uCode API values for OFDM high-throughput (HT) bit rates */
+/* uCode API values for HT/VHT bit rates */
enum {
- IWL_RATE_SISO_6M_PLCP = 0,
- IWL_RATE_SISO_12M_PLCP = 1,
- IWL_RATE_SISO_18M_PLCP = 2,
- IWL_RATE_SISO_24M_PLCP = 3,
- IWL_RATE_SISO_36M_PLCP = 4,
- IWL_RATE_SISO_48M_PLCP = 5,
- IWL_RATE_SISO_54M_PLCP = 6,
- IWL_RATE_SISO_60M_PLCP = 7,
- IWL_RATE_MIMO2_6M_PLCP = 0x8,
- IWL_RATE_MIMO2_12M_PLCP = 0x9,
- IWL_RATE_MIMO2_18M_PLCP = 0xa,
- IWL_RATE_MIMO2_24M_PLCP = 0xb,
- IWL_RATE_MIMO2_36M_PLCP = 0xc,
- IWL_RATE_MIMO2_48M_PLCP = 0xd,
- IWL_RATE_MIMO2_54M_PLCP = 0xe,
- IWL_RATE_MIMO2_60M_PLCP = 0xf,
- IWL_RATE_MIMO3_6M_PLCP = 0x10,
- IWL_RATE_MIMO3_12M_PLCP = 0x11,
- IWL_RATE_MIMO3_18M_PLCP = 0x12,
- IWL_RATE_MIMO3_24M_PLCP = 0x13,
- IWL_RATE_MIMO3_36M_PLCP = 0x14,
- IWL_RATE_MIMO3_48M_PLCP = 0x15,
- IWL_RATE_MIMO3_54M_PLCP = 0x16,
- IWL_RATE_MIMO3_60M_PLCP = 0x17,
- IWL_RATE_SISO_INVM_PLCP,
- IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
- IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+ IWL_RATE_HT_SISO_MCS_0_PLCP = 0,
+ IWL_RATE_HT_SISO_MCS_1_PLCP = 1,
+ IWL_RATE_HT_SISO_MCS_2_PLCP = 2,
+ IWL_RATE_HT_SISO_MCS_3_PLCP = 3,
+ IWL_RATE_HT_SISO_MCS_4_PLCP = 4,
+ IWL_RATE_HT_SISO_MCS_5_PLCP = 5,
+ IWL_RATE_HT_SISO_MCS_6_PLCP = 6,
+ IWL_RATE_HT_SISO_MCS_7_PLCP = 7,
+ IWL_RATE_HT_MIMO2_MCS_0_PLCP = 0x8,
+ IWL_RATE_HT_MIMO2_MCS_1_PLCP = 0x9,
+ IWL_RATE_HT_MIMO2_MCS_2_PLCP = 0xA,
+ IWL_RATE_HT_MIMO2_MCS_3_PLCP = 0xB,
+ IWL_RATE_HT_MIMO2_MCS_4_PLCP = 0xC,
+ IWL_RATE_HT_MIMO2_MCS_5_PLCP = 0xD,
+ IWL_RATE_HT_MIMO2_MCS_6_PLCP = 0xE,
+ IWL_RATE_HT_MIMO2_MCS_7_PLCP = 0xF,
+ IWL_RATE_VHT_SISO_MCS_0_PLCP = 0,
+ IWL_RATE_VHT_SISO_MCS_1_PLCP = 1,
+ IWL_RATE_VHT_SISO_MCS_2_PLCP = 2,
+ IWL_RATE_VHT_SISO_MCS_3_PLCP = 3,
+ IWL_RATE_VHT_SISO_MCS_4_PLCP = 4,
+ IWL_RATE_VHT_SISO_MCS_5_PLCP = 5,
+ IWL_RATE_VHT_SISO_MCS_6_PLCP = 6,
+ IWL_RATE_VHT_SISO_MCS_7_PLCP = 7,
+ IWL_RATE_VHT_SISO_MCS_8_PLCP = 8,
+ IWL_RATE_VHT_SISO_MCS_9_PLCP = 9,
+ IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 0x10,
+ IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 0x11,
+ IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 0x12,
+ IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 0x13,
+ IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 0x14,
+ IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 0x15,
+ IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 0x16,
+ IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 0x17,
+ IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 0x18,
+ IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 0x19,
+ IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_VHT_SISO_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_SISO_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_SISO_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_MIMO2_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_MIMO2_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
};
#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
@@ -139,25 +158,33 @@ enum {
#define IWL_RATE_DECREASE_TH 1920 /* 15% */
/* possible actions when in legacy mode */
-#define IWL_LEGACY_SWITCH_ANTENNA1 0
-#define IWL_LEGACY_SWITCH_ANTENNA2 1
-#define IWL_LEGACY_SWITCH_SISO 2
-#define IWL_LEGACY_SWITCH_MIMO2 3
+enum {
+ IWL_LEGACY_SWITCH_ANTENNA,
+ IWL_LEGACY_SWITCH_SISO,
+ IWL_LEGACY_SWITCH_MIMO2,
+ IWL_LEGACY_FIRST_ACTION = IWL_LEGACY_SWITCH_ANTENNA,
+ IWL_LEGACY_LAST_ACTION = IWL_LEGACY_SWITCH_MIMO2,
+};
/* possible actions when in siso mode */
-#define IWL_SISO_SWITCH_ANTENNA1 0
-#define IWL_SISO_SWITCH_ANTENNA2 1
-#define IWL_SISO_SWITCH_MIMO2 2
-#define IWL_SISO_SWITCH_GI 3
+enum {
+ IWL_SISO_SWITCH_ANTENNA,
+ IWL_SISO_SWITCH_MIMO2,
+ IWL_SISO_SWITCH_GI,
+ IWL_SISO_FIRST_ACTION = IWL_SISO_SWITCH_ANTENNA,
+ IWL_SISO_LAST_ACTION = IWL_SISO_SWITCH_GI,
+};
/* possible actions when in mimo mode */
-#define IWL_MIMO2_SWITCH_ANTENNA1 0
-#define IWL_MIMO2_SWITCH_ANTENNA2 1
-#define IWL_MIMO2_SWITCH_SISO_A 2
-#define IWL_MIMO2_SWITCH_SISO_B 3
-#define IWL_MIMO2_SWITCH_GI 4
+enum {
+ IWL_MIMO2_SWITCH_SISO_A,
+ IWL_MIMO2_SWITCH_SISO_B,
+ IWL_MIMO2_SWITCH_GI,
+ IWL_MIMO2_FIRST_ACTION = IWL_MIMO2_SWITCH_SISO_A,
+ IWL_MIMO2_LAST_ACTION = IWL_MIMO2_SWITCH_GI,
+};
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
+#define IWL_MAX_SEARCH IWL_MIMO2_LAST_ACTION
#define IWL_ACTION_LIMIT 3 /* # possible actions */
@@ -188,20 +215,31 @@ enum {
enum iwl_table_type {
LQ_NONE,
- LQ_G, /* legacy types */
- LQ_A,
- LQ_SISO, /* high-throughput types */
- LQ_MIMO2,
+ LQ_LEGACY_G, /* legacy types */
+ LQ_LEGACY_A,
+ LQ_HT_SISO, /* HT types */
+ LQ_HT_MIMO2,
+ LQ_VHT_SISO, /* VHT types */
+ LQ_VHT_MIMO2,
LQ_MAX,
};
-#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
-#define is_siso(tbl) ((tbl) == LQ_SISO)
-#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo(tbl) is_mimo2(tbl)
-#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_A)
-#define is_g_and(tbl) ((tbl) == LQ_G)
+#define is_legacy(tbl) (((tbl) == LQ_LEGACY_G) || ((tbl) == LQ_LEGACY_A))
+#define is_ht_siso(tbl) ((tbl) == LQ_HT_SISO)
+#define is_ht_mimo2(tbl) ((tbl) == LQ_HT_MIMO2)
+#define is_vht_siso(tbl) ((tbl) == LQ_VHT_SISO)
+#define is_vht_mimo2(tbl) ((tbl) == LQ_VHT_MIMO2)
+#define is_siso(tbl) (is_ht_siso(tbl) || is_vht_siso(tbl))
+#define is_mimo2(tbl) (is_ht_mimo2(tbl) || is_vht_mimo2(tbl))
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_ht(tbl) (is_ht_siso(tbl) || is_ht_mimo2(tbl))
+#define is_vht(tbl) (is_vht_siso(tbl) || is_vht_mimo2(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_LEGACY_A)
+#define is_g_band(tbl) ((tbl) == LQ_LEGACY_G)
+
+#define is_ht20(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_20)
+#define is_ht40(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_40)
+#define is_ht80(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_80)
#define IWL_MAX_MCS_DISPLAY_SIZE 12
@@ -232,7 +270,7 @@ struct iwl_scale_tbl_info {
enum iwl_table_type lq_type;
u8 ant_type;
u8 is_SGI; /* 1 = short guard interval */
- u8 is_ht40; /* 1 = 40 MHz channel width */
+ u32 bw; /* channel bandwidth; RATE_MCS_CHAN_WIDTH_XX */
u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
u8 max_search; /* maximum number of tables we can search */
s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
@@ -262,7 +300,7 @@ struct iwl_lq_sta {
u64 flush_timer; /* time staying in mode before new search */
u8 action_counter; /* # mode-switch actions tried */
- u8 is_green;
+ bool is_vht;
enum ieee80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
@@ -314,9 +352,8 @@ static inline u8 num_of_ant(u8 mask)
}
/* Initialize station's rate scaling information after adding station */
-extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta,
- enum ieee80211_band band);
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum ieee80211_band band);
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -328,7 +365,7 @@ extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
* ieee80211_register_hw
*
*/
-extern int iwl_mvm_rate_control_register(void);
+int iwl_mvm_rate_control_register(void);
/**
* iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -336,7 +373,7 @@ extern int iwl_mvm_rate_control_register(void);
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
*/
-extern void iwl_mvm_rate_control_unregister(void);
+void iwl_mvm_rate_control_unregister(void);
struct iwl_mvm_sta;
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 2a8cb5a60535..a4af5019a496 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -422,6 +422,27 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
mvmvif->bf_data.ave_beacon_signal = sig;
+ /* BT Coex */
+ if (mvmvif->bf_data.bt_coex_min_thold !=
+ mvmvif->bf_data.bt_coex_max_thold) {
+ last_event = mvmvif->bf_data.last_bt_coex_event;
+ if (sig > mvmvif->bf_data.bt_coex_max_thold &&
+ (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
+ last_event == 0)) {
+ mvmvif->bf_data.last_bt_coex_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
+ sig);
+ iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
+ } else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
+ (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
+ last_event == 0)) {
+ mvmvif->bf_data.last_bt_coex_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
+ sig);
+ iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
+ }
+ }
+
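The block added above applies a simple hysteresis around the two BT coex thresholds: a HIGH event is reported only when the signal rises above the upper threshold after the last reported value was at or below the lower one (or nothing was reported yet), and a LOW event on the mirrored condition, so fluctuations between the thresholds stay quiet. A stand-alone sketch with invented thresholds and samples:

#include <stdio.h>

#define MIN_THOLD (-75)         /* made-up bt_coex_min_thold */
#define MAX_THOLD (-65)         /* made-up bt_coex_max_thold */

static int last_event;          /* 0 means "nothing reported yet" */

static void bt_coex_rssi(int sig)
{
        if (sig > MAX_THOLD && (last_event <= MIN_THOLD || last_event == 0)) {
                last_event = sig;
                printf("sig %d -> RSSI_EVENT_HIGH\n", sig);
        } else if (sig < MIN_THOLD &&
                   (last_event >= MAX_THOLD || last_event == 0)) {
                last_event = sig;
                printf("sig %d -> RSSI_EVENT_LOW\n", sig);
        } else {
                printf("sig %d -> no event\n", sig);
        }
}

int main(void)
{
        int samples[] = { -60, -70, -68, -80, -72, -60 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                bt_coex_rssi(samples[i]);
        return 0;
}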
if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
return;
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 9a7ab8495300..dff7592e1ff8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -74,8 +74,12 @@
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
{
u16 rx_chain;
- u8 rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
+ u8 rx_ant;
+ if (mvm->scan_rx_ant != ANT_NONE)
+ rx_ant = mvm->scan_rx_ant;
+ else
+ rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
@@ -93,10 +97,10 @@ static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif)
static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif)
{
- if (vif->bss_conf.assoc)
- return cpu_to_le32(vif->bss_conf.beacon_int);
- else
+ if (!vif->bss_conf.assoc)
return 0;
+
+ return cpu_to_le32(ieee80211_tu_to_usec(vif->bss_conf.beacon_int));
}
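The suspend time is now handed to the firmware in microseconds rather than raw TU; ieee80211_tu_to_usec() is the mac80211 helper for that conversion, one TU being 1024 usec. A tiny stand-alone illustration:

#include <stdio.h>

static unsigned long tu_to_usec(unsigned long tu)
{
        return tu * 1024;       /* 1 TU == 1024 usec */
}

int main(void)
{
        /* a typical beacon interval of 100 TU */
        printf("100 TU = %lu usec\n", tu_to_usec(100));
        return 0;
}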
static inline __le32
@@ -133,11 +137,12 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
* request.
*/
static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
- struct cfg80211_scan_request *req)
+ struct cfg80211_scan_request *req,
+ int first)
{
int fw_idx, req_idx;
- for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0;
+ for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx >= first;
req_idx--, fw_idx++) {
cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
@@ -153,9 +158,9 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
* just to notify that this scan is active and not passive.
* In order to notify the FW of the number of SSIDs we wish to scan (including
* the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first). The first SSID is already
- * included in the probe template, so we need to set only req->n_ssids - 1 bits
- * in addition to the first bit.
+ * one for each SSID, and set the active bit (first). If the first SSID is
+ * already included in the probe template, we need to set only
+ * req->n_ssids - 1 bits in addition to the first bit.
*/
static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
{
@@ -170,7 +175,8 @@ static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
}
static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
- struct cfg80211_scan_request *req)
+ struct cfg80211_scan_request *req,
+ bool basic_ssid)
{
u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
@@ -178,10 +184,14 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
int i;
+ int type = BIT(req->n_ssids) - 1;
+
+ if (!basic_ssid)
+ type |= BIT(req->n_ssids);
for (i = 0; i < cmd->channel_count; i++) {
chan->channel = cpu_to_le16(req->channels[i]->hw_value);
- chan->type = cpu_to_le32(BIT(req->n_ssids) - 1);
+ chan->type = cpu_to_le32(type);
if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
chan->active_dwell = cpu_to_le16(active_dwell);
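The per-channel type word now carries one bit per SSID to probe, plus one extra bit when the firmware does not take the first SSID from the probe template (basic_ssid is false). A stand-alone sketch of just that bitmap, with illustrative values:

#include <stdio.h>

static unsigned int scan_channel_ssid_bits(int n_ssids, int basic_ssid)
{
        unsigned int type = (1u << n_ssids) - 1;        /* one bit per SSID */

        if (!basic_ssid)
                type |= 1u << n_ssids;                  /* extra bit for SSID 0 */
        return type;
}

int main(void)
{
        printf("n_ssids=2, basic_ssid=1 -> 0x%x\n", scan_channel_ssid_bits(2, 1));
        printf("n_ssids=2, basic_ssid=0 -> 0x%x\n", scan_channel_ssid_bits(2, 0));
        return 0;
}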
@@ -268,6 +278,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
u32 status;
int ssid_len = 0;
u8 *ssid = NULL;
+ bool basic_ssid = !(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
lockdep_assert_held(&mvm->mutex);
BUG_ON(mvm->scan_cmd == NULL);
@@ -302,14 +314,16 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
if (req->n_ssids > 0) {
cmd->passive2active = cpu_to_le16(1);
cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
- ssid = req->ssids[0].ssid;
- ssid_len = req->ssids[0].ssid_len;
+ if (basic_ssid) {
+ ssid = req->ssids[0].ssid;
+ ssid_len = req->ssids[0].ssid_len;
+ }
} else {
cmd->passive2active = 0;
cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
}
- iwl_mvm_scan_fill_ssids(cmd, req);
+ iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
@@ -326,7 +340,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
req->ie, req->ie_len,
mvm->fw->ucode_capa.max_probe_length));
- iwl_mvm_scan_fill_channels(cmd, req);
+ iwl_mvm_scan_fill_channels(cmd, req, basic_ssid);
cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
le16_to_cpu(cmd->tx_cmd.len) +
@@ -377,6 +391,21 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
+int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_sched_scan_results *notif = (void *)pkt->data;
+
+ if (notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN) {
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
+ ieee80211_sched_scan_results(mvm->hw);
+ }
+
+ return 0;
+}
+
static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
@@ -394,6 +423,11 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
return false;
}
+ /*
+ * If scan cannot be aborted, it means that we had a
+ * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
+ * ieee80211_scan_completed already.
+ */
IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
*resp);
return true;
@@ -417,14 +451,19 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
SCAN_COMPLETE_NOTIFICATION };
int ret;
+ if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+ return;
+
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
scan_abort_notif,
ARRAY_SIZE(scan_abort_notif),
iwl_mvm_scan_abort_notif, NULL);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD,
+ CMD_SYNC | CMD_SEND_IN_RFKILL, 0, NULL);
if (ret) {
IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
+ /* mac80211's state will be cleaned in the fw_restart flow */
goto out_remove_notif;
}
@@ -437,3 +476,406 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
out_remove_notif:
iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
}
+
+int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
+
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
+ scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+ "completed" : "aborted");
+
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+ ieee80211_sched_scan_stopped(mvm->hw);
+
+ return 0;
+}
+
+static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sched_scan_ies *ies,
+ enum ieee80211_band band,
+ struct iwl_tx_cmd *cmd,
+ u8 *data)
+{
+ u16 cmd_len;
+
+ cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+ cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ cmd->sta_id = mvm->aux_sta.sta_id;
+
+ cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false);
+
+ cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
+ vif->addr,
+ 1, NULL, 0,
+ ies->ie[band], ies->len[band],
+ SCAN_OFFLOAD_PROBE_REQ_SIZE);
+ cmd->len = cpu_to_le16(cmd_len);
+}
+
+static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct iwl_scan_offload_cmd *scan)
+{
+ scan->channel_count =
+ mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
+ mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+ scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
+ scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
+ scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
+ scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ scan->suspend_time = iwl_mvm_scan_suspend_time(vif);
+ scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
+ MAC_FILTER_IN_BEACON);
+ scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
+ scan->rep_count = cpu_to_le32(1);
+}
+
+static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
+{
+ int i;
+
+ for (i = 0; i < PROBE_OPTION_MAX; i++) {
+ if (!ssid_list[i].len)
+ break;
+ if (ssid_list[i].len == ssid_len &&
+ !memcmp(ssid_list[i].ssid, ssid, ssid_len))
+ return i;
+ }
+ return -1;
+}
+
+static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
+ struct iwl_scan_offload_cmd *scan,
+ u32 *ssid_bitmap)
+{
+ int i, j;
+ int index;
+
+ /*
+ * copy SSIDs from match list.
+ * iwl_config_sched_scan_profiles() uses the order of these ssids to
+ * config match list.
+ */
+ for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+ scan->direct_scan[i].id = WLAN_EID_SSID;
+ scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
+ memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
+ scan->direct_scan[i].len);
+ }
+
+ /* add SSIDs from scan SSID list */
+ *ssid_bitmap = 0;
+ for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
+ index = iwl_ssid_exist(req->ssids[j].ssid,
+ req->ssids[j].ssid_len,
+ scan->direct_scan);
+ if (index < 0) {
+ if (!req->ssids[j].ssid_len)
+ continue;
+ scan->direct_scan[i].id = WLAN_EID_SSID;
+ scan->direct_scan[i].len = req->ssids[j].ssid_len;
+ memcpy(scan->direct_scan[i].ssid, req->ssids[j].ssid,
+ scan->direct_scan[i].len);
+ *ssid_bitmap |= BIT(i + 1);
+ i++;
+ } else {
+ *ssid_bitmap |= BIT(index + 1);
+ }
+ }
+}
+
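iwl_scan_offload_build_ssid() above copies the match-set SSIDs first (their order must line up with the profiles configured later), then folds in the requested scan SSIDs, reusing a slot when the SSID is already present; each scan SSID contributes BIT(index + 1) to the bitmap since bit 0 belongs to the probe-template SSID. A stand-alone sketch using plain strings instead of struct iwl_ssid_ie:

#include <stdio.h>
#include <string.h>

#define MAX_SSIDS 20

int main(void)
{
        const char *match_sets[] = { "home", "office" };        /* invented SSIDs */
        const char *scan_ssids[] = { "office", "cafe" };
        const char *list[MAX_SSIDS];
        unsigned int ssid_bitmap = 0;
        int n = 0, i, j, index;

        /* 1) match-set SSIDs keep their order: profile index == list index */
        for (i = 0; i < 2; i++)
                list[n++] = match_sets[i];

        /* 2) scan SSIDs: reuse an existing slot or append a new one */
        for (j = 0; j < 2; j++) {
                index = -1;
                for (i = 0; i < n; i++)
                        if (!strcmp(list[i], scan_ssids[j]))
                                index = i;
                if (index < 0) {
                        list[n] = scan_ssids[j];
                        index = n++;
                }
                ssid_bitmap |= 1u << (index + 1);
        }

        for (i = 0; i < n; i++)
                printf("direct_scan[%d] = %s\n", i, list[i]);
        printf("ssid_bitmap = 0x%x\n", ssid_bitmap);    /* 0xc: bits for "office" and "cafe" */
        return 0;
}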
+static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req,
+ struct iwl_scan_channel_cfg *channels,
+ enum ieee80211_band band,
+ int *head, int *tail,
+ u32 ssid_bitmap)
+{
+ struct ieee80211_supported_band *s_band;
+ int n_probes = req->n_ssids;
+ int n_channels = req->n_channels;
+ u8 active_dwell, passive_dwell;
+ int i, j, index = 0;
+ bool partial;
+
+ /*
+ * We have to configure all supported channels, even if we don't want to
+ * scan on them, but we have to send channels in the order that we want
+ * to scan. So add the requested channels to the head of the list and
+ * the others to the end.
+ */
+ active_dwell = iwl_mvm_get_active_dwell(band, n_probes);
+ passive_dwell = iwl_mvm_get_passive_dwell(band);
+ s_band = &mvm->nvm_data->bands[band];
+
+ for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
+ partial = false;
+ for (j = 0; j < n_channels; j++)
+ if (s_band->channels[i].center_freq ==
+ req->channels[j]->center_freq) {
+ index = *head;
+ (*head)++;
+ /*
+ * Channels that came with the request will be
+ * in the partial scan.
+ */
+ partial = true;
+ break;
+ }
+ if (!partial) {
+ index = *tail;
+ (*tail)--;
+ }
+ channels->channel_number[index] =
+ cpu_to_le16(ieee80211_frequency_to_channel(
+ s_band->channels[i].center_freq));
+ channels->dwell_time[index][0] = active_dwell;
+ channels->dwell_time[index][1] = passive_dwell;
+
+ channels->iter_count[index] = cpu_to_le16(1);
+ channels->iter_interval[index] = 0;
+
+ if (!(s_band->channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ channels->type[index] |=
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
+
+ channels->type[index] |=
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL);
+ if (partial)
+ channels->type[index] |=
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
+
+ if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40)
+ channels->type[index] |=
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
+
+ /* scan for all SSIDs from req->ssids */
+ channels->type[index] |= cpu_to_le32(ssid_bitmap);
+ }
+}
+
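iwl_build_channel_cfg() above must list every supported channel but wants the requested ones scanned first, so it fills the array from both ends: requested channels from the head, everything else from the tail. A stand-alone sketch of that placement with made-up channel numbers:

#include <stdio.h>

#define N_SUPPORTED 6

int main(void)
{
        int supported[N_SUPPORTED] = { 1, 2, 3, 6, 11, 36 };    /* invented band */
        int requested[] = { 6, 36 };
        int out[N_SUPPORTED];
        int head = 0, tail = N_SUPPORTED - 1;
        int i, j, is_requested;

        for (i = 0; i < N_SUPPORTED; i++) {
                is_requested = 0;
                for (j = 0; j < 2; j++)
                        if (supported[i] == requested[j])
                                is_requested = 1;
                if (is_requested)
                        out[head++] = supported[i];     /* scanned first (partial scan) */
                else
                        out[tail--] = supported[i];     /* full scans only */
        }

        for (i = 0; i < N_SUPPORTED; i++)
                printf("%d ", out[i]);
        printf("\n");   /* prints: 6 36 11 3 2 1, requested channels first */
        return 0;
}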
+int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ int supported_bands = 0;
+ int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
+ int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+ int head = 0;
+ int tail = band_2ghz + band_5ghz;
+ u32 ssid_bitmap;
+ int cmd_len;
+ int ret;
+
+ struct iwl_scan_offload_cfg *scan_cfg;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_CONFIG_CMD,
+ .flags = CMD_SYNC,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (band_2ghz)
+ supported_bands++;
+ if (band_5ghz)
+ supported_bands++;
+
+ cmd_len = sizeof(struct iwl_scan_offload_cfg) +
+ supported_bands * SCAN_OFFLOAD_PROBE_REQ_SIZE;
+
+ scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
+ if (!scan_cfg)
+ return -ENOMEM;
+
+ iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd);
+ scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
+
+ iwl_scan_offload_build_ssid(req, &scan_cfg->scan_cmd, &ssid_bitmap);
+ /* build tx frames for supported bands */
+ if (band_2ghz) {
+ iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
+ IEEE80211_BAND_2GHZ,
+ &scan_cfg->scan_cmd.tx_cmd[0],
+ scan_cfg->data);
+ iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
+ IEEE80211_BAND_2GHZ, &head, &tail,
+ ssid_bitmap);
+ }
+ if (band_5ghz) {
+ iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
+ IEEE80211_BAND_5GHZ,
+ &scan_cfg->scan_cmd.tx_cmd[1],
+ scan_cfg->data +
+ SCAN_OFFLOAD_PROBE_REQ_SIZE);
+ iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
+ IEEE80211_BAND_5GHZ, &head, &tail,
+ ssid_bitmap);
+ }
+
+ cmd.data[0] = scan_cfg;
+ cmd.len[0] = cmd_len;
+ cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+
+ IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n");
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(scan_cfg);
+ return ret;
+}
+
+int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct iwl_scan_offload_profile *profile;
+ struct iwl_scan_offload_profile_cfg *profile_cfg;
+ struct iwl_scan_offload_blacklist *blacklist;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ .flags = CMD_SYNC,
+ .len[1] = sizeof(*profile_cfg),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int blacklist_len;
+ int i;
+ int ret;
+
+ if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
+ return -EIO;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
+ blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
+ else
+ blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
+
+ blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
+ if (!blacklist)
+ return -ENOMEM;
+
+ profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
+ if (!profile_cfg) {
+ ret = -ENOMEM;
+ goto free_blacklist;
+ }
+
+ cmd.data[0] = blacklist;
+ cmd.len[0] = sizeof(*blacklist) * blacklist_len;
+ cmd.data[1] = profile_cfg;
+
+ /* No blacklist configuration */
+
+ profile_cfg->num_profiles = req->n_match_sets;
+ profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
+ profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
+ profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
+
+ for (i = 0; i < req->n_match_sets; i++) {
+ profile = &profile_cfg->profiles[i];
+ profile->ssid_index = i;
+ /* Support any cipher and auth algorithm */
+ profile->unicast_cipher = 0xff;
+ profile->auth_alg = 0xff;
+ profile->network_type = IWL_NETWORK_TYPE_ANY;
+ profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
+ profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(profile_cfg);
+free_blacklist:
+ kfree(blacklist);
+
+ return ret;
+}
+
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct iwl_scan_offload_req scan_req = {
+ .watchdog = IWL_SCHED_SCAN_WATCHDOG,
+
+ .schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
+ .schedule_line[0].delay = req->interval / 1000,
+ .schedule_line[0].full_scan_mul = 1,
+
+ .schedule_line[1].iterations = 0xff,
+ .schedule_line[1].delay = req->interval / 1000,
+ .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
+ };
+
+ if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
+ IWL_DEBUG_SCAN(mvm,
+ "Sending scheduled scan with filtering, filter len %d\n",
+ req->n_match_sets);
+ scan_req.flags |=
+ cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID);
+ } else {
+ IWL_DEBUG_SCAN(mvm,
+ "Sending Scheduled scan without filtering\n");
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
+ sizeof(scan_req), &scan_req);
+}
+
+static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
+{
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_ABORT_CMD,
+ .flags = CMD_SYNC,
+ };
+ u32 status;
+
+ /* Exit immediately with an error if the device is not ready
+ * to receive the scan abort command or is not currently
+ * performing a scheduled scan */
+ if (mvm->scan_status != IWL_MVM_SCAN_SCHED)
+ return -EIO;
+
+ ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
+ if (ret)
+ return ret;
+
+ if (status != CAN_ABORT_STATUS) {
+ /*
+ * The scan abort will return 1 for success or
+ * 2 for "failure". A failure condition can be
+ * due to simply not being in an active scan which
+ * can occur if we send the scan abort before the
+ * microcode has notified us that a scan is completed.
+ */
+ IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->scan_status != IWL_MVM_SCAN_SCHED) {
+ IWL_DEBUG_SCAN(mvm, "No offloaded scan to stop\n");
+ return;
+ }
+
+ ret = iwl_mvm_send_sched_scan_abort(mvm);
+ if (ret)
+ IWL_DEBUG_SCAN(mvm, "Send stop offload scan failed %d\n", ret);
+ else
+ IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 44add291531b..329952363a54 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,6 +66,115 @@
#include "sta.h"
#include "rs.h"
+static void iwl_mvm_add_sta_cmd_v6_to_v5(struct iwl_mvm_add_sta_cmd_v6 *cmd_v6,
+ struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
+{
+ memset(cmd_v5, 0, sizeof(*cmd_v5));
+
+ cmd_v5->add_modify = cmd_v6->add_modify;
+ cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
+ cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
+ memcpy(cmd_v5->addr, cmd_v6->addr, ETH_ALEN);
+ cmd_v5->sta_id = cmd_v6->sta_id;
+ cmd_v5->modify_mask = cmd_v6->modify_mask;
+ cmd_v5->station_flags = cmd_v6->station_flags;
+ cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
+ cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
+ cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
+ cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
+ cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
+ cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
+ cmd_v5->assoc_id = cmd_v6->assoc_id;
+ cmd_v5->beamform_flags = cmd_v6->beamform_flags;
+ cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
+}
+
+static void
+iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
+ struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
+ u32 mac_id_n_color)
+{
+ memset(sta_cmd, 0, sizeof(*sta_cmd));
+
+ sta_cmd->sta_id = key_cmd->sta_id;
+ sta_cmd->add_modify = STA_MODE_MODIFY;
+ sta_cmd->modify_mask = STA_MODIFY_KEY;
+ sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
+
+ sta_cmd->key.key_offset = key_cmd->key_offset;
+ sta_cmd->key.key_flags = key_cmd->key_flags;
+ memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
+ sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
+ memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
+ sizeof(sta_cmd->key.tkip_rx_ttak));
+}
+
+static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
+ struct iwl_mvm_add_sta_cmd_v6 *cmd,
+ int *status)
+{
+ struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
+ cmd, status);
+
+ iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
+ &cmd_v5, status);
+}
+
+static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
+ struct iwl_mvm_add_sta_cmd_v6 *cmd)
+{
+ struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+ return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
+ sizeof(*cmd), cmd);
+
+ iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+
+ return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
+ &cmd_v5);
+}
+
+static int
+iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
+ struct iwl_mvm_add_sta_key_cmd *cmd,
+ u32 mac_id_n_color,
+ int *status)
+{
+ struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
+ sizeof(*cmd), cmd, status);
+
+ iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
+
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
+ &sta_cmd, status);
+}
+
+static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
+ u32 flags,
+ struct iwl_mvm_add_sta_key_cmd *cmd,
+ u32 mac_id_n_color)
+{
+ struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+ return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
+ sizeof(*cmd), cmd);
+
+ iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
+
+ return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
+ &sta_cmd);
+}
+
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
{
int sta_id;
@@ -87,7 +196,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
bool update)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd add_sta_cmd;
+ struct iwl_mvm_add_sta_cmd_v6 add_sta_cmd;
int ret;
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
@@ -175,8 +284,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
- &add_sta_cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status);
if (ret)
return ret;
@@ -229,8 +337,12 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
- /* for HW restart - need to reset the seq_number etc... */
- memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
+ /* for HW restart - reset everything but the sequence number */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = mvm_sta->tid_data[i].seq_number;
+ memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
+ mvm_sta->tid_data[i].seq_number = seq;
+ }
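The hunk above replaces the blanket memset of tid_data with a loop that zeroes each entry while carrying the TX sequence number across a firmware restart. A stand-alone sketch of the idiom, with an invented struct:

#include <stdio.h>
#include <string.h>

struct tid_data {                       /* invented fields, for illustration only */
        unsigned short seq_number;
        int txq_id;
        int state;
};

int main(void)
{
        struct tid_data tid = { .seq_number = 0x1230, .txq_id = 15, .state = 2 };
        unsigned short seq = tid.seq_number;

        memset(&tid, 0, sizeof(tid));   /* wipe the aggregation state ... */
        tid.seq_number = seq;           /* ... but keep the sequence number */

        printf("seq=0x%x txq=%d state=%d\n", tid.seq_number, tid.txq_id, tid.state);
        return 0;
}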
ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
if (ret)
@@ -256,7 +368,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {};
int ret;
u32 status;
@@ -269,8 +381,7 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -469,13 +580,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
const u8 *addr,
u16 mac_id, u16 color)
{
- struct iwl_mvm_add_sta_cmd cmd;
+ struct iwl_mvm_add_sta_cmd_v6 cmd;
int ret;
u32 status;
lockdep_assert_held(&mvm->mutex);
- memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd));
+ memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v6));
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
@@ -485,8 +596,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
if (addr)
memcpy(cmd.addr, addr, ETH_ALEN);
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -534,10 +644,14 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *bsta)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ static const u8 *baddr = _baddr;
lockdep_assert_held(&mvm->mutex);
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ baddr = vif->bss_conf.bssid;
+
if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
return -ENOSPC;
@@ -614,7 +728,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {};
int ret;
u32 status;
@@ -638,8 +752,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
STA_MODIFY_REMOVE_BA_TID;
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -674,7 +787,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {};
int ret;
u32 status;
@@ -696,8 +809,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -743,13 +855,13 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
- for (txq_id = IWL_MVM_FIRST_AGG_QUEUE;
- txq_id <= IWL_MVM_LAST_AGG_QUEUE; txq_id++)
+ for (txq_id = mvm->first_agg_queue;
+ txq_id <= mvm->last_agg_queue; txq_id++)
if (mvm->queue_to_mac80211[txq_id] ==
IWL_INVALID_MAC80211_QUEUE)
break;
- if (txq_id > IWL_MVM_LAST_AGG_QUEUE) {
+ if (txq_id > mvm->last_agg_queue) {
IWL_ERR(mvm, "Failed to allocate agg queue\n");
return -EIO;
}
@@ -987,10 +1099,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
u32 cmd_flags)
{
__le16 key_flags;
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_key_cmd cmd = {};
int ret, status;
u16 keyidx;
int i;
+ u32 mac_id_n_color = mvm_sta->mac_id_n_color;
keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK;
@@ -1000,14 +1113,14 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
- cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
+ cmd.tkip_rx_tsc_byte2 = tkip_iv32;
for (i = 0; i < 5; i++)
- cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
- memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+ cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
+ memcpy(cmd.key, keyconf->key, keyconf->keylen);
break;
case WLAN_CIPHER_SUITE_CCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
- memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+ memcpy(cmd.key, keyconf->key, keyconf->keylen);
break;
default:
WARN_ON(1);
@@ -1017,20 +1130,18 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
- cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
- cmd.key.key_offset = keyconf->hw_key_idx;
- cmd.key.key_flags = key_flags;
- cmd.add_modify = STA_MODE_MODIFY;
- cmd.modify_mask = STA_MODIFY_KEY;
+ cmd.key_offset = keyconf->hw_key_idx;
+ cmd.key_flags = key_flags;
cmd.sta_id = sta_id;
status = ADD_STA_SUCCESS;
if (cmd_flags == CMD_SYNC)
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
+ mac_id_n_color,
+ &status);
else
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
- sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd,
+ mac_id_n_color);
switch (status) {
case ADD_STA_SUCCESS:
@@ -1197,7 +1308,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
struct ieee80211_key_conf *keyconf)
{
struct iwl_mvm_sta *mvm_sta;
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_key_cmd cmd = {};
__le16 key_flags;
int ret, status;
u8 sta_id;
@@ -1252,17 +1363,14 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
- cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
- cmd.key.key_flags = key_flags;
- cmd.key.key_offset = keyconf->hw_key_idx;
+ cmd.key_flags = key_flags;
+ cmd.key_offset = keyconf->hw_key_idx;
cmd.sta_id = sta_id;
- cmd.modify_mask = STA_MODIFY_KEY;
- cmd.add_modify = STA_MODE_MODIFY;
-
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
+ mvm_sta->mac_id_n_color,
+ &status);
switch (status) {
case ADD_STA_SUCCESS:
@@ -1309,7 +1417,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd cmd = {
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1317,7 +1425,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
};
int ret;
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
@@ -1331,7 +1439,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
(reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd cmd = {
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
@@ -1346,7 +1454,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
int ret;
/* TODO: somehow the fw doesn't seem to take PS_POLL into account */
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 94b265eb32b8..4dfc359a4bdd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -293,10 +293,6 @@ struct iwl_mvm_sta {
struct iwl_lq_sta lq_sta;
struct ieee80211_vif *vif;
-#ifdef CONFIG_PM_SLEEP
- u16 last_seq_ctl;
-#endif
-
/* Temporary, until the new TLC will control the Tx protection */
s8 tx_protection;
bool tt_tx_protection;
diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/iwlwifi/mvm/testmode.h
new file mode 100644
index 000000000000..eb74391d91ca
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/testmode.h
@@ -0,0 +1,95 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_TESTMODE_H__
+#define __IWL_MVM_TESTMODE_H__
+
+/**
+ * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA
+ * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute)
+ * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32)
+ * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32)
+ * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32)
+ */
+enum iwl_mvm_testmode_attrs {
+ IWL_MVM_TM_ATTR_UNSPEC,
+ IWL_MVM_TM_ATTR_CMD,
+ IWL_MVM_TM_ATTR_NOA_DURATION,
+ IWL_MVM_TM_ATTR_BEACON_FILTER_STATE,
+
+ /* keep last */
+ NUM_IWL_MVM_TM_ATTRS,
+ IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1,
+};
+
+/**
+ * enum iwl_mvm_testmode_commands - MVM testmode commands
+ * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing
+ * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on
+ */
+enum iwl_mvm_testmode_commands {
+ IWL_MVM_TM_CMD_SET_NOA,
+ IWL_MVM_TM_CMD_SET_BEACON_FILTER,
+};
+
+#endif /* __IWL_MVM_TESTMODE_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 76a3c177e100..33cf56fdfc41 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -387,7 +387,8 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 duration, u32 min_duration)
+ u32 duration, u32 min_duration,
+ u32 max_delay)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
@@ -426,7 +427,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
time_cmd.max_frags = TE_V2_FRAG_NONE;
- time_cmd.max_delay = cpu_to_le32(500);
+ time_cmd.max_delay = cpu_to_le32(max_delay);
/* TODO: why do we need to set interval = bi if it is not periodic? */
time_cmd.interval = cpu_to_le32(1);
time_cmd.duration = cpu_to_le32(duration);
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index f86c51065ed3..d9c8d6cfa2db 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -123,6 +123,7 @@
* @duration: the duration of the session in TU.
* @min_duration: will start a new session if the current session will end
* in less than min_duration.
+ * @max_delay: maximum delay before starting the time event (in TU)
*
* This function can be used to start a session protection which means that the
* fw will stay on the channel for %duration_ms milliseconds. This function
@@ -133,7 +134,8 @@
*/
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 duration, u32 min_duration);
+ u32 duration, u32 min_duration,
+ u32 max_delay);
/**
* iwl_mvm_stop_session_protection - cancel the session protection.
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index e05440d90319..43d97c33a75a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -417,7 +417,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
- if (txq_id < IWL_MVM_FIRST_AGG_QUEUE)
+ if (txq_id < mvm->first_agg_queue)
atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0;
@@ -511,16 +511,10 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status)
}
#endif /* CONFIG_IWLWIFI_DEBUG */
-/**
- * translate ucode response to mac80211 tx status control values
- */
-static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
- struct ieee80211_tx_info *info)
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct ieee80211_tx_rate *r)
{
- struct ieee80211_tx_rate *r = &info->status.rates[0];
-
- info->status.antenna =
- ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
@@ -549,10 +543,23 @@ static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
r->flags |= IEEE80211_TX_RC_VHT_MCS;
} else {
r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
- info->band);
+ band);
}
}
+/**
+ * translate ucode response to mac80211 tx status control values
+ */
+static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->status.rates[0];
+
+ info->status.antenna =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
+}
+
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@@ -602,11 +609,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
}
info->status.rates[0].count = tx_resp->failure_frame + 1;
- iwl_mvm_hwrate_to_tx_control(le32_to_cpu(tx_resp->initial_rate),
- info);
+ iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
+ info);
/* Single frame failure in an AMPDU queue => send BAR */
- if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE &&
+ if (txq_id >= mvm->first_agg_queue &&
!(info->flags & IEEE80211_TX_STAT_ACK))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
@@ -619,7 +626,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
ieee80211_tx_status_ni(mvm->hw, skb);
}
- if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE) {
+ if (txq_id >= mvm->first_agg_queue) {
/* If this is an aggregation queue, we use the ssn since:
* ssn = wifi seq_num % 256.
* The seq_ctl is the sequence control of the packet to which
@@ -668,10 +675,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_mvm_check_ratid_empty(mvm, sta, tid);
spin_unlock_bh(&mvmsta->lock);
}
-
-#ifdef CONFIG_PM_SLEEP
- mvmsta->last_seq_ctl = seq_ctl;
-#endif
} else {
sta = NULL;
mvmsta = NULL;
@@ -681,7 +684,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
- if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && !WARN_ON(skb_freed > 1) &&
+ if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) &&
atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
if (mvmsta) {
/*
@@ -777,7 +780,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
struct ieee80211_sta *sta;
- if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < IWL_MVM_FIRST_AGG_QUEUE))
+ if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
return;
if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
@@ -904,8 +907,8 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_notif->txed_2_done;
info->status.ampdu_len = ba_notif->txed;
- iwl_mvm_hwrate_to_tx_control(tid_data->rate_n_flags,
- info);
+ iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
+ info);
}
}
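
The txq_id comparisons touched above all encode the same rule: queues below the first aggregation queue carry single frames and are tracked in pending_frames, while queues at or above it are AMPDU queues. Replacing the IWL_MVM_FIRST_AGG_QUEUE constant with the per-device mvm->first_agg_queue field presumably lets different hardware families place that boundary differently. A trivial standalone sketch of the predicate; the boundary value below is made up for illustration.

#include <stdbool.h>
#include <stdio.h>

struct mvm_like {
	int first_agg_queue;	/* boundary now comes from the device, not a constant */
};

/* Queues at or above the boundary are aggregation (AMPDU) queues. */
static bool is_agg_queue(const struct mvm_like *mvm, int txq_id)
{
	return txq_id >= mvm->first_agg_queue;
}

int main(void)
{
	struct mvm_like mvm = { .first_agg_queue = 10 };	/* illustrative value */

	printf("txq 4: agg=%d, txq 12: agg=%d\n",
	       is_agg_queue(&mvm, 4), is_agg_queue(&mvm, 12));
	return 0;
}
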
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index a9c357491434..ed69e9b78e82 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -466,7 +466,7 @@ void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
len = img->sec[IWL_UCODE_SECTION_DATA].len;
- buf = kzalloc(len, GFP_KERNEL);
+ buf = kzalloc(len, GFP_ATOMIC);
if (!buf)
return;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index dc02cb9792af..941c0c88f982 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -139,13 +139,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 6x00 Series */
{IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
{IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
{IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
@@ -153,12 +156,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
@@ -240,8 +247,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 6x35 Series */
{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
{IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
@@ -258,56 +268,91 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
#endif /* CONFIG_IWLDVM */
#if IS_ENABLED(CONFIG_IWLMVM)
-/* 7000 Series */
+/* 7260 Series */
{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
{IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
{IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
/* 3160 Series */
{IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
{IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
+
+/* 7265 Series */
+ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -349,7 +394,6 @@ out_free_drv:
iwl_drv_stop(trans_pcie->drv);
out_free_trans:
iwl_trans_pcie_free(iwl_trans);
- pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -360,8 +404,6 @@ static void iwl_pci_remove(struct pci_dev *pdev)
iwl_drv_stop(trans_pcie->drv);
iwl_trans_pcie_free(trans);
-
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index bad95d28d50d..5d9337bec67a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -220,6 +220,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ /* Clear the interrupt in APMG if the NIC is in RFKILL */
+ iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
+
set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
out:
@@ -443,22 +446,138 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
+static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
+{
+ int shift_param;
+ u32 address;
+ int ret = 0;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
+ } else {
+ shift_param = 16;
+ address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
+ }
+
+ /* set CPU to started */
+ iwl_trans_set_bits_mask(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ CSR_CPU_STATUS_LOADING_STARTED << shift_param,
+ 1);
+
+ /* set last complete descriptor number */
+ iwl_trans_set_bits_mask(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
+ << shift_param,
+ 1);
+
+ /* set last loaded block */
+ iwl_trans_set_bits_mask(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
+ << shift_param,
+ 1);
+
+ /* image loading complete */
+ iwl_trans_set_bits_mask(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ CSR_CPU_STATUS_LOADING_COMPLETED
+ << shift_param,
+ 1);
+
+ /* set FH_TCSR_0_REG */
+ iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);
+
+ /* verify image verification started */
+ ret = iwl_poll_bit(trans, address,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
+ CSR_SECURE_TIME_OUT);
+ if (ret < 0) {
+ IWL_ERR(trans, "secure boot process didn't start\n");
+ return ret;
+ }
+
+ /* wait for image verification to complete */
+ ret = iwl_poll_bit(trans, address,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
+ CSR_SECURE_TIME_OUT);
+
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out on secure boot process\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
const struct fw_img *image)
{
int i, ret = 0;
- for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
+ IWL_DEBUG_FW(trans,
+ "working with %s image\n",
+ image->is_secure ? "Secured" : "Non Secured");
+ IWL_DEBUG_FW(trans,
+ "working with %s CPU\n",
+ image->is_dual_cpus ? "Dual" : "Single");
+
+ /* configure the ucode to be ready to get the secured image */
+ if (image->is_secure) {
+ /* set secure boot inspector addresses */
+ iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
+ iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);
+
+ /* release CPU1 reset if secure inspector image burned in OTP */
+ iwl_write32(trans, CSR_RESET, 0);
+ }
+
+ /* load to FW the binary sections of CPU1 */
+ IWL_DEBUG_INFO(trans, "Loading CPU1\n");
+ for (i = 0;
+ i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
+ i++) {
if (!image->sec[i].data)
break;
-
ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
if (ret)
return ret;
}
- /* Remove all resets to allow NIC to operate */
- iwl_write32(trans, CSR_RESET, 0);
+ /* configure the ucode to start secure process on CPU1 */
+ if (image->is_secure) {
+ /* config CPU1 to start secure protocol */
+ ret = iwl_pcie_secure_set(trans, 1);
+ if (ret)
+ return ret;
+ } else {
+ /* Remove all resets to allow NIC to operate */
+ iwl_write32(trans, CSR_RESET, 0);
+ }
+
+ if (image->is_dual_cpus) {
+ /* load to FW the binary sections of CPU2 */
+ IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
+ for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
+ i < IWL_UCODE_SECTION_MAX; i++) {
+ if (!image->sec[i].data)
+ break;
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
+
+ if (image->is_secure) {
+ /* set CPU2 for secure protocol */
+ ret = iwl_pcie_secure_set(trans, 2);
+ if (ret)
+ return ret;
+ }
+ }
return 0;
}
@@ -1401,6 +1520,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
spin_lock_init(&trans_pcie->reg_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+ err = pci_enable_device(pdev);
+ if (err)
+ goto out_no_pci;
+
if (!cfg->base_params->pcie_l1_allowed) {
/*
* W/A - seems to solve weird behavior. We need to remove this
@@ -1412,10 +1535,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
PCIE_LINK_STATE_CLKPM);
}
- err = pci_enable_device(pdev);
- if (err)
- goto out_no_pci;
-
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
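
The secured/dual-CPU load path above follows a fixed order: load the CPU1 sections, then either kick CPU1's secure verification (iwl_pcie_secure_set) or simply release the reset, then load the CPU2 sections and kick CPU2's verification. Each iwl_pcie_secure_set() call ends by polling a status register until a "verification started" and then a "verification completed" bit appears, or a timeout expires. Below is a standalone sketch of that poll-until-bits-set step on a plain variable; the masks and retry count are placeholders, not the real CSR layout.

#include <stdint.h>
#include <stdio.h>

/* Poll a status word until the requested bits are set or we run out of
 * attempts -- the same shape as the CSR_SECURE_BOOT_CPU*_STATUS polls
 * above, modeled on an ordinary variable (illustration only). */
static int poll_bits(const volatile uint32_t *reg, uint32_t mask, int attempts)
{
	while (attempts-- > 0) {
		if ((*reg & mask) == mask)
			return 0;
		/* a real driver would sleep or delay between reads here */
	}
	return -1;	/* timed out */
}

int main(void)
{
	volatile uint32_t status = 0x3;	/* pretend verification started and completed */

	if (poll_bits(&status, 0x1, 10))	/* "verification started" bit */
		fprintf(stderr, "secure boot did not start\n");
	if (poll_bits(&status, 0x2, 10))	/* "verification completed" bit */
		fprintf(stderr, "secure boot timed out\n");
	return 0;
}
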
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index f45eb29c2ede..f644fcf861a8 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1102,6 +1102,8 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
* non-AGG queue.
*/
iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+
+ ssn = trans_pcie->txq[txq_id].q.read_ptr;
}
/* Place first TFD at index corresponding to start sequence number.
@@ -1463,7 +1465,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
spin_unlock_bh(&txq->lock);
}
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+#define COMMAND_POKE_TIMEOUT (HZ / 10)
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
@@ -1491,6 +1494,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cmd_idx;
int ret;
+ int timeout = HOST_COMPLETE_TIMEOUT;
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(trans_pcie, cmd->id));
@@ -1515,10 +1519,29 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
return ret;
}
- ret = wait_event_timeout(trans_pcie->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status),
- HOST_COMPLETE_TIMEOUT);
+ while (timeout > 0) {
+ unsigned long flags;
+
+ timeout -= COMMAND_POKE_TIMEOUT;
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
+ !test_bit(STATUS_HCMD_ACTIVE,
+ &trans_pcie->status),
+ COMMAND_POKE_TIMEOUT);
+ if (ret)
+ break;
+ /* poke the device - it may have lost the command */
+ if (iwl_trans_grab_nic_access(trans, true, &flags)) {
+ iwl_trans_release_nic_access(trans, &flags);
+ IWL_DEBUG_INFO(trans,
+ "Tried to wake NIC for command %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ } else {
+ IWL_ERR(trans, "Failed to poke NIC for command %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ break;
+ }
+ }
+
if (!ret) {
if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
struct iwl_txq *txq =
@@ -1539,6 +1562,9 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
"Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(trans_pcie, cmd->id));
ret = -ETIMEDOUT;
+
+ iwl_op_mode_nic_error(trans->op_mode);
+
goto cancel;
}
}
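
The new synchronous-command wait above replaces one long wait_event_timeout() with a loop of short slices: wait up to COMMAND_POKE_TIMEOUT, and if the command is still outstanding, grab NIC access to poke a possibly sleeping device, until the full HOST_COMPLETE_TIMEOUT budget is spent (at which point the op mode is told about a NIC error). A standalone userspace sketch of that wait-in-slices-and-poke structure follows; the sleep stands in for wait_event_timeout() and poke_device() for the NIC-access nudge.

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define TOTAL_TIMEOUT_MS	2000	/* mirrors HOST_COMPLETE_TIMEOUT (2 * HZ) */
#define POKE_INTERVAL_MS	100	/* mirrors COMMAND_POKE_TIMEOUT (HZ / 10) */

static volatile bool cmd_done;

static void poke_device(void)
{
	/* stand-in for grabbing NIC access to nudge a dozing device */
	fprintf(stderr, "poking device\n");
}

/* Wait for completion in short slices; after each slice that expires with
 * the command still pending, poke the device in case it lost the command. */
static int wait_for_command(void)
{
	int remaining = TOTAL_TIMEOUT_MS;
	struct timespec slice = { 0, POKE_INTERVAL_MS * 1000000L };

	while (remaining > 0) {
		nanosleep(&slice, NULL);	/* models wait_event_timeout() */
		if (cmd_done)
			return 0;
		remaining -= POKE_INTERVAL_MS;
		poke_device();
	}
	return -1;	/* full timeout elapsed -> treat as a device error */
}

int main(void)
{
	cmd_done = true;	/* pretend the firmware answered immediately */
	return wait_for_command() ? 1 : 0;
}
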
diff --git a/drivers/net/wireless/libertas/firmware.c b/drivers/net/wireless/libertas/firmware.c
index c0f9e7e862f6..51b92b5df119 100644
--- a/drivers/net/wireless/libertas/firmware.c
+++ b/drivers/net/wireless/libertas/firmware.c
@@ -53,6 +53,11 @@ static void main_firmware_cb(const struct firmware *firmware, void *context)
/* Firmware found! */
lbs_fw_loaded(priv, 0, priv->helper_fw, firmware);
+ if (priv->helper_fw) {
+ release_firmware(priv->helper_fw);
+ priv->helper_fw = NULL;
+ }
+ release_firmware(firmware);
}
static void helper_firmware_cb(const struct firmware *firmware, void *context)
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index c94dd6802672..ef8c98e21098 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -754,14 +754,14 @@ static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
if (ret == 0 && (card->model != MODEL_8305))
ret = if_cs_prog_real(card, mainfw);
if (ret)
- goto out;
+ return;
/* Now actually get the IRQ */
ret = request_irq(card->p_dev->irq, if_cs_interrupt,
IRQF_SHARED, DRV_NAME, card);
if (ret) {
pr_err("error in request_irq\n");
- goto out;
+ return;
}
/*
@@ -777,10 +777,6 @@ static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
pr_err("could not activate card\n");
free_irq(card->p_dev->irq, card);
}
-
-out:
- release_firmware(helper);
- release_firmware(mainfw);
}
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 45578335e420..991238afd1b6 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -708,20 +708,16 @@ static void if_sdio_do_prog_firmware(struct lbs_private *priv, int ret,
ret = if_sdio_prog_helper(card, helper);
if (ret)
- goto out;
+ return;
lbs_deb_sdio("Helper firmware loaded\n");
ret = if_sdio_prog_real(card, mainfw);
if (ret)
- goto out;
+ return;
lbs_deb_sdio("Firmware loaded\n");
if_sdio_finish_power_on(card);
-
-out:
- release_firmware(helper);
- release_firmware(mainfw);
}
static int if_sdio_prog_firmware(struct if_sdio_card *card)
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 4bb6574f4073..83669151bb82 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1094,11 +1094,7 @@ static int if_spi_init_card(struct if_spi_card *card)
goto out;
out:
- release_firmware(helper);
- release_firmware(mainfw);
-
lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
-
return err;
}
@@ -1128,7 +1124,7 @@ static int if_spi_probe(struct spi_device *spi)
{
struct if_spi_card *card;
struct lbs_private *priv = NULL;
- struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
+ struct libertas_spi_platform_data *pdata = dev_get_platdata(&spi->dev);
int err = 0;
lbs_deb_enter(LBS_DEB_SPI);
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 27980778d992..dff08a2896a3 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -844,7 +844,7 @@ static void if_usb_prog_firmware(struct lbs_private *priv, int ret,
cardp->fw = fw;
if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) {
ret = -EINVAL;
- goto release_fw;
+ goto done;
}
/* Cancel any pending usb business */
@@ -861,7 +861,7 @@ restart:
if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
ret = -EIO;
- goto release_fw;
+ goto done;
}
cardp->bootcmdresp = 0;
@@ -883,14 +883,14 @@ restart:
usb_kill_urb(cardp->tx_urb);
if (if_usb_submit_rx_urb(cardp) < 0)
ret = -EIO;
- goto release_fw;
+ goto done;
} else if (cardp->bootcmdresp <= 0) {
if (--reset_count >= 0) {
if_usb_reset_device(cardp);
goto restart;
}
ret = -EIO;
- goto release_fw;
+ goto done;
}
i = 0;
@@ -921,14 +921,14 @@ restart:
pr_info("FW download failure, time = %d ms\n", i * 100);
ret = -EIO;
- goto release_fw;
+ goto done;
}
cardp->priv->fw_ready = 1;
if_usb_submit_rx_urb(cardp);
if (lbs_start_card(priv))
- goto release_fw;
+ goto done;
if_usb_setup_firmware(priv);
@@ -939,11 +939,8 @@ restart:
if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
priv->ehs_remove_supported = false;
- release_fw:
- release_firmware(cardp->fw);
- cardp->fw = NULL;
-
done:
+ cardp->fw = NULL;
lbs_deb_leave(LBS_DEB_USB);
}
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index a6c46f3b6e3a..e47f4e3012b8 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1048,7 +1048,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
unsigned long cmd_flags;
unsigned long scan_pending_q_flags;
- uint16_t cancel_scan_cmd = false;
+ bool cancel_scan_cmd = false;
if ((adapter->curr_cmd) &&
(adapter->curr_cmd->wait_q_enabled)) {
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 9d7c0e6c4fc7..4e4686e6ac09 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -621,7 +621,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
int ret = 0;
struct ieee_types_assoc_rsp *assoc_rsp;
struct mwifiex_bssdescriptor *bss_desc;
- u8 enable_data = true;
+ bool enable_data = true;
u16 cap_info, status_code;
assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
@@ -1422,13 +1422,19 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
*/
int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
{
+ int ret = 0;
+
if (!priv->media_connected)
return 0;
switch (priv->bss_mode) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
- return mwifiex_deauthenticate_infra(priv, mac);
+ ret = mwifiex_deauthenticate_infra(priv, mac);
+ if (ret)
+ cfg80211_disconnected(priv->netdev, 0, NULL, 0,
+ GFP_KERNEL);
+ break;
case NL80211_IFTYPE_ADHOC:
return mwifiex_send_cmd_sync(priv,
HostCmd_CMD_802_11_AD_HOC_STOP,
@@ -1440,7 +1446,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
break;
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(mwifiex_deauthenticate);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index fd778337deee..9d7c9d354d34 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -358,10 +358,12 @@ process_start:
}
} while (true);
- if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter))
+ spin_lock_irqsave(&adapter->main_proc_lock, flags);
+ if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) {
+ spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
goto process_start;
+ }
- spin_lock_irqsave(&adapter->main_proc_lock, flags);
adapter->mwifiex_processing = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
@@ -880,7 +882,9 @@ mwifiex_add_card(void *card, struct semaphore *sem,
adapter->cmd_wait_q.status = 0;
adapter->scan_wait_q_woken = false;
- adapter->workqueue = create_workqueue("MWIFIEX_WORK_QUEUE");
+ adapter->workqueue =
+ alloc_workqueue("MWIFIEX_WORK_QUEUE",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
if (!adapter->workqueue)
goto err_kmalloc;
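
The main.c hunk above closes a race: the pending-work check (int_status or RX received) is now done under main_proc_lock, in the same critical section that clears mwifiex_processing, so an interrupt landing between the last processing pass and the flag clear cannot be lost. Below is a compact pthread sketch of that check-again-under-the-lock pattern (build with -pthread); the variable names mirror the driver, but the code is an illustration, not the driver itself.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t proc_lock = PTHREAD_MUTEX_INITIALIZER;
static bool processing;
static int int_status;

/* Main-loop tail after the change above: re-check pending work *under the
 * lock* before dropping the "processing" flag. */
static void main_process_tail(void)
{
process_start:
	int_status = 0;			/* ... drain interrupts / RX here ... */

	pthread_mutex_lock(&proc_lock);
	if (int_status) {		/* raced with the ISR: go around again */
		pthread_mutex_unlock(&proc_lock);
		goto process_start;
	}
	processing = false;
	pthread_mutex_unlock(&proc_lock);
}

/* ISR side: record pending work and mark the main loop as running. */
static void isr(void)
{
	pthread_mutex_lock(&proc_lock);
	int_status = 1;
	if (!processing)
		processing = true;	/* caller would queue main_process_tail() */
	pthread_mutex_unlock(&proc_lock);
}

int main(void)
{
	isr();
	main_process_tail();
	printf("processing=%d int_status=%d\n", processing, int_status);
	return 0;
}
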
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 52da8ee7599a..33fa9432b241 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -93,7 +93,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_err("Card or adapter structure is not valid\n");
return 0;
@@ -128,7 +128,7 @@ static int mwifiex_pcie_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_err("Card or adapter structure is not valid\n");
return 0;
@@ -2037,7 +2037,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
goto exit;
}
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_debug("info: %s: card=%p adapter=%p\n", __func__, card,
card ? card->adapter : NULL);
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index c0268b597748..7d66018a2e33 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -327,7 +327,7 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
{
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = &cmd->params.opt_hs_cfg;
- u16 hs_activate = false;
+ bool hs_activate = false;
if (!hscfg_param)
/* New Activate command */
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 8b057524b252..8c351f71f72f 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -118,7 +118,8 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
dev_dbg(adapter->dev,
"info: successfully disconnected from %pM: reason code %d\n",
priv->cfg_bssid, reason_code);
- if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+ if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+ priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
GFP_KERNEL);
}
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 95fa3599b407..5dd0ccc70b86 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -708,7 +708,7 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
{
u8 *curr = (u8 *) &resp->params.get_wmm_status;
uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
- int valid = true;
+ bool valid = true;
struct mwifiex_ie_types_data *tlv_hdr;
struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 644d6e0c51cc..0f129d498fb1 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -83,11 +83,10 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
}
void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
- struct sk_buff *skb);
+ struct sk_buff *skb);
void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
- struct mwifiex_ra_list_tbl *ra,
- int tid);
+ struct mwifiex_ra_list_tbl *ra, int tid);
int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
@@ -95,21 +94,18 @@ int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra_list, int tid);
u8 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
- const struct sk_buff *skb);
+ const struct sk_buff *skb);
void mwifiex_wmm_init(struct mwifiex_adapter *adapter);
-extern u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
- u8 **assoc_buf,
- struct ieee_types_wmm_parameter
- *wmmie,
- struct ieee80211_ht_cap
- *htcap);
+u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
+ u8 **assoc_buf,
+ struct ieee_types_wmm_parameter *wmmie,
+ struct ieee80211_ht_cap *htcap);
void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
- struct ieee_types_wmm_parameter
- *wmm_ie);
+ struct ieee_types_wmm_parameter *wmm_ie);
void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
-extern int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
- const struct host_cmd_ds_command *resp);
+int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
+ const struct host_cmd_ds_command *resp);
#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index a3707fd4ef62..b953ad621e0b 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -6093,7 +6093,6 @@ err_iounmap:
if (priv->sram != NULL)
pci_iounmap(pdev, priv->sram);
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
err_free_reg:
@@ -6147,7 +6146,6 @@ static void mwl8k_remove(struct pci_dev *pdev)
unmap:
pci_iounmap(pdev, priv->regs);
pci_iounmap(pdev, priv->sram);
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 3bb936b9558c..eebd2be21ee9 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -182,23 +182,20 @@ extern int orinoco_debug;
/* Exported prototypes */
/********************************************************************/
-extern struct orinoco_private *alloc_orinocodev(
- int sizeof_card, struct device *device,
- int (*hard_reset)(struct orinoco_private *),
- int (*stop_fw)(struct orinoco_private *, int));
-extern void free_orinocodev(struct orinoco_private *priv);
-extern int orinoco_init(struct orinoco_private *priv);
-extern int orinoco_if_add(struct orinoco_private *priv,
- unsigned long base_addr,
- unsigned int irq,
- const struct net_device_ops *ops);
-extern void orinoco_if_del(struct orinoco_private *priv);
-extern int orinoco_up(struct orinoco_private *priv);
-extern void orinoco_down(struct orinoco_private *priv);
-extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
-
-extern void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
-extern void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
+struct orinoco_private *alloc_orinocodev(int sizeof_card, struct device *device,
+ int (*hard_reset)(struct orinoco_private *),
+ int (*stop_fw)(struct orinoco_private *, int));
+void free_orinocodev(struct orinoco_private *priv);
+int orinoco_init(struct orinoco_private *priv);
+int orinoco_if_add(struct orinoco_private *priv, unsigned long base_addr,
+ unsigned int irq, const struct net_device_ops *ops);
+void orinoco_if_del(struct orinoco_private *priv);
+int orinoco_up(struct orinoco_private *priv);
+void orinoco_down(struct orinoco_private *priv);
+irqreturn_t orinoco_interrupt(int irq, void *dev_id);
+
+void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
+void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
int orinoco_process_xmit_skb(struct sk_buff *skb,
struct net_device *dev,
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index d73fdf6185a2..ffb2469eb679 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -234,7 +234,6 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -265,7 +264,6 @@ static void orinoco_nortel_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->attr_io);
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index 677bf14eca84..5ae1191d2532 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -184,7 +184,6 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -205,7 +204,6 @@ static void orinoco_pci_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_release_regions(pdev);
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 2559dbd6184b..bbd36d1676ff 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -273,7 +273,6 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -301,7 +300,6 @@ static void orinoco_plx_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->attr_io);
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 42afeeea2c40..04b08de5fd5d 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -170,7 +170,6 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -195,7 +194,6 @@ static void orinoco_tmd_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->bridge_io);
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 57e3af8ebb4b..f9a07b0d83ac 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -631,7 +631,6 @@ static int p54p_probe(struct pci_dev *pdev,
iounmap(priv->map);
err_free_dev:
- pci_set_drvdata(pdev, NULL);
p54_free_common(dev);
err_free_reg:
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 7fc46f26cf2b..de15171e2cd8 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -636,7 +636,7 @@ static int p54spi_probe(struct spi_device *spi)
gpio_direction_input(p54spi_gpio_irq);
ret = request_irq(gpio_to_irq(p54spi_gpio_irq),
- p54spi_interrupt, IRQF_DISABLED, "p54spi",
+ p54spi_interrupt, 0, "p54spi",
priv->spi);
if (ret < 0) {
dev_err(&priv->spi->dev, "request_irq() failed");
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 1c22b81e6ef3..8863a6cb2388 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -183,7 +183,7 @@ prism54_update_stats(struct work_struct *work)
data = r.ptr;
/* copy this MAC to the bss */
- memcpy(bss.address, data, 6);
+ memcpy(bss.address, data, ETH_ALEN);
kfree(data);
/* now ask for the corresponding bss */
@@ -531,7 +531,7 @@ prism54_set_wap(struct net_device *ndev, struct iw_request_info *info,
return -EINVAL;
/* prepare the structure for the set object */
- memcpy(&bssid[0], awrq->sa_data, 6);
+ memcpy(&bssid[0], awrq->sa_data, ETH_ALEN);
/* set the bssid -- does this make sense when in AP mode? */
rvalue = mgt_set_request(priv, DOT11_OID_BSSID, 0, &bssid);
@@ -550,7 +550,7 @@ prism54_get_wap(struct net_device *ndev, struct iw_request_info *info,
int rvalue;
rvalue = mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r);
- memcpy(awrq->sa_data, r.ptr, 6);
+ memcpy(awrq->sa_data, r.ptr, ETH_ALEN);
awrq->sa_family = ARPHRD_ETHER;
kfree(r.ptr);
@@ -582,7 +582,7 @@ prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info,
size_t wpa_ie_len;
/* The first entry must be the MAC address */
- memcpy(iwe.u.ap_addr.sa_data, bss->address, 6);
+ memcpy(iwe.u.ap_addr.sa_data, bss->address, ETH_ALEN);
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
iwe.cmd = SIOCGIWAP;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
@@ -2489,7 +2489,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
&((struct sockaddr *) addr)->sa_data);
if (!ret)
memcpy(priv->ndev->dev_addr,
- &((struct sockaddr *) addr)->sa_data, 6);
+ &((struct sockaddr *) addr)->sa_data, ETH_ALEN);
return ret;
}
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 5970ff6f40cc..41a16d30c79c 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -837,7 +837,7 @@ islpci_setup(struct pci_dev *pdev)
/* ndev->set_multicast_list = &islpci_set_multicast_list; */
ndev->addr_len = ETH_ALEN;
/* Get a non-zero dummy MAC address for nameif. Jean II */
- memcpy(ndev->dev_addr, dummy_mac, 6);
+ memcpy(ndev->dev_addr, dummy_mac, ETH_ALEN);
ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index a01606b36e03..056af38e72e3 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -682,7 +682,7 @@ mgt_update_addr(islpci_private *priv)
isl_oid[GEN_OID_MACADDRESS].size, &res);
if ((ret == 0) && res && (res->header->operation != PIMFOR_OP_ERROR))
- memcpy(priv->ndev->dev_addr, res->data, 6);
+ memcpy(priv->ndev->dev_addr, res->data, ETH_ALEN);
else
ret = -EIO;
if (res)
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 68dbbb9c6d12..006b8bcb2e31 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -58,11 +58,11 @@ config RT61PCI
config RT2800PCI
tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
- depends on PCI || SOC_RT288X || SOC_RT305X
+ depends on PCI
select RT2800_LIB
+ select RT2800_LIB_MMIO
select RT2X00_LIB_MMIO
- select RT2X00_LIB_PCI if PCI
- select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
+ select RT2X00_LIB_PCI
select RT2X00_LIB_FIRMWARE
select RT2X00_LIB_CRYPTO
select CRC_CCITT
@@ -199,9 +199,30 @@ config RT2800USB_UNKNOWN
endif
+config RT2800SOC
+ tristate "Ralink WiSoC support"
+ depends on SOC_RT288X || SOC_RT305X
+ select RT2X00_LIB_SOC
+ select RT2X00_LIB_MMIO
+ select RT2X00_LIB_CRYPTO
+ select RT2X00_LIB_FIRMWARE
+ select RT2800_LIB
+ select RT2800_LIB_MMIO
+ ---help---
+ This adds support for Ralink WiSoC devices.
+ Supported chips: RT2880, RT3050, RT3052, RT3350, RT3352.
+
+ When compiled as a module, this driver will be called rt2800soc.
+
+
config RT2800_LIB
tristate
+config RT2800_LIB_MMIO
+ tristate
+ select RT2X00_LIB_MMIO
+ select RT2800_LIB
+
config RT2X00_LIB_MMIO
tristate
@@ -219,6 +240,7 @@ config RT2X00_LIB_USB
config RT2X00_LIB
tristate
+ select AVERAGE
config RT2X00_LIB_FIRMWARE
boolean
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index f069d8bc5b67..24a66015a495 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
obj-$(CONFIG_RT2800_LIB) += rt2800lib.o
+obj-$(CONFIG_RT2800_LIB_MMIO) += rt2800mmio.o
obj-$(CONFIG_RT2400PCI) += rt2400pci.o
obj-$(CONFIG_RT2500PCI) += rt2500pci.o
obj-$(CONFIG_RT61PCI) += rt61pci.o
@@ -21,3 +22,4 @@ obj-$(CONFIG_RT2800PCI) += rt2800pci.o
obj-$(CONFIG_RT2500USB) += rt2500usb.o
obj-$(CONFIG_RT73USB) += rt73usb.o
obj-$(CONFIG_RT2800USB) += rt2800usb.o
+obj-$(CONFIG_RT2800SOC) += rt2800soc.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 3d53a09da5a1..38ed9a3e44c8 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1261,7 +1261,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->timestamp = ((u64)rx_high << 32) | rx_low;
rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
- rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) -
+ rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) -
entry->queue->rt2x00dev->rssi_offset;
rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index fa33b5edf931..aab6b5e4f5dd 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -52,6 +52,7 @@
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
* RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
* RF5592 2.4G/5G 2T2R
+ * RF3070 2.4G 1T1R
* RF5360 2.4G 1T1R
* RF5370 2.4G 1T1R
* RF5390 2.4G 1T1R
@@ -70,6 +71,7 @@
#define RF3322 0x000c
#define RF3053 0x000d
#define RF5592 0x000f
+#define RF3070 0x3070
#define RF3290 0x3290
#define RF5360 0x5360
#define RF5370 0x5370
@@ -122,7 +124,7 @@
/*
* MAC_CSR0_3290: MAC_CSR0 for RT3290 to identity MAC version number.
*/
-#define MAC_CSR0_3290 0x0000
+#define MAC_CSR0_3290 0x0000
/*
* E2PROM_CSR: PCI EEPROM control register.
@@ -211,17 +213,17 @@
/*
* COEX_CFG_0
*/
-#define COEX_CFG0 0x0040
+#define COEX_CFG0 0x0040
#define COEX_CFG_ANT FIELD32(0xff000000)
/*
* COEX_CFG_1
*/
-#define COEX_CFG1 0x0044
+#define COEX_CFG1 0x0044
/*
* COEX_CFG_2
*/
-#define COEX_CFG2 0x0048
+#define COEX_CFG2 0x0048
#define BT_COEX_CFG1 FIELD32(0xff000000)
#define BT_COEX_CFG0 FIELD32(0x00ff0000)
#define WL_COEX_CFG1 FIELD32(0x0000ff00)
@@ -235,8 +237,8 @@
#define PLL_RESERVED_INPUT2 FIELD32(0x0000ff00)
#define PLL_CONTROL FIELD32(0x00070000)
#define PLL_LPF_R1 FIELD32(0x00080000)
-#define PLL_LPF_C1_CTRL FIELD32(0x00300000)
-#define PLL_LPF_C2_CTRL FIELD32(0x00c00000)
+#define PLL_LPF_C1_CTRL FIELD32(0x00300000)
+#define PLL_LPF_C2_CTRL FIELD32(0x00c00000)
#define PLL_CP_CURRENT_CTRL FIELD32(0x03000000)
#define PLL_PFD_DELAY_CTRL FIELD32(0x0c000000)
#define PLL_LOCK_CTRL FIELD32(0x70000000)
@@ -2164,7 +2166,7 @@ struct mac_iveiv_entry {
*/
#define RFCSR6_R1 FIELD8(0x03)
#define RFCSR6_R2 FIELD8(0x40)
-#define RFCSR6_TXDIV FIELD8(0x0c)
+#define RFCSR6_TXDIV FIELD8(0x0c)
/* bits for RF3053 */
#define RFCSR6_VCO_IC FIELD8(0xc0)
@@ -2202,13 +2204,13 @@ struct mac_iveiv_entry {
* RFCSR 12:
*/
#define RFCSR12_TX_POWER FIELD8(0x1f)
-#define RFCSR12_DR0 FIELD8(0xe0)
+#define RFCSR12_DR0 FIELD8(0xe0)
/*
* RFCSR 13:
*/
#define RFCSR13_TX_POWER FIELD8(0x1f)
-#define RFCSR13_DR0 FIELD8(0xe0)
+#define RFCSR13_DR0 FIELD8(0xe0)
/*
* RFCSR 15:
@@ -2226,7 +2228,7 @@ struct mac_iveiv_entry {
#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
#define RFCSR17_TX_LO1_EN FIELD8(0x08)
#define RFCSR17_R FIELD8(0x20)
-#define RFCSR17_CODE FIELD8(0x7f)
+#define RFCSR17_CODE FIELD8(0x7f)
/* RFCSR 18 */
#define RFCSR18_XO_TUNE_BYPASS FIELD8(0x40)
@@ -2449,7 +2451,7 @@ enum rt2800_eeprom_word {
*/
#define EEPROM_NIC_CONF0_RXPATH FIELD16(0x000f)
#define EEPROM_NIC_CONF0_TXPATH FIELD16(0x00f0)
-#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
+#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
/*
* EEPROM NIC Configuration 1
@@ -2471,18 +2473,18 @@ enum rt2800_eeprom_word {
* DAC_TEST: 0: disable, 1: enable
*/
#define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001)
-#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
-#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
-#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G FIELD16(0x0008)
+#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G FIELD16(0x0008)
#define EEPROM_NIC_CONF1_CARDBUS_ACCEL FIELD16(0x0010)
#define EEPROM_NIC_CONF1_BW40M_SB_2G FIELD16(0x0020)
#define EEPROM_NIC_CONF1_BW40M_SB_5G FIELD16(0x0040)
#define EEPROM_NIC_CONF1_WPS_PBC FIELD16(0x0080)
#define EEPROM_NIC_CONF1_BW40M_2G FIELD16(0x0100)
#define EEPROM_NIC_CONF1_BW40M_5G FIELD16(0x0200)
-#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA FIELD16(0x400)
+#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA FIELD16(0x400)
#define EEPROM_NIC_CONF1_ANT_DIVERSITY FIELD16(0x1800)
-#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000)
+#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000)
#define EEPROM_NIC_CONF1_BT_COEXIST FIELD16(0x4000)
#define EEPROM_NIC_CONF1_DAC_TEST FIELD16(0x8000)
@@ -2521,9 +2523,9 @@ enum rt2800_eeprom_word {
* TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
* CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved
*/
-#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
-#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
-#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
+#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
+#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
+#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
/*
* EEPROM LNA
@@ -2790,7 +2792,7 @@ enum rt2800_eeprom_word {
#define MCU_CURRENT 0x36
#define MCU_LED 0x50
#define MCU_LED_STRENGTH 0x51
-#define MCU_LED_AG_CONF 0x52
+#define MCU_LED_AG_CONF 0x52
#define MCU_LED_ACT_CONF 0x53
#define MCU_LED_LED_POLARITY 0x54
#define MCU_RADAR 0x60
@@ -2799,7 +2801,7 @@ enum rt2800_eeprom_word {
#define MCU_FREQ_OFFSET 0x74
#define MCU_BBP_SIGNAL 0x80
#define MCU_POWER_SAVE 0x83
-#define MCU_BAND_SELECT 0x91
+#define MCU_BAND_SELECT 0x91
/*
* MCU mailbox tokens
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 88ce656f96cd..c5738f14c4ba 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -278,12 +278,9 @@ static const unsigned int rt2800_eeprom_map_ext[EEPROM_WORD_COUNT] = {
[EEPROM_LNA] = 0x0026,
[EEPROM_EXT_LNA2] = 0x0027,
[EEPROM_RSSI_BG] = 0x0028,
- [EEPROM_TXPOWER_DELTA] = 0x0028, /* Overlaps with RSSI_BG */
[EEPROM_RSSI_BG2] = 0x0029,
- [EEPROM_TXMIXER_GAIN_BG] = 0x0029, /* Overlaps with RSSI_BG2 */
[EEPROM_RSSI_A] = 0x002a,
[EEPROM_RSSI_A2] = 0x002b,
- [EEPROM_TXMIXER_GAIN_A] = 0x002b, /* Overlaps with RSSI_A2 */
[EEPROM_TXPOWER_BG1] = 0x0030,
[EEPROM_TXPOWER_BG2] = 0x0037,
[EEPROM_EXT_TXPOWER_BG3] = 0x003e,
@@ -1783,7 +1780,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2800_bbp_read(rt2x00dev, 3, &r3);
if (rt2x00_rt(rt2x00dev, RT3572) &&
- test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ rt2x00_has_cap_bt_coexist(rt2x00dev))
rt2800_config_3572bt_ant(rt2x00dev);
/*
@@ -1795,7 +1792,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
break;
case 2:
if (rt2x00_rt(rt2x00dev, RT3572) &&
- test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ rt2x00_has_cap_bt_coexist(rt2x00dev))
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 1);
else
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
@@ -1825,7 +1822,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
break;
case 2:
if (rt2x00_rt(rt2x00dev, RT3572) &&
- test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ rt2x00_has_cap_bt_coexist(rt2x00dev)) {
rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
@@ -2029,13 +2026,6 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
rt2x00dev->default_ant.tx_chain_num <= 2);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
- msleep(1);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
-
rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -2141,7 +2131,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
if (rf->channel <= 14) {
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
@@ -2674,7 +2664,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
if (rf->channel <= 14) {
int idx = rf->channel-1;
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
/* r55/r59 value array of channel 1~14 */
static const char r55_bt_rev[] = {0x83, 0x83,
@@ -3152,6 +3142,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF3322:
rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
break;
+ case RF3070:
case RF5360:
case RF5370:
case RF5372:
@@ -3166,7 +3157,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
}
- if (rt2x00_rf(rt2x00dev, RF3290) ||
+ if (rt2x00_rf(rt2x00dev, RF3070) ||
+ rt2x00_rf(rt2x00dev, RF3290) ||
rt2x00_rf(rt2x00dev, RF3322) ||
rt2x00_rf(rt2x00dev, RF5360) ||
rt2x00_rf(rt2x00dev, RF5370) ||
@@ -3218,8 +3210,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rf->channel <= 14) {
if (!rt2x00_rt(rt2x00dev, RT5390) &&
!rt2x00_rt(rt2x00dev, RT5392)) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG,
- &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
} else {
@@ -3244,7 +3235,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rt2x00_rt(rt2x00dev, RT3593))
rt2800_bbp_write(rt2x00dev, 83, 0x9a);
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 75, 0x46);
else
rt2800_bbp_write(rt2x00dev, 75, 0x50);
@@ -3280,7 +3271,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
/* Turn on primary PAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN,
rf->channel > 14);
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev))
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
else
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
@@ -3311,33 +3302,50 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
- if (rt2x00_rt(rt2x00dev, RT3572))
+ if (rt2x00_rt(rt2x00dev, RT3572)) {
rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
+ /* AGC init */
+ if (rf->channel <= 14)
+ reg = 0x1c + (2 * rt2x00dev->lna_gain);
+ else
+ reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3);
+
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
+ }
+
if (rt2x00_rt(rt2x00dev, RT3593)) {
- if (rt2x00_is_usb(rt2x00dev)) {
- rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
- /* Band selection. GPIO #8 controls all paths */
+ /* Band selection */
+ if (rt2x00_is_usb(rt2x00dev) ||
+ rt2x00_is_pcie(rt2x00dev)) {
+ /* GPIO #8 controls all paths */
rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0);
if (rf->channel <= 14)
rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1);
else
rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0);
+ }
+ /* LNA PE control. */
+ if (rt2x00_is_usb(rt2x00dev)) {
+ /* GPIO #4 controls PE0 and PE1,
+ * GPIO #7 controls PE2
+ */
rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
- /* LNA PE control.
- * GPIO #4 controls PE0 and PE1,
- * GPIO #7 controls PE2
- */
rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
-
- rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+ } else if (rt2x00_is_pcie(rt2x00dev)) {
+ /* GPIO #4 controls PE0, PE1 and PE2 */
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
}
+ rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+
/* AGC init */
if (rf->channel <= 14)
reg = 0x1c + 2 * rt2x00dev->lna_gain;
@@ -3565,7 +3573,7 @@ static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
{
int delta;
- if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_power_limit(rt2x00dev))
return 0;
/*
@@ -3594,7 +3602,7 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
if (rt2x00_rt(rt2x00dev, RT3593))
return min_t(u8, txpower, 0xc);
- if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_power_limit(rt2x00dev)) {
/*
* Check if eirp txpower exceed txpower_limit.
* We use OFDM 6M as criterion and its eirp txpower
@@ -4264,6 +4272,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
break;
case RF3053:
+ case RF3070:
case RF3290:
case RF5360:
case RF5370:
@@ -4405,6 +4414,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT3593) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
rt2x00_rt(rt2x00dev, RT5592))
@@ -4412,8 +4422,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
else
vgc = 0x2e + rt2x00dev->lna_gain;
} else { /* 5GHZ band */
- if (rt2x00_rt(rt2x00dev, RT3572))
- vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ vgc = 0x20 + (rt2x00dev->lna_gain * 5) / 3;
else if (rt2x00_rt(rt2x00dev, RT5592))
vgc = 0x24 + (2 * rt2x00dev->lna_gain);
else {
@@ -4431,11 +4441,17 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
struct link_qual *qual, u8 vgc_level)
{
if (qual->vgc_level != vgc_level) {
- if (rt2x00_rt(rt2x00dev, RT5592)) {
+ if (rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
+ vgc_level);
+ } else if (rt2x00_rt(rt2x00dev, RT5592)) {
rt2800_bbp_write(rt2x00dev, 83, qual->rssi > -65 ? 0x4a : 0x7a);
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level);
- } else
+ } else {
rt2800_bbp_write(rt2x00dev, 66, vgc_level);
+ }
+
qual->vgc_level = vgc_level;
qual->vgc_level_reg = vgc_level;
}
@@ -4454,17 +4470,35 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
return;
- /*
- * When RSSI is better then -80 increase VGC level with 0x10, except
- * for rt5592 chip.
+
+ /* When RSSI is better than a certain threshold, increase VGC
+ * with a chip-specific value in order to improve the balance
+ * between sensitivity and noise isolation.
*/
vgc = rt2800_get_default_vgc(rt2x00dev);
- if (rt2x00_rt(rt2x00dev, RT5592) && qual->rssi > -65)
- vgc += 0x20;
- else if (qual->rssi > -80)
- vgc += 0x10;
+ switch (rt2x00dev->chip.rt) {
+ case RT3572:
+ case RT3593:
+ if (qual->rssi > -65) {
+ if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
+ vgc += 0x20;
+ else
+ vgc += 0x10;
+ }
+ break;
+
+ case RT5592:
+ if (qual->rssi > -65)
+ vgc += 0x20;
+ break;
+
+ default:
+ if (qual->rssi > -80)
+ vgc += 0x10;
+ break;
+ }
rt2800_set_vgc(rt2x00dev, qual, vgc);
}
@@ -5489,7 +5523,7 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
ant = (div_mode == 3) ? 1 : 0;
/* check if this is a Bluetooth combo card */
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
u32 reg;
rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
@@ -5798,7 +5832,7 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
- if (!test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+ if (!rt2x00_has_cap_external_lna_bg(rt2x00dev))
rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
}
@@ -5985,7 +6019,7 @@ static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
- rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x03);
rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -6441,7 +6475,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
- rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
@@ -6479,7 +6513,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
- rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x8f);
rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
@@ -6499,7 +6533,6 @@ static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev)
rt2800_rf_init_calibration(rt2x00dev, 2);
rt2800_rfcsr_write(rt2x00dev, 1, 0x17);
- rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
@@ -6653,17 +6686,20 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
u16 word;
/*
- * Initialize all registers.
+ * Initialize MAC registers.
*/
if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_registers(rt2x00dev)))
return -EIO;
+ /*
+	 * Wait for the BBP/RF to wake up.
+ */
if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
return -EIO;
/*
- * Send signal to firmware during boot time.
+ * Send signal during boot time to initialize firmware.
*/
rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
@@ -6672,9 +6708,15 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
msleep(1);
+ /*
+ * Make sure BBP is up and running.
+ */
if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
return -EIO;
+ /*
+ * Initialize BBP/RF registers.
+ */
rt2800_init_bbp(rt2x00dev);
rt2800_init_rfcsr(rt2x00dev);
@@ -7021,6 +7063,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3022:
case RF3052:
case RF3053:
+ case RF3070:
case RF3290:
case RF3320:
case RF3322:
@@ -7203,7 +7246,7 @@ static const struct rf_channel rf_vals[] = {
/*
* RF value list for rt3xxx
- * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052)
+ * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052 & RF3053)
*/
static const struct rf_channel rf_vals_3x[] = {
{1, 241, 2, 2 },
@@ -7399,72 +7442,6 @@ static const struct rf_channel rf_vals_5592_xtal40[] = {
{196, 83, 0, 12, 1},
};
-static const struct rf_channel rf_vals_3053[] = {
- /* Channel, N, R, K */
- {1, 241, 2, 2},
- {2, 241, 2, 7},
- {3, 242, 2, 2},
- {4, 242, 2, 7},
- {5, 243, 2, 2},
- {6, 243, 2, 7},
- {7, 244, 2, 2},
- {8, 244, 2, 7},
- {9, 245, 2, 2},
- {10, 245, 2, 7},
- {11, 246, 2, 2},
- {12, 246, 2, 7},
- {13, 247, 2, 2},
- {14, 248, 2, 4},
-
- {36, 0x56, 0, 4},
- {38, 0x56, 0, 6},
- {40, 0x56, 0, 8},
- {44, 0x57, 0, 0},
- {46, 0x57, 0, 2},
- {48, 0x57, 0, 4},
- {52, 0x57, 0, 8},
- {54, 0x57, 0, 10},
- {56, 0x58, 0, 0},
- {60, 0x58, 0, 4},
- {62, 0x58, 0, 6},
- {64, 0x58, 0, 8},
-
- {100, 0x5B, 0, 8},
- {102, 0x5B, 0, 10},
- {104, 0x5C, 0, 0},
- {108, 0x5C, 0, 4},
- {110, 0x5C, 0, 6},
- {112, 0x5C, 0, 8},
-
- /* NOTE: Channel 114 has been removed intentionally.
- * The EEPROM contains no TX power values for that,
- * and it is disabled in the vendor driver as well.
- */
-
- {116, 0x5D, 0, 0},
- {118, 0x5D, 0, 2},
- {120, 0x5D, 0, 4},
- {124, 0x5D, 0, 8},
- {126, 0x5D, 0, 10},
- {128, 0x5E, 0, 0},
- {132, 0x5E, 0, 4},
- {134, 0x5E, 0, 6},
- {136, 0x5E, 0, 8},
- {140, 0x5F, 0, 0},
-
- {149, 0x5F, 0, 9},
- {151, 0x5F, 0, 11},
- {153, 0x60, 0, 1},
- {157, 0x60, 0, 5},
- {159, 0x60, 0, 7},
- {161, 0x60, 0, 9},
- {165, 0x61, 0, 1},
- {167, 0x61, 0, 3},
- {169, 0x61, 0, 5},
- {171, 0x61, 0, 7},
- {173, 0x61, 0, 9},
-};
-
static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -7473,7 +7450,6 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
char *default_power2;
char *default_power3;
unsigned int i;
- u16 eeprom;
u32 reg;
/*
@@ -7522,48 +7498,48 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00dev->hw->max_report_rates = 7;
rt2x00dev->hw->max_rate_tries = 1;
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
-
/*
* Initialize hw_mode information.
*/
- spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(rt2x00dev, RF2820) ||
- rt2x00_rf(rt2x00dev, RF2720)) {
+ switch (rt2x00dev->chip.rf) {
+ case RF2720:
+ case RF2820:
spec->num_channels = 14;
spec->channels = rf_vals;
- } else if (rt2x00_rf(rt2x00dev, RF2850) ||
- rt2x00_rf(rt2x00dev, RF2750)) {
- spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ break;
+
+ case RF2750:
+ case RF2850:
spec->num_channels = ARRAY_SIZE(rf_vals);
spec->channels = rf_vals;
- } else if (rt2x00_rf(rt2x00dev, RF3020) ||
- rt2x00_rf(rt2x00dev, RF2020) ||
- rt2x00_rf(rt2x00dev, RF3021) ||
- rt2x00_rf(rt2x00dev, RF3022) ||
- rt2x00_rf(rt2x00dev, RF3290) ||
- rt2x00_rf(rt2x00dev, RF3320) ||
- rt2x00_rf(rt2x00dev, RF3322) ||
- rt2x00_rf(rt2x00dev, RF5360) ||
- rt2x00_rf(rt2x00dev, RF5370) ||
- rt2x00_rf(rt2x00dev, RF5372) ||
- rt2x00_rf(rt2x00dev, RF5390) ||
- rt2x00_rf(rt2x00dev, RF5392)) {
+ break;
+
+ case RF2020:
+ case RF3020:
+ case RF3021:
+ case RF3022:
+ case RF3070:
+ case RF3290:
+ case RF3320:
+ case RF3322:
+ case RF5360:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+ case RF5392:
spec->num_channels = 14;
spec->channels = rf_vals_3x;
- } else if (rt2x00_rf(rt2x00dev, RF3052)) {
- spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ break;
+
+ case RF3052:
+ case RF3053:
spec->num_channels = ARRAY_SIZE(rf_vals_3x);
spec->channels = rf_vals_3x;
- } else if (rt2x00_rf(rt2x00dev, RF3053)) {
- spec->supported_bands |= SUPPORT_BAND_5GHZ;
- spec->num_channels = ARRAY_SIZE(rf_vals_3053);
- spec->channels = rf_vals_3053;
- } else if (rt2x00_rf(rt2x00dev, RF5592)) {
- spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ break;
+ case RF5592:
rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX, &reg);
if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) {
spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal40);
@@ -7572,11 +7548,16 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal20);
spec->channels = rf_vals_5592_xtal20;
}
+ break;
}
if (WARN_ON_ONCE(!spec->channels))
return -ENODEV;
+ spec->supported_bands = SUPPORT_BAND_2GHZ;
+ if (spec->num_channels > 14)
+ spec->supported_bands |= SUPPORT_BAND_5GHZ;
+
/*
* Initialize HT information.
*/
@@ -7591,22 +7572,21 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40;
- if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) >= 2)
+ if (rt2x00dev->default_ant.tx_chain_num >= 2)
spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
- spec->ht.cap |=
- rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) <<
- IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ spec->ht.cap |= rt2x00dev->default_ant.rx_chain_num <<
+ IEEE80211_HT_CAP_RX_STBC_SHIFT;
spec->ht.ampdu_factor = 3;
spec->ht.ampdu_density = 4;
spec->ht.mcs.tx_params =
IEEE80211_HT_MCS_TX_DEFINED |
IEEE80211_HT_MCS_TX_RX_DIFF |
- ((rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ ((rt2x00dev->default_ant.tx_chain_num - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- switch (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH)) {
+ switch (rt2x00dev->default_ant.rx_chain_num) {
case 3:
spec->ht.mcs.rx_mask[2] = 0xff;
case 2:
@@ -7671,6 +7651,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF3320:
case RF3052:
case RF3053:
+ case RF3070:
case RF3290:
case RF5360:
case RF5370:
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.c b/drivers/net/wireless/rt2x00/rt2800mmio.c
new file mode 100644
index 000000000000..ae152280e071
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.c
@@ -0,0 +1,873 @@
+/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ * <http://rt2x00.serialmonkey.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* Module: rt2800mmio
+ * Abstract: rt2800 MMIO device routines.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+
+#include "rt2x00.h"
+#include "rt2x00mmio.h"
+#include "rt2800.h"
+#include "rt2800lib.h"
+#include "rt2800mmio.h"
+
+/*
+ * TX descriptor initialization
+ */
+__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
+{
+ return (__le32 *) entry->skb->data;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);
+
+void rt2800mmio_write_tx_desc(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
+{
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+ __le32 *txd = entry_priv->desc;
+ u32 word;
+ const unsigned int txwi_size = entry->queue->winfo_size;
+
+ /*
+	 * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
+	 * must contain a TXWI structure + 802.11 header + padding + 802.11
+	 * data. We choose to have SD_PTR0/SD_LEN0 contain only the TXWI and
+	 * SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
+	 * data. This means that LAST_SEC0 is always 0.
+ */
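+
+	/* For reference, the mapping set up below is:
+	 *   SD_PTR0 = skb_dma             (TXWI),  SD_LEN0 = txwi_size
+	 *   SD_PTR1 = skb_dma + txwi_size (frame), SD_LEN1 = skb->len
+	 */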
+
+ /*
+ * Initialize TX descriptor
+ */
+ word = 0;
+ rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
+ rt2x00_desc_write(txd, 0, word);
+
+ word = 0;
+ rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
+ rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
+ !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W1_BURST,
+ test_bit(ENTRY_TXD_BURST, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
+ rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
+ rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
+ rt2x00_desc_write(txd, 1, word);
+
+ word = 0;
+ rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
+ skbdesc->skb_dma + txwi_size);
+ rt2x00_desc_write(txd, 2, word);
+
+ word = 0;
+ rt2x00_set_field32(&word, TXD_W3_WIV,
+ !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
+ rt2x00_desc_write(txd, 3, word);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len = TXD_DESC_SIZE;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);
+
+/*
+ * RX control handlers
+ */
+void rt2800mmio_fill_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc)
+{
+ struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+ __le32 *rxd = entry_priv->desc;
+ u32 word;
+
+ rt2x00_desc_read(rxd, 3, &word);
+
+ if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
+ rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
+
+ /*
+ * Unfortunately we don't know the cipher type used during
+	 * decryption. This prevents us from providing correct
+	 * statistics through debugfs.
+ */
+ rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
+
+ if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
+ /*
+ * Hardware has stripped IV/EIV data from 802.11 frame during
+ * decryption. Unfortunately the descriptor doesn't contain
+ * any fields with the EIV/IV data either, so they can't
+ * be restored by rt2x00lib.
+ */
+ rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+
+ /*
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
+ if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
+ rxdesc->flags |= RX_FLAG_DECRYPTED;
+ else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
+ rxdesc->flags |= RX_FLAG_MMIC_ERROR;
+ }
+
+ if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
+ rxdesc->dev_flags |= RXDONE_MY_BSS;
+
+ if (rt2x00_get_field32(word, RXD_W3_L2PAD))
+ rxdesc->dev_flags |= RXDONE_L2PAD;
+
+ /*
+ * Process the RXWI structure that is at the start of the buffer.
+ */
+ rt2800_process_rxwi(entry, rxdesc);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);
+
+/*
+ * Interrupt functions.
+ */
+static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
+{
+ struct ieee80211_conf conf = { .flags = 0 };
+ struct rt2x00lib_conf libconf = { .conf = &conf };
+
+ rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
+}
+
+static bool rt2800mmio_txdone_entry_check(struct queue_entry *entry, u32 status)
+{
+ __le32 *txwi;
+ u32 word;
+ int wcid, tx_wcid;
+
+ wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
+
+ txwi = rt2800_drv_get_txwi(entry);
+ rt2x00_desc_read(txwi, 1, &word);
+ tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+
+ return (tx_wcid == wcid);
+}
+
+static bool rt2800mmio_txdone_find_entry(struct queue_entry *entry, void *data)
+{
+ u32 status = *(u32 *)data;
+
+ /*
+ * rt2800pci hardware might reorder frames when exchanging traffic
+ * with multiple BA enabled STAs.
+ *
+ * For example, a tx queue
+ * [ STA1 | STA2 | STA1 | STA2 ]
+ * can result in tx status reports
+ * [ STA1 | STA1 | STA2 | STA2 ]
+ * when the hw decides to aggregate the frames for STA1 into one AMPDU.
+ *
+ * To mitigate this effect, associate the tx status to the first frame
+ * in the tx queue with a matching wcid.
+ */
+ if (rt2800mmio_txdone_entry_check(entry, status) &&
+ !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+ /*
+ * Got a matching frame, associate the tx status with
+ * the frame
+ */
+ entry->status = status;
+ set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+ return true;
+ }
+
+ /* Check the next frame */
+ return false;
+}
+
+static bool rt2800mmio_txdone_match_first(struct queue_entry *entry, void *data)
+{
+ u32 status = *(u32 *)data;
+
+ /*
+ * Find the first frame without tx status and assign this status to it
+	 * regardless of whether it matches or not.
+ */
+ if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+ /*
+		 * Found the first frame without a tx status, associate
+		 * this tx status with it.
+ */
+ entry->status = status;
+ set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+ return true;
+ }
+
+ /* Check the next frame */
+ return false;
+}
+
+static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
+ void *data)
+{
+ if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+ rt2800_txdone_entry(entry, entry->status,
+ rt2800mmio_get_txwi(entry));
+ return false;
+ }
+
+ /* No more frames to release */
+ return true;
+}
+
+static bool rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ u32 status;
+ u8 qid;
+ int max_tx_done = 16;
+
+ while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
+ qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
+ if (unlikely(qid >= QID_RX)) {
+ /*
+ * Unknown queue, this shouldn't happen. Just drop
+ * this tx status.
+ */
+ rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
+ qid);
+ break;
+ }
+
+ queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
+ if (unlikely(queue == NULL)) {
+ /*
+ * The queue is NULL, this shouldn't happen. Stop
+ * processing here and drop the tx status
+ */
+ rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
+ qid);
+ break;
+ }
+
+ if (unlikely(rt2x00queue_empty(queue))) {
+ /*
+ * The queue is empty. Stop processing here
+ * and drop the tx status.
+ */
+ rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
+ qid);
+ break;
+ }
+
+ /*
+ * Let's associate this tx status with the first
+ * matching frame.
+ */
+ if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+ Q_INDEX, &status,
+ rt2800mmio_txdone_find_entry)) {
+ /*
+ * We cannot match the tx status to any frame, so just
+ * use the first one.
+ */
+ if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+ Q_INDEX, &status,
+ rt2800mmio_txdone_match_first)) {
+ rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
+ qid);
+ break;
+ }
+ }
+
+ /*
+ * Release all frames with a valid tx status.
+ */
+ rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+ Q_INDEX, NULL,
+ rt2800mmio_txdone_release_entries);
+
+ if (--max_tx_done == 0)
+ break;
+ }
+
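+	/* Returning true means the budget of 16 status reports was used up;
+	 * the caller (the txstatus tasklet) reschedules itself in that case.
+	 */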
+ return !max_tx_done;
+}
+
+static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
+{
+ u32 reg;
+
+ /*
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
+ */
+ spin_lock_irq(&rt2x00dev->irqmask_lock);
+ rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ rt2x00_set_field32(&reg, irq_field, 1);
+ rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
+
+void rt2800mmio_txstatus_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ if (rt2800mmio_txdone(rt2x00dev))
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+
+ /*
+ * No need to enable the tx status interrupt here as we always
+ * leave it enabled to minimize the possibility of a tx status
+ * register overflow. See comment in interrupt handler.
+ */
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
+
+void rt2800mmio_pretbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_pretbtt(rt2x00dev);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);
+
+void rt2800mmio_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ u32 reg;
+
+ rt2x00lib_beacondone(rt2x00dev);
+
+ if (rt2x00dev->intf_ap_count) {
+ /*
+		 * The rt2800pci hardware tbtt timer is off by 1us per tbtt,
+		 * causing beacon skew and, as a result, problems with
+		 * some powersaving clients over time. Shorten the beacon
+ * interval every 64 beacons by 64us to mitigate this effect.
+ */
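+		/* BCN_TIME_CFG_BEACON_INTERVAL is programmed as
+		 * beacon_int * 16, i.e. in 1/16 TU (64us) units, so the
+		 * "- 1" below shortens the interval by 64us.
+		 */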
+ if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
+ rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+ (rt2x00dev->beacon_int * 16) - 1);
+ rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
+ rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+ (rt2x00dev->beacon_int * 16));
+ rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ }
+ drv_data->tbtt_tick++;
+ drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
+ }
+
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);
+
+void rt2800mmio_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ if (rt2x00mmio_rxdone(rt2x00dev))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+ else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);
+
+void rt2800mmio_autowake_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2800mmio_wakeup(rt2x00dev);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800mmio_enable_interrupt(rt2x00dev,
+ INT_MASK_CSR_AUTO_WAKEUP);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
+
+static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
+{
+ u32 status;
+ int i;
+
+ /*
+ * The TX_FIFO_STATUS interrupt needs special care. We should
+ * read TX_STA_FIFO but we should do it immediately as otherwise
+ * the register can overflow and we would lose status reports.
+ *
+ * Hence, read the TX_STA_FIFO register and copy all tx status
+ * reports into a kernel FIFO which is handled in the txstatus
+ * tasklet. We use a tasklet to process the tx status reports
+ * because we can schedule the tasklet multiple times (when the
+ * interrupt fires again during tx status processing).
+ *
+ * Furthermore we don't disable the TX_FIFO_STATUS
+ * interrupt here but leave it enabled so that the TX_STA_FIFO
+ * can also be read while the tx status tasklet gets executed.
+ *
+ * Since we have only one producer and one consumer we don't
+ * need to lock the kfifo.
+ */
+ for (i = 0; i < rt2x00dev->tx->limit; i++) {
+ rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
+
+ if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
+ break;
+
+ if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
+ rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
+ break;
+ }
+ }
+
+ /* Schedule the tasklet for processing the tx status. */
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+}
+
+irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
+{
+ struct rt2x00_dev *rt2x00dev = dev_instance;
+ u32 reg, mask;
+
+ /* Read status and ACK all interrupts */
+ rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+ rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
+ if (!reg)
+ return IRQ_NONE;
+
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ return IRQ_HANDLED;
+
+ /*
+ * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+ * for interrupts and interrupt masks we can just use the value of
+ * INT_SOURCE_CSR to create the interrupt mask.
+ */
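+	/* For example, if only RX_DONE is pending, ~reg keeps every mask
+	 * bit except RX_DONE, so the AND further below disables only the
+	 * RX_DONE interrupt until its tasklet re-enables it.
+	 */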
+ mask = ~reg;
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
+ rt2800mmio_txstatus_interrupt(rt2x00dev);
+ /*
+ * Never disable the TX_FIFO_STATUS interrupt.
+ */
+ rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+ }
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
+ tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
+ tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now,
+ * the tasklet will reenable the appropriate interrupts.
+ */
+ spin_lock(&rt2x00dev->irqmask_lock);
+ rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg &= mask;
+ rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock(&rt2x00dev->irqmask_lock);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);
+
+void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
+ enum dev_state state)
+{
+ u32 reg;
+ unsigned long flags;
+
+ /*
+	 * When interrupts are being enabled, the interrupt source
+	 * register should be cleared to assure a clean state.
+ */
+ if (state == STATE_RADIO_IRQ_ON) {
+ rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+ rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+ }
+
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+ reg = 0;
+ if (state == STATE_RADIO_IRQ_ON) {
+ rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
+ }
+ rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Wait for possibly running tasklets to finish.
+ */
+ tasklet_kill(&rt2x00dev->txstatus_tasklet);
+ tasklet_kill(&rt2x00dev->rxdone_tasklet);
+ tasklet_kill(&rt2x00dev->autowake_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);
+
+/*
+ * Queue handlers.
+ */
+void rt2800mmio_start_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
+ rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+ break;
+ case QID_BEACON:
+ rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
+ rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);
+
+void rt2800mmio_kick_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ struct queue_entry *entry;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
+ entry->entry_idx);
+ break;
+ case QID_MGMT:
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
+ entry->entry_idx);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);
+
+void rt2800mmio_stop_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
+ rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+ break;
+ case QID_BEACON:
+ rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+ rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
+ rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
+
+ /*
+ * Wait for current invocation to finish. The tasklet
+ * won't be scheduled anymore afterwards since we disabled
+ * the TBTT and PRE TBTT timer.
+ */
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);
+
+void rt2800mmio_queue_init(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ unsigned short txwi_size, rxwi_size;
+
+ rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
+
+ switch (queue->qid) {
+ case QID_RX:
+ queue->limit = 128;
+ queue->data_size = AGGREGATION_SIZE;
+ queue->desc_size = RXD_DESC_SIZE;
+ queue->winfo_size = rxwi_size;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
+
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ queue->limit = 64;
+ queue->data_size = AGGREGATION_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->winfo_size = txwi_size;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
+
+ case QID_BEACON:
+ queue->limit = 8;
+ queue->data_size = 0; /* No DMA required for beacons */
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->winfo_size = txwi_size;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
+
+ case QID_ATIM:
+ /* fallthrough */
+ default:
+ BUG();
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);
+
+/*
+ * Initialization functions.
+ */
+bool rt2800mmio_get_entry_state(struct queue_entry *entry)
+{
+ struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+ u32 word;
+
+ if (entry->queue->qid == QID_RX) {
+ rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+ return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
+ } else {
+ rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+ return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);
+
+void rt2800mmio_clear_entry(struct queue_entry *entry)
+{
+ struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ u32 word;
+
+ if (entry->queue->qid == QID_RX) {
+ rt2x00_desc_read(entry_priv->desc, 0, &word);
+ rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
+ rt2x00_desc_write(entry_priv->desc, 0, word);
+
+ rt2x00_desc_read(entry_priv->desc, 1, &word);
+ rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
+ rt2x00_desc_write(entry_priv->desc, 1, word);
+
+ /*
+ * Set RX IDX in register to inform hardware that we have
+ * handled this entry and it is available for reuse again.
+ */
+ rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
+ entry->entry_idx);
+ } else {
+ rt2x00_desc_read(entry_priv->desc, 1, &word);
+ rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
+ rt2x00_desc_write(entry_priv->desc, 1, word);
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
+
+int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
+{
+ struct queue_entry_priv_mmio *entry_priv;
+
+ /*
+ * Initialize registers.
+ */
+ entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
+ rt2x00dev->tx[0].limit);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
+
+ entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
+ rt2x00dev->tx[1].limit);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
+
+ entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
+ rt2x00dev->tx[2].limit);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
+
+ entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
+ rt2x00dev->tx[3].limit);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
+
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
+
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
+
+ entry_priv = rt2x00dev->rx->entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
+ rt2x00dev->rx[0].limit);
+ rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
+ rt2x00dev->rx[0].limit - 1);
+ rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
+
+ rt2800_disable_wpdma(rt2x00dev);
+
+ rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);
+
+int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+
+ /*
+ * Reset DMA indexes
+ */
+ rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+ rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+ rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+ rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
+ if (rt2x00_is_pcie(rt2x00dev) &&
+ (rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392) ||
+ rt2x00_rt(rt2x00dev, RT5592))) {
+ rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
+ rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
+ rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
+ rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
+ }
+
+ rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+
+ reg = 0;
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
+ rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+ rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);
+
+/*
+ * Device state switch handlers.
+ */
+int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
+{
+ /* Wait for DMA, ignore error until we initialize queues. */
+ rt2800_wait_wpdma_ready(rt2x00dev);
+
+ if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
+ return -EIO;
+
+ return rt2800_enable_radio(rt2x00dev);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);
+
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("rt2800 MMIO library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.h b/drivers/net/wireless/rt2x00/rt2800mmio.h
new file mode 100644
index 000000000000..6a10de3eee3e
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.h
@@ -0,0 +1,165 @@
+/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ * <http://rt2x00.serialmonkey.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* Module: rt2800mmio
+ * Abstract: forward declarations for the rt2800mmio module.
+ */
+
+#ifndef RT2800MMIO_H
+#define RT2800MMIO_H
+
+/*
+ * Queue register offset macros
+ */
+#define TX_QUEUE_REG_OFFSET 0x10
+#define TX_BASE_PTR(__x) (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_MAX_CNT(__x) (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_CTX_IDX(__x) (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_DTX_IDX(__x) (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
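+/*
+ * For illustration: TX_CTX_IDX(2) expands to TX_CTX_IDX0 + 0x20, i.e.
+ * the context index register of the third TX ring.
+ */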
+
+/*
+ * DMA descriptor defines.
+ */
+#define TXD_DESC_SIZE (4 * sizeof(__le32))
+#define RXD_DESC_SIZE (4 * sizeof(__le32))
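+/*
+ * Both descriptors are thus 16 bytes long: the four 32-bit words
+ * (Word0 - Word3) defined below.
+ */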
+
+/*
+ * TX descriptor format for TX, PRIO and Beacon Ring.
+ */
+
+/*
+ * Word0
+ */
+#define TXD_W0_SD_PTR0 FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define TXD_W1_SD_LEN1 FIELD32(0x00003fff)
+#define TXD_W1_LAST_SEC1 FIELD32(0x00004000)
+#define TXD_W1_BURST FIELD32(0x00008000)
+#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000)
+#define TXD_W1_LAST_SEC0 FIELD32(0x40000000)
+#define TXD_W1_DMA_DONE FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define TXD_W2_SD_PTR1 FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
+ * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
+ * 0: MGMT, 1: HCCA, 2: EDCA
+ */
+#define TXD_W3_WIV FIELD32(0x01000000)
+#define TXD_W3_QSEL FIELD32(0x06000000)
+#define TXD_W3_TCO FIELD32(0x20000000)
+#define TXD_W3_UCO FIELD32(0x40000000)
+#define TXD_W3_ICO FIELD32(0x80000000)
+
+/*
+ * RX descriptor format for RX Ring.
+ */
+
+/*
+ * Word0
+ */
+#define RXD_W0_SDP0 FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define RXD_W1_SDL1 FIELD32(0x00003fff)
+#define RXD_W1_SDL0 FIELD32(0x3fff0000)
+#define RXD_W1_LS0 FIELD32(0x40000000)
+#define RXD_W1_DMA_DONE FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define RXD_W2_SDP1 FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * AMSDU: RX with 802.3 header, not 802.11 header.
+ * DECRYPTED: This frame has been decrypted by the hardware.
+ */
+#define RXD_W3_BA FIELD32(0x00000001)
+#define RXD_W3_DATA FIELD32(0x00000002)
+#define RXD_W3_NULLDATA FIELD32(0x00000004)
+#define RXD_W3_FRAG FIELD32(0x00000008)
+#define RXD_W3_UNICAST_TO_ME FIELD32(0x00000010)
+#define RXD_W3_MULTICAST FIELD32(0x00000020)
+#define RXD_W3_BROADCAST FIELD32(0x00000040)
+#define RXD_W3_MY_BSS FIELD32(0x00000080)
+#define RXD_W3_CRC_ERROR FIELD32(0x00000100)
+#define RXD_W3_CIPHER_ERROR FIELD32(0x00000600)
+#define RXD_W3_AMSDU FIELD32(0x00000800)
+#define RXD_W3_HTC FIELD32(0x00001000)
+#define RXD_W3_RSSI FIELD32(0x00002000)
+#define RXD_W3_L2PAD FIELD32(0x00004000)
+#define RXD_W3_AMPDU FIELD32(0x00008000)
+#define RXD_W3_DECRYPTED FIELD32(0x00010000)
+#define RXD_W3_PLCP_SIGNAL FIELD32(0x00020000)
+#define RXD_W3_PLCP_RSSI FIELD32(0x00040000)
+
+/* TX descriptor initialization */
+__le32 *rt2800mmio_get_txwi(struct queue_entry *entry);
+void rt2800mmio_write_tx_desc(struct queue_entry *entry,
+ struct txentry_desc *txdesc);
+
+/* RX control handlers */
+void rt2800mmio_fill_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc);
+
+/* Interrupt functions */
+void rt2800mmio_txstatus_tasklet(unsigned long data);
+void rt2800mmio_pretbtt_tasklet(unsigned long data);
+void rt2800mmio_tbtt_tasklet(unsigned long data);
+void rt2800mmio_rxdone_tasklet(unsigned long data);
+void rt2800mmio_autowake_tasklet(unsigned long data);
+irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance);
+void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
+ enum dev_state state);
+
+/* Queue handlers */
+void rt2800mmio_start_queue(struct data_queue *queue);
+void rt2800mmio_kick_queue(struct data_queue *queue);
+void rt2800mmio_stop_queue(struct data_queue *queue);
+void rt2800mmio_queue_init(struct data_queue *queue);
+
+/* Initialization functions */
+bool rt2800mmio_get_entry_state(struct queue_entry *entry);
+void rt2800mmio_clear_entry(struct queue_entry *entry);
+int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev);
+int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev);
+
+/* Device state switch handlers. */
+int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev);
+
+#endif /* RT2800MMIO_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index f8f2abbfbb65..b504455b4fec 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -37,14 +37,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/platform_device.h>
#include <linux/eeprom_93cx6.h>
#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2x00pci.h"
-#include "rt2x00soc.h"
#include "rt2800lib.h"
+#include "rt2800mmio.h"
#include "rt2800.h"
#include "rt2800pci.h"
@@ -90,27 +89,6 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
}
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
-{
- void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
-
- if (!base_addr)
- return -ENOMEM;
-
- memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
-
- iounmap(base_addr);
- return 0;
-}
-#else
-static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
-{
- return -ENOMEM;
-}
-#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
-
-#ifdef CONFIG_PCI
static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
{
struct rt2x00_dev *rt2x00dev = eeprom->data;
@@ -183,112 +161,6 @@ static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
{
return rt2800_read_eeprom_efuse(rt2x00dev);
}
-#else
-static inline int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
-{
- return 0;
-}
-
-static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
-{
- return -EOPNOTSUPP;
-}
-#endif /* CONFIG_PCI */
-
-/*
- * Queue handlers.
- */
-static void rt2800pci_start_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- u32 reg;
-
- switch (queue->qid) {
- case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
- rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
- break;
- case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
- rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
- rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
- rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
- break;
- default:
- break;
- }
-}
-
-static void rt2800pci_kick_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- struct queue_entry *entry;
-
- switch (queue->qid) {
- case QID_AC_VO:
- case QID_AC_VI:
- case QID_AC_BE:
- case QID_AC_BK:
- entry = rt2x00queue_get_entry(queue, Q_INDEX);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
- entry->entry_idx);
- break;
- case QID_MGMT:
- entry = rt2x00queue_get_entry(queue, Q_INDEX);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
- entry->entry_idx);
- break;
- default:
- break;
- }
-}
-
-static void rt2800pci_stop_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- u32 reg;
-
- switch (queue->qid) {
- case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
- rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
- break;
- case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
- rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
- rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
- rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
- rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
-
- /*
- * Wait for current invocation to finish. The tasklet
- * won't be scheduled anymore afterwards since we disabled
- * the TBTT and PRE TBTT timer.
- */
- tasklet_kill(&rt2x00dev->tbtt_tasklet);
- tasklet_kill(&rt2x00dev->pretbtt_tasklet);
-
- break;
- default:
- break;
- }
-}
/*
* Firmware functions
@@ -332,217 +204,13 @@ static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
}
/*
- * Initialization functions.
- */
-static bool rt2800pci_get_entry_state(struct queue_entry *entry)
-{
- struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
- u32 word;
-
- if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 1, &word);
-
- return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
- } else {
- rt2x00_desc_read(entry_priv->desc, 1, &word);
-
- return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
- }
-}
-
-static void rt2800pci_clear_entry(struct queue_entry *entry)
-{
- struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- u32 word;
-
- if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 0, &word);
- rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
- rt2x00_desc_write(entry_priv->desc, 0, word);
-
- rt2x00_desc_read(entry_priv->desc, 1, &word);
- rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
- rt2x00_desc_write(entry_priv->desc, 1, word);
-
- /*
- * Set RX IDX in register to inform hardware that we have
- * handled this entry and it is available for reuse again.
- */
- rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
- entry->entry_idx);
- } else {
- rt2x00_desc_read(entry_priv->desc, 1, &word);
- rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
- rt2x00_desc_write(entry_priv->desc, 1, word);
- }
-}
-
-static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
-{
- struct queue_entry_priv_mmio *entry_priv;
-
- /*
- * Initialize registers.
- */
- entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
- rt2x00dev->tx[0].limit);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
-
- entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
- rt2x00dev->tx[1].limit);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
-
- entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
- rt2x00dev->tx[2].limit);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
-
- entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
- rt2x00dev->tx[3].limit);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
-
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
-
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
-
- entry_priv = rt2x00dev->rx->entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
- rt2x00dev->rx[0].limit);
- rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
- rt2x00dev->rx[0].limit - 1);
- rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
-
- rt2800_disable_wpdma(rt2x00dev);
-
- rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
-
- return 0;
-}
-
-/*
* Device state switch handlers.
*/
-static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
- enum dev_state state)
-{
- u32 reg;
- unsigned long flags;
-
- /*
- * When interrupts are being enabled, the interrupt registers
- * should clear the register to assure a clean state.
- */
- if (state == STATE_RADIO_IRQ_ON) {
- rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
- rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
- }
-
- spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- reg = 0;
- if (state == STATE_RADIO_IRQ_ON) {
- rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
- rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
- }
- rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
- spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
-
- if (state == STATE_RADIO_IRQ_OFF) {
- /*
- * Wait for possibly running tasklets to finish.
- */
- tasklet_kill(&rt2x00dev->txstatus_tasklet);
- tasklet_kill(&rt2x00dev->rxdone_tasklet);
- tasklet_kill(&rt2x00dev->autowake_tasklet);
- tasklet_kill(&rt2x00dev->tbtt_tasklet);
- tasklet_kill(&rt2x00dev->pretbtt_tasklet);
- }
-}
-
-static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
-{
- u32 reg;
-
- /*
- * Reset DMA indexes
- */
- rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
- rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
- rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
- rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
-
- if (rt2x00_is_pcie(rt2x00dev) &&
- (rt2x00_rt(rt2x00dev, RT3090) ||
- rt2x00_rt(rt2x00dev, RT3390) ||
- rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT3593) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392) ||
- rt2x00_rt(rt2x00dev, RT5592))) {
- rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
- rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
- rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
- rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
- }
-
- rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
-
- reg = 0;
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
- rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-
- rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
-
- return 0;
-}
-
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
int retval;
- /* Wait for DMA, ignore error until we initialize queues. */
- rt2800_wait_wpdma_ready(rt2x00dev);
-
- if (unlikely(rt2800pci_init_queues(rt2x00dev)))
- return -EIO;
-
- retval = rt2800_enable_radio(rt2x00dev);
+ retval = rt2800mmio_enable_radio(rt2x00dev);
if (retval)
return retval;
@@ -559,15 +227,6 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
return retval;
}
-static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
-{
- if (rt2x00_is_soc(rt2x00dev)) {
- rt2800_disable_radio(rt2x00dev);
- rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
- }
-}
-
static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
@@ -601,12 +260,11 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
* After the radio has been disabled, the device should
* be put to sleep for powersaving.
*/
- rt2800pci_disable_radio(rt2x00dev);
rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
break;
case STATE_RADIO_IRQ_ON:
case STATE_RADIO_IRQ_OFF:
- rt2800pci_toggle_irq(rt2x00dev, state);
+ rt2800mmio_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
case STATE_SLEEP:
@@ -627,479 +285,13 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
}
/*
- * TX descriptor initialization
- */
-static __le32 *rt2800pci_get_txwi(struct queue_entry *entry)
-{
- return (__le32 *) entry->skb->data;
-}
-
-static void rt2800pci_write_tx_desc(struct queue_entry *entry,
- struct txentry_desc *txdesc)
-{
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
- __le32 *txd = entry_priv->desc;
- u32 word;
- const unsigned int txwi_size = entry->queue->winfo_size;
-
- /*
- * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
- * must contains a TXWI structure + 802.11 header + padding + 802.11
- * data. We choose to have SD_PTR0/SD_LEN0 only contains TXWI and
- * SD_PTR1/SD_LEN1 contains 802.11 header + padding + 802.11
- * data. It means that LAST_SEC0 is always 0.
- */
-
- /*
- * Initialize TX descriptor
- */
- word = 0;
- rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
- rt2x00_desc_write(txd, 0, word);
-
- word = 0;
- rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
- rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
- !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W1_BURST,
- test_bit(ENTRY_TXD_BURST, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
- rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
- rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
- rt2x00_desc_write(txd, 1, word);
-
- word = 0;
- rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
- skbdesc->skb_dma + txwi_size);
- rt2x00_desc_write(txd, 2, word);
-
- word = 0;
- rt2x00_set_field32(&word, TXD_W3_WIV,
- !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
- rt2x00_desc_write(txd, 3, word);
-
- /*
- * Register descriptor details in skb frame descriptor.
- */
- skbdesc->desc = txd;
- skbdesc->desc_len = TXD_DESC_SIZE;
-}
-
-/*
- * RX control handlers
- */
-static void rt2800pci_fill_rxdone(struct queue_entry *entry,
- struct rxdone_entry_desc *rxdesc)
-{
- struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
- __le32 *rxd = entry_priv->desc;
- u32 word;
-
- rt2x00_desc_read(rxd, 3, &word);
-
- if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
- rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
-
- /*
- * Unfortunately we don't know the cipher type used during
- * decryption. This prevents us from correct providing
- * correct statistics through debugfs.
- */
- rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
-
- if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
- /*
- * Hardware has stripped IV/EIV data from 802.11 frame during
- * decryption. Unfortunately the descriptor doesn't contain
- * any fields with the EIV/IV data either, so they can't
- * be restored by rt2x00lib.
- */
- rxdesc->flags |= RX_FLAG_IV_STRIPPED;
-
- /*
- * The hardware has already checked the Michael Mic and has
- * stripped it from the frame. Signal this to mac80211.
- */
- rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
-
- if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
- rxdesc->flags |= RX_FLAG_DECRYPTED;
- else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
- rxdesc->flags |= RX_FLAG_MMIC_ERROR;
- }
-
- if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
- rxdesc->dev_flags |= RXDONE_MY_BSS;
-
- if (rt2x00_get_field32(word, RXD_W3_L2PAD))
- rxdesc->dev_flags |= RXDONE_L2PAD;
-
- /*
- * Process the RXWI structure that is at the start of the buffer.
- */
- rt2800_process_rxwi(entry, rxdesc);
-}
-
-/*
- * Interrupt functions.
- */
-static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
-{
- struct ieee80211_conf conf = { .flags = 0 };
- struct rt2x00lib_conf libconf = { .conf = &conf };
-
- rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
-}
-
-static bool rt2800pci_txdone_entry_check(struct queue_entry *entry, u32 status)
-{
- __le32 *txwi;
- u32 word;
- int wcid, tx_wcid;
-
- wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
-
- txwi = rt2800_drv_get_txwi(entry);
- rt2x00_desc_read(txwi, 1, &word);
- tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
-
- return (tx_wcid == wcid);
-}
-
-static bool rt2800pci_txdone_find_entry(struct queue_entry *entry, void *data)
-{
- u32 status = *(u32 *)data;
-
- /*
- * rt2800pci hardware might reorder frames when exchanging traffic
- * with multiple BA enabled STAs.
- *
- * For example, a tx queue
- * [ STA1 | STA2 | STA1 | STA2 ]
- * can result in tx status reports
- * [ STA1 | STA1 | STA2 | STA2 ]
- * when the hw decides to aggregate the frames for STA1 into one AMPDU.
- *
- * To mitigate this effect, associate the tx status to the first frame
- * in the tx queue with a matching wcid.
- */
- if (rt2800pci_txdone_entry_check(entry, status) &&
- !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
- /*
- * Got a matching frame, associate the tx status with
- * the frame
- */
- entry->status = status;
- set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
- return true;
- }
-
- /* Check the next frame */
- return false;
-}
-
-static bool rt2800pci_txdone_match_first(struct queue_entry *entry, void *data)
-{
- u32 status = *(u32 *)data;
-
- /*
- * Find the first frame without tx status and assign this status to it
- * regardless if it matches or not.
- */
- if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
- /*
- * Got a matching frame, associate the tx status with
- * the frame
- */
- entry->status = status;
- set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
- return true;
- }
-
- /* Check the next frame */
- return false;
-}
-static bool rt2800pci_txdone_release_entries(struct queue_entry *entry,
- void *data)
-{
- if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
- rt2800_txdone_entry(entry, entry->status,
- rt2800pci_get_txwi(entry));
- return false;
- }
-
- /* No more frames to release */
- return true;
-}
-
-static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
- u32 status;
- u8 qid;
- int max_tx_done = 16;
-
- while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
- qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
- if (unlikely(qid >= QID_RX)) {
- /*
- * Unknown queue, this shouldn't happen. Just drop
- * this tx status.
- */
- rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
- qid);
- break;
- }
-
- queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
- if (unlikely(queue == NULL)) {
- /*
- * The queue is NULL, this shouldn't happen. Stop
- * processing here and drop the tx status
- */
- rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
- qid);
- break;
- }
-
- if (unlikely(rt2x00queue_empty(queue))) {
- /*
- * The queue is empty. Stop processing here
- * and drop the tx status.
- */
- rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
- qid);
- break;
- }
-
- /*
- * Let's associate this tx status with the first
- * matching frame.
- */
- if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
- Q_INDEX, &status,
- rt2800pci_txdone_find_entry)) {
- /*
- * We cannot match the tx status to any frame, so just
- * use the first one.
- */
- if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
- Q_INDEX, &status,
- rt2800pci_txdone_match_first)) {
- rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
- qid);
- break;
- }
- }
-
- /*
- * Release all frames with a valid tx status.
- */
- rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
- Q_INDEX, NULL,
- rt2800pci_txdone_release_entries);
-
- if (--max_tx_done == 0)
- break;
- }
-
- return !max_tx_done;
-}
-
-static inline void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
- struct rt2x00_field32 irq_field)
-{
- u32 reg;
-
- /*
- * Enable a single interrupt. The interrupt mask register
- * access needs locking.
- */
- spin_lock_irq(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
- rt2x00_set_field32(&reg, irq_field, 1);
- rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
- spin_unlock_irq(&rt2x00dev->irqmask_lock);
-}
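
Note that the helper above takes spin_lock_irq() around the INT_MASK_CSR read-modify-write because it runs from tasklet (softirq) context, while the hard interrupt handler further down uses a plain spin_lock() on the same irqmask_lock, since local interrupts are already disabled there. A minimal sketch of that locking split, with illustrative names rather than the driver's own symbols:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(mask_lock);
    static u32 irq_mask;		/* software copy of the mask register */

    static irqreturn_t demo_hardirq(int irq, void *dev)
    {
            /* Hard-irq context: interrupts are already off on this CPU,
             * so a plain spin_lock() is enough. */
            spin_lock(&mask_lock);
            irq_mask &= ~0x1;	/* mask the source we defer to a tasklet */
            spin_unlock(&mask_lock);
            return IRQ_HANDLED;
    }

    static void demo_tasklet(unsigned long data)
    {
            /* Softirq context: the hard irq could still fire on this CPU,
             * so disable local interrupts while touching the shared mask. */
            spin_lock_irq(&mask_lock);
            irq_mask |= 0x1;	/* re-enable the source */
            spin_unlock_irq(&mask_lock);
    }
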
-
-static void rt2800pci_txstatus_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- if (rt2800pci_txdone(rt2x00dev))
- tasklet_schedule(&rt2x00dev->txstatus_tasklet);
-
- /*
- * No need to enable the tx status interrupt here as we always
- * leave it enabled to minimize the possibility of a tx status
- * register overflow. See comment in interrupt handler.
- */
-}
-
-static void rt2800pci_pretbtt_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- rt2x00lib_pretbtt(rt2x00dev);
- if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
-}
-
-static void rt2800pci_tbtt_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
- u32 reg;
-
- rt2x00lib_beacondone(rt2x00dev);
-
- if (rt2x00dev->intf_ap_count) {
- /*
- * The rt2800pci hardware tbtt timer is off by 1us per tbtt
- * causing beacon skew and as a result causing problems with
- * some powersaving clients over time. Shorten the beacon
- * interval every 64 beacons by 64us to mitigate this effect.
- */
- if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
- (rt2x00dev->beacon_int * 16) - 1);
- rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
- (rt2x00dev->beacon_int * 16));
- rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- }
- drv_data->tbtt_tick++;
- drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
- }
-
- if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
-}
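
The correction above relies on BCN_TIME_CFG_BEACON_INTERVAL being programmed in units of 1/16 TU, i.e. 64 us per step, which is why the code writes beacon_int * 16: 64 TBTTs of roughly 1 us drift add up to about one register unit, so lowering the value by 1 for a single beacon period cancels the accumulated skew. A back-of-the-envelope sketch under that unit assumption:

    /* Sketch of the beacon-skew arithmetic; assumes the interval
     * register counts 1/16 TU steps, i.e. 64 us per unit. */
    #define TU_US			1024
    #define REG_UNIT_US		(TU_US / 16)	/* 64 us per register step */
    #define DRIFT_PER_TBTT_US	1		/* observed hardware drift */
    #define TBTT_PERIOD		64		/* beacons between corrections */

    static int drift_to_correct_us(void)
    {
            return DRIFT_PER_TBTT_US * TBTT_PERIOD;		/* 64 us */
    }

    static int correction_in_reg_units(void)
    {
            return drift_to_correct_us() / REG_UNIT_US;	/* exactly 1 unit */
    }
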
-
-static void rt2800pci_rxdone_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- if (rt2x00mmio_rxdone(rt2x00dev))
- tasklet_schedule(&rt2x00dev->rxdone_tasklet);
- else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
-}
-
-static void rt2800pci_autowake_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- rt2800pci_wakeup(rt2x00dev);
- if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
-}
-
-static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
-{
- u32 status;
- int i;
-
- /*
- * The TX_FIFO_STATUS interrupt needs special care. We should
- * read TX_STA_FIFO but we should do it immediately as otherwise
- * the register can overflow and we would lose status reports.
- *
- * Hence, read the TX_STA_FIFO register and copy all tx status
- * reports into a kernel FIFO which is handled in the txstatus
- * tasklet. We use a tasklet to process the tx status reports
- * because we can schedule the tasklet multiple times (when the
- * interrupt fires again during tx status processing).
- *
- * Furthermore we don't disable the TX_FIFO_STATUS
- * interrupt here but leave it enabled so that the TX_STA_FIFO
- * can also be read while the tx status tasklet gets executed.
- *
- * Since we have only one producer and one consumer we don't
- * need to lock the kfifo.
- */
- for (i = 0; i < rt2x00dev->tx->limit; i++) {
- rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
-
- if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
- break;
-
- if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
- rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
- break;
- }
- }
-
- /* Schedule the tasklet for processing the tx status. */
- tasklet_schedule(&rt2x00dev->txstatus_tasklet);
-}
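
The comment above leans on the fact that a kfifo with exactly one producer (the interrupt handler) and one consumer (the tx status tasklet) needs no locking. A minimal sketch of that pattern, using the pointer form of kfifo_put() that this kernel generation uses; names are illustrative:

    #include <linux/interrupt.h>
    #include <linux/kernel.h>
    #include <linux/kfifo.h>

    static DEFINE_KFIFO(status_fifo, u32, 64);	/* size must be a power of two */
    static struct tasklet_struct status_tasklet;

    static void consumer_tasklet(unsigned long data)
    {
            u32 status;

            /* Sole consumer: drain everything the irq handler queued. */
            while (kfifo_get(&status_fifo, &status))
                    pr_debug("tx status %08x\n", status);
    }

    static void producer_in_irq(u32 status)
    {
            /* Sole producer: no lock needed for a 1:1 kfifo. */
            if (!kfifo_put(&status_fifo, &status))
                    pr_warn("status fifo overrun, report dropped\n");

            tasklet_schedule(&status_tasklet);
    }

    static void demo_setup(void)
    {
            tasklet_init(&status_tasklet, consumer_tasklet, 0);
    }
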
-
-static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
-{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg, mask;
-
- /* Read status and ACK all interrupts */
- rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
- rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
-
- if (!reg)
- return IRQ_NONE;
-
- if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- return IRQ_HANDLED;
-
- /*
- * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
- * for interrupts and interrupt masks we can just use the value of
- * INT_SOURCE_CSR to create the interrupt mask.
- */
- mask = ~reg;
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
- rt2800pci_txstatus_interrupt(rt2x00dev);
- /*
- * Never disable the TX_FIFO_STATUS interrupt.
- */
- rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
- }
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
- tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
- tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
- tasklet_schedule(&rt2x00dev->rxdone_tasklet);
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
- tasklet_schedule(&rt2x00dev->autowake_tasklet);
-
- /*
- * Disable all interrupts for which a tasklet was scheduled right now,
- * the tasklet will reenable the appropriate interrupts.
- */
- spin_lock(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
- reg &= mask;
- rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
- spin_unlock(&rt2x00dev->irqmask_lock);
-
- return IRQ_HANDLED;
-}
-
-/*
* Device probe functions.
*/
static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
{
int retval;
- if (rt2x00_is_soc(rt2x00dev))
- retval = rt2800pci_read_eeprom_soc(rt2x00dev);
- else if (rt2800pci_efuse_detect(rt2x00dev))
+ if (rt2800pci_efuse_detect(rt2x00dev))
retval = rt2800pci_read_eeprom_efuse(rt2x00dev);
else
retval = rt2800pci_read_eeprom_pci(rt2x00dev);
@@ -1145,25 +337,25 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
.read_eeprom = rt2800pci_read_eeprom,
.hwcrypt_disabled = rt2800pci_hwcrypt_disabled,
.drv_write_firmware = rt2800pci_write_firmware,
- .drv_init_registers = rt2800pci_init_registers,
- .drv_get_txwi = rt2800pci_get_txwi,
+ .drv_init_registers = rt2800mmio_init_registers,
+ .drv_get_txwi = rt2800mmio_get_txwi,
};
static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
- .irq_handler = rt2800pci_interrupt,
- .txstatus_tasklet = rt2800pci_txstatus_tasklet,
- .pretbtt_tasklet = rt2800pci_pretbtt_tasklet,
- .tbtt_tasklet = rt2800pci_tbtt_tasklet,
- .rxdone_tasklet = rt2800pci_rxdone_tasklet,
- .autowake_tasklet = rt2800pci_autowake_tasklet,
+ .irq_handler = rt2800mmio_interrupt,
+ .txstatus_tasklet = rt2800mmio_txstatus_tasklet,
+ .pretbtt_tasklet = rt2800mmio_pretbtt_tasklet,
+ .tbtt_tasklet = rt2800mmio_tbtt_tasklet,
+ .rxdone_tasklet = rt2800mmio_rxdone_tasklet,
+ .autowake_tasklet = rt2800mmio_autowake_tasklet,
.probe_hw = rt2800_probe_hw,
.get_firmware_name = rt2800pci_get_firmware_name,
.check_firmware = rt2800_check_firmware,
.load_firmware = rt2800_load_firmware,
.initialize = rt2x00mmio_initialize,
.uninitialize = rt2x00mmio_uninitialize,
- .get_entry_state = rt2800pci_get_entry_state,
- .clear_entry = rt2800pci_clear_entry,
+ .get_entry_state = rt2800mmio_get_entry_state,
+ .clear_entry = rt2800mmio_clear_entry,
.set_device_state = rt2800pci_set_device_state,
.rfkill_poll = rt2800_rfkill_poll,
.link_stats = rt2800_link_stats,
@@ -1171,15 +363,15 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.link_tuner = rt2800_link_tuner,
.gain_calibration = rt2800_gain_calibration,
.vco_calibration = rt2800_vco_calibration,
- .start_queue = rt2800pci_start_queue,
- .kick_queue = rt2800pci_kick_queue,
- .stop_queue = rt2800pci_stop_queue,
+ .start_queue = rt2800mmio_start_queue,
+ .kick_queue = rt2800mmio_kick_queue,
+ .stop_queue = rt2800mmio_stop_queue,
.flush_queue = rt2x00mmio_flush_queue,
- .write_tx_desc = rt2800pci_write_tx_desc,
+ .write_tx_desc = rt2800mmio_write_tx_desc,
.write_tx_data = rt2800_write_tx_data,
.write_beacon = rt2800_write_beacon,
.clear_beacon = rt2800_clear_beacon,
- .fill_rxdone = rt2800pci_fill_rxdone,
+ .fill_rxdone = rt2800mmio_fill_rxdone,
.config_shared_key = rt2800_config_shared_key,
.config_pairwise_key = rt2800_config_pairwise_key,
.config_filter = rt2800_config_filter,
@@ -1191,49 +383,6 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.sta_remove = rt2800_sta_remove,
};
-static void rt2800pci_queue_init(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- unsigned short txwi_size, rxwi_size;
-
- rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
-
- switch (queue->qid) {
- case QID_RX:
- queue->limit = 128;
- queue->data_size = AGGREGATION_SIZE;
- queue->desc_size = RXD_DESC_SIZE;
- queue->winfo_size = rxwi_size;
- queue->priv_size = sizeof(struct queue_entry_priv_mmio);
- break;
-
- case QID_AC_VO:
- case QID_AC_VI:
- case QID_AC_BE:
- case QID_AC_BK:
- queue->limit = 64;
- queue->data_size = AGGREGATION_SIZE;
- queue->desc_size = TXD_DESC_SIZE;
- queue->winfo_size = txwi_size;
- queue->priv_size = sizeof(struct queue_entry_priv_mmio);
- break;
-
- case QID_BEACON:
- queue->limit = 8;
- queue->data_size = 0; /* No DMA required for beacons */
- queue->desc_size = TXD_DESC_SIZE;
- queue->winfo_size = txwi_size;
- queue->priv_size = sizeof(struct queue_entry_priv_mmio);
- break;
-
- case QID_ATIM:
- /* fallthrough */
- default:
- BUG();
- break;
- }
-}
-
static const struct rt2x00_ops rt2800pci_ops = {
.name = KBUILD_MODNAME,
.drv_data_size = sizeof(struct rt2800_drv_data),
@@ -1241,7 +390,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
.tx_queues = NUM_TX_QUEUES,
- .queue_init = rt2800pci_queue_init,
+ .queue_init = rt2800mmio_queue_init,
.lib = &rt2800pci_rt2x00_ops,
.drv = &rt2800pci_rt2800_ops,
.hw = &rt2800pci_mac80211_ops,
@@ -1253,7 +402,6 @@ static const struct rt2x00_ops rt2800pci_ops = {
/*
* RT2800pci module information.
*/
-#ifdef CONFIG_PCI
static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0601) },
{ PCI_DEVICE(0x1814, 0x0681) },
@@ -1298,38 +446,15 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
#endif
{ 0, }
};
-#endif /* CONFIG_PCI */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
-#ifdef CONFIG_PCI
MODULE_FIRMWARE(FIRMWARE_RT2860);
MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
-#endif /* CONFIG_PCI */
MODULE_LICENSE("GPL");
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-static int rt2800soc_probe(struct platform_device *pdev)
-{
- return rt2x00soc_probe(pdev, &rt2800pci_ops);
-}
-
-static struct platform_driver rt2800soc_driver = {
- .driver = {
- .name = "rt2800_wmac",
- .owner = THIS_MODULE,
- .mod_name = KBUILD_MODNAME,
- },
- .probe = rt2800soc_probe,
- .remove = rt2x00soc_remove,
- .suspend = rt2x00soc_suspend,
- .resume = rt2x00soc_resume,
-};
-#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
-
-#ifdef CONFIG_PCI
static int rt2800pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
@@ -1344,39 +469,5 @@ static struct pci_driver rt2800pci_driver = {
.suspend = rt2x00pci_suspend,
.resume = rt2x00pci_resume,
};
-#endif /* CONFIG_PCI */
-
-static int __init rt2800pci_init(void)
-{
- int ret = 0;
-
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
- ret = platform_driver_register(&rt2800soc_driver);
- if (ret)
- return ret;
-#endif
-#ifdef CONFIG_PCI
- ret = pci_register_driver(&rt2800pci_driver);
- if (ret) {
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
- platform_driver_unregister(&rt2800soc_driver);
-#endif
- return ret;
- }
-#endif
-
- return ret;
-}
-
-static void __exit rt2800pci_exit(void)
-{
-#ifdef CONFIG_PCI
- pci_unregister_driver(&rt2800pci_driver);
-#endif
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
- platform_driver_unregister(&rt2800soc_driver);
-#endif
-}
-module_init(rt2800pci_init);
-module_exit(rt2800pci_exit);
+module_pci_driver(rt2800pci_driver);
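
With the SoC half split out into rt2800soc.c, this module registers only a single pci_driver, so the open-coded init/exit pair removed above can collapse into module_pci_driver(). That macro is roughly equivalent to the following boilerplate (a simplified sketch of the expansion):

    static int __init rt2800pci_driver_init(void)
    {
            return pci_register_driver(&rt2800pci_driver);
    }
    module_init(rt2800pci_driver_init);

    static void __exit rt2800pci_driver_exit(void)
    {
            pci_unregister_driver(&rt2800pci_driver);
    }
    module_exit(rt2800pci_driver_exit);
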
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index ab22a087c50d..a81c9ee281c0 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -35,107 +35,10 @@
#define RT2800PCI_H
/*
- * Queue register offset macros
- */
-#define TX_QUEUE_REG_OFFSET 0x10
-#define TX_BASE_PTR(__x) (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_MAX_CNT(__x) (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_CTX_IDX(__x) (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_DTX_IDX(__x) (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
-
-/*
* 8051 firmware image.
*/
#define FIRMWARE_RT2860 "rt2860.bin"
#define FIRMWARE_RT3290 "rt3290.bin"
#define FIRMWARE_IMAGE_BASE 0x2000
-/*
- * DMA descriptor defines.
- */
-#define TXD_DESC_SIZE (4 * sizeof(__le32))
-#define RXD_DESC_SIZE (4 * sizeof(__le32))
-
-/*
- * TX descriptor format for TX, PRIO and Beacon Ring.
- */
-
-/*
- * Word0
- */
-#define TXD_W0_SD_PTR0 FIELD32(0xffffffff)
-
-/*
- * Word1
- */
-#define TXD_W1_SD_LEN1 FIELD32(0x00003fff)
-#define TXD_W1_LAST_SEC1 FIELD32(0x00004000)
-#define TXD_W1_BURST FIELD32(0x00008000)
-#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000)
-#define TXD_W1_LAST_SEC0 FIELD32(0x40000000)
-#define TXD_W1_DMA_DONE FIELD32(0x80000000)
-
-/*
- * Word2
- */
-#define TXD_W2_SD_PTR1 FIELD32(0xffffffff)
-
-/*
- * Word3
- * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
- * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
- * 0:MGMT, 1:HCCA 2:EDCA
- */
-#define TXD_W3_WIV FIELD32(0x01000000)
-#define TXD_W3_QSEL FIELD32(0x06000000)
-#define TXD_W3_TCO FIELD32(0x20000000)
-#define TXD_W3_UCO FIELD32(0x40000000)
-#define TXD_W3_ICO FIELD32(0x80000000)
-
-/*
- * RX descriptor format for RX Ring.
- */
-
-/*
- * Word0
- */
-#define RXD_W0_SDP0 FIELD32(0xffffffff)
-
-/*
- * Word1
- */
-#define RXD_W1_SDL1 FIELD32(0x00003fff)
-#define RXD_W1_SDL0 FIELD32(0x3fff0000)
-#define RXD_W1_LS0 FIELD32(0x40000000)
-#define RXD_W1_DMA_DONE FIELD32(0x80000000)
-
-/*
- * Word2
- */
-#define RXD_W2_SDP1 FIELD32(0xffffffff)
-
-/*
- * Word3
- * AMSDU: RX with 802.3 header, not 802.11 header.
- * DECRYPTED: This frame is being decrypted.
- */
-#define RXD_W3_BA FIELD32(0x00000001)
-#define RXD_W3_DATA FIELD32(0x00000002)
-#define RXD_W3_NULLDATA FIELD32(0x00000004)
-#define RXD_W3_FRAG FIELD32(0x00000008)
-#define RXD_W3_UNICAST_TO_ME FIELD32(0x00000010)
-#define RXD_W3_MULTICAST FIELD32(0x00000020)
-#define RXD_W3_BROADCAST FIELD32(0x00000040)
-#define RXD_W3_MY_BSS FIELD32(0x00000080)
-#define RXD_W3_CRC_ERROR FIELD32(0x00000100)
-#define RXD_W3_CIPHER_ERROR FIELD32(0x00000600)
-#define RXD_W3_AMSDU FIELD32(0x00000800)
-#define RXD_W3_HTC FIELD32(0x00001000)
-#define RXD_W3_RSSI FIELD32(0x00002000)
-#define RXD_W3_L2PAD FIELD32(0x00004000)
-#define RXD_W3_AMPDU FIELD32(0x00008000)
-#define RXD_W3_DECRYPTED FIELD32(0x00010000)
-#define RXD_W3_PLCP_SIGNAL FIELD32(0x00020000)
-#define RXD_W3_PLCP_RSSI FIELD32(0x00040000)
-
#endif /* RT2800PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/rt2x00/rt2800soc.c
new file mode 100644
index 000000000000..1359227ca411
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800soc.c
@@ -0,0 +1,263 @@
+/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ * <http://rt2x00.serialmonkey.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* Module: rt2800soc
+ * Abstract: rt2800 WiSoC specific routines.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "rt2x00.h"
+#include "rt2x00mmio.h"
+#include "rt2x00soc.h"
+#include "rt2800.h"
+#include "rt2800lib.h"
+#include "rt2800mmio.h"
+
+/* Allow hardware encryption to be disabled. */
+static bool modparam_nohwcrypt;
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+
+static bool rt2800soc_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+ return modparam_nohwcrypt;
+}
+
+static void rt2800soc_disable_radio(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_disable_radio(rt2x00dev);
+ rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
+}
+
+static int rt2800soc_set_device_state(struct rt2x00_dev *rt2x00dev,
+ enum dev_state state)
+{
+ int retval = 0;
+
+ switch (state) {
+ case STATE_RADIO_ON:
+ retval = rt2800mmio_enable_radio(rt2x00dev);
+ break;
+
+ case STATE_RADIO_OFF:
+ rt2800soc_disable_radio(rt2x00dev);
+ break;
+
+ case STATE_RADIO_IRQ_ON:
+ case STATE_RADIO_IRQ_OFF:
+ rt2800mmio_toggle_irq(rt2x00dev, state);
+ break;
+
+ case STATE_DEEP_SLEEP:
+ case STATE_SLEEP:
+ case STATE_STANDBY:
+ case STATE_AWAKE:
+ /* These states are not supported, but don't report an error */
+ retval = 0;
+ break;
+
+ default:
+ retval = -ENOTSUPP;
+ break;
+ }
+
+ if (unlikely(retval))
+ rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
+ state, retval);
+
+ return retval;
+}
+
+static int rt2800soc_read_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+ void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
+
+ if (!base_addr)
+ return -ENOMEM;
+
+ memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
+
+ iounmap(base_addr);
+ return 0;
+}
+
+/* Firmware functions */
+static char *rt2800soc_get_firmware_name(struct rt2x00_dev *rt2x00dev)
+{
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
+static int rt2800soc_load_firmware(struct rt2x00_dev *rt2x00dev,
+ const u8 *data, const size_t len)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static int rt2800soc_check_firmware(struct rt2x00_dev *rt2x00dev,
+ const u8 *data, const size_t len)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static int rt2800soc_write_firmware(struct rt2x00_dev *rt2x00dev,
+ const u8 *data, const size_t len)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static const struct ieee80211_ops rt2800soc_mac80211_ops = {
+ .tx = rt2x00mac_tx,
+ .start = rt2x00mac_start,
+ .stop = rt2x00mac_stop,
+ .add_interface = rt2x00mac_add_interface,
+ .remove_interface = rt2x00mac_remove_interface,
+ .config = rt2x00mac_config,
+ .configure_filter = rt2x00mac_configure_filter,
+ .set_key = rt2x00mac_set_key,
+ .sw_scan_start = rt2x00mac_sw_scan_start,
+ .sw_scan_complete = rt2x00mac_sw_scan_complete,
+ .get_stats = rt2x00mac_get_stats,
+ .get_tkip_seq = rt2800_get_tkip_seq,
+ .set_rts_threshold = rt2800_set_rts_threshold,
+ .sta_add = rt2x00mac_sta_add,
+ .sta_remove = rt2x00mac_sta_remove,
+ .bss_info_changed = rt2x00mac_bss_info_changed,
+ .conf_tx = rt2800_conf_tx,
+ .get_tsf = rt2800_get_tsf,
+ .rfkill_poll = rt2x00mac_rfkill_poll,
+ .ampdu_action = rt2800_ampdu_action,
+ .flush = rt2x00mac_flush,
+ .get_survey = rt2800_get_survey,
+ .get_ringparam = rt2x00mac_get_ringparam,
+ .tx_frames_pending = rt2x00mac_tx_frames_pending,
+};
+
+static const struct rt2800_ops rt2800soc_rt2800_ops = {
+ .register_read = rt2x00mmio_register_read,
+ .register_read_lock = rt2x00mmio_register_read, /* same for SoCs */
+ .register_write = rt2x00mmio_register_write,
+ .register_write_lock = rt2x00mmio_register_write, /* same for SoCs */
+ .register_multiread = rt2x00mmio_register_multiread,
+ .register_multiwrite = rt2x00mmio_register_multiwrite,
+ .regbusy_read = rt2x00mmio_regbusy_read,
+ .read_eeprom = rt2800soc_read_eeprom,
+ .hwcrypt_disabled = rt2800soc_hwcrypt_disabled,
+ .drv_write_firmware = rt2800soc_write_firmware,
+ .drv_init_registers = rt2800mmio_init_registers,
+ .drv_get_txwi = rt2800mmio_get_txwi,
+};
+
+static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
+ .irq_handler = rt2800mmio_interrupt,
+ .txstatus_tasklet = rt2800mmio_txstatus_tasklet,
+ .pretbtt_tasklet = rt2800mmio_pretbtt_tasklet,
+ .tbtt_tasklet = rt2800mmio_tbtt_tasklet,
+ .rxdone_tasklet = rt2800mmio_rxdone_tasklet,
+ .autowake_tasklet = rt2800mmio_autowake_tasklet,
+ .probe_hw = rt2800_probe_hw,
+ .get_firmware_name = rt2800soc_get_firmware_name,
+ .check_firmware = rt2800soc_check_firmware,
+ .load_firmware = rt2800soc_load_firmware,
+ .initialize = rt2x00mmio_initialize,
+ .uninitialize = rt2x00mmio_uninitialize,
+ .get_entry_state = rt2800mmio_get_entry_state,
+ .clear_entry = rt2800mmio_clear_entry,
+ .set_device_state = rt2800soc_set_device_state,
+ .rfkill_poll = rt2800_rfkill_poll,
+ .link_stats = rt2800_link_stats,
+ .reset_tuner = rt2800_reset_tuner,
+ .link_tuner = rt2800_link_tuner,
+ .gain_calibration = rt2800_gain_calibration,
+ .vco_calibration = rt2800_vco_calibration,
+ .start_queue = rt2800mmio_start_queue,
+ .kick_queue = rt2800mmio_kick_queue,
+ .stop_queue = rt2800mmio_stop_queue,
+ .flush_queue = rt2x00mmio_flush_queue,
+ .write_tx_desc = rt2800mmio_write_tx_desc,
+ .write_tx_data = rt2800_write_tx_data,
+ .write_beacon = rt2800_write_beacon,
+ .clear_beacon = rt2800_clear_beacon,
+ .fill_rxdone = rt2800mmio_fill_rxdone,
+ .config_shared_key = rt2800_config_shared_key,
+ .config_pairwise_key = rt2800_config_pairwise_key,
+ .config_filter = rt2800_config_filter,
+ .config_intf = rt2800_config_intf,
+ .config_erp = rt2800_config_erp,
+ .config_ant = rt2800_config_ant,
+ .config = rt2800_config,
+ .sta_add = rt2800_sta_add,
+ .sta_remove = rt2800_sta_remove,
+};
+
+static const struct rt2x00_ops rt2800soc_ops = {
+ .name = KBUILD_MODNAME,
+ .drv_data_size = sizeof(struct rt2800_drv_data),
+ .max_ap_intf = 8,
+ .eeprom_size = EEPROM_SIZE,
+ .rf_size = RF_SIZE,
+ .tx_queues = NUM_TX_QUEUES,
+ .queue_init = rt2800mmio_queue_init,
+ .lib = &rt2800soc_rt2x00_ops,
+ .drv = &rt2800soc_rt2800_ops,
+ .hw = &rt2800soc_mac80211_ops,
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+ .debugfs = &rt2800_rt2x00debug,
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+};
+
+static int rt2800soc_probe(struct platform_device *pdev)
+{
+ return rt2x00soc_probe(pdev, &rt2800soc_ops);
+}
+
+static struct platform_driver rt2800soc_driver = {
+ .driver = {
+ .name = "rt2800_wmac",
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ },
+ .probe = rt2800soc_probe,
+ .remove = rt2x00soc_remove,
+ .suspend = rt2x00soc_suspend,
+ .resume = rt2x00soc_resume,
+};
+
+module_platform_driver(rt2800soc_driver);
+
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("Ralink WiSoC Wireless LAN driver.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 96961b9a395c..997df03a0c2e 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -148,6 +148,8 @@ static bool rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
return false;
}
+#define TXSTATUS_READ_INTERVAL 1000000
+
static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
int urb_status, u32 tx_status)
{
@@ -176,8 +178,9 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
if (rt2800usb_txstatus_pending(rt2x00dev)) {
- /* Read register after 250 us */
- hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 250000),
+ /* Read register after 1 ms */
+ hrtimer_start(&rt2x00dev->txstatus_timer,
+ ktime_set(0, TXSTATUS_READ_INTERVAL),
HRTIMER_MODE_REL);
return false;
}
@@ -202,8 +205,9 @@ static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
return;
- /* Read TX_STA_FIFO register after 500 us */
- hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 500000),
+ /* Read TX_STA_FIFO register after 2 ms */
+ hrtimer_start(&rt2x00dev->txstatus_timer,
+ ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
HRTIMER_MODE_REL);
}
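
Both hunks above defer the TX_STA_FIFO read with a relative hrtimer instead of reading it directly in the URB completion path; a minimal sketch of that arm-and-fire pattern (names and the 1 ms interval are illustrative, mirroring TXSTATUS_READ_INTERVAL):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    #define POLL_INTERVAL_NS	1000000		/* 1 ms */

    static struct hrtimer poll_timer;

    static enum hrtimer_restart poll_timer_fn(struct hrtimer *timer)
    {
            /* Read the status register here; re-arm later from softirq or
             * workqueue context if more reports are still pending. */
            return HRTIMER_NORESTART;
    }

    static void poll_timer_arm(void)
    {
            hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            poll_timer.function = poll_timer_fn;
            hrtimer_start(&poll_timer, ktime_set(0, POLL_INTERVAL_NS),
                          HRTIMER_MODE_REL);
    }
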
@@ -1176,6 +1180,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Linksys */
{ USB_DEVICE(0x13b1, 0x002f) },
{ USB_DEVICE(0x1737, 0x0079) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0170) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x3572) },
/* Sitecom */
@@ -1199,6 +1205,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x050d, 0x1103) },
/* Cameo */
{ USB_DEVICE(0x148f, 0xf301) },
+ /* D-Link */
+ { USB_DEVICE(0x2001, 0x3c1f) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7733) },
/* Hawking */
@@ -1212,6 +1220,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0789, 0x016b) },
/* NETGEAR */
{ USB_DEVICE(0x0846, 0x9012) },
+ { USB_DEVICE(0x0846, 0x9013) },
{ USB_DEVICE(0x0846, 0x9019) },
/* Planex */
{ USB_DEVICE(0x2019, 0xed19) },
@@ -1220,6 +1229,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x0067) },
{ USB_DEVICE(0x0df6, 0x006a) },
+ { USB_DEVICE(0x0df6, 0x006e) },
/* ZyXEL */
{ USB_DEVICE(0x0586, 0x3421) },
#endif
@@ -1236,6 +1246,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x2001, 0x3c1c) },
{ USB_DEVICE(0x2001, 0x3c1d) },
{ USB_DEVICE(0x2001, 0x3c1e) },
+ { USB_DEVICE(0x2001, 0x3c20) },
+ { USB_DEVICE(0x2001, 0x3c22) },
+ { USB_DEVICE(0x2001, 0x3c23) },
/* LG innotek */
{ USB_DEVICE(0x043e, 0x7a22) },
{ USB_DEVICE(0x043e, 0x7a42) },
@@ -1258,12 +1271,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x043e, 0x7a32) },
/* AVM GmbH */
{ USB_DEVICE(0x057c, 0x8501) },
- /* D-Link DWA-160-B2 */
+ /* Buffalo */
+ { USB_DEVICE(0x0411, 0x0241) },
+ /* D-Link */
{ USB_DEVICE(0x2001, 0x3c1a) },
+ { USB_DEVICE(0x2001, 0x3c21) },
/* Proware */
{ USB_DEVICE(0x043e, 0x7a13) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x5572) },
+ /* TRENDnet */
+ { USB_DEVICE(0x20f4, 0x724a) },
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
@@ -1333,6 +1351,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x1d4d, 0x0010) },
/* Planex */
{ USB_DEVICE(0x2019, 0xab24) },
+ { USB_DEVICE(0x2019, 0xab29) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6259) },
/* RadioShack */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index fe4c572db52c..e4ba2ce0f212 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -39,6 +39,7 @@
#include <linux/input-polldev.h>
#include <linux/kfifo.h>
#include <linux/hrtimer.h>
+#include <linux/average.h>
#include <net/mac80211.h>
@@ -138,17 +139,6 @@
#define SHORT_EIFS ( SIFS + SHORT_DIFS + \
GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
-/*
- * Structure for average calculation
- * The avg field contains the actual average value,
- * but avg_weight is internally used during calculations
- * to prevent rounding errors.
- */
-struct avg_val {
- int avg;
- int avg_weight;
-};
-
enum rt2x00_chip_intf {
RT2X00_CHIP_INTF_PCI,
RT2X00_CHIP_INTF_PCIE,
@@ -297,7 +287,7 @@ struct link_ant {
* Similar to the avg_rssi in the link_qual structure
* this value is updated by using the walking average.
*/
- struct avg_val rssi_ant;
+ struct ewma rssi_ant;
};
/*
@@ -326,7 +316,7 @@ struct link {
/*
* Currently active average RSSI value
*/
- struct avg_val avg_rssi;
+ struct ewma avg_rssi;
/*
* Work structure for scheduling periodic link tuning.
@@ -1179,6 +1169,93 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
}
+/* Helpers for capability flags */
+
+static inline bool
+rt2x00_has_cap_flag(struct rt2x00_dev *rt2x00dev,
+ enum rt2x00_capability_flags cap_flag)
+{
+ return test_bit(cap_flag, &rt2x00dev->cap_flags);
+}
+
+static inline bool
+rt2x00_has_cap_hw_crypto(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_HW_CRYPTO);
+}
+
+static inline bool
+rt2x00_has_cap_power_limit(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_POWER_LIMIT);
+}
+
+static inline bool
+rt2x00_has_cap_control_filters(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTERS);
+}
+
+static inline bool
+rt2x00_has_cap_control_filter_pspoll(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTER_PSPOLL);
+}
+
+static inline bool
+rt2x00_has_cap_pre_tbtt_interrupt(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_PRE_TBTT_INTERRUPT);
+}
+
+static inline bool
+rt2x00_has_cap_link_tuning(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_LINK_TUNING);
+}
+
+static inline bool
+rt2x00_has_cap_frame_type(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_FRAME_TYPE);
+}
+
+static inline bool
+rt2x00_has_cap_rf_sequence(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_RF_SEQUENCE);
+}
+
+static inline bool
+rt2x00_has_cap_external_lna_a(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_A);
+}
+
+static inline bool
+rt2x00_has_cap_external_lna_bg(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_BG);
+}
+
+static inline bool
+rt2x00_has_cap_double_antenna(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_DOUBLE_ANTENNA);
+}
+
+static inline bool
+rt2x00_has_cap_bt_coexist(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_BT_COEXIST);
+}
+
+static inline bool
+rt2x00_has_cap_vco_recalibration(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_VCO_RECALIBRATION);
+}
+
/**
* rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
* @entry: Pointer to &struct queue_entry
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 1ca4c7ffc189..3db0d99d9da7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -52,7 +52,7 @@ void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
- if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !hw_key)
+ if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !hw_key)
return;
__set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
@@ -80,7 +80,7 @@ unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
struct ieee80211_key_conf *key = tx_info->control.hw_key;
unsigned int overhead = 0;
- if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !key)
+ if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !key)
return overhead;
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index fe7a7f63a9ed..7f7baae5ae02 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -750,7 +750,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
intf, &rt2x00debug_fop_queue_stats);
#ifdef CONFIG_RT2X00_LIB_CRYPTO
- if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev))
intf->crypto_stats_entry =
debugfs_create_file("crypto", S_IRUGO, intf->queue_folder,
intf, &rt2x00debug_fop_crypto_stats);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 712eea9d398f..080b1fcae5fa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -88,7 +88,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2x00queue_start_queues(rt2x00dev);
rt2x00link_start_tuner(rt2x00dev);
rt2x00link_start_agc(rt2x00dev);
- if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
rt2x00link_start_vcocal(rt2x00dev);
/*
@@ -113,7 +113,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
* Stop all queues
*/
rt2x00link_stop_agc(rt2x00dev);
- if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
rt2x00link_stop_vcocal(rt2x00dev);
rt2x00link_stop_tuner(rt2x00dev);
rt2x00queue_stop_queues(rt2x00dev);
@@ -234,7 +234,7 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
* here as they will fetch the next beacon directly prior to
* transmission.
*/
- if (test_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_pre_tbtt_interrupt(rt2x00dev))
return;
/* fetch next beacon */
@@ -358,7 +358,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
* mac80211 will expect the same data to be present it the
* frame as it was passed to us.
*/
- if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev))
rt2x00crypto_tx_insert_iv(entry->skb, header_length);
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 8368aab86f28..c2b3b6629188 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -35,50 +35,28 @@
*/
#define DEFAULT_RSSI -128
-/*
- * Helper struct and macro to work with moving/walking averages.
- * When adding a value to the average value the following calculation
- * is needed:
- *
- * avg_rssi = ((avg_rssi * 7) + rssi) / 8;
- *
- * The advantage of this approach is that we only need 1 variable
- * to store the average in (No need for a count and a total).
- * But more importantly, normal average values will over time
- * move less and less towards newly added values. This means
- * that with link tuning, the device can have a very good RSSI
- * for a few minutes, but when the device is moved away from the AP
- * the average will not decrease fast enough to compensate.
- * The walking average compensates for this and will move towards
- * the new values correctly, allowing an effective link tuning;
- * the speed of the average moving towards other values depends
- * on the value for the number of samples. The higher the number
- * of samples, the slower the average will move.
- * We use two variables to keep track of the average value to
- * compensate for the rounding errors. This can be a significant
- * error (>5dBm) if the factor is too low.
- */
-#define AVG_SAMPLES 8
-#define AVG_FACTOR 1000
-#define MOVING_AVERAGE(__avg, __val) \
-({ \
- struct avg_val __new; \
- __new.avg_weight = \
- (__avg).avg_weight ? \
- ((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \
- ((__val) * (AVG_FACTOR))) / \
- (AVG_SAMPLES)) : \
- ((__val) * (AVG_FACTOR)); \
- __new.avg = __new.avg_weight / (AVG_FACTOR); \
- __new; \
-})
+/* Constants for EWMA calculations. */
+#define RT2X00_EWMA_FACTOR 1024
+#define RT2X00_EWMA_WEIGHT 8
+
+static inline int rt2x00link_get_avg_rssi(struct ewma *ewma)
+{
+ unsigned long avg;
+
+ avg = ewma_read(ewma);
+ if (avg)
+ return -avg;
+
+ return DEFAULT_RSSI;
+}
static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev)
{
struct link_ant *ant = &rt2x00dev->link.ant;
- if (ant->rssi_ant.avg && rt2x00dev->link.qual.rx_success)
- return ant->rssi_ant.avg;
+ if (rt2x00dev->link.qual.rx_success)
+ return rt2x00link_get_avg_rssi(&ant->rssi_ant);
+
return DEFAULT_RSSI;
}
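
The hand-rolled walking average removed above (avg = ((avg * 7) + val) / 8, with a scaled avg_weight to limit rounding error) is replaced by the kernel's generic struct ewma from <linux/average.h>. Because that API works on unsigned values and RSSI is negative, the driver stores -rssi and flips the sign on read, falling back to DEFAULT_RSSI while no samples exist. A condensed sketch of the same usage, with the factor and weight values this patch introduces:

    #include <linux/average.h>

    #define DEMO_EWMA_FACTOR	1024	/* internal scaling, matches RT2X00_EWMA_FACTOR */
    #define DEMO_EWMA_WEIGHT	8	/* decay rate, matches RT2X00_EWMA_WEIGHT */
    #define DEMO_DEFAULT_RSSI	-128

    static struct ewma rssi_avg;

    static void rssi_avg_reset(void)
    {
            ewma_init(&rssi_avg, DEMO_EWMA_FACTOR, DEMO_EWMA_WEIGHT);
    }

    static void rssi_avg_sample(int rssi)	/* e.g. -55 dBm */
    {
            ewma_add(&rssi_avg, -rssi);	/* store as a positive value */
    }

    static int rssi_avg_value(void)
    {
            unsigned long avg = ewma_read(&rssi_avg);

            return avg ? -(int)avg : DEMO_DEFAULT_RSSI;
    }
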
@@ -100,8 +78,8 @@ static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev,
static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
{
- rt2x00dev->link.ant.rssi_ant.avg = 0;
- rt2x00dev->link.ant.rssi_ant.avg_weight = 0;
+ ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR,
+ RT2X00_EWMA_WEIGHT);
}
static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
@@ -249,12 +227,12 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
/*
* Update global RSSI
*/
- link->avg_rssi = MOVING_AVERAGE(link->avg_rssi, rxdesc->rssi);
+ ewma_add(&link->avg_rssi, -rxdesc->rssi);
/*
* Update antenna RSSI
*/
- ant->rssi_ant = MOVING_AVERAGE(ant->rssi_ant, rxdesc->rssi);
+ ewma_add(&ant->rssi_ant, -rxdesc->rssi);
}
void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
@@ -309,6 +287,8 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna)
*/
rt2x00dev->link.count = 0;
memset(qual, 0, sizeof(*qual));
+ ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR,
+ RT2X00_EWMA_WEIGHT);
/*
* Restore the VGC level as stored in the registers,
@@ -363,17 +343,17 @@ static void rt2x00link_tuner(struct work_struct *work)
* collect the RSSI data we could use this. Otherwise we
* must fallback to the default RSSI value.
*/
- if (!link->avg_rssi.avg || !qual->rx_success)
+ if (!qual->rx_success)
qual->rssi = DEFAULT_RSSI;
else
- qual->rssi = link->avg_rssi.avg;
+ qual->rssi = rt2x00link_get_avg_rssi(&link->avg_rssi);
/*
* Check if link tuning is supported by the hardware, some hardware
* do not support link tuning at all, while other devices can disable
* the feature from the EEPROM.
*/
- if (test_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_link_tuning(rt2x00dev))
rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count);
/*
@@ -513,7 +493,7 @@ static void rt2x00link_vcocal(struct work_struct *work)
void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
{
INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc);
- if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
INIT_DELAYED_WORK(&rt2x00dev->link.vco_work, rt2x00link_vcocal);
INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog);
INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index f883802f3505..7c157857f5ce 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -382,11 +382,11 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
* of different types, but has no a separate filter for PS Poll frames,
* FIF_CONTROL flag implies FIF_PSPOLL.
*/
- if (!test_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags)) {
+ if (!rt2x00_has_cap_control_filters(rt2x00dev)) {
if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL)
*total_flags |= FIF_CONTROL | FIF_PSPOLL;
}
- if (!test_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags)) {
+ if (!rt2x00_has_cap_control_filter_pspoll(rt2x00dev)) {
if (*total_flags & FIF_CONTROL)
*total_flags |= FIF_PSPOLL;
}
@@ -469,7 +469,7 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;
- if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ if (!rt2x00_has_cap_hw_crypto(rt2x00dev))
return -EOPNOTSUPP;
/*
@@ -754,6 +754,9 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
struct rt2x00_dev *rt2x00dev = hw->priv;
struct data_queue *queue;
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return;
+
tx_queue_for_each(rt2x00dev, queue)
rt2x00queue_flush_queue(queue, drop);
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 76d95deb274b..25da20e7e1f3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -105,13 +105,11 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
goto exit_release_regions;
}
- pci_enable_msi(pci_dev);
-
hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
if (!hw) {
rt2x00_probe_err("Failed to allocate hardware\n");
retval = -ENOMEM;
- goto exit_disable_msi;
+ goto exit_release_regions;
}
pci_set_drvdata(pci_dev, hw);
@@ -121,7 +119,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
rt2x00dev->ops = ops;
rt2x00dev->hw = hw;
rt2x00dev->irq = pci_dev->irq;
- rt2x00dev->name = pci_name(pci_dev);
+ rt2x00dev->name = ops->name;
if (pci_is_pcie(pci_dev))
rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
@@ -152,9 +150,6 @@ exit_free_reg:
exit_free_device:
ieee80211_free_hw(hw);
-exit_disable_msi:
- pci_disable_msi(pci_dev);
-
exit_release_regions:
pci_release_regions(pci_dev);
@@ -179,8 +174,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
rt2x00pci_free_reg(rt2x00dev);
ieee80211_free_hw(hw);
- pci_disable_msi(pci_dev);
-
/*
* Free the PCI device data.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 6c8a33b6ee22..50590b1420a5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -61,7 +61,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
* at least 8 bytes available in headroom for IV/EIV
* and 8 bytes for ICV data as tailroom.
*/
- if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
head_size += 8;
tail_size += 8;
}
@@ -1033,38 +1033,21 @@ EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
- bool started;
bool tx_queue =
(queue->qid == QID_AC_VO) ||
(queue->qid == QID_AC_VI) ||
(queue->qid == QID_AC_BE) ||
(queue->qid == QID_AC_BK);
- mutex_lock(&queue->status_lock);
/*
- * If the queue has been started, we must stop it temporarily
- * to prevent any new frames to be queued on the device. If
- * we are not dropping the pending frames, the queue must
- * only be stopped in the software and not the hardware,
- * otherwise the queue will never become empty on its own.
+ * If we are not supposed to drop any pending
+ * frames, this means we must force a start (=kick)
+ * to the queue to make sure the hardware will
+ * start transmitting.
*/
- started = test_bit(QUEUE_STARTED, &queue->flags);
- if (started) {
- /*
- * Pause the queue
- */
- rt2x00queue_pause_queue(queue);
-
- /*
- * If we are not supposed to drop any pending
- * frames, this means we must force a start (=kick)
- * to the queue to make sure the hardware will
- * start transmitting.
- */
- if (!drop && tx_queue)
- queue->rt2x00dev->ops->lib->kick_queue(queue);
- }
+ if (!drop && tx_queue)
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
/*
* Check if driver supports flushing, if that is the case we can
@@ -1080,14 +1063,6 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
if (unlikely(!rt2x00queue_empty(queue)))
rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
queue->qid);
-
- /*
- * Restore the queue to the previous status
- */
- if (started)
- rt2x00queue_unpause_queue(queue);
-
- mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 88289873c0cf..4e121627925d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -523,7 +523,9 @@ static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced forced reset\n",
queue->qid);
+ rt2x00queue_stop_queue(queue);
rt2x00queue_flush_queue(queue, true);
+ rt2x00queue_start_queue(queue);
}
static int rt2x00usb_dma_timeout(struct data_queue *queue)
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 54d3ddfc9888..a5b69cb49012 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -685,7 +685,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
- !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
+ !rt2x00_has_cap_frame_type(rt2x00dev));
/*
* Configure the RX antenna.
@@ -813,10 +813,10 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
sel = antenna_sel_a;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
} else {
sel = antenna_sel_bg;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
}
for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -836,7 +836,7 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
else if (rt2x00_rf(rt2x00dev, RF2527))
rt61pci_config_antenna_2x(rt2x00dev, ant);
else if (rt2x00_rf(rt2x00dev, RF2529)) {
- if (test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_double_antenna(rt2x00dev))
rt61pci_config_antenna_2x(rt2x00dev, ant);
else
rt61pci_config_antenna_2529(rt2x00dev, ant);
@@ -850,13 +850,13 @@ static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
short lna_gain = 0;
if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
} else {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev))
lna_gain += 14;
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
@@ -1054,14 +1054,14 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
low_bound = 0x28;
up_bound = 0x48;
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
low_bound += 0x10;
up_bound += 0x10;
}
} else {
low_bound = 0x20;
up_bound = 0x40;
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
low_bound += 0x10;
up_bound += 0x10;
}
@@ -2578,7 +2578,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
* eeprom word.
*/
if (rt2x00_rf(rt2x00dev, RF2529) &&
- !test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags)) {
+ !rt2x00_has_cap_double_antenna(rt2x00dev)) {
rt2x00dev->default_ant.rx =
ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
rt2x00dev->default_ant.tx =
@@ -2793,7 +2793,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (!test_bit(CAPABILITY_RF_SEQUENCE, &rt2x00dev->cap_flags)) {
+ if (!rt2x00_has_cap_rf_sequence(rt2x00dev)) {
spec->num_channels = 14;
spec->channels = rf_vals_noseq;
} else {
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1d3880e09a13..1baf9c896dcd 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -595,8 +595,8 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
switch (ant->rx) {
case ANTENNA_HW_DIVERSITY:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
- temp = !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags)
- && (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
+ temp = !rt2x00_has_cap_frame_type(rt2x00dev) &&
+ (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
break;
case ANTENNA_A:
@@ -636,7 +636,7 @@ static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
- !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
+ !rt2x00_has_cap_frame_type(rt2x00dev));
/*
* Configure the RX antenna.
@@ -709,10 +709,10 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
sel = antenna_sel_a;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
} else {
sel = antenna_sel_bg;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
}
for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -740,7 +740,7 @@ static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
short lna_gain = 0;
if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
@@ -930,7 +930,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
low_bound = 0x28;
up_bound = 0x48;
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
low_bound += 0x10;
up_bound += 0x10;
}
@@ -946,7 +946,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
up_bound = 0x1c;
}
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
low_bound += 0x14;
up_bound += 0x10;
}
@@ -1661,7 +1661,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
}
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
if (lna == 3 || lna == 2)
offset += 10;
} else {
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index fc207b268e4f..a91506b12a62 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1122,7 +1122,6 @@ static int rtl8180_probe(struct pci_dev *pdev,
iounmap(priv->map);
err_free_dev:
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(dev);
err_free_reg:
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 8bb4a9a01a18..9a78e3daf742 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1613,6 +1613,35 @@ err_free:
}
EXPORT_SYMBOL(rtl_send_smps_action);
+void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum io_type iotype;
+
+ if (!is_hal_stop(rtlhal)) {
+ switch (operation) {
+ case SCAN_OPT_BACKUP:
+ iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ case SCAN_OPT_RESTORE:
+ iotype = IO_CMD_RESUME_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Unknown Scan Backup operation.\n");
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(rtl_phy_scan_operation_backup);
+
/* There seem to be issues in mac80211 regarding when del ba frames can be
* received. As a work around, we make a fake del_ba if we receive a ba_req;
* however, rx_agg was opened to let mac80211 release some ba related
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 0e5fe0902daf..0cd07420777a 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -114,7 +114,6 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
void rtl_deinit_rfkill(struct ieee80211_hw *hw);
void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
-void rtl_watch_dog_timer_callback(unsigned long data);
void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
@@ -153,5 +152,6 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
u8 *sa, u8 *bssid, u16 tid);
+void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
#endif
diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/rtlwifi/cam.h
index 35e00086a520..0105e6c1901e 100644
--- a/drivers/net/wireless/rtlwifi/cam.h
+++ b/drivers/net/wireless/rtlwifi/cam.h
@@ -41,12 +41,12 @@
#define CAM_CONFIG_USEDK 1
#define CAM_CONFIG_NO_USEDK 0
-extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
-extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
- u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
- u32 ul_default_key, u8 *key_content);
+void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+ u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+ u32 ul_default_key, u8 *key_content);
int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
- u32 ul_key_id);
+ u32 ul_key_id);
void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 733b7ce7f0e2..210ce7cd94d8 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -115,7 +115,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
mutex_lock(&rtlpriv->locks.conf_mutex);
mac->link_state = MAC80211_NOLINK;
- memset(mac->bssid, 0, 6);
+ memset(mac->bssid, 0, ETH_ALEN);
mac->vendor = PEER_UNKNOWN;
/*reset sec info */
@@ -280,7 +280,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
mac->p2p = 0;
mac->vif = NULL;
mac->link_state = MAC80211_NOLINK;
- memset(mac->bssid, 0, 6);
+ memset(mac->bssid, 0, ETH_ALEN);
mac->vendor = PEER_UNKNOWN;
mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
@@ -721,7 +721,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->link_state = MAC80211_LINKED;
mac->cnt_after_linked = 0;
mac->assoc_id = bss_conf->aid;
- memcpy(mac->bssid, bss_conf->bssid, 6);
+ memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
if (rtlpriv->cfg->ops->linked_set_reg)
rtlpriv->cfg->ops->linked_set_reg(hw);
@@ -750,7 +750,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
mac->link_state = MAC80211_NOLINK;
- memset(mac->bssid, 0, 6);
+ memset(mac->bssid, 0, ETH_ALEN);
mac->vendor = PEER_UNKNOWN;
if (rtlpriv->dm.supp_phymode_switch) {
@@ -826,7 +826,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid);
mac->vendor = PEER_UNKNOWN;
- memcpy(mac->bssid, bss_conf->bssid, 6);
+ memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
rtlpriv->cfg->ops->set_network_type(hw, vif->type);
rcu_read_lock();
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 838a1ed3f194..ae13fb94b2e8 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -1203,20 +1203,18 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
static u16 efuse_get_current_size(struct ieee80211_hw *hw)
{
- int continual = true;
u16 efuse_addr = 0;
u8 hworden;
u8 efuse_data, word_cnts;
- while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
- && (efuse_addr < EFUSE_MAX_SIZE)) {
- if (efuse_data != 0xFF) {
- hworden = efuse_data & 0x0F;
- word_cnts = efuse_calculate_word_cnts(hworden);
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- } else {
- continual = false;
- }
+ while (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
+ efuse_addr < EFUSE_MAX_SIZE) {
+ if (efuse_data == 0xFF)
+ break;
+
+ hworden = efuse_data & 0x0F;
+ word_cnts = efuse_calculate_word_cnts(hworden);
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
}
return efuse_addr;
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
index 395a326acfb4..1663b3afd41e 100644
--- a/drivers/net/wireless/rtlwifi/efuse.h
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -104,20 +104,19 @@ struct efuse_priv {
u8 tx_power_g[14];
};
-extern void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
-extern void efuse_initialize(struct ieee80211_hw *hw);
-extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
-extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
-extern void read_efuse(struct ieee80211_hw *hw, u16 _offset,
- u16 _size_byte, u8 *pbuf);
-extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
- u16 offset, u32 *value);
-extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
- u16 offset, u32 value);
-extern bool efuse_shadow_update(struct ieee80211_hw *hw);
-extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
-extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
-extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
-extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+void efuse_initialize(struct ieee80211_hw *hw);
+u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
+void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
+void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf);
+void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, u16 offset,
+ u32 *value);
+void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
+ u32 value);
+bool efuse_shadow_update(struct ieee80211_hw *hw);
+bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
+void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
+void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
+void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 703f839af6ca..0f494444bcd1 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -736,7 +736,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
int index = rtlpci->rx_ring[rx_queue_idx].idx;
@@ -2009,7 +2008,6 @@ fail2:
fail1:
if (hw)
ieee80211_free_hw(hw);
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
@@ -2064,8 +2062,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
rtl_pci_disable_aspm(hw);
- pci_set_drvdata(pdev, NULL);
-
ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtl_pci_disconnect);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index b68cae3024fc..e06971be7df7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -143,6 +143,7 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
} else {
rtlhal->fw_clk_change_in_progress = false;
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ break;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
index e655c0473225..d67f9c731cc4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
@@ -1136,34 +1136,6 @@ void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
&bw40_pwr[0], channel);
}
-void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
- break;
- }
- }
-}
-
void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
index f1acd6d27e44..89f0f1ef1465 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
@@ -200,37 +200,35 @@ enum _ANT_DIV_TYPE {
CGCS_RX_SW_ANTDIV = 0x05,
};
-extern u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-extern void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-extern void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
-extern bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
- long *powerlevel);
-extern void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
-extern void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
-extern void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
+void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
+ long *powerlevel);
+void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
void rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
+bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index c254693a1e6a..347af1e4f438 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -244,7 +245,7 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = {
.set_bw_mode = rtl88e_phy_set_bw_mode,
.switch_channel = rtl88e_phy_sw_chnl,
.dm_watchdog = rtl88e_dm_watchdog,
- .scan_operation_backup = rtl88e_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl88e_phy_set_rf_power_state,
.led_control = rtl88ee_led_control,
.set_desc = rtl88ee_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index 68685a898257..aece6c9cccf1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -478,7 +478,6 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
/*rx_status->qual = status->signal; */
rx_status->signal = status->recvsignalpower + 10;
- /*rx_status->noise = -status->noise; */
if (status->packet_report_type == TX_REPORT2) {
status->macid_valid_entry[0] =
GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index d2d57a27a7c1..e9caa5d4cff0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -541,29 +541,6 @@ EXPORT_SYMBOL(rtl92c_dm_write_dig);
static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
-
- u8 h2c_parameter[3] = { 0 };
-
- return;
-
- if (tmpentry_max_pwdb != 0) {
- rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb;
- } else {
- rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
- }
-
- if (tmpentry_min_pwdb != 0xff) {
- rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb;
- } else {
- rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
- }
-
- h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
- h2c_parameter[0] = 0;
-
- rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
}
void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
@@ -673,7 +650,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
s8 cck_index = 0;
int i;
bool is2t = IS_92C_SERIAL(rtlhal->version);
- s8 txpwr_level[2] = {0, 0};
+ s8 txpwr_level[3] = {0, 0, 0};
u8 ofdm_min_index = 6, rf;
rtlpriv->dm.txpower_trackinginit = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 246e5352f2e1..0c0e78263a66 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -592,36 +592,6 @@ long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
-
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation\n");
- break;
- }
- }
-}
-EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup);
-
void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type)
{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
index cec10d696492..e79dabe9ba1d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
@@ -39,9 +39,7 @@
#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define IQK_ADDA_REG_NUM 16
#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
@@ -205,8 +203,6 @@ void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
long power_indbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type);
void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 3cfa1bb0f476..fa24de43ce79 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -152,8 +152,6 @@ enum version_8192c {
#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \
((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
#define IS_CHIP_VER_B(version) ((version & CHIP_VER_B) ? true : false)
-#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \
- ((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
#define IS_92C_SERIAL(version) ((version & CHIP_92C_BITMASK) ? true : false)
#define IS_CHIP_VENDOR_UMC(version) \
((version & CHIP_VENDOR_UMC) ? true : false)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
index d5e3b704f930..94486cca4000 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -39,9 +39,7 @@
#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define IQK_ADDA_REG_NUM 16
#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
@@ -188,36 +186,29 @@ struct tx_power_struct {
};
bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
-u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
-u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
+u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+ u32 data);
+u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask);
+void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
- enum radio_path rfpath);
+ enum radio_path rfpath);
void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
- long *powerlevel);
+void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel);
void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
long power_indbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
+ enum nl80211_channel_type ch_type);
void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
-void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
- u16 beaconinterval);
+void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval);
void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
@@ -225,28 +216,25 @@ void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
- u32 rfpath);
-bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+ u32 rfpath);
bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
+ enum rf_pwrstate rfpwr_state);
void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
void rtl92c_phy_set_io(struct ieee80211_hw *hw);
void rtl92c_bb_block_on(struct ieee80211_hw *hw);
-u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 offset);
u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset);
+ enum radio_path rfpath, u32 offset);
u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset,
- u32 data);
+ enum radio_path rfpath, u32 offset, u32 data);
void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset,
- u32 data);
+ enum radio_path rfpath, u32 offset,
+ u32 data);
void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask,
- u32 data);
+ u32 regaddr, u32 bitmask, u32 data);
bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index bd4aef74c056..8922ecb47ad2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -560,7 +560,6 @@
#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22
#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
#define EEPROM_DEFAULT_HT20_DIFF 2
-#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0
@@ -639,17 +638,8 @@
#define EEPROM_TXPWR_GROUP 0x6F
-#define EEPROM_TSSI_A 0x76
-#define EEPROM_TSSI_B 0x77
-#define EEPROM_THERMAL_METER 0x78
-
#define EEPROM_CHANNELPLAN 0x75
-#define RF_OPTION1 0x79
-#define RF_OPTION2 0x7A
-#define RF_OPTION3 0x7B
-#define RF_OPTION4 0x7C
-
#define STOPBECON BIT(6)
#define STOPHIGHT BIT(5)
#define STOPMGT BIT(4)
@@ -689,13 +679,6 @@
#define RSV_CTRL 0x001C
#define RD_CTRL 0x0524
-#define REG_USB_INFO 0xFE17
-#define REG_USB_SPECIAL_OPTION 0xFE55
-
-#define REG_USB_DMA_AGG_TO 0xFE5B
-#define REG_USB_AGG_TO 0xFE5C
-#define REG_USB_AGG_TH 0xFE5D
-
#define REG_USB_VID 0xFE60
#define REG_USB_PID 0xFE62
#define REG_USB_OPTIONAL 0xFE64
@@ -1196,9 +1179,6 @@
#define POLLING_LLT_THRESHOLD 20
#define POLLING_READY_TIMEOUT_COUNT 1000
-#define MAX_MSS_DENSITY_2T 0x13
-#define MAX_MSS_DENSITY_1T 0x0A
-
#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
#define EPROM_CMD_CONFIG 0x3
#define EPROM_CMD_LOAD 1
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
index 6c8d56efceae..d8fe68b389d2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
@@ -34,11 +34,10 @@
#define RF6052_MAX_REG 0x3F
#define RF6052_MAX_PATH 2
-extern void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
- u8 bandwidth);
-extern void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-extern void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
-extern bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 14203561b6ee..b790320d2030 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -219,7 +220,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
.set_bw_mode = rtl92c_phy_set_bw_mode,
.switch_channel = rtl92c_phy_sw_chnl,
.dm_watchdog = rtl92c_dm_watchdog,
- .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92c_phy_set_rf_power_state,
.led_control = rtl92ce_led_control,
.set_desc = rtl92ce_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 6ad23b413eb3..52abf0a862fa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -420,7 +420,6 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->recvsignalpower + 10;
- /*rx_status->noise = -stats->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index da4f587199ee..393685390f3e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -32,6 +32,7 @@
#include "../usb.h"
#include "../ps.h"
#include "../cam.h"
+#include "../stats.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -738,16 +739,6 @@ static u8 _rtl92c_evm_db_to_percentage(char value)
return ret_val;
}
-static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
- u8 signal_strength_index)
-{
- long signal_power;
-
- signal_power = (long)((signal_strength_index + 1) >> 1);
- signal_power -= 95;
- return signal_power;
-}
-
static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
long currsig)
{
@@ -913,180 +904,6 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
(hw, total_rssi /= rf_rx_num));
}
-static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u8 rfpath;
- u32 last_rssi, tmpval;
-
- if (pstats->packet_toself || pstats->packet_beacon) {
- rtlpriv->stats.rssi_calculate_cnt++;
- if (rtlpriv->stats.ui_rssi.total_num++ >=
- PHY_RSSI_SLID_WIN_MAX) {
- rtlpriv->stats.ui_rssi.total_num =
- PHY_RSSI_SLID_WIN_MAX;
- last_rssi =
- rtlpriv->stats.ui_rssi.elements[rtlpriv->
- stats.ui_rssi.index];
- rtlpriv->stats.ui_rssi.total_val -= last_rssi;
- }
- rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
- rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
- index++] = pstats->signalstrength;
- if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
- rtlpriv->stats.ui_rssi.index = 0;
- tmpval = rtlpriv->stats.ui_rssi.total_val /
- rtlpriv->stats.ui_rssi.total_num;
- rtlpriv->stats.signal_strength =
- _rtl92c_translate_todbm(hw, (u8) tmpval);
- pstats->rssi = rtlpriv->stats.signal_strength;
- }
- if (!pstats->is_cck && pstats->packet_toself) {
- for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
- rfpath++) {
- if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
- continue;
- if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- pstats->rx_mimo_signalstrength[rfpath];
- }
- if (pstats->rx_mimo_signalstrength[rfpath] >
- rtlpriv->stats.rx_rssi_percentage[rfpath]) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.
- rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
-
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- rtlpriv->stats.rx_rssi_percentage[rfpath] +
- 1;
- } else {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.
- rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
- }
- }
- }
-}
-
-static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int weighting = 0;
-
- if (rtlpriv->stats.recv_signal_power == 0)
- rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
- if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
- weighting = 5;
- else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
- weighting = (-5);
- rtlpriv->stats.recv_signal_power =
- (rtlpriv->stats.recv_signal_power * 5 +
- pstats->recvsignalpower + weighting) / 6;
-}
-
-static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undec_sm_pwdb = 0;
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- return;
- } else {
- undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- }
- if (pstats->packet_toself || pstats->packet_beacon) {
- if (undec_sm_pwdb < 0)
- undec_sm_pwdb = pstats->rx_pwdb_all;
- if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- undec_sm_pwdb += 1;
- } else {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- }
- rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
- _rtl92c_update_rxsignalstatistics(hw, pstats);
- }
-}
-
-static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 last_evm = 0, n_stream, tmpval;
-
- if (pstats->signalquality != 0) {
- if (pstats->packet_toself || pstats->packet_beacon) {
- if (rtlpriv->stats.LINK_Q.total_num++ >=
- PHY_LINKQUALITY_SLID_WIN_MAX) {
- rtlpriv->stats.LINK_Q.total_num =
- PHY_LINKQUALITY_SLID_WIN_MAX;
- last_evm =
- rtlpriv->stats.LINK_Q.elements
- [rtlpriv->stats.LINK_Q.index];
- rtlpriv->stats.LINK_Q.total_val -=
- last_evm;
- }
- rtlpriv->stats.LINK_Q.total_val +=
- pstats->signalquality;
- rtlpriv->stats.LINK_Q.elements
- [rtlpriv->stats.LINK_Q.index++] =
- pstats->signalquality;
- if (rtlpriv->stats.LINK_Q.index >=
- PHY_LINKQUALITY_SLID_WIN_MAX)
- rtlpriv->stats.LINK_Q.index = 0;
- tmpval = rtlpriv->stats.LINK_Q.total_val /
- rtlpriv->stats.LINK_Q.total_num;
- rtlpriv->stats.signal_quality = tmpval;
- rtlpriv->stats.last_sigstrength_inpercent = tmpval;
- for (n_stream = 0; n_stream < 2;
- n_stream++) {
- if (pstats->RX_SIGQ[n_stream] != -1) {
- if (!rtlpriv->stats.RX_EVM[n_stream]) {
- rtlpriv->stats.RX_EVM[n_stream]
- = pstats->RX_SIGQ[n_stream];
- }
- rtlpriv->stats.RX_EVM[n_stream] =
- ((rtlpriv->stats.RX_EVM
- [n_stream] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->RX_SIGQ
- [n_stream] * 1)) /
- (RX_SMOOTH_FACTOR);
- }
- }
- }
- } else {
- ;
- }
-}
-
-static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
- u8 *buffer,
- struct rtl_stats *pcurrent_stats)
-{
- if (!pcurrent_stats->packet_matchbssid &&
- !pcurrent_stats->packet_beacon)
- return;
- _rtl92c_process_ui_rssi(hw, pcurrent_stats);
- _rtl92c_process_pwdb(hw, pcurrent_stats);
- _rtl92c_process_LINK_Q(hw, pcurrent_stats);
-}
-
void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct rtl_stats *pstats,
@@ -1123,5 +940,5 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
_rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
packet_matchbssid, packet_toself,
packet_beacon);
- _rtl92c_process_phyinfo(hw, tmp_buf, pstats);
+ rtl_process_phyinfo(hw, tmp_buf, pstats);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
index 090fd33a158d..11b439d6b671 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
@@ -34,15 +34,14 @@
#define RF6052_MAX_REG 0x3F
#define RF6052_MAX_PATH 2
-extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
- u8 bandwidth);
-extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
+void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
- enum radio_path rfpath);
+ enum radio_path rfpath);
void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
u8 *ppowerlevel);
void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 2bd598526217..9936de716ad5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -31,6 +31,7 @@
#include "../core.h"
#include "../usb.h"
#include "../efuse.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -117,7 +118,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
.set_bw_mode = rtl92c_phy_set_bw_mode,
.switch_channel = rtl92c_phy_sw_chnl,
.dm_watchdog = rtl92c_dm_watchdog,
- .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92cu_phy_set_rf_power_state,
.led_control = rtl92cu_led_control,
.enable_hw_sec = rtl92cu_enable_hw_security_config,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 763cf1defab5..25e50ffc44ec 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -343,13 +343,13 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
(bool)GET_RX_DESC_PAGGR(pdesc));
rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
if (phystatus) {
- p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
+ p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
+ stats->rx_bufshift);
rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
p_drvinfo);
}
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->rssi + 10;
- /*rx_status->noise = -stats->noise; */
return true;
}
@@ -364,7 +364,6 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
u8 *rxdesc;
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
struct rx_fwinfo_92c *p_drvinfo;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index f700f7a614b2..7908e1c85819 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -840,9 +840,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
bool internal_pa = false;
long ele_a = 0, ele_d, temp_cck, val_x, value32;
long val_y, ele_c = 0;
- u8 ofdm_index[2];
+ u8 ofdm_index[3];
s8 cck_index = 0;
- u8 ofdm_index_old[2] = {0, 0};
+ u8 ofdm_index_old[3] = {0, 0, 0};
s8 cck_index_old = 0;
u8 index;
int i;
@@ -1118,6 +1118,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
val_x, val_y, ele_a, ele_c, ele_d,
val_x, val_y);
+ if (cck_index >= CCK_TABLE_SIZE)
+ cck_index = CCK_TABLE_SIZE - 1;
+ if (cck_index < 0)
+ cck_index = 0;
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* Adjust CCK according to IQK result */
if (!rtlpriv->dm.cck_inch14) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 7dd8f6de0550..c4a7db9135d6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1194,25 +1194,7 @@ void rtl92d_linked_set_reg(struct ieee80211_hw *hw)
* mac80211 will send pkt when scan */
void rtl92de_set_qos(struct ieee80211_hw *hw, int aci)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl92d_dm_init_edca_turbo(hw);
- return;
- switch (aci) {
- case AC1_BK:
- rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
- break;
- case AC0_BE:
- break;
- case AC2_VI:
- rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
- break;
- case AC3_VO:
- rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
- break;
- default:
- RT_ASSERT(false, "invalid aci: %d !\n", aci);
- break;
- }
}
void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
index 7c9f7a2f1e42..1bc7b1a96d4a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
@@ -55,10 +55,9 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 *p_macaddr, bool is_group, u8 enc_algo,
bool is_wepkey, bool clear_all);
-extern void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset,
- u32 value, u8 direct);
-extern u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset,
- u8 direct);
+void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset, u32 value,
+ u8 direct);
+u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset, u8 direct);
void rtl92de_suspend(struct ieee80211_hw *hw);
void rtl92de_resume(struct ieee80211_hw *hw);
void rtl92d_linked_set_reg(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 840bac5fa2f8..13196cc4b1d3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -1022,34 +1022,6 @@ void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
}
-void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- rtlhal->current_bandtypebackup =
- rtlhal->current_bandtype;
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation\n");
- break;
- }
- }
-}
-
void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type)
{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
index f074952bf25c..48d5c6835b6a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
@@ -39,9 +39,7 @@
#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define IQK_ADDA_REG_NUM 16
#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
@@ -127,34 +125,32 @@ static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
*flag);
}
-extern u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-extern void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-extern void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
-extern bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
- enum radio_path rfpath);
-extern void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
-extern void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
-extern u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
+u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum rf_content content,
enum radio_path rfpath);
bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
+bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw);
void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw);
@@ -173,6 +169,5 @@ void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
unsigned long *flag);
u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl);
void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
-void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
index 0fe1a48593e8..7303d12c266f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
@@ -30,15 +30,13 @@
#ifndef __RTL92D_RF_H__
#define __RTL92D_RF_H__
-extern void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
- u8 bandwidth);
-extern void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-extern void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
-extern bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
-extern void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw,
- bool bmac0);
+void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index c18c04bf0c13..edab5a5351b5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -236,7 +237,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
.set_bw_mode = rtl92d_phy_set_bw_mode,
.switch_channel = rtl92d_phy_sw_chnl,
.dm_watchdog = rtl92d_dm_watchdog,
- .scan_operation_backup = rtl92d_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92d_phy_set_rf_power_state,
.led_control = rtl92de_led_control,
.set_desc = rtl92de_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index b8ec718a0fab..945ddecf90c9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -526,7 +526,6 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
}
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->rssi + 10;
- /*rx_status->noise = -stats->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
index 84d1181795b8..c81c83591940 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
@@ -425,14 +425,9 @@
#define EXT_IMEM_CODE_DONE BIT(2)
#define IMEM_CHK_RPT BIT(1)
#define IMEM_CODE_DONE BIT(0)
-#define IMEM_CODE_DONE BIT(0)
-#define IMEM_CHK_RPT BIT(1)
#define EMEM_CODE_DONE BIT(2)
#define EMEM_CHK_RPT BIT(3)
-#define DMEM_CODE_DONE BIT(4)
#define IMEM_RDY BIT(5)
-#define BASECHG BIT(6)
-#define FWRDY BIT(7)
#define LOAD_FW_READY (IMEM_CODE_DONE | \
IMEM_CHK_RPT | \
EMEM_CODE_DONE | \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index c7095118de6e..222d2e792ca6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -330,7 +330,6 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->rssi + 10;
- /*rx_status->noise = -stats->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
index eafbb18dd48e..5d318a85eda4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -934,35 +934,6 @@ static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
return pwrout_dbm;
}
-void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
-
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
- break;
- }
- }
-}
-
void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
index e7a59eba351a..007ebdbbe108 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
@@ -183,42 +183,40 @@ struct tx_power_struct {
u32 mcs_original_offset[4][16];
};
-extern u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-extern void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-extern void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
-extern bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
- enum radio_path rfpath);
-extern void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
- long *powerlevel);
-extern void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
- u8 channel);
-extern bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
- long power_indbm);
-extern void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
-extern void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
-extern void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
+u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
+ long *powerlevel);
+void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
+ u8 channel);
+bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
+ long power_indbm);
+void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
+bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
index d0f9dd79abea..57f1933ee663 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
@@ -32,12 +32,11 @@
#define RF6052_MAX_TX_PWR 0x3F
-extern void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
- u8 bandwidth);
-extern void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-extern void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
-extern bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index d9ee2efffe5f..62b204faf773 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -33,6 +33,7 @@
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -220,7 +221,7 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
.set_bw_mode = rtl8723ae_phy_set_bw_mode,
.switch_channel = rtl8723ae_phy_sw_chnl,
.dm_watchdog = rtl8723ae_dm_watchdog,
- .scan_operation_backup = rtl8723ae_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl8723ae_phy_set_rf_power_state,
.led_control = rtl8723ae_led_control,
.set_desc = rtl8723ae_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index bcd82a1020a5..50b7be3f3a60 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -359,7 +359,6 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
/*rx_status->qual = status->signal; */
rx_status->signal = status->recvsignalpower + 10;
- /*rx_status->noise = -status->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index e56778cac9bf..6e2b5c5c83c8 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -455,7 +455,6 @@ static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
struct ieee80211_rx_status rx_status = {0};
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
@@ -498,7 +497,6 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
struct ieee80211_rx_status rx_status = {0};
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
@@ -582,12 +580,15 @@ static void _rtl_rx_work(unsigned long param)
static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
unsigned int len)
{
+#if NET_IP_ALIGN != 0
unsigned int padding = 0;
+#endif
/* make function no-op when possible */
if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
return 0;
+#if NET_IP_ALIGN != 0
/* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
/* TODO: deduplicate common code, define helper function instead? */
@@ -608,6 +609,7 @@ static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
padding ^= NET_IP_ALIGN;
return padding;
+#endif
}
#define __RADIO_TAP_SIZE_RSV 32
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 703258742d28..d224dc3bb092 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -192,8 +192,6 @@ enum hardware_type {
(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
#define IS_HARDWARE_TYPE_8723(rtlhal) \
(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
-#define IS_HARDWARE_TYPE_8723U(rtlhal) \
- (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
#define RX_HAL_IS_CCK_RATE(_pdesc)\
(_pdesc->rxmcs == DESC92_RATE1M || \
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index c7dc6feab2ff..1342f81e683d 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -243,7 +243,7 @@ static int wl1251_spi_probe(struct spi_device *spi)
struct wl1251 *wl;
int ret;
- pdata = spi->dev.platform_data;
+ pdata = dev_get_platdata(&spi->dev);
if (!pdata) {
wl1251_error("no platform data");
return -ENODEV;
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index fd02060038de..2c3bd1bff3f6 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -424,8 +424,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
#define CHIP_ID_1271_PG10 (0x4030101)
#define CHIP_ID_1271_PG20 (0x4030111)
-#define WL1251_FW_NAME "wl1251-fw.bin"
-#define WL1251_NVS_NAME "wl1251-nvs.bin"
+#define WL1251_FW_NAME "ti-connectivity/wl1251-fw.bin"
+#define WL1251_NVS_NAME "ti-connectivity/wl1251-nvs.bin"
#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 1c627da85083..591526b99154 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1704,7 +1704,7 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
static int wl12xx_setup(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
- struct wlcore_platdev_data *pdev_data = wl->pdev->dev.platform_data;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
struct wl12xx_platform_data *pdata = pdev_data->pdata;
wl->rtable = wl12xx_rtable;
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 7aa0eb848c5a..d0daca1d23bc 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -623,6 +623,18 @@ static const int wl18xx_rtable[REG_TABLE_LEN] = {
[REG_RAW_FW_STATUS_ADDR] = WL18XX_FW_STATUS_ADDR,
};
+static const struct wl18xx_clk_cfg wl18xx_clk_table_coex[NUM_CLOCK_CONFIGS] = {
+ [CLOCK_CONFIG_16_2_M] = { 8, 121, 0, 0, false },
+ [CLOCK_CONFIG_16_368_M] = { 8, 120, 0, 0, false },
+ [CLOCK_CONFIG_16_8_M] = { 8, 117, 0, 0, false },
+ [CLOCK_CONFIG_19_2_M] = { 10, 128, 0, 0, false },
+ [CLOCK_CONFIG_26_M] = { 11, 104, 0, 0, false },
+ [CLOCK_CONFIG_32_736_M] = { 8, 120, 0, 0, false },
+ [CLOCK_CONFIG_33_6_M] = { 8, 117, 0, 0, false },
+ [CLOCK_CONFIG_38_468_M] = { 10, 128, 0, 0, false },
+ [CLOCK_CONFIG_52_M] = { 11, 104, 0, 0, false },
+};
+
static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
[CLOCK_CONFIG_16_2_M] = { 7, 104, 801, 4, true },
[CLOCK_CONFIG_16_368_M] = { 9, 132, 3751, 4, true },
@@ -704,6 +716,23 @@ static int wl18xx_set_clk(struct wl1271 *wl)
wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
wl18xx_clk_table[clk_freq].swallow ? "swallow" : "spit");
+ /* coex PLL configuration */
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_N,
+ wl18xx_clk_table_coex[clk_freq].n);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_M,
+ wl18xx_clk_table_coex[clk_freq].m);
+ if (ret < 0)
+ goto out;
+
+ /* bypass the swallowing logic */
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
+ PLLSH_COEX_PLL_SWALLOW_EN_VAL1);
+ if (ret < 0)
+ goto out;
+
ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_N,
wl18xx_clk_table[clk_freq].n);
if (ret < 0)
@@ -745,6 +774,30 @@ static int wl18xx_set_clk(struct wl1271 *wl)
PLLSH_WCS_PLL_SWALLOW_EN_VAL2);
}
+ /* choose WCS PLL */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_SEL,
+ PLLSH_WL_PLL_SEL_WCS_PLL);
+ if (ret < 0)
+ goto out;
+
+ /* enable both PLLs */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL1);
+ if (ret < 0)
+ goto out;
+
+ udelay(1000);
+
+ /* disable coex PLL */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL2);
+ if (ret < 0)
+ goto out;
+
+ /* reset the swallowing logic */
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
+ PLLSH_COEX_PLL_SWALLOW_EN_VAL2);
+ if (ret < 0)
+ goto out;
+
out:
return ret;
}
@@ -1175,16 +1228,48 @@ static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
}
}
+static const char *wl18xx_rdl_name(enum wl18xx_rdl_num rdl_num)
+{
+ switch (rdl_num) {
+ case RDL_1_HP:
+ return "183xH";
+ case RDL_2_SP:
+ return "183x or 180x";
+ case RDL_3_HP:
+ return "187xH";
+ case RDL_4_SP:
+ return "187x";
+ case RDL_5_SP:
+ return "RDL11 - Not Supported";
+ case RDL_6_SP:
+ return "180xD";
+ case RDL_7_SP:
+ return "RDL13 - Not Supported (1893Q)";
+ case RDL_8_SP:
+ return "18xxQ";
+ case RDL_NONE:
+ return "UNTRIMMED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
{
u32 fuse;
- s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0;
+ s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0, package_type = 0;
int ret;
ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
if (ret < 0)
goto out;
+ ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_2_3, &fuse);
+ if (ret < 0)
+ goto out;
+
+ package_type = (fuse >> WL18XX_PACKAGE_TYPE_OFFSET) & 1;
+
ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_1_3, &fuse);
if (ret < 0)
goto out;
@@ -1192,7 +1277,7 @@ static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
pg_ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
rom = (fuse & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET;
- if (rom <= 0xE)
+ if ((rom <= 0xE) && (package_type == WL18XX_PACKAGE_TYPE_WSP))
metal = (fuse & WL18XX_METAL_VER_MASK) >>
WL18XX_METAL_VER_OFFSET;
else
@@ -1204,11 +1289,9 @@ static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
goto out;
rdl_ver = (fuse & WL18XX_RDL_VER_MASK) >> WL18XX_RDL_VER_OFFSET;
- if (rdl_ver > RDL_MAX)
- rdl_ver = RDL_NONE;
- wl1271_info("wl18xx HW: RDL %d, %s, PG %x.%x (ROM %x)",
- rdl_ver, rdl_names[rdl_ver], pg_ver, metal, rom);
+ wl1271_info("wl18xx HW: %s, PG %d.%d (ROM 0x%x)",
+ wl18xx_rdl_name(rdl_ver), pg_ver, metal, rom);
if (ver)
*ver = pg_ver;
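The PG/ROM/metal/package/RDL fields above are plain mask-and-shift extractions from the two fuse words, using the constants added to reg.h further down. A hedged userspace sketch of the decoding (the sample fuse values are invented, and the non-WSP metal field is omitted for brevity):

/* Sketch: decode wl18xx fuse fields with the masks/offsets from this patch. */
#include <stdint.h>
#include <stdio.h>

#define WL18XX_PG_VER_MASK		0x70
#define WL18XX_PG_VER_OFFSET		4
#define WL18XX_ROM_VER_MASK		0x3e00
#define WL18XX_ROM_VER_OFFSET		9
#define WL18XX_METAL_VER_MASK		0xC
#define WL18XX_METAL_VER_OFFSET		2
#define WL18XX_PACKAGE_TYPE_OFFSET	13
#define WL18XX_RDL_VER_MASK		0x1f00
#define WL18XX_RDL_VER_OFFSET		8

int main(void)
{
	uint32_t fuse_1_3 = 0x00001e74;	/* made-up FUSE_DATA_1_3 value */
	uint32_t fuse_2_3 = 0x00002301;	/* made-up FUSE_DATA_2_3 value */

	unsigned pg_ver  = (fuse_1_3 & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
	unsigned rom     = (fuse_1_3 & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET;
	unsigned metal   = (fuse_1_3 & WL18XX_METAL_VER_MASK) >> WL18XX_METAL_VER_OFFSET;
	unsigned package = (fuse_2_3 >> WL18XX_PACKAGE_TYPE_OFFSET) & 1;
	unsigned rdl_ver = (fuse_2_3 & WL18XX_RDL_VER_MASK) >> WL18XX_RDL_VER_OFFSET;

	printf("PG %u, ROM 0x%x, metal %u, package %u, RDL 0x%x\n",
	       pg_ver, rom, metal, package, rdl_ver);
	return 0;
}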
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
index 05dd8bad2746..a433a75f3cd7 100644
--- a/drivers/net/wireless/ti/wl18xx/reg.h
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -114,6 +114,11 @@
#define PLATFORM_DETECTION 0xA0E3E0
#define OCS_EN 0xA02080
#define PRIMARY_CLK_DETECT 0xA020A6
+#define PLLSH_COEX_PLL_N 0xA02384
+#define PLLSH_COEX_PLL_M 0xA02382
+#define PLLSH_COEX_PLL_SWALLOW_EN 0xA0238E
+#define PLLSH_WL_PLL_SEL 0xA02398
+
#define PLLSH_WCS_PLL_N 0xA02362
#define PLLSH_WCS_PLL_M 0xA02360
#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1 0xA02364
@@ -128,19 +133,30 @@
#define PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK 0xFFFF
#define PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK 0x000F
+#define PLLSH_WL_PLL_EN_VAL1 0x7
+#define PLLSH_WL_PLL_EN_VAL2 0x2
+#define PLLSH_COEX_PLL_SWALLOW_EN_VAL1 0x2
+#define PLLSH_COEX_PLL_SWALLOW_EN_VAL2 0x11
+
#define PLLSH_WCS_PLL_SWALLOW_EN_VAL1 0x1
#define PLLSH_WCS_PLL_SWALLOW_EN_VAL2 0x12
+#define PLLSH_WL_PLL_SEL_WCS_PLL 0x0
+#define PLLSH_WL_PLL_SEL_COEX_PLL 0x1
+
#define WL18XX_REG_FUSE_DATA_1_3 0xA0260C
#define WL18XX_PG_VER_MASK 0x70
#define WL18XX_PG_VER_OFFSET 4
-#define WL18XX_ROM_VER_MASK 0x3
-#define WL18XX_ROM_VER_OFFSET 0
+#define WL18XX_ROM_VER_MASK 0x3e00
+#define WL18XX_ROM_VER_OFFSET 9
#define WL18XX_METAL_VER_MASK 0xC
#define WL18XX_METAL_VER_OFFSET 2
#define WL18XX_NEW_METAL_VER_MASK 0x180
#define WL18XX_NEW_METAL_VER_OFFSET 7
+#define WL18XX_PACKAGE_TYPE_OFFSET 13
+#define WL18XX_PACKAGE_TYPE_WSP 0
+
#define WL18XX_REG_FUSE_DATA_2_3 0xA02614
#define WL18XX_RDL_VER_MASK 0x1f00
#define WL18XX_RDL_VER_OFFSET 8
@@ -201,24 +217,21 @@ enum {
NUM_BOARD_TYPES,
};
-enum {
+enum wl18xx_rdl_num {
RDL_NONE = 0,
RDL_1_HP = 1,
RDL_2_SP = 2,
RDL_3_HP = 3,
RDL_4_SP = 4,
+ RDL_5_SP = 0x11,
+ RDL_6_SP = 0x12,
+ RDL_7_SP = 0x13,
+ RDL_8_SP = 0x14,
_RDL_LAST,
RDL_MAX = _RDL_LAST - 1,
};
-static const char * const rdl_names[] = {
- [RDL_NONE] = "",
- [RDL_1_HP] = "1853 SISO",
- [RDL_2_SP] = "1857 MIMO",
- [RDL_3_HP] = "1893 SISO",
- [RDL_4_SP] = "1897 MIMO",
-};
/* FPGA_SPARE_1 register - used to change the PHY ATPG clock at boot time */
#define WL18XX_PHY_FPGA_SPARE_1 0x8093CA40
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index c9e060795d13..9e5416f8764d 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1126,6 +1126,8 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 template_id_2_4 = wl->scan_templ_id_2_4;
u16 template_id_5 = wl->scan_templ_id_5;
+ wl1271_debug(DEBUG_SCAN, "build probe request band %d", band);
+
skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
ie_len);
if (!skb) {
@@ -1135,8 +1137,6 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (ie_len)
memcpy(skb_put(skb, ie_len), ie, ie_len);
- wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
-
if (sched_scan &&
(wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
template_id_2_4 = wl->sched_scan_templ_id_2_4;
@@ -1172,7 +1172,7 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
if (!skb)
goto out;
- wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
+ wl1271_debug(DEBUG_SCAN, "set ap probe request template");
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
if (wlvif->band == IEEE80211_BAND_2GHZ)
@@ -1607,33 +1607,43 @@ out:
static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
{
- int idx = -1;
-
+ /*
+ * map the given band/channel to the respective predefined
+ * bit expected by the fw
+ */
switch (band) {
- case IEEE80211_BAND_5GHZ:
- if (ch >= 8 && ch <= 16)
- idx = ((ch-8)/4 + 18);
- else if (ch >= 34 && ch <= 64)
- idx = ((ch-34)/2 + 3 + 18);
- else if (ch >= 100 && ch <= 140)
- idx = ((ch-100)/4 + 15 + 18);
- else if (ch >= 149 && ch <= 165)
- idx = ((ch-149)/4 + 26 + 18);
- else
- idx = -1;
- break;
case IEEE80211_BAND_2GHZ:
+ /* channels 1..14 are mapped to 0..13 */
if (ch >= 1 && ch <= 14)
- idx = ch - 1;
- else
- idx = -1;
+ return ch - 1;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ switch (ch) {
+ case 8 ... 16:
+ /* channels 8,12,16 are mapped to 18,19,20 */
+ return 18 + (ch-8)/4;
+ case 34 ... 48:
+ /* channels 34,36..48 are mapped to 21..28 */
+ return 21 + (ch-34)/2;
+ case 52 ... 64:
+ /* channels 52,56..64 are mapped to 29..32 */
+ return 29 + (ch-52)/4;
+ case 100 ... 140:
+ /* channels 100,104..140 are mapped to 33..43 */
+ return 33 + (ch-100)/4;
+ case 149 ... 165:
+ /* channels 149,153..165 are mapped to 44..48 */
+ return 44 + (ch-149)/4;
+ default:
+ break;
+ }
break;
default:
- wl1271_error("get reg conf ch idx - unknown band: %d",
- (int)band);
+ break;
}
- return idx;
+ wl1271_error("%s: unknown band/channel: %d/%d", __func__, band, ch);
+ return -1;
}
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
@@ -1646,7 +1656,7 @@ void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel);
- if (ch_bit_idx > 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
+ if (ch_bit_idx >= 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
}
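The rewritten mapping is easy to verify in isolation. A standalone copy of the same table, with the ranges taken from the comments above and a few spot checks:

/* Standalone copy of the band/channel -> reg-conf bit mapping introduced
 * above. The band constants are simplified stand-ins for the mac80211 enum.
 */
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

static int reg_conf_ch_idx(enum band band, int ch)
{
	if (band == BAND_2GHZ) {
		if (ch >= 1 && ch <= 14)
			return ch - 1;		/* 1..14        -> 0..13  */
		return -1;
	}

	if (ch >= 8 && ch <= 16)
		return 18 + (ch - 8) / 4;	/* 8,12,16      -> 18..20 */
	if (ch >= 34 && ch <= 48)
		return 21 + (ch - 34) / 2;	/* 34,36..48    -> 21..28 */
	if (ch >= 52 && ch <= 64)
		return 29 + (ch - 52) / 4;	/* 52,56..64    -> 29..32 */
	if (ch >= 100 && ch <= 140)
		return 33 + (ch - 100) / 4;	/* 100,104..140 -> 33..43 */
	if (ch >= 149 && ch <= 165)
		return 44 + (ch - 149) / 4;	/* 149,153..165 -> 44..48 */
	return -1;
}

int main(void)
{
	printf("ch 1   (2.4G) -> %d\n", reg_conf_ch_idx(BAND_2GHZ, 1));   /* 0  */
	printf("ch 36  (5G)   -> %d\n", reg_conf_ch_idx(BAND_5GHZ, 36));  /* 22 */
	printf("ch 140 (5G)   -> %d\n", reg_conf_ch_idx(BAND_5GHZ, 140)); /* 43 */
	printf("ch 165 (5G)   -> %d\n", reg_conf_ch_idx(BAND_5GHZ, 165)); /* 48 */
	return 0;
}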
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 38995f90040d..bbdd10632373 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1062,7 +1062,8 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
static const char* const PLT_MODE[] = {
"PLT_OFF",
"PLT_ON",
- "PLT_FEM_DETECT"
+ "PLT_FEM_DETECT",
+ "PLT_CHIP_AWAKE"
};
int ret;
@@ -1088,9 +1089,11 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
if (ret < 0)
goto power_off;
- ret = wl->ops->plt_init(wl);
- if (ret < 0)
- goto power_off;
+ if (plt_mode != PLT_CHIP_AWAKE) {
+ ret = wl->ops->plt_init(wl);
+ if (ret < 0)
+ goto power_off;
+ }
wl->state = WLCORE_STATE_ON;
wl1271_notice("firmware booted in PLT mode %s (%s)",
@@ -2008,6 +2011,47 @@ out:
mutex_unlock(&wl->mutex);
}
+static void wlcore_pending_auth_complete_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+ struct wl12xx_vif *wlvif;
+ unsigned long time_spare;
+ int ret;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wlvif = container_of(dwork, struct wl12xx_vif,
+ pending_auth_complete_work);
+ wl = wlvif->wl;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ /*
+ * Make sure a second really passed since the last auth reply. Maybe
+ * a second auth reply arrived while we were stuck on the mutex.
+ * Check for a little less than the timeout to protect from scheduler
+ * irregularities.
+ */
+ time_spare = jiffies +
+ msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
+ if (!time_after(time_spare, wlvif->pending_auth_reply_time))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* cancel the ROC if active */
+ wlcore_update_inconn_sta(wl, wlvif, NULL, false);
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
u8 policy = find_first_zero_bit(wl->rate_policies_map,
@@ -2159,6 +2203,8 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wlcore_channel_switch_work);
INIT_DELAYED_WORK(&wlvif->connection_loss_work,
wlcore_connection_loss_work);
+ INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
+ wlcore_pending_auth_complete_work);
INIT_LIST_HEAD(&wlvif->list);
setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
@@ -2376,6 +2422,11 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
int ret = 0;
u8 role_type;
+ if (wl->plt) {
+ wl1271_error("Adding Interface not allowed while in PLT mode");
+ return -EBUSY;
+ }
+
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -2590,6 +2641,7 @@ unlock:
cancel_work_sync(&wlvif->rx_streaming_disable_work);
cancel_delayed_work_sync(&wlvif->connection_loss_work);
cancel_delayed_work_sync(&wlvif->channel_switch_work);
+ cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
mutex_lock(&wl->mutex);
}
@@ -2875,6 +2927,25 @@ static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wlvif->rate_set = wlvif->basic_rate_set;
}
+static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool idle)
+{
+ bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+
+ if (idle == cur_idle)
+ return;
+
+ if (idle) {
+ clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+ } else {
+ /* The current firmware only supports sched_scan in idle */
+ if (wl->sched_vif == wlvif)
+ wl->ops->sched_scan_stop(wl, wlvif);
+
+ set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+ }
+}
+
static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_conf *conf, u32 changed)
{
@@ -3969,6 +4040,13 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
}
} else {
if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ /*
+ * AP might be in ROC in case we have just
+ * sent auth reply. handle it.
+ */
+ if (test_bit(wlvif->role_id, wl->roc_map))
+ wl12xx_croc(wl, wlvif->role_id);
+
ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
if (ret < 0)
goto out;
@@ -4120,6 +4198,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
do_join = true;
}
+ if (changed & BSS_CHANGED_IDLE && !is_ibss)
+ wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+
if (changed & BSS_CHANGED_CQM) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
@@ -4656,29 +4737,49 @@ static void wlcore_roc_if_possible(struct wl1271 *wl,
wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
}
-static void wlcore_update_inconn_sta(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- struct wl1271_station *wl_sta,
- bool in_connection)
+/*
+ * when wl_sta is NULL, we treat this call as if coming from a
+ * pending auth reply.
+ * wl->mutex must be taken and the FW must be awake when the call
+ * takes place.
+ */
+void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct wl1271_station *wl_sta, bool in_conn)
{
- if (in_connection) {
- if (WARN_ON(wl_sta->in_connection))
+ if (in_conn) {
+ if (WARN_ON(wl_sta && wl_sta->in_connection))
return;
- wl_sta->in_connection = true;
- if (!wlvif->inconn_count++)
+
+ if (!wlvif->ap_pending_auth_reply &&
+ !wlvif->inconn_count)
wlcore_roc_if_possible(wl, wlvif);
+
+ if (wl_sta) {
+ wl_sta->in_connection = true;
+ wlvif->inconn_count++;
+ } else {
+ wlvif->ap_pending_auth_reply = true;
+ }
} else {
- if (!wl_sta->in_connection)
+ if (wl_sta && !wl_sta->in_connection)
+ return;
+
+ if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
return;
- wl_sta->in_connection = false;
- wlvif->inconn_count--;
- if (WARN_ON(wlvif->inconn_count < 0))
+ if (WARN_ON(wl_sta && !wlvif->inconn_count))
return;
- if (!wlvif->inconn_count)
- if (test_bit(wlvif->role_id, wl->roc_map))
- wl12xx_croc(wl, wlvif->role_id);
+ if (wl_sta) {
+ wl_sta->in_connection = false;
+ wlvif->inconn_count--;
+ } else {
+ wlvif->ap_pending_auth_reply = false;
+ }
+
+ if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
+ test_bit(wlvif->role_id, wl->roc_map))
+ wl12xx_croc(wl, wlvif->role_id);
}
}
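The reworked helper effectively reference-counts the ROC: it is started for the first in-connection station or pending auth reply, and cancelled only once the station count reaches zero and the pending-auth flag is cleared. A stripped-down userspace model of that bookkeeping (hypothetical names, firmware calls replaced by printf):

/* Toy model of the ROC bookkeeping in wlcore_update_inconn_sta(): roc() for
 * the first "user", croc() once the last one goes away. Not driver code.
 */
#include <stdbool.h>
#include <stdio.h>

struct vif_model {
	int inconn_count;
	bool ap_pending_auth_reply;
	bool roc_active;
};

static void update_inconn(struct vif_model *v, bool is_sta, bool in_conn)
{
	if (in_conn) {
		if (!v->ap_pending_auth_reply && !v->inconn_count &&
		    !v->roc_active) {
			v->roc_active = true;
			printf("roc()\n");
		}
		if (is_sta)
			v->inconn_count++;
		else
			v->ap_pending_auth_reply = true;
	} else {
		if (is_sta)
			v->inconn_count--;
		else
			v->ap_pending_auth_reply = false;

		if (!v->inconn_count && !v->ap_pending_auth_reply &&
		    v->roc_active) {
			v->roc_active = false;
			printf("croc()\n");
		}
	}
}

int main(void)
{
	struct vif_model v = { 0, false, false };

	update_inconn(&v, false, true);	 /* auth reply sent   -> roc()  */
	update_inconn(&v, true,  true);	 /* station appears             */
	update_inconn(&v, false, false); /* auth ROC timeout            */
	update_inconn(&v, true,  false); /* station associated -> croc() */
	return 0;
}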
@@ -5313,10 +5414,7 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
- { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
- { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
- { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
@@ -5896,14 +5994,20 @@ static const struct wiphy_wowlan_support wlcore_wowlan_support = {
};
#endif
+static irqreturn_t wlcore_hardirq(int irq, void *cookie)
+{
+ return IRQ_WAKE_THREAD;
+}
+
static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
struct wl1271 *wl = context;
struct platform_device *pdev = wl->pdev;
- struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
struct wl12xx_platform_data *pdata = pdev_data->pdata;
unsigned long irqflags;
int ret;
+ irq_handler_t hardirq_fn = NULL;
if (fw) {
wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
@@ -5932,12 +6036,14 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
wl->platform_quirks = pdata->platform_quirks;
wl->if_ops = pdev_data->if_ops;
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+ if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
irqflags = IRQF_TRIGGER_RISING;
- else
+ hardirq_fn = wlcore_hardirq;
+ } else {
irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+ }
- ret = request_threaded_irq(wl->irq, NULL, wlcore_irq,
+ ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
irqflags, pdev->name, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 98066d40c2ad..26bfc365ba70 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -83,6 +83,10 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
struct wl12xx_vif *wlvif;
u32 timeout;
+ /* We do not enter elp sleep in PLT mode */
+ if (wl->plt)
+ return;
+
if (wl->sleep_auth != WL1271_PSM_ELP)
return;
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index f407101e525b..13e743df2e31 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -174,17 +174,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
/* if radar is set, we ignore the passive flag */
(radar ||
!!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
- wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
- req_channels[i]->band,
- req_channels[i]->center_freq);
- wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
- req_channels[i]->hw_value,
- req_channels[i]->flags);
- wl1271_debug(DEBUG_SCAN, "max_power %d",
- req_channels[i]->max_power);
- wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d",
- min_dwell_time_active,
- max_dwell_time_active);
+
if (flags & IEEE80211_CHAN_RADAR) {
channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
@@ -222,6 +212,17 @@ wlcore_scan_get_channels(struct wl1271 *wl,
*n_pactive_ch);
}
+ wl1271_debug(DEBUG_SCAN, "freq %d, ch. %d, flags 0x%x, power %d, min/max_dwell %d/%d%s%s",
+ req_channels[i]->center_freq,
+ req_channels[i]->hw_value,
+ req_channels[i]->flags,
+ req_channels[i]->max_power,
+ min_dwell_time_active,
+ max_dwell_time_active,
+ flags & IEEE80211_CHAN_RADAR ?
+ ", DFS" : "",
+ flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ ", PASSIVE" : "");
j++;
}
}
@@ -364,7 +365,7 @@ wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
struct cfg80211_ssid *ssids = req->ssids;
int ret = 0, type, i, j, n_match_ssids = 0;
- wl1271_debug(DEBUG_CMD, "cmd sched scan ssid list");
+ wl1271_debug((DEBUG_CMD | DEBUG_SCAN), "cmd sched scan ssid list");
/* count the match sets that contain SSIDs */
for (i = 0; i < req->n_match_sets; i++)
@@ -442,8 +443,6 @@ wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
}
}
- wl1271_dump(DEBUG_SCAN, "SSID_LIST: ", cmd, sizeof(*cmd));
-
ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_SSID_CFG, cmd,
sizeof(*cmd), 0);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 1b0cd98e35f1..b2c018dccf18 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -335,7 +335,7 @@ static int wl1271_probe(struct spi_device *spi)
if (!pdev_data)
goto out;
- pdev_data->pdata = spi->dev.platform_data;
+ pdev_data->pdata = dev_get_platdata(&spi->dev);
if (!pdev_data->pdata) {
dev_err(&spi->dev, "no platform data\n");
ret = -ENODEV;
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 527590f2adfb..a3b7d950d8e9 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -297,7 +297,8 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
ret = wl1271_plt_stop(wl);
break;
case PLT_ON:
- ret = wl1271_plt_start(wl, PLT_ON);
+ case PLT_CHIP_AWAKE:
+ ret = wl1271_plt_start(wl, val);
break;
case PLT_FEM_DETECT:
ret = wl1271_tm_detect_fem(wl, tb);
@@ -361,6 +362,7 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct wl1271 *wl = hw->priv;
struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
+ u32 nla_cmd;
int err;
err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy);
@@ -370,7 +372,14 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (!tb[WL1271_TM_ATTR_CMD_ID])
return -EINVAL;
- switch (nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID])) {
+ nla_cmd = nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID]);
+
+ /* Only SET_PLT_MODE is allowed in case of mode PLT_CHIP_AWAKE */
+ if (wl->plt_mode == PLT_CHIP_AWAKE &&
+ nla_cmd != WL1271_TM_CMD_SET_PLT_MODE)
+ return -EOPNOTSUPP;
+
+ switch (nla_cmd) {
case WL1271_TM_CMD_TEST:
return wl1271_tm_cmd_test(wl, tb);
case WL1271_TM_CMD_INTERROGATE:
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 7e93fe63a2c7..87cd707affa2 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -86,19 +86,34 @@ void wl1271_free_tx_id(struct wl1271 *wl, int id)
EXPORT_SYMBOL(wl1271_free_tx_id);
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
+ hdr = (struct ieee80211_hdr *)(skb->data +
+ sizeof(struct wl1271_tx_hw_descr));
+ if (!ieee80211_is_auth(hdr->frame_control))
+ return;
+
/*
* add the station to the known list before transmitting the
* authentication response. this way it won't get de-authed by FW
* when transmitting too soon.
*/
- hdr = (struct ieee80211_hdr *)(skb->data +
- sizeof(struct wl1271_tx_hw_descr));
- if (ieee80211_is_auth(hdr->frame_control))
- wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+ wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+
+ /*
+ * ROC for 1 second on the AP channel for completing the connection.
+ * Note the ROC will be continued by the update_sta_state callbacks
+ * once the station reaches the associated state.
+ */
+ wlcore_update_inconn_sta(wl, wlvif, NULL, true);
+ wlvif->pending_auth_reply_time = jiffies;
+ cancel_delayed_work(&wlvif->pending_auth_complete_work);
+ ieee80211_queue_delayed_work(wl->hw,
+ &wlvif->pending_auth_complete_work,
+ msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
}
static void wl1271_tx_regulate_link(struct wl1271 *wl,
@@ -386,7 +401,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
(cipher == WLAN_CIPHER_SUITE_WEP104);
- if (WARN_ON(is_wep && wlvif->default_key != idx)) {
+ if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
ret = wl1271_set_default_wep_key(wl, wlvif, idx);
if (ret < 0)
return ret;
@@ -404,7 +419,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
- wl1271_tx_ap_update_inconnection_sta(wl, skb);
+ wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
wl1271_tx_regulate_link(wl, wlvif, hlid);
}
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 55aa4acf9105..35489c300da1 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -56,6 +56,9 @@
/* Used for management frames and dummy packets */
#define WL1271_TID_MGMT 7
+/* stop a ROC for pending authentication reply after this time (ms) */
+#define WLCORE_PEND_AUTH_ROC_TIMEOUT 1000
+
struct wl127x_tx_mem {
/*
* Number of extra memory blocks to allocate for this packet
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 0034979e97cb..54ce5d5e84db 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -481,6 +481,8 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf);
void wlcore_regdomain_config(struct wl1271 *wl);
+void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct wl1271_station *wl_sta, bool in_conn);
static inline void
wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index e5e146435fe7..2a50e089b0e7 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -255,6 +255,7 @@ enum wl12xx_vif_flags {
WLVIF_FLAG_CS_PROGRESS,
WLVIF_FLAG_AP_PROBE_RESP_SET,
WLVIF_FLAG_IN_USE,
+ WLVIF_FLAG_ACTIVE,
};
struct wl12xx_vif;
@@ -307,6 +308,7 @@ enum plt_mode {
PLT_OFF = 0,
PLT_ON = 1,
PLT_FEM_DETECT = 2,
+ PLT_CHIP_AWAKE = 3
};
struct wl12xx_rx_filter_field {
@@ -456,6 +458,15 @@ struct wl12xx_vif {
*/
int hw_queue_base;
+ /* do we have a pending auth reply? (and ROC) */
+ bool ap_pending_auth_reply;
+
+ /* time when we sent the pending auth reply */
+ unsigned long pending_auth_reply_time;
+
+ /* work for canceling ROC after pending auth reply */
+ struct delayed_work pending_auth_complete_work;
+
/*
* This struct must be last!
* data that has to be saved across reconfigs (e.g. recovery)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 5715318d6bab..08ae01b41c83 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -87,9 +87,13 @@ struct pending_tx_info {
struct xenvif_rx_meta {
int id;
int size;
+ int gso_type;
int gso_size;
};
+#define GSO_BIT(type) \
+ (1 << XEN_NETIF_GSO_TYPE_ ## type)
+
/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF
@@ -150,10 +154,12 @@ struct xenvif {
u8 fe_dev_addr[6];
/* Frontend feature information. */
+ int gso_mask;
+ int gso_prefix_mask;
+
u8 can_sg:1;
- u8 gso:1;
- u8 gso_prefix:1;
- u8 csum:1;
+ u8 ip_csum:1;
+ u8 ipv6_csum:1;
/* Internal feature information. */
u8 can_queue:1; /* can queue packets for receiver? */
@@ -163,6 +169,7 @@ struct xenvif {
unsigned long credit_usec;
unsigned long remaining_credit;
struct timer_list credit_timeout;
+ u64 credit_window_start;
/* Statistics */
unsigned long rx_gso_checksum_fixup;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 01bb854c7f62..b78ee10a956a 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -214,10 +214,14 @@ static netdev_features_t xenvif_fix_features(struct net_device *dev,
if (!vif->can_sg)
features &= ~NETIF_F_SG;
- if (!vif->gso && !vif->gso_prefix)
+ if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
features &= ~NETIF_F_TSO;
- if (!vif->csum)
+ if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
+ features &= ~NETIF_F_TSO6;
+ if (!vif->ip_csum)
features &= ~NETIF_F_IP_CSUM;
+ if (!vif->ipv6_csum)
+ features &= ~NETIF_F_IPV6_CSUM;
return features;
}
@@ -306,18 +310,19 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->domid = domid;
vif->handle = handle;
vif->can_sg = 1;
- vif->csum = 1;
+ vif->ip_csum = 1;
vif->dev = dev;
vif->credit_bytes = vif->remaining_credit = ~0UL;
vif->credit_usec = 0UL;
init_timer(&vif->credit_timeout);
- /* Initialize 'expires' now: it's used to track the credit window. */
- vif->credit_timeout.expires = jiffies;
+ vif->credit_window_start = get_jiffies_64();
dev->netdev_ops = &xenvif_netdev_ops;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
- dev->features = dev->hw_features;
+ dev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+ dev->features = dev->hw_features | NETIF_F_RXCSUM;
SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
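With per-type masks, the GSO feature checks reduce to bit tests against GSO_BIT(TCPV4)/GSO_BIT(TCPV6) instead of separate booleans. A userspace sketch of the mask handling used by xenvif_fix_features() and the xenbus negotiation (the enum values are illustrative stand-ins, not the netif.h definitions):

/* Sketch of the GSO feature-mask logic added to xen-netback. */
#include <stdio.h>

enum gso_type { GSO_TYPE_NONE = 0, GSO_TYPE_TCPV4 = 1, GSO_TYPE_TCPV6 = 2 };

#define GSO_BIT(t)	(1 << (t))

struct vif_model {
	int gso_mask;		/* frontend wants in-band GSO extra info  */
	int gso_prefix_mask;	/* frontend wants GSO prefix descriptors  */
};

static void fix_features(const struct vif_model *v)
{
	int all = v->gso_mask | v->gso_prefix_mask;

	printf("TSO  (IPv4): %s\n",
	       (all & GSO_BIT(GSO_TYPE_TCPV4)) ? "on" : "off");
	printf("TSO6 (IPv6): %s\n",
	       (all & GSO_BIT(GSO_TYPE_TCPV6)) ? "on" : "off");
}

int main(void)
{
	/* frontend advertised feature-gso-tcpv4 and feature-gso-tcpv6 */
	struct vif_model v = {
		.gso_mask = GSO_BIT(GSO_TYPE_TCPV4) | GSO_BIT(GSO_TYPE_TCPV6),
		.gso_prefix_mask = 0,
	};

	/* the two mask sets must be mutually exclusive, as xenbus.c enforces */
	if (v.gso_mask & v.gso_prefix_mask)
		printf("error: gso and gso-prefix are mutually exclusive\n");

	fix_features(&v);
	return 0;
}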
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f3e591c611de..919b6509455c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -109,15 +109,12 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}
-/*
- * This is the amount of packet we copy rather than map, so that the
- * guest can't fiddle with the contents of the headers while we do
- * packet processing on them (netfilter, routing, etc).
+/* This is a minimum size for the linear area to avoid lots of
+ * calls to __pskb_pull_tail() as we set up checksum offsets. The
+ * value 128 was chosen as it covers all IPv4 headers and, most
+ * likely, all IPv6 headers.
*/
-#define PKT_PROT_LEN (ETH_HLEN + \
- VLAN_HLEN + \
- sizeof(struct iphdr) + MAX_IPOPTLEN + \
- sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
+#define PKT_PROT_LEN 128
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
@@ -145,7 +142,7 @@ static int max_required_rx_slots(struct xenvif *vif)
int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- if (vif->can_sg || vif->gso || vif->gso_prefix)
+ if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
return max;
@@ -317,6 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
+ meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
meta->size = 0;
meta->id = req->id;
@@ -339,6 +337,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta;
unsigned long bytes;
+ int gso_type;
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -397,7 +396,14 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
}
/* Leave a gap for the GSO descriptor. */
- if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ else
+ gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
+ if (*head && ((1 << gso_type) & vif->gso_mask))
vif->rx.req_cons++;
*head = 0; /* There must be something in this buffer now. */
@@ -428,14 +434,28 @@ static int xenvif_gop_skb(struct sk_buff *skb,
unsigned char *data;
int head = 1;
int old_meta_prod;
+ int gso_type;
+ int gso_size;
old_meta_prod = npo->meta_prod;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ gso_size = skb_shinfo(skb)->gso_size;
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ gso_size = skb_shinfo(skb)->gso_size;
+ } else {
+ gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ gso_size = 0;
+ }
+
/* Set up a GSO prefix descriptor, if necessary */
- if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
+ if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
- meta->gso_size = skb_shinfo(skb)->gso_size;
+ meta->gso_type = gso_type;
+ meta->gso_size = gso_size;
meta->size = 0;
meta->id = req->id;
}
@@ -443,10 +463,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
- if (!vif->gso_prefix)
- meta->gso_size = skb_shinfo(skb)->gso_size;
- else
+ if ((1 << gso_type) & vif->gso_mask) {
+ meta->gso_type = gso_type;
+ meta->gso_size = gso_size;
+ } else {
+ meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
+ }
meta->size = 0;
meta->id = req->id;
@@ -592,7 +615,8 @@ void xenvif_rx_action(struct xenvif *vif)
vif = netdev_priv(skb->dev);
- if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+ if ((1 << vif->meta[npo.meta_cons].gso_type) &
+ vif->gso_prefix_mask) {
resp = RING_GET_RESPONSE(&vif->rx,
vif->rx.rsp_prod_pvt++);
@@ -629,7 +653,8 @@ void xenvif_rx_action(struct xenvif *vif)
vif->meta[npo.meta_cons].size,
flags);
- if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+ if ((1 << vif->meta[npo.meta_cons].gso_type) &
+ vif->gso_mask) {
struct xen_netif_extra_info *gso =
(struct xen_netif_extra_info *)
RING_GET_RESPONSE(&vif->rx,
@@ -637,8 +662,8 @@ void xenvif_rx_action(struct xenvif *vif)
resp->flags |= XEN_NETRXF_extra_info;
+ gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
- gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
@@ -1101,15 +1126,20 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
return -EINVAL;
}
- /* Currently only TCPv4 S.O. is supported. */
- if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+ switch (gso->u.gso.type) {
+ case XEN_NETIF_GSO_TYPE_TCPV4:
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ break;
+ case XEN_NETIF_GSO_TYPE_TCPV6:
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ break;
+ default:
netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
xenvif_fatal_tx_err(vif);
return -EINVAL;
}
skb_shinfo(skb)->gso_size = gso->u.gso.size;
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -1118,61 +1148,74 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
return 0;
}
-static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+{
+ if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
+ /* If we need to pull up, then pull up to the max so we
+ * won't need to do it again.
+ */
+ int target = min_t(int, skb->len, MAX_TCP_HEADER);
+ __pskb_pull_tail(skb, target - skb_headlen(skb));
+ }
+}
+
+static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
+ int recalculate_partial_csum)
{
- struct iphdr *iph;
+ struct iphdr *iph = (void *)skb->data;
+ unsigned int header_size;
+ unsigned int off;
int err = -EPROTO;
- int recalculate_partial_csum = 0;
- /*
- * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
- * peers can fail to set NETRXF_csum_blank when sending a GSO
- * frame. In this case force the SKB to CHECKSUM_PARTIAL and
- * recalculate the partial checksum.
- */
- if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
- vif->rx_gso_checksum_fixup++;
- skb->ip_summed = CHECKSUM_PARTIAL;
- recalculate_partial_csum = 1;
- }
+ off = sizeof(struct iphdr);
- /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
+ header_size = skb->network_header + off + MAX_IPOPTLEN;
+ maybe_pull_tail(skb, header_size);
- if (skb->protocol != htons(ETH_P_IP))
- goto out;
+ off = iph->ihl * 4;
- iph = (void *)skb->data;
switch (iph->protocol) {
case IPPROTO_TCP:
- if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+ if (!skb_partial_csum_set(skb, off,
offsetof(struct tcphdr, check)))
goto out;
if (recalculate_partial_csum) {
struct tcphdr *tcph = tcp_hdr(skb);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct tcphdr);
+ maybe_pull_tail(skb, header_size);
+
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - iph->ihl*4,
+ skb->len - off,
IPPROTO_TCP, 0);
}
break;
case IPPROTO_UDP:
- if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+ if (!skb_partial_csum_set(skb, off,
offsetof(struct udphdr, check)))
goto out;
if (recalculate_partial_csum) {
struct udphdr *udph = udp_hdr(skb);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct udphdr);
+ maybe_pull_tail(skb, header_size);
+
udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - iph->ihl*4,
+ skb->len - off,
IPPROTO_UDP, 0);
}
break;
default:
if (net_ratelimit())
netdev_err(vif->dev,
- "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
+ "Attempting to checksum a non-TCP/UDP packet, "
+ "dropping a protocol %d packet\n",
iph->protocol);
goto out;
}
@@ -1183,11 +1226,162 @@ out:
return err;
}
+static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
+ int recalculate_partial_csum)
+{
+ int err = -EPROTO;
+ struct ipv6hdr *ipv6h = (void *)skb->data;
+ u8 nexthdr;
+ unsigned int header_size;
+ unsigned int off;
+ bool fragment = false;
+ bool done;
+
+ done = false;
+
+ off = sizeof(struct ipv6hdr);
+
+ header_size = skb->network_header + off;
+ maybe_pull_tail(skb, header_size);
+
+ nexthdr = ipv6h->nexthdr;
+
+ while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
+ !done) {
+ switch (nexthdr) {
+ case IPPROTO_DSTOPTS:
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_ROUTING: {
+ struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct ipv6_opt_hdr);
+ maybe_pull_tail(skb, header_size);
+
+ nexthdr = hp->nexthdr;
+ off += ipv6_optlen(hp);
+ break;
+ }
+ case IPPROTO_AH: {
+ struct ip_auth_hdr *hp = (void *)(skb->data + off);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct ip_auth_hdr);
+ maybe_pull_tail(skb, header_size);
+
+ nexthdr = hp->nexthdr;
+ off += (hp->hdrlen+2)<<2;
+ break;
+ }
+ case IPPROTO_FRAGMENT:
+ fragment = true;
+ /* fall through */
+ default:
+ done = true;
+ break;
+ }
+ }
+
+ if (!done) {
+ if (net_ratelimit())
+ netdev_err(vif->dev, "Failed to parse packet header\n");
+ goto out;
+ }
+
+ if (fragment) {
+ if (net_ratelimit())
+ netdev_err(vif->dev, "Packet is a fragment!\n");
+ goto out;
+ }
+
+ switch (nexthdr) {
+ case IPPROTO_TCP:
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct tcphdr, check)))
+ goto out;
+
+ if (recalculate_partial_csum) {
+ struct tcphdr *tcph = tcp_hdr(skb);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct tcphdr);
+ maybe_pull_tail(skb, header_size);
+
+ tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+ &ipv6h->daddr,
+ skb->len - off,
+ IPPROTO_TCP, 0);
+ }
+ break;
+ case IPPROTO_UDP:
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct udphdr, check)))
+ goto out;
+
+ if (recalculate_partial_csum) {
+ struct udphdr *udph = udp_hdr(skb);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct udphdr);
+ maybe_pull_tail(skb, header_size);
+
+ udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+ &ipv6h->daddr,
+ skb->len - off,
+ IPPROTO_UDP, 0);
+ }
+ break;
+ default:
+ if (net_ratelimit())
+ netdev_err(vif->dev,
+ "Attempting to checksum a non-TCP/UDP packet, "
+ "dropping a protocol %d packet\n",
+ nexthdr);
+ goto out;
+ }
+
+ err = 0;
+
+out:
+ return err;
+}
+
+static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+{
+ int err = -EPROTO;
+ int recalculate_partial_csum = 0;
+
+ /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+ * peers can fail to set NETRXF_csum_blank when sending a GSO
+ * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+ * recalculate the partial checksum.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+ vif->rx_gso_checksum_fixup++;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ recalculate_partial_csum = 1;
+ }
+
+ /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
+
+ return err;
+}
+
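checksum_setup_ipv6() has to skip any hop-by-hop, routing, destination-options and AH headers before it can point skb_partial_csum_set() at the transport header, and it refuses fragments outright. A simplified, skb-free version of that walk over a flat buffer (protocol numbers are the standard IANA values):

/* Simplified extension-header walk: start after the fixed IPv6 header and
 * follow nexthdr until a transport protocol (or a fragment) is found.
 */
#include <stdint.h>
#include <stdio.h>

#define NEXTHDR_HOP		0	/* hop-by-hop options   */
#define NEXTHDR_TCP		6
#define NEXTHDR_ROUTING		43
#define NEXTHDR_FRAGMENT	44
#define NEXTHDR_AUTH		51
#define NEXTHDR_DEST		60	/* destination options  */

/* returns the transport protocol and sets *offp to its offset,
 * or -1 on fragments / truncated input
 */
static int ipv6_find_transport(const uint8_t *pkt, unsigned int len,
			       unsigned int *offp)
{
	unsigned int off = 40;		/* sizeof(struct ipv6hdr) */
	uint8_t nexthdr = pkt[6];	/* ipv6hdr->nexthdr       */

	while (off + 2 <= len) {
		switch (nexthdr) {
		case NEXTHDR_HOP:
		case NEXTHDR_ROUTING:
		case NEXTHDR_DEST:
			nexthdr = pkt[off];
			off += (pkt[off + 1] + 1) * 8;	/* ipv6_optlen() */
			break;
		case NEXTHDR_AUTH:
			nexthdr = pkt[off];
			off += (pkt[off + 1] + 2) * 4;	/* AH length units */
			break;
		case NEXTHDR_FRAGMENT:
			return -1;	/* can't checksum fragments */
		default:
			*offp = off;
			return nexthdr;	/* e.g. TCP or UDP */
		}
	}
	return -1;
}

int main(void)
{
	/* 40-byte IPv6 header (nexthdr = hop-by-hop), 8-byte HBH header
	 * pointing at TCP; the TCP header would then start at offset 48.
	 */
	uint8_t pkt[64] = { 0 };
	unsigned int off = 0;

	pkt[6] = NEXTHDR_HOP;
	pkt[40] = NEXTHDR_TCP;	/* HBH: next header */
	pkt[41] = 0;		/* HBH: hdr ext len = 0 -> 8 bytes */

	int proto = ipv6_find_transport(pkt, sizeof(pkt), &off);
	printf("transport protocol %d at offset %u\n", proto, off);
	return 0;
}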
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
- unsigned long now = jiffies;
- unsigned long next_credit =
- vif->credit_timeout.expires +
+ u64 now = get_jiffies_64();
+ u64 next_credit = vif->credit_window_start +
msecs_to_jiffies(vif->credit_usec / 1000);
/* Timer could already be pending in rare cases. */
@@ -1195,8 +1389,8 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
return true;
/* Passed the point where we can replenish credit? */
- if (time_after_eq(now, next_credit)) {
- vif->credit_timeout.expires = now;
+ if (time_after_eq64(now, next_credit)) {
+ vif->credit_window_start = now;
tx_add_credit(vif);
}
@@ -1208,6 +1402,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
tx_credit_callback;
mod_timer(&vif->credit_timeout,
next_credit);
+ vif->credit_window_start = next_credit;
return true;
}
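Moving the credit window to a 64-bit timestamp sidesteps the wrap problems a 32-bit jiffies value runs into after long idle periods: a time_after_eq()-style comparison flips its answer once the stored value is more than half the counter range old. A small demonstration with plain integers standing in for jiffies:

/* Why a 32-bit "last replenish" timestamp is fragile compared to a 64-bit
 * one. Same signed-difference trick as the kernel's time_after_eq().
 */
#include <stdint.h>
#include <stdio.h>

static int time_after_eq32(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

static int time_after_eq64(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) >= 0;
}

int main(void)
{
	uint32_t old32 = 1000;			/* credit window start */
	uint64_t old64 = 1000;
	uint64_t elapsed = 0x90000000ULL;	/* long idle period    */

	uint32_t now32 = old32 + (uint32_t)elapsed;
	uint64_t now64 = old64 + elapsed;

	printf("32-bit: now after start? %d (wrong)\n",
	       time_after_eq32(now32, old32));
	printf("64-bit: now after start? %d (right)\n",
	       time_after_eq64(now64, old64));
	return 0;
}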
@@ -1428,12 +1623,7 @@ static int xenvif_tx_submit(struct xenvif *vif, int budget)
xenvif_fill_frags(vif, skb);
- /*
- * If the initial fragment was < PKT_PROT_LEN then
- * pull through some bytes from the other fragments to
- * increase the linear region to PKT_PROT_LEN bytes.
- */
- if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
+ if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
int target = min_t(int, skb->len, PKT_PROT_LEN);
__pskb_pull_tail(skb, target - skb_headlen(skb));
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index b45bce20ad76..f0358992b04f 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -39,11 +39,15 @@ static int connect_rings(struct backend_info *);
static void connect(struct backend_info *);
static void backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
+static void set_backend_state(struct backend_info *be,
+ enum xenbus_state state);
static int netback_remove(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
+ set_backend_state(be, XenbusStateClosed);
+
unregister_hotplug_status_watch(be);
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
@@ -101,6 +105,22 @@ static int netback_probe(struct xenbus_device *dev,
goto abort_transaction;
}
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
+ "%d", sg);
+ if (err) {
+ message = "writing feature-gso-tcpv6";
+ goto abort_transaction;
+ }
+
+ /* We support partial checksum setup for IPv6 packets */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-ipv6-csum-offload",
+ "%d", 1);
+ if (err) {
+ message = "writing feature-ipv6-csum-offload";
+ goto abort_transaction;
+ }
+
/* We support rx-copy path. */
err = xenbus_printf(xbt, dev->nodename,
"feature-rx-copy", "%d", 1);
@@ -557,20 +577,50 @@ static int connect_rings(struct backend_info *be)
val = 0;
vif->can_sg = !!val;
+ vif->gso_mask = 0;
+ vif->gso_prefix_mask = 0;
+
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
"%d", &val) < 0)
val = 0;
- vif->gso = !!val;
+ if (val)
+ vif->gso_mask |= GSO_BIT(TCPV4);
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
"%d", &val) < 0)
val = 0;
- vif->gso_prefix = !!val;
+ if (val)
+ vif->gso_prefix_mask |= GSO_BIT(TCPV4);
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
+ "%d", &val) < 0)
+ val = 0;
+ if (val)
+ vif->gso_mask |= GSO_BIT(TCPV6);
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
+ "%d", &val) < 0)
+ val = 0;
+ if (val)
+ vif->gso_prefix_mask |= GSO_BIT(TCPV6);
+
+ if (vif->gso_mask & vif->gso_prefix_mask) {
+ xenbus_dev_fatal(dev, err,
+ "%s: gso and gso prefix flags are not "
+ "mutually exclusive",
+ dev->otherend);
+ return -EOPNOTSUPP;
+ }
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
"%d", &val) < 0)
val = 0;
- vif->csum = !val;
+ vif->ip_csum = !val;
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
+ "%d", &val) < 0)
+ val = 0;
+ vif->ipv6_csum = !!val;
/* Map the shared frame, irq etc. */
err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 36808bf25677..dd1011e55cb5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -952,7 +952,7 @@ static int handle_incoming_queue(struct net_device *dev,
u64_stats_update_end(&stats->syncp);
/* Pass it up. */
- netif_receive_skb(skb);
+ napi_gro_receive(&np->napi, skb);
}
return packets_dropped;
@@ -1051,6 +1051,8 @@ err:
if (work_done < budget) {
int more_to_do = 0;
+ napi_gro_flush(napi, false);
+
local_irq_save(flags);
RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index b0b64ccb7d7d..c1fb20603338 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -46,6 +46,16 @@ config NFC_SIM
If unsure, say N.
+config NFC_PORT100
+ tristate "Sony NFC Port-100 Series USB device support"
+ depends on USB
+ depends on NFC_DIGITAL
+ help
+ This adds support for Sony Port-100 chip based USB devices such as the
+ RC-S380 dongle.
+
+ If unsure, say N.
+
source "drivers/nfc/pn544/Kconfig"
source "drivers/nfc/microread/Kconfig"
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index be7636abcb3f..c715fe8582a8 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_NFC_PN533) += pn533.o
obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
obj-$(CONFIG_NFC_MEI_PHY) += mei_phy.o
obj-$(CONFIG_NFC_SIM) += nfcsim.o
+obj-$(CONFIG_NFC_PORT100) += port100.o
ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 606bf55e76ec..85f90090cc1d 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -18,6 +18,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nfc.h>
@@ -60,13 +62,13 @@ int nfc_mei_phy_enable(void *phy_id)
r = mei_cl_enable_device(phy->device);
if (r < 0) {
- pr_err("MEI_PHY: Could not enable device\n");
+ pr_err("Could not enable device\n");
return r;
}
r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
if (r) {
- pr_err("MEY_PHY: Event cb registration failed\n");
+ pr_err("Event cb registration failed\n");
mei_cl_disable_device(phy->device);
phy->powered = 0;
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 101089495bf8..696e3467eccc 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -18,6 +18,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/delay.h>
@@ -95,12 +97,8 @@ static int check_crc(struct sk_buff *skb)
crc = crc ^ skb->data[i];
if (crc != skb->data[skb->len-1]) {
- pr_err(MICROREAD_I2C_DRIVER_NAME
- ": CRC error 0x%x != 0x%x\n",
- crc, skb->data[skb->len-1]);
-
- pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
-
+ pr_err("CRC error 0x%x != 0x%x\n", crc, skb->data[skb->len-1]);
+ pr_info("%s: BAD CRC\n", __func__);
return -EPERM;
}
@@ -160,18 +158,15 @@ static int microread_i2c_read(struct microread_i2c_phy *phy,
u8 tmp[MICROREAD_I2C_LLC_MAX_SIZE - 1];
struct i2c_client *client = phy->i2c_dev;
- pr_debug("%s\n", __func__);
-
r = i2c_master_recv(client, &len, 1);
if (r != 1) {
- dev_err(&client->dev, "cannot read len byte\n");
+ nfc_err(&client->dev, "cannot read len byte\n");
return -EREMOTEIO;
}
if ((len < MICROREAD_I2C_LLC_MIN_SIZE) ||
(len > MICROREAD_I2C_LLC_MAX_SIZE)) {
- dev_err(&client->dev, "invalid len byte\n");
- pr_err("invalid len byte\n");
+ nfc_err(&client->dev, "invalid len byte\n");
r = -EBADMSG;
goto flush;
}
@@ -228,7 +223,6 @@ static irqreturn_t microread_i2c_irq_thread_fn(int irq, void *phy_id)
}
client = phy->i2c_dev;
- dev_dbg(&client->dev, "IRQ\n");
if (phy->hard_fault != 0)
return IRQ_HANDLED;
@@ -263,20 +257,18 @@ static int microread_i2c_probe(struct i2c_client *client,
dev_get_platdata(&client->dev);
int r;
- dev_dbg(&client->dev, "client %p", client);
+ dev_dbg(&client->dev, "client %p\n", client);
if (!pdata) {
- dev_err(&client->dev, "client %p: missing platform data",
+ nfc_err(&client->dev, "client %p: missing platform data\n",
client);
return -EINVAL;
}
phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy),
GFP_KERNEL);
- if (!phy) {
- dev_err(&client->dev, "Can't allocate microread phy");
+ if (!phy)
return -ENOMEM;
- }
i2c_set_clientdata(client, phy);
phy->i2c_dev = client;
@@ -285,7 +277,7 @@ static int microread_i2c_probe(struct i2c_client *client,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
MICROREAD_I2C_DRIVER_NAME, phy);
if (r) {
- dev_err(&client->dev, "Unable to register IRQ handler");
+ nfc_err(&client->dev, "Unable to register IRQ handler\n");
return r;
}
@@ -296,7 +288,7 @@ static int microread_i2c_probe(struct i2c_client *client,
if (r < 0)
goto err_irq;
- dev_info(&client->dev, "Probed");
+ nfc_info(&client->dev, "Probed\n");
return 0;
@@ -310,8 +302,6 @@ static int microread_i2c_remove(struct i2c_client *client)
{
struct microread_i2c_phy *phy = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s\n", __func__);
-
microread_remove(phy->hdev);
free_irq(client->irq, phy);
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index cdf1bc53b257..72fafec3d460 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -18,6 +18,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/nfc.h>
@@ -59,8 +61,6 @@ static int microread_mei_remove(struct mei_cl_device *device)
{
struct nfc_mei_phy *phy = mei_cl_get_drvdata(device);
- pr_info("Removing microread\n");
-
microread_remove(phy->hdev);
nfc_mei_phy_free(phy);
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
index cdb9f6de132a..970ded6bfcf5 100644
--- a/drivers/nfc/microread/microread.c
+++ b/drivers/nfc/microread/microread.c
@@ -18,6 +18,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -546,7 +548,7 @@ exit:
kfree_skb(skb);
if (r)
- pr_err("Failed to handle discovered target err=%d", r);
+ pr_err("Failed to handle discovered target err=%d\n", r);
}
static int microread_event_received(struct nfc_hci_dev *hdev, u8 gate,
@@ -656,7 +658,6 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
info = kzalloc(sizeof(struct microread_info), GFP_KERNEL);
if (!info) {
- pr_err("Cannot allocate memory for microread_info.\n");
r = -ENOMEM;
goto err_info_alloc;
}
@@ -686,7 +687,7 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
MICROREAD_CMD_TAILROOM,
phy_payload);
if (!info->hdev) {
- pr_err("Cannot allocate nfc hdev.\n");
+ pr_err("Cannot allocate nfc hdev\n");
r = -ENOMEM;
goto err_alloc_hdev;
}
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index 9a53f13c88df..93111fa8d282 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -19,10 +19,10 @@
#include <linux/nfc.h>
#include <net/nfc/nfc.h>
-#define DEV_ERR(_dev, fmt, args...) nfc_dev_err(&_dev->nfc_dev->dev, \
+#define DEV_ERR(_dev, fmt, args...) nfc_err(&_dev->nfc_dev->dev, \
"%s: " fmt, __func__, ## args)
-#define DEV_DBG(_dev, fmt, args...) nfc_dev_dbg(&_dev->nfc_dev->dev, \
+#define DEV_DBG(_dev, fmt, args...) dev_dbg(&_dev->nfc_dev->dev, \
"%s: " fmt, __func__, ## args)
#define NFCSIM_VERSION "0.1"
@@ -64,7 +64,7 @@ static struct workqueue_struct *wq;
static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown)
{
- DEV_DBG(dev, "shutdown=%d", shutdown);
+ DEV_DBG(dev, "shutdown=%d\n", shutdown);
mutex_lock(&dev->lock);
@@ -84,7 +84,7 @@ static int nfcsim_target_found(struct nfcsim *dev)
{
struct nfc_target nfc_tgt;
- DEV_DBG(dev, "");
+ DEV_DBG(dev, "\n");
memset(&nfc_tgt, 0, sizeof(struct nfc_target));
@@ -98,7 +98,7 @@ static int nfcsim_dev_up(struct nfc_dev *nfc_dev)
{
struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- DEV_DBG(dev, "");
+ DEV_DBG(dev, "\n");
mutex_lock(&dev->lock);
@@ -113,7 +113,7 @@ static int nfcsim_dev_down(struct nfc_dev *nfc_dev)
{
struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- DEV_DBG(dev, "");
+ DEV_DBG(dev, "\n");
mutex_lock(&dev->lock);
@@ -143,7 +143,7 @@ static int nfcsim_dep_link_up(struct nfc_dev *nfc_dev,
remote_gb = nfc_get_local_general_bytes(peer->nfc_dev, &remote_gb_len);
if (!remote_gb) {
- DEV_ERR(peer, "Can't get remote general bytes");
+ DEV_ERR(peer, "Can't get remote general bytes\n");
mutex_unlock(&peer->lock);
return -EINVAL;
@@ -155,7 +155,7 @@ static int nfcsim_dep_link_up(struct nfc_dev *nfc_dev,
rc = nfc_set_remote_general_bytes(nfc_dev, remote_gb, remote_gb_len);
if (rc) {
- DEV_ERR(dev, "Can't set remote general bytes");
+ DEV_ERR(dev, "Can't set remote general bytes\n");
mutex_unlock(&dev->lock);
return rc;
}
@@ -172,7 +172,7 @@ static int nfcsim_dep_link_down(struct nfc_dev *nfc_dev)
{
struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- DEV_DBG(dev, "");
+ DEV_DBG(dev, "\n");
nfcsim_cleanup_dev(dev, 0);
@@ -188,7 +188,7 @@ static int nfcsim_start_poll(struct nfc_dev *nfc_dev,
mutex_lock(&dev->lock);
if (dev->polling_mode != NFCSIM_POLL_NONE) {
- DEV_ERR(dev, "Already in polling mode");
+ DEV_ERR(dev, "Already in polling mode\n");
rc = -EBUSY;
goto exit;
}
@@ -200,7 +200,7 @@ static int nfcsim_start_poll(struct nfc_dev *nfc_dev,
dev->polling_mode |= NFCSIM_POLL_TARGET;
if (dev->polling_mode == NFCSIM_POLL_NONE) {
- DEV_ERR(dev, "Unsupported polling mode");
+ DEV_ERR(dev, "Unsupported polling mode\n");
rc = -EINVAL;
goto exit;
}
@@ -210,7 +210,7 @@ static int nfcsim_start_poll(struct nfc_dev *nfc_dev,
queue_delayed_work(wq, &dev->poll_work, 0);
- DEV_DBG(dev, "Start polling: im: 0x%X, tm: 0x%X", im_protocols,
+ DEV_DBG(dev, "Start polling: im: 0x%X, tm: 0x%X\n", im_protocols,
tm_protocols);
rc = 0;
@@ -224,7 +224,7 @@ static void nfcsim_stop_poll(struct nfc_dev *nfc_dev)
{
struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- DEV_DBG(dev, "Stop poll");
+ DEV_DBG(dev, "Stop poll\n");
mutex_lock(&dev->lock);
@@ -240,7 +240,7 @@ static int nfcsim_activate_target(struct nfc_dev *nfc_dev,
{
struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- DEV_DBG(dev, "");
+ DEV_DBG(dev, "\n");
return -ENOTSUPP;
}
@@ -250,7 +250,7 @@ static void nfcsim_deactivate_target(struct nfc_dev *nfc_dev,
{
struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- DEV_DBG(dev, "");
+ DEV_DBG(dev, "\n");
}
static void nfcsim_wq_recv(struct work_struct *work)
@@ -267,7 +267,7 @@ static void nfcsim_wq_recv(struct work_struct *work)
if (dev->initiator) {
if (!dev->cb) {
- DEV_ERR(dev, "Null recv callback");
+ DEV_ERR(dev, "Null recv callback\n");
dev_kfree_skb(dev->clone_skb);
goto exit;
}
@@ -310,7 +310,7 @@ static int nfcsim_tx(struct nfc_dev *nfc_dev, struct nfc_target *target,
peer->clone_skb = skb_clone(skb, GFP_KERNEL);
if (!peer->clone_skb) {
- DEV_ERR(dev, "skb_clone failed");
+ DEV_ERR(dev, "skb_clone failed\n");
mutex_unlock(&peer->lock);
err = -ENOMEM;
goto exit;
@@ -397,13 +397,13 @@ static void nfcsim_wq_poll(struct work_struct *work)
nfcsim_set_polling_mode(dev);
if (dev->curr_polling_mode == NFCSIM_POLL_NONE) {
- DEV_DBG(dev, "Not polling");
+ DEV_DBG(dev, "Not polling\n");
goto unlock;
}
DEV_DBG(dev, "Polling as %s",
dev->curr_polling_mode == NFCSIM_POLL_INITIATOR ?
- "initiator" : "target");
+ "initiator\n" : "target\n");
if (dev->curr_polling_mode == NFCSIM_POLL_TARGET)
goto sched_work;
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 59f95d8fc98c..71308645593f 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -146,13 +146,11 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name)
unsigned long comp_ret;
int rc;
- nfc_dev_dbg(&drv->pdev->dev, "get_bts_file_name entry");
-
skb = nfcwilink_skb_alloc(sizeof(struct nci_vs_nfcc_info_cmd),
GFP_KERNEL);
if (!skb) {
- nfc_dev_err(&drv->pdev->dev,
- "no memory for nci_vs_nfcc_info_cmd");
+ nfc_err(&drv->pdev->dev,
+ "no memory for nci_vs_nfcc_info_cmd\n");
return -ENOMEM;
}
@@ -170,21 +168,19 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name)
comp_ret = wait_for_completion_timeout(&drv->completed,
msecs_to_jiffies(NFCWILINK_CMD_TIMEOUT));
- nfc_dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld",
- comp_ret);
+ dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld\n",
+ comp_ret);
if (comp_ret == 0) {
- nfc_dev_err(&drv->pdev->dev,
- "timeout on wait_for_completion_timeout");
+ nfc_err(&drv->pdev->dev,
+ "timeout on wait_for_completion_timeout\n");
return -ETIMEDOUT;
}
- nfc_dev_dbg(&drv->pdev->dev, "nci_vs_nfcc_info_rsp: plen %d, status %d",
- drv->nfcc_info.plen,
- drv->nfcc_info.status);
+ dev_dbg(&drv->pdev->dev, "nci_vs_nfcc_info_rsp: plen %d, status %d\n",
+ drv->nfcc_info.plen, drv->nfcc_info.status);
if ((drv->nfcc_info.plen != 5) || (drv->nfcc_info.status != 0)) {
- nfc_dev_err(&drv->pdev->dev,
- "invalid nci_vs_nfcc_info_rsp");
+ nfc_err(&drv->pdev->dev, "invalid nci_vs_nfcc_info_rsp\n");
return -EINVAL;
}
@@ -195,7 +191,7 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name)
drv->nfcc_info.sw_ver_z,
drv->nfcc_info.patch_id);
- nfc_dev_info(&drv->pdev->dev, "nfcwilink FW file name: %s", file_name);
+ nfc_info(&drv->pdev->dev, "nfcwilink FW file name: %s\n", file_name);
return 0;
}
@@ -207,15 +203,13 @@ static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len)
unsigned long comp_ret;
int rc;
- nfc_dev_dbg(&drv->pdev->dev, "send_bts_cmd entry");
-
/* verify valid cmd for the NFC channel */
if ((len <= sizeof(struct nfcwilink_hdr)) ||
(len > BTS_FILE_CMD_MAX_LEN) ||
(hdr->chnl != NFCWILINK_CHNL) ||
(hdr->opcode != NFCWILINK_OPCODE)) {
- nfc_dev_err(&drv->pdev->dev,
- "ignoring invalid bts cmd, len %d, chnl %d, opcode %d",
+ nfc_err(&drv->pdev->dev,
+ "ignoring invalid bts cmd, len %d, chnl %d, opcode %d\n",
len, hdr->chnl, hdr->opcode);
return 0;
}
@@ -226,7 +220,7 @@ static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len)
skb = nfcwilink_skb_alloc(len, GFP_KERNEL);
if (!skb) {
- nfc_dev_err(&drv->pdev->dev, "no memory for bts cmd");
+ nfc_err(&drv->pdev->dev, "no memory for bts cmd\n");
return -ENOMEM;
}
@@ -238,11 +232,11 @@ static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len)
comp_ret = wait_for_completion_timeout(&drv->completed,
msecs_to_jiffies(NFCWILINK_CMD_TIMEOUT));
- nfc_dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld",
- comp_ret);
+ dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld\n",
+ comp_ret);
if (comp_ret == 0) {
- nfc_dev_err(&drv->pdev->dev,
- "timeout on wait_for_completion_timeout");
+ nfc_err(&drv->pdev->dev,
+ "timeout on wait_for_completion_timeout\n");
return -ETIMEDOUT;
}
@@ -257,8 +251,6 @@ static int nfcwilink_download_fw(struct nfcwilink *drv)
__u8 *ptr;
int len, rc;
- nfc_dev_dbg(&drv->pdev->dev, "download_fw entry");
-
set_bit(NFCWILINK_FW_DOWNLOAD, &drv->flags);
rc = nfcwilink_get_bts_file_name(drv, file_name);
@@ -267,7 +259,7 @@ static int nfcwilink_download_fw(struct nfcwilink *drv)
rc = request_firmware(&fw, file_name, &drv->pdev->dev);
if (rc) {
- nfc_dev_err(&drv->pdev->dev, "request_firmware failed %d", rc);
+ nfc_err(&drv->pdev->dev, "request_firmware failed %d\n", rc);
/* if the file is not found, don't exit with failure */
if (rc == -ENOENT)
@@ -280,14 +272,14 @@ static int nfcwilink_download_fw(struct nfcwilink *drv)
ptr = (__u8 *)fw->data;
if ((len == 0) || (ptr == NULL)) {
- nfc_dev_dbg(&drv->pdev->dev,
- "request_firmware returned size %d", len);
+ dev_dbg(&drv->pdev->dev,
+ "request_firmware returned size %d\n", len);
goto release_fw;
}
if (__le32_to_cpu(((struct bts_file_hdr *)ptr)->magic) !=
BTS_FILE_HDR_MAGIC) {
- nfc_dev_err(&drv->pdev->dev, "wrong bts magic number");
+ nfc_err(&drv->pdev->dev, "wrong bts magic number\n");
rc = -EINVAL;
goto release_fw;
}
@@ -302,8 +294,8 @@ static int nfcwilink_download_fw(struct nfcwilink *drv)
action_len =
__le16_to_cpu(((struct bts_file_action *)ptr)->len);
- nfc_dev_dbg(&drv->pdev->dev, "bts_file_action type %d, len %d",
- action_type, action_len);
+ dev_dbg(&drv->pdev->dev, "bts_file_action type %d, len %d\n",
+ action_type, action_len);
switch (action_type) {
case BTS_FILE_ACTION_TYPE_SEND_CMD:
@@ -333,8 +325,6 @@ static void nfcwilink_register_complete(void *priv_data, char data)
{
struct nfcwilink *drv = priv_data;
- nfc_dev_dbg(&drv->pdev->dev, "register_complete entry");
-
/* store ST registration status */
drv->st_register_cb_status = data;
@@ -356,7 +346,7 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
return -EFAULT;
}
- nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
+ dev_dbg(&drv->pdev->dev, "receive entry, len %d\n", skb->len);
/* strip the ST header
(apart for the chnl byte, which is not received in the hdr) */
@@ -370,7 +360,7 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
/* Forward skb to NCI core layer */
rc = nci_recv_frame(drv->ndev, skb);
if (rc < 0) {
- nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc);
+ nfc_err(&drv->pdev->dev, "nci_recv_frame failed %d\n", rc);
return rc;
}
@@ -396,8 +386,6 @@ static int nfcwilink_open(struct nci_dev *ndev)
unsigned long comp_ret;
int rc;
- nfc_dev_dbg(&drv->pdev->dev, "open entry");
-
if (test_and_set_bit(NFCWILINK_RUNNING, &drv->flags)) {
rc = -EBUSY;
goto exit;
@@ -415,9 +403,9 @@ static int nfcwilink_open(struct nci_dev *ndev)
&drv->completed,
msecs_to_jiffies(NFCWILINK_REGISTER_TIMEOUT));
- nfc_dev_dbg(&drv->pdev->dev,
- "wait_for_completion_timeout returned %ld",
- comp_ret);
+ dev_dbg(&drv->pdev->dev,
+ "wait_for_completion_timeout returned %ld\n",
+ comp_ret);
if (comp_ret == 0) {
/* timeout */
@@ -425,13 +413,12 @@ static int nfcwilink_open(struct nci_dev *ndev)
goto clear_exit;
} else if (drv->st_register_cb_status != 0) {
rc = drv->st_register_cb_status;
- nfc_dev_err(&drv->pdev->dev,
- "st_register_cb failed %d", rc);
+ nfc_err(&drv->pdev->dev,
+ "st_register_cb failed %d\n", rc);
goto clear_exit;
}
} else {
- nfc_dev_err(&drv->pdev->dev,
- "st_register failed %d", rc);
+ nfc_err(&drv->pdev->dev, "st_register failed %d\n", rc);
goto clear_exit;
}
}
@@ -441,8 +428,8 @@ static int nfcwilink_open(struct nci_dev *ndev)
drv->st_write = nfcwilink_proto.write;
if (nfcwilink_download_fw(drv)) {
- nfc_dev_err(&drv->pdev->dev, "nfcwilink_download_fw failed %d",
- rc);
+ nfc_err(&drv->pdev->dev, "nfcwilink_download_fw failed %d\n",
+ rc);
/* open should succeed, even if the FW download failed */
}
@@ -460,14 +447,12 @@ static int nfcwilink_close(struct nci_dev *ndev)
struct nfcwilink *drv = nci_get_drvdata(ndev);
int rc;
- nfc_dev_dbg(&drv->pdev->dev, "close entry");
-
if (!test_and_clear_bit(NFCWILINK_RUNNING, &drv->flags))
return 0;
rc = st_unregister(&nfcwilink_proto);
if (rc)
- nfc_dev_err(&drv->pdev->dev, "st_unregister failed %d", rc);
+ nfc_err(&drv->pdev->dev, "st_unregister failed %d\n", rc);
drv->st_write = NULL;
@@ -480,7 +465,7 @@ static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb)
struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000};
long len;
- nfc_dev_dbg(&drv->pdev->dev, "send entry, len %d", skb->len);
+ dev_dbg(&drv->pdev->dev, "send entry, len %d\n", skb->len);
if (!test_bit(NFCWILINK_RUNNING, &drv->flags)) {
kfree_skb(skb);
@@ -498,7 +483,7 @@ static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb)
len = drv->st_write(skb);
if (len < 0) {
kfree_skb(skb);
- nfc_dev_err(&drv->pdev->dev, "st_write failed %ld", len);
+ nfc_err(&drv->pdev->dev, "st_write failed %ld\n", len);
return -EFAULT;
}
@@ -517,8 +502,6 @@ static int nfcwilink_probe(struct platform_device *pdev)
int rc;
__u32 protocols;
- nfc_dev_dbg(&pdev->dev, "probe entry");
-
drv = devm_kzalloc(&pdev->dev, sizeof(struct nfcwilink), GFP_KERNEL);
if (!drv) {
rc = -ENOMEM;
@@ -538,7 +521,7 @@ static int nfcwilink_probe(struct platform_device *pdev)
NFCWILINK_HDR_LEN,
0);
if (!drv->ndev) {
- nfc_dev_err(&pdev->dev, "nci_allocate_device failed");
+ nfc_err(&pdev->dev, "nci_allocate_device failed\n");
rc = -ENOMEM;
goto exit;
}
@@ -548,7 +531,7 @@ static int nfcwilink_probe(struct platform_device *pdev)
rc = nci_register_device(drv->ndev);
if (rc < 0) {
- nfc_dev_err(&pdev->dev, "nci_register_device failed %d", rc);
+ nfc_err(&pdev->dev, "nci_register_device failed %d\n", rc);
goto free_dev_exit;
}
@@ -568,8 +551,6 @@ static int nfcwilink_remove(struct platform_device *pdev)
struct nfcwilink *drv = dev_get_drvdata(&pdev->dev);
struct nci_dev *ndev;
- nfc_dev_dbg(&pdev->dev, "remove entry");
-
if (!drv)
return -EFAULT;
@@ -578,8 +559,6 @@ static int nfcwilink_remove(struct platform_device *pdev)
nci_unregister_device(ndev);
nci_free_device(ndev);
- dev_set_drvdata(&pdev->dev, NULL);
-
return 0;
}
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 5df730be88a3..2daf04c07338 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -150,6 +150,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
#define PN533_CMD_TG_GET_DATA 0x86
#define PN533_CMD_TG_SET_DATA 0x8e
+#define PN533_CMD_TG_SET_META_DATA 0x94
#define PN533_CMD_UNDEF 0xff
#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
@@ -373,6 +374,8 @@ struct pn533 {
struct delayed_work poll_work;
struct work_struct mi_rx_work;
struct work_struct mi_tx_work;
+ struct work_struct mi_tm_rx_work;
+ struct work_struct mi_tm_tx_work;
struct work_struct tg_work;
struct work_struct rf_work;
@@ -387,6 +390,7 @@ struct pn533 {
struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
u8 poll_mod_count;
u8 poll_mod_curr;
+ u8 poll_dep;
u32 poll_protocols;
u32 listen_protocols;
struct timer_list listen_timer;
@@ -722,32 +726,32 @@ static void pn533_recv_response(struct urb *urb)
break; /* success */
case -ECONNRESET:
case -ENOENT:
- nfc_dev_dbg(&dev->interface->dev,
- "The urb has been canceled (status %d)",
- urb->status);
+ dev_dbg(&dev->interface->dev,
+ "The urb has been canceled (status %d)\n",
+ urb->status);
goto sched_wq;
case -ESHUTDOWN:
default:
- nfc_dev_err(&dev->interface->dev,
- "Urb failure (status %d)", urb->status);
+ nfc_err(&dev->interface->dev,
+ "Urb failure (status %d)\n", urb->status);
goto sched_wq;
}
in_frame = dev->in_urb->transfer_buffer;
- nfc_dev_dbg(&dev->interface->dev, "Received a frame.");
+ dev_dbg(&dev->interface->dev, "Received a frame\n");
print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
dev->ops->rx_frame_size(in_frame), false);
if (!dev->ops->rx_is_frame_valid(in_frame, dev)) {
- nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
+ nfc_err(&dev->interface->dev, "Received an invalid frame\n");
cmd->status = -EIO;
goto sched_wq;
}
if (!pn533_rx_frame_is_cmd_response(dev, in_frame)) {
- nfc_dev_err(&dev->interface->dev,
- "It it not the response to the last command");
+ nfc_err(&dev->interface->dev,
+			"It is not the response to the last command\n");
cmd->status = -EIO;
goto sched_wq;
}
@@ -777,29 +781,29 @@ static void pn533_recv_ack(struct urb *urb)
break; /* success */
case -ECONNRESET:
case -ENOENT:
- nfc_dev_dbg(&dev->interface->dev,
- "The urb has been stopped (status %d)",
- urb->status);
+ dev_dbg(&dev->interface->dev,
+ "The urb has been stopped (status %d)\n",
+ urb->status);
goto sched_wq;
case -ESHUTDOWN:
default:
- nfc_dev_err(&dev->interface->dev,
- "Urb failure (status %d)", urb->status);
+ nfc_err(&dev->interface->dev,
+ "Urb failure (status %d)\n", urb->status);
goto sched_wq;
}
in_frame = dev->in_urb->transfer_buffer;
if (!pn533_std_rx_frame_is_ack(in_frame)) {
- nfc_dev_err(&dev->interface->dev, "Received an invalid ack");
+ nfc_err(&dev->interface->dev, "Received an invalid ack\n");
cmd->status = -EIO;
goto sched_wq;
}
rc = pn533_submit_urb_for_response(dev, GFP_ATOMIC);
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "usb_submit_urb failed with result %d", rc);
+ nfc_err(&dev->interface->dev,
+ "usb_submit_urb failed with result %d\n", rc);
cmd->status = rc;
goto sched_wq;
}
@@ -823,8 +827,6 @@ static int pn533_send_ack(struct pn533 *dev, gfp_t flags)
/* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
-
dev->out_urb->transfer_buffer = ack;
dev->out_urb->transfer_buffer_length = sizeof(ack);
rc = usb_submit_urb(dev->out_urb, flags);
@@ -927,7 +929,7 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
struct pn533_cmd *cmd;
int rc = 0;
- nfc_dev_dbg(&dev->interface->dev, "Sending command 0x%x", cmd_code);
+ dev_dbg(&dev->interface->dev, "Sending command 0x%x\n", cmd_code);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
@@ -954,8 +956,8 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
goto unlock;
}
- nfc_dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x", __func__,
- cmd_code);
+ dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x\n",
+ __func__, cmd_code);
INIT_LIST_HEAD(&cmd->queue);
list_add_tail(&cmd->queue, &dev->cmd_queue);
@@ -1168,14 +1170,14 @@ static void pn533_send_complete(struct urb *urb)
break; /* success */
case -ECONNRESET:
case -ENOENT:
- nfc_dev_dbg(&dev->interface->dev,
- "The urb has been stopped (status %d)",
- urb->status);
+ dev_dbg(&dev->interface->dev,
+ "The urb has been stopped (status %d)\n",
+ urb->status);
break;
case -ESHUTDOWN:
default:
- nfc_dev_err(&dev->interface->dev,
- "Urb failure (status %d)", urb->status);
+ nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
+ urb->status);
}
}
@@ -1452,8 +1454,8 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
struct nfc_target nfc_tgt;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s - modulation=%d", __func__,
- dev->poll_mod_curr);
+ dev_dbg(&dev->interface->dev, "%s: modulation=%d\n",
+ __func__, dev->poll_mod_curr);
if (tg != 1)
return -EPROTO;
@@ -1475,8 +1477,8 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
rc = pn533_target_found_type_b(&nfc_tgt, tgdata, tgdata_len);
break;
default:
- nfc_dev_err(&dev->interface->dev,
- "Unknown current poll modulation");
+ nfc_err(&dev->interface->dev,
+ "Unknown current poll modulation\n");
return -EPROTO;
}
@@ -1484,14 +1486,14 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
return rc;
if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
- nfc_dev_dbg(&dev->interface->dev,
- "The Tg found doesn't have the desired protocol");
+ dev_dbg(&dev->interface->dev,
+ "The Tg found doesn't have the desired protocol\n");
return -EAGAIN;
}
- nfc_dev_dbg(&dev->interface->dev,
- "Target found - supported protocols: 0x%x",
- nfc_tgt.supported_protocols);
+ dev_dbg(&dev->interface->dev,
+ "Target found - supported protocols: 0x%x\n",
+ nfc_tgt.supported_protocols);
dev->tgt_available_prots = nfc_tgt.supported_protocols;
@@ -1548,7 +1550,8 @@ static int pn533_start_poll_complete(struct pn533 *dev, struct sk_buff *resp)
u8 nbtg, tg, *tgdata;
int rc, tgdata_len;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ /* Toggle the DEP polling */
+ dev->poll_dep = 1;
nbtg = resp->data[0];
tg = resp->data[1];
@@ -1624,37 +1627,130 @@ static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev)
#define PN533_CMD_DATAEXCH_HEAD_LEN 1
#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
+static void pn533_wq_tm_mi_recv(struct work_struct *work);
+static struct sk_buff *pn533_build_response(struct pn533 *dev);
+
static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
struct sk_buff *resp)
{
- u8 status;
+ struct sk_buff *skb;
+ u8 status, ret, mi;
+ int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
- if (IS_ERR(resp))
+ if (IS_ERR(resp)) {
+ skb_queue_purge(&dev->resp_q);
return PTR_ERR(resp);
+ }
status = resp->data[0];
+
+ ret = status & PN533_CMD_RET_MASK;
+ mi = status & PN533_CMD_MI_MASK;
+
skb_pull(resp, sizeof(status));
- if (status != 0) {
- nfc_tm_deactivated(dev->nfc_dev);
- dev->tgt_mode = 0;
- dev_kfree_skb(resp);
- return 0;
+ if (ret != PN533_CMD_RET_SUCCESS) {
+ rc = -EIO;
+ goto error;
}
- return nfc_tm_data_received(dev->nfc_dev, resp);
+ skb_queue_tail(&dev->resp_q, resp);
+
+ if (mi) {
+ queue_work(dev->wq, &dev->mi_tm_rx_work);
+ return -EINPROGRESS;
+ }
+
+ skb = pn533_build_response(dev);
+ if (!skb) {
+ rc = -EIO;
+ goto error;
+ }
+
+ return nfc_tm_data_received(dev->nfc_dev, skb);
+
+error:
+ nfc_tm_deactivated(dev->nfc_dev);
+ dev->tgt_mode = 0;
+ skb_queue_purge(&dev->resp_q);
+ dev_kfree_skb(resp);
+
+ return rc;
+}
+
+static void pn533_wq_tm_mi_recv(struct work_struct *work)
+{
+ struct pn533 *dev = container_of(work, struct pn533, mi_tm_rx_work);
+ struct sk_buff *skb;
+ int rc;
+
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+ skb = pn533_alloc_skb(dev, 0);
+ if (!skb)
+ return;
+
+ rc = pn533_send_cmd_direct_async(dev,
+ PN533_CMD_TG_GET_DATA,
+ skb,
+ pn533_tm_get_data_complete,
+ NULL);
+
+ if (rc < 0)
+ dev_kfree_skb(skb);
+
+ return;
+}
+
+static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
+ struct sk_buff *resp);
+static void pn533_wq_tm_mi_send(struct work_struct *work)
+{
+ struct pn533 *dev = container_of(work, struct pn533, mi_tm_tx_work);
+ struct sk_buff *skb;
+ int rc;
+
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+ /* Grab the first skb in the queue */
+ skb = skb_dequeue(&dev->fragment_skb);
+ if (skb == NULL) { /* No more data */
+ /* Reset the queue for future use */
+ skb_queue_head_init(&dev->fragment_skb);
+ goto error;
+ }
+
+ /* last entry - remove MI bit */
+ if (skb_queue_len(&dev->fragment_skb) == 0) {
+ rc = pn533_send_cmd_direct_async(dev, PN533_CMD_TG_SET_DATA,
+ skb, pn533_tm_send_complete, NULL);
+ } else
+ rc = pn533_send_cmd_direct_async(dev,
+ PN533_CMD_TG_SET_META_DATA,
+ skb, pn533_tm_send_complete, NULL);
+
+ if (rc == 0) /* success */
+ return;
+
+ dev_err(&dev->interface->dev,
+		"Error %d when trying to perform set meta data_exchange\n", rc);
+
+ dev_kfree_skb(skb);
+
+error:
+ pn533_send_ack(dev, GFP_KERNEL);
+ queue_work(dev->wq, &dev->cmd_work);
}
static void pn533_wq_tg_get_data(struct work_struct *work)
{
struct pn533 *dev = container_of(work, struct pn533, tg_work);
-
struct sk_buff *skb;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, 0);
if (!skb)
@@ -1676,7 +1772,7 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
size_t gb_len;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (resp->len < ATR_REQ_GB_OFFSET + 1)
return -EINVAL;
@@ -1684,8 +1780,8 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
mode = resp->data[0];
cmd = &resp->data[1];
- nfc_dev_dbg(&dev->interface->dev, "Target mode 0x%x len %d\n",
- mode, resp->len);
+ dev_dbg(&dev->interface->dev, "Target mode 0x%x len %d\n",
+ mode, resp->len);
if ((mode & PN533_INIT_TARGET_RESP_FRAME_MASK) ==
PN533_INIT_TARGET_RESP_ACTIVE)
@@ -1700,8 +1796,8 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
comm_mode, gb, gb_len);
if (rc < 0) {
- nfc_dev_err(&dev->interface->dev,
- "Error when signaling target activation");
+ nfc_err(&dev->interface->dev,
+ "Error when signaling target activation\n");
return rc;
}
@@ -1715,7 +1811,7 @@ static void pn533_listen_mode_timer(unsigned long data)
{
struct pn533 *dev = (struct pn533 *)data;
- nfc_dev_dbg(&dev->interface->dev, "Listen mode timeout");
+ dev_dbg(&dev->interface->dev, "Listen mode timeout\n");
dev->cancel_listen = 1;
@@ -1730,13 +1826,12 @@ static int pn533_rf_complete(struct pn533 *dev, void *arg,
{
int rc = 0;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
- nfc_dev_err(&dev->interface->dev, "%s RF setting error %d",
- __func__, rc);
+		nfc_err(&dev->interface->dev, "RF setting error %d\n", rc);
return rc;
}
@@ -1754,7 +1849,7 @@ static void pn533_wq_rf(struct work_struct *work)
struct sk_buff *skb;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, 2);
if (!skb)
@@ -1767,25 +1862,136 @@ static void pn533_wq_rf(struct work_struct *work)
pn533_rf_complete, NULL);
if (rc < 0) {
dev_kfree_skb(skb);
- nfc_dev_err(&dev->interface->dev, "RF setting error %d", rc);
+ nfc_err(&dev->interface->dev, "RF setting error %d\n", rc);
}
return;
}
+static int pn533_poll_dep_complete(struct pn533 *dev, void *arg,
+ struct sk_buff *resp)
+{
+ struct pn533_cmd_jump_dep_response *rsp;
+ struct nfc_target nfc_target;
+ u8 target_gt_len;
+ int rc;
+
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ rsp = (struct pn533_cmd_jump_dep_response *)resp->data;
+
+ rc = rsp->status & PN533_CMD_RET_MASK;
+ if (rc != PN533_CMD_RET_SUCCESS) {
+ /* Not target found, turn radio off */
+ queue_work(dev->wq, &dev->rf_work);
+
+ dev_kfree_skb(resp);
+ return 0;
+ }
+
+	dev_dbg(&dev->interface->dev, "Creating new target\n");
+
+ nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ nfc_target.nfcid1_len = 10;
+ memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len);
+ rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
+ if (rc)
+ goto error;
+
+ dev->tgt_available_prots = 0;
+ dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
+
+ /* ATR_RES general bytes are located at offset 17 */
+ target_gt_len = resp->len - 17;
+ rc = nfc_set_remote_general_bytes(dev->nfc_dev,
+ rsp->gt, target_gt_len);
+ if (!rc) {
+ rc = nfc_dep_link_is_up(dev->nfc_dev,
+ dev->nfc_dev->targets[0].idx,
+ 0, NFC_RF_INITIATOR);
+
+ if (!rc)
+ pn533_poll_reset_mod_list(dev);
+ }
+error:
+ dev_kfree_skb(resp);
+ return rc;
+}
+
+#define PASSIVE_DATA_LEN 5
+static int pn533_poll_dep(struct nfc_dev *nfc_dev)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ struct sk_buff *skb;
+ int rc, skb_len;
+ u8 *next, nfcid3[NFC_NFCID3_MAXSIZE];
+ u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
+
+	dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+ if (!dev->gb) {
+ dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
+
+ if (!dev->gb || !dev->gb_len) {
+ dev->poll_dep = 0;
+ queue_work(dev->wq, &dev->rf_work);
+ }
+ }
+
+ skb_len = 3 + dev->gb_len; /* ActPass + BR + Next */
+ skb_len += PASSIVE_DATA_LEN;
+
+ /* NFCID3 */
+ skb_len += NFC_NFCID3_MAXSIZE;
+ nfcid3[0] = 0x1;
+ nfcid3[1] = 0xfe;
+ get_random_bytes(nfcid3 + 2, 6);
+
+ skb = pn533_alloc_skb(dev, skb_len);
+ if (!skb)
+ return -ENOMEM;
+
+ *skb_put(skb, 1) = 0x01; /* Active */
+ *skb_put(skb, 1) = 0x02; /* 424 kbps */
+
+ next = skb_put(skb, 1); /* Next */
+ *next = 0;
+
+ /* Copy passive data */
+ memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
+ *next |= 1;
+
+ /* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
+ memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
+ NFC_NFCID3_MAXSIZE);
+ *next |= 2;
+
+ memcpy(skb_put(skb, dev->gb_len), dev->gb, dev->gb_len);
+ *next |= 4; /* We have some Gi */
+
+ rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
+ pn533_poll_dep_complete, NULL);
+
+ if (rc < 0)
+ dev_kfree_skb(skb);
+
+ return rc;
+}
+
static int pn533_poll_complete(struct pn533 *dev, void *arg,
struct sk_buff *resp)
{
struct pn533_poll_modulations *cur_mod;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
- nfc_dev_err(&dev->interface->dev, "%s Poll complete error %d",
- __func__, rc);
+ nfc_err(&dev->interface->dev, "%s Poll complete error %d\n",
+ __func__, rc);
if (rc == -ENOENT) {
if (dev->poll_mod_count != 0)
@@ -1793,8 +1999,8 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
else
goto stop_poll;
} else if (rc < 0) {
- nfc_dev_err(&dev->interface->dev,
- "Error %d when running poll", rc);
+ nfc_err(&dev->interface->dev,
+ "Error %d when running poll\n", rc);
goto stop_poll;
}
}
@@ -1813,7 +2019,7 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
goto done;
if (!dev->poll_mod_count) {
- nfc_dev_dbg(&dev->interface->dev, "Polling has been stopped.");
+ dev_dbg(&dev->interface->dev, "Polling has been stopped\n");
goto done;
}
@@ -1826,7 +2032,7 @@ done:
return rc;
stop_poll:
- nfc_dev_err(&dev->interface->dev, "Polling operation has been stopped");
+ nfc_err(&dev->interface->dev, "Polling operation has been stopped\n");
pn533_poll_reset_mod_list(dev);
dev->poll_protocols = 0;
@@ -1856,8 +2062,13 @@ static int pn533_send_poll_frame(struct pn533 *dev)
mod = dev->poll_mod_active[dev->poll_mod_curr];
- nfc_dev_dbg(&dev->interface->dev, "%s mod len %d\n",
- __func__, mod->len);
+ dev_dbg(&dev->interface->dev, "%s mod len %d\n",
+ __func__, mod->len);
+
+ if (dev->poll_dep) {
+ dev->poll_dep = 0;
+ return pn533_poll_dep(dev->nfc_dev);
+ }
if (mod->len == 0) { /* Listen mode */
cmd_code = PN533_CMD_TG_INIT_AS_TARGET;
@@ -1868,7 +2079,7 @@ static int pn533_send_poll_frame(struct pn533 *dev)
}
if (!skb) {
- nfc_dev_err(&dev->interface->dev, "Failed to allocate skb.");
+ nfc_err(&dev->interface->dev, "Failed to allocate skb\n");
return -ENOMEM;
}
@@ -1876,7 +2087,7 @@ static int pn533_send_poll_frame(struct pn533 *dev)
NULL);
if (rc < 0) {
dev_kfree_skb(skb);
- nfc_dev_err(&dev->interface->dev, "Polling loop error %d", rc);
+ nfc_err(&dev->interface->dev, "Polling loop error %d\n", rc);
}
return rc;
@@ -1890,9 +2101,9 @@ static void pn533_wq_poll(struct work_struct *work)
cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
- nfc_dev_dbg(&dev->interface->dev,
- "%s cancel_listen %d modulation len %d",
- __func__, dev->cancel_listen, cur_mod->len);
+ dev_dbg(&dev->interface->dev,
+ "%s cancel_listen %d modulation len %d\n",
+ __func__, dev->cancel_listen, cur_mod->len);
if (dev->cancel_listen == 1) {
dev->cancel_listen = 0;
@@ -1913,21 +2124,23 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
u32 im_protocols, u32 tm_protocols)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ struct pn533_poll_modulations *cur_mod;
u8 rand_mod;
+ int rc;
- nfc_dev_dbg(&dev->interface->dev,
- "%s: im protocols 0x%x tm protocols 0x%x",
- __func__, im_protocols, tm_protocols);
+ dev_dbg(&dev->interface->dev,
+ "%s: im protocols 0x%x tm protocols 0x%x\n",
+ __func__, im_protocols, tm_protocols);
if (dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev,
- "Cannot poll with a target already activated");
+ nfc_err(&dev->interface->dev,
+ "Cannot poll with a target already activated\n");
return -EBUSY;
}
if (dev->tgt_mode) {
- nfc_dev_err(&dev->interface->dev,
- "Cannot poll while already being activated");
+ nfc_err(&dev->interface->dev,
+ "Cannot poll while already being activated\n");
return -EBUSY;
}
@@ -1946,20 +2159,26 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
rand_mod %= dev->poll_mod_count;
dev->poll_mod_curr = rand_mod;
- return pn533_send_poll_frame(dev);
+ cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+ rc = pn533_send_poll_frame(dev);
+
+ /* Start listen timer */
+ if (!rc && cur_mod->len == 0 && dev->poll_mod_count > 1)
+ mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ);
+
+ return rc;
}
static void pn533_stop_poll(struct nfc_dev *nfc_dev)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
-
del_timer(&dev->listen_timer);
if (!dev->poll_mod_count) {
- nfc_dev_dbg(&dev->interface->dev,
- "Polling operation was not running");
+ dev_dbg(&dev->interface->dev,
+ "Polling operation was not running\n");
return;
}
@@ -1973,11 +2192,10 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
struct pn533_cmd_activate_response *rsp;
u16 gt_len;
int rc;
-
struct sk_buff *skb;
struct sk_buff *resp;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, sizeof(u8) * 2); /*TG + Next*/
if (!skb)
@@ -1993,8 +2211,8 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
rsp = (struct pn533_cmd_activate_response *)resp->data;
rc = rsp->status & PN533_CMD_RET_MASK;
if (rc != PN533_CMD_RET_SUCCESS) {
- nfc_dev_err(&dev->interface->dev,
- "Target activation failed (error 0x%x)", rc);
+ nfc_err(&dev->interface->dev,
+ "Target activation failed (error 0x%x)\n", rc);
dev_kfree_skb(resp);
return -EIO;
}
@@ -2013,39 +2231,38 @@ static int pn533_activate_target(struct nfc_dev *nfc_dev,
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s - protocol=%u", __func__,
- protocol);
+ dev_dbg(&dev->interface->dev, "%s: protocol=%u\n", __func__, protocol);
if (dev->poll_mod_count) {
- nfc_dev_err(&dev->interface->dev,
- "Cannot activate while polling");
+ nfc_err(&dev->interface->dev,
+ "Cannot activate while polling\n");
return -EBUSY;
}
if (dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev,
- "There is already an active target");
+ nfc_err(&dev->interface->dev,
+ "There is already an active target\n");
return -EBUSY;
}
if (!dev->tgt_available_prots) {
- nfc_dev_err(&dev->interface->dev,
- "There is no available target to activate");
+ nfc_err(&dev->interface->dev,
+ "There is no available target to activate\n");
return -EINVAL;
}
if (!(dev->tgt_available_prots & (1 << protocol))) {
- nfc_dev_err(&dev->interface->dev,
- "Target doesn't support requested proto %u",
- protocol);
+ nfc_err(&dev->interface->dev,
+ "Target doesn't support requested proto %u\n",
+ protocol);
return -EINVAL;
}
if (protocol == NFC_PROTO_NFC_DEP) {
rc = pn533_activate_target_nfcdep(dev);
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Activating target with DEP failed %d", rc);
+ nfc_err(&dev->interface->dev,
+ "Activating target with DEP failed %d\n", rc);
return rc;
}
}
@@ -2060,16 +2277,14 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
struct nfc_target *target)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-
struct sk_buff *skb;
struct sk_buff *resp;
-
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (!dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev, "There is no active target");
+ nfc_err(&dev->interface->dev, "There is no active target\n");
return;
}
@@ -2088,8 +2303,8 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
rc = resp->data[0] & PN533_CMD_RET_MASK;
if (rc != PN533_CMD_RET_SUCCESS)
- nfc_dev_err(&dev->interface->dev,
- "Error 0x%x when releasing the target", rc);
+ nfc_err(&dev->interface->dev,
+ "Error 0x%x when releasing the target\n", rc);
dev_kfree_skb(resp);
return;
@@ -2111,8 +2326,8 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
if (dev->tgt_available_prots &&
!(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
- nfc_dev_err(&dev->interface->dev,
- "The target does not support DEP");
+ nfc_err(&dev->interface->dev,
+ "The target does not support DEP\n");
rc = -EINVAL;
goto error;
}
@@ -2121,15 +2336,15 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
rc = rsp->status & PN533_CMD_RET_MASK;
if (rc != PN533_CMD_RET_SUCCESS) {
- nfc_dev_err(&dev->interface->dev,
- "Bringing DEP link up failed (error 0x%x)", rc);
+ nfc_err(&dev->interface->dev,
+ "Bringing DEP link up failed (error 0x%x)\n", rc);
goto error;
}
if (!dev->tgt_available_prots) {
struct nfc_target nfc_target;
- nfc_dev_dbg(&dev->interface->dev, "Creating new target");
+ dev_dbg(&dev->interface->dev, "Creating new target\n");
nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
nfc_target.nfcid1_len = 10;
@@ -2158,7 +2373,6 @@ error:
}
static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf);
-#define PASSIVE_DATA_LEN 5
static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
u8 comm_mode, u8 *gb, size_t gb_len)
{
@@ -2166,20 +2380,19 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
struct sk_buff *skb;
int rc, skb_len;
u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE];
-
u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (dev->poll_mod_count) {
- nfc_dev_err(&dev->interface->dev,
- "Cannot bring the DEP link up while polling");
+ nfc_err(&dev->interface->dev,
+ "Cannot bring the DEP link up while polling\n");
return -EBUSY;
}
if (dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev,
- "There is already an active target");
+ nfc_err(&dev->interface->dev,
+ "There is already an active target\n");
return -EBUSY;
}
@@ -2249,7 +2462,7 @@ static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
pn533_poll_reset_mod_list(dev);
@@ -2274,7 +2487,7 @@ static struct sk_buff *pn533_build_response(struct pn533 *dev)
struct sk_buff *skb, *tmp, *t;
unsigned int skb_len = 0, tmp_len = 0;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (skb_queue_empty(&dev->resp_q))
return NULL;
@@ -2287,8 +2500,8 @@ static struct sk_buff *pn533_build_response(struct pn533 *dev)
skb_queue_walk_safe(&dev->resp_q, tmp, t)
skb_len += tmp->len;
- nfc_dev_dbg(&dev->interface->dev, "%s total length %d\n",
- __func__, skb_len);
+ dev_dbg(&dev->interface->dev, "%s total length %d\n",
+ __func__, skb_len);
skb = alloc_skb(skb_len, GFP_KERNEL);
if (skb == NULL)
@@ -2315,7 +2528,7 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
int rc = 0;
u8 status, ret, mi;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
@@ -2329,8 +2542,8 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
skb_pull(resp, sizeof(status));
if (ret != PN533_CMD_RET_SUCCESS) {
- nfc_dev_err(&dev->interface->dev,
- "Exchanging data failed (error 0x%x)", ret);
+ nfc_err(&dev->interface->dev,
+ "Exchanging data failed (error 0x%x)\n", ret);
rc = -EIO;
goto error;
}
@@ -2388,14 +2601,17 @@ static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb)
break;
}
- /* Reserve the TG/MI byte */
- skb_reserve(frag, 1);
+ if (!dev->tgt_mode) {
+ /* Reserve the TG/MI byte */
+ skb_reserve(frag, 1);
- /* MI + TG */
- if (frag_size == PN533_CMD_DATAFRAME_MAXLEN)
- *skb_push(frag, sizeof(u8)) = (PN533_CMD_MI_MASK | 1);
- else
- *skb_push(frag, sizeof(u8)) = 1; /* TG */
+ /* MI + TG */
+ if (frag_size == PN533_CMD_DATAFRAME_MAXLEN)
+ *skb_push(frag, sizeof(u8)) =
+ (PN533_CMD_MI_MASK | 1);
+ else
+ *skb_push(frag, sizeof(u8)) = 1; /* TG */
+ }
memcpy(skb_put(frag, frag_size), skb->data, frag_size);
@@ -2420,11 +2636,11 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
struct pn533_data_exchange_arg *arg = NULL;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (!dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev,
- "Can't exchange data if there is no active target");
+ nfc_err(&dev->interface->dev,
+ "Can't exchange data if there is no active target\n");
rc = -EINVAL;
goto error;
}
@@ -2487,13 +2703,18 @@ static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
{
u8 status;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (IS_ERR(resp))
return PTR_ERR(resp);
status = resp->data[0];
+ /* Prepare for the next round */
+ if (skb_queue_len(&dev->fragment_skb) > 0) {
+ queue_work(dev->wq, &dev->mi_tm_tx_work);
+ return -EINPROGRESS;
+ }
dev_kfree_skb(resp);
if (status != 0) {
@@ -2514,19 +2735,34 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ /* let's split in multiple chunks if size's too big */
if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
- nfc_dev_err(&dev->interface->dev,
- "Data length greater than the max allowed: %d",
- PN533_CMD_DATAEXCH_DATA_MAXLEN);
- return -ENOSYS;
+ rc = pn533_fill_fragment_skbs(dev, skb);
+ if (rc <= 0)
+ goto error;
+
+ /* get the first skb */
+ skb = skb_dequeue(&dev->fragment_skb);
+ if (!skb) {
+ rc = -EIO;
+ goto error;
+ }
+
+ rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_META_DATA, skb,
+ pn533_tm_send_complete, NULL);
+ } else {
+		/* Send the skb */
+ rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_DATA, skb,
+ pn533_tm_send_complete, NULL);
}
- rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_DATA, skb,
- pn533_tm_send_complete, NULL);
- if (rc < 0)
+error:
+ if (rc < 0) {
dev_kfree_skb(skb);
+ skb_queue_purge(&dev->fragment_skb);
+ }
return rc;
}
@@ -2534,11 +2770,10 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
static void pn533_wq_mi_recv(struct work_struct *work)
{
struct pn533 *dev = container_of(work, struct pn533, mi_rx_work);
-
struct sk_buff *skb;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, PN533_CMD_DATAEXCH_HEAD_LEN);
if (!skb)
@@ -2570,8 +2805,8 @@ static void pn533_wq_mi_recv(struct work_struct *work)
if (rc == 0) /* success */
return;
- nfc_dev_err(&dev->interface->dev,
- "Error %d when trying to perform data_exchange", rc);
+ nfc_err(&dev->interface->dev,
+ "Error %d when trying to perform data_exchange\n", rc);
dev_kfree_skb(skb);
kfree(dev->cmd_complete_mi_arg);
@@ -2587,7 +2822,7 @@ static void pn533_wq_mi_send(struct work_struct *work)
struct sk_buff *skb;
int rc;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
/* Grab the first skb in the queue */
skb = skb_dequeue(&dev->fragment_skb);
@@ -2625,8 +2860,8 @@ static void pn533_wq_mi_send(struct work_struct *work)
if (rc == 0) /* success */
return;
- nfc_dev_err(&dev->interface->dev,
- "Error %d when trying to perform data_exchange", rc);
+ nfc_err(&dev->interface->dev,
+ "Error %d when trying to perform data_exchange\n", rc);
dev_kfree_skb(skb);
kfree(dev->cmd_complete_dep_arg);
@@ -2641,10 +2876,9 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
{
struct sk_buff *skb;
struct sk_buff *resp;
-
int skb_len;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
skb_len = sizeof(cfgitem) + cfgdata_len; /* cfgitem + cfgdata */
@@ -2691,7 +2925,7 @@ static int pn533_pasori_fw_reset(struct pn533 *dev)
struct sk_buff *skb;
struct sk_buff *resp;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, sizeof(u8));
if (!skb)
@@ -2717,7 +2951,7 @@ static void pn533_acr122_poweron_rdr_resp(struct urb *urb)
{
struct pn533_acr122_poweron_rdr_arg *arg = urb->context;
- nfc_dev_dbg(&urb->dev->dev, "%s", __func__);
+ dev_dbg(&urb->dev->dev, "%s\n", __func__);
print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
urb->transfer_buffer, urb->transfer_buffer_length,
@@ -2737,7 +2971,7 @@ static int pn533_acr122_poweron_rdr(struct pn533 *dev)
void *cntx;
struct pn533_acr122_poweron_rdr_arg arg;
- nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
init_completion(&arg.done);
cntx = dev->in_urb->context; /* backup context */
@@ -2755,16 +2989,15 @@ static int pn533_acr122_poweron_rdr(struct pn533 *dev)
rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Reader power on cmd error %d", rc);
+ nfc_err(&dev->interface->dev,
+ "Reader power on cmd error %d\n", rc);
return rc;
}
rc = usb_submit_urb(dev->in_urb, GFP_KERNEL);
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Can't submit for reader power on cmd response %d",
- rc);
+ nfc_err(&dev->interface->dev,
+ "Can't submit reader poweron cmd response %d\n", rc);
return rc;
}
@@ -2785,20 +3018,19 @@ static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf)
rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD,
(u8 *)&rf_field, 1);
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Error on setting RF field");
+ nfc_err(&dev->interface->dev, "Error on setting RF field\n");
return rc;
}
return rc;
}
-int pn533_dev_up(struct nfc_dev *nfc_dev)
+static int pn533_dev_up(struct nfc_dev *nfc_dev)
{
return pn533_rf_field(nfc_dev, 1);
}
-int pn533_dev_down(struct nfc_dev *nfc_dev)
+static int pn533_dev_down(struct nfc_dev *nfc_dev)
{
return pn533_rf_field(nfc_dev, 0);
}
@@ -2839,16 +3071,16 @@ static int pn533_setup(struct pn533 *dev)
break;
default:
- nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
- dev->device_type);
+ nfc_err(&dev->interface->dev, "Unknown device type %d\n",
+ dev->device_type);
return -EINVAL;
}
rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
(u8 *)&max_retries, sizeof(max_retries));
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Error on setting MAX_RETRIES config");
+ nfc_err(&dev->interface->dev,
+ "Error on setting MAX_RETRIES config\n");
return rc;
}
@@ -2856,8 +3088,7 @@ static int pn533_setup(struct pn533 *dev)
rc = pn533_set_configuration(dev, PN533_CFGITEM_TIMING,
(u8 *)&timing, sizeof(timing));
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Error on setting RF timings");
+ nfc_err(&dev->interface->dev, "Error on setting RF timings\n");
return rc;
}
@@ -2871,8 +3102,8 @@ static int pn533_setup(struct pn533 *dev)
rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI,
pasori_cfg, 3);
if (rc) {
- nfc_dev_err(&dev->interface->dev,
- "Error while settings PASORI config");
+ nfc_err(&dev->interface->dev,
+			"Error while setting PASORI config\n");
return rc;
}
@@ -2917,8 +3148,8 @@ static int pn533_probe(struct usb_interface *interface,
}
if (!in_endpoint || !out_endpoint) {
- nfc_dev_err(&interface->dev,
- "Could not find bulk-in or bulk-out endpoint");
+ nfc_err(&interface->dev,
+ "Could not find bulk-in or bulk-out endpoint\n");
rc = -ENODEV;
goto error;
}
@@ -2941,6 +3172,8 @@ static int pn533_probe(struct usb_interface *interface,
INIT_WORK(&dev->mi_rx_work, pn533_wq_mi_recv);
INIT_WORK(&dev->mi_tx_work, pn533_wq_mi_send);
INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
+ INIT_WORK(&dev->mi_tm_rx_work, pn533_wq_tm_mi_recv);
+ INIT_WORK(&dev->mi_tm_tx_work, pn533_wq_tm_mi_send);
INIT_DELAYED_WORK(&dev->poll_work, pn533_wq_poll);
INIT_WORK(&dev->rf_work, pn533_wq_rf);
dev->wq = alloc_ordered_workqueue("pn533", 0);
@@ -2978,16 +3211,15 @@ static int pn533_probe(struct usb_interface *interface,
rc = pn533_acr122_poweron_rdr(dev);
if (rc < 0) {
- nfc_dev_err(&dev->interface->dev,
- "Couldn't poweron the reader (error %d)",
- rc);
+ nfc_err(&dev->interface->dev,
+ "Couldn't poweron the reader (error %d)\n", rc);
goto destroy_wq;
}
break;
default:
- nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
- dev->device_type);
+ nfc_err(&dev->interface->dev, "Unknown device type %d\n",
+ dev->device_type);
rc = -EINVAL;
goto destroy_wq;
}
@@ -2997,9 +3229,9 @@ static int pn533_probe(struct usb_interface *interface,
if (rc < 0)
goto destroy_wq;
- nfc_dev_info(&dev->interface->dev,
- "NXP PN5%02X firmware ver %d.%d now attached",
- fw_ver.ic, fw_ver.ver, fw_ver.rev);
+ nfc_info(&dev->interface->dev,
+ "NXP PN5%02X firmware ver %d.%d now attached\n",
+ fw_ver.ic, fw_ver.ver, fw_ver.rev);
dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
@@ -3070,7 +3302,7 @@ static void pn533_disconnect(struct usb_interface *interface)
usb_free_urb(dev->out_urb);
kfree(dev);
- nfc_dev_info(&interface->dev, "NXP PN533 NFC device disconnected");
+ nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n");
}
static struct usb_driver pn533_driver = {
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 01e27d4bdd0d..b158ee1c2ac6 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -18,6 +18,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/crc-ccitt.h>
#include <linux/module.h>
#include <linux/i2c.h>
@@ -151,8 +153,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
int count = sizeof(rset_cmd);
- pr_info(DRIVER_DESC ": %s\n", __func__);
- dev_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
+ nfc_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
/* Disable fw download */
gpio_set_value(phy->gpio_fw, 0);
@@ -173,7 +174,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
dev_dbg(&phy->i2c_dev->dev, "Sending reset cmd\n");
ret = i2c_master_send(phy->i2c_dev, rset_cmd, count);
if (ret == count) {
- dev_info(&phy->i2c_dev->dev,
+ nfc_info(&phy->i2c_dev->dev,
"nfc_en polarity : active %s\n",
(polarity == 0 ? "low" : "high"));
goto out;
@@ -181,7 +182,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
}
}
- dev_err(&phy->i2c_dev->dev,
+ nfc_err(&phy->i2c_dev->dev,
"Could not detect nfc_en polarity, fallback to active high\n");
out:
@@ -201,7 +202,7 @@ static int pn544_hci_i2c_enable(void *phy_id)
{
struct pn544_i2c_phy *phy = phy_id;
- pr_info(DRIVER_DESC ": %s\n", __func__);
+ pr_info("%s\n", __func__);
pn544_hci_i2c_enable_mode(phy, PN544_HCI_MODE);
@@ -214,8 +215,6 @@ static void pn544_hci_i2c_disable(void *phy_id)
{
struct pn544_i2c_phy *phy = phy_id;
- pr_info(DRIVER_DESC ": %s\n", __func__);
-
gpio_set_value(phy->gpio_fw, 0);
gpio_set_value(phy->gpio_en, !phy->en_polarity);
usleep_range(10000, 15000);
@@ -298,11 +297,9 @@ static int check_crc(u8 *buf, int buflen)
crc = ~crc;
if (buf[len - 2] != (crc & 0xff) || buf[len - 1] != (crc >> 8)) {
- pr_err(PN544_HCI_I2C_DRIVER_NAME
- ": CRC error 0x%x != 0x%x 0x%x\n",
+ pr_err("CRC error 0x%x != 0x%x 0x%x\n",
crc, buf[len - 1], buf[len - 2]);
-
- pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
+ pr_info("%s: BAD CRC\n", __func__);
print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
16, 2, buf, buflen, false);
return -EPERM;
@@ -328,13 +325,13 @@ static int pn544_hci_i2c_read(struct pn544_i2c_phy *phy, struct sk_buff **skb)
r = i2c_master_recv(client, &len, 1);
if (r != 1) {
- dev_err(&client->dev, "cannot read len byte\n");
+ nfc_err(&client->dev, "cannot read len byte\n");
return -EREMOTEIO;
}
if ((len < (PN544_HCI_I2C_LLC_MIN_SIZE - 1)) ||
(len > (PN544_HCI_I2C_LLC_MAX_SIZE - 1))) {
- dev_err(&client->dev, "invalid len byte\n");
+ nfc_err(&client->dev, "invalid len byte\n");
r = -EBADMSG;
goto flush;
}
@@ -386,7 +383,7 @@ static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy)
r = i2c_master_recv(client, (char *) &response, sizeof(response));
if (r != sizeof(response)) {
- dev_err(&client->dev, "cannot read fw status\n");
+ nfc_err(&client->dev, "cannot read fw status\n");
return -EIO;
}
@@ -478,8 +475,7 @@ static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
{
struct pn544_i2c_phy *phy = phy_id;
- pr_info(DRIVER_DESC ": Starting Firmware Download (%s)\n",
- firmware_name);
+ pr_info("Starting Firmware Download (%s)\n", firmware_name);
strcpy(phy->firmware_name, firmware_name);
@@ -493,7 +489,7 @@ static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
static void pn544_hci_i2c_fw_work_complete(struct pn544_i2c_phy *phy,
int result)
{
- pr_info(DRIVER_DESC ": Firmware Download Complete, result=%d\n", result);
+ pr_info("Firmware Download Complete, result=%d\n", result);
pn544_hci_i2c_disable(phy);
@@ -694,14 +690,14 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "Need I2C_FUNC_I2C\n");
+ nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
return -ENODEV;
}
phy = devm_kzalloc(&client->dev, sizeof(struct pn544_i2c_phy),
GFP_KERNEL);
if (!phy) {
- dev_err(&client->dev,
+ nfc_err(&client->dev,
"Cannot allocate memory for pn544 i2c phy.\n");
return -ENOMEM;
}
@@ -714,18 +710,18 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
pdata = client->dev.platform_data;
if (pdata == NULL) {
- dev_err(&client->dev, "No platform data\n");
+ nfc_err(&client->dev, "No platform data\n");
return -EINVAL;
}
if (pdata->request_resources == NULL) {
- dev_err(&client->dev, "request_resources() missing\n");
+ nfc_err(&client->dev, "request_resources() missing\n");
return -EINVAL;
}
r = pdata->request_resources(client);
if (r) {
- dev_err(&client->dev, "Cannot get platform resources\n");
+ nfc_err(&client->dev, "Cannot get platform resources\n");
return r;
}
@@ -739,7 +735,7 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
PN544_HCI_I2C_DRIVER_NAME, phy);
if (r < 0) {
- dev_err(&client->dev, "Unable to register IRQ handler\n");
+ nfc_err(&client->dev, "Unable to register IRQ handler\n");
goto err_rti;
}
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 078e62feba17..74cfa0a88b9e 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -18,6 +18,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -41,6 +43,7 @@ enum pn544_state {
/* Proprietary commands */
#define PN544_WRITE 0x3f
+#define PN544_TEST_SWP 0x21
/* Proprietary gates, events, commands and registers */
@@ -81,14 +84,17 @@ enum pn544_state {
#define PN544_PL_NFCT_DEACTIVATED 0x09
#define PN544_SWP_MGMT_GATE 0xA0
+#define PN544_SWP_DEFAULT_MODE 0x01
#define PN544_NFC_WI_MGMT_GATE 0xA1
+#define PN544_NFC_ESE_DEFAULT_MODE 0x01
#define PN544_HCI_EVT_SND_DATA 0x01
#define PN544_HCI_EVT_ACTIVATED 0x02
#define PN544_HCI_EVT_DEACTIVATED 0x03
#define PN544_HCI_EVT_RCV_DATA 0x04
#define PN544_HCI_EVT_CONTINUE_MI 0x05
+#define PN544_HCI_EVT_SWITCH_MODE 0x03
#define PN544_HCI_CMD_ATTREQUEST 0x12
#define PN544_HCI_CMD_CONTINUE_ACTIVATION 0x13
@@ -187,13 +193,6 @@ static int pn544_hci_ready(struct nfc_hci_dev *hdev)
{{0x9e, 0xb4}, 0x00},
- {{0x9e, 0xd9}, 0xff},
- {{0x9e, 0xda}, 0xff},
- {{0x9e, 0xdb}, 0x23},
- {{0x9e, 0xdc}, 0x21},
- {{0x9e, 0xdd}, 0x22},
- {{0x9e, 0xde}, 0x24},
-
{{0x9c, 0x01}, 0x08},
{{0x9e, 0xaa}, 0x01},
@@ -394,7 +393,7 @@ static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) {
hdev->gb = nfc_get_local_general_bytes(hdev->ndev,
&hdev->gb_len);
- pr_debug("generate local bytes %p", hdev->gb);
+ pr_debug("generate local bytes %p\n", hdev->gb);
if (hdev->gb == NULL || hdev->gb_len == 0) {
im_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
tm_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
@@ -696,7 +695,7 @@ static int pn544_hci_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
struct nfc_target *target)
{
- pr_debug("supported protocol %d", target->supported_protocols);
+	pr_debug("supported protocol %d\n", target->supported_protocols);
if (target->supported_protocols & (NFC_PROTO_ISO14443_MASK |
NFC_PROTO_ISO14443_B_MASK)) {
return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
@@ -733,7 +732,7 @@ static int pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate, u8 event,
struct sk_buff *rgb_skb = NULL;
int r;
- pr_debug("hci event %d", event);
+ pr_debug("hci event %d\n", event);
switch (event) {
case PN544_HCI_EVT_ACTIVATED:
if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE) {
@@ -764,7 +763,7 @@ static int pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate, u8 event,
}
if (skb->data[0] != 0) {
- pr_debug("data0 %d", skb->data[0]);
+ pr_debug("data0 %d\n", skb->data[0]);
r = -EPROTO;
goto exit;
}
@@ -792,6 +791,108 @@ static int pn544_hci_fw_download(struct nfc_hci_dev *hdev,
return info->fw_download(info->phy_id, firmware_name);
}
+static int pn544_hci_discover_se(struct nfc_hci_dev *hdev)
+{
+ u32 se_idx = 0;
+ u8 ese_mode = 0x01; /* Default mode */
+ struct sk_buff *res_skb;
+ int r;
+
+ r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE, PN544_TEST_SWP,
+ NULL, 0, &res_skb);
+
+ if (r == 0) {
+ if (res_skb->len == 2 && res_skb->data[0] == 0x00)
+ nfc_add_se(hdev->ndev, se_idx++, NFC_SE_UICC);
+
+ kfree_skb(res_skb);
+ }
+
+ r = nfc_hci_send_event(hdev, PN544_NFC_WI_MGMT_GATE,
+ PN544_HCI_EVT_SWITCH_MODE,
+ &ese_mode, 1);
+ if (r == 0)
+ nfc_add_se(hdev->ndev, se_idx++, NFC_SE_EMBEDDED);
+
+ return !se_idx;
+}
+
+#define PN544_SE_MODE_OFF 0x00
+#define PN544_SE_MODE_ON 0x01
+static int pn544_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx)
+{
+ struct nfc_se *se;
+ u8 enable = PN544_SE_MODE_ON;
+ static struct uicc_gatelist {
+ u8 head;
+ u8 adr[2];
+ u8 value;
+ } uicc_gatelist[] = {
+ {0x00, {0x9e, 0xd9}, 0x23},
+ {0x00, {0x9e, 0xda}, 0x21},
+ {0x00, {0x9e, 0xdb}, 0x22},
+ {0x00, {0x9e, 0xdc}, 0x24},
+ };
+ struct uicc_gatelist *p = uicc_gatelist;
+ int count = ARRAY_SIZE(uicc_gatelist);
+ struct sk_buff *res_skb;
+ int r;
+
+ se = nfc_find_se(hdev->ndev, se_idx);
+
+ switch (se->type) {
+ case NFC_SE_UICC:
+ while (count--) {
+ r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE,
+ PN544_WRITE, (u8 *)p, 4, &res_skb);
+ if (r < 0)
+ return r;
+
+ if (res_skb->len != 1) {
+ kfree_skb(res_skb);
+ return -EPROTO;
+ }
+
+ if (res_skb->data[0] != p->value) {
+ kfree_skb(res_skb);
+ return -EIO;
+ }
+
+ kfree_skb(res_skb);
+
+ p++;
+ }
+
+ return nfc_hci_set_param(hdev, PN544_SWP_MGMT_GATE,
+ PN544_SWP_DEFAULT_MODE, &enable, 1);
+ case NFC_SE_EMBEDDED:
+ return nfc_hci_set_param(hdev, PN544_NFC_WI_MGMT_GATE,
+ PN544_NFC_ESE_DEFAULT_MODE, &enable, 1);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pn544_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx)
+{
+ struct nfc_se *se;
+ u8 disable = PN544_SE_MODE_OFF;
+
+ se = nfc_find_se(hdev->ndev, se_idx);
+
+ switch (se->type) {
+ case NFC_SE_UICC:
+ return nfc_hci_set_param(hdev, PN544_SWP_MGMT_GATE,
+ PN544_SWP_DEFAULT_MODE, &disable, 1);
+ case NFC_SE_EMBEDDED:
+ return nfc_hci_set_param(hdev, PN544_NFC_WI_MGMT_GATE,
+ PN544_NFC_ESE_DEFAULT_MODE, &disable, 1);
+ default:
+ return -EINVAL;
+ }
+}
+
static struct nfc_hci_ops pn544_hci_ops = {
.open = pn544_hci_open,
.close = pn544_hci_close,
@@ -807,6 +908,9 @@ static struct nfc_hci_ops pn544_hci_ops = {
.check_presence = pn544_hci_check_presence,
.event_received = pn544_hci_event_received,
.fw_download = pn544_hci_fw_download,
+ .discover_se = pn544_hci_discover_se,
+ .enable_se = pn544_hci_enable_se,
+ .disable_se = pn544_hci_disable_se,
};
int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
@@ -820,7 +924,6 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
info = kzalloc(sizeof(struct pn544_hci_info), GFP_KERNEL);
if (!info) {
- pr_err("Cannot allocate memory for pn544_hci_info.\n");
r = -ENOMEM;
goto err_info_alloc;
}
@@ -853,7 +956,7 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
phy_headroom + PN544_CMDS_HEADROOM,
phy_tailroom, phy_payload);
if (!info->hdev) {
- pr_err("Cannot allocate nfc hdev.\n");
+ pr_err("Cannot allocate nfc hdev\n");
r = -ENOMEM;
goto err_alloc_hdev;
}
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
new file mode 100644
index 000000000000..8a0571eb2627
--- /dev/null
+++ b/drivers/nfc/port100.c
@@ -0,0 +1,1529 @@
+/*
+ * Sony NFC Port-100 Series driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * Partly based/Inspired by Stephen Tiedemann's nfcpy
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <net/nfc/digital.h>
+
+#define VERSION "0.1"
+
+#define SONY_VENDOR_ID 0x054c
+#define RCS380_PRODUCT_ID 0x06c1
+
+#define PORT100_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
+ NFC_PROTO_MIFARE_MASK | \
+ NFC_PROTO_FELICA_MASK | \
+ NFC_PROTO_NFC_DEP_MASK)
+
+#define PORT100_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \
+ NFC_DIGITAL_DRV_CAPS_TG_CRC)
+
+/* Standard port100 frame definitions */
+#define PORT100_FRAME_HEADER_LEN (sizeof(struct port100_frame) \
+ + 2) /* data[0] CC, data[1] SCC */
+#define PORT100_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/
+
+#define PORT100_COMM_RF_HEAD_MAX_LEN (sizeof(struct port100_tg_comm_rf_cmd))
+
+/*
+ * Max extended frame payload len, excluding CC and SCC
+ * which are already in PORT100_FRAME_HEADER_LEN.
+ */
+#define PORT100_FRAME_MAX_PAYLOAD_LEN 1001
+
+#define PORT100_FRAME_ACK_SIZE 6 /* Preamble (1), SoPC (2), ACK Code (2),
+ Postamble (1) */
+static u8 ack_frame[PORT100_FRAME_ACK_SIZE] = {
+ 0x00, 0x00, 0xff, 0x00, 0xff, 0x00
+};
+
+#define PORT100_FRAME_CHECKSUM(f) (f->data[le16_to_cpu(f->datalen)])
+#define PORT100_FRAME_POSTAMBLE(f) (f->data[le16_to_cpu(f->datalen) + 1])
+
+/* start of frame */
+#define PORT100_FRAME_SOF 0x00FF
+#define PORT100_FRAME_EXT 0xFFFF
+#define PORT100_FRAME_ACK 0x00FF
+
+/* Port-100 command: in or out */
+#define PORT100_FRAME_DIRECTION(f) (f->data[0]) /* CC */
+#define PORT100_FRAME_DIR_OUT 0xD6
+#define PORT100_FRAME_DIR_IN 0xD7
+
+/* Port-100 sub-command */
+#define PORT100_FRAME_CMD(f) (f->data[1]) /* SCC */
+
+#define PORT100_CMD_GET_FIRMWARE_VERSION 0x20
+#define PORT100_CMD_GET_COMMAND_TYPE 0x28
+#define PORT100_CMD_SET_COMMAND_TYPE 0x2A
+
+#define PORT100_CMD_IN_SET_RF 0x00
+#define PORT100_CMD_IN_SET_PROTOCOL 0x02
+#define PORT100_CMD_IN_COMM_RF 0x04
+
+#define PORT100_CMD_TG_SET_RF 0x40
+#define PORT100_CMD_TG_SET_PROTOCOL 0x42
+#define PORT100_CMD_TG_SET_RF_OFF 0x46
+#define PORT100_CMD_TG_COMM_RF 0x48
+
+#define PORT100_CMD_SWITCH_RF 0x06
+
+#define PORT100_CMD_RESPONSE(cmd) (cmd + 1)
+
+#define PORT100_CMD_TYPE_IS_SUPPORTED(mask, cmd_type) \
+ ((mask) & (0x01 << (cmd_type)))
+#define PORT100_CMD_TYPE_0 0
+#define PORT100_CMD_TYPE_1 1
+
+#define PORT100_CMD_STATUS_OK 0x00
+#define PORT100_CMD_STATUS_TIMEOUT 0x80
+
+#define PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK 0x01
+#define PORT100_MDAA_TGT_WAS_ACTIVATED_MASK 0x02
+
+struct port100;
+
+typedef void (*port100_send_async_complete_t)(struct port100 *dev, void *arg,
+ struct sk_buff *resp);
+
+/**
+ * Setting sets structure for in_set_rf command
+ *
+ * @in_*_set_number: Represent the entry indexes in the port-100 RF Base Table.
+ * This table contains multiple RF setting sets required for RF
+ * communication.
+ *
+ * @in_*_comm_type: These fields set the communication type to be used.
+ */
+struct port100_in_rf_setting {
+ u8 in_send_set_number;
+ u8 in_send_comm_type;
+ u8 in_recv_set_number;
+ u8 in_recv_comm_type;
+} __packed;
+
+#define PORT100_COMM_TYPE_IN_212F 0x01
+#define PORT100_COMM_TYPE_IN_424F 0x02
+#define PORT100_COMM_TYPE_IN_106A 0x03
+
+static const struct port100_in_rf_setting in_rf_settings[] = {
+ [NFC_DIGITAL_RF_TECH_212F] = {
+ .in_send_set_number = 1,
+ .in_send_comm_type = PORT100_COMM_TYPE_IN_212F,
+ .in_recv_set_number = 15,
+ .in_recv_comm_type = PORT100_COMM_TYPE_IN_212F,
+ },
+ [NFC_DIGITAL_RF_TECH_424F] = {
+ .in_send_set_number = 1,
+ .in_send_comm_type = PORT100_COMM_TYPE_IN_424F,
+ .in_recv_set_number = 15,
+ .in_recv_comm_type = PORT100_COMM_TYPE_IN_424F,
+ },
+ [NFC_DIGITAL_RF_TECH_106A] = {
+ .in_send_set_number = 2,
+ .in_send_comm_type = PORT100_COMM_TYPE_IN_106A,
+ .in_recv_set_number = 15,
+ .in_recv_comm_type = PORT100_COMM_TYPE_IN_106A,
+ },
+};
+
+/**
+ * Setting sets structure for tg_set_rf command
+ *
+ * @tg_set_number: Represents the entry index in the port-100 RF Base Table.
+ * This table contains multiple RF setting sets required for RF
+ * communication. This field is used for both send and receive
+ * settings.
+ *
+ * @tg_comm_type: Sets the communication type to be used to send and receive
+ * data.
+ */
+struct port100_tg_rf_setting {
+ u8 tg_set_number;
+ u8 tg_comm_type;
+} __packed;
+
+#define PORT100_COMM_TYPE_TG_106A 0x0B
+#define PORT100_COMM_TYPE_TG_212F 0x0C
+#define PORT100_COMM_TYPE_TG_424F 0x0D
+
+static const struct port100_tg_rf_setting tg_rf_settings[] = {
+ [NFC_DIGITAL_RF_TECH_106A] = {
+ .tg_set_number = 8,
+ .tg_comm_type = PORT100_COMM_TYPE_TG_106A,
+ },
+ [NFC_DIGITAL_RF_TECH_212F] = {
+ .tg_set_number = 8,
+ .tg_comm_type = PORT100_COMM_TYPE_TG_212F,
+ },
+ [NFC_DIGITAL_RF_TECH_424F] = {
+ .tg_set_number = 8,
+ .tg_comm_type = PORT100_COMM_TYPE_TG_424F,
+ },
+};
+
+#define PORT100_IN_PROT_INITIAL_GUARD_TIME 0x00
+#define PORT100_IN_PROT_ADD_CRC 0x01
+#define PORT100_IN_PROT_CHECK_CRC 0x02
+#define PORT100_IN_PROT_MULTI_CARD 0x03
+#define PORT100_IN_PROT_ADD_PARITY 0x04
+#define PORT100_IN_PROT_CHECK_PARITY 0x05
+#define PORT100_IN_PROT_BITWISE_AC_RECV_MODE 0x06
+#define PORT100_IN_PROT_VALID_BIT_NUMBER 0x07
+#define PORT100_IN_PROT_CRYPTO1 0x08
+#define PORT100_IN_PROT_ADD_SOF 0x09
+#define PORT100_IN_PROT_CHECK_SOF 0x0A
+#define PORT100_IN_PROT_ADD_EOF 0x0B
+#define PORT100_IN_PROT_CHECK_EOF 0x0C
+#define PORT100_IN_PROT_DEAF_TIME 0x0E
+#define PORT100_IN_PROT_CRM 0x0F
+#define PORT100_IN_PROT_CRM_MIN_LEN 0x10
+#define PORT100_IN_PROT_T1_TAG_FRAME 0x11
+#define PORT100_IN_PROT_RFCA 0x12
+#define PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR 0x13
+#define PORT100_IN_PROT_END 0x14
+
+#define PORT100_IN_MAX_NUM_PROTOCOLS 19
+
+#define PORT100_TG_PROT_TU 0x00
+#define PORT100_TG_PROT_RF_OFF 0x01
+#define PORT100_TG_PROT_CRM 0x02
+#define PORT100_TG_PROT_END 0x03
+
+#define PORT100_TG_MAX_NUM_PROTOCOLS 3
+
+struct port100_protocol {
+ u8 number;
+ u8 value;
+} __packed;
+
+static struct port100_protocol
+in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
+ [NFC_DIGITAL_FRAMING_NFCA_SHORT] = {
+ { PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 },
+ { PORT100_IN_PROT_ADD_CRC, 0 },
+ { PORT100_IN_PROT_CHECK_CRC, 0 },
+ { PORT100_IN_PROT_MULTI_CARD, 0 },
+ { PORT100_IN_PROT_ADD_PARITY, 0 },
+ { PORT100_IN_PROT_CHECK_PARITY, 1 },
+ { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 },
+ { PORT100_IN_PROT_VALID_BIT_NUMBER, 7 },
+ { PORT100_IN_PROT_CRYPTO1, 0 },
+ { PORT100_IN_PROT_ADD_SOF, 0 },
+ { PORT100_IN_PROT_CHECK_SOF, 0 },
+ { PORT100_IN_PROT_ADD_EOF, 0 },
+ { PORT100_IN_PROT_CHECK_EOF, 0 },
+ { PORT100_IN_PROT_DEAF_TIME, 4 },
+ { PORT100_IN_PROT_CRM, 0 },
+ { PORT100_IN_PROT_CRM_MIN_LEN, 0 },
+ { PORT100_IN_PROT_T1_TAG_FRAME, 0 },
+ { PORT100_IN_PROT_RFCA, 0 },
+ { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_STANDARD] = {
+ { PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 },
+ { PORT100_IN_PROT_ADD_CRC, 0 },
+ { PORT100_IN_PROT_CHECK_CRC, 0 },
+ { PORT100_IN_PROT_MULTI_CARD, 0 },
+ { PORT100_IN_PROT_ADD_PARITY, 1 },
+ { PORT100_IN_PROT_CHECK_PARITY, 1 },
+ { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 },
+ { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 },
+ { PORT100_IN_PROT_CRYPTO1, 0 },
+ { PORT100_IN_PROT_ADD_SOF, 0 },
+ { PORT100_IN_PROT_CHECK_SOF, 0 },
+ { PORT100_IN_PROT_ADD_EOF, 0 },
+ { PORT100_IN_PROT_CHECK_EOF, 0 },
+ { PORT100_IN_PROT_DEAF_TIME, 4 },
+ { PORT100_IN_PROT_CRM, 0 },
+ { PORT100_IN_PROT_CRM_MIN_LEN, 0 },
+ { PORT100_IN_PROT_T1_TAG_FRAME, 0 },
+ { PORT100_IN_PROT_RFCA, 0 },
+ { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A] = {
+ { PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 },
+ { PORT100_IN_PROT_ADD_CRC, 1 },
+ { PORT100_IN_PROT_CHECK_CRC, 1 },
+ { PORT100_IN_PROT_MULTI_CARD, 0 },
+ { PORT100_IN_PROT_ADD_PARITY, 1 },
+ { PORT100_IN_PROT_CHECK_PARITY, 1 },
+ { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 },
+ { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 },
+ { PORT100_IN_PROT_CRYPTO1, 0 },
+ { PORT100_IN_PROT_ADD_SOF, 0 },
+ { PORT100_IN_PROT_CHECK_SOF, 0 },
+ { PORT100_IN_PROT_ADD_EOF, 0 },
+ { PORT100_IN_PROT_CHECK_EOF, 0 },
+ { PORT100_IN_PROT_DEAF_TIME, 4 },
+ { PORT100_IN_PROT_CRM, 0 },
+ { PORT100_IN_PROT_CRM_MIN_LEN, 0 },
+ { PORT100_IN_PROT_T1_TAG_FRAME, 0 },
+ { PORT100_IN_PROT_RFCA, 0 },
+ { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_T1T] = {
+ /* nfc_digital_framing_nfca_short */
+ { PORT100_IN_PROT_ADD_CRC, 2 },
+ { PORT100_IN_PROT_CHECK_CRC, 2 },
+ { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 },
+ { PORT100_IN_PROT_T1_TAG_FRAME, 2 },
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_T2T] = {
+ /* nfc_digital_framing_nfca_standard */
+ { PORT100_IN_PROT_ADD_CRC, 1 },
+ { PORT100_IN_PROT_CHECK_CRC, 0 },
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = {
+ /* nfc_digital_framing_nfca_standard */
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCF] = {
+ { PORT100_IN_PROT_INITIAL_GUARD_TIME, 18 },
+ { PORT100_IN_PROT_ADD_CRC, 1 },
+ { PORT100_IN_PROT_CHECK_CRC, 1 },
+ { PORT100_IN_PROT_MULTI_CARD, 0 },
+ { PORT100_IN_PROT_ADD_PARITY, 0 },
+ { PORT100_IN_PROT_CHECK_PARITY, 0 },
+ { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 },
+ { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 },
+ { PORT100_IN_PROT_CRYPTO1, 0 },
+ { PORT100_IN_PROT_ADD_SOF, 0 },
+ { PORT100_IN_PROT_CHECK_SOF, 0 },
+ { PORT100_IN_PROT_ADD_EOF, 0 },
+ { PORT100_IN_PROT_CHECK_EOF, 0 },
+ { PORT100_IN_PROT_DEAF_TIME, 4 },
+ { PORT100_IN_PROT_CRM, 0 },
+ { PORT100_IN_PROT_CRM_MIN_LEN, 0 },
+ { PORT100_IN_PROT_T1_TAG_FRAME, 0 },
+ { PORT100_IN_PROT_RFCA, 0 },
+ { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCF_T3T] = {
+ /* nfc_digital_framing_nfcf */
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCF_NFC_DEP] = {
+ /* nfc_digital_framing_nfcf */
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
+ { PORT100_IN_PROT_END, 0 },
+ },
+};
+
+static struct port100_protocol
+tg_protocols[][PORT100_TG_MAX_NUM_PROTOCOLS + 1] = {
+ [NFC_DIGITAL_FRAMING_NFCA_SHORT] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_STANDARD] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_T1T] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_T2T] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = {
+ { PORT100_TG_PROT_TU, 1 },
+ { PORT100_TG_PROT_RF_OFF, 0 },
+ { PORT100_TG_PROT_CRM, 7 },
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCF] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCF_T3T] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCF_NFC_DEP] = {
+ { PORT100_TG_PROT_TU, 1 },
+ { PORT100_TG_PROT_RF_OFF, 0 },
+ { PORT100_TG_PROT_CRM, 7 },
+ { PORT100_TG_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
+ { PORT100_TG_PROT_RF_OFF, 1 },
+ { PORT100_TG_PROT_END, 0 },
+ },
+};
+
+struct port100 {
+ struct nfc_digital_dev *nfc_digital_dev;
+
+ int skb_headroom;
+ int skb_tailroom;
+
+ struct usb_device *udev;
+ struct usb_interface *interface;
+
+ struct urb *out_urb;
+ struct urb *in_urb;
+
+ struct work_struct cmd_complete_work;
+
+ u8 cmd_type;
+
+ /* The digital stack serializes commands to be sent. There is no need
+ * for any queuing/locking mechanism at driver level.
+ */
+ struct port100_cmd *cmd;
+};
+
+struct port100_cmd {
+ u8 code;
+ int status;
+ struct sk_buff *req;
+ struct sk_buff *resp;
+ int resp_len;
+ port100_send_async_complete_t complete_cb;
+ void *complete_cb_context;
+};
+
+struct port100_frame {
+ u8 preamble;
+ __be16 start_frame;
+ __be16 extended_frame;
+ __le16 datalen;
+ u8 datalen_checksum;
+ u8 data[];
+} __packed;
+
+struct port100_ack_frame {
+ u8 preamble;
+ __be16 start_frame;
+ __be16 ack_frame;
+ u8 postambule;
+} __packed;
+
+struct port100_cb_arg {
+ nfc_digital_cmd_complete_t complete_cb;
+ void *complete_arg;
+ u8 mdaa;
+};
+
+struct port100_tg_comm_rf_cmd {
+ __le16 guard_time;
+ __le16 send_timeout;
+ u8 mdaa;
+ u8 nfca_param[6];
+ u8 nfcf_param[18];
+ u8 mf_halted;
+ u8 arae_flag;
+ __le16 recv_timeout;
+ u8 data[];
+} __packed;
+
+struct port100_tg_comm_rf_res {
+ u8 comm_type;
+ u8 ar_status;
+ u8 target_activated;
+ __le32 status;
+ u8 data[];
+} __packed;
+
+/* The rule: value + checksum = 0 */
+static inline u8 port100_checksum(u16 value)
+{
+ return ~(((u8 *)&value)[0] + ((u8 *)&value)[1]) + 1;
+}
+
+/* The rule: sum(data elements) + checksum = 0 */
+static u8 port100_data_checksum(u8 *data, int datalen)
+{
+ u8 sum = 0;
+ int i;
+
+ for (i = 0; i < datalen; i++)
+ sum += data[i];
+
+ return port100_checksum(sum);
+}
+
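As a worked example of the two checksum rules above (plain userspace C applying the same arithmetic, not code from the patch): a datalen of 0x0003 gets the checksum 0xfd, since 0x03 + 0x00 + 0xfd wraps to zero modulo 256.

    #include <stdint.h>
    #include <stdio.h>

    /* Same rule as port100_checksum(): byte sum of the value plus the
     * checksum must be 0 modulo 256. */
    static uint8_t checksum16(uint16_t value)
    {
            uint8_t sum = (uint8_t)((value & 0xff) + (value >> 8));

            return (uint8_t)(~sum + 1);
    }

    int main(void)
    {
            uint16_t datalen = 0x0003;
            uint8_t c = checksum16(datalen);

            /* prints "checksum=0xfd verify=0x00" */
            printf("checksum=0x%02x verify=0x%02x\n",
                   c, (uint8_t)(0x03 + 0x00 + c));
            return 0;
    }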
+static void port100_tx_frame_init(void *_frame, u8 cmd_code)
+{
+ struct port100_frame *frame = _frame;
+
+ frame->preamble = 0;
+ frame->start_frame = cpu_to_be16(PORT100_FRAME_SOF);
+ frame->extended_frame = cpu_to_be16(PORT100_FRAME_EXT);
+ PORT100_FRAME_DIRECTION(frame) = PORT100_FRAME_DIR_OUT;
+ PORT100_FRAME_CMD(frame) = cmd_code;
+ frame->datalen = cpu_to_le16(2);
+}
+
+static void port100_tx_frame_finish(void *_frame)
+{
+ struct port100_frame *frame = _frame;
+
+ frame->datalen_checksum = port100_checksum(le16_to_cpu(frame->datalen));
+
+ PORT100_FRAME_CHECKSUM(frame) =
+ port100_data_checksum(frame->data, le16_to_cpu(frame->datalen));
+
+ PORT100_FRAME_POSTAMBLE(frame) = 0;
+}
+
+static void port100_tx_update_payload_len(void *_frame, int len)
+{
+ struct port100_frame *frame = _frame;
+
+ frame->datalen = cpu_to_le16(le16_to_cpu(frame->datalen) + len);
+}
+
+static bool port100_rx_frame_is_valid(void *_frame)
+{
+ u8 checksum;
+ struct port100_frame *frame = _frame;
+
+ if (frame->start_frame != cpu_to_be16(PORT100_FRAME_SOF) ||
+ frame->extended_frame != cpu_to_be16(PORT100_FRAME_EXT))
+ return false;
+
+ checksum = port100_checksum(le16_to_cpu(frame->datalen));
+ if (checksum != frame->datalen_checksum)
+ return false;
+
+ checksum = port100_data_checksum(frame->data,
+ le16_to_cpu(frame->datalen));
+ if (checksum != PORT100_FRAME_CHECKSUM(frame))
+ return false;
+
+ return true;
+}
+
+static bool port100_rx_frame_is_ack(struct port100_ack_frame *frame)
+{
+ return (frame->start_frame == cpu_to_be16(PORT100_FRAME_SOF) &&
+ frame->ack_frame == cpu_to_be16(PORT100_FRAME_ACK));
+}
+
+static inline int port100_rx_frame_size(void *frame)
+{
+ struct port100_frame *f = frame;
+
+ return sizeof(struct port100_frame) + le16_to_cpu(f->datalen) +
+ PORT100_FRAME_TAIL_LEN;
+}
+
+static bool port100_rx_frame_is_cmd_response(struct port100 *dev, void *frame)
+{
+ struct port100_frame *f = frame;
+
+ return (PORT100_FRAME_CMD(f) == PORT100_CMD_RESPONSE(dev->cmd->code));
+}
+
+static void port100_recv_response(struct urb *urb)
+{
+ struct port100 *dev = urb->context;
+ struct port100_cmd *cmd = dev->cmd;
+ u8 *in_frame;
+
+ cmd->status = urb->status;
+
+ switch (urb->status) {
+ case 0:
+ break; /* success */
+ case -ECONNRESET:
+ case -ENOENT:
+ nfc_err(&dev->interface->dev,
+ "The urb has been canceled (status %d)", urb->status);
+ goto sched_wq;
+ case -ESHUTDOWN:
+ default:
+ nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+ urb->status);
+ goto sched_wq;
+ }
+
+ in_frame = dev->in_urb->transfer_buffer;
+
+ if (!port100_rx_frame_is_valid(in_frame)) {
+ nfc_err(&dev->interface->dev, "Received an invalid frame");
+ cmd->status = -EIO;
+ goto sched_wq;
+ }
+
+ print_hex_dump_debug("PORT100 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
+ port100_rx_frame_size(in_frame), false);
+
+ if (!port100_rx_frame_is_cmd_response(dev, in_frame)) {
+ nfc_err(&dev->interface->dev,
+ "It's not the response to the last command");
+ cmd->status = -EIO;
+ goto sched_wq;
+ }
+
+sched_wq:
+ schedule_work(&dev->cmd_complete_work);
+}
+
+static int port100_submit_urb_for_response(struct port100 *dev, gfp_t flags)
+{
+ dev->in_urb->complete = port100_recv_response;
+
+ return usb_submit_urb(dev->in_urb, flags);
+}
+
+static void port100_recv_ack(struct urb *urb)
+{
+ struct port100 *dev = urb->context;
+ struct port100_cmd *cmd = dev->cmd;
+ struct port100_ack_frame *in_frame;
+ int rc;
+
+ cmd->status = urb->status;
+
+ switch (urb->status) {
+ case 0:
+ break; /* success */
+ case -ECONNRESET:
+ case -ENOENT:
+ nfc_err(&dev->interface->dev,
+ "The urb has been stopped (status %d)", urb->status);
+ goto sched_wq;
+ case -ESHUTDOWN:
+ default:
+ nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+ urb->status);
+ goto sched_wq;
+ }
+
+ in_frame = dev->in_urb->transfer_buffer;
+
+ if (!port100_rx_frame_is_ack(in_frame)) {
+ nfc_err(&dev->interface->dev, "Received an invalid ack");
+ cmd->status = -EIO;
+ goto sched_wq;
+ }
+
+ rc = port100_submit_urb_for_response(dev, GFP_ATOMIC);
+ if (rc) {
+ nfc_err(&dev->interface->dev,
+ "usb_submit_urb failed with result %d", rc);
+ cmd->status = rc;
+ goto sched_wq;
+ }
+
+ return;
+
+sched_wq:
+ schedule_work(&dev->cmd_complete_work);
+}
+
+static int port100_submit_urb_for_ack(struct port100 *dev, gfp_t flags)
+{
+ dev->in_urb->complete = port100_recv_ack;
+
+ return usb_submit_urb(dev->in_urb, flags);
+}
+
+static int port100_send_ack(struct port100 *dev)
+{
+ int rc;
+
+ dev->out_urb->transfer_buffer = ack_frame;
+ dev->out_urb->transfer_buffer_length = sizeof(ack_frame);
+ rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
+
+ return rc;
+}
+
+static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
+ struct sk_buff *in, int in_len)
+{
+ int rc;
+
+ dev->out_urb->transfer_buffer = out->data;
+ dev->out_urb->transfer_buffer_length = out->len;
+
+ dev->in_urb->transfer_buffer = in->data;
+ dev->in_urb->transfer_buffer_length = in_len;
+
+ print_hex_dump_debug("PORT100 TX: ", DUMP_PREFIX_NONE, 16, 1,
+ out->data, out->len, false);
+
+ rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
+ if (rc)
+ goto error;
+
+ return 0;
+
+error:
+ usb_unlink_urb(dev->out_urb);
+ return rc;
+}
+
+static void port100_build_cmd_frame(struct port100 *dev, u8 cmd_code,
+ struct sk_buff *skb)
+{
+ /* payload is already there, just update datalen */
+ int payload_len = skb->len;
+
+ skb_push(skb, PORT100_FRAME_HEADER_LEN);
+ skb_put(skb, PORT100_FRAME_TAIL_LEN);
+
+ port100_tx_frame_init(skb->data, cmd_code);
+ port100_tx_update_payload_len(skb->data, payload_len);
+ port100_tx_frame_finish(skb->data);
+}
+
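Putting the helpers above together, an informal byte-level sketch (reconstructed from the code, not from the device documentation) of a command frame carrying a single payload byte b0 after port100_build_cmd_frame():

    preamble  SoF    ext    datalen(LE)  LCS  CC   SCC    data   DCS    postamble
    00        00 ff  ff ff  03 00        fd   d6   <scc>  <b0>   <dcs>  00

where LCS is the datalen checksum from port100_checksum() and DCS is port100_data_checksum() computed over CC, SCC and the payload.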
+static void port100_send_async_complete(struct port100 *dev)
+{
+ struct port100_cmd *cmd = dev->cmd;
+ int status = cmd->status;
+
+ struct sk_buff *req = cmd->req;
+ struct sk_buff *resp = cmd->resp;
+
+ dev_kfree_skb(req);
+
+ dev->cmd = NULL;
+
+ if (status < 0) {
+ cmd->complete_cb(dev, cmd->complete_cb_context,
+ ERR_PTR(status));
+ dev_kfree_skb(resp);
+ goto done;
+ }
+
+ skb_put(resp, port100_rx_frame_size(resp->data));
+ skb_pull(resp, PORT100_FRAME_HEADER_LEN);
+ skb_trim(resp, resp->len - PORT100_FRAME_TAIL_LEN);
+
+ cmd->complete_cb(dev, cmd->complete_cb_context, resp);
+
+done:
+ kfree(cmd);
+}
+
+static int port100_send_cmd_async(struct port100 *dev, u8 cmd_code,
+ struct sk_buff *req,
+ port100_send_async_complete_t complete_cb,
+ void *complete_cb_context)
+{
+ struct port100_cmd *cmd;
+ struct sk_buff *resp;
+ int rc;
+ int resp_len = PORT100_FRAME_HEADER_LEN +
+ PORT100_FRAME_MAX_PAYLOAD_LEN +
+ PORT100_FRAME_TAIL_LEN;
+
+ resp = alloc_skb(resp_len, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ dev_kfree_skb(resp);
+ return -ENOMEM;
+ }
+
+ cmd->code = cmd_code;
+ cmd->req = req;
+ cmd->resp = resp;
+ cmd->resp_len = resp_len;
+ cmd->complete_cb = complete_cb;
+ cmd->complete_cb_context = complete_cb_context;
+
+ port100_build_cmd_frame(dev, cmd_code, req);
+
+ dev->cmd = cmd;
+
+ rc = port100_send_frame_async(dev, req, resp, resp_len);
+ if (rc) {
+ kfree(cmd);
+ dev_kfree_skb(resp);
+ dev->cmd = NULL;
+ }
+
+ return rc;
+}
+
+struct port100_sync_cmd_response {
+ struct sk_buff *resp;
+ struct completion done;
+};
+
+static void port100_wq_cmd_complete(struct work_struct *work)
+{
+ struct port100 *dev = container_of(work, struct port100,
+ cmd_complete_work);
+
+ port100_send_async_complete(dev);
+}
+
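For orientation, the asynchronous command path chains up as follows (informal call-flow sketch, derived only from the functions in this patch):

    port100_send_cmd_async()
      -> port100_send_frame_async()
           -> usb_submit_urb(out_urb)            /* command frame */
           -> port100_submit_urb_for_ack()       /* in_urb, completes in port100_recv_ack() */
    port100_recv_ack()
      -> port100_submit_urb_for_response()       /* in_urb reused, completes in port100_recv_response() */
    port100_recv_response()
      -> schedule_work(&dev->cmd_complete_work)
    port100_wq_cmd_complete()
      -> port100_send_async_complete()
           -> cmd->complete_cb()                 /* e.g. port100_send_sync_complete() */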
+static void port100_send_sync_complete(struct port100 *dev, void *_arg,
+ struct sk_buff *resp)
+{
+ struct port100_sync_cmd_response *arg = _arg;
+
+ arg->resp = resp;
+ complete(&arg->done);
+}
+
+static struct sk_buff *port100_send_cmd_sync(struct port100 *dev, u8 cmd_code,
+ struct sk_buff *req)
+{
+ int rc;
+ struct port100_sync_cmd_response arg;
+
+ init_completion(&arg.done);
+
+ rc = port100_send_cmd_async(dev, cmd_code, req,
+ port100_send_sync_complete, &arg);
+ if (rc) {
+ dev_kfree_skb(req);
+ return ERR_PTR(rc);
+ }
+
+ wait_for_completion(&arg.done);
+
+ return arg.resp;
+}
+
+static void port100_send_complete(struct urb *urb)
+{
+ struct port100 *dev = urb->context;
+
+ switch (urb->status) {
+ case 0:
+ break; /* success */
+ case -ECONNRESET:
+ case -ENOENT:
+ nfc_err(&dev->interface->dev,
+ "The urb has been stopped (status %d)", urb->status);
+ break;
+ case -ESHUTDOWN:
+ default:
+ nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+ urb->status);
+ }
+}
+
+static void port100_abort_cmd(struct nfc_digital_dev *ddev)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+
+ /* An ack will cancel the last issued command */
+ port100_send_ack(dev);
+
+ /* cancel the urb request */
+ usb_kill_urb(dev->in_urb);
+}
+
+static struct sk_buff *port100_alloc_skb(struct port100 *dev, unsigned int size)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(dev->skb_headroom + dev->skb_tailroom + size,
+ GFP_KERNEL);
+ if (skb)
+ skb_reserve(skb, dev->skb_headroom);
+
+ return skb;
+}
+
+static int port100_set_command_type(struct port100 *dev, u8 command_type)
+{
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ int rc;
+
+ skb = port100_alloc_skb(dev, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ *skb_put(skb, sizeof(u8)) = command_type;
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_SET_COMMAND_TYPE, skb);
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ rc = resp->data[0];
+
+ dev_kfree_skb(resp);
+
+ return rc;
+}
+
+static u64 port100_get_command_type_mask(struct port100 *dev)
+{
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ u64 mask;
+
+ skb = port100_alloc_skb(dev, 0);
+ if (!skb)
+ return -ENOMEM;
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb);
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ if (resp->len < 8)
+ mask = 0;
+ else
+ mask = be64_to_cpu(*(__be64 *)resp->data);
+
+ dev_kfree_skb(resp);
+
+ return mask;
+}
+
+static u16 port100_get_firmware_version(struct port100 *dev)
+{
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ u16 fw_ver;
+
+ skb = port100_alloc_skb(dev, 0);
+ if (!skb)
+ return 0;
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_FIRMWARE_VERSION,
+ skb);
+ if (IS_ERR(resp))
+ return 0;
+
+ fw_ver = le16_to_cpu(*(__le16 *)resp->data);
+
+ dev_kfree_skb(resp);
+
+ return fw_ver;
+}
+
+static int port100_switch_rf(struct nfc_digital_dev *ddev, bool on)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct sk_buff *skb, *resp;
+
+ skb = port100_alloc_skb(dev, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ *skb_put(skb, 1) = on ? 1 : 0;
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_SWITCH_RF, skb);
+
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ dev_kfree_skb(resp);
+
+ return 0;
+}
+
+static int port100_in_set_rf(struct nfc_digital_dev *ddev, u8 rf)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ int rc;
+
+ if (rf >= NFC_DIGITAL_RF_TECH_LAST)
+ return -EINVAL;
+
+ skb = port100_alloc_skb(dev, sizeof(struct port100_in_rf_setting));
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb_put(skb, sizeof(struct port100_in_rf_setting)),
+ &in_rf_settings[rf],
+ sizeof(struct port100_in_rf_setting));
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_RF, skb);
+
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ rc = resp->data[0];
+
+ dev_kfree_skb(resp);
+
+ return rc;
+}
+
+static int port100_in_set_framing(struct nfc_digital_dev *ddev, int param)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct port100_protocol *protocols;
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ int num_protocols;
+ size_t size;
+ int rc;
+
+ if (param >= NFC_DIGITAL_FRAMING_LAST)
+ return -EINVAL;
+
+ protocols = in_protocols[param];
+
+ num_protocols = 0;
+ while (protocols[num_protocols].number != PORT100_IN_PROT_END)
+ num_protocols++;
+
+ if (!num_protocols)
+ return 0;
+
+ size = sizeof(struct port100_protocol) * num_protocols;
+
+ skb = port100_alloc_skb(dev, size);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb_put(skb, size), protocols, size);
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_PROTOCOL, skb);
+
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ rc = resp->data[0];
+
+ dev_kfree_skb(resp);
+
+ return rc;
+}
+
+static int port100_in_configure_hw(struct nfc_digital_dev *ddev, int type,
+ int param)
+{
+ if (type == NFC_DIGITAL_CONFIG_RF_TECH)
+ return port100_in_set_rf(ddev, param);
+
+ if (type == NFC_DIGITAL_CONFIG_FRAMING)
+ return port100_in_set_framing(ddev, param);
+
+ return -EINVAL;
+}
+
+static void port100_in_comm_rf_complete(struct port100 *dev, void *arg,
+ struct sk_buff *resp)
+{
+ struct port100_cb_arg *cb_arg = arg;
+ nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
+ u32 status;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ goto exit;
+ }
+
+ if (resp->len < 4) {
+ nfc_err(&dev->interface->dev,
+ "Invalid packet length received.\n");
+ rc = -EIO;
+ goto error;
+ }
+
+ status = le32_to_cpu(*(__le32 *)resp->data);
+
+ skb_pull(resp, sizeof(u32));
+
+ if (status == PORT100_CMD_STATUS_TIMEOUT) {
+ rc = -ETIMEDOUT;
+ goto error;
+ }
+
+ if (status != PORT100_CMD_STATUS_OK) {
+ nfc_err(&dev->interface->dev,
+ "in_comm_rf failed with status 0x%08x\n", status);
+ rc = -EIO;
+ goto error;
+ }
+
+ /* Remove collision bits byte */
+ skb_pull(resp, 1);
+
+ goto exit;
+
+error:
+ kfree_skb(resp);
+ resp = ERR_PTR(rc);
+
+exit:
+ cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp);
+
+ kfree(cb_arg);
+}
+
+static int port100_in_send_cmd(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb, u16 _timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct port100_cb_arg *cb_arg;
+ __le16 timeout;
+
+ cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->complete_cb = cb;
+ cb_arg->complete_arg = arg;
+
+ timeout = cpu_to_le16(_timeout * 10);
+
+ memcpy(skb_push(skb, sizeof(__le16)), &timeout, sizeof(__le16));
+
+ return port100_send_cmd_async(dev, PORT100_CMD_IN_COMM_RF, skb,
+ port100_in_comm_rf_complete, cb_arg);
+}
+
+static int port100_tg_set_rf(struct nfc_digital_dev *ddev, u8 rf)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ int rc;
+
+ if (rf >= NFC_DIGITAL_RF_TECH_LAST)
+ return -EINVAL;
+
+ skb = port100_alloc_skb(dev, sizeof(struct port100_tg_rf_setting));
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb_put(skb, sizeof(struct port100_tg_rf_setting)),
+ &tg_rf_settings[rf],
+ sizeof(struct port100_tg_rf_setting));
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_RF, skb);
+
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ rc = resp->data[0];
+
+ dev_kfree_skb(resp);
+
+ return rc;
+}
+
+static int port100_tg_set_framing(struct nfc_digital_dev *ddev, int param)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct port100_protocol *protocols;
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+ int rc;
+ int num_protocols;
+ size_t size;
+
+ if (param >= NFC_DIGITAL_FRAMING_LAST)
+ return -EINVAL;
+
+ protocols = tg_protocols[param];
+
+ num_protocols = 0;
+ while (protocols[num_protocols].number != PORT100_TG_PROT_END)
+ num_protocols++;
+
+ if (!num_protocols)
+ return 0;
+
+ size = sizeof(struct port100_protocol) * num_protocols;
+
+ skb = port100_alloc_skb(dev, size);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb_put(skb, size), protocols, size);
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_PROTOCOL, skb);
+
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ rc = resp->data[0];
+
+ dev_kfree_skb(resp);
+
+ return rc;
+}
+
+static int port100_tg_configure_hw(struct nfc_digital_dev *ddev, int type,
+ int param)
+{
+ if (type == NFC_DIGITAL_CONFIG_RF_TECH)
+ return port100_tg_set_rf(ddev, param);
+
+ if (type == NFC_DIGITAL_CONFIG_FRAMING)
+ return port100_tg_set_framing(ddev, param);
+
+ return -EINVAL;
+}
+
+static bool port100_tg_target_activated(struct port100 *dev, u8 tgt_activated)
+{
+ u8 mask;
+
+ switch (dev->cmd_type) {
+ case PORT100_CMD_TYPE_0:
+ mask = PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK;
+ break;
+ case PORT100_CMD_TYPE_1:
+ mask = PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK |
+ PORT100_MDAA_TGT_WAS_ACTIVATED_MASK;
+ break;
+ default:
+ nfc_err(&dev->interface->dev, "Unknown command type.\n");
+ return false;
+ }
+
+ return ((tgt_activated & mask) == mask);
+}
+
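A quick numeric illustration of the check above, using the masks defined earlier: with PORT100_CMD_TYPE_1 the mask is 0x01 | 0x02 = 0x03, so tgt_activated needs both bits set ((0x03 & 0x03) == 0x03 is true, (0x01 & 0x03) == 0x03 is false); with PORT100_CMD_TYPE_0 the mask is 0x01, so bit 0 alone is enough.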
+static void port100_tg_comm_rf_complete(struct port100 *dev, void *arg,
+ struct sk_buff *resp)
+{
+ u32 status;
+ struct port100_cb_arg *cb_arg = arg;
+ nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
+ struct port100_tg_comm_rf_res *hdr;
+
+ if (IS_ERR(resp))
+ goto exit;
+
+ hdr = (struct port100_tg_comm_rf_res *)resp->data;
+
+ status = le32_to_cpu(hdr->status);
+
+ if (cb_arg->mdaa &&
+ !port100_tg_target_activated(dev, hdr->target_activated)) {
+ kfree_skb(resp);
+ resp = ERR_PTR(-ETIMEDOUT);
+
+ goto exit;
+ }
+
+ skb_pull(resp, sizeof(struct port100_tg_comm_rf_res));
+
+ if (status != PORT100_CMD_STATUS_OK) {
+ kfree_skb(resp);
+
+ if (status == PORT100_CMD_STATUS_TIMEOUT)
+ resp = ERR_PTR(-ETIMEDOUT);
+ else
+ resp = ERR_PTR(-EIO);
+ }
+
+exit:
+ cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp);
+
+ kfree(cb_arg);
+}
+
+static int port100_tg_send_cmd(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct port100_tg_comm_rf_cmd *hdr;
+ struct port100_cb_arg *cb_arg;
+
+ cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->complete_cb = cb;
+ cb_arg->complete_arg = arg;
+
+ skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd));
+
+ hdr = (struct port100_tg_comm_rf_cmd *)skb->data;
+
+ memset(hdr, 0, sizeof(struct port100_tg_comm_rf_cmd));
+ hdr->guard_time = cpu_to_le16(500);
+ hdr->send_timeout = cpu_to_le16(0xFFFF);
+ hdr->recv_timeout = cpu_to_le16(timeout);
+
+ return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb,
+ port100_tg_comm_rf_complete, cb_arg);
+}
+
+static int port100_listen_mdaa(struct nfc_digital_dev *ddev,
+ struct digital_tg_mdaa_params *params,
+ u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct port100_tg_comm_rf_cmd *hdr;
+ struct port100_cb_arg *cb_arg;
+ struct sk_buff *skb;
+ int rc;
+
+ rc = port100_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
+ NFC_DIGITAL_RF_TECH_106A);
+ if (rc)
+ return rc;
+
+ rc = port100_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCA_NFC_DEP);
+ if (rc)
+ return rc;
+
+ cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->complete_cb = cb;
+ cb_arg->complete_arg = arg;
+ cb_arg->mdaa = 1;
+
+ skb = port100_alloc_skb(dev, 0);
+ if (!skb) {
+ kfree(cb_arg);
+ return -ENOMEM;
+ }
+
+ skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd));
+ hdr = (struct port100_tg_comm_rf_cmd *)skb->data;
+
+ memset(hdr, 0, sizeof(struct port100_tg_comm_rf_cmd));
+
+ hdr->guard_time = 0;
+ hdr->send_timeout = cpu_to_le16(0xFFFF);
+ hdr->mdaa = 1;
+ hdr->nfca_param[0] = (params->sens_res >> 8) & 0xFF;
+ hdr->nfca_param[1] = params->sens_res & 0xFF;
+ memcpy(hdr->nfca_param + 2, params->nfcid1, 3);
+ hdr->nfca_param[5] = params->sel_res;
+ memcpy(hdr->nfcf_param, params->nfcid2, 8);
+ hdr->nfcf_param[16] = (params->sc >> 8) & 0xFF;
+ hdr->nfcf_param[17] = params->sc & 0xFF;
+ hdr->recv_timeout = cpu_to_le16(timeout);
+
+ return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb,
+ port100_tg_comm_rf_complete, cb_arg);
+}
+
+static int port100_listen(struct nfc_digital_dev *ddev, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ struct sk_buff *skb;
+
+ skb = port100_alloc_skb(dev, 0);
+ if (!skb)
+ return -ENOMEM;
+
+ return port100_tg_send_cmd(ddev, skb, timeout, cb, arg);
+}
+
+static struct nfc_digital_ops port100_digital_ops = {
+ .in_configure_hw = port100_in_configure_hw,
+ .in_send_cmd = port100_in_send_cmd,
+
+ .tg_listen_mdaa = port100_listen_mdaa,
+ .tg_listen = port100_listen,
+ .tg_configure_hw = port100_tg_configure_hw,
+ .tg_send_cmd = port100_tg_send_cmd,
+
+ .switch_rf = port100_switch_rf,
+ .abort_cmd = port100_abort_cmd,
+};
+
+static const struct usb_device_id port100_table[] = {
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = SONY_VENDOR_ID,
+ .idProduct = RCS380_PRODUCT_ID,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, port100_table);
+
+static int port100_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct port100 *dev;
+ int rc;
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ int in_endpoint;
+ int out_endpoint;
+ u16 fw_version;
+ u64 cmd_type_mask;
+ int i;
+
+ dev = devm_kzalloc(&interface->dev, sizeof(struct port100), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->udev = usb_get_dev(interface_to_usbdev(interface));
+ dev->interface = interface;
+ usb_set_intfdata(interface, dev);
+
+ in_endpoint = out_endpoint = 0;
+ iface_desc = interface->cur_altsetting;
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint))
+ in_endpoint = endpoint->bEndpointAddress;
+
+ if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint))
+ out_endpoint = endpoint->bEndpointAddress;
+ }
+
+ if (!in_endpoint || !out_endpoint) {
+ nfc_err(&interface->dev,
+ "Could not find bulk-in or bulk-out endpoint\n");
+ rc = -ENODEV;
+ goto error;
+ }
+
+ dev->in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->out_urb = usb_alloc_urb(0, GFP_KERNEL);
+
+ if (!dev->in_urb || !dev->out_urb) {
+ nfc_err(&interface->dev, "Could not allocate USB URBs\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ usb_fill_bulk_urb(dev->in_urb, dev->udev,
+ usb_rcvbulkpipe(dev->udev, in_endpoint),
+ NULL, 0, NULL, dev);
+ usb_fill_bulk_urb(dev->out_urb, dev->udev,
+ usb_sndbulkpipe(dev->udev, out_endpoint),
+ NULL, 0, port100_send_complete, dev);
+
+ dev->skb_headroom = PORT100_FRAME_HEADER_LEN +
+ PORT100_COMM_RF_HEAD_MAX_LEN;
+ dev->skb_tailroom = PORT100_FRAME_TAIL_LEN;
+
+ INIT_WORK(&dev->cmd_complete_work, port100_wq_cmd_complete);
+
+ /* The first thing to do with the Port-100 is to set the command type
+ * to be used. If supported, we use command type 1; 0 otherwise.
+ */
+ cmd_type_mask = port100_get_command_type_mask(dev);
+ if (!cmd_type_mask) {
+ nfc_err(&interface->dev,
+ "Could not get supported command types.\n");
+ rc = -ENODEV;
+ goto error;
+ }
+
+ if (PORT100_CMD_TYPE_IS_SUPPORTED(cmd_type_mask, PORT100_CMD_TYPE_1))
+ dev->cmd_type = PORT100_CMD_TYPE_1;
+ else
+ dev->cmd_type = PORT100_CMD_TYPE_0;
+
+ rc = port100_set_command_type(dev, dev->cmd_type);
+ if (rc) {
+ nfc_err(&interface->dev,
+ "The device does not support command type %u.\n",
+ dev->cmd_type);
+ goto error;
+ }
+
+ fw_version = port100_get_firmware_version(dev);
+ if (!fw_version)
+ nfc_err(&interface->dev,
+ "Could not get device firmware version.\n");
+
+ nfc_info(&interface->dev,
+ "Sony NFC Port-100 Series attached (firmware v%x.%02x)\n",
+ (fw_version & 0xFF00) >> 8, fw_version & 0xFF);
+
+ dev->nfc_digital_dev = nfc_digital_allocate_device(&port100_digital_ops,
+ PORT100_PROTOCOLS,
+ PORT100_CAPABILITIES,
+ dev->skb_headroom,
+ dev->skb_tailroom);
+ if (!dev->nfc_digital_dev) {
+ nfc_err(&interface->dev,
+ "Could not allocate nfc_digital_dev.\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ nfc_digital_set_parent_dev(dev->nfc_digital_dev, &interface->dev);
+ nfc_digital_set_drvdata(dev->nfc_digital_dev, dev);
+
+ rc = nfc_digital_register_device(dev->nfc_digital_dev);
+ if (rc) {
+ nfc_err(&interface->dev,
+ "Could not register digital device.\n");
+ goto free_nfc_dev;
+ }
+
+ return 0;
+
+free_nfc_dev:
+ nfc_digital_free_device(dev->nfc_digital_dev);
+
+error:
+ usb_free_urb(dev->in_urb);
+ usb_free_urb(dev->out_urb);
+ usb_put_dev(dev->udev);
+
+ return rc;
+}
+
+static void port100_disconnect(struct usb_interface *interface)
+{
+ struct port100 *dev;
+
+ dev = usb_get_intfdata(interface);
+ usb_set_intfdata(interface, NULL);
+
+ nfc_digital_unregister_device(dev->nfc_digital_dev);
+ nfc_digital_free_device(dev->nfc_digital_dev);
+
+ usb_kill_urb(dev->in_urb);
+ usb_kill_urb(dev->out_urb);
+
+ usb_free_urb(dev->in_urb);
+ usb_free_urb(dev->out_urb);
+
+ kfree(dev->cmd);
+
+ nfc_info(&interface->dev, "Sony Port-100 NFC device disconnected");
+}
+
+static struct usb_driver port100_driver = {
+ .name = "port100",
+ .probe = port100_probe,
+ .disconnect = port100_disconnect,
+ .id_table = port100_table,
+};
+
+module_usb_driver(port100_driver);
+
+MODULE_DESCRIPTION("NFC Port-100 series usb driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/of/address.c b/drivers/of/address.c
index b55c21890760..4b9317bdb81c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -489,7 +489,7 @@ static u64 __of_translate_address(struct device_node *dev,
int na, ns, pna, pns;
u64 result = OF_BAD_ADDR;
- pr_debug("OF: ** translation for device %s **\n", dev->full_name);
+ pr_debug("OF: ** translation for device %s **\n", of_node_full_name(dev));
/* Increase refcount at current level */
of_node_get(dev);
@@ -504,13 +504,13 @@ static u64 __of_translate_address(struct device_node *dev,
bus->count_cells(dev, &na, &ns);
if (!OF_CHECK_COUNTS(na, ns)) {
printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
- dev->full_name);
+ of_node_full_name(dev));
goto bail;
}
memcpy(addr, in_addr, na * 4);
pr_debug("OF: bus is %s (na=%d, ns=%d) on %s\n",
- bus->name, na, ns, parent->full_name);
+ bus->name, na, ns, of_node_full_name(parent));
of_dump_addr("OF: translating address:", addr, na);
/* Translate */
@@ -532,12 +532,12 @@ static u64 __of_translate_address(struct device_node *dev,
pbus->count_cells(dev, &pna, &pns);
if (!OF_CHECK_COUNTS(pna, pns)) {
printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
- dev->full_name);
+ of_node_full_name(dev));
break;
}
pr_debug("OF: parent bus is %s (na=%d, ns=%d) on %s\n",
- pbus->name, pna, pns, parent->full_name);
+ pbus->name, pna, pns, of_node_full_name(parent));
/* Apply bus translation */
if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
@@ -626,6 +626,14 @@ const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
}
EXPORT_SYMBOL(of_get_address);
+unsigned long __weak pci_address_to_pio(phys_addr_t address)
+{
+ if (address > IO_SPACE_LIMIT)
+ return (unsigned long)-1;
+
+ return (unsigned long) address;
+}
+
static int __of_address_to_resource(struct device_node *dev,
const __be32 *addrp, u64 size, unsigned int flags,
const char *name, struct resource *r)
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 7d4c70f859e3..f807d0edabf3 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -74,6 +74,13 @@ int of_n_size_cells(struct device_node *np)
}
EXPORT_SYMBOL(of_n_size_cells);
+#ifdef CONFIG_NUMA
+int __weak of_node_to_nid(struct device_node *np)
+{
+ return numa_node_id();
+}
+#endif
+
#if defined(CONFIG_OF_DYNAMIC)
/**
* of_node_get - Increment refcount of a node
@@ -265,9 +272,9 @@ static bool __of_find_n_match_cpu_property(struct device_node *cpun,
ac = of_n_addr_cells(cpun);
cell = of_get_property(cpun, prop_name, &prop_len);
- if (!cell)
+ if (!cell || !ac)
return false;
- prop_len /= sizeof(*cell);
+ prop_len /= sizeof(*cell) * ac;
for (tid = 0; tid < prop_len; tid++) {
hwid = of_read_number(cell, ac);
if (arch_match_cpu_phys_id(cpu, hwid)) {
@@ -280,6 +287,31 @@ static bool __of_find_n_match_cpu_property(struct device_node *cpun,
return false;
}
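Worked example for the prop_len fix in the hunk above: with #address-cells = 2 (ac = 2) and a 16-byte property, prop_len now comes out as 16 / (4 * 2) = 2 hardware IDs rather than 4 raw cells, matching the ac-cell read width used by of_read_number(cell, ac).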
+/*
+ * arch_find_n_match_cpu_physical_id - See if the given device node is
+ * for the cpu corresponding to logical cpu 'cpu'. Return true if so,
+ * else false. If 'thread' is non-NULL, the local thread number within the
+ * core is returned in it.
+ */
+bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
+ int cpu, unsigned int *thread)
+{
+ /* Check for non-standard "ibm,ppc-interrupt-server#s" property
+ * for thread ids on PowerPC. If it doesn't exist, fall back to
+ * standard "reg" property.
+ */
+ if (IS_ENABLED(CONFIG_PPC) &&
+ __of_find_n_match_cpu_property(cpun,
+ "ibm,ppc-interrupt-server#s",
+ cpu, thread))
+ return true;
+
+ if (__of_find_n_match_cpu_property(cpun, "reg", cpu, thread))
+ return true;
+
+ return false;
+}
+
/**
* of_get_cpu_node - Get device node associated with the given logical CPU
*
@@ -300,24 +332,10 @@ static bool __of_find_n_match_cpu_property(struct device_node *cpun,
*/
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
- struct device_node *cpun, *cpus;
+ struct device_node *cpun;
- cpus = of_find_node_by_path("/cpus");
- if (!cpus)
- return NULL;
-
- for_each_child_of_node(cpus, cpun) {
- if (of_node_cmp(cpun->type, "cpu"))
- continue;
- /* Check for non-standard "ibm,ppc-interrupt-server#s" property
- * for thread ids on PowerPC. If it doesn't exist fallback to
- * standard "reg" property.
- */
- if (IS_ENABLED(CONFIG_PPC) &&
- __of_find_n_match_cpu_property(cpun,
- "ibm,ppc-interrupt-server#s", cpu, thread))
- return cpun;
- if (__of_find_n_match_cpu_property(cpun, "reg", cpu, thread))
+ for_each_node_by_type(cpun, "cpu") {
+ if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
return cpun;
}
return NULL;
@@ -1174,6 +1192,15 @@ int of_property_count_strings(struct device_node *np, const char *propname)
}
EXPORT_SYMBOL_GPL(of_property_count_strings);
+void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
+{
+ int i;
+ printk("%s %s", msg, of_node_full_name(args->np));
+ for (i = 0; i < args->args_count; i++)
+ printk(i ? ",%08x" : ":%08x", args->args[i]);
+ printk("\n");
+}
+
static int __of_parse_phandle_with_args(const struct device_node *np,
const char *list_name,
const char *cells_name,
@@ -1882,3 +1909,34 @@ int of_device_is_stdout_path(struct device_node *dn)
return of_stdout == dn;
}
EXPORT_SYMBOL_GPL(of_device_is_stdout_path);
+
+/**
+ * of_find_next_cache_node - Find a node's subsidiary cache
+ * @np: node of type "cpu" or "cache"
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done. Caller should hold a reference
+ * to np.
+ */
+struct device_node *of_find_next_cache_node(const struct device_node *np)
+{
+ struct device_node *child;
+ const phandle *handle;
+
+ handle = of_get_property(np, "l2-cache", NULL);
+ if (!handle)
+ handle = of_get_property(np, "next-level-cache", NULL);
+
+ if (handle)
+ return of_find_node_by_phandle(be32_to_cpup(handle));
+
+ /* OF on pmac has nodes instead of properties named "l2-cache"
+ * beneath CPU nodes.
+ */
+ if (!strcmp(np->type, "cpu"))
+ for_each_child_of_node(np, child)
+ if (!strcmp(child->type, "cache"))
+ return child;
+
+ return NULL;
+}
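A minimal usage sketch for the helper above (illustrative caller, not part of the patch), walking a CPU's cache hierarchy while following the refcounting contract from the comment:

    struct device_node *np = of_get_cpu_node(0, NULL);

    while (np) {
            struct device_node *next = of_find_next_cache_node(np);

            of_node_put(np);        /* drop the reference to the current level */
            np = next;              /* reference to the next level already taken */
            if (np)
                    pr_info("cache node: %s\n", of_node_full_name(np));
    }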
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index a4fa9ad31b8f..2fa024b97c43 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -618,12 +618,72 @@ int __init of_scan_flat_dt_by_path(const char *path,
return ret;
}
+const char * __init of_flat_dt_get_machine_name(void)
+{
+ const char *name;
+ unsigned long dt_root = of_get_flat_dt_root();
+
+ name = of_get_flat_dt_prop(dt_root, "model", NULL);
+ if (!name)
+ name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
+ return name;
+}
+
+/**
+ * of_flat_dt_match_machine - Iterate match tables to find matching machine.
+ *
+ * @default_match: A machine specific ptr to return in case of no match.
+ * @get_next_compat: callback function to return next compatible match table.
+ *
+ * Iterate through machine match tables to find the best match for the machine
+ * compatible string in the FDT.
+ */
+const void * __init of_flat_dt_match_machine(const void *default_match,
+ const void * (*get_next_compat)(const char * const**))
+{
+ const void *data = NULL;
+ const void *best_data = default_match;
+ const char *const *compat;
+ unsigned long dt_root;
+ unsigned int best_score = ~1, score = 0;
+
+ dt_root = of_get_flat_dt_root();
+ while ((data = get_next_compat(&compat))) {
+ score = of_flat_dt_match(dt_root, compat);
+ if (score > 0 && score < best_score) {
+ best_data = data;
+ best_score = score;
+ }
+ }
+ if (!best_data) {
+ const char *prop;
+ long size;
+
+ pr_err("\n unrecognized device tree list:\n[ ");
+
+ prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
+ if (prop) {
+ while (size > 0) {
+ printk("'%s' ", prop);
+ size -= strlen(prop) + 1;
+ prop += strlen(prop) + 1;
+ }
+ }
+ printk("]\n\n");
+ return NULL;
+ }
+
+ pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());
+
+ return best_data;
+}
+
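To make the get_next_compat contract concrete, here is a minimal sketch of such a callback walking a hypothetical board table (the struct and table names below are illustrative, not from this patch):

    struct example_board {
            const char *name;
            const char *const *dt_compat;   /* NULL-terminated compatible strings */
    };

    static const char *const foo_compat[] = { "vendor,board-foo", NULL };

    static const struct example_board boards[] = {
            { .name = "Board Foo", .dt_compat = foo_compat },
            { /* sentinel */ }
    };

    static const void * __init example_get_next_compat(const char *const **match)
    {
            static const struct example_board *b = boards;

            if (!b->name)
                    return NULL;

            *match = b->dt_compat;
            return b++;
    }

    /* then: mdesc = of_flat_dt_match_machine(NULL, example_get_next_compat); */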
#ifdef CONFIG_BLK_DEV_INITRD
/**
* early_init_dt_check_for_initrd - Decode initrd location from flat tree
* @node: reference to node containing initrd location ('chosen')
*/
-void __init early_init_dt_check_for_initrd(unsigned long node)
+static void __init early_init_dt_check_for_initrd(unsigned long node)
{
u64 start, end;
unsigned long len;
@@ -641,12 +701,15 @@ void __init early_init_dt_check_for_initrd(unsigned long node)
return;
end = of_read_number(prop, len/4);
- early_init_dt_setup_initrd_arch(start, end);
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
+
pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n",
(unsigned long long)start, (unsigned long long)end);
}
#else
-inline void early_init_dt_check_for_initrd(unsigned long node)
+static inline void early_init_dt_check_for_initrd(unsigned long node)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */
@@ -774,6 +837,25 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
}
#ifdef CONFIG_HAVE_MEMBLOCK
+void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ const u64 phys_offset = __pa(PAGE_OFFSET);
+ base &= PAGE_MASK;
+ size &= PAGE_MASK;
+ if (base + size < phys_offset) {
+ pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
+ base, base + size);
+ return;
+ }
+ if (base < phys_offset) {
+ pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
+ base, phys_offset);
+ size -= phys_offset - base;
+ base = phys_offset;
+ }
+ memblock_add(base, size);
+}
+
/*
* called from unflatten_device_tree() to bootstrap devicetree itself
* Architectures can override this definition if memblock isn't used
@@ -784,6 +866,32 @@ void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align)
}
#endif
+bool __init early_init_dt_scan(void *params)
+{
+ if (!params)
+ return false;
+
+ /* Setup flat device-tree pointer */
+ initial_boot_params = params;
+
+ /* check device tree validity */
+ if (be32_to_cpu(initial_boot_params->magic) != OF_DT_HEADER) {
+ initial_boot_params = NULL;
+ return false;
+ }
+
+ /* Retrieve various information from the /chosen node */
+ of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+
+ /* Initialize {size,address}-cells info */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+
+ /* Setup memory, calling early_init_dt_add_memory_arch */
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+ return true;
+}
+
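A sketch of how an architecture's setup code might call the new helper (arch_get_boot_fdt() is a hypothetical helper standing in for however the arch obtains the blob; nothing here is defined by this patch):

    void __init setup_arch(char **cmdline_p)
    {
            void *fdt = arch_get_boot_fdt();  /* virtual address of the blob
                                               * handed over by the bootloader */

            if (!early_init_dt_scan(fdt))
                    panic("Invalid or missing device tree blob");

            /* memory was registered via early_init_dt_add_memory_arch() inside
             * early_init_dt_scan(); once the allocators are up: */
            unflatten_device_tree();
    }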
/**
* unflatten_device_tree - create tree of device_nodes from flat blob
*
@@ -801,4 +909,28 @@ void __init unflatten_device_tree(void)
of_alias_scan(early_init_dt_alloc_memory_arch);
}
+/**
+ * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
+ *
+ * Copies and unflattens the device-tree passed by the firmware, creating the
+ * tree of struct device_node. It also fills the "name" and "type"
+ * pointers of the nodes so the normal device-tree walking functions
+ * can be used. This should only be used when the FDT memory has not been
+ * reserved such is the case when the FDT is built-in to the kernel init
+ * section. If the FDT memory is reserved already then unflatten_device_tree
+ * should be used instead.
+ */
+void __init unflatten_and_copy_device_tree(void)
+{
+ int size = __be32_to_cpu(initial_boot_params->totalsize);
+ void *dt = early_init_dt_alloc_memory_arch(size,
+ __alignof__(struct boot_param_header));
+
+ if (dt) {
+ memcpy(dt, initial_boot_params, size);
+ initial_boot_params = dt;
+ }
+ unflatten_device_tree();
+}
+
#endif /* CONFIG_OF_EARLY_FLATTREE */
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 1752988d6aa8..d385bb824772 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -31,18 +31,17 @@
* @dev: Device node of the device whose interrupt is to be mapped
* @index: Index of the interrupt to map
*
- * This function is a wrapper that chains of_irq_map_one() and
+ * This function is a wrapper that chains of_irq_parse_one() and
* irq_create_of_mapping() to make things easier for callers
*/
unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
- struct of_irq oirq;
+ struct of_phandle_args oirq;
- if (of_irq_map_one(dev, index, &oirq))
+ if (of_irq_parse_one(dev, index, &oirq))
return 0;
- return irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
+ return irq_create_of_mapping(&oirq);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
@@ -79,33 +78,34 @@ struct device_node *of_irq_find_parent(struct device_node *child)
}
/**
- * of_irq_map_raw - Low level interrupt tree parsing
+ * of_irq_parse_raw - Low level interrupt tree parsing
* @parent: the device interrupt parent
- * @intspec: interrupt specifier ("interrupts" property of the device)
- * @ointsize: size of the passed in interrupt specifier
- * @addr: address specifier (start of "reg" property of the device)
- * @out_irq: structure of_irq filled by this function
+ * @addr: address specifier (start of "reg" property of the device) in be32 format
+ * @out_irq: structure of_irq updated by this function
*
* Returns 0 on success and a negative number on error
*
* This function is a low-level interrupt tree walking function. It
* can be used to do a partial walk with synthesized reg and interrupts
* properties, for example when resolving PCI interrupts when no device
- * node exist for the parent.
+ * node exists for the parent. It takes an interrupt specifier structure as
+ * input, walks the tree looking for any interrupt-map properties, translates
+ * the specifier for each map, and then returns the translated map.
*/
-int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
- u32 ointsize, const __be32 *addr, struct of_irq *out_irq)
+int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
{
struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
- const __be32 *tmp, *imap, *imask;
+ __be32 initial_match_array[MAX_PHANDLE_ARGS];
+ const __be32 *match_array = initial_match_array;
+ const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
int imaplen, match, i;
- pr_debug("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n",
- parent->full_name, be32_to_cpup(intspec),
- be32_to_cpup(intspec + 1), ointsize);
+#ifdef DEBUG
+ of_print_phandle_args("of_irq_parse_raw: ", out_irq);
+#endif
- ipar = of_node_get(parent);
+ ipar = of_node_get(out_irq->np);
/* First get the #interrupt-cells property of the current cursor
* that tells us how to interpret the passed-in intspec. If there
@@ -126,9 +126,9 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
goto fail;
}
- pr_debug("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize);
+ pr_debug("of_irq_parse_raw: ipar=%s, size=%d\n", of_node_full_name(ipar), intsize);
- if (ointsize != intsize)
+ if (out_irq->args_count != intsize)
return -EINVAL;
/* Look for this #address-cells. We have to implement the old linux
@@ -147,6 +147,16 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
pr_debug(" -> addrsize=%d\n", addrsize);
+ /* Range check so that the temporary buffer doesn't overflow */
+ if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS))
+ goto fail;
+
+ /* Precalculate the match array - this simplifies match loop */
+ for (i = 0; i < addrsize; i++)
+ initial_match_array[i] = addr ? addr[i] : 0;
+ for (i = 0; i < intsize; i++)
+ initial_match_array[addrsize + i] = cpu_to_be32(out_irq->args[i]);
+
/* Now start the actual "proper" walk of the interrupt tree */
while (ipar != NULL) {
/* Now check if cursor is an interrupt-controller and if it is
@@ -155,15 +165,19 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
if (of_get_property(ipar, "interrupt-controller", NULL) !=
NULL) {
pr_debug(" -> got it !\n");
- for (i = 0; i < intsize; i++)
- out_irq->specifier[i] =
- of_read_number(intspec +i, 1);
- out_irq->size = intsize;
- out_irq->controller = ipar;
of_node_put(old);
return 0;
}
+ /*
+ * interrupt-map parsing does not work without a reg
+ * property when #address-cells != 0
+ */
+ if (addrsize && !addr) {
+ pr_debug(" -> no reg passed in when needed !\n");
+ goto fail;
+ }
+
/* Now look for an interrupt-map */
imap = of_get_property(ipar, "interrupt-map", &imaplen);
/* No interrupt map, check for an interrupt parent */
@@ -176,34 +190,16 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
/* Look for a mask */
imask = of_get_property(ipar, "interrupt-map-mask", NULL);
-
- /* If we were passed no "reg" property and we attempt to parse
- * an interrupt-map, then #address-cells must be 0.
- * Fail if it's not.
- */
- if (addr == NULL && addrsize != 0) {
- pr_debug(" -> no reg passed in when needed !\n");
- goto fail;
- }
+ if (!imask)
+ imask = dummy_imask;
/* Parse interrupt-map */
match = 0;
while (imaplen > (addrsize + intsize + 1) && !match) {
/* Compare specifiers */
match = 1;
- for (i = 0; i < addrsize && match; ++i) {
- __be32 mask = imask ? imask[i]
- : cpu_to_be32(0xffffffffu);
- match = ((addr[i] ^ imap[i]) & mask) == 0;
- }
- for (; i < (addrsize + intsize) && match; ++i) {
- __be32 mask = imask ? imask[i]
- : cpu_to_be32(0xffffffffu);
- match =
- ((intspec[i-addrsize] ^ imap[i]) & mask) == 0;
- }
- imap += addrsize + intsize;
- imaplen -= addrsize + intsize;
+ for (i = 0; i < (addrsize + intsize); i++, imaplen--)
+ match = !((match_array[i] ^ *imap++) & imask[i]);
pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);
@@ -237,6 +233,8 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
newintsize, newaddrsize);
/* Check for malformed properties */
+ if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS))
+ goto fail;
if (imaplen < (newaddrsize + newintsize))
goto fail;
@@ -248,12 +246,18 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
if (!match)
goto fail;
- of_node_put(old);
- old = of_node_get(newpar);
+ /*
+ * Successfully parsed an interrupt-map translation; copy new
+ * interrupt specifier into the out_irq structure
+ */
+ of_node_put(out_irq->np);
+ out_irq->np = of_node_get(newpar);
+
+ match_array = imap - newaddrsize - newintsize;
+ for (i = 0; i < newintsize; i++)
+ out_irq->args[i] = be32_to_cpup(imap - newintsize + i);
+ out_irq->args_count = intsize = newintsize;
addrsize = newaddrsize;
- intsize = newintsize;
- intspec = imap - intsize;
- addr = intspec - addrsize;
skiplevel:
/* Iterate again with new parent */
@@ -264,46 +268,53 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
}
fail:
of_node_put(ipar);
- of_node_put(old);
+ of_node_put(out_irq->np);
of_node_put(newpar);
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(of_irq_map_raw);
+EXPORT_SYMBOL_GPL(of_irq_parse_raw);
/**
- * of_irq_map_one - Resolve an interrupt for a device
+ * of_irq_parse_one - Resolve an interrupt for a device
* @device: the device whose interrupt is to be resolved
* @index: index of the interrupt to resolve
* @out_irq: structure of_irq filled by this function
*
- * This function resolves an interrupt, walking the tree, for a given
- * device-tree node. It's the high level pendant to of_irq_map_raw().
+ * This function resolves an interrupt for a node by walking the interrupt tree,
+ * finding which interrupt controller node it is attached to, and returning the
+ * interrupt specifier that can be used to retrieve a Linux IRQ number.
*/
-int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq)
+int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq)
{
struct device_node *p;
const __be32 *intspec, *tmp, *addr;
u32 intsize, intlen;
- int res = -EINVAL;
+ int i, res = -EINVAL;
- pr_debug("of_irq_map_one: dev=%s, index=%d\n", device->full_name, index);
+ pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index);
/* OldWorld mac stuff is "special", handle out of line */
if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
- return of_irq_map_oldworld(device, index, out_irq);
+ return of_irq_parse_oldworld(device, index, out_irq);
+
+ /* Get the reg property (if any) */
+ addr = of_get_property(device, "reg", NULL);
/* Get the interrupts property */
intspec = of_get_property(device, "interrupts", &intlen);
- if (intspec == NULL)
- return -EINVAL;
+ if (intspec == NULL) {
+ /* Try the new-style interrupts-extended */
+ res = of_parse_phandle_with_args(device, "interrupts-extended",
+ "#interrupt-cells", index, out_irq);
+ if (res)
+ return -EINVAL;
+ return of_irq_parse_raw(addr, out_irq);
+ }
intlen /= sizeof(*intspec);
pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen);
- /* Get the reg property (if any) */
- addr = of_get_property(device, "reg", NULL);
-
/* Look for the interrupt parent. */
p = of_irq_find_parent(device);
if (p == NULL)
@@ -321,14 +332,20 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
if ((index + 1) * intsize > intlen)
goto out;
- /* Get new specifier and map it */
- res = of_irq_map_raw(p, intspec + index * intsize, intsize,
- addr, out_irq);
+ /* Copy intspec into irq structure */
+ intspec += index * intsize;
+ out_irq->np = p;
+ out_irq->args_count = intsize;
+ for (i = 0; i < intsize; i++)
+ out_irq->args[i] = be32_to_cpup(intspec++);
+
+ /* Check if there are any interrupt-map translations to process */
+ res = of_irq_parse_raw(addr, out_irq);
out:
of_node_put(p);
return res;
}
-EXPORT_SYMBOL_GPL(of_irq_map_one);
+EXPORT_SYMBOL_GPL(of_irq_parse_one);
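As a rough illustration of how the renamed API fits together (a minimal sketch, not part of the patch): a driver could resolve its first interrupt specifier with of_irq_parse_one() and hand the resulting of_phandle_args to irq_create_of_mapping(), which this series converts to take the structure directly. The helper name example_get_virq and the node pointer np are placeholders.

#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/* Hypothetical helper; "np" is a device node the caller already holds. */
static int example_get_virq(struct device_node *np)
{
	struct of_phandle_args oirq;
	int ret;

	/* Parse interrupt index 0 ("interrupts" or "interrupts-extended") */
	ret = of_irq_parse_one(np, 0, &oirq);
	if (ret)
		return ret;

	/* Map the (controller, specifier) pair to a Linux virq; 0 means none */
	return irq_create_of_mapping(&oirq);
}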
/**
* of_irq_to_resource - Decode a node's IRQ and return it as a resource
@@ -354,8 +371,8 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
&name);
r->start = r->end = irq;
- r->flags = IORESOURCE_IRQ;
- r->name = name ? name : dev->full_name;
+ r->flags = IORESOURCE_IRQ | irqd_get_trigger_type(irq_get_irq_data(irq));
+ r->name = name ? name : of_node_full_name(dev);
}
return irq;
@@ -368,9 +385,10 @@ EXPORT_SYMBOL_GPL(of_irq_to_resource);
*/
int of_irq_count(struct device_node *dev)
{
+ struct of_phandle_args irq;
int nr = 0;
- while (of_irq_to_resource(dev, nr, NULL))
+ while (of_irq_parse_one(dev, nr, &irq) == 0)
nr++;
return nr;
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index e5ca00893c0c..848199633798 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -2,7 +2,6 @@
#include <linux/export.h>
#include <linux/of.h>
#include <linux/of_pci.h>
-#include <asm/prom.h>
static inline int __of_pci_pci_compare(struct device_node *node,
unsigned int data)
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 677053813211..8736bc7676c5 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -2,10 +2,9 @@
#include <linux/of_pci.h>
#include <linux/of_irq.h>
#include <linux/export.h>
-#include <asm/prom.h>
/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
+ * of_irq_parse_pci - Resolve the interrupt for a PCI device
* @pdev: the device whose interrupt is to be resolved
* @out_irq: structure of_irq filled by this function
*
@@ -15,7 +14,7 @@
* PCI tree until a device-node is found, at which point it will finish
* resolving using the OF tree walking.
*/
-int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
+int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
struct device_node *dn, *ppnode;
struct pci_dev *ppdev;
@@ -30,7 +29,7 @@ int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
*/
dn = pci_device_to_OF_node(pdev);
if (dn) {
- rc = of_irq_map_one(dn, 0, out_irq);
+ rc = of_irq_parse_one(dn, 0, out_irq);
if (!rc)
return rc;
}
@@ -85,9 +84,37 @@ int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
pdev = ppdev;
}
+ out_irq->np = ppnode;
+ out_irq->args_count = 1;
+ out_irq->args[0] = lspec;
lspec_be = cpu_to_be32(lspec);
laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
- laddr[1] = laddr[2] = cpu_to_be32(0);
- return of_irq_map_raw(ppnode, &lspec_be, 1, laddr, out_irq);
+ laddr[1] = laddr[2] = cpu_to_be32(0);
+ return of_irq_parse_raw(laddr, out_irq);
}
-EXPORT_SYMBOL_GPL(of_irq_map_pci);
+EXPORT_SYMBOL_GPL(of_irq_parse_pci);
+
+/**
+ * of_irq_parse_and_map_pci() - Decode a PCI irq from the device tree and map to a virq
+ * @dev: The pci device needing an irq
+ * @slot: PCI slot number; passed when used as map_irq callback. Unused
+ * @pin: PCI irq pin number; passed when used as map_irq callback. Unused
+ *
+ * @slot and @pin are unused, but included in the function so that this
+ * function can be used directly as the map_irq callback to pci_fixup_irqs().
+ */
+int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct of_phandle_args oirq;
+ int ret;
+
+ ret = of_irq_parse_pci(dev, &oirq);
+ if (ret) {
+ dev_err(&dev->dev, "of_irq_parse_pci() failed with rc=%d\n", ret);
+ return 0; /* Proper return code 0 == NO_IRQ */
+ }
+
+ return irq_create_of_mapping(&oirq);
+}
+EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
+
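A minimal sketch of the PCI side (again not part of the patch): since of_irq_parse_and_map_pci() ignores its slot/pin arguments, it can be called directly to obtain a virq for a pci_dev, or assigned as-is to an ARM hw_pci .map_irq callback, which is what the mvebu host driver does further down in this series. The function example_pci_virq and the pdev argument are placeholders.

#include <linux/of_pci.h>
#include <linux/pci.h>

/* Hypothetical caller; "pdev" is a PCI device the caller already owns. */
static int example_pci_virq(struct pci_dev *pdev)
{
	/* slot and pin are unused by the helper, so zeros are fine here */
	int virq = of_irq_parse_and_map_pci(pdev, 0, 0);

	return virq ? virq : -ENODEV;	/* 0 means no IRQ was found */
}

Used as a callback it is simply "hw.map_irq = of_irq_parse_and_map_pci;" before pci_common_init(&hw), matching the pci-mvebu.c hunk below.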
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 4ec19cbee57f..7b666736c168 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -22,7 +22,6 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_pdt.h>
-#include <asm/prom.h>
static struct of_pdt_ops *of_pdt_prom_ops __initdata;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index f6dcde220821..404d1daebefa 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -215,6 +215,8 @@ static struct platform_device *of_platform_device_create_pdata(
dev->archdata.dma_mask = 0xffffffffUL;
#endif
dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ if (!dev->dev.dma_mask)
+ dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
@@ -280,9 +282,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
else
of_device_make_bus_id(&dev->dev);
- /* setup amba-specific device info */
- dev->dma_mask = ~0;
-
/* Allow the HW Peripheral ID to be overridden */
prop = of_get_property(node, "arm,primecell-periphid", NULL);
if (prop)
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index 0eb5c38b4e07..e21012bde639 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -9,18 +9,24 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/device.h>
-static bool selftest_passed = true;
+static struct selftest_results {
+ int passed;
+ int failed;
+} selftest_results;
+
#define selftest(result, fmt, ...) { \
if (!(result)) { \
- pr_err("FAIL %s:%i " fmt, __FILE__, __LINE__, ##__VA_ARGS__); \
- selftest_passed = false; \
+ selftest_results.failed++; \
+ pr_err("FAIL %s():%i " fmt, __func__, __LINE__, ##__VA_ARGS__); \
} else { \
- pr_info("pass %s:%i\n", __FILE__, __LINE__); \
+ selftest_results.passed++; \
+ pr_debug("pass %s():%i\n", __func__, __LINE__); \
} \
}
@@ -131,7 +137,6 @@ static void __init of_selftest_property_match_string(void)
struct device_node *np;
int rc;
- pr_info("start\n");
np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
if (!np) {
pr_err("No testcase data in device tree\n");
@@ -154,6 +159,147 @@ static void __init of_selftest_property_match_string(void)
selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc);
}
+static void __init of_selftest_parse_interrupts(void)
+{
+ struct device_node *np;
+ struct of_phandle_args args;
+ int i, rc;
+
+ np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ for (i = 0; i < 4; i++) {
+ bool passed = true;
+ args.args_count = 0;
+ rc = of_irq_parse_one(np, i, &args);
+
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == (i + 1));
+
+ selftest(passed, "index %i - data error on node %s rc=%i\n",
+ i, args.np->full_name, rc);
+ }
+ of_node_put(np);
+
+ np = of_find_node_by_path("/testcase-data/interrupts/interrupts1");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ for (i = 0; i < 4; i++) {
+ bool passed = true;
+ args.args_count = 0;
+ rc = of_irq_parse_one(np, i, &args);
+
+ /* Test the values from tests-phandle.dtsi */
+ switch (i) {
+ case 0:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 9);
+ break;
+ case 1:
+ passed &= !rc;
+ passed &= (args.args_count == 3);
+ passed &= (args.args[0] == 10);
+ passed &= (args.args[1] == 11);
+ passed &= (args.args[2] == 12);
+ break;
+ case 2:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == 13);
+ passed &= (args.args[1] == 14);
+ break;
+ case 3:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == 15);
+ passed &= (args.args[1] == 16);
+ break;
+ default:
+ passed = false;
+ }
+ selftest(passed, "index %i - data error on node %s rc=%i\n",
+ i, args.np->full_name, rc);
+ }
+ of_node_put(np);
+}
+
+static void __init of_selftest_parse_interrupts_extended(void)
+{
+ struct device_node *np;
+ struct of_phandle_args args;
+ int i, rc;
+
+ np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ for (i = 0; i < 7; i++) {
+ bool passed = true;
+ rc = of_irq_parse_one(np, i, &args);
+
+ /* Test the values from tests-phandle.dtsi */
+ switch (i) {
+ case 0:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 1);
+ break;
+ case 1:
+ passed &= !rc;
+ passed &= (args.args_count == 3);
+ passed &= (args.args[0] == 2);
+ passed &= (args.args[1] == 3);
+ passed &= (args.args[2] == 4);
+ break;
+ case 2:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == 5);
+ passed &= (args.args[1] == 6);
+ break;
+ case 3:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 9);
+ break;
+ case 4:
+ passed &= !rc;
+ passed &= (args.args_count == 3);
+ passed &= (args.args[0] == 10);
+ passed &= (args.args[1] == 11);
+ passed &= (args.args[2] == 12);
+ break;
+ case 5:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == 13);
+ passed &= (args.args[1] == 14);
+ break;
+ case 6:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 15);
+ break;
+ default:
+ passed = false;
+ }
+
+ selftest(passed, "index %i - data error on node %s rc=%i\n",
+ i, args.np->full_name, rc);
+ }
+ of_node_put(np);
+}
+
static int __init of_selftest(void)
{
struct device_node *np;
@@ -168,7 +314,10 @@ static int __init of_selftest(void)
pr_info("start of selftest - you will see error messages\n");
of_selftest_parse_phandle_with_args();
of_selftest_property_match_string();
- pr_info("end of selftest - %s\n", selftest_passed ? "PASS" : "FAIL");
+ of_selftest_parse_interrupts();
+ of_selftest_parse_interrupts_extended();
+ pr_info("end of selftest - %i passed, %i failed\n",
+ selftest_results.passed, selftest_results.failed);
return 0;
}
late_initcall(of_selftest);
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 70694ce38be2..f5366850af7a 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -31,13 +31,17 @@ menuconfig PARPORT
If unsure, say Y.
+config ARCH_MIGHT_HAVE_PC_PARPORT
+ bool
+ help
+ Select this config option from the architecture Kconfig if
+ the architecture might have PC parallel port hardware.
+
if PARPORT
config PARPORT_PC
tristate "PC-style hardware"
- depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \
- (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && \
- !XTENSA && !CRIS && !H8300
+ depends on ARCH_MIGHT_HAVE_PC_PARPORT
---help---
You should say Y here if you have a PC-style parallel port. All
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 903e1285fda0..963761526229 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2004,6 +2004,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
struct resource *ECR_res = NULL;
struct resource *EPP_res = NULL;
struct platform_device *pdev = NULL;
+ int ret;
if (!dev) {
/* We need a physical device to attach to, but none was
@@ -2014,8 +2015,11 @@ struct parport *parport_pc_probe_port(unsigned long int base,
return NULL;
dev = &pdev->dev;
- dev->coherent_dma_mask = DMA_BIT_MASK(24);
- dev->dma_mask = &dev->coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(24));
+ if (ret) {
+ dev_err(dev, "Unable to set coherent dma mask: disabling DMA\n");
+ dma = PARPORT_DMA_NONE;
+ }
}
ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 3d9504811126..47d46c6d8468 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -3,7 +3,7 @@ menu "PCI host controller drivers"
config PCI_MVEBU
bool "Marvell EBU PCIe controller"
- depends on ARCH_MVEBU || ARCH_KIRKWOOD
+ depends on ARCH_MVEBU || ARCH_DOVE || ARCH_KIRKWOOD
depends on OF
config PCIE_DW
@@ -15,8 +15,22 @@ config PCI_EXYNOS
select PCIEPORTBUS
select PCIE_DW
+config PCI_IMX6
+ bool "Freescale i.MX6 PCIe controller"
+ depends on SOC_IMX6Q
+ select PCIEPORTBUS
+ select PCIE_DW
+
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA
+config PCI_RCAR_GEN2
+ bool "Renesas R-Car Gen2 Internal PCI controller"
+ depends on ARM && (ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST)
+ help
+ Say Y here if you want internal PCI support on an R-Car Gen2 SoC.
+ There are 3 internal PCI controllers, each with a single
+ built-in EHCI/OHCI host controller.
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index c9a997b2690d..13fb3333aa05 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
+obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 94e096bb2d0a..24beed38ddc7 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -48,6 +48,7 @@ struct exynos_pcie {
#define PCIE_IRQ_SPECIAL 0x008
#define PCIE_IRQ_EN_PULSE 0x00c
#define PCIE_IRQ_EN_LEVEL 0x010
+#define IRQ_MSI_ENABLE (0x1 << 2)
#define PCIE_IRQ_EN_SPECIAL 0x014
#define PCIE_PWR_RESET 0x018
#define PCIE_CORE_RESET 0x01c
@@ -77,18 +78,28 @@ struct exynos_pcie {
#define PCIE_PHY_PLL_BIAS 0x00c
#define PCIE_PHY_DCC_FEEDBACK 0x014
#define PCIE_PHY_PLL_DIV_1 0x05c
+#define PCIE_PHY_COMMON_POWER 0x064
+#define PCIE_PHY_COMMON_PD_CMN (0x1 << 3)
#define PCIE_PHY_TRSV0_EMP_LVL 0x084
#define PCIE_PHY_TRSV0_DRV_LVL 0x088
#define PCIE_PHY_TRSV0_RXCDR 0x0ac
+#define PCIE_PHY_TRSV0_POWER 0x0c4
+#define PCIE_PHY_TRSV0_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV0_LVCC 0x0dc
#define PCIE_PHY_TRSV1_EMP_LVL 0x144
#define PCIE_PHY_TRSV1_RXCDR 0x16c
+#define PCIE_PHY_TRSV1_POWER 0x184
+#define PCIE_PHY_TRSV1_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV1_LVCC 0x19c
#define PCIE_PHY_TRSV2_EMP_LVL 0x204
#define PCIE_PHY_TRSV2_RXCDR 0x22c
+#define PCIE_PHY_TRSV2_POWER 0x244
+#define PCIE_PHY_TRSV2_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV2_LVCC 0x25c
#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
#define PCIE_PHY_TRSV3_RXCDR 0x2ec
+#define PCIE_PHY_TRSV3_POWER 0x304
+#define PCIE_PHY_TRSV3_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV3_LVCC 0x31c
static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
@@ -202,6 +213,58 @@ static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
}
+static void exynos_pcie_power_on_phy(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+ val &= ~PCIE_PHY_COMMON_PD_CMN;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+ val &= ~PCIE_PHY_TRSV0_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+ val &= ~PCIE_PHY_TRSV1_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+ val &= ~PCIE_PHY_TRSV2_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+ val &= ~PCIE_PHY_TRSV3_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
+static void exynos_pcie_power_off_phy(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+ val |= PCIE_PHY_COMMON_PD_CMN;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+ val |= PCIE_PHY_TRSV0_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+ val |= PCIE_PHY_TRSV1_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+ val |= PCIE_PHY_TRSV2_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+ val |= PCIE_PHY_TRSV3_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
static void exynos_pcie_init_phy(struct pcie_port *pp)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
@@ -270,6 +333,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
/* de-assert phy reset */
exynos_pcie_deassert_phy_reset(pp);
+ /* power on phy */
+ exynos_pcie_power_on_phy(pp);
+
/* initialize phy */
exynos_pcie_init_phy(pp);
@@ -302,6 +368,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
PCIE_PHY_PLL_LOCKED);
dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
}
+ /* power off phy */
+ exynos_pcie_power_off_phy(pp);
+
dev_err(pp->dev, "PCIe Link Fail\n");
return -EINVAL;
}
@@ -342,9 +411,36 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ dw_handle_msi_irq(pp);
+
+ return IRQ_HANDLED;
+}
+
+static void exynos_pcie_msi_init(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ dw_pcie_msi_init(pp);
+
+ /* enable MSI interrupt */
+ val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_EN_LEVEL);
+ val |= IRQ_MSI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
+ return;
+}
+
static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
{
exynos_pcie_enable_irq_pulse(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ exynos_pcie_msi_init(pp);
+
return;
}
@@ -430,6 +526,22 @@ static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
return ret;
}
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq(pdev, 0);
+ if (!pp->msi_irq) {
+ dev_err(&pdev->dev, "failed to get msi irq\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ exynos_pcie_msi_irq_handler,
+ IRQF_SHARED, "exynos-pcie", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request msi irq\n");
+ return ret;
+ }
+ }
+
pp->root_bus_nr = -1;
pp->ops = &exynos_pcie_host_ops;
@@ -487,18 +599,24 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
- if (IS_ERR(exynos_pcie->elbi_base))
- return PTR_ERR(exynos_pcie->elbi_base);
+ if (IS_ERR(exynos_pcie->elbi_base)) {
+ ret = PTR_ERR(exynos_pcie->elbi_base);
+ goto fail_bus_clk;
+ }
phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
- if (IS_ERR(exynos_pcie->phy_base))
- return PTR_ERR(exynos_pcie->phy_base);
+ if (IS_ERR(exynos_pcie->phy_base)) {
+ ret = PTR_ERR(exynos_pcie->phy_base);
+ goto fail_bus_clk;
+ }
block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
- if (IS_ERR(exynos_pcie->block_base))
- return PTR_ERR(exynos_pcie->block_base);
+ if (IS_ERR(exynos_pcie->block_base)) {
+ ret = PTR_ERR(exynos_pcie->block_base);
+ goto fail_bus_clk;
+ }
ret = add_pcie_port(pp, pdev);
if (ret < 0)
@@ -535,7 +653,7 @@ static struct platform_driver exynos_pcie_driver = {
.driver = {
.name = "exynos-pcie",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(exynos_pcie_of_match),
+ .of_match_table = exynos_pcie_of_match,
},
};
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
new file mode 100644
index 000000000000..bd70af8f31ac
--- /dev/null
+++ b/drivers/pci/host/pci-imx6.c
@@ -0,0 +1,568 @@
+/*
+ * PCIe host controller driver for Freescale i.MX6 SoCs
+ *
+ * Copyright (C) 2013 Kosagi
+ * http://www.kosagi.com
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
+
+struct imx6_pcie {
+ int reset_gpio;
+ int power_on_gpio;
+ int wake_up_gpio;
+ int disable_gpio;
+ struct clk *lvds_gate;
+ struct clk *sata_ref_100m;
+ struct clk *pcie_ref_125m;
+ struct clk *pcie_axi;
+ struct pcie_port pp;
+ struct regmap *iomuxc_gpr;
+ void __iomem *mem_base;
+};
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET 0x700
+#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
+#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+
+#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
+#define PCIE_PHY_CTRL_DATA_LOC 0
+#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
+#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
+#define PCIE_PHY_CTRL_WR_LOC 18
+#define PCIE_PHY_CTRL_RD_LOC 19
+
+#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
+#define PCIE_PHY_STAT_ACK_LOC 16
+
+/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_RX_ASIC_OUT 0x100D
+
+#define PHY_RX_OVRD_IN_LO 0x1005
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
+
+static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
+{
+ u32 val;
+ u32 max_iterations = 10;
+ u32 wait_counter = 0;
+
+ do {
+ val = readl(dbi_base + PCIE_PHY_STAT);
+ val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
+ wait_counter++;
+
+ if (val == exp_val)
+ return 0;
+
+ udelay(1);
+ } while (wait_counter < max_iterations);
+
+ return -ETIMEDOUT;
+}
+
+static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
+{
+ u32 val;
+ int ret;
+
+ val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ writel(val, dbi_base + PCIE_PHY_CTRL);
+
+ val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
+ writel(val, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ writel(val, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
+static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
+{
+ u32 val, phy_ctl;
+ int ret;
+
+ ret = pcie_phy_wait_ack(dbi_base, addr);
+ if (ret)
+ return ret;
+
+ /* assert Read signal */
+ phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
+ writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ val = readl(dbi_base + PCIE_PHY_STAT);
+ *data = val & 0xffff;
+
+ /* deassert Read signal */
+ writel(0x00, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
+{
+ u32 var;
+ int ret;
+
+ /* write addr */
+ /* cap addr */
+ ret = pcie_phy_wait_ack(dbi_base, addr);
+ if (ret)
+ return ret;
+
+ var = data << PCIE_PHY_CTRL_DATA_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* capture data */
+ var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ /* deassert cap data */
+ var = data << PCIE_PHY_CTRL_DATA_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* wait for ack de-assertion */
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ /* assert wr signal */
+ var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* wait for ack */
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ /* deassert wr signal */
+ var = data << PCIE_PHY_CTRL_DATA_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* wait for ack de-assertion */
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ writel(0x0, dbi_base + PCIE_PHY_CTRL);
+
+ return 0;
+}
+
+/* Added for PCI abort handling */
+static int imx6q_pcie_abort_handler(unsigned long addr,
+ unsigned int fsr, struct pt_regs *regs)
+{
+ return 0;
+}
+
+static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+
+ gpio_set_value(imx6_pcie->reset_gpio, 0);
+ msleep(100);
+ gpio_set_value(imx6_pcie->reset_gpio, 1);
+
+ return 0;
+}
+
+static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ int ret;
+
+ if (gpio_is_valid(imx6_pcie->power_on_gpio))
+ gpio_set_value(imx6_pcie->power_on_gpio, 1);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+
+ ret = clk_prepare_enable(imx6_pcie->sata_ref_100m);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable sata_ref_100m\n");
+ goto err_sata_ref;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_ref_125m);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie_ref_125m\n");
+ goto err_pcie_ref;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->lvds_gate);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable lvds_gate\n");
+ goto err_lvds_gate;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_axi);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie_axi\n");
+ goto err_pcie_axi;
+ }
+
+ /* allow the clocks to stabilize */
+ usleep_range(200, 500);
+
+ return 0;
+
+err_pcie_axi:
+ clk_disable_unprepare(imx6_pcie->lvds_gate);
+err_lvds_gate:
+ clk_disable_unprepare(imx6_pcie->pcie_ref_125m);
+err_pcie_ref:
+ clk_disable_unprepare(imx6_pcie->sata_ref_100m);
+err_sata_ref:
+ return ret;
+
+}
+
+static void imx6_pcie_init_phy(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
+}
+
+static void imx6_pcie_host_init(struct pcie_port *pp)
+{
+ int count = 0;
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+ imx6_pcie_assert_core_reset(pp);
+
+ imx6_pcie_init_phy(pp);
+
+ imx6_pcie_deassert_core_reset(pp);
+
+ dw_pcie_setup_rc(pp);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+
+ while (!dw_pcie_link_up(pp)) {
+ usleep_range(100, 1000);
+ count++;
+ if (count >= 200) {
+ dev_err(pp->dev, "phy link never came up\n");
+ dev_dbg(pp->dev,
+ "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+ break;
+ }
+ }
+
+ return;
+}
+
+static int imx6_pcie_link_up(struct pcie_port *pp)
+{
+ u32 rc, ltssm, rx_valid, temp;
+
+ /* link is debug bit 36, debug register 1 starts at bit 32 */
+ rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
+ if (rc)
+ return -EAGAIN;
+
+ /*
+ * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
+ * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
+ * If (MAC/LTSSM.state == Recovery.RcvrLock)
+ * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
+ * to gen2 is stuck
+ */
+ pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
+ ltssm = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F;
+
+ if (rx_valid & 0x01)
+ return 0;
+
+ if (ltssm != 0x0d)
+ return 0;
+
+ dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
+
+ pcie_phy_read(pp->dbi_base,
+ PHY_RX_OVRD_IN_LO, &temp);
+ temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN
+ | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(pp->dbi_base,
+ PHY_RX_OVRD_IN_LO, temp);
+
+ usleep_range(2000, 3000);
+
+ pcie_phy_read(pp->dbi_base,
+ PHY_RX_OVRD_IN_LO, &temp);
+ temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN
+ | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(pp->dbi_base,
+ PHY_RX_OVRD_IN_LO, temp);
+
+ return 0;
+}
+
+static struct pcie_host_ops imx6_pcie_host_ops = {
+ .link_up = imx6_pcie_link_up,
+ .host_init = imx6_pcie_host_init,
+};
+
+static int imx6_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (!pp->irq) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -ENODEV;
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &imx6_pcie_host_ops;
+
+ spin_lock_init(&pp->conf_lock);
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init imx6_pcie_probe(struct platform_device *pdev)
+{
+ struct imx6_pcie *imx6_pcie;
+ struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *dbi_base;
+ int ret;
+
+ imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
+ if (!imx6_pcie)
+ return -ENOMEM;
+
+ pp = &imx6_pcie->pp;
+ pp->dev = &pdev->dev;
+
+ /* Added for PCI abort handling */
+ hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+
+ dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!dbi_base) {
+ dev_err(&pdev->dev, "dbi_base memory resource not found\n");
+ return -ENODEV;
+ }
+
+ pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+ if (IS_ERR(pp->dbi_base)) {
+ ret = PTR_ERR(pp->dbi_base);
+ goto err;
+ }
+
+ /* Fetch GPIOs */
+ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ if (!gpio_is_valid(imx6_pcie->reset_gpio)) {
+ dev_err(&pdev->dev, "no reset-gpio defined\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = devm_gpio_request_one(&pdev->dev,
+ imx6_pcie->reset_gpio,
+ GPIOF_OUT_INIT_LOW,
+ "PCIe reset");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get reset gpio\n");
+ goto err;
+ }
+
+ imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->power_on_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev,
+ imx6_pcie->power_on_gpio,
+ GPIOF_OUT_INIT_LOW,
+ "PCIe power enable");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get power-on gpio\n");
+ goto err;
+ }
+ }
+
+ imx6_pcie->wake_up_gpio = of_get_named_gpio(np, "wake-up-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->wake_up_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev,
+ imx6_pcie->wake_up_gpio,
+ GPIOF_IN,
+ "PCIe wake up");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get wake-up gpio\n");
+ goto err;
+ }
+ }
+
+ imx6_pcie->disable_gpio = of_get_named_gpio(np, "disable-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->disable_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev,
+ imx6_pcie->disable_gpio,
+ GPIOF_OUT_INIT_HIGH,
+ "PCIe disable endpoint");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
+ goto err;
+ }
+ }
+
+ /* Fetch clocks */
+ imx6_pcie->lvds_gate = devm_clk_get(&pdev->dev, "lvds_gate");
+ if (IS_ERR(imx6_pcie->lvds_gate)) {
+ dev_err(&pdev->dev,
+ "lvds_gate clock select missing or invalid\n");
+ ret = PTR_ERR(imx6_pcie->lvds_gate);
+ goto err;
+ }
+
+ imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
+ if (IS_ERR(imx6_pcie->sata_ref_100m)) {
+ dev_err(&pdev->dev,
+ "sata_ref_100m clock source missing or invalid\n");
+ ret = PTR_ERR(imx6_pcie->sata_ref_100m);
+ goto err;
+ }
+
+ imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
+ if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
+ dev_err(&pdev->dev,
+ "pcie_ref_125m clock source missing or invalid\n");
+ ret = PTR_ERR(imx6_pcie->pcie_ref_125m);
+ goto err;
+ }
+
+ imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
+ if (IS_ERR(imx6_pcie->pcie_axi)) {
+ dev_err(&pdev->dev,
+ "pcie_axi clock source missing or invalid\n");
+ ret = PTR_ERR(imx6_pcie->pcie_axi);
+ goto err;
+ }
+
+ /* Grab GPR config register range */
+ imx6_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+ dev_err(&pdev->dev, "unable to find iomuxc registers\n");
+ ret = PTR_ERR(imx6_pcie->iomuxc_gpr);
+ goto err;
+ }
+
+ ret = imx6_add_pcie_port(pp, pdev);
+ if (ret < 0)
+ goto err;
+
+ platform_set_drvdata(pdev, imx6_pcie);
+ return 0;
+
+err:
+ return ret;
+}
+
+static const struct of_device_id imx6_pcie_of_match[] = {
+ { .compatible = "fsl,imx6q-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
+
+static struct platform_driver imx6_pcie_driver = {
+ .driver = {
+ .name = "imx6q-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = imx6_pcie_of_match,
+ },
+};
+
+/* Freescale PCIe driver does not allow module unload */
+
+static int __init imx6_pcie_init(void)
+{
+ return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
+}
+fs_initcall(imx6_pcie_init);
+
+MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
+MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 729d5a101d62..c269e430c760 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -9,13 +9,17 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/mbus.h>
+#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
-#include <linux/of_pci.h>
#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
#include <linux/of_platform.h>
/*
@@ -103,6 +107,7 @@ struct mvebu_pcie_port;
struct mvebu_pcie {
struct platform_device *pdev;
struct mvebu_pcie_port *ports;
+ struct msi_chip *msi;
struct resource io;
struct resource realio;
struct resource mem;
@@ -115,7 +120,6 @@ struct mvebu_pcie_port {
char *name;
void __iomem *base;
spinlock_t conf_lock;
- int haslink;
u32 port;
u32 lane;
int devfn;
@@ -124,6 +128,9 @@ struct mvebu_pcie_port {
unsigned int io_target;
unsigned int io_attr;
struct clk *clk;
+ int reset_gpio;
+ int reset_active_low;
+ char *reset_name;
struct mvebu_sw_pci_bridge bridge;
struct device_node *dn;
struct mvebu_pcie *pcie;
@@ -133,29 +140,39 @@ struct mvebu_pcie_port {
size_t iowin_size;
};
+static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
+{
+ writel(val, port->base + reg);
+}
+
+static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
+{
+ return readl(port->base + reg);
+}
+
static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
{
- return !(readl(port->base + PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
+ return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}
static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
{
u32 stat;
- stat = readl(port->base + PCIE_STAT_OFF);
+ stat = mvebu_readl(port, PCIE_STAT_OFF);
stat &= ~PCIE_STAT_BUS;
stat |= nr << 8;
- writel(stat, port->base + PCIE_STAT_OFF);
+ mvebu_writel(port, stat, PCIE_STAT_OFF);
}
static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
{
u32 stat;
- stat = readl(port->base + PCIE_STAT_OFF);
+ stat = mvebu_readl(port, PCIE_STAT_OFF);
stat &= ~PCIE_STAT_DEV;
stat |= nr << 16;
- writel(stat, port->base + PCIE_STAT_OFF);
+ mvebu_writel(port, stat, PCIE_STAT_OFF);
}
/*
@@ -163,7 +180,7 @@ static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
* BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks
* WIN[0-3] -> DRAM bank[0-3]
*/
-static void __init mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
+static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
const struct mbus_dram_target_info *dram;
u32 size;
@@ -173,33 +190,34 @@ static void __init mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
/* First, disable and clear BARs and windows. */
for (i = 1; i < 3; i++) {
- writel(0, port->base + PCIE_BAR_CTRL_OFF(i));
- writel(0, port->base + PCIE_BAR_LO_OFF(i));
- writel(0, port->base + PCIE_BAR_HI_OFF(i));
+ mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
+ mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
}
for (i = 0; i < 5; i++) {
- writel(0, port->base + PCIE_WIN04_CTRL_OFF(i));
- writel(0, port->base + PCIE_WIN04_BASE_OFF(i));
- writel(0, port->base + PCIE_WIN04_REMAP_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
}
- writel(0, port->base + PCIE_WIN5_CTRL_OFF);
- writel(0, port->base + PCIE_WIN5_BASE_OFF);
- writel(0, port->base + PCIE_WIN5_REMAP_OFF);
+ mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
+ mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
+ mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
/* Setup windows for DDR banks. Count total DDR size on the fly. */
size = 0;
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
- writel(cs->base & 0xffff0000,
- port->base + PCIE_WIN04_BASE_OFF(i));
- writel(0, port->base + PCIE_WIN04_REMAP_OFF(i));
- writel(((cs->size - 1) & 0xffff0000) |
- (cs->mbus_attr << 8) |
- (dram->mbus_dram_target_id << 4) | 1,
- port->base + PCIE_WIN04_CTRL_OFF(i));
+ mvebu_writel(port, cs->base & 0xffff0000,
+ PCIE_WIN04_BASE_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
+ mvebu_writel(port,
+ ((cs->size - 1) & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ (dram->mbus_dram_target_id << 4) | 1,
+ PCIE_WIN04_CTRL_OFF(i));
size += cs->size;
}
@@ -209,41 +227,40 @@ static void __init mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
size = 1 << fls(size);
/* Setup BAR[1] to all DRAM banks. */
- writel(dram->cs[0].base, port->base + PCIE_BAR_LO_OFF(1));
- writel(0, port->base + PCIE_BAR_HI_OFF(1));
- writel(((size - 1) & 0xffff0000) | 1,
- port->base + PCIE_BAR_CTRL_OFF(1));
+ mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
+ mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
+ PCIE_BAR_CTRL_OFF(1));
}
-static void __init mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
+static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
- u16 cmd;
- u32 mask;
+ u32 cmd, mask;
/* Point PCIe unit MBUS decode windows to DRAM space. */
mvebu_pcie_setup_wins(port);
/* Master + slave enable. */
- cmd = readw(port->base + PCIE_CMD_OFF);
+ cmd = mvebu_readl(port, PCIE_CMD_OFF);
cmd |= PCI_COMMAND_IO;
cmd |= PCI_COMMAND_MEMORY;
cmd |= PCI_COMMAND_MASTER;
- writew(cmd, port->base + PCIE_CMD_OFF);
+ mvebu_writel(port, cmd, PCIE_CMD_OFF);
/* Enable interrupt lines A-D. */
- mask = readl(port->base + PCIE_MASK_OFF);
+ mask = mvebu_readl(port, PCIE_MASK_OFF);
mask |= PCIE_MASK_ENABLE_INTS;
- writel(mask, port->base + PCIE_MASK_OFF);
+ mvebu_writel(port, mask, PCIE_MASK_OFF);
}
static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val)
{
- writel(PCIE_CONF_ADDR(bus->number, devfn, where),
- port->base + PCIE_CONF_ADDR_OFF);
+ mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
+ PCIE_CONF_ADDR_OFF);
- *val = readl(port->base + PCIE_CONF_DATA_OFF);
+ *val = mvebu_readl(port, PCIE_CONF_DATA_OFF);
if (size == 1)
*val = (*val >> (8 * (where & 3))) & 0xff;
@@ -257,21 +274,24 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
struct pci_bus *bus,
u32 devfn, int where, int size, u32 val)
{
- int ret = PCIBIOS_SUCCESSFUL;
+ u32 _val, shift = 8 * (where & 3);
- writel(PCIE_CONF_ADDR(bus->number, devfn, where),
- port->base + PCIE_CONF_ADDR_OFF);
+ mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
+ PCIE_CONF_ADDR_OFF);
+ _val = mvebu_readl(port, PCIE_CONF_DATA_OFF);
if (size == 4)
- writel(val, port->base + PCIE_CONF_DATA_OFF);
+ _val = val;
else if (size == 2)
- writew(val, port->base + PCIE_CONF_DATA_OFF + (where & 3));
+ _val = (_val & ~(0xffff << shift)) | ((val & 0xffff) << shift);
else if (size == 1)
- writeb(val, port->base + PCIE_CONF_DATA_OFF + (where & 3));
+ _val = (_val & ~(0xff << shift)) | ((val & 0xff) << shift);
else
- ret = PCIBIOS_BAD_REGISTER_NUMBER;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
- return ret;
+ mvebu_writel(port, _val, PCIE_CONF_DATA_OFF);
+
+ return PCIBIOS_SUCCESSFUL;
}
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
@@ -552,7 +572,7 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (bus->number == 0)
return mvebu_sw_pci_bridge_write(port, where, size, val);
- if (!port->haslink)
+ if (!mvebu_pcie_link_up(port))
return PCIBIOS_DEVICE_NOT_FOUND;
/*
@@ -594,7 +614,7 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
if (bus->number == 0)
return mvebu_sw_pci_bridge_read(port, where, size, val);
- if (!port->haslink) {
+ if (!mvebu_pcie_link_up(port)) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@@ -626,7 +646,7 @@ static struct pci_ops mvebu_pcie_ops = {
.write = mvebu_pcie_wr_conf,
};
-static int __init mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
+static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct mvebu_pcie *pcie = sys_to_pcie(sys);
int i;
@@ -645,19 +665,6 @@ static int __init mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
return 1;
}
-static int __init mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- struct of_irq oirq;
- int ret;
-
- ret = of_irq_map_pci(dev, &oirq);
- if (ret)
- return ret;
-
- return irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
-}
-
static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
struct mvebu_pcie *pcie = sys_to_pcie(sys);
@@ -673,11 +680,17 @@ static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
return bus;
}
-resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
- const struct resource *res,
- resource_size_t start,
- resource_size_t size,
- resource_size_t align)
+static void mvebu_pcie_add_bus(struct pci_bus *bus)
+{
+ struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+ bus->msi = pcie->msi;
+}
+
+static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
+ const struct resource *res,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align)
{
if (dev->bus->number != 0)
return start;
@@ -696,7 +709,7 @@ resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
return start;
}
-static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie)
+static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
{
struct hw_pci hw;
@@ -706,9 +719,10 @@ static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie)
hw.private_data = (void **)&pcie;
hw.setup = mvebu_pcie_setup;
hw.scan = mvebu_pcie_scan_bus;
- hw.map_irq = mvebu_pcie_map_irq;
+ hw.map_irq = of_irq_parse_and_map_pci;
hw.ops = &mvebu_pcie_ops;
hw.align_resource = mvebu_pcie_align_resource;
+ hw.add_bus = mvebu_pcie_add_bus;
pci_common_init(&hw);
}
@@ -718,10 +732,8 @@ static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie)
* <...> property for one that matches the given port/lane. Once
* found, maps it.
*/
-static void __iomem * __init
-mvebu_pcie_map_registers(struct platform_device *pdev,
- struct device_node *np,
- struct mvebu_pcie_port *port)
+static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
+ struct device_node *np, struct mvebu_pcie_port *port)
{
struct resource regs;
int ret = 0;
@@ -777,7 +789,22 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
return -ENOENT;
}
-static int __init mvebu_pcie_probe(struct platform_device *pdev)
+static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
+{
+ struct device_node *msi_node;
+
+ msi_node = of_parse_phandle(pcie->pdev->dev.of_node,
+ "msi-parent", 0);
+ if (!msi_node)
+ return;
+
+ pcie->msi = of_pci_find_msi_chip_by_node(msi_node);
+
+ if (pcie->msi)
+ pcie->msi->dev = &pcie->pdev->dev;
+}
+
+static int mvebu_pcie_probe(struct platform_device *pdev)
{
struct mvebu_pcie *pcie;
struct device_node *np = pdev->dev.of_node;
@@ -790,6 +817,7 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
/* Get the PCIe memory and I/O aperture */
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
@@ -818,13 +846,14 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
return ret;
}
+ i = 0;
for_each_child_of_node(pdev->dev.of_node, child) {
if (!of_device_is_available(child))
continue;
- pcie->nports++;
+ i++;
}
- pcie->ports = devm_kzalloc(&pdev->dev, pcie->nports *
+ pcie->ports = devm_kzalloc(&pdev->dev, i *
sizeof(struct mvebu_pcie_port),
GFP_KERNEL);
if (!pcie->ports)
@@ -833,6 +862,7 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
i = 0;
for_each_child_of_node(pdev->dev.of_node, child) {
struct mvebu_pcie_port *port = &pcie->ports[i];
+ enum of_gpio_flags flags;
if (!of_device_is_available(child))
continue;
@@ -873,45 +903,68 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
continue;
}
+ port->reset_gpio = of_get_named_gpio_flags(child,
+ "reset-gpios", 0, &flags);
+ if (gpio_is_valid(port->reset_gpio)) {
+ u32 reset_udelay = 20000;
+
+ port->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
+ port->reset_name = kasprintf(GFP_KERNEL,
+ "pcie%d.%d-reset", port->port, port->lane);
+ of_property_read_u32(child, "reset-delay-us",
+ &reset_udelay);
+
+ ret = devm_gpio_request_one(&pdev->dev,
+ port->reset_gpio, GPIOF_DIR_OUT, port->reset_name);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ continue;
+ }
+
+ gpio_set_value(port->reset_gpio,
+ (port->reset_active_low) ? 1 : 0);
+ msleep(reset_udelay/1000);
+ }
+
+ port->clk = of_clk_get_by_name(child, NULL);
+ if (IS_ERR(port->clk)) {
+ dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
+ port->port, port->lane);
+ continue;
+ }
+
+ ret = clk_prepare_enable(port->clk);
+ if (ret)
+ continue;
+
port->base = mvebu_pcie_map_registers(pdev, child, port);
if (IS_ERR(port->base)) {
dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
port->port, port->lane);
port->base = NULL;
+ clk_disable_unprepare(port->clk);
continue;
}
mvebu_pcie_set_local_dev_nr(port, 1);
- if (mvebu_pcie_link_up(port)) {
- port->haslink = 1;
- dev_info(&pdev->dev, "PCIe%d.%d: link up\n",
- port->port, port->lane);
- } else {
- port->haslink = 0;
- dev_info(&pdev->dev, "PCIe%d.%d: link down\n",
- port->port, port->lane);
- }
-
port->clk = of_clk_get_by_name(child, NULL);
if (IS_ERR(port->clk)) {
dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
port->port, port->lane);
iounmap(port->base);
- port->haslink = 0;
continue;
}
port->dn = child;
-
- clk_prepare_enable(port->clk);
spin_lock_init(&port->conf_lock);
-
mvebu_sw_pci_bridge_init(port);
-
i++;
}
+ pcie->nports = i;
+ mvebu_pcie_msi_enable(pcie);
mvebu_pcie_enable(pcie);
return 0;
@@ -920,6 +973,7 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
static const struct of_device_id mvebu_pcie_of_match_table[] = {
{ .compatible = "marvell,armada-xp-pcie", },
{ .compatible = "marvell,armada-370-pcie", },
+ { .compatible = "marvell,dove-pcie", },
{ .compatible = "marvell,kirkwood-pcie", },
{},
};
@@ -931,16 +985,12 @@ static struct platform_driver mvebu_pcie_driver = {
.name = "mvebu-pcie",
.of_match_table =
of_match_ptr(mvebu_pcie_of_match_table),
+ /* driver unloading/unbinding currently not supported */
+ .suppress_bind_attrs = true,
},
+ .probe = mvebu_pcie_probe,
};
-
-static int __init mvebu_pcie_init(void)
-{
- return platform_driver_probe(&mvebu_pcie_driver,
- mvebu_pcie_probe);
-}
-
-subsys_initcall(mvebu_pcie_init);
+module_platform_driver(mvebu_pcie_driver);
MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_DESCRIPTION("Marvell EBU PCIe driver");
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
new file mode 100644
index 000000000000..cbaa5c4397e3
--- /dev/null
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -0,0 +1,333 @@
+/*
+ * pci-rcar-gen2: internal PCI bus support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* AHB-PCI Bridge PCI communication registers */
+#define RCAR_AHBPCI_PCICOM_OFFSET 0x800
+
+#define RCAR_PCIAHB_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x00)
+#define RCAR_PCIAHB_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x04)
+#define RCAR_PCIAHB_PREFETCH0 0x0
+#define RCAR_PCIAHB_PREFETCH4 0x1
+#define RCAR_PCIAHB_PREFETCH8 0x2
+#define RCAR_PCIAHB_PREFETCH16 0x3
+
+#define RCAR_AHBPCI_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x10)
+#define RCAR_AHBPCI_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x14)
+#define RCAR_AHBPCI_WIN_CTR_MEM (3 << 1)
+#define RCAR_AHBPCI_WIN_CTR_CFG (5 << 1)
+#define RCAR_AHBPCI_WIN1_HOST (1 << 30)
+#define RCAR_AHBPCI_WIN1_DEVICE (1 << 31)
+
+#define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20)
+#define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24)
+#define RCAR_PCI_INT_A (1 << 16)
+#define RCAR_PCI_INT_B (1 << 17)
+#define RCAR_PCI_INT_PME (1 << 19)
+
+#define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30)
+#define RCAR_AHB_BUS_MMODE_HTRANS (1 << 0)
+#define RCAR_AHB_BUS_MMODE_BYTE_BURST (1 << 1)
+#define RCAR_AHB_BUS_MMODE_WR_INCR (1 << 2)
+#define RCAR_AHB_BUS_MMODE_HBUS_REQ (1 << 7)
+#define RCAR_AHB_BUS_SMODE_READYCTR (1 << 17)
+#define RCAR_AHB_BUS_MODE (RCAR_AHB_BUS_MMODE_HTRANS | \
+ RCAR_AHB_BUS_MMODE_BYTE_BURST | \
+ RCAR_AHB_BUS_MMODE_WR_INCR | \
+ RCAR_AHB_BUS_MMODE_HBUS_REQ | \
+ RCAR_AHB_BUS_SMODE_READYCTR)
+
+#define RCAR_USBCTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x34)
+#define RCAR_USBCTR_USBH_RST (1 << 0)
+#define RCAR_USBCTR_PCICLK_MASK (1 << 1)
+#define RCAR_USBCTR_PLL_RST (1 << 2)
+#define RCAR_USBCTR_DIRPD (1 << 8)
+#define RCAR_USBCTR_PCIAHB_WIN2_EN (1 << 9)
+#define RCAR_USBCTR_PCIAHB_WIN1_256M (0 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_512M (1 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_1G (2 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_2G (3 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_MASK (3 << 10)
+
+#define RCAR_PCI_ARBITER_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x40)
+#define RCAR_PCI_ARBITER_PCIREQ0 (1 << 0)
+#define RCAR_PCI_ARBITER_PCIREQ1 (1 << 1)
+#define RCAR_PCI_ARBITER_PCIBP_MODE (1 << 12)
+
+#define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48)
+
+/* Number of internal PCI controllers */
+#define RCAR_PCI_NR_CONTROLLERS 3
+
+struct rcar_pci_priv {
+ void __iomem *reg;
+ struct resource io_res;
+ struct resource mem_res;
+ struct resource *cfg_res;
+ int irq;
+};
+
+/* PCI configuration space operations */
+static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_sys_data *sys = bus->sysdata;
+ struct rcar_pci_priv *priv = sys->private_data;
+ int slot, val;
+
+ if (sys->busnr != bus->number || PCI_FUNC(devfn))
+ return NULL;
+
+ /* Only one EHCI/OHCI device built-in */
+ slot = PCI_SLOT(devfn);
+ if (slot > 2)
+ return NULL;
+
+ val = slot ? RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG :
+ RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG;
+
+ iowrite32(val, priv->reg + RCAR_AHBPCI_WIN1_CTR_REG);
+ return priv->reg + (slot >> 1) * 0x100 + where;
+}
+
+static int rcar_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
+
+ if (!reg)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ *val = ioread8(reg);
+ break;
+ case 2:
+ *val = ioread16(reg);
+ break;
+ default:
+ *val = ioread32(reg);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rcar_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
+
+ if (!reg)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ iowrite8(val, reg);
+ break;
+ case 2:
+ iowrite16(val, reg);
+ break;
+ default:
+ iowrite32(val, reg);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* PCI interrupt mapping */
+static int __init rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct pci_sys_data *sys = dev->bus->sysdata;
+ struct rcar_pci_priv *priv = sys->private_data;
+
+ return priv->irq;
+}
+
+/* PCI host controller setup */
+static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
+{
+ struct rcar_pci_priv *priv = sys->private_data;
+ void __iomem *reg = priv->reg;
+ u32 val;
+
+ val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
+ pr_info("PCI: bus%u revision %x\n", sys->busnr, val);
+
+ /* Disable Direct Power Down State and assert reset */
+ val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
+ val |= RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST;
+ iowrite32(val, reg + RCAR_USBCTR_REG);
+ udelay(4);
+
+ /* De-assert reset and set PCIAHB window1 size to 1GB */
+ val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK |
+ RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST);
+ iowrite32(val | RCAR_USBCTR_PCIAHB_WIN1_1G, reg + RCAR_USBCTR_REG);
+
+ /* Configure AHB master and slave modes */
+ iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG);
+
+ /* Configure PCI arbiter */
+ val = ioread32(reg + RCAR_PCI_ARBITER_CTR_REG);
+ val |= RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 |
+ RCAR_PCI_ARBITER_PCIBP_MODE;
+ iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG);
+
+ /* PCI-AHB mapping: 0x40000000-0x80000000 */
+ iowrite32(0x40000000 | RCAR_PCIAHB_PREFETCH16,
+ reg + RCAR_PCIAHB_WIN1_CTR_REG);
+
+ /* AHB-PCI mapping: OHCI/EHCI registers */
+ val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM;
+ iowrite32(val, reg + RCAR_AHBPCI_WIN2_CTR_REG);
+
+ /* Enable AHB-PCI bridge PCI configuration access */
+ iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG,
+ reg + RCAR_AHBPCI_WIN1_CTR_REG);
+ /* Set PCI-AHB Window1 address */
+ iowrite32(0x40000000 | PCI_BASE_ADDRESS_MEM_PREFETCH,
+ reg + PCI_BASE_ADDRESS_1);
+ /* Set AHB-PCI bridge PCI communication area address */
+ val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET;
+ iowrite32(val, reg + PCI_BASE_ADDRESS_0);
+
+ val = ioread32(reg + PCI_COMMAND);
+ val |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ iowrite32(val, reg + PCI_COMMAND);
+
+ /* Enable PCI interrupts */
+ iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME,
+ reg + RCAR_PCI_INT_ENABLE_REG);
+
+ /* Add PCI resources */
+ pci_add_resource(&sys->resources, &priv->io_res);
+ pci_add_resource(&sys->resources, &priv->mem_res);
+
+ return 1;
+}
+
+static struct pci_ops rcar_pci_ops = {
+ .read = rcar_pci_read_config,
+ .write = rcar_pci_write_config,
+};
+
+static struct hw_pci rcar_hw_pci __initdata = {
+ .map_irq = rcar_pci_map_irq,
+ .ops = &rcar_pci_ops,
+ .setup = rcar_pci_setup,
+};
+
+static int rcar_pci_count __initdata;
+
+static int __init rcar_pci_add_controller(struct rcar_pci_priv *priv)
+{
+ void **private_data;
+ int count;
+
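+ /* Reuse the existing array if it still has room for another controller */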
+ if (rcar_hw_pci.nr_controllers < rcar_pci_count)
+ goto add_priv;
+
+ /* (Re)allocate private data pointer array if needed */
+ count = rcar_pci_count + RCAR_PCI_NR_CONTROLLERS;
+ private_data = kzalloc(count * sizeof(void *), GFP_KERNEL);
+ if (!private_data)
+ return -ENOMEM;
+
+ rcar_pci_count = count;
+ if (rcar_hw_pci.private_data) {
+ memcpy(private_data, rcar_hw_pci.private_data,
+ rcar_hw_pci.nr_controllers * sizeof(void *));
+ kfree(rcar_hw_pci.private_data);
+ }
+
+ rcar_hw_pci.private_data = private_data;
+
+add_priv:
+ /* Add private data pointer to the array */
+ rcar_hw_pci.private_data[rcar_hw_pci.nr_controllers++] = priv;
+ return 0;
+}
+
+static int __init rcar_pci_probe(struct platform_device *pdev)
+{
+ struct resource *cfg_res, *mem_res;
+ struct rcar_pci_priv *priv;
+ void __iomem *reg;
+
+ cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, cfg_res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem_res || !mem_res->start)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof(struct rcar_pci_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mem_res = *mem_res;
+ /*
+ * The controller does not support/use port I/O,
+ * so set up a dummy port I/O region here.
+ */
+ priv->io_res.start = priv->mem_res.start;
+ priv->io_res.end = priv->mem_res.end;
+ priv->io_res.flags = IORESOURCE_IO;
+
+ priv->cfg_res = cfg_res;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+ priv->reg = reg;
+
+ return rcar_pci_add_controller(priv);
+}
+
+static struct platform_driver rcar_pci_driver = {
+ .driver = {
+ .name = "pci-rcar-gen2",
+ },
+};
+
+static int __init rcar_pci_init(void)
+{
+ int retval;
+
+ retval = platform_driver_probe(&rcar_pci_driver, rcar_pci_probe);
+ if (!retval)
+ pci_common_init(&rcar_hw_pci);
+
+ /* Private data pointer array is not needed any more */
+ kfree(rcar_hw_pci.private_data);
+ rcar_hw_pci.private_data = NULL;
+
+ return retval;
+}
+
+subsys_initcall(rcar_pci_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Renesas R-Car Gen2 internal PCI");
+MODULE_AUTHOR("Valentine Barshak <valentine.barshak@cogentembedded.com>");
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 2e9888a0635a..7c4f38dd42ba 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -408,7 +408,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
list_for_each_entry(bus, &pcie->busses, list)
if (bus->nr == busnr)
- return bus->area->addr;
+ return (void __iomem *)bus->area->addr;
bus = tegra_pcie_bus_alloc(pcie, busnr);
if (IS_ERR(bus))
@@ -416,7 +416,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
list_add_tail(&bus->list, &pcie->busses);
- return bus->area->addr;
+ return (void __iomem *)bus->area->addr;
}
static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index c10e9ac9bbbc..1e1fea4d959b 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -11,8 +11,11 @@
* published by the Free Software Foundation.
*/
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
@@ -64,7 +67,7 @@
static struct hw_pci dw_pci;
-unsigned long global_io_offset;
+static unsigned long global_io_offset;
static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
@@ -115,8 +118,8 @@ static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
writel(val, pp->dbi_base + reg);
}
-int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
+static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
{
int ret;
@@ -128,8 +131,8 @@ int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
return ret;
}
-int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
- u32 val)
+static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+ u32 val)
{
int ret;
@@ -142,6 +145,205 @@ int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
return ret;
}
+static struct irq_chip dw_msi_irq_chip = {
+ .name = "PCI-MSI",
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+/* MSI int handler */
+void dw_handle_msi_irq(struct pcie_port *pp)
+{
+ unsigned long val;
+ int i, pos, irq;
+
+ for (i = 0; i < MAX_MSI_CTRLS; i++) {
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
+ (u32 *)&val);
+ if (val) {
+ pos = 0;
+ while ((pos = find_next_bit(&val, 32, pos)) != 32) {
+ irq = irq_find_mapping(pp->irq_domain,
+ i * 32 + pos);
+ generic_handle_irq(irq);
+ pos++;
+ }
+ }
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, val);
+ }
+}
+
+void dw_pcie_msi_init(struct pcie_port *pp)
+{
+ pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
+
+ /* program the msi_data */
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
+ virt_to_phys((void *)pp->msi_data));
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
+}
+
+static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
+{
+ int flag = 1;
+
+ do {
+ pos = find_next_zero_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS, pos);
+ /* if we have reached the end, get out of here */
+ if (pos == MAX_MSI_IRQS)
+ return -ENOSPC;
+ /*
+ * Check if this position is at the correct offset. nvec is always a
+ * power of two, so pos0 must be aligned to a multiple of nvec.
+ */
+ if (pos % msgvec)
+ pos += msgvec - (pos % msgvec);
+ else
+ flag = 0;
+ } while (flag);
+
+ *pos0 = pos;
+ return 0;
+}
+
+static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
+{
+ int res, bit, irq, pos0, pos1, i;
+ u32 val;
+ struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
+
+ if (!pp) {
+ BUG();
+ return -EINVAL;
+ }
+
+ pos0 = find_first_zero_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS);
+ if (pos0 % no_irqs) {
+ if (find_valid_pos0(pp, no_irqs, pos0, &pos0))
+ goto no_valid_irq;
+ }
+ if (no_irqs > 1) {
+ pos1 = find_next_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS, pos0);
+ /* there must be nvec number of consecutive free bits */
+ while ((pos1 - pos0) < no_irqs) {
+ if (find_valid_pos0(pp, no_irqs, pos1, &pos0))
+ goto no_valid_irq;
+ pos1 = find_next_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS, pos0);
+ }
+ }
+
+ irq = irq_find_mapping(pp->irq_domain, pos0);
+ if (!irq)
+ goto no_valid_irq;
+
+ i = 0;
+ while (i < no_irqs) {
+ set_bit(pos0 + i, pp->msi_irq_in_use);
+ irq_alloc_descs((irq + i), (irq + i), 1, 0);
+ irq_set_msi_desc(irq + i, desc);
+ /* Enable corresponding interrupt in MSI interrupt controller */
+ res = ((pos0 + i) / 32) * 12;
+ bit = (pos0 + i) % 32;
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val |= 1 << bit;
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+ i++;
+ }
+
+ *pos = pos0;
+ return irq;
+
+no_valid_irq:
+ *pos = pos0;
+ return -ENOSPC;
+}
+
+static void clear_irq(unsigned int irq)
+{
+ int res, bit, val, pos;
+ struct irq_desc *desc;
+ struct msi_desc *msi;
+ struct pcie_port *pp;
+ struct irq_data *data = irq_get_irq_data(irq);
+
+ /* get the port structure */
+ desc = irq_to_desc(irq);
+ msi = irq_desc_get_msi_desc(desc);
+ pp = sys_to_pcie(msi->dev->bus->sysdata);
+ if (!pp) {
+ BUG();
+ return;
+ }
+
+ pos = data->hwirq;
+
+ irq_free_desc(irq);
+
+ clear_bit(pos, pp->msi_irq_in_use);
+
+ /* Disable corresponding interrupt on MSI interrupt controller */
+ res = (pos / 32) * 12;
+ bit = pos % 32;
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val &= ~(1 << bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
+static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ int irq, pos, msgvec;
+ u16 msg_ctr;
+ struct msi_msg msg;
+ struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
+
+ if (!pp) {
+ BUG();
+ return -EINVAL;
+ }
+
+ pci_read_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
+ &msg_ctr);
+ msgvec = (msg_ctr & PCI_MSI_FLAGS_QSIZE) >> 4;
+ if (msgvec == 0)
+ msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
+ if (msgvec > 5)
+ msgvec = 0;
+
+ irq = assign_irq((1 << msgvec), desc, &pos);
+ if (irq < 0)
+ return irq;
+
+ msg_ctr &= ~PCI_MSI_FLAGS_QSIZE;
+ msg_ctr |= msgvec << 4;
+ pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
+ msg_ctr);
+ desc->msi_attrib.multiple = msgvec;
+
+ msg.address_lo = virt_to_phys((void *)pp->msi_data);
+ msg.address_hi = 0x0;
+ msg.data = pos;
+ write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
+{
+ clear_irq(irq);
+}
+
+static struct msi_chip dw_pcie_msi_chip = {
+ .setup_irq = dw_msi_setup_irq,
+ .teardown_irq = dw_msi_teardown_irq,
+};
+
int dw_pcie_link_up(struct pcie_port *pp)
{
if (pp->ops->link_up)
@@ -150,12 +352,27 @@ int dw_pcie_link_up(struct pcie_port *pp)
return 0;
}
+static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = dw_pcie_msi_map,
+};
+
int __init dw_pcie_host_init(struct pcie_port *pp)
{
struct device_node *np = pp->dev->of_node;
struct of_pci_range range;
struct of_pci_range_parser parser;
u32 val;
+ int i;
if (of_pci_range_parser_init(&parser, np)) {
dev_err(pp->dev, "missing ranges property\n");
@@ -223,6 +440,19 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
return -EINVAL;
}
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
+ MAX_MSI_IRQS, &msi_domain_ops,
+ &dw_pcie_msi_chip);
+ if (!pp->irq_domain) {
+ dev_err(pp->dev, "irq domain init failed\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < MAX_MSI_IRQS; i++)
+ irq_create_mapping(pp->irq_domain, i);
+ }
+
if (pp->ops->host_init)
pp->ops->host_init(pp);
@@ -438,7 +668,7 @@ static struct pci_ops dw_pcie_ops = {
.write = dw_pcie_wr_conf,
};
-int dw_pcie_setup(int nr, struct pci_sys_data *sys)
+static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct pcie_port *pp;
@@ -461,7 +691,7 @@ int dw_pcie_setup(int nr, struct pci_sys_data *sys)
return 1;
}
-struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
struct pci_bus *bus;
struct pcie_port *pp = sys_to_pcie(sys);
@@ -478,17 +708,28 @@ struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
return bus;
}
-int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
return pp->irq;
}
+static void dw_pcie_add_bus(struct pci_bus *bus)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+
+ dw_pcie_msi_chip.dev = pp->dev;
+ bus->msi = &dw_pcie_msi_chip;
+ }
+}
+
static struct hw_pci dw_pci = {
.setup = dw_pcie_setup,
.scan = dw_pcie_scan_bus,
.map_irq = dw_pcie_map_irq,
+ .add_bus = dw_pcie_add_bus,
};
void dw_pcie_setup_rc(struct pcie_port *pp)
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index 133820f1da97..c15379be2372 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -11,6 +11,9 @@
* published by the Free Software Foundation.
*/
+#ifndef _PCIE_DESIGNWARE_H
+#define _PCIE_DESIGNWARE_H
+
struct pcie_port_info {
u32 cfg0_size;
u32 cfg1_size;
@@ -20,6 +23,14 @@ struct pcie_port_info {
phys_addr_t mem_bus_addr;
};
+/*
+ * The maximum number of MSI IRQs is 256 per controller, but keep it
+ * at 32 for now; we will probably never need more than 32. If needed,
+ * increase it in multiples of 32.
+ */
+#define MAX_MSI_IRQS 32
+#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
+
struct pcie_port {
struct device *dev;
u8 root_bus_nr;
@@ -38,6 +49,10 @@ struct pcie_port {
int irq;
u32 lanes;
struct pcie_host_ops *ops;
+ int msi_irq;
+ struct irq_domain *irq_domain;
+ unsigned long msi_data;
+ DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
struct pcie_host_ops {
@@ -51,15 +66,12 @@ struct pcie_host_ops {
void (*host_init)(struct pcie_port *pp);
};
-extern unsigned long global_io_offset;
-
int cfg_read(void __iomem *addr, int where, int size, u32 *val);
int cfg_write(void __iomem *addr, int where, int size, u32 val);
-int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val);
-int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val);
+void dw_handle_msi_irq(struct pcie_port *pp);
+void dw_pcie_msi_init(struct pcie_port *pp);
int dw_pcie_link_up(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);
-int dw_pcie_setup(int nr, struct pci_sys_data *sys);
-struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys);
-int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+
+#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 2a47e82821da..1ce8ee054f1a 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -338,7 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
acpi_handle chandle, handle;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
- flags &= OSC_SHPC_NATIVE_HP_CONTROL;
+ flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
if (!flags) {
err("Invalid flags %u specified!\n", flags);
return -EINVAL;
@@ -411,13 +411,10 @@ EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
static int pcihp_is_ejectable(acpi_handle handle)
{
acpi_status status;
- acpi_handle tmp;
unsigned long long removable;
- status = acpi_get_handle(handle, "_ADR", &tmp);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(handle, "_ADR"))
return 0;
- status = acpi_get_handle(handle, "_EJ0", &tmp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(handle, "_EJ0"))
return 1;
status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable);
if (ACPI_SUCCESS(status) && removable)
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index f4e028924667..26100f510b10 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -39,16 +39,6 @@
#include <linux/mutex.h>
#include <linux/pci_hotplug.h>
-#define dbg(format, arg...) \
- do { \
- if (acpiphp_debug) \
- printk(KERN_DEBUG "%s: " format, \
- MY_NAME , ## arg); \
- } while (0)
-#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
-#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
-
struct acpiphp_context;
struct acpiphp_bridge;
struct acpiphp_slot;
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index bf2203ef1308..8650d39db392 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -31,6 +31,8 @@
*
*/
+#define pr_fmt(fmt) "acpiphp: " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -43,12 +45,9 @@
#include <linux/smp.h>
#include "acpiphp.h"
-#define MY_NAME "acpiphp"
-
/* name size which is used for entries in pcihpfs */
#define SLOT_NAME_SIZE 21 /* {_SUN} */
-bool acpiphp_debug;
bool acpiphp_disabled;
/* local variables */
@@ -61,9 +60,7 @@ static struct acpiphp_attention_info *attention_info;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(disable, "disable acpiphp driver");
-module_param_named(debug, acpiphp_debug, bool, 0644);
module_param_named(disable, acpiphp_disabled, bool, 0444);
/* export the attention callback registration methods */
@@ -139,7 +136,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
/* enable the specified slot */
return acpiphp_enable_slot(slot->acpi_slot);
@@ -156,7 +153,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
/* disable the specified slot */
return acpiphp_disable_and_eject_slot(slot->acpi_slot);
@@ -176,8 +173,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
{
int retval = -ENODEV;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
-
+ pr_debug("%s - physical_slot = %s\n", __func__,
+ hotplug_slot_name(hotplug_slot));
+
if (attention_info && try_module_get(attention_info->owner)) {
retval = attention_info->set_attn(hotplug_slot, status);
module_put(attention_info->owner);
@@ -199,7 +197,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = acpiphp_get_power_status(slot->acpi_slot);
@@ -221,7 +219,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int retval = -EINVAL;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
+ pr_debug("%s - physical_slot = %s\n", __func__,
+ hotplug_slot_name(hotplug_slot));
if (attention_info && try_module_get(attention_info->owner)) {
retval = attention_info->get_attn(hotplug_slot, value);
@@ -244,7 +243,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = acpiphp_get_latch_status(slot->acpi_slot);
@@ -264,7 +263,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = acpiphp_get_adapter_status(slot->acpi_slot);
@@ -279,7 +278,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
kfree(slot->hotplug_slot);
kfree(slot);
@@ -322,11 +321,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
if (retval == -EBUSY)
goto error_hpslot;
if (retval) {
- err("pci_hp_register failed with error %d\n", retval);
+ pr_err("pci_hp_register failed with error %d\n", retval);
goto error_hpslot;
}
- info("Slot [%s] registered\n", slot_name(slot));
+ pr_info("Slot [%s] registered\n", slot_name(slot));
return 0;
error_hpslot:
@@ -343,17 +342,17 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
struct slot *slot = acpiphp_slot->slot;
int retval = 0;
- info("Slot [%s] unregistered\n", slot_name(slot));
+ pr_info("Slot [%s] unregistered\n", slot_name(slot));
retval = pci_hp_deregister(slot->hotplug_slot);
if (retval)
- err("pci_hp_deregister failed with error %d\n", retval);
+ pr_err("pci_hp_deregister failed with error %d\n", retval);
}
void __init acpiphp_init(void)
{
- info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
+ pr_info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
acpiphp_disabled ? ", disabled by user; please report a bug"
: "");
}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index be12fbfcae10..a9b3a19fb330 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -39,6 +39,8 @@
 * bus. It loses the refcount when the driver unloads.
*/
+#define pr_fmt(fmt) "acpiphp_glue: " fmt
+
#include <linux/init.h>
#include <linux/module.h>
@@ -58,8 +60,6 @@ static LIST_HEAD(bridge_list);
static DEFINE_MUTEX(bridge_mutex);
static DEFINE_MUTEX(acpiphp_context_lock);
-#define MY_NAME "acpiphp_glue"
-
static void handle_hotplug_event(acpi_handle handle, u32 type, void *data);
static void acpiphp_sanitize_bus(struct pci_bus *bus);
static void acpiphp_set_hpp_values(struct pci_bus *bus);
@@ -335,7 +335,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
if (ACPI_FAILURE(status))
sun = bridge->nr_slots;
- dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
+ pr_debug("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
sun, pci_domain_nr(pbus), pbus->number, device);
retval = acpiphp_register_hotplug_slot(slot, sun);
@@ -343,10 +343,10 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
slot->slot = NULL;
bridge->nr_slots--;
if (retval == -EBUSY)
- warn("Slot %llu already registered by another "
+ pr_warn("Slot %llu already registered by another "
"hotplug driver\n", sun);
else
- warn("acpiphp_register_hotplug_slot failed "
+ pr_warn("acpiphp_register_hotplug_slot failed "
"(err code = 0x%x)\n", retval);
}
/* Even if the slot registration fails, we can still use it. */
@@ -369,7 +369,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
if (register_hotplug_dock_device(handle,
&acpiphp_dock_ops, context,
acpiphp_dock_init, acpiphp_dock_release))
- dbg("failed to register dock device\n");
+ pr_debug("failed to register dock device\n");
}
/* install notify handler */
@@ -427,7 +427,7 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
ACPI_SYSTEM_NOTIFY,
handle_hotplug_event);
if (ACPI_FAILURE(status))
- err("failed to remove notify handler\n");
+ pr_err("failed to remove notify handler\n");
}
}
if (slot->slot)
@@ -552,9 +552,8 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
struct acpiphp_func *func;
int max, pass;
LIST_HEAD(add_list);
- int nr_found;
- nr_found = acpiphp_rescan_slot(slot);
+ acpiphp_rescan_slot(slot);
max = acpiphp_max_busnr(bus);
for (pass = 0; pass < 2; pass++) {
list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -574,9 +573,6 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
}
}
__pci_bus_assign_resources(bus, &add_list, NULL);
- /* Nothing more to do here if there are no new devices on this bus. */
- if (!nr_found && (slot->flags & SLOT_ENABLED))
- return;
acpiphp_sanitize_bus(bus);
acpiphp_set_hpp_values(bus);
@@ -830,8 +826,9 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
/* bus re-enumerate */
- dbg("%s: Bus check notify on %s\n", __func__, objname);
- dbg("%s: re-enumerating slots under %s\n", __func__, objname);
+ pr_debug("%s: Bus check notify on %s\n", __func__, objname);
+ pr_debug("%s: re-enumerating slots under %s\n",
+ __func__, objname);
if (bridge) {
acpiphp_check_bridge(bridge);
} else {
@@ -845,7 +842,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
case ACPI_NOTIFY_DEVICE_CHECK:
/* device check */
- dbg("%s: Device check notify on %s\n", __func__, objname);
+ pr_debug("%s: Device check notify on %s\n", __func__, objname);
if (bridge) {
acpiphp_check_bridge(bridge);
} else {
@@ -866,7 +863,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
case ACPI_NOTIFY_EJECT_REQUEST:
/* request device eject */
- dbg("%s: Device eject notify on %s\n", __func__, objname);
+ pr_debug("%s: Device eject notify on %s\n", __func__, objname);
acpiphp_disable_and_eject_slot(func->slot);
break;
}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 2f5786c8522c..0d64c414bf78 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -25,6 +25,8 @@
*
*/
+#define pr_fmt(fmt) "acpiphp_ibm: " fmt
+
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -43,23 +45,11 @@
#define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>"
#define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver IBM extension"
-static bool debug;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
-module_param(debug, bool, 0644);
-MODULE_PARM_DESC(debug, " Debugging mode enabled or not");
-#define MY_NAME "acpiphp_ibm"
-
-#undef dbg
-#define dbg(format, arg...) \
-do { \
- if (debug) \
- printk(KERN_DEBUG "%s: " format, \
- MY_NAME , ## arg); \
-} while (0)
#define FOUND_APCI 0x61504349
/* these are the names for the IBM ACPI pseudo-device */
@@ -189,7 +179,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot));
- dbg("%s: set slot %d (%d) attention status to %d\n", __func__,
+ pr_debug("%s: set slot %d (%d) attention status to %d\n", __func__,
ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
(status ? 1 : 0));
@@ -202,10 +192,10 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
stat = acpi_evaluate_integer(ibm_acpi_handle, "APLS", &params, &rc);
if (ACPI_FAILURE(stat)) {
- err("APLS evaluation failed: 0x%08x\n", stat);
+ pr_err("APLS evaluation failed: 0x%08x\n", stat);
return -ENODEV;
} else if (!rc) {
- err("APLS method failed: 0x%08llx\n", rc);
+ pr_err("APLS method failed: 0x%08llx\n", rc);
return -ERANGE;
}
return 0;
@@ -234,7 +224,7 @@ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status)
else
*status = 0;
- dbg("%s: get slot %d (%d) attention status is %d\n", __func__,
+ pr_debug("%s: get slot %d (%d) attention status is %d\n", __func__,
ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
*status);
@@ -266,10 +256,10 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context)
u8 subevent = event & 0xf0;
struct notification *note = context;
- dbg("%s: Received notification %02x\n", __func__, event);
+ pr_debug("%s: Received notification %02x\n", __func__, event);
if (subevent == 0x80) {
- dbg("%s: generationg bus event\n", __func__);
+ pr_debug("%s: generating bus event\n", __func__);
acpi_bus_generate_netlink_event(note->device->pnp.device_class,
dev_name(&note->device->dev),
note->event, detail);
@@ -301,7 +291,7 @@ static int ibm_get_table_from_acpi(char **bufp)
status = acpi_evaluate_object(ibm_acpi_handle, "APCI", NULL, &buffer);
if (ACPI_FAILURE(status)) {
- err("%s: APCI evaluation failed\n", __func__);
+ pr_err("%s: APCI evaluation failed\n", __func__);
return -ENODEV;
}
@@ -309,13 +299,13 @@ static int ibm_get_table_from_acpi(char **bufp)
if (!(package) ||
(package->type != ACPI_TYPE_PACKAGE) ||
!(package->package.elements)) {
- err("%s: Invalid APCI object\n", __func__);
+ pr_err("%s: Invalid APCI object\n", __func__);
goto read_table_done;
}
for(size = 0, i = 0; i < package->package.count; i++) {
if (package->package.elements[i].type != ACPI_TYPE_BUFFER) {
- err("%s: Invalid APCI element %d\n", __func__, i);
+ pr_err("%s: Invalid APCI element %d\n", __func__, i);
goto read_table_done;
}
size += package->package.elements[i].buffer.length;
@@ -325,7 +315,7 @@ static int ibm_get_table_from_acpi(char **bufp)
goto read_table_done;
lbuf = kzalloc(size, GFP_KERNEL);
- dbg("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
+ pr_debug("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
__func__, package->package.count, size, lbuf);
if (lbuf) {
@@ -370,8 +360,8 @@ static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
{
int bytes_read = -EINVAL;
char *table = NULL;
-
- dbg("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);
+
+ pr_debug("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);
if (pos == 0) {
bytes_read = ibm_get_table_from_acpi(&table);
@@ -403,7 +393,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
status = acpi_get_object_info(handle, &info);
if (ACPI_FAILURE(status)) {
- err("%s: Failed to get device information status=0x%x\n",
+ pr_err("%s: Failed to get device information status=0x%x\n",
__func__, status);
return retval;
}
@@ -411,7 +401,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
if (info->current_status && (info->valid & ACPI_VALID_HID) &&
(!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
!strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) {
- dbg("found hardware: %s, handle: %p\n",
+ pr_debug("found hardware: %s, handle: %p\n",
info->hardware_id.string, handle);
*phandle = handle;
/* returning non-zero causes the search to stop
@@ -432,18 +422,18 @@ static int __init ibm_acpiphp_init(void)
struct acpi_device *device;
struct kobject *sysdir = &pci_slots_kset->kobj;
- dbg("%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, ibm_find_acpi_device, NULL,
&ibm_acpi_handle, NULL) != FOUND_APCI) {
- err("%s: acpi_walk_namespace failed\n", __func__);
+ pr_err("%s: acpi_walk_namespace failed\n", __func__);
retval = -ENODEV;
goto init_return;
}
- dbg("%s: found IBM aPCI device\n", __func__);
+ pr_debug("%s: found IBM aPCI device\n", __func__);
if (acpi_bus_get_device(ibm_acpi_handle, &device)) {
- err("%s: acpi_bus_get_device failed\n", __func__);
+ pr_err("%s: acpi_bus_get_device failed\n", __func__);
retval = -ENODEV;
goto init_return;
}
@@ -457,7 +447,7 @@ static int __init ibm_acpiphp_init(void)
ACPI_DEVICE_NOTIFY, ibm_handle_events,
&ibm_note);
if (ACPI_FAILURE(status)) {
- err("%s: Failed to register notification handler\n",
+ pr_err("%s: Failed to register notification handler\n",
__func__);
retval = -EBUSY;
goto init_cleanup;
@@ -479,17 +469,17 @@ static void __exit ibm_acpiphp_exit(void)
acpi_status status;
struct kobject *sysdir = &pci_slots_kset->kobj;
- dbg("%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (acpiphp_unregister_attention(&ibm_attention_info))
- err("%s: attention info deregistration failed", __func__);
+ pr_err("%s: attention info deregistration failed", __func__);
status = acpi_remove_notify_handler(
ibm_acpi_handle,
ACPI_DEVICE_NOTIFY,
ibm_handle_events);
if (ACPI_FAILURE(status))
- err("%s: Notification handler removal failed\n", __func__);
+ pr_err("%s: Notification handler removal failed\n", __func__);
/* remove the /sys entries */
sysfs_remove_bin_file(sysdir, &ibm_apci_table_attr);
}
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 66e505ca24ef..3c7eb5dd91c6 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -133,7 +133,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- pr_debug("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
kfree(slot);
@@ -183,10 +182,9 @@ int zpci_init_slot(struct zpci_dev *zdev)
snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
ZPCI_DEVFN, name);
- if (rc) {
- pr_err("pci_hp_register failed with error %d\n", rc);
+ if (rc)
goto error_reg;
- }
+
list_add(&slot->slot_list, &s390_hotplug_slot_list);
return 0;
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index e260f207a90e..d876e4b3c6a9 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -191,7 +191,7 @@ static inline const char *slot_name(struct slot *slot)
#include <linux/pci-acpi.h>
static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
{
- u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;
+ u32 flags = OSC_PCI_SHPC_NATIVE_HP_CONTROL;
return acpi_get_hp_hw_control_from_firmware(dev, flags);
}
#else
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d5f90d6383bc..604265c40853 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -831,7 +831,7 @@ int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
int status, maxvec;
u16 msgctl;
- if (!dev->msi_cap)
+ if (!dev->msi_cap || dev->current_state != PCI_D0)
return -EINVAL;
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
@@ -862,7 +862,7 @@ int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
int ret, nvec;
u16 msgctl;
- if (!dev->msi_cap)
+ if (!dev->msi_cap || dev->current_state != PCI_D0)
return -EINVAL;
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
@@ -955,7 +955,7 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
int status, nr_entries;
int i, j;
- if (!entries || !dev->msix_cap)
+ if (!entries || !dev->msix_cap || dev->current_state != PCI_D0)
return -EINVAL;
status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index b0299e6d9a3f..dfd1f59de729 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -181,7 +181,6 @@ static bool acpi_pci_power_manageable(struct pci_dev *dev)
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
- acpi_handle tmp;
static const u8 state_conv[] = {
[PCI_D0] = ACPI_STATE_D0,
[PCI_D1] = ACPI_STATE_D1,
@@ -192,7 +191,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
int error = -EINVAL;
/* If the ACPI device has _EJ0, ignore the device */
- if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
+ if (!handle || acpi_has_method(handle, "_EJ0"))
return -ENODEV;
switch (state) {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 98f7b9b89507..840fdc5ba0d8 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -135,6 +135,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
return retval;
return count;
}
+static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
/**
* store_remove_id - remove a PCI device ID from this driver
@@ -180,12 +181,14 @@ store_remove_id(struct device_driver *driver, const char *buf, size_t count)
return retval;
return count;
}
+static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
-static struct driver_attribute pci_drv_attrs[] = {
- __ATTR(new_id, S_IWUSR, NULL, store_new_id),
- __ATTR(remove_id, S_IWUSR, NULL, store_remove_id),
- __ATTR_NULL,
+static struct attribute *pci_drv_attrs[] = {
+ &driver_attr_new_id.attr,
+ &driver_attr_remove_id.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(pci_drv);
/**
* pci_match_id - See if a pci device matches a given pci_id table
@@ -599,18 +602,10 @@ static int pci_pm_prepare(struct device *dev)
return error;
}
-static void pci_pm_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-}
#else /* !CONFIG_PM_SLEEP */
#define pci_pm_prepare NULL
-#define pci_pm_complete NULL
#endif /* !CONFIG_PM_SLEEP */
@@ -1121,9 +1116,8 @@ static int pci_pm_runtime_idle(struct device *dev)
#ifdef CONFIG_PM
-const struct dev_pm_ops pci_dev_pm_ops = {
+static const struct dev_pm_ops pci_dev_pm_ops = {
.prepare = pci_pm_prepare,
- .complete = pci_pm_complete,
.suspend = pci_pm_suspend,
.resume = pci_pm_resume,
.freeze = pci_pm_freeze,
@@ -1316,9 +1310,9 @@ struct bus_type pci_bus_type = {
.probe = pci_device_probe,
.remove = pci_device_remove,
.shutdown = pci_device_shutdown,
- .dev_attrs = pci_dev_attrs,
- .bus_attrs = pci_bus_attrs,
- .drv_attrs = pci_drv_attrs,
+ .dev_groups = pci_dev_groups,
+ .bus_groups = pci_bus_groups,
+ .drv_groups = pci_drv_groups,
.pm = PCI_PM_OPS_PTR,
};
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 7128cfdd64aa..2aaa83c85a4e 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -42,7 +42,8 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
\
pdev = to_pci_dev (dev); \
return sprintf (buf, format_string, pdev->field); \
-}
+} \
+static DEVICE_ATTR_RO(field)
pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
@@ -73,10 +74,13 @@ static ssize_t broken_parity_status_store(struct device *dev,
return count;
}
+static DEVICE_ATTR_RW(broken_parity_status);
-static ssize_t local_cpus_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
+static ssize_t pci_dev_show_local_cpu(struct device *dev,
+ int type,
+ struct device_attribute *attr,
+ char *buf)
+{
const struct cpumask *mask;
int len;
@@ -86,30 +90,28 @@ static ssize_t local_cpus_show(struct device *dev,
#else
mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
- len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
+ len = type ?
+ cpumask_scnprintf(buf, PAGE_SIZE-2, mask) :
+ cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
+
buf[len++] = '\n';
buf[len] = '\0';
return len;
}
+static ssize_t local_cpus_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return pci_dev_show_local_cpu(dev, 1, attr, buf);
+}
+static DEVICE_ATTR_RO(local_cpus);
static ssize_t local_cpulist_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct cpumask *mask;
- int len;
-
-#ifdef CONFIG_NUMA
- mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
- cpumask_of_node(dev_to_node(dev));
-#else
- mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
-#endif
- len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
- buf[len++] = '\n';
- buf[len] = '\0';
- return len;
+ return pci_dev_show_local_cpu(dev, 0, attr, buf);
}
+static DEVICE_ATTR_RO(local_cpulist);
/*
* PCI Bus Class Devices
@@ -170,6 +172,7 @@ resource_show(struct device * dev, struct device_attribute *attr, char * buf)
}
return (str - buf);
}
+static DEVICE_ATTR_RO(resource);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -181,10 +184,11 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
(u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
(u8)(pci_dev->class));
}
+static DEVICE_ATTR_RO(modalias);
-static ssize_t is_enabled_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t enabled_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long val;
@@ -208,14 +212,15 @@ static ssize_t is_enabled_store(struct device *dev,
return result < 0 ? result : count;
}
-static ssize_t is_enabled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
pdev = to_pci_dev (dev);
return sprintf (buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
+static DEVICE_ATTR_RW(enabled);
#ifdef CONFIG_NUMA
static ssize_t
@@ -223,6 +228,7 @@ numa_node_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf (buf, "%d\n", dev->numa_node);
}
+static DEVICE_ATTR_RO(numa_node);
#endif
static ssize_t
@@ -232,6 +238,7 @@ dma_mask_bits_show(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf (buf, "%d\n", fls64(pdev->dma_mask));
}
+static DEVICE_ATTR_RO(dma_mask_bits);
static ssize_t
consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr,
@@ -239,6 +246,7 @@ consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr,
{
return sprintf (buf, "%d\n", fls64(dev->coherent_dma_mask));
}
+static DEVICE_ATTR_RO(consistent_dma_mask_bits);
static ssize_t
msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -283,6 +291,7 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_RW(msi_bus);
static DEFINE_MUTEX(pci_remove_rescan_mutex);
static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
@@ -302,10 +311,20 @@ static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
}
return count;
}
+static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store);
-struct bus_attribute pci_bus_attrs[] = {
- __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store),
- __ATTR_NULL
+static struct attribute *pci_bus_attrs[] = {
+ &bus_attr_rescan.attr,
+ NULL,
+};
+
+static const struct attribute_group pci_bus_group = {
+ .attrs = pci_bus_attrs,
+};
+
+const struct attribute_group *pci_bus_groups[] = {
+ &pci_bus_group,
+ NULL,
};
static ssize_t
@@ -325,8 +344,9 @@ dev_rescan_store(struct device *dev, struct device_attribute *attr,
}
return count;
}
-struct device_attribute dev_rescan_attr = __ATTR(rescan, (S_IWUSR|S_IWGRP),
- NULL, dev_rescan_store);
+static struct device_attribute dev_rescan_attr = __ATTR(rescan,
+ (S_IWUSR|S_IWGRP),
+ NULL, dev_rescan_store);
static void remove_callback(struct device *dev)
{
@@ -356,8 +376,9 @@ remove_store(struct device *dev, struct device_attribute *dummy,
count = ret;
return count;
}
-struct device_attribute dev_remove_attr = __ATTR(remove, (S_IWUSR|S_IWGRP),
- NULL, remove_store);
+static struct device_attribute dev_remove_attr = __ATTR(remove,
+ (S_IWUSR|S_IWGRP),
+ NULL, remove_store);
static ssize_t
dev_bus_rescan_store(struct device *dev, struct device_attribute *attr,
@@ -404,6 +425,7 @@ static ssize_t d3cold_allowed_show(struct device *dev,
struct pci_dev *pdev = to_pci_dev(dev);
return sprintf (buf, "%u\n", pdev->d3cold_allowed);
}
+static DEVICE_ATTR_RW(d3cold_allowed);
#endif
#ifdef CONFIG_PCI_IOV
@@ -489,30 +511,38 @@ static struct device_attribute sriov_numvfs_attr =
sriov_numvfs_show, sriov_numvfs_store);
#endif /* CONFIG_PCI_IOV */
-struct device_attribute pci_dev_attrs[] = {
- __ATTR_RO(resource),
- __ATTR_RO(vendor),
- __ATTR_RO(device),
- __ATTR_RO(subsystem_vendor),
- __ATTR_RO(subsystem_device),
- __ATTR_RO(class),
- __ATTR_RO(irq),
- __ATTR_RO(local_cpus),
- __ATTR_RO(local_cpulist),
- __ATTR_RO(modalias),
+static struct attribute *pci_dev_attrs[] = {
+ &dev_attr_resource.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_subsystem_vendor.attr,
+ &dev_attr_subsystem_device.attr,
+ &dev_attr_class.attr,
+ &dev_attr_irq.attr,
+ &dev_attr_local_cpus.attr,
+ &dev_attr_local_cpulist.attr,
+ &dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
- __ATTR_RO(numa_node),
+ &dev_attr_numa_node.attr,
#endif
- __ATTR_RO(dma_mask_bits),
- __ATTR_RO(consistent_dma_mask_bits),
- __ATTR(enable, 0600, is_enabled_show, is_enabled_store),
- __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
- broken_parity_status_show,broken_parity_status_store),
- __ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store),
+ &dev_attr_dma_mask_bits.attr,
+ &dev_attr_consistent_dma_mask_bits.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_broken_parity_status.attr,
+ &dev_attr_msi_bus.attr,
#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
- __ATTR(d3cold_allowed, 0644, d3cold_allowed_show, d3cold_allowed_store),
+ &dev_attr_d3cold_allowed.attr,
#endif
- __ATTR_NULL,
+ NULL,
+};
+
+static const struct attribute_group pci_dev_group = {
+ .attrs = pci_dev_attrs,
+};
+
+const struct attribute_group *pci_dev_groups[] = {
+ &pci_dev_group,
+ NULL,
};
static struct attribute *pcibus_attrs[] = {
@@ -544,7 +574,7 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
!!(pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW));
}
-struct device_attribute vga_attr = __ATTR_RO(boot_vga);
+static struct device_attribute vga_attr = __ATTR_RO(boot_vga);
static ssize_t
pci_read_config(struct file *filp, struct kobject *kobj,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index bdd64b1b4817..16c4366e402a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1644,8 +1644,10 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
if (enable) {
pme_dev = kmalloc(sizeof(struct pci_pme_device),
GFP_KERNEL);
- if (!pme_dev)
- goto out;
+ if (!pme_dev) {
+ dev_warn(&dev->dev, "can't enable PME#\n");
+ return;
+ }
pme_dev->dev = dev;
mutex_lock(&pci_pme_list_mutex);
list_add(&pme_dev->list, &pci_pme_list);
@@ -1666,7 +1668,6 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
}
}
-out:
dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
@@ -2860,7 +2861,7 @@ void __weak pcibios_set_master(struct pci_dev *dev)
lat = pcibios_max_latency;
else
return;
- dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
+
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
@@ -3978,6 +3979,7 @@ int pcie_get_mps(struct pci_dev *dev)
return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}
+EXPORT_SYMBOL(pcie_get_mps);
/**
* pcie_set_mps - set PCI Express maximum payload size
@@ -4002,6 +4004,7 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_PAYLOAD, v);
}
+EXPORT_SYMBOL(pcie_set_mps);
/**
* pcie_get_minimum_link - determine minimum link settings of a PCI device
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 8a00c063d7bc..9c91ecc1301b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -153,10 +153,10 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
return (dev->no_d1d2 || parent_dstates);
}
-extern struct device_attribute pci_dev_attrs[];
+extern const struct attribute_group *pci_dev_groups[];
extern const struct attribute_group *pcibus_groups[];
extern struct device_type pci_dev_type;
-extern struct bus_attribute pci_bus_attrs[];
+extern const struct attribute_group *pci_bus_groups[];
/**
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 7ef0f868b3e0..5e14f5a51357 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -641,8 +641,7 @@ static void pci_set_bus_speed(struct pci_bus *bus)
return;
}
- pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
- if (pos) {
+ if (pci_is_pcie(bridge)) {
u32 linkcap;
u16 linksta;
@@ -984,7 +983,6 @@ void set_pcie_port_type(struct pci_dev *pdev)
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!pos)
return;
- pdev->is_pcie = 1;
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_flags_reg = reg16;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f6c31fabf3af..91490453c229 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2955,6 +2955,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
/*
+ * PCI devices which are on Intel chips can skip the 10ms delay
+ * before entering D3 mode.
+ */
+static void quirk_remove_d3_delay(struct pci_dev *dev)
+{
+ dev->d3_delay = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
+
+/*
* Some devices may pass our check in pci_intx_mask_supported if
* PCI_COMMAND_INTX_DISABLE works though they actually do not properly
* support this feature.
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index bc26d7990cc3..4ce83b26ae9e 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -982,7 +982,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
}
min_align = calculate_mem_align(aligns, max_order);
- min_align = max(min_align, window_alignment(bus, b_res->flags & mask));
+ min_align = max(min_align, window_alignment(bus, b_res->flags));
size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
if (children_add_size > add_size)
add_size = children_add_size;
@@ -1136,7 +1136,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
}
/* The root bus? */
- if (!bus->self)
+ if (pci_is_root_bus(bus))
return;
switch (bus->self->class >> 8) {
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index b8f5acf02261..de24232c5191 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -245,7 +245,7 @@ static int at91_cf_dt_init(struct platform_device *pdev)
}
#endif
-static int __init at91_cf_probe(struct platform_device *pdev)
+static int at91_cf_probe(struct platform_device *pdev)
{
struct at91_cf_socket *cf;
struct at91_cf_data *board = pdev->dev.platform_data;
@@ -354,7 +354,7 @@ fail0a:
return status;
}
-static int __exit at91_cf_remove(struct platform_device *pdev)
+static int at91_cf_remove(struct platform_device *pdev)
{
struct at91_cf_socket *cf = platform_get_drvdata(pdev);
@@ -404,14 +404,13 @@ static struct platform_driver at91_cf_driver = {
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(at91_cf_dt_ids),
},
- .remove = __exit_p(at91_cf_remove),
+ .probe = at91_cf_probe,
+ .remove = at91_cf_remove,
.suspend = at91_cf_suspend,
.resume = at91_cf_resume,
};
-/*--------------------------------------------------------------------------*/
-
-module_platform_driver_probe(at91_cf_driver, at91_cf_probe);
+module_platform_driver(at91_cf_driver);
MODULE_DESCRIPTION("AT91 Compact Flash Driver");
MODULE_AUTHOR("David Brownell");
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 2deacbb2ffdc..757119b87146 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -992,16 +992,17 @@ static ssize_t field##_show (struct device *dev, struct device_attribute *attr,
{ \
struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \
return p_dev->test ? sprintf(buf, format, p_dev->field) : -ENODEV; \
-}
+} \
+static DEVICE_ATTR_RO(field);
#define pcmcia_device_stringattr(name, field) \
static ssize_t name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \
return p_dev->field ? sprintf(buf, "%s\n", p_dev->field) : -ENODEV; \
-}
+} \
+static DEVICE_ATTR_RO(name);
-pcmcia_device_attr(func, socket, "0x%02x\n");
pcmcia_device_attr(func_id, has_func_id, "0x%02x\n");
pcmcia_device_attr(manf_id, has_manf_id, "0x%04x\n");
pcmcia_device_attr(card_id, has_card_id, "0x%04x\n");
@@ -1010,8 +1011,16 @@ pcmcia_device_stringattr(prod_id2, prod_id[1]);
pcmcia_device_stringattr(prod_id3, prod_id[2]);
pcmcia_device_stringattr(prod_id4, prod_id[3]);
-static ssize_t pcmcia_show_resources(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t function_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
+ return p_dev->socket ? sprintf(buf, "0x%02x\n", p_dev->func) : -ENODEV;
+}
+static DEVICE_ATTR_RO(function);
+
+static ssize_t resources_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
char *str = buf;
@@ -1022,8 +1031,9 @@ static ssize_t pcmcia_show_resources(struct device *dev,
return str - buf;
}
+static DEVICE_ATTR_RO(resources);
-static ssize_t pcmcia_show_pm_state(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t pm_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
@@ -1033,8 +1043,8 @@ static ssize_t pcmcia_show_pm_state(struct device *dev, struct device_attribute
return sprintf(buf, "on\n");
}
-static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pm_state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
int ret = 0;
@@ -1049,7 +1059,7 @@ static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute
return ret ? ret : count;
}
-
+static DEVICE_ATTR_RW(pm_state);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -1072,8 +1082,9 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
p_dev->func, p_dev->device_no,
hash[0], hash[1], hash[2], hash[3]);
}
+static DEVICE_ATTR_RO(modalias);
-static ssize_t pcmcia_store_allow_func_id_match(struct device *dev,
+static ssize_t allow_func_id_match_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
@@ -1088,22 +1099,24 @@ static ssize_t pcmcia_store_allow_func_id_match(struct device *dev,
return count;
}
-
-static struct device_attribute pcmcia_dev_attrs[] = {
- __ATTR(function, 0444, func_show, NULL),
- __ATTR(pm_state, 0644, pcmcia_show_pm_state, pcmcia_store_pm_state),
- __ATTR(resources, 0444, pcmcia_show_resources, NULL),
- __ATTR_RO(func_id),
- __ATTR_RO(manf_id),
- __ATTR_RO(card_id),
- __ATTR_RO(prod_id1),
- __ATTR_RO(prod_id2),
- __ATTR_RO(prod_id3),
- __ATTR_RO(prod_id4),
- __ATTR_RO(modalias),
- __ATTR(allow_func_id_match, 0200, NULL, pcmcia_store_allow_func_id_match),
- __ATTR_NULL,
+static DEVICE_ATTR_WO(allow_func_id_match);
+
+static struct attribute *pcmcia_dev_attrs[] = {
+ &dev_attr_resources.attr,
+ &dev_attr_pm_state.attr,
+ &dev_attr_function.attr,
+ &dev_attr_func_id.attr,
+ &dev_attr_manf_id.attr,
+ &dev_attr_card_id.attr,
+ &dev_attr_prod_id1.attr,
+ &dev_attr_prod_id2.attr,
+ &dev_attr_prod_id3.attr,
+ &dev_attr_prod_id4.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_allow_func_id_match.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(pcmcia_dev);
/* PM support, also needed for reset */
@@ -1389,7 +1402,7 @@ struct bus_type pcmcia_bus_type = {
.name = "pcmcia",
.uevent = pcmcia_bus_uevent,
.match = pcmcia_bus_match,
- .dev_attrs = pcmcia_dev_attrs,
+ .dev_groups = pcmcia_dev_groups,
.probe = pcmcia_device_probe,
.remove = pcmcia_device_remove,
.suspend = pcmcia_dev_suspend,
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index a007321ad314..1b206eac5f93 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -30,6 +30,8 @@
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index a82ace4d9a20..0846922b2316 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -15,15 +15,21 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/list.h>
+#include <linux/interrupt.h>
+
+#include <linux/irqchip/chained_irq.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/platform_data/pinctrl-single.h>
+
#include "core.h"
#include "pinconf.h"
@@ -150,19 +156,36 @@ struct pcs_name {
};
/**
+ * struct pcs_soc_data - SoC specific settings
+ * @flags: initial SoC specific PCS_FEAT_xxx values
+ * @irq: optional interrupt for the controller
+ * @irq_enable_mask: optional SoC specific interrupt enable mask
+ * @irq_status_mask: optional SoC specific interrupt status mask
+ * @rearm: optional SoC specific wake-up rearm function
+ */
+struct pcs_soc_data {
+ unsigned flags;
+ int irq;
+ unsigned irq_enable_mask;
+ unsigned irq_status_mask;
+ void (*rearm)(void);
+};
+
+/**
* struct pcs_device - pinctrl device instance
* @res: resources
* @base: virtual address of the controller
* @size: size of the ioremapped area
* @dev: device entry
* @pctl: pin controller device
+ * @flags: mask of PCS_FEAT_xxx values
+ * @lock: spinlock for register access
* @mutex: mutex protecting the lists
* @width: bits per mux register
* @fmask: function register mask
* @fshift: function register shift
* @foff: value to turn mux off
* @fmax: max number of functions in fmask
- * @is_pinconf: whether supports pinconf
* @bits_per_pin:number of bits per pin
* @names: array of register names for pins
* @pins: physical pins on the SoC
@@ -171,6 +194,9 @@ struct pcs_name {
* @pingroups: list of pingroups
* @functions: list of functions
* @gpiofuncs: list of gpio functions
+ * @irqs: list of interrupt registers
+ * @chip: chip container for this instance
+ * @domain: IRQ domain for this instance
* @ngroups: number of pingroups
* @nfuncs: number of functions
* @desc: pin controller descriptor
@@ -183,6 +209,12 @@ struct pcs_device {
unsigned size;
struct device *dev;
struct pinctrl_dev *pctl;
+ unsigned flags;
+#define PCS_QUIRK_SHARED_IRQ (1 << 2)
+#define PCS_FEAT_IRQ (1 << 1)
+#define PCS_FEAT_PINCONF (1 << 0)
+ struct pcs_soc_data socdata;
+ raw_spinlock_t lock;
struct mutex mutex;
unsigned width;
unsigned fmask;
@@ -190,7 +222,6 @@ struct pcs_device {
unsigned foff;
unsigned fmax;
bool bits_per_mux;
- bool is_pinconf;
unsigned bits_per_pin;
struct pcs_name *names;
struct pcs_data pins;
@@ -199,6 +230,9 @@ struct pcs_device {
struct list_head pingroups;
struct list_head functions;
struct list_head gpiofuncs;
+ struct list_head irqs;
+ struct irq_chip chip;
+ struct irq_domain *domain;
unsigned ngroups;
unsigned nfuncs;
struct pinctrl_desc desc;
@@ -206,6 +240,10 @@ struct pcs_device {
void (*write)(unsigned val, void __iomem *reg);
};
+#define PCS_QUIRK_HAS_SHARED_IRQ (pcs->flags & PCS_QUIRK_SHARED_IRQ)
+#define PCS_HAS_IRQ (pcs->flags & PCS_FEAT_IRQ)
+#define PCS_HAS_PINCONF (pcs->flags & PCS_FEAT_PINCONF)
+
static int pcs_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
unsigned long *config);
static int pcs_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
@@ -429,9 +467,11 @@ static int pcs_enable(struct pinctrl_dev *pctldev, unsigned fselector,
for (i = 0; i < func->nvals; i++) {
struct pcs_func_vals *vals;
+ unsigned long flags;
unsigned val, mask;
vals = &func->vals[i];
+ raw_spin_lock_irqsave(&pcs->lock, flags);
val = pcs->read(vals->reg);
if (pcs->bits_per_mux)
@@ -442,6 +482,7 @@ static int pcs_enable(struct pinctrl_dev *pctldev, unsigned fselector,
val &= ~mask;
val |= (vals->val & mask);
pcs->write(val, vals->reg);
+ raw_spin_unlock_irqrestore(&pcs->lock, flags);
}
return 0;
@@ -483,13 +524,16 @@ static void pcs_disable(struct pinctrl_dev *pctldev, unsigned fselector,
for (i = 0; i < func->nvals; i++) {
struct pcs_func_vals *vals;
+ unsigned long flags;
unsigned val;
vals = &func->vals[i];
+ raw_spin_lock_irqsave(&pcs->lock, flags);
val = pcs->read(vals->reg);
val &= ~pcs->fmask;
val |= pcs->foff << pcs->fshift;
pcs->write(val, vals->reg);
+ raw_spin_unlock_irqrestore(&pcs->lock, flags);
}
}
@@ -1060,7 +1104,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
};
/* If pinconf isn't supported, don't parse properties in below. */
- if (!pcs->is_pinconf)
+ if (!PCS_HAS_PINCONF)
return 0;
/* cacluate how much properties are supported in current node */
@@ -1184,7 +1228,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
(*map)->data.mux.group = np->name;
(*map)->data.mux.function = np->name;
- if (pcs->is_pinconf) {
+ if (PCS_HAS_PINCONF) {
res = pcs_parse_pinconf(pcs, np, function, map);
if (res)
goto free_pingroups;
@@ -1305,7 +1349,7 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
(*map)->data.mux.group = np->name;
(*map)->data.mux.function = np->name;
- if (pcs->is_pinconf) {
+ if (PCS_HAS_PINCONF) {
dev_err(pcs->dev, "pinconf not supported\n");
goto free_pingroups;
}
@@ -1440,11 +1484,33 @@ static void pcs_free_pingroups(struct pcs_device *pcs)
}
/**
+ * pcs_irq_free() - free interrupt
+ * @pcs: pcs driver instance
+ */
+static void pcs_irq_free(struct pcs_device *pcs)
+{
+ struct pcs_soc_data *pcs_soc = &pcs->socdata;
+
+ if (pcs_soc->irq < 0)
+ return;
+
+ if (pcs->domain)
+ irq_domain_remove(pcs->domain);
+
+ if (PCS_QUIRK_HAS_SHARED_IRQ)
+ free_irq(pcs_soc->irq, pcs_soc);
+ else
+ irq_set_chained_handler(pcs_soc->irq, NULL);
+}
+
+/**
* pcs_free_resources() - free memory used by this driver
* @pcs: pcs driver instance
*/
static void pcs_free_resources(struct pcs_device *pcs)
{
+ pcs_irq_free(pcs);
+
if (pcs->pctl)
pinctrl_unregister(pcs->pctl);
@@ -1493,6 +1559,268 @@ static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs)
}
return ret;
}
+/**
+ * @reg: virtual address of interrupt register
+ * @hwirq: hardware irq number
+ * @irq: virtual irq number
+ * @node: list node
+ */
+struct pcs_interrupt {
+ void __iomem *reg;
+ irq_hw_number_t hwirq;
+ unsigned int irq;
+ struct list_head node;
+};
+
+/**
+ * pcs_irq_set() - enables or disables an interrupt
+ *
+ * Note that this currently assumes one interrupt per pinctrl
+ * register that is typically used for wake-up events.
+ */
+static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc,
+ int irq, const bool enable)
+{
+ struct pcs_device *pcs;
+ struct list_head *pos;
+ unsigned mask;
+
+ pcs = container_of(pcs_soc, struct pcs_device, socdata);
+ list_for_each(pos, &pcs->irqs) {
+ struct pcs_interrupt *pcswi;
+ unsigned soc_mask;
+
+ pcswi = list_entry(pos, struct pcs_interrupt, node);
+ if (irq != pcswi->irq)
+ continue;
+
+ soc_mask = pcs_soc->irq_enable_mask;
+ raw_spin_lock(&pcs->lock);
+ mask = pcs->read(pcswi->reg);
+ if (enable)
+ mask |= soc_mask;
+ else
+ mask &= ~soc_mask;
+ pcs->write(mask, pcswi->reg);
+ raw_spin_unlock(&pcs->lock);
+ }
+}
+
+/**
+ * pcs_irq_mask() - mask pinctrl interrupt
+ * @d: interrupt data
+ */
+static void pcs_irq_mask(struct irq_data *d)
+{
+ struct pcs_soc_data *pcs_soc = irq_data_get_irq_chip_data(d);
+
+ pcs_irq_set(pcs_soc, d->irq, false);
+}
+
+/**
+ * pcs_irq_unmask() - unmask pinctrl interrupt
+ * @d: interrupt data
+ */
+static void pcs_irq_unmask(struct irq_data *d)
+{
+ struct pcs_soc_data *pcs_soc = irq_data_get_irq_chip_data(d);
+
+ pcs_irq_set(pcs_soc, d->irq, true);
+ if (pcs_soc->rearm)
+ pcs_soc->rearm();
+}
+
+/**
+ * pcs_irq_set_wake() - toggle the suspend and resume wake up
+ * @d: interrupt data
+ * @state: wake-up state
+ *
+ * Note that this should be called only for suspend and resume.
+ * For runtime PM, the wake-up events should be enabled by default.
+ */
+static int pcs_irq_set_wake(struct irq_data *d, unsigned int state)
+{
+ if (state)
+ pcs_irq_unmask(d);
+ else
+ pcs_irq_mask(d);
+
+ return 0;
+}
+
+/**
+ * pcs_irq_handle() - common interrupt handler
+ * @pcs_irq: interrupt data
+ *
+ * Note that this currently assumes we have one interrupt bit per
+ * mux register. This interrupt is typically used for wake-up events.
+ * For more complex interrupts different handlers can be specified.
+ */
+static int pcs_irq_handle(struct pcs_soc_data *pcs_soc)
+{
+ struct pcs_device *pcs;
+ struct list_head *pos;
+ int count = 0;
+
+ pcs = container_of(pcs_soc, struct pcs_device, socdata);
+ list_for_each(pos, &pcs->irqs) {
+ struct pcs_interrupt *pcswi;
+ unsigned mask;
+
+ pcswi = list_entry(pos, struct pcs_interrupt, node);
+ raw_spin_lock(&pcs->lock);
+ mask = pcs->read(pcswi->reg);
+ raw_spin_unlock(&pcs->lock);
+ if (mask & pcs_soc->irq_status_mask) {
+ generic_handle_irq(irq_find_mapping(pcs->domain,
+ pcswi->hwirq));
+ count++;
+ }
+ }
+
+ /*
+ * For debugging on omaps, you may want to call pcs_soc->rearm()
+ * here to see wake-up interrupts during runtime also.
+ */
+
+ return count;
+}
+
+/**
+ * pcs_irq_handler() - handler for the shared interrupt case
+ * @irq: interrupt
+ * @d: data
+ *
+ * Use this for cases where multiple instances of
+ * pinctrl-single share a single interrupt like on omaps.
+ */
+static irqreturn_t pcs_irq_handler(int irq, void *d)
+{
+ struct pcs_soc_data *pcs_soc = d;
+
+ return pcs_irq_handle(pcs_soc) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/**
+ * pcs_irq_handle() - handler for the dedicated chained interrupt case
+ * @irq: interrupt
+ * @desc: interrupt descriptor
+ *
+ * Use this if you have a separate interrupt for each
+ * pinctrl-single instance.
+ */
+static void pcs_irq_chain_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip;
+ int res;
+
+ chip = irq_get_chip(irq);
+ chained_irq_enter(chip, desc);
+ res = pcs_irq_handle(pcs_soc);
+ /* REVISIT: export and add handle_bad_irq(irq, desc)? */
+ chained_irq_exit(chip, desc);
+
+ return;
+}
+
+static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct pcs_soc_data *pcs_soc = d->host_data;
+ struct pcs_device *pcs;
+ struct pcs_interrupt *pcswi;
+
+ pcs = container_of(pcs_soc, struct pcs_device, socdata);
+ pcswi = devm_kzalloc(pcs->dev, sizeof(*pcswi), GFP_KERNEL);
+ if (!pcswi)
+ return -ENOMEM;
+
+ pcswi->reg = pcs->base + hwirq;
+ pcswi->hwirq = hwirq;
+ pcswi->irq = irq;
+
+ mutex_lock(&pcs->mutex);
+ list_add_tail(&pcswi->node, &pcs->irqs);
+ mutex_unlock(&pcs->mutex);
+
+ irq_set_chip_data(irq, pcs_soc);
+ irq_set_chip_and_handler(irq, &pcs->chip,
+ handle_level_irq);
+
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops pcs_irqdomain_ops = {
+ .map = pcs_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+/**
+ * pcs_irq_init_chained_handler() - set up a chained interrupt handler
+ * @pcs: pcs driver instance
+ * @np: device node pointer
+ */
+static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
+ struct device_node *np)
+{
+ struct pcs_soc_data *pcs_soc = &pcs->socdata;
+ const char *name = "pinctrl";
+ int num_irqs;
+
+ if (!pcs_soc->irq_enable_mask ||
+ !pcs_soc->irq_status_mask) {
+ pcs_soc->irq = -1;
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&pcs->irqs);
+ pcs->chip.name = name;
+ pcs->chip.irq_ack = pcs_irq_mask;
+ pcs->chip.irq_mask = pcs_irq_mask;
+ pcs->chip.irq_unmask = pcs_irq_unmask;
+ pcs->chip.irq_set_wake = pcs_irq_set_wake;
+
+ if (PCS_QUIRK_HAS_SHARED_IRQ) {
+ int res;
+
+ res = request_irq(pcs_soc->irq, pcs_irq_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ name, pcs_soc);
+ if (res) {
+ pcs_soc->irq = -1;
+ return res;
+ }
+ } else {
+ irq_set_handler_data(pcs_soc->irq, pcs_soc);
+ irq_set_chained_handler(pcs_soc->irq,
+ pcs_irq_chain_handler);
+ }
+
+ /*
+ * We can use the register offset as the hardirq
+ * number as irq_domain_add_simple maps them lazily.
+ * This way we can easily support more than one
+ * interrupt per function if needed.
+ */
+ num_irqs = pcs->size;
+
+ pcs->domain = irq_domain_add_simple(np, num_irqs, 0,
+ &pcs_irqdomain_ops,
+ pcs_soc);
+ if (!pcs->domain) {
+ irq_set_chained_handler(pcs_soc->irq, NULL);
+ return -EINVAL;
+ }
+
+ return 0;
+}
#ifdef CONFIG_PM
static int pinctrl_single_suspend(struct platform_device *pdev,
@@ -1523,8 +1851,10 @@ static int pcs_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
+ struct pcs_pdata *pdata;
struct resource *res;
struct pcs_device *pcs;
+ const struct pcs_soc_data *soc;
int ret;
match = of_match_device(pcs_of_match, &pdev->dev);
@@ -1537,11 +1867,14 @@ static int pcs_probe(struct platform_device *pdev)
return -ENOMEM;
}
pcs->dev = &pdev->dev;
+ raw_spin_lock_init(&pcs->lock);
mutex_init(&pcs->mutex);
INIT_LIST_HEAD(&pcs->pingroups);
INIT_LIST_HEAD(&pcs->functions);
INIT_LIST_HEAD(&pcs->gpiofuncs);
- pcs->is_pinconf = match->data;
+ soc = match->data;
+ pcs->flags = soc->flags;
+ memcpy(&pcs->socdata, soc, sizeof(*soc));
PCS_GET_PROP_U32("pinctrl-single,register-width", &pcs->width,
"register width not specified\n");
@@ -1610,7 +1943,7 @@ static int pcs_probe(struct platform_device *pdev)
pcs->desc.name = DRIVER_NAME;
pcs->desc.pctlops = &pcs_pinctrl_ops;
pcs->desc.pmxops = &pcs_pinmux_ops;
- if (pcs->is_pinconf)
+ if (PCS_HAS_PINCONF)
pcs->desc.confops = &pcs_pinconf_ops;
pcs->desc.owner = THIS_MODULE;
@@ -1629,6 +1962,27 @@ static int pcs_probe(struct platform_device *pdev)
if (ret < 0)
goto free;
+ pcs->socdata.irq = irq_of_parse_and_map(np, 0);
+ if (pcs->socdata.irq)
+ pcs->flags |= PCS_FEAT_IRQ;
+
+ /* We still need auxdata for some omaps for PRM interrupts */
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ if (pdata->rearm)
+ pcs->socdata.rearm = pdata->rearm;
+ if (pdata->irq) {
+ pcs->socdata.irq = pdata->irq;
+ pcs->flags |= PCS_FEAT_IRQ;
+ }
+ }
+
+ if (PCS_HAS_IRQ) {
+ ret = pcs_irq_init_chained_handler(pcs, np);
+ if (ret < 0)
+ dev_warn(pcs->dev, "initialized with no interrupts\n");
+ }
+
dev_info(pcs->dev, "%i pins at pa %p size %u\n",
pcs->desc.npins, pcs->base, pcs->size);
@@ -1652,9 +2006,25 @@ static int pcs_remove(struct platform_device *pdev)
return 0;
}
+static const struct pcs_soc_data pinctrl_single_omap_wkup = {
+ .flags = PCS_QUIRK_SHARED_IRQ,
+ .irq_enable_mask = (1 << 14), /* OMAP_WAKEUP_EN */
+ .irq_status_mask = (1 << 15), /* OMAP_WAKEUP_EVENT */
+};
+
+static const struct pcs_soc_data pinctrl_single = {
+};
+
+static const struct pcs_soc_data pinconf_single = {
+ .flags = PCS_FEAT_PINCONF,
+};
+
static struct of_device_id pcs_of_match[] = {
- { .compatible = "pinctrl-single", .data = (void *)false },
- { .compatible = "pinconf-single", .data = (void *)true },
+ { .compatible = "ti,omap3-padconf", .data = &pinctrl_single_omap_wkup },
+ { .compatible = "ti,omap4-padconf", .data = &pinctrl_single_omap_wkup },
+ { .compatible = "ti,omap5-padconf", .data = &pinctrl_single_omap_wkup },
+ { .compatible = "pinctrl-single", .data = &pinctrl_single },
+ { .compatible = "pinconf-single", .data = &pinconf_single },
{ },
};
MODULE_DEVICE_TABLE(of, pcs_of_match);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 96d6b2eef4f2..b51a7460cc49 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -504,6 +504,7 @@ config ASUS_WMI
depends on BACKLIGHT_CLASS_DEVICE
depends on RFKILL || RFKILL = n
depends on HOTPLUG_PCI
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
select INPUT_SPARSEKMAP
select LEDS_CLASS
select NEW_LEDS
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index a6afd4108beb..aefcc32e5634 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -190,16 +190,10 @@ struct eeepc_laptop {
*/
static int write_acpi_int(acpi_handle handle, const char *method, int val)
{
- struct acpi_object_list params;
- union acpi_object in_obj;
acpi_status status;
- params.count = 1;
- params.pointer = &in_obj;
- in_obj.type = ACPI_TYPE_INTEGER;
- in_obj.integer.value = val;
+ status = acpi_execute_simple_method(handle, (char *)method, val);
- status = acpi_evaluate_object(handle, (char *)method, &params, NULL);
return (status == AE_OK ? 0 : -1);
}
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 52b8a97efde1..9d30d69aa78f 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -219,8 +219,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
{ .type = ACPI_TYPE_INTEGER }
};
struct acpi_object_list arg_list = { 4, &params[0] };
- struct acpi_buffer output;
- union acpi_object out_obj;
+ unsigned long long value;
acpi_handle handle = NULL;
status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle);
@@ -235,10 +234,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
params[2].integer.value = arg1;
params[3].integer.value = arg2;
- output.length = sizeof(out_obj);
- output.pointer = &out_obj;
-
- status = acpi_evaluate_object(handle, NULL, &arg_list, &output);
+ status = acpi_evaluate_integer(handle, NULL, &arg_list, &value);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_WARN,
"FUNC 0x%x (args 0x%x, 0x%x, 0x%x) call failed\n",
@@ -246,18 +242,10 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
return -ENODEV;
}
- if (out_obj.type != ACPI_TYPE_INTEGER) {
- vdbg_printk(FUJLAPTOP_DBG_WARN,
- "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) did not "
- "return an integer\n",
- cmd, arg0, arg1, arg2);
- return -ENODEV;
- }
-
vdbg_printk(FUJLAPTOP_DBG_TRACE,
"FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
- cmd, arg0, arg1, arg2, (int)out_obj.integer.value);
- return out_obj.integer.value;
+ cmd, arg0, arg1, arg2, (int)value);
+ return value;
}
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
@@ -317,8 +305,6 @@ static enum led_brightness kblamps_get(struct led_classdev *cdev)
static int set_lcd_level(int level)
{
acpi_status status = AE_OK;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list arg_list = { 1, &arg0 };
acpi_handle handle = NULL;
vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
@@ -333,9 +319,8 @@ static int set_lcd_level(int level)
return -ENODEV;
}
- arg0.integer.value = level;
- status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+ status = acpi_execute_simple_method(handle, NULL, level);
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -345,8 +330,6 @@ static int set_lcd_level(int level)
static int set_lcd_level_alt(int level)
{
acpi_status status = AE_OK;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list arg_list = { 1, &arg0 };
acpi_handle handle = NULL;
vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
@@ -361,9 +344,7 @@ static int set_lcd_level_alt(int level)
return -ENODEV;
}
- arg0.integer.value = level;
-
- status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+ status = acpi_execute_simple_method(handle, NULL, level);
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -586,11 +567,10 @@ static struct platform_driver fujitsupf_driver = {
static void dmi_check_cb_common(const struct dmi_system_id *id)
{
- acpi_handle handle;
pr_info("Identified laptop model '%s'\n", id->ident);
if (use_alt_lcd_levels == -1) {
- if (ACPI_SUCCESS(acpi_get_handle(NULL,
- "\\_SB.PCI0.LPCB.FJEX.SBL2", &handle)))
+ if (acpi_has_method(NULL,
+ "\\_SB.PCI0.LPCB.FJEX.SBL2"))
use_alt_lcd_levels = 1;
else
use_alt_lcd_levels = 0;
@@ -653,7 +633,6 @@ static struct dmi_system_id fujitsu_dmi_table[] = {
static int acpi_fujitsu_add(struct acpi_device *device)
{
- acpi_handle handle;
int result = 0;
int state = 0;
struct input_dev *input;
@@ -702,8 +681,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
fujitsu->dev = device;
- if (ACPI_SUCCESS
- (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+ if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
if (ACPI_FAILURE
(acpi_evaluate_object
@@ -803,7 +781,6 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
{
- acpi_handle handle;
int result = 0;
int state = 0;
struct input_dev *input;
@@ -866,8 +843,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
fujitsu_hotkey->dev = device;
- if (ACPI_SUCCESS
- (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+ if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
if (ACPI_FAILURE
(acpi_evaluate_object
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 89c4519d48ac..6788acc22ab9 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -72,8 +72,15 @@ enum {
VPCCMD_W_BL_POWER = 0x33,
};
+struct ideapad_rfk_priv {
+ int dev;
+ struct ideapad_private *priv;
+};
+
struct ideapad_private {
+ struct acpi_device *adev;
struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
+ struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM];
struct platform_device *platform_device;
struct input_dev *inputdev;
struct backlight_device *blightdev;
@@ -81,8 +88,6 @@ struct ideapad_private {
unsigned long cfg;
};
-static acpi_handle ideapad_handle;
-static struct ideapad_private *ideapad_priv;
static bool no_bt_rfkill;
module_param(no_bt_rfkill, bool, 0444);
MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
@@ -200,34 +205,38 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
*/
static int debugfs_status_show(struct seq_file *s, void *data)
{
+ struct ideapad_private *priv = s->private;
unsigned long value;
- if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &value))
+ if (!priv)
+ return -EINVAL;
+
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value))
seq_printf(s, "Backlight max:\t%lu\n", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_BL, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value))
seq_printf(s, "Backlight now:\t%lu\n", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value))
seq_printf(s, "BL power value:\t%s\n", value ? "On" : "Off");
seq_printf(s, "=====================\n");
- if (!read_ec_data(ideapad_handle, VPCCMD_R_RF, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value))
seq_printf(s, "Radio status:\t%s(%lu)\n",
value ? "On" : "Off", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_WIFI, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value))
seq_printf(s, "Wifi status:\t%s(%lu)\n",
value ? "On" : "Off", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_BT, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value))
seq_printf(s, "BT status:\t%s(%lu)\n",
value ? "On" : "Off", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_3G, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value))
seq_printf(s, "3G status:\t%s(%lu)\n",
value ? "On" : "Off", value);
seq_printf(s, "=====================\n");
- if (!read_ec_data(ideapad_handle, VPCCMD_R_TOUCHPAD, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value))
seq_printf(s, "Touchpad status:%s(%lu)\n",
value ? "On" : "Off", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value))
seq_printf(s, "Camera status:\t%s(%lu)\n",
value ? "On" : "Off", value);
@@ -236,7 +245,7 @@ static int debugfs_status_show(struct seq_file *s, void *data)
static int debugfs_status_open(struct inode *inode, struct file *file)
{
- return single_open(file, debugfs_status_show, NULL);
+ return single_open(file, debugfs_status_show, inode->i_private);
}
static const struct file_operations debugfs_status_fops = {
@@ -249,21 +258,23 @@ static const struct file_operations debugfs_status_fops = {
static int debugfs_cfg_show(struct seq_file *s, void *data)
{
- if (!ideapad_priv) {
+ struct ideapad_private *priv = s->private;
+
+ if (!priv) {
seq_printf(s, "cfg: N/A\n");
} else {
seq_printf(s, "cfg: 0x%.8lX\n\nCapability: ",
- ideapad_priv->cfg);
- if (test_bit(CFG_BT_BIT, &ideapad_priv->cfg))
+ priv->cfg);
+ if (test_bit(CFG_BT_BIT, &priv->cfg))
seq_printf(s, "Bluetooth ");
- if (test_bit(CFG_3G_BIT, &ideapad_priv->cfg))
+ if (test_bit(CFG_3G_BIT, &priv->cfg))
seq_printf(s, "3G ");
- if (test_bit(CFG_WIFI_BIT, &ideapad_priv->cfg))
+ if (test_bit(CFG_WIFI_BIT, &priv->cfg))
seq_printf(s, "Wireless ");
- if (test_bit(CFG_CAMERA_BIT, &ideapad_priv->cfg))
+ if (test_bit(CFG_CAMERA_BIT, &priv->cfg))
seq_printf(s, "Camera ");
seq_printf(s, "\nGraphic: ");
- switch ((ideapad_priv->cfg)&0x700) {
+ switch ((priv->cfg)&0x700) {
case 0x100:
seq_printf(s, "Intel");
break;
@@ -287,7 +298,7 @@ static int debugfs_cfg_show(struct seq_file *s, void *data)
static int debugfs_cfg_open(struct inode *inode, struct file *file)
{
- return single_open(file, debugfs_cfg_show, NULL);
+ return single_open(file, debugfs_cfg_show, inode->i_private);
}
static const struct file_operations debugfs_cfg_fops = {
@@ -308,14 +319,14 @@ static int ideapad_debugfs_init(struct ideapad_private *priv)
goto errout;
}
- node = debugfs_create_file("cfg", S_IRUGO, priv->debug, NULL,
+ node = debugfs_create_file("cfg", S_IRUGO, priv->debug, priv,
&debugfs_cfg_fops);
if (!node) {
pr_err("failed to create cfg in debugfs");
goto errout;
}
- node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
+ node = debugfs_create_file("status", S_IRUGO, priv->debug, priv,
&debugfs_status_fops);
if (!node) {
pr_err("failed to create status in debugfs");
@@ -342,8 +353,9 @@ static ssize_t show_ideapad_cam(struct device *dev,
char *buf)
{
unsigned long result;
+ struct ideapad_private *priv = dev_get_drvdata(dev);
- if (read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &result))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result))
return sprintf(buf, "-1\n");
return sprintf(buf, "%lu\n", result);
}
@@ -353,12 +365,13 @@ static ssize_t store_ideapad_cam(struct device *dev,
const char *buf, size_t count)
{
int ret, state;
+ struct ideapad_private *priv = dev_get_drvdata(dev);
if (!count)
return 0;
if (sscanf(buf, "%i", &state) != 1)
return -EINVAL;
- ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state);
+ ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state);
if (ret < 0)
return -EIO;
return count;
@@ -371,8 +384,9 @@ static ssize_t show_ideapad_fan(struct device *dev,
char *buf)
{
unsigned long result;
+ struct ideapad_private *priv = dev_get_drvdata(dev);
- if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result))
return sprintf(buf, "-1\n");
return sprintf(buf, "%lu\n", result);
}
@@ -382,6 +396,7 @@ static ssize_t store_ideapad_fan(struct device *dev,
const char *buf, size_t count)
{
int ret, state;
+ struct ideapad_private *priv = dev_get_drvdata(dev);
if (!count)
return 0;
@@ -389,7 +404,7 @@ static ssize_t store_ideapad_fan(struct device *dev,
return -EINVAL;
if (state < 0 || state > 4 || state == 3)
return -EINVAL;
- ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state);
+ ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state);
if (ret < 0)
return -EIO;
return count;
@@ -415,7 +430,8 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
else if (attr == &dev_attr_fan_mode.attr) {
unsigned long value;
- supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value);
+ supported = !read_ec_data(priv->adev->handle, VPCCMD_R_FAN,
+ &value);
} else
supported = true;
@@ -445,9 +461,9 @@ const struct ideapad_rfk_data ideapad_rfk_data[] = {
static int ideapad_rfk_set(void *data, bool blocked)
{
- unsigned long opcode = (unsigned long)data;
+ struct ideapad_rfk_priv *priv = data;
- return write_ec_cmd(ideapad_handle, opcode, !blocked);
+ return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked);
}
static struct rfkill_ops ideapad_rfk_ops = {
@@ -459,7 +475,7 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
unsigned long hw_blocked;
int i;
- if (read_ec_data(ideapad_handle, VPCCMD_R_RF, &hw_blocked))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked))
return;
hw_blocked = !hw_blocked;
@@ -468,27 +484,30 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
rfkill_set_hw_state(priv->rfk[i], hw_blocked);
}
-static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
+static int ideapad_register_rfkill(struct ideapad_private *priv, int dev)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
int ret;
unsigned long sw_blocked;
if (no_bt_rfkill &&
(ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH)) {
/* Force to enable bluetooth when no_bt_rfkill=1 */
- write_ec_cmd(ideapad_handle,
+ write_ec_cmd(priv->adev->handle,
ideapad_rfk_data[dev].opcode, 1);
return 0;
}
-
- priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name, &adevice->dev,
- ideapad_rfk_data[dev].type, &ideapad_rfk_ops,
- (void *)(long)dev);
+ priv->rfk_priv[dev].dev = dev;
+ priv->rfk_priv[dev].priv = priv;
+
+ priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name,
+ &priv->platform_device->dev,
+ ideapad_rfk_data[dev].type,
+ &ideapad_rfk_ops,
+ &priv->rfk_priv[dev]);
if (!priv->rfk[dev])
return -ENOMEM;
- if (read_ec_data(ideapad_handle, ideapad_rfk_data[dev].opcode-1,
+ if (read_ec_data(priv->adev->handle, ideapad_rfk_data[dev].opcode-1,
&sw_blocked)) {
rfkill_init_sw_state(priv->rfk[dev], 0);
} else {
@@ -504,10 +523,8 @@ static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
return 0;
}
-static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
+static void ideapad_unregister_rfkill(struct ideapad_private *priv, int dev)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
-
if (!priv->rfk[dev])
return;
@@ -518,37 +535,16 @@ static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
/*
* Platform device
*/
-static int ideapad_platform_init(struct ideapad_private *priv)
+static int ideapad_sysfs_init(struct ideapad_private *priv)
{
- int result;
-
- priv->platform_device = platform_device_alloc("ideapad", -1);
- if (!priv->platform_device)
- return -ENOMEM;
- platform_set_drvdata(priv->platform_device, priv);
-
- result = platform_device_add(priv->platform_device);
- if (result)
- goto fail_platform_device;
-
- result = sysfs_create_group(&priv->platform_device->dev.kobj,
+ return sysfs_create_group(&priv->platform_device->dev.kobj,
&ideapad_attribute_group);
- if (result)
- goto fail_sysfs;
- return 0;
-
-fail_sysfs:
- platform_device_del(priv->platform_device);
-fail_platform_device:
- platform_device_put(priv->platform_device);
- return result;
}
-static void ideapad_platform_exit(struct ideapad_private *priv)
+static void ideapad_sysfs_exit(struct ideapad_private *priv)
{
sysfs_remove_group(&priv->platform_device->dev.kobj,
&ideapad_attribute_group);
- platform_device_unregister(priv->platform_device);
}
/*
@@ -623,7 +619,7 @@ static void ideapad_input_novokey(struct ideapad_private *priv)
{
unsigned long long_pressed;
- if (read_ec_data(ideapad_handle, VPCCMD_R_NOVO, &long_pressed))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed))
return;
if (long_pressed)
ideapad_input_report(priv, 17);
@@ -635,7 +631,7 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
{
unsigned long bit, value;
- read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
+ read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
for (bit = 0; bit < 16; bit++) {
if (test_bit(bit, &value)) {
@@ -662,19 +658,28 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
*/
static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
{
+ struct ideapad_private *priv = bl_get_data(blightdev);
unsigned long now;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
+ if (!priv)
+ return -EINVAL;
+
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
return -EIO;
return now;
}
static int ideapad_backlight_update_status(struct backlight_device *blightdev)
{
- if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL,
+ struct ideapad_private *priv = bl_get_data(blightdev);
+
+ if (!priv)
+ return -EINVAL;
+
+ if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL,
blightdev->props.brightness))
return -EIO;
- if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL_POWER,
+ if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL_POWER,
blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1))
return -EIO;
@@ -692,11 +697,11 @@ static int ideapad_backlight_init(struct ideapad_private *priv)
struct backlight_properties props;
unsigned long max, now, power;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &max))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &max))
return -EIO;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
return -EIO;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
return -EIO;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -734,7 +739,7 @@ static void ideapad_backlight_notify_power(struct ideapad_private *priv)
if (!blightdev)
return;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
return;
blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
}
@@ -745,7 +750,7 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
/* if we control brightness via acpi video driver */
if (priv->blightdev == NULL) {
- read_ec_data(ideapad_handle, VPCCMD_R_BL, &now);
+ read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
return;
}
@@ -755,19 +760,12 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
/*
* module init/exit
*/
-static const struct acpi_device_id ideapad_device_ids[] = {
- { "VPC2004", 0},
- { "", 0},
-};
-MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
-
-static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
+static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
unsigned long value;
/* Without reading from EC touchpad LED doesn't switch state */
- if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) {
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
/* Some IdeaPads don't really turn off touchpad - they only
* switch the LED state. We (de)activate KBC AUX port to turn
* touchpad off and on. We send KEY_TOUCHPAD_OFF and
@@ -779,26 +777,77 @@ static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
}
}
-static int ideapad_acpi_add(struct acpi_device *adevice)
+static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct ideapad_private *priv = data;
+ unsigned long vpc1, vpc2, vpc_bit;
+
+ if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
+ return;
+ if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
+ return;
+
+ vpc1 = (vpc2 << 8) | vpc1;
+ for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
+ if (test_bit(vpc_bit, &vpc1)) {
+ switch (vpc_bit) {
+ case 9:
+ ideapad_sync_rfk_state(priv);
+ break;
+ case 13:
+ case 11:
+ case 7:
+ case 6:
+ ideapad_input_report(priv, vpc_bit);
+ break;
+ case 5:
+ ideapad_sync_touchpad_state(priv);
+ break;
+ case 4:
+ ideapad_backlight_notify_brightness(priv);
+ break;
+ case 3:
+ ideapad_input_novokey(priv);
+ break;
+ case 2:
+ ideapad_backlight_notify_power(priv);
+ break;
+ case 0:
+ ideapad_check_special_buttons(priv);
+ break;
+ default:
+ pr_info("Unknown event: %lu\n", vpc_bit);
+ }
+ }
+ }
+}
+
+static int ideapad_acpi_add(struct platform_device *pdev)
{
int ret, i;
int cfg;
struct ideapad_private *priv;
+ struct acpi_device *adev;
+
+ ret = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+ if (ret)
+ return -ENODEV;
- if (read_method_int(adevice->handle, "_CFG", &cfg))
+ if (read_method_int(adev->handle, "_CFG", &cfg))
return -ENODEV;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- dev_set_drvdata(&adevice->dev, priv);
- ideapad_priv = priv;
- ideapad_handle = adevice->handle;
+
+ dev_set_drvdata(&pdev->dev, priv);
priv->cfg = cfg;
+ priv->adev = adev;
+ priv->platform_device = pdev;
- ret = ideapad_platform_init(priv);
+ ret = ideapad_sysfs_init(priv);
if (ret)
- goto platform_failed;
+ goto sysfs_failed;
ret = ideapad_debugfs_init(priv);
if (ret)
@@ -810,117 +859,92 @@ static int ideapad_acpi_add(struct acpi_device *adevice)
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
- ideapad_register_rfkill(adevice, i);
+ ideapad_register_rfkill(priv, i);
else
priv->rfk[i] = NULL;
}
ideapad_sync_rfk_state(priv);
- ideapad_sync_touchpad_state(adevice);
+ ideapad_sync_touchpad_state(priv);
if (!acpi_video_backlight_support()) {
ret = ideapad_backlight_init(priv);
if (ret && ret != -ENODEV)
goto backlight_failed;
}
+ ret = acpi_install_notify_handler(adev->handle,
+ ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv);
+ if (ret)
+ goto notification_failed;
return 0;
-
+notification_failed:
+ ideapad_backlight_exit(priv);
backlight_failed:
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
- ideapad_unregister_rfkill(adevice, i);
+ ideapad_unregister_rfkill(priv, i);
ideapad_input_exit(priv);
input_failed:
ideapad_debugfs_exit(priv);
debugfs_failed:
- ideapad_platform_exit(priv);
-platform_failed:
+ ideapad_sysfs_exit(priv);
+sysfs_failed:
kfree(priv);
return ret;
}
-static int ideapad_acpi_remove(struct acpi_device *adevice)
+static int ideapad_acpi_remove(struct platform_device *pdev)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
+ struct ideapad_private *priv = dev_get_drvdata(&pdev->dev);
int i;
+ acpi_remove_notify_handler(priv->adev->handle,
+ ACPI_DEVICE_NOTIFY, ideapad_acpi_notify);
ideapad_backlight_exit(priv);
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
- ideapad_unregister_rfkill(adevice, i);
+ ideapad_unregister_rfkill(priv, i);
ideapad_input_exit(priv);
ideapad_debugfs_exit(priv);
- ideapad_platform_exit(priv);
- dev_set_drvdata(&adevice->dev, NULL);
+ ideapad_sysfs_exit(priv);
+ dev_set_drvdata(&pdev->dev, NULL);
kfree(priv);
return 0;
}
-static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
+#ifdef CONFIG_PM_SLEEP
+static int ideapad_acpi_resume(struct device *device)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
- acpi_handle handle = adevice->handle;
- unsigned long vpc1, vpc2, vpc_bit;
-
- if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
- return;
- if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
- return;
+ struct ideapad_private *priv;
- vpc1 = (vpc2 << 8) | vpc1;
- for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
- if (test_bit(vpc_bit, &vpc1)) {
- switch (vpc_bit) {
- case 9:
- ideapad_sync_rfk_state(priv);
- break;
- case 13:
- case 11:
- case 7:
- case 6:
- ideapad_input_report(priv, vpc_bit);
- break;
- case 5:
- ideapad_sync_touchpad_state(adevice);
- break;
- case 4:
- ideapad_backlight_notify_brightness(priv);
- break;
- case 3:
- ideapad_input_novokey(priv);
- break;
- case 2:
- ideapad_backlight_notify_power(priv);
- break;
- case 0:
- ideapad_check_special_buttons(priv);
- break;
- default:
- pr_info("Unknown event: %lu\n", vpc_bit);
- }
- }
- }
-}
+ if (!device)
+ return -EINVAL;
+ priv = dev_get_drvdata(device);
-static int ideapad_acpi_resume(struct device *device)
-{
- ideapad_sync_rfk_state(ideapad_priv);
- ideapad_sync_touchpad_state(to_acpi_device(device));
+ ideapad_sync_rfk_state(priv);
+ ideapad_sync_touchpad_state(priv);
return 0;
}
-
+#endif
static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume);
-static struct acpi_driver ideapad_acpi_driver = {
- .name = "ideapad_acpi",
- .class = "IdeaPad",
- .ids = ideapad_device_ids,
- .ops.add = ideapad_acpi_add,
- .ops.remove = ideapad_acpi_remove,
- .ops.notify = ideapad_acpi_notify,
- .drv.pm = &ideapad_pm,
- .owner = THIS_MODULE,
+static const struct acpi_device_id ideapad_device_ids[] = {
+ { "VPC2004", 0},
+ { "", 0},
};
-module_acpi_driver(ideapad_acpi_driver);
+MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
+
+static struct platform_driver ideapad_acpi_driver = {
+ .probe = ideapad_acpi_add,
+ .remove = ideapad_acpi_remove,
+ .driver = {
+ .name = "ideapad_acpi",
+ .owner = THIS_MODULE,
+ .pm = &ideapad_pm,
+ .acpi_match_table = ACPI_PTR(ideapad_device_ids),
+ },
+};
+
+module_platform_driver(ideapad_acpi_driver);
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("IdeaPad ACPI Extras");
diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel-rst.c
index 41b740cb28bc..a2083a9e5662 100644
--- a/drivers/platform/x86/intel-rst.c
+++ b/drivers/platform/x86/intel-rst.c
@@ -29,24 +29,16 @@ static ssize_t irst_show_wakeup_events(struct device *dev,
char *buf)
{
struct acpi_device *acpi;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *result;
+ unsigned long long value;
acpi_status status;
acpi = to_acpi_device(dev);
- status = acpi_evaluate_object(acpi->handle, "GFFS", NULL, &output);
+ status = acpi_evaluate_integer(acpi->handle, "GFFS", NULL, &value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
- result = output.pointer;
-
- if (result->type != ACPI_TYPE_INTEGER) {
- kfree(result);
- return -EINVAL;
- }
-
- return sprintf(buf, "%lld\n", result->integer.value);
+ return sprintf(buf, "%lld\n", value);
}
static ssize_t irst_store_wakeup_events(struct device *dev,
@@ -54,8 +46,6 @@ static ssize_t irst_store_wakeup_events(struct device *dev,
const char *buf, size_t count)
{
struct acpi_device *acpi;
- struct acpi_object_list input;
- union acpi_object param;
acpi_status status;
unsigned long value;
int error;
@@ -67,13 +57,7 @@ static ssize_t irst_store_wakeup_events(struct device *dev,
if (error)
return error;
- param.type = ACPI_TYPE_INTEGER;
- param.integer.value = value;
-
- input.count = 1;
- input.pointer = &param;
-
- status = acpi_evaluate_object(acpi->handle, "SFFS", &input, NULL);
+ status = acpi_execute_simple_method(acpi->handle, "SFFS", value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
@@ -91,24 +75,16 @@ static ssize_t irst_show_wakeup_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *result;
+ unsigned long long value;
acpi_status status;
acpi = to_acpi_device(dev);
- status = acpi_evaluate_object(acpi->handle, "GFTV", NULL, &output);
+ status = acpi_evaluate_integer(acpi->handle, "GFTV", NULL, &value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
- result = output.pointer;
-
- if (result->type != ACPI_TYPE_INTEGER) {
- kfree(result);
- return -EINVAL;
- }
-
- return sprintf(buf, "%lld\n", result->integer.value);
+ return sprintf(buf, "%lld\n", value);
}
static ssize_t irst_store_wakeup_time(struct device *dev,
@@ -116,8 +92,6 @@ static ssize_t irst_store_wakeup_time(struct device *dev,
const char *buf, size_t count)
{
struct acpi_device *acpi;
- struct acpi_object_list input;
- union acpi_object param;
acpi_status status;
unsigned long value;
int error;
@@ -129,13 +103,7 @@ static ssize_t irst_store_wakeup_time(struct device *dev,
if (error)
return error;
- param.type = ACPI_TYPE_INTEGER;
- param.integer.value = value;
-
- input.count = 1;
- input.pointer = &param;
-
- status = acpi_evaluate_object(acpi->handle, "SFTV", &input, NULL);
+ status = acpi_execute_simple_method(acpi->handle, "SFTV", value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c
index 52259dcabecb..1838400dc036 100644
--- a/drivers/platform/x86/intel-smartconnect.c
+++ b/drivers/platform/x86/intel-smartconnect.c
@@ -25,37 +25,18 @@ MODULE_LICENSE("GPL");
static int smartconnect_acpi_init(struct acpi_device *acpi)
{
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *result;
- union acpi_object param;
+ unsigned long long value;
acpi_status status;
- status = acpi_evaluate_object(acpi->handle, "GAOS", NULL, &output);
+ status = acpi_evaluate_integer(acpi->handle, "GAOS", NULL, &value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
- result = output.pointer;
-
- if (result->type != ACPI_TYPE_INTEGER) {
- kfree(result);
- return -EINVAL;
- }
-
- if (result->integer.value & 0x1) {
- param.type = ACPI_TYPE_INTEGER;
- param.integer.value = 0;
-
- input.count = 1;
- input.pointer = &param;
-
+ if (value & 0x1) {
dev_info(&acpi->dev, "Disabling Intel Smart Connect\n");
- status = acpi_evaluate_object(acpi->handle, "SAOS", &input,
- NULL);
+ status = acpi_execute_simple_method(acpi->handle, "SAOS", 0);
}
- kfree(result);
-
return 0;
}
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index d6cfc1558c2f..11244f8703c4 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -156,19 +156,15 @@ static struct thermal_cooling_device_ops memory_cooling_ops = {
static int intel_menlow_memory_add(struct acpi_device *device)
{
int result = -ENODEV;
- acpi_status status = AE_OK;
- acpi_handle dummy;
struct thermal_cooling_device *cdev;
if (!device)
return -EINVAL;
- status = acpi_get_handle(device->handle, MEMORY_GET_BANDWIDTH, &dummy);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(device->handle, MEMORY_GET_BANDWIDTH))
goto end;
- status = acpi_get_handle(device->handle, MEMORY_SET_BANDWIDTH, &dummy);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(device->handle, MEMORY_SET_BANDWIDTH))
goto end;
cdev = thermal_cooling_device_register("Memory controller", device,
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 9215ed72bece..d654f831410d 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -25,7 +25,7 @@
#include <linux/interrupt.h>
#include <linux/sfi.h>
#include <linux/module.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
/* IPC defines the following message types */
@@ -579,7 +579,7 @@ static struct pci_driver ipc_driver = {
static int __init intel_scu_ipc_init(void)
{
- platform = mrst_identify_cpu();
+ platform = intel_mid_identify_cpu();
if (platform == 0)
return -ENODEV;
return pci_register_driver(&ipc_driver);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index d3fd52036fd6..47caab0ea7a1 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -127,18 +127,17 @@ MODULE_PARM_DESC(minor,
"default is -1 (automatic)");
#endif
-static int kbd_backlight = 1;
+static int kbd_backlight = -1;
module_param(kbd_backlight, int, 0444);
MODULE_PARM_DESC(kbd_backlight,
"set this to 0 to disable keyboard backlight, "
- "1 to enable it (default: 0)");
+ "1 to enable it (default: no change from current value)");
-static int kbd_backlight_timeout; /* = 0 */
+static int kbd_backlight_timeout = -1;
module_param(kbd_backlight_timeout, int, 0444);
MODULE_PARM_DESC(kbd_backlight_timeout,
- "set this to 0 to set the default 10 seconds timeout, "
- "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
- "(default: 0)");
+ "meaningful values vary from 0 to 3 and their meaning depends "
+ "on the model (default: no change from current value)");
#ifdef CONFIG_PM_SLEEP
static void sony_nc_kbd_backlight_resume(void);
@@ -1509,7 +1508,6 @@ static void sony_nc_function_resume(void)
static int sony_nc_resume(struct device *dev)
{
struct sony_nc_value *item;
- acpi_handle handle;
for (item = sony_nc_values; item->name; item++) {
int ret;
@@ -1524,15 +1522,13 @@ static int sony_nc_resume(struct device *dev)
}
}
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
int arg = 1;
if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
dprintk("ECON Method failed\n");
}
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
- &handle)))
+ if (acpi_has_method(sony_nc_acpi_handle, "SN00"))
sony_nc_function_resume();
return 0;
@@ -1844,6 +1840,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
if (!kbdbl_ctl)
return -ENOMEM;
+ kbdbl_ctl->mode = kbd_backlight;
+ kbdbl_ctl->timeout = kbd_backlight_timeout;
kbdbl_ctl->handle = handle;
if (handle == 0x0137)
kbdbl_ctl->base = 0x0C00;
@@ -1870,8 +1868,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
if (ret)
goto outmode;
- __sony_nc_kbd_backlight_mode_set(kbd_backlight);
- __sony_nc_kbd_backlight_timeout_set(kbd_backlight_timeout);
+ __sony_nc_kbd_backlight_mode_set(kbdbl_ctl->mode);
+ __sony_nc_kbd_backlight_timeout_set(kbdbl_ctl->timeout);
return 0;
@@ -1886,17 +1884,8 @@ outkzalloc:
static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
{
if (kbdbl_ctl) {
- int result;
-
device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
-
- /* restore the default hw behaviour */
- sony_call_snc_handle(kbdbl_ctl->handle,
- kbdbl_ctl->base | 0x10000, &result);
- sony_call_snc_handle(kbdbl_ctl->handle,
- kbdbl_ctl->base + 0x200, &result);
-
kfree(kbdbl_ctl);
kbdbl_ctl = NULL;
}
@@ -2690,7 +2679,6 @@ static void sony_nc_backlight_ng_read_limits(int handle,
static void sony_nc_backlight_setup(void)
{
- acpi_handle unused;
int max_brightness = 0;
const struct backlight_ops *ops = NULL;
struct backlight_properties props;
@@ -2725,8 +2713,7 @@ static void sony_nc_backlight_setup(void)
sony_nc_backlight_ng_read_limits(0x14c, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
- } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
- &unused))) {
+ } else if (acpi_has_method(sony_nc_acpi_handle, "GBRT")) {
ops = &sony_backlight_ops;
max_brightness = SONY_MAX_BRIGHTNESS - 1;
@@ -2758,7 +2745,6 @@ static int sony_nc_add(struct acpi_device *device)
{
acpi_status status;
int result = 0;
- acpi_handle handle;
struct sony_nc_value *item;
pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
@@ -2798,15 +2784,13 @@ static int sony_nc_add(struct acpi_device *device)
goto outplatform;
}
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
int arg = 1;
if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
dprintk("ECON Method failed\n");
}
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle, "SN00")) {
dprintk("Doing SNC setup\n");
/* retrieve the available handles */
result = sony_nc_handles_setup(sony_pf_device);
@@ -2829,9 +2813,8 @@ static int sony_nc_add(struct acpi_device *device)
/* find the available acpiget as described in the DSDT */
for (; item->acpiget && *item->acpiget; ++item->acpiget) {
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
- *item->acpiget,
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle,
+ *item->acpiget)) {
dprintk("Found %s getter: %s\n",
item->name, *item->acpiget);
item->devattr.attr.mode |= S_IRUGO;
@@ -2841,9 +2824,8 @@ static int sony_nc_add(struct acpi_device *device)
/* find the available acpiset as described in the DSDT */
for (; item->acpiset && *item->acpiset; ++item->acpiset) {
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
- *item->acpiset,
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle,
+ *item->acpiset)) {
dprintk("Found %s setter: %s\n",
item->name, *item->acpiset);
item->devattr.attr.mode |= S_IWUSR;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 03ca6c139f1a..05e046aa5e31 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -23,7 +23,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define TPACPI_VERSION "0.24"
+#define TPACPI_VERSION "0.25"
#define TPACPI_SYSFS_VERSION 0x020700
/*
@@ -88,6 +88,7 @@
#include <linux/pci_ids.h>
+#include <linux/thinkpad_acpi.h>
/* ThinkPad CMOS commands */
#define TP_CMOS_VOLUME_DOWN 0
@@ -700,6 +701,14 @@ static void __init drv_acpi_handle_init(const char *name,
static acpi_status __init tpacpi_acpi_handle_locate_callback(acpi_handle handle,
u32 level, void *context, void **return_value)
{
+ struct acpi_device *dev;
+ if (!strcmp(context, "video")) {
+ if (acpi_bus_get_device(handle, &dev))
+ return AE_OK;
+ if (strcmp(ACPI_VIDEO_HID, acpi_device_hid(dev)))
+ return AE_OK;
+ }
+
*(acpi_handle *)return_value = handle;
return AE_CTRL_TERMINATE;
@@ -712,10 +721,10 @@ static void __init tpacpi_acpi_handle_locate(const char *name,
acpi_status status;
acpi_handle device_found;
- BUG_ON(!name || !hid || !handle);
+ BUG_ON(!name || !handle);
vdbg_printk(TPACPI_DBG_INIT,
"trying to locate ACPI handle for %s, using HID %s\n",
- name, hid);
+ name, hid ? hid : "NULL");
memset(&device_found, 0, sizeof(device_found));
status = acpi_get_devices(hid, tpacpi_acpi_handle_locate_callback,
@@ -6090,19 +6099,28 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
+ struct acpi_device *device, *child;
int rc;
- if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
+ if (acpi_bus_get_device(handle, &device))
+ return 0;
+
+ rc = 0;
+ list_for_each_entry(child, &device->children, node) {
+ acpi_status status = acpi_evaluate_object(child->handle, "_BCL",
+ NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ continue;
+
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
pr_err("Unknown _BCL data, please report this to %s\n",
- TPACPI_MAIL);
+ TPACPI_MAIL);
rc = 0;
} else {
rc = obj->package.count;
}
- } else {
- return 0;
+ break;
}
kfree(buffer.pointer);
@@ -6118,7 +6136,7 @@ static unsigned int __init tpacpi_check_std_acpi_brightness_support(void)
acpi_handle video_device;
int bcl_levels = 0;
- tpacpi_acpi_handle_locate("video", ACPI_VIDEO_HID, &video_device);
+ tpacpi_acpi_handle_locate("video", NULL, &video_device);
if (video_device)
bcl_levels = tpacpi_query_bcl_levels(video_device);
@@ -8350,6 +8368,91 @@ static struct ibm_struct fan_driver_data = {
.resume = fan_resume,
};
+/*************************************************************************
+ * Mute LED subdriver
+ */
+
+
+struct tp_led_table {
+ acpi_string name;
+ int on_value;
+ int off_value;
+ int state;
+};
+
+static struct tp_led_table led_tables[] = {
+ [TPACPI_LED_MUTE] = {
+ .name = "SSMS",
+ .on_value = 1,
+ .off_value = 0,
+ },
+ [TPACPI_LED_MICMUTE] = {
+ .name = "MMTS",
+ .on_value = 2,
+ .off_value = 0,
+ },
+};
+
+static int mute_led_on_off(struct tp_led_table *t, bool state)
+{
+ acpi_handle temp;
+ int output;
+
+ if (!ACPI_SUCCESS(acpi_get_handle(hkey_handle, t->name, &temp))) {
+ pr_warn("Thinkpad ACPI has no %s interface.\n", t->name);
+ return -EIO;
+ }
+
+ if (!acpi_evalf(hkey_handle, &output, t->name, "dd",
+ state ? t->on_value : t->off_value))
+ return -EIO;
+
+ t->state = state;
+ return state;
+}
+
+int tpacpi_led_set(int whichled, bool on)
+{
+ struct tp_led_table *t;
+
+ if (whichled < 0 || whichled >= TPACPI_LED_MAX)
+ return -EINVAL;
+
+ t = &led_tables[whichled];
+ if (t->state < 0 || t->state == on)
+ return t->state;
+ return mute_led_on_off(t, on);
+}
+EXPORT_SYMBOL_GPL(tpacpi_led_set);
+
+static int mute_led_init(struct ibm_init_struct *iibm)
+{
+ acpi_handle temp;
+ int i;
+
+ for (i = 0; i < TPACPI_LED_MAX; i++) {
+ struct tp_led_table *t = &led_tables[i];
+ if (ACPI_SUCCESS(acpi_get_handle(hkey_handle, t->name, &temp)))
+ mute_led_on_off(t, false);
+ else
+ t->state = -ENODEV;
+ }
+ return 0;
+}
+
+static void mute_led_exit(void)
+{
+ int i;
+
+ for (i = 0; i < TPACPI_LED_MAX; i++)
+ tpacpi_led_set(i, false);
+}
+
+static struct ibm_struct mute_led_driver_data = {
+ .name = "mute_led",
+ .exit = mute_led_exit,
+};
+
/****************************************************************************
****************************************************************************
*
@@ -8768,6 +8871,10 @@ static struct ibm_init_struct ibms_init[] __initdata = {
.init = fan_init,
.data = &fan_driver_data,
},
+ {
+ .init = mute_led_init,
+ .data = &mute_led_driver_data,
+ },
};
static int __init set_ibm_param(const char *val, struct kernel_param *kp)
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 4ab618c63b45..67897c8740ba 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -80,13 +80,9 @@ static void acpi_topstar_notify(struct acpi_device *device, u32 event)
static int acpi_topstar_fncx_switch(struct acpi_device *device, bool state)
{
acpi_status status;
- union acpi_object fncx_params[1] = {
- { .type = ACPI_TYPE_INTEGER }
- };
- struct acpi_object_list fncx_arg_list = { 1, &fncx_params[0] };
- fncx_params[0].integer.value = state ? 0x86 : 0x87;
- status = acpi_evaluate_object(device->handle, "FNCX", &fncx_arg_list, NULL);
+ status = acpi_execute_simple_method(device->handle, "FNCX",
+ state ? 0x86 : 0x87);
if (ACPI_FAILURE(status)) {
pr_err("Unable to switch FNCX notifications\n");
return -ENODEV;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index eb3467ea6d86..0cfadb65f7c6 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -191,16 +191,9 @@ static __inline__ void _set_bit(u32 * word, u32 mask, int value)
static int write_acpi_int(const char *methodName, int val)
{
- struct acpi_object_list params;
- union acpi_object in_objs[1];
acpi_status status;
- params.count = ARRAY_SIZE(in_objs);
- params.pointer = in_objs;
- in_objs[0].type = ACPI_TYPE_INTEGER;
- in_objs[0].integer.value = val;
-
- status = acpi_evaluate_object(NULL, (char *)methodName, &params, NULL);
+ status = acpi_execute_simple_method(NULL, (char *)methodName, val);
return (status == AE_OK) ? 0 : -EIO;
}
@@ -947,21 +940,17 @@ static void toshiba_acpi_hotkey_work(struct work_struct *work)
*/
static int toshiba_acpi_query_hotkey(struct toshiba_acpi_dev *dev)
{
- struct acpi_buffer buf;
- union acpi_object out_obj;
+ unsigned long long value;
acpi_status status;
- buf.pointer = &out_obj;
- buf.length = sizeof(out_obj);
-
- status = acpi_evaluate_object(dev->acpi_dev->handle, "INFO",
- NULL, &buf);
- if (ACPI_FAILURE(status) || out_obj.type != ACPI_TYPE_INTEGER) {
+ status = acpi_evaluate_integer(dev->acpi_dev->handle, "INFO",
+ NULL, &value);
+ if (ACPI_FAILURE(status)) {
pr_err("ACPI INFO method execution failed\n");
return -EIO;
}
- return out_obj.integer.value;
+ return value;
}
static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
@@ -981,7 +970,7 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
{
acpi_status status;
- acpi_handle ec_handle, handle;
+ acpi_handle ec_handle;
int error;
u32 hci_result;
@@ -1008,10 +997,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
*/
status = AE_ERROR;
ec_handle = ec_get_handle();
- if (ec_handle)
- status = acpi_get_handle(ec_handle, "NTFY", &handle);
-
- if (ACPI_SUCCESS(status)) {
+ if (ec_handle && acpi_has_method(ec_handle, "NTFY")) {
INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work);
error = i8042_install_filter(toshiba_acpi_i8042_filter);
@@ -1027,10 +1013,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
* Determine hotkey query interface. Prefer using the INFO
* method when it is available.
*/
- status = acpi_get_handle(dev->acpi_dev->handle, "INFO", &handle);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_has_method(dev->acpi_dev->handle, "INFO"))
dev->info_supported = 1;
- } else {
+ else {
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
if (hci_result == HCI_SUCCESS)
dev->system_event_supported = 1;
@@ -1155,15 +1140,10 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
static const char *find_hci_method(acpi_handle handle)
{
- acpi_status status;
- acpi_handle hci_handle;
-
- status = acpi_get_handle(handle, "GHCI", &hci_handle);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(handle, "GHCI"))
return "GHCI";
- status = acpi_get_handle(handle, "SPFC", &hci_handle);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(handle, "SPFC"))
return "SPFC";
return NULL;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 601ea9512242..62e8c221d01e 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -252,8 +252,6 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
{
struct guid_block *block = NULL;
char method[5];
- struct acpi_object_list input;
- union acpi_object params[1];
acpi_status status;
acpi_handle handle;
@@ -263,13 +261,9 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
if (!block)
return AE_NOT_EXIST;
- input.count = 1;
- input.pointer = params;
- params[0].type = ACPI_TYPE_INTEGER;
- params[0].integer.value = enable;
snprintf(method, 5, "WE%02X", block->notify_id);
- status = acpi_evaluate_object(handle, method, &input, NULL);
+ status = acpi_execute_simple_method(handle, method, enable);
if (status != AE_OK && status != AE_NOT_FOUND)
return status;
@@ -353,10 +347,10 @@ struct acpi_buffer *out)
{
struct guid_block *block = NULL;
struct wmi_block *wblock = NULL;
- acpi_handle handle, wc_handle;
+ acpi_handle handle;
acpi_status status, wc_status = AE_ERROR;
- struct acpi_object_list input, wc_input;
- union acpi_object wc_params[1], wq_params[1];
+ struct acpi_object_list input;
+ union acpi_object wq_params[1];
char method[5];
char wc_method[5] = "WC";
@@ -386,11 +380,6 @@ struct acpi_buffer *out)
* enable collection.
*/
if (block->flags & ACPI_WMI_EXPENSIVE) {
- wc_input.count = 1;
- wc_input.pointer = wc_params;
- wc_params[0].type = ACPI_TYPE_INTEGER;
- wc_params[0].integer.value = 1;
-
strncat(wc_method, block->object_id, 2);
/*
@@ -398,10 +387,9 @@ struct acpi_buffer *out)
* expensive, but have no corresponding WCxx method. So we
* should not fail if this happens.
*/
- wc_status = acpi_get_handle(handle, wc_method, &wc_handle);
- if (ACPI_SUCCESS(wc_status))
- wc_status = acpi_evaluate_object(handle, wc_method,
- &wc_input, NULL);
+ if (acpi_has_method(handle, wc_method))
+ wc_status = acpi_execute_simple_method(handle,
+ wc_method, 1);
}
strcpy(method, "WQ");
@@ -414,9 +402,7 @@ struct acpi_buffer *out)
* the WQxx method failed - we should disable collection anyway.
*/
if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
- wc_params[0].integer.value = 0;
- status = acpi_evaluate_object(handle,
- wc_method, &wc_input, NULL);
+ status = acpi_execute_simple_method(handle, wc_method, 0);
}
return status;
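The topstar, toshiba_acpi and wmi hunks above all converge on the same two helpers: acpi_has_method() to probe for a control method, and acpi_execute_simple_method() to evaluate it with a single integer argument, replacing the hand-built acpi_object_list/union acpi_object boilerplate. A condensed sketch of the idiom; "EXMP" is an invented method name, not one used by these drivers:

#include <linux/acpi.h>
#include <linux/errno.h>

static int example_call_method(acpi_handle handle, int value)
{
	acpi_status status;

	/* cheap existence check, no output buffer or dummy handle needed */
	if (!acpi_has_method(handle, "EXMP"))
		return -ENODEV;

	/* evaluate EXMP with one integer argument, discarding the result */
	status = acpi_execute_simple_method(handle, "EXMP", value);
	return ACPI_FAILURE(status) ? -EIO : 0;
}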
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index ffd53e3eb92f..c8873b0ca551 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -4,7 +4,7 @@
*/
extern spinlock_t pnp_lock;
-extern struct device_attribute pnp_interface_attrs[];
+extern const struct attribute_group *pnp_dev_groups[];
void *pnp_alloc(long size);
int pnp_register_protocol(struct pnp_protocol *protocol);
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index a39ee38a9414..6936e0acedcd 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -246,7 +246,7 @@ struct bus_type pnp_bus_type = {
.remove = pnp_device_remove,
.shutdown = pnp_device_shutdown,
.pm = &pnp_bus_dev_pm_ops,
- .dev_attrs = pnp_interface_attrs,
+ .dev_groups = pnp_dev_groups,
};
int pnp_register_driver(struct pnp_driver *drv)
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 0c201317284b..e6c403be09a9 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -203,8 +203,8 @@ static void pnp_print_option(pnp_info_buffer_t * buffer, char *space,
}
}
-static ssize_t pnp_show_options(struct device *dmdev,
- struct device_attribute *attr, char *buf)
+static ssize_t options_show(struct device *dmdev, struct device_attribute *attr,
+ char *buf)
{
struct pnp_dev *dev = to_pnp_dev(dmdev);
pnp_info_buffer_t *buffer;
@@ -241,10 +241,10 @@ static ssize_t pnp_show_options(struct device *dmdev,
kfree(buffer);
return ret;
}
+static DEVICE_ATTR_RO(options);
-static ssize_t pnp_show_current_resources(struct device *dmdev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t resources_show(struct device *dmdev,
+ struct device_attribute *attr, char *buf)
{
struct pnp_dev *dev = to_pnp_dev(dmdev);
pnp_info_buffer_t *buffer;
@@ -331,9 +331,9 @@ static char *pnp_get_resource_value(char *buf,
return buf;
}
-static ssize_t pnp_set_current_resources(struct device *dmdev,
- struct device_attribute *attr,
- const char *ubuf, size_t count)
+static ssize_t resources_store(struct device *dmdev,
+ struct device_attribute *attr, const char *ubuf,
+ size_t count)
{
struct pnp_dev *dev = to_pnp_dev(dmdev);
char *buf = (void *)ubuf;
@@ -434,9 +434,10 @@ done:
return retval;
return count;
}
+static DEVICE_ATTR_RW(resources);
-static ssize_t pnp_show_current_ids(struct device *dmdev,
- struct device_attribute *attr, char *buf)
+static ssize_t id_show(struct device *dmdev, struct device_attribute *attr,
+ char *buf)
{
char *str = buf;
struct pnp_dev *dev = to_pnp_dev(dmdev);
@@ -448,12 +449,20 @@ static ssize_t pnp_show_current_ids(struct device *dmdev,
}
return (str - buf);
}
+static DEVICE_ATTR_RO(id);
-struct device_attribute pnp_interface_attrs[] = {
- __ATTR(resources, S_IRUGO | S_IWUSR,
- pnp_show_current_resources,
- pnp_set_current_resources),
- __ATTR(options, S_IRUGO, pnp_show_options, NULL),
- __ATTR(id, S_IRUGO, pnp_show_current_ids, NULL),
- __ATTR_NULL,
+static struct attribute *pnp_dev_attrs[] = {
+ &dev_attr_resources.attr,
+ &dev_attr_options.attr,
+ &dev_attr_id.attr,
+ NULL,
+};
+
+static const struct attribute_group pnp_dev_group = {
+ .attrs = pnp_dev_attrs,
+};
+
+const struct attribute_group *pnp_dev_groups[] = {
+ &pnp_dev_group,
+ NULL,
};
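The interface.c hunk above is an instance of the generic driver-core move from bus_type.dev_attrs to bus_type.dev_groups: DEVICE_ATTR_RO()/DEVICE_ATTR_RW() derive the attribute from a <name>_show/<name>_store pair, and the resulting dev_attr_<name> objects are collected into an attribute_group. A stand-alone sketch with an invented attribute name:

#include <linux/device.h>
#include <linux/kernel.h>

/* foo_show() is the read callback DEVICE_ATTR_RO(foo) expects to find */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(foo);

static struct attribute *example_dev_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};

static const struct attribute_group example_dev_group = {
	.attrs = example_dev_attrs,
};

/* NULL-terminated list wired into struct bus_type .dev_groups */
const struct attribute_group *example_dev_groups[] = {
	&example_dev_group,
	NULL,
};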
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 34049b0b4c73..747826d99059 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -239,8 +239,6 @@ static char *__init pnpacpi_get_id(struct acpi_device *device)
static int __init pnpacpi_add_device(struct acpi_device *device)
{
- acpi_handle temp = NULL;
- acpi_status status;
struct pnp_dev *dev;
char *pnpid;
struct acpi_hardware_id *id;
@@ -253,8 +251,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
* If a PnPacpi device is not present, the device
* driver should not be loaded.
*/
- status = acpi_get_handle(device->handle, "_CRS", &temp);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(device->handle, "_CRS"))
return 0;
pnpid = pnpacpi_get_id(device);
@@ -271,16 +268,14 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
dev->data = device;
/* .enabled means the device can decode the resources */
dev->active = device->status.enabled;
- status = acpi_get_handle(device->handle, "_SRS", &temp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(device->handle, "_SRS"))
dev->capabilities |= PNP_CONFIGURABLE;
dev->capabilities |= PNP_READ;
if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE))
dev->capabilities |= PNP_WRITE;
if (device->flags.removable)
dev->capabilities |= PNP_REMOVABLE;
- status = acpi_get_handle(device->handle, "_DIS", &temp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(device->handle, "_DIS"))
dev->capabilities |= PNP_DISABLE;
if (strlen(acpi_device_name(device)))
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index e6f92b450913..5e2054afe840 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -346,6 +346,12 @@ config CHARGER_BQ24190
help
Say Y to enable support for the TI BQ24190 battery charger.
+config CHARGER_BQ24735
+ tristate "TI BQ24735 battery charger support"
+ depends on I2C && GPIOLIB
+ help
+ Say Y to enable support for the TI BQ24735 battery charger.
+
config CHARGER_SMB347
tristate "Summit Microelectronics SMB347 Battery Charger"
depends on I2C
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index a4b74177706f..372b4e8ab598 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
+obj-$(CONFIG_CHARGER_BQ24735) += bq24735-charger.o
obj-$(CONFIG_POWER_AVS) += avs/
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index a4c4a10b3a41..19110aa613a1 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -766,7 +766,6 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
ret = -ENXIO;
break;
}
- break;
case USB_STAT_CARKIT_1:
case USB_STAT_CARKIT_2:
case USB_STAT_ACA_DOCK_CHARGER:
@@ -1387,8 +1386,12 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
* the GPADC module independent of the AB8500 chargers
*/
if (!di->vddadc_en_ac) {
- regulator_enable(di->regu);
- di->vddadc_en_ac = true;
+ ret = regulator_enable(di->regu);
+ if (ret)
+ dev_warn(di->dev,
+ "Failed to enable regulator\n");
+ else
+ di->vddadc_en_ac = true;
}
/* Check if the requested voltage or current is valid */
@@ -1556,8 +1559,12 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
* the GPADC module independent of the AB8500 chargers
*/
if (!di->vddadc_en_usb) {
- regulator_enable(di->regu);
- di->vddadc_en_usb = true;
+ ret = regulator_enable(di->regu);
+ if (ret)
+ dev_warn(di->dev,
+ "Failed to enable regulator\n");
+ else
+ di->vddadc_en_usb = true;
}
/* Enable USB charging */
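Both ab8500 hunks (and the pm2301 hunk further down) apply the same fix: regulator_enable() is __must_check, so its return value has to be inspected before the driver records the supply as enabled. A minimal sketch of the checked-enable pattern, with invented names:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static void example_enable_vddadc(struct device *dev,
				  struct regulator *regu, bool *enabled)
{
	int ret;

	if (*enabled)
		return;			/* already on, nothing to do */

	ret = regulator_enable(regu);
	if (ret)
		dev_warn(dev, "Failed to enable regulator: %d\n", ret);
	else
		*enabled = true;	/* only mark enabled on success */
}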
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index 0727f9256138..df893dd1447d 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -605,9 +605,13 @@ static int bq2415x_set_battery_regulation_voltage(struct bq2415x_device *bq,
{
int val = (mV/10 - 350) / 2;
+ /*
+ * According to the datasheet, the maximum battery regulation voltage is
+ * 4440mV which is b101111 = 47.
+ */
if (val < 0)
val = 0;
- else if (val > 94) /* FIXME: Max is 94 or 122 ? Set max value ? */
+ else if (val > 47)
return -EINVAL;
return bq2415x_i2c_write_mask(bq, BQ2415X_REG_VOLTAGE, val,
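Worked check of the new bound: the register encodes val = (mV/10 - 350) / 2, so the ceiling val = 47 corresponds to (350 + 2 * 47) * 10 = 4440 mV, matching the datasheet limit quoted in the new comment, whereas the old ceiling of 94 would have accepted (350 + 2 * 94) * 10 = 5380 mV.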
diff --git a/drivers/power/bq24735-charger.c b/drivers/power/bq24735-charger.c
new file mode 100644
index 000000000000..d022b823305b
--- /dev/null
+++ b/drivers/power/bq24735-charger.c
@@ -0,0 +1,419 @@
+/*
+ * Battery charger driver for TI BQ24735
+ *
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+
+#include <linux/power/bq24735-charger.h>
+
+#define BQ24735_CHG_OPT 0x12
+#define BQ24735_CHG_OPT_CHARGE_DISABLE (1 << 0)
+#define BQ24735_CHG_OPT_AC_PRESENT (1 << 4)
+#define BQ24735_CHARGE_CURRENT 0x14
+#define BQ24735_CHARGE_CURRENT_MASK 0x1fc0
+#define BQ24735_CHARGE_VOLTAGE 0x15
+#define BQ24735_CHARGE_VOLTAGE_MASK 0x7ff0
+#define BQ24735_INPUT_CURRENT 0x3f
+#define BQ24735_INPUT_CURRENT_MASK 0x1f80
+#define BQ24735_MANUFACTURER_ID 0xfe
+#define BQ24735_DEVICE_ID 0xff
+
+struct bq24735 {
+ struct power_supply charger;
+ struct i2c_client *client;
+ struct bq24735_platform *pdata;
+};
+
+static inline struct bq24735 *to_bq24735(struct power_supply *psy)
+{
+ return container_of(psy, struct bq24735, charger);
+}
+
+static enum power_supply_property bq24735_charger_properties[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static inline int bq24735_write_word(struct i2c_client *client, u8 reg,
+ u16 value)
+{
+ return i2c_smbus_write_word_data(client, reg, le16_to_cpu(value));
+}
+
+static inline int bq24735_read_word(struct i2c_client *client, u8 reg)
+{
+ s32 ret = i2c_smbus_read_word_data(client, reg);
+
+ return ret < 0 ? ret : le16_to_cpu(ret);
+}
+
+static int bq24735_update_word(struct i2c_client *client, u8 reg,
+ u16 mask, u16 value)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = bq24735_read_word(client, reg);
+ if (ret < 0)
+ return ret;
+
+ tmp = ret & ~mask;
+ tmp |= value & mask;
+
+ return bq24735_write_word(client, reg, tmp);
+}
+
+static inline int bq24735_enable_charging(struct bq24735 *charger)
+{
+ return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
+ BQ24735_CHG_OPT_CHARGE_DISABLE,
+ ~BQ24735_CHG_OPT_CHARGE_DISABLE);
+}
+
+static inline int bq24735_disable_charging(struct bq24735 *charger)
+{
+ return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
+ BQ24735_CHG_OPT_CHARGE_DISABLE,
+ BQ24735_CHG_OPT_CHARGE_DISABLE);
+}
+
+static int bq24735_config_charger(struct bq24735 *charger)
+{
+ struct bq24735_platform *pdata = charger->pdata;
+ int ret;
+ u16 value;
+
+ if (pdata->charge_current) {
+ value = pdata->charge_current & BQ24735_CHARGE_CURRENT_MASK;
+
+ ret = bq24735_write_word(charger->client,
+ BQ24735_CHARGE_CURRENT, value);
+ if (ret < 0) {
+ dev_err(&charger->client->dev,
+ "Failed to write charger current : %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (pdata->charge_voltage) {
+ value = pdata->charge_voltage & BQ24735_CHARGE_VOLTAGE_MASK;
+
+ ret = bq24735_write_word(charger->client,
+ BQ24735_CHARGE_VOLTAGE, value);
+ if (ret < 0) {
+ dev_err(&charger->client->dev,
+ "Failed to write charger voltage : %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (pdata->input_current) {
+ value = pdata->input_current & BQ24735_INPUT_CURRENT_MASK;
+
+ ret = bq24735_write_word(charger->client,
+ BQ24735_INPUT_CURRENT, value);
+ if (ret < 0) {
+ dev_err(&charger->client->dev,
+ "Failed to write input current : %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static bool bq24735_charger_is_present(struct bq24735 *charger)
+{
+ struct bq24735_platform *pdata = charger->pdata;
+ int ret;
+
+ if (pdata->status_gpio_valid) {
+ ret = gpio_get_value_cansleep(pdata->status_gpio);
+ return ret ^= pdata->status_gpio_active_low == 0;
+ } else {
+ int ac = 0;
+
+ ac = bq24735_read_word(charger->client, BQ24735_CHG_OPT);
+ if (ac < 0) {
+ dev_err(&charger->client->dev,
+ "Failed to read charger options : %d\n",
+ ac);
+ return false;
+ }
+ return (ac & BQ24735_CHG_OPT_AC_PRESENT) ? true : false;
+ }
+
+ return false;
+}
+
+static irqreturn_t bq24735_charger_isr(int irq, void *devid)
+{
+ struct power_supply *psy = devid;
+ struct bq24735 *charger = to_bq24735(psy);
+
+ if (bq24735_charger_is_present(charger))
+ bq24735_enable_charging(charger);
+ else
+ bq24735_disable_charging(charger);
+
+ power_supply_changed(psy);
+
+ return IRQ_HANDLED;
+}
+
+static int bq24735_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct bq24735 *charger;
+
+ charger = container_of(psy, struct bq24735, charger);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = bq24735_charger_is_present(charger) ? 1 : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct bq24735_platform *bq24735_parse_dt_data(struct i2c_client *client)
+{
+ struct bq24735_platform *pdata;
+ struct device_node *np = client->dev.of_node;
+ u32 val;
+ int ret;
+ enum of_gpio_flags flags;
+
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&client->dev,
+ "Memory alloc for bq24735 pdata failed\n");
+ return NULL;
+ }
+
+ pdata->status_gpio = of_get_named_gpio_flags(np, "ti,ac-detect-gpios",
+ 0, &flags);
+
+ if (flags & OF_GPIO_ACTIVE_LOW)
+ pdata->status_gpio_active_low = 1;
+
+ ret = of_property_read_u32(np, "ti,charge-current", &val);
+ if (!ret)
+ pdata->charge_current = val;
+
+ ret = of_property_read_u32(np, "ti,charge-voltage", &val);
+ if (!ret)
+ pdata->charge_voltage = val;
+
+ ret = of_property_read_u32(np, "ti,input-current", &val);
+ if (!ret)
+ pdata->input_current = val;
+
+ return pdata;
+}
+
+static int bq24735_charger_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct bq24735 *charger;
+ struct power_supply *supply;
+ char *name;
+
+ charger = devm_kzalloc(&client->dev, sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->pdata = client->dev.platform_data;
+
+ if (IS_ENABLED(CONFIG_OF) && !charger->pdata && client->dev.of_node)
+ charger->pdata = bq24735_parse_dt_data(client);
+
+ if (!charger->pdata) {
+ dev_err(&client->dev, "no platform data provided\n");
+ return -EINVAL;
+ }
+
+ name = (char *)charger->pdata->name;
+ if (!name) {
+ name = kasprintf(GFP_KERNEL, "bq24735@%s",
+ dev_name(&client->dev));
+ if (!name) {
+ dev_err(&client->dev, "Failed to alloc device name\n");
+ return -ENOMEM;
+ }
+ }
+
+ charger->client = client;
+
+ supply = &charger->charger;
+
+ supply->name = name;
+ supply->type = POWER_SUPPLY_TYPE_MAINS;
+ supply->properties = bq24735_charger_properties;
+ supply->num_properties = ARRAY_SIZE(bq24735_charger_properties);
+ supply->get_property = bq24735_charger_get_property;
+ supply->supplied_to = charger->pdata->supplied_to;
+ supply->num_supplicants = charger->pdata->num_supplicants;
+ supply->of_node = client->dev.of_node;
+
+ i2c_set_clientdata(client, charger);
+
+ ret = bq24735_read_word(client, BQ24735_MANUFACTURER_ID);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read manufacturer id : %d\n",
+ ret);
+ goto err_free_name;
+ } else if (ret != 0x0040) {
+ dev_err(&client->dev,
+ "manufacturer id mismatch. 0x0040 != 0x%04x\n", ret);
+ ret = -ENODEV;
+ goto err_free_name;
+ }
+
+ ret = bq24735_read_word(client, BQ24735_DEVICE_ID);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read device id : %d\n", ret);
+ goto err_free_name;
+ } else if (ret != 0x000B) {
+ dev_err(&client->dev,
+ "device id mismatch. 0x000b != 0x%04x\n", ret);
+ ret = -ENODEV;
+ goto err_free_name;
+ }
+
+ if (gpio_is_valid(charger->pdata->status_gpio)) {
+ ret = devm_gpio_request(&client->dev,
+ charger->pdata->status_gpio,
+ name);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed GPIO request for GPIO %d: %d\n",
+ charger->pdata->status_gpio, ret);
+ }
+
+ charger->pdata->status_gpio_valid = !ret;
+ }
+
+ ret = bq24735_config_charger(charger);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed in configuring charger");
+ goto err_free_name;
+ }
+
+ /* check for AC adapter presence */
+ if (bq24735_charger_is_present(charger)) {
+ ret = bq24735_enable_charging(charger);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to enable charging\n");
+ goto err_free_name;
+ }
+ }
+
+ ret = power_supply_register(&client->dev, supply);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to register power supply: %d\n",
+ ret);
+ goto err_free_name;
+ }
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, bq24735_charger_isr,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ supply->name, supply);
+ if (ret) {
+ dev_err(&client->dev,
+ "Unable to register IRQ %d err %d\n",
+ client->irq, ret);
+ goto err_unregister_supply;
+ }
+ }
+
+ return 0;
+err_unregister_supply:
+ power_supply_unregister(supply);
+err_free_name:
+ if (name != charger->pdata->name)
+ kfree(name);
+
+ return ret;
+}
+
+static int bq24735_charger_remove(struct i2c_client *client)
+{
+ struct bq24735 *charger = i2c_get_clientdata(client);
+
+ if (charger->client->irq)
+ devm_free_irq(&charger->client->dev, charger->client->irq,
+ &charger->charger);
+
+ power_supply_unregister(&charger->charger);
+
+ if (charger->charger.name != charger->pdata->name)
+ kfree(charger->charger.name);
+
+ return 0;
+}
+
+static const struct i2c_device_id bq24735_charger_id[] = {
+ { "bq24735-charger", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, bq24735_charger_id);
+
+static const struct of_device_id bq24735_match_ids[] = {
+ { .compatible = "ti,bq24735", },
+ { /* end */ }
+};
+MODULE_DEVICE_TABLE(of, bq24735_match_ids);
+
+static struct i2c_driver bq24735_charger_driver = {
+ .driver = {
+ .name = "bq24735-charger",
+ .owner = THIS_MODULE,
+ .of_match_table = bq24735_match_ids,
+ },
+ .probe = bq24735_charger_probe,
+ .remove = bq24735_charger_remove,
+ .id_table = bq24735_charger_id,
+};
+
+module_i2c_driver(bq24735_charger_driver);
+
+MODULE_DESCRIPTION("bq24735 battery charging driver");
+MODULE_AUTHOR("Darbha Sriharsha <dsriharsha@nvidia.com>");
+MODULE_LICENSE("GPL v2");
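For completeness, a hedged board-code sketch of how the new driver can be instantiated without device tree, using the bq24735_platform fields the probe path consumes; the I2C address, GPIO number and register values are invented placeholders, and the current/voltage values are masked by the driver exactly as shown in bq24735_config_charger():

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/power/bq24735-charger.h>

static struct bq24735_platform example_bq24735_pdata = {
	.charge_current		= 0x1000,	/* masked with BQ24735_CHARGE_CURRENT_MASK */
	.charge_voltage		= 0x3000,	/* masked with BQ24735_CHARGE_VOLTAGE_MASK */
	.input_current		= 0x0800,	/* masked with BQ24735_INPUT_CURRENT_MASK */
	.status_gpio		= 72,		/* AC detect line; requested in probe() */
	.status_gpio_active_low	= 1,
};

/* registered on the relevant adapter via i2c_register_board_info() */
static struct i2c_board_info example_bq24735_info __initdata = {
	I2C_BOARD_INFO("bq24735-charger", 0x09),
	.platform_data = &example_bq24735_pdata,
};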
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index e30e847600bb..7287c0efd6bf 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -1378,7 +1378,8 @@ static int charger_manager_register_sysfs(struct charger_manager *cm)
charger = &desc->charger_regulators[i];
snprintf(buf, 10, "charger.%d", i);
- str = kzalloc(sizeof(char) * (strlen(buf) + 1), GFP_KERNEL);
+ str = devm_kzalloc(cm->dev,
+ sizeof(char) * (strlen(buf) + 1), GFP_KERNEL);
if (!str) {
ret = -ENOMEM;
goto err;
@@ -1452,30 +1453,23 @@ static int charger_manager_probe(struct platform_device *pdev)
rtc_dev = NULL;
dev_err(&pdev->dev, "Cannot get RTC %s\n",
g_desc->rtc_name);
- ret = -ENODEV;
- goto err_alloc;
+ return -ENODEV;
}
}
if (!desc) {
dev_err(&pdev->dev, "No platform data (desc) found\n");
- ret = -ENODEV;
- goto err_alloc;
+ return -ENODEV;
}
- cm = kzalloc(sizeof(struct charger_manager), GFP_KERNEL);
- if (!cm) {
- ret = -ENOMEM;
- goto err_alloc;
- }
+ cm = devm_kzalloc(&pdev->dev,
+ sizeof(struct charger_manager), GFP_KERNEL);
+ if (!cm)
+ return -ENOMEM;
/* Basic Values. Unspecified are Null or 0 */
cm->dev = &pdev->dev;
- cm->desc = kmemdup(desc, sizeof(struct charger_desc), GFP_KERNEL);
- if (!cm->desc) {
- ret = -ENOMEM;
- goto err_alloc_desc;
- }
+ cm->desc = desc;
cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
/*
@@ -1498,27 +1492,23 @@ static int charger_manager_probe(struct platform_device *pdev)
}
if (!desc->charger_regulators || desc->num_charger_regulators < 1) {
- ret = -EINVAL;
dev_err(&pdev->dev, "charger_regulators undefined\n");
- goto err_no_charger;
+ return -EINVAL;
}
if (!desc->psy_charger_stat || !desc->psy_charger_stat[0]) {
dev_err(&pdev->dev, "No power supply defined\n");
- ret = -EINVAL;
- goto err_no_charger_stat;
+ return -EINVAL;
}
/* Counting index only */
while (desc->psy_charger_stat[i])
i++;
- cm->charger_stat = kzalloc(sizeof(struct power_supply *) * (i + 1),
- GFP_KERNEL);
- if (!cm->charger_stat) {
- ret = -ENOMEM;
- goto err_no_charger_stat;
- }
+ cm->charger_stat = devm_kzalloc(&pdev->dev,
+ sizeof(struct power_supply *) * i, GFP_KERNEL);
+ if (!cm->charger_stat)
+ return -ENOMEM;
for (i = 0; desc->psy_charger_stat[i]; i++) {
cm->charger_stat[i] = power_supply_get_by_name(
@@ -1526,8 +1516,7 @@ static int charger_manager_probe(struct platform_device *pdev)
if (!cm->charger_stat[i]) {
dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
desc->psy_charger_stat[i]);
- ret = -ENODEV;
- goto err_chg_stat;
+ return -ENODEV;
}
}
@@ -1535,21 +1524,18 @@ static int charger_manager_probe(struct platform_device *pdev)
if (!cm->fuel_gauge) {
dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
desc->psy_fuel_gauge);
- ret = -ENODEV;
- goto err_chg_stat;
+ return -ENODEV;
}
if (desc->polling_interval_ms == 0 ||
msecs_to_jiffies(desc->polling_interval_ms) <= CM_JIFFIES_SMALL) {
dev_err(&pdev->dev, "polling_interval_ms is too small\n");
- ret = -EINVAL;
- goto err_chg_stat;
+ return -EINVAL;
}
if (!desc->temperature_out_of_range) {
dev_err(&pdev->dev, "there is no temperature_out_of_range\n");
- ret = -EINVAL;
- goto err_chg_stat;
+ return -EINVAL;
}
if (!desc->charging_max_duration_ms ||
@@ -1570,14 +1556,13 @@ static int charger_manager_probe(struct platform_device *pdev)
cm->charger_psy.name = cm->psy_name_buf;
/* Allocate for psy properties because they may vary */
- cm->charger_psy.properties = kzalloc(sizeof(enum power_supply_property)
+ cm->charger_psy.properties = devm_kzalloc(&pdev->dev,
+ sizeof(enum power_supply_property)
* (ARRAY_SIZE(default_charger_props) +
- NUM_CHARGER_PSY_OPTIONAL),
- GFP_KERNEL);
- if (!cm->charger_psy.properties) {
- ret = -ENOMEM;
- goto err_chg_stat;
- }
+ NUM_CHARGER_PSY_OPTIONAL), GFP_KERNEL);
+ if (!cm->charger_psy.properties)
+ return -ENOMEM;
+
memcpy(cm->charger_psy.properties, default_charger_props,
sizeof(enum power_supply_property) *
ARRAY_SIZE(default_charger_props));
@@ -1614,7 +1599,7 @@ static int charger_manager_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Cannot register charger-manager with name \"%s\"\n",
cm->charger_psy.name);
- goto err_register;
+ return ret;
}
/* Register extcon device for charger cable */
@@ -1655,8 +1640,6 @@ err_reg_sysfs:
charger = &desc->charger_regulators[i];
sysfs_remove_group(&cm->charger_psy.dev->kobj,
&charger->attr_g);
-
- kfree(charger->attr_g.name);
}
err_reg_extcon:
for (i = 0; i < desc->num_charger_regulators; i++) {
@@ -1674,16 +1657,7 @@ err_reg_extcon:
}
power_supply_unregister(&cm->charger_psy);
-err_register:
- kfree(cm->charger_psy.properties);
-err_chg_stat:
- kfree(cm->charger_stat);
-err_no_charger_stat:
-err_no_charger:
- kfree(cm->desc);
-err_alloc_desc:
- kfree(cm);
-err_alloc:
+
return ret;
}
@@ -1718,11 +1692,6 @@ static int charger_manager_remove(struct platform_device *pdev)
try_charger_enable(cm, false);
- kfree(cm->charger_psy.properties);
- kfree(cm->charger_stat);
- kfree(cm->desc);
- kfree(cm);
-
return 0;
}
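The charger-manager changes above are a mechanical devm conversion: every kzalloc()/kmemdup() becomes devm_kzalloc() against &pdev->dev, so the err_* unwind labels and the kfree() calls in both the error paths and remove() disappear. The core of the pattern, sketched with illustrative names:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_ctx {
	struct device *dev;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_ctx *ctx;

	/* freed automatically when the device is unbound or probe fails */
	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;		/* nothing to unwind by hand */

	ctx->dev = &pdev->dev;
	platform_set_drvdata(pdev, ctx);
	return 0;
}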
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index fc04d191579b..1bb3a91b1acc 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -2,6 +2,7 @@
* ISP1704 USB Charger Detection driver
*
* Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2012 - 2013 Pali Rohár <pali.rohar@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -65,10 +66,6 @@ struct isp1704_charger {
unsigned present:1;
unsigned online:1;
unsigned current_max;
-
- /* temp storage variables */
- unsigned long event;
- unsigned max_power;
};
static inline int isp1704_read(struct isp1704_charger *isp, u32 reg)
@@ -231,56 +228,59 @@ static inline int isp1704_charger_detect(struct isp1704_charger *isp)
return ret;
}
+static inline int isp1704_charger_detect_dcp(struct isp1704_charger *isp)
+{
+ if (isp1704_charger_detect(isp) &&
+ isp1704_charger_type(isp) == POWER_SUPPLY_TYPE_USB_DCP)
+ return true;
+ else
+ return false;
+}
+
static void isp1704_charger_work(struct work_struct *data)
{
- int detect;
- unsigned long event;
- unsigned power;
struct isp1704_charger *isp =
container_of(data, struct isp1704_charger, work);
static DEFINE_MUTEX(lock);
- event = isp->event;
- power = isp->max_power;
-
mutex_lock(&lock);
- if (event != USB_EVENT_NONE)
- isp1704_charger_set_power(isp, 1);
-
- switch (event) {
+ switch (isp->phy->last_event) {
case USB_EVENT_VBUS:
- isp->online = true;
+ /* do not run wall-charger detection more than once */
+ if (!isp->present) {
+ isp->online = true;
+ isp->present = 1;
+ isp1704_charger_set_power(isp, 1);
+
+ /* detect wall charger */
+ if (isp1704_charger_detect_dcp(isp)) {
+ isp->psy.type = POWER_SUPPLY_TYPE_USB_DCP;
+ isp->current_max = 1800;
+ } else {
+ isp->psy.type = POWER_SUPPLY_TYPE_USB;
+ isp->current_max = 500;
+ }
- /* detect charger */
- detect = isp1704_charger_detect(isp);
-
- if (detect) {
- isp->present = detect;
- isp->psy.type = isp1704_charger_type(isp);
+ /* enable data pullups */
+ if (isp->phy->otg->gadget)
+ usb_gadget_connect(isp->phy->otg->gadget);
}
- switch (isp->psy.type) {
- case POWER_SUPPLY_TYPE_USB_DCP:
- isp->current_max = 1800;
- break;
- case POWER_SUPPLY_TYPE_USB_CDP:
+ if (isp->psy.type != POWER_SUPPLY_TYPE_USB_DCP) {
/*
* Only 500mA here or high speed chirp
* handshaking may break
*/
- isp->current_max = 500;
- /* FALLTHROUGH */
- case POWER_SUPPLY_TYPE_USB:
- default:
- /* enable data pullups */
- if (isp->phy->otg->gadget)
- usb_gadget_connect(isp->phy->otg->gadget);
+ if (isp->current_max > 500)
+ isp->current_max = 500;
+
+ if (isp->current_max > 100)
+ isp->psy.type = POWER_SUPPLY_TYPE_USB_CDP;
}
break;
case USB_EVENT_NONE:
isp->online = false;
- isp->current_max = 0;
isp->present = 0;
isp->current_max = 0;
isp->psy.type = POWER_SUPPLY_TYPE_USB;
@@ -298,12 +298,6 @@ static void isp1704_charger_work(struct work_struct *data)
isp1704_charger_set_power(isp, 0);
break;
- case USB_EVENT_ENUMERATED:
- if (isp->present)
- isp->current_max = 1800;
- else
- isp->current_max = power;
- break;
default:
goto out;
}
@@ -314,16 +308,11 @@ out:
}
static int isp1704_notifier_call(struct notifier_block *nb,
- unsigned long event, void *power)
+ unsigned long val, void *v)
{
struct isp1704_charger *isp =
container_of(nb, struct isp1704_charger, nb);
- isp->event = event;
-
- if (power)
- isp->max_power = *((unsigned *)power);
-
schedule_work(&isp->work);
return NOTIFY_OK;
@@ -462,13 +451,13 @@ static int isp1704_charger_probe(struct platform_device *pdev)
if (isp->phy->otg->gadget)
usb_gadget_disconnect(isp->phy->otg->gadget);
+ if (isp->phy->last_event == USB_EVENT_NONE)
+ isp1704_charger_set_power(isp, 0);
+
/* Detect charger if VBUS is valid (the cable was already plugged). */
- ret = isp1704_read(isp, ULPI_USB_INT_STS);
- isp1704_charger_set_power(isp, 0);
- if ((ret & ULPI_INT_VBUS_VALID) && !isp->phy->otg->default_a) {
- isp->event = USB_EVENT_VBUS;
+ if (isp->phy->last_event == USB_EVENT_VBUS &&
+ !isp->phy->otg->default_a)
schedule_work(&isp->work);
- }
return 0;
fail2:
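The isp1704 rework drops the cached event/max_power copies and lets the worker read phy->last_event directly, so detection state can no longer go stale between the notifier and the work item. A reduced sketch of the notifier/worker split, with example_charger standing in for struct isp1704_charger:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/usb/phy.h>
#include <linux/workqueue.h>

struct example_charger {
	struct usb_phy *phy;
	struct work_struct work;
	struct notifier_block nb;
};

static int example_notifier_call(struct notifier_block *nb,
				 unsigned long val, void *v)
{
	struct example_charger *ch = container_of(nb, struct example_charger, nb);

	/* no snapshot of the event; the worker reads phy->last_event itself */
	schedule_work(&ch->work);
	return NOTIFY_OK;
}

static void example_work(struct work_struct *data)
{
	struct example_charger *ch = container_of(data, struct example_charger, work);

	switch (ch->phy->last_event) {
	case USB_EVENT_VBUS:
		/* cable present: run charger-type detection once */
		break;
	case USB_EVENT_NONE:
		/* cable gone: reset online/present/current_max state */
		break;
	default:
		break;
	}
}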
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index d664ef58afa7..e0b22f9b6fdd 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -33,6 +33,7 @@
#include <linux/power_supply.h>
#include <linux/power/max17042_battery.h>
#include <linux/of.h>
+#include <linux/regmap.h>
/* Status register bits */
#define STATUS_POR_BIT (1 << 1)
@@ -67,6 +68,7 @@
struct max17042_chip {
struct i2c_client *client;
+ struct regmap *regmap;
struct power_supply battery;
enum max170xx_chip_type chip_type;
struct max17042_platform_data *pdata;
@@ -74,35 +76,6 @@ struct max17042_chip {
int init_complete;
};
-static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value)
-{
- int ret = i2c_smbus_write_word_data(client, reg, value);
-
- if (ret < 0)
- dev_err(&client->dev, "%s: err %d\n", __func__, ret);
-
- return ret;
-}
-
-static int max17042_read_reg(struct i2c_client *client, u8 reg)
-{
- int ret = i2c_smbus_read_word_data(client, reg);
-
- if (ret < 0)
- dev_err(&client->dev, "%s: err %d\n", __func__, ret);
-
- return ret;
-}
-
-static void max17042_set_reg(struct i2c_client *client,
- struct max17042_reg_data *data, int size)
-{
- int i;
-
- for (i = 0; i < size; i++)
- max17042_write_reg(client, data[i].addr, data[i].data);
-}
-
static enum power_supply_property max17042_battery_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CYCLE_COUNT,
@@ -125,96 +98,98 @@ static int max17042_get_property(struct power_supply *psy,
{
struct max17042_chip *chip = container_of(psy,
struct max17042_chip, battery);
+ struct regmap *map = chip->regmap;
int ret;
+ u32 data;
if (!chip->init_complete)
return -EAGAIN;
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
- ret = max17042_read_reg(chip->client, MAX17042_STATUS);
+ ret = regmap_read(map, MAX17042_STATUS, &data);
if (ret < 0)
return ret;
- if (ret & MAX17042_STATUS_BattAbsent)
+ if (data & MAX17042_STATUS_BattAbsent)
val->intval = 0;
else
val->intval = 1;
break;
case POWER_SUPPLY_PROP_CYCLE_COUNT:
- ret = max17042_read_reg(chip->client, MAX17042_Cycles);
+ ret = regmap_read(map, MAX17042_Cycles, &data);
if (ret < 0)
return ret;
- val->intval = ret;
+ val->intval = data;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- ret = max17042_read_reg(chip->client, MAX17042_MinMaxVolt);
+ ret = regmap_read(map, MAX17042_MinMaxVolt, &data);
if (ret < 0)
return ret;
- val->intval = ret >> 8;
+ val->intval = data >> 8;
val->intval *= 20000; /* Units of LSB = 20mV */
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
if (chip->chip_type == MAX17042)
- ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+ ret = regmap_read(map, MAX17042_V_empty, &data);
else
- ret = max17042_read_reg(chip->client, MAX17047_V_empty);
+ ret = regmap_read(map, MAX17047_V_empty, &data);
if (ret < 0)
return ret;
- val->intval = ret >> 7;
+ val->intval = data >> 7;
val->intval *= 10000; /* Units of LSB = 10mV */
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = max17042_read_reg(chip->client, MAX17042_VCELL);
+ ret = regmap_read(map, MAX17042_VCELL, &data);
if (ret < 0)
return ret;
- val->intval = ret * 625 / 8;
+ val->intval = data * 625 / 8;
break;
case POWER_SUPPLY_PROP_VOLTAGE_AVG:
- ret = max17042_read_reg(chip->client, MAX17042_AvgVCELL);
+ ret = regmap_read(map, MAX17042_AvgVCELL, &data);
if (ret < 0)
return ret;
- val->intval = ret * 625 / 8;
+ val->intval = data * 625 / 8;
break;
case POWER_SUPPLY_PROP_VOLTAGE_OCV:
- ret = max17042_read_reg(chip->client, MAX17042_OCVInternal);
+ ret = regmap_read(map, MAX17042_OCVInternal, &data);
if (ret < 0)
return ret;
- val->intval = ret * 625 / 8;
+ val->intval = data * 625 / 8;
break;
case POWER_SUPPLY_PROP_CAPACITY:
- ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
+ ret = regmap_read(map, MAX17042_RepSOC, &data);
if (ret < 0)
return ret;
- val->intval = ret >> 8;
+ val->intval = data >> 8;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
- ret = max17042_read_reg(chip->client, MAX17042_FullCAP);
+ ret = regmap_read(map, MAX17042_FullCAP, &data);
if (ret < 0)
return ret;
- val->intval = ret * 1000 / 2;
+ val->intval = data * 1000 / 2;
break;
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
- ret = max17042_read_reg(chip->client, MAX17042_QH);
+ ret = regmap_read(map, MAX17042_QH, &data);
if (ret < 0)
return ret;
- val->intval = ret * 1000 / 2;
+ val->intval = data * 1000 / 2;
break;
case POWER_SUPPLY_PROP_TEMP:
- ret = max17042_read_reg(chip->client, MAX17042_TEMP);
+ ret = regmap_read(map, MAX17042_TEMP, &data);
if (ret < 0)
return ret;
- val->intval = ret;
+ val->intval = data;
/* The value is signed. */
if (val->intval & 0x8000) {
val->intval = (0x7fff & ~val->intval) + 1;
@@ -226,11 +201,11 @@ static int max17042_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
if (chip->pdata->enable_current_sense) {
- ret = max17042_read_reg(chip->client, MAX17042_Current);
+ ret = regmap_read(map, MAX17042_Current, &data);
if (ret < 0)
return ret;
- val->intval = ret;
+ val->intval = data;
if (val->intval & 0x8000) {
/* Negative */
val->intval = ~val->intval & 0x7fff;
@@ -244,12 +219,11 @@ static int max17042_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
if (chip->pdata->enable_current_sense) {
- ret = max17042_read_reg(chip->client,
- MAX17042_AvgCurrent);
+ ret = regmap_read(map, MAX17042_AvgCurrent, &data);
if (ret < 0)
return ret;
- val->intval = ret;
+ val->intval = data;
if (val->intval & 0x8000) {
/* Negative */
val->intval = ~val->intval & 0x7fff;
@@ -267,16 +241,15 @@ static int max17042_get_property(struct power_supply *psy,
return 0;
}
-static int max17042_write_verify_reg(struct i2c_client *client,
- u8 reg, u16 value)
+static int max17042_write_verify_reg(struct regmap *map, u8 reg, u32 value)
{
int retries = 8;
int ret;
- u16 read_value;
+ u32 read_value;
do {
- ret = i2c_smbus_write_word_data(client, reg, value);
- read_value = max17042_read_reg(client, reg);
+ ret = regmap_write(map, reg, value);
+ regmap_read(map, reg, &read_value);
if (read_value != value) {
ret = -EIO;
retries--;
@@ -284,50 +257,51 @@ static int max17042_write_verify_reg(struct i2c_client *client,
} while (retries && read_value != value);
if (ret < 0)
- dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+ pr_err("%s: err %d\n", __func__, ret);
return ret;
}
-static inline void max17042_override_por(
- struct i2c_client *client, u8 reg, u16 value)
+static inline void max17042_override_por(struct regmap *map,
+ u8 reg, u16 value)
{
if (value)
- max17042_write_reg(client, reg, value);
+ regmap_write(map, reg, value);
}
static inline void max10742_unlock_model(struct max17042_chip *chip)
{
- struct i2c_client *client = chip->client;
- max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_UNLOCK1);
- max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_UNLOCK2);
+ struct regmap *map = chip->regmap;
+ regmap_write(map, MAX17042_MLOCKReg1, MODEL_UNLOCK1);
+ regmap_write(map, MAX17042_MLOCKReg2, MODEL_UNLOCK2);
}
static inline void max10742_lock_model(struct max17042_chip *chip)
{
- struct i2c_client *client = chip->client;
- max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_LOCK1);
- max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_LOCK2);
+ struct regmap *map = chip->regmap;
+
+ regmap_write(map, MAX17042_MLOCKReg1, MODEL_LOCK1);
+ regmap_write(map, MAX17042_MLOCKReg2, MODEL_LOCK2);
}
static inline void max17042_write_model_data(struct max17042_chip *chip,
u8 addr, int size)
{
- struct i2c_client *client = chip->client;
+ struct regmap *map = chip->regmap;
int i;
for (i = 0; i < size; i++)
- max17042_write_reg(client, addr + i,
- chip->pdata->config_data->cell_char_tbl[i]);
+ regmap_write(map, addr + i,
+ chip->pdata->config_data->cell_char_tbl[i]);
}
static inline void max17042_read_model_data(struct max17042_chip *chip,
- u8 addr, u16 *data, int size)
+ u8 addr, u32 *data, int size)
{
- struct i2c_client *client = chip->client;
+ struct regmap *map = chip->regmap;
int i;
for (i = 0; i < size; i++)
- data[i] = max17042_read_reg(client, addr + i);
+ regmap_read(map, addr + i, &data[i]);
}
static inline int max17042_model_data_compare(struct max17042_chip *chip,
@@ -350,7 +324,7 @@ static int max17042_init_model(struct max17042_chip *chip)
{
int ret;
int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
- u16 *temp_data;
+ u32 *temp_data;
temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
if (!temp_data)
@@ -365,7 +339,7 @@ static int max17042_init_model(struct max17042_chip *chip)
ret = max17042_model_data_compare(
chip,
chip->pdata->config_data->cell_char_tbl,
- temp_data,
+ (u16 *)temp_data,
table_size);
max10742_lock_model(chip);
@@ -378,7 +352,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip)
{
int i;
int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
- u16 *temp_data;
+ u32 *temp_data;
int ret = 0;
temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
@@ -398,40 +372,38 @@ static int max17042_verify_model_lock(struct max17042_chip *chip)
static void max17042_write_config_regs(struct max17042_chip *chip)
{
struct max17042_config_data *config = chip->pdata->config_data;
+ struct regmap *map = chip->regmap;
- max17042_write_reg(chip->client, MAX17042_CONFIG, config->config);
- max17042_write_reg(chip->client, MAX17042_LearnCFG, config->learn_cfg);
- max17042_write_reg(chip->client, MAX17042_FilterCFG,
+ regmap_write(map, MAX17042_CONFIG, config->config);
+ regmap_write(map, MAX17042_LearnCFG, config->learn_cfg);
+ regmap_write(map, MAX17042_FilterCFG,
config->filter_cfg);
- max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
+ regmap_write(map, MAX17042_RelaxCFG, config->relax_cfg);
if (chip->chip_type == MAX17047)
- max17042_write_reg(chip->client, MAX17047_FullSOCThr,
+ regmap_write(map, MAX17047_FullSOCThr,
config->full_soc_thresh);
}
static void max17042_write_custom_regs(struct max17042_chip *chip)
{
struct max17042_config_data *config = chip->pdata->config_data;
+ struct regmap *map = chip->regmap;
- max17042_write_verify_reg(chip->client, MAX17042_RCOMP0,
- config->rcomp0);
- max17042_write_verify_reg(chip->client, MAX17042_TempCo,
- config->tcompc0);
- max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
- config->ichgt_term);
+ max17042_write_verify_reg(map, MAX17042_RCOMP0, config->rcomp0);
+ max17042_write_verify_reg(map, MAX17042_TempCo, config->tcompc0);
+ max17042_write_verify_reg(map, MAX17042_ICHGTerm, config->ichgt_term);
if (chip->chip_type == MAX17042) {
- max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
- config->empty_tempco);
- max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
+ regmap_write(map, MAX17042_EmptyTempCo, config->empty_tempco);
+ max17042_write_verify_reg(map, MAX17042_K_empty0,
config->kempty0);
} else {
- max17042_write_verify_reg(chip->client, MAX17047_QRTbl00,
+ max17042_write_verify_reg(map, MAX17047_QRTbl00,
config->qrtbl00);
- max17042_write_verify_reg(chip->client, MAX17047_QRTbl10,
+ max17042_write_verify_reg(map, MAX17047_QRTbl10,
config->qrtbl10);
- max17042_write_verify_reg(chip->client, MAX17047_QRTbl20,
+ max17042_write_verify_reg(map, MAX17047_QRTbl20,
config->qrtbl20);
- max17042_write_verify_reg(chip->client, MAX17047_QRTbl30,
+ max17042_write_verify_reg(map, MAX17047_QRTbl30,
config->qrtbl30);
}
}
@@ -439,58 +411,60 @@ static void max17042_write_custom_regs(struct max17042_chip *chip)
static void max17042_update_capacity_regs(struct max17042_chip *chip)
{
struct max17042_config_data *config = chip->pdata->config_data;
+ struct regmap *map = chip->regmap;
- max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
+ max17042_write_verify_reg(map, MAX17042_FullCAP,
config->fullcap);
- max17042_write_reg(chip->client, MAX17042_DesignCap,
- config->design_cap);
- max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
+ regmap_write(map, MAX17042_DesignCap, config->design_cap);
+ max17042_write_verify_reg(map, MAX17042_FullCAPNom,
config->fullcapnom);
}
static void max17042_reset_vfsoc0_reg(struct max17042_chip *chip)
{
- u16 vfSoc;
+ unsigned int vfSoc;
+ struct regmap *map = chip->regmap;
- vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
- max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK);
- max17042_write_verify_reg(chip->client, MAX17042_VFSOC0, vfSoc);
- max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_LOCK);
+ regmap_read(map, MAX17042_VFSOC, &vfSoc);
+ regmap_write(map, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK);
+ max17042_write_verify_reg(map, MAX17042_VFSOC0, vfSoc);
+ regmap_write(map, MAX17042_VFSOC0Enable, VFSOC0_LOCK);
}
static void max17042_load_new_capacity_params(struct max17042_chip *chip)
{
- u16 full_cap0, rep_cap, dq_acc, vfSoc;
+ u32 full_cap0, rep_cap, dq_acc, vfSoc;
u32 rem_cap;
struct max17042_config_data *config = chip->pdata->config_data;
+ struct regmap *map = chip->regmap;
- full_cap0 = max17042_read_reg(chip->client, MAX17042_FullCAP0);
- vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
+ regmap_read(map, MAX17042_FullCAP0, &full_cap0);
+ regmap_read(map, MAX17042_VFSOC, &vfSoc);
/* fg_vfSoc needs to shifted by 8 bits to get the
* perc in 1% accuracy, to get the right rem_cap multiply
* full_cap0, fg_vfSoc and devide by 100
*/
rem_cap = ((vfSoc >> 8) * full_cap0) / 100;
- max17042_write_verify_reg(chip->client, MAX17042_RemCap, (u16)rem_cap);
+ max17042_write_verify_reg(map, MAX17042_RemCap, rem_cap);
- rep_cap = (u16)rem_cap;
- max17042_write_verify_reg(chip->client, MAX17042_RepCap, rep_cap);
+ rep_cap = rem_cap;
+ max17042_write_verify_reg(map, MAX17042_RepCap, rep_cap);
/* Write dQ_acc to 200% of Capacity and dP_acc to 200% */
dq_acc = config->fullcap / dQ_ACC_DIV;
- max17042_write_verify_reg(chip->client, MAX17042_dQacc, dq_acc);
- max17042_write_verify_reg(chip->client, MAX17042_dPacc, dP_ACC_200);
+ max17042_write_verify_reg(map, MAX17042_dQacc, dq_acc);
+ max17042_write_verify_reg(map, MAX17042_dPacc, dP_ACC_200);
- max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
+ max17042_write_verify_reg(map, MAX17042_FullCAP,
config->fullcap);
- max17042_write_reg(chip->client, MAX17042_DesignCap,
+ regmap_write(map, MAX17042_DesignCap,
config->design_cap);
- max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
+ max17042_write_verify_reg(map, MAX17042_FullCAPNom,
config->fullcapnom);
/* Update SOC register with new SOC */
- max17042_write_reg(chip->client, MAX17042_RepSOC, vfSoc);
+ regmap_write(map, MAX17042_RepSOC, vfSoc);
}
/*
@@ -500,59 +474,60 @@ static void max17042_load_new_capacity_params(struct max17042_chip *chip)
*/
static inline void max17042_override_por_values(struct max17042_chip *chip)
{
- struct i2c_client *client = chip->client;
+ struct regmap *map = chip->regmap;
struct max17042_config_data *config = chip->pdata->config_data;
- max17042_override_por(client, MAX17042_TGAIN, config->tgain);
- max17042_override_por(client, MAx17042_TOFF, config->toff);
- max17042_override_por(client, MAX17042_CGAIN, config->cgain);
- max17042_override_por(client, MAX17042_COFF, config->coff);
-
- max17042_override_por(client, MAX17042_VALRT_Th, config->valrt_thresh);
- max17042_override_por(client, MAX17042_TALRT_Th, config->talrt_thresh);
- max17042_override_por(client, MAX17042_SALRT_Th,
- config->soc_alrt_thresh);
- max17042_override_por(client, MAX17042_CONFIG, config->config);
- max17042_override_por(client, MAX17042_SHDNTIMER, config->shdntimer);
-
- max17042_override_por(client, MAX17042_DesignCap, config->design_cap);
- max17042_override_por(client, MAX17042_ICHGTerm, config->ichgt_term);
-
- max17042_override_por(client, MAX17042_AtRate, config->at_rate);
- max17042_override_por(client, MAX17042_LearnCFG, config->learn_cfg);
- max17042_override_por(client, MAX17042_FilterCFG, config->filter_cfg);
- max17042_override_por(client, MAX17042_RelaxCFG, config->relax_cfg);
- max17042_override_por(client, MAX17042_MiscCFG, config->misc_cfg);
- max17042_override_por(client, MAX17042_MaskSOC, config->masksoc);
-
- max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
- max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
+ max17042_override_por(map, MAX17042_TGAIN, config->tgain);
+ max17042_override_por(map, MAx17042_TOFF, config->toff);
+ max17042_override_por(map, MAX17042_CGAIN, config->cgain);
+ max17042_override_por(map, MAX17042_COFF, config->coff);
+
+ max17042_override_por(map, MAX17042_VALRT_Th, config->valrt_thresh);
+ max17042_override_por(map, MAX17042_TALRT_Th, config->talrt_thresh);
+ max17042_override_por(map, MAX17042_SALRT_Th,
+ config->soc_alrt_thresh);
+ max17042_override_por(map, MAX17042_CONFIG, config->config);
+ max17042_override_por(map, MAX17042_SHDNTIMER, config->shdntimer);
+
+ max17042_override_por(map, MAX17042_DesignCap, config->design_cap);
+ max17042_override_por(map, MAX17042_ICHGTerm, config->ichgt_term);
+
+ max17042_override_por(map, MAX17042_AtRate, config->at_rate);
+ max17042_override_por(map, MAX17042_LearnCFG, config->learn_cfg);
+ max17042_override_por(map, MAX17042_FilterCFG, config->filter_cfg);
+ max17042_override_por(map, MAX17042_RelaxCFG, config->relax_cfg);
+ max17042_override_por(map, MAX17042_MiscCFG, config->misc_cfg);
+ max17042_override_por(map, MAX17042_MaskSOC, config->masksoc);
+
+ max17042_override_por(map, MAX17042_FullCAP, config->fullcap);
+ max17042_override_por(map, MAX17042_FullCAPNom, config->fullcapnom);
if (chip->chip_type == MAX17042)
- max17042_override_por(client, MAX17042_SOC_empty,
+ max17042_override_por(map, MAX17042_SOC_empty,
config->socempty);
- max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
- max17042_override_por(client, MAX17042_dQacc, config->dqacc);
- max17042_override_por(client, MAX17042_dPacc, config->dpacc);
+ max17042_override_por(map, MAX17042_LAvg_empty, config->lavg_empty);
+ max17042_override_por(map, MAX17042_dQacc, config->dqacc);
+ max17042_override_por(map, MAX17042_dPacc, config->dpacc);
if (chip->chip_type == MAX17042)
- max17042_override_por(client, MAX17042_V_empty, config->vempty);
+ max17042_override_por(map, MAX17042_V_empty, config->vempty);
else
- max17042_override_por(client, MAX17047_V_empty, config->vempty);
- max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
- max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
- max17042_override_por(client, MAX17042_FCTC, config->fctc);
- max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
- max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
+ max17042_override_por(map, MAX17047_V_empty, config->vempty);
+ max17042_override_por(map, MAX17042_TempNom, config->temp_nom);
+ max17042_override_por(map, MAX17042_TempLim, config->temp_lim);
+ max17042_override_por(map, MAX17042_FCTC, config->fctc);
+ max17042_override_por(map, MAX17042_RCOMP0, config->rcomp0);
+ max17042_override_por(map, MAX17042_TempCo, config->tcompc0);
if (chip->chip_type) {
- max17042_override_por(client, MAX17042_EmptyTempCo,
- config->empty_tempco);
- max17042_override_por(client, MAX17042_K_empty0,
- config->kempty0);
+ max17042_override_por(map, MAX17042_EmptyTempCo,
+ config->empty_tempco);
+ max17042_override_por(map, MAX17042_K_empty0,
+ config->kempty0);
}
}
static int max17042_init_chip(struct max17042_chip *chip)
{
+ struct regmap *map = chip->regmap;
int ret;
int val;
@@ -597,31 +572,32 @@ static int max17042_init_chip(struct max17042_chip *chip)
max17042_load_new_capacity_params(chip);
/* Init complete, Clear the POR bit */
- val = max17042_read_reg(chip->client, MAX17042_STATUS);
- max17042_write_reg(chip->client, MAX17042_STATUS,
- val & (~STATUS_POR_BIT));
+ regmap_read(map, MAX17042_STATUS, &val);
+ regmap_write(map, MAX17042_STATUS, val & (~STATUS_POR_BIT));
return 0;
}
static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off)
{
- u16 soc, soc_tr;
+ struct regmap *map = chip->regmap;
+ u32 soc, soc_tr;
/* program interrupt thresholds such that we should
* get an interrupt for every 'off' percent change in the SOC
*/
- soc = max17042_read_reg(chip->client, MAX17042_RepSOC) >> 8;
+ regmap_read(map, MAX17042_RepSOC, &soc);
+ soc >>= 8;
soc_tr = (soc + off) << 8;
soc_tr |= (soc - off);
- max17042_write_reg(chip->client, MAX17042_SALRT_Th, soc_tr);
+ regmap_write(map, MAX17042_SALRT_Th, soc_tr);
}
static irqreturn_t max17042_thread_handler(int id, void *dev)
{
struct max17042_chip *chip = dev;
- u16 val;
+ u32 val;
- val = max17042_read_reg(chip->client, MAX17042_STATUS);
+ regmap_read(chip->regmap, MAX17042_STATUS, &val);
if ((val & STATUS_INTR_SOCMIN_BIT) ||
(val & STATUS_INTR_SOCMAX_BIT)) {
dev_info(&chip->client->dev, "SOC threshold INTR\n");
@@ -682,13 +658,20 @@ max17042_get_pdata(struct device *dev)
}
#endif
+static struct regmap_config max17042_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+};
+
static int max17042_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct max17042_chip *chip;
int ret;
- int reg;
+ int i;
+ u32 val;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EIO;
@@ -698,6 +681,12 @@ static int max17042_probe(struct i2c_client *client,
return -ENOMEM;
chip->client = client;
+ chip->regmap = devm_regmap_init_i2c(client, &max17042_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ dev_err(&client->dev, "Failed to initialize regmap\n");
+ return -EINVAL;
+ }
+
chip->pdata = max17042_get_pdata(&client->dev);
if (!chip->pdata) {
dev_err(&client->dev, "no platform data provided\n");
@@ -706,15 +695,15 @@ static int max17042_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
- ret = max17042_read_reg(chip->client, MAX17042_DevName);
- if (ret == MAX17042_IC_VERSION) {
+ regmap_read(chip->regmap, MAX17042_DevName, &val);
+ if (val == MAX17042_IC_VERSION) {
dev_dbg(&client->dev, "chip type max17042 detected\n");
chip->chip_type = MAX17042;
- } else if (ret == MAX17047_IC_VERSION) {
+ } else if (val == MAX17047_IC_VERSION) {
dev_dbg(&client->dev, "chip type max17047/50 detected\n");
chip->chip_type = MAX17047;
} else {
- dev_err(&client->dev, "device version mismatch: %x\n", ret);
+ dev_err(&client->dev, "device version mismatch: %x\n", val);
return -EIO;
}
@@ -733,13 +722,15 @@ static int max17042_probe(struct i2c_client *client,
chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
if (chip->pdata->init_data)
- max17042_set_reg(client, chip->pdata->init_data,
- chip->pdata->num_init_data);
+ for (i = 0; i < chip->pdata->num_init_data; i++)
+ regmap_write(chip->regmap,
+ chip->pdata->init_data[i].addr,
+ chip->pdata->init_data[i].data);
if (!chip->pdata->enable_current_sense) {
- max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
- max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
- max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
+ regmap_write(chip->regmap, MAX17042_CGAIN, 0x0000);
+ regmap_write(chip->regmap, MAX17042_MiscCFG, 0x0003);
+ regmap_write(chip->regmap, MAX17042_LearnCFG, 0x0007);
}
ret = power_supply_register(&client->dev, &chip->battery);
@@ -754,9 +745,9 @@ static int max17042_probe(struct i2c_client *client,
IRQF_TRIGGER_FALLING,
chip->battery.name, chip);
if (!ret) {
- reg = max17042_read_reg(client, MAX17042_CONFIG);
- reg |= CONFIG_ALRT_BIT_ENBL;
- max17042_write_reg(client, MAX17042_CONFIG, reg);
+ regmap_read(chip->regmap, MAX17042_CONFIG, &val);
+ val |= CONFIG_ALRT_BIT_ENBL;
+ regmap_write(chip->regmap, MAX17042_CONFIG, val);
max17042_set_soc_threshold(chip, 1);
} else {
client->irq = 0;
@@ -765,8 +756,8 @@ static int max17042_probe(struct i2c_client *client,
}
}
- reg = max17042_read_reg(chip->client, MAX17042_STATUS);
- if (reg & STATUS_POR_BIT) {
+ regmap_read(chip->regmap, MAX17042_STATUS, &val);
+ if (val & STATUS_POR_BIT) {
INIT_WORK(&chip->work, max17042_init_worker);
schedule_work(&chip->work);
} else {
@@ -786,7 +777,7 @@ static int max17042_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int max17042_suspend(struct device *dev)
{
struct max17042_chip *chip = dev_get_drvdata(dev);
@@ -816,17 +807,11 @@ static int max17042_resume(struct device *dev)
return 0;
}
-
-static const struct dev_pm_ops max17042_pm_ops = {
- .suspend = max17042_suspend,
- .resume = max17042_resume,
-};
-
-#define MAX17042_PM_OPS (&max17042_pm_ops)
-#else
-#define MAX17042_PM_OPS NULL
#endif
+static SIMPLE_DEV_PM_OPS(max17042_pm_ops, max17042_suspend,
+ max17042_resume);
+
#ifdef CONFIG_OF
static const struct of_device_id max17042_dt_match[] = {
{ .compatible = "maxim,max17042" },
@@ -849,7 +834,7 @@ static struct i2c_driver max17042_i2c_driver = {
.driver = {
.name = "max17042",
.of_match_table = of_match_ptr(max17042_dt_match),
- .pm = MAX17042_PM_OPS,
+ .pm = &max17042_pm_ops,
},
.probe = max17042_probe,
.remove = max17042_remove,
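The regmap conversion above repeats one pattern everywhere; here is a minimal, hedged sketch of that pattern for a hypothetical 16-bit register, where REG_EXAMPLE and the function name are illustrative and not part of this patch:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

#define REG_EXAMPLE	0x06	/* hypothetical 16-bit register address */

/* Sketch only: mirrors the devm_regmap_init_i2c() plus regmap_read()/
 * regmap_write() usage added to max17042_battery.c above.
 */
static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 16,
	.val_format_endian = REGMAP_ENDIAN_NATIVE,
};

static int example_touch_register(struct i2c_client *client)
{
	struct regmap *map;
	unsigned int val;
	int ret;

	map = devm_regmap_init_i2c(client, &example_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	ret = regmap_read(map, REG_EXAMPLE, &val);	/* val receives the 16-bit word */
	if (ret)
		return ret;

	return regmap_write(map, REG_EXAMPLE, val | 0x1);
}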
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c
index ffa10ed83eb1..b733c692a17f 100644
--- a/drivers/power/pm2301_charger.c
+++ b/drivers/power/pm2301_charger.c
@@ -205,7 +205,7 @@ static int pm2xxx_charger_batt_therm_mngt(struct pm2xxx_charger *pm2, int val)
}
-int pm2xxx_charger_die_therm_mngt(struct pm2xxx_charger *pm2, int val)
+static int pm2xxx_charger_die_therm_mngt(struct pm2xxx_charger *pm2, int val)
{
queue_work(pm2->charger_wq, &pm2->check_main_thermal_prot_work);
@@ -722,8 +722,12 @@ static int pm2xxx_charger_ac_en(struct ux500_charger *charger,
dev_dbg(pm2->dev, "Enable AC: %dmV %dmA\n", vset, iset);
if (!pm2->vddadc_en_ac) {
- regulator_enable(pm2->regu);
- pm2->vddadc_en_ac = true;
+ ret = regulator_enable(pm2->regu);
+ if (ret)
+ dev_warn(pm2->dev,
+ "Failed to enable vddadc regulator\n");
+ else
+ pm2->vddadc_en_ac = true;
}
ret = pm2xxx_charging_init(pm2);
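The hunk above exists because regulator_enable() can fail and is annotated __must_check; the driver chooses to warn and carry on. A hedged sketch of that checked-enable pattern with placeholder names (dev, reg, enabled):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

/* Sketch only: check the enable result and update driver state only on
 * success, warning and continuing as the pm2301 change above does.
 */
static void example_enable_checked(struct device *dev, struct regulator *reg,
				   bool *enabled)
{
	int ret = regulator_enable(reg);

	if (ret)
		dev_warn(dev, "failed to enable regulator: %d\n", ret);
	else
		*enabled = true;
}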
diff --git a/drivers/power/tps65090-charger.c b/drivers/power/tps65090-charger.c
index bdd7b9b2546a..8fc9d6df87f6 100644
--- a/drivers/power/tps65090-charger.c
+++ b/drivers/power/tps65090-charger.c
@@ -15,15 +15,17 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
+#include <linux/slab.h>
+
#include <linux/mfd/tps65090.h>
#define TPS65090_REG_INTR_STS 0x00
@@ -185,10 +187,6 @@ static irqreturn_t tps65090_charger_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#if defined(CONFIG_OF)
-
-#include <linux/of_device.h>
-
static struct tps65090_platform_data *
tps65090_parse_dt_charger_data(struct platform_device *pdev)
{
@@ -210,13 +208,6 @@ static struct tps65090_platform_data *
return pdata;
}
-#else
-static struct tps65090_platform_data *
- tps65090_parse_dt_charger_data(struct platform_device *pdev)
-{
- return NULL;
-}
-#endif
static int tps65090_charger_probe(struct platform_device *pdev)
{
@@ -228,7 +219,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
pdata = dev_get_platdata(pdev->dev.parent);
- if (!pdata && pdev->dev.of_node)
+ if (IS_ENABLED(CONFIG_OF) && !pdata && pdev->dev.of_node)
pdata = tps65090_parse_dt_charger_data(pdev);
if (!pdata) {
@@ -277,13 +268,13 @@ static int tps65090_charger_probe(struct platform_device *pdev)
if (ret) {
dev_err(cdata->dev, "Unable to register irq %d err %d\n", irq,
ret);
- goto fail_free_irq;
+ goto fail_unregister_supply;
}
ret = tps65090_config_charger(cdata);
if (ret < 0) {
dev_err(&pdev->dev, "charger config failed, err %d\n", ret);
- goto fail_free_irq;
+ goto fail_unregister_supply;
}
/* Check for charger presence */
@@ -292,14 +283,14 @@ static int tps65090_charger_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(cdata->dev, "%s(): Error in reading reg 0x%x", __func__,
TPS65090_REG_CG_STATUS1);
- goto fail_free_irq;
+ goto fail_unregister_supply;
}
if (status1 != 0) {
ret = tps65090_enable_charging(cdata);
if (ret < 0) {
dev_err(cdata->dev, "error enabling charger\n");
- goto fail_free_irq;
+ goto fail_unregister_supply;
}
cdata->ac_online = 1;
power_supply_changed(&cdata->ac);
@@ -307,8 +298,6 @@ static int tps65090_charger_probe(struct platform_device *pdev)
return 0;
-fail_free_irq:
- devm_free_irq(cdata->dev, irq, cdata);
fail_unregister_supply:
power_supply_unregister(&cdata->ac);
@@ -319,7 +308,6 @@ static int tps65090_charger_remove(struct platform_device *pdev)
{
struct tps65090_charger *cdata = platform_get_drvdata(pdev);
- devm_free_irq(cdata->dev, cdata->irq, cdata);
power_supply_unregister(&cdata->ac);
return 0;
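The error-path changes above work because the interrupt is requested with a devm_* helper, so it is released automatically on probe failure or unbind; only non-devm resources (the registered power supply) need explicit unwinding. A short sketch under that assumption, with example_charger_isr and the function name as placeholders:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_charger_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* placeholder handler */
}

/* Sketch only: a devm-managed IRQ needs no matching devm_free_irq() on
 * error paths or in remove(), matching the tps65090 cleanup above.
 */
static int example_request_charger_irq(struct platform_device *pdev, int irq,
				       void *cdata)
{
	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 example_charger_isr, IRQF_ONESHOT,
					 "example-charger", cdata);
}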
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
new file mode 100644
index 000000000000..a7c81b53d88a
--- /dev/null
+++ b/drivers/powercap/Kconfig
@@ -0,0 +1,32 @@
+#
+# Generic power capping sysfs interface configuration
+#
+
+menuconfig POWERCAP
+ bool "Generic powercap sysfs driver"
+ help
+ The power capping sysfs interface allows kernel subsystems to expose power
+ capping settings to user space in a consistent way. Usually, it consists
+ of multiple control types that determine which settings may be exposed and
+ power zones representing parts of the system that can be subject to power
+ capping.
+
+ If you want this code to be compiled in, say Y here.
+
+if POWERCAP
+# Client driver configurations go here.
+config INTEL_RAPL
+ tristate "Intel RAPL Support"
+ depends on X86
+ default n
+ ---help---
+ This enables support for the Intel Running Average Power Limit (RAPL)
+ technology which allows power limits to be enforced and monitored on
+ modern Intel processors (Sandy Bridge and later).
+
+ In RAPL, the platform level settings are divided into domains for
+ fine grained control. These domains include processor package, DRAM
+ controller, CPU core (Power Plane 0), graphics uncore (Power Plane
+ 1), etc.
+
+endif
diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile
new file mode 100644
index 000000000000..0a21ef31372b
--- /dev/null
+++ b/drivers/powercap/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_POWERCAP) += powercap_sys.o
+obj-$(CONFIG_INTEL_RAPL) += intel_rapl.o
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
new file mode 100644
index 000000000000..2a786c504460
--- /dev/null
+++ b/drivers/powercap/intel_rapl.c
@@ -0,0 +1,1395 @@
+/*
+ * Intel Running Average Power Limit (RAPL) Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/log2.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/cpu.h>
+#include <linux/powercap.h>
+
+#include <asm/processor.h>
+#include <asm/cpu_device_id.h>
+
+/* bitmasks for RAPL MSRs, used by primitive access functions */
+#define ENERGY_STATUS_MASK 0xffffffff
+
+#define POWER_LIMIT1_MASK 0x7FFF
+#define POWER_LIMIT1_ENABLE BIT(15)
+#define POWER_LIMIT1_CLAMP BIT(16)
+
+#define POWER_LIMIT2_MASK (0x7FFFULL<<32)
+#define POWER_LIMIT2_ENABLE BIT_ULL(47)
+#define POWER_LIMIT2_CLAMP BIT_ULL(48)
+#define POWER_PACKAGE_LOCK BIT_ULL(63)
+#define POWER_PP_LOCK BIT(31)
+
+#define TIME_WINDOW1_MASK (0x7FULL<<17)
+#define TIME_WINDOW2_MASK (0x7FULL<<49)
+
+#define POWER_UNIT_OFFSET 0
+#define POWER_UNIT_MASK 0x0F
+
+#define ENERGY_UNIT_OFFSET 0x08
+#define ENERGY_UNIT_MASK 0x1F00
+
+#define TIME_UNIT_OFFSET 0x10
+#define TIME_UNIT_MASK 0xF0000
+
+#define POWER_INFO_MAX_MASK (0x7fffULL<<32)
+#define POWER_INFO_MIN_MASK (0x7fffULL<<16)
+#define POWER_INFO_MAX_TIME_WIN_MASK (0x3fULL<<48)
+#define POWER_INFO_THERMAL_SPEC_MASK 0x7fff
+
+#define PERF_STATUS_THROTTLE_TIME_MASK 0xffffffff
+#define PP_POLICY_MASK 0x1F
+
+/* Non HW constants */
+#define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */
+#define RAPL_PRIMITIVE_DUMMY BIT(2)
+
+/* scale RAPL units to avoid floating point math inside kernel */
+#define POWER_UNIT_SCALE (1000000)
+#define ENERGY_UNIT_SCALE (1000000)
+#define TIME_UNIT_SCALE (1000000)
+
+#define TIME_WINDOW_MAX_MSEC 40000
+#define TIME_WINDOW_MIN_MSEC 250
+
+enum unit_type {
+ ARBITRARY_UNIT, /* no translation */
+ POWER_UNIT,
+ ENERGY_UNIT,
+ TIME_UNIT,
+};
+
+enum rapl_domain_type {
+ RAPL_DOMAIN_PACKAGE, /* entire package/socket */
+ RAPL_DOMAIN_PP0, /* core power plane */
+ RAPL_DOMAIN_PP1, /* graphics uncore */
+ RAPL_DOMAIN_DRAM,/* DRAM control_type */
+ RAPL_DOMAIN_MAX,
+};
+
+enum rapl_domain_msr_id {
+ RAPL_DOMAIN_MSR_LIMIT,
+ RAPL_DOMAIN_MSR_STATUS,
+ RAPL_DOMAIN_MSR_PERF,
+ RAPL_DOMAIN_MSR_POLICY,
+ RAPL_DOMAIN_MSR_INFO,
+ RAPL_DOMAIN_MSR_MAX,
+};
+
+/* per domain data, some are optional */
+enum rapl_primitives {
+ ENERGY_COUNTER,
+ POWER_LIMIT1,
+ POWER_LIMIT2,
+ FW_LOCK,
+
+ PL1_ENABLE, /* power limit 1, aka long term */
+ PL1_CLAMP, /* allow frequency to go below OS request */
+ PL2_ENABLE, /* power limit 2, aka short term, instantaneous */
+ PL2_CLAMP,
+
+ TIME_WINDOW1, /* long term */
+ TIME_WINDOW2, /* short term */
+ THERMAL_SPEC_POWER,
+ MAX_POWER,
+
+ MIN_POWER,
+ MAX_TIME_WINDOW,
+ THROTTLED_TIME,
+ PRIORITY_LEVEL,
+
+ /* below are not raw primitive data */
+ AVERAGE_POWER,
+ NR_RAPL_PRIMITIVES,
+};
+
+#define NR_RAW_PRIMITIVES (NR_RAPL_PRIMITIVES - 2)
+
+/* Can be expanded to include events, etc. */
+struct rapl_domain_data {
+ u64 primitives[NR_RAPL_PRIMITIVES];
+ unsigned long timestamp;
+};
+
+
+#define DOMAIN_STATE_INACTIVE BIT(0)
+#define DOMAIN_STATE_POWER_LIMIT_SET BIT(1)
+#define DOMAIN_STATE_BIOS_LOCKED BIT(2)
+
+#define NR_POWER_LIMITS (2)
+struct rapl_power_limit {
+ struct powercap_zone_constraint *constraint;
+ int prim_id; /* primitive ID used to enable */
+ struct rapl_domain *domain;
+ const char *name;
+};
+
+static const char pl1_name[] = "long_term";
+static const char pl2_name[] = "short_term";
+
+struct rapl_domain {
+ const char *name;
+ enum rapl_domain_type id;
+ int msrs[RAPL_DOMAIN_MSR_MAX];
+ struct powercap_zone power_zone;
+ struct rapl_domain_data rdd;
+ struct rapl_power_limit rpl[NR_POWER_LIMITS];
+ u64 attr_map; /* track capabilities */
+ unsigned int state;
+ int package_id;
+};
+#define power_zone_to_rapl_domain(_zone) \
+ container_of(_zone, struct rapl_domain, power_zone)
+
+
+/* Each physical package contains multiple domains; these are the common
+ * data across RAPL domains within a package.
+ */
+struct rapl_package {
+ unsigned int id; /* physical package/socket id */
+ unsigned int nr_domains;
+ unsigned long domain_map; /* bit map of active domains */
+ unsigned int power_unit_divisor;
+ unsigned int energy_unit_divisor;
+ unsigned int time_unit_divisor;
+ struct rapl_domain *domains; /* array of domains, sized at runtime */
+ struct powercap_zone *power_zone; /* keep track of parent zone */
+ int nr_cpus; /* active cpus on the package; topology info is lost during
+ * cpu hotplug, so we have to track it ourselves.
+ */
+ unsigned long power_limit_irq; /* keep track of package power limit
+ * notify interrupt enable status.
+ */
+ struct list_head plist;
+};
+#define PACKAGE_PLN_INT_SAVED BIT(0)
+#define MAX_PRIM_NAME (32)
+
+/* per domain data, used to describe individual knobs such that access
+ * functions can be consolidated into one instead of many inline functions.
+ */
+struct rapl_primitive_info {
+ const char *name;
+ u64 mask;
+ int shift;
+ enum rapl_domain_msr_id id;
+ enum unit_type unit;
+ u32 flag;
+};
+
+#define PRIMITIVE_INFO_INIT(p, m, s, i, u, f) { \
+ .name = #p, \
+ .mask = m, \
+ .shift = s, \
+ .id = i, \
+ .unit = u, \
+ .flag = f \
+ }
+
+static void rapl_init_domains(struct rapl_package *rp);
+static int rapl_read_data_raw(struct rapl_domain *rd,
+ enum rapl_primitives prim,
+ bool xlate, u64 *data);
+static int rapl_write_data_raw(struct rapl_domain *rd,
+ enum rapl_primitives prim,
+ unsigned long long value);
+static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+ int to_raw);
+static void package_power_limit_irq_save(int package_id);
+
+static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */
+
+static const char * const rapl_domain_names[] = {
+ "package",
+ "core",
+ "uncore",
+ "dram",
+};
+
+static struct powercap_control_type *control_type; /* PowerCap Controller */
+
+/* caller to ensure CPU hotplug lock is held */
+static struct rapl_package *find_package_by_id(int id)
+{
+ struct rapl_package *rp;
+
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ if (rp->id == id)
+ return rp;
+ }
+
+ return NULL;
+}
+
+/* caller to ensure CPU hotplug lock is held */
+static int find_active_cpu_on_package(int package_id)
+{
+ int i;
+
+ for_each_online_cpu(i) {
+ if (topology_physical_package_id(i) == package_id)
+ return i;
+ }
+ /* all CPUs on this package are offline */
+
+ return -ENODEV;
+}
+
+/* caller must hold cpu hotplug lock */
+static void rapl_cleanup_data(void)
+{
+ struct rapl_package *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &rapl_packages, plist) {
+ kfree(p->domains);
+ list_del(&p->plist);
+ kfree(p);
+ }
+}
+
+static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
+{
+ struct rapl_domain *rd;
+ u64 energy_now;
+
+ /* prevent CPU hotplug, make sure the RAPL domain does not go
+ * away while reading the counter.
+ */
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+
+ if (!rapl_read_data_raw(rd, ENERGY_COUNTER, true, &energy_now)) {
+ *energy_raw = energy_now;
+ put_online_cpus();
+
+ return 0;
+ }
+ put_online_cpus();
+
+ return -EIO;
+}
+
+static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
+{
+ *energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
+ return 0;
+}
+
+static int release_zone(struct powercap_zone *power_zone)
+{
+ struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+ struct rapl_package *rp;
+
+ /* The package zone is the last zone of a package; we can free
+ * memory here since all children have been unregistered.
+ */
+ if (rd->id == RAPL_DOMAIN_PACKAGE) {
+ rp = find_package_by_id(rd->package_id);
+ if (!rp) {
+ dev_warn(&power_zone->dev, "no package id %s\n",
+ rd->name);
+ return -ENODEV;
+ }
+ kfree(rd);
+ rp->domains = NULL;
+ }
+
+ return 0;
+
+}
+
+static int find_nr_power_limit(struct rapl_domain *rd)
+{
+ int i;
+
+ for (i = 0; i < NR_POWER_LIMITS; i++) {
+ if (rd->rpl[i].name == NULL)
+ break;
+ }
+
+ return i;
+}
+
+static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
+{
+ struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+ int nr_powerlimit;
+
+ if (rd->state & DOMAIN_STATE_BIOS_LOCKED)
+ return -EACCES;
+ get_online_cpus();
+ nr_powerlimit = find_nr_power_limit(rd);
+ /* here we activate/deactivate the hardware for power limiting */
+ rapl_write_data_raw(rd, PL1_ENABLE, mode);
+ /* always enable clamp such that p-state can go below OS requested
+ * range. Power capping takes priority over guaranteed frequency.
+ */
+ rapl_write_data_raw(rd, PL1_CLAMP, mode);
+ /* some domains have pl2 */
+ if (nr_powerlimit > 1) {
+ rapl_write_data_raw(rd, PL2_ENABLE, mode);
+ rapl_write_data_raw(rd, PL2_CLAMP, mode);
+ }
+ put_online_cpus();
+
+ return 0;
+}
+
+static int get_domain_enable(struct powercap_zone *power_zone, bool *mode)
+{
+ struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+ u64 val;
+
+ if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
+ *mode = false;
+ return 0;
+ }
+ get_online_cpus();
+ if (rapl_read_data_raw(rd, PL1_ENABLE, true, &val)) {
+ put_online_cpus();
+ return -EIO;
+ }
+ *mode = val;
+ put_online_cpus();
+
+ return 0;
+}
+
+/* per RAPL domain ops, in the order of rapl_domain_type */
+static struct powercap_zone_ops zone_ops[] = {
+ /* RAPL_DOMAIN_PACKAGE */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
+ /* RAPL_DOMAIN_PP0 */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
+ /* RAPL_DOMAIN_PP1 */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
+ /* RAPL_DOMAIN_DRAM */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
+};
+
+static int set_power_limit(struct powercap_zone *power_zone, int id,
+ u64 power_limit)
+{
+ struct rapl_domain *rd;
+ struct rapl_package *rp;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ rp = find_package_by_id(rd->package_id);
+ if (!rp) {
+ ret = -ENODEV;
+ goto set_exit;
+ }
+
+ if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
+ dev_warn(&power_zone->dev, "%s locked by BIOS, monitoring only\n",
+ rd->name);
+ ret = -EACCES;
+ goto set_exit;
+ }
+
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ rapl_write_data_raw(rd, POWER_LIMIT1, power_limit);
+ break;
+ case PL2_ENABLE:
+ rapl_write_data_raw(rd, POWER_LIMIT2, power_limit);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (!ret)
+ package_power_limit_irq_save(rd->package_id);
+set_exit:
+ put_online_cpus();
+ return ret;
+}
+
+static int get_current_power_limit(struct powercap_zone *power_zone, int id,
+ u64 *data)
+{
+ struct rapl_domain *rd;
+ u64 val;
+ int prim;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ prim = POWER_LIMIT1;
+ break;
+ case PL2_ENABLE:
+ prim = POWER_LIMIT2;
+ break;
+ default:
+ put_online_cpus();
+ return -EINVAL;
+ }
+ if (rapl_read_data_raw(rd, prim, true, &val))
+ ret = -EIO;
+ else
+ *data = val;
+
+ put_online_cpus();
+
+ return ret;
+}
+
+static int set_time_window(struct powercap_zone *power_zone, int id,
+ u64 window)
+{
+ struct rapl_domain *rd;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ rapl_write_data_raw(rd, TIME_WINDOW1, window);
+ break;
+ case PL2_ENABLE:
+ rapl_write_data_raw(rd, TIME_WINDOW2, window);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ put_online_cpus();
+ return ret;
+}
+
+static int get_time_window(struct powercap_zone *power_zone, int id, u64 *data)
+{
+ struct rapl_domain *rd;
+ u64 val;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ ret = rapl_read_data_raw(rd, TIME_WINDOW1, true, &val);
+ break;
+ case PL2_ENABLE:
+ ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val);
+ break;
+ default:
+ put_online_cpus();
+ return -EINVAL;
+ }
+ if (!ret)
+ *data = val;
+ put_online_cpus();
+
+ return ret;
+}
+
+static const char *get_constraint_name(struct powercap_zone *power_zone, int id)
+{
+ struct rapl_power_limit *rpl;
+ struct rapl_domain *rd;
+
+ rd = power_zone_to_rapl_domain(power_zone);
+ rpl = (struct rapl_power_limit *) &rd->rpl[id];
+
+ return rpl->name;
+}
+
+
+static int get_max_power(struct powercap_zone *power_zone, int id,
+ u64 *data)
+{
+ struct rapl_domain *rd;
+ u64 val;
+ int prim;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ prim = THERMAL_SPEC_POWER;
+ break;
+ case PL2_ENABLE:
+ prim = MAX_POWER;
+ break;
+ default:
+ put_online_cpus();
+ return -EINVAL;
+ }
+ if (rapl_read_data_raw(rd, prim, true, &val))
+ ret = -EIO;
+ else
+ *data = val;
+
+ put_online_cpus();
+
+ return ret;
+}
+
+static struct powercap_zone_constraint_ops constraint_ops = {
+ .set_power_limit_uw = set_power_limit,
+ .get_power_limit_uw = get_current_power_limit,
+ .set_time_window_us = set_time_window,
+ .get_time_window_us = get_time_window,
+ .get_max_power_uw = get_max_power,
+ .get_name = get_constraint_name,
+};
+
+/* called after domain detection and package level data are set */
+static void rapl_init_domains(struct rapl_package *rp)
+{
+ int i;
+ struct rapl_domain *rd = rp->domains;
+
+ for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
+ unsigned int mask = rp->domain_map & (1 << i);
+ switch (mask) {
+ case BIT(RAPL_DOMAIN_PACKAGE):
+ rd->name = rapl_domain_names[RAPL_DOMAIN_PACKAGE];
+ rd->id = RAPL_DOMAIN_PACKAGE;
+ rd->msrs[0] = MSR_PKG_POWER_LIMIT;
+ rd->msrs[1] = MSR_PKG_ENERGY_STATUS;
+ rd->msrs[2] = MSR_PKG_PERF_STATUS;
+ rd->msrs[3] = 0;
+ rd->msrs[4] = MSR_PKG_POWER_INFO;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ rd->rpl[1].prim_id = PL2_ENABLE;
+ rd->rpl[1].name = pl2_name;
+ break;
+ case BIT(RAPL_DOMAIN_PP0):
+ rd->name = rapl_domain_names[RAPL_DOMAIN_PP0];
+ rd->id = RAPL_DOMAIN_PP0;
+ rd->msrs[0] = MSR_PP0_POWER_LIMIT;
+ rd->msrs[1] = MSR_PP0_ENERGY_STATUS;
+ rd->msrs[2] = 0;
+ rd->msrs[3] = MSR_PP0_POLICY;
+ rd->msrs[4] = 0;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ break;
+ case BIT(RAPL_DOMAIN_PP1):
+ rd->name = rapl_domain_names[RAPL_DOMAIN_PP1];
+ rd->id = RAPL_DOMAIN_PP1;
+ rd->msrs[0] = MSR_PP1_POWER_LIMIT;
+ rd->msrs[1] = MSR_PP1_ENERGY_STATUS;
+ rd->msrs[2] = 0;
+ rd->msrs[3] = MSR_PP1_POLICY;
+ rd->msrs[4] = 0;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ break;
+ case BIT(RAPL_DOMAIN_DRAM):
+ rd->name = rapl_domain_names[RAPL_DOMAIN_DRAM];
+ rd->id = RAPL_DOMAIN_DRAM;
+ rd->msrs[0] = MSR_DRAM_POWER_LIMIT;
+ rd->msrs[1] = MSR_DRAM_ENERGY_STATUS;
+ rd->msrs[2] = MSR_DRAM_PERF_STATUS;
+ rd->msrs[3] = 0;
+ rd->msrs[4] = MSR_DRAM_POWER_INFO;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ break;
+ }
+ if (mask) {
+ rd->package_id = rp->id;
+ rd++;
+ }
+ }
+}
+
+static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+ int to_raw)
+{
+ u64 divisor = 1;
+ int scale = 1; /* scale to user friendly data without floating point */
+ u64 f, y; /* fraction and exp. used for time unit */
+ struct rapl_package *rp;
+
+ rp = find_package_by_id(package);
+ if (!rp)
+ return value;
+
+ switch (type) {
+ case POWER_UNIT:
+ divisor = rp->power_unit_divisor;
+ scale = POWER_UNIT_SCALE;
+ break;
+ case ENERGY_UNIT:
+ scale = ENERGY_UNIT_SCALE;
+ divisor = rp->energy_unit_divisor;
+ break;
+ case TIME_UNIT:
+ divisor = rp->time_unit_divisor;
+ scale = TIME_UNIT_SCALE;
+ /* special processing based on 2^Y * (1 + F/4) = val/divisor, refer
+ * to the Intel Software Developer's Manual Vol. 3a, CH 14.7.4.
+ */
+ if (!to_raw) {
+ f = (value & 0x60) >> 5;
+ y = value & 0x1f;
+ value = (1 << y) * (4 + f) * scale / 4;
+ return div64_u64(value, divisor);
+ } else {
+ do_div(value, scale);
+ value *= divisor;
+ y = ilog2(value);
+ f = div64_u64(4 * (value - (1 << y)), 1 << y);
+ value = (y & 0x1f) | ((f & 0x3) << 5);
+ return value;
+ }
+ break;
+ case ARBITRARY_UNIT:
+ default:
+ return value;
+ };
+
+ if (to_raw)
+ return div64_u64(value * divisor, scale);
+ else
+ return div64_u64(value * scale, divisor);
+}
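A worked instance of the TIME_UNIT decode above, assuming a raw 7-bit field of 0x21 and a time_unit_divisor of 1024 (a typical but not guaranteed value); it reuses TIME_UNIT_SCALE from this file:

/* Sketch only: f = (0x21 & 0x60) >> 5 = 1, y = 0x21 & 0x1f = 1, so the
 * window is 2^1 * (1 + 1/4) = 2.5 time units, i.e. ~2441 us at 1/1024 s.
 */
static u64 example_decode_time_window_us(void)
{
	u64 value = 0x21;	/* raw time window field, illustrative */
	u64 divisor = 1024;	/* from MSR_RAPL_POWER_UNIT, illustrative */
	u64 f = (value & 0x60) >> 5;
	u64 y = value & 0x1f;

	value = (1 << y) * (4 + f) * TIME_UNIT_SCALE / 4;	/* 2500000 */
	return div64_u64(value, divisor);			/* 2441 */
}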
+
+/* in the order of enum rapl_primitives */
+static struct rapl_primitive_info rpi[] = {
+ /* name, mask, shift, msr index, unit divisor */
+ PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
+ RAPL_DOMAIN_MSR_STATUS, ENERGY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0,
+ RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
+ RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(FW_LOCK, POWER_PP_LOCK, 31,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
+ RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
+ PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
+ RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
+ PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK,
+ 0, RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32,
+ RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16,
+ RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48,
+ RAPL_DOMAIN_MSR_INFO, TIME_UNIT, 0),
+ PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
+ RAPL_DOMAIN_MSR_PERF, TIME_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0,
+ RAPL_DOMAIN_MSR_POLICY, ARBITRARY_UNIT, 0),
+ /* non-hardware */
+ PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT,
+ RAPL_PRIMITIVE_DERIVED),
+ {NULL, 0, 0, 0},
+};
+
+/* Read primitive data based on its related struct rapl_primitive_info.
+ * If the xlate flag is set, return translated data based on data units, i.e.
+ * time, energy, and power.
+ * RAPL MSRs are non-architectural and are not laid out consistently across
+ * domains. Here we use primitive info to allow writing consolidated access
+ * functions.
+ * For a given primitive, it is processed by MSR mask and shift. Unit conversion
+ * is pre-assigned based on RAPL unit MSRs read at init time.
+ * 63-------------------------- 31--------------------------- 0
+ * | xxxxx (mask) |
+ * | |<- shift ----------------|
+ * 63-------------------------- 31--------------------------- 0
+ */
+static int rapl_read_data_raw(struct rapl_domain *rd,
+ enum rapl_primitives prim,
+ bool xlate, u64 *data)
+{
+ u64 value, final;
+ u32 msr;
+ struct rapl_primitive_info *rp = &rpi[prim];
+ int cpu;
+
+ if (!rp->name || rp->flag & RAPL_PRIMITIVE_DUMMY)
+ return -EINVAL;
+
+ msr = rd->msrs[rp->id];
+ if (!msr)
+ return -EINVAL;
+ /* use physical package id to look up active cpus */
+ cpu = find_active_cpu_on_package(rd->package_id);
+ if (cpu < 0)
+ return cpu;
+
+ /* special-case package domain, which uses a different bit */
+ if (prim == FW_LOCK && rd->id == RAPL_DOMAIN_PACKAGE) {
+ rp->mask = POWER_PACKAGE_LOCK;
+ rp->shift = 63;
+ }
+ /* non-hardware data are collected by the polling thread */
+ if (rp->flag & RAPL_PRIMITIVE_DERIVED) {
+ *data = rd->rdd.primitives[prim];
+ return 0;
+ }
+
+ if (rdmsrl_safe_on_cpu(cpu, msr, &value)) {
+ pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu);
+ return -EIO;
+ }
+
+ final = value & rp->mask;
+ final = final >> rp->shift;
+ if (xlate)
+ *data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0);
+ else
+ *data = final;
+
+ return 0;
+}
+
+/* Similar use of primitive info as in the read counterpart above */
+static int rapl_write_data_raw(struct rapl_domain *rd,
+ enum rapl_primitives prim,
+ unsigned long long value)
+{
+ u64 msr_val;
+ u32 msr;
+ struct rapl_primitive_info *rp = &rpi[prim];
+ int cpu;
+
+ cpu = find_active_cpu_on_package(rd->package_id);
+ if (cpu < 0)
+ return cpu;
+ msr = rd->msrs[rp->id];
+ if (rdmsrl_safe_on_cpu(cpu, msr, &msr_val)) {
+ dev_dbg(&rd->power_zone.dev,
+ "failed to read msr 0x%x on cpu %d\n", msr, cpu);
+ return -EIO;
+ }
+ value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1);
+ msr_val &= ~rp->mask;
+ msr_val |= value << rp->shift;
+ if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
+ dev_dbg(&rd->power_zone.dev,
+ "failed to write msr 0x%x on cpu %d\n", msr, cpu);
+ return -EIO;
+ }
+
+ return 0;
+}
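Callers such as set_power_limit() above pass values in scaled user units (microwatts here) and let the two raw accessors handle masking, shifting, and unit translation. A compressed usage sketch; the 15 W figure is only an example:

/* Sketch only: set a long-term limit and read it back through the same
 * helpers used by the powercap callbacks above.
 */
static int example_set_and_verify_pl1(struct rapl_domain *rd)
{
	u64 limit_uw;
	int ret;

	ret = rapl_write_data_raw(rd, POWER_LIMIT1, 15000000);	/* 15 W */
	if (ret)
		return ret;
	ret = rapl_read_data_raw(rd, POWER_LIMIT1, true, &limit_uw);
	if (ret)
		return ret;
	pr_debug("PL1 is now %llu uW\n", limit_uw);
	return 0;
}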
+
+static int rapl_check_unit(struct rapl_package *rp, int cpu)
+{
+ u64 msr_val;
+ u32 value;
+
+ if (rdmsrl_safe_on_cpu(cpu, MSR_RAPL_POWER_UNIT, &msr_val)) {
+ pr_err("Failed to read power unit MSR 0x%x on CPU %d, exit.\n",
+ MSR_RAPL_POWER_UNIT, cpu);
+ return -ENODEV;
+ }
+
+ /* Raw RAPL data stored in MSRs are in certain scales. We need to
+ * convert them into standard units based on the divisors reported in
+ * the RAPL unit MSRs.
+ * i.e.
+ * energy unit: 1/energy_unit_divisor Joules
+ * power unit: 1/power_unit_divisor Watts
+ * time unit: 1/time_unit_divisor Seconds
+ */
+ value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
+ rp->energy_unit_divisor = 1 << value;
+
+
+ value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
+ rp->power_unit_divisor = 1 << value;
+
+ value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
+ rp->time_unit_divisor = 1 << value;
+
+ pr_debug("Physical package %d units: energy=%d, time=%d, power=%d\n",
+ rp->id,
+ rp->energy_unit_divisor,
+ rp->time_unit_divisor,
+ rp->power_unit_divisor);
+
+ return 0;
+}
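To make the divisor math above concrete: with MSR_RAPL_POWER_UNIT reading 0x000A0E03 (a value often seen on Sandy Bridge class parts, used here purely as an example), the three fields decode as shown:

/* Sketch only: decodes the illustrative value 0x000A0E03 with the masks
 * defined at the top of this file.
 */
static void example_decode_units(void)
{
	u64 msr_val = 0x000A0E03;
	u32 power  = 1 << ((msr_val & POWER_UNIT_MASK)  >> POWER_UNIT_OFFSET);  /* 8, i.e. 1/8 W      */
	u32 energy = 1 << ((msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET); /* 16384, i.e. ~61 uJ */
	u32 time   = 1 << ((msr_val & TIME_UNIT_MASK)   >> TIME_UNIT_OFFSET);   /* 1024, i.e. ~977 us */

	pr_debug("divisors: power=%u energy=%u time=%u\n", power, energy, time);
}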
+
+/* REVISIT:
+ * When package power limit is set artificially low by RAPL, LVT
+ * thermal interrupt for package power limit should be ignored
+ * since we are not really exceeding the real limit. The intention
+ * is to avoid excessive interrupts while we are trying to save power.
+ * A useful feature might be routing the package_power_limit interrupt
+ * to userspace via eventfd. Once we have a use case, this is simple
+ * to do by adding an atomic notifier.
+ */
+
+static void package_power_limit_irq_save(int package_id)
+{
+ u32 l, h = 0;
+ int cpu;
+ struct rapl_package *rp;
+
+ rp = find_package_by_id(package_id);
+ if (!rp)
+ return;
+
+ if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+ return;
+
+ cpu = find_active_cpu_on_package(package_id);
+ if (cpu < 0)
+ return;
+ /* save the state of PLN irq mask bit before disabling it */
+ rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+ if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
+ rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
+ rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
+ }
+ l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+ wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+/* restore per package power limit interrupt enable state */
+static void package_power_limit_irq_restore(int package_id)
+{
+ u32 l, h;
+ int cpu;
+ struct rapl_package *rp;
+
+ rp = find_package_by_id(package_id);
+ if (!rp)
+ return;
+
+ if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+ return;
+
+ cpu = find_active_cpu_on_package(package_id);
+ if (cpu < 0)
+ return;
+
+ /* irq enable state not saved, nothing to restore */
+ if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
+ return;
+ rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+
+ if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
+ l |= PACKAGE_THERM_INT_PLN_ENABLE;
+ else
+ l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+
+ wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+static const struct x86_cpu_id rapl_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
+ { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
+ { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
+ { X86_VENDOR_INTEL, 6, 0x45},/* HSW */
+ /* TODO: Add more CPU IDs after testing */
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
+
+/* read once for all raw primitive data for all packages, domains */
+static void rapl_update_domain_data(void)
+{
+ int dmn, prim;
+ u64 val;
+ struct rapl_package *rp;
+
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ for (dmn = 0; dmn < rp->nr_domains; dmn++) {
+ pr_debug("update package %d domain %s data\n", rp->id,
+ rp->domains[dmn].name);
+ /* exclude non-raw primitives */
+ for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++)
+ if (!rapl_read_data_raw(&rp->domains[dmn], prim,
+ rpi[prim].unit,
+ &val))
+ rp->domains[dmn].rdd.primitives[prim] =
+ val;
+ }
+ }
+
+}
+
+static int rapl_unregister_powercap(void)
+{
+ struct rapl_package *rp;
+ struct rapl_domain *rd, *rd_package = NULL;
+
+ /* unregister all active rapl packages from the powercap layer,
+ * hotplug lock held
+ */
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ package_power_limit_irq_restore(rp->id);
+
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
+ rd++) {
+ pr_debug("remove package, undo power limit on %d: %s\n",
+ rp->id, rd->name);
+ rapl_write_data_raw(rd, PL1_ENABLE, 0);
+ rapl_write_data_raw(rd, PL2_ENABLE, 0);
+ rapl_write_data_raw(rd, PL1_CLAMP, 0);
+ rapl_write_data_raw(rd, PL2_CLAMP, 0);
+ if (rd->id == RAPL_DOMAIN_PACKAGE) {
+ rd_package = rd;
+ continue;
+ }
+ powercap_unregister_zone(control_type, &rd->power_zone);
+ }
+ /* do the package zone last */
+ if (rd_package)
+ powercap_unregister_zone(control_type,
+ &rd_package->power_zone);
+ }
+ powercap_unregister_control_type(control_type);
+
+ return 0;
+}
+
+static int rapl_package_register_powercap(struct rapl_package *rp)
+{
+ struct rapl_domain *rd;
+ int ret = 0;
+ char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null */
+ struct powercap_zone *power_zone = NULL;
+ int nr_pl;
+
+ /* first we register the package domain as the parent zone */
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ if (rd->id == RAPL_DOMAIN_PACKAGE) {
+ nr_pl = find_nr_power_limit(rd);
+ pr_debug("register socket %d package domain %s\n",
+ rp->id, rd->name);
+ memset(dev_name, 0, sizeof(dev_name));
+ snprintf(dev_name, sizeof(dev_name), "%s-%d",
+ rd->name, rp->id);
+ power_zone = powercap_register_zone(&rd->power_zone,
+ control_type,
+ dev_name, NULL,
+ &zone_ops[rd->id],
+ nr_pl,
+ &constraint_ops);
+ if (IS_ERR(power_zone)) {
+ pr_debug("failed to register package, %d\n",
+ rp->id);
+ ret = PTR_ERR(power_zone);
+ goto exit_package;
+ }
+ /* track parent zone in per package/socket data */
+ rp->power_zone = power_zone;
+ /* done, only one package domain per socket */
+ break;
+ }
+ }
+ if (!power_zone) {
+ pr_err("no package domain found, unknown topology!\n");
+ ret = -ENODEV;
+ goto exit_package;
+ }
+ /* now register domains as children of the socket/package */
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ if (rd->id == RAPL_DOMAIN_PACKAGE)
+ continue;
+ /* number of power limits per domain varies */
+ nr_pl = find_nr_power_limit(rd);
+ power_zone = powercap_register_zone(&rd->power_zone,
+ control_type, rd->name,
+ rp->power_zone,
+ &zone_ops[rd->id], nr_pl,
+ &constraint_ops);
+
+ if (IS_ERR(power_zone)) {
+ pr_debug("failed to register power_zone, %d:%s:%s\n",
+ rp->id, rd->name, dev_name);
+ ret = PTR_ERR(power_zone);
+ goto err_cleanup;
+ }
+ }
+
+exit_package:
+ return ret;
+err_cleanup:
+ /* clean up previously initialized domains within the package if we
+ * failed after the first domain setup.
+ */
+ while (--rd >= rp->domains) {
+ pr_debug("unregister package %d domain %s\n", rp->id, rd->name);
+ powercap_unregister_zone(control_type, &rd->power_zone);
+ }
+
+ return ret;
+}
+
+static int rapl_register_powercap(void)
+{
+ struct rapl_domain *rd;
+ struct rapl_package *rp;
+ int ret = 0;
+
+ control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
+ if (IS_ERR(control_type)) {
+ pr_debug("failed to register powercap control_type.\n");
+ return PTR_ERR(control_type);
+ }
+ /* read the initial data */
+ rapl_update_domain_data();
+ list_for_each_entry(rp, &rapl_packages, plist)
+ if (rapl_package_register_powercap(rp))
+ goto err_cleanup_package;
+ return ret;
+
+err_cleanup_package:
+ /* clean up previously initialized packages */
+ list_for_each_entry_continue_reverse(rp, &rapl_packages, plist) {
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
+ rd++) {
+ pr_debug("unregister zone/package %d, %s domain\n",
+ rp->id, rd->name);
+ powercap_unregister_zone(control_type, &rd->power_zone);
+ }
+ }
+
+ return ret;
+}
+
+static int rapl_check_domain(int cpu, int domain)
+{
+ unsigned msr;
+ u64 val1, val2 = 0;
+ int retry = 0;
+
+ switch (domain) {
+ case RAPL_DOMAIN_PACKAGE:
+ msr = MSR_PKG_ENERGY_STATUS;
+ break;
+ case RAPL_DOMAIN_PP0:
+ msr = MSR_PP0_ENERGY_STATUS;
+ break;
+ case RAPL_DOMAIN_PP1:
+ msr = MSR_PP1_ENERGY_STATUS;
+ break;
+ case RAPL_DOMAIN_DRAM:
+ msr = MSR_DRAM_ENERGY_STATUS;
+ break;
+ default:
+ pr_err("invalid domain id %d\n", domain);
+ return -EINVAL;
+ }
+ if (rdmsrl_safe_on_cpu(cpu, msr, &val1))
+ return -ENODEV;
+
+ /* energy counters roll slowly on some domains */
+ while (++retry < 10) {
+ usleep_range(10000, 15000);
+ rdmsrl_safe_on_cpu(cpu, msr, &val2);
+ if ((val1 & ENERGY_STATUS_MASK) != (val2 & ENERGY_STATUS_MASK))
+ return 0;
+ }
+ /* if energy counter does not change, report as bad domain */
+ pr_info("domain %s energy ctr %llu:%llu not working, skip\n",
+ rapl_domain_names[domain], val1, val2);
+
+ return -ENODEV;
+}
+
+/* Detect active and valid domains for the given CPU; the caller must
+ * ensure the CPU belongs to the targeted package and CPU hotplug is disabled.
+ */
+static int rapl_detect_domains(struct rapl_package *rp, int cpu)
+{
+ int i;
+ int ret = 0;
+ struct rapl_domain *rd;
+ u64 locked;
+
+ for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
+ /* use physical package id to read counters */
+ if (!rapl_check_domain(cpu, i))
+ rp->domain_map |= 1 << i;
+ }
+ rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
+ if (!rp->nr_domains) {
+ pr_err("no valid rapl domains found in package %d\n", rp->id);
+ ret = -ENODEV;
+ goto done;
+ }
+ pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id);
+
+ rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
+ GFP_KERNEL);
+ if (!rp->domains) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ rapl_init_domains(rp);
+
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ /* check if the domain is locked by BIOS */
+ if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) {
+ pr_info("RAPL package %d domain %s locked by BIOS\n",
+ rp->id, rd->name);
+ rd->state |= DOMAIN_STATE_BIOS_LOCKED;
+ }
+ }
+
+
+done:
+ return ret;
+}
+
+static bool is_package_new(int package)
+{
+ struct rapl_package *rp;
+
+ /* caller prevents cpu hotplug, so there will be no new packages added
+ * or deleted while traversing the package list; no need for locking.
+ */
+ list_for_each_entry(rp, &rapl_packages, plist)
+ if (package == rp->id)
+ return false;
+
+ return true;
+}
+
+/* RAPL interface can be made of a two-level hierarchy: package level and domain
+ * level. We first detect the number of packages, then the domains of each package.
+ * We have to consider the possibility of CPU online/offline due to hotplug and
+ * other scenarios.
+ */
+static int rapl_detect_topology(void)
+{
+ int i;
+ int phy_package_id;
+ struct rapl_package *new_package, *rp;
+
+ for_each_online_cpu(i) {
+ phy_package_id = topology_physical_package_id(i);
+ if (is_package_new(phy_package_id)) {
+ new_package = kzalloc(sizeof(*rp), GFP_KERNEL);
+ if (!new_package) {
+ rapl_cleanup_data();
+ return -ENOMEM;
+ }
+ /* add the new package to the list */
+ new_package->id = phy_package_id;
+ new_package->nr_cpus = 1;
+
+ /* check if the package contains valid domains */
+ if (rapl_detect_domains(new_package, i) ||
+ rapl_check_unit(new_package, i)) {
+ kfree(new_package->domains);
+ kfree(new_package);
+ /* free up the packages already initialized */
+ rapl_cleanup_data();
+ return -ENODEV;
+ }
+ INIT_LIST_HEAD(&new_package->plist);
+ list_add(&new_package->plist, &rapl_packages);
+ } else {
+ rp = find_package_by_id(phy_package_id);
+ if (rp)
+ ++rp->nr_cpus;
+ }
+ }
+
+ return 0;
+}
+
+/* called from CPU hotplug notifier, hotplug lock held */
+static void rapl_remove_package(struct rapl_package *rp)
+{
+ struct rapl_domain *rd, *rd_package = NULL;
+
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ if (rd->id == RAPL_DOMAIN_PACKAGE) {
+ rd_package = rd;
+ continue;
+ }
+ pr_debug("remove package %d, %s domain\n", rp->id, rd->name);
+ powercap_unregister_zone(control_type, &rd->power_zone);
+ }
+ /* do parent zone last */
+ powercap_unregister_zone(control_type, &rd_package->power_zone);
+ list_del(&rp->plist);
+ kfree(rp);
+}
+
+/* called from CPU hotplug notifier, hotplug lock held */
+static int rapl_add_package(int cpu)
+{
+ int ret = 0;
+ int phy_package_id;
+ struct rapl_package *rp;
+
+ phy_package_id = topology_physical_package_id(cpu);
+ rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ /* add the new package to the list */
+ rp->id = phy_package_id;
+ rp->nr_cpus = 1;
+ /* check if the package contains valid domains */
+ if (rapl_detect_domains(rp, cpu) ||
+ rapl_check_unit(rp, cpu)) {
+ ret = -ENODEV;
+ goto err_free_package;
+ }
+ if (!rapl_package_register_powercap(rp)) {
+ INIT_LIST_HEAD(&rp->plist);
+ list_add(&rp->plist, &rapl_packages);
+ return ret;
+ }
+
+err_free_package:
+ kfree(rp->domains);
+ kfree(rp);
+
+ return ret;
+}
+
+/* Handles CPU hotplug on multi-socket systems.
+ * If a CPU goes online as the first CPU of the physical package
+ * we add the RAPL package to the system. Similarly, when the last
+ * CPU of the package is removed, we remove the RAPL package and its
+ * associated domains. Cooling devices are handled accordingly at
+ * per-domain level.
+ */
+static int rapl_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned long cpu = (unsigned long)hcpu;
+ int phy_package_id;
+ struct rapl_package *rp;
+
+ phy_package_id = topology_physical_package_id(cpu);
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ rp = find_package_by_id(phy_package_id);
+ if (rp)
+ ++rp->nr_cpus;
+ else
+ rapl_add_package(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ rp = find_package_by_id(phy_package_id);
+ if (!rp)
+ break;
+ if (--rp->nr_cpus == 0)
+ rapl_remove_package(rp);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rapl_cpu_notifier = {
+ .notifier_call = rapl_cpu_callback,
+};
+
+static int __init rapl_init(void)
+{
+ int ret = 0;
+
+ if (!x86_match_cpu(rapl_ids)) {
+ pr_err("driver does not support CPU family %d model %d\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+ return -ENODEV;
+ }
+ /* prevent CPU hotplug during detection */
+ get_online_cpus();
+ ret = rapl_detect_topology();
+ if (ret)
+ goto done;
+
+ if (rapl_register_powercap()) {
+ rapl_cleanup_data();
+ ret = -ENODEV;
+ goto done;
+ }
+ register_hotcpu_notifier(&rapl_cpu_notifier);
+done:
+ put_online_cpus();
+
+ return ret;
+}
+
+static void __exit rapl_exit(void)
+{
+ get_online_cpus();
+ unregister_hotcpu_notifier(&rapl_cpu_notifier);
+ rapl_unregister_powercap();
+ rapl_cleanup_data();
+ put_online_cpus();
+}
+
+module_init(rapl_init);
+module_exit(rapl_exit);
+
+MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit)");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
+MODULE_LICENSE("GPL v2");
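Once loaded, the package zone registered by this driver shows up under /sys/class/powercap with a name of the form intel-rapl:<id> (see dev_set_name() in powercap_sys.c below). A small userspace sketch that reads the energy counter, assuming socket 0 is present:

/* Userspace illustration only, not kernel code. */
#include <stdio.h>

int main(void)
{
	unsigned long long uj;
	FILE *f = fopen("/sys/class/powercap/intel-rapl:0/energy_uj", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &uj) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("package energy: %llu uJ\n", uj);
	return 0;
}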
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
new file mode 100644
index 000000000000..21814f90a44b
--- /dev/null
+++ b/drivers/powercap/powercap_sys.c
@@ -0,0 +1,685 @@
+/*
+ * Power capping class
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/powercap.h>
+
+#define to_powercap_zone(n) container_of(n, struct powercap_zone, dev)
+#define to_powercap_control_type(n) \
+ container_of(n, struct powercap_control_type, dev)
+
+/* Power zone show function */
+#define define_power_zone_show(_attr) \
+static ssize_t _attr##_show(struct device *dev, \
+ struct device_attribute *dev_attr,\
+ char *buf) \
+{ \
+ u64 value; \
+ ssize_t len = -EINVAL; \
+ struct powercap_zone *power_zone = to_powercap_zone(dev); \
+ \
+ if (power_zone->ops->get_##_attr) { \
+ if (!power_zone->ops->get_##_attr(power_zone, &value)) \
+ len = sprintf(buf, "%lld\n", value); \
+ } \
+ \
+ return len; \
+}
+
+/* The only meaningful input is 0 (reset), others are silently ignored */
+#define define_power_zone_store(_attr) \
+static ssize_t _attr##_store(struct device *dev,\
+ struct device_attribute *dev_attr, \
+ const char *buf, size_t count) \
+{ \
+ int err; \
+ struct powercap_zone *power_zone = to_powercap_zone(dev); \
+ u64 value; \
+ \
+ err = kstrtoull(buf, 10, &value); \
+ if (err) \
+ return -EINVAL; \
+ if (value) \
+ return count; \
+ if (power_zone->ops->reset_##_attr) { \
+ if (!power_zone->ops->reset_##_attr(power_zone)) \
+ return count; \
+ } \
+ \
+ return -EINVAL; \
+}
+
+/* Power zone constraint show function */
+#define define_power_zone_constraint_show(_attr) \
+static ssize_t show_constraint_##_attr(struct device *dev, \
+ struct device_attribute *dev_attr,\
+ char *buf) \
+{ \
+ u64 value; \
+ ssize_t len = -ENODATA; \
+ struct powercap_zone *power_zone = to_powercap_zone(dev); \
+ int id; \
+ struct powercap_zone_constraint *pconst;\
+ \
+ if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+ return -EINVAL; \
+ if (id >= power_zone->const_id_cnt) \
+ return -EINVAL; \
+ pconst = &power_zone->constraints[id]; \
+ if (pconst && pconst->ops && pconst->ops->get_##_attr) { \
+ if (!pconst->ops->get_##_attr(power_zone, id, &value)) \
+ len = sprintf(buf, "%lld\n", value); \
+ } \
+ \
+ return len; \
+}
+
+/* Power zone constraint store function */
+#define define_power_zone_constraint_store(_attr) \
+static ssize_t store_constraint_##_attr(struct device *dev,\
+ struct device_attribute *dev_attr, \
+ const char *buf, size_t count) \
+{ \
+ int err; \
+ u64 value; \
+ struct powercap_zone *power_zone = to_powercap_zone(dev); \
+ int id; \
+ struct powercap_zone_constraint *pconst;\
+ \
+ if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+ return -EINVAL; \
+ if (id >= power_zone->const_id_cnt) \
+ return -EINVAL; \
+ pconst = &power_zone->constraints[id]; \
+ err = kstrtoull(buf, 10, &value); \
+ if (err) \
+ return -EINVAL; \
+ if (pconst && pconst->ops && pconst->ops->set_##_attr) { \
+ if (!pconst->ops->set_##_attr(power_zone, id, value)) \
+ return count; \
+ } \
+ \
+ return -ENODATA; \
+}
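The constraint show/store wrappers above recover the constraint index from the sysfs file name itself rather than storing it per attribute. A standalone illustration of that parse (userspace-style, names illustrative):

/* Illustration only: "constraint_<id>_<attr>" maps back to an index via the
 * same sscanf() format used in the macros above.
 */
#include <stdio.h>

int main(void)
{
	int id;

	if (sscanf("constraint_1_power_limit_uw", "constraint_%d_", &id) == 1)
		printf("constraint id = %d\n", id);	/* prints 1 */
	return 0;
}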
+
+/* Power zone information callbacks */
+define_power_zone_show(power_uw);
+define_power_zone_show(max_power_range_uw);
+define_power_zone_show(energy_uj);
+define_power_zone_store(energy_uj);
+define_power_zone_show(max_energy_range_uj);
+
+/* Power zone attributes */
+static DEVICE_ATTR_RO(max_power_range_uw);
+static DEVICE_ATTR_RO(power_uw);
+static DEVICE_ATTR_RO(max_energy_range_uj);
+static DEVICE_ATTR_RW(energy_uj);
+
+/* Power zone constraint attributes callbacks */
+define_power_zone_constraint_show(power_limit_uw);
+define_power_zone_constraint_store(power_limit_uw);
+define_power_zone_constraint_show(time_window_us);
+define_power_zone_constraint_store(time_window_us);
+define_power_zone_constraint_show(max_power_uw);
+define_power_zone_constraint_show(min_power_uw);
+define_power_zone_constraint_show(max_time_window_us);
+define_power_zone_constraint_show(min_time_window_us);
+
+/* For one time seeding of constraint device attributes */
+struct powercap_constraint_attr {
+ struct device_attribute power_limit_attr;
+ struct device_attribute time_window_attr;
+ struct device_attribute max_power_attr;
+ struct device_attribute min_power_attr;
+ struct device_attribute max_time_window_attr;
+ struct device_attribute min_time_window_attr;
+ struct device_attribute name_attr;
+};
+
+static struct powercap_constraint_attr
+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
+
+/* A list of powercap control_types */
+static LIST_HEAD(powercap_cntrl_list);
+/* Mutex to protect list of powercap control_types */
+static DEFINE_MUTEX(powercap_cntrl_list_lock);
+
+#define POWERCAP_CONSTRAINT_NAME_LEN 30 /* Some limit to avoid overflow */
+static ssize_t show_constraint_name(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ const char *name;
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+ int id;
+ ssize_t len = -ENODATA;
+ struct powercap_zone_constraint *pconst;
+
+ if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id))
+ return -EINVAL;
+ if (id >= power_zone->const_id_cnt)
+ return -EINVAL;
+ pconst = &power_zone->constraints[id];
+
+ if (pconst && pconst->ops && pconst->ops->get_name) {
+ name = pconst->ops->get_name(power_zone, id);
+ if (name) {
+ snprintf(buf, POWERCAP_CONSTRAINT_NAME_LEN,
+ "%s\n", name);
+ buf[POWERCAP_CONSTRAINT_NAME_LEN] = '\0';
+ len = strlen(buf);
+ }
+ }
+
+ return len;
+}
+
+static int create_constraint_attribute(int id, const char *name,
+ int mode,
+ struct device_attribute *dev_attr,
+ ssize_t (*show)(struct device *,
+ struct device_attribute *, char *),
+ ssize_t (*store)(struct device *,
+ struct device_attribute *,
+ const char *, size_t)
+ )
+{
+
+ dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
+ id, name);
+ if (!dev_attr->attr.name)
+ return -ENOMEM;
+ dev_attr->attr.mode = mode;
+ dev_attr->show = show;
+ dev_attr->store = store;
+
+ return 0;
+}
+
+static void free_constraint_attributes(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
+ kfree(constraint_attrs[i].power_limit_attr.attr.name);
+ kfree(constraint_attrs[i].time_window_attr.attr.name);
+ kfree(constraint_attrs[i].name_attr.attr.name);
+ kfree(constraint_attrs[i].max_power_attr.attr.name);
+ kfree(constraint_attrs[i].min_power_attr.attr.name);
+ kfree(constraint_attrs[i].max_time_window_attr.attr.name);
+ kfree(constraint_attrs[i].min_time_window_attr.attr.name);
+ }
+}
+
+static int seed_constraint_attributes(void)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
+ ret = create_constraint_attribute(i, "power_limit_uw",
+ S_IWUSR | S_IRUGO,
+ &constraint_attrs[i].power_limit_attr,
+ show_constraint_power_limit_uw,
+ store_constraint_power_limit_uw);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "time_window_us",
+ S_IWUSR | S_IRUGO,
+ &constraint_attrs[i].time_window_attr,
+ show_constraint_time_window_us,
+ store_constraint_time_window_us);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "name", S_IRUGO,
+ &constraint_attrs[i].name_attr,
+ show_constraint_name,
+ NULL);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
+ &constraint_attrs[i].max_power_attr,
+ show_constraint_max_power_uw,
+ NULL);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
+ &constraint_attrs[i].min_power_attr,
+ show_constraint_min_power_uw,
+ NULL);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "max_time_window_us",
+ S_IRUGO,
+ &constraint_attrs[i].max_time_window_attr,
+ show_constraint_max_time_window_us,
+ NULL);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "min_time_window_us",
+ S_IRUGO,
+ &constraint_attrs[i].min_time_window_attr,
+ show_constraint_min_time_window_us,
+ NULL);
+ if (ret)
+ goto err_alloc;
+
+ }
+
+ return 0;
+
+err_alloc:
+ free_constraint_attributes();
+
+ return ret;
+}
+
+static int create_constraints(struct powercap_zone *power_zone,
+ int nr_constraints,
+ struct powercap_zone_constraint_ops *const_ops)
+{
+ int i;
+ int ret = 0;
+ int count;
+ struct powercap_zone_constraint *pconst;
+
+ if (!power_zone || !const_ops || !const_ops->get_power_limit_uw ||
+ !const_ops->set_power_limit_uw ||
+ !const_ops->get_time_window_us ||
+ !const_ops->set_time_window_us)
+ return -EINVAL;
+
+ count = power_zone->zone_attr_count;
+ for (i = 0; i < nr_constraints; ++i) {
+ pconst = &power_zone->constraints[i];
+ pconst->ops = const_ops;
+ pconst->id = power_zone->const_id_cnt;
+ power_zone->const_id_cnt++;
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].power_limit_attr.attr;
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].time_window_attr.attr;
+ if (pconst->ops->get_name)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].name_attr.attr;
+ if (pconst->ops->get_max_power_uw)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].max_power_attr.attr;
+ if (pconst->ops->get_min_power_uw)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].min_power_attr.attr;
+ if (pconst->ops->get_max_time_window_us)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].max_time_window_attr.attr;
+ if (pconst->ops->get_min_time_window_us)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].min_time_window_attr.attr;
+ }
+ power_zone->zone_attr_count = count;
+
+ return ret;
+}
+
+static bool control_type_valid(void *control_type)
+{
+ struct powercap_control_type *pos = NULL;
+ bool found = false;
+
+ mutex_lock(&powercap_cntrl_list_lock);
+
+ list_for_each_entry(pos, &powercap_cntrl_list, node) {
+ if (pos == control_type) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&powercap_cntrl_list_lock);
+
+ return found;
+}
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+
+ return sprintf(buf, "%s\n", power_zone->name);
+}
+
+static DEVICE_ATTR_RO(name);
+
+/* Create zone and attributes in sysfs */
+static void create_power_zone_common_attributes(
+ struct powercap_zone *power_zone)
+{
+ int count = 0;
+
+ power_zone->zone_dev_attrs[count++] = &dev_attr_name.attr;
+ if (power_zone->ops->get_max_energy_range_uj)
+ power_zone->zone_dev_attrs[count++] =
+ &dev_attr_max_energy_range_uj.attr;
+ if (power_zone->ops->get_energy_uj)
+ power_zone->zone_dev_attrs[count++] =
+ &dev_attr_energy_uj.attr;
+ if (power_zone->ops->get_power_uw)
+ power_zone->zone_dev_attrs[count++] =
+ &dev_attr_power_uw.attr;
+ if (power_zone->ops->get_max_power_range_uw)
+ power_zone->zone_dev_attrs[count++] =
+ &dev_attr_max_power_range_uw.attr;
+ power_zone->zone_dev_attrs[count] = NULL;
+ power_zone->zone_attr_count = count;
+}
+
+static void powercap_release(struct device *dev)
+{
+ bool allocated;
+
+ if (dev->parent) {
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+
+ /* Store flag as the release() may free memory */
+ allocated = power_zone->allocated;
+ /* Remove id from parent idr struct */
+ idr_remove(power_zone->parent_idr, power_zone->id);
+ /* Destroy idrs allocated for this zone */
+ idr_destroy(&power_zone->idr);
+ kfree(power_zone->name);
+ kfree(power_zone->zone_dev_attrs);
+ kfree(power_zone->constraints);
+ if (power_zone->ops->release)
+ power_zone->ops->release(power_zone);
+ if (allocated)
+ kfree(power_zone);
+ } else {
+ struct powercap_control_type *control_type =
+ to_powercap_control_type(dev);
+
+ /* Store flag as the release() may free memory */
+ allocated = control_type->allocated;
+ idr_destroy(&control_type->idr);
+ mutex_destroy(&control_type->lock);
+ if (control_type->ops && control_type->ops->release)
+ control_type->ops->release(control_type);
+ if (allocated)
+ kfree(control_type);
+ }
+}
+
+static ssize_t enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ bool mode = true;
+
+ /* Default is enabled */
+ if (dev->parent) {
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+ if (power_zone->ops->get_enable)
+ if (power_zone->ops->get_enable(power_zone, &mode))
+ mode = false;
+ } else {
+ struct powercap_control_type *control_type =
+ to_powercap_control_type(dev);
+ if (control_type->ops && control_type->ops->get_enable)
+ if (control_type->ops->get_enable(control_type, &mode))
+ mode = false;
+ }
+
+ return sprintf(buf, "%d\n", mode);
+}
+
+static ssize_t enabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ bool mode;
+
+ if (strtobool(buf, &mode))
+ return -EINVAL;
+ if (dev->parent) {
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+ if (power_zone->ops->set_enable)
+ if (!power_zone->ops->set_enable(power_zone, mode))
+ return len;
+ } else {
+ struct powercap_control_type *control_type =
+ to_powercap_control_type(dev);
+ if (control_type->ops && control_type->ops->set_enable)
+ if (!control_type->ops->set_enable(control_type, mode))
+ return len;
+ }
+
+ return -ENOSYS;
+}
+
+static DEVICE_ATTR_RW(enabled);
+
+static struct attribute *powercap_attrs[] = {
+ &dev_attr_enabled.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(powercap);
+
+static struct class powercap_class = {
+ .name = "powercap",
+ .dev_release = powercap_release,
+ .dev_groups = powercap_groups,
+};
+
+struct powercap_zone *powercap_register_zone(
+ struct powercap_zone *power_zone,
+ struct powercap_control_type *control_type,
+ const char *name,
+ struct powercap_zone *parent,
+ const struct powercap_zone_ops *ops,
+ int nr_constraints,
+ struct powercap_zone_constraint_ops *const_ops)
+{
+ int result;
+ int nr_attrs;
+
+ if (!name || !control_type || !ops ||
+ nr_constraints > MAX_CONSTRAINTS_PER_ZONE ||
+ (!ops->get_energy_uj && !ops->get_power_uw) ||
+ !control_type_valid(control_type))
+ return ERR_PTR(-EINVAL);
+
+ if (power_zone) {
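+ /* A caller-managed zone must provide a release() callback */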
+ if (!ops->release)
+ return ERR_PTR(-EINVAL);
+ memset(power_zone, 0, sizeof(*power_zone));
+ } else {
+ power_zone = kzalloc(sizeof(*power_zone), GFP_KERNEL);
+ if (!power_zone)
+ return ERR_PTR(-ENOMEM);
+ power_zone->allocated = true;
+ }
+ power_zone->ops = ops;
+ power_zone->control_type_inst = control_type;
+ if (!parent) {
+ power_zone->dev.parent = &control_type->dev;
+ power_zone->parent_idr = &control_type->idr;
+ } else {
+ power_zone->dev.parent = &parent->dev;
+ power_zone->parent_idr = &parent->idr;
+ }
+ power_zone->dev.class = &powercap_class;
+
+ mutex_lock(&control_type->lock);
+ /* Using idr to get the unique id */
+ result = idr_alloc(power_zone->parent_idr, NULL, 0, 0, GFP_KERNEL);
+ if (result < 0)
+ goto err_idr_alloc;
+
+ power_zone->id = result;
+ idr_init(&power_zone->idr);
+ power_zone->name = kstrdup(name, GFP_KERNEL);
+ if (!power_zone->name) {
+ result = -ENOMEM;
+ goto err_name_alloc;
+ }
+ dev_set_name(&power_zone->dev, "%s:%x",
+ dev_name(power_zone->dev.parent),
+ power_zone->id);
+ power_zone->constraints = kzalloc(sizeof(*power_zone->constraints) *
+ nr_constraints, GFP_KERNEL);
+ if (!power_zone->constraints) {
+ result = -ENOMEM;
+ goto err_const_alloc;
+ }
+
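+ /* Reserve slots for constraint attributes, the common zone attributes and the NULL terminator */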
+ nr_attrs = nr_constraints * POWERCAP_CONSTRAINTS_ATTRS +
+ POWERCAP_ZONE_MAX_ATTRS + 1;
+ power_zone->zone_dev_attrs = kzalloc(sizeof(void *) *
+ nr_attrs, GFP_KERNEL);
+ if (!power_zone->zone_dev_attrs) {
+ result = -ENOMEM;
+ goto err_attr_alloc;
+ }
+ create_power_zone_common_attributes(power_zone);
+ result = create_constraints(power_zone, nr_constraints, const_ops);
+ if (result)
+ goto err_dev_ret;
+
+ power_zone->zone_dev_attrs[power_zone->zone_attr_count] = NULL;
+ power_zone->dev_zone_attr_group.attrs = power_zone->zone_dev_attrs;
+ power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group;
+ power_zone->dev_attr_groups[1] = NULL;
+ power_zone->dev.groups = power_zone->dev_attr_groups;
+ result = device_register(&power_zone->dev);
+ if (result)
+ goto err_dev_ret;
+
+ control_type->nr_zones++;
+ mutex_unlock(&control_type->lock);
+
+ return power_zone;
+
+err_dev_ret:
+ kfree(power_zone->zone_dev_attrs);
+err_attr_alloc:
+ kfree(power_zone->constraints);
+err_const_alloc:
+ kfree(power_zone->name);
+err_name_alloc:
+ idr_remove(power_zone->parent_idr, power_zone->id);
+err_idr_alloc:
+ if (power_zone->allocated)
+ kfree(power_zone);
+ mutex_unlock(&control_type->lock);
+
+ return ERR_PTR(result);
+}
+EXPORT_SYMBOL_GPL(powercap_register_zone);
+
+int powercap_unregister_zone(struct powercap_control_type *control_type,
+ struct powercap_zone *power_zone)
+{
+ if (!power_zone || !control_type)
+ return -EINVAL;
+
+ mutex_lock(&control_type->lock);
+ control_type->nr_zones--;
+ mutex_unlock(&control_type->lock);
+
+ device_unregister(&power_zone->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(powercap_unregister_zone);
+
+struct powercap_control_type *powercap_register_control_type(
+ struct powercap_control_type *control_type,
+ const char *name,
+ const struct powercap_control_type_ops *ops)
+{
+ int result;
+
+ if (!name)
+ return ERR_PTR(-EINVAL);
+ if (control_type) {
+ if (!ops || !ops->release)
+ return ERR_PTR(-EINVAL);
+ memset(control_type, 0, sizeof(*control_type));
+ } else {
+ control_type = kzalloc(sizeof(*control_type), GFP_KERNEL);
+ if (!control_type)
+ return ERR_PTR(-ENOMEM);
+ control_type->allocated = true;
+ }
+ mutex_init(&control_type->lock);
+ control_type->ops = ops;
+ INIT_LIST_HEAD(&control_type->node);
+ control_type->dev.class = &powercap_class;
+ dev_set_name(&control_type->dev, "%s", name);
+ result = device_register(&control_type->dev);
+ if (result) {
+ if (control_type->allocated)
+ kfree(control_type);
+ return ERR_PTR(result);
+ }
+ idr_init(&control_type->idr);
+
+ mutex_lock(&powercap_cntrl_list_lock);
+ list_add_tail(&control_type->node, &powercap_cntrl_list);
+ mutex_unlock(&powercap_cntrl_list_lock);
+
+ return control_type;
+}
+EXPORT_SYMBOL_GPL(powercap_register_control_type);
+
+int powercap_unregister_control_type(struct powercap_control_type *control_type)
+{
+ struct powercap_control_type *pos = NULL;
+
+ if (control_type->nr_zones) {
+ dev_err(&control_type->dev, "Zones of this type are still not freed\n");
+ return -EINVAL;
+ }
+ mutex_lock(&powercap_cntrl_list_lock);
+ list_for_each_entry(pos, &powercap_cntrl_list, node) {
+ if (pos == control_type) {
+ list_del(&control_type->node);
+ mutex_unlock(&powercap_cntrl_list_lock);
+ device_unregister(&control_type->dev);
+ return 0;
+ }
+ }
+ mutex_unlock(&powercap_cntrl_list_lock);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(powercap_unregister_control_type);
+
+static int __init powercap_init(void)
+{
+ int result = 0;
+
+ result = seed_constraint_attributes();
+ if (result)
+ return result;
+
+ result = class_register(&powercap_class);
+
+ return result;
+}
+
+device_initcall(powercap_init);
+
+MODULE_DESCRIPTION("PowerCap sysfs Driver");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c
index 3e9b6a78ad18..c9ae692d3451 100644
--- a/drivers/rapidio/rio-driver.c
+++ b/drivers/rapidio/rio-driver.c
@@ -223,8 +223,8 @@ struct device rio_bus = {
struct bus_type rio_bus_type = {
.name = "rapidio",
.match = rio_match_bus,
- .dev_attrs = rio_dev_attrs,
- .bus_attrs = rio_bus_attrs,
+ .dev_groups = rio_dev_groups,
+ .bus_groups = rio_bus_groups,
.probe = rio_device_probe,
.remove = rio_device_remove,
.uevent = rio_uevent,
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 9331be646dc3..e0221c6d0cc2 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -27,6 +27,7 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
\
return sprintf(buf, format_string, rdev->field); \
} \
+static DEVICE_ATTR_RO(field);
rio_config_attr(did, "0x%04x\n");
rio_config_attr(vid, "0x%04x\n");
@@ -54,6 +55,7 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch
return (str - buf);
}
+static DEVICE_ATTR_RO(routes);
static ssize_t lprev_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -63,6 +65,7 @@ static ssize_t lprev_show(struct device *dev,
return sprintf(buf, "%s\n",
(rdev->prev) ? rio_name(rdev->prev) : "root");
}
+static DEVICE_ATTR_RO(lprev);
static ssize_t lnext_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -83,6 +86,7 @@ static ssize_t lnext_show(struct device *dev,
return str - buf;
}
+static DEVICE_ATTR_RO(lnext);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -92,23 +96,29 @@ static ssize_t modalias_show(struct device *dev,
return sprintf(buf, "rapidio:v%04Xd%04Xav%04Xad%04X\n",
rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did);
}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *rio_dev_attrs[] = {
+ &dev_attr_did.attr,
+ &dev_attr_vid.attr,
+ &dev_attr_device_rev.attr,
+ &dev_attr_asm_did.attr,
+ &dev_attr_asm_vid.attr,
+ &dev_attr_asm_rev.attr,
+ &dev_attr_lprev.attr,
+ &dev_attr_destid.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
-struct device_attribute rio_dev_attrs[] = {
- __ATTR_RO(did),
- __ATTR_RO(vid),
- __ATTR_RO(device_rev),
- __ATTR_RO(asm_did),
- __ATTR_RO(asm_vid),
- __ATTR_RO(asm_rev),
- __ATTR_RO(lprev),
- __ATTR_RO(destid),
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static const struct attribute_group rio_dev_group = {
+ .attrs = rio_dev_attrs,
};
-static DEVICE_ATTR(routes, S_IRUGO, routes_show, NULL);
-static DEVICE_ATTR(lnext, S_IRUGO, lnext_show, NULL);
-static DEVICE_ATTR(hopcount, S_IRUGO, hopcount_show, NULL);
+const struct attribute_group *rio_dev_groups[] = {
+ &rio_dev_group,
+ NULL,
+};
static ssize_t
rio_read_config(struct file *filp, struct kobject *kobj,
@@ -316,8 +326,18 @@ exit:
return rc;
}
+static BUS_ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store);
+
+static struct attribute *rio_bus_attrs[] = {
+ &bus_attr_scan.attr,
+ NULL,
+};
+
+static const struct attribute_group rio_bus_group = {
+ .attrs = rio_bus_attrs,
+};
-struct bus_attribute rio_bus_attrs[] = {
- __ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store),
- __ATTR_NULL
+const struct attribute_group *rio_bus_groups[] = {
+ &rio_bus_group,
+ NULL,
};
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 085215cd8502..5f99d22ad0b0 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -48,8 +48,8 @@ extern struct rio_mport *rio_find_mport(int mport_id);
extern int rio_mport_scan(int mport_id);
/* Structures internal to the RIO core code */
-extern struct device_attribute rio_dev_attrs[];
-extern struct bus_attribute rio_bus_attrs[];
+extern const struct attribute_group *rio_dev_groups[];
+extern const struct attribute_group *rio_bus_groups[];
#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index 3459f60dcfd1..d333f7eac106 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -141,18 +141,14 @@ struct pm800_regulators {
/* Ranges are sorted in ascending order. */
static const struct regulator_linear_range buck1_volt_range[] = {
- { .min_uV = 600000, .max_uV = 1587500, .min_sel = 0, .max_sel = 0x4f,
- .uV_step = 12500 },
- { .min_uV = 1600000, .max_uV = 1800000, .min_sel = 0x50,
- .max_sel = 0x54, .uV_step = 50000 },
+ REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
+ REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x54, 50000),
};
/* BUCK 2~5 have same ranges. */
static const struct regulator_linear_range buck2_5_volt_range[] = {
- { .min_uV = 600000, .max_uV = 1587500, .min_sel = 0, .max_sel = 0x4f,
- .uV_step = 12500 },
- { .min_uV = 1600000, .max_uV = 3300000, .min_sel = 0x50,
- .max_sel = 0x72, .uV_step = 50000 },
+ REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
+ REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x72, 50000),
};
static const unsigned int ldo1_volt_table[] = {
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 70230974468c..f704d83c93c4 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -391,7 +391,8 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
else
config.regmap = chip->regmap_companion;
- info->regulator = regulator_register(&info->desc, &config);
+ info->regulator = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
if (IS_ERR(info->regulator)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
@@ -402,14 +403,6 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int pm8607_regulator_remove(struct platform_device *pdev)
-{
- struct pm8607_regulator_info *info = platform_get_drvdata(pdev);
-
- regulator_unregister(info->regulator);
- return 0;
-}
-
static struct platform_device_id pm8607_regulator_driver_ids[] = {
{
.name = "88pm860x-regulator",
@@ -428,7 +421,6 @@ static struct platform_driver pm8607_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = pm8607_regulator_probe,
- .remove = pm8607_regulator_remove,
.id_table = pm8607_regulator_driver_ids,
};
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index dfe58096b374..ce785f481281 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -28,16 +28,6 @@ config REGULATOR_DEBUG
help
Say yes here to enable debugging support.
-config REGULATOR_DUMMY
- bool "Provide a dummy regulator if regulator lookups fail"
- help
- If this option is enabled then when a regulator lookup fails
- and the board has not specified that it has provided full
- constraints the regulator core will provide an always
- enabled dummy regulator, allowing consumer drivers to continue.
-
- A warning will be generated when this substitution is done.
-
config REGULATOR_FIXED_VOLTAGE
tristate "Fixed voltage regulator support"
help
@@ -133,6 +123,14 @@ config REGULATOR_AS3711
This driver provides support for the voltage regulators on the
AS3711 PMIC
+config REGULATOR_AS3722
+ tristate "AMS AS3722 PMIC Regulators"
+ depends on MFD_AS3722
+ help
+ This driver provides support for the voltage regulators on the
+ AS3722 PMIC. This will enable support for all the software
+ controllable DCDC/LDO regulators.
+
config REGULATOR_DA903X
tristate "Dialog Semiconductor DA9030/DA9034 regulators"
depends on PMIC_DA903X
@@ -429,6 +427,14 @@ config REGULATOR_TI_ABB
on TI SoCs may be unstable without enabling this as it provides
device specific optimized bias to allow/optimize functionality.
+config REGULATOR_STW481X_VMMC
+ bool "ST Microelectronics STW481X VMMC regulator"
+ depends on MFD_STW481X
+ default y if MFD_STW481X
+ help
+ This driver supports the internal VMMC regulator in the STw481x
+ PMIC chips.
+
config REGULATOR_TPS51632
tristate "TI TPS51632 Power Regulator"
depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 185cce246022..01c597ea1744 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -3,7 +3,7 @@
#
-obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o helpers.o
+obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o helpers.o devres.o
obj-$(CONFIG_OF) += of_regulator.o
obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
@@ -18,6 +18,7 @@ obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
+obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o
@@ -56,6 +57,7 @@ obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
+obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 881159dfcb5e..f70a9bfa5ff2 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -176,7 +176,7 @@ static int aat2870_regulator_probe(struct platform_device *pdev)
config.driver_data = ri;
config.init_data = dev_get_platdata(&pdev->dev);
- rdev = regulator_register(&ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
ri->desc.name);
@@ -187,21 +187,12 @@ static int aat2870_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int aat2870_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
- return 0;
-}
-
static struct platform_driver aat2870_regulator_driver = {
.driver = {
.name = "aat2870-regulator",
.owner = THIS_MODULE,
},
.probe = aat2870_regulator_probe,
- .remove = aat2870_regulator_remove,
};
static int __init aat2870_regulator_init(void)
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 7d5eaa874b2d..77b46d0b37a6 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -535,7 +535,7 @@ static int ab3100_regulator_register(struct platform_device *pdev,
config.dev = &pdev->dev;
config.driver_data = reg;
- rdev = regulator_register(desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, desc, &config);
if (IS_ERR(rdev)) {
err = PTR_ERR(rdev);
dev_err(&pdev->dev,
@@ -616,7 +616,6 @@ static int ab3100_regulators_remove(struct platform_device *pdev)
for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
struct ab3100_regulator *reg = &ab3100_regulators[i];
- regulator_unregister(reg->rdev);
reg->rdev = NULL;
}
return 0;
diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c
index 02ff691cdb8b..29c0faaf8eba 100644
--- a/drivers/regulator/ab8500-ext.c
+++ b/drivers/regulator/ab8500-ext.c
@@ -413,16 +413,12 @@ static int ab8500_ext_regulator_probe(struct platform_device *pdev)
&pdata->ext_regulator[i];
/* register regulator with framework */
- info->rdev = regulator_register(&info->desc, &config);
+ info->rdev = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
if (IS_ERR(info->rdev)) {
err = PTR_ERR(info->rdev);
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
- /* when we fail, un-register all earlier regulators */
- while (--i >= 0) {
- info = &ab8500_ext_regulator_info[i];
- regulator_unregister(info->rdev);
- }
return err;
}
@@ -433,26 +429,8 @@ static int ab8500_ext_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int ab8500_ext_regulator_remove(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) {
- struct ab8500_ext_regulator_info *info = NULL;
- info = &ab8500_ext_regulator_info[i];
-
- dev_vdbg(rdev_get_dev(info->rdev),
- "%s-remove\n", info->desc.name);
-
- regulator_unregister(info->rdev);
- }
-
- return 0;
-}
-
static struct platform_driver ab8500_ext_regulator_driver = {
.probe = ab8500_ext_regulator_probe,
- .remove = ab8500_ext_regulator_remove,
.driver = {
.name = "ab8500-ext-regulator",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index b2b203cb6b2f..48016a050d5f 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -219,7 +219,6 @@ static int ad5398_probe(struct i2c_client *client,
struct ad5398_chip_info *chip;
const struct ad5398_current_data_format *df =
(struct ad5398_current_data_format *)id->driver_data;
- int ret;
if (!init_data)
return -EINVAL;
@@ -240,33 +239,21 @@ static int ad5398_probe(struct i2c_client *client,
chip->current_offset = df->current_offset;
chip->current_mask = (chip->current_level - 1) << chip->current_offset;
- chip->rdev = regulator_register(&ad5398_reg, &config);
+ chip->rdev = devm_regulator_register(&client->dev, &ad5398_reg,
+ &config);
if (IS_ERR(chip->rdev)) {
- ret = PTR_ERR(chip->rdev);
dev_err(&client->dev, "failed to register %s %s\n",
id->name, ad5398_reg.name);
- goto err;
+ return PTR_ERR(chip->rdev);
}
i2c_set_clientdata(client, chip);
dev_dbg(&client->dev, "%s regulator driver is registered.\n", id->name);
return 0;
-
-err:
- return ret;
-}
-
-static int ad5398_remove(struct i2c_client *client)
-{
- struct ad5398_chip_info *chip = i2c_get_clientdata(client);
-
- regulator_unregister(chip->rdev);
- return 0;
}
static struct i2c_driver ad5398_driver = {
.probe = ad5398_probe,
- .remove = ad5398_remove,
.driver = {
.name = "ad5398",
},
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 0d4a8ccbb536..c734d0980826 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -200,7 +200,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
config.regmap = sreg->anatop;
/* register regulator */
- rdev = regulator_register(rdesc, &config);
+ rdev = devm_regulator_register(dev, rdesc, &config);
if (IS_ERR(rdev)) {
dev_err(dev, "failed to register %s\n",
rdesc->name);
@@ -223,7 +223,6 @@ static int anatop_regulator_remove(struct platform_device *pdev)
struct anatop_regulator *sreg = rdev_get_drvdata(rdev);
const char *name = sreg->name;
- regulator_unregister(rdev);
kfree(name);
return 0;
@@ -256,7 +255,7 @@ static void __exit anatop_regulator_exit(void)
}
module_exit(anatop_regulator_exit);
-MODULE_AUTHOR("Nancy Chen <Nancy.Chen@freescale.com>, "
- "Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>");
+MODULE_AUTHOR("Nancy Chen <Nancy.Chen@freescale.com>");
+MODULE_AUTHOR("Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>");
MODULE_DESCRIPTION("ANATOP Regulator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index 81d8681c3195..4f6c2055f6b2 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -226,7 +226,7 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
else
config.init_data = &ldo1->init_data;
- ldo1->regulator = regulator_register(desc, &config);
+ ldo1->regulator = devm_regulator_register(&pdev->dev, desc, &config);
if (IS_ERR(ldo1->regulator)) {
ret = PTR_ERR(ldo1->regulator);
dev_err(arizona->dev, "Failed to register LDO1 supply: %d\n",
@@ -239,18 +239,8 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
return 0;
}
-static int arizona_ldo1_remove(struct platform_device *pdev)
-{
- struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev);
-
- regulator_unregister(ldo1->regulator);
-
- return 0;
-}
-
static struct platform_driver arizona_ldo1_driver = {
.probe = arizona_ldo1_probe,
- .remove = arizona_ldo1_remove,
.driver = {
.name = "arizona-ldo1",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index e87536bf0bed..724706a97dc4 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -225,7 +225,9 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
regmap_update_bits(arizona->regmap, ARIZONA_MIC_CHARGE_PUMP_1,
ARIZONA_CPMIC_BYPASS, 0);
- micsupp->regulator = regulator_register(&arizona_micsupp, &config);
+ micsupp->regulator = devm_regulator_register(&pdev->dev,
+ &arizona_micsupp,
+ &config);
if (IS_ERR(micsupp->regulator)) {
ret = PTR_ERR(micsupp->regulator);
dev_err(arizona->dev, "Failed to register mic supply: %d\n",
@@ -238,18 +240,8 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
return 0;
}
-static int arizona_micsupp_remove(struct platform_device *pdev)
-{
- struct arizona_micsupp *micsupp = platform_get_drvdata(pdev);
-
- regulator_unregister(micsupp->regulator);
-
- return 0;
-}
-
static struct platform_driver arizona_micsupp_driver = {
.probe = arizona_micsupp_probe,
- .remove = arizona_micsupp_remove,
.driver = {
.name = "arizona-micsupp",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index 8406cd745da2..c77a58478cca 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -117,26 +117,19 @@ static struct regulator_ops as3711_dldo_ops = {
};
static const struct regulator_linear_range as3711_sd_ranges[] = {
- { .min_uV = 612500, .max_uV = 1400000,
- .min_sel = 0x1, .max_sel = 0x40, .uV_step = 12500 },
- { .min_uV = 1425000, .max_uV = 2600000,
- .min_sel = 0x41, .max_sel = 0x70, .uV_step = 25000 },
- { .min_uV = 2650000, .max_uV = 3350000,
- .min_sel = 0x71, .max_sel = 0x7f, .uV_step = 50000 },
+ REGULATOR_LINEAR_RANGE(612500, 0x1, 0x40, 12500),
+ REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
+ REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7f, 50000),
};
static const struct regulator_linear_range as3711_aldo_ranges[] = {
- { .min_uV = 1200000, .max_uV = 1950000,
- .min_sel = 0, .max_sel = 0xf, .uV_step = 50000 },
- { .min_uV = 1800000, .max_uV = 3300000,
- .min_sel = 0x10, .max_sel = 0x1f, .uV_step = 100000 },
+ REGULATOR_LINEAR_RANGE(1200000, 0, 0xf, 50000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x10, 0x1f, 100000),
};
static const struct regulator_linear_range as3711_dldo_ranges[] = {
- { .min_uV = 900000, .max_uV = 1700000,
- .min_sel = 0, .max_sel = 0x10, .uV_step = 50000 },
- { .min_uV = 1750000, .max_uV = 3300000,
- .min_sel = 0x20, .max_sel = 0x3f, .uV_step = 50000 },
+ REGULATOR_LINEAR_RANGE(900000, 0, 0x10, 50000),
+ REGULATOR_LINEAR_RANGE(1750000, 0x20, 0x3f, 50000),
};
#define AS3711_REG(_id, _en_reg, _en_bit, _vmask, _vshift, _min_uV, _max_uV, _sfx) \
@@ -273,33 +266,16 @@ static int as3711_regulator_probe(struct platform_device *pdev)
config.regmap = as3711->regmap;
config.of_node = of_node[id];
- rdev = regulator_register(&ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
ri->desc.name);
- ret = PTR_ERR(rdev);
- goto eregreg;
+ return PTR_ERR(rdev);
}
reg->rdev = rdev;
}
platform_set_drvdata(pdev, regs);
return 0;
-
-eregreg:
- while (--id >= 0)
- regulator_unregister(regs[id].rdev);
-
- return ret;
-}
-
-static int as3711_regulator_remove(struct platform_device *pdev)
-{
- struct as3711_regulator *regs = platform_get_drvdata(pdev);
- int id;
-
- for (id = 0; id < AS3711_REGULATOR_NUM; ++id)
- regulator_unregister(regs[id].rdev);
- return 0;
}
static struct platform_driver as3711_regulator_driver = {
@@ -308,7 +284,6 @@ static struct platform_driver as3711_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = as3711_regulator_probe,
- .remove = as3711_regulator_remove,
};
static int __init as3711_regulator_init(void)
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
new file mode 100644
index 000000000000..5917fe3dc983
--- /dev/null
+++ b/drivers/regulator/as3722-regulator.c
@@ -0,0 +1,908 @@
+/*
+ * Voltage regulator support for AMS AS3722 PMIC
+ *
+ * Copyright (C) 2013 ams
+ *
+ * Author: Florian Lobmaier <florian.lobmaier@ams.com>
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/as3722.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+
+/* Regulator IDs */
+enum as3722_regulators_id {
+ AS3722_REGULATOR_ID_SD0,
+ AS3722_REGULATOR_ID_SD1,
+ AS3722_REGULATOR_ID_SD2,
+ AS3722_REGULATOR_ID_SD3,
+ AS3722_REGULATOR_ID_SD4,
+ AS3722_REGULATOR_ID_SD5,
+ AS3722_REGULATOR_ID_SD6,
+ AS3722_REGULATOR_ID_LDO0,
+ AS3722_REGULATOR_ID_LDO1,
+ AS3722_REGULATOR_ID_LDO2,
+ AS3722_REGULATOR_ID_LDO3,
+ AS3722_REGULATOR_ID_LDO4,
+ AS3722_REGULATOR_ID_LDO5,
+ AS3722_REGULATOR_ID_LDO6,
+ AS3722_REGULATOR_ID_LDO7,
+ AS3722_REGULATOR_ID_LDO9,
+ AS3722_REGULATOR_ID_LDO10,
+ AS3722_REGULATOR_ID_LDO11,
+ AS3722_REGULATOR_ID_MAX,
+};
+
+struct as3722_register_mapping {
+ u8 regulator_id;
+ const char *name;
+ const char *sname;
+ u8 vsel_reg;
+ u8 vsel_mask;
+ int n_voltages;
+ u32 enable_reg;
+ u8 enable_mask;
+ u32 control_reg;
+ u8 mode_mask;
+ u32 sleep_ctrl_reg;
+ u8 sleep_ctrl_mask;
+};
+
+struct as3722_regulator_config_data {
+ struct regulator_init_data *reg_init;
+ bool enable_tracking;
+ int ext_control;
+};
+
+struct as3722_regulators {
+ struct device *dev;
+ struct as3722 *as3722;
+ struct regulator_dev *rdevs[AS3722_REGULATOR_ID_MAX];
+ struct regulator_desc desc[AS3722_REGULATOR_ID_MAX];
+ struct as3722_regulator_config_data
+ reg_config_data[AS3722_REGULATOR_ID_MAX];
+};
+
+static const struct as3722_register_mapping as3722_reg_lookup[] = {
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD0,
+ .name = "as3722-sd0",
+ .vsel_reg = AS3722_SD0_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(0),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG,
+ .sleep_ctrl_mask = AS3722_SD0_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD0_CONTROL_REG,
+ .mode_mask = AS3722_SD0_MODE_FAST,
+ .n_voltages = AS3722_SD0_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD1,
+ .name = "as3722-sd1",
+ .vsel_reg = AS3722_SD1_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(1),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG,
+ .sleep_ctrl_mask = AS3722_SD1_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD1_CONTROL_REG,
+ .mode_mask = AS3722_SD1_MODE_FAST,
+ .n_voltages = AS3722_SD0_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD2,
+ .name = "as3722-sd2",
+ .sname = "vsup-sd2",
+ .vsel_reg = AS3722_SD2_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(2),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG,
+ .sleep_ctrl_mask = AS3722_SD2_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD23_CONTROL_REG,
+ .mode_mask = AS3722_SD2_MODE_FAST,
+ .n_voltages = AS3722_SD2_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD3,
+ .name = "as3722-sd3",
+ .sname = "vsup-sd3",
+ .vsel_reg = AS3722_SD3_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(3),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG,
+ .sleep_ctrl_mask = AS3722_SD3_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD23_CONTROL_REG,
+ .mode_mask = AS3722_SD3_MODE_FAST,
+ .n_voltages = AS3722_SD2_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD4,
+ .name = "as3722-sd4",
+ .sname = "vsup-sd4",
+ .vsel_reg = AS3722_SD4_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(4),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL2_REG,
+ .sleep_ctrl_mask = AS3722_SD4_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD4_CONTROL_REG,
+ .mode_mask = AS3722_SD4_MODE_FAST,
+ .n_voltages = AS3722_SD2_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD5,
+ .name = "as3722-sd5",
+ .sname = "vsup-sd5",
+ .vsel_reg = AS3722_SD5_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(5),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL2_REG,
+ .sleep_ctrl_mask = AS3722_SD5_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD5_CONTROL_REG,
+ .mode_mask = AS3722_SD5_MODE_FAST,
+ .n_voltages = AS3722_SD2_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_SD6,
+ .name = "as3722-sd6",
+ .vsel_reg = AS3722_SD6_VOLTAGE_REG,
+ .vsel_mask = AS3722_SD_VSEL_MASK,
+ .enable_reg = AS3722_SD_CONTROL_REG,
+ .enable_mask = AS3722_SDn_CTRL(6),
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL2_REG,
+ .sleep_ctrl_mask = AS3722_SD6_EXT_ENABLE_MASK,
+ .control_reg = AS3722_SD6_CONTROL_REG,
+ .mode_mask = AS3722_SD6_MODE_FAST,
+ .n_voltages = AS3722_SD0_VSEL_MAX + 1,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO0,
+ .name = "as3722-ldo0",
+ .sname = "vin-ldo0",
+ .vsel_reg = AS3722_LDO0_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO0_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO0_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG,
+ .sleep_ctrl_mask = AS3722_LDO0_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO0_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO1,
+ .name = "as3722-ldo1",
+ .sname = "vin-ldo1-6",
+ .vsel_reg = AS3722_LDO1_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO1_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG,
+ .sleep_ctrl_mask = AS3722_LDO1_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO2,
+ .name = "as3722-ldo2",
+ .sname = "vin-ldo2-5-7",
+ .vsel_reg = AS3722_LDO2_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO2_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG,
+ .sleep_ctrl_mask = AS3722_LDO2_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO3,
+ .name = "as3722-ldo3",
+ .name = "vin-ldo3-4",
+ .vsel_reg = AS3722_LDO3_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO3_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO3_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG,
+ .sleep_ctrl_mask = AS3722_LDO3_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO3_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO4,
+ .name = "as3722-ldo4",
+ .name = "vin-ldo3-4",
+ .vsel_reg = AS3722_LDO4_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO4_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG,
+ .sleep_ctrl_mask = AS3722_LDO4_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO5,
+ .name = "as3722-ldo5",
+ .sname = "vin-ldo2-5-7",
+ .vsel_reg = AS3722_LDO5_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO5_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG,
+ .sleep_ctrl_mask = AS3722_LDO5_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO6,
+ .name = "as3722-ldo6",
+ .sname = "vin-ldo1-6",
+ .vsel_reg = AS3722_LDO6_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO6_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG,
+ .sleep_ctrl_mask = AS3722_LDO6_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO7,
+ .name = "as3722-ldo7",
+ .sname = "vin-ldo2-5-7",
+ .vsel_reg = AS3722_LDO7_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL0_REG,
+ .enable_mask = AS3722_LDO7_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG,
+ .sleep_ctrl_mask = AS3722_LDO7_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO9,
+ .name = "as3722-ldo9",
+ .sname = "vin-ldo9-10",
+ .vsel_reg = AS3722_LDO9_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL1_REG,
+ .enable_mask = AS3722_LDO9_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL5_REG,
+ .sleep_ctrl_mask = AS3722_LDO9_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO10,
+ .name = "as3722-ldo10",
+ .sname = "vin-ldo9-10",
+ .vsel_reg = AS3722_LDO10_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL1_REG,
+ .enable_mask = AS3722_LDO10_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL5_REG,
+ .sleep_ctrl_mask = AS3722_LDO10_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+ {
+ .regulator_id = AS3722_REGULATOR_ID_LDO11,
+ .name = "as3722-ldo11",
+ .sname = "vin-ldo11",
+ .vsel_reg = AS3722_LDO11_VOLTAGE_REG,
+ .vsel_mask = AS3722_LDO_VSEL_MASK,
+ .enable_reg = AS3722_LDOCONTROL1_REG,
+ .enable_mask = AS3722_LDO11_CTRL,
+ .sleep_ctrl_reg = AS3722_ENABLE_CTRL5_REG,
+ .sleep_ctrl_mask = AS3722_LDO11_EXT_ENABLE_MASK,
+ .n_voltages = AS3722_LDO_NUM_VOLT,
+ },
+};
+
+static const int as3722_ldo_current[] = { 150000, 300000 };
+static const int as3722_sd016_current[] = { 2500000, 3000000, 3500000 };
+
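+/*
+ * Return the index of the largest current in curr_table that falls within
+ * [min_uA, max_uA], or -EINVAL if no entry fits the requested range.
+ */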
+static int as3722_current_to_index(int min_uA, int max_uA,
+ const int *curr_table, int n_currents)
+{
+ int i;
+
+ for (i = n_currents - 1; i >= 0; i--) {
+ if ((min_uA <= curr_table[i]) && (curr_table[i] <= max_uA))
+ return i;
+ }
+ return -EINVAL;
+}
+
+static int as3722_ldo_get_current_limit(struct regulator_dev *rdev)
+{
+ struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
+ struct as3722 *as3722 = as3722_regs->as3722;
+ int id = rdev_get_id(rdev);
+ u32 val;
+ int ret;
+
+ ret = as3722_read(as3722, as3722_reg_lookup[id].vsel_reg, &val);
+ if (ret < 0) {
+ dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n",
+ as3722_reg_lookup[id].vsel_reg, ret);
+ return ret;
+ }
+ if (val & AS3722_LDO_ILIMIT_MASK)
+ return 300000;
+ return 150000;
+}
+
+static int as3722_ldo_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
+ struct as3722 *as3722 = as3722_regs->as3722;
+ int id = rdev_get_id(rdev);
+ int ret;
+ u32 reg = 0;
+
+ ret = as3722_current_to_index(min_uA, max_uA, as3722_ldo_current,
+ ARRAY_SIZE(as3722_ldo_current));
+ if (ret < 0) {
+ dev_err(as3722_regs->dev,
+ "Current range min:max = %d:%d does not support\n",
+ min_uA, max_uA);
+ return ret;
+ }
+ if (ret)
+ reg = AS3722_LDO_ILIMIT_BIT;
+ return as3722_update_bits(as3722, as3722_reg_lookup[id].vsel_reg,
+ AS3722_LDO_ILIMIT_MASK, reg);
+}
+
+static struct regulator_ops as3722_ldo0_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_current_limit = as3722_ldo_get_current_limit,
+ .set_current_limit = as3722_ldo_set_current_limit,
+};
+
+static struct regulator_ops as3722_ldo0_extcntrl_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_current_limit = as3722_ldo_get_current_limit,
+ .set_current_limit = as3722_ldo_set_current_limit,
+};
+
+static int as3722_ldo3_set_tracking_mode(struct as3722_regulators *as3722_reg,
+ int id, u8 mode)
+{
+ struct as3722 *as3722 = as3722_reg->as3722;
+
+ switch (mode) {
+ case AS3722_LDO3_MODE_PMOS:
+ case AS3722_LDO3_MODE_PMOS_TRACKING:
+ case AS3722_LDO3_MODE_NMOS:
+ case AS3722_LDO3_MODE_SWITCH:
+ return as3722_update_bits(as3722,
+ as3722_reg_lookup[id].vsel_reg,
+ AS3722_LDO3_MODE_MASK, mode);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int as3722_ldo3_get_current_limit(struct regulator_dev *rdev)
+{
+ return 150000;
+}
+
+static struct regulator_ops as3722_ldo3_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_current_limit = as3722_ldo3_get_current_limit,
+};
+
+static struct regulator_ops as3722_ldo3_extcntrl_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_current_limit = as3722_ldo3_get_current_limit,
+};
+
+static const struct regulator_linear_range as3722_ldo_ranges[] = {
+ REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000),
+ REGULATOR_LINEAR_RANGE(1725000, 0x40, 0x7F, 25000),
+};
+
+static struct regulator_ops as3722_ldo_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .get_current_limit = as3722_ldo_get_current_limit,
+ .set_current_limit = as3722_ldo_set_current_limit,
+};
+
+static struct regulator_ops as3722_ldo_extcntrl_ops = {
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .get_current_limit = as3722_ldo_get_current_limit,
+ .set_current_limit = as3722_ldo_set_current_limit,
+};
+
+static unsigned int as3722_sd_get_mode(struct regulator_dev *rdev)
+{
+ struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
+ struct as3722 *as3722 = as3722_regs->as3722;
+ int id = rdev_get_id(rdev);
+ u32 val;
+ int ret;
+
+ if (!as3722_reg_lookup[id].control_reg)
+ return -ENOTSUPP;
+
+ ret = as3722_read(as3722, as3722_reg_lookup[id].control_reg, &val);
+ if (ret < 0) {
+ dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n",
+ as3722_reg_lookup[id].control_reg, ret);
+ return ret;
+ }
+
+ if (val & as3722_reg_lookup[id].mode_mask)
+ return REGULATOR_MODE_FAST;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int as3722_sd_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
+ struct as3722 *as3722 = as3722_regs->as3722;
+ u8 id = rdev_get_id(rdev);
+ u8 val = 0;
+ int ret;
+
+ if (!as3722_reg_lookup[id].control_reg)
+ return -ERANGE;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = as3722_reg_lookup[id].mode_mask;
+ /* fall through */
+ case REGULATOR_MODE_NORMAL:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = as3722_update_bits(as3722, as3722_reg_lookup[id].control_reg,
+ as3722_reg_lookup[id].mode_mask, val);
+ if (ret < 0) {
+ dev_err(as3722_regs->dev, "Reg 0x%02x update failed: %d\n",
+ as3722_reg_lookup[id].control_reg, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int as3722_sd016_get_current_limit(struct regulator_dev *rdev)
+{
+ struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
+ struct as3722 *as3722 = as3722_regs->as3722;
+ int id = rdev_get_id(rdev);
+ u32 val, reg;
+ int mask;
+ int ret;
+
+ switch (id) {
+ case AS3722_REGULATOR_ID_SD0:
+ reg = AS3722_OVCURRENT_REG;
+ mask = AS3722_OVCURRENT_SD0_TRIP_MASK;
+ break;
+ case AS3722_REGULATOR_ID_SD1:
+ reg = AS3722_OVCURRENT_REG;
+ mask = AS3722_OVCURRENT_SD1_TRIP_MASK;
+ break;
+ case AS3722_REGULATOR_ID_SD6:
+ reg = AS3722_OVCURRENT_DEB_REG;
+ mask = AS3722_OVCURRENT_SD6_TRIP_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = as3722_read(as3722, reg, &val);
+ if (ret < 0) {
+ dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n",
+ reg, ret);
+ return ret;
+ }
+ val &= mask;
+ val >>= ffs(mask) - 1;
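+ /* Only trip codes 0-2 correspond to an entry in as3722_sd016_current */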
+ if (val == 3)
+ return -EINVAL;
+ return as3722_sd016_current[val];
+}
+
+static int as3722_sd016_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
+ struct as3722 *as3722 = as3722_regs->as3722;
+ int id = rdev_get_id(rdev);
+ int ret;
+ int val;
+ int mask;
+ u32 reg;
+
+ ret = as3722_current_to_index(min_uA, max_uA, as3722_sd016_current,
+ ARRAY_SIZE(as3722_sd016_current));
+ if (ret < 0) {
+ dev_err(as3722_regs->dev,
+ "Current range min:max = %d:%d does not support\n",
+ min_uA, max_uA);
+ return ret;
+ }
+
+ switch (id) {
+ case AS3722_REGULATOR_ID_SD0:
+ reg = AS3722_OVCURRENT_REG;
+ mask = AS3722_OVCURRENT_SD0_TRIP_MASK;
+ break;
+ case AS3722_REGULATOR_ID_SD1:
+ reg = AS3722_OVCURRENT_REG;
+ mask = AS3722_OVCURRENT_SD1_TRIP_MASK;
+ break;
+ case AS3722_REGULATOR_ID_SD6:
+ reg = AS3722_OVCURRENT_DEB_REG;
+ mask = AS3722_OVCURRENT_SD6_TRIP_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+ val = ret & mask;
+ val <<= ffs(mask) - 1;
+ return as3722_update_bits(as3722, reg, mask, val);
+}
+
+static const struct regulator_linear_range as3722_sd2345_ranges[] = {
+ REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500),
+ REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
+ REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7F, 50000),
+};
+
+static struct regulator_ops as3722_sd016_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_current_limit = as3722_sd016_get_current_limit,
+ .set_current_limit = as3722_sd016_set_current_limit,
+ .get_mode = as3722_sd_get_mode,
+ .set_mode = as3722_sd_set_mode,
+};
+
+static struct regulator_ops as3722_sd016_extcntrl_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_current_limit = as3722_sd016_get_current_limit,
+ .set_current_limit = as3722_sd016_set_current_limit,
+ .get_mode = as3722_sd_get_mode,
+ .set_mode = as3722_sd_set_mode,
+};
+
+static struct regulator_ops as3722_sd2345_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .get_mode = as3722_sd_get_mode,
+ .set_mode = as3722_sd_set_mode,
+};
+
+static struct regulator_ops as3722_sd2345_extcntrl_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .get_mode = as3722_sd_get_mode,
+ .set_mode = as3722_sd_set_mode,
+};
+
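+/* Route this regulator's sleep/enable control to the selected external ENABLE1..ENABLE3 input */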
+static int as3722_extreg_init(struct as3722_regulators *as3722_regs, int id,
+ int ext_pwr_ctrl)
+{
+ int ret;
+ unsigned int val;
+
+ if ((ext_pwr_ctrl < AS3722_EXT_CONTROL_ENABLE1) ||
+ (ext_pwr_ctrl > AS3722_EXT_CONTROL_ENABLE3))
+ return -EINVAL;
+
+ val = ext_pwr_ctrl << (ffs(as3722_reg_lookup[id].sleep_ctrl_mask) - 1);
+ ret = as3722_update_bits(as3722_regs->as3722,
+ as3722_reg_lookup[id].sleep_ctrl_reg,
+ as3722_reg_lookup[id].sleep_ctrl_mask, val);
+ if (ret < 0)
+ dev_err(as3722_regs->dev, "Reg 0x%02x update failed: %d\n",
+ as3722_reg_lookup[id].sleep_ctrl_reg, ret);
+ return ret;
+}
+
+static struct of_regulator_match as3722_regulator_matches[] = {
+ { .name = "sd0", },
+ { .name = "sd1", },
+ { .name = "sd2", },
+ { .name = "sd3", },
+ { .name = "sd4", },
+ { .name = "sd5", },
+ { .name = "sd6", },
+ { .name = "ldo0", },
+ { .name = "ldo1", },
+ { .name = "ldo2", },
+ { .name = "ldo3", },
+ { .name = "ldo4", },
+ { .name = "ldo5", },
+ { .name = "ldo6", },
+ { .name = "ldo7", },
+ { .name = "ldo9", },
+ { .name = "ldo10", },
+ { .name = "ldo11", },
+};
+
+static int as3722_get_regulator_dt_data(struct platform_device *pdev,
+ struct as3722_regulators *as3722_regs)
+{
+ struct device_node *np;
+ struct as3722_regulator_config_data *reg_config;
+ u32 prop;
+ int id;
+ int ret;
+
+ np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!np) {
+ dev_err(&pdev->dev, "Device is not having regulators node\n");
+ return -ENODEV;
+ }
+ pdev->dev.of_node = np;
+
+ ret = of_regulator_match(&pdev->dev, np, as3722_regulator_matches,
+ ARRAY_SIZE(as3722_regulator_matches));
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Parsing of regulator node failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ for (id = 0; id < ARRAY_SIZE(as3722_regulator_matches); ++id) {
+ struct device_node *reg_node;
+
+ reg_config = &as3722_regs->reg_config_data[id];
+ reg_config->reg_init = as3722_regulator_matches[id].init_data;
+ reg_node = as3722_regulator_matches[id].of_node;
+
+ if (!reg_config->reg_init || !reg_node)
+ continue;
+
+ ret = of_property_read_u32(reg_node, "ams,ext-control", &prop);
+ if (!ret) {
+ if (prop < 3)
+ reg_config->ext_control = prop;
+ else
+ dev_warn(&pdev->dev,
+ "ext-control have invalid option: %u\n",
+ prop);
+ }
+ reg_config->enable_tracking =
+ of_property_read_bool(reg_node, "ams,enable-tracking");
+ }
+ return 0;
+}
+
+static int as3722_regulator_probe(struct platform_device *pdev)
+{
+ struct as3722 *as3722 = dev_get_drvdata(pdev->dev.parent);
+ struct as3722_regulators *as3722_regs;
+ struct as3722_regulator_config_data *reg_config;
+ struct regulator_dev *rdev;
+ struct regulator_config config = { };
+ struct regulator_ops *ops;
+ int id;
+ int ret;
+
+ as3722_regs = devm_kzalloc(&pdev->dev, sizeof(*as3722_regs),
+ GFP_KERNEL);
+ if (!as3722_regs)
+ return -ENOMEM;
+
+ as3722_regs->dev = &pdev->dev;
+ as3722_regs->as3722 = as3722;
+ platform_set_drvdata(pdev, as3722_regs);
+
+ ret = as3722_get_regulator_dt_data(pdev, as3722_regs);
+ if (ret < 0)
+ return ret;
+
+ config.dev = &pdev->dev;
+ config.driver_data = as3722_regs;
+ config.regmap = as3722->regmap;
+
+ for (id = 0; id < AS3722_REGULATOR_ID_MAX; id++) {
+ reg_config = &as3722_regs->reg_config_data[id];
+
+ as3722_regs->desc[id].name = as3722_reg_lookup[id].name;
+ as3722_regs->desc[id].supply_name = as3722_reg_lookup[id].sname;
+ as3722_regs->desc[id].id = as3722_reg_lookup[id].regulator_id;
+ as3722_regs->desc[id].n_voltages =
+ as3722_reg_lookup[id].n_voltages;
+ as3722_regs->desc[id].type = REGULATOR_VOLTAGE;
+ as3722_regs->desc[id].owner = THIS_MODULE;
+ as3722_regs->desc[id].enable_reg =
+ as3722_reg_lookup[id].enable_reg;
+ as3722_regs->desc[id].enable_mask =
+ as3722_reg_lookup[id].enable_mask;
+ as3722_regs->desc[id].vsel_reg = as3722_reg_lookup[id].vsel_reg;
+ as3722_regs->desc[id].vsel_mask =
+ as3722_reg_lookup[id].vsel_mask;
+ switch (id) {
+ case AS3722_REGULATOR_ID_LDO0:
+ if (reg_config->ext_control)
+ ops = &as3722_ldo0_extcntrl_ops;
+ else
+ ops = &as3722_ldo0_ops;
+ as3722_regs->desc[id].min_uV = 825000;
+ as3722_regs->desc[id].uV_step = 25000;
+ as3722_regs->desc[id].linear_min_sel = 1;
+ as3722_regs->desc[id].enable_time = 500;
+ break;
+ case AS3722_REGULATOR_ID_LDO3:
+ if (reg_config->ext_control)
+ ops = &as3722_ldo3_extcntrl_ops;
+ else
+ ops = &as3722_ldo3_ops;
+ as3722_regs->desc[id].min_uV = 620000;
+ as3722_regs->desc[id].uV_step = 20000;
+ as3722_regs->desc[id].linear_min_sel = 1;
+ as3722_regs->desc[id].enable_time = 500;
+ if (reg_config->enable_tracking) {
+ ret = as3722_ldo3_set_tracking_mode(as3722_regs,
+ id, AS3722_LDO3_MODE_PMOS_TRACKING);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "LDO3 tracking failed: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ break;
+ case AS3722_REGULATOR_ID_SD0:
+ case AS3722_REGULATOR_ID_SD1:
+ case AS3722_REGULATOR_ID_SD6:
+ if (reg_config->ext_control)
+ ops = &as3722_sd016_extcntrl_ops;
+ else
+ ops = &as3722_sd016_ops;
+ as3722_regs->desc[id].min_uV = 610000;
+ as3722_regs->desc[id].uV_step = 10000;
+ as3722_regs->desc[id].linear_min_sel = 1;
+ break;
+ case AS3722_REGULATOR_ID_SD2:
+ case AS3722_REGULATOR_ID_SD3:
+ case AS3722_REGULATOR_ID_SD4:
+ case AS3722_REGULATOR_ID_SD5:
+ if (reg_config->ext_control)
+ ops = &as3722_sd2345_extcntrl_ops;
+ else
+ ops = &as3722_sd2345_ops;
+ as3722_regs->desc[id].linear_ranges =
+ as3722_sd2345_ranges;
+ as3722_regs->desc[id].n_linear_ranges =
+ ARRAY_SIZE(as3722_sd2345_ranges);
+ break;
+ default:
+ if (reg_config->ext_control)
+ ops = &as3722_ldo_extcntrl_ops;
+ else
+ ops = &as3722_ldo_ops;
+ as3722_regs->desc[id].min_uV = 825000;
+ as3722_regs->desc[id].uV_step = 25000;
+ as3722_regs->desc[id].linear_min_sel = 1;
+ as3722_regs->desc[id].enable_time = 500;
+ as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges;
+ as3722_regs->desc[id].n_linear_ranges =
+ ARRAY_SIZE(as3722_ldo_ranges);
+ break;
+ }
+ as3722_regs->desc[id].ops = ops;
+ config.init_data = reg_config->reg_init;
+ config.of_node = as3722_regulator_matches[id].of_node;
+ rdev = devm_regulator_register(&pdev->dev,
+ &as3722_regs->desc[id], &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&pdev->dev, "regulator %d register failed %d\n",
+ id, ret);
+ return ret;
+ }
+
+ as3722_regs->rdevs[id] = rdev;
+ if (reg_config->ext_control) {
+ ret = regulator_enable_regmap(rdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Regulator %d enable failed: %d\n",
+ id, ret);
+ return ret;
+ }
+ ret = as3722_extreg_init(as3722_regs, id,
+ reg_config->ext_control);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "AS3722 ext control failed: %d", ret);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static const struct of_device_id of_as3722_regulator_match[] = {
+ { .compatible = "ams,as3722-regulator", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_as3722_regulator_match);
+
+static struct platform_driver as3722_regulator_driver = {
+ .driver = {
+ .name = "as3722-regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = of_as3722_regulator_match,
+ },
+ .probe = as3722_regulator_probe,
+};
+
+module_platform_driver(as3722_regulator_driver);
+
+MODULE_ALIAS("platform:as3722-regulator");
+MODULE_DESCRIPTION("AS3722 regulator driver");
+MODULE_AUTHOR("Florian Lobmaier <florian.lobmaier@ams.com>");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index a01b8b3b70ca..6382f0af353b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -36,6 +36,7 @@
#include <trace/events/regulator.h>
#include "dummy.h"
+#include "internal.h"
#define rdev_crit(rdev, fmt, ...) \
pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
@@ -52,8 +53,8 @@ static DEFINE_MUTEX(regulator_list_mutex);
static LIST_HEAD(regulator_list);
static LIST_HEAD(regulator_map_list);
static LIST_HEAD(regulator_ena_gpio_list);
+static LIST_HEAD(regulator_supply_alias_list);
static bool has_full_constraints;
-static bool board_wants_dummy_regulator;
static struct dentry *debugfs_root;
@@ -83,22 +84,16 @@ struct regulator_enable_gpio {
};
/*
- * struct regulator
+ * struct regulator_supply_alias
*
- * One for each consumer device.
+ * Used to map lookups for a supply onto an alternative device.
*/
-struct regulator {
- struct device *dev;
+struct regulator_supply_alias {
struct list_head list;
- unsigned int always_on:1;
- unsigned int bypass:1;
- int uA_load;
- int min_uV;
- int max_uV;
- char *supply_name;
- struct device_attribute dev_attr;
- struct regulator_dev *rdev;
- struct dentry *debugfs;
+ struct device *src_dev;
+ const char *src_supply;
+ struct device *alias_dev;
+ const char *alias_supply;
};
static int _regulator_is_enabled(struct regulator_dev *rdev);
@@ -923,6 +918,36 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
return 0;
}
+static int machine_constraints_current(struct regulator_dev *rdev,
+ struct regulation_constraints *constraints)
+{
+ struct regulator_ops *ops = rdev->desc->ops;
+ int ret;
+
+ if (!constraints->min_uA && !constraints->max_uA)
+ return 0;
+
+ if (constraints->min_uA > constraints->max_uA) {
+ rdev_err(rdev, "Invalid current constraints\n");
+ return -EINVAL;
+ }
+
+ if (!ops->set_current_limit || !ops->get_current_limit) {
+ rdev_warn(rdev, "Operation of current configuration missing\n");
+ return 0;
+ }
+
+ /* Set regulator current in constraints range */
+ ret = ops->set_current_limit(rdev, constraints->min_uA,
+ constraints->max_uA);
+ if (ret < 0) {
+ rdev_err(rdev, "Failed to set current constraint, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* set_machine_constraints - sets regulator constraints
* @rdev: regulator source
@@ -953,6 +978,10 @@ static int set_machine_constraints(struct regulator_dev *rdev,
if (ret != 0)
goto out;
+ ret = machine_constraints_current(rdev, rdev->constraints);
+ if (ret != 0)
+ goto out;
+
/* do we need to setup our suspend state */
if (rdev->constraints->initial_state) {
ret = suspend_prepare(rdev, rdev->constraints->initial_state);
@@ -1186,11 +1215,39 @@ overflow_err:
static int _regulator_get_enable_time(struct regulator_dev *rdev)
{
+ if (rdev->constraints && rdev->constraints->enable_time)
+ return rdev->constraints->enable_time;
if (!rdev->desc->ops->enable_time)
return rdev->desc->enable_time;
return rdev->desc->ops->enable_time(rdev);
}
+static struct regulator_supply_alias *regulator_find_supply_alias(
+ struct device *dev, const char *supply)
+{
+ struct regulator_supply_alias *map;
+
+ list_for_each_entry(map, &regulator_supply_alias_list, list)
+ if (map->src_dev == dev && strcmp(map->src_supply, supply) == 0)
+ return map;
+
+ return NULL;
+}
+
+static void regulator_supply_alias(struct device **dev, const char **supply)
+{
+ struct regulator_supply_alias *map;
+
+ map = regulator_find_supply_alias(*dev, *supply);
+ if (map) {
+ dev_dbg(*dev, "Mapping supply %s to %s,%s\n",
+ *supply, map->alias_supply,
+ dev_name(map->alias_dev));
+ *dev = map->alias_dev;
+ *supply = map->alias_supply;
+ }
+}
+
static struct regulator_dev *regulator_dev_lookup(struct device *dev,
const char *supply,
int *ret)
@@ -1200,6 +1257,8 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
struct regulator_map *map;
const char *devname = NULL;
+ regulator_supply_alias(&dev, &supply);
+
/* first do a dt based lookup */
if (dev && dev->of_node) {
node = of_get_regulator(dev, supply);
@@ -1243,16 +1302,16 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
/* Internal regulator request function */
static struct regulator *_regulator_get(struct device *dev, const char *id,
- bool exclusive)
+ bool exclusive, bool allow_dummy)
{
struct regulator_dev *rdev;
struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
const char *devname = NULL;
- int ret = 0;
+ int ret = -EPROBE_DEFER;
if (id == NULL) {
pr_err("get() with no identifier\n");
- return regulator;
+ return ERR_PTR(-EINVAL);
}
if (dev)
@@ -1264,34 +1323,32 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
if (rdev)
goto found;
+ regulator = ERR_PTR(ret);
+
/*
* If we have return value from dev_lookup fail, we do not expect to
* succeed, so, quit with appropriate error value
*/
- if (ret) {
- regulator = ERR_PTR(ret);
+ if (ret && ret != -ENODEV) {
goto out;
}
- if (board_wants_dummy_regulator) {
- rdev = dummy_regulator_rdev;
- goto found;
- }
-
-#ifdef CONFIG_REGULATOR_DUMMY
if (!devname)
devname = "deviceless";
- /* If the board didn't flag that it was fully constrained then
- * substitute in a dummy regulator so consumers can continue.
+ /*
+ * Assume that a regulator is physically present and enabled
+ * even if it isn't hooked up and just provide a dummy.
*/
- if (!has_full_constraints) {
+ if (has_full_constraints && allow_dummy) {
pr_warn("%s supply %s not found, using dummy regulator\n",
devname, id);
+
rdev = dummy_regulator_rdev;
goto found;
+ } else {
+ dev_err(dev, "dummy supplies not allowed\n");
}
-#endif
mutex_unlock(&regulator_list_mutex);
return regulator;
@@ -1349,44 +1406,10 @@ out:
*/
struct regulator *regulator_get(struct device *dev, const char *id)
{
- return _regulator_get(dev, id, false);
+ return _regulator_get(dev, id, false, true);
}
EXPORT_SYMBOL_GPL(regulator_get);
-static void devm_regulator_release(struct device *dev, void *res)
-{
- regulator_put(*(struct regulator **)res);
-}
-
-/**
- * devm_regulator_get - Resource managed regulator_get()
- * @dev: device for regulator "consumer"
- * @id: Supply name or regulator ID.
- *
- * Managed regulator_get(). Regulators returned from this function are
- * automatically regulator_put() on driver detach. See regulator_get() for more
- * information.
- */
-struct regulator *devm_regulator_get(struct device *dev, const char *id)
-{
- struct regulator **ptr, *regulator;
-
- ptr = devres_alloc(devm_regulator_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- regulator = regulator_get(dev, id);
- if (!IS_ERR(regulator)) {
- *ptr = regulator;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return regulator;
-}
-EXPORT_SYMBOL_GPL(devm_regulator_get);
-
/**
* regulator_get_exclusive - obtain exclusive access to a regulator.
* @dev: device for regulator "consumer"
@@ -1410,7 +1433,7 @@ EXPORT_SYMBOL_GPL(devm_regulator_get);
*/
struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
{
- return _regulator_get(dev, id, true);
+ return _regulator_get(dev, id, true, false);
}
EXPORT_SYMBOL_GPL(regulator_get_exclusive);
@@ -1439,40 +1462,10 @@ EXPORT_SYMBOL_GPL(regulator_get_exclusive);
*/
struct regulator *regulator_get_optional(struct device *dev, const char *id)
{
- return _regulator_get(dev, id, 0);
+ return _regulator_get(dev, id, false, false);
}
EXPORT_SYMBOL_GPL(regulator_get_optional);
-/**
- * devm_regulator_get_optional - Resource managed regulator_get_optional()
- * @dev: device for regulator "consumer"
- * @id: Supply name or regulator ID.
- *
- * Managed regulator_get_optional(). Regulators returned from this
- * function are automatically regulator_put() on driver detach. See
- * regulator_get_optional() for more information.
- */
-struct regulator *devm_regulator_get_optional(struct device *dev,
- const char *id)
-{
- struct regulator **ptr, *regulator;
-
- ptr = devres_alloc(devm_regulator_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- regulator = regulator_get_optional(dev, id);
- if (!IS_ERR(regulator)) {
- *ptr = regulator;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return regulator;
-}
-EXPORT_SYMBOL_GPL(devm_regulator_get_optional);
-
/* Locks held by regulator_put() */
static void _regulator_put(struct regulator *regulator)
{
@@ -1499,36 +1492,6 @@ static void _regulator_put(struct regulator *regulator)
}
/**
- * devm_regulator_get_exclusive - Resource managed regulator_get_exclusive()
- * @dev: device for regulator "consumer"
- * @id: Supply name or regulator ID.
- *
- * Managed regulator_get_exclusive(). Regulators returned from this function
- * are automatically regulator_put() on driver detach. See regulator_get() for
- * more information.
- */
-struct regulator *devm_regulator_get_exclusive(struct device *dev,
- const char *id)
-{
- struct regulator **ptr, *regulator;
-
- ptr = devres_alloc(devm_regulator_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- regulator = _regulator_get(dev, id, 1);
- if (!IS_ERR(regulator)) {
- *ptr = regulator;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return regulator;
-}
-EXPORT_SYMBOL_GPL(devm_regulator_get_exclusive);
-
-/**
* regulator_put - "free" the regulator source
* @regulator: regulator source
*
@@ -1544,34 +1507,133 @@ void regulator_put(struct regulator *regulator)
}
EXPORT_SYMBOL_GPL(regulator_put);
-static int devm_regulator_match(struct device *dev, void *res, void *data)
+/**
+ * regulator_register_supply_alias - Provide device alias for supply lookup
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: Supply name or regulator ID
+ * @alias_dev: device that should be used to lookup the supply
+ * @alias_id: Supply name or regulator ID that should be used to lookup the
+ * supply
+ *
+ * All lookups for id on dev will instead be conducted for alias_id on
+ * alias_dev.
+ */
+int regulator_register_supply_alias(struct device *dev, const char *id,
+ struct device *alias_dev,
+ const char *alias_id)
+{
+ struct regulator_supply_alias *map;
+
+ map = regulator_find_supply_alias(dev, id);
+ if (map)
+ return -EEXIST;
+
+ map = kzalloc(sizeof(struct regulator_supply_alias), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ map->src_dev = dev;
+ map->src_supply = id;
+ map->alias_dev = alias_dev;
+ map->alias_supply = alias_id;
+
+ list_add(&map->list, &regulator_supply_alias_list);
+
+ pr_info("Adding alias for supply %s,%s -> %s,%s\n",
+ id, dev_name(dev), alias_id, dev_name(alias_dev));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regulator_register_supply_alias);
+
+/**
+ * regulator_unregister_supply_alias - Remove device alias
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: Supply name or regulator ID
+ *
+ * Remove a lookup alias if one exists for id on dev.
+ */
+void regulator_unregister_supply_alias(struct device *dev, const char *id)
{
- struct regulator **r = res;
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
+ struct regulator_supply_alias *map;
+
+ map = regulator_find_supply_alias(dev, id);
+ if (map) {
+ list_del(&map->list);
+ kfree(map);
}
- return *r == data;
}
+EXPORT_SYMBOL_GPL(regulator_unregister_supply_alias);
/**
- * devm_regulator_put - Resource managed regulator_put()
- * @regulator: regulator to free
+ * regulator_bulk_register_supply_alias - register multiple aliases
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: List of supply names or regulator IDs
+ * @alias_dev: device that should be used to lookup the supply
+ * @alias_id: List of supply names or regulator IDs that should be used to
+ * lookup the supply
+ * @num_id: Number of aliases to register
+ *
+ * @return 0 on success, an errno on failure.
*
- * Deallocate a regulator allocated with devm_regulator_get(). Normally
- * this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
+ * This helper function allows drivers to register several supply
+ * aliases in one operation. If any of the aliases cannot be
+ * registered, any aliases that were registered will be removed
+ * before returning to the caller.
*/
-void devm_regulator_put(struct regulator *regulator)
+int regulator_bulk_register_supply_alias(struct device *dev, const char **id,
+ struct device *alias_dev,
+ const char **alias_id,
+ int num_id)
{
- int rc;
+ int i;
+ int ret;
+
+ for (i = 0; i < num_id; ++i) {
+ ret = regulator_register_supply_alias(dev, id[i], alias_dev,
+ alias_id[i]);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ dev_err(dev,
+ "Failed to create supply alias %s,%s -> %s,%s\n",
+ id[i], dev_name(dev), alias_id[i], dev_name(alias_dev));
+
+ while (--i >= 0)
+ regulator_unregister_supply_alias(dev, id[i]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_bulk_register_supply_alias);
+
+/**
+ * regulator_bulk_unregister_supply_alias - unregister multiple aliases
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: List of supply names or regulator IDs
+ * @num_id: Number of aliases to unregister
+ *
+ * This helper function allows drivers to unregister several supply
+ * aliases in one operation.
+ */
+void regulator_bulk_unregister_supply_alias(struct device *dev,
+ const char **id,
+ int num_id)
+{
+ int i;
- rc = devres_release(regulator->dev, devm_regulator_release,
- devm_regulator_match, regulator);
- if (rc != 0)
- WARN_ON(rc);
+ for (i = 0; i < num_id; ++i)
+ regulator_unregister_supply_alias(dev, id[i]);
}
-EXPORT_SYMBOL_GPL(devm_regulator_put);
+EXPORT_SYMBOL_GPL(regulator_bulk_unregister_supply_alias);
+
/* Manage enable GPIO list. Same GPIO pin can be shared among regulators */
static int regulator_ena_gpio_request(struct regulator_dev *rdev,
@@ -1704,11 +1766,39 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
* together. */
trace_regulator_enable_delay(rdev_get_name(rdev));
- if (delay >= 1000) {
- mdelay(delay / 1000);
- udelay(delay % 1000);
- } else if (delay) {
- udelay(delay);
+ /*
+ * Delay for the requested amount of time as per the guidelines in:
+ *
+ * Documentation/timers/timers-howto.txt
+ *
+ * The assumption here is that regulators will never be enabled in
+ * atomic context and therefore sleeping functions can be used.
+ */
+ if (delay) {
+ unsigned int ms = delay / 1000;
+ unsigned int us = delay % 1000;
+
+ if (ms > 0) {
+ /*
+ * For small enough values, handle super-millisecond
+ * delays in the usleep_range() call below.
+ */
+ if (ms < 20)
+ us += ms * 1000;
+ else
+ msleep(ms);
+ }
+
+ /*
+ * Give the scheduler some room to coalesce with any other
+ * wakeup sources. For delays shorter than 10 us, don't even
+ * bother setting up high-resolution timers and just busy-
+ * loop.
+ */
+ if (us >= 10)
+ usleep_range(us, us + 100);
+ else
+ udelay(us);
}
trace_regulator_enable_complete(rdev_get_name(rdev));
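
A worked instance of the delay handling in the hunk above, for a hypothetical enable time of 2500 us: ms = 2 and us = 500; because ms < 20 the whole delay is folded back into us (2500) and served by a single usleep_range(2500, 2600) call, so only delays of 20 ms and more ever reach msleep(). A minimal standalone sketch of the same splitting rule (not part of the patch):

    #include <linux/delay.h>

    /* Mirror of the splitting rule in _regulator_do_enable(); delay is in us. */
    static void example_enable_delay(unsigned int delay)
    {
            unsigned int ms = delay / 1000;
            unsigned int us = delay % 1000;

            if (ms > 0 && ms < 20)
                    us += ms * 1000;
            else if (ms >= 20)
                    msleep(ms);

            if (us >= 10)
                    usleep_range(us, us + 100);
            else if (us)
                    udelay(us);
    }
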
@@ -2489,6 +2579,8 @@ static int _regulator_get_voltage(struct regulator_dev *rdev)
ret = rdev->desc->ops->get_voltage(rdev);
} else if (rdev->desc->ops->list_voltage) {
ret = rdev->desc->ops->list_voltage(rdev, 0);
+ } else if (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1)) {
+ ret = rdev->desc->fixed_uV;
} else {
return -EINVAL;
}
@@ -2912,52 +3004,6 @@ err:
}
EXPORT_SYMBOL_GPL(regulator_bulk_get);
-/**
- * devm_regulator_bulk_get - managed get multiple regulator consumers
- *
- * @dev: Device to supply
- * @num_consumers: Number of consumers to register
- * @consumers: Configuration of consumers; clients are stored here.
- *
- * @return 0 on success, an errno on failure.
- *
- * This helper function allows drivers to get several regulator
- * consumers in one operation with management, the regulators will
- * automatically be freed when the device is unbound. If any of the
- * regulators cannot be acquired then any regulators that were
- * allocated will be freed before returning to the caller.
- */
-int devm_regulator_bulk_get(struct device *dev, int num_consumers,
- struct regulator_bulk_data *consumers)
-{
- int i;
- int ret;
-
- for (i = 0; i < num_consumers; i++)
- consumers[i].consumer = NULL;
-
- for (i = 0; i < num_consumers; i++) {
- consumers[i].consumer = devm_regulator_get(dev,
- consumers[i].supply);
- if (IS_ERR(consumers[i].consumer)) {
- ret = PTR_ERR(consumers[i].consumer);
- dev_err(dev, "Failed to get supply '%s': %d\n",
- consumers[i].supply, ret);
- consumers[i].consumer = NULL;
- goto err;
- }
- }
-
- return 0;
-
-err:
- for (i = 0; i < num_consumers && consumers[i].consumer; i++)
- devm_regulator_put(consumers[i].consumer);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(devm_regulator_bulk_get);
-
static void regulator_bulk_enable_async(void *data, async_cookie_t cookie)
{
struct regulator_bulk_data *bulk = data;
@@ -3170,7 +3216,8 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
/* some attributes need specific methods to be displayed */
if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
(ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) ||
- (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0)) {
+ (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) ||
+ (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1))) {
status = device_create_file(dev, &dev_attr_microvolts);
if (status < 0)
return status;
@@ -3614,22 +3661,6 @@ void regulator_has_full_constraints(void)
EXPORT_SYMBOL_GPL(regulator_has_full_constraints);
/**
- * regulator_use_dummy_regulator - Provide a dummy regulator when none is found
- *
- * Calling this function will cause the regulator API to provide a
- * dummy regulator to consumers if no physical regulator is found,
- * allowing most consumers to proceed as though a regulator were
- * configured. This allows systems such as those with software
- * controllable regulators for the CPU core only to be brought up more
- * readily.
- */
-void regulator_use_dummy_regulator(void)
-{
- board_wants_dummy_regulator = true;
-}
-EXPORT_SYMBOL_GPL(regulator_use_dummy_regulator);
-
-/**
* rdev_get_drvdata - get rdev regulator driver data
* @rdev: regulator
*
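
Before the driver conversions below, a minimal consumer-side sketch of the supply-alias API added to core.c above, assuming the new calls are exported through <linux/regulator/consumer.h>; the parent/child devices and the "vdd"/"vcc" supply names are purely illustrative:

    #include <linux/device.h>
    #include <linux/regulator/consumer.h>

    /* Redirect the child's "vdd" lookups to the parent's "vcc" supply. */
    static int example_alias_child_supply(struct device *child,
                                          struct device *parent)
    {
            /* Managed variant: removed automatically when 'child' is unbound. */
            return devm_regulator_register_supply_alias(child, "vdd",
                                                        parent, "vcc");
    }

Any later regulator_get(child, "vdd") then resolves against the parent device, which is the behaviour regulator_dev_lookup() gains via regulator_supply_alias() in the hunks above.
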
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index f06854cf8cf5..b431ae357fcd 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -253,10 +253,8 @@ static int da9034_set_dvc_voltage_sel(struct regulator_dev *rdev,
}
static const struct regulator_linear_range da9034_ldo12_ranges[] = {
- { .min_uV = 1700000, .max_uV = 2050000, .min_sel = 0, .max_sel = 7,
- .uV_step = 50000 },
- { .min_uV = 2700000, .max_uV = 3050000, .min_sel = 8, .max_sel = 15,
- .uV_step = 50000 },
+ REGULATOR_LINEAR_RANGE(1700000, 0, 7, 50000),
+ REGULATOR_LINEAR_RANGE(2700000, 8, 15, 50000),
};
static struct regulator_ops da903x_regulator_ldo_ops = {
@@ -463,7 +461,7 @@ static int da903x_regulator_probe(struct platform_device *pdev)
config.init_data = dev_get_platdata(&pdev->dev);
config.driver_data = ri;
- rdev = regulator_register(&ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
@@ -474,21 +472,12 @@ static int da903x_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int da903x_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
- return 0;
-}
-
static struct platform_driver da903x_regulator_driver = {
.driver = {
.name = "da903x-regulator",
.owner = THIS_MODULE,
},
.probe = da903x_regulator_probe,
- .remove = da903x_regulator_remove,
};
static int __init da903x_regulator_init(void)
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 1e4d483f6163..3adeaeffc485 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -70,6 +70,7 @@ struct da9052_regulator_info {
int step_uV;
int min_uV;
int max_uV;
+ unsigned char activate_bit;
};
struct da9052_regulator {
@@ -209,6 +210,36 @@ static int da9052_map_voltage(struct regulator_dev *rdev,
return sel;
}
+static int da9052_regulator_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int id = rdev_get_id(rdev);
+ int ret;
+
+ ret = da9052_reg_update(regulator->da9052, rdev->desc->vsel_reg,
+ rdev->desc->vsel_mask, selector);
+ if (ret < 0)
+ return ret;
+
+ /* Some LDOs and DCDCs are DVC controlled, which requires enabling
+ * the activate bit to implement the changes on the output.
+ */
+ switch (id) {
+ case DA9052_ID_BUCK1:
+ case DA9052_ID_BUCK2:
+ case DA9052_ID_BUCK3:
+ case DA9052_ID_LDO2:
+ case DA9052_ID_LDO3:
+ ret = da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
+ info->activate_bit, info->activate_bit);
+ break;
+ }
+
+ return ret;
+}
+
static struct regulator_ops da9052_dcdc_ops = {
.get_current_limit = da9052_dcdc_get_current_limit,
.set_current_limit = da9052_dcdc_set_current_limit,
@@ -216,7 +247,7 @@ static struct regulator_ops da9052_dcdc_ops = {
.list_voltage = da9052_list_voltage,
.map_voltage = da9052_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_sel = da9052_regulator_set_voltage_sel,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -226,7 +257,7 @@ static struct regulator_ops da9052_ldo_ops = {
.list_voltage = da9052_list_voltage,
.map_voltage = da9052_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_sel = da9052_regulator_set_voltage_sel,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -243,14 +274,13 @@ static struct regulator_ops da9052_ldo_ops = {
.owner = THIS_MODULE,\
.vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
.vsel_mask = (1 << (sbits)) - 1,\
- .apply_reg = DA9052_SUPPLY_REG, \
- .apply_bit = (abits), \
.enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
.enable_mask = 1 << (ebits),\
},\
.min_uV = (min) * 1000,\
.max_uV = (max) * 1000,\
.step_uV = (step) * 1000,\
+ .activate_bit = (abits),\
}
#define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \
@@ -264,14 +294,13 @@ static struct regulator_ops da9052_ldo_ops = {
.owner = THIS_MODULE,\
.vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
.vsel_mask = (1 << (sbits)) - 1,\
- .apply_reg = DA9052_SUPPLY_REG, \
- .apply_bit = (abits), \
.enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
.enable_mask = 1 << (ebits),\
},\
.min_uV = (min) * 1000,\
.max_uV = (max) * 1000,\
.step_uV = (step) * 1000,\
+ .activate_bit = (abits),\
}
static struct da9052_regulator_info da9052_regulator_info[] = {
@@ -389,8 +418,9 @@ static int da9052_regulator_probe(struct platform_device *pdev)
#endif
}
- regulator->rdev = regulator_register(&regulator->info->reg_desc,
- &config);
+ regulator->rdev = devm_regulator_register(&pdev->dev,
+ &regulator->info->reg_desc,
+ &config);
if (IS_ERR(regulator->rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
regulator->info->reg_desc.name);
@@ -402,17 +432,8 @@ static int da9052_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int da9052_regulator_remove(struct platform_device *pdev)
-{
- struct da9052_regulator *regulator = platform_get_drvdata(pdev);
-
- regulator_unregister(regulator->rdev);
- return 0;
-}
-
static struct platform_driver da9052_regulator_driver = {
.probe = da9052_regulator_probe,
- .remove = da9052_regulator_remove,
.driver = {
.name = "da9052-regulator",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 77b53e5a231c..7f340206d329 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -564,13 +564,13 @@ static int da9055_regulator_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- regulator->rdev = regulator_register(&regulator->info->reg_desc,
- &config);
+ regulator->rdev = devm_regulator_register(&pdev->dev,
+ &regulator->info->reg_desc,
+ &config);
if (IS_ERR(regulator->rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
regulator->info->reg_desc.name);
- ret = PTR_ERR(regulator->rdev);
- return ret;
+ return PTR_ERR(regulator->rdev);
}
/* Only LDO 5 and 6 has got the over current interrupt */
@@ -588,7 +588,7 @@ static int da9055_regulator_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Failed to request Regulator IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ return ret;
}
}
}
@@ -596,24 +596,10 @@ static int da9055_regulator_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, regulator);
return 0;
-
-err_regulator:
- regulator_unregister(regulator->rdev);
- return ret;
-}
-
-static int da9055_regulator_remove(struct platform_device *pdev)
-{
- struct da9055_regulator *regulator = platform_get_drvdata(pdev);
-
- regulator_unregister(regulator->rdev);
-
- return 0;
}
static struct platform_driver da9055_regulator_driver = {
.probe = da9055_regulator_probe,
- .remove = da9055_regulator_remove,
.driver = {
.name = "da9055-regulator",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index b9f2653e4ef9..56727eb745df 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -717,7 +717,7 @@ static int da9063_regulator_probe(struct platform_device *pdev)
{
struct da9063 *da9063 = dev_get_drvdata(pdev->dev.parent);
struct da9063_pdata *da9063_pdata = dev_get_platdata(da9063->dev);
- struct of_regulator_match *da9063_reg_matches;
+ struct of_regulator_match *da9063_reg_matches = NULL;
struct da9063_regulators_pdata *regl_pdata;
const struct da9063_dev_model *model;
struct da9063_regulators *regulators;
@@ -847,13 +847,13 @@ static int da9063_regulator_probe(struct platform_device *pdev)
if (da9063_reg_matches)
config.of_node = da9063_reg_matches[id].of_node;
config.regmap = da9063->regmap;
- regl->rdev = regulator_register(&regl->desc, &config);
+ regl->rdev = devm_regulator_register(&pdev->dev, &regl->desc,
+ &config);
if (IS_ERR(regl->rdev)) {
dev_err(&pdev->dev,
"Failed to register %s regulator\n",
regl->desc.name);
- ret = PTR_ERR(regl->rdev);
- goto err;
+ return PTR_ERR(regl->rdev);
}
id++;
n++;
@@ -862,9 +862,8 @@ static int da9063_regulator_probe(struct platform_device *pdev)
/* LDOs overcurrent event support */
irq = platform_get_irq_byname(pdev, "LDO_LIM");
if (irq < 0) {
- ret = irq;
dev_err(&pdev->dev, "Failed to get IRQ.\n");
- goto err;
+ return irq;
}
regulators->irq_ldo_lim = regmap_irq_get_virq(da9063->regmap_irq, irq);
@@ -881,27 +880,15 @@ static int da9063_regulator_probe(struct platform_device *pdev)
}
return 0;
-
-err:
- /* Wind back regulators registeration */
- while (--n >= 0)
- regulator_unregister(regulators->regulator[n].rdev);
-
- return ret;
}
static int da9063_regulator_remove(struct platform_device *pdev)
{
struct da9063_regulators *regulators = platform_get_drvdata(pdev);
- struct da9063_regulator *regl;
free_irq(regulators->irq_ldo_lim, regulators);
free_irq(regulators->irq_uvov, regulators);
- for (regl = &regulators->regulator[regulators->n_regulators - 1];
- regl >= &regulators->regulator[0]; regl--)
- regulator_unregister(regl->rdev);
-
return 0;
}
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index f0fe54b38977..6f5ecbe1132e 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/regmap.h>
#include "da9210-regulator.h"
@@ -126,7 +127,8 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct da9210 *chip;
- struct da9210_pdata *pdata = i2c->dev.platform_data;
+ struct device *dev = &i2c->dev;
+ struct da9210_pdata *pdata = dev_get_platdata(dev);
struct regulator_dev *rdev = NULL;
struct regulator_config config = { };
int error;
@@ -147,12 +149,13 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
}
config.dev = &i2c->dev;
- if (pdata)
- config.init_data = &pdata->da9210_constraints;
+ config.init_data = pdata ? &pdata->da9210_constraints :
+ of_get_regulator_init_data(dev, dev->of_node);
config.driver_data = chip;
config.regmap = chip->regmap;
+ config.of_node = dev->of_node;
- rdev = regulator_register(&da9210_reg, &config);
+ rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config);
if (IS_ERR(rdev)) {
dev_err(&i2c->dev, "Failed to register DA9210 regulator\n");
return PTR_ERR(rdev);
@@ -165,13 +168,6 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int da9210_i2c_remove(struct i2c_client *i2c)
-{
- struct da9210 *chip = i2c_get_clientdata(i2c);
- regulator_unregister(chip->rdev);
- return 0;
-}
-
static const struct i2c_device_id da9210_i2c_id[] = {
{"da9210", 0},
{},
@@ -185,7 +181,6 @@ static struct i2c_driver da9210_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = da9210_i2c_probe,
- .remove = da9210_i2c_remove,
.id_table = da9210_i2c_id,
};
diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
new file mode 100644
index 000000000000..f44818b838dc
--- /dev/null
+++ b/drivers/regulator/devres.c
@@ -0,0 +1,415 @@
+/*
+ * devres.c -- Voltage/Current Regulator framework devres implementation.
+ *
+ * Copyright 2013 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+enum {
+ NORMAL_GET,
+ EXCLUSIVE_GET,
+ OPTIONAL_GET,
+};
+
+static void devm_regulator_release(struct device *dev, void *res)
+{
+ regulator_put(*(struct regulator **)res);
+}
+
+static struct regulator *_devm_regulator_get(struct device *dev, const char *id,
+ int get_type)
+{
+ struct regulator **ptr, *regulator;
+
+ ptr = devres_alloc(devm_regulator_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ switch (get_type) {
+ case NORMAL_GET:
+ regulator = regulator_get(dev, id);
+ break;
+ case EXCLUSIVE_GET:
+ regulator = regulator_get_exclusive(dev, id);
+ break;
+ case OPTIONAL_GET:
+ regulator = regulator_get_optional(dev, id);
+ break;
+ default:
+ regulator = ERR_PTR(-EINVAL);
+ }
+
+ if (!IS_ERR(regulator)) {
+ *ptr = regulator;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return regulator;
+}
+
+/**
+ * devm_regulator_get - Resource managed regulator_get()
+ * @dev: device for regulator "consumer"
+ * @id: Supply name or regulator ID.
+ *
+ * Managed regulator_get(). Regulators returned from this function are
+ * automatically regulator_put() on driver detach. See regulator_get() for more
+ * information.
+ */
+struct regulator *devm_regulator_get(struct device *dev, const char *id)
+{
+ return _devm_regulator_get(dev, id, NORMAL_GET);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_get);
+
+/**
+ * devm_regulator_get_exclusive - Resource managed regulator_get_exclusive()
+ * @dev: device for regulator "consumer"
+ * @id: Supply name or regulator ID.
+ *
+ * Managed regulator_get_exclusive(). Regulators returned from this function
+ * are automatically regulator_put() on driver detach. See regulator_get() for
+ * more information.
+ */
+struct regulator *devm_regulator_get_exclusive(struct device *dev,
+ const char *id)
+{
+ return _devm_regulator_get(dev, id, EXCLUSIVE_GET);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_get_exclusive);
+
+/**
+ * devm_regulator_get_optional - Resource managed regulator_get_optional()
+ * @dev: device for regulator "consumer"
+ * @id: Supply name or regulator ID.
+ *
+ * Managed regulator_get_optional(). Regulators returned from this
+ * function are automatically regulator_put() on driver detach. See
+ * regulator_get_optional() for more information.
+ */
+struct regulator *devm_regulator_get_optional(struct device *dev,
+ const char *id)
+{
+ return _devm_regulator_get(dev, id, OPTIONAL_GET);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_get_optional);
+
+static int devm_regulator_match(struct device *dev, void *res, void *data)
+{
+ struct regulator **r = res;
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+ return *r == data;
+}
+
+/**
+ * devm_regulator_put - Resource managed regulator_put()
+ * @regulator: regulator to free
+ *
+ * Deallocate a regulator allocated with devm_regulator_get(). Normally
+ * this function will not need to be called and the resource management
+ * code will ensure that the resource is freed.
+ */
+void devm_regulator_put(struct regulator *regulator)
+{
+ int rc;
+
+ rc = devres_release(regulator->dev, devm_regulator_release,
+ devm_regulator_match, regulator);
+ if (rc != 0)
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_put);
+
+/**
+ * devm_regulator_bulk_get - managed get multiple regulator consumers
+ *
+ * @dev: Device to supply
+ * @num_consumers: Number of consumers to register
+ * @consumers: Configuration of consumers; clients are stored here.
+ *
+ * @return 0 on success, an errno on failure.
+ *
+ * This helper function allows drivers to get several regulator
+ * consumers in one operation with management, the regulators will
+ * automatically be freed when the device is unbound. If any of the
+ * regulators cannot be acquired then any regulators that were
+ * allocated will be freed before returning to the caller.
+ */
+int devm_regulator_bulk_get(struct device *dev, int num_consumers,
+ struct regulator_bulk_data *consumers)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < num_consumers; i++)
+ consumers[i].consumer = NULL;
+
+ for (i = 0; i < num_consumers; i++) {
+ consumers[i].consumer = devm_regulator_get(dev,
+ consumers[i].supply);
+ if (IS_ERR(consumers[i].consumer)) {
+ ret = PTR_ERR(consumers[i].consumer);
+ dev_err(dev, "Failed to get supply '%s': %d\n",
+ consumers[i].supply, ret);
+ consumers[i].consumer = NULL;
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < num_consumers && consumers[i].consumer; i++)
+ devm_regulator_put(consumers[i].consumer);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_bulk_get);
+
+static void devm_rdev_release(struct device *dev, void *res)
+{
+ regulator_unregister(*(struct regulator_dev **)res);
+}
+
+/**
+ * devm_regulator_register - Resource managed regulator_register()
+ * @dev: device managing the regulator resource
+ * @regulator_desc: regulator to register
+ * @config: runtime configuration for regulator
+ *
+ * Called by regulator drivers to register a regulator. Returns a
+ * valid pointer to struct regulator_dev on success or an ERR_PTR() on
+ * error. The regulator will automatically be released when the device
+ * is unbound.
+ */
+struct regulator_dev *devm_regulator_register(struct device *dev,
+ const struct regulator_desc *regulator_desc,
+ const struct regulator_config *config)
+{
+ struct regulator_dev **ptr, *rdev;
+
+ ptr = devres_alloc(devm_rdev_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ rdev = regulator_register(regulator_desc, config);
+ if (!IS_ERR(rdev)) {
+ *ptr = rdev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return rdev;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_register);
+
+static int devm_rdev_match(struct device *dev, void *res, void *data)
+{
+ struct regulator_dev **r = res;
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+ return *r == data;
+}
+
+/**
+ * devm_regulator_unregister - Resource managed regulator_unregister()
+ * @dev: device that requested the regulator
+ * @rdev: regulator to free
+ *
+ * Unregister a regulator registered with devm_regulator_register().
+ * Normally this function will not need to be called and the resource
+ * management code will ensure that the resource is freed.
+ */
+void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_rdev_release, devm_rdev_match, rdev);
+ if (rc != 0)
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_unregister);
+
+struct regulator_supply_alias_match {
+ struct device *dev;
+ const char *id;
+};
+
+static int devm_regulator_match_supply_alias(struct device *dev, void *res,
+ void *data)
+{
+ struct regulator_supply_alias_match *match = res;
+ struct regulator_supply_alias_match *target = data;
+
+ return match->dev == target->dev && strcmp(match->id, target->id) == 0;
+}
+
+static void devm_regulator_destroy_supply_alias(struct device *dev, void *res)
+{
+ struct regulator_supply_alias_match *match = res;
+
+ regulator_unregister_supply_alias(match->dev, match->id);
+}
+
+/**
+ * devm_regulator_register_supply_alias - Resource managed
+ * regulator_register_supply_alias()
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: Supply name or regulator ID
+ * @alias_dev: device that should be used to lookup the supply
+ * @alias_id: Supply name or regulator ID that should be used to lookup the
+ * supply
+ *
+ * The supply alias will automatically be unregistered when the source
+ * device is unbound.
+ */
+int devm_regulator_register_supply_alias(struct device *dev, const char *id,
+ struct device *alias_dev,
+ const char *alias_id)
+{
+ struct regulator_supply_alias_match *match;
+ int ret;
+
+ match = devres_alloc(devm_regulator_destroy_supply_alias,
+ sizeof(struct regulator_supply_alias_match),
+ GFP_KERNEL);
+ if (!match)
+ return -ENOMEM;
+
+ match->dev = dev;
+ match->id = id;
+
+ ret = regulator_register_supply_alias(dev, id, alias_dev, alias_id);
+ if (ret < 0) {
+ devres_free(match);
+ return ret;
+ }
+
+ devres_add(dev, match);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_register_supply_alias);
+
+/**
+ * devm_regulator_unregister_supply_alias - Resource managed
+ * regulator_unregister_supply_alias()
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: Supply name or regulator ID
+ *
+ * Unregister an alias registered with
+ * devm_regulator_register_supply_alias(). Normally this function
+ * will not need to be called and the resource management code
+ * will ensure that the resource is freed.
+ */
+void devm_regulator_unregister_supply_alias(struct device *dev, const char *id)
+{
+ struct regulator_supply_alias_match match;
+ int rc;
+
+ match.dev = dev;
+ match.id = id;
+
+ rc = devres_release(dev, devm_regulator_destroy_supply_alias,
+ devm_regulator_match_supply_alias, &match);
+ if (rc != 0)
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_unregister_supply_alias);
+
+/**
+ * devm_regulator_bulk_register_supply_alias - Managed register
+ * multiple aliases
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: List of supply names or regulator IDs
+ * @alias_dev: device that should be used to lookup the supply
+ * @alias_id: List of supply names or regulator IDs that should be used to
+ * lookup the supply
+ * @num_id: Number of aliases to register
+ *
+ * @return 0 on success, an errno on failure.
+ *
+ * This helper function allows drivers to register several supply
+ * aliases in one operation; the aliases will be automatically
+ * unregistered when the source device is unbound. If any of the
+ * aliases cannot be registered, any aliases that were registered
+ * will be removed before returning to the caller.
+ */
+int devm_regulator_bulk_register_supply_alias(struct device *dev,
+ const char **id,
+ struct device *alias_dev,
+ const char **alias_id,
+ int num_id)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < num_id; ++i) {
+ ret = devm_regulator_register_supply_alias(dev, id[i],
+ alias_dev,
+ alias_id[i]);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ dev_err(dev,
+ "Failed to create supply alias %s,%s -> %s,%s\n",
+ id[i], dev_name(dev), alias_id[i], dev_name(alias_dev));
+
+ while (--i >= 0)
+ devm_regulator_unregister_supply_alias(dev, id[i]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_bulk_register_supply_alias);
+
+/**
+ * devm_regulator_bulk_unregister_supply_alias - Managed unregister
+ * multiple aliases
+ *
+ * @dev: device that will be given as the regulator "consumer"
+ * @id: List of supply names or regulator IDs
+ * @num_id: Number of aliases to unregister
+ *
+ * Unregister aliases registered with
+ * devm_regulator_bulk_register_supply_alias(). Normally this function
+ * will not need to be called and the resource management code
+ * will ensure that the resource is freed.
+ */
+void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
+ const char **id,
+ int num_id)
+{
+ int i;
+
+ for (i = 0; i < num_id; ++i)
+ devm_regulator_unregister_supply_alias(dev, id[i]);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_bulk_unregister_supply_alias);
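
For reference, a hedged sketch of how a consumer driver might use the devm_* getters now hosted in devres.c; the platform driver and supply names are hypothetical, and error handling is kept to the minimum needed to show the devres lifetime (no explicit regulator_put() is required):

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/regulator/consumer.h>
    #include <linux/slab.h>

    struct example_priv {
            struct regulator_bulk_data supplies[2];
    };

    static int example_probe(struct platform_device *pdev)
    {
            struct example_priv *priv;
            int ret;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            priv->supplies[0].supply = "vdda";      /* hypothetical names */
            priv->supplies[1].supply = "vddio";

            ret = devm_regulator_bulk_get(&pdev->dev,
                                          ARRAY_SIZE(priv->supplies),
                                          priv->supplies);
            if (ret)
                    return ret;

            platform_set_drvdata(pdev, priv);

            /* Both supplies are put automatically on driver detach. */
            return regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
                                         priv->supplies);
    }

This managed release path is what allows the per-driver .remove callbacks to be dropped throughout the rest of this series.
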
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 70b7220c587f..7ca3d9e3b0fe 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -218,9 +218,8 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
rdesc->vsel_mask = VSEL_NSEL_MASK;
rdesc->owner = THIS_MODULE;
- di->rdev = regulator_register(&di->desc, config);
+ di->rdev = devm_regulator_register(di->dev, &di->desc, config);
return PTR_ERR_OR_ZERO(di->rdev);
-
}
static struct regmap_config fan53555_regmap_config = {
@@ -291,14 +290,6 @@ static int fan53555_regulator_probe(struct i2c_client *client,
}
-static int fan53555_regulator_remove(struct i2c_client *client)
-{
- struct fan53555_device_info *di = i2c_get_clientdata(client);
-
- regulator_unregister(di->rdev);
- return 0;
-}
-
static const struct i2c_device_id fan53555_id[] = {
{"fan53555", -1},
{ },
@@ -309,7 +300,6 @@ static struct i2c_driver fan53555_regulator_driver = {
.name = "fan53555-regulator",
},
.probe = fan53555_regulator_probe,
- .remove = fan53555_regulator_remove,
.id_table = fan53555_id,
};
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 7610920014d7..5ea64b94341c 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -34,7 +34,6 @@
struct fixed_voltage_data {
struct regulator_desc desc;
struct regulator_dev *dev;
- int microvolts;
};
@@ -108,30 +107,7 @@ of_get_fixed_voltage_config(struct device *dev)
return config;
}
-static int fixed_voltage_get_voltage(struct regulator_dev *dev)
-{
- struct fixed_voltage_data *data = rdev_get_drvdata(dev);
-
- if (data->microvolts)
- return data->microvolts;
- else
- return -EINVAL;
-}
-
-static int fixed_voltage_list_voltage(struct regulator_dev *dev,
- unsigned selector)
-{
- struct fixed_voltage_data *data = rdev_get_drvdata(dev);
-
- if (selector != 0)
- return -EINVAL;
-
- return data->microvolts;
-}
-
static struct regulator_ops fixed_voltage_ops = {
- .get_voltage = fixed_voltage_get_voltage,
- .list_voltage = fixed_voltage_list_voltage,
};
static int reg_fixed_voltage_probe(struct platform_device *pdev)
@@ -186,23 +162,21 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
if (config->microvolts)
drvdata->desc.n_voltages = 1;
- drvdata->microvolts = config->microvolts;
+ drvdata->desc.fixed_uV = config->microvolts;
if (config->gpio >= 0)
cfg.ena_gpio = config->gpio;
cfg.ena_gpio_invert = !config->enable_high;
if (config->enabled_at_boot) {
- if (config->enable_high) {
+ if (config->enable_high)
cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
- } else {
+ else
cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
- }
} else {
- if (config->enable_high) {
+ if (config->enable_high)
cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
- } else {
+ else
cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
- }
}
if (config->gpio_is_open_drain)
cfg.ena_gpio_flags |= GPIOF_OPEN_DRAIN;
@@ -222,7 +196,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drvdata);
dev_dbg(&pdev->dev, "%s supplying %duV\n", drvdata->desc.name,
- drvdata->microvolts);
+ drvdata->desc.fixed_uV);
return 0;
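
The fixed.c change relies on the desc->fixed_uV handling added to _regulator_get_voltage() earlier in this diff: a descriptor with fixed_uV set and n_voltages == 1 needs no get_voltage/list_voltage callbacks at all. A hedged sketch of such a descriptor, with illustrative names and assuming fixed_uV is the regulator_desc field introduced by this series:

    #include <linux/module.h>
    #include <linux/regulator/driver.h>

    static struct regulator_ops example_fixed_ops = {
            /* Intentionally empty; the core reports desc->fixed_uV. */
    };

    static const struct regulator_desc example_fixed_desc = {
            .name           = "example-3v3",
            .type           = REGULATOR_VOLTAGE,
            .owner          = THIS_MODULE,
            .ops            = &example_fixed_ops,
            .fixed_uV       = 3300000,
            .n_voltages     = 1,
    };
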
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 98a98ffa7fe0..04406a918c04 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -283,7 +283,6 @@ static int gpio_regulator_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "No regulator type set\n");
ret = -EINVAL;
goto err_memgpio;
- break;
}
drvdata->nr_gpios = config->nr_gpios;
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index 6e30df14714b..e221a271ba56 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -284,9 +284,13 @@ int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
}
for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
+ int linear_max_uV;
+
range = &rdev->desc->linear_ranges[i];
+ linear_max_uV = range->min_uV +
+ (range->max_sel - range->min_sel) * range->uV_step;
- if (!(min_uV <= range->max_uV && max_uV >= range->min_uV))
+ if (!(min_uV <= linear_max_uV && max_uV >= range->min_uV))
continue;
if (min_uV <= range->min_uV)
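
The helpers.c hunk stops trusting range->max_uV and instead derives the top of each linear range from its selectors and step. Checking it against the da9034 LDO range converted earlier in this diff, REGULATOR_LINEAR_RANGE(1700000, 0, 7, 50000):

    /* linear_max_uV = range->min_uV + (max_sel - min_sel) * uV_step */
    static int example_linear_max_uV(void)
    {
            return 1700000 + (7 - 0) * 50000;       /* = 2050000 uV */
    }

which matches the 2050000 uV ceiling carried by the old explicit .max_uV initialiser that the da903x hunk removes.
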
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
new file mode 100644
index 000000000000..84bbda10c396
--- /dev/null
+++ b/drivers/regulator/internal.h
@@ -0,0 +1,38 @@
+/*
+ * internal.h -- Voltage/Current Regulator framework internal code
+ *
+ * Copyright 2007, 2008 Wolfson Microelectronics PLC.
+ * Copyright 2008 SlimLogic Ltd.
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __REGULATOR_INTERNAL_H
+#define __REGULATOR_INTERNAL_H
+
+/*
+ * struct regulator
+ *
+ * One for each consumer device.
+ */
+struct regulator {
+ struct device *dev;
+ struct list_head list;
+ unsigned int always_on:1;
+ unsigned int bypass:1;
+ int uA_load;
+ int min_uV;
+ int max_uV;
+ char *supply_name;
+ struct device_attribute dev_attr;
+ struct regulator_dev *rdev;
+ struct dentry *debugfs;
+};
+
+#endif
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index 88c1a3acf563..6e5da95fa025 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -112,7 +112,7 @@ static int isl6271a_probe(struct i2c_client *i2c,
struct regulator_config config = { };
struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct isl_pmic *pmic;
- int err, i;
+ int i;
if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
@@ -133,32 +133,17 @@ static int isl6271a_probe(struct i2c_client *i2c,
config.init_data = NULL;
config.driver_data = pmic;
- pmic->rdev[i] = regulator_register(&isl_rd[i], &config);
+ pmic->rdev[i] = devm_regulator_register(&i2c->dev, &isl_rd[i],
+ &config);
if (IS_ERR(pmic->rdev[i])) {
dev_err(&i2c->dev, "failed to register %s\n", id->name);
- err = PTR_ERR(pmic->rdev[i]);
- goto error;
+ return PTR_ERR(pmic->rdev[i]);
}
}
i2c_set_clientdata(i2c, pmic);
return 0;
-
-error:
- while (--i >= 0)
- regulator_unregister(pmic->rdev[i]);
- return err;
-}
-
-static int isl6271a_remove(struct i2c_client *i2c)
-{
- struct isl_pmic *pmic = i2c_get_clientdata(i2c);
- int i;
-
- for (i = 0; i < 3; i++)
- regulator_unregister(pmic->rdev[i]);
- return 0;
}
static const struct i2c_device_id isl6271a_id[] = {
@@ -174,7 +159,6 @@ static struct i2c_driver isl6271a_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = isl6271a_probe,
- .remove = isl6271a_remove,
.id_table = isl6271a_id,
};
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 5a4604ee5ea5..947c05ffe0ab 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -474,8 +474,8 @@ static int lp3971_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id lp3971_i2c_id[] = {
- { "lp3971", 0 },
- { }
+ { "lp3971", 0 },
+ { }
};
MODULE_DEVICE_TABLE(i2c, lp3971_i2c_id);
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 2b84b727a3c4..2e4734ff79fc 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -785,7 +785,7 @@ static int lp872x_regulator_register(struct lp872x *lp)
struct regulator_desc *desc;
struct regulator_config cfg = { };
struct regulator_dev *rdev;
- int i, ret;
+ int i;
for (i = 0; i < lp->num_regulators; i++) {
desc = (lp->chipid == LP8720) ? &lp8720_regulator_desc[i] :
@@ -796,34 +796,16 @@ static int lp872x_regulator_register(struct lp872x *lp)
cfg.driver_data = lp;
cfg.regmap = lp->regmap;
- rdev = regulator_register(desc, &cfg);
+ rdev = devm_regulator_register(lp->dev, desc, &cfg);
if (IS_ERR(rdev)) {
dev_err(lp->dev, "regulator register err");
- ret = PTR_ERR(rdev);
- goto err;
+ return PTR_ERR(rdev);
}
*(lp->regulators + i) = rdev;
}
return 0;
-err:
- while (--i >= 0) {
- rdev = *(lp->regulators + i);
- regulator_unregister(rdev);
- }
- return ret;
-}
-
-static void lp872x_regulator_unregister(struct lp872x *lp)
-{
- struct regulator_dev *rdev;
- int i;
-
- for (i = 0; i < lp->num_regulators; i++) {
- rdev = *(lp->regulators + i);
- regulator_unregister(rdev);
- }
}
static const struct regmap_config lp872x_regmap_config = {
@@ -979,14 +961,6 @@ err_dev:
return ret;
}
-static int lp872x_remove(struct i2c_client *cl)
-{
- struct lp872x *lp = i2c_get_clientdata(cl);
-
- lp872x_regulator_unregister(lp);
- return 0;
-}
-
static const struct of_device_id lp872x_dt_ids[] = {
{ .compatible = "ti,lp8720", },
{ .compatible = "ti,lp8725", },
@@ -1008,7 +982,6 @@ static struct i2c_driver lp872x_driver = {
.of_match_table = of_match_ptr(lp872x_dt_ids),
},
.probe = lp872x_probe,
- .remove = lp872x_remove,
.id_table = lp872x_ids,
};
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
index 0b015f2a7fd9..948afc249e29 100644
--- a/drivers/regulator/lp8788-buck.c
+++ b/drivers/regulator/lp8788-buck.c
@@ -515,7 +515,7 @@ static int lp8788_buck_probe(struct platform_device *pdev)
cfg.driver_data = buck;
cfg.regmap = lp->regmap;
- rdev = regulator_register(&lp8788_buck_desc[id], &cfg);
+ rdev = devm_regulator_register(&pdev->dev, &lp8788_buck_desc[id], &cfg);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(&pdev->dev, "BUCK%d regulator register err = %d\n",
@@ -529,18 +529,8 @@ static int lp8788_buck_probe(struct platform_device *pdev)
return 0;
}
-static int lp8788_buck_remove(struct platform_device *pdev)
-{
- struct lp8788_buck *buck = platform_get_drvdata(pdev);
-
- regulator_unregister(buck->regulator);
-
- return 0;
-}
-
static struct platform_driver lp8788_buck_driver = {
.probe = lp8788_buck_probe,
- .remove = lp8788_buck_remove,
.driver = {
.name = LP8788_DEV_BUCK,
.owner = THIS_MODULE,
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
index 0527d87c6dd5..b9a29a29933f 100644
--- a/drivers/regulator/lp8788-ldo.c
+++ b/drivers/regulator/lp8788-ldo.c
@@ -543,7 +543,7 @@ static int lp8788_dldo_probe(struct platform_device *pdev)
cfg.driver_data = ldo;
cfg.regmap = lp->regmap;
- rdev = regulator_register(&lp8788_dldo_desc[id], &cfg);
+ rdev = devm_regulator_register(&pdev->dev, &lp8788_dldo_desc[id], &cfg);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(&pdev->dev, "DLDO%d regulator register err = %d\n",
@@ -557,18 +557,8 @@ static int lp8788_dldo_probe(struct platform_device *pdev)
return 0;
}
-static int lp8788_dldo_remove(struct platform_device *pdev)
-{
- struct lp8788_ldo *ldo = platform_get_drvdata(pdev);
-
- regulator_unregister(ldo->regulator);
-
- return 0;
-}
-
static struct platform_driver lp8788_dldo_driver = {
.probe = lp8788_dldo_probe,
- .remove = lp8788_dldo_remove,
.driver = {
.name = LP8788_DEV_DLDO,
.owner = THIS_MODULE,
@@ -603,7 +593,7 @@ static int lp8788_aldo_probe(struct platform_device *pdev)
cfg.driver_data = ldo;
cfg.regmap = lp->regmap;
- rdev = regulator_register(&lp8788_aldo_desc[id], &cfg);
+ rdev = devm_regulator_register(&pdev->dev, &lp8788_aldo_desc[id], &cfg);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(&pdev->dev, "ALDO%d regulator register err = %d\n",
@@ -617,18 +607,8 @@ static int lp8788_aldo_probe(struct platform_device *pdev)
return 0;
}
-static int lp8788_aldo_remove(struct platform_device *pdev)
-{
- struct lp8788_ldo *ldo = platform_get_drvdata(pdev);
-
- regulator_unregister(ldo->regulator);
-
- return 0;
-}
-
static struct platform_driver lp8788_aldo_driver = {
.probe = lp8788_aldo_probe,
- .remove = lp8788_aldo_remove,
.driver = {
.name = LP8788_DEV_ALDO,
.owner = THIS_MODULE,
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 3a599ee0a456..e242dd316d36 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -166,7 +166,7 @@ static int max1586_pmic_probe(struct i2c_client *client,
struct max1586_platform_data *pdata = dev_get_platdata(&client->dev);
struct regulator_config config = { };
struct max1586_data *max1586;
- int i, id, ret = -ENOMEM;
+ int i, id;
max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data) +
sizeof(struct regulator_dev *) * (MAX1586_V6 + 1),
@@ -193,7 +193,7 @@ static int max1586_pmic_probe(struct i2c_client *client,
continue;
if (id < MAX1586_V3 || id > MAX1586_V6) {
dev_err(&client->dev, "invalid regulator id %d\n", id);
- goto err;
+ return -EINVAL;
}
if (id == MAX1586_V3) {
@@ -207,33 +207,18 @@ static int max1586_pmic_probe(struct i2c_client *client,
config.init_data = pdata->subdevs[i].platform_data;
config.driver_data = max1586;
- rdev[i] = regulator_register(&max1586_reg[id], &config);
+ rdev[i] = devm_regulator_register(&client->dev,
+ &max1586_reg[id], &config);
if (IS_ERR(rdev[i])) {
- ret = PTR_ERR(rdev[i]);
dev_err(&client->dev, "failed to register %s\n",
max1586_reg[id].name);
- goto err;
+ return PTR_ERR(rdev[i]);
}
}
i2c_set_clientdata(client, max1586);
dev_info(&client->dev, "Maxim 1586 regulator driver loaded\n");
return 0;
-
-err:
- while (--i >= 0)
- regulator_unregister(rdev[i]);
- return ret;
-}
-
-static int max1586_pmic_remove(struct i2c_client *client)
-{
- struct max1586_data *max1586 = i2c_get_clientdata(client);
- int i;
-
- for (i = 0; i <= MAX1586_V6; i++)
- regulator_unregister(max1586->rdev[i]);
- return 0;
}
static const struct i2c_device_id max1586_id[] = {
@@ -244,7 +229,6 @@ MODULE_DEVICE_TABLE(i2c, max1586_id);
static struct i2c_driver max1586_pmic_driver = {
.probe = max1586_pmic_probe,
- .remove = max1586_pmic_remove,
.driver = {
.name = "max1586",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index f563057e5690..ae001ccf26f4 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -478,32 +478,16 @@ static int max77686_pmic_probe(struct platform_device *pdev)
config.of_node = pdata->regulators[i].of_node;
max77686->opmode[i] = regulators[i].enable_mask;
- max77686->rdev[i] = regulator_register(&regulators[i], &config);
+ max77686->rdev[i] = devm_regulator_register(&pdev->dev,
+ &regulators[i], &config);
if (IS_ERR(max77686->rdev[i])) {
- ret = PTR_ERR(max77686->rdev[i]);
dev_err(&pdev->dev,
"regulator init failed for %d\n", i);
- max77686->rdev[i] = NULL;
- goto err;
+ return PTR_ERR(max77686->rdev[i]);
}
}
return 0;
-err:
- while (--i >= 0)
- regulator_unregister(max77686->rdev[i]);
- return ret;
-}
-
-static int max77686_pmic_remove(struct platform_device *pdev)
-{
- struct max77686_data *max77686 = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < MAX77686_REGULATORS; i++)
- regulator_unregister(max77686->rdev[i]);
-
- return 0;
}
static const struct platform_device_id max77686_pmic_id[] = {
@@ -518,7 +502,6 @@ static struct platform_driver max77686_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = max77686_pmic_probe,
- .remove = max77686_pmic_remove,
.id_table = max77686_pmic_id,
};
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
index ce4b96c15eba..feb20bf4ccab 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693.c
@@ -230,7 +230,7 @@ static int max77693_pmic_probe(struct platform_device *pdev)
struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max77693_pmic_dev *max77693_pmic;
struct max77693_regulator_data *rdata = NULL;
- int num_rdata, i, ret;
+ int num_rdata, i;
struct regulator_config config;
num_rdata = max77693_pmic_init_rdata(&pdev->dev, &rdata);
@@ -266,36 +266,16 @@ static int max77693_pmic_probe(struct platform_device *pdev)
config.init_data = rdata[i].initdata;
config.of_node = rdata[i].of_node;
- max77693_pmic->rdev[i] = regulator_register(&regulators[id],
- &config);
+ max77693_pmic->rdev[i] = devm_regulator_register(&pdev->dev,
+ &regulators[id], &config);
if (IS_ERR(max77693_pmic->rdev[i])) {
- ret = PTR_ERR(max77693_pmic->rdev[i]);
dev_err(max77693_pmic->dev,
"Failed to initialize regulator-%d\n", id);
- max77693_pmic->rdev[i] = NULL;
- goto err;
+ return PTR_ERR(max77693_pmic->rdev[i]);
}
}
return 0;
- err:
- while (--i >= 0)
- regulator_unregister(max77693_pmic->rdev[i]);
-
- return ret;
-}
-
-static int max77693_pmic_remove(struct platform_device *pdev)
-{
- struct max77693_pmic_dev *max77693_pmic = platform_get_drvdata(pdev);
- struct regulator_dev **rdev = max77693_pmic->rdev;
- int i;
-
- for (i = 0; i < max77693_pmic->num_regulators; i++)
- if (rdev[i])
- regulator_unregister(rdev[i]);
-
- return 0;
}
static const struct platform_device_id max77693_pmic_id[] = {
@@ -311,7 +291,6 @@ static struct platform_driver max77693_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = max77693_pmic_probe,
- .remove = max77693_pmic_remove,
.id_table = max77693_pmic_id,
};
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 19c6f08eafd5..7f049c92ee52 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -234,7 +234,8 @@ static int max8649_regulator_probe(struct i2c_client *client,
config.driver_data = info;
config.regmap = info->regmap;
- info->regulator = regulator_register(&dcdc_desc, &config);
+ info->regulator = devm_regulator_register(&client->dev, &dcdc_desc,
+ &config);
if (IS_ERR(info->regulator)) {
dev_err(info->dev, "failed to register regulator %s\n",
dcdc_desc.name);
@@ -244,16 +245,6 @@ static int max8649_regulator_probe(struct i2c_client *client,
return 0;
}
-static int max8649_regulator_remove(struct i2c_client *client)
-{
- struct max8649_regulator_info *info = i2c_get_clientdata(client);
-
- if (info)
- regulator_unregister(info->regulator);
-
- return 0;
-}
-
static const struct i2c_device_id max8649_id[] = {
{ "max8649", 0 },
{ }
@@ -262,7 +253,6 @@ MODULE_DEVICE_TABLE(i2c, max8649_id);
static struct i2c_driver max8649_driver = {
.probe = max8649_regulator_probe,
- .remove = max8649_regulator_remove,
.driver = {
.name = "max8649",
},
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 144bcacd734d..8d94d3d7f97f 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -439,7 +439,7 @@ static int max8660_probe(struct i2c_client *client,
for (i = 0; i < pdata->num_subdevs; i++) {
if (!pdata->subdevs[i].platform_data)
- goto err_out;
+ return ret;
boot_on = pdata->subdevs[i].platform_data->constraints.boot_on;
@@ -465,7 +465,7 @@ static int max8660_probe(struct i2c_client *client,
case MAX8660_V7:
if (type == MAX8661) {
dev_err(dev, "Regulator not on this chip!\n");
- goto err_out;
+ return -EINVAL;
}
if (boot_on)
@@ -475,7 +475,7 @@ static int max8660_probe(struct i2c_client *client,
default:
dev_err(dev, "invalid regulator %s\n",
pdata->subdevs[i].name);
- goto err_out;
+ return ret;
}
}
@@ -489,33 +489,18 @@ static int max8660_probe(struct i2c_client *client,
config.of_node = of_node[i];
config.driver_data = max8660;
- rdev[i] = regulator_register(&max8660_reg[id], &config);
+ rdev[i] = devm_regulator_register(&client->dev,
+ &max8660_reg[id], &config);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
- dev_err(dev, "failed to register %s\n",
+ dev_err(&client->dev, "failed to register %s\n",
max8660_reg[id].name);
- goto err_unregister;
+ return PTR_ERR(rdev[i]);
}
}
i2c_set_clientdata(client, max8660);
return 0;
-
-err_unregister:
- while (--i >= 0)
- regulator_unregister(rdev[i]);
-err_out:
- return ret;
-}
-
-static int max8660_remove(struct i2c_client *client)
-{
- struct max8660 *max8660 = i2c_get_clientdata(client);
- int i;
-
- for (i = 0; i < MAX8660_V_END; i++)
- regulator_unregister(max8660->rdev[i]);
- return 0;
}
static const struct i2c_device_id max8660_id[] = {
@@ -527,7 +512,6 @@ MODULE_DEVICE_TABLE(i2c, max8660_id);
static struct i2c_driver max8660_driver = {
.probe = max8660_probe,
- .remove = max8660_remove,
.driver = {
.name = "max8660",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index 4568c15fa78d..0c5fe6c6ac26 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -350,33 +350,17 @@ static int max8907_regulator_probe(struct platform_device *pdev)
pmic->desc[i].ops = &max8907_out5v_hwctl_ops;
}
- pmic->rdev[i] = regulator_register(&pmic->desc[i], &config);
+ pmic->rdev[i] = devm_regulator_register(&pdev->dev,
+ &pmic->desc[i], &config);
if (IS_ERR(pmic->rdev[i])) {
dev_err(&pdev->dev,
"failed to register %s regulator\n",
pmic->desc[i].name);
- ret = PTR_ERR(pmic->rdev[i]);
- goto err_unregister_regulator;
+ return PTR_ERR(pmic->rdev[i]);
}
}
return 0;
-
-err_unregister_regulator:
- while (--i >= 0)
- regulator_unregister(pmic->rdev[i]);
- return ret;
-}
-
-static int max8907_regulator_remove(struct platform_device *pdev)
-{
- struct max8907_regulator *pmic = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < MAX8907_NUM_REGULATORS; i++)
- regulator_unregister(pmic->rdev[i]);
-
- return 0;
}
static struct platform_driver max8907_regulator_driver = {
@@ -385,7 +369,6 @@ static struct platform_driver max8907_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = max8907_regulator_probe,
- .remove = max8907_regulator_remove,
};
static int __init max8907_regulator_init(void)
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index d80b5fa758ae..759510789e71 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -312,7 +312,7 @@ static int max8925_regulator_probe(struct platform_device *pdev)
if (pdata)
config.init_data = pdata;
- rdev = regulator_register(&ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
@@ -323,22 +323,12 @@ static int max8925_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int max8925_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
-
- return 0;
-}
-
static struct platform_driver max8925_regulator_driver = {
.driver = {
.name = "max8925-regulator",
.owner = THIS_MODULE,
},
.probe = max8925_regulator_probe,
- .remove = max8925_regulator_remove,
};
static int __init max8925_regulator_init(void)
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 5b77ab7762e4..892aa1e5b96c 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -467,7 +467,7 @@ static int max8973_probe(struct i2c_client *client,
config.regmap = max->regmap;
/* Register the regulators */
- rdev = regulator_register(&max->desc, &config);
+ rdev = devm_regulator_register(&client->dev, &max->desc, &config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(max->dev, "regulator register failed, err %d\n", ret);
@@ -478,14 +478,6 @@ static int max8973_probe(struct i2c_client *client,
return 0;
}
-static int max8973_remove(struct i2c_client *client)
-{
- struct max8973_chip *max = i2c_get_clientdata(client);
-
- regulator_unregister(max->rdev);
- return 0;
-}
-
static const struct i2c_device_id max8973_id[] = {
{.name = "max8973",},
{},
@@ -499,7 +491,6 @@ static struct i2c_driver max8973_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = max8973_probe,
- .remove = max8973_remove,
.id_table = max8973_id,
};
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index df20069f0537..2d618fc9c1af 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -690,8 +690,9 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
if (max8997->ignore_gpiodvs_side_effect == false)
return -EINVAL;
- dev_warn(&rdev->dev, "MAX8997 GPIO-DVS Side Effect Warning: GPIO SET:"
- " %d -> %d\n", max8997->buck125_gpioindex, tmp_idx);
+ dev_warn(&rdev->dev,
+ "MAX8997 GPIO-DVS Side Effect Warning: GPIO SET: %d -> %d\n",
+ max8997->buck125_gpioindex, tmp_idx);
out:
if (new_idx < 0 || new_val < 0)
@@ -1081,7 +1082,7 @@ static int max8997_pmic_probe(struct platform_device *pdev)
pdata->buck1_voltage[i] +
buck1245_voltage_map_desc.step);
if (ret < 0)
- goto err_out;
+ return ret;
max8997->buck2_vol[i] = ret =
max8997_get_voltage_proper_val(
@@ -1090,7 +1091,7 @@ static int max8997_pmic_probe(struct platform_device *pdev)
pdata->buck2_voltage[i] +
buck1245_voltage_map_desc.step);
if (ret < 0)
- goto err_out;
+ return ret;
max8997->buck5_vol[i] = ret =
max8997_get_voltage_proper_val(
@@ -1099,7 +1100,7 @@ static int max8997_pmic_probe(struct platform_device *pdev)
pdata->buck5_voltage[i] +
buck1245_voltage_map_desc.step);
if (ret < 0)
- goto err_out;
+ return ret;
if (max_buck1 < max8997->buck1_vol[i])
max_buck1 = max8997->buck1_vol[i];
@@ -1143,24 +1144,23 @@ static int max8997_pmic_probe(struct platform_device *pdev)
!gpio_is_valid(pdata->buck125_gpios[1]) ||
!gpio_is_valid(pdata->buck125_gpios[2])) {
dev_err(&pdev->dev, "GPIO NOT VALID\n");
- ret = -EINVAL;
- goto err_out;
+ return -EINVAL;
}
ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[0],
"MAX8997 SET1");
if (ret)
- goto err_out;
+ return ret;
ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[1],
"MAX8997 SET2");
if (ret)
- goto err_out;
+ return ret;
ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[2],
"MAX8997 SET3");
if (ret)
- goto err_out;
+ return ret;
gpio_direction_output(pdata->buck125_gpios[0],
(max8997->buck125_gpioindex >> 2)
@@ -1205,33 +1205,16 @@ static int max8997_pmic_probe(struct platform_device *pdev)
config.driver_data = max8997;
config.of_node = pdata->regulators[i].reg_node;
- rdev[i] = regulator_register(&regulators[id], &config);
+ rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
+ &config);
if (IS_ERR(rdev[i])) {
- ret = PTR_ERR(rdev[i]);
dev_err(max8997->dev, "regulator init failed for %d\n",
id);
- rdev[i] = NULL;
- goto err;
+ return PTR_ERR(rdev[i]);
}
}
return 0;
-err:
- while (--i >= 0)
- regulator_unregister(rdev[i]);
-err_out:
- return ret;
-}
-
-static int max8997_pmic_remove(struct platform_device *pdev)
-{
- struct max8997_data *max8997 = platform_get_drvdata(pdev);
- struct regulator_dev **rdev = max8997->rdev;
- int i;
-
- for (i = 0; i < max8997->num_regulators; i++)
- regulator_unregister(rdev[i]);
- return 0;
}
static const struct platform_device_id max8997_pmic_id[] = {
@@ -1246,7 +1229,6 @@ static struct platform_driver max8997_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = max8997_pmic_probe,
- .remove = max8997_pmic_remove,
.id_table = max8997_pmic_id,
};
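/*
 * Aside on the dev_warn() reflow in the max8997 hunk above: the kernel
 * coding style asks that user-visible strings never be split across
 * source lines (so they stay greppable), which is why the message is
 * joined into a single literal and only the arguments are wrapped, even
 * though the resulting line is long.
 */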
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index a4c53b2d1aaf..ae3f0656feb0 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -790,16 +790,14 @@ static int max8998_pmic_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"MAX8998 SET1 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck1_set1);
- ret = -EIO;
- goto err_out;
+ return -EIO;
}
/* Check if SET2 is not equal to 0 */
if (!pdata->buck1_set2) {
dev_err(&pdev->dev,
"MAX8998 SET2 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck1_set2);
- ret = -EIO;
- goto err_out;
+ return -EIO;
}
gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1");
@@ -823,7 +821,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
ret = max8998_write_reg(i2c,
MAX8998_REG_BUCK1_VOLTAGE1 + v, i);
if (ret)
- goto err_out;
+ return ret;
}
}
@@ -833,8 +831,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"MAX8998 SET3 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck2_set3);
- ret = -EIO;
- goto err_out;
+ return -EIO;
}
gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3");
gpio_direction_output(pdata->buck2_set3,
@@ -852,7 +849,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
ret = max8998_write_reg(i2c,
MAX8998_REG_BUCK2_VOLTAGE1 + v, i);
if (ret)
- goto err_out;
+ return ret;
}
}
@@ -875,34 +872,19 @@ static int max8998_pmic_probe(struct platform_device *pdev)
config.init_data = pdata->regulators[i].initdata;
config.driver_data = max8998;
- rdev[i] = regulator_register(&regulators[index], &config);
+ rdev[i] = devm_regulator_register(&pdev->dev,
+ &regulators[index], &config);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(max8998->dev, "regulator %s init failed (%d)\n",
regulators[index].name, ret);
rdev[i] = NULL;
- goto err;
+ return ret;
}
}
return 0;
-err:
- while (--i >= 0)
- regulator_unregister(rdev[i]);
-err_out:
- return ret;
-}
-
-static int max8998_pmic_remove(struct platform_device *pdev)
-{
- struct max8998_data *max8998 = platform_get_drvdata(pdev);
- struct regulator_dev **rdev = max8998->rdev;
- int i;
-
- for (i = 0; i < max8998->num_regulators; i++)
- regulator_unregister(rdev[i]);
- return 0;
}
static const struct platform_device_id max8998_pmic_id[] = {
@@ -918,7 +900,6 @@ static struct platform_driver max8998_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = max8998_pmic_probe,
- .remove = max8998_pmic_remove,
.id_table = max8998_pmic_id,
};
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 5ff99d2703db..7f4a67edf780 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -258,34 +258,34 @@ static struct mc13xxx_regulator mc13783_regulators[] = {
MC13783_FIXED_DEFINE(REG, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
MC13783_FIXED_DEFINE(REG, VIOHI, REGULATORMODE0, mc13783_viohi_val),
- MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0,
mc13783_violo_val),
- MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,
mc13783_vdig_val),
- MC13783_DEFINE_REGU(VGEN, REGULATORMODE0, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VGEN, REGULATORMODE0, REGULATORSETTING0,
mc13783_vgen_val),
- MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfdig_val),
- MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfref_val),
- MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfcp_val),
- MC13783_DEFINE_REGU(VSIM, REGULATORMODE1, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VSIM, REGULATORMODE1, REGULATORSETTING0,
mc13783_vsim_val),
- MC13783_DEFINE_REGU(VESIM, REGULATORMODE1, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VESIM, REGULATORMODE1, REGULATORSETTING0,
mc13783_vesim_val),
- MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \
+ MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,
mc13783_vcam_val),
MC13783_FIXED_DEFINE(REG, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
- MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1, \
+ MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1,
mc13783_vvib_val),
- MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1, \
+ MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1,
mc13783_vrf_val),
- MC13783_DEFINE_REGU(VRF2, REGULATORMODE1, REGULATORSETTING1, \
+ MC13783_DEFINE_REGU(VRF2, REGULATORMODE1, REGULATORSETTING1,
mc13783_vrf_val),
- MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1, REGULATORSETTING1, \
+ MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1, REGULATORSETTING1,
mc13783_vmmc_val),
- MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1, \
+ MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1,
mc13783_vmmc_val),
MC13783_GPO_DEFINE(REG, GPO1, POWERMISC, mc13783_gpo_val),
MC13783_GPO_DEFINE(REG, GPO2, POWERMISC, mc13783_gpo_val),
@@ -400,7 +400,7 @@ static int mc13783_regulator_probe(struct platform_device *pdev)
dev_get_platdata(&pdev->dev);
struct mc13xxx_regulator_init_data *mc13xxx_data;
struct regulator_config config = { };
- int i, ret, num_regulators;
+ int i, num_regulators;
num_regulators = mc13xxx_get_num_regulators_dt(pdev);
@@ -444,32 +444,16 @@ static int mc13783_regulator_probe(struct platform_device *pdev)
config.driver_data = priv;
config.of_node = node;
- priv->regulators[i] = regulator_register(desc, &config);
+ priv->regulators[i] = devm_regulator_register(&pdev->dev, desc,
+ &config);
if (IS_ERR(priv->regulators[i])) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
mc13783_regulators[i].desc.name);
- ret = PTR_ERR(priv->regulators[i]);
- goto err;
+ return PTR_ERR(priv->regulators[i]);
}
}
return 0;
-err:
- while (--i >= 0)
- regulator_unregister(priv->regulators[i]);
-
- return ret;
-}
-
-static int mc13783_regulator_remove(struct platform_device *pdev)
-{
- struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < priv->num_regulators; i++)
- regulator_unregister(priv->regulators[i]);
-
- return 0;
}
static struct platform_driver mc13783_regulator_driver = {
@@ -477,7 +461,6 @@ static struct platform_driver mc13783_regulator_driver = {
.name = "mc13783-regulator",
.owner = THIS_MODULE,
},
- .remove = mc13783_regulator_remove,
.probe = mc13783_regulator_probe,
};
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 1037e07937cf..96c9f80d9550 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -611,43 +611,27 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
config.driver_data = priv;
config.of_node = node;
- priv->regulators[i] = regulator_register(desc, &config);
+ priv->regulators[i] = devm_regulator_register(&pdev->dev, desc,
+ &config);
if (IS_ERR(priv->regulators[i])) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
mc13892_regulators[i].desc.name);
- ret = PTR_ERR(priv->regulators[i]);
- goto err;
+ return PTR_ERR(priv->regulators[i]);
}
}
return 0;
-err:
- while (--i >= 0)
- regulator_unregister(priv->regulators[i]);
- return ret;
err_unlock:
mc13xxx_unlock(mc13892);
return ret;
}
-static int mc13892_regulator_remove(struct platform_device *pdev)
-{
- struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < priv->num_regulators; i++)
- regulator_unregister(priv->regulators[i]);
-
- return 0;
-}
-
static struct platform_driver mc13892_regulator_driver = {
.driver = {
.name = "mc13892-regulator",
.owner = THIS_MODULE,
},
- .remove = mc13892_regulator_remove,
.probe = mc13892_regulator_probe,
};
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 7827384680d6..ea4f36f2cbe2 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -23,6 +23,8 @@ static void of_get_regulation_constraints(struct device_node *np,
const __be32 *min_uA, *max_uA, *ramp_delay;
struct property *prop;
struct regulation_constraints *constraints = &(*init_data)->constraints;
+ int ret;
+ u32 pval;
constraints->name = of_get_property(np, "regulator-name", NULL);
@@ -73,6 +75,10 @@ static void of_get_regulation_constraints(struct device_node *np,
else
constraints->ramp_disable = true;
}
+
+ ret = of_property_read_u32(np, "regulator-enable-ramp-delay", &pval);
+ if (!ret)
+ constraints->enable_time = pval;
}
/**
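/*
 * A small sketch of the idiom used above for the new, optional
 * "regulator-enable-ramp-delay" property (value in microseconds):
 * of_property_read_u32() returns 0 on success and a negative errno when
 * the property is absent, so the constraint is filled in only when the
 * device tree really provides it.  The helper name below is hypothetical.
 */
#include <linux/of.h>
#include <linux/regulator/machine.h>

static void foo_read_enable_ramp(struct device_node *np,
				 struct regulation_constraints *constraints)
{
	u32 pval;

	if (!of_property_read_u32(np, "regulator-enable-ramp-delay", &pval))
		constraints->enable_time = pval;
}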
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 7e2b165972e6..9c62b1d34685 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -33,6 +33,7 @@ struct regs_info {
u8 vsel_addr;
u8 ctrl_addr;
u8 tstep_addr;
+ int sleep_id;
};
static const struct regs_info palmas_regs_info[] = {
@@ -42,6 +43,7 @@ static const struct regs_info palmas_regs_info[] = {
.vsel_addr = PALMAS_SMPS12_VOLTAGE,
.ctrl_addr = PALMAS_SMPS12_CTRL,
.tstep_addr = PALMAS_SMPS12_TSTEP,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS12,
},
{
.name = "SMPS123",
@@ -49,12 +51,14 @@ static const struct regs_info palmas_regs_info[] = {
.vsel_addr = PALMAS_SMPS12_VOLTAGE,
.ctrl_addr = PALMAS_SMPS12_CTRL,
.tstep_addr = PALMAS_SMPS12_TSTEP,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS12,
},
{
.name = "SMPS3",
.sname = "smps3-in",
.vsel_addr = PALMAS_SMPS3_VOLTAGE,
.ctrl_addr = PALMAS_SMPS3_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS3,
},
{
.name = "SMPS45",
@@ -62,6 +66,7 @@ static const struct regs_info palmas_regs_info[] = {
.vsel_addr = PALMAS_SMPS45_VOLTAGE,
.ctrl_addr = PALMAS_SMPS45_CTRL,
.tstep_addr = PALMAS_SMPS45_TSTEP,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS45,
},
{
.name = "SMPS457",
@@ -69,6 +74,7 @@ static const struct regs_info palmas_regs_info[] = {
.vsel_addr = PALMAS_SMPS45_VOLTAGE,
.ctrl_addr = PALMAS_SMPS45_CTRL,
.tstep_addr = PALMAS_SMPS45_TSTEP,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS45,
},
{
.name = "SMPS6",
@@ -76,12 +82,14 @@ static const struct regs_info palmas_regs_info[] = {
.vsel_addr = PALMAS_SMPS6_VOLTAGE,
.ctrl_addr = PALMAS_SMPS6_CTRL,
.tstep_addr = PALMAS_SMPS6_TSTEP,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS6,
},
{
.name = "SMPS7",
.sname = "smps7-in",
.vsel_addr = PALMAS_SMPS7_VOLTAGE,
.ctrl_addr = PALMAS_SMPS7_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS7,
},
{
.name = "SMPS8",
@@ -89,108 +97,128 @@ static const struct regs_info palmas_regs_info[] = {
.vsel_addr = PALMAS_SMPS8_VOLTAGE,
.ctrl_addr = PALMAS_SMPS8_CTRL,
.tstep_addr = PALMAS_SMPS8_TSTEP,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS8,
},
{
.name = "SMPS9",
.sname = "smps9-in",
.vsel_addr = PALMAS_SMPS9_VOLTAGE,
.ctrl_addr = PALMAS_SMPS9_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS9,
},
{
.name = "SMPS10_OUT2",
.sname = "smps10-in",
.ctrl_addr = PALMAS_SMPS10_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS10,
},
{
.name = "SMPS10_OUT1",
.sname = "smps10-out2",
.ctrl_addr = PALMAS_SMPS10_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SMPS10,
},
{
.name = "LDO1",
.sname = "ldo1-in",
.vsel_addr = PALMAS_LDO1_VOLTAGE,
.ctrl_addr = PALMAS_LDO1_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO1,
},
{
.name = "LDO2",
.sname = "ldo2-in",
.vsel_addr = PALMAS_LDO2_VOLTAGE,
.ctrl_addr = PALMAS_LDO2_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO2,
},
{
.name = "LDO3",
.sname = "ldo3-in",
.vsel_addr = PALMAS_LDO3_VOLTAGE,
.ctrl_addr = PALMAS_LDO3_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO3,
},
{
.name = "LDO4",
.sname = "ldo4-in",
.vsel_addr = PALMAS_LDO4_VOLTAGE,
.ctrl_addr = PALMAS_LDO4_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO4,
},
{
.name = "LDO5",
.sname = "ldo5-in",
.vsel_addr = PALMAS_LDO5_VOLTAGE,
.ctrl_addr = PALMAS_LDO5_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO5,
},
{
.name = "LDO6",
.sname = "ldo6-in",
.vsel_addr = PALMAS_LDO6_VOLTAGE,
.ctrl_addr = PALMAS_LDO6_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO6,
},
{
.name = "LDO7",
.sname = "ldo7-in",
.vsel_addr = PALMAS_LDO7_VOLTAGE,
.ctrl_addr = PALMAS_LDO7_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO7,
},
{
.name = "LDO8",
.sname = "ldo8-in",
.vsel_addr = PALMAS_LDO8_VOLTAGE,
.ctrl_addr = PALMAS_LDO8_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO8,
},
{
.name = "LDO9",
.sname = "ldo9-in",
.vsel_addr = PALMAS_LDO9_VOLTAGE,
.ctrl_addr = PALMAS_LDO9_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDO9,
},
{
.name = "LDOLN",
.sname = "ldoln-in",
.vsel_addr = PALMAS_LDOLN_VOLTAGE,
.ctrl_addr = PALMAS_LDOLN_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDOLN,
},
{
.name = "LDOUSB",
.sname = "ldousb-in",
.vsel_addr = PALMAS_LDOUSB_VOLTAGE,
.ctrl_addr = PALMAS_LDOUSB_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_LDOUSB,
},
{
.name = "REGEN1",
.ctrl_addr = PALMAS_REGEN1_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_REGEN1,
},
{
.name = "REGEN2",
.ctrl_addr = PALMAS_REGEN2_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_REGEN2,
},
{
.name = "REGEN3",
.ctrl_addr = PALMAS_REGEN3_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_REGEN3,
},
{
.name = "SYSEN1",
.ctrl_addr = PALMAS_SYSEN1_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SYSEN1,
},
{
.name = "SYSEN2",
.ctrl_addr = PALMAS_SYSEN2_CTRL,
+ .sleep_id = PALMAS_EXTERNAL_REQSTR_ID_SYSEN2,
},
};
@@ -478,6 +506,17 @@ static struct regulator_ops palmas_ops_smps = {
.set_ramp_delay = palmas_smps_set_ramp_delay,
};
+static struct regulator_ops palmas_ops_ext_control_smps = {
+ .set_mode = palmas_set_mode_smps,
+ .get_mode = palmas_get_mode_smps,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = palmas_list_voltage_smps,
+ .map_voltage = palmas_map_voltage_smps,
+ .set_voltage_time_sel = palma_smps_set_voltage_smps_time_sel,
+ .set_ramp_delay = palmas_smps_set_ramp_delay,
+};
+
static struct regulator_ops palmas_ops_smps10 = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
@@ -513,12 +552,37 @@ static struct regulator_ops palmas_ops_ldo = {
.map_voltage = regulator_map_voltage_linear,
};
+static struct regulator_ops palmas_ops_ext_control_ldo = {
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+};
+
static struct regulator_ops palmas_ops_extreg = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
};
+static struct regulator_ops palmas_ops_ext_control_extreg = {
+};
+
+static int palmas_regulator_config_external(struct palmas *palmas, int id,
+ struct palmas_reg_init *reg_init)
+{
+ int sleep_id = palmas_regs_info[id].sleep_id;
+ int ret;
+
+ ret = palmas_ext_control_req_config(palmas, sleep_id,
+ reg_init->roof_floor, true);
+ if (ret < 0)
+ dev_err(palmas->dev,
+ "Ext control config for regulator %d failed %d\n",
+ id, ret);
+ return ret;
+}
+
/*
* setup the hardware based sleep configuration of the SMPS/LDO regulators
* from the platform data. This is different to the software based control
@@ -577,7 +641,22 @@ static int palmas_smps_init(struct palmas *palmas, int id,
return ret;
}
+ if (reg_init->roof_floor && (id != PALMAS_REG_SMPS10_OUT1) &&
+ (id != PALMAS_REG_SMPS10_OUT2)) {
+ /* Enable externally controlled regulator */
+ addr = palmas_regs_info[id].ctrl_addr;
+ ret = palmas_smps_read(palmas, addr, &reg);
+ if (ret < 0)
+ return ret;
+ if (!(reg & PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK)) {
+ reg |= SMPS_CTRL_MODE_ON;
+ ret = palmas_smps_write(palmas, addr, reg);
+ if (ret < 0)
+ return ret;
+ }
+ return palmas_regulator_config_external(palmas, id, reg_init);
+ }
return 0;
}
@@ -608,6 +687,20 @@ static int palmas_ldo_init(struct palmas *palmas, int id,
if (ret)
return ret;
+ if (reg_init->roof_floor) {
+ /* Enable externally controlled regulator */
+ addr = palmas_regs_info[id].ctrl_addr;
+ ret = palmas_update_bits(palmas, PALMAS_LDO_BASE,
+ addr, PALMAS_LDO1_CTRL_MODE_ACTIVE,
+ PALMAS_LDO1_CTRL_MODE_ACTIVE);
+ if (ret < 0) {
+ dev_err(palmas->dev,
+ "LDO Register 0x%02x update failed %d\n",
+ addr, ret);
+ return ret;
+ }
+ return palmas_regulator_config_external(palmas, id, reg_init);
+ }
return 0;
}
@@ -630,6 +723,21 @@ static int palmas_extreg_init(struct palmas *palmas, int id,
addr, ret);
return ret;
}
+
+ if (reg_init->roof_floor) {
+ /* Enable externally controlled regulator */
+ addr = palmas_regs_info[id].ctrl_addr;
+ ret = palmas_update_bits(palmas, PALMAS_RESOURCE_BASE,
+ addr, PALMAS_REGEN1_CTRL_MODE_ACTIVE,
+ PALMAS_REGEN1_CTRL_MODE_ACTIVE);
+ if (ret < 0) {
+ dev_err(palmas->dev,
+ "Resource Register 0x%02x update failed %d\n",
+ addr, ret);
+ return ret;
+ }
+ return palmas_regulator_config_external(palmas, id, reg_init);
+ }
return 0;
}
@@ -712,7 +820,7 @@ static void palmas_dt_to_pdata(struct device *dev,
int idx, ret;
node = of_node_get(node);
- regulators = of_find_node_by_name(node, "regulators");
+ regulators = of_get_child_by_name(node, "regulators");
if (!regulators) {
dev_info(dev, "regulator node not found\n");
return;
@@ -740,9 +848,35 @@ static void palmas_dt_to_pdata(struct device *dev,
of_property_read_bool(palmas_matches[idx].of_node,
"ti,warm-reset");
- pdata->reg_init[idx]->roof_floor =
- of_property_read_bool(palmas_matches[idx].of_node,
- "ti,roof-floor");
+ ret = of_property_read_u32(palmas_matches[idx].of_node,
+ "ti,roof-floor", &prop);
+ /* EINVAL: Property not found */
+ if (ret != -EINVAL) {
+ int econtrol;
+
+ /* use default value, when no value is specified */
+ econtrol = PALMAS_EXT_CONTROL_NSLEEP;
+ if (!ret) {
+ switch (prop) {
+ case 1:
+ econtrol = PALMAS_EXT_CONTROL_ENABLE1;
+ break;
+ case 2:
+ econtrol = PALMAS_EXT_CONTROL_ENABLE2;
+ break;
+ case 3:
+ econtrol = PALMAS_EXT_CONTROL_NSLEEP;
+ break;
+ default:
+ WARN_ON(1);
+ dev_warn(dev,
+ "%s: Invalid roof-floor option: %u\n",
+ palmas_matches[idx].name, prop);
+ break;
+ }
+ }
+ pdata->reg_init[idx]->roof_floor = econtrol;
+ }
ret = of_property_read_u32(palmas_matches[idx].of_node,
"ti,mode-sleep", &prop);
@@ -856,7 +990,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"reading TSTEP reg failed: %d\n", ret);
- goto err_unregister_regulator;
+ return ret;
}
pmic->desc[id].ramp_delay =
palmas_smps_ramp_delay[reg & 0x3];
@@ -868,7 +1002,9 @@ static int palmas_regulators_probe(struct platform_device *pdev)
reg_init = pdata->reg_init[id];
ret = palmas_smps_init(palmas, id, reg_init);
if (ret)
- goto err_unregister_regulator;
+ return ret;
+ } else {
+ reg_init = NULL;
}
/* Register the regulators */
@@ -909,11 +1045,15 @@ static int palmas_regulators_probe(struct platform_device *pdev)
ret = palmas_smps_read(pmic->palmas, addr, &reg);
if (ret)
- goto err_unregister_regulator;
+ return ret;
if (reg & PALMAS_SMPS12_VOLTAGE_RANGE)
pmic->range[id] = 1;
- pmic->desc[id].ops = &palmas_ops_smps;
+ if (reg_init && reg_init->roof_floor)
+ pmic->desc[id].ops =
+ &palmas_ops_ext_control_smps;
+ else
+ pmic->desc[id].ops = &palmas_ops_smps;
pmic->desc[id].n_voltages = PALMAS_SMPS_NUM_VOLTAGES;
pmic->desc[id].vsel_reg =
PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
@@ -925,7 +1065,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
addr = palmas_regs_info[id].ctrl_addr;
ret = palmas_smps_read(pmic->palmas, addr, &reg);
if (ret)
- goto err_unregister_regulator;
+ return ret;
pmic->current_reg_mode[id] = reg &
PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
}
@@ -941,13 +1081,13 @@ static int palmas_regulators_probe(struct platform_device *pdev)
pmic->desc[id].supply_name = palmas_regs_info[id].sname;
config.of_node = palmas_matches[id].of_node;
- rdev = regulator_register(&pmic->desc[id], &config);
+ rdev = devm_regulator_register(&pdev->dev, &pmic->desc[id],
+ &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev,
"failed to register %s regulator\n",
pdev->name);
- ret = PTR_ERR(rdev);
- goto err_unregister_regulator;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
@@ -956,6 +1096,10 @@ static int palmas_regulators_probe(struct platform_device *pdev)
/* Start this loop from the id left from previous loop */
for (; id < PALMAS_NUM_REGS; id++) {
+ if (pdata && pdata->reg_init[id])
+ reg_init = pdata->reg_init[id];
+ else
+ reg_init = NULL;
/* Miss out regulators which are not available due
* to alternate functions.
@@ -969,7 +1113,11 @@ static int palmas_regulators_probe(struct platform_device *pdev)
if (id < PALMAS_REG_REGEN1) {
pmic->desc[id].n_voltages = PALMAS_LDO_NUM_VOLTAGES;
- pmic->desc[id].ops = &palmas_ops_ldo;
+ if (reg_init && reg_init->roof_floor)
+ pmic->desc[id].ops =
+ &palmas_ops_ext_control_ldo;
+ else
+ pmic->desc[id].ops = &palmas_ops_ldo;
pmic->desc[id].min_uV = 900000;
pmic->desc[id].uV_step = 50000;
pmic->desc[id].linear_min_sel = 1;
@@ -999,7 +1147,11 @@ static int palmas_regulators_probe(struct platform_device *pdev)
pmic->desc[id].enable_time = 2000;
} else {
pmic->desc[id].n_voltages = 1;
- pmic->desc[id].ops = &palmas_ops_extreg;
+ if (reg_init && reg_init->roof_floor)
+ pmic->desc[id].ops =
+ &palmas_ops_ext_control_extreg;
+ else
+ pmic->desc[id].ops = &palmas_ops_extreg;
pmic->desc[id].enable_reg =
PALMAS_BASE_TO_REG(PALMAS_RESOURCE_BASE,
palmas_regs_info[id].ctrl_addr);
@@ -1015,13 +1167,13 @@ static int palmas_regulators_probe(struct platform_device *pdev)
pmic->desc[id].supply_name = palmas_regs_info[id].sname;
config.of_node = palmas_matches[id].of_node;
- rdev = regulator_register(&pmic->desc[id], &config);
+ rdev = devm_regulator_register(&pdev->dev, &pmic->desc[id],
+ &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev,
"failed to register %s regulator\n",
pdev->name);
- ret = PTR_ERR(rdev);
- goto err_unregister_regulator;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
@@ -1037,31 +1189,14 @@ static int palmas_regulators_probe(struct platform_device *pdev)
else
ret = palmas_extreg_init(palmas,
id, reg_init);
- if (ret) {
- regulator_unregister(pmic->rdev[id]);
- goto err_unregister_regulator;
- }
+ if (ret)
+ return ret;
}
}
}
return 0;
-
-err_unregister_regulator:
- while (--id >= 0)
- regulator_unregister(pmic->rdev[id]);
- return ret;
-}
-
-static int palmas_regulators_remove(struct platform_device *pdev)
-{
- struct palmas_pmic *pmic = platform_get_drvdata(pdev);
- int id;
-
- for (id = 0; id < PALMAS_NUM_REGS; id++)
- regulator_unregister(pmic->rdev[id]);
- return 0;
}
static struct of_device_id of_palmas_match_tbl[] = {
@@ -1083,7 +1218,6 @@ static struct platform_driver palmas_driver = {
.owner = THIS_MODULE,
},
.probe = palmas_regulators_probe,
- .remove = palmas_regulators_remove,
};
static int __init palmas_init(void)
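/*
 * Notes on the external-control additions above, with a hypothetical
 * helper for illustration only.  When a Palmas regulator is handed to an
 * external control line, the init paths force its mode-active bit on and
 * then route the per-regulator sleep_id through
 * palmas_ext_control_req_config(); from then on NSLEEP/ENABLE1/ENABLE2
 * decide on/off in hardware, which is why the palmas_ops_ext_control_*
 * tables carry no enable/disable/is_enabled callbacks.  The DT mapping
 * implemented in palmas_dt_to_pdata() is:
 *
 *   ti,roof-floor = <1>  ->  PALMAS_EXT_CONTROL_ENABLE1
 *   ti,roof-floor = <2>  ->  PALMAS_EXT_CONTROL_ENABLE2
 *   ti,roof-floor = <3>  ->  PALMAS_EXT_CONTROL_NSLEEP (also the default
 *                            when the property carries no usable value)
 */
static int palmas_roof_floor_to_econtrol(u32 prop)
{
	/* hypothetical helper, equivalent to the switch in palmas_dt_to_pdata() */
	switch (prop) {
	case 1:
		return PALMAS_EXT_CONTROL_ENABLE1;
	case 2:
		return PALMAS_EXT_CONTROL_ENABLE2;
	case 3:
		return PALMAS_EXT_CONTROL_NSLEEP;
	default:
		return PALMAS_EXT_CONTROL_NSLEEP;
	}
}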
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index b49eaeedea84..3727b7d0e9ac 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -246,7 +246,8 @@ static int pcap_regulator_probe(struct platform_device *pdev)
config.init_data = dev_get_platdata(&pdev->dev);
config.driver_data = pcap;
- rdev = regulator_register(&pcap_regulators[pdev->id], &config);
+ rdev = devm_regulator_register(&pdev->dev, &pcap_regulators[pdev->id],
+ &config);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
@@ -255,22 +256,12 @@ static int pcap_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int pcap_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
-
- return 0;
-}
-
static struct platform_driver pcap_regulator_driver = {
.driver = {
.name = "pcap-regulator",
.owner = THIS_MODULE,
},
.probe = pcap_regulator_probe,
- .remove = pcap_regulator_remove,
};
static int __init pcap_regulator_init(void)
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 0f3576d48abf..d7da1c15a6da 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -90,7 +90,8 @@ static int pcf50633_regulator_probe(struct platform_device *pdev)
config.driver_data = pcf;
config.regmap = pcf->regmap;
- rdev = regulator_register(&regulators[pdev->id], &config);
+ rdev = devm_regulator_register(&pdev->dev, &regulators[pdev->id],
+ &config);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
@@ -102,21 +103,11 @@ static int pcf50633_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int pcf50633_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
-
- return 0;
-}
-
static struct platform_driver pcf50633_regulator_driver = {
.driver = {
.name = "pcf50633-regltr",
},
.probe = pcf50633_regulator_probe,
- .remove = pcf50633_regulator_remove,
};
static int __init pcf50633_regulator_init(void)
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
index 5885b4504596..b58affb33143 100644
--- a/drivers/regulator/rc5t583-regulator.c
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -173,33 +173,16 @@ skip_ext_pwr_config:
config.driver_data = reg;
config.regmap = rc5t583->regmap;
- rdev = regulator_register(&ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
ri->desc.name);
- ret = PTR_ERR(rdev);
- goto clean_exit;
+ return PTR_ERR(rdev);
}
reg->rdev = rdev;
}
platform_set_drvdata(pdev, regs);
return 0;
-
-clean_exit:
- while (--id >= 0)
- regulator_unregister(regs[id].rdev);
-
- return ret;
-}
-
-static int rc5t583_regulator_remove(struct platform_device *pdev)
-{
- struct rc5t583_regulator *regs = platform_get_drvdata(pdev);
- int id;
-
- for (id = 0; id < RC5T583_REGULATOR_MAX; ++id)
- regulator_unregister(regs[id].rdev);
- return 0;
}
static struct platform_driver rc5t583_regulator_driver = {
@@ -208,7 +191,6 @@ static struct platform_driver rc5t583_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = rc5t583_regulator_probe,
- .remove = rc5t583_regulator_remove,
};
static int __init rc5t583_regulator_init(void)
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 5eba2ff8c0e8..333677d68d0e 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -448,33 +448,17 @@ common_reg:
config.of_node = rdata[i].of_node;
}
- s2mps11->rdev[i] = regulator_register(&regulators[i], &config);
+ s2mps11->rdev[i] = devm_regulator_register(&pdev->dev,
+ &regulators[i], &config);
if (IS_ERR(s2mps11->rdev[i])) {
ret = PTR_ERR(s2mps11->rdev[i]);
dev_err(&pdev->dev, "regulator init failed for %d\n",
i);
- s2mps11->rdev[i] = NULL;
- goto err;
+ return ret;
}
}
return 0;
-err:
- for (i = 0; i < S2MPS11_REGULATOR_MAX; i++)
- regulator_unregister(s2mps11->rdev[i]);
-
- return ret;
-}
-
-static int s2mps11_pmic_remove(struct platform_device *pdev)
-{
- struct s2mps11_info *s2mps11 = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < S2MPS11_REGULATOR_MAX; i++)
- regulator_unregister(s2mps11->rdev[i]);
-
- return 0;
}
static const struct platform_device_id s2mps11_pmic_id[] = {
@@ -489,7 +473,6 @@ static struct platform_driver s2mps11_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = s2mps11_pmic_probe,
- .remove = s2mps11_pmic_remove,
.id_table = s2mps11_pmic_id,
};
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index c24448bc43cf..2297fdf9ba7e 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -910,34 +910,17 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
config.regmap = iodev->regmap;
config.of_node = pdata->regulators[i].reg_node;
- rdev[i] = regulator_register(&regulators[id], &config);
+ rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
+ &config);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(s5m8767->dev, "regulator init failed for %d\n",
id);
- rdev[i] = NULL;
- goto err;
+ return ret;
}
}
return 0;
-err:
- for (i = 0; i < s5m8767->num_regulators; i++)
- regulator_unregister(rdev[i]);
-
- return ret;
-}
-
-static int s5m8767_pmic_remove(struct platform_device *pdev)
-{
- struct s5m8767_info *s5m8767 = platform_get_drvdata(pdev);
- struct regulator_dev **rdev = s5m8767->rdev;
- int i;
-
- for (i = 0; i < s5m8767->num_regulators; i++)
- regulator_unregister(rdev[i]);
-
- return 0;
}
static const struct platform_device_id s5m8767_pmic_id[] = {
@@ -952,7 +935,6 @@ static struct platform_driver s5m8767_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = s5m8767_pmic_probe,
- .remove = s5m8767_pmic_remove,
.id_table = s5m8767_pmic_id,
};
diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c
new file mode 100644
index 000000000000..f78857bd6a15
--- /dev/null
+++ b/drivers/regulator/stw481x-vmmc.c
@@ -0,0 +1,111 @@
+/*
+ * Regulator driver for STw4810/STw4811 VMMC regulator.
+ *
+ * Copyright (C) 2013 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mfd/stw481x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+static const unsigned int stw481x_vmmc_voltages[] = {
+ 1800000,
+ 1800000,
+ 2850000,
+ 3000000,
+ 1850000,
+ 2600000,
+ 2700000,
+ 3300000,
+};
+
+static struct regulator_ops stw481x_vmmc_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static struct regulator_desc vmmc_regulator = {
+ .name = "VMMC",
+ .id = 0,
+ .ops = &stw481x_vmmc_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(stw481x_vmmc_voltages),
+ .volt_table = stw481x_vmmc_voltages,
+ .enable_time = 200, /* FIXME: look this up */
+ .enable_reg = STW_CONF1,
+ .enable_mask = STW_CONF1_PDN_VMMC,
+ .vsel_reg = STW_CONF1,
+ .vsel_mask = STW_CONF1_VMMC_MASK,
+};
+
+static int stw481x_vmmc_regulator_probe(struct platform_device *pdev)
+{
+ struct stw481x *stw481x = dev_get_platdata(&pdev->dev);
+ struct regulator_config config = { };
+ int ret;
+
+ /* First disable the external VMMC if it's active */
+ ret = regmap_update_bits(stw481x->map, STW_CONF2,
+ STW_CONF2_VMMC_EXT, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "could not disable external VMMC\n");
+ return ret;
+ }
+
+ /* Register VMMC regulator */
+ config.dev = &pdev->dev;
+ config.driver_data = stw481x;
+ config.regmap = stw481x->map;
+ config.of_node = pdev->dev.of_node;
+ config.init_data = of_get_regulator_init_data(&pdev->dev,
+ pdev->dev.of_node);
+
+ stw481x->vmmc_regulator = regulator_register(&vmmc_regulator, &config);
+ if (IS_ERR(stw481x->vmmc_regulator)) {
+ dev_err(&pdev->dev,
+ "error initializing STw481x VMMC regulator\n");
+ return PTR_ERR(stw481x->vmmc_regulator);
+ }
+
+ dev_info(&pdev->dev, "initialized STw481x VMMC regulator\n");
+ return 0;
+}
+
+static int stw481x_vmmc_regulator_remove(struct platform_device *pdev)
+{
+ struct stw481x *stw481x = dev_get_platdata(&pdev->dev);
+
+ regulator_unregister(stw481x->vmmc_regulator);
+ return 0;
+}
+
+static const struct of_device_id stw481x_vmmc_match[] = {
+ { .compatible = "st,stw481x-vmmc", },
+ {},
+};
+
+static struct platform_driver stw481x_vmmc_regulator_driver = {
+ .driver = {
+ .name = "stw481x-vmmc-regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = stw481x_vmmc_match,
+ },
+ .probe = stw481x_vmmc_regulator_probe,
+ .remove = stw481x_vmmc_regulator_remove,
+};
+
+module_platform_driver(stw481x_vmmc_regulator_driver);
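/*
 * How the table-based helpers used by the new driver above resolve a
 * selector: regulator_list_voltage_table() indexes .volt_table directly,
 * so the VSEL field in STW_CONF1 picks an entry such as selector 2 ->
 * 2850000 uV.  Hypothetical illustration, not part of the driver:
 */
static int example_vmmc_selector_to_uV(unsigned int selector)
{
	if (selector >= ARRAY_SIZE(stw481x_vmmc_voltages))
		return -EINVAL;
	return stw481x_vmmc_voltages[selector];
}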
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index 20c271d49dcb..b187b6bba7ad 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -615,7 +615,7 @@ static int ti_abb_init_table(struct device *dev, struct ti_abb *abb,
pname, *volt_table, vset_mask);
continue;
}
- info->vset = efuse_val & vset_mask >> __ffs(vset_mask);
+ info->vset = (efuse_val & vset_mask) >> __ffs(vset_mask);
dev_dbg(dev, "[%d]v=%d vset=%x\n", i, *volt_table, info->vset);
check_abb:
switch (info->opp_sel) {
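/*
 * Why the added parentheses matter: '>>' binds more tightly than '&', so
 * the old expression parsed as efuse_val & (vset_mask >> __ffs(vset_mask))
 * instead of masking first and shifting second.  Worked example with
 * vset_mask = 0x18 (__ffs = 3) and efuse_val = 0x10:
 *
 *   efuse_val & vset_mask >> 3      ->  0x10 & 0x03  = 0x0  (wrong)
 *   (efuse_val & vset_mask) >> 3    ->  0x10 >> 3    = 0x2  (intended)
 */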
@@ -708,39 +708,31 @@ static int ti_abb_probe(struct platform_device *pdev)
match = of_match_device(ti_abb_of_match, dev);
if (!match) {
/* We do not expect this to happen */
- ret = -ENODEV;
dev_err(dev, "%s: Unable to match device\n", __func__);
- goto err;
+ return -ENODEV;
}
if (!match->data) {
- ret = -EINVAL;
dev_err(dev, "%s: Bad data in match\n", __func__);
- goto err;
+ return -EINVAL;
}
abb = devm_kzalloc(dev, sizeof(struct ti_abb), GFP_KERNEL);
- if (!abb) {
- dev_err(dev, "%s: Unable to allocate ABB struct\n", __func__);
- ret = -ENOMEM;
- goto err;
- }
+ if (!abb)
+ return -ENOMEM;
abb->regs = match->data;
/* Map ABB resources */
pname = "base-address";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
abb->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(abb->base)) {
- ret = PTR_ERR(abb->base);
- goto err;
- }
+ if (IS_ERR(abb->base))
+ return PTR_ERR(abb->base);
pname = "int-address";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
if (!res) {
dev_err(dev, "Missing '%s' IO resource\n", pname);
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
/*
* We may have shared interrupt register offsets which are
@@ -750,8 +742,7 @@ static int ti_abb_probe(struct platform_device *pdev)
resource_size(res));
if (!abb->int_base) {
dev_err(dev, "Unable to map '%s'\n", pname);
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
/* Map Optional resources */
@@ -771,17 +762,19 @@ static int ti_abb_probe(struct platform_device *pdev)
resource_size(res));
if (!abb->efuse_base) {
dev_err(dev, "Unable to map '%s'\n", pname);
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
pname = "ldo-address";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
- abb->ldo_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(abb->ldo_base)) {
- ret = PTR_ERR(abb->ldo_base);
- goto err;
+ if (!res) {
+ dev_dbg(dev, "Missing '%s' IO resource\n", pname);
+ ret = -ENODEV;
+ goto skip_opt;
}
+ abb->ldo_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(abb->ldo_base))
+ return PTR_ERR(abb->ldo_base);
/* IF ldo_base is set, the following are mandatory */
pname = "ti,ldovbb-override-mask";
@@ -790,12 +783,11 @@ static int ti_abb_probe(struct platform_device *pdev)
&abb->ldovbb_override_mask);
if (ret) {
dev_err(dev, "Missing '%s' (%d)\n", pname, ret);
- goto err;
+ return ret;
}
if (!abb->ldovbb_override_mask) {
dev_err(dev, "Invalid property:'%s' set as 0!\n", pname);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
pname = "ti,ldovbb-vset-mask";
@@ -804,12 +796,11 @@ static int ti_abb_probe(struct platform_device *pdev)
&abb->ldovbb_vset_mask);
if (ret) {
dev_err(dev, "Missing '%s' (%d)\n", pname, ret);
- goto err;
+ return ret;
}
if (!abb->ldovbb_vset_mask) {
dev_err(dev, "Invalid property:'%s' set as 0!\n", pname);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
skip_opt:
@@ -819,31 +810,29 @@ skip_opt:
&abb->txdone_mask);
if (ret) {
dev_err(dev, "Missing '%s' (%d)\n", pname, ret);
- goto err;
+ return ret;
}
if (!abb->txdone_mask) {
dev_err(dev, "Invalid property:'%s' set as 0!\n", pname);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
initdata = of_get_regulator_init_data(dev, pdev->dev.of_node);
if (!initdata) {
- ret = -ENOMEM;
dev_err(dev, "%s: Unable to alloc regulator init data\n",
__func__);
- goto err;
+ return -ENOMEM;
}
/* init ABB opp_sel table */
ret = ti_abb_init_table(dev, abb, initdata);
if (ret)
- goto err;
+ return ret;
/* init ABB timing */
ret = ti_abb_init_timings(dev, abb);
if (ret)
- goto err;
+ return ret;
desc = &abb->rdesc;
desc->name = dev_name(dev);
@@ -861,12 +850,12 @@ skip_opt:
config.driver_data = abb;
config.of_node = pdev->dev.of_node;
- rdev = regulator_register(desc, &config);
+ rdev = devm_regulator_register(dev, desc, &config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(dev, "%s: failed to register regulator(%d)\n",
__func__, ret);
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, rdev);
@@ -874,31 +863,12 @@ skip_opt:
ti_abb_rmw(abb->regs->sr2_en_mask, 1, abb->regs->setup_reg, abb->base);
return 0;
-
-err:
- dev_err(dev, "%s: Failed to initialize(%d)\n", __func__, ret);
- return ret;
-}
-
-/**
- * ti_abb_remove() - cleanups
- * @pdev: ABB platform device
- *
- * Return: 0
- */
-static int ti_abb_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
- return 0;
}
MODULE_ALIAS("platform:ti_abb");
static struct platform_driver ti_abb_driver = {
.probe = ti_abb_probe,
- .remove = ti_abb_remove,
.driver = {
.name = "ti_abb",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index 9392a7ca3d2d..b0a3f0917a27 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -343,7 +343,7 @@ static int tps51632_probe(struct i2c_client *client,
config.regmap = tps->regmap;
config.of_node = client->dev.of_node;
- rdev = regulator_register(&tps->desc, &config);
+ rdev = devm_regulator_register(&client->dev, &tps->desc, &config);
if (IS_ERR(rdev)) {
dev_err(tps->dev, "regulator register failed\n");
return PTR_ERR(rdev);
@@ -353,14 +353,6 @@ static int tps51632_probe(struct i2c_client *client,
return 0;
}
-static int tps51632_remove(struct i2c_client *client)
-{
- struct tps51632_chip *tps = i2c_get_clientdata(client);
-
- regulator_unregister(tps->rdev);
- return 0;
-}
-
static const struct i2c_device_id tps51632_id[] = {
{.name = "tps51632",},
{},
@@ -375,7 +367,6 @@ static struct i2c_driver tps51632_i2c_driver = {
.of_match_table = of_match_ptr(tps51632_of_match),
},
.probe = tps51632_probe,
- .remove = tps51632_remove,
.id_table = tps51632_id,
};
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index ec9453ffb77f..c1e33a3d397b 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -137,7 +137,7 @@ static int tps6105x_regulator_probe(struct platform_device *pdev)
/* This instance is not set for regulator mode so bail out */
if (pdata->mode != TPS6105X_MODE_VOLTAGE) {
dev_info(&pdev->dev,
- "chip not in voltage mode mode, exit probe \n");
+ "chip not in voltage mode mode, exit probe\n");
return 0;
}
@@ -146,8 +146,9 @@ static int tps6105x_regulator_probe(struct platform_device *pdev)
config.driver_data = tps6105x;
/* Register regulator with framework */
- tps6105x->regulator = regulator_register(&tps6105x_regulator_desc,
- &config);
+ tps6105x->regulator = devm_regulator_register(&pdev->dev,
+ &tps6105x_regulator_desc,
+ &config);
if (IS_ERR(tps6105x->regulator)) {
ret = PTR_ERR(tps6105x->regulator);
dev_err(&tps6105x->client->dev,
@@ -159,20 +160,12 @@ static int tps6105x_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int tps6105x_regulator_remove(struct platform_device *pdev)
-{
- struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev);
- regulator_unregister(tps6105x->regulator);
- return 0;
-}
-
static struct platform_driver tps6105x_regulator_driver = {
.driver = {
.name = "tps6105x-regulator",
.owner = THIS_MODULE,
},
.probe = tps6105x_regulator_probe,
- .remove = tps6105x_regulator_remove,
};
static __init int tps6105x_regulator_init(void)
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index 0b7ebb1ebf85..c2c0185a2dcd 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -476,7 +476,7 @@ static int tps62360_probe(struct i2c_client *client,
config.of_node = client->dev.of_node;
/* Register the regulators */
- rdev = regulator_register(&tps->desc, &config);
+ rdev = devm_regulator_register(&client->dev, &tps->desc, &config);
if (IS_ERR(rdev)) {
dev_err(tps->dev,
"%s(): regulator register failed with err %s\n",
@@ -488,20 +488,6 @@ static int tps62360_probe(struct i2c_client *client,
return 0;
}
-/**
- * tps62360_remove - tps62360 driver i2c remove handler
- * @client: i2c driver client device structure
- *
- * Unregister TPS driver as an i2c client device driver
- */
-static int tps62360_remove(struct i2c_client *client)
-{
- struct tps62360_chip *tps = i2c_get_clientdata(client);
-
- regulator_unregister(tps->rdev);
- return 0;
-}
-
static void tps62360_shutdown(struct i2c_client *client)
{
struct tps62360_chip *tps = i2c_get_clientdata(client);
@@ -535,7 +521,6 @@ static struct i2c_driver tps62360_i2c_driver = {
.of_match_table = of_match_ptr(tps62360_of_match),
},
.probe = tps62360_probe,
- .remove = tps62360_remove,
.shutdown = tps62360_shutdown,
.id_table = tps62360_id,
};
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index a15263d4bdff..3ef67a86115c 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -277,12 +277,12 @@ static int tps_65023_probe(struct i2c_client *client,
config.regmap = tps->regmap;
/* Register the regulators */
- rdev = regulator_register(&tps->desc[i], &config);
+ rdev = devm_regulator_register(&client->dev, &tps->desc[i],
+ &config);
if (IS_ERR(rdev)) {
dev_err(&client->dev, "failed to register %s\n",
id->name);
- error = PTR_ERR(rdev);
- goto fail;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
@@ -293,24 +293,10 @@ static int tps_65023_probe(struct i2c_client *client,
/* Enable setting output voltage by I2C */
regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
- TPS65023_REG_CTRL2_CORE_ADJ, TPS65023_REG_CTRL2_CORE_ADJ);
+ TPS65023_REG_CTRL2_CORE_ADJ,
+ TPS65023_REG_CTRL2_CORE_ADJ);
return 0;
-
- fail:
- while (--i >= 0)
- regulator_unregister(tps->rdev[i]);
- return error;
-}
-
-static int tps_65023_remove(struct i2c_client *client)
-{
- struct tps_pmic *tps = i2c_get_clientdata(client);
- int i;
-
- for (i = 0; i < TPS65023_NUM_REGULATOR; i++)
- regulator_unregister(tps->rdev[i]);
- return 0;
}
static const struct tps_info tps65020_regs[] = {
@@ -430,7 +416,6 @@ static struct i2c_driver tps_65023_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = tps_65023_probe,
- .remove = tps_65023_remove,
.id_table = tps_65023_id,
};
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 4117ff52dba1..162a0fae20b3 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -508,13 +508,13 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
config.of_node = tps6507x_reg_matches[i].of_node;
}
- rdev = regulator_register(&tps->desc[i], &config);
+ rdev = devm_regulator_register(&pdev->dev, &tps->desc[i],
+ &config);
if (IS_ERR(rdev)) {
dev_err(tps6507x_dev->dev,
"failed to register %s regulator\n",
pdev->name);
- error = PTR_ERR(rdev);
- goto fail;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
@@ -525,22 +525,6 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tps6507x_dev);
return 0;
-
-fail:
- while (--i >= 0)
- regulator_unregister(tps->rdev[i]);
- return error;
-}
-
-static int tps6507x_pmic_remove(struct platform_device *pdev)
-{
- struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
- struct tps6507x_pmic *tps = tps6507x_dev->pmic;
- int i;
-
- for (i = 0; i < TPS6507X_NUM_REGULATOR; i++)
- regulator_unregister(tps->rdev[i]);
- return 0;
}
static struct platform_driver tps6507x_pmic_driver = {
@@ -549,7 +533,6 @@ static struct platform_driver tps6507x_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = tps6507x_pmic_probe,
- .remove = tps6507x_pmic_remove,
};
static int __init tps6507x_pmic_init(void)
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index c8e70451df38..676f75548f00 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -180,7 +180,7 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
return ERR_PTR(-ENOMEM);
}
- regulators = of_find_node_by_name(np, "regulators");
+ regulators = of_get_child_by_name(np, "regulators");
if (!regulators) {
dev_err(&pdev->dev, "regulator node not found\n");
return ERR_PTR(-ENODEV);
@@ -279,7 +279,7 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"failed disable ext control\n");
- goto scrub;
+ return ret;
}
}
}
@@ -296,12 +296,11 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
else
config.of_node = NULL;
- rdev = regulator_register(ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc->name);
- ret = PTR_ERR(rdev);
- goto scrub;
+ return PTR_ERR(rdev);
}
ri->rdev = rdev;
@@ -309,36 +308,13 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
if (tps_pdata && is_dcdc(num) && tps_pdata->reg_init_data &&
tps_pdata->enable_ext_control) {
ret = tps65090_config_ext_control(ri, true);
- if (ret < 0) {
- /* Increment num to get unregister rdev */
- num++;
- goto scrub;
- }
+ if (ret < 0)
+ return ret;
}
}
platform_set_drvdata(pdev, pmic);
return 0;
-
-scrub:
- while (--num >= 0) {
- ri = &pmic[num];
- regulator_unregister(ri->rdev);
- }
- return ret;
-}
-
-static int tps65090_regulator_remove(struct platform_device *pdev)
-{
- struct tps65090_regulator *pmic = platform_get_drvdata(pdev);
- struct tps65090_regulator *ri;
- int num;
-
- for (num = 0; num < TPS65090_REGULATOR_MAX; ++num) {
- ri = &pmic[num];
- regulator_unregister(ri->rdev);
- }
- return 0;
}
static struct platform_driver tps65090_regulator_driver = {
@@ -347,7 +323,6 @@ static struct platform_driver tps65090_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = tps65090_regulator_probe,
- .remove = tps65090_regulator_remove,
};
static int __init tps65090_regulator_init(void)
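/*
 * Why of_find_node_by_name() is replaced here and in the other drivers of
 * this series: it keeps searching the whole device tree after the node it
 * is given (and drops the reference on that node), so it can land on a
 * "regulators" node belonging to a different device.
 * of_get_child_by_name() only inspects direct children, which is what the
 * binding means.  Hypothetical helper for illustration:
 */
static int foo_count_regulators(struct device_node *np)
{
	struct device_node *regulators, *child;
	int count = 0;

	regulators = of_get_child_by_name(np, "regulators");
	if (!regulators)
		return -ENODEV;

	for_each_child_of_node(regulators, child)
		count++;

	of_node_put(regulators);
	return count;
}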
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 90861d68a0b0..9ea1bf26bd13 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -52,25 +52,17 @@ static const unsigned int LDO1_VSEL_table[] = {
};
static const struct regulator_linear_range tps65217_uv1_ranges[] = {
- { .min_uV = 900000, .max_uV = 1500000, .min_sel = 0, .max_sel = 24,
- .uV_step = 25000 },
- { .min_uV = 1550000, .max_uV = 1800000, .min_sel = 25, .max_sel = 30,
- .uV_step = 50000 },
- { .min_uV = 1850000, .max_uV = 2900000, .min_sel = 31, .max_sel = 52,
- .uV_step = 50000 },
- { .min_uV = 3000000, .max_uV = 3200000, .min_sel = 53, .max_sel = 55,
- .uV_step = 100000 },
- { .min_uV = 3300000, .max_uV = 3300000, .min_sel = 56, .max_sel = 62,
- .uV_step = 0 },
+ REGULATOR_LINEAR_RANGE(900000, 0, 24, 25000),
+ REGULATOR_LINEAR_RANGE(1550000, 25, 30, 50000),
+ REGULATOR_LINEAR_RANGE(1850000, 31, 52, 50000),
+ REGULATOR_LINEAR_RANGE(3000000, 53, 55, 100000),
+ REGULATOR_LINEAR_RANGE(3300000, 56, 62, 0),
};
static const struct regulator_linear_range tps65217_uv2_ranges[] = {
- { .min_uV = 1500000, .max_uV = 1900000, .min_sel = 0, .max_sel = 8,
- .uV_step = 50000 },
- { .min_uV = 2000000, .max_uV = 2400000, .min_sel = 9, .max_sel = 13,
- .uV_step = 100000 },
- { .min_uV = 2450000, .max_uV = 3300000, .min_sel = 14, .max_sel = 31,
- .uV_step = 50000 },
+ REGULATOR_LINEAR_RANGE(1500000, 0, 8, 50000),
+ REGULATOR_LINEAR_RANGE(2000000, 9, 13, 100000),
+ REGULATOR_LINEAR_RANGE(2450000, 14, 31, 50000),
};
static int tps65217_pmic_enable(struct regulator_dev *dev)
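/*
 * The REGULATOR_LINEAR_RANGE() conversion above is purely cosmetic: the
 * macro fills in the same .min_uV/.min_sel/.max_sel/.uV_step fields the
 * open-coded initializers set, and the .max_uV value that used to be
 * spelled out follows from min_uV + (max_sel - min_sel) * uV_step.
 * For example, for the first range:
 *
 *   REGULATOR_LINEAR_RANGE(900000, 0, 24, 25000)
 *     -> min 900000 uV, selectors 0..24, 25000 uV per step,
 *        maximum 900000 + 24 * 25000 = 1500000 uV, as before.
 */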
@@ -233,7 +225,7 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
struct regulator_init_data *reg_data;
struct regulator_dev *rdev;
struct regulator_config config = { };
- int i, ret;
+ int i;
if (tps->dev->of_node)
pdata = tps65217_parse_dt(pdev);
@@ -269,35 +261,18 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
if (tps->dev->of_node)
config.of_node = pdata->of_node[i];
- rdev = regulator_register(&regulators[i], &config);
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ &config);
if (IS_ERR(rdev)) {
dev_err(tps->dev, "failed to register %s regulator\n",
pdev->name);
- ret = PTR_ERR(rdev);
- goto err_unregister_regulator;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
tps->rdev[i] = rdev;
}
return 0;
-
-err_unregister_regulator:
- while (--i >= 0)
- regulator_unregister(tps->rdev[i]);
-
- return ret;
-}
-
-static int tps65217_regulator_remove(struct platform_device *pdev)
-{
- struct tps65217 *tps = platform_get_drvdata(pdev);
- unsigned int i;
-
- for (i = 0; i < TPS65217_NUM_REGULATOR; i++)
- regulator_unregister(tps->rdev[i]);
-
- return 0;
}
static struct platform_driver tps65217_regulator_driver = {
@@ -305,7 +280,6 @@ static struct platform_driver tps65217_regulator_driver = {
.name = "tps65217-pmic",
},
.probe = tps65217_regulator_probe,
- .remove = tps65217_regulator_remove,
};
static int __init tps65217_regulator_init(void)
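
The open-coded struct regulator_linear_range initializers above collapse into REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, uV_step); the dropped .max_uV field is redundant, since the top of a range follows from the other four values. For the first tps65217 range, for example:

	max_uV = min_uV + (max_sel - min_sel) * uV_step
	       = 900000 + (24 - 0) * 25000
	       = 1500000	/* matches the removed .max_uV = 1500000 */
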
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 62e8d28beabd..9f6bfda711b7 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -577,21 +577,6 @@ static struct regulator_ops regulator_ops = {
.get_current_limit = get_current_limit,
};
-static int pmic_remove(struct spi_device *spi)
-{
- struct tps6524x *hw = spi_get_drvdata(spi);
- int i;
-
- if (!hw)
- return 0;
- for (i = 0; i < N_REGULATORS; i++) {
- regulator_unregister(hw->rdev[i]);
- hw->rdev[i] = NULL;
- }
- spi_set_drvdata(spi, NULL);
- return 0;
-}
-
static int pmic_probe(struct spi_device *spi)
{
struct tps6524x *hw;
@@ -599,7 +584,7 @@ static int pmic_probe(struct spi_device *spi)
const struct supply_info *info = supply_info;
struct regulator_init_data *init_data;
struct regulator_config config = { };
- int ret = 0, i;
+ int i;
init_data = dev_get_platdata(dev);
if (!init_data) {
@@ -632,24 +617,17 @@ static int pmic_probe(struct spi_device *spi)
config.init_data = init_data;
config.driver_data = hw;
- hw->rdev[i] = regulator_register(&hw->desc[i], &config);
- if (IS_ERR(hw->rdev[i])) {
- ret = PTR_ERR(hw->rdev[i]);
- hw->rdev[i] = NULL;
- goto fail;
- }
+ hw->rdev[i] = devm_regulator_register(dev, &hw->desc[i],
+ &config);
+ if (IS_ERR(hw->rdev[i]))
+ return PTR_ERR(hw->rdev[i]);
}
return 0;
-
-fail:
- pmic_remove(spi);
- return ret;
}
static struct spi_driver pmic_driver = {
.probe = pmic_probe,
- .remove = pmic_remove,
.driver = {
.name = "tps6524x",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 2c9155b66f09..e8e3a8afd3e2 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -298,7 +298,7 @@ static struct tps6586x_platform_data *tps6586x_parse_regulator_dt(
struct tps6586x_platform_data *pdata;
int err;
- regs = of_find_node_by_name(np, "regulators");
+ regs = of_get_child_by_name(np, "regulators");
if (!regs) {
dev_err(&pdev->dev, "regulator node not found\n");
return NULL;
@@ -379,15 +379,14 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
ri = find_regulator_info(id);
if (!ri) {
dev_err(&pdev->dev, "invalid regulator ID specified\n");
- err = -EINVAL;
- goto fail;
+ return -EINVAL;
}
err = tps6586x_regulator_preinit(pdev->dev.parent, ri);
if (err) {
dev_err(&pdev->dev,
"regulator %d preinit failed, e %d\n", id, err);
- goto fail;
+ return err;
}
config.dev = pdev->dev.parent;
@@ -397,12 +396,12 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
if (tps6586x_reg_matches)
config.of_node = tps6586x_reg_matches[id].of_node;
- rdev[id] = regulator_register(&ri->desc, &config);
+ rdev[id] = devm_regulator_register(&pdev->dev, &ri->desc,
+ &config);
if (IS_ERR(rdev[id])) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
- err = PTR_ERR(rdev[id]);
- goto fail;
+ return PTR_ERR(rdev[id]);
}
if (reg_data) {
@@ -411,30 +410,13 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev,
"Slew rate config failed, e %d\n", err);
- regulator_unregister(rdev[id]);
- goto fail;
+ return err;
}
}
}
platform_set_drvdata(pdev, rdev);
return 0;
-
-fail:
- while (--id >= 0)
- regulator_unregister(rdev[id]);
- return err;
-}
-
-static int tps6586x_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev **rdev = platform_get_drvdata(pdev);
- int id = TPS6586X_ID_MAX_REGULATOR;
-
- while (--id >= 0)
- regulator_unregister(rdev[id]);
-
- return 0;
}
static struct platform_driver tps6586x_regulator_driver = {
@@ -443,7 +425,6 @@ static struct platform_driver tps6586x_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = tps6586x_regulator_probe,
- .remove = tps6586x_regulator_remove,
};
static int __init tps6586x_regulator_init(void)
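
The tps6586x and tps65910 hunks also swap of_find_node_by_name() for of_get_child_by_name() when looking up the "regulators" subnode. The former searches the whole device tree starting from the given node and can match an unrelated node of the same name; the latter inspects only direct children and returns the node with a reference held. A short sketch of the lookup, assuming pdev is the platform device being probed:

	struct device_node *regs;

	regs = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
	if (!regs) {
		dev_err(&pdev->dev, "regulator node not found\n");
		return NULL;
	}
	/* ... parse the child nodes ... */
	of_node_put(regs);
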
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 45c16447744b..a00132e31ec7 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -481,7 +481,7 @@ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
/* multiplier 0 == 1 but 2,3 normal */
if (!mult)
- mult=1;
+ mult = 1;
if (sr) {
/* normalise to valid range */
@@ -685,7 +685,7 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
case TPS65910_REG_VDD2:
mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
volt = VDD1_2_MIN_VOLT +
- (selector % VDD1_2_NUM_VOLT_FINE) * VDD1_2_OFFSET;
+ (selector % VDD1_2_NUM_VOLT_FINE) * VDD1_2_OFFSET;
break;
case TPS65911_REG_VDDCTRL:
volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
@@ -703,7 +703,7 @@ static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
int step_mv = 0, id = rdev_get_id(dev);
- switch(id) {
+ switch (id) {
case TPS65911_REG_LDO1:
case TPS65911_REG_LDO2:
case TPS65911_REG_LDO4:
@@ -906,7 +906,7 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
}
ret = tps65910_reg_write(pmic->mfd, sr_reg_add, 0);
if (ret < 0) {
- dev_err(mfd->dev, "Error in settting sr register\n");
+ dev_err(mfd->dev, "Error in setting sr register\n");
return ret;
}
}
@@ -982,7 +982,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
}
np = of_node_get(pdev->dev.parent->of_node);
- regulators = of_find_node_by_name(np, "regulators");
+ regulators = of_get_child_by_name(np, "regulators");
if (!regulators) {
dev_err(&pdev->dev, "regulator node not found\n");
return NULL;
@@ -1074,7 +1074,7 @@ static int tps65910_probe(struct platform_device *pdev)
tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
DEVCTRL_SR_CTL_I2C_SEL_MASK);
- switch(tps65910_chip_id(tps65910)) {
+ switch (tps65910_chip_id(tps65910)) {
case TPS65910:
pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
@@ -1177,35 +1177,19 @@ static int tps65910_probe(struct platform_device *pdev)
if (tps65910_reg_matches)
config.of_node = tps65910_reg_matches[i].of_node;
- rdev = regulator_register(&pmic->desc[i], &config);
+ rdev = devm_regulator_register(&pdev->dev, &pmic->desc[i],
+ &config);
if (IS_ERR(rdev)) {
dev_err(tps65910->dev,
"failed to register %s regulator\n",
pdev->name);
- err = PTR_ERR(rdev);
- goto err_unregister_regulator;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
pmic->rdev[i] = rdev;
}
return 0;
-
-err_unregister_regulator:
- while (--i >= 0)
- regulator_unregister(pmic->rdev[i]);
- return err;
-}
-
-static int tps65910_remove(struct platform_device *pdev)
-{
- struct tps65910_reg *pmic = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < pmic->num_regulators; i++)
- regulator_unregister(pmic->rdev[i]);
-
- return 0;
}
static void tps65910_shutdown(struct platform_device *pdev)
@@ -1244,7 +1228,6 @@ static struct platform_driver tps65910_driver = {
.owner = THIS_MODULE,
},
.probe = tps65910_probe,
- .remove = tps65910_remove,
.shutdown = tps65910_shutdown,
};
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index 281e52ac64ba..9cafaa0f9455 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -119,12 +119,9 @@ struct tps65912_reg {
};
static const struct regulator_linear_range tps65912_ldo_ranges[] = {
- { .min_uV = 800000, .max_uV = 1600000, .min_sel = 0, .max_sel = 32,
- .uV_step = 25000 },
- { .min_uV = 1650000, .max_uV = 3000000, .min_sel = 33, .max_sel = 60,
- .uV_step = 50000 },
- { .min_uV = 3100000, .max_uV = 3300000, .min_sel = 61, .max_sel = 63,
- .uV_step = 100000 },
+ REGULATOR_LINEAR_RANGE(800000, 0, 32, 25000),
+ REGULATOR_LINEAR_RANGE(1650000, 33, 60, 50000),
+ REGULATOR_LINEAR_RANGE(3100000, 61, 63, 100000),
};
static int tps65912_get_range(struct tps65912_reg *pmic, int id)
@@ -461,7 +458,7 @@ static int tps65912_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
struct tps65912_reg *pmic;
struct tps65912_board *pmic_plat_data;
- int i, err;
+ int i;
pmic_plat_data = dev_get_platdata(tps65912->dev);
if (!pmic_plat_data)
@@ -504,34 +501,19 @@ static int tps65912_probe(struct platform_device *pdev)
config.init_data = reg_data;
config.driver_data = pmic;
- rdev = regulator_register(&pmic->desc[i], &config);
+ rdev = devm_regulator_register(&pdev->dev, &pmic->desc[i],
+ &config);
if (IS_ERR(rdev)) {
dev_err(tps65912->dev,
"failed to register %s regulator\n",
pdev->name);
- err = PTR_ERR(rdev);
- goto err;
+ return PTR_ERR(rdev);
}
/* Save regulator for cleanup */
pmic->rdev[i] = rdev;
}
return 0;
-
-err:
- while (--i >= 0)
- regulator_unregister(pmic->rdev[i]);
- return err;
-}
-
-static int tps65912_remove(struct platform_device *pdev)
-{
- struct tps65912_reg *tps65912_reg = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < TPS65912_NUM_REGULATOR; i++)
- regulator_unregister(tps65912_reg->rdev[i]);
- return 0;
}
static struct platform_driver tps65912_driver = {
@@ -540,7 +522,6 @@ static struct platform_driver tps65912_driver = {
.owner = THIS_MODULE,
},
.probe = tps65912_probe,
- .remove = tps65912_remove,
};
static int __init tps65912_init(void)
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
index 6511d0bfd896..71f457a42623 100644
--- a/drivers/regulator/tps80031-regulator.c
+++ b/drivers/regulator/tps80031-regulator.c
@@ -719,7 +719,7 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"regulator config failed, e %d\n", ret);
- goto fail;
+ return ret;
}
ret = tps80031_power_req_config(pdev->dev.parent,
@@ -727,41 +727,22 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"pwr_req config failed, err %d\n", ret);
- goto fail;
+ return ret;
}
}
- rdev = regulator_register(&ri->rinfo->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &ri->rinfo->desc,
+ &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev,
"register regulator failed %s\n",
ri->rinfo->desc.name);
- ret = PTR_ERR(rdev);
- goto fail;
+ return PTR_ERR(rdev);
}
ri->rdev = rdev;
}
platform_set_drvdata(pdev, pmic);
return 0;
-fail:
- while (--num >= 0) {
- ri = &pmic[num];
- regulator_unregister(ri->rdev);
- }
- return ret;
-}
-
-static int tps80031_regulator_remove(struct platform_device *pdev)
-{
- struct tps80031_regulator *pmic = platform_get_drvdata(pdev);
- struct tps80031_regulator *ri = NULL;
- int num;
-
- for (num = 0; num < TPS80031_REGULATOR_MAX; ++num) {
- ri = &pmic[num];
- regulator_unregister(ri->rdev);
- }
- return 0;
}
static struct platform_driver tps80031_regulator_driver = {
@@ -770,7 +751,6 @@ static struct platform_driver tps80031_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = tps80031_regulator_probe,
- .remove = tps80031_regulator_remove,
};
static int __init tps80031_regulator_init(void)
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 78aae4cbb004..8ebd785485c7 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -1188,7 +1188,7 @@ static int twlreg_probe(struct platform_device *pdev)
config.driver_data = info;
config.of_node = pdev->dev.of_node;
- rdev = regulator_register(&info->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "can't register %s, %ld\n",
info->desc.name, PTR_ERR(rdev));
@@ -1217,7 +1217,6 @@ static int twlreg_remove(struct platform_device *pdev)
struct regulator_dev *rdev = platform_get_drvdata(pdev);
struct twlreg_info *info = rdev->reg_data;
- regulator_unregister(rdev);
kfree(info);
return 0;
}
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c
index 4668c7f8133d..f3ae28a7e663 100644
--- a/drivers/regulator/vexpress.c
+++ b/drivers/regulator/vexpress.c
@@ -96,7 +96,7 @@ static int vexpress_regulator_probe(struct platform_device *pdev)
config.driver_data = reg;
config.of_node = pdev->dev.of_node;
- reg->regdev = regulator_register(&reg->desc, &config);
+ reg->regdev = devm_regulator_register(&pdev->dev, &reg->desc, &config);
if (IS_ERR(reg->regdev)) {
err = PTR_ERR(reg->regdev);
goto error_regulator_register;
@@ -119,7 +119,6 @@ static int vexpress_regulator_remove(struct platform_device *pdev)
struct vexpress_regulator *reg = platform_get_drvdata(pdev);
vexpress_config_func_put(reg->func);
- regulator_unregister(reg->regdev);
return 0;
}
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 11861cb861df..6823e6f2b88a 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -387,8 +387,9 @@ static struct regulator_ops wm831x_buckv_ops = {
* Set up DVS control. We just log errors since we can still run
* (with reduced performance) if we fail.
*/
-static void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
- struct wm831x_buckv_pdata *pdata)
+static void wm831x_buckv_dvs_init(struct platform_device *pdev,
+ struct wm831x_dcdc *dcdc,
+ struct wm831x_buckv_pdata *pdata)
{
struct wm831x *wm831x = dcdc->wm831x;
int ret;
@@ -402,9 +403,9 @@ static void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
*/
dcdc->dvs_gpio_state = pdata->dvs_init_state;
- ret = gpio_request_one(pdata->dvs_gpio,
- dcdc->dvs_gpio_state ? GPIOF_INIT_HIGH : 0,
- "DCDC DVS");
+ ret = devm_gpio_request_one(&pdev->dev, pdata->dvs_gpio,
+ dcdc->dvs_gpio_state ? GPIOF_INIT_HIGH : 0,
+ "DCDC DVS");
if (ret < 0) {
dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n",
dcdc->name, ret);
@@ -513,7 +514,8 @@ static int wm831x_buckv_probe(struct platform_device *pdev)
dcdc->dvs_vsel = ret & WM831X_DC1_DVS_VSEL_MASK;
if (pdata && pdata->dcdc[id])
- wm831x_buckv_dvs_init(dcdc, pdata->dcdc[id]->driver_data);
+ wm831x_buckv_dvs_init(pdev, dcdc,
+ pdata->dcdc[id]->driver_data);
config.dev = pdev->dev.parent;
if (pdata)
@@ -521,7 +523,8 @@ static int wm831x_buckv_probe(struct platform_device *pdev)
config.driver_data = dcdc;
config.regmap = wm831x->regmap;
- dcdc->regulator = regulator_register(&dcdc->desc, &config);
+ dcdc->regulator = devm_regulator_register(&pdev->dev, &dcdc->desc,
+ &config);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -530,57 +533,35 @@ static int wm831x_buckv_probe(struct platform_device *pdev)
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
- ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
- IRQF_TRIGGER_RISING, dcdc->name, dcdc);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_dcdc_uv_irq,
+ IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ goto err;
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC"));
- ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq,
- IRQF_TRIGGER_RISING, dcdc->name, dcdc);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_dcdc_oc_irq,
+ IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request HC IRQ %d: %d\n",
irq, ret);
- goto err_uv;
+ goto err;
}
platform_set_drvdata(pdev, dcdc);
return 0;
-err_uv:
- free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
- dcdc);
-err_regulator:
- regulator_unregister(dcdc->regulator);
err:
- if (dcdc->dvs_gpio)
- gpio_free(dcdc->dvs_gpio);
return ret;
}
-static int wm831x_buckv_remove(struct platform_device *pdev)
-{
- struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
- struct wm831x *wm831x = dcdc->wm831x;
-
- free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC")),
- dcdc);
- free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
- dcdc);
- regulator_unregister(dcdc->regulator);
- if (dcdc->dvs_gpio)
- gpio_free(dcdc->dvs_gpio);
-
- return 0;
-}
-
static struct platform_driver wm831x_buckv_driver = {
.probe = wm831x_buckv_probe,
- .remove = wm831x_buckv_remove,
.driver = {
.name = "wm831x-buckv",
.owner = THIS_MODULE,
@@ -681,7 +662,8 @@ static int wm831x_buckp_probe(struct platform_device *pdev)
config.driver_data = dcdc;
config.regmap = wm831x->regmap;
- dcdc->regulator = regulator_register(&dcdc->desc, &config);
+ dcdc->regulator = devm_regulator_register(&pdev->dev, &dcdc->desc,
+ &config);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -690,38 +672,25 @@ static int wm831x_buckp_probe(struct platform_device *pdev)
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
- ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
- IRQF_TRIGGER_RISING, dcdc->name, dcdc);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_dcdc_uv_irq,
+ IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ goto err;
}
platform_set_drvdata(pdev, dcdc);
return 0;
-err_regulator:
- regulator_unregister(dcdc->regulator);
err:
return ret;
}
-static int wm831x_buckp_remove(struct platform_device *pdev)
-{
- struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
-
- free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
- dcdc);
- regulator_unregister(dcdc->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_buckp_driver = {
.probe = wm831x_buckp_probe,
- .remove = wm831x_buckp_remove,
.driver = {
.name = "wm831x-buckp",
.owner = THIS_MODULE,
@@ -813,7 +782,8 @@ static int wm831x_boostp_probe(struct platform_device *pdev)
config.driver_data = dcdc;
config.regmap = wm831x->regmap;
- dcdc->regulator = regulator_register(&dcdc->desc, &config);
+ dcdc->regulator = devm_regulator_register(&pdev->dev, &dcdc->desc,
+ &config);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -822,39 +792,26 @@ static int wm831x_boostp_probe(struct platform_device *pdev)
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
- ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
- IRQF_TRIGGER_RISING, dcdc->name,
- dcdc);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_dcdc_uv_irq,
+ IRQF_TRIGGER_RISING, dcdc->name,
+ dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ goto err;
}
platform_set_drvdata(pdev, dcdc);
return 0;
-err_regulator:
- regulator_unregister(dcdc->regulator);
err:
return ret;
}
-static int wm831x_boostp_remove(struct platform_device *pdev)
-{
- struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
-
- free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
- dcdc);
- regulator_unregister(dcdc->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_boostp_driver = {
.probe = wm831x_boostp_probe,
- .remove = wm831x_boostp_remove,
.driver = {
.name = "wm831x-boostp",
.owner = THIS_MODULE,
@@ -914,7 +871,8 @@ static int wm831x_epe_probe(struct platform_device *pdev)
config.driver_data = dcdc;
config.regmap = wm831x->regmap;
- dcdc->regulator = regulator_register(&dcdc->desc, &config);
+ dcdc->regulator = devm_regulator_register(&pdev->dev, &dcdc->desc,
+ &config);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register EPE%d: %d\n",
@@ -930,18 +888,8 @@ err:
return ret;
}
-static int wm831x_epe_remove(struct platform_device *pdev)
-{
- struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
-
- regulator_unregister(dcdc->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_epe_driver = {
.probe = wm831x_epe_probe,
- .remove = wm831x_epe_remove,
.driver = {
.name = "wm831x-epe",
.owner = THIS_MODULE,
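
The wm831x conversions apply the same idea to IRQs and GPIOs: request_threaded_irq()/gpio_request_one() become devm_request_threaded_irq()/devm_gpio_request_one(), so the error paths collapse to a bare err: label and the remove callbacks go away. A simplified sketch of the IRQ half (the real code maps the number through wm831x_irq(); foo_uv_irq and "foo-uv" are placeholders):

	int irq, ret;

	irq = platform_get_irq_byname(pdev, "UV");
	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, foo_uv_irq,
					IRQF_TRIGGER_RISING, "foo-uv", foo);
	if (ret)
		return ret;	/* the IRQ is freed automatically on unbind */
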
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 4eb373de1fac..0339b886df5d 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -194,7 +194,8 @@ static int wm831x_isink_probe(struct platform_device *pdev)
config.init_data = pdata->isink[id];
config.driver_data = isink;
- isink->regulator = regulator_register(&isink->desc, &config);
+ isink->regulator = devm_regulator_register(&pdev->dev, &isink->desc,
+ &config);
if (IS_ERR(isink->regulator)) {
ret = PTR_ERR(isink->regulator);
dev_err(wm831x->dev, "Failed to register ISINK%d: %d\n",
@@ -203,38 +204,26 @@ static int wm831x_isink_probe(struct platform_device *pdev)
}
irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
- ret = request_threaded_irq(irq, NULL, wm831x_isink_irq,
- IRQF_TRIGGER_RISING, isink->name, isink);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_isink_irq,
+ IRQF_TRIGGER_RISING, isink->name,
+ isink);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request ISINK IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ goto err;
}
platform_set_drvdata(pdev, isink);
return 0;
-err_regulator:
- regulator_unregister(isink->regulator);
err:
return ret;
}
-static int wm831x_isink_remove(struct platform_device *pdev)
-{
- struct wm831x_isink *isink = platform_get_drvdata(pdev);
-
- free_irq(wm831x_irq(isink->wm831x, platform_get_irq(pdev, 0)), isink);
-
- regulator_unregister(isink->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_isink_driver = {
.probe = wm831x_isink_probe,
- .remove = wm831x_isink_remove,
.driver = {
.name = "wm831x-isink",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 2205fbc2c37b..46d6700467b5 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -63,10 +63,8 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
*/
static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = {
- { .min_uV = 900000, .max_uV = 1600000, .min_sel = 0, .max_sel = 14,
- .uV_step = 50000 },
- { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
- .uV_step = 100000 },
+ REGULATOR_LINEAR_RANGE(900000, 0, 14, 50000),
+ REGULATOR_LINEAR_RANGE(1700000, 15, 31, 100000),
};
static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -279,7 +277,8 @@ static int wm831x_gp_ldo_probe(struct platform_device *pdev)
config.driver_data = ldo;
config.regmap = wm831x->regmap;
- ldo->regulator = regulator_register(&ldo->desc, &config);
+ ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc,
+ &config);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -288,39 +287,26 @@ static int wm831x_gp_ldo_probe(struct platform_device *pdev)
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
- ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
- IRQF_TRIGGER_RISING, ldo->name,
- ldo);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_ldo_uv_irq,
+ IRQF_TRIGGER_RISING, ldo->name,
+ ldo);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ goto err;
}
platform_set_drvdata(pdev, ldo);
return 0;
-err_regulator:
- regulator_unregister(ldo->regulator);
err:
return ret;
}
-static int wm831x_gp_ldo_remove(struct platform_device *pdev)
-{
- struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
-
- free_irq(wm831x_irq(ldo->wm831x,
- platform_get_irq_byname(pdev, "UV")), ldo);
- regulator_unregister(ldo->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_gp_ldo_driver = {
.probe = wm831x_gp_ldo_probe,
- .remove = wm831x_gp_ldo_remove,
.driver = {
.name = "wm831x-ldo",
.owner = THIS_MODULE,
@@ -332,10 +318,8 @@ static struct platform_driver wm831x_gp_ldo_driver = {
*/
static const struct regulator_linear_range wm831x_aldo_ranges[] = {
- { .min_uV = 1000000, .max_uV = 1600000, .min_sel = 0, .max_sel = 12,
- .uV_step = 50000 },
- { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31,
- .uV_step = 100000 },
+ REGULATOR_LINEAR_RANGE(1000000, 0, 12, 50000),
+ REGULATOR_LINEAR_RANGE(1700000, 13, 31, 100000),
};
static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -505,7 +489,8 @@ static int wm831x_aldo_probe(struct platform_device *pdev)
config.driver_data = ldo;
config.regmap = wm831x->regmap;
- ldo->regulator = regulator_register(&ldo->desc, &config);
+ ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc,
+ &config);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -514,38 +499,25 @@ static int wm831x_aldo_probe(struct platform_device *pdev)
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
- ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
- IRQF_TRIGGER_RISING, ldo->name, ldo);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wm831x_ldo_uv_irq,
+ IRQF_TRIGGER_RISING, ldo->name, ldo);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
- goto err_regulator;
+ goto err;
}
platform_set_drvdata(pdev, ldo);
return 0;
-err_regulator:
- regulator_unregister(ldo->regulator);
err:
return ret;
}
-static int wm831x_aldo_remove(struct platform_device *pdev)
-{
- struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
-
- free_irq(wm831x_irq(ldo->wm831x, platform_get_irq_byname(pdev, "UV")),
- ldo);
- regulator_unregister(ldo->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_aldo_driver = {
.probe = wm831x_aldo_probe,
- .remove = wm831x_aldo_remove,
.driver = {
.name = "wm831x-aldo",
.owner = THIS_MODULE,
@@ -663,7 +635,8 @@ static int wm831x_alive_ldo_probe(struct platform_device *pdev)
config.driver_data = ldo;
config.regmap = wm831x->regmap;
- ldo->regulator = regulator_register(&ldo->desc, &config);
+ ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc,
+ &config);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -679,18 +652,8 @@ err:
return ret;
}
-static int wm831x_alive_ldo_remove(struct platform_device *pdev)
-{
- struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
-
- regulator_unregister(ldo->regulator);
-
- return 0;
-}
-
static struct platform_driver wm831x_alive_ldo_driver = {
.probe = wm831x_alive_ldo_probe,
- .remove = wm831x_alive_ldo_remove,
.driver = {
.name = "wm831x-alive-ldo",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 61ca9292a429..de7b9c73e3fa 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -543,10 +543,8 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
}
static const struct regulator_linear_range wm8350_ldo_ranges[] = {
- { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 15,
- .uV_step = 50000 },
- { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31,
- .uV_step = 100000 },
+ REGULATOR_LINEAR_RANGE(900000, 0, 15, 50000),
+ REGULATOR_LINEAR_RANGE(1800000, 16, 31, 100000),
};
static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
@@ -1206,7 +1204,8 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
config.regmap = wm8350->regmap;
/* register regulator */
- rdev = regulator_register(&wm8350_reg[pdev->id], &config);
+ rdev = devm_regulator_register(&pdev->dev, &wm8350_reg[pdev->id],
+ &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register %s\n",
wm8350_reg[pdev->id].name);
@@ -1217,7 +1216,6 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
ret = wm8350_register_irq(wm8350, wm8350_reg[pdev->id].irq,
pmic_uv_handler, 0, "UV", rdev);
if (ret < 0) {
- regulator_unregister(rdev);
dev_err(&pdev->dev, "failed to register regulator %s IRQ\n",
wm8350_reg[pdev->id].name);
return ret;
@@ -1233,8 +1231,6 @@ static int wm8350_regulator_remove(struct platform_device *pdev)
wm8350_free_irq(wm8350, wm8350_reg[pdev->id].irq, rdev);
- regulator_unregister(rdev);
-
return 0;
}
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 58f51bec13f2..82d829000851 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -20,10 +20,8 @@
#include <linux/mfd/wm8400-private.h>
static const struct regulator_linear_range wm8400_ldo_ranges[] = {
- { .min_uV = 900000, .max_uV = 1600000, .min_sel = 0, .max_sel = 14,
- .uV_step = 50000 },
- { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
- .uV_step = 100000 },
+ REGULATOR_LINEAR_RANGE(900000, 0, 14, 50000),
+ REGULATOR_LINEAR_RANGE(1700000, 15, 31, 100000),
};
static struct regulator_ops wm8400_ldo_ops = {
@@ -219,7 +217,8 @@ static int wm8400_regulator_probe(struct platform_device *pdev)
config.driver_data = wm8400;
config.regmap = wm8400->regmap;
- rdev = regulator_register(&regulators[pdev->id], &config);
+ rdev = devm_regulator_register(&pdev->dev, &regulators[pdev->id],
+ &config);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
@@ -228,21 +227,11 @@ static int wm8400_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int wm8400_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
-
- regulator_unregister(rdev);
-
- return 0;
-}
-
static struct platform_driver wm8400_regulator_driver = {
.driver = {
.name = "wm8400-regulator",
},
.probe = wm8400_regulator_probe,
- .remove = wm8400_regulator_remove,
};
/**
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 5ee2a208457c..71c5911f2e71 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -165,7 +165,9 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
ldo->init_data = *pdata->ldo[id].init_data;
}
- ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &config);
+ ldo->regulator = devm_regulator_register(&pdev->dev,
+ &wm8994_ldo_desc[id],
+ &config);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
@@ -181,18 +183,8 @@ err:
return ret;
}
-static int wm8994_ldo_remove(struct platform_device *pdev)
-{
- struct wm8994_ldo *ldo = platform_get_drvdata(pdev);
-
- regulator_unregister(ldo->regulator);
-
- return 0;
-}
-
static struct platform_driver wm8994_ldo_driver = {
.probe = wm8994_ldo_probe,
- .remove = wm8994_ldo_remove,
.driver = {
.name = "wm8994-ldo",
.owner = THIS_MODULE,
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index b09c75c21b60..a34b50690b4e 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -30,7 +30,7 @@
#include "remoteproc_internal.h"
/* kick the remote processor, and let it know which virtqueue to poke at */
-static void rproc_virtio_notify(struct virtqueue *vq)
+static bool rproc_virtio_notify(struct virtqueue *vq)
{
struct rproc_vring *rvring = vq->priv;
struct rproc *rproc = rvring->rvdev->rproc;
@@ -39,6 +39,7 @@ static void rproc_virtio_notify(struct virtqueue *vq)
dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);
rproc->ops->kick(rproc, notifyid);
+ return true;
}
/**
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 72c5cdbe0791..544be722937c 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -72,6 +72,7 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
} else
err = -EINVAL;
+ pm_stay_awake(rtc->dev.parent);
mutex_unlock(&rtc->ops_lock);
/* A timer might have just expired */
schedule_work(&rtc->irqwork);
@@ -113,6 +114,7 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
err = -EINVAL;
}
+ pm_stay_awake(rtc->dev.parent);
mutex_unlock(&rtc->ops_lock);
/* A timer might have just expired */
schedule_work(&rtc->irqwork);
@@ -771,9 +773,10 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
alarm.time = rtc_ktime_to_tm(timer->node.expires);
alarm.enabled = 1;
err = __rtc_set_alarm(rtc, &alarm);
- if (err == -ETIME)
+ if (err == -ETIME) {
+ pm_stay_awake(rtc->dev.parent);
schedule_work(&rtc->irqwork);
- else if (err) {
+ } else if (err) {
timerqueue_del(&rtc->timerqueue, &timer->node);
timer->enabled = 0;
return err;
@@ -818,8 +821,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
alarm.time = rtc_ktime_to_tm(next->expires);
alarm.enabled = 1;
err = __rtc_set_alarm(rtc, &alarm);
- if (err == -ETIME)
+ if (err == -ETIME) {
+ pm_stay_awake(rtc->dev.parent);
schedule_work(&rtc->irqwork);
+ }
}
}
@@ -845,7 +850,6 @@ void rtc_timer_do_work(struct work_struct *work)
mutex_lock(&rtc->ops_lock);
again:
- pm_relax(rtc->dev.parent);
__rtc_read_time(rtc, &tm);
now = rtc_tm_to_ktime(tm);
while ((next = timerqueue_getnext(&rtc->timerqueue))) {
@@ -880,6 +884,7 @@ again:
} else
rtc_alarm_disable(rtc);
+ pm_relax(rtc->dev.parent);
mutex_unlock(&rtc->ops_lock);
}
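
The rtc/interface.c hunks rebalance the wakeup-source handling: pm_stay_awake() is now taken whenever rtc->irqwork is queued (set_time, set_mmss, alarm enqueue/remove), and pm_relax() moves from the top of the rtc_timer_do_work() loop to its exit, so the system cannot suspend between queueing the work and finishing it. Roughly, the pairing looks like this (rtc_kick_irqwork is a hypothetical helper, not part of the patch):

	static void rtc_kick_irqwork(struct rtc_device *rtc)
	{
		pm_stay_awake(rtc->dev.parent);	/* hold a wakeup reference ... */
		schedule_work(&rtc->irqwork);	/* ... until the work has run */
	}

	/* rtc_timer_do_work() then calls pm_relax(rtc->dev.parent) once the
	 * expired timers have been processed, just before unlocking. */
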
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 9c8f60903799..dc4f14255cc3 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -14,7 +14,9 @@
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/slab.h>
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 578baf9d9725..315209d9b407 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -38,8 +38,8 @@
#include <asm-generic/rtc.h>
#include <asm/intel_scu_ipc.h>
-#include <asm/mrst.h>
-#include <asm/mrst-vrtc.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
struct mrst_rtc {
struct rtc_device *rtc;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 0f0609b1aa2c..e3b25712b659 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -371,6 +371,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
}
}
+ device_init_wakeup(&adev->dev, 1);
ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
THIS_MODULE);
if (IS_ERR(ldata->rtc)) {
@@ -384,8 +385,6 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
goto out_no_irq;
}
- device_init_wakeup(&adev->dev, 1);
-
return 0;
out_no_irq:
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 451bf99582ff..f302efa937ef 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -698,10 +698,11 @@ static void dasd_profile_start(struct dasd_block *block,
}
spin_lock(&block->profile.lock);
- if (block->profile.data)
+ if (block->profile.data) {
block->profile.data->dasd_io_nr_req[counter]++;
if (rq_data_dir(req) == READ)
block->profile.data->dasd_read_nr_req[counter]++;
+ }
spin_unlock(&block->profile.lock);
/*
@@ -2978,12 +2979,12 @@ static int dasd_alloc_queue(struct dasd_block *block)
elevator_exit(block->request_queue->elevator);
block->request_queue->elevator = NULL;
+ mutex_lock(&block->request_queue->sysfs_lock);
rc = elevator_init(block->request_queue, "deadline");
- if (rc) {
+ if (rc)
blk_cleanup_queue(block->request_queue);
- return rc;
- }
- return 0;
+ mutex_unlock(&block->request_queue->sysfs_lock);
+ return rc;
}
/*
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 5adb2042e824..cee7e2708a1f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2077,6 +2077,7 @@ dasd_eckd_build_format(struct dasd_device *base,
int intensity = 0;
int r0_perm;
int nr_tracks;
+ int use_prefix;
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
@@ -2106,28 +2107,46 @@ dasd_eckd_build_format(struct dasd_device *base,
intensity = fdata->intensity;
}
+ use_prefix = base_priv->features.feature[8] & 0x01;
+
switch (intensity) {
case 0x00: /* Normal format */
case 0x08: /* Normal format, use cdl. */
cplength = 2 + (rpt*nr_tracks);
- datasize = sizeof(struct PFX_eckd_data) +
- sizeof(struct LO_eckd_data) +
- rpt * nr_tracks * sizeof(struct eckd_count);
+ if (use_prefix)
+ datasize = sizeof(struct PFX_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ rpt * nr_tracks * sizeof(struct eckd_count);
+ else
+ datasize = sizeof(struct DE_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ rpt * nr_tracks * sizeof(struct eckd_count);
break;
case 0x01: /* Write record zero and format track. */
case 0x09: /* Write record zero and format track, use cdl. */
cplength = 2 + rpt * nr_tracks;
- datasize = sizeof(struct PFX_eckd_data) +
- sizeof(struct LO_eckd_data) +
- sizeof(struct eckd_count) +
- rpt * nr_tracks * sizeof(struct eckd_count);
+ if (use_prefix)
+ datasize = sizeof(struct PFX_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ sizeof(struct eckd_count) +
+ rpt * nr_tracks * sizeof(struct eckd_count);
+ else
+ datasize = sizeof(struct DE_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ sizeof(struct eckd_count) +
+ rpt * nr_tracks * sizeof(struct eckd_count);
break;
case 0x04: /* Invalidate track. */
case 0x0c: /* Invalidate track, use cdl. */
cplength = 3;
- datasize = sizeof(struct PFX_eckd_data) +
- sizeof(struct LO_eckd_data) +
- sizeof(struct eckd_count);
+ if (use_prefix)
+ datasize = sizeof(struct PFX_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ sizeof(struct eckd_count);
+ else
+ datasize = sizeof(struct DE_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ sizeof(struct eckd_count);
break;
default:
dev_warn(&startdev->cdev->dev,
@@ -2147,14 +2166,25 @@ dasd_eckd_build_format(struct dasd_device *base,
switch (intensity & ~0x08) {
case 0x00: /* Normal format. */
- prefix(ccw++, (struct PFX_eckd_data *) data,
- fdata->start_unit, fdata->stop_unit,
- DASD_ECKD_CCW_WRITE_CKD, base, startdev);
- /* grant subsystem permission to format R0 */
- if (r0_perm)
- ((struct PFX_eckd_data *)data)
- ->define_extent.ga_extended |= 0x04;
- data += sizeof(struct PFX_eckd_data);
+ if (use_prefix) {
+ prefix(ccw++, (struct PFX_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+ /* grant subsystem permission to format R0 */
+ if (r0_perm)
+ ((struct PFX_eckd_data *)data)
+ ->define_extent.ga_extended |= 0x04;
+ data += sizeof(struct PFX_eckd_data);
+ } else {
+ define_extent(ccw++, (struct DE_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_CKD, startdev);
+ /* grant subsystem permission to format R0 */
+ if (r0_perm)
+ ((struct DE_eckd_data *) data)
+ ->ga_extended |= 0x04;
+ data += sizeof(struct DE_eckd_data);
+ }
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
fdata->start_unit, 0, rpt*nr_tracks,
@@ -2163,11 +2193,18 @@ dasd_eckd_build_format(struct dasd_device *base,
data += sizeof(struct LO_eckd_data);
break;
case 0x01: /* Write record zero + format track. */
- prefix(ccw++, (struct PFX_eckd_data *) data,
- fdata->start_unit, fdata->stop_unit,
- DASD_ECKD_CCW_WRITE_RECORD_ZERO,
- base, startdev);
- data += sizeof(struct PFX_eckd_data);
+ if (use_prefix) {
+ prefix(ccw++, (struct PFX_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_RECORD_ZERO,
+ base, startdev);
+ data += sizeof(struct PFX_eckd_data);
+ } else {
+ define_extent(ccw++, (struct DE_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev);
+ data += sizeof(struct DE_eckd_data);
+ }
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
fdata->start_unit, 0, rpt * nr_tracks + 1,
@@ -2176,10 +2213,17 @@ dasd_eckd_build_format(struct dasd_device *base,
data += sizeof(struct LO_eckd_data);
break;
case 0x04: /* Invalidate track. */
- prefix(ccw++, (struct PFX_eckd_data *) data,
- fdata->start_unit, fdata->stop_unit,
- DASD_ECKD_CCW_WRITE_CKD, base, startdev);
- data += sizeof(struct PFX_eckd_data);
+ if (use_prefix) {
+ prefix(ccw++, (struct PFX_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+ data += sizeof(struct PFX_eckd_data);
+ } else {
+ define_extent(ccw++, (struct DE_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_CKD, startdev);
+ data += sizeof(struct DE_eckd_data);
+ }
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
fdata->start_unit, 0, 1,
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 6eca019bcf30..7fef1f96b594 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -808,18 +808,19 @@ static void
dcssblk_make_request(struct request_queue *q, struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
unsigned long index;
unsigned long page_addr;
unsigned long source_addr;
unsigned long bytes_done;
- int i;
bytes_done = 0;
dev_info = bio->bi_bdev->bd_disk->private_data;
if (dev_info == NULL)
goto fail;
- if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+ if ((bio->bi_iter.bi_sector & 7) != 0 ||
+ (bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
@@ -842,22 +843,22 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
}
}
- index = (bio->bi_sector >> 3);
- bio_for_each_segment(bvec, bio, i) {
+ index = (bio->bi_iter.bi_sector >> 3);
+ bio_for_each_segment(bvec, bio, iter) {
page_addr = (unsigned long)
- page_address(bvec->bv_page) + bvec->bv_offset;
+ page_address(bvec.bv_page) + bvec.bv_offset;
source_addr = dev_info->start + (index<<12) + bytes_done;
if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0)
// More paranoia.
goto fail;
if (bio_data_dir(bio) == READ) {
memcpy((void*)page_addr, (void*)source_addr,
- bvec->bv_len);
+ bvec.bv_len);
} else {
memcpy((void*)source_addr, (void*)page_addr,
- bvec->bv_len);
+ bvec.bv_len);
}
- bytes_done += bvec->bv_len;
+ bytes_done += bvec.bv_len;
}
bio_endio(bio, 0);
return;
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 8b387b32fd62..e59331e6c2e5 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -107,7 +107,7 @@ extern debug_info_t *scm_debug;
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
- if (level > scm_debug->level)
+ if (!debug_level_enabled(scm_debug, level))
return;
while (length > 0) {
debug_event(scm_debug, level, data, length);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 464dd29d06c0..3e530f9da8c4 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -184,25 +184,26 @@ static unsigned long xpram_highest_page_index(void)
static void xpram_make_request(struct request_queue *q, struct bio *bio)
{
xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
unsigned int index;
unsigned long page_addr;
unsigned long bytes;
- int i;
- if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+ if ((bio->bi_iter.bi_sector & 7) != 0 ||
+ (bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
- if ((bio->bi_size >> 12) > xdev->size)
+ if ((bio->bi_iter.bi_size >> 12) > xdev->size)
/* Request size is no page-aligned. */
goto fail;
- if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset)
+ if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
goto fail;
- index = (bio->bi_sector >> 3) + xdev->offset;
- bio_for_each_segment(bvec, bio, i) {
+ index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
+ bio_for_each_segment(bvec, bio, iter) {
page_addr = (unsigned long)
- kmap(bvec->bv_page) + bvec->bv_offset;
- bytes = bvec->bv_len;
+ kmap(bvec.bv_page) + bvec.bv_offset;
+ bytes = bvec.bv_len;
if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
/* More paranoia. */
goto fail;
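
The dcssblk and xpram hunks are part of the immutable-biovec conversion: the request's sector and size move under bio->bi_iter, and bio_for_each_segment() now takes a struct bvec_iter and hands back each struct bio_vec by value rather than by pointer. A minimal sketch of the new iteration, with process() standing in for the per-segment copy:

	struct bio_vec bvec;
	struct bvec_iter iter;

	if (bio->bi_iter.bi_size & 4095)	/* whole-page requests only */
		goto fail;

	bio_for_each_segment(bvec, bio, iter) {
		void *addr = page_address(bvec.bv_page) + bvec.bv_offset;

		/* bvec is a copy, hence '.' rather than '->' */
		process(addr, bvec.bv_len);
	}
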
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 4600aa10a1c6..668b32b0dc1d 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -60,7 +60,7 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
struct appldata_product_id id;
int rc;
- strcpy(id.prod_nr, "LNXAPPL");
+ strncpy(id.prod_nr, "LNXAPPL", 7);
id.prod_fn = myhdr->applid;
id.record_nr = myhdr->record_num;
id.version_nr = myhdr->version;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 24a08e8f19e1..2cdec21e8924 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -615,10 +615,10 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
if (rp->state != RAW3270_STATE_RESET)
return;
- if (rq && rq->rc) {
+ if (rq->rc) {
/* Reset command failed. */
rp->state = RAW3270_STATE_INIT;
- } else if (0 && MACHINE_IS_VM) {
+ } else if (MACHINE_IS_VM) {
raw3270_size_device_vm(rp);
raw3270_size_device_done(rp);
} else
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index a3aa374799dc..1fe264379e0d 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -486,7 +486,7 @@ sclp_sync_wait(void)
timeout = 0;
if (timer_pending(&sclp_request_timer)) {
/* Get timeout TOD value */
- timeout = get_tod_clock() +
+ timeout = get_tod_clock_fast() +
sclp_tod_from_jiffies(sclp_request_timer.expires -
jiffies);
}
@@ -508,7 +508,7 @@ sclp_sync_wait(void)
while (sclp_running_state != sclp_running_state_idle) {
/* Check for expired request timer */
if (timer_pending(&sclp_request_timer) &&
- get_tod_clock() > timeout &&
+ get_tod_clock_fast() > timeout &&
del_timer(&sclp_request_timer))
sclp_request_timer.function(sclp_request_timer.data);
cpu_relax();
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 9b3a24e8d3a0..cf31d3321dab 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -313,7 +313,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
int ret;
dev_num = iminor(inode);
- if (dev_num > MAXMINOR)
+ if (dev_num >= MAXMINOR)
return -ENODEV;
logptr = &sys_ser[dev_num];
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 794820a123d0..ffb1fcf0bf5b 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -151,7 +151,7 @@ static int __init init_cpu_info(enum arch_id arch)
/* get info for boot cpu from lowcore, stored in the HSA */
- sa = kmalloc(sizeof(*sa), GFP_KERNEL);
+ sa = dump_save_area_create(0);
if (!sa)
return -ENOMEM;
if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
@@ -159,7 +159,6 @@ static int __init init_cpu_info(enum arch_id arch)
kfree(sa);
return -EIO;
}
- zfcpdump_save_areas[0] = sa;
return 0;
}
@@ -246,24 +245,25 @@ static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
{
unsigned long end;
- int i = 0;
+ int i;
if (count == 0)
return 0;
end = start + count;
- while (zfcpdump_save_areas[i]) {
+ for (i = 0; i < dump_save_areas.count; i++) {
unsigned long cp_start, cp_end; /* copy range */
unsigned long sa_start, sa_end; /* save area range */
unsigned long prefix;
unsigned long sa_off, len, buf_off;
+ struct save_area *save_area = dump_save_areas.areas[i];
- prefix = zfcpdump_save_areas[i]->pref_reg;
+ prefix = save_area->pref_reg;
sa_start = prefix + sys_info.sa_base;
sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
if ((end < sa_start) || (start > sa_end))
- goto next;
+ continue;
cp_start = max(start, sa_start);
cp_end = min(end, sa_end);
@@ -272,10 +272,8 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
len = cp_end - cp_start;
TRACE("copy_lc for: %lx\n", start);
- if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
+ if (copy_lc(buf + buf_off, save_area, sa_off, len))
return -EFAULT;
-next:
- i++;
}
return 0;
}
@@ -637,8 +635,8 @@ static void __init zcore_header_init(int arch, struct zcore_header *hdr,
hdr->num_pages = mem_size / PAGE_SIZE;
hdr->tod = get_tod_clock();
get_cpu_id(&hdr->cpu_id);
- for (i = 0; zfcpdump_save_areas[i]; i++) {
- prefix = zfcpdump_save_areas[i]->pref_reg;
+ for (i = 0; i < dump_save_areas.count; i++) {
+ prefix = dump_save_areas.areas[i]->pref_reg;
hdr->real_cpu_cnt++;
if (!prefix)
continue;
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index d028fd800c9c..f055df0b167f 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -194,15 +194,14 @@ EXPORT_SYMBOL(airq_iv_release);
*/
unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
{
- const unsigned long be_to_le = BITS_PER_LONG - 1;
unsigned long bit;
if (!iv->avail)
return -1UL;
spin_lock(&iv->lock);
- bit = find_first_bit_left(iv->avail, iv->bits);
+ bit = find_first_bit_inv(iv->avail, iv->bits);
if (bit < iv->bits) {
- clear_bit(bit ^ be_to_le, iv->avail);
+ clear_bit_inv(bit, iv->avail);
if (bit >= iv->end)
iv->end = bit + 1;
} else
@@ -220,19 +219,17 @@ EXPORT_SYMBOL(airq_iv_alloc_bit);
*/
void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
{
- const unsigned long be_to_le = BITS_PER_LONG - 1;
-
if (!iv->avail)
return;
spin_lock(&iv->lock);
/* Clear (possibly left over) interrupt bit */
- clear_bit(bit ^ be_to_le, iv->vector);
+ clear_bit_inv(bit, iv->vector);
/* Make the bit position available again */
- set_bit(bit ^ be_to_le, iv->avail);
+ set_bit_inv(bit, iv->avail);
if (bit == iv->end - 1) {
/* Find new end of bit-field */
while (--iv->end > 0)
- if (!test_bit((iv->end - 1) ^ be_to_le, iv->avail))
+ if (!test_bit_inv(iv->end - 1, iv->avail))
break;
}
spin_unlock(&iv->lock);
@@ -251,15 +248,13 @@ EXPORT_SYMBOL(airq_iv_free_bit);
unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
unsigned long end)
{
- const unsigned long be_to_le = BITS_PER_LONG - 1;
unsigned long bit;
/* Find non-zero bit starting from 'ivs->next'. */
- bit = find_next_bit_left(iv->vector, end, start);
+ bit = find_next_bit_inv(iv->vector, end, start);
if (bit >= end)
return -1UL;
- /* Clear interrupt bit (find left uses big-endian bit numbers) */
- clear_bit(bit ^ be_to_le, iv->vector);
+ clear_bit_inv(bit, iv->vector);
return bit;
}
EXPORT_SYMBOL(airq_iv_scan);
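
In airq.c the open-coded big-endian bit numbering (xor with BITS_PER_LONG - 1) is replaced by the s390 *_inv bit helpers, which encapsulate that same conversion, as the removed be_to_le constant suggests. At these call sites the two spellings are equivalent:

	/* before: mirror the bit number by hand */
	clear_bit(bit ^ (BITS_PER_LONG - 1), iv->avail);

	/* after: let the helper do the bit-order inversion */
	clear_bit_inv(bit, iv->avail);
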
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index d7da67a31c77..88e35d85d205 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -878,9 +878,9 @@ static void css_reset(void)
atomic_inc(&chpid_reset_count);
}
/* Wait for machine check for all channel paths. */
- timeout = get_tod_clock() + (RCHP_TIMEOUT << 12);
+ timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
while (atomic_read(&chpid_reset_count) != 0) {
- if (get_tod_clock() > timeout)
+ if (get_tod_clock_fast() > timeout)
break;
cpu_relax();
}
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index d9eddcba7e88..aca7bfc113aa 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -6,6 +6,7 @@
*/
#include <linux/kernel_stat.h>
+#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/device.h>
@@ -42,7 +43,7 @@ static debug_info_t *eadm_debug;
static void EADM_LOG_HEX(int level, void *data, int length)
{
- if (level > eadm_debug->level)
+ if (!debug_level_enabled(eadm_debug, level))
return;
while (length > 0) {
debug_event(eadm_debug, level, data, length);
@@ -159,6 +160,9 @@ static void eadm_subchannel_irq(struct subchannel *sch)
}
scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
private->state = EADM_IDLE;
+
+ if (private->completion)
+ complete(private->completion);
}
static struct subchannel *eadm_get_idle_sch(void)
@@ -255,13 +259,32 @@ out:
static void eadm_quiesce(struct subchannel *sch)
{
+ struct eadm_private *private = get_eadm_private(sch);
+ DECLARE_COMPLETION_ONSTACK(completion);
int ret;
+ spin_lock_irq(sch->lock);
+ if (private->state != EADM_BUSY)
+ goto disable;
+
+ if (eadm_subchannel_clear(sch))
+ goto disable;
+
+ private->completion = &completion;
+ spin_unlock_irq(sch->lock);
+
+ wait_for_completion_io(&completion);
+
+ spin_lock_irq(sch->lock);
+ private->completion = NULL;
+
+disable:
+ eadm_subchannel_set_timeout(sch, 0);
do {
- spin_lock_irq(sch->lock);
ret = cio_disable_subchannel(sch);
- spin_unlock_irq(sch->lock);
} while (ret == -EBUSY);
+
+ spin_unlock_irq(sch->lock);
}
static int eadm_subchannel_remove(struct subchannel *sch)
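
eadm_quiesce() now synchronizes with the interrupt handler before turning the subchannel off: if a request is in flight it issues a clear, parks an on-stack completion in the private data, and waits for the IRQ path to signal it. Condensing the two hunks into one view:

	/* waiter side (eadm_quiesce) */
	DECLARE_COMPLETION_ONSTACK(completion);

	spin_lock_irq(sch->lock);
	private->completion = &completion;
	spin_unlock_irq(sch->lock);
	wait_for_completion_io(&completion);

	/* IRQ side (eadm_subchannel_irq), once the request has finished */
	if (private->completion)
		complete(private->completion);
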
diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h
index 2779be093982..9664e4653f98 100644
--- a/drivers/s390/cio/eadm_sch.h
+++ b/drivers/s390/cio/eadm_sch.h
@@ -1,6 +1,7 @@
#ifndef EADM_SCH_H
#define EADM_SCH_H
+#include <linux/completion.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <linux/list.h>
@@ -9,9 +10,10 @@
struct eadm_private {
union orb orb;
enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
+ struct completion *completion;
+ struct subchannel *sch;
struct timer_list timer;
struct list_head head;
- struct subchannel *sch;
} __aligned(8);
#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 647b422bb22a..dfac9bfefea3 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -16,12 +16,6 @@
extern debug_info_t *qdio_dbf_setup;
extern debug_info_t *qdio_dbf_error;
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define DBF_ERR 3 /* error conditions */
#define DBF_WARN 4 /* warning conditions */
#define DBF_INFO 6 /* informational */
@@ -65,7 +59,7 @@ static inline void DBF_ERROR_HEX(void *addr, int len)
#define DBF_DEV_EVENT(level, device, text...) \
do { \
char debug_buffer[QDIO_DBF_LEN]; \
- if (qdio_dbf_passes(device->debug_area, level)) { \
+ if (debug_level_enabled(device->debug_area, level)) { \
snprintf(debug_buffer, QDIO_DBF_LEN, text); \
debug_text_event(device->debug_area, level, debug_buffer); \
} \
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 8ed52aa49122..3e602e8affa7 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -338,10 +338,10 @@ again:
retries++;
if (!start_time) {
- start_time = get_tod_clock();
+ start_time = get_tod_clock_fast();
goto again;
}
- if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+ if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
int count, stop;
unsigned char state = 0;
- q->timestamp = get_tod_clock();
+ q->timestamp = get_tod_clock_fast();
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -528,7 +528,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
- if (atomic_sub(count, &q->nr_buf_used) == 0)
+ if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
* At this point we know, that inbound first_to_check
* has (probably) not moved (see qdio_inbound_processing).
*/
- if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+ if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
q->first_to_check);
return 1;
@@ -728,7 +728,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
int count, stop;
unsigned char state = 0;
- q->timestamp = get_tod_clock();
+ q->timestamp = get_tod_clock_fast();
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 841ea72e4a4e..28d9349de1ad 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -11,12 +11,6 @@
/* that gives us 15 characters in the text event views */
#define ZCRYPT_DBF_LEN 16
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define DBF_ERR 3 /* error conditions */
#define DBF_WARN 4 /* warning conditions */
#define DBF_INFO 6 /* informational */
@@ -25,7 +19,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
#define ZCRYPT_DBF_COMMON(level, text...) \
do { \
- if (zcrypt_dbf_passes(zcrypt_dbf_common, level)) { \
+ if (debug_level_enabled(zcrypt_dbf_common, level)) { \
char debug_buffer[ZCRYPT_DBF_LEN]; \
snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
debug_text_event(zcrypt_dbf_common, level, \
@@ -35,7 +29,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
#define ZCRYPT_DBF_DEVICES(level, text...) \
do { \
- if (zcrypt_dbf_passes(zcrypt_dbf_devices, level)) { \
+ if (debug_level_enabled(zcrypt_dbf_devices, level)) { \
char debug_buffer[ZCRYPT_DBF_LEN]; \
snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
debug_text_event(zcrypt_dbf_devices, level, \
@@ -45,7 +39,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
#define ZCRYPT_DBF_DEV(level, device, text...) \
do { \
- if (zcrypt_dbf_passes(device->dbf_area, level)) { \
+ if (debug_level_enabled(device->dbf_area, level)) { \
char debug_buffer[ZCRYPT_DBF_LEN]; \
snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
debug_text_event(device->dbf_area, level, \
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index af2166fa5159..1abd0db29915 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -166,11 +166,15 @@ static void kvm_reset(struct virtio_device *vdev)
* make a hypercall. We hand the address of the virtqueue so the Host
* knows which virtqueue we're talking about.
*/
-static void kvm_notify(struct virtqueue *vq)
+static bool kvm_notify(struct virtqueue *vq)
{
+ long rc;
struct kvm_vqconfig *config = vq->priv;
- kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
+ rc = kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
+ if (rc < 0)
+ return false;
+ return true;
}
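Editor's note: kvm_notify() (and virtio_ccw_kvm_notify() below) now returns bool instead of void so a failed hypercall is reported to the virtio core. The value of the bool is on the caller side, where the kick path can stop using a queue whose host notification failed. A compilable mock of that caller contract; the struct layout and the broken flag here are illustrative, not the actual virtio_ring internals:

```c
#include <stdbool.h>
#include <stdio.h>

struct mock_vq {
	bool broken;
	long (*hypercall)(unsigned long addr);
	bool (*notify)(struct mock_vq *vq);
	unsigned long queue_addr;
};

/* Transport notify hook: false means the host could not be kicked. */
static bool mock_notify(struct mock_vq *vq)
{
	return vq->hypercall(vq->queue_addr) >= 0;
}

/* Caller-side contract: propagate the failure and stop using the queue. */
static bool mock_kick(struct mock_vq *vq)
{
	if (vq->broken)
		return false;
	if (!vq->notify(vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}

static long failing_hypercall(unsigned long addr)
{
	(void)addr;
	return -38;	/* pretend the hypervisor rejected the call */
}

int main(void)
{
	struct mock_vq vq = { .hypercall = failing_hypercall,
			      .notify = mock_notify, .queue_addr = 0x1000 };

	printf("kick: %d, broken: %d\n", mock_kick(&vq), vq.broken);
	return 0;
}
```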
/*
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 779dc5136291..d6297176ab85 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -162,7 +162,7 @@ static inline long do_kvm_notify(struct subchannel_id schid,
return __rc;
}
-static void virtio_ccw_kvm_notify(struct virtqueue *vq)
+static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
{
struct virtio_ccw_vq_info *info = vq->priv;
struct virtio_ccw_device *vcdev;
@@ -171,6 +171,9 @@ static void virtio_ccw_kvm_notify(struct virtqueue *vq)
vcdev = to_vc_device(info->vq->vdev);
ccw_device_get_schid(vcdev->cdev, &schid);
info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
+ if (info->cookie < 0)
+ return false;
+ return true;
}
static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 1bc5904df19f..3339b9b607b3 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -114,15 +114,9 @@ do { \
debug_event(claw_dbf_##name,level,(void*)(addr),len); \
} while (0)
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define CLAW_DBF_TEXT_(level,name,text...) \
do { \
- if (claw_dbf_passes(claw_dbf_##name, level)) { \
+ if (debug_level_enabled(claw_dbf_##name, level)) { \
sprintf(debug_buffer, text); \
debug_text_event(claw_dbf_##name, level, \
debug_buffer); \
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
index 6514e1cb3f1c..8363f1c966ef 100644
--- a/drivers/s390/net/ctcm_dbug.c
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -66,7 +66,7 @@ void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
char dbf_txt_buf[64];
va_list args;
- if (level > (ctcm_dbf[dbf_nix].id)->level)
+ if (!debug_level_enabled(ctcm_dbf[dbf_nix].id, level))
return;
va_start(args, fmt);
vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 8c03392ac833..150fcb4cebc3 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -16,15 +16,9 @@ do { \
debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
} while (0)
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define LCS_DBF_TEXT_(level,name,text...) \
do { \
- if (lcs_dbf_passes(lcs_dbf_##name, level)) { \
+ if (debug_level_enabled(lcs_dbf_##name, level)) { \
sprintf(debug_buffer, text); \
debug_text_event(lcs_dbf_##name, level, debug_buffer); \
} \
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 279ad504ec3c..9b333fcf1a4c 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -105,15 +105,9 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define IUCV_DBF_TEXT_(name, level, text...) \
do { \
- if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
+ if (debug_level_enabled(iucv_dbf_##name, level)) { \
char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
sprintf(__buf, text); \
debug_text_event(iucv_dbf_##name, level, __buf); \
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 0a328d0d11be..d7b66a28fe75 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5096,7 +5096,7 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
char dbf_txt_buf[32];
va_list args;
- if (level > id->level)
+ if (!debug_level_enabled(id, level))
return;
va_start(args, fmt);
vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 132a905b6bdb..0ca64484cfa3 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -344,7 +344,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
/**
* zfcp_dbf_san_req - trace event for issued SAN request
- * @tag: indentifier for event
+ * @tag: identifier for event
* @fsf_req: request containing issued CT data
* d_id: destination ID
*/
@@ -361,7 +361,7 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
/**
* zfcp_dbf_san_res - trace event for received SAN request
- * @tag: indentifier for event
+ * @tag: identifier for event
* @fsf_req: request containing issued CT data
*/
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
@@ -377,7 +377,7 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
/**
* zfcp_dbf_san_in_els - trace event for incoming ELS
- * @tag: indentifier for event
+ * @tag: identifier for event
* @fsf_req: request containing issued CT data
*/
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 3ac7a4b30dd9..0be3d48681ae 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -278,7 +278,7 @@ struct zfcp_dbf {
static inline
void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
{
- if (level <= req->adapter->dbf->hba->level)
+ if (debug_level_enabled(req->adapter->dbf->hba, level))
zfcp_dbf_hba_fsf_res(tag, req);
}
@@ -317,7 +317,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
struct zfcp_adapter *adapter = (struct zfcp_adapter *)
scmd->device->host->hostdata[0];
- if (level <= adapter->dbf->scsi->level)
+ if (debug_level_enabled(adapter->dbf->scsi, level))
zfcp_dbf_scsi(tag, scmd, req);
}
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index feab3a5e50b5..757eb0716d45 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -696,7 +696,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
pci_device)) != NULL) {
- struct blogic_adapter *adapter = adapter;
+ struct blogic_adapter *host_adapter = adapter;
struct blogic_adapter_info adapter_info;
enum blogic_isa_ioport mod_ioaddr_req;
unsigned char bus;
@@ -744,9 +744,9 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
known and enabled, note that the particular Standard ISA I/O
Address should not be probed.
*/
- adapter->io_addr = io_addr;
- blogic_intreset(adapter);
- if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
+ host_adapter->io_addr = io_addr;
+ blogic_intreset(host_adapter);
+ if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
&adapter_info, sizeof(adapter_info)) ==
sizeof(adapter_info)) {
if (adapter_info.isa_port < 6)
@@ -762,7 +762,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
I/O Address assigned at system initialization.
*/
mod_ioaddr_req = BLOGIC_IO_DISABLE;
- blogic_cmd(adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
+ blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
sizeof(mod_ioaddr_req), NULL, 0);
/*
For the first MultiMaster Host Adapter enumerated,
@@ -779,12 +779,12 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
fetch_localram.count = sizeof(autoscsi_byte45);
- blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM,
+ blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,
&fetch_localram, sizeof(fetch_localram),
&autoscsi_byte45,
sizeof(autoscsi_byte45));
- blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id,
- sizeof(id));
+ blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
+ &id, sizeof(id));
if (id.fw_ver_digit1 == '5')
force_scan_order =
autoscsi_byte45.force_scan_order;
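Editor's note: the BusLogic hunk fixes a self-initialization bug. The old `struct blogic_adapter *adapter = adapter;` declared a new local that shadowed the function parameter and was initialized from itself, so every blogic_cmd() call inside the probe loop operated on an indeterminate pointer; renaming the local to host_adapter and copying the parameter restores the intent. A minimal illustration of the hazard (the broken form is kept in a comment only, since evaluating it is undefined behaviour; the self-init idiom merely silences the "may be used uninitialized" warning):

```c
#include <stdio.h>

struct adapter {
	int io_addr;
};

static void probe_one(struct adapter *adapter)
{
	/*
	 * Broken original shape (do not do this):
	 *
	 *	struct adapter *adapter = adapter;
	 *
	 * The new local shadows the parameter and is initialized from its
	 * own indeterminate value; the compiler stays quiet about it.
	 */
	struct adapter *host_adapter = adapter;	/* fixed: copy the parameter */

	host_adapter->io_addr = 0x330;
	printf("io_addr = 0x%x\n", host_adapter->io_addr);
}

int main(void)
{
	struct adapter a = { 0 };

	probe_one(&a);
	return 0;
}
```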
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 408a42ef787a..f0d432c139d0 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
}
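Editor's note: the aacraid change closes a gap where the 32-bit compat ioctl entry point skipped the privilege check, so raw HBA ioctls were reachable without CAP_SYS_RAWIO. The shape of the fix is simply "apply the same privilege gate on every entry point before dispatch". A stub sketch, where privileged() is a stand-in for the kernel's capable(CAP_SYS_RAWIO):

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool privileged(void)
{
	/* Stand-in for capable(CAP_SYS_RAWIO). */
	return false;
}

static int do_raw_ioctl(unsigned int cmd)
{
	printf("dispatching raw ioctl 0x%x\n", cmd);
	return 0;
}

/* Both the native and the compat entry point must apply the same gate,
 * otherwise 32-bit callers bypass the check. */
static int compat_ioctl(unsigned int cmd)
{
	if (!privileged())
		return -EPERM;
	return do_raw_ioctl(cmd);
}

int main(void)
{
	printf("compat_ioctl() = %d\n", compat_ioctl(0x1234));
	return 0;
}
```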
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 33c52bc2c7b4..97fd450aff09 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1035,7 +1035,6 @@ static void arcmsr_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
scsi_host_put(host);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static void arcmsr_shutdown(struct pci_dev *pdev)
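Editor's note: this hunk and many of the SCSI hunks below simply drop pci_set_drvdata(pdev, NULL) from remove and error paths. These cleanups rely on the driver core clearing drvdata itself once a device is unbound (or its probe fails), which makes the per-driver reset redundant. A toy, compilable model of that division of labour; the helper names mimic the kernel ones but everything here is a mock:

```c
#include <assert.h>
#include <stdio.h>

/* Toy model of driver-core drvdata handling. */
struct device {
	void *driver_data;
};

static void dev_set_drvdata(struct device *dev, void *data)
{
	dev->driver_data = data;
}

static void *dev_get_drvdata(struct device *dev)
{
	return dev->driver_data;
}

static void driver_remove(struct device *dev)
{
	void *priv = dev_get_drvdata(dev);

	printf("freeing private state %p\n", priv);
	/* No dev_set_drvdata(dev, NULL) needed in the driver any more ... */
}

static void core_unbind(struct device *dev)
{
	driver_remove(dev);
	/* ... because the core clears it after the driver's remove(). */
	dev_set_drvdata(dev, NULL);
}

int main(void)
{
	struct device dev;
	int priv = 42;

	dev_set_drvdata(&dev, &priv);
	core_unbind(&dev);
	assert(dev_get_drvdata(&dev) == NULL);
	printf("drvdata cleared by the core\n");
	return 0;
}
```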
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 15a629d8ed08..a795d81ef875 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -3144,8 +3144,6 @@ static void atp870u_remove (struct pci_dev *pdev)
atp870u_free_tables(pshost);
printk(KERN_INFO "scsi_host_put : %p\n",pshost);
scsi_host_put(pshost);
- printk(KERN_INFO "pci_set_drvdata : %p\n",pdev);
- pci_set_drvdata(pdev, NULL);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index f8ca7becacca..fc80a325a1e6 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -766,49 +766,20 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
bfad->pcidev = pdev;
/* Adjust PCIe Maximum Read Request Size */
- if (pcie_max_read_reqsz > 0) {
- int pcie_cap_reg;
- u16 pcie_dev_ctl;
- u16 mask = 0xffff;
-
- switch (pcie_max_read_reqsz) {
- case 128:
- mask = 0x0;
- break;
- case 256:
- mask = 0x1000;
- break;
- case 512:
- mask = 0x2000;
- break;
- case 1024:
- mask = 0x3000;
- break;
- case 2048:
- mask = 0x4000;
- break;
- case 4096:
- mask = 0x5000;
- break;
- default:
- break;
- }
-
- pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (mask != 0xffff && pcie_cap_reg) {
- pcie_cap_reg += 0x08;
- pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
- if ((pcie_dev_ctl & 0x7000) != mask) {
- printk(KERN_WARNING "BFA[%s]: "
+ if (pci_is_pcie(pdev) && pcie_max_read_reqsz) {
+ if (pcie_max_read_reqsz >= 128 &&
+ pcie_max_read_reqsz <= 4096 &&
+ is_power_of_2(pcie_max_read_reqsz)) {
+ int max_rq = pcie_get_readrq(pdev);
+ printk(KERN_WARNING "BFA[%s]: "
"pcie_max_read_request_size is %d, "
- "reset to %d\n", bfad->pci_name,
- (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
+ "reset to %d\n", bfad->pci_name, max_rq,
pcie_max_read_reqsz);
-
- pcie_dev_ctl &= ~0x7000;
- pci_write_config_word(pdev, pcie_cap_reg,
- pcie_dev_ctl | mask);
- }
+ pcie_set_readrq(pdev, pcie_max_read_reqsz);
+ } else {
+ printk(KERN_WARNING "BFA[%s]: invalid "
+ "pcie_max_read_request_size %d ignored\n",
+ bfad->pci_name, pcie_max_read_reqsz);
}
}
@@ -833,7 +804,6 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
/* Disable PCIE Advanced Error Recovery (AER) */
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
bfa_status_t
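Editor's note: instead of hand-decoding PCI_EXP_DEVCTL, bfad now validates the pcie_max_read_reqsz module parameter and lets pcie_get_readrq()/pcie_set_readrq() touch the register. A valid PCIe Maximum Read Request Size is a power of two between 128 and 4096 bytes; the validation step on its own is plain integer logic and can be shown standalone:

```c
#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Mirrors the check the driver performs before calling pcie_set_readrq(). */
static bool valid_mrrs(int reqsz)
{
	return reqsz >= 128 && reqsz <= 4096 && is_power_of_2(reqsz);
}

int main(void)
{
	const int candidates[] = { 0, 100, 128, 512, 3000, 4096, 8192 };

	for (unsigned int i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++)
		printf("%4d -> %s\n", candidates[i],
		       valid_mrrs(candidates[i]) ? "ok" : "rejected");
	return 0;
}
```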
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5be718c241c4..e4cf23df4b4f 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -126,7 +126,7 @@ static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
/**
* bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
* @action: action, ARM or DISARM. For now only ARM_CQE is used
*
* Arm'ing CQ will enable chip to generate global EQ events inorder to interrupt
@@ -756,7 +756,7 @@ void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
/**
* bnx2i_send_conn_destroy - initiates iscsi connection teardown process
* @hba: adapter structure pointer
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate
* iscsi connection context clean-up process
@@ -791,7 +791,7 @@ int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/**
* bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
* @hba: adapter structure pointer
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
*/
@@ -851,7 +851,7 @@ static int bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
/**
* bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
* @hba: adapter structure pointer
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
*/
@@ -920,7 +920,7 @@ static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
* bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
*
* @hba: adapter structure pointer
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
*/
@@ -939,7 +939,7 @@ int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/**
* setup_qp_page_tables - iscsi QP page table setup function
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices requires
* 64-bit address in big endian format. Whereas 10G/sec (57710) requires
@@ -1046,7 +1046,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
/**
* bnx2i_alloc_qp_resc - allocates required resources for QP.
* @hba: adapter structure pointer
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* Allocate QP (transport layer for iSCSI connection) resources, DMA'able
* memory for SQ/RQ/CQ and page tables. EP structure elements such
@@ -1191,7 +1191,7 @@ mem_alloc_err:
/**
* bnx2i_free_qp_resc - free memory resources held by QP
* @hba: adapter structure pointer
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* Free QP resources - SQ/RQ/CQ memory and page tables.
*/
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index fabeb88602ac..854dad7d5b03 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -596,7 +596,7 @@ void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
/**
* bnx2i_ep_destroy_list_add - add an entry to EP destroy list
* @hba: pointer to adapter instance
- * @ep: pointer to endpoint (transport indentifier) structure
+ * @ep: pointer to endpoint (transport identifier) structure
*
* EP destroy queue manager
*/
@@ -613,7 +613,7 @@ static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
* bnx2i_ep_destroy_list_del - add an entry to EP destroy list
*
* @hba: pointer to adapter instance
- * @ep: pointer to endpoint (transport indentifier) structure
+ * @ep: pointer to endpoint (transport identifier) structure
*
* EP destroy queue manager
*/
@@ -630,7 +630,7 @@ static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
/**
* bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
* @hba: pointer to adapter instance
- * @ep: pointer to endpoint (transport indentifier) structure
+ * @ep: pointer to endpoint (transport identifier) structure
*
* pending conn offload completion queue manager
*/
@@ -646,7 +646,7 @@ static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
/**
* bnx2i_ep_ofld_list_del - add an entry to ep offload pending list
* @hba: pointer to adapter instance
- * @ep: pointer to endpoint (transport indentifier) structure
+ * @ep: pointer to endpoint (transport identifier) structure
*
* pending conn offload completion queue manager
*/
@@ -721,7 +721,7 @@ bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
/**
* bnx2i_ep_active_list_add - add an entry to ep active list
* @hba: pointer to adapter instance
- * @ep: pointer to endpoint (transport indentifier) structure
+ * @ep: pointer to endpoint (transport identifier) structure
*
* current active conn queue manager
*/
@@ -737,7 +737,7 @@ static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba,
/**
* bnx2i_ep_active_list_del - deletes an entry to ep active list
* @hba: pointer to adapter instance
- * @ep: pointer to endpoint (transport indentifier) structure
+ * @ep: pointer to endpoint (transport identifier) structure
*
* current active conn queue manager
*/
@@ -1695,7 +1695,7 @@ no_nx2_route:
/**
* bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
* @hba: pointer to adapter instance
- * @ep: endpoint (transport indentifier) structure
+ * @ep: endpoint (transport identifier) structure
*
* destroys cm_sock structure and on chip iscsi context
*/
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 0eb35b9b3784..0eaec4748957 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -852,22 +852,6 @@ csio_hw_get_flash_params(struct csio_hw *hw)
return 0;
}
-static void
-csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
-{
- uint16_t val;
- int pcie_cap;
-
- if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
- pci_read_config_word(hw->pdev,
- pcie_cap + PCI_EXP_DEVCTL2, &val);
- val &= 0xfff0;
- val |= range ;
- pci_write_config_word(hw->pdev,
- pcie_cap + PCI_EXP_DEVCTL2, val);
- }
-}
-
/*****************************************************************************/
/* HW State machine assists */
/*****************************************************************************/
@@ -2069,8 +2053,10 @@ csio_hw_configure(struct csio_hw *hw)
goto out;
}
- /* Set pci completion timeout value to 4 seconds. */
- csio_set_pcie_completion_timeout(hw, 0xd);
+ /* Set PCIe completion timeout to 4 seconds */
+ if (pci_is_pcie(hw->pdev))
+ pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
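Editor's note: csio now relies on pcie_capability_clear_and_set_word() rather than locating the PCIe capability and poking PCI_EXP_DEVCTL2 by hand. The helper is a read-modify-write on a 16-bit capability register: clear the given field mask, then OR in the new value. A sketch of that contract applied to a plain variable; the 0xd completion-timeout encoding and the 4-bit field mask are taken from the hunk above, the starting register value is made up:

```c
#include <stdint.h>
#include <stdio.h>

/* Same semantics as pcie_capability_clear_and_set_word(), applied to a
 * local copy of a 16-bit config register instead of real config space. */
static uint16_t clear_and_set_word(uint16_t reg, uint16_t clear, uint16_t set)
{
	reg &= ~clear;
	reg |= set;
	return reg;
}

int main(void)
{
	uint16_t devctl2 = 0x0842;		/* arbitrary starting value */
	const uint16_t comp_timeout_mask = 0x000f;

	/* Program the completion-timeout range encoding 0xd (~4 s class). */
	devctl2 = clear_and_set_word(devctl2, comp_timeout_mask, 0xd);
	printf("DEVCTL2 = 0x%04x (timeout field 0x%x)\n",
	       devctl2, devctl2 & comp_timeout_mask);
	return 0;
}
```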
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 00346fe939d5..1aafc331ee63 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1010,7 +1010,6 @@ err_lnode_exit:
csio_hw_stop(hw);
spin_unlock_irq(&hw->lock);
csio_lnodes_unblock_request(hw);
- pci_set_drvdata(hw->pdev, NULL);
csio_lnodes_exit(hw, 0);
csio_hw_free(hw);
err_pci_exit:
@@ -1044,7 +1043,6 @@ static void csio_remove_one(struct pci_dev *pdev)
csio_lnodes_exit(hw, 0);
csio_hw_free(hw);
- pci_set_drvdata(pdev, NULL);
csio_pci_exit(pdev, &bars);
}
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 694e13c45dfd..a726187abe5c 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4859,7 +4859,6 @@ static void dc395x_remove_one(struct pci_dev *dev)
adapter_uninit(acb);
pci_disable_device(dev);
scsi_host_put(scsi_host);
- pci_set_drvdata(dev, NULL);
}
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index c9382d6eee78..1f4f22fe8281 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -553,16 +553,20 @@ static struct device_type fcoe_fcf_device_type = {
.release = fcoe_fcf_device_release,
};
-static struct bus_attribute fcoe_bus_attr_group[] = {
- __ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store),
- __ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store),
- __ATTR_NULL
+static BUS_ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store);
+static BUS_ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store);
+
+static struct attribute *fcoe_bus_attrs[] = {
+ &bus_attr_ctlr_create.attr,
+ &bus_attr_ctlr_destroy.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(fcoe_bus);
static struct bus_type fcoe_bus_type = {
.name = "fcoe",
.match = &fcoe_bus_match,
- .bus_attrs = fcoe_bus_attr_group,
+ .bus_groups = fcoe_bus_groups,
};
/**
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index bbf81ea3a252..889b594fe8b0 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -996,7 +996,6 @@ static void fnic_remove(struct pci_dev *pdev)
fnic_iounmap(fnic);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
scsi_host_put(lp->host);
}
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 6d55b4e7e792..ee4fa40a50b1 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -594,8 +594,6 @@ static void gdth_pci_remove_one(struct pci_dev *pdev)
{
gdth_ha_str *ha = pci_get_drvdata(pdev);
- pci_set_drvdata(pdev, NULL);
-
list_del(&ha->list);
gdth_remove_one(ha);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 891c86b66253..df72d4a58385 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5018,7 +5018,6 @@ static void hpsa_remove_one(struct pci_dev *pdev)
kfree(h->hba_inquiry_data);
pci_disable_device(pdev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
kfree(h);
}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 446b85110a1f..0cac7d8fd0f7 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2163,10 +2163,10 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
/* do we need to support multiple segments? */
- if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
- printk("%s: multiple segments req %u %u, rsp %u %u\n",
- __func__, bio_segments(req->bio), blk_rq_bytes(req),
- bio_segments(rsp->bio), blk_rq_bytes(rsp));
+ if (bio_multiple_segments(req->bio) ||
+ bio_multiple_segments(rsp->bio)) {
+ printk("%s: multiple segments req %u, rsp %u\n",
+ __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
return -EINVAL;
}
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 647f5bfb3bd3..3dfd38ce806c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4581,8 +4581,6 @@ lpfc_disable_pci_dev(struct lpfc_hba *phba)
/* Release PCI resource and disable PCI device */
pci_release_selected_regions(pdev, bars);
pci_disable_device(pdev);
- /* Null out PCI private reference to driver */
- pci_set_drvdata(pdev, NULL);
return;
}
@@ -9429,7 +9427,6 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
/* Disable interrupt */
lpfc_sli_disable_intr(phba);
- pci_set_drvdata(pdev, NULL);
scsi_host_put(shost);
/*
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 515c9629e9fe..d1a4b82836ea 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -534,7 +534,6 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out_cmm_unreg:
- pci_set_drvdata(pdev, NULL);
megaraid_cmm_unregister(adapter);
out_fini_mbox:
megaraid_fini_mbox(adapter);
@@ -594,11 +593,6 @@ megaraid_detach_one(struct pci_dev *pdev)
// detach from the IO sub-system
megaraid_io_detach(adapter);
- // reset the device state in the PCI structure. We check this
- // condition when we enter here. If the device state is NULL,
- // that would mean the device has already been removed
- pci_set_drvdata(pdev, NULL);
-
// Unregister from common management module
//
// FIXME: this must return success or failure for conditions if there
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3020921a4746..a6efc1e088ec 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -4449,7 +4449,6 @@ retry_irq_register:
megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
megasas_mgmt_info.max_index--;
- pci_set_drvdata(pdev, NULL);
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
for (i = 0 ; i < instance->msix_vectors; i++)
@@ -4805,8 +4804,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
}
}
- pci_set_drvdata(instance->pdev, NULL);
-
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
@@ -4848,8 +4845,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
instance->evt_detail, instance->evt_detail_h);
scsi_host_put(host);
- pci_set_drvdata(pdev, NULL);
-
pci_disable_device(pdev);
return;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 9d26637308be..410f4a3e8888 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1901,7 +1901,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
Mpi2SmpPassthroughRequest_t *mpi_request;
Mpi2SmpPassthroughReply_t *mpi_reply;
- int rc, i;
+ int rc;
u16 smid;
u32 ioc_state;
unsigned long timeleft;
@@ -1916,7 +1916,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
void *pci_addr_out = NULL;
u16 wait_state_count;
struct request *rsp = req->next_rq;
- struct bio_vec *bvec = NULL;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
if (!rsp) {
printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
@@ -1942,7 +1943,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
ioc->transport_cmds.status = MPT2_CMD_PENDING;
/* Check if the request is split across multiple segments */
- if (bio_segments(req->bio) > 1) {
+ if (bio_multiple_segments(req->bio)) {
u32 offset = 0;
/* Allocate memory and copy the request */
@@ -1955,11 +1956,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
goto out;
}
- bio_for_each_segment(bvec, req->bio, i) {
+ bio_for_each_segment(bvec, req->bio, iter) {
memcpy(pci_addr_out + offset,
- page_address(bvec->bv_page) + bvec->bv_offset,
- bvec->bv_len);
- offset += bvec->bv_len;
+ page_address(bvec.bv_page) + bvec.bv_offset,
+ bvec.bv_len);
+ offset += bvec.bv_len;
}
} else {
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1974,7 +1975,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* Check if the response needs to be populated across
* multiple segments */
- if (bio_segments(rsp->bio) > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
&pci_dma_in);
if (!pci_addr_in) {
@@ -2041,7 +2042,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- if (bio_segments(req->bio) > 1) {
+ if (bio_multiple_segments(req->bio)) {
ioc->base_add_sg_single(psge, sgl_flags |
(blk_rq_bytes(req) - 4), pci_dma_out);
} else {
@@ -2057,7 +2058,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
MPI2_SGE_FLAGS_END_OF_LIST);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- if (bio_segments(rsp->bio) > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
ioc->base_add_sg_single(psge, sgl_flags |
(blk_rq_bytes(rsp) + 4), pci_dma_in);
} else {
@@ -2102,23 +2103,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
le16_to_cpu(mpi_reply->ResponseDataLength);
/* check if the resp needs to be copied from the allocated
* pci mem */
- if (bio_segments(rsp->bio) > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
u32 offset = 0;
u32 bytes_to_copy =
le16_to_cpu(mpi_reply->ResponseDataLength);
- bio_for_each_segment(bvec, rsp->bio, i) {
- if (bytes_to_copy <= bvec->bv_len) {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+ if (bytes_to_copy <= bvec.bv_len) {
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
offset, bytes_to_copy);
break;
} else {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
- offset, bvec->bv_len);
- bytes_to_copy -= bvec->bv_len;
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
+ offset, bvec.bv_len);
+ bytes_to_copy -= bvec.bv_len;
}
- offset += bvec->bv_len;
+ offset += bvec.bv_len;
}
}
} else {
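Editor's note: the mpt2sas/mpt3sas changes are part of the immutable-biovec conversion: bio_for_each_segment() now walks a bio with a struct bvec_iter and yields a struct bio_vec by value, so the old integer index and pointer dereferences (bvec->bv_len) become bvec.bv_len. The loop's job is unchanged: flatten the request's scattered segments into one contiguous DMA buffer. A user-space analogue of that flattening loop, with plain arrays standing in for the bio machinery:

```c
#include <stdio.h>
#include <string.h>

struct segment {
	const void *base;	/* stands in for page_address() + bv_offset */
	size_t len;		/* stands in for bv_len */
};

/* Copy a scattered request into one contiguous buffer, as the SMP
 * passthrough path does before handing the data to the HBA. */
static size_t flatten(const struct segment *segs, size_t nsegs,
		      char *out, size_t out_len)
{
	size_t offset = 0;

	for (size_t i = 0; i < nsegs; i++) {
		if (offset + segs[i].len > out_len)
			break;
		memcpy(out + offset, segs[i].base, segs[i].len);
		offset += segs[i].len;
	}
	return offset;
}

int main(void)
{
	const struct segment segs[] = {
		{ "smp ", 4 }, { "request ", 8 }, { "payload", 8 },
	};
	char flat[32] = { 0 };
	size_t n = flatten(segs, 3, flat, sizeof(flat) - 1);

	printf("copied %zu bytes: \"%s\"\n", n, flat);
	return 0;
}
```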
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index e771a88c6a74..65170cb1a00f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1884,7 +1884,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
Mpi2SmpPassthroughRequest_t *mpi_request;
Mpi2SmpPassthroughReply_t *mpi_reply;
- int rc, i;
+ int rc;
u16 smid;
u32 ioc_state;
unsigned long timeleft;
@@ -1898,7 +1898,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
void *pci_addr_out = NULL;
u16 wait_state_count;
struct request *rsp = req->next_rq;
- struct bio_vec *bvec = NULL;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
if (!rsp) {
pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
@@ -1925,7 +1926,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
ioc->transport_cmds.status = MPT3_CMD_PENDING;
/* Check if the request is split across multiple segments */
- if (req->bio->bi_vcnt > 1) {
+ if (bio_multiple_segments(req->bio)) {
u32 offset = 0;
/* Allocate memory and copy the request */
@@ -1938,11 +1939,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
goto out;
}
- bio_for_each_segment(bvec, req->bio, i) {
+ bio_for_each_segment(bvec, req->bio, iter) {
memcpy(pci_addr_out + offset,
- page_address(bvec->bv_page) + bvec->bv_offset,
- bvec->bv_len);
- offset += bvec->bv_len;
+ page_address(bvec.bv_page) + bvec.bv_offset,
+ bvec.bv_len);
+ offset += bvec.bv_len;
}
} else {
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1957,7 +1958,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* Check if the response needs to be populated across
* multiple segments */
- if (rsp->bio->bi_vcnt > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
&pci_dma_in);
if (!pci_addr_in) {
@@ -2018,7 +2019,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
psge = &mpi_request->SGL;
- if (req->bio->bi_vcnt > 1)
+ if (bio_multiple_segments(req->bio))
ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
pci_dma_in, (blk_rq_bytes(rsp) + 4));
else
@@ -2063,23 +2064,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* check if the resp needs to be copied from the allocated
* pci mem */
- if (rsp->bio->bi_vcnt > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
u32 offset = 0;
u32 bytes_to_copy =
le16_to_cpu(mpi_reply->ResponseDataLength);
- bio_for_each_segment(bvec, rsp->bio, i) {
- if (bytes_to_copy <= bvec->bv_len) {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+ if (bytes_to_copy <= bvec.bv_len) {
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
offset, bytes_to_copy);
break;
} else {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
- offset, bvec->bv_len);
- bytes_to_copy -= bvec->bv_len;
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
+ offset, bvec.bv_len);
+ bytes_to_copy -= bvec.bv_len;
}
- offset += bvec->bv_len;
+ offset += bvec.bv_len;
}
}
} else {
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 7b7381d7671f..5ff978be249d 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -657,7 +657,6 @@ static void mvs_pci_remove(struct pci_dev *pdev)
tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
#endif
- pci_set_drvdata(pdev, NULL);
sas_unregister_ha(sha);
sas_remove_host(mvi->shost);
scsi_remove_host(mvi->shost);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 6b1b4e91e53f..6c1f223a8e1d 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1411,7 +1411,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
if (res) {
del_timer(&task->slow_task->timer);
- mv_printk("executing internel task failed:%d\n", res);
+ mv_printk("executing internal task failed:%d\n", res);
goto ex_err;
}
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index c3601b57a80c..edbee8dc62c9 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2583,7 +2583,6 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
fail_io_attach:
- pci_set_drvdata(pdev, NULL);
mhba->instancet->disable_intr(mhba);
free_irq(mhba->pdev->irq, mhba);
fail_init_irq:
@@ -2618,7 +2617,6 @@ static void mvumi_detach_one(struct pci_dev *pdev)
free_irq(mhba->pdev->irq, mhba);
mvumi_release_fw(mhba);
scsi_host_put(host);
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
dev_dbg(&pdev->dev, "driver is removed!\n");
}
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 5982a587babc..7d014b11df62 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -1615,7 +1615,7 @@ struct ncb {
spinlock_t smp_lock; /* Lock for SMP threading */
/*----------------------------------------------------------------
- ** Chip and controller indentification.
+ ** Chip and controller identification.
**----------------------------------------------------------------
*/
int unit; /* Unit number */
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index aa66361ed44b..bac04c2335aa 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -731,7 +731,7 @@ static int _osd_req_list_objects(struct osd_request *or,
bio->bi_rw &= ~REQ_WRITE;
or->in.bio = bio;
- or->in.total_bytes = bio->bi_size;
+ or->in.total_bytes = bio->bi_iter.bi_size;
return 0;
}
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index f7c189606b84..0dba7c7856ab 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -873,7 +873,6 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
struct pm8001_hba_info *pm8001_ha;
int i;
pm8001_ha = sha->lldd_ha;
- pci_set_drvdata(pdev, NULL);
sas_unregister_ha(sha);
sas_remove_host(pm8001_ha->shost);
list_del(&pm8001_ha->list);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 1eb7b0280a45..e43db7742047 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -6049,7 +6049,6 @@ out_release_regions:
out_disable_device:
atomic_dec(&pmcraid_adapter_count);
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return -ENODEV;
}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 2ef497ebadc0..ee5c1833eb73 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -20,7 +20,7 @@
* | Device Discovery | 0x2095 | 0x2020-0x2022, |
* | | | 0x2011-0x2012, |
* | | | 0x2016 |
- * | Queue Command and IO tracing | 0x3058 | 0x3006-0x300b |
+ * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
* | | | 0x302d,0x3033 |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index df1b30ba938c..ff9c86b1a0d8 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1957,6 +1957,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
que = MSW(sts->handle);
req = ha->req_q_map[que];
+ /* Check for invalid queue pointer */
+ if (req == NULL ||
+ que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
+ ql_dbg(ql_dbg_io, vha, 0x3059,
+ "Invalid status handle (0x%x): Bad req pointer. req=%p, "
+ "que=%u.\n", sts->handle, req, que);
+ return;
+ }
+
/* Validate handle. */
if (handle < req->num_outstanding_cmds)
sp = req->outstanding_cmds[handle];
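Editor's note: the qla2xxx hunk guards against a stale or corrupted status handle by checking the queue number extracted from the handle against the queues that actually exist before req is dereferenced. The general shape is: validate an index produced by firmware against the table it indexes, and log and bail out instead of following a NULL or out-of-range pointer. A simplified sketch (the kernel version additionally consults the req_qid_map bitmap):

```c
#include <stdio.h>

#define MAX_REQ_QUEUES 8

struct req_queue {
	int id;
};

static struct req_queue *req_q_map[MAX_REQ_QUEUES];

/* Reject handles whose queue index has no backing queue. */
static struct req_queue *lookup_queue(unsigned int que)
{
	if (que >= MAX_REQ_QUEUES || req_q_map[que] == NULL) {
		fprintf(stderr,
			"invalid status handle: bad req pointer, que=%u\n", que);
		return NULL;
	}
	return req_q_map[que];
}

int main(void)
{
	static struct req_queue q0 = { .id = 0 };

	req_q_map[0] = &q0;
	printf("que 0 -> %p\n", (void *)lookup_queue(0));
	printf("que 5 -> %p\n", (void *)lookup_queue(5));	/* rejected */
	return 0;
}
```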
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 62ee7131b204..30d20e74e48a 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -507,7 +507,7 @@ qlafx00_pci_config(scsi_qla_host_t *vha)
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
/* PCIe -- adjust Maximum Read Request Size (2048). */
- if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+ if (pci_is_pcie(ha->pdev))
pcie_set_readrq(ha->pdev, 2048);
ha->chip_revision = ha->pdev->revision;
@@ -660,10 +660,8 @@ char *
qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
struct qla_hw_data *ha = vha->hw;
- int pcie_reg;
- pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
- if (pcie_reg) {
+ if (pci_is_pcie(ha->pdev)) {
strcpy(str, "PCIe iSA");
return str;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 9f01bbbf3a26..52be35e0300c 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -494,18 +494,14 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
static char *pci_bus_modes[] = { "33", "66", "100", "133", };
struct qla_hw_data *ha = vha->hw;
uint32_t pci_bus;
- int pcie_reg;
- pcie_reg = pci_pcie_cap(ha->pdev);
- if (pcie_reg) {
+ if (pci_is_pcie(ha->pdev)) {
char lwstr[6];
- uint16_t pcie_lstat, lspeed, lwidth;
+ uint32_t lstat, lspeed, lwidth;
- pcie_reg += PCI_EXP_LNKCAP;
- pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
- lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
- lwidth = (pcie_lstat &
- (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
+ pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
+ lspeed = lstat & PCI_EXP_LNKCAP_SLS;
+ lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
strcpy(str, "PCIe (");
switch (lspeed) {
@@ -3183,7 +3179,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static void
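Editor's note: the qla24xx_pci_info_str() rework reads Link Capabilities through pcie_capability_read_dword() and decodes it with the named masks instead of ad-hoc BIT_* arithmetic: PCI_EXP_LNKCAP_SLS is the supported-link-speed field in bits 3:0 and PCI_EXP_LNKCAP_MLW the maximum link width in bits 9:4. The decode itself is pure bit manipulation, so it can be shown standalone (the sample register values are invented):

```c
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKCAP_SLS 0x0000000f	/* supported link speeds */
#define PCI_EXP_LNKCAP_MLW 0x000003f0	/* maximum link width */

static void decode_lnkcap(uint32_t lnkcap)
{
	unsigned int speed = lnkcap & PCI_EXP_LNKCAP_SLS;
	unsigned int width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;

	printf("LNKCAP 0x%08x: speed code %u (%s), x%u\n", lnkcap, speed,
	       speed == 1 ? "2.5GT/s" : speed == 2 ? "5.0GT/s" : "other",
	       width);
}

int main(void)
{
	decode_lnkcap(0x00000082);	/* 5.0GT/s capable, x8 */
	decode_lnkcap(0x00000041);	/* 2.5GT/s capable, x4 */
	return 0;
}
```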
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index f8a0a26a3cd4..1be6cefc390b 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -7400,7 +7400,6 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
/**
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d1549b74e2d1..84f7c54112e2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -206,7 +206,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
*/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
- unsigned char *sense, int timeout, int retries, int flags,
+ unsigned char *sense, int timeout, int retries, u64 flags,
int *resid)
{
struct request *req;
@@ -257,7 +257,7 @@ EXPORT_SYMBOL(scsi_execute);
int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
struct scsi_sense_hdr *sshdr, int timeout, int retries,
- int *resid, int flags)
+ int *resid, u64 flags)
{
char *sense = NULL;
int result;
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
host_dev = scsi_get_device(shost);
if (host_dev && host_dev->dma_mask)
- bounce_limit = *host_dev->dma_mask;
+ bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
return bounce_limit;
}
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index f379c7f3034c..2700a5a09bd4 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -24,12 +24,15 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/delay.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>
+#include "scsi_priv.h"
#include "scsi_transport_srp_internal.h"
struct srp_host_attrs {
@@ -38,7 +41,7 @@ struct srp_host_attrs {
#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
#define SRP_HOST_ATTRS 0
-#define SRP_RPORT_ATTRS 3
+#define SRP_RPORT_ATTRS 8
struct srp_internal {
struct scsi_transport_template t;
@@ -54,6 +57,36 @@ struct srp_internal {
#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)
+static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
+{
+ return dev_to_shost(r->dev.parent);
+}
+
+/**
+ * srp_tmo_valid() - check timeout combination validity
+ *
+ * The combination of the timeout parameters must be such that SCSI commands
+ * are finished in a reasonable time. Hence do not allow the fast I/O fail
+ * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT. Furthermore, these
+ * parameters must be such that multipath can detect failed paths timely.
+ * Hence do not allow all three parameters to be disabled simultaneously.
+ */
+int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo)
+{
+ if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
+ return -EINVAL;
+ if (reconnect_delay == 0)
+ return -EINVAL;
+ if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
+ return -EINVAL;
+ if (dev_loss_tmo >= LONG_MAX / HZ)
+ return -EINVAL;
+ if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
+ fast_io_fail_tmo >= dev_loss_tmo)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(srp_tmo_valid);
static int srp_host_setup(struct transport_container *tc, struct device *dev,
struct device *cdev)
@@ -134,10 +167,465 @@ static ssize_t store_srp_rport_delete(struct device *dev,
static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);
+static ssize_t show_srp_rport_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ static const char *const state_name[] = {
+ [SRP_RPORT_RUNNING] = "running",
+ [SRP_RPORT_BLOCKED] = "blocked",
+ [SRP_RPORT_FAIL_FAST] = "fail-fast",
+ [SRP_RPORT_LOST] = "lost",
+ };
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ enum srp_rport_state state = rport->state;
+
+ return sprintf(buf, "%s\n",
+ (unsigned)state < ARRAY_SIZE(state_name) ?
+ state_name[state] : "???");
+}
+
+static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL);
+
+static ssize_t srp_show_tmo(char *buf, int tmo)
+{
+ return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
+}
+
+static int srp_parse_tmo(int *tmo, const char *buf)
+{
+ int res = 0;
+
+ if (strncmp(buf, "off", 3) != 0)
+ res = kstrtoint(buf, 0, tmo);
+ else
+ *tmo = -1;
+
+ return res;
+}
+
+static ssize_t show_reconnect_delay(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+
+ return srp_show_tmo(buf, rport->reconnect_delay);
+}
+
+static ssize_t store_reconnect_delay(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, const size_t count)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ int res, delay;
+
+ res = srp_parse_tmo(&delay, buf);
+ if (res)
+ goto out;
+ res = srp_tmo_valid(delay, rport->fast_io_fail_tmo,
+ rport->dev_loss_tmo);
+ if (res)
+ goto out;
+
+ if (rport->reconnect_delay <= 0 && delay > 0 &&
+ rport->state != SRP_RPORT_RUNNING) {
+ queue_delayed_work(system_long_wq, &rport->reconnect_work,
+ delay * HZ);
+ } else if (delay <= 0) {
+ cancel_delayed_work(&rport->reconnect_work);
+ }
+ rport->reconnect_delay = delay;
+ res = count;
+
+out:
+ return res;
+}
+
+static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay,
+ store_reconnect_delay);
+
+static ssize_t show_failed_reconnects(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+
+ return sprintf(buf, "%d\n", rport->failed_reconnects);
+}
+
+static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL);
+
+static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+
+ return srp_show_tmo(buf, rport->fast_io_fail_tmo);
+}
+
+static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ int res;
+ int fast_io_fail_tmo;
+
+ res = srp_parse_tmo(&fast_io_fail_tmo, buf);
+ if (res)
+ goto out;
+ res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo,
+ rport->dev_loss_tmo);
+ if (res)
+ goto out;
+ rport->fast_io_fail_tmo = fast_io_fail_tmo;
+ res = count;
+
+out:
+ return res;
+}
+
+static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
+ show_srp_rport_fast_io_fail_tmo,
+ store_srp_rport_fast_io_fail_tmo);
+
+static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+
+ return srp_show_tmo(buf, rport->dev_loss_tmo);
+}
+
+static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ int res;
+ int dev_loss_tmo;
+
+ res = srp_parse_tmo(&dev_loss_tmo, buf);
+ if (res)
+ goto out;
+ res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo,
+ dev_loss_tmo);
+ if (res)
+ goto out;
+ rport->dev_loss_tmo = dev_loss_tmo;
+ res = count;
+
+out:
+ return res;
+}
+
+static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR,
+ show_srp_rport_dev_loss_tmo,
+ store_srp_rport_dev_loss_tmo);
+
+static int srp_rport_set_state(struct srp_rport *rport,
+ enum srp_rport_state new_state)
+{
+ enum srp_rport_state old_state = rport->state;
+
+ lockdep_assert_held(&rport->mutex);
+
+ switch (new_state) {
+ case SRP_RPORT_RUNNING:
+ switch (old_state) {
+ case SRP_RPORT_LOST:
+ goto invalid;
+ default:
+ break;
+ }
+ break;
+ case SRP_RPORT_BLOCKED:
+ switch (old_state) {
+ case SRP_RPORT_RUNNING:
+ break;
+ default:
+ goto invalid;
+ }
+ break;
+ case SRP_RPORT_FAIL_FAST:
+ switch (old_state) {
+ case SRP_RPORT_LOST:
+ goto invalid;
+ default:
+ break;
+ }
+ break;
+ case SRP_RPORT_LOST:
+ break;
+ }
+ rport->state = new_state;
+ return 0;
+
+invalid:
+ return -EINVAL;
+}
+
+/**
+ * srp_reconnect_work() - reconnect and schedule a new attempt if necessary
+ */
+static void srp_reconnect_work(struct work_struct *work)
+{
+ struct srp_rport *rport = container_of(to_delayed_work(work),
+ struct srp_rport, reconnect_work);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ int delay, res;
+
+ res = srp_reconnect_rport(rport);
+ if (res != 0) {
+ shost_printk(KERN_ERR, shost,
+ "reconnect attempt %d failed (%d)\n",
+ ++rport->failed_reconnects, res);
+ delay = rport->reconnect_delay *
+ min(100, max(1, rport->failed_reconnects - 10));
+ if (delay > 0)
+ queue_delayed_work(system_long_wq,
+ &rport->reconnect_work, delay * HZ);
+ }
+}
+
+static void __rport_fail_io_fast(struct srp_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct srp_internal *i;
+
+ lockdep_assert_held(&rport->mutex);
+
+ if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
+ return;
+ scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
+
+ /* Involve the LLD if possible to terminate all I/O on the rport. */
+ i = to_srp_internal(shost->transportt);
+ if (i->f->terminate_rport_io)
+ i->f->terminate_rport_io(rport);
+}
+
+/**
+ * rport_fast_io_fail_timedout() - fast I/O failure timeout handler
+ */
+static void rport_fast_io_fail_timedout(struct work_struct *work)
+{
+ struct srp_rport *rport = container_of(to_delayed_work(work),
+ struct srp_rport, fast_io_fail_work);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+
+ pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n",
+ dev_name(&rport->dev), dev_name(&shost->shost_gendev));
+
+ mutex_lock(&rport->mutex);
+ if (rport->state == SRP_RPORT_BLOCKED)
+ __rport_fail_io_fast(rport);
+ mutex_unlock(&rport->mutex);
+}
+
+/**
+ * rport_dev_loss_timedout() - device loss timeout handler
+ */
+static void rport_dev_loss_timedout(struct work_struct *work)
+{
+ struct srp_rport *rport = container_of(to_delayed_work(work),
+ struct srp_rport, dev_loss_work);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+
+ pr_info("dev_loss_tmo expired for SRP %s / %s.\n",
+ dev_name(&rport->dev), dev_name(&shost->shost_gendev));
+
+ mutex_lock(&rport->mutex);
+ WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0);
+ scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
+ mutex_unlock(&rport->mutex);
+
+ i->f->rport_delete(rport);
+}
+
+static void __srp_start_tl_fail_timers(struct srp_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ int delay, fast_io_fail_tmo, dev_loss_tmo;
+
+ lockdep_assert_held(&rport->mutex);
+
+ if (!rport->deleted) {
+ delay = rport->reconnect_delay;
+ fast_io_fail_tmo = rport->fast_io_fail_tmo;
+ dev_loss_tmo = rport->dev_loss_tmo;
+ pr_debug("%s current state: %d\n",
+ dev_name(&shost->shost_gendev), rport->state);
+
+ if (delay > 0)
+ queue_delayed_work(system_long_wq,
+ &rport->reconnect_work,
+ 1UL * delay * HZ);
+ if (fast_io_fail_tmo >= 0 &&
+ srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
+ pr_debug("%s new state: %d\n",
+ dev_name(&shost->shost_gendev),
+ rport->state);
+ scsi_target_block(&shost->shost_gendev);
+ queue_delayed_work(system_long_wq,
+ &rport->fast_io_fail_work,
+ 1UL * fast_io_fail_tmo * HZ);
+ }
+ if (dev_loss_tmo >= 0)
+ queue_delayed_work(system_long_wq,
+ &rport->dev_loss_work,
+ 1UL * dev_loss_tmo * HZ);
+ } else {
+ pr_debug("%s has already been deleted\n",
+ dev_name(&shost->shost_gendev));
+ srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST);
+ scsi_target_unblock(&shost->shost_gendev,
+ SDEV_TRANSPORT_OFFLINE);
+ }
+}
+
+/**
+ * srp_start_tl_fail_timers() - start the transport layer failure timers
+ *
+ * Start the transport layer fast I/O failure and device loss timers. Do not
+ * modify a timer that was already started.
+ */
+void srp_start_tl_fail_timers(struct srp_rport *rport)
+{
+ mutex_lock(&rport->mutex);
+ __srp_start_tl_fail_timers(rport);
+ mutex_unlock(&rport->mutex);
+}
+EXPORT_SYMBOL(srp_start_tl_fail_timers);
+
+/**
+ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ */
+static int scsi_request_fn_active(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev;
+ struct request_queue *q;
+ int request_fn_active = 0;
+
+ shost_for_each_device(sdev, shost) {
+ q = sdev->request_queue;
+
+ spin_lock_irq(q->queue_lock);
+ request_fn_active += q->request_fn_active;
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ return request_fn_active;
+}
+
+/**
+ * srp_reconnect_rport() - reconnect to an SRP target port
+ *
+ * Blocks SCSI command queueing before invoking reconnect() such that
+ * queuecommand() won't be invoked concurrently with reconnect() from outside
+ * the SCSI EH. This is important since a reconnect() implementation may
+ * reallocate resources needed by queuecommand().
+ *
+ * Notes:
+ * - This function neither waits until outstanding requests have finished nor
+ * tries to abort these. It is the responsibility of the reconnect()
+ * function to finish outstanding commands before reconnecting to the target
+ * port.
+ * - It is the responsibility of the caller to ensure that the resources
+ * reallocated by the reconnect() function won't be used while this function
+ * is in progress. One possible strategy is to invoke this function from
+ * the context of the SCSI EH thread only. Another possible strategy is to
+ * lock the rport mutex inside each SCSI LLD callback that can be invoked by
+ * the SCSI EH (the scsi_host_template.eh_*() functions and also the
+ * scsi_host_template.queuecommand() function).
+ */
+int srp_reconnect_rport(struct srp_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+ struct scsi_device *sdev;
+ int res;
+
+ pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev));
+
+ res = mutex_lock_interruptible(&rport->mutex);
+ if (res)
+ goto out;
+ scsi_target_block(&shost->shost_gendev);
+ while (scsi_request_fn_active(shost))
+ msleep(20);
+ res = i->f->reconnect(rport);
+ pr_debug("%s (state %d): transport.reconnect() returned %d\n",
+ dev_name(&shost->shost_gendev), rport->state, res);
+ if (res == 0) {
+ cancel_delayed_work(&rport->fast_io_fail_work);
+ cancel_delayed_work(&rport->dev_loss_work);
+
+ rport->failed_reconnects = 0;
+ srp_rport_set_state(rport, SRP_RPORT_RUNNING);
+ scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING);
+ /*
+ * If the SCSI error handler has offlined one or more devices,
+ * invoking scsi_target_unblock() won't change the state of
+ * these devices into running so do that explicitly.
+ */
+ spin_lock_irq(shost->host_lock);
+ __shost_for_each_device(sdev, shost)
+ if (sdev->sdev_state == SDEV_OFFLINE)
+ sdev->sdev_state = SDEV_RUNNING;
+ spin_unlock_irq(shost->host_lock);
+ } else if (rport->state == SRP_RPORT_RUNNING) {
+ /*
+ * srp_reconnect_rport() was invoked with fast_io_fail
+ * off. Mark the port as failed and start the TL failure
+ * timers if these had not yet been started.
+ */
+ __rport_fail_io_fast(rport);
+ scsi_target_unblock(&shost->shost_gendev,
+ SDEV_TRANSPORT_OFFLINE);
+ __srp_start_tl_fail_timers(rport);
+ } else if (rport->state != SRP_RPORT_BLOCKED) {
+ scsi_target_unblock(&shost->shost_gendev,
+ SDEV_TRANSPORT_OFFLINE);
+ }
+ mutex_unlock(&rport->mutex);
+
+out:
+ return res;
+}
+EXPORT_SYMBOL(srp_reconnect_rport);
+
+/**
+ * srp_timed_out() - SRP transport intercept of the SCSI timeout EH
+ *
+ * If a timeout occurs while an rport is in the blocked state, ask the SCSI
+ * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
+ * handle the timeout (BLK_EH_NOT_HANDLED).
+ *
+ * Note: This function is called from soft-IRQ context and with the request
+ * queue lock held.
+ */
+static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
+{
+ struct scsi_device *sdev = scmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+
+ pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
+ return i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
+ BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+}
+
static void srp_rport_release(struct device *dev)
{
struct srp_rport *rport = dev_to_rport(dev);
+ cancel_delayed_work_sync(&rport->reconnect_work);
+ cancel_delayed_work_sync(&rport->fast_io_fail_work);
+ cancel_delayed_work_sync(&rport->dev_loss_work);
+
put_device(dev->parent);
kfree(rport);
}
@@ -185,6 +673,24 @@ static int srp_host_match(struct attribute_container *cont, struct device *dev)
}
/**
+ * srp_rport_get() - increment rport reference count
+ */
+void srp_rport_get(struct srp_rport *rport)
+{
+ get_device(&rport->dev);
+}
+EXPORT_SYMBOL(srp_rport_get);
+
+/**
+ * srp_rport_put() - decrement rport reference count
+ */
+void srp_rport_put(struct srp_rport *rport)
+{
+ put_device(&rport->dev);
+}
+EXPORT_SYMBOL(srp_rport_put);
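A brief usage note, not taken from this patch: the get/put pair lets an LLD pin an rport across asynchronous work it schedules itself, for example:

/*
 * Illustrative only: hold a reference while a driver-private work item is
 * outstanding. my_wq and work are hypothetical; the work handler would call
 * srp_rport_put() when it finishes.
 */
static void my_lld_queue_rport_work(struct srp_rport *rport,
				    struct workqueue_struct *my_wq,
				    struct work_struct *work)
{
	srp_rport_get(rport);
	if (!queue_work(my_wq, work))
		srp_rport_put(rport);	/* already queued, drop our reference */
}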
+
+/**
* srp_rport_add - add a SRP remote port to the device hierarchy
* @shost: scsi host the remote port is connected to.
* @ids: The port id for the remote port.
@@ -196,12 +702,15 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
{
struct srp_rport *rport;
struct device *parent = &shost->shost_gendev;
+ struct srp_internal *i = to_srp_internal(shost->transportt);
int id, ret;
rport = kzalloc(sizeof(*rport), GFP_KERNEL);
if (!rport)
return ERR_PTR(-ENOMEM);
+ mutex_init(&rport->mutex);
+
device_initialize(&rport->dev);
rport->dev.parent = get_device(parent);
@@ -210,6 +719,17 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
rport->roles = ids->roles;
+ if (i->f->reconnect)
+ rport->reconnect_delay = i->f->reconnect_delay ?
+ *i->f->reconnect_delay : 10;
+ INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work);
+ rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ?
+ *i->f->fast_io_fail_tmo : 15;
+ rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60;
+ INIT_DELAYED_WORK(&rport->fast_io_fail_work,
+ rport_fast_io_fail_timedout);
+ INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
+
id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
@@ -259,6 +779,13 @@ void srp_rport_del(struct srp_rport *rport)
transport_remove_device(dev);
device_del(dev);
transport_destroy_device(dev);
+
+ mutex_lock(&rport->mutex);
+ if (rport->state == SRP_RPORT_BLOCKED)
+ __rport_fail_io_fast(rport);
+ rport->deleted = true;
+ mutex_unlock(&rport->mutex);
+
put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);
@@ -310,6 +837,8 @@ srp_attach_transport(struct srp_function_template *ft)
if (!i)
return NULL;
+ i->t.eh_timed_out = srp_timed_out;
+
i->t.tsk_mgmt_response = srp_tsk_mgmt_response;
i->t.it_nexus_response = srp_it_nexus_response;
@@ -327,6 +856,15 @@ srp_attach_transport(struct srp_function_template *ft)
count = 0;
i->rport_attrs[count++] = &dev_attr_port_id;
i->rport_attrs[count++] = &dev_attr_roles;
+ if (ft->has_rport_state) {
+ i->rport_attrs[count++] = &dev_attr_state;
+ i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo;
+ i->rport_attrs[count++] = &dev_attr_dev_loss_tmo;
+ }
+ if (ft->reconnect) {
+ i->rport_attrs[count++] = &dev_attr_reconnect_delay;
+ i->rport_attrs[count++] = &dev_attr_failed_reconnects;
+ }
if (ft->rport_delete)
i->rport_attrs[count++] = &dev_attr_delete;
i->rport_attrs[count++] = NULL;
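Which of the attributes above become visible is decided purely by the srp_function_template an LLD registers. A hedged example of a template that opts into the new rport state machine; the timeout values and all my_lld_* symbols are assumptions, not taken from this patch:

static int my_lld_rport_reconnect(struct srp_rport *rport);
static void my_lld_rport_delete(struct srp_rport *rport);

static int my_lld_fast_io_fail_tmo = 15;	/* seconds */
static int my_lld_dev_loss_tmo = 600;		/* seconds */
static int my_lld_reconnect_delay = 10;		/* seconds */

/*
 * has_rport_state exposes the state/fast_io_fail_tmo/dev_loss_tmo attributes;
 * a non-NULL .reconnect additionally exposes reconnect_delay and
 * failed_reconnects, as wired up in srp_attach_transport() above.
 */
static struct srp_function_template my_lld_transport_functions = {
	.has_rport_state	= true,
	.reset_timer_if_blocked	= true,
	.reconnect_delay	= &my_lld_reconnect_delay,
	.fast_io_fail_tmo	= &my_lld_fast_io_fail_tmo,
	.dev_loss_tmo		= &my_lld_dev_loss_tmo,
	.reconnect		= my_lld_rport_reconnect,
	.rport_delete		= my_lld_rport_delete,
};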
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e62d17d41d4e..82ff4ccac121 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -800,7 +800,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
if (sdkp->device->no_write_same)
return BLKPREP_KILL;
- BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size);
+ BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
sector >>= ilog2(sdp->sector_size) - 9;
nr_sectors >>= ilog2(sdp->sector_size) - 9;
@@ -1002,7 +1002,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->cmnd[0] = READ_6;
SCpnt->sc_data_direction = DMA_FROM_DEVICE;
} else {
- scmd_printk(KERN_ERR, SCpnt, "Unknown command %x\n", rq->cmd_flags);
+ scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);
goto out;
}
@@ -2854,6 +2854,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
gd->events |= DISK_EVENT_MEDIA_CHANGE;
}
+ blk_pm_runtime_init(sdp->request_queue, dev);
add_disk(gd);
if (sdkp->capacity)
sd_dif_config_host(sdkp);
@@ -2862,7 +2863,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
sdp->removable ? "removable " : "");
- blk_pm_runtime_init(sdp->request_queue, dev);
scsi_autopm_put_device(sdp);
put_device(&sdkp->dev);
}
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 6174ca4ea275..a7a691d0af7d 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -365,7 +365,6 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
struct bio *bio;
struct scsi_disk *sdkp;
struct sd_dif_tuple *sdt;
- unsigned int i, j;
u32 phys, virt;
sdkp = rq->bio->bi_bdev->bd_disk->private_data;
@@ -376,19 +375,21 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
phys = hw_sector & 0xffffffff;
__rq_for_each_bio(bio, rq) {
- struct bio_vec *iv;
+ struct bio_vec iv;
+ struct bvec_iter iter;
+ unsigned int j;
/* Already remapped? */
if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
break;
- virt = bio->bi_integrity->bip_sector & 0xffffffff;
+ virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
- bip_for_each_vec(iv, bio->bi_integrity, i) {
- sdt = kmap_atomic(iv->bv_page)
- + iv->bv_offset;
+ bip_for_each_vec(iv, bio->bi_integrity, iter) {
+ sdt = kmap_atomic(iv.bv_page)
+ + iv.bv_offset;
- for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+ for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
if (be32_to_cpu(sdt->ref_tag) == virt)
sdt->ref_tag = cpu_to_be32(phys);
@@ -414,7 +415,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
struct scsi_disk *sdkp;
struct bio *bio;
struct sd_dif_tuple *sdt;
- unsigned int i, j, sectors, sector_sz;
+ unsigned int j, sectors, sector_sz;
u32 phys, virt;
sdkp = scsi_disk(scmd->request->rq_disk);
@@ -430,15 +431,16 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
phys >>= 3;
__rq_for_each_bio(bio, scmd->request) {
- struct bio_vec *iv;
+ struct bio_vec iv;
+ struct bvec_iter iter;
- virt = bio->bi_integrity->bip_sector & 0xffffffff;
+ virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
- bip_for_each_vec(iv, bio->bi_integrity, i) {
- sdt = kmap_atomic(iv->bv_page)
- + iv->bv_offset;
+ bip_for_each_vec(iv, bio->bi_integrity, iter) {
+ sdt = kmap_atomic(iv.bv_page)
+ + iv.bv_offset;
- for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+ for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
if (sectors == 0) {
kunmap_atomic(sdt);
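For reference, the shape of the immutable-biovec iteration these sd_dif hunks convert to, pulled out into a standalone sketch (walk_integrity_bvecs() is hypothetical; the kmap pattern mirrors the code above):

/*
 * bio_vec is now obtained by value and a separate bvec_iter carries the
 * position, so fields are accessed as iv.bv_page instead of iv->bv_page.
 */
static void walk_integrity_bvecs(struct bio *bio)
{
	struct bio_vec iv;
	struct bvec_iter iter;

	bip_for_each_vec(iv, bio->bi_integrity, iter) {
		void *p = kmap_atomic(iv.bv_page) + iv.bv_offset;

		/* ... process iv.bv_len bytes of protection data ... */
		kunmap_atomic(p);
	}
}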
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5cbc4bb1b395..df5e961484e1 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -105,8 +105,11 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
static int sg_add(struct device *, struct class_interface *);
static void sg_remove(struct device *, struct class_interface *);
+static DEFINE_SPINLOCK(sg_open_exclusive_lock);
+
static DEFINE_IDR(sg_index_idr);
-static DEFINE_RWLOCK(sg_index_lock);
+static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
+ file descriptor list for device */
static struct class_interface sg_interface = {
.add_dev = sg_add,
@@ -143,7 +146,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
} Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */
- struct list_head sfd_siblings; /* protected by sfd_lock of device */
+ /* sfd_siblings is protected by sg_index_lock */
+ struct list_head sfd_siblings;
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
@@ -166,12 +170,13 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
typedef struct sg_device { /* holds the state of each scsi generic device */
struct scsi_device *device;
+ wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
int sg_tablesize; /* adapter's max scatter-gather table size */
u32 index; /* device index number */
- spinlock_t sfd_lock; /* protect file descriptor list for device */
+ /* sfds is protected by sg_index_lock */
struct list_head sfds;
- struct rw_semaphore o_sem; /* exclude open should hold this rwsem */
volatile char detached; /* 0->attached, 1->detached pending removal */
+ /* exclude protected by sg_open_exclusive_lock */
char exclude; /* opened for exclusive access */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
struct gendisk *disk;
@@ -220,14 +225,35 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
+static int get_exclude(Sg_device *sdp)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&sg_open_exclusive_lock, flags);
+ ret = sdp->exclude;
+ spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
+ return ret;
+}
+
+static int set_exclude(Sg_device *sdp, char val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sg_open_exclusive_lock, flags);
+ sdp->exclude = val;
+ spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
+ return val;
+}
+
static int sfds_list_empty(Sg_device *sdp)
{
unsigned long flags;
int ret;
- spin_lock_irqsave(&sdp->sfd_lock, flags);
+ read_lock_irqsave(&sg_index_lock, flags);
ret = list_empty(&sdp->sfds);
- spin_unlock_irqrestore(&sdp->sfd_lock, flags);
+ read_unlock_irqrestore(&sg_index_lock, flags);
return ret;
}
@@ -239,6 +265,7 @@ sg_open(struct inode *inode, struct file *filp)
struct request_queue *q;
Sg_device *sdp;
Sg_fd *sfp;
+ int res;
int retval;
nonseekable_open(inode, filp);
@@ -267,52 +294,54 @@ sg_open(struct inode *inode, struct file *filp)
goto error_out;
}
- if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) {
- retval = -EPERM; /* Can't lock it with read only access */
- goto error_out;
- }
- if (flags & O_NONBLOCK) {
- if (flags & O_EXCL) {
- if (!down_write_trylock(&sdp->o_sem)) {
- retval = -EBUSY;
- goto error_out;
- }
- } else {
- if (!down_read_trylock(&sdp->o_sem)) {
- retval = -EBUSY;
- goto error_out;
- }
+ if (flags & O_EXCL) {
+ if (O_RDONLY == (flags & O_ACCMODE)) {
+ retval = -EPERM; /* Can't lock it with read only access */
+ goto error_out;
+ }
+ if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
+ retval = -EBUSY;
+ goto error_out;
+ }
+ res = wait_event_interruptible(sdp->o_excl_wait,
+ ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
+ if (res) {
+ retval = res; /* -ERESTARTSYS because signal hit process */
+ goto error_out;
+ }
+ } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
+ if (flags & O_NONBLOCK) {
+ retval = -EBUSY;
+ goto error_out;
+ }
+ res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
+ if (res) {
+ retval = res; /* -ERESTARTSYS because signal hit process */
+ goto error_out;
}
- } else {
- if (flags & O_EXCL)
- down_write(&sdp->o_sem);
- else
- down_read(&sdp->o_sem);
}
- /* Since write lock is held, no need to check sfd_list */
- if (flags & O_EXCL)
- sdp->exclude = 1; /* used by release lock */
-
+ if (sdp->detached) {
+ retval = -ENODEV;
+ goto error_out;
+ }
if (sfds_list_empty(sdp)) { /* no existing opens on this device */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
}
- sfp = sg_add_sfp(sdp, dev);
- if (!IS_ERR(sfp))
+ if ((sfp = sg_add_sfp(sdp, dev)))
filp->private_data = sfp;
- /* retval is already provably zero at this point because of the
- * check after retval = scsi_autopm_get_device(sdp->device))
- */
else {
- retval = PTR_ERR(sfp);
-
if (flags & O_EXCL) {
- sdp->exclude = 0; /* undo if error */
- up_write(&sdp->o_sem);
- } else
- up_read(&sdp->o_sem);
+ set_exclude(sdp, 0); /* undo if error */
+ wake_up_interruptible(&sdp->o_excl_wait);
+ }
+ retval = -ENOMEM;
+ goto error_out;
+ }
+ retval = 0;
error_out:
+ if (retval) {
scsi_autopm_put_device(sdp->device);
sdp_put:
scsi_device_put(sdp->device);
@@ -329,18 +358,13 @@ sg_release(struct inode *inode, struct file *filp)
{
Sg_device *sdp;
Sg_fd *sfp;
- int excl;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
- excl = sdp->exclude;
- sdp->exclude = 0;
- if (excl)
- up_write(&sdp->o_sem);
- else
- up_read(&sdp->o_sem);
+ set_exclude(sdp, 0);
+ wake_up_interruptible(&sdp->o_excl_wait);
scsi_autopm_put_device(sdp->device);
kref_put(&sfp->f_ref, sg_remove_sfp);
@@ -1391,9 +1415,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
disk->first_minor = k;
sdp->disk = disk;
sdp->device = scsidp;
- spin_lock_init(&sdp->sfd_lock);
INIT_LIST_HEAD(&sdp->sfds);
- init_rwsem(&sdp->o_sem);
+ init_waitqueue_head(&sdp->o_excl_wait);
sdp->sg_tablesize = queue_max_segments(q);
sdp->index = k;
kref_init(&sdp->d_ref);
@@ -1526,13 +1549,11 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
/* Need a write lock to set sdp->detached. */
write_lock_irqsave(&sg_index_lock, iflags);
- spin_lock(&sdp->sfd_lock);
sdp->detached = 1;
list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
wake_up_interruptible(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
}
- spin_unlock(&sdp->sfd_lock);
write_unlock_irqrestore(&sg_index_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
@@ -2043,7 +2064,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
if (!sfp)
- return ERR_PTR(-ENOMEM);
+ return NULL;
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
@@ -2057,13 +2078,9 @@ sg_add_sfp(Sg_device * sdp, int dev)
sfp->cmd_q = SG_DEF_COMMAND_Q;
sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
sfp->parentdp = sdp;
- spin_lock_irqsave(&sdp->sfd_lock, iflags);
- if (sdp->detached) {
- spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
- return ERR_PTR(-ENODEV);
- }
+ write_lock_irqsave(&sg_index_lock, iflags);
list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
- spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
+ write_unlock_irqrestore(&sg_index_lock, iflags);
SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
if (unlikely(sg_big_buff != def_reserved_size))
sg_big_buff = def_reserved_size;
@@ -2113,9 +2130,10 @@ static void sg_remove_sfp(struct kref *kref)
struct sg_device *sdp = sfp->parentdp;
unsigned long iflags;
- spin_lock_irqsave(&sdp->sfd_lock, iflags);
+ write_lock_irqsave(&sg_index_lock, iflags);
list_del(&sfp->sfd_siblings);
- spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
+ write_unlock_irqrestore(&sg_index_lock, iflags);
+ wake_up_interruptible(&sdp->o_excl_wait);
INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
schedule_work(&sfp->ew.work);
@@ -2502,7 +2520,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
return 0;
}
-/* must be called while holding sg_index_lock and sfd_lock */
+/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
int k, m, new_interface, blen, usg;
@@ -2587,26 +2605,22 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
- if (sdp) {
- spin_lock(&sdp->sfd_lock);
- if (!list_empty(&sdp->sfds)) {
- struct scsi_device *scsidp = sdp->device;
+ if (sdp && !list_empty(&sdp->sfds)) {
+ struct scsi_device *scsidp = sdp->device;
- seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
- if (sdp->detached)
- seq_printf(s, "detached pending close ");
- else
- seq_printf
- (s, "scsi%d chan=%d id=%d lun=%d em=%d",
- scsidp->host->host_no,
- scsidp->channel, scsidp->id,
- scsidp->lun,
- scsidp->host->hostt->emulated);
- seq_printf(s, " sg_tablesize=%d excl=%d\n",
- sdp->sg_tablesize, sdp->exclude);
- sg_proc_debug_helper(s, sdp);
- }
- spin_unlock(&sdp->sfd_lock);
+ seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
+ if (sdp->detached)
+ seq_printf(s, "detached pending close ");
+ else
+ seq_printf
+ (s, "scsi%d chan=%d id=%d lun=%d em=%d",
+ scsidp->host->host_no,
+ scsidp->channel, scsidp->id,
+ scsidp->lun,
+ scsidp->host->hostt->emulated);
+ seq_printf(s, " sg_tablesize=%d excl=%d\n",
+ sdp->sg_tablesize, get_exclude(sdp));
+ sg_proc_debug_helper(s, sdp);
}
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
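The reworked sg_open() path above replaces the per-device rwsem with a wait queue whose condition both tests and, when possible, claims the exclusive flag under sg_open_exclusive_lock. A generic sketch of that claim-inside-the-condition pattern (struct claimable and its helpers are illustrative, not from sg.c; assume the lock and wait queue are initialised elsewhere):

struct claimable {
	spinlock_t lock;
	bool claimed;
	wait_queue_head_t waitq;
};

/* Atomically claim the flag; returns true if this caller won it. */
static bool try_claim(struct claimable *c)
{
	unsigned long flags;
	bool won;

	spin_lock_irqsave(&c->lock, flags);
	won = !c->claimed;
	if (won)
		c->claimed = true;
	spin_unlock_irqrestore(&c->lock, flags);
	return won;
}

/* Returns 0 once the flag is claimed, or -ERESTARTSYS on a signal. */
static int claim_or_wait(struct claimable *c)
{
	return wait_event_interruptible(c->waitq, try_claim(c));
}

static void release_claim(struct claimable *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	c->claimed = false;
	spin_unlock_irqrestore(&c->lock, flags);
	wake_up_interruptible(&c->waitq);
}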
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 325c31caa6e0..1aa4befcfbd0 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1790,8 +1790,6 @@ static void stex_remove(struct pci_dev *pdev)
scsi_remove_host(hba->host);
- pci_set_drvdata(pdev, NULL);
-
stex_hba_stop(hba);
stex_hba_free(hba);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
index b80bf709f104..805369521df8 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.h
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -174,7 +174,7 @@ struct sym_slcb {
*/
struct sym_shcb {
/*
- * Chip and controller indentification.
+ * Chip and controller identification.
*/
int unit;
char inst_name[16];
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 9327f5fcec4e..b06a1dea8818 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2553,7 +2553,6 @@ static void dc390_remove_one(struct pci_dev *dev)
pci_disable_device(dev);
scsi_host_put(scsi_host);
- pci_set_drvdata(dev, NULL);
}
static struct pci_device_id tmscsim_pci_tbl[] = {
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index a823cf44e949..8b9531204c2b 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -132,7 +132,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
- pci_set_drvdata(pdev, NULL);
}
/**
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 74b88efde6ad..e6bb2352df40 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -710,19 +710,15 @@ static struct scsi_host_template virtscsi_host_template_multi = {
#define virtscsi_config_get(vdev, fld) \
({ \
typeof(((struct virtio_scsi_config *)0)->fld) __val; \
- vdev->config->get(vdev, \
- offsetof(struct virtio_scsi_config, fld), \
- &__val, sizeof(__val)); \
+ virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
__val; \
})
#define virtscsi_config_set(vdev, fld, val) \
- (void)({ \
+ do { \
typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
- vdev->config->set(vdev, \
- offsetof(struct virtio_scsi_config, fld), \
- &__val, sizeof(__val)); \
- })
+ virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
+ } while(0)
static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
@@ -954,7 +950,7 @@ static void virtscsi_remove(struct virtio_device *vdev)
scsi_host_put(shost);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
{
virtscsi_remove_vqs(vdev);
@@ -988,7 +984,7 @@ static struct virtio_driver virtio_scsi_driver = {
.id_table = id_table,
.probe = virtscsi_probe,
.scan = virtscsi_scan,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtscsi_freeze,
.restore = virtscsi_restore,
#endif
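After the conversion the two wrappers above sit on the generic virtio_cread()/virtio_cwrite() helpers. A minimal usage sketch; the function and the fallback value are hypothetical, while seg_max, sense_size and VIRTIO_SCSI_SENSE_SIZE come from the virtio-scsi headers:

/* Illustrative only: read and update config fields through the wrappers. */
static void example_tune_config(struct virtio_device *vdev,
				struct Scsi_Host *shost)
{
	shost->sg_tablesize = virtscsi_config_get(vdev, seg_max) ?: 64;
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
}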
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 3bfaa66fa0d1..b9755ec0e812 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1405,7 +1405,6 @@ out_release_resources:
out_free_host:
scsi_host_put(host);
out_disable_device:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return error;
@@ -1445,7 +1444,6 @@ static void pvscsi_remove(struct pci_dev *pdev)
scsi_host_put(host);
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index b9c53cc40e1f..eb1f1ef5fa2e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -264,6 +264,7 @@ config SPI_FSL_SPI
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select SPI_BITBANG
+ depends on SOC_VF610 || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
mode. VF610 platform uses the controller.
@@ -369,7 +370,7 @@ config SPI_PXA2XX_PCI
config SPI_RSPI
tristate "Renesas RSPI controller"
- depends on SUPERH && SH_DMAE_BASE
+ depends on (SUPERH || ARCH_SHMOBILE) && SH_DMAE_BASE
help
SPI driver for Renesas RSPI blocks.
@@ -393,7 +394,7 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
- depends on (ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
+ depends on PLAT_SAMSUNG
select S3C64XX_DMA if ARCH_S3C64XX
help
SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 9a64c3fee218..595b62cb545d 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -219,7 +219,7 @@ static int altera_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
/* setup the state for the bitbang driver */
- hw->bitbang.master = spi_master_get(master);
+ hw->bitbang.master = master;
if (!hw->bitbang.master)
return err;
hw->bitbang.chipselect = altera_spi_chipsel;
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 37bad952ab38..821bf7ac218d 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -231,7 +231,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
master->num_chipselect = pdata->num_chipselect;
}
- sp->bitbang.master = spi_master_get(master);
+ sp->bitbang.master = master;
sp->bitbang.chipselect = ath79_spi_chipselect;
sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
sp->bitbang.setup_transfer = spi_bitbang_setup_transfer;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index d4ac60b4a56e..273db0beb2b8 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -170,18 +170,18 @@
/* Bit manipulation macros */
#define SPI_BIT(name) \
(1 << SPI_##name##_OFFSET)
-#define SPI_BF(name,value) \
+#define SPI_BF(name, value) \
(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
-#define SPI_BFEXT(name,value) \
+#define SPI_BFEXT(name, value) \
(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
-#define SPI_BFINS(name,value,old) \
- ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
- | SPI_BF(name,value))
+#define SPI_BFINS(name, value, old) \
+ (((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
+ | SPI_BF(name, value))
/* Register access macros */
-#define spi_readl(port,reg) \
+#define spi_readl(port, reg) \
__raw_readl((port)->regs + SPI_##reg)
-#define spi_writel(port,reg,value) \
+#define spi_writel(port, reg, value) \
__raw_writel((value), (port)->regs + SPI_##reg)
/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
@@ -1401,8 +1401,8 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
asd = spi->controller_state;
bits = (asd->csr >> 4) & 0xf;
if (bits != xfer->bits_per_word - 8) {
- dev_dbg(&spi->dev, "you can't yet change "
- "bits_per_word in transfers\n");
+ dev_dbg(&spi->dev,
+ "you can't yet change bits_per_word in transfers\n");
return -ENOPROTOOPT;
}
}
@@ -1516,7 +1516,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
/* setup spi core then atmel-specific driver state */
ret = -ENOMEM;
- master = spi_alloc_master(&pdev->dev, sizeof *as);
+ master = spi_alloc_master(&pdev->dev, sizeof(*as));
if (!master)
goto out_free;
@@ -1546,9 +1546,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&as->queue);
as->pdev = pdev;
- as->regs = ioremap(regs->start, resource_size(regs));
- if (!as->regs)
+ as->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(as->regs)) {
+ ret = PTR_ERR(as->regs);
goto out_free_buffer;
+ }
as->phybase = regs->start;
as->irq = irq;
as->clk = clk;
@@ -1617,7 +1619,6 @@ out_free_dma:
out_free_irq:
free_irq(irq, master);
out_unmap_regs:
- iounmap(as->regs);
out_free_buffer:
if (!as->use_pdc)
tasklet_kill(&as->tasklet);
@@ -1669,36 +1670,36 @@ static int atmel_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(as->clk);
clk_put(as->clk);
free_irq(as->irq, master);
- iounmap(as->regs);
spi_unregister_master(master);
return 0;
}
-#ifdef CONFIG_PM
-
-static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int atmel_spi_suspend(struct device *dev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
clk_disable_unprepare(as->clk);
return 0;
}
-static int atmel_spi_resume(struct platform_device *pdev)
+static int atmel_spi_resume(struct device *dev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
- return clk_prepare_enable(as->clk);
+ clk_prepare_enable(as->clk);
return 0;
}
+static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume);
+
+#define ATMEL_SPI_PM_OPS (&atmel_spi_pm_ops)
#else
-#define atmel_spi_suspend NULL
-#define atmel_spi_resume NULL
+#define ATMEL_SPI_PM_OPS NULL
#endif
#if defined(CONFIG_OF)
@@ -1714,10 +1715,9 @@ static struct platform_driver atmel_spi_driver = {
.driver = {
.name = "atmel_spi",
.owner = THIS_MODULE,
+ .pm = ATMEL_SPI_PM_OPS,
.of_match_table = of_match_ptr(atmel_spi_dt_ids),
},
- .suspend = atmel_spi_suspend,
- .resume = atmel_spi_resume,
.probe = atmel_spi_probe,
.remove = atmel_spi_remove,
};
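The same conversion recurs throughout the SPI hunks that follow: legacy platform_driver .suspend/.resume callbacks become dev_pm_ops entries bundled by SIMPLE_DEV_PM_OPS(), with a NULL .pm pointer as the fallback when CONFIG_PM_SLEEP is off. A condensed sketch of the pattern (foo_* names and the clock handling are placeholders):

struct foo_spi {
	struct clk *clk;
};

#ifdef CONFIG_PM_SLEEP
static int foo_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct foo_spi *hw = spi_master_get_devdata(master);

	clk_disable_unprepare(hw->clk);
	return 0;
}

static int foo_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct foo_spi *hw = spi_master_get_devdata(master);

	return clk_prepare_enable(hw->clk);
}

static SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);
#define FOO_SPI_PM_OPS	(&foo_spi_pm_ops)
#else
#define FOO_SPI_PM_OPS	NULL
#endif
/* The driver struct then carries .driver.pm = FOO_SPI_PM_OPS instead of
 * .suspend/.resume, as in the atmel_spi_driver hunk above. */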
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index 1d00d9b397dd..c4141c92bcff 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -775,7 +775,7 @@ static int au1550_spi_probe(struct platform_device *pdev)
hw = spi_master_get_devdata(master);
- hw->master = spi_master_get(master);
+ hw->master = master;
hw->pdata = dev_get_platdata(&pdev->dev);
hw->dev = &pdev->dev;
@@ -985,6 +985,7 @@ static int au1550_spi_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:au1550-spi");
static struct platform_driver au1550_spi_drv = {
+ .probe = au1550_spi_probe,
.remove = au1550_spi_remove,
.driver = {
.name = "au1550-spi",
@@ -1004,7 +1005,7 @@ static int __init au1550_spi_init(void)
printk(KERN_ERR "au1550-spi: cannot add memory"
"dbdma device\n");
}
- return platform_driver_probe(&au1550_spi_drv, au1550_spi_probe);
+ return platform_driver_register(&au1550_spi_drv);
}
module_init(au1550_spi_init);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 52c81481c5c7..4c332143a310 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -358,7 +358,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
- err = spi_register_master(master);
+ err = devm_spi_register_master(&pdev->dev, master);
if (err) {
dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
goto out_free_irq;
@@ -381,14 +381,12 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
struct bcm2835_spi *bs = spi_master_get_devdata(master);
free_irq(bs->irq, master);
- spi_unregister_master(master);
/* Clear FIFOs, and disable the HW block */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
clk_disable_unprepare(bs->clk);
- spi_master_put(master);
return 0;
}
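The bcm2835 change is one instance of the devm_spi_register_master() conversion repeated in the drivers below: registration is bound to the device lifetime, so remove() no longer calls spi_unregister_master() or spi_master_put() for the registered master. A reduced sketch of the resulting shape (foo_* symbols are placeholders and error handling is trimmed to the essentials):

struct foo_spi {
	struct clk *clk;
};

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int err;

	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);
	/* ... clocks, IRQ, bus parameters ... */

	err = devm_spi_register_master(&pdev->dev, master);
	if (err)
		spi_master_put(master);	/* registration failed, still ours */
	return err;
}

static int foo_spi_remove(struct platform_device *pdev)
{
	/* devres unregisters the master after this callback returns */
	return 0;
}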
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 536b0e363826..80d56b214eb5 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -412,7 +412,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
/* register and we are done */
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(dev, master);
if (ret) {
dev_err(dev, "spi register failed\n");
goto out_clk_disable;
@@ -438,8 +438,6 @@ static int bcm63xx_spi_remove(struct platform_device *pdev)
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
- spi_unregister_master(master);
-
/* reset spi block */
bcm_spi_writeb(bs, 0, SPI_INT_MASK);
@@ -447,8 +445,6 @@ static int bcm63xx_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(bs->clk);
clk_put(bs->clk);
- spi_master_put(master);
-
return 0;
}
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 91921b5f5817..38941e5920b5 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -592,7 +592,7 @@ bfin_sport_spi_setup(struct spi_device *spi)
*/
if (chip_info->ctl_reg || chip_info->enable_dma) {
ret = -EINVAL;
- dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields");
+ dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields\n");
goto error;
}
chip->cs_chg_udelay = chip_info->cs_chg_udelay;
@@ -879,11 +879,10 @@ static int bfin_sport_spi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int
-bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int bfin_sport_spi_suspend(struct device *dev)
{
- struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev);
int status;
status = bfin_sport_spi_stop_queue(drv_data);
@@ -896,10 +895,9 @@ bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state)
return status;
}
-static int
-bfin_sport_spi_resume(struct platform_device *pdev)
+static int bfin_sport_spi_resume(struct device *dev)
{
- struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev);
int status;
/* Enable the SPI interface */
@@ -912,19 +910,22 @@ bfin_sport_spi_resume(struct platform_device *pdev)
return status;
}
+
+static SIMPLE_DEV_PM_OPS(bfin_sport_spi_pm_ops, bfin_sport_spi_suspend,
+ bfin_sport_spi_resume);
+
+#define BFIN_SPORT_SPI_PM_OPS (&bfin_sport_spi_pm_ops)
#else
-# define bfin_sport_spi_suspend NULL
-# define bfin_sport_spi_resume NULL
+#define BFIN_SPORT_SPI_PM_OPS NULL
#endif
static struct platform_driver bfin_sport_spi_driver = {
.driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = BFIN_SPORT_SPI_PM_OPS,
},
.probe = bfin_sport_spi_probe,
.remove = bfin_sport_spi_remove,
- .suspend = bfin_sport_spi_suspend,
- .resume = bfin_sport_spi_resume,
};
module_platform_driver(bfin_sport_spi_driver);
diff --git a/drivers/spi/spi-bfin-v3.c b/drivers/spi/spi-bfin-v3.c
index f4bf81347d68..8f8598834b30 100644
--- a/drivers/spi/spi-bfin-v3.c
+++ b/drivers/spi/spi-bfin-v3.c
@@ -867,7 +867,7 @@ static int bfin_spi_probe(struct platform_device *pdev)
tasklet_init(&drv_data->pump_transfers,
bfin_spi_pump_transfers, (unsigned long)drv_data);
/* register with the SPI framework */
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(dev, master);
if (ret) {
dev_err(dev, "can not register spi master\n");
goto err_free_peripheral;
@@ -898,7 +898,6 @@ static int bfin_spi_remove(struct platform_device *pdev)
free_dma(drv_data->rx_dma);
free_dma(drv_data->tx_dma);
- spi_unregister_master(drv_data->master);
return 0;
}
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 45bdf73d6868..f0f195af75d4 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -524,7 +524,7 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
timeout = jiffies + HZ;
while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF))
if (!time_before(jiffies, timeout)) {
- dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF");
+ dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF\n");
break;
} else
cpu_relax();
@@ -913,8 +913,9 @@ static void bfin_spi_pump_messages(struct work_struct *work)
drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
struct spi_transfer, transfer_list);
- dev_dbg(&drv_data->pdev->dev, "got a message to pump, "
- "state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
+ dev_dbg(&drv_data->pdev->dev,
+ "got a message to pump, state is set to: baud "
+ "%d, flag 0x%x, ctl 0x%x\n",
drv_data->cur_chip->baud, drv_data->cur_chip->flag,
drv_data->cur_chip->ctl_reg);
@@ -1013,8 +1014,8 @@ static int bfin_spi_setup(struct spi_device *spi)
* but let's assume (for now) they do.
*/
if (chip_info->ctl_reg & ~bfin_ctl_reg) {
- dev_err(&spi->dev, "do not set bits in ctl_reg "
- "that the SPI framework manages\n");
+ dev_err(&spi->dev,
+ "do not set bits in ctl_reg that the SPI framework manages\n");
goto error;
}
chip->enable_dma = chip_info->enable_dma != 0
@@ -1050,17 +1051,17 @@ static int bfin_spi_setup(struct spi_device *spi)
chip->chip_select_num = spi->chip_select;
if (chip->chip_select_num < MAX_CTRL_CS) {
if (!(spi->mode & SPI_CPHA))
- dev_warn(&spi->dev, "Warning: SPI CPHA not set:"
- " Slave Select not under software control!\n"
- " See Documentation/blackfin/bfin-spi-notes.txt");
+ dev_warn(&spi->dev,
+ "Warning: SPI CPHA not set: Slave Select not under software control!\n"
+ "See Documentation/blackfin/bfin-spi-notes.txt\n");
chip->flag = (1 << spi->chip_select) << 8;
} else
chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS;
if (chip->enable_dma && chip->pio_interrupt) {
- dev_err(&spi->dev, "enable_dma is set, "
- "do not set pio_interrupt\n");
+ dev_err(&spi->dev,
+ "enable_dma is set, do not set pio_interrupt\n");
goto error;
}
/*
@@ -1410,10 +1411,10 @@ static int bfin_spi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int bfin_spi_suspend(struct device *dev)
{
- struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ struct bfin_spi_master_data *drv_data = dev_get_drvdata(dev);
int status = 0;
status = bfin_spi_stop_queue(drv_data);
@@ -1432,9 +1433,9 @@ static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int bfin_spi_resume(struct platform_device *pdev)
+static int bfin_spi_resume(struct device *dev)
{
- struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ struct bfin_spi_master_data *drv_data = dev_get_drvdata(dev);
int status = 0;
bfin_write(&drv_data->regs->ctl, drv_data->ctrl_reg);
@@ -1443,31 +1444,34 @@ static int bfin_spi_resume(struct platform_device *pdev)
/* Start the queue running */
status = bfin_spi_start_queue(drv_data);
if (status != 0) {
- dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
+ dev_err(dev, "problem starting queue (%d)\n", status);
return status;
}
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(bfin_spi_pm_ops, bfin_spi_suspend, bfin_spi_resume);
+
+#define BFIN_SPI_PM_OPS (&bfin_spi_pm_ops)
#else
-#define bfin_spi_suspend NULL
-#define bfin_spi_resume NULL
-#endif /* CONFIG_PM */
+#define BFIN_SPI_PM_OPS NULL
+#endif
MODULE_ALIAS("platform:bfin-spi");
static struct platform_driver bfin_spi_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = BFIN_SPI_PM_OPS,
},
- .suspend = bfin_spi_suspend,
- .resume = bfin_spi_resume,
+ .probe = bfin_spi_probe,
.remove = bfin_spi_remove,
};
static int __init bfin_spi_init(void)
{
- return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe);
+ return platform_driver_register(&bfin_spi_driver);
}
subsys_initcall(bfin_spi_init);
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 8c11355dec23..bd222f6b677d 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -191,7 +191,7 @@ int spi_bitbang_setup(struct spi_device *spi)
bitbang = spi_master_get_devdata(spi->master);
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
@@ -258,7 +258,7 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
static int spi_bitbang_prepare_hardware(struct spi_master *spi)
{
- struct spi_bitbang *bitbang;
+ struct spi_bitbang *bitbang;
unsigned long flags;
bitbang = spi_master_get_devdata(spi);
@@ -273,7 +273,7 @@ static int spi_bitbang_prepare_hardware(struct spi_master *spi)
static int spi_bitbang_transfer_one(struct spi_master *master,
struct spi_message *m)
{
- struct spi_bitbang *bitbang;
+ struct spi_bitbang *bitbang;
unsigned nsecs;
struct spi_transfer *t = NULL;
unsigned cs_change;
@@ -292,7 +292,7 @@ static int spi_bitbang_transfer_one(struct spi_master *master,
cs_change = 1;
status = 0;
- list_for_each_entry (t, &m->transfers, transfer_list) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
/* override speed or wordsize? */
if (t->speed_hz || t->bits_per_word)
@@ -349,7 +349,8 @@ static int spi_bitbang_transfer_one(struct spi_master *master,
if (t->delay_usecs)
udelay(t->delay_usecs);
- if (cs_change && !list_is_last(&t->transfer_list, &m->transfers)) {
+ if (cs_change &&
+ !list_is_last(&t->transfer_list, &m->transfers)) {
/* sometimes a short mid-message deselect of the chip
* may be needed to terminate a mode or command
*/
@@ -378,7 +379,7 @@ static int spi_bitbang_transfer_one(struct spi_master *master,
static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
{
- struct spi_bitbang *bitbang;
+ struct spi_bitbang *bitbang;
unsigned long flags;
bitbang = spi_master_get_devdata(spi);
@@ -414,10 +415,16 @@ static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
* This routine registers the spi_master, which will process requests in a
* dedicated task, keeping IRQs unblocked most of the time. To stop
* processing those requests, call spi_bitbang_stop().
+ *
+ * On success, this routine will take a reference to master. The caller is
+ * responsible for calling spi_bitbang_stop() to decrement the reference and
+ * spi_master_put() as counterpart of spi_alloc_master() to prevent a memory
+ * leak.
*/
int spi_bitbang_start(struct spi_bitbang *bitbang)
{
struct spi_master *master = bitbang->master;
+ int ret;
if (!master || !bitbang->chipselect)
return -EINVAL;
@@ -449,7 +456,11 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
/* driver may get busy before register() returns, especially
* if someone registered boardinfo for devices
*/
- return spi_register_master(master);
+ ret = spi_register_master(spi_master_get(master));
+ if (ret)
+ spi_master_put(master);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(spi_bitbang_start);
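Given the reference rule documented above, a driver built on spi-bitbang unwinds in the opposite order. A sketch of the remove() shape this implies, mirroring the efm32, gpio and fsl-dspi hunks later in this diff (foo_* names are illustrative):

struct foo_spi {
	struct spi_bitbang bitbang;
	struct clk *clk;
};

static int foo_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct foo_spi *hw = spi_master_get_devdata(master);

	/* drops the reference taken by spi_bitbang_start() */
	spi_bitbang_stop(&hw->bitbang);
	clk_disable_unprepare(hw->clk);
	/* counterpart of spi_alloc_master(), as the kernel-doc above says */
	spi_master_put(master);
	return 0;
}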
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index 5ed08e537433..8081f96bd1d5 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -147,8 +147,8 @@ static void butterfly_chipselect(struct spi_device *spi, int value)
/* we only needed to implement one mode here, and choose SPI_MODE_0 */
-#define spidelay(X) do{}while(0)
-//#define spidelay ndelay
+#define spidelay(X) do { } while (0)
+/* #define spidelay ndelay */
#include "spi-bitbang-txrx.h"
@@ -171,15 +171,15 @@ static struct mtd_partition partitions[] = { {
/* sector 0 = 8 pages * 264 bytes/page (1 block)
* sector 1 = 248 pages * 264 bytes/page
*/
- .name = "bookkeeping", // 66 KB
+ .name = "bookkeeping", /* 66 KB */
.offset = 0,
.size = (8 + 248) * 264,
-// .mask_flags = MTD_WRITEABLE,
+ /* .mask_flags = MTD_WRITEABLE, */
}, {
/* sector 2 = 256 pages * 264 bytes/page
* sectors 3-5 = 512 pages * 264 bytes/page
*/
- .name = "filesystem", // 462 KB
+ .name = "filesystem", /* 462 KB */
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
} };
@@ -209,7 +209,7 @@ static void butterfly_attach(struct parport *p)
* and no way to be selective about what it binds to.
*/
- master = spi_alloc_master(dev, sizeof *pp);
+ master = spi_alloc_master(dev, sizeof(*pp));
if (!master) {
status = -ENOMEM;
goto done;
@@ -225,7 +225,7 @@ static void butterfly_attach(struct parport *p)
master->bus_num = 42;
master->num_chipselect = 2;
- pp->bitbang.master = spi_master_get(master);
+ pp->bitbang.master = master;
pp->bitbang.chipselect = butterfly_chipselect;
pp->bitbang.txrx_word[SPI_MODE_0] = butterfly_txrx_word_mode0;
@@ -289,7 +289,6 @@ static void butterfly_attach(struct parport *p)
pr_debug("%s: dataflash at %s\n", p->name,
dev_name(&pp->dataflash->dev));
- // dev_info(_what?_, ...)
pr_info("%s: AVR Butterfly\n", p->name);
butterfly = pp;
return;
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 6416798828e7..e2a5a426b2ef 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -226,10 +226,10 @@ static int spi_clps711x_probe(struct platform_device *pdev)
dev_name(&pdev->dev), hw);
if (ret) {
dev_err(&pdev->dev, "Can't request IRQ\n");
- goto clk_out;
+ goto err_out;
}
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (!ret) {
dev_info(&pdev->dev,
"SPI bus driver initialized. Master clock %u Hz\n",
@@ -239,7 +239,6 @@ static int spi_clps711x_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to register master\n");
-clk_out:
err_out:
while (--i >= 0)
if (gpio_is_valid(hw->chipselect[i]))
@@ -260,8 +259,6 @@ static int spi_clps711x_remove(struct platform_device *pdev)
if (gpio_is_valid(hw->chipselect[i]))
gpio_free(hw->chipselect[i]);
- spi_unregister_master(master);
-
return 0;
}
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 8fbfe2483ffd..dd72445ba2ea 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -279,7 +279,8 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
struct davinci_spi *dspi;
struct davinci_spi_config *spicfg;
u8 bits_per_word = 0;
- u32 hz = 0, spifmt = 0, prescale = 0;
+ u32 hz = 0, spifmt = 0;
+ int prescale;
dspi = spi_master_get_devdata(spi->master);
spicfg = (struct davinci_spi_config *)spi->controller_data;
@@ -916,7 +917,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
if (ret)
goto unmap_io;
- dspi->bitbang.master = spi_master_get(master);
+ dspi->bitbang.master = master;
if (dspi->bitbang.master == NULL) {
ret = -ENODEV;
goto irq_free;
@@ -925,7 +926,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
dspi->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(dspi->clk)) {
ret = -ENODEV;
- goto put_master;
+ goto irq_free;
}
clk_prepare_enable(dspi->clk);
@@ -1015,8 +1016,6 @@ free_dma:
free_clk:
clk_disable_unprepare(dspi->clk);
clk_put(dspi->clk);
-put_master:
- spi_master_put(master);
irq_free:
free_irq(dspi->irq, dspi);
unmap_io:
@@ -1024,7 +1023,7 @@ unmap_io:
release_region:
release_mem_region(dspi->pbase, resource_size(r));
free_master:
- kfree(master);
+ spi_master_put(master);
err:
return ret;
}
@@ -1051,11 +1050,11 @@ static int davinci_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(dspi->clk);
clk_put(dspi->clk);
- spi_master_put(master);
free_irq(dspi->irq, dspi);
iounmap(dspi->base);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(dspi->pbase, resource_size(r));
+ spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 4aa8be865cc0..168c620947f4 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -74,7 +74,7 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
dwsmmio->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(dwsmmio->clk)) {
ret = PTR_ERR(dwsmmio->clk);
- goto err_irq;
+ goto err_unmap;
}
clk_enable(dwsmmio->clk);
@@ -94,8 +94,6 @@ err_clk:
clk_disable(dwsmmio->clk);
clk_put(dwsmmio->clk);
dwsmmio->clk = NULL;
-err_irq:
- free_irq(dws->irq, dws);
err_unmap:
iounmap(dws->regs);
err_release_reg:
@@ -115,7 +113,6 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
clk_put(dwsmmio->clk);
dwsmmio->clk = NULL;
- free_irq(dwsmmio->dws.irq, &dwsmmio->dws);
dw_spi_remove_host(&dwsmmio->dws);
iounmap(dwsmmio->dws.regs);
kfree(dwsmmio);
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 6055c8d9fdd7..66fa9955ea14 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -40,7 +40,7 @@ static int spi_pci_probe(struct pci_dev *pdev,
int pci_bar = 0;
int ret;
- printk(KERN_INFO "DW: found PCI SPI controller(ID: %04x:%04x)\n",
+ dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n",
pdev->vendor, pdev->device);
ret = pci_enable_device(pdev);
@@ -109,7 +109,6 @@ static void spi_pci_remove(struct pci_dev *pdev)
{
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
- pci_set_drvdata(pdev, NULL);
dw_spi_remove_host(&dwpci->dws);
iounmap(dwpci->dws.regs);
pci_release_region(pdev, 0);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 79c958e49f61..b897c4adb39d 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -870,8 +870,8 @@ void dw_spi_remove_host(struct dw_spi *dws)
/* Remove the queue */
status = destroy_queue(dws);
if (status != 0)
- dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
- "complete, message memory not freed\n");
+ dev_err(&dws->master->dev,
+ "dw_spi_remove: workqueue will not complete, message memory not freed\n");
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
index 7d84418a01d8..d4d3cc534792 100644
--- a/drivers/spi/spi-efm32.c
+++ b/drivers/spi/spi-efm32.c
@@ -280,10 +280,6 @@ static irqreturn_t efm32_spi_txirq(int irq, void *data)
return IRQ_HANDLED;
}
-static const struct efm32_spi_pdata efm32_spi_pdata_default = {
- .location = 1,
-};
-
static u32 efm32_spi_get_configured_location(struct efm32_spi_ddata *ddata)
{
u32 reg = efm32_spi_read32(ddata, REG_ROUTE);
@@ -347,7 +343,7 @@ static int efm32_spi_probe(struct platform_device *pdev)
ddata = spi_master_get_devdata(master);
- ddata->bitbang.master = spi_master_get(master);
+ ddata->bitbang.master = master;
ddata->bitbang.chipselect = efm32_spi_chipselect;
ddata->bitbang.setup_transfer = efm32_spi_setup_transfer;
ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs;
@@ -387,7 +383,7 @@ static int efm32_spi_probe(struct platform_device *pdev)
goto err;
}
- if (resource_size(res) < 60) {
+ if (resource_size(res) < 0x60) {
ret = -EINVAL;
dev_err(&pdev->dev, "memory resource too small\n");
goto err;
@@ -467,7 +463,6 @@ err_disable_clk:
clk_disable_unprepare(ddata->clk);
err:
spi_master_put(master);
- kfree(master);
}
return ret;
@@ -478,13 +473,14 @@ static int efm32_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct efm32_spi_ddata *ddata = spi_master_get_devdata(master);
+ spi_bitbang_stop(&ddata->bitbang);
+
efm32_spi_write32(ddata, 0, REG_IEN);
free_irq(ddata->txirq, ddata);
free_irq(ddata->rxirq, ddata);
clk_disable_unprepare(ddata->clk);
spi_master_put(master);
- kfree(master);
return 0;
}
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index d22c00a227b6..1bfaed6e4073 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -330,7 +330,7 @@ static int ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
chip->spi->mode, div_cpsr, div_scr, dss);
- dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);
+ dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);
ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr);
ep93xx_spi_write_u16(espi, SSPCR0, cr0);
@@ -509,7 +509,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
}
if (WARN_ON(len)) {
- dev_warn(&espi->pdev->dev, "len = %zu expected 0!", len);
+ dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
return ERR_PTR(-EINVAL);
}
@@ -942,7 +942,7 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
/* make sure that the hardware is disabled */
ep93xx_spi_write_u8(espi, SSPCR1, 0);
- error = spi_register_master(master);
+ error = devm_spi_register_master(&pdev->dev, master);
if (error) {
dev_err(&pdev->dev, "failed to register SPI master\n");
goto fail_free_dma;
@@ -968,7 +968,6 @@ static int ep93xx_spi_remove(struct platform_device *pdev)
ep93xx_spi_release_dma(espi);
- spi_unregister_master(master);
return 0;
}
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index 07971e3fe58b..54b06376f03c 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
#include <asm/cpm.h>
#include <asm/qe.h>
@@ -299,7 +300,7 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
switch (mspi->subblock) {
default:
- dev_warn(dev, "cell-index unspecified, assuming SPI1");
+ dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
/* fall through */
case 0:
mspi->subblock = QE_CR_SUBBLOCK_SPI1;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 4e44575bd87a..8641b03bdd7a 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -108,7 +108,7 @@ struct fsl_dspi {
struct spi_bitbang bitbang;
struct platform_device *pdev;
- void *base;
+ void __iomem *base;
int irq;
struct clk *clk;
@@ -165,7 +165,7 @@ static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
}
}
- pr_warn("Can not find valid buad rate,speed_hz is %d,clkrate is %ld\
+ pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld\
,we use the max prescaler value.\n", speed_hz, clkrate);
*pbr = ARRAY_SIZE(pbr_tbl) - 1;
*br = ARRAY_SIZE(brs) - 1;
@@ -450,7 +450,7 @@ static int dspi_probe(struct platform_device *pdev)
dspi = spi_master_get_devdata(master);
dspi->pdev = pdev;
- dspi->bitbang.master = spi_master_get(master);
+ dspi->bitbang.master = master;
dspi->bitbang.chipselect = dspi_chipselect;
dspi->bitbang.setup_transfer = dspi_setup_transfer;
dspi->bitbang.txrx_bufs = dspi_txrx_transfer;
@@ -520,7 +520,6 @@ out_clk_put:
clk_disable_unprepare(dspi->clk);
out_master_put:
spi_master_put(master);
- platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -531,6 +530,7 @@ static int dspi_remove(struct platform_device *pdev)
/* Disconnect from the SPI framework */
spi_bitbang_stop(&dspi->bitbang);
+ clk_disable_unprepare(dspi->clk);
spi_master_put(dspi->bitbang.master);
return 0;
@@ -547,5 +547,5 @@ static struct platform_driver fsl_dspi_driver = {
module_platform_driver(fsl_dspi_driver);
MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index b8f1103fe28e..32200d4f8780 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -16,6 +16,8 @@
#include <linux/fsl_devices.h>
#include <linux/mm.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/err.h>
@@ -289,8 +291,8 @@ static void fsl_espi_do_trans(struct spi_message *m,
if ((first->bits_per_word != t->bits_per_word) ||
(first->speed_hz != t->speed_hz)) {
espi_trans->status = -EINVAL;
- dev_err(mspi->dev, "bits_per_word/speed_hz should be"
- " same for the same SPI transfer\n");
+ dev_err(mspi->dev,
+ "bits_per_word/speed_hz should be same for the same SPI transfer\n");
return;
}
@@ -687,7 +689,7 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
struct device_node *np = ofdev->dev.of_node;
struct spi_master *master;
struct resource mem;
- struct resource irq;
+ unsigned int irq;
int ret = -ENOMEM;
ret = of_mpc8xxx_spi_probe(ofdev);
@@ -702,13 +704,13 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
if (ret)
goto err;
- ret = of_irq_to_resource(np, 0, &irq);
+ irq = irq_of_parse_and_map(np, 0);
- if (!ret) {
+ if (!irq) {
ret = -EINVAL;
goto err;
}
- master = fsl_espi_probe(dev, &mem, irq.start);
+ master = fsl_espi_probe(dev, &mem, irq);
if (IS_ERR(master)) {
ret = PTR_ERR(master);
goto err;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 68b69fec13a9..3fb09f981980 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -467,7 +468,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
}
#endif
- spi_gpio->bitbang.master = spi_master_get(master);
+ spi_gpio->bitbang.master = master;
spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
@@ -486,7 +487,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
status = spi_bitbang_start(&spi_gpio->bitbang);
if (status < 0) {
- spi_master_put(spi_gpio->bitbang.master);
gpio_free:
if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
gpio_free(SPI_MISO_GPIO);
@@ -510,13 +510,13 @@ static int spi_gpio_remove(struct platform_device *pdev)
/* stop() unregisters child devices too */
status = spi_bitbang_stop(&spi_gpio->bitbang);
- spi_master_put(spi_gpio->bitbang.master);
if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
gpio_free(SPI_MISO_GPIO);
if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI)
gpio_free(SPI_MOSI_GPIO);
gpio_free(SPI_SCK_GPIO);
+ spi_master_put(spi_gpio->bitbang.master);
return status;
}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 15323d8bd9cf..b80f2f70fef7 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -749,6 +749,35 @@ static void spi_imx_cleanup(struct spi_device *spi)
{
}
+static int
+spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_enable(spi_imx->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(spi_imx->clk_ipg);
+ if (ret) {
+ clk_disable(spi_imx->clk_per);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+ clk_disable(spi_imx->clk_ipg);
+ clk_disable(spi_imx->clk_per);
+ return 0;
+}
+
static int spi_imx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -786,7 +815,7 @@ static int spi_imx_probe(struct platform_device *pdev)
master->num_chipselect = num_cs;
spi_imx = spi_master_get_devdata(master);
- spi_imx->bitbang.master = spi_master_get(master);
+ spi_imx->bitbang.master = master;
for (i = 0; i < master->num_chipselect; i++) {
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
@@ -810,6 +839,8 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
spi_imx->bitbang.master->setup = spi_imx_setup;
spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
+ spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
+ spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
init_completion(&spi_imx->xfer_done);
@@ -872,6 +903,8 @@ static int spi_imx_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "probed\n");
+ clk_disable(spi_imx->clk_ipg);
+ clk_disable(spi_imx->clk_per);
return ret;
out_clk_put:
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index 0759b5db9883..41c5765be746 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -222,7 +222,7 @@ static void spi_lm70llp_attach(struct parport *p)
/*
* SPI and bitbang hookup.
*/
- pp->bitbang.master = spi_master_get(master);
+ pp->bitbang.master = master;
pp->bitbang.chipselect = lm70_chipselect;
pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx;
pp->bitbang.flags = SPI_3WIRE;
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 6adf4e35816d..58d5ee0e4443 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -20,6 +20,7 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/completion.h>
#include <linux/io.h>
@@ -536,7 +537,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
if (ret < 0)
goto free_clock;
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(dev, master);
if (ret < 0)
goto free_clock;
@@ -559,12 +560,10 @@ static int mpc512x_psc_spi_do_remove(struct device *dev)
struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
- spi_unregister_master(master);
clk_disable_unprepare(mps->clk_mclk);
free_irq(mps->irq, mps);
if (mps->psc)
iounmap(mps->psc);
- spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index 6e925dc34396..00ba910ab302 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -383,8 +383,8 @@ static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
mps->irq = irq;
if (pdata == NULL) {
- dev_warn(dev, "probe called without platform data, no "
- "cs_control function will be called\n");
+ dev_warn(dev,
+ "probe called without platform data, no cs_control function will be called\n");
mps->cs_control = NULL;
mps->sysclk = 0;
master->bus_num = bus_num;
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index de7b1141b90f..de333059a9a7 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -57,34 +57,53 @@
#define SG_MAXLEN 0xff00
+/*
+ * Flags for txrx functions. More efficient than using an argument register for
+ * each one.
+ */
+#define TXRX_WRITE (1<<0) /* This is a write */
+#define TXRX_DEASSERT_CS (1<<1) /* De-assert CS at end of txrx */
+
struct mxs_spi {
struct mxs_ssp ssp;
struct completion c;
+ unsigned int sck; /* Rate requested (vs actual) */
};
static int mxs_spi_setup_transfer(struct spi_device *dev,
- struct spi_transfer *t)
+ const struct spi_transfer *t)
{
struct mxs_spi *spi = spi_master_get_devdata(dev->master);
struct mxs_ssp *ssp = &spi->ssp;
- uint32_t hz = 0;
+ const unsigned int hz = min(dev->max_speed_hz, t->speed_hz);
- hz = dev->max_speed_hz;
- if (t && t->speed_hz)
- hz = min(hz, t->speed_hz);
if (hz == 0) {
- dev_err(&dev->dev, "Cannot continue with zero clock\n");
+ dev_err(&dev->dev, "SPI clock rate of zero not allowed\n");
return -EINVAL;
}
- mxs_ssp_set_clk_rate(ssp, hz);
+ if (hz != spi->sck) {
+ mxs_ssp_set_clk_rate(ssp, hz);
+ /*
+ * Save requested rate, hz, rather than the actual rate,
+ * ssp->clk_rate. Otherwise we would set the rate every transfer
+ * when the actual rate is not quite the same as the requested rate.
+ */
+ spi->sck = hz;
+ /*
+ * Perhaps we should return an error if the actual clock is
+ * nowhere close to what was requested?
+ */
+ }
+
+ writel(BM_SSP_CTRL0_LOCK_CS,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
- BF_SSP_CTRL1_WORD_LENGTH
- (BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
- ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
- ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
- ssp->base + HW_SSP_CTRL1(ssp));
+ BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
+ ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
+ ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
+ ssp->base + HW_SSP_CTRL1(ssp));
writel(0x0, ssp->base + HW_SSP_CMD0);
writel(0x0, ssp->base + HW_SSP_CMD1);
@@ -94,26 +113,15 @@ static int mxs_spi_setup_transfer(struct spi_device *dev,
static int mxs_spi_setup(struct spi_device *dev)
{
- int err = 0;
-
if (!dev->bits_per_word)
dev->bits_per_word = 8;
- if (dev->mode & ~(SPI_CPOL | SPI_CPHA))
- return -EINVAL;
-
- err = mxs_spi_setup_transfer(dev, NULL);
- if (err) {
- dev_err(&dev->dev,
- "Failed to setup transfer, error = %d\n", err);
- }
-
- return err;
+ return 0;
}
-static uint32_t mxs_spi_cs_to_reg(unsigned cs)
+static u32 mxs_spi_cs_to_reg(unsigned cs)
{
- uint32_t select = 0;
+ u32 select = 0;
/*
* i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
@@ -131,43 +139,11 @@ static uint32_t mxs_spi_cs_to_reg(unsigned cs)
return select;
}
-static void mxs_spi_set_cs(struct mxs_spi *spi, unsigned cs)
-{
- const uint32_t mask =
- BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ;
- uint32_t select;
- struct mxs_ssp *ssp = &spi->ssp;
-
- writel(mask, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
- select = mxs_spi_cs_to_reg(cs);
- writel(select, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
-}
-
-static inline void mxs_spi_enable(struct mxs_spi *spi)
-{
- struct mxs_ssp *ssp = &spi->ssp;
-
- writel(BM_SSP_CTRL0_LOCK_CS,
- ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
- writel(BM_SSP_CTRL0_IGNORE_CRC,
- ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
-}
-
-static inline void mxs_spi_disable(struct mxs_spi *spi)
-{
- struct mxs_ssp *ssp = &spi->ssp;
-
- writel(BM_SSP_CTRL0_LOCK_CS,
- ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
- writel(BM_SSP_CTRL0_IGNORE_CRC,
- ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
-}
-
static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
{
const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
struct mxs_ssp *ssp = &spi->ssp;
- uint32_t reg;
+ u32 reg;
do {
reg = readl_relaxed(ssp->base + offset);
@@ -200,9 +176,9 @@ static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
+static int mxs_spi_txrx_dma(struct mxs_spi *spi,
unsigned char *buf, int len,
- int *first, int *last, int write)
+ unsigned int flags)
{
struct mxs_ssp *ssp = &spi->ssp;
struct dma_async_tx_descriptor *desc = NULL;
@@ -211,11 +187,11 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
const int sgs = DIV_ROUND_UP(len, desc_len);
int sg_count;
int min, ret;
- uint32_t ctrl0;
+ u32 ctrl0;
struct page *vm_page;
void *sg_buf;
struct {
- uint32_t pio[4];
+ u32 pio[4];
struct scatterlist sg;
} *dma_xfer;
@@ -228,21 +204,25 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
INIT_COMPLETION(spi->c);
+ /* Chip select was already programmed into CTRL0 */
ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
- ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
- ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);
+ ctrl0 &= ~(BM_SSP_CTRL0_XFER_COUNT | BM_SSP_CTRL0_IGNORE_CRC |
+ BM_SSP_CTRL0_READ);
+ ctrl0 |= BM_SSP_CTRL0_DATA_XFER;
- if (*first)
- ctrl0 |= BM_SSP_CTRL0_LOCK_CS;
- if (!write)
+ if (!(flags & TXRX_WRITE))
ctrl0 |= BM_SSP_CTRL0_READ;
/* Queue the DMA data transfer. */
for (sg_count = 0; sg_count < sgs; sg_count++) {
+ /* Prepare the transfer descriptor. */
min = min(len, desc_len);
- /* Prepare the transfer descriptor. */
- if ((sg_count + 1 == sgs) && *last)
+ /*
+ * De-assert CS on last segment if flag is set (i.e., no more
+ * transfers will follow)
+ */
+ if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS))
ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;
if (ssp->devid == IMX23_SSP) {
@@ -267,7 +247,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
- write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
len -= min;
buf += min;
@@ -287,7 +267,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
desc = dmaengine_prep_slave_sg(ssp->dmach,
&dma_xfer[sg_count].sg, 1,
- write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ (flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
@@ -324,7 +304,7 @@ err_vmalloc:
while (--sg_count >= 0) {
err_mapped:
dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
- write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
kfree(dma_xfer);
@@ -332,20 +312,19 @@ err_mapped:
return ret;
}
-static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
+static int mxs_spi_txrx_pio(struct mxs_spi *spi,
unsigned char *buf, int len,
- int *first, int *last, int write)
+ unsigned int flags)
{
struct mxs_ssp *ssp = &spi->ssp;
- if (*first)
- mxs_spi_enable(spi);
-
- mxs_spi_set_cs(spi, cs);
+ writel(BM_SSP_CTRL0_IGNORE_CRC,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
while (len--) {
- if (*last && len == 0)
- mxs_spi_disable(spi);
+ if (len == 0 && (flags & TXRX_DEASSERT_CS))
+ writel(BM_SSP_CTRL0_IGNORE_CRC,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
if (ssp->devid == IMX23_SSP) {
writel(BM_SSP_CTRL0_XFER_COUNT,
@@ -356,7 +335,7 @@ static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
writel(1, ssp->base + HW_SSP_XFER_SIZE);
}
- if (write)
+ if (flags & TXRX_WRITE)
writel(BM_SSP_CTRL0_READ,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
else
@@ -369,13 +348,13 @@ static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
return -ETIMEDOUT;
- if (write)
+ if (flags & TXRX_WRITE)
writel(*buf, ssp->base + HW_SSP_DATA(ssp));
writel(BM_SSP_CTRL0_DATA_XFER,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
- if (!write) {
+ if (!(flags & TXRX_WRITE)) {
if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
BM_SSP_STATUS_FIFO_EMPTY, 0))
return -ETIMEDOUT;
@@ -400,14 +379,15 @@ static int mxs_spi_transfer_one(struct spi_master *master,
{
struct mxs_spi *spi = spi_master_get_devdata(master);
struct mxs_ssp *ssp = &spi->ssp;
- int first, last;
struct spi_transfer *t, *tmp_t;
+ unsigned int flag;
int status = 0;
- int cs;
-
- first = last = 0;
- cs = m->spi->chip_select;
+ /* Program CS register bits here, it will be used for all transfers. */
+ writel(BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+ writel(mxs_spi_cs_to_reg(m->spi->chip_select),
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
@@ -415,16 +395,9 @@ static int mxs_spi_transfer_one(struct spi_master *master,
if (status)
break;
- if (&t->transfer_list == m->transfers.next)
- first = 1;
- if (&t->transfer_list == m->transfers.prev)
- last = 1;
- if ((t->rx_buf && t->tx_buf) || (t->rx_dma && t->tx_dma)) {
- dev_err(ssp->dev,
- "Cannot send and receive simultaneously\n");
- status = -EINVAL;
- break;
- }
+ /* De-assert on last transfer, inverted by cs_change flag */
+ flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
+ TXRX_DEASSERT_CS : 0;
/*
* Small blocks can be transfered via PIO.
@@ -441,26 +414,26 @@ static int mxs_spi_transfer_one(struct spi_master *master,
STMP_OFFSET_REG_CLR);
if (t->tx_buf)
- status = mxs_spi_txrx_pio(spi, cs,
+ status = mxs_spi_txrx_pio(spi,
(void *)t->tx_buf,
- t->len, &first, &last, 1);
+ t->len, flag | TXRX_WRITE);
if (t->rx_buf)
- status = mxs_spi_txrx_pio(spi, cs,
+ status = mxs_spi_txrx_pio(spi,
t->rx_buf, t->len,
- &first, &last, 0);
+ flag);
} else {
writel(BM_SSP_CTRL1_DMA_ENABLE,
ssp->base + HW_SSP_CTRL1(ssp) +
STMP_OFFSET_REG_SET);
if (t->tx_buf)
- status = mxs_spi_txrx_dma(spi, cs,
+ status = mxs_spi_txrx_dma(spi,
(void *)t->tx_buf, t->len,
- &first, &last, 1);
+ flag | TXRX_WRITE);
if (t->rx_buf)
- status = mxs_spi_txrx_dma(spi, cs,
+ status = mxs_spi_txrx_dma(spi,
t->rx_buf, t->len,
- &first, &last, 0);
+ flag);
}
if (status) {
@@ -469,7 +442,6 @@ static int mxs_spi_transfer_one(struct spi_master *master,
}
m->actual_length += t->len;
- first = last = 0;
}
m->status = status;
@@ -563,7 +535,6 @@ static int mxs_spi_probe(struct platform_device *pdev)
goto out_dma_release;
clk_set_rate(ssp->clk, clk_freq);
- ssp->clk_rate = clk_get_rate(ssp->clk) / 1000;
ret = stmp_reset_block(ssp->base);
if (ret)
@@ -571,7 +542,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
goto out_disable_clk;
@@ -598,10 +569,8 @@ static int mxs_spi_remove(struct platform_device *pdev)
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
- spi_unregister_master(master);
clk_disable_unprepare(ssp->clk);
dma_release_channel(ssp->dmach);
- spi_master_put(master);
return 0;
}
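
The flag computation in the mxs transfer loop above replaces the old first/last bookkeeping with a single decision: chip select is normally released only after the last transfer of the message, and a transfer's cs_change bit inverts that. The XOR, factored into a standalone helper purely for illustration (mxs_deassert_flag is not part of the patch):

    /*
     * last  cs_change -> de-assert CS after this transfer?
     *  0        0        no  (more transfers follow)
     *  0        1        yes (cs_change forces an early CS toggle)
     *  1        0        yes (normal end of message)
     *  1        1        no  (keep CS held for the next message)
     */
    static unsigned int mxs_deassert_flag(bool last_xfer, bool cs_change)
    {
            return (last_xfer ^ cs_change) ? TXRX_DEASSERT_CS : 0;
    }

The caller computes last_xfer as &t->transfer_list == m->transfers.prev, exactly as in the hunk, and ORs in TXRX_WRITE for the tx direction.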
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 47a68b43bcd5..e0c32bc69ee2 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -349,7 +349,7 @@ static int nuc900_spi_probe(struct platform_device *pdev)
}
hw = spi_master_get_devdata(master);
- hw->master = spi_master_get(master);
+ hw->master = master;
hw->pdata = dev_get_platdata(&pdev->dev);
hw->dev = &pdev->dev;
@@ -435,7 +435,6 @@ err_iomap:
kfree(hw->ioarea);
err_pdata:
spi_master_put(hw->master);
-
err_nomem:
return err;
}
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index 333cb1badcd7..91c668596202 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -306,7 +306,7 @@ static int tiny_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
/* setup the state for the bitbang driver */
- hw->bitbang.master = spi_master_get(master);
+ hw->bitbang.master = master;
if (!hw->bitbang.master)
return err;
hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index 5f28ddbe4f7e..67249a48b391 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -272,7 +272,7 @@ static int octeon_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = pdev->dev.of_node;
- err = spi_register_master(master);
+ err = devm_spi_register_master(&pdev->dev, master);
if (err) {
dev_err(&pdev->dev, "register master failed: %d\n", err);
goto fail;
@@ -292,8 +292,6 @@ static int octeon_spi_remove(struct platform_device *pdev)
struct octeon_spi *p = spi_master_get_devdata(master);
u64 register_base = p->register_base;
- spi_unregister_master(master);
-
/* Clear the CSENA* and put everything in a known state. */
cvmx_write_csr(register_base + OCTEON_SPI_CFG, 0);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 69ecf05757dd..b6ed82beb01d 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -457,7 +457,7 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
goto err;
}
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status < 0)
goto err;
@@ -485,8 +485,6 @@ static int omap1_spi100k_remove(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spi_unregister_master(master);
-
return 0;
}
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index a6a8f0961750..9313fd3b413d 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -557,7 +557,8 @@ static struct platform_driver uwire_driver = {
.name = "omap_uwire",
.owner = THIS_MODULE,
},
- .remove = uwire_remove,
+ .probe = uwire_probe,
+ .remove = uwire_remove,
// suspend ... unuse ck
// resume ... use ck
};
@@ -579,7 +580,7 @@ static int __init omap_uwire_init(void)
omap_writel(val | 0x00AAA000, OMAP7XX_IO_CONF_9);
}
- return platform_driver_probe(&uwire_driver, uwire_probe);
+ return platform_driver_register(&uwire_driver);
}
static void __exit omap_uwire_exit(void)
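
The uwire change is the standard platform_driver_probe() to platform_driver_register() conversion: once the probe callback is no longer passed to the registration helper, it has to live in the platform_driver structure itself, which is what the added .probe line does. The resulting shape, with placeholder foo_* names:

    static struct platform_driver foo_driver = {
            .driver = {
                    .name   = "foo",
                    .owner  = THIS_MODULE,
            },
            .probe  = foo_probe,    /* required: register() takes no probe argument */
            .remove = foo_remove,
    };

    static int __init foo_init(void)
    {
            /*
             * platform_driver_probe() binds at most once and rules out
             * deferred probing; plain registration supports both.
             */
            return platform_driver_register(&foo_driver);
    }

foo_probe may also need to lose any __init annotation, since it can now be called after init memory has been freed.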
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index ed4af4708d9a..443df39840bc 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -276,7 +276,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
unsigned int wcnt;
- int fifo_depth, bytes_per_word;
+ int max_fifo_depth, fifo_depth, bytes_per_word;
u32 chconf, xferlevel;
mcspi = spi_master_get_devdata(master);
@@ -287,7 +287,12 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
if (t->len % bytes_per_word != 0)
goto disable_fifo;
- fifo_depth = gcd(t->len, OMAP2_MCSPI_MAX_FIFODEPTH);
+ if (t->rx_buf != NULL && t->tx_buf != NULL)
+ max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
+ else
+ max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
+
+ fifo_depth = gcd(t->len, max_fifo_depth);
if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
goto disable_fifo;
@@ -299,7 +304,8 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
if (t->rx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFER;
xferlevel |= (fifo_depth - 1) << 8;
- } else {
+ }
+ if (t->tx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFET;
xferlevel |= fifo_depth - 1;
}
@@ -498,7 +504,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
((u32 *)xfer->rx_buf)[elements++] = w;
} else {
int bytes_per_word = mcspi_bytes_per_word(word_len);
- dev_err(&spi->dev, "DMA RX penultimate word empty");
+ dev_err(&spi->dev, "DMA RX penultimate word empty\n");
count -= (bytes_per_word << 1);
omap2_mcspi_set_enable(spi, 1);
return count;
@@ -516,7 +522,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
else /* word_len <= 32 */
((u32 *)xfer->rx_buf)[elements] = w;
} else {
- dev_err(&spi->dev, "DMA RX last word empty");
+ dev_err(&spi->dev, "DMA RX last word empty\n");
count -= mcspi_bytes_per_word(word_len);
}
omap2_mcspi_set_enable(spi, 1);
@@ -1407,7 +1413,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
if (status < 0)
goto disable_pm;
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status < 0)
goto disable_pm;
@@ -1435,7 +1441,6 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
pm_runtime_put_sync(mcspi->dev);
pm_runtime_disable(&pdev->dev);
- spi_unregister_master(master);
kfree(dma_channels);
return 0;
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 1d1d321d90c4..744841e095e4 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -84,8 +84,8 @@ static int orion_spi_set_transfer_size(struct orion_spi *orion_spi, int size)
orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
ORION_SPI_IF_8_16_BIT_MODE);
} else {
- pr_debug("Bad bits per word value %d (only 8 or 16 are "
- "allowed).\n", size);
+ pr_debug("Bad bits per word value %d (only 8 or 16 are allowed).\n",
+ size);
return -EINVAL;
}
@@ -407,7 +407,7 @@ static int orion_spi_probe(struct platform_device *pdev)
const u32 *iprop;
int size;
- master = spi_alloc_master(&pdev->dev, sizeof *spi);
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi));
if (master == NULL) {
dev_dbg(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
@@ -457,7 +457,7 @@ static int orion_spi_probe(struct platform_device *pdev)
goto out_rel_clk;
master->dev.of_node = pdev->dev.of_node;
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status < 0)
goto out_rel_clk;
@@ -483,8 +483,6 @@ static int orion_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(spi->clk);
clk_put(spi->clk);
- spi_unregister_master(master);
-
return 0;
}
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 9c511a954d21..2789b452e711 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1619,7 +1619,6 @@ static int verify_controller_parameters(struct pl022 *pl022,
dev_err(&pl022->adev->dev,
"RX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
- break;
}
switch (chip_info->tx_lev_trig) {
case SSP_TX_1_OR_MORE_EMPTY_LOC:
@@ -1645,7 +1644,6 @@ static int verify_controller_parameters(struct pl022 *pl022,
dev_err(&pl022->adev->dev,
"TX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
- break;
}
if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
if ((chip_info->ctrl_len < SSP_BITS_4)
@@ -2175,8 +2173,8 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
status = -ENOMEM;
goto err_no_ioremap;
}
- printk(KERN_INFO "pl022: mapped registers from %pa to %p\n",
- &adev->res.start, pl022->virtbase);
+ dev_info(&adev->dev, "mapped registers from %pa to %p\n",
+ &adev->res.start, pl022->virtbase);
pl022->clk = devm_clk_get(&adev->dev, NULL);
if (IS_ERR(pl022->clk)) {
@@ -2227,7 +2225,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
/* Register with the SPI framework */
amba_set_drvdata(adev, pl022);
- status = spi_register_master(master);
+ status = devm_spi_register_master(&adev->dev, master);
if (status != 0) {
dev_err(&adev->dev,
"probe - problem registering spi master\n");
@@ -2287,8 +2285,6 @@ pl022_remove(struct amba_device *adev)
clk_unprepare(pl022->clk);
amba_release_regions(adev);
tasklet_disable(&pl022->pump_transfers);
- spi_unregister_master(pl022->master);
- amba_set_drvdata(adev, NULL);
return 0;
}
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 0ee53c25ba58..c57740bb70d3 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -396,7 +396,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
master->dev.of_node = np;
platform_set_drvdata(op, master);
hw = spi_master_get_devdata(master);
- hw->master = spi_master_get(master);
+ hw->master = master;
hw->dev = dev;
init_completion(&hw->done);
@@ -558,6 +558,7 @@ static int spi_ppc4xx_of_remove(struct platform_device *op)
free_irq(hw->irqnum, hw);
iounmap(hw->regs);
free_gpios(hw);
+ spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index c1a50674c1e3..cb0e1f1137ad 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -573,8 +573,8 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
write_SSTO(0, reg);
write_SSSR_CS(drv_data, drv_data->clear_sr);
- dev_err(&drv_data->pdev->dev, "bad message state "
- "in interrupt handler\n");
+ dev_err(&drv_data->pdev->dev,
+ "bad message state in interrupt handler\n");
/* Never fail */
return IRQ_HANDLED;
@@ -651,8 +651,8 @@ static void pump_transfers(unsigned long data)
if (message->is_dma_mapped
|| transfer->rx_dma || transfer->tx_dma) {
dev_err(&drv_data->pdev->dev,
- "pump_transfers: mapped transfer length "
- "of %u is greater than %d\n",
+ "pump_transfers: mapped transfer length of "
+ "%u is greater than %d\n",
transfer->len, MAX_DMA_LEN);
message->status = -EINVAL;
giveback(drv_data);
@@ -660,11 +660,10 @@ static void pump_transfers(unsigned long data)
}
/* warn ... we force this to PIO mode */
- if (printk_ratelimit())
- dev_warn(&message->spi->dev, "pump_transfers: "
- "DMA disabled for transfer length %ld "
- "greater than %d\n",
- (long)drv_data->len, MAX_DMA_LEN);
+ dev_warn_ratelimited(&message->spi->dev,
+ "pump_transfers: DMA disabled for transfer length %ld "
+ "greater than %d\n",
+ (long)drv_data->len, MAX_DMA_LEN);
}
/* Setup the transfer state based on the type of transfer */
@@ -726,11 +725,8 @@ static void pump_transfers(unsigned long data)
message->spi,
bits, &dma_burst,
&dma_thresh))
- if (printk_ratelimit())
- dev_warn(&message->spi->dev,
- "pump_transfers: "
- "DMA burst size reduced to "
- "match bits_per_word\n");
+ dev_warn_ratelimited(&message->spi->dev,
+ "pump_transfers: DMA burst size reduced to match bits_per_word\n");
}
cr0 = clk_div
@@ -854,8 +850,8 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
if (gpio_is_valid(chip_info->gpio_cs)) {
err = gpio_request(chip_info->gpio_cs, "SPI_CS");
if (err) {
- dev_err(&spi->dev, "failed to request chip select "
- "GPIO%d\n", chip_info->gpio_cs);
+ dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
+ chip_info->gpio_cs);
return err;
}
@@ -899,8 +895,8 @@ static int setup(struct spi_device *spi)
if (drv_data->ssp_type == CE4100_SSP) {
if (spi->chip_select > 4) {
- dev_err(&spi->dev, "failed setup: "
- "cs number must not be > 4.\n");
+ dev_err(&spi->dev,
+ "failed setup: cs number must not be > 4.\n");
kfree(chip);
return -EINVAL;
}
@@ -956,8 +952,8 @@ static int setup(struct spi_device *spi)
spi->bits_per_word,
&chip->dma_burst_size,
&chip->dma_threshold)) {
- dev_warn(&spi->dev, "in setup: DMA burst size reduced "
- "to match bits_per_word\n");
+ dev_warn(&spi->dev,
+ "in setup: DMA burst size reduced to match bits_per_word\n");
}
}
@@ -1205,7 +1201,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status != 0) {
dev_err(&pdev->dev, "problem registering spi master\n");
goto out_error_clock_enabled;
@@ -1257,9 +1253,6 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
/* Release SSP */
pxa_ssp_free(ssp);
- /* Disconnect from the SPI framework */
- spi_unregister_master(drv_data->master);
-
return 0;
}
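
The pxa2xx warnings above drop the open-coded printk_ratelimit() guard in favour of dev_warn_ratelimited(), which combines the device prefix and the rate limiting in one call and keeps per-callsite limiter state instead of the global printk one. A before/after sketch with placeholder variables (dev, len and MAX_DMA_LEN stand in for the driver's own):

    /* before: global rate limit checked by hand */
    if (printk_ratelimit())
            dev_warn(dev, "DMA disabled for transfer length %ld greater than %d\n",
                     (long)len, MAX_DMA_LEN);

    /* after: same message, per-callsite rate limiting */
    dev_warn_ratelimited(dev, "DMA disabled for transfer length %ld greater than %d\n",
                         (long)len, MAX_DMA_LEN);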
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 8719206a03a0..58449ad4ad0d 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -59,6 +59,14 @@
#define RSPI_SPCMD6 0x1c
#define RSPI_SPCMD7 0x1e
+/* qspi only */
+#define QSPI_SPBFCR 0x18
+#define QSPI_SPBDCR 0x1a
+#define QSPI_SPBMUL0 0x1c
+#define QSPI_SPBMUL1 0x20
+#define QSPI_SPBMUL2 0x24
+#define QSPI_SPBMUL3 0x28
+
/* SPCR */
#define SPCR_SPRIE 0x80
#define SPCR_SPE 0x40
@@ -126,6 +134,8 @@
#define SPCMD_LSBF 0x1000
#define SPCMD_SPB_MASK 0x0f00
#define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
+#define SPCMD_SPB_8BIT 0x0000 /* qspi only */
+#define SPCMD_SPB_16BIT 0x0100
#define SPCMD_SPB_20BIT 0x0000
#define SPCMD_SPB_24BIT 0x0100
#define SPCMD_SPB_32BIT 0x0200
@@ -135,6 +145,10 @@
#define SPCMD_CPOL 0x0002
#define SPCMD_CPHA 0x0001
+/* SPBFCR */
+#define SPBFCR_TXRST 0x80 /* qspi only */
+#define SPBFCR_RXRST 0x40 /* qspi only */
+
struct rspi_data {
void __iomem *addr;
u32 max_speed_hz;
@@ -145,6 +159,7 @@ struct rspi_data {
spinlock_t lock;
struct clk *clk;
unsigned char spsr;
+ const struct spi_ops *ops;
/* for dmaengine */
struct dma_chan *chan_tx;
@@ -165,6 +180,11 @@ static void rspi_write16(struct rspi_data *rspi, u16 data, u16 offset)
iowrite16(data, rspi->addr + offset);
}
+static void rspi_write32(struct rspi_data *rspi, u32 data, u16 offset)
+{
+ iowrite32(data, rspi->addr + offset);
+}
+
static u8 rspi_read8(struct rspi_data *rspi, u16 offset)
{
return ioread8(rspi->addr + offset);
@@ -175,17 +195,103 @@ static u16 rspi_read16(struct rspi_data *rspi, u16 offset)
return ioread16(rspi->addr + offset);
}
-static unsigned char rspi_calc_spbr(struct rspi_data *rspi)
+/* optional functions */
+struct spi_ops {
+ int (*set_config_register)(struct rspi_data *rspi, int access_size);
+ int (*send_pio)(struct rspi_data *rspi, struct spi_message *mesg,
+ struct spi_transfer *t);
+ int (*receive_pio)(struct rspi_data *rspi, struct spi_message *mesg,
+ struct spi_transfer *t);
+
+};
+
+/*
+ * functions for RSPI
+ */
+static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
+{
+ int spbr;
+
+ /* Sets output mode (CMOS) and MOSI signal (from previous transfer) */
+ rspi_write8(rspi, 0x00, RSPI_SPPCR);
+
+ /* Sets transfer bit rate */
+ spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+
+ /* Sets number of frames to be used: 1 frame */
+ rspi_write8(rspi, 0x00, RSPI_SPDCR);
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Sets parity, interrupt mask */
+ rspi_write8(rspi, 0x00, RSPI_SPCR2);
+
+ /* Sets SPCMD */
+ rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
+ RSPI_SPCMD0);
+
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+
+ return 0;
+}
+
+/*
+ * functions for QSPI
+ */
+static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{
- int tmp;
- unsigned char spbr;
+ u16 spcmd;
+ int spbr;
+
+ /* Sets output mode (CMOS) and MOSI signal (from previous transfer) */
+ rspi_write8(rspi, 0x00, RSPI_SPPCR);
+
+ /* Sets transfer bit rate */
+ spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz);
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+
+ /* Sets number of frames to be used: 1 frame */
+ rspi_write8(rspi, 0x00, RSPI_SPDCR);
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Data Length Setting */
+ if (access_size == 8)
+ spcmd = SPCMD_SPB_8BIT;
+ else if (access_size == 16)
+ spcmd = SPCMD_SPB_16BIT;
+ else if (access_size == 32)
+ spcmd = SPCMD_SPB_32BIT;
+
+ spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SSLKP | SPCMD_SPNDEN;
+
+ /* Resets transfer data length */
+ rspi_write32(rspi, 0, QSPI_SPBMUL0);
+
+ /* Resets transmit and receive buffer */
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
+ /* Sets buffer to allow normal operation */
+ rspi_write8(rspi, 0x00, QSPI_SPBFCR);
+
+ /* Sets SPCMD */
+ rspi_write16(rspi, spcmd, RSPI_SPCMD0);
- tmp = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
- spbr = clamp(tmp, 0, 255);
+ /* Enables SPI function in master mode */
+ rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
- return spbr;
+ return 0;
}
+#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
+
static void rspi_enable_irq(struct rspi_data *rspi, u8 enable)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
@@ -220,54 +326,60 @@ static void rspi_negate_ssl(struct rspi_data *rspi)
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
}
-static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
+static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
+ struct spi_transfer *t)
{
- /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
- rspi_write8(rspi, 0x00, RSPI_SPPCR);
-
- /* Sets transfer bit rate */
- rspi_write8(rspi, rspi_calc_spbr(rspi), RSPI_SPBR);
-
- /* Sets number of frames to be used: 1 frame */
- rspi_write8(rspi, 0x00, RSPI_SPDCR);
+ int remain = t->len;
+ u8 *data;
- /* Sets RSPCK, SSL, next-access delay value */
- rspi_write8(rspi, 0x00, RSPI_SPCKD);
- rspi_write8(rspi, 0x00, RSPI_SSLND);
- rspi_write8(rspi, 0x00, RSPI_SPND);
+ data = (u8 *)t->tx_buf;
+ while (remain > 0) {
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
+ RSPI_SPCR);
- /* Sets parity, interrupt mask */
- rspi_write8(rspi, 0x00, RSPI_SPCR2);
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
+ dev_err(&rspi->master->dev,
+ "%s: tx empty timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
- /* Sets SPCMD */
- rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
- RSPI_SPCMD0);
+ rspi_write16(rspi, *data, RSPI_SPDR);
+ data++;
+ remain--;
+ }
- /* Sets RSPI mode */
- rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+ /* Waiting for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
return 0;
}
-static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
+static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
struct spi_transfer *t)
{
int remain = t->len;
u8 *data;
+ rspi_write8(rspi, SPBFCR_TXRST, QSPI_SPBFCR);
+ rspi_write8(rspi, 0x00, QSPI_SPBFCR);
+
data = (u8 *)t->tx_buf;
while (remain > 0) {
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
- RSPI_SPCR);
if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
dev_err(&rspi->master->dev,
"%s: tx empty timeout\n", __func__);
return -ETIMEDOUT;
}
+ rspi_write8(rspi, *data++, RSPI_SPDR);
+
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
+ dev_err(&rspi->master->dev,
+ "%s: receive timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ rspi_read8(rspi, RSPI_SPDR);
- rspi_write16(rspi, *data, RSPI_SPDR);
- data++;
remain--;
}
@@ -277,6 +389,8 @@ static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
return 0;
}
+#define send_pio(spi, mesg, t) spi->ops->send_pio(spi, mesg, t)
+
static void rspi_dma_complete(void *arg)
{
struct rspi_data *rspi = arg;
@@ -442,6 +556,51 @@ static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
return 0;
}
+static void qspi_receive_init(struct rspi_data *rspi)
+{
+ unsigned char spsr;
+
+ spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF)
+ rspi_read8(rspi, RSPI_SPDR); /* dummy read */
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
+ rspi_write8(rspi, 0x00, QSPI_SPBFCR);
+}
+
+static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int remain = t->len;
+ u8 *data;
+
+ qspi_receive_init(rspi);
+
+ data = (u8 *)t->rx_buf;
+ while (remain > 0) {
+
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
+ dev_err(&rspi->master->dev,
+ "%s: tx empty timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ /* dummy write to generate the clock */
+ rspi_write8(rspi, 0x00, RSPI_SPDR);
+
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
+ dev_err(&rspi->master->dev,
+ "%s: receive timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ /* SPDR allows 8, 16 or 32-bit access */
+ *data++ = rspi_read8(rspi, RSPI_SPDR);
+ remain--;
+ }
+
+ return 0;
+}
+
+#define receive_pio(spi, mesg, t) spi->ops->receive_pio(spi, mesg, t)
+
static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
struct scatterlist sg, sg_dummy;
@@ -581,7 +740,7 @@ static void rspi_work(struct work_struct *work)
if (rspi_is_dma(rspi, t))
ret = rspi_send_dma(rspi, t);
else
- ret = rspi_send_pio(rspi, mesg, t);
+ ret = send_pio(rspi, mesg, t);
if (ret < 0)
goto error;
}
@@ -589,7 +748,7 @@ static void rspi_work(struct work_struct *work)
if (rspi_is_dma(rspi, t))
ret = rspi_receive_dma(rspi, t);
else
- ret = rspi_receive_pio(rspi, mesg, t);
+ ret = receive_pio(rspi, mesg, t);
if (ret < 0)
goto error;
}
@@ -616,7 +775,7 @@ static int rspi_setup(struct spi_device *spi)
spi->bits_per_word = 8;
rspi->max_speed_hz = spi->max_speed_hz;
- rspi_set_config_register(rspi, 8);
+ set_config_register(rspi, 8);
return 0;
}
@@ -745,7 +904,16 @@ static int rspi_probe(struct platform_device *pdev)
struct rspi_data *rspi;
int ret, irq;
char clk_name[16];
-
+ struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
+ const struct spi_ops *ops;
+ const struct platform_device_id *id_entry = pdev->id_entry;
+
+ ops = (struct spi_ops *)id_entry->driver_data;
+ /* ops parameter check */
+ if (!ops->set_config_register) {
+ dev_err(&pdev->dev, "there is no set_config_register\n");
+ return -ENODEV;
+ }
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(res == NULL)) {
@@ -767,7 +935,7 @@ static int rspi_probe(struct platform_device *pdev)
rspi = spi_master_get_devdata(master);
platform_set_drvdata(pdev, rspi);
-
+ rspi->ops = ops;
rspi->master = master;
rspi->addr = ioremap(res->start, resource_size(res));
if (rspi->addr == NULL) {
@@ -776,7 +944,7 @@ static int rspi_probe(struct platform_device *pdev)
goto error1;
}
- snprintf(clk_name, sizeof(clk_name), "rspi%d", pdev->id);
+ snprintf(clk_name, sizeof(clk_name), "%s%d", id_entry->name, pdev->id);
rspi->clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(rspi->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
@@ -790,7 +958,10 @@ static int rspi_probe(struct platform_device *pdev)
INIT_WORK(&rspi->ws, rspi_work);
init_waitqueue_head(&rspi->wait);
- master->num_chipselect = 2;
+ master->num_chipselect = rspi_pd->num_chipselect;
+ if (!master->num_chipselect)
+ master->num_chipselect = 2; /* default */
+
master->bus_num = pdev->id;
master->setup = rspi_setup;
master->transfer = rspi_transfer;
@@ -832,11 +1003,32 @@ error1:
return ret;
}
+static struct spi_ops rspi_ops = {
+ .set_config_register = rspi_set_config_register,
+ .send_pio = rspi_send_pio,
+ .receive_pio = rspi_receive_pio,
+};
+
+static struct spi_ops qspi_ops = {
+ .set_config_register = qspi_set_config_register,
+ .send_pio = qspi_send_pio,
+ .receive_pio = qspi_receive_pio,
+};
+
+static struct platform_device_id spi_driver_ids[] = {
+ { "rspi", (kernel_ulong_t)&rspi_ops },
+ { "qspi", (kernel_ulong_t)&qspi_ops },
+ {},
+};
+
+MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+
static struct platform_driver rspi_driver = {
.probe = rspi_probe,
.remove = rspi_remove,
+ .id_table = spi_driver_ids,
.driver = {
- .name = "rspi",
+ .name = "renesas_spi",
.owner = THIS_MODULE,
},
};
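
The rspi rework keys every RSPI/QSPI difference off an ops table carried in the platform_device_id's driver_data, so probe picks the variant once and the transfer paths just call through function pointers. The lookup, reduced to a sketch (all foo_* identifiers are placeholders):

    struct foo_data;                        /* driver-private state */

    struct foo_ops {
            int (*set_config_register)(struct foo_data *d, int access_size);
    };

    static const struct foo_ops foo_a_ops;  /* variant A implementation */
    static const struct foo_ops foo_b_ops;  /* variant B implementation */

    static const struct platform_device_id foo_ids[] = {
            { "foo-a", (kernel_ulong_t)&foo_a_ops },
            { "foo-b", (kernel_ulong_t)&foo_b_ops },
            { /* sentinel */ },
    };
    MODULE_DEVICE_TABLE(platform, foo_ids);

    static int foo_probe(struct platform_device *pdev)
    {
            const struct foo_ops *ops =
                    (const struct foo_ops *)pdev->id_entry->driver_data;

            if (!ops || !ops->set_config_register)
                    return -ENODEV;

            /* stash ops in the devdata and dispatch through it from now on */
            return 0;
    }

The hunk above does the equivalent with struct spi_ops, checking set_config_register before going any further; note that id_entry is only filled in when the device was matched through the id table.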
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index ce318d95a6ee..0dc32a11bd3c 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -280,7 +280,7 @@ static inline u32 ack_bit(unsigned int irq)
* so the caller does not need to do anything more than start the transfer
* as normal, since the IRQ will have been re-routed to the FIQ handler.
*/
-void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
+static void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
{
struct pt_regs regs;
enum spi_fiq_mode mode;
@@ -524,7 +524,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
hw = spi_master_get_devdata(master);
memset(hw, 0, sizeof(struct s3c24xx_spi));
- hw->master = spi_master_get(master);
+ hw->master = master;
hw->pdata = pdata = dev_get_platdata(&pdev->dev);
hw->dev = &pdev->dev;
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index a80376dc3a10..9e2020df9e0f 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -205,7 +205,6 @@ struct s3c64xx_spi_driver_data {
#endif
struct s3c64xx_spi_port_config *port_conf;
unsigned int port_id;
- unsigned long gpios[4];
bool cs_gpio;
};
@@ -559,25 +558,18 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi)
{
- struct s3c64xx_spi_csinfo *cs;
-
if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
/* Deselect the last toggled device */
- cs = sdd->tgl_spi->controller_data;
- if (sdd->cs_gpio)
- gpio_set_value(cs->line,
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio,
spi->mode & SPI_CS_HIGH ? 0 : 1);
}
sdd->tgl_spi = NULL;
}
- cs = spi->controller_data;
- if (sdd->cs_gpio)
- gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
-
- /* Start the signals */
- writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0);
}
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
@@ -702,16 +694,11 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi)
{
- struct s3c64xx_spi_csinfo *cs = spi->controller_data;
-
if (sdd->tgl_spi == spi)
sdd->tgl_spi = NULL;
- if (sdd->cs_gpio)
- gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
-
- /* Quiese the signals */
- writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
}
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
@@ -862,16 +849,12 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
}
}
-static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
+static int s3c64xx_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct spi_device *spi = msg->spi;
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
- struct spi_transfer *xfer;
- int status = 0, cs_toggle = 0;
- u32 speed;
- u8 bpw;
/* If Master's(controller) state differs from that needed by Slave */
if (sdd->cur_speed != spi->max_speed_hz
@@ -887,106 +870,98 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
if (s3c64xx_spi_map_mssg(sdd, msg)) {
dev_err(&spi->dev,
"Xfer: Unable to map message buffers!\n");
- status = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
/* Configure feedback delay */
writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
- unsigned long flags;
- int use_dma;
-
- INIT_COMPLETION(sdd->xfer_completion);
-
- /* Only BPW and Speed may change across transfers */
- bpw = xfer->bits_per_word;
- speed = xfer->speed_hz ? : spi->max_speed_hz;
-
- if (xfer->len % (bpw / 8)) {
- dev_err(&spi->dev,
- "Xfer length(%u) not a multiple of word size(%u)\n",
- xfer->len, bpw / 8);
- status = -EIO;
- goto out;
- }
+ return 0;
+}
- if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
- sdd->cur_bpw = bpw;
- sdd->cur_speed = speed;
- s3c64xx_spi_config(sdd);
- }
+static int s3c64xx_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ int status;
+ u32 speed;
+ u8 bpw;
+ unsigned long flags;
+ int use_dma;
- /* Polling method for xfers not bigger than FIFO capacity */
- use_dma = 0;
- if (!is_polling(sdd) &&
- (sdd->rx_dma.ch && sdd->tx_dma.ch &&
- (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
- use_dma = 1;
+ INIT_COMPLETION(sdd->xfer_completion);
- spin_lock_irqsave(&sdd->lock, flags);
+ /* Only BPW and Speed may change across transfers */
+ bpw = xfer->bits_per_word;
+ speed = xfer->speed_hz ? : spi->max_speed_hz;
- /* Pending only which is to be done */
- sdd->state &= ~RXBUSY;
- sdd->state &= ~TXBUSY;
+ if (xfer->len % (bpw / 8)) {
+ dev_err(&spi->dev,
+ "Xfer length(%u) not a multiple of word size(%u)\n",
+ xfer->len, bpw / 8);
+ return -EIO;
+ }
- enable_datapath(sdd, spi, xfer, use_dma);
+ if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
+ sdd->cur_bpw = bpw;
+ sdd->cur_speed = speed;
+ s3c64xx_spi_config(sdd);
+ }
- /* Slave Select */
- enable_cs(sdd, spi);
+ /* Polling method for xfers not bigger than FIFO capacity */
+ use_dma = 0;
+ if (!is_polling(sdd) &&
+ (sdd->rx_dma.ch && sdd->tx_dma.ch &&
+ (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
+ use_dma = 1;
- spin_unlock_irqrestore(&sdd->lock, flags);
+ spin_lock_irqsave(&sdd->lock, flags);
- status = wait_for_xfer(sdd, xfer, use_dma);
+ /* Pending only which is to be done */
+ sdd->state &= ~RXBUSY;
+ sdd->state &= ~TXBUSY;
- if (status) {
- dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
- xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
- (sdd->state & RXBUSY) ? 'f' : 'p',
- (sdd->state & TXBUSY) ? 'f' : 'p',
- xfer->len);
+ enable_datapath(sdd, spi, xfer, use_dma);
- if (use_dma) {
- if (xfer->tx_buf != NULL
- && (sdd->state & TXBUSY))
- s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
- if (xfer->rx_buf != NULL
- && (sdd->state & RXBUSY))
- s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
- }
+ /* Start the signals */
+ writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- goto out;
- }
+ /* Start the signals */
+ writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
+ spin_unlock_irqrestore(&sdd->lock, flags);
- if (xfer->cs_change) {
- /* Hint that the next mssg is gonna be
- for the same device */
- if (list_is_last(&xfer->transfer_list,
- &msg->transfers))
- cs_toggle = 1;
+ status = wait_for_xfer(sdd, xfer, use_dma);
+
+ if (status) {
+ dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
+ xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
+ (sdd->state & RXBUSY) ? 'f' : 'p',
+ (sdd->state & TXBUSY) ? 'f' : 'p',
+ xfer->len);
+
+ if (use_dma) {
+ if (xfer->tx_buf != NULL
+ && (sdd->state & TXBUSY))
+ s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
+ if (xfer->rx_buf != NULL
+ && (sdd->state & RXBUSY))
+ s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
}
-
- msg->actual_length += xfer->len;
-
+ } else {
flush_fifo(sdd);
}
-out:
- if (!cs_toggle || status)
- disable_cs(sdd, spi);
- else
- sdd->tgl_spi = spi;
-
- s3c64xx_spi_unmap_mssg(sdd, msg);
+ return status;
+}
- msg->status = status;
+static int s3c64xx_spi_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- spi_finalize_current_message(master);
+ s3c64xx_spi_unmap_mssg(sdd, msg);
return 0;
}
@@ -1071,6 +1046,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
cs->line, err);
goto err_gpio_req;
}
+
+ spi->cs_gpio = cs->line;
}
spi_set_ctldata(spi, cs);
@@ -1117,11 +1094,14 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
}
pm_runtime_put(&sdd->pdev->dev);
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
disable_cs(sdd, spi);
return 0;
setup_exit:
+ pm_runtime_put(&sdd->pdev->dev);
/* setup() returns with device de-selected */
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
disable_cs(sdd, spi);
gpio_free(cs->line);
@@ -1140,8 +1120,8 @@ static void s3c64xx_spi_cleanup(struct spi_device *spi)
struct s3c64xx_spi_driver_data *sdd;
sdd = spi_master_get_devdata(spi->master);
- if (cs && sdd->cs_gpio) {
- gpio_free(cs->line);
+ if (spi->cs_gpio) {
+ gpio_free(spi->cs_gpio);
if (spi->dev.of_node)
kfree(cs);
}
@@ -1359,7 +1339,9 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->setup = s3c64xx_spi_setup;
master->cleanup = s3c64xx_spi_cleanup;
master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
- master->transfer_one_message = s3c64xx_spi_transfer_one_message;
+ master->prepare_message = s3c64xx_spi_prepare_message;
+ master->transfer_one = s3c64xx_spi_transfer_one;
+ master->unprepare_message = s3c64xx_spi_unprepare_message;
master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
master->num_chipselect = sci->num_cs;
master->dma_alignment = 8;
@@ -1428,11 +1410,12 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
sdd->regs + S3C64XX_SPI_INT_EN);
+ pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- if (spi_register_master(master)) {
- dev_err(&pdev->dev, "cannot register SPI master\n");
- ret = -EBUSY;
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
goto err3;
}
@@ -1461,16 +1444,12 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
- spi_unregister_master(master);
-
writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
clk_disable_unprepare(sdd->src_clk);
clk_disable_unprepare(sdd->clk);
- spi_master_put(master);
-
return 0;
}
@@ -1480,11 +1459,14 @@ static int s3c64xx_spi_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- spi_master_suspend(master);
+ int ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
- /* Disable the clock */
- clk_disable_unprepare(sdd->src_clk);
- clk_disable_unprepare(sdd->clk);
+ if (!pm_runtime_suspended(dev)) {
+ clk_disable_unprepare(sdd->clk);
+ clk_disable_unprepare(sdd->src_clk);
+ }
sdd->cur_speed = 0; /* Output Clock is stopped */
@@ -1500,15 +1482,14 @@ static int s3c64xx_spi_resume(struct device *dev)
if (sci->cfg_gpio)
sci->cfg_gpio();
- /* Enable the clock */
- clk_prepare_enable(sdd->src_clk);
- clk_prepare_enable(sdd->clk);
+ if (!pm_runtime_suspended(dev)) {
+ clk_prepare_enable(sdd->src_clk);
+ clk_prepare_enable(sdd->clk);
+ }
s3c64xx_spi_hwinit(sdd, sdd->port_id);
- spi_master_resume(master);
-
- return 0;
+ return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */
@@ -1528,9 +1509,17 @@ static int s3c64xx_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ int ret;
- clk_prepare_enable(sdd->src_clk);
- clk_prepare_enable(sdd->clk);
+ ret = clk_prepare_enable(sdd->src_clk);
+ if (ret != 0)
+ return ret;
+
+ ret = clk_prepare_enable(sdd->clk);
+ if (ret != 0) {
+ clk_disable_unprepare(sdd->src_clk);
+ return ret;
+ }
return 0;
}
@@ -1616,6 +1605,18 @@ static struct platform_device_id s3c64xx_spi_driver_ids[] = {
};
static const struct of_device_id s3c64xx_spi_dt_match[] = {
+ { .compatible = "samsung,s3c2443-spi",
+ .data = (void *)&s3c2443_spi_port_config,
+ },
+ { .compatible = "samsung,s3c6410-spi",
+ .data = (void *)&s3c6410_spi_port_config,
+ },
+ { .compatible = "samsung,s5pc100-spi",
+ .data = (void *)&s5pc100_spi_port_config,
+ },
+ { .compatible = "samsung,s5pv210-spi",
+ .data = (void *)&s5pv210_spi_port_config,
+ },
{ .compatible = "samsung,exynos4210-spi",
.data = (void *)&exynos4_spi_port_config,
},
@@ -1633,22 +1634,13 @@ static struct platform_driver s3c64xx_spi_driver = {
.pm = &s3c64xx_spi_pm,
.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
},
+ .probe = s3c64xx_spi_probe,
.remove = s3c64xx_spi_remove,
.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");
-static int __init s3c64xx_spi_init(void)
-{
- return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
-}
-subsys_initcall(s3c64xx_spi_init);
-
-static void __exit s3c64xx_spi_exit(void)
-{
- platform_driver_unregister(&s3c64xx_spi_driver);
-}
-module_exit(s3c64xx_spi_exit);
+module_platform_driver(s3c64xx_spi_driver);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index e488a90a98b8..bc38aaf4e1f9 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -137,7 +137,7 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
rate /= 16;
/* CLKCx calculation */
- rate /= (((idiv_clk & 0x1F) + 1) * 2) ;
+ rate /= (((idiv_clk & 0x1F) + 1) * 2);
/* save best settings */
tmp = abs(target_rate - rate);
@@ -305,7 +305,7 @@ static int hspi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->auto_runtime_pm = true;
master->transfer_one_message = hspi_transfer_one_message;
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
goto error1;
@@ -328,7 +328,6 @@ static int hspi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
clk_put(hspi->clk);
- spi_unregister_master(hspi->master);
return 0;
}
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index 8eefeb6007df..38eb24df796c 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -133,7 +133,7 @@ static int sh_sci_spi_probe(struct platform_device *dev)
sp->info = dev_get_platdata(&dev->dev);
/* setup spi bitbang adaptor */
- sp->bitbang.master = spi_master_get(master);
+ sp->bitbang.master = master;
sp->bitbang.master->bus_num = sp->info->bus_num;
sp->bitbang.master->num_chipselect = sp->info->num_chipselect;
sp->bitbang.chipselect = sh_sci_spi_chipselect;
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index a1f21b747733..592b4aff651f 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -632,7 +632,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
if (ret)
goto free_master;
- sspi->bitbang.master = spi_master_get(master);
+ sspi->bitbang.master = master;
sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 145dd435483b..9146bb3c2489 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -182,6 +182,7 @@ struct tegra_spi_data {
u32 cur_speed;
struct spi_device *cur_spi;
+ struct spi_device *cs_control;
unsigned cur_pos;
unsigned cur_len;
unsigned words_per_32bit;
@@ -267,7 +268,7 @@ static unsigned tegra_spi_calculate_curr_xfer_param(
unsigned max_len;
unsigned total_fifo_words;
- tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
+ tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
if (bits_per_word == 8 || bits_per_word == 16) {
tspi->is_packed = 1;
@@ -676,15 +677,12 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
dma_release_channel(dma_chan);
}
-static int tegra_spi_start_transfer_one(struct spi_device *spi,
- struct spi_transfer *t, bool is_first_of_msg,
- bool is_single_xfer)
+static unsigned long tegra_spi_setup_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
u32 speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
- unsigned total_fifo_words;
- int ret;
unsigned long command1;
int req_mode;
@@ -698,7 +696,6 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
tspi->cur_rx_pos = 0;
tspi->cur_tx_pos = 0;
tspi->curr_xfer = t;
- total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
if (is_first_of_msg) {
tegra_spi_clear_status(tspi);
@@ -717,7 +714,12 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
else if (req_mode == SPI_MODE_3)
command1 |= SPI_CONTROL_MODE_3;
- tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+ if (tspi->cs_control) {
+ if (tspi->cs_control != spi)
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+ tspi->cs_control = NULL;
+ } else
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
command1 |= SPI_CS_SW_HW;
if (spi->mode & SPI_CS_HIGH)
@@ -732,6 +734,18 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
}
+ return command1;
+}
+
+static int tegra_spi_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, unsigned long command1)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ unsigned total_fifo_words;
+ int ret;
+
+ total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
+
if (tspi->is_packed)
command1 |= SPI_PACKED;
@@ -803,29 +817,50 @@ static int tegra_spi_setup(struct spi_device *spi)
return 0;
}
+static void tegra_spi_transfer_delay(int delay)
+{
+ if (!delay)
+ return;
+
+ if (delay >= 1000)
+ mdelay(delay / 1000);
+
+ udelay(delay % 1000);
+}
+
static int tegra_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
bool is_first_msg = true;
- int single_xfer;
struct tegra_spi_data *tspi = spi_master_get_devdata(master);
struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
int ret;
+ bool skip = false;
msg->status = 0;
msg->actual_length = 0;
- single_xfer = list_is_singular(&msg->transfers);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ unsigned long cmd1;
+
INIT_COMPLETION(tspi->xfer_completion);
- ret = tegra_spi_start_transfer_one(spi, xfer,
- is_first_msg, single_xfer);
+
+ cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg);
+
+ if (!xfer->len) {
+ ret = 0;
+ skip = true;
+ goto complete_xfer;
+ }
+
+ ret = tegra_spi_start_transfer_one(spi, xfer, cmd1);
if (ret < 0) {
dev_err(tspi->dev,
"spi can not start transfer, err %d\n", ret);
- goto exit;
+ goto complete_xfer;
}
+
is_first_msg = false;
ret = wait_for_completion_timeout(&tspi->xfer_completion,
SPI_DMA_TIMEOUT);
@@ -833,24 +868,40 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
dev_err(tspi->dev,
"spi trasfer timeout, err %d\n", ret);
ret = -EIO;
- goto exit;
+ goto complete_xfer;
}
if (tspi->tx_status || tspi->rx_status) {
dev_err(tspi->dev, "Error in Transfer\n");
ret = -EIO;
- goto exit;
+ goto complete_xfer;
}
msg->actual_length += xfer->len;
- if (xfer->cs_change && xfer->delay_usecs) {
+
+complete_xfer:
+ if (ret < 0 || skip) {
tegra_spi_writel(tspi, tspi->def_command1_reg,
SPI_COMMAND1);
- udelay(xfer->delay_usecs);
+ tegra_spi_transfer_delay(xfer->delay_usecs);
+ goto exit;
+ } else if (msg->transfers.prev == &xfer->transfer_list) {
+ /* This is the last transfer in message */
+ if (xfer->cs_change)
+ tspi->cs_control = spi;
+ else {
+ tegra_spi_writel(tspi, tspi->def_command1_reg,
+ SPI_COMMAND1);
+ tegra_spi_transfer_delay(xfer->delay_usecs);
+ }
+ } else if (xfer->cs_change) {
+ tegra_spi_writel(tspi, tspi->def_command1_reg,
+ SPI_COMMAND1);
+ tegra_spi_transfer_delay(xfer->delay_usecs);
}
+
}
ret = 0;
exit:
- tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
msg->status = ret;
spi_finalize_current_message(master);
return ret;
@@ -1115,7 +1166,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
master->dev.of_node = pdev->dev.of_node;
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "can not register to master err %d\n", ret);
goto exit_pm_disable;
@@ -1142,7 +1193,6 @@ static int tegra_spi_remove(struct platform_device *pdev)
struct tegra_spi_data *tspi = spi_master_get_devdata(master);
free_irq(tspi->irq, tspi);
- spi_unregister_master(master);
if (tspi->tx_dma_chan)
tegra_spi_deinit_dma_param(tspi, false);
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 1d814dc6e000..79be8ce6a9d1 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -173,7 +173,7 @@ static unsigned tegra_sflash_calculate_curr_xfer_param(
unsigned remain_len = t->len - tsd->cur_pos;
unsigned max_word;
- tsd->bytes_per_word = (t->bits_per_word - 1) / 8 + 1;
+ tsd->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8);
max_word = remain_len / tsd->bytes_per_word;
if (max_word > SPI_FIFO_DEPTH)
max_word = SPI_FIFO_DEPTH;
@@ -529,7 +529,7 @@ static int tegra_sflash_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
master->dev.of_node = pdev->dev.of_node;
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "can not register to master err %d\n", ret);
goto exit_pm_disable;
@@ -553,7 +553,6 @@ static int tegra_sflash_remove(struct platform_device *pdev)
struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
free_irq(tsd->irq, tsd);
- spi_unregister_master(master);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index c70353672a23..af0a67886ae8 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -278,12 +278,12 @@ static unsigned tegra_slink_calculate_curr_xfer_param(
{
unsigned remain_len = t->len - tspi->cur_pos;
unsigned max_word;
- unsigned bits_per_word ;
+ unsigned bits_per_word;
unsigned max_len;
unsigned total_fifo_words;
bits_per_word = t->bits_per_word;
- tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
+ tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
if (bits_per_word == 8 || bits_per_word == 16) {
tspi->is_packed = 1;
@@ -707,8 +707,7 @@ static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi,
}
static int tegra_slink_start_transfer_one(struct spi_device *spi,
- struct spi_transfer *t, bool is_first_of_msg,
- bool is_single_xfer)
+ struct spi_transfer *t)
{
struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
u32 speed;
@@ -732,32 +731,12 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
tspi->curr_xfer = t;
total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t);
- if (is_first_of_msg) {
- tegra_slink_clear_status(tspi);
+ command = tspi->command_reg;
+ command &= ~SLINK_BIT_LENGTH(~0);
+ command |= SLINK_BIT_LENGTH(bits_per_word - 1);
- command = tspi->def_command_reg;
- command |= SLINK_BIT_LENGTH(bits_per_word - 1);
- command |= SLINK_CS_SW | SLINK_CS_VALUE;
-
- command2 = tspi->def_command2_reg;
- command2 |= SLINK_SS_EN_CS(spi->chip_select);
-
- command &= ~SLINK_MODES;
- if (spi->mode & SPI_CPHA)
- command |= SLINK_CK_SDA;
-
- if (spi->mode & SPI_CPOL)
- command |= SLINK_IDLE_SCLK_DRIVE_HIGH;
- else
- command |= SLINK_IDLE_SCLK_DRIVE_LOW;
- } else {
- command = tspi->command_reg;
- command &= ~SLINK_BIT_LENGTH(~0);
- command |= SLINK_BIT_LENGTH(bits_per_word - 1);
-
- command2 = tspi->command2_reg;
- command2 &= ~(SLINK_RXEN | SLINK_TXEN);
- }
+ command2 = tspi->command2_reg;
+ command2 &= ~(SLINK_RXEN | SLINK_TXEN);
tegra_slink_writel(tspi, command, SLINK_COMMAND);
tspi->command_reg = command;
@@ -824,58 +803,72 @@ static int tegra_slink_setup(struct spi_device *spi)
return 0;
}
-static int tegra_slink_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
+static int tegra_slink_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
- bool is_first_msg = true;
- int single_xfer;
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
- struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
- int ret;
- msg->status = 0;
- msg->actual_length = 0;
+ tegra_slink_clear_status(tspi);
- single_xfer = list_is_singular(&msg->transfers);
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- INIT_COMPLETION(tspi->xfer_completion);
- ret = tegra_slink_start_transfer_one(spi, xfer,
- is_first_msg, single_xfer);
- if (ret < 0) {
- dev_err(tspi->dev,
- "spi can not start transfer, err %d\n", ret);
- goto exit;
- }
- is_first_msg = false;
- ret = wait_for_completion_timeout(&tspi->xfer_completion,
- SLINK_DMA_TIMEOUT);
- if (WARN_ON(ret == 0)) {
- dev_err(tspi->dev,
- "spi trasfer timeout, err %d\n", ret);
- ret = -EIO;
- goto exit;
- }
+ tspi->command_reg = tspi->def_command_reg;
+ tspi->command_reg |= SLINK_CS_SW | SLINK_CS_VALUE;
- if (tspi->tx_status || tspi->rx_status) {
- dev_err(tspi->dev, "Error in Transfer\n");
- ret = -EIO;
- goto exit;
- }
- msg->actual_length += xfer->len;
- if (xfer->cs_change && xfer->delay_usecs) {
- tegra_slink_writel(tspi, tspi->def_command_reg,
- SLINK_COMMAND);
- udelay(xfer->delay_usecs);
- }
+ tspi->command2_reg = tspi->def_command2_reg;
+ tspi->command2_reg |= SLINK_SS_EN_CS(spi->chip_select);
+
+ tspi->command_reg &= ~SLINK_MODES;
+ if (spi->mode & SPI_CPHA)
+ tspi->command_reg |= SLINK_CK_SDA;
+
+ if (spi->mode & SPI_CPOL)
+ tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_HIGH;
+ else
+ tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_LOW;
+
+ return 0;
+}
+
+static int tegra_slink_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ INIT_COMPLETION(tspi->xfer_completion);
+ ret = tegra_slink_start_transfer_one(spi, xfer);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "spi can not start transfer, err %d\n", ret);
+ return ret;
}
- ret = 0;
-exit:
+
+ ret = wait_for_completion_timeout(&tspi->xfer_completion,
+ SLINK_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tspi->dev,
+ "spi trasfer timeout, err %d\n", ret);
+ return -EIO;
+ }
+
+ if (tspi->tx_status)
+ return tspi->tx_status;
+ if (tspi->rx_status)
+ return tspi->rx_status;
+
+ return 0;
+}
+
+static int tegra_slink_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
- msg->status = ret;
- spi_finalize_current_message(master);
- return ret;
+
+ return 0;
}
static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi)
@@ -1078,7 +1071,9 @@ static int tegra_slink_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = tegra_slink_setup;
- master->transfer_one_message = tegra_slink_transfer_one_message;
+ master->prepare_message = tegra_slink_prepare_message;
+ master->transfer_one = tegra_slink_transfer_one;
+ master->unprepare_message = tegra_slink_unprepare_message;
master->auto_runtime_pm = true;
master->num_chipselect = MAX_CHIP_SELECT;
master->bus_num = -1;
@@ -1164,7 +1159,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
master->dev.of_node = pdev->dev.of_node;
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "can not register to master err %d\n", ret);
goto exit_pm_disable;
@@ -1191,7 +1186,6 @@ static int tegra_slink_remove(struct platform_device *pdev)
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
free_irq(tspi->irq, tspi);
- spi_unregister_master(master);
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
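
The spi-tegra20-slink.c rework above replaces one big transfer_one_message() with the three finer-grained hooks the core now understands. Roughly, the core calls prepare_message() once per message, transfer_one() once per spi_transfer, and unprepare_message() at the end; the sketch below is a simplified view of that flow (error paths, chip-select handling and tracing elided), not the exact core code:

    #include <linux/list.h>
    #include <linux/spi/spi.h>

    static int run_one_message(struct spi_master *master, struct spi_message *msg)
    {
    	struct spi_transfer *xfer;
    	int ret = 0;

    	if (master->prepare_message) {		/* per-message setup */
    		ret = master->prepare_message(master, msg);
    		if (ret)
    			return ret;
    	}

    	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
    		/*
    		 * A positive return means the transfer finishes
    		 * asynchronously; the core then waits for
    		 * spi_finalize_current_transfer().  That wait is elided.
    		 */
    		ret = master->transfer_one(master, msg->spi, xfer);
    		if (ret < 0)
    			break;
    	}

    	if (master->unprepare_message)		/* per-message teardown */
    		master->unprepare_message(master, msg);

    	return ret < 0 ? ret : 0;
    }
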
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index e12d962a289f..0b71270fbf67 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -41,9 +41,6 @@ struct ti_qspi_regs {
struct ti_qspi {
struct completion transfer_complete;
- /* IRQ synchronization */
- spinlock_t lock;
-
/* list synchronization */
struct mutex list_lock;
@@ -57,7 +54,6 @@ struct ti_qspi {
u32 spi_max_frequency;
u32 cmd;
u32 dc;
- u32 stat;
};
#define QSPI_PID (0x0)
@@ -397,13 +393,12 @@ static irqreturn_t ti_qspi_isr(int irq, void *dev_id)
{
struct ti_qspi *qspi = dev_id;
u16 int_stat;
+ u32 stat;
irqreturn_t ret = IRQ_HANDLED;
- spin_lock(&qspi->lock);
-
int_stat = ti_qspi_read(qspi, QSPI_INTR_STATUS_ENABLED_CLEAR);
- qspi->stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
if (!int_stat) {
dev_dbg(qspi->dev, "No IRQ triggered\n");
@@ -411,35 +406,14 @@ static irqreturn_t ti_qspi_isr(int irq, void *dev_id)
goto out;
}
- ret = IRQ_WAKE_THREAD;
-
- ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG);
ti_qspi_write(qspi, QSPI_WC_INT_DISABLE,
QSPI_INTR_STATUS_ENABLED_CLEAR);
-
+ if (stat & WC)
+ complete(&qspi->transfer_complete);
out:
- spin_unlock(&qspi->lock);
-
return ret;
}
-static irqreturn_t ti_qspi_threaded_isr(int this_irq, void *dev_id)
-{
- struct ti_qspi *qspi = dev_id;
- unsigned long flags;
-
- spin_lock_irqsave(&qspi->lock, flags);
-
- if (qspi->stat & WC)
- complete(&qspi->transfer_complete);
-
- spin_unlock_irqrestore(&qspi->lock, flags);
-
- ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG);
-
- return IRQ_HANDLED;
-}
-
static int ti_qspi_runtime_resume(struct device *dev)
{
struct ti_qspi *qspi;
@@ -472,7 +446,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
- master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
master->bus_num = -1;
master->flags = SPI_MASTER_HALF_DUPLEX;
@@ -499,7 +473,6 @@ static int ti_qspi_probe(struct platform_device *pdev)
return irq;
}
- spin_lock_init(&qspi->lock);
mutex_init(&qspi->list_lock);
qspi->base = devm_ioremap_resource(&pdev->dev, r);
@@ -508,8 +481,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
goto free_master;
}
- ret = devm_request_threaded_irq(&pdev->dev, irq, ti_qspi_isr,
- ti_qspi_threaded_isr, 0,
+ ret = devm_request_irq(&pdev->dev, irq, ti_qspi_isr, 0,
dev_name(&pdev->dev), qspi);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
@@ -532,7 +504,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
qspi->spi_max_frequency = max_freq;
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
goto free_master;
@@ -547,7 +519,7 @@ static int ti_qspi_remove(struct platform_device *pdev)
{
struct ti_qspi *qspi = platform_get_drvdata(pdev);
- spi_unregister_master(qspi->master);
+ ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG);
return 0;
}
@@ -558,7 +530,7 @@ static const struct dev_pm_ops ti_qspi_pm_ops = {
static struct platform_driver ti_qspi_driver = {
.probe = ti_qspi_probe,
- .remove = ti_qspi_remove,
+ .remove = ti_qspi_remove,
.driver = {
.name = "ti,dra7xxx-qspi",
.owner = THIS_MODULE,
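
With the word-complete bit checked directly in the hard interrupt handler, the threaded half and the spinlock that guarded qspi->stat become unnecessary; complete() may be called from interrupt context. A trimmed sketch of the resulting handler shape (register names are the driver's own; the IRQ_NONE return for a spurious interrupt is an assumption):

    static irqreturn_t ti_qspi_isr(int irq, void *dev_id)
    {
    	struct ti_qspi *qspi = dev_id;
    	u16 int_stat;
    	u32 stat;

    	int_stat = ti_qspi_read(qspi, QSPI_INTR_STATUS_ENABLED_CLEAR);
    	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
    	if (!int_stat)
    		return IRQ_NONE;		/* assumed: not our interrupt */

    	ti_qspi_write(qspi, QSPI_WC_INT_DISABLE,
    		      QSPI_INTR_STATUS_ENABLED_CLEAR);
    	if (stat & WC)				/* word complete */
    		complete(&qspi->transfer_complete);

    	return IRQ_HANDLED;
    }
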
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index eaeeed51bbbf..446131308acb 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -506,8 +506,8 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
goto err_out;
}
- dev_dbg(&pspi->dev, "%s Transfer List not empty. "
- "Transfer Speed is set.\n", __func__);
+ dev_dbg(&pspi->dev,
+ "%s Transfer List not empty. Transfer Speed is set.\n", __func__);
spin_lock_irqsave(&data->lock, flags);
/* validate Tx/Rx buffers and Transfer length */
@@ -526,8 +526,9 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
goto err_return_spinlock;
}
- dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length"
- " valid\n", __func__);
+ dev_dbg(&pspi->dev,
+ "%s Tx/Rx buffer valid. Transfer length valid\n",
+ __func__);
/* if baud rate has been specified validate the same */
if (transfer->speed_hz > PCH_MAX_BAUDRATE)
@@ -1181,8 +1182,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
spin_lock(&data->lock);
/* check if suspend has been initiated;if yes flush queue */
if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
- dev_dbg(&data->master->dev, "%s suspend/remove initiated,"
- "flushing queue\n", __func__);
+ dev_dbg(&data->master->dev,
+ "%s suspend/remove initiated, flushing queue\n", __func__);
list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -EIO;
@@ -1410,13 +1411,13 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
/* baseaddress + address offset) */
data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
PCH_ADDRESS_SIZE * plat_dev->id;
- data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0) +
- PCH_ADDRESS_SIZE * plat_dev->id;
+ data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
if (!data->io_remap_addr) {
dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
ret = -ENOMEM;
goto err_pci_iomap;
}
+ data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id;
dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
plat_dev->id, data->io_remap_addr);
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 7c6d15766c72..637cce2b8bdd 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -177,7 +177,7 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
| 0x08,
TXx9_SPCR0);
- list_for_each_entry (t, &m->transfers, transfer_list) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
const void *txbuf = t->tx_buf;
void *rxbuf = t->rx_buf;
u32 data;
@@ -308,7 +308,7 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
m->actual_length = 0;
/* check each transfer's parameters */
- list_for_each_entry (t, &m->transfers, transfer_list) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
u8 bits_per_word = t->bits_per_word;
@@ -406,7 +406,7 @@ static int txx9spi_probe(struct platform_device *dev)
master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&dev->dev, master);
if (ret)
goto exit;
return 0;
@@ -428,11 +428,9 @@ static int txx9spi_remove(struct platform_device *dev)
struct spi_master *master = spi_master_get(platform_get_drvdata(dev));
struct txx9spi *c = spi_master_get_devdata(master);
- spi_unregister_master(master);
destroy_workqueue(c->workqueue);
clk_disable(c->clk);
clk_put(c->clk);
- spi_master_put(master);
return 0;
}
@@ -440,6 +438,7 @@ static int txx9spi_remove(struct platform_device *dev)
MODULE_ALIAS("platform:spi_txx9");
static struct platform_driver txx9spi_driver = {
+ .probe = txx9spi_probe,
.remove = txx9spi_remove,
.driver = {
.name = "spi_txx9",
@@ -449,7 +448,7 @@ static struct platform_driver txx9spi_driver = {
static int __init txx9spi_init(void)
{
- return platform_driver_probe(&txx9spi_driver, txx9spi_probe);
+ return platform_driver_register(&txx9spi_driver);
}
subsys_initcall(txx9spi_init);
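
platform_driver_probe() only binds devices that already exist when it is called and then discards the probe routine, which also rules out deferred probing; registering an ordinary .probe with platform_driver_register(), as done above, avoids both limitations. For drivers without the subsys_initcall requirement the boilerplate usually collapses further, e.g. (hypothetical foo driver):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
    	return 0;	/* hardware setup elided */
    }

    static struct platform_driver foo_driver = {
    	.probe	= foo_probe,
    	.driver	= {
    		.name	= "foo",
    		.owner	= THIS_MODULE,
    	},
    };
    module_platform_driver(foo_driver);	/* register at module/device init */
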
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 0bf1b2c457a1..ec3a83f52ea2 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -372,7 +372,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA;
xspi = spi_master_get_devdata(master);
- xspi->bitbang.master = spi_master_get(master);
+ xspi->bitbang.master = master;
xspi->bitbang.chipselect = xilinx_spi_chipselect;
xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 9e039c60c068..b374c9ec6e48 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -39,6 +39,9 @@
#include <linux/ioport.h>
#include <linux/acpi.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/spi.h>
+
static void spidev_release(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
@@ -58,11 +61,13 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
+static DEVICE_ATTR_RO(modalias);
-static struct device_attribute spi_dev_attrs[] = {
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static struct attribute *spi_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(spi_dev);
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
* and the sysfs version makes coldplug work too.
@@ -229,7 +234,7 @@ static const struct dev_pm_ops spi_pm = {
struct bus_type spi_bus_type = {
.name = "spi",
- .dev_attrs = spi_dev_attrs,
+ .dev_groups = spi_dev_groups,
.match = spi_match_device,
.uevent = spi_uevent,
.pm = &spi_pm,
@@ -240,15 +245,27 @@ EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ struct spi_device *spi = to_spi_device(dev);
+ int ret;
+
+ acpi_dev_pm_attach(&spi->dev, true);
+ ret = sdrv->probe(spi);
+ if (ret)
+ acpi_dev_pm_detach(&spi->dev, true);
- return sdrv->probe(to_spi_device(dev));
+ return ret;
}
static int spi_drv_remove(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ struct spi_device *spi = to_spi_device(dev);
+ int ret;
+
+ ret = sdrv->remove(spi);
+ acpi_dev_pm_detach(&spi->dev, true);
- return sdrv->remove(to_spi_device(dev));
+ return ret;
}
static void spi_drv_shutdown(struct device *dev)
@@ -323,7 +340,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
if (!spi_master_get(master))
return NULL;
- spi = kzalloc(sizeof *spi, GFP_KERNEL);
+ spi = kzalloc(sizeof(*spi), GFP_KERNEL);
if (!spi) {
dev_err(dev, "cannot alloc spi_device\n");
spi_master_put(master);
@@ -340,6 +357,21 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
+void spi_dev_set_name(struct spi_device *spi)
+{
+ if (ACPI_HANDLE(&spi->dev)) {
+ struct acpi_device *adev;
+ if (!acpi_bus_get_device(ACPI_HANDLE(&spi->dev), &adev)) {
+ dev_set_name(&spi->dev, "spi-%s",
+ dev_name(&adev->dev));
+ return;
+ }
+ }
+
+ dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
+ spi->chip_select);
+}
+
/**
* spi_add_device - Add spi_device allocated with spi_alloc_device
* @spi: spi_device to register
@@ -366,9 +398,7 @@ int spi_add_device(struct spi_device *spi)
}
/* Set the bus ID string */
- dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
- spi->chip_select);
-
+ spi_dev_set_name(spi);
/* We need to make sure there's no other device with this
* chipselect **BEFORE** we call setup(), else we'll trash
@@ -523,6 +553,95 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
/*-------------------------------------------------------------------------*/
+static void spi_set_cs(struct spi_device *spi, bool enable)
+{
+ if (spi->mode & SPI_CS_HIGH)
+ enable = !enable;
+
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio, !enable);
+ else if (spi->master->set_cs)
+ spi->master->set_cs(spi, !enable);
+}
+
+/*
+ * spi_transfer_one_message - Default implementation of transfer_one_message()
+ *
+ * This is a standard implementation of transfer_one_message() for
+ * drivers which implement a transfer_one() operation. It provides
+ * standard handling of delays and chip select management.
+ */
+static int spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ bool cur_cs = true;
+ bool keep_cs = false;
+ int ret = 0;
+
+ spi_set_cs(msg->spi, true);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ trace_spi_transfer_start(msg, xfer);
+
+ INIT_COMPLETION(master->xfer_completion);
+
+ ret = master->transfer_one(master, msg->spi, xfer);
+ if (ret < 0) {
+ dev_err(&msg->spi->dev,
+ "SPI transfer failed: %d\n", ret);
+ goto out;
+ }
+
+ if (ret > 0)
+ wait_for_completion(&master->xfer_completion);
+
+ trace_spi_transfer_stop(msg, xfer);
+
+ if (msg->status != -EINPROGRESS)
+ goto out;
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ if (xfer->cs_change) {
+ if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
+ keep_cs = true;
+ } else {
+ cur_cs = !cur_cs;
+ spi_set_cs(msg->spi, cur_cs);
+ }
+ }
+
+ msg->actual_length += xfer->len;
+ }
+
+out:
+ if (ret != 0 || !keep_cs)
+ spi_set_cs(msg->spi, false);
+
+ if (msg->status == -EINPROGRESS)
+ msg->status = ret;
+
+ spi_finalize_current_message(master);
+
+ return ret;
+}
+
+/**
+ * spi_finalize_current_transfer - report completion of a transfer
+ *
+ * Called by SPI drivers using the core transfer_one_message()
+ * implementation to notify it that the current interrupt driven
+ * transfer has finished and the next one may be scheduled.
+ */
+void spi_finalize_current_transfer(struct spi_master *master)
+{
+ complete(&master->xfer_completion);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
+
/**
* spi_pump_messages - kthread work function which processes spi message queue
* @work: pointer to kthread work struct contained in the master struct
@@ -557,6 +676,7 @@ static void spi_pump_messages(struct kthread_work *work)
pm_runtime_mark_last_busy(master->dev.parent);
pm_runtime_put_autosuspend(master->dev.parent);
}
+ trace_spi_master_idle(master);
return;
}
@@ -585,6 +705,9 @@ static void spi_pump_messages(struct kthread_work *work)
}
}
+ if (!was_busy)
+ trace_spi_master_busy(master);
+
if (!was_busy && master->prepare_transfer_hardware) {
ret = master->prepare_transfer_hardware(master);
if (ret) {
@@ -597,6 +720,20 @@ static void spi_pump_messages(struct kthread_work *work)
}
}
+ trace_spi_message_start(master->cur_msg);
+
+ if (master->prepare_message) {
+ ret = master->prepare_message(master, master->cur_msg);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to prepare message: %d\n", ret);
+ master->cur_msg->status = ret;
+ spi_finalize_current_message(master);
+ return;
+ }
+ master->cur_msg_prepared = true;
+ }
+
ret = master->transfer_one_message(master, master->cur_msg);
if (ret) {
dev_err(&master->dev,
@@ -678,6 +815,7 @@ void spi_finalize_current_message(struct spi_master *master)
{
struct spi_message *mesg;
unsigned long flags;
+ int ret;
spin_lock_irqsave(&master->queue_lock, flags);
mesg = master->cur_msg;
@@ -686,9 +824,20 @@ void spi_finalize_current_message(struct spi_master *master)
queue_kthread_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
+ if (master->cur_msg_prepared && master->unprepare_message) {
+ ret = master->unprepare_message(master, mesg);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to unprepare message: %d\n", ret);
+ }
+ }
+ master->cur_msg_prepared = false;
+
mesg->state = NULL;
if (mesg->complete)
mesg->complete(mesg->context);
+
+ trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
@@ -803,6 +952,8 @@ static int spi_master_initialize_queue(struct spi_master *master)
master->queued = true;
master->transfer = spi_queued_transfer;
+ if (!master->transfer_one_message)
+ master->transfer_one_message = spi_transfer_one_message;
/* Initialize and start queue */
ret = spi_init_queue(master);
@@ -838,10 +989,8 @@ static void of_register_spi_devices(struct spi_master *master)
{
struct spi_device *spi;
struct device_node *nc;
- const __be32 *prop;
- char modalias[SPI_NAME_SIZE + 4];
int rc;
- int len;
+ u32 value;
if (!master->dev.of_node)
return;
@@ -866,14 +1015,14 @@ static void of_register_spi_devices(struct spi_master *master)
}
/* Device address */
- prop = of_get_property(nc, "reg", &len);
- if (!prop || len < sizeof(*prop)) {
- dev_err(&master->dev, "%s has no 'reg' property\n",
- nc->full_name);
+ rc = of_property_read_u32(nc, "reg", &value);
+ if (rc) {
+ dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
+ nc->full_name, rc);
spi_dev_put(spi);
continue;
}
- spi->chip_select = be32_to_cpup(prop);
+ spi->chip_select = value;
/* Mode (clock phase/polarity/etc.) */
if (of_find_property(nc, "spi-cpha", NULL))
@@ -886,55 +1035,53 @@ static void of_register_spi_devices(struct spi_master *master)
spi->mode |= SPI_3WIRE;
/* Device DUAL/QUAD mode */
- prop = of_get_property(nc, "spi-tx-bus-width", &len);
- if (prop && len == sizeof(*prop)) {
- switch (be32_to_cpup(prop)) {
- case SPI_NBITS_SINGLE:
+ if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
+ switch (value) {
+ case 1:
break;
- case SPI_NBITS_DUAL:
+ case 2:
spi->mode |= SPI_TX_DUAL;
break;
- case SPI_NBITS_QUAD:
+ case 4:
spi->mode |= SPI_TX_QUAD;
break;
default:
dev_err(&master->dev,
"spi-tx-bus-width %d not supported\n",
- be32_to_cpup(prop));
+ value);
spi_dev_put(spi);
continue;
}
}
- prop = of_get_property(nc, "spi-rx-bus-width", &len);
- if (prop && len == sizeof(*prop)) {
- switch (be32_to_cpup(prop)) {
- case SPI_NBITS_SINGLE:
+ if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
+ switch (value) {
+ case 1:
break;
- case SPI_NBITS_DUAL:
+ case 2:
spi->mode |= SPI_RX_DUAL;
break;
- case SPI_NBITS_QUAD:
+ case 4:
spi->mode |= SPI_RX_QUAD;
break;
default:
dev_err(&master->dev,
"spi-rx-bus-width %d not supported\n",
- be32_to_cpup(prop));
+ value);
spi_dev_put(spi);
continue;
}
}
/* Device speed */
- prop = of_get_property(nc, "spi-max-frequency", &len);
- if (!prop || len < sizeof(*prop)) {
- dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
- nc->full_name);
+ rc = of_property_read_u32(nc, "spi-max-frequency", &value);
+ if (rc) {
+ dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
+ nc->full_name, rc);
spi_dev_put(spi);
continue;
}
- spi->max_speed_hz = be32_to_cpup(prop);
+ spi->max_speed_hz = value;
/* IRQ */
spi->irq = irq_of_parse_and_map(nc, 0);
@@ -944,9 +1091,7 @@ static void of_register_spi_devices(struct spi_master *master)
spi->dev.of_node = nc;
/* Register the new device */
- snprintf(modalias, sizeof(modalias), "%s%s", SPI_MODULE_PREFIX,
- spi->modalias);
- request_module(modalias);
+ request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
rc = spi_add_device(spi);
if (rc) {
dev_err(&master->dev, "spi_device register error %s\n",
@@ -1025,8 +1170,10 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
return AE_OK;
}
- strlcpy(spi->modalias, dev_name(&adev->dev), sizeof(spi->modalias));
+ adev->power.flags.ignore_parent = true;
+ strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
if (spi_add_device(spi)) {
+ adev->power.flags.ignore_parent = false;
dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
dev_name(&adev->dev));
spi_dev_put(spi);
@@ -1097,7 +1244,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
if (!dev)
return NULL;
- master = kzalloc(size + sizeof *master, GFP_KERNEL);
+ master = kzalloc(size + sizeof(*master), GFP_KERNEL);
if (!master)
return NULL;
@@ -1122,7 +1269,7 @@ static int of_spi_register_master(struct spi_master *master)
return 0;
nb = of_gpio_named_count(np, "cs-gpios");
- master->num_chipselect = max(nb, (int)master->num_chipselect);
+ master->num_chipselect = max_t(int, nb, master->num_chipselect);
/* Return error only for an incorrectly formed cs-gpios property */
if (nb == 0 || nb == -ENOENT)
@@ -1209,6 +1356,7 @@ int spi_register_master(struct spi_master *master)
spin_lock_init(&master->bus_lock_spinlock);
mutex_init(&master->bus_lock_mutex);
master->bus_lock_flag = 0;
+ init_completion(&master->xfer_completion);
/* register the device, then userspace will see it.
* registration fails if the bus ID is in use.
@@ -1245,6 +1393,41 @@ done:
}
EXPORT_SYMBOL_GPL(spi_register_master);
+static void devm_spi_unregister(struct device *dev, void *res)
+{
+ spi_unregister_master(*(struct spi_master **)res);
+}
+
+/**
+ * devm_spi_register_master - register managed SPI master controller
+ * @dev: device managing SPI master
+ * @master: initialized master, originally from spi_alloc_master()
+ * Context: can sleep
+ *
+ * Register an SPI master as with spi_register_master(); the master
+ * will automatically be unregistered when @dev is unbound.
+ */
+int devm_spi_register_master(struct device *dev, struct spi_master *master)
+{
+ struct spi_master **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = spi_register_master(master);
+ if (!ret) {
+ *ptr = master;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_spi_register_master);
+
static int __unregister(struct device *dev, void *null)
{
spi_unregister_device(to_spi_device(dev));
@@ -1402,8 +1585,7 @@ int spi_setup(struct spi_device *spi)
if (spi->master->setup)
status = spi->master->setup(spi);
- dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s"
- "%u bits/w, %u Hz max --> %d\n",
+ dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
@@ -1421,6 +1603,10 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
struct spi_master *master = spi->master;
struct spi_transfer *xfer;
+ message->spi = spi;
+
+ trace_spi_message_submit(message);
+
if (list_empty(&message->transfers))
return -EINVAL;
if (!message->complete)
@@ -1520,7 +1706,6 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
}
}
- message->spi = spi;
message->status = -EINPROGRESS;
return master->transfer(spi, message);
}
@@ -1762,7 +1947,7 @@ int spi_bus_unlock(struct spi_master *master)
EXPORT_SYMBOL_GPL(spi_bus_unlock);
/* portable code must never pass more than 32 bytes */
-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
+#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
static u8 *buf;
@@ -1811,7 +1996,7 @@ int spi_write_then_read(struct spi_device *spi,
}
spi_message_init(&message);
- memset(x, 0, sizeof x);
+ memset(x, 0, sizeof(x));
if (n_tx) {
x[0].len = n_tx;
spi_message_add_tail(&x[0], &message);
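
The new core pieces above (spi_transfer_one_message() as the default transfer_one_message(), plus spi_finalize_current_transfer()) let a driver stop iterating over msg->transfers itself: it supplies transfer_one(), returns a positive value when the transfer will finish asynchronously, and signals completion from its interrupt handler. A minimal sketch with hypothetical foo_* names:

    #include <linux/interrupt.h>
    #include <linux/spi/spi.h>

    struct foo_spi {
    	struct spi_master *master;	/* hypothetical driver state */
    };

    static int foo_transfer_one(struct spi_master *master,
    			    struct spi_device *spi,
    			    struct spi_transfer *xfer)
    {
    	/* program clocks/FIFOs and start the transfer (elided) */

    	/*
    	 * Returning 1 tells the core this transfer completes
    	 * asynchronously; it then waits on master->xfer_completion.
    	 */
    	return 1;
    }

    static irqreturn_t foo_spi_irq(int irq, void *data)
    {
    	struct foo_spi *foo = data;

    	/* read/ack hardware status (elided), then wake the core */
    	spi_finalize_current_transfer(foo->master);
    	return IRQ_HANDLED;
    }
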
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index ca5bcfe874d0..d7c6e36021e8 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -37,7 +37,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
@@ -206,9 +206,9 @@ spidev_write(struct file *filp, const char __user *buf,
mutex_lock(&spidev->buf_lock);
missing = copy_from_user(spidev->buffer, buf, count);
- if (missing == 0) {
+ if (missing == 0)
status = spidev_sync_write(spidev, count);
- } else
+ else
status = -EFAULT;
mutex_unlock(&spidev->buf_lock);
@@ -629,7 +629,6 @@ static int spidev_remove(struct spi_device *spi)
/* make sure ops on existing fds can abort cleanly */
spin_lock_irq(&spidev->spi_lock);
spidev->spi = NULL;
- spi_set_drvdata(spi, NULL);
spin_unlock_irq(&spidev->spi_lock);
/* prevent new opens */
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index e55ddf7cd7c2..32a811d11c25 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -374,7 +374,8 @@ static ssize_t \
attrib##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
return sprintf(buf, format_string, dev_to_ssb_dev(dev)->field); \
-}
+} \
+static DEVICE_ATTR_RO(attrib);
ssb_config_attr(core_num, core_index, "%u\n")
ssb_config_attr(coreid, id.coreid, "0x%04x\n")
@@ -387,16 +388,18 @@ name_show(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%s\n",
ssb_core_name(dev_to_ssb_dev(dev)->id.coreid));
}
-
-static struct device_attribute ssb_device_attrs[] = {
- __ATTR_RO(name),
- __ATTR_RO(core_num),
- __ATTR_RO(coreid),
- __ATTR_RO(vendor),
- __ATTR_RO(revision),
- __ATTR_RO(irq),
- __ATTR_NULL,
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *ssb_device_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_core_num.attr,
+ &dev_attr_coreid.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_irq.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(ssb_device);
static struct bus_type ssb_bustype = {
.name = "ssb",
@@ -407,7 +410,7 @@ static struct bus_type ssb_bustype = {
.suspend = ssb_device_suspend,
.resume = ssb_device_resume,
.uevent = ssb_device_uevent,
- .dev_attrs = ssb_device_attrs,
+ .dev_groups = ssb_device_groups,
};
static void ssb_buses_lock(void)
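
The spi and ssb hunks above are the same sysfs conversion: open-coded struct device_attribute arrays on bus_type.dev_attrs give way to attribute groups built with DEVICE_ATTR_RO() and ATTRIBUTE_GROUPS() and wired up through .dev_groups. In outline, with hypothetical foo names:

    #include <linux/device.h>

    static ssize_t flavour_show(struct device *dev,
    			    struct device_attribute *attr, char *buf)
    {
    	return sprintf(buf, "vanilla\n");	/* illustrative value only */
    }
    static DEVICE_ATTR_RO(flavour);			/* defines dev_attr_flavour */

    static struct attribute *foo_dev_attrs[] = {
    	&dev_attr_flavour.attr,
    	NULL,
    };
    ATTRIBUTE_GROUPS(foo_dev);			/* defines foo_dev_groups */

    static struct bus_type foo_bus_type = {
    	.name		= "foo",
    	.dev_groups	= foo_dev_groups,
    };
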
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index f91bc1fdd895..639ba96adb36 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1960,6 +1960,7 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
+ memset(&DevInfo, 0, sizeof(DevInfo));
DevInfo.MaxRDMBufferSize = BUFFER_4K;
DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
DevInfo.u32RxAlignmentCorrection = 0;
diff --git a/drivers/staging/dwc2/platform.c b/drivers/staging/dwc2/platform.c
index 44cce2fa6361..1d68c49afabe 100644
--- a/drivers/staging/dwc2/platform.c
+++ b/drivers/staging/dwc2/platform.c
@@ -100,8 +100,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
*/
if (!dev->dev.dma_mask)
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
- if (!dev->dev.coherent_dma_mask)
- dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ return retval;
irq = platform_get_irq(dev, 0);
if (irq < 0) {
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index f73e58f5ef8d..3f78439f1b3a 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -3605,17 +3605,10 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
goto err_out;
}
- /* Let's set up the PORT LOGIC Register. First we need to know what
- * the max_payload_size is
- */
- if (pcie_capability_read_word(pdev, PCI_EXP_DEVCAP, &max_payload)) {
- dev_err(&pdev->dev,
- "Could not read PCI config space for Max Payload Size\n");
- goto err_out;
- }
+ /* Let's set up the PORT LOGIC Register. */
/* Program the Ack/Nak latency and replay timers */
- max_payload &= 0x07;
+ max_payload = pdev->pcie_mpss;
if (max_payload < 2) {
static const u16 acknak[2] = { 0x76, 0xD0 };
@@ -3645,8 +3638,7 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
}
/* Change the max read size to 2k */
- if (pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_READRQ, 0x4 << 12)) {
+ if (pcie_set_readrq(pdev, 2048)) {
dev_err(&pdev->dev,
"Couldn't change PCI config space for Max read size\n");
goto err_out;
@@ -4797,21 +4789,8 @@ static int et131x_pci_setup(struct pci_dev *pdev,
pci_set_master(pdev);
/* Check the DMA addressing support of this device */
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (rc < 0) {
- dev_err(&pdev->dev,
- "Unable to obtain 64 bit DMA for consistent allocations\n");
- goto err_release_res;
- }
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc < 0) {
- dev_err(&pdev->dev,
- "Unable to obtain 32 bit DMA for consistent allocations\n");
- goto err_release_res;
- }
- } else {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
dev_err(&pdev->dev, "No usable DMA addressing method\n");
rc = -EIO;
goto err_release_res;
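
dma_set_mask_and_coherent() sets the streaming and coherent DMA masks together and fails if either cannot be honoured, which is what the removed ladder of dma_set_mask()/dma_set_coherent_mask() calls was spelling out by hand. The usual 64-bit-then-32-bit probe sequence becomes (a sketch):

    #include <linux/dma-mapping.h>

    static int foo_setup_dma(struct device *dev)	/* hypothetical helper */
    {
    	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
    	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
    		dev_err(dev, "no usable DMA configuration\n");
    		return -EIO;
    	}
    	return 0;
    }
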
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index 9221a74efd18..93c7299e8353 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -42,7 +42,7 @@ struct ad7606_platform_data {
/**
* struct ad7606_chip_info - chip specifc information
- * @name: indentification string for chip
+ * @name: identification string for chip
* @int_vref_mv: the internal reference voltage
* @channels: channel specification
* @num_channels: number of channels
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 74025fbae679..abfc8bd1794d 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -86,7 +86,7 @@ static int ade7753_spi_read_reg_16(struct device *dev,
struct ade7753_state *st = iio_priv(indio_dev);
ssize_t ret;
- ret = spi_w8r16(st->us, ADE7753_READ_REG(reg_address));
+ ret = spi_w8r16be(st->us, ADE7753_READ_REG(reg_address));
if (ret < 0) {
dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
reg_address);
@@ -94,7 +94,6 @@ static int ade7753_spi_read_reg_16(struct device *dev,
}
*val = ret;
- *val = be16_to_cpup(val);
return 0;
}
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index f649ebe55a04..3d1c02cd6538 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -86,7 +86,7 @@ static int ade7754_spi_read_reg_16(struct device *dev,
struct ade7754_state *st = iio_priv(indio_dev);
int ret;
- ret = spi_w8r16(st->us, ADE7754_READ_REG(reg_address));
+ ret = spi_w8r16be(st->us, ADE7754_READ_REG(reg_address));
if (ret < 0) {
dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
reg_address);
@@ -94,7 +94,6 @@ static int ade7754_spi_read_reg_16(struct device *dev,
}
*val = ret;
- *val = be16_to_cpup(val);
return 0;
}
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index d214ac4932cb..7467e51fd424 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -86,7 +86,7 @@ static int ade7759_spi_read_reg_16(struct device *dev,
struct ade7759_state *st = iio_priv(indio_dev);
int ret;
- ret = spi_w8r16(st->us, ADE7759_READ_REG(reg_address));
+ ret = spi_w8r16be(st->us, ADE7759_READ_REG(reg_address));
if (ret < 0) {
dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
reg_address);
@@ -94,7 +94,6 @@ static int ade7759_spi_read_reg_16(struct device *dev,
}
*val = ret;
- *val = be16_to_cpup(val);
return 0;
}
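
spi_w8r16() hands back the two received bytes in wire order, so these big-endian ADE7xxx parts needed the explicit be16_to_cpup(); spi_w8r16be() does the big-endian-to-CPU conversion itself and returns the value (or a negative error), which is why the extra swap line disappears. Roughly, a register read now looks like:

    #include <linux/spi/spi.h>

    /* Hypothetical 16-bit register read from a big-endian SPI device. */
    static int foo_read_reg16(struct spi_device *spi, u8 reg, u16 *val)
    {
    	ssize_t ret;

    	ret = spi_w8r16be(spi, reg);	/* already in CPU byte order */
    	if (ret < 0)
    		return ret;

    	*val = ret;
    	return 0;
    }
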
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 394254f7d6b5..5032ff7c2259 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -1,6 +1,7 @@
config DRM_IMX
tristate "DRM Support for Freescale i.MX"
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select VIDEOMODE_HELPERS
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index a2e52a0c53c9..ad135d3c3281 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -396,14 +396,14 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
/*
* enable drm irq mode.
- * - with irq_enabled = 1, we can use the vblank feature.
+ * - with irq_enabled = true, we can use the vblank feature.
*
* P.S. note that we wouldn't use drm irq handler but
* just specific driver own one instead because
* drm framework supports only one irq handler and
* drivers can well take care of their interrupts
*/
- drm->irq_enabled = 1;
+ drm->irq_enabled = true;
drm_mode_config_init(drm);
imx_drm_mode_config_init(drm);
@@ -423,11 +423,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
goto err_init;
/*
- * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+ * with vblank_disable_allowed = true, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
- imxdrm->drm->vblank_disable_allowed = 1;
+ imxdrm->drm->vblank_disable_allowed = true;
if (!imx_drm_device_get())
ret = -EINVAL;
@@ -800,6 +800,12 @@ static struct drm_driver imx_drm_driver = {
static int imx_drm_platform_probe(struct platform_device *pdev)
{
+ int ret;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
imx_drm_device->dev = &pdev->dev;
return drm_platform_init(&imx_drm_driver, pdev);
@@ -842,8 +848,6 @@ static int __init imx_drm_init(void)
goto err_pdev;
}
- imx_drm_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32),
-
ret = platform_driver_register(&imx_drm_pdrv);
if (ret)
goto err_pdrv;
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 6fd37a7453e9..9e73e8d8c9aa 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -523,7 +523,9 @@ static int ipu_drm_probe(struct platform_device *pdev)
if (!pdata)
return -EINVAL;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
ipu_crtc = devm_kzalloc(&pdev->dev, sizeof(*ipu_crtc), GFP_KERNEL);
if (!ipu_crtc)
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index 2340458b8a04..3488bb6c44a7 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -194,10 +194,10 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
struct cl_object *obj = ll_i2info(inode)->lli_clob;
pgoff_t offset;
int ret;
- int i;
int rw;
obd_count page_count = 0;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
struct bio *bio;
ssize_t bytes;
@@ -220,15 +220,15 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
for (bio = head; bio != NULL; bio = bio->bi_next) {
LASSERT(rw == bio->bi_rw);
- offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
- bio_for_each_segment(bvec, bio, i) {
- BUG_ON(bvec->bv_offset != 0);
- BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
+ offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
+ bio_for_each_segment(bvec, bio, iter) {
+ BUG_ON(bvec.bv_offset != 0);
+ BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
- pages[page_count] = bvec->bv_page;
+ pages[page_count] = bvec.bv_page;
offsets[page_count] = offset;
page_count++;
- offset += bvec->bv_len;
+ offset += bvec.bv_len;
}
LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
}
@@ -313,7 +313,8 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
bio = &lo->lo_bio;
while (*bio && (*bio)->bi_rw == rw) {
CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
- (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
+ (unsigned long long)(*bio)->bi_iter.bi_sector,
+ (*bio)->bi_iter.bi_size,
page_count, (*bio)->bi_vcnt);
if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
break;
@@ -348,7 +349,8 @@ loop_make_request(struct request_queue *q, struct bio *old_bio)
goto err;
CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
- (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+ (unsigned long long)old_bio->bi_iter.bi_sector,
+ old_bio->bi_iter.bi_size);
spin_lock_irq(&lo->lo_lock);
inactive = (lo->lo_state != LLOOP_BOUND);
@@ -368,7 +370,7 @@ loop_make_request(struct request_queue *q, struct bio *old_bio)
loop_add_bio(lo, old_bio);
LL_MRF_RETURN(0);
err:
- cfs_bio_io_error(old_bio, old_bio->bi_size);
+ cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size);
LL_MRF_RETURN(0);
}
@@ -380,7 +382,7 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
while (bio) {
struct bio *tmp = bio->bi_next;
bio->bi_next = NULL;
- cfs_bio_endio(bio, bio->bi_size, ret);
+ cfs_bio_endio(bio, bio->bi_iter.bi_size, ret);
bio = tmp;
}
}
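
This lloop change is part of the immutable-biovec work: the sector and size fields move under bio->bi_iter, and bio_for_each_segment() now yields a struct bio_vec by value while keeping its position in a separate struct bvec_iter. The iteration idiom, in outline (hypothetical helper):

    #include <linux/bio.h>
    #include <linux/kernel.h>

    static unsigned int foo_count_bytes(struct bio *bio)
    {
    	struct bio_vec bvec;		/* copied out per segment */
    	struct bvec_iter iter;		/* iteration state lives here */
    	unsigned int bytes = 0;

    	pr_debug("bio at sector %llu, %u bytes\n",
    		 (unsigned long long)bio->bi_iter.bi_sector,
    		 bio->bi_iter.bi_size);

    	bio_for_each_segment(bvec, bio, iter)
    		bytes += bvec.bv_len;	/* note bvec.bv_len, not bvec->bv_len */

    	return bytes;
    }
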
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 90d6ac469355..081407be33ab 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -901,10 +901,7 @@ dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int err;
struct dt3155_priv *pd;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
- return -ENODEV;
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
return -ENODEV;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
diff --git a/drivers/staging/media/lirc/TODO b/drivers/staging/media/lirc/TODO
index b6cb593f55c6..cbea5d84fed3 100644
--- a/drivers/staging/media/lirc/TODO
+++ b/drivers/staging/media/lirc/TODO
@@ -2,6 +2,11 @@
(see drivers/media/IR/mceusb.c vs. lirc_mceusb.c in lirc cvs for an
example of a previously completed port).
+- lirc_bt829 uses registers on a Mach64 VT, which has a separate kernel
+ framebuffer driver (atyfb) and userland X driver (mach64). It can't
+ simply be converted to a normal PCI driver, but ideally it should be
+ coordinated with the other drivers.
+
Please send patches to:
Jarod Wilson <jarod@wilsonet.com>
Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c
index fa31ee7dd6a9..623f10e2cdaa 100644
--- a/drivers/staging/media/lirc/lirc_bt829.c
+++ b/drivers/staging/media/lirc/lirc_bt829.c
@@ -63,7 +63,7 @@ static bool debug;
} while (0)
static int atir_minor;
-static unsigned long pci_addr_phys;
+static phys_addr_t pci_addr_phys;
static unsigned char *pci_addr_lin;
static struct lirc_driver atir_driver;
@@ -78,11 +78,11 @@ static struct pci_dev *do_pci_probe(void)
pci_addr_phys = 0;
if (my_dev->resource[0].flags & IORESOURCE_MEM) {
pci_addr_phys = my_dev->resource[0].start;
- pr_info("memory at 0x%08X\n",
- (unsigned int)pci_addr_phys);
+ pr_info("memory at %pa\n", &pci_addr_phys);
}
if (pci_addr_phys == 0) {
pr_err("no memory resource ?\n");
+ pci_dev_put(my_dev);
return NULL;
}
} else {
@@ -120,13 +120,20 @@ static void atir_set_use_dec(void *data)
int init_module(void)
{
struct pci_dev *pdev;
+ int rc;
pdev = do_pci_probe();
if (pdev == NULL)
return -ENODEV;
- if (!atir_init_start())
- return -ENODEV;
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_put_dev;
+
+ if (!atir_init_start()) {
+ rc = -ENODEV;
+ goto err_disable;
+ }
strcpy(atir_driver.name, "ATIR");
atir_driver.minor = -1;
@@ -142,17 +149,31 @@ int init_module(void)
atir_minor = lirc_register_driver(&atir_driver);
if (atir_minor < 0) {
pr_err("failed to register driver!\n");
- return atir_minor;
+ rc = atir_minor;
+ goto err_unmap;
}
dprintk("driver is registered on minor %d\n", atir_minor);
return 0;
+
+err_unmap:
+ iounmap(pci_addr_lin);
+err_disable:
+ pci_disable_device(pdev);
+err_put_dev:
+ pci_dev_put(pdev);
+ return rc;
}
void cleanup_module(void)
{
+ struct pci_dev *pdev = to_pci_dev(atir_driver.dev);
+
lirc_unregister_driver(atir_minor);
+ iounmap(pci_addr_lin);
+ pci_disable_device(pdev);
+ pci_dev_put(pdev);
}
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index af08e677b60f..7b3be2346b4b 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -707,7 +707,8 @@ static irqreturn_t irq_handler(int i, void *blah)
pr_warn("ignoring spike: %d %d %lx %lx %lx %lx\n",
dcd, sense,
tv.tv_sec, lasttv.tv_sec,
- tv.tv_usec, lasttv.tv_usec);
+ (unsigned long)tv.tv_usec,
+ (unsigned long)lasttv.tv_usec);
continue;
}
@@ -719,7 +720,8 @@ static irqreturn_t irq_handler(int i, void *blah)
pr_warn("%d %d %lx %lx %lx %lx\n",
dcd, sense,
tv.tv_sec, lasttv.tv_sec,
- tv.tv_usec, lasttv.tv_usec);
+ (unsigned long)tv.tv_usec,
+ (unsigned long)lasttv.tv_usec);
data = PULSE_MASK;
} else if (deltv > 15) {
data = PULSE_MASK; /* really long time */
@@ -728,7 +730,8 @@ static irqreturn_t irq_handler(int i, void *blah)
pr_warn("AIEEEE: %d %d %lx %lx %lx %lx\n",
dcd, sense,
tv.tv_sec, lasttv.tv_sec,
- tv.tv_usec, lasttv.tv_usec);
+ (unsigned long)tv.tv_usec,
+ (unsigned long)lasttv.tv_usec);
/*
* detecting pulse while this
* MUST be a space!
diff --git a/drivers/staging/media/msi3101/Kconfig b/drivers/staging/media/msi3101/Kconfig
index b94a95a597d6..0c349c8595e4 100644
--- a/drivers/staging/media/msi3101/Kconfig
+++ b/drivers/staging/media/msi3101/Kconfig
@@ -1,3 +1,5 @@
config USB_MSI3101
tristate "Mirics MSi3101 SDR Dongle"
depends on USB && VIDEO_DEV && VIDEO_V4L2
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_VMALLOC
diff --git a/drivers/staging/media/msi3101/sdr-msi3101.c b/drivers/staging/media/msi3101/sdr-msi3101.c
index 24c7b70a6cbf..4c3bf776bb20 100644
--- a/drivers/staging/media/msi3101/sdr-msi3101.c
+++ b/drivers/staging/media/msi3101/sdr-msi3101.c
@@ -1131,7 +1131,13 @@ static int msi3101_queue_setup(struct vb2_queue *vq,
/* Absolute min and max number of buffers available for mmap() */
*nbuffers = 32;
*nplanes = 1;
- sizes[0] = PAGE_ALIGN(3 * 3072); /* 3 * 768 * 4 */
+ /*
+ * 3, wMaxPacketSize 3x 1024 bytes
+ * 504, max IQ sample pairs per 1024 frame
+ * 2, two samples, I and Q
+ * 4, 32-bit float
+ */
+ sizes[0] = PAGE_ALIGN(3 * 504 * 2 * 4); /* = 12096 */
dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
__func__, *nbuffers, sizes[0]);
return 0;
@@ -1657,7 +1663,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
f->frequency * 625UL / 10UL);
}
-const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
+static const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
.vidioc_querycap = msi3101_querycap,
.vidioc_enum_input = msi3101_enum_input,
diff --git a/drivers/staging/media/solo6x10/solo6x10-disp.c b/drivers/staging/media/solo6x10/solo6x10-disp.c
index 32d9953bc36e..145295a5db72 100644
--- a/drivers/staging/media/solo6x10/solo6x10-disp.c
+++ b/drivers/staging/media/solo6x10/solo6x10-disp.c
@@ -176,18 +176,27 @@ static void solo_vout_config(struct solo_dev *solo_dev)
static int solo_dma_vin_region(struct solo_dev *solo_dev, u32 off,
u16 val, int reg_size)
{
- u16 buf[64];
- int i;
- int ret = 0;
+ u16 *buf;
+ const int n = 64, size = n * sizeof(*buf);
+ int i, ret = 0;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- for (i = 0; i < sizeof(buf) >> 1; i++)
+ for (i = 0; i < n; i++)
buf[i] = cpu_to_le16(val);
- for (i = 0; i < reg_size; i += sizeof(buf))
- ret |= solo_p2m_dma(solo_dev, 1, buf,
- SOLO_MOTION_EXT_ADDR(solo_dev) + off + i,
- sizeof(buf), 0, 0);
+ for (i = 0; i < reg_size; i += size) {
+ ret = solo_p2m_dma(solo_dev, 1, buf,
+ SOLO_MOTION_EXT_ADDR(solo_dev) + off + i,
+ size, 0, 0);
+
+ if (ret)
+ break;
+ }
+ kfree(buf);
return ret;
}
diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
index a4c589604b02..d582c5b84c14 100644
--- a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
@@ -95,38 +95,11 @@ static unsigned char vop_6110_pal_cif[] = {
0x01, 0x68, 0xce, 0x32, 0x28, 0x00, 0x00, 0x00,
};
-struct vop_header {
- /* VE_STATUS0 */
- u32 mpeg_size:20, sad_motion_flag:1, video_motion_flag:1, vop_type:2,
- channel:5, source_fl:1, interlace:1, progressive:1;
-
- /* VE_STATUS1 */
- u32 vsize:8, hsize:8, last_queue:4, nop0:8, scale:4;
-
- /* VE_STATUS2 */
- u32 mpeg_off;
-
- /* VE_STATUS3 */
- u32 jpeg_off;
-
- /* VE_STATUS4 */
- u32 jpeg_size:20, interval:10, nop1:2;
-
- /* VE_STATUS5/6 */
- u32 sec, usec;
-
- /* VE_STATUS7/8/9 */
- u32 nop2[3];
-
- /* VE_STATUS10 */
- u32 mpeg_size_alt:20, nop3:12;
-
- u32 end_nops[5];
-} __packed;
+typedef __le32 vop_header[16];
struct solo_enc_buf {
enum solo_enc_types type;
- struct vop_header *vh;
+ const vop_header *vh;
int motion;
};
@@ -346,7 +319,7 @@ static int enc_get_mpeg_dma(struct solo_dev *solo_dev, dma_addr_t dma,
/* Build a descriptor queue out of an SG list and send it to the P2M for
* processing. */
static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
- struct vb2_dma_sg_desc *vbuf, int off, int size,
+ struct sg_table *vbuf, int off, int size,
unsigned int base, unsigned int base_size)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
@@ -359,7 +332,7 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
solo_enc->desc_count = 1;
- for_each_sg(vbuf->sglist, sg, vbuf->num_pages, i) {
+ for_each_sg(vbuf->sgl, sg, vbuf->nents, i) {
struct solo_p2m_desc *desc;
dma_addr_t dma;
int len;
@@ -430,84 +403,145 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
solo_enc->desc_count - 1);
}
+/* Extract values from VOP header - VE_STATUSxx */
+static inline int vop_interlaced(const vop_header *vh)
+{
+ return (__le32_to_cpu((*vh)[0]) >> 30) & 1;
+}
+
+static inline u8 vop_channel(const vop_header *vh)
+{
+ return (__le32_to_cpu((*vh)[0]) >> 24) & 0x1F;
+}
+
+static inline u8 vop_type(const vop_header *vh)
+{
+ return (__le32_to_cpu((*vh)[0]) >> 22) & 3;
+}
+
+static inline u32 vop_mpeg_size(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[0]) & 0xFFFFF;
+}
+
+static inline u8 vop_hsize(const vop_header *vh)
+{
+ return (__le32_to_cpu((*vh)[1]) >> 8) & 0xFF;
+}
+
+static inline u8 vop_vsize(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[1]) & 0xFF;
+}
+
+static inline u32 vop_mpeg_offset(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[2]);
+}
+
+static inline u32 vop_jpeg_offset(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[3]);
+}
+
+static inline u32 vop_jpeg_size(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[4]) & 0xFFFFF;
+}
+
+static inline u32 vop_sec(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[5]);
+}
+
+static inline u32 vop_usec(const vop_header *vh)
+{
+ return __le32_to_cpu((*vh)[6]);
+}
+
static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
- struct vb2_buffer *vb, struct vop_header *vh)
+ struct vb2_buffer *vb, const vop_header *vh)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
- struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+ struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
int frame_size;
int ret;
vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
- if (vb2_plane_size(vb, 0) < vh->jpeg_size + solo_enc->jpeg_len)
+ if (vb2_plane_size(vb, 0) < vop_jpeg_size(vh) + solo_enc->jpeg_len)
return -EIO;
- sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages,
- solo_enc->jpeg_header,
- solo_enc->jpeg_len);
-
- frame_size = (vh->jpeg_size + solo_enc->jpeg_len + (DMA_ALIGN - 1))
+ frame_size = (vop_jpeg_size(vh) + solo_enc->jpeg_len + (DMA_ALIGN - 1))
& ~(DMA_ALIGN - 1);
- vb2_set_plane_payload(vb, 0, vh->jpeg_size + solo_enc->jpeg_len);
+ vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len);
- dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+ /* may discard all previous data in vbuf->sgl */
+ dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
DMA_FROM_DEVICE);
- ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf, vh->jpeg_off,
- frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
- SOLO_JPEG_EXT_SIZE(solo_dev));
- dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+ ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf,
+ vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev),
+ frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
+ SOLO_JPEG_EXT_SIZE(solo_dev));
+ dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
DMA_FROM_DEVICE);
+
+ /* add the header only after dma_unmap_sg() */
+ sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
+ solo_enc->jpeg_header, solo_enc->jpeg_len);
+
return ret;
}
static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
- struct vb2_buffer *vb, struct vop_header *vh)
+ struct vb2_buffer *vb, const vop_header *vh)
{
struct solo_dev *solo_dev = solo_enc->solo_dev;
- struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+ struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
int frame_off, frame_size;
int skip = 0;
int ret;
- if (vb2_plane_size(vb, 0) < vh->mpeg_size)
+ if (vb2_plane_size(vb, 0) < vop_mpeg_size(vh))
return -EIO;
/* If this is a key frame, add extra header */
- if (!vh->vop_type) {
- sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages,
- solo_enc->vop,
- solo_enc->vop_len);
-
+ vb->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME);
+ if (!vop_type(vh)) {
skip = solo_enc->vop_len;
-
vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
- vb2_set_plane_payload(vb, 0, vh->mpeg_size + solo_enc->vop_len);
+ vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh) + solo_enc->vop_len);
} else {
vb->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
- vb2_set_plane_payload(vb, 0, vh->mpeg_size);
+ vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh));
}
/* Now get the actual mpeg payload */
- frame_off = (vh->mpeg_off + sizeof(*vh))
+ frame_off = (vop_mpeg_offset(vh) - SOLO_MP4E_EXT_ADDR(solo_dev) + sizeof(*vh))
% SOLO_MP4E_EXT_SIZE(solo_dev);
- frame_size = (vh->mpeg_size + skip + (DMA_ALIGN - 1))
+ frame_size = (vop_mpeg_size(vh) + skip + (DMA_ALIGN - 1))
& ~(DMA_ALIGN - 1);
- dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+ /* may discard all previous data in vbuf->sgl */
+ dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
DMA_FROM_DEVICE);
ret = solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
SOLO_MP4E_EXT_ADDR(solo_dev),
SOLO_MP4E_EXT_SIZE(solo_dev));
- dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+ dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
DMA_FROM_DEVICE);
+
+ /* add the header only after dma_unmap_sg() */
+ if (!vop_type(vh))
+ sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
+ solo_enc->vop, solo_enc->vop_len);
return ret;
}
static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
struct vb2_buffer *vb, struct solo_enc_buf *enc_buf)
{
- struct vop_header *vh = enc_buf->vh;
+ const vop_header *vh = enc_buf->vh;
int ret;
/* Check for motion flags */
@@ -531,8 +565,8 @@ static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
if (!ret) {
vb->v4l2_buf.sequence = solo_enc->sequence++;
- vb->v4l2_buf.timestamp.tv_sec = vh->sec;
- vb->v4l2_buf.timestamp.tv_usec = vh->usec;
+ vb->v4l2_buf.timestamp.tv_sec = vop_sec(vh);
+ vb->v4l2_buf.timestamp.tv_usec = vop_usec(vh);
}
vb2_buffer_done(vb, ret ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
@@ -605,15 +639,13 @@ static void solo_handle_ring(struct solo_dev *solo_dev)
/* FAIL... */
if (enc_get_mpeg_dma(solo_dev, solo_dev->vh_dma, off,
- sizeof(struct vop_header)))
+ sizeof(vop_header)))
continue;
- enc_buf.vh = (struct vop_header *)solo_dev->vh_buf;
- enc_buf.vh->mpeg_off -= SOLO_MP4E_EXT_ADDR(solo_dev);
- enc_buf.vh->jpeg_off -= SOLO_JPEG_EXT_ADDR(solo_dev);
+ enc_buf.vh = solo_dev->vh_buf;
/* Sanity check */
- if (enc_buf.vh->mpeg_off != off)
+ if (vop_mpeg_offset(enc_buf.vh) != SOLO_MP4E_EXT_ADDR(solo_dev) + off)
continue;
if (solo_motion_detected(solo_enc))
@@ -1329,7 +1361,7 @@ int solo_enc_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
init_waitqueue_head(&solo_dev->ring_thread_wait);
- solo_dev->vh_size = sizeof(struct vop_header);
+ solo_dev->vh_size = sizeof(vop_header);
solo_dev->vh_buf = pci_alloc_consistent(solo_dev->pdev,
solo_dev->vh_size,
&solo_dev->vh_dma);
diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
index 6f91d2e34b2a..f1bbb8cb74e6 100644
--- a/drivers/staging/media/solo6x10/solo6x10.h
+++ b/drivers/staging/media/solo6x10/solo6x10.h
@@ -94,7 +94,6 @@
#define SOLO_ENC_MODE_HD1 1
#define SOLO_ENC_MODE_D1 9
-#define SOLO_DEFAULT_GOP 30
#define SOLO_DEFAULT_QP 3
#ifndef V4L2_BUF_FLAG_MOTION_ON
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 6ccb64fb0786..6ce0af9977d8 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd)
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
index 23db32f07fd5..a10cdb17038b 100644
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ b/drivers/staging/sb105x/sb_pci_mp.c
@@ -1063,7 +1063,7 @@ static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg)
static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
{
- struct serial_icounter_struct icount;
+ struct serial_icounter_struct icount = {};
struct sb_uart_icount cnow;
struct sb_uart_port *port = state->port;
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index c97e0e154d28..7e10dcdc3090 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -570,6 +570,7 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
ltv_t *pLtv;
bool_t ltvAllocated = FALSE;
ENCSTRCT sEncryption;
+ size_t len;
#ifdef USE_WDS
hcf_16 hcfPort = HCF_PORT_0;
@@ -686,7 +687,8 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
break;
case CFG_CNF_OWN_NAME:
memset(lp->StationName, 0, sizeof(lp->StationName));
- memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
+ len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
+ strlcpy(lp->StationName, &pLtv->u.u8[2], len);
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break;
case CFG_CNF_LOAD_BALANCING:
@@ -1783,6 +1785,7 @@ int wvlan_set_station_nickname(struct net_device *dev,
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
+ size_t len;
int ret = 0;
/*------------------------------------------------------------------------*/
@@ -1793,8 +1796,8 @@ int wvlan_set_station_nickname(struct net_device *dev,
wl_lock(lp, &flags);
memset(lp->StationName, 0, sizeof(lp->StationName));
-
- memcpy(lp->StationName, extra, wrqu->data.length);
+ len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
+ strlcpy(lp->StationName, extra, len);
/* Commit the adapter parameters */
wl_apply(lp);
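
Both StationName hunks above apply the same defensive pattern: clamp the caller-supplied length to the destination buffer before copying, so a bad length from userspace cannot overrun the array. A minimal userspace sketch of that pattern, with an illustrative 32-byte buffer (names and sizes below are not taken from the driver) and snprintf() standing in for the kernel's strlcpy():

    #include <stdio.h>
    #include <string.h>

    #define NAME_MAX_LEN 32  /* illustrative, not the driver's StationName size */

    /* Clamp the caller-supplied length, then copy with guaranteed
     * NUL termination -- the min_t() + strlcpy() shape used above. */
    static void copy_bounded(char *dst, size_t dst_size,
                             const char *src, size_t src_len)
    {
        size_t len = src_len < dst_size ? src_len : dst_size; /* min_t() */

        if (len)
            snprintf(dst, len, "%s", src); /* copies at most len - 1 bytes */
    }

    int main(void)
    {
        char station_name[NAME_MAX_LEN];
        const char *nickname = "a-nickname-much-longer-than-the-destination-buffer";

        copy_bounded(station_name, sizeof(station_name),
                     nickname, strlen(nickname) + 1);
        printf("%s\n", station_name);
        return 0;
    }
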
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 2c4ed52ca849..231123868908 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -171,13 +171,14 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
u64 start, end, bound;
/* unaligned request */
- if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+ if (unlikely(bio->bi_iter.bi_sector &
+ (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
return 0;
- if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+ if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
return 0;
- start = bio->bi_sector;
- end = start + (bio->bi_size >> SECTOR_SHIFT);
+ start = bio->bi_iter.bi_sector;
+ end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
bound = zram->disksize >> SECTOR_SHIFT;
/* out of range */
if (unlikely(start >= bound || end > bound || start > end))
@@ -669,9 +670,10 @@ static ssize_t reset_store(struct device *dev,
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
- int i, offset;
+ int offset;
u32 index;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
switch (rw) {
case READ:
@@ -682,36 +684,37 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
break;
}
- index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
- offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+ index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+ offset = (bio->bi_iter.bi_sector &
+ (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
int max_transfer_size = PAGE_SIZE - offset;
- if (bvec->bv_len > max_transfer_size) {
+ if (bvec.bv_len > max_transfer_size) {
/*
* zram_bvec_rw() can only make operation on a single
* zram page. Split the bio vector.
*/
struct bio_vec bv;
- bv.bv_page = bvec->bv_page;
+ bv.bv_page = bvec.bv_page;
bv.bv_len = max_transfer_size;
- bv.bv_offset = bvec->bv_offset;
+ bv.bv_offset = bvec.bv_offset;
if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
goto out;
- bv.bv_len = bvec->bv_len - max_transfer_size;
+ bv.bv_len = bvec.bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size;
if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
goto out;
} else
- if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
+ if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
< 0)
goto out;
- update_position(&index, &offset, bvec);
+ update_position(&index, &offset, &bvec);
}
set_bit(BIO_UPTODATE, &bio->bi_flags);
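
For reference, the split logic the zram hunk preserves while moving to the by-value bio_vec/bvec_iter API: a segment that crosses a page boundary is handled as two operations, PAGE_SIZE - offset bytes in the current page and the remainder at offset 0 of the next. A standalone sketch of just that arithmetic (the page size and example segments are illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* Process one segment of 'len' bytes starting at 'offset' within a
     * page, splitting it the way __zram_make_request() does above. */
    static void process_segment(unsigned int index, unsigned int offset,
                                unsigned int len)
    {
        unsigned int max_transfer = PAGE_SIZE - offset;

        if (len > max_transfer) {
            /* first piece fills the current page ... */
            printf("page %u: offset %u, len %u\n", index, offset, max_transfer);
            /* ... the remainder starts at offset 0 of the next page */
            printf("page %u: offset 0, len %u\n", index + 1, len - max_transfer);
        } else {
            printf("page %u: offset %u, len %u\n", index, offset, len);
        }
    }

    int main(void)
    {
        process_segment(10, 3000, 2048);  /* crosses the page boundary */
        process_segment(11, 0, 512);      /* fits in one page */
        return 0;
    }
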
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index b9a3394fe479..4e89aa022428 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -319,7 +319,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
bio->bi_bdev = ib_dev->ibd_bd;
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
- bio->bi_sector = lba;
+ bio->bi_iter.bi_sector = lba;
return bio;
}
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 551c96ca60ac..0f199f6a0738 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
* pSCSI Host ID and enable for phba mode
*/
sh = scsi_host_lookup(phv->phv_host_id);
- if (IS_ERR(sh)) {
+ if (!sh) {
pr_err("pSCSI: Unable to locate SCSI Host for"
" phv_host_id: %d\n", phv->phv_host_id);
- return PTR_ERR(sh);
+ return -EINVAL;
}
phv->phv_lld_host = sh;
@@ -515,10 +515,10 @@ static int pscsi_configure_device(struct se_device *dev)
sh = phv->phv_lld_host;
} else {
sh = scsi_host_lookup(pdv->pdv_host_id);
- if (IS_ERR(sh)) {
+ if (!sh) {
pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
- return PTR_ERR(sh);
+ return -EINVAL;
}
}
} else {
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 4714c6f8da4b..d9b92b2c524d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -263,6 +263,11 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
sectors, cmd->se_dev->dev_attrib.max_write_same_len);
return TCM_INVALID_CDB_FIELD;
}
+ /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
+ if (flags[0] & 0x10) {
+ pr_warn("WRITE SAME with ANCHOR not supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
/*
* Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
* translated into block discard requests within backend code.
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 3da4fd10b9f8..474cd44fac14 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -82,6 +82,9 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
mutex_lock(&g_device_mutex);
list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
+ if (!se_dev->dev_attrib.emulate_3pc)
+ continue;
+
memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
@@ -357,6 +360,7 @@ struct xcopy_pt_cmd {
struct se_cmd se_cmd;
struct xcopy_op *xcopy_op;
struct completion xpt_passthrough_sem;
+ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};
static struct se_port xcopy_pt_port;
@@ -675,7 +679,8 @@ static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
se_cmd->scsi_status);
- return 0;
+
+ return (se_cmd->scsi_status) ? -EINVAL : 0;
}
static int target_xcopy_read_source(
@@ -708,7 +713,7 @@ static int target_xcopy_read_source(
(unsigned long long)src_lba, src_sectors, length);
transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
- DMA_FROM_DEVICE, 0, NULL);
+ DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
xop->src_pt_cmd = xpt_cmd;
rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
@@ -768,7 +773,7 @@ static int target_xcopy_write_destination(
(unsigned long long)dst_lba, dst_sectors, length);
transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
- DMA_TO_DEVICE, 0, NULL);
+ DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
xop->dst_pt_cmd = xpt_cmd;
rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
@@ -884,30 +889,42 @@ out:
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
{
+ struct se_device *dev = se_cmd->se_dev;
struct xcopy_op *xop = NULL;
unsigned char *p = NULL, *seg_desc;
unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
+ sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
int rc;
unsigned short tdll;
+ if (!dev->dev_attrib.emulate_3pc) {
+ pr_err("EXTENDED_COPY operation explicitly disabled\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
sa = se_cmd->t_task_cdb[1] & 0x1f;
if (sa != 0x00) {
pr_err("EXTENDED_COPY(LID4) not supported\n");
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
+ xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
+ if (!xop) {
+ pr_err("Unable to allocate xcopy_op\n");
+ return TCM_OUT_OF_RESOURCES;
+ }
+ xop->xop_se_cmd = se_cmd;
+
p = transport_kmap_data_sg(se_cmd);
if (!p) {
pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
+ kfree(xop);
return TCM_OUT_OF_RESOURCES;
}
list_id = p[0];
- if (list_id != 0x00) {
- pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id);
- goto out;
- }
- list_id_usage = (p[1] & 0x18);
+ list_id_usage = (p[1] & 0x18) >> 3;
+
/*
* Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
*/
@@ -920,13 +937,6 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
goto out;
}
- xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
- if (!xop) {
- pr_err("Unable to allocate xcopy_op\n");
- goto out;
- }
- xop->xop_se_cmd = se_cmd;
-
pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
tdll, sdll, inline_dl);
@@ -935,6 +945,17 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
if (rc <= 0)
goto out;
+ if (xop->src_dev->dev_attrib.block_size !=
+ xop->dst_dev->dev_attrib.block_size) {
+ pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
+ " block_size: %u currently unsupported\n",
+ xop->src_dev->dev_attrib.block_size,
+ xop->dst_dev->dev_attrib.block_size);
+ xcopy_pt_undepend_remotedev(xop);
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out;
+ }
+
pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
rc * XCOPY_TARGET_DESC_LEN);
seg_desc = &p[16];
@@ -957,7 +978,7 @@ out:
if (p)
transport_kunmap_data_sg(se_cmd);
kfree(xop);
- return TCM_INVALID_CDB_FIELD;
+ return ret;
}
static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index dbfc390330ac..f35a1f75b15b 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -56,7 +56,7 @@ config THERMAL_DEFAULT_GOV_USER_SPACE
select THERMAL_GOV_USER_SPACE
help
Select this if you want to let the user space manage the
- lpatform thermals.
+ platform thermals.
endchoice
@@ -69,6 +69,7 @@ config THERMAL_GOV_STEP_WISE
bool "Step_wise thermal governor"
help
Enable this to manage platform thermals using a simple linear
+ governor.
config THERMAL_GOV_USER_SPACE
bool "User_space thermal governor"
@@ -78,7 +79,6 @@ config THERMAL_GOV_USER_SPACE
config CPU_THERMAL
bool "generic cpu cooling support"
depends on CPU_FREQ
- select CPU_FREQ_TABLE
help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists
@@ -117,14 +117,14 @@ config SPEAR_THERMAL
depends on OF
help
Enable this to plug the SPEAr thermal sensor driver into the Linux
- thermal framework
+ thermal framework.
config RCAR_THERMAL
tristate "Renesas R-Car thermal driver"
depends on ARCH_SHMOBILE
help
Enable this to plug the R-Car thermal sensor driver into the Linux
- thermal framework
+ thermal framework.
config KIRKWOOD_THERMAL
tristate "Temperature sensor on Marvell Kirkwood SoCs"
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index b40b37cd25e0..8f181b3f842b 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -675,6 +675,11 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x2e},
{ X86_VENDOR_INTEL, 6, 0x2f},
{ X86_VENDOR_INTEL, 6, 0x3a},
+ { X86_VENDOR_INTEL, 6, 0x3c},
+ { X86_VENDOR_INTEL, 6, 0x3e},
+ { X86_VENDOR_INTEL, 6, 0x3f},
+ { X86_VENDOR_INTEL, 6, 0x45},
+ { X86_VENDOR_INTEL, 6, 0x46},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
@@ -758,21 +763,39 @@ static int powerclamp_init(void)
/* probe cpu features and ids here */
retval = powerclamp_probe();
if (retval)
- return retval;
+ goto exit_free;
+
/* set default limit, maybe adjusted during runtime based on feedback */
window_size = 2;
register_hotcpu_notifier(&powerclamp_cpu_notifier);
+
powerclamp_thread = alloc_percpu(struct task_struct *);
+ if (!powerclamp_thread) {
+ retval = -ENOMEM;
+ goto exit_unregister;
+ }
+
cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
&powerclamp_cooling_ops);
- if (IS_ERR(cooling_dev))
- return -ENODEV;
+ if (IS_ERR(cooling_dev)) {
+ retval = -ENODEV;
+ goto exit_free_thread;
+ }
if (!duration)
duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);
+
powerclamp_create_debug_files();
return 0;
+
+exit_free_thread:
+ free_percpu(powerclamp_thread);
+exit_unregister:
+ unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
+exit_free:
+ kfree(cpu_clamping_mask);
+ return retval;
}
module_init(powerclamp_init);
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index f10a6ad37c06..c2301da08ac7 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -310,8 +310,6 @@ void exynos_report_trigger(struct thermal_sensor_conf *conf)
}
th_zone = conf->pzone_data;
- if (th_zone->therm_dev)
- return;
if (th_zone->bind == false) {
for (i = 0; i < th_zone->cool_dev_size; i++) {
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index b43afda8acd1..32f38b90c4f6 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -317,6 +317,9 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
con = readl(data->base + reg->tmu_ctrl);
+ if (pdata->test_mux)
+ con |= (pdata->test_mux << reg->test_mux_addr_shift);
+
if (pdata->reference_voltage) {
con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
@@ -488,7 +491,7 @@ static const struct of_device_id exynos_tmu_match[] = {
},
{
.compatible = "samsung,exynos4412-tmu",
- .data = (void *)EXYNOS5250_TMU_DRV_DATA,
+ .data = (void *)EXYNOS4412_TMU_DRV_DATA,
},
{
.compatible = "samsung,exynos5250-tmu",
@@ -629,9 +632,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (pdata->type == SOC_ARCH_EXYNOS ||
- pdata->type == SOC_ARCH_EXYNOS4210 ||
- pdata->type == SOC_ARCH_EXYNOS5440)
+ if (pdata->type == SOC_ARCH_EXYNOS4210 ||
+ pdata->type == SOC_ARCH_EXYNOS4412 ||
+ pdata->type == SOC_ARCH_EXYNOS5250 ||
+ pdata->type == SOC_ARCH_EXYNOS5440)
data->soc = pdata->type;
else {
ret = -EINVAL;
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
index b364c9eee701..3fb65547e64c 100644
--- a/drivers/thermal/samsung/exynos_tmu.h
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -41,7 +41,8 @@ enum calibration_mode {
enum soc_type {
SOC_ARCH_EXYNOS4210 = 1,
- SOC_ARCH_EXYNOS,
+ SOC_ARCH_EXYNOS4412,
+ SOC_ARCH_EXYNOS5250,
SOC_ARCH_EXYNOS5440,
};
@@ -84,6 +85,7 @@ enum soc_type {
* @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl
reg.
* @tmu_ctrl: TMU main controller register.
+ * @test_mux_addr_shift: shift bits of test mux address.
* @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register.
* @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register.
* @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register.
@@ -150,6 +152,7 @@ struct exynos_tmu_registers {
u32 triminfo_reload_shift;
u32 tmu_ctrl;
+ u32 test_mux_addr_shift;
u32 buf_vref_sel_shift;
u32 buf_vref_sel_mask;
u32 therm_trip_mode_shift;
@@ -257,6 +260,7 @@ struct exynos_tmu_registers {
* @first_point_trim: temp value of the first point trimming
* @second_point_trim: temp value of the second point trimming
* @default_temp_offset: default temperature offset in case of no trimming
* @test_mux: information if SoC supports test MUX
* @cal_type: calibration type for temperature
* @cal_mode: calibration mode for temperature
* @freq_clip_table: Table representing frequency reduction percentage.
@@ -286,6 +290,7 @@ struct exynos_tmu_platform_data {
u8 first_point_trim;
u8 second_point_trim;
u8 default_temp_offset;
+ u8 test_mux;
enum calibration_type cal_type;
enum calibration_mode cal_mode;
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index 9002499c1f69..073c292baa53 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -90,14 +90,15 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
};
#endif
-#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)
-static const struct exynos_tmu_registers exynos5250_tmu_registers = {
+#if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
+static const struct exynos_tmu_registers exynos4412_tmu_registers = {
.triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
.triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
.triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
.triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON,
.triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT,
.tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
+ .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
.buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
.buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
.therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
@@ -128,7 +129,7 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
.emul_time_mask = EXYNOS_EMUL_TIME_MASK,
};
-#define EXYNOS5250_TMU_DATA \
+#define EXYNOS4412_TMU_DATA \
.threshold_falling = 10, \
.trigger_levels[0] = 85, \
.trigger_levels[1] = 103, \
@@ -162,15 +163,32 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
.temp_level = 103, \
}, \
.freq_tab_count = 2, \
- .type = SOC_ARCH_EXYNOS, \
- .registers = &exynos5250_tmu_registers, \
+ .registers = &exynos4412_tmu_registers, \
.features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
TMU_SUPPORT_EMUL_TIME)
+#endif
+#if defined(CONFIG_SOC_EXYNOS4412)
+struct exynos_tmu_init_data const exynos4412_default_tmu_data = {
+ .tmu_data = {
+ {
+ EXYNOS4412_TMU_DATA,
+ .type = SOC_ARCH_EXYNOS4412,
+ .test_mux = EXYNOS4412_MUX_ADDR_VALUE,
+ },
+ },
+ .tmu_count = 1,
+};
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5250)
struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
.tmu_data = {
- { EXYNOS5250_TMU_DATA },
+ {
+ EXYNOS4412_TMU_DATA,
+ .type = SOC_ARCH_EXYNOS5250,
+ },
},
.tmu_count = 1,
};
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
index dc7feb51099b..a1ea19d9e0a6 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.h
+++ b/drivers/thermal/samsung/exynos_tmu_data.h
@@ -95,6 +95,10 @@
#define EXYNOS_MAX_TRIGGER_PER_REG 4
+/* Exynos4412 specific */
+#define EXYNOS4412_MUX_ADDR_VALUE 6
+#define EXYNOS4412_MUX_ADDR_SHIFT 20
+
/*exynos5440 specific registers*/
#define EXYNOS5440_TMU_S0_7_TRIM 0x000
#define EXYNOS5440_TMU_S0_7_CTRL 0x020
@@ -138,7 +142,14 @@ extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
#define EXYNOS4210_TMU_DRV_DATA (NULL)
#endif
-#if (defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412))
+#if defined(CONFIG_SOC_EXYNOS4412)
+extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
+#define EXYNOS4412_TMU_DRV_DATA (&exynos4412_default_tmu_data)
+#else
+#define EXYNOS4412_TMU_DRV_DATA (NULL)
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5250)
extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
#define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data)
#else
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index eeef0e2498ca..fdb07199d9c2 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -159,7 +159,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
INIT_LIST_HEAD(&hwmon->tz_list);
strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
- hwmon->device = hwmon_device_register(&tz->device);
+ hwmon->device = hwmon_device_register(NULL);
if (IS_ERR(hwmon->device)) {
result = PTR_ERR(hwmon->device);
goto free_mem;
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 4f8b9af54a5a..5a47cc8c8f85 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -110,6 +110,7 @@ static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
} else {
dev_err(bgp->dev,
"Failed to read PCB state. Using defaults\n");
+ ret = 0;
}
}
*temp = ti_thermal_hotspot_temperature(tmp, slope, constant);
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index f36950e4134f..7722cb9d5a80 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -316,18 +316,19 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
int phy_id = topology_physical_package_id(cpu);
struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu);
bool notify = false;
+ unsigned long flags;
if (!phdev)
return;
- spin_lock(&pkg_work_lock);
+ spin_lock_irqsave(&pkg_work_lock, flags);
++pkg_work_cnt;
if (unlikely(phy_id > max_phy_id)) {
- spin_unlock(&pkg_work_lock);
+ spin_unlock_irqrestore(&pkg_work_lock, flags);
return;
}
pkg_work_scheduled[phy_id] = 0;
- spin_unlock(&pkg_work_lock);
+ spin_unlock_irqrestore(&pkg_work_lock, flags);
enable_pkg_thres_interrupt();
rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
@@ -397,6 +398,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
int thres_count;
u32 eax, ebx, ecx, edx;
u8 *temp;
+ unsigned long flags;
cpuid(6, &eax, &ebx, &ecx, &edx);
thres_count = ebx & 0x07;
@@ -420,19 +422,19 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
goto err_ret_unlock;
}
- spin_lock(&pkg_work_lock);
+ spin_lock_irqsave(&pkg_work_lock, flags);
if (topology_physical_package_id(cpu) > max_phy_id)
max_phy_id = topology_physical_package_id(cpu);
temp = krealloc(pkg_work_scheduled,
(max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
if (!temp) {
- spin_unlock(&pkg_work_lock);
+ spin_unlock_irqrestore(&pkg_work_lock, flags);
err = -ENOMEM;
goto err_ret_free;
}
pkg_work_scheduled = temp;
pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
- spin_unlock(&pkg_work_lock);
+ spin_unlock_irqrestore(&pkg_work_lock, flags);
phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu);
phy_dev_entry->first_cpu = cpu;
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index a93a424873fa..8096fcbe2dc1 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -349,7 +349,7 @@ bfin_jc_early_write(struct console *co, const char *buf, unsigned int count)
bfin_jc_straight_buffer_write(buf, count);
}
-static struct __initdata console bfin_jc_early_console = {
+static struct console bfin_jc_early_console __initdata = {
.name = "early_BFJC",
.write = bfin_jc_early_write,
.flags = CON_ANYTIME | CON_PRINTBUFFER,
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index 9bffcec5ad82..0419b69e270f 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -32,6 +32,7 @@
#include <linux/poll.h>
#include <asm/epapr_hcalls.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/cdev.h>
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 44fbebab5075..3502a7bbb69e 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -86,6 +86,21 @@ static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count)
return i;
}
+static bool hvc_dcc_check(void)
+{
+ unsigned long time = jiffies + (HZ / 10);
+
+ /* Write a test character to check if it is handled */
+ __dcc_putchar('\n');
+
+ while (time_is_after_jiffies(time)) {
+ if (!(__dcc_getstatus() & DCC_STATUS_TX))
+ return true;
+ }
+
+ return false;
+}
+
static const struct hv_ops hvc_dcc_get_put_ops = {
.get_chars = hvc_dcc_get_chars,
.put_chars = hvc_dcc_put_chars,
@@ -93,6 +108,9 @@ static const struct hv_ops hvc_dcc_get_put_ops = {
static int __init hvc_dcc_console_init(void)
{
+ if (!hvc_dcc_check())
+ return -ENODEV;
+
hvc_instantiate(0, 0, &hvc_dcc_get_put_ops);
return 0;
}
@@ -100,6 +118,9 @@ console_initcall(hvc_dcc_console_init);
static int __init hvc_dcc_init(void)
{
+ if (!hvc_dcc_check())
+ return -ENODEV;
+
hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128);
return 0;
}
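
hvc_dcc_check() above probes for a listening debugger by writing one test character and polling the TX-busy bit for roughly HZ/10 (about 100 ms). A userspace analogue of that bounded-poll shape, assuming a hypothetical tx_still_pending() stub in place of the DCC status read:

    #define _POSIX_C_SOURCE 199309L
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Stand-in for the hardware poll; the driver reads the DCC status
     * register here. This stub always reports "still busy". */
    static bool tx_still_pending(void)
    {
        return true;
    }

    /* Poll for up to 100 ms -- the userspace analogue of
     * "jiffies + HZ / 10" with time_is_after_jiffies() above. */
    static bool channel_is_alive(void)
    {
        struct timespec now, deadline;

        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_nsec += 100 * 1000 * 1000;
        if (deadline.tv_nsec >= 1000000000L) {
            deadline.tv_sec++;
            deadline.tv_nsec -= 1000000000L;
        }

        do {
            if (!tx_still_pending())
                return true;
            clock_gettime(CLOCK_MONOTONIC, &now);
        } while (now.tv_sec < deadline.tv_sec ||
                 (now.tv_sec == deadline.tv_sec &&
                  now.tv_nsec < deadline.tv_nsec));

        return false;
    }

    int main(void)
    {
        printf("console %s\n", channel_is_alive() ? "present" : "absent");
        return 0;
    }
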
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index fd17a9b804b8..db19a38c8c69 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -1354,8 +1354,7 @@ out_error_memory:
mempool_destroy(hvc_iucv_mempool);
kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
- if (hvc_iucv_filter)
- kfree(hvc_iucv_filter);
+ kfree(hvc_iucv_filter);
hvc_iucv_devices = 0; /* ensure that we do not provide any device */
return rc;
}
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index cd69b48f6dfd..6496872e2e47 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -329,7 +329,7 @@ static void udbg_init_opal_common(void)
void __init hvc_opal_init_early(void)
{
struct device_node *stdout_node = NULL;
- const u32 *termno;
+ const __be32 *termno;
const char *name = NULL;
const struct hv_ops *ops;
u32 index;
@@ -371,7 +371,7 @@ void __init hvc_opal_init_early(void)
if (!stdout_node)
return;
termno = of_get_property(stdout_node, "reg", NULL);
- index = termno ? *termno : 0;
+ index = termno ? be32_to_cpup(termno) : 0;
if (index >= MAX_NR_HVC_CONSOLES)
return;
hvc_opal_privs[index] = &hvc_opal_boot_priv;
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index c791b18cdd08..b594abfbf21e 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -48,6 +48,7 @@
#include <asm/prom.h>
#include <asm/hvsi.h>
#include <asm/udbg.h>
+#include <asm/machdep.h>
#include "hvc_console.h"
@@ -457,7 +458,9 @@ void __init hvc_vio_init_early(void)
if (hvterm_priv0.proto == HV_PROTOCOL_HVSI)
goto out;
#endif
- add_preferred_console("hvc", 0, NULL);
+ /* Check whether the user has requested a different console. */
+ if (!strstr(cmd_line, "console="))
+ add_preferred_console("hvc", 0, NULL);
hvc_instantiate(0, 0, ops);
out:
of_node_put(stdout_node);
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index c193af6a628f..636c9baad7a5 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -183,7 +183,7 @@ static int dom0_write_console(uint32_t vtermno, const char *str, int len)
{
int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str);
if (rc < 0)
- return 0;
+ return rc;
return len;
}
@@ -642,7 +642,22 @@ struct console xenboot_console = {
void xen_raw_console_write(const char *str)
{
- dom0_write_console(0, str, strlen(str));
+ ssize_t len = strlen(str);
+ int rc = 0;
+
+ if (xen_domain()) {
+ rc = dom0_write_console(0, str, len);
+#ifdef CONFIG_X86
+ if (rc == -ENOSYS && xen_hvm_domain())
+ goto outb_print;
+
+ } else if (xen_cpuid_base()) {
+ int i;
+outb_print:
+ for (i = 0; i < len; i++)
+ outb(str[i], 0xe9);
+#endif
+ }
}
void xen_raw_printk(const char *fmt, ...)
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index ac2767100df5..347050ea414a 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -9,7 +9,7 @@
static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
{
- packet->seqno = atomic_inc_return(&pv->seqno);
+ packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
/* Assumes this always succeeds; works in practice */
return pv->put_chars(pv->termno, (char *)packet, packet->len);
@@ -28,7 +28,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
/* Send version query */
q.hdr.type = VS_QUERY_PACKET_HEADER;
q.hdr.len = sizeof(struct hvsi_query);
- q.verb = VSV_SEND_VERSION_NUMBER;
+ q.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
hvsi_send_packet(pv, &q.hdr);
}
@@ -40,7 +40,7 @@ static int hvsi_send_close(struct hvsi_priv *pv)
ctrl.hdr.type = VS_CONTROL_PACKET_HEADER;
ctrl.hdr.len = sizeof(struct hvsi_control);
- ctrl.verb = VSV_CLOSE_PROTOCOL;
+ ctrl.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);
return hvsi_send_packet(pv, &ctrl.hdr);
}
@@ -69,14 +69,14 @@ static void hvsi_got_control(struct hvsi_priv *pv)
{
struct hvsi_control *pkt = (struct hvsi_control *)pv->inbuf;
- switch (pkt->verb) {
+ switch (be16_to_cpu(pkt->verb)) {
case VSV_CLOSE_PROTOCOL:
/* We restart the handshaking */
hvsi_start_handshake(pv);
break;
case VSV_MODEM_CTL_UPDATE:
/* Transition of carrier detect */
- hvsi_cd_change(pv, pkt->word & HVSI_TSCD);
+ hvsi_cd_change(pv, be32_to_cpu(pkt->word) & HVSI_TSCD);
break;
}
}
@@ -87,7 +87,7 @@ static void hvsi_got_query(struct hvsi_priv *pv)
struct hvsi_query_response r;
/* We only handle version queries */
- if (pkt->verb != VSV_SEND_VERSION_NUMBER)
+ if (be16_to_cpu(pkt->verb) != VSV_SEND_VERSION_NUMBER)
return;
pr_devel("HVSI@%x: Got version query, sending response...\n",
@@ -96,7 +96,7 @@ static void hvsi_got_query(struct hvsi_priv *pv)
/* Send version response */
r.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
r.hdr.len = sizeof(struct hvsi_query_response);
- r.verb = VSV_SEND_VERSION_NUMBER;
+ r.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
r.u.version = HVSI_VERSION;
r.query_seqno = pkt->hdr.seqno;
hvsi_send_packet(pv, &r.hdr);
@@ -112,7 +112,7 @@ static void hvsi_got_response(struct hvsi_priv *pv)
switch(r->verb) {
case VSV_SEND_MODEM_CTL_STATUS:
- hvsi_cd_change(pv, r->u.mctrl_word & HVSI_TSCD);
+ hvsi_cd_change(pv, be32_to_cpu(r->u.mctrl_word) & HVSI_TSCD);
pv->mctrl_update = 1;
break;
}
@@ -265,8 +265,7 @@ int hvsilib_read_mctrl(struct hvsi_priv *pv)
pv->mctrl_update = 0;
q.hdr.type = VS_QUERY_PACKET_HEADER;
q.hdr.len = sizeof(struct hvsi_query);
- q.hdr.seqno = atomic_inc_return(&pv->seqno);
- q.verb = VSV_SEND_MODEM_CTL_STATUS;
+ q.verb = cpu_to_be16(VSV_SEND_MODEM_CTL_STATUS);
rc = hvsi_send_packet(pv, &q.hdr);
if (rc <= 0) {
pr_devel("HVSI@%x: Error %d...\n", pv->termno, rc);
@@ -304,9 +303,9 @@ int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr)
ctrl.hdr.type = VS_CONTROL_PACKET_HEADER,
ctrl.hdr.len = sizeof(struct hvsi_control);
- ctrl.verb = VSV_SET_MODEM_CTL;
- ctrl.mask = HVSI_TSDTR;
- ctrl.word = dtr ? HVSI_TSDTR : 0;
+ ctrl.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
+ ctrl.mask = cpu_to_be32(HVSI_TSDTR);
+ ctrl.word = cpu_to_be32(dtr ? HVSI_TSDTR : 0);
return hvsi_send_packet(pv, &ctrl.hdr);
}
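
The hvsi_lib hunks add explicit byte-order conversions because the HVSI packet fields are big-endian on the wire, independent of host endianness. A userspace sketch of the same idea, using htons()/htonl() as stand-ins for cpu_to_be16()/cpu_to_be32() and an illustrative header layout (not the real struct hvsi_header):

    #include <arpa/inet.h>   /* htons/ntohl as analogues of cpu_to_be16/be32_to_cpu */
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative wire header, not the real HVSI packet layout. */
    struct wire_hdr {
        uint16_t verb;   /* stored big-endian on the wire */
        uint32_t word;   /* stored big-endian on the wire */
    };

    int main(void)
    {
        struct wire_hdr pkt;

        /* sender side: convert host order to big-endian before writing */
        pkt.verb = htons(0x0001);
        pkt.word = htonl(0x00000040);

        /* receiver side: convert back before comparing against constants */
        printf("verb=0x%04x word=0x%08x\n",
               (unsigned)ntohs(pkt.verb), (unsigned)ntohl(pkt.word));
        return 0;
    }
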
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 7a744b69c3d1..7cdd1eb9406c 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -767,7 +767,7 @@ static size_t __process_echoes(struct tty_struct *tty)
* of echo overrun before the next commit), then discard enough
* data at the tail to prevent a subsequent overrun */
while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
- if (echo_buf(ldata, tail == ECHO_OP_START)) {
+ if (echo_buf(ldata, tail) == ECHO_OP_START) {
if (echo_buf(ldata, tail) == ECHO_OP_ERASE_TAB)
tail += 3;
else
@@ -1752,20 +1752,14 @@ int is_ignored(int sig)
static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct n_tty_data *ldata = tty->disc_data;
- int canon_change = 1;
- if (old)
- canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON;
- if (canon_change) {
+ if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
ldata->line_start = ldata->canon_head = ldata->read_tail;
ldata->erasing = 0;
ldata->lnext = 0;
}
- if (canon_change && !L_ICANON(tty) && read_cnt(ldata))
- wake_up_interruptible(&tty->read_wait);
-
ldata->icanon = (L_ICANON(tty) != 0);
if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
@@ -1820,9 +1814,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
* Fix tty hang when I_IXON(tty) is cleared, but the tty
* has been stopped by STOP_CHAR(tty) before it.
*/
- if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
+ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped)
start_tty(tty);
- }
/* The termios change make the tty ready for I/O */
wake_up_interruptible(&tty->write_wait);
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index d6080c3831ef..cd0429369557 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -959,7 +959,7 @@ static int receive_flow_control(struct nozomi *dc)
dev_err(&dc->pdev->dev,
"ERROR: flow control received for non-existing port\n");
return 0;
- };
+ }
DBG1("0x%04X->0x%04X", *((u16 *)&dc->port[port].ctrl_dl),
*((u16 *)&ctrl_dl));
@@ -1025,7 +1025,7 @@ static enum ctrl_port_type port2ctrl(enum port_type port,
dev_err(&dc->pdev->dev,
"ERROR: send flow control " \
"received for non-existing port\n");
- };
+ }
return CTRL_ERROR;
}
@@ -1805,7 +1805,7 @@ static int ntty_ioctl(struct tty_struct *tty,
default:
DBG1("ERR: 0x%08X, %d", cmd, cmd);
break;
- };
+ }
return rval;
}
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 570df9d2a5d2..e33d38cb170f 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2322,7 +2322,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) {
fcr = uart_config[port->type].fcr;
- if (baud < 2400 || fifo_bug) {
+ if ((baud < 2400 && !up->dma) || fifo_bug) {
fcr &= ~UART_FCR_TRIGGER_MASK;
fcr |= UART_FCR_TRIGGER_1;
}
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index daf710f5c3fc..4658e3e0ec42 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -56,11 +56,11 @@
struct dw8250_data {
- int last_lcr;
- int last_mcr;
- int line;
- struct clk *clk;
- u8 usr_reg;
+ u8 usr_reg;
+ int last_mcr;
+ int line;
+ struct clk *clk;
+ struct uart_8250_dma dma;
};
static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
@@ -76,17 +76,33 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
return value;
}
+static void dw8250_force_idle(struct uart_port *p)
+{
+ serial8250_clear_and_reinit_fifos(container_of
+ (p, struct uart_8250_port, port));
+ (void)p->serial_in(p, UART_RX);
+}
+
static void dw8250_serial_out(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = p->private_data;
- if (offset == UART_LCR)
- d->last_lcr = value;
-
if (offset == UART_MCR)
d->last_mcr = value;
writeb(value, p->membase + (offset << p->regshift));
+
+ /* Make sure LCR write wasn't ignored */
+ if (offset == UART_LCR) {
+ int tries = 1000;
+ while (tries--) {
+ if (value == p->serial_in(p, UART_LCR))
+ return;
+ dw8250_force_idle(p);
+ writeb(value, p->membase + (UART_LCR << p->regshift));
+ }
+ dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+ }
}
static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
@@ -107,13 +123,22 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = p->private_data;
- if (offset == UART_LCR)
- d->last_lcr = value;
-
if (offset == UART_MCR)
d->last_mcr = value;
writel(value, p->membase + (offset << p->regshift));
+
+ /* Make sure LCR write wasn't ignored */
+ if (offset == UART_LCR) {
+ int tries = 1000;
+ while (tries--) {
+ if (value == p->serial_in(p, UART_LCR))
+ return;
+ dw8250_force_idle(p);
+ writel(value, p->membase + (UART_LCR << p->regshift));
+ }
+ dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+ }
}
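
The LCR handling added above is a write-then-verify loop with a bounded retry count: re-read the register, and if the value did not latch, force the UART idle and write again. A minimal sketch of that shape with stub register accessors (fake_reg, reg_read(), reg_write() and force_idle() are stand-ins, not driver functions):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the register accessors; in the driver these are
     * p->serial_in()/writel() on the LCR register. */
    static unsigned int fake_reg;
    static void reg_write(unsigned int v) { fake_reg = v; }
    static unsigned int reg_read(void) { return fake_reg; }
    static void force_idle(void) { /* driver: clear FIFOs, drain RX */ }

    /* Write a value and verify it stuck, retrying a bounded number of
     * times -- the same shape as the LCR handling added above. */
    static bool write_verified(unsigned int value, int tries)
    {
        reg_write(value);
        while (tries--) {
            if (reg_read() == value)
                return true;
            force_idle();
            reg_write(value);
        }
        return false;
    }

    int main(void)
    {
        if (!write_verified(0x03, 1000))
            fprintf(stderr, "could not latch value\n");
        return 0;
    }
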
static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
@@ -131,9 +156,8 @@ static int dw8250_handle_irq(struct uart_port *p)
if (serial8250_handle_irq(p, iir)) {
return 1;
} else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
- /* Clear the USR and write the LCR again. */
+ /* Clear the USR */
(void)p->serial_in(p, d->usr_reg);
- p->serial_out(p, UART_LCR, d->last_lcr);
return 1;
}
@@ -153,6 +177,14 @@ dw8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old)
pm_runtime_put_sync_suspend(port->dev);
}
+static bool dw8250_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct dw8250_data *data = param;
+
+ return chan->chan_id == data->dma.tx_chan_id ||
+ chan->chan_id == data->dma.rx_chan_id;
+}
+
static void dw8250_setup_port(struct uart_8250_port *up)
{
struct uart_port *p = &up->port;
@@ -241,7 +273,8 @@ static int dw8250_probe_of(struct uart_port *p,
}
#ifdef CONFIG_ACPI
-static int dw8250_probe_acpi(struct uart_8250_port *up)
+static int dw8250_probe_acpi(struct uart_8250_port *up,
+ struct dw8250_data *data)
{
const struct acpi_device_id *id;
struct uart_port *p = &up->port;
@@ -260,9 +293,7 @@ static int dw8250_probe_acpi(struct uart_8250_port *up)
if (!p->uartclk)
p->uartclk = (unsigned int)id->driver_data;
- up->dma = devm_kzalloc(p->dev, sizeof(*up->dma), GFP_KERNEL);
- if (!up->dma)
- return -ENOMEM;
+ up->dma = &data->dma;
up->dma->rxconf.src_maxburst = p->fifosize / 4;
up->dma->txconf.dst_maxburst = p->fifosize / 4;
@@ -270,7 +301,8 @@ static int dw8250_probe_acpi(struct uart_8250_port *up)
return 0;
}
#else
-static inline int dw8250_probe_acpi(struct uart_8250_port *up)
+static inline int dw8250_probe_acpi(struct uart_8250_port *up,
+ struct dw8250_data *data)
{
return -ENODEV;
}
@@ -314,6 +346,12 @@ static int dw8250_probe(struct platform_device *pdev)
uart.port.uartclk = clk_get_rate(data->clk);
}
+ data->dma.rx_chan_id = -1;
+ data->dma.tx_chan_id = -1;
+ data->dma.rx_param = data;
+ data->dma.tx_param = data;
+ data->dma.fn = dw8250_dma_filter;
+
uart.port.iotype = UPIO_MEM;
uart.port.serial_in = dw8250_serial_in;
uart.port.serial_out = dw8250_serial_out;
@@ -324,7 +362,7 @@ static int dw8250_probe(struct platform_device *pdev)
if (err)
return err;
} else if (ACPI_HANDLE(&pdev->dev)) {
- err = dw8250_probe_acpi(&uart);
+ err = dw8250_probe_acpi(&uart, data);
if (err)
return err;
} else {
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index 5f3bba12c159..d1a9078003bd 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -122,7 +122,7 @@ static int serial8250_em_probe(struct platform_device *pdev)
up.port.dev = &pdev->dev;
up.port.private_data = priv;
- clk_enable(priv->sclk);
+ clk_prepare_enable(priv->sclk);
up.port.uartclk = clk_get_rate(priv->sclk);
up.port.iotype = UPIO_MEM32;
@@ -134,7 +134,7 @@ static int serial8250_em_probe(struct platform_device *pdev)
ret = serial8250_register_8250_port(&up);
if (ret < 0) {
dev_err(&pdev->dev, "unable to register 8250 port\n");
- clk_disable(priv->sclk);
+ clk_disable_unprepare(priv->sclk);
return ret;
}
@@ -148,7 +148,7 @@ static int serial8250_em_remove(struct platform_device *pdev)
struct serial8250_em_priv *priv = platform_get_drvdata(pdev);
serial8250_unregister_port(priv->line);
- clk_disable(priv->sclk);
+ clk_disable_unprepare(priv->sclk);
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index c810da7c7a88..4697a514b80a 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -9,6 +9,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
+#undef DEBUG
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -27,8 +28,6 @@
#include "8250.h"
-#undef SERIAL_DEBUG_PCI
-
/*
* init function returns:
* > 0 - number of ports
@@ -63,7 +62,7 @@ static int pci_default_setup(struct serial_private*,
static void moan_device(const char *str, struct pci_dev *dev)
{
- printk(KERN_WARNING
+ dev_err(&dev->dev,
"%s: %s\n"
"Please send the output of lspci -vv, this\n"
"message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
@@ -233,7 +232,7 @@ static int pci_inteli960ni_init(struct pci_dev *dev)
/* is firmware started? */
pci_read_config_dword(dev, 0x44, (void *)&oldval);
if (oldval == 0x00001000L) { /* RESET value */
- printk(KERN_DEBUG "Local i960 firmware missing");
+ dev_dbg(&dev->dev, "Local i960 firmware missing\n");
return -ENODEV;
}
return 0;
@@ -827,7 +826,7 @@ static int pci_netmos_9900_numports(struct pci_dev *dev)
if (sub_serports > 0) {
return sub_serports;
} else {
- printk(KERN_NOTICE "NetMos/Mostech serial driver ignoring port on ambiguous config.\n");
+ dev_err(&dev->dev, "NetMos/Mostech serial driver ignoring port on ambiguous config.\n");
return 0;
}
}
@@ -931,7 +930,7 @@ static int pci_ite887x_init(struct pci_dev *dev)
}
if (!inta_addr[i]) {
- printk(KERN_ERR "ite887x: could not find iobase\n");
+ dev_err(&dev->dev, "ite887x: could not find iobase\n");
return -ENODEV;
}
@@ -1024,9 +1023,9 @@ static int pci_oxsemi_tornado_init(struct pci_dev *dev)
/* Tornado device */
if (deviceID == 0x07000200) {
number_uarts = ioread8(p + 4);
- printk(KERN_DEBUG
+ dev_dbg(&dev->dev,
"%d ports detected on Oxford PCI Express device\n",
- number_uarts);
+ number_uarts);
}
pci_iounmap(dev, p);
return number_uarts;
@@ -1308,6 +1307,29 @@ static int pci_default_setup(struct serial_private *priv,
return setup_port(priv, port, bar, offset, board->reg_shift);
}
+static int pci_pericom_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ unsigned int bar, offset = board->first_offset, maxnr;
+
+ bar = FL_GET_BASE(board->flags);
+ if (board->flags & FL_BASE_BARS)
+ bar += idx;
+ else
+ offset += idx * board->uart_offset;
+
+ maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
+ (board->reg_shift + 3);
+
+ if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
+ return 1;
+
+ port->port.uartclk = 14745600;
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
static int
ce4100_serial_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -1324,6 +1346,120 @@ ce4100_serial_setup(struct serial_private *priv,
return ret;
}
+#define PCI_DEVICE_ID_INTEL_BYT_UART1 0x0f0a
+#define PCI_DEVICE_ID_INTEL_BYT_UART2 0x0f0c
+
+#define BYT_PRV_CLK 0x800
+#define BYT_PRV_CLK_EN (1 << 0)
+#define BYT_PRV_CLK_M_VAL_SHIFT 1
+#define BYT_PRV_CLK_N_VAL_SHIFT 16
+#define BYT_PRV_CLK_UPDATE (1 << 31)
+
+#define BYT_GENERAL_REG 0x808
+#define BYT_GENERAL_DIS_RTS_N_OVERRIDE (1 << 3)
+
+#define BYT_TX_OVF_INT 0x820
+#define BYT_TX_OVF_INT_MASK (1 << 1)
+
+static void
+byt_set_termios(struct uart_port *p, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud = tty_termios_baud_rate(termios);
+ unsigned int m = 6912;
+ unsigned int n = 15625;
+ u32 reg;
+
+ /* For baud rates 1M, 2M, 3M and 4M the dividers must be adjusted. */
+ if (baud == 1000000 || baud == 2000000 || baud == 4000000) {
+ m = 64;
+ n = 100;
+
+ p->uartclk = 64000000;
+ } else if (baud == 3000000) {
+ m = 48;
+ n = 100;
+
+ p->uartclk = 48000000;
+ } else {
+ p->uartclk = 44236800;
+ }
+
+ /* Reset the clock */
+ reg = (m << BYT_PRV_CLK_M_VAL_SHIFT) | (n << BYT_PRV_CLK_N_VAL_SHIFT);
+ writel(reg, p->membase + BYT_PRV_CLK);
+ reg |= BYT_PRV_CLK_EN | BYT_PRV_CLK_UPDATE;
+ writel(reg, p->membase + BYT_PRV_CLK);
+
+ /*
+ * If auto-handshake mechanism is not enabled,
+ * disable rts_n override
+ */
+ reg = readl(p->membase + BYT_GENERAL_REG);
+ reg &= ~BYT_GENERAL_DIS_RTS_N_OVERRIDE;
+ if (termios->c_cflag & CRTSCTS)
+ reg |= BYT_GENERAL_DIS_RTS_N_OVERRIDE;
+ writel(reg, p->membase + BYT_GENERAL_REG);
+
+ serial8250_do_set_termios(p, termios, old);
+}
+
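
The m/n pairs chosen in byt_set_termios() are consistent with a fractional divider fed by a 100 MHz reference: 100 MHz * 6912 / 15625 = 44,236,800 Hz, * 64 / 100 = 64 MHz and * 48 / 100 = 48 MHz, matching the uartclk values assigned above. The 100 MHz figure is inferred from those numbers, not stated in the patch; a quick check:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed reference clock, inferred from the uartclk values above. */
    #define BYT_REF_CLK_HZ 100000000ULL

    static uint64_t uartclk_from_mn(unsigned int m, unsigned int n)
    {
        return BYT_REF_CLK_HZ * m / n;
    }

    int main(void)
    {
        printf("m=6912 n=15625 -> %llu Hz\n",
               (unsigned long long)uartclk_from_mn(6912, 15625)); /* 44236800 */
        printf("m=64   n=100   -> %llu Hz\n",
               (unsigned long long)uartclk_from_mn(64, 100));     /* 64000000 */
        printf("m=48   n=100   -> %llu Hz\n",
               (unsigned long long)uartclk_from_mn(48, 100));     /* 48000000 */
        return 0;
    }
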
+static bool byt_dma_filter(struct dma_chan *chan, void *param)
+{
+ return chan->chan_id == *(int *)param;
+}
+
+static int
+byt_serial_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ struct uart_8250_dma *dma;
+ int ret;
+
+ dma = devm_kzalloc(port->port.dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ switch (priv->dev->device) {
+ case PCI_DEVICE_ID_INTEL_BYT_UART1:
+ dma->rx_chan_id = 3;
+ dma->tx_chan_id = 2;
+ break;
+ case PCI_DEVICE_ID_INTEL_BYT_UART2:
+ dma->rx_chan_id = 5;
+ dma->tx_chan_id = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dma->rxconf.slave_id = dma->rx_chan_id;
+ dma->rxconf.src_maxburst = 16;
+
+ dma->txconf.slave_id = dma->tx_chan_id;
+ dma->txconf.dst_maxburst = 16;
+
+ dma->fn = byt_dma_filter;
+ dma->rx_param = &dma->rx_chan_id;
+ dma->tx_param = &dma->tx_chan_id;
+
+ ret = pci_default_setup(priv, board, port, idx);
+ port->port.iotype = UPIO_MEM;
+ port->port.type = PORT_16550A;
+ port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE);
+ port->port.set_termios = byt_set_termios;
+ port->port.fifosize = 64;
+ port->tx_loadsz = 64;
+ port->dma = dma;
+ port->capabilities = UART_CAP_FIFO | UART_CAP_AFE;
+
+ /* Disable Tx counter interrupts */
+ writel(BYT_TX_OVF_INT_MASK, port->port.membase + BYT_TX_OVF_INT);
+
+ return ret;
+}
+
static int
pci_omegapci_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -1344,17 +1480,80 @@ pci_brcm_trumanage_setup(struct serial_private *priv,
return ret;
}
+static int pci_fintek_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ struct pci_dev *pdev = priv->dev;
+ unsigned long base;
+ unsigned long iobase;
+ unsigned long ciobase = 0;
+ u8 config_base;
+
+ /*
+ * We are supposed to be able to read these from the PCI config space,
+ * but the values there don't seem to match what we need to use, so
+ * just use these hard-coded values for now, as they are correct.
+ */
+ switch (idx) {
+ case 0: iobase = 0xe000; config_base = 0x40; break;
+ case 1: iobase = 0xe008; config_base = 0x48; break;
+ case 2: iobase = 0xe010; config_base = 0x50; break;
+ case 3: iobase = 0xe018; config_base = 0x58; break;
+ case 4: iobase = 0xe020; config_base = 0x60; break;
+ case 5: iobase = 0xe028; config_base = 0x68; break;
+ case 6: iobase = 0xe030; config_base = 0x70; break;
+ case 7: iobase = 0xe038; config_base = 0x78; break;
+ case 8: iobase = 0xe040; config_base = 0x80; break;
+ case 9: iobase = 0xe048; config_base = 0x88; break;
+ case 10: iobase = 0xe050; config_base = 0x90; break;
+ case 11: iobase = 0xe058; config_base = 0x98; break;
+ default:
+ /* Unknown number of ports, get out of here */
+ return -EINVAL;
+ }
+
+ if (idx < 4) {
+ base = pci_resource_start(priv->dev, 3);
+ ciobase = (int)(base + (0x8 * idx));
+ }
+
+ dev_dbg(&pdev->dev, "%s: idx=%d iobase=0x%lx ciobase=0x%lx config_base=0x%2x\n",
+ __func__, idx, iobase, ciobase, config_base);
+
+ /* Enable UART I/O port */
+ pci_write_config_byte(pdev, config_base + 0x00, 0x01);
+
+ /* Select 128-byte FIFO and 8x FIFO threshold */
+ pci_write_config_byte(pdev, config_base + 0x01, 0x33);
+
+ /* LSB UART */
+ pci_write_config_byte(pdev, config_base + 0x04, (u8)(iobase & 0xff));
+
+ /* MSB UART */
+ pci_write_config_byte(pdev, config_base + 0x05, (u8)((iobase & 0xff00) >> 8));
+
+ /* irq number, this usually fails, but the spec says to do it anyway. */
+ pci_write_config_byte(pdev, config_base + 0x06, pdev->irq);
+
+ port->port.iotype = UPIO_PORT;
+ port->port.iobase = iobase;
+ port->port.mapbase = 0;
+ port->port.membase = NULL;
+ port->port.regshift = 0;
+
+ return 0;
+}
+
static int skip_tx_en_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
{
port->port.flags |= UPF_NO_TXEN_TEST;
- printk(KERN_DEBUG "serial8250: skipping TxEn test for device "
- "[%04x:%04x] subsystem [%04x:%04x]\n",
- priv->dev->vendor,
- priv->dev->device,
- priv->dev->subsystem_vendor,
- priv->dev->subsystem_device);
+ dev_dbg(&priv->dev->dev,
+ "serial8250: skipping TxEn test for device [%04x:%04x] subsystem [%04x:%04x]\n",
+ priv->dev->vendor, priv->dev->device,
+ priv->dev->subsystem_vendor, priv->dev->subsystem_device);
return pci_default_setup(priv, board, port, idx);
}
@@ -1662,6 +1861,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = kt_serial_setup,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BYT_UART1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = byt_serial_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BYT_UART2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = byt_serial_setup,
+ },
/*
* ITE
*/
@@ -1826,6 +2039,31 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.exit = pci_plx9050_exit,
},
/*
+ * Pericom
+ */
+ {
+ .vendor = 0x12d8,
+ .device = 0x7952,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = 0x12d8,
+ .device = 0x7954,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = 0x12d8,
+ .device = 0x7958,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+
+ /*
* PLX
*/
{
@@ -2255,6 +2493,27 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = pci_brcm_trumanage_setup,
},
+ {
+ .vendor = 0x1c29,
+ .device = 0x1104,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_fintek_setup,
+ },
+ {
+ .vendor = 0x1c29,
+ .device = 0x1108,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_fintek_setup,
+ },
+ {
+ .vendor = 0x1c29,
+ .device = 0x1112,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_fintek_setup,
+ },
/*
* Default "match everything" terminator entry
@@ -2449,9 +2708,13 @@ enum pci_board_num_t {
pbn_ADDIDATA_PCIe_4_3906250,
pbn_ADDIDATA_PCIe_8_3906250,
pbn_ce4100_1_115200,
+ pbn_byt,
pbn_omegapci,
pbn_NETMOS9900_2s_115200,
pbn_brcm_trumanage,
+ pbn_fintek_4,
+ pbn_fintek_8,
+ pbn_fintek_12,
};
/*
@@ -3185,6 +3448,13 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 921600,
.reg_shift = 2,
},
+ [pbn_byt] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 2764800,
+ .uart_offset = 0x80,
+ .reg_shift = 2,
+ },
[pbn_omegapci] = {
.flags = FL_BASE0,
.num_ports = 8,
@@ -3202,6 +3472,24 @@ static struct pciserial_board pci_boards[] = {
.reg_shift = 2,
.base_baud = 115200,
},
+ [pbn_fintek_4] = {
+ .num_ports = 4,
+ .uart_offset = 8,
+ .base_baud = 115200,
+ .first_offset = 0x40,
+ },
+ [pbn_fintek_8] = {
+ .num_ports = 8,
+ .uart_offset = 8,
+ .base_baud = 115200,
+ .first_offset = 0x40,
+ },
+ [pbn_fintek_12] = {
+ .num_ports = 12,
+ .uart_offset = 8,
+ .base_baud = 115200,
+ .first_offset = 0x40,
+ },
};
static const struct pci_device_id blacklist[] = {
@@ -3362,14 +3650,15 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
if (quirk->setup(priv, board, &uart, i))
break;
-#ifdef SERIAL_DEBUG_PCI
- printk(KERN_DEBUG "Setup PCI port: port %lx, irq %d, type %d\n",
- uart.port.iobase, uart.port.irq, uart.port.iotype);
-#endif
+ dev_dbg(&dev->dev, "Setup PCI port: port %lx, irq %d, type %d\n",
+ uart.port.iobase, uart.port.irq, uart.port.iotype);
priv->line[i] = serial8250_register_8250_port(&uart);
if (priv->line[i] < 0) {
- printk(KERN_WARNING "Couldn't register serial port %s: %d\n", pci_name(dev), priv->line[i]);
+ dev_err(&dev->dev,
+ "Couldn't register serial port %lx, irq %d, type %d, error %d\n",
+ uart.port.iobase, uart.port.irq,
+ uart.port.iotype, priv->line[i]);
break;
}
}
@@ -3462,7 +3751,7 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
}
if (ent->driver_data >= ARRAY_SIZE(pci_boards)) {
- printk(KERN_ERR "pci_init_one: invalid driver_data: %ld\n",
+ dev_err(&dev->dev, "invalid driver_data: %ld\n",
ent->driver_data);
return -EINVAL;
}
@@ -3520,8 +3809,6 @@ static void pciserial_remove_one(struct pci_dev *dev)
{
struct serial_private *priv = pci_get_drvdata(dev);
- pci_set_drvdata(dev, NULL);
-
pciserial_remove_ports(priv);
pci_disable_device(dev);
@@ -3555,7 +3842,7 @@ static int pciserial_resume_one(struct pci_dev *dev)
err = pci_enable_device(dev);
/* FIXME: We cannot simply error out here */
if (err)
- printk(KERN_ERR "pciserial: Unable to re-enable ports, trying to continue.\n");
+ dev_err(&dev->dev, "Unable to re-enable ports, trying to continue.\n");
pciserial_resume_ports(priv);
}
return 0;
@@ -4848,6 +5135,15 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CE4100_UART,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_ce4100_1_115200 },
+ /* Intel BayTrail */
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT_UART1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+ pbn_byt },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT_UART2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+ pbn_byt },
/*
* Cronyx Omega PCI
@@ -4918,6 +5214,11 @@ static struct pci_device_id serial_pci_tbl[] = {
0,
0, pbn_exar_XR17V358 },
+ /* Fintek PCI serial cards */
+ { PCI_DEVICE(0x1c29, 0x1104), .driver_data = pbn_fintek_4 },
+ { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 },
+ { PCI_DEVICE(0x1c29, 0x1112), .driver_data = pbn_fintek_12 },
+
/*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index febd45cd5027..a3817ab8602f 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -709,7 +709,7 @@ config SERIAL_IP22_ZILOG_CONSOLE
config SERIAL_SH_SCI
tristate "SuperH SCI(F) serial port support"
- depends on HAVE_CLK && (SUPERH || ARCH_SHMOBILE)
+ depends on HAVE_CLK && (SUPERH || ARM || COMPILE_TEST)
select SERIAL_CORE
config SERIAL_SH_SCI_NR_UARTS
@@ -1512,6 +1512,7 @@ config SERIAL_FSL_LPUART_CONSOLE
config SERIAL_ST_ASC
tristate "ST ASC serial port support"
select SERIAL_CORE
+ depends on ARM || COMPILE_TEST
help
This driver is for the on-chip Asychronous Serial Controller on
STMicroelectronics STi SoCs.
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 8b90f0b6dfdf..33bd8606be62 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -728,7 +728,6 @@ static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
amba_set_drvdata(dev, uap);
ret = uart_add_one_port(&amba_reg, &uap->port);
if (ret) {
- amba_set_drvdata(dev, NULL);
amba_ports[i] = NULL;
clk_put(uap->clk);
unmap:
@@ -745,8 +744,6 @@ static int pl010_remove(struct amba_device *dev)
struct uart_amba_port *uap = amba_get_drvdata(dev);
int i;
- amba_set_drvdata(dev, NULL);
-
uart_remove_one_port(&amba_reg, &uap->port);
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index aaa22867e656..7203864992a5 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2147,7 +2147,6 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
amba_set_drvdata(dev, uap);
ret = uart_add_one_port(&amba_reg, &uap->port);
if (ret) {
- amba_set_drvdata(dev, NULL);
amba_ports[i] = NULL;
pl011_dma_remove(uap);
}
@@ -2160,8 +2159,6 @@ static int pl011_remove(struct amba_device *dev)
struct uart_amba_port *uap = amba_get_drvdata(dev);
int i;
- amba_set_drvdata(dev, NULL);
-
uart_remove_one_port(&amba_reg, &uap->port);
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 569872f4c9b8..c9f5c9dcc15c 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -533,7 +533,7 @@ arc_uart_init_one(struct platform_device *pdev, int dev_id)
unsigned long *plat_data;
struct arc_uart_port *uart = &arc_uart_ports[dev_id];
- plat_data = (unsigned long *)dev_get_platdata(&pdev->dev);
+ plat_data = dev_get_platdata(&pdev->dev);
if (!plat_data)
return -ENODEV;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index d067285a2d20..c7d99af46a96 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -99,6 +99,7 @@ static void atmel_stop_rx(struct uart_port *port);
#define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR)
#define UART_PUT_TTGR(port, v) __raw_writel(v, (port)->membase + ATMEL_US_TTGR)
#define UART_GET_IP_NAME(port) __raw_readl((port)->membase + ATMEL_US_NAME)
+#define UART_GET_IP_VERSION(port) __raw_readl((port)->membase + ATMEL_US_VERSION)
/* PDC registers */
#define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
@@ -1499,10 +1500,11 @@ static void atmel_set_ops(struct uart_port *port)
/*
* Get ip name usart or uart
*/
-static int atmel_get_ip_name(struct uart_port *port)
+static void atmel_get_ip_name(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int name = UART_GET_IP_NAME(port);
+ u32 version;
int usart, uart;
/* usart and uart ascii */
usart = 0x55534152;
@@ -1517,11 +1519,23 @@ static int atmel_get_ip_name(struct uart_port *port)
dev_dbg(port->dev, "This is uart\n");
atmel_port->is_usart = false;
} else {
- dev_err(port->dev, "Not supported ip name, set to uart\n");
- return -EINVAL;
+ /* fallback for older SoCs: use version field */
+ version = UART_GET_IP_VERSION(port);
+ switch (version) {
+ case 0x302:
+ case 0x10213:
+ dev_dbg(port->dev, "This version is usart\n");
+ atmel_port->is_usart = true;
+ break;
+ case 0x203:
+ case 0x10202:
+ dev_dbg(port->dev, "This version is uart\n");
+ atmel_port->is_usart = false;
+ break;
+ default:
+ dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
+ }
}
-
- return 0;
}
/*
@@ -2405,9 +2419,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
/*
* Get port name of usart or uart
*/
- ret = atmel_get_ip_name(&port->uart);
- if (ret < 0)
- goto err_add_port;
+ atmel_get_ip_name(&port->uart);
return 0;
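
For reference, a stand-alone user-space sketch of the name/version fallback that the patched atmel_get_ip_name() performs. Only the 0x55534152 ("USAR") name constant and the version codes visible in the hunk above are taken from the patch; the plain-UART name constant is not shown in the excerpt, so the sketch checks only the USART name, and the helper and sample inputs are made up for illustration rather than driver code.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Mirrors the decision flow of the patched atmel_get_ip_name(). */
static bool classify_is_usart(uint32_t name, uint32_t version)
{
	if (name == 0x55534152)		/* ASCII "USAR": the USART name */
		return true;

	/* Name register not recognised: fall back to the version register. */
	switch (version) {
	case 0x302:
	case 0x10213:
		return true;		/* USART versions listed in the patch */
	case 0x203:
	case 0x10202:
		return false;		/* UART versions listed in the patch */
	default:
		return false;		/* unknown: default to plain UART */
	}
}

int main(void)
{
	/* made-up register readings, purely for illustration */
	printf("name \"USAR\"            -> %s\n",
	       classify_is_usart(0x55534152, 0) ? "usart" : "uart");
	printf("unknown name, ver 0x302 -> %s\n",
	       classify_is_usart(0, 0x302) ? "usart" : "uart");
	printf("unknown name, ver 0x203 -> %s\n",
	       classify_is_usart(0, 0x203) ? "usart" : "uart");
	return 0;
}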
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index 87636cc61a21..4f229703328b 100644
--- a/drivers/tty/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -766,9 +766,8 @@ static int sport_uart_probe(struct platform_device *pdev)
return -ENOMEM;
}
- ret = peripheral_request_list(
- (unsigned short *)dev_get_platdata(&pdev->dev),
- DRV_NAME);
+ ret = peripheral_request_list(dev_get_platdata(&pdev->dev),
+ DRV_NAME);
if (ret) {
dev_err(&pdev->dev,
"Fail to request SPORT peripherals\n");
@@ -844,8 +843,7 @@ static int sport_uart_probe(struct platform_device *pdev)
out_error_unmap:
iounmap(sport->port.membase);
out_error_free_peripherals:
- peripheral_free_list(
- (unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
out_error_free_mem:
kfree(sport);
bfin_sport_uart_ports[pdev->id] = NULL;
@@ -864,8 +862,7 @@ static int sport_uart_remove(struct platform_device *pdev)
if (sport) {
uart_remove_one_port(&sport_uart_reg, &sport->port);
iounmap(sport->port.membase);
- peripheral_free_list(
- (unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
kfree(sport);
bfin_sport_uart_ports[pdev->id] = NULL;
}
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 3c75e8e04028..869ceba2ec57 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -680,7 +680,7 @@ static int bfin_serial_startup(struct uart_port *port)
default:
uart_dma_ch_rx = uart_dma_ch_tx = 0;
break;
- };
+ }
if (uart_dma_ch_rx &&
request_dma(uart_dma_ch_rx, "BFIN_UART_RX") < 0) {
@@ -726,7 +726,7 @@ static int bfin_serial_startup(struct uart_port *port)
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
if (uart->cts_pin >= 0) {
if (request_irq(uart->status_irq, bfin_serial_mctrl_cts_int,
- IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
+ 0, "BFIN_UART_MODEM_STATUS", uart)) {
uart->cts_pin = -1;
dev_info(port->dev, "Unable to attach BlackFin UART Modem Status interrupt.\n");
}
@@ -765,7 +765,7 @@ static void bfin_serial_shutdown(struct uart_port *port)
break;
default:
break;
- };
+ }
#endif
free_irq(uart->rx_irq, uart);
free_irq(uart->tx_irq, uart);
@@ -1240,7 +1240,7 @@ static int bfin_serial_probe(struct platform_device *pdev)
*/
#endif
ret = peripheral_request_list(
- (unsigned short *)dev_get_platdata(&pdev->dev),
+ dev_get_platdata(&pdev->dev),
DRIVER_NAME);
if (ret) {
dev_err(&pdev->dev,
@@ -1358,8 +1358,7 @@ static int bfin_serial_probe(struct platform_device *pdev)
out_error_unmap:
iounmap(uart->port.membase);
out_error_free_peripherals:
- peripheral_free_list(
- (unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
out_error_free_mem:
kfree(uart);
bfin_serial_ports[pdev->id] = NULL;
@@ -1377,8 +1376,7 @@ static int bfin_serial_remove(struct platform_device *pdev)
if (uart) {
uart_remove_one_port(&bfin_serial_reg, &uart->port);
iounmap(uart->port.membase);
- peripheral_free_list(
- (unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
kfree(uart);
bfin_serial_ports[pdev->id] = NULL;
}
@@ -1432,8 +1430,8 @@ static int bfin_earlyprintk_probe(struct platform_device *pdev)
return -ENOENT;
}
- ret = peripheral_request_list(
- (unsigned short *)dev_get_platdata(&pdev->dev), DRIVER_NAME);
+ ret = peripheral_request_list(dev_get_platdata(&pdev->dev),
+ DRIVER_NAME);
if (ret) {
dev_err(&pdev->dev,
"fail to request bfin serial peripherals\n");
@@ -1463,8 +1461,7 @@ static int bfin_earlyprintk_probe(struct platform_device *pdev)
return 0;
out_error_free_peripherals:
- peripheral_free_list(
- (unsigned short *)dev_get_platdata(&pdev->dev));
+ peripheral_free_list(dev_get_platdata(&pdev->dev));
return ret;
}
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 7e4e4088471c..8d0b994357c8 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -459,7 +459,6 @@ static int uart_clps711x_probe(struct platform_device *pdev)
ret = uart_register_driver(&s->uart);
if (ret) {
dev_err(&pdev->dev, "Registering UART driver failed\n");
- devm_clk_put(&pdev->dev, s->uart_clk);
return ret;
}
@@ -487,7 +486,6 @@ static int uart_clps711x_remove(struct platform_device *pdev)
for (i = 0; i < UART_CLPS711X_NR; i++)
uart_remove_one_port(&s->uart, &s->port[i]);
- devm_clk_put(&pdev->dev, s->uart_clk);
uart_unregister_driver(&s->uart);
return 0;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 1a535f70dc41..7d76214612c7 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -41,6 +41,8 @@
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/fs_uart_pd.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
@@ -1207,7 +1209,7 @@ static int cpm_uart_init_port(struct device_node *np,
pinfo->port.fifosize = pinfo->tx_nrfifos * pinfo->tx_fifosize;
spin_lock_init(&pinfo->port.lock);
- pinfo->port.irq = of_irq_to_resource(np, 0, NULL);
+ pinfo->port.irq = irq_of_parse_and_map(np, 0);
if (pinfo->port.irq == NO_IRQ) {
ret = -EINVAL;
goto out_pram;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
index 18f79575894a..527a969b0952 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
@@ -45,6 +45,7 @@
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include "cpm_uart.h"
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index af286e6713eb..590390970996 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1008,7 +1008,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
return -ENODEV;
}
- pl_data = (struct ifx_modem_platform_data *)dev_get_platdata(&spi->dev);
+ pl_data = dev_get_platdata(&spi->dev);
if (!pl_data) {
dev_err(&spi->dev, "missing platform data!");
return -ENODEV;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 042aa077b5b3..b2cfdb661947 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -223,8 +223,7 @@ struct imx_port {
struct dma_chan *dma_chan_rx, *dma_chan_tx;
struct scatterlist rx_sgl, tx_sgl[2];
void *rx_buf;
- unsigned int rx_bytes, tx_bytes;
- struct work_struct tsk_dma_rx, tsk_dma_tx;
+ unsigned int tx_bytes;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
};
@@ -505,34 +504,25 @@ static void dma_tx_callback(void *data)
dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
return;
}
-
- schedule_work(&sport->tsk_dma_tx);
}
-static void dma_tx_work(struct work_struct *w)
+static void imx_dma_tx(struct imx_port *sport)
{
- struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_tx);
struct circ_buf *xmit = &sport->port.state->xmit;
struct scatterlist *sgl = sport->tx_sgl;
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan = sport->dma_chan_tx;
struct device *dev = sport->port.dev;
enum dma_status status;
- unsigned long flags;
int ret;
- status = chan->device->device_tx_status(chan, (dma_cookie_t)0, NULL);
+ status = dmaengine_tx_status(chan, (dma_cookie_t)0, NULL);
if (DMA_IN_PROGRESS == status)
return;
- spin_lock_irqsave(&sport->port.lock, flags);
sport->tx_bytes = uart_circ_chars_pending(xmit);
- if (sport->tx_bytes == 0) {
- spin_unlock_irqrestore(&sport->port.lock, flags);
- return;
- }
- if (xmit->tail > xmit->head) {
+ if (xmit->tail > xmit->head && xmit->head > 0) {
sport->dma_tx_nents = 2;
sg_init_table(sgl, 2);
sg_set_buf(sgl, xmit->buf + xmit->tail,
@@ -542,7 +532,6 @@ static void dma_tx_work(struct work_struct *w)
sport->dma_tx_nents = 1;
sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
if (ret == 0) {
@@ -609,11 +598,7 @@ static void imx_start_tx(struct uart_port *port)
}
if (sport->dma_is_enabled) {
- /*
- * We may in the interrupt context, so arise a work_struct to
- * do the real job.
- */
- schedule_work(&sport->tsk_dma_tx);
+ imx_dma_tx(sport);
return;
}
@@ -732,6 +717,7 @@ out:
return IRQ_HANDLED;
}
+static int start_rx_dma(struct imx_port *sport);
/*
 * If the RXFIFO is filled with some data, start a DMA
 * operation to receive it.
@@ -750,7 +736,7 @@ static void imx_dma_rxint(struct imx_port *sport)
writel(temp, sport->port.membase + UCR1);
/* tell the DMA to receive the data. */
- schedule_work(&sport->tsk_dma_rx);
+ start_rx_dma(sport);
}
}
@@ -795,8 +781,15 @@ static irqreturn_t imx_int(int irq, void *dev_id)
static unsigned int imx_tx_empty(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
+ unsigned int ret;
+
+ ret = (readl(sport->port.membase + USR2) & USR2_TXDC) ? TIOCSER_TEMT : 0;
- return (readl(sport->port.membase + USR2) & USR2_TXDC) ? TIOCSER_TEMT : 0;
+ /* If the TX DMA is working, return 0. */
+ if (sport->dma_is_enabled && sport->dma_is_txing)
+ ret = 0;
+
+ return ret;
}
/*
@@ -865,22 +858,6 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
}
#define RX_BUF_SIZE (PAGE_SIZE)
-static int start_rx_dma(struct imx_port *sport);
-static void dma_rx_work(struct work_struct *w)
-{
- struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_rx);
- struct tty_port *port = &sport->port.state->port;
-
- if (sport->rx_bytes) {
- tty_insert_flip_string(port, sport->rx_buf, sport->rx_bytes);
- tty_flip_buffer_push(port);
- sport->rx_bytes = 0;
- }
-
- if (sport->dma_is_rxing)
- start_rx_dma(sport);
-}
-
static void imx_rx_dma_done(struct imx_port *sport)
{
unsigned long temp;
@@ -912,6 +889,7 @@ static void dma_rx_callback(void *data)
struct imx_port *sport = data;
struct dma_chan *chan = sport->dma_chan_rx;
struct scatterlist *sgl = &sport->rx_sgl;
+ struct tty_port *port = &sport->port.state->port;
struct dma_tx_state state;
enum dma_status status;
unsigned int count;
@@ -919,13 +897,15 @@ static void dma_rx_callback(void *data)
/* unmap it first */
dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);
- status = chan->device->device_tx_status(chan, (dma_cookie_t)0, &state);
+ status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
count = RX_BUF_SIZE - state.residue;
dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
if (count) {
- sport->rx_bytes = count;
- schedule_work(&sport->tsk_dma_rx);
+ tty_insert_flip_string(port, sport->rx_buf, count);
+ tty_flip_buffer_push(port);
+
+ start_rx_dma(sport);
} else
imx_rx_dma_done(sport);
}
@@ -1007,7 +987,6 @@ static int imx_uart_dma_init(struct imx_port *sport)
ret = -ENOMEM;
goto err;
}
- sport->rx_bytes = 0;
/* Prepare for TX : */
sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
@@ -1038,11 +1017,7 @@ err:
static void imx_enable_dma(struct imx_port *sport)
{
unsigned long temp;
- struct tty_port *port = &sport->port.state->port;
- port->low_latency = 1;
- INIT_WORK(&sport->tsk_dma_tx, dma_tx_work);
- INIT_WORK(&sport->tsk_dma_rx, dma_rx_work);
init_waitqueue_head(&sport->dma_wait);
/* set UCR1 */
@@ -1063,7 +1038,6 @@ static void imx_enable_dma(struct imx_port *sport)
static void imx_disable_dma(struct imx_port *sport)
{
unsigned long temp;
- struct tty_port *port = &sport->port.state->port;
/* clear UCR1 */
temp = readl(sport->port.membase + UCR1);
@@ -1081,7 +1055,6 @@ static void imx_disable_dma(struct imx_port *sport)
writel(temp, sport->port.membase + UCR4);
sport->dma_is_enabled = 0;
- port->low_latency = 0;
}
/* half the RX buffer size */
@@ -1303,6 +1276,16 @@ static void imx_shutdown(struct uart_port *port)
clk_disable_unprepare(sport->clk_ipg);
}
+static void imx_flush_buffer(struct uart_port *port)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+
+ if (sport->dma_is_enabled) {
+ sport->tx_bytes = 0;
+ dmaengine_terminate_all(sport->dma_chan_tx);
+ }
+}
+
static void
imx_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
@@ -1539,7 +1522,7 @@ imx_verify_port(struct uart_port *port, struct serial_struct *ser)
ret = -EINVAL;
if (sport->port.uartclk / 16 != ser->baud_base)
ret = -EINVAL;
- if ((void *)sport->port.mapbase != ser->iomem_base)
+ if (sport->port.mapbase != (unsigned long)ser->iomem_base)
ret = -EINVAL;
if (sport->port.iobase != ser->port)
ret = -EINVAL;
@@ -1623,6 +1606,7 @@ static struct uart_ops imx_pops = {
.break_ctl = imx_break_ctl,
.startup = imx_startup,
.shutdown = imx_shutdown,
+ .flush_buffer = imx_flush_buffer,
.set_termios = imx_set_termios,
.type = imx_type,
.release_port = imx_release_port,
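
As a side note on the new imx_dma_tx() path above: the scatterlist gets two entries only when the pending data wraps past the end of the circular buffer. A minimal stand-alone sketch of that split, using a made-up buffer size and made-up head/tail positions (the kernel scatterlist API is deliberately not used here):

#include <stdio.h>

#define BUF_SIZE 4096	/* stands in for UART_XMIT_SIZE */

struct seg { unsigned int off, len; };

/*
 * Return the number of DMA segments (0, 1 or 2) needed for the pending
 * bytes between tail and head of a power-of-two circular buffer,
 * mirroring the sg setup in the patched imx_dma_tx().
 */
static int split_circ(unsigned int head, unsigned int tail, struct seg seg[2])
{
	unsigned int pending = (head - tail) & (BUF_SIZE - 1);

	if (!pending)
		return 0;

	if (head > tail || head == 0) {	/* contiguous run */
		seg[0].off = tail;
		seg[0].len = pending;
		return 1;
	}

	/* wrapped: tail..end of buffer, then start of buffer..head */
	seg[0].off = tail;
	seg[0].len = BUF_SIZE - tail;
	seg[1].off = 0;
	seg[1].len = head;
	return 2;
}

int main(void)
{
	struct seg seg[2];
	int n, i;

	n = split_circ(100, 4000, seg);	/* made-up wrapped case */
	printf("%d segment(s)\n", n);
	for (i = 0; i < n; i++)
		printf("  off=%u len=%u\n", seg[i].off, seg[i].len);
	return 0;
}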
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index cb3c81eb0996..1d9420548e16 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -832,7 +832,7 @@ ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag,
up->curregs[5] |= Tx8;
up->parity_mask = 0xff;
break;
- };
+ }
up->curregs[4] &= ~0x0c;
if (cflag & CSTOPB)
up->curregs[4] |= SB2;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index b2e707aa603a..8d71e4047bb3 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -690,7 +690,7 @@ static void max310x_handle_tx(struct uart_port *port)
max310x_port_write(port, MAX310X_THR_REG,
xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- };
+ }
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index d3db042f649e..52c930fac210 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -293,7 +293,7 @@ static void serial_hsu_enable_ms(struct uart_port *port)
serial_out(up, UART_IER, up->ier);
}
-void hsu_dma_tx(struct uart_hsu_port *up)
+static void hsu_dma_tx(struct uart_hsu_port *up)
{
struct circ_buf *xmit = &up->port.state->xmit;
struct hsu_dma_buffer *dbuf = &up->txbuf;
@@ -340,7 +340,8 @@ void hsu_dma_tx(struct uart_hsu_port *up)
}
/* The buffer is already cache coherent */
-void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc, struct hsu_dma_buffer *dbuf)
+static void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc,
+ struct hsu_dma_buffer *dbuf)
{
dbuf->ofs = 0;
@@ -386,7 +387,8 @@ static void serial_hsu_stop_tx(struct uart_port *port)
/* This is always called in spinlock protected mode, so
* modify timeout timer is safe here */
-void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts, unsigned long *flags)
+static void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts,
+ unsigned long *flags)
{
struct hsu_dma_buffer *dbuf = &up->rxbuf;
struct hsu_dma_chan *chan = up->rxc;
@@ -1183,7 +1185,7 @@ static struct console serial_hsu_console = {
#define SERIAL_HSU_CONSOLE NULL
#endif
-struct uart_ops serial_hsu_pops = {
+static struct uart_ops serial_hsu_pops = {
.tx_empty = serial_hsu_tx_empty,
.set_mctrl = serial_hsu_set_mctrl,
.get_mctrl = serial_hsu_get_mctrl,
@@ -1451,7 +1453,6 @@ static void serial_hsu_remove(struct pci_dev *pdev)
uart_remove_one_port(&serial_hsu_reg, &up->port);
}
- pci_set_drvdata(pdev, NULL);
free_irq(pdev->irq, priv);
pci_disable_device(pdev);
}
@@ -1504,4 +1505,4 @@ module_init(hsu_pci_init);
module_exit(hsu_pci_exit);
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:medfield-hsu");
+MODULE_DEVICE_TABLE(pci, pci_ids);
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 5be1df39f9f5..ec06505e3ae6 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1301,7 +1301,6 @@ static struct uart_ops mpc52xx_uart_ops = {
.shutdown = mpc52xx_uart_shutdown,
.set_termios = mpc52xx_uart_set_termios,
/* .pm = mpc52xx_uart_pm, Not supported yet */
-/* .set_wake = mpc52xx_uart_set_wake, Not supported yet */
.type = mpc52xx_uart_type,
.release_port = mpc52xx_uart_release_port,
.request_port = mpc52xx_uart_request_port,
@@ -1766,7 +1765,7 @@ mpc52xx_uart_of_remove(struct platform_device *op)
static int
mpc52xx_uart_of_suspend(struct platform_device *op, pm_message_t state)
{
- struct uart_port *port = (struct uart_port *) platform_get_drvdata(op);
+ struct uart_port *port = platform_get_drvdata(op);
if (port)
uart_suspend_port(&mpc52xx_uart_driver, port);
@@ -1777,7 +1776,7 @@ mpc52xx_uart_of_suspend(struct platform_device *op, pm_message_t state)
static int
mpc52xx_uart_of_resume(struct platform_device *op)
{
- struct uart_port *port = (struct uart_port *) platform_get_drvdata(op);
+ struct uart_port *port = platform_get_drvdata(op);
if (port)
uart_resume_port(&mpc52xx_uart_driver, port);
diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index 8d702677acc5..e30a3ca3cea3 100644
--- a/drivers/tty/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
@@ -2030,7 +2030,7 @@ static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
{
struct mpsc_pdata *pdata;
- pdata = (struct mpsc_pdata *)dev_get_platdata(&pd->dev);
+ pdata = dev_get_platdata(&pd->dev);
pi->port.uartclk = pdata->brg_clk_freq;
pi->port.iotype = UPIO_MEM;
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index a67e7081f001..db0448ae59dc 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -43,6 +43,7 @@
#include <linux/kthread.h>
#include <linux/spi/spi.h>
+#include <linux/pm.h>
#include "mrst_max3110.h"
@@ -61,6 +62,7 @@ struct uart_max3110 {
struct task_struct *main_thread;
struct task_struct *read_thread;
struct mutex thread_mutex;
+ struct mutex io_mutex;
u32 baud;
u16 cur_conf;
@@ -90,6 +92,7 @@ static int max3110_write_then_read(struct uart_max3110 *max,
struct spi_transfer x;
int ret;
+ mutex_lock(&max->io_mutex);
spi_message_init(&message);
memset(&x, 0, sizeof x);
x.len = len;
@@ -104,6 +107,7 @@ static int max3110_write_then_read(struct uart_max3110 *max,
/* Do the i/o */
ret = spi_sync(spi, &message);
+ mutex_unlock(&max->io_mutex);
return ret;
}
@@ -491,19 +495,9 @@ static int serial_m3110_startup(struct uart_port *port)
port->state->port.low_latency = 1;
if (max->irq) {
- max->read_thread = NULL;
- ret = request_irq(max->irq, serial_m3110_irq,
- IRQ_TYPE_EDGE_FALLING, "max3110", max);
- if (ret) {
- max->irq = 0;
- pr_err(PR_FMT "unable to allocate IRQ, polling\n");
- } else {
- /* Enable RX IRQ only */
- config |= WC_RXA_IRQ_ENABLE;
- }
- }
-
- if (max->irq == 0) {
+ /* Enable RX IRQ only */
+ config |= WC_RXA_IRQ_ENABLE;
+ } else {
/* If IRQ is disabled, start a read thread for input data */
max->read_thread =
kthread_run(max3110_read_thread, max, "max3110_read");
@@ -517,8 +511,6 @@ static int serial_m3110_startup(struct uart_port *port)
ret = max3110_out(max, config);
if (ret) {
- if (max->irq)
- free_irq(max->irq, max);
if (max->read_thread)
kthread_stop(max->read_thread);
max->read_thread = NULL;
@@ -540,9 +532,6 @@ static void serial_m3110_shutdown(struct uart_port *port)
max->read_thread = NULL;
}
- if (max->irq)
- free_irq(max->irq, max);
-
/* Disable interrupts from this port */
config = WC_TAG | WC_SW_SHDI;
max3110_out(max, config);
@@ -749,7 +738,8 @@ static int serial_m3110_suspend(struct device *dev)
struct spi_device *spi = to_spi_device(dev);
struct uart_max3110 *max = spi_get_drvdata(spi);
- disable_irq(max->irq);
+ if (max->irq > 0)
+ disable_irq(max->irq);
uart_suspend_port(&serial_m3110_reg, &max->port);
max3110_out(max, max->cur_conf | WC_SW_SHDI);
return 0;
@@ -762,7 +752,8 @@ static int serial_m3110_resume(struct device *dev)
max3110_out(max, max->cur_conf);
uart_resume_port(&serial_m3110_reg, &max->port);
- enable_irq(max->irq);
+ if (max->irq > 0)
+ enable_irq(max->irq);
return 0;
}
@@ -803,6 +794,7 @@ static int serial_m3110_probe(struct spi_device *spi)
max->irq = (u16)spi->irq;
mutex_init(&max->thread_mutex);
+ mutex_init(&max->io_mutex);
max->word_7bits = 0;
max->parity = 0;
@@ -840,6 +832,16 @@ static int serial_m3110_probe(struct spi_device *spi)
goto err_kthread;
}
+ if (max->irq) {
+ ret = request_irq(max->irq, serial_m3110_irq,
+ IRQ_TYPE_EDGE_FALLING, "max3110", max);
+ if (ret) {
+ max->irq = 0;
+ dev_warn(&spi->dev,
+ "unable to allocate IRQ, will use polling method\n");
+ }
+ }
+
spi_set_drvdata(spi, max);
pmax = max;
@@ -867,6 +869,9 @@ static int serial_m3110_remove(struct spi_device *dev)
free_page((unsigned long)max->con_xmit.buf);
+ if (max->irq)
+ free_irq(max->irq, max);
+
if (max->main_thread)
kthread_stop(max->main_thread);
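
The io_mutex added above serializes the whole write-then-read SPI transaction, since the console path and the worker threads can reach max3110_write_then_read() concurrently. A toy pthread sketch of the same pattern, with a plain buffer standing in for the SPI link (nothing below is driver code):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t io_mutex = PTHREAD_MUTEX_INITIALIZER;
static char bus[32];			/* pretend shared SPI shift register */

/* Both phases of the transaction happen under one lock, as in the patch. */
static void write_then_read(const char *out, char *in, size_t len)
{
	pthread_mutex_lock(&io_mutex);
	memcpy(bus, out, len);		/* "write" phase */
	memcpy(in, bus, len);		/* "read" phase sees our own data */
	pthread_mutex_unlock(&io_mutex);
}

static void *worker(void *arg)
{
	const char *out = arg;
	char in[8];

	write_then_read(out, in, sizeof(in));
	printf("%s -> %.8s\n", out, in);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)"AAAAAAA");
	pthread_create(&b, NULL, worker, (void *)"BBBBBBB");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}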
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 10e9d70b5c40..d8b6fee77a03 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -39,6 +39,7 @@
#include <asm/cacheflush.h>
#define MXS_AUART_PORTS 5
+#define MXS_AUART_FIFO_SIZE 16
#define AUART_CTRL0 0x00000000
#define AUART_CTRL0_SET 0x00000004
@@ -548,6 +549,9 @@ static int mxs_auart_dma_init(struct mxs_auart_port *s)
s->flags |= MXS_AUART_DMA_ENABLED;
dev_dbg(s->dev, "enabled the DMA support.");
+ /* The DMA buffer is now the FIFO the TTY subsystem can use */
+ s->port.fifosize = UART_XMIT_SIZE;
+
return 0;
err_out:
@@ -741,6 +745,9 @@ static int mxs_auart_startup(struct uart_port *u)
writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
u->membase + AUART_INTR);
+ /* Reset FIFO size (it could have changed if DMA was enabled) */
+ u->fifosize = MXS_AUART_FIFO_SIZE;
+
/*
* Enable fifo so all four bytes of a DMA word are written to
* output (otherwise, only the LSB is written, ie. 1 in 4 bytes)
@@ -1056,7 +1063,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
s->port.membase = ioremap(r->start, resource_size(r));
s->port.ops = &mxs_auart_ops;
s->port.iotype = UPIO_MEM;
- s->port.fifosize = 16;
+ s->port.fifosize = MXS_AUART_FIFO_SIZE;
s->port.uartclk = clk_get_rate(s->clk);
s->port.type = PORT_IMX;
s->port.dev = s->dev = &pdev->dev;
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 816d1a23f9d0..fa511ebab67c 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -39,6 +39,7 @@
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/platform_data/serial-omap.h>
@@ -134,6 +135,7 @@ struct uart_omap_port {
struct uart_port port;
struct uart_omap_dma uart_dma;
struct device *dev;
+ int wakeirq;
unsigned char ier;
unsigned char lcr;
@@ -175,7 +177,7 @@ struct uart_omap_port {
bool is_suspending;
};
-#define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
+#define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
@@ -214,10 +216,23 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
return pdata->get_context_loss_count(up->dev);
}
+static inline void serial_omap_enable_wakeirq(struct uart_omap_port *up,
+ bool enable)
+{
+ if (!up->wakeirq)
+ return;
+
+ if (enable)
+ enable_irq(up->wakeirq);
+ else
+ disable_irq(up->wakeirq);
+}
+
static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
{
struct omap_uart_port_info *pdata = dev_get_platdata(up->dev);
+ serial_omap_enable_wakeirq(up, enable);
if (!pdata || !pdata->enable_wakeup)
return;
@@ -242,12 +257,12 @@ serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
unsigned int n16 = port->uartclk / (16 * baud);
int baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
int baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
- if(baudAbsDiff13 < 0)
+ if (baudAbsDiff13 < 0)
baudAbsDiff13 = -baudAbsDiff13;
- if(baudAbsDiff16 < 0)
+ if (baudAbsDiff16 < 0)
baudAbsDiff16 = -baudAbsDiff16;
- return (baudAbsDiff13 > baudAbsDiff16);
+ return (baudAbsDiff13 >= baudAbsDiff16);
}
/*
@@ -258,13 +273,13 @@ serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
static unsigned int
serial_omap_get_divisor(struct uart_port *port, unsigned int baud)
{
- unsigned int divisor;
+ unsigned int mode;
if (!serial_omap_baud_is_mode16(port, baud))
- divisor = 13;
+ mode = 13;
else
- divisor = 16;
- return port->uartclk/(baud * divisor);
+ mode = 16;
+ return port->uartclk/(mode * baud);
}
static void serial_omap_enable_ms(struct uart_port *port)
@@ -283,28 +298,40 @@ static void serial_omap_enable_ms(struct uart_port *port)
static void serial_omap_stop_tx(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
- struct circ_buf *xmit = &up->port.state->xmit;
int res;
pm_runtime_get_sync(up->dev);
- /* handle rs485 */
+ /* Handle RS-485 */
if (up->rs485.flags & SER_RS485_ENABLED) {
- /* do nothing if current tx not yet completed */
- res = serial_in(up, UART_LSR) & UART_LSR_TEMT;
- if (!res)
- return;
-
- /* if there's no more data to send, turn off rts */
- if (uart_circ_empty(xmit)) {
- /* if rts not already disabled */
+ if (up->scr & OMAP_UART_SCR_TX_EMPTY) {
+ /* THR interrupt is fired when both TX FIFO and TX
+ * shift register are empty. This means there's nothing
+ * left to transmit now, so make sure the THR interrupt
+ * is fired when TX FIFO is below the trigger level,
+ * disable THR interrupts and toggle the RS-485 GPIO
+ * data direction pin if needed.
+ */
+ up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
+ serial_out(up, UART_OMAP_SCR, up->scr);
res = (up->rs485.flags & SER_RS485_RTS_AFTER_SEND) ? 1 : 0;
if (gpio_get_value(up->rts_gpio) != res) {
- if (up->rs485.delay_rts_after_send > 0) {
+ if (up->rs485.delay_rts_after_send > 0)
mdelay(up->rs485.delay_rts_after_send);
- }
gpio_set_value(up->rts_gpio, res);
}
+ } else {
+ /* We're asked to stop, but there's still stuff in the
+ * UART FIFO, so make sure the THR interrupt is fired
+ * when both TX FIFO and TX shift register are empty.
+ * The next THR interrupt (if no transmission is started
+ * in the meantime) will indicate the end of a
+ * transmission. Therefore we _don't_ disable THR
+ * interrupts in this situation.
+ */
+ up->scr |= OMAP_UART_SCR_TX_EMPTY;
+ serial_out(up, UART_OMAP_SCR, up->scr);
+ return;
}
}
@@ -384,15 +411,18 @@ static void serial_omap_start_tx(struct uart_port *port)
pm_runtime_get_sync(up->dev);
- /* handle rs485 */
+ /* Handle RS-485 */
if (up->rs485.flags & SER_RS485_ENABLED) {
+ /* Fire THR interrupts when FIFO is below trigger level */
+ up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
+ serial_out(up, UART_OMAP_SCR, up->scr);
+
/* if rts not already enabled */
res = (up->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
if (gpio_get_value(up->rts_gpio) != res) {
gpio_set_value(up->rts_gpio, res);
- if (up->rs485.delay_rts_before_send > 0) {
+ if (up->rs485.delay_rts_before_send > 0)
mdelay(up->rs485.delay_rts_before_send);
- }
}
}
@@ -699,6 +729,20 @@ static int serial_omap_startup(struct uart_port *port)
if (retval)
return retval;
+ /* Optional wake-up IRQ */
+ if (up->wakeirq) {
+ retval = request_irq(up->wakeirq, serial_omap_irq,
+ up->port.irqflags, up->name, up);
+ if (retval) {
+ free_irq(up->port.irq, up);
+ return retval;
+ }
+ disable_irq(up->wakeirq);
+ } else {
+ dev_info(up->port.dev, "no wakeirq for uart%d\n",
+ up->port.line);
+ }
+
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
@@ -787,6 +831,8 @@ static void serial_omap_shutdown(struct uart_port *port)
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
free_irq(up->port.irq, up);
+ if (up->wakeirq)
+ free_irq(up->wakeirq, up);
}
static void serial_omap_uart_qos_work(struct work_struct *work)
@@ -938,7 +984,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
*/
/* Set receive FIFO threshold to 16 characters and
- * transmit FIFO threshold to 16 spaces
+ * transmit FIFO threshold to 32 spaces
*/
up->fcr &= ~OMAP_UART_FCR_RX_FIFO_TRIG_MASK;
up->fcr &= ~OMAP_UART_FCR_TX_FIFO_TRIG_MASK;
@@ -1060,15 +1106,6 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
}
-static int serial_omap_set_wake(struct uart_port *port, unsigned int state)
-{
- struct uart_omap_port *up = to_uart_omap_port(port);
-
- serial_omap_enable_wakeup(up, state);
-
- return 0;
-}
-
static void
serial_omap_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
@@ -1353,6 +1390,15 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
up->ier = mode;
serial_out(up, UART_IER, up->ier);
+ /* If RS-485 is disabled, make sure the THR interrupt is fired when
+ * TX FIFO is below the trigger level.
+ */
+ if (!(up->rs485.flags & SER_RS485_ENABLED) &&
+ (up->scr & OMAP_UART_SCR_TX_EMPTY)) {
+ up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
+ serial_out(up, UART_OMAP_SCR, up->scr);
+ }
+
spin_unlock_irqrestore(&up->port.lock, flags);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
@@ -1401,7 +1447,6 @@ static struct uart_ops serial_omap_pops = {
.shutdown = serial_omap_shutdown,
.set_termios = serial_omap_set_termios,
.pm = serial_omap_pm,
- .set_wake = serial_omap_set_wake,
.type = serial_omap_type,
.release_port = serial_omap_release_port,
.request_port = serial_omap_request_port,
@@ -1582,11 +1627,23 @@ static int serial_omap_probe(struct platform_device *pdev)
struct uart_omap_port *up;
struct resource *mem, *irq;
struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
- int ret;
+ int ret, uartirq = 0, wakeirq = 0;
+ /* The optional wakeirq may be specified in the board dts file */
if (pdev->dev.of_node) {
+ uartirq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!uartirq)
+ return -EPROBE_DEFER;
+ wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1);
omap_up_info = of_get_uart_port_info(&pdev->dev);
pdev->dev.platform_data = omap_up_info;
+ } else {
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return -ENODEV;
+ }
+ uartirq = irq->start;
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1595,12 +1652,6 @@ static int serial_omap_probe(struct platform_device *pdev)
return -ENODEV;
}
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(&pdev->dev, "no irq resource?\n");
- return -ENODEV;
- }
-
if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
pdev->dev.driver->name)) {
dev_err(&pdev->dev, "memory region already claimed\n");
@@ -1634,7 +1685,8 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.dev = &pdev->dev;
up->port.type = PORT_OMAP;
up->port.iotype = UPIO_MEM;
- up->port.irq = irq->start;
+ up->port.irq = uartirq;
+ up->wakeirq = wakeirq;
up->port.regshift = 2;
up->port.fifosize = 64;
@@ -1670,8 +1722,9 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.uartclk = omap_up_info->uartclk;
if (!up->port.uartclk) {
up->port.uartclk = DEFAULT_CLK_SPEED;
- dev_warn(&pdev->dev, "No clock speed specified: using default:"
- "%d\n", DEFAULT_CLK_SPEED);
+ dev_warn(&pdev->dev,
+ "No clock speed specified: using default: %d\n",
+ DEFAULT_CLK_SPEED);
}
up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
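
For the divisor changes above, a stand-alone sketch of how serial_omap_baud_is_mode16() and serial_omap_get_divisor() choose between 13x and 16x oversampling. The 48 MHz functional clock and the sample baud rates are assumed example values, and n13 is computed the same way as the n16 line visible in the hunk:

#include <stdio.h>

/* Same decision as the patched serial_omap_baud_is_mode16(). */
static int baud_is_mode16(unsigned int uartclk, unsigned int baud)
{
	unsigned int n13 = uartclk / (13 * baud);
	unsigned int n16 = uartclk / (16 * baud);
	int diff13 = baud - uartclk / (13 * n13);
	int diff16 = baud - uartclk / (16 * n16);

	if (diff13 < 0)
		diff13 = -diff13;
	if (diff16 < 0)
		diff16 = -diff16;

	/* ">=" (as in the patch) prefers standard 16x oversampling on a tie */
	return diff13 >= diff16;
}

static unsigned int get_divisor(unsigned int uartclk, unsigned int baud)
{
	unsigned int mode = baud_is_mode16(uartclk, baud) ? 16 : 13;

	return uartclk / (mode * baud);
}

int main(void)
{
	unsigned int clk = 48000000;	/* assumed UART functional clock */
	unsigned int bauds[] = { 115200, 460800 };
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("baud %6u: %2dx oversampling, divisor %u\n", bauds[i],
		       baud_is_mode16(clk, bauds[i]) ? 16 : 13,
		       get_divisor(clk, bauds[i]));
	return 0;
}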
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 44077c0b7670..0aa2b528ef3d 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1614,7 +1614,6 @@ static struct uart_ops pch_uart_ops = {
.shutdown = pch_uart_shutdown,
.set_termios = pch_uart_set_termios,
/* .pm = pch_uart_pm, Not supported yet */
-/* .set_wake = pch_uart_set_wake, Not supported yet */
.type = pch_uart_type,
.release_port = pch_uart_release_port,
.request_port = pch_uart_request_port,
@@ -1996,6 +1995,8 @@ module_exit(pch_uart_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel EG20T PCH UART PCI Driver");
+MODULE_DEVICE_TABLE(pci, pch_uart_pci_id);
+
module_param(default_baud, uint, S_IRUGO);
MODULE_PARM_DESC(default_baud,
"Default BAUD for initial driver state and console (default 9600)");
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index f87f1a0c8c6e..481b781b26e3 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -57,6 +57,8 @@
#include <linux/bitops.h>
#include <linux/sysrq.h>
#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -1072,7 +1074,7 @@ static void pmz_convert_to_zs(struct uart_pmac_port *uap, unsigned int cflag,
uap->curregs[5] |= Tx8;
uap->parity_mask = 0xff;
break;
- };
+ }
uap->curregs[4] &= ~(SB_MASK);
if (cflag & CSTOPB)
uap->curregs[4] |= SB2;
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index ba25722a7131..753d4525b367 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -647,7 +647,10 @@ void sa1100_register_uart_fns(struct sa1100_port_fns *fns)
sa1100_pops.set_mctrl = fns->set_mctrl;
sa1100_pops.pm = fns->pm;
- sa1100_pops.set_wake = fns->set_wake;
+ /*
+ * FIXME: fns->set_wake is unused - this should be called from
+ * the suspend() callback if device_may_wakeup(dev) is set.
+ */
}
void __init sa1100_register_uart(int idx, int port)
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index f3dfa19a1cb8..c1af04d46682 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -407,7 +407,14 @@ static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- /* todo - possibly remove AFC and do manual CTS */
+ unsigned int umcon = rd_regl(port, S3C2410_UMCON);
+
+ if (mctrl & TIOCM_RTS)
+ umcon |= S3C2410_UMCOM_RTS_LOW;
+ else
+ umcon &= ~S3C2410_UMCOM_RTS_LOW;
+
+ wr_regl(port, S3C2410_UMCON, umcon);
}
static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
@@ -774,8 +781,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
if (termios->c_cflag & CSTOPB)
ulcon |= S3C2410_LCON_STOPB;
- umcon = (termios->c_cflag & CRTSCTS) ? S3C2410_UMCOM_AFC : 0;
-
if (termios->c_cflag & PARENB) {
if (termios->c_cflag & PARODD)
ulcon |= S3C2410_LCON_PODD;
@@ -792,6 +797,15 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
wr_regl(port, S3C2410_ULCON, ulcon);
wr_regl(port, S3C2410_UBRDIV, quot);
+
+ umcon = rd_regl(port, S3C2410_UMCON);
+ if (termios->c_cflag & CRTSCTS) {
+ umcon |= S3C2410_UMCOM_AFC;
+ /* Disable RTS when RX FIFO contains 63 bytes */
+ umcon &= ~S3C2412_UMCON_AFC_8;
+ } else {
+ umcon &= ~S3C2410_UMCOM_AFC;
+ }
wr_regl(port, S3C2410_UMCON, umcon);
if (ourport->info->has_divslot)
@@ -1254,7 +1268,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
ourport->baudclk = ERR_PTR(-EINVAL);
ourport->info = ourport->drv_data->info;
ourport->cfg = (dev_get_platdata(&pdev->dev)) ?
- (struct s3c2410_uartcfg *)dev_get_platdata(&pdev->dev) :
+ dev_get_platdata(&pdev->dev) :
ourport->drv_data->def_cfg;
ourport->port.fifosize = (ourport->info->fifosize) ?
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index aaa617a6c499..8827e5424cef 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -63,7 +63,7 @@ struct s3c24xx_uart_port {
/* conversion functions */
-#define s3c24xx_dev_to_port(__dev) (struct uart_port *)dev_get_drvdata(__dev)
+#define s3c24xx_dev_to_port(__dev) dev_get_drvdata(__dev)
/* register access controls */
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index 49e9bbfe6cab..a447f71538ef 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -986,6 +986,7 @@ static int sccnxp_probe(struct platform_device *pdev)
return 0;
}
+ uart_unregister_driver(&s->uart);
err_out:
if (!IS_ERR(s->regulator))
return regulator_disable(s->regulator);
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 0489a2bdcdf9..dfe79ccc4fb3 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1018,7 +1018,7 @@ static int tegra_uart_startup(struct uart_port *u)
goto fail_hw_init;
}
- ret = request_irq(u->irq, tegra_uart_isr, IRQF_DISABLED,
+ ret = request_irq(u->irq, tegra_uart_isr, 0,
dev_name(u->dev), tup);
if (ret < 0) {
dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 440a962412da..90a080b1f9ee 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -1220,8 +1220,6 @@ static void pciserial_txx9_remove_one(struct pci_dev *dev)
{
struct uart_txx9_port *up = pci_get_drvdata(dev);
- pci_set_drvdata(dev, NULL);
-
if (up) {
serial_txx9_unregister_port(up->port.line);
pci_disable_device(dev);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 537750261aaa..e7e9cabb21fd 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -52,6 +52,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/gpio.h>
+#include <linux/of.h>
#ifdef CONFIG_SUPERH
#include <asm/sh_bios.h>
@@ -1433,7 +1434,7 @@ static void work_fn_rx(struct work_struct *work)
desc = s->desc_rx[new];
if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
/* Handle incomplete DMA receive */
struct dma_chan *chan = s->chan_rx;
struct shdma_desc *sh_desc = container_of(desc,
@@ -2437,6 +2438,112 @@ static int sci_remove(struct platform_device *dev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id of_sci_match[] = {
+ { .compatible = "renesas,sci-SCI-uart",
+ .data = (void *)PORT_SCI },
+ { .compatible = "renesas,sci-SCIF-uart",
+ .data = (void *)PORT_SCIF },
+ { .compatible = "renesas,sci-IRDA-uart",
+ .data = (void *)PORT_IRDA },
+ { .compatible = "renesas,sci-SCIFA-uart",
+ .data = (void *)PORT_SCIFA },
+ { .compatible = "renesas,sci-SCIFB-uart",
+ .data = (void *)PORT_SCIFB },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_sci_match);
+
+static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
+ int *dev_id)
+{
+ struct plat_sci_port *p;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct resource *res;
+ const __be32 *prop;
+ int i, irq, val;
+
+ match = of_match_node(of_sci_match, pdev->dev.of_node);
+ if (!match || !match->data) {
+ dev_err(&pdev->dev, "OF match error\n");
+ return NULL;
+ }
+
+ p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
+ if (!p) {
+ dev_err(&pdev->dev, "failed to allocate DT config data\n");
+ return NULL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get I/O memory\n");
+ return NULL;
+ }
+ p->mapbase = res->start;
+
+ for (i = 0; i < SCIx_NR_IRQS; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq data %d\n", i);
+ return NULL;
+ }
+ p->irqs[i] = irq;
+ }
+
+ prop = of_get_property(np, "cell-index", NULL);
+ if (!prop) {
+ dev_err(&pdev->dev, "required DT prop cell-index missing\n");
+ return NULL;
+ }
+ *dev_id = be32_to_cpup(prop);
+
+ prop = of_get_property(np, "renesas,scscr", NULL);
+ if (!prop) {
+ dev_err(&pdev->dev, "required DT prop scscr missing\n");
+ return NULL;
+ }
+ p->scscr = be32_to_cpup(prop);
+
+ prop = of_get_property(np, "renesas,scbrr-algo-id", NULL);
+ if (!prop) {
+ dev_err(&pdev->dev, "required DT prop scbrr-algo-id missing\n");
+ return NULL;
+ }
+ val = be32_to_cpup(prop);
+ if (val <= SCBRR_ALGO_INVALID || val >= SCBRR_NR_ALGOS) {
+ dev_err(&pdev->dev, "DT prop scbrr-algo-id out of range\n");
+ return NULL;
+ }
+ p->scbrr_algo_id = val;
+
+ p->flags = UPF_IOREMAP;
+ if (of_get_property(np, "renesas,autoconf", NULL))
+ p->flags |= UPF_BOOT_AUTOCONF;
+
+ prop = of_get_property(np, "renesas,regtype", NULL);
+ if (prop) {
+ val = be32_to_cpup(prop);
+ if (val < SCIx_PROBE_REGTYPE || val >= SCIx_NR_REGTYPES) {
+ dev_err(&pdev->dev, "DT prop regtype out of range\n");
+ return NULL;
+ }
+ p->regtype = val;
+ }
+
+ p->type = (unsigned int)match->data;
+
+ return p;
+}
+#else
+static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
+ int *dev_id)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
static int sci_probe_single(struct platform_device *dev,
unsigned int index,
struct plat_sci_port *p,
@@ -2469,9 +2576,9 @@ static int sci_probe_single(struct platform_device *dev,
static int sci_probe(struct platform_device *dev)
{
- struct plat_sci_port *p = dev_get_platdata(&dev->dev);
- struct sci_port *sp = &sci_ports[dev->id];
- int ret;
+ struct plat_sci_port *p;
+ struct sci_port *sp;
+ int ret, dev_id = dev->id;
/*
* If we've come here via earlyprintk initialization, head off to
@@ -2481,9 +2588,20 @@ static int sci_probe(struct platform_device *dev)
if (is_early_platform_device(dev))
return sci_probe_earlyprintk(dev);
+ if (dev->dev.of_node)
+ p = sci_parse_dt(dev, &dev_id);
+ else
+ p = dev_get_platdata(&dev->dev);
+
+ if (!p) {
+ dev_err(&dev->dev, "no setup data supplied\n");
+ return -EINVAL;
+ }
+
+ sp = &sci_ports[dev_id];
platform_set_drvdata(dev, sp);
- ret = sci_probe_single(dev, dev->id, p, sp);
+ ret = sci_probe_single(dev, dev_id, p, sp);
if (ret)
return ret;
@@ -2535,6 +2653,7 @@ static struct platform_driver sci_driver = {
.name = "sh-sci",
.owner = THIS_MODULE,
.pm = &sci_dev_pm_ops,
+ .of_match_table = of_match_ptr(of_sci_match),
},
};
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 61c1ad03db5b..f186a8fb8887 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -529,7 +529,7 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
while (sirfport->rx_completed != sirfport->rx_issued) {
sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
SIRFSOC_RX_DMA_BUF_SIZE);
- sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
+ sirfport->rx_completed++;
sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
}
count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
@@ -706,12 +706,19 @@ static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
{
struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
struct uart_port *port = &sirfport->port;
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
unsigned long flags;
spin_lock_irqsave(&sirfport->rx_lock, flags);
while (sirfport->rx_completed != sirfport->rx_issued) {
sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
SIRFSOC_RX_DMA_BUF_SIZE);
- sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
+ if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
+ uint_en->sirfsoc_rx_timeout_en)
+ sirfsoc_rx_submit_one_dma_desc(port,
+ sirfport->rx_completed++);
+ else
+ sirfport->rx_completed++;
sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
}
spin_unlock_irqrestore(&sirfport->rx_lock, flags);
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index fb8d0a002607..b7d679c0881b 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -368,15 +368,6 @@ struct sirfsoc_uart_register sirfsoc_uart = {
#define SIRFSOC_UART_NR 6
#define SIRFSOC_PORT_TYPE 0xa5
-/* Baud Rate Calculation */
-#define SIRF_MIN_SAMPLE_DIV 0xf
-#define SIRF_MAX_SAMPLE_DIV 0x3f
-#define SIRF_IOCLK_DIV_MAX 0xffff
-#define SIRF_SAMPLE_DIV_SHIFT 16
-#define SIRF_IOCLK_DIV_MASK 0xffff
-#define SIRF_SAMPLE_DIV_MASK 0x3f0000
-#define SIRF_BAUD_RATE_SUPPORT_NR 18
-
/* Uart Common Use Macro*/
#define SIRFSOC_RX_DMA_BUF_SIZE 256
#define BYTES_TO_ALIGN(dma_addr) ((unsigned long)(dma_addr) & 0x3)
@@ -453,9 +444,6 @@ struct sirfsoc_uart_port {
int rx_issued;
};
-/* Hardware Flow Control */
-#define SIRFUART_AFC_CTRL_RX_THD 0x70
-
/* Register Access Control */
#define portaddr(port, reg) ((port)->membase + (reg))
#define rd_regb(port, reg) (__raw_readb(portaddr(port, reg)))
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 5d6136b2a04a..380fb5355cb2 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -894,7 +894,7 @@ static int sunsab_console_setup(struct console *con, char *options)
case B115200: baud = 115200; break;
case B230400: baud = 230400; break;
case B460800: baud = 460800; break;
- };
+ }
/*
* Temporary fix.
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 699cc1b5f6aa..db79b76f5c8e 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -522,7 +522,7 @@ static void receive_kbd_ms_chars(struct uart_sunsu_port *up, int is_break)
serio_interrupt(&up->serio, ch, 0);
#endif
break;
- };
+ }
}
} while (serial_in(up, UART_LSR) & UART_LSR_DR);
}
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 135a15203532..45a8c6aa5837 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -319,7 +319,7 @@ static void sunzilog_kbdms_receive_chars(struct uart_sunzilog_port *up,
serio_interrupt(&up->serio, ch, 0);
#endif
break;
- };
+ }
}
}
@@ -897,7 +897,7 @@ sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
up->curregs[R5] |= Tx8;
up->parity_mask = 0xff;
break;
- };
+ }
up->curregs[R4] &= ~0x0c;
if (cflag & CSTOPB)
up->curregs[R4] |= SB2;
@@ -1239,7 +1239,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
default: case B9600: baud = 9600; break;
case B19200: baud = 19200; break;
case B38400: baud = 38400; break;
- };
+ }
brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 88317482b81f..d569ca58bab6 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -25,6 +25,8 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
@@ -269,7 +271,7 @@ static unsigned int qe_uart_tx_empty(struct uart_port *port)
return 1;
bdp++;
- };
+ }
}
/*
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 7e4150aa69c6..e46e9f3f19b9 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1,7 +1,7 @@
/*
* Xilinx PS UART driver
*
- * 2011 (c) Xilinx Inc.
+ * 2011 - 2013 (C) Xilinx Inc.
*
* This program is free software; you can redistribute it
* and/or modify it under the terms of the GNU General Public
@@ -11,13 +11,17 @@
*
*/
+#if defined(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
#include <linux/platform_device.h>
#include <linux/serial.h>
+#include <linux/console.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <linux/console.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
@@ -29,12 +33,22 @@
#define XUARTPS_MAJOR 0 /* use dynamic node allocation */
#define XUARTPS_MINOR 0 /* works best with devtmpfs */
#define XUARTPS_NR_PORTS 2
-#define XUARTPS_FIFO_SIZE 16 /* FIFO size */
+#define XUARTPS_FIFO_SIZE 64 /* FIFO size */
#define XUARTPS_REGISTER_SPACE 0xFFF
#define xuartps_readl(offset) ioread32(port->membase + offset)
#define xuartps_writel(val, offset) iowrite32(val, port->membase + offset)
+/* Rx Trigger level */
+static int rx_trigger_level = 56;
+module_param(rx_trigger_level, uint, S_IRUGO);
+MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes");
+
+/* Rx Timeout */
+static int rx_timeout = 10;
+module_param(rx_timeout, uint, S_IRUGO);
+MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
+
/********************************Register Map********************************/
/** UART
*
@@ -128,6 +142,9 @@
#define XUARTPS_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */
#define XUARTPS_IXR_MASK 0x00001FFF /* Valid bit mask */
+/* Goes in read_status_mask for break detection as the HW doesn't do it*/
+#define XUARTPS_IXR_BRK 0x80000000
+
/** Channel Status Register
*
* The channel status register (CSR) is provided to enable the control logic
@@ -139,15 +156,27 @@
#define XUARTPS_SR_TXFULL 0x00000010 /* TX FIFO full */
#define XUARTPS_SR_RXTRIG 0x00000001 /* Rx Trigger */
+/* baud dividers min/max values */
+#define XUARTPS_BDIV_MIN 4
+#define XUARTPS_BDIV_MAX 255
+#define XUARTPS_CD_MAX 65535
+
/**
* struct xuartps - device data
- * @refclk Reference clock
- * @aperclk APB clock
+ * @port Pointer to the UART port
+ * @refclk Reference clock
+ * @aperclk APB clock
+ * @baud Current baud rate
+ * @clk_rate_change_nb Notifier block for clock changes
*/
struct xuartps {
+ struct uart_port *port;
struct clk *refclk;
struct clk *aperclk;
+ unsigned int baud;
+ struct notifier_block clk_rate_change_nb;
};
+#define to_xuartps(_nb) container_of(_nb, struct xuartps, clk_rate_change_nb);
/**
* xuartps_isr - Interrupt handler
@@ -171,6 +200,23 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
*/
isrstatus = xuartps_readl(XUARTPS_ISR_OFFSET);
+ /*
+ * There is no hardware break detection, so we interpret framing
+ * error with all-zeros data as a break sequence. Most of the time,
+ * there's another non-zero byte at the end of the sequence.
+ */
+
+ if (isrstatus & XUARTPS_IXR_FRAMING) {
+ while (!(xuartps_readl(XUARTPS_SR_OFFSET) &
+ XUARTPS_SR_RXEMPTY)) {
+ if (!xuartps_readl(XUARTPS_FIFO_OFFSET)) {
+ port->read_status_mask |= XUARTPS_IXR_BRK;
+ isrstatus &= ~XUARTPS_IXR_FRAMING;
+ }
+ }
+ xuartps_writel(XUARTPS_IXR_FRAMING, XUARTPS_ISR_OFFSET);
+ }
+
/* drop byte with parity error if IGNPAR specified */
if (isrstatus & port->ignore_status_mask & XUARTPS_IXR_PARITY)
isrstatus &= ~(XUARTPS_IXR_RXTRIG | XUARTPS_IXR_TOUT);
@@ -184,6 +230,30 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
while ((xuartps_readl(XUARTPS_SR_OFFSET) &
XUARTPS_SR_RXEMPTY) != XUARTPS_SR_RXEMPTY) {
data = xuartps_readl(XUARTPS_FIFO_OFFSET);
+
+ /* Non-NULL byte after BREAK is garbage (99%) */
+ if (data && (port->read_status_mask &
+ XUARTPS_IXR_BRK)) {
+ port->read_status_mask &= ~XUARTPS_IXR_BRK;
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ }
+
+ /*
+ * uart_handle_sysrq_char() doesn't work if
+ * spinlocked, for some reason
+ */
+ if (port->sysrq) {
+ spin_unlock(&port->lock);
+ if (uart_handle_sysrq_char(port,
+ (unsigned char)data)) {
+ spin_lock(&port->lock);
+ continue;
+ }
+ spin_lock(&port->lock);
+ }
+
port->icount.rx++;
if (isrstatus & XUARTPS_IXR_PARITY) {
@@ -247,63 +317,196 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
}
/**
- * xuartps_set_baud_rate - Calculate and set the baud rate
- * @port: Handle to the uart port structure
- * @baud: Baud rate to set
- *
+ * xuartps_calc_baud_divs - Calculate baud rate divisors
+ * @clk: UART module input clock
+ * @baud: Desired baud rate
+ * @rbdiv: BDIV value (return value)
+ * @rcd: CD value (return value)
+ * @div8: Value for clk_sel bit in mod (return value)
* Returns baud rate, requested baud when possible, or actual baud when there
- * was too much error
- **/
-static unsigned int xuartps_set_baud_rate(struct uart_port *port,
- unsigned int baud)
+ * was too much error, zero if no valid divisors are found.
+ *
+ * Formula to obtain baud rate is
+ * baud_tx/rx rate = clk/CD * (BDIV + 1)
+ * input_clk = (Uart User Defined Clock or Apb Clock)
+ * depends on UCLKEN in MR Reg
+ * clk = input_clk or input_clk/8;
+ * depends on CLKS in MR reg
+ * CD and BDIV depends on values in
+ * baud rate generate register
+ * baud rate clock divisor register
+ */
+static unsigned int xuartps_calc_baud_divs(unsigned int clk, unsigned int baud,
+ u32 *rbdiv, u32 *rcd, int *div8)
{
- unsigned int sel_clk;
- unsigned int calc_baud = 0;
- unsigned int brgr_val, brdiv_val;
+ u32 cd, bdiv;
+ unsigned int calc_baud;
+ unsigned int bestbaud = 0;
unsigned int bauderror;
+ unsigned int besterror = ~0;
- /* Formula to obtain baud rate is
- * baud_tx/rx rate = sel_clk/CD * (BDIV + 1)
- * input_clk = (Uart User Defined Clock or Apb Clock)
- * depends on UCLKEN in MR Reg
- * sel_clk = input_clk or input_clk/8;
- * depends on CLKS in MR reg
- * CD and BDIV depends on values in
- * baud rate generate register
- * baud rate clock divisor register
- */
- sel_clk = port->uartclk;
- if (xuartps_readl(XUARTPS_MR_OFFSET) & XUARTPS_MR_CLKSEL)
- sel_clk = sel_clk / 8;
-
- /* Find the best values for baud generation */
- for (brdiv_val = 4; brdiv_val < 255; brdiv_val++) {
+ if (baud < clk / ((XUARTPS_BDIV_MAX + 1) * XUARTPS_CD_MAX)) {
+ *div8 = 1;
+ clk /= 8;
+ } else {
+ *div8 = 0;
+ }
- brgr_val = sel_clk / (baud * (brdiv_val + 1));
- if (brgr_val < 2 || brgr_val > 65535)
+ for (bdiv = XUARTPS_BDIV_MIN; bdiv <= XUARTPS_BDIV_MAX; bdiv++) {
+ cd = DIV_ROUND_CLOSEST(clk, baud * (bdiv + 1));
+ if (cd < 1 || cd > XUARTPS_CD_MAX)
continue;
- calc_baud = sel_clk / (brgr_val * (brdiv_val + 1));
+ calc_baud = clk / (cd * (bdiv + 1));
if (baud > calc_baud)
bauderror = baud - calc_baud;
else
bauderror = calc_baud - baud;
- /* use the values when percent error is acceptable */
- if (((bauderror * 100) / baud) < 3) {
- calc_baud = baud;
- break;
+ if (besterror > bauderror) {
+ *rbdiv = bdiv;
+ *rcd = cd;
+ bestbaud = calc_baud;
+ besterror = bauderror;
}
}
+ /* use the values when percent error is acceptable */
+ if (((besterror * 100) / baud) < 3)
+ bestbaud = baud;
+
+ return bestbaud;
+}
- /* Set the values for the new baud rate */
- xuartps_writel(brgr_val, XUARTPS_BAUDGEN_OFFSET);
- xuartps_writel(brdiv_val, XUARTPS_BAUDDIV_OFFSET);
+/**
+ * xuartps_set_baud_rate - Calculate and set the baud rate
+ * @port: Handle to the uart port structure
+ * @baud: Baud rate to set
+ * Returns baud rate, requested baud when possible, or actual baud when there
+ * was too much error, zero if no valid divisors are found.
+ */
+static unsigned int xuartps_set_baud_rate(struct uart_port *port,
+ unsigned int baud)
+{
+ unsigned int calc_baud;
+ u32 cd = 0, bdiv = 0;
+ u32 mreg;
+ int div8;
+ struct xuartps *xuartps = port->private_data;
+
+ calc_baud = xuartps_calc_baud_divs(port->uartclk, baud, &bdiv, &cd,
+ &div8);
+
+ /* Write new divisors to hardware */
+ mreg = xuartps_readl(XUARTPS_MR_OFFSET);
+ if (div8)
+ mreg |= XUARTPS_MR_CLKSEL;
+ else
+ mreg &= ~XUARTPS_MR_CLKSEL;
+ xuartps_writel(mreg, XUARTPS_MR_OFFSET);
+ xuartps_writel(cd, XUARTPS_BAUDGEN_OFFSET);
+ xuartps_writel(bdiv, XUARTPS_BAUDDIV_OFFSET);
+ xuartps->baud = baud;
return calc_baud;
}
+#ifdef CONFIG_COMMON_CLK
+/**
+ * xuartps_clk_notifier_cb - Clock notifier callback
+ * @nb: Notifier block
+ * @event: Notify event
+ * @data: Notifier data
+ * Returns NOTIFY_OK on success, NOTIFY_BAD on error, NOTIFY_DONE otherwise.
+ */
+static int xuartps_clk_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ u32 ctrl_reg;
+ struct uart_port *port;
+ int locked = 0;
+ struct clk_notifier_data *ndata = data;
+ unsigned long flags = 0;
+ struct xuartps *xuartps = to_xuartps(nb);
+
+ port = xuartps->port;
+ if (port->suspended)
+ return NOTIFY_OK;
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ {
+ u32 bdiv;
+ u32 cd;
+ int div8;
+
+ /*
+ * Find out if current baud-rate can be achieved with new clock
+ * frequency.
+ */
+ if (!xuartps_calc_baud_divs(ndata->new_rate, xuartps->baud,
+ &bdiv, &cd, &div8))
+ return NOTIFY_BAD;
+
+ spin_lock_irqsave(&xuartps->port->lock, flags);
+
+ /* Disable the TX and RX to set baud rate */
+ xuartps_writel(xuartps_readl(XUARTPS_CR_OFFSET) |
+ (XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS),
+ XUARTPS_CR_OFFSET);
+
+ spin_unlock_irqrestore(&xuartps->port->lock, flags);
+
+ return NOTIFY_OK;
+ }
+ case POST_RATE_CHANGE:
+ /*
+ * Set clk dividers to generate correct baud with new clock
+ * frequency.
+ */
+
+ spin_lock_irqsave(&xuartps->port->lock, flags);
+
+ locked = 1;
+ port->uartclk = ndata->new_rate;
+
+ xuartps->baud = xuartps_set_baud_rate(xuartps->port,
+ xuartps->baud);
+ /* fall through */
+ case ABORT_RATE_CHANGE:
+ if (!locked)
+ spin_lock_irqsave(&xuartps->port->lock, flags);
+
+ /* Set TX/RX Reset */
+ xuartps_writel(xuartps_readl(XUARTPS_CR_OFFSET) |
+ (XUARTPS_CR_TXRST | XUARTPS_CR_RXRST),
+ XUARTPS_CR_OFFSET);
+
+ while (xuartps_readl(XUARTPS_CR_OFFSET) &
+ (XUARTPS_CR_TXRST | XUARTPS_CR_RXRST))
+ cpu_relax();
+
+ /*
+ * Clear the RX disable and TX disable bits and then set the TX
+ * enable bit and RX enable bit to enable the transmitter and
+ * receiver.
+ */
+ xuartps_writel(rx_timeout, XUARTPS_RXTOUT_OFFSET);
+ ctrl_reg = xuartps_readl(XUARTPS_CR_OFFSET);
+ xuartps_writel(
+ (ctrl_reg & ~(XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS)) |
+ (XUARTPS_CR_TX_EN | XUARTPS_CR_RX_EN),
+ XUARTPS_CR_OFFSET);
+
+ spin_unlock_irqrestore(&xuartps->port->lock, flags);
+
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+#endif
+
/*----------------------Uart Operations---------------------------*/
/**
@@ -346,7 +549,7 @@ static void xuartps_start_tx(struct uart_port *port)
port->state->xmit.tail = (port->state->xmit.tail + 1) &
(UART_XMIT_SIZE - 1);
}
-
+ xuartps_writel(XUARTPS_IXR_TXEMPTY, XUARTPS_ISR_OFFSET);
/* Enable the TX Empty interrupt */
xuartps_writel(XUARTPS_IXR_TXEMPTY, XUARTPS_IER_OFFSET);
@@ -437,7 +640,7 @@ static void xuartps_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old)
{
unsigned int cval = 0;
- unsigned int baud;
+ unsigned int baud, minbaud, maxbaud;
unsigned long flags;
unsigned int ctrl_reg, mode_reg;
@@ -454,8 +657,14 @@ static void xuartps_set_termios(struct uart_port *port,
(XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS),
XUARTPS_CR_OFFSET);
- /* Min baud rate = 6bps and Max Baud Rate is 10Mbps for 100Mhz clk */
- baud = uart_get_baud_rate(port, termios, old, 0, 10000000);
+ /*
+ * Min baud rate = 6 bps and max baud rate is 10 Mbps for a 100 MHz clk.
+ * Min and max baud rates are calculated here based on port->uartclk;
+ * this way we get a valid baud rate and can safely call set_baud_rate().
+ */
+ minbaud = port->uartclk / ((XUARTPS_BDIV_MAX + 1) * XUARTPS_CD_MAX * 8);
+ maxbaud = port->uartclk / (XUARTPS_BDIV_MIN + 1);
+ baud = uart_get_baud_rate(port, termios, old, minbaud, maxbaud);
baud = xuartps_set_baud_rate(port, baud);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
@@ -480,7 +689,7 @@ static void xuartps_set_termios(struct uart_port *port,
| (XUARTPS_CR_TX_EN | XUARTPS_CR_RX_EN),
XUARTPS_CR_OFFSET);
- xuartps_writel(10, XUARTPS_RXTOUT_OFFSET);
+ xuartps_writel(rx_timeout, XUARTPS_RXTOUT_OFFSET);
port->read_status_mask = XUARTPS_IXR_TXEMPTY | XUARTPS_IXR_RXTRIG |
XUARTPS_IXR_OVERRUN | XUARTPS_IXR_TOUT;
@@ -531,13 +740,17 @@ static void xuartps_set_termios(struct uart_port *port,
cval |= XUARTPS_MR_PARITY_MARK;
else
cval |= XUARTPS_MR_PARITY_SPACE;
- } else if (termios->c_cflag & PARODD)
+ } else {
+ if (termios->c_cflag & PARODD)
cval |= XUARTPS_MR_PARITY_ODD;
else
cval |= XUARTPS_MR_PARITY_EVEN;
- } else
+ }
+ } else {
cval |= XUARTPS_MR_PARITY_NONE;
- xuartps_writel(cval , XUARTPS_MR_OFFSET);
+ }
+ cval |= mode_reg & 1;
+ xuartps_writel(cval, XUARTPS_MR_OFFSET);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -583,11 +796,17 @@ static int xuartps_startup(struct uart_port *port)
| XUARTPS_MR_PARITY_NONE | XUARTPS_MR_CHARLEN_8_BIT,
XUARTPS_MR_OFFSET);
- /* Set the RX FIFO Trigger level to 14 assuming FIFO size as 16 */
- xuartps_writel(14, XUARTPS_RXWM_OFFSET);
+ /*
+ * Set the RX FIFO Trigger level to use most of the FIFO, but it
+ * can be tuned with a module parameter
+ */
+ xuartps_writel(rx_trigger_level, XUARTPS_RXWM_OFFSET);
- /* Receive Timeout register is enabled with value of 10 */
- xuartps_writel(10, XUARTPS_RXTOUT_OFFSET);
+ /*
+ * The Receive Timeout register is enabled with a default value
+ * that can be tuned with a module parameter
+ */
+ xuartps_writel(rx_timeout, XUARTPS_RXTOUT_OFFSET);
/* Clear out any pending interrupts before enabling them */
xuartps_writel(xuartps_readl(XUARTPS_ISR_OFFSET), XUARTPS_ISR_OFFSET);
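The comments above describe rx_trigger_level and rx_timeout as module parameters. Assuming the driver keeps its usual xilinx_uartps module name (an assumption, the parameter declarations are not in this hunk), they could be overridden on the kernel command line with, for example:

	xilinx_uartps.rx_trigger_level=56 xilinx_uartps.rx_timeout=10

where the parameter names come from the code above and the values are purely illustrative.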
@@ -727,6 +946,54 @@ static void xuartps_enable_ms(struct uart_port *port)
/* N/A */
}
+#ifdef CONFIG_CONSOLE_POLL
+static int xuartps_poll_get_char(struct uart_port *port)
+{
+ u32 imr;
+ int c;
+
+ /* Disable all interrupts */
+ imr = xuartps_readl(XUARTPS_IMR_OFFSET);
+ xuartps_writel(imr, XUARTPS_IDR_OFFSET);
+
+ /* Check if FIFO is empty */
+ if (xuartps_readl(XUARTPS_SR_OFFSET) & XUARTPS_SR_RXEMPTY)
+ c = NO_POLL_CHAR;
+ else /* Read a character */
+ c = (unsigned char) xuartps_readl(XUARTPS_FIFO_OFFSET);
+
+ /* Enable interrupts */
+ xuartps_writel(imr, XUARTPS_IER_OFFSET);
+
+ return c;
+}
+
+static void xuartps_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ u32 imr;
+
+ /* Disable all interrupts */
+ imr = xuartps_readl(XUARTPS_IMR_OFFSET);
+ xuartps_writel(imr, XUARTPS_IDR_OFFSET);
+
+ /* Wait until FIFO is empty */
+ while (!(xuartps_readl(XUARTPS_SR_OFFSET) & XUARTPS_SR_TXEMPTY))
+ cpu_relax();
+
+ /* Write a character */
+ xuartps_writel(c, XUARTPS_FIFO_OFFSET);
+
+ /* Wait until FIFO is empty */
+ while (!(xuartps_readl(XUARTPS_SR_OFFSET) & XUARTPS_SR_TXEMPTY))
+ cpu_relax();
+
+ /* Enable interrupts */
+ xuartps_writel(imr, XUARTPS_IER_OFFSET);
+
+ return;
+}
+#endif
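These hooks are what CONFIG_CONSOLE_POLL consumers such as kgdboc use to drive the port with interrupts masked. Assuming the driver keeps its usual ttyPS device naming (an assumption), a typical way to exercise them is booting with, for example:

	kgdboc=ttyPS0,115200

which is given purely to illustrate where poll_get_char/poll_put_char end up being called from.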
+
/** The UART operations structure
*/
static struct uart_ops xuartps_ops = {
@@ -759,6 +1026,10 @@ static struct uart_ops xuartps_ops = {
.config_port = xuartps_config_port, /* Configure when driver
* adds a xuartps port
*/
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = xuartps_poll_get_char,
+ .poll_put_char = xuartps_poll_put_char,
+#endif
};
static struct uart_port xuartps_port[2];
@@ -837,7 +1108,7 @@ static void xuartps_console_write(struct console *co, const char *s,
{
struct uart_port *port = &xuartps_port[co->index];
unsigned long flags;
- unsigned int imr;
+ unsigned int imr, ctrl;
int locked = 1;
if (oops_in_progress)
@@ -849,9 +1120,19 @@ static void xuartps_console_write(struct console *co, const char *s,
imr = xuartps_readl(XUARTPS_IMR_OFFSET);
xuartps_writel(imr, XUARTPS_IDR_OFFSET);
+ /*
+ * Make sure that the tx part is enabled. Set the TX enable bit and
+ * clear the TX disable bit to enable the transmitter.
+ */
+ ctrl = xuartps_readl(XUARTPS_CR_OFFSET);
+ xuartps_writel((ctrl & ~XUARTPS_CR_TX_DIS) | XUARTPS_CR_TX_EN,
+ XUARTPS_CR_OFFSET);
+
uart_console_write(port, s, count, xuartps_console_putchar);
xuartps_console_wait_tx(port);
+ xuartps_writel(ctrl, XUARTPS_CR_OFFSET);
+
/* restore interrupt state, it seems like there may be a h/w bug
* in that the interrupt enable register should not need to be
* written based on the data sheet
@@ -933,6 +1214,119 @@ static struct uart_driver xuartps_uart_driver = {
#endif
};
+#ifdef CONFIG_PM_SLEEP
+/**
+ * xuartps_suspend - suspend event
+ * @device: Pointer to the device structure
+ *
+ * Returns 0
+ */
+static int xuartps_suspend(struct device *device)
+{
+ struct uart_port *port = dev_get_drvdata(device);
+ struct tty_struct *tty;
+ struct device *tty_dev;
+ int may_wake = 0;
+
+ /* Get the tty which could be NULL so don't assume it's valid */
+ tty = tty_port_tty_get(&port->state->port);
+ if (tty) {
+ tty_dev = tty->dev;
+ may_wake = device_may_wakeup(tty_dev);
+ tty_kref_put(tty);
+ }
+
+ /*
+ * Call the API provided in serial_core.c file which handles
+ * the suspend.
+ */
+ uart_suspend_port(&xuartps_uart_driver, port);
+ if (console_suspend_enabled && !may_wake) {
+ struct xuartps *xuartps = port->private_data;
+
+ clk_disable(xuartps->refclk);
+ clk_disable(xuartps->aperclk);
+ } else {
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+ /* Empty the receive FIFO before making changes */
+ while (!(xuartps_readl(XUARTPS_SR_OFFSET) & XUARTPS_SR_RXEMPTY))
+ xuartps_readl(XUARTPS_FIFO_OFFSET);
+ /* set RX trigger level to 1 */
+ xuartps_writel(1, XUARTPS_RXWM_OFFSET);
+ /* disable RX timeout interrupts */
+ xuartps_writel(XUARTPS_IXR_TOUT, XUARTPS_IDR_OFFSET);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ return 0;
+}
+
+/**
+ * xuartps_resume - Resume after a previous suspend
+ * @device: Pointer to the device structure
+ *
+ * Returns 0
+ */
+static int xuartps_resume(struct device *device)
+{
+ struct uart_port *port = dev_get_drvdata(device);
+ unsigned long flags = 0;
+ u32 ctrl_reg;
+ struct tty_struct *tty;
+ struct device *tty_dev;
+ int may_wake = 0;
+
+ /* Get the tty which could be NULL so don't assume it's valid */
+ tty = tty_port_tty_get(&port->state->port);
+ if (tty) {
+ tty_dev = tty->dev;
+ may_wake = device_may_wakeup(tty_dev);
+ tty_kref_put(tty);
+ }
+
+ if (console_suspend_enabled && !may_wake) {
+ struct xuartps *xuartps = port->private_data;
+
+ clk_enable(xuartps->aperclk);
+ clk_enable(xuartps->refclk);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Set TX/RX Reset */
+ xuartps_writel(xuartps_readl(XUARTPS_CR_OFFSET) |
+ (XUARTPS_CR_TXRST | XUARTPS_CR_RXRST),
+ XUARTPS_CR_OFFSET);
+ while (xuartps_readl(XUARTPS_CR_OFFSET) &
+ (XUARTPS_CR_TXRST | XUARTPS_CR_RXRST))
+ cpu_relax();
+
+ /* restore rx timeout value */
+ xuartps_writel(rx_timeout, XUARTPS_RXTOUT_OFFSET);
+ /* Enable Tx/Rx */
+ ctrl_reg = xuartps_readl(XUARTPS_CR_OFFSET);
+ xuartps_writel(
+ (ctrl_reg & ~(XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS)) |
+ (XUARTPS_CR_TX_EN | XUARTPS_CR_RX_EN),
+ XUARTPS_CR_OFFSET);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ } else {
+ spin_lock_irqsave(&port->lock, flags);
+ /* restore original rx trigger level */
+ xuartps_writel(rx_trigger_level, XUARTPS_RXWM_OFFSET);
+ /* enable RX timeout interrupt */
+ xuartps_writel(XUARTPS_IXR_TOUT, XUARTPS_IER_OFFSET);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ return uart_resume_port(&xuartps_uart_driver, port);
+}
+#endif /* ! CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(xuartps_dev_pm_ops, xuartps_suspend, xuartps_resume);
+
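For reference, when CONFIG_PM_SLEEP is enabled SIMPLE_DEV_PM_OPS() above expands to roughly the following (a sketch; the authoritative macro lives in include/linux/pm.h), and to an empty dev_pm_ops otherwise:

	static const struct dev_pm_ops xuartps_dev_pm_ops = {
		.suspend  = xuartps_suspend, .resume  = xuartps_resume,
		.freeze   = xuartps_suspend, .thaw    = xuartps_resume,
		.poweroff = xuartps_suspend, .restore = xuartps_resume,
	};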
/* ---------------------------------------------------------------------
* Platform bus binding
*/
@@ -949,27 +1343,26 @@ static int xuartps_probe(struct platform_device *pdev)
struct resource *res, *res2;
struct xuartps *xuartps_data;
- xuartps_data = kzalloc(sizeof(*xuartps_data), GFP_KERNEL);
+ xuartps_data = devm_kzalloc(&pdev->dev, sizeof(*xuartps_data),
+ GFP_KERNEL);
if (!xuartps_data)
return -ENOMEM;
- xuartps_data->aperclk = clk_get(&pdev->dev, "aper_clk");
+ xuartps_data->aperclk = devm_clk_get(&pdev->dev, "aper_clk");
if (IS_ERR(xuartps_data->aperclk)) {
dev_err(&pdev->dev, "aper_clk clock not found.\n");
- rc = PTR_ERR(xuartps_data->aperclk);
- goto err_out_free;
+ return PTR_ERR(xuartps_data->aperclk);
}
- xuartps_data->refclk = clk_get(&pdev->dev, "ref_clk");
+ xuartps_data->refclk = devm_clk_get(&pdev->dev, "ref_clk");
if (IS_ERR(xuartps_data->refclk)) {
dev_err(&pdev->dev, "ref_clk clock not found.\n");
- rc = PTR_ERR(xuartps_data->refclk);
- goto err_out_clk_put_aper;
+ return PTR_ERR(xuartps_data->refclk);
}
rc = clk_prepare_enable(xuartps_data->aperclk);
if (rc) {
dev_err(&pdev->dev, "Unable to enable APER clock.\n");
- goto err_out_clk_put;
+ return rc;
}
rc = clk_prepare_enable(xuartps_data->refclk);
if (rc) {
@@ -989,13 +1382,21 @@ static int xuartps_probe(struct platform_device *pdev)
goto err_out_clk_disable;
}
+#ifdef CONFIG_COMMON_CLK
+ xuartps_data->clk_rate_change_nb.notifier_call =
+ xuartps_clk_notifier_cb;
+ if (clk_notifier_register(xuartps_data->refclk,
+ &xuartps_data->clk_rate_change_nb))
+ dev_warn(&pdev->dev, "Unable to register clock notifier.\n");
+#endif
+
/* Initialize the port structure */
port = xuartps_get_port();
if (!port) {
dev_err(&pdev->dev, "Cannot get uart_port structure\n");
rc = -ENODEV;
- goto err_out_clk_disable;
+ goto err_out_notif_unreg;
} else {
/* Register the port.
* This function also registers this device with the tty layer
@@ -1006,26 +1407,26 @@ static int xuartps_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
port->uartclk = clk_get_rate(xuartps_data->refclk);
port->private_data = xuartps_data;
+ xuartps_data->port = port;
platform_set_drvdata(pdev, port);
rc = uart_add_one_port(&xuartps_uart_driver, port);
if (rc) {
dev_err(&pdev->dev,
"uart_add_one_port() failed; err=%i\n", rc);
- goto err_out_clk_disable;
+ goto err_out_notif_unreg;
}
return 0;
}
+err_out_notif_unreg:
+#ifdef CONFIG_COMMON_CLK
+ clk_notifier_unregister(xuartps_data->refclk,
+ &xuartps_data->clk_rate_change_nb);
+#endif
err_out_clk_disable:
clk_disable_unprepare(xuartps_data->refclk);
err_out_clk_dis_aper:
clk_disable_unprepare(xuartps_data->aperclk);
-err_out_clk_put:
- clk_put(xuartps_data->refclk);
-err_out_clk_put_aper:
- clk_put(xuartps_data->aperclk);
-err_out_free:
- kfree(xuartps_data);
return rc;
}
@@ -1043,13 +1444,14 @@ static int xuartps_remove(struct platform_device *pdev)
int rc;
/* Remove the xuartps port from the serial core */
+#ifdef CONFIG_COMMON_CLK
+ clk_notifier_unregister(xuartps_data->refclk,
+ &xuartps_data->clk_rate_change_nb);
+#endif
rc = uart_remove_one_port(&xuartps_uart_driver, port);
port->mapbase = 0;
clk_disable_unprepare(xuartps_data->refclk);
clk_disable_unprepare(xuartps_data->aperclk);
- clk_put(xuartps_data->refclk);
- clk_put(xuartps_data->aperclk);
- kfree(xuartps_data);
return rc;
}
@@ -1067,6 +1469,7 @@ static struct platform_driver xuartps_platform_driver = {
.owner = THIS_MODULE,
.name = XUARTPS_NAME, /* Driver name */
.of_match_table = xuartps_of_match,
+ .pm = &xuartps_dev_pm_ops,
},
};
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 40a9fe9d3b10..ce396ecdf412 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -51,7 +51,7 @@
#include <asm/irq_regs.h>
/* Whether we react on sysrq keys or just ignore them */
-static int __read_mostly sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
+static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
static bool __read_mostly sysrq_always_enabled;
unsigned short platform_sysrq_reset_seq[] __weak = { KEY_RESERVED };
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index f597e88a705d..c94d2349dd06 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -140,6 +140,10 @@ EXPORT_SYMBOL(tty_port_destroy);
static void tty_port_destructor(struct kref *kref)
{
struct tty_port *port = container_of(kref, struct tty_port, kref);
+
+ /* check if last port ref was dropped before tty release */
+ if (WARN_ON(port->itty))
+ return;
if (port->xmit_buf)
free_page((unsigned long)port->xmit_buf);
tty_port_destroy(port);
@@ -480,8 +484,6 @@ int tty_port_close_start(struct tty_port *port,
if (port->count) {
spin_unlock_irqrestore(&port->lock, flags);
- if (port->ops->drop)
- port->ops->drop(port);
return 0;
}
set_bit(ASYNCB_CLOSING, &port->flags);
@@ -500,9 +502,7 @@ int tty_port_close_start(struct tty_port *port,
/* Flush the ldisc buffering */
tty_ldisc_flush(tty);
- /* Don't call port->drop for the last reference. Callers will want
- to drop the last active reference in ->shutdown() or the tty
- shutdown path */
+ /* Report to caller this is the last port reference */
return 1;
}
EXPORT_SYMBOL(tty_port_close_start);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 9a8e8c5a0c73..61b1137d7e56 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1300,21 +1300,30 @@ static void csi_m(struct vc_data *vc)
case 27:
vc->vc_reverse = 0;
break;
- case 38: /* ANSI X3.64-1979 (SCO-ish?)
- * Enables underscore, white foreground
- * with white underscore (Linux - use
- * default foreground).
+ case 38:
+ case 48: /* ITU T.416
+ * Higher colour modes.
+ * They break the usual properties of SGR codes
+ * and thus need to be detected and ignored by
+ * hand. Strictly speaking, that standard also
+ * wants : rather than ; as separators, contrary
+ * to ECMA-48, but no one produces such codes
+ * and almost no one accepts them.
*/
- vc->vc_color = (vc->vc_def_color & 0x0f) | (vc->vc_color & 0xf0);
- vc->vc_underline = 1;
+ i++;
+ if (i > vc->vc_npar)
+ break;
+ if (vc->vc_par[i] == 5) /* 256 colours */
+ i++; /* ubiquitous */
+ else if (vc->vc_par[i] == 2) /* 24 bit colours */
+ i += 3; /* extremely rare */
+ /* Subcommands 3 (CMY) and 4 (CMYK) are so insane
+ * that detecting them is not worth the few extra
+ * bytes of kernel size.
+ */
break;
- case 39: /* ANSI X3.64-1979 (SCO-ish?)
- * Disable underline option.
- * Reset colour to default? It did this
- * before...
- */
+ case 39:
vc->vc_color = (vc->vc_def_color & 0x0f) | (vc->vc_color & 0xf0);
- vc->vc_underline = 0;
break;
case 49:
vc->vc_color = (vc->vc_def_color & 0xf0) | (vc->vc_color & 0x0f);
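To make the new 38/48 handling concrete, an example traced from the code above: for a 256-colour sequence such as ESC [ 38 ; 5 ; 196 m, vc_par[i] is 5 and one extra parameter (196) is skipped; for a 24-bit sequence such as ESC [ 48 ; 2 ; 10 ; 20 ; 30 m, vc_par[i] is 2 and three parameters are skipped. The console still ignores the requested colour, but any further SGR attributes in the same sequence are now parsed correctly rather than being misread as stray codes.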
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index ba475632c5fa..0e808cf91d97 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -642,16 +642,29 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
{
struct uio_device *idev = vma->vm_private_data;
int mi = uio_find_mem_index(vma);
+ struct uio_mem *mem;
if (mi < 0)
return -EINVAL;
+ mem = idev->info->mem + mi;
- vma->vm_ops = &uio_physical_vm_ops;
+ if (vma->vm_end - vma->vm_start > mem->size)
+ return -EINVAL;
+ vma->vm_ops = &uio_physical_vm_ops;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ /*
+ * We cannot use the vm_iomap_memory() helper here,
+ * because vma->vm_pgoff is the map index we looked
+ * up above in uio_find_mem_index(), rather than an
+ * actual page offset into the mmap.
+ *
+ * So we just do the physical mmap without a page
+ * offset.
+ */
return remap_pfn_range(vma,
vma->vm_start,
- idev->info->mem[mi].addr >> PAGE_SHIFT,
+ mem->addr >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 023d3cb6aa0a..bb5d976e5b81 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -115,10 +115,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
pdata.phy = data->phy;
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_clk;
if (data->usbmisc_data) {
ret = imx_usbmisc_init(data->usbmisc_data);
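The same conversion recurs in most of the probe() functions below. dma_coerce_mask_and_coherent() is roughly the following (a sketch; the authoritative definition is in include/linux/dma-mapping.h), i.e. the open-coded pattern being removed plus an error check:

	/* approximate equivalent of the removed lines */
	static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
	{
		dev->dma_mask = &dev->coherent_dma_mask;	/* the "coerce" step */
		return dma_set_mask_and_coherent(dev, mask);	/* set both masks */
	}

with the added benefit that every call site now checks the return value.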
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index db535b0aa172..fed7f68d025d 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -28,7 +28,7 @@ config USB_DEFAULT_PERSIST
bool "Enable USB persist by default"
default y
help
- Say N here if you don't want USB power session persistance
+ Say N here if you don't want USB power session persistence
enabled by default. If you say N it will make suspended USB
devices that lose power get reenumerated as if they had been
unplugged, causing any mounted filesystems to be lost. The
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 2f2e88a3a11a..8b20c70d91e7 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -119,10 +119,9 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we move to full device tree support this will vanish off.
*/
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err1;
platform_set_drvdata(pdev, exynos);
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index f3bb363f1d4a..807127d56fa3 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -33,6 +33,7 @@
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/usb/ch9.h>
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index 67128be1e1b7..6a2a65aa0057 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -3078,7 +3078,9 @@ static int __init lpc32xx_udc_probe(struct platform_device *pdev)
udc->isp1301_i2c_client->addr);
pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto resource_fail;
udc->board = &lpc32xx_usbddata;
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index ec20a1f50c2d..20f6ec22fac3 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -220,11 +220,11 @@ int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
* If we can't read the file, it's no good.
* If we can't write the file, use it read-only.
*/
- if (!(filp->f_op->read || filp->f_op->aio_read)) {
+ if (!file_readable(filp)) {
LINFO(curlun, "file not readable: %s\n", filename);
goto out;
}
- if (!(filp->f_op->write || filp->f_op->aio_write))
+ if (!file_writable(filp))
ro = 1;
size = i_size_read(inode->i_mapping->host);
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index df13d425e9c5..205f4a336583 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -227,8 +227,7 @@ static int bcma_hcd_probe(struct bcma_device *dev)
/* TODO: Probably need checks here; is the core connected? */
- if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
- dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+ if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
return -EOPNOTSUPP;
usb_dev = kzalloc(sizeof(struct bcma_hcd_device), GFP_KERNEL);
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index f417526fb1f4..284f8417eae5 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -96,10 +96,9 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail_create_hcd;
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 016352e0f5a7..e97c198e052f 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -84,10 +84,9 @@ static int exynos_ehci_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we move to full device tree support this will vanish off.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
exynos_setup_vbus_gpio(pdev);
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index ab0397e4d8f3..4c528b2c033a 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -116,8 +116,10 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
* We can DMA from anywhere. But the descriptors must be in
* the lower 4GB.
*/
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &ehci_octeon_dma_mask;
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon");
if (!hcd)
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 78b01fa475bb..6fa82d6b7661 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -104,7 +104,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
struct resource *res;
struct usb_hcd *hcd;
void __iomem *regs;
- int ret = -ENODEV;
+ int ret;
int irq;
int i;
struct omap_hcd *omap;
@@ -144,11 +144,11 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+ ret = -ENODEV;
hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
dev_name(dev));
if (!hcd) {
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index d1dfb9db5b42..2ba76730e650 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -180,10 +180,9 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
* set. Since shared usb code relies on it, set it here for
* now. Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto err1;
if (!request_mem_region(res->start, resource_size(res),
ehci_orion_hc_driver.description)) {
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index f6b790ca8cf2..7f30b7168d5a 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -78,7 +78,7 @@ static int ehci_platform_probe(struct platform_device *dev)
struct resource *res_mem;
struct usb_ehci_pdata *pdata;
int irq;
- int err = -ENOMEM;
+ int err;
if (usb_disabled())
return -ENODEV;
@@ -89,10 +89,10 @@ static int ehci_platform_probe(struct platform_device *dev)
*/
if (!dev_get_platdata(&dev->dev))
dev->dev.platform_data = &ehci_platform_defaults;
- if (!dev->dev.dma_mask)
- dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
- if (!dev->dev.coherent_dma_mask)
- dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+ err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
pdata = dev_get_platdata(&dev->dev);
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 6cc5567bf9c8..875d2fcc9e0e 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -16,6 +16,8 @@
#include <linux/signal.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 1cf0adba3fc8..ee6f9ffaa0e7 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -81,10 +81,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail;
usbh_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbh_clk)) {
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index e6d8e26e48cc..b9fd0396011e 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -362,10 +362,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 0b46542591ff..0551c0af0fd1 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -26,6 +26,8 @@
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 476b5a5baf25..418444ebb1b8 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -469,7 +469,7 @@ MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
static int ohci_at91_of_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- int i, gpio;
+ int i, gpio, ret;
enum of_gpio_flags flags;
struct at91_usbh_data *pdata;
u32 ports;
@@ -481,10 +481,9 @@ static int ohci_at91_of_init(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index a87baedc0aa7..91ec9b2cd378 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -71,10 +71,9 @@ static int exynos_ohci_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we move to full device tree support this will vanish off.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
hcd = usb_create_hcd(&exynos_ohci_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 9ab7e24ba65d..e99db8a6d55f 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -181,8 +181,9 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail_disable;
dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name);
if (usb_disabled()) {
diff --git a/drivers/usb/host/ohci-octeon.c b/drivers/usb/host/ohci-octeon.c
index 342dc7e543b8..6c16dcef15c6 100644
--- a/drivers/usb/host/ohci-octeon.c
+++ b/drivers/usb/host/ohci-octeon.c
@@ -127,8 +127,9 @@ static int ohci_octeon_drv_probe(struct platform_device *pdev)
}
/* Ohci is a 32-bit device. */
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
hcd = usb_create_hcd(&ohci_octeon_hc_driver, &pdev->dev, "octeon");
if (!hcd)
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 408d06a68571..21457417a856 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -65,7 +65,7 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
struct usb_hcd *hcd = NULL;
void __iomem *regs = NULL;
struct resource *res;
- int ret = -ENODEV;
+ int ret;
int irq;
if (usb_disabled())
@@ -99,11 +99,11 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_io;
+ ret = -ENODEV;
hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
dev_name(dev));
if (!hcd) {
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 75f5a1e2f01e..81f3eba215c1 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -14,6 +14,8 @@
*/
#include <linux/signal.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/prom.h>
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index deea5d1d6394..e89ac4d4b87e 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -298,6 +298,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct pxaohci_platform_data *pdata;
u32 tmp;
+ int ret;
if (!np)
return 0;
@@ -306,10 +307,9 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index be3429e08d90..f90101b9cdb9 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -64,10 +64,10 @@ static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd)
dev_dbg(&dev->dev, "s3c2410_start_hc:\n");
- clk_enable(usb_clk);
+ clk_prepare_enable(usb_clk);
mdelay(2); /* let the bus clock stabilise */
- clk_enable(clk);
+ clk_prepare_enable(clk);
if (info != NULL) {
info->hcd = hcd;
@@ -92,8 +92,8 @@ static void s3c2410_stop_hc(struct platform_device *dev)
(info->enable_oc)(info, 0);
}
- clk_disable(clk);
- clk_disable(usb_clk);
+ clk_disable_unprepare(clk);
+ clk_disable_unprepare(usb_clk);
}
/* ohci_s3c2410_hub_status_data
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 17b2a7dad77b..aa9e127bbe71 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -185,6 +185,12 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
if (usb_disabled())
return -ENODEV;
+ /*
+ * We don't call dma_set_mask_and_coherent() here because the
+ * DMA mask has already been appropriately set up by the core
+ * SA-1111 bus code (which includes bug workarounds).
+ */
+
hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111");
if (!hcd)
return -ENOMEM;
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 31ff3fc4e26f..6b02107d281d 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -56,10 +56,9 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail;
usbh_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbh_clk)) {
diff --git a/drivers/usb/host/ssb-hcd.c b/drivers/usb/host/ssb-hcd.c
index 74af2c6287d2..0196f766df73 100644
--- a/drivers/usb/host/ssb-hcd.c
+++ b/drivers/usb/host/ssb-hcd.c
@@ -163,8 +163,7 @@ static int ssb_hcd_probe(struct ssb_device *dev,
/* TODO: Probably need checks here; is the core connected? */
- if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
- dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+ if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
return -EOPNOTSUPP;
usb_dev = kzalloc(sizeof(struct ssb_hcd_device), GFP_KERNEL);
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index ded842bc6578..3003fefaa964 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -75,10 +75,9 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- if (!pdev->dev.coherent_dma_mask)
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
pdev->name);
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index f483d1924c28..122446bf1664 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -259,7 +259,7 @@ static int ux500_probe(struct platform_device *pdev)
goto err1;
}
- clk = clk_get(&pdev->dev, "usb");
+ clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
ret = PTR_ERR(clk);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index f53298d32099..9ced8937a8f3 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -904,6 +904,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
/* Crucible Devices */
{ USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 1b8af461b522..a7019d1e3058 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1307,3 +1307,9 @@
* Manufacturer: Crucible Technologies
*/
#define FTDI_CT_COMET_PID 0x8e08
+
+/*
+ * Product: Z3X Box
+ * Manufacturer: Smart GSM Team
+ */
+#define FTDI_Z3X_PID 0x0011
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index bedf8e47713b..1e6de4cd079d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -4,11 +4,6 @@
* Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2003 IBM Corp.
*
- * Copyright (C) 2009, 2013 Frank Schäfer <fschaefer.oss@googlemail.com>
- * - fixes, improvements and documentation for the baud rate encoding methods
- * Copyright (C) 2013 Reinhard Max <max@suse.de>
- * - fixes and improvements for the divisor based baud rate encoding method
- *
* Original driver for 2.2.x by anonymous
*
* This program is free software; you can redistribute it and/or
@@ -134,18 +129,10 @@ MODULE_DEVICE_TABLE(usb, id_table);
enum pl2303_type {
- type_0, /* H version ? */
- type_1, /* H version ? */
- HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */
- HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */
- TB, /* TB version */
- HX_CLONE, /* Cheap and less functional clone of the HX chip */
+ type_0, /* don't know the difference between type 0 and */
+ type_1, /* type 1, until someone from Prolific tells us... */
+ HX, /* HX version of the pl2303 chip */
};
-/*
- * NOTE: don't know the difference between type 0 and type 1,
- * until someone from Prolific tells us...
- * TODO: distinguish between X/HX, TA and HXD, EA, RA, SA variants
- */
struct pl2303_serial_private {
enum pl2303_type type;
@@ -185,7 +172,6 @@ static int pl2303_startup(struct usb_serial *serial)
{
struct pl2303_serial_private *spriv;
enum pl2303_type type = type_0;
- char *type_str = "unknown (treating as type_0)";
unsigned char *buf;
spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
@@ -198,53 +184,15 @@ static int pl2303_startup(struct usb_serial *serial)
return -ENOMEM;
}
- if (serial->dev->descriptor.bDeviceClass == 0x02) {
+ if (serial->dev->descriptor.bDeviceClass == 0x02)
type = type_0;
- type_str = "type_0";
- } else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40) {
- /*
- * NOTE: The bcdDevice version is the only difference between
- * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB
- */
- if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) {
- /* Check if the device is a clone */
- pl2303_vendor_read(0x9494, 0, serial, buf);
- /*
- * NOTE: Not sure if this read is really needed.
- * The HX returns 0x00, the clone 0x02, but the Windows
- * driver seems to ignore the value and continues.
- */
- pl2303_vendor_write(0x0606, 0xaa, serial);
- pl2303_vendor_read(0x8686, 0, serial, buf);
- if (buf[0] != 0xaa) {
- type = HX_CLONE;
- type_str = "X/HX clone (limited functionality)";
- } else {
- type = HX_TA;
- type_str = "X/HX/TA";
- }
- pl2303_vendor_write(0x0606, 0x00, serial);
- } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
- == 0x400) {
- type = HXD_EA_RA_SA;
- type_str = "HXD/EA/RA/SA";
- } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
- == 0x500) {
- type = TB;
- type_str = "TB";
- } else {
- dev_info(&serial->interface->dev,
- "unknown/unsupported device type\n");
- kfree(spriv);
- kfree(buf);
- return -ENODEV;
- }
- } else if (serial->dev->descriptor.bDeviceClass == 0x00
- || serial->dev->descriptor.bDeviceClass == 0xFF) {
+ else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40)
+ type = HX;
+ else if (serial->dev->descriptor.bDeviceClass == 0x00)
type = type_1;
- type_str = "type_1";
- }
- dev_dbg(&serial->interface->dev, "device type: %s\n", type_str);
+ else if (serial->dev->descriptor.bDeviceClass == 0xFF)
+ type = type_1;
+ dev_dbg(&serial->interface->dev, "device type: %d\n", type);
spriv->type = type;
usb_set_serial_data(serial, spriv);
@@ -259,10 +207,10 @@ static int pl2303_startup(struct usb_serial *serial)
pl2303_vendor_read(0x8383, 0, serial, buf);
pl2303_vendor_write(0, 1, serial);
pl2303_vendor_write(1, 0, serial);
- if (type == type_0 || type == type_1)
- pl2303_vendor_write(2, 0x24, serial);
- else
+ if (type == HX)
pl2303_vendor_write(2, 0x44, serial);
+ else
+ pl2303_vendor_write(2, 0x24, serial);
kfree(buf);
return 0;
@@ -316,174 +264,65 @@ static int pl2303_set_control_lines(struct usb_serial_port *port, u8 value)
return retval;
}
-static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
- u8 buf[4])
+static void pl2303_encode_baudrate(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ u8 buf[4])
{
- /*
- * NOTE: Only the values defined in baud_sup are supported !
- * => if unsupported values are set, the PL2303 uses 9600 baud instead
- * => HX clones just don't work at unsupported baud rates < 115200 baud,
- * for baud rates > 115200 they run at 115200 baud
- */
const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
- 4800, 7200, 9600, 14400, 19200, 28800, 38400,
- 57600, 115200, 230400, 460800, 614400, 921600,
- 1228800, 2457600, 3000000, 6000000, 12000000 };
+ 4800, 7200, 9600, 14400, 19200, 28800, 38400,
+ 57600, 115200, 230400, 460800, 500000, 614400,
+ 921600, 1228800, 2457600, 3000000, 6000000 };
+
+ struct usb_serial *serial = port->serial;
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
+ int baud;
+ int i;
+
/*
- * NOTE: With the exception of type_0/1 devices, the following
- * additional baud rates are supported (tested with HX rev. 3A only):
- * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800,
- * 403200, 806400. (*: not HX and HX clones)
- *
- * Maximum values: HXD, TB: 12000000; HX, TA: 6000000;
- * type_0+1: 1228800; RA: 921600; HX clones, SA: 115200
- *
- * As long as we are not using this encoding method for anything else
- * than the type_0+1, HX and HX clone chips, there is no point in
- * complicating the code to support them.
+ * NOTE: Only the values defined in baud_sup are supported!
+ * => if unsupported values are set, the PL2303 seems to use
+ * 9600 baud (at least my PL2303X always does)
*/
- int i;
+ baud = tty_get_baud_rate(tty);
+ dev_dbg(&port->dev, "baud requested = %d\n", baud);
+ if (!baud)
+ return;
/* Set baudrate to nearest supported value */
for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) {
if (baud_sup[i] > baud)
break;
}
+
if (i == ARRAY_SIZE(baud_sup))
baud = baud_sup[i - 1];
else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1]))
baud = baud_sup[i - 1];
else
baud = baud_sup[i];
- /* Respect the chip type specific baud rate limits */
- /*
- * FIXME: as long as we don't know how to distinguish between the
- * HXD, EA, RA, and SA chip variants, allow the max. value of 12M.
- */
- if (type == HX_TA)
- baud = min_t(int, baud, 6000000);
- else if (type == type_0 || type == type_1)
- baud = min_t(int, baud, 1228800);
- else if (type == HX_CLONE)
- baud = min_t(int, baud, 115200);
- /* Direct (standard) baud rate encoding method */
- put_unaligned_le32(baud, buf);
-
- return baud;
-}
-static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type,
- u8 buf[4])
-{
- /*
- * Divisor based baud rate encoding method
- *
- * NOTE: HX clones do NOT support this method.
- * It's not clear if the type_0/1 chips support it.
- *
- * divisor = 12MHz * 32 / baudrate = 2^A * B
- *
- * with
- *
- * A = buf[1] & 0x0e
- * B = buf[0] + (buf[1] & 0x01) << 8
- *
- * Special cases:
- * => 8 < B < 16: device seems to work not properly
- * => B <= 8: device uses the max. value B = 512 instead
- */
- unsigned int A, B;
+ /* type_0, type_1 only support up to 1228800 baud */
+ if (spriv->type != HX)
+ baud = min_t(int, baud, 1228800);
- /*
- * NOTE: The Windows driver allows maximum baud rates of 110% of the
- * specified maximium value.
- * Quick tests with early (2004) HX (rev. A) chips suggest, that even
- * higher baud rates (up to the maximum of 24M baud !) are working fine,
- * but that should really be tested carefully in "real life" scenarios
- * before removing the upper limit completely.
- * Baud rates smaller than the specified 75 baud are definitely working
- * fine.
- */
- if (type == type_0 || type == type_1)
- baud = min_t(int, baud, 1228800 * 1.1);
- else if (type == HX_TA)
- baud = min_t(int, baud, 6000000 * 1.1);
- else if (type == HXD_EA_RA_SA)
- /* HXD, EA: 12Mbps; RA: 1Mbps; SA: 115200 bps */
- /*
- * FIXME: as long as we don't know how to distinguish between
- * these chip variants, allow the max. of these values
- */
- baud = min_t(int, baud, 12000000 * 1.1);
- else if (type == TB)
- baud = min_t(int, baud, 12000000 * 1.1);
- /* Determine factors A and B */
- A = 0;
- B = 12000000 * 32 / baud; /* 12MHz */
- B <<= 1; /* Add one bit for rounding */
- while (B > (512 << 1) && A <= 14) {
- A += 2;
- B >>= 2;
- }
- if (A > 14) { /* max. divisor = min. baudrate reached */
- A = 14;
- B = 512;
- /* => ~45.78 baud */
+ if (baud <= 115200) {
+ put_unaligned_le32(baud, buf);
} else {
- B = (B + 1) >> 1; /* Round the last bit */
- }
- /* Handle special cases */
- if (B == 512)
- B = 0; /* also: 1 to 8 */
- else if (B < 16)
/*
- * NOTE: With the current algorithm this happens
- * only for A=0 and means that the min. divisor
- * (respectively: the max. baudrate) is reached.
+ * Apparently the formula for higher speeds is:
+ * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
*/
- B = 16; /* => 24 MBaud */
- /* Encode the baud rate */
- buf[3] = 0x80; /* Select divisor encoding method */
- buf[2] = 0;
- buf[1] = (A & 0x0e); /* A */
- buf[1] |= ((B & 0x100) >> 8); /* MSB of B */
- buf[0] = B & 0xff; /* 8 LSBs of B */
- /* Calculate the actual/resulting baud rate */
- if (B <= 8)
- B = 512;
- baud = 12000000 * 32 / ((1 << A) * B);
-
- return baud;
-}
-
-static void pl2303_encode_baudrate(struct tty_struct *tty,
- struct usb_serial_port *port,
- enum pl2303_type type,
- u8 buf[4])
-{
- int baud;
+ unsigned tmp = 12000000 * 32 / baud;
+ buf[3] = 0x80;
+ buf[2] = 0;
+ buf[1] = (tmp >= 256);
+ while (tmp >= 256) {
+ tmp >>= 2;
+ buf[1] <<= 1;
+ }
+ buf[0] = tmp;
+ }
- baud = tty_get_baud_rate(tty);
- dev_dbg(&port->dev, "baud requested = %d\n", baud);
- if (!baud)
- return;
- /*
- * There are two methods for setting/encoding the baud rate
- * 1) Direct method: encodes the baud rate value directly
- * => supported by all chip types
- * 2) Divisor based method: encodes a divisor to a base value (12MHz*32)
- * => not supported by HX clones (and likely type_0/1 chips)
- *
- * NOTE: Although the divisor based baud rate encoding method is much
- * more flexible, some of the standard baud rate values can not be
- * realized exactly. But the difference is very small (max. 0.2%) and
- * the device likely uses the same baud rate generator for both methods
- * so that there is likley no difference.
- */
- if (type == type_0 || type == type_1 || type == HX_CLONE)
- baud = pl2303_baudrate_encode_direct(baud, type, buf);
- else
- baud = pl2303_baudrate_encode_divisor(baud, type, buf);
/* Save resulting baud rate */
tty_encode_baud_rate(tty, baud, baud);
dev_dbg(&port->dev, "baud set = %d\n", baud);
@@ -540,8 +379,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
}
- /* For reference: buf[0]:buf[3] baud rate value */
- pl2303_encode_baudrate(tty, port, spriv->type, buf);
+ /* For reference buf[0]:buf[3] baud rate value */
+ pl2303_encode_baudrate(tty, port, &buf[0]);
/* For reference buf[4]=0 is 1 stop bits */
/* For reference buf[4]=1 is 1.5 stop bits */
@@ -618,10 +457,10 @@ static void pl2303_set_termios(struct tty_struct *tty,
dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf);
if (C_CRTSCTS(tty)) {
- if (spriv->type == type_0 || spriv->type == type_1)
- pl2303_vendor_write(0x0, 0x41, serial);
- else
+ if (spriv->type == HX)
pl2303_vendor_write(0x0, 0x61, serial);
+ else
+ pl2303_vendor_write(0x0, 0x41, serial);
} else {
pl2303_vendor_write(0x0, 0x0, serial);
}
@@ -658,7 +497,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int result;
- if (spriv->type == type_0 || spriv->type == type_1) {
+ if (spriv->type != HX) {
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
} else {
@@ -833,7 +672,6 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
0, NULL, 0, 100);
- /* NOTE: HX clones don't support sending breaks, -EPIPE is returned */
if (result)
dev_err(&port->dev, "error sending break = %d\n", result);
}
diff --git a/drivers/uwb/umc-bus.c b/drivers/uwb/umc-bus.c
index 5c5b3fc9088a..e3ed6ff6a481 100644
--- a/drivers/uwb/umc-bus.c
+++ b/drivers/uwb/umc-bus.c
@@ -201,6 +201,7 @@ static ssize_t capability_id_show(struct device *dev, struct device_attribute *a
return sprintf(buf, "0x%02x\n", umc->cap_id);
}
+static DEVICE_ATTR_RO(capability_id);
static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -208,12 +209,14 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr, c
return sprintf(buf, "0x%04x\n", umc->version);
}
+static DEVICE_ATTR_RO(version);
-static struct device_attribute umc_dev_attrs[] = {
- __ATTR_RO(capability_id),
- __ATTR_RO(version),
- __ATTR_NULL,
+static struct attribute *umc_dev_attrs[] = {
+ &dev_attr_capability_id.attr,
+ &dev_attr_version.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(umc_dev);
struct bus_type umc_bus_type = {
.name = "umc",
@@ -222,7 +225,7 @@ struct bus_type umc_bus_type = {
.remove = umc_device_remove,
.suspend = umc_device_suspend,
.resume = umc_device_resume,
- .dev_attrs = umc_dev_attrs,
+ .dev_groups = umc_dev_groups,
};
EXPORT_SYMBOL_GPL(umc_bus_type);
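ATTRIBUTE_GROUPS(umc_dev) supplies the umc_dev_groups symbol referenced just below; as a sketch (see <linux/sysfs.h> for the real macro) it expands to roughly:

	static const struct attribute_group umc_dev_group = {
		.attrs = umc_dev_attrs,
	};
	static const struct attribute_group *umc_dev_groups[] = {
		&umc_dev_group,
		NULL,
	};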
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index ce5221fa393a..e663921eebb6 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1056,7 +1056,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
if (data_direction != DMA_NONE) {
ret = vhost_scsi_map_iov_to_sgl(cmd,
&vq->iov[data_first], data_num,
- data_direction == DMA_TO_DEVICE);
+ data_direction == DMA_FROM_DEVICE);
if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to sgl\n");
goto err_free;
diff --git a/drivers/video/68328fb.c b/drivers/video/68328fb.c
index fa44fbed397d..552258c8f99d 100644
--- a/drivers/video/68328fb.c
+++ b/drivers/video/68328fb.c
@@ -478,11 +478,10 @@ int __init mc68x328fb_init(void)
return -EINVAL;
}
- printk(KERN_INFO
- "fb%d: %s frame buffer device\n", fb_info.node, fb_info.fix.id);
- printk(KERN_INFO
- "fb%d: %dx%dx%d at 0x%08lx\n", fb_info.node,
- mc68x328fb_default.xres_virtual, mc68x328fb_default.yres_virtual,
+ fb_info(&fb_info, "%s frame buffer device\n", fb_info.fix.id);
+ fb_info(&fb_info, "%dx%dx%d at 0x%08lx\n",
+ mc68x328fb_default.xres_virtual,
+ mc68x328fb_default.yres_virtual,
1 << mc68x328fb_default.bits_per_pixel, videomemory);
return 0;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 84b685f7ab6e..4f2e1b35eb38 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -19,10 +19,10 @@ source "drivers/char/agp/Kconfig"
source "drivers/gpu/vga/Kconfig"
-source "drivers/gpu/drm/Kconfig"
-
source "drivers/gpu/host1x/Kconfig"
+source "drivers/gpu/drm/Kconfig"
+
config VGASTATE
tristate
default n
@@ -996,6 +996,8 @@ config FB_ATMEL
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_MODE_HELPERS
+ select VIDEOMODE_HELPERS
help
This enables support for the AT91/AT32 LCD Controller.
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 0a2cce7285be..14d6b3793e0a 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -10,6 +10,7 @@
*
* ARM PrimeCell PL110 Color LCD Controller
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -544,13 +545,17 @@ static int clcdfb_register(struct clcd_fb *fb)
static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
{
- struct clcd_board *board = dev->dev.platform_data;
+ struct clcd_board *board = dev_get_platdata(&dev->dev);
struct clcd_fb *fb;
int ret;
if (!board)
return -EINVAL;
+ ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto out;
+
ret = amba_request_regions(dev, NULL);
if (ret) {
printk(KERN_ERR "CLCD: unable to reserve regs region\n");
@@ -594,8 +599,6 @@ static int clcdfb_remove(struct amba_device *dev)
{
struct clcd_fb *fb = amba_get_drvdata(dev);
- amba_set_drvdata(dev, NULL);
-
clcdfb_disable(fb);
unregister_framebuffer(&fb->fb);
if (fb->fb.cmap.len)
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index a6780eecff0e..0dac36ce09d6 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -3742,13 +3742,12 @@ default_chipset:
if (err)
goto unset_drvdata;
- printk("fb%d: %s frame buffer device, using %dK of video memory\n",
- info->node, info->fix.id, info->fix.smem_len>>10);
+ fb_info(info, "%s frame buffer device, using %dK of video memory\n",
+ info->fix.id, info->fix.smem_len>>10);
return 0;
unset_drvdata:
- dev_set_drvdata(&pdev->dev, NULL);
fb_dealloc_cmap(&info->cmap);
free_irq:
free_irq(IRQ_AMIGA_COPPER, info->par);
@@ -3768,7 +3767,6 @@ static int __exit amifb_remove(struct platform_device *pdev)
struct fb_info *info = dev_get_drvdata(&pdev->dev);
unregister_framebuffer(info);
- dev_set_drvdata(&pdev->dev, NULL);
fb_dealloc_cmap(&info->cmap);
free_irq(IRQ_AMIGA_COPPER, info->par);
custom.dmacon = DMAF_ALL | DMAF_MASTER;
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index e43401afdd03..1b0b233b8b39 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -556,9 +556,8 @@ static int arcfb_probe(struct platform_device *dev)
goto err1;
}
}
- printk(KERN_INFO
- "fb%d: Arc frame buffer device, using %dK of video memory\n",
- info->node, videomemorysize >> 10);
+ fb_info(info, "Arc frame buffer device, using %dK of video memory\n",
+ videomemorysize >> 10);
/* this inits the lcd but doesn't clear dirty pixels */
for (i = 0; i < num_cols * num_rows; i++) {
@@ -572,8 +571,7 @@ static int arcfb_probe(struct platform_device *dev)
/* if we were told to splash the screen, we just clear it */
if (!nosplash) {
for (i = 0; i < num_cols * num_rows; i++) {
- printk(KERN_INFO "fb%d: splashing lcd %d\n",
- info->node, i);
+ fb_info(info, "splashing lcd %d\n", i);
ks108_set_start_line(par, i, 0);
ks108_clear_lcd(par, i);
}
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index 94a51f1ef904..a6b29bd4a12a 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -137,8 +137,7 @@ static void arkfb_settile(struct fb_info *info, struct fb_tilemap *map)
if ((map->width != 8) || (map->height != 16) ||
(map->depth != 1) || (map->length != 256)) {
- printk(KERN_ERR "fb%d: unsupported font parameters: width %d, "
- "height %d, depth %d, length %d\n", info->node,
+ fb_err(info, "unsupported font parameters: width %d, height %d, depth %d, length %d\n",
map->width, map->height, map->depth, map->length);
return;
}
@@ -517,7 +516,7 @@ static void ark_set_pixclock(struct fb_info *info, u32 pixclock)
int rv = dac_set_freq(par->dac, 0, 1000000000 / pixclock);
if (rv < 0) {
- printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
+ fb_err(info, "cannot set requested pixclock, keeping old value\n");
return;
}
@@ -584,7 +583,7 @@ static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
rv = svga_match_format (arkfb_formats, var, NULL);
if (rv < 0)
{
- printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
+ fb_err(info, "unsupported mode requested\n");
return rv;
}
@@ -604,14 +603,15 @@ static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
if (mem > info->screen_size)
{
- printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
+ fb_err(info, "not enough framebuffer memory (%d kB requested, %d kB available)\n",
+ mem >> 10, (unsigned int) (info->screen_size >> 10));
return -EINVAL;
}
rv = svga_check_timings (&ark_timing_regs, var, info->node);
if (rv < 0)
{
- printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
+ fb_err(info, "invalid timings requested\n");
return rv;
}
@@ -693,7 +693,7 @@ static int arkfb_set_par(struct fb_info *info)
vga_wseq(par->state.vgabase, 0x18, regval);
/* Set the offset register */
- pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
+ fb_dbg(info, "offset register : %d\n", offset_value);
svga_wcrt_multi(par->state.vgabase, ark_offset_regs, offset_value);
/* fix for hi-res textmode */
@@ -716,7 +716,7 @@ static int arkfb_set_par(struct fb_info *info)
/* Set mode-specific register values */
switch (mode) {
case 0:
- pr_debug("fb%d: text mode\n", info->node);
+ fb_dbg(info, "text mode\n");
svga_set_textmode_vga_regs(par->state.vgabase);
vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
@@ -725,7 +725,7 @@ static int arkfb_set_par(struct fb_info *info)
break;
case 1:
- pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
+ fb_dbg(info, "4 bit pseudocolor\n");
vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
@@ -733,44 +733,44 @@ static int arkfb_set_par(struct fb_info *info)
dac_set_mode(par->dac, DAC_PSEUDO8_8);
break;
case 2:
- pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
+ fb_dbg(info, "4 bit pseudocolor, planar\n");
vga_wseq(par->state.vgabase, 0x11, 0x10); /* basic VGA mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_8);
break;
case 3:
- pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
+ fb_dbg(info, "8 bit pseudocolor\n");
vga_wseq(par->state.vgabase, 0x11, 0x16); /* 8bpp accel mode */
if (info->var.pixclock > 20000) {
- pr_debug("fb%d: not using multiplex\n", info->node);
+ fb_dbg(info, "not using multiplex\n");
svga_wcrt_mask(par->state.vgabase, 0x46, 0x00, 0x04); /* 8bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_8);
} else {
- pr_debug("fb%d: using multiplex\n", info->node);
+ fb_dbg(info, "using multiplex\n");
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_PSEUDO8_16);
hdiv = 2;
}
break;
case 4:
- pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
+ fb_dbg(info, "5/5/5 truecolor\n");
vga_wseq(par->state.vgabase, 0x11, 0x1A); /* 16bpp accel mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_RGB1555_16);
break;
case 5:
- pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
+ fb_dbg(info, "5/6/5 truecolor\n");
vga_wseq(par->state.vgabase, 0x11, 0x1A); /* 16bpp accel mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
dac_set_mode(par->dac, DAC_RGB0565_16);
break;
case 6:
- pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
+ fb_dbg(info, "8/8/8 truecolor\n");
vga_wseq(par->state.vgabase, 0x11, 0x16); /* 8bpp accel mode ??? */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
@@ -779,7 +779,7 @@ static int arkfb_set_par(struct fb_info *info)
hdiv = 2;
break;
case 7:
- pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
+ fb_dbg(info, "8/8/8/8 truecolor\n");
vga_wseq(par->state.vgabase, 0x11, 0x1E); /* 32bpp accel mode */
svga_wcrt_mask(par->state.vgabase, 0x46, 0x04, 0x04); /* 16bit pixel path */
@@ -787,7 +787,7 @@ static int arkfb_set_par(struct fb_info *info)
hmul = 2;
break;
default:
- printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
+ fb_err(info, "unsupported mode - bug\n");
return -EINVAL;
}
@@ -879,19 +879,19 @@ static int arkfb_blank(int blank_mode, struct fb_info *info)
switch (blank_mode) {
case FB_BLANK_UNBLANK:
- pr_debug("fb%d: unblank\n", info->node);
+ fb_dbg(info, "unblank\n");
svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
break;
case FB_BLANK_NORMAL:
- pr_debug("fb%d: blank\n", info->node);
+ fb_dbg(info, "blank\n");
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
break;
case FB_BLANK_POWERDOWN:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_VSYNC_SUSPEND:
- pr_debug("fb%d: sync down\n", info->node);
+ fb_dbg(info, "sync down\n");
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
svga_wcrt_mask(par->state.vgabase, 0x17, 0x00, 0x80);
break;
@@ -1048,12 +1048,12 @@ static int ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
rc = register_framebuffer(info);
if (rc < 0) {
- dev_err(info->device, "cannot register framebugger\n");
+ dev_err(info->device, "cannot register framebuffer\n");
goto err_reg_fb;
}
- printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
- pci_name(dev), info->fix.smem_len >> 20);
+ fb_info(info, "%s on %s, %d MB RAM\n",
+ info->fix.id, pci_name(dev), info->fix.smem_len >> 20);
/* Record a reference to the driver data */
pci_set_drvdata(dev, info);
@@ -1108,7 +1108,6 @@ static void ark_pci_remove(struct pci_dev *dev)
pci_release_regions(dev);
/* pci_disable_device(dev); */
- pci_set_drvdata(dev, NULL);
framebuffer_release(info);
}
}
diff --git a/drivers/video/asiliantfb.c b/drivers/video/asiliantfb.c
index d5a37d62847b..d611f1a1ac53 100644
--- a/drivers/video/asiliantfb.c
+++ b/drivers/video/asiliantfb.c
@@ -527,8 +527,8 @@ static int init_asiliant(struct fb_info *p, unsigned long addr)
return err;
}
- printk(KERN_INFO "fb%d: Asiliant 69000 frame buffer (%dK RAM detected)\n",
- p->node, p->fix.smem_len / 1024);
+ fb_info(p, "Asiliant 69000 frame buffer (%dK RAM detected)\n",
+ p->fix.smem_len / 1024);
writeb(0xff, mmio_base + 0x78c);
chips_hw_init(p);
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 64e41f5448c4..e21d1f58554c 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -3246,11 +3246,8 @@ int __init atafb_init(void)
return -EINVAL;
}
- // FIXME: mode needs setting!
- //printk("fb%d: %s frame buffer device, using %dK of video memory\n",
- // fb_info.node, fb_info.mode->name, screen_len>>10);
- printk("fb%d: frame buffer device, using %dK of video memory\n",
- fb_info.node, screen_len >> 10);
+ fb_info(&fb_info, "frame buffer device, using %dK of video memory\n",
+ screen_len >> 10);
/* TODO: This driver cannot be unloaded yet */
return 0;
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 088511a58a26..8521051cf946 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -20,12 +20,55 @@
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/platform_data/atmel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
#include <mach/cpu.h>
#include <asm/gpio.h>
#include <video/atmel_lcdc.h>
+struct atmel_lcdfb_config {
+ bool have_alt_pixclock;
+ bool have_hozval;
+ bool have_intensity_bit;
+};
+
+ /* LCD Controller info data structure, stored in device platform_data */
+struct atmel_lcdfb_info {
+ spinlock_t lock;
+ struct fb_info *info;
+ void __iomem *mmio;
+ int irq_base;
+ struct work_struct task;
+
+ unsigned int smem_len;
+ struct platform_device *pdev;
+ struct clk *bus_clk;
+ struct clk *lcdc_clk;
+
+ struct backlight_device *backlight;
+ u8 bl_power;
+ u8 saved_lcdcon;
+
+ u32 pseudo_palette[16];
+ bool have_intensity_bit;
+
+ struct atmel_lcdfb_pdata pdata;
+
+ struct atmel_lcdfb_config *config;
+};
+
+struct atmel_lcdfb_power_ctrl_gpio {
+ int gpio;
+ int active_low;
+
+ struct list_head list;
+};
+
#define lcdc_readl(sinfo, reg) __raw_readl((sinfo)->mmio+(reg))
#define lcdc_writel(sinfo, reg, val) __raw_writel((val), (sinfo)->mmio+(reg))
@@ -34,12 +77,6 @@
#define ATMEL_LCDC_DMA_BURST_LEN 8 /* words */
#define ATMEL_LCDC_FIFO_SIZE 512 /* words */
-struct atmel_lcdfb_config {
- bool have_alt_pixclock;
- bool have_hozval;
- bool have_intensity_bit;
-};
-
static struct atmel_lcdfb_config at91sam9261_config = {
.have_hozval = true,
.have_intensity_bit = true,
@@ -248,18 +285,27 @@ static void exit_backlight(struct atmel_lcdfb_info *sinfo)
static void init_contrast(struct atmel_lcdfb_info *sinfo)
{
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
/* contrast pwm can be 'inverted' */
- if (sinfo->lcdcon_pol_negative)
+ if (pdata->lcdcon_pol_negative)
contrast_ctr &= ~(ATMEL_LCDC_POL_POSITIVE);
/* have some default contrast/backlight settings */
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, ATMEL_LCDC_CVAL_DEFAULT);
- if (sinfo->lcdcon_is_backlight)
+ if (pdata->lcdcon_is_backlight)
init_backlight(sinfo);
}
+static inline void atmel_lcdfb_power_control(struct atmel_lcdfb_info *sinfo, int on)
+{
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
+ if (pdata->atmel_lcdfb_power_control)
+ pdata->atmel_lcdfb_power_control(pdata, on);
+}
static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = {
.type = FB_TYPE_PACKED_PIXELS,
@@ -299,9 +345,11 @@ static unsigned long compute_hozval(struct atmel_lcdfb_info *sinfo,
static void atmel_lcdfb_stop_nowait(struct atmel_lcdfb_info *sinfo)
{
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
/* Turn off the LCD controller and the DMA controller */
lcdc_writel(sinfo, ATMEL_LCDC_PWRCON,
- sinfo->guard_time << ATMEL_LCDC_GUARDT_OFFSET);
+ pdata->guard_time << ATMEL_LCDC_GUARDT_OFFSET);
/* Wait for the LCDC core to become idle */
while (lcdc_readl(sinfo, ATMEL_LCDC_PWRCON) & ATMEL_LCDC_BUSY)
@@ -321,9 +369,11 @@ static void atmel_lcdfb_stop(struct atmel_lcdfb_info *sinfo)
static void atmel_lcdfb_start(struct atmel_lcdfb_info *sinfo)
{
- lcdc_writel(sinfo, ATMEL_LCDC_DMACON, sinfo->default_dmacon);
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
+ lcdc_writel(sinfo, ATMEL_LCDC_DMACON, pdata->default_dmacon);
lcdc_writel(sinfo, ATMEL_LCDC_PWRCON,
- (sinfo->guard_time << ATMEL_LCDC_GUARDT_OFFSET)
+ (pdata->guard_time << ATMEL_LCDC_GUARDT_OFFSET)
| ATMEL_LCDC_PWR);
}
@@ -424,6 +474,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
{
struct device *dev = info->device;
struct atmel_lcdfb_info *sinfo = info->par;
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
unsigned long clk_value_khz;
clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000;
@@ -510,7 +561,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
else
var->green.length = 6;
- if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
+ if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
/* RGB:5X5 mode */
var->red.offset = var->green.length + 5;
var->blue.offset = 0;
@@ -527,7 +578,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
var->transp.length = 8;
/* fall through */
case 24:
- if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
+ if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
/* RGB:888 mode */
var->red.offset = 16;
var->blue.offset = 0;
@@ -576,6 +627,7 @@ static void atmel_lcdfb_reset(struct atmel_lcdfb_info *sinfo)
static int atmel_lcdfb_set_par(struct fb_info *info)
{
struct atmel_lcdfb_info *sinfo = info->par;
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
unsigned long hozval_linesz;
unsigned long value;
unsigned long clk_value_khz;
@@ -637,7 +689,7 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
/* Initialize control register 2 */
- value = sinfo->default_lcdcon2;
+ value = pdata->default_lcdcon2;
if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
value |= ATMEL_LCDC_INVLINE_INVERTED;
@@ -741,6 +793,7 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red,
unsigned int transp, struct fb_info *info)
{
struct atmel_lcdfb_info *sinfo = info->par;
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
unsigned int val;
u32 *pal;
int ret = 1;
@@ -777,8 +830,7 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red,
*/
} else {
/* new style BGR:565 / RGB:565 */
- if (sinfo->lcd_wiring_mode ==
- ATMEL_LCDC_WIRING_RGB) {
+ if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
val = ((blue >> 11) & 0x001f);
val |= ((red >> 0) & 0xf800);
} else {
@@ -912,16 +964,187 @@ static void atmel_lcdfb_stop_clock(struct atmel_lcdfb_info *sinfo)
clk_disable_unprepare(sinfo->lcdc_clk);
}
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_lcdfb_dt_ids[] = {
+ { .compatible = "atmel,at91sam9261-lcdc" , .data = &at91sam9261_config, },
+ { .compatible = "atmel,at91sam9263-lcdc" , .data = &at91sam9263_config, },
+ { .compatible = "atmel,at91sam9g10-lcdc" , .data = &at91sam9g10_config, },
+ { .compatible = "atmel,at91sam9g45-lcdc" , .data = &at91sam9g45_config, },
+ { .compatible = "atmel,at91sam9g45es-lcdc" , .data = &at91sam9g45es_config, },
+ { .compatible = "atmel,at91sam9rl-lcdc" , .data = &at91sam9rl_config, },
+ { .compatible = "atmel,at32ap-lcdc" , .data = &at32ap_config, },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_lcdfb_dt_ids);
+
+static const char *atmel_lcdfb_wiring_modes[] = {
+ [ATMEL_LCDC_WIRING_BGR] = "BRG",
+ [ATMEL_LCDC_WIRING_RGB] = "RGB",
+};
+
+static int atmel_lcdfb_get_of_wiring_modes(struct device_node *np)
+{
+ const char *mode;
+ int err, i;
+
+ err = of_property_read_string(np, "atmel,lcd-wiring-mode", &mode);
+ if (err < 0)
+ return ATMEL_LCDC_WIRING_BGR;
+
+ for (i = 0; i < ARRAY_SIZE(atmel_lcdfb_wiring_modes); i++)
+ if (!strcasecmp(mode, atmel_lcdfb_wiring_modes[i]))
+ return i;
+
+ return -ENODEV;
+}
+
+static void atmel_lcdfb_power_control_gpio(struct atmel_lcdfb_pdata *pdata, int on)
+{
+ struct atmel_lcdfb_power_ctrl_gpio *og;
+
+ list_for_each_entry(og, &pdata->pwr_gpios, list)
+ gpio_set_value(og->gpio, on);
+}
+
+static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+{
+ struct fb_info *info = sinfo->info;
+ struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+ struct fb_var_screeninfo *var = &info->var;
+ struct device *dev = &sinfo->pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *display_np;
+ struct device_node *timings_np;
+ struct display_timings *timings;
+ enum of_gpio_flags flags;
+ struct atmel_lcdfb_power_ctrl_gpio *og;
+ bool is_gpio_power = false;
+ int ret = -ENOENT;
+ int i, gpio;
+
+ sinfo->config = (struct atmel_lcdfb_config*)
+ of_match_device(atmel_lcdfb_dt_ids, dev)->data;
+
+ display_np = of_parse_phandle(np, "display", 0);
+ if (!display_np) {
+ dev_err(dev, "failed to find display phandle\n");
+ return -ENOENT;
+ }
+
+ ret = of_property_read_u32(display_np, "bits-per-pixel", &var->bits_per_pixel);
+ if (ret < 0) {
+ dev_err(dev, "failed to get property bits-per-pixel\n");
+ goto put_display_node;
+ }
+
+ ret = of_property_read_u32(display_np, "atmel,guard-time", &pdata->guard_time);
+ if (ret < 0) {
+ dev_err(dev, "failed to get property atmel,guard-time\n");
+ goto put_display_node;
+ }
+
+ ret = of_property_read_u32(display_np, "atmel,lcdcon2", &pdata->default_lcdcon2);
+ if (ret < 0) {
+ dev_err(dev, "failed to get property atmel,lcdcon2\n");
+ goto put_display_node;
+ }
+
+ ret = of_property_read_u32(display_np, "atmel,dmacon", &pdata->default_dmacon);
+ if (ret < 0) {
+ dev_err(dev, "failed to get property atmel,dmacon\n");
+ goto put_display_node;
+ }
+
+ ret = -ENOMEM;
+ for (i = 0; i < of_gpio_named_count(display_np, "atmel,power-control-gpio"); i++) {
+ gpio = of_get_named_gpio_flags(display_np, "atmel,power-control-gpio",
+ i, &flags);
+ if (gpio < 0)
+ continue;
+
+ og = devm_kzalloc(dev, sizeof(*og), GFP_KERNEL);
+ if (!og)
+ goto put_display_node;
+
+ og->gpio = gpio;
+ og->active_low = flags & OF_GPIO_ACTIVE_LOW;
+ is_gpio_power = true;
+ ret = devm_gpio_request(dev, gpio, "lcd-power-control-gpio");
+ if (ret) {
+ dev_err(dev, "request gpio %d failed\n", gpio);
+ goto put_display_node;
+ }
+
+ ret = gpio_direction_output(gpio, og->active_low);
+ if (ret) {
+ dev_err(dev, "set direction output gpio %d failed\n", gpio);
+ goto put_display_node;
+ }
+ }
+
+ if (is_gpio_power)
+ pdata->atmel_lcdfb_power_control = atmel_lcdfb_power_control_gpio;
+
+ ret = atmel_lcdfb_get_of_wiring_modes(display_np);
+ if (ret < 0) {
+ dev_err(dev, "invalid atmel,lcd-wiring-mode\n");
+ goto put_display_node;
+ }
+ pdata->lcd_wiring_mode = ret;
+
+ pdata->lcdcon_is_backlight = of_property_read_bool(display_np, "atmel,lcdcon-backlight");
+
+ timings = of_get_display_timings(display_np);
+ if (!timings) {
+ dev_err(dev, "failed to get display timings\n");
+ goto put_display_node;
+ }
+
+ timings_np = of_find_node_by_name(display_np, "display-timings");
+ if (!timings_np) {
+ dev_err(dev, "failed to find display-timings node\n");
+ goto put_display_node;
+ }
+
+ for (i = 0; i < of_get_child_count(timings_np); i++) {
+ struct videomode vm;
+ struct fb_videomode fb_vm;
+
+ ret = videomode_from_timings(timings, &vm, i);
+ if (ret < 0)
+ goto put_timings_node;
+ ret = fb_videomode_from_videomode(&vm, &fb_vm);
+ if (ret < 0)
+ goto put_timings_node;
+
+ fb_add_videomode(&fb_vm, &info->modelist);
+ }
+
+ return 0;
+
+put_timings_node:
+ of_node_put(timings_np);
+put_display_node:
+ of_node_put(display_np);
+ return ret;
+}
+#else
+static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+{
+ return 0;
+}
+#endif
static int __init atmel_lcdfb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fb_info *info;
struct atmel_lcdfb_info *sinfo;
- struct atmel_lcdfb_info *pdata_sinfo;
- struct fb_videomode fbmode;
+ struct atmel_lcdfb_pdata *pdata = NULL;
struct resource *regs = NULL;
struct resource *map = NULL;
+ struct fb_modelist *modelist;
int ret;
dev_dbg(dev, "%s BEGIN\n", __func__);
@@ -934,26 +1157,35 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
}
sinfo = info->par;
+ sinfo->pdev = pdev;
+ sinfo->info = info;
+
+ INIT_LIST_HEAD(&info->modelist);
- if (dev->platform_data) {
- pdata_sinfo = (struct atmel_lcdfb_info *)dev->platform_data;
- sinfo->default_bpp = pdata_sinfo->default_bpp;
- sinfo->default_dmacon = pdata_sinfo->default_dmacon;
- sinfo->default_lcdcon2 = pdata_sinfo->default_lcdcon2;
- sinfo->default_monspecs = pdata_sinfo->default_monspecs;
- sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control;
- sinfo->guard_time = pdata_sinfo->guard_time;
- sinfo->smem_len = pdata_sinfo->smem_len;
- sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight;
- sinfo->lcdcon_pol_negative = pdata_sinfo->lcdcon_pol_negative;
- sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode;
+ if (pdev->dev.of_node) {
+ ret = atmel_lcdfb_of_init(sinfo);
+ if (ret)
+ goto free_info;
+ } else if (dev_get_platdata(dev)) {
+ struct fb_monspecs *monspecs;
+ int i;
+
+ pdata = dev_get_platdata(dev);
+ monspecs = pdata->default_monspecs;
+ sinfo->pdata = *pdata;
+
+ for (i = 0; i < monspecs->modedb_len; i++)
+ fb_add_videomode(&monspecs->modedb[i], &info->modelist);
+
+ sinfo->config = atmel_lcdfb_get_config(pdev);
+
+ info->var.bits_per_pixel = pdata->default_bpp ? pdata->default_bpp : 16;
+ memcpy(&info->monspecs, pdata->default_monspecs, sizeof(info->monspecs));
} else {
dev_err(dev, "cannot get default configuration\n");
goto free_info;
}
- sinfo->info = info;
- sinfo->pdev = pdev;
- sinfo->config = atmel_lcdfb_get_config(pdev);
+
if (!sinfo->config)
goto free_info;
@@ -962,7 +1194,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
info->pseudo_palette = sinfo->pseudo_palette;
info->fbops = &atmel_lcdfb_ops;
- memcpy(&info->monspecs, sinfo->default_monspecs, sizeof(info->monspecs));
info->fix = atmel_lcdfb_fix;
/* Enable LCDC Clocks */
@@ -978,14 +1209,11 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
}
atmel_lcdfb_start_clock(sinfo);
- ret = fb_find_mode(&info->var, info, NULL, info->monspecs.modedb,
- info->monspecs.modedb_len, info->monspecs.modedb,
- sinfo->default_bpp);
- if (!ret) {
- dev_err(dev, "no suitable video mode found\n");
- goto stop_clk;
- }
+ modelist = list_first_entry(&info->modelist,
+ struct fb_modelist, list);
+ fb_videomode_to_var(&info->var, &modelist->mode);
+ atmel_lcdfb_check_var(&info->var, info);
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
@@ -1069,18 +1297,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
goto unregister_irqs;
}
- /*
- * This makes sure that our colour bitfield
- * descriptors are correctly initialised.
- */
- atmel_lcdfb_check_var(&info->var, info);
-
- ret = fb_set_var(info, &info->var);
- if (ret) {
- dev_warn(dev, "unable to set display parameters\n");
- goto free_cmap;
- }
-
dev_set_drvdata(dev, info);
/*
@@ -1092,13 +1308,8 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
goto reset_drvdata;
}
- /* add selected videomode to modelist */
- fb_var_to_videomode(&fbmode, &info->var);
- fb_add_videomode(&fbmode, &info->modelist);
-
/* Power up the LCDC screen */
- if (sinfo->atmel_lcdfb_power_control)
- sinfo->atmel_lcdfb_power_control(1);
+ atmel_lcdfb_power_control(sinfo, 1);
dev_info(dev, "fb%d: Atmel LCDC at 0x%08lx (mapped at %p), irq %d\n",
info->node, info->fix.mmio_start, sinfo->mmio, sinfo->irq_base);
@@ -1107,7 +1318,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
reset_drvdata:
dev_set_drvdata(dev, NULL);
-free_cmap:
fb_dealloc_cmap(&info->cmap);
unregister_irqs:
cancel_work_sync(&sinfo->task);
@@ -1143,15 +1353,16 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct fb_info *info = dev_get_drvdata(dev);
struct atmel_lcdfb_info *sinfo;
+ struct atmel_lcdfb_pdata *pdata;
if (!info || !info->par)
return 0;
sinfo = info->par;
+ pdata = &sinfo->pdata;
cancel_work_sync(&sinfo->task);
exit_backlight(sinfo);
- if (sinfo->atmel_lcdfb_power_control)
- sinfo->atmel_lcdfb_power_control(0);
+ atmel_lcdfb_power_control(sinfo, 0);
unregister_framebuffer(info);
atmel_lcdfb_stop_clock(sinfo);
clk_put(sinfo->lcdc_clk);
@@ -1167,7 +1378,6 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
atmel_lcdfb_free_video_memory(sinfo);
}
- dev_set_drvdata(dev, NULL);
framebuffer_release(info);
return 0;
@@ -1188,9 +1398,7 @@ static int atmel_lcdfb_suspend(struct platform_device *pdev, pm_message_t mesg)
sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_CTR);
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0);
- if (sinfo->atmel_lcdfb_power_control)
- sinfo->atmel_lcdfb_power_control(0);
-
+ atmel_lcdfb_power_control(sinfo, 0);
atmel_lcdfb_stop(sinfo);
atmel_lcdfb_stop_clock(sinfo);
@@ -1204,8 +1412,7 @@ static int atmel_lcdfb_resume(struct platform_device *pdev)
atmel_lcdfb_start_clock(sinfo);
atmel_lcdfb_start(sinfo);
- if (sinfo->atmel_lcdfb_power_control)
- sinfo->atmel_lcdfb_power_control(1);
+ atmel_lcdfb_power_control(sinfo, 1);
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, sinfo->saved_lcdcon);
/* Enable FIFO & DMA errors */
@@ -1228,6 +1435,7 @@ static struct platform_driver atmel_lcdfb_driver = {
.driver = {
.name = "atmel_lcdfb",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_lcdfb_dt_ids),
},
};
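
In the device-tree path added above, the per-SoC struct atmel_lcdfb_config is recovered from the data pointer of the matched OF table entry. A hedged, generic sketch of that lookup with the NULL case handled (the wrapper name and table parameter are illustrative):

#include <linux/of.h>
#include <linux/of_device.h>

static const void *example_get_match_data(struct device *dev,
					  const struct of_device_id *table)
{
	const struct of_device_id *match = of_match_device(table, dev);

	/* NULL when the device was not probed via DT or is not in the table */
	return match ? match->data : NULL;
}
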
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index a4dfe8cb0a0a..12ca031877d4 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -413,7 +413,6 @@ struct aty128fb_par {
int blitter_may_be_busy;
int fifo_slots; /* free slots in FIFO (64 max) */
- int pm_reg;
int crt_on, lcd_on;
struct pci_dev *pdev;
struct fb_info *next;
@@ -2016,7 +2015,6 @@ static int aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
aty128_init_engine(par);
- par->pm_reg = pdev->pm_cap;
par->pdev = pdev;
par->asleep = 0;
par->lock_blank = 0;
@@ -2029,8 +2027,8 @@ static int aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
if (register_framebuffer(info) < 0)
return 0;
- printk(KERN_INFO "fb%d: %s frame buffer device on %s\n",
- info->node, info->fix.id, video_card);
+ fb_info(info, "%s frame buffer device on %s\n",
+ info->fix.id, video_card);
return 1; /* success! */
}
@@ -2397,7 +2395,7 @@ static void aty128_set_suspend(struct aty128fb_par *par, int suspend)
u32 pmgt;
struct pci_dev *pdev = par->pdev;
- if (!par->pm_reg)
+ if (!par->pdev->pm_cap)
return;
/* Set the chip into the appropriate suspend mode (we use D2,
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 9b0f12c5c284..28fafbf864a5 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -1848,7 +1848,6 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
return aty_waitforvblank(par, crtc);
}
- break;
#if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
case ATYIO_CLKR:
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 1e30b2b3e79f..26d80a4486fb 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -819,11 +819,6 @@ static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *in
if (v.xres_virtual < v.xres)
v.xres = v.xres_virtual;
- if (v.xoffset < 0)
- v.xoffset = 0;
- if (v.yoffset < 0)
- v.yoffset = 0;
-
if (v.xoffset > v.xres_virtual - v.xres)
v.xoffset = v.xres_virtual - v.xres - 1;
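
The removed tests could never fire: xoffset and yoffset in struct fb_var_screeninfo are unsigned (__u32), so only the upper bound needs clamping; the same dead checks are dropped from cirrusfb later in this series. A minimal sketch of the remaining clamp (illustrative helper, not the driver's code):

#include <linux/fb.h>

static void example_clamp_pan(struct fb_var_screeninfo *var)
{
	/* offsets are __u32, so "< 0" is always false; clamp the top only */
	if (var->xoffset > var->xres_virtual - var->xres)
		var->xoffset = var->xres_virtual - var->xres;
	if (var->yoffset > var->yres_virtual - var->yres)
		var->yoffset = var->yres_virtual - var->yres;
}
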
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index f7091ece580d..46a12f1a93c3 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -1427,6 +1427,8 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
mdelay( 15);
}
+#if defined(CONFIG_PM)
+#if defined(CONFIG_X86) || defined(CONFIG_PPC_PMAC)
static void radeon_pm_reset_pad_ctlr_strength(struct radeonfb_info *rinfo)
{
u32 tmp, tmp2;
@@ -1939,9 +1941,10 @@ static void radeon_reinitialize_M10(struct radeonfb_info *rinfo)
*/
radeon_pm_m10_enable_lvds_spread_spectrum(rinfo);
}
+#endif
#ifdef CONFIG_PPC_OF
-
+#ifdef CONFIG_PPC_PMAC
static void radeon_pm_m9p_reconfigure_mc(struct radeonfb_info *rinfo)
{
OUTREG(MC_CNTL, rinfo->save_regs[46]);
@@ -2202,6 +2205,8 @@ static void radeon_reinitialize_M9P(struct radeonfb_info *rinfo)
radeon_pm_restore_pixel_pll(rinfo);
radeon_pm_m10_enable_lvds_spread_spectrum(rinfo);
}
+#endif
+#endif
#if 0 /* Not ready yet */
static void radeon_reinitialize_QW(struct radeonfb_info *rinfo)
@@ -2515,13 +2520,13 @@ static void radeonfb_whack_power_state(struct radeonfb_info *rinfo, pci_power_t
for (;;) {
pci_read_config_word(rinfo->pdev,
- rinfo->pm_reg+PCI_PM_CTRL,
+ rinfo->pdev->pm_cap + PCI_PM_CTRL,
&pwr_cmd);
- if (pwr_cmd & 2)
+ if (pwr_cmd & state)
break;
- pwr_cmd = (pwr_cmd & ~PCI_PM_CTRL_STATE_MASK) | 2;
+ pwr_cmd = (pwr_cmd & ~PCI_PM_CTRL_STATE_MASK) | state;
pci_write_config_word(rinfo->pdev,
- rinfo->pm_reg+PCI_PM_CTRL,
+ rinfo->pdev->pm_cap + PCI_PM_CTRL,
pwr_cmd);
msleep(500);
}
@@ -2532,7 +2537,7 @@ static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
{
u32 tmp;
- if (!rinfo->pm_reg)
+ if (!rinfo->pdev->pm_cap)
return;
/* Set the chip into appropriate suspend mode (we use D2,
@@ -2804,9 +2809,6 @@ static void radeonfb_early_resume(void *data)
void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlist, int force_sleep)
{
- /* Find PM registers in config space if any*/
- rinfo->pm_reg = rinfo->pdev->pm_cap;
-
/* Enable/Disable dynamic clocks: TODO add sysfs access */
if (rinfo->family == CHIP_FAMILY_RS480)
rinfo->dynclk = -1;
@@ -2830,7 +2832,7 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlis
* reason. --BenH
*/
if (machine_is(powermac) && rinfo->of_node) {
- if (rinfo->is_mobility && rinfo->pm_reg &&
+ if (rinfo->is_mobility && rinfo->pdev->pm_cap &&
rinfo->family <= CHIP_FAMILY_RV250)
rinfo->pm_mode |= radeon_pm_d2;
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 7351e66c7f54..cb846044f57c 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -342,7 +342,6 @@ struct radeonfb_info {
int mtrr_hdl;
- int pm_reg;
u32 save_regs[100];
int asleep;
int lock_blank;
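
The aty128fb and radeonfb hunks drop the drivers' cached pm_reg copy and use pdev->pm_cap, the PCI power-management capability offset that the PCI core records at enumeration time. A simplified, hedged sketch of poking the PM control/status register through that offset (single write, no retry loop):

#include <linux/pci.h>

static void example_force_d2(struct pci_dev *pdev)
{
	u16 pmcsr;

	if (!pdev->pm_cap)	/* no PM capability on this function */
		return;

	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr = (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | 2;	/* 2 == D2 */
	pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmcsr);
}
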
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index a54ccdc4d661..372d4aea9d1c 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -361,37 +361,13 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
{
struct au1100fb_device *fbdev;
- unsigned int len;
- unsigned long start=0, off;
fbdev = to_au1100fb_device(fbi);
- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
- return -EINVAL;
- }
-
- start = fbdev->fb_phys & PAGE_MASK;
- len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
-
- off = vma->vm_pgoff << PAGE_SHIFT;
-
- if ((vma->vm_end - vma->vm_start + off) > len) {
- return -EINVAL;
- }
-
- off += start;
- vma->vm_pgoff = off >> PAGE_SHIFT;
-
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
- if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot)) {
- return -EAGAIN;
- }
-
- return 0;
+ return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
}
static struct fb_ops au1100fb_ops =
@@ -588,7 +564,7 @@ int au1100fb_drv_remove(struct platform_device *dev)
if (!dev)
return -ENODEV;
- fbdev = (struct au1100fb_device *) platform_get_drvdata(dev);
+ fbdev = platform_get_drvdata(dev);
#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
@@ -660,19 +636,7 @@ static struct platform_driver au1100fb_driver = {
.suspend = au1100fb_drv_suspend,
.resume = au1100fb_drv_resume,
};
-
-static int __init au1100fb_load(void)
-{
- return platform_driver_register(&au1100fb_driver);
-}
-
-static void __exit au1100fb_unload(void)
-{
- platform_driver_unregister(&au1100fb_driver);
-}
-
-module_init(au1100fb_load);
-module_exit(au1100fb_unload);
+module_platform_driver(au1100fb_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
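
The mmap rewrite above (repeated for au1200fb below) drops the hand-rolled offset and length checks plus io_remap_pfn_range() in favour of vm_iomap_memory(), which validates the vma against the physical range and performs the mapping in one call. A hedged fb_mmap sketch using the generic aperture fields:

#include <linux/fb.h>
#include <linux/mm.h>

static int example_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* checks the vma offset/size against the aperture and remaps it */
	return vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
}
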
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 301224ecc950..4cfba78a1458 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1233,34 +1233,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
* method mainly to allow the use of the TLB streaming flag (CCA=6)
*/
static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-
{
- unsigned int len;
- unsigned long start=0, off;
struct au1200fb_device *fbdev = info->par;
- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
- return -EINVAL;
- }
-
- start = fbdev->fb_phys & PAGE_MASK;
- len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
-
- off = vma->vm_pgoff << PAGE_SHIFT;
-
- if ((vma->vm_end - vma->vm_start + off) > len) {
- return -EINVAL;
- }
-
- off += start;
- vma->vm_pgoff = off >> PAGE_SHIFT;
-
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
- return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
+ return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
}
static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
@@ -1874,21 +1853,7 @@ static struct platform_driver au1200fb_driver = {
.probe = au1200fb_drv_probe,
.remove = au1200fb_drv_remove,
};
-
-/*-------------------------------------------------------------------------*/
-
-static int __init au1200fb_init(void)
-{
- return platform_driver_register(&au1200fb_driver);
-}
-
-static void __exit au1200fb_cleanup(void)
-{
- platform_driver_unregister(&au1200fb_driver);
-}
-
-module_init(au1200fb_init);
-module_exit(au1200fb_cleanup);
+module_platform_driver(au1200fb_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index 0393d827dd44..f7447f7004fb 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -118,7 +118,7 @@ static const struct backlight_ops atmel_pwm_bl_ops = {
.update_status = atmel_pwm_bl_set_intensity,
};
-static int __init atmel_pwm_bl_probe(struct platform_device *pdev)
+static int atmel_pwm_bl_probe(struct platform_device *pdev)
{
struct backlight_properties props;
const struct atmel_pwm_bl_platform_data *pdata;
@@ -202,7 +202,7 @@ err_free_mem:
return retval;
}
-static int __exit atmel_pwm_bl_remove(struct platform_device *pdev)
+static int atmel_pwm_bl_remove(struct platform_device *pdev)
{
struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
@@ -220,10 +220,11 @@ static struct platform_driver atmel_pwm_bl_driver = {
.name = "atmel-pwm-bl",
},
/* REVISIT add suspend() and resume() */
- .remove = __exit_p(atmel_pwm_bl_remove),
+ .probe = atmel_pwm_bl_probe,
+ .remove = atmel_pwm_bl_remove,
};
-module_platform_driver_probe(atmel_pwm_bl_driver, atmel_pwm_bl_probe);
+module_platform_driver(atmel_pwm_bl_driver);
MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>");
MODULE_DESCRIPTION("Atmel PWM backlight driver");
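
Many drivers in this series also lose their open-coded module init/exit pairs in favour of module_platform_driver(), which expands to that registration boilerplate; atmel-pwm-bl additionally moves probe/remove out of the __init/__exit sections so the plain macro (rather than module_platform_driver_probe) can be used. A minimal skeleton with illustrative names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int examplebl_probe(struct platform_device *pdev)
{
	return 0;	/* set the device up here */
}

static int examplebl_remove(struct platform_device *pdev)
{
	return 0;	/* tear it down here */
}

static struct platform_driver examplebl_driver = {
	.probe	= examplebl_probe,
	.remove	= examplebl_remove,
	.driver	= {
		.name	= "examplebl",
		.owner	= THIS_MODULE,
	},
};

/* expands to module_init/module_exit wrappers around platform_driver_(un)register() */
module_platform_driver(examplebl_driver);

MODULE_LICENSE("GPL");
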
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 94a403a9717a..5d05555fe841 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -21,6 +21,9 @@
#include <asm/backlight.h>
#endif
+static struct list_head backlight_dev_list;
+static struct mutex backlight_dev_list_mutex;
+
static const char *const backlight_types[] = {
[BACKLIGHT_RAW] = "raw",
[BACKLIGHT_PLATFORM] = "platform",
@@ -349,10 +352,32 @@ struct backlight_device *backlight_device_register(const char *name,
mutex_unlock(&pmac_backlight_mutex);
#endif
+ mutex_lock(&backlight_dev_list_mutex);
+ list_add(&new_bd->entry, &backlight_dev_list);
+ mutex_unlock(&backlight_dev_list_mutex);
+
return new_bd;
}
EXPORT_SYMBOL(backlight_device_register);
+bool backlight_device_registered(enum backlight_type type)
+{
+ bool found = false;
+ struct backlight_device *bd;
+
+ mutex_lock(&backlight_dev_list_mutex);
+ list_for_each_entry(bd, &backlight_dev_list, entry) {
+ if (bd->props.type == type) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&backlight_dev_list_mutex);
+
+ return found;
+}
+EXPORT_SYMBOL(backlight_device_registered);
+
/**
* backlight_device_unregister - unregisters a backlight device object.
* @bd: the backlight device object to be unregistered and freed.
@@ -364,6 +389,10 @@ void backlight_device_unregister(struct backlight_device *bd)
if (!bd)
return;
+ mutex_lock(&backlight_dev_list_mutex);
+ list_del(&bd->entry);
+ mutex_unlock(&backlight_dev_list_mutex);
+
#ifdef CONFIG_PMAC_BACKLIGHT
mutex_lock(&pmac_backlight_mutex);
if (pmac_backlight == bd)
@@ -499,6 +528,8 @@ static int __init backlight_class_init(void)
backlight_class->dev_groups = bl_device_groups;
backlight_class->pm = &backlight_class_dev_pm_ops;
+ INIT_LIST_HEAD(&backlight_dev_list);
+ mutex_init(&backlight_dev_list_mutex);
return 0;
}
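
The backlight core now keeps every registered device on a mutex-protected list so other code can ask whether a backlight of a given type already exists through the new backlight_device_registered() helper. A hedged usage sketch (the caller is illustrative; a typical consumer would be firmware/ACPI code deciding whether to expose its own interface):

#include <linux/backlight.h>

static bool example_native_backlight_present(void)
{
	/* true if any BACKLIGHT_RAW device has been registered */
	return backlight_device_registered(BACKLIGHT_RAW);
}
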
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index a35a38c709cf..59eebe0b3846 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -244,7 +244,6 @@ static int l4f00242t03_remove(struct spi_device *spi)
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
lcd_device_unregister(priv->ld);
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index bf081573e5b5..be5d636764bf 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -198,7 +198,7 @@ static int tosa_lcd_probe(struct spi_device *spi)
ret = devm_gpio_request_one(&spi->dev, TOSA_GPIO_TG_ON,
GPIOF_OUT_INIT_LOW, "tg #pwr");
if (ret < 0)
- goto err_gpio_tg;
+ return ret;
mdelay(60);
@@ -219,8 +219,6 @@ static int tosa_lcd_probe(struct spi_device *spi)
err_register:
tosa_lcd_tg_off(data);
-err_gpio_tg:
- spi_set_drvdata(spi, NULL);
return ret;
}
@@ -235,8 +233,6 @@ static int tosa_lcd_remove(struct spi_device *spi)
tosa_lcd_tg_off(data);
- spi_set_drvdata(spi, NULL);
-
return 0;
}
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 87f288bfc58c..42b8f9d11018 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -761,19 +761,7 @@ static struct platform_driver bfin_bf54x_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init bfin_bf54x_driver_init(void)
-{
- return platform_driver_register(&bfin_bf54x_driver);
-}
-
-static void __exit bfin_bf54x_driver_cleanup(void)
-{
- platform_driver_unregister(&bfin_bf54x_driver);
-}
+module_platform_driver(bfin_bf54x_driver);
MODULE_DESCRIPTION("Blackfin BF54x TFT LCD Driver");
MODULE_LICENSE("GPL");
-
-module_init(bfin_bf54x_driver_init);
-module_exit(bfin_bf54x_driver_cleanup);
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 48c0c4e38a62..b5cf1307a3d9 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -664,19 +664,7 @@ static struct platform_driver bfin_t350mcqb_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init bfin_t350mcqb_driver_init(void)
-{
- return platform_driver_register(&bfin_t350mcqb_driver);
-}
-
-static void __exit bfin_t350mcqb_driver_cleanup(void)
-{
- platform_driver_unregister(&bfin_t350mcqb_driver);
-}
+module_platform_driver(bfin_t350mcqb_driver);
MODULE_DESCRIPTION("Blackfin TFT LCD Driver");
MODULE_LICENSE("GPL");
-
-module_init(bfin_t350mcqb_driver_init);
-module_exit(bfin_t350mcqb_driver_cleanup);
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index b09701c79432..8556264b16b7 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -1167,9 +1167,8 @@ static int broadsheetfb_probe(struct platform_device *dev)
if (retval < 0)
goto err_unreg_fb;
- printk(KERN_INFO
- "fb%d: Broadsheet frame buffer, using %dK of video memory\n",
- info->node, videomemorysize >> 10);
+ fb_info(info, "Broadsheet frame buffer, using %dK of video memory\n",
+ videomemorysize >> 10);
return 0;
@@ -1217,19 +1216,7 @@ static struct platform_driver broadsheetfb_driver = {
.name = "broadsheetfb",
},
};
-
-static int __init broadsheetfb_init(void)
-{
- return platform_driver_register(&broadsheetfb_driver);
-}
-
-static void __exit broadsheetfb_exit(void)
-{
- platform_driver_unregister(&broadsheetfb_driver);
-}
-
-module_init(broadsheetfb_init);
-module_exit(broadsheetfb_exit);
+module_platform_driver(broadsheetfb_driver);
MODULE_DESCRIPTION("fbdev driver for Broadsheet controller");
MODULE_AUTHOR("Jaya Kumar");
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 60017fc634b5..bc123d6947a4 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -363,8 +363,6 @@ static int bw2_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/carminefb.c b/drivers/video/carminefb.c
index 153dd65b0ae8..65f7c15f5fdb 100644
--- a/drivers/video/carminefb.c
+++ b/drivers/video/carminefb.c
@@ -585,8 +585,7 @@ static int alloc_carmine_fb(void __iomem *regs, void __iomem *smem_base,
if (ret < 0)
goto err_dealloc_cmap;
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
*rinfo = info;
return 0;
@@ -746,7 +745,6 @@ static void carminefb_remove(struct pci_dev *dev)
iounmap(hw->v_regs);
release_mem_region(fix.mmio_start, fix.mmio_len);
- pci_set_drvdata(dev, NULL);
pci_disable_device(dev);
kfree(hw);
}
diff --git a/drivers/video/cfbimgblt.c b/drivers/video/cfbimgblt.c
index baed57d3cfff..a2bb276a8b24 100644
--- a/drivers/video/cfbimgblt.c
+++ b/drivers/video/cfbimgblt.c
@@ -181,7 +181,7 @@ static inline void slow_imageblit(const struct fb_image *image, struct fb_info *
}
shift += bpp;
shift &= (32 - 1);
- if (!l) { l = 8; s++; };
+ if (!l) { l = 8; s++; }
}
/* write trailing bits */
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index ed3b8891e006..c79745b136bb 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -330,7 +330,7 @@ static int cg14_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
default:
ret = -ENOSYS;
break;
- };
+ }
if (!ret) {
sbus_writeb(cur_mode, &regs->mcr);
par->mode = mode;
@@ -343,7 +343,7 @@ static int cg14_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
FBTYPE_MDICOLOR, 8,
info->fix.smem_len);
break;
- };
+ }
return ret;
}
@@ -583,8 +583,6 @@ static int cg14_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index 9f63507ded37..64a89d5747ed 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -446,8 +446,6 @@ static int cg3_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index 3545decc7485..70781fea092a 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -624,7 +624,7 @@ static void cg6_init_fix(struct fb_info *info, int linebytes)
default:
cg6_cpu_name = "i386";
break;
- };
+ }
if (((conf >> CG6_FHC_REV_SHIFT) & CG6_FHC_REV_MASK) >= 11) {
if (info->fix.smem_len <= 0x100000)
cg6_card_name = "TGX";
@@ -839,8 +839,6 @@ static int cg6_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 97db3ba8f237..5aab9b9dc210 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -595,11 +595,6 @@ static int cirrusfb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
- if (var->xoffset < 0)
- var->xoffset = 0;
- if (var->yoffset < 0)
- var->yoffset = 0;
-
/* truncate xoffset and yoffset to maximum if too high */
if (var->xoffset > var->xres_virtual - var->xres)
var->xoffset = var->xres_virtual - var->xres - 1;
@@ -2159,7 +2154,6 @@ static int cirrusfb_pci_register(struct pci_dev *pdev,
if (!ret)
return 0;
- pci_set_drvdata(pdev, NULL);
iounmap(info->screen_base);
err_release_legacy:
if (release_io_ports)
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index a9031498e10c..d5533f4db1cf 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -368,8 +368,7 @@ static int cobalt_lcdfb_probe(struct platform_device *dev)
lcd_clear(info);
- printk(KERN_INFO "fb%d: Cobalt server LCD frame buffer device\n",
- info->node);
+ fb_info(info, "Cobalt server LCD frame buffer device\n");
return 0;
}
@@ -395,19 +394,7 @@ static struct platform_driver cobalt_lcdfb_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init cobalt_lcdfb_init(void)
-{
- return platform_driver_register(&cobalt_lcdfb_driver);
-}
-
-static void __exit cobalt_lcdfb_exit(void)
-{
- platform_driver_unregister(&cobalt_lcdfb_driver);
-}
-
-module_init(cobalt_lcdfb_init);
-module_exit(cobalt_lcdfb_exit);
+module_platform_driver(cobalt_lcdfb_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoichi Yuasa");
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index 67b77b40aa7f..fdadef979238 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -471,8 +471,8 @@ try_again:
/* Register with fbdev layer */
if (register_framebuffer(&p->info) < 0)
return -ENXIO;
-
- printk(KERN_INFO "fb%d: control display adapter\n", p->info.node);
+
+ fb_info(&p->info, "control display adapter\n");
return 0;
}
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 57886787ead0..b0a950f36970 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1641,67 +1641,6 @@ static void cyberpro_common_resume(struct cfb_info *cfb)
cyber2000fb_set_par(&cfb->fb);
}
-#ifdef CONFIG_ARCH_SHARK
-
-#include <mach/framebuffer.h>
-
-static int cyberpro_vl_probe(void)
-{
- struct cfb_info *cfb;
- int err = -ENOMEM;
-
- if (!request_mem_region(FB_START, FB_SIZE, "CyberPro2010"))
- return err;
-
- cfb = cyberpro_alloc_fb_info(ID_CYBERPRO_2010, "CyberPro2010");
- if (!cfb)
- goto failed_release;
-
- cfb->irq = -1;
- cfb->region = ioremap(FB_START, FB_SIZE);
- if (!cfb->region)
- goto failed_ioremap;
-
- cfb->regs = cfb->region + MMIO_OFFSET;
- cfb->fb.device = NULL;
- cfb->fb.fix.mmio_start = FB_START + MMIO_OFFSET;
- cfb->fb.fix.smem_start = FB_START;
-
- /*
- * Bring up the hardware. This is expected to enable access
- * to the linear memory region, and allow access to the memory
- * mapped registers. Also, mem_ctl1 and mem_ctl2 must be
- * initialised.
- */
- cyber2000fb_writeb(0x18, 0x46e8, cfb);
- cyber2000fb_writeb(0x01, 0x102, cfb);
- cyber2000fb_writeb(0x08, 0x46e8, cfb);
- cyber2000fb_writeb(EXT_BIU_MISC, 0x3ce, cfb);
- cyber2000fb_writeb(EXT_BIU_MISC_LIN_ENABLE, 0x3cf, cfb);
-
- cfb->mclk_mult = 0xdb;
- cfb->mclk_div = 0x54;
-
- err = cyberpro_common_probe(cfb);
- if (err)
- goto failed;
-
- if (int_cfb_info == NULL)
- int_cfb_info = cfb;
-
- return 0;
-
-failed:
- iounmap(cfb->region);
-failed_ioremap:
- cyberpro_free_fb_info(cfb);
-failed_release:
- release_mem_region(FB_START, FB_SIZE);
-
- return err;
-}
-#endif /* CONFIG_ARCH_SHARK */
-
/*
* PCI specific support.
*/
@@ -1871,11 +1810,6 @@ static void cyberpro_pci_remove(struct pci_dev *dev)
iounmap(cfb->region);
cyberpro_free_fb_info(cfb);
- /*
- * Ensure that the driver data is no longer
- * valid.
- */
- pci_set_drvdata(dev, NULL);
if (cfb == int_cfb_info)
int_cfb_info = NULL;
@@ -1948,28 +1882,19 @@ static int __init cyber2000fb_init(void)
cyber2000fb_setup(option);
#endif
-#ifdef CONFIG_ARCH_SHARK
- err = cyberpro_vl_probe();
- if (!err)
- ret = 0;
-#endif
-#ifdef CONFIG_PCI
err = pci_register_driver(&cyberpro_driver);
if (!err)
ret = 0;
-#endif
return ret ? err : 0;
}
module_init(cyber2000fb_init);
-#ifndef CONFIG_ARCH_SHARK
static void __exit cyberpro_exit(void)
{
pci_unregister_driver(&cyberpro_driver);
}
module_exit(cyberpro_exit);
-#endif
MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("CyberPro 2000, 2010 and 5000 framebuffer driver");
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index e030e17a83f2..a1d74dd11988 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -129,7 +129,6 @@
#define LCD_NUM_BUFFERS 2
-#define WSI_TIMEOUT 50
#define PALETTE_SIZE 256
#define CLK_MIN_DIV 2
@@ -1314,7 +1313,7 @@ static struct fb_ops da8xx_fb_ops = {
static struct fb_videomode *da8xx_fb_get_videomode(struct platform_device *dev)
{
- struct da8xx_lcdc_platform_data *fb_pdata = dev->dev.platform_data;
+ struct da8xx_lcdc_platform_data *fb_pdata = dev_get_platdata(&dev->dev);
struct fb_videomode *lcdc_info;
int i;
@@ -1336,7 +1335,7 @@ static struct fb_videomode *da8xx_fb_get_videomode(struct platform_device *dev)
static int fb_probe(struct platform_device *device)
{
struct da8xx_lcdc_platform_data *fb_pdata =
- device->dev.platform_data;
+ dev_get_platdata(&device->dev);
static struct resource *lcdc_regs;
struct lcd_ctrl_config *lcd_cfg;
struct fb_videomode *lcdc_info;
@@ -1548,7 +1547,7 @@ err_pm_runtime_disable:
}
#ifdef CONFIG_PM
-struct lcdc_context {
+static struct lcdc_context {
u32 clk_enable;
u32 ctrl;
u32 dma_ctrl;
@@ -1663,19 +1662,7 @@ static struct platform_driver da8xx_fb_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init da8xx_fb_init(void)
-{
- return platform_driver_register(&da8xx_fb_driver);
-}
-
-static void __exit da8xx_fb_cleanup(void)
-{
- platform_driver_unregister(&da8xx_fb_driver);
-}
-
-module_init(da8xx_fb_init);
-module_exit(da8xx_fb_cleanup);
+module_platform_driver(da8xx_fb_driver);
MODULE_DESCRIPTION("Framebuffer driver for TI da8xx/omap-l1xx");
MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 7f9ff75d0db2..cd7c0df9f24b 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -108,8 +108,8 @@ static int efifb_setup(char *options)
if (!*this_opt) continue;
for (i = 0; i < M_UNKNOWN; i++) {
- if (!strcmp(this_opt, efifb_dmi_list[i].optname) &&
- efifb_dmi_list[i].base != 0) {
+ if (efifb_dmi_list[i].base != 0 &&
+ !strcmp(this_opt, efifb_dmi_list[i].optname)) {
screen_info.lfb_base = efifb_dmi_list[i].base;
screen_info.lfb_linelength = efifb_dmi_list[i].stride;
screen_info.lfb_width = efifb_dmi_list[i].width;
@@ -322,8 +322,7 @@ static int efifb_probe(struct platform_device *dev)
printk(KERN_ERR "efifb: cannot register framebuffer\n");
goto err_fb_dealoc;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
err_fb_dealoc:
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index 28a837dfddd1..35a0f533f1a2 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -487,7 +487,7 @@ static void ep93xxfb_dealloc_videomem(struct fb_info *info)
static int ep93xxfb_probe(struct platform_device *pdev)
{
- struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data;
+ struct ep93xxfb_mach_info *mach_info = dev_get_platdata(&pdev->dev);
struct fb_info *info;
struct ep93xx_fbi *fbi;
struct resource *res;
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/exynos/exynos_mipi_dsi_common.c
index 520fc9bd887b..7eed957b6014 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_common.c
@@ -376,6 +376,7 @@ int exynos_mipi_dsi_rd_data(struct mipi_dsim_device *dsim, unsigned int data_id,
"data id %x is not supported current DSI spec.\n",
data_id);
+ mutex_unlock(&dsim->lock);
return -EINVAL;
}
@@ -667,7 +668,7 @@ int exynos_mipi_dsi_init_dsim(struct mipi_dsim_device *dsim)
default:
dev_info(dsim->dev, "data lane is invalid.\n");
return -EINVAL;
- };
+ }
exynos_mipi_dsi_sw_reset(dsim);
exynos_mipi_dsi_func_reset(dsim);
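
The exynos_mipi_dsi fix above releases dsim->lock on an error path that previously returned with the mutex still held. A minimal sketch of the usual pattern, with an illustrative lock and function, funnelling every exit through one unlock site:

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static int example_do_locked(int data_id)
{
	int ret = 0;

	mutex_lock(&example_lock);

	if (data_id < 0) {
		ret = -EINVAL;		/* do not return with the lock held */
		goto out_unlock;
	}

	/* ... work carried out under the lock ... */

out_unlock:
	mutex_unlock(&example_lock);
	return ret;
}
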
diff --git a/drivers/video/fb-puv3.c b/drivers/video/fb-puv3.c
index 27fc956166fa..6db9ebd042a3 100644
--- a/drivers/video/fb-puv3.c
+++ b/drivers/video/fb-puv3.c
@@ -713,9 +713,8 @@ static int unifb_probe(struct platform_device *dev)
platform_set_drvdata(dev, info);
platform_device_add_data(dev, unifb_regs, sizeof(u32) * UNIFB_REGS_NUM);
- printk(KERN_INFO
- "fb%d: Virtual frame buffer device, using %dM of video memory\n",
- info->node, UNIFB_MEMSIZE >> 20);
+ fb_info(info, "Virtual frame buffer device, using %dM of video memory\n",
+ UNIFB_MEMSIZE >> 20);
return 0;
err2:
fb_dealloc_cmap(&info->cmap);
diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c
index 6d2744794dd1..4c4ffa61ae26 100644
--- a/drivers/video/ffb.c
+++ b/drivers/video/ffb.c
@@ -1035,8 +1035,6 @@ static int ffb_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c
index c99c9671302b..e69d47af9932 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fm2fb.c
@@ -289,7 +289,7 @@ static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id)
zorro_release_device(z);
return -EINVAL;
}
- printk("fb%d: %s frame buffer device\n", info->node, fb_fix.id);
+ fb_info(info, "%s frame buffer device\n", fb_fix.id);
return 0;
}
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 6dd72250111e..e8758b9c3bcc 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -31,6 +31,8 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <sysdev/fsl_soc.h>
#include <linux/fsl-diu-fb.h>
@@ -1102,7 +1104,7 @@ static int fsl_diu_cursor(struct fb_info *info, struct fb_cursor *cursor)
fsl_diu_load_cursor_image(info, image, bg, fg,
cursor->image.width, cursor->image.height);
- };
+ }
/*
* Show or hide the cursor. The cursor data is always stored in the
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index ceab37020fff..4c7cb368a9dc 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -1236,9 +1236,9 @@ static int gbefb_probe(struct platform_device *p_dev)
platform_set_drvdata(p_dev, info);
gbefb_create_sysfs(&p_dev->dev);
- printk(KERN_INFO "fb%d: %s rev %d @ 0x%08x using %dkB memory\n",
- info->node, info->fix.id, gbe_revision, (unsigned) GBE_BASE,
- gbe_mem_size >> 10);
+ fb_info(info, "%s rev %d @ 0x%08x using %dkB memory\n",
+ info->fix.id, gbe_revision, (unsigned)GBE_BASE,
+ gbe_mem_size >> 10);
return 0;
diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
index ebbaada7b941..2794ba11f332 100644
--- a/drivers/video/geode/gx1fb_core.c
+++ b/drivers/video/geode/gx1fb_core.c
@@ -357,7 +357,7 @@ static int gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
pci_set_drvdata(pdev, info);
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
err:
@@ -399,7 +399,6 @@ static void gx1fb_remove(struct pci_dev *pdev)
release_mem_region(gx1_gx_base() + 0x8300, 0x100);
fb_dealloc_cmap(&info->cmap);
- pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
}
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 19f0c1add747..1790f14bab15 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -423,7 +423,7 @@ static int gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
pci_set_drvdata(pdev, info);
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
err:
@@ -471,7 +471,6 @@ static void gxfb_remove(struct pci_dev *pdev)
pci_release_region(pdev, 1);
fb_dealloc_cmap(&info->cmap);
- pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
}
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/geode/lxfb_core.c
index 4dd7b5566962..9e1d19d673a1 100644
--- a/drivers/video/geode/lxfb_core.c
+++ b/drivers/video/geode/lxfb_core.c
@@ -555,8 +555,7 @@ static int lxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
pci_set_drvdata(pdev, info);
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
@@ -606,7 +605,6 @@ static void lxfb_remove(struct pci_dev *pdev)
pci_release_region(pdev, 3);
fb_dealloc_cmap(&info->cmap);
- pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
}
diff --git a/drivers/video/grvga.c b/drivers/video/grvga.c
index 861109e7de1b..c078701f15f6 100644
--- a/drivers/video/grvga.c
+++ b/drivers/video/grvga.c
@@ -496,7 +496,6 @@ static int grvga_probe(struct platform_device *dev)
return 0;
free_mem:
- dev_set_drvdata(&dev->dev, NULL);
if (grvga_fix_addr)
iounmap((void *)virtual_start);
else
@@ -530,7 +529,6 @@ static int grvga_remove(struct platform_device *device)
kfree((void *)info->screen_base);
framebuffer_release(info);
- dev_set_drvdata(&device->dev, NULL);
}
return 0;
@@ -557,19 +555,7 @@ static struct platform_driver grvga_driver = {
.remove = grvga_remove,
};
-
-static int __init grvga_init(void)
-{
- return platform_driver_register(&grvga_driver);
-}
-
-static void __exit grvga_exit(void)
-{
- platform_driver_unregister(&grvga_driver);
-}
-
-module_init(grvga_init);
-module_exit(grvga_exit);
+module_platform_driver(grvga_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Aeroflex Gaisler");
diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
index c35663f6a54a..135d78a02588 100644
--- a/drivers/video/gxt4500.c
+++ b/drivers/video/gxt4500.c
@@ -698,8 +698,7 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "gxt4500: cannot register framebuffer\n");
goto err_free_cmap;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index 59d23181fdb0..f64120ec9192 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -261,9 +261,8 @@ static int hecubafb_probe(struct platform_device *dev)
goto err_fbreg;
platform_set_drvdata(dev, info);
- printk(KERN_INFO
- "fb%d: Hecuba frame buffer device, using %dK of video memory\n",
- info->node, videomemorysize >> 10);
+ fb_info(info, "Hecuba frame buffer device, using %dK of video memory\n",
+ videomemorysize >> 10);
/* this inits the dpy */
retval = par->board->init(par);
@@ -305,19 +304,7 @@ static struct platform_driver hecubafb_driver = {
.name = "hecubafb",
},
};
-
-static int __init hecubafb_init(void)
-{
- return platform_driver_register(&hecubafb_driver);
-}
-
-static void __exit hecubafb_exit(void)
-{
- platform_driver_unregister(&hecubafb_driver);
-}
-
-module_init(hecubafb_init);
-module_exit(hecubafb_exit);
+module_platform_driver(hecubafb_driver);
MODULE_DESCRIPTION("fbdev driver for Hecuba/Apollo controller");
MODULE_AUTHOR("Jaya Kumar");
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index 1e9e2d819d1f..5ff9fe2116a4 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -586,8 +586,7 @@ static int hgafb_probe(struct platform_device *pdev)
return -EINVAL;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
platform_set_drvdata(pdev, info);
return 0;
}
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index c2414d6ab646..a648d5186c6e 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -405,8 +405,7 @@ static int hitfb_probe(struct platform_device *dev)
platform_set_drvdata(dev, info);
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
diff --git a/drivers/video/hpfb.c b/drivers/video/hpfb.c
index b802f93cef5d..a1b7e5fa9b09 100644
--- a/drivers/video/hpfb.c
+++ b/drivers/video/hpfb.c
@@ -298,8 +298,7 @@ static int hpfb_init_one(unsigned long phys_base, unsigned long virt_base)
if (ret < 0)
goto dealloc_cmap;
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- fb_info.node, fb_info.fix.id);
+ fb_info(&fb_info, "%s frame buffer device\n", fb_info.fix.id);
return 0;
diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
index 8ac99b87c07e..130708f96430 100644
--- a/drivers/video/hyperv_fb.c
+++ b/drivers/video/hyperv_fb.c
@@ -575,6 +575,10 @@ static int hvfb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 0;
}
+static int hvfb_blank(int blank, struct fb_info *info)
+{
+ return 1; /* get fb_blank to set the colormap to all black */
+}
static struct fb_ops hvfb_ops = {
.owner = THIS_MODULE,
@@ -584,6 +588,7 @@ static struct fb_ops hvfb_ops = {
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
+ .fb_blank = hvfb_blank,
};
@@ -795,12 +800,21 @@ static int hvfb_remove(struct hv_device *hdev)
}
+static DEFINE_PCI_DEVICE_TABLE(pci_stub_id_table) = {
+ {
+ .vendor = PCI_VENDOR_ID_MICROSOFT,
+ .device = PCI_DEVICE_ID_HYPERV_VIDEO,
+ },
+ { /* end of list */ }
+};
+
static const struct hv_vmbus_device_id id_table[] = {
/* Synthetic Video Device GUID */
{HV_SYNTHVID_GUID},
{}
};
+MODULE_DEVICE_TABLE(pci, pci_stub_id_table);
MODULE_DEVICE_TABLE(vmbus, id_table);
static struct hv_driver hvfb_drv = {
@@ -810,14 +824,43 @@ static struct hv_driver hvfb_drv = {
.remove = hvfb_remove,
};
+static int hvfb_pci_stub_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ return 0;
+}
+
+static void hvfb_pci_stub_remove(struct pci_dev *pdev)
+{
+}
+
+static struct pci_driver hvfb_pci_stub_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pci_stub_id_table,
+ .probe = hvfb_pci_stub_probe,
+ .remove = hvfb_pci_stub_remove,
+};
static int __init hvfb_drv_init(void)
{
- return vmbus_driver_register(&hvfb_drv);
+ int ret;
+
+ ret = vmbus_driver_register(&hvfb_drv);
+ if (ret != 0)
+ return ret;
+
+ ret = pci_register_driver(&hvfb_pci_stub_driver);
+ if (ret != 0) {
+ vmbus_driver_unregister(&hvfb_drv);
+ return ret;
+ }
+
+ return 0;
}
static void __exit hvfb_drv_exit(void)
{
+ pci_unregister_driver(&hvfb_pci_stub_driver);
vmbus_driver_unregister(&hvfb_drv);
}
diff --git a/drivers/video/i740fb.c b/drivers/video/i740fb.c
index 6c4838818950..ca7c9df193b0 100644
--- a/drivers/video/i740fb.c
+++ b/drivers/video/i740fb.c
@@ -203,8 +203,7 @@ static int i740fb_release(struct fb_info *info, int user)
mutex_lock(&(par->open_lock));
if (par->ref_count == 0) {
- printk(KERN_ERR "fb%d: release called with zero refcount\n",
- info->node);
+ fb_err(info, "release called with zero refcount\n");
mutex_unlock(&(par->open_lock));
return -EINVAL;
}
@@ -1067,7 +1066,7 @@ static int i740fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
par->has_sgram = !((tmp & DRAM_RAS_TIMING) ||
(tmp & DRAM_RAS_PRECHARGE));
- printk(KERN_INFO "fb%d: Intel740 on %s, %ld KB %s\n", info->node,
+ fb_info(info, "Intel740 on %s, %ld KB %s\n",
pci_name(dev), info->screen_size >> 10,
par->has_sgram ? "SGRAM" : "SDRAM");
@@ -1143,8 +1142,7 @@ static int i740fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
goto err_reg_framebuffer;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
pci_set_drvdata(dev, info);
#ifdef CONFIG_MTRR
if (mtrr) {
@@ -1194,7 +1192,6 @@ static void i740fb_remove(struct pci_dev *dev)
pci_iounmap(dev, info->screen_base);
pci_release_regions(dev);
/* pci_disable_device(dev); */
- pci_set_drvdata(dev, NULL);
framebuffer_release(info);
}
}
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 4ce3438ade6f..038192ac7369 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -2129,7 +2129,6 @@ static void __exit i810fb_remove_pci(struct pci_dev *dev)
unregister_framebuffer(info);
i810fb_release_resource(info, par);
- pci_set_drvdata(dev, NULL);
printk("cleanup_module: unloaded i810 framebuffer device\n");
}
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index 79cbfa7d1a9b..486f18897414 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -360,9 +360,8 @@ static int __init iga_init(struct fb_info *info, struct iga_par *par)
if (register_framebuffer(info) < 0)
return 0;
- printk("fb%d: %s frame buffer device at 0x%08lx [%dMB VRAM]\n",
- info->node, info->fix.id,
- par->frame_buffer_phys, info->fix.smem_len >> 20);
+ fb_info(info, "%s frame buffer device at 0x%08lx [%dMB VRAM]\n",
+ info->fix.id, par->frame_buffer_phys, info->fix.smem_len >> 20);
iga_blank_border(par);
return 1;
diff --git a/drivers/video/imsttfb.c b/drivers/video/imsttfb.c
index d5220cc90e93..aae10ce74f14 100644
--- a/drivers/video/imsttfb.c
+++ b/drivers/video/imsttfb.c
@@ -1461,8 +1461,8 @@ static void init_imstt(struct fb_info *info)
}
tmp = (read_reg_le32(par->dc_regs, SSTATUS) & 0x0f00) >> 8;
- printk("fb%u: %s frame buffer; %uMB vram; chip version %u\n",
- info->node, info->fix.id, info->fix.smem_len >> 20, tmp);
+ fb_info(info, "%s frame buffer; %uMB vram; chip version %u\n",
+ info->fix.id, info->fix.smem_len >> 20, tmp);
}
static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 38733ac2b698..44ee678481d5 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -755,7 +755,7 @@ static int imxfb_resume(struct platform_device *dev)
static int imxfb_init_fbinfo(struct platform_device *pdev)
{
- struct imx_fb_platform_data *pdata = pdev->dev.platform_data;
+ struct imx_fb_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct fb_info *info = dev_get_drvdata(&pdev->dev);
struct imxfb_info *fbi = info->par;
struct device_node *np;
@@ -877,7 +877,7 @@ static int imxfb_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
info = framebuffer_alloc(sizeof(struct imxfb_info), &pdev->dev);
if (!info)
@@ -1066,7 +1066,7 @@ static int imxfb_remove(struct platform_device *pdev)
#endif
unregister_framebuffer(info);
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->exit)
pdata->exit(fbi->pdev);
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 8209e46c5d28..b847d530471a 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -931,8 +931,6 @@ static void intelfb_pci_unregister(struct pci_dev *pdev)
return;
cleanup(dinfo);
-
- pci_set_drvdata(pdev, NULL);
}
/***************************************************************
diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
index 2c49112fdd6c..87790e9644d0 100644
--- a/drivers/video/jz4740_fb.c
+++ b/drivers/video/jz4740_fb.c
@@ -99,9 +99,9 @@
#define JZ_LCD_CTRL_BPP_15_16 0x4
#define JZ_LCD_CTRL_BPP_18_24 0x5
-#define JZ_LCD_CMD_SOF_IRQ BIT(15)
-#define JZ_LCD_CMD_EOF_IRQ BIT(16)
-#define JZ_LCD_CMD_ENABLE_PAL BIT(12)
+#define JZ_LCD_CMD_SOF_IRQ BIT(31)
+#define JZ_LCD_CMD_EOF_IRQ BIT(30)
+#define JZ_LCD_CMD_ENABLE_PAL BIT(28)
#define JZ_LCD_SYNC_MASK 0x3ff
@@ -471,7 +471,7 @@ static int jzfb_set_par(struct fb_info *info)
writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL);
if (!jzfb->is_enabled)
- clk_disable(jzfb->ldclk);
+ clk_disable_unprepare(jzfb->ldclk);
mutex_unlock(&jzfb->lock);
@@ -485,7 +485,7 @@ static void jzfb_enable(struct jzfb *jzfb)
{
uint32_t ctrl;
- clk_enable(jzfb->ldclk);
+ clk_prepare_enable(jzfb->ldclk);
jz_gpio_bulk_resume(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
jz_gpio_bulk_resume(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
@@ -514,7 +514,7 @@ static void jzfb_disable(struct jzfb *jzfb)
jz_gpio_bulk_suspend(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
jz_gpio_bulk_suspend(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
- clk_disable(jzfb->ldclk);
+ clk_disable_unprepare(jzfb->ldclk);
}
static int jzfb_blank(int blank_mode, struct fb_info *info)
@@ -693,7 +693,7 @@ static int jzfb_probe(struct platform_device *pdev)
fb_alloc_cmap(&fb->cmap, 256, 0);
- clk_enable(jzfb->ldclk);
+ clk_prepare_enable(jzfb->ldclk);
jzfb->is_enabled = 1;
writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0);
@@ -763,7 +763,7 @@ static int jzfb_suspend(struct device *dev)
static int jzfb_resume(struct device *dev)
{
struct jzfb *jzfb = dev_get_drvdata(dev);
- clk_enable(jzfb->ldclk);
+ clk_prepare_enable(jzfb->ldclk);
mutex_lock(&jzfb->lock);
if (jzfb->is_enabled)
@@ -798,18 +798,7 @@ static struct platform_driver jzfb_driver = {
.pm = JZFB_PM_OPS,
},
};
-
-static int __init jzfb_init(void)
-{
- return platform_driver_register(&jzfb_driver);
-}
-module_init(jzfb_init);
-
-static void __exit jzfb_exit(void)
-{
- platform_driver_unregister(&jzfb_driver);
-}
-module_exit(jzfb_exit);
+module_platform_driver(jzfb_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/video/kyro/fbdev.c b/drivers/video/kyro/fbdev.c
index 6157f74ac600..50c857477e4f 100644
--- a/drivers/video/kyro/fbdev.c
+++ b/drivers/video/kyro/fbdev.c
@@ -623,7 +623,6 @@ static int kyrofb_ioctl(struct fb_info *info,
"command instead.\n");
return -EINVAL;
}
- break;
case KYRO_IOCTL_UVSTRIDE:
if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(unsigned long)))
return -EFAULT;
@@ -736,10 +735,10 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (register_framebuffer(info) < 0)
goto out_unmap;
- printk("fb%d: %s frame buffer device, at %dx%d@%d using %ldk/%ldk of VRAM\n",
- info->node, info->fix.id, info->var.xres,
- info->var.yres, info->var.bits_per_pixel, size >> 10,
- (unsigned long)info->fix.smem_len >> 10);
+ fb_info(info, "%s frame buffer device, at %dx%d@%d using %ldk/%ldk of VRAM\n",
+ info->fix.id,
+ info->var.xres, info->var.yres, info->var.bits_per_pixel,
+ size >> 10, (unsigned long)info->fix.smem_len >> 10);
pci_set_drvdata(pdev, info);
@@ -779,7 +778,6 @@ static void kyrofb_remove(struct pci_dev *pdev)
#endif
unregister_framebuffer(info);
- pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
}
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index b17f5009a436..2c7f7d479fe2 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -469,7 +469,7 @@ static void leo_wid_put(struct fb_info *info, struct fb_wid_list *wl)
default:
continue;
- };
+ }
sbus_writel(0x5800 + j, &lx_krn->krn_type);
sbus_writel(wi->wi_values[0], &lx_krn->krn_value);
}
@@ -648,8 +648,6 @@ static int leo_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index fe01add3700e..5bd2eb8d4f39 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -913,8 +913,7 @@ static int __init macfb_init(void)
if (err)
goto fail_dealloc;
- pr_info("fb%d: %s frame buffer device\n",
- fb_info.node, fb_info.fix.id);
+ fb_info(&fb_info, "%s frame buffer device\n", fb_info.fix.id);
return 0;
diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
index 1717623aabc0..a01147fdf270 100644
--- a/drivers/video/matrox/matroxfb_DAC1064.c
+++ b/drivers/video/matrox/matroxfb_DAC1064.c
@@ -494,7 +494,7 @@ static int m1064_compute(void* out, struct my_timming* m) {
if (inDAC1064(minfo, M1064_XPIXPLLSTAT) & 0x40)
break;
udelay(10);
- };
+ }
CRITEND
@@ -639,7 +639,7 @@ static void MGAG100_progPixClock(const struct matrox_fb_info *minfo, int flags,
if (inDAC1064(minfo, M1064_XPIXPLLSTAT) & 0x40)
break;
udelay(10);
- };
+ }
if (!clk)
printk(KERN_ERR "matroxfb: Pixel PLL%c not locked after usual time\n", (reg-M1064_XPIXPLLAM-2)/4 + 'A');
selClk = inDAC1064(minfo, M1064_XPIXCLKCTRL) & ~M1064_XPIXCLKCTRL_SRC_MASK;
diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
index 9a44cec394b5..195ad7cac1ba 100644
--- a/drivers/video/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/matrox/matroxfb_Ti3026.c
@@ -473,7 +473,7 @@ static void ti3026_setMCLK(struct matrox_fb_info *minfo, int fout)
if (inTi3026(minfo, TVP3026_XPIXPLLDATA) & 0x40)
break;
udelay(10);
- };
+ }
if (!tmout)
printk(KERN_ERR "matroxfb: Temporary pixel PLL not locked after 5 secs\n");
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 245652911650..87c64ff4546c 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -1893,14 +1893,12 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b)
if (register_framebuffer(&minfo->fbcon) < 0) {
goto failVideoIO;
}
- printk("fb%d: %s frame buffer device\n",
- minfo->fbcon.node, minfo->fbcon.fix.id);
+ fb_info(&minfo->fbcon, "%s frame buffer device\n", minfo->fbcon.fix.id);
/* there is no console on this fb... but we have to initialize hardware
* until someone tells me what is proper thing to do */
if (!minfo->initialized) {
- printk(KERN_INFO "fb%d: initializing hardware\n",
- minfo->fbcon.node);
+ fb_info(&minfo->fbcon, "initializing hardware\n");
/* We have to use FB_ACTIVATE_FORCE, as we had to put vesafb_defined to the fbcon.var
* already before, so register_framebuffer works correctly. */
vesafb_defined.activate |= FB_ACTIVATE_FORCE;
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index fd2897455696..ee41a0f276b2 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -1295,19 +1295,7 @@ static struct i2c_driver maven_driver={
.id_table = maven_id,
};
-static int __init matroxfb_maven_init(void)
-{
- return i2c_add_driver(&maven_driver);
-}
-
-static void __exit matroxfb_maven_exit(void)
-{
- i2c_del_driver(&maven_driver);
-}
-
+module_i2c_driver(maven_driver);
MODULE_AUTHOR("(c) 1999-2002 Petr Vandrovec <vandrove@vc.cvut.cz>");
MODULE_DESCRIPTION("Matrox G200/G400 Matrox MGA-TVO driver");
MODULE_LICENSE("GPL");
-module_init(matroxfb_maven_init);
-module_exit(matroxfb_maven_exit);
-/* we do not have __setup() yet */
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index 91c59c9fb082..0cd4c3318511 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -781,7 +781,6 @@ rel_reg:
irqdisp:
irq_dispose_mapping(par->irq);
fbrel:
- dev_set_drvdata(dev, NULL);
framebuffer_release(info);
return ret;
}
@@ -814,7 +813,6 @@ static int of_platform_mb862xx_remove(struct platform_device *ofdev)
iounmap(par->mmio_base);
iounmap(par->fb_base);
- dev_set_drvdata(&ofdev->dev, NULL);
release_mem_region(par->res->start, res_size);
framebuffer_release(fbi);
return 0;
@@ -1157,7 +1155,6 @@ static void mb862xx_pci_remove(struct pci_dev *pdev)
device_remove_file(&pdev->dev, &dev_attr_dispregs);
- pci_set_drvdata(pdev, NULL);
unregister_framebuffer(fbi);
fb_dealloc_cmap(&fbi->cmap);
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 0c1a874ffd2b..f0a5392f5fd3 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -890,7 +890,7 @@ static int mbxfb_probe(struct platform_device *dev)
dev_dbg(&dev->dev, "mbxfb_probe\n");
- pdata = dev->dev.platform_data;
+ pdata = dev_get_platdata(&dev->dev);
if (!pdata) {
dev_err(&dev->dev, "platform data is required\n");
return -EINVAL;
@@ -976,7 +976,7 @@ static int mbxfb_probe(struct platform_device *dev)
platform_set_drvdata(dev, fbi);
- printk(KERN_INFO "fb%d: mbx frame buffer device\n", fbi->node);
+ fb_info(fbi, "mbx frame buffer device\n");
if (mfbi->platform_probe)
mfbi->platform_probe(fbi);
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
index f30150d71be9..195cc2db4c2c 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
@@ -690,7 +690,8 @@ static int metronomefb_probe(struct platform_device *dev)
goto err_csum_table;
}
- if (board->setup_irq(info))
+ retval = board->setup_irq(info);
+ if (retval)
goto err_csum_table;
retval = metronome_init_regs(par);
@@ -769,23 +770,11 @@ static struct platform_driver metronomefb_driver = {
.name = "metronomefb",
},
};
-
-static int __init metronomefb_init(void)
-{
- return platform_driver_register(&metronomefb_driver);
-}
-
-static void __exit metronomefb_exit(void)
-{
- platform_driver_unregister(&metronomefb_driver);
-}
+module_platform_driver(metronomefb_driver);
module_param(user_wfm_size, uint, 0);
MODULE_PARM_DESC(user_wfm_size, "Set custom waveform size");
-module_init(metronomefb_init);
-module_exit(metronomefb_exit);
-
MODULE_DESCRIPTION("fbdev driver for Metronome controller");
MODULE_AUTHOR("Jaya Kumar");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/mmp/fb/mmpfb.c b/drivers/video/mmp/fb/mmpfb.c
index 4ab95b8daed3..7ab31eb76a8c 100644
--- a/drivers/video/mmp/fb/mmpfb.c
+++ b/drivers/video/mmp/fb/mmpfb.c
@@ -392,12 +392,29 @@ static int var_update(struct fb_info *info)
return 0;
}
+static void mmpfb_set_win(struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ struct mmp_win win;
+ u32 stride;
+
+ memset(&win, 0, sizeof(win));
+ win.xsrc = win.xdst = fbi->mode.xres;
+ win.ysrc = win.ydst = fbi->mode.yres;
+ win.pix_fmt = fbi->pix_fmt;
+ stride = pixfmt_to_stride(win.pix_fmt);
+ win.pitch[0] = var->xres_virtual * stride;
+ win.pitch[1] = win.pitch[2] =
+ (stride == 1) ? (var->xres_virtual >> 1) : 0;
+ mmp_overlay_set_win(fbi->overlay, &win);
+}
+
static int mmpfb_set_par(struct fb_info *info)
{
struct mmpfb_info *fbi = info->par;
struct fb_var_screeninfo *var = &info->var;
struct mmp_addr addr;
- struct mmp_win win;
struct mmp_mode mode;
int ret;
@@ -409,11 +426,8 @@ static int mmpfb_set_par(struct fb_info *info)
fbmode_to_mmpmode(&mode, &fbi->mode, fbi->output_fmt);
mmp_path_set_mode(fbi->path, &mode);
- memset(&win, 0, sizeof(win));
- win.xsrc = win.xdst = fbi->mode.xres;
- win.ysrc = win.ydst = fbi->mode.yres;
- win.pix_fmt = fbi->pix_fmt;
- mmp_overlay_set_win(fbi->overlay, &win);
+ /* set window related info */
+ mmpfb_set_win(info);
/* set address always */
memset(&addr, 0, sizeof(addr));
@@ -427,16 +441,12 @@ static int mmpfb_set_par(struct fb_info *info)
static void mmpfb_power(struct mmpfb_info *fbi, int power)
{
struct mmp_addr addr;
- struct mmp_win win;
struct fb_var_screeninfo *var = &fbi->fb_info->var;
/* for power on, always set address/window again */
if (power) {
- memset(&win, 0, sizeof(win));
- win.xsrc = win.xdst = fbi->mode.xres;
- win.ysrc = win.ydst = fbi->mode.yres;
- win.pix_fmt = fbi->pix_fmt;
- mmp_overlay_set_win(fbi->overlay, &win);
+ /* set window related info */
+ mmpfb_set_win(fbi->fb_info);
/* set address always */
memset(&addr, 0, sizeof(addr));
diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/mmp/hw/mmp_ctrl.c
index 6ac755270ab4..8621a9f2bdcc 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/mmp/hw/mmp_ctrl.c
@@ -53,15 +53,14 @@ static irqreturn_t ctrl_handle_irq(int irq, void *dev_id)
tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
if (tmp & isr)
writel_relaxed(~isr, ctrl->reg_base + SPU_IRQ_ISR);
- } while ((isr = readl(ctrl->reg_base + SPU_IRQ_ISR)) & imask);
+ } while ((isr = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR)) & imask);
return IRQ_HANDLED;
}
static u32 fmt_to_reg(struct mmp_overlay *overlay, int pix_fmt)
{
- u32 link_config = path_to_path_plat(overlay->path)->link_config;
- u32 rbswap, uvswap = 0, yuvswap = 0,
+ u32 rbswap = 0, uvswap = 0, yuvswap = 0,
csc_en = 0, val = 0,
vid = overlay_is_vid(overlay);
@@ -71,27 +70,23 @@ static u32 fmt_to_reg(struct mmp_overlay *overlay, int pix_fmt)
case PIXFMT_RGB888PACK:
case PIXFMT_RGB888UNPACK:
case PIXFMT_RGBA888:
- rbswap = !(link_config & 0x1);
+ rbswap = 1;
break;
case PIXFMT_VYUY:
case PIXFMT_YVU422P:
case PIXFMT_YVU420P:
- rbswap = link_config & 0x1;
uvswap = 1;
break;
case PIXFMT_YUYV:
- rbswap = link_config & 0x1;
yuvswap = 1;
break;
default:
- rbswap = link_config & 0x1;
break;
}
switch (pix_fmt) {
case PIXFMT_RGB565:
case PIXFMT_BGR565:
- val = 0;
break;
case PIXFMT_RGB1555:
case PIXFMT_BGR1555:
@@ -147,17 +142,27 @@ static void dmafetch_set_fmt(struct mmp_overlay *overlay)
static void overlay_set_win(struct mmp_overlay *overlay, struct mmp_win *win)
{
struct lcd_regs *regs = path_regs(overlay->path);
- u32 pitch;
/* assert win supported */
memcpy(&overlay->win, win, sizeof(struct mmp_win));
mutex_lock(&overlay->access_ok);
- pitch = win->xsrc * pixfmt_to_stride(win->pix_fmt);
- writel_relaxed(pitch, &regs->g_pitch);
- writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
- writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
- writel_relaxed(0, &regs->g_start);
+
+ if (overlay_is_vid(overlay)) {
+ writel_relaxed(win->pitch[0], &regs->v_pitch_yc);
+ writel_relaxed(win->pitch[2] << 16 |
+ win->pitch[1], &regs->v_pitch_uv);
+
+ writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->v_size);
+ writel_relaxed((win->ydst << 16) | win->xdst, &regs->v_size_z);
+ writel_relaxed(win->ypos << 16 | win->xpos, &regs->v_start);
+ } else {
+ writel_relaxed(win->pitch[0], &regs->g_pitch);
+
+ writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
+ writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
+ writel_relaxed(win->ypos << 16 | win->xpos, &regs->g_start);
+ }
dmafetch_set_fmt(overlay);
mutex_unlock(&overlay->access_ok);
@@ -239,7 +244,13 @@ static int overlay_set_addr(struct mmp_overlay *overlay, struct mmp_addr *addr)
/* FIXME: assert addr supported */
memcpy(&overlay->addr, addr, sizeof(struct mmp_addr));
- writel(addr->phys[0], &regs->g_0);
+
+ if (overlay_is_vid(overlay)) {
+ writel_relaxed(addr->phys[0], &regs->v_y0);
+ writel_relaxed(addr->phys[1], &regs->v_u0);
+ writel_relaxed(addr->phys[2], &regs->v_v0);
+ } else
+ writel_relaxed(addr->phys[0], &regs->g_0);
return overlay->addr.phys[0];
}
@@ -248,7 +259,8 @@ static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
{
struct lcd_regs *regs = path_regs(path);
u32 total_x, total_y, vsync_ctrl, tmp, sclk_src, sclk_div,
- link_config = path_to_path_plat(path)->link_config;
+ link_config = path_to_path_plat(path)->link_config,
+ dsi_rbswap = path_to_path_plat(path)->link_config;
/* FIXME: assert videomode supported */
memcpy(&path->mode, mode, sizeof(struct mmp_mode));
@@ -263,6 +275,12 @@ static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
tmp |= CFG_DUMB_ENA(1);
writel_relaxed(tmp, ctrl_regs(path) + intf_ctrl(path->id));
+ /* interface rb_swap setting */
+ tmp = readl_relaxed(ctrl_regs(path) + intf_rbswap_ctrl(path->id)) &
+ (~(CFG_INTFRBSWAP_MASK));
+ tmp |= dsi_rbswap & CFG_INTFRBSWAP_MASK;
+ writel_relaxed(tmp, ctrl_regs(path) + intf_rbswap_ctrl(path->id));
+
writel_relaxed((mode->yres << 16) | mode->xres, &regs->screen_active);
writel_relaxed((mode->left_margin << 16) | mode->right_margin,
&regs->screen_h_porch);
@@ -370,20 +388,12 @@ static void path_set_default(struct mmp_path *path)
* bus arbiter for faster read if not tv path;
* 2.enable horizontal smooth filter;
*/
- if (PATH_PN == path->id) {
- mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
- | CFG_ARBFAST_ENA(1);
- tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
- tmp |= mask;
- writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
- } else if (PATH_TV == path->id) {
- mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
- | CFG_ARBFAST_ENA(1);
- tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
- tmp &= ~mask;
- tmp |= CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK;
- writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
- }
+ mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK | CFG_ARBFAST_ENA(1);
+ tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
+ tmp |= mask;
+ if (PATH_TV == path->id)
+ tmp &= ~CFG_ARBFAST_ENA(1);
+ writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
}
static int path_init(struct mmphw_path_plat *path_plat,
@@ -419,6 +429,7 @@ static int path_init(struct mmphw_path_plat *path_plat,
path_plat->path = path;
path_plat->path_config = config->path_config;
path_plat->link_config = config->link_config;
+ path_plat->dsi_rbswap = config->dsi_rbswap;
path_set_default(path);
kfree(path_info);
diff --git a/drivers/video/mmp/hw/mmp_ctrl.h b/drivers/video/mmp/hw/mmp_ctrl.h
index edd2002b0e99..53301cfdb1ae 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.h
+++ b/drivers/video/mmp/hw/mmp_ctrl.h
@@ -163,6 +163,8 @@ struct lcd_regs {
#define LCD_SCLK(path) ((PATH_PN == path->id) ? LCD_CFG_SCLK_DIV :\
((PATH_TV == path->id) ? LCD_TCLK_DIV : LCD_PN2_SCLK_DIV))
+#define intf_rbswap_ctrl(id) ((id) ? (((id) & 1) ? LCD_TVIF_CTRL : \
+ PN2_IOPAD_CONTROL) : LCD_TOP_CTRL)
/* dither configure */
#ifdef CONFIG_CPU_PXA988
@@ -615,6 +617,8 @@ struct lcd_regs {
#define LCD_SPU_DUMB_CTRL 0x01B8
#define CFG_DUMBMODE(mode) ((mode)<<28)
#define CFG_DUMBMODE_MASK 0xF0000000
+#define CFG_INTFRBSWAP(mode) ((mode)<<24)
+#define CFG_INTFRBSWAP_MASK 0x0F000000
#define CFG_LCDGPIO_O(data) ((data)<<20)
#define CFG_LCDGPIO_O_MASK 0x0FF00000
#define CFG_LCDGPIO_ENA(gpio) ((gpio)<<12)
@@ -1427,6 +1431,7 @@ struct mmphw_path_plat {
struct mmp_path *path;
u32 path_config;
u32 link_config;
+ u32 dsi_rbswap;
};
/* mmp ctrl describes mmp controller related info */
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index cfdb380ec81e..804f874d32d3 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -1354,7 +1354,7 @@ static struct fb_info *mx3fb_init_fbinfo(struct device *dev, struct fb_ops *ops)
static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
{
struct device *dev = mx3fb->dev;
- struct mx3fb_platform_data *mx3fb_pdata = dev->platform_data;
+ struct mx3fb_platform_data *mx3fb_pdata = dev_get_platdata(dev);
const char *name = mx3fb_pdata->name;
unsigned int irq;
struct fb_info *fbi;
@@ -1462,7 +1462,7 @@ static bool chan_filter(struct dma_chan *chan, void *arg)
return false;
dev = rq->mx3fb->dev;
- mx3fb_pdata = dev->platform_data;
+ mx3fb_pdata = dev_get_platdata(dev);
return rq->id == chan->chan_id &&
mx3fb_pdata->dma_dev == chan->device->dev;
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index c172a5281f9e..44f99a60bb9b 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -2106,8 +2106,7 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err < 0)
goto err_reg_fb;
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
/*
* Our driver data
@@ -2148,12 +2147,6 @@ static void neofb_remove(struct pci_dev *dev)
fb_destroy_modedb(info->monspecs.modedb);
neo_unmap_mmio(info);
neo_free_fb_info(info);
-
- /*
- * Ensure that the driver data is no longer
- * valid.
- */
- pci_set_drvdata(dev, NULL);
}
}
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 796e5112ceee..478f9808dee4 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -91,7 +91,7 @@ static int nuc900fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct nuc900fb_info *fbi = info->par;
- struct nuc900fb_mach_info *mach_info = fbi->dev->platform_data;
+ struct nuc900fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
struct nuc900fb_display *display = NULL;
struct nuc900fb_display *default_display = mach_info->displays +
mach_info->default_display;
@@ -358,7 +358,7 @@ static inline void modify_gpio(void __iomem *reg,
static int nuc900fb_init_registers(struct fb_info *info)
{
struct nuc900fb_info *fbi = info->par;
- struct nuc900fb_mach_info *mach_info = fbi->dev->platform_data;
+ struct nuc900fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
void __iomem *regs = fbi->io;
/*reset the display engine*/
@@ -512,7 +512,7 @@ static int nuc900fb_probe(struct platform_device *pdev)
int size;
dev_dbg(&pdev->dev, "devinit\n");
- mach_info = pdev->dev.platform_data;
+ mach_info = dev_get_platdata(&pdev->dev);
if (mach_info == NULL) {
dev_err(&pdev->dev,
"no platform data for lcd, cannot attach\n");
@@ -647,8 +647,7 @@ static int nuc900fb_probe(struct platform_device *pdev)
goto free_cpufreq;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- fbinfo->node, fbinfo->fix.id);
+ fb_info(fbinfo, "%s frame buffer device\n", fbinfo->fix.id);
return 0;
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index ed20a9871b33..81c80ac3c76f 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -1300,7 +1300,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
break;
default:
break;
- };
+ }
NV_WR32(par->PGRAPH, 0x0b38, 0x2ffff800);
NV_WR32(par->PGRAPH, 0x0b3c, 0x00006000);
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 0c4f34311eda..9dbea2223401 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -515,8 +515,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
if (register_framebuffer(info) < 0)
goto out_err;
- printk(KERN_INFO "fb%d: Open Firmware frame buffer device on %s\n",
- info->node, full_name);
+ fb_info(info, "Open Firmware frame buffer device on %s\n", full_name);
return;
out_err:
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
index f349ee6f0cea..a4ee65b8f918 100644
--- a/drivers/video/omap/hwa742.c
+++ b/drivers/video/omap/hwa742.c
@@ -947,7 +947,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
hwa742.extif = fbdev->ext_if;
hwa742.int_ctrl = fbdev->int_ctrl;
- omapfb_conf = fbdev->dev->platform_data;
+ omapfb_conf = dev_get_platdata(fbdev->dev);
hwa742.sys_ck = clk_get(NULL, "hwa_sys_ck");
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index d40612c31a98..e4fc6d9b5371 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -1602,7 +1602,7 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev)
char name[17];
int i;
- conf = fbdev->dev->platform_data;
+ conf = dev_get_platdata(fbdev->dev);
fbdev->ctrl = NULL;
@@ -1674,7 +1674,7 @@ static int omapfb_do_probe(struct platform_device *pdev,
goto cleanup;
}
- if (pdev->dev.platform_data == NULL) {
+ if (dev_get_platdata(&pdev->dev) == NULL) {
dev_err(&pdev->dev, "missing platform data\n");
r = -ENOENT;
goto cleanup;
diff --git a/drivers/video/omap2/displays-new/Kconfig b/drivers/video/omap2/displays-new/Kconfig
index 10b25e7cd878..e6cfc38160d3 100644
--- a/drivers/video/omap2/displays-new/Kconfig
+++ b/drivers/video/omap2/displays-new/Kconfig
@@ -57,6 +57,12 @@ config DISPLAY_PANEL_SHARP_LS037V7DW01
help
LCD Panel used in TI's SDP3430 and EVM boards
+config DISPLAY_PANEL_TPO_TD028TTEC1
+ tristate "TPO TD028TTEC1 LCD Panel"
+ depends on SPI
+ help
+ LCD panel used in Openmoko.
+
config DISPLAY_PANEL_TPO_TD043MTEA1
tristate "TPO TD043MTEA1 LCD Panel"
depends on SPI
diff --git a/drivers/video/omap2/displays-new/Makefile b/drivers/video/omap2/displays-new/Makefile
index 5aeb11b8fcd5..0323a8a1c682 100644
--- a/drivers/video/omap2/displays-new/Makefile
+++ b/drivers/video/omap2/displays-new/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_DISPLAY_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
+obj-$(CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
diff --git a/drivers/video/omap2/displays-new/connector-dvi.c b/drivers/video/omap2/displays-new/connector-dvi.c
index 63d88ee6dfe4..b6c50904038e 100644
--- a/drivers/video/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/omap2/displays-new/connector-dvi.c
@@ -262,6 +262,9 @@ static int dvic_probe_pdata(struct platform_device *pdev)
in = omap_dss_find_output(pdata->source);
if (in == NULL) {
+ if (ddata->i2c_adapter)
+ i2c_put_adapter(ddata->i2c_adapter);
+
dev_err(&pdev->dev, "Failed to find video source\n");
return -EPROBE_DEFER;
}
@@ -313,6 +316,10 @@ static int dvic_probe(struct platform_device *pdev)
err_reg:
omap_dss_put_device(ddata->in);
+
+ if (ddata->i2c_adapter)
+ i2c_put_adapter(ddata->i2c_adapter);
+
return r;
}
diff --git a/drivers/video/omap2/displays-new/panel-dsi-cm.c b/drivers/video/omap2/displays-new/panel-dsi-cm.c
index aaaea6469cd9..b7baafe83aa3 100644
--- a/drivers/video/omap2/displays-new/panel-dsi-cm.c
+++ b/drivers/video/omap2/displays-new/panel-dsi-cm.c
@@ -599,7 +599,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
if (r) {
dev_err(&ddata->pdev->dev, "failed to configure DSI pins\n");
goto err0;
- };
+ }
r = in->ops.dsi->set_config(in, &dsi_config);
if (r) {
diff --git a/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c b/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c
new file mode 100644
index 000000000000..9a08908fe998
--- /dev/null
+++ b/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c
@@ -0,0 +1,480 @@
+/*
+ * Toppoly TD028TTEC1 panel support
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Neo 1973 code (jbt6k74.c):
+ * Copyright (C) 2006-2007 by OpenMoko, Inc.
+ * Author: Harald Welte <laforge@openmoko.org>
+ *
+ * Ported and adapted from Neo 1973 U-Boot by:
+ * H. Nikolaus Schaller <hns@goldelico.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <video/omapdss.h>
+#include <video/omap-panel-data.h>
+
+struct panel_drv_data {
+ struct omap_dss_device dssdev;
+ struct omap_dss_device *in;
+
+ int data_lines;
+
+ struct omap_video_timings videomode;
+
+ struct spi_device *spi_dev;
+};
+
+static struct omap_video_timings td028ttec1_panel_timings = {
+ .x_res = 480,
+ .y_res = 640,
+ .pixel_clock = 22153,
+ .hfp = 24,
+ .hsw = 8,
+ .hbp = 8,
+ .vfp = 4,
+ .vsw = 2,
+ .vbp = 2,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+};
+
+#define JBT_COMMAND 0x000
+#define JBT_DATA 0x100
+
+static int jbt_ret_write_0(struct panel_drv_data *ddata, u8 reg)
+{
+ int rc;
+ u16 tx_buf = JBT_COMMAND | reg;
+
+ rc = spi_write(ddata->spi_dev, (u8 *)&tx_buf,
+ 1*sizeof(u16));
+ if (rc != 0)
+ dev_err(&ddata->spi_dev->dev,
+ "jbt_ret_write_0 spi_write ret %d\n", rc);
+
+ return rc;
+}
+
+static int jbt_reg_write_1(struct panel_drv_data *ddata, u8 reg, u8 data)
+{
+ int rc;
+ u16 tx_buf[2];
+
+ tx_buf[0] = JBT_COMMAND | reg;
+ tx_buf[1] = JBT_DATA | data;
+ rc = spi_write(ddata->spi_dev, (u8 *)tx_buf,
+ 2*sizeof(u16));
+ if (rc != 0)
+ dev_err(&ddata->spi_dev->dev,
+ "jbt_reg_write_1 spi_write ret %d\n", rc);
+
+ return rc;
+}
+
+static int jbt_reg_write_2(struct panel_drv_data *ddata, u8 reg, u16 data)
+{
+ int rc;
+ u16 tx_buf[3];
+
+ tx_buf[0] = JBT_COMMAND | reg;
+ tx_buf[1] = JBT_DATA | (data >> 8);
+ tx_buf[2] = JBT_DATA | (data & 0xff);
+
+ rc = spi_write(ddata->spi_dev, (u8 *)tx_buf,
+ 3*sizeof(u16));
+
+ if (rc != 0)
+ dev_err(&ddata->spi_dev->dev,
+ "jbt_reg_write_2 spi_write ret %d\n", rc);
+
+ return rc;
+}
+
+enum jbt_register {
+ JBT_REG_SLEEP_IN = 0x10,
+ JBT_REG_SLEEP_OUT = 0x11,
+
+ JBT_REG_DISPLAY_OFF = 0x28,
+ JBT_REG_DISPLAY_ON = 0x29,
+
+ JBT_REG_RGB_FORMAT = 0x3a,
+ JBT_REG_QUAD_RATE = 0x3b,
+
+ JBT_REG_POWER_ON_OFF = 0xb0,
+ JBT_REG_BOOSTER_OP = 0xb1,
+ JBT_REG_BOOSTER_MODE = 0xb2,
+ JBT_REG_BOOSTER_FREQ = 0xb3,
+ JBT_REG_OPAMP_SYSCLK = 0xb4,
+ JBT_REG_VSC_VOLTAGE = 0xb5,
+ JBT_REG_VCOM_VOLTAGE = 0xb6,
+ JBT_REG_EXT_DISPL = 0xb7,
+ JBT_REG_OUTPUT_CONTROL = 0xb8,
+ JBT_REG_DCCLK_DCEV = 0xb9,
+ JBT_REG_DISPLAY_MODE1 = 0xba,
+ JBT_REG_DISPLAY_MODE2 = 0xbb,
+ JBT_REG_DISPLAY_MODE = 0xbc,
+ JBT_REG_ASW_SLEW = 0xbd,
+ JBT_REG_DUMMY_DISPLAY = 0xbe,
+ JBT_REG_DRIVE_SYSTEM = 0xbf,
+
+ JBT_REG_SLEEP_OUT_FR_A = 0xc0,
+ JBT_REG_SLEEP_OUT_FR_B = 0xc1,
+ JBT_REG_SLEEP_OUT_FR_C = 0xc2,
+ JBT_REG_SLEEP_IN_LCCNT_D = 0xc3,
+ JBT_REG_SLEEP_IN_LCCNT_E = 0xc4,
+ JBT_REG_SLEEP_IN_LCCNT_F = 0xc5,
+ JBT_REG_SLEEP_IN_LCCNT_G = 0xc6,
+
+ JBT_REG_GAMMA1_FINE_1 = 0xc7,
+ JBT_REG_GAMMA1_FINE_2 = 0xc8,
+ JBT_REG_GAMMA1_INCLINATION = 0xc9,
+ JBT_REG_GAMMA1_BLUE_OFFSET = 0xca,
+
+ JBT_REG_BLANK_CONTROL = 0xcf,
+ JBT_REG_BLANK_TH_TV = 0xd0,
+ JBT_REG_CKV_ON_OFF = 0xd1,
+ JBT_REG_CKV_1_2 = 0xd2,
+ JBT_REG_OEV_TIMING = 0xd3,
+ JBT_REG_ASW_TIMING_1 = 0xd4,
+ JBT_REG_ASW_TIMING_2 = 0xd5,
+
+ JBT_REG_HCLOCK_VGA = 0xec,
+ JBT_REG_HCLOCK_QVGA = 0xed,
+};
+
+#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
+
+static int td028ttec1_panel_connect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (omapdss_device_is_connected(dssdev))
+ return 0;
+
+ r = in->ops.dpi->connect(in, dssdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return;
+
+ in->ops.dpi->disconnect(in, dssdev);
+}
+
+static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ int r;
+
+ if (!omapdss_device_is_connected(dssdev))
+ return -ENODEV;
+
+ if (omapdss_device_is_enabled(dssdev))
+ return 0;
+
+ in->ops.dpi->set_data_lines(in, ddata->data_lines);
+ in->ops.dpi->set_timings(in, &ddata->videomode);
+
+ r = in->ops.dpi->enable(in);
+ if (r)
+ return r;
+
+ dev_dbg(dssdev->dev, "td028ttec1_panel_enable() - state %d\n",
+ dssdev->state);
+
+ /* three times command zero */
+ r |= jbt_ret_write_0(ddata, 0x00);
+ usleep_range(1000, 2000);
+ r |= jbt_ret_write_0(ddata, 0x00);
+ usleep_range(1000, 2000);
+ r |= jbt_ret_write_0(ddata, 0x00);
+ usleep_range(1000, 2000);
+
+ if (r) {
+ dev_warn(dssdev->dev, "transfer error\n");
+ goto transfer_err;
+ }
+
+ /* deep standby out */
+ r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x17);
+
+ /* RGB I/F on, RAM write off, QVGA through, SIGCON enable */
+ r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE, 0x80);
+
+ /* Quad mode off */
+ r |= jbt_reg_write_1(ddata, JBT_REG_QUAD_RATE, 0x00);
+
+ /* AVDD on, XVDD on */
+ r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x16);
+
+ /* Output control */
+ r |= jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0xfff9);
+
+ /* Sleep mode off */
+ r |= jbt_ret_write_0(ddata, JBT_REG_SLEEP_OUT);
+
+ /* at this point we have like 50% grey */
+
+ /* initialize register set */
+ r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE1, 0x01);
+ r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE2, 0x00);
+ r |= jbt_reg_write_1(ddata, JBT_REG_RGB_FORMAT, 0x60);
+ r |= jbt_reg_write_1(ddata, JBT_REG_DRIVE_SYSTEM, 0x10);
+ r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_OP, 0x56);
+ r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_MODE, 0x33);
+ r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11);
+ r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11);
+ r |= jbt_reg_write_1(ddata, JBT_REG_OPAMP_SYSCLK, 0x02);
+ r |= jbt_reg_write_1(ddata, JBT_REG_VSC_VOLTAGE, 0x2b);
+ r |= jbt_reg_write_1(ddata, JBT_REG_VCOM_VOLTAGE, 0x40);
+ r |= jbt_reg_write_1(ddata, JBT_REG_EXT_DISPL, 0x03);
+ r |= jbt_reg_write_1(ddata, JBT_REG_DCCLK_DCEV, 0x04);
+ /*
+ * default of 0x02 in JBT_REG_ASW_SLEW responsible for 72Hz requirement
+ * to avoid red / blue flicker
+ */
+ r |= jbt_reg_write_1(ddata, JBT_REG_ASW_SLEW, 0x04);
+ r |= jbt_reg_write_1(ddata, JBT_REG_DUMMY_DISPLAY, 0x00);
+
+ r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_A, 0x11);
+ r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_B, 0x11);
+ r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_C, 0x11);
+ r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_D, 0x2040);
+ r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_E, 0x60c0);
+ r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_F, 0x1020);
+ r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_G, 0x60c0);
+
+ r |= jbt_reg_write_2(ddata, JBT_REG_GAMMA1_FINE_1, 0x5533);
+ r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_FINE_2, 0x00);
+ r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_INCLINATION, 0x00);
+ r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_BLUE_OFFSET, 0x00);
+
+ r |= jbt_reg_write_2(ddata, JBT_REG_HCLOCK_VGA, 0x1f0);
+ r |= jbt_reg_write_1(ddata, JBT_REG_BLANK_CONTROL, 0x02);
+ r |= jbt_reg_write_2(ddata, JBT_REG_BLANK_TH_TV, 0x0804);
+
+ r |= jbt_reg_write_1(ddata, JBT_REG_CKV_ON_OFF, 0x01);
+ r |= jbt_reg_write_2(ddata, JBT_REG_CKV_1_2, 0x0000);
+
+ r |= jbt_reg_write_2(ddata, JBT_REG_OEV_TIMING, 0x0d0e);
+ r |= jbt_reg_write_2(ddata, JBT_REG_ASW_TIMING_1, 0x11a4);
+ r |= jbt_reg_write_1(ddata, JBT_REG_ASW_TIMING_2, 0x0e);
+
+ r |= jbt_ret_write_0(ddata, JBT_REG_DISPLAY_ON);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+transfer_err:
+
+ return r ? -EIO : 0;
+}
+
+static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (!omapdss_device_is_enabled(dssdev))
+ return;
+
+ dev_dbg(dssdev->dev, "td028ttec1_panel_disable()\n");
+
+ jbt_ret_write_0(ddata, JBT_REG_DISPLAY_OFF);
+ jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0x8002);
+ jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN);
+ jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00);
+
+ in->ops.dpi->disable(in);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ ddata->videomode = *timings;
+ dssdev->panel.timings = *timings;
+
+ in->ops.dpi->set_timings(in, timings);
+}
+
+static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *timings = ddata->videomode;
+}
+
+static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.dpi->check_timings(in, timings);
+}
+
+static struct omap_dss_driver td028ttec1_ops = {
+ .connect = td028ttec1_panel_connect,
+ .disconnect = td028ttec1_panel_disconnect,
+
+ .enable = td028ttec1_panel_enable,
+ .disable = td028ttec1_panel_disable,
+
+ .set_timings = td028ttec1_panel_set_timings,
+ .get_timings = td028ttec1_panel_get_timings,
+ .check_timings = td028ttec1_panel_check_timings,
+};
+
+static int td028ttec1_panel_probe_pdata(struct spi_device *spi)
+{
+ const struct panel_tpo_td028ttec1_platform_data *pdata;
+ struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct omap_dss_device *dssdev, *in;
+
+ pdata = dev_get_platdata(&spi->dev);
+
+ in = omap_dss_find_output(pdata->source);
+ if (in == NULL) {
+ dev_err(&spi->dev, "failed to find video source '%s'\n",
+ pdata->source);
+ return -EPROBE_DEFER;
+ }
+
+ ddata->in = in;
+
+ ddata->data_lines = pdata->data_lines;
+
+ dssdev = &ddata->dssdev;
+ dssdev->name = pdata->name;
+
+ return 0;
+}
+
+static int td028ttec1_panel_probe(struct spi_device *spi)
+{
+ struct panel_drv_data *ddata;
+ struct omap_dss_device *dssdev;
+ int r;
+
+ dev_dbg(&spi->dev, "%s\n", __func__);
+
+ spi->bits_per_word = 9;
+ spi->mode = SPI_MODE_3;
+
+ r = spi_setup(spi);
+ if (r < 0) {
+ dev_err(&spi->dev, "spi_setup failed: %d\n", r);
+ return r;
+ }
+
+ ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
+ if (ddata == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, ddata);
+
+ ddata->spi_dev = spi;
+
+ if (dev_get_platdata(&spi->dev)) {
+ r = td028ttec1_panel_probe_pdata(spi);
+ if (r)
+ return r;
+ } else {
+ return -ENODEV;
+ }
+
+ ddata->videomode = td028ttec1_panel_timings;
+
+ dssdev = &ddata->dssdev;
+ dssdev->dev = &spi->dev;
+ dssdev->driver = &td028ttec1_ops;
+ dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->owner = THIS_MODULE;
+ dssdev->panel.timings = ddata->videomode;
+ dssdev->phy.dpi.data_lines = ddata->data_lines;
+
+ r = omapdss_register_display(dssdev);
+ if (r) {
+ dev_err(&spi->dev, "Failed to register panel\n");
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+ omap_dss_put_device(ddata->in);
+ return r;
+}
+
+static int td028ttec1_panel_remove(struct spi_device *spi)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
+ struct omap_dss_device *dssdev = &ddata->dssdev;
+ struct omap_dss_device *in = ddata->in;
+
+ dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__);
+
+ omapdss_unregister_display(dssdev);
+
+ td028ttec1_panel_disable(dssdev);
+ td028ttec1_panel_disconnect(dssdev);
+
+ omap_dss_put_device(in);
+
+ return 0;
+}
+
+static struct spi_driver td028ttec1_spi_driver = {
+ .probe = td028ttec1_panel_probe,
+ .remove = td028ttec1_panel_remove,
+
+ .driver = {
+ .name = "panel-tpo-td028ttec1",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_spi_driver(td028ttec1_spi_driver);
+
+MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
+MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 94832eb06a3d..d3aa91bdd6a8 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -10,5 +10,6 @@ omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
-omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi.o ti_hdmi_4xxx_ip.o
+omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi4.o hdmi_common.o hdmi_wp.o hdmi_pll.o \
+ hdmi_phy.o hdmi4_core.o
ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 60d3958d04f7..ffa45c894cd4 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -266,7 +266,7 @@ static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
venc_init_platform_driver,
#endif
#ifdef CONFIG_OMAP4_DSS_HDMI
- hdmi_init_platform_driver,
+ hdmi4_init_platform_driver,
#endif
};
@@ -287,7 +287,7 @@ static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
venc_uninit_platform_driver,
#endif
#ifdef CONFIG_OMAP4_DSS_HDMI
- hdmi_uninit_platform_driver,
+ hdmi4_uninit_platform_driver,
#endif
};
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 477975009eee..4ec59ca72e5d 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -2352,7 +2352,7 @@ int dispc_ovl_check(enum omap_plane plane, enum omap_channel channel,
{
enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
bool five_taps = true;
- bool fieldmode = 0;
+ bool fieldmode = false;
u16 in_height = oi->height;
u16 in_width = oi->width;
bool ilace = timings->interlace;
@@ -2365,7 +2365,7 @@ int dispc_ovl_check(enum omap_plane plane, enum omap_channel channel,
out_height = oi->out_height == 0 ? oi->height : oi->out_height;
if (ilace && oi->height == out_height)
- fieldmode = 1;
+ fieldmode = true;
if (ilace) {
if (fieldmode)
@@ -2396,7 +2396,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
bool mem_to_mem)
{
bool five_taps = true;
- bool fieldmode = 0;
+ bool fieldmode = false;
int r, cconv = 0;
unsigned offset0, offset1;
s32 row_inc;
@@ -2417,7 +2417,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
out_height = out_height == 0 ? height : out_height;
if (ilace && height == out_height)
- fieldmode = 1;
+ fieldmode = true;
if (ilace) {
if (fieldmode)
@@ -2918,7 +2918,7 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
break;
default:
BUG();
- };
+ }
l = dispc_read_reg(DISPC_POL_FREQ(channel));
l |= FLD_VAL(onoff, 17, 17);
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index fafe7c941a60..669a81fdf58e 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -266,7 +266,7 @@ void videomode_to_omap_video_timings(const struct videomode *vm,
OMAPDSS_SIG_ACTIVE_LOW;
ovt->de_level = vm->flags & DISPLAY_FLAGS_DE_HIGH ?
OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_HIGH;
+ OMAPDSS_SIG_ACTIVE_LOW;
ovt->data_pclk_edge = vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ?
OMAPDSS_DRIVE_SIG_RISING_EDGE :
OMAPDSS_DRIVE_SIG_FALLING_EDGE;
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index a598b5812285..6056b27cf73c 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -312,7 +312,7 @@ struct dsi_data {
struct dsi_isr_tables isr_tables_copy;
int update_channel;
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
unsigned update_bytes;
#endif
@@ -334,7 +334,7 @@ struct dsi_data {
u32 errors;
spinlock_t errors_lock;
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
ktime_t perf_setup_time;
ktime_t perf_start_time;
#endif
@@ -373,7 +373,7 @@ struct dsi_packet_sent_handler_data {
struct completion *completion;
};
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
#endif
@@ -497,7 +497,7 @@ u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
}
}
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
static void dsi_perf_mark_setup(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4066,7 +4066,7 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
default:
r = -EINVAL;
goto err_pix_fmt;
- };
+ }
dsi_if_enable(dsidev, false);
dsi_vc_enable(dsidev, channel, false);
@@ -4277,7 +4277,7 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
dw = dsi->timings.x_res;
dh = dsi->timings.y_res;
-#ifdef DEBUG
+#ifdef DSI_PERF_MEASURE
dsi->update_bytes = dw * dh *
dsi_get_pixel_size(dsi->pix_fmt) / 8;
#endif
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index e172531d196b..f538e867c0f8 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -427,8 +427,8 @@ int venc_init_platform_driver(void) __init;
void venc_uninit_platform_driver(void) __exit;
/* HDMI */
-int hdmi_init_platform_driver(void) __init;
-void hdmi_uninit_platform_driver(void) __exit;
+int hdmi4_init_platform_driver(void) __init;
+void hdmi4_uninit_platform_driver(void) __exit;
/* RFBI */
int rfbi_init_platform_driver(void) __init;
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index b9cfebb378a2..f8fd6dbacabc 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -789,50 +789,6 @@ static const struct omap_dss_features omap5_dss_features = {
.burst_size_unit = 16,
};
-#if defined(CONFIG_OMAP4_DSS_HDMI)
-/* HDMI OMAP4 Functions*/
-static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
-
- .video_configure = ti_hdmi_4xxx_basic_configure,
- .phy_enable = ti_hdmi_4xxx_phy_enable,
- .phy_disable = ti_hdmi_4xxx_phy_disable,
- .read_edid = ti_hdmi_4xxx_read_edid,
- .pll_enable = ti_hdmi_4xxx_pll_enable,
- .pll_disable = ti_hdmi_4xxx_pll_disable,
- .video_enable = ti_hdmi_4xxx_wp_video_start,
- .video_disable = ti_hdmi_4xxx_wp_video_stop,
- .dump_wrapper = ti_hdmi_4xxx_wp_dump,
- .dump_core = ti_hdmi_4xxx_core_dump,
- .dump_pll = ti_hdmi_4xxx_pll_dump,
- .dump_phy = ti_hdmi_4xxx_phy_dump,
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
- .audio_enable = ti_hdmi_4xxx_wp_audio_enable,
- .audio_disable = ti_hdmi_4xxx_wp_audio_disable,
- .audio_start = ti_hdmi_4xxx_audio_start,
- .audio_stop = ti_hdmi_4xxx_audio_stop,
- .audio_config = ti_hdmi_4xxx_audio_config,
- .audio_get_dma_port = ti_hdmi_4xxx_audio_get_dma_port,
-#endif
-
-};
-
-void dss_init_hdmi_ip_ops(struct hdmi_ip_data *ip_data,
- enum omapdss_version version)
-{
- switch (version) {
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
- ip_data->ops = &omap4_hdmi_functions;
- break;
- default:
- ip_data->ops = NULL;
- }
-
- WARN_ON(ip_data->ops == NULL);
-}
-#endif
-
/* Functions returning values related to a DSS feature */
int dss_feat_get_num_mgrs(void)
{
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 489b9bec4a6d..10b0556e1352 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -20,10 +20,6 @@
#ifndef __OMAP2_DSS_FEATURES_H
#define __OMAP2_DSS_FEATURES_H
-#if defined(CONFIG_OMAP4_DSS_HDMI)
-#include "ti_hdmi.h"
-#endif
-
#define MAX_DSS_MANAGERS 4
#define MAX_DSS_OVERLAYS 4
#define MAX_DSS_LCD_MANAGERS 3
@@ -117,8 +113,4 @@ bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type);
bool dss_has_feature(enum dss_feat_id id);
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
void dss_features_init(enum omapdss_version version);
-#if defined(CONFIG_OMAP4_DSS_HDMI)
-void dss_init_hdmi_ip_ops(struct hdmi_ip_data *ip_data,
- enum omapdss_version version);
-#endif
#endif
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
deleted file mode 100644
index 82a964074993..000000000000
--- a/drivers/video/omap2/dss/hdmi.c
+++ /dev/null
@@ -1,1184 +0,0 @@
-/*
- * hdmi.c
- *
- * HDMI interface DSS driver for TI's OMAP4 family of processors.
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
- * Authors: Yong Zhi
- * Mythri pk <mythripk@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "HDMI"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/regulator/consumer.h>
-#include <video/omapdss.h>
-
-#include "ti_hdmi.h"
-#include "dss.h"
-#include "dss_features.h"
-
-#define HDMI_WP 0x0
-#define HDMI_CORE_SYS 0x400
-#define HDMI_CORE_AV 0x900
-#define HDMI_PLLCTRL 0x200
-#define HDMI_PHY 0x300
-
-/* HDMI EDID length; TODO: move this */
-#define HDMI_EDID_MAX_LENGTH 256
-#define EDID_TIMING_DESCRIPTOR_SIZE 0x12
-#define EDID_DESCRIPTOR_BLOCK0_ADDRESS 0x36
-#define EDID_DESCRIPTOR_BLOCK1_ADDRESS 0x80
-#define EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR 4
-#define EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR 4
-
-#define HDMI_DEFAULT_REGN 16
-#define HDMI_DEFAULT_REGM2 1
-
-static struct {
- struct mutex lock;
- struct platform_device *pdev;
-
- struct hdmi_ip_data ip_data;
-
- struct clk *sys_clk;
- struct regulator *vdda_hdmi_dac_reg;
-
- bool core_enabled;
-
- struct omap_dss_device output;
-} hdmi;
-
-/*
- * Logic for the structures below:
- * the user selects a CEA or VESA timing by specifying the HDMI/DVI code.
- * There is a correspondence between CEA/VESA timings and codes; please
- * refer to section 6.3 of the HDMI 1.3 specification for the timing codes.
- *
- * In the structures below, cea_timings and vesa_timings correspond to all
- * of the CEA and VESA timing values supported on OMAP4. code_cea is the
- * CEA code and is used to look up the timing in the cea_timings array;
- * likewise for code_vesa. The code is also used for back mapping: once the
- * EDID is read from the TV, it is parsed to find the timing values, which
- * are then mapped to the corresponding CEA or VESA index.
- */
-
-static const struct hdmi_config cea_timings[] = {
- {
- { 640, 480, 25200, 96, 16, 48, 2, 10, 33,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 1, HDMI_HDMI },
- },
- {
- { 720, 480, 27027, 62, 16, 60, 6, 9, 30,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 2, HDMI_HDMI },
- },
- {
- { 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 4, HDMI_HDMI },
- },
- {
- { 1920, 540, 74250, 44, 88, 148, 5, 2, 15,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- true, },
- { 5, HDMI_HDMI },
- },
- {
- { 1440, 240, 27027, 124, 38, 114, 3, 4, 15,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- true, },
- { 6, HDMI_HDMI },
- },
- {
- { 1920, 1080, 148500, 44, 88, 148, 5, 4, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 16, HDMI_HDMI },
- },
- {
- { 720, 576, 27000, 64, 12, 68, 5, 5, 39,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 17, HDMI_HDMI },
- },
- {
- { 1280, 720, 74250, 40, 440, 220, 5, 5, 20,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 19, HDMI_HDMI },
- },
- {
- { 1920, 540, 74250, 44, 528, 148, 5, 2, 15,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- true, },
- { 20, HDMI_HDMI },
- },
- {
- { 1440, 288, 27000, 126, 24, 138, 3, 2, 19,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- true, },
- { 21, HDMI_HDMI },
- },
- {
- { 1440, 576, 54000, 128, 24, 136, 5, 5, 39,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 29, HDMI_HDMI },
- },
- {
- { 1920, 1080, 148500, 44, 528, 148, 5, 4, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 31, HDMI_HDMI },
- },
- {
- { 1920, 1080, 74250, 44, 638, 148, 5, 4, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 32, HDMI_HDMI },
- },
- {
- { 2880, 480, 108108, 248, 64, 240, 6, 9, 30,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 35, HDMI_HDMI },
- },
- {
- { 2880, 576, 108000, 256, 48, 272, 5, 5, 39,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 37, HDMI_HDMI },
- },
-};
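/*
 * Illustrative reading of the first cea_timings entry above, assuming the
 * omap_video_timings initializer order is x_res, y_res, pixel_clock (kHz),
 * hsw, hfp, hbp, vsw, vfp, vbp, followed by the sync polarities and the
 * interlace flag: 640x480 at a 25.2 MHz pixel clock, 96-pixel hsync with
 * 16/48-pixel front/back porches, 2-line vsync with 10/33-line front/back
 * porches, active-low syncs, progressive; that is, CEA code 1, signalled
 * as HDMI rather than DVI.
 */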
-
-static const struct hdmi_config vesa_timings[] = {
-/* VESA From Here */
- {
- { 640, 480, 25175, 96, 16, 48, 2, 11, 31,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 4, HDMI_DVI },
- },
- {
- { 800, 600, 40000, 128, 40, 88, 4, 1, 23,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 9, HDMI_DVI },
- },
- {
- { 848, 480, 33750, 112, 16, 112, 8, 6, 23,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0xE, HDMI_DVI },
- },
- {
- { 1280, 768, 79500, 128, 64, 192, 7, 3, 20,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x17, HDMI_DVI },
- },
- {
- { 1280, 800, 83500, 128, 72, 200, 6, 3, 22,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x1C, HDMI_DVI },
- },
- {
- { 1360, 768, 85500, 112, 64, 256, 6, 3, 18,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x27, HDMI_DVI },
- },
- {
- { 1280, 960, 108000, 112, 96, 312, 3, 1, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x20, HDMI_DVI },
- },
- {
- { 1280, 1024, 108000, 112, 48, 248, 3, 1, 38,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x23, HDMI_DVI },
- },
- {
- { 1024, 768, 65000, 136, 24, 160, 6, 3, 29,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x10, HDMI_DVI },
- },
- {
- { 1400, 1050, 121750, 144, 88, 232, 4, 3, 32,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x2A, HDMI_DVI },
- },
- {
- { 1440, 900, 106500, 152, 80, 232, 6, 3, 25,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x2F, HDMI_DVI },
- },
- {
- { 1680, 1050, 146250, 176 , 104, 280, 6, 3, 30,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x3A, HDMI_DVI },
- },
- {
- { 1366, 768, 85500, 143, 70, 213, 3, 3, 24,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x51, HDMI_DVI },
- },
- {
- { 1920, 1080, 148500, 44, 148, 80, 5, 4, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x52, HDMI_DVI },
- },
- {
- { 1280, 768, 68250, 32, 48, 80, 7, 3, 12,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x16, HDMI_DVI },
- },
- {
- { 1400, 1050, 101000, 32, 48, 80, 4, 3, 23,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x29, HDMI_DVI },
- },
- {
- { 1680, 1050, 119000, 32, 48, 80, 6, 3, 21,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x39, HDMI_DVI },
- },
- {
- { 1280, 800, 79500, 32, 48, 80, 6, 3, 14,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x1B, HDMI_DVI },
- },
- {
- { 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x55, HDMI_DVI },
- },
- {
- { 1920, 1200, 154000, 32, 48, 80, 6, 3, 26,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x44, HDMI_DVI },
- },
-};
-
-static int hdmi_runtime_get(void)
-{
- int r;
-
- DSSDBG("hdmi_runtime_get\n");
-
- r = pm_runtime_get_sync(&hdmi.pdev->dev);
- WARN_ON(r < 0);
- if (r < 0)
- return r;
-
- return 0;
-}
-
-static void hdmi_runtime_put(void)
-{
- int r;
-
- DSSDBG("hdmi_runtime_put\n");
-
- r = pm_runtime_put_sync(&hdmi.pdev->dev);
- WARN_ON(r < 0 && r != -ENOSYS);
-}
-
-static int hdmi_init_regulator(void)
-{
- struct regulator *reg;
-
- if (hdmi.vdda_hdmi_dac_reg != NULL)
- return 0;
-
- reg = devm_regulator_get(&hdmi.pdev->dev, "vdda_hdmi_dac");
-
- /* DT HACK: try VDAC to make omapdss work for o4 sdp/panda */
- if (IS_ERR(reg))
- reg = devm_regulator_get(&hdmi.pdev->dev, "VDAC");
-
- if (IS_ERR(reg)) {
- DSSERR("can't get VDDA_HDMI_DAC regulator\n");
- return PTR_ERR(reg);
- }
-
- hdmi.vdda_hdmi_dac_reg = reg;
-
- return 0;
-}
-
-static const struct hdmi_config *hdmi_find_timing(
- const struct hdmi_config *timings_arr,
- int len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- if (timings_arr[i].cm.code == hdmi.ip_data.cfg.cm.code)
- return &timings_arr[i];
- }
- return NULL;
-}
-
-static const struct hdmi_config *hdmi_get_timings(void)
-{
- const struct hdmi_config *arr;
- int len;
-
- if (hdmi.ip_data.cfg.cm.mode == HDMI_DVI) {
- arr = vesa_timings;
- len = ARRAY_SIZE(vesa_timings);
- } else {
- arr = cea_timings;
- len = ARRAY_SIZE(cea_timings);
- }
-
- return hdmi_find_timing(arr, len);
-}
-
-static bool hdmi_timings_compare(struct omap_video_timings *timing1,
- const struct omap_video_timings *timing2)
-{
- int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync;
-
- if ((DIV_ROUND_CLOSEST(timing2->pixel_clock, 1000) ==
- DIV_ROUND_CLOSEST(timing1->pixel_clock, 1000)) &&
- (timing2->x_res == timing1->x_res) &&
- (timing2->y_res == timing1->y_res)) {
-
- timing2_hsync = timing2->hfp + timing2->hsw + timing2->hbp;
- timing1_hsync = timing1->hfp + timing1->hsw + timing1->hbp;
- timing2_vsync = timing2->vfp + timing2->vsw + timing2->vbp;
- timing1_vsync = timing2->vfp + timing2->vsw + timing2->vbp;
-
- DSSDBG("timing1_hsync = %d timing1_vsync = %d"\
- "timing2_hsync = %d timing2_vsync = %d\n",
- timing1_hsync, timing1_vsync,
- timing2_hsync, timing2_vsync);
-
- if ((timing1_hsync == timing2_hsync) &&
- (timing1_vsync == timing2_vsync)) {
- return true;
- }
- }
- return false;
-}
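/*
 * Matching criterion used above: two timings are considered the same mode
 * when their pixel clocks agree after rounding to MHz, their resolutions
 * match, and their summed horizontal blanking (hfp + hsw + hbp) matches;
 * the vertical sums are compared the same way (note that timing1_vsync
 * above is computed from timing2's fields, so that comparison always
 * succeeds as written).
 */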
-
-static struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing)
-{
- int i;
- struct hdmi_cm cm = {-1};
- DSSDBG("hdmi_get_code\n");
-
- for (i = 0; i < ARRAY_SIZE(cea_timings); i++) {
- if (hdmi_timings_compare(timing, &cea_timings[i].timings)) {
- cm = cea_timings[i].cm;
- goto end;
- }
- }
- for (i = 0; i < ARRAY_SIZE(vesa_timings); i++) {
- if (hdmi_timings_compare(timing, &vesa_timings[i].timings)) {
- cm = vesa_timings[i].cm;
- goto end;
- }
- }
-
-end: return cm;
-
-}
-
-static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
- struct hdmi_pll_info *pi)
-{
- unsigned long clkin, refclk;
- u32 mf;
-
- clkin = clk_get_rate(hdmi.sys_clk) / 10000;
- /*
- * The input clock is predivided by N + 1;
- * the output of that divider is the reference clock
- */
-
- pi->regn = HDMI_DEFAULT_REGN;
-
- refclk = clkin / pi->regn;
-
- pi->regm2 = HDMI_DEFAULT_REGM2;
-
- /*
- * The multiplier is pixel_clk / ref_clk;
- * multiply by 100 to avoid losing the fractional part
- */
- pi->regm = phy * pi->regm2 / refclk;
-
- /*
- * The fractional multiplier is the remainder of the difference between
- * the multiplier and the actual phy (the required pixel clock); it is
- * multiplied by 2^18 (262144) and divided by the reference clock
- */
- mf = (phy - pi->regm / pi->regm2 * refclk) * 262144;
- pi->regmf = pi->regm2 * mf / refclk;
-
- /*
- * Dcofreq should be set to 1 if required pixel clock
- * is greater than 1000MHz
- */
- pi->dcofreq = phy > 1000 * 100;
- pi->regsd = ((pi->regm * clkin / 10) / (pi->regn * 250) + 5) / 10;
-
- /* Set the reference clock to sysclk reference */
- pi->refsel = HDMI_REFSEL_SYSCLK;
-
- DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf);
- DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
-}
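/*
 * Restated (this is only a reading of the code above, keeping its unit
 * handling as-is: clkin divided by 10000, phy taken straight from the
 * pixel clock): refclk = clkin / regn, regm is the integer part of
 * phy * regm2 / refclk, and regmf carries the leftover fraction scaled
 * to 18 bits, i.e. regm2 * (phy - (regm / regm2) * refclk) * 262144 / refclk.
 */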
-
-static int hdmi_power_on_core(struct omap_dss_device *dssdev)
-{
- int r;
-
- r = regulator_enable(hdmi.vdda_hdmi_dac_reg);
- if (r)
- return r;
-
- r = hdmi_runtime_get();
- if (r)
- goto err_runtime_get;
-
- /* Make selection of HDMI in DSS */
- dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
-
- hdmi.core_enabled = true;
-
- return 0;
-
-err_runtime_get:
- regulator_disable(hdmi.vdda_hdmi_dac_reg);
-
- return r;
-}
-
-static void hdmi_power_off_core(struct omap_dss_device *dssdev)
-{
- hdmi.core_enabled = false;
-
- hdmi_runtime_put();
- regulator_disable(hdmi.vdda_hdmi_dac_reg);
-}
-
-static int hdmi_power_on_full(struct omap_dss_device *dssdev)
-{
- int r;
- struct omap_video_timings *p;
- struct omap_overlay_manager *mgr = hdmi.output.manager;
- unsigned long phy;
-
- r = hdmi_power_on_core(dssdev);
- if (r)
- return r;
-
- dss_mgr_disable(mgr);
-
- p = &hdmi.ip_data.cfg.timings;
-
- DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
-
- phy = p->pixel_clock;
-
- hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data);
-
- hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
-
- /* config the PLL and PHY (hdmi_set_pll_pwr first) */
- r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data);
- if (r) {
- DSSDBG("Failed to lock PLL\n");
- goto err_pll_enable;
- }
-
- r = hdmi.ip_data.ops->phy_enable(&hdmi.ip_data);
- if (r) {
- DSSDBG("Failed to start PHY\n");
- goto err_phy_enable;
- }
-
- hdmi.ip_data.ops->video_configure(&hdmi.ip_data);
-
- /* bypass TV gamma table */
- dispc_enable_gamma_table(0);
-
- /* tv size */
- dss_mgr_set_timings(mgr, p);
-
- r = hdmi.ip_data.ops->video_enable(&hdmi.ip_data);
- if (r)
- goto err_vid_enable;
-
- r = dss_mgr_enable(mgr);
- if (r)
- goto err_mgr_enable;
-
- return 0;
-
-err_mgr_enable:
- hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
-err_vid_enable:
- hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
-err_phy_enable:
- hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
-err_pll_enable:
- hdmi_power_off_core(dssdev);
- return -EIO;
-}
-
-static void hdmi_power_off_full(struct omap_dss_device *dssdev)
-{
- struct omap_overlay_manager *mgr = hdmi.output.manager;
-
- dss_mgr_disable(mgr);
-
- hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
- hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
- hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
-
- hdmi_power_off_core(dssdev);
-}
-
-static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- struct hdmi_cm cm;
-
- cm = hdmi_get_code(timings);
- if (cm.code == -1) {
- return -EINVAL;
- }
-
- return 0;
-
-}
-
-static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- struct hdmi_cm cm;
- const struct hdmi_config *t;
-
- mutex_lock(&hdmi.lock);
-
- cm = hdmi_get_code(timings);
- hdmi.ip_data.cfg.cm = cm;
-
- t = hdmi_get_timings();
- if (t != NULL) {
- hdmi.ip_data.cfg = *t;
-
- dispc_set_tv_pclk(t->timings.pixel_clock * 1000);
- }
-
- mutex_unlock(&hdmi.lock);
-}
-
-static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- const struct hdmi_config *cfg;
-
- cfg = hdmi_get_timings();
- if (cfg == NULL)
- cfg = &vesa_timings[0];
-
- memcpy(timings, &cfg->timings, sizeof(cfg->timings));
-}
-
-static void hdmi_dump_regs(struct seq_file *s)
-{
- mutex_lock(&hdmi.lock);
-
- if (hdmi_runtime_get()) {
- mutex_unlock(&hdmi.lock);
- return;
- }
-
- hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s);
- hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s);
- hdmi.ip_data.ops->dump_phy(&hdmi.ip_data, s);
- hdmi.ip_data.ops->dump_core(&hdmi.ip_data, s);
-
- hdmi_runtime_put();
- mutex_unlock(&hdmi.lock);
-}
-
-static int read_edid(u8 *buf, int len)
-{
- int r;
-
- mutex_lock(&hdmi.lock);
-
- r = hdmi_runtime_get();
- BUG_ON(r);
-
- r = hdmi.ip_data.ops->read_edid(&hdmi.ip_data, buf, len);
-
- hdmi_runtime_put();
- mutex_unlock(&hdmi.lock);
-
- return r;
-}
-
-static int hdmi_display_enable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *out = &hdmi.output;
- int r = 0;
-
- DSSDBG("ENTER hdmi_display_enable\n");
-
- mutex_lock(&hdmi.lock);
-
- if (out == NULL || out->manager == NULL) {
- DSSERR("failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err0;
- }
-
- r = hdmi_power_on_full(dssdev);
- if (r) {
- DSSERR("failed to power on device\n");
- goto err0;
- }
-
- mutex_unlock(&hdmi.lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static void hdmi_display_disable(struct omap_dss_device *dssdev)
-{
- DSSDBG("Enter hdmi_display_disable\n");
-
- mutex_lock(&hdmi.lock);
-
- hdmi_power_off_full(dssdev);
-
- mutex_unlock(&hdmi.lock);
-}
-
-static int hdmi_core_enable(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- DSSDBG("ENTER omapdss_hdmi_core_enable\n");
-
- mutex_lock(&hdmi.lock);
-
- r = hdmi_power_on_core(dssdev);
- if (r) {
- DSSERR("failed to power on device\n");
- goto err0;
- }
-
- mutex_unlock(&hdmi.lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static void hdmi_core_disable(struct omap_dss_device *dssdev)
-{
- DSSDBG("Enter omapdss_hdmi_core_disable\n");
-
- mutex_lock(&hdmi.lock);
-
- hdmi_power_off_core(dssdev);
-
- mutex_unlock(&hdmi.lock);
-}
-
-static int hdmi_get_clocks(struct platform_device *pdev)
-{
- struct clk *clk;
-
- clk = devm_clk_get(&pdev->dev, "sys_clk");
- if (IS_ERR(clk)) {
- DSSERR("can't get sys_clk\n");
- return PTR_ERR(clk);
- }
-
- hdmi.sys_clk = clk;
-
- return 0;
-}
-
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
-{
- u32 deep_color;
- bool deep_color_correct = false;
- u32 pclk = hdmi.ip_data.cfg.timings.pixel_clock;
-
- if (n == NULL || cts == NULL)
- return -EINVAL;
-
- /* TODO: When implemented, query deep color mode here. */
- deep_color = 100;
-
- /*
- * When using deep color, the default N value (as in the HDMI
- * specification) yields a non-integer CTS. Hence, we
- * modify it while keeping the restrictions described in
- * section 7.2.1 of the HDMI 1.4a specification.
- */
- switch (sample_freq) {
- case 32000:
- case 48000:
- case 96000:
- case 192000:
- if (deep_color == 125)
- if (pclk == 27027 || pclk == 74250)
- deep_color_correct = true;
- if (deep_color == 150)
- if (pclk == 27027)
- deep_color_correct = true;
- break;
- case 44100:
- case 88200:
- case 176400:
- if (deep_color == 125)
- if (pclk == 27027)
- deep_color_correct = true;
- break;
- default:
- return -EINVAL;
- }
-
- if (deep_color_correct) {
- switch (sample_freq) {
- case 32000:
- *n = 8192;
- break;
- case 44100:
- *n = 12544;
- break;
- case 48000:
- *n = 8192;
- break;
- case 88200:
- *n = 25088;
- break;
- case 96000:
- *n = 16384;
- break;
- case 176400:
- *n = 50176;
- break;
- case 192000:
- *n = 32768;
- break;
- default:
- return -EINVAL;
- }
- } else {
- switch (sample_freq) {
- case 32000:
- *n = 4096;
- break;
- case 44100:
- *n = 6272;
- break;
- case 48000:
- *n = 6144;
- break;
- case 88200:
- *n = 12544;
- break;
- case 96000:
- *n = 12288;
- break;
- case 176400:
- *n = 25088;
- break;
- case 192000:
- *n = 24576;
- break;
- default:
- return -EINVAL;
- }
- }
- /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
- *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
-
- return 0;
-}
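/*
 * Worked example for the N/CTS computation above (illustrative, no deep
 * color, i.e. deep_color = 100): for a 48 kHz stream over the CEA code 16
 * timing (pixel_clock = 148500 kHz), N = 6144 and
 * CTS = 148500 * (6144 / 128) * 100 / (48000 / 10)
 *     = 148500 * 48 * 100 / 4800 = 148500,
 * which matches the usual CTS = f_TMDS * N / (128 * fs) with f_TMDS in Hz.
 */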
-
-static bool hdmi_mode_has_audio(void)
-{
- if (hdmi.ip_data.cfg.cm.mode == HDMI_HDMI)
- return true;
- else
- return false;
-}
-
-#endif
-
-static int hdmi_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
-{
- struct omap_overlay_manager *mgr;
- int r;
-
- dss_init_hdmi_ip_ops(&hdmi.ip_data, omapdss_get_version());
-
- r = hdmi_init_regulator();
- if (r)
- return r;
-
- mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
- if (!mgr)
- return -ENODEV;
-
- r = dss_mgr_connect(mgr, dssdev);
- if (r)
- return r;
-
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dst->name);
- dss_mgr_disconnect(mgr, dssdev);
- return r;
- }
-
- return 0;
-}
-
-static void hdmi_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
-{
- WARN_ON(dst != dssdev->dst);
-
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
-
- if (dssdev->manager)
- dss_mgr_disconnect(dssdev->manager, dssdev);
-}
-
-static int hdmi_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- bool need_enable;
- int r;
-
- need_enable = hdmi.core_enabled == false;
-
- if (need_enable) {
- r = hdmi_core_enable(dssdev);
- if (r)
- return r;
- }
-
- r = read_edid(edid, len);
-
- if (need_enable)
- hdmi_core_disable(dssdev);
-
- return r;
-}
-
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-static int hdmi_audio_enable(struct omap_dss_device *dssdev)
-{
- int r;
-
- mutex_lock(&hdmi.lock);
-
- if (!hdmi_mode_has_audio()) {
- r = -EPERM;
- goto err;
- }
-
-
- r = hdmi.ip_data.ops->audio_enable(&hdmi.ip_data);
- if (r)
- goto err;
-
- mutex_unlock(&hdmi.lock);
- return 0;
-
-err:
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static void hdmi_audio_disable(struct omap_dss_device *dssdev)
-{
- hdmi.ip_data.ops->audio_disable(&hdmi.ip_data);
-}
-
-static int hdmi_audio_start(struct omap_dss_device *dssdev)
-{
- return hdmi.ip_data.ops->audio_start(&hdmi.ip_data);
-}
-
-static void hdmi_audio_stop(struct omap_dss_device *dssdev)
-{
- hdmi.ip_data.ops->audio_stop(&hdmi.ip_data);
-}
-
-static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
-{
- bool r;
-
- mutex_lock(&hdmi.lock);
-
- r = hdmi_mode_has_audio();
-
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static int hdmi_audio_config(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio)
-{
- int r;
-
- mutex_lock(&hdmi.lock);
-
- if (!hdmi_mode_has_audio()) {
- r = -EPERM;
- goto err;
- }
-
- r = hdmi.ip_data.ops->audio_config(&hdmi.ip_data, audio);
- if (r)
- goto err;
-
- mutex_unlock(&hdmi.lock);
- return 0;
-
-err:
- mutex_unlock(&hdmi.lock);
- return r;
-}
-#else
-static int hdmi_audio_enable(struct omap_dss_device *dssdev)
-{
- return -EPERM;
-}
-
-static void hdmi_audio_disable(struct omap_dss_device *dssdev)
-{
-}
-
-static int hdmi_audio_start(struct omap_dss_device *dssdev)
-{
- return -EPERM;
-}
-
-static void hdmi_audio_stop(struct omap_dss_device *dssdev)
-{
-}
-
-static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
-{
- return false;
-}
-
-static int hdmi_audio_config(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio)
-{
- return -EPERM;
-}
-#endif
-
-static const struct omapdss_hdmi_ops hdmi_ops = {
- .connect = hdmi_connect,
- .disconnect = hdmi_disconnect,
-
- .enable = hdmi_display_enable,
- .disable = hdmi_display_disable,
-
- .check_timings = hdmi_display_check_timing,
- .set_timings = hdmi_display_set_timing,
- .get_timings = hdmi_display_get_timings,
-
- .read_edid = hdmi_read_edid,
-
- .audio_enable = hdmi_audio_enable,
- .audio_disable = hdmi_audio_disable,
- .audio_start = hdmi_audio_start,
- .audio_stop = hdmi_audio_stop,
- .audio_supported = hdmi_audio_supported,
- .audio_config = hdmi_audio_config,
-};
-
-static void hdmi_init_output(struct platform_device *pdev)
-{
- struct omap_dss_device *out = &hdmi.output;
-
- out->dev = &pdev->dev;
- out->id = OMAP_DSS_OUTPUT_HDMI;
- out->output_type = OMAP_DISPLAY_TYPE_HDMI;
- out->name = "hdmi.0";
- out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops.hdmi = &hdmi_ops;
- out->owner = THIS_MODULE;
-
- omapdss_register_output(out);
-}
-
-static void __exit hdmi_uninit_output(struct platform_device *pdev)
-{
- struct omap_dss_device *out = &hdmi.output;
-
- omapdss_unregister_output(out);
-}
-
-/* HDMI HW IP initialisation */
-static int omapdss_hdmihw_probe(struct platform_device *pdev)
-{
- struct resource *res;
- int r;
-
- hdmi.pdev = pdev;
-
- mutex_init(&hdmi.lock);
- mutex_init(&hdmi.ip_data.lock);
-
- res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
-
- /* Base address taken from platform */
- hdmi.ip_data.base_wp = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hdmi.ip_data.base_wp))
- return PTR_ERR(hdmi.ip_data.base_wp);
-
- hdmi.ip_data.irq = platform_get_irq(pdev, 0);
- if (hdmi.ip_data.irq < 0) {
- DSSERR("platform_get_irq failed\n");
- return -ENODEV;
- }
-
- r = hdmi_get_clocks(pdev);
- if (r) {
- DSSERR("can't get clocks\n");
- return r;
- }
-
- pm_runtime_enable(&pdev->dev);
-
- hdmi.ip_data.core_sys_offset = HDMI_CORE_SYS;
- hdmi.ip_data.core_av_offset = HDMI_CORE_AV;
- hdmi.ip_data.pll_offset = HDMI_PLLCTRL;
- hdmi.ip_data.phy_offset = HDMI_PHY;
-
- hdmi_init_output(pdev);
-
- dss_debugfs_create_file("hdmi", hdmi_dump_regs);
-
- return 0;
-}
-
-static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
-{
- hdmi_uninit_output(pdev);
-
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
-static int hdmi_runtime_suspend(struct device *dev)
-{
- clk_disable_unprepare(hdmi.sys_clk);
-
- dispc_runtime_put();
-
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- int r;
-
- r = dispc_runtime_get();
- if (r < 0)
- return r;
-
- clk_prepare_enable(hdmi.sys_clk);
-
- return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- .runtime_suspend = hdmi_runtime_suspend,
- .runtime_resume = hdmi_runtime_resume,
-};
-
-static struct platform_driver omapdss_hdmihw_driver = {
- .probe = omapdss_hdmihw_probe,
- .remove = __exit_p(omapdss_hdmihw_remove),
- .driver = {
- .name = "omapdss_hdmi",
- .owner = THIS_MODULE,
- .pm = &hdmi_pm_ops,
- },
-};
-
-int __init hdmi_init_platform_driver(void)
-{
- return platform_driver_register(&omapdss_hdmihw_driver);
-}
-
-void __exit hdmi_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omapdss_hdmihw_driver);
-}
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/hdmi.h
new file mode 100644
index 000000000000..b0493768a5d7
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi.h
@@ -0,0 +1,444 @@
+/*
+ * HDMI driver definition for TI OMAP4 Processor.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HDMI_H
+#define _HDMI_H
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <video/omapdss.h>
+
+#include "dss.h"
+
+/* HDMI Wrapper */
+
+#define HDMI_WP_REVISION 0x0
+#define HDMI_WP_SYSCONFIG 0x10
+#define HDMI_WP_IRQSTATUS_RAW 0x24
+#define HDMI_WP_IRQSTATUS 0x28
+#define HDMI_WP_IRQENABLE_SET 0x2C
+#define HDMI_WP_IRQENABLE_CLR 0x30
+#define HDMI_WP_IRQWAKEEN 0x34
+#define HDMI_WP_PWR_CTRL 0x40
+#define HDMI_WP_DEBOUNCE 0x44
+#define HDMI_WP_VIDEO_CFG 0x50
+#define HDMI_WP_VIDEO_SIZE 0x60
+#define HDMI_WP_VIDEO_TIMING_H 0x68
+#define HDMI_WP_VIDEO_TIMING_V 0x6C
+#define HDMI_WP_WP_CLK 0x70
+#define HDMI_WP_AUDIO_CFG 0x80
+#define HDMI_WP_AUDIO_CFG2 0x84
+#define HDMI_WP_AUDIO_CTRL 0x88
+#define HDMI_WP_AUDIO_DATA 0x8C
+
+/* HDMI WP IRQ flags */
+
+#define HDMI_IRQ_OCP_TIMEOUT (1 << 4)
+#define HDMI_IRQ_AUDIO_FIFO_UNDERFLOW (1 << 8)
+#define HDMI_IRQ_AUDIO_FIFO_OVERFLOW (1 << 9)
+#define HDMI_IRQ_AUDIO_FIFO_SAMPLE_REQ (1 << 10)
+#define HDMI_IRQ_VIDEO_VSYNC (1 << 16)
+#define HDMI_IRQ_VIDEO_FRAME_DONE (1 << 17)
+#define HDMI_IRQ_PHY_LINE5V_ASSERT (1 << 24)
+#define HDMI_IRQ_LINK_CONNECT (1 << 25)
+#define HDMI_IRQ_LINK_DISCONNECT (1 << 26)
+#define HDMI_IRQ_PLL_LOCK (1 << 29)
+#define HDMI_IRQ_PLL_UNLOCK (1 << 30)
+#define HDMI_IRQ_PLL_RECAL (1 << 31)
+
+/* HDMI PLL */
+
+#define PLLCTRL_PLL_CONTROL 0x0
+#define PLLCTRL_PLL_STATUS 0x4
+#define PLLCTRL_PLL_GO 0x8
+#define PLLCTRL_CFG1 0xC
+#define PLLCTRL_CFG2 0x10
+#define PLLCTRL_CFG3 0x14
+#define PLLCTRL_SSC_CFG1 0x18
+#define PLLCTRL_SSC_CFG2 0x1C
+#define PLLCTRL_CFG4 0x20
+
+/* HDMI PHY */
+
+#define HDMI_TXPHY_TX_CTRL 0x0
+#define HDMI_TXPHY_DIGITAL_CTRL 0x4
+#define HDMI_TXPHY_POWER_CTRL 0x8
+#define HDMI_TXPHY_PAD_CFG_CTRL 0xC
+
+enum hdmi_pll_pwr {
+ HDMI_PLLPWRCMD_ALLOFF = 0,
+ HDMI_PLLPWRCMD_PLLONLY = 1,
+ HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
+ HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
+};
+
+enum hdmi_phy_pwr {
+ HDMI_PHYPWRCMD_OFF = 0,
+ HDMI_PHYPWRCMD_LDOON = 1,
+ HDMI_PHYPWRCMD_TXON = 2
+};
+
+enum hdmi_core_hdmi_dvi {
+ HDMI_DVI = 0,
+ HDMI_HDMI = 1
+};
+
+enum hdmi_clk_refsel {
+ HDMI_REFSEL_PCLK = 0,
+ HDMI_REFSEL_REF1 = 1,
+ HDMI_REFSEL_REF2 = 2,
+ HDMI_REFSEL_SYSCLK = 3
+};
+
+enum hdmi_packing_mode {
+ HDMI_PACK_10b_RGB_YUV444 = 0,
+ HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
+ HDMI_PACK_20b_YUV422 = 2,
+ HDMI_PACK_ALREADYPACKED = 7
+};
+
+enum hdmi_stereo_channels {
+ HDMI_AUDIO_STEREO_NOCHANNELS = 0,
+ HDMI_AUDIO_STEREO_ONECHANNEL = 1,
+ HDMI_AUDIO_STEREO_TWOCHANNELS = 2,
+ HDMI_AUDIO_STEREO_THREECHANNELS = 3,
+ HDMI_AUDIO_STEREO_FOURCHANNELS = 4
+};
+
+enum hdmi_audio_type {
+ HDMI_AUDIO_TYPE_LPCM = 0,
+ HDMI_AUDIO_TYPE_IEC = 1
+};
+
+enum hdmi_audio_justify {
+ HDMI_AUDIO_JUSTIFY_LEFT = 0,
+ HDMI_AUDIO_JUSTIFY_RIGHT = 1
+};
+
+enum hdmi_audio_sample_order {
+ HDMI_AUDIO_SAMPLE_RIGHT_FIRST = 0,
+ HDMI_AUDIO_SAMPLE_LEFT_FIRST = 1
+};
+
+enum hdmi_audio_samples_perword {
+ HDMI_AUDIO_ONEWORD_ONESAMPLE = 0,
+ HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1
+};
+
+enum hdmi_audio_sample_size {
+ HDMI_AUDIO_SAMPLE_16BITS = 0,
+ HDMI_AUDIO_SAMPLE_24BITS = 1
+};
+
+enum hdmi_audio_transf_mode {
+ HDMI_AUDIO_TRANSF_DMA = 0,
+ HDMI_AUDIO_TRANSF_IRQ = 1
+};
+
+enum hdmi_audio_blk_strt_end_sig {
+ HDMI_AUDIO_BLOCK_SIG_STARTEND_ON = 0,
+ HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF = 1
+};
+
+enum hdmi_core_audio_layout {
+ HDMI_AUDIO_LAYOUT_2CH = 0,
+ HDMI_AUDIO_LAYOUT_8CH = 1
+};
+
+enum hdmi_core_cts_mode {
+ HDMI_AUDIO_CTS_MODE_HW = 0,
+ HDMI_AUDIO_CTS_MODE_SW = 1
+};
+
+enum hdmi_audio_mclk_mode {
+ HDMI_AUDIO_MCLK_128FS = 0,
+ HDMI_AUDIO_MCLK_256FS = 1,
+ HDMI_AUDIO_MCLK_384FS = 2,
+ HDMI_AUDIO_MCLK_512FS = 3,
+ HDMI_AUDIO_MCLK_768FS = 4,
+ HDMI_AUDIO_MCLK_1024FS = 5,
+ HDMI_AUDIO_MCLK_1152FS = 6,
+ HDMI_AUDIO_MCLK_192FS = 7
+};
+
+/* INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions */
+enum hdmi_core_infoframe {
+ HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
+ HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
+ HDMI_INFOFRAME_AVI_DB1Y_YUV444 = 2,
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0,
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON = 1,
+ HDMI_INFOFRAME_AVI_DB1B_NO = 0,
+ HDMI_INFOFRAME_AVI_DB1B_VERT = 1,
+ HDMI_INFOFRAME_AVI_DB1B_HORI = 2,
+ HDMI_INFOFRAME_AVI_DB1B_VERTHORI = 3,
+ HDMI_INFOFRAME_AVI_DB1S_0 = 0,
+ HDMI_INFOFRAME_AVI_DB1S_1 = 1,
+ HDMI_INFOFRAME_AVI_DB1S_2 = 2,
+ HDMI_INFOFRAME_AVI_DB2C_NO = 0,
+ HDMI_INFOFRAME_AVI_DB2C_ITU601 = 1,
+ HDMI_INFOFRAME_AVI_DB2C_ITU709 = 2,
+ HDMI_INFOFRAME_AVI_DB2C_EC_EXTENDED = 3,
+ HDMI_INFOFRAME_AVI_DB2M_NO = 0,
+ HDMI_INFOFRAME_AVI_DB2M_43 = 1,
+ HDMI_INFOFRAME_AVI_DB2M_169 = 2,
+ HDMI_INFOFRAME_AVI_DB2R_SAME = 8,
+ HDMI_INFOFRAME_AVI_DB2R_43 = 9,
+ HDMI_INFOFRAME_AVI_DB2R_169 = 10,
+ HDMI_INFOFRAME_AVI_DB2R_149 = 11,
+ HDMI_INFOFRAME_AVI_DB3ITC_NO = 0,
+ HDMI_INFOFRAME_AVI_DB3ITC_YES = 1,
+ HDMI_INFOFRAME_AVI_DB3EC_XVYUV601 = 0,
+ HDMI_INFOFRAME_AVI_DB3EC_XVYUV709 = 1,
+ HDMI_INFOFRAME_AVI_DB3Q_DEFAULT = 0,
+ HDMI_INFOFRAME_AVI_DB3Q_LR = 1,
+ HDMI_INFOFRAME_AVI_DB3Q_FR = 2,
+ HDMI_INFOFRAME_AVI_DB3SC_NO = 0,
+ HDMI_INFOFRAME_AVI_DB3SC_HORI = 1,
+ HDMI_INFOFRAME_AVI_DB3SC_VERT = 2,
+ HDMI_INFOFRAME_AVI_DB3SC_HORIVERT = 3,
+ HDMI_INFOFRAME_AVI_DB5PR_NO = 0,
+ HDMI_INFOFRAME_AVI_DB5PR_2 = 1,
+ HDMI_INFOFRAME_AVI_DB5PR_3 = 2,
+ HDMI_INFOFRAME_AVI_DB5PR_4 = 3,
+ HDMI_INFOFRAME_AVI_DB5PR_5 = 4,
+ HDMI_INFOFRAME_AVI_DB5PR_6 = 5,
+ HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
+ HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
+ HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
+ HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
+};
+
+struct hdmi_cm {
+ int code;
+ int mode;
+};
+
+struct hdmi_video_format {
+ enum hdmi_packing_mode packing_mode;
+ u32 y_res; /* Lines per panel */
+ u32 x_res; /* Pixels per line */
+};
+
+struct hdmi_config {
+ struct omap_video_timings timings;
+ struct hdmi_cm cm;
+};
+
+/* HDMI PLL structure */
+struct hdmi_pll_info {
+ u16 regn;
+ u16 regm;
+ u32 regmf;
+ u16 regm2;
+ u16 regsd;
+ u16 dcofreq;
+ enum hdmi_clk_refsel refsel;
+};
+
+struct hdmi_audio_format {
+ enum hdmi_stereo_channels stereo_channels;
+ u8 active_chnnls_msk;
+ enum hdmi_audio_type type;
+ enum hdmi_audio_justify justification;
+ enum hdmi_audio_sample_order sample_order;
+ enum hdmi_audio_samples_perword samples_per_word;
+ enum hdmi_audio_sample_size sample_size;
+ enum hdmi_audio_blk_strt_end_sig en_sig_blk_strt_end;
+};
+
+struct hdmi_audio_dma {
+ u8 transfer_size;
+ u8 block_size;
+ enum hdmi_audio_transf_mode mode;
+ u16 fifo_threshold;
+};
+
+struct hdmi_core_audio_i2s_config {
+ u8 in_length_bits;
+ u8 justification;
+ u8 sck_edge_mode;
+ u8 vbit;
+ u8 direction;
+ u8 shift;
+ u8 active_sds;
+};
+
+struct hdmi_core_audio_config {
+ struct hdmi_core_audio_i2s_config i2s_cfg;
+ struct snd_aes_iec958 *iec60958_cfg;
+ bool fs_override;
+ u32 n;
+ u32 cts;
+ u32 aud_par_busclk;
+ enum hdmi_core_audio_layout layout;
+ enum hdmi_core_cts_mode cts_mode;
+ bool use_mclk;
+ enum hdmi_audio_mclk_mode mclk_mode;
+ bool en_acr_pkt;
+ bool en_dsd_audio;
+ bool en_parallel_aud_input;
+ bool en_spdif;
+};
+
+/*
+ * Refer to section 8.2 in HDMI 1.3 specification for
+ * details about infoframe databytes
+ */
+struct hdmi_core_infoframe_avi {
+ /* Y0, Y1 rgb,yCbCr */
+ u8 db1_format;
+ /* A0 Active information Present */
+ u8 db1_active_info;
+ /* B0, B1 Bar info data valid */
+ u8 db1_bar_info_dv;
+ /* S0, S1 scan information */
+ u8 db1_scan_info;
+ /* C0, C1 colorimetry */
+ u8 db2_colorimetry;
+ /* M0, M1 Aspect ratio (4:3, 16:9) */
+ u8 db2_aspect_ratio;
+ /* R0...R3 Active format aspect ratio */
+ u8 db2_active_fmt_ar;
+ /* ITC IT content. */
+ u8 db3_itc;
+ /* EC0, EC1, EC2 Extended colorimetry */
+ u8 db3_ec;
+ /* Q1, Q0 Quantization range */
+ u8 db3_q_range;
+ /* SC1, SC0 Non-uniform picture scaling */
+ u8 db3_nup_scaling;
+ /* VIC0..6 Video format identification */
+ u8 db4_videocode;
+ /* PR0..PR3 Pixel repetition factor */
+ u8 db5_pixel_repeat;
+ /* Line number end of top bar */
+ u16 db6_7_line_eoftop;
+ /* Line number start of bottom bar */
+ u16 db8_9_line_sofbottom;
+ /* Pixel number end of left bar */
+ u16 db10_11_pixel_eofleft;
+ /* Pixel number start of right bar */
+ u16 db12_13_pixel_sofright;
+};
+
+struct hdmi_wp_data {
+ void __iomem *base;
+};
+
+struct hdmi_pll_data {
+ void __iomem *base;
+
+ struct hdmi_pll_info info;
+};
+
+struct hdmi_phy_data {
+ void __iomem *base;
+
+ int irq;
+};
+
+struct hdmi_core_data {
+ void __iomem *base;
+
+ struct hdmi_core_infoframe_avi avi_cfg;
+};
+
+static inline void hdmi_write_reg(void __iomem *base_addr, const u16 idx,
+ u32 val)
+{
+ __raw_writel(val, base_addr + idx);
+}
+
+static inline u32 hdmi_read_reg(void __iomem *base_addr, const u16 idx)
+{
+ return __raw_readl(base_addr + idx);
+}
+
+#define REG_FLD_MOD(base, idx, val, start, end) \
+ hdmi_write_reg(base, idx, FLD_MOD(hdmi_read_reg(base, idx),\
+ val, start, end))
+#define REG_GET(base, idx, start, end) \
+ FLD_GET(hdmi_read_reg(base, idx), start, end)
+
+static inline int hdmi_wait_for_bit_change(void __iomem *base_addr,
+ const u16 idx, int b2, int b1, u32 val)
+{
+ u32 t = 0;
+ while (val != REG_GET(base_addr, idx, b2, b1)) {
+ udelay(1);
+ if (t++ > 10000)
+ return !val;
+ }
+ return val;
+}
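/*
 * Usage sketch for the helpers above (patterned on hdmi_set_phy_pwr() in the
 * removed ti_hdmi_4xxx_ip.c): write a command into one bit field, then poll
 * the matching status field, for example
 *
 *	REG_FLD_MOD(wp->base, HDMI_WP_PWR_CTRL, HDMI_PHYPWRCMD_LDOON, 7, 6);
 *	if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 5, 4,
 *			HDMI_PHYPWRCMD_LDOON) != HDMI_PHYPWRCMD_LDOON)
 *		return -ETIMEDOUT;
 *
 * hdmi_wait_for_bit_change() returns the requested value on success, and its
 * logical negation once roughly 10000 one-microsecond polls have elapsed.
 */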
+
+/* HDMI wrapper funcs */
+int hdmi_wp_video_start(struct hdmi_wp_data *wp);
+void hdmi_wp_video_stop(struct hdmi_wp_data *wp);
+void hdmi_wp_dump(struct hdmi_wp_data *wp, struct seq_file *s);
+u32 hdmi_wp_get_irqstatus(struct hdmi_wp_data *wp);
+void hdmi_wp_set_irqstatus(struct hdmi_wp_data *wp, u32 irqstatus);
+void hdmi_wp_set_irqenable(struct hdmi_wp_data *wp, u32 mask);
+void hdmi_wp_clear_irqenable(struct hdmi_wp_data *wp, u32 mask);
+int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val);
+int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val);
+void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
+ struct hdmi_video_format *video_fmt);
+void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
+ struct omap_video_timings *timings);
+void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
+ struct omap_video_timings *timings);
+void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
+ struct omap_video_timings *timings, struct hdmi_config *param);
+int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp);
+
+/* HDMI PLL funcs */
+int hdmi_pll_enable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp);
+void hdmi_pll_disable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp);
+void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s);
+void hdmi_pll_compute(struct hdmi_pll_data *pll, unsigned long clkin, int phy);
+int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll);
+
+/* HDMI PHY funcs */
+int hdmi_phy_enable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp,
+ struct hdmi_config *cfg);
+void hdmi_phy_disable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp);
+void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s);
+int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy);
+
+/* HDMI common funcs */
+const struct hdmi_config *hdmi_default_timing(void);
+const struct hdmi_config *hdmi_get_timings(int mode, int code);
+struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing);
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts);
+int hdmi_wp_audio_enable(struct hdmi_wp_data *wp, bool enable);
+int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable);
+void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
+ struct hdmi_audio_format *aud_fmt);
+void hdmi_wp_audio_config_dma(struct hdmi_wp_data *wp,
+ struct hdmi_audio_dma *aud_dma);
+static inline bool hdmi_mode_has_audio(int mode)
+{
+ return mode == HDMI_HDMI ? true : false;
+}
+#endif
+#endif
diff --git a/drivers/video/omap2/dss/hdmi4.c b/drivers/video/omap2/dss/hdmi4.c
new file mode 100644
index 000000000000..e14009614338
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi4.c
@@ -0,0 +1,696 @@
+/*
+ * HDMI interface DSS driver for TI's OMAP4 family of SoCs.
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Yong Zhi
+ * Mythri pk <mythripk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "HDMI"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <video/omapdss.h>
+
+#include "hdmi4_core.h"
+#include "dss.h"
+#include "dss_features.h"
+
+static struct {
+ struct mutex lock;
+ struct platform_device *pdev;
+
+ struct hdmi_wp_data wp;
+ struct hdmi_pll_data pll;
+ struct hdmi_phy_data phy;
+ struct hdmi_core_data core;
+
+ struct hdmi_config cfg;
+
+ struct clk *sys_clk;
+ struct regulator *vdda_hdmi_dac_reg;
+
+ bool core_enabled;
+
+ struct omap_dss_device output;
+} hdmi;
+
+static int hdmi_runtime_get(void)
+{
+ int r;
+
+ DSSDBG("hdmi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&hdmi.pdev->dev);
+ WARN_ON(r < 0);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static void hdmi_runtime_put(void)
+{
+ int r;
+
+ DSSDBG("hdmi_runtime_put\n");
+
+ r = pm_runtime_put_sync(&hdmi.pdev->dev);
+ WARN_ON(r < 0 && r != -ENOSYS);
+}
+
+static int hdmi_init_regulator(void)
+{
+ struct regulator *reg;
+
+ if (hdmi.vdda_hdmi_dac_reg != NULL)
+ return 0;
+
+ reg = devm_regulator_get(&hdmi.pdev->dev, "vdda_hdmi_dac");
+
+ /* DT HACK: try VDAC to make omapdss work for o4 sdp/panda */
+ if (IS_ERR(reg))
+ reg = devm_regulator_get(&hdmi.pdev->dev, "VDAC");
+
+ if (IS_ERR(reg)) {
+ DSSERR("can't get VDDA_HDMI_DAC regulator\n");
+ return PTR_ERR(reg);
+ }
+
+ hdmi.vdda_hdmi_dac_reg = reg;
+
+ return 0;
+}
+
+static int hdmi_power_on_core(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ r = regulator_enable(hdmi.vdda_hdmi_dac_reg);
+ if (r)
+ return r;
+
+ r = hdmi_runtime_get();
+ if (r)
+ goto err_runtime_get;
+
+ /* Make selection of HDMI in DSS */
+ dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
+
+ hdmi.core_enabled = true;
+
+ return 0;
+
+err_runtime_get:
+ regulator_disable(hdmi.vdda_hdmi_dac_reg);
+
+ return r;
+}
+
+static void hdmi_power_off_core(struct omap_dss_device *dssdev)
+{
+ hdmi.core_enabled = false;
+
+ hdmi_runtime_put();
+ regulator_disable(hdmi.vdda_hdmi_dac_reg);
+}
+
+static int hdmi_power_on_full(struct omap_dss_device *dssdev)
+{
+ int r;
+ struct omap_video_timings *p;
+ struct omap_overlay_manager *mgr = hdmi.output.manager;
+ unsigned long phy;
+
+ r = hdmi_power_on_core(dssdev);
+ if (r)
+ return r;
+
+ dss_mgr_disable(mgr);
+
+ p = &hdmi.cfg.timings;
+
+ DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
+
+ phy = p->pixel_clock;
+
+ hdmi_pll_compute(&hdmi.pll, clk_get_rate(hdmi.sys_clk), phy);
+
+ hdmi_wp_video_stop(&hdmi.wp);
+
+ /* config the PLL and PHY (hdmi_set_pll_pwr first) */
+ r = hdmi_pll_enable(&hdmi.pll, &hdmi.wp);
+ if (r) {
+ DSSDBG("Failed to lock PLL\n");
+ goto err_pll_enable;
+ }
+
+ r = hdmi_phy_enable(&hdmi.phy, &hdmi.wp, &hdmi.cfg);
+ if (r) {
+ DSSDBG("Failed to start PHY\n");
+ goto err_phy_enable;
+ }
+
+ hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
+
+ /* bypass TV gamma table */
+ dispc_enable_gamma_table(0);
+
+ /* tv size */
+ dss_mgr_set_timings(mgr, p);
+
+ r = hdmi_wp_video_start(&hdmi.wp);
+ if (r)
+ goto err_vid_enable;
+
+ r = dss_mgr_enable(mgr);
+ if (r)
+ goto err_mgr_enable;
+
+ return 0;
+
+err_mgr_enable:
+ hdmi_wp_video_stop(&hdmi.wp);
+err_vid_enable:
+ hdmi_phy_disable(&hdmi.phy, &hdmi.wp);
+err_phy_enable:
+ hdmi_pll_disable(&hdmi.pll, &hdmi.wp);
+err_pll_enable:
+ hdmi_power_off_core(dssdev);
+ return -EIO;
+}
+
+static void hdmi_power_off_full(struct omap_dss_device *dssdev)
+{
+ struct omap_overlay_manager *mgr = hdmi.output.manager;
+
+ dss_mgr_disable(mgr);
+
+ hdmi_wp_video_stop(&hdmi.wp);
+ hdmi_phy_disable(&hdmi.phy, &hdmi.wp);
+ hdmi_pll_disable(&hdmi.pll, &hdmi.wp);
+
+ hdmi_power_off_core(dssdev);
+}
+
+static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct hdmi_cm cm;
+
+ cm = hdmi_get_code(timings);
+ if (cm.code == -1)
+ return -EINVAL;
+
+ return 0;
+
+}
+
+static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct hdmi_cm cm;
+ const struct hdmi_config *t;
+
+ mutex_lock(&hdmi.lock);
+
+ cm = hdmi_get_code(timings);
+ hdmi.cfg.cm = cm;
+
+ t = hdmi_get_timings(cm.mode, cm.code);
+ if (t != NULL) {
+ hdmi.cfg = *t;
+
+ dispc_set_tv_pclk(t->timings.pixel_clock * 1000);
+ }
+
+ mutex_unlock(&hdmi.lock);
+}
+
+static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ const struct hdmi_config *cfg;
+ struct hdmi_cm cm = hdmi.cfg.cm;
+
+ cfg = hdmi_get_timings(cm.mode, cm.code);
+ if (cfg == NULL)
+ cfg = hdmi_default_timing();
+
+ memcpy(timings, &cfg->timings, sizeof(cfg->timings));
+}
+
+static void hdmi_dump_regs(struct seq_file *s)
+{
+ mutex_lock(&hdmi.lock);
+
+ if (hdmi_runtime_get()) {
+ mutex_unlock(&hdmi.lock);
+ return;
+ }
+
+ hdmi_wp_dump(&hdmi.wp, s);
+ hdmi_pll_dump(&hdmi.pll, s);
+ hdmi_phy_dump(&hdmi.phy, s);
+ hdmi4_core_dump(&hdmi.core, s);
+
+ hdmi_runtime_put();
+ mutex_unlock(&hdmi.lock);
+}
+
+static int read_edid(u8 *buf, int len)
+{
+ int r;
+
+ mutex_lock(&hdmi.lock);
+
+ r = hdmi_runtime_get();
+ BUG_ON(r);
+
+ r = hdmi4_read_edid(&hdmi.core, buf, len);
+
+ hdmi_runtime_put();
+ mutex_unlock(&hdmi.lock);
+
+ return r;
+}
+
+static int hdmi_display_enable(struct omap_dss_device *dssdev)
+{
+ struct omap_dss_device *out = &hdmi.output;
+ int r = 0;
+
+ DSSDBG("ENTER hdmi_display_enable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ if (out == NULL || out->manager == NULL) {
+ DSSERR("failed to enable display: no output/manager\n");
+ r = -ENODEV;
+ goto err0;
+ }
+
+ r = hdmi_power_on_full(dssdev);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err0:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static void hdmi_display_disable(struct omap_dss_device *dssdev)
+{
+ DSSDBG("Enter hdmi_display_disable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ hdmi_power_off_full(dssdev);
+
+ mutex_unlock(&hdmi.lock);
+}
+
+static int hdmi_core_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ DSSDBG("ENTER omapdss_hdmi_core_enable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ r = hdmi_power_on_core(dssdev);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err0:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static void hdmi_core_disable(struct omap_dss_device *dssdev)
+{
+ DSSDBG("Enter omapdss_hdmi_core_disable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ hdmi_power_off_core(dssdev);
+
+ mutex_unlock(&hdmi.lock);
+}
+
+static int hdmi_get_clocks(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ clk = devm_clk_get(&pdev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ return PTR_ERR(clk);
+ }
+
+ hdmi.sys_clk = clk;
+
+ return 0;
+}
+
+static int hdmi_connect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ struct omap_overlay_manager *mgr;
+ int r;
+
+ r = hdmi_init_regulator();
+ if (r)
+ return r;
+
+ mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
+ if (!mgr)
+ return -ENODEV;
+
+ r = dss_mgr_connect(mgr, dssdev);
+ if (r)
+ return r;
+
+ r = omapdss_output_set_device(dssdev, dst);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dst->name);
+ dss_mgr_disconnect(mgr, dssdev);
+ return r;
+ }
+
+ return 0;
+}
+
+static void hdmi_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
+{
+ WARN_ON(dst != dssdev->dst);
+
+ if (dst != dssdev->dst)
+ return;
+
+ omapdss_output_unset_device(dssdev);
+
+ if (dssdev->manager)
+ dss_mgr_disconnect(dssdev->manager, dssdev);
+}
+
+static int hdmi_read_edid(struct omap_dss_device *dssdev,
+ u8 *edid, int len)
+{
+ bool need_enable;
+ int r;
+
+ need_enable = hdmi.core_enabled == false;
+
+ if (need_enable) {
+ r = hdmi_core_enable(dssdev);
+ if (r)
+ return r;
+ }
+
+ r = read_edid(edid, len);
+
+ if (need_enable)
+ hdmi_core_disable(dssdev);
+
+ return r;
+}
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static int hdmi_audio_enable(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ mutex_lock(&hdmi.lock);
+
+ if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) {
+ r = -EPERM;
+ goto err;
+ }
+
+ r = hdmi_wp_audio_enable(&hdmi.wp, true);
+ if (r)
+ goto err;
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static void hdmi_audio_disable(struct omap_dss_device *dssdev)
+{
+ hdmi_wp_audio_enable(&hdmi.wp, false);
+}
+
+static int hdmi_audio_start(struct omap_dss_device *dssdev)
+{
+ return hdmi4_audio_start(&hdmi.core, &hdmi.wp);
+}
+
+static void hdmi_audio_stop(struct omap_dss_device *dssdev)
+{
+ hdmi4_audio_stop(&hdmi.core, &hdmi.wp);
+}
+
+static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
+{
+ bool r;
+
+ mutex_lock(&hdmi.lock);
+
+ r = hdmi_mode_has_audio(hdmi.cfg.cm.mode);
+
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static int hdmi_audio_config(struct omap_dss_device *dssdev,
+ struct omap_dss_audio *audio)
+{
+ int r;
+ u32 pclk = hdmi.cfg.timings.pixel_clock;
+
+ mutex_lock(&hdmi.lock);
+
+ if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) {
+ r = -EPERM;
+ goto err;
+ }
+
+ r = hdmi4_audio_config(&hdmi.core, &hdmi.wp, audio, pclk);
+ if (r)
+ goto err;
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+#else
+static int hdmi_audio_enable(struct omap_dss_device *dssdev)
+{
+ return -EPERM;
+}
+
+static void hdmi_audio_disable(struct omap_dss_device *dssdev)
+{
+}
+
+static int hdmi_audio_start(struct omap_dss_device *dssdev)
+{
+ return -EPERM;
+}
+
+static void hdmi_audio_stop(struct omap_dss_device *dssdev)
+{
+}
+
+static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
+{
+ return false;
+}
+
+static int hdmi_audio_config(struct omap_dss_device *dssdev,
+ struct omap_dss_audio *audio)
+{
+ return -EPERM;
+}
+#endif
+
+static const struct omapdss_hdmi_ops hdmi_ops = {
+ .connect = hdmi_connect,
+ .disconnect = hdmi_disconnect,
+
+ .enable = hdmi_display_enable,
+ .disable = hdmi_display_disable,
+
+ .check_timings = hdmi_display_check_timing,
+ .set_timings = hdmi_display_set_timing,
+ .get_timings = hdmi_display_get_timings,
+
+ .read_edid = hdmi_read_edid,
+
+ .audio_enable = hdmi_audio_enable,
+ .audio_disable = hdmi_audio_disable,
+ .audio_start = hdmi_audio_start,
+ .audio_stop = hdmi_audio_stop,
+ .audio_supported = hdmi_audio_supported,
+ .audio_config = hdmi_audio_config,
+};
+
+static void hdmi_init_output(struct platform_device *pdev)
+{
+ struct omap_dss_device *out = &hdmi.output;
+
+ out->dev = &pdev->dev;
+ out->id = OMAP_DSS_OUTPUT_HDMI;
+ out->output_type = OMAP_DISPLAY_TYPE_HDMI;
+ out->name = "hdmi.0";
+ out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
+ out->ops.hdmi = &hdmi_ops;
+ out->owner = THIS_MODULE;
+
+ omapdss_register_output(out);
+}
+
+static void __exit hdmi_uninit_output(struct platform_device *pdev)
+{
+ struct omap_dss_device *out = &hdmi.output;
+
+ omapdss_unregister_output(out);
+}
+
+/* HDMI HW IP initialisation */
+static int omapdss_hdmihw_probe(struct platform_device *pdev)
+{
+ int r;
+
+ hdmi.pdev = pdev;
+
+ mutex_init(&hdmi.lock);
+
+ r = hdmi_wp_init(pdev, &hdmi.wp);
+ if (r)
+ return r;
+
+ r = hdmi_pll_init(pdev, &hdmi.pll);
+ if (r)
+ return r;
+
+ r = hdmi_phy_init(pdev, &hdmi.phy);
+ if (r)
+ return r;
+
+ r = hdmi4_core_init(pdev, &hdmi.core);
+ if (r)
+ return r;
+
+ r = hdmi_get_clocks(pdev);
+ if (r) {
+ DSSERR("can't get clocks\n");
+ return r;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ hdmi_init_output(pdev);
+
+ dss_debugfs_create_file("hdmi", hdmi_dump_regs);
+
+ return 0;
+}
+
+static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
+{
+ hdmi_uninit_output(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ clk_disable_unprepare(hdmi.sys_clk);
+
+ dispc_runtime_put();
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dispc_runtime_get();
+ if (r < 0)
+ return r;
+
+ clk_prepare_enable(hdmi.sys_clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ .runtime_suspend = hdmi_runtime_suspend,
+ .runtime_resume = hdmi_runtime_resume,
+};
+
+static struct platform_driver omapdss_hdmihw_driver = {
+ .probe = omapdss_hdmihw_probe,
+ .remove = __exit_p(omapdss_hdmihw_remove),
+ .driver = {
+ .name = "omapdss_hdmi",
+ .owner = THIS_MODULE,
+ .pm = &hdmi_pm_ops,
+ },
+};
+
+int __init hdmi4_init_platform_driver(void)
+{
+ return platform_driver_register(&omapdss_hdmihw_driver);
+}
+
+void __exit hdmi4_uninit_platform_driver(void)
+{
+ platform_driver_unregister(&omapdss_hdmihw_driver);
+}
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/hdmi4_core.c
index 3dfe00956a4f..5dd5e5489b41 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/hdmi4_core.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
+#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
@@ -33,304 +34,19 @@
#include <sound/asoundef.h>
#endif
-#include "ti_hdmi_4xxx_ip.h"
-#include "dss.h"
+#include "hdmi4_core.h"
#include "dss_features.h"
-#define HDMI_IRQ_LINK_CONNECT (1 << 25)
-#define HDMI_IRQ_LINK_DISCONNECT (1 << 26)
+#define HDMI_CORE_AV 0x500
-static inline void hdmi_write_reg(void __iomem *base_addr,
- const u16 idx, u32 val)
+static inline void __iomem *hdmi_av_base(struct hdmi_core_data *core)
{
- __raw_writel(val, base_addr + idx);
+ return core->base + HDMI_CORE_AV;
}
-static inline u32 hdmi_read_reg(void __iomem *base_addr,
- const u16 idx)
+static int hdmi_core_ddc_init(struct hdmi_core_data *core)
{
- return __raw_readl(base_addr + idx);
-}
-
-static inline void __iomem *hdmi_wp_base(struct hdmi_ip_data *ip_data)
-{
- return ip_data->base_wp;
-}
-
-static inline void __iomem *hdmi_phy_base(struct hdmi_ip_data *ip_data)
-{
- return ip_data->base_wp + ip_data->phy_offset;
-}
-
-static inline void __iomem *hdmi_pll_base(struct hdmi_ip_data *ip_data)
-{
- return ip_data->base_wp + ip_data->pll_offset;
-}
-
-static inline void __iomem *hdmi_av_base(struct hdmi_ip_data *ip_data)
-{
- return ip_data->base_wp + ip_data->core_av_offset;
-}
-
-static inline void __iomem *hdmi_core_sys_base(struct hdmi_ip_data *ip_data)
-{
- return ip_data->base_wp + ip_data->core_sys_offset;
-}
-
-static inline int hdmi_wait_for_bit_change(void __iomem *base_addr,
- const u16 idx,
- int b2, int b1, u32 val)
-{
- u32 t = 0;
- while (val != REG_GET(base_addr, idx, b2, b1)) {
- udelay(1);
- if (t++ > 10000)
- return !val;
- }
- return val;
-}
-
-static int hdmi_pll_init(struct hdmi_ip_data *ip_data)
-{
- u32 r;
- void __iomem *pll_base = hdmi_pll_base(ip_data);
- struct hdmi_pll_info *fmt = &ip_data->pll_data;
-
- /* PLL start always use manual mode */
- REG_FLD_MOD(pll_base, PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
-
- r = hdmi_read_reg(pll_base, PLLCTRL_CFG1);
- r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1_PLL_REGM */
- r = FLD_MOD(r, fmt->regn - 1, 8, 1); /* CFG1_PLL_REGN */
-
- hdmi_write_reg(pll_base, PLLCTRL_CFG1, r);
-
- r = hdmi_read_reg(pll_base, PLLCTRL_CFG2);
-
- r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
- r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
- r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
- r = FLD_MOD(r, fmt->refsel, 22, 21); /* REFSEL */
-
- if (fmt->dcofreq) {
- /* divider programming for frequencies beyond 1000 MHz */
- REG_FLD_MOD(pll_base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
- r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
- } else {
- r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
- }
-
- hdmi_write_reg(pll_base, PLLCTRL_CFG2, r);
-
- r = hdmi_read_reg(pll_base, PLLCTRL_CFG4);
- r = FLD_MOD(r, fmt->regm2, 24, 18);
- r = FLD_MOD(r, fmt->regmf, 17, 0);
-
- hdmi_write_reg(pll_base, PLLCTRL_CFG4, r);
-
- /* go now */
- REG_FLD_MOD(pll_base, PLLCTRL_PLL_GO, 0x1, 0, 0);
-
- /* wait for bit change */
- if (hdmi_wait_for_bit_change(pll_base, PLLCTRL_PLL_GO,
- 0, 0, 1) != 1) {
- pr_err("PLL GO bit not set\n");
- return -ETIMEDOUT;
- }
-
- /* Wait till the lock bit is set in PLL status */
- if (hdmi_wait_for_bit_change(pll_base,
- PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
- pr_err("cannot lock PLL\n");
- pr_err("CFG1 0x%x\n",
- hdmi_read_reg(pll_base, PLLCTRL_CFG1));
- pr_err("CFG2 0x%x\n",
- hdmi_read_reg(pll_base, PLLCTRL_CFG2));
- pr_err("CFG4 0x%x\n",
- hdmi_read_reg(pll_base, PLLCTRL_CFG4));
- return -ETIMEDOUT;
- }
-
- pr_debug("PLL locked!\n");
-
- return 0;
-}
-
-/* PHY_PWR_CMD */
-static int hdmi_set_phy_pwr(struct hdmi_ip_data *ip_data, enum hdmi_phy_pwr val)
-{
- /* Return if already the state */
- if (REG_GET(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, 5, 4) == val)
- return 0;
-
- /* Command for power control of HDMI PHY */
- REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 7, 6);
-
- /* Status of the power control of HDMI PHY */
- if (hdmi_wait_for_bit_change(hdmi_wp_base(ip_data),
- HDMI_WP_PWR_CTRL, 5, 4, val) != val) {
- pr_err("Failed to set PHY power mode to %d\n", val);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-/* PLL_PWR_CMD */
-static int hdmi_set_pll_pwr(struct hdmi_ip_data *ip_data, enum hdmi_pll_pwr val)
-{
- /* Command for power control of HDMI PLL */
- REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 3, 2);
-
- /* wait till PHY_PWR_STATUS is set */
- if (hdmi_wait_for_bit_change(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL,
- 1, 0, val) != val) {
- pr_err("Failed to set PLL_PWR_STATUS\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int hdmi_pll_reset(struct hdmi_ip_data *ip_data)
-{
- /* SYSRESET controlled by power FSM */
- REG_FLD_MOD(hdmi_pll_base(ip_data), PLLCTRL_PLL_CONTROL, 0x0, 3, 3);
-
- /* READ 0x0 reset is in progress */
- if (hdmi_wait_for_bit_change(hdmi_pll_base(ip_data),
- PLLCTRL_PLL_STATUS, 0, 0, 1) != 1) {
- pr_err("Failed to sysreset PLL\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data)
-{
- u16 r = 0;
-
- r = hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
- if (r)
- return r;
-
- r = hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
- if (r)
- return r;
-
- r = hdmi_pll_reset(ip_data);
- if (r)
- return r;
-
- r = hdmi_pll_init(ip_data);
- if (r)
- return r;
-
- return 0;
-}
-
-void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data)
-{
- hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
-}
-
-static irqreturn_t hdmi_irq_handler(int irq, void *data)
-{
- struct hdmi_ip_data *ip_data = data;
- void __iomem *wp_base = hdmi_wp_base(ip_data);
- u32 irqstatus;
-
- irqstatus = hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
- hdmi_write_reg(wp_base, HDMI_WP_IRQSTATUS, irqstatus);
- /* flush posted write */
- hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
-
- if ((irqstatus & HDMI_IRQ_LINK_CONNECT) &&
- irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
- /*
- * If we get both connect and disconnect interrupts at the same
- * time, turn off the PHY, clear interrupts, and restart, which
- * raises connect interrupt if a cable is connected, or nothing
- * if cable is not connected.
- */
- hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
-
- hdmi_write_reg(wp_base, HDMI_WP_IRQSTATUS,
- HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
- /* flush posted write */
- hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
-
- hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
- } else if (irqstatus & HDMI_IRQ_LINK_CONNECT) {
- hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
- } else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
- hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
- }
-
- return IRQ_HANDLED;
-}
-
-int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
-{
- u16 r = 0;
- void __iomem *phy_base = hdmi_phy_base(ip_data);
-
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQENABLE_CLR,
- 0xffffffff);
-
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQSTATUS,
- HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
-
- r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
- if (r)
- return r;
-
- /*
- * Read address 0 in order to get the SCP reset done completed
- * Dummy access performed to make sure reset is done
- */
- hdmi_read_reg(phy_base, HDMI_TXPHY_TX_CTRL);
-
- /*
- * Write to phy address 0 to configure the clock
- * use HFBITCLK write HDMI_TXPHY_TX_CONTROL_FREQOUT field
- */
- REG_FLD_MOD(phy_base, HDMI_TXPHY_TX_CTRL, 0x1, 31, 30);
-
- /* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
- hdmi_write_reg(phy_base, HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
-
- /* Setup max LDO voltage */
- REG_FLD_MOD(phy_base, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
-
- /* Write to phy address 3 to change the polarity control */
- REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
-
- r = request_threaded_irq(ip_data->irq, NULL, hdmi_irq_handler,
- IRQF_ONESHOT, "OMAP HDMI", ip_data);
- if (r) {
- DSSERR("HDMI IRQ request failed\n");
- hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
- return r;
- }
-
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQENABLE_SET,
- HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
-
- return 0;
-}
-
-void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data)
-{
- free_irq(ip_data->irq, ip_data);
-
- hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
-}
-
-static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data)
-{
- void __iomem *base = hdmi_core_sys_base(ip_data);
+ void __iomem *base = core->base;
/* Turn on CLK for DDC */
REG_FLD_MOD(base, HDMI_CORE_AV_DPD, 0x7, 2, 0);
@@ -370,10 +86,10 @@ static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data)
return 0;
}
-static int hdmi_core_ddc_edid(struct hdmi_ip_data *ip_data,
+static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
u8 *pedid, int ext)
{
- void __iomem *base = hdmi_core_sys_base(ip_data);
+ void __iomem *base = core->base;
u32 i;
char checksum;
u32 offset = 0;
@@ -452,26 +168,25 @@ static int hdmi_core_ddc_edid(struct hdmi_ip_data *ip_data,
return 0;
}
-int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data,
- u8 *edid, int len)
+int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
{
int r, l;
if (len < 128)
return -EINVAL;
- r = hdmi_core_ddc_init(ip_data);
+ r = hdmi_core_ddc_init(core);
if (r)
return r;
- r = hdmi_core_ddc_edid(ip_data, edid, 0);
+ r = hdmi_core_ddc_edid(core, edid, 0);
if (r)
return r;
l = 128;
if (len >= 128 * 2 && edid[0x7e] > 0) {
- r = hdmi_core_ddc_edid(ip_data, edid + 0x80, 1);
+ r = hdmi_core_ddc_edid(core, edid + 0x80, 1);
if (r)
return r;
l += 128;
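
An aside on the EDID read path above (not part of the patch): hdmi4_read_edid() always fetches the 128-byte base block, and fetches one extension block only when the caller's buffer holds at least 256 bytes and the extension-count byte at offset 0x7e of block 0 is non-zero. A minimal sketch of that length accounting, with a made-up helper name:

#include <linux/types.h>

/* Hypothetical helper: how many EDID bytes a read like the one above fills. */
static int edid_bytes_filled(const u8 *edid, int buf_len)
{
	int len = 128;			/* base block, always read */

	/* edid[0x7e] holds the extension block count from block 0 */
	if (buf_len >= 128 * 2 && edid[0x7e] > 0)
		len += 128;		/* first extension block */

	return len;
}
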
@@ -508,7 +223,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
avi_cfg->db3_nup_scaling = 0;
avi_cfg->db4_videocode = 0;
avi_cfg->db5_pixel_repeat = 0;
- avi_cfg->db6_7_line_eoftop = 0 ;
+ avi_cfg->db6_7_line_eoftop = 0;
avi_cfg->db8_9_line_sofbottom = 0;
avi_cfg->db10_11_pixel_eofleft = 0;
avi_cfg->db12_13_pixel_sofright = 0;
@@ -524,38 +239,39 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
repeat_cfg->generic_pkt_repeat = 0;
}
-static void hdmi_core_powerdown_disable(struct hdmi_ip_data *ip_data)
+static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
{
pr_debug("Enter hdmi_core_powerdown_disable\n");
- REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_CTRL1, 0x0, 0, 0);
+ REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0);
}
-static void hdmi_core_swreset_release(struct hdmi_ip_data *ip_data)
+static void hdmi_core_swreset_release(struct hdmi_core_data *core)
{
pr_debug("Enter hdmi_core_swreset_release\n");
- REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_SYS_SRST, 0x0, 0, 0);
+ REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x0, 0, 0);
}
-static void hdmi_core_swreset_assert(struct hdmi_ip_data *ip_data)
+static void hdmi_core_swreset_assert(struct hdmi_core_data *core)
{
pr_debug("Enter hdmi_core_swreset_assert\n");
- REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_SYS_SRST, 0x1, 0, 0);
+ REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x1, 0, 0);
}
/* HDMI_CORE_VIDEO_CONFIG */
-static void hdmi_core_video_config(struct hdmi_ip_data *ip_data,
+static void hdmi_core_video_config(struct hdmi_core_data *core,
struct hdmi_core_video_config *cfg)
{
u32 r = 0;
- void __iomem *core_sys_base = hdmi_core_sys_base(ip_data);
+ void __iomem *core_sys_base = core->base;
+ void __iomem *core_av_base = hdmi_av_base(core);
/* sys_ctrl1 default configuration not tunable */
- r = hdmi_read_reg(core_sys_base, HDMI_CORE_CTRL1);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC, 5, 5);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC, 4, 4);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_BSEL_24BITBUS, 2, 2);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_EDGE_RISINGEDGE, 1, 1);
- hdmi_write_reg(core_sys_base, HDMI_CORE_CTRL1, r);
+ r = hdmi_read_reg(core_sys_base, HDMI_CORE_SYS_SYS_CTRL1);
+ r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_VEN_FOLLOWVSYNC, 5, 5);
+ r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_HEN_FOLLOWHSYNC, 4, 4);
+ r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_BSEL_24BITBUS, 2, 2);
+ r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_EDGE_RISINGEDGE, 1, 1);
+ hdmi_write_reg(core_sys_base, HDMI_CORE_SYS_SYS_CTRL1, r);
REG_FLD_MOD(core_sys_base,
HDMI_CORE_SYS_VID_ACEN, cfg->ip_bus_width, 7, 6);
@@ -574,23 +290,23 @@ static void hdmi_core_video_config(struct hdmi_ip_data *ip_data,
hdmi_write_reg(core_sys_base, HDMI_CORE_SYS_VID_MODE, r);
/* HDMI_Ctrl */
- r = hdmi_read_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_HDMI_CTRL);
+ r = hdmi_read_reg(core_av_base, HDMI_CORE_AV_HDMI_CTRL);
r = FLD_MOD(r, cfg->deep_color_pkt, 6, 6);
r = FLD_MOD(r, cfg->pkt_mode, 5, 3);
r = FLD_MOD(r, cfg->hdmi_dvi, 0, 0);
- hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_HDMI_CTRL, r);
+ hdmi_write_reg(core_av_base, HDMI_CORE_AV_HDMI_CTRL, r);
/* TMDS_CTRL */
REG_FLD_MOD(core_sys_base,
HDMI_CORE_SYS_TMDS_CTRL, cfg->tclk_sel_clkmult, 6, 5);
}
-static void hdmi_core_aux_infoframe_avi_config(struct hdmi_ip_data *ip_data)
+static void hdmi_core_aux_infoframe_avi_config(struct hdmi_core_data *core)
{
u32 val;
char sum = 0, checksum = 0;
- void __iomem *av_base = hdmi_av_base(ip_data);
- struct hdmi_core_infoframe_avi info_avi = ip_data->avi_cfg;
+ void __iomem *av_base = hdmi_av_base(core);
+ struct hdmi_core_infoframe_avi info_avi = core->avi_cfg;
sum += 0x82 + 0x002 + 0x00D;
hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_TYPE, 0x082);
@@ -661,160 +377,64 @@ static void hdmi_core_aux_infoframe_avi_config(struct hdmi_ip_data *ip_data)
hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_CHSUM, checksum);
}
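
The AVI infoframe checksum written above follows the generic HDMI InfoFrame rule: the header bytes (type 0x82, version 0x02, length 0x0D), every payload byte, and the checksum itself must sum to zero modulo 256, which is why the function seeds sum with 0x82 + 0x002 + 0x00D. A self-contained sketch of that rule (illustration only, not part of the patch):

#include <linux/types.h>

/* AVI InfoFrame checksum: header + payload + checksum == 0 (mod 256). */
static u8 avi_infoframe_checksum(const u8 *payload, int len)
{
	u8 sum = 0x82 + 0x02 + 0x0D;	/* type, version, length */
	int i;

	for (i = 0; i < len; i++)
		sum += payload[i];

	return 0x100 - sum;		/* u8 arithmetic wraps modulo 256 */
}
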
-static void hdmi_core_av_packet_config(struct hdmi_ip_data *ip_data,
+static void hdmi_core_av_packet_config(struct hdmi_core_data *core,
struct hdmi_core_packet_enable_repeat repeat_cfg)
{
/* enable/repeat the infoframe */
- hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_PB_CTRL1,
+ hdmi_write_reg(hdmi_av_base(core), HDMI_CORE_AV_PB_CTRL1,
(repeat_cfg.audio_pkt << 5) |
(repeat_cfg.audio_pkt_repeat << 4) |
(repeat_cfg.avi_infoframe << 1) |
(repeat_cfg.avi_infoframe_repeat));
/* enable/repeat the packet */
- hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_PB_CTRL2,
+ hdmi_write_reg(hdmi_av_base(core), HDMI_CORE_AV_PB_CTRL2,
(repeat_cfg.gen_cntrl_pkt << 3) |
(repeat_cfg.gen_cntrl_pkt_repeat << 2) |
(repeat_cfg.generic_pkt << 1) |
(repeat_cfg.generic_pkt_repeat));
}
-static void hdmi_wp_init(struct omap_video_timings *timings,
- struct hdmi_video_format *video_fmt)
-{
- pr_debug("Enter hdmi_wp_init\n");
-
- timings->hbp = 0;
- timings->hfp = 0;
- timings->hsw = 0;
- timings->vbp = 0;
- timings->vfp = 0;
- timings->vsw = 0;
-
- video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
- video_fmt->y_res = 0;
- video_fmt->x_res = 0;
-
-}
-
-int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data)
-{
- REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, true, 31, 31);
- return 0;
-}
-
-void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data)
-{
- REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, false, 31, 31);
-}
-
-static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param)
-{
- pr_debug("Enter hdmi_wp_video_init_format\n");
-
- video_fmt->y_res = param->timings.y_res;
- video_fmt->x_res = param->timings.x_res;
-
- timings->hbp = param->timings.hbp;
- timings->hfp = param->timings.hfp;
- timings->hsw = param->timings.hsw;
- timings->vbp = param->timings.vbp;
- timings->vfp = param->timings.vfp;
- timings->vsw = param->timings.vsw;
-}
-
-static void hdmi_wp_video_config_format(struct hdmi_ip_data *ip_data,
- struct hdmi_video_format *video_fmt)
-{
- u32 l = 0;
-
- REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG,
- video_fmt->packing_mode, 10, 8);
-
- l |= FLD_VAL(video_fmt->y_res, 31, 16);
- l |= FLD_VAL(video_fmt->x_res, 15, 0);
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_SIZE, l);
-}
-
-static void hdmi_wp_video_config_interface(struct hdmi_ip_data *ip_data)
-{
- u32 r;
- bool vsync_pol, hsync_pol;
- pr_debug("Enter hdmi_wp_video_config_interface\n");
-
- vsync_pol = ip_data->cfg.timings.vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
- hsync_pol = ip_data->cfg.timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
-
- r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG);
- r = FLD_MOD(r, vsync_pol, 7, 7);
- r = FLD_MOD(r, hsync_pol, 6, 6);
- r = FLD_MOD(r, ip_data->cfg.timings.interlace, 3, 3);
- r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, r);
-}
-
-static void hdmi_wp_video_config_timing(struct hdmi_ip_data *ip_data,
- struct omap_video_timings *timings)
-{
- u32 timing_h = 0;
- u32 timing_v = 0;
-
- pr_debug("Enter hdmi_wp_video_config_timing\n");
-
- timing_h |= FLD_VAL(timings->hbp, 31, 20);
- timing_h |= FLD_VAL(timings->hfp, 19, 8);
- timing_h |= FLD_VAL(timings->hsw, 7, 0);
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_TIMING_H, timing_h);
-
- timing_v |= FLD_VAL(timings->vbp, 31, 20);
- timing_v |= FLD_VAL(timings->vfp, 19, 8);
- timing_v |= FLD_VAL(timings->vsw, 7, 0);
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_TIMING_V, timing_v);
-}
-
-void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data)
+void hdmi4_configure(struct hdmi_core_data *core,
+ struct hdmi_wp_data *wp, struct hdmi_config *cfg)
{
/* HDMI */
struct omap_video_timings video_timing;
struct hdmi_video_format video_format;
/* HDMI core */
- struct hdmi_core_infoframe_avi *avi_cfg = &ip_data->avi_cfg;
+ struct hdmi_core_infoframe_avi *avi_cfg = &core->avi_cfg;
struct hdmi_core_video_config v_core_cfg;
struct hdmi_core_packet_enable_repeat repeat_cfg;
- struct hdmi_config *cfg = &ip_data->cfg;
-
- hdmi_wp_init(&video_timing, &video_format);
hdmi_core_init(&v_core_cfg, avi_cfg, &repeat_cfg);
- hdmi_wp_video_init_format(&video_format, &video_timing, cfg);
+ hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
- hdmi_wp_video_config_timing(ip_data, &video_timing);
+ hdmi_wp_video_config_timing(wp, &video_timing);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
- hdmi_wp_video_config_format(ip_data, &video_format);
+ hdmi_wp_video_config_format(wp, &video_format);
- hdmi_wp_video_config_interface(ip_data);
+ hdmi_wp_video_config_interface(wp, &video_timing);
/*
* configure core video part
* set software reset in the core
*/
- hdmi_core_swreset_assert(ip_data);
+ hdmi_core_swreset_assert(core);
/* power down off */
- hdmi_core_powerdown_disable(ip_data);
+ hdmi_core_powerdown_disable(core);
v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
v_core_cfg.hdmi_dvi = cfg->cm.mode;
- hdmi_core_video_config(ip_data, &v_core_cfg);
+ hdmi_core_video_config(core, &v_core_cfg);
/* release software reset in the core */
- hdmi_core_swreset_release(ip_data);
+ hdmi_core_swreset_release(core);
/*
* configure packet
@@ -839,7 +459,7 @@ void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data)
avi_cfg->db10_11_pixel_eofleft = 0;
avi_cfg->db12_13_pixel_sofright = 0;
- hdmi_core_aux_infoframe_avi_config(ip_data);
+ hdmi_core_aux_infoframe_avi_config(core);
/* enable/repeat the infoframe */
repeat_cfg.avi_infoframe = HDMI_PACKETENABLE;
@@ -847,65 +467,30 @@ void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data)
/* wakeup */
repeat_cfg.audio_pkt = HDMI_PACKETENABLE;
repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON;
- hdmi_core_av_packet_config(ip_data, repeat_cfg);
-}
-
-void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
-{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r,\
- hdmi_read_reg(hdmi_wp_base(ip_data), r))
-
- DUMPREG(HDMI_WP_REVISION);
- DUMPREG(HDMI_WP_SYSCONFIG);
- DUMPREG(HDMI_WP_IRQSTATUS_RAW);
- DUMPREG(HDMI_WP_IRQSTATUS);
- DUMPREG(HDMI_WP_PWR_CTRL);
- DUMPREG(HDMI_WP_IRQENABLE_SET);
- DUMPREG(HDMI_WP_VIDEO_CFG);
- DUMPREG(HDMI_WP_VIDEO_SIZE);
- DUMPREG(HDMI_WP_VIDEO_TIMING_H);
- DUMPREG(HDMI_WP_VIDEO_TIMING_V);
- DUMPREG(HDMI_WP_WP_CLK);
- DUMPREG(HDMI_WP_AUDIO_CFG);
- DUMPREG(HDMI_WP_AUDIO_CFG2);
- DUMPREG(HDMI_WP_AUDIO_CTRL);
- DUMPREG(HDMI_WP_AUDIO_DATA);
-}
-
-void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
-{
-#define DUMPPLL(r) seq_printf(s, "%-35s %08x\n", #r,\
- hdmi_read_reg(hdmi_pll_base(ip_data), r))
-
- DUMPPLL(PLLCTRL_PLL_CONTROL);
- DUMPPLL(PLLCTRL_PLL_STATUS);
- DUMPPLL(PLLCTRL_PLL_GO);
- DUMPPLL(PLLCTRL_CFG1);
- DUMPPLL(PLLCTRL_CFG2);
- DUMPPLL(PLLCTRL_CFG3);
- DUMPPLL(PLLCTRL_CFG4);
+ hdmi_core_av_packet_config(core, repeat_cfg);
}
-void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
+void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s)
{
int i;
#define CORE_REG(i, name) name(i)
#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
- hdmi_read_reg(hdmi_core_sys_base(ip_data), r))
+ hdmi_read_reg(core->base, r))
#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
- hdmi_read_reg(hdmi_av_base(ip_data), r))
+ hdmi_read_reg(hdmi_av_base(core), r))
#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
(i < 10) ? 32 - (int)strlen(#r) : 31 - (int)strlen(#r), " ", \
- hdmi_read_reg(hdmi_av_base(ip_data), CORE_REG(i, r)))
+ hdmi_read_reg(hdmi_av_base(core), CORE_REG(i, r)))
DUMPCORE(HDMI_CORE_SYS_VND_IDL);
DUMPCORE(HDMI_CORE_SYS_DEV_IDL);
DUMPCORE(HDMI_CORE_SYS_DEV_IDH);
DUMPCORE(HDMI_CORE_SYS_DEV_REV);
DUMPCORE(HDMI_CORE_SYS_SRST);
- DUMPCORE(HDMI_CORE_CTRL1);
+ DUMPCORE(HDMI_CORE_SYS_SYS_CTRL1);
DUMPCORE(HDMI_CORE_SYS_SYS_STAT);
+ DUMPCORE(HDMI_CORE_SYS_SYS_CTRL3);
DUMPCORE(HDMI_CORE_SYS_DE_DLY);
DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
DUMPCORE(HDMI_CORE_SYS_DE_TOP);
@@ -913,14 +498,58 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
DUMPCORE(HDMI_CORE_SYS_DE_LINL);
DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
+ DUMPCORE(HDMI_CORE_SYS_HRES_L);
+ DUMPCORE(HDMI_CORE_SYS_HRES_H);
+ DUMPCORE(HDMI_CORE_SYS_VRES_L);
+ DUMPCORE(HDMI_CORE_SYS_VRES_H);
+ DUMPCORE(HDMI_CORE_SYS_IADJUST);
+ DUMPCORE(HDMI_CORE_SYS_POLDETECT);
+ DUMPCORE(HDMI_CORE_SYS_HWIDTH1);
+ DUMPCORE(HDMI_CORE_SYS_HWIDTH2);
+ DUMPCORE(HDMI_CORE_SYS_VWIDTH);
+ DUMPCORE(HDMI_CORE_SYS_VID_CTRL);
DUMPCORE(HDMI_CORE_SYS_VID_ACEN);
DUMPCORE(HDMI_CORE_SYS_VID_MODE);
+ DUMPCORE(HDMI_CORE_SYS_VID_BLANK1);
+ DUMPCORE(HDMI_CORE_SYS_VID_BLANK3);
+ DUMPCORE(HDMI_CORE_SYS_VID_BLANK1);
+ DUMPCORE(HDMI_CORE_SYS_DC_HEADER);
+ DUMPCORE(HDMI_CORE_SYS_VID_DITHER);
+ DUMPCORE(HDMI_CORE_SYS_RGB2XVYCC_CT);
+ DUMPCORE(HDMI_CORE_SYS_R2Y_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_R2Y_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_G2Y_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_G2Y_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_B2Y_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_B2Y_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_R2CB_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_R2CB_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_G2CB_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_G2CB_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_B2CB_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_B2CB_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_R2CR_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_R2CR_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_G2CR_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_G2CR_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_B2CR_COEFF_LOW);
+ DUMPCORE(HDMI_CORE_SYS_B2CR_COEFF_UP);
+ DUMPCORE(HDMI_CORE_SYS_RGB_OFFSET_LOW);
+ DUMPCORE(HDMI_CORE_SYS_RGB_OFFSET_UP);
+ DUMPCORE(HDMI_CORE_SYS_Y_OFFSET_LOW);
+ DUMPCORE(HDMI_CORE_SYS_Y_OFFSET_UP);
+ DUMPCORE(HDMI_CORE_SYS_CBCR_OFFSET_LOW);
+ DUMPCORE(HDMI_CORE_SYS_CBCR_OFFSET_UP);
DUMPCORE(HDMI_CORE_SYS_INTR_STATE);
DUMPCORE(HDMI_CORE_SYS_INTR1);
DUMPCORE(HDMI_CORE_SYS_INTR2);
DUMPCORE(HDMI_CORE_SYS_INTR3);
DUMPCORE(HDMI_CORE_SYS_INTR4);
- DUMPCORE(HDMI_CORE_SYS_UMASK1);
+ DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK1);
+ DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK2);
+ DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK3);
+ DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK4);
+ DUMPCORE(HDMI_CORE_SYS_INTR_CTRL);
DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL);
DUMPCORE(HDMI_CORE_DDC_ADDR);
@@ -1009,60 +638,12 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
}
-void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
-{
-#define DUMPPHY(r) seq_printf(s, "%-35s %08x\n", #r,\
- hdmi_read_reg(hdmi_phy_base(ip_data), r))
-
- DUMPPHY(HDMI_TXPHY_TX_CTRL);
- DUMPPHY(HDMI_TXPHY_DIGITAL_CTRL);
- DUMPPHY(HDMI_TXPHY_POWER_CTRL);
- DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
-}
-
#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-static void ti_hdmi_4xxx_wp_audio_config_format(struct hdmi_ip_data *ip_data,
- struct hdmi_audio_format *aud_fmt)
-{
- u32 r;
-
- DSSDBG("Enter hdmi_wp_audio_config_format\n");
-
- r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG);
- r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
- r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
- r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
- r = FLD_MOD(r, aud_fmt->type, 4, 4);
- r = FLD_MOD(r, aud_fmt->justification, 3, 3);
- r = FLD_MOD(r, aud_fmt->sample_order, 2, 2);
- r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1);
- r = FLD_MOD(r, aud_fmt->sample_size, 0, 0);
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r);
-}
-
-static void ti_hdmi_4xxx_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
- struct hdmi_audio_dma *aud_dma)
-{
- u32 r;
-
- DSSDBG("Enter hdmi_wp_audio_config_dma\n");
-
- r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG2);
- r = FLD_MOD(r, aud_dma->transfer_size, 15, 8);
- r = FLD_MOD(r, aud_dma->block_size, 7, 0);
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG2, r);
-
- r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL);
- r = FLD_MOD(r, aud_dma->mode, 9, 9);
- r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0);
- hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r);
-}
-
-static void ti_hdmi_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
+static void hdmi_core_audio_config(struct hdmi_core_data *core,
struct hdmi_core_audio_config *cfg)
{
u32 r;
- void __iomem *av_base = hdmi_av_base(ip_data);
+ void __iomem *av_base = hdmi_av_base(core);
/*
* Parameters for generation of Audio Clock Recovery packets
@@ -1157,11 +738,11 @@ static void ti_hdmi_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5);
}
-static void ti_hdmi_4xxx_core_audio_infoframe_cfg(struct hdmi_ip_data *ip_data,
+static void hdmi_core_audio_infoframe_cfg(struct hdmi_core_data *core,
struct snd_cea_861_aud_if *info_aud)
{
u8 sum = 0, checksum = 0;
- void __iomem *av_base = hdmi_av_base(ip_data);
+ void __iomem *av_base = hdmi_av_base(core);
/*
* Set audio info frame type, version and length as
@@ -1207,20 +788,20 @@ static void ti_hdmi_4xxx_core_audio_infoframe_cfg(struct hdmi_ip_data *ip_data,
*/
}
-int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
- struct omap_dss_audio *audio)
+int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
+ struct omap_dss_audio *audio, u32 pclk)
{
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
- struct hdmi_core_audio_config core;
+ struct hdmi_core_audio_config acore;
int err, n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
- if (!audio || !audio->iec || !audio->cea || !ip_data)
+ if (!audio || !audio->iec || !audio->cea || !core)
return -EINVAL;
- core.iec60958_cfg = audio->iec;
+ acore.iec60958_cfg = audio->iec;
/*
* In the IEC-60958 status word, check if the audio sample word length
* is 16-bit as several optimizations can be performed in such case.
@@ -1231,22 +812,22 @@ int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
	/* I2S configuration. See Philips' specification */
if (word_length_16b)
- core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+ acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
else
- core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+ acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
/*
	 * The I2S input word length is twice the length given in the IEC-60958
* status word. If the word size is greater than
* 20 bits, increment by one.
*/
- core.i2s_cfg.in_length_bits = audio->iec->status[4]
+ acore.i2s_cfg.in_length_bits = audio->iec->status[4]
& IEC958_AES4_CON_WORDLEN;
if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)
- core.i2s_cfg.in_length_bits++;
- core.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
- core.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
- core.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
- core.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
+ acore.i2s_cfg.in_length_bits++;
+ acore.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
+ acore.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
+ acore.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
+ acore.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
/* convert sample frequency to a number */
switch (audio->iec->status[3] & IEC958_AES3_CON_FS) {
@@ -1275,23 +856,23 @@ int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
return -EINVAL;
}
- err = hdmi_compute_acr(fs_nr, &n, &cts);
+ err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
/* Audio clock regeneration settings */
- core.n = n;
- core.cts = cts;
+ acore.n = n;
+ acore.cts = cts;
if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
- core.aud_par_busclk = 0;
- core.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
- core.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
+ acore.aud_par_busclk = 0;
+ acore.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
+ acore.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
} else {
- core.aud_par_busclk = (((128 * 31) - 1) << 8);
- core.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
- core.use_mclk = true;
+ acore.aud_par_busclk = (((128 * 31) - 1) << 8);
+ acore.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
+ acore.use_mclk = true;
}
- if (core.use_mclk)
- core.mclk_mode = HDMI_AUDIO_MCLK_128FS;
+ if (acore.use_mclk)
+ acore.mclk_mode = HDMI_AUDIO_MCLK_128FS;
/* Audio channels settings */
channel_count = (audio->cea->db1_ct_cc &
@@ -1329,25 +910,25 @@ int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
*/
if (channel_count == 2) {
audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
- core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
- core.layout = HDMI_AUDIO_LAYOUT_2CH;
+ acore.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
+ acore.layout = HDMI_AUDIO_LAYOUT_2CH;
} else {
audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
- core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
+ acore.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
HDMI_AUDIO_I2S_SD3_EN;
- core.layout = HDMI_AUDIO_LAYOUT_8CH;
+ acore.layout = HDMI_AUDIO_LAYOUT_8CH;
}
- core.en_spdif = false;
+ acore.en_spdif = false;
/* use sample frequency from channel status word */
- core.fs_override = true;
+ acore.fs_override = true;
/* enable ACR packets */
- core.en_acr_pkt = true;
+ acore.en_acr_pkt = true;
/* disable direct streaming digital audio */
- core.en_dsd_audio = false;
+ acore.en_dsd_audio = false;
/* use parallel audio interface */
- core.en_parallel_aud_input = true;
+ acore.en_parallel_aud_input = true;
/* DMA settings */
if (word_length_16b)
@@ -1374,49 +955,37 @@ int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
/* configure DMA and audio FIFO format*/
- ti_hdmi_4xxx_wp_audio_config_dma(ip_data, &audio_dma);
- ti_hdmi_4xxx_wp_audio_config_format(ip_data, &audio_format);
+ hdmi_wp_audio_config_dma(wp, &audio_dma);
+ hdmi_wp_audio_config_format(wp, &audio_format);
/* configure the core*/
- ti_hdmi_4xxx_core_audio_config(ip_data, &core);
+ hdmi_core_audio_config(core, &acore);
/* configure CEA 861 audio infoframe*/
- ti_hdmi_4xxx_core_audio_infoframe_cfg(ip_data, audio->cea);
+ hdmi_core_audio_infoframe_cfg(core, audio->cea);
return 0;
}
-int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data)
+int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
{
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, true, 31, 31);
- return 0;
-}
+ REG_FLD_MOD(hdmi_av_base(core),
+ HDMI_CORE_AV_AUD_MODE, true, 0, 0);
-void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data)
-{
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, false, 31, 31);
-}
+ hdmi_wp_audio_core_req_enable(wp, true);
-int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data)
-{
- REG_FLD_MOD(hdmi_av_base(ip_data),
- HDMI_CORE_AV_AUD_MODE, true, 0, 0);
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, true, 30, 30);
return 0;
}
-void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data)
+void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
{
- REG_FLD_MOD(hdmi_av_base(ip_data),
+ REG_FLD_MOD(hdmi_av_base(core),
HDMI_CORE_AV_AUD_MODE, false, 0, 0);
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, false, 30, 30);
+
+ hdmi_wp_audio_core_req_enable(wp, false);
}
-int ti_hdmi_4xxx_audio_get_dma_port(u32 *offset, u32 *size)
+int hdmi4_audio_get_dma_port(u32 *offset, u32 *size)
{
if (!offset || !size)
return -EINVAL;
@@ -1424,4 +993,42 @@ int ti_hdmi_4xxx_audio_get_dma_port(u32 *offset, u32 *size)
*size = 4;
return 0;
}
+
#endif
+
+#define CORE_OFFSET 0x400
+#define CORE_SIZE 0xc00
+
+int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
+{
+ struct resource *res;
+ struct resource temp_res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_core");
+ if (!res) {
+ DSSDBG("can't get CORE mem resource by name\n");
+ /*
+		 * If hwmod/DT doesn't have the memory resource information
+		 * split into HDMI sub-blocks by name, try again by getting
+		 * the platform's first resource. This code will be removed
+		 * once the driver can get the mem resources by name.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get CORE mem resource\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start + CORE_OFFSET;
+ temp_res.end = temp_res.start + CORE_SIZE - 1;
+ res = &temp_res;
+ }
+
+ core->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!core->base) {
+ DSSERR("can't ioremap CORE\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
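
The named-resource lookup with an offset fallback used in hdmi4_core_init() above reappears almost verbatim for the PHY and PLL blocks later in this patch. A hedged sketch of how it could be factored into a single helper (hypothetical function, not in the patch):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Map an HDMI sub-block: by resource name if available, else by offset. */
static void __iomem *hdmi_ioremap_subblock(struct platform_device *pdev,
		const char *name, u32 offset, u32 size)
{
	struct resource *res;
	struct resource temp_res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (!res) {
		/* fall back to an offset into the first MEM resource */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return NULL;

		temp_res.start = res->start + offset;
		temp_res.end = temp_res.start + size - 1;
		res = &temp_res;
	}

	return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}
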
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h b/drivers/video/omap2/dss/hdmi4_core.h
index 6ef2f929a76d..bb646896fa82 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
+++ b/drivers/video/omap2/dss/hdmi4_core.h
@@ -1,7 +1,5 @@
/*
- * ti_hdmi_4xxx_ip.h
- *
- * HDMI header definition for DM81xx, DM38xx, TI OMAP4 etc processors.
+ * HDMI header definition for OMAP4 HDMI core IP
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
*
@@ -18,41 +16,22 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _HDMI_TI_4xxx_H_
-#define _HDMI_TI_4xxx_H_
-
-#include <linux/string.h>
-#include <video/omapdss.h>
-#include "ti_hdmi.h"
-
-/* HDMI Wrapper */
+#ifndef _HDMI4_CORE_H_
+#define _HDMI4_CORE_H_
-#define HDMI_WP_REVISION 0x0
-#define HDMI_WP_SYSCONFIG 0x10
-#define HDMI_WP_IRQSTATUS_RAW 0x24
-#define HDMI_WP_IRQSTATUS 0x28
-#define HDMI_WP_PWR_CTRL 0x40
-#define HDMI_WP_IRQENABLE_SET 0x2C
-#define HDMI_WP_IRQENABLE_CLR 0x30
-#define HDMI_WP_VIDEO_CFG 0x50
-#define HDMI_WP_VIDEO_SIZE 0x60
-#define HDMI_WP_VIDEO_TIMING_H 0x68
-#define HDMI_WP_VIDEO_TIMING_V 0x6C
-#define HDMI_WP_WP_CLK 0x70
-#define HDMI_WP_AUDIO_CFG 0x80
-#define HDMI_WP_AUDIO_CFG2 0x84
-#define HDMI_WP_AUDIO_CTRL 0x88
-#define HDMI_WP_AUDIO_DATA 0x8C
+#include "hdmi.h"
-/* HDMI IP Core System */
+/* OMAP4 HDMI IP Core System */
#define HDMI_CORE_SYS_VND_IDL 0x0
#define HDMI_CORE_SYS_DEV_IDL 0x8
#define HDMI_CORE_SYS_DEV_IDH 0xC
#define HDMI_CORE_SYS_DEV_REV 0x10
#define HDMI_CORE_SYS_SRST 0x14
-#define HDMI_CORE_CTRL1 0x20
+#define HDMI_CORE_SYS_SYS_CTRL1 0x20
#define HDMI_CORE_SYS_SYS_STAT 0x24
+#define HDMI_CORE_SYS_SYS_CTRL3 0x28
+#define HDMI_CORE_SYS_DCTL 0x34
#define HDMI_CORE_SYS_DE_DLY 0xC8
#define HDMI_CORE_SYS_DE_CTRL 0xCC
#define HDMI_CORE_SYS_DE_TOP 0xD0
@@ -60,20 +39,65 @@
#define HDMI_CORE_SYS_DE_CNTH 0xDC
#define HDMI_CORE_SYS_DE_LINL 0xE0
#define HDMI_CORE_SYS_DE_LINH_1 0xE4
+#define HDMI_CORE_SYS_HRES_L 0xE8
+#define HDMI_CORE_SYS_HRES_H 0xEC
+#define HDMI_CORE_SYS_VRES_L 0xF0
+#define HDMI_CORE_SYS_VRES_H 0xF4
+#define HDMI_CORE_SYS_IADJUST 0xF8
+#define HDMI_CORE_SYS_POLDETECT 0xFC
+#define HDMI_CORE_SYS_HWIDTH1 0x110
+#define HDMI_CORE_SYS_HWIDTH2 0x114
+#define HDMI_CORE_SYS_VWIDTH 0x11C
+#define HDMI_CORE_SYS_VID_CTRL 0x120
#define HDMI_CORE_SYS_VID_ACEN 0x124
#define HDMI_CORE_SYS_VID_MODE 0x128
+#define HDMI_CORE_SYS_VID_BLANK1 0x12C
+#define HDMI_CORE_SYS_VID_BLANK2 0x130
+#define HDMI_CORE_SYS_VID_BLANK3 0x134
+#define HDMI_CORE_SYS_DC_HEADER 0x138
+#define HDMI_CORE_SYS_VID_DITHER 0x13C
+#define HDMI_CORE_SYS_RGB2XVYCC_CT 0x140
+#define HDMI_CORE_SYS_R2Y_COEFF_LOW 0x144
+#define HDMI_CORE_SYS_R2Y_COEFF_UP 0x148
+#define HDMI_CORE_SYS_G2Y_COEFF_LOW 0x14C
+#define HDMI_CORE_SYS_G2Y_COEFF_UP 0x150
+#define HDMI_CORE_SYS_B2Y_COEFF_LOW 0x154
+#define HDMI_CORE_SYS_B2Y_COEFF_UP 0x158
+#define HDMI_CORE_SYS_R2CB_COEFF_LOW 0x15C
+#define HDMI_CORE_SYS_R2CB_COEFF_UP 0x160
+#define HDMI_CORE_SYS_G2CB_COEFF_LOW 0x164
+#define HDMI_CORE_SYS_G2CB_COEFF_UP 0x168
+#define HDMI_CORE_SYS_B2CB_COEFF_LOW 0x16C
+#define HDMI_CORE_SYS_B2CB_COEFF_UP 0x170
+#define HDMI_CORE_SYS_R2CR_COEFF_LOW 0x174
+#define HDMI_CORE_SYS_R2CR_COEFF_UP 0x178
+#define HDMI_CORE_SYS_G2CR_COEFF_LOW 0x17C
+#define HDMI_CORE_SYS_G2CR_COEFF_UP 0x180
+#define HDMI_CORE_SYS_B2CR_COEFF_LOW 0x184
+#define HDMI_CORE_SYS_B2CR_COEFF_UP 0x188
+#define HDMI_CORE_SYS_RGB_OFFSET_LOW 0x18C
+#define HDMI_CORE_SYS_RGB_OFFSET_UP 0x190
+#define HDMI_CORE_SYS_Y_OFFSET_LOW 0x194
+#define HDMI_CORE_SYS_Y_OFFSET_UP 0x198
+#define HDMI_CORE_SYS_CBCR_OFFSET_LOW 0x19C
+#define HDMI_CORE_SYS_CBCR_OFFSET_UP 0x1A0
#define HDMI_CORE_SYS_INTR_STATE 0x1C0
#define HDMI_CORE_SYS_INTR1 0x1C4
#define HDMI_CORE_SYS_INTR2 0x1C8
#define HDMI_CORE_SYS_INTR3 0x1CC
#define HDMI_CORE_SYS_INTR4 0x1D0
-#define HDMI_CORE_SYS_UMASK1 0x1D4
+#define HDMI_CORE_SYS_INTR_UNMASK1 0x1D4
+#define HDMI_CORE_SYS_INTR_UNMASK2 0x1D8
+#define HDMI_CORE_SYS_INTR_UNMASK3 0x1DC
+#define HDMI_CORE_SYS_INTR_UNMASK4 0x1E0
+#define HDMI_CORE_SYS_INTR_CTRL 0x1E4
#define HDMI_CORE_SYS_TMDS_CTRL 0x208
-#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1
-#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1
-#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1
-#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1
+/* value definitions for HDMI_CORE_SYS_SYS_CTRL1 fields */
+#define HDMI_CORE_SYS_SYS_CTRL1_VEN_FOLLOWVSYNC 0x1
+#define HDMI_CORE_SYS_SYS_CTRL1_HEN_FOLLOWHSYNC 0x1
+#define HDMI_CORE_SYS_SYS_CTRL1_BSEL_24BITBUS 0x1
+#define HDMI_CORE_SYS_SYS_CTRL1_EDGE_RISINGEDGE 0x1
/* HDMI DDC E-DID */
#define HDMI_CORE_DDC_ADDR 0x3B4
@@ -158,35 +182,6 @@
#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31
#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31
-/* PLL */
-
-#define PLLCTRL_PLL_CONTROL 0x0
-#define PLLCTRL_PLL_STATUS 0x4
-#define PLLCTRL_PLL_GO 0x8
-#define PLLCTRL_CFG1 0xC
-#define PLLCTRL_CFG2 0x10
-#define PLLCTRL_CFG3 0x14
-#define PLLCTRL_CFG4 0x20
-
-/* HDMI PHY */
-
-#define HDMI_TXPHY_TX_CTRL 0x0
-#define HDMI_TXPHY_DIGITAL_CTRL 0x4
-#define HDMI_TXPHY_POWER_CTRL 0x8
-#define HDMI_TXPHY_PAD_CFG_CTRL 0xC
-
-#define REG_FLD_MOD(base, idx, val, start, end) \
- hdmi_write_reg(base, idx, FLD_MOD(hdmi_read_reg(base, idx),\
- val, start, end))
-#define REG_GET(base, idx, start, end) \
- FLD_GET(hdmi_read_reg(base, idx), start, end)
-
-enum hdmi_phy_pwr {
- HDMI_PHYPWRCMD_OFF = 0,
- HDMI_PHYPWRCMD_LDOON = 1,
- HDMI_PHYPWRCMD_TXON = 2
-};
-
enum hdmi_core_inputbus_width {
HDMI_INPUT_8BIT = 0,
HDMI_INPUT_10BIT = 1,
@@ -229,114 +224,6 @@ enum hdmi_core_packet_ctrl {
HDMI_PACKETREPEATOFF = 0
};
-/* INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions */
-enum hdmi_core_infoframe {
- HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
- HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
- HDMI_INFOFRAME_AVI_DB1Y_YUV444 = 2,
- HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0,
- HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON = 1,
- HDMI_INFOFRAME_AVI_DB1B_NO = 0,
- HDMI_INFOFRAME_AVI_DB1B_VERT = 1,
- HDMI_INFOFRAME_AVI_DB1B_HORI = 2,
- HDMI_INFOFRAME_AVI_DB1B_VERTHORI = 3,
- HDMI_INFOFRAME_AVI_DB1S_0 = 0,
- HDMI_INFOFRAME_AVI_DB1S_1 = 1,
- HDMI_INFOFRAME_AVI_DB1S_2 = 2,
- HDMI_INFOFRAME_AVI_DB2C_NO = 0,
- HDMI_INFOFRAME_AVI_DB2C_ITU601 = 1,
- HDMI_INFOFRAME_AVI_DB2C_ITU709 = 2,
- HDMI_INFOFRAME_AVI_DB2C_EC_EXTENDED = 3,
- HDMI_INFOFRAME_AVI_DB2M_NO = 0,
- HDMI_INFOFRAME_AVI_DB2M_43 = 1,
- HDMI_INFOFRAME_AVI_DB2M_169 = 2,
- HDMI_INFOFRAME_AVI_DB2R_SAME = 8,
- HDMI_INFOFRAME_AVI_DB2R_43 = 9,
- HDMI_INFOFRAME_AVI_DB2R_169 = 10,
- HDMI_INFOFRAME_AVI_DB2R_149 = 11,
- HDMI_INFOFRAME_AVI_DB3ITC_NO = 0,
- HDMI_INFOFRAME_AVI_DB3ITC_YES = 1,
- HDMI_INFOFRAME_AVI_DB3EC_XVYUV601 = 0,
- HDMI_INFOFRAME_AVI_DB3EC_XVYUV709 = 1,
- HDMI_INFOFRAME_AVI_DB3Q_DEFAULT = 0,
- HDMI_INFOFRAME_AVI_DB3Q_LR = 1,
- HDMI_INFOFRAME_AVI_DB3Q_FR = 2,
- HDMI_INFOFRAME_AVI_DB3SC_NO = 0,
- HDMI_INFOFRAME_AVI_DB3SC_HORI = 1,
- HDMI_INFOFRAME_AVI_DB3SC_VERT = 2,
- HDMI_INFOFRAME_AVI_DB3SC_HORIVERT = 3,
- HDMI_INFOFRAME_AVI_DB5PR_NO = 0,
- HDMI_INFOFRAME_AVI_DB5PR_2 = 1,
- HDMI_INFOFRAME_AVI_DB5PR_3 = 2,
- HDMI_INFOFRAME_AVI_DB5PR_4 = 3,
- HDMI_INFOFRAME_AVI_DB5PR_5 = 4,
- HDMI_INFOFRAME_AVI_DB5PR_6 = 5,
- HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
- HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
- HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
- HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
-};
-
-enum hdmi_packing_mode {
- HDMI_PACK_10b_RGB_YUV444 = 0,
- HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
- HDMI_PACK_20b_YUV422 = 2,
- HDMI_PACK_ALREADYPACKED = 7
-};
-
-enum hdmi_core_audio_layout {
- HDMI_AUDIO_LAYOUT_2CH = 0,
- HDMI_AUDIO_LAYOUT_8CH = 1
-};
-
-enum hdmi_core_cts_mode {
- HDMI_AUDIO_CTS_MODE_HW = 0,
- HDMI_AUDIO_CTS_MODE_SW = 1
-};
-
-enum hdmi_stereo_channels {
- HDMI_AUDIO_STEREO_NOCHANNELS = 0,
- HDMI_AUDIO_STEREO_ONECHANNEL = 1,
- HDMI_AUDIO_STEREO_TWOCHANNELS = 2,
- HDMI_AUDIO_STEREO_THREECHANNELS = 3,
- HDMI_AUDIO_STEREO_FOURCHANNELS = 4
-};
-
-enum hdmi_audio_type {
- HDMI_AUDIO_TYPE_LPCM = 0,
- HDMI_AUDIO_TYPE_IEC = 1
-};
-
-enum hdmi_audio_justify {
- HDMI_AUDIO_JUSTIFY_LEFT = 0,
- HDMI_AUDIO_JUSTIFY_RIGHT = 1
-};
-
-enum hdmi_audio_sample_order {
- HDMI_AUDIO_SAMPLE_RIGHT_FIRST = 0,
- HDMI_AUDIO_SAMPLE_LEFT_FIRST = 1
-};
-
-enum hdmi_audio_samples_perword {
- HDMI_AUDIO_ONEWORD_ONESAMPLE = 0,
- HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1
-};
-
-enum hdmi_audio_sample_size {
- HDMI_AUDIO_SAMPLE_16BITS = 0,
- HDMI_AUDIO_SAMPLE_24BITS = 1
-};
-
-enum hdmi_audio_transf_mode {
- HDMI_AUDIO_TRANSF_DMA = 0,
- HDMI_AUDIO_TRANSF_IRQ = 1
-};
-
-enum hdmi_audio_blk_strt_end_sig {
- HDMI_AUDIO_BLOCK_SIG_STARTEND_ON = 0,
- HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF = 1
-};
-
enum hdmi_audio_i2s_config {
HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
@@ -352,17 +239,6 @@ enum hdmi_audio_i2s_config {
HDMI_AUDIO_I2S_SD3_EN = 1 << 3,
};
-enum hdmi_audio_mclk_mode {
- HDMI_AUDIO_MCLK_128FS = 0,
- HDMI_AUDIO_MCLK_256FS = 1,
- HDMI_AUDIO_MCLK_384FS = 2,
- HDMI_AUDIO_MCLK_512FS = 3,
- HDMI_AUDIO_MCLK_768FS = 4,
- HDMI_AUDIO_MCLK_1024FS = 5,
- HDMI_AUDIO_MCLK_1152FS = 6,
- HDMI_AUDIO_MCLK_192FS = 7
-};
-
struct hdmi_core_video_config {
enum hdmi_core_inputbus_width ip_bus_width;
enum hdmi_core_dither_trunc op_dither_truc;
@@ -383,55 +259,18 @@ struct hdmi_core_packet_enable_repeat {
u32 generic_pkt_repeat;
};
-struct hdmi_video_format {
- enum hdmi_packing_mode packing_mode;
- u32 y_res; /* Line per panel */
- u32 x_res; /* pixel per line */
-};
-
-struct hdmi_audio_format {
- enum hdmi_stereo_channels stereo_channels;
- u8 active_chnnls_msk;
- enum hdmi_audio_type type;
- enum hdmi_audio_justify justification;
- enum hdmi_audio_sample_order sample_order;
- enum hdmi_audio_samples_perword samples_per_word;
- enum hdmi_audio_sample_size sample_size;
- enum hdmi_audio_blk_strt_end_sig en_sig_blk_strt_end;
-};
-
-struct hdmi_audio_dma {
- u8 transfer_size;
- u8 block_size;
- enum hdmi_audio_transf_mode mode;
- u16 fifo_threshold;
-};
-
-struct hdmi_core_audio_i2s_config {
- u8 in_length_bits;
- u8 justification;
- u8 sck_edge_mode;
- u8 vbit;
- u8 direction;
- u8 shift;
- u8 active_sds;
-};
-
-struct hdmi_core_audio_config {
- struct hdmi_core_audio_i2s_config i2s_cfg;
- struct snd_aes_iec958 *iec60958_cfg;
- bool fs_override;
- u32 n;
- u32 cts;
- u32 aud_par_busclk;
- enum hdmi_core_audio_layout layout;
- enum hdmi_core_cts_mode cts_mode;
- bool use_mclk;
- enum hdmi_audio_mclk_mode mclk_mode;
- bool en_acr_pkt;
- bool en_dsd_audio;
- bool en_parallel_aud_input;
- bool en_spdif;
-};
+int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
+void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
+ struct hdmi_config *cfg);
+void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
+int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
+void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
+int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
+ struct omap_dss_audio *audio, u32 pclk);
+int hdmi4_audio_get_dma_port(u32 *offset, u32 *size);
+#endif
#endif
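
For orientation, a hedged sketch of how a caller might chain the new hdmi4_* entry points declared above; the real sequencing lives in hdmi.c, which this patch does not show, so treat the function below purely as an illustration (error handling simplified):

/* Illustrative only: rough call order of the new hdmi4 core interface. */
static int example_hdmi4_bringup(struct platform_device *pdev,
		struct hdmi_core_data *core, struct hdmi_wp_data *wp,
		struct hdmi_config *cfg)
{
	u8 edid[256];
	int r;

	r = hdmi4_core_init(pdev, core);	/* map core registers once */
	if (r)
		return r;

	/* negative on error; assumed to return the byte count on success */
	r = hdmi4_read_edid(core, edid, sizeof(edid));
	if (r < 0)
		return r;

	hdmi4_configure(core, wp, cfg);		/* program wrapper + core */

	return 0;
}
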
diff --git a/drivers/video/omap2/dss/hdmi_common.c b/drivers/video/omap2/dss/hdmi_common.c
new file mode 100644
index 000000000000..5586aaad9d63
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_common.c
@@ -0,0 +1,423 @@
+
+/*
+ * Logic for the tables below:
+ * the user selects a CEA or VESA timing by specifying an HDMI/DVI code.
+ * There is a correspondence between each CEA/VESA timing and its code;
+ * see section 6.3 of the HDMI 1.3 specification for the timing codes.
+ *
+ * In the tables below, cea_timings and vesa_timings hold all the CEA
+ * and VESA timings supported on OMAP4, and the cm.code field of each
+ * entry holds the corresponding CEA or VESA code used to look a timing
+ * up. The mapping is also used in reverse: once the EDID is read from
+ * the TV, it is parsed to find the timing values, which are then mapped
+ * back to the matching CEA or VESA code.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <video/omapdss.h>
+
+#include "hdmi.h"
+
+static const struct hdmi_config cea_timings[] = {
+ {
+ { 640, 480, 25200, 96, 16, 48, 2, 10, 33,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 1, HDMI_HDMI },
+ },
+ {
+ { 720, 480, 27027, 62, 16, 60, 6, 9, 30,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 2, HDMI_HDMI },
+ },
+ {
+ { 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 4, HDMI_HDMI },
+ },
+ {
+ { 1920, 540, 74250, 44, 88, 148, 5, 2, 15,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ true, },
+ { 5, HDMI_HDMI },
+ },
+ {
+ { 1440, 240, 27027, 124, 38, 114, 3, 4, 15,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ true, },
+ { 6, HDMI_HDMI },
+ },
+ {
+ { 1920, 1080, 148500, 44, 88, 148, 5, 4, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 16, HDMI_HDMI },
+ },
+ {
+ { 720, 576, 27000, 64, 12, 68, 5, 5, 39,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 17, HDMI_HDMI },
+ },
+ {
+ { 1280, 720, 74250, 40, 440, 220, 5, 5, 20,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 19, HDMI_HDMI },
+ },
+ {
+ { 1920, 540, 74250, 44, 528, 148, 5, 2, 15,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ true, },
+ { 20, HDMI_HDMI },
+ },
+ {
+ { 1440, 288, 27000, 126, 24, 138, 3, 2, 19,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ true, },
+ { 21, HDMI_HDMI },
+ },
+ {
+ { 1440, 576, 54000, 128, 24, 136, 5, 5, 39,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 29, HDMI_HDMI },
+ },
+ {
+ { 1920, 1080, 148500, 44, 528, 148, 5, 4, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 31, HDMI_HDMI },
+ },
+ {
+ { 1920, 1080, 74250, 44, 638, 148, 5, 4, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 32, HDMI_HDMI },
+ },
+ {
+ { 2880, 480, 108108, 248, 64, 240, 6, 9, 30,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 35, HDMI_HDMI },
+ },
+ {
+ { 2880, 576, 108000, 256, 48, 272, 5, 5, 39,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 37, HDMI_HDMI },
+ },
+};
+
+static const struct hdmi_config vesa_timings[] = {
+/* VESA From Here */
+ {
+ { 640, 480, 25175, 96, 16, 48, 2, 11, 31,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 4, HDMI_DVI },
+ },
+ {
+ { 800, 600, 40000, 128, 40, 88, 4, 1, 23,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 9, HDMI_DVI },
+ },
+ {
+ { 848, 480, 33750, 112, 16, 112, 8, 6, 23,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0xE, HDMI_DVI },
+ },
+ {
+ { 1280, 768, 79500, 128, 64, 192, 7, 3, 20,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x17, HDMI_DVI },
+ },
+ {
+ { 1280, 800, 83500, 128, 72, 200, 6, 3, 22,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x1C, HDMI_DVI },
+ },
+ {
+ { 1360, 768, 85500, 112, 64, 256, 6, 3, 18,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x27, HDMI_DVI },
+ },
+ {
+ { 1280, 960, 108000, 112, 96, 312, 3, 1, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x20, HDMI_DVI },
+ },
+ {
+ { 1280, 1024, 108000, 112, 48, 248, 3, 1, 38,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x23, HDMI_DVI },
+ },
+ {
+ { 1024, 768, 65000, 136, 24, 160, 6, 3, 29,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x10, HDMI_DVI },
+ },
+ {
+ { 1400, 1050, 121750, 144, 88, 232, 4, 3, 32,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x2A, HDMI_DVI },
+ },
+ {
+ { 1440, 900, 106500, 152, 80, 232, 6, 3, 25,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x2F, HDMI_DVI },
+ },
+ {
+		{ 1680, 1050, 146250, 176, 104, 280, 6, 3, 30,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x3A, HDMI_DVI },
+ },
+ {
+ { 1366, 768, 85500, 143, 70, 213, 3, 3, 24,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x51, HDMI_DVI },
+ },
+ {
+ { 1920, 1080, 148500, 44, 148, 80, 5, 4, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x52, HDMI_DVI },
+ },
+ {
+ { 1280, 768, 68250, 32, 48, 80, 7, 3, 12,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x16, HDMI_DVI },
+ },
+ {
+ { 1400, 1050, 101000, 32, 48, 80, 4, 3, 23,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x29, HDMI_DVI },
+ },
+ {
+ { 1680, 1050, 119000, 32, 48, 80, 6, 3, 21,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x39, HDMI_DVI },
+ },
+ {
+ { 1280, 800, 79500, 32, 48, 80, 6, 3, 14,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x1B, HDMI_DVI },
+ },
+ {
+ { 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x55, HDMI_DVI },
+ },
+ {
+ { 1920, 1200, 154000, 32, 48, 80, 6, 3, 26,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x44, HDMI_DVI },
+ },
+};
+
+const struct hdmi_config *hdmi_default_timing(void)
+{
+ return &vesa_timings[0];
+}
+
+static const struct hdmi_config *hdmi_find_timing(int code,
+ const struct hdmi_config *timings_arr, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (timings_arr[i].cm.code == code)
+ return &timings_arr[i];
+ }
+
+ return NULL;
+}
+
+const struct hdmi_config *hdmi_get_timings(int mode, int code)
+{
+ const struct hdmi_config *arr;
+ int len;
+
+ if (mode == HDMI_DVI) {
+ arr = vesa_timings;
+ len = ARRAY_SIZE(vesa_timings);
+ } else {
+ arr = cea_timings;
+ len = ARRAY_SIZE(cea_timings);
+ }
+
+ return hdmi_find_timing(code, arr, len);
+}
+
+static bool hdmi_timings_compare(struct omap_video_timings *timing1,
+ const struct omap_video_timings *timing2)
+{
+ int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync;
+
+ if ((DIV_ROUND_CLOSEST(timing2->pixel_clock, 1000) ==
+ DIV_ROUND_CLOSEST(timing1->pixel_clock, 1000)) &&
+ (timing2->x_res == timing1->x_res) &&
+ (timing2->y_res == timing1->y_res)) {
+
+ timing2_hsync = timing2->hfp + timing2->hsw + timing2->hbp;
+ timing1_hsync = timing1->hfp + timing1->hsw + timing1->hbp;
+ timing2_vsync = timing2->vfp + timing2->vsw + timing2->vbp;
+ timing1_vsync = timing1->vfp + timing1->vsw + timing1->vbp;
+
+ DSSDBG("timing1_hsync = %d timing1_vsync = %d"\
+ "timing2_hsync = %d timing2_vsync = %d\n",
+ timing1_hsync, timing1_vsync,
+ timing2_hsync, timing2_vsync);
+
+ if ((timing1_hsync == timing2_hsync) &&
+ (timing1_vsync == timing2_vsync)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing)
+{
+ int i;
+ struct hdmi_cm cm = {-1};
+ DSSDBG("hdmi_get_code\n");
+
+ for (i = 0; i < ARRAY_SIZE(cea_timings); i++) {
+ if (hdmi_timings_compare(timing, &cea_timings[i].timings)) {
+ cm = cea_timings[i].cm;
+ goto end;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(vesa_timings); i++) {
+ if (hdmi_timings_compare(timing, &vesa_timings[i].timings)) {
+ cm = vesa_timings[i].cm;
+ goto end;
+ }
+ }
+
+end:
+ return cm;
+}
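
A hedged usage sketch of the lookup helpers above (the wrapper name is made up; the cm.code and cm.mode fields follow their use elsewhere in this patch): map parsed EDID timings back to a code with hdmi_get_code() and fall back to the default VESA mode when nothing matches, since cm.code stays -1 in that case:

/* Illustrative only: tie hdmi_get_code() and hdmi_get_timings() together. */
static const struct hdmi_config *pick_hdmi_config(struct omap_video_timings *t)
{
	struct hdmi_cm cm = hdmi_get_code(t);

	if (cm.code < 0)		/* no CEA/VESA match found */
		return hdmi_default_timing();

	return hdmi_get_timings(cm.mode, cm.code);
}
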
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts)
+{
+ u32 deep_color;
+ bool deep_color_correct = false;
+
+ if (n == NULL || cts == NULL)
+ return -EINVAL;
+
+ /* TODO: When implemented, query deep color mode here. */
+ deep_color = 100;
+
+ /*
+ * When using deep color, the default N value (as in the HDMI
+	 * specification) yields a non-integer CTS. Hence, we
+ * modify it while keeping the restrictions described in
+ * section 7.2.1 of the HDMI 1.4a specification.
+ */
+ switch (sample_freq) {
+ case 32000:
+ case 48000:
+ case 96000:
+ case 192000:
+ if (deep_color == 125)
+ if (pclk == 27027 || pclk == 74250)
+ deep_color_correct = true;
+ if (deep_color == 150)
+ if (pclk == 27027)
+ deep_color_correct = true;
+ break;
+ case 44100:
+ case 88200:
+ case 176400:
+ if (deep_color == 125)
+ if (pclk == 27027)
+ deep_color_correct = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (deep_color_correct) {
+ switch (sample_freq) {
+ case 32000:
+ *n = 8192;
+ break;
+ case 44100:
+ *n = 12544;
+ break;
+ case 48000:
+ *n = 8192;
+ break;
+ case 88200:
+ *n = 25088;
+ break;
+ case 96000:
+ *n = 16384;
+ break;
+ case 176400:
+ *n = 50176;
+ break;
+ case 192000:
+ *n = 32768;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (sample_freq) {
+ case 32000:
+ *n = 4096;
+ break;
+ case 44100:
+ *n = 6272;
+ break;
+ case 48000:
+ *n = 6144;
+ break;
+ case 88200:
+ *n = 12544;
+ break;
+ case 96000:
+ *n = 12288;
+ break;
+ case 176400:
+ *n = 25088;
+ break;
+ case 192000:
+ *n = 24576;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
+ *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
+
+ return 0;
+}
+#endif
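
To make the CTS formula above concrete: for 48 kHz audio on a 74.25 MHz TMDS clock with no deep color (pclk = 74250, deep_color = 100), the table gives N = 6144 and

    CTS = 74250 * (6144 / 128) * 100 / (48000 / 10)
        = 74250 * 48 * 100 / 4800
        = 74250

which matches the nominal CTS = f_TMDS * N / (128 * fs) from the HDMI specification for that clock.
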
diff --git a/drivers/video/omap2/dss/hdmi_phy.c b/drivers/video/omap2/dss/hdmi_phy.c
new file mode 100644
index 000000000000..45acb997ac00
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_phy.c
@@ -0,0 +1,160 @@
+/*
+ * HDMI PHY
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "hdmi.h"
+
+void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s)
+{
+#define DUMPPHY(r) seq_printf(s, "%-35s %08x\n", #r,\
+ hdmi_read_reg(phy->base, r))
+
+ DUMPPHY(HDMI_TXPHY_TX_CTRL);
+ DUMPPHY(HDMI_TXPHY_DIGITAL_CTRL);
+ DUMPPHY(HDMI_TXPHY_POWER_CTRL);
+ DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
+}
+
+static irqreturn_t hdmi_irq_handler(int irq, void *data)
+{
+ struct hdmi_wp_data *wp = data;
+ u32 irqstatus;
+
+ irqstatus = hdmi_wp_get_irqstatus(wp);
+ hdmi_wp_set_irqstatus(wp, irqstatus);
+
+ if ((irqstatus & HDMI_IRQ_LINK_CONNECT) &&
+ irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
+ /*
+ * If we get both connect and disconnect interrupts at the same
+ * time, turn off the PHY, clear interrupts, and restart, which
+		 * raises a connect interrupt if a cable is connected, or nothing
+		 * if no cable is connected.
+ */
+ hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF);
+
+ hdmi_wp_set_irqstatus(wp, HDMI_IRQ_LINK_CONNECT |
+ HDMI_IRQ_LINK_DISCONNECT);
+
+ hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
+ } else if (irqstatus & HDMI_IRQ_LINK_CONNECT) {
+ hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_TXON);
+ } else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
+ hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int hdmi_phy_enable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp,
+ struct hdmi_config *cfg)
+{
+ u16 r = 0;
+ u32 irqstatus;
+
+ hdmi_wp_clear_irqenable(wp, 0xffffffff);
+
+ irqstatus = hdmi_wp_get_irqstatus(wp);
+ hdmi_wp_set_irqstatus(wp, irqstatus);
+
+ r = hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
+ if (r)
+ return r;
+
+ /*
+	 * Read address 0 in order to get the SCP reset completed.
+	 * This dummy access makes sure the reset is done.
+ */
+ hdmi_read_reg(phy->base, HDMI_TXPHY_TX_CTRL);
+
+ /*
+ * Write to phy address 0 to configure the clock
+ * use HFBITCLK write HDMI_TXPHY_TX_CONTROL_FREQOUT field
+ */
+ REG_FLD_MOD(phy->base, HDMI_TXPHY_TX_CTRL, 0x1, 31, 30);
+
+ /* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
+ hdmi_write_reg(phy->base, HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
+
+ /* Setup max LDO voltage */
+ REG_FLD_MOD(phy->base, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
+
+ /* Write to phy address 3 to change the polarity control */
+ REG_FLD_MOD(phy->base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
+
+ r = request_threaded_irq(phy->irq, NULL, hdmi_irq_handler,
+ IRQF_ONESHOT, "OMAP HDMI", wp);
+ if (r) {
+ DSSERR("HDMI IRQ request failed\n");
+ hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF);
+ return r;
+ }
+
+ hdmi_wp_set_irqenable(wp,
+ HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
+
+ return 0;
+}
+
+void hdmi_phy_disable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp)
+{
+ free_irq(phy->irq, wp);
+
+ hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF);
+}
+
+#define PHY_OFFSET 0x300
+#define PHY_SIZE 0x100
+
+int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy)
+{
+ struct resource *res;
+ struct resource temp_res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_txphy");
+ if (!res) {
+ DSSDBG("can't get PHY mem resource by name\n");
+ /*
+		 * If hwmod/DT doesn't have the memory resource information
+		 * split into HDMI sub-blocks by name, try again by getting
+		 * the platform's first resource. This code will be removed
+		 * once the driver can get the mem resources by name.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get PHY mem resource\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start + PHY_OFFSET;
+ temp_res.end = temp_res.start + PHY_SIZE - 1;
+ res = &temp_res;
+ }
+
+ phy->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!phy->base) {
+ DSSERR("can't ioremap TX PHY\n");
+ return -ENOMEM;
+ }
+
+ phy->irq = platform_get_irq(pdev, 0);
+ if (phy->irq < 0) {
+ DSSERR("platform_get_irq failed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
diff --git a/drivers/video/omap2/dss/hdmi_pll.c b/drivers/video/omap2/dss/hdmi_pll.c
new file mode 100644
index 000000000000..d3e6e78c0082
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_pll.c
@@ -0,0 +1,230 @@
+/*
+ * HDMI PLL
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "hdmi.h"
+
+#define HDMI_DEFAULT_REGN 16
+#define HDMI_DEFAULT_REGM2 1
+
+void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
+{
+#define DUMPPLL(r) seq_printf(s, "%-35s %08x\n", #r,\
+ hdmi_read_reg(pll->base, r))
+
+ DUMPPLL(PLLCTRL_PLL_CONTROL);
+ DUMPPLL(PLLCTRL_PLL_STATUS);
+ DUMPPLL(PLLCTRL_PLL_GO);
+ DUMPPLL(PLLCTRL_CFG1);
+ DUMPPLL(PLLCTRL_CFG2);
+ DUMPPLL(PLLCTRL_CFG3);
+ DUMPPLL(PLLCTRL_SSC_CFG1);
+ DUMPPLL(PLLCTRL_SSC_CFG2);
+ DUMPPLL(PLLCTRL_CFG4);
+}
+
+void hdmi_pll_compute(struct hdmi_pll_data *pll, unsigned long clkin, int phy)
+{
+ struct hdmi_pll_info *pi = &pll->info;
+ unsigned long refclk;
+ u32 mf;
+
+ /* use our funky units */
+ clkin /= 10000;
+
+ /*
+ * The input clock is pre-divided by N + 1; the output of the
+ * pre-divider is the reference clock.
+ */
+
+ pi->regn = HDMI_DEFAULT_REGN;
+
+ refclk = clkin / pi->regn;
+
+ pi->regm2 = HDMI_DEFAULT_REGM2;
+
+ /*
+ * The multiplier is pixel_clk / ref_clk, scaled by 100 so that the
+ * fractional part is not lost.
+ */
+ pi->regm = phy * pi->regm2 / refclk;
+
+ /*
+ * The fractional multiplier is the remainder between the required
+ * clock (phy) and the clock produced by the integer multiplier,
+ * scaled by 2^18 (262144) and divided by the reference clock.
+ */
+ mf = (phy - pi->regm / pi->regm2 * refclk) * 262144;
+ pi->regmf = pi->regm2 * mf / refclk;
+
+ /*
+ * The dcofreq flag should be set to 1 if the required pixel clock
+ * is greater than 1000 MHz.
+ */
+ pi->dcofreq = phy > 1000 * 100;
+ pi->regsd = ((pi->regm * clkin / 10) / (pi->regn * 250) + 5) / 10;
+
+ /* Set the reference clock to sysclk reference */
+ pi->refsel = HDMI_REFSEL_SYSCLK;
+
+ DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf);
+ DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
+}
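For reference, the divider fields computed above follow the usual fractional-N
relationship: refclk = clkin / regn, and the synthesized clock comes out as
refclk * (regm + regmf / 2^18) / regm2. A minimal sketch (not part of this
patch; the helper name is made up for illustration) that recovers the clock,
in the same clkin / 10000 units, from a filled-in struct hdmi_pll_info:

static unsigned long hdmi_pll_recovered_clock(const struct hdmi_pll_info *pi,
					      unsigned long clkin)
{
	/* same pre-division and "funky units" as hdmi_pll_compute() */
	unsigned long refclk = (clkin / 10000) / pi->regn;
	/* regmf holds the fractional part of the multiplier, scaled by 2^18 */
	unsigned long long frac = ((unsigned long long)refclk * pi->regmf) >> 18;

	return (refclk * pi->regm + (unsigned long)frac) / pi->regm2;
}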
+
+
+static int hdmi_pll_config(struct hdmi_pll_data *pll)
+{
+ u32 r;
+ struct hdmi_pll_info *fmt = &pll->info;
+
+ /* The PLL is always started in manual mode */
+ REG_FLD_MOD(pll->base, PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
+
+ r = hdmi_read_reg(pll->base, PLLCTRL_CFG1);
+ r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1_PLL_REGM */
+ r = FLD_MOD(r, fmt->regn - 1, 8, 1); /* CFG1_PLL_REGN */
+ hdmi_write_reg(pll->base, PLLCTRL_CFG1, r);
+
+ r = hdmi_read_reg(pll->base, PLLCTRL_CFG2);
+
+ r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
+ r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
+ r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
+ r = FLD_MOD(r, fmt->refsel, 22, 21); /* REFSEL */
+
+ if (fmt->dcofreq) {
+ /* divider programming for frequencies beyond 1000 MHz */
+ REG_FLD_MOD(pll->base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
+ r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
+ } else {
+ r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
+ }
+
+ hdmi_write_reg(pll->base, PLLCTRL_CFG2, r);
+
+ r = hdmi_read_reg(pll->base, PLLCTRL_CFG4);
+ r = FLD_MOD(r, fmt->regm2, 24, 18);
+ r = FLD_MOD(r, fmt->regmf, 17, 0);
+ hdmi_write_reg(pll->base, PLLCTRL_CFG4, r);
+
+ /* go now */
+ REG_FLD_MOD(pll->base, PLLCTRL_PLL_GO, 0x1, 0, 0);
+
+ /* wait for bit change */
+ if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_GO,
+ 0, 0, 1) != 1) {
+ pr_err("PLL GO bit not set\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Wait till the lock bit is set in PLL status */
+ if (hdmi_wait_for_bit_change(pll->base,
+ PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
+ pr_err("cannot lock PLL\n");
+ pr_err("CFG1 0x%x\n",
+ hdmi_read_reg(pll->base, PLLCTRL_CFG1));
+ pr_err("CFG2 0x%x\n",
+ hdmi_read_reg(pll->base, PLLCTRL_CFG2));
+ pr_err("CFG4 0x%x\n",
+ hdmi_read_reg(pll->base, PLLCTRL_CFG4));
+ return -ETIMEDOUT;
+ }
+
+ pr_debug("PLL locked!\n");
+
+ return 0;
+}
+
+static int hdmi_pll_reset(struct hdmi_pll_data *pll)
+{
+ /* SYSRESET controlled by power FSM */
+ REG_FLD_MOD(pll->base, PLLCTRL_PLL_CONTROL, 0x0, 3, 3);
+
+ /* A read of 0x0 means the reset is still in progress */
+ if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_STATUS, 0, 0, 1)
+ != 1) {
+ pr_err("Failed to sysreset PLL\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int hdmi_pll_enable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp)
+{
+ u16 r = 0;
+
+ r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
+ if (r)
+ return r;
+
+ r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
+ if (r)
+ return r;
+
+ r = hdmi_pll_reset(pll);
+ if (r)
+ return r;
+
+ r = hdmi_pll_config(pll);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+void hdmi_pll_disable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp)
+{
+ hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
+}
+
+#define PLL_OFFSET 0x200
+#define PLL_SIZE 0x100
+
+int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll)
+{
+ struct resource *res;
+ struct resource temp_res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_pllctrl");
+ if (!res) {
+ DSSDBG("can't get PLL mem resource by name\n");
+ /*
+ * If hwmod/DT doesn't provide the memory resource information split
+ * into HDMI sub-blocks by name, fall back to the platform's first
+ * resource. This fallback will be removed once the driver can get
+ * the memory resources by name.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get PLL mem resource\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start + PLL_OFFSET;
+ temp_res.end = temp_res.start + PLL_SIZE - 1;
+ res = &temp_res;
+ }
+
+ pll->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!pll->base) {
+ DSSERR("can't ioremap PLLCTRL\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
diff --git a/drivers/video/omap2/dss/hdmi_wp.c b/drivers/video/omap2/dss/hdmi_wp.c
new file mode 100644
index 000000000000..8151d8969a6e
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_wp.c
@@ -0,0 +1,271 @@
+/*
+ * HDMI wrapper
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "hdmi.h"
+
+void hdmi_wp_dump(struct hdmi_wp_data *wp, struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, hdmi_read_reg(wp->base, r))
+
+ DUMPREG(HDMI_WP_REVISION);
+ DUMPREG(HDMI_WP_SYSCONFIG);
+ DUMPREG(HDMI_WP_IRQSTATUS_RAW);
+ DUMPREG(HDMI_WP_IRQSTATUS);
+ DUMPREG(HDMI_WP_IRQENABLE_SET);
+ DUMPREG(HDMI_WP_IRQENABLE_CLR);
+ DUMPREG(HDMI_WP_IRQWAKEEN);
+ DUMPREG(HDMI_WP_PWR_CTRL);
+ DUMPREG(HDMI_WP_DEBOUNCE);
+ DUMPREG(HDMI_WP_VIDEO_CFG);
+ DUMPREG(HDMI_WP_VIDEO_SIZE);
+ DUMPREG(HDMI_WP_VIDEO_TIMING_H);
+ DUMPREG(HDMI_WP_VIDEO_TIMING_V);
+ DUMPREG(HDMI_WP_WP_CLK);
+ DUMPREG(HDMI_WP_AUDIO_CFG);
+ DUMPREG(HDMI_WP_AUDIO_CFG2);
+ DUMPREG(HDMI_WP_AUDIO_CTRL);
+ DUMPREG(HDMI_WP_AUDIO_DATA);
+}
+
+u32 hdmi_wp_get_irqstatus(struct hdmi_wp_data *wp)
+{
+ return hdmi_read_reg(wp->base, HDMI_WP_IRQSTATUS);
+}
+
+void hdmi_wp_set_irqstatus(struct hdmi_wp_data *wp, u32 irqstatus)
+{
+ hdmi_write_reg(wp->base, HDMI_WP_IRQSTATUS, irqstatus);
+ /* flush posted write */
+ hdmi_read_reg(wp->base, HDMI_WP_IRQSTATUS);
+}
+
+void hdmi_wp_set_irqenable(struct hdmi_wp_data *wp, u32 mask)
+{
+ hdmi_write_reg(wp->base, HDMI_WP_IRQENABLE_SET, mask);
+}
+
+void hdmi_wp_clear_irqenable(struct hdmi_wp_data *wp, u32 mask)
+{
+ hdmi_write_reg(wp->base, HDMI_WP_IRQENABLE_CLR, mask);
+}
+
+/* PHY_PWR_CMD */
+int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val)
+{
+ /* Return early if the PHY is already in the requested power state */
+ if (REG_GET(wp->base, HDMI_WP_PWR_CTRL, 5, 4) == val)
+ return 0;
+
+ /* Command for power control of HDMI PHY */
+ REG_FLD_MOD(wp->base, HDMI_WP_PWR_CTRL, val, 7, 6);
+
+ /* Status of the power control of HDMI PHY */
+ if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 5, 4, val)
+ != val) {
+ pr_err("Failed to set PHY power mode to %d\n", val);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* PLL_PWR_CMD */
+int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val)
+{
+ /* Command for power control of HDMI PLL */
+ REG_FLD_MOD(wp->base, HDMI_WP_PWR_CTRL, val, 3, 2);
+
+ /* wait till PLL_PWR_STATUS is set */
+ if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 1, 0, val)
+ != val) {
+ pr_err("Failed to set PLL_PWR_STATUS\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int hdmi_wp_video_start(struct hdmi_wp_data *wp)
+{
+ REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, true, 31, 31);
+
+ return 0;
+}
+
+void hdmi_wp_video_stop(struct hdmi_wp_data *wp)
+{
+ REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, false, 31, 31);
+}
+
+void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
+ struct hdmi_video_format *video_fmt)
+{
+ u32 l = 0;
+
+ REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, video_fmt->packing_mode,
+ 10, 8);
+
+ l |= FLD_VAL(video_fmt->y_res, 31, 16);
+ l |= FLD_VAL(video_fmt->x_res, 15, 0);
+ hdmi_write_reg(wp->base, HDMI_WP_VIDEO_SIZE, l);
+}
+
+void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
+ struct omap_video_timings *timings)
+{
+ u32 r;
+ bool vsync_pol, hsync_pol;
+ pr_debug("Enter hdmi_wp_video_config_interface\n");
+
+ vsync_pol = timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+ hsync_pol = timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+
+ r = hdmi_read_reg(wp->base, HDMI_WP_VIDEO_CFG);
+ r = FLD_MOD(r, vsync_pol, 7, 7);
+ r = FLD_MOD(r, hsync_pol, 6, 6);
+ r = FLD_MOD(r, timings->interlace, 3, 3);
+ r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */
+ hdmi_write_reg(wp->base, HDMI_WP_VIDEO_CFG, r);
+}
+
+void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
+ struct omap_video_timings *timings)
+{
+ u32 timing_h = 0;
+ u32 timing_v = 0;
+
+ pr_debug("Enter hdmi_wp_video_config_timing\n");
+
+ timing_h |= FLD_VAL(timings->hbp, 31, 20);
+ timing_h |= FLD_VAL(timings->hfp, 19, 8);
+ timing_h |= FLD_VAL(timings->hsw, 7, 0);
+ hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_H, timing_h);
+
+ timing_v |= FLD_VAL(timings->vbp, 31, 20);
+ timing_v |= FLD_VAL(timings->vfp, 19, 8);
+ timing_v |= FLD_VAL(timings->vsw, 7, 0);
+ hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_V, timing_v);
+}
+
+void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
+ struct omap_video_timings *timings, struct hdmi_config *param)
+{
+ pr_debug("Enter hdmi_wp_video_init_format\n");
+
+ video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
+ video_fmt->y_res = param->timings.y_res;
+ video_fmt->x_res = param->timings.x_res;
+
+ timings->hbp = param->timings.hbp;
+ timings->hfp = param->timings.hfp;
+ timings->hsw = param->timings.hsw;
+ timings->vbp = param->timings.vbp;
+ timings->vfp = param->timings.vfp;
+ timings->vsw = param->timings.vsw;
+ timings->vsync_level = param->timings.vsync_level;
+ timings->hsync_level = param->timings.hsync_level;
+ timings->interlace = param->timings.interlace;
+}
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
+ struct hdmi_audio_format *aud_fmt)
+{
+ u32 r;
+
+ DSSDBG("Enter hdmi_wp_audio_config_format\n");
+
+ r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG);
+ r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
+ r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
+ r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
+ r = FLD_MOD(r, aud_fmt->type, 4, 4);
+ r = FLD_MOD(r, aud_fmt->justification, 3, 3);
+ r = FLD_MOD(r, aud_fmt->sample_order, 2, 2);
+ r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1);
+ r = FLD_MOD(r, aud_fmt->sample_size, 0, 0);
+ hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CFG, r);
+}
+
+void hdmi_wp_audio_config_dma(struct hdmi_wp_data *wp,
+ struct hdmi_audio_dma *aud_dma)
+{
+ u32 r;
+
+ DSSDBG("Enter hdmi_wp_audio_config_dma\n");
+
+ r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG2);
+ r = FLD_MOD(r, aud_dma->transfer_size, 15, 8);
+ r = FLD_MOD(r, aud_dma->block_size, 7, 0);
+ hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CFG2, r);
+
+ r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CTRL);
+ r = FLD_MOD(r, aud_dma->mode, 9, 9);
+ r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0);
+ hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CTRL, r);
+}
+
+int hdmi_wp_audio_enable(struct hdmi_wp_data *wp, bool enable)
+{
+ REG_FLD_MOD(wp->base, HDMI_WP_AUDIO_CTRL, enable, 31, 31);
+
+ return 0;
+}
+
+int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable)
+{
+ REG_FLD_MOD(wp->base, HDMI_WP_AUDIO_CTRL, enable, 30, 30);
+
+ return 0;
+}
+#endif
+
+#define WP_SIZE 0x200
+
+int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
+{
+ struct resource *res;
+ struct resource temp_res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_wp");
+ if (!res) {
+ DSSDBG("can't get WP mem resource by name\n");
+ /*
+ * If hwmod/DT doesn't provide the memory resource information split
+ * into HDMI sub-blocks by name, fall back to the platform's first
+ * resource. This fallback will be removed once the driver can get
+ * the memory resources by name.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get WP mem resource\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start;
+ temp_res.end = temp_res.start + WP_SIZE - 1;
+ res = &temp_res;
+ }
+
+ wp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!wp->base) {
+ DSSERR("can't ioremap HDMI WP\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
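Taken together, the fallback paths in hdmi_wp_init(), hdmi_pll_init() and
hdmi_phy_init() assume the legacy single HDMI memory resource. Reconstructed
purely as a reading aid from the WP_SIZE, PLL_OFFSET/PLL_SIZE and
PHY_OFFSET/PHY_SIZE constants above, the implied layout is:

/*
 * Legacy single-resource layout assumed by the fallback code:
 *
 *   base + 0x000 .. 0x1ff   wrapper  (WP_SIZE 0x200)
 *   base + 0x200 .. 0x2ff   PLL ctrl (PLL_OFFSET 0x200, PLL_SIZE 0x100)
 *   base + 0x300 .. 0x3ff   TX PHY   (PHY_OFFSET 0x300, PHY_SIZE 0x100)
 */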
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
deleted file mode 100644
index 45215f44617c..000000000000
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * ti_hdmi.h
- *
- * HDMI driver definition for TI OMAP4, DM81xx, DM38xx Processor.
- *
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _TI_HDMI_H
-#define _TI_HDMI_H
-
-struct hdmi_ip_data;
-
-enum hdmi_pll_pwr {
- HDMI_PLLPWRCMD_ALLOFF = 0,
- HDMI_PLLPWRCMD_PLLONLY = 1,
- HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
- HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
-};
-
-enum hdmi_core_hdmi_dvi {
- HDMI_DVI = 0,
- HDMI_HDMI = 1
-};
-
-enum hdmi_clk_refsel {
- HDMI_REFSEL_PCLK = 0,
- HDMI_REFSEL_REF1 = 1,
- HDMI_REFSEL_REF2 = 2,
- HDMI_REFSEL_SYSCLK = 3
-};
-
-struct hdmi_cm {
- int code;
- int mode;
-};
-
-struct hdmi_config {
- struct omap_video_timings timings;
- struct hdmi_cm cm;
-};
-
-/* HDMI PLL structure */
-struct hdmi_pll_info {
- u16 regn;
- u16 regm;
- u32 regmf;
- u16 regm2;
- u16 regsd;
- u16 dcofreq;
- enum hdmi_clk_refsel refsel;
-};
-
-struct ti_hdmi_ip_ops {
-
- void (*video_configure)(struct hdmi_ip_data *ip_data);
-
- int (*phy_enable)(struct hdmi_ip_data *ip_data);
-
- void (*phy_disable)(struct hdmi_ip_data *ip_data);
-
- int (*read_edid)(struct hdmi_ip_data *ip_data, u8 *edid, int len);
-
- int (*pll_enable)(struct hdmi_ip_data *ip_data);
-
- void (*pll_disable)(struct hdmi_ip_data *ip_data);
-
- int (*video_enable)(struct hdmi_ip_data *ip_data);
-
- void (*video_disable)(struct hdmi_ip_data *ip_data);
-
- void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
- void (*dump_core)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
- void (*dump_pll)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
- void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
- int (*audio_enable)(struct hdmi_ip_data *ip_data);
-
- void (*audio_disable)(struct hdmi_ip_data *ip_data);
-
- int (*audio_start)(struct hdmi_ip_data *ip_data);
-
- void (*audio_stop)(struct hdmi_ip_data *ip_data);
-
- int (*audio_config)(struct hdmi_ip_data *ip_data,
- struct omap_dss_audio *audio);
-
- int (*audio_get_dma_port)(u32 *offset, u32 *size);
-#endif
-
-};
-
-/*
- * Refer to section 8.2 in HDMI 1.3 specification for
- * details about infoframe databytes
- */
-struct hdmi_core_infoframe_avi {
- /* Y0, Y1 rgb,yCbCr */
- u8 db1_format;
- /* A0 Active information Present */
- u8 db1_active_info;
- /* B0, B1 Bar info data valid */
- u8 db1_bar_info_dv;
- /* S0, S1 scan information */
- u8 db1_scan_info;
- /* C0, C1 colorimetry */
- u8 db2_colorimetry;
- /* M0, M1 Aspect ratio (4:3, 16:9) */
- u8 db2_aspect_ratio;
- /* R0...R3 Active format aspect ratio */
- u8 db2_active_fmt_ar;
- /* ITC IT content. */
- u8 db3_itc;
- /* EC0, EC1, EC2 Extended colorimetry */
- u8 db3_ec;
- /* Q1, Q0 Quantization range */
- u8 db3_q_range;
- /* SC1, SC0 Non-uniform picture scaling */
- u8 db3_nup_scaling;
- /* VIC0..6 Video format identification */
- u8 db4_videocode;
- /* PR0..PR3 Pixel repetition factor */
- u8 db5_pixel_repeat;
- /* Line number end of top bar */
- u16 db6_7_line_eoftop;
- /* Line number start of bottom bar */
- u16 db8_9_line_sofbottom;
- /* Pixel number end of left bar */
- u16 db10_11_pixel_eofleft;
- /* Pixel number start of right bar */
- u16 db12_13_pixel_sofright;
-};
-
-struct hdmi_ip_data {
- void __iomem *base_wp; /* HDMI wrapper */
- unsigned long core_sys_offset;
- unsigned long core_av_offset;
- unsigned long pll_offset;
- unsigned long phy_offset;
- int irq;
- const struct ti_hdmi_ip_ops *ops;
- struct hdmi_config cfg;
- struct hdmi_pll_info pll_data;
- struct hdmi_core_infoframe_avi avi_cfg;
-
- /* ti_hdmi_4xxx_ip private data. These should be in a separate struct */
- struct mutex lock;
-};
-int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
-int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len);
-int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data);
-int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts);
-int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data);
-int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data);
-int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
- struct omap_dss_audio *audio);
-int ti_hdmi_4xxx_audio_get_dma_port(u32 *offset, u32 *size);
-#endif
-#endif
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index 4b23af6e5c28..367cea8f43f3 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -339,8 +339,6 @@ static int p9100_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index 3d86bac62d3e..4c9299576827 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -403,7 +403,7 @@ try_again:
if (rc < 0)
return rc;
- printk(KERN_INFO "fb%d: Apple Platinum frame buffer device\n", info->node);
+ fb_info(info, "Apple Platinum frame buffer device\n");
return 0;
}
@@ -639,7 +639,6 @@ static int platinumfb_probe(struct platform_device* odev)
iounmap(pinfo->frame_buffer);
iounmap(pinfo->platinum_regs);
iounmap(pinfo->cmap_regs);
- dev_set_drvdata(&odev->dev, NULL);
framebuffer_release(info);
}
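The fb_info()/fb_err()/fb_warn()/fb_notice()/fb_dbg() calls that replace the
open-coded printk()s in this and the following hunks are the fb_<level>
logging helpers from <linux/fb.h>; to a first approximation (a sketch of
their shape, not a verbatim quote of the header) they expand to:

#define fb_err(fb, fmt, ...)	pr_err("fb%d: " fmt, (fb)->node, ##__VA_ARGS__)
#define fb_info(fb, fmt, ...)	pr_info("fb%d: " fmt, (fb)->node, ##__VA_ARGS__)
#define fb_dbg(fb, fmt, ...)	pr_debug("fb%d: " fmt, (fb)->node, ##__VA_ARGS__)

which is why the explicit "fb%d" prefix and info->node argument drop out of
every converted message.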
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 81354eeab021..3b85b647bc10 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -1694,8 +1694,8 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (retval < 0)
goto err_exit_all;
- printk(KERN_INFO "fb%d: %s frame buffer device, memory = %dK.\n",
- info->node, info->fix.id, pm2fb_fix.smem_len / 1024);
+ fb_info(info, "%s frame buffer device, memory = %dK\n",
+ info->fix.id, pm2fb_fix.smem_len / 1024);
/*
* Our driver data
@@ -1744,7 +1744,6 @@ static void pm2fb_remove(struct pci_dev *pdev)
iounmap(par->v_regs);
release_mem_region(fix->mmio_start, fix->mmio_len);
- pci_set_drvdata(pdev, NULL);
fb_dealloc_cmap(&info->cmap);
kfree(info->pixmap.addr);
framebuffer_release(info);
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index 7718faa4a73b..4bf3273d0433 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -1445,8 +1445,7 @@ static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
retval = -EINVAL;
goto err_exit_all;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
pci_set_drvdata(dev, info);
return 0;
@@ -1489,7 +1488,6 @@ static void pm3fb_remove(struct pci_dev *dev)
iounmap(par->v_regs);
release_mem_region(fix->mmio_start, fix->mmio_len);
- pci_set_drvdata(dev, NULL);
kfree(info->pixmap.addr);
framebuffer_release(info);
}
diff --git a/drivers/video/pmag-ba-fb.c b/drivers/video/pmag-ba-fb.c
index d1e46cedb1f7..914a52ba8477 100644
--- a/drivers/video/pmag-ba-fb.c
+++ b/drivers/video/pmag-ba-fb.c
@@ -212,8 +212,8 @@ static int pmagbafb_probe(struct device *dev)
get_device(dev);
- pr_info("fb%d: %s frame buffer device at %s\n",
- info->node, info->fix.id, dev_name(dev));
+ fb_info(info, "%s frame buffer device at %s\n",
+ info->fix.id, dev_name(dev));
return 0;
diff --git a/drivers/video/pmagb-b-fb.c b/drivers/video/pmagb-b-fb.c
index 0e1317400328..0822b6f8dddc 100644
--- a/drivers/video/pmagb-b-fb.c
+++ b/drivers/video/pmagb-b-fb.c
@@ -328,11 +328,10 @@ static int pmagbbfb_probe(struct device *dev)
snprintf(freq1, sizeof(freq1), "%u.%03uMHz",
par->osc1 / 1000, par->osc1 % 1000);
- pr_info("fb%d: %s frame buffer device at %s\n",
- info->node, info->fix.id, dev_name(dev));
- pr_info("fb%d: Osc0: %s, Osc1: %s, Osc%u selected\n",
- info->node, freq0, par->osc1 ? freq1 : "disabled",
- par->osc1 != 0);
+ fb_info(info, "%s frame buffer device at %s\n",
+ info->fix.id, dev_name(dev));
+ fb_info(info, "Osc0: %s, Osc1: %s, Osc%u selected\n",
+ freq0, par->osc1 ? freq1 : "disabled", par->osc1 != 0);
return 0;
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index df07860563e6..167cffff3d4e 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -817,24 +817,25 @@ static int pvr2fb_common_init(void)
rev = fb_readl(par->mmio_base + 0x04);
- printk("fb%d: %s (rev %ld.%ld) frame buffer device, using %ldk/%ldk of video memory\n",
- fb_info->node, fb_info->fix.id, (rev >> 4) & 0x0f, rev & 0x0f,
- modememused >> 10, (unsigned long)(fb_info->fix.smem_len >> 10));
- printk("fb%d: Mode %dx%d-%d pitch = %ld cable: %s video output: %s\n",
- fb_info->node, fb_info->var.xres, fb_info->var.yres,
- fb_info->var.bits_per_pixel,
- get_line_length(fb_info->var.xres, fb_info->var.bits_per_pixel),
- (char *)pvr2_get_param(cables, NULL, cable_type, 3),
- (char *)pvr2_get_param(outputs, NULL, video_output, 3));
+ fb_info(fb_info, "%s (rev %ld.%ld) frame buffer device, using %ldk/%ldk of video memory\n",
+ fb_info->fix.id, (rev >> 4) & 0x0f, rev & 0x0f,
+ modememused >> 10,
+ (unsigned long)(fb_info->fix.smem_len >> 10));
+ fb_info(fb_info, "Mode %dx%d-%d pitch = %ld cable: %s video output: %s\n",
+ fb_info->var.xres, fb_info->var.yres,
+ fb_info->var.bits_per_pixel,
+ get_line_length(fb_info->var.xres, fb_info->var.bits_per_pixel),
+ (char *)pvr2_get_param(cables, NULL, cable_type, 3),
+ (char *)pvr2_get_param(outputs, NULL, video_output, 3));
#ifdef CONFIG_SH_STORE_QUEUES
- printk(KERN_NOTICE "fb%d: registering with SQ API\n", fb_info->node);
+ fb_notice(fb_info, "registering with SQ API\n");
pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len,
fb_info->fix.id, PAGE_SHARED);
- printk(KERN_NOTICE "fb%d: Mapped video memory to SQ addr 0x%lx\n",
- fb_info->node, pvr2fb_map);
+ fb_notice(fb_info, "Mapped video memory to SQ addr 0x%lx\n",
+ pvr2fb_map);
#endif
return 0;
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index aa9bd1f76d60..c95b9e46d48f 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -364,7 +364,7 @@ static void set_graphics_start(struct fb_info *info, int xoffset, int yoffset)
static void set_dumb_panel_control(struct fb_info *info)
{
struct pxa168fb_info *fbi = info->par;
- struct pxa168fb_mach_info *mi = fbi->dev->platform_data;
+ struct pxa168fb_mach_info *mi = dev_get_platdata(fbi->dev);
u32 x;
/*
@@ -407,7 +407,7 @@ static int pxa168fb_set_par(struct fb_info *info)
u32 x;
struct pxa168fb_mach_info *mi;
- mi = fbi->dev->platform_data;
+ mi = dev_get_platdata(fbi->dev);
/*
* Set additional mode info.
@@ -609,7 +609,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
struct clk *clk;
int irq, ret;
- mi = pdev->dev.platform_data;
+ mi = dev_get_platdata(&pdev->dev);
if (mi == NULL) {
dev_err(&pdev->dev, "no platform data defined\n");
return -EINVAL;
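The platform-data accesses converted in this hunk, and in the pxafb, s3c-fb,
s3c2410fb, sa1100fb, sh_mobile_hdmi, simplefb and tmiofb hunks that follow,
all switch to the dev_get_platdata() accessor. It is a thin wrapper around
the platform_data pointer (shown as a sketch rather than quoted verbatim
from <linux/device.h>):

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}

so the conversions are mechanical and do not change behaviour.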
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index eca2de45f7a6..1ecd9cec2921 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -457,7 +457,7 @@ static int pxafb_adjust_timing(struct pxafb_info *fbi,
static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct pxafb_info *fbi = (struct pxafb_info *)info;
- struct pxafb_mach_info *inf = fbi->dev->platform_data;
+ struct pxafb_mach_info *inf = dev_get_platdata(fbi->dev);
int err;
if (inf->fixed_modes) {
@@ -1230,7 +1230,7 @@ static unsigned int __smart_timing(unsigned time_ns, unsigned long lcd_clk)
static void setup_smart_timing(struct pxafb_info *fbi,
struct fb_var_screeninfo *var)
{
- struct pxafb_mach_info *inf = fbi->dev->platform_data;
+ struct pxafb_mach_info *inf = dev_get_platdata(fbi->dev);
struct pxafb_mode_info *mode = &inf->modes[0];
unsigned long lclk = clk_get_rate(fbi->clk);
unsigned t1, t2, t3, t4;
@@ -1258,14 +1258,14 @@ static void setup_smart_timing(struct pxafb_info *fbi,
static int pxafb_smart_thread(void *arg)
{
struct pxafb_info *fbi = arg;
- struct pxafb_mach_info *inf = fbi->dev->platform_data;
+ struct pxafb_mach_info *inf = dev_get_platdata(fbi->dev);
if (!inf->smart_update) {
pr_err("%s: not properly initialized, thread terminated\n",
__func__);
return -EINVAL;
}
- inf = fbi->dev->platform_data;
+ inf = dev_get_platdata(fbi->dev);
pr_debug("%s(): task starting\n", __func__);
@@ -1793,7 +1793,7 @@ static struct pxafb_info *pxafb_init_fbinfo(struct device *dev)
{
struct pxafb_info *fbi;
void *addr;
- struct pxafb_mach_info *inf = dev->platform_data;
+ struct pxafb_mach_info *inf = dev_get_platdata(dev);
/* Alloc the pxafb_info and pseudo_palette in one step */
fbi = kmalloc(sizeof(struct pxafb_info) + sizeof(u32) * 16, GFP_KERNEL);
@@ -1855,7 +1855,7 @@ static struct pxafb_info *pxafb_init_fbinfo(struct device *dev)
#ifdef CONFIG_FB_PXA_PARAMETERS
static int parse_opt_mode(struct device *dev, const char *this_opt)
{
- struct pxafb_mach_info *inf = dev->platform_data;
+ struct pxafb_mach_info *inf = dev_get_platdata(dev);
const char *name = this_opt+5;
unsigned int namelen = strlen(name);
@@ -1914,7 +1914,7 @@ done:
static int parse_opt(struct device *dev, char *this_opt)
{
- struct pxafb_mach_info *inf = dev->platform_data;
+ struct pxafb_mach_info *inf = dev_get_platdata(dev);
struct pxafb_mode_info *mode = &inf->modes[0];
char s[64];
@@ -2102,7 +2102,7 @@ static int pxafb_probe(struct platform_device *dev)
dev_dbg(&dev->dev, "pxafb_probe\n");
- inf = dev->dev.platform_data;
+ inf = dev_get_platdata(&dev->dev);
ret = -ENOMEM;
fbi = NULL;
if (!inf)
diff --git a/drivers/video/q40fb.c b/drivers/video/q40fb.c
index d44c7351de0f..7487f76f6275 100644
--- a/drivers/video/q40fb.c
+++ b/drivers/video/q40fb.c
@@ -119,8 +119,7 @@ static int q40fb_probe(struct platform_device *dev)
return -EINVAL;
}
- printk(KERN_INFO "fb%d: Q40 frame buffer alive and kicking !\n",
- info->node);
+ fb_info(info, "Q40 frame buffer alive and kicking !\n");
return 0;
}
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 9536715b5a1b..a5514acd2ac6 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -1185,11 +1185,6 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (rivafb_do_maximize(info, var, nom, den) < 0)
return -EINVAL;
- if (var->xoffset < 0)
- var->xoffset = 0;
- if (var->yoffset < 0)
- var->yoffset = 0;
-
/* truncate xoffset and yoffset to maximum if too high */
if (var->xoffset > var->xres_virtual - var->xres)
var->xoffset = var->xres_virtual - var->xres - 1;
diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
index 05c2dc3d4bc0..83433cb0dfba 100644
--- a/drivers/video/s1d13xxxfb.c
+++ b/drivers/video/s1d13xxxfb.c
@@ -777,8 +777,8 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
printk(KERN_INFO "Epson S1D13XXX FB Driver\n");
/* enable platform-dependent hardware glue, if any */
- if (pdev->dev.platform_data)
- pdata = pdev->dev.platform_data;
+ if (dev_get_platdata(&pdev->dev))
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->platform_init_video)
pdata->platform_init_video();
@@ -901,8 +901,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
goto bail;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
@@ -923,8 +922,8 @@ static int s1d13xxxfb_suspend(struct platform_device *dev, pm_message_t state)
lcd_enable(s1dfb, 0);
crt_enable(s1dfb, 0);
- if (dev->dev.platform_data)
- pdata = dev->dev.platform_data;
+ if (dev_get_platdata(&dev->dev))
+ pdata = dev_get_platdata(&dev->dev);
#if 0
if (!s1dfb->disp_save)
@@ -973,8 +972,8 @@ static int s1d13xxxfb_resume(struct platform_device *dev)
while ((s1d13xxxfb_readreg(s1dfb, S1DREG_PS_STATUS) & 0x01))
udelay(10);
- if (dev->dev.platform_data)
- pdata = dev->dev.platform_data;
+ if (dev_get_platdata(&dev->dev))
+ pdata = dev_get_platdata(&dev->dev);
if (s1dfb->regs_save) {
/* will write RO regs, *should* get away with it :) */
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 2e7991c7ca08..62acae2694a9 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -1378,7 +1378,7 @@ static int s3c_fb_probe(struct platform_device *pdev)
return -EINVAL;
}
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (!pd) {
dev_err(dev, "no platform data specified\n");
return -EINVAL;
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 21a32adbb8ea..81af5a63e9e1 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -123,7 +123,7 @@ static int s3c2410fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct s3c2410fb_info *fbi = info->par;
- struct s3c2410fb_mach_info *mach_info = fbi->dev->platform_data;
+ struct s3c2410fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
struct s3c2410fb_display *display = NULL;
struct s3c2410fb_display *default_display = mach_info->displays +
mach_info->default_display;
@@ -686,7 +686,7 @@ static inline void modify_gpio(void __iomem *reg,
static int s3c2410fb_init_registers(struct fb_info *info)
{
struct s3c2410fb_info *fbi = info->par;
- struct s3c2410fb_mach_info *mach_info = fbi->dev->platform_data;
+ struct s3c2410fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
unsigned long flags;
void __iomem *regs = fbi->io;
void __iomem *tpal;
@@ -833,7 +833,7 @@ static int s3c24xxfb_probe(struct platform_device *pdev,
int size;
u32 lcdcon1;
- mach_info = pdev->dev.platform_data;
+ mach_info = dev_get_platdata(&pdev->dev);
if (mach_info == NULL) {
dev_err(&pdev->dev,
"no platform data for lcd, cannot attach\n");
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index d838ba829459..968b2997175a 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -306,8 +306,8 @@ static void s3fb_settile_fast(struct fb_info *info, struct fb_tilemap *map)
if ((map->width != 8) || (map->height != 16) ||
(map->depth != 1) || (map->length != 256)) {
- printk(KERN_ERR "fb%d: unsupported font parameters: width %d, height %d, depth %d, length %d\n",
- info->node, map->width, map->height, map->depth, map->length);
+ fb_err(info, "unsupported font parameters: width %d, height %d, depth %d, length %d\n",
+ map->width, map->height, map->depth, map->length);
return;
}
@@ -476,7 +476,7 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
rv = svga_compute_pll((par->chip == CHIP_365_TRIO3D) ? &s3_trio3d_pll : &s3_pll,
1000000000 / pixclock, &m, &n, &r, info->node);
if (rv < 0) {
- printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
+ fb_err(info, "cannot set requested pixclock, keeping old value\n");
return;
}
@@ -569,7 +569,7 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
rv = -EINVAL;
if (rv < 0) {
- printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
+ fb_err(info, "unsupported mode requested\n");
return rv;
}
@@ -587,22 +587,21 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
/* Check whether have enough memory */
mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
if (mem > info->screen_size) {
- printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n",
- info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
+ fb_err(info, "not enough framebuffer memory (%d kB requested , %u kB available)\n",
+ mem >> 10, (unsigned int) (info->screen_size >> 10));
return -EINVAL;
}
rv = svga_check_timings (&s3_timing_regs, var, info->node);
if (rv < 0) {
- printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
+ fb_err(info, "invalid timings requested\n");
return rv;
}
rv = svga_compute_pll(&s3_pll, PICOS2KHZ(var->pixclock), &m, &n, &r,
info->node);
if (rv < 0) {
- printk(KERN_ERR "fb%d: invalid pixclock value requested\n",
- info->node);
+ fb_err(info, "invalid pixclock value requested\n");
return rv;
}
@@ -686,7 +685,7 @@ static int s3fb_set_par(struct fb_info *info)
/* Set the offset register */
- pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
+ fb_dbg(info, "offset register : %d\n", offset_value);
svga_wcrt_multi(par->state.vgabase, s3_offset_regs, offset_value);
if (par->chip != CHIP_357_VIRGE_GX2 &&
@@ -769,7 +768,7 @@ static int s3fb_set_par(struct fb_info *info)
/* Set mode-specific register values */
switch (mode) {
case 0:
- pr_debug("fb%d: text mode\n", info->node);
+ fb_dbg(info, "text mode\n");
svga_set_textmode_vga_regs(par->state.vgabase);
/* Set additional registers like in 8-bit mode */
@@ -780,12 +779,12 @@ static int s3fb_set_par(struct fb_info *info)
svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
if (fasttext) {
- pr_debug("fb%d: high speed text mode set\n", info->node);
+ fb_dbg(info, "high speed text mode set\n");
svga_wcrt_mask(par->state.vgabase, 0x31, 0x40, 0x40);
}
break;
case 1:
- pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
+ fb_dbg(info, "4 bit pseudocolor\n");
vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
/* Set additional registers like in 8-bit mode */
@@ -796,7 +795,7 @@ static int s3fb_set_par(struct fb_info *info)
svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
break;
case 2:
- pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
+ fb_dbg(info, "4 bit pseudocolor, planar\n");
/* Set additional registers like in 8-bit mode */
svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
@@ -806,7 +805,7 @@ static int s3fb_set_par(struct fb_info *info)
svga_wcrt_mask(par->state.vgabase, 0x3A, 0x00, 0x30);
break;
case 3:
- pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
+ fb_dbg(info, "8 bit pseudocolor\n");
svga_wcrt_mask(par->state.vgabase, 0x50, 0x00, 0x30);
if (info->var.pixclock > 20000 ||
par->chip == CHIP_357_VIRGE_GX2 ||
@@ -822,7 +821,7 @@ static int s3fb_set_par(struct fb_info *info)
}
break;
case 4:
- pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
+ fb_dbg(info, "5/5/5 truecolor\n");
if (par->chip == CHIP_988_VIRGE_VX) {
if (info->var.pixclock > 20000)
svga_wcrt_mask(par->state.vgabase, 0x67, 0x20, 0xF0);
@@ -850,7 +849,7 @@ static int s3fb_set_par(struct fb_info *info)
}
break;
case 5:
- pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
+ fb_dbg(info, "5/6/5 truecolor\n");
if (par->chip == CHIP_988_VIRGE_VX) {
if (info->var.pixclock > 20000)
svga_wcrt_mask(par->state.vgabase, 0x67, 0x40, 0xF0);
@@ -879,16 +878,16 @@ static int s3fb_set_par(struct fb_info *info)
break;
case 6:
/* VIRGE VX case */
- pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
+ fb_dbg(info, "8/8/8 truecolor\n");
svga_wcrt_mask(par->state.vgabase, 0x67, 0xD0, 0xF0);
break;
case 7:
- pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
+ fb_dbg(info, "8/8/8/8 truecolor\n");
svga_wcrt_mask(par->state.vgabase, 0x50, 0x30, 0x30);
svga_wcrt_mask(par->state.vgabase, 0x67, 0xD0, 0xF0);
break;
default:
- printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
+ fb_err(info, "unsupported mode - bug\n");
return -EINVAL;
}
@@ -991,27 +990,27 @@ static int s3fb_blank(int blank_mode, struct fb_info *info)
switch (blank_mode) {
case FB_BLANK_UNBLANK:
- pr_debug("fb%d: unblank\n", info->node);
+ fb_dbg(info, "unblank\n");
svga_wcrt_mask(par->state.vgabase, 0x56, 0x00, 0x06);
svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
break;
case FB_BLANK_NORMAL:
- pr_debug("fb%d: blank\n", info->node);
+ fb_dbg(info, "blank\n");
svga_wcrt_mask(par->state.vgabase, 0x56, 0x00, 0x06);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_HSYNC_SUSPEND:
- pr_debug("fb%d: hsync\n", info->node);
+ fb_dbg(info, "hsync\n");
svga_wcrt_mask(par->state.vgabase, 0x56, 0x02, 0x06);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_VSYNC_SUSPEND:
- pr_debug("fb%d: vsync\n", info->node);
+ fb_dbg(info, "vsync\n");
svga_wcrt_mask(par->state.vgabase, 0x56, 0x04, 0x06);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_POWERDOWN:
- pr_debug("fb%d: sync down\n", info->node);
+ fb_dbg(info, "sync down\n");
svga_wcrt_mask(par->state.vgabase, 0x56, 0x06, 0x06);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
@@ -1352,13 +1351,16 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto err_reg_fb;
}
- printk(KERN_INFO "fb%d: %s on %s, %d MB RAM, %d MHz MCLK\n", info->node, info->fix.id,
- pci_name(dev), info->fix.smem_len >> 20, (par->mclk_freq + 500) / 1000);
+ fb_info(info, "%s on %s, %d MB RAM, %d MHz MCLK\n",
+ info->fix.id, pci_name(dev),
+ info->fix.smem_len >> 20, (par->mclk_freq + 500) / 1000);
if (par->chip == CHIP_UNKNOWN)
- printk(KERN_INFO "fb%d: unknown chip, CR2D=%x, CR2E=%x, CRT2F=%x, CRT30=%x\n",
- info->node, vga_rcrt(par->state.vgabase, 0x2d), vga_rcrt(par->state.vgabase, 0x2e),
- vga_rcrt(par->state.vgabase, 0x2f), vga_rcrt(par->state.vgabase, 0x30));
+ fb_info(info, "unknown chip, CR2D=%x, CR2E=%x, CRT2F=%x, CRT30=%x\n",
+ vga_rcrt(par->state.vgabase, 0x2d),
+ vga_rcrt(par->state.vgabase, 0x2e),
+ vga_rcrt(par->state.vgabase, 0x2f),
+ vga_rcrt(par->state.vgabase, 0x30));
/* Record a reference to the driver data */
pci_set_drvdata(dev, info);
@@ -1424,7 +1426,6 @@ static void s3_pci_remove(struct pci_dev *dev)
pci_release_regions(dev);
/* pci_disable_device(dev); */
- pci_set_drvdata(dev, NULL);
framebuffer_release(info);
}
}
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c
index de76da0c6429..580c444ec301 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/sa1100fb.c
@@ -1116,7 +1116,7 @@ static struct fb_monspecs monspecs = {
static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
{
- struct sa1100fb_mach_info *inf = dev->platform_data;
+ struct sa1100fb_mach_info *inf = dev_get_platdata(dev);
struct sa1100fb_info *fbi;
unsigned i;
@@ -1201,7 +1201,7 @@ static int sa1100fb_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform LCD data\n");
return -EINVAL;
}
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 741b2395d01e..4dbf45f3b21a 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2362,12 +2362,6 @@ static void savagefb_remove(struct pci_dev *dev)
kfree(info->pixmap.addr);
pci_release_regions(dev);
framebuffer_release(info);
-
- /*
- * Ensure that the driver data is no longer
- * valid.
- */
- pci_set_drvdata(dev, NULL);
}
}
diff --git a/drivers/video/sbuslib.c b/drivers/video/sbuslib.c
index 296afae442f4..a350209ffbd3 100644
--- a/drivers/video/sbuslib.c
+++ b/drivers/video/sbuslib.c
@@ -186,7 +186,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
}
default:
return -EINVAL;
- };
+ }
}
EXPORT_SYMBOL(sbusfb_ioctl_helper);
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index a9ac3ce2d0e9..bc74d0408998 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -803,8 +803,8 @@ static int sgivwfb_probe(struct platform_device *dev)
platform_set_drvdata(dev, info);
- printk(KERN_INFO "fb%d: SGI DBE frame buffer device, using %ldK of video memory at %#lx\n",
- info->node, sgivwfb_mem_size >> 10, sgivwfb_mem_phys);
+ fb_info(info, "SGI DBE frame buffer device, using %ldK of video memory at %#lx\n",
+ sgivwfb_mem_size >> 10, sgivwfb_mem_phys);
return 0;
fail_register_framebuffer:
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index bfe4728480fd..5e2845b9f3a8 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -498,7 +498,7 @@ static void sh_hdmi_video_config(struct sh_hdmi *hdmi)
static void sh_hdmi_audio_config(struct sh_hdmi *hdmi)
{
u8 data;
- struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data;
+ struct sh_mobile_hdmi_info *pdata = dev_get_platdata(hdmi->dev);
/*
* [7:4] L/R data swap control
@@ -815,7 +815,7 @@ static unsigned long sh_hdmi_rate_error(struct sh_hdmi *hdmi,
unsigned long *hdmi_rate, unsigned long *parent_rate)
{
unsigned long target = PICOS2KHZ(mode->pixclock) * 1000, rate_error;
- struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data;
+ struct sh_mobile_hdmi_info *pdata = dev_get_platdata(hdmi->dev);
*hdmi_rate = clk_round_rate(hdmi->hdmi_clk, target);
if ((long)*hdmi_rate < 0)
@@ -1271,7 +1271,7 @@ static void sh_hdmi_htop1_init(struct sh_hdmi *hdmi)
static int __init sh_hdmi_probe(struct platform_device *pdev)
{
- struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data;
+ struct sh_mobile_hdmi_info *pdata = dev_get_platdata(&pdev->dev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct resource *htop1_res;
int irq = platform_get_irq(pdev, 0), ret;
@@ -1290,7 +1290,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
}
}
- hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi) {
dev_err(&pdev->dev, "Cannot allocate device data\n");
return -ENOMEM;
@@ -1304,7 +1304,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
if (IS_ERR(hdmi->hdmi_clk)) {
ret = PTR_ERR(hdmi->hdmi_clk);
dev_err(&pdev->dev, "Unable to get clock: %d\n", ret);
- goto egetclk;
+ return ret;
}
/* select register access functions */
@@ -1407,8 +1407,6 @@ ereqreg:
clk_disable(hdmi->hdmi_clk);
erate:
clk_put(hdmi->hdmi_clk);
-egetclk:
- kfree(hdmi);
return ret;
}
@@ -1433,7 +1431,6 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
iounmap(hdmi->htop1);
iounmap(hdmi->base);
release_mem_region(res->start, resource_size(res));
- kfree(hdmi);
return 0;
}
diff --git a/drivers/video/simplefb.c b/drivers/video/simplefb.c
index 8d7810613058..210f3a02121a 100644
--- a/drivers/video/simplefb.c
+++ b/drivers/video/simplefb.c
@@ -66,8 +66,15 @@ static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
return 0;
}
+static void simplefb_destroy(struct fb_info *info)
+{
+ if (info->screen_base)
+ iounmap(info->screen_base);
+}
+
static struct fb_ops simplefb_ops = {
.owner = THIS_MODULE,
+ .fb_destroy = simplefb_destroy,
.fb_setcolreg = simplefb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
@@ -132,7 +139,7 @@ static int simplefb_parse_dt(struct platform_device *pdev,
static int simplefb_parse_pd(struct platform_device *pdev,
struct simplefb_params *params)
{
- struct simplefb_platform_data *pd = pdev->dev.platform_data;
+ struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
int i;
params->width = pd->width;
@@ -167,7 +174,7 @@ static int simplefb_probe(struct platform_device *pdev)
return -ENODEV;
ret = -ENODEV;
- if (pdev->dev.platform_data)
+ if (dev_get_platdata(&pdev->dev))
ret = simplefb_parse_pd(pdev, &params);
else if (pdev->dev.of_node)
ret = simplefb_parse_dt(pdev, &params);
@@ -212,17 +219,26 @@ static int simplefb_probe(struct platform_device *pdev)
info->fbops = &simplefb_ops;
info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE;
- info->screen_base = devm_ioremap(&pdev->dev, info->fix.smem_start,
- info->fix.smem_len);
+ info->screen_base = ioremap_wc(info->fix.smem_start,
+ info->fix.smem_len);
if (!info->screen_base) {
framebuffer_release(info);
return -ENODEV;
}
info->pseudo_palette = (void *)(info + 1);
+ dev_info(&pdev->dev, "framebuffer at 0x%lx, 0x%x bytes, mapped to 0x%p\n",
+ info->fix.smem_start, info->fix.smem_len,
+ info->screen_base);
+ dev_info(&pdev->dev, "format=%s, mode=%dx%dx%d, linelength=%d\n",
+ params.format->name,
+ info->var.xres, info->var.yres,
+ info->var.bits_per_pixel, info->fix.line_length);
+
ret = register_framebuffer(info);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
+ iounmap(info->screen_base);
framebuffer_release(info);
return ret;
}
diff --git a/drivers/video/sis/init.c b/drivers/video/sis/init.c
index f082ae55c0c9..4f26bc28e60b 100644
--- a/drivers/video/sis/init.c
+++ b/drivers/video/sis/init.c
@@ -3320,9 +3320,8 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
}
#ifndef GETBITSTR
-#define BITMASK(h,l) (((unsigned)(1U << ((h)-(l)+1))-1)<<(l))
-#define GENMASK(mask) BITMASK(1?mask,0?mask)
-#define GETBITS(var,mask) (((var) & GENMASK(mask)) >> (0?mask))
+#define GENBITSMASK(mask) GENMASK(1?mask,0?mask)
+#define GETBITS(var,mask) (((var) & GENBITSMASK(mask)) >> (0?mask))
#define GETBITSTR(val,from,to) ((GETBITS(val,from)) << (0?to))
#endif
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 977e27927a21..22ad028bf123 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -5994,7 +5994,6 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if(!ivideo->sisvga_enabled) {
if(pci_enable_device(pdev)) {
if(ivideo->nbridge) pci_dev_put(ivideo->nbridge);
- pci_set_drvdata(pdev, NULL);
framebuffer_release(sis_fb_info);
return -EIO;
}
@@ -6211,7 +6210,6 @@ error_3: vfree(ivideo->bios_abase);
pci_dev_put(ivideo->lpcdev);
if(ivideo->nbridge)
pci_dev_put(ivideo->nbridge);
- pci_set_drvdata(pdev, NULL);
if(!ivideo->sisvga_enabled)
pci_disable_device(pdev);
framebuffer_release(sis_fb_info);
@@ -6480,8 +6478,8 @@ error_3: vfree(ivideo->bios_abase);
"disabled");
- printk(KERN_INFO "fb%d: %s frame buffer device version %d.%d.%d\n",
- sis_fb_info->node, ivideo->myid, VER_MAJOR, VER_MINOR, VER_LEVEL);
+ fb_info(sis_fb_info, "%s frame buffer device version %d.%d.%d\n",
+ ivideo->myid, VER_MAJOR, VER_MINOR, VER_LEVEL);
printk(KERN_INFO "sisfb: Copyright (C) 2001-2005 Thomas Winischhofer\n");
@@ -6523,8 +6521,6 @@ static void sisfb_remove(struct pci_dev *pdev)
mtrr_del(ivideo->mtrr, ivideo->video_base, ivideo->video_size);
#endif
- pci_set_drvdata(pdev, NULL);
-
/* If device was disabled when starting, disable
* it when quitting.
*/
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 2d4694c6b9e0..fefde7c6add7 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -824,8 +824,7 @@ static int xxxfb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
fb_dealloc_cmap(&info->cmap);
return -EINVAL;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
pci_set_drvdata(dev, info); /* or platform_set_drvdata(pdev, info) */
return 0;
}
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index e188ada2ffd1..d513ed6a49f2 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -1147,7 +1147,7 @@ static void ufx_free_framebuffer_work(struct work_struct *work)
fb_destroy_modelist(&info->modelist);
- dev->info = 0;
+ dev->info = NULL;
/* Assume info structure is freed after this point */
framebuffer_release(info);
diff --git a/drivers/video/ssd1307fb.c b/drivers/video/ssd1307fb.c
index 44967c8fef2b..f4daa59f0a80 100644
--- a/drivers/video/ssd1307fb.c
+++ b/drivers/video/ssd1307fb.c
@@ -569,7 +569,7 @@ static struct i2c_driver ssd1307fb_driver = {
.id_table = ssd1307fb_i2c_id,
.driver = {
.name = "ssd1307fb",
- .of_match_table = of_match_ptr(ssd1307fb_of_match),
+ .of_match_table = ssd1307fb_of_match,
.owner = THIS_MODULE,
},
};
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index 9c00026e3ae2..f0cb279ef333 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -706,10 +706,10 @@ static void sstfb_setvgapass( struct fb_info *info, int enable )
fbiinit0 = sst_read (FBIINIT0);
if (par->vgapass) {
sst_write(FBIINIT0, fbiinit0 & ~DIS_VGA_PASSTHROUGH);
- printk(KERN_INFO "fb%d: Enabling VGA pass-through\n", info->node );
+ fb_info(info, "Enabling VGA pass-through\n");
} else {
sst_write(FBIINIT0, fbiinit0 | DIS_VGA_PASSTHROUGH);
- printk(KERN_INFO "fb%d: Disabling VGA pass-through\n", info->node );
+ fb_info(info, "Disabling VGA pass-through\n");
}
pci_write_config_dword(sst_dev, PCI_INIT_ENABLE, tmp);
}
@@ -1437,8 +1437,8 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(KERN_WARNING "sstfb: can't create sysfs entry.\n");
- printk(KERN_INFO "fb%d: %s frame buffer device at 0x%p\n",
- info->node, fix->id, info->screen_base);
+ fb_info(info, "%s frame buffer device at 0x%p\n",
+ fix->id, info->screen_base);
return 0;
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index 876648e15e9d..a943a7cbaf7f 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -1283,9 +1283,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
sti->info = info; /* save for unregister_framebuffer() */
- printk(KERN_INFO
- "fb%d: %s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
- fb->info.node,
+ fb_info(&fb->info, "%s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
fix->id,
var->xres,
var->yres,
diff --git a/drivers/video/sunxvr1000.c b/drivers/video/sunxvr1000.c
index cc6f48bba36b..58241b47a96d 100644
--- a/drivers/video/sunxvr1000.c
+++ b/drivers/video/sunxvr1000.c
@@ -186,8 +186,6 @@ static int gfb_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/svgalib.c b/drivers/video/svgalib.c
index 33df9ec91795..9e01322fabe3 100644
--- a/drivers/video/svgalib.c
+++ b/drivers/video/svgalib.c
@@ -198,8 +198,8 @@ void svga_settile(struct fb_info *info, struct fb_tilemap *map)
if ((map->width != 8) || (map->height != 16) ||
(map->depth != 1) || (map->length != 256)) {
- printk(KERN_ERR "fb%d: unsupported font parameters: width %d, height %d, depth %d, length %d\n",
- info->node, map->width, map->height, map->depth, map->length);
+ fb_err(info, "unsupported font parameters: width %d, height %d, depth %d, length %d\n",
+ map->width, map->height, map->depth, map->length);
return;
}
diff --git a/drivers/video/sysimgblt.c b/drivers/video/sysimgblt.c
index 186c6f607be2..a4d05b1b17d7 100644
--- a/drivers/video/sysimgblt.c
+++ b/drivers/video/sysimgblt.c
@@ -152,7 +152,7 @@ static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
}
shift += bpp;
shift &= (32 - 1);
- if (!l) { l = 8; s++; };
+ if (!l) { l = 8; s++; }
}
/* write trailing bits */
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index c000852500aa..7fb2d696fac7 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -232,7 +232,7 @@ tcx_blank(int blank, struct fb_info *info)
case FB_BLANK_POWERDOWN: /* Poweroff */
break;
- };
+ }
sbus_writel(val, &thc->thc_misc);
@@ -434,7 +434,7 @@ static int tcx_probe(struct platform_device *op)
default:
j = i;
break;
- };
+ }
par->mmap_map[i].poff = op->resource[j].start;
}
@@ -498,8 +498,6 @@ static int tcx_remove(struct platform_device *op)
framebuffer_release(info);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 64bc28ba4037..f761fe375f5b 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -1646,7 +1646,6 @@ static void tdfxfb_remove(struct pci_dev *pdev)
pci_resource_len(pdev, 1));
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
- pci_set_drvdata(pdev, NULL);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index c9c8e5a1fdee..f28674fea909 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -1671,8 +1671,8 @@ static int tgafb_register(struct device *dev)
if (tga_bus_tc)
pr_info("tgafb: SFB+ detected, rev=0x%02x\n",
par->tga_chip_rev);
- pr_info("fb%d: %s frame buffer device at 0x%lx\n",
- info->node, info->fix.id, (long)bar0_start);
+ fb_info(info, "%s frame buffer device at 0x%lx\n",
+ info->fix.id, (long)bar0_start);
return 0;
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index deb8733f3c70..7fb4e321a431 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -250,7 +250,7 @@ static irqreturn_t tmiofb_irq(int irq, void *__info)
*/
static int tmiofb_hw_stop(struct platform_device *dev)
{
- struct tmio_fb_data *data = dev->dev.platform_data;
+ struct tmio_fb_data *data = dev_get_platdata(&dev->dev);
struct fb_info *info = platform_get_drvdata(dev);
struct tmiofb_par *par = info->par;
@@ -311,7 +311,7 @@ static int tmiofb_hw_init(struct platform_device *dev)
*/
static void tmiofb_hw_mode(struct platform_device *dev)
{
- struct tmio_fb_data *data = dev->dev.platform_data;
+ struct tmio_fb_data *data = dev_get_platdata(&dev->dev);
struct fb_info *info = platform_get_drvdata(dev);
struct fb_videomode *mode = info->mode;
struct tmiofb_par *par = info->par;
@@ -557,7 +557,7 @@ static int tmiofb_ioctl(struct fb_info *fbi,
static struct fb_videomode *
tmiofb_find_mode(struct fb_info *info, struct fb_var_screeninfo *var)
{
- struct tmio_fb_data *data = info->device->platform_data;
+ struct tmio_fb_data *data = dev_get_platdata(info->device);
struct fb_videomode *best = NULL;
int i;
@@ -577,7 +577,7 @@ static int tmiofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct fb_videomode *mode;
- struct tmio_fb_data *data = info->device->platform_data;
+ struct tmio_fb_data *data = dev_get_platdata(info->device);
mode = tmiofb_find_mode(info, var);
if (!mode || var->bits_per_pixel > 16)
@@ -678,7 +678,7 @@ static struct fb_ops tmiofb_ops = {
static int tmiofb_probe(struct platform_device *dev)
{
const struct mfd_cell *cell = mfd_get_cell(dev);
- struct tmio_fb_data *data = dev->dev.platform_data;
+ struct tmio_fb_data *data = dev_get_platdata(&dev->dev);
struct resource *ccr = platform_get_resource(dev, IORESOURCE_MEM, 1);
struct resource *lcr = platform_get_resource(dev, IORESOURCE_MEM, 0);
struct resource *vram = platform_get_resource(dev, IORESOURCE_MEM, 2);
@@ -781,8 +781,7 @@ static int tmiofb_probe(struct platform_device *dev)
if (retval < 0)
goto err_register_framebuffer;
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index ab57d387d6b5..7ed9a227f5ea 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -1553,7 +1553,6 @@ static void trident_pci_remove(struct pci_dev *dev)
iounmap(info->screen_base);
release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
- pci_set_drvdata(dev, NULL);
kfree(info->pixmap.addr);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index d2e5bc3cf969..025f14e30eed 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -1166,7 +1166,7 @@ static int dlfb_realloc_framebuffer(struct dlfb_data *dev, struct fb_info *info)
int new_len;
unsigned char *old_fb = info->screen_base;
unsigned char *new_fb;
- unsigned char *new_back = 0;
+ unsigned char *new_back = NULL;
pr_warn("Reallocating framebuffer. Addresses will change!\n");
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 7aec6f39fdd5..256fba7f4641 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -233,8 +233,7 @@ out:
static void uvesafb_free(struct uvesafb_ktask *task)
{
if (task) {
- if (task->done)
- kfree(task->done);
+ kfree(task->done);
kfree(task);
}
}
@@ -1332,8 +1331,8 @@ setmode:
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
info->fix.line_length = mode->bytes_per_scan_line;
-out: if (crtc != NULL)
- kfree(crtc);
+out:
+ kfree(crtc);
uvesafb_free(task);
return err;
@@ -1771,13 +1770,11 @@ static int uvesafb_probe(struct platform_device *dev)
"using %dk, total %dk\n", info->fix.smem_start,
info->screen_base, info->fix.smem_len/1024,
par->vbe_ib.total_memory * 64);
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
err = sysfs_create_group(&dev->dev.kobj, &uvesafb_dev_attgrp);
if (err != 0)
- printk(KERN_WARNING "fb%d: failed to register attributes\n",
- info->node);
+ fb_warn(info, "failed to register attributes\n");
return 0;
@@ -1793,8 +1790,7 @@ out_mode:
fb_destroy_modedb(info->monspecs.modedb);
fb_dealloc_cmap(&info->cmap);
out:
- if (par->vbe_modes)
- kfree(par->vbe_modes);
+ kfree(par->vbe_modes);
framebuffer_release(info);
return err;
@@ -1817,12 +1813,9 @@ static int uvesafb_remove(struct platform_device *dev)
fb_dealloc_cmap(&info->cmap);
if (par) {
- if (par->vbe_modes)
- kfree(par->vbe_modes);
- if (par->vbe_state_orig)
- kfree(par->vbe_state_orig);
- if (par->vbe_state_saved)
- kfree(par->vbe_state_saved);
+ kfree(par->vbe_modes);
+ kfree(par->vbe_state_orig);
+ kfree(par->vbe_state_saved);
}
framebuffer_release(info);
diff --git a/drivers/video/valkyriefb.c b/drivers/video/valkyriefb.c
index 3f5a041601da..e287ebc47817 100644
--- a/drivers/video/valkyriefb.c
+++ b/drivers/video/valkyriefb.c
@@ -392,7 +392,7 @@ int __init valkyriefb_init(void)
if ((err = register_framebuffer(&p->info)) != 0)
goto out_cmap_free;
- printk(KERN_INFO "fb%d: valkyrie frame buffer device\n", p->info.node);
+ fb_info(&p->info, "valkyrie frame buffer device\n");
return 0;
out_cmap_free:
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index bd83233ec227..1c7da3b098d6 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -489,8 +489,7 @@ static int vesafb_probe(struct platform_device *dev)
fb_dealloc_cmap(&info->cmap);
goto err;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
err:
if (info->screen_base)
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index ee5985efa15c..70a897b1e458 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -390,9 +390,8 @@ static int vfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
if (var->vmode & FB_VMODE_YWRAP) {
- if (var->yoffset < 0
- || var->yoffset >= info->var.yres_virtual
- || var->xoffset)
+ if (var->yoffset >= info->var.yres_virtual ||
+ var->xoffset)
return -EINVAL;
} else {
if (var->xoffset + info->var.xres > info->var.xres_virtual ||
@@ -527,9 +526,8 @@ static int vfb_probe(struct platform_device *dev)
goto err2;
platform_set_drvdata(dev, info);
- printk(KERN_INFO
- "fb%d: Virtual frame buffer device, using %ldK of video memory\n",
- info->node, videomemorysize >> 10);
+ fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n",
+ videomemorysize >> 10);
return 0;
err2:
fb_dealloc_cmap(&info->cmap);
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 2827333703d9..283d335a759f 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1377,8 +1377,7 @@ static int vga16fb_probe(struct platform_device *dev)
goto err_check_var;
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
platform_set_drvdata(dev, info);
return 0;
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c
index 897484903c30..b30e5a439d1f 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/vt8500lcdfb.c
@@ -365,7 +365,7 @@ static int vt8500lcd_probe(struct platform_device *pdev)
if (!fb_mem_virt) {
pr_err("%s: Failed to allocate framebuffer\n", __func__);
return -ENOMEM;
- };
+ }
fbi->fb.fix.smem_start = fb_mem_phys;
fbi->fb.fix.smem_len = fb_mem_len;
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index e9557fa014ee..8bc6e0958a09 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -266,7 +266,7 @@ static void vt8623_set_pixclock(struct fb_info *info, u32 pixclock)
rv = svga_compute_pll(&vt8623_pll, 1000000000 / pixclock, &m, &n, &r, info->node);
if (rv < 0) {
- printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
+ fb_err(info, "cannot set requested pixclock, keeping old value\n");
return;
}
@@ -335,7 +335,7 @@ static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf
rv = svga_match_format (vt8623fb_formats, var, NULL);
if (rv < 0)
{
- printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
+ fb_err(info, "unsupported mode requested\n");
return rv;
}
@@ -354,21 +354,23 @@ static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf
mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
if (mem > info->screen_size)
{
- printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
+ fb_err(info, "not enough framebuffer memory (%d kB requested, %d kB available)\n",
+ mem >> 10, (unsigned int) (info->screen_size >> 10));
return -EINVAL;
}
/* Text mode is limited to 256 kB of memory */
if ((var->bits_per_pixel == 0) && (mem > (256*1024)))
{
- printk(KERN_ERR "fb%d: text framebuffer size too large (%d kB requested, 256 kB possible)\n", info->node, mem >> 10);
+ fb_err(info, "text framebuffer size too large (%d kB requested, 256 kB possible)\n",
+ mem >> 10);
return -EINVAL;
}
rv = svga_check_timings (&vt8623_timing_regs, var, info->node);
if (rv < 0)
{
- printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
+ fb_err(info, "invalid timings requested\n");
return rv;
}
@@ -474,32 +476,32 @@ static int vt8623fb_set_par(struct fb_info *info)
mode = svga_match_format(vt8623fb_formats, &(info->var), &(info->fix));
switch (mode) {
case 0:
- pr_debug("fb%d: text mode\n", info->node);
+ fb_dbg(info, "text mode\n");
svga_set_textmode_vga_regs(par->state.vgabase);
svga_wseq_mask(par->state.vgabase, 0x15, 0x00, 0xFE);
svga_wcrt_mask(par->state.vgabase, 0x11, 0x60, 0x70);
break;
case 1:
- pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
+ fb_dbg(info, "4 bit pseudocolor\n");
vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
svga_wseq_mask(par->state.vgabase, 0x15, 0x20, 0xFE);
svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x70);
break;
case 2:
- pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
+ fb_dbg(info, "4 bit pseudocolor, planar\n");
svga_wseq_mask(par->state.vgabase, 0x15, 0x00, 0xFE);
svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x70);
break;
case 3:
- pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
+ fb_dbg(info, "8 bit pseudocolor\n");
svga_wseq_mask(par->state.vgabase, 0x15, 0x22, 0xFE);
break;
case 4:
- pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
+ fb_dbg(info, "5/6/5 truecolor\n");
svga_wseq_mask(par->state.vgabase, 0x15, 0xB6, 0xFE);
break;
case 5:
- pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
+ fb_dbg(info, "8/8/8 truecolor\n");
svga_wseq_mask(par->state.vgabase, 0x15, 0xAE, 0xFE);
break;
default:
@@ -584,27 +586,27 @@ static int vt8623fb_blank(int blank_mode, struct fb_info *info)
switch (blank_mode) {
case FB_BLANK_UNBLANK:
- pr_debug("fb%d: unblank\n", info->node);
+ fb_dbg(info, "unblank\n");
svga_wcrt_mask(par->state.vgabase, 0x36, 0x00, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
break;
case FB_BLANK_NORMAL:
- pr_debug("fb%d: blank\n", info->node);
+ fb_dbg(info, "blank\n");
svga_wcrt_mask(par->state.vgabase, 0x36, 0x00, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_HSYNC_SUSPEND:
- pr_debug("fb%d: DPMS standby (hsync off)\n", info->node);
+ fb_dbg(info, "DPMS standby (hsync off)\n");
svga_wcrt_mask(par->state.vgabase, 0x36, 0x10, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_VSYNC_SUSPEND:
- pr_debug("fb%d: DPMS suspend (vsync off)\n", info->node);
+ fb_dbg(info, "DPMS suspend (vsync off)\n");
svga_wcrt_mask(par->state.vgabase, 0x36, 0x20, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_POWERDOWN:
- pr_debug("fb%d: DPMS off (no sync)\n", info->node);
+ fb_dbg(info, "DPMS off (no sync)\n");
svga_wcrt_mask(par->state.vgabase, 0x36, 0x30, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
@@ -769,12 +771,12 @@ static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
rc = register_framebuffer(info);
if (rc < 0) {
- dev_err(info->device, "cannot register framebugger\n");
+ dev_err(info->device, "cannot register framebuffer\n");
goto err_reg_fb;
}
- printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
- pci_name(dev), info->fix.smem_len >> 20);
+ fb_info(info, "%s on %s, %d MB RAM\n",
+ info->fix.id, pci_name(dev), info->fix.smem_len >> 20);
/* Record a reference to the driver data */
pci_set_drvdata(dev, info);
@@ -829,7 +831,6 @@ static void vt8623_pci_remove(struct pci_dev *dev)
pci_release_regions(dev);
/* pci_disable_device(dev); */
- pci_set_drvdata(dev, NULL);
framebuffer_release(info);
}
}
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 7a299e951f75..10951c82f6ed 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -680,7 +680,7 @@ int w100fb_probe(struct platform_device *pdev)
par = info->par;
platform_set_drvdata(pdev, info);
- inf = pdev->dev.platform_data;
+ inf = dev_get_platdata(&pdev->dev);
par->chip_id = chip_id;
par->mach = inf;
par->fastpll_mode = 0;
@@ -761,10 +761,9 @@ int w100fb_probe(struct platform_device *pdev)
err |= device_create_file(&pdev->dev, &dev_attr_flip);
if (err != 0)
- printk(KERN_WARNING "fb%d: failed to register attributes (%d)\n",
- info->node, err);
+ fb_warn(info, "failed to register attributes (%d)\n", err);
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
+ fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
out:
if (info) {
diff --git a/drivers/video/wm8505fb.c b/drivers/video/wm8505fb.c
index 3072f30cad19..537d199612af 100644
--- a/drivers/video/wm8505fb.c
+++ b/drivers/video/wm8505fb.c
@@ -372,14 +372,12 @@ static int wm8505fb_probe(struct platform_device *pdev)
}
ret = device_create_file(&pdev->dev, &dev_attr_contrast);
- if (ret < 0) {
- printk(KERN_WARNING "fb%d: failed to register attributes (%d)\n",
- fbi->fb.node, ret);
- }
+ if (ret < 0)
+ fb_warn(&fbi->fb, "failed to register attributes (%d)\n", ret);
- printk(KERN_INFO "fb%d: %s frame buffer at 0x%lx-0x%lx\n",
- fbi->fb.node, fbi->fb.fix.id, fbi->fb.fix.smem_start,
- fbi->fb.fix.smem_start + fbi->fb.fix.smem_len - 1);
+ fb_info(&fbi->fb, "%s frame buffer at 0x%lx-0x%lx\n",
+ fbi->fb.fix.id, fbi->fb.fix.smem_start,
+ fbi->fb.fix.smem_start + fbi->fb.fix.smem_len - 1);
return 0;
}
@@ -411,7 +409,7 @@ static struct platform_driver wm8505fb_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
- .of_match_table = of_match_ptr(wmt_dt_ids),
+ .of_match_table = wmt_dt_ids,
},
};
diff --git a/drivers/video/wmt_ge_rops.c b/drivers/video/wmt_ge_rops.c
index 4aaeb18223bc..b0a9f34b2e01 100644
--- a/drivers/video/wmt_ge_rops.c
+++ b/drivers/video/wmt_ge_rops.c
@@ -169,13 +169,13 @@ static struct platform_driver wmt_ge_rops_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "wmt_ge_rops",
- .of_match_table = of_match_ptr(wmt_dt_ids),
+ .of_match_table = wmt_dt_ids,
},
};
module_platform_driver(wmt_ge_rops_driver);
-MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com");
+MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
MODULE_DESCRIPTION("Accelerators for raster operations using "
"WonderMedia Graphics Engine");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index 84c664ea8eb9..6ff1a91e9dfd 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -260,10 +260,9 @@ static int xilinxfb_assign(struct platform_device *pdev,
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
drvdata->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(drvdata->regs)) {
- rc = PTR_ERR(drvdata->regs);
- goto err_region;
- }
+ if (IS_ERR(drvdata->regs))
+ return PTR_ERR(drvdata->regs);
+
drvdata->regs_phys = res->start;
}
@@ -279,11 +278,7 @@ static int xilinxfb_assign(struct platform_device *pdev,
if (!drvdata->fb_virt) {
dev_err(dev, "Could not allocate frame buffer memory\n");
- rc = -ENOMEM;
- if (drvdata->flags & BUS_ACCESS_FLAG)
- goto err_fbmem;
- else
- goto err_region;
+ return -ENOMEM;
}
/* Clear (turn to black) the framebuffer */
@@ -363,14 +358,6 @@ err_cmap:
/* Turn off the display */
xilinx_fb_out32(drvdata, REG_CTRL, 0);
-err_fbmem:
- if (drvdata->flags & BUS_ACCESS_FLAG)
- devm_iounmap(dev, drvdata->regs);
-
-err_region:
- kfree(drvdata);
- dev_set_drvdata(dev, NULL);
-
return rc;
}
@@ -395,17 +382,12 @@ static int xilinxfb_release(struct device *dev)
/* Turn off the display */
xilinx_fb_out32(drvdata, REG_CTRL, 0);
- /* Release the resources, as allocated based on interface */
- if (drvdata->flags & BUS_ACCESS_FLAG)
- devm_iounmap(dev, drvdata->regs);
#ifdef CONFIG_PPC_DCR
- else
+ /* Release the resources, as allocated based on interface */
+ if (!(drvdata->flags & BUS_ACCESS_FLAG))
dcr_unmap(drvdata->dcr_host, drvdata->dcr_len);
#endif
- kfree(drvdata);
- dev_set_drvdata(dev, NULL);
-
return 0;
}
@@ -413,7 +395,7 @@ static int xilinxfb_release(struct device *dev)
* OF bus binding
*/
-static int xilinxfb_of_probe(struct platform_device *op)
+static int xilinxfb_of_probe(struct platform_device *pdev)
{
const u32 *prop;
u32 tft_access = 0;
@@ -425,17 +407,15 @@ static int xilinxfb_of_probe(struct platform_device *op)
pdata = xilinx_fb_default_pdata;
/* Allocate the driver data region */
- drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata) {
- dev_err(&op->dev, "Couldn't allocate device private record\n");
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
return -ENOMEM;
- }
/*
* To check whether the core is connected directly to DCR or BUS
* interface and initialize the tft_access accordingly.
*/
- of_property_read_u32(op->dev.of_node, "xlnx,dcr-splb-slave-if",
+ of_property_read_u32(pdev->dev.of_node, "xlnx,dcr-splb-slave-if",
&tft_access);
/*
@@ -448,40 +428,39 @@ static int xilinxfb_of_probe(struct platform_device *op)
#ifdef CONFIG_PPC_DCR
else {
int start;
- start = dcr_resource_start(op->dev.of_node, 0);
- drvdata->dcr_len = dcr_resource_len(op->dev.of_node, 0);
- drvdata->dcr_host = dcr_map(op->dev.of_node, start, drvdata->dcr_len);
+ start = dcr_resource_start(pdev->dev.of_node, 0);
+ drvdata->dcr_len = dcr_resource_len(pdev->dev.of_node, 0);
+ drvdata->dcr_host = dcr_map(pdev->dev.of_node, start, drvdata->dcr_len);
if (!DCR_MAP_OK(drvdata->dcr_host)) {
- dev_err(&op->dev, "invalid DCR address\n");
- kfree(drvdata);
+ dev_err(&pdev->dev, "invalid DCR address\n");
return -ENODEV;
}
}
#endif
- prop = of_get_property(op->dev.of_node, "phys-size", &size);
+ prop = of_get_property(pdev->dev.of_node, "phys-size", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.screen_width_mm = prop[0];
pdata.screen_height_mm = prop[1];
}
- prop = of_get_property(op->dev.of_node, "resolution", &size);
+ prop = of_get_property(pdev->dev.of_node, "resolution", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.xres = prop[0];
pdata.yres = prop[1];
}
- prop = of_get_property(op->dev.of_node, "virtual-resolution", &size);
+ prop = of_get_property(pdev->dev.of_node, "virtual-resolution", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.xvirt = prop[0];
pdata.yvirt = prop[1];
}
- if (of_find_property(op->dev.of_node, "rotate-display", NULL))
+ if (of_find_property(pdev->dev.of_node, "rotate-display", NULL))
pdata.rotate_screen = 1;
- dev_set_drvdata(&op->dev, drvdata);
- return xilinxfb_assign(op, drvdata, &pdata);
+ dev_set_drvdata(&pdev->dev, drvdata);
+ return xilinxfb_assign(pdev, drvdata, &pdata);
}
static int xilinxfb_of_remove(struct platform_device *op)
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index d294f67d6f84..32c8fc5f7a5c 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/notifier.h>
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index ee59b74768d9..fed0ce198ae3 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -13,18 +13,24 @@ static ssize_t device_show(struct device *_d,
struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.device);
}
+static DEVICE_ATTR_RO(device);
+
static ssize_t vendor_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.vendor);
}
+static DEVICE_ATTR_RO(vendor);
+
static ssize_t status_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
}
+static DEVICE_ATTR_RO(status);
+
static ssize_t modalias_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
@@ -32,6 +38,8 @@ static ssize_t modalias_show(struct device *_d,
return sprintf(buf, "virtio:d%08Xv%08X\n",
dev->id.device, dev->id.vendor);
}
+static DEVICE_ATTR_RO(modalias);
+
static ssize_t features_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
@@ -47,14 +55,17 @@ static ssize_t features_show(struct device *_d,
len += sprintf(buf+len, "\n");
return len;
}
-static struct device_attribute virtio_dev_attrs[] = {
- __ATTR_RO(device),
- __ATTR_RO(vendor),
- __ATTR_RO(status),
- __ATTR_RO(modalias),
- __ATTR_RO(features),
- __ATTR_NULL
+static DEVICE_ATTR_RO(features);
+
+static struct attribute *virtio_dev_attrs[] = {
+ &dev_attr_device.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_status.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_features.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(virtio_dev);
static inline int virtio_id_match(const struct virtio_device *dev,
const struct virtio_device_id *id)
@@ -165,7 +176,7 @@ static int virtio_dev_remove(struct device *_d)
static struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
- .dev_attrs = virtio_dev_attrs,
+ .dev_groups = virtio_dev_groups,
.uevent = virtio_uevent,
.probe = virtio_dev_probe,
.remove = virtio_dev_remove,
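
The virtio.c hunk above moves the bus from the removed .dev_attrs array to .dev_groups, using DEVICE_ATTR_RO() plus ATTRIBUTE_GROUPS(). A minimal sketch of the same pattern on a hypothetical "foo" bus (illustrative only; every name below is invented, not taken from the patch):

        #include <linux/device.h>

        static ssize_t version_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
        {
                return sprintf(buf, "%d\n", 1);
        }
        static DEVICE_ATTR_RO(version);         /* defines dev_attr_version */

        static struct attribute *foo_dev_attrs[] = {
                &dev_attr_version.attr,
                NULL,
        };
        ATTRIBUTE_GROUPS(foo_dev);              /* emits foo_dev_groups[] */

        static struct bus_type foo_bus = {
                .name       = "foo",
                .dev_groups = foo_dev_groups,   /* replaces the old .dev_attrs */
        };
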
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 1f572c00a1be..c444654fc33f 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -275,9 +275,8 @@ static inline s64 towards_target(struct virtio_balloon *vb)
__le32 v;
s64 target;
- vb->vdev->config->get(vb->vdev,
- offsetof(struct virtio_balloon_config, num_pages),
- &v, sizeof(v));
+ virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &v);
+
target = le32_to_cpu(v);
return target - vb->num_pages;
}
@@ -286,9 +285,8 @@ static void update_balloon_size(struct virtio_balloon *vb)
{
__le32 actual = cpu_to_le32(vb->num_pages);
- vb->vdev->config->set(vb->vdev,
- offsetof(struct virtio_balloon_config, actual),
- &actual, sizeof(actual));
+ virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages,
+ &actual);
}
static int balloon(void *_vballoon)
@@ -513,7 +511,7 @@ static void virtballoon_remove(struct virtio_device *vdev)
kfree(vb);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtballoon_freeze(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
@@ -556,7 +554,7 @@ static struct virtio_driver virtio_balloon_driver = {
.probe = virtballoon_probe,
.remove = virtballoon_remove,
.config_changed = virtballoon_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtballoon_freeze,
.restore = virtballoon_restore,
#endif
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 1ba0d6831015..e9fdeb861992 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -219,13 +219,14 @@ static void vm_reset(struct virtio_device *vdev)
/* Transport interface */
/* the notify function used when creating a virt queue */
-static void vm_notify(struct virtqueue *vq)
+static bool vm_notify(struct virtqueue *vq)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
/* We write the queue's selector into the notification register to
* signal the other end */
writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+ return true;
}
/* Notify all virtqueues on an interrupt. */
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 98917fc872a4..a37c69941d30 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -197,13 +197,14 @@ static void vp_reset(struct virtio_device *vdev)
}
/* the notify function used when creating a virt queue */
-static void vp_notify(struct virtqueue *vq)
+static bool vp_notify(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
/* we write the queue's selector into the notification register to
* signal the other end */
iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+ return true;
}
/* Handle a configuration change: Tell driver if it wants to know. */
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 6b4a4db4404d..f47777582ce5 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -81,7 +81,7 @@ struct vring_virtqueue
u16 last_used_idx;
/* How to notify other side. FIXME: commonalize hcalls! */
- void (*notify)(struct virtqueue *vq);
+ bool (*notify)(struct virtqueue *vq);
#ifdef DEBUG
/* They're supposed to lock for us. */
@@ -173,6 +173,8 @@ static inline int vring_add_indirect(struct vring_virtqueue *vq,
head = vq->free_head;
vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
vq->vring.desc[head].addr = virt_to_phys(desc);
+ /* kmemleak gives a false positive, as it's hidden by virt_to_phys */
+ kmemleak_ignore(desc);
vq->vring.desc[head].len = i * sizeof(struct vring_desc);
/* Update free pointer */
@@ -428,13 +430,22 @@ EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
* @vq: the struct virtqueue
*
* This does not need to be serialized.
+ *
+ * Returns false if host notify failed or queue is broken, otherwise true.
*/
-void virtqueue_notify(struct virtqueue *_vq)
+bool virtqueue_notify(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ if (unlikely(vq->broken))
+ return false;
+
/* Prod other side to tell it about changes. */
- vq->notify(_vq);
+ if (vq->notify(_vq) < 0) {
+ vq->broken = true;
+ return false;
+ }
+ return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
@@ -447,11 +458,14 @@ EXPORT_SYMBOL_GPL(virtqueue_notify);
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
+ *
+ * Returns false if kick failed, otherwise true.
*/
-void virtqueue_kick(struct virtqueue *vq)
+bool virtqueue_kick(struct virtqueue *vq)
{
if (virtqueue_kick_prepare(vq))
- virtqueue_notify(vq);
+ return virtqueue_notify(vq);
+ return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
@@ -742,7 +756,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
struct virtio_device *vdev,
bool weak_barriers,
void *pages,
- void (*notify)(struct virtqueue *),
+ bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name)
{
@@ -837,4 +851,12 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
+bool virtqueue_is_broken(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ return vq->broken;
+}
+EXPORT_SYMBOL_GPL(virtqueue_is_broken);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index d1d53f301de7..74f29a2fc51c 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -392,6 +392,25 @@ config RETU_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called retu_wdt.
+config MOXART_WDT
+ tristate "MOXART watchdog"
+ depends on ARCH_MOXART
+ help
+ Say Y here to include Watchdog timer support for the watchdog
+ existing on the MOXA ART SoC series platforms.
+
+ To compile this driver as a module, choose M here: the
+ module will be called moxart_wdt.
+
+config SIRFSOC_WATCHDOG
+ tristate "SiRFSOC watchdog"
+ depends on ARCH_SIRF
+ select WATCHDOG_CORE
+ default y
+ help
+ Support for CSR SiRFprimaII and SiRFatlasVI watchdog. When
+ the watchdog triggers the system will be reset.
+
# AVR32 Architecture
config AT32AP700X_WDT
@@ -418,8 +437,6 @@ config BFIN_WDT
# FRV Architecture
-# H8300 Architecture
-
# X86 (i386 + ia64 + x86_64) Architecture
config ACQUIRE_WDT
@@ -1127,6 +1144,13 @@ config LANTIQ_WDT
help
Hardware driver for the Lantiq SoC Watchdog Timer.
+config RALINK_WDT
+ tristate "Ralink SoC watchdog"
+ select WATCHDOG_CORE
+ depends on RALINK
+ help
+ Hardware driver for the Ralink SoC Watchdog Timer.
+
# PARISC Architecture
# POWERPC Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 6c5bb274d3cd..91bd95a64baf 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -55,6 +55,8 @@ obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o
obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
+obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o
+obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -66,8 +68,6 @@ obj-$(CONFIG_BFIN_WDT) += bfin_wdt.o
# FRV Architecture
-# H8300 Architecture
-
# X86 (i386 + ia64 + x86_64) Architecture
obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
@@ -136,6 +136,7 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o
octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o
obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o
+obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o
# PARISC Architecture
diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c
index 24a517777fa0..5cf1621def9c 100644
--- a/drivers/watchdog/acquirewdt.c
+++ b/drivers/watchdog/acquirewdt.c
@@ -60,8 +60,7 @@
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV
- (WATCHDOG_MINOR) */
+#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/fs.h> /* For file operations */
#include <linux/ioport.h> /* For io-port access */
@@ -337,4 +336,3 @@ module_exit(acq_exit);
MODULE_AUTHOR("David Woodhouse");
MODULE_DESCRIPTION("Acquire Inc. Single Board Computer Watchdog Timer driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c
index cc6702fc5268..a8961addc59c 100644
--- a/drivers/watchdog/advantechwdt.c
+++ b/drivers/watchdog/advantechwdt.c
@@ -345,4 +345,3 @@ module_exit(advwdt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marek Michalkiewicz <marekm@linux.org.pl>");
MODULE_DESCRIPTION("Advantech Single Board Computer WDT driver");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index 41b84936a521..fbb7b94cabfd 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -452,4 +452,3 @@ module_exit(watchdog_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("ALi M1535 PMU Watchdog Timer driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 5eee55012e33..12f0b762b528 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -425,4 +425,3 @@ MODULE_DEVICE_TABLE(pci, alim7101_pci_tbl);
MODULE_AUTHOR("Steve Hill");
MODULE_DESCRIPTION("ALi M7101 PMU Computer Watchdog Timer driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index b3709f9cf5be..3a996576343a 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -46,7 +46,6 @@
MODULE_AUTHOR("Nicolas Thill <nico@openwrt.org>");
MODULE_DESCRIPTION(LONGNAME);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
static int margin = 60;
module_param(margin, int, 0);
diff --git a/drivers/watchdog/at32ap700x_wdt.c b/drivers/watchdog/at32ap700x_wdt.c
index b178e717ef09..afe7d17e6776 100644
--- a/drivers/watchdog/at32ap700x_wdt.c
+++ b/drivers/watchdog/at32ap700x_wdt.c
@@ -434,4 +434,3 @@ module_platform_driver_probe(at32_wdt_driver, at32_wdt_probe);
MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("Watchdog driver for Atmel AT32AP700X");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 1c75260b987c..dee6cc21d270 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -269,7 +269,7 @@ static struct platform_driver at91wdt_driver = {
.driver = {
.name = "at91_wdt",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(at91_wdt_dt_ids),
+ .of_match_table = at91_wdt_dt_ids,
},
};
@@ -297,5 +297,4 @@ module_exit(at91_wdt_exit);
MODULE_AUTHOR("Andrew Victor");
MODULE_DESCRIPTION("Watchdog driver for Atmel AT91RM9200");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:at91_wdt");
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index be37dde4f864..9bd089ebb70f 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -19,11 +19,13 @@
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/jiffies.h>
@@ -31,22 +33,33 @@
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include "at91sam9_wdt.h"
#define DRV_NAME "AT91SAM9 Watchdog"
-#define wdt_read(field) \
- __raw_readl(at91wdt_private.base + field)
-#define wdt_write(field, val) \
- __raw_writel((val), at91wdt_private.base + field)
+#define wdt_read(wdt, field) \
+ __raw_readl((wdt)->base + (field))
+#define wdt_write(wtd, field, val) \
+ __raw_writel((val), (wdt)->base + (field))
/* AT91SAM9 watchdog runs a 12bit counter @ 256Hz,
* use this to convert a watchdog
* value from/to milliseconds.
*/
-#define ms_to_ticks(t) (((t << 8) / 1000) - 1)
-#define ticks_to_ms(t) (((t + 1) * 1000) >> 8)
+#define ticks_to_hz_rounddown(t) ((((t) + 1) * HZ) >> 8)
+#define ticks_to_hz_roundup(t) (((((t) + 1) * HZ) + 255) >> 8)
+#define ticks_to_secs(t) (((t) + 1) >> 8)
+#define secs_to_ticks(s) (((s) << 8) - 1)
+
+#define WDT_MR_RESET 0x3FFF2FFF
+
+/* Watchdog max counter value in ticks */
+#define WDT_COUNTER_MAX_TICKS 0xFFF
+
+/* Watchdog max delta/value in secs */
+#define WDT_COUNTER_MAX_SECS ticks_to_secs(WDT_COUNTER_MAX_TICKS)
/* Hardware timeout in seconds */
#define WDT_HW_TIMEOUT 2
@@ -66,23 +79,40 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static struct watchdog_device at91_wdt_dev;
-static void at91_ping(unsigned long data);
-
-static struct {
+#define to_wdt(wdd) container_of(wdd, struct at91wdt, wdd)
+struct at91wdt {
+ struct watchdog_device wdd;
void __iomem *base;
unsigned long next_heartbeat; /* the next_heartbeat for the timer */
struct timer_list timer; /* The timer that pings the watchdog */
-} at91wdt_private;
+ u32 mr;
+ u32 mr_mask;
+ unsigned long heartbeat; /* WDT heartbeat in jiffies */
+ bool nowayout;
+ unsigned int irq;
+};
/* ......................................................................... */
+static irqreturn_t wdt_interrupt(int irq, void *dev_id)
+{
+ struct at91wdt *wdt = (struct at91wdt *)dev_id;
+
+ if (wdt_read(wdt, AT91_WDT_SR)) {
+ pr_crit("at91sam9 WDT software reset\n");
+ emergency_restart();
+ pr_crit("Reboot didn't ?????\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
/*
* Reload the watchdog timer. (ie, pat the watchdog)
*/
-static inline void at91_wdt_reset(void)
+static inline void at91_wdt_reset(struct at91wdt *wdt)
{
- wdt_write(AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
+ wdt_write(wdt, AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
}
/*
@@ -90,26 +120,21 @@ static inline void at91_wdt_reset(void)
*/
static void at91_ping(unsigned long data)
{
- if (time_before(jiffies, at91wdt_private.next_heartbeat) ||
- (!watchdog_active(&at91_wdt_dev))) {
- at91_wdt_reset();
- mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
- } else
+ struct at91wdt *wdt = (struct at91wdt *)data;
+ if (time_before(jiffies, wdt->next_heartbeat) ||
+ !watchdog_active(&wdt->wdd)) {
+ at91_wdt_reset(wdt);
+ mod_timer(&wdt->timer, jiffies + wdt->heartbeat);
+ } else {
pr_crit("I will reset your machine !\n");
-}
-
-static int at91_wdt_ping(struct watchdog_device *wdd)
-{
- /* calculate when the next userspace timeout will be */
- at91wdt_private.next_heartbeat = jiffies + wdd->timeout * HZ;
- return 0;
+ }
}
static int at91_wdt_start(struct watchdog_device *wdd)
{
- /* calculate the next userspace timeout and modify the timer */
- at91_wdt_ping(wdd);
- mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
+ struct at91wdt *wdt = to_wdt(wdd);
+ /* calculate when the next userspace timeout will be */
+ wdt->next_heartbeat = jiffies + wdd->timeout * HZ;
return 0;
}
@@ -122,39 +147,89 @@ static int at91_wdt_stop(struct watchdog_device *wdd)
static int at91_wdt_set_timeout(struct watchdog_device *wdd, unsigned int new_timeout)
{
wdd->timeout = new_timeout;
- return 0;
+ return at91_wdt_start(wdd);
}
-/*
- * Set the watchdog time interval in 1/256Hz (write-once)
- * Counter is 12 bit.
- */
-static int at91_wdt_settimeout(unsigned int timeout)
+static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
{
- unsigned int reg;
- unsigned int mr;
-
- /* Check if disabled */
- mr = wdt_read(AT91_WDT_MR);
- if (mr & AT91_WDT_WDDIS) {
- pr_err("sorry, watchdog is disabled\n");
- return -EIO;
+ u32 tmp;
+ u32 delta;
+ u32 value;
+ int err;
+ u32 mask = wdt->mr_mask;
+ unsigned long min_heartbeat = 1;
+ struct device *dev = &pdev->dev;
+
+ tmp = wdt_read(wdt, AT91_WDT_MR);
+ if ((tmp & mask) != (wdt->mr & mask)) {
+ if (tmp == WDT_MR_RESET) {
+ wdt_write(wdt, AT91_WDT_MR, wdt->mr);
+ tmp = wdt_read(wdt, AT91_WDT_MR);
+ }
+ }
+
+ if (tmp & AT91_WDT_WDDIS) {
+ if (wdt->mr & AT91_WDT_WDDIS)
+ return 0;
+ dev_err(dev, "watchdog is disabled\n");
+ return -EINVAL;
+ }
+
+ value = tmp & AT91_WDT_WDV;
+ delta = (tmp & AT91_WDT_WDD) >> 16;
+
+ if (delta < value)
+ min_heartbeat = ticks_to_hz_roundup(value - delta);
+
+ wdt->heartbeat = ticks_to_hz_rounddown(value);
+ if (!wdt->heartbeat) {
+ dev_err(dev,
+ "heartbeat is too small for the system to handle it correctly\n");
+ return -EINVAL;
+ }
+
+ if (wdt->heartbeat < min_heartbeat + 4) {
+ wdt->heartbeat = min_heartbeat;
+ dev_warn(dev,
+ "min heartbeat and max heartbeat might be too close for the system to handle it correctly\n");
+ if (wdt->heartbeat < 4)
+ dev_warn(dev,
+ "heartbeat might be too small for the system to handle it correctly\n");
+ } else {
+ wdt->heartbeat -= 4;
}
- /*
- * All counting occurs at SLOW_CLOCK / 128 = 256 Hz
- *
- * Since WDV is a 12-bit counter, the maximum period is
- * 4096 / 256 = 16 seconds.
- */
- reg = AT91_WDT_WDRSTEN /* causes watchdog reset */
- /* | AT91_WDT_WDRPROC causes processor reset only */
- | AT91_WDT_WDDBGHLT /* disabled in debug mode */
- | AT91_WDT_WDD /* restart at any time */
- | (timeout & AT91_WDT_WDV); /* timer value */
- wdt_write(AT91_WDT_MR, reg);
+ if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
+ err = request_irq(wdt->irq, wdt_interrupt,
+ IRQF_SHARED | IRQF_IRQPOLL,
+ pdev->name, wdt);
+ if (err)
+ return err;
+ }
+
+ if ((tmp & wdt->mr_mask) != (wdt->mr & wdt->mr_mask))
+ dev_warn(dev,
+ "watchdog already configured differently (mr = %x expecting %x)\n",
+ tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask);
+
+ setup_timer(&wdt->timer, at91_ping, (unsigned long)wdt);
+ mod_timer(&wdt->timer, jiffies + wdt->heartbeat);
+
+ /* Try to set timeout from device tree first */
+ if (watchdog_init_timeout(&wdt->wdd, 0, dev))
+ watchdog_init_timeout(&wdt->wdd, heartbeat, dev);
+ watchdog_set_nowayout(&wdt->wdd, wdt->nowayout);
+ err = watchdog_register_device(&wdt->wdd);
+ if (err)
+ goto out_stop_timer;
+
+ wdt->next_heartbeat = jiffies + wdt->wdd.timeout * HZ;
return 0;
+
+out_stop_timer:
+ del_timer(&wdt->timer);
+ return err;
}
/* ......................................................................... */
@@ -169,61 +244,123 @@ static const struct watchdog_ops at91_wdt_ops = {
.owner = THIS_MODULE,
.start = at91_wdt_start,
.stop = at91_wdt_stop,
- .ping = at91_wdt_ping,
.set_timeout = at91_wdt_set_timeout,
};
-static struct watchdog_device at91_wdt_dev = {
- .info = &at91_wdt_info,
- .ops = &at91_wdt_ops,
- .timeout = WDT_HEARTBEAT,
- .min_timeout = 1,
- .max_timeout = 0xFFFF,
-};
+#if defined(CONFIG_OF)
+static int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
+{
+ u32 min = 0;
+ u32 max = WDT_COUNTER_MAX_SECS;
+ const char *tmp;
+
+ /* Get the interrupts property */
+ wdt->irq = irq_of_parse_and_map(np, 0);
+ if (!wdt->irq)
+ dev_warn(wdt->wdd.parent, "failed to get IRQ from DT\n");
+
+ if (!of_property_read_u32_index(np, "atmel,max-heartbeat-sec", 0,
+ &max)) {
+ if (!max || max > WDT_COUNTER_MAX_SECS)
+ max = WDT_COUNTER_MAX_SECS;
+
+ if (!of_property_read_u32_index(np, "atmel,min-heartbeat-sec",
+ 0, &min)) {
+ if (min >= max)
+ min = max - 1;
+ }
+ }
+
+ min = secs_to_ticks(min);
+ max = secs_to_ticks(max);
+
+ wdt->mr_mask = 0x3FFFFFFF;
+ wdt->mr = 0;
+ if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) &&
+ !strcmp(tmp, "software")) {
+ wdt->mr |= AT91_WDT_WDFIEN;
+ wdt->mr_mask &= ~AT91_WDT_WDRPROC;
+ } else {
+ wdt->mr |= AT91_WDT_WDRSTEN;
+ }
+
+ if (!of_property_read_string(np, "atmel,reset-type", &tmp) &&
+ !strcmp(tmp, "proc"))
+ wdt->mr |= AT91_WDT_WDRPROC;
+
+ if (of_property_read_bool(np, "atmel,disable")) {
+ wdt->mr |= AT91_WDT_WDDIS;
+ wdt->mr_mask &= AT91_WDT_WDDIS;
+ }
+
+ if (of_property_read_bool(np, "atmel,idle-halt"))
+ wdt->mr |= AT91_WDT_WDIDLEHLT;
+
+ if (of_property_read_bool(np, "atmel,dbg-halt"))
+ wdt->mr |= AT91_WDT_WDDBGHLT;
+
+ wdt->mr |= max | ((max - min) << 16);
+
+ return 0;
+}
+#else
+static inline int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
+{
+ return 0;
+}
+#endif
static int __init at91wdt_probe(struct platform_device *pdev)
{
struct resource *r;
- int res;
+ int err;
+ struct at91wdt *wdt;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
- at91wdt_private.base = ioremap(r->start, resource_size(r));
- if (!at91wdt_private.base) {
- dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
return -ENOMEM;
- }
- at91_wdt_dev.parent = &pdev->dev;
- watchdog_init_timeout(&at91_wdt_dev, heartbeat, &pdev->dev);
- watchdog_set_nowayout(&at91_wdt_dev, nowayout);
+ wdt->mr = (WDT_HW_TIMEOUT * 256) | AT91_WDT_WDRSTEN | AT91_WDT_WDD |
+ AT91_WDT_WDDBGHLT | AT91_WDT_WDIDLEHLT;
+ wdt->mr_mask = 0x3FFFFFFF;
+ wdt->nowayout = nowayout;
+ wdt->wdd.parent = &pdev->dev;
+ wdt->wdd.info = &at91_wdt_info;
+ wdt->wdd.ops = &at91_wdt_ops;
+ wdt->wdd.timeout = WDT_HEARTBEAT;
+ wdt->wdd.min_timeout = 1;
+ wdt->wdd.max_timeout = 0xFFFF;
- /* Set watchdog */
- res = at91_wdt_settimeout(ms_to_ticks(WDT_HW_TIMEOUT * 1000));
- if (res)
- return res;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(wdt->base))
+ return PTR_ERR(wdt->base);
+
+ if (pdev->dev.of_node) {
+ err = of_at91wdt_init(pdev->dev.of_node, wdt);
+ if (err)
+ return err;
+ }
- res = watchdog_register_device(&at91_wdt_dev);
- if (res)
- return res;
+ err = at91_wdt_init(pdev, wdt);
+ if (err)
+ return err;
- at91wdt_private.next_heartbeat = jiffies + at91_wdt_dev.timeout * HZ;
- setup_timer(&at91wdt_private.timer, at91_ping, 0);
- mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
+ platform_set_drvdata(pdev, wdt);
pr_info("enabled (heartbeat=%d sec, nowayout=%d)\n",
- at91_wdt_dev.timeout, nowayout);
+ wdt->wdd.timeout, wdt->nowayout);
return 0;
}
static int __exit at91wdt_remove(struct platform_device *pdev)
{
- watchdog_unregister_device(&at91_wdt_dev);
+ struct at91wdt *wdt = platform_get_drvdata(pdev);
+ watchdog_unregister_device(&wdt->wdd);
pr_warn("I quit now, hardware will probably reboot!\n");
- del_timer(&at91wdt_private.timer);
+ del_timer(&wdt->timer);
return 0;
}
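
For reference, the conversion macros introduced in the at91sam9_wdt rework above follow directly from the 256 Hz, 12-bit hardware counter mentioned in the removed comment (a worked example, not text from the patch): the largest timeout is ticks_to_secs(WDT_COUNTER_MAX_TICKS) = (0xFFF + 1) >> 8 = 16 seconds, and going the other way secs_to_ticks(16) = (16 << 8) - 1 = 0xFFF fills the counter exactly, which is why WDT_COUNTER_MAX_SECS caps the device-tree atmel,max-heartbeat-sec value.
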
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 37cb09b27b63..9fa1f69dac13 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -329,4 +329,3 @@ MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 61566fc47f84..a6a2cebb2587 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -186,4 +186,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
MODULE_DESCRIPTION("Driver for Broadcom BCM2835 watchdog timer");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index a14a58d9d110..4eb188b87f8e 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -317,5 +317,4 @@ MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("Driver for the Broadcom BCM63xx SoC watchdog");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:bcm63xx-wdt");
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 5d36d6fb4969..a3b6a5b30f9f 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -465,7 +465,6 @@ module_exit(bfin_wdt_exit);
MODULE_AUTHOR("Michele d'Amico, Mike Frysinger <vapier@gentoo.org>");
MODULE_DESCRIPTION("Blackfin Watchdog Device Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout,
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index f270bb7bc456..f7ae49edb518 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -289,7 +289,6 @@ MODULE_AUTHOR("Heiko Ronsdorf <hero@ihg.uni-duisburg.de>");
MODULE_DESCRIPTION("sma cpu5 watchdog driver");
MODULE_SUPPORTED_DEVICE("sma cpu5 watchdog");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(port, int, 0);
MODULE_PARM_DESC(port, "base address of watchdog card, default is 0x91");
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index bead7740c86a..dd625cca1ae5 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -267,5 +267,4 @@ MODULE_PARM_DESC(heartbeat,
__MODULE_STRING(DEFAULT_HEARTBEAT));
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:watchdog");
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index e621098bf663..a46f5c7ee7ff 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -29,6 +29,7 @@
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
@@ -203,12 +204,12 @@ static long dw_wdt_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case WDIOC_GETSUPPORT:
- return copy_to_user((struct watchdog_info *)arg, &dw_wdt_ident,
+ return copy_to_user((void __user *)arg, &dw_wdt_ident,
sizeof(dw_wdt_ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
- return put_user(0, (int *)arg);
+ return put_user(0, (int __user *)arg);
case WDIOC_KEEPALIVE:
dw_wdt_set_next_heartbeat();
@@ -252,17 +253,17 @@ static int dw_wdt_release(struct inode *inode, struct file *filp)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int dw_wdt_suspend(struct device *dev)
{
- clk_disable(dw_wdt.clk);
+ clk_disable_unprepare(dw_wdt.clk);
return 0;
}
static int dw_wdt_resume(struct device *dev)
{
- int err = clk_enable(dw_wdt.clk);
+ int err = clk_prepare_enable(dw_wdt.clk);
if (err)
return err;
@@ -271,12 +272,9 @@ static int dw_wdt_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
-static const struct dev_pm_ops dw_wdt_pm_ops = {
- .suspend = dw_wdt_suspend,
- .resume = dw_wdt_resume,
-};
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
@@ -309,7 +307,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
if (IS_ERR(dw_wdt.clk))
return PTR_ERR(dw_wdt.clk);
- ret = clk_enable(dw_wdt.clk);
+ ret = clk_prepare_enable(dw_wdt.clk);
if (ret)
return ret;
@@ -326,7 +324,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
return 0;
out_disable_clk:
- clk_disable(dw_wdt.clk);
+ clk_disable_unprepare(dw_wdt.clk);
return ret;
}
@@ -335,20 +333,27 @@ static int dw_wdt_drv_remove(struct platform_device *pdev)
{
misc_deregister(&dw_wdt_miscdev);
- clk_disable(dw_wdt.clk);
+ clk_disable_unprepare(dw_wdt.clk);
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id dw_wdt_of_match[] = {
+ { .compatible = "snps,dw-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dw_wdt_of_match);
+#endif
+
static struct platform_driver dw_wdt_driver = {
.probe = dw_wdt_drv_probe,
.remove = dw_wdt_drv_remove,
.driver = {
.name = "dw_wdt",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
+ .of_match_table = of_match_ptr(dw_wdt_of_match),
.pm = &dw_wdt_pm_ops,
-#endif /* CONFIG_PM */
},
};
@@ -357,4 +362,3 @@ module_platform_driver(dw_wdt_driver);
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index e0574844c313..833e81311848 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -179,4 +179,3 @@ MODULE_AUTHOR("Ray Lehtiniemi <rayl@mail.com>,"
MODULE_DESCRIPTION("EP93xx Watchdog");
MODULE_LICENSE("GPL");
MODULE_VERSION(WDT_VERSION);
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index cd31b8a2a729..23ee53240c4c 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -477,4 +477,3 @@ module_exit(eurwdt_exit);
MODULE_AUTHOR("Rodolfo Giometti");
MODULE_DESCRIPTION("Driver for Eurotech CPU-1220/1410 on board watchdog");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 257cfbad21da..25beb30878d7 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -34,6 +34,7 @@
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/uaccess.h>
@@ -330,5 +331,4 @@ module_exit(gef_wdt_exit);
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_DESCRIPTION("GE watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:gef_wdt");
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index fcd599d4e225..4a6ae84b42bc 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -297,4 +297,3 @@ module_exit(geodewdt_exit);
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("Geode GX/LX Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 19f3c3fc65f4..45b979d9dd13 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -881,7 +881,6 @@ MODULE_AUTHOR("Tom Mingarelli");
MODULE_DESCRIPTION("hp watchdog driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(HPWDT_VERSION);
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(soft_margin, int, 0);
MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds");
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index 2b2ea13d03ea..a72fe9361ddf 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -497,4 +497,3 @@ module_pci_driver(esb_driver);
MODULE_AUTHOR("Ross Biro and David Härdeman");
MODULE_DESCRIPTION("Watchdog driver for Intel 6300ESB chipsets");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 6130321da387..04f8af65acfd 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -56,8 +56,6 @@
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV
- (WATCHDOG_MINOR) */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/fs.h> /* For file operations */
@@ -394,7 +392,7 @@ static int iTCO_wdt_probe(struct platform_device *dev)
{
int ret = -ENODEV;
unsigned long val32;
- struct lpc_ich_info *ich_info = dev->dev.platform_data;
+ struct lpc_ich_info *ich_info = dev_get_platdata(&dev->dev);
if (!ich_info)
goto out;
@@ -582,5 +580,4 @@ MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("Intel TCO WatchDog Timer Driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index eb6b5cc98ec6..7ae36690c449 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -382,6 +382,5 @@ module_exit(ibwdt_exit);
MODULE_AUTHOR("Charles Howes <chowes@vsol.net>");
MODULE_DESCRIPTION("IB700 SBC watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
/* end of ib700wdt.c */
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index bc3fb8fe89ab..db0a34460e57 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -419,4 +419,3 @@ MODULE_PARM_DESC(nowayout,
MODULE_DESCRIPTION("IBM Automatic Server Restart driver");
MODULE_AUTHOR("Andrey Panin");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c
index e24ef6a6e064..70a240297c6d 100644
--- a/drivers/watchdog/ie6xx_wdt.c
+++ b/drivers/watchdog/ie6xx_wdt.c
@@ -344,5 +344,4 @@ module_exit(ie6xx_wdt_exit);
MODULE_AUTHOR("Alexander Stein <alexander.stein@systec-electronic.com>");
MODULE_DESCRIPTION("Intel Atom E6xx Watchdog Device Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 693ac3f4de5a..b4786bccc42c 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -322,6 +322,7 @@ static const struct of_device_id imx2_wdt_dt_ids[] = {
{ .compatible = "fsl,imx21-wdt", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, imx2_wdt_dt_ids);
static struct platform_driver imx2_wdt_driver = {
.remove = __exit_p(imx2_wdt_remove),
@@ -338,5 +339,4 @@ module_platform_driver_probe(imx2_wdt_driver, imx2_wdt_probe);
MODULE_AUTHOR("Wolfram Sang");
MODULE_DESCRIPTION("Watchdog driver for IMX2 and later");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 6d90f7a2ce22..1b5c25a47b87 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -214,4 +214,3 @@ module_exit(watchdog_exit);
MODULE_AUTHOR("Guido Guenther <agx@sigxcpu.org>");
MODULE_DESCRIPTION("Hardware Watchdog Device for SGI IP22");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index 9dda2d08af91..e13e65e996aa 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -48,7 +48,7 @@
#include <linux/atomic.h>
#include <asm/intel_scu_ipc.h>
#include <asm/apb_timer.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
#include "intel_scu_watchdog.h"
@@ -445,7 +445,7 @@ static int __init intel_scu_watchdog_init(void)
*
* If it isn't an intel MID device then it doesn't have this watchdog
*/
- if (!mrst_identify_cpu())
+ if (!intel_mid_identify_cpu())
return -ENODEV;
/* Check boot parameters to verify that their initial values */
@@ -564,5 +564,4 @@ module_exit(intel_scu_watchdog_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_VERSION(WDT_VER);
diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c
index d964faf1a250..b16013ffacc2 100644
--- a/drivers/watchdog/iop_wdt.c
+++ b/drivers/watchdog/iop_wdt.c
@@ -259,4 +259,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_AUTHOR("Curt E Bruns <curt.e.bruns@intel.com>");
MODULE_DESCRIPTION("iop watchdog timer driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index f4cce6d66a55..41b3979a9d87 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -41,7 +41,6 @@
MODULE_AUTHOR("Jorge Boncompte - DTI2 <jorge@dti2.net>");
MODULE_DESCRIPTION("IT8712F Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
static int max_units = 255;
static int margin = 60; /* in seconds */
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index d3dcc6988b5f..e2bba68ae71e 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -772,4 +772,3 @@ module_exit(it87_wdt_exit);
MODULE_AUTHOR("Oliver Schuster");
MODULE_DESCRIPTION("Hardware Watchdog Device Driver for IT87xx EC-LPC I/O");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index 5580b4fff7fe..f20cc53ff719 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -208,5 +208,3 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index d1afdf684c18..2de486a7eea1 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -222,5 +222,4 @@ module_platform_driver(jz4740_wdt_driver);
MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
MODULE_DESCRIPTION("jz4740 Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:jz4740-wdt");
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index 5c3d4df63e68..a1a3638c579c 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -67,7 +67,7 @@ enum {
PRESCALER_12,
};
-const u32 kempld_prescaler[] = {
+static const u32 kempld_prescaler[] = {
[PRESCALER_21] = (1 << 21) - 1,
[PRESCALER_17] = (1 << 17) - 1,
[PRESCALER_12] = (1 << 12) - 1,
@@ -361,7 +361,7 @@ static long kempld_wdt_ioctl(struct watchdog_device *wdd, unsigned int cmd,
ret = kempld_wdt_keepalive(wdd);
break;
case WDIOC_GETPRETIMEOUT:
- ret = put_user(wdt_data->pretimeout, (int *)arg);
+ ret = put_user(wdt_data->pretimeout, (int __user *)arg);
break;
}
@@ -578,4 +578,3 @@ module_platform_driver(kempld_wdt_driver);
MODULE_DESCRIPTION("KEM PLD Watchdog Driver");
MODULE_AUTHOR("Michael Brunner <michael.brunner@kontron.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
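The kempld_wdt hunk above does two things: it makes the prescaler table static and it adds the __user annotation so sparse can verify that the ioctl argument is only dereferenced through the uaccess helpers. A minimal sketch of that pattern (hypothetical handler name, not part of the patch):

    #include <linux/uaccess.h>

    /* Copy a driver-internal value out to the userspace int that the ioctl
     * argument points at; put_user() returns -EFAULT on a bad pointer. */
    static long example_report_pretimeout(unsigned int pretimeout, unsigned long arg)
    {
            return put_user(pretimeout, (int __user *)arg);
    }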
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index dce9ecffd44a..40ca5594a336 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -323,5 +323,4 @@ module_exit(ks8695_wdt_exit);
MODULE_AUTHOR("Andrew Victor");
MODULE_DESCRIPTION("Watchdog driver for KS8695");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:ks8695_wdt");
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 088fd0c9d888..3b3148c764a3 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -249,4 +249,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq SoC Watchdog");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
index 173494a681e6..da6fa2b68074 100644
--- a/drivers/watchdog/m54xx_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -223,4 +223,3 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index bf84f788e592..9826b59ef734 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -92,7 +92,6 @@ static unsigned short zf_readw(unsigned char port)
MODULE_AUTHOR("Fernando Fuganti <fuganti@conectiva.com.br>");
MODULE_DESCRIPTION("MachZ ZF-Logic Watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index cc9d328086ed..6d4f3998e1f6 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -258,4 +258,3 @@ MODULE_PARM_DESC(nodelay,
"(max6373/74 only, default=0)");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c
index 97d62ee50341..be86ea359eee 100644
--- a/drivers/watchdog/mixcomwd.c
+++ b/drivers/watchdog/mixcomwd.c
@@ -315,4 +315,3 @@ MODULE_AUTHOR("Gergely Madarasz <gorgo@itc.hu>");
MODULE_DESCRIPTION("MixCom Watchdog driver");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
new file mode 100644
index 000000000000..4166e4d116a8
--- /dev/null
+++ b/drivers/watchdog/moxart_wdt.c
@@ -0,0 +1,165 @@
+/*
+ * MOXA ART SoCs watchdog driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+#include <linux/moduleparam.h>
+
+#define REG_COUNT 0x4
+#define REG_MODE 0x8
+#define REG_ENABLE 0xC
+
+struct moxart_wdt_dev {
+ struct watchdog_device dev;
+ void __iomem *base;
+ unsigned int clock_frequency;
+};
+
+static int heartbeat;
+
+static int moxart_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev);
+
+ writel(0, moxart_wdt->base + REG_ENABLE);
+
+ return 0;
+}
+
+static int moxart_wdt_start(struct watchdog_device *wdt_dev)
+{
+ struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev);
+
+ writel(moxart_wdt->clock_frequency * wdt_dev->timeout,
+ moxart_wdt->base + REG_COUNT);
+ writel(0x5ab9, moxart_wdt->base + REG_MODE);
+ writel(0x03, moxart_wdt->base + REG_ENABLE);
+
+ return 0;
+}
+
+static int moxart_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ wdt_dev->timeout = timeout;
+
+ return 0;
+}
+
+static const struct watchdog_info moxart_wdt_info = {
+ .identity = "moxart-wdt",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops moxart_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = moxart_wdt_start,
+ .stop = moxart_wdt_stop,
+ .set_timeout = moxart_wdt_set_timeout,
+};
+
+static int moxart_wdt_probe(struct platform_device *pdev)
+{
+ struct moxart_wdt_dev *moxart_wdt;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ struct clk *clk;
+ int err;
+ unsigned int max_timeout;
+ bool nowayout = WATCHDOG_NOWAYOUT;
+
+ moxart_wdt = devm_kzalloc(dev, sizeof(*moxart_wdt), GFP_KERNEL);
+ if (!moxart_wdt)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, moxart_wdt);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ moxart_wdt->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(moxart_wdt->base))
+ return PTR_ERR(moxart_wdt->base);
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ pr_err("%s: of_clk_get failed\n", __func__);
+ return PTR_ERR(clk);
+ }
+
+ moxart_wdt->clock_frequency = clk_get_rate(clk);
+ if (moxart_wdt->clock_frequency == 0) {
+ pr_err("%s: incorrect clock frequency\n", __func__);
+ return -EINVAL;
+ }
+
+ max_timeout = UINT_MAX / moxart_wdt->clock_frequency;
+
+ moxart_wdt->dev.info = &moxart_wdt_info;
+ moxart_wdt->dev.ops = &moxart_wdt_ops;
+ moxart_wdt->dev.timeout = max_timeout;
+ moxart_wdt->dev.min_timeout = 1;
+ moxart_wdt->dev.max_timeout = max_timeout;
+ moxart_wdt->dev.parent = dev;
+
+ watchdog_init_timeout(&moxart_wdt->dev, heartbeat, dev);
+ watchdog_set_nowayout(&moxart_wdt->dev, nowayout);
+
+ watchdog_set_drvdata(&moxart_wdt->dev, moxart_wdt);
+
+ err = watchdog_register_device(&moxart_wdt->dev);
+ if (err)
+ return err;
+
+ dev_dbg(dev, "Watchdog enabled (heartbeat=%d sec, nowayout=%d)\n",
+ moxart_wdt->dev.timeout, nowayout);
+
+ return 0;
+}
+
+static int moxart_wdt_remove(struct platform_device *pdev)
+{
+ struct moxart_wdt_dev *moxart_wdt = platform_get_drvdata(pdev);
+
+ moxart_wdt_stop(&moxart_wdt->dev);
+ watchdog_unregister_device(&moxart_wdt->dev);
+
+ return 0;
+}
+
+static const struct of_device_id moxart_watchdog_match[] = {
+ { .compatible = "moxa,moxart-watchdog" },
+ { },
+};
+
+static struct platform_driver moxart_wdt_driver = {
+ .probe = moxart_wdt_probe,
+ .remove = moxart_wdt_remove,
+ .driver = {
+ .name = "moxart-watchdog",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_watchdog_match,
+ },
+};
+module_platform_driver(moxart_wdt_driver);
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds");
+
+MODULE_DESCRIPTION("MOXART watchdog driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index da2752063bb7..d82152077fd9 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/miscdevice.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/watchdog.h>
@@ -329,4 +330,3 @@ MODULE_AUTHOR("Dave Updegraff, Kumar Gala");
MODULE_DESCRIPTION("Driver for watchdog timer in MPC8xx/MPC83xx/MPC86xx "
"uProcessors");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index b4341110ad4f..edb31ffd7927 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -257,5 +257,4 @@ module_platform_driver(mtx1_wdt_driver);
MODULE_AUTHOR("Michael Stickel, Florian Fainelli");
MODULE_DESCRIPTION("Driver for the MTX-1 watchdog");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:mtx1-wdt");
diff --git a/drivers/watchdog/mv64x60_wdt.c b/drivers/watchdog/mv64x60_wdt.c
index e4cf98019265..f9fa58409396 100644
--- a/drivers/watchdog/mv64x60_wdt.c
+++ b/drivers/watchdog/mv64x60_wdt.c
@@ -255,7 +255,7 @@ static struct miscdevice mv64x60_wdt_miscdev = {
static int mv64x60_wdt_probe(struct platform_device *dev)
{
- struct mv64x60_wdt_pdata *pdata = dev->dev.platform_data;
+ struct mv64x60_wdt_pdata *pdata = dev_get_platdata(&dev->dev);
struct resource *r;
int timeout = 10;
@@ -323,5 +323,4 @@ module_exit(mv64x60_wdt_exit);
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("MV64x60 watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:" MV64x60_WDT_NAME);
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index b15b6efd91a1..a0d893b0930e 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -307,5 +307,4 @@ module_platform_driver(nuc900wdt_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("Watchdog driver for NUC900");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:nuc900-wdt");
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index 59cf19eeea07..231e5b9d5c8e 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -513,4 +513,3 @@ module_exit(nv_tco_cleanup_module);
MODULE_AUTHOR("Mike Waychison");
MODULE_DESCRIPTION("TCO timer driver for NV chipsets");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 4dd281f2c33f..fb57103c8ebc 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -405,4 +405,3 @@ module_platform_driver(xwdt_driver);
MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>");
MODULE_DESCRIPTION("Xilinx Watchdog driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index af88ffd1068f..7b79ca093e89 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -205,7 +205,7 @@ static const struct watchdog_ops omap_wdt_ops = {
static int omap_wdt_probe(struct platform_device *pdev)
{
- struct omap_wd_timer_platform_data *pdata = pdev->dev.platform_data;
+ struct omap_wd_timer_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct watchdog_device *omap_wdt;
struct resource *res, *mem;
struct omap_wdt_dev *wdev;
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 4ea5fcccac02..44edca66d564 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -207,7 +207,7 @@ static struct platform_driver orion_wdt_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "orion_wdt",
- .of_match_table = of_match_ptr(orion_wdt_of_match_table),
+ .of_match_table = orion_wdt_of_match_table,
},
};
@@ -225,4 +225,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:orion_wdt");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
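orion_wdt (and sunxi_wdt further down) stop wrapping the match table in of_match_ptr() because those drivers only build with CONFIG_OF enabled; with the wrapper the table would be unreferenced, and flagged as unused, in a !CONFIG_OF build. For reference, the helper is defined roughly like this in <linux/of.h>:

    #ifdef CONFIG_OF
    #define of_match_ptr(_ptr)      (_ptr)
    #else
    #define of_match_ptr(_ptr)      NULL
    #endif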
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 5afb89b48650..5211d56b3681 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -580,8 +580,6 @@ MODULE_AUTHOR("Sven Anders <anders@anduras.de>, "
MODULE_DESCRIPTION("PC87413 WDT driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
module_param(io, int, 0);
MODULE_PARM_DESC(io, MODNAME " I/O port (default: "
__MODULE_STRING(IO_DEFAULT) ").");
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index 33e49a7f889f..e936f15dc7c7 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -61,7 +61,7 @@
#include <linux/delay.h> /* For mdelay function */
#include <linux/timer.h> /* For timer related operations */
#include <linux/jiffies.h> /* For jiffies stuff */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */
+#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/reboot.h> /* For kernel_power_off() */
#include <linux/init.h> /* For __init/__exit/... */
@@ -1011,5 +1011,3 @@ MODULE_AUTHOR("Ken Hollis <kenji@bitgate.com>, "
MODULE_DESCRIPTION("Berkshire ISA-PC Watchdog driver");
MODULE_VERSION(WATCHDOG_VERSION);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS_MISCDEV(TEMP_MINOR);
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index 7890f84edf76..b4864f254b48 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -40,7 +40,7 @@
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/delay.h> /* For mdelay function */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */
+#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/notifier.h> /* For notifier support */
#include <linux/reboot.h> /* For reboot_notifier stuff */
@@ -820,5 +820,3 @@ module_pci_driver(pcipcwd_driver);
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("Berkshire PCI-PC Watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS_MISCDEV(TEMP_MINOR);
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 7b14d1847927..53598e832a2a 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -32,7 +32,7 @@
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/delay.h> /* For mdelay function */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */
+#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/notifier.h> /* For notifier support */
#include <linux/reboot.h> /* For reboot_notifier stuff */
@@ -72,8 +72,6 @@ do { \
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE(DRIVER_LICENSE);
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS_MISCDEV(TEMP_MINOR);
/* Module Parameters */
module_param(debug, int, 0);
@@ -235,13 +233,17 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd,
unsigned char cmd, unsigned char *msb, unsigned char *lsb)
{
int got_response, count;
- unsigned char buf[6];
+ unsigned char *buf;
/* We will not send any commands if the USB PCWD device does
* not exist */
if ((!usb_pcwd) || (!usb_pcwd->exists))
return -1;
+ buf = kmalloc(6, GFP_KERNEL);
+ if (buf == NULL)
+ return 0;
+
/* The USB PC Watchdog uses a 6 byte report format.
* The board currently uses only 3 of the six bytes of the report. */
buf[0] = cmd; /* Byte 0 = CMD */
@@ -277,6 +279,8 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd,
*lsb = usb_pcwd->cmd_data_lsb;
}
+ kfree(buf);
+
return got_response;
}
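The pcwd_usb change moves the 6-byte command buffer off the stack because memory handed to the USB core for transfers must be DMA-able, which stack (and vmalloc) memory is not. The general shape of the fix, sketched with hypothetical names:

    #include <linux/slab.h>
    #include <linux/usb.h>

    #define EXAMPLE_REPORT_SIZE 6   /* hypothetical report length */

    static int example_send_report(struct usb_device *udev)
    {
            unsigned char *buf = kmalloc(EXAMPLE_REPORT_SIZE, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM; /* the driver above returns its own failure code */
            /* ... fill buf and pass it to usb_control_msg() ... */
            kfree(buf);
            return 0;
    }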
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index 7d3d471f810c..4f412b4c356a 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -298,5 +298,3 @@ module_exit(pikawdt_exit);
MODULE_AUTHOR("Sean MacLennan <smaclennan@pikatech.com>");
MODULE_DESCRIPTION("PIKA FPGA based Watchdog Timer");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index b30bd430f591..1bdcc313e1d9 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -233,5 +233,4 @@ MODULE_PARM_DESC(nowayout,
"Set to 1 to keep watchdog running after device release");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:pnx4008-watchdog");
diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c
index 1b62a7dfcc95..882fdcb46ad1 100644
--- a/drivers/watchdog/pnx833x_wdt.c
+++ b/drivers/watchdog/pnx833x_wdt.c
@@ -278,4 +278,3 @@ module_exit(watchdog_exit);
MODULE_AUTHOR("Daniel Laird/Andre McCurdy");
MODULE_DESCRIPTION("Hardware Watchdog Device for PNX833x");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 9cf6bc7a234f..71e78ef4b736 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -25,8 +25,7 @@
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/fs.h> /* For file operations */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV
- (WATCHDOG_MINOR) */
+#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/platform_device.h> /* For platform_driver framework */
@@ -329,4 +328,3 @@ MODULE_AUTHOR("Ondrej Zajicek <santiago@crfreenet.org>,"
"Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("Driver for the IDT RC32434 SoC watchdog");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index b0f116c2fd53..082d06262959 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -231,7 +231,7 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
struct resource *r;
struct rdc321x_wdt_pdata *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -ENODEV;
@@ -298,4 +298,3 @@ module_platform_driver(rdc321x_wdt_driver);
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("RDC321x watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
new file mode 100644
index 000000000000..53d37fea183e
--- /dev/null
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -0,0 +1,207 @@
+/*
+ * Ralink RT288x/RT3xxx/MT76xx built-in hardware watchdog timer
+ *
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ *
+ * This driver was based on: drivers/watchdog/softdog.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/watchdog.h>
+#include <linux/miscdevice.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+
+#define SYSC_RSTSTAT 0x38
+#define WDT_RST_CAUSE BIT(1)
+
+#define RALINK_WDT_TIMEOUT 30
+#define RALINK_WDT_PRESCALE 65536
+
+#define TIMER_REG_TMR1LOAD 0x00
+#define TIMER_REG_TMR1CTL 0x08
+
+#define TMRSTAT_TMR1RST BIT(5)
+
+#define TMR1CTL_ENABLE BIT(7)
+#define TMR1CTL_MODE_SHIFT 4
+#define TMR1CTL_MODE_MASK 0x3
+#define TMR1CTL_MODE_FREE_RUNNING 0x0
+#define TMR1CTL_MODE_PERIODIC 0x1
+#define TMR1CTL_MODE_TIMEOUT 0x2
+#define TMR1CTL_MODE_WDT 0x3
+#define TMR1CTL_PRESCALE_MASK 0xf
+#define TMR1CTL_PRESCALE_65536 0xf
+
+static struct clk *rt288x_wdt_clk;
+static unsigned long rt288x_wdt_freq;
+static void __iomem *rt288x_wdt_base;
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static inline void rt_wdt_w32(unsigned reg, u32 val)
+{
+ iowrite32(val, rt288x_wdt_base + reg);
+}
+
+static inline u32 rt_wdt_r32(unsigned reg)
+{
+ return ioread32(rt288x_wdt_base + reg);
+}
+
+static int rt288x_wdt_ping(struct watchdog_device *w)
+{
+ rt_wdt_w32(TIMER_REG_TMR1LOAD, w->timeout * rt288x_wdt_freq);
+
+ return 0;
+}
+
+static int rt288x_wdt_start(struct watchdog_device *w)
+{
+ u32 t;
+
+ t = rt_wdt_r32(TIMER_REG_TMR1CTL);
+ t &= ~(TMR1CTL_MODE_MASK << TMR1CTL_MODE_SHIFT |
+ TMR1CTL_PRESCALE_MASK);
+ t |= (TMR1CTL_MODE_WDT << TMR1CTL_MODE_SHIFT |
+ TMR1CTL_PRESCALE_65536);
+ rt_wdt_w32(TIMER_REG_TMR1CTL, t);
+
+ rt288x_wdt_ping(w);
+
+ t = rt_wdt_r32(TIMER_REG_TMR1CTL);
+ t |= TMR1CTL_ENABLE;
+ rt_wdt_w32(TIMER_REG_TMR1CTL, t);
+
+ return 0;
+}
+
+static int rt288x_wdt_stop(struct watchdog_device *w)
+{
+ u32 t;
+
+ rt288x_wdt_ping(w);
+
+ t = rt_wdt_r32(TIMER_REG_TMR1CTL);
+ t &= ~TMR1CTL_ENABLE;
+ rt_wdt_w32(TIMER_REG_TMR1CTL, t);
+
+ return 0;
+}
+
+static int rt288x_wdt_set_timeout(struct watchdog_device *w, unsigned int t)
+{
+ w->timeout = t;
+ rt288x_wdt_ping(w);
+
+ return 0;
+}
+
+static int rt288x_wdt_bootcause(void)
+{
+ if (rt_sysc_r32(SYSC_RSTSTAT) & WDT_RST_CAUSE)
+ return WDIOF_CARDRESET;
+
+ return 0;
+}
+
+static struct watchdog_info rt288x_wdt_info = {
+ .identity = "Ralink Watchdog",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+};
+
+static struct watchdog_ops rt288x_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rt288x_wdt_start,
+ .stop = rt288x_wdt_stop,
+ .ping = rt288x_wdt_ping,
+ .set_timeout = rt288x_wdt_set_timeout,
+};
+
+static struct watchdog_device rt288x_wdt_dev = {
+ .info = &rt288x_wdt_info,
+ .ops = &rt288x_wdt_ops,
+ .min_timeout = 1,
+};
+
+static int rt288x_wdt_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rt288x_wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rt288x_wdt_base))
+ return PTR_ERR(rt288x_wdt_base);
+
+ rt288x_wdt_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rt288x_wdt_clk))
+ return PTR_ERR(rt288x_wdt_clk);
+
+ device_reset(&pdev->dev);
+
+ rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE;
+
+ rt288x_wdt_dev.dev = &pdev->dev;
+ rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause();
+
+ rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq);
+ rt288x_wdt_dev.timeout = rt288x_wdt_dev.max_timeout;
+
+ watchdog_set_nowayout(&rt288x_wdt_dev, nowayout);
+
+ ret = watchdog_register_device(&rt288x_wdt_dev);
+ if (!ret)
+ dev_info(&pdev->dev, "Initialized\n");
+
+ return 0;
+}
+
+static int rt288x_wdt_remove(struct platform_device *pdev)
+{
+ watchdog_unregister_device(&rt288x_wdt_dev);
+
+ return 0;
+}
+
+static void rt288x_wdt_shutdown(struct platform_device *pdev)
+{
+ rt288x_wdt_stop(&rt288x_wdt_dev);
+}
+
+static const struct of_device_id rt288x_wdt_match[] = {
+ { .compatible = "ralink,rt2880-wdt" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rt288x_wdt_match);
+
+static struct platform_driver rt288x_wdt_driver = {
+ .probe = rt288x_wdt_probe,
+ .remove = rt288x_wdt_remove,
+ .shutdown = rt288x_wdt_shutdown,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = rt288x_wdt_match,
+ },
+};
+
+module_platform_driver(rt288x_wdt_driver);
+
+MODULE_DESCRIPTION("MediaTek/Ralink RT288x/RT3xxx hardware watchdog driver");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 23aad7c6bf5d..7d8fd041ee25 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -29,7 +29,6 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/timer.h>
-#include <linux/miscdevice.h> /* for MODULE_ALIAS_MISCDEV */
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/platform_device.h>
@@ -539,5 +538,4 @@ MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, "
"Dimitry Andric <dimitry.andric@tomtom.com>");
MODULE_DESCRIPTION("S3C2410 Watchdog Device Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:s3c2410-wdt");
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index ccd6b29e21bf..e1d39a1e9628 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -193,4 +193,3 @@ module_param(margin, int, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index ea5d84a1fdad..3abae50773b8 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -341,7 +341,6 @@ MODULE_PARM_DESC(timeout,
"Watchdog timeout in microseconds (max/default 8388607 or 8.3ish secs)");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
/*
* example code that can be put in a platform code area to utilize the
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index 63632ec87c7e..2eef58a0cf05 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -387,4 +387,3 @@ module_exit(sbc60xxwdt_unload);
MODULE_AUTHOR("Jakob Oestergaard <jakob@unthought.net>");
MODULE_DESCRIPTION("60xx Single Board Computer Watchdog Timer driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c
index 719edc8fdeb3..5f268add17ce 100644
--- a/drivers/watchdog/sbc7240_wdt.c
+++ b/drivers/watchdog/sbc7240_wdt.c
@@ -309,5 +309,3 @@ MODULE_AUTHOR("Gilles Gigan");
MODULE_DESCRIPTION("Watchdog device driver for single board"
" computers EPIC Nano 7240 from iEi");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c
index d4781e05f017..da60560ca446 100644
--- a/drivers/watchdog/sbc8360.c
+++ b/drivers/watchdog/sbc8360.c
@@ -404,6 +404,5 @@ MODULE_AUTHOR("Ian E. Morgan <imorgan@webcon.ca>");
MODULE_DESCRIPTION("SBC8360 watchdog driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.01");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
/* end of sbc8360.c */
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index 0c3e9f66ef77..a1c502e0d8ec 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -220,4 +220,3 @@ MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. "
"so only use it if you are *sure* you are running on this specific "
"SBC system from Winsystems! It writes to IO ports 0x1ee and 0x1ef!");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 90d5527ca886..a517d8bae757 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -263,5 +263,3 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index 3fb83b0c28c2..3b9fff9dcf65 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -476,4 +476,3 @@ MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>");
MODULE_DESCRIPTION(
"Driver for National Semiconductor PC87307/PC97307 watchdog component");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index 707e027e5002..f353e18b1a82 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -433,4 +433,3 @@ MODULE_AUTHOR("Scott and Bill Jennings");
MODULE_DESCRIPTION(
"Driver for watchdog timer in AMD \"Elan\" SC520 uProcessor");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index af7b136b1874..b96127ea3de1 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -26,8 +26,7 @@
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/... */
-#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV
- (WATCHDOG_MINOR) */
+#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/fs.h> /* For file operations */
@@ -545,5 +544,3 @@ module_exit(sch311x_wdt_exit);
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("SMSC SCH311x WatchDog Timer Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c
index 8ae7c282d465..836377cf9271 100644
--- a/drivers/watchdog/scx200_wdt.c
+++ b/drivers/watchdog/scx200_wdt.c
@@ -37,7 +37,6 @@
MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
MODULE_DESCRIPTION("NatSemi SCx200 Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
static int margin = 60; /* in seconds */
module_param(margin, int, 0);
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 5bca79457768..f9b8e06f3558 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -343,7 +343,6 @@ MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
MODULE_DESCRIPTION("SuperH watchdog driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(clock_division_ratio, int, 0);
MODULE_PARM_DESC(clock_division_ratio,
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
new file mode 100644
index 000000000000..3b8d7397e001
--- /dev/null
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -0,0 +1,224 @@
+/*
+ * Watchdog driver for CSR SiRFprimaII and SiRFatlasVI
+ *
+ * Copyright (c) 2013 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#define SIRFSOC_TIMER_COUNTER_LO 0x0000
+#define SIRFSOC_TIMER_MATCH_0 0x0008
+#define SIRFSOC_TIMER_INT_EN 0x0024
+#define SIRFSOC_TIMER_WATCHDOG_EN 0x0028
+#define SIRFSOC_TIMER_LATCH 0x0030
+#define SIRFSOC_TIMER_LATCHED_LO 0x0034
+
+#define SIRFSOC_TIMER_WDT_INDEX 5
+
+#define SIRFSOC_WDT_MIN_TIMEOUT 30 /* 30 secs */
+#define SIRFSOC_WDT_MAX_TIMEOUT (10 * 60) /* 10 mins */
+#define SIRFSOC_WDT_DEFAULT_TIMEOUT 30 /* 30 secs */
+
+static unsigned int timeout = SIRFSOC_WDT_DEFAULT_TIMEOUT;
+static bool nowayout = WATCHDOG_NOWAYOUT;
+
+module_param(timeout, uint, 0);
+module_param(nowayout, bool, 0);
+
+MODULE_PARM_DESC(timeout, "Default watchdog timeout (in seconds)");
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static unsigned int sirfsoc_wdt_gettimeleft(struct watchdog_device *wdd)
+{
+ u32 counter, match;
+ void __iomem *wdt_base;
+ int time_left;
+
+ wdt_base = watchdog_get_drvdata(wdd);
+ counter = readl(wdt_base + SIRFSOC_TIMER_COUNTER_LO);
+ match = readl(wdt_base +
+ SIRFSOC_TIMER_MATCH_0 + (SIRFSOC_TIMER_WDT_INDEX << 2));
+
+ time_left = match - counter;
+
+ return time_left / CLOCK_TICK_RATE;
+}
+
+static int sirfsoc_wdt_updatetimeout(struct watchdog_device *wdd)
+{
+ u32 counter, timeout_ticks;
+ void __iomem *wdt_base;
+
+ timeout_ticks = wdd->timeout * CLOCK_TICK_RATE;
+ wdt_base = watchdog_get_drvdata(wdd);
+
+ /* Enable the latch before reading the LATCH_LO register */
+ writel(1, wdt_base + SIRFSOC_TIMER_LATCH);
+
+ /* Set the TO value */
+ counter = readl(wdt_base + SIRFSOC_TIMER_LATCHED_LO);
+
+ counter += timeout_ticks;
+
+ writel(counter, wdt_base +
+ SIRFSOC_TIMER_MATCH_0 + (SIRFSOC_TIMER_WDT_INDEX << 2));
+
+ return 0;
+}
+
+static int sirfsoc_wdt_enable(struct watchdog_device *wdd)
+{
+ void __iomem *wdt_base = watchdog_get_drvdata(wdd);
+ sirfsoc_wdt_updatetimeout(wdd);
+
+ /*
+ * NOTE: If interrupt is not enabled
+ * then WD-Reset doesn't get generated at all.
+ */
+ writel(readl(wdt_base + SIRFSOC_TIMER_INT_EN)
+ | (1 << SIRFSOC_TIMER_WDT_INDEX),
+ wdt_base + SIRFSOC_TIMER_INT_EN);
+ writel(1, wdt_base + SIRFSOC_TIMER_WATCHDOG_EN);
+
+ return 0;
+}
+
+static int sirfsoc_wdt_disable(struct watchdog_device *wdd)
+{
+ void __iomem *wdt_base = watchdog_get_drvdata(wdd);
+
+ writel(0, wdt_base + SIRFSOC_TIMER_WATCHDOG_EN);
+ writel(readl(wdt_base + SIRFSOC_TIMER_INT_EN)
+ & (~(1 << SIRFSOC_TIMER_WDT_INDEX)),
+ wdt_base + SIRFSOC_TIMER_INT_EN);
+
+ return 0;
+}
+
+static int sirfsoc_wdt_settimeout(struct watchdog_device *wdd, unsigned int to)
+{
+ wdd->timeout = to;
+ sirfsoc_wdt_updatetimeout(wdd);
+
+ return 0;
+}
+
+#define OPTIONS (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE)
+
+static const struct watchdog_info sirfsoc_wdt_ident = {
+ .options = OPTIONS,
+ .firmware_version = 0,
+ .identity = "SiRFSOC Watchdog",
+};
+
+static struct watchdog_ops sirfsoc_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = sirfsoc_wdt_enable,
+ .stop = sirfsoc_wdt_disable,
+ .get_timeleft = sirfsoc_wdt_gettimeleft,
+ .ping = sirfsoc_wdt_updatetimeout,
+ .set_timeout = sirfsoc_wdt_settimeout,
+};
+
+static struct watchdog_device sirfsoc_wdd = {
+ .info = &sirfsoc_wdt_ident,
+ .ops = &sirfsoc_wdt_ops,
+ .timeout = SIRFSOC_WDT_DEFAULT_TIMEOUT,
+ .min_timeout = SIRFSOC_WDT_MIN_TIMEOUT,
+ .max_timeout = SIRFSOC_WDT_MAX_TIMEOUT,
+};
+
+static int sirfsoc_wdt_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+ void __iomem *base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ watchdog_set_drvdata(&sirfsoc_wdd, base);
+
+ watchdog_init_timeout(&sirfsoc_wdd, timeout, &pdev->dev);
+ watchdog_set_nowayout(&sirfsoc_wdd, nowayout);
+
+ ret = watchdog_register_device(&sirfsoc_wdd);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, &sirfsoc_wdd);
+
+ return 0;
+}
+
+static void sirfsoc_wdt_shutdown(struct platform_device *pdev)
+{
+ struct watchdog_device *wdd = platform_get_drvdata(pdev);
+
+ sirfsoc_wdt_disable(wdd);
+}
+
+static int sirfsoc_wdt_remove(struct platform_device *pdev)
+{
+ sirfsoc_wdt_shutdown(pdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sirfsoc_wdt_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int sirfsoc_wdt_resume(struct device *dev)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+ /*
+ * NOTE: The timer controller registers are saved and restored
+ * by timer-prima2.c, so all we need to do here is refresh the
+ * watchdog timeout.
+ */
+ sirfsoc_wdt_updatetimeout(wdd);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(sirfsoc_wdt_pm_ops,
+ sirfsoc_wdt_suspend, sirfsoc_wdt_resume);
+
+static const struct of_device_id sirfsoc_wdt_of_match[] = {
+ { .compatible = "sirf,prima2-tick"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, sirfsoc_wdt_of_match);
+
+static struct platform_driver sirfsoc_wdt_driver = {
+ .driver = {
+ .name = "sirfsoc-wdt",
+ .owner = THIS_MODULE,
+ .pm = &sirfsoc_wdt_pm_ops,
+ .of_match_table = of_match_ptr(sirfsoc_wdt_of_match),
+ },
+ .probe = sirfsoc_wdt_probe,
+ .remove = sirfsoc_wdt_remove,
+ .shutdown = sirfsoc_wdt_shutdown,
+};
+module_platform_driver(sirfsoc_wdt_driver);
+
+MODULE_DESCRIPTION("SiRF SoC watchdog driver");
+MODULE_AUTHOR("Xianglong Du <Xianglong.Du@csr.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sirfsoc-wdt");
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index 6d665f9c1d58..445ea1ad1fa9 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -603,8 +603,6 @@ MODULE_DESCRIPTION("Driver for SMsC 37B787 watchdog component (Version "
VERSION ")");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
#ifdef SMSC_SUPPORT_MINUTES
module_param(unit, int, 0);
MODULE_PARM_DESC(unit,
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index b68b1e519d53..ef2638fee4a8 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -207,4 +207,3 @@ module_exit(watchdog_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("Software Watchdog Device Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 0e9d8c479c35..ce63a1bbf395 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -580,4 +580,3 @@ module_exit(sp5100_tco_cleanup_module);
MODULE_AUTHOR("Priyanka Gupta");
MODULE_DESCRIPTION("TCO timer driver for SP5100/SB800 chipset");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 58df98aec122..3f786ce0a6f2 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -268,7 +268,6 @@ static int sp805_wdt_remove(struct amba_device *adev)
struct sp805_wdt *wdt = amba_get_drvdata(adev);
watchdog_unregister_device(&wdt->wdd);
- amba_set_drvdata(adev, NULL);
watchdog_set_drvdata(&wdt->wdd, NULL);
return 0;
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index c97e98dcde62..d667f6b51d35 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -30,7 +30,7 @@ MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat period in seconds from 1 to "
static int wdt_start(struct watchdog_device *wdd)
{
struct device *dev = watchdog_get_drvdata(wdd);
- struct stmp3xxx_wdt_pdata *pdata = dev->platform_data;
+ struct stmp3xxx_wdt_pdata *pdata = dev_get_platdata(dev);
pdata->wdt_set_timeout(dev->parent, wdd->timeout * WDOG_TICK_RATE);
return 0;
@@ -39,7 +39,7 @@ static int wdt_start(struct watchdog_device *wdd)
static int wdt_stop(struct watchdog_device *wdd)
{
struct device *dev = watchdog_get_drvdata(wdd);
- struct stmp3xxx_wdt_pdata *pdata = dev->platform_data;
+ struct stmp3xxx_wdt_pdata *pdata = dev_get_platdata(dev);
pdata->wdt_set_timeout(dev->parent, 0);
return 0;
@@ -108,4 +108,3 @@ module_platform_driver(stmp3xxx_wdt_driver);
MODULE_DESCRIPTION("STMP3XXX RTC Watchdog Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index f6caa77151c7..76332d893e12 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -217,7 +217,7 @@ static struct platform_driver sunxi_wdt_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
- .of_match_table = of_match_ptr(sunxi_wdt_dt_ids)
+ .of_match_table = sunxi_wdt_dt_ids,
},
};
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index c9b0c627fe7e..09d4831aa61f 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -192,7 +192,7 @@ static int ts72xx_wdt_open(struct inode *inode, struct file *file)
dev_err(&wdt->pdev->dev,
"failed to convert timeout (%d) to register value\n",
timeout);
- return -EINVAL;
+ return regval;
}
if (mutex_lock_interruptible(&wdt->lock))
@@ -305,7 +305,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case WDIOC_GETSUPPORT:
- error = copy_to_user(argp, &winfo, sizeof(winfo));
+ if (copy_to_user(argp, &winfo, sizeof(winfo)))
+ error = -EFAULT;
break;
case WDIOC_GETSTATUS:
@@ -320,10 +321,9 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
case WDIOC_SETOPTIONS: {
int options;
- if (get_user(options, p)) {
- error = -EFAULT;
+ error = get_user(options, p);
+ if (error)
break;
- }
error = -EINVAL;
@@ -341,30 +341,26 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
case WDIOC_SETTIMEOUT: {
int new_timeout;
+ int regval;
- if (get_user(new_timeout, p)) {
- error = -EFAULT;
- } else {
- int regval;
-
- regval = timeout_to_regval(new_timeout);
- if (regval < 0) {
- error = -EINVAL;
- } else {
- ts72xx_wdt_stop(wdt);
- wdt->regval = regval;
- ts72xx_wdt_start(wdt);
- }
- }
+ error = get_user(new_timeout, p);
if (error)
break;
+ regval = timeout_to_regval(new_timeout);
+ if (regval < 0) {
+ error = regval;
+ break;
+ }
+ ts72xx_wdt_stop(wdt);
+ wdt->regval = regval;
+ ts72xx_wdt_start(wdt);
+
/*FALLTHROUGH*/
}
case WDIOC_GETTIMEOUT:
- if (put_user(regval_to_timeout(wdt->regval), p))
- error = -EFAULT;
+ error = put_user(regval_to_timeout(wdt->regval), p);
break;
default:
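The reworked ts72xx ioctl path returns the get_user()/put_user() result directly instead of open-coding -EFAULT, and keeps the fall-through from WDIOC_SETTIMEOUT to WDIOC_GETTIMEOUT so the caller gets the value the hardware actually accepted. Seen from userspace (hypothetical helper, applies to any watchdog node):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/watchdog.h>

    static int set_and_read_back(int fd, int seconds)
    {
            if (ioctl(fd, WDIOC_SETTIMEOUT, &seconds) < 0)
                    return -1;
            /* the driver may have rounded the request to a supported value */
            if (ioctl(fd, WDIOC_GETTIMEOUT, &seconds) < 0)
                    return -1;
            printf("effective timeout: %d s\n", seconds);
            return 0;
    }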
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 88f23c5cfddb..0fd0e8ae62a8 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -176,5 +176,4 @@ module_platform_driver_probe(txx9wdt_driver, txx9wdt_probe);
MODULE_DESCRIPTION("TXx9 Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:txx9wdt");
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
index a614d84121c3..e029b5768f2c 100644
--- a/drivers/watchdog/ux500_wdt.c
+++ b/drivers/watchdog/ux500_wdt.c
@@ -88,7 +88,7 @@ static struct watchdog_device ux500_wdt = {
static int ux500_wdt_probe(struct platform_device *pdev)
{
int ret;
- struct ux500_wdt_data *pdata = pdev->dev.platform_data;
+ struct ux500_wdt_data *pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
if (pdata->timeout > 0)
@@ -167,5 +167,4 @@ module_platform_driver(ux500_wdt_driver);
MODULE_AUTHOR("Jonas Aaberg <jonas.aberg@stericsson.com>");
MODULE_DESCRIPTION("Ux500 Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:ux500_wdt");
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 92f1326f0cfc..da6781c12523 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -396,4 +396,3 @@ module_exit(wdt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pádraig Brady <P@draigBrady.com>");
MODULE_DESCRIPTION("w83627hf/thf WDT driver");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c
index cd9f3c1e1af4..aaf2995d37f4 100644
--- a/drivers/watchdog/w83697hf_wdt.c
+++ b/drivers/watchdog/w83697hf_wdt.c
@@ -458,4 +458,3 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marcus Junker <junker@anduras.de>, "
"Samuel Tardieu <sam@rfc1149.net>");
MODULE_DESCRIPTION("w83697hf/hg WDT driver");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index 274be0bfaf24..ff58cb74671f 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -395,4 +395,3 @@ module_exit(wdt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Flemming Frandsen <ff@nrvissing.net>");
MODULE_DESCRIPTION("w83697ug/uf WDT driver");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index 7874ae06232b..f0483c75ed32 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -406,4 +406,3 @@ module_exit(w83877f_wdt_unload);
MODULE_AUTHOR("Scott and Bill Jennings");
MODULE_DESCRIPTION("Driver for watchdog timer in w83877f chip");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 5d2c902825c2..91bf55a20024 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -527,4 +527,3 @@ module_exit(w83977f_wdt_exit);
MODULE_AUTHOR("Jose Goncalves <jose.goncalves@inov.pt>");
MODULE_DESCRIPTION("Driver for watchdog timer in W83977F I/O chip");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index 25aba6e00a23..db0da7ea4fd8 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -322,6 +322,5 @@ module_exit(wafwdt_exit);
MODULE_AUTHOR("Justin Cormack");
MODULE_DESCRIPTION("ICP Wafer 5823 Single Board Computer WDT driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
/* end of wafer5823wdt.c */
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 05d18b4c661b..461336c4519f 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -77,7 +77,7 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
watchdog_check_min_max_timeout(wdd);
- /* try to get the tiemout module parameter first */
+ /* try to get the timeout module parameter first */
if (!watchdog_timeout_invalid(wdd, timeout_parm)) {
wdd->timeout = timeout_parm;
return ret;
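For context on the comment being fixed: watchdog_init_timeout() tries the module parameter first, then the "timeout-sec" devicetree property, and otherwise leaves the driver's preset wdd->timeout untouched. A typical probe-time call, sketched with hypothetical names:

    wdd->timeout = EXAMPLE_DEFAULT_TIMEOUT;            /* fallback if both sources fail */
    watchdog_init_timeout(wdd, heartbeat, &pdev->dev); /* heartbeat: module param, may be 0 */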
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index 3045debd5411..0240c60d14e3 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -48,8 +48,6 @@
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
MODULE_DESCRIPTION("RTAS watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS_MISCDEV(TEMP_MINOR);
static bool wdrtas_nowayout = WATCHDOG_NOWAYOUT;
static atomic_t wdrtas_miscdev_open = ATOMIC_INIT(0);
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index ee4333c01109..e0206b5b7d89 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -664,6 +664,4 @@ module_exit(wdt_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("Driver for ISA ICS watchdog cards (WDT500/501)");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS_MISCDEV(TEMP_MINOR);
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 5eec74053882..7355ddd0b207 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -224,7 +224,6 @@ static void __exit footbridge_watchdog_exit(void)
MODULE_AUTHOR("Phil Blundell <pb@nexus.co.uk>");
MODULE_DESCRIPTION("Footbridge watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(soft_margin, int, 0);
MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds");
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 65a402344933..a8e6f87f60c9 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -507,4 +507,3 @@ module_exit(wd977_exit);
MODULE_AUTHOR("Woody Suwalski <woodys@xandros.com>");
MODULE_DESCRIPTION("W83977AF Watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 36a54c0e32dd..ee89ba4dea63 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -744,5 +744,3 @@ module_pci_driver(wdtpci_driver);
MODULE_AUTHOR("JP Nollmann, Alan Cox");
MODULE_DESCRIPTION("Driver for the ICS PCI-WDT500/501 watchdog cards");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS_MISCDEV(TEMP_MINOR);
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index d4e47eda4182..e243bd01c774 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -184,7 +184,7 @@ static const struct watchdog_ops wm831x_wdt_ops = {
static int wm831x_wdt_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
- struct wm831x_pdata *chip_pdata;
+ struct wm831x_pdata *chip_pdata = dev_get_platdata(pdev->dev.parent);
struct wm831x_watchdog_pdata *pdata;
struct wm831x_wdt_drvdata *driver_data;
struct watchdog_device *wm831x_wdt;
@@ -231,12 +231,10 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
wm831x_wdt->timeout = wm831x_wdt_cfgs[i].time;
/* Apply any configuration */
- if (pdev->dev.parent->platform_data) {
- chip_pdata = pdev->dev.parent->platform_data;
+ if (chip_pdata)
pdata = chip_pdata->watchdog;
- } else {
+ else
pdata = NULL;
- }
if (pdata) {
reg &= ~(WM831X_WDOG_SECACT_MASK | WM831X_WDOG_PRIMACT_MASK |
diff --git a/drivers/watchdog/xen_wdt.c b/drivers/watchdog/xen_wdt.c
index 92ad33d0cb71..7a42dffd39e5 100644
--- a/drivers/watchdog/xen_wdt.c
+++ b/drivers/watchdog/xen_wdt.c
@@ -362,4 +362,3 @@ MODULE_AUTHOR("Jan Beulich <jbeulich@novell.com>");
MODULE_DESCRIPTION("Xen WatchDog Timer Driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 23eae5cb69c2..c794ea182140 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -140,7 +140,6 @@ config XEN_GRANT_DEV_ALLOC
config SWIOTLB_XEN
def_bool y
- depends on PCI && X86
select SWIOTLB
config XEN_TMEM
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index c4d2298893b1..62ccf5424ba8 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -49,6 +49,7 @@
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
+#include <xen/swiotlb-xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
@@ -898,8 +899,16 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
&map_ops[i].status, __func__);
- if (xen_feature(XENFEAT_auto_translated_physmap))
+ /* this is basically a nop on x86 */
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ for (i = 0; i < count; i++) {
+ if (map_ops[i].status)
+ continue;
+ set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
+ map_ops[i].dev_bus_addr >> PAGE_SHIFT);
+ }
return ret;
+ }
if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
@@ -942,8 +951,14 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
if (ret)
return ret;
- if (xen_feature(XENFEAT_auto_translated_physmap))
+ /* this is basically a nop on x86 */
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ for (i = 0; i < count; i++) {
+ set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
+ INVALID_P2M_ENTRY);
+ }
return ret;
+ }
if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1b2277c311d2..7ca9621a7883 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -42,12 +42,30 @@
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>
+#include <asm/dma-mapping.h>
+#include <asm/xen/page-coherent.h>
+
+#include <trace/events/swiotlb.h>
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
* swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
* API.
*/
+#ifndef CONFIG_X86
+static unsigned long dma_alloc_coherent_mask(struct device *dev,
+ gfp_t gfp)
+{
+ unsigned long dma_mask = 0;
+
+ dma_mask = dev->coherent_dma_mask;
+ if (!dma_mask)
+ dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
+
+ return dma_mask;
+}
+#endif
+
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
@@ -56,17 +74,17 @@ static unsigned long xen_io_tlb_nslabs;
static u64 start_dma_addr;
-static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
return phys_to_machine(XPADDR(paddr)).maddr;
}
-static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
+static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
return machine_to_phys(XMADDR(baddr)).paddr;
}
-static dma_addr_t xen_virt_to_bus(void *address)
+static inline dma_addr_t xen_virt_to_bus(void *address)
{
return xen_phys_to_bus(virt_to_phys(address));
}
@@ -89,7 +107,7 @@ static int check_pages_physically_contiguous(unsigned long pfn,
return 1;
}
-static int range_straddles_page_boundary(phys_addr_t p, size_t size)
+static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
unsigned long pfn = PFN_DOWN(p);
unsigned int offset = p & ~PAGE_MASK;
@@ -126,6 +144,8 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
int i, rc;
int dma_bits;
+ dma_addr_t dma_handle;
+ phys_addr_t p = virt_to_phys(buf);
dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
@@ -135,9 +155,9 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
do {
rc = xen_create_contiguous_region(
- (unsigned long)buf + (i << IO_TLB_SHIFT),
+ p + (i << IO_TLB_SHIFT),
get_order(slabs << IO_TLB_SHIFT),
- dma_bits);
+ dma_bits, &dma_handle);
} while (rc && dma_bits++ < max_dma_bits);
if (rc)
return rc;
@@ -263,7 +283,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
void *ret;
int order = get_order(size);
u64 dma_mask = DMA_BIT_MASK(32);
- unsigned long vstart;
phys_addr_t phys;
dma_addr_t dev_addr;
@@ -278,8 +297,12 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
return ret;
- vstart = __get_free_pages(flags, order);
- ret = (void *)vstart;
+ /* On ARM this function returns an ioremap'ped virtual address for
+ * which virt_to_phys doesn't return the corresponding physical
+ * address. In fact on ARM virt_to_phys only works for kernel direct
+ * mapped RAM memory. Also see comment below.
+ */
+ ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
if (!ret)
return ret;
@@ -287,18 +310,21 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = dma_alloc_coherent_mask(hwdev, flags);
- phys = virt_to_phys(ret);
+ /* At this point dma_handle is the physical address, next we are
+ * going to set it to the machine address.
+ * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
+ * to *dma_handle. */
+ phys = *dma_handle;
dev_addr = xen_phys_to_bus(phys);
if (((dev_addr + size - 1 <= dma_mask)) &&
!range_straddles_page_boundary(phys, size))
*dma_handle = dev_addr;
else {
- if (xen_create_contiguous_region(vstart, order,
- fls64(dma_mask)) != 0) {
- free_pages(vstart, order);
+ if (xen_create_contiguous_region(phys, order,
+ fls64(dma_mask), dma_handle) != 0) {
+ xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
return NULL;
}
- *dma_handle = virt_to_machine(ret).maddr;
}
memset(ret, 0, size);
return ret;
@@ -319,13 +345,15 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask;
- phys = virt_to_phys(vaddr);
+ /* do not use virt_to_phys because on ARM it doesn't return you the
+ * physical address */
+ phys = xen_bus_to_phys(dev_addr);
if (((dev_addr + size - 1 > dma_mask)) ||
range_straddles_page_boundary(phys, size))
- xen_destroy_contiguous_region((unsigned long)vaddr, order);
+ xen_destroy_contiguous_region(phys, order);
- free_pages((unsigned long)vaddr, order);
+ xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
@@ -352,16 +380,25 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* buffering it.
*/
if (dma_capable(dev, dev_addr, size) &&
- !range_straddles_page_boundary(phys, size) && !swiotlb_force)
+ !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+ /* we are not interested in the dma_addr returned by
+ * xen_dma_map_page, only in the potential cache flushes executed
+ * by the function. */
+ xen_dma_map_page(dev, page, offset, size, dir, attrs);
return dev_addr;
+ }
/*
* Oh well, have to allocate and map a bounce buffer.
*/
+ trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+
map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
+ xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
+ map & ~PAGE_MASK, size, dir, attrs);
dev_addr = xen_phys_to_bus(map);
/*
@@ -384,12 +421,15 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
* whatever the device wrote there.
*/
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir)
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
phys_addr_t paddr = xen_bus_to_phys(dev_addr);
BUG_ON(dir == DMA_NONE);
+ xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
+
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr)) {
swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
@@ -412,7 +452,7 @@ void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- xen_unmap_single(hwdev, dev_addr, size, dir);
+ xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
@@ -435,11 +475,15 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
BUG_ON(dir == DMA_NONE);
+ if (target == SYNC_FOR_CPU)
+ xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+
/* NOTE: We use dev_addr here, not paddr! */
- if (is_xen_swiotlb_buffer(dev_addr)) {
+ if (is_xen_swiotlb_buffer(dev_addr))
swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
- return;
- }
+
+ if (target == SYNC_FOR_DEVICE)
+ xen_dma_sync_single_for_device(hwdev, paddr, size, dir);
if (dir != DMA_FROM_DEVICE)
return;
@@ -502,16 +546,26 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
sg->length,
dir);
if (map == SWIOTLB_MAP_ERROR) {
+ dev_warn(hwdev, "swiotlb buffer is full\n");
/* Don't panic here, we expect map_sg users
to do proper error handling. */
xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
attrs);
sg_dma_len(sgl) = 0;
- return DMA_ERROR_CODE;
+ return 0;
}
sg->dma_address = xen_phys_to_bus(map);
- } else
+ } else {
+ /* we are not interested in the dma_addr returned by
+ * xen_dma_map_page, only in the potential cache flushes executed
+ * by the function. */
+ xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
+ paddr & ~PAGE_MASK,
+ sg->length,
+ dir,
+ attrs);
sg->dma_address = dev_addr;
+ }
sg_dma_len(sg) = sg->length;
}
return nelems;
@@ -533,7 +587,7 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
- xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
+ xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
@@ -593,3 +647,15 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
+
+int
+xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
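
The ARM-related comments in the hunks above reduce to a single rule: take the physical address from the *dma_handle that xen_alloc_coherent_pages() fills in, never from virt_to_phys() on a possibly ioremapped buffer. A minimal sketch of that allocation pattern, built from the helpers named in the patch but omitting the contiguous-region fallback (the function name is a placeholder):

        static void *sketch_coherent_alloc(struct device *hwdev, size_t size,
                                           dma_addr_t *dma_handle, gfp_t flags,
                                           struct dma_attrs *attrs)
        {
                void *ret;
                phys_addr_t phys;

                /* may be an ioremapped region on ARM; *dma_handle gets the phys addr */
                ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
                if (!ret)
                        return NULL;

                phys = *dma_handle;             /* NOT virt_to_phys(ret) */
                *dma_handle = xen_phys_to_bus(phys);

                memset(ret, 0, size);
                return ret;
        }
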
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 38e92b770e91..3c0a74b3e9b1 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -384,12 +384,14 @@ static ssize_t nodename_show(struct device *dev,
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
+static DEVICE_ATTR_RO(nodename);
static ssize_t devtype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
+static DEVICE_ATTR_RO(devtype);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -397,14 +399,24 @@ static ssize_t modalias_show(struct device *dev,
return sprintf(buf, "%s:%s\n", dev->bus->name,
to_xenbus_device(dev)->devicetype);
}
+static DEVICE_ATTR_RO(modalias);
-struct device_attribute xenbus_dev_attrs[] = {
- __ATTR_RO(nodename),
- __ATTR_RO(devtype),
- __ATTR_RO(modalias),
- __ATTR_NULL
+static struct attribute *xenbus_dev_attrs[] = {
+ &dev_attr_nodename.attr,
+ &dev_attr_devtype.attr,
+ &dev_attr_modalias.attr,
+ NULL,
};
-EXPORT_SYMBOL_GPL(xenbus_dev_attrs);
+
+static const struct attribute_group xenbus_dev_group = {
+ .attrs = xenbus_dev_attrs,
+};
+
+const struct attribute_group *xenbus_dev_groups[] = {
+ &xenbus_dev_group,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(xenbus_dev_groups);
int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
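
The conversion above follows the standard driver-core pattern: DEVICE_ATTR_RO(name) generates dev_attr_name from name_show(), the attributes are collected in a NULL-terminated array, wrapped in an attribute_group, and the bus exposes a NULL-terminated array of groups. A stripped-down sketch of that shape ("example" names are placeholders, not part of the patch):

        static ssize_t example_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
        {
                return sprintf(buf, "%s\n", dev_name(dev));
        }
        static DEVICE_ATTR_RO(example);         /* yields dev_attr_example */

        static struct attribute *example_attrs[] = {
                &dev_attr_example.attr,
                NULL,
        };

        static const struct attribute_group example_group = {
                .attrs = example_attrs,
        };

        const struct attribute_group *example_groups[] = {
                &example_group,
                NULL,
        };
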
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 146f857a36f8..1085ec294a19 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -54,7 +54,7 @@ enum xenstore_init {
XS_LOCAL,
};
-extern struct device_attribute xenbus_dev_attrs[];
+extern const struct attribute_group *xenbus_dev_groups[];
extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
extern int xenbus_dev_probe(struct device *_dev);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 998bbbab816b..5125dce11a60 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -200,7 +200,7 @@ static struct xen_bus_type xenbus_backend = {
.probe = xenbus_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
- .dev_attrs = xenbus_dev_attrs,
+ .dev_groups = xenbus_dev_groups,
},
};
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 34b20bfa4e8c..129bf84c19ec 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -154,7 +154,7 @@ static struct xen_bus_type xenbus_frontend = {
.probe = xenbus_frontend_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
- .dev_attrs = xenbus_dev_attrs,
+ .dev_groups = xenbus_dev_groups,
.pm = &xenbus_pm_ops,
},
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index a9ea73d6dcf3..a69260f27555 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -90,7 +90,7 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
&v9fs_cache_session_index_def,
- v9ses);
+ v9ses, true);
p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
v9ses, v9ses->fscache);
}
@@ -204,7 +204,7 @@ void v9fs_cache_inode_get_cookie(struct inode *inode)
v9ses = v9fs_inode2v9ses(inode);
v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
&v9fs_cache_inode_index_def,
- v9inode);
+ v9inode, true);
p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
inode, v9inode->fscache);
@@ -239,13 +239,12 @@ void v9fs_cache_inode_flush_cookie(struct inode *inode)
void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
struct v9fs_inode *v9inode = V9FS_I(inode);
- struct p9_fid *fid;
if (!v9inode->fscache)
return;
spin_lock(&v9inode->fscache_lock);
- fid = filp->private_data;
+
if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
v9fs_cache_inode_flush_cookie(inode);
else
@@ -271,7 +270,7 @@ void v9fs_cache_inode_reset_cookie(struct inode *inode)
v9ses = v9fs_inode2v9ses(inode);
v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
&v9fs_cache_inode_index_def,
- v9inode);
+ v9inode, true);
p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
inode, old, v9inode->fscache);
diff --git a/fs/9p/cache.h b/fs/9p/cache.h
index 40cc54ced5d9..2f9675491095 100644
--- a/fs/9p/cache.h
+++ b/fs/9p/cache.h
@@ -101,6 +101,18 @@ static inline void v9fs_fscache_wait_on_page_write(struct inode *inode,
#else /* CONFIG_9P_FSCACHE */
+static inline void v9fs_cache_inode_get_cookie(struct inode *inode)
+{
+}
+
+static inline void v9fs_cache_inode_put_cookie(struct inode *inode)
+{
+}
+
+static inline void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *file)
+{
+}
+
static inline int v9fs_fscache_release_page(struct page *page,
gfp_t gfp) {
return 1;
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 08f2e1e9a7e6..14da82564f4e 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -56,7 +56,7 @@ enum {
/* Options that take no arguments */
Opt_nodevmap,
/* Cache options */
- Opt_cache_loose, Opt_fscache,
+ Opt_cache_loose, Opt_fscache, Opt_mmap,
/* Access options */
Opt_access, Opt_posixacl,
/* Error token */
@@ -74,6 +74,7 @@ static const match_table_t tokens = {
{Opt_cache, "cache=%s"},
{Opt_cache_loose, "loose"},
{Opt_fscache, "fscache"},
+ {Opt_mmap, "mmap"},
{Opt_cachetag, "cachetag=%s"},
{Opt_access, "access=%s"},
{Opt_posixacl, "posixacl"},
@@ -91,6 +92,9 @@ static int get_cache_mode(char *s)
} else if (!strcmp(s, "fscache")) {
version = CACHE_FSCACHE;
p9_debug(P9_DEBUG_9P, "Cache mode: fscache\n");
+ } else if (!strcmp(s, "mmap")) {
+ version = CACHE_MMAP;
+ p9_debug(P9_DEBUG_9P, "Cache mode: mmap\n");
} else if (!strcmp(s, "none")) {
version = CACHE_NONE;
p9_debug(P9_DEBUG_9P, "Cache mode: none\n");
@@ -220,6 +224,9 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
case Opt_fscache:
v9ses->cache = CACHE_FSCACHE;
break;
+ case Opt_mmap:
+ v9ses->cache = CACHE_MMAP;
+ break;
case Opt_cachetag:
#ifdef CONFIG_9P_FSCACHE
v9ses->cachetag = match_strdup(&args[0]);
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index a8e127c89627..099c7712631c 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -64,6 +64,7 @@ enum p9_session_flags {
enum p9_cache_modes {
CACHE_NONE,
+ CACHE_MMAP,
CACHE_LOOSE,
CACHE_FSCACHE,
};
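
With CACHE_MMAP inserted between CACHE_NONE and CACHE_LOOSE, the old "if (v9ses->cache)" tests no longer mean "use the page cache", which is why the hunks below keep spelling out "v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE". A hypothetical helper (not in the patch) makes the intent explicit: only the loose and fscache modes use the fully cached paths, while cache=mmap caches only what is mapped.

        static inline bool example_cache_uses_pagecache(const struct v9fs_session_info *v9ses)
        {
                /* CACHE_MMAP deliberately excluded: it caches mmap'ed pages only */
                return v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE;
        }
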
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index dc95a252523d..b83ebfbf3fdc 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -50,6 +50,8 @@ extern const struct dentry_operations v9fs_dentry_operations;
extern const struct dentry_operations v9fs_cached_dentry_operations;
extern const struct file_operations v9fs_cached_file_operations;
extern const struct file_operations v9fs_cached_file_operations_dotl;
+extern const struct file_operations v9fs_mmap_file_operations;
+extern const struct file_operations v9fs_mmap_file_operations_dotl;
extern struct kmem_cache *v9fs_inode_cache;
struct inode *v9fs_alloc_inode(struct super_block *sb);
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 9ff073f4090a..7e08ccd542b5 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -202,6 +202,8 @@ static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
int retval;
+ p9_debug(P9_DEBUG_VFS, "page %p\n", page);
+
retval = v9fs_vfs_writepage_locked(page);
if (retval < 0) {
if (retval == -EAGAIN) {
@@ -241,9 +243,8 @@ static int v9fs_launder_page(struct page *page)
* v9fs_direct_IO - 9P address space operation for direct I/O
* @rw: direction (read or write)
* @iocb: target I/O control block
- * @iov: array of vectors that define I/O buffer
+ * @iter: array of vectors that define I/O buffer
* @pos: offset in file to begin the operation
- * @nr_segs: size of iovec array
*
* The presence of v9fs_direct_IO() in the address space ops vector
* allows open() O_DIRECT flags which would have failed otherwise.
@@ -252,13 +253,12 @@ static int v9fs_launder_page(struct page *page)
* the VFS gets them, so this method should never be called.
*
* Direct IO is not 'yet' supported in the cached mode. Hence when
- * this routine is called through generic_file_aio_read(), the read/write fails
- * with an error.
+ * this routine is called through generic_file_read_iter(), the read/write
+ * fails with an error.
*
*/
static ssize_t
-v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
- loff_t pos, unsigned long nr_segs)
+v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
/*
* FIXME
@@ -267,7 +267,7 @@ v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
*/
p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) off/no(%lld/%lu) EINVAL\n",
iocb->ki_filp->f_path.dentry->d_name.name,
- (long long)pos, nr_segs);
+ (long long)pos, iter->nr_segs);
return -EINVAL;
}
@@ -282,6 +282,9 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
struct inode *inode = mapping->host;
+
+ p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
+
v9inode = V9FS_I(inode);
start:
page = grab_cache_page_write_begin(mapping, index, flags);
@@ -312,6 +315,8 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
loff_t last_pos = pos + copied;
struct inode *inode = page->mapping->host;
+ p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
+
if (unlikely(copied < len)) {
/*
* zero out the rest of the area
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index aa5ecf479a57..3f7caf9e3c58 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -45,6 +45,7 @@
#include "cache.h"
static const struct vm_operations_struct v9fs_file_vm_ops;
+static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
/**
* v9fs_file_open - open a file (or directory)
@@ -87,7 +88,8 @@ int v9fs_file_open(struct inode *inode, struct file *file)
file->private_data = fid;
mutex_lock(&v9inode->v_mutex);
- if (v9ses->cache && !v9inode->writeback_fid &&
+ if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
+ !v9inode->writeback_fid &&
((file->f_flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
@@ -105,10 +107,10 @@ int v9fs_file_open(struct inode *inode, struct file *file)
v9inode->writeback_fid = (void *) fid;
}
mutex_unlock(&v9inode->v_mutex);
-#ifdef CONFIG_9P_FSCACHE
- if (v9ses->cache)
+ /* Should the cookie also be set for CACHE_LOOSE, as the previous check did?
+ * set_cookie checks v9inode->fscache anyway... */
+ if (v9ses->cache == CACHE_FSCACHE)
v9fs_cache_inode_set_cookie(inode, file);
-#endif
return 0;
out_error:
p9_client_clunk(file->private_data);
@@ -463,14 +465,12 @@ v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
int n;
loff_t i_size;
size_t total = 0;
- struct p9_client *clnt;
loff_t origin = *offset;
unsigned long pg_start, pg_end;
p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
data, (int)count, (int)*offset);
- clnt = fid->clnt;
do {
n = p9_client_write(fid, NULL, data+total, origin+total, count);
if (n <= 0)
@@ -583,11 +583,12 @@ int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
}
static int
-v9fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
int retval;
- retval = generic_file_mmap(file, vma);
+
+ retval = generic_file_mmap(filp, vma);
if (!retval)
vma->vm_ops = &v9fs_file_vm_ops;
@@ -595,6 +596,43 @@ v9fs_file_mmap(struct file *file, struct vm_area_struct *vma)
}
static int
+v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int retval;
+ struct inode *inode;
+ struct v9fs_inode *v9inode;
+ struct p9_fid *fid;
+
+ inode = file_inode(filp);
+ v9inode = V9FS_I(inode);
+ mutex_lock(&v9inode->v_mutex);
+ if (!v9inode->writeback_fid &&
+ (vma->vm_flags & VM_WRITE)) {
+ /*
+ * clone a fid and add it to writeback_fid
+ * we do it during mmap instead of
+ * page dirty time via write_begin/page_mkwrite
+ * because we want the write-after-unlink use case
+ * to work.
+ */
+ fid = v9fs_writeback_fid(filp->f_path.dentry);
+ if (IS_ERR(fid)) {
+ retval = PTR_ERR(fid);
+ mutex_unlock(&v9inode->v_mutex);
+ return retval;
+ }
+ v9inode->writeback_fid = (void *) fid;
+ }
+ mutex_unlock(&v9inode->v_mutex);
+
+ retval = generic_file_mmap(filp, vma);
+ if (!retval)
+ vma->vm_ops = &v9fs_mmap_file_vm_ops;
+
+ return retval;
+}
+
+static int
v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct v9fs_inode *v9inode;
@@ -662,6 +700,22 @@ v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
return do_sync_read(filp, data, count, offset);
}
+/**
+ * v9fs_mmap_file_read - read from a file
+ * @filp: file pointer to read
+ * @data: user data buffer to read data into
+ * @count: size of buffer
+ * @offset: offset at which to read data
+ *
+ */
+static ssize_t
+v9fs_mmap_file_read(struct file *filp, char __user *data, size_t count,
+ loff_t *offset)
+{
+ /* TODO: Check if there are dirty pages */
+ return v9fs_file_read(filp, data, count, offset);
+}
+
static ssize_t
v9fs_direct_write(struct file *filp, const char __user * data,
size_t count, loff_t *offsetp)
@@ -732,19 +786,72 @@ v9fs_cached_file_write(struct file *filp, const char __user * data,
return do_sync_write(filp, data, count, offset);
}
+
+/**
+ * v9fs_mmap_file_write - write to a file
+ * @filp: file pointer to write
+ * @data: data buffer to write data from
+ * @count: size of buffer
+ * @offset: offset at which to write data
+ *
+ */
+static ssize_t
+v9fs_mmap_file_write(struct file *filp, const char __user *data,
+ size_t count, loff_t *offset)
+{
+ /*
+ * TODO: invalidate mmaps on filp's inode between
+ * offset and offset+count
+ */
+ return v9fs_file_write(filp, data, count, offset);
+}
+
+static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
+{
+ struct inode *inode;
+
+ struct writeback_control wbc = {
+ .nr_to_write = LONG_MAX,
+ .sync_mode = WB_SYNC_ALL,
+ .range_start = vma->vm_pgoff * PAGE_SIZE,
+ /* absolute end, byte at end included */
+ .range_end = vma->vm_pgoff * PAGE_SIZE +
+ (vma->vm_end - vma->vm_start - 1),
+ };
+
+
+ p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
+
+ inode = file_inode(vma->vm_file);
+
+ if (!mapping_cap_writeback_dirty(inode->i_mapping))
+ wbc.nr_to_write = 0;
+
+ might_sleep();
+ sync_inode(inode, &wbc);
+}
+
+
static const struct vm_operations_struct v9fs_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = v9fs_vm_page_mkwrite,
.remap_pages = generic_file_remap_pages,
};
+static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
+ .close = v9fs_mmap_vm_close,
+ .fault = filemap_fault,
+ .page_mkwrite = v9fs_vm_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
+};
+
const struct file_operations v9fs_cached_file_operations = {
.llseek = generic_file_llseek,
.read = v9fs_cached_file_read,
.write = v9fs_cached_file_write,
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
@@ -756,8 +863,8 @@ const struct file_operations v9fs_cached_file_operations_dotl = {
.llseek = generic_file_llseek,
.read = v9fs_cached_file_read,
.write = v9fs_cached_file_write,
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock_dotl,
@@ -788,3 +895,26 @@ const struct file_operations v9fs_file_operations_dotl = {
.mmap = generic_file_readonly_mmap,
.fsync = v9fs_file_fsync_dotl,
};
+
+const struct file_operations v9fs_mmap_file_operations = {
+ .llseek = generic_file_llseek,
+ .read = v9fs_mmap_file_read,
+ .write = v9fs_mmap_file_write,
+ .open = v9fs_file_open,
+ .release = v9fs_dir_release,
+ .lock = v9fs_file_lock,
+ .mmap = v9fs_mmap_file_mmap,
+ .fsync = v9fs_file_fsync,
+};
+
+const struct file_operations v9fs_mmap_file_operations_dotl = {
+ .llseek = generic_file_llseek,
+ .read = v9fs_mmap_file_read,
+ .write = v9fs_mmap_file_write,
+ .open = v9fs_file_open,
+ .release = v9fs_dir_release,
+ .lock = v9fs_file_lock_dotl,
+ .flock = v9fs_file_flock_dotl,
+ .mmap = v9fs_mmap_file_mmap,
+ .fsync = v9fs_file_fsync_dotl,
+};
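
v9fs_mmap_vm_close() above flushes exactly the byte range backed by the VMA: a mapping whose file offset is vm_pgoff pages and whose length is vm_end - vm_start bytes covers bytes [vm_pgoff * PAGE_SIZE, vm_pgoff * PAGE_SIZE + len - 1], end inclusive. A condensed sketch of just that range computation (the function name is a placeholder):

        static void example_flush_vma_range(struct vm_area_struct *vma)
        {
                struct inode *inode = file_inode(vma->vm_file);
                loff_t start = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
                loff_t len = vma->vm_end - vma->vm_start;
                struct writeback_control wbc = {
                        .sync_mode   = WB_SYNC_ALL,
                        .nr_to_write = LONG_MAX,
                        .range_start = start,
                        .range_end   = start + len - 1, /* byte at end included */
                };

                sync_inode(inode, &wbc);
        }
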
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 94de6d1482e2..bb7991c7e5c7 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -299,15 +299,22 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
case S_IFREG:
if (v9fs_proto_dotl(v9ses)) {
inode->i_op = &v9fs_file_inode_operations_dotl;
- if (v9ses->cache)
+ if (v9ses->cache == CACHE_LOOSE ||
+ v9ses->cache == CACHE_FSCACHE)
inode->i_fop =
&v9fs_cached_file_operations_dotl;
+ else if (v9ses->cache == CACHE_MMAP)
+ inode->i_fop = &v9fs_mmap_file_operations_dotl;
else
inode->i_fop = &v9fs_file_operations_dotl;
} else {
inode->i_op = &v9fs_file_inode_operations;
- if (v9ses->cache)
- inode->i_fop = &v9fs_cached_file_operations;
+ if (v9ses->cache == CACHE_LOOSE ||
+ v9ses->cache == CACHE_FSCACHE)
+ inode->i_fop =
+ &v9fs_cached_file_operations;
+ else if (v9ses->cache == CACHE_MMAP)
+ inode->i_fop = &v9fs_mmap_file_operations;
else
inode->i_fop = &v9fs_file_operations;
}
@@ -448,9 +455,7 @@ void v9fs_evict_inode(struct inode *inode)
clear_inode(inode);
filemap_fdatawrite(inode->i_mapping);
-#ifdef CONFIG_9P_FSCACHE
v9fs_cache_inode_put_cookie(inode);
-#endif
/* clunk the fid stashed in writeback_fid */
if (v9inode->writeback_fid) {
p9_client_clunk(v9inode->writeback_fid);
@@ -531,9 +536,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
goto error;
v9fs_stat2inode(st, inode, sb);
-#ifdef CONFIG_9P_FSCACHE
v9fs_cache_inode_get_cookie(inode);
-#endif
unlock_new_inode(inode);
return inode;
error:
@@ -783,7 +786,6 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct dentry *res;
- struct super_block *sb;
struct v9fs_session_info *v9ses;
struct p9_fid *dfid, *fid;
struct inode *inode;
@@ -795,7 +797,6 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
- sb = dir->i_sb;
v9ses = v9fs_inode2v9ses(dir);
/* We can walk d_parent because we hold the dir->i_mutex */
dfid = v9fs_fid_lookup(dentry->d_parent);
@@ -816,7 +817,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
* unlink. For cached mode create calls request for new
* inode. But with cache disabled, lookup should do this.
*/
- if (v9ses->cache)
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
else
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
@@ -867,7 +868,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
return finish_no_open(file, res);
err = 0;
- fid = NULL;
+
v9ses = v9fs_inode2v9ses(dir);
perm = unixmode2p9mode(v9ses, mode);
fid = v9fs_create(v9ses, dir, dentry, NULL, perm,
@@ -882,7 +883,8 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
v9fs_invalidate_inode_attr(dir);
v9inode = V9FS_I(dentry->d_inode);
mutex_lock(&v9inode->v_mutex);
- if (v9ses->cache && !v9inode->writeback_fid &&
+ if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
+ !v9inode->writeback_fid &&
((flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
@@ -905,10 +907,8 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
goto error;
file->private_data = fid;
-#ifdef CONFIG_9P_FSCACHE
- if (v9ses->cache)
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
v9fs_cache_inode_set_cookie(dentry->d_inode, file);
-#endif
*opened |= FILE_CREATED;
out:
@@ -1485,7 +1485,7 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
*/
i_size = inode->i_size;
v9fs_stat2inode(st, inode, inode->i_sb);
- if (v9ses->cache)
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
inode->i_size = i_size;
spin_unlock(&inode->i_lock);
out:
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index a7c481402c46..59dc8e87647f 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -141,9 +141,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
goto error;
v9fs_stat2inode_dotl(st, inode);
-#ifdef CONFIG_9P_FSCACHE
v9fs_cache_inode_get_cookie(inode);
-#endif
retval = v9fs_get_acl(inode, fid);
if (retval)
goto error;
@@ -332,7 +330,8 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
v9inode = V9FS_I(inode);
mutex_lock(&v9inode->v_mutex);
- if (v9ses->cache && !v9inode->writeback_fid &&
+ if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
+ !v9inode->writeback_fid &&
((flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
@@ -355,10 +354,8 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
if (err)
goto err_clunk_old_fid;
file->private_data = ofid;
-#ifdef CONFIG_9P_FSCACHE
- if (v9ses->cache)
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
v9fs_cache_inode_set_cookie(inode, file);
-#endif
*opened |= FILE_CREATED;
out:
v9fs_put_acl(dacl, pacl);
@@ -477,13 +474,11 @@ static int
v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
- int err;
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
struct p9_stat_dotl *st;
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
- err = -EPERM;
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
generic_fillattr(dentry->d_inode, stat);
@@ -560,7 +555,6 @@ static int v9fs_mapped_iattr_valid(int iattr_valid)
int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
{
int retval;
- struct v9fs_session_info *v9ses;
struct p9_fid *fid;
struct p9_iattr_dotl p9attr;
struct inode *inode = dentry->d_inode;
@@ -581,8 +575,6 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
- retval = -EPERM;
- v9ses = v9fs_dentry2v9ses(dentry);
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
@@ -719,7 +711,7 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
}
v9fs_invalidate_inode_attr(dir);
- if (v9ses->cache) {
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
/* Now walk from the parent so we can get an unopened fid. */
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
@@ -772,7 +764,6 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
int err;
- char *name;
struct dentry *dir_dentry;
struct p9_fid *dfid, *oldfid;
struct v9fs_session_info *v9ses;
@@ -790,8 +781,6 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
if (IS_ERR(oldfid))
return PTR_ERR(oldfid);
- name = (char *) dentry->d_name.name;
-
err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
if (err < 0) {
@@ -977,7 +966,7 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
*/
i_size = inode->i_size;
v9fs_stat2inode_dotl(st, inode);
- if (v9ses->cache)
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
inode->i_size = i_size;
spin_unlock(&inode->i_lock);
out:
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 2756dcd5de6e..0afd0382822b 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -144,7 +144,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
}
v9fs_fill_super(sb, v9ses, flags, data);
- if (v9ses->cache)
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
sb->s_d_op = &v9fs_cached_dentry_operations;
else
sb->s_d_op = &v9fs_dentry_operations;
@@ -282,7 +282,7 @@ static int v9fs_drop_inode(struct inode *inode)
{
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(inode);
- if (v9ses->cache)
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
return generic_drop_inode(inode);
/*
* in case of non cached mode always drop the
@@ -325,10 +325,12 @@ static int v9fs_write_inode_dotl(struct inode *inode,
* send an fsync request to server irrespective of
* wbc->sync_mode.
*/
- p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
v9inode = V9FS_I(inode);
+ p9_debug(P9_DEBUG_VFS, "%s: inode %p, writeback_fid %p\n",
+ __func__, inode, v9inode->writeback_fid);
if (!v9inode->writeback_fid)
return 0;
+
ret = p9_client_fsync(v9inode->writeback_fid, 0);
if (ret < 0) {
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 3c28cdfb8c47..04133a1fd9cb 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -138,8 +138,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
if (retval < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n",
retval);
- p9_client_clunk(fid);
- return retval;
+ goto err;
}
msize = fid->clnt->msize;
while (value_len) {
@@ -152,12 +151,15 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
if (write_count < 0) {
/* error in xattr write */
retval = write_count;
- break;
+ goto err;
}
offset += write_count;
value_len -= write_count;
}
- return p9_client_clunk(fid);
+ retval = offset;
+err:
+ p9_client_clunk(fid);
+ return retval;
}
ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
diff --git a/fs/Makefile b/fs/Makefile
index 4fe6df3ec28f..1afa0e020082 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -11,7 +11,7 @@ obj-y := open.o read_write.o file_table.o super.o \
attr.o bad_inode.o file.o filesystems.o namespace.o \
seq_file.o xattr.o libfs.o fs-writeback.o \
pnode.o splice.o sync.o utimes.o \
- stack.o fs_struct.o statfs.o
+ stack.o fs_struct.o statfs.o iov-iter.o
ifeq ($(CONFIG_BLOCK),y)
obj-y += buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o
diff --git a/fs/adfs/file.c b/fs/adfs/file.c
index a36da5382b40..da1e02161ac3 100644
--- a/fs/adfs/file.c
+++ b/fs/adfs/file.c
@@ -24,11 +24,11 @@
const struct file_operations adfs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.mmap = generic_file_mmap,
.fsync = generic_file_fsync,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .write_iter = generic_file_write_iter,
.splice_read = generic_file_splice_read,
};
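
The same mechanical switch from aio_read/aio_write to read_iter/write_iter repeats for affs, afs and bfs below; the synchronous .read/.write entries keep pointing at do_sync_read/do_sync_write, which in this series are expected to fall through to the iterator-based methods. A minimal table in the new style looks like the sketch below (the "example" name is a placeholder).

        const struct file_operations example_file_operations = {
                .llseek         = generic_file_llseek,
                .read           = do_sync_read,                 /* sync wrapper */
                .write          = do_sync_write,
                .read_iter      = generic_file_read_iter,       /* iov_iter based */
                .write_iter     = generic_file_write_iter,
                .mmap           = generic_file_mmap,
                .splice_read    = generic_file_splice_read,
        };
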
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 8669b6ecddee..664f743c2d8d 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -28,9 +28,9 @@ static int affs_file_release(struct inode *inode, struct file *filp);
const struct file_operations affs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.open = affs_file_open,
.release = affs_file_release,
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 3c090b7555ea..ca0a3cf93791 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -179,7 +179,7 @@ struct afs_cell *afs_cell_create(const char *name, unsigned namesz,
/* put it up for caching (this never returns an error) */
cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
&afs_cell_cache_index_def,
- cell);
+ cell, true);
#endif
/* add to the cell lists */
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 66d50fe2ee45..3b71622e40f4 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -33,8 +33,8 @@ const struct file_operations afs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = generic_file_aio_read,
- .aio_write = afs_file_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = afs_file_write,
.mmap = generic_file_readonly_mmap,
.splice_read = generic_file_splice_read,
.fsync = afs_fsync,
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 789bc253b5f6..ce25d755b7aa 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -259,7 +259,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
#ifdef CONFIG_AFS_FSCACHE
vnode->cache = fscache_acquire_cookie(vnode->volume->cache,
&afs_vnode_cache_index_def,
- vnode);
+ vnode, true);
#endif
ret = afs_inode_map_status(vnode, key);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index a306bb6d88d9..9c048ffac900 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -747,8 +747,7 @@ extern int afs_write_end(struct file *file, struct address_space *mapping,
extern int afs_writepage(struct page *, struct writeback_control *);
extern int afs_writepages(struct address_space *, struct writeback_control *);
extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
-extern ssize_t afs_file_write(struct kiocb *, const struct iovec *,
- unsigned long, loff_t);
+extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *, loff_t);
extern int afs_writeback_all(struct afs_vnode *);
extern int afs_fsync(struct file *, loff_t, loff_t, int);
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 57bcb1596530..b6df2e83809f 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -308,7 +308,8 @@ static int afs_vlocation_fill_in_record(struct afs_vlocation *vl,
/* see if we have an in-cache copy (will set vl->valid if there is) */
#ifdef CONFIG_AFS_FSCACHE
vl->cache = fscache_acquire_cookie(vl->cell->cache,
- &afs_vlocation_cache_index_def, vl);
+ &afs_vlocation_cache_index_def, vl,
+ true);
#endif
if (vl->valid) {
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 401eeb21869f..2b607257820c 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -131,7 +131,7 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
#ifdef CONFIG_AFS_FSCACHE
volume->cache = fscache_acquire_cookie(vlocation->cache,
&afs_volume_cache_index_def,
- volume);
+ volume, true);
#endif
afs_get_vlocation(vlocation);
volume->vlocation = vlocation;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index a890db4b9898..9fa2f596430a 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -625,15 +625,14 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
/*
* write to an AFS file
*/
-ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
ssize_t result;
- size_t count = iov_length(iov, nr_segs);
+ size_t count = iov_iter_count(iter);
_enter("{%x.%u},{%zu},%lu,",
- vnode->fid.vid, vnode->fid.vnode, count, nr_segs);
+ vnode->fid.vid, vnode->fid.vnode, count, iter->nr_segs);
if (IS_SWAPFILE(&vnode->vfs_inode)) {
printk(KERN_INFO
@@ -644,7 +643,7 @@ ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
if (!count)
return 0;
- result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+ result = generic_file_write_iter(iocb, iter, pos);
if (IS_ERR_VALUE(result)) {
_leave(" = %zd", result);
return result;
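
afs_file_write() above shows the typical shape of a ->write_iter() after the conversion: the byte count comes from iov_iter_count() rather than iov_length(iov, nr_segs), and the work is still delegated to generic_file_write_iter(). A reduced sketch, keeping the three-argument signature used throughout this series (the name is a placeholder):

        static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *iter,
                                          loff_t pos)
        {
                size_t count = iov_iter_count(iter);    /* replaces iov_length() */

                if (!count)
                        return 0;                       /* nothing to write */

                return generic_file_write_iter(iocb, iter, pos);
        }
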
diff --git a/fs/aio.c b/fs/aio.c
index 067e3d340c35..a5630703eb56 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -877,6 +877,10 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
iocb->ki_ctx = ERR_PTR(-EXDEV);
wake_up_process(iocb->ki_obj.tsk);
return;
+ } else if (is_kernel_kiocb(iocb)) {
+ iocb->ki_obj.complete(iocb->ki_user_data, res);
+ aio_kernel_free(iocb);
+ return;
}
/*
@@ -1195,13 +1199,55 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
return 0;
}
+static ssize_t aio_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ ssize_t ret;
+
+ if (unlikely(!is_kernel_kiocb(iocb)))
+ return -EINVAL;
+
+ if (unlikely(!(file->f_mode & FMODE_READ)))
+ return -EBADF;
+
+ ret = security_file_permission(file, MAY_READ);
+ if (unlikely(ret))
+ return ret;
+
+ if (!file->f_op->read_iter)
+ return -EINVAL;
+
+ return file->f_op->read_iter(iocb, iter, iocb->ki_pos);
+}
+
+static ssize_t aio_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ ssize_t ret;
+
+ if (unlikely(!is_kernel_kiocb(iocb)))
+ return -EINVAL;
+
+ if (unlikely(!(file->f_mode & FMODE_WRITE)))
+ return -EBADF;
+
+ ret = security_file_permission(file, MAY_WRITE);
+ if (unlikely(ret))
+ return ret;
+
+ if (!file->f_op->write_iter)
+ return -EINVAL;
+
+ return file->f_op->write_iter(iocb, iter, iocb->ki_pos);
+}
+
/*
* aio_setup_iocb:
* Performs the initial checks and aio retry method
* setup for the kiocb at the time of io submission.
*/
static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
- char __user *buf, bool compat)
+ void *buf, bool compat)
{
struct file *file = req->ki_filp;
ssize_t ret;
@@ -1216,14 +1262,14 @@ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
case IOCB_CMD_PREADV:
mode = FMODE_READ;
rw = READ;
- rw_op = file->f_op->aio_read;
+ rw_op = do_aio_read;
goto rw_common;
case IOCB_CMD_PWRITE:
case IOCB_CMD_PWRITEV:
mode = FMODE_WRITE;
rw = WRITE;
- rw_op = file->f_op->aio_write;
+ rw_op = do_aio_write;
goto rw_common;
rw_common:
if (unlikely(!(file->f_mode & mode)))
@@ -1266,6 +1312,14 @@ rw_common:
file_end_write(file);
break;
+ case IOCB_CMD_READ_ITER:
+ ret = aio_read_iter(req, buf);
+ break;
+
+ case IOCB_CMD_WRITE_ITER:
+ ret = aio_write_iter(req, buf);
+ break;
+
case IOCB_CMD_FDSYNC:
if (!file->f_op->aio_fsync)
return -EINVAL;
@@ -1303,6 +1357,80 @@ rw_common:
return 0;
}
+/*
+ * This allocates an iocb that will be used to submit and track completion of
+ * an IO that is issued from kernel space.
+ *
+ * The caller is expected to call the appropriate aio_kernel_init_() functions
+ * and then call aio_kernel_submit(). From that point forward progress is
+ * guaranteed by the file system aio method. Eventually the caller's
+ * completion callback will be called.
+ *
+ * These iocbs are special. They don't have a context, we don't limit the
+ * number pending, and they can't be canceled.
+ */
+struct kiocb *aio_kernel_alloc(gfp_t gfp)
+{
+ return kzalloc(sizeof(struct kiocb), gfp);
+}
+EXPORT_SYMBOL_GPL(aio_kernel_alloc);
+
+void aio_kernel_free(struct kiocb *iocb)
+{
+ kfree(iocb);
+}
+EXPORT_SYMBOL_GPL(aio_kernel_free);
+
+/*
+ * ptr and count can be a buffer and byte count or an iovec and segment count.
+ */
+void aio_kernel_init_rw(struct kiocb *iocb, struct file *filp,
+ size_t nr, loff_t off)
+{
+ iocb->ki_filp = filp;
+ iocb->ki_nbytes = nr;
+ iocb->ki_pos = off;
+ iocb->ki_ctx = (void *)-1;
+}
+EXPORT_SYMBOL_GPL(aio_kernel_init_rw);
+
+void aio_kernel_init_callback(struct kiocb *iocb,
+ void (*complete)(u64 user_data, long res),
+ u64 user_data)
+{
+ iocb->ki_obj.complete = complete;
+ iocb->ki_user_data = user_data;
+}
+EXPORT_SYMBOL_GPL(aio_kernel_init_callback);
+
+/*
+ * The iocb is our responsibility once this is called. The caller must not
+ * reference it.
+ *
+ * Callers must be prepared for their iocb completion callback to be called the
+ * moment they enter this function. The completion callback may be called from
+ * any context.
+ *
+ * Returns: 0: the iocb completion callback will be called with the op result
+ * negative errno: the operation was not submitted and the iocb was freed
+ */
+int aio_kernel_submit(struct kiocb *iocb, unsigned op, void *ptr)
+{
+ int ret;
+
+ BUG_ON(!is_kernel_kiocb(iocb));
+ BUG_ON(!iocb->ki_obj.complete);
+ BUG_ON(!iocb->ki_filp);
+
+ ret = aio_run_iocb(iocb, op, ptr, 0);
+
+ if (ret)
+ aio_kernel_free(iocb);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(aio_kernel_submit);
+
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
struct iocb *iocb, bool compat)
{
@@ -1362,7 +1490,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
req->ki_nbytes = iocb->aio_nbytes;
ret = aio_run_iocb(req, iocb->aio_lio_opcode,
- (char __user *)(unsigned long)iocb->aio_buf,
+ (void *)(unsigned long)iocb->aio_buf,
compat);
if (ret)
goto out_put_req;
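
The doc comments above describe the caller contract for the new in-kernel aio interface: allocate a kiocb, initialise the rw fields and the completion callback, then submit; on error the iocb is freed by aio_kernel_submit(), otherwise the callback eventually fires from an arbitrary context. A usage sketch built only from the functions added above (the caller-side names, the tag and the iov_iter argument are assumptions of this sketch):

        static void example_complete(u64 user_data, long res)
        {
                pr_info("kernel aio request %llu completed: %ld\n", user_data, res);
        }

        static int example_submit_read(struct file *filp, struct iov_iter *iter,
                                       loff_t off, u64 tag)
        {
                struct kiocb *iocb = aio_kernel_alloc(GFP_KERNEL);

                if (!iocb)
                        return -ENOMEM;

                aio_kernel_init_rw(iocb, filp, iov_iter_count(iter), off);
                aio_kernel_init_callback(iocb, example_complete, tag);

                /* on failure the iocb has already been freed for us */
                return aio_kernel_submit(iocb, IOCB_CMD_READ_ITER, iter);
        }
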
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 7c93953030fb..38651e5da183 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -39,12 +39,24 @@ static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
return -EIO;
}
+static ssize_t bad_file_read_iter(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos)
+{
+ return -EIO;
+}
+
static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
return -EIO;
}
+static ssize_t bad_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos)
+{
+ return -EIO;
+}
+
static int bad_file_readdir(struct file *file, struct dir_context *ctx)
{
return -EIO;
@@ -151,7 +163,9 @@ static const struct file_operations bad_file_ops =
.read = bad_file_read,
.write = bad_file_write,
.aio_read = bad_file_aio_read,
+ .read_iter = bad_file_read_iter,
.aio_write = bad_file_aio_write,
+ .write_iter = bad_file_write_iter,
.iterate = bad_file_readdir,
.poll = bad_file_poll,
.unlocked_ioctl = bad_file_unlocked_ioctl,
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index e9c75e20db32..daa15d6ba450 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -42,7 +42,7 @@ static void befs_destroy_inode(struct inode *inode);
static int befs_init_inodecache(void);
static void befs_destroy_inodecache(void);
static void *befs_follow_link(struct dentry *, struct nameidata *);
-static void befs_put_link(struct dentry *, struct nameidata *, void *);
+static void *befs_fast_follow_link(struct dentry *, struct nameidata *);
static int befs_utf2nls(struct super_block *sb, const char *in, int in_len,
char **out, int *out_len);
static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
@@ -79,10 +79,15 @@ static const struct address_space_operations befs_aops = {
.bmap = befs_bmap,
};
+static const struct inode_operations befs_fast_symlink_inode_operations = {
+ .readlink = generic_readlink,
+ .follow_link = befs_fast_follow_link,
+};
+
static const struct inode_operations befs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = befs_follow_link,
- .put_link = befs_put_link,
+ .put_link = kfree_put_link,
};
/*
@@ -411,7 +416,10 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
inode->i_op = &befs_dir_inode_operations;
inode->i_fop = &befs_dir_operations;
} else if (S_ISLNK(inode->i_mode)) {
- inode->i_op = &befs_symlink_inode_operations;
+ if (befs_ino->i_flags & BEFS_LONG_SYMLINK)
+ inode->i_op = &befs_symlink_inode_operations;
+ else
+ inode->i_op = &befs_fast_symlink_inode_operations;
} else {
befs_error(sb, "Inode %lu is not a regular file, "
"directory or symlink. THAT IS WRONG! BeFS has no "
@@ -477,47 +485,40 @@ befs_destroy_inodecache(void)
static void *
befs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
+ struct super_block *sb = dentry->d_sb;
befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+ befs_data_stream *data = &befs_ino->i_data.ds;
+ befs_off_t len = data->size;
char *link;
- if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
- struct super_block *sb = dentry->d_sb;
- befs_data_stream *data = &befs_ino->i_data.ds;
- befs_off_t len = data->size;
+ if (len == 0) {
+ befs_error(sb, "Long symlink with illegal length");
+ link = ERR_PTR(-EIO);
+ } else {
+ befs_debug(sb, "Follow long symlink");
- if (len == 0) {
- befs_error(sb, "Long symlink with illegal length");
+ link = kmalloc(len, GFP_NOFS);
+ if (!link) {
+ link = ERR_PTR(-ENOMEM);
+ } else if (befs_read_lsymlink(sb, data, link, len) != len) {
+ kfree(link);
+ befs_error(sb, "Failed to read entire long symlink");
link = ERR_PTR(-EIO);
} else {
- befs_debug(sb, "Follow long symlink");
-
- link = kmalloc(len, GFP_NOFS);
- if (!link) {
- link = ERR_PTR(-ENOMEM);
- } else if (befs_read_lsymlink(sb, data, link, len) != len) {
- kfree(link);
- befs_error(sb, "Failed to read entire long symlink");
- link = ERR_PTR(-EIO);
- } else {
- link[len - 1] = '\0';
- }
+ link[len - 1] = '\0';
}
- } else {
- link = befs_ino->i_data.symlink;
}
-
nd_set_link(nd, link);
return NULL;
}
-static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+
+static void *
+befs_fast_follow_link(struct dentry *dentry, struct nameidata *nd)
{
befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
- if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
- char *link = nd_get_link(nd);
- if (!IS_ERR(link))
- kfree(link);
- }
+ nd_set_link(nd, befs_ino->i_data.symlink);
+ return NULL;
}
/*
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index ae2892218335..d150660d598b 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -24,9 +24,9 @@
const struct file_operations bfs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.splice_read = generic_file_splice_read,
};
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index fc60b31453ee..31f2d5af7b24 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -134,8 +134,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
return 0;
}
- iv = bip_vec_idx(bip, bip->bip_vcnt);
- BUG_ON(iv == NULL);
+ iv = bip->bip_vec + bip->bip_vcnt;
iv->bv_page = page;
iv->bv_len = len;
@@ -203,6 +202,12 @@ static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi,
return sectors;
}
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return bio_integrity_hw_sectors(bi, sectors) * bi->tuple_size;
+}
+
/**
* bio_integrity_tag_size - Retrieve integrity tag space
* @bio: bio to inspect
@@ -215,9 +220,9 @@ unsigned int bio_integrity_tag_size(struct bio *bio)
{
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
- BUG_ON(bio->bi_size == 0);
+ BUG_ON(bio->bi_iter.bi_size == 0);
- return bi->tag_size * (bio->bi_size / bi->sector_size);
+ return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size);
}
EXPORT_SYMBOL(bio_integrity_tag_size);
@@ -235,9 +240,9 @@ int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set)
nr_sectors = bio_integrity_hw_sectors(bi,
DIV_ROUND_UP(len, bi->tag_size));
- if (nr_sectors * bi->tuple_size > bip->bip_size) {
- printk(KERN_ERR "%s: tag too big for bio: %u > %u\n",
- __func__, nr_sectors * bi->tuple_size, bip->bip_size);
+ if (nr_sectors * bi->tuple_size > bip->bip_iter.bi_size) {
+ printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", __func__,
+ nr_sectors * bi->tuple_size, bip->bip_iter.bi_size);
return -1;
}
@@ -299,29 +304,30 @@ static void bio_integrity_generate(struct bio *bio)
{
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
struct blk_integrity_exchg bix;
- struct bio_vec *bv;
- sector_t sector = bio->bi_sector;
- unsigned int i, sectors, total;
+ struct bio_vec bv;
+ struct bvec_iter iter;
+ sector_t sector = bio->bi_iter.bi_sector;
+ unsigned int sectors, total;
void *prot_buf = bio->bi_integrity->bip_buf;
total = 0;
bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
bix.sector_size = bi->sector_size;
- bio_for_each_segment(bv, bio, i) {
- void *kaddr = kmap_atomic(bv->bv_page);
- bix.data_buf = kaddr + bv->bv_offset;
- bix.data_size = bv->bv_len;
+ bio_for_each_segment(bv, bio, iter) {
+ void *kaddr = kmap_atomic(bv.bv_page);
+ bix.data_buf = kaddr + bv.bv_offset;
+ bix.data_size = bv.bv_len;
bix.prot_buf = prot_buf;
bix.sector = sector;
bi->generate_fn(&bix);
- sectors = bv->bv_len / bi->sector_size;
+ sectors = bv.bv_len / bi->sector_size;
sector += sectors;
prot_buf += sectors * bi->tuple_size;
total += sectors * bi->tuple_size;
- BUG_ON(total > bio->bi_integrity->bip_size);
+ BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
kunmap_atomic(kaddr);
}
@@ -386,8 +392,8 @@ int bio_integrity_prep(struct bio *bio)
bip->bip_owns_buf = 1;
bip->bip_buf = buf;
- bip->bip_size = len;
- bip->bip_sector = bio->bi_sector;
+ bip->bip_iter.bi_size = len;
+ bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
/* Map it */
offset = offset_in_page(buf);
@@ -441,19 +447,20 @@ static int bio_integrity_verify(struct bio *bio)
{
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
struct blk_integrity_exchg bix;
- struct bio_vec *bv;
- sector_t sector = bio->bi_integrity->bip_sector;
- unsigned int i, sectors, total, ret;
+ struct bio_vec bv;
+ struct bvec_iter iter;
+ sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
+ unsigned int sectors, total, ret;
void *prot_buf = bio->bi_integrity->bip_buf;
ret = total = 0;
bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
bix.sector_size = bi->sector_size;
- bio_for_each_segment(bv, bio, i) {
- void *kaddr = kmap_atomic(bv->bv_page);
- bix.data_buf = kaddr + bv->bv_offset;
- bix.data_size = bv->bv_len;
+ bio_for_each_segment(bv, bio, iter) {
+ void *kaddr = kmap_atomic(bv.bv_page);
+ bix.data_buf = kaddr + bv.bv_offset;
+ bix.data_size = bv.bv_len;
bix.prot_buf = prot_buf;
bix.sector = sector;
@@ -464,11 +471,11 @@ static int bio_integrity_verify(struct bio *bio)
return ret;
}
- sectors = bv->bv_len / bi->sector_size;
+ sectors = bv.bv_len / bi->sector_size;
sector += sectors;
prot_buf += sectors * bi->tuple_size;
total += sectors * bi->tuple_size;
- BUG_ON(total > bio->bi_integrity->bip_size);
+ BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
kunmap_atomic(kaddr);
}
@@ -495,7 +502,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
/* Restore original bio completion handler */
bio->bi_end_io = bip->bip_end_io;
- bio_endio(bio, error);
+ bio_endio_nodec(bio, error);
}
/**
@@ -533,56 +540,6 @@ void bio_integrity_endio(struct bio *bio, int error)
EXPORT_SYMBOL(bio_integrity_endio);
/**
- * bio_integrity_mark_head - Advance bip_vec skip bytes
- * @bip: Integrity vector to advance
- * @skip: Number of bytes to advance it
- */
-void bio_integrity_mark_head(struct bio_integrity_payload *bip,
- unsigned int skip)
-{
- struct bio_vec *iv;
- unsigned int i;
-
- bip_for_each_vec(iv, bip, i) {
- if (skip == 0) {
- bip->bip_idx = i;
- return;
- } else if (skip >= iv->bv_len) {
- skip -= iv->bv_len;
- } else { /* skip < iv->bv_len) */
- iv->bv_offset += skip;
- iv->bv_len -= skip;
- bip->bip_idx = i;
- return;
- }
- }
-}
-
-/**
- * bio_integrity_mark_tail - Truncate bip_vec to be len bytes long
- * @bip: Integrity vector to truncate
- * @len: New length of integrity vector
- */
-void bio_integrity_mark_tail(struct bio_integrity_payload *bip,
- unsigned int len)
-{
- struct bio_vec *iv;
- unsigned int i;
-
- bip_for_each_vec(iv, bip, i) {
- if (len == 0) {
- bip->bip_vcnt = i;
- return;
- } else if (len >= iv->bv_len) {
- len -= iv->bv_len;
- } else { /* len < iv->bv_len) */
- iv->bv_len = len;
- len = 0;
- }
- }
-}
-
-/**
* bio_integrity_advance - Advance integrity vector
* @bio: bio whose integrity vector to update
* @bytes_done: number of data bytes that have been completed
@@ -595,13 +552,9 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
- unsigned int nr_sectors;
+ unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
- BUG_ON(bip == NULL);
- BUG_ON(bi == NULL);
-
- nr_sectors = bio_integrity_hw_sectors(bi, bytes_done >> 9);
- bio_integrity_mark_head(bip, nr_sectors * bi->tuple_size);
+ bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}
EXPORT_SYMBOL(bio_integrity_advance);
@@ -621,64 +574,13 @@ void bio_integrity_trim(struct bio *bio, unsigned int offset,
{
struct bio_integrity_payload *bip = bio->bi_integrity;
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
- unsigned int nr_sectors;
- BUG_ON(bip == NULL);
- BUG_ON(bi == NULL);
- BUG_ON(!bio_flagged(bio, BIO_CLONED));
-
- nr_sectors = bio_integrity_hw_sectors(bi, sectors);
- bip->bip_sector = bip->bip_sector + offset;
- bio_integrity_mark_head(bip, offset * bi->tuple_size);
- bio_integrity_mark_tail(bip, sectors * bi->tuple_size);
+ bio_integrity_advance(bio, offset << 9);
+ bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors);
}
EXPORT_SYMBOL(bio_integrity_trim);
/**
- * bio_integrity_split - Split integrity metadata
- * @bio: Protected bio
- * @bp: Resulting bio_pair
- * @sectors: Offset
- *
- * Description: Splits an integrity page into a bio_pair.
- */
-void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
-{
- struct blk_integrity *bi;
- struct bio_integrity_payload *bip = bio->bi_integrity;
- unsigned int nr_sectors;
-
- if (bio_integrity(bio) == 0)
- return;
-
- bi = bdev_get_integrity(bio->bi_bdev);
- BUG_ON(bi == NULL);
- BUG_ON(bip->bip_vcnt != 1);
-
- nr_sectors = bio_integrity_hw_sectors(bi, sectors);
-
- bp->bio1.bi_integrity = &bp->bip1;
- bp->bio2.bi_integrity = &bp->bip2;
-
- bp->iv1 = bip->bip_vec[bip->bip_idx];
- bp->iv2 = bip->bip_vec[bip->bip_idx];
-
- bp->bip1.bip_vec = &bp->iv1;
- bp->bip2.bip_vec = &bp->iv2;
-
- bp->iv1.bv_len = sectors * bi->tuple_size;
- bp->iv2.bv_offset += sectors * bi->tuple_size;
- bp->iv2.bv_len -= sectors * bi->tuple_size;
-
- bp->bip1.bip_sector = bio->bi_integrity->bip_sector;
- bp->bip2.bip_sector = bio->bi_integrity->bip_sector + nr_sectors;
-
- bp->bip1.bip_vcnt = bp->bip2.bip_vcnt = 1;
- bp->bip1.bip_idx = bp->bip2.bip_idx = 0;
-}
-EXPORT_SYMBOL(bio_integrity_split);
-
-/**
* bio_integrity_clone - Callback for cloning bios with integrity metadata
* @bio: New bio
* @bio_src: Original bio
@@ -692,19 +594,12 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
struct bio_integrity_payload *bip;
- BUG_ON(bip_src == NULL);
-
- bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
-
+ bip = bio_integrity_alloc(bio, gfp_mask, 0);
if (bip == NULL)
- return -EIO;
-
- memcpy(bip->bip_vec, bip_src->bip_vec,
- bip_src->bip_vcnt * sizeof(struct bio_vec));
+ return -ENOMEM;
- bip->bip_sector = bip_src->bip_sector;
- bip->bip_vcnt = bip_src->bip_vcnt;
- bip->bip_idx = bip_src->bip_idx;
+ bip->bip_vec = bip_src->bip_vec;
+ bip->bip_iter = bip_src->bip_iter;
return 0;
}
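
The bio-integrity hunks above all make the same transformation: bio_for_each_segment() now hands out a struct bio_vec by value and keeps its position in a separate struct bvec_iter, instead of walking bi_io_vec with an integer index. The new loop shape, reduced to a sketch (the function is a placeholder):

        static unsigned int example_bio_bytes(struct bio *bio)
        {
                struct bio_vec bv;      /* a copy, not a pointer into bi_io_vec */
                struct bvec_iter iter;
                unsigned int bytes = 0;

                bio_for_each_segment(bv, bio, iter)
                        bytes += bv.bv_len;

                return bytes;
        }
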
diff --git a/fs/bio.c b/fs/bio.c
index ea5035da4d9a..7d538a118397 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -38,8 +38,6 @@
*/
#define BIO_INLINE_VECS 4
-static mempool_t *bio_split_pool __read_mostly;
-
/*
* if you change this list, also change bvec_alloc or things will
* break badly! cannot be bigger than what you can fit into an
@@ -273,6 +271,7 @@ void bio_init(struct bio *bio)
{
memset(bio, 0, sizeof(*bio));
bio->bi_flags = 1 << BIO_UPTODATE;
+ atomic_set(&bio->bi_remaining, 1);
atomic_set(&bio->bi_cnt, 1);
}
EXPORT_SYMBOL(bio_init);
@@ -295,9 +294,34 @@ void bio_reset(struct bio *bio)
memset(bio, 0, BIO_RESET_BYTES);
bio->bi_flags = flags|(1 << BIO_UPTODATE);
+ atomic_set(&bio->bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);
+static void bio_chain_endio(struct bio *bio, int error)
+{
+ bio_endio(bio->bi_private, error);
+}
+
+/**
+ * bio_chain - chain bio completions
+ *
+ * The caller won't have a bi_end_io called when @bio completes - instead,
+ * @parent's bi_end_io won't be called until both @parent and @bio have
+ * completed.
+ *
+ * The caller must not set bi_private or bi_end_io in @bio.
+ */
+void bio_chain(struct bio *bio, struct bio *parent)
+{
+ BUG_ON(bio->bi_private || bio->bi_end_io);
+
+ bio->bi_private = parent;
+ bio->bi_end_io = bio_chain_endio;
+ atomic_inc(&parent->bi_remaining);
+}
+EXPORT_SYMBOL(bio_chain);
+
static void bio_alloc_rescue(struct work_struct *work)
{
struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
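
bio_chain() above lets a driver hang extra work off a bio without writing its own completion plumbing: chain a child to the parent, submit the child, and the parent's bi_end_io only runs once bi_remaining drops to zero, i.e. after both have completed. A schematic use (names and the already-populated child bio are assumptions of this sketch):

        static void example_submit_chained(struct bio *parent, struct bio *child)
        {
                /* child must not have bi_private or bi_end_io set yet */
                bio_chain(child, parent);               /* bumps parent->bi_remaining */
                submit_bio(child->bi_rw, child);        /* parent completes after child */
        }
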
@@ -473,13 +497,13 @@ EXPORT_SYMBOL(bio_alloc_bioset);
void zero_fill_bio(struct bio *bio)
{
unsigned long flags;
- struct bio_vec *bv;
- int i;
+ struct bio_vec bv;
+ struct bvec_iter iter;
- bio_for_each_segment(bv, bio, i) {
- char *data = bvec_kmap_irq(bv, &flags);
- memset(data, 0, bv->bv_len);
- flush_dcache_page(bv->bv_page);
+ bio_for_each_segment(bv, bio, iter) {
+ char *data = bvec_kmap_irq(&bv, &flags);
+ memset(data, 0, bv.bv_len);
+ flush_dcache_page(bv.bv_page);
bvec_kunmap_irq(data, &flags);
}
}
@@ -525,20 +549,18 @@ EXPORT_SYMBOL(bio_phys_segments);
*/
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
- memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
- bio_src->bi_max_vecs * sizeof(struct bio_vec));
+ BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);
/*
* most users will be overriding ->bi_bdev with a new target,
* so we don't set nor calculate new physical/hw segment counts here
*/
- bio->bi_sector = bio_src->bi_sector;
bio->bi_bdev = bio_src->bi_bdev;
bio->bi_flags |= 1 << BIO_CLONED;
bio->bi_rw = bio_src->bi_rw;
+ bio->bi_iter = bio_src->bi_iter;
+ bio->bi_io_vec = bio_src->bi_io_vec;
bio->bi_vcnt = bio_src->bi_vcnt;
- bio->bi_size = bio_src->bi_size;
- bio->bi_idx = bio_src->bi_idx;
}
EXPORT_SYMBOL(__bio_clone);
@@ -555,7 +577,7 @@ struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
{
struct bio *b;
- b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bs);
+ b = bio_alloc_bioset(gfp_mask, 0, bs);
if (!b)
return NULL;
@@ -577,6 +599,50 @@ struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
EXPORT_SYMBOL(bio_clone_bioset);
/**
+ * bio_clone_biovec: Given a cloned bio, give the clone its own copy of the
+ * biovec
+ * @bio: cloned bio
+ *
+ * @bio must have been allocated from a bioset - i.e. returned from
+ * bio_clone_bioset()
+ */
+int bio_clone_biovec(struct bio *bio, gfp_t gfp_mask)
+{
+ unsigned long idx = BIO_POOL_NONE;
+ unsigned nr_iovecs = 0;
+ struct bio_vec bv, *bvl = NULL;
+ struct bvec_iter iter;
+
+ BUG_ON(!bio->bi_pool);
+ BUG_ON(BIO_POOL_IDX(bio) != BIO_POOL_NONE);
+
+ bio_for_each_segment(bv, bio, iter)
+ nr_iovecs++;
+
+ if (nr_iovecs > BIO_INLINE_VECS) {
+ bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx,
+ bio->bi_pool->bvec_pool);
+ if (!bvl)
+ return -ENOMEM;
+ } else if (nr_iovecs) {
+ bvl = bio->bi_inline_vecs;
+ }
+
+ bio_for_each_segment(bv, bio, iter)
+ bvl[bio->bi_vcnt++] = bv;
+
+ bio->bi_io_vec = bvl;
+ bio->bi_iter.bi_idx = 0;
+ bio->bi_iter.bi_bvec_done = 0;
+
+ bio->bi_flags &= BIO_POOL_MASK - 1;
+ bio->bi_flags |= idx << BIO_POOL_OFFSET;
+
+ return 0;
+}
+EXPORT_SYMBOL(bio_clone_biovec);
+
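/*
 * Illustrative sketch only: a clone returned by bio_clone_bioset() initially
 * shares the source's bi_io_vec, so a caller that needs to modify the clone's
 * segments can detach it with bio_clone_biovec(). "bs" is a caller-owned
 * bio_set; the helper name is illustrative.
 */
static struct bio *example_clone_with_private_biovec(struct bio *bio,
						     struct bio_set *bs)
{
	struct bio *clone = bio_clone_bioset(bio, GFP_NOIO, bs);

	if (!clone)
		return NULL;

	if (bio_clone_biovec(clone, GFP_NOIO)) {
		bio_put(clone);
		return NULL;
	}
	return clone;
}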
+/**
* bio_get_nr_vecs - return approx number of vecs
* @bdev: I/O target
*
@@ -612,7 +678,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
if (unlikely(bio_flagged(bio, BIO_CLONED)))
return 0;
- if (((bio->bi_size + len) >> 9) > max_sectors)
+ if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
return 0;
/*
@@ -635,8 +701,9 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
simulate merging updated prev_bvec
as new bvec. */
.bi_bdev = bio->bi_bdev,
- .bi_sector = bio->bi_sector,
- .bi_size = bio->bi_size - prev_bv_len,
+ .bi_sector = bio->bi_iter.bi_sector,
+ .bi_size = bio->bi_iter.bi_size -
+ prev_bv_len,
.bi_rw = bio->bi_rw,
};
@@ -684,8 +751,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
if (q->merge_bvec_fn) {
struct bvec_merge_data bvm = {
.bi_bdev = bio->bi_bdev,
- .bi_sector = bio->bi_sector,
- .bi_size = bio->bi_size,
+ .bi_sector = bio->bi_iter.bi_sector,
+ .bi_size = bio->bi_iter.bi_size,
.bi_rw = bio->bi_rw,
};
@@ -708,7 +775,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
bio->bi_vcnt++;
bio->bi_phys_segments++;
done:
- bio->bi_size += len;
+ bio->bi_iter.bi_size += len;
return len;
}
@@ -807,28 +874,7 @@ void bio_advance(struct bio *bio, unsigned bytes)
if (bio_integrity(bio))
bio_integrity_advance(bio, bytes);
- bio->bi_sector += bytes >> 9;
- bio->bi_size -= bytes;
-
- if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
- return;
-
- while (bytes) {
- if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
- WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
- bio->bi_idx, bio->bi_vcnt);
- break;
- }
-
- if (bytes >= bio_iovec(bio)->bv_len) {
- bytes -= bio_iovec(bio)->bv_len;
- bio->bi_idx++;
- } else {
- bio_iovec(bio)->bv_len -= bytes;
- bio_iovec(bio)->bv_offset += bytes;
- bytes = 0;
- }
- }
+ bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);
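/*
 * A sketch of the decoupling this enables: bio_advance() now just forwards to
 * bio_advance_iter() on bio->bi_iter, and the same helper can walk a private
 * copy of the iterator without touching the bio itself. Illustrative only.
 */
static void example_peek_past_first_sector(struct bio *bio)
{
	struct bvec_iter iter = bio->bi_iter;

	if (iter.bi_size >= 512)
		bio_advance_iter(bio, &iter, 512);
	/* iter now points 512 bytes into the payload; bio->bi_iter is unchanged */
}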
@@ -874,117 +920,80 @@ EXPORT_SYMBOL(bio_alloc_pages);
*/
void bio_copy_data(struct bio *dst, struct bio *src)
{
- struct bio_vec *src_bv, *dst_bv;
- unsigned src_offset, dst_offset, bytes;
+ struct bvec_iter src_iter, dst_iter;
+ struct bio_vec src_bv, dst_bv;
void *src_p, *dst_p;
+ unsigned bytes;
- src_bv = bio_iovec(src);
- dst_bv = bio_iovec(dst);
-
- src_offset = src_bv->bv_offset;
- dst_offset = dst_bv->bv_offset;
+ src_iter = src->bi_iter;
+ dst_iter = dst->bi_iter;
while (1) {
- if (src_offset == src_bv->bv_offset + src_bv->bv_len) {
- src_bv++;
- if (src_bv == bio_iovec_idx(src, src->bi_vcnt)) {
- src = src->bi_next;
- if (!src)
- break;
-
- src_bv = bio_iovec(src);
- }
+ if (!src_iter.bi_size) {
+ src = src->bi_next;
+ if (!src)
+ break;
- src_offset = src_bv->bv_offset;
+ src_iter = src->bi_iter;
}
- if (dst_offset == dst_bv->bv_offset + dst_bv->bv_len) {
- dst_bv++;
- if (dst_bv == bio_iovec_idx(dst, dst->bi_vcnt)) {
- dst = dst->bi_next;
- if (!dst)
- break;
-
- dst_bv = bio_iovec(dst);
- }
+ if (!dst_iter.bi_size) {
+ dst = dst->bi_next;
+ if (!dst)
+ break;
- dst_offset = dst_bv->bv_offset;
+ dst_iter = dst->bi_iter;
}
- bytes = min(dst_bv->bv_offset + dst_bv->bv_len - dst_offset,
- src_bv->bv_offset + src_bv->bv_len - src_offset);
+ src_bv = bio_iter_iovec(src, src_iter);
+ dst_bv = bio_iter_iovec(dst, dst_iter);
+
+ bytes = min(src_bv.bv_len, dst_bv.bv_len);
- src_p = kmap_atomic(src_bv->bv_page);
- dst_p = kmap_atomic(dst_bv->bv_page);
+ src_p = kmap_atomic(src_bv.bv_page);
+ dst_p = kmap_atomic(dst_bv.bv_page);
- memcpy(dst_p + dst_offset,
- src_p + src_offset,
+ memcpy(dst_p + dst_bv.bv_offset,
+ src_p + src_bv.bv_offset,
bytes);
kunmap_atomic(dst_p);
kunmap_atomic(src_p);
- src_offset += bytes;
- dst_offset += bytes;
+ bio_advance_iter(src, &src_iter, bytes);
+ bio_advance_iter(dst, &dst_iter, bytes);
}
}
EXPORT_SYMBOL(bio_copy_data);
struct bio_map_data {
- struct bio_vec *iovecs;
- struct sg_iovec *sgvecs;
int nr_sgvecs;
int is_our_pages;
+ struct sg_iovec sgvecs[];
};
static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
struct sg_iovec *iov, int iov_count,
int is_our_pages)
{
- memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
bmd->nr_sgvecs = iov_count;
bmd->is_our_pages = is_our_pages;
bio->bi_private = bmd;
}
-static void bio_free_map_data(struct bio_map_data *bmd)
-{
- kfree(bmd->iovecs);
- kfree(bmd->sgvecs);
- kfree(bmd);
-}
-
static struct bio_map_data *bio_alloc_map_data(int nr_segs,
unsigned int iov_count,
gfp_t gfp_mask)
{
- struct bio_map_data *bmd;
-
if (iov_count > UIO_MAXIOV)
return NULL;
- bmd = kmalloc(sizeof(*bmd), gfp_mask);
- if (!bmd)
- return NULL;
-
- bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
- if (!bmd->iovecs) {
- kfree(bmd);
- return NULL;
- }
-
- bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
- if (bmd->sgvecs)
- return bmd;
-
- kfree(bmd->iovecs);
- kfree(bmd);
- return NULL;
+ return kmalloc(sizeof(struct bio_map_data) +
+ sizeof(struct sg_iovec) * iov_count, gfp_mask);
}
-static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
- struct sg_iovec *iov, int iov_count,
+static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
int to_user, int from_user, int do_free_page)
{
int ret = 0, i;
@@ -994,7 +1003,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
bio_for_each_segment_all(bvec, bio, i) {
char *bv_addr = page_address(bvec->bv_page);
- unsigned int bv_len = iovecs[i].bv_len;
+ unsigned int bv_len = bvec->bv_len;
while (bv_len && iov_idx < iov_count) {
unsigned int bytes;
@@ -1054,14 +1063,14 @@ int bio_uncopy_user(struct bio *bio)
* don't copy into a random user address space, just free.
*/
if (current->mm)
- ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
- bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+ ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs,
+ bio_data_dir(bio) == READ,
0, bmd->is_our_pages);
else if (bmd->is_our_pages)
bio_for_each_segment_all(bvec, bio, i)
__free_page(bvec->bv_page);
}
- bio_free_map_data(bmd);
+ kfree(bmd);
bio_put(bio);
return ret;
}
@@ -1175,7 +1184,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
*/
if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
(map_data && map_data->from_user)) {
- ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
+ ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0);
if (ret)
goto cleanup;
}
@@ -1189,7 +1198,7 @@ cleanup:
bio_put(bio);
out_bmd:
- bio_free_map_data(bmd);
+ kfree(bmd);
return ERR_PTR(ret);
}
@@ -1485,7 +1494,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
if (IS_ERR(bio))
return bio;
- if (bio->bi_size == len)
+ if (bio->bi_iter.bi_size == len)
return bio;
/*
@@ -1506,16 +1515,15 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
bio_for_each_segment_all(bvec, bio, i) {
char *addr = page_address(bvec->bv_page);
- int len = bmd->iovecs[i].bv_len;
if (read)
- memcpy(p, addr, len);
+ memcpy(p, addr, bvec->bv_len);
__free_page(bvec->bv_page);
- p += len;
+ p += bvec->bv_len;
}
- bio_free_map_data(bmd);
+ kfree(bmd);
bio_put(bio);
}
@@ -1686,11 +1694,11 @@ void bio_check_pages_dirty(struct bio *bio)
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
- int i;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
- bio_for_each_segment(bvec, bi, i)
- flush_dcache_page(bvec->bv_page);
+ bio_for_each_segment(bvec, bi, iter)
+ flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif
@@ -1711,136 +1719,110 @@ EXPORT_SYMBOL(bio_flush_dcache_pages);
**/
void bio_endio(struct bio *bio, int error)
{
- if (error)
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- error = -EIO;
+ while (bio) {
+ BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
- if (bio->bi_end_io)
- bio->bi_end_io(bio, error);
-}
-EXPORT_SYMBOL(bio_endio);
+ if (error)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ error = -EIO;
-void bio_pair_release(struct bio_pair *bp)
-{
- if (atomic_dec_and_test(&bp->cnt)) {
- struct bio *master = bp->bio1.bi_private;
+ if (!atomic_dec_and_test(&bio->bi_remaining))
+ return;
- bio_endio(master, bp->error);
- mempool_free(bp, bp->bio2.bi_private);
+ /*
+ * Need to have a real endio function for chained bios,
+ * otherwise various corner cases will break (like stacking
+ * block devices that save/restore bi_end_io) - however, we want
+ * to avoid unbounded recursion and blowing the stack. Tail call
+ * optimization would handle this, but compiling with frame
+ * pointers also disables gcc's sibling call optimization.
+ */
+ if (bio->bi_end_io == bio_chain_endio) {
+ bio = bio->bi_private;
+ } else {
+ if (bio->bi_end_io)
+ bio->bi_end_io(bio, error);
+ bio = NULL;
+ }
}
}
-EXPORT_SYMBOL(bio_pair_release);
-
-static void bio_pair_end_1(struct bio *bi, int err)
-{
- struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
-
- if (err)
- bp->error = err;
-
- bio_pair_release(bp);
-}
+EXPORT_SYMBOL(bio_endio);
-static void bio_pair_end_2(struct bio *bi, int err)
+/**
+ * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
+ * @bio: bio
+ * @error: error, if any
+ *
+ * For code that has saved and restored bi_end_io; think hard before using
+ * this function - you probably should have cloned the entire bio.
+ **/
+void bio_endio_nodec(struct bio *bio, int error)
{
- struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
-
- if (err)
- bp->error = err;
-
- bio_pair_release(bp);
+ atomic_inc(&bio->bi_remaining);
+ bio_endio(bio, error);
}
+EXPORT_SYMBOL(bio_endio_nodec);
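/*
 * Illustrative sketch only: the intended caller is a driver that stashed the
 * original bi_end_io/bi_private and substituted its own completion handler.
 * By the time the substituted handler runs, bio_endio() has already dropped
 * bi_remaining once, so the restored completion is finished with
 * bio_endio_nodec(). "struct example_ctx" is a hypothetical per-bio context.
 */
struct example_ctx {
	bio_end_io_t	*saved_end_io;
	void		*saved_private;
};

static void example_stacked_end_io(struct bio *bio, int error)
{
	struct example_ctx *ctx = bio->bi_private;

	bio->bi_end_io	= ctx->saved_end_io;
	bio->bi_private	= ctx->saved_private;
	kfree(ctx);

	bio_endio_nodec(bio, error);	/* don't drop bi_remaining a second time */
}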
-/*
- * split a bio - only worry about a bio with a single page in its iovec
+/**
+ * bio_split - split a bio
+ * @bio: bio to split
+ * @sectors: number of sectors to split from the front of @bio
+ * @gfp: gfp mask
+ * @bs: bio set to allocate from
+ *
+ * Allocates and returns a new bio which represents @sectors from the start of
+ * @bio, and updates @bio to represent the remaining sectors.
+ *
+ * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
+ * responsibility to ensure that @bio is not freed before the split.
*/
-struct bio_pair *bio_split(struct bio *bi, int first_sectors)
+struct bio *bio_split(struct bio *bio, int sectors,
+ gfp_t gfp, struct bio_set *bs)
{
- struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);
-
- if (!bp)
- return bp;
-
- trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
- bi->bi_sector + first_sectors);
-
- BUG_ON(bio_segments(bi) > 1);
- atomic_set(&bp->cnt, 3);
- bp->error = 0;
- bp->bio1 = *bi;
- bp->bio2 = *bi;
- bp->bio2.bi_sector += first_sectors;
- bp->bio2.bi_size -= first_sectors << 9;
- bp->bio1.bi_size = first_sectors << 9;
-
- if (bi->bi_vcnt != 0) {
- bp->bv1 = *bio_iovec(bi);
- bp->bv2 = *bio_iovec(bi);
-
- if (bio_is_rw(bi)) {
- bp->bv2.bv_offset += first_sectors << 9;
- bp->bv2.bv_len -= first_sectors << 9;
- bp->bv1.bv_len = first_sectors << 9;
- }
+ struct bio *split = NULL;
- bp->bio1.bi_io_vec = &bp->bv1;
- bp->bio2.bi_io_vec = &bp->bv2;
+ BUG_ON(sectors <= 0);
+ BUG_ON(sectors >= bio_sectors(bio));
- bp->bio1.bi_max_vecs = 1;
- bp->bio2.bi_max_vecs = 1;
- }
+ split = bio_clone_bioset(bio, gfp, bs);
+ if (!split)
+ return NULL;
- bp->bio1.bi_end_io = bio_pair_end_1;
- bp->bio2.bi_end_io = bio_pair_end_2;
+ split->bi_iter.bi_size = sectors << 9;
- bp->bio1.bi_private = bi;
- bp->bio2.bi_private = bio_split_pool;
+ if (bio_integrity(split))
+ bio_integrity_trim(split, 0, sectors);
- if (bio_integrity(bi))
- bio_integrity_split(bi, bp, first_sectors);
+ bio_advance(bio, split->bi_iter.bi_size);
- return bp;
+ return split;
}
EXPORT_SYMBOL(bio_split);
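/*
 * Illustrative sketch only: the expected pattern for the new bio_split() is to
 * split off the front, chain it to the remainder, and submit both. "bs" is a
 * caller-owned bio_set and @sectors must be less than bio_sectors(bio).
 */
static void example_split_and_submit(struct bio *bio, int sectors,
				     struct bio_set *bs)
{
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

	if (split) {
		bio_chain(split, bio);
		generic_make_request(split);
	}
	generic_make_request(bio);	/* remaining sectors */
}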
/**
- * bio_sector_offset - Find hardware sector offset in bio
- * @bio: bio to inspect
- * @index: bio_vec index
- * @offset: offset in bv_page
- *
- * Return the number of hardware sectors between beginning of bio
- * and an end point indicated by a bio_vec index and an offset
- * within that vector's page.
+ * bio_trim - trim a bio
+ * @bio: bio to trim
+ * @offset: number of sectors to trim from the front of @bio
+ * @size: size we want to trim @bio to, in sectors
*/
-sector_t bio_sector_offset(struct bio *bio, unsigned short index,
- unsigned int offset)
+void bio_trim(struct bio *bio, int offset, int size)
{
- unsigned int sector_sz;
- struct bio_vec *bv;
- sector_t sectors;
- int i;
-
- sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
- sectors = 0;
+ /* 'bio' is a cloned bio which we need to trim to match
+ * the given offset and size.
+ */
- if (index >= bio->bi_idx)
- index = bio->bi_vcnt - 1;
+ size <<= 9;
+ if (offset == 0 && size == bio->bi_iter.bi_size)
+ return;
- bio_for_each_segment_all(bv, bio, i) {
- if (i == index) {
- if (offset > bv->bv_offset)
- sectors += (offset - bv->bv_offset) / sector_sz;
- break;
- }
+ clear_bit(BIO_SEG_VALID, &bio->bi_flags);
- sectors += bv->bv_len / sector_sz;
- }
+ bio_advance(bio, offset << 9);
- return sectors;
+ bio->bi_iter.bi_size = size;
}
-EXPORT_SYMBOL(bio_sector_offset);
+EXPORT_SYMBOL_GPL(bio_trim);
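/*
 * Illustrative sketch only: bio_trim() is meant for a clone that should cover
 * only a sub-range of the original bio; both arguments are in 512-byte
 * sectors. "bs" is a caller-owned bio_set.
 */
static struct bio *example_clone_subrange(struct bio *bio, int offset,
					  int size, struct bio_set *bs)
{
	struct bio *clone = bio_clone_bioset(bio, GFP_NOIO, bs);

	if (clone)
		bio_trim(clone, offset, size);
	return clone;
}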
/*
* create memory pools for biovec's in a bio_set.
@@ -2019,11 +2001,6 @@ static int __init init_bio(void)
if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
panic("bio: can't create integrity pool\n");
- bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
- sizeof(struct bio_pair));
- if (!bio_split_pool)
- panic("bio: can't create split pool\n");
-
return 0;
}
subsys_initcall(init_bio);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1e86823a9cbd..34d9da0e6b74 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -165,14 +165,14 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
}
static ssize_t
-blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs)
+blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset,
- nr_segs, blkdev_get_block, NULL, NULL, 0);
+ return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter,
+ offset, blkdev_get_block, NULL, NULL, 0);
}
int __sync_blockdev(struct block_device *bdev, int wait)
@@ -1508,8 +1508,7 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
* Does not take i_mutex for the write and thus is not for general purpose
* use.
*/
-ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
struct file *file = iocb->ki_filp;
struct blk_plug plug;
@@ -1518,7 +1517,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
BUG_ON(iocb->ki_pos != pos);
blk_start_plug(&plug);
- ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+ ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
if (ret > 0) {
ssize_t err;
@@ -1529,10 +1528,10 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
blk_finish_plug(&plug);
return ret;
}
-EXPORT_SYMBOL_GPL(blkdev_aio_write);
+EXPORT_SYMBOL_GPL(blkdev_write_iter);
-static ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos)
{
struct file *file = iocb->ki_filp;
struct inode *bd_inode = file->f_mapping->host;
@@ -1543,8 +1542,8 @@ static ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
size -= pos;
if (size < iocb->ki_nbytes)
- nr_segs = iov_shorten((struct iovec *)iov, nr_segs, size);
- return generic_file_aio_read(iocb, iov, nr_segs, pos);
+ iov_iter_shorten(iter, size);
+ return generic_file_read_iter(iocb, iter, pos);
}
/*
@@ -1578,8 +1577,8 @@ const struct file_operations def_blk_fops = {
.llseek = block_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = blkdev_aio_read,
- .aio_write = blkdev_aio_write,
+ .read_iter = blkdev_read_iter,
+ .write_iter = blkdev_write_iter,
.mmap = generic_file_mmap,
.fsync = blkdev_fsync,
.unlocked_ioctl = block_ioctl,
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 398cbd517be2..f9d5094e1029 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -59,7 +59,8 @@ config BTRFS_FS_RUN_SANITY_TESTS
help
This will run some basic sanity tests on the free space cache
code to make sure it is acting as it should. These are mostly
- regression tests and are only really interesting to btrfs devlopers.
+ regression tests and are only really interesting to btrfs
+ developers.
If unsure, say N.
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 1c47be187240..7fcac709934d 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1687,7 +1687,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
return -1;
}
bio->bi_bdev = block_ctx->dev->bdev;
- bio->bi_sector = dev_bytenr >> 9;
+ bio->bi_iter.bi_sector = dev_bytenr >> 9;
bio->bi_end_io = btrfsic_complete_bio_end_io;
bio->bi_private = &complete;
@@ -3020,7 +3020,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
int bio_is_patched;
char **mapped_datav;
- dev_bytenr = 512 * bio->bi_sector;
+ dev_bytenr = 512 * bio->bi_iter.bi_sector;
bio_is_patched = 0;
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
@@ -3028,8 +3028,8 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
"submit_bio(rw=0x%x, bi_vcnt=%u,"
" bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
rw, bio->bi_vcnt,
- (unsigned long long)bio->bi_sector, dev_bytenr,
- bio->bi_bdev);
+ (unsigned long long)bio->bi_iter.bi_sector,
+ dev_bytenr, bio->bi_bdev);
mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
GFP_NOFS);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 6aad98cb343f..06ab821947f9 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -173,7 +173,8 @@ static void end_compressed_bio_read(struct bio *bio, int err)
goto out;
inode = cb->inode;
- ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
+ ret = check_compressed_csum(inode, cb,
+ (u64)bio->bi_iter.bi_sector << 9);
if (ret)
goto csum_failed;
@@ -373,7 +374,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
page = compressed_pages[pg_index];
page->mapping = inode->i_mapping;
- if (bio->bi_size)
+ if (bio->bi_iter.bi_size)
ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
PAGE_CACHE_SIZE,
bio, 0);
@@ -507,7 +508,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
if (!em || last_offset < em->start ||
(last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
- (em->block_start >> 9) != cb->orig_bio->bi_sector) {
+ (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
free_extent_map(em);
unlock_extent(tree, last_offset, end);
unlock_page(page);
@@ -553,7 +554,7 @@ next:
* in it. We don't actually do IO on those pages but allocate new ones
* to hold the compressed pages on disk.
*
- * bio->bi_sector points to the compressed extent on disk
+ * bio->bi_iter.bi_sector points to the compressed extent on disk
* bio->bi_io_vec points to all of the inode pages
* bio->bi_vcnt is a count of pages
*
@@ -574,7 +575,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
struct page *page;
struct block_device *bdev;
struct bio *comp_bio;
- u64 cur_disk_byte = (u64)bio->bi_sector << 9;
+ u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
u64 em_len;
u64 em_start;
struct extent_map *em;
@@ -660,7 +661,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
page->mapping = inode->i_mapping;
page->index = em_start >> PAGE_CACHE_SHIFT;
- if (comp_bio->bi_size)
+ if (comp_bio->bi_iter.bi_size)
ret = tree->ops->merge_bio_hook(READ, page, 0,
PAGE_CACHE_SIZE,
comp_bio, 0);
@@ -688,8 +689,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
comp_bio, sums);
BUG_ON(ret); /* -ENOMEM */
}
- sums += (comp_bio->bi_size + root->sectorsize - 1) /
- root->sectorsize;
+ sums += (comp_bio->bi_iter.bi_size +
+ root->sectorsize - 1) / root->sectorsize;
ret = btrfs_map_bio(root, READ, comp_bio,
mirror_num, 0);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0506f40ede83..a80a2ccb955c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3105,11 +3105,6 @@ static inline u32 btrfs_level_size(struct btrfs_root *root, int level)
((unsigned long)(btrfs_leaf_data(leaf) + \
btrfs_item_offset_nr(leaf, slot)))
-static inline struct dentry *fdentry(struct file *file)
-{
- return file->f_path.dentry;
-}
-
static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
{
return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) &&
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 51731b76900d..0df176ac4309 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1994,7 +1994,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
return -EIO;
bio->bi_private = &compl;
bio->bi_end_io = repair_io_failure_callback;
- bio->bi_size = 0;
+ bio->bi_iter.bi_size = 0;
map_length = length;
ret = btrfs_map_block(fs_info, WRITE, logical,
@@ -2005,7 +2005,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
}
BUG_ON(mirror_num != bbio->mirror_num);
sector = bbio->stripes[mirror_num-1].physical >> 9;
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
dev = bbio->stripes[mirror_num-1].dev;
kfree(bbio);
if (!dev || !dev->bdev || !dev->writeable) {
@@ -2276,9 +2276,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
return -EIO;
}
bio->bi_end_io = failed_bio->bi_end_io;
- bio->bi_sector = failrec->logical >> 9;
+ bio->bi_iter.bi_sector = failrec->logical >> 9;
bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
- bio->bi_size = 0;
+ bio->bi_iter.bi_size = 0;
btrfs_failed_bio = btrfs_io_bio(failed_bio);
if (btrfs_failed_bio->csum) {
@@ -2422,7 +2422,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
struct inode *inode = page->mapping->host;
pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
- "mirror=%lu\n", (u64)bio->bi_sector, err,
+ "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err,
io_bio->mirror_num);
tree = &BTRFS_I(inode)->io_tree;
@@ -2555,9 +2555,9 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
}
if (bio) {
- bio->bi_size = 0;
+ bio->bi_iter.bi_size = 0;
bio->bi_bdev = bdev;
- bio->bi_sector = first_sector;
+ bio->bi_iter.bi_sector = first_sector;
btrfs_bio = btrfs_io_bio(bio);
btrfs_bio->csum = NULL;
btrfs_bio->csum_allocated = NULL;
@@ -2651,7 +2651,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
if (bio_ret && *bio_ret) {
bio = *bio_ret;
if (old_compressed)
- contig = bio->bi_sector == sector;
+ contig = bio->bi_iter.bi_sector == sector;
else
contig = bio_end_sector(bio) == sector;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 4f53159bdb9d..997f951a562c 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -182,7 +182,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
if (!path)
return -ENOMEM;
- nblocks = bio->bi_size >> inode->i_sb->s_blocksize_bits;
+ nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
if (!dst) {
if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size,
@@ -201,7 +201,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
csum = (u8 *)dst;
}
- if (bio->bi_size > PAGE_CACHE_SIZE * 8)
+ if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
path->reada = 2;
WARN_ON(bio->bi_vcnt <= 0);
@@ -217,7 +217,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
path->skip_locking = 1;
}
- disk_bytenr = (u64)bio->bi_sector << 9;
+ disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
if (dio)
offset = logical_offset;
while (bio_index < bio->bi_vcnt) {
@@ -302,7 +302,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
struct btrfs_dio_private *dip, struct bio *bio,
u64 offset)
{
- int len = (bio->bi_sector << 9) - dip->disk_bytenr;
+ int len = (bio->bi_iter.bi_sector << 9) - dip->disk_bytenr;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
int ret;
@@ -444,11 +444,12 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
u64 offset;
WARN_ON(bio->bi_vcnt <= 0);
- sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS);
+ sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
+ GFP_NOFS);
if (!sums)
return -ENOMEM;
- sums->len = bio->bi_size;
+ sums->len = bio->bi_iter.bi_size;
INIT_LIST_HEAD(&sums->list);
if (contig)
@@ -458,7 +459,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
ordered = btrfs_lookup_ordered_extent(inode, offset);
BUG_ON(!ordered); /* Logic error */
- sums->bytenr = (u64)bio->bi_sector << 9;
+ sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
index = 0;
while (bio_index < bio->bi_vcnt) {
@@ -473,7 +474,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
btrfs_add_ordered_sum(inode, ordered, sums);
btrfs_put_ordered_extent(ordered);
- bytes_left = bio->bi_size - total_bytes;
+ bytes_left = bio->bi_iter.bi_size - total_bytes;
sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
GFP_NOFS);
@@ -481,7 +482,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
sums->len = bytes_left;
ordered = btrfs_lookup_ordered_extent(inode, offset);
BUG_ON(!ordered); /* Logic error */
- sums->bytenr = ((u64)bio->bi_sector << 9) +
+ sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
total_bytes;
index = 0;
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 72da4df53c9a..5e70fc2cef27 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -453,7 +453,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
write_bytes -= copied;
total_copied += copied;
- /* Return to btrfs_file_aio_write to fault page */
+ /* Return to btrfs_file_write_iter to fault page */
if (unlikely(copied == 0))
break;
@@ -1557,27 +1557,23 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
}
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long nr_segs, loff_t pos,
- loff_t *ppos, size_t count, size_t ocount)
+ struct iov_iter *iter, loff_t pos,
+ loff_t *ppos, size_t count)
{
struct file *file = iocb->ki_filp;
- struct iov_iter i;
ssize_t written;
ssize_t written_buffered;
loff_t endbyte;
int err;
- written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
- count, ocount);
+ written = generic_file_direct_write_iter(iocb, iter, pos, ppos, count);
if (written < 0 || written == count)
return written;
pos += written;
count -= written;
- iov_iter_init(&i, iov, nr_segs, count, written);
- written_buffered = __btrfs_buffered_write(file, &i, pos);
+ written_buffered = __btrfs_buffered_write(file, iter, pos);
if (written_buffered < 0) {
err = written_buffered;
goto out;
@@ -1612,9 +1608,8 @@ static void update_time_for_write(struct inode *inode)
inode_inc_iversion(inode);
}
-static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
+ struct iov_iter *iter, loff_t pos)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
@@ -1623,17 +1618,12 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
u64 start_pos;
ssize_t num_written = 0;
ssize_t err = 0;
- size_t count, ocount;
+ size_t count;
bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
mutex_lock(&inode->i_mutex);
- err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
- if (err) {
- mutex_unlock(&inode->i_mutex);
- goto out;
- }
- count = ocount;
+ count = iov_iter_count(iter);
current->backing_dev_info = inode->i_mapping->backing_dev_info;
err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
@@ -1686,14 +1676,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
atomic_inc(&BTRFS_I(inode)->sync_writers);
if (unlikely(file->f_flags & O_DIRECT)) {
- num_written = __btrfs_direct_write(iocb, iov, nr_segs,
- pos, ppos, count, ocount);
+ num_written = __btrfs_direct_write(iocb, iter, pos, ppos,
+ count);
} else {
- struct iov_iter i;
-
- iov_iter_init(&i, iov, nr_segs, count, num_written);
-
- num_written = __btrfs_buffered_write(file, &i, pos);
+ num_written = __btrfs_buffered_write(file, iter, pos);
if (num_written > 0)
*ppos = pos + num_written;
}
@@ -2552,9 +2538,9 @@ const struct file_operations btrfs_file_operations = {
.llseek = btrfs_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = generic_file_aio_read,
.splice_read = generic_file_splice_read,
- .aio_write = btrfs_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = btrfs_file_write_iter,
.mmap = btrfs_file_mmap,
.open = generic_file_open,
.release = btrfs_release_file,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 51e3afa78354..02d79fb9e3e2 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1579,7 +1579,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
unsigned long bio_flags)
{
struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
- u64 logical = (u64)bio->bi_sector << 9;
+ u64 logical = (u64)bio->bi_iter.bi_sector << 9;
u64 length = 0;
u64 map_length;
int ret;
@@ -1587,7 +1587,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
if (bio_flags & EXTENT_BIO_COMPRESSED)
return 0;
- length = bio->bi_size;
+ length = bio->bi_iter.bi_size;
map_length = length;
ret = btrfs_map_block(root->fs_info, rw, logical,
&map_length, NULL, 0);
@@ -6884,7 +6884,8 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
"sector %#Lx len %u err no %d\n",
btrfs_ino(dip->inode), bio->bi_rw,
- (unsigned long long)bio->bi_sector, bio->bi_size, err);
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size, err);
dip->errors = 1;
/*
@@ -6975,7 +6976,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
struct bio *bio;
struct bio *orig_bio = dip->orig_bio;
struct bio_vec *bvec = orig_bio->bi_io_vec;
- u64 start_sector = orig_bio->bi_sector;
+ u64 start_sector = orig_bio->bi_iter.bi_sector;
u64 file_offset = dip->logical_offset;
u64 submit_len = 0;
u64 map_length;
@@ -6983,7 +6984,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
int ret = 0;
int async_submit = 0;
- map_length = orig_bio->bi_size;
+ map_length = orig_bio->bi_iter.bi_size;
ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
&map_length, NULL, 0);
if (ret) {
@@ -6991,7 +6992,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
return -EIO;
}
- if (map_length >= orig_bio->bi_size) {
+ if (map_length >= orig_bio->bi_iter.bi_size) {
bio = orig_bio;
goto submit;
}
@@ -7043,7 +7044,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
- map_length = orig_bio->bi_size;
+ map_length = orig_bio->bi_iter.bi_size;
ret = btrfs_map_block(root->fs_info, rw,
start_sector << 9,
&map_length, NULL, 0);
@@ -7101,7 +7102,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
if (!skip_sum && !write) {
csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
- sum_len = dio_bio->bi_size >> inode->i_sb->s_blocksize_bits;
+ sum_len = dio_bio->bi_iter.bi_size >>
+ inode->i_sb->s_blocksize_bits;
sum_len *= csum_size;
} else {
sum_len = 0;
@@ -7116,8 +7118,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
dip->private = dio_bio->bi_private;
dip->inode = inode;
dip->logical_offset = file_offset;
- dip->bytes = dio_bio->bi_size;
- dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
+ dip->bytes = dio_bio->bi_iter.bi_size;
+ dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
io_bio->bi_private = dip;
dip->errors = 0;
dip->orig_bio = io_bio;
@@ -7155,8 +7157,7 @@ free_ordered:
}
static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset,
- unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
int seg;
int i;
@@ -7170,35 +7171,50 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
goto out;
/* Check the memory alignment. Blocks cannot straddle pages */
- for (seg = 0; seg < nr_segs; seg++) {
- addr = (unsigned long)iov[seg].iov_base;
- size = iov[seg].iov_len;
- end += size;
- if ((addr & blocksize_mask) || (size & blocksize_mask))
- goto out;
+ if (iov_iter_has_iovec(iter)) {
+ const struct iovec *iov = iov_iter_iovec(iter);
+
+ for (seg = 0; seg < iter->nr_segs; seg++) {
+ addr = (unsigned long)iov[seg].iov_base;
+ size = iov[seg].iov_len;
+ end += size;
+ if ((addr & blocksize_mask) || (size & blocksize_mask))
+ goto out;
- /* If this is a write we don't need to check anymore */
- if (rw & WRITE)
- continue;
+ /* If this is a write we don't need to check anymore */
+ if (rw & WRITE)
+ continue;
- /*
- * Check to make sure we don't have duplicate iov_base's in this
- * iovec, if so return EINVAL, otherwise we'll get csum errors
- * when reading back.
- */
- for (i = seg + 1; i < nr_segs; i++) {
- if (iov[seg].iov_base == iov[i].iov_base)
+ /*
+ * Check to make sure we don't have duplicate iov_base's
+ * in this iovec, if so return EINVAL, otherwise we'll
+ * get csum errors when reading back.
+ */
+ for (i = seg + 1; i < iter->nr_segs; i++) {
+ if (iov[seg].iov_base == iov[i].iov_base)
+ goto out;
+ }
+ }
+ } else if (iov_iter_has_bvec(iter)) {
+ struct bio_vec *bvec = iov_iter_bvec(iter);
+
+ for (seg = 0; seg < iter->nr_segs; seg++) {
+ addr = (unsigned long)bvec[seg].bv_offset;
+ size = bvec[seg].bv_len;
+ end += size;
+ if ((addr & blocksize_mask) || (size & blocksize_mask))
goto out;
}
- }
+ } else
+ BUG();
+
retval = 0;
out:
return retval;
}
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset,
- unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -7208,8 +7224,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
bool relock = false;
ssize_t ret;
- if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
- offset, nr_segs))
+ if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iter, offset))
return 0;
atomic_inc(&inode->i_dio_count);
@@ -7221,7 +7236,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
* call btrfs_wait_ordered_range to make absolutely sure that any
* outstanding dirty pages are on disk.
*/
- count = iov_length(iov, nr_segs);
+ count = iov_iter_count(iter);
btrfs_wait_ordered_range(inode, offset, count);
if (rw & WRITE) {
@@ -7246,7 +7261,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
ret = __blockdev_direct_IO(rw, iocb, inode,
BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
- iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
+ iter, offset, btrfs_get_blocks_direct, NULL,
btrfs_submit_direct, flags);
if (rw & WRITE) {
if (ret < 0 && ret != -EIOCBQUEUED)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 9d46f60cb943..6bbf316764d7 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -321,7 +321,7 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(fdentry(file)->d_sb);
+ struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
struct btrfs_device *device;
struct request_queue *q;
struct fstrim_range range;
@@ -2098,7 +2098,7 @@ static noinline int btrfs_ioctl_ino_lookup(struct file *file,
static noinline int btrfs_ioctl_snap_destroy(struct file *file,
void __user *arg)
{
- struct dentry *parent = fdentry(file);
+ struct dentry *parent = file->f_path.dentry;
struct dentry *dentry;
struct inode *dir = parent->d_inode;
struct inode *inode;
@@ -3119,7 +3119,7 @@ out:
static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
u64 off, u64 olen, u64 destoff)
{
- struct inode *inode = fdentry(file)->d_inode;
+ struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct fd src_file;
struct inode *src;
@@ -4317,7 +4317,7 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index d0ecfbd9cc9f..03f7945fd1f2 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1033,8 +1033,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
/* see if we can add this page onto our existing bio */
if (last) {
- last_end = (u64)last->bi_sector << 9;
- last_end += last->bi_size;
+ last_end = (u64)last->bi_iter.bi_sector << 9;
+ last_end += last->bi_iter.bi_size;
/*
* we can't merge these if they are from different
@@ -1054,9 +1054,9 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
if (!bio)
return -ENOMEM;
- bio->bi_size = 0;
+ bio->bi_iter.bi_size = 0;
bio->bi_bdev = stripe->dev->bdev;
- bio->bi_sector = disk_start >> 9;
+ bio->bi_iter.bi_sector = disk_start >> 9;
set_bit(BIO_UPTODATE, &bio->bi_flags);
bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
@@ -1112,7 +1112,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
spin_lock_irq(&rbio->bio_list_lock);
bio_list_for_each(bio, &rbio->bio_list) {
- start = (u64)bio->bi_sector << 9;
+ start = (u64)bio->bi_iter.bi_sector << 9;
stripe_offset = start - rbio->raid_map[0];
page_index = stripe_offset >> PAGE_CACHE_SHIFT;
@@ -1273,7 +1273,7 @@ cleanup:
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
struct bio *bio)
{
- u64 physical = bio->bi_sector;
+ u64 physical = bio->bi_iter.bi_sector;
u64 stripe_start;
int i;
struct btrfs_bio_stripe *stripe;
@@ -1299,7 +1299,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
struct bio *bio)
{
- u64 logical = bio->bi_sector;
+ u64 logical = bio->bi_iter.bi_sector;
u64 stripe_start;
int i;
@@ -1603,8 +1603,8 @@ static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
plug_list);
struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
plug_list);
- u64 a_sector = ra->bio_list.head->bi_sector;
- u64 b_sector = rb->bio_list.head->bi_sector;
+ u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
+ u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
if (a_sector < b_sector)
return -1;
@@ -1692,7 +1692,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
if (IS_ERR(rbio))
return PTR_ERR(rbio);
bio_list_add(&rbio->bio_list, bio);
- rbio->bio_list_bytes = bio->bi_size;
+ rbio->bio_list_bytes = bio->bi_iter.bi_size;
/*
* don't plug on full rbios, just get them out the door
@@ -2045,7 +2045,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
rbio->read_rebuild = 1;
bio_list_add(&rbio->bio_list, bio);
- rbio->bio_list_bytes = bio->bi_size;
+ rbio->bio_list_bytes = bio->bi_iter.bi_size;
rbio->faila = find_logical_bio_stripe(rbio, bio);
if (rbio->faila == -1) {
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index a18e0e23f6a6..fe42870070ab 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1308,7 +1308,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
continue;
}
bio->bi_bdev = page->dev->bdev;
- bio->bi_sector = page->physical >> 9;
+ bio->bi_iter.bi_sector = page->physical >> 9;
bio->bi_end_io = scrub_complete_bio_end_io;
bio->bi_private = &complete;
@@ -1440,7 +1440,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
if (!bio)
return -EIO;
bio->bi_bdev = page_bad->dev->bdev;
- bio->bi_sector = page_bad->physical >> 9;
+ bio->bi_iter.bi_sector = page_bad->physical >> 9;
bio->bi_end_io = scrub_complete_bio_end_io;
bio->bi_private = &complete;
@@ -1538,7 +1538,7 @@ again:
bio->bi_private = sbio;
bio->bi_end_io = scrub_wr_bio_end_io;
bio->bi_bdev = sbio->dev->bdev;
- bio->bi_sector = sbio->physical >> 9;
+ bio->bi_iter.bi_sector = sbio->physical >> 9;
sbio->err = 0;
} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
spage->physical_for_dev_replace ||
@@ -1944,7 +1944,7 @@ again:
bio->bi_private = sbio;
bio->bi_end_io = scrub_bio_end_io;
bio->bi_bdev = sbio->dev->bdev;
- bio->bi_sector = sbio->physical >> 9;
+ bio->bi_iter.bi_sector = sbio->physical >> 9;
sbio->err = 0;
} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
spage->physical ||
@@ -3402,8 +3402,8 @@ static int write_page_nocow(struct scrub_ctx *sctx,
}
bio->bi_private = &compl;
bio->bi_end_io = scrub_complete_bio_end_io;
- bio->bi_size = 0;
- bio->bi_sector = physical_for_dev_replace >> 9;
+ bio->bi_iter.bi_size = 0;
+ bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
bio->bi_bdev = dev->bdev;
ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
if (ret != PAGE_CACHE_SIZE) {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 043b215769c2..ef4894725ff1 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5407,7 +5407,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio,
if (!q->merge_bvec_fn)
return 1;
- bvm.bi_size = bio->bi_size - prev->bv_len;
+ bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len;
if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
return 0;
return 1;
@@ -5422,7 +5422,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
bio->bi_private = bbio;
btrfs_io_bio(bio)->stripe_index = dev_nr;
bio->bi_end_io = btrfs_end_bio;
- bio->bi_sector = physical >> 9;
+ bio->bi_iter.bi_sector = physical >> 9;
#ifdef DEBUG
{
struct rcu_string *name;
@@ -5460,7 +5460,7 @@ again:
while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
bvec->bv_offset) < bvec->bv_len) {
- u64 len = bio->bi_size;
+ u64 len = bio->bi_iter.bi_size;
atomic_inc(&bbio->stripes_pending);
submit_stripe_bio(root, bbio, bio, physical, dev_nr,
@@ -5482,7 +5482,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
bio->bi_private = bbio->private;
bio->bi_end_io = bbio->end_io;
btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
- bio->bi_sector = logical >> 9;
+ bio->bi_iter.bi_sector = logical >> 9;
kfree(bbio);
bio_endio(bio, -EIO);
}
@@ -5493,7 +5493,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
{
struct btrfs_device *dev;
struct bio *first_bio = bio;
- u64 logical = (u64)bio->bi_sector << 9;
+ u64 logical = (u64)bio->bi_iter.bi_sector << 9;
u64 length = 0;
u64 map_length;
u64 *raid_map = NULL;
@@ -5502,7 +5502,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
int total_devs = 1;
struct btrfs_bio *bbio = NULL;
- length = bio->bi_size;
+ length = bio->bi_iter.bi_size;
map_length = length;
ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
diff --git a/fs/buffer.c b/fs/buffer.c
index 6024877335ca..1c04ec66974e 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2982,11 +2982,11 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
* let it through, and the IO layer will turn it into
* an EIO.
*/
- if (unlikely(bio->bi_sector >= maxsector))
+ if (unlikely(bio->bi_iter.bi_sector >= maxsector))
return;
- maxsector -= bio->bi_sector;
- bytes = bio->bi_size;
+ maxsector -= bio->bi_iter.bi_sector;
+ bytes = bio->bi_iter.bi_size;
if (likely((bytes >> 9) <= maxsector))
return;
@@ -2994,7 +2994,7 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
bytes = maxsector << 9;
/* Truncate the bio.. */
- bio->bi_size = bytes;
+ bio->bi_iter.bi_size = bytes;
bio->bi_io_vec[0].bv_len = bytes;
/* ..and clear the end of the buffer for reads */
@@ -3029,14 +3029,14 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
*/
bio = bio_alloc(GFP_NOIO, 1);
- bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+ bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
bio->bi_io_vec[0].bv_page = bh->b_page;
bio->bi_io_vec[0].bv_len = bh->b_size;
bio->bi_io_vec[0].bv_offset = bh_offset(bh);
bio->bi_vcnt = 1;
- bio->bi_size = bh->b_size;
+ bio->bi_iter.bi_size = bh->b_size;
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 43eb5592cdea..00baf1419989 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -270,7 +270,7 @@ static void cachefiles_drop_object(struct fscache_object *_object)
#endif
/* delete retired objects */
- if (test_bit(FSCACHE_COOKIE_RETIRED, &object->fscache.cookie->flags) &&
+ if (test_bit(FSCACHE_OBJECT_RETIRED, &object->fscache.flags) &&
_object != cache->cache.fsdef
) {
_debug("- retire object OBJ%x", object->fscache.debug_id);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 6df8bd481425..1cb39e652886 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1179,8 +1179,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
* never get called.
*/
static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
- const struct iovec *iov,
- loff_t pos, unsigned long nr_segs)
+ struct iov_iter *iter, loff_t pos)
{
WARN_ON(1);
return -EINVAL;
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 6bfe65e0b038..8c44fdd4e1c3 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -68,7 +68,7 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
{
fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
&ceph_fscache_fsid_object_def,
- fsc);
+ fsc, true);
if (fsc->fscache == NULL) {
pr_err("Unable to resgister fsid: %p fscache cookie", fsc);
@@ -204,7 +204,7 @@ void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
ci->fscache = fscache_acquire_cookie(fsc->fscache,
&ceph_fscache_inode_object_def,
- ci);
+ ci, true);
done:
mutex_unlock(&inode->i_mutex);
@@ -324,6 +324,9 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ if (!PageFsCache(page))
+ return;
+
fscache_wait_on_page_write(ci->fscache, page);
fscache_uncache_page(ci->fscache, page);
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 13976c33332e..3c0a4bd74996 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -897,7 +897,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
* caller should hold i_ceph_lock.
* caller will not hold session s_mutex if called from destroy_inode.
*/
-void __ceph_remove_cap(struct ceph_cap *cap)
+void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
struct ceph_mds_session *session = cap->session;
struct ceph_inode_info *ci = cap->ci;
@@ -909,6 +909,16 @@ void __ceph_remove_cap(struct ceph_cap *cap)
/* remove from session list */
spin_lock(&session->s_cap_lock);
+ /*
+ * s_cap_reconnect is protected by s_cap_lock. no one changes
+ * s_cap_gen while session is in the reconnect state.
+ */
+ if (queue_release &&
+ (!session->s_cap_reconnect ||
+ cap->cap_gen == session->s_cap_gen))
+ __queue_cap_release(session, ci->i_vino.ino, cap->cap_id,
+ cap->mseq, cap->issue_seq);
+
if (session->s_cap_iterator == cap) {
/* not yet, we are iterating over this very cap */
dout("__ceph_remove_cap delaying %p removal from session %p\n",
@@ -1023,7 +1033,6 @@ void __queue_cap_release(struct ceph_mds_session *session,
struct ceph_mds_cap_release *head;
struct ceph_mds_cap_item *item;
- spin_lock(&session->s_cap_lock);
BUG_ON(!session->s_num_cap_releases);
msg = list_first_entry(&session->s_cap_releases,
struct ceph_msg, list_head);
@@ -1052,7 +1061,6 @@ void __queue_cap_release(struct ceph_mds_session *session,
(int)CEPH_CAPS_PER_RELEASE,
(int)msg->front.iov_len);
}
- spin_unlock(&session->s_cap_lock);
}
/*
@@ -1067,12 +1075,8 @@ void ceph_queue_caps_release(struct inode *inode)
p = rb_first(&ci->i_caps);
while (p) {
struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
- struct ceph_mds_session *session = cap->session;
-
- __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
- cap->mseq, cap->issue_seq);
p = rb_next(p);
- __ceph_remove_cap(cap);
+ __ceph_remove_cap(cap, true);
}
}
@@ -2791,7 +2795,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
}
spin_unlock(&mdsc->cap_dirty_lock);
}
- __ceph_remove_cap(cap);
+ __ceph_remove_cap(cap, false);
}
/* else, we already released it */
@@ -2931,9 +2935,12 @@ void ceph_handle_caps(struct ceph_mds_session *session,
if (!inode) {
dout(" i don't have ino %llx\n", vino.ino);
- if (op == CEPH_CAP_OP_IMPORT)
+ if (op == CEPH_CAP_OP_IMPORT) {
+ spin_lock(&session->s_cap_lock);
__queue_cap_release(session, vino.ino, cap_id,
mseq, seq);
+ spin_unlock(&session->s_cap_lock);
+ }
goto flush_cap_releases;
}
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 868b61d56cac..2a0bcaeb189a 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -352,8 +352,18 @@ more:
}
/* note next offset and last dentry name */
+ rinfo = &req->r_reply_info;
+ if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
+ frag = le32_to_cpu(rinfo->dir_dir->frag);
+ if (ceph_frag_is_leftmost(frag))
+ fi->next_offset = 2;
+ else
+ fi->next_offset = 0;
+ off = fi->next_offset;
+ }
fi->offset = fi->next_offset;
fi->last_readdir = req;
+ fi->frag = frag;
if (req->r_reply_info.dir_end) {
kfree(fi->last_name);
@@ -363,7 +373,6 @@ more:
else
fi->next_offset = 0;
} else {
- rinfo = &req->r_reply_info;
err = note_last_dentry(fi,
rinfo->dir_dname[rinfo->dir_nr-1],
rinfo->dir_dname_len[rinfo->dir_nr-1]);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 3de89829e2a1..37b5b5c4ad5b 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -408,51 +408,92 @@ more:
*
* If the read spans object boundary, just do multiple reads.
*/
-static ssize_t ceph_sync_read(struct file *file, char __user *data,
- unsigned len, loff_t *poff, int *checkeof)
+static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
+ int *checkeof)
{
+ struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct page **pages;
- u64 off = *poff;
+ u64 off = iocb->ki_pos;
int num_pages, ret;
+ size_t len = i->count;
- dout("sync_read on file %p %llu~%u %s\n", file, off, len,
+ dout("sync_read on file %p %llu~%u %s\n", file, off,
+ (unsigned)len,
(file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
-
- if (file->f_flags & O_DIRECT) {
- num_pages = calc_pages_for((unsigned long)data, len);
- pages = ceph_get_direct_page_vector(data, num_pages, true);
- } else {
- num_pages = calc_pages_for(off, len);
- pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
- }
- if (IS_ERR(pages))
- return PTR_ERR(pages);
-
/*
* flush any page cache pages in this range. this
* will make concurrent normal and sync io slow,
* but it will at least behave sensibly when they are
* in sequence.
*/
- ret = filemap_write_and_wait(inode->i_mapping);
+ ret = filemap_write_and_wait_range(inode->i_mapping, off,
+ off + len);
if (ret < 0)
- goto done;
+ return ret;
- ret = striped_read(inode, off, len, pages, num_pages, checkeof,
- file->f_flags & O_DIRECT,
- (unsigned long)data & ~PAGE_MASK);
+ if (file->f_flags & O_DIRECT) {
+ while (iov_iter_count(i)) {
+ void __user *data = iov_iter_iovec(i)->iov_base + i->iov_offset;
+ size_t len = iov_iter_iovec(i)->iov_len - i->iov_offset;
+
+ num_pages = calc_pages_for((unsigned long)data, len);
+ pages = ceph_get_direct_page_vector(data,
+ num_pages, true);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ ret = striped_read(inode, off, len,
+ pages, num_pages, checkeof,
+ 1, (unsigned long)data & ~PAGE_MASK);
+ ceph_put_page_vector(pages, num_pages, true);
+
+ if (ret <= 0)
+ break;
+ off += ret;
+ iov_iter_advance(i, ret);
+ if (ret < len)
+ break;
+ }
+ } else {
+ num_pages = calc_pages_for(off, len);
+ pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+ ret = striped_read(inode, off, len, pages,
+ num_pages, checkeof, 0, 0);
+ if (ret > 0) {
+ int l, k = 0;
+ size_t left = len = ret;
+
+ while (left) {
+ void __user *data = iov_iter_iovec(i)->iov_base
+ + i->iov_offset;
+ l = min(iov_iter_iovec(i)->iov_len - i->iov_offset,
+ left);
+
+ ret = ceph_copy_page_vector_to_user(&pages[k],
+ data, off,
+ l);
+ if (ret > 0) {
+ iov_iter_advance(i, ret);
+ left -= ret;
+ off += ret;
+ k = calc_pages_for(iocb->ki_pos,
+ len - left + 1) - 1;
+ BUG_ON(k >= num_pages && left);
+ } else
+ break;
+ }
+ }
+ ceph_release_page_vector(pages, num_pages);
+ }
- if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
- ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
- if (ret >= 0)
- *poff = off + ret;
+ if (off > iocb->ki_pos) {
+ ret = off - iocb->ki_pos;
+ iocb->ki_pos = off;
+ }
-done:
- if (file->f_flags & O_DIRECT)
- ceph_put_page_vector(pages, num_pages, true);
- else
- ceph_release_page_vector(pages, num_pages);
dout("sync_read result %d\n", ret);
return ret;
}
@@ -489,83 +530,79 @@ static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
}
}
+
/*
- * Synchronous write, straight from __user pointer or user pages (if
- * O_DIRECT).
+ * Synchronous write, straight from __user pointer or user pages.
*
* If write spans object boundary, just do multiple writes. (For a
* correct atomic write, we should e.g. take write locks on all
* objects, rollback on failure, etc.)
*/
-static ssize_t ceph_sync_write(struct file *file, const char __user *data,
- size_t left, loff_t pos, loff_t *ppos)
+static ssize_t
+ceph_sync_direct_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, size_t count)
{
+ struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_snap_context *snapc;
struct ceph_vino vino;
struct ceph_osd_request *req;
- int num_ops = 1;
struct page **pages;
int num_pages;
- u64 len;
int written = 0;
int flags;
int check_caps = 0;
- int page_align, io_align;
- unsigned long buf_align;
+ int page_align;
int ret;
struct timespec mtime = CURRENT_TIME;
- bool own_pages = false;
+ loff_t pos = iocb->ki_pos;
+ struct iov_iter i;
if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
return -EROFS;
- dout("sync_write on file %p %lld~%u %s\n", file, pos,
- (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
+ dout("sync_direct_write on file %p %lld~%u\n", file, pos,
+ (unsigned)count);
- ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
+ ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
if (ret < 0)
return ret;
ret = invalidate_inode_pages2_range(inode->i_mapping,
pos >> PAGE_CACHE_SHIFT,
- (pos + left) >> PAGE_CACHE_SHIFT);
+ (pos + count) >> PAGE_CACHE_SHIFT);
if (ret < 0)
dout("invalidate_inode_pages2_range returned %d\n", ret);
flags = CEPH_OSD_FLAG_ORDERSNAP |
CEPH_OSD_FLAG_ONDISK |
CEPH_OSD_FLAG_WRITE;
- if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
- flags |= CEPH_OSD_FLAG_ACK;
- else
- num_ops++; /* Also include a 'startsync' command. */
- /*
- * we may need to do multiple writes here if we span an object
- * boundary. this isn't atomic, unfortunately. :(
- */
-more:
- io_align = pos & ~PAGE_MASK;
- buf_align = (unsigned long)data & ~PAGE_MASK;
- len = left;
-
- snapc = ci->i_snap_realm->cached_context;
- vino = ceph_vino(inode);
- req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
- vino, pos, &len, num_ops,
- CEPH_OSD_OP_WRITE, flags, snapc,
- ci->i_truncate_seq, ci->i_truncate_size,
- false);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ iov_iter_init(&i, iov, nr_segs, count, 0);
+
+ while (iov_iter_count(&i) > 0) {
+ void __user *data = iov_iter_iovec(&i)->iov_base + i.iov_offset;
+ u64 len = iov_iter_iovec(&i)->iov_len - i.iov_offset;
+
+ page_align = (unsigned long)data & ~PAGE_MASK;
+
+ snapc = ci->i_snap_realm->cached_context;
+ vino = ceph_vino(inode);
+ req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+ vino, pos, &len,
+ 2,/*include a 'startsync' command*/
+ CEPH_OSD_OP_WRITE, flags, snapc,
+ ci->i_truncate_seq,
+ ci->i_truncate_size,
+ false);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ goto out;
+ }
- /* write from beginning of first page, regardless of io alignment */
- page_align = file->f_flags & O_DIRECT ? buf_align : io_align;
- num_pages = calc_pages_for(page_align, len);
- if (file->f_flags & O_DIRECT) {
+ num_pages = calc_pages_for(page_align, len);
pages = ceph_get_direct_page_vector(data, num_pages, false);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
@@ -577,60 +614,175 @@ more:
* may block.
*/
truncate_inode_pages_range(inode->i_mapping, pos,
- (pos+len) | (PAGE_CACHE_SIZE-1));
- } else {
+ (pos+len) | (PAGE_CACHE_SIZE-1));
+ osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
+ false, false);
+
+ /* BUG_ON(vino.snap != CEPH_NOSNAP); */
+ ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
+
+ ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
+ if (!ret)
+ ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+
+ ceph_put_page_vector(pages, num_pages, false);
+
+out:
+ ceph_osdc_put_request(req);
+ if (ret == 0) {
+ pos += len;
+ written += len;
+ iov_iter_advance(&i, (size_t)len);
+
+ if (pos > i_size_read(inode)) {
+ check_caps = ceph_inode_set_size(inode, pos);
+ if (check_caps)
+ ceph_check_caps(ceph_inode(inode),
+ CHECK_CAPS_AUTHONLY,
+ NULL);
+ }
+ } else
+ break;
+ }
+
+ if (ret != -EOLDSNAPC && written > 0) {
+ iocb->ki_pos = pos;
+ ret = written;
+ }
+ return ret;
+}
+
+
+/*
+ * Synchronous write, straight from __user pointer or user pages.
+ *
+ * If write spans object boundary, just do multiple writes. (For a
+ * correct atomic write, we should e.g. take write locks on all
+ * objects, rollback on failure, etc.)
+ */
+static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, size_t count)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_snap_context *snapc;
+ struct ceph_vino vino;
+ struct ceph_osd_request *req;
+ struct page **pages;
+ u64 len;
+ int num_pages;
+ int written = 0;
+ int flags;
+ int check_caps = 0;
+ int ret;
+ struct timespec mtime = CURRENT_TIME;
+ loff_t pos = iocb->ki_pos;
+ struct iov_iter i;
+
+ if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
+ return -EROFS;
+
+ dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);
+
+ ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
+ if (ret < 0)
+ return ret;
+
+ ret = invalidate_inode_pages2_range(inode->i_mapping,
+ pos >> PAGE_CACHE_SHIFT,
+ (pos + count) >> PAGE_CACHE_SHIFT);
+ if (ret < 0)
+ dout("invalidate_inode_pages2_range returned %d\n", ret);
+
+ flags = CEPH_OSD_FLAG_ORDERSNAP |
+ CEPH_OSD_FLAG_ONDISK |
+ CEPH_OSD_FLAG_WRITE |
+ CEPH_OSD_FLAG_ACK;
+
+ iov_iter_init(&i, iov, nr_segs, count, 0);
+
+ while ((len = iov_iter_count(&i)) > 0) {
+ size_t left;
+ int n;
+
+ snapc = ci->i_snap_realm->cached_context;
+ vino = ceph_vino(inode);
+ req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+ vino, pos, &len, 1,
+ CEPH_OSD_OP_WRITE, flags, snapc,
+ ci->i_truncate_seq,
+ ci->i_truncate_size,
+ false);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ goto out;
+ }
+
+ /*
+ * write from beginning of first page,
+ * regardless of io alignment
+ */
+ num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto out;
}
- ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
+
+ left = len;
+ for (n = 0; n < num_pages; n++) {
+ size_t plen = min(left, PAGE_SIZE);
+ ret = iov_iter_copy_from_user(pages[n], &i, 0, plen);
+ if (ret != plen) {
+ ret = -EFAULT;
+ break;
+ }
+ left -= ret;
+ iov_iter_advance(&i, ret);
+ }
+
if (ret < 0) {
ceph_release_page_vector(pages, num_pages);
goto out;
}
- if ((file->f_flags & O_SYNC) == 0) {
- /* get a second commit callback */
- req->r_unsafe_callback = ceph_sync_write_unsafe;
- req->r_inode = inode;
- own_pages = true;
- }
- }
- osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
- false, own_pages);
+ /* get a second commit callback */
+ req->r_unsafe_callback = ceph_sync_write_unsafe;
+ req->r_inode = inode;
- /* BUG_ON(vino.snap != CEPH_NOSNAP); */
- ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
+ osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
+ false, true);
- ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
- if (!ret)
- ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ /* BUG_ON(vino.snap != CEPH_NOSNAP); */
+ ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
- if (file->f_flags & O_DIRECT)
- ceph_put_page_vector(pages, num_pages, false);
- else if (file->f_flags & O_SYNC)
- ceph_release_page_vector(pages, num_pages);
+ ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
+ if (!ret)
+ ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
out:
- ceph_osdc_put_request(req);
- if (ret == 0) {
- pos += len;
- written += len;
- left -= len;
- data += len;
- if (left)
- goto more;
+ ceph_osdc_put_request(req);
+ if (ret == 0) {
+ pos += len;
+ written += len;
+
+ if (pos > i_size_read(inode)) {
+ check_caps = ceph_inode_set_size(inode, pos);
+ if (check_caps)
+ ceph_check_caps(ceph_inode(inode),
+ CHECK_CAPS_AUTHONLY,
+ NULL);
+ }
+ } else
+ break;
+ }
+ if (ret != -EOLDSNAPC && written > 0) {
ret = written;
- *ppos = pos;
- if (pos > i_size_read(inode))
- check_caps = ceph_inode_set_size(inode, pos);
- if (check_caps)
- ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
- NULL);
- } else if (ret != -EOLDSNAPC && written > 0) {
- ret = written;
+ iocb->ki_pos = pos;
}
return ret;
}
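
The rewritten ceph_sync_write()/ceph_sync_direct_write() above stop stepping a raw data/left pair and instead drive the loop with an iov_iter cursor (iov_iter_init, iov_iter_advance, iov_iter_copy_from_user). As an illustration only — not part of the patch — here is a minimal user-space sketch of the same cursor idea using the standard struct iovec; the helper name iov_advance and the sample buffers are invented for this example:

    #include <stdio.h>
    #include <sys/uio.h>

    /* Advance an iovec array by 'n' bytes, the way an iov_iter cursor would. */
    static void iov_advance(struct iovec **iov, int *nr_segs, size_t n)
    {
            while (n > 0 && *nr_segs > 0) {
                    size_t step = n < (*iov)->iov_len ? n : (*iov)->iov_len;

                    (*iov)->iov_base = (char *)(*iov)->iov_base + step;
                    (*iov)->iov_len -= step;
                    n -= step;
                    if ((*iov)->iov_len == 0) {
                            (*iov)++;        /* segment fully consumed */
                            (*nr_segs)--;
                    }
            }
    }

    int main(void)
    {
            char a[8] = "aaaaaaa", b[8] = "bbbbbbb";
            struct iovec vec[2] = {
                    { .iov_base = a, .iov_len = sizeof(a) },
                    { .iov_base = b, .iov_len = sizeof(b) },
            };
            struct iovec *cur = vec;
            int nr = 2;

            iov_advance(&cur, &nr, 10);     /* consumes all of 'a' plus 2 bytes of 'b' */
            printf("segments left: %d, first segment length: %zu\n",
                   nr, cur->iov_len);       /* prints: segments left: 1, first segment length: 6 */
            return 0;
    }

The kernel loop works the same way: each OSD request consumes part of the iterator, and the iterator carries the "how far did we get" state instead of hand-maintained pointer/length variables.
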
@@ -647,55 +799,84 @@ static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
{
struct file *filp = iocb->ki_filp;
struct ceph_file_info *fi = filp->private_data;
- loff_t *ppos = &iocb->ki_pos;
- size_t len = iov->iov_len;
+ size_t len = iocb->ki_nbytes;
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
- void __user *base = iov->iov_base;
ssize_t ret;
int want, got = 0;
int checkeof = 0, read = 0;
- dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
- inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
+ dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
+ inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
+
if (fi->fmode & CEPH_FILE_MODE_LAZY)
want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
else
want = CEPH_CAP_FILE_CACHE;
ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
if (ret < 0)
- goto out;
- dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
- inode, ceph_vinop(inode), pos, (unsigned)len,
- ceph_cap_string(got));
+ return ret;
if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
(iocb->ki_filp->f_flags & O_DIRECT) ||
- (fi->flags & CEPH_F_SYNC))
+ (fi->flags & CEPH_F_SYNC)) {
+ struct iov_iter i;
+
+ dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
+ inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
+ ceph_cap_string(got));
+
+ if (!read) {
+ ret = generic_segment_checks(iov, &nr_segs,
+ &len, VERIFY_WRITE);
+ if (ret)
+ goto out;
+ }
+
+ iov_iter_init(&i, iov, nr_segs, len, read);
+
/* hmm, this isn't really async... */
- ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
- else
- ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
+ ret = ceph_sync_read(iocb, &i, &checkeof);
+ } else {
+ /*
+ * We can't modify the content of iov,
+ * so we only read from the beginning.
+ */
+ if (read) {
+ iocb->ki_pos = pos;
+ len = iocb->ki_nbytes;
+ read = 0;
+ }
+ dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
+ inode, ceph_vinop(inode), pos, (unsigned)len,
+ ceph_cap_string(got));
+ ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
+ }
out:
dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
ceph_put_cap_refs(ci, got);
if (checkeof && ret >= 0) {
- int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
+ int statret = ceph_do_getattr(inode,
+ CEPH_STAT_CAP_SIZE);
/* hit EOF or hole? */
- if (statret == 0 && *ppos < inode->i_size) {
- dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n", *ppos, inode->i_size);
+ if (statret == 0 && iocb->ki_pos < inode->i_size &&
+ ret < len) {
+ dout("sync_read hit hole, ppos %lld < size %lld"
+ ", reading more\n", iocb->ki_pos,
+ inode->i_size);
+
read += ret;
- base += ret;
len -= ret;
checkeof = 0;
goto again;
}
}
+
if (ret >= 0)
ret += read;
@@ -772,11 +953,13 @@ retry_snap:
inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
- (iocb->ki_filp->f_flags & O_DIRECT) ||
- (fi->flags & CEPH_F_SYNC)) {
+ (file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
mutex_unlock(&inode->i_mutex);
- written = ceph_sync_write(file, iov->iov_base, count,
- pos, &iocb->ki_pos);
+ if (file->f_flags & O_DIRECT)
+ written = ceph_sync_direct_write(iocb, iov,
+ nr_segs, count);
+ else
+ written = ceph_sync_write(iocb, iov, nr_segs, count);
if (written == -EOLDSNAPC) {
dout("aio_write %p %llx.%llx %llu~%u"
"got EOLDSNAPC, retrying\n",
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 8549a48115f7..2ae1381de64a 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -436,6 +436,16 @@ void ceph_destroy_inode(struct inode *inode)
call_rcu(&inode->i_rcu, ceph_i_callback);
}
+int ceph_drop_inode(struct inode *inode)
+{
+ /*
+ * Positive dentries and their corresponding inodes always arrive
+ * together in the MDS reply, so there is no need to keep the inode
+ * in the cache after dropping all its aliases.
+ */
+ return 1;
+}
+
/*
* Helpers to fill in size, ctime, mtime, and atime. We have to be
* careful because either the client or MDS may have more up to date
@@ -577,6 +587,8 @@ static int fill_inode(struct inode *inode,
int issued = 0, implemented;
struct timespec mtime, atime, ctime;
u32 nsplits;
+ struct ceph_inode_frag *frag;
+ struct rb_node *rb_node;
struct ceph_buffer *xattr_blob = NULL;
int err = 0;
int queue_trunc = 0;
@@ -751,15 +763,38 @@ no_change:
/* FIXME: move me up, if/when version reflects fragtree changes */
nsplits = le32_to_cpu(info->fragtree.nsplits);
mutex_lock(&ci->i_fragtree_mutex);
+ rb_node = rb_first(&ci->i_fragtree);
for (i = 0; i < nsplits; i++) {
u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
- struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);
-
- if (IS_ERR(frag))
- continue;
+ frag = NULL;
+ while (rb_node) {
+ frag = rb_entry(rb_node, struct ceph_inode_frag, node);
+ if (ceph_frag_compare(frag->frag, id) >= 0) {
+ if (frag->frag != id)
+ frag = NULL;
+ else
+ rb_node = rb_next(rb_node);
+ break;
+ }
+ rb_node = rb_next(rb_node);
+ rb_erase(&frag->node, &ci->i_fragtree);
+ kfree(frag);
+ frag = NULL;
+ }
+ if (!frag) {
+ frag = __get_or_create_frag(ci, id);
+ if (IS_ERR(frag))
+ continue;
+ }
frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
dout(" frag %x split by %d\n", frag->frag, frag->split_by);
}
+ while (rb_node) {
+ frag = rb_entry(rb_node, struct ceph_inode_frag, node);
+ rb_node = rb_next(rb_node);
+ rb_erase(&frag->node, &ci->i_fragtree);
+ kfree(frag);
+ }
mutex_unlock(&ci->i_fragtree_mutex);
/* were we issued a capability? */
@@ -1250,8 +1285,20 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
int err = 0, i;
struct inode *snapdir = NULL;
struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
- u64 frag = le32_to_cpu(rhead->args.readdir.frag);
struct ceph_dentry_info *di;
+ u64 r_readdir_offset = req->r_readdir_offset;
+ u32 frag = le32_to_cpu(rhead->args.readdir.frag);
+
+ if (rinfo->dir_dir &&
+ le32_to_cpu(rinfo->dir_dir->frag) != frag) {
+ dout("readdir_prepopulate got new frag %x -> %x\n",
+ frag, le32_to_cpu(rinfo->dir_dir->frag));
+ frag = le32_to_cpu(rinfo->dir_dir->frag);
+ if (ceph_frag_is_leftmost(frag))
+ r_readdir_offset = 2;
+ else
+ r_readdir_offset = 0;
+ }
if (req->r_aborted)
return readdir_prepopulate_inodes_only(req, session);
@@ -1315,7 +1362,7 @@ retry_lookup:
}
di = dn->d_fsdata;
- di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
+ di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
/* inode */
if (dn->d_inode) {
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index b7bda5d9611d..6d953ab0ac06 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -43,6 +43,7 @@
*/
struct ceph_reconnect_state {
+ int nr_caps;
struct ceph_pagelist *pagelist;
bool flock;
};
@@ -443,6 +444,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
INIT_LIST_HEAD(&s->s_waiting);
INIT_LIST_HEAD(&s->s_unsafe);
s->s_num_cap_releases = 0;
+ s->s_cap_reconnect = 0;
s->s_cap_iterator = NULL;
INIT_LIST_HEAD(&s->s_cap_releases);
INIT_LIST_HEAD(&s->s_cap_releases_done);
@@ -986,7 +988,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
dout("removing cap %p, ci is %p, inode is %p\n",
cap, ci, &ci->vfs_inode);
spin_lock(&ci->i_ceph_lock);
- __ceph_remove_cap(cap);
+ __ceph_remove_cap(cap, false);
if (!__ceph_is_any_real_caps(ci)) {
struct ceph_mds_client *mdsc =
ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -1231,9 +1233,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
session->s_trim_caps--;
if (oissued) {
/* we aren't the only cap.. just remove us */
- __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
- cap->mseq, cap->issue_seq);
- __ceph_remove_cap(cap);
+ __ceph_remove_cap(cap, true);
} else {
/* try to drop referring dentries */
spin_unlock(&ci->i_ceph_lock);
@@ -1416,7 +1416,6 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
unsigned num;
dout("discard_cap_releases mds%d\n", session->s_mds);
- spin_lock(&session->s_cap_lock);
/* zero out the in-progress message */
msg = list_first_entry(&session->s_cap_releases,
@@ -1443,8 +1442,6 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
msg->front.iov_len = sizeof(*head);
list_add(&msg->list_head, &session->s_cap_releases);
}
-
- spin_unlock(&session->s_cap_lock);
}
/*
@@ -2238,8 +2235,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
if (err == 0) {
if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
- req->r_op == CEPH_MDS_OP_LSSNAP) &&
- rinfo->dir_nr)
+ req->r_op == CEPH_MDS_OP_LSSNAP))
ceph_readdir_prepopulate(req, req->r_session);
ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
}
@@ -2490,6 +2486,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
cap->seq = 0; /* reset cap seq */
cap->issue_seq = 0; /* and issue_seq */
cap->mseq = 0; /* and migrate_seq */
+ cap->cap_gen = cap->session->s_cap_gen;
if (recon_state->flock) {
rec.v2.cap_id = cpu_to_le64(cap->cap_id);
@@ -2552,6 +2549,8 @@ encode_again:
} else {
err = ceph_pagelist_append(pagelist, &rec, reclen);
}
+
+ recon_state->nr_caps++;
out_free:
kfree(path);
out_dput:
@@ -2579,6 +2578,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
struct rb_node *p;
int mds = session->s_mds;
int err = -ENOMEM;
+ int s_nr_caps;
struct ceph_pagelist *pagelist;
struct ceph_reconnect_state recon_state;
@@ -2610,20 +2610,38 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
dout("session %p state %s\n", session,
session_state_name(session->s_state));
+ spin_lock(&session->s_gen_ttl_lock);
+ session->s_cap_gen++;
+ spin_unlock(&session->s_gen_ttl_lock);
+
+ spin_lock(&session->s_cap_lock);
+ /*
+ * notify __ceph_remove_cap() that we are composing cap reconnect.
+ * If a cap gets released before being added to the cap reconnect
+ * message, __ceph_remove_cap() should skip queuing the cap release.
+ */
+ session->s_cap_reconnect = 1;
/* drop old cap expires; we're about to reestablish that state */
discard_cap_releases(mdsc, session);
+ spin_unlock(&session->s_cap_lock);
/* traverse this session's caps */
- err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
+ s_nr_caps = session->s_nr_caps;
+ err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
if (err)
goto fail;
+ recon_state.nr_caps = 0;
recon_state.pagelist = pagelist;
recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
err = iterate_session_caps(session, encode_caps_cb, &recon_state);
if (err < 0)
goto fail;
+ spin_lock(&session->s_cap_lock);
+ session->s_cap_reconnect = 0;
+ spin_unlock(&session->s_cap_lock);
+
/*
* snaprealms. we provide mds with the ino, seq (version), and
* parent for all of our realms. If the mds has any newer info,
@@ -2646,11 +2664,18 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
if (recon_state.flock)
reply->hdr.version = cpu_to_le16(2);
- if (pagelist->length) {
- /* set up outbound data if we have any */
- reply->hdr.data_len = cpu_to_le32(pagelist->length);
- ceph_msg_data_add_pagelist(reply, pagelist);
+
+ /* raced with cap release? */
+ if (s_nr_caps != recon_state.nr_caps) {
+ struct page *page = list_first_entry(&pagelist->head,
+ struct page, lru);
+ __le32 *addr = kmap_atomic(page);
+ *addr = cpu_to_le32(recon_state.nr_caps);
+ kunmap_atomic(addr);
}
+
+ reply->hdr.data_len = cpu_to_le32(pagelist->length);
+ ceph_msg_data_add_pagelist(reply, pagelist);
ceph_con_send(&session->s_con, reply);
mutex_unlock(&session->s_mutex);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index c2a19fbbe517..4c053d099ae4 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -132,6 +132,7 @@ struct ceph_mds_session {
struct list_head s_caps; /* all caps issued by this session */
int s_nr_caps, s_trim_caps;
int s_num_cap_releases;
+ int s_cap_reconnect;
struct list_head s_cap_releases; /* waiting cap_release messages */
struct list_head s_cap_releases_done; /* ready to send */
struct ceph_cap *s_cap_iterator;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 6a0951e43044..e58bd4a23bfb 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -686,6 +686,7 @@ static const struct super_operations ceph_super_ops = {
.alloc_inode = ceph_alloc_inode,
.destroy_inode = ceph_destroy_inode,
.write_inode = ceph_write_inode,
+ .drop_inode = ceph_drop_inode,
.sync_fs = ceph_sync_fs,
.put_super = ceph_put_super,
.show_options = ceph_show_options,
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 6014b0a3c405..8de94b564d67 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -691,6 +691,7 @@ extern const struct inode_operations ceph_file_iops;
extern struct inode *ceph_alloc_inode(struct super_block *sb);
extern void ceph_destroy_inode(struct inode *inode);
+extern int ceph_drop_inode(struct inode *inode);
extern struct inode *ceph_get_inode(struct super_block *sb,
struct ceph_vino vino);
@@ -741,13 +742,7 @@ extern int ceph_add_cap(struct inode *inode,
int fmode, unsigned issued, unsigned wanted,
unsigned cap, unsigned seq, u64 realmino, int flags,
struct ceph_cap_reservation *caps_reservation);
-extern void __ceph_remove_cap(struct ceph_cap *cap);
-static inline void ceph_remove_cap(struct ceph_cap *cap)
-{
- spin_lock(&cap->ci->i_ceph_lock);
- __ceph_remove_cap(cap);
- spin_unlock(&cap->ci->i_ceph_lock);
-}
+extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
extern void ceph_put_cap(struct ceph_mds_client *mdsc,
struct ceph_cap *cap);
diff --git a/fs/char_dev.c b/fs/char_dev.c
index afc2bb691780..b9bc05b9513a 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -574,7 +574,8 @@ static struct kobject *base_probe(dev_t dev, int *part, void *data)
void __init chrdev_init(void)
{
cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
- bdi_init(&directly_mappable_cdev_bdi);
+ if (bdi_init(&directly_mappable_cdev_bdi))
+ panic("Failed to init directly mappable cdev bdi");
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 77fc5e181077..849f6132b327 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -862,7 +862,7 @@ const struct inode_operations cifs_file_inode_ops = {
const struct inode_operations cifs_symlink_inode_ops = {
.readlink = generic_readlink,
.follow_link = cifs_follow_link,
- .put_link = cifs_put_link,
+ .put_link = kfree_put_link,
.permission = cifs_permission,
/* BB add the following two eventually */
/* revalidate: cifs_revalidate,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 6d0b07217ac9..26a754f49ba1 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -115,8 +115,6 @@ extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
/* Functions related to symlinks */
extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
-extern void cifs_put_link(struct dentry *direntry,
- struct nameidata *nd, void *);
extern int cifs_readlink(struct dentry *direntry, char __user *buffer,
int buflen);
extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 52b6f6c26bfc..1a1fdcc2e491 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -278,6 +278,8 @@ struct smb_version_operations {
/* set attributes */
int (*set_file_info)(struct inode *, const char *, FILE_BASIC_INFO *,
const unsigned int);
+ int (*set_compression)(const unsigned int, struct cifs_tcon *,
+ struct cifsFileInfo *);
/* check if we can send an echo or not */
bool (*can_echo)(struct TCP_Server_Info *);
/* send echo request */
@@ -379,6 +381,9 @@ struct smb_version_operations {
char * (*create_lease_buf)(u8 *, u8);
/* parse lease context buffer and return oplock/epoch info */
__u8 (*parse_lease_buf)(void *, unsigned int *);
+ int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file,
+ struct cifsFileInfo *target_file, u64 src_off, u64 len,
+ u64 dest_off);
};
struct smb_version_values {
@@ -620,11 +625,34 @@ set_credits(struct TCP_Server_Info *server, const int val)
}
static inline __u64
-get_next_mid(struct TCP_Server_Info *server)
+get_next_mid64(struct TCP_Server_Info *server)
{
return server->ops->get_next_mid(server);
}
+static inline __le16
+get_next_mid(struct TCP_Server_Info *server)
+{
+ __u16 mid = get_next_mid64(server);
+ /*
+ * The value in the SMB header should be little endian for easy
+ * on-the-wire decoding.
+ */
+ return cpu_to_le16(mid);
+}
+
+static inline __u16
+get_mid(const struct smb_hdr *smb)
+{
+ return le16_to_cpu(smb->Mid);
+}
+
+static inline bool
+compare_mid(__u16 mid, const struct smb_hdr *smb)
+{
+ return mid == le16_to_cpu(smb->Mid);
+}
+
/*
* When the server supports very large reads and writes via POSIX extensions,
* we can allow up to 2^24-1, minus the size of a READ/WRITE_AND_X header, not
@@ -828,6 +856,8 @@ struct cifs_tcon {
__u32 maximal_access;
__u32 vol_serial_number;
__le64 vol_create_time;
+ __u32 ss_flags; /* sector size flags */
+ __u32 perf_sector_size; /* best sector size for perf */
#endif /* CONFIG_CIFS_SMB2 */
#ifdef CONFIG_CIFS_FSCACHE
u64 resource_id; /* server resource id */
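
The get_next_mid()/get_mid()/compare_mid() helpers added to cifsglob.h above exist because struct smb_hdr now stores Mid as __le16: the MID is generated as a host-order counter but must be little-endian on the wire. A small user-space sketch of the same round-trip (illustrative only, assuming glibc's <endian.h> htole16/le16toh rather than the kernel's cpu_to_le16/le16_to_cpu):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t mid  = 0x1234;          /* host-order MID from a counter */
            uint16_t wire = htole16(mid);    /* what goes into the SMB header */
            uint16_t back = le16toh(wire);   /* what get_mid() recovers */
            unsigned char *p = (unsigned char *)&wire;

            /* wire bytes are always 34 12, regardless of host endianness */
            printf("host 0x%04x -> wire bytes %02x %02x -> host 0x%04x\n",
                   mid, p[0], p[1], back);
            return 0;
    }

Keeping the conversion inside the helpers means callers compare and print MIDs in host order while the header field stays in wire order, which is what the checkSMB()/cifs_find_mid() changes later in this patch rely on.
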
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 08f9dfb1a894..9e5ee34de986 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -428,7 +428,7 @@ struct smb_hdr {
__u16 Tid;
__le16 Pid;
__u16 Uid;
- __u16 Mid;
+ __le16 Mid;
__u8 WordCount;
} __attribute__((packed));
@@ -1352,6 +1352,35 @@ typedef struct smb_com_transaction_ioctl_req {
__u8 Data[1];
} __attribute__((packed)) TRANSACT_IOCTL_REQ;
+typedef struct smb_com_transaction_compr_ioctl_req {
+ struct smb_hdr hdr; /* wct = 23 */
+ __u8 MaxSetupCount;
+ __u16 Reserved;
+ __le32 TotalParameterCount;
+ __le32 TotalDataCount;
+ __le32 MaxParameterCount;
+ __le32 MaxDataCount;
+ __le32 ParameterCount;
+ __le32 ParameterOffset;
+ __le32 DataCount;
+ __le32 DataOffset;
+ __u8 SetupCount; /* four setup words follow subcommand */
+ /* SNIA spec incorrectly included spurious pad here */
+ __le16 SubCommand; /* 2 = IOCTL/FSCTL */
+ __le32 FunctionCode;
+ __u16 Fid;
+ __u8 IsFsctl; /* 1 = File System Control, 0 = device control (IOCTL) */
+ __u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS) */
+ __le16 ByteCount;
+ __u8 Pad[3];
+ __le16 compression_state; /* See below for valid flags */
+} __attribute__((packed)) TRANSACT_COMPR_IOCTL_REQ;
+
+/* compression state flags */
+#define COMPRESSION_FORMAT_NONE 0x0000
+#define COMPRESSION_FORMAT_DEFAULT 0x0001
+#define COMPRESSION_FORMAT_LZNT1 0x0002
+
typedef struct smb_com_transaction_ioctl_rsp {
struct smb_hdr hdr; /* wct = 19 */
__u8 Reserved[3];
@@ -2215,6 +2244,9 @@ typedef struct {
__le32 DeviceCharacteristics;
} __attribute__((packed)) FILE_SYSTEM_DEVICE_INFO; /* device info level 0x104 */
+/* minimum includes first three fields, and empty FS Name */
+#define MIN_FS_ATTR_INFO_SIZE 12
+
typedef struct {
__le32 Attributes;
__le32 MaxPathNameComponentLength;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index b5ec2a268f56..aa3397620342 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -360,6 +360,8 @@ extern int CIFSSMBUnixQuerySymLink(const unsigned int xid,
extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
__u16 fid, char **symlinkinfo,
const struct nls_table *nls_codepage);
+extern int CIFSSMB_set_compression(const unsigned int xid,
+ struct cifs_tcon *tcon, __u16 fid);
extern int CIFSSMBOpen(const unsigned int xid, struct cifs_tcon *tcon,
const char *fileName, const int disposition,
const int access_flags, const int omode,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index ccd31ab815d4..93b29474714a 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -3199,6 +3199,60 @@ qreparse_out:
return rc;
}
+int
+CIFSSMB_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+ __u16 fid)
+{
+ int rc = 0;
+ int bytes_returned;
+ struct smb_com_transaction_compr_ioctl_req *pSMB;
+ struct smb_com_transaction_ioctl_rsp *pSMBr;
+
+ cifs_dbg(FYI, "Set compression for %u\n", fid);
+ rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
+ (void **) &pSMBr);
+ if (rc)
+ return rc;
+
+ pSMB->compression_state = cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
+
+ pSMB->TotalParameterCount = 0;
+ pSMB->TotalDataCount = __constant_cpu_to_le32(2);
+ pSMB->MaxParameterCount = 0;
+ pSMB->MaxDataCount = 0;
+ pSMB->MaxSetupCount = 4;
+ pSMB->Reserved = 0;
+ pSMB->ParameterOffset = 0;
+ pSMB->DataCount = __constant_cpu_to_le32(2);
+ pSMB->DataOffset =
+ cpu_to_le32(offsetof(struct smb_com_transaction_compr_ioctl_req,
+ compression_state) - 4); /* 84 */
+ pSMB->SetupCount = 4;
+ pSMB->SubCommand = __constant_cpu_to_le16(NT_TRANSACT_IOCTL);
+ pSMB->ParameterCount = 0;
+ pSMB->FunctionCode = __constant_cpu_to_le32(FSCTL_SET_COMPRESSION);
+ pSMB->IsFsctl = 1; /* FSCTL */
+ pSMB->IsRootFlag = 0;
+ pSMB->Fid = fid; /* file handle always le */
+ /* 3 byte pad, followed by 2 byte compress state */
+ pSMB->ByteCount = __constant_cpu_to_le16(5);
+ inc_rfc1001_len(pSMB, 5);
+
+ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+ (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+ if (rc)
+ cifs_dbg(FYI, "Send error in SetCompression = %d\n", rc);
+
+ cifs_buf_release(pSMB);
+
+ /*
+ * Note: on an -EAGAIN error only the caller can retry handle-based
+ * calls, since the file handle passed in is no longer valid.
+ */
+ return rc;
+}
+
+
#ifdef CONFIG_CIFS_POSIX
/*Convert an Access Control Entry from wire format to local POSIX xattr format*/
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a279ffc0bc29..62a55147400a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2242,6 +2242,8 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ if (ses->status == CifsExiting)
+ continue;
if (!match_session(ses, vol))
continue;
++ses->ses_count;
@@ -2255,24 +2257,37 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
static void
cifs_put_smb_ses(struct cifs_ses *ses)
{
- unsigned int xid;
+ unsigned int rc, xid;
struct TCP_Server_Info *server = ses->server;
cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
+
spin_lock(&cifs_tcp_ses_lock);
+ if (ses->status == CifsExiting) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return;
+ }
if (--ses->ses_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
return;
}
-
- list_del_init(&ses->smb_ses_list);
+ if (ses->status == CifsGood)
+ ses->status = CifsExiting;
spin_unlock(&cifs_tcp_ses_lock);
- if (ses->status == CifsGood && server->ops->logoff) {
+ if (ses->status == CifsExiting && server->ops->logoff) {
xid = get_xid();
- server->ops->logoff(xid, ses);
+ rc = server->ops->logoff(xid, ses);
+ if (rc)
+ cifs_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
+ __func__, rc);
_free_xid(xid);
}
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_del_init(&ses->smb_ses_list);
+ spin_unlock(&cifs_tcp_ses_lock);
+
sesInfoFree(ses);
cifs_put_tcp_session(server);
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 7ddddf2e2504..cf6aedc59c21 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2737,8 +2737,8 @@ cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
/* go while there's data to be copied and no errors */
if (copy && !rc) {
pdata = kmap(page);
- rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
- (int)copy);
+ rc = memcpy_toiovecend(iov_iter_iovec(&ii), pdata,
+ ii.iov_offset, (int)copy);
kunmap(page);
if (!rc) {
*copied += copy;
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index b3258f35e88a..8d4b7bc8ae91 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -27,7 +27,7 @@ void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
{
server->fscache =
fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
- &cifs_fscache_server_index_def, server);
+ &cifs_fscache_server_index_def, server, true);
cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
__func__, server, server->fscache);
}
@@ -46,7 +46,7 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
tcon->fscache =
fscache_acquire_cookie(server->fscache,
- &cifs_fscache_super_index_def, tcon);
+ &cifs_fscache_super_index_def, tcon, true);
cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
__func__, server->fscache, tcon->fscache);
}
@@ -69,7 +69,7 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) {
cifsi->fscache = fscache_acquire_cookie(tcon->fscache,
- &cifs_fscache_inode_object_def, cifsi);
+ &cifs_fscache_inode_object_def, cifsi, true);
cifs_dbg(FYI, "%s: got FH cookie (0x%p/0x%p)\n",
__func__, tcon->fscache, cifsi->fscache);
}
@@ -119,7 +119,7 @@ void cifs_fscache_reset_inode_cookie(struct inode *inode)
cifsi->fscache = fscache_acquire_cookie(
cifs_sb_master_tcon(cifs_sb)->fscache,
&cifs_fscache_inode_object_def,
- cifsi);
+ cifsi, true);
cifs_dbg(FYI, "%s: new cookie 0x%p oldcookie 0x%p\n",
__func__, cifsi->fscache, old);
}
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 3e0845585853..d353f6cc55aa 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -3,7 +3,7 @@
*
* vfs operations that deal with io control
*
- * Copyright (C) International Business Machines Corp., 2005,2007
+ * Copyright (C) International Business Machines Corp., 2005,2013
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -22,25 +22,122 @@
*/
#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/btrfs.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifsfs.h"
+static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
+ unsigned long srcfd, u64 off, u64 len, u64 destoff)
+{
+ int rc;
+ struct cifsFileInfo *smb_file_target = dst_file->private_data;
+ struct inode *target_inode = file_inode(dst_file);
+ struct cifs_tcon *target_tcon;
+ struct fd src_file;
+ struct cifsFileInfo *smb_file_src;
+ struct inode *src_inode;
+ struct cifs_tcon *src_tcon;
+
+ cifs_dbg(FYI, "ioctl clone range\n");
+ /* the destination must be opened for writing */
+ if (!(dst_file->f_mode & FMODE_WRITE)) {
+ cifs_dbg(FYI, "file target not open for write\n");
+ return -EINVAL;
+ }
+
+ /* check if target volume is readonly and take reference */
+ rc = mnt_want_write_file(dst_file);
+ if (rc) {
+ cifs_dbg(FYI, "mnt_want_write failed with rc %d\n", rc);
+ return rc;
+ }
+
+ src_file = fdget(srcfd);
+ if (!src_file.file) {
+ rc = -EBADF;
+ goto out_drop_write;
+ }
+
+ if ((!src_file.file->private_data) || (!dst_file->private_data)) {
+ rc = -EBADF;
+ cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+ goto out_fput;
+ }
+
+ rc = -EXDEV;
+ smb_file_target = dst_file->private_data;
+ smb_file_src = src_file.file->private_data;
+ src_tcon = tlink_tcon(smb_file_src->tlink);
+ target_tcon = tlink_tcon(smb_file_target->tlink);
+
+ /* check if source and target are on same tree connection */
+ if (src_tcon != target_tcon) {
+ cifs_dbg(VFS, "file copy src and target on different volume\n");
+ goto out_fput;
+ }
+
+ src_inode = src_file.file->f_dentry->d_inode;
+
+ /*
+ * Note: the cifs case is easier than btrfs since the server is
+ * responsible for checking proper open modes and file types, and if
+ * it wants, the server could even support copying a range where
+ * source = target.
+ */
+
+ /*
+ * Lock both inodes in address order so that two racing ioctls on the
+ * same files cannot deadlock; btrfs does a similar check.
+ */
+ if (target_inode < src_inode) {
+ mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
+ } else {
+ mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD);
+ }
+
+ /* determine range to clone */
+ rc = -EINVAL;
+ if (off + len > src_inode->i_size || off + len < off)
+ goto out_unlock;
+ if (len == 0)
+ len = src_inode->i_size - off;
+
+ cifs_dbg(FYI, "about to flush pages\n");
+ /* should we flush the first and last pages first? */
+ truncate_inode_pages_range(&target_inode->i_data, destoff,
+ PAGE_CACHE_ALIGN(destoff + len)-1);
+
+ if (target_tcon->ses->server->ops->clone_range)
+ rc = target_tcon->ses->server->ops->clone_range(xid,
+ smb_file_src, smb_file_target, off, len, destoff);
+
+ /* force revalidation of the target file's size and timestamps now
+ that the target has been updated on the server */
+ CIFS_I(target_inode)->time = 0;
+out_unlock:
+ mutex_unlock(&src_inode->i_mutex);
+ mutex_unlock(&target_inode->i_mutex);
+out_fput:
+ fdput(src_file);
+out_drop_write:
+ mnt_drop_write_file(dst_file);
+ return rc;
+}
+
long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
{
struct inode *inode = file_inode(filep);
int rc = -ENOTTY; /* strange error - but the precedent */
unsigned int xid;
struct cifs_sb_info *cifs_sb;
-#ifdef CONFIG_CIFS_POSIX
struct cifsFileInfo *pSMBFile = filep->private_data;
struct cifs_tcon *tcon;
__u64 ExtAttrBits = 0;
- __u64 ExtAttrMask = 0;
__u64 caps;
-#endif /* CONFIG_CIFS_POSIX */
xid = get_xid();
@@ -49,13 +146,14 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
cifs_sb = CIFS_SB(inode->i_sb);
switch (command) {
-#ifdef CONFIG_CIFS_POSIX
case FS_IOC_GETFLAGS:
if (pSMBFile == NULL)
break;
tcon = tlink_tcon(pSMBFile->tlink);
caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+#ifdef CONFIG_CIFS_POSIX
if (CIFS_UNIX_EXTATTR_CAP & caps) {
+ __u64 ExtAttrMask = 0;
rc = CIFSGetExtAttr(xid, tcon,
pSMBFile->fid.netfid,
&ExtAttrBits, &ExtAttrMask);
@@ -63,29 +161,53 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
rc = put_user(ExtAttrBits &
FS_FL_USER_VISIBLE,
(int __user *)arg);
+ if (rc != EOPNOTSUPP)
+ break;
+ }
+#endif /* CONFIG_CIFS_POSIX */
+ rc = 0;
+ if (CIFS_I(inode)->cifsAttrs & ATTR_COMPRESSED) {
+ /* add in the compressed bit */
+ ExtAttrBits = FS_COMPR_FL;
+ rc = put_user(ExtAttrBits & FS_FL_USER_VISIBLE,
+ (int __user *)arg);
}
break;
-
case FS_IOC_SETFLAGS:
if (pSMBFile == NULL)
break;
tcon = tlink_tcon(pSMBFile->tlink);
caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
- if (CIFS_UNIX_EXTATTR_CAP & caps) {
- if (get_user(ExtAttrBits, (int __user *)arg)) {
- rc = -EFAULT;
- break;
- }
- /*
- * rc = CIFSGetExtAttr(xid, tcon,
- * pSMBFile->fid.netfid,
- * extAttrBits,
- * &ExtAttrMask);
- */
+
+ if (get_user(ExtAttrBits, (int __user *)arg)) {
+ rc = -EFAULT;
+ break;
+ }
+
+ /*
+ * if (CIFS_UNIX_EXTATTR_CAP & caps)
+ * rc = CIFSSetExtAttr(xid, tcon,
+ * pSMBFile->fid.netfid,
+ * extAttrBits,
+ * &ExtAttrMask);
+ * if (rc != EOPNOTSUPP)
+ * break;
+ */
+
+ /* Currently the only flag we can set is the compressed flag */
+ if ((ExtAttrBits & FS_COMPR_FL) == 0)
+ break;
+
+ /* Try to set compress flag */
+ if (tcon->ses->server->ops->set_compression) {
+ rc = tcon->ses->server->ops->set_compression(
+ xid, tcon, pSMBFile);
+ cifs_dbg(FYI, "set compress flag rc %d\n", rc);
}
- cifs_dbg(FYI, "set flags not implemented yet\n");
break;
-#endif /* CONFIG_CIFS_POSIX */
+ case BTRFS_IOC_CLONE:
+ rc = cifs_ioctl_clone(xid, filep, arg, 0, 0, 0);
+ break;
default:
cifs_dbg(FYI, "unsupported ioctl\n");
break;
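
The new BTRFS_IOC_CLONE case above reuses the btrfs clone ioctl number so that existing user-space tooling can request a server-side copy on a CIFS mount. A minimal caller might look like the sketch below — an illustration only, not part of the patch, assuming both files sit on the same SMB2/3 mount with a server that implements the copychunk operation:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/btrfs.h>        /* BTRFS_IOC_CLONE */

    int main(int argc, char **argv)
    {
            int src, dst, rc;

            if (argc != 3) {
                    fprintf(stderr, "usage: %s <src> <dst>\n", argv[0]);
                    return 1;
            }
            src = open(argv[1], O_RDONLY);
            dst = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
            if (src < 0 || dst < 0) {
                    perror("open");
                    return 1;
            }
            /* whole-file clone: the handler above treats len == 0 as "to EOF" */
            rc = ioctl(dst, BTRFS_IOC_CLONE, src);
            if (rc)
                    perror("BTRFS_IOC_CLONE");
            close(src);
            close(dst);
            return rc ? 1 : 0;
    }

The data never travels through the client: cifs_ioctl_clone() only flushes and invalidates the target range locally, then hands the offsets to the server via the clone_range operation (SMB2's copychunk in smb2_clone_range()).
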
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 7e36ceba0c7a..cc0234710ddb 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -621,10 +621,3 @@ symlink_exit:
free_xid(xid);
return rc;
}
-
-void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
-{
- char *p = nd_get_link(nd);
- if (!IS_ERR(p))
- kfree(p);
-}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 138a011633fe..2f9f3790679d 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -278,7 +278,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
}
static int
-check_smb_hdr(struct smb_hdr *smb, __u16 mid)
+check_smb_hdr(struct smb_hdr *smb)
{
/* does it have the right SMB "signature" ? */
if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
@@ -287,13 +287,6 @@ check_smb_hdr(struct smb_hdr *smb, __u16 mid)
return 1;
}
- /* Make sure that message ids match */
- if (mid != smb->Mid) {
- cifs_dbg(VFS, "Mids do not match. received=%u expected=%u\n",
- smb->Mid, mid);
- return 1;
- }
-
/* if it's a response then accept */
if (smb->Flags & SMBFLG_RESPONSE)
return 0;
@@ -302,7 +295,8 @@ check_smb_hdr(struct smb_hdr *smb, __u16 mid)
if (smb->Command == SMB_COM_LOCKING_ANDX)
return 0;
- cifs_dbg(VFS, "Server sent request, not response. mid=%u\n", smb->Mid);
+ cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
+ get_mid(smb));
return 1;
}
@@ -310,7 +304,6 @@ int
checkSMB(char *buf, unsigned int total_read)
{
struct smb_hdr *smb = (struct smb_hdr *)buf;
- __u16 mid = smb->Mid;
__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
__u32 clc_len; /* calculated length */
cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
@@ -348,7 +341,7 @@ checkSMB(char *buf, unsigned int total_read)
}
/* otherwise, there is enough to get to the BCC */
- if (check_smb_hdr(smb, mid))
+ if (check_smb_hdr(smb))
return -EIO;
clc_len = smbCalcSize(smb);
@@ -359,6 +352,7 @@ checkSMB(char *buf, unsigned int total_read)
}
if (4 + rfclen != clc_len) {
+ __u16 mid = get_mid(smb);
/* check if bcc wrapped around for large read responses */
if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
/* check if lengths match mod 64K */
@@ -366,11 +360,11 @@ checkSMB(char *buf, unsigned int total_read)
return 0; /* bcc wrapped */
}
cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
- clc_len, 4 + rfclen, smb->Mid);
+ clc_len, 4 + rfclen, mid);
if (4 + rfclen < clc_len) {
cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
- rfclen, smb->Mid);
+ rfclen, mid);
return -EIO;
} else if (rfclen > clc_len + 512) {
/*
@@ -383,7 +377,7 @@ checkSMB(char *buf, unsigned int total_read)
* data to 512 bytes.
*/
cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
- rfclen, smb->Mid);
+ rfclen, mid);
return -EIO;
}
}
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 8233b174de3d..384cffe42850 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -67,7 +67,7 @@ send_nt_cancel(struct TCP_Server_Info *server, void *buf,
mutex_unlock(&server->srv_mutex);
cifs_dbg(FYI, "issued NT_CANCEL for mid %u, rc = %d\n",
- in_buf->Mid, rc);
+ get_mid(in_buf), rc);
return rc;
}
@@ -101,7 +101,7 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
spin_lock(&GlobalMid_Lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
- if (mid->mid == buf->Mid &&
+ if (compare_mid(mid->mid, buf) &&
mid->mid_state == MID_REQUEST_SUBMITTED &&
le16_to_cpu(mid->command) == buf->Command) {
spin_unlock(&GlobalMid_Lock);
@@ -807,6 +807,13 @@ out:
}
static int
+cifs_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile)
+{
+ return CIFSSMB_set_compression(xid, tcon, cfile->fid.netfid);
+}
+
+static int
cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
const char *path, struct cifs_sb_info *cifs_sb,
struct cifs_fid *fid, __u16 search_flags,
@@ -956,6 +963,7 @@ struct smb_version_operations smb1_operations = {
.set_path_size = CIFSSMBSetEOF,
.set_file_size = CIFSSMBSetFileSize,
.set_file_info = smb_set_file_info,
+ .set_compression = cifs_set_compression,
.echo = CIFSSMBEcho,
.mkdir = CIFSSMBMkDir,
.mkdir_setinfo = cifs_mkdir_setinfo,
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 861b33214144..11dde4b24f8a 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -209,6 +209,94 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
return rsize;
}
+#ifdef CONFIG_CIFS_STATS2
+static int
+SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
+{
+ int rc;
+ unsigned int ret_data_len = 0;
+ struct network_interface_info_ioctl_rsp *out_buf;
+
+ rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+ FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
+ NULL /* no data input */, 0 /* no data input */,
+ (char **)&out_buf, &ret_data_len);
+
+ if ((rc == 0) && (ret_data_len > 0)) {
+ /* Dump info on first interface */
+ cifs_dbg(FYI, "Adapter Capability 0x%x\t",
+ le32_to_cpu(out_buf->Capability));
+ cifs_dbg(FYI, "Link Speed %lld\n",
+ le64_to_cpu(out_buf->LinkSpeed));
+ } else
+ cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
+
+ return rc;
+}
+#endif /* STATS2 */
+
+static void
+smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
+{
+ int rc;
+ __le16 srch_path = 0; /* Null - open root of share */
+ u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ struct cifs_open_parms oparms;
+ struct cifs_fid fid;
+
+ oparms.tcon = tcon;
+ oparms.desired_access = FILE_READ_ATTRIBUTES;
+ oparms.disposition = FILE_OPEN;
+ oparms.create_options = 0;
+ oparms.fid = &fid;
+ oparms.reconnect = false;
+
+ rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL);
+ if (rc)
+ return;
+
+#ifdef CONFIG_CIFS_STATS2
+ SMB3_request_interfaces(xid, tcon);
+#endif /* STATS2 */
+
+ SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+ FS_ATTRIBUTE_INFORMATION);
+ SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+ FS_DEVICE_INFORMATION);
+ SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+ FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
+ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+ return;
+}
+
+static void
+smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
+{
+ int rc;
+ __le16 srch_path = 0; /* Null - open root of share */
+ u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ struct cifs_open_parms oparms;
+ struct cifs_fid fid;
+
+ oparms.tcon = tcon;
+ oparms.desired_access = FILE_READ_ATTRIBUTES;
+ oparms.disposition = FILE_OPEN;
+ oparms.create_options = 0;
+ oparms.fid = &fid;
+ oparms.reconnect = false;
+
+ rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL);
+ if (rc)
+ return;
+
+ SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+ FS_ATTRIBUTE_INFORMATION);
+ SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+ FS_DEVICE_INFORMATION);
+ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+ return;
+}
+
static int
smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path)
@@ -304,7 +392,19 @@ smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
seq_puts(m, " ASYMMETRIC,");
if (tcon->capabilities == 0)
seq_puts(m, " None");
+ if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
+ seq_puts(m, " Aligned,");
+ if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
+ seq_puts(m, " Partition Aligned,");
+ if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
+ seq_puts(m, " SSD,");
+ if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
+ seq_puts(m, " TRIM-support,");
+
seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
+ if (tcon->perf_sector_size)
+ seq_printf(m, "\tOptimal sector size: 0x%x",
+ tcon->perf_sector_size);
}
static void
@@ -394,6 +494,85 @@ smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
}
static int
+SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid,
+ struct copychunk_ioctl *pcchunk)
+{
+ int rc;
+ unsigned int ret_data_len;
+ struct resume_key_req *res_key;
+
+ rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
+ FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
+ NULL, 0 /* no input */,
+ (char **)&res_key, &ret_data_len);
+
+ if (rc) {
+ cifs_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
+ goto req_res_key_exit;
+ }
+ if (ret_data_len < sizeof(struct resume_key_req)) {
+ cifs_dbg(VFS, "Invalid refcopy resume key length\n");
+ rc = -EINVAL;
+ goto req_res_key_exit;
+ }
+ memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
+
+req_res_key_exit:
+ kfree(res_key);
+ return rc;
+}
+
+static int
+smb2_clone_range(const unsigned int xid,
+ struct cifsFileInfo *srcfile,
+ struct cifsFileInfo *trgtfile, u64 src_off,
+ u64 len, u64 dest_off)
+{
+ int rc;
+ unsigned int ret_data_len;
+ struct copychunk_ioctl *pcchunk;
+ char *retbuf = NULL;
+
+ pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
+
+ if (pcchunk == NULL)
+ return -ENOMEM;
+
+ cifs_dbg(FYI, "in smb2_clone_range - about to call request res key\n");
+ /* Request a key from the server to identify the source of the copy */
+ rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
+ srcfile->fid.persistent_fid,
+ srcfile->fid.volatile_fid, pcchunk);
+
+ /* Note: request_res_key sets res_key to NULL only if rc != 0 */
+ if (rc)
+ return rc;
+
+ /* For now the array is only one chunk long; make it more flexible later */
+ pcchunk->ChunkCount = __constant_cpu_to_le32(1);
+ pcchunk->Reserved = 0;
+ pcchunk->SourceOffset = cpu_to_le64(src_off);
+ pcchunk->TargetOffset = cpu_to_le64(dest_off);
+ pcchunk->Length = cpu_to_le32(len);
+ pcchunk->Reserved2 = 0;
+
+ /* Request that server copy to target from src file identified by key */
+ rc = SMB2_ioctl(xid, tlink_tcon(trgtfile->tlink),
+ trgtfile->fid.persistent_fid,
+ trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
+ true /* is_fsctl */, (char *)pcchunk,
+ sizeof(struct copychunk_ioctl), &retbuf, &ret_data_len);
+
+ /* BB need to special case rc = EINVAL to alter chunk size */
+
+ cifs_dbg(FYI, "rc %d data length out %d\n", rc, ret_data_len);
+
+ kfree(pcchunk);
+ return rc;
+}
+
+static int
smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *fid)
{
@@ -446,6 +625,14 @@ smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
}
static int
+smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile)
+{
+ return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid);
+}
+
+static int
smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
const char *path, struct cifs_sb_info *cifs_sb,
struct cifs_fid *fid, __u16 search_flags,
@@ -865,6 +1052,7 @@ struct smb_version_operations smb20_operations = {
.logoff = SMB2_logoff,
.tree_connect = SMB2_tcon,
.tree_disconnect = SMB2_tdis,
+ .qfs_tcon = smb2_qfs_tcon,
.is_path_accessible = smb2_is_path_accessible,
.can_echo = smb2_can_echo,
.echo = SMB2_echo,
@@ -874,6 +1062,7 @@ struct smb_version_operations smb20_operations = {
.set_path_size = smb2_set_path_size,
.set_file_size = smb2_set_file_size,
.set_file_info = smb2_set_file_info,
+ .set_compression = smb2_set_compression,
.mkdir = smb2_mkdir,
.mkdir_setinfo = smb2_mkdir_setinfo,
.rmdir = smb2_rmdir,
@@ -907,6 +1096,7 @@ struct smb_version_operations smb20_operations = {
.set_oplock_level = smb2_set_oplock_level,
.create_lease_buf = smb2_create_lease_buf,
.parse_lease_buf = smb2_parse_lease_buf,
+ .clone_range = smb2_clone_range,
};
struct smb_version_operations smb21_operations = {
@@ -936,6 +1126,7 @@ struct smb_version_operations smb21_operations = {
.logoff = SMB2_logoff,
.tree_connect = SMB2_tcon,
.tree_disconnect = SMB2_tdis,
+ .qfs_tcon = smb2_qfs_tcon,
.is_path_accessible = smb2_is_path_accessible,
.can_echo = smb2_can_echo,
.echo = SMB2_echo,
@@ -945,6 +1136,7 @@ struct smb_version_operations smb21_operations = {
.set_path_size = smb2_set_path_size,
.set_file_size = smb2_set_file_size,
.set_file_info = smb2_set_file_info,
+ .set_compression = smb2_set_compression,
.mkdir = smb2_mkdir,
.mkdir_setinfo = smb2_mkdir_setinfo,
.rmdir = smb2_rmdir,
@@ -978,6 +1170,7 @@ struct smb_version_operations smb21_operations = {
.set_oplock_level = smb21_set_oplock_level,
.create_lease_buf = smb2_create_lease_buf,
.parse_lease_buf = smb2_parse_lease_buf,
+ .clone_range = smb2_clone_range,
};
struct smb_version_operations smb30_operations = {
@@ -1008,6 +1201,7 @@ struct smb_version_operations smb30_operations = {
.logoff = SMB2_logoff,
.tree_connect = SMB2_tcon,
.tree_disconnect = SMB2_tdis,
+ .qfs_tcon = smb3_qfs_tcon,
.is_path_accessible = smb2_is_path_accessible,
.can_echo = smb2_can_echo,
.echo = SMB2_echo,
@@ -1017,6 +1211,7 @@ struct smb_version_operations smb30_operations = {
.set_path_size = smb2_set_path_size,
.set_file_size = smb2_set_file_size,
.set_file_info = smb2_set_file_info,
+ .set_compression = smb2_set_compression,
.mkdir = smb2_mkdir,
.mkdir_setinfo = smb2_mkdir_setinfo,
.rmdir = smb2_rmdir,
@@ -1051,6 +1246,7 @@ struct smb_version_operations smb30_operations = {
.set_oplock_level = smb3_set_oplock_level,
.create_lease_buf = smb3_create_lease_buf,
.parse_lease_buf = smb3_parse_lease_buf,
+ .clone_range = smb2_clone_range,
};
struct smb_version_values smb20_values = {
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index edccb5252462..8ab05b0d6778 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1137,6 +1137,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
cifs_dbg(FYI, "SMB2 IOCTL\n");
+ *out_data = NULL;
/* zero out returned data len, in case of error */
if (plen)
*plen = 0;
@@ -1182,11 +1183,23 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
req->Flags = 0;
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field */
- iov[0].iov_len = get_rfc1002_length(req) + 4;
- if (indatalen)
- inc_rfc1001_len(req, indatalen);
+ /*
+ * If there is no input data, the ioctl struct size in the protocol
+ * spec still includes a one-byte data buffer. When input data is
+ * passed to the ioctl we do not want to count that byte twice, so
+ * we do not send the dummy byte in iovec[0] if we are sending input
+ * data (in iovec[1]). We must also add 4 bytes to the first iovec
+ * to allow for the rfc1002 length field.
+ */
+
+ if (indatalen) {
+ iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+ inc_rfc1001_len(req, indatalen - 1);
+ } else
+ iov[0].iov_len = get_rfc1002_length(req) + 4;
+
rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
@@ -1234,6 +1247,33 @@ ioctl_exit:
return rc;
}
+/*
+ * Individual callers to ioctl worker function follow
+ */
+
+int
+SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid)
+{
+ int rc;
+ char *res_key = NULL;
+ struct compress_ioctl fsctl_input;
+ char *ret_data = NULL;
+
+ fsctl_input.CompressionState =
+ __constant_cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
+
+ rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
+ FSCTL_SET_COMPRESSION, true /* is_fsctl */,
+ (char *)&fsctl_input /* data input */,
+ 2 /* in data len */, &ret_data /* out data */, NULL);
+
+ cifs_dbg(FYI, "set compression rc %d\n", rc);
+ kfree(res_key);
+
+ return rc;
+}
+
int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid)
@@ -2299,7 +2339,7 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
- goto qinf_exit;
+ goto qfsinf_exit;
}
rsp = (struct smb2_query_info_rsp *)iov.iov_base;
@@ -2311,7 +2351,70 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
if (!rc)
copy_fs_info_to_kstatfs(info, fsdata);
-qinf_exit:
+qfsinf_exit:
+ free_rsp_buf(resp_buftype, iov.iov_base);
+ return rc;
+}
+
+int
+SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, int level)
+{
+ struct smb2_query_info_rsp *rsp = NULL;
+ struct kvec iov;
+ int rc = 0;
+ int resp_buftype, max_len, min_len;
+ struct cifs_ses *ses = tcon->ses;
+ unsigned int rsp_len, offset;
+
+ if (level == FS_DEVICE_INFORMATION) {
+ max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
+ min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
+ } else if (level == FS_ATTRIBUTE_INFORMATION) {
+ max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
+ min_len = MIN_FS_ATTR_INFO_SIZE;
+ } else if (level == FS_SECTOR_SIZE_INFORMATION) {
+ max_len = sizeof(struct smb3_fs_ss_info);
+ min_len = sizeof(struct smb3_fs_ss_info);
+ } else {
+ cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
+ return -EINVAL;
+ }
+
+ rc = build_qfs_info_req(&iov, tcon, level, max_len,
+ persistent_fid, volatile_fid);
+ if (rc)
+ return rc;
+
+ rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
+ if (rc) {
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+ goto qfsattr_exit;
+ }
+ rsp = (struct smb2_query_info_rsp *)iov.iov_base;
+
+ rsp_len = le32_to_cpu(rsp->OutputBufferLength);
+ offset = le16_to_cpu(rsp->OutputBufferOffset);
+ rc = validate_buf(offset, rsp_len, &rsp->hdr, min_len);
+ if (rc)
+ goto qfsattr_exit;
+
+ if (level == FS_ATTRIBUTE_INFORMATION)
+ memcpy(&tcon->fsAttrInfo, 4 /* RFC1001 len */ + offset
+ + (char *)&rsp->hdr, min_t(unsigned int,
+ rsp_len, max_len));
+ else if (level == FS_DEVICE_INFORMATION)
+ memcpy(&tcon->fsDevInfo, 4 /* RFC1001 len */ + offset
+ + (char *)&rsp->hdr, sizeof(FILE_SYSTEM_DEVICE_INFO));
+ else if (level == FS_SECTOR_SIZE_INFORMATION) {
+ struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
+ (4 /* RFC1001 len */ + offset + (char *)&rsp->hdr);
+ tcon->ss_flags = le32_to_cpu(ss_info->Flags);
+ tcon->perf_sector_size =
+ le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
+ }
+
+qfsattr_exit:
free_rsp_buf(resp_buftype, iov.iov_base);
return rc;
}
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index b83d0118a757..b50a129572cd 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -534,9 +534,16 @@ struct create_durable {
} Data;
} __packed;
+#define COPY_CHUNK_RES_KEY_SIZE 24
+struct resume_key_req {
+ char ResumeKey[COPY_CHUNK_RES_KEY_SIZE];
+ __le32 ContextLength; /* MBZ */
+ char Context[0]; /* ignored, Windows sets to 4 bytes of zero */
+} __packed;
+
/* this goes in the ioctl buffer when doing a copychunk request */
struct copychunk_ioctl {
- char SourceKey[24];
+ char SourceKey[COPY_CHUNK_RES_KEY_SIZE];
__le32 ChunkCount; /* we are only sending 1 */
__le32 Reserved;
/* array will only be one chunk long for us */
@@ -546,6 +553,12 @@ struct copychunk_ioctl {
__u32 Reserved2;
} __packed;
+struct copychunk_ioctl_rsp {
+ __le32 ChunksWritten;
+ __le32 ChunkBytesWritten;
+ __le32 TotalBytesWritten;
+} __packed;
+
/* Response and Request are the same format */
struct validate_negotiate_info {
__le32 Capabilities;
@@ -569,6 +582,10 @@ struct network_interface_info_ioctl_rsp {
#define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */
+struct compress_ioctl {
+ __le16 CompressionState; /* See cifspdu.h for possible flag values */
+} __packed;
+
struct smb2_ioctl_req {
struct smb2_hdr hdr;
__le16 StructureSize; /* Must be 57 */
@@ -584,7 +601,7 @@ struct smb2_ioctl_req {
__le32 MaxOutputResponse;
__le32 Flags;
__u32 Reserved2;
- char Buffer[0];
+ __u8 Buffer[0];
} __packed;
struct smb2_ioctl_rsp {
@@ -870,14 +887,16 @@ struct smb2_lease_ack {
/* File System Information Classes */
#define FS_VOLUME_INFORMATION 1 /* Query */
-#define FS_LABEL_INFORMATION 2 /* Set */
+#define FS_LABEL_INFORMATION 2 /* Local only */
#define FS_SIZE_INFORMATION 3 /* Query */
#define FS_DEVICE_INFORMATION 4 /* Query */
#define FS_ATTRIBUTE_INFORMATION 5 /* Query */
#define FS_CONTROL_INFORMATION 6 /* Query, Set */
#define FS_FULL_SIZE_INFORMATION 7 /* Query */
#define FS_OBJECT_ID_INFORMATION 8 /* Query, Set */
-#define FS_DRIVER_PATH_INFORMATION 9 /* Query */
+#define FS_DRIVER_PATH_INFORMATION 9 /* Local only */
+#define FS_VOLUME_FLAGS_INFORMATION 10 /* Local only */
+#define FS_SECTOR_SIZE_INFORMATION 11 /* SMB3 or later. Query */
struct smb2_fs_full_size_info {
__le64 TotalAllocationUnits;
@@ -887,6 +906,22 @@ struct smb2_fs_full_size_info {
__le32 BytesPerSector;
} __packed;
+#define SSINFO_FLAGS_ALIGNED_DEVICE 0x00000001
+#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
+#define SSINFO_FLAGS_NO_SEEK_PENALTY 0x00000004
+#define SSINFO_FLAGS_TRIM_ENABLED 0x00000008
+
+/* sector size info struct */
+struct smb3_fs_ss_info {
+ __le32 LogicalBytesPerSector;
+ __le32 PhysicalBytesPerSectorForAtomicity;
+ __le32 PhysicalBytesPerSectorForPerf;
+ __le32 FileSystemEffectivePhysicalBytesPerSectorForAtomicity;
+ __le32 Flags;
+ __le32 ByteOffsetForSectorAlignment;
+ __le32 ByteOffsetForPartitionAlignment;
+} __packed;
+
/* partial list of QUERY INFO levels */
#define FILE_DIRECTORY_INFORMATION 1
#define FILE_FULL_DIRECTORY_INFORMATION 2
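
The new smb3_fs_ss_info struct has to match the SMB3 sector-size info class byte for byte, hence the fixed-width __le32 fields and __packed. A quick standalone check (uint32_t standing in for __le32 so it compiles outside the kernel) confirms seven 32-bit fields pack to 28 bytes with no holes:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the on-the-wire layout above. */
struct smb3_fs_ss_info_layout {
	uint32_t LogicalBytesPerSector;
	uint32_t PhysicalBytesPerSectorForAtomicity;
	uint32_t PhysicalBytesPerSectorForPerf;
	uint32_t FileSystemEffectivePhysicalBytesPerSectorForAtomicity;
	uint32_t Flags;
	uint32_t ByteOffsetForSectorAlignment;
	uint32_t ByteOffsetForPartitionAlignment;
} __attribute__((packed));

int main(void)
{
	printf("size  = %zu\n", sizeof(struct smb3_fs_ss_info_layout));          /* 28 */
	printf("Flags = %zu\n", offsetof(struct smb3_fs_ss_info_layout, Flags)); /* 16 */
	return 0;
}
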
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index e3fb4801ee96..313813e4c19b 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -142,12 +142,16 @@ extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon,
extern int SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
FILE_BASIC_INFO *buf);
+extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid);
extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
const u64 persistent_fid, const u64 volatile_fid,
const __u8 oplock_level);
extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
struct kstatfs *FSData);
+extern int SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_file_id, u64 volatile_file_id, int lvl);
extern int SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
const __u64 persist_fid, const __u64 volatile_fid,
const __u32 pid, const __u64 length, const __u64 offset,
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 340abca3aa52..59c748ce872f 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -466,7 +466,7 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
static inline void
smb2_seq_num_into_buf(struct TCP_Server_Info *server, struct smb2_hdr *hdr)
{
- hdr->MessageId = get_next_mid(server);
+ hdr->MessageId = get_next_mid64(server);
}
static struct mid_q_entry *
@@ -516,13 +516,19 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct smb2_hdr *buf,
return -EAGAIN;
}
- if (ses->status != CifsGood) {
- /* check if SMB2 session is bad because we are setting it up */
+ if (ses->status == CifsNew) {
if ((buf->Command != SMB2_SESSION_SETUP) &&
(buf->Command != SMB2_NEGOTIATE))
return -EAGAIN;
/* else ok - we are setting up session */
}
+
+ if (ses->status == CifsExiting) {
+ if (buf->Command != SMB2_LOGOFF)
+ return -EAGAIN;
+ /* else ok - we are shutting down the session */
+ }
+
*mid = smb2_mid_entry_alloc(buf, ses->server);
if (*mid == NULL)
return -ENOMEM;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 800b938e4061..b37570952846 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -58,7 +58,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
return temp;
else {
memset(temp, 0, sizeof(struct mid_q_entry));
- temp->mid = smb_buffer->Mid; /* always LE */
+ temp->mid = get_mid(smb_buffer);
temp->pid = current->pid;
temp->command = cpu_to_le16(smb_buffer->Command);
cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
@@ -431,13 +431,20 @@ static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
return -EAGAIN;
}
- if (ses->status != CifsGood) {
- /* check if SMB session is bad because we are setting it up */
+ if (ses->status == CifsNew) {
if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
(in_buf->Command != SMB_COM_NEGOTIATE))
return -EAGAIN;
/* else ok - we are setting up session */
}
+
+ if (ses->status == CifsExiting) {
+		/* check if SMB session is being shut down */
+ if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
+ return -EAGAIN;
+ /* else ok - we are shutting down session */
+ }
+
*ppmidQ = AllocMidQEntry(in_buf, ses->server);
if (*ppmidQ == NULL)
return -ENOMEM;
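
Both allocate_mid() (SMB1) and smb2_get_mid_entry() (SMB2) now gate outgoing commands on session state: a CifsNew session only accepts negotiate and session-setup, and a CifsExiting session only accepts logoff. A hedged userspace sketch of that gate follows; the state and command codes are placeholders, the real values live in the CIFS headers.

#include <stdbool.h>
#include <stdio.h>

enum ses_status { SES_NEW, SES_GOOD, SES_EXITING };
enum smb_cmd    { CMD_NEGOTIATE, CMD_SESSION_SETUP, CMD_LOGOFF, CMD_READ };

/* Returns false where the kernel paths above would return -EAGAIN. */
static bool cmd_allowed(enum ses_status status, enum smb_cmd cmd)
{
	if (status == SES_NEW)
		return cmd == CMD_NEGOTIATE || cmd == CMD_SESSION_SETUP;
	if (status == SES_EXITING)
		return cmd == CMD_LOGOFF;
	return true;	/* SES_GOOD: any command may be sent */
}

int main(void)
{
	printf("READ while exiting:   %d\n", cmd_allowed(SES_EXITING, CMD_READ));   /* 0 */
	printf("LOGOFF while exiting: %d\n", cmd_allowed(SES_EXITING, CMD_LOGOFF)); /* 1 */
	return 0;
}
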
diff --git a/fs/dcache.c b/fs/dcache.c
index 41000305d716..fb83a158ebcc 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -542,7 +542,7 @@ EXPORT_SYMBOL(d_drop);
* If ref is non-zero, then decrement the refcount too.
* Returns dentry requiring refcount drop, or NULL if we're done.
*/
-static inline struct dentry *
+static struct dentry *
dentry_kill(struct dentry *dentry, int unlock_on_failure)
__releases(dentry->d_lock)
{
@@ -630,7 +630,8 @@ repeat:
goto kill_it;
}
- dentry->d_flags |= DCACHE_REFERENCED;
+ if (!(dentry->d_flags & DCACHE_REFERENCED))
+ dentry->d_flags |= DCACHE_REFERENCED;
dentry_lru_add(dentry);
dentry->d_lockref.count--;
@@ -1331,14 +1332,6 @@ rename_retry:
* list is non-empty and continue searching.
*/
-/**
- * have_submounts - check for mounts over a dentry
- * @parent: dentry to check.
- *
- * Return true if the parent or its subdirectories contain
- * a mount point
- */
-
static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
{
int *ret = data;
@@ -1349,6 +1342,13 @@ static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
return D_WALK_CONTINUE;
}
+/**
+ * have_submounts - check for mounts over a dentry
+ * @parent: dentry to check.
+ *
+ * Return true if the parent or its subdirectories contain
+ * a mount point
+ */
int have_submounts(struct dentry *parent)
{
int ret = 0;
@@ -1801,6 +1801,32 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
EXPORT_SYMBOL(d_instantiate_unique);
+/**
+ * d_instantiate_no_diralias - instantiate a non-aliased dentry
+ * @entry: dentry to complete
+ * @inode: inode to attach to this dentry
+ *
+ * Fill in inode information in the entry. If a directory alias is found, then
+ * return an error. Together with d_materialise_unique() this guarantees that a
+ * directory inode may never have more than one alias.
+ */
+int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
+{
+ BUG_ON(!hlist_unhashed(&entry->d_alias));
+
+ spin_lock(&inode->i_lock);
+ if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
+ spin_unlock(&inode->i_lock);
+ return -EBUSY;
+ }
+ __d_instantiate(entry, inode);
+ spin_unlock(&inode->i_lock);
+ security_d_instantiate(entry, inode);
+
+ return 0;
+}
+EXPORT_SYMBOL(d_instantiate_no_diralias);
+
struct dentry *d_make_root(struct inode *root_inode)
{
struct dentry *res = NULL;
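
d_instantiate_no_diralias() enforces that a directory inode never gains a second alias: if the inode already has a dentry attached, the caller gets -EBUSY instead of a new alias. The toy model below captures only that invariant; the i_lock locking and hlist bookkeeping of the real VFS code are deliberately elided.

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* Toy "inode": remembers whether it is a directory and how many dentries
 * currently point at it. */
struct toy_inode { bool is_dir; int nr_aliases; };

/* Refuse to add a second alias to a directory inode, as in
 * d_instantiate_no_diralias(). */
static int attach_alias(struct toy_inode *inode)
{
	if (inode->is_dir && inode->nr_aliases > 0)
		return -EBUSY;
	inode->nr_aliases++;
	return 0;
}

int main(void)
{
	struct toy_inode dir = { .is_dir = true };

	printf("first attach:  %d\n", attach_alias(&dir));	/* 0 */
	printf("second attach: %d\n", attach_alias(&dir));	/* -EBUSY */
	return 0;
}
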
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 0e04142d5962..e84f8e1fe4c2 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -127,6 +127,7 @@ struct dio {
spinlock_t bio_lock; /* protects BIO fields below */
int page_errors; /* errno from get_user_pages() */
int is_async; /* is IO async ? */
+ int should_dirty; /* should we mark read pages dirty? */
bool defer_completion; /* defer AIO completion to workqueue? */
int io_error; /* IO error in completion path */
unsigned long refcount; /* direct_io_worker() and bios */
@@ -375,7 +376,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
bio = bio_alloc(GFP_KERNEL, nr_vecs);
bio->bi_bdev = bdev;
- bio->bi_sector = first_sector;
+ bio->bi_iter.bi_sector = first_sector;
if (dio->is_async)
bio->bi_end_io = dio_bio_end_aio;
else
@@ -403,7 +404,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
dio->refcount++;
spin_unlock_irqrestore(&dio->bio_lock, flags);
- if (dio->is_async && dio->rw == READ)
+ if (dio->is_async && dio->rw == READ && dio->should_dirty)
bio_set_pages_dirty(bio);
if (sdio->submit_io)
@@ -474,13 +475,14 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
if (!uptodate)
dio->io_error = -EIO;
- if (dio->is_async && dio->rw == READ) {
+ if (dio->is_async && dio->rw == READ && dio->should_dirty) {
bio_check_pages_dirty(bio); /* transfers ownership */
} else {
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
- if (dio->rw == READ && !PageCompound(page))
+ if (dio->rw == READ && !PageCompound(page) &&
+ dio->should_dirty)
set_page_dirty_lock(page);
page_cache_release(page);
}
@@ -719,7 +721,7 @@ static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
if (sdio->bio) {
loff_t cur_offset = sdio->cur_page_fs_offset;
loff_t bio_next_offset = sdio->logical_offset_in_bio +
- sdio->bio->bi_size;
+ sdio->bio->bi_iter.bi_size;
/*
* See whether this new request is contiguous with the old.
@@ -1081,6 +1083,101 @@ static inline int drop_refcount(struct dio *dio)
return ret2;
}
+static ssize_t direct_IO_iovec(const struct iovec *iov, unsigned long nr_segs,
+ struct dio *dio, struct dio_submit *sdio,
+ unsigned blkbits, struct buffer_head *map_bh)
+{
+ size_t bytes;
+ ssize_t retval = 0;
+ int seg;
+ unsigned long user_addr;
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ user_addr = (unsigned long)iov[seg].iov_base;
+ sdio->pages_in_io +=
+ ((user_addr + iov[seg].iov_len + PAGE_SIZE-1) /
+ PAGE_SIZE - user_addr / PAGE_SIZE);
+ }
+
+ dio->should_dirty = 1;
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ user_addr = (unsigned long)iov[seg].iov_base;
+ sdio->size += bytes = iov[seg].iov_len;
+
+ /* Index into the first page of the first block */
+ sdio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
+ sdio->final_block_in_request = sdio->block_in_file +
+ (bytes >> blkbits);
+ /* Page fetching state */
+ sdio->head = 0;
+ sdio->tail = 0;
+ sdio->curr_page = 0;
+
+ sdio->total_pages = 0;
+ if (user_addr & (PAGE_SIZE-1)) {
+ sdio->total_pages++;
+ bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
+ }
+ sdio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+ sdio->curr_user_address = user_addr;
+
+ retval = do_direct_IO(dio, sdio, map_bh);
+
+ dio->result += iov[seg].iov_len -
+ ((sdio->final_block_in_request - sdio->block_in_file) <<
+ blkbits);
+
+ if (retval) {
+ dio_cleanup(dio, sdio);
+ break;
+ }
+ } /* end iovec loop */
+
+ return retval;
+}
+
+static ssize_t direct_IO_bvec(struct bio_vec *bvec, unsigned long nr_segs,
+ struct dio *dio, struct dio_submit *sdio,
+ unsigned blkbits, struct buffer_head *map_bh)
+{
+ ssize_t retval = 0;
+ int seg;
+
+ sdio->pages_in_io += nr_segs;
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ sdio->size += bvec[seg].bv_len;
+
+ /* Index into the first page of the first block */
+ sdio->first_block_in_page = bvec[seg].bv_offset >> blkbits;
+ sdio->final_block_in_request = sdio->block_in_file +
+ (bvec[seg].bv_len >> blkbits);
+ /* Page fetching state */
+ sdio->curr_page = 0;
+ page_cache_get(bvec[seg].bv_page);
+ dio->pages[0] = bvec[seg].bv_page;
+ sdio->head = 0;
+ sdio->tail = 1;
+
+ sdio->total_pages = 1;
+ sdio->curr_user_address = 0;
+
+ retval = do_direct_IO(dio, sdio, map_bh);
+
+ dio->result += bvec[seg].bv_len -
+ ((sdio->final_block_in_request - sdio->block_in_file) <<
+ blkbits);
+
+ if (retval) {
+ dio_cleanup(dio, sdio);
+ break;
+ }
+ }
+
+ return retval;
+}
+
/*
* This is a library function for use by filesystem drivers.
*
@@ -1108,9 +1205,9 @@ static inline int drop_refcount(struct dio *dio)
*/
static inline ssize_t
do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, const struct iovec *iov, loff_t offset,
- unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
- dio_submit_t submit_io, int flags)
+ struct block_device *bdev, struct iov_iter *iter, loff_t offset,
+ get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io,
+ int flags)
{
int seg;
size_t size;
@@ -1122,10 +1219,9 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
loff_t end = offset;
struct dio *dio;
struct dio_submit sdio = { 0, };
- unsigned long user_addr;
- size_t bytes;
struct buffer_head map_bh = { 0, };
struct blk_plug plug;
+ unsigned long nr_segs = iter->nr_segs;
if (rw & WRITE)
rw = WRITE_ODIRECT;
@@ -1144,20 +1240,49 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
}
/* Check the memory alignment. Blocks cannot straddle pages */
- for (seg = 0; seg < nr_segs; seg++) {
- addr = (unsigned long)iov[seg].iov_base;
- size = iov[seg].iov_len;
- end += size;
- if (unlikely((addr & blocksize_mask) ||
- (size & blocksize_mask))) {
- if (bdev)
- blkbits = blksize_bits(
- bdev_logical_block_size(bdev));
- blocksize_mask = (1 << blkbits) - 1;
- if ((addr & blocksize_mask) || (size & blocksize_mask))
- goto out;
+ if (iov_iter_has_iovec(iter)) {
+ const struct iovec *iov = iov_iter_iovec(iter);
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ addr = (unsigned long)iov[seg].iov_base;
+ size = iov[seg].iov_len;
+ end += size;
+ if (unlikely((addr & blocksize_mask) ||
+ (size & blocksize_mask))) {
+ if (bdev)
+ blkbits = blksize_bits(
+ bdev_logical_block_size(bdev));
+ blocksize_mask = (1 << blkbits) - 1;
+ if ((addr & blocksize_mask) ||
+ (size & blocksize_mask))
+ goto out;
+ }
}
- }
+ } else if (iov_iter_has_bvec(iter)) {
+ /*
+ * Is this necessary, or can we trust the in-kernel
+ * caller? Can we replace this with
+ * end += iov_iter_count(iter); ?
+ */
+ struct bio_vec *bvec = iov_iter_bvec(iter);
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ addr = bvec[seg].bv_offset;
+ size = bvec[seg].bv_len;
+ end += size;
+ if (unlikely((addr & blocksize_mask) ||
+ (size & blocksize_mask))) {
+ if (bdev)
+ blkbits = blksize_bits(
+ bdev_logical_block_size(bdev));
+ blocksize_mask = (1 << blkbits) - 1;
+ if ((addr & blocksize_mask) ||
+ (size & blocksize_mask))
+ goto out;
+ }
+ }
+ } else
+ BUG();
/* watch out for a 0 len io from a tricksy fs */
if (rw == READ && end == offset)
@@ -1251,47 +1376,14 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
if (unlikely(sdio.blkfactor))
sdio.pages_in_io = 2;
- for (seg = 0; seg < nr_segs; seg++) {
- user_addr = (unsigned long)iov[seg].iov_base;
- sdio.pages_in_io +=
- ((user_addr + iov[seg].iov_len + PAGE_SIZE-1) /
- PAGE_SIZE - user_addr / PAGE_SIZE);
- }
-
blk_start_plug(&plug);
- for (seg = 0; seg < nr_segs; seg++) {
- user_addr = (unsigned long)iov[seg].iov_base;
- sdio.size += bytes = iov[seg].iov_len;
-
- /* Index into the first page of the first block */
- sdio.first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
- sdio.final_block_in_request = sdio.block_in_file +
- (bytes >> blkbits);
- /* Page fetching state */
- sdio.head = 0;
- sdio.tail = 0;
- sdio.curr_page = 0;
-
- sdio.total_pages = 0;
- if (user_addr & (PAGE_SIZE-1)) {
- sdio.total_pages++;
- bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
- }
- sdio.total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
- sdio.curr_user_address = user_addr;
-
- retval = do_direct_IO(dio, &sdio, &map_bh);
-
- dio->result += iov[seg].iov_len -
- ((sdio.final_block_in_request - sdio.block_in_file) <<
- blkbits);
-
- if (retval) {
- dio_cleanup(dio, &sdio);
- break;
- }
- } /* end iovec loop */
+ if (iov_iter_has_iovec(iter))
+ retval = direct_IO_iovec(iov_iter_iovec(iter), nr_segs, dio,
+ &sdio, blkbits, &map_bh);
+ else
+ retval = direct_IO_bvec(iov_iter_bvec(iter), nr_segs, dio,
+ &sdio, blkbits, &map_bh);
if (retval == -ENOTBLK) {
/*
@@ -1360,9 +1452,9 @@ out:
ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, const struct iovec *iov, loff_t offset,
- unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
- dio_submit_t submit_io, int flags)
+ struct block_device *bdev, struct iov_iter *iter, loff_t offset,
+ get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io,
+ int flags)
{
/*
* The block device state is needed in the end to finally
@@ -1376,9 +1468,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
prefetch(bdev->bd_queue);
prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
- return do_blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_block, end_io,
- submit_io, flags);
+ return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
+ get_block, end_io, submit_io, flags);
}
EXPORT_SYMBOL(__blockdev_direct_IO);
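
The rewritten do_blockdev_direct_IO() takes an iov_iter and applies the same alignment rule to either the iovec or the bio_vec representation: every segment's start address and length must be a multiple of the block size, or the request falls back to buffered I/O. A standalone sketch of the iovec-side check:

#include <stdio.h>
#include <sys/uio.h>

/* Returns 1 if every segment starts and ends on a blocksize boundary,
 * mirroring the "(addr & blocksize_mask) || (size & blocksize_mask)" test
 * in the iovec branch above.  blocksize must be a power of two. */
static int iov_is_aligned(const struct iovec *iov, unsigned long nr_segs,
			  unsigned int blocksize)
{
	unsigned long mask = blocksize - 1;

	for (unsigned long seg = 0; seg < nr_segs; seg++) {
		unsigned long addr = (unsigned long)iov[seg].iov_base;

		if ((addr & mask) || (iov[seg].iov_len & mask))
			return 0;
	}
	return 1;
}

int main(void)
{
	static char buf[8192] __attribute__((aligned(4096)));
	struct iovec ok  = { .iov_base = buf,     .iov_len = 4096 };
	struct iovec bad = { .iov_base = buf + 1, .iov_len = 4096 };

	printf("aligned:   %d\n", iov_is_aligned(&ok, 1, 512));  /* 1 */
	printf("unaligned: %d\n", iov_is_aligned(&bad, 1, 512)); /* 0 */
	return 0;
}
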
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 88556dc0458e..d5abafd56a6d 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -706,9 +706,7 @@ static int lkb_idr_is_local(int id, void *p, void *data)
{
struct dlm_lkb *lkb = p;
- if (!lkb->lkb_nodeid)
- return 1;
- return 0;
+ return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV;
}
static int lkb_idr_is_any(int id, void *p, void *data)
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index c88e355f7635..000eae2782b6 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -408,7 +408,7 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
struct page *page)
{
return ecryptfs_lower_header_size(crypt_stat) +
- (page->index << PAGE_CACHE_SHIFT);
+ ((loff_t)page->index << PAGE_CACHE_SHIFT);
}
/**
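
The lower_offset_for_page() fix widens page->index to loff_t before shifting. Without the cast, the shift happens in the width of pgoff_t (32 bits on 32-bit configurations) and silently wraps for offsets past 4 GiB. A short demonstration using a 32-bit index type:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on common configurations */

int main(void)
{
	unsigned int index = 0x00200000u;	/* page 2M => byte offset 8 GiB */

	/* Shift performed in 32 bits, then widened: high bits already lost. */
	long long truncated = (long long)(index << PAGE_SHIFT);

	/* Widen first (what the (loff_t) cast does), then shift: correct. */
	long long correct = (long long)index << PAGE_SHIFT;

	printf("without cast: %lld\n", truncated);	/* 0 */
	printf("with cast:    %lld\n", correct);	/* 8589934592 */
	return 0;
}
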
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index bf12ba5dd223..4000f6b3a750 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -44,15 +44,15 @@
*/
static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
- struct dentry *lower_dentry;
- int rc = 1;
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+ int rc;
+
+ if (!(lower_dentry->d_flags & DCACHE_OP_REVALIDATE))
+ return 1;
if (flags & LOOKUP_RCU)
return -ECHILD;
- lower_dentry = ecryptfs_dentry_to_lower(dentry);
- if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
- goto out;
rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
if (dentry->d_inode) {
struct inode *lower_inode =
@@ -60,12 +60,17 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
fsstack_copy_attr_all(dentry->d_inode, lower_inode);
}
-out:
return rc;
}
struct kmem_cache *ecryptfs_dentry_info_cache;
+static void ecryptfs_dentry_free_rcu(struct rcu_head *head)
+{
+ kmem_cache_free(ecryptfs_dentry_info_cache,
+ container_of(head, struct ecryptfs_dentry_info, rcu));
+}
+
/**
* ecryptfs_d_release
* @dentry: The ecryptfs dentry
@@ -74,15 +79,11 @@ struct kmem_cache *ecryptfs_dentry_info_cache;
*/
static void ecryptfs_d_release(struct dentry *dentry)
{
- if (ecryptfs_dentry_to_private(dentry)) {
- if (ecryptfs_dentry_to_lower(dentry)) {
- dput(ecryptfs_dentry_to_lower(dentry));
- mntput(ecryptfs_dentry_to_lower_mnt(dentry));
- }
- kmem_cache_free(ecryptfs_dentry_info_cache,
- ecryptfs_dentry_to_private(dentry));
+ struct ecryptfs_dentry_info *p = dentry->d_fsdata;
+ if (p) {
+ path_put(&p->lower_path);
+ call_rcu(&p->rcu, ecryptfs_dentry_free_rcu);
}
- return;
}
const struct dentry_operations ecryptfs_dops = {
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index df19d34a033b..90d1882b306f 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -261,7 +261,10 @@ struct ecryptfs_inode_info {
* vfsmount too. */
struct ecryptfs_dentry_info {
struct path lower_path;
- struct ecryptfs_crypt_stat *crypt_stat;
+ union {
+ struct ecryptfs_crypt_stat *crypt_stat;
+ struct rcu_head rcu;
+ };
};
/**
@@ -512,13 +515,6 @@ ecryptfs_dentry_to_lower(struct dentry *dentry)
return ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.dentry;
}
-static inline void
-ecryptfs_set_dentry_lower(struct dentry *dentry, struct dentry *lower_dentry)
-{
- ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.dentry =
- lower_dentry;
-}
-
static inline struct vfsmount *
ecryptfs_dentry_to_lower_mnt(struct dentry *dentry)
{
@@ -531,13 +527,6 @@ ecryptfs_dentry_to_lower_path(struct dentry *dentry)
return &((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path;
}
-static inline void
-ecryptfs_set_dentry_lower_mnt(struct dentry *dentry, struct vfsmount *lower_mnt)
-{
- ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.mnt =
- lower_mnt;
-}
-
#define ecryptfs_printk(type, fmt, arg...) \
__ecryptfs_printk(type "%s: " fmt, __func__, ## arg);
__printf(1, 2)
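
The union added to ecryptfs_dentry_info lets the rcu_head reuse storage that crypt_stat no longer needs once the dentry is being torn down, so the deferred free costs no extra bytes per dentry. A small userspace analogue of that trade-off (the callback head here is just a stand-in for rcu_head, and the printed sizes are whatever the host ABI gives):

#include <stdio.h>

struct cb_head { void (*func)(struct cb_head *); struct cb_head *next; };

/* Separate fields: every object pays for the callback head up front. */
struct with_both  { void *payload; struct cb_head cb; };

/* Union, as in ecryptfs_dentry_info: payload and callback head share
 * storage, since the payload is dead by the time the free is deferred. */
struct with_union { union { void *payload; struct cb_head cb; } u; };

int main(void)
{
	printf("separate fields: %zu bytes\n", sizeof(struct with_both));
	printf("union:           %zu bytes\n", sizeof(struct with_union));
	return 0;
}
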
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 992cf95830b5..3ed6e5f5bb4b 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -37,22 +37,21 @@
/**
* ecryptfs_read_update_atime
*
- * generic_file_read updates the atime of upper layer inode. But, it
+ * generic_file_read_iter updates the atime of upper layer inode. But, it
* doesn't give us a chance to update the atime of the lower layer
- * inode. This function is a wrapper to generic_file_read. It
- * updates the atime of the lower level inode if generic_file_read
+ * inode. This function is a wrapper to generic_file_read_iter. It
+ * updates the atime of the lower level inode if generic_file_read_iter
* returns without any errors. This is to be used only for file reads.
* The function to be used for directory reads is ecryptfs_read.
*/
static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ struct iov_iter *iter, loff_t pos)
{
ssize_t rc;
struct path *path;
struct file *file = iocb->ki_filp;
- rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
+ rc = generic_file_read_iter(iocb, iter, pos);
/*
 * Even though this is an async interface, we need to wait
* for IO to finish to update atime
@@ -357,9 +356,9 @@ const struct file_operations ecryptfs_dir_fops = {
const struct file_operations ecryptfs_main_fops = {
.llseek = generic_file_llseek,
.read = do_sync_read,
- .aio_read = ecryptfs_read_update_atime,
+ .read_iter = ecryptfs_read_update_atime,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .write_iter = generic_file_write_iter,
.iterate = ecryptfs_readdir,
.unlocked_ioctl = ecryptfs_unlocked_ioctl,
#ifdef CONFIG_COMPAT
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 67e9b6339691..0f9b66eaa767 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -361,8 +361,8 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry,
BUG_ON(!d_count(lower_dentry));
ecryptfs_set_dentry_private(dentry, dentry_info);
- ecryptfs_set_dentry_lower(dentry, lower_dentry);
- ecryptfs_set_dentry_lower_mnt(dentry, lower_mnt);
+ dentry_info->lower_path.mnt = lower_mnt;
+ dentry_info->lower_path.dentry = lower_dentry;
if (!lower_dentry->d_inode) {
/* We want to add because we couldn't find in lower */
@@ -703,16 +703,6 @@ out:
return NULL;
}
-static void
-ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
-{
- char *buf = nd_get_link(nd);
- if (!IS_ERR(buf)) {
- /* Free the char* */
- kfree(buf);
- }
-}
-
/**
* upper_size_to_lower_size
* @crypt_stat: Crypt_stat associated with file
@@ -1121,7 +1111,7 @@ out:
const struct inode_operations ecryptfs_symlink_iops = {
.readlink = generic_readlink,
.follow_link = ecryptfs_follow_link,
- .put_link = ecryptfs_put_link,
+ .put_link = kfree_put_link,
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
.getattr = ecryptfs_getattr_link,
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 7d52806c2119..4725a07f003c 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1149,7 +1149,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
struct ecryptfs_msg_ctx *msg_ctx;
struct ecryptfs_message *msg = NULL;
char *auth_tok_sig;
- char *payload;
+ char *payload = NULL;
size_t payload_len = 0;
int rc;
@@ -1203,6 +1203,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
}
out:
kfree(msg);
+ kfree(payload);
return rc;
}
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index eb1c5979ecaf..1b119d3bf924 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -585,8 +585,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
/* ->kill_sb() will take care of root_info */
ecryptfs_set_dentry_private(s->s_root, root_info);
- ecryptfs_set_dentry_lower(s->s_root, path.dentry);
- ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt);
+ root_info->lower_path = path;
s->s_flags |= MS_ACTIVE;
return dget(s->s_root);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 473e09da7d02..810c28fb8c3c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -34,7 +34,6 @@
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <linux/device.h>
-#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/mman.h>
@@ -1605,8 +1604,7 @@ fetch_events:
}
spin_unlock_irqrestore(&ep->lock, flags);
- if (!freezable_schedule_hrtimeout_range(to, slack,
- HRTIMER_MODE_ABS))
+ if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
timed_out = 1;
spin_lock_irqsave(&ep->lock, flags);
diff --git a/fs/exec.c b/fs/exec.c
index 8875dd10ae7a..2ea437e5acf4 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1547,6 +1547,7 @@ static int do_execve_common(const char *filename,
current->fs->in_exec = 0;
current->in_execve = 0;
acct_update_integrals(current);
+ task_numa_free(current);
free_bprm(bprm);
if (displaced)
put_files_struct(displaced);
diff --git a/fs/exofs/file.c b/fs/exofs/file.c
index 491c6c078e7f..20564f8a358a 100644
--- a/fs/exofs/file.c
+++ b/fs/exofs/file.c
@@ -69,8 +69,8 @@ const struct file_operations exofs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.open = generic_file_open,
.release = exofs_release_file,
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index a5b3a5db3120..6af043bab460 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -64,8 +64,8 @@ const struct file_operations ext2_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext2_compat_ioctl,
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index c260de6d7b6d..cf91b336e3df 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -848,18 +848,16 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
}
static ssize_t
-ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs)
+ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
- ext2_get_block);
+ ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext2_get_block);
if (ret < 0 && (rw & WRITE))
- ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));
+ ext2_write_failed(mapping, offset + iov_iter_count(iter));
return ret;
}
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index bafdd48eefde..a331ad1c23f8 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -309,43 +309,17 @@ struct fname {
*/
static void free_rb_tree_fname(struct rb_root *root)
{
- struct rb_node *n = root->rb_node;
- struct rb_node *parent;
- struct fname *fname;
-
- while (n) {
- /* Do the node's children first */
- if (n->rb_left) {
- n = n->rb_left;
- continue;
- }
- if (n->rb_right) {
- n = n->rb_right;
- continue;
- }
- /*
- * The node has no children; free it, and then zero
- * out parent's link to it. Finally go to the
- * beginning of the loop and try to free the parent
- * node.
- */
- parent = rb_parent(n);
- fname = rb_entry(n, struct fname, rb_hash);
+ struct fname *fname, *next;
+
+ rbtree_postorder_for_each_entry_safe(fname, next, root, rb_hash)
while (fname) {
struct fname * old = fname;
fname = fname->next;
kfree (old);
}
- if (!parent)
- *root = RB_ROOT;
- else if (parent->rb_left == n)
- parent->rb_left = NULL;
- else if (parent->rb_right == n)
- parent->rb_right = NULL;
- n = parent;
- }
-}
+ *root = RB_ROOT;
+}
static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp,
loff_t pos)
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 25cb413277e9..a79677188b54 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -52,8 +52,8 @@ const struct file_operations ext3_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.unlocked_ioctl = ext3_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext3_compat_ioctl,
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 2bd85486b879..85bd13b8b758 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1862,8 +1862,7 @@ static int ext3_releasepage(struct page *page, gfp_t wait)
* VFS code falls back into buffered path in that case so we are safe.
*/
static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset,
- unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -1871,10 +1870,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
handle_t *handle;
ssize_t ret;
int orphan = 0;
- size_t count = iov_length(iov, nr_segs);
+ size_t count = iov_iter_count(iter);
int retries = 0;
- trace_ext3_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
+ trace_ext3_direct_IO_enter(inode, offset, count, rw);
if (rw == WRITE) {
loff_t final_size = offset + count;
@@ -1898,15 +1897,14 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
}
retry:
- ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
- ext3_get_block);
+ ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext3_get_block);
/*
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iov, nr_segs);
+ loff_t end = offset + count;
if (end > isize)
ext3_truncate_failed_direct_write(inode);
@@ -1949,8 +1947,7 @@ retry:
ret = err;
}
out:
- trace_ext3_direct_IO_exit(inode, offset,
- iov_length(iov, nr_segs), rw, ret);
+ trace_ext3_direct_IO_exit(inode, offset, count, rw, ret);
return ret;
}
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index c50c76190373..37fd31ed16e7 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2825,6 +2825,10 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
* bitmap, and an inode table.
*/
overhead += ngroups * (2 + sbi->s_itb_per_group);
+
+ /* Add the journal blocks as well */
+ overhead += sbi->s_journal->j_maxlen;
+
sbi->s_overhead_last = overhead;
smp_wmb();
sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index af815ea9d7cc..850bf979beb0 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -29,6 +29,7 @@
#include <linux/wait.h>
#include <linux/blockgroup_lock.h>
#include <linux/percpu_counter.h>
+#include <linux/ratelimit.h>
#include <crypto/hash.h>
#ifdef __KERNEL__
#include <linux/compat.h>
@@ -1314,6 +1315,11 @@ struct ext4_sb_info {
unsigned long s_es_last_sorted;
struct percpu_counter s_extent_cache_cnt;
spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
+
+ /* Ratelimit ext4 messages. */
+ struct ratelimit_state s_err_ratelimit_state;
+ struct ratelimit_state s_warning_ratelimit_state;
+ struct ratelimit_state s_msg_ratelimit_state;
};
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -2117,8 +2123,7 @@ extern void ext4_da_update_reserve_space(struct inode *inode,
extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset,
- unsigned long nr_segs);
+ struct iov_iter *iter, loff_t offset);
extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
extern void ext4_ind_truncate(handle_t *, struct inode *inode);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 3da21945ff1f..2ab3dcb741df 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -74,12 +74,11 @@ void ext4_unwritten_wait(struct inode *inode)
* or one thread will zero the other's data, causing corruption.
*/
static int
-ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ext4_unaligned_aio(struct inode *inode, struct iov_iter *iter, loff_t pos)
{
struct super_block *sb = inode->i_sb;
int blockmask = sb->s_blocksize - 1;
- size_t count = iov_length(iov, nr_segs);
+ size_t count = iov_iter_count(iter);
loff_t final_size = pos + count;
if (pos >= inode->i_size)
@@ -92,8 +91,8 @@ ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
}
static ssize_t
-ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ext4_file_dio_write(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -101,11 +100,11 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
int unaligned_aio = 0;
ssize_t ret;
int overwrite = 0;
- size_t length = iov_length(iov, nr_segs);
+ size_t length = iov_iter_count(iter);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
!is_sync_kiocb(iocb))
- unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
+ unaligned_aio = ext4_unaligned_aio(inode, iter, pos);
/* Unaligned direct AIO must be serialized; see comment above */
if (unaligned_aio) {
@@ -146,7 +145,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
overwrite = 1;
}
- ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+ ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
if (ret > 0) {
@@ -165,8 +164,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
}
static ssize_t
-ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
struct inode *inode = file_inode(iocb->ki_filp);
ssize_t ret;
@@ -178,22 +176,24 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- size_t length = iov_length(iov, nr_segs);
+ size_t length = iov_iter_count(iter);
if ((pos > sbi->s_bitmap_maxbytes ||
(pos == sbi->s_bitmap_maxbytes && length > 0)))
return -EFBIG;
if (pos + length > sbi->s_bitmap_maxbytes) {
- nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
- sbi->s_bitmap_maxbytes - pos);
+ ret = iov_iter_shorten(iter,
+ sbi->s_bitmap_maxbytes - pos);
+ if (ret)
+ return ret;
}
}
if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
- ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
+ ret = ext4_file_dio_write(iocb, iter, pos);
else
- ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
+ ret = generic_file_write_iter(iocb, iter, pos);
return ret;
}
@@ -594,8 +594,8 @@ const struct file_operations ext4_file_operations = {
.llseek = ext4_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = generic_file_aio_read,
- .aio_write = ext4_file_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = ext4_file_write_iter,
.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext4_compat_ioctl,
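
ext4_unaligned_aio() now sizes the request via iov_iter_count(), but the test itself is unchanged: an extending AIO write whose start or end is not block-aligned must be serialized against other direct writers. A simplified sketch of that predicate (the real check also consults i_size and only runs for extent-mapped files):

#include <stdio.h>
#include <stdbool.h>

/* True when either the start offset or the end offset of the write is not
 * a multiple of the block size.  blocksize must be a power of two. */
static bool write_is_unaligned(long long pos, unsigned long count,
			       unsigned int blocksize)
{
	unsigned int blockmask = blocksize - 1;
	long long final_size = pos + (long long)count;

	return (pos & blockmask) || (final_size & blockmask);
}

int main(void)
{
	printf("%d\n", write_is_unaligned(4096, 4096, 4096));	/* 0: aligned      */
	printf("%d\n", write_is_unaligned(4096,  100, 4096));	/* 1: ragged tail  */
	return 0;
}
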
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 594009f5f523..8026469aa1fb 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -639,8 +639,7 @@ out:
* VFS code falls back into buffered path in that case so we are safe.
*/
ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset,
- unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -648,7 +647,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
handle_t *handle;
ssize_t ret;
int orphan = 0;
- size_t count = iov_length(iov, nr_segs);
+ size_t count = iov_iter_count(iter);
int retries = 0;
if (rw == WRITE) {
@@ -687,18 +686,17 @@ retry:
goto locked;
}
ret = __blockdev_direct_IO(rw, iocb, inode,
- inode->i_sb->s_bdev, iov,
- offset, nr_segs,
- ext4_get_block, NULL, NULL, 0);
+ inode->i_sb->s_bdev, iter,
+ offset, ext4_get_block, NULL, NULL, 0);
inode_dio_done(inode);
} else {
locked:
- ret = blockdev_direct_IO(rw, iocb, inode, iov,
- offset, nr_segs, ext4_get_block);
+ ret = blockdev_direct_IO(rw, iocb, inode, iter,
+ offset, ext4_get_block);
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iov, nr_segs);
+ loff_t end = offset + iov_iter_count(iter);
if (end > isize)
ext4_truncate_failed_write(inode);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e274e9c1171f..05599cd23a1b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2178,6 +2178,9 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
*
* @handle - handle for journal operations
* @mpd - extent to map
+ * @give_up_on_write - we set this to true iff there is a fatal error and there
+ * is no hope of writing the data. The caller should discard
+ * dirty pages to avoid infinite loops.
*
* The function maps extent starting at mpd->lblk of length mpd->len. If it is
* delayed, blocks are allocated, if it is unwritten, we may need to convert
@@ -2295,6 +2298,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
struct address_space *mapping = mpd->inode->i_mapping;
struct pagevec pvec;
unsigned int nr_pages;
+ long left = mpd->wbc->nr_to_write;
pgoff_t index = mpd->first_page;
pgoff_t end = mpd->last_page;
int tag;
@@ -2330,6 +2334,17 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
if (page->index > end)
goto out;
+ /*
+ * Accumulated enough dirty pages? This doesn't apply
+ * to WB_SYNC_ALL mode. For integrity sync we have to
+ * keep going because someone may be concurrently
+ * dirtying pages, and we might have synced a lot of
+ * newly appeared dirty pages, but have not synced all
+ * of the old dirty pages.
+ */
+ if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
+ goto out;
+
/* If we can't merge this page, we are done. */
if (mpd->map.m_len > 0 && mpd->next_page != page->index)
goto out;
@@ -2364,19 +2379,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
if (err <= 0)
goto out;
err = 0;
-
- /*
- * Accumulated enough dirty pages? This doesn't apply
- * to WB_SYNC_ALL mode. For integrity sync we have to
- * keep going because someone may be concurrently
- * dirtying pages, and we might have synced a lot of
- * newly appeared dirty pages, but have not synced all
- * of the old dirty pages.
- */
- if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
- mpd->next_page - mpd->first_page >=
- mpd->wbc->nr_to_write)
- goto out;
+ left--;
}
pagevec_release(&pvec);
cond_resched();
@@ -3067,13 +3070,12 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
*
*/
static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset,
- unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
ssize_t ret;
- size_t count = iov_length(iov, nr_segs);
+ size_t count = iov_iter_count(iter);
int overwrite = 0;
get_block_t *get_block_func = NULL;
int dio_flags = 0;
@@ -3082,7 +3084,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
/* Use the old path for reads and writes beyond i_size. */
if (rw != WRITE || final_size > inode->i_size)
- return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+ return ext4_ind_direct_IO(rw, iocb, iter, offset);
BUG_ON(iocb->private == NULL);
@@ -3149,8 +3151,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
dio_flags = DIO_LOCKING;
}
ret = __blockdev_direct_IO(rw, iocb, inode,
- inode->i_sb->s_bdev, iov,
- offset, nr_segs,
+ inode->i_sb->s_bdev, iter,
+ offset,
get_block_func,
ext4_end_io_dio,
NULL,
@@ -3204,8 +3206,7 @@ retake_lock:
}
static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset,
- unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -3221,13 +3222,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
if (ext4_has_inline_data(inode))
return 0;
- trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
+ trace_ext4_direct_IO_enter(inode, offset, iov_iter_count(iter), rw);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
+ ret = ext4_ext_direct_IO(rw, iocb, iter, offset);
else
- ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
- trace_ext4_direct_IO_exit(inode, offset,
- iov_length(iov, nr_segs), rw, ret);
+ ret = ext4_ind_direct_IO(rw, iocb, iter, offset);
+ trace_ext4_direct_IO_exit(inode, offset, iov_iter_count(iter), rw, ret);
return ret;
}
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index d7d0c7b46ed4..c1aa7a8e8c47 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -197,14 +197,15 @@ static void dump_completed_IO(struct inode *inode, struct list_head *head)
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
struct ext4_inode_info *ei = EXT4_I(io_end->inode);
+ struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
struct workqueue_struct *wq;
unsigned long flags;
/* Only reserved conversions from writeback should enter here */
WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
- WARN_ON(!io_end->handle);
+ WARN_ON(!io_end->handle && sbi->s_journal);
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
- wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq;
+ wq = sbi->rsv_conversion_wq;
if (list_empty(&ei->i_rsv_conversion_list))
queue_work(wq, &ei->i_rsv_conversion_work);
list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
@@ -297,7 +298,7 @@ ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
static void ext4_end_bio(struct bio *bio, int error)
{
ext4_io_end_t *io_end = bio->bi_private;
- sector_t bi_sector = bio->bi_sector;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
BUG_ON(!io_end);
bio->bi_end_io = NULL;
@@ -365,7 +366,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
if (!bio)
return -ENOMEM;
- bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+ bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
bio->bi_end_io = ext4_end_bio;
bio->bi_private = ext4_get_io_end(io->io_end);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2c2e6cbc6bed..d3a857bfae47 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -411,20 +411,26 @@ static void ext4_handle_error(struct super_block *sb)
sb->s_id);
}
+#define ext4_error_ratelimit(sb) \
+ ___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state), \
+ "EXT4-fs error")
+
void __ext4_error(struct super_block *sb, const char *function,
unsigned int line, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
- printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
- sb->s_id, function, line, current->comm, &vaf);
- va_end(args);
+ if (ext4_error_ratelimit(sb)) {
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk(KERN_CRIT
+ "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
+ sb->s_id, function, line, current->comm, &vaf);
+ va_end(args);
+ }
save_error_info(sb, function, line);
-
ext4_handle_error(sb);
}
@@ -438,22 +444,23 @@ void __ext4_error_inode(struct inode *inode, const char *function,
es->s_last_error_ino = cpu_to_le32(inode->i_ino);
es->s_last_error_block = cpu_to_le64(block);
+ if (ext4_error_ratelimit(inode->i_sb)) {
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ if (block)
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
+ "inode #%lu: block %llu: comm %s: %pV\n",
+ inode->i_sb->s_id, function, line, inode->i_ino,
+ block, current->comm, &vaf);
+ else
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
+ "inode #%lu: comm %s: %pV\n",
+ inode->i_sb->s_id, function, line, inode->i_ino,
+ current->comm, &vaf);
+ va_end(args);
+ }
save_error_info(inode->i_sb, function, line);
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
- if (block)
- printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
- "inode #%lu: block %llu: comm %s: %pV\n",
- inode->i_sb->s_id, function, line, inode->i_ino,
- block, current->comm, &vaf);
- else
- printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
- "inode #%lu: comm %s: %pV\n",
- inode->i_sb->s_id, function, line, inode->i_ino,
- current->comm, &vaf);
- va_end(args);
-
ext4_handle_error(inode->i_sb);
}
@@ -469,27 +476,28 @@ void __ext4_error_file(struct file *file, const char *function,
es = EXT4_SB(inode->i_sb)->s_es;
es->s_last_error_ino = cpu_to_le32(inode->i_ino);
+ if (ext4_error_ratelimit(inode->i_sb)) {
+ path = d_path(&(file->f_path), pathname, sizeof(pathname));
+ if (IS_ERR(path))
+ path = "(unknown)";
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ if (block)
+ printk(KERN_CRIT
+ "EXT4-fs error (device %s): %s:%d: inode #%lu: "
+ "block %llu: comm %s: path %s: %pV\n",
+ inode->i_sb->s_id, function, line, inode->i_ino,
+ block, current->comm, path, &vaf);
+ else
+ printk(KERN_CRIT
+ "EXT4-fs error (device %s): %s:%d: inode #%lu: "
+ "comm %s: path %s: %pV\n",
+ inode->i_sb->s_id, function, line, inode->i_ino,
+ current->comm, path, &vaf);
+ va_end(args);
+ }
save_error_info(inode->i_sb, function, line);
- path = d_path(&(file->f_path), pathname, sizeof(pathname));
- if (IS_ERR(path))
- path = "(unknown)";
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
- if (block)
- printk(KERN_CRIT
- "EXT4-fs error (device %s): %s:%d: inode #%lu: "
- "block %llu: comm %s: path %s: %pV\n",
- inode->i_sb->s_id, function, line, inode->i_ino,
- block, current->comm, path, &vaf);
- else
- printk(KERN_CRIT
- "EXT4-fs error (device %s): %s:%d: inode #%lu: "
- "comm %s: path %s: %pV\n",
- inode->i_sb->s_id, function, line, inode->i_ino,
- current->comm, path, &vaf);
- va_end(args);
-
ext4_handle_error(inode->i_sb);
}
@@ -543,11 +551,13 @@ void __ext4_std_error(struct super_block *sb, const char *function,
(sb->s_flags & MS_RDONLY))
return;
- errstr = ext4_decode_error(sb, errno, nbuf);
- printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
- sb->s_id, function, line, errstr);
- save_error_info(sb, function, line);
+ if (ext4_error_ratelimit(sb)) {
+ errstr = ext4_decode_error(sb, errno, nbuf);
+ printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
+ sb->s_id, function, line, errstr);
+ }
+ save_error_info(sb, function, line);
ext4_handle_error(sb);
}
@@ -597,6 +607,9 @@ void __ext4_msg(struct super_block *sb,
struct va_format vaf;
va_list args;
+ if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
+ return;
+
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
@@ -610,6 +623,10 @@ void __ext4_warning(struct super_block *sb, const char *function,
struct va_format vaf;
va_list args;
+ if (!___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
+ "EXT4-fs warning"))
+ return;
+
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
@@ -633,18 +650,20 @@ __acquires(bitlock)
es->s_last_error_block = cpu_to_le64(block);
__save_error_info(sb, function, line);
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
- printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
- sb->s_id, function, line, grp);
- if (ino)
- printk(KERN_CONT "inode %lu: ", ino);
- if (block)
- printk(KERN_CONT "block %llu:", (unsigned long long) block);
- printk(KERN_CONT "%pV\n", &vaf);
- va_end(args);
+ if (ext4_error_ratelimit(sb)) {
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
+ sb->s_id, function, line, grp);
+ if (ino)
+ printk(KERN_CONT "inode %lu: ", ino);
+ if (block)
+ printk(KERN_CONT "block %llu:",
+ (unsigned long long) block);
+ printk(KERN_CONT "%pV\n", &vaf);
+ va_end(args);
+ }
if (test_opt(sb, ERRORS_CONT)) {
ext4_commit_super(sb, 0);
@@ -2606,6 +2625,12 @@ EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128);
EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
+EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(err_ratelimit_burst, s_err_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_UI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
static struct attribute *ext4_attrs[] = {
ATTR_LIST(delayed_allocation_blocks),
@@ -2623,6 +2648,12 @@ static struct attribute *ext4_attrs[] = {
ATTR_LIST(max_writeback_mb_bump),
ATTR_LIST(extent_max_zeroout_kb),
ATTR_LIST(trigger_fs_error),
+ ATTR_LIST(err_ratelimit_interval_ms),
+ ATTR_LIST(err_ratelimit_burst),
+ ATTR_LIST(warning_ratelimit_interval_ms),
+ ATTR_LIST(warning_ratelimit_burst),
+ ATTR_LIST(msg_ratelimit_interval_ms),
+ ATTR_LIST(msg_ratelimit_burst),
NULL,
};
@@ -4118,6 +4149,11 @@ no_journal:
if (es->s_error_count)
mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
+ /* Enable message ratelimiting. Default is 10 messages per 5 secs. */
+ ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
+ ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
+ ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
+
kfree(orig_data);
return 0;
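
ratelimit_state_init(..., 5 * HZ, 10) caps each message class at 10 prints per 5-second window, and the new sysfs attributes expose the interval and burst as tunables. A hedged userspace sketch of the same interval-plus-burst policy (no locking and no "N callbacks suppressed" accounting, unlike the kernel's ___ratelimit()):

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

struct ratelimit {
	double interval;	/* seconds per window            */
	int burst;		/* events allowed per window     */
	double window_start;
	int printed;
};

/* Allow up to 'burst' events per 'interval' seconds, then suppress until
 * the window rolls over. */
static bool ratelimit_ok(struct ratelimit *rl)
{
	double now = (double)clock() / CLOCKS_PER_SEC;

	if (now - rl->window_start >= rl->interval) {
		rl->window_start = now;
		rl->printed = 0;
	}
	if (rl->printed >= rl->burst)
		return false;
	rl->printed++;
	return true;
}

int main(void)
{
	struct ratelimit rl = { .interval = 5.0, .burst = 10 };
	int allowed = 0;

	for (int i = 0; i < 100; i++)
		if (ratelimit_ok(&rl))
			allowed++;
	printf("allowed %d of 100 back-to-back messages\n", allowed);	/* 10 */
	return 0;
}
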
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index e06e0995e00f..214fe1054fce 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -63,3 +63,11 @@ config F2FS_FS_SECURITY
the extended attribute support in advance.
If you are not using a security module, say N.
+
+config F2FS_CHECK_FS
+ bool "F2FS consistency checking feature"
+ depends on F2FS_FS
+ help
+	  Enables BUG_ONs which check the file system consistency at runtime.
+
+	  If you want to improve performance, say N.
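
The new F2FS_CHECK_FS option presumably gates the f2fs_bug_on() calls introduced throughout checkpoint.c: with the option set they act like BUG_ON(), otherwise they compile away. The sketch below shows how such a config-gated assertion is typically wired up; it is not the real f2fs.h definition, and assert() stands in for BUG_ON() so it builds in userspace.

#include <assert.h>
#include <stdio.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(condition)	assert(!(condition))	/* kernel uses BUG_ON() */
#else
#define f2fs_bug_on(condition)	do { } while (0)	/* compiled out */
#endif

int main(void)
{
	int n_orphans = 1;

	/* Analogous to release_orphan_inode(): never decrement below zero. */
	f2fs_bug_on(n_orphans == 0);
	n_orphans--;

#ifdef CONFIG_F2FS_CHECK_FS
	puts("consistency checks enabled");
#else
	puts("consistency checks compiled out");
#endif
	return 0;
}

Build with -DCONFIG_F2FS_CHECK_FS to enable the checks in this sketch.
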
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index b7826ec1b470..d0fc287efeff 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -205,7 +205,8 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
return acl;
}
-static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+static int f2fs_set_acl(struct inode *inode, int type,
+ struct posix_acl *acl, struct page *ipage)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -250,7 +251,7 @@ static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
}
}
- error = f2fs_setxattr(inode, name_index, "", value, size, NULL);
+ error = f2fs_setxattr(inode, name_index, "", value, size, ipage);
kfree(value);
if (!error)
@@ -260,10 +261,10 @@ static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
return error;
}
-int f2fs_init_acl(struct inode *inode, struct inode *dir)
+int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage)
{
- struct posix_acl *acl = NULL;
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+ struct posix_acl *acl = NULL;
int error = 0;
if (!S_ISLNK(inode->i_mode)) {
@@ -276,19 +277,19 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir)
inode->i_mode &= ~current_umask();
}
- if (test_opt(sbi, POSIX_ACL) && acl) {
+ if (!test_opt(sbi, POSIX_ACL) || !acl)
+ goto cleanup;
- if (S_ISDIR(inode->i_mode)) {
- error = f2fs_set_acl(inode, ACL_TYPE_DEFAULT, acl);
- if (error)
- goto cleanup;
- }
- error = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode);
- if (error < 0)
- return error;
- if (error > 0)
- error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl);
+ if (S_ISDIR(inode->i_mode)) {
+ error = f2fs_set_acl(inode, ACL_TYPE_DEFAULT, acl, ipage);
+ if (error)
+ goto cleanup;
}
+ error = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode);
+ if (error < 0)
+ return error;
+ if (error > 0)
+ error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl, ipage);
cleanup:
posix_acl_release(acl);
return error;
@@ -313,7 +314,8 @@ int f2fs_acl_chmod(struct inode *inode)
error = posix_acl_chmod(&acl, GFP_KERNEL, mode);
if (error)
return error;
- error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl);
+
+ error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl, NULL);
posix_acl_release(acl);
return error;
}
@@ -388,7 +390,7 @@ static int f2fs_xattr_set_acl(struct dentry *dentry, const char *name,
acl = NULL;
}
- error = f2fs_set_acl(inode, type, acl);
+ error = f2fs_set_acl(inode, type, acl, NULL);
release_and_out:
posix_acl_release(acl);
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
index 80f430674417..49633131e038 100644
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -36,9 +36,9 @@ struct f2fs_acl_header {
#ifdef CONFIG_F2FS_FS_POSIX_ACL
-extern struct posix_acl *f2fs_get_acl(struct inode *inode, int type);
-extern int f2fs_acl_chmod(struct inode *inode);
-extern int f2fs_init_acl(struct inode *inode, struct inode *dir);
+extern struct posix_acl *f2fs_get_acl(struct inode *, int);
+extern int f2fs_acl_chmod(struct inode *);
+extern int f2fs_init_acl(struct inode *, struct inode *, struct page *);
#else
#define f2fs_check_acl NULL
#define f2fs_get_acl NULL
@@ -49,7 +49,8 @@ static inline int f2fs_acl_chmod(struct inode *inode)
return 0;
}
-static inline int f2fs_init_acl(struct inode *inode, struct inode *dir)
+static inline int f2fs_init_acl(struct inode *inode, struct inode *dir,
+ struct page *page)
{
return 0;
}
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index bb312201ca95..d430157ffe60 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -81,7 +81,7 @@ static int f2fs_write_meta_page(struct page *page,
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	/* Should not write any meta pages if an IO error has occurred */
- if (wbc->for_reclaim ||
+ if (wbc->for_reclaim || sbi->por_doing ||
is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)) {
dec_page_count(sbi, F2FS_DIRTY_META);
wbc->pages_skipped++;
@@ -142,8 +142,8 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
lock_page(page);
- BUG_ON(page->mapping != mapping);
- BUG_ON(!PageDirty(page));
+ f2fs_bug_on(page->mapping != mapping);
+ f2fs_bug_on(!PageDirty(page));
clear_page_dirty_for_io(page);
if (f2fs_write_meta_page(page, &wbc)) {
unlock_page(page);
@@ -167,6 +167,8 @@ static int f2fs_set_meta_page_dirty(struct page *page)
struct address_space *mapping = page->mapping;
struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
+ trace_f2fs_set_page_dirty(page, META);
+
SetPageUptodate(page);
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
@@ -206,6 +208,7 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi)
void release_orphan_inode(struct f2fs_sb_info *sbi)
{
mutex_lock(&sbi->orphan_inode_mutex);
+ f2fs_bug_on(sbi->n_orphans == 0);
sbi->n_orphans--;
mutex_unlock(&sbi->orphan_inode_mutex);
}
@@ -225,12 +228,8 @@ void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
break;
orphan = NULL;
}
-retry:
- new = kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
- if (!new) {
- cond_resched();
- goto retry;
- }
+
+ new = f2fs_kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
new->ino = ino;
/* add new_oentry into list which is sorted by inode number */
@@ -253,6 +252,7 @@ void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
if (orphan->ino == ino) {
list_del(&orphan->list);
kmem_cache_free(orphan_entry_slab, orphan);
+ f2fs_bug_on(sbi->n_orphans == 0);
sbi->n_orphans--;
break;
}
@@ -263,7 +263,7 @@ void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
struct inode *inode = f2fs_iget(sbi->sb, ino);
- BUG_ON(IS_ERR(inode));
+ f2fs_bug_on(IS_ERR(inode));
clear_nlink(inode);
/* truncate all the data during iput */
@@ -277,7 +277,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
return 0;
- sbi->por_doing = 1;
+ sbi->por_doing = true;
start_blk = __start_cp_addr(sbi) + 1;
orphan_blkaddr = __start_sum_addr(sbi) - 1;
@@ -294,7 +294,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
}
/* clear Orphan Flag */
clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
- sbi->por_doing = 0;
+ sbi->por_doing = false;
return 0;
}
@@ -469,9 +469,7 @@ static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
return -EEXIST;
}
list_add_tail(&new->list, head);
-#ifdef CONFIG_F2FS_STAT_FS
- sbi->n_dirty_dirs++;
-#endif
+ stat_inc_dirty_dir(sbi);
return 0;
}
@@ -482,12 +480,8 @@ void set_dirty_dir_page(struct inode *inode, struct page *page)
if (!S_ISDIR(inode->i_mode))
return;
-retry:
- new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
- if (!new) {
- cond_resched();
- goto retry;
- }
+
+ new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
new->inode = inode;
INIT_LIST_HEAD(&new->list);
@@ -504,13 +498,9 @@ retry:
void add_dirty_dir_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- struct dir_inode_entry *new;
-retry:
- new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
- if (!new) {
- cond_resched();
- goto retry;
- }
+ struct dir_inode_entry *new =
+ f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
+
new->inode = inode;
INIT_LIST_HEAD(&new->list);
@@ -541,9 +531,7 @@ void remove_dirty_dir_inode(struct inode *inode)
if (entry->inode == inode) {
list_del(&entry->list);
kmem_cache_free(inode_entry_slab, entry);
-#ifdef CONFIG_F2FS_STAT_FS
- sbi->n_dirty_dirs--;
-#endif
+ stat_dec_dirty_dir(sbi);
break;
}
}
@@ -617,11 +605,10 @@ static void block_operations(struct f2fs_sb_info *sbi)
blk_start_plug(&plug);
retry_flush_dents:
- mutex_lock_all(sbi);
-
+ f2fs_lock_all(sbi);
/* write all the dirty dentry pages */
if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
- mutex_unlock_all(sbi);
+ f2fs_unlock_all(sbi);
sync_dirty_dir_inodes(sbi);
goto retry_flush_dents;
}
@@ -644,7 +631,7 @@ retry_flush_nodes:
static void unblock_operations(struct f2fs_sb_info *sbi)
{
mutex_unlock(&sbi->node_write);
- mutex_unlock_all(sbi);
+ f2fs_unlock_all(sbi);
}
static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
@@ -756,8 +743,15 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
f2fs_put_page(cp_page, 1);
/* wait for previous submitted node/meta pages writeback */
- while (get_pages(sbi, F2FS_WRITEBACK))
- congestion_wait(BLK_RW_ASYNC, HZ / 50);
+ sbi->cp_task = current;
+ while (get_pages(sbi, F2FS_WRITEBACK)) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (!get_pages(sbi, F2FS_WRITEBACK))
+ break;
+ io_schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ sbi->cp_task = NULL;
filemap_fdatawait_range(sbi->node_inode->i_mapping, 0, LONG_MAX);
filemap_fdatawait_range(sbi->meta_inode->i_mapping, 0, LONG_MAX);
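
The do_checkpoint() hunk above replaces the congestion_wait() polling loop with a sleep/wake handshake: the checkpoint task publishes itself in sbi->cp_task, re-checks the F2FS_WRITEBACK count after setting TASK_UNINTERRUPTIBLE, and sleeps in io_schedule(), while the segment.c hunk further down wakes it from f2fs_end_io_write() once the count drops to zero. Below is a minimal user-space sketch of that wait-until-in-flight-reaches-zero pattern, using a mutex and condition variable in place of the kernel scheduler primitives; the names and counts are invented for the example.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int inflight = 4;              /* pages still under writeback */

static void *end_io(void *arg)        /* plays the role of f2fs_end_io_write() */
{
        (void)arg;
        usleep(10000);                /* pretend the I/O took some time */
        pthread_mutex_lock(&lock);
        if (--inflight == 0)          /* last completion wakes the waiter */
                pthread_cond_signal(&done);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t[4];
        int i;

        for (i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, end_io, NULL);

        /* plays the role of do_checkpoint(): sleep until nothing is in flight */
        pthread_mutex_lock(&lock);
        while (inflight)              /* re-check before sleeping, as the hunk does */
                pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);

        for (i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        printf("all writeback completed\n");
        return 0;
}
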
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 941f9b9ca3a5..35a2ccd098f3 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -68,9 +68,6 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
struct buffer_head *bh_result)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
-#ifdef CONFIG_F2FS_STAT_FS
- struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-#endif
pgoff_t start_fofs, end_fofs;
block_t start_blkaddr;
@@ -80,9 +77,8 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
return 0;
}
-#ifdef CONFIG_F2FS_STAT_FS
- sbi->total_hit_ext++;
-#endif
+ stat_inc_total_hit(inode->i_sb);
+
start_fofs = fi->ext.fofs;
end_fofs = fi->ext.fofs + fi->ext.len - 1;
start_blkaddr = fi->ext.blk_addr;
@@ -100,9 +96,7 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
else
bh_result->b_size = UINT_MAX;
-#ifdef CONFIG_F2FS_STAT_FS
- sbi->read_hit_ext++;
-#endif
+ stat_inc_read_hit(inode->i_sb);
read_unlock(&fi->ext.ext_lock);
return 1;
}
@@ -116,7 +110,7 @@ void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
pgoff_t fofs, start_fofs, end_fofs;
block_t start_blkaddr, end_blkaddr;
- BUG_ON(blk_addr == NEW_ADDR);
+ f2fs_bug_on(blk_addr == NEW_ADDR);
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
dn->ofs_in_node;
@@ -395,7 +389,7 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
bio = f2fs_bio_alloc(bdev, 1);
/* Initialize the bio */
- bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
bio->bi_end_io = read_end_io;
if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
@@ -442,7 +436,7 @@ static int get_data_block_ro(struct inode *inode, sector_t iblock,
}
/* It does not support data allocation */
- BUG_ON(create);
+ f2fs_bug_on(create);
if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
int i;
@@ -560,9 +554,9 @@ write:
inode_dec_dirty_dents(inode);
err = do_write_data_page(page);
} else {
- int ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
err = do_write_data_page(page);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
need_balance_fs = true;
}
if (err == -ENOENT)
@@ -641,7 +635,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
struct dnode_of_data dn;
int err = 0;
- int ilock;
f2fs_balance_fs(sbi);
repeat:
@@ -650,7 +643,7 @@ repeat:
return -ENOMEM;
*pagep = page;
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, index, ALLOC_NODE);
@@ -664,7 +657,7 @@ repeat:
if (err)
goto err;
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
return 0;
@@ -700,7 +693,7 @@ out:
return 0;
err:
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
f2fs_put_page(page, 1);
return err;
}
@@ -727,7 +720,7 @@ static int f2fs_write_end(struct file *file,
}
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -736,7 +729,7 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
return 0;
/* Needs synchronization with the cleaner */
- return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+ return blockdev_direct_IO(rw, iocb, inode, iter, offset,
get_data_block_ro);
}
@@ -763,6 +756,8 @@ static int f2fs_set_data_page_dirty(struct page *page)
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
+ trace_f2fs_set_page_dirty(page, DATA);
+
SetPageUptodate(page);
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
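
These data.c hunks drop the open-coded #ifdef CONFIG_F2FS_STAT_FS blocks at the call sites in favor of the stat_inc_total_hit()/stat_inc_read_hit() macros added to f2fs.h further down in this diff, which expand to nothing when statistics support is compiled out. A self-contained sketch of that compile-out counter pattern, with invented names (MYFS_STATS, stat_inc_*) rather than the real config symbol:

#include <stdio.h>

/* Toggle at build time, e.g. cc -DMYFS_STATS ... */
#ifdef MYFS_STATS
struct myfs_stats { unsigned long total_hit, read_hit; };
static struct myfs_stats stats;
#define stat_inc_total_hit()    (stats.total_hit++)
#define stat_inc_read_hit()     (stats.read_hit++)
#define stat_report()           printf("hits: %lu/%lu\n", stats.read_hit, stats.total_hit)
#else
/* With stats disabled the call sites stay, but expand to nothing. */
#define stat_inc_total_hit()    do { } while (0)
#define stat_inc_read_hit()     do { } while (0)
#define stat_report()           do { } while (0)
#endif

static int lookup(int key)
{
        stat_inc_total_hit();           /* unconditional at the call site */
        if (key % 2 == 0) {
                stat_inc_read_hit();
                return 1;
        }
        return 0;
}

int main(void)
{
        int i, found = 0;

        for (i = 0; i < 10; i++)
                found += lookup(i);
        stat_report();
        return found ? 0 : 1;
}
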
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 384c6daf9a89..594fc1bb64ef 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -139,7 +139,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
bool room = false;
int max_slots = 0;
- BUG_ON(level > MAX_DIR_HASH_DEPTH);
+ f2fs_bug_on(level > MAX_DIR_HASH_DEPTH);
nbucket = dir_buckets(level);
nblock = bucket_blocks(level);
@@ -346,7 +346,7 @@ static struct page *init_inode_metadata(struct inode *inode,
goto error;
}
- err = f2fs_init_acl(inode, dir);
+ err = f2fs_init_acl(inode, dir, page);
if (err)
goto error;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 608f0df5b919..625eb4befad4 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -18,6 +18,13 @@
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
+#include <linux/sched.h>
+
+#ifdef CONFIG_F2FS_CHECK_FS
+#define f2fs_bug_on(condition) BUG_ON(condition)
+#else
+#define f2fs_bug_on(condition)
+#endif
/*
* For mount options
@@ -298,6 +305,9 @@ struct f2fs_sm_info {
unsigned int main_segments; /* # of segments in main area */
unsigned int reserved_segments; /* # of reserved segments */
unsigned int ovp_segments; /* # of overprovision segments */
+
+ /* a threshold to reclaim prefree segments */
+ unsigned int rec_prefree_segments;
};
/*
@@ -318,14 +328,6 @@ enum count_type {
};
/*
- * Uses as sbi->fs_lock[NR_GLOBAL_LOCKS].
- * The checkpoint procedure blocks all the locks in this fs_lock array.
- * Some FS operations grab free locks, and if there is no free lock,
- * then wait to grab a lock in a round-robin manner.
- */
-#define NR_GLOBAL_LOCKS 8
-
-/*
* The below are the page types of bios used in submit_bio().
* The available types are:
* DATA User data pages. It operates as async mode.
@@ -365,12 +367,12 @@ struct f2fs_sb_info {
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
struct inode *meta_inode; /* cache meta blocks */
struct mutex cp_mutex; /* checkpoint procedure lock */
- struct mutex fs_lock[NR_GLOBAL_LOCKS]; /* blocking FS operations */
+ struct rw_semaphore cp_rwsem; /* blocking FS operations */
struct mutex node_write; /* locking node writes */
struct mutex writepages; /* mutex for writepages() */
- unsigned char next_lock_num; /* round-robin global locks */
- int por_doing; /* recovery is doing or not */
- int on_build_free_nids; /* build_free_nids is doing */
+ bool por_doing; /* recovery is doing or not */
+ bool on_build_free_nids; /* build_free_nids is doing */
+ struct task_struct *cp_task; /* checkpoint task */
/* for orphan inode management */
struct list_head orphan_inode_list; /* orphan inode list */
@@ -520,48 +522,24 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
-static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
+static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
- int i;
-
- for (i = 0; i < NR_GLOBAL_LOCKS; i++) {
- /*
- * This is the only time we take multiple fs_lock[]
- * instances; the order is immaterial since we
- * always hold cp_mutex, which serializes multiple
- * such operations.
- */
- mutex_lock_nest_lock(&sbi->fs_lock[i], &sbi->cp_mutex);
- }
+ down_read(&sbi->cp_rwsem);
}
-static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
+static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
- int i = 0;
- for (; i < NR_GLOBAL_LOCKS; i++)
- mutex_unlock(&sbi->fs_lock[i]);
+ up_read(&sbi->cp_rwsem);
}
-static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
+static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
- unsigned char next_lock = sbi->next_lock_num % NR_GLOBAL_LOCKS;
- int i = 0;
-
- for (; i < NR_GLOBAL_LOCKS; i++)
- if (mutex_trylock(&sbi->fs_lock[i]))
- return i;
-
- mutex_lock(&sbi->fs_lock[next_lock]);
- sbi->next_lock_num++;
- return next_lock;
+ down_write_nest_lock(&sbi->cp_rwsem, &sbi->cp_mutex);
}
-static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
+static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
- if (ilock < 0)
- return;
- BUG_ON(ilock >= NR_GLOBAL_LOCKS);
- mutex_unlock(&sbi->fs_lock[ilock]);
+ up_write(&sbi->cp_rwsem);
}
/*
@@ -612,8 +590,8 @@ static inline int dec_valid_block_count(struct f2fs_sb_info *sbi,
blkcnt_t count)
{
spin_lock(&sbi->stat_lock);
- BUG_ON(sbi->total_valid_block_count < (block_t) count);
- BUG_ON(inode->i_blocks < count);
+ f2fs_bug_on(sbi->total_valid_block_count < (block_t) count);
+ f2fs_bug_on(inode->i_blocks < count);
inode->i_blocks -= count;
sbi->total_valid_block_count -= (block_t)count;
spin_unlock(&sbi->stat_lock);
@@ -745,9 +723,9 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
{
spin_lock(&sbi->stat_lock);
- BUG_ON(sbi->total_valid_block_count < count);
- BUG_ON(sbi->total_valid_node_count < count);
- BUG_ON(inode->i_blocks < count);
+ f2fs_bug_on(sbi->total_valid_block_count < count);
+ f2fs_bug_on(sbi->total_valid_node_count < count);
+ f2fs_bug_on(inode->i_blocks < count);
inode->i_blocks -= count;
sbi->total_valid_node_count -= count;
@@ -768,7 +746,7 @@ static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
spin_lock(&sbi->stat_lock);
- BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
+ f2fs_bug_on(sbi->total_valid_inode_count == sbi->total_node_count);
sbi->total_valid_inode_count++;
spin_unlock(&sbi->stat_lock);
}
@@ -776,7 +754,7 @@ static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
spin_lock(&sbi->stat_lock);
- BUG_ON(!sbi->total_valid_inode_count);
+ f2fs_bug_on(!sbi->total_valid_inode_count);
sbi->total_valid_inode_count--;
spin_unlock(&sbi->stat_lock);
return 0;
@@ -797,7 +775,7 @@ static inline void f2fs_put_page(struct page *page, int unlock)
return;
if (unlock) {
- BUG_ON(!PageLocked(page));
+ f2fs_bug_on(!PageLocked(page));
unlock_page(page);
}
page_cache_release(page);
@@ -819,6 +797,20 @@ static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
}
+static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
+ gfp_t flags)
+{
+ void *entry;
+retry:
+ entry = kmem_cache_alloc(cachep, flags);
+ if (!entry) {
+ cond_resched();
+ goto retry;
+ }
+
+ return entry;
+}
+
#define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
static inline bool IS_INODE(struct page *page)
@@ -979,6 +971,7 @@ long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
*/
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
+int try_to_free_nats(struct f2fs_sb_info *, int);
void update_inode(struct inode *, struct page *);
int update_inode_page(struct inode *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
@@ -1033,6 +1026,7 @@ void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int truncate_xattr_node(struct inode *, struct page *);
+int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t);
int remove_inode_page(struct inode *);
struct page *new_inode_page(struct inode *, const struct qstr *);
struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
@@ -1059,6 +1053,7 @@ void destroy_node_manager_caches(void);
* segment.c
*/
void f2fs_balance_fs(struct f2fs_sb_info *);
+void f2fs_balance_fs_bg(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
void clear_prefree_segments(struct f2fs_sb_info *);
int npages_for_summary_flush(struct f2fs_sb_info *);
@@ -1172,7 +1167,16 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
return (struct f2fs_stat_info*)sbi->stat_info;
}
-#define stat_inc_call_count(si) ((si)->call_count++)
+#define stat_inc_call_count(si) ((si)->call_count++)
+#define stat_inc_bggc_count(sbi) ((sbi)->bg_gc++)
+#define stat_inc_dirty_dir(sbi) ((sbi)->n_dirty_dirs++)
+#define stat_dec_dirty_dir(sbi) ((sbi)->n_dirty_dirs--)
+#define stat_inc_total_hit(sb) ((F2FS_SB(sb))->total_hit_ext++)
+#define stat_inc_read_hit(sb) ((F2FS_SB(sb))->read_hit_ext++)
+#define stat_inc_seg_type(sbi, curseg) \
+ ((sbi)->segment_count[(curseg)->alloc_type]++)
+#define stat_inc_block_count(sbi, curseg) \
+ ((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_seg_count(sbi, type) \
do { \
@@ -1207,6 +1211,13 @@ void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_call_count(si)
+#define stat_inc_bggc_count(si)
+#define stat_inc_dirty_dir(sbi)
+#define stat_dec_dirty_dir(sbi)
+#define stat_inc_total_hit(sb)
+#define stat_inc_read_hit(sb)
+#define stat_inc_seg_type(sbi, curseg)
+#define stat_inc_block_count(sbi, curseg)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
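
The locking rework above collapses the fs_lock[NR_GLOBAL_LOCKS] array and its round-robin mutex_lock_op()/mutex_unlock_op(ilock) protocol into a single rw_semaphore: ordinary operations take the read side through f2fs_lock_op()/f2fs_unlock_op() and may run concurrently, while the checkpoint path takes the write side through f2fs_lock_all()/f2fs_unlock_all() and excludes them all, so callers no longer carry an ilock cookie. A minimal user-space analogue of that caller pattern, built on pthread_rwlock_t and an atomic counter instead of the kernel rw_semaphore (a sketch, not the kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_rwlock_t cp_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static atomic_long blocks_written;

static void fs_op(void)                 /* ordinary operation: read side */
{
        pthread_rwlock_rdlock(&cp_rwsem);
        blocks_written++;               /* many of these may run concurrently */
        pthread_rwlock_unlock(&cp_rwsem);
}

static void checkpoint(void)            /* checkpoint: write side blocks them all */
{
        long snapshot;

        pthread_rwlock_wrlock(&cp_rwsem);
        snapshot = blocks_written;      /* no fs_op() can run while held */
        printf("checkpoint sees a stable count: %ld\n", snapshot);
        pthread_rwlock_unlock(&cp_rwsem);
}

static void *worker(void *arg)
{
        int i;

        (void)arg;
        for (i = 0; i < 1000; i++)
                fs_op();
        return NULL;
}

int main(void)
{
        pthread_t t[4];
        int i;

        for (i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, worker, NULL);
        checkpoint();
        for (i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        checkpoint();
        return 0;
}
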
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 02c906971cc6..4902cebe6e8f 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -35,18 +35,18 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
block_t old_blk_addr;
struct dnode_of_data dn;
- int err, ilock;
+ int err;
f2fs_balance_fs(sbi);
sb_start_pagefault(inode->i_sb);
/* block allocation */
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
if (err) {
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
goto out;
}
@@ -56,12 +56,12 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
err = reserve_new_block(&dn);
if (err) {
f2fs_put_dnode(&dn);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
goto out;
}
}
f2fs_put_dnode(&dn);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
file_update_time(vma->vm_file);
lock_page(page);
@@ -88,6 +88,7 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
set_page_dirty(page);
SetPageUptodate(page);
+ trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
/* fill the page */
wait_on_page_writeback(page);
@@ -188,8 +189,9 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (ret)
goto out;
}
- filemap_fdatawait_range(sbi->node_inode->i_mapping,
- 0, LONG_MAX);
+ ret = wait_on_node_pages_writeback(sbi, inode->i_ino);
+ if (ret)
+ goto out;
ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
}
out:
@@ -270,7 +272,7 @@ static int truncate_blocks(struct inode *inode, u64 from)
unsigned int blocksize = inode->i_sb->s_blocksize;
struct dnode_of_data dn;
pgoff_t free_from;
- int count = 0, ilock = -1;
+ int count = 0;
int err;
trace_f2fs_truncate_blocks_enter(inode, from);
@@ -278,13 +280,13 @@ static int truncate_blocks(struct inode *inode, u64 from)
free_from = (pgoff_t)
((from + blocksize - 1) >> (sbi->log_blocksize));
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
if (err) {
if (err == -ENOENT)
goto free_next;
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
trace_f2fs_truncate_blocks_exit(inode, err);
return err;
}
@@ -295,7 +297,7 @@ static int truncate_blocks(struct inode *inode, u64 from)
count = ADDRS_PER_BLOCK;
count -= dn.ofs_in_node;
- BUG_ON(count < 0);
+ f2fs_bug_on(count < 0);
if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
truncate_data_blocks_range(&dn, count);
@@ -305,7 +307,7 @@ static int truncate_blocks(struct inode *inode, u64 from)
f2fs_put_dnode(&dn);
free_next:
err = truncate_inode_blocks(inode, free_from);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
/* lastly zero out the first data page */
truncate_partial_data_page(inode, from);
@@ -416,16 +418,15 @@ static void fill_zero(struct inode *inode, pgoff_t index,
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct page *page;
- int ilock;
if (!len)
return;
f2fs_balance_fs(sbi);
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
page = get_new_data_page(inode, NULL, index, false);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
if (!IS_ERR(page)) {
wait_on_page_writeback(page);
@@ -484,7 +485,6 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
struct address_space *mapping = inode->i_mapping;
loff_t blk_start, blk_end;
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- int ilock;
f2fs_balance_fs(sbi);
@@ -493,9 +493,9 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
truncate_inode_pages_range(mapping, blk_start,
blk_end - 1);
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
ret = truncate_hole(inode, pg_start, pg_end);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
}
}
@@ -529,13 +529,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
for (index = pg_start; index <= pg_end; index++) {
struct dnode_of_data dn;
- int ilock;
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
set_new_dnode(&dn, inode, NULL, NULL, 0);
ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
if (ret) {
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
break;
}
@@ -543,12 +542,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
ret = reserve_new_block(&dn);
if (ret) {
f2fs_put_dnode(&dn);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
break;
}
}
f2fs_put_dnode(&dn);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
if (pg_start == pg_end)
new_size = offset + len;
@@ -685,8 +684,8 @@ const struct file_operations f2fs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.open = generic_file_open,
.mmap = f2fs_file_mmap,
.fsync = f2fs_sync_file,
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 2f157e883687..b7ad1ec7e4cc 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -77,13 +77,15 @@ static int gc_thread_func(void *data)
else
wait_ms = increase_sleep_time(gc_th, wait_ms);
-#ifdef CONFIG_F2FS_STAT_FS
- sbi->bg_gc++;
-#endif
+ stat_inc_bggc_count(sbi);
/* if return value is not zero, no victim was selected */
if (f2fs_gc(sbi))
wait_ms = gc_th->no_gc_sleep_time;
+
+ /* balancing f2fs's metadata periodically */
+ f2fs_balance_fs_bg(sbi);
+
} while (!kthread_should_stop());
return 0;
}
@@ -236,8 +238,8 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
-static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
- struct victim_sel_policy *p)
+static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
+ unsigned int segno, struct victim_sel_policy *p)
{
if (p->alloc_mode == SSR)
return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
@@ -293,7 +295,11 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
}
break;
}
- p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
+
+ p.offset = segno + p.ofs_unit;
+ if (p.ofs_unit > 1)
+ p.offset -= segno % p.ofs_unit;
+
secno = GET_SECNO(sbi, segno);
if (sec_usage_check(sbi, secno))
@@ -306,10 +312,9 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
if (p.min_cost > cost) {
p.min_segno = segno;
p.min_cost = cost;
- }
-
- if (cost == max_cost)
+ } else if (unlikely(cost == max_cost)) {
continue;
+ }
if (nsearched++ >= p.max_search) {
sbi->last_victim[p.gc_mode] = segno;
@@ -358,12 +363,8 @@ static void add_gc_inode(struct inode *inode, struct list_head *ilist)
iput(inode);
return;
}
-repeat:
- new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
- if (!new_ie) {
- cond_resched();
- goto repeat;
- }
+
+ new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
new_ie->inode = inode;
list_add_tail(&new_ie->list, ilist);
}
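
In get_victim_by_default() the next-candidate offset changes from ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit to segno + p.ofs_unit, rounded down by segno % p.ofs_unit only when ofs_unit > 1. Both expressions appear to evaluate to round_down(segno, ofs_unit) + ofs_unit; the rewrite simply trades a divide and multiply for one modulo, and skips even that in the common ofs_unit == 1 case. A quick standalone check of that equivalence over a few illustrative values:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int segno, unit;

        for (unit = 1; unit <= 8; unit *= 2) {
                for (segno = 0; segno < 64; segno++) {
                        unsigned int old = ((segno / unit) * unit) + unit;
                        unsigned int new = segno + unit;

                        if (unit > 1)
                                new -= segno % unit;
                        assert(old == new);
                }
        }
        printf("old and new offset formulas agree\n");
        return 0;
}
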
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 9339cd292047..d0eaa9faeca0 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -37,6 +37,31 @@ void f2fs_set_inode_flags(struct inode *inode)
inode->i_flags |= S_DIRSYNC;
}
+static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+{
+ if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+ if (ri->i_addr[0])
+ inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
+ else
+ inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
+ }
+}
+
+static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+{
+ if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+ if (old_valid_dev(inode->i_rdev)) {
+ ri->i_addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
+ ri->i_addr[1] = 0;
+ } else {
+ ri->i_addr[0] = 0;
+ ri->i_addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
+ ri->i_addr[2] = 0;
+ }
+ }
+}
+
static int do_read_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -73,10 +98,6 @@ static int do_read_inode(struct inode *inode)
inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
inode->i_generation = le32_to_cpu(ri->i_generation);
- if (ri->i_addr[0])
- inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
- else
- inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
@@ -84,8 +105,13 @@ static int do_read_inode(struct inode *inode)
fi->flags = 0;
fi->i_advise = ri->i_advise;
fi->i_pino = le32_to_cpu(ri->i_pino);
+
get_extent_info(&fi->ext, ri->i_ext);
get_inline_info(fi, ri);
+
+ /* get rdev by using inline_info */
+ __get_inode_rdev(inode, ri);
+
f2fs_put_page(node_page, 1);
return 0;
}
@@ -179,21 +205,10 @@ void update_inode(struct inode *inode, struct page *node_page)
ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
ri->i_generation = cpu_to_le32(inode->i_generation);
- if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
- if (old_valid_dev(inode->i_rdev)) {
- ri->i_addr[0] =
- cpu_to_le32(old_encode_dev(inode->i_rdev));
- ri->i_addr[1] = 0;
- } else {
- ri->i_addr[0] = 0;
- ri->i_addr[1] =
- cpu_to_le32(new_encode_dev(inode->i_rdev));
- ri->i_addr[2] = 0;
- }
- }
-
+ __set_inode_rdev(inode, ri);
set_cold_node(inode, node_page);
set_page_dirty(node_page);
+
clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
}
@@ -214,7 +229,7 @@ int update_inode_page(struct inode *inode)
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- int ret, ilock;
+ int ret;
if (inode->i_ino == F2FS_NODE_INO(sbi) ||
inode->i_ino == F2FS_META_INO(sbi))
@@ -227,9 +242,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
* We need to lock here to prevent producing dirty node pages
* during the urgent cleaning time when running out of free sections.
*/
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
ret = update_inode_page(inode);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
if (wbc)
f2fs_balance_fs(sbi);
@@ -243,7 +258,6 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
void f2fs_evict_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- int ilock;
trace_f2fs_evict_inode(inode);
truncate_inode_pages(&inode->i_data, 0);
@@ -252,7 +266,7 @@ void f2fs_evict_inode(struct inode *inode)
inode->i_ino == F2FS_META_INO(sbi))
goto no_delete;
- BUG_ON(atomic_read(&F2FS_I(inode)->dirty_dents));
+ f2fs_bug_on(atomic_read(&F2FS_I(inode)->dirty_dents));
remove_dirty_dir_inode(inode);
if (inode->i_nlink || is_bad_inode(inode))
@@ -265,9 +279,9 @@ void f2fs_evict_inode(struct inode *inode)
if (F2FS_HAS_BLOCKS(inode))
f2fs_truncate(inode);
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
remove_inode_page(inode);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
sb_end_intwrite(inode->i_sb);
no_delete:
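
The new __get_inode_rdev()/__set_inode_rdev() helpers factor out the existing on-disk convention: when old_valid_dev() accepts the device number it is stored via old_encode_dev() in i_addr[0], otherwise i_addr[0] is written as zero and new_encode_dev() fills i_addr[1], so a zero first slot acts as the new-format flag on the read side. The sketch below restates only that slot-selection protocol with stand-in encode helpers; the real bit layouts live in the kernel's kdev_t helpers and are not reproduced here.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for old_valid_dev()/old_encode_dev()/new_encode_dev(); the
 * actual kernel encodings differ, only the slot selection matters here. */
static int old_valid_dev(uint32_t dev)      { return dev < 0x10000; }
static uint32_t old_encode_dev(uint32_t d)  { return d; }
static uint32_t new_encode_dev(uint32_t d)  { return d; }

static void set_inode_rdev(uint32_t rdev, uint32_t i_addr[2])
{
        if (old_valid_dev(rdev)) {
                i_addr[0] = old_encode_dev(rdev);
                i_addr[1] = 0;
        } else {
                i_addr[0] = 0;                  /* flag: use the new encoding */
                i_addr[1] = new_encode_dev(rdev);
        }
}

static uint32_t get_inode_rdev(const uint32_t i_addr[2])
{
        return i_addr[0] ? i_addr[0] : i_addr[1];
}

int main(void)
{
        uint32_t slots[2];

        set_inode_rdev(0x0801, slots);          /* small dev: old slot */
        assert(get_inode_rdev(slots) == 0x0801);

        set_inode_rdev(0x00120005, slots);      /* large dev: new slot */
        assert(slots[0] == 0 && get_inode_rdev(slots) == 0x00120005);

        printf("rdev round-trips through both encodings\n");
        return 0;
}
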
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 2a5359c990fc..575adac17f8b 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -27,19 +27,19 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
nid_t ino;
struct inode *inode;
bool nid_free = false;
- int err, ilock;
+ int err;
inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
if (!alloc_nid(sbi, &ino)) {
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
err = -ENOSPC;
goto fail;
}
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
inode->i_uid = current_fsuid();
@@ -115,7 +115,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct inode *inode;
nid_t ino = 0;
- int err, ilock;
+ int err;
f2fs_balance_fs(sbi);
@@ -131,9 +131,9 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
inode->i_mapping->a_ops = &f2fs_dblock_aops;
ino = inode->i_ino;
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
if (err)
goto out;
@@ -157,7 +157,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
struct inode *inode = old_dentry->d_inode;
struct super_block *sb = dir->i_sb;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
- int err, ilock;
+ int err;
f2fs_balance_fs(sbi);
@@ -165,9 +165,9 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
ihold(inode);
set_inode_flag(F2FS_I(inode), FI_INC_LINK);
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
if (err)
goto out;
@@ -220,7 +220,6 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
struct f2fs_dir_entry *de;
struct page *page;
int err = -ENOENT;
- int ilock;
trace_f2fs_unlink_enter(dir, dentry);
f2fs_balance_fs(sbi);
@@ -229,16 +228,16 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
if (!de)
goto fail;
+ f2fs_lock_op(sbi);
err = acquire_orphan_inode(sbi);
if (err) {
+ f2fs_unlock_op(sbi);
kunmap(page);
f2fs_put_page(page, 0);
goto fail;
}
-
- ilock = mutex_lock_op(sbi);
f2fs_delete_entry(de, page, inode);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
/* In order to evict this inode, we set it dirty */
mark_inode_dirty(inode);
@@ -254,7 +253,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct inode *inode;
size_t symlen = strlen(symname) + 1;
- int err, ilock;
+ int err;
f2fs_balance_fs(sbi);
@@ -265,9 +264,9 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
inode->i_op = &f2fs_symlink_inode_operations;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
if (err)
goto out;
@@ -290,7 +289,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
struct inode *inode;
- int err, ilock;
+ int err;
f2fs_balance_fs(sbi);
@@ -304,9 +303,9 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
set_inode_flag(F2FS_I(inode), FI_INC_LINK);
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
if (err)
goto out_fail;
@@ -342,7 +341,6 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct inode *inode;
int err = 0;
- int ilock;
if (!new_valid_dev(rdev))
return -EINVAL;
@@ -356,9 +354,9 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
init_special_inode(inode, inode->i_mode, rdev);
inode->i_op = &f2fs_special_inode_operations;
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
if (err)
goto out;
@@ -387,7 +385,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct f2fs_dir_entry *old_dir_entry = NULL;
struct f2fs_dir_entry *old_entry;
struct f2fs_dir_entry *new_entry;
- int err = -ENOENT, ilock = -1;
+ int err = -ENOENT;
f2fs_balance_fs(sbi);
@@ -402,7 +400,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out_old;
}
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
if (new_inode) {
@@ -467,7 +465,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
update_inode_page(old_dir);
}
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
return 0;
put_out_dir:
@@ -477,7 +475,7 @@ out_dir:
kunmap(old_dir_page);
f2fs_put_page(old_dir_page, 0);
}
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
out_old:
kunmap(old_page);
f2fs_put_page(old_page, 0);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 51ef27894433..4ac4150d421d 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -204,7 +204,7 @@ retry:
}
e->ni = *ni;
e->checkpointed = true;
- BUG_ON(ni->blk_addr == NEW_ADDR);
+ f2fs_bug_on(ni->blk_addr == NEW_ADDR);
} else if (new_blkaddr == NEW_ADDR) {
/*
* when nid is reallocated,
@@ -212,19 +212,19 @@ retry:
* So, reinitialize it with new information.
*/
e->ni = *ni;
- BUG_ON(ni->blk_addr != NULL_ADDR);
+ f2fs_bug_on(ni->blk_addr != NULL_ADDR);
}
if (new_blkaddr == NEW_ADDR)
e->checkpointed = false;
/* sanity check */
- BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
- BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
+ f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
+ f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
new_blkaddr == NULL_ADDR);
- BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
+ f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
new_blkaddr == NEW_ADDR);
- BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
+ f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
nat_get_blkaddr(e) != NULL_ADDR &&
new_blkaddr == NEW_ADDR);
@@ -240,7 +240,7 @@ retry:
write_unlock(&nm_i->nat_tree_lock);
}
-static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
+int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -495,10 +495,10 @@ static void truncate_node(struct dnode_of_data *dn)
get_node_info(sbi, dn->nid, &ni);
if (dn->inode->i_blocks == 0) {
- BUG_ON(ni.blk_addr != NULL_ADDR);
+ f2fs_bug_on(ni.blk_addr != NULL_ADDR);
goto invalidate;
}
- BUG_ON(ni.blk_addr == NULL_ADDR);
+ f2fs_bug_on(ni.blk_addr == NULL_ADDR);
/* Deallocate node address */
invalidate_blocks(sbi, ni.blk_addr);
@@ -822,7 +822,7 @@ int remove_inode_page(struct inode *inode)
}
/* 0 is possible, after f2fs_new_inode() is failed */
- BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
+ f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
set_new_dnode(&dn, inode, page, page, ino);
truncate_node(&dn);
return 0;
@@ -863,7 +863,7 @@ struct page *new_node_page(struct dnode_of_data *dn,
get_node_info(sbi, dn->nid, &old_ni);
/* Reinitialize old_ni with new node page */
- BUG_ON(old_ni.blk_addr != NULL_ADDR);
+ f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
new_ni = old_ni;
new_ni.ino = dn->inode->i_ino;
set_node_addr(sbi, &new_ni, NEW_ADDR);
@@ -969,7 +969,7 @@ repeat:
goto repeat;
}
got_it:
- BUG_ON(nid != nid_of_node(page));
+ f2fs_bug_on(nid != nid_of_node(page));
mark_page_accessed(page);
return page;
}
@@ -1148,6 +1148,47 @@ continue_unlock:
return nwritten;
}
+int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ struct address_space *mapping = sbi->node_inode->i_mapping;
+ pgoff_t index = 0, end = LONG_MAX;
+ struct pagevec pvec;
+ int nr_pages;
+ int ret2 = 0, ret = 0;
+
+ pagevec_init(&pvec, 0);
+ while ((index <= end) &&
+ (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+ PAGECACHE_TAG_WRITEBACK,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
+ unsigned i;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ /* until radix tree lookup accepts end_index */
+ if (page->index > end)
+ continue;
+
+ if (ino && ino_of_node(page) == ino) {
+ wait_on_page_writeback(page);
+ if (TestClearPageError(page))
+ ret = -EIO;
+ }
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+
+ if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+ ret2 = -ENOSPC;
+ if (test_and_clear_bit(AS_EIO, &mapping->flags))
+ ret2 = -EIO;
+ if (!ret)
+ ret = ret2;
+ return ret;
+}
+
static int f2fs_write_node_page(struct page *page,
struct writeback_control *wbc)
{
@@ -1156,11 +1197,14 @@ static int f2fs_write_node_page(struct page *page,
block_t new_addr;
struct node_info ni;
+ if (sbi->por_doing)
+ goto redirty_out;
+
wait_on_page_writeback(page);
/* get old block addr of this node page */
nid = nid_of_node(page);
- BUG_ON(page->index != nid);
+ f2fs_bug_on(page->index != nid);
get_node_info(sbi, nid, &ni);
@@ -1171,12 +1215,8 @@ static int f2fs_write_node_page(struct page *page,
return 0;
}
- if (wbc->for_reclaim) {
- dec_page_count(sbi, F2FS_DIRTY_NODES);
- wbc->pages_skipped++;
- set_page_dirty(page);
- return AOP_WRITEPAGE_ACTIVATE;
- }
+ if (wbc->for_reclaim)
+ goto redirty_out;
mutex_lock(&sbi->node_write);
set_page_writeback(page);
@@ -1186,6 +1226,12 @@ static int f2fs_write_node_page(struct page *page,
mutex_unlock(&sbi->node_write);
unlock_page(page);
return 0;
+
+redirty_out:
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+ wbc->pages_skipped++;
+ set_page_dirty(page);
+ return AOP_WRITEPAGE_ACTIVATE;
}
/*
@@ -1200,11 +1246,8 @@ static int f2fs_write_node_pages(struct address_space *mapping,
struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
long nr_to_write = wbc->nr_to_write;
- /* First check balancing cached NAT entries */
- if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
- f2fs_sync_fs(sbi->sb, true);
- return 0;
- }
+ /* balancing f2fs's metadata in background */
+ f2fs_balance_fs_bg(sbi);
/* collect a number of dirty node pages and write together */
if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
@@ -1223,6 +1266,8 @@ static int f2fs_set_node_page_dirty(struct page *page)
struct address_space *mapping = page->mapping;
struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
+ trace_f2fs_set_page_dirty(page, NODE);
+
SetPageUptodate(page);
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
@@ -1291,23 +1336,18 @@ static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
if (nid == 0)
return 0;
- if (!build)
- goto retry;
-
- /* do not add allocated nids */
- read_lock(&nm_i->nat_tree_lock);
- ne = __lookup_nat_cache(nm_i, nid);
- if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
- allocated = true;
- read_unlock(&nm_i->nat_tree_lock);
- if (allocated)
- return 0;
-retry:
- i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
- if (!i) {
- cond_resched();
- goto retry;
+ if (build) {
+ /* do not add allocated nids */
+ read_lock(&nm_i->nat_tree_lock);
+ ne = __lookup_nat_cache(nm_i, nid);
+ if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
+ allocated = true;
+ read_unlock(&nm_i->nat_tree_lock);
+ if (allocated)
+ return 0;
}
+
+ i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
i->nid = nid;
i->state = NID_NEW;
@@ -1350,7 +1390,7 @@ static void scan_nat_page(struct f2fs_nm_info *nm_i,
break;
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
- BUG_ON(blk_addr == NEW_ADDR);
+ f2fs_bug_on(blk_addr == NEW_ADDR);
if (blk_addr == NULL_ADDR) {
if (add_free_nid(nm_i, start_nid, true) < 0)
break;
@@ -1421,14 +1461,14 @@ retry:
/* We should not use stale free nids created by build_free_nids */
if (nm_i->fcnt && !sbi->on_build_free_nids) {
- BUG_ON(list_empty(&nm_i->free_nid_list));
+ f2fs_bug_on(list_empty(&nm_i->free_nid_list));
list_for_each(this, &nm_i->free_nid_list) {
i = list_entry(this, struct free_nid, list);
if (i->state == NID_NEW)
break;
}
- BUG_ON(i->state != NID_NEW);
+ f2fs_bug_on(i->state != NID_NEW);
*nid = i->nid;
i->state = NID_ALLOC;
nm_i->fcnt--;
@@ -1439,9 +1479,9 @@ retry:
/* Let's scan nat pages and its caches to get free nids */
mutex_lock(&nm_i->build_lock);
- sbi->on_build_free_nids = 1;
+ sbi->on_build_free_nids = true;
build_free_nids(sbi);
- sbi->on_build_free_nids = 0;
+ sbi->on_build_free_nids = false;
mutex_unlock(&nm_i->build_lock);
goto retry;
}
@@ -1456,7 +1496,7 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
spin_lock(&nm_i->free_nid_list_lock);
i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
- BUG_ON(!i || i->state != NID_ALLOC);
+ f2fs_bug_on(!i || i->state != NID_ALLOC);
__del_from_free_nid_list(i);
spin_unlock(&nm_i->free_nid_list_lock);
}
@@ -1474,7 +1514,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
spin_lock(&nm_i->free_nid_list_lock);
i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
- BUG_ON(!i || i->state != NID_ALLOC);
+ f2fs_bug_on(!i || i->state != NID_ALLOC);
if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
__del_from_free_nid_list(i);
} else {
@@ -1677,7 +1717,7 @@ to_nat_page:
nat_blk = page_address(page);
}
- BUG_ON(!nat_blk);
+ f2fs_bug_on(!nat_blk);
raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
new_blkaddr = nat_get_blkaddr(ne);
@@ -1781,11 +1821,11 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
/* destroy free nid list */
spin_lock(&nm_i->free_nid_list_lock);
list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
- BUG_ON(i->state == NID_ALLOC);
+ f2fs_bug_on(i->state == NID_ALLOC);
__del_from_free_nid_list(i);
nm_i->fcnt--;
}
- BUG_ON(nm_i->fcnt);
+ f2fs_bug_on(nm_i->fcnt);
spin_unlock(&nm_i->free_nid_list_lock);
/* destroy nat cache */
@@ -1799,7 +1839,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
__del_from_nat_cache(nm_i, e);
}
}
- BUG_ON(nm_i->nat_cnt);
+ f2fs_bug_on(nm_i->nat_cnt);
write_unlock(&nm_i->nat_tree_lock);
kfree(nm_i->nat_bitmap);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 51ef5eec33d7..fdc81161f254 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -64,24 +64,31 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
name.name = raw_inode->i_name;
retry:
de = f2fs_find_entry(dir, &name, &page);
- if (de && inode->i_ino == le32_to_cpu(de->ino)) {
- kunmap(page);
- f2fs_put_page(page, 0);
- goto out;
- }
+ if (de && inode->i_ino == le32_to_cpu(de->ino))
+ goto out_unmap_put;
if (de) {
einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
if (IS_ERR(einode)) {
WARN_ON(1);
if (PTR_ERR(einode) == -ENOENT)
err = -EEXIST;
- goto out;
+ goto out_unmap_put;
+ }
+ err = acquire_orphan_inode(F2FS_SB(inode->i_sb));
+ if (err) {
+ iput(einode);
+ goto out_unmap_put;
}
f2fs_delete_entry(de, page, einode);
iput(einode);
goto retry;
}
err = __f2fs_add_link(dir, &name, inode);
+ goto out;
+
+out_unmap_put:
+ kunmap(page);
+ f2fs_put_page(page, 0);
out:
f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: "
"ino = %x, name = %s, dir = %lx, err = %d",
@@ -285,7 +292,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
struct f2fs_summary sum;
struct node_info ni;
int err = 0, recovered = 0;
- int ilock;
start = start_bidx_of_node(ofs_of_node(page), fi);
if (IS_INODE(page))
@@ -293,20 +299,20 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
else
end = start + ADDRS_PER_BLOCK;
- ilock = mutex_lock_op(sbi);
+ f2fs_lock_op(sbi);
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, start, ALLOC_NODE);
if (err) {
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
return err;
}
wait_on_page_writeback(dn.node_page);
get_node_info(sbi, dn.nid, &ni);
- BUG_ON(ni.ino != ino_of_node(page));
- BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));
+ f2fs_bug_on(ni.ino != ino_of_node(page));
+ f2fs_bug_on(ofs_of_node(dn.node_page) != ofs_of_node(page));
for (; start < end; start++) {
block_t src, dest;
@@ -316,9 +322,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
if (src == NULL_ADDR) {
- int err = reserve_new_block(&dn);
+ err = reserve_new_block(&dn);
/* We should not get -ENOSPC */
- BUG_ON(err);
+ f2fs_bug_on(err);
}
/* Check the previous node page having this index */
@@ -349,7 +355,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
err:
f2fs_put_dnode(&dn);
- mutex_unlock_op(sbi, ilock);
+ f2fs_unlock_op(sbi);
f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, "
"recovered_data = %d blocks, err = %d",
@@ -419,6 +425,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
{
struct list_head inode_list;
int err;
+ bool need_writecp = false;
fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
sizeof(struct fsync_inode_entry), NULL);
@@ -428,7 +435,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&inode_list);
/* step #1: find fsynced inode numbers */
- sbi->por_doing = 1;
+ sbi->por_doing = true;
err = find_fsync_dnodes(sbi, &inode_list);
if (err)
goto out;
@@ -436,14 +443,16 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
if (list_empty(&inode_list))
goto out;
+ need_writecp = true;
+
/* step #2: recover data */
err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
- BUG_ON(!list_empty(&inode_list));
+ f2fs_bug_on(!list_empty(&inode_list));
out:
destroy_fsync_dnodes(&inode_list);
kmem_cache_destroy(fsync_entry_slab);
- sbi->por_doing = 0;
- if (!err)
+ sbi->por_doing = false;
+ if (!err && need_writecp)
write_checkpoint(sbi, false);
return err;
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 09af9c7b0f52..45a326a8ac8f 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -36,6 +36,14 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
}
}
+void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
+{
+ /* check the # of cached NAT entries and prefree segments */
+ if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
+ excess_prefree_segs(sbi))
+ f2fs_sync_fs(sbi->sb, true);
+}
+
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
enum dirty_type dirty_type)
{
@@ -50,20 +58,10 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
if (dirty_type == DIRTY) {
struct seg_entry *sentry = get_seg_entry(sbi, segno);
- enum dirty_type t = DIRTY_HOT_DATA;
+ enum dirty_type t = sentry->type;
- dirty_type = sentry->type;
-
- if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
- dirty_i->nr_dirty[dirty_type]++;
-
- /* Only one bitmap should be set */
- for (; t <= DIRTY_COLD_NODE; t++) {
- if (t == dirty_type)
- continue;
- if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
- dirty_i->nr_dirty[t]--;
- }
+ if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
+ dirty_i->nr_dirty[t]++;
}
}
@@ -76,12 +74,11 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
dirty_i->nr_dirty[dirty_type]--;
if (dirty_type == DIRTY) {
- enum dirty_type t = DIRTY_HOT_DATA;
+ struct seg_entry *sentry = get_seg_entry(sbi, segno);
+ enum dirty_type t = sentry->type;
- /* clear all the bitmaps */
- for (; t <= DIRTY_COLD_NODE; t++)
- if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
- dirty_i->nr_dirty[t]--;
+ if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
+ dirty_i->nr_dirty[t]--;
if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
clear_bit(GET_SECNO(sbi, segno),
@@ -195,7 +192,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
new_vblocks = se->valid_blocks + del;
offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);
- BUG_ON((new_vblocks >> (sizeof(unsigned short) << 3) ||
+ f2fs_bug_on((new_vblocks >> (sizeof(unsigned short) << 3) ||
(new_vblocks > sbi->blocks_per_seg)));
se->valid_blocks = new_vblocks;
@@ -235,7 +232,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
unsigned int segno = GET_SEGNO(sbi, addr);
struct sit_info *sit_i = SIT_I(sbi);
- BUG_ON(addr == NULL_ADDR);
+ f2fs_bug_on(addr == NULL_ADDR);
if (addr == NEW_ADDR)
return;
@@ -267,9 +264,8 @@ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
*/
int npages_for_summary_flush(struct f2fs_sb_info *sbi)
{
- int total_size_bytes = 0;
int valid_sum_count = 0;
- int i, sum_space;
+ int i, sum_in_page;
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
if (sbi->ckpt->alloc_type[i] == SSR)
@@ -278,13 +274,12 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi)
valid_sum_count += curseg_blkoff(sbi, i);
}
- total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1)
- + sizeof(struct nat_journal) + 2
- + sizeof(struct sit_journal) + 2;
- sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE;
- if (total_size_bytes < sum_space)
+ sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
+ SUM_FOOTER_SIZE) / SUMMARY_SIZE;
+ if (valid_sum_count <= sum_in_page)
return 1;
- else if (total_size_bytes < 2 * sum_space)
+ else if ((valid_sum_count - sum_in_page) <=
+ (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
return 2;
return 3;
}
@@ -350,7 +345,7 @@ find_other_zone:
if (dir == ALLOC_RIGHT) {
secno = find_next_zero_bit(free_i->free_secmap,
TOTAL_SECS(sbi), 0);
- BUG_ON(secno >= TOTAL_SECS(sbi));
+ f2fs_bug_on(secno >= TOTAL_SECS(sbi));
} else {
go_left = 1;
left_start = hint - 1;
@@ -366,7 +361,7 @@ find_other_zone:
}
left_start = find_next_zero_bit(free_i->free_secmap,
TOTAL_SECS(sbi), 0);
- BUG_ON(left_start >= TOTAL_SECS(sbi));
+ f2fs_bug_on(left_start >= TOTAL_SECS(sbi));
break;
}
secno = left_start;
@@ -405,7 +400,7 @@ skip_left:
}
got_it:
/* set it as dirty segment in free segmap */
- BUG_ON(test_bit(segno, free_i->free_segmap));
+ f2fs_bug_on(test_bit(segno, free_i->free_segmap));
__set_inuse(sbi, segno);
*newseg = segno;
write_unlock(&free_i->segmap_lock);
@@ -550,9 +545,8 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
change_curseg(sbi, type, true);
else
new_curseg(sbi, type, false);
-#ifdef CONFIG_F2FS_STAT_FS
- sbi->segment_count[curseg->alloc_type]++;
-#endif
+
+ stat_inc_seg_type(sbi, curseg);
}
void allocate_new_segments(struct f2fs_sb_info *sbi)
@@ -597,6 +591,10 @@ static void f2fs_end_io_write(struct bio *bio, int err)
if (p->is_sync)
complete(p->wait);
+
+ if (!get_pages(p->sbi, F2FS_WRITEBACK) && p->sbi->cp_task)
+ wake_up_process(p->sbi->cp_task);
+
kfree(p);
bio_put(bio);
}
@@ -657,6 +655,7 @@ static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
block_t blk_addr, enum page_type type)
{
struct block_device *bdev = sbi->sb->s_bdev;
+ int bio_blocks;
verify_block_addr(sbi, blk_addr);
@@ -676,8 +675,9 @@ retry:
goto retry;
}
- sbi->bio[type] = f2fs_bio_alloc(bdev, max_hw_blocks(sbi));
- sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+ bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+ sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
+ sbi->bio[type]->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
sbi->bio[type]->bi_private = priv;
/*
* The end_io will be assigned at the submission phase.
@@ -771,7 +771,7 @@ static int __get_segment_type(struct page *page, enum page_type p_type)
return __get_segment_type_4(page, p_type);
}
/* NR_CURSEG_TYPE(6) logs by default */
- BUG_ON(sbi->active_logs != NR_CURSEG_TYPE);
+ f2fs_bug_on(sbi->active_logs != NR_CURSEG_TYPE);
return __get_segment_type_6(page, p_type);
}
@@ -801,9 +801,8 @@ static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
mutex_lock(&sit_i->sentry_lock);
__refresh_next_blkoff(sbi, curseg);
-#ifdef CONFIG_F2FS_STAT_FS
- sbi->block_count[curseg->alloc_type]++;
-#endif
+
+ stat_inc_block_count(sbi, curseg);
/*
* SIT information should be updated before segment allocation,
@@ -849,7 +848,7 @@ void write_data_page(struct inode *inode, struct page *page,
struct f2fs_summary sum;
struct node_info ni;
- BUG_ON(old_blkaddr == NULL_ADDR);
+ f2fs_bug_on(old_blkaddr == NULL_ADDR);
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
@@ -1122,8 +1121,6 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
SUM_JOURNAL_SIZE);
written_size += SUM_JOURNAL_SIZE;
- set_page_dirty(page);
-
/* Step 3: write summary entries */
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
unsigned short blkoff;
@@ -1142,18 +1139,20 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
summary = (struct f2fs_summary *)(kaddr + written_size);
*summary = seg_i->sum_blk->entries[j];
written_size += SUMMARY_SIZE;
- set_page_dirty(page);
if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
SUM_FOOTER_SIZE)
continue;
+ set_page_dirty(page);
f2fs_put_page(page, 1);
page = NULL;
}
}
- if (page)
+ if (page) {
+ set_page_dirty(page);
f2fs_put_page(page, 1);
+ }
}
static void write_normal_summaries(struct f2fs_sb_info *sbi,
@@ -1239,7 +1238,7 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
/* get current sit block page without lock */
src_page = get_meta_page(sbi, src_off);
dst_page = grab_meta_page(sbi, dst_off);
- BUG_ON(PageDirty(src_page));
+ f2fs_bug_on(PageDirty(src_page));
src_addr = page_address(src_page);
dst_addr = page_address(dst_page);
@@ -1271,9 +1270,9 @@ static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
__mark_sit_entry_dirty(sbi, segno);
}
update_sits_in_cursum(sum, -sits_in_cursum(sum));
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/*
@@ -1637,6 +1636,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
+ sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS;
err = build_sit_info(sbi);
if (err)
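
The npages_for_summary_flush() rewrite sizes the compacted summary area in entries rather than bytes: the first page reserves room for the two journals plus the footer, the remainder is measured against a page that only reserves footer space, and anything beyond that needs three pages. A standalone restatement of that 1/2/3-page decision; the constants below are placeholders for illustration, not the real f2fs on-disk sizes.

#include <stdio.h>

/* Placeholder sizes; the real values come from the f2fs on-disk format
 * (PAGE_CACHE_SIZE, SUM_JOURNAL_SIZE, SUM_FOOTER_SIZE, SUMMARY_SIZE). */
#define PAGE_SIZE_B     4096
#define JOURNAL_SIZE_B  505
#define FOOTER_SIZE_B   21
#define SUMMARY_SIZE_B  7

static int npages_for_summary_flush(int valid_sum_count)
{
        int first_page = (PAGE_SIZE_B - 2 * JOURNAL_SIZE_B - FOOTER_SIZE_B) /
                                SUMMARY_SIZE_B;
        int next_page = (PAGE_SIZE_B - FOOTER_SIZE_B) / SUMMARY_SIZE_B;

        if (valid_sum_count <= first_page)
                return 1;
        if (valid_sum_count - first_page <= next_page)
                return 2;
        return 3;
}

int main(void)
{
        int counts[] = { 100, 437, 438, 1000, 1019, 1020 };
        unsigned int i;

        for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
                printf("%4d summaries -> %d page(s)\n",
                       counts[i], npages_for_summary_flush(counts[i]));
        return 0;
}
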
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index bdd10eab8c40..269f690b4e24 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -14,6 +14,8 @@
#define NULL_SEGNO ((unsigned int)(~0))
#define NULL_SECNO ((unsigned int)(~0))
+#define DEF_RECLAIM_PREFREE_SEGMENTS 100 /* 200MB of prefree segments */
+
/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
#define GET_R2L_SEGNO(free_i, segno) (segno + free_i->start_segno)
@@ -90,6 +92,8 @@
(blk_addr << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
#define SECTOR_TO_BLOCK(sbi, sectors) \
(sectors >> ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
+#define MAX_BIO_BLOCKS(max_hw_blocks) \
+ (min((int)max_hw_blocks, BIO_MAX_PAGES))
/* during checkpoint, bio_private is used to synchronize the last bio */
struct bio_private {
@@ -470,6 +474,11 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
reserved_sections(sbi)));
}
+static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
+{
+ return (prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments);
+}
+
static inline int utilization(struct f2fs_sb_info *sbi)
{
return div_u64((u64)valid_user_blocks(sbi) * 100, sbi->user_block_count);
@@ -513,16 +522,13 @@ static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
return curseg->next_blkoff;
}
+#ifdef CONFIG_F2FS_CHECK_FS
static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
unsigned int end_segno = SM_I(sbi)->segment_count - 1;
BUG_ON(segno > end_segno);
}
-/*
- * This function is used for only debugging.
- * NOTE: In future, we have to remove this function.
- */
static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
struct f2fs_sm_info *sm_info = SM_I(sbi);
@@ -541,8 +547,9 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
{
struct f2fs_sm_info *sm_info = SM_I(sbi);
unsigned int end_segno = sm_info->segment_count - 1;
+ bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
int valid_blocks = 0;
- int i;
+ int cur_pos = 0, next_pos;
/* check segment usage */
BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);
@@ -551,11 +558,26 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
BUG_ON(segno > end_segno);
/* check bitmap with valid block count */
- for (i = 0; i < sbi->blocks_per_seg; i++)
- if (f2fs_test_bit(i, raw_sit->valid_map))
- valid_blocks++;
+ do {
+ if (is_valid) {
+ next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
+ sbi->blocks_per_seg,
+ cur_pos);
+ valid_blocks += next_pos - cur_pos;
+ } else
+ next_pos = find_next_bit_le(&raw_sit->valid_map,
+ sbi->blocks_per_seg,
+ cur_pos);
+ cur_pos = next_pos;
+ is_valid = !is_valid;
+ } while (cur_pos < sbi->blocks_per_seg);
BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
}
+#else
+#define check_seg_range(sbi, segno)
+#define verify_block_addr(sbi, blk_addr)
+#define check_block_count(sbi, segno, raw_sit)
+#endif
static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
unsigned int start)
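
Under CONFIG_F2FS_CHECK_FS, check_block_count() now counts valid blocks by walking alternating runs with find_next_zero_bit_le()/find_next_bit_le() instead of testing every bit of the SIT valid_map individually. The sketch below mirrors that run-walking idea over a plain byte-array bitmap, using a simplified find-next helper written for the example (not the kernel's little-endian bitops), and cross-checks the result against a naive per-bit count.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define NBITS 128

static int test_bit(const unsigned char *map, int pos)
{
        return (map[pos / 8] >> (pos % 8)) & 1;
}

/* Simplified stand-in for find_next_bit_le()/find_next_zero_bit_le(). */
static int find_next(const unsigned char *map, int size, int start, int want)
{
        int i;

        for (i = start; i < size; i++)
                if (test_bit(map, i) == want)
                        return i;
        return size;
}

static int count_valid_runs(const unsigned char *map, int size)
{
        int is_valid = test_bit(map, 0);
        int cur = 0, next, valid = 0;

        do {
                if (is_valid) {
                        next = find_next(map, size, cur, 0);
                        valid += next - cur;    /* whole run of set bits at once */
                } else {
                        next = find_next(map, size, cur, 1);
                }
                cur = next;
                is_valid = !is_valid;
        } while (cur < size);

        return valid;
}

int main(void)
{
        unsigned char map[NBITS / 8];
        int i, naive = 0;

        memset(map, 0, sizeof(map));
        for (i = 0; i < NBITS; i += 3)          /* arbitrary sparse pattern */
                map[i / 8] |= 1 << (i % 8);

        for (i = 0; i < NBITS; i++)
                naive += test_bit(map, i);

        assert(count_valid_runs(map, NBITS) == naive);
        printf("run-walk count matches naive count: %d\n", naive);
        return 0;
}
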
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 13d0a0fe49dd..e42351cbe166 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -43,7 +43,9 @@ enum {
Opt_disable_roll_forward,
Opt_discard,
Opt_noheap,
+ Opt_user_xattr,
Opt_nouser_xattr,
+ Opt_acl,
Opt_noacl,
Opt_active_logs,
Opt_disable_ext_identify,
@@ -56,7 +58,9 @@ static match_table_t f2fs_tokens = {
{Opt_disable_roll_forward, "disable_roll_forward"},
{Opt_discard, "discard"},
{Opt_noheap, "no_heap"},
+ {Opt_user_xattr, "user_xattr"},
{Opt_nouser_xattr, "nouser_xattr"},
+ {Opt_acl, "acl"},
{Opt_noacl, "noacl"},
{Opt_active_logs, "active_logs=%u"},
{Opt_disable_ext_identify, "disable_ext_identify"},
@@ -65,24 +69,40 @@ static match_table_t f2fs_tokens = {
};
/* Sysfs support for f2fs */
+enum {
+ GC_THREAD, /* struct f2fs_gc_thread */
+ SM_INFO, /* struct f2fs_sm_info */
+};
+
struct f2fs_attr {
struct attribute attr;
ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
const char *, size_t);
+ int struct_type;
int offset;
};
+static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
+{
+ if (struct_type == GC_THREAD)
+ return (unsigned char *)sbi->gc_thread;
+ else if (struct_type == SM_INFO)
+ return (unsigned char *)SM_I(sbi);
+ return NULL;
+}
+
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- struct f2fs_gc_kthread *gc_kth = sbi->gc_thread;
+ unsigned char *ptr = NULL;
unsigned int *ui;
- if (!gc_kth)
+ ptr = __struct_ptr(sbi, a->struct_type);
+ if (!ptr)
return -EINVAL;
- ui = (unsigned int *)(((char *)gc_kth) + a->offset);
+ ui = (unsigned int *)(ptr + a->offset);
return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}
@@ -91,15 +111,16 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
struct f2fs_sb_info *sbi,
const char *buf, size_t count)
{
- struct f2fs_gc_kthread *gc_kth = sbi->gc_thread;
+ unsigned char *ptr;
unsigned long t;
unsigned int *ui;
ssize_t ret;
- if (!gc_kth)
+ ptr = __struct_ptr(sbi, a->struct_type);
+ if (!ptr)
return -EINVAL;
- ui = (unsigned int *)(((char *)gc_kth) + a->offset);
+ ui = (unsigned int *)(ptr + a->offset);
ret = kstrtoul(skip_spaces(buf), 0, &t);
if (ret < 0)
@@ -135,21 +156,25 @@ static void f2fs_sb_release(struct kobject *kobj)
complete(&sbi->s_kobj_unregister);
}
-#define F2FS_ATTR_OFFSET(_name, _mode, _show, _store, _elname) \
+#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
- .offset = offsetof(struct f2fs_gc_kthread, _elname), \
+ .struct_type = _struct_type, \
+ .offset = _offset \
}
-#define F2FS_RW_ATTR(name, elname) \
- F2FS_ATTR_OFFSET(name, 0644, f2fs_sbi_show, f2fs_sbi_store, elname)
+#define F2FS_RW_ATTR(struct_type, struct_name, name, elname) \
+ F2FS_ATTR_OFFSET(struct_type, name, 0644, \
+ f2fs_sbi_show, f2fs_sbi_store, \
+ offsetof(struct struct_name, elname))
-F2FS_RW_ATTR(gc_min_sleep_time, min_sleep_time);
-F2FS_RW_ATTR(gc_max_sleep_time, max_sleep_time);
-F2FS_RW_ATTR(gc_no_gc_sleep_time, no_gc_sleep_time);
-F2FS_RW_ATTR(gc_idle, gc_idle);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -157,6 +182,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_max_sleep_time),
ATTR_LIST(gc_no_gc_sleep_time),
ATTR_LIST(gc_idle),
+ ATTR_LIST(reclaim_segments),
NULL,
};
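
The sysfs rework above turns each attribute into a (struct tag, byte offset) pair so a single show/store pair can serve fields of both the GC thread and the segment manager: __struct_ptr() picks the base pointer from the tag and the offset recorded by offsetof() locates the field. A stripped-down userspace sketch of that dispatch pattern (the struct and field layout below is illustrative, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Two unrelated structs whose fields we want to expose generically. */
struct gc_thread { unsigned int min_sleep_time, max_sleep_time; };
struct sm_info   { unsigned int rec_prefree_segments; };

enum { GC_THREAD, SM_INFO };

struct attr {
        const char *name;
        int struct_type;        /* which struct the field lives in */
        size_t offset;          /* offsetof() within that struct */
};

struct sb_info {
        struct gc_thread *gc;
        struct sm_info *sm;
};

/* Resolve the tagged struct type to a concrete base pointer. */
static unsigned char *struct_ptr(struct sb_info *sbi, int struct_type)
{
        switch (struct_type) {
        case GC_THREAD: return (unsigned char *)sbi->gc;
        case SM_INFO:   return (unsigned char *)sbi->sm;
        default:        return NULL;
        }
}

/* One generic "show": base pointer + recorded offset gives the field. */
static void show(struct sb_info *sbi, const struct attr *a)
{
        unsigned char *ptr = struct_ptr(sbi, a->struct_type);
        unsigned int *ui;

        if (!ptr)
                return;
        ui = (unsigned int *)(ptr + a->offset);
        printf("%s = %u\n", a->name, *ui);
}

#define RW_ATTR(type, stname, field) \
        { #field, type, offsetof(struct stname, field) }

static const struct attr attrs[] = {
        RW_ATTR(GC_THREAD, gc_thread, min_sleep_time),
        RW_ATTR(GC_THREAD, gc_thread, max_sleep_time),
        RW_ATTR(SM_INFO,   sm_info,   rec_prefree_segments),
};

int main(void)
{
        struct gc_thread gc = { .min_sleep_time = 30000, .max_sleep_time = 60000 };
        struct sm_info sm = { .rec_prefree_segments = 20 };
        struct sb_info sbi = { .gc = &gc, .sm = &sm };

        for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
                show(&sbi, &attrs[i]);
        return 0;
}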
@@ -237,6 +263,9 @@ static int parse_options(struct super_block *sb, char *options)
set_opt(sbi, NOHEAP);
break;
#ifdef CONFIG_F2FS_FS_XATTR
+ case Opt_user_xattr:
+ set_opt(sbi, XATTR_USER);
+ break;
case Opt_nouser_xattr:
clear_opt(sbi, XATTR_USER);
break;
@@ -244,6 +273,10 @@ static int parse_options(struct super_block *sb, char *options)
set_opt(sbi, INLINE_XATTR);
break;
#else
+ case Opt_user_xattr:
+ f2fs_msg(sb, KERN_INFO,
+ "user_xattr options not supported");
+ break;
case Opt_nouser_xattr:
f2fs_msg(sb, KERN_INFO,
"nouser_xattr options not supported");
@@ -254,10 +287,16 @@ static int parse_options(struct super_block *sb, char *options)
break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
+ case Opt_acl:
+ set_opt(sbi, POSIX_ACL);
+ break;
case Opt_noacl:
clear_opt(sbi, POSIX_ACL);
break;
#else
+ case Opt_acl:
+ f2fs_msg(sb, KERN_INFO, "acl options not supported");
+ break;
case Opt_noacl:
f2fs_msg(sb, KERN_INFO, "noacl options not supported");
break;
@@ -355,7 +394,9 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_destroy_stats(sbi);
stop_gc_thread(sbi);
- write_checkpoint(sbi, true);
+ /* No need to write a checkpoint when the filesystem is clean */
+ if (sbi->s_dirty && get_pages(sbi, F2FS_DIRTY_NODES))
+ write_checkpoint(sbi, true);
iput(sbi->node_inode);
iput(sbi->meta_inode);
@@ -727,30 +768,47 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
atomic_set(&sbi->nr_pages[i], 0);
}
-static int validate_superblock(struct super_block *sb,
- struct f2fs_super_block **raw_super,
- struct buffer_head **raw_super_buf, sector_t block)
+/*
+ * Read f2fs raw super block.
+ * Because there are two copies of the super block, read the first one first;
+ * if it is invalid, fall back to reading the second one.
+ */
+static int read_raw_super_block(struct super_block *sb,
+ struct f2fs_super_block **raw_super,
+ struct buffer_head **raw_super_buf)
{
- const char *super = (block == 0 ? "first" : "second");
+ int block = 0;
- /* read f2fs raw super block */
+retry:
*raw_super_buf = sb_bread(sb, block);
if (!*raw_super_buf) {
- f2fs_msg(sb, KERN_ERR, "unable to read %s superblock",
- super);
- return -EIO;
+ f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
+ block + 1);
+ if (block == 0) {
+ block++;
+ goto retry;
+ } else {
+ return -EIO;
+ }
}
*raw_super = (struct f2fs_super_block *)
((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);
/* sanity checking of raw super */
- if (!sanity_check_raw_super(sb, *raw_super))
- return 0;
+ if (sanity_check_raw_super(sb, *raw_super)) {
+ brelse(*raw_super_buf);
+ f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
+ "in %dth superblock", block + 1);
+ if (block == 0) {
+ block++;
+ goto retry;
+ } else {
+ return -EINVAL;
+ }
+ }
- f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
- "in %s superblock", super);
- return -EINVAL;
+ return 0;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -760,7 +818,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
struct buffer_head *raw_super_buf;
struct inode *root;
long err = -EINVAL;
- int i;
/* allocate memory for f2fs-specific super block info */
sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
@@ -773,14 +830,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
}
- err = validate_superblock(sb, &raw_super, &raw_super_buf, 0);
- if (err) {
- brelse(raw_super_buf);
- /* check secondary superblock when primary failed */
- err = validate_superblock(sb, &raw_super, &raw_super_buf, 1);
- if (err)
- goto free_sb_buf;
- }
+ err = read_raw_super_block(sb, &raw_super, &raw_super_buf);
+ if (err)
+ goto free_sbi;
+
sb->s_fs_info = sbi;
/* init some FS parameters */
sbi->active_logs = NR_CURSEG_TYPE;
@@ -818,12 +871,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
mutex_init(&sbi->gc_mutex);
mutex_init(&sbi->writepages);
mutex_init(&sbi->cp_mutex);
- for (i = 0; i < NR_GLOBAL_LOCKS; i++)
- mutex_init(&sbi->fs_lock[i]);
mutex_init(&sbi->node_write);
- sbi->por_doing = 0;
+ sbi->por_doing = false;
spin_lock_init(&sbi->stat_lock);
init_rwsem(&sbi->bio_sem);
+ init_rwsem(&sbi->cp_rwsem);
init_sb_info(sbi);
/* get an inode for meta space */
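
read_raw_super_block() above folds the old two-call validate_superblock() dance into one helper that reads the primary copy first and falls back to the backup if the read or the sanity check fails. A self-contained toy version of that fallback loop, with made-up read_block()/sanity_check() stand-ins in place of sb_bread() and the real checks:

#include <stdio.h>
#include <string.h>

#define NBLOCKS 2

/* Two copies of a "superblock"; copy 0 is deliberately corrupt here. */
static const char *disk[NBLOCKS] = { "garbage", "F2FS" };

static int read_block(int block, char buf[16])
{
        if (block >= NBLOCKS)
                return -1;                              /* I/O error */
        strcpy(buf, disk[block]);
        return 0;
}

static int sanity_check(const char *buf)
{
        return strcmp(buf, "F2FS") == 0 ? 0 : -1;       /* magic check */
}

/* Try the first copy, fall back to the second if it is unreadable/invalid. */
static int read_raw_super(char buf[16])
{
        int block = 0;

retry:
        if (read_block(block, buf) || sanity_check(buf)) {
                fprintf(stderr, "superblock %d is bad\n", block + 1);
                if (block == 0) {
                        block++;
                        goto retry;
                }
                return -1;
        }
        return 0;
}

int main(void)
{
        char buf[16];

        if (read_raw_super(buf) == 0)
                printf("mounted with magic '%s'\n", buf);
        return 0;
}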
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 1ac8a5f6e380..aa7a3f139fe5 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -154,6 +154,9 @@ static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name,
}
#ifdef CONFIG_F2FS_FS_SECURITY
+static int __f2fs_setxattr(struct inode *inode, int name_index,
+ const char *name, const void *value, size_t value_len,
+ struct page *ipage);
static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
void *page)
{
@@ -161,7 +164,7 @@ static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
int err = 0;
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
- err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
+ err = __f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
xattr->name, xattr->value,
xattr->value_len, (struct page *)page);
if (err < 0)
@@ -369,7 +372,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
alloc_nid_failed(sbi, new_nid);
return PTR_ERR(xpage);
}
- BUG_ON(new_nid);
+ f2fs_bug_on(new_nid);
} else {
struct dnode_of_data dn;
set_new_dnode(&dn, inode, NULL, NULL, new_nid);
@@ -469,16 +472,15 @@ cleanup:
return error;
}
-int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
- const void *value, size_t value_len, struct page *ipage)
+static int __f2fs_setxattr(struct inode *inode, int name_index,
+ const char *name, const void *value, size_t value_len,
+ struct page *ipage)
{
- struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_xattr_entry *here, *last;
void *base_addr;
int found, newsize;
size_t name_len;
- int ilock;
__u32 new_hsize;
int error = -ENOMEM;
@@ -493,10 +495,6 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
if (name_len > F2FS_NAME_LEN || value_len > MAX_VALUE_LEN(inode))
return -ERANGE;
- f2fs_balance_fs(sbi);
-
- ilock = mutex_lock_op(sbi);
-
base_addr = read_all_xattrs(inode, ipage);
if (!base_addr)
goto exit;
@@ -522,7 +520,7 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
*/
free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr);
if (found)
- free = free - ENTRY_SIZE(here);
+ free = free + ENTRY_SIZE(here);
if (free < newsize) {
error = -ENOSPC;
@@ -578,7 +576,21 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
else
update_inode_page(inode);
exit:
- mutex_unlock_op(sbi, ilock);
kzfree(base_addr);
return error;
}
+
+int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
+ const void *value, size_t value_len, struct page *ipage)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ int err;
+
+ f2fs_balance_fs(sbi);
+
+ f2fs_lock_op(sbi);
+ err = __f2fs_setxattr(inode, name_index, name, value, value_len, ipage);
+ f2fs_unlock_op(sbi);
+
+ return err;
+}
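
The xattr change above splits f2fs_setxattr() into a lock-free worker, __f2fs_setxattr(), plus a thin wrapper that calls f2fs_balance_fs() and takes f2fs_lock_op(), so the initxattrs path, which already runs under the fs-level lock, can call the worker directly. A generic pthread sketch of that wrapper/worker locking split (the names are illustrative, not f2fs API):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t fs_lock = PTHREAD_MUTEX_INITIALIZER;
static char stored_value[32];

/* Worker: does the actual update and assumes the caller holds fs_lock. */
static int __set_attr(const char *value)
{
        strncpy(stored_value, value, sizeof(stored_value) - 1);
        return 0;
}

/* Public wrapper: takes the lock, then delegates to the worker. */
static int set_attr(const char *value)
{
        int err;

        pthread_mutex_lock(&fs_lock);
        err = __set_attr(value);
        pthread_mutex_unlock(&fs_lock);
        return err;
}

/*
 * A path that already runs inside a locked section (like f2fs_initxattrs()
 * during inode creation) calls the worker directly and so cannot deadlock
 * on the wrapper.
 */
static int init_attrs(void)
{
        int err;

        pthread_mutex_lock(&fs_lock);
        err = __set_attr("security.selinux");
        pthread_mutex_unlock(&fs_lock);
        return err;
}

int main(void)
{
        set_attr("user.comment");
        init_attrs();
        printf("%s\n", stored_value);   /* prints security.selinux */
        return 0;
}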
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 9b104f543056..33711ff2b4a3 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -172,8 +172,8 @@ const struct file_operations fat_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.release = fat_file_release,
.unlocked_ioctl = fat_generic_ioctl,
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 0062da21dd8b..3134d1ede292 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -185,8 +185,7 @@ static int fat_write_end(struct file *file, struct address_space *mapping,
}
static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov,
- loff_t offset, unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -203,7 +202,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
*
* Return 0, and fallback to normal buffered write.
*/
- loff_t size = offset + iov_length(iov, nr_segs);
+ loff_t size = offset + iov_iter_count(iter);
if (MSDOS_I(inode)->mmu_private < size)
return 0;
}
@@ -212,10 +211,9 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
* FAT need to use the DIO_LOCKING for avoiding the race
* condition of fat_get_block() and ->truncate().
*/
- ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
- fat_get_block);
+ ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, fat_get_block);
if (ret < 0 && (rw & WRITE))
- fat_write_failed(mapping, offset + iov_length(iov, nr_segs));
+ fat_write_failed(mapping, offset + iov_iter_count(iter));
return ret;
}
diff --git a/fs/file_table.c b/fs/file_table.c
index abdd15ad13c9..e900ca518635 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -297,7 +297,7 @@ void flush_delayed_fput(void)
delayed_fput(NULL);
}
-static DECLARE_WORK(delayed_fput_work, delayed_fput);
+static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
void fput(struct file *file)
{
@@ -317,7 +317,7 @@ void fput(struct file *file)
}
if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
- schedule_work(&delayed_fput_work);
+ schedule_delayed_work(&delayed_fput_work, 1);
}
}
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index b2a86e324aac..29d7feb62cf7 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -58,15 +58,16 @@ void fscache_cookie_init_once(void *_cookie)
struct fscache_cookie *__fscache_acquire_cookie(
struct fscache_cookie *parent,
const struct fscache_cookie_def *def,
- void *netfs_data)
+ void *netfs_data,
+ bool enable)
{
struct fscache_cookie *cookie;
BUG_ON(!def);
- _enter("{%s},{%s},%p",
+ _enter("{%s},{%s},%p,%u",
parent ? (char *) parent->def->name : "<no-parent>",
- def->name, netfs_data);
+ def->name, netfs_data, enable);
fscache_stat(&fscache_n_acquires);
@@ -106,7 +107,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
cookie->def = def;
cookie->parent = parent;
cookie->netfs_data = netfs_data;
- cookie->flags = 0;
+ cookie->flags = (1 << FSCACHE_COOKIE_NO_DATA_YET);
/* radix tree insertion won't use the preallocation pool unless it's
* told it may not wait */
@@ -124,16 +125,22 @@ struct fscache_cookie *__fscache_acquire_cookie(
break;
}
- /* if the object is an index then we need do nothing more here - we
- * create indices on disk when we need them as an index may exist in
- * multiple caches */
- if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
- if (fscache_acquire_non_index_cookie(cookie) < 0) {
- atomic_dec(&parent->n_children);
- __fscache_cookie_put(cookie);
- fscache_stat(&fscache_n_acquires_nobufs);
- _leave(" = NULL");
- return NULL;
+ if (enable) {
+ /* if the object is an index then we need do nothing more here
+ * - we create indices on disk when we need them as an index
+ * may exist in multiple caches */
+ if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
+ if (fscache_acquire_non_index_cookie(cookie) == 0) {
+ set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+ } else {
+ atomic_dec(&parent->n_children);
+ __fscache_cookie_put(cookie);
+ fscache_stat(&fscache_n_acquires_nobufs);
+ _leave(" = NULL");
+ return NULL;
+ }
+ } else {
+ set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
}
}
@@ -144,6 +151,39 @@ struct fscache_cookie *__fscache_acquire_cookie(
EXPORT_SYMBOL(__fscache_acquire_cookie);
/*
+ * Enable a cookie to permit it to accept new operations.
+ */
+void __fscache_enable_cookie(struct fscache_cookie *cookie,
+ bool (*can_enable)(void *data),
+ void *data)
+{
+ _enter("%p", cookie);
+
+ wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
+ fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+
+ if (test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
+ goto out_unlock;
+
+ if (can_enable && !can_enable(data)) {
+ /* The netfs decided it didn't want to enable after all */
+ } else if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
+ /* Wait for outstanding disablement to complete */
+ __fscache_wait_on_invalidate(cookie);
+
+ if (fscache_acquire_non_index_cookie(cookie) == 0)
+ set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+ } else {
+ set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+ }
+
+out_unlock:
+ clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
+ wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
+}
+EXPORT_SYMBOL(__fscache_enable_cookie);
+
+/*
* acquire a non-index cookie
* - this must make sure the index chain is instantiated and instantiate the
* object representation too
@@ -157,7 +197,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
_enter("");
- cookie->flags = 1 << FSCACHE_COOKIE_UNAVAILABLE;
+ set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
/* now we need to see whether the backing objects for this cookie yet
* exist, if not there'll be nothing to search */
@@ -180,9 +220,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
_debug("cache %s", cache->tag->name);
- cookie->flags =
- (1 << FSCACHE_COOKIE_LOOKING_UP) |
- (1 << FSCACHE_COOKIE_NO_DATA_YET);
+ set_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
/* ask the cache to allocate objects for this cookie and its parent
* chain */
@@ -398,7 +436,8 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
if (!hlist_empty(&cookie->backing_objects)) {
spin_lock(&cookie->lock);
- if (!hlist_empty(&cookie->backing_objects) &&
+ if (fscache_cookie_enabled(cookie) &&
+ !hlist_empty(&cookie->backing_objects) &&
!test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
&cookie->flags)) {
object = hlist_entry(cookie->backing_objects.first,
@@ -452,10 +491,14 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
spin_lock(&cookie->lock);
- /* update the index entry on disk in each cache backing this cookie */
- hlist_for_each_entry(object,
- &cookie->backing_objects, cookie_link) {
- fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
+ if (fscache_cookie_enabled(cookie)) {
+ /* update the index entry on disk in each cache backing this
+ * cookie.
+ */
+ hlist_for_each_entry(object,
+ &cookie->backing_objects, cookie_link) {
+ fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
+ }
}
spin_unlock(&cookie->lock);
@@ -464,28 +507,14 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
EXPORT_SYMBOL(__fscache_update_cookie);
/*
- * release a cookie back to the cache
- * - the object will be marked as recyclable on disk if retire is true
- * - all dependents of this cookie must have already been unregistered
- * (indices/files/pages)
+ * Disable a cookie to stop it from accepting new requests from the netfs.
*/
-void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
{
struct fscache_object *object;
+ bool awaken = false;
- fscache_stat(&fscache_n_relinquishes);
- if (retire)
- fscache_stat(&fscache_n_relinquishes_retire);
-
- if (!cookie) {
- fscache_stat(&fscache_n_relinquishes_null);
- _leave(" [no cookie]");
- return;
- }
-
- _enter("%p{%s,%p,%d},%d",
- cookie, cookie->def->name, cookie->netfs_data,
- atomic_read(&cookie->n_active), retire);
+ _enter("%p,%u", cookie, invalidate);
ASSERTCMP(atomic_read(&cookie->n_active), >, 0);
@@ -495,24 +524,82 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
BUG();
}
- /* No further netfs-accessing operations on this cookie permitted */
- set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);
- if (retire)
- set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
+ wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
+ fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+ if (!test_and_clear_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
+ goto out_unlock_enable;
+
+ /* If the cookie is being invalidated, wait for that to complete first
+ * so that we can reuse the flag.
+ */
+ __fscache_wait_on_invalidate(cookie);
+
+ /* Dispose of the backing objects */
+ set_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags);
spin_lock(&cookie->lock);
- hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
- fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
+ if (!hlist_empty(&cookie->backing_objects)) {
+ hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
+ if (invalidate)
+ set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+ fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
+ }
+ } else {
+ if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
+ awaken = true;
}
spin_unlock(&cookie->lock);
+ if (awaken)
+ wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
/* Wait for cessation of activity requiring access to the netfs (when
- * n_active reaches 0).
+ * n_active reaches 0). This makes sure outstanding reads and writes
+ * have completed.
*/
if (!atomic_dec_and_test(&cookie->n_active))
wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
TASK_UNINTERRUPTIBLE);
+ /* Reset the cookie state if it wasn't relinquished */
+ if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
+ atomic_inc(&cookie->n_active);
+ set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+ }
+
+out_unlock_enable:
+ clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
+ wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
+ _leave("");
+}
+EXPORT_SYMBOL(__fscache_disable_cookie);
+
+/*
+ * release a cookie back to the cache
+ * - the object will be marked as recyclable on disk if retire is true
+ * - all dependents of this cookie must have already been unregistered
+ * (indices/files/pages)
+ */
+void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
+{
+ fscache_stat(&fscache_n_relinquishes);
+ if (retire)
+ fscache_stat(&fscache_n_relinquishes_retire);
+
+ if (!cookie) {
+ fscache_stat(&fscache_n_relinquishes_null);
+ _leave(" [no cookie]");
+ return;
+ }
+
+ _enter("%p{%s,%p,%d},%d",
+ cookie, cookie->def->name, cookie->netfs_data,
+ atomic_read(&cookie->n_active), retire);
+
+ /* No further netfs-accessing operations on this cookie permitted */
+ set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);
+
+ __fscache_disable_cookie(cookie, retire);
+
/* Clear pointers back to the netfs */
cookie->netfs_data = NULL;
cookie->def = NULL;
@@ -568,6 +655,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
{
struct fscache_operation *op;
struct fscache_object *object;
+ bool wake_cookie = false;
int ret;
_enter("%p,", cookie);
@@ -591,7 +679,8 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
spin_lock(&cookie->lock);
- if (hlist_empty(&cookie->backing_objects))
+ if (!fscache_cookie_enabled(cookie) ||
+ hlist_empty(&cookie->backing_objects))
goto inconsistent;
object = hlist_entry(cookie->backing_objects.first,
struct fscache_object, cookie_link);
@@ -600,7 +689,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
op->debug_id = atomic_inc_return(&fscache_op_debug_id);
- atomic_inc(&cookie->n_active);
+ __fscache_use_cookie(cookie);
if (fscache_submit_op(object, op) < 0)
goto submit_failed;
@@ -622,9 +711,11 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
return ret;
submit_failed:
- atomic_dec(&cookie->n_active);
+ wake_cookie = __fscache_unuse_cookie(cookie);
inconsistent:
spin_unlock(&cookie->lock);
+ if (wake_cookie)
+ __fscache_wake_unused_cookie(cookie);
kfree(op);
_leave(" = -ESTALE");
return -ESTALE;
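
The cookie enable/disable machinery above serialises state changes on an enablement lock bit, lets a can_enable() callback veto the transition, and has the disable path wait for outstanding users (n_active) to drain before resetting the cookie. A rough userspace analogue of that pattern, using a mutex and condition variable in place of the kernel's bit locks and wait_on_atomic_t() (the struct below is a stand-in, not the fscache cookie):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cookie {
        pthread_mutex_t enable_lock;    /* plays the ENABLEMENT_LOCK bit */
        pthread_cond_t drained;         /* signalled when n_active hits zero */
        bool enabled;
        int n_active;                   /* outstanding users of the cookie */
};

static struct cookie c = {
        .enable_lock = PTHREAD_MUTEX_INITIALIZER,
        .drained = PTHREAD_COND_INITIALIZER,
};

static void cookie_enable(struct cookie *ck, bool (*can_enable)(void *), void *data)
{
        pthread_mutex_lock(&ck->enable_lock);
        if (!ck->enabled && (!can_enable || can_enable(data)))
                ck->enabled = true;
        pthread_mutex_unlock(&ck->enable_lock);
}

static bool cookie_use(struct cookie *ck)
{
        bool ok;

        pthread_mutex_lock(&ck->enable_lock);
        ok = ck->enabled;
        if (ok)
                ck->n_active++;
        pthread_mutex_unlock(&ck->enable_lock);
        return ok;
}

static void cookie_unuse(struct cookie *ck)
{
        pthread_mutex_lock(&ck->enable_lock);
        if (--ck->n_active == 0)
                pthread_cond_signal(&ck->drained);
        pthread_mutex_unlock(&ck->enable_lock);
}

/* Disable: refuse new users, then wait for the active count to drain. */
static void cookie_disable(struct cookie *ck)
{
        pthread_mutex_lock(&ck->enable_lock);
        if (ck->enabled) {
                ck->enabled = false;
                while (ck->n_active > 0)
                        pthread_cond_wait(&ck->drained, &ck->enable_lock);
        }
        pthread_mutex_unlock(&ck->enable_lock);
}

int main(void)
{
        cookie_enable(&c, NULL, NULL);
        if (cookie_use(&c))
                cookie_unuse(&c);       /* a short-lived operation */
        cookie_disable(&c);
        printf("enabled=%d active=%d\n", c.enabled, c.n_active);
        return 0;
}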
diff --git a/fs/fscache/fsdef.c b/fs/fscache/fsdef.c
index 10a2ade0bdf8..5a117df2a9ef 100644
--- a/fs/fscache/fsdef.c
+++ b/fs/fscache/fsdef.c
@@ -59,6 +59,7 @@ struct fscache_cookie fscache_fsdef_index = {
.lock = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock),
.backing_objects = HLIST_HEAD_INIT,
.def = &fscache_fsdef_index_def,
+ .flags = 1 << FSCACHE_COOKIE_ENABLED,
};
EXPORT_SYMBOL(fscache_fsdef_index);
diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c
index b1bb6117473a..989f39401547 100644
--- a/fs/fscache/netfs.c
+++ b/fs/fscache/netfs.c
@@ -45,6 +45,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
netfs->primary_index->def = &fscache_fsdef_netfs_def;
netfs->primary_index->parent = &fscache_fsdef_index;
netfs->primary_index->netfs_data = netfs;
+ netfs->primary_index->flags = 1 << FSCACHE_COOKIE_ENABLED;
atomic_inc(&netfs->primary_index->parent->usage);
atomic_inc(&netfs->primary_index->parent->n_children);
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 86d75a60b20c..53d35c504240 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -495,6 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
* returning ENODATA.
*/
set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+ clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
_debug("wake up lookup %p", &cookie->flags);
clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
@@ -527,6 +528,7 @@ void fscache_obtained_object(struct fscache_object *object)
/* We do (presumably) have data */
clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+ clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
/* Allow write requests to begin stacking up and read requests
* to begin shovelling data.
@@ -679,7 +681,8 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
*/
spin_lock(&cookie->lock);
hlist_del_init(&object->cookie_link);
- if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
+ if (hlist_empty(&cookie->backing_objects) &&
+ test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
awaken = true;
spin_unlock(&cookie->lock);
@@ -796,7 +799,7 @@ void fscache_enqueue_object(struct fscache_object *object)
*/
bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
- wait_queue_head_t *cong_wq = &__get_cpu_var(fscache_object_cong_wait);
+ wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
DEFINE_WAIT(wait);
if (fscache_object_congested())
@@ -927,7 +930,7 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj
*/
if (!fscache_use_cookie(object)) {
ASSERT(object->cookie->stores.rnode == NULL);
- set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
+ set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
_leave(" [no cookie]");
return transit_to(KILL_OBJECT);
}
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 73899c1c3449..7f5c658af755 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -163,12 +163,10 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
fscache_stat(&fscache_n_attr_changed_calls);
- if (fscache_object_is_active(object) &&
- fscache_use_cookie(object)) {
+ if (fscache_object_is_active(object)) {
fscache_stat(&fscache_n_cop_attr_changed);
ret = object->cache->ops->attr_changed(object);
fscache_stat_d(&fscache_n_cop_attr_changed);
- fscache_unuse_cookie(object);
if (ret < 0)
fscache_abort_object(object);
}
@@ -184,6 +182,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
{
struct fscache_operation *op;
struct fscache_object *object;
+ bool wake_cookie;
_enter("%p", cookie);
@@ -199,15 +198,19 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
}
fscache_operation_init(op, fscache_attr_changed_op, NULL);
- op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
+ op->flags = FSCACHE_OP_ASYNC |
+ (1 << FSCACHE_OP_EXCLUSIVE) |
+ (1 << FSCACHE_OP_UNUSE_COOKIE);
spin_lock(&cookie->lock);
- if (hlist_empty(&cookie->backing_objects))
+ if (!fscache_cookie_enabled(cookie) ||
+ hlist_empty(&cookie->backing_objects))
goto nobufs;
object = hlist_entry(cookie->backing_objects.first,
struct fscache_object, cookie_link);
+ __fscache_use_cookie(cookie);
if (fscache_submit_exclusive_op(object, op) < 0)
goto nobufs;
spin_unlock(&cookie->lock);
@@ -217,8 +220,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
return 0;
nobufs:
+ wake_cookie = __fscache_unuse_cookie(cookie);
spin_unlock(&cookie->lock);
kfree(op);
+ if (wake_cookie)
+ __fscache_wake_unused_cookie(cookie);
fscache_stat(&fscache_n_attr_changed_nobufs);
_leave(" = %d", -ENOBUFS);
return -ENOBUFS;
@@ -263,7 +269,6 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
}
fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
- atomic_inc(&cookie->n_active);
op->op.flags = FSCACHE_OP_MYTHREAD |
(1UL << FSCACHE_OP_WAITING) |
(1UL << FSCACHE_OP_UNUSE_COOKIE);
@@ -384,6 +389,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
{
struct fscache_retrieval *op;
struct fscache_object *object;
+ bool wake_cookie = false;
int ret;
_enter("%p,%p,,,", cookie, page);
@@ -405,7 +411,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
return -ERESTARTSYS;
op = fscache_alloc_retrieval(cookie, page->mapping,
- end_io_func,context);
+ end_io_func, context);
if (!op) {
_leave(" = -ENOMEM");
return -ENOMEM;
@@ -414,13 +420,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
spin_lock(&cookie->lock);
- if (hlist_empty(&cookie->backing_objects))
+ if (!fscache_cookie_enabled(cookie) ||
+ hlist_empty(&cookie->backing_objects))
goto nobufs_unlock;
object = hlist_entry(cookie->backing_objects.first,
struct fscache_object, cookie_link);
ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));
+ __fscache_use_cookie(cookie);
atomic_inc(&object->n_reads);
__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
@@ -475,9 +483,11 @@ error:
nobufs_unlock_dec:
atomic_dec(&object->n_reads);
+ wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
spin_unlock(&cookie->lock);
- atomic_dec(&cookie->n_active);
+ if (wake_cookie)
+ __fscache_wake_unused_cookie(cookie);
kfree(op);
nobufs:
fscache_stat(&fscache_n_retrievals_nobufs);
@@ -514,6 +524,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
{
struct fscache_retrieval *op;
struct fscache_object *object;
+ bool wake_cookie = false;
int ret;
_enter("%p,,%d,,,", cookie, *nr_pages);
@@ -542,11 +553,13 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
spin_lock(&cookie->lock);
- if (hlist_empty(&cookie->backing_objects))
+ if (!fscache_cookie_enabled(cookie) ||
+ hlist_empty(&cookie->backing_objects))
goto nobufs_unlock;
object = hlist_entry(cookie->backing_objects.first,
struct fscache_object, cookie_link);
+ __fscache_use_cookie(cookie);
atomic_inc(&object->n_reads);
__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
@@ -601,10 +614,12 @@ error:
nobufs_unlock_dec:
atomic_dec(&object->n_reads);
+ wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
spin_unlock(&cookie->lock);
- atomic_dec(&cookie->n_active);
kfree(op);
+ if (wake_cookie)
+ __fscache_wake_unused_cookie(cookie);
nobufs:
fscache_stat(&fscache_n_retrievals_nobufs);
_leave(" = -ENOBUFS");
@@ -626,6 +641,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
{
struct fscache_retrieval *op;
struct fscache_object *object;
+ bool wake_cookie = false;
int ret;
_enter("%p,%p,,,", cookie, page);
@@ -653,13 +669,15 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
spin_lock(&cookie->lock);
- if (hlist_empty(&cookie->backing_objects))
+ if (!fscache_cookie_enabled(cookie) ||
+ hlist_empty(&cookie->backing_objects))
goto nobufs_unlock;
object = hlist_entry(cookie->backing_objects.first,
struct fscache_object, cookie_link);
+ __fscache_use_cookie(cookie);
if (fscache_submit_op(object, &op->op) < 0)
- goto nobufs_unlock;
+ goto nobufs_unlock_dec;
spin_unlock(&cookie->lock);
fscache_stat(&fscache_n_alloc_ops);
@@ -689,10 +707,13 @@ error:
_leave(" = %d", ret);
return ret;
+nobufs_unlock_dec:
+ wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
spin_unlock(&cookie->lock);
- atomic_dec(&cookie->n_active);
kfree(op);
+ if (wake_cookie)
+ __fscache_wake_unused_cookie(cookie);
nobufs:
fscache_stat(&fscache_n_allocs_nobufs);
_leave(" = -ENOBUFS");
@@ -889,6 +910,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
{
struct fscache_storage *op;
struct fscache_object *object;
+ bool wake_cookie = false;
int ret;
_enter("%p,%x,", cookie, (u32) page->flags);
@@ -920,7 +942,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
ret = -ENOBUFS;
spin_lock(&cookie->lock);
- if (hlist_empty(&cookie->backing_objects))
+ if (!fscache_cookie_enabled(cookie) ||
+ hlist_empty(&cookie->backing_objects))
goto nobufs;
object = hlist_entry(cookie->backing_objects.first,
struct fscache_object, cookie_link);
@@ -957,7 +980,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
op->store_limit = object->store_limit;
- atomic_inc(&cookie->n_active);
+ __fscache_use_cookie(cookie);
if (fscache_submit_op(object, &op->op) < 0)
goto submit_failed;
@@ -984,10 +1007,10 @@ already_pending:
return 0;
submit_failed:
- atomic_dec(&cookie->n_active);
spin_lock(&cookie->stores_lock);
radix_tree_delete(&cookie->stores, page->index);
spin_unlock(&cookie->stores_lock);
+ wake_cookie = __fscache_unuse_cookie(cookie);
page_cache_release(page);
ret = -ENOBUFS;
goto nobufs;
@@ -999,6 +1022,8 @@ nobufs:
spin_unlock(&cookie->lock);
radix_tree_preload_end();
kfree(op);
+ if (wake_cookie)
+ __fscache_wake_unused_cookie(cookie);
fscache_stat(&fscache_n_stores_nobufs);
_leave(" = -ENOBUFS");
return -ENOBUFS;
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index adbfd66b380f..242fe3eb1ae8 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -94,8 +94,11 @@ static ssize_t cuse_read(struct file *file, char __user *buf, size_t count,
loff_t pos = 0;
struct iovec iov = { .iov_base = buf, .iov_len = count };
struct fuse_io_priv io = { .async = 0, .file = file };
+ struct iov_iter ii;
- return fuse_direct_io(&io, &iov, 1, count, &pos, 0);
+ iov_iter_init(&ii, &iov, 1, count, 0);
+
+ return fuse_direct_io(&io, &ii, count, &pos, 0);
}
static ssize_t cuse_write(struct file *file, const char __user *buf,
@@ -104,12 +107,15 @@ static ssize_t cuse_write(struct file *file, const char __user *buf,
loff_t pos = 0;
struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
struct fuse_io_priv io = { .async = 0, .file = file };
+ struct iov_iter ii;
+
+ iov_iter_init(&ii, &iov, 1, count, 0);
/*
* No locking or generic_write_checks(), the server is
* responsible for locking and sanity checks.
*/
- return fuse_direct_io(&io, &iov, 1, count, &pos, 1);
+ return fuse_direct_io(&io, &ii, count, &pos, 1);
}
static int cuse_open(struct inode *inode, struct file *file)
@@ -589,11 +595,14 @@ static struct attribute *cuse_class_dev_attrs[] = {
ATTRIBUTE_GROUPS(cuse_class_dev);
static struct miscdevice cuse_miscdev = {
- .minor = MISC_DYNAMIC_MINOR,
+ .minor = CUSE_MINOR,
.name = "cuse",
.fops = &cuse_channel_fops,
};
+MODULE_ALIAS_MISCDEV(CUSE_MINOR);
+MODULE_ALIAS("devname:cuse");
+
static int __init cuse_init(void)
{
int i, rc;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index b7989f2ab4c4..0747f6eed598 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -342,24 +342,6 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
return err;
}
-static struct dentry *fuse_materialise_dentry(struct dentry *dentry,
- struct inode *inode)
-{
- struct dentry *newent;
-
- if (inode && S_ISDIR(inode->i_mode)) {
- struct fuse_conn *fc = get_fuse_conn(inode);
-
- mutex_lock(&fc->inst_mutex);
- newent = d_materialise_unique(dentry, inode);
- mutex_unlock(&fc->inst_mutex);
- } else {
- newent = d_materialise_unique(dentry, inode);
- }
-
- return newent;
-}
-
static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
unsigned int flags)
{
@@ -382,7 +364,7 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
if (inode && get_node_id(inode) == FUSE_ROOT_ID)
goto out_iput;
- newent = fuse_materialise_dentry(entry, inode);
+ newent = d_materialise_unique(entry, inode);
err = PTR_ERR(newent);
if (IS_ERR(newent))
goto out_err;
@@ -601,21 +583,11 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
}
kfree(forget);
- if (S_ISDIR(inode->i_mode)) {
- struct dentry *alias;
- mutex_lock(&fc->inst_mutex);
- alias = d_find_alias(inode);
- if (alias) {
- /* New directory must have moved since mkdir */
- mutex_unlock(&fc->inst_mutex);
- dput(alias);
- iput(inode);
- return -EBUSY;
- }
- d_instantiate(entry, inode);
- mutex_unlock(&fc->inst_mutex);
- } else
- d_instantiate(entry, inode);
+ err = d_instantiate_no_diralias(entry, inode);
+ if (err) {
+ iput(inode);
+ return err;
+ }
fuse_change_entry_timeout(entry, &outarg);
fuse_invalidate_attr(dir);
@@ -1284,7 +1256,7 @@ static int fuse_direntplus_link(struct file *file,
if (!inode)
goto out;
- alias = fuse_materialise_dentry(dentry, inode);
+ alias = d_materialise_unique(dentry, inode);
err = PTR_ERR(alias);
if (IS_ERR(alias))
goto out;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 4598345ab87d..26c33f36179a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -334,7 +334,8 @@ static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
BUG_ON(req->inode != inode);
curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
- if (curr_index == index) {
+ if (curr_index <= index &&
+ index < curr_index + req->num_pages) {
found = true;
break;
}
@@ -1178,9 +1179,10 @@ static inline void fuse_page_descs_length_init(struct fuse_req *req,
req->page_descs[i].offset;
}
-static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
+static inline unsigned long fuse_get_user_addr(struct iov_iter *ii)
{
- return (unsigned long)ii->iov->iov_base + ii->iov_offset;
+ struct iovec *iov = iov_iter_iovec(ii);
+ return (unsigned long)iov->iov_base + ii->iov_offset;
}
static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
@@ -1269,9 +1271,8 @@ static inline int fuse_iter_npages(const struct iov_iter *ii_p)
return min(npages, FUSE_MAX_PAGES_PER_REQ);
}
-ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
- unsigned long nr_segs, size_t count, loff_t *ppos,
- int write)
+ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *ii,
+ size_t count, loff_t *ppos, int write)
{
struct file *file = io->file;
struct fuse_file *ff = file->private_data;
@@ -1280,14 +1281,11 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
loff_t pos = *ppos;
ssize_t res = 0;
struct fuse_req *req;
- struct iov_iter ii;
-
- iov_iter_init(&ii, iov, nr_segs, count, 0);
if (io->async)
- req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
+ req = fuse_get_req_for_background(fc, fuse_iter_npages(ii));
else
- req = fuse_get_req(fc, fuse_iter_npages(&ii));
+ req = fuse_get_req(fc, fuse_iter_npages(ii));
if (IS_ERR(req))
return PTR_ERR(req);
@@ -1295,7 +1293,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
size_t nres;
fl_owner_t owner = current->files;
size_t nbytes = min(count, nmax);
- int err = fuse_get_user_pages(req, &ii, &nbytes, write);
+ int err = fuse_get_user_pages(req, ii, &nbytes, write);
if (err) {
res = err;
break;
@@ -1325,9 +1323,9 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
fuse_put_request(fc, req);
if (io->async)
req = fuse_get_req_for_background(fc,
- fuse_iter_npages(&ii));
+ fuse_iter_npages(ii));
else
- req = fuse_get_req(fc, fuse_iter_npages(&ii));
+ req = fuse_get_req(fc, fuse_iter_npages(ii));
if (IS_ERR(req))
break;
}
@@ -1341,10 +1339,8 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
-static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
- const struct iovec *iov,
- unsigned long nr_segs, loff_t *ppos,
- size_t count)
+static ssize_t __fuse_direct_read(struct fuse_io_priv *io, struct iov_iter *ii,
+ loff_t *ppos, size_t count)
{
ssize_t res;
struct file *file = io->file;
@@ -1353,7 +1349,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
if (is_bad_inode(inode))
return -EIO;
- res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0);
+ res = fuse_direct_io(io, ii, count, ppos, 0);
fuse_invalidate_attr(inode);
@@ -1365,21 +1361,24 @@ static ssize_t fuse_direct_read(struct file *file, char __user *buf,
{
struct fuse_io_priv io = { .async = 0, .file = file };
struct iovec iov = { .iov_base = buf, .iov_len = count };
- return __fuse_direct_read(&io, &iov, 1, ppos, count);
+ struct iov_iter ii;
+
+ iov_iter_init(&ii, &iov, 1, count, 0);
+
+ return __fuse_direct_read(&io, &ii, ppos, count);
}
-static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
- const struct iovec *iov,
- unsigned long nr_segs, loff_t *ppos)
+static ssize_t __fuse_direct_write(struct fuse_io_priv *io, struct iov_iter *ii,
+ loff_t *ppos)
{
struct file *file = io->file;
struct inode *inode = file_inode(file);
- size_t count = iov_length(iov, nr_segs);
+ size_t count = iov_iter_count(ii);
ssize_t res;
res = generic_write_checks(file, ppos, &count, 0);
if (!res)
- res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1);
+ res = fuse_direct_io(io, ii, count, ppos, 1);
fuse_invalidate_attr(inode);
@@ -1390,6 +1389,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
+ struct iov_iter ii;
struct inode *inode = file_inode(file);
ssize_t res;
struct fuse_io_priv io = { .async = 0, .file = file };
@@ -1397,9 +1397,11 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
if (is_bad_inode(inode))
return -EIO;
+ iov_iter_init(&ii, &iov, 1, count, 0);
+
/* Don't allow parallel writes to the same file */
mutex_lock(&inode->i_mutex);
- res = __fuse_direct_write(&io, &iov, 1, ppos);
+ res = __fuse_direct_write(&io, &ii, ppos);
if (res > 0)
fuse_write_update_size(inode, *ppos);
mutex_unlock(&inode->i_mutex);
@@ -1409,8 +1411,13 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
- __free_page(req->pages[0]);
- fuse_file_put(req->ff, false);
+ int i;
+
+ for (i = 0; i < req->num_pages; i++)
+ __free_page(req->pages[i]);
+
+ if (req->ff)
+ fuse_file_put(req->ff, false);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
@@ -1418,30 +1425,34 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
struct inode *inode = req->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+ int i;
list_del(&req->writepages_entry);
- dec_bdi_stat(bdi, BDI_WRITEBACK);
- dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
- bdi_writeout_inc(bdi);
+ for (i = 0; i < req->num_pages; i++) {
+ dec_bdi_stat(bdi, BDI_WRITEBACK);
+ dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
+ bdi_writeout_inc(bdi);
+ }
wake_up(&fi->page_waitq);
}
/* Called under fc->lock, may release and reacquire it */
-static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
+ loff_t size)
__releases(fc->lock)
__acquires(fc->lock)
{
struct fuse_inode *fi = get_fuse_inode(req->inode);
- loff_t size = i_size_read(req->inode);
struct fuse_write_in *inarg = &req->misc.write.in;
+ __u64 data_size = req->num_pages * PAGE_CACHE_SIZE;
if (!fc->connected)
goto out_free;
- if (inarg->offset + PAGE_CACHE_SIZE <= size) {
- inarg->size = PAGE_CACHE_SIZE;
+ if (inarg->offset + data_size <= size) {
+ inarg->size = data_size;
} else if (inarg->offset < size) {
- inarg->size = size & (PAGE_CACHE_SIZE - 1);
+ inarg->size = size - inarg->offset;
} else {
/* Got truncated off completely */
goto out_free;
@@ -1472,12 +1483,13 @@ __acquires(fc->lock)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
+ size_t crop = i_size_read(inode);
struct fuse_req *req;
while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
req = list_entry(fi->queued_writes.next, struct fuse_req, list);
list_del_init(&req->list);
- fuse_send_writepage(fc, req);
+ fuse_send_writepage(fc, req, crop);
}
}
@@ -1488,12 +1500,62 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
mapping_set_error(inode->i_mapping, req->out.h.error);
spin_lock(&fc->lock);
+ while (req->misc.write.next) {
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_write_in *inarg = &req->misc.write.in;
+ struct fuse_req *next = req->misc.write.next;
+ req->misc.write.next = next->misc.write.next;
+ next->misc.write.next = NULL;
+ next->ff = fuse_file_get(req->ff);
+ list_add(&next->writepages_entry, &fi->writepages);
+
+ /*
+ * Skip fuse_flush_writepages() to make it easy to crop requests
+ * based on primary request size.
+ *
+ * 1st case (trivial): there are no concurrent activities using
+ * fuse_set/release_nowrite. Then we're on safe side because
+ * fuse_flush_writepages() would call fuse_send_writepage()
+ * anyway.
+ *
+ * 2nd case: someone called fuse_set_nowrite and it is waiting
+ * now for completion of all in-flight requests. This happens
+ * rarely and no more than once per page, so this should be
+ * okay.
+ *
+ * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
+ * of fuse_set_nowrite..fuse_release_nowrite section. The fact
+ * that fuse_set_nowrite returned implies that all in-flight
+ * requests were completed along with all of their secondary
+ * requests. Further primary requests are blocked by negative
+ * writectr. Hence there cannot be any in-flight requests and
+ * no invocations of fuse_writepage_end() while we're in
+ * fuse_set_nowrite..fuse_release_nowrite section.
+ */
+ fuse_send_writepage(fc, next, inarg->offset + inarg->size);
+ }
fi->writectr--;
fuse_writepage_finish(fc, req);
spin_unlock(&fc->lock);
fuse_writepage_free(fc, req);
}
+static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
+ struct fuse_inode *fi)
+{
+ struct fuse_file *ff = NULL;
+
+ spin_lock(&fc->lock);
+ if (!WARN_ON(list_empty(&fi->write_files))) {
+ ff = list_entry(fi->write_files.next, struct fuse_file,
+ write_entry);
+ fuse_file_get(ff);
+ }
+ spin_unlock(&fc->lock);
+
+ return ff;
+}
+
static int fuse_writepage_locked(struct page *page)
{
struct address_space *mapping = page->mapping;
@@ -1501,8 +1563,8 @@ static int fuse_writepage_locked(struct page *page)
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_req *req;
- struct fuse_file *ff;
struct page *tmp_page;
+ int error = -ENOMEM;
set_page_writeback(page);
@@ -1515,16 +1577,16 @@ static int fuse_writepage_locked(struct page *page)
if (!tmp_page)
goto err_free;
- spin_lock(&fc->lock);
- BUG_ON(list_empty(&fi->write_files));
- ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
- req->ff = fuse_file_get(ff);
- spin_unlock(&fc->lock);
+ error = -EIO;
+ req->ff = fuse_write_file_get(fc, fi);
+ if (!req->ff)
+ goto err_free;
- fuse_write_fill(req, ff, page_offset(page), 0);
+ fuse_write_fill(req, req->ff, page_offset(page), 0);
copy_highpage(tmp_page, page);
req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
+ req->misc.write.next = NULL;
req->in.argpages = 1;
req->num_pages = 1;
req->pages[0] = tmp_page;
@@ -1550,19 +1612,263 @@ err_free:
fuse_request_free(req);
err:
end_page_writeback(page);
- return -ENOMEM;
+ return error;
}
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
int err;
+ if (fuse_page_is_writeback(page->mapping->host, page->index)) {
+ /*
+ * ->writepages() should be called for sync() and friends. We
+ * should only get here on direct reclaim and then we are
+ * allowed to skip a page which is already in flight
+ */
+ WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
+
+ redirty_page_for_writepage(wbc, page);
+ return 0;
+ }
+
err = fuse_writepage_locked(page);
unlock_page(page);
return err;
}
+struct fuse_fill_wb_data {
+ struct fuse_req *req;
+ struct fuse_file *ff;
+ struct inode *inode;
+ struct page **orig_pages;
+};
+
+static void fuse_writepages_send(struct fuse_fill_wb_data *data)
+{
+ struct fuse_req *req = data->req;
+ struct inode *inode = data->inode;
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_inode *fi = get_fuse_inode(inode);
+ int num_pages = req->num_pages;
+ int i;
+
+ req->ff = fuse_file_get(data->ff);
+ spin_lock(&fc->lock);
+ list_add_tail(&req->list, &fi->queued_writes);
+ fuse_flush_writepages(inode);
+ spin_unlock(&fc->lock);
+
+ for (i = 0; i < num_pages; i++)
+ end_page_writeback(data->orig_pages[i]);
+}
+
+static bool fuse_writepage_in_flight(struct fuse_req *new_req,
+ struct page *page)
+{
+ struct fuse_conn *fc = get_fuse_conn(new_req->inode);
+ struct fuse_inode *fi = get_fuse_inode(new_req->inode);
+ struct fuse_req *tmp;
+ struct fuse_req *old_req;
+ bool found = false;
+ pgoff_t curr_index;
+
+ BUG_ON(new_req->num_pages != 0);
+
+ spin_lock(&fc->lock);
+ list_del(&new_req->writepages_entry);
+ list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
+ BUG_ON(old_req->inode != new_req->inode);
+ curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+ if (curr_index <= page->index &&
+ page->index < curr_index + old_req->num_pages) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ list_add(&new_req->writepages_entry, &fi->writepages);
+ goto out_unlock;
+ }
+
+ new_req->num_pages = 1;
+ for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
+ BUG_ON(tmp->inode != new_req->inode);
+ curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+ if (tmp->num_pages == 1 &&
+ curr_index == page->index) {
+ old_req = tmp;
+ }
+ }
+
+ if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT ||
+ old_req->state == FUSE_REQ_PENDING)) {
+ struct backing_dev_info *bdi = page->mapping->backing_dev_info;
+
+ copy_highpage(old_req->pages[0], page);
+ spin_unlock(&fc->lock);
+
+ dec_bdi_stat(bdi, BDI_WRITEBACK);
+ dec_zone_page_state(page, NR_WRITEBACK_TEMP);
+ bdi_writeout_inc(bdi);
+ fuse_writepage_free(fc, new_req);
+ fuse_request_free(new_req);
+ goto out;
+ } else {
+ new_req->misc.write.next = old_req->misc.write.next;
+ old_req->misc.write.next = new_req;
+ }
+out_unlock:
+ spin_unlock(&fc->lock);
+out:
+ return found;
+}
+
+static int fuse_writepages_fill(struct page *page,
+ struct writeback_control *wbc, void *_data)
+{
+ struct fuse_fill_wb_data *data = _data;
+ struct fuse_req *req = data->req;
+ struct inode *inode = data->inode;
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct page *tmp_page;
+ bool is_writeback;
+ int err;
+
+ if (!data->ff) {
+ err = -EIO;
+ data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
+ if (!data->ff)
+ goto out_unlock;
+ }
+
+ /*
+ * Being under writeback is unlikely but possible. For example, a direct
+ * read into an mmapped fuse file will set the page dirty twice: once when
+ * the pages are faulted with get_user_pages(), and again after the read
+ * has completed.
+ */
+ is_writeback = fuse_page_is_writeback(inode, page->index);
+
+ if (req && req->num_pages &&
+ (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
+ (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write ||
+ data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
+ fuse_writepages_send(data);
+ data->req = NULL;
+ }
+ err = -ENOMEM;
+ tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (!tmp_page)
+ goto out_unlock;
+
+ /*
+ * The page must not be redirtied until the writeout is completed
+ * (i.e. userspace has sent a reply to the write request). Otherwise
+ * there could be more than one temporary page instance for each real
+ * page.
+ *
+ * This is ensured by holding the page lock in page_mkwrite() while
+ * checking fuse_page_is_writeback(). We already hold the page lock
+ * since clear_page_dirty_for_io() and keep it held until we add the
+ * request to the fi->writepages list and increment req->num_pages.
+ * After this fuse_page_is_writeback() will indicate that the page is
+ * under writeback, so we can release the page lock.
+ */
+ if (data->req == NULL) {
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ err = -ENOMEM;
+ req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
+ if (!req) {
+ __free_page(tmp_page);
+ goto out_unlock;
+ }
+
+ fuse_write_fill(req, data->ff, page_offset(page), 0);
+ req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
+ req->misc.write.next = NULL;
+ req->in.argpages = 1;
+ req->background = 1;
+ req->num_pages = 0;
+ req->end = fuse_writepage_end;
+ req->inode = inode;
+
+ spin_lock(&fc->lock);
+ list_add(&req->writepages_entry, &fi->writepages);
+ spin_unlock(&fc->lock);
+
+ data->req = req;
+ }
+ set_page_writeback(page);
+
+ copy_highpage(tmp_page, page);
+ req->pages[req->num_pages] = tmp_page;
+ req->page_descs[req->num_pages].offset = 0;
+ req->page_descs[req->num_pages].length = PAGE_SIZE;
+
+ inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK);
+ inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
+
+ err = 0;
+ if (is_writeback && fuse_writepage_in_flight(req, page)) {
+ end_page_writeback(page);
+ data->req = NULL;
+ goto out_unlock;
+ }
+ data->orig_pages[req->num_pages] = page;
+
+ /*
+ * Protected by fc->lock against concurrent access by
+ * fuse_page_is_writeback().
+ */
+ spin_lock(&fc->lock);
+ req->num_pages++;
+ spin_unlock(&fc->lock);
+
+out_unlock:
+ unlock_page(page);
+
+ return err;
+}
+
+static int fuse_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = mapping->host;
+ struct fuse_fill_wb_data data;
+ int err;
+
+ err = -EIO;
+ if (is_bad_inode(inode))
+ goto out;
+
+ data.inode = inode;
+ data.req = NULL;
+ data.ff = NULL;
+
+ err = -ENOMEM;
+ data.orig_pages = kzalloc(sizeof(struct page *) *
+ FUSE_MAX_PAGES_PER_REQ,
+ GFP_NOFS);
+ if (!data.orig_pages)
+ goto out;
+
+ err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
+ if (data.req) {
+ /* Ignore errors if we can write at least one page */
+ BUG_ON(!data.req->num_pages);
+ fuse_writepages_send(&data);
+ err = 0;
+ }
+ if (data.ff)
+ fuse_file_put(data.ff, false);
+
+ kfree(data.orig_pages);
+out:
+ return err;
+}
+
static int fuse_launder_page(struct page *page)
{
int err = 0;
@@ -1602,14 +1908,17 @@ static void fuse_vma_close(struct vm_area_struct *vma)
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
- /*
- * Don't use page->mapping as it may become NULL from a
- * concurrent truncate.
- */
- struct inode *inode = vma->vm_file->f_mapping->host;
+ struct inode *inode = file_inode(vma->vm_file);
+
+ file_update_time(vma->vm_file);
+ lock_page(page);
+ if (page->mapping != inode->i_mapping) {
+ unlock_page(page);
+ return VM_FAULT_NOPAGE;
+ }
fuse_wait_on_page_writeback(inode, page->index);
- return 0;
+ return VM_FAULT_LOCKED;
}
static const struct vm_operations_struct fuse_file_vm_ops = {
@@ -1868,30 +2177,17 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
while (iov_iter_count(&ii)) {
struct page *page = pages[page_idx++];
size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
- void *kaddr;
-
- kaddr = kmap(page);
+ size_t left;
- while (todo) {
- char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
- size_t iov_len = ii.iov->iov_len - ii.iov_offset;
- size_t copy = min(todo, iov_len);
- size_t left;
-
- if (!to_user)
- left = copy_from_user(kaddr, uaddr, copy);
- else
- left = copy_to_user(uaddr, kaddr, copy);
-
- if (unlikely(left))
- return -EFAULT;
+ if (!to_user)
+ left = iov_iter_copy_from_user(page, &ii, 0, todo);
+ else
+ left = iov_iter_copy_to_user(page, &ii, 0, todo);
- iov_iter_advance(&ii, copy);
- todo -= copy;
- kaddr += copy;
- }
+ if (unlikely(left))
+ return -EFAULT;
- kunmap(page);
+ iov_iter_advance(&ii, todo);
}
return 0;
@@ -2385,8 +2681,8 @@ static inline loff_t fuse_round_up(loff_t off)
}
static ssize_t
-fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs)
+fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *ii,
+ loff_t offset)
{
ssize_t ret = 0;
struct file *file = iocb->ki_filp;
@@ -2395,7 +2691,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
loff_t pos = 0;
struct inode *inode;
loff_t i_size;
- size_t count = iov_length(iov, nr_segs);
+ size_t count = iov_iter_count(ii);
struct fuse_io_priv *io;
pos = offset;
@@ -2436,9 +2732,9 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
io->async = false;
if (rw == WRITE)
- ret = __fuse_direct_write(io, iov, nr_segs, &pos);
+ ret = __fuse_direct_write(io, ii, &pos);
else
- ret = __fuse_direct_read(io, iov, nr_segs, &pos, count);
+ ret = __fuse_direct_read(io, ii, &pos, count);
if (io->async) {
fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
@@ -2581,6 +2877,7 @@ static const struct file_operations fuse_direct_io_file_operations = {
static const struct address_space_operations fuse_file_aops = {
.readpage = fuse_readpage,
.writepage = fuse_writepage,
+ .writepages = fuse_writepages,
.launder_page = fuse_launder_page,
.readpages = fuse_readpages,
.set_page_dirty = __set_page_dirty_nobuffers,
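
fuse_writepages_fill() above accumulates contiguous dirty pages into a single write request and flushes the request when it is full, when the next page is not contiguous, when the size limit is hit, or when the page is already under writeback. A toy sketch of just the batching/flush decision, with a fixed batch size standing in for FUSE_MAX_PAGES_PER_REQ and fc->max_write:

#include <stdio.h>

#define MAX_BATCH 4

struct batch {
        long pages[MAX_BATCH];
        int num_pages;
};

static void send_batch(struct batch *b)
{
        printf("flush %d page(s) starting at index %ld\n",
               b->num_pages, b->pages[0]);
        b->num_pages = 0;
}

static void add_page(struct batch *b, long index)
{
        /* Can't extend the current request: flush it and start a new one. */
        if (b->num_pages &&
            (b->num_pages == MAX_BATCH ||
             b->pages[b->num_pages - 1] + 1 != index))
                send_batch(b);

        b->pages[b->num_pages++] = index;
}

int main(void)
{
        struct batch b = { .num_pages = 0 };
        long indexes[] = { 0, 1, 2, 3, 4, 7, 8 };

        for (unsigned i = 0; i < sizeof(indexes) / sizeof(indexes[0]); i++)
                add_page(&b, indexes[i]);
        if (b.num_pages)
                send_batch(&b);         /* final partial request */
        return 0;
}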
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5b9e6f3b6aef..04c6084b0cf9 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -321,6 +321,7 @@ struct fuse_req {
struct {
struct fuse_write_in in;
struct fuse_write_out out;
+ struct fuse_req *next;
} write;
struct fuse_notify_retrieve_in retrieve_in;
struct fuse_lk_in lk_in;
@@ -374,9 +375,6 @@ struct fuse_conn {
/** Lock protecting accessess to members of this structure */
spinlock_t lock;
- /** Mutex protecting against directory alias creation */
- struct mutex inst_mutex;
-
/** Refcount */
atomic_t count;
@@ -858,9 +856,8 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
bool isdir);
-ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
- unsigned long nr_segs, size_t count, loff_t *ppos,
- int write);
+ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *ii,
+ size_t count, loff_t *ppos, int write);
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
unsigned int flags);
long fuse_ioctl_common(struct file *file, unsigned int cmd,
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index a8ce6dab60a0..1c15613c64f8 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -565,7 +565,6 @@ void fuse_conn_init(struct fuse_conn *fc)
{
memset(fc, 0, sizeof(*fc));
spin_lock_init(&fc->lock);
- mutex_init(&fc->inst_mutex);
init_rwsem(&fc->killsb);
atomic_set(&fc->count, 1);
init_waitqueue_head(&fc->waitq);
@@ -596,7 +595,6 @@ void fuse_conn_put(struct fuse_conn *fc)
if (atomic_dec_and_test(&fc->count)) {
if (fc->destroy_req)
fuse_request_free(fc->destroy_req);
- mutex_destroy(&fc->inst_mutex);
fc->release(fc);
}
}
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 1f7d8057ea68..01a2aa5f25a1 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -611,12 +611,14 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
if (alloc_required) {
+ struct gfs2_alloc_parms ap = { .aflags = 0, };
error = gfs2_quota_lock_check(ip);
if (error)
goto out_unlock;
requested = data_blocks + ind_blocks;
- error = gfs2_inplace_reserve(ip, requested, 0);
+ ap.target = requested;
+ error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto out_qunlock;
}
@@ -979,8 +981,7 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset,
- unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -1004,8 +1005,8 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
if (rv != 1)
goto out; /* dio not valid, fall back to buffered i/o */
- rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
- offset, nr_segs, gfs2_get_block_direct,
+ rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+ offset, gfs2_get_block_direct,
NULL, NULL, 0);
out:
gfs2_glock_dq(&gh);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 62a65fc448dc..fe0500c0af7a 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1216,6 +1216,7 @@ static int do_grow(struct inode *inode, u64 size)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_alloc_parms ap = { .target = 1, };
struct buffer_head *dibh;
int error;
int unstuff = 0;
@@ -1226,7 +1227,7 @@ static int do_grow(struct inode *inode, u64 size)
if (error)
return error;
- error = gfs2_inplace_reserve(ip, 1, 0);
+ error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto do_grow_qunlock;
unstuff = 1;
@@ -1279,6 +1280,7 @@ do_grow_qunlock:
int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
+ struct gfs2_inode *ip = GFS2_I(inode);
int ret;
u64 oldsize;
@@ -1294,7 +1296,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
inode_dio_wait(inode);
- ret = gfs2_rs_alloc(GFS2_I(inode));
+ ret = gfs2_rs_alloc(ip);
if (ret)
goto out;
@@ -1304,6 +1306,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
goto out;
}
+ gfs2_rs_deltree(ip->i_res);
ret = do_shrink(inode, oldsize, newsize);
out:
put_write_access(inode);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 0621b46d474d..0838913ca568 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -383,6 +383,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
struct inode *inode = file_inode(vma->vm_file);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned long last_index;
u64 pos = page->index << PAGE_CACHE_SHIFT;
unsigned int data_blocks, ind_blocks, rblocks;
@@ -430,7 +431,8 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto out_unlock;
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
- ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
+ ap.target = data_blocks + ind_blocks;
+ ret = gfs2_inplace_reserve(ip, &ap);
if (ret)
goto out_quota_unlock;
@@ -620,7 +622,7 @@ static int gfs2_release(struct inode *inode, struct file *file)
if (!(file->f_mode & FMODE_WRITE))
return 0;
- gfs2_rs_delete(ip);
+ gfs2_rs_delete(ip, &inode->i_writecount);
return 0;
}
@@ -681,10 +683,9 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
}
/**
- * gfs2_file_aio_write - Perform a write to a file
+ * gfs2_file_write_iter - Perform a write to a file
* @iocb: The io context
- * @iov: The data to write
- * @nr_segs: Number of @iov segments
+ * @iter: The data to write
* @pos: The file position
*
* We have to do a lock/unlock here to refresh the inode size for
@@ -694,11 +695,11 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
*
*/
-static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos)
{
struct file *file = iocb->ki_filp;
- size_t writesize = iov_length(iov, nr_segs);
+ size_t writesize = iov_iter_count(iter);
struct gfs2_inode *ip = GFS2_I(file_inode(file));
int ret;
@@ -717,7 +718,7 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
gfs2_glock_dq_uninit(&gh);
}
- return generic_file_aio_write(iocb, iov, nr_segs, pos);
+ return generic_file_write_iter(iocb, iter, pos);
}
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
@@ -800,6 +801,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
struct inode *inode = file_inode(file);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
loff_t bytes, max_bytes;
int error;
@@ -850,7 +852,8 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
retry:
gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
- error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
+ ap.target = data_blocks + ind_blocks;
+ error = gfs2_inplace_reserve(ip, &ap);
if (error) {
if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
bytes >>= 1;
@@ -1049,9 +1052,9 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
const struct file_operations gfs2_file_fops = {
.llseek = gfs2_llseek,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.write = do_sync_write,
- .aio_write = gfs2_file_aio_write,
+ .write_iter = gfs2_file_write_iter,
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
.open = gfs2_open,
@@ -1081,9 +1084,9 @@ const struct file_operations gfs2_dir_fops = {
const struct file_operations gfs2_file_fops_nolock = {
.llseek = gfs2_llseek,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.write = do_sync_write,
- .aio_write = gfs2_file_aio_write,
+ .write_iter = gfs2_file_write_iter,
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
.open = gfs2_open,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c2f41b4d00b9..e66a8009aff1 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -31,6 +31,7 @@
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
+#include <linux/lockref.h>
#include "gfs2.h"
#include "incore.h"
@@ -129,10 +130,10 @@ void gfs2_glock_free(struct gfs2_glock *gl)
*
*/
-void gfs2_glock_hold(struct gfs2_glock *gl)
+static void gfs2_glock_hold(struct gfs2_glock *gl)
{
- GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
- atomic_inc(&gl->gl_ref);
+ GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
+ lockref_get(&gl->gl_lockref);
}
/**
@@ -187,20 +188,6 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
}
/**
- * gfs2_glock_put_nolock() - Decrement reference count on glock
- * @gl: The glock to put
- *
- * This function should only be used if the caller has its own reference
- * to the glock, in addition to the one it is dropping.
- */
-
-void gfs2_glock_put_nolock(struct gfs2_glock *gl)
-{
- if (atomic_dec_and_test(&gl->gl_ref))
- GLOCK_BUG_ON(gl, 1);
-}
-
-/**
* gfs2_glock_put() - Decrement reference count on glock
* @gl: The glock to put
*
@@ -211,17 +198,22 @@ void gfs2_glock_put(struct gfs2_glock *gl)
struct gfs2_sbd *sdp = gl->gl_sbd;
struct address_space *mapping = gfs2_glock2aspace(gl);
- if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
- __gfs2_glock_remove_from_lru(gl);
- spin_unlock(&lru_lock);
- spin_lock_bucket(gl->gl_hash);
- hlist_bl_del_rcu(&gl->gl_list);
- spin_unlock_bucket(gl->gl_hash);
- GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
- GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
- trace_gfs2_glock_put(gl);
- sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
- }
+ if (lockref_put_or_lock(&gl->gl_lockref))
+ return;
+
+ lockref_mark_dead(&gl->gl_lockref);
+
+ spin_lock(&lru_lock);
+ __gfs2_glock_remove_from_lru(gl);
+ spin_unlock(&lru_lock);
+ spin_unlock(&gl->gl_lockref.lock);
+ spin_lock_bucket(gl->gl_hash);
+ hlist_bl_del_rcu(&gl->gl_list);
+ spin_unlock_bucket(gl->gl_hash);
+ GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
+ GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
+ trace_gfs2_glock_put(gl);
+ sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}
/**
@@ -244,7 +236,7 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
continue;
if (gl->gl_sbd != sdp)
continue;
- if (atomic_inc_not_zero(&gl->gl_ref))
+ if (lockref_get_not_dead(&gl->gl_lockref))
return gl;
}
@@ -396,10 +388,11 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
held2 = (new_state != LM_ST_UNLOCKED);
if (held1 != held2) {
+ GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
if (held2)
- gfs2_glock_hold(gl);
+ gl->gl_lockref.count++;
else
- gfs2_glock_put_nolock(gl);
+ gl->gl_lockref.count--;
}
if (held1 && held2 && list_empty(&gl->gl_holders))
clear_bit(GLF_QUEUED, &gl->gl_flags);
@@ -626,9 +619,9 @@ out:
out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
smp_mb__after_clear_bit();
- gfs2_glock_hold(gl);
+ gl->gl_lockref.count++;
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
- gfs2_glock_put_nolock(gl);
+ gl->gl_lockref.count--;
return;
out_unlock:
@@ -754,7 +747,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_sbd = sdp;
gl->gl_flags = 0;
gl->gl_name = name;
- atomic_set(&gl->gl_ref, 1);
+ gl->gl_lockref.count = 1;
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_target = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE;
@@ -1356,10 +1349,10 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
}
}
- spin_unlock(&gl->gl_spin);
+ gl->gl_lockref.count++;
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
- smp_wmb();
- gfs2_glock_hold(gl);
+ spin_unlock(&gl->gl_spin);
+
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put(gl);
}
@@ -1404,15 +1397,19 @@ __acquires(&lru_lock)
while(!list_empty(list)) {
gl = list_entry(list->next, struct gfs2_glock, gl_lru);
list_del_init(&gl->gl_lru);
+ if (!spin_trylock(&gl->gl_spin)) {
+ list_add(&gl->gl_lru, &lru_list);
+ atomic_inc(&lru_count);
+ continue;
+ }
clear_bit(GLF_LRU, &gl->gl_flags);
- gfs2_glock_hold(gl);
spin_unlock(&lru_lock);
- spin_lock(&gl->gl_spin);
+ gl->gl_lockref.count++;
if (demote_ok(gl))
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
- gfs2_glock_put_nolock(gl);
+ gl->gl_lockref.count--;
spin_unlock(&gl->gl_spin);
spin_lock(&lru_lock);
}
@@ -1493,7 +1490,7 @@ static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
rcu_read_lock();
hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
- if ((gl->gl_sbd == sdp) && atomic_inc_not_zero(&gl->gl_ref))
+ if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
examiner(gl);
}
rcu_read_unlock();
@@ -1746,7 +1743,7 @@ int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
state2str(gl->gl_demote_state), dtime,
atomic_read(&gl->gl_ail_count),
atomic_read(&gl->gl_revokes),
- atomic_read(&gl->gl_ref), gl->gl_hold_time);
+ (int)gl->gl_lockref.count, gl->gl_hold_time);
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
error = dump_holder(seq, gh);
@@ -1902,7 +1899,7 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
gi->nhash = 0;
}
/* Skip entries for other sb and dead entries */
- } while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
+ } while (gi->sdp != gi->gl->gl_sbd || __lockref_is_dead(&gi->gl->gl_lockref));
return 0;
}
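The glock changes just above replace the gl_ref atomic and a separate gl_spin with a single struct lockref, so the final-put path in gfs2_glock_put() becomes lockref_put_or_lock() followed by lockref_mark_dead(), and lookups use lockref_get_not_dead(). The sketch below is a small user-space analogue of those slow-path semantics only: obj_* names are hypothetical, a pthread mutex stands in for the spinlock, and the real lockref additionally has a lockless cmpxchg fast path.

/*
 * User-space sketch of the lockref-style put path: drop a reference,
 * and if it was the last one, return with the per-object lock held
 * and the count marked dead so concurrent lookups fail.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define OBJ_DEAD (-128L)                /* same sentinel value lockref uses */

struct obj {
        pthread_mutex_t lock;
        long count;
};

/* Returns 1 if a reference was dropped and the object survives;
 * returns 0 with obj->lock held when the caller holds the last ref. */
static int obj_put_or_lock(struct obj *o)
{
        pthread_mutex_lock(&o->lock);
        if (o->count > 1) {
                o->count--;
                pthread_mutex_unlock(&o->lock);
                return 1;
        }
        return 0;
}

/* Lookup side: take a reference unless the object is already dead. */
static int obj_get_not_dead(struct obj *o)
{
        int alive;

        pthread_mutex_lock(&o->lock);
        alive = (o->count != OBJ_DEAD);
        if (alive)
                o->count++;
        pthread_mutex_unlock(&o->lock);
        return alive;
}

static void obj_put(struct obj *o)
{
        if (obj_put_or_lock(o))
                return;

        o->count = OBJ_DEAD;            /* lockref_mark_dead() equivalent */
        /* hash/LRU removal would happen here, while still locked */
        pthread_mutex_unlock(&o->lock);
        pthread_mutex_destroy(&o->lock);
        free(o);
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        pthread_mutex_init(&o->lock, NULL);
        o->count = 1;

        printf("lookup succeeded: %d\n", obj_get_not_dead(o));  /* 1 */
        obj_put(o);                     /* count 2 -> 1 */
        obj_put(o);                     /* last reference: object freed */
        return 0;
}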
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 69f66e3d22bf..6647d77366ba 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -181,8 +181,6 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops,
int create, struct gfs2_glock **glp);
-extern void gfs2_glock_hold(struct gfs2_glock *gl);
-extern void gfs2_glock_put_nolock(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
unsigned flags, struct gfs2_holder *gh);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index e2e0a90396e7..db908f697139 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -525,9 +525,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
gl->gl_state == LM_ST_SHARED && ip) {
- gfs2_glock_hold(gl);
+ gl->gl_lockref.count++;
if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
- gfs2_glock_put_nolock(gl);
+ gl->gl_lockref.count--;
}
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 26aabd7caba7..ba1ea67f4eeb 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -21,6 +21,7 @@
#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/percpu.h>
+#include <linux/lockref.h>
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
@@ -71,6 +72,7 @@ struct gfs2_bitmap {
u32 bi_offset;
u32 bi_start;
u32 bi_len;
+ u32 bi_blocks;
};
struct gfs2_rgrpd {
@@ -101,19 +103,25 @@ struct gfs2_rgrpd {
struct gfs2_rbm {
struct gfs2_rgrpd *rgd;
- struct gfs2_bitmap *bi; /* Bitmap must belong to the rgd */
u32 offset; /* The offset is bitmap relative */
+ int bii; /* Bitmap index */
};
+static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
+{
+ return rbm->rgd->rd_bits + rbm->bii;
+}
+
static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
{
- return rbm->rgd->rd_data0 + (rbm->bi->bi_start * GFS2_NBBY) + rbm->offset;
+ return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
+ rbm->offset;
}
static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
const struct gfs2_rbm *rbm2)
{
- return (rbm1->rgd == rbm2->rgd) && (rbm1->bi == rbm2->bi) &&
+ return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
(rbm1->offset == rbm2->offset);
}
@@ -278,6 +286,20 @@ struct gfs2_blkreserv {
unsigned int rs_qa_qd_num;
};
+/*
+ * Allocation parameters
+ * @target: The number of blocks we'd ideally like to allocate
+ * @aflags: The flags (e.g. Orlov flag)
+ *
+ * The intent is to gradually expand this structure over time in
+ * order to give more information, e.g. alignment, min extent size
+ * to the allocation code.
+ */
+struct gfs2_alloc_parms {
+ u32 target;
+ u32 aflags;
+};
+
enum {
GLF_LOCK = 1,
GLF_DEMOTE = 3,
@@ -300,9 +322,9 @@ struct gfs2_glock {
struct gfs2_sbd *gl_sbd;
unsigned long gl_flags; /* GLF_... */
struct lm_lockname gl_name;
- atomic_t gl_ref;
- spinlock_t gl_spin;
+ struct lockref gl_lockref;
+#define gl_spin gl_lockref.lock
/* State fields protected by gl_spin */
unsigned int gl_state:2, /* Current state */
@@ -398,11 +420,10 @@ enum {
struct gfs2_quota_data {
struct list_head qd_list;
- struct list_head qd_reclaim;
-
- atomic_t qd_count;
-
struct kqid qd_id;
+ struct lockref qd_lockref;
+ struct list_head qd_lru;
+
unsigned long qd_flags; /* QDF_... */
s64 qd_change;
@@ -516,7 +537,6 @@ struct gfs2_tune {
unsigned int gt_logd_secs;
- unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */
unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
unsigned int gt_quota_scale_num; /* Numerator */
unsigned int gt_quota_scale_den; /* Denominator */
@@ -694,6 +714,7 @@ struct gfs2_sbd {
struct list_head sd_quota_list;
atomic_t sd_quota_count;
struct mutex sd_quota_mutex;
+ struct mutex sd_quota_sync_mutex;
wait_queue_head_t sd_quota_wait;
struct list_head sd_trunc_list;
spinlock_t sd_trunc_lock;
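incore.h above adds struct gfs2_alloc_parms, and the rest of the series converts every gfs2_inplace_reserve() caller to fill one in instead of passing (requested, aflags) positionally, so new allocation hints (alignment, minimum extent size) can be added later without touching each call site. A self-contained illustration of that parameter-struct pattern, using made-up names rather than gfs2 types:

/*
 * Sketch of the "parameter struct" pattern gfs2_alloc_parms follows:
 * callers name only the fields they care about via designated
 * initializers.  alloc_parms/inplace_reserve here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define AF_ORLOV 1u                     /* stand-in for GFS2_AF_ORLOV */

struct alloc_parms {
        uint32_t target;                /* blocks we would like to get */
        uint32_t aflags;                /* allocation flags */
        /* future: alignment, minimum extent size, ... */
};

static int inplace_reserve(const char *who, const struct alloc_parms *ap)
{
        printf("%s: reserve %u blocks, flags 0x%x\n",
               who, (unsigned)ap->target, (unsigned)ap->aflags);
        return 0;
}

int main(void)
{
        /* Mirrors the do_grow() call site: a single block, no flags. */
        struct alloc_parms one = { .target = 1, };
        /* Mirrors alloc_dinode() passing the Orlov flag through. */
        struct alloc_parms dinode = { .target = 1, .aflags = AF_ORLOV, };

        inplace_reserve("do_grow", &one);
        inplace_reserve("alloc_dinode", &dinode);
        return 0;
}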
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index ced3257f06e8..1615df16cf4e 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -379,6 +379,7 @@ static void munge_mode_uid_gid(const struct gfs2_inode *dip,
static int alloc_dinode(struct gfs2_inode *ip, u32 flags)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ struct gfs2_alloc_parms ap = { .target = RES_DINODE, .aflags = flags, };
int error;
int dblocks = 1;
@@ -386,7 +387,7 @@ static int alloc_dinode(struct gfs2_inode *ip, u32 flags)
if (error)
goto out;
- error = gfs2_inplace_reserve(ip, RES_DINODE, flags);
+ error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto out_quota;
@@ -472,6 +473,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
struct gfs2_inode *ip, int arq)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
+ struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
int error;
if (arq) {
@@ -479,7 +481,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
if (error)
goto fail_quota_locks;
- error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
+ error = gfs2_inplace_reserve(dip, &ap);
if (error)
goto fail_quota_locks;
@@ -584,17 +586,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (!IS_ERR(inode)) {
d = d_splice_alias(inode, dentry);
error = 0;
- if (file && !IS_ERR(d)) {
- if (d == NULL)
- d = dentry;
- if (S_ISREG(inode->i_mode))
- error = finish_open(file, d, gfs2_open_common, opened);
- else
+ if (file) {
+ if (S_ISREG(inode->i_mode)) {
+ WARN_ON(d != NULL);
+ error = finish_open(file, dentry, gfs2_open_common, opened);
+ } else {
error = finish_no_open(file, d);
+ }
+ } else {
+ dput(d);
}
gfs2_glock_dq_uninit(ghs);
- if (IS_ERR(d))
- return PTR_ERR(d);
return error;
} else if (error != -ENOENT) {
goto fail_gunlock;
@@ -713,7 +715,7 @@ fail_gunlock2:
fail_free_inode:
if (ip->i_gl)
gfs2_glock_put(ip->i_gl);
- gfs2_rs_delete(ip);
+ gfs2_rs_delete(ip, NULL);
free_inode_nonrcu(inode);
inode = NULL;
fail_gunlock:
@@ -781,8 +783,10 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
error = finish_open(file, dentry, gfs2_open_common, opened);
gfs2_glock_dq_uninit(&gh);
- if (error)
+ if (error) {
+ dput(d);
return ERR_PTR(error);
+ }
return d;
}
@@ -874,11 +878,12 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
error = 0;
if (alloc_required) {
+ struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
error = gfs2_quota_lock_check(dip);
if (error)
goto out_gunlock;
- error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
+ error = gfs2_inplace_reserve(dip, &ap);
if (error)
goto out_gunlock_q;
@@ -1163,14 +1168,16 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
d = __gfs2_lookup(dir, dentry, file, opened);
if (IS_ERR(d))
return PTR_ERR(d);
- if (d == NULL)
- d = dentry;
- if (d->d_inode) {
+ if (d != NULL)
+ dentry = d;
+ if (dentry->d_inode) {
if (!(*opened & FILE_OPENED))
- return finish_no_open(file, d);
+ return finish_no_open(file, dentry);
+ dput(d);
return 0;
}
+ BUG_ON(d != NULL);
if (!(flags & O_CREAT))
return -ENOENT;
@@ -1385,11 +1392,12 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
goto out_gunlock;
if (alloc_required) {
+ struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
error = gfs2_quota_lock_check(ndip);
if (error)
goto out_gunlock;
- error = gfs2_inplace_reserve(ndip, sdp->sd_max_dirres, 0);
+ error = gfs2_inplace_reserve(ndip, &ap);
if (error)
goto out_gunlock_q;
@@ -1506,13 +1514,6 @@ out:
return NULL;
}
-static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
-{
- char *s = nd_get_link(nd);
- if (!IS_ERR(s))
- kfree(s);
-}
-
/**
* gfs2_permission -
* @inode: The inode
@@ -1864,7 +1865,7 @@ const struct inode_operations gfs2_dir_iops = {
const struct inode_operations gfs2_symlink_iops = {
.readlink = generic_readlink,
.follow_link = gfs2_follow_link,
- .put_link = gfs2_put_link,
+ .put_link = kfree_put_link,
.permission = gfs2_permission,
.setattr = gfs2_setattr,
.getattr = gfs2_getattr,
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 010b9fb9fec6..985da945f0b5 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -272,7 +272,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
nrvecs = max(nrvecs/2, 1U);
}
- bio->bi_sector = blkno * (sb->s_blocksize >> 9);
+ bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
bio->bi_bdev = sb->s_bdev;
bio->bi_end_io = gfs2_end_log_write;
bio->bi_private = sdp;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 351586e24e30..0650db2541ef 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -31,12 +31,6 @@
struct workqueue_struct *gfs2_control_wq;
-static struct shrinker qd_shrinker = {
- .count_objects = gfs2_qd_shrink_count,
- .scan_objects = gfs2_qd_shrink_scan,
- .seeks = DEFAULT_SEEKS,
-};
-
static void gfs2_init_inode_once(void *foo)
{
struct gfs2_inode *ip = foo;
@@ -87,6 +81,10 @@ static int __init init_gfs2_fs(void)
if (error)
return error;
+ error = list_lru_init(&gfs2_qd_lru);
+ if (error)
+ goto fail_lru;
+
error = gfs2_glock_init();
if (error)
goto fail;
@@ -139,7 +137,7 @@ static int __init init_gfs2_fs(void)
if (!gfs2_rsrv_cachep)
goto fail;
- register_shrinker(&qd_shrinker);
+ register_shrinker(&gfs2_qd_shrinker);
error = register_filesystem(&gfs2_fs_type);
if (error)
@@ -179,7 +177,9 @@ fail_wq:
fail_unregister:
unregister_filesystem(&gfs2_fs_type);
fail:
- unregister_shrinker(&qd_shrinker);
+ list_lru_destroy(&gfs2_qd_lru);
+fail_lru:
+ unregister_shrinker(&gfs2_qd_shrinker);
gfs2_glock_exit();
if (gfs2_rsrv_cachep)
@@ -214,13 +214,14 @@ fail:
static void __exit exit_gfs2_fs(void)
{
- unregister_shrinker(&qd_shrinker);
+ unregister_shrinker(&gfs2_qd_shrinker);
gfs2_glock_exit();
gfs2_unregister_debugfs();
unregister_filesystem(&gfs2_fs_type);
unregister_filesystem(&gfs2meta_fs_type);
destroy_workqueue(gfs_recovery_wq);
destroy_workqueue(gfs2_control_wq);
+ list_lru_destroy(&gfs2_qd_lru);
rcu_barrier();
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 19ff5e8c285c..16194da91652 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -51,7 +51,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
{
spin_lock_init(&gt->gt_spin);
- gt->gt_quota_simul_sync = 64;
gt->gt_quota_warn_period = 10;
gt->gt_quota_scale_num = 1;
gt->gt_quota_scale_den = 1;
@@ -94,6 +93,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
INIT_LIST_HEAD(&sdp->sd_quota_list);
mutex_init(&sdp->sd_quota_mutex);
+ mutex_init(&sdp->sd_quota_sync_mutex);
init_waitqueue_head(&sdp->sd_quota_wait);
INIT_LIST_HEAD(&sdp->sd_trunc_list);
spin_lock_init(&sdp->sd_trunc_lock);
@@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
lock_page(page);
bio = bio_alloc(GFP_NOFS, 1);
- bio->bi_sector = sector * (sb->s_blocksize >> 9);
+ bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
bio->bi_bdev = sb->s_bdev;
bio_add_page(bio, page, PAGE_SIZE, 0);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index db441359ee8c..453b50eaddec 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -50,6 +50,8 @@
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
+#include <linux/lockref.h>
+#include <linux/list_lru.h>
#include "gfs2.h"
#include "incore.h"
@@ -71,29 +73,25 @@ struct gfs2_quota_change_host {
struct kqid qc_id;
};
-static LIST_HEAD(qd_lru_list);
-static atomic_t qd_lru_count = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(qd_lru_lock);
+/* Lock order: qd_lock -> qd->lockref.lock -> lru lock */
+static DEFINE_SPINLOCK(qd_lock);
+struct list_lru gfs2_qd_lru;
-unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
- struct shrink_control *sc)
+static void gfs2_qd_dispose(struct list_head *list)
{
struct gfs2_quota_data *qd;
struct gfs2_sbd *sdp;
- int nr_to_scan = sc->nr_to_scan;
- long freed = 0;
- if (!(sc->gfp_mask & __GFP_FS))
- return SHRINK_STOP;
-
- spin_lock(&qd_lru_lock);
- while (nr_to_scan && !list_empty(&qd_lru_list)) {
- qd = list_entry(qd_lru_list.next,
- struct gfs2_quota_data, qd_reclaim);
+ while (!list_empty(list)) {
+ qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
sdp = qd->qd_gl->gl_sbd;
+ list_del(&qd->qd_lru);
+
/* Free from the filesystem-specific list */
+ spin_lock(&qd_lock);
list_del(&qd->qd_list);
+ spin_unlock(&qd_lock);
gfs2_assert_warn(sdp, !qd->qd_change);
gfs2_assert_warn(sdp, !qd->qd_slot_count);
@@ -103,24 +101,59 @@ unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
atomic_dec(&sdp->sd_quota_count);
/* Delete it from the common reclaim list */
- list_del_init(&qd->qd_reclaim);
- atomic_dec(&qd_lru_count);
- spin_unlock(&qd_lru_lock);
kmem_cache_free(gfs2_quotad_cachep, qd);
- spin_lock(&qd_lru_lock);
- nr_to_scan--;
- freed++;
}
- spin_unlock(&qd_lru_lock);
+}
+
+
+static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg)
+{
+ struct list_head *dispose = arg;
+ struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
+
+ if (!spin_trylock(&qd->qd_lockref.lock))
+ return LRU_SKIP;
+
+ if (qd->qd_lockref.count == 0) {
+ lockref_mark_dead(&qd->qd_lockref);
+ list_move(&qd->qd_lru, dispose);
+ }
+
+ spin_unlock(&qd->qd_lockref.lock);
+ return LRU_REMOVED;
+}
+
+static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ LIST_HEAD(dispose);
+ unsigned long freed;
+
+ if (!(sc->gfp_mask & __GFP_FS))
+ return SHRINK_STOP;
+
+ freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
+ &dispose, &sc->nr_to_scan);
+
+ gfs2_qd_dispose(&dispose);
+
return freed;
}
-unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
+ struct shrink_control *sc)
{
- return vfs_pressure_ratio(atomic_read(&qd_lru_count));
+ return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid));
}
+struct shrinker gfs2_qd_shrinker = {
+ .count_objects = gfs2_qd_shrink_count,
+ .scan_objects = gfs2_qd_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+ .flags = SHRINKER_NUMA_AWARE,
+};
+
+
static u64 qd2index(struct gfs2_quota_data *qd)
{
struct kqid qid = qd->qd_id;
@@ -148,10 +181,11 @@ static int qd_alloc(struct gfs2_sbd *sdp, struct kqid qid,
if (!qd)
return -ENOMEM;
- atomic_set(&qd->qd_count, 1);
+ qd->qd_lockref.count = 1;
+ spin_lock_init(&qd->qd_lockref.lock);
qd->qd_id = qid;
qd->qd_slot = -1;
- INIT_LIST_HEAD(&qd->qd_reclaim);
+ INIT_LIST_HEAD(&qd->qd_lru);
error = gfs2_glock_get(sdp, qd2index(qd),
&gfs2_quota_glops, CREATE, &qd->qd_gl);
@@ -177,16 +211,11 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
for (;;) {
found = 0;
- spin_lock(&qd_lru_lock);
+ spin_lock(&qd_lock);
list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
- if (qid_eq(qd->qd_id, qid)) {
- if (!atomic_read(&qd->qd_count) &&
- !list_empty(&qd->qd_reclaim)) {
- /* Remove it from reclaim list */
- list_del_init(&qd->qd_reclaim);
- atomic_dec(&qd_lru_count);
- }
- atomic_inc(&qd->qd_count);
+ if (qid_eq(qd->qd_id, qid) &&
+ lockref_get_not_dead(&qd->qd_lockref)) {
+ list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
found = 1;
break;
}
@@ -202,7 +231,7 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
new_qd = NULL;
}
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
if (qd) {
if (new_qd) {
@@ -222,18 +251,19 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
static void qd_hold(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
- gfs2_assert(sdp, atomic_read(&qd->qd_count));
- atomic_inc(&qd->qd_count);
+ gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
+ lockref_get(&qd->qd_lockref);
}
static void qd_put(struct gfs2_quota_data *qd)
{
- if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
- /* Add to the reclaim list */
- list_add_tail(&qd->qd_reclaim, &qd_lru_list);
- atomic_inc(&qd_lru_count);
- spin_unlock(&qd_lru_lock);
- }
+ if (lockref_put_or_lock(&qd->qd_lockref))
+ return;
+
+ qd->qd_lockref.count = 0;
+ list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
+ spin_unlock(&qd->qd_lockref.lock);
+
}
static int slot_get(struct gfs2_quota_data *qd)
@@ -242,10 +272,10 @@ static int slot_get(struct gfs2_quota_data *qd)
unsigned int c, o = 0, b;
unsigned char byte = 0;
- spin_lock(&qd_lru_lock);
+ spin_lock(&qd_lock);
if (qd->qd_slot_count++) {
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
return 0;
}
@@ -269,13 +299,13 @@ found:
sdp->sd_quota_bitmap[c][o] |= 1 << b;
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
return 0;
fail:
qd->qd_slot_count--;
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
return -ENOSPC;
}
@@ -283,23 +313,43 @@ static void slot_hold(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
- spin_lock(&qd_lru_lock);
+ spin_lock(&qd_lock);
gfs2_assert(sdp, qd->qd_slot_count);
qd->qd_slot_count++;
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
+}
+
+static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
+ unsigned int bit, int new_value)
+{
+ unsigned int c, o, b = bit;
+ int old_value;
+
+ c = b / (8 * PAGE_SIZE);
+ b %= 8 * PAGE_SIZE;
+ o = b / 8;
+ b %= 8;
+
+ old_value = (bitmap[c][o] & (1 << b));
+ gfs2_assert_withdraw(sdp, !old_value != !new_value);
+
+ if (new_value)
+ bitmap[c][o] |= 1 << b;
+ else
+ bitmap[c][o] &= ~(1 << b);
}
static void slot_put(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
- spin_lock(&qd_lru_lock);
+ spin_lock(&qd_lock);
gfs2_assert(sdp, qd->qd_slot_count);
if (!--qd->qd_slot_count) {
gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
qd->qd_slot = -1;
}
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
}
static int bh_get(struct gfs2_quota_data *qd)
@@ -363,6 +413,25 @@ static void bh_put(struct gfs2_quota_data *qd)
mutex_unlock(&sdp->sd_quota_mutex);
}
+static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
+ u64 *sync_gen)
+{
+ if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
+ !test_bit(QDF_CHANGE, &qd->qd_flags) ||
+ (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
+ return 0;
+
+ if (!lockref_get_not_dead(&qd->qd_lockref))
+ return 0;
+
+ list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
+ set_bit(QDF_LOCKED, &qd->qd_flags);
+ qd->qd_change_sync = qd->qd_change;
+ gfs2_assert_warn(sdp, qd->qd_slot_count);
+ qd->qd_slot_count++;
+ return 1;
+}
+
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
struct gfs2_quota_data *qd = NULL;
@@ -374,31 +443,18 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
if (sdp->sd_vfs->s_flags & MS_RDONLY)
return 0;
- spin_lock(&qd_lru_lock);
+ spin_lock(&qd_lock);
list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
- if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
- !test_bit(QDF_CHANGE, &qd->qd_flags) ||
- qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
- continue;
-
- list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
-
- set_bit(QDF_LOCKED, &qd->qd_flags);
- gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
- atomic_inc(&qd->qd_count);
- qd->qd_change_sync = qd->qd_change;
- gfs2_assert_warn(sdp, qd->qd_slot_count);
- qd->qd_slot_count++;
- found = 1;
-
- break;
+ found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
+ if (found)
+ break;
}
if (!found)
qd = NULL;
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
if (qd) {
gfs2_assert_warn(sdp, qd->qd_change_sync);
@@ -416,43 +472,6 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
return 0;
}
-static int qd_trylock(struct gfs2_quota_data *qd)
-{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-
- if (sdp->sd_vfs->s_flags & MS_RDONLY)
- return 0;
-
- spin_lock(&qd_lru_lock);
-
- if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
- !test_bit(QDF_CHANGE, &qd->qd_flags)) {
- spin_unlock(&qd_lru_lock);
- return 0;
- }
-
- list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
-
- set_bit(QDF_LOCKED, &qd->qd_flags);
- gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
- atomic_inc(&qd->qd_count);
- qd->qd_change_sync = qd->qd_change;
- gfs2_assert_warn(sdp, qd->qd_slot_count);
- qd->qd_slot_count++;
-
- spin_unlock(&qd_lru_lock);
-
- gfs2_assert_warn(sdp, qd->qd_change_sync);
- if (bh_get(qd)) {
- clear_bit(QDF_LOCKED, &qd->qd_flags);
- slot_put(qd);
- qd_put(qd);
- return 0;
- }
-
- return 1;
-}
-
static void qd_unlock(struct gfs2_quota_data *qd)
{
gfs2_assert_warn(qd->qd_gl->gl_sbd,
@@ -602,9 +621,9 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
x = be64_to_cpu(qc->qc_change) + change;
qc->qc_change = cpu_to_be64(x);
- spin_lock(&qd_lru_lock);
+ spin_lock(&qd_lock);
qd->qd_change = x;
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
if (!x) {
gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
@@ -763,6 +782,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+ struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned int data_blocks, ind_blocks;
struct gfs2_holder *ghs, i_gh;
unsigned int qx, x;
@@ -815,7 +835,8 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
reserved = 1 + (nalloc * (data_blocks + ind_blocks));
- error = gfs2_inplace_reserve(ip, reserved, 0);
+ ap.target = reserved;
+ error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto out_alloc;
@@ -974,9 +995,9 @@ static int need_sync(struct gfs2_quota_data *qd)
if (!qd->qd_qb.qb_limit)
return 0;
- spin_lock(&qd_lru_lock);
+ spin_lock(&qd_lock);
value = qd->qd_change;
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
spin_lock(&gt->gt_spin);
num = gt->gt_quota_scale_num;
@@ -1001,9 +1022,11 @@ static int need_sync(struct gfs2_quota_data *qd)
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_quota_data *qda[4];
unsigned int count = 0;
unsigned int x;
+ int found;
if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
goto out;
@@ -1016,9 +1039,25 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
sync = need_sync(qd);
gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
+ if (!sync)
+ continue;
+
+ spin_lock(&qd_lock);
+ found = qd_check_sync(sdp, qd, NULL);
+ spin_unlock(&qd_lock);
+
+ if (!found)
+ continue;
- if (sync && qd_trylock(qd))
- qda[count++] = qd;
+ gfs2_assert_warn(sdp, qd->qd_change_sync);
+ if (bh_get(qd)) {
+ clear_bit(QDF_LOCKED, &qd->qd_flags);
+ slot_put(qd);
+ qd_put(qd);
+ continue;
+ }
+
+ qda[count++] = qd;
}
if (count) {
@@ -1067,9 +1106,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
continue;
value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
- spin_lock(&qd_lru_lock);
+ spin_lock(&qd_lock);
value += qd->qd_change;
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
print_message(qd, "exceeded");
@@ -1118,17 +1157,18 @@ int gfs2_quota_sync(struct super_block *sb, int type)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_data **qda;
- unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
+ unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
unsigned int num_qd;
unsigned int x;
int error = 0;
- sdp->sd_quota_sync_gen++;
-
qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
if (!qda)
return -ENOMEM;
+ mutex_lock(&sdp->sd_quota_sync_mutex);
+ sdp->sd_quota_sync_gen++;
+
do {
num_qd = 0;
@@ -1153,6 +1193,7 @@ int gfs2_quota_sync(struct super_block *sb, int type)
}
} while (!error && num_qd == max_qd);
+ mutex_unlock(&sdp->sd_quota_sync_mutex);
kfree(qda);
return error;
@@ -1258,11 +1299,11 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
qd->qd_slot = slot;
qd->qd_slot_count = 1;
- spin_lock(&qd_lru_lock);
+ spin_lock(&qd_lock);
gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
list_add(&qd->qd_list, &sdp->sd_quota_list);
atomic_inc(&sdp->sd_quota_count);
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
found++;
}
@@ -1288,30 +1329,34 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
struct gfs2_quota_data *qd;
unsigned int x;
- spin_lock(&qd_lru_lock);
+ spin_lock(&qd_lock);
while (!list_empty(head)) {
qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
- if (atomic_read(&qd->qd_count) > 1 ||
- (atomic_read(&qd->qd_count) &&
- !test_bit(QDF_CHANGE, &qd->qd_flags))) {
+ /*
+ * To be removed in due course... we should be able to
+ * ensure that all refs to the qd have gone by this point
+ * so that this rather odd test is not required
+ */
+ spin_lock(&qd->qd_lockref.lock);
+ if (qd->qd_lockref.count > 1 ||
+ (qd->qd_lockref.count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
+ spin_unlock(&qd->qd_lockref.lock);
list_move(&qd->qd_list, head);
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
schedule();
- spin_lock(&qd_lru_lock);
+ spin_lock(&qd_lock);
continue;
}
+ spin_unlock(&qd->qd_lockref.lock);
list_del(&qd->qd_list);
/* Also remove if this qd exists in the reclaim list */
- if (!list_empty(&qd->qd_reclaim)) {
- list_del_init(&qd->qd_reclaim);
- atomic_dec(&qd_lru_count);
- }
+ list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
atomic_dec(&sdp->sd_quota_count);
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
- if (!atomic_read(&qd->qd_count)) {
+ if (!qd->qd_lockref.count) {
gfs2_assert_warn(sdp, !qd->qd_change);
gfs2_assert_warn(sdp, !qd->qd_slot_count);
} else
@@ -1321,9 +1366,9 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
gfs2_glock_put(qd->qd_gl);
kmem_cache_free(gfs2_quotad_cachep, qd);
- spin_lock(&qd_lru_lock);
+ spin_lock(&qd_lock);
}
- spin_unlock(&qd_lru_lock);
+ spin_unlock(&qd_lock);
gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
@@ -1462,7 +1507,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
}
fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
- fqs->qs_incoredqs = atomic_read(&qd_lru_count);
+ fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru);
return 0;
}
@@ -1573,10 +1618,12 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
if (gfs2_is_stuffed(ip))
alloc_required = 1;
if (alloc_required) {
+ struct gfs2_alloc_parms ap = { .aflags = 0, };
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
blocks = 1 + data_blocks + ind_blocks;
- error = gfs2_inplace_reserve(ip, blocks, 0);
+ ap.target = blocks;
+ error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto out_i;
blocks += gfs2_rg_blocks(ip, blocks);
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 0f64d9deb1b0..96e4f34a03b0 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -10,9 +10,10 @@
#ifndef __QUOTA_DOT_H__
#define __QUOTA_DOT_H__
+#include <linux/list_lru.h>
+
struct gfs2_inode;
struct gfs2_sbd;
-struct shrink_control;
#define NO_UID_QUOTA_CHANGE INVALID_UID
#define NO_GID_QUOTA_CHANGE INVALID_GID
@@ -53,10 +54,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
return ret;
}
-extern unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
- struct shrink_control *sc);
-extern unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
- struct shrink_control *sc);
extern const struct quotactl_ops gfs2_quotactl_ops;
+extern struct shrinker gfs2_qd_shrinker;
+extern struct list_lru gfs2_qd_lru;
#endif /* __QUOTA_DOT_H__ */
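The quota.c/quota.h conversion above moves quota data onto a NUMA-aware list_lru, gives each entry a lockref, and replaces the old global reclaim list with a SHRINKER_NUMA_AWARE shrinker whose isolate callback collects idle entries onto a private dispose list. The kernel-style outline below condenses that shape using only calls that appear in the hunks; my_obj/my_lru/my_* are hypothetical, module init/exit plus the list_lru_init()/register_shrinker() calls are omitted, and it is meant as a reading aid rather than buildable code:

/*
 * Kernel-style outline of the list_lru + NUMA-aware shrinker shape
 * used by the quota rework: idle objects are isolated onto a local
 * dispose list under the LRU lock, then freed outside it.
 */
#include <linux/list_lru.h>
#include <linux/lockref.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
        struct lockref lockref;
        struct list_head lru;
};

static struct list_lru my_lru;          /* list_lru_init() at init time */

static enum lru_status my_isolate(struct list_head *item, spinlock_t *lock,
                                  void *arg)
{
        struct list_head *dispose = arg;
        struct my_obj *obj = list_entry(item, struct my_obj, lru);

        if (!spin_trylock(&obj->lockref.lock))
                return LRU_SKIP;

        if (obj->lockref.count == 0) {
                lockref_mark_dead(&obj->lockref);
                list_move(&obj->lru, dispose);
        }

        spin_unlock(&obj->lockref.lock);
        return LRU_REMOVED;
}

static unsigned long my_scan(struct shrinker *shrink,
                             struct shrink_control *sc)
{
        LIST_HEAD(dispose);
        unsigned long freed;
        struct my_obj *obj;

        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        freed = list_lru_walk_node(&my_lru, sc->nid, my_isolate,
                                   &dispose, &sc->nr_to_scan);

        while (!list_empty(&dispose)) {
                obj = list_entry(dispose.next, struct my_obj, lru);
                list_del(&obj->lru);
                kfree(obj);             /* the real code frees from a cache */
        }

        return freed;
}

static unsigned long my_count(struct shrinker *shrink,
                              struct shrink_control *sc)
{
        return list_lru_count_node(&my_lru, sc->nid);
}

static struct shrinker my_shrinker = {
        .count_objects = my_count,
        .scan_objects  = my_scan,
        .seeks         = DEFAULT_SEEKS,
        .flags         = SHRINKER_NUMA_AWARE,   /* per-node count/scan */
};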
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 69317435faa7..4d83abdd5635 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -81,11 +81,12 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
unsigned char new_state)
{
unsigned char *byte1, *byte2, *end, cur_state;
- unsigned int buflen = rbm->bi->bi_len;
+ struct gfs2_bitmap *bi = rbm_bi(rbm);
+ unsigned int buflen = bi->bi_len;
const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
- byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
- end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen;
+ byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
+ end = bi->bi_bh->b_data + bi->bi_offset + buflen;
BUG_ON(byte1 >= end);
@@ -95,18 +96,17 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
"new_state=%d\n", rbm->offset, cur_state, new_state);
printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
- (unsigned long long)rbm->rgd->rd_addr,
- rbm->bi->bi_start);
+ (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
- rbm->bi->bi_offset, rbm->bi->bi_len);
+ bi->bi_offset, bi->bi_len);
dump_stack();
gfs2_consist_rgrpd(rbm->rgd);
return;
}
*byte1 ^= (cur_state ^ new_state) << bit;
- if (do_clone && rbm->bi->bi_clone) {
- byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
+ if (do_clone && bi->bi_clone) {
+ byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
*byte2 ^= (cur_state ^ new_state) << bit;
}
@@ -121,7 +121,8 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
{
- const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset;
+ struct gfs2_bitmap *bi = rbm_bi(rbm);
+ const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
const u8 *byte;
unsigned int bit;
@@ -252,29 +253,53 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
u64 rblock = block - rbm->rgd->rd_data0;
- u32 x;
if (WARN_ON_ONCE(rblock > UINT_MAX))
return -EINVAL;
if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
return -E2BIG;
- rbm->bi = rbm->rgd->rd_bits;
+ rbm->bii = 0;
rbm->offset = (u32)(rblock);
/* Check if the block is within the first block */
- if (rbm->offset < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY)
+ if (rbm->offset < rbm_bi(rbm)->bi_blocks)
return 0;
/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
rbm->offset += (sizeof(struct gfs2_rgrp) -
sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
- x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
- rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
- rbm->bi += x;
+ rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+ rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
return 0;
}
/**
+ * gfs2_rbm_incr - increment an rbm structure
+ * @rbm: The rbm with rgd already set correctly
+ *
+ * This function takes an existing rbm structure and increments it to the next
+ * viable block offset.
+ *
+ * Returns: If incrementing the offset would cause the rbm to go past the
+ * end of the rgrp, true is returned, otherwise false.
+ *
+ */
+
+static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
+{
+ if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
+ rbm->offset++;
+ return false;
+ }
+ if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
+ return true;
+
+ rbm->offset = 0;
+ rbm->bii++;
+ return false;
+}
+
+/**
* gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
* @rbm: Position to search (value/result)
* @n_unaligned: Number of unaligned blocks to check
@@ -285,7 +310,6 @@ static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
{
- u64 block;
u32 n;
u8 res;
@@ -296,8 +320,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
(*len)--;
if (*len == 0)
return true;
- block = gfs2_rbm_to_block(rbm);
- if (gfs2_rbm_from_block(rbm, block + 1))
+ if (gfs2_rbm_incr(rbm))
return true;
}
@@ -328,6 +351,7 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
u32 chunk_size;
u8 *ptr, *start, *end;
u64 block;
+ struct gfs2_bitmap *bi;
if (n_unaligned &&
gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
@@ -336,11 +360,12 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
n_unaligned = len & 3;
/* Start is now byte aligned */
while (len > 3) {
- start = rbm.bi->bi_bh->b_data;
- if (rbm.bi->bi_clone)
- start = rbm.bi->bi_clone;
- end = start + rbm.bi->bi_bh->b_size;
- start += rbm.bi->bi_offset;
+ bi = rbm_bi(&rbm);
+ start = bi->bi_bh->b_data;
+ if (bi->bi_clone)
+ start = bi->bi_clone;
+ end = start + bi->bi_bh->b_size;
+ start += bi->bi_offset;
BUG_ON(rbm.offset & 3);
start += (rbm.offset / GFS2_NBBY);
bytes = min_t(u32, len / GFS2_NBBY, (end - start));
@@ -605,11 +630,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
RB_CLEAR_NODE(&rs->rs_node);
if (rs->rs_free) {
+ struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
+
/* return reserved blocks to the rgrp */
BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
rs->rs_free = 0;
- clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
+ clear_bit(GBF_FULL, &bi->bi_flags);
smp_mb__after_clear_bit();
}
}
@@ -634,14 +661,13 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
/**
* gfs2_rs_delete - delete a multi-block reservation
* @ip: The inode for this reservation
+ * @wcount: The inode's write count, or NULL
*
*/
-void gfs2_rs_delete(struct gfs2_inode *ip)
+void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
{
- struct inode *inode = &ip->i_inode;
-
down_write(&ip->i_rw_mutex);
- if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) {
+ if (ip->i_res && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
gfs2_rs_deltree(ip->i_res);
BUG_ON(ip->i_res->rs_free);
kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
@@ -743,18 +769,21 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
bi->bi_offset = sizeof(struct gfs2_rgrp);
bi->bi_start = 0;
bi->bi_len = bytes;
+ bi->bi_blocks = bytes * GFS2_NBBY;
/* header block */
} else if (x == 0) {
bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
bi->bi_offset = sizeof(struct gfs2_rgrp);
bi->bi_start = 0;
bi->bi_len = bytes;
+ bi->bi_blocks = bytes * GFS2_NBBY;
/* last block */
} else if (x + 1 == length) {
bytes = bytes_left;
bi->bi_offset = sizeof(struct gfs2_meta_header);
bi->bi_start = rgd->rd_bitbytes - bytes_left;
bi->bi_len = bytes;
+ bi->bi_blocks = bytes * GFS2_NBBY;
/* other blocks */
} else {
bytes = sdp->sd_sb.sb_bsize -
@@ -762,6 +791,7 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
bi->bi_offset = sizeof(struct gfs2_meta_header);
bi->bi_start = rgd->rd_bitbytes - bytes_left;
bi->bi_len = bytes;
+ bi->bi_blocks = bytes * GFS2_NBBY;
}
bytes_left -= bytes;
@@ -1392,12 +1422,12 @@ static void rs_insert(struct gfs2_inode *ip)
* rg_mblk_search - find a group of multiple free blocks to form a reservation
* @rgd: the resource group descriptor
* @ip: pointer to the inode for which we're reserving blocks
- * @requested: number of blocks required for this allocation
+ * @ap: the allocation parameters
*
*/
static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
- unsigned requested)
+ const struct gfs2_alloc_parms *ap)
{
struct gfs2_rbm rbm = { .rgd = rgd, };
u64 goal;
@@ -1410,7 +1440,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
if (S_ISDIR(inode->i_mode))
extlen = 1;
else {
- extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
+ extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
}
if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
@@ -1554,14 +1584,14 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
const struct gfs2_inode *ip, bool nowrap)
{
struct buffer_head *bh;
- struct gfs2_bitmap *initial_bi;
+ int initial_bii;
u32 initial_offset;
u32 offset;
u8 *buffer;
- int index;
int n = 0;
int iters = rbm->rgd->rd_length;
int ret;
+ struct gfs2_bitmap *bi;
/* If we are not starting at the beginning of a bitmap, then we
* need to add one to the bitmap count to ensure that we search
@@ -1571,52 +1601,53 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
iters++;
while(1) {
- if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
+ bi = rbm_bi(rbm);
+ if (test_bit(GBF_FULL, &bi->bi_flags) &&
(state == GFS2_BLKST_FREE))
goto next_bitmap;
- bh = rbm->bi->bi_bh;
- buffer = bh->b_data + rbm->bi->bi_offset;
+ bh = bi->bi_bh;
+ buffer = bh->b_data + bi->bi_offset;
WARN_ON(!buffer_uptodate(bh));
- if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
- buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
+ if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
+ buffer = bi->bi_clone + bi->bi_offset;
initial_offset = rbm->offset;
- offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
+ offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
if (offset == BFITNOENT)
goto bitmap_full;
rbm->offset = offset;
if (ip == NULL)
return 0;
- initial_bi = rbm->bi;
+ initial_bii = rbm->bii;
ret = gfs2_reservation_check_and_update(rbm, ip, minext);
if (ret == 0)
return 0;
if (ret > 0) {
- n += (rbm->bi - initial_bi);
+ n += (rbm->bii - initial_bii);
goto next_iter;
}
if (ret == -E2BIG) {
- index = 0;
+ rbm->bii = 0;
rbm->offset = 0;
- n += (rbm->bi - initial_bi);
+ n += (rbm->bii - initial_bii);
goto res_covered_end_of_rgrp;
}
return ret;
bitmap_full: /* Mark bitmap as full and fall through */
- if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
- set_bit(GBF_FULL, &rbm->bi->bi_flags);
+ if ((state == GFS2_BLKST_FREE) && initial_offset == 0) {
+ struct gfs2_bitmap *bi = rbm_bi(rbm);
+ set_bit(GBF_FULL, &bi->bi_flags);
+ }
next_bitmap: /* Find next bitmap in the rgrp */
rbm->offset = 0;
- index = rbm->bi - rbm->rgd->rd_bits;
- index++;
- if (index == rbm->rgd->rd_length)
- index = 0;
+ rbm->bii++;
+ if (rbm->bii == rbm->rgd->rd_length)
+ rbm->bii = 0;
res_covered_end_of_rgrp:
- rbm->bi = &rbm->rgd->rd_bits[index];
- if ((index == 0) && nowrap)
+ if ((rbm->bii == 0) && nowrap)
break;
n++;
next_iter:
@@ -1645,7 +1676,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
struct gfs2_inode *ip;
int error;
int found = 0;
- struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };
+ struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
while (1) {
down_write(&sdp->sd_log_flush_lock);
@@ -1800,12 +1831,12 @@ static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *b
/**
* gfs2_inplace_reserve - Reserve space in the filesystem
* @ip: the inode to reserve space for
- * @requested: the number of blocks to be reserved
+ * @ap: the allocation parameters
*
* Returns: errno
*/
-int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *begin = NULL;
@@ -1817,17 +1848,16 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
if (sdp->sd_args.ar_rgrplvb)
flags |= GL_SKIP;
- if (gfs2_assert_warn(sdp, requested))
+ if (gfs2_assert_warn(sdp, ap->target))
return -EINVAL;
if (gfs2_rs_active(rs)) {
begin = rs->rs_rbm.rgd;
- flags = 0; /* Yoda: Do or do not. There is no try */
} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
rs->rs_rbm.rgd = begin = ip->i_rgd;
} else {
rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
}
- if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV))
+ if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
skip = gfs2_orlov_skip(ip);
if (rs->rs_rbm.rgd == NULL)
return -EBADSLT;
@@ -1869,14 +1899,14 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
/* Get a reservation if we don't already have one */
if (!gfs2_rs_active(rs))
- rg_mblk_search(rs->rs_rbm.rgd, ip, requested);
+ rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
/* Skip rgrps when we can't get a reservation on first pass */
if (!gfs2_rs_active(rs) && (loops < 1))
goto check_rgrp;
/* If rgrp has enough free space, use it */
- if (rs->rs_rbm.rgd->rd_free_clone >= requested) {
+ if (rs->rs_rbm.rgd->rd_free_clone >= ap->target) {
ip->i_rgd = rs->rs_rbm.rgd;
return 0;
}
@@ -1973,14 +2003,14 @@ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
*n = 1;
block = gfs2_rbm_to_block(rbm);
- gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm->bi->bi_bh);
+ gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
block++;
while (*n < elen) {
ret = gfs2_rbm_from_block(&pos, block);
if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
break;
- gfs2_trans_add_meta(pos.rgd->rd_gl, pos.bi->bi_bh);
+ gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
gfs2_setbit(&pos, true, GFS2_BLKST_USED);
(*n)++;
block++;
@@ -2001,6 +2031,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
u32 blen, unsigned char new_state)
{
struct gfs2_rbm rbm;
+ struct gfs2_bitmap *bi;
rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
if (!rbm.rgd) {
@@ -2011,15 +2042,15 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
while (blen--) {
gfs2_rbm_from_block(&rbm, bstart);
+ bi = rbm_bi(&rbm);
bstart++;
- if (!rbm.bi->bi_clone) {
- rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
- GFP_NOFS | __GFP_NOFAIL);
- memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
- rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
- rbm.bi->bi_len);
+ if (!bi->bi_clone) {
+ bi->bi_clone = kmalloc(bi->bi_bh->b_size,
+ GFP_NOFS | __GFP_NOFAIL);
+ memcpy(bi->bi_clone + bi->bi_offset,
+ bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
}
- gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.bi->bi_bh);
+ gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
gfs2_setbit(&rbm, false, new_state);
}
@@ -2103,6 +2134,35 @@ out:
}
/**
+ * gfs2_set_alloc_start - Set starting point for block allocation
+ * @rbm: The rbm which will be set to the required location
+ * @ip: The gfs2 inode
+ * @dinode: Flag to say if allocation includes a new inode
+ *
+ * This sets the starting point from the reservation if one is active;
+ * otherwise it falls back to guessing a start point based on the
+ * inode's goal block or the last allocation point in the rgrp.
+ */
+
+static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
+ const struct gfs2_inode *ip, bool dinode)
+{
+ u64 goal;
+
+ if (gfs2_rs_active(ip->i_res)) {
+ *rbm = ip->i_res->rs_rbm;
+ return;
+ }
+
+ if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
+ goal = ip->i_goal;
+ else
+ goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
+
+ gfs2_rbm_from_block(rbm, goal);
+}
+
+/**
* gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
* @ip: the inode to allocate the block for
* @bn: Used to return the starting block number
@@ -2120,22 +2180,14 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
struct buffer_head *dibh;
struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
unsigned int ndata;
- u64 goal;
u64 block; /* block, within the file system scope */
int error;
- if (gfs2_rs_active(ip->i_res))
- goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
- else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
- goal = ip->i_goal;
- else
- goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
-
- gfs2_rbm_from_block(&rbm, goal);
+ gfs2_set_alloc_start(&rbm, ip, dinode);
error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);
if (error == -ENOSPC) {
- gfs2_rbm_from_block(&rbm, goal);
+ gfs2_set_alloc_start(&rbm, ip, dinode);
error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
}
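The rgrp.c rework above stops embedding a struct gfs2_bitmap pointer in struct gfs2_rbm, keeping a small bitmap index (bii) plus the rbm_bi() accessor instead, and adds gfs2_rbm_incr() so stepping one block forward no longer round-trips through an absolute block number via gfs2_rbm_from_block(). A tiny self-contained analogue of that index-plus-accessor idea, with hypothetical pos_*/group names:

/*
 * Sketch of the index-plus-accessor pattern: a position records which
 * bitmap it is in (an index) and an offset inside it, and incrementing
 * rolls over into the next bitmap directly.
 */
#include <stdbool.h>
#include <stdio.h>

struct bitmap {
        unsigned int blocks;            /* capacity, like bi_blocks */
};

struct group {
        struct bitmap bits[4];
        unsigned int nr_bitmaps;
};

struct pos {
        struct group *grp;
        unsigned int bii;               /* bitmap index, like rbm->bii */
        unsigned int offset;            /* block offset in that bitmap */
};

static struct bitmap *pos_bi(const struct pos *p)
{
        return &p->grp->bits[p->bii];   /* rbm_bi() equivalent */
}

/* Returns true when the position would run off the end of the group. */
static bool pos_incr(struct pos *p)
{
        if (p->offset + 1 < pos_bi(p)->blocks) {
                p->offset++;
                return false;
        }
        if (p->bii == p->grp->nr_bitmaps - 1)
                return true;
        p->offset = 0;
        p->bii++;
        return false;
}

int main(void)
{
        struct group g = { { { 2 }, { 2 }, { 2 }, { 2 } }, 4 };
        struct pos p = { &g, 0, 0 };

        do {
                printf("bitmap %u, offset %u\n", p.bii, p.offset);
        } while (!pos_incr(&p));
        return 0;
}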
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 5b3f4a896e6c..3a10d2ffbbe7 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -40,7 +40,7 @@ extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
#define GFS2_AF_ORLOV 1
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 flags);
+extern int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap);
extern void gfs2_inplace_release(struct gfs2_inode *ip);
extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
@@ -48,7 +48,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
extern int gfs2_rs_alloc(struct gfs2_inode *ip);
extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
-extern void gfs2_rs_delete(struct gfs2_inode *ip);
+extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount);
extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index e5639dec66c4..35da5b19c0de 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1526,7 +1526,7 @@ out_unlock:
out:
/* Case 3 starts here */
truncate_inode_pages(&inode->i_data, 0);
- gfs2_rs_delete(ip);
+ gfs2_rs_delete(ip, NULL);
gfs2_ordered_del_inode(ip);
clear_inode(inode);
gfs2_dir_hash_inval(ip);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index aa5c48044966..d09f6edda0ff 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -587,7 +587,6 @@ TUNE_ATTR(max_readahead, 0);
TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
-TUNE_ATTR(quota_simul_sync, 1);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
@@ -597,7 +596,6 @@ static struct attribute *tune_attrs[] = {
&tune_attr_max_readahead.attr,
&tune_attr_complain_secs.attr,
&tune_attr_statfs_slow.attr,
- &tune_attr_quota_simul_sync.attr,
&tune_attr_statfs_quantum.attr,
&tune_attr_quota_scale.attr,
&tune_attr_new_files_jdata.attr,
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 6402fb69d71b..f7109f689e61 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -268,23 +268,3 @@ int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
return rv;
}
-void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
- unsigned int bit, int new_value)
-{
- unsigned int c, o, b = bit;
- int old_value;
-
- c = b / (8 * PAGE_SIZE);
- b %= 8 * PAGE_SIZE;
- o = b / 8;
- b %= 8;
-
- old_value = (bitmap[c][o] & (1 << b));
- gfs2_assert_withdraw(sdp, !old_value != !new_value);
-
- if (new_value)
- bitmap[c][o] |= 1 << b;
- else
- bitmap[c][o] &= ~(1 << b);
-}
-
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 80535739ac7b..b7ffb09b99ea 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -164,8 +164,6 @@ static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
#define gfs2_tune_get(sdp, field) \
gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
-void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
- unsigned int bit, int new_value);
int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...);
#endif /* __UTIL_DOT_H__ */
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index ecd37f30ab91..8c6a6f6bdba9 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -723,6 +723,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
unsigned int blks,
ea_skeleton_call_t skeleton_call, void *private)
{
+ struct gfs2_alloc_parms ap = { .target = blks };
struct buffer_head *dibh;
int error;
@@ -734,7 +735,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
if (error)
return error;
- error = gfs2_inplace_reserve(ip, blks, 0);
+ error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto out_gunlock_q;
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 380ab31b5e0f..3fe7b8e53290 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -125,15 +125,14 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
}
static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = file_inode(file)->i_mapping->host;
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
- hfs_get_block);
+ ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, hfs_get_block);
/*
* In case of error extending write may have instantiated a few
@@ -141,7 +140,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
*/
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iov, nr_segs);
+ loff_t end = offset + iov_iter_count(iter);
if (end > isize)
hfs_write_failed(mapping, end);
@@ -675,9 +674,9 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
static const struct file_operations hfs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.splice_read = generic_file_splice_read,
.fsync = hfs_file_fsync,
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 37213d075f3c..96d7a2ccded2 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -123,14 +123,14 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
}
static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = file_inode(file)->i_mapping->host;
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+ ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
hfsplus_get_block);
/*
@@ -139,7 +139,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
*/
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iov, nr_segs);
+ loff_t end = offset + iov_iter_count(iter);
if (end > isize)
hfsplus_write_failed(mapping, end);
@@ -399,9 +399,9 @@ static const struct inode_operations hfsplus_file_inode_operations = {
static const struct file_operations hfsplus_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.splice_read = generic_file_splice_read,
.fsync = hfsplus_file_fsync,
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index b51a6079108d..5585a327b0a5 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -71,7 +71,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
bio = bio_alloc(GFP_NOIO, 1);
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_bdev = sb->s_bdev;
bio->bi_end_io = hfsplus_end_io_sync;
bio->bi_private = &wait;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 25437280a207..111a9916bcf5 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -388,8 +388,8 @@ static const struct file_operations hostfs_file_fops = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.splice_read = generic_file_splice_read,
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.write = do_sync_write,
.mmap = generic_file_mmap,
.open = hostfs_file_open,
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 67c1a61e0955..1ff95c19a469 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -198,9 +198,9 @@ const struct file_operations hpfs_file_ops =
{
.llseek = generic_file_llseek,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.release = hpfs_file_release,
.fsync = hpfs_file_fsync,
diff --git a/fs/internal.h b/fs/internal.h
index 513e0d859a6c..6964003cfef8 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -140,6 +140,10 @@ extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
*/
extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
extern int rw_verify_area(int, struct file *, const loff_t *, size_t);
+extern ssize_t do_aio_read(struct kiocb *kiocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
+extern ssize_t do_aio_write(struct kiocb *kiocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
/*
* splice.c
diff --git a/fs/iov-iter.c b/fs/iov-iter.c
new file mode 100644
index 000000000000..ec461c8fea22
--- /dev/null
+++ b/fs/iov-iter.c
@@ -0,0 +1,411 @@
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <linux/hardirq.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+
+static size_t __iovec_copy_to_user(char *vaddr, const struct iovec *iov,
+ size_t base, size_t bytes, int atomic)
+{
+ size_t copied = 0, left = 0;
+
+ while (bytes) {
+ char __user *buf = iov->iov_base + base;
+ int copy = min(bytes, iov->iov_len - base);
+
+ base = 0;
+ if (atomic)
+ left = __copy_to_user_inatomic(buf, vaddr, copy);
+ else
+ left = __copy_to_user(buf, vaddr, copy);
+ copied += copy;
+ bytes -= copy;
+ vaddr += copy;
+ iov++;
+
+ if (unlikely(left))
+ break;
+ }
+ return copied - left;
+}
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied. If a fault is encountered then return the number of
+ * bytes which were copied.
+ */
+static size_t ii_iovec_copy_to_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ struct iovec *iov = (struct iovec *)i->data;
+ char *kaddr;
+ size_t copied;
+
+ BUG_ON(!in_atomic());
+ kaddr = kmap_atomic(page);
+ if (likely(i->nr_segs == 1)) {
+ int left;
+ char __user *buf = iov->iov_base + i->iov_offset;
+ left = __copy_to_user_inatomic(buf, kaddr + offset, bytes);
+ copied = bytes - left;
+ } else {
+ copied = __iovec_copy_to_user(kaddr + offset, iov,
+ i->iov_offset, bytes, 1);
+ }
+ kunmap_atomic(kaddr);
+
+ return copied;
+}
+
+/*
+ * This has the same side effects and return value as
+ * ii_iovec_copy_to_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+static size_t ii_iovec_copy_to_user(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes,
+ int check_access)
+{
+ struct iovec *iov = (struct iovec *)i->data;
+ char *kaddr;
+ size_t copied;
+
+ if (check_access) {
+ might_sleep();
+ if (generic_segment_checks(iov, &i->nr_segs, &bytes,
+ VERIFY_WRITE))
+ return 0;
+ }
+
+ if (likely(i->nr_segs == 1)) {
+ int left;
+ char __user *buf = iov->iov_base + i->iov_offset;
+ /*
+ * Faults on the destination of a read are common, so do it
+ * before taking the kmap.
+ */
+ if (!fault_in_pages_writeable(buf, bytes)) {
+ kaddr = kmap_atomic(page);
+ left = __copy_to_user_inatomic(buf, kaddr + offset,
+ bytes);
+ kunmap_atomic(kaddr);
+ if (left == 0)
+ goto success;
+ }
+ kaddr = kmap(page);
+ left = copy_to_user(buf, kaddr + offset, bytes);
+ kunmap(page);
+success:
+ copied = bytes - left;
+ } else {
+ kaddr = kmap(page);
+ copied = __iovec_copy_to_user(kaddr + offset, iov,
+ i->iov_offset, bytes, 0);
+ kunmap(page);
+ }
+ return copied;
+}
+
+#ifdef CONFIG_BLOCK
+/*
+ * As an easily verifiable first pass, we implement all the methods that
+ * copy data to and from bvec pages with one function. We implement it
+ * all with kmap_atomic().
+ */
+static size_t bvec_copy_tofrom_page(struct iov_iter *iter, struct page *page,
+ unsigned long page_offset, size_t bytes,
+ int topage)
+{
+ struct bio_vec *bvec = (struct bio_vec *)iter->data;
+ size_t bvec_offset = iter->iov_offset;
+ size_t remaining = bytes;
+ void *bvec_map;
+ void *page_map;
+ size_t copy;
+
+ page_map = kmap_atomic(page);
+
+ BUG_ON(bytes > iter->count);
+ while (remaining) {
+ BUG_ON(bvec->bv_len == 0);
+ BUG_ON(bvec_offset >= bvec->bv_len);
+ copy = min(remaining, bvec->bv_len - bvec_offset);
+ bvec_map = kmap_atomic(bvec->bv_page);
+ if (topage)
+ memcpy(page_map + page_offset,
+ bvec_map + bvec->bv_offset + bvec_offset,
+ copy);
+ else
+ memcpy(bvec_map + bvec->bv_offset + bvec_offset,
+ page_map + page_offset,
+ copy);
+ kunmap_atomic(bvec_map);
+ remaining -= copy;
+ bvec_offset += copy;
+ page_offset += copy;
+ if (bvec_offset == bvec->bv_len) {
+ bvec_offset = 0;
+ bvec++;
+ }
+ }
+
+ kunmap_atomic(page_map);
+
+ return bytes;
+}
+
+static size_t ii_bvec_copy_to_user_atomic(struct page *page, struct iov_iter *i,
+ unsigned long offset, size_t bytes)
+{
+ return bvec_copy_tofrom_page(i, page, offset, bytes, 0);
+}
+static size_t ii_bvec_copy_to_user(struct page *page, struct iov_iter *i,
+ unsigned long offset, size_t bytes,
+ int check_access)
+{
+ return bvec_copy_tofrom_page(i, page, offset, bytes, 0);
+}
+static size_t ii_bvec_copy_from_user_atomic(struct page *page,
+ struct iov_iter *i,
+ unsigned long offset, size_t bytes)
+{
+ return bvec_copy_tofrom_page(i, page, offset, bytes, 1);
+}
+static size_t ii_bvec_copy_from_user(struct page *page, struct iov_iter *i,
+ unsigned long offset, size_t bytes)
+{
+ return bvec_copy_tofrom_page(i, page, offset, bytes, 1);
+}
+
+/*
+ * bio_vecs have a stricter structure than iovecs that might have
+ * come from userspace. There are no zero length bio_vec elements.
+ */
+static void ii_bvec_advance(struct iov_iter *i, size_t bytes)
+{
+ struct bio_vec *bvec = (struct bio_vec *)i->data;
+ size_t offset = i->iov_offset;
+ size_t delta;
+
+ BUG_ON(i->count < bytes);
+ while (bytes) {
+ BUG_ON(bvec->bv_len == 0);
+ BUG_ON(bvec->bv_len <= offset);
+ delta = min(bytes, bvec->bv_len - offset);
+ offset += delta;
+ i->count -= delta;
+ bytes -= delta;
+ if (offset == bvec->bv_len) {
+ bvec++;
+ offset = 0;
+ }
+ }
+
+ i->data = (unsigned long)bvec;
+ i->iov_offset = offset;
+}
+
+/*
+ * pages pointed to by bio_vecs are always pinned.
+ */
+static int ii_bvec_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+ return 0;
+}
+
+static size_t ii_bvec_single_seg_count(const struct iov_iter *i)
+{
+ const struct bio_vec *bvec = (struct bio_vec *)i->data;
+ if (i->nr_segs == 1)
+ return i->count;
+ else
+ return min(i->count, bvec->bv_len - i->iov_offset);
+}
+
+static int ii_bvec_shorten(struct iov_iter *i, size_t count)
+{
+ return -EINVAL;
+}
+
+struct iov_iter_ops ii_bvec_ops = {
+ .ii_copy_to_user_atomic = ii_bvec_copy_to_user_atomic,
+ .ii_copy_to_user = ii_bvec_copy_to_user,
+ .ii_copy_from_user_atomic = ii_bvec_copy_from_user_atomic,
+ .ii_copy_from_user = ii_bvec_copy_from_user,
+ .ii_advance = ii_bvec_advance,
+ .ii_fault_in_readable = ii_bvec_fault_in_readable,
+ .ii_single_seg_count = ii_bvec_single_seg_count,
+ .ii_shorten = ii_bvec_shorten,
+};
+EXPORT_SYMBOL(ii_bvec_ops);
+#endif /* CONFIG_BLOCK */
+
+static size_t __iovec_copy_from_user(char *vaddr, const struct iovec *iov,
+ size_t base, size_t bytes, int atomic)
+{
+ size_t copied = 0, left = 0;
+
+ while (bytes) {
+ char __user *buf = iov->iov_base + base;
+ int copy = min(bytes, iov->iov_len - base);
+
+ base = 0;
+ if (atomic)
+ left = __copy_from_user_inatomic(vaddr, buf, copy);
+ else
+ left = __copy_from_user(vaddr, buf, copy);
+ copied += copy;
+ bytes -= copy;
+ vaddr += copy;
+ iov++;
+
+ if (unlikely(left))
+ break;
+ }
+ return copied - left;
+}
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied. If a fault is encountered then return the number
+ * of bytes which were copied.
+ */
+static size_t ii_iovec_copy_from_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ struct iovec *iov = (struct iovec *)i->data;
+ char *kaddr;
+ size_t copied;
+
+ BUG_ON(!in_atomic());
+ kaddr = kmap_atomic(page);
+ if (likely(i->nr_segs == 1)) {
+ int left;
+ char __user *buf = iov->iov_base + i->iov_offset;
+ left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+ copied = bytes - left;
+ } else {
+ copied = __iovec_copy_from_user(kaddr + offset, iov,
+ i->iov_offset, bytes, 1);
+ }
+ kunmap_atomic(kaddr);
+
+ return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+
+/*
+ * This has the same side effects and return value as
+ * ii_iovec_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+static size_t ii_iovec_copy_from_user(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ struct iovec *iov = (struct iovec *)i->data;
+ char *kaddr;
+ size_t copied;
+
+ kaddr = kmap(page);
+ if (likely(i->nr_segs == 1)) {
+ int left;
+ char __user *buf = iov->iov_base + i->iov_offset;
+ left = __copy_from_user(kaddr + offset, buf, bytes);
+ copied = bytes - left;
+ } else {
+ copied = __iovec_copy_from_user(kaddr + offset, iov,
+ i->iov_offset, bytes, 0);
+ }
+ kunmap(page);
+ return copied;
+}
+
+static void ii_iovec_advance(struct iov_iter *i, size_t bytes)
+{
+ BUG_ON(i->count < bytes);
+
+ if (likely(i->nr_segs == 1)) {
+ i->iov_offset += bytes;
+ i->count -= bytes;
+ } else {
+ struct iovec *iov = (struct iovec *)i->data;
+ size_t base = i->iov_offset;
+ unsigned long nr_segs = i->nr_segs;
+
+ /*
+ * The !iov->iov_len check ensures we skip over unlikely
+ * zero-length segments (without overrunning the iovec).
+ */
+ while (bytes || unlikely(i->count && !iov->iov_len)) {
+ int copy;
+
+ copy = min(bytes, iov->iov_len - base);
+ BUG_ON(!i->count || i->count < copy);
+ i->count -= copy;
+ bytes -= copy;
+ base += copy;
+ if (iov->iov_len == base) {
+ iov++;
+ nr_segs--;
+ base = 0;
+ }
+ }
+ i->data = (unsigned long)iov;
+ i->iov_offset = base;
+ i->nr_segs = nr_segs;
+ }
+}
+
+/*
+ * Fault in the first iovec of the given iov_iter, to a maximum length
+ * of bytes. Returns 0 on success, or non-zero if the memory could not be
+ * accessed (i.e. because it is an invalid address).
+ *
+ * writev-intensive code may want this to prefault several iovecs -- that
+ * would be possible (callers must not rely on the fact that _only_ the
+ * first iovec will be faulted with the current implementation).
+ */
+static int ii_iovec_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+ struct iovec *iov = (struct iovec *)i->data;
+ char __user *buf = iov->iov_base + i->iov_offset;
+ bytes = min(bytes, iov->iov_len - i->iov_offset);
+ return fault_in_pages_readable(buf, bytes);
+}
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+static size_t ii_iovec_single_seg_count(const struct iov_iter *i)
+{
+ const struct iovec *iov = (struct iovec *)i->data;
+ if (i->nr_segs == 1)
+ return i->count;
+ else
+ return min(i->count, iov->iov_len - i->iov_offset);
+}
+
+static int ii_iovec_shorten(struct iov_iter *i, size_t count)
+{
+ struct iovec *iov = (struct iovec *)i->data;
+ i->nr_segs = iov_shorten(iov, i->nr_segs, count);
+ i->count = min(i->count, count);
+ return 0;
+}
+
+struct iov_iter_ops ii_iovec_ops = {
+ .ii_copy_to_user_atomic = ii_iovec_copy_to_user_atomic,
+ .ii_copy_to_user = ii_iovec_copy_to_user,
+ .ii_copy_from_user_atomic = ii_iovec_copy_from_user_atomic,
+ .ii_copy_from_user = ii_iovec_copy_from_user,
+ .ii_advance = ii_iovec_advance,
+ .ii_fault_in_readable = ii_iovec_fault_in_readable,
+ .ii_single_seg_count = ii_iovec_single_seg_count,
+ .ii_shorten = ii_iovec_shorten,
+};
+EXPORT_SYMBOL(ii_iovec_ops);
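
The new file above hangs all copy/advance/fault-in behaviour off an ops table so that generic pagecache code can treat user iovecs and pinned kernel bio_vec pages uniformly. As a rough userspace analogue of that dispatch pattern (all mini_* names are invented for this sketch; none of it is kernel API), something like the following compiles and runs on its own:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

struct mini_iter;

/* Tiny vtable mirroring the shape of iov_iter_ops: callers never know
 * which backend they are talking to. */
struct mini_iter_ops {
	size_t (*copy_to_buf)(struct mini_iter *i, char *dst, size_t bytes);
	void   (*advance)(struct mini_iter *i, size_t bytes);
};

struct mini_iter {
	const struct mini_iter_ops *ops;
	const struct iovec *iov;	/* backing segments */
	unsigned long nr_segs;
	size_t iov_offset;		/* offset into current segment */
	size_t count;			/* bytes remaining overall */
};

static void iovec_advance(struct mini_iter *i, size_t bytes)
{
	while (bytes) {
		size_t chunk = i->iov->iov_len - i->iov_offset;

		if (chunk > bytes)
			chunk = bytes;
		i->iov_offset += chunk;
		i->count -= chunk;
		bytes -= chunk;
		if (i->iov_offset == i->iov->iov_len) {
			i->iov++;		/* step to the next segment */
			i->nr_segs--;
			i->iov_offset = 0;
		}
	}
}

static size_t iovec_copy_to_buf(struct mini_iter *i, char *dst, size_t bytes)
{
	size_t copied = 0;

	while (bytes && i->count) {
		size_t chunk = i->iov->iov_len - i->iov_offset;

		if (chunk > bytes)
			chunk = bytes;
		memcpy(dst + copied,
		       (char *)i->iov->iov_base + i->iov_offset, chunk);
		copied += chunk;
		bytes -= chunk;
		i->ops->advance(i, chunk);
	}
	return copied;
}

static const struct mini_iter_ops mini_iovec_ops = {
	.copy_to_buf	= iovec_copy_to_buf,
	.advance	= iovec_advance,
};

int main(void)
{
	char a[] = "hello ", b[] = "iov_iter";
	struct iovec segs[2] = {
		{ .iov_base = a, .iov_len = strlen(a) },
		{ .iov_base = b, .iov_len = strlen(b) },
	};
	struct mini_iter it = {
		.ops = &mini_iovec_ops,
		.iov = segs,
		.nr_segs = 2,
		.count = strlen(a) + strlen(b),
	};
	char out[32] = "";

	/* Generic caller only goes through the ops pointer. */
	it.ops->copy_to_buf(&it, out, sizeof(out) - 1);
	printf("%s\n", out);	/* prints "hello iov_iter" */
	return 0;
}

A bvec-backed iterator in the kernel simply plugs a different ops table (ii_bvec_ops above) into the same slot, which is why ii_bvec_fault_in_readable can be a no-op: the pages behind a bio_vec are already pinned.
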
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index be0c39b66fe0..aa603e017d22 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -26,7 +26,6 @@
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
-#include <linux/backing-dev.h>
static void __journal_temp_unlink_buffer(struct journal_head *jh);
@@ -100,10 +99,11 @@ static int start_this_handle(journal_t *journal, handle_t *handle)
alloc_transaction:
if (!journal->j_running_transaction) {
- new_transaction = kzalloc(sizeof(*new_transaction), GFP_NOFS);
+ new_transaction = kzalloc(sizeof(*new_transaction),
+ GFP_NOFS|__GFP_NOFAIL);
if (!new_transaction) {
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- goto alloc_transaction;
+ ret = -ENOMEM;
+ goto out;
}
}
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 1506673c087e..1d7ab8b7d41e 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -51,10 +51,10 @@ const struct file_operations jffs2_file_operations =
{
.llseek = generic_file_llseek,
.open = generic_file_open,
- .read = do_sync_read,
- .aio_read = generic_file_aio_read,
- .write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .read = do_sync_read,
+ .read_iter = generic_file_read_iter,
+ .write = do_sync_write,
+ .write_iter = generic_file_write_iter,
.unlocked_ioctl=jffs2_ioctl,
.mmap = generic_file_readonly_mmap,
.fsync = jffs2_fsync,
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index fe3c0527545f..09b3ed455724 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -515,6 +515,10 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
c = JFFS2_SB_INFO(sb);
+ /* Do not support the MLC nand */
+ if (c->mtd->type == MTD_MLCNANDFLASH)
+ return -EINVAL;
+
#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
if (c->mtd->type == MTD_NANDFLASH) {
pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index dd7442c58358..040b6c7725ad 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -151,8 +151,8 @@ const struct file_operations jfs_file_operations = {
.llseek = generic_file_llseek,
.write = do_sync_write,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index f4aab719add5..51652aaa3dc8 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -331,15 +331,14 @@ static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
}
static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = file->f_mapping->host;
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
- jfs_get_block);
+ ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, jfs_get_block);
/*
* In case of error extending write may have instantiated a few
@@ -347,7 +346,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
*/
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iov, nr_segs);
+ loff_t end = offset + iov_iter_count(iter);
if (end > isize)
jfs_write_failed(mapping, end);
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index c1a3e603279c..7f464c513ba0 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -95,7 +95,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
if (insert_inode_locked(inode) < 0) {
rc = -EINVAL;
- goto fail_unlock;
+ goto fail_put;
}
inode_init_owner(inode, parent, mode);
@@ -156,7 +156,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
fail_drop:
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
-fail_unlock:
clear_nlink(inode);
unlock_new_inode(inode);
fail_put:
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 360d27c48887..8d811e02b4b9 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1998,20 +1998,20 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
bio = bio_alloc(GFP_NOFS, 1);
- bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+ bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
bio->bi_bdev = log->bdev;
bio->bi_io_vec[0].bv_page = bp->l_page;
bio->bi_io_vec[0].bv_len = LOGPSIZE;
bio->bi_io_vec[0].bv_offset = bp->l_offset;
bio->bi_vcnt = 1;
- bio->bi_size = LOGPSIZE;
+ bio->bi_iter.bi_size = LOGPSIZE;
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
/*check if journaling to disk has been disabled*/
if (log->no_integrity) {
- bio->bi_size = 0;
+ bio->bi_iter.bi_size = 0;
lbmIODone(bio, 0);
} else {
submit_bio(READ_SYNC, bio);
@@ -2144,21 +2144,21 @@ static void lbmStartIO(struct lbuf * bp)
jfs_info("lbmStartIO\n");
bio = bio_alloc(GFP_NOFS, 1);
- bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+ bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
bio->bi_bdev = log->bdev;
bio->bi_io_vec[0].bv_page = bp->l_page;
bio->bi_io_vec[0].bv_len = LOGPSIZE;
bio->bi_io_vec[0].bv_offset = bp->l_offset;
bio->bi_vcnt = 1;
- bio->bi_size = LOGPSIZE;
+ bio->bi_iter.bi_size = LOGPSIZE;
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
/* check if journaling to disk has been disabled */
if (log->no_integrity) {
- bio->bi_size = 0;
+ bio->bi_iter.bi_size = 0;
lbmIODone(bio, 0);
} else {
submit_bio(WRITE_SYNC, bio);
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index d165cde0c68d..49ba7ff1bbb9 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
* count from hitting zero before we're through
*/
inc_io(page);
- if (!bio->bi_size)
+ if (!bio->bi_iter.bi_size)
goto dump_bio;
submit_bio(WRITE, bio);
nr_underway++;
@@ -438,7 +438,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
bio = bio_alloc(GFP_NOFS, 1);
bio->bi_bdev = inode->i_sb->s_bdev;
- bio->bi_sector = pblock << (inode->i_blkbits - 9);
+ bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_write_end_io;
bio->bi_private = page;
@@ -452,7 +452,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
if (bio) {
if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
goto add_failed;
- if (!bio->bi_size)
+ if (!bio->bi_iter.bi_size)
goto dump_bio;
submit_bio(WRITE, bio);
@@ -517,7 +517,8 @@ static int metapage_readpage(struct file *fp, struct page *page)
bio = bio_alloc(GFP_NOFS, 1);
bio->bi_bdev = inode->i_sb->s_bdev;
- bio->bi_sector = pblock << (inode->i_blkbits - 9);
+ bio->bi_iter.bi_sector =
+ pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_read_end_io;
bio->bi_private = page;
len = xlen << inode->i_blkbits;
diff --git a/fs/libfs.c b/fs/libfs.c
index 3a3a9b53bf5a..8c5018493154 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -10,6 +10,7 @@
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mutex.h>
+#include <linux/namei.h>
#include <linux/exportfs.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* sync_mapping_buffers */
@@ -31,6 +32,7 @@ int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
return 0;
}
+EXPORT_SYMBOL(simple_getattr);
int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
{
@@ -39,6 +41,7 @@ int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_namelen = NAME_MAX;
return 0;
}
+EXPORT_SYMBOL(simple_statfs);
/*
* Retaining negative dentries for an in-memory filesystem just wastes
@@ -66,6 +69,7 @@ struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned
d_add(dentry, NULL);
return NULL;
}
+EXPORT_SYMBOL(simple_lookup);
int dcache_dir_open(struct inode *inode, struct file *file)
{
@@ -75,12 +79,14 @@ int dcache_dir_open(struct inode *inode, struct file *file)
return file->private_data ? 0 : -ENOMEM;
}
+EXPORT_SYMBOL(dcache_dir_open);
int dcache_dir_close(struct inode *inode, struct file *file)
{
dput(file->private_data);
return 0;
}
+EXPORT_SYMBOL(dcache_dir_close);
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
{
@@ -123,6 +129,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
mutex_unlock(&dentry->d_inode->i_mutex);
return offset;
}
+EXPORT_SYMBOL(dcache_dir_lseek);
/* Relationship between i_mode and the DT_xxx types */
static inline unsigned char dt_type(struct inode *inode)
@@ -172,11 +179,13 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
spin_unlock(&dentry->d_lock);
return 0;
}
+EXPORT_SYMBOL(dcache_readdir);
ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
{
return -EISDIR;
}
+EXPORT_SYMBOL(generic_read_dir);
const struct file_operations simple_dir_operations = {
.open = dcache_dir_open,
@@ -186,10 +195,12 @@ const struct file_operations simple_dir_operations = {
.iterate = dcache_readdir,
.fsync = noop_fsync,
};
+EXPORT_SYMBOL(simple_dir_operations);
const struct inode_operations simple_dir_inode_operations = {
.lookup = simple_lookup,
};
+EXPORT_SYMBOL(simple_dir_inode_operations);
static const struct super_operations simple_super_operations = {
.statfs = simple_statfs,
@@ -244,6 +255,7 @@ Enomem:
deactivate_locked_super(s);
return ERR_PTR(-ENOMEM);
}
+EXPORT_SYMBOL(mount_pseudo);
int simple_open(struct inode *inode, struct file *file)
{
@@ -251,6 +263,7 @@ int simple_open(struct inode *inode, struct file *file)
file->private_data = inode->i_private;
return 0;
}
+EXPORT_SYMBOL(simple_open);
int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
@@ -263,6 +276,7 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den
d_instantiate(dentry, inode);
return 0;
}
+EXPORT_SYMBOL(simple_link);
int simple_empty(struct dentry *dentry)
{
@@ -283,6 +297,7 @@ out:
spin_unlock(&dentry->d_lock);
return ret;
}
+EXPORT_SYMBOL(simple_empty);
int simple_unlink(struct inode *dir, struct dentry *dentry)
{
@@ -293,6 +308,7 @@ int simple_unlink(struct inode *dir, struct dentry *dentry)
dput(dentry);
return 0;
}
+EXPORT_SYMBOL(simple_unlink);
int simple_rmdir(struct inode *dir, struct dentry *dentry)
{
@@ -304,6 +320,7 @@ int simple_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
return 0;
}
+EXPORT_SYMBOL(simple_rmdir);
int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
@@ -330,6 +347,7 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
return 0;
}
+EXPORT_SYMBOL(simple_rename);
/**
* simple_setattr - setattr for simple filesystem
@@ -370,6 +388,7 @@ int simple_readpage(struct file *file, struct page *page)
unlock_page(page);
return 0;
}
+EXPORT_SYMBOL(simple_readpage);
int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
@@ -393,6 +412,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
}
return 0;
}
+EXPORT_SYMBOL(simple_write_begin);
/**
* simple_write_end - .write_end helper for non-block-device FSes
@@ -444,6 +464,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
return copied;
}
+EXPORT_SYMBOL(simple_write_end);
/*
* the inodes created here are not hashed. If you use iunique to generate
@@ -512,6 +533,7 @@ out:
dput(root);
return -ENOMEM;
}
+EXPORT_SYMBOL(simple_fill_super);
static DEFINE_SPINLOCK(pin_fs_lock);
@@ -534,6 +556,7 @@ int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *c
mntput(mnt);
return 0;
}
+EXPORT_SYMBOL(simple_pin_fs);
void simple_release_fs(struct vfsmount **mount, int *count)
{
@@ -545,6 +568,7 @@ void simple_release_fs(struct vfsmount **mount, int *count)
spin_unlock(&pin_fs_lock);
mntput(mnt);
}
+EXPORT_SYMBOL(simple_release_fs);
/**
* simple_read_from_buffer - copy data from the buffer to user space
@@ -579,6 +603,7 @@ ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
*ppos = pos + count;
return count;
}
+EXPORT_SYMBOL(simple_read_from_buffer);
/**
* simple_write_to_buffer - copy data from user space to the buffer
@@ -613,6 +638,7 @@ ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
*ppos = pos + count;
return count;
}
+EXPORT_SYMBOL(simple_write_to_buffer);
/**
* memory_read_from_buffer - copy data from the buffer
@@ -644,6 +670,7 @@ ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
return count;
}
+EXPORT_SYMBOL(memory_read_from_buffer);
/*
* Transaction based IO.
@@ -665,6 +692,7 @@ void simple_transaction_set(struct file *file, size_t n)
smp_mb();
ar->size = n;
}
+EXPORT_SYMBOL(simple_transaction_set);
char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
{
@@ -696,6 +724,7 @@ char *simple_transaction_get(struct file *file, const char __user *buf, size_t s
return ar->data;
}
+EXPORT_SYMBOL(simple_transaction_get);
ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
{
@@ -705,12 +734,14 @@ ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size
return 0;
return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
}
+EXPORT_SYMBOL(simple_transaction_read);
int simple_transaction_release(struct inode *inode, struct file *file)
{
free_page((unsigned long)file->private_data);
return 0;
}
+EXPORT_SYMBOL(simple_transaction_release);
/* Simple attribute files */
@@ -746,12 +777,14 @@ int simple_attr_open(struct inode *inode, struct file *file,
return nonseekable_open(inode, file);
}
+EXPORT_SYMBOL_GPL(simple_attr_open);
int simple_attr_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
+EXPORT_SYMBOL_GPL(simple_attr_release); /* GPL-only? This? Really? */
/* read from the buffer that is filled with the get function */
ssize_t simple_attr_read(struct file *file, char __user *buf,
@@ -787,6 +820,7 @@ out:
mutex_unlock(&attr->mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(simple_attr_read);
/* interpret the buffer as a number to call the set function with */
ssize_t simple_attr_write(struct file *file, const char __user *buf,
@@ -819,6 +853,7 @@ out:
mutex_unlock(&attr->mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(simple_attr_write);
/**
* generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
@@ -957,39 +992,13 @@ int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
return 0;
}
-
-EXPORT_SYMBOL(dcache_dir_close);
-EXPORT_SYMBOL(dcache_dir_lseek);
-EXPORT_SYMBOL(dcache_dir_open);
-EXPORT_SYMBOL(dcache_readdir);
-EXPORT_SYMBOL(generic_read_dir);
-EXPORT_SYMBOL(mount_pseudo);
-EXPORT_SYMBOL(simple_write_begin);
-EXPORT_SYMBOL(simple_write_end);
-EXPORT_SYMBOL(simple_dir_inode_operations);
-EXPORT_SYMBOL(simple_dir_operations);
-EXPORT_SYMBOL(simple_empty);
-EXPORT_SYMBOL(simple_fill_super);
-EXPORT_SYMBOL(simple_getattr);
-EXPORT_SYMBOL(simple_open);
-EXPORT_SYMBOL(simple_link);
-EXPORT_SYMBOL(simple_lookup);
-EXPORT_SYMBOL(simple_pin_fs);
-EXPORT_SYMBOL(simple_readpage);
-EXPORT_SYMBOL(simple_release_fs);
-EXPORT_SYMBOL(simple_rename);
-EXPORT_SYMBOL(simple_rmdir);
-EXPORT_SYMBOL(simple_statfs);
EXPORT_SYMBOL(noop_fsync);
-EXPORT_SYMBOL(simple_unlink);
-EXPORT_SYMBOL(simple_read_from_buffer);
-EXPORT_SYMBOL(simple_write_to_buffer);
-EXPORT_SYMBOL(memory_read_from_buffer);
-EXPORT_SYMBOL(simple_transaction_set);
-EXPORT_SYMBOL(simple_transaction_get);
-EXPORT_SYMBOL(simple_transaction_read);
-EXPORT_SYMBOL(simple_transaction_release);
-EXPORT_SYMBOL_GPL(simple_attr_open);
-EXPORT_SYMBOL_GPL(simple_attr_release);
-EXPORT_SYMBOL_GPL(simple_attr_read);
-EXPORT_SYMBOL_GPL(simple_attr_write);
+
+void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+{
+ char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ kfree(s);
+}
+EXPORT_SYMBOL(kfree_put_link);
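
For reference, a minimal sketch (not part of the patch) of how a filesystem whose ->follow_link() hands nd_set_link() a kmalloc()ed string could use the new shared helper; the examplefs_* names are invented for illustration:

static void *examplefs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	char *path = kmalloc(PATH_MAX, GFP_KERNEL);

	if (!path)
		return ERR_PTR(-ENOMEM);
	/* ... fill "path" with the link target ... */
	nd_set_link(nd, path);
	return NULL;
}

static const struct inode_operations examplefs_symlink_iops = {
	.readlink	= generic_readlink,
	.follow_link	= examplefs_follow_link,
	.put_link	= kfree_put_link,	/* frees the kmalloc()ed path */
};
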
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 550475ca6a0e..a1b161f29148 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -32,9 +32,9 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
bio_vec.bv_len = PAGE_SIZE;
bio_vec.bv_offset = 0;
bio.bi_vcnt = 1;
- bio.bi_size = PAGE_SIZE;
+ bio.bi_iter.bi_size = PAGE_SIZE;
bio.bi_bdev = bdev;
- bio.bi_sector = page->index * (PAGE_SIZE >> 9);
+ bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
init_completion(&complete);
bio.bi_private = &complete;
bio.bi_end_io = request_complete;
@@ -107,9 +107,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
if (i >= max_pages) {
/* Block layer cannot split bios :( */
bio->bi_vcnt = i;
- bio->bi_size = i * PAGE_SIZE;
+ bio->bi_iter.bi_size = i * PAGE_SIZE;
bio->bi_bdev = super->s_bdev;
- bio->bi_sector = ofs >> 9;
+ bio->bi_iter.bi_sector = ofs >> 9;
bio->bi_private = sb;
bio->bi_end_io = writeseg_end_io;
atomic_inc(&super->s_pending_writes);
@@ -134,9 +134,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
unlock_page(page);
}
bio->bi_vcnt = nr_pages;
- bio->bi_size = nr_pages * PAGE_SIZE;
+ bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
bio->bi_bdev = super->s_bdev;
- bio->bi_sector = ofs >> 9;
+ bio->bi_iter.bi_sector = ofs >> 9;
bio->bi_private = sb;
bio->bi_end_io = writeseg_end_io;
atomic_inc(&super->s_pending_writes);
@@ -199,9 +199,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
if (i >= max_pages) {
/* Block layer cannot split bios :( */
bio->bi_vcnt = i;
- bio->bi_size = i * PAGE_SIZE;
+ bio->bi_iter.bi_size = i * PAGE_SIZE;
bio->bi_bdev = super->s_bdev;
- bio->bi_sector = ofs >> 9;
+ bio->bi_iter.bi_sector = ofs >> 9;
bio->bi_private = sb;
bio->bi_end_io = erase_end_io;
atomic_inc(&super->s_pending_writes);
@@ -220,9 +220,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
bio->bi_io_vec[i].bv_offset = 0;
}
bio->bi_vcnt = nr_pages;
- bio->bi_size = nr_pages * PAGE_SIZE;
+ bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
bio->bi_bdev = super->s_bdev;
- bio->bi_sector = ofs >> 9;
+ bio->bi_iter.bi_sector = ofs >> 9;
bio->bi_private = sb;
bio->bi_end_io = erase_end_io;
atomic_inc(&super->s_pending_writes);
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
index 9c501449450d..427bb73e298f 100644
--- a/fs/logfs/dev_mtd.c
+++ b/fs/logfs/dev_mtd.c
@@ -245,8 +245,8 @@ static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs)
goto out;
if (memchr_inv(buf, 0xff, super->s_writesize))
err = -EIO;
- kfree(buf);
out:
+ kfree(buf);
return err;
}
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index 57914fc32b62..57f994e887b5 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -264,8 +264,8 @@ const struct inode_operations logfs_reg_iops = {
};
const struct file_operations logfs_reg_fops = {
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.fsync = logfs_fsync,
.unlocked_ioctl = logfs_ioctl,
.llseek = generic_file_llseek,
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index 54360293bcb5..b256c0690e5b 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -287,14 +287,14 @@ static int logfs_make_writeable(struct super_block *sb)
if (err)
return err;
+ /* Do one GC pass before any data gets dirtied */
+ logfs_gc_pass(sb);
+
/* Check areas for trailing unaccounted data */
err = logfs_check_areas(sb);
if (err)
return err;
- /* Do one GC pass before any data gets dirtied */
- logfs_gc_pass(sb);
-
/* after all initializations are done, replay the journal
* for rw-mounts, if necessary */
err = logfs_replay_journal(sb);
diff --git a/fs/minix/Kconfig b/fs/minix/Kconfig
index 6624684dd5de..f2a0cfcef11d 100644
--- a/fs/minix/Kconfig
+++ b/fs/minix/Kconfig
@@ -18,7 +18,7 @@ config MINIX_FS
config MINIX_FS_NATIVE_ENDIAN
def_bool MINIX_FS
- depends on H8300 || M32R || MICROBLAZE || MIPS || S390 || SUPERH || SPARC || XTENSA || (M68K && !MMU)
+ depends on M32R || MICROBLAZE || MIPS || S390 || SUPERH || SPARC || XTENSA || (M68K && !MMU)
config MINIX_FS_BIG_ENDIAN_16BIT_INDEXED
def_bool MINIX_FS
diff --git a/fs/minix/file.c b/fs/minix/file.c
index adc6f5494231..346d8f37d342 100644
--- a/fs/minix/file.c
+++ b/fs/minix/file.c
@@ -15,9 +15,9 @@
const struct file_operations minix_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.fsync = generic_file_fsync,
.splice_read = generic_file_splice_read,
diff --git a/fs/mpage.c b/fs/mpage.c
index 0face1c4d4c6..92b125f03697 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -94,7 +94,7 @@ mpage_alloc(struct block_device *bdev,
if (bio) {
bio->bi_bdev = bdev;
- bio->bi_sector = first_sector;
+ bio->bi_iter.bi_sector = first_sector;
}
return bio;
}
diff --git a/fs/namei.c b/fs/namei.c
index 645268f23eb6..caa28051e197 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2294,10 +2294,11 @@ out:
* path_mountpoint - look up a path to be umounted
* @dfd: directory file descriptor to start walk from
* @name: full pathname to walk
+ * @path: pointer to container for result
* @flags: lookup flags
*
* Look up the given name, but don't attempt to revalidate the last component.
- * Returns 0 and "path" will be valid on success; Retuns error otherwise.
+ * Returns 0 and "path" will be valid on success; Returns error otherwise.
*/
static int
path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
diff --git a/fs/namespace.c b/fs/namespace.c
index da5c49483430..3ee6e59ead55 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -39,7 +39,7 @@ static int mnt_group_start = 1;
static struct list_head *mount_hashtable __read_mostly;
static struct list_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
-static struct rw_semaphore namespace_sem;
+static DECLARE_RWSEM(namespace_sem);
/* /sys/fs */
struct kobject *fs_kobj;
@@ -1849,14 +1849,10 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
br_write_lock(&vfsmount_lock);
mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
mnt->mnt.mnt_flags = mnt_flags;
- br_write_unlock(&vfsmount_lock);
- }
- up_write(&sb->s_umount);
- if (!err) {
- br_write_lock(&vfsmount_lock);
touch_mnt_namespace(mnt->mnt_ns);
br_write_unlock(&vfsmount_lock);
}
+ up_write(&sb->s_umount);
return err;
}
@@ -2444,9 +2440,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
return ERR_CAST(new);
}
new_ns->root = new;
- br_write_lock(&vfsmount_lock);
list_add_tail(&new_ns->list, &new->mnt_list);
- br_write_unlock(&vfsmount_lock);
/*
* Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2767,8 +2761,6 @@ void __init mnt_init(void)
unsigned u;
int err;
- init_rwsem(&namespace_sem);
-
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
@@ -2802,11 +2794,7 @@ void put_mnt_ns(struct mnt_namespace *ns)
{
if (!atomic_dec_and_test(&ns->count))
return;
- namespace_lock();
- br_write_lock(&vfsmount_lock);
- umount_tree(ns->root, 0);
- br_write_unlock(&vfsmount_lock);
- namespace_unlock();
+ drop_collected_mounts(&ns->root->mnt);
free_mnt_ns(ns);
}
@@ -2875,7 +2863,7 @@ bool fs_fully_visible(struct file_system_type *type)
if (unlikely(!ns))
return false;
- namespace_lock();
+ down_read(&namespace_sem);
list_for_each_entry(mnt, &ns->list, mnt_list) {
struct mount *child;
if (mnt->mnt.mnt_sb->s_type != type)
@@ -2896,7 +2884,7 @@ bool fs_fully_visible(struct file_system_type *type)
next: ;
}
found:
- namespace_unlock();
+ up_read(&namespace_sem);
return visible;
}
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 3be047474bfc..c320ac52353e 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -339,9 +339,8 @@ ncp_lookup_validate(struct dentry *dentry, unsigned int flags)
if (val)
goto finished;
- DDPRINTK("ncp_lookup_validate: %s/%s not valid, age=%ld, server lookup\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
- NCP_GET_AGE(dentry));
+ DDPRINTK("ncp_lookup_validate: %pd2 not valid, age=%ld, server lookup\n",
+ dentry, NCP_GET_AGE(dentry));
len = sizeof(__name);
if (ncp_is_server_root(dir)) {
@@ -359,8 +358,8 @@ ncp_lookup_validate(struct dentry *dentry, unsigned int flags)
res = ncp_obtain_info(server, dir, __name, &(finfo.i));
}
finfo.volume = finfo.i.volNumber;
- DDPRINTK("ncp_lookup_validate: looked for %s/%s, res=%d\n",
- dentry->d_parent->d_name.name, __name, res);
+ DDPRINTK("ncp_lookup_validate: looked for %pd/%s, res=%d\n",
+ dentry->d_parent, __name, res);
/*
* If we didn't find it, or if it has a different dirEntNum to
* what we remember, it's not valid any more.
@@ -454,8 +453,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
ctl.page = NULL;
ctl.cache = NULL;
- DDPRINTK("ncp_readdir: reading %s/%s, pos=%d\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
+ DDPRINTK("ncp_readdir: reading %pD2, pos=%d\n", file,
(int) ctx->pos);
result = -EIO;
@@ -740,12 +738,10 @@ ncp_do_readdir(struct file *file, struct dir_context *ctx,
int more;
size_t bufsize;
- DPRINTK("ncp_do_readdir: %s/%s, fpos=%ld\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
+ DPRINTK("ncp_do_readdir: %pD2, fpos=%ld\n", file,
(unsigned long) ctx->pos);
- PPRINTK("ncp_do_readdir: init %s, volnum=%d, dirent=%u\n",
- dentry->d_name.name, NCP_FINFO(dir)->volNumber,
- NCP_FINFO(dir)->dirEntNum);
+ PPRINTK("ncp_do_readdir: init %pD, volnum=%d, dirent=%u\n",
+ file, NCP_FINFO(dir)->volNumber, NCP_FINFO(dir)->dirEntNum);
err = ncp_initialize_search(server, dir, &seq);
if (err) {
@@ -850,8 +846,7 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, unsig
if (!ncp_conn_valid(server))
goto finished;
- PPRINTK("ncp_lookup: server lookup for %s/%s\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ PPRINTK("ncp_lookup: server lookup for %pd2\n", dentry);
len = sizeof(__name);
if (ncp_is_server_root(dir)) {
@@ -867,8 +862,7 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, unsig
if (!res)
res = ncp_obtain_info(server, dir, __name, &(finfo.i));
}
- PPRINTK("ncp_lookup: looked for %s/%s, res=%d\n",
- dentry->d_parent->d_name.name, __name, res);
+ PPRINTK("ncp_lookup: looked for %pd2, res=%d\n", dentry, res);
/*
* If we didn't find an entry, make a negative dentry.
*/
@@ -915,8 +909,7 @@ out:
return error;
out_close:
- PPRINTK("ncp_instantiate: %s/%s failed, closing file\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ PPRINTK("ncp_instantiate: %pd2 failed, closing file\n", dentry);
ncp_close_file(NCP_SERVER(dir), finfo->file_handle);
goto out;
}
@@ -930,8 +923,7 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, umode_t mode,
int opmode;
__u8 __name[NCP_MAXPATHLEN + 1];
- PPRINTK("ncp_create_new: creating %s/%s, mode=%hx\n",
- dentry->d_parent->d_name.name, dentry->d_name.name, mode);
+ PPRINTK("ncp_create_new: creating %pd2, mode=%hx\n", dentry, mode);
ncp_age_dentry(server, dentry);
len = sizeof(__name);
@@ -960,8 +952,7 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, umode_t mode,
error = -ENAMETOOLONG;
else if (result < 0)
error = result;
- DPRINTK("ncp_create: %s/%s failed\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ DPRINTK("ncp_create: %pd2 failed\n", dentry);
goto out;
}
opmode = O_WRONLY;
@@ -994,8 +985,7 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
int error, len;
__u8 __name[NCP_MAXPATHLEN + 1];
- DPRINTK("ncp_mkdir: making %s/%s\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ DPRINTK("ncp_mkdir: making %pd2\n", dentry);
ncp_age_dentry(server, dentry);
len = sizeof(__name);
@@ -1032,8 +1022,7 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
int error, result, len;
__u8 __name[NCP_MAXPATHLEN + 1];
- DPRINTK("ncp_rmdir: removing %s/%s\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ DPRINTK("ncp_rmdir: removing %pd2\n", dentry);
len = sizeof(__name);
error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
@@ -1078,8 +1067,7 @@ static int ncp_unlink(struct inode *dir, struct dentry *dentry)
int error;
server = NCP_SERVER(dir);
- DPRINTK("ncp_unlink: unlinking %s/%s\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ DPRINTK("ncp_unlink: unlinking %pd2\n", dentry);
/*
* Check whether to close the file ...
@@ -1099,8 +1087,7 @@ static int ncp_unlink(struct inode *dir, struct dentry *dentry)
#endif
switch (error) {
case 0x00:
- DPRINTK("ncp: removed %s/%s\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ DPRINTK("ncp: removed %pd2\n", dentry);
break;
case 0x85:
case 0x8A:
@@ -1133,9 +1120,7 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
int old_len, new_len;
__u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
- DPRINTK("ncp_rename: %s/%s to %s/%s\n",
- old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
- new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
+ DPRINTK("ncp_rename: %pd2 to %pd2\n", old_dentry, new_dentry);
ncp_age_dentry(server, old_dentry);
ncp_age_dentry(server, new_dentry);
@@ -1165,8 +1150,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
#endif
switch (error) {
case 0x00:
- DPRINTK("ncp renamed %s -> %s.\n",
- old_dentry->d_name.name,new_dentry->d_name.name);
+ DPRINTK("ncp renamed %pd -> %pd.\n",
+ old_dentry, new_dentry);
break;
case 0x9E:
error = -ENAMETOOLONG;
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index 122e260247f5..8f5074e1ecb9 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -107,8 +107,7 @@ ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
void* freepage;
size_t freelen;
- DPRINTK("ncp_file_read: enter %s/%s\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ DPRINTK("ncp_file_read: enter %pd2\n", dentry);
pos = *ppos;
@@ -166,8 +165,7 @@ ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
file_accessed(file);
- DPRINTK("ncp_file_read: exit %s/%s\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ DPRINTK("ncp_file_read: exit %pd2\n", dentry);
outrel:
ncp_inode_close(inode);
return already_read ? already_read : error;
@@ -184,8 +182,7 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
int errno;
void* bouncebuffer;
- DPRINTK("ncp_file_write: enter %s/%s\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ DPRINTK("ncp_file_write: enter %pd2\n", dentry);
if ((ssize_t) count < 0)
return -EINVAL;
pos = *ppos;
@@ -264,8 +261,7 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
i_size_write(inode, pos);
mutex_unlock(&inode->i_mutex);
}
- DPRINTK("ncp_file_write: exit %s/%s\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ DPRINTK("ncp_file_write: exit %pd2\n", dentry);
outrel:
ncp_inode_close(inode);
return already_written ? already_written : errno;
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index b5e80b0af315..38c1768b4142 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -140,6 +140,17 @@ config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
If the NFS client is unchanged from the upstream kernel, this
option should be set to the default "kernel.org".
+config NFS_V4_1_MIGRATION
+ bool "NFSv4.1 client support for migration"
+ depends on NFS_V4_1
+ default n
+ help
+ This option makes the NFS client advertise to NFSv4.1 servers that
+ it can support NFSv4 migration.
+
+ The NFSv4.1 pieces of the Linux NFSv4 migration implementation are
+ still experimental. If you are not an NFSv4 developer, say N here.
+
config NFS_V4_SECURITY_LABEL
bool
depends on NFS_V4_2 && SECURITY
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index e242bbf72972..af73896abea3 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -134,8 +134,8 @@ bl_submit_bio(int rw, struct bio *bio)
if (bio) {
get_parallel(bio->bi_private);
dprintk("%s submitting %s bio %u@%llu\n", __func__,
- rw == READ ? "read" : "write",
- bio->bi_size, (unsigned long long)bio->bi_sector);
+ rw == READ ? "read" : "write", bio->bi_iter.bi_size,
+ (unsigned long long)bio->bi_iter.bi_sector);
submit_bio(rw, bio);
}
return NULL;
@@ -156,7 +156,8 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
}
if (bio) {
- bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+ bio->bi_iter.bi_sector = isect - be->be_f_offset +
+ be->be_v_offset;
bio->bi_bdev = be->be_mdev;
bio->bi_end_io = end_io;
bio->bi_private = par;
@@ -519,7 +520,7 @@ bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
(offset / SECTOR_SIZE);
- bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+ bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
bio->bi_bdev = be->be_mdev;
bio->bi_end_io = bl_read_single_end_io;
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 67cd73213168..073b4cf67ed9 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -164,8 +164,7 @@ nfs41_callback_up(struct svc_serv *serv)
svc_xprt_put(serv->sv_bc_xprt);
serv->sv_bc_xprt = NULL;
}
- dprintk("--> %s return %ld\n", __func__,
- IS_ERR(rqstp) ? PTR_ERR(rqstp) : 0);
+ dprintk("--> %s return %d\n", __func__, PTR_ERR_OR_ZERO(rqstp));
return rqstp;
}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 2dceee4db076..1d09289c8f0e 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -590,6 +590,8 @@ int nfs_create_rpc_client(struct nfs_client *clp,
if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
args.flags |= RPC_CLNT_CREATE_DISCRTRY;
+ if (test_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags))
+ args.flags |= RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT;
if (test_bit(NFS_CS_NORESVPORT, &clp->cl_flags))
args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
if (test_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags))
@@ -784,8 +786,10 @@ static int nfs_init_server(struct nfs_server *server,
goto error;
server->port = data->nfs_server.port;
+ server->auth_info = data->auth_info;
- error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]);
+ error = nfs_init_server_rpcclient(server, &timeparms,
+ data->selected_flavor);
if (error < 0)
goto error;
@@ -926,6 +930,7 @@ void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *sour
target->acdirmax = source->acdirmax;
target->caps = source->caps;
target->options = source->options;
+ target->auth_info = source->auth_info;
}
EXPORT_SYMBOL_GPL(nfs_server_copy_userdata);
@@ -943,7 +948,7 @@ void nfs_server_insert_lists(struct nfs_server *server)
}
EXPORT_SYMBOL_GPL(nfs_server_insert_lists);
-static void nfs_server_remove_lists(struct nfs_server *server)
+void nfs_server_remove_lists(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
struct nfs_net *nn;
@@ -960,6 +965,7 @@ static void nfs_server_remove_lists(struct nfs_server *server)
synchronize_rcu();
}
+EXPORT_SYMBOL_GPL(nfs_server_remove_lists);
/*
* Allocate and initialise a server record
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 02b0df769e2d..812154aff981 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -98,9 +98,7 @@ nfs_opendir(struct inode *inode, struct file *filp)
struct nfs_open_dir_context *ctx;
struct rpc_cred *cred;
- dfprintk(FILE, "NFS: open dir(%s/%s)\n",
- filp->f_path.dentry->d_parent->d_name.name,
- filp->f_path.dentry->d_name.name);
+ dfprintk(FILE, "NFS: open dir(%pD2)\n", filp);
nfs_inc_stats(inode, NFSIOS_VFSOPEN);
@@ -297,11 +295,10 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
if (ctx->duped > 0
&& ctx->dup_cookie == *desc->dir_cookie) {
if (printk_ratelimit()) {
- pr_notice("NFS: directory %s/%s contains a readdir loop."
+ pr_notice("NFS: directory %pD2 contains a readdir loop."
"Please contact your server vendor. "
"The file: %s has duplicate cookie %llu\n",
- desc->file->f_dentry->d_parent->d_name.name,
- desc->file->f_dentry->d_name.name,
+ desc->file,
array->array[i].string.name,
*desc->dir_cookie);
}
@@ -822,9 +819,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
struct nfs_open_dir_context *dir_ctx = file->private_data;
int res = 0;
- dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
- (long long)ctx->pos);
+ dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
+ file, (long long)ctx->pos);
nfs_inc_stats(inode, NFSIOS_VFSGETDENTS);
/*
@@ -880,22 +876,17 @@ out:
nfs_unblock_sillyrename(dentry);
if (res > 0)
res = 0;
- dfprintk(FILE, "NFS: readdir(%s/%s) returns %d\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
- res);
+ dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res);
return res;
}
static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
{
- struct dentry *dentry = filp->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct nfs_open_dir_context *dir_ctx = filp->private_data;
- dfprintk(FILE, "NFS: llseek dir(%s/%s, %lld, %d)\n",
- dentry->d_parent->d_name.name,
- dentry->d_name.name,
- offset, whence);
+ dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n",
+ filp, offset, whence);
mutex_lock(&inode->i_mutex);
switch (whence) {
@@ -925,15 +916,12 @@ out:
static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end,
int datasync)
{
- struct dentry *dentry = filp->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file_inode(filp);
- dfprintk(FILE, "NFS: fsync dir(%s/%s) datasync %d\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
- datasync);
+ dfprintk(FILE, "NFS: fsync dir(%pD2) datasync %d\n", filp, datasync);
mutex_lock(&inode->i_mutex);
- nfs_inc_stats(dentry->d_inode, NFSIOS_VFSFSYNC);
+ nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
mutex_unlock(&inode->i_mutex);
return 0;
}
@@ -1073,9 +1061,8 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
}
if (is_bad_inode(inode)) {
- dfprintk(LOOKUPCACHE, "%s: %s/%s has dud inode\n",
- __func__, dentry->d_parent->d_name.name,
- dentry->d_name.name);
+ dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
+ __func__, dentry);
goto out_bad;
}
@@ -1125,9 +1112,8 @@ out_set_verifier:
nfs_advise_use_readdirplus(dir);
out_valid_noent:
dput(parent);
- dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is valid\n",
- __func__, dentry->d_parent->d_name.name,
- dentry->d_name.name);
+ dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
+ __func__, dentry);
return 1;
out_zap_parent:
nfs_zap_caches(dir);
@@ -1139,7 +1125,13 @@ out_zap_parent:
if (inode && S_ISDIR(inode->i_mode)) {
/* Purge readdir caches. */
nfs_zap_caches(inode);
- if (dentry->d_flags & DCACHE_DISCONNECTED)
+ /*
+ * We can't d_drop the root of a disconnected tree:
+ * its d_hash is on the s_anon list and d_drop() would hide
+ * it from shrink_dcache_for_unmount(), leading to busy
+ * inodes on unmount and further oopses.
+ */
+ if (IS_ROOT(dentry))
goto out_valid;
}
/* If we have submounts, don't unhash ! */
@@ -1147,18 +1139,16 @@ out_zap_parent:
goto out_valid;
dput(parent);
- dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
- __func__, dentry->d_parent->d_name.name,
- dentry->d_name.name);
+ dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
+ __func__, dentry);
return 0;
out_error:
nfs_free_fattr(fattr);
nfs_free_fhandle(fhandle);
nfs4_label_free(label);
dput(parent);
- dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) lookup returned error %d\n",
- __func__, dentry->d_parent->d_name.name,
- dentry->d_name.name, error);
+ dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
+ __func__, dentry, error);
return error;
}
@@ -1182,16 +1172,14 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
* eventually need to do something more here.
*/
if (!inode) {
- dfprintk(LOOKUPCACHE, "%s: %s/%s has negative inode\n",
- __func__, dentry->d_parent->d_name.name,
- dentry->d_name.name);
+ dfprintk(LOOKUPCACHE, "%s: %pd2 has negative inode\n",
+ __func__, dentry);
return 1;
}
if (is_bad_inode(inode)) {
- dfprintk(LOOKUPCACHE, "%s: %s/%s has dud inode\n",
- __func__, dentry->d_parent->d_name.name,
- dentry->d_name.name);
+ dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
+ __func__, dentry);
return 0;
}
@@ -1206,9 +1194,8 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
*/
static int nfs_dentry_delete(const struct dentry *dentry)
{
- dfprintk(VFS, "NFS: dentry_delete(%s/%s, %x)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
- dentry->d_flags);
+ dfprintk(VFS, "NFS: dentry_delete(%pd2, %x)\n",
+ dentry, dentry->d_flags);
/* Unhash any dentry with a stale inode */
if (dentry->d_inode != NULL && NFS_STALE(dentry->d_inode))
@@ -1286,8 +1273,7 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
struct nfs4_label *label = NULL;
int error;
- dfprintk(VFS, "NFS: lookup(%s/%s)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ dfprintk(VFS, "NFS: lookup(%pd2)\n", dentry);
nfs_inc_stats(dir, NFSIOS_VFSLOOKUP);
res = ERR_PTR(-ENAMETOOLONG);
@@ -1381,7 +1367,7 @@ static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, i
static int do_open(struct inode *inode, struct file *filp)
{
- nfs_fscache_set_inode_cookie(inode, filp);
+ nfs_fscache_open_file(inode, filp);
return 0;
}
@@ -1418,8 +1404,8 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
/* Expect a negative dentry */
BUG_ON(dentry->d_inode);
- dfprintk(VFS, "NFS: atomic_open(%s/%ld), %s\n",
- dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+ dfprintk(VFS, "NFS: atomic_open(%s/%ld), %pd\n",
+ dir->i_sb->s_id, dir->i_ino, dentry);
err = nfs_check_flags(open_flags);
if (err)
@@ -1608,8 +1594,8 @@ int nfs_create(struct inode *dir, struct dentry *dentry,
int open_flags = excl ? O_CREAT | O_EXCL : O_CREAT;
int error;
- dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
- dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+ dfprintk(VFS, "NFS: create(%s/%ld), %pd\n",
+ dir->i_sb->s_id, dir->i_ino, dentry);
attr.ia_mode = mode;
attr.ia_valid = ATTR_MODE;
@@ -1635,8 +1621,8 @@ nfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
struct iattr attr;
int status;
- dfprintk(VFS, "NFS: mknod(%s/%ld), %s\n",
- dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+ dfprintk(VFS, "NFS: mknod(%s/%ld), %pd\n",
+ dir->i_sb->s_id, dir->i_ino, dentry);
if (!new_valid_dev(rdev))
return -EINVAL;
@@ -1664,8 +1650,8 @@ int nfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
struct iattr attr;
int error;
- dfprintk(VFS, "NFS: mkdir(%s/%ld), %s\n",
- dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+ dfprintk(VFS, "NFS: mkdir(%s/%ld), %pd\n",
+ dir->i_sb->s_id, dir->i_ino, dentry);
attr.ia_valid = ATTR_MODE;
attr.ia_mode = mode | S_IFDIR;
@@ -1692,8 +1678,8 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
{
int error;
- dfprintk(VFS, "NFS: rmdir(%s/%ld), %s\n",
- dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+ dfprintk(VFS, "NFS: rmdir(%s/%ld), %pd\n",
+ dir->i_sb->s_id, dir->i_ino, dentry);
trace_nfs_rmdir_enter(dir, dentry);
if (dentry->d_inode) {
@@ -1728,8 +1714,7 @@ static int nfs_safe_remove(struct dentry *dentry)
struct inode *inode = dentry->d_inode;
int error = -EBUSY;
- dfprintk(VFS, "NFS: safe_remove(%s/%s)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ dfprintk(VFS, "NFS: safe_remove(%pd2)\n", dentry);
/* If the dentry was sillyrenamed, we simply call d_delete() */
if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
@@ -1762,8 +1747,8 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry)
int error;
int need_rehash = 0;
- dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id,
- dir->i_ino, dentry->d_name.name);
+ dfprintk(VFS, "NFS: unlink(%s/%ld, %pd)\n", dir->i_sb->s_id,
+ dir->i_ino, dentry);
trace_nfs_unlink_enter(dir, dentry);
spin_lock(&dentry->d_lock);
@@ -1813,8 +1798,8 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
unsigned int pathlen = strlen(symname);
int error;
- dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s)\n", dir->i_sb->s_id,
- dir->i_ino, dentry->d_name.name, symname);
+ dfprintk(VFS, "NFS: symlink(%s/%ld, %pd, %s)\n", dir->i_sb->s_id,
+ dir->i_ino, dentry, symname);
if (pathlen > PAGE_SIZE)
return -ENAMETOOLONG;
@@ -1836,9 +1821,9 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
trace_nfs_symlink_exit(dir, dentry, error);
if (error != 0) {
- dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s) error %d\n",
+ dfprintk(VFS, "NFS: symlink(%s/%ld, %pd, %s) error %d\n",
dir->i_sb->s_id, dir->i_ino,
- dentry->d_name.name, symname, error);
+ dentry, symname, error);
d_drop(dentry);
__free_page(page);
return error;
@@ -1865,9 +1850,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
struct inode *inode = old_dentry->d_inode;
int error;
- dfprintk(VFS, "NFS: link(%s/%s -> %s/%s)\n",
- old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ dfprintk(VFS, "NFS: link(%pd2 -> %pd2)\n",
+ old_dentry, dentry);
trace_nfs_link_enter(inode, dir, dentry);
NFS_PROTO(inode)->return_delegation(inode);
@@ -1915,9 +1899,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *dentry = NULL, *rehash = NULL;
int error = -EBUSY;
- dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n",
- old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
- new_dentry->d_parent->d_name.name, new_dentry->d_name.name,
+ dfprintk(VFS, "NFS: rename(%pd2 -> %pd2, ct=%d)\n",
+ old_dentry, new_dentry,
d_count(new_dentry));
trace_nfs_rename_enter(old_dir, old_dentry, new_dir, new_dentry);
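Note: every fs/nfs/dir.c hunk above applies the same conversion — open-coded parent/child dentry name pairs become the vsprintf dentry and file extensions (%pd prints a dentry's name, %pd2 its parent/name pair, %pD/%pD2 do the same for a struct file). A minimal before/after sketch, not part of the patch (my_debug_lookup() is a made-up call site standing in for any dprintk()/dfprintk() caller):

	#include <linux/dcache.h>
	#include <linux/printk.h>

	static void my_debug_lookup(struct dentry *dentry)
	{
		/* before: two separate dereferences, one path component each */
		pr_debug("NFS: lookup(%s/%s)\n",
			 dentry->d_parent->d_name.name, dentry->d_name.name);

		/* after: the printk core renders both components via %pd2 */
		pr_debug("NFS: lookup(%pd2)\n", dentry);
	}

Besides being shorter, the %pd2/%pD2 forms avoid chasing d_parent by hand at the call site, which is why several hunks can also drop now-unused dentry locals.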
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 91ff089d3412..87a6475eb170 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -90,6 +90,7 @@ struct nfs_direct_req {
int flags;
#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
+#define NFS_ODIRECT_MARK_DIRTY (4) /* mark read pages dirty */
struct nfs_writeverf verf; /* unstable write verifier */
};
@@ -112,32 +113,22 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
* nfs_direct_IO - NFS address space operation for direct I/O
* @rw: direction (read or write)
* @iocb: target I/O control block
- * @iov: array of vectors that define I/O buffer
+ * @iter: iov_iter describing the I/O buffers
* @pos: offset in file to begin the operation
* @nr_segs: size of iovec array
*
* The presence of this routine in the address space ops vector means
- * the NFS client supports direct I/O. However, for most direct IO, we
- * shunt off direct read and write requests before the VFS gets them,
- * so this method is only ever called for swap.
+ * the NFS client supports direct I/O. However, we shunt off direct
+ * read and write requests before the VFS gets them, so this method
+ * should never be called.
*/
-ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos)
{
-#ifndef CONFIG_NFS_SWAP
- dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
- iocb->ki_filp->f_path.dentry->d_name.name,
- (long long) pos, nr_segs);
+ dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
+ iocb->ki_filp, (long long) pos, iter->nr_segs);
return -EINVAL;
-#else
- VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);
-
- if (rw == READ || rw == KERNEL_READ)
- return nfs_file_direct_read(iocb, iov, nr_segs, pos,
- rw == READ ? true : false);
- return nfs_file_direct_write(iocb, iov, nr_segs, pos,
- rw == WRITE ? true : false);
-#endif /* CONFIG_NFS_SWAP */
}
static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
@@ -265,7 +256,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
struct page *page = req->wb_page;
- if (!PageCompound(page) && bytes < hdr->good_bytes)
+ if ((dreq->flags & NFS_ODIRECT_MARK_DIRTY) &&
+ !PageCompound(page) && bytes < hdr->good_bytes)
set_page_dirty(page);
bytes += req->wb_bytes;
nfs_list_remove_request(req);
@@ -308,7 +300,7 @@ static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
*/
static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
const struct iovec *iov,
- loff_t pos, bool uio)
+ loff_t pos)
{
struct nfs_direct_req *dreq = desc->pg_dreq;
struct nfs_open_context *ctx = dreq->ctx;
@@ -336,20 +328,12 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
GFP_KERNEL);
if (!pagevec)
break;
- if (uio) {
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, user_addr,
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current, current->mm, user_addr,
npages, 1, 0, pagevec, NULL);
- up_read(&current->mm->mmap_sem);
- if (result < 0)
- break;
- } else {
- WARN_ON(npages != 1);
- result = get_kernel_page(user_addr, 1, pagevec);
- if (WARN_ON(result != 1))
- break;
- }
-
+ up_read(&current->mm->mmap_sem);
+ if (result < 0)
+ break;
if ((unsigned)result < npages) {
bytes = result * PAGE_SIZE;
if (bytes <= pgbase) {
@@ -397,24 +381,17 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
return result < 0 ? (ssize_t) result : -EFAULT;
}
-static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
- const struct iovec *iov,
- unsigned long nr_segs,
- loff_t pos, bool uio)
+static ssize_t nfs_direct_do_schedule_read_iovec(
+ struct nfs_pageio_descriptor *desc, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
{
- struct nfs_pageio_descriptor desc;
ssize_t result = -EINVAL;
size_t requested_bytes = 0;
unsigned long seg;
- NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
- &nfs_direct_read_completion_ops);
- get_dreq(dreq);
- desc.pg_dreq = dreq;
-
for (seg = 0; seg < nr_segs; seg++) {
const struct iovec *vec = &iov[seg];
- result = nfs_direct_read_schedule_segment(&desc, vec, pos, uio);
+ result = nfs_direct_read_schedule_segment(desc, vec, pos);
if (result < 0)
break;
requested_bytes += result;
@@ -422,6 +399,75 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
break;
pos += vec->iov_len;
}
+ if (requested_bytes)
+ return requested_bytes;
+
+ return result < 0 ? result : -EIO;
+}
+
+#ifdef CONFIG_BLOCK
+static ssize_t nfs_direct_do_schedule_read_bvec(
+ struct nfs_pageio_descriptor *desc,
+ struct bio_vec *bvec, unsigned long nr_segs, loff_t pos)
+{
+ struct nfs_direct_req *dreq = desc->pg_dreq;
+ struct nfs_open_context *ctx = dreq->ctx;
+ struct inode *inode = ctx->dentry->d_inode;
+ ssize_t result = -EINVAL;
+ size_t requested_bytes = 0;
+ unsigned long seg;
+ struct nfs_page *req;
+ unsigned int req_len;
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ result = -EIO;
+ req_len = bvec[seg].bv_len;
+ req = nfs_create_request(ctx, inode,
+ bvec[seg].bv_page,
+ bvec[seg].bv_offset, req_len);
+ if (IS_ERR(req)) {
+ result = PTR_ERR(req);
+ break;
+ }
+ req->wb_index = pos >> PAGE_SHIFT;
+ req->wb_offset = pos & ~PAGE_MASK;
+ if (!nfs_pageio_add_request(desc, req)) {
+ result = desc->pg_error;
+ nfs_release_request(req);
+ break;
+ }
+ requested_bytes += req_len;
+ pos += req_len;
+ }
+
+ if (requested_bytes)
+ return requested_bytes;
+
+ return result < 0 ? result : -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
+static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq,
+ struct iov_iter *iter, loff_t pos)
+{
+ struct nfs_pageio_descriptor desc;
+ ssize_t result;
+
+ NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
+ &nfs_direct_read_completion_ops);
+ get_dreq(dreq);
+ desc.pg_dreq = dreq;
+
+ if (iov_iter_has_iovec(iter)) {
+ result = nfs_direct_do_schedule_read_iovec(&desc,
+ iov_iter_iovec(iter), iter->nr_segs, pos);
+#ifdef CONFIG_BLOCK
+ } else if (iov_iter_has_bvec(iter)) {
+ result = nfs_direct_do_schedule_read_bvec(&desc,
+ iov_iter_bvec(iter), iter->nr_segs, pos);
+#endif
+ } else
+ BUG();
nfs_pageio_complete(&desc);
@@ -429,9 +475,9 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
* If no bytes were started, return the error, and let the
* generic layer handle the completion.
*/
- if (requested_bytes == 0) {
+ if (result < 0) {
nfs_direct_req_release(dreq);
- return result < 0 ? result : -EIO;
+ return result;
}
if (put_dreq(dreq))
@@ -439,8 +485,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
return 0;
}
-static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos, bool uio)
+static ssize_t nfs_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos)
{
ssize_t result = -ENOMEM;
struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -452,7 +498,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
goto out;
dreq->inode = inode;
- dreq->bytes_left = iov_length(iov, nr_segs);
+ dreq->bytes_left = iov_iter_count(iter);
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
l_ctx = nfs_get_lock_context(dreq->ctx);
if (IS_ERR(l_ctx)) {
@@ -463,8 +509,8 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
- NFS_I(inode)->read_io += iov_length(iov, nr_segs);
- result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio);
+ NFS_I(inode)->read_io += iov_iter_count(iter);
+ result = nfs_direct_read_schedule(dreq, iter, pos);
if (!result)
result = nfs_direct_wait(dreq);
out_release:
@@ -629,7 +675,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
*/
static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
const struct iovec *iov,
- loff_t pos, bool uio)
+ loff_t pos)
{
struct nfs_direct_req *dreq = desc->pg_dreq;
struct nfs_open_context *ctx = dreq->ctx;
@@ -657,19 +703,12 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *d
if (!pagevec)
break;
- if (uio) {
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, user_addr,
- npages, 0, 0, pagevec, NULL);
- up_read(&current->mm->mmap_sem);
- if (result < 0)
- break;
- } else {
- WARN_ON(npages != 1);
- result = get_kernel_page(user_addr, 0, pagevec);
- if (WARN_ON(result != 1))
- break;
- }
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current, current->mm, user_addr,
+ npages, 0, 0, pagevec, NULL);
+ up_read(&current->mm->mmap_sem);
+ if (result < 0)
+ break;
if ((unsigned)result < npages) {
bytes = result * PAGE_SIZE;
@@ -798,27 +837,18 @@ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
.completion = nfs_direct_write_completion,
};
-static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
- const struct iovec *iov,
- unsigned long nr_segs,
- loff_t pos, bool uio)
+static ssize_t nfs_direct_do_schedule_write_iovec(
+ struct nfs_pageio_descriptor *desc, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
{
- struct nfs_pageio_descriptor desc;
- struct inode *inode = dreq->inode;
- ssize_t result = 0;
+ ssize_t result = -EINVAL;
size_t requested_bytes = 0;
unsigned long seg;
- NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
- &nfs_direct_write_completion_ops);
- desc.pg_dreq = dreq;
- get_dreq(dreq);
- atomic_inc(&inode->i_dio_count);
-
- NFS_I(dreq->inode)->write_io += iov_length(iov, nr_segs);
for (seg = 0; seg < nr_segs; seg++) {
const struct iovec *vec = &iov[seg];
- result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
+ result = nfs_direct_write_schedule_segment(desc, vec,
+ pos);
if (result < 0)
break;
requested_bytes += result;
@@ -826,16 +856,91 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
break;
pos += vec->iov_len;
}
+
+ if (requested_bytes)
+ return requested_bytes;
+
+ return result < 0 ? result : -EIO;
+}
+
+#ifdef CONFIG_BLOCK
+static ssize_t nfs_direct_do_schedule_write_bvec(
+ struct nfs_pageio_descriptor *desc,
+ struct bio_vec *bvec, unsigned long nr_segs, loff_t pos)
+{
+ struct nfs_direct_req *dreq = desc->pg_dreq;
+ struct nfs_open_context *ctx = dreq->ctx;
+ struct inode *inode = dreq->inode;
+ ssize_t result = 0;
+ size_t requested_bytes = 0;
+ unsigned long seg;
+ struct nfs_page *req;
+ unsigned int req_len;
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ req_len = bvec[seg].bv_len;
+
+ req = nfs_create_request(ctx, inode, bvec[seg].bv_page,
+ bvec[seg].bv_offset, req_len);
+ if (IS_ERR(req)) {
+ result = PTR_ERR(req);
+ break;
+ }
+ nfs_lock_request(req);
+ req->wb_index = pos >> PAGE_SHIFT;
+ req->wb_offset = pos & ~PAGE_MASK;
+ if (!nfs_pageio_add_request(desc, req)) {
+ result = desc->pg_error;
+ nfs_unlock_and_release_request(req);
+ break;
+ }
+ requested_bytes += req_len;
+ pos += req_len;
+ }
+
+ if (requested_bytes)
+ return requested_bytes;
+
+ return result < 0 ? result : -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
+static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq,
+ struct iov_iter *iter, loff_t pos)
+{
+ struct nfs_pageio_descriptor desc;
+ struct inode *inode = dreq->inode;
+ ssize_t result = 0;
+
+ NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
+ &nfs_direct_write_completion_ops);
+ desc.pg_dreq = dreq;
+ get_dreq(dreq);
+ atomic_inc(&inode->i_dio_count);
+
+ NFS_I(dreq->inode)->write_io += iov_iter_count(iter);
+
+ if (iov_iter_has_iovec(iter)) {
+ result = nfs_direct_do_schedule_write_iovec(&desc,
+ iov_iter_iovec(iter), iter->nr_segs, pos);
+#ifdef CONFIG_BLOCK
+ } else if (iov_iter_has_bvec(iter)) {
+ result = nfs_direct_do_schedule_write_bvec(&desc,
+ iov_iter_bvec(iter), iter->nr_segs, pos);
+#endif
+ } else
+ BUG();
+
nfs_pageio_complete(&desc);
/*
* If no bytes were started, return the error, and let the
* generic layer handle the completion.
*/
- if (requested_bytes == 0) {
+ if (result < 0) {
inode_dio_done(inode);
nfs_direct_req_release(dreq);
- return result < 0 ? result : -EIO;
+ return result;
}
if (put_dreq(dreq))
@@ -843,9 +948,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
return 0;
}
-static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos,
- size_t count, bool uio)
+static ssize_t nfs_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos)
{
ssize_t result = -ENOMEM;
struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -857,7 +961,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
goto out;
dreq->inode = inode;
- dreq->bytes_left = count;
+ dreq->bytes_left = iov_iter_count(iter);
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
l_ctx = nfs_get_lock_context(dreq->ctx);
if (IS_ERR(l_ctx)) {
@@ -868,7 +972,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
- result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio);
+ result = nfs_direct_write_schedule(dreq, iter, pos);
if (!result)
result = nfs_direct_wait(dreq);
out_release:
@@ -880,12 +984,11 @@ out:
/**
* nfs_file_direct_read - file direct read operation for NFS files
* @iocb: target I/O control block
- * @iov: vector of user buffers into which to read data
- * @nr_segs: size of iov vector
+ * @iter: vector of buffers into which to read data
* @pos: byte offset in file where reading starts
*
* We use this function for direct reads instead of calling
- * generic_file_aio_read() in order to avoid gfar's check to see if
+ * generic_file_read_iter() in order to avoid gfar's check to see if
* the request starts before the end of the file. For that check
* to work, we must generate a GETATTR before each direct read, and
* even then there is a window between the GETATTR and the subsequent
@@ -898,21 +1001,19 @@ out:
* client must read the updated atime from the server back into its
* cache.
*/
-ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos, bool uio)
+ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos)
{
ssize_t retval = -EINVAL;
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
size_t count;
- count = iov_length(iov, nr_segs);
+ count = iov_iter_count(iter);
nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
- dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
- file->f_path.dentry->d_parent->d_name.name,
- file->f_path.dentry->d_name.name,
- count, (long long) pos);
+ dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
+ file, count, (long long) pos);
retval = 0;
if (!count)
@@ -924,7 +1025,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
task_io_account_read(count);
- retval = nfs_direct_read(iocb, iov, nr_segs, pos, uio);
+ retval = nfs_direct_read(iocb, iter, pos);
if (retval > 0)
iocb->ki_pos = pos + retval;
@@ -935,12 +1036,11 @@ out:
/**
* nfs_file_direct_write - file direct write operation for NFS files
* @iocb: target I/O control block
- * @iov: vector of user buffers from which to write data
- * @nr_segs: size of iov vector
+ * @iter: vector of buffers from which to write data
* @pos: byte offset in file where writing starts
*
* We use this function for direct writes instead of calling
- * generic_file_aio_write() in order to avoid taking the inode
+ * generic_file_write_iter() in order to avoid taking the inode
* semaphore and updating the i_size. The NFS server will set
* the new i_size and this client must read the updated size
* back into its cache. We let the server do generic write
@@ -954,21 +1054,19 @@ out:
* Note that O_APPEND is not supported for NFS direct writes, as there
* is no atomic O_APPEND write facility in the NFS protocol.
*/
-ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos, bool uio)
+ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos)
{
ssize_t retval = -EINVAL;
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
size_t count;
- count = iov_length(iov, nr_segs);
+ count = iov_iter_count(iter);
nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
- dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
- file->f_path.dentry->d_parent->d_name.name,
- file->f_path.dentry->d_name.name,
- count, (long long) pos);
+ dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
+ file, count, (long long) pos);
retval = generic_write_checks(file, &pos, &count, 0);
if (retval)
@@ -987,7 +1085,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
task_io_account_write(count);
- retval = nfs_direct_write(iocb, iov, nr_segs, pos, count, uio);
+ retval = nfs_direct_write(iocb, iter, pos);
if (retval > 0) {
struct inode *inode = mapping->host;
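The fs/nfs/direct.c portion above moves the direct read and write paths from (iov, nr_segs) argument pairs to struct iov_iter, then branches on the iterator's backing store. A rough sketch of that dispatch shape, assuming the iov_iter_has_iovec()/iov_iter_has_bvec() helpers this (out-of-tree) iov_iter series provides; the sketch_schedule_*() callees are placeholders, not real kernel functions:

	static ssize_t sketch_direct_schedule(struct nfs_pageio_descriptor *desc,
					      struct iov_iter *iter, loff_t pos)
	{
		if (iov_iter_has_iovec(iter))		/* user-memory segments */
			return sketch_schedule_iovec(desc, iov_iter_iovec(iter),
						     iter->nr_segs, pos);
	#ifdef CONFIG_BLOCK
		if (iov_iter_has_bvec(iter))		/* page/bio_vec segments */
			return sketch_schedule_bvec(desc, iov_iter_bvec(iter),
						    iter->nr_segs, pos);
	#endif
		BUG();					/* no other backing expected */
		return -EINVAL;				/* not reached */
	}

Either branch returns the number of bytes queued or a negative error, which the caller turns into nfs_direct_req_release() plus an early return, as in the hunks above.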
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 1e6bfdbc1aff..e022fe909ded 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -65,9 +65,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
{
int res;
- dprintk("NFS: open file(%s/%s)\n",
- filp->f_path.dentry->d_parent->d_name.name,
- filp->f_path.dentry->d_name.name);
+ dprintk("NFS: open file(%pD2)\n", filp);
nfs_inc_stats(inode, NFSIOS_VFSOPEN);
res = nfs_check_flags(filp->f_flags);
@@ -81,9 +79,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
int
nfs_file_release(struct inode *inode, struct file *filp)
{
- dprintk("NFS: release(%s/%s)\n",
- filp->f_path.dentry->d_parent->d_name.name,
- filp->f_path.dentry->d_name.name);
+ dprintk("NFS: release(%pD2)\n", filp);
nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
return nfs_release(inode, filp);
@@ -123,10 +119,8 @@ force_reval:
loff_t nfs_file_llseek(struct file *filp, loff_t offset, int whence)
{
- dprintk("NFS: llseek file(%s/%s, %lld, %d)\n",
- filp->f_path.dentry->d_parent->d_name.name,
- filp->f_path.dentry->d_name.name,
- offset, whence);
+ dprintk("NFS: llseek file(%pD2, %lld, %d)\n",
+ filp, offset, whence);
/*
* whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
@@ -150,12 +144,9 @@ EXPORT_SYMBOL_GPL(nfs_file_llseek);
int
nfs_file_flush(struct file *file, fl_owner_t id)
{
- struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file_inode(file);
- dprintk("NFS: flush(%s/%s)\n",
- dentry->d_parent->d_name.name,
- dentry->d_name.name);
+ dprintk("NFS: flush(%pD2)\n", file);
nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
if ((file->f_mode & FMODE_WRITE) == 0)
@@ -174,42 +165,38 @@ nfs_file_flush(struct file *file, fl_owner_t id)
EXPORT_SYMBOL_GPL(nfs_file_flush);
ssize_t
-nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+nfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
- struct dentry * dentry = iocb->ki_filp->f_path.dentry;
- struct inode * inode = dentry->d_inode;
+ struct inode *inode = file_inode(iocb->ki_filp);
ssize_t result;
if (iocb->ki_filp->f_flags & O_DIRECT)
- return nfs_file_direct_read(iocb, iov, nr_segs, pos, true);
+ return nfs_file_direct_read(iocb, iter, pos);
- dprintk("NFS: read(%s/%s, %lu@%lu)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
- (unsigned long) iov_length(iov, nr_segs), (unsigned long) pos);
+ dprintk("NFS: read_iter(%pD2, %lu@%lu)\n",
+ iocb->ki_filp,
+ (unsigned long) iov_iter_count(iter), (unsigned long) pos);
result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
if (!result) {
- result = generic_file_aio_read(iocb, iov, nr_segs, pos);
+ result = generic_file_read_iter(iocb, iter, pos);
if (result > 0)
nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
}
return result;
}
-EXPORT_SYMBOL_GPL(nfs_file_read);
+EXPORT_SYMBOL_GPL(nfs_file_read_iter);
ssize_t
nfs_file_splice_read(struct file *filp, loff_t *ppos,
struct pipe_inode_info *pipe, size_t count,
unsigned int flags)
{
- struct dentry *dentry = filp->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file_inode(filp);
ssize_t res;
- dprintk("NFS: splice_read(%s/%s, %lu@%Lu)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
- (unsigned long) count, (unsigned long long) *ppos);
+ dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n",
+ filp, (unsigned long) count, (unsigned long long) *ppos);
res = nfs_revalidate_mapping(inode, filp->f_mapping);
if (!res) {
@@ -224,12 +211,10 @@ EXPORT_SYMBOL_GPL(nfs_file_splice_read);
int
nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
{
- struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file_inode(file);
int status;
- dprintk("NFS: mmap(%s/%s)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ dprintk("NFS: mmap(%pD2)\n", file);
/* Note: generic_file_mmap() returns ENOSYS on nommu systems
* so we call that before revalidating the mapping
@@ -252,21 +237,18 @@ EXPORT_SYMBOL_GPL(nfs_file_mmap);
* disk, but it retrieves and clears ctx->error after synching, despite
* the two being set at the same time in nfs_context_set_write_error().
* This is because the former is used to notify the _next_ call to
- * nfs_file_write() that a write error occurred, and hence cause it to
+ * nfs_file_write_iter() that a write error occurred, and hence cause it to
* fall back to doing a synchronous write.
*/
int
nfs_file_fsync_commit(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct dentry *dentry = file->f_path.dentry;
struct nfs_open_context *ctx = nfs_file_open_context(file);
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file_inode(file);
int have_error, do_resend, status;
int ret = 0;
- dprintk("NFS: fsync file(%s/%s) datasync %d\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
- datasync);
+ dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);
nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
do_resend = test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
@@ -371,10 +353,8 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
struct page *page;
int once_thru = 0;
- dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n",
- file->f_path.dentry->d_parent->d_name.name,
- file->f_path.dentry->d_name.name,
- mapping->host->i_ino, len, (long long) pos);
+ dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%ld), %u@%lld)\n",
+ file, mapping->host->i_ino, len, (long long) pos);
start:
/*
@@ -414,10 +394,8 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
struct nfs_open_context *ctx = nfs_file_open_context(file);
int status;
- dfprintk(PAGECACHE, "NFS: write_end(%s/%s(%ld), %u@%lld)\n",
- file->f_path.dentry->d_parent->d_name.name,
- file->f_path.dentry->d_name.name,
- mapping->host->i_ino, len, (long long) pos);
+ dfprintk(PAGECACHE, "NFS: write_end(%pD2(%ld), %u@%lld)\n",
+ file, mapping->host->i_ino, len, (long long) pos);
/*
* Zero any uninitialised parts of the page, and then mark the page
@@ -601,22 +579,21 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct file *filp = vma->vm_file;
- struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = file_inode(filp);
unsigned pagelen;
int ret = VM_FAULT_NOPAGE;
struct address_space *mapping;
- dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%s/%s(%ld), offset %lld)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
- filp->f_mapping->host->i_ino,
+ dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%ld), offset %lld)\n",
+ filp, filp->f_mapping->host->i_ino,
(long long)page_offset(page));
/* make sure the cache has finished storing the page */
- nfs_fscache_wait_on_page_write(NFS_I(dentry->d_inode), page);
+ nfs_fscache_wait_on_page_write(NFS_I(inode), page);
lock_page(page);
mapping = page_file_mapping(page);
- if (mapping != dentry->d_inode->i_mapping)
+ if (mapping != inode->i_mapping)
goto out_unlock;
wait_on_page_writeback(page);
@@ -656,25 +633,24 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
return 0;
}
-ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ssize_t nfs_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos)
{
- struct dentry * dentry = iocb->ki_filp->f_path.dentry;
- struct inode * inode = dentry->d_inode;
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
unsigned long written = 0;
ssize_t result;
- size_t count = iov_length(iov, nr_segs);
+ size_t count = iov_iter_count(iter);
- result = nfs_key_timeout_notify(iocb->ki_filp, inode);
+ result = nfs_key_timeout_notify(file, inode);
if (result)
return result;
- if (iocb->ki_filp->f_flags & O_DIRECT)
- return nfs_file_direct_write(iocb, iov, nr_segs, pos, true);
+ if (file->f_flags & O_DIRECT)
+ return nfs_file_direct_write(iocb, iter, pos);
- dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
- (unsigned long) count, (long long) pos);
+ dprintk("NFS: write_iter(%pD2, %lu@%Ld)\n",
+ file, (unsigned long) count, (long long) pos);
result = -EBUSY;
if (IS_SWAPFILE(inode))
@@ -682,8 +658,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
/*
* O_APPEND implies that we must revalidate the file length.
*/
- if (iocb->ki_filp->f_flags & O_APPEND) {
- result = nfs_revalidate_file_size(inode, iocb->ki_filp);
+ if (file->f_flags & O_APPEND) {
+ result = nfs_revalidate_file_size(inode, file);
if (result)
goto out;
}
@@ -692,13 +668,13 @@ ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
if (!count)
goto out;
- result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+ result = generic_file_write_iter(iocb, iter, pos);
if (result > 0)
written = result;
/* Return error values for O_DSYNC and IS_SYNC() */
- if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
- int err = vfs_fsync(iocb->ki_filp, 0);
+ if (result >= 0 && nfs_need_sync_write(file, inode)) {
+ int err = vfs_fsync(file, 0);
if (err < 0)
result = err;
}
@@ -711,20 +687,18 @@ out_swapfile:
printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
goto out;
}
-EXPORT_SYMBOL_GPL(nfs_file_write);
+EXPORT_SYMBOL_GPL(nfs_file_write_iter);
ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
struct file *filp, loff_t *ppos,
size_t count, unsigned int flags)
{
- struct dentry *dentry = filp->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file_inode(filp);
unsigned long written = 0;
ssize_t ret;
- dprintk("NFS splice_write(%s/%s, %lu@%llu)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
- (unsigned long) count, (unsigned long long) *ppos);
+ dprintk("NFS splice_write(%pD2, %lu@%llu)\n",
+ filp, (unsigned long) count, (unsigned long long) *ppos);
/*
* The combination of splice and an O_APPEND destination is disallowed.
@@ -883,10 +857,8 @@ int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
int ret = -ENOLCK;
int is_local = 0;
- dprintk("NFS: lock(%s/%s, t=%x, fl=%x, r=%lld:%lld)\n",
- filp->f_path.dentry->d_parent->d_name.name,
- filp->f_path.dentry->d_name.name,
- fl->fl_type, fl->fl_flags,
+ dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n",
+ filp, fl->fl_type, fl->fl_flags,
(long long)fl->fl_start, (long long)fl->fl_end);
nfs_inc_stats(inode, NFSIOS_VFSLOCK);
@@ -923,10 +895,8 @@ int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
struct inode *inode = filp->f_mapping->host;
int is_local = 0;
- dprintk("NFS: flock(%s/%s, t=%x, fl=%x)\n",
- filp->f_path.dentry->d_parent->d_name.name,
- filp->f_path.dentry->d_name.name,
- fl->fl_type, fl->fl_flags);
+ dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n",
+ filp, fl->fl_type, fl->fl_flags);
if (!(fl->fl_flags & FL_FLOCK))
return -ENOLCK;
@@ -960,9 +930,7 @@ EXPORT_SYMBOL_GPL(nfs_flock);
*/
int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
{
- dprintk("NFS: setlease(%s/%s, arg=%ld)\n",
- file->f_path.dentry->d_parent->d_name.name,
- file->f_path.dentry->d_name.name, arg);
+ dprintk("NFS: setlease(%pD2, arg=%ld)\n", file, arg);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(nfs_setlease);
@@ -971,8 +939,8 @@ const struct file_operations nfs_file_operations = {
.llseek = nfs_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = nfs_file_read,
- .aio_write = nfs_file_write,
+ .read_iter = nfs_file_read_iter,
+ .write_iter = nfs_file_write_iter,
.mmap = nfs_file_mmap,
.open = nfs_file_open,
.flush = nfs_file_flush,
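The nfs_file_operations change at the end of this hunk is the consumer side of the rename: the table now exposes ->read_iter/->write_iter while keeping the legacy ->read/->write entries on the do_sync_* wrappers. A trimmed, illustrative table in the same shape (only the fields visible in the hunk are taken from the patch; the variable name is made up):

	static const struct file_operations sketch_fops = {
		.llseek		= nfs_file_llseek,
		.read		= do_sync_read,		/* legacy sync entry points,    */
		.write		= do_sync_write,	/* kept alongside the iter ops  */
		.read_iter	= nfs_file_read_iter,
		.write_iter	= nfs_file_write_iter,
		.open		= nfs_file_open,
		.flush		= nfs_file_flush,
	};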
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 24d1d1c5fcaf..3ef01f0ba0bc 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -39,7 +39,7 @@ void nfs_fscache_get_client_cookie(struct nfs_client *clp)
/* create a cache index for looking up filehandles */
clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
&nfs_fscache_server_index_def,
- clp);
+ clp, true);
dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
clp, clp->fscache);
}
@@ -139,7 +139,7 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
/* create a cache index for looking up filehandles */
nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
&nfs_fscache_super_index_def,
- nfss);
+ nfss, true);
dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
nfss, nfss->fscache);
return;
@@ -178,163 +178,79 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
/*
* Initialise the per-inode cache cookie pointer for an NFS inode.
*/
-void nfs_fscache_init_inode_cookie(struct inode *inode)
+void nfs_fscache_init_inode(struct inode *inode)
{
- NFS_I(inode)->fscache = NULL;
- if (S_ISREG(inode->i_mode))
- set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
-}
-
-/*
- * Get the per-inode cache cookie for an NFS inode.
- */
-static void nfs_fscache_enable_inode_cookie(struct inode *inode)
-{
- struct super_block *sb = inode->i_sb;
struct nfs_inode *nfsi = NFS_I(inode);
- if (nfsi->fscache || !NFS_FSCACHE(inode))
+ nfsi->fscache = NULL;
+ if (!S_ISREG(inode->i_mode))
return;
-
- if ((NFS_SB(sb)->options & NFS_OPTION_FSCACHE)) {
- nfsi->fscache = fscache_acquire_cookie(
- NFS_SB(sb)->fscache,
- &nfs_fscache_inode_object_def,
- nfsi);
-
- dfprintk(FSCACHE, "NFS: get FH cookie (0x%p/0x%p/0x%p)\n",
- sb, nfsi, nfsi->fscache);
- }
+ nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
+ &nfs_fscache_inode_object_def,
+ nfsi, false);
}
/*
* Release a per-inode cookie.
*/
-void nfs_fscache_release_inode_cookie(struct inode *inode)
+void nfs_fscache_clear_inode(struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
+ struct fscache_cookie *cookie = nfs_i_fscache(inode);
- dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n",
- nfsi, nfsi->fscache);
+ dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);
- fscache_relinquish_cookie(nfsi->fscache, 0);
+ fscache_relinquish_cookie(cookie, false);
nfsi->fscache = NULL;
}
-/*
- * Retire a per-inode cookie, destroying the data attached to it.
- */
-void nfs_fscache_zap_inode_cookie(struct inode *inode)
+static bool nfs_fscache_can_enable(void *data)
{
- struct nfs_inode *nfsi = NFS_I(inode);
+ struct inode *inode = data;
- dfprintk(FSCACHE, "NFS: zapping cookie (0x%p/0x%p)\n",
- nfsi, nfsi->fscache);
-
- fscache_relinquish_cookie(nfsi->fscache, 1);
- nfsi->fscache = NULL;
+ return !inode_is_open_for_write(inode);
}
/*
- * Turn off the cache with regard to a per-inode cookie if opened for writing,
- * invalidating all the pages in the page cache relating to the associated
- * inode to clear the per-page caching.
- */
-static void nfs_fscache_disable_inode_cookie(struct inode *inode)
-{
- clear_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
-
- if (NFS_I(inode)->fscache) {
- dfprintk(FSCACHE,
- "NFS: nfsi 0x%p turning cache off\n", NFS_I(inode));
-
- /* Need to uncache any pages attached to this inode that
- * fscache knows about before turning off the cache.
- */
- fscache_uncache_all_inode_pages(NFS_I(inode)->fscache, inode);
- nfs_fscache_zap_inode_cookie(inode);
- }
-}
-
-/*
- * wait_on_bit() sleep function for uninterruptible waiting
- */
-static int nfs_fscache_wait_bit(void *flags)
-{
- schedule();
- return 0;
-}
-
-/*
- * Lock against someone else trying to also acquire or relinquish a cookie
- */
-static inline void nfs_fscache_inode_lock(struct inode *inode)
-{
- struct nfs_inode *nfsi = NFS_I(inode);
-
- while (test_and_set_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags))
- wait_on_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK,
- nfs_fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-}
-
-/*
- * Unlock cookie management lock
- */
-static inline void nfs_fscache_inode_unlock(struct inode *inode)
-{
- struct nfs_inode *nfsi = NFS_I(inode);
-
- smp_mb__before_clear_bit();
- clear_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags);
- smp_mb__after_clear_bit();
- wake_up_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK);
-}
-
-/*
- * Decide if we should enable or disable local caching for this inode.
- * - For now, with NFS, only regular files that are open read-only will be able
- * to use the cache.
- * - May be invoked multiple times in parallel by parallel nfs_open() functions.
- */
-void nfs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
-{
- if (NFS_FSCACHE(inode)) {
- nfs_fscache_inode_lock(inode);
- if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
- nfs_fscache_disable_inode_cookie(inode);
- else
- nfs_fscache_enable_inode_cookie(inode);
- nfs_fscache_inode_unlock(inode);
- }
-}
-EXPORT_SYMBOL_GPL(nfs_fscache_set_inode_cookie);
-
-/*
- * Replace a per-inode cookie due to revalidation detecting a file having
- * changed on the server.
+ * Enable or disable caching for a file that is being opened as appropriate.
+ * The cookie is allocated when the inode is initialised, but is not enabled at
+ * that time. Enablement is deferred to file-open time to avoid stat() and
+ * access() thrashing the cache.
+ *
+ * For now, with NFS, only regular files that are open read-only will be able
+ * to use the cache.
+ *
+ * We enable the cache for an inode if we open it read-only and it isn't
+ * currently open for writing. We disable the cache if the inode is open
+ * write-only.
+ *
+ * The caller uses the file struct to pin i_writecount on the inode before
+ * calling us when a file is opened for writing, so we can make use of that.
+ *
+ * Note that this may be invoked multiple times in parallel by parallel
+ * nfs_open() functions.
*/
-void nfs_fscache_reset_inode_cookie(struct inode *inode)
+void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
struct nfs_inode *nfsi = NFS_I(inode);
- struct nfs_server *nfss = NFS_SERVER(inode);
- NFS_IFDEBUG(struct fscache_cookie *old = nfsi->fscache);
+ struct fscache_cookie *cookie = nfs_i_fscache(inode);
- nfs_fscache_inode_lock(inode);
- if (nfsi->fscache) {
- /* retire the current fscache cache and get a new one */
- fscache_relinquish_cookie(nfsi->fscache, 1);
-
- nfsi->fscache = fscache_acquire_cookie(
- nfss->nfs_client->fscache,
- &nfs_fscache_inode_object_def,
- nfsi);
+ if (!fscache_cookie_valid(cookie))
+ return;
- dfprintk(FSCACHE,
- "NFS: revalidation new cookie (0x%p/0x%p/0x%p/0x%p)\n",
- nfss, nfsi, old, nfsi->fscache);
+ if (inode_is_open_for_write(inode)) {
+ dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
+ clear_bit(NFS_INO_FSCACHE, &nfsi->flags);
+ fscache_disable_cookie(cookie, true);
+ fscache_uncache_all_inode_pages(cookie, inode);
+ } else {
+ dfprintk(FSCACHE, "NFS: nfsi 0x%p enabling cache\n", nfsi);
+ fscache_enable_cookie(cookie, nfs_fscache_can_enable, inode);
+ if (fscache_cookie_enabled(cookie))
+ set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
}
- nfs_fscache_inode_unlock(inode);
}
+EXPORT_SYMBOL_GPL(nfs_fscache_open_file);
/*
* Release the caching state associated with a page, if the page isn't busy
@@ -344,12 +260,11 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode)
int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
if (PageFsCache(page)) {
- struct nfs_inode *nfsi = NFS_I(page->mapping->host);
- struct fscache_cookie *cookie = nfsi->fscache;
+ struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host);
BUG_ON(!cookie);
dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
- cookie, page, nfsi);
+ cookie, page, NFS_I(page->mapping->host));
if (!fscache_maybe_release_page(cookie, page, gfp))
return 0;
@@ -367,13 +282,12 @@ int nfs_fscache_release_page(struct page *page, gfp_t gfp)
*/
void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
{
- struct nfs_inode *nfsi = NFS_I(inode);
- struct fscache_cookie *cookie = nfsi->fscache;
+ struct fscache_cookie *cookie = nfs_i_fscache(inode);
BUG_ON(!cookie);
dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
- cookie, page, nfsi);
+ cookie, page, NFS_I(inode));
fscache_wait_on_page_write(cookie, page);
@@ -417,9 +331,9 @@ int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
dfprintk(FSCACHE,
"NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
- NFS_I(inode)->fscache, page, page->index, page->flags, inode);
+ nfs_i_fscache(inode), page, page->index, page->flags, inode);
- ret = fscache_read_or_alloc_page(NFS_I(inode)->fscache,
+ ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
page,
nfs_readpage_from_fscache_complete,
ctx,
@@ -459,9 +373,9 @@ int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
int ret;
dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
- NFS_I(inode)->fscache, npages, inode);
+ nfs_i_fscache(inode), npages, inode);
- ret = fscache_read_or_alloc_pages(NFS_I(inode)->fscache,
+ ret = fscache_read_or_alloc_pages(nfs_i_fscache(inode),
mapping, pages, nr_pages,
nfs_readpage_from_fscache_complete,
ctx,
@@ -506,15 +420,15 @@ void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
dfprintk(FSCACHE,
"NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
- NFS_I(inode)->fscache, page, page->index, page->flags, sync);
+ nfs_i_fscache(inode), page, page->index, page->flags, sync);
- ret = fscache_write_page(NFS_I(inode)->fscache, page, GFP_KERNEL);
+ ret = fscache_write_page(nfs_i_fscache(inode), page, GFP_KERNEL);
dfprintk(FSCACHE,
"NFS: readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
page, page->index, page->flags, ret);
if (ret != 0) {
- fscache_uncache_page(NFS_I(inode)->fscache, page);
+ fscache_uncache_page(nfs_i_fscache(inode), page);
nfs_add_fscache_stats(inode,
NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, 1);
nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
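The fscache.c rewrite above replaces the acquire/retire-per-transition scheme (and its NFS_INO_FSCACHE_LOCK bit-lock) with a single cookie that is acquired disabled in nfs_fscache_init_inode() and merely toggled at open time. A condensed sketch of the open-time decision, mirroring nfs_fscache_open_file() in the hunk above (it omits the NFS_INO_FSCACHE flag bookkeeping):

	static void sketch_open_file(struct inode *inode, struct file *filp)
	{
		struct fscache_cookie *cookie = nfs_i_fscache(inode);

		if (!fscache_cookie_valid(cookie))
			return;				/* not a cacheable inode */

		if (inode_is_open_for_write(inode)) {
			/* a writer exists: drop cached pages, disable + invalidate */
			fscache_disable_cookie(cookie, true);
			fscache_uncache_all_inode_pages(cookie, inode);
		} else {
			/* read-only open: enable, re-checking for writers
			 * inside fscache via the can_enable callback */
			fscache_enable_cookie(cookie, nfs_fscache_can_enable, inode);
		}
	}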
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 4ecb76652eba..d7fe3e799f2f 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -76,11 +76,9 @@ extern void nfs_fscache_release_client_cookie(struct nfs_client *);
extern void nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
extern void nfs_fscache_release_super_cookie(struct super_block *);
-extern void nfs_fscache_init_inode_cookie(struct inode *);
-extern void nfs_fscache_release_inode_cookie(struct inode *);
-extern void nfs_fscache_zap_inode_cookie(struct inode *);
-extern void nfs_fscache_set_inode_cookie(struct inode *, struct file *);
-extern void nfs_fscache_reset_inode_cookie(struct inode *);
+extern void nfs_fscache_init_inode(struct inode *);
+extern void nfs_fscache_clear_inode(struct inode *);
+extern void nfs_fscache_open_file(struct inode *, struct file *);
extern void __nfs_fscache_invalidate_page(struct page *, struct inode *);
extern int nfs_fscache_release_page(struct page *, gfp_t);
@@ -187,12 +185,10 @@ static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {}
static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
-static inline void nfs_fscache_init_inode_cookie(struct inode *inode) {}
-static inline void nfs_fscache_release_inode_cookie(struct inode *inode) {}
-static inline void nfs_fscache_zap_inode_cookie(struct inode *inode) {}
-static inline void nfs_fscache_set_inode_cookie(struct inode *inode,
- struct file *filp) {}
-static inline void nfs_fscache_reset_inode_cookie(struct inode *inode) {}
+static inline void nfs_fscache_init_inode(struct inode *inode) {}
+static inline void nfs_fscache_clear_inode(struct inode *inode) {}
+static inline void nfs_fscache_open_file(struct inode *inode,
+ struct file *filp) {}
static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index eda8879171c4..471ba59c42f9 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -122,7 +122,7 @@ void nfs_clear_inode(struct inode *inode)
WARN_ON_ONCE(!list_empty(&NFS_I(inode)->open_files));
nfs_zap_acl_cache(inode);
nfs_access_zap_cache(inode);
- nfs_fscache_release_inode_cookie(inode);
+ nfs_fscache_clear_inode(inode);
}
EXPORT_SYMBOL_GPL(nfs_clear_inode);
@@ -459,7 +459,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
nfsi->attrtimeo_timestamp = now;
nfsi->access_cache = RB_ROOT;
- nfs_fscache_init_inode_cookie(inode);
+ nfs_fscache_init_inode(inode);
unlock_new_inode(inode);
} else
@@ -854,7 +854,7 @@ int nfs_open(struct inode *inode, struct file *filp)
return PTR_ERR(ctx);
nfs_file_set_open_context(filp, ctx);
put_nfs_open_context(ctx);
- nfs_fscache_set_inode_cookie(inode, filp);
+ nfs_fscache_open_file(inode, filp);
return 0;
}
@@ -1209,6 +1209,7 @@ u32 _nfs_display_fhandle_hash(const struct nfs_fh *fh)
* not on the result */
return nfs_fhandle_hash(fh);
}
+EXPORT_SYMBOL_GPL(_nfs_display_fhandle_hash);
/*
* _nfs_display_fhandle - display an NFS file handle on the console
@@ -1253,6 +1254,7 @@ void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption)
}
}
}
+EXPORT_SYMBOL_GPL(_nfs_display_fhandle);
#endif
/**
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 38da8c2b81ac..9e70d767af88 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -88,8 +88,8 @@ struct nfs_parsed_mount_data {
unsigned int namlen;
unsigned int options;
unsigned int bsize;
- unsigned int auth_flavor_len;
- rpc_authflavor_t auth_flavors[1];
+ struct nfs_auth_info auth_info;
+ rpc_authflavor_t selected_flavor;
char *client_address;
unsigned int version;
unsigned int minorversion;
@@ -154,6 +154,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *,
rpc_authflavor_t);
int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *, struct nfs_fattr *);
void nfs_server_insert_lists(struct nfs_server *);
+void nfs_server_remove_lists(struct nfs_server *);
void nfs_init_timeout_values(struct rpc_timeout *, int, unsigned int, unsigned int);
int nfs_init_server_rpcclient(struct nfs_server *, const struct rpc_timeout *t,
rpc_authflavor_t);
@@ -174,6 +175,8 @@ extern struct nfs_server *nfs4_create_server(
struct nfs_subversion *);
extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *,
struct nfs_fh *);
+extern int nfs4_update_server(struct nfs_server *server, const char *hostname,
+ struct sockaddr *sap, size_t salen);
extern void nfs_free_server(struct nfs_server *server);
extern struct nfs_server *nfs_clone_server(struct nfs_server *,
struct nfs_fh *,
@@ -291,11 +294,11 @@ int nfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *)
int nfs_file_fsync_commit(struct file *, loff_t, loff_t, int);
loff_t nfs_file_llseek(struct file *, loff_t, int);
int nfs_file_flush(struct file *, fl_owner_t);
-ssize_t nfs_file_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ssize_t nfs_file_read_iter(struct kiocb *, struct iov_iter *, loff_t);
ssize_t nfs_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *,
size_t, unsigned int);
int nfs_file_mmap(struct file *, struct vm_area_struct *);
-ssize_t nfs_file_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ssize_t nfs_file_write_iter(struct kiocb *, struct iov_iter *, loff_t);
int nfs_file_release(struct inode *, struct file *);
int nfs_lock(struct file *, int, struct file_lock *);
int nfs_flock(struct file *, int, struct file_lock *);
@@ -323,6 +326,7 @@ extern struct file_system_type nfs_xdev_fs_type;
extern struct file_system_type nfs4_xdev_fs_type;
extern struct file_system_type nfs4_referral_fs_type;
#endif
+bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *,
struct nfs_subversion *);
void nfs_initialise_sb(struct super_block *);
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 348b535cd786..b5a0afc3ee10 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -253,9 +253,8 @@ struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
dprintk("--> nfs_do_submount()\n");
- dprintk("%s: submounting on %s/%s\n", __func__,
- dentry->d_parent->d_name.name,
- dentry->d_name.name);
+ dprintk("%s: submounting on %pd2\n", __func__,
+ dentry);
if (page == NULL)
goto out;
devname = nfs_devname(dentry, page, PAGE_SIZE);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 90cb10d7b693..01b6f6a49d16 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -321,7 +321,7 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
umode_t mode = sattr->ia_mode;
int status = -ENOMEM;
- dprintk("NFS call create %s\n", dentry->d_name.name);
+ dprintk("NFS call create %pd\n", dentry);
data = nfs3_alloc_createdata();
if (data == NULL)
@@ -548,7 +548,7 @@ nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
if (len > NFS3_MAXPATHLEN)
return -ENAMETOOLONG;
- dprintk("NFS call symlink %s\n", dentry->d_name.name);
+ dprintk("NFS call symlink %pd\n", dentry);
data = nfs3_alloc_createdata();
if (data == NULL)
@@ -576,7 +576,7 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
umode_t mode = sattr->ia_mode;
int status = -ENOMEM;
- dprintk("NFS call mkdir %s\n", dentry->d_name.name);
+ dprintk("NFS call mkdir %pd\n", dentry);
sattr->ia_mode &= ~current_umask();
@@ -695,7 +695,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
umode_t mode = sattr->ia_mode;
int status = -ENOMEM;
- dprintk("NFS call mknod %s %u:%u\n", dentry->d_name.name,
+ dprintk("NFS call mknod %pd %u:%u\n", dentry,
MAJOR(rdev), MINOR(rdev));
sattr->ia_mode &= ~current_umask();
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 28842abafab4..3ce79b04522e 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -29,6 +29,8 @@ enum nfs4_client_state {
NFS4CLNT_SERVER_SCOPE_MISMATCH,
NFS4CLNT_PURGE_STATE,
NFS4CLNT_BIND_CONN_TO_SESSION,
+ NFS4CLNT_MOVED,
+ NFS4CLNT_LEASE_MOVED,
};
#define NFS4_RENEW_TIMEOUT 0x01
@@ -50,6 +52,7 @@ struct nfs4_minor_version_ops {
const struct nfs4_state_recovery_ops *reboot_recovery_ops;
const struct nfs4_state_recovery_ops *nograce_recovery_ops;
const struct nfs4_state_maintenance_ops *state_renewal_ops;
+ const struct nfs4_mig_recovery_ops *mig_recovery_ops;
};
#define NFS_SEQID_CONFIRMED 1
@@ -203,6 +206,12 @@ struct nfs4_state_maintenance_ops {
int (*renew_lease)(struct nfs_client *, struct rpc_cred *);
};
+struct nfs4_mig_recovery_ops {
+ int (*get_locations)(struct inode *, struct nfs4_fs_locations *,
+ struct page *, struct rpc_cred *);
+ int (*fsid_present)(struct inode *, struct rpc_cred *);
+};
+
extern const struct dentry_operations nfs4_dentry_operations;
/* dir.c */
@@ -213,10 +222,11 @@ int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
extern struct file_system_type nfs4_fs_type;
/* nfs4namespace.c */
-rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
struct nfs_fh *, struct nfs_fattr *);
+int nfs4_replace_transport(struct nfs_server *server,
+ const struct nfs4_fs_locations *locations);
/* nfs4proc.c */
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
@@ -231,6 +241,9 @@ extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait);
extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struct qstr *,
struct nfs4_fs_locations *, struct page *);
+extern int nfs4_proc_get_locations(struct inode *, struct nfs4_fs_locations *,
+ struct page *page, struct rpc_cred *);
+extern int nfs4_proc_fsid_present(struct inode *, struct rpc_cred *);
extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, struct qstr *,
struct nfs_fh *, struct nfs_fattr *);
extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
@@ -411,6 +424,8 @@ extern int nfs4_client_recover_expired_lease(struct nfs_client *clp);
extern void nfs4_schedule_state_manager(struct nfs_client *);
extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp);
extern int nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
+extern int nfs4_schedule_migration_recovery(const struct nfs_server *);
+extern void nfs4_schedule_lease_moved_recovery(struct nfs_client *);
extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
extern void nfs41_handle_server_scope(struct nfs_client *,
struct nfs41_server_scope **);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index a860ab566d6e..b4a160a405ce 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -197,6 +197,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
clp->cl_minorversion = cl_init->minorversion;
clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
+ clp->cl_mig_gen = 1;
return clp;
error:
@@ -368,6 +369,7 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
if (clp->cl_minorversion != 0)
__set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+ __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_GSS_KRB5I);
if (error == -EINVAL)
error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
@@ -924,7 +926,7 @@ static int nfs4_server_common_setup(struct nfs_server *server,
dprintk("Server FSID: %llx:%llx\n",
(unsigned long long) server->fsid.major,
(unsigned long long) server->fsid.minor);
- dprintk("Mount FH: %d\n", mntfh->size);
+ nfs_display_fhandle(mntfh, "Pseudo-fs root FH");
nfs4_session_set_rwsize(server);
@@ -947,9 +949,8 @@ out:
* Create a version 4 volume record
*/
static int nfs4_init_server(struct nfs_server *server,
- const struct nfs_parsed_mount_data *data)
+ struct nfs_parsed_mount_data *data)
{
- rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
struct rpc_timeout timeparms;
int error;
@@ -961,9 +962,15 @@ static int nfs4_init_server(struct nfs_server *server,
/* Initialise the client representation from the mount data */
server->flags = data->flags;
server->options = data->options;
+ server->auth_info = data->auth_info;
- if (data->auth_flavor_len >= 1)
- pseudoflavor = data->auth_flavors[0];
+ /* Use the first specified auth flavor. If this flavor isn't
+ * allowed by the server, use the SECINFO path to try the
+ * other specified flavors */
+ if (data->auth_info.flavor_len >= 1)
+ data->selected_flavor = data->auth_info.flavors[0];
+ else
+ data->selected_flavor = RPC_AUTH_UNIX;
/* Get a client record */
error = nfs4_set_client(server,
@@ -971,7 +978,7 @@ static int nfs4_init_server(struct nfs_server *server,
(const struct sockaddr *)&data->nfs_server.address,
data->nfs_server.addrlen,
data->client_address,
- pseudoflavor,
+ data->selected_flavor,
data->nfs_server.protocol,
&timeparms,
data->minorversion,
@@ -991,7 +998,8 @@ static int nfs4_init_server(struct nfs_server *server,
server->port = data->nfs_server.port;
- error = nfs_init_server_rpcclient(server, &timeparms, pseudoflavor);
+ error = nfs_init_server_rpcclient(server, &timeparms,
+ data->selected_flavor);
error:
/* Done */
@@ -1018,7 +1026,7 @@ struct nfs_server *nfs4_create_server(struct nfs_mount_info *mount_info,
if (!server)
return ERR_PTR(-ENOMEM);
- auth_probe = mount_info->parsed->auth_flavor_len < 1;
+ auth_probe = mount_info->parsed->auth_info.flavor_len < 1;
/* set up the general RPC client */
error = nfs4_init_server(server, mount_info->parsed);
@@ -1046,6 +1054,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
{
struct nfs_client *parent_client;
struct nfs_server *server, *parent_server;
+ bool auth_probe;
int error;
dprintk("--> nfs4_create_referral_server()\n");
@@ -1078,8 +1087,9 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
if (error < 0)
goto error;
- error = nfs4_server_common_setup(server, mntfh,
- !(parent_server->flags & NFS_MOUNT_SECFLAVOUR));
+ auth_probe = parent_server->auth_info.flavor_len < 1;
+
+ error = nfs4_server_common_setup(server, mntfh, auth_probe);
if (error < 0)
goto error;
@@ -1091,3 +1101,111 @@ error:
dprintk("<-- nfs4_create_referral_server() = error %d\n", error);
return ERR_PTR(error);
}
+
+/*
+ * Grab the destination's particulars, including lease expiry time.
+ *
+ * Returns zero if probe succeeded and retrieved FSID matches the FSID
+ * we have cached.
+ */
+static int nfs_probe_destination(struct nfs_server *server)
+{
+ struct inode *inode = server->super->s_root->d_inode;
+ struct nfs_fattr *fattr;
+ int error;
+
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ return -ENOMEM;
+
+ /* Sanity: the probe won't work if the destination server
+ * does not recognize the migrated FH. */
+ error = nfs_probe_fsinfo(server, NFS_FH(inode), fattr);
+
+ nfs_free_fattr(fattr);
+ return error;
+}
+
+/**
+ * nfs4_update_server - Move an nfs_server to a different nfs_client
+ *
+ * @server: represents FSID to be moved
+ * @hostname: new end-point's hostname
+ * @sap: new end-point's socket address
+ * @salen: size of "sap"
+ *
+ * The nfs_server must be quiescent before this function is invoked.
+ * Either its session is drained (NFSv4.1+), or its transport is
+ * plugged and drained (NFSv4.0).
+ *
+ * Returns zero on success, or a negative errno value.
+ */
+int nfs4_update_server(struct nfs_server *server, const char *hostname,
+ struct sockaddr *sap, size_t salen)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct rpc_clnt *clnt = server->client;
+ struct xprt_create xargs = {
+ .ident = clp->cl_proto,
+ .net = &init_net,
+ .dstaddr = sap,
+ .addrlen = salen,
+ .servername = hostname,
+ };
+ char buf[INET6_ADDRSTRLEN + 1];
+ struct sockaddr_storage address;
+ struct sockaddr *localaddr = (struct sockaddr *)&address;
+ int error;
+
+ dprintk("--> %s: move FSID %llx:%llx to \"%s\")\n", __func__,
+ (unsigned long long)server->fsid.major,
+ (unsigned long long)server->fsid.minor,
+ hostname);
+
+ error = rpc_switch_client_transport(clnt, &xargs, clnt->cl_timeout);
+ if (error != 0) {
+ dprintk("<-- %s(): rpc_switch_client_transport returned %d\n",
+ __func__, error);
+ goto out;
+ }
+
+ error = rpc_localaddr(clnt, localaddr, sizeof(address));
+ if (error != 0) {
+ dprintk("<-- %s(): rpc_localaddr returned %d\n",
+ __func__, error);
+ goto out;
+ }
+
+ error = -EAFNOSUPPORT;
+ if (rpc_ntop(localaddr, buf, sizeof(buf)) == 0) {
+ dprintk("<-- %s(): rpc_ntop returned %d\n",
+ __func__, error);
+ goto out;
+ }
+
+ nfs_server_remove_lists(server);
+ error = nfs4_set_client(server, hostname, sap, salen, buf,
+ clp->cl_rpcclient->cl_auth->au_flavor,
+ clp->cl_proto, clnt->cl_timeout,
+ clp->cl_minorversion, clp->cl_net);
+ nfs_put_client(clp);
+ if (error != 0) {
+ nfs_server_insert_lists(server);
+ dprintk("<-- %s(): nfs4_set_client returned %d\n",
+ __func__, error);
+ goto out;
+ }
+
+ if (server->nfs_client->cl_hostname == NULL)
+ server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL);
+ nfs_server_insert_lists(server);
+
+ error = nfs_probe_destination(server);
+ if (error < 0)
+ goto out;
+
+ dprintk("<-- %s() succeeded\n", __func__);
+
+out:
+ return error;
+}
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 77efaf15ec90..c34007ae921a 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -31,9 +31,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
* -EOPENSTALE. The VFS will retry the lookup/create/open.
*/
- dprintk("NFS: open file(%s/%s)\n",
- dentry->d_parent->d_name.name,
- dentry->d_name.name);
+ dprintk("NFS: open file(%pd2)\n", dentry);
if ((openflags & O_ACCMODE) == 3)
openflags--;
@@ -75,7 +73,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
nfs_file_set_open_context(filp, ctx);
- nfs_fscache_set_inode_cookie(inode, filp);
+ nfs_fscache_open_file(inode, filp);
err = 0;
out_put_ctx:
@@ -122,8 +120,8 @@ const struct file_operations nfs4_file_operations = {
.llseek = nfs_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = nfs_file_read,
- .aio_write = nfs_file_write,
+ .read_iter = nfs_file_read_iter,
+ .write_iter = nfs_file_write_iter,
.mmap = nfs_file_mmap,
.open = nfs4_file_open,
.flush = nfs_file_flush,
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 2288cd3c9278..4e7f05d3e9db 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -137,6 +137,7 @@ static size_t nfs_parse_server_name(char *string, size_t len,
/**
* nfs_find_best_sec - Find a security mechanism supported locally
+ * @server: NFS server struct
* @flavors: List of security tuples returned by SECINFO procedure
*
* Return the pseudoflavor of the first security mechanism in
@@ -145,7 +146,8 @@ static size_t nfs_parse_server_name(char *string, size_t len,
* is searched in the order returned from the server, per RFC 3530
* recommendation.
*/
-rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
+static rpc_authflavor_t nfs_find_best_sec(struct nfs_server *server,
+ struct nfs4_secinfo_flavors *flavors)
{
rpc_authflavor_t pseudoflavor;
struct nfs4_secinfo4 *secinfo;
@@ -160,12 +162,19 @@ rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
case RPC_AUTH_GSS:
pseudoflavor = rpcauth_get_pseudoflavor(secinfo->flavor,
&secinfo->flavor_info);
- if (pseudoflavor != RPC_AUTH_MAXFLAVOR)
+ /* make sure pseudoflavor matches sec= mount opt */
+ if (pseudoflavor != RPC_AUTH_MAXFLAVOR &&
+ nfs_auth_info_match(&server->auth_info,
+ pseudoflavor))
return pseudoflavor;
break;
}
}
+ /* if there were any sec= options then nothing matched */
+ if (server->auth_info.flavor_len > 0)
+ return -EPERM;
+
return RPC_AUTH_UNIX;
}
@@ -187,7 +196,7 @@ static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr
goto out;
}
- flavor = nfs_find_best_sec(flavors);
+ flavor = nfs_find_best_sec(NFS_SERVER(inode), flavors);
out:
put_page(page);
@@ -283,8 +292,7 @@ static struct vfsmount *nfs_follow_referral(struct dentry *dentry,
if (locations == NULL || locations->nlocations <= 0)
goto out;
- dprintk("%s: referral at %s/%s\n", __func__,
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ dprintk("%s: referral at %pd2\n", __func__, dentry);
page = (char *) __get_free_page(GFP_USER);
if (!page)
@@ -348,8 +356,8 @@ static struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *
mnt = ERR_PTR(-ENOENT);
parent = dget_parent(dentry);
- dprintk("%s: getting locations for %s/%s\n",
- __func__, parent->d_name.name, dentry->d_name.name);
+ dprintk("%s: getting locations for %pd2\n",
+ __func__, dentry);
err = nfs4_proc_fs_locations(client, parent->d_inode, &dentry->d_name, fs_locations, page);
dput(parent);
@@ -390,7 +398,7 @@ struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
if (client->cl_auth->au_flavor != flavor)
flavor = client->cl_auth->au_flavor;
- else if (!(server->flags & NFS_MOUNT_SECFLAVOUR)) {
+ else {
rpc_authflavor_t new = nfs4_negotiate_security(dir, name);
if ((int)new >= 0)
flavor = new;
@@ -400,3 +408,104 @@ out:
rpc_shutdown_client(client);
return mnt;
}
+
+/*
+ * Try one location from the fs_locations array.
+ *
+ * Returns zero on success, or a negative errno value.
+ */
+static int nfs4_try_replacing_one_location(struct nfs_server *server,
+ char *page, char *page2,
+ const struct nfs4_fs_location *location)
+{
+ const size_t addr_bufsize = sizeof(struct sockaddr_storage);
+ struct sockaddr *sap;
+ unsigned int s;
+ size_t salen;
+ int error;
+
+ sap = kmalloc(addr_bufsize, GFP_KERNEL);
+ if (sap == NULL)
+ return -ENOMEM;
+
+ error = -ENOENT;
+ for (s = 0; s < location->nservers; s++) {
+ const struct nfs4_string *buf = &location->servers[s];
+ char *hostname;
+
+ if (buf->len <= 0 || buf->len > PAGE_SIZE)
+ continue;
+
+ if (memchr(buf->data, IPV6_SCOPE_DELIMITER, buf->len) != NULL)
+ continue;
+
+ salen = nfs_parse_server_name(buf->data, buf->len,
+ sap, addr_bufsize, server);
+ if (salen == 0)
+ continue;
+ rpc_set_port(sap, NFS_PORT);
+
+ error = -ENOMEM;
+ hostname = kstrndup(buf->data, buf->len, GFP_KERNEL);
+ if (hostname == NULL)
+ break;
+
+ error = nfs4_update_server(server, hostname, sap, salen);
+ kfree(hostname);
+ if (error == 0)
+ break;
+ }
+
+ kfree(sap);
+ return error;
+}
+
+/**
+ * nfs4_replace_transport - set up transport to destination server
+ *
+ * @server: export being migrated
+ * @locations: fs_locations array
+ *
+ * Returns zero on success, or a negative errno value.
+ *
+ * The client tries all the entries in the "locations" array, in the
+ * order returned by the server, until one works or the end of the
+ * array is reached.
+ */
+int nfs4_replace_transport(struct nfs_server *server,
+ const struct nfs4_fs_locations *locations)
+{
+ char *page = NULL, *page2 = NULL;
+ int loc, error;
+
+ error = -ENOENT;
+ if (locations == NULL || locations->nlocations <= 0)
+ goto out;
+
+ error = -ENOMEM;
+ page = (char *) __get_free_page(GFP_USER);
+ if (!page)
+ goto out;
+ page2 = (char *) __get_free_page(GFP_USER);
+ if (!page2)
+ goto out;
+
+ for (loc = 0; loc < locations->nlocations; loc++) {
+ const struct nfs4_fs_location *location =
+ &locations->locations[loc];
+
+ if (location == NULL || location->nservers <= 0 ||
+ location->rootpath.ncomponents == 0)
+ continue;
+
+ error = nfs4_try_replacing_one_location(server, page,
+ page2, location);
+ if (error == 0)
+ break;
+ }
+
+out:
+ free_page((unsigned long)page);
+ free_page((unsigned long)page2);
+ return error;
+}
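Taken together, nfs4_replace_transport() and nfs4_try_replacing_one_location() try the returned locations strictly in the order the source server listed them, and within each location try its server strings in order, skipping empty strings and scoped IPv6 literals; the first endpoint that can be reached wins. A condensed user-space sketch of that selection order, with placeholder types standing in for nfs4_fs_locations and try_server() standing in for nfs4_update_server():

#include <stddef.h>
#include <string.h>

struct location  { const char **servers; int nservers; };
struct locations { struct location *locations; int nlocations; };

/* try_server() stands in for nfs4_update_server(); returns 0 on success. */
static int try_server(const char *hostname)
{
    (void)hostname;
    return -1;                           /* pretend the swap failed */
}

int replace_transport(const struct locations *locs)
{
    int error = -2;                      /* placeholder for -ENOENT */

    for (int loc = 0; loc < locs->nlocations; loc++) {
        const struct location *l = &locs->locations[loc];

        for (int s = 0; s < l->nservers; s++) {
            const char *name = l->servers[s];

            if (name == NULL || name[0] == '\0')
                continue;                /* skip empty entries */
            if (strchr(name, '%') != NULL)
                continue;                /* skip scoped IPv6 literals */

            error = try_server(name);
            if (error == 0)
                return 0;                /* first working server wins */
        }
    }
    return error;
}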
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d53d6785cba2..a2d91f392947 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -384,6 +384,14 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
case -NFS4ERR_STALE_CLIENTID:
nfs4_schedule_lease_recovery(clp);
goto wait_on_recovery;
+ case -NFS4ERR_MOVED:
+ ret = nfs4_schedule_migration_recovery(server);
+ if (ret < 0)
+ break;
+ goto wait_on_recovery;
+ case -NFS4ERR_LEASE_MOVED:
+ nfs4_schedule_lease_moved_recovery(clp);
+ goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
@@ -431,6 +439,8 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
return nfs4_map_errors(ret);
wait_on_recovery:
ret = nfs4_wait_clnt_recover(clp);
+ if (test_bit(NFS_MIG_FAILED, &server->mig_status))
+ return -EIO;
if (ret == 0)
exception->retry = 1;
return ret;
@@ -1318,31 +1328,24 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
int ret;
if (!data->rpc_done) {
- ret = data->rpc_status;
- goto err;
+ if (data->rpc_status) {
+ ret = data->rpc_status;
+ goto err;
+ }
+ /* cached opens have already been processed */
+ goto update;
}
- ret = -ESTALE;
- if (!(data->f_attr.valid & NFS_ATTR_FATTR_TYPE) ||
- !(data->f_attr.valid & NFS_ATTR_FATTR_FILEID) ||
- !(data->f_attr.valid & NFS_ATTR_FATTR_CHANGE))
- goto err;
-
- ret = -ENOMEM;
- state = nfs4_get_open_state(inode, data->owner);
- if (state == NULL)
- goto err;
-
ret = nfs_refresh_inode(inode, &data->f_attr);
if (ret)
goto err;
- nfs_setsecurity(inode, &data->f_attr, data->f_label);
-
if (data->o_res.delegation_type != 0)
nfs4_opendata_check_deleg(data, state);
+update:
update_open_stateid(state, &data->o_res.stateid, NULL,
data->o_arg.fmode);
+ atomic_inc(&state->count);
return state;
err:
@@ -1575,6 +1578,12 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
/* Don't recall a delegation if it was lost */
nfs4_schedule_lease_recovery(server->nfs_client);
return -EAGAIN;
+ case -NFS4ERR_MOVED:
+ nfs4_schedule_migration_recovery(server);
+ return -EAGAIN;
+ case -NFS4ERR_LEASE_MOVED:
+ nfs4_schedule_lease_moved_recovery(server->nfs_client);
+ return -EAGAIN;
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
@@ -2864,11 +2873,24 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
int status = -EPERM;
size_t i;
- for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
- status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
- if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
- continue;
- break;
+ if (server->auth_info.flavor_len > 0) {
+ /* try each flavor specified by user */
+ for (i = 0; i < server->auth_info.flavor_len; i++) {
+ status = nfs4_lookup_root_sec(server, fhandle, info,
+ server->auth_info.flavors[i]);
+ if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
+ continue;
+ break;
+ }
+ } else {
+ /* no flavors specified by user, try default list */
+ for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
+ status = nfs4_lookup_root_sec(server, fhandle, info,
+ flav_array[i]);
+ if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
+ continue;
+ break;
+ }
}
/*
@@ -2910,9 +2932,6 @@ int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
status = nfs4_lookup_root(server, fhandle, info);
if (status != -NFS4ERR_WRONGSEC)
break;
- /* Did user force a 'sec=' mount option? */
- if (server->flags & NFS_MOUNT_SECFLAVOUR)
- break;
default:
status = nfs4_do_find_root_sec(server, fhandle, info);
}
@@ -2981,11 +3000,16 @@ static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
status = nfs4_proc_fs_locations(client, dir, name, locations, page);
if (status != 0)
goto out;
- /* Make sure server returned a different fsid for the referral */
+
+ /*
+ * If the fsid didn't change, this is a migration event, not a
+ * referral. Cause us to drop into the exception handler, which
+ * will kick off migration recovery.
+ */
if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
dprintk("%s: server did not return a different fsid for"
" a referral at %s\n", __func__, name->name);
- status = -EIO;
+ status = -NFS4ERR_MOVED;
goto out;
}
/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
@@ -3165,9 +3189,6 @@ static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
err = -EPERM;
if (client != *clnt)
goto out;
- /* No security negotiation if the user specified 'sec=' */
- if (NFS_SERVER(dir)->flags & NFS_MOUNT_SECFLAVOUR)
- goto out;
client = nfs4_create_sec_client(client, dir, name);
if (IS_ERR(client))
return PTR_ERR(client);
@@ -3738,9 +3759,8 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
};
int status;
- dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
- dentry->d_parent->d_name.name,
- dentry->d_name.name,
+ dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
+ dentry,
(unsigned long long)cookie);
nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
res.pgbase = args.pgbase;
@@ -4221,7 +4241,13 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata)
unsigned long timestamp = data->timestamp;
trace_nfs4_renew_async(clp, task->tk_status);
- if (task->tk_status < 0) {
+ switch (task->tk_status) {
+ case 0:
+ break;
+ case -NFS4ERR_LEASE_MOVED:
+ nfs4_schedule_lease_moved_recovery(clp);
+ break;
+ default:
/* Unless we're shutting down, schedule state recovery! */
if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
return;
@@ -4575,7 +4601,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
struct nfs4_label label = {0, 0, buflen, buf};
u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
- struct nfs4_getattr_arg args = {
+ struct nfs4_getattr_arg arg = {
.fh = NFS_FH(inode),
.bitmask = bitmask,
};
@@ -4586,14 +4612,14 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
- .rpc_argp = &args,
+ .rpc_argp = &arg,
.rpc_resp = &res,
};
int ret;
nfs_fattr_init(&fattr);
- ret = rpc_call_sync(server->client, &msg, 0);
+ ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
if (ret)
return ret;
if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
@@ -4630,7 +4656,7 @@ static int _nfs4_do_set_security_label(struct inode *inode,
struct iattr sattr = {0};
struct nfs_server *server = NFS_SERVER(inode);
const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
- struct nfs_setattrargs args = {
+ struct nfs_setattrargs arg = {
.fh = NFS_FH(inode),
.iap = &sattr,
.server = server,
@@ -4644,14 +4670,14 @@ static int _nfs4_do_set_security_label(struct inode *inode,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
- .rpc_argp = &args,
+ .rpc_argp = &arg,
.rpc_resp = &res,
};
int status;
- nfs4_stateid_copy(&args.stateid, &zero_stateid);
+ nfs4_stateid_copy(&arg.stateid, &zero_stateid);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (status)
dprintk("%s failed: %d\n", __func__, status);
@@ -4735,17 +4761,24 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
if (state == NULL)
break;
if (nfs4_schedule_stateid_recovery(server, state) < 0)
- goto stateid_invalid;
+ goto recovery_failed;
goto wait_on_recovery;
case -NFS4ERR_EXPIRED:
if (state != NULL) {
if (nfs4_schedule_stateid_recovery(server, state) < 0)
- goto stateid_invalid;
+ goto recovery_failed;
}
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_STALE_CLIENTID:
nfs4_schedule_lease_recovery(clp);
goto wait_on_recovery;
+ case -NFS4ERR_MOVED:
+ if (nfs4_schedule_migration_recovery(server) < 0)
+ goto recovery_failed;
+ goto wait_on_recovery;
+ case -NFS4ERR_LEASE_MOVED:
+ nfs4_schedule_lease_moved_recovery(clp);
+ goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
@@ -4757,29 +4790,28 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
dprintk("%s ERROR %d, Reset session\n", __func__,
task->tk_status);
nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
- task->tk_status = 0;
- return -EAGAIN;
+ goto restart_call;
#endif /* CONFIG_NFS_V4_1 */
case -NFS4ERR_DELAY:
nfs_inc_server_stats(server, NFSIOS_DELAY);
case -NFS4ERR_GRACE:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
- task->tk_status = 0;
- return -EAGAIN;
case -NFS4ERR_RETRY_UNCACHED_REP:
case -NFS4ERR_OLD_STATEID:
- task->tk_status = 0;
- return -EAGAIN;
+ goto restart_call;
}
task->tk_status = nfs4_map_errors(task->tk_status);
return 0;
-stateid_invalid:
+recovery_failed:
task->tk_status = -EIO;
return 0;
wait_on_recovery:
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
+ if (test_bit(NFS_MIG_FAILED, &server->mig_status))
+ goto recovery_failed;
+restart_call:
task->tk_status = 0;
return -EAGAIN;
}
@@ -5106,6 +5138,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
status = 0;
}
request->fl_ops->fl_release_private(request);
+ request->fl_ops = NULL;
out:
return status;
}
@@ -5779,6 +5812,7 @@ struct nfs_release_lockowner_data {
struct nfs_release_lockowner_args args;
struct nfs4_sequence_args seq_args;
struct nfs4_sequence_res seq_res;
+ unsigned long timestamp;
};
static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
@@ -5786,12 +5820,27 @@ static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata
struct nfs_release_lockowner_data *data = calldata;
nfs40_setup_sequence(data->server,
&data->seq_args, &data->seq_res, task);
+ data->timestamp = jiffies;
}
static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
{
struct nfs_release_lockowner_data *data = calldata;
+ struct nfs_server *server = data->server;
+
nfs40_sequence_done(task, &data->seq_res);
+
+ switch (task->tk_status) {
+ case 0:
+ renew_lease(server, data->timestamp);
+ break;
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_LEASE_MOVED:
+ case -NFS4ERR_DELAY:
+ if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN)
+ rpc_restart_call_prepare(task);
+ }
}
static void nfs4_release_lockowner_release(void *calldata)
@@ -5990,6 +6039,283 @@ int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
return err;
}
+/*
+ * This operation also signals the server that this client is
+ * performing migration recovery. The server can stop returning
+ * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
+ * appended to this compound to identify the client ID which is
+ * performing recovery.
+ */
+static int _nfs40_proc_get_locations(struct inode *inode,
+ struct nfs4_fs_locations *locations,
+ struct page *page, struct rpc_cred *cred)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct rpc_clnt *clnt = server->client;
+ u32 bitmask[2] = {
+ [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
+ };
+ struct nfs4_fs_locations_arg args = {
+ .clientid = server->nfs_client->cl_clientid,
+ .fh = NFS_FH(inode),
+ .page = page,
+ .bitmask = bitmask,
+ .migration = 1, /* skip LOOKUP */
+ .renew = 1, /* append RENEW */
+ };
+ struct nfs4_fs_locations_res res = {
+ .fs_locations = locations,
+ .migration = 1,
+ .renew = 1,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+ unsigned long now = jiffies;
+ int status;
+
+ nfs_fattr_init(&locations->fattr);
+ locations->server = server;
+ locations->nlocations = 0;
+
+ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
+ nfs4_set_sequence_privileged(&args.seq_args);
+ status = nfs4_call_sync_sequence(clnt, server, &msg,
+ &args.seq_args, &res.seq_res);
+ if (status)
+ return status;
+
+ renew_lease(server, now);
+ return 0;
+}
+
+#ifdef CONFIG_NFS_V4_1
+
+/*
+ * This operation also signals the server that this client is
+ * performing migration recovery. The server can stop asserting
+ * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
+ * performing this operation is identified in the SEQUENCE
+ * operation in this compound.
+ *
+ * When the client supports GETATTR(fs_locations_info), it can
+ * be plumbed in here.
+ */
+static int _nfs41_proc_get_locations(struct inode *inode,
+ struct nfs4_fs_locations *locations,
+ struct page *page, struct rpc_cred *cred)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct rpc_clnt *clnt = server->client;
+ u32 bitmask[2] = {
+ [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
+ };
+ struct nfs4_fs_locations_arg args = {
+ .fh = NFS_FH(inode),
+ .page = page,
+ .bitmask = bitmask,
+ .migration = 1, /* skip LOOKUP */
+ };
+ struct nfs4_fs_locations_res res = {
+ .fs_locations = locations,
+ .migration = 1,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+ int status;
+
+ nfs_fattr_init(&locations->fattr);
+ locations->server = server;
+ locations->nlocations = 0;
+
+ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
+ nfs4_set_sequence_privileged(&args.seq_args);
+ status = nfs4_call_sync_sequence(clnt, server, &msg,
+ &args.seq_args, &res.seq_res);
+ if (status == NFS4_OK &&
+ res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
+ status = -NFS4ERR_LEASE_MOVED;
+ return status;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+/**
+ * nfs4_proc_get_locations - discover locations for a migrated FSID
+ * @inode: inode on FSID that is migrating
+ * @locations: result of query
+ * @page: buffer
+ * @cred: credential to use for this operation
+ *
+ * Returns NFS4_OK on success, a negative NFS4ERR status code if the
+ * operation failed, or a negative errno if a local error occurred.
+ *
+ * On success, "locations" is filled in, but if the server has
+ * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
+ * asserted.
+ *
+ * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
+ * from this client that require migration recovery.
+ */
+int nfs4_proc_get_locations(struct inode *inode,
+ struct nfs4_fs_locations *locations,
+ struct page *page, struct rpc_cred *cred)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_client *clp = server->nfs_client;
+ const struct nfs4_mig_recovery_ops *ops =
+ clp->cl_mvops->mig_recovery_ops;
+ struct nfs4_exception exception = { };
+ int status;
+
+ dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
+ (unsigned long long)server->fsid.major,
+ (unsigned long long)server->fsid.minor,
+ clp->cl_hostname);
+ nfs_display_fhandle(NFS_FH(inode), __func__);
+
+ do {
+ status = ops->get_locations(inode, locations, page, cred);
+ if (status != -NFS4ERR_DELAY)
+ break;
+ nfs4_handle_exception(server, status, &exception);
+ } while (exception.retry);
+ return status;
+}
+
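Note that the wrapper above retries only NFS4ERR_DELAY; any other status is handed straight back to the state manager. A minimal user-space sketch of that narrow retry policy (the constant and the sleep stand in for the kernel's negated NFS4ERR_DELAY and rpc_delay()):

#include <stdio.h>
#include <unistd.h>

#define NFS4ERR_DELAY (-10008)   /* 10008 on the wire; the kernel stores it negated */

/* stands in for ops->get_locations() / ops->fsid_present() */
static int do_probe(int attempt)
{
    return attempt < 2 ? NFS4ERR_DELAY : 0;   /* succeed on the third try */
}

int main(void)
{
    int status, attempt = 0;

    for (;;) {
        status = do_probe(attempt++);
        if (status != NFS4ERR_DELAY)
            break;               /* success or a hard error: stop retrying */
        sleep(1);                /* the kernel uses rpc_delay() instead */
    }

    printf("probe finished with status %d\n", status);
    return 0;
}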
+/*
+ * This operation also signals the server that this client is
+ * performing "lease moved" recovery. The server can stop
+ * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
+ * is appended to this compound to identify the client ID which is
+ * performing recovery.
+ */
+static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ struct rpc_clnt *clnt = server->client;
+ struct nfs4_fsid_present_arg args = {
+ .fh = NFS_FH(inode),
+ .clientid = clp->cl_clientid,
+ .renew = 1, /* append RENEW */
+ };
+ struct nfs4_fsid_present_res res = {
+ .renew = 1,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+ unsigned long now = jiffies;
+ int status;
+
+ res.fh = nfs_alloc_fhandle();
+ if (res.fh == NULL)
+ return -ENOMEM;
+
+ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
+ nfs4_set_sequence_privileged(&args.seq_args);
+ status = nfs4_call_sync_sequence(clnt, server, &msg,
+ &args.seq_args, &res.seq_res);
+ nfs_free_fhandle(res.fh);
+ if (status)
+ return status;
+
+ do_renew_lease(clp, now);
+ return 0;
+}
+
+#ifdef CONFIG_NFS_V4_1
+
+/*
+ * This operation also signals the server that this client is
+ * performing "lease moved" recovery. The server can stop asserting
+ * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
+ * this operation is identified in the SEQUENCE operation in this
+ * compound.
+ */
+static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct rpc_clnt *clnt = server->client;
+ struct nfs4_fsid_present_arg args = {
+ .fh = NFS_FH(inode),
+ };
+ struct nfs4_fsid_present_res res = {
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+ int status;
+
+ res.fh = nfs_alloc_fhandle();
+ if (res.fh == NULL)
+ return -ENOMEM;
+
+ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
+ nfs4_set_sequence_privileged(&args.seq_args);
+ status = nfs4_call_sync_sequence(clnt, server, &msg,
+ &args.seq_args, &res.seq_res);
+ nfs_free_fhandle(res.fh);
+ if (status == NFS4_OK &&
+ res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
+ status = -NFS4ERR_LEASE_MOVED;
+ return status;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+/**
+ * nfs4_proc_fsid_present - Is this FSID present or absent on server?
+ * @inode: inode on FSID to check
+ * @cred: credential to use for this operation
+ *
+ * Server indicates whether the FSID is present, moved, or not
+ * recognized. This operation is necessary to clear a LEASE_MOVED
+ * condition for this client ID.
+ *
+ * Returns NFS4_OK if the FSID is present on this server,
+ * -NFS4ERR_MOVED if the FSID is no longer present, a negative
+ * NFS4ERR code if some error occurred on the server, or a
+ * negative errno if a local failure occurred.
+ */
+int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_client *clp = server->nfs_client;
+ const struct nfs4_mig_recovery_ops *ops =
+ clp->cl_mvops->mig_recovery_ops;
+ struct nfs4_exception exception = { };
+ int status;
+
+ dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
+ (unsigned long long)server->fsid.major,
+ (unsigned long long)server->fsid.minor,
+ clp->cl_hostname);
+ nfs_display_fhandle(NFS_FH(inode), __func__);
+
+ do {
+ status = ops->fsid_present(inode, cred);
+ if (status != -NFS4ERR_DELAY)
+ break;
+ nfs4_handle_exception(server, status, &exception);
+ } while (exception.retry);
+ return status;
+}
+
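How a caller reads the result: zero means the FSID is still served by this endpoint, -NFS4ERR_MOVED means it has migrated and the fs_locations probe should follow, and anything else is a failed probe. A small sketch of that mapping (the numeric value of NFS4ERR_MOVED follows RFC 3530 and is assumed here):

/* NFS4ERR_MOVED is 10019 on the wire (RFC 3530); the kernel negates it. */
#define NFS4ERR_MOVED 10019

const char *fsid_state(int status)
{
    if (status == 0)
        return "FSID is still present on this server";
    if (status == -NFS4ERR_MOVED)
        return "FSID has migrated: fetch fs_locations and follow it";
    return "probe failed (server error or local errno)";
}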
/**
* If 'use_integrity' is true and the state management nfs_client
* cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
@@ -6276,8 +6602,14 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
struct nfs41_exchange_id_args args = {
.verifier = &verifier,
.client = clp,
+#ifdef CONFIG_NFS_V4_1_MIGRATION
.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
- EXCHGID4_FLAG_BIND_PRINC_STATEID,
+ EXCHGID4_FLAG_BIND_PRINC_STATEID |
+ EXCHGID4_FLAG_SUPP_MOVED_MIGR,
+#else
+ .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
+ EXCHGID4_FLAG_BIND_PRINC_STATEID,
+#endif
};
struct nfs41_exchange_id_res res = {
0
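The extra EXCHGID4_FLAG_SUPP_MOVED_MIGR bit is what tells an NFSv4.1 server that this client can follow migrated filesystems; it is a build-time choice rather than a mount option. Assuming the Kconfig switch introduced alongside this series, enabling it amounts to:

# kernel .config fragment (or select it under NFS client support in menuconfig)
CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_1_MIGRATION=y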
@@ -7616,6 +7948,9 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
break;
}
+ if (!nfs_auth_info_match(&server->auth_info, flavor))
+ flavor = RPC_AUTH_MAXFLAVOR;
+
if (flavor != RPC_AUTH_MAXFLAVOR) {
err = nfs4_lookup_root_sec(server, fhandle,
info, flavor);
@@ -7887,6 +8222,18 @@ static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
};
#endif
+static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
+ .get_locations = _nfs40_proc_get_locations,
+ .fsid_present = _nfs40_proc_fsid_present,
+};
+
+#if defined(CONFIG_NFS_V4_1)
+static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
+ .get_locations = _nfs41_proc_get_locations,
+ .fsid_present = _nfs41_proc_fsid_present,
+};
+#endif /* CONFIG_NFS_V4_1 */
+
static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
.minor_version = 0,
.init_caps = NFS_CAP_READDIRPLUS
@@ -7902,6 +8249,7 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
.state_renewal_ops = &nfs40_state_renewal_ops,
+ .mig_recovery_ops = &nfs40_mig_recovery_ops,
};
#if defined(CONFIG_NFS_V4_1)
@@ -7922,6 +8270,7 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
.state_renewal_ops = &nfs41_state_renewal_ops,
+ .mig_recovery_ops = &nfs41_mig_recovery_ops,
};
#endif
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index cc14cbb78b73..c8e729deb4f7 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -239,8 +239,6 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
}
}
-#if defined(CONFIG_NFS_V4_1)
-
static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl)
{
set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
@@ -270,6 +268,8 @@ static int nfs4_begin_drain_session(struct nfs_client *clp)
return nfs4_drain_slot_tbl(&ses->fc_slot_table);
}
+#if defined(CONFIG_NFS_V4_1)
+
static int nfs41_setup_state_renewal(struct nfs_client *clp)
{
int status;
@@ -1197,20 +1197,74 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp)
}
EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);
+/**
+ * nfs4_schedule_migration_recovery - trigger migration recovery
+ *
+ * @server: FSID that is migrating
+ *
+ * Returns zero if recovery has started, otherwise a negative NFS4ERR
+ * value.
+ */
+int nfs4_schedule_migration_recovery(const struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+
+ if (server->fh_expire_type != NFS4_FH_PERSISTENT) {
+ pr_err("NFS: volatile file handles not supported (server %s)\n",
+ clp->cl_hostname);
+ return -NFS4ERR_IO;
+ }
+
+ if (test_bit(NFS_MIG_FAILED, &server->mig_status))
+ return -NFS4ERR_IO;
+
+ dprintk("%s: scheduling migration recovery for (%llx:%llx) on %s\n",
+ __func__,
+ (unsigned long long)server->fsid.major,
+ (unsigned long long)server->fsid.minor,
+ clp->cl_hostname);
+
+ set_bit(NFS_MIG_IN_TRANSITION,
+ &((struct nfs_server *)server)->mig_status);
+ set_bit(NFS4CLNT_MOVED, &clp->cl_state);
+
+ nfs4_schedule_state_manager(clp);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nfs4_schedule_migration_recovery);
+
+/**
+ * nfs4_schedule_lease_moved_recovery - start lease-moved recovery
+ *
+ * @clp: server to check for moved leases
+ *
+ */
+void nfs4_schedule_lease_moved_recovery(struct nfs_client *clp)
+{
+ dprintk("%s: scheduling lease-moved recovery for client ID %llx on %s\n",
+ __func__, clp->cl_clientid, clp->cl_hostname);
+
+ set_bit(NFS4CLNT_LEASE_MOVED, &clp->cl_state);
+ nfs4_schedule_state_manager(clp);
+}
+EXPORT_SYMBOL_GPL(nfs4_schedule_lease_moved_recovery);
+
int nfs4_wait_clnt_recover(struct nfs_client *clp)
{
int res;
might_sleep();
+ atomic_inc(&clp->cl_count);
res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
nfs_wait_bit_killable, TASK_KILLABLE);
if (res)
- return res;
-
+ goto out;
if (clp->cl_cons_state < 0)
- return clp->cl_cons_state;
- return 0;
+ res = clp->cl_cons_state;
+out:
+ nfs_put_client(clp);
+ return res;
}
int nfs4_client_recover_expired_lease(struct nfs_client *clp)
@@ -1375,8 +1429,8 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
goto out;
default:
- printk(KERN_ERR "NFS: %s: unhandled error %d. "
- "Zeroing state\n", __func__, status);
+ printk(KERN_ERR "NFS: %s: unhandled error %d\n",
+ __func__, status);
case -ENOMEM:
case -NFS4ERR_DENIED:
case -NFS4ERR_RECLAIM_BAD:
@@ -1422,7 +1476,7 @@ restart:
if (status >= 0) {
status = nfs4_reclaim_locks(state, ops);
if (status >= 0) {
- if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) {
+ if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) {
spin_lock(&state->state_lock);
list_for_each_entry(lock, &state->lock_states, ls_locks) {
if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
@@ -1439,15 +1493,12 @@ restart:
}
switch (status) {
default:
- printk(KERN_ERR "NFS: %s: unhandled error %d. "
- "Zeroing state\n", __func__, status);
+ printk(KERN_ERR "NFS: %s: unhandled error %d\n",
+ __func__, status);
case -ENOENT:
case -ENOMEM:
case -ESTALE:
- /*
- * Open state on this file cannot be recovered
- * All we can do is revert to using the zero stateid.
- */
+ /* Open state on this file cannot be recovered */
nfs4_state_mark_recovery_failed(state, status);
break;
case -EAGAIN:
@@ -1628,7 +1679,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
nfs4_state_end_reclaim_reboot(clp);
break;
case -NFS4ERR_STALE_CLIENTID:
- case -NFS4ERR_LEASE_MOVED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
nfs4_state_clear_reclaim_reboot(clp);
nfs4_state_start_reclaim_reboot(clp);
@@ -1829,6 +1879,168 @@ static int nfs4_purge_lease(struct nfs_client *clp)
return 0;
}
+/*
+ * Try remote migration of one FSID from a source server to a
+ * destination server. The source server provides a list of
+ * potential destinations.
+ *
+ * Returns zero or a negative NFS4ERR status code.
+ */
+static int nfs4_try_migration(struct nfs_server *server, struct rpc_cred *cred)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_fs_locations *locations = NULL;
+ struct inode *inode;
+ struct page *page;
+ int status, result;
+
+ dprintk("--> %s: FSID %llx:%llx on \"%s\"\n", __func__,
+ (unsigned long long)server->fsid.major,
+ (unsigned long long)server->fsid.minor,
+ clp->cl_hostname);
+
+ result = 0;
+ page = alloc_page(GFP_KERNEL);
+ locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
+ if (page == NULL || locations == NULL) {
+ dprintk("<-- %s: no memory\n", __func__);
+ goto out;
+ }
+
+ inode = server->super->s_root->d_inode;
+ result = nfs4_proc_get_locations(inode, locations, page, cred);
+ if (result) {
+ dprintk("<-- %s: failed to retrieve fs_locations: %d\n",
+ __func__, result);
+ goto out;
+ }
+
+ result = -NFS4ERR_NXIO;
+ if (!(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS)) {
+ dprintk("<-- %s: No fs_locations data, migration skipped\n",
+ __func__);
+ goto out;
+ }
+
+ nfs4_begin_drain_session(clp);
+
+ status = nfs4_replace_transport(server, locations);
+ if (status != 0) {
+ dprintk("<-- %s: failed to replace transport: %d\n",
+ __func__, status);
+ goto out;
+ }
+
+ result = 0;
+ dprintk("<-- %s: migration succeeded\n", __func__);
+
+out:
+ if (page != NULL)
+ __free_page(page);
+ kfree(locations);
+ if (result) {
+ pr_err("NFS: migration recovery failed (server %s)\n",
+ clp->cl_hostname);
+ set_bit(NFS_MIG_FAILED, &server->mig_status);
+ }
+ return result;
+}
+
+/*
+ * Returns zero or a negative NFS4ERR status code.
+ */
+static int nfs4_handle_migration(struct nfs_client *clp)
+{
+ const struct nfs4_state_maintenance_ops *ops =
+ clp->cl_mvops->state_renewal_ops;
+ struct nfs_server *server;
+ struct rpc_cred *cred;
+
+ dprintk("%s: migration reported on \"%s\"\n", __func__,
+ clp->cl_hostname);
+
+ spin_lock(&clp->cl_lock);
+ cred = ops->get_state_renewal_cred_locked(clp);
+ spin_unlock(&clp->cl_lock);
+ if (cred == NULL)
+ return -NFS4ERR_NOENT;
+
+ clp->cl_mig_gen++;
+restart:
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ int status;
+
+ if (server->mig_gen == clp->cl_mig_gen)
+ continue;
+ server->mig_gen = clp->cl_mig_gen;
+
+ if (!test_and_clear_bit(NFS_MIG_IN_TRANSITION,
+ &server->mig_status))
+ continue;
+
+ rcu_read_unlock();
+ status = nfs4_try_migration(server, cred);
+ if (status < 0) {
+ put_rpccred(cred);
+ return status;
+ }
+ goto restart;
+ }
+ rcu_read_unlock();
+ put_rpccred(cred);
+ return 0;
+}
+
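The cl_mig_gen/mig_gen stamps above exist because the walk has to drop the RCU read lock to do blocking recovery and then restart from the head of cl_superblocks; the generation number guarantees each nfs_server is visited at most once per pass. A self-contained sketch of that idiom over a plain array (no RCU, purely illustrative):

#include <stdbool.h>

struct server { unsigned int mig_gen; bool needs_recovery; };

/* Visit each server at most once per pass, even if the scan restarts. */
void handle_pass(struct server *servers, int nr, unsigned int *cl_mig_gen)
{
    unsigned int gen = ++(*cl_mig_gen);

restart:
    for (int i = 0; i < nr; i++) {
        struct server *s = &servers[i];

        if (s->mig_gen == gen)
            continue;            /* already seen in this pass */
        s->mig_gen = gen;

        if (!s->needs_recovery)
            continue;

        /* ...blocking recovery work would happen here... */
        goto restart;            /* rescan from the top, as the kernel must
                                    after dropping the RCU read lock */
    }
}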
+/*
+ * Test each nfs_server on the clp's cl_superblocks list to see
+ * if it's moved to another server. Stop when the server no longer
+ * returns NFS4ERR_LEASE_MOVED.
+ */
+static int nfs4_handle_lease_moved(struct nfs_client *clp)
+{
+ const struct nfs4_state_maintenance_ops *ops =
+ clp->cl_mvops->state_renewal_ops;
+ struct nfs_server *server;
+ struct rpc_cred *cred;
+
+ dprintk("%s: lease moved reported on \"%s\"\n", __func__,
+ clp->cl_hostname);
+
+ spin_lock(&clp->cl_lock);
+ cred = ops->get_state_renewal_cred_locked(clp);
+ spin_unlock(&clp->cl_lock);
+ if (cred == NULL)
+ return -NFS4ERR_NOENT;
+
+ clp->cl_mig_gen++;
+restart:
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ struct inode *inode;
+ int status;
+
+ if (server->mig_gen == clp->cl_mig_gen)
+ continue;
+ server->mig_gen = clp->cl_mig_gen;
+
+ rcu_read_unlock();
+
+ inode = server->super->s_root->d_inode;
+ status = nfs4_proc_fsid_present(inode, cred);
+ if (status != -NFS4ERR_MOVED)
+ goto restart; /* wasn't this one */
+ if (nfs4_try_migration(server, cred) == -NFS4ERR_LEASE_MOVED)
+ goto restart; /* there are more */
+ goto out;
+ }
+ rcu_read_unlock();
+
+out:
+ put_rpccred(cred);
+ return 0;
+}
+
/**
* nfs4_discover_server_trunking - Detect server IP address trunking
*
@@ -2017,9 +2229,10 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
nfs41_handle_server_reboot(clp);
if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
- SEQ4_STATUS_ADMIN_STATE_REVOKED |
- SEQ4_STATUS_LEASE_MOVED))
+ SEQ4_STATUS_ADMIN_STATE_REVOKED))
nfs41_handle_state_revoked(clp);
+ if (flags & SEQ4_STATUS_LEASE_MOVED)
+ nfs4_schedule_lease_moved_recovery(clp);
if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
nfs41_handle_recallable_state_revoked(clp);
if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT)
@@ -2157,7 +2370,20 @@ static void nfs4_state_manager(struct nfs_client *clp)
status = nfs4_check_lease(clp);
if (status < 0)
goto out_error;
- continue;
+ }
+
+ if (test_and_clear_bit(NFS4CLNT_MOVED, &clp->cl_state)) {
+ section = "migration";
+ status = nfs4_handle_migration(clp);
+ if (status < 0)
+ goto out_error;
+ }
+
+ if (test_and_clear_bit(NFS4CLNT_LEASE_MOVED, &clp->cl_state)) {
+ section = "lease moved";
+ status = nfs4_handle_lease_moved(clp);
+ if (status < 0)
+ goto out_error;
}
/* First recover reboot state... */
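Also worth noting in the state-manager hunk: the lease-check branch no longer restarts the loop, so migration (NFS4CLNT_MOVED) and lease-moved (NFS4CLNT_LEASE_MOVED) recovery now run in the same pass, after the lease check and before reboot-state reclaim. A tiny user-space sketch of that ordered test-and-clear dispatch (the bit names are illustrative only):

#include <stdio.h>

enum { CLNT_CHECK_LEASE, CLNT_MOVED, CLNT_LEASE_MOVED, CLNT_RECLAIM_REBOOT };

static unsigned long state;

static int test_and_clear(int bit)
{
    unsigned long mask = 1UL << bit;
    int was_set = (state & mask) != 0;

    state &= ~mask;
    return was_set;
}

int main(void)
{
    state = (1UL << CLNT_MOVED) | (1UL << CLNT_RECLAIM_REBOOT);

    /* One pass of the manager loop: each pending condition is handled
     * in a fixed order before falling through to reclaim. */
    if (test_and_clear(CLNT_CHECK_LEASE))
        printf("checking lease\n");
    if (test_and_clear(CLNT_MOVED))
        printf("running migration recovery\n");
    if (test_and_clear(CLNT_LEASE_MOVED))
        printf("running lease-moved recovery\n");
    if (test_and_clear(CLNT_RECLAIM_REBOOT))
        printf("reclaiming reboot state\n");
    return 0;
}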
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index e26acdd1a645..65ab0a0ca1c4 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -261,9 +261,9 @@ struct dentry *nfs4_try_mount(int flags, const char *dev_name,
res = nfs_follow_remote_path(root_mnt, export_path);
- dfprintk(MOUNT, "<-- nfs4_try_mount() = %ld%s\n",
- IS_ERR(res) ? PTR_ERR(res) : 0,
- IS_ERR(res) ? " [error]" : "");
+ dfprintk(MOUNT, "<-- nfs4_try_mount() = %d%s\n",
+ PTR_ERR_OR_ZERO(res),
+ IS_ERR(res) ? " [error]" : "");
return res;
}
@@ -319,9 +319,9 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
data->mnt_path = export_path;
res = nfs_follow_remote_path(root_mnt, export_path);
- dprintk("<-- nfs4_referral_mount() = %ld%s\n",
- IS_ERR(res) ? PTR_ERR(res) : 0,
- IS_ERR(res) ? " [error]" : "");
+ dprintk("<-- nfs4_referral_mount() = %d%s\n",
+ PTR_ERR_OR_ZERO(res),
+ IS_ERR(res) ? " [error]" : "");
return res;
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 79210d23f607..f903389d90f1 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -595,11 +595,13 @@ static int nfs4_stat_to_errno(int);
#define NFS4_enc_getattr_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
- encode_getattr_maxsz)
+ encode_getattr_maxsz + \
+ encode_renew_maxsz)
#define NFS4_dec_getattr_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
- decode_getattr_maxsz)
+ decode_getattr_maxsz + \
+ decode_renew_maxsz)
#define NFS4_enc_lookup_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -736,13 +738,15 @@ static int nfs4_stat_to_errno(int);
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lookup_maxsz + \
- encode_fs_locations_maxsz)
+ encode_fs_locations_maxsz + \
+ encode_renew_maxsz)
#define NFS4_dec_fs_locations_sz \
(compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lookup_maxsz + \
- decode_fs_locations_maxsz)
+ decode_fs_locations_maxsz + \
+ decode_renew_maxsz)
#define NFS4_enc_secinfo_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -751,6 +755,18 @@ static int nfs4_stat_to_errno(int);
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_secinfo_maxsz)
+#define NFS4_enc_fsid_present_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_getfh_maxsz + \
+ encode_renew_maxsz)
+#define NFS4_dec_fsid_present_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_getfh_maxsz + \
+ decode_renew_maxsz)
#if defined(CONFIG_NFS_V4_1)
#define NFS4_enc_bind_conn_to_session_sz \
(compound_encode_hdr_maxsz + \
@@ -2687,11 +2703,20 @@ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
- encode_putfh(xdr, args->dir_fh, &hdr);
- encode_lookup(xdr, args->name, &hdr);
- replen = hdr.replen; /* get the attribute into args->page */
- encode_fs_locations(xdr, args->bitmask, &hdr);
+ if (args->migration) {
+ encode_putfh(xdr, args->fh, &hdr);
+ replen = hdr.replen;
+ encode_fs_locations(xdr, args->bitmask, &hdr);
+ if (args->renew)
+ encode_renew(xdr, args->clientid, &hdr);
+ } else {
+ encode_putfh(xdr, args->dir_fh, &hdr);
+ encode_lookup(xdr, args->name, &hdr);
+ replen = hdr.replen;
+ encode_fs_locations(xdr, args->bitmask, &hdr);
+ }
+ /* Set up reply kvec to capture returned fs_locations array. */
xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page,
0, PAGE_SIZE);
encode_nops(&hdr);
@@ -2715,6 +2740,26 @@ static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
encode_nops(&hdr);
}
+/*
+ * Encode FSID_PRESENT request
+ */
+static void nfs4_xdr_enc_fsid_present(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_fsid_present_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_getfh(xdr, &hdr);
+ if (args->renew)
+ encode_renew(xdr, args->clientid, &hdr);
+ encode_nops(&hdr);
+}
+
#if defined(CONFIG_NFS_V4_1)
/*
* BIND_CONN_TO_SESSION request
@@ -6824,13 +6869,26 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_lookup(xdr);
- if (status)
- goto out;
- xdr_enter_page(xdr, PAGE_SIZE);
- status = decode_getfattr_generic(xdr, &res->fs_locations->fattr,
+ if (res->migration) {
+ xdr_enter_page(xdr, PAGE_SIZE);
+ status = decode_getfattr_generic(xdr,
+ &res->fs_locations->fattr,
NULL, res->fs_locations,
NULL, res->fs_locations->server);
+ if (status)
+ goto out;
+ if (res->renew)
+ status = decode_renew(xdr);
+ } else {
+ status = decode_lookup(xdr);
+ if (status)
+ goto out;
+ xdr_enter_page(xdr, PAGE_SIZE);
+ status = decode_getfattr_generic(xdr,
+ &res->fs_locations->fattr,
+ NULL, res->fs_locations,
+ NULL, res->fs_locations->server);
+ }
out:
return status;
}
@@ -6859,6 +6917,34 @@ out:
return status;
}
+/*
+ * Decode FSID_PRESENT response
+ */
+static int nfs4_xdr_dec_fsid_present(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_fsid_present_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_getfh(xdr, res->fh);
+ if (status)
+ goto out;
+ if (res->renew)
+ status = decode_renew(xdr);
+out:
+ return status;
+}
+
#if defined(CONFIG_NFS_V4_1)
/*
* Decode BIND_CONN_TO_SESSION response
@@ -7373,6 +7459,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
PROC(SECINFO, enc_secinfo, dec_secinfo),
+ PROC(FSID_PRESENT, enc_fsid_present, dec_fsid_present),
#if defined(CONFIG_NFS_V4_1)
PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
PROC(CREATE_SESSION, enc_create_session, dec_create_session),
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index a8f57c728df5..fddbba2d9eff 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -235,7 +235,7 @@ nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
};
int status = -ENOMEM;
- dprintk("NFS call create %s\n", dentry->d_name.name);
+ dprintk("NFS call create %pd\n", dentry);
data = nfs_alloc_createdata(dir, dentry, sattr);
if (data == NULL)
goto out;
@@ -265,7 +265,7 @@ nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
umode_t mode;
int status = -ENOMEM;
- dprintk("NFS call mknod %s\n", dentry->d_name.name);
+ dprintk("NFS call mknod %pd\n", dentry);
mode = sattr->ia_mode;
if (S_ISFIFO(mode)) {
@@ -423,7 +423,7 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
};
int status = -ENAMETOOLONG;
- dprintk("NFS call symlink %s\n", dentry->d_name.name);
+ dprintk("NFS call symlink %pd\n", dentry);
if (len > NFS2_MAXPATHLEN)
goto out;
@@ -462,7 +462,7 @@ nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
};
int status = -ENOMEM;
- dprintk("NFS call mkdir %s\n", dentry->d_name.name);
+ dprintk("NFS call mkdir %pd\n", dentry);
data = nfs_alloc_createdata(dir, dentry, sattr);
if (data == NULL)
goto out;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index a03b9c6f9489..317d6fc2160e 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -497,7 +497,8 @@ static const char *nfs_pseudoflavour_to_name(rpc_authflavor_t flavour)
static const struct {
rpc_authflavor_t flavour;
const char *str;
- } sec_flavours[] = {
+ } sec_flavours[NFS_AUTH_INFO_MAX_FLAVORS] = {
+ /* update NFS_AUTH_INFO_MAX_FLAVORS when this list changes! */
{ RPC_AUTH_NULL, "null" },
{ RPC_AUTH_UNIX, "sys" },
{ RPC_AUTH_GSS_KRB5, "krb5" },
@@ -923,8 +924,7 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
data->mount_server.port = NFS_UNSPEC_PORT;
data->nfs_server.port = NFS_UNSPEC_PORT;
data->nfs_server.protocol = XPRT_TRANSPORT_TCP;
- data->auth_flavors[0] = RPC_AUTH_MAXFLAVOR;
- data->auth_flavor_len = 0;
+ data->selected_flavor = RPC_AUTH_MAXFLAVOR;
data->minorversion = 0;
data->need_mount = true;
data->net = current->nsproxy->net_ns;
@@ -1019,12 +1019,51 @@ static void nfs_set_mount_transport_protocol(struct nfs_parsed_mount_data *mnt)
}
}
-static void nfs_set_auth_parsed_mount_data(struct nfs_parsed_mount_data *data,
- rpc_authflavor_t pseudoflavor)
+/*
+ * Add 'flavor' to 'auth_info' if not already present.
+ * Returns true if 'flavor' ends up in the list, false otherwise
+ */
+static bool nfs_auth_info_add(struct nfs_auth_info *auth_info,
+ rpc_authflavor_t flavor)
+{
+ unsigned int i;
+ unsigned int max_flavor_len = (sizeof(auth_info->flavors) /
+ sizeof(auth_info->flavors[0]));
+
+ /* make sure this flavor isn't already in the list */
+ for (i = 0; i < auth_info->flavor_len; i++) {
+ if (flavor == auth_info->flavors[i])
+ return true;
+ }
+
+ if (auth_info->flavor_len + 1 >= max_flavor_len) {
+ dfprintk(MOUNT, "NFS: too many sec= flavors\n");
+ return false;
+ }
+
+ auth_info->flavors[auth_info->flavor_len++] = flavor;
+ return true;
+}
+
+/*
+ * Return true if 'match' is in auth_info or auth_info is empty.
+ * Return false otherwise.
+ */
+bool nfs_auth_info_match(const struct nfs_auth_info *auth_info,
+ rpc_authflavor_t match)
{
- data->auth_flavors[0] = pseudoflavor;
- data->auth_flavor_len = 1;
+ int i;
+
+ if (!auth_info->flavor_len)
+ return true;
+
+ for (i = 0; i < auth_info->flavor_len; i++) {
+ if (auth_info->flavors[i] == match)
+ return true;
+ }
+ return false;
}
+EXPORT_SYMBOL_GPL(nfs_auth_info_match);
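These two helpers define the new sec= semantics: flavors accumulate into an ordered, duplicate-free list, and an empty list means no restriction at all. A self-contained user-space sketch of the same rules, with a fixed-size array standing in for struct nfs_auth_info and example pseudoflavor numbers:

#include <stdbool.h>
#include <stdio.h>

#define MAX_FLAVORS 12

struct auth_info { unsigned int flavors[MAX_FLAVORS]; unsigned int len; };

static bool auth_info_add(struct auth_info *ai, unsigned int flavor)
{
    for (unsigned int i = 0; i < ai->len; i++)
        if (ai->flavors[i] == flavor)
            return true;              /* duplicates are collapsed */
    if (ai->len >= MAX_FLAVORS)
        return false;                 /* list full: reject the option */
    ai->flavors[ai->len++] = flavor;
    return true;
}

static bool auth_info_match(const struct auth_info *ai, unsigned int flavor)
{
    if (ai->len == 0)
        return true;                  /* no sec= given: anything matches */
    for (unsigned int i = 0; i < ai->len; i++)
        if (ai->flavors[i] == flavor)
            return true;
    return false;
}

int main(void)
{
    struct auth_info ai = { .len = 0 };

    auth_info_add(&ai, 390003);       /* e.g. RPC_AUTH_GSS_KRB5 */
    auth_info_add(&ai, 1);            /* e.g. RPC_AUTH_UNIX */
    printf("krb5 allowed: %d, null allowed: %d\n",
           auth_info_match(&ai, 390003), auth_info_match(&ai, 0));
    return 0;
}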
/*
* Parse the value of the 'sec=' option.
@@ -1034,49 +1073,55 @@ static int nfs_parse_security_flavors(char *value,
{
substring_t args[MAX_OPT_ARGS];
rpc_authflavor_t pseudoflavor;
+ char *p;
dfprintk(MOUNT, "NFS: parsing sec=%s option\n", value);
- switch (match_token(value, nfs_secflavor_tokens, args)) {
- case Opt_sec_none:
- pseudoflavor = RPC_AUTH_NULL;
- break;
- case Opt_sec_sys:
- pseudoflavor = RPC_AUTH_UNIX;
- break;
- case Opt_sec_krb5:
- pseudoflavor = RPC_AUTH_GSS_KRB5;
- break;
- case Opt_sec_krb5i:
- pseudoflavor = RPC_AUTH_GSS_KRB5I;
- break;
- case Opt_sec_krb5p:
- pseudoflavor = RPC_AUTH_GSS_KRB5P;
- break;
- case Opt_sec_lkey:
- pseudoflavor = RPC_AUTH_GSS_LKEY;
- break;
- case Opt_sec_lkeyi:
- pseudoflavor = RPC_AUTH_GSS_LKEYI;
- break;
- case Opt_sec_lkeyp:
- pseudoflavor = RPC_AUTH_GSS_LKEYP;
- break;
- case Opt_sec_spkm:
- pseudoflavor = RPC_AUTH_GSS_SPKM;
- break;
- case Opt_sec_spkmi:
- pseudoflavor = RPC_AUTH_GSS_SPKMI;
- break;
- case Opt_sec_spkmp:
- pseudoflavor = RPC_AUTH_GSS_SPKMP;
- break;
- default:
- return 0;
+ while ((p = strsep(&value, ":")) != NULL) {
+ switch (match_token(p, nfs_secflavor_tokens, args)) {
+ case Opt_sec_none:
+ pseudoflavor = RPC_AUTH_NULL;
+ break;
+ case Opt_sec_sys:
+ pseudoflavor = RPC_AUTH_UNIX;
+ break;
+ case Opt_sec_krb5:
+ pseudoflavor = RPC_AUTH_GSS_KRB5;
+ break;
+ case Opt_sec_krb5i:
+ pseudoflavor = RPC_AUTH_GSS_KRB5I;
+ break;
+ case Opt_sec_krb5p:
+ pseudoflavor = RPC_AUTH_GSS_KRB5P;
+ break;
+ case Opt_sec_lkey:
+ pseudoflavor = RPC_AUTH_GSS_LKEY;
+ break;
+ case Opt_sec_lkeyi:
+ pseudoflavor = RPC_AUTH_GSS_LKEYI;
+ break;
+ case Opt_sec_lkeyp:
+ pseudoflavor = RPC_AUTH_GSS_LKEYP;
+ break;
+ case Opt_sec_spkm:
+ pseudoflavor = RPC_AUTH_GSS_SPKM;
+ break;
+ case Opt_sec_spkmi:
+ pseudoflavor = RPC_AUTH_GSS_SPKMI;
+ break;
+ case Opt_sec_spkmp:
+ pseudoflavor = RPC_AUTH_GSS_SPKMP;
+ break;
+ default:
+ dfprintk(MOUNT,
+ "NFS: sec= option '%s' not recognized\n", p);
+ return 0;
+ }
+
+ if (!nfs_auth_info_add(&mnt->auth_info, pseudoflavor))
+ return 0;
}
- mnt->flags |= NFS_MOUNT_SECFLAVOUR;
- nfs_set_auth_parsed_mount_data(mnt, pseudoflavor);
return 1;
}
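With the strsep() loop a mount may now supply several flavors at once, for example sec=krb5i:krb5:sys, and they are recorded in the order given. A user-space sketch of just the splitting and token lookup, with a reduced flavor table (the pseudoflavor numbers are the usual RPC values, listed here only for illustration):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

struct sec_token { const char *name; unsigned int flavor; };

static const struct sec_token tokens[] = {
    { "none",  0 },        /* RPC_AUTH_NULL      */
    { "sys",   1 },        /* RPC_AUTH_UNIX      */
    { "krb5",  390003 },   /* RPC_AUTH_GSS_KRB5  */
    { "krb5i", 390004 },   /* RPC_AUTH_GSS_KRB5I */
    { "krb5p", 390005 },   /* RPC_AUTH_GSS_KRB5P */
};

int main(void)
{
    char value[] = "krb5i:krb5:sys";   /* as given after "sec=" */
    char *rest = value, *p;

    while ((p = strsep(&rest, ":")) != NULL) {
        int found = 0;
        size_t i;

        for (i = 0; i < sizeof(tokens) / sizeof(tokens[0]); i++) {
            if (strcmp(p, tokens[i].name) == 0) {
                printf("sec flavor %s -> %u\n", p, tokens[i].flavor);
                found = 1;
                break;
            }
        }
        if (!found) {
            fprintf(stderr, "unrecognized sec= flavor '%s'\n", p);
            return 1;
        }
    }
    return 0;
}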
@@ -1623,12 +1668,14 @@ out_security_failure:
}
/*
- * Ensure that the specified authtype in args->auth_flavors[0] is supported by
- * the server. Returns 0 if it's ok, and -EACCES if not.
+ * Ensure that a specified authtype in args->auth_info is supported by
+ * the server. Returns 0 and sets args->selected_flavor if it's ok, and
+ * -EACCES if not.
*/
-static int nfs_verify_authflavor(struct nfs_parsed_mount_data *args,
+static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
rpc_authflavor_t *server_authlist, unsigned int count)
{
+ rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
unsigned int i;
/*
@@ -1640,17 +1687,20 @@ static int nfs_verify_authflavor(struct nfs_parsed_mount_data *args,
* can be used.
*/
for (i = 0; i < count; i++) {
- if (args->auth_flavors[0] == server_authlist[i] ||
- server_authlist[i] == RPC_AUTH_NULL)
+ flavor = server_authlist[i];
+
+ if (nfs_auth_info_match(&args->auth_info, flavor) ||
+ flavor == RPC_AUTH_NULL)
goto out;
}
- dfprintk(MOUNT, "NFS: auth flavor %u not supported by server\n",
- args->auth_flavors[0]);
+ dfprintk(MOUNT,
+ "NFS: specified auth flavors not supported by server\n");
return -EACCES;
out:
- dfprintk(MOUNT, "NFS: using auth flavor %u\n", args->auth_flavors[0]);
+ args->selected_flavor = flavor;
+ dfprintk(MOUNT, "NFS: using auth flavor %u\n", args->selected_flavor);
return 0;
}
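
nfs_verify_authflavors() above walks the server's list in the server's preferred order and settles on the first flavor the client's auth_info allows, treating AUTH_NULL as "server lets the client decide" and an empty client list as "anything goes". A hedged userspace sketch of that selection rule follows; the flavor constants and helper names are invented for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { AUTH_NULL = 0, AUTH_UNIX = 1, AUTH_KRB5 = 390003 };	/* made-up subset */

/* Mirrors the "empty list matches everything" rule of nfs_auth_info_match(). */
static bool client_allows(const unsigned *wanted, size_t n, unsigned flavor)
{
	if (n == 0)
		return true;
	for (size_t i = 0; i < n; i++)
		if (wanted[i] == flavor)
			return true;
	return false;
}

/* Returns the negotiated flavor, or -1 if nothing acceptable was offered. */
static long pick_flavor(const unsigned *server, size_t server_n,
			const unsigned *wanted, size_t wanted_n)
{
	for (size_t i = 0; i < server_n; i++)
		if (client_allows(wanted, wanted_n, server[i]) ||
		    server[i] == AUTH_NULL)
			return server[i];
	return -1;
}

int main(void)
{
	unsigned server[] = { AUTH_KRB5, AUTH_UNIX };	/* server preference order */
	unsigned wanted[] = { AUTH_UNIX };		/* client's sec= list */

	printf("negotiated flavor: %ld\n", pick_flavor(server, 2, wanted, 1));
	return 0;
}
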
@@ -1738,9 +1788,10 @@ static struct nfs_server *nfs_try_mount_request(struct nfs_mount_info *mount_inf
* Was a sec= authflavor specified in the options? First, verify
* whether the server supports it, and then just try to use it if so.
*/
- if (args->auth_flavor_len > 0) {
- status = nfs_verify_authflavor(args, authlist, authlist_len);
- dfprintk(MOUNT, "NFS: using auth flavor %u\n", args->auth_flavors[0]);
+ if (args->auth_info.flavor_len > 0) {
+ status = nfs_verify_authflavors(args, authlist, authlist_len);
+ dfprintk(MOUNT, "NFS: using auth flavor %u\n",
+ args->selected_flavor);
if (status)
return ERR_PTR(status);
return nfs_mod->rpc_ops->create_server(mount_info, nfs_mod);
@@ -1769,7 +1820,7 @@ static struct nfs_server *nfs_try_mount_request(struct nfs_mount_info *mount_inf
/* Fallthrough */
}
dfprintk(MOUNT, "NFS: attempting to use auth flavor %u\n", flavor);
- nfs_set_auth_parsed_mount_data(args, flavor);
+ args->selected_flavor = flavor;
server = nfs_mod->rpc_ops->create_server(mount_info, nfs_mod);
if (!IS_ERR(server))
return server;
@@ -1785,7 +1836,7 @@ static struct nfs_server *nfs_try_mount_request(struct nfs_mount_info *mount_inf
/* Last chance! Try AUTH_UNIX */
dfprintk(MOUNT, "NFS: attempting to use auth flavor %u\n", RPC_AUTH_UNIX);
- nfs_set_auth_parsed_mount_data(args, RPC_AUTH_UNIX);
+ args->selected_flavor = RPC_AUTH_UNIX;
return nfs_mod->rpc_ops->create_server(mount_info, nfs_mod);
}
@@ -1972,9 +2023,9 @@ static int nfs23_validate_mount_data(void *options,
args->bsize = data->bsize;
if (data->flags & NFS_MOUNT_SECFLAVOUR)
- nfs_set_auth_parsed_mount_data(args, data->pseudoflavor);
+ args->selected_flavor = data->pseudoflavor;
else
- nfs_set_auth_parsed_mount_data(args, RPC_AUTH_UNIX);
+ args->selected_flavor = RPC_AUTH_UNIX;
if (!args->nfs_server.hostname)
goto out_nomem;
@@ -2108,9 +2159,6 @@ static int nfs_validate_text_mount_data(void *options,
nfs_set_port(sap, &args->nfs_server.port, port);
- if (args->auth_flavor_len > 1)
- goto out_bad_auth;
-
return nfs_parse_devname(dev_name,
&args->nfs_server.hostname,
max_namelen,
@@ -2130,10 +2178,6 @@ out_invalid_transport_udp:
out_no_address:
dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n");
return -EINVAL;
-
-out_bad_auth:
- dfprintk(MOUNT, "NFS: Too many RPC auth flavours specified\n");
- return -EINVAL;
}
static int
@@ -2143,8 +2187,10 @@ nfs_compare_remount_data(struct nfs_server *nfss,
if (data->flags != nfss->flags ||
data->rsize != nfss->rsize ||
data->wsize != nfss->wsize ||
+ data->version != nfss->nfs_client->rpc_ops->version ||
+ data->minorversion != nfss->nfs_client->cl_minorversion ||
data->retrans != nfss->client->cl_timeout->to_retries ||
- data->auth_flavors[0] != nfss->client->cl_auth->au_flavor ||
+ data->selected_flavor != nfss->client->cl_auth->au_flavor ||
data->acregmin != nfss->acregmin / HZ ||
data->acregmax != nfss->acregmax / HZ ||
data->acdirmin != nfss->acdirmin / HZ ||
@@ -2189,7 +2235,8 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
data->rsize = nfss->rsize;
data->wsize = nfss->wsize;
data->retrans = nfss->client->cl_timeout->to_retries;
- nfs_set_auth_parsed_mount_data(data, nfss->client->cl_auth->au_flavor);
+ data->selected_flavor = nfss->client->cl_auth->au_flavor;
+ data->auth_info = nfss->auth_info;
data->acregmin = nfss->acregmin / HZ;
data->acregmax = nfss->acregmax / HZ;
data->acdirmin = nfss->acdirmin / HZ;
@@ -2197,12 +2244,14 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
data->timeo = 10U * nfss->client->cl_timeout->to_initval / HZ;
data->nfs_server.port = nfss->port;
data->nfs_server.addrlen = nfss->nfs_client->cl_addrlen;
+ data->version = nfsvers;
+ data->minorversion = nfss->nfs_client->cl_minorversion;
memcpy(&data->nfs_server.address, &nfss->nfs_client->cl_addr,
data->nfs_server.addrlen);
/* overwrite those values with any that were specified */
- error = nfs_parse_mount_options((char *)options, data);
- if (error < 0)
+ error = -EINVAL;
+ if (!nfs_parse_mount_options((char *)options, data))
goto out;
/*
@@ -2332,7 +2381,7 @@ static int nfs_compare_mount_options(const struct super_block *s, const struct n
goto Ebusy;
if (a->acdirmax != b->acdirmax)
goto Ebusy;
- if (b->flags & NFS_MOUNT_SECFLAVOUR &&
+ if (b->auth_info.flavor_len > 0 &&
clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
goto Ebusy;
return 1;
@@ -2530,6 +2579,7 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
mntroot = ERR_PTR(error);
goto error_splat_bdi;
}
+ server->super = s;
}
if (!s->s_root) {
@@ -2713,9 +2763,9 @@ static int nfs4_validate_mount_data(void *options,
data->auth_flavours,
sizeof(pseudoflavor)))
return -EFAULT;
- nfs_set_auth_parsed_mount_data(args, pseudoflavor);
+ args->selected_flavor = pseudoflavor;
} else
- nfs_set_auth_parsed_mount_data(args, RPC_AUTH_UNIX);
+ args->selected_flavor = RPC_AUTH_UNIX;
c = strndup_user(data->hostname.data, NFS4_MAXNAMLEN);
if (IS_ERR(c))
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index bb939edd4c99..11d78944de79 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -493,17 +493,15 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
unsigned long long fileid;
struct dentry *sdentry;
struct rpc_task *task;
- int error = -EIO;
+ int error = -EBUSY;
- dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
- d_count(dentry));
+ dfprintk(VFS, "NFS: silly-rename(%pd2, ct=%d)\n",
+ dentry, d_count(dentry));
nfs_inc_stats(dir, NFSIOS_SILLYRENAME);
/*
* We don't allow a dentry to be silly-renamed twice.
*/
- error = -EBUSY;
if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
goto out;
@@ -522,8 +520,8 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
SILLYNAME_FILEID_LEN, fileid,
SILLYNAME_COUNTER_LEN, sillycounter);
- dfprintk(VFS, "NFS: trying to rename %s to %s\n",
- dentry->d_name.name, silly);
+ dfprintk(VFS, "NFS: trying to rename %pd to %s\n",
+ dentry, silly);
sdentry = lookup_one_len(silly, dentry->d_parent, slen);
/*
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index ac1dc331ba31..c1d548211c31 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -954,10 +954,8 @@ int nfs_updatepage(struct file *file, struct page *page,
nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
- dprintk("NFS: nfs_updatepage(%s/%s %d@%lld)\n",
- file->f_path.dentry->d_parent->d_name.name,
- file->f_path.dentry->d_name.name, count,
- (long long)(page_file_offset(page) + offset));
+ dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
+ file, count, (long long)(page_file_offset(page) + offset));
if (nfs_can_extend_write(file, page, inode)) {
count = max(count + offset, nfs_page_length(page));
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index dc8f1ef665ce..f994e750e0d1 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -95,7 +95,7 @@ config NFSD_V4_SECURITY_LABEL
Smack policies on NFSv4 files, say N.
WARNING: there is still a chance of backwards-incompatible protocol changes.
- For now we recommend "Y" only for developers and testers."
+ For now we recommend "Y" only for developers and testers.
config NFSD_FAULT_INJECTION
bool "NFS server manual fault injection"
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 5f38ea36e266..8513c598fabf 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -536,16 +536,12 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
if (err)
goto out3;
exp.ex_anon_uid= make_kuid(&init_user_ns, an_int);
- if (!uid_valid(exp.ex_anon_uid))
- goto out3;
/* anon gid */
err = get_int(&mesg, &an_int);
if (err)
goto out3;
exp.ex_anon_gid= make_kgid(&init_user_ns, an_int);
- if (!gid_valid(exp.ex_anon_gid))
- goto out3;
/* fsid */
err = get_int(&mesg, &an_int);
@@ -583,6 +579,26 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
exp.ex_uuid);
if (err)
goto out4;
+ /*
+ * No point caching this if it would immediately expire.
+ * Also, this protects exportfs's dummy export from the
+ * anon_uid/anon_gid checks:
+ */
+ if (exp.h.expiry_time < seconds_since_boot())
+ goto out4;
+ /*
+ * For some reason exportfs has been passing down an
+ * invalid (-1) uid & gid on the "dummy" export which it
+ * uses to test export support. To make sure exportfs
+ * sees errors from check_export we therefore need to
+ * delay these checks till after check_export:
+ */
+ err = -EINVAL;
+ if (!uid_valid(exp.ex_anon_uid))
+ goto out4;
+ if (!gid_valid(exp.ex_anon_gid))
+ goto out4;
+ err = 0;
}
expp = svc_export_lookup(&exp);
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index e0a65a9e37e9..9c271f42604a 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -385,8 +385,8 @@ purge_old(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
status = vfs_rmdir(parent->d_inode, child);
if (status)
- printk("failed to remove client recovery directory %s\n",
- child->d_name.name);
+ printk("failed to remove client recovery directory %pd\n",
+ child);
/* Keep trying, success or failure: */
return 0;
}
@@ -410,15 +410,15 @@ out:
nfs4_release_reclaim(nn);
if (status)
printk("nfsd4: failed to purge old clients from recovery"
- " directory %s\n", nn->rec_file->f_path.dentry->d_name.name);
+ " directory %pD\n", nn->rec_file);
}
static int
load_recdir(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
{
if (child->d_name.len != HEXDIR_LEN - 1) {
- printk("nfsd4: illegal name %s in recovery directory\n",
- child->d_name.name);
+ printk("nfsd4: illegal name %pd in recovery directory\n",
+ child);
/* Keep trying; maybe the others are OK: */
return 0;
}
@@ -437,7 +437,7 @@ nfsd4_recdir_load(struct net *net) {
status = nfsd4_list_rec_dir(load_recdir, nn);
if (status)
printk("nfsd4: failed loading clients from recovery"
- " directory %s\n", nn->rec_file->f_path.dentry->d_name.name);
+ " directory %pD\n", nn->rec_file);
return status;
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 0874998a49cd..e03e8efff53f 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -402,11 +402,17 @@ static void remove_stid(struct nfs4_stid *s)
idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
}
+static void nfs4_free_stid(struct kmem_cache *slab, struct nfs4_stid *s)
+{
+ kmem_cache_free(slab, s);
+}
+
void
nfs4_put_delegation(struct nfs4_delegation *dp)
{
+ remove_stid(&dp->dl_stid);
if (atomic_dec_and_test(&dp->dl_count)) {
- kmem_cache_free(deleg_slab, dp);
+ nfs4_free_stid(deleg_slab, &dp->dl_stid);
num_delegations--;
}
}
@@ -445,14 +451,12 @@ unhash_delegation(struct nfs4_delegation *dp)
static void destroy_revoked_delegation(struct nfs4_delegation *dp)
{
list_del_init(&dp->dl_recall_lru);
- remove_stid(&dp->dl_stid);
nfs4_put_delegation(dp);
}
static void destroy_delegation(struct nfs4_delegation *dp)
{
unhash_delegation(dp);
- remove_stid(&dp->dl_stid);
nfs4_put_delegation(dp);
}
@@ -610,7 +614,7 @@ static void close_generic_stateid(struct nfs4_ol_stateid *stp)
static void free_generic_stateid(struct nfs4_ol_stateid *stp)
{
remove_stid(&stp->st_stid);
- kmem_cache_free(stateid_slab, stp);
+ nfs4_free_stid(stateid_slab, &stp->st_stid);
}
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
@@ -668,7 +672,6 @@ static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
unhash_open_stateid(stp);
- unhash_stid(&stp->st_stid);
free_generic_stateid(stp);
}
@@ -690,7 +693,6 @@ static void release_last_closed_stateid(struct nfs4_openowner *oo)
struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;
if (s) {
- unhash_stid(&s->st_stid);
free_generic_stateid(s);
oo->oo_last_closed_stid = NULL;
}
@@ -1127,6 +1129,11 @@ destroy_client(struct nfs4_client *clp)
dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
destroy_delegation(dp);
}
+ list_splice_init(&clp->cl_revoked, &reaplist);
+ while (!list_empty(&reaplist)) {
+ dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
+ destroy_revoked_delegation(dp);
+ }
while (!list_empty(&clp->cl_openowners)) {
oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
release_openowner(oo);
@@ -3154,7 +3161,6 @@ nfs4_open_delegation(struct net *net, struct svc_fh *fh,
open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
return;
out_free:
- unhash_stid(&dp->dl_stid);
nfs4_put_delegation(dp);
out_no_deleg:
open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
@@ -3843,9 +3849,8 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfs4_ol_stateid *stp;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
- (int)cstate->current_fh.fh_dentry->d_name.len,
- cstate->current_fh.fh_dentry->d_name.name);
+ dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
+ cstate->current_fh.fh_dentry);
status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
if (status)
@@ -3922,9 +3927,8 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
struct nfs4_ol_stateid *stp;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
- (int)cstate->current_fh.fh_dentry->d_name.len,
- cstate->current_fh.fh_dentry->d_name.name);
+ dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
+ cstate->current_fh.fh_dentry);
/* We don't yet support WANT bits: */
if (od->od_deleg_want)
@@ -3980,9 +3984,8 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- dprintk("NFSD: nfsd4_close on file %.*s\n",
- (int)cstate->current_fh.fh_dentry->d_name.len,
- cstate->current_fh.fh_dentry->d_name.name);
+ dprintk("NFSD: nfsd4_close on file %pd\n",
+ cstate->current_fh.fh_dentry);
nfs4_lock_state();
status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
@@ -3998,10 +4001,9 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfsd4_close_open_stateid(stp);
- if (cstate->minorversion) {
- unhash_stid(&stp->st_stid);
+ if (cstate->minorversion)
free_generic_stateid(stp);
- } else
+ else
oo->oo_last_closed_stid = stp;
if (list_empty(&oo->oo_owner.so_stateids)) {
@@ -5122,7 +5124,6 @@ out_recovery:
return ret;
}
-/* should be called with the state lock held */
void
nfs4_state_shutdown_net(struct net *net)
{
@@ -5133,6 +5134,7 @@ nfs4_state_shutdown_net(struct net *net)
cancel_delayed_work_sync(&nn->laundromat_work);
locks_end_grace(&nn->nfsd4_manager);
+ nfs4_lock_state();
INIT_LIST_HEAD(&reaplist);
spin_lock(&recall_lock);
list_for_each_safe(pos, next, &nn->del_recall_lru) {
@@ -5147,6 +5149,7 @@ nfs4_state_shutdown_net(struct net *net)
nfsd4_client_tracking_exit(net);
nfs4_state_destroy_net(net);
+ nfs4_unlock_state();
}
void
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index d9454fe5653f..d9d7fa94967f 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -411,6 +411,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
label->data = kzalloc(dummy32 + 1, GFP_KERNEL);
if (!label->data)
return nfserr_jukebox;
+ label->len = dummy32;
defer_free(argp, kfree, label->data);
memcpy(label->data, buf, dummy32);
}
@@ -945,13 +946,16 @@ static __be32
nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_confirm *open_conf)
{
DECODE_HEAD;
-
+
+ if (argp->minorversion >= 1)
+ return nfserr_notsupp;
+
status = nfsd4_decode_stateid(argp, &open_conf->oc_req_stateid);
if (status)
return status;
READ_BUF(4);
READ32(open_conf->oc_seqid);
-
+
DECODE_TAIL;
}
@@ -991,6 +995,14 @@ nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
}
static __be32
+nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, void *p)
+{
+ if (argp->minorversion == 0)
+ return nfs_ok;
+ return nfserr_notsupp;
+}
+
+static __be32
nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read)
{
DECODE_HEAD;
@@ -1061,6 +1073,9 @@ nfsd4_decode_renew(struct nfsd4_compoundargs *argp, clientid_t *clientid)
{
DECODE_HEAD;
+ if (argp->minorversion >= 1)
+ return nfserr_notsupp;
+
READ_BUF(sizeof(clientid_t));
COPYMEM(clientid, sizeof(clientid_t));
@@ -1111,6 +1126,9 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
{
DECODE_HEAD;
+ if (argp->minorversion >= 1)
+ return nfserr_notsupp;
+
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(setclientid->se_verf.data, NFS4_VERIFIER_SIZE);
@@ -1137,6 +1155,9 @@ nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_s
{
DECODE_HEAD;
+ if (argp->minorversion >= 1)
+ return nfserr_notsupp;
+
READ_BUF(8 + NFS4_VERIFIER_SIZE);
COPYMEM(&scd_c->sc_clientid, 8);
COPYMEM(&scd_c->sc_confirm, NFS4_VERIFIER_SIZE);
@@ -1220,6 +1241,9 @@ nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp, struct nfsd4_rel
{
DECODE_HEAD;
+ if (argp->minorversion >= 1)
+ return nfserr_notsupp;
+
READ_BUF(12);
COPYMEM(&rlockowner->rl_clientid, sizeof(clientid_t));
READ32(rlockowner->rl_owner.len);
@@ -1519,7 +1543,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
[OP_OPEN_CONFIRM] = (nfsd4_dec)nfsd4_decode_open_confirm,
[OP_OPEN_DOWNGRADE] = (nfsd4_dec)nfsd4_decode_open_downgrade,
[OP_PUTFH] = (nfsd4_dec)nfsd4_decode_putfh,
- [OP_PUTPUBFH] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_PUTPUBFH] = (nfsd4_dec)nfsd4_decode_putpubfh,
[OP_PUTROOTFH] = (nfsd4_dec)nfsd4_decode_noop,
[OP_READ] = (nfsd4_dec)nfsd4_decode_read,
[OP_READDIR] = (nfsd4_dec)nfsd4_decode_readdir,
@@ -1536,46 +1560,6 @@ static nfsd4_dec nfsd4_dec_ops[] = {
[OP_VERIFY] = (nfsd4_dec)nfsd4_decode_verify,
[OP_WRITE] = (nfsd4_dec)nfsd4_decode_write,
[OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
-};
-
-static nfsd4_dec nfsd41_dec_ops[] = {
- [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
- [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
- [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
- [OP_CREATE] = (nfsd4_dec)nfsd4_decode_create,
- [OP_DELEGPURGE] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_DELEGRETURN] = (nfsd4_dec)nfsd4_decode_delegreturn,
- [OP_GETATTR] = (nfsd4_dec)nfsd4_decode_getattr,
- [OP_GETFH] = (nfsd4_dec)nfsd4_decode_noop,
- [OP_LINK] = (nfsd4_dec)nfsd4_decode_link,
- [OP_LOCK] = (nfsd4_dec)nfsd4_decode_lock,
- [OP_LOCKT] = (nfsd4_dec)nfsd4_decode_lockt,
- [OP_LOCKU] = (nfsd4_dec)nfsd4_decode_locku,
- [OP_LOOKUP] = (nfsd4_dec)nfsd4_decode_lookup,
- [OP_LOOKUPP] = (nfsd4_dec)nfsd4_decode_noop,
- [OP_NVERIFY] = (nfsd4_dec)nfsd4_decode_verify,
- [OP_OPEN] = (nfsd4_dec)nfsd4_decode_open,
- [OP_OPENATTR] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OPEN_CONFIRM] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OPEN_DOWNGRADE] = (nfsd4_dec)nfsd4_decode_open_downgrade,
- [OP_PUTFH] = (nfsd4_dec)nfsd4_decode_putfh,
- [OP_PUTPUBFH] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_PUTROOTFH] = (nfsd4_dec)nfsd4_decode_noop,
- [OP_READ] = (nfsd4_dec)nfsd4_decode_read,
- [OP_READDIR] = (nfsd4_dec)nfsd4_decode_readdir,
- [OP_READLINK] = (nfsd4_dec)nfsd4_decode_noop,
- [OP_REMOVE] = (nfsd4_dec)nfsd4_decode_remove,
- [OP_RENAME] = (nfsd4_dec)nfsd4_decode_rename,
- [OP_RENEW] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_RESTOREFH] = (nfsd4_dec)nfsd4_decode_noop,
- [OP_SAVEFH] = (nfsd4_dec)nfsd4_decode_noop,
- [OP_SECINFO] = (nfsd4_dec)nfsd4_decode_secinfo,
- [OP_SETATTR] = (nfsd4_dec)nfsd4_decode_setattr,
- [OP_SETCLIENTID] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_SETCLIENTID_CONFIRM]= (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_VERIFY] = (nfsd4_dec)nfsd4_decode_verify,
- [OP_WRITE] = (nfsd4_dec)nfsd4_decode_write,
- [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_notsupp,
/* new operations for NFSv4.1 */
[OP_BACKCHANNEL_CTL] = (nfsd4_dec)nfsd4_decode_backchannel_ctl,
@@ -1599,23 +1583,25 @@ static nfsd4_dec nfsd41_dec_ops[] = {
[OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_reclaim_complete,
};
-struct nfsd4_minorversion_ops {
- nfsd4_dec *decoders;
- int nops;
-};
-
-static struct nfsd4_minorversion_ops nfsd4_minorversion[] = {
- [0] = { nfsd4_dec_ops, ARRAY_SIZE(nfsd4_dec_ops) },
- [1] = { nfsd41_dec_ops, ARRAY_SIZE(nfsd41_dec_ops) },
- [2] = { nfsd41_dec_ops, ARRAY_SIZE(nfsd41_dec_ops) },
-};
+static inline bool
+nfsd4_opnum_in_range(struct nfsd4_compoundargs *argp, struct nfsd4_op *op)
+{
+ if (op->opnum < FIRST_NFS4_OP)
+ return false;
+ else if (argp->minorversion == 0 && op->opnum > LAST_NFS40_OP)
+ return false;
+ else if (argp->minorversion == 1 && op->opnum > LAST_NFS41_OP)
+ return false;
+ else if (argp->minorversion == 2 && op->opnum > LAST_NFS42_OP)
+ return false;
+ return true;
+}
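+
+A note on the shape of this change: the separate nfsd41_dec_ops table goes away, so a single decoder table serves every minor version, with nfsd4_opnum_in_range() bounding the opnum and individual decoders (such as nfsd4_decode_putpubfh above) rejecting minor versions they do not support. The following small sketch shows that dispatch shape with invented op numbers and decoder stubs, not the real NFSv4 values.
+
+#include <stdio.h>
+
+typedef int (*dec_fn)(unsigned minorversion);
+
+static int dec_ok(unsigned mv)      { (void)mv; return 0; }	/* always decodes */
+static int dec_v0_only(unsigned mv) { return mv ? -1 : 0; }	/* "notsupp" for v4.1+ */
+
+/* Invented op numbers; the real bounds are FIRST_NFS4_OP/LAST_NFS4*_OP. */
+enum { OP_FIRST = 3, OP_PUTFH = 3, OP_SETCLIENTID = 4, OP_LAST = 4 };
+
+static const dec_fn dec_ops[] = {
+	[OP_PUTFH]       = dec_ok,		/* valid for every minor version */
+	[OP_SETCLIENTID] = dec_v0_only,		/* v4.0 only, like SETCLIENTID   */
+};
+
+static int decode_op(unsigned minorversion, int opnum)
+{
+	if (opnum < OP_FIRST || opnum > OP_LAST)
+		return -2;			/* treated as OP_ILLEGAL */
+	return dec_ops[opnum](minorversion);
+}
+
+int main(void)
+{
+	printf("%d %d\n", decode_op(0, OP_SETCLIENTID),	/*  0: allowed       */
+			  decode_op(1, OP_SETCLIENTID));	/* -1: not supported */
+	return 0;
+}
+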
static __be32
nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
{
DECODE_HEAD;
struct nfsd4_op *op;
- struct nfsd4_minorversion_ops *ops;
bool cachethis = false;
int i;
@@ -1640,10 +1626,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
}
}
- if (argp->minorversion >= ARRAY_SIZE(nfsd4_minorversion))
+ if (argp->minorversion > NFSD_SUPPORTED_MINOR_VERSION)
argp->opcnt = 0;
- ops = &nfsd4_minorversion[argp->minorversion];
for (i = 0; i < argp->opcnt; i++) {
op = &argp->ops[i];
op->replay = NULL;
@@ -1651,8 +1636,8 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
READ_BUF(4);
READ32(op->opnum);
- if (op->opnum >= FIRST_NFS4_OP && op->opnum <= LAST_NFS4_OP)
- op->status = ops->decoders[op->opnum](argp, &op->u);
+ if (nfsd4_opnum_in_range(argp, op))
+ op->status = nfsd4_dec_ops[op->opnum](argp, &op->u);
else {
op->opnum = OP_ILLEGAL;
op->status = nfserr_op_illegal;
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 814afaa4458a..3c37b160dcad 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -47,7 +47,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
tdentry = parent;
}
if (tdentry != exp->ex_path.dentry)
- dprintk("nfsd_acceptable failed at %p %s\n", tdentry, tdentry->d_name.name);
+ dprintk("nfsd_acceptable failed at %p %pd\n", tdentry, tdentry);
rv = (tdentry == exp->ex_path.dentry);
dput(tdentry);
return rv;
@@ -253,8 +253,8 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
if (S_ISDIR(dentry->d_inode->i_mode) &&
(dentry->d_flags & DCACHE_DISCONNECTED)) {
- printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %s/%s\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %pd2\n",
+ dentry);
}
fhp->fh_dentry = dentry;
@@ -361,10 +361,9 @@ skip_pseudoflavor_check:
error = nfsd_permission(rqstp, exp, dentry, access);
if (error) {
- dprintk("fh_verify: %s/%s permission failure, "
+ dprintk("fh_verify: %pd2 permission failure, "
"acc=%x, error=%d\n",
- dentry->d_parent->d_name.name,
- dentry->d_name.name,
+ dentry,
access, ntohl(error));
}
out:
@@ -514,14 +513,13 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
*/
struct inode * inode = dentry->d_inode;
- struct dentry *parent = dentry->d_parent;
__u32 *datap;
dev_t ex_dev = exp_sb(exp)->s_dev;
- dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n",
+ dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %pd2, ino=%ld)\n",
MAJOR(ex_dev), MINOR(ex_dev),
(long) exp->ex_path.dentry->d_inode->i_ino,
- parent->d_name.name, dentry->d_name.name,
+ dentry,
(inode ? inode->i_ino : 0));
/* Choose filehandle version and fsid type based on
@@ -534,13 +532,13 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
fh_put(ref_fh);
if (fhp->fh_locked || fhp->fh_dentry) {
- printk(KERN_ERR "fh_compose: fh %s/%s not initialized!\n",
- parent->d_name.name, dentry->d_name.name);
+ printk(KERN_ERR "fh_compose: fh %pd2 not initialized!\n",
+ dentry);
}
if (fhp->fh_maxsize < NFS_FHSIZE)
- printk(KERN_ERR "fh_compose: called with maxsize %d! %s/%s\n",
+ printk(KERN_ERR "fh_compose: called with maxsize %d! %pd2\n",
fhp->fh_maxsize,
- parent->d_name.name, dentry->d_name.name);
+ dentry);
fhp->fh_dentry = dget(dentry); /* our internal copy */
fhp->fh_export = exp;
@@ -600,22 +598,20 @@ fh_update(struct svc_fh *fhp)
_fh_update_old(dentry, fhp->fh_export, &fhp->fh_handle);
} else {
if (fhp->fh_handle.fh_fileid_type != FILEID_ROOT)
- goto out;
+ return 0;
_fh_update(fhp, fhp->fh_export, dentry);
if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID)
return nfserr_opnotsupp;
}
-out:
return 0;
-
out_bad:
printk(KERN_ERR "fh_update: fh not verified!\n");
- goto out;
+ return nfserr_serverfault;
out_negative:
- printk(KERN_ERR "fh_update: %s/%s still negative!\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
- goto out;
+ printk(KERN_ERR "fh_update: %pd2 still negative!\n",
+ dentry);
+ return nfserr_serverfault;
}
/*
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index e5e6707ba687..4775bc4896c8 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -173,8 +173,8 @@ fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
BUG_ON(!dentry);
if (fhp->fh_locked) {
- printk(KERN_WARNING "fh_lock: %s/%s already locked!\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
+ printk(KERN_WARNING "fh_lock: %pd2 already locked!\n",
+ dentry);
return;
}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c827acb0e943..13886f7f40d5 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1317,9 +1317,8 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (!fhp->fh_locked) {
/* not actually possible */
printk(KERN_ERR
- "nfsd_create: parent %s/%s not locked!\n",
- dentry->d_parent->d_name.name,
- dentry->d_name.name);
+ "nfsd_create: parent %pd2 not locked!\n",
+ dentry);
err = nfserr_io;
goto out;
}
@@ -1329,8 +1328,8 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
*/
err = nfserr_exist;
if (dchild->d_inode) {
- dprintk("nfsd_create: dentry %s/%s not negative!\n",
- dentry->d_name.name, dchild->d_name.name);
+ dprintk("nfsd_create: dentry %pd/%pd not negative!\n",
+ dentry, dchild);
goto out;
}
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 08fdb77852ac..7aeb8ee01305 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -153,8 +153,8 @@ const struct file_operations nilfs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.unlocked_ioctl = nilfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = nilfs_compat_ioctl,
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 7e350c562e0e..4a99a24b54a2 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -298,8 +298,8 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
}
static ssize_t
-nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs)
+nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -310,7 +310,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
return 0;
/* Needs synchronization with the cleaner */
- size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+ size = blockdev_direct_IO(rw, iocb, inode, iter, offset,
nilfs_get_block);
/*
@@ -319,7 +319,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
*/
if (unlikely((rw & WRITE) && size < 0)) {
loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iov, nr_segs);
+ loff_t end = offset + iov_iter_count(iter);
if (end > isize)
nilfs_write_failed(mapping, end);
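
The iov_iter conversion here replaces the (iov, nr_segs) pair with a single iterator, so the old iov_length() sum becomes iov_iter_count(). A tiny userspace sketch of the length bookkeeping behind the end-of-write calculation above; the hand-rolled total_len() is illustrative and is not the kernel helper.

#include <stdio.h>
#include <sys/uio.h>

/* Hand-rolled equivalent of the old iov_length() sum, for illustration only. */
static size_t total_len(const struct iovec *iov, unsigned long nr_segs)
{
	size_t total = 0;

	for (unsigned long i = 0; i < nr_segs; i++)
		total += iov[i].iov_len;
	return total;
}

int main(void)
{
	char a[512], b[1024];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	size_t count = total_len(iov, 2);	/* what iov_iter_count() would report */
	size_t offset = 4096;			/* start of the direct write */

	printf("failed DIO would have ended at %zu\n", offset + count);
	return 0;
}
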
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 2d8be51f90dc..dc3a9efdaab8 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -416,7 +416,8 @@ static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
}
if (likely(bio)) {
bio->bi_bdev = nilfs->ns_bdev;
- bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
+ bio->bi_iter.bi_sector =
+ start << (nilfs->ns_blocksize_bits - 9);
}
return bio;
}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index f37d3c0e2053..2921dcf300d3 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -616,9 +616,8 @@ static int ocfs2_releasepage(struct page *page, gfp_t wait)
static ssize_t ocfs2_direct_IO(int rw,
struct kiocb *iocb,
- const struct iovec *iov,
- loff_t offset,
- unsigned long nr_segs)
+ struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file)->i_mapping->host;
@@ -635,8 +634,7 @@ static ssize_t ocfs2_direct_IO(int rw,
return 0;
return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
- iov, offset, nr_segs,
- ocfs2_direct_IO_get_blocks,
+ iter, offset, ocfs2_direct_IO_get_blocks,
ocfs2_dio_end_io, NULL, 0);
}
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index f671e49beb34..573f41d1e459 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -74,7 +74,7 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
/*
* Using a named enum representing lock types in terms of #N bit stored in
* iocb->private, which is going to be used for communication between
- * ocfs2_dio_end_io() and ocfs2_file_aio_write/read().
+ * ocfs2_dio_end_io() and ocfs2_file_write/read_iter().
*/
enum ocfs2_iocb_lock_bits {
OCFS2_IOCB_RW_LOCK = 0,
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 363f0dcc924f..a90ad76ae67c 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -421,7 +421,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
}
/* Must put everything in 512 byte sectors for the bio... */
- bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9);
+ bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
bio->bi_bdev = reg->hr_bdev;
bio->bi_private = wc;
bio->bi_end_io = o2hb_bio_end_io;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index d71903c6068b..1d85492684ac 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2217,15 +2217,13 @@ out:
return ret;
}
-static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long nr_segs,
- loff_t pos)
+static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
+ struct iov_iter *iter,
+ loff_t pos)
{
int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
int can_do_direct, has_refcount = 0;
ssize_t written = 0;
- size_t ocount; /* original count */
size_t count; /* after file limit checks */
loff_t old_size, *ppos = &iocb->ki_pos;
u32 old_clusters;
@@ -2236,11 +2234,11 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
OCFS2_MOUNT_COHERENCY_BUFFERED);
int unaligned_dio = 0;
- trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
+ trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
file->f_path.dentry->d_name.len,
file->f_path.dentry->d_name.name,
- (unsigned int)nr_segs);
+ (unsigned long)pos);
if (iocb->ki_nbytes == 0)
return 0;
@@ -2340,28 +2338,24 @@ relock:
/* communicate with ocfs2_dio_end_io */
ocfs2_iocb_set_rw_locked(iocb, rw_level);
- ret = generic_segment_checks(iov, &nr_segs, &ocount,
- VERIFY_READ);
- if (ret)
- goto out_dio;
- count = ocount;
+ count = iov_iter_count(iter);
ret = generic_write_checks(file, ppos, &count,
S_ISBLK(inode->i_mode));
if (ret)
goto out_dio;
if (direct_io) {
- written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
- ppos, count, ocount);
+ written = generic_file_direct_write_iter(iocb, iter, *ppos,
+ ppos, count);
if (written < 0) {
ret = written;
goto out_dio;
}
} else {
current->backing_dev_info = file->f_mapping->backing_dev_info;
- written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
- ppos, count, 0);
+ written = generic_file_buffered_write_iter(iocb, iter, *ppos,
+ ppos, count, 0);
current->backing_dev_info = NULL;
}
@@ -2517,7 +2511,7 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
in->f_path.dentry->d_name.name, len);
/*
- * See the comment in ocfs2_file_aio_read()
+ * See the comment in ocfs2_file_read_iter()
*/
ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level);
if (ret < 0) {
@@ -2532,19 +2526,18 @@ bail:
return ret;
}
-static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long nr_segs,
- loff_t pos)
+static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
+ struct iov_iter *iter,
+ loff_t pos)
{
int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
struct file *filp = iocb->ki_filp;
struct inode *inode = file_inode(filp);
- trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
+ trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
filp->f_path.dentry->d_name.len,
- filp->f_path.dentry->d_name.name, nr_segs);
+ filp->f_path.dentry->d_name.name, pos);
if (!inode) {
@@ -2580,7 +2573,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
*
* Take and drop the meta data lock to update inode fields
* like i_size. This allows the checks down below
- * generic_file_aio_read() a chance of actually working.
+ * generic_file_read_iter() a chance of actually working.
*/
ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
if (ret < 0) {
@@ -2589,13 +2582,13 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
}
ocfs2_inode_unlock(inode, lock_level);
- ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
- trace_generic_file_aio_read_ret(ret);
+ ret = generic_file_read_iter(iocb, iter, iocb->ki_pos);
+ trace_generic_file_read_iter_ret(ret);
/* buffered aio wouldn't have proper lock coverage today */
BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
- /* see ocfs2_file_aio_write */
+ /* see ocfs2_file_write_iter */
if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
rw_level = -1;
have_alloc_sem = 0;
@@ -2683,8 +2676,8 @@ const struct file_operations ocfs2_fops = {
.fsync = ocfs2_sync_file,
.release = ocfs2_file_release,
.open = ocfs2_file_open,
- .aio_read = ocfs2_file_aio_read,
- .aio_write = ocfs2_file_aio_write,
+ .read_iter = ocfs2_file_read_iter,
+ .write_iter = ocfs2_file_write_iter,
.unlocked_ioctl = ocfs2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ocfs2_compat_ioctl,
@@ -2731,8 +2724,8 @@ const struct file_operations ocfs2_fops_no_plocks = {
.fsync = ocfs2_sync_file,
.release = ocfs2_file_release,
.open = ocfs2_file_open,
- .aio_read = ocfs2_file_aio_read,
- .aio_write = ocfs2_file_aio_write,
+ .read_iter = ocfs2_file_read_iter,
+ .write_iter = ocfs2_file_write_iter,
.unlocked_ioctl = ocfs2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ocfs2_compat_ioctl,
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
index 1b60c62aa9d6..67f08ba77260 100644
--- a/fs/ocfs2/ocfs2_trace.h
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -1310,13 +1310,13 @@ DEFINE_OCFS2_FILE_OPS(ocfs2_file_release);
DEFINE_OCFS2_FILE_OPS(ocfs2_sync_file);
-DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_write);
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_write_iter);
DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_write);
DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_read);
-DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_read);
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_read_iter);
DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_truncate_file);
@@ -1474,7 +1474,7 @@ TRACE_EVENT(ocfs2_prepare_inode_for_write,
__entry->direct_io, __entry->has_refcount)
);
-DEFINE_OCFS2_INT_EVENT(generic_file_aio_read_ret);
+DEFINE_OCFS2_INT_EVENT(generic_file_read_iter_ret);
/* End of trace events for fs/ocfs2/file.c. */
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 54d57d6ba68d..0fe505b2cded 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -339,8 +339,8 @@ const struct file_operations omfs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.fsync = generic_file_fsync,
.splice_read = generic_file_splice_read,
diff --git a/fs/proc/array.c b/fs/proc/array.c
index cbd0f1b324b9..1bd2077187fd 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -183,6 +183,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
seq_printf(m,
"State:\t%s\n"
"Tgid:\t%d\n"
+ "Ngid:\t%d\n"
"Pid:\t%d\n"
"PPid:\t%d\n"
"TracerPid:\t%d\n"
@@ -190,6 +191,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
"Gid:\t%d\t%d\t%d\t%d\n",
get_task_state(p),
task_tgid_nr_ns(p, ns),
+ task_numa_group_id(p),
pid_nr_ns(pid, ns),
ppid, tpid,
from_kuid_munged(user_ns, cred->uid),
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index 106a83570630..70779b2fc209 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -14,16 +14,13 @@
#include <linux/of.h>
#include <linux/export.h>
#include <linux/slab.h>
-#include <asm/prom.h>
#include <asm/uaccess.h>
#include "internal.h"
static inline void set_node_proc_entry(struct device_node *np,
struct proc_dir_entry *de)
{
-#ifdef HAVE_ARCH_DEVTREE_FIXUPS
np->pde = de;
-#endif
}
static struct proc_dir_entry *proc_device_tree;
diff --git a/fs/proc/self.c b/fs/proc/self.c
index 6b6a993b5c25..ffeb202ec942 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -36,18 +36,10 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
return NULL;
}
-static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
- void *cookie)
-{
- char *s = nd_get_link(nd);
- if (!IS_ERR(s))
- kfree(s);
-}
-
static const struct inode_operations proc_self_inode_operations = {
.readlink = proc_self_readlink,
.follow_link = proc_self_follow_link,
- .put_link = proc_self_put_link,
+ .put_link = kfree_put_link,
};
static unsigned self_inum;
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index dea86e8967ee..2b363e23f36e 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -117,6 +117,7 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
{
+ memset(dst, 0, sizeof(*dst));
dst->dqb_bhardlimit = src->d_blk_hardlimit;
dst->dqb_bsoftlimit = src->d_blk_softlimit;
dst->dqb_curspace = src->d_bcount;
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c
index 4884ac5ae9be..c4d8572a37df 100644
--- a/fs/ramfs/file-mmu.c
+++ b/fs/ramfs/file-mmu.c
@@ -39,9 +39,9 @@ const struct address_space_operations ramfs_aops = {
const struct file_operations ramfs_file_operations = {
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.fsync = noop_fsync,
.splice_read = generic_file_splice_read,
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 8d5b438cc188..f2487c3cc3f3 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -39,9 +39,9 @@ const struct file_operations ramfs_file_operations = {
.mmap = ramfs_nommu_mmap,
.get_unmapped_area = ramfs_nommu_get_unmapped_area,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .write_iter = generic_file_write_iter,
.fsync = noop_fsync,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
diff --git a/fs/read_write.c b/fs/read_write.c
index e3cd280b158c..296b5711a78b 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -29,7 +29,7 @@ typedef ssize_t (*iov_fn_t)(struct kiocb *, const struct iovec *,
const struct file_operations generic_ro_fops = {
.llseek = generic_file_llseek,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.mmap = generic_file_readonly_mmap,
.splice_read = generic_file_splice_read,
};
@@ -359,6 +359,29 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
}
+ssize_t do_aio_read(struct kiocb *kiocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct file *file = kiocb->ki_filp;
+
+ if (file->f_op->read_iter) {
+ size_t count;
+ struct iov_iter iter;
+ int ret;
+
+ count = 0;
+ ret = generic_segment_checks(iov, &nr_segs, &count,
+ VERIFY_WRITE);
+ if (ret)
+ return ret;
+
+ iov_iter_init(&iter, iov, nr_segs, count, 0);
+ return file->f_op->read_iter(kiocb, &iter, pos);
+ }
+
+ return file->f_op->aio_read(kiocb, iov, nr_segs, pos);
+}
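+
+do_aio_read()/do_aio_write() bridge the transition: filesystems that already provide ->read_iter/->write_iter get an iov_iter built for them, while unconverted ones keep going through ->aio_read/->aio_write. A hedged userspace sketch of that "prefer the new hook, fall back to the old one" dispatch; the demo_file_ops structure and handlers are invented for illustration.
+
+#include <stdio.h>
+
+struct demo_file_ops {
+	long (*read_iter)(const char *what);	/* new-style hook  */
+	long (*aio_read)(const char *what);	/* legacy aio hook */
+};
+
+static long demo_read_iter(const char *what) { printf("read_iter: %s\n", what); return 0; }
+static long demo_aio_read(const char *what)  { printf("aio_read:  %s\n", what); return 0; }
+
+/* Prefer the iterator-based hook; fall back to the legacy one otherwise. */
+static long demo_do_read(const struct demo_file_ops *ops, const char *what)
+{
+	if (ops->read_iter)
+		return ops->read_iter(what);
+	return ops->aio_read(what);
+}
+
+int main(void)
+{
+	struct demo_file_ops converted   = { .read_iter = demo_read_iter };
+	struct demo_file_ops unconverted = { .aio_read  = demo_aio_read  };
+
+	demo_do_read(&converted, "file on a converted filesystem");
+	demo_do_read(&unconverted, "file on an unconverted filesystem");
+	return 0;
+}
+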
+
ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
struct iovec iov = { .iov_base = buf, .iov_len = len };
@@ -369,7 +392,7 @@ ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *pp
kiocb.ki_pos = *ppos;
kiocb.ki_nbytes = len;
- ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
+ ret = do_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
if (-EIOCBQUEUED == ret)
ret = wait_on_sync_kiocb(&kiocb);
*ppos = kiocb.ki_pos;
@@ -384,7 +407,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
if (!(file->f_mode & FMODE_READ))
return -EBADF;
- if (!file->f_op || (!file->f_op->read && !file->f_op->aio_read))
+ if (!file_readable(file))
return -EINVAL;
if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
return -EFAULT;
@@ -408,6 +431,29 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
EXPORT_SYMBOL(vfs_read);
+ssize_t do_aio_write(struct kiocb *kiocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct file *file = kiocb->ki_filp;
+
+ if (file->f_op->write_iter) {
+ size_t count;
+ struct iov_iter iter;
+ int ret;
+
+ count = 0;
+ ret = generic_segment_checks(iov, &nr_segs, &count,
+ VERIFY_READ);
+ if (ret)
+ return ret;
+
+ iov_iter_init(&iter, iov, nr_segs, count, 0);
+ return file->f_op->write_iter(kiocb, &iter, pos);
+ }
+
+ return file->f_op->aio_write(kiocb, iov, nr_segs, pos);
+}
+
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
@@ -418,7 +464,7 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
kiocb.ki_pos = *ppos;
kiocb.ki_nbytes = len;
- ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
+ ret = do_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
if (-EIOCBQUEUED == ret)
ret = wait_on_sync_kiocb(&kiocb);
*ppos = kiocb.ki_pos;
@@ -433,7 +479,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
const char __user *p;
ssize_t ret;
- if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+ if (!file_writable(file))
return -EINVAL;
old_fs = get_fs();
@@ -460,7 +506,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
if (!(file->f_mode & FMODE_WRITE))
return -EBADF;
- if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+ if (!file_writable(file))
return -EINVAL;
if (unlikely(!access_ok(VERIFY_READ, buf, count)))
return -EFAULT;
@@ -745,10 +791,12 @@ static ssize_t do_readv_writev(int type, struct file *file,
fnv = NULL;
if (type == READ) {
fn = file->f_op->read;
- fnv = file->f_op->aio_read;
+ if (file->f_op->aio_read || file->f_op->read_iter)
+ fnv = do_aio_read;
} else {
fn = (io_fn_t)file->f_op->write;
- fnv = file->f_op->aio_write;
+ if (file->f_op->aio_write || file->f_op->write_iter)
+ fnv = do_aio_write;
file_start_write(file);
}
@@ -778,7 +826,7 @@ ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
{
if (!(file->f_mode & FMODE_READ))
return -EBADF;
- if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
+ if (!file_readable(file))
return -EINVAL;
return do_readv_writev(READ, file, vec, vlen, pos);
@@ -791,7 +839,7 @@ ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
{
if (!(file->f_mode & FMODE_WRITE))
return -EBADF;
- if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
+ if (!file_writable(file))
return -EINVAL;
return do_readv_writev(WRITE, file, vec, vlen, pos);
@@ -927,10 +975,12 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
fnv = NULL;
if (type == READ) {
fn = file->f_op->read;
- fnv = file->f_op->aio_read;
+ if (file->f_op->aio_read || file->f_op->read_iter)
+ fnv = do_aio_read;
} else {
fn = (io_fn_t)file->f_op->write;
- fnv = file->f_op->aio_write;
+ if (file->f_op->aio_write || file->f_op->write_iter)
+ fnv = do_aio_write;
file_start_write(file);
}
@@ -965,7 +1015,7 @@ static size_t compat_readv(struct file *file,
goto out;
ret = -EINVAL;
- if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
+ if (!file_readable(file))
goto out;
ret = compat_do_readv_writev(READ, file, vec, vlen, pos);
@@ -1032,7 +1082,7 @@ static size_t compat_writev(struct file *file,
goto out;
ret = -EINVAL;
- if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
+ if (!file_writable(file))
goto out;
ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos);
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index dcaafcfc23b0..f98feb229ec4 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -245,8 +245,8 @@ const struct file_operations reiserfs_file_operations = {
.open = reiserfs_file_open,
.release = reiserfs_file_release,
.fsync = reiserfs_sync_file,
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
.llseek = generic_file_llseek,
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index ad62bdbb451e..6d652af02c5b 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3083,14 +3083,13 @@ static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
/* We thank Mingming Cao for helping us understand in great detail what
to do in this section of the code. */
static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset,
- unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+ ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
reiserfs_get_blocks_direct_io);
/*
@@ -3099,7 +3098,7 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
*/
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iov, nr_segs);
+ loff_t end = offset + iov_iter_count(iter);
if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
truncate_setsize(inode, isize);
diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c
index f373bde8f545..f8a9e2bf8d8b 100644
--- a/fs/romfs/mmap-nommu.c
+++ b/fs/romfs/mmap-nommu.c
@@ -73,7 +73,7 @@ static int romfs_mmap(struct file *file, struct vm_area_struct *vma)
const struct file_operations romfs_ro_fops = {
.llseek = generic_file_llseek,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.splice_read = generic_file_splice_read,
.mmap = romfs_mmap,
.get_unmapped_area = romfs_get_unmapped_area,
diff --git a/fs/select.c b/fs/select.c
index 35d4adc749d9..dfd5cb18c012 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -238,8 +238,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
set_current_state(state);
if (!pwq->triggered)
- rc = freezable_schedule_hrtimeout_range(expires, slack,
- HRTIMER_MODE_ABS);
+ rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
__set_current_state(TASK_RUNNING);
/*
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 3135c2525c76..a290157265ef 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -328,6 +328,8 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence)
m->read_pos = offset;
retval = file->f_pos = offset;
}
+ } else {
+ file->f_pos = offset;
}
}
file->f_version = m->version;
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index c70111ebefd4..1c6d340fc61f 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -63,6 +63,19 @@ config SQUASHFS_LZO
If unsure, say N.
+config SQUASHFS_MULTI_DECOMPRESSOR
+ bool "Use multiple decompressors for handling parallel I/O"
+ depends on SQUASHFS
+ help
+ By default Squashfs uses a single decompressor, which performs poorly
+ on parallel I/O workloads on multi-CPU machines because readers must
+ wait for the decompressor to become available.
+
+ If you have a parallel I/O workload and your system has enough memory,
+ using this option may improve overall I/O performance.
+
+ If unsure, say N.
+
config SQUASHFS_XZ
bool "Include support for XZ compressed file systems"
depends on SQUASHFS
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 110b0476f3b4..dfebc3b12d61 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -5,7 +5,14 @@
obj-$(CONFIG_SQUASHFS) += squashfs.o
squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
squashfs-y += namei.o super.o symlink.o decompressor.o
+
squashfs-$(CONFIG_SQUASHFS_XATTR) += xattr.o xattr_id.o
squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o
squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o
squashfs-$(CONFIG_SQUASHFS_ZLIB) += zlib_wrapper.o
+
+ifdef CONFIG_SQUASHFS_MULTI_DECOMPRESSOR
+ squashfs-y += decompressor_multi.o
+else
+ squashfs-y += decompressor_single.o
+endif
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 41d108ecc9be..4dd402597f22 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -93,7 +93,7 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
struct buffer_head **bh;
int offset = index & ((1 << msblk->devblksize_log2) - 1);
u64 cur_index = index >> msblk->devblksize_log2;
- int bytes, compressed, b = 0, k = 0, page = 0, avail;
+ int bytes, compressed, b = 0, k = 0, page = 0, avail, i;
bh = kcalloc(((srclength + msblk->devblksize - 1)
>> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
@@ -158,6 +158,12 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
ll_rw_block(READ, b - 1, bh + 1);
}
+ for (i = 0; i < b; i++) {
+ wait_on_buffer(bh[i]);
+ if (!buffer_uptodate(bh[i]))
+ goto block_release;
+ }
+
if (compressed) {
length = squashfs_decompress(msblk, buffer, bh, b, offset,
length, srclength, pages);
@@ -172,9 +178,6 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
for (bytes = length; k < b; k++) {
in = min(bytes, msblk->devblksize - offset);
bytes -= in;
- wait_on_buffer(bh[k]);
- if (!buffer_uptodate(bh[k]))
- goto block_release;
while (in) {
if (pg_offset == PAGE_CACHE_SIZE) {
page++;
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index 3f6271d86abc..234291f79ba5 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -37,29 +37,29 @@
*/
static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = {
- NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
+ NULL, NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
};
#ifndef CONFIG_SQUASHFS_LZO
static const struct squashfs_decompressor squashfs_lzo_comp_ops = {
- NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
+ NULL, NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
};
#endif
#ifndef CONFIG_SQUASHFS_XZ
static const struct squashfs_decompressor squashfs_xz_comp_ops = {
- NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0
+ NULL, NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0
};
#endif
#ifndef CONFIG_SQUASHFS_ZLIB
static const struct squashfs_decompressor squashfs_zlib_comp_ops = {
- NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0
+ NULL, NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0
};
#endif
static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
- NULL, NULL, NULL, 0, "unknown", 0
+ NULL, NULL, NULL, NULL, 0, "unknown", 0
};
static const struct squashfs_decompressor *decompressor[] = {
@@ -83,10 +83,10 @@ const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
}
-void *squashfs_decompressor_init(struct super_block *sb, unsigned short flags)
+static void *get_comp_opts(struct super_block *sb, unsigned short flags)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
- void *strm, *buffer = NULL;
+ void *buffer = NULL, *comp_opts;
int length = 0;
/*
@@ -94,23 +94,40 @@ void *squashfs_decompressor_init(struct super_block *sb, unsigned short flags)
*/
if (SQUASHFS_COMP_OPTS(flags)) {
buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
- if (buffer == NULL)
- return ERR_PTR(-ENOMEM);
+ if (buffer == NULL) {
+ comp_opts = ERR_PTR(-ENOMEM);
+ goto out;
+ }
length = squashfs_read_data(sb, &buffer,
sizeof(struct squashfs_super_block), 0, NULL,
- PAGE_CACHE_SIZE, 1);
+ PAGE_CACHE_SIZE, 1);
if (length < 0) {
- strm = ERR_PTR(length);
- goto finished;
+ comp_opts = ERR_PTR(length);
+ goto out;
}
}
- strm = msblk->decompressor->init(msblk, buffer, length);
+ comp_opts = squashfs_comp_opts(msblk, buffer, length);
-finished:
+out:
kfree(buffer);
+ return comp_opts;
+}
+
+
+void *squashfs_decompressor_setup(struct super_block *sb, unsigned short flags)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ void *stream, *comp_opts = get_comp_opts(sb, flags);
+
+ if (IS_ERR(comp_opts))
+ return comp_opts;
+
+ stream = squashfs_decompressor_create(msblk, comp_opts);
+ if (IS_ERR(stream))
+ kfree(comp_opts);
- return strm;
+ return stream;
}
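
squashfs_decompressor_setup() reads any on-disk compressor options and hands them to squashfs_decompressor_create(); on success the stream owns comp_opts (freed later at destroy time), on failure they are freed here. A minimal sketch of that ownership rule, with invented setup()/create_stream() helpers rather than the squashfs API.

#include <stdlib.h>

struct demo_stream { void *opts; };

static struct demo_stream *create_stream(void *opts)
{
	struct demo_stream *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	s->opts = opts;		/* on success the stream owns the options */
	return s;
}

static struct demo_stream *setup(void *opts)
{
	struct demo_stream *s = create_stream(opts);

	if (!s)
		free(opts);	/* on failure, setup cleans the options up */
	return s;
}

int main(void)
{
	struct demo_stream *s = setup(malloc(16));

	if (s) {
		free(s->opts);	/* normally done at destroy time */
		free(s);
	}
	return 0;
}
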
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index 330073e29029..6cdb20a3878a 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -24,28 +24,21 @@
*/
struct squashfs_decompressor {
- void *(*init)(struct squashfs_sb_info *, void *, int);
+ void *(*init)(struct squashfs_sb_info *, void *);
+ void *(*comp_opts)(struct squashfs_sb_info *, void *, int);
void (*free)(void *);
- int (*decompress)(struct squashfs_sb_info *, void **,
+ int (*decompress)(struct squashfs_sb_info *, void *, void **,
struct buffer_head **, int, int, int, int, int);
int id;
char *name;
int supported;
};
-static inline void squashfs_decompressor_free(struct squashfs_sb_info *msblk,
- void *s)
+static inline void *squashfs_comp_opts(struct squashfs_sb_info *msblk,
+ void *buff, int length)
{
- if (msblk->decompressor)
- msblk->decompressor->free(s);
-}
-
-static inline int squashfs_decompress(struct squashfs_sb_info *msblk,
- void **buffer, struct buffer_head **bh, int b, int offset, int length,
- int srclength, int pages)
-{
- return msblk->decompressor->decompress(msblk, buffer, bh, b, offset,
- length, srclength, pages);
+ return msblk->decompressor->comp_opts ?
+ msblk->decompressor->comp_opts(msblk, buff, length) : NULL;
}
#ifdef CONFIG_SQUASHFS_XZ
diff --git a/fs/squashfs/decompressor_multi.c b/fs/squashfs/decompressor_multi.c
new file mode 100644
index 000000000000..462731db5130
--- /dev/null
+++ b/fs/squashfs/decompressor_multi.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2013
+ * Minchan Kim <minchan@kernel.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/cpumask.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
+#include "squashfs.h"
+
+/*
+ * This file implements multi-threaded decompression in the
+ * decompressor framework.
+ */
+
+
+/*
+ * The reason that multiply two is that a CPU can request new I/O
+ * while it is waiting previous request.
+ */
+#define MAX_DECOMPRESSOR (num_online_cpus() * 2)
+
+
+int squashfs_max_decompressors(void)
+{
+ return MAX_DECOMPRESSOR;
+}
+
+
+struct squashfs_stream {
+ void *comp_opts;
+ struct list_head strm_list;
+ struct mutex mutex;
+ int avail_decomp;
+ wait_queue_head_t wait;
+};
+
+
+struct decomp_stream {
+ void *stream;
+ struct list_head list;
+};
+
+
+static void put_decomp_stream(struct decomp_stream *decomp_strm,
+ struct squashfs_stream *stream)
+{
+ mutex_lock(&stream->mutex);
+ list_add(&decomp_strm->list, &stream->strm_list);
+ mutex_unlock(&stream->mutex);
+ wake_up(&stream->wait);
+}
+
+void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
+ void *comp_opts)
+{
+ struct squashfs_stream *stream;
+ struct decomp_stream *decomp_strm = NULL;
+ int err = -ENOMEM;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ goto out;
+
+ stream->comp_opts = comp_opts;
+ mutex_init(&stream->mutex);
+ INIT_LIST_HEAD(&stream->strm_list);
+ init_waitqueue_head(&stream->wait);
+
+ /*
+ * We should always have at least one decompressor available as a
+ * default, so that if we fail to allocate a new decompressor
+ * dynamically we can fall back to it and the filesystem keeps
+ * working.
+ */
+ decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
+ if (!decomp_strm)
+ goto out;
+
+ decomp_strm->stream = msblk->decompressor->init(msblk,
+ stream->comp_opts);
+ if (IS_ERR(decomp_strm->stream)) {
+ err = PTR_ERR(decomp_strm->stream);
+ goto out;
+ }
+
+ list_add(&decomp_strm->list, &stream->strm_list);
+ stream->avail_decomp = 1;
+ return stream;
+
+out:
+ kfree(decomp_strm);
+ kfree(stream);
+ return ERR_PTR(err);
+}
+
+
+void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
+{
+ struct squashfs_stream *stream = msblk->stream;
+ if (stream) {
+ struct decomp_stream *decomp_strm;
+
+ while (!list_empty(&stream->strm_list)) {
+ decomp_strm = list_entry(stream->strm_list.prev,
+ struct decomp_stream, list);
+ list_del(&decomp_strm->list);
+ msblk->decompressor->free(decomp_strm->stream);
+ kfree(decomp_strm);
+ stream->avail_decomp--;
+ }
+
+ WARN_ON(stream->avail_decomp);
+ kfree(stream->comp_opts);
+ kfree(stream);
+ }
+}
+
+
+static struct decomp_stream *get_decomp_stream(struct squashfs_sb_info *msblk,
+ struct squashfs_stream *stream)
+{
+ struct decomp_stream *decomp_strm;
+
+ while (1) {
+ mutex_lock(&stream->mutex);
+
+ /* There is an available decomp_stream */
+ if (!list_empty(&stream->strm_list)) {
+ decomp_strm = list_entry(stream->strm_list.prev,
+ struct decomp_stream, list);
+ list_del(&decomp_strm->list);
+ mutex_unlock(&stream->mutex);
+ break;
+ }
+
+ /*
+ * If no decomp_stream is available and we are already at the
+ * limit, wait for another user to release one.
+ */
+ if (stream->avail_decomp >= MAX_DECOMPRESSOR)
+ goto wait;
+
+ /* Allocate a new decomp_stream */
+ decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
+ if (!decomp_strm)
+ goto wait;
+
+ decomp_strm->stream = msblk->decompressor->init(msblk,
+ stream->comp_opts);
+ if (IS_ERR(decomp_strm->stream)) {
+ kfree(decomp_strm);
+ goto wait;
+ }
+
+ stream->avail_decomp++;
+ WARN_ON(stream->avail_decomp > MAX_DECOMPRESSOR);
+
+ mutex_unlock(&stream->mutex);
+ break;
+wait:
+ /*
+ * If system memory is tight, wait for another user to release a
+ * decomp_stream instead of pressuring the VM further, which could
+ * cause page cache thrashing.
+ */
+ mutex_unlock(&stream->mutex);
+ wait_event(stream->wait,
+ !list_empty(&stream->strm_list));
+ }
+
+ return decomp_strm;
+}
+
+
+int squashfs_decompress(struct squashfs_sb_info *msblk,
+ void **buffer, struct buffer_head **bh, int b, int offset, int length,
+ int srclength, int pages)
+{
+ int res;
+ struct squashfs_stream *stream = msblk->stream;
+ struct decomp_stream *decomp_stream = get_decomp_stream(msblk, stream);
+ res = msblk->decompressor->decompress(msblk, decomp_stream->stream,
+ buffer, bh, b, offset, length, srclength, pages);
+ put_decomp_stream(decomp_stream, stream);
+ if (res < 0)
+ ERROR("%s decompression failed, data probably corrupt\n",
+ msblk->decompressor->name);
+ return res;
+}
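Stepping back from the kernel specifics, get_decomp_stream()/put_decomp_stream() above implement a bounded, lazily-grown object pool: reuse an idle stream if one exists, create a new one while still under the cap, otherwise sleep until another user returns a stream. A minimal user-space sketch of the same discipline, with hypothetical names and a pthread condition variable standing in for the wait queue used above:

#include <pthread.h>
#include <stdlib.h>

struct pool {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	void **idle;		/* idle streams, stack-style */
	int nidle;		/* number of idle streams */
	int total;		/* streams created so far */
	int cap;		/* upper bound (MAX_DECOMPRESSOR above) */
	void *(*create_stream)(void);
};

static int pool_init(struct pool *p, int cap, void *(*create_stream)(void))
{
	p->idle = calloc(cap, sizeof(*p->idle));
	if (!p->idle)
		return -1;
	pthread_mutex_init(&p->lock, NULL);
	pthread_cond_init(&p->cond, NULL);
	p->nidle = p->total = 0;
	p->cap = cap;
	p->create_stream = create_stream;
	return 0;
}

/* Mirrors get_decomp_stream(): reuse, grow lazily up to cap, else sleep. */
static void *pool_get(struct pool *p)
{
	void *s;

	pthread_mutex_lock(&p->lock);
	for (;;) {
		if (p->nidle) {
			s = p->idle[--p->nidle];
			break;
		}
		if (p->total < p->cap && (s = p->create_stream()) != NULL) {
			p->total++;
			break;
		}
		/* pool full or allocation failed: wait for pool_put() */
		pthread_cond_wait(&p->cond, &p->lock);
	}
	pthread_mutex_unlock(&p->lock);
	return s;
}

/* Mirrors put_decomp_stream(): return the stream and wake one waiter. */
static void pool_put(struct pool *p, void *s)
{
	pthread_mutex_lock(&p->lock);
	p->idle[p->nidle++] = s;
	pthread_mutex_unlock(&p->lock);
	pthread_cond_signal(&p->cond);
}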
diff --git a/fs/squashfs/decompressor_single.c b/fs/squashfs/decompressor_single.c
new file mode 100644
index 000000000000..c5bb69422f6a
--- /dev/null
+++ b/fs/squashfs/decompressor_single.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
+#include "squashfs.h"
+
+/*
+ * This file implements single-threaded decompression in the
+ * decompressor framework
+ */
+
+struct squashfs_stream {
+ void *stream;
+ struct mutex mutex;
+};
+
+void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
+ void *comp_opts)
+{
+ struct squashfs_stream *stream;
+ int err = -ENOMEM;
+
+ stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ if (stream == NULL)
+ goto out;
+
+ stream->stream = msblk->decompressor->init(msblk, comp_opts);
+ if (IS_ERR(stream->stream)) {
+ err = PTR_ERR(stream->stream);
+ goto out;
+ }
+ kfree(comp_opts);
+
+ mutex_init(&stream->mutex);
+ return stream;
+
+out:
+ kfree(stream);
+ return ERR_PTR(err);
+}
+
+void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
+{
+ struct squashfs_stream *stream = msblk->stream;
+
+ if (stream) {
+ msblk->decompressor->free(stream->stream);
+ kfree(stream);
+ }
+}
+
+int squashfs_decompress(struct squashfs_sb_info *msblk,
+ void **buffer, struct buffer_head **bh, int b, int offset, int length,
+ int srclength, int pages)
+{
+ int res;
+ struct squashfs_stream *stream = msblk->stream;
+
+ mutex_lock(&stream->mutex);
+ res = msblk->decompressor->decompress(msblk, stream->stream, buffer,
+ bh, b, offset, length, srclength, pages);
+ mutex_unlock(&stream->mutex);
+
+ if (res < 0)
+ ERROR("%s decompression failed, data probably corrupt\n",
+ msblk->decompressor->name);
+
+ return res;
+}
+
+int squashfs_max_decompressors(void)
+{
+ return 1;
+}
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 00f4dfc5f088..75c3b5779172 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -37,7 +37,7 @@ struct squashfs_lzo {
void *output;
};
-static void *lzo_init(struct squashfs_sb_info *msblk, void *buff, int len)
+static void *lzo_init(struct squashfs_sb_info *msblk, void *buff)
{
int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
@@ -74,22 +74,16 @@ static void lzo_free(void *strm)
}
-static int lzo_uncompress(struct squashfs_sb_info *msblk, void **buffer,
- struct buffer_head **bh, int b, int offset, int length, int srclength,
- int pages)
+static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ void **buffer, struct buffer_head **bh, int b, int offset, int length,
+ int srclength, int pages)
{
- struct squashfs_lzo *stream = msblk->stream;
+ struct squashfs_lzo *stream = strm;
void *buff = stream->input;
int avail, i, bytes = length, res;
size_t out_len = srclength;
- mutex_lock(&msblk->read_data_mutex);
-
for (i = 0; i < b; i++) {
- wait_on_buffer(bh[i]);
- if (!buffer_uptodate(bh[i]))
- goto block_release;
-
avail = min(bytes, msblk->devblksize - offset);
memcpy(buff, bh[i]->b_data + offset, avail);
buff += avail;
@@ -111,17 +105,9 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void **buffer,
bytes -= avail;
}
- mutex_unlock(&msblk->read_data_mutex);
return res;
-block_release:
- for (; i < b; i++)
- put_bh(bh[i]);
-
failed:
- mutex_unlock(&msblk->read_data_mutex);
-
- ERROR("lzo decompression failed, data probably corrupt\n");
return -EIO;
}
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index d1266516ed08..2e2751df8452 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -48,7 +48,14 @@ extern void *squashfs_read_table(struct super_block *, u64, int);
/* decompressor.c */
extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int);
-extern void *squashfs_decompressor_init(struct super_block *, unsigned short);
+extern void *squashfs_decompressor_setup(struct super_block *, unsigned short);
+
+/* decompressor_xxx.c */
+extern void *squashfs_decompressor_create(struct squashfs_sb_info *, void *);
+extern void squashfs_decompressor_destroy(struct squashfs_sb_info *);
+extern int squashfs_decompress(struct squashfs_sb_info *, void **,
+ struct buffer_head **, int, int, int, int, int);
+extern int squashfs_max_decompressors(void);
/* export.c */
extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, u64,
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 52934a22f296..9cdcf4150d59 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -63,10 +63,9 @@ struct squashfs_sb_info {
__le64 *id_table;
__le64 *fragment_index;
__le64 *xattr_id_table;
- struct mutex read_data_mutex;
struct mutex meta_index_mutex;
struct meta_index *meta_index;
- void *stream;
+ struct squashfs_stream *stream;
__le64 *inode_lookup_table;
u64 inode_table;
u64 directory_table;
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 60553a9053ca..202df6312d4e 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -98,7 +98,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
msblk->devblksize_log2 = ffz(~msblk->devblksize);
- mutex_init(&msblk->read_data_mutex);
mutex_init(&msblk->meta_index_mutex);
/*
@@ -206,13 +205,14 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
/* Allocate read_page block */
- msblk->read_page = squashfs_cache_init("data", 1, msblk->block_size);
+ msblk->read_page = squashfs_cache_init("data",
+ squashfs_max_decompressors(), msblk->block_size);
if (msblk->read_page == NULL) {
ERROR("Failed to allocate read_page block\n");
goto failed_mount;
}
- msblk->stream = squashfs_decompressor_init(sb, flags);
+ msblk->stream = squashfs_decompressor_setup(sb, flags);
if (IS_ERR(msblk->stream)) {
err = PTR_ERR(msblk->stream);
msblk->stream = NULL;
@@ -336,7 +336,7 @@ failed_mount:
squashfs_cache_delete(msblk->block_cache);
squashfs_cache_delete(msblk->fragment_cache);
squashfs_cache_delete(msblk->read_page);
- squashfs_decompressor_free(msblk, msblk->stream);
+ squashfs_decompressor_destroy(msblk);
kfree(msblk->inode_lookup_table);
kfree(msblk->fragment_index);
kfree(msblk->id_table);
@@ -383,7 +383,7 @@ static void squashfs_put_super(struct super_block *sb)
squashfs_cache_delete(sbi->block_cache);
squashfs_cache_delete(sbi->fragment_cache);
squashfs_cache_delete(sbi->read_page);
- squashfs_decompressor_free(sbi, sbi->stream);
+ squashfs_decompressor_destroy(sbi);
kfree(sbi->id_table);
kfree(sbi->fragment_index);
kfree(sbi->meta_index);
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index 1760b7d108f6..5d1d07cca6b4 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -38,38 +38,63 @@ struct squashfs_xz {
struct xz_buf buf;
};
-struct comp_opts {
+struct disk_comp_opts {
__le32 dictionary_size;
__le32 flags;
};
-static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff,
- int len)
+struct comp_opts {
+ int dict_size;
+};
+
+static void *squashfs_xz_comp_opts(struct squashfs_sb_info *msblk,
+ void *buff, int len)
{
- struct comp_opts *comp_opts = buff;
- struct squashfs_xz *stream;
- int dict_size = msblk->block_size;
- int err, n;
+ struct disk_comp_opts *comp_opts = buff;
+ struct comp_opts *opts;
+ int err = 0, n;
+
+ opts = kmalloc(sizeof(*opts), GFP_KERNEL);
+ if (opts == NULL) {
+ err = -ENOMEM;
+ goto out2;
+ }
if (comp_opts) {
/* check compressor options are the expected length */
if (len < sizeof(*comp_opts)) {
err = -EIO;
- goto failed;
+ goto out;
}
- dict_size = le32_to_cpu(comp_opts->dictionary_size);
+ opts->dict_size = le32_to_cpu(comp_opts->dictionary_size);
/* the dictionary size should be 2^n or 2^n+2^(n+1) */
- n = ffs(dict_size) - 1;
- if (dict_size != (1 << n) && dict_size != (1 << n) +
+ n = ffs(opts->dict_size) - 1;
+ if (opts->dict_size != (1 << n) && opts->dict_size != (1 << n) +
(1 << (n + 1))) {
err = -EIO;
- goto failed;
+ goto out;
}
- }
+ } else
+ /* use defaults */
+ opts->dict_size = max_t(int, msblk->block_size,
+ SQUASHFS_METADATA_SIZE);
- dict_size = max_t(int, dict_size, SQUASHFS_METADATA_SIZE);
+ return opts;
+
+out:
+ kfree(opts);
+out2:
+ return ERR_PTR(err);
+}
+
+
+static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff)
+{
+ struct comp_opts *comp_opts = buff;
+ struct squashfs_xz *stream;
+ int err;
stream = kmalloc(sizeof(*stream), GFP_KERNEL);
if (stream == NULL) {
@@ -77,7 +102,7 @@ static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff,
goto failed;
}
- stream->state = xz_dec_init(XZ_PREALLOC, dict_size);
+ stream->state = xz_dec_init(XZ_PREALLOC, comp_opts->dict_size);
if (stream->state == NULL) {
kfree(stream);
err = -ENOMEM;
@@ -103,15 +128,13 @@ static void squashfs_xz_free(void *strm)
}
-static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer,
- struct buffer_head **bh, int b, int offset, int length, int srclength,
- int pages)
+static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ void **buffer, struct buffer_head **bh, int b, int offset, int length,
+ int srclength, int pages)
{
enum xz_ret xz_err;
int avail, total = 0, k = 0, page = 0;
- struct squashfs_xz *stream = msblk->stream;
-
- mutex_lock(&msblk->read_data_mutex);
+ struct squashfs_xz *stream = strm;
xz_dec_reset(stream->state);
stream->buf.in_pos = 0;
@@ -124,10 +147,6 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer,
if (stream->buf.in_pos == stream->buf.in_size && k < b) {
avail = min(length, msblk->devblksize - offset);
length -= avail;
- wait_on_buffer(bh[k]);
- if (!buffer_uptodate(bh[k]))
- goto release_mutex;
-
stream->buf.in = bh[k]->b_data + offset;
stream->buf.in_size = avail;
stream->buf.in_pos = 0;
@@ -147,23 +166,12 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer,
put_bh(bh[k++]);
} while (xz_err == XZ_OK);
- if (xz_err != XZ_STREAM_END) {
- ERROR("xz_dec_run error, data probably corrupt\n");
- goto release_mutex;
- }
-
- if (k < b) {
- ERROR("xz_uncompress error, input remaining\n");
- goto release_mutex;
- }
-
- total += stream->buf.out_pos;
- mutex_unlock(&msblk->read_data_mutex);
- return total;
+ if (xz_err != XZ_STREAM_END || k < b)
+ goto out;
-release_mutex:
- mutex_unlock(&msblk->read_data_mutex);
+ return total + stream->buf.out_pos;
+out:
for (; k < b; k++)
put_bh(bh[k]);
@@ -172,6 +180,7 @@ release_mutex:
const struct squashfs_decompressor squashfs_xz_comp_ops = {
.init = squashfs_xz_init,
+ .comp_opts = squashfs_xz_comp_opts,
.free = squashfs_xz_free,
.decompress = squashfs_xz_uncompress,
.id = XZ_COMPRESSION,
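As an aside on the dictionary-size validation in squashfs_xz_comp_opts() above: the accepted sizes are exactly 2^n or 2^n + 2^(n+1), i.e. 3 * 2^n. A tiny stand-alone restatement of that predicate; the helper name is ours, and POSIX ffs() from <strings.h> takes the place of the kernel's ffs():

#include <stdbool.h>
#include <strings.h>	/* POSIX ffs() */

/* Accept dictionary sizes of the form 2^n or 2^n + 2^(n+1) (== 3 * 2^n). */
static bool xz_dict_size_valid(int dict_size)
{
	int n;

	if (dict_size <= 0)
		return false;

	n = ffs(dict_size) - 1;		/* index of the lowest set bit */
	return dict_size == (1 << n) ||
	       dict_size == (1 << n) + (1 << (n + 1));
}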
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 55d918fd2d86..bb049027d15c 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -33,7 +33,7 @@
#include "squashfs.h"
#include "decompressor.h"
-static void *zlib_init(struct squashfs_sb_info *dummy, void *buff, int len)
+static void *zlib_init(struct squashfs_sb_info *dummy, void *buff)
{
z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL);
if (stream == NULL)
@@ -61,15 +61,13 @@ static void zlib_free(void *strm)
}
-static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
- struct buffer_head **bh, int b, int offset, int length, int srclength,
- int pages)
+static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ void **buffer, struct buffer_head **bh, int b, int offset, int length,
+ int srclength, int pages)
{
int zlib_err, zlib_init = 0;
int k = 0, page = 0;
- z_stream *stream = msblk->stream;
-
- mutex_lock(&msblk->read_data_mutex);
+ z_stream *stream = strm;
stream->avail_out = 0;
stream->avail_in = 0;
@@ -78,10 +76,6 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
if (stream->avail_in == 0 && k < b) {
int avail = min(length, msblk->devblksize - offset);
length -= avail;
- wait_on_buffer(bh[k]);
- if (!buffer_uptodate(bh[k]))
- goto release_mutex;
-
stream->next_in = bh[k]->b_data + offset;
stream->avail_in = avail;
offset = 0;
@@ -94,12 +88,8 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
if (!zlib_init) {
zlib_err = zlib_inflateInit(stream);
- if (zlib_err != Z_OK) {
- ERROR("zlib_inflateInit returned unexpected "
- "result 0x%x, srclength %d\n",
- zlib_err, srclength);
- goto release_mutex;
- }
+ if (zlib_err != Z_OK)
+ goto out;
zlib_init = 1;
}
@@ -109,29 +99,19 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
put_bh(bh[k++]);
} while (zlib_err == Z_OK);
- if (zlib_err != Z_STREAM_END) {
- ERROR("zlib_inflate error, data probably corrupt\n");
- goto release_mutex;
- }
+ if (zlib_err != Z_STREAM_END)
+ goto out;
zlib_err = zlib_inflateEnd(stream);
- if (zlib_err != Z_OK) {
- ERROR("zlib_inflate error, data probably corrupt\n");
- goto release_mutex;
- }
-
- if (k < b) {
- ERROR("zlib_uncompress error, data remaining\n");
- goto release_mutex;
- }
+ if (zlib_err != Z_OK)
+ goto out;
- length = stream->total_out;
- mutex_unlock(&msblk->read_data_mutex);
- return length;
+ if (k < b)
+ goto out;
-release_mutex:
- mutex_unlock(&msblk->read_data_mutex);
+ return stream->total_out;
+out:
for (; k < b; k++)
put_bh(bh[k]);
diff --git a/fs/sysfs/Makefile b/fs/sysfs/Makefile
index 7a1ceb946b80..8876ac183373 100644
--- a/fs/sysfs/Makefile
+++ b/fs/sysfs/Makefile
@@ -2,5 +2,4 @@
# Makefile for the sysfs virtual filesystem
#
-obj-y := inode.o file.o dir.o symlink.o mount.o bin.o \
- group.o
+obj-y := inode.o file.o dir.o symlink.o mount.o group.o
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
deleted file mode 100644
index c590cabd57bb..000000000000
--- a/fs/sysfs/bin.c
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * fs/sysfs/bin.c - sysfs binary file implementation
- *
- * Copyright (c) 2003 Patrick Mochel
- * Copyright (c) 2003 Matthew Wilcox
- * Copyright (c) 2004 Silicon Graphics, Inc.
- * Copyright (c) 2007 SUSE Linux Products GmbH
- * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
- *
- * This file is released under the GPLv2.
- *
- * Please see Documentation/filesystems/sysfs.txt for more information.
- */
-
-#undef DEBUG
-
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/kobject.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-
-#include "sysfs.h"
-
-/*
- * There's one bin_buffer for each open file.
- *
- * filp->private_data points to bin_buffer and
- * sysfs_dirent->s_bin_attr.buffers points to a the bin_buffer s
- * sysfs_dirent->s_bin_attr.buffers is protected by sysfs_bin_lock
- */
-static DEFINE_MUTEX(sysfs_bin_lock);
-
-struct bin_buffer {
- struct mutex mutex;
- void *buffer;
- int mmapped;
- const struct vm_operations_struct *vm_ops;
- struct file *file;
- struct hlist_node list;
-};
-
-static int
-fill_read(struct file *file, char *buffer, loff_t off, size_t count)
-{
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
- struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
- int rc;
-
- /* need attr_sd for attr, its parent for kobj */
- if (!sysfs_get_active(attr_sd))
- return -ENODEV;
-
- rc = -EIO;
- if (attr->read)
- rc = attr->read(file, kobj, attr, buffer, off, count);
-
- sysfs_put_active(attr_sd);
-
- return rc;
-}
-
-static ssize_t
-read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
-{
- struct bin_buffer *bb = file->private_data;
- int size = file_inode(file)->i_size;
- loff_t offs = *off;
- int count = min_t(size_t, bytes, PAGE_SIZE);
- char *temp;
-
- if (!bytes)
- return 0;
-
- if (size) {
- if (offs > size)
- return 0;
- if (offs + count > size)
- count = size - offs;
- }
-
- temp = kmalloc(count, GFP_KERNEL);
- if (!temp)
- return -ENOMEM;
-
- mutex_lock(&bb->mutex);
-
- count = fill_read(file, bb->buffer, offs, count);
- if (count < 0) {
- mutex_unlock(&bb->mutex);
- goto out_free;
- }
-
- memcpy(temp, bb->buffer, count);
-
- mutex_unlock(&bb->mutex);
-
- if (copy_to_user(userbuf, temp, count)) {
- count = -EFAULT;
- goto out_free;
- }
-
- pr_debug("offs = %lld, *off = %lld, count = %d\n", offs, *off, count);
-
- *off = offs + count;
-
- out_free:
- kfree(temp);
- return count;
-}
-
-static int
-flush_write(struct file *file, char *buffer, loff_t offset, size_t count)
-{
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
- struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
- int rc;
-
- /* need attr_sd for attr, its parent for kobj */
- if (!sysfs_get_active(attr_sd))
- return -ENODEV;
-
- rc = -EIO;
- if (attr->write)
- rc = attr->write(file, kobj, attr, buffer, offset, count);
-
- sysfs_put_active(attr_sd);
-
- return rc;
-}
-
-static ssize_t write(struct file *file, const char __user *userbuf,
- size_t bytes, loff_t *off)
-{
- struct bin_buffer *bb = file->private_data;
- int size = file_inode(file)->i_size;
- loff_t offs = *off;
- int count = min_t(size_t, bytes, PAGE_SIZE);
- char *temp;
-
- if (!bytes)
- return 0;
-
- if (size) {
- if (offs > size)
- return 0;
- if (offs + count > size)
- count = size - offs;
- }
-
- temp = memdup_user(userbuf, count);
- if (IS_ERR(temp))
- return PTR_ERR(temp);
-
- mutex_lock(&bb->mutex);
-
- memcpy(bb->buffer, temp, count);
-
- count = flush_write(file, bb->buffer, offs, count);
- mutex_unlock(&bb->mutex);
-
- if (count > 0)
- *off = offs + count;
-
- kfree(temp);
- return count;
-}
-
-static void bin_vma_open(struct vm_area_struct *vma)
-{
- struct file *file = vma->vm_file;
- struct bin_buffer *bb = file->private_data;
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
-
- if (!bb->vm_ops)
- return;
-
- if (!sysfs_get_active(attr_sd))
- return;
-
- if (bb->vm_ops->open)
- bb->vm_ops->open(vma);
-
- sysfs_put_active(attr_sd);
-}
-
-static int bin_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct file *file = vma->vm_file;
- struct bin_buffer *bb = file->private_data;
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- int ret;
-
- if (!bb->vm_ops)
- return VM_FAULT_SIGBUS;
-
- if (!sysfs_get_active(attr_sd))
- return VM_FAULT_SIGBUS;
-
- ret = VM_FAULT_SIGBUS;
- if (bb->vm_ops->fault)
- ret = bb->vm_ops->fault(vma, vmf);
-
- sysfs_put_active(attr_sd);
- return ret;
-}
-
-static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct file *file = vma->vm_file;
- struct bin_buffer *bb = file->private_data;
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- int ret;
-
- if (!bb->vm_ops)
- return VM_FAULT_SIGBUS;
-
- if (!sysfs_get_active(attr_sd))
- return VM_FAULT_SIGBUS;
-
- ret = 0;
- if (bb->vm_ops->page_mkwrite)
- ret = bb->vm_ops->page_mkwrite(vma, vmf);
- else
- file_update_time(file);
-
- sysfs_put_active(attr_sd);
- return ret;
-}
-
-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
- void *buf, int len, int write)
-{
- struct file *file = vma->vm_file;
- struct bin_buffer *bb = file->private_data;
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- int ret;
-
- if (!bb->vm_ops)
- return -EINVAL;
-
- if (!sysfs_get_active(attr_sd))
- return -EINVAL;
-
- ret = -EINVAL;
- if (bb->vm_ops->access)
- ret = bb->vm_ops->access(vma, addr, buf, len, write);
-
- sysfs_put_active(attr_sd);
- return ret;
-}
-
-#ifdef CONFIG_NUMA
-static int bin_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
-{
- struct file *file = vma->vm_file;
- struct bin_buffer *bb = file->private_data;
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- int ret;
-
- if (!bb->vm_ops)
- return 0;
-
- if (!sysfs_get_active(attr_sd))
- return -EINVAL;
-
- ret = 0;
- if (bb->vm_ops->set_policy)
- ret = bb->vm_ops->set_policy(vma, new);
-
- sysfs_put_active(attr_sd);
- return ret;
-}
-
-static struct mempolicy *bin_get_policy(struct vm_area_struct *vma,
- unsigned long addr)
-{
- struct file *file = vma->vm_file;
- struct bin_buffer *bb = file->private_data;
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- struct mempolicy *pol;
-
- if (!bb->vm_ops)
- return vma->vm_policy;
-
- if (!sysfs_get_active(attr_sd))
- return vma->vm_policy;
-
- pol = vma->vm_policy;
- if (bb->vm_ops->get_policy)
- pol = bb->vm_ops->get_policy(vma, addr);
-
- sysfs_put_active(attr_sd);
- return pol;
-}
-
-static int bin_migrate(struct vm_area_struct *vma, const nodemask_t *from,
- const nodemask_t *to, unsigned long flags)
-{
- struct file *file = vma->vm_file;
- struct bin_buffer *bb = file->private_data;
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- int ret;
-
- if (!bb->vm_ops)
- return 0;
-
- if (!sysfs_get_active(attr_sd))
- return 0;
-
- ret = 0;
- if (bb->vm_ops->migrate)
- ret = bb->vm_ops->migrate(vma, from, to, flags);
-
- sysfs_put_active(attr_sd);
- return ret;
-}
-#endif
-
-static const struct vm_operations_struct bin_vm_ops = {
- .open = bin_vma_open,
- .fault = bin_fault,
- .page_mkwrite = bin_page_mkwrite,
- .access = bin_access,
-#ifdef CONFIG_NUMA
- .set_policy = bin_set_policy,
- .get_policy = bin_get_policy,
- .migrate = bin_migrate,
-#endif
-};
-
-static int mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct bin_buffer *bb = file->private_data;
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
- struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
- int rc;
-
- mutex_lock(&bb->mutex);
-
- /* need attr_sd for attr, its parent for kobj */
- rc = -ENODEV;
- if (!sysfs_get_active(attr_sd))
- goto out_unlock;
-
- rc = -EINVAL;
- if (!attr->mmap)
- goto out_put;
-
- rc = attr->mmap(file, kobj, attr, vma);
- if (rc)
- goto out_put;
-
- /*
- * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
- * to satisfy versions of X which crash if the mmap fails: that
- * substitutes a new vm_file, and we don't then want bin_vm_ops.
- */
- if (vma->vm_file != file)
- goto out_put;
-
- rc = -EINVAL;
- if (bb->mmapped && bb->vm_ops != vma->vm_ops)
- goto out_put;
-
- /*
- * It is not possible to successfully wrap close.
- * So error if someone is trying to use close.
- */
- rc = -EINVAL;
- if (vma->vm_ops && vma->vm_ops->close)
- goto out_put;
-
- rc = 0;
- bb->mmapped = 1;
- bb->vm_ops = vma->vm_ops;
- vma->vm_ops = &bin_vm_ops;
-out_put:
- sysfs_put_active(attr_sd);
-out_unlock:
- mutex_unlock(&bb->mutex);
-
- return rc;
-}
-
-static int open(struct inode *inode, struct file *file)
-{
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
- struct bin_buffer *bb = NULL;
- int error;
-
- /* binary file operations requires both @sd and its parent */
- if (!sysfs_get_active(attr_sd))
- return -ENODEV;
-
- error = -EACCES;
- if ((file->f_mode & FMODE_WRITE) && !(attr->write || attr->mmap))
- goto err_out;
- if ((file->f_mode & FMODE_READ) && !(attr->read || attr->mmap))
- goto err_out;
-
- error = -ENOMEM;
- bb = kzalloc(sizeof(*bb), GFP_KERNEL);
- if (!bb)
- goto err_out;
-
- bb->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!bb->buffer)
- goto err_out;
-
- mutex_init(&bb->mutex);
- bb->file = file;
- file->private_data = bb;
-
- mutex_lock(&sysfs_bin_lock);
- hlist_add_head(&bb->list, &attr_sd->s_bin_attr.buffers);
- mutex_unlock(&sysfs_bin_lock);
-
- /* open succeeded, put active references */
- sysfs_put_active(attr_sd);
- return 0;
-
- err_out:
- sysfs_put_active(attr_sd);
- kfree(bb);
- return error;
-}
-
-static int release(struct inode *inode, struct file *file)
-{
- struct bin_buffer *bb = file->private_data;
-
- mutex_lock(&sysfs_bin_lock);
- hlist_del(&bb->list);
- mutex_unlock(&sysfs_bin_lock);
-
- kfree(bb->buffer);
- kfree(bb);
- return 0;
-}
-
-const struct file_operations bin_fops = {
- .read = read,
- .write = write,
- .mmap = mmap,
- .llseek = generic_file_llseek,
- .open = open,
- .release = release,
-};
-
-
-void unmap_bin_file(struct sysfs_dirent *attr_sd)
-{
- struct bin_buffer *bb;
-
- if (sysfs_type(attr_sd) != SYSFS_KOBJ_BIN_ATTR)
- return;
-
- mutex_lock(&sysfs_bin_lock);
-
- hlist_for_each_entry(bb, &attr_sd->s_bin_attr.buffers, list) {
- struct inode *inode = file_inode(bb->file);
-
- unmap_mapping_range(inode->i_mapping, 0, 0, 1);
- }
-
- mutex_unlock(&sysfs_bin_lock);
-}
-
-/**
- * sysfs_create_bin_file - create binary file for object.
- * @kobj: object.
- * @attr: attribute descriptor.
- */
-int sysfs_create_bin_file(struct kobject *kobj,
- const struct bin_attribute *attr)
-{
- BUG_ON(!kobj || !kobj->sd || !attr);
-
- return sysfs_add_file(kobj->sd, &attr->attr, SYSFS_KOBJ_BIN_ATTR);
-}
-EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
-
-/**
- * sysfs_remove_bin_file - remove binary file for object.
- * @kobj: object.
- * @attr: attribute descriptor.
- */
-void sysfs_remove_bin_file(struct kobject *kobj,
- const struct bin_attribute *attr)
-{
- sysfs_hash_and_remove(kobj->sd, NULL, attr->attr.name);
-}
-EXPORT_SYMBOL_GPL(sysfs_remove_bin_file);
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 4d83cedb9fcb..08c66969d52a 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -26,21 +26,21 @@
#include "sysfs.h"
DEFINE_MUTEX(sysfs_mutex);
-DEFINE_SPINLOCK(sysfs_assoc_lock);
+DEFINE_SPINLOCK(sysfs_symlink_target_lock);
-#define to_sysfs_dirent(X) rb_entry((X), struct sysfs_dirent, s_rb);
+#define to_sysfs_dirent(X) rb_entry((X), struct sysfs_dirent, s_rb)
static DEFINE_SPINLOCK(sysfs_ino_lock);
static DEFINE_IDA(sysfs_ino_ida);
/**
* sysfs_name_hash
- * @ns: Namespace tag to hash
* @name: Null terminated string to hash
+ * @ns: Namespace tag to hash
*
* Returns 31 bit hash of ns + name (so it fits in an off_t )
*/
-static unsigned int sysfs_name_hash(const void *ns, const char *name)
+static unsigned int sysfs_name_hash(const char *name, const void *ns)
{
unsigned long hash = init_name_hash();
unsigned int len = strlen(name);
@@ -56,8 +56,8 @@ static unsigned int sysfs_name_hash(const void *ns, const char *name)
return hash;
}
-static int sysfs_name_compare(unsigned int hash, const void *ns,
- const char *name, const struct sysfs_dirent *sd)
+static int sysfs_name_compare(unsigned int hash, const char *name,
+ const void *ns, const struct sysfs_dirent *sd)
{
if (hash != sd->s_hash)
return hash - sd->s_hash;
@@ -69,7 +69,7 @@ static int sysfs_name_compare(unsigned int hash, const void *ns,
static int sysfs_sd_compare(const struct sysfs_dirent *left,
const struct sysfs_dirent *right)
{
- return sysfs_name_compare(left->s_hash, left->s_ns, left->s_name,
+ return sysfs_name_compare(left->s_hash, left->s_name, left->s_ns,
right);
}
@@ -111,6 +111,11 @@ static int sysfs_link_sibling(struct sysfs_dirent *sd)
/* add new node and rebalance the tree */
rb_link_node(&sd->s_rb, parent, node);
rb_insert_color(&sd->s_rb, &sd->s_parent->s_dir.children);
+
+ /* if @sd has ns tag, mark the parent to enable ns filtering */
+ if (sd->s_ns)
+ sd->s_parent->s_flags |= SYSFS_FLAG_HAS_NS;
+
return 0;
}
@@ -130,26 +135,15 @@ static void sysfs_unlink_sibling(struct sysfs_dirent *sd)
sd->s_parent->s_dir.subdirs--;
rb_erase(&sd->s_rb, &sd->s_parent->s_dir.children);
-}
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-/* Test for attributes that want to ignore lockdep for read-locking */
-static bool ignore_lockdep(struct sysfs_dirent *sd)
-{
- return sysfs_type(sd) == SYSFS_KOBJ_ATTR &&
- sd->s_attr.attr->ignore_lockdep;
-}
-
-#else
-
-static inline bool ignore_lockdep(struct sysfs_dirent *sd)
-{
- return true;
+ /*
+ * Either all or none of the children have tags. Clearing HAS_NS
+ * when there's no child left is enough to keep the flag synced.
+ */
+ if (RB_EMPTY_ROOT(&sd->s_parent->s_dir.children))
+ sd->s_parent->s_flags &= ~SYSFS_FLAG_HAS_NS;
}
-#endif
-
/**
* sysfs_get_active - get an active reference to sysfs_dirent
* @sd: sysfs_dirent to get an active reference to
@@ -168,7 +162,7 @@ struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd)
if (!atomic_inc_unless_negative(&sd->s_active))
return NULL;
- if (likely(!ignore_lockdep(sd)))
+ if (likely(!sysfs_ignore_lockdep(sd)))
rwsem_acquire_read(&sd->dep_map, 0, 1, _RET_IP_);
return sd;
}
@@ -187,7 +181,7 @@ void sysfs_put_active(struct sysfs_dirent *sd)
if (unlikely(!sd))
return;
- if (likely(!ignore_lockdep(sd)))
+ if (likely(!sysfs_ignore_lockdep(sd)))
rwsem_release(&sd->dep_map, 1, _RET_IP_);
v = atomic_dec_return(&sd->s_active);
if (likely(v != SD_DEACTIVATED_BIAS))
@@ -297,7 +291,6 @@ static int sysfs_dentry_delete(const struct dentry *dentry)
static int sysfs_dentry_revalidate(struct dentry *dentry, unsigned int flags)
{
struct sysfs_dirent *sd;
- int type;
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -318,13 +311,8 @@ static int sysfs_dentry_revalidate(struct dentry *dentry, unsigned int flags)
goto out_bad;
/* The sysfs dirent has been moved to a different namespace */
- type = KOBJ_NS_TYPE_NONE;
- if (sd->s_parent) {
- type = sysfs_ns_type(sd->s_parent);
- if (type != KOBJ_NS_TYPE_NONE &&
- sysfs_info(dentry->d_sb)->ns[type] != sd->s_ns)
- goto out_bad;
- }
+ if (sd->s_ns && sd->s_ns != sysfs_info(dentry->d_sb)->ns)
+ goto out_bad;
mutex_unlock(&sysfs_mutex);
out_valid:
@@ -400,22 +388,19 @@ struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type)
/**
* sysfs_addrm_start - prepare for sysfs_dirent add/remove
* @acxt: pointer to sysfs_addrm_cxt to be used
- * @parent_sd: parent sysfs_dirent
*
- * This function is called when the caller is about to add or
- * remove sysfs_dirent under @parent_sd. This function acquires
- * sysfs_mutex. @acxt is used to keep and pass context to
- * other addrm functions.
+ * This function is called when the caller is about to add or remove
+ * sysfs_dirent. This function acquires sysfs_mutex. @acxt is used
+ * to keep and pass context to other addrm functions.
*
* LOCKING:
* Kernel thread context (may sleep). sysfs_mutex is locked on
* return.
*/
-void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
- struct sysfs_dirent *parent_sd)
+void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt)
+ __acquires(sysfs_mutex)
{
memset(acxt, 0, sizeof(*acxt));
- acxt->parent_sd = parent_sd;
mutex_lock(&sysfs_mutex);
}
@@ -424,10 +409,11 @@ void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
* __sysfs_add_one - add sysfs_dirent to parent without warning
* @acxt: addrm context to use
* @sd: sysfs_dirent to be added
+ * @parent_sd: the parent sysfs_dirent to add @sd to
*
- * Get @acxt->parent_sd and set sd->s_parent to it and increment
- * nlink of parent inode if @sd is a directory and link into the
- * children list of the parent.
+ * Get @parent_sd and set @sd->s_parent to it and increment nlink of
+ * the parent inode if @sd is a directory and link into the children
+ * list of the parent.
*
* This function should be called between calls to
* sysfs_addrm_start() and sysfs_addrm_finish() and should be
@@ -440,27 +426,21 @@ void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
* 0 on success, -EEXIST if entry with the given name already
* exists.
*/
-int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
+int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd,
+ struct sysfs_dirent *parent_sd)
{
struct sysfs_inode_attrs *ps_iattr;
int ret;
- if (!!sysfs_ns_type(acxt->parent_sd) != !!sd->s_ns) {
- WARN(1, KERN_WARNING "sysfs: ns %s in '%s' for '%s'\n",
- sysfs_ns_type(acxt->parent_sd) ? "required" : "invalid",
- acxt->parent_sd->s_name, sd->s_name);
- return -EINVAL;
- }
-
- sd->s_hash = sysfs_name_hash(sd->s_ns, sd->s_name);
- sd->s_parent = sysfs_get(acxt->parent_sd);
+ sd->s_hash = sysfs_name_hash(sd->s_name, sd->s_ns);
+ sd->s_parent = sysfs_get(parent_sd);
ret = sysfs_link_sibling(sd);
if (ret)
return ret;
/* Update timestamps on the parent */
- ps_iattr = acxt->parent_sd->s_iattr;
+ ps_iattr = parent_sd->s_iattr;
if (ps_iattr) {
struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME;
@@ -490,14 +470,32 @@ static char *sysfs_pathname(struct sysfs_dirent *sd, char *path)
return path;
}
+void sysfs_warn_dup(struct sysfs_dirent *parent, const char *name)
+{
+ char *path;
+
+ path = kzalloc(PATH_MAX, GFP_KERNEL);
+ if (path) {
+ sysfs_pathname(parent, path);
+ strlcat(path, "/", PATH_MAX);
+ strlcat(path, name, PATH_MAX);
+ }
+
+ WARN(1, KERN_WARNING "sysfs: cannot create duplicate filename '%s'\n",
+ path ? path : name);
+
+ kfree(path);
+}
+
/**
* sysfs_add_one - add sysfs_dirent to parent
* @acxt: addrm context to use
* @sd: sysfs_dirent to be added
+ * @parent_sd: the parent sysfs_dirent to add @sd to
*
- * Get @acxt->parent_sd and set sd->s_parent to it and increment
- * nlink of parent inode if @sd is a directory and link into the
- * children list of the parent.
+ * Get @parent_sd and set @sd->s_parent to it and increment nlink of
+ * the parent inode if @sd is a directory and link into the children
+ * list of the parent.
*
* This function should be called between calls to
* sysfs_addrm_start() and sysfs_addrm_finish() and should be
@@ -510,23 +508,15 @@ static char *sysfs_pathname(struct sysfs_dirent *sd, char *path)
* 0 on success, -EEXIST if entry with the given name already
* exists.
*/
-int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
+int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd,
+ struct sysfs_dirent *parent_sd)
{
int ret;
- ret = __sysfs_add_one(acxt, sd);
- if (ret == -EEXIST) {
- char *path = kzalloc(PATH_MAX, GFP_KERNEL);
- WARN(1, KERN_WARNING
- "sysfs: cannot create duplicate filename '%s'\n",
- (path == NULL) ? sd->s_name
- : (sysfs_pathname(acxt->parent_sd, path),
- strlcat(path, "/", PATH_MAX),
- strlcat(path, sd->s_name, PATH_MAX),
- path));
- kfree(path);
- }
+ ret = __sysfs_add_one(acxt, sd, parent_sd);
+ if (ret == -EEXIST)
+ sysfs_warn_dup(parent_sd, sd->s_name);
return ret;
}
@@ -545,16 +535,22 @@ int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
* LOCKING:
* Determined by sysfs_addrm_start().
*/
-void sysfs_remove_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
+static void sysfs_remove_one(struct sysfs_addrm_cxt *acxt,
+ struct sysfs_dirent *sd)
{
struct sysfs_inode_attrs *ps_iattr;
- BUG_ON(sd->s_flags & SYSFS_FLAG_REMOVED);
+ /*
+ * Removal can be called multiple times on the same node. Only the
+ * first invocation is effective and puts the base ref.
+ */
+ if (sd->s_flags & SYSFS_FLAG_REMOVED)
+ return;
sysfs_unlink_sibling(sd);
/* Update timestamps on the parent */
- ps_iattr = acxt->parent_sd->s_iattr;
+ ps_iattr = sd->s_parent->s_iattr;
if (ps_iattr) {
struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME;
@@ -577,6 +573,7 @@ void sysfs_remove_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
* sysfs_mutex is released.
*/
void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
+ __releases(sysfs_mutex)
{
/* release resources acquired by sysfs_addrm_start() */
mutex_unlock(&sysfs_mutex);
@@ -588,7 +585,7 @@ void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
acxt->removed = sd->u.removed_list;
sysfs_deactivate(sd);
- unmap_bin_file(sd);
+ sysfs_unmap_bin_file(sd);
sysfs_put(sd);
}
}
@@ -597,6 +594,7 @@ void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
* sysfs_find_dirent - find sysfs_dirent with the given name
* @parent_sd: sysfs_dirent to search under
* @name: name to look for
+ * @ns: the namespace tag to use
*
* Look for sysfs_dirent with name @name under @parent_sd.
*
@@ -607,26 +605,19 @@ void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
* Pointer to sysfs_dirent if found, NULL if not.
*/
struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
- const void *ns,
- const unsigned char *name)
+ const unsigned char *name,
+ const void *ns)
{
struct rb_node *node = parent_sd->s_dir.children.rb_node;
unsigned int hash;
- if (!!sysfs_ns_type(parent_sd) != !!ns) {
- WARN(1, KERN_WARNING "sysfs: ns %s in '%s' for '%s'\n",
- sysfs_ns_type(parent_sd) ? "required" : "invalid",
- parent_sd->s_name, name);
- return NULL;
- }
-
- hash = sysfs_name_hash(ns, name);
+ hash = sysfs_name_hash(name, ns);
while (node) {
struct sysfs_dirent *sd;
int result;
sd = to_sysfs_dirent(node);
- result = sysfs_name_compare(hash, ns, name, sd);
+ result = sysfs_name_compare(hash, name, ns, sd);
if (result < 0)
node = node->rb_left;
else if (result > 0)
@@ -638,9 +629,10 @@ struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
}
/**
- * sysfs_get_dirent - find and get sysfs_dirent with the given name
+ * sysfs_get_dirent_ns - find and get sysfs_dirent with the given name
* @parent_sd: sysfs_dirent to search under
* @name: name to look for
+ * @ns: the namespace tag to use
*
* Look for sysfs_dirent with name @name under @parent_sd and get
* it if found.
@@ -651,24 +643,24 @@ struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
* RETURNS:
* Pointer to sysfs_dirent if found, NULL if not.
*/
-struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
- const void *ns,
- const unsigned char *name)
+struct sysfs_dirent *sysfs_get_dirent_ns(struct sysfs_dirent *parent_sd,
+ const unsigned char *name,
+ const void *ns)
{
struct sysfs_dirent *sd;
mutex_lock(&sysfs_mutex);
- sd = sysfs_find_dirent(parent_sd, ns, name);
+ sd = sysfs_find_dirent(parent_sd, name, ns);
sysfs_get(sd);
mutex_unlock(&sysfs_mutex);
return sd;
}
-EXPORT_SYMBOL_GPL(sysfs_get_dirent);
+EXPORT_SYMBOL_GPL(sysfs_get_dirent_ns);
static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
- enum kobj_ns_type type, const void *ns, const char *name,
- struct sysfs_dirent **p_sd)
+ const char *name, const void *ns,
+ struct sysfs_dirent **p_sd)
{
umode_t mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
struct sysfs_addrm_cxt acxt;
@@ -680,13 +672,12 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
if (!sd)
return -ENOMEM;
- sd->s_flags |= (type << SYSFS_NS_TYPE_SHIFT);
sd->s_ns = ns;
sd->s_dir.kobj = kobj;
/* link in */
- sysfs_addrm_start(&acxt, parent_sd);
- rc = sysfs_add_one(&acxt, sd);
+ sysfs_addrm_start(&acxt);
+ rc = sysfs_add_one(&acxt, sd, parent_sd);
sysfs_addrm_finish(&acxt);
if (rc == 0)
@@ -700,44 +691,17 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
int sysfs_create_subdir(struct kobject *kobj, const char *name,
struct sysfs_dirent **p_sd)
{
- return create_dir(kobj, kobj->sd,
- KOBJ_NS_TYPE_NONE, NULL, name, p_sd);
+ return create_dir(kobj, kobj->sd, name, NULL, p_sd);
}
/**
- * sysfs_read_ns_type: return associated ns_type
- * @kobj: the kobject being queried
- *
- * Each kobject can be tagged with exactly one namespace type
- * (i.e. network or user). Return the ns_type associated with
- * this object if any
+ * sysfs_create_dir_ns - create a directory for an object with a namespace tag
+ * @kobj: object we're creating directory for
+ * @ns: the namespace tag to use
*/
-static enum kobj_ns_type sysfs_read_ns_type(struct kobject *kobj)
+int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
{
- const struct kobj_ns_type_operations *ops;
- enum kobj_ns_type type;
-
- ops = kobj_child_ns_ops(kobj);
- if (!ops)
- return KOBJ_NS_TYPE_NONE;
-
- type = ops->type;
- BUG_ON(type <= KOBJ_NS_TYPE_NONE);
- BUG_ON(type >= KOBJ_NS_TYPES);
- BUG_ON(!kobj_ns_type_registered(type));
-
- return type;
-}
-
-/**
- * sysfs_create_dir - create a directory for an object.
- * @kobj: object we're creating directory for.
- */
-int sysfs_create_dir(struct kobject *kobj)
-{
- enum kobj_ns_type type;
struct sysfs_dirent *parent_sd, *sd;
- const void *ns = NULL;
int error = 0;
BUG_ON(!kobj);
@@ -750,11 +714,7 @@ int sysfs_create_dir(struct kobject *kobj)
if (!parent_sd)
return -ENOENT;
- if (sysfs_ns_type(parent_sd))
- ns = kobj->ktype->namespace(kobj);
- type = sysfs_read_ns_type(kobj);
-
- error = create_dir(kobj, parent_sd, type, ns, kobject_name(kobj), &sd);
+ error = create_dir(kobj, parent_sd, kobject_name(kobj), ns, &sd);
if (!error)
kobj->sd = sd;
return error;
@@ -768,15 +728,14 @@ static struct dentry *sysfs_lookup(struct inode *dir, struct dentry *dentry,
struct sysfs_dirent *parent_sd = parent->d_fsdata;
struct sysfs_dirent *sd;
struct inode *inode;
- enum kobj_ns_type type;
- const void *ns;
+ const void *ns = NULL;
mutex_lock(&sysfs_mutex);
- type = sysfs_ns_type(parent_sd);
- ns = sysfs_info(dir->i_sb)->ns[type];
+ if (parent_sd->s_flags & SYSFS_FLAG_HAS_NS)
+ ns = sysfs_info(dir->i_sb)->ns;
- sd = sysfs_find_dirent(parent_sd, ns, dentry->d_name.name);
+ sd = sysfs_find_dirent(parent_sd, dentry->d_name.name, ns);
/* no such entry */
if (!sd) {
@@ -807,41 +766,128 @@ const struct inode_operations sysfs_dir_inode_operations = {
.setxattr = sysfs_setxattr,
};
-static void remove_dir(struct sysfs_dirent *sd)
+static struct sysfs_dirent *sysfs_leftmost_descendant(struct sysfs_dirent *pos)
{
- struct sysfs_addrm_cxt acxt;
+ struct sysfs_dirent *last;
- sysfs_addrm_start(&acxt, sd->s_parent);
- sysfs_remove_one(&acxt, sd);
- sysfs_addrm_finish(&acxt);
+ while (true) {
+ struct rb_node *rbn;
+
+ last = pos;
+
+ if (sysfs_type(pos) != SYSFS_DIR)
+ break;
+
+ rbn = rb_first(&pos->s_dir.children);
+ if (!rbn)
+ break;
+
+ pos = to_sysfs_dirent(rbn);
+ }
+
+ return last;
}
-void sysfs_remove_subdir(struct sysfs_dirent *sd)
+/**
+ * sysfs_next_descendant_post - find the next descendant for post-order walk
+ * @pos: the current position (%NULL to initiate traversal)
+ * @root: sysfs_dirent whose descendants to walk
+ *
+ * Find the next descendant to visit for post-order traversal of @root's
+ * descendants. @root is included in the iteration and is the last
+ * node to be visited.
+ */
+static struct sysfs_dirent *sysfs_next_descendant_post(struct sysfs_dirent *pos,
+ struct sysfs_dirent *root)
{
- remove_dir(sd);
+ struct rb_node *rbn;
+
+ lockdep_assert_held(&sysfs_mutex);
+
+ /* if first iteration, visit leftmost descendant which may be root */
+ if (!pos)
+ return sysfs_leftmost_descendant(root);
+
+ /* if we visited @root, we're done */
+ if (pos == root)
+ return NULL;
+
+ /* if there's an unvisited sibling, visit its leftmost descendant */
+ rbn = rb_next(&pos->s_rb);
+ if (rbn)
+ return sysfs_leftmost_descendant(to_sysfs_dirent(rbn));
+
+ /* no sibling left, visit parent */
+ return pos->s_parent;
}
+static void __sysfs_remove(struct sysfs_addrm_cxt *acxt,
+ struct sysfs_dirent *sd)
+{
+ struct sysfs_dirent *pos, *next;
+
+ if (!sd)
+ return;
+
+ pr_debug("sysfs %s: removing\n", sd->s_name);
+
+ next = NULL;
+ do {
+ pos = next;
+ next = sysfs_next_descendant_post(pos, sd);
+ if (pos)
+ sysfs_remove_one(acxt, pos);
+ } while (next);
+}
-static void __sysfs_remove_dir(struct sysfs_dirent *dir_sd)
+/**
+ * sysfs_remove - remove a sysfs_dirent recursively
+ * @sd: the sysfs_dirent to remove
+ *
+ * Remove @sd along with all its subdirectories and files.
+ */
+void sysfs_remove(struct sysfs_dirent *sd)
{
struct sysfs_addrm_cxt acxt;
- struct rb_node *pos;
- if (!dir_sd)
- return;
+ sysfs_addrm_start(&acxt);
+ __sysfs_remove(&acxt, sd);
+ sysfs_addrm_finish(&acxt);
+}
- pr_debug("sysfs %s: removing dir\n", dir_sd->s_name);
- sysfs_addrm_start(&acxt, dir_sd);
- pos = rb_first(&dir_sd->s_dir.children);
- while (pos) {
- struct sysfs_dirent *sd = to_sysfs_dirent(pos);
- pos = rb_next(pos);
- if (sysfs_type(sd) != SYSFS_DIR)
- sysfs_remove_one(&acxt, sd);
+/**
+ * sysfs_hash_and_remove - find a sysfs_dirent by name and remove it
+ * @dir_sd: parent of the target
+ * @name: name of the sysfs_dirent to remove
+ * @ns: namespace tag of the sysfs_dirent to remove
+ *
+ * Look for the sysfs_dirent with @name and @ns under @dir_sd and remove
+ * it. Returns 0 on success, -ENOENT if no such entry exists.
+ */
+int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name,
+ const void *ns)
+{
+ struct sysfs_addrm_cxt acxt;
+ struct sysfs_dirent *sd;
+
+ if (!dir_sd) {
+ WARN(1, KERN_WARNING "sysfs: can not remove '%s', no directory\n",
+ name);
+ return -ENOENT;
}
+
+ sysfs_addrm_start(&acxt);
+
+ sd = sysfs_find_dirent(dir_sd, name, ns);
+ if (sd)
+ __sysfs_remove(&acxt, sd);
+
sysfs_addrm_finish(&acxt);
- remove_dir(dir_sd);
+ if (sd)
+ return 0;
+ else
+ return -ENOENT;
}
/**
@@ -852,21 +898,34 @@ static void __sysfs_remove_dir(struct sysfs_dirent *dir_sd)
* the directory before we remove the directory, and we've inlined
* what used to be sysfs_rmdir() below, instead of calling separately.
*/
-
void sysfs_remove_dir(struct kobject *kobj)
{
struct sysfs_dirent *sd = kobj->sd;
- spin_lock(&sysfs_assoc_lock);
+ /*
+ * In general, the kobject owner is responsible for ensuring removal
+ * doesn't race with other operations and sysfs doesn't provide any
+ * protection; however, when @kobj is used as a symlink target, the
+ * symlinking entity usually doesn't own @kobj and thus has no
+ * control over removal. @kobj->sd may be removed anytime and
+ * symlink code may end up dereferencing an already freed sd.
+ *
+ * sysfs_symlink_target_lock synchronizes @kobj->sd disassociation
+ * against symlink operations so that symlink code can safely
+ * dereference @kobj->sd.
+ */
+ spin_lock(&sysfs_symlink_target_lock);
kobj->sd = NULL;
- spin_unlock(&sysfs_assoc_lock);
+ spin_unlock(&sysfs_symlink_target_lock);
- __sysfs_remove_dir(sd);
+ if (sd) {
+ WARN_ON_ONCE(sysfs_type(sd) != SYSFS_DIR);
+ sysfs_remove(sd);
+ }
}
-int sysfs_rename(struct sysfs_dirent *sd,
- struct sysfs_dirent *new_parent_sd, const void *new_ns,
- const char *new_name)
+int sysfs_rename(struct sysfs_dirent *sd, struct sysfs_dirent *new_parent_sd,
+ const char *new_name, const void *new_ns)
{
int error;
@@ -878,7 +937,7 @@ int sysfs_rename(struct sysfs_dirent *sd,
goto out; /* nothing to rename */
error = -EEXIST;
- if (sysfs_find_dirent(new_parent_sd, new_ns, new_name))
+ if (sysfs_find_dirent(new_parent_sd, new_name, new_ns))
goto out;
/* rename sysfs_dirent */
@@ -899,7 +958,7 @@ int sysfs_rename(struct sysfs_dirent *sd,
sysfs_get(new_parent_sd);
sysfs_put(sd->s_parent);
sd->s_ns = new_ns;
- sd->s_hash = sysfs_name_hash(sd->s_ns, sd->s_name);
+ sd->s_hash = sysfs_name_hash(sd->s_name, sd->s_ns);
sd->s_parent = new_parent_sd;
sysfs_link_sibling(sd);
@@ -909,30 +968,25 @@ int sysfs_rename(struct sysfs_dirent *sd,
return error;
}
-int sysfs_rename_dir(struct kobject *kobj, const char *new_name)
+int sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
+ const void *new_ns)
{
struct sysfs_dirent *parent_sd = kobj->sd->s_parent;
- const void *new_ns = NULL;
-
- if (sysfs_ns_type(parent_sd))
- new_ns = kobj->ktype->namespace(kobj);
- return sysfs_rename(kobj->sd, parent_sd, new_ns, new_name);
+ return sysfs_rename(kobj->sd, parent_sd, new_name, new_ns);
}
-int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent_kobj)
+int sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj,
+ const void *new_ns)
{
struct sysfs_dirent *sd = kobj->sd;
struct sysfs_dirent *new_parent_sd;
- const void *new_ns = NULL;
BUG_ON(!sd->s_parent);
- if (sysfs_ns_type(sd->s_parent))
- new_ns = kobj->ktype->namespace(kobj);
new_parent_sd = new_parent_kobj && new_parent_kobj->sd ?
new_parent_kobj->sd : &sysfs_root;
- return sysfs_rename(sd, new_parent_sd, new_ns, sd->s_name);
+ return sysfs_rename(sd, new_parent_sd, sd->s_name, new_ns);
}
/* Relationship between s_mode and the DT_xxx types */
@@ -1002,15 +1056,15 @@ static int sysfs_readdir(struct file *file, struct dir_context *ctx)
struct dentry *dentry = file->f_path.dentry;
struct sysfs_dirent *parent_sd = dentry->d_fsdata;
struct sysfs_dirent *pos = file->private_data;
- enum kobj_ns_type type;
- const void *ns;
-
- type = sysfs_ns_type(parent_sd);
- ns = sysfs_info(dentry->d_sb)->ns[type];
+ const void *ns = NULL;
if (!dir_emit_dots(file, ctx))
return 0;
mutex_lock(&sysfs_mutex);
+
+ if (parent_sd->s_flags & SYSFS_FLAG_HAS_NS)
+ ns = sysfs_info(dentry->d_sb)->ns;
+
for (pos = sysfs_dir_pos(ns, parent_sd, ctx->pos, pos);
pos;
pos = sysfs_dir_next_pos(ns, parent_sd, ctx->pos, pos)) {
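The recursion-free removal in __sysfs_remove() above relies on a post-order walk that needs only parent and sibling links, so every child is torn down before its parent. The same iteration, lifted out of the rbtree context onto a plain first-child/next-sibling tree, with struct and helper names of our own choosing for illustration:

#include <stddef.h>

struct node {
	struct node *parent;
	struct node *first_child;
	struct node *next_sibling;
};

/* Deepest leftmost descendant; may be @pos itself if it has no children. */
static struct node *leftmost_descendant(struct node *pos)
{
	while (pos->first_child)
		pos = pos->first_child;
	return pos;
}

/* Post-order successor: pass NULL to start; returns NULL once @root is done. */
static struct node *next_descendant_post(struct node *pos, struct node *root)
{
	if (!pos)
		return leftmost_descendant(root);
	if (pos == root)
		return NULL;
	if (pos->next_sibling)
		return leftmost_descendant(pos->next_sibling);
	return pos->parent;
}

/* Visit (e.g. tear down) every node, children strictly before parents. */
static void remove_all(struct node *root, void (*visit)(struct node *))
{
	struct node *pos, *next = NULL;

	do {
		pos = next;
		next = next_descendant_post(pos, root);
		if (pos)
			visit(pos);
	} while (next);
}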
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 15ef5eb13663..79b5da2acbe1 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -21,70 +21,114 @@
#include <linux/mutex.h>
#include <linux/limits.h>
#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/mm.h>
#include "sysfs.h"
/*
- * There's one sysfs_buffer for each open file and one
- * sysfs_open_dirent for each sysfs_dirent with one or more open
- * files.
+ * There's one sysfs_open_file for each open file and one sysfs_open_dirent
+ * for each sysfs_dirent with one or more open files.
*
- * filp->private_data points to sysfs_buffer and
- * sysfs_dirent->s_attr.open points to sysfs_open_dirent. s_attr.open
- * is protected by sysfs_open_dirent_lock.
+ * sysfs_dirent->s_attr.open points to sysfs_open_dirent. s_attr.open is
+ * protected by sysfs_open_dirent_lock.
+ *
+ * filp->private_data points to seq_file whose ->private points to
+ * sysfs_open_file. sysfs_open_files are chained at
+ * sysfs_open_dirent->files, which is protected by sysfs_open_file_mutex.
*/
static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
+static DEFINE_MUTEX(sysfs_open_file_mutex);
struct sysfs_open_dirent {
atomic_t refcnt;
atomic_t event;
wait_queue_head_t poll;
- struct list_head buffers; /* goes through sysfs_buffer.list */
+ struct list_head files; /* goes through sysfs_open_file.list */
};
-struct sysfs_buffer {
- size_t count;
- loff_t pos;
- char *page;
- const struct sysfs_ops *ops;
+struct sysfs_open_file {
+ struct sysfs_dirent *sd;
+ struct file *file;
struct mutex mutex;
- int needs_read_fill;
int event;
struct list_head list;
+
+ bool mmapped;
+ const struct vm_operations_struct *vm_ops;
};
-/**
- * fill_read_buffer - allocate and fill buffer from object.
- * @dentry: dentry pointer.
- * @buffer: data buffer for file.
- *
- * Allocate @buffer->page, if it hasn't been already, then call the
- * kobject's show() method to fill the buffer with this attribute's
- * data.
- * This is called only once, on the file's first read unless an error
- * is returned.
+static bool sysfs_is_bin(struct sysfs_dirent *sd)
+{
+ return sysfs_type(sd) == SYSFS_KOBJ_BIN_ATTR;
+}
+
+static struct sysfs_open_file *sysfs_of(struct file *file)
+{
+ return ((struct seq_file *)file->private_data)->private;
+}
+
+/*
+ * Determine ktype->sysfs_ops for the given sysfs_dirent. This function
+ * must be called while holding an active reference.
*/
-static int fill_read_buffer(struct dentry *dentry, struct sysfs_buffer *buffer)
+static const struct sysfs_ops *sysfs_file_ops(struct sysfs_dirent *sd)
{
- struct sysfs_dirent *attr_sd = dentry->d_fsdata;
- struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
- const struct sysfs_ops *ops = buffer->ops;
- int ret = 0;
+ struct kobject *kobj = sd->s_parent->s_dir.kobj;
+
+ if (!sysfs_ignore_lockdep(sd))
+ lockdep_assert_held(sd);
+ return kobj->ktype ? kobj->ktype->sysfs_ops : NULL;
+}
+
+/*
+ * Reads on sysfs are handled through seq_file, which takes care of hairy
+ * details like buffering and seeking. The following function pipes
+ * sysfs_ops->show() result through seq_file.
+ */
+static int sysfs_seq_show(struct seq_file *sf, void *v)
+{
+ struct sysfs_open_file *of = sf->private;
+ struct kobject *kobj = of->sd->s_parent->s_dir.kobj;
+ const struct sysfs_ops *ops;
+ char *buf;
ssize_t count;
- if (!buffer->page)
- buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
- if (!buffer->page)
- return -ENOMEM;
+ /* acquire buffer and ensure that it's >= PAGE_SIZE */
+ count = seq_get_buf(sf, &buf);
+ if (count < PAGE_SIZE) {
+ seq_commit(sf, -1);
+ return 0;
+ }
- /* need attr_sd for attr and ops, its parent for kobj */
- if (!sysfs_get_active(attr_sd))
+ /*
+ * Need @of->sd for attr and ops, its parent for kobj. @of->mutex
+ * nests outside active ref and is just to ensure that the ops
+ * aren't called concurrently for the same open file.
+ */
+ mutex_lock(&of->mutex);
+ if (!sysfs_get_active(of->sd)) {
+ mutex_unlock(&of->mutex);
return -ENODEV;
+ }
- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
- count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
+ of->event = atomic_read(&of->sd->s_attr.open->event);
- sysfs_put_active(attr_sd);
+ /*
+ * Lookup @ops and invoke show(). Control may reach here via seq
+ * file lseek even if @ops->show() isn't implemented.
+ */
+ ops = sysfs_file_ops(of->sd);
+ if (ops->show)
+ count = ops->show(kobj, of->sd->s_attr.attr, buf);
+ else
+ count = 0;
+
+ sysfs_put_active(of->sd);
+ mutex_unlock(&of->mutex);
+
+ if (count < 0)
+ return count;
/*
* The code works fine with PAGE_SIZE return but it's likely to
@@ -96,155 +140,389 @@ static int fill_read_buffer(struct dentry *dentry, struct sysfs_buffer *buffer)
/* Try to struggle along */
count = PAGE_SIZE - 1;
}
- if (count >= 0) {
- buffer->needs_read_fill = 0;
- buffer->count = count;
- } else {
- ret = count;
- }
- return ret;
+ seq_commit(sf, count);
+ return 0;
}
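As a hedged illustration (not part of this patch) of the producer side this seq_file path serves: a minimal kobj_attribute whose show()/store() are reached through kobj_sysfs_ops, i.e. through the sysfs_file_ops()/sysfs_seq_show() code above. The attribute name and foo_value are hypothetical.

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static int foo_value;	/* hypothetical state exported by the attribute */

static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	/* buf is the seq_file buffer handed out by sysfs_seq_show(),
	 * guaranteed to be at least PAGE_SIZE; return bytes written */
	return sprintf(buf, "%d\n", foo_value);
}

static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	/* buf is the NUL-terminated copy made by sysfs_write_file() */
	if (sscanf(buf, "%d", &foo_value) != 1)
		return -EINVAL;
	return count;	/* consume the whole write */
}

static struct kobj_attribute foo_attr = __ATTR(foo, 0644, foo_show, foo_store);

Such an attribute would typically be registered with sysfs_create_file(kobj, &foo_attr.attr) on a kobject whose ktype uses kobj_sysfs_ops, for example one returned by kobject_create_and_add().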
-/**
- * sysfs_read_file - read an attribute.
- * @file: file pointer.
- * @buf: buffer to fill.
- * @count: number of bytes to read.
- * @ppos: starting offset in file.
- *
- * Userspace wants to read an attribute file. The attribute descriptor
- * is in the file's ->d_fsdata. The target object is in the directory's
- * ->d_fsdata.
- *
- * We call fill_read_buffer() to allocate and fill the buffer from the
- * object's show() method exactly once (if the read is happening from
- * the beginning of the file). That should fill the entire buffer with
- * all the data the object has to offer for that attribute.
- * We then call flush_read_buffer() to copy the buffer to userspace
- * in the increments specified.
+/*
+ * Read method for bin files. As reading a bin file can have side-effects,
+ * the exact offset and bytes specified in read(2) call should be passed to
+ * the read callback making it difficult to use seq_file. Implement
+ * simplistic custom buffering for bin files.
*/
-
-static ssize_t
-sysfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+static ssize_t sysfs_bin_read(struct file *file, char __user *userbuf,
+ size_t bytes, loff_t *off)
{
- struct sysfs_buffer *buffer = file->private_data;
- ssize_t retval = 0;
+ struct sysfs_open_file *of = sysfs_of(file);
+ struct bin_attribute *battr = of->sd->s_attr.bin_attr;
+ struct kobject *kobj = of->sd->s_parent->s_dir.kobj;
+ loff_t size = file_inode(file)->i_size;
+ int count = min_t(size_t, bytes, PAGE_SIZE);
+ loff_t offs = *off;
+ char *buf;
+
+ if (!bytes)
+ return 0;
- mutex_lock(&buffer->mutex);
- if (buffer->needs_read_fill || *ppos == 0) {
- retval = fill_read_buffer(file->f_path.dentry, buffer);
- if (retval)
- goto out;
+ if (size) {
+ if (offs > size)
+ return 0;
+ if (offs + count > size)
+ count = size - offs;
}
- pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
- __func__, count, *ppos, buffer->page);
- retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
- buffer->count);
-out:
- mutex_unlock(&buffer->mutex);
- return retval;
-}
-/**
- * fill_write_buffer - copy buffer from userspace.
- * @buffer: data buffer for file.
- * @buf: data from user.
- * @count: number of bytes in @userbuf.
- *
- * Allocate @buffer->page if it hasn't been already, then
- * copy the user-supplied buffer into it.
- */
-static int fill_write_buffer(struct sysfs_buffer *buffer,
- const char __user *buf, size_t count)
-{
- int error;
-
- if (!buffer->page)
- buffer->page = (char *)get_zeroed_page(GFP_KERNEL);
- if (!buffer->page)
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
return -ENOMEM;
- if (count >= PAGE_SIZE)
- count = PAGE_SIZE - 1;
- error = copy_from_user(buffer->page, buf, count);
- buffer->needs_read_fill = 1;
- /* if buf is assumed to contain a string, terminate it by \0,
- so e.g. sscanf() can scan the string easily */
- buffer->page[count] = 0;
- return error ? -EFAULT : count;
-}
+ /* need of->sd for battr, its parent for kobj */
+ mutex_lock(&of->mutex);
+ if (!sysfs_get_active(of->sd)) {
+ count = -ENODEV;
+ mutex_unlock(&of->mutex);
+ goto out_free;
+ }
+
+ if (battr->read)
+ count = battr->read(file, kobj, battr, buf, offs, count);
+ else
+ count = -EIO;
+ sysfs_put_active(of->sd);
+ mutex_unlock(&of->mutex);
+
+ if (count < 0)
+ goto out_free;
+
+ if (copy_to_user(userbuf, buf, count)) {
+ count = -EFAULT;
+ goto out_free;
+ }
+
+ pr_debug("offs = %lld, *off = %lld, count = %d\n", offs, *off, count);
+
+ *off = offs + count;
+
+ out_free:
+ kfree(buf);
+ return count;
+}
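For illustration (not from this patch), a bin_attribute ->read() of the kind sysfs_bin_read() above dispatches to; @off and @count arrive exactly as userspace passed them to read(2), clamped to PAGE_SIZE and to the file size. The eeprom_data array is a hypothetical stand-in for real device state.

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static char eeprom_data[256];	/* hypothetical backing store */

static ssize_t eeprom_read(struct file *file, struct kobject *kobj,
			   struct bin_attribute *attr, char *buf,
			   loff_t off, size_t count)
{
	if (off >= sizeof(eeprom_data))
		return 0;
	if (off + count > sizeof(eeprom_data))
		count = sizeof(eeprom_data) - off;
	memcpy(buf, eeprom_data + off, count);
	return count;	/* bytes actually produced for this read(2) */
}

static struct bin_attribute eeprom_attr = {
	.attr	= { .name = "eeprom", .mode = 0444 },
	.size	= sizeof(eeprom_data),
	.read	= eeprom_read,
};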
/**
- * flush_write_buffer - push buffer to kobject.
- * @dentry: dentry to the attribute
- * @buffer: data buffer for file.
- * @count: number of bytes
+ * flush_write_buffer - push buffer to kobject
+ * @of: open file
+ * @buf: data buffer for file
+ * @off: file offset to write to
+ * @count: number of bytes
*
- * Get the correct pointers for the kobject and the attribute we're
- * dealing with, then call the store() method for the attribute,
- * passing the buffer that we acquired in fill_write_buffer().
+ * Get the correct pointers for the kobject and the attribute we're dealing
+ * with, then call the store() method for it with @buf.
*/
-static int flush_write_buffer(struct dentry *dentry,
- struct sysfs_buffer *buffer, size_t count)
+static int flush_write_buffer(struct sysfs_open_file *of, char *buf, loff_t off,
+ size_t count)
{
- struct sysfs_dirent *attr_sd = dentry->d_fsdata;
- struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
- const struct sysfs_ops *ops = buffer->ops;
- int rc;
+ struct kobject *kobj = of->sd->s_parent->s_dir.kobj;
+ int rc = 0;
- /* need attr_sd for attr and ops, its parent for kobj */
- if (!sysfs_get_active(attr_sd))
+ /*
+ * Need @of->sd for attr and ops, its parent for kobj. @of->mutex
+ * nests outside active ref and is just to ensure that the ops
+ * aren't called concurrently for the same open file.
+ */
+ mutex_lock(&of->mutex);
+ if (!sysfs_get_active(of->sd)) {
+ mutex_unlock(&of->mutex);
return -ENODEV;
+ }
- rc = ops->store(kobj, attr_sd->s_attr.attr, buffer->page, count);
+ if (sysfs_is_bin(of->sd)) {
+ struct bin_attribute *battr = of->sd->s_attr.bin_attr;
- sysfs_put_active(attr_sd);
+ rc = -EIO;
+ if (battr->write)
+ rc = battr->write(of->file, kobj, battr, buf, off,
+ count);
+ } else {
+ const struct sysfs_ops *ops = sysfs_file_ops(of->sd);
+
+ rc = ops->store(kobj, of->sd->s_attr.attr, buf, count);
+ }
+
+ sysfs_put_active(of->sd);
+ mutex_unlock(&of->mutex);
return rc;
}
-
/**
- * sysfs_write_file - write an attribute.
- * @file: file pointer
- * @buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
+ * sysfs_write_file - write an attribute
+ * @file: file pointer
+ * @user_buf: data to write
+ * @count: number of bytes
+ * @ppos: starting offset
+ *
+ * Copy data in from userland and pass it to the matching
+ * sysfs_ops->store() by invoking flush_write_buffer().
*
- * Similar to sysfs_read_file(), though working in the opposite direction.
- * We allocate and fill the data from the user in fill_write_buffer(),
- * then push it to the kobject in flush_write_buffer().
- * There is no easy way for us to know if userspace is only doing a partial
- * write, so we don't support them. We expect the entire buffer to come
- * on the first write.
- * Hint: if you're writing a value, first read the file, modify only the
- * the value you're changing, then write entire buffer back.
+ * There is no easy way for us to know if userspace is only doing a partial
+ * write, so we don't support them. We expect the entire buffer to come on
+ * the first write. Hint: if you're writing a value, first read the file,
+ * modify only the the value you're changing, then write entire buffer
+ * back.
*/
-static ssize_t sysfs_write_file(struct file *file, const char __user *buf,
+static ssize_t sysfs_write_file(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct sysfs_buffer *buffer = file->private_data;
- ssize_t len;
+ struct sysfs_open_file *of = sysfs_of(file);
+ ssize_t len = min_t(size_t, count, PAGE_SIZE);
+ loff_t size = file_inode(file)->i_size;
+ char *buf;
+
+ if (sysfs_is_bin(of->sd) && size) {
+ if (size <= *ppos)
+ return 0;
+ len = min_t(ssize_t, len, size - *ppos);
+ }
- mutex_lock(&buffer->mutex);
- len = fill_write_buffer(buffer, buf, count);
- if (len > 0)
- len = flush_write_buffer(file->f_path.dentry, buffer, len);
+ if (!len)
+ return 0;
+
+ buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, user_buf, len)) {
+ len = -EFAULT;
+ goto out_free;
+ }
+ buf[len] = '\0'; /* guarantee string termination */
+
+ len = flush_write_buffer(of, buf, *ppos, len);
if (len > 0)
*ppos += len;
- mutex_unlock(&buffer->mutex);
+out_free:
+ kfree(buf);
return len;
}
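As a hedged userspace-side illustration of the "whole buffer in one write" rule documented above (not part of the patch; the helper and path are hypothetical):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int sysfs_set(const char *path, const char *value)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	/* one full write: sysfs_write_file() treats whatever arrives in a
	 * single write(2) as the complete new value */
	n = write(fd, value, strlen(value));
	close(fd);
	return n == (ssize_t)strlen(value) ? 0 : -1;
}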
+static void sysfs_bin_vma_open(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+ struct sysfs_open_file *of = sysfs_of(file);
+
+ if (!of->vm_ops)
+ return;
+
+ if (!sysfs_get_active(of->sd))
+ return;
+
+ if (of->vm_ops->open)
+ of->vm_ops->open(vma);
+
+ sysfs_put_active(of->sd);
+}
+
+static int sysfs_bin_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct file *file = vma->vm_file;
+ struct sysfs_open_file *of = sysfs_of(file);
+ int ret;
+
+ if (!of->vm_ops)
+ return VM_FAULT_SIGBUS;
+
+ if (!sysfs_get_active(of->sd))
+ return VM_FAULT_SIGBUS;
+
+ ret = VM_FAULT_SIGBUS;
+ if (of->vm_ops->fault)
+ ret = of->vm_ops->fault(vma, vmf);
+
+ sysfs_put_active(of->sd);
+ return ret;
+}
+
+static int sysfs_bin_page_mkwrite(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ struct file *file = vma->vm_file;
+ struct sysfs_open_file *of = sysfs_of(file);
+ int ret;
+
+ if (!of->vm_ops)
+ return VM_FAULT_SIGBUS;
+
+ if (!sysfs_get_active(of->sd))
+ return VM_FAULT_SIGBUS;
+
+ ret = 0;
+ if (of->vm_ops->page_mkwrite)
+ ret = of->vm_ops->page_mkwrite(vma, vmf);
+ else
+ file_update_time(file);
+
+ sysfs_put_active(of->sd);
+ return ret;
+}
+
+static int sysfs_bin_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct file *file = vma->vm_file;
+ struct sysfs_open_file *of = sysfs_of(file);
+ int ret;
+
+ if (!of->vm_ops)
+ return -EINVAL;
+
+ if (!sysfs_get_active(of->sd))
+ return -EINVAL;
+
+ ret = -EINVAL;
+ if (of->vm_ops->access)
+ ret = of->vm_ops->access(vma, addr, buf, len, write);
+
+ sysfs_put_active(of->sd);
+ return ret;
+}
+
+#ifdef CONFIG_NUMA
+static int sysfs_bin_set_policy(struct vm_area_struct *vma,
+ struct mempolicy *new)
+{
+ struct file *file = vma->vm_file;
+ struct sysfs_open_file *of = sysfs_of(file);
+ int ret;
+
+ if (!of->vm_ops)
+ return 0;
+
+ if (!sysfs_get_active(of->sd))
+ return -EINVAL;
+
+ ret = 0;
+ if (of->vm_ops->set_policy)
+ ret = of->vm_ops->set_policy(vma, new);
+
+ sysfs_put_active(of->sd);
+ return ret;
+}
+
+static struct mempolicy *sysfs_bin_get_policy(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct file *file = vma->vm_file;
+ struct sysfs_open_file *of = sysfs_of(file);
+ struct mempolicy *pol;
+
+ if (!of->vm_ops)
+ return vma->vm_policy;
+
+ if (!sysfs_get_active(of->sd))
+ return vma->vm_policy;
+
+ pol = vma->vm_policy;
+ if (of->vm_ops->get_policy)
+ pol = of->vm_ops->get_policy(vma, addr);
+
+ sysfs_put_active(of->sd);
+ return pol;
+}
+
+static int sysfs_bin_migrate(struct vm_area_struct *vma, const nodemask_t *from,
+ const nodemask_t *to, unsigned long flags)
+{
+ struct file *file = vma->vm_file;
+ struct sysfs_open_file *of = sysfs_of(file);
+ int ret;
+
+ if (!of->vm_ops)
+ return 0;
+
+ if (!sysfs_get_active(of->sd))
+ return 0;
+
+ ret = 0;
+ if (of->vm_ops->migrate)
+ ret = of->vm_ops->migrate(vma, from, to, flags);
+
+ sysfs_put_active(of->sd);
+ return ret;
+}
+#endif
+
+static const struct vm_operations_struct sysfs_bin_vm_ops = {
+ .open = sysfs_bin_vma_open,
+ .fault = sysfs_bin_fault,
+ .page_mkwrite = sysfs_bin_page_mkwrite,
+ .access = sysfs_bin_access,
+#ifdef CONFIG_NUMA
+ .set_policy = sysfs_bin_set_policy,
+ .get_policy = sysfs_bin_get_policy,
+ .migrate = sysfs_bin_migrate,
+#endif
+};
+
+static int sysfs_bin_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct sysfs_open_file *of = sysfs_of(file);
+ struct bin_attribute *battr = of->sd->s_attr.bin_attr;
+ struct kobject *kobj = of->sd->s_parent->s_dir.kobj;
+ int rc;
+
+ mutex_lock(&of->mutex);
+
+ /* need of->sd for battr, its parent for kobj */
+ rc = -ENODEV;
+ if (!sysfs_get_active(of->sd))
+ goto out_unlock;
+
+ if (!battr->mmap)
+ goto out_put;
+
+ rc = battr->mmap(file, kobj, battr, vma);
+ if (rc)
+ goto out_put;
+
+ /*
+ * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
+ * to satisfy versions of X which crash if the mmap fails: that
+ * substitutes a new vm_file, and we don't then want bin_vm_ops.
+ */
+ if (vma->vm_file != file)
+ goto out_put;
+
+ rc = -EINVAL;
+ if (of->mmapped && of->vm_ops != vma->vm_ops)
+ goto out_put;
+
+ /*
+ * It is not possible to successfully wrap close, so return an error
+ * if someone tries to use close.
+ */
+ rc = -EINVAL;
+ if (vma->vm_ops && vma->vm_ops->close)
+ goto out_put;
+
+ rc = 0;
+ of->mmapped = 1;
+ of->vm_ops = vma->vm_ops;
+ vma->vm_ops = &sysfs_bin_vm_ops;
+out_put:
+ sysfs_put_active(of->sd);
+out_unlock:
+ mutex_unlock(&of->mutex);
+
+ return rc;
+}
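For illustration (not from this patch), a bin_attribute ->mmap() of the kind sysfs_bin_mmap() calls; foo_regs_phys and foo_regs_len stand in for a real device resource. Note that, per the check above, the callback must not install a vm_ops with a ->close method, since sysfs cannot wrap close.

#include <linux/mm.h>
#include <linux/sysfs.h>

static phys_addr_t foo_regs_phys;	/* hypothetical register window */
static unsigned long foo_regs_len;

static int foo_regs_mmap(struct file *file, struct kobject *kobj,
			 struct bin_attribute *attr,
			 struct vm_area_struct *vma)
{
	/* map the MMIO window; sysfs_bin_mmap() then wraps whatever ends
	 * up in vma->vm_ops with sysfs_bin_vm_ops so each callback runs
	 * under an active reference */
	return vm_iomap_memory(vma, foo_regs_phys, foo_regs_len);
}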
+
/**
* sysfs_get_open_dirent - get or create sysfs_open_dirent
* @sd: target sysfs_dirent
- * @buffer: sysfs_buffer for this instance of open
+ * @of: sysfs_open_file for this instance of open
*
* If @sd->s_attr.open exists, increment its reference count;
- * otherwise, create one. @buffer is chained to the buffers
- * list.
+ * otherwise, create one. @of is chained to the files list.
*
* LOCKING:
* Kernel thread context (may sleep).
@@ -253,11 +531,12 @@ static ssize_t sysfs_write_file(struct file *file, const char __user *buf,
* 0 on success, -errno on failure.
*/
static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
- struct sysfs_buffer *buffer)
+ struct sysfs_open_file *of)
{
struct sysfs_open_dirent *od, *new_od = NULL;
retry:
+ mutex_lock(&sysfs_open_file_mutex);
spin_lock_irq(&sysfs_open_dirent_lock);
if (!sd->s_attr.open && new_od) {
@@ -268,10 +547,11 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
od = sd->s_attr.open;
if (od) {
atomic_inc(&od->refcnt);
- list_add_tail(&buffer->list, &od->buffers);
+ list_add_tail(&of->list, &od->files);
}
spin_unlock_irq(&sysfs_open_dirent_lock);
+ mutex_unlock(&sysfs_open_file_mutex);
if (od) {
kfree(new_od);
@@ -286,36 +566,40 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
atomic_set(&new_od->refcnt, 0);
atomic_set(&new_od->event, 1);
init_waitqueue_head(&new_od->poll);
- INIT_LIST_HEAD(&new_od->buffers);
+ INIT_LIST_HEAD(&new_od->files);
goto retry;
}
/**
* sysfs_put_open_dirent - put sysfs_open_dirent
* @sd: target sysfs_dirent
- * @buffer: associated sysfs_buffer
+ * @of: associated sysfs_open_file
*
- * Put @sd->s_attr.open and unlink @buffer from the buffers list.
- * If reference count reaches zero, disassociate and free it.
+ * Put @sd->s_attr.open and unlink @of from the files list. If
+ * reference count reaches zero, disassociate and free it.
*
* LOCKING:
* None.
*/
static void sysfs_put_open_dirent(struct sysfs_dirent *sd,
- struct sysfs_buffer *buffer)
+ struct sysfs_open_file *of)
{
struct sysfs_open_dirent *od = sd->s_attr.open;
unsigned long flags;
+ mutex_lock(&sysfs_open_file_mutex);
spin_lock_irqsave(&sysfs_open_dirent_lock, flags);
- list_del(&buffer->list);
+ if (of)
+ list_del(&of->list);
+
if (atomic_dec_and_test(&od->refcnt))
sd->s_attr.open = NULL;
else
od = NULL;
spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags);
+ mutex_unlock(&sysfs_open_file_mutex);
kfree(od);
}
@@ -324,67 +608,81 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
{
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
- struct sysfs_buffer *buffer;
- const struct sysfs_ops *ops;
+ struct sysfs_open_file *of;
+ bool has_read, has_write;
int error = -EACCES;
/* need attr_sd for attr and ops, its parent for kobj */
if (!sysfs_get_active(attr_sd))
return -ENODEV;
- /* every kobject with an attribute needs a ktype assigned */
- if (kobj->ktype && kobj->ktype->sysfs_ops)
- ops = kobj->ktype->sysfs_ops;
- else {
- WARN(1, KERN_ERR
- "missing sysfs attribute operations for kobject: %s\n",
- kobject_name(kobj));
- goto err_out;
- }
+ if (sysfs_is_bin(attr_sd)) {
+ struct bin_attribute *battr = attr_sd->s_attr.bin_attr;
- /* File needs write support.
- * The inode's perms must say it's ok,
- * and we must have a store method.
- */
- if (file->f_mode & FMODE_WRITE) {
- if (!(inode->i_mode & S_IWUGO) || !ops->store)
- goto err_out;
- }
+ has_read = battr->read || battr->mmap;
+ has_write = battr->write || battr->mmap;
+ } else {
+ const struct sysfs_ops *ops = sysfs_file_ops(attr_sd);
- /* File needs read support.
- * The inode's perms must say it's ok, and we there
- * must be a show method for it.
- */
- if (file->f_mode & FMODE_READ) {
- if (!(inode->i_mode & S_IRUGO) || !ops->show)
+ /* every kobject with an attribute needs a ktype assigned */
+ if (WARN(!ops, KERN_ERR
+ "missing sysfs attribute operations for kobject: %s\n",
+ kobject_name(kobj)))
goto err_out;
+
+ has_read = ops->show;
+ has_write = ops->store;
}
- /* No error? Great, allocate a buffer for the file, and store it
- * it in file->private_data for easy access.
- */
+ /* check perms and supported operations */
+ if ((file->f_mode & FMODE_WRITE) &&
+ (!(inode->i_mode & S_IWUGO) || !has_write))
+ goto err_out;
+
+ if ((file->f_mode & FMODE_READ) &&
+ (!(inode->i_mode & S_IRUGO) || !has_read))
+ goto err_out;
+
+ /* allocate a sysfs_open_file for the file */
error = -ENOMEM;
- buffer = kzalloc(sizeof(struct sysfs_buffer), GFP_KERNEL);
- if (!buffer)
+ of = kzalloc(sizeof(struct sysfs_open_file), GFP_KERNEL);
+ if (!of)
goto err_out;
- mutex_init(&buffer->mutex);
- buffer->needs_read_fill = 1;
- buffer->ops = ops;
- file->private_data = buffer;
+ mutex_init(&of->mutex);
+ of->sd = attr_sd;
+ of->file = file;
- /* make sure we have open dirent struct */
- error = sysfs_get_open_dirent(attr_sd, buffer);
+ /*
+ * Always instantiate seq_file even if read access doesn't use
+ * seq_file or is not requested. This unifies private data access
+ * and readable regular files are the vast majority anyway.
+ */
+ if (sysfs_is_bin(attr_sd))
+ error = single_open(file, NULL, of);
+ else
+ error = single_open(file, sysfs_seq_show, of);
if (error)
goto err_free;
+ /* seq_file clears PWRITE unconditionally, restore it if WRITE */
+ if (file->f_mode & FMODE_WRITE)
+ file->f_mode |= FMODE_PWRITE;
+
+ /* make sure we have open dirent struct */
+ error = sysfs_get_open_dirent(attr_sd, of);
+ if (error)
+ goto err_close;
+
/* open succeeded, put active references */
sysfs_put_active(attr_sd);
return 0;
- err_free:
- kfree(buffer);
- err_out:
+err_close:
+ single_release(inode, file);
+err_free:
+ kfree(of);
+err_out:
sysfs_put_active(attr_sd);
return error;
}
@@ -392,17 +690,41 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
static int sysfs_release(struct inode *inode, struct file *filp)
{
struct sysfs_dirent *sd = filp->f_path.dentry->d_fsdata;
- struct sysfs_buffer *buffer = filp->private_data;
+ struct sysfs_open_file *of = sysfs_of(filp);
- sysfs_put_open_dirent(sd, buffer);
-
- if (buffer->page)
- free_page((unsigned long)buffer->page);
- kfree(buffer);
+ sysfs_put_open_dirent(sd, of);
+ single_release(inode, filp);
+ kfree(of);
return 0;
}
+void sysfs_unmap_bin_file(struct sysfs_dirent *sd)
+{
+ struct sysfs_open_dirent *od;
+ struct sysfs_open_file *of;
+
+ if (!sysfs_is_bin(sd))
+ return;
+
+ spin_lock_irq(&sysfs_open_dirent_lock);
+ od = sd->s_attr.open;
+ if (od)
+ atomic_inc(&od->refcnt);
+ spin_unlock_irq(&sysfs_open_dirent_lock);
+ if (!od)
+ return;
+
+ mutex_lock(&sysfs_open_file_mutex);
+ list_for_each_entry(of, &od->files, list) {
+ struct inode *inode = file_inode(of->file);
+ unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+ }
+ mutex_unlock(&sysfs_open_file_mutex);
+
+ sysfs_put_open_dirent(sd, NULL);
+}
+
/* Sysfs attribute files are pollable. The idea is that you read
* the content and then you use 'poll' or 'select' to wait for
* the content to change. When the content changes (assuming the
@@ -418,7 +740,7 @@ static int sysfs_release(struct inode *inode, struct file *filp)
*/
static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
{
- struct sysfs_buffer *buffer = filp->private_data;
+ struct sysfs_open_file *of = sysfs_of(filp);
struct sysfs_dirent *attr_sd = filp->f_path.dentry->d_fsdata;
struct sysfs_open_dirent *od = attr_sd->s_attr.open;
@@ -430,13 +752,12 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
sysfs_put_active(attr_sd);
- if (buffer->event != atomic_read(&od->event))
+ if (of->event != atomic_read(&od->event))
goto trigger;
return DEFAULT_POLLMASK;
trigger:
- buffer->needs_read_fill = 1;
return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}
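A hedged userspace illustration of the read-then-poll pattern described in the comment above (not part of the patch; the attribute path is hypothetical):

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int wait_for_change(const char *path)
{
	char buf[4096];
	struct pollfd pfd;

	pfd.fd = open(path, O_RDONLY);
	if (pfd.fd < 0)
		return -1;
	/* read first: this records the current event count for the file */
	(void)read(pfd.fd, buf, sizeof(buf));
	pfd.events = POLLERR | POLLPRI;
	/* blocks until the kernel side calls sysfs_notify() on the file */
	poll(&pfd, 1, -1);
	/* seek back and re-read to fetch the new content */
	lseek(pfd.fd, 0, SEEK_SET);
	(void)read(pfd.fd, buf, sizeof(buf));
	close(pfd.fd);
	return 0;
}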
@@ -466,9 +787,9 @@ void sysfs_notify(struct kobject *k, const char *dir, const char *attr)
mutex_lock(&sysfs_mutex);
if (sd && dir)
- sd = sysfs_find_dirent(sd, NULL, dir);
+ sd = sysfs_find_dirent(sd, dir, NULL);
if (sd && attr)
- sd = sysfs_find_dirent(sd, NULL, attr);
+ sd = sysfs_find_dirent(sd, attr, NULL);
if (sd)
sysfs_notify_dirent(sd);
@@ -477,7 +798,7 @@ void sysfs_notify(struct kobject *k, const char *dir, const char *attr)
EXPORT_SYMBOL_GPL(sysfs_notify);
const struct file_operations sysfs_file_operations = {
- .read = sysfs_read_file,
+ .read = seq_read,
.write = sysfs_write_file,
.llseek = generic_file_llseek,
.open = sysfs_open_file,
@@ -485,58 +806,25 @@ const struct file_operations sysfs_file_operations = {
.poll = sysfs_poll,
};
-static int sysfs_attr_ns(struct kobject *kobj, const struct attribute *attr,
- const void **pns)
-{
- struct sysfs_dirent *dir_sd = kobj->sd;
- const struct sysfs_ops *ops;
- const void *ns = NULL;
- int err;
-
- if (!dir_sd) {
- WARN(1, KERN_ERR "sysfs: kobject %s without dirent\n",
- kobject_name(kobj));
- return -ENOENT;
- }
-
- err = 0;
- if (!sysfs_ns_type(dir_sd))
- goto out;
-
- err = -EINVAL;
- if (!kobj->ktype)
- goto out;
- ops = kobj->ktype->sysfs_ops;
- if (!ops)
- goto out;
- if (!ops->namespace)
- goto out;
-
- err = 0;
- ns = ops->namespace(kobj, attr);
-out:
- if (err) {
- WARN(1, KERN_ERR
- "missing sysfs namespace attribute operation for kobject: %s\n",
- kobject_name(kobj));
- }
- *pns = ns;
- return err;
-}
+const struct file_operations sysfs_bin_operations = {
+ .read = sysfs_bin_read,
+ .write = sysfs_write_file,
+ .llseek = generic_file_llseek,
+ .mmap = sysfs_bin_mmap,
+ .open = sysfs_open_file,
+ .release = sysfs_release,
+ .poll = sysfs_poll,
+};
-int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
- const struct attribute *attr, int type, umode_t amode)
+int sysfs_add_file_mode_ns(struct sysfs_dirent *dir_sd,
+ const struct attribute *attr, int type,
+ umode_t amode, const void *ns)
{
umode_t mode = (amode & S_IALLUGO) | S_IFREG;
struct sysfs_addrm_cxt acxt;
struct sysfs_dirent *sd;
- const void *ns;
int rc;
- rc = sysfs_attr_ns(dir_sd->s_dir.kobj, attr, &ns);
- if (rc)
- return rc;
-
sd = sysfs_new_dirent(attr->name, mode, type);
if (!sd)
return -ENOMEM;
@@ -545,8 +833,8 @@ int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
sd->s_attr.attr = (void *)attr;
sysfs_dirent_init_lockdep(sd);
- sysfs_addrm_start(&acxt, dir_sd);
- rc = sysfs_add_one(&acxt, sd);
+ sysfs_addrm_start(&acxt);
+ rc = sysfs_add_one(&acxt, sd, dir_sd);
sysfs_addrm_finish(&acxt);
if (rc)
@@ -559,23 +847,25 @@ int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
int type)
{
- return sysfs_add_file_mode(dir_sd, attr, type, attr->mode);
+ return sysfs_add_file_mode_ns(dir_sd, attr, type, attr->mode, NULL);
}
-
/**
- * sysfs_create_file - create an attribute file for an object.
- * @kobj: object we're creating for.
- * @attr: attribute descriptor.
+ * sysfs_create_file_ns - create an attribute file for an object with custom ns
+ * @kobj: object we're creating for
+ * @attr: attribute descriptor
+ * @ns: namespace the new file should belong to
*/
-int sysfs_create_file(struct kobject *kobj, const struct attribute *attr)
+int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
+ const void *ns)
{
BUG_ON(!kobj || !kobj->sd || !attr);
- return sysfs_add_file(kobj->sd, attr, SYSFS_KOBJ_ATTR);
+ return sysfs_add_file_mode_ns(kobj->sd, attr, SYSFS_KOBJ_ATTR,
+ attr->mode, ns);
}
-EXPORT_SYMBOL_GPL(sysfs_create_file);
+EXPORT_SYMBOL_GPL(sysfs_create_file_ns);
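As a hedged usage sketch (not part of this patch): a caller tagging an attribute with a namespace so it is only visible in sysfs mounts whose superblock carries the same tag, currently the network namespace. The kobject, attribute and tag are hypothetical; untagged callers are expected to keep using a sysfs_create_file() wrapper that passes a NULL tag.

#include <linux/kobject.h>
#include <linux/sysfs.h>

static struct attribute foo_ns_attr = {
	.name = "foo",
	.mode = 0444,
};

static int foo_publish(struct kobject *kobj, const void *net_ns_tag)
{
	/* a NULL tag would make the file visible in every sysfs instance */
	return sysfs_create_file_ns(kobj, &foo_ns_attr, net_ns_tag);
}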
int sysfs_create_files(struct kobject *kobj, const struct attribute **ptr)
{
@@ -604,7 +894,7 @@ int sysfs_add_file_to_group(struct kobject *kobj,
int error;
if (group)
- dir_sd = sysfs_get_dirent(kobj->sd, NULL, group);
+ dir_sd = sysfs_get_dirent(kobj->sd, group);
else
dir_sd = sysfs_get(kobj->sd);
@@ -630,17 +920,12 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
{
struct sysfs_dirent *sd;
struct iattr newattrs;
- const void *ns;
int rc;
- rc = sysfs_attr_ns(kobj, attr, &ns);
- if (rc)
- return rc;
-
mutex_lock(&sysfs_mutex);
rc = -ENOENT;
- sd = sysfs_find_dirent(kobj->sd, ns, attr->name);
+ sd = sysfs_find_dirent(kobj->sd, attr->name, NULL);
if (!sd)
goto out;
@@ -655,22 +940,21 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
EXPORT_SYMBOL_GPL(sysfs_chmod_file);
/**
- * sysfs_remove_file - remove an object attribute.
- * @kobj: object we're acting for.
- * @attr: attribute descriptor.
+ * sysfs_remove_file_ns - remove an object attribute with a custom ns tag
+ * @kobj: object we're acting for
+ * @attr: attribute descriptor
+ * @ns: namespace tag of the file to remove
*
- * Hash the attribute name and kill the victim.
+ * Hash the attribute name and namespace tag and kill the victim.
*/
-void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr)
+void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
+ const void *ns)
{
- const void *ns;
-
- if (sysfs_attr_ns(kobj, attr, &ns))
- return;
+ struct sysfs_dirent *dir_sd = kobj->sd;
- sysfs_hash_and_remove(kobj->sd, ns, attr->name);
+ sysfs_hash_and_remove(dir_sd, attr->name, ns);
}
-EXPORT_SYMBOL_GPL(sysfs_remove_file);
+EXPORT_SYMBOL_GPL(sysfs_remove_file_ns);
void sysfs_remove_files(struct kobject *kobj, const struct attribute **ptr)
{
@@ -692,16 +976,42 @@ void sysfs_remove_file_from_group(struct kobject *kobj,
struct sysfs_dirent *dir_sd;
if (group)
- dir_sd = sysfs_get_dirent(kobj->sd, NULL, group);
+ dir_sd = sysfs_get_dirent(kobj->sd, group);
else
dir_sd = sysfs_get(kobj->sd);
if (dir_sd) {
- sysfs_hash_and_remove(dir_sd, NULL, attr->name);
+ sysfs_hash_and_remove(dir_sd, attr->name, NULL);
sysfs_put(dir_sd);
}
}
EXPORT_SYMBOL_GPL(sysfs_remove_file_from_group);
+/**
+ * sysfs_create_bin_file - create binary file for object.
+ * @kobj: object.
+ * @attr: attribute descriptor.
+ */
+int sysfs_create_bin_file(struct kobject *kobj,
+ const struct bin_attribute *attr)
+{
+ BUG_ON(!kobj || !kobj->sd || !attr);
+
+ return sysfs_add_file(kobj->sd, &attr->attr, SYSFS_KOBJ_BIN_ATTR);
+}
+EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
+
+/**
+ * sysfs_remove_bin_file - remove binary file for object.
+ * @kobj: object.
+ * @attr: attribute descriptor.
+ */
+void sysfs_remove_bin_file(struct kobject *kobj,
+ const struct bin_attribute *attr)
+{
+ sysfs_hash_and_remove(kobj->sd, attr->attr.name, NULL);
+}
+EXPORT_SYMBOL_GPL(sysfs_remove_bin_file);
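A brief hedged sketch of the lifecycle for the binary-file helpers above, reusing the hypothetical eeprom_attr from the earlier read example:

static int foo_bin_register(struct kobject *kobj)
{
	/* creates the 'eeprom' node; reads go through sysfs_bin_read() */
	return sysfs_create_bin_file(kobj, &eeprom_attr);
}

static void foo_bin_unregister(struct kobject *kobj)
{
	sysfs_remove_bin_file(kobj, &eeprom_attr);
}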
+
struct sysfs_schedule_callback_struct {
struct list_head workq_list;
struct kobject *kobj;
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 5f92cd2f61c1..1898a10e38ce 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -26,7 +26,7 @@ static void remove_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
if (grp->attrs)
for (attr = grp->attrs; *attr; attr++)
- sysfs_hash_and_remove(dir_sd, NULL, (*attr)->name);
+ sysfs_hash_and_remove(dir_sd, (*attr)->name, NULL);
if (grp->bin_attrs)
for (bin_attr = grp->bin_attrs; *bin_attr; bin_attr++)
sysfs_remove_bin_file(kobj, *bin_attr);
@@ -49,16 +49,17 @@ static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
* re-adding (if required) the file.
*/
if (update)
- sysfs_hash_and_remove(dir_sd, NULL,
- (*attr)->name);
+ sysfs_hash_and_remove(dir_sd, (*attr)->name,
+ NULL);
if (grp->is_visible) {
mode = grp->is_visible(kobj, *attr, i);
if (!mode)
continue;
}
- error = sysfs_add_file_mode(dir_sd, *attr,
- SYSFS_KOBJ_ATTR,
- (*attr)->mode | mode);
+ error = sysfs_add_file_mode_ns(dir_sd, *attr,
+ SYSFS_KOBJ_ATTR,
+ (*attr)->mode | mode,
+ NULL);
if (unlikely(error))
break;
}
@@ -110,7 +111,7 @@ static int internal_create_group(struct kobject *kobj, int update,
error = create_files(sd, kobj, grp, update);
if (error) {
if (grp->name)
- sysfs_remove_subdir(sd);
+ sysfs_remove(sd);
}
sysfs_put(sd);
return error;
@@ -206,7 +207,7 @@ void sysfs_remove_group(struct kobject *kobj,
struct sysfs_dirent *sd;
if (grp->name) {
- sd = sysfs_get_dirent(dir_sd, NULL, grp->name);
+ sd = sysfs_get_dirent(dir_sd, grp->name);
if (!sd) {
WARN(!sd, KERN_WARNING
"sysfs group %p not found for kobject '%s'\n",
@@ -218,7 +219,7 @@ void sysfs_remove_group(struct kobject *kobj,
remove_files(sd, kobj, grp);
if (grp->name)
- sysfs_remove_subdir(sd);
+ sysfs_remove(sd);
sysfs_put(sd);
}
@@ -261,7 +262,7 @@ int sysfs_merge_group(struct kobject *kobj,
struct attribute *const *attr;
int i;
- dir_sd = sysfs_get_dirent(kobj->sd, NULL, grp->name);
+ dir_sd = sysfs_get_dirent(kobj->sd, grp->name);
if (!dir_sd)
return -ENOENT;
@@ -269,7 +270,7 @@ int sysfs_merge_group(struct kobject *kobj,
error = sysfs_add_file(dir_sd, *attr, SYSFS_KOBJ_ATTR);
if (error) {
while (--i >= 0)
- sysfs_hash_and_remove(dir_sd, NULL, (*--attr)->name);
+ sysfs_hash_and_remove(dir_sd, (*--attr)->name, NULL);
}
sysfs_put(dir_sd);
@@ -288,10 +289,10 @@ void sysfs_unmerge_group(struct kobject *kobj,
struct sysfs_dirent *dir_sd;
struct attribute *const *attr;
- dir_sd = sysfs_get_dirent(kobj->sd, NULL, grp->name);
+ dir_sd = sysfs_get_dirent(kobj->sd, grp->name);
if (dir_sd) {
for (attr = grp->attrs; *attr; ++attr)
- sysfs_hash_and_remove(dir_sd, NULL, (*attr)->name);
+ sysfs_hash_and_remove(dir_sd, (*attr)->name, NULL);
sysfs_put(dir_sd);
}
}
@@ -310,7 +311,7 @@ int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name,
struct sysfs_dirent *dir_sd;
int error = 0;
- dir_sd = sysfs_get_dirent(kobj->sd, NULL, group_name);
+ dir_sd = sysfs_get_dirent(kobj->sd, group_name);
if (!dir_sd)
return -ENOENT;
@@ -332,9 +333,9 @@ void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
{
struct sysfs_dirent *dir_sd;
- dir_sd = sysfs_get_dirent(kobj->sd, NULL, group_name);
+ dir_sd = sysfs_get_dirent(kobj->sd, group_name);
if (dir_sd) {
- sysfs_hash_and_remove(dir_sd, NULL, link_name);
+ sysfs_hash_and_remove(dir_sd, link_name, NULL);
sysfs_put(dir_sd);
}
}
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 963f910c8034..1750f790af3b 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -258,9 +258,9 @@ static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
inode->i_fop = &sysfs_file_operations;
break;
case SYSFS_KOBJ_BIN_ATTR:
- bin_attr = sd->s_bin_attr.bin_attr;
+ bin_attr = sd->s_attr.bin_attr;
inode->i_size = bin_attr->size;
- inode->i_fop = &bin_fops;
+ inode->i_fop = &sysfs_bin_operations;
break;
case SYSFS_KOBJ_LINK:
inode->i_op = &sysfs_symlink_inode_operations;
@@ -314,32 +314,6 @@ void sysfs_evict_inode(struct inode *inode)
sysfs_put(sd);
}
-int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns,
- const char *name)
-{
- struct sysfs_addrm_cxt acxt;
- struct sysfs_dirent *sd;
-
- if (!dir_sd) {
- WARN(1, KERN_WARNING "sysfs: can not remove '%s', no directory\n",
- name);
- return -ENOENT;
- }
-
- sysfs_addrm_start(&acxt, dir_sd);
-
- sd = sysfs_find_dirent(dir_sd, ns, name);
- if (sd)
- sysfs_remove_one(&acxt, sd);
-
- sysfs_addrm_finish(&acxt);
-
- if (sd)
- return 0;
- else
- return -ENOENT;
-}
-
int sysfs_permission(struct inode *inode, int mask)
{
struct sysfs_dirent *sd;
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 834ec2cdb7a3..8c24bce2f4ae 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -36,7 +36,7 @@ static const struct super_operations sysfs_ops = {
struct sysfs_dirent sysfs_root = {
.s_name = "",
.s_count = ATOMIC_INIT(1),
- .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
+ .s_flags = SYSFS_DIR,
.s_mode = S_IFDIR | S_IRUGO | S_IXUGO,
.s_ino = 1,
};
@@ -77,14 +77,8 @@ static int sysfs_test_super(struct super_block *sb, void *data)
{
struct sysfs_super_info *sb_info = sysfs_info(sb);
struct sysfs_super_info *info = data;
- enum kobj_ns_type type;
- int found = 1;
- for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) {
- if (sb_info->ns[type] != info->ns[type])
- found = 0;
- }
- return found;
+ return sb_info->ns == info->ns;
}
static int sysfs_set_super(struct super_block *sb, void *data)
@@ -98,9 +92,7 @@ static int sysfs_set_super(struct super_block *sb, void *data)
static void free_sysfs_super_info(struct sysfs_super_info *info)
{
- int type;
- for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
- kobj_ns_drop(type, info->ns[type]);
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, info->ns);
kfree(info);
}
@@ -108,7 +100,6 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
struct sysfs_super_info *info;
- enum kobj_ns_type type;
struct super_block *sb;
int error;
@@ -116,18 +107,15 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
if (!capable(CAP_SYS_ADMIN) && !fs_fully_visible(fs_type))
return ERR_PTR(-EPERM);
- for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) {
- if (!kobj_ns_current_may_mount(type))
- return ERR_PTR(-EPERM);
- }
+ if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET))
+ return ERR_PTR(-EPERM);
}
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
- for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
- info->ns[type] = kobj_ns_grab_current(type);
+ info->ns = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
sb = sget(fs_type, sysfs_test_super, sysfs_set_super, flags, info);
if (IS_ERR(sb) || sb->s_fs_info != info)
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 2dd4507d9edd..1a23681b8179 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -28,18 +28,19 @@ static int sysfs_do_create_link_sd(struct sysfs_dirent *parent_sd,
struct sysfs_dirent *target_sd = NULL;
struct sysfs_dirent *sd = NULL;
struct sysfs_addrm_cxt acxt;
- enum kobj_ns_type ns_type;
int error;
BUG_ON(!name || !parent_sd);
- /* target->sd can go away beneath us but is protected with
- * sysfs_assoc_lock. Fetch target_sd from it.
+ /*
+ * We don't own @target and it may be removed at any time.
+ * Synchronize using sysfs_symlink_target_lock. See
+ * sysfs_remove_dir() for details.
*/
- spin_lock(&sysfs_assoc_lock);
+ spin_lock(&sysfs_symlink_target_lock);
if (target->sd)
target_sd = sysfs_get(target->sd);
- spin_unlock(&sysfs_assoc_lock);
+ spin_unlock(&sysfs_symlink_target_lock);
error = -ENOENT;
if (!target_sd)
@@ -50,29 +51,15 @@ static int sysfs_do_create_link_sd(struct sysfs_dirent *parent_sd,
if (!sd)
goto out_put;
- ns_type = sysfs_ns_type(parent_sd);
- if (ns_type)
- sd->s_ns = target->ktype->namespace(target);
+ sd->s_ns = target_sd->s_ns;
sd->s_symlink.target_sd = target_sd;
target_sd = NULL; /* reference is now owned by the symlink */
- sysfs_addrm_start(&acxt, parent_sd);
- /* Symlinks must be between directories with the same ns_type */
- if (!ns_type ||
- (ns_type == sysfs_ns_type(sd->s_symlink.target_sd->s_parent))) {
- if (warn)
- error = sysfs_add_one(&acxt, sd);
- else
- error = __sysfs_add_one(&acxt, sd);
- } else {
- error = -EINVAL;
- WARN(1, KERN_WARNING
- "sysfs: symlink across ns_types %s/%s -> %s/%s\n",
- parent_sd->s_name,
- sd->s_name,
- sd->s_symlink.target_sd->s_parent->s_name,
- sd->s_symlink.target_sd->s_name);
- }
+ sysfs_addrm_start(&acxt);
+ if (warn)
+ error = sysfs_add_one(&acxt, sd, parent_sd);
+ else
+ error = __sysfs_add_one(&acxt, sd, parent_sd);
sysfs_addrm_finish(&acxt);
if (error)
@@ -155,11 +142,17 @@ void sysfs_delete_link(struct kobject *kobj, struct kobject *targ,
const char *name)
{
const void *ns = NULL;
- spin_lock(&sysfs_assoc_lock);
- if (targ->sd && sysfs_ns_type(kobj->sd))
+
+ /*
+ * We don't own @target and it may be removed at any time.
+ * Synchronize using sysfs_symlink_target_lock. See
+ * sysfs_remove_dir() for details.
+ */
+ spin_lock(&sysfs_symlink_target_lock);
+ if (targ->sd)
ns = targ->sd->s_ns;
- spin_unlock(&sysfs_assoc_lock);
- sysfs_hash_and_remove(kobj->sd, ns, name);
+ spin_unlock(&sysfs_symlink_target_lock);
+ sysfs_hash_and_remove(kobj->sd, name, ns);
}
/**
@@ -176,24 +169,25 @@ void sysfs_remove_link(struct kobject *kobj, const char *name)
else
parent_sd = kobj->sd;
- sysfs_hash_and_remove(parent_sd, NULL, name);
+ sysfs_hash_and_remove(parent_sd, name, NULL);
}
EXPORT_SYMBOL_GPL(sysfs_remove_link);
/**
- * sysfs_rename_link - rename symlink in object's directory.
+ * sysfs_rename_link_ns - rename symlink in object's directory.
* @kobj: object we're acting for.
* @targ: object we're pointing to.
* @old: previous name of the symlink.
* @new: new name of the symlink.
+ * @new_ns: new namespace of the symlink.
*
* A helper function for the common rename symlink idiom.
*/
-int sysfs_rename_link(struct kobject *kobj, struct kobject *targ,
- const char *old, const char *new)
+int sysfs_rename_link_ns(struct kobject *kobj, struct kobject *targ,
+ const char *old, const char *new, const void *new_ns)
{
struct sysfs_dirent *parent_sd, *sd = NULL;
- const void *old_ns = NULL, *new_ns = NULL;
+ const void *old_ns = NULL;
int result;
if (!kobj)
@@ -205,7 +199,7 @@ int sysfs_rename_link(struct kobject *kobj, struct kobject *targ,
old_ns = targ->sd->s_ns;
result = -ENOENT;
- sd = sysfs_get_dirent(parent_sd, old_ns, old);
+ sd = sysfs_get_dirent_ns(parent_sd, old, old_ns);
if (!sd)
goto out;
@@ -215,16 +209,13 @@ int sysfs_rename_link(struct kobject *kobj, struct kobject *targ,
if (sd->s_symlink.target_sd->s_dir.kobj != targ)
goto out;
- if (sysfs_ns_type(parent_sd))
- new_ns = targ->ktype->namespace(targ);
-
- result = sysfs_rename(sd, parent_sd, new_ns, new);
+ result = sysfs_rename(sd, parent_sd, new, new_ns);
out:
sysfs_put(sd);
return result;
}
-EXPORT_SYMBOL_GPL(sysfs_rename_link);
+EXPORT_SYMBOL_GPL(sysfs_rename_link_ns);
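A hedged usage sketch for the renamed helper (not part of the patch); the link names are hypothetical and NULL is passed as the tag for an untagged link:

static int rename_companion_link(struct kobject *kobj, struct kobject *targ)
{
	return sysfs_rename_link_ns(kobj, targ, "companion_old",
				    "companion_new", NULL);
}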
static int sysfs_get_target_path(struct sysfs_dirent *parent_sd,
struct sysfs_dirent *target_sd, char *path)
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index b6deca3e301d..e3aea92ebfa3 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -29,15 +29,13 @@ struct sysfs_elem_symlink {
};
struct sysfs_elem_attr {
- struct attribute *attr;
+ union {
+ struct attribute *attr;
+ struct bin_attribute *bin_attr;
+ };
struct sysfs_open_dirent *open;
};
-struct sysfs_elem_bin_attr {
- struct bin_attribute *bin_attr;
- struct hlist_head buffers;
-};
-
struct sysfs_inode_attrs {
struct iattr ia_iattr;
void *ia_secdata;
@@ -74,7 +72,6 @@ struct sysfs_dirent {
struct sysfs_elem_dir s_dir;
struct sysfs_elem_symlink s_symlink;
struct sysfs_elem_attr s_attr;
- struct sysfs_elem_bin_attr s_bin_attr;
};
unsigned short s_flags;
@@ -93,11 +90,8 @@ struct sysfs_dirent {
#define SYSFS_COPY_NAME (SYSFS_DIR | SYSFS_KOBJ_LINK)
#define SYSFS_ACTIVE_REF (SYSFS_KOBJ_ATTR | SYSFS_KOBJ_BIN_ATTR)
-/* identify any namespace tag on sysfs_dirents */
-#define SYSFS_NS_TYPE_MASK 0xf00
-#define SYSFS_NS_TYPE_SHIFT 8
-
-#define SYSFS_FLAG_MASK ~(SYSFS_NS_TYPE_MASK|SYSFS_TYPE_MASK)
+#define SYSFS_FLAG_MASK ~SYSFS_TYPE_MASK
+#define SYSFS_FLAG_HAS_NS 0x01000
#define SYSFS_FLAG_REMOVED 0x02000
static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
@@ -105,16 +99,8 @@ static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
return sd->s_flags & SYSFS_TYPE_MASK;
}
-/*
- * Return any namespace tags on this dirent.
- * enum kobj_ns_type is defined in linux/kobject.h
- */
-static inline enum kobj_ns_type sysfs_ns_type(struct sysfs_dirent *sd)
-{
- return (sd->s_flags & SYSFS_NS_TYPE_MASK) >> SYSFS_NS_TYPE_SHIFT;
-}
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
#define sysfs_dirent_init_lockdep(sd) \
do { \
struct attribute *attr = sd->s_attr.attr; \
@@ -124,15 +110,31 @@ do { \
\
lockdep_init_map(&sd->dep_map, "s_active", key, 0); \
} while (0)
+
+/* Test for attributes that want to ignore lockdep for read-locking */
+static inline bool sysfs_ignore_lockdep(struct sysfs_dirent *sd)
+{
+ int type = sysfs_type(sd);
+
+ return (type == SYSFS_KOBJ_ATTR || type == SYSFS_KOBJ_BIN_ATTR) &&
+ sd->s_attr.attr->ignore_lockdep;
+}
+
#else
+
#define sysfs_dirent_init_lockdep(sd) do {} while (0)
+
+static inline bool sysfs_ignore_lockdep(struct sysfs_dirent *sd)
+{
+ return true;
+}
+
#endif
/*
* Context structure to be used while adding/removing nodes.
*/
struct sysfs_addrm_cxt {
- struct sysfs_dirent *parent_sd;
struct sysfs_dirent *removed;
};
@@ -141,12 +143,13 @@ struct sysfs_addrm_cxt {
*/
/*
- * Each sb is associated with a set of namespace tags (i.e.
- * the network namespace of the task which mounted this sysfs
- * instance).
+ * Each sb is associated with one namespace tag, currently the network
+ * namespace of the task which mounted this sysfs instance. If multiple
+ * tags become necessary, make the following an array and compare
+ * sysfs_dirent tag against every entry.
*/
struct sysfs_super_info {
- void *ns[KOBJ_NS_TYPES];
+ void *ns;
};
#define sysfs_info(SB) ((struct sysfs_super_info *)(SB->s_fs_info))
extern struct sysfs_dirent sysfs_root;
@@ -156,38 +159,37 @@ extern struct kmem_cache *sysfs_dir_cachep;
* dir.c
*/
extern struct mutex sysfs_mutex;
-extern spinlock_t sysfs_assoc_lock;
+extern spinlock_t sysfs_symlink_target_lock;
extern const struct dentry_operations sysfs_dentry_ops;
extern const struct file_operations sysfs_dir_operations;
extern const struct inode_operations sysfs_dir_inode_operations;
-struct dentry *sysfs_get_dentry(struct sysfs_dirent *sd);
struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd);
void sysfs_put_active(struct sysfs_dirent *sd);
-void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
- struct sysfs_dirent *parent_sd);
-int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd);
-int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd);
-void sysfs_remove_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd);
+void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt);
+void sysfs_warn_dup(struct sysfs_dirent *parent, const char *name);
+int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd,
+ struct sysfs_dirent *parent_sd);
+int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd,
+ struct sysfs_dirent *parent_sd);
+void sysfs_remove(struct sysfs_dirent *sd);
+int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name,
+ const void *ns);
void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt);
struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
- const void *ns,
- const unsigned char *name);
-struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
- const void *ns,
- const unsigned char *name);
+ const unsigned char *name,
+ const void *ns);
struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type);
void release_sysfs_dirent(struct sysfs_dirent *sd);
int sysfs_create_subdir(struct kobject *kobj, const char *name,
struct sysfs_dirent **p_sd);
-void sysfs_remove_subdir(struct sysfs_dirent *sd);
int sysfs_rename(struct sysfs_dirent *sd, struct sysfs_dirent *new_parent_sd,
- const void *ns, const char *new_name);
+ const char *new_name, const void *new_ns);
static inline struct sysfs_dirent *__sysfs_get(struct sysfs_dirent *sd)
{
@@ -218,25 +220,21 @@ int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat);
int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags);
-int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns,
- const char *name);
int sysfs_inode_init(void);
/*
* file.c
*/
extern const struct file_operations sysfs_file_operations;
+extern const struct file_operations sysfs_bin_operations;
int sysfs_add_file(struct sysfs_dirent *dir_sd,
const struct attribute *attr, int type);
-int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
- const struct attribute *attr, int type, umode_t amode);
-/*
- * bin.c
- */
-extern const struct file_operations bin_fops;
-void unmap_bin_file(struct sysfs_dirent *attr_sd);
+int sysfs_add_file_mode_ns(struct sysfs_dirent *dir_sd,
+ const struct attribute *attr, int type,
+ umode_t amode, const void *ns);
+void sysfs_unmap_bin_file(struct sysfs_dirent *sd);
/*
* symlink.c
diff --git a/fs/sysv/file.c b/fs/sysv/file.c
index 9d4dc6831792..ff4b363ba5c9 100644
--- a/fs/sysv/file.c
+++ b/fs/sysv/file.c
@@ -22,9 +22,9 @@
const struct file_operations sysv_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.fsync = generic_file_fsync,
.splice_read = generic_file_splice_read,
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 6e025e02ffde..cc1febd8fadf 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2563,9 +2563,9 @@ static int corrupt_data(const struct ubifs_info *c, const void *buf,
unsigned int from, to, ffs = chance(1, 2);
unsigned char *p = (void *)buf;
- from = prandom_u32() % (len + 1);
- /* Corruption may only span one max. write unit */
- to = min(len, ALIGN(from, c->max_write_size));
+ from = prandom_u32() % len;
+ /* Corruption may span at most to the end of the write unit */
+ to = min(len, ALIGN(from + 1, c->max_write_size));
ubifs_warn("filled bytes %u-%u with %s", from, to - 1,
ffs ? "0xFFs" : "random data");
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 6b4947f75af7..ea41649e4ca5 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -192,8 +192,7 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
struct ubifs_dent_node *dent;
struct ubifs_info *c = dir->i_sb->s_fs_info;
- dbg_gen("'%.*s' in dir ino %lu",
- dentry->d_name.len, dentry->d_name.name, dir->i_ino);
+ dbg_gen("'%pd' in dir ino %lu", dentry, dir->i_ino);
if (dentry->d_name.len > UBIFS_MAX_NLEN)
return ERR_PTR(-ENAMETOOLONG);
@@ -225,8 +224,8 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
* checking.
*/
err = PTR_ERR(inode);
- ubifs_err("dead directory entry '%.*s', error %d",
- dentry->d_name.len, dentry->d_name.name, err);
+ ubifs_err("dead directory entry '%pd', error %d",
+ dentry, err);
ubifs_ro_mode(c, err);
goto out;
}
@@ -260,8 +259,8 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
* parent directory inode.
*/
- dbg_gen("dent '%.*s', mode %#hx in dir ino %lu",
- dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino);
+ dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+ dentry, mode, dir->i_ino);
err = ubifs_budget_space(c, &req);
if (err)
@@ -509,8 +508,8 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
* changing the parent inode.
*/
- dbg_gen("dent '%.*s' to ino %lu (nlink %d) in dir ino %lu",
- dentry->d_name.len, dentry->d_name.name, inode->i_ino,
+ dbg_gen("dent '%pd' to ino %lu (nlink %d) in dir ino %lu",
+ dentry, inode->i_ino,
inode->i_nlink, dir->i_ino);
ubifs_assert(mutex_is_locked(&dir->i_mutex));
ubifs_assert(mutex_is_locked(&inode->i_mutex));
@@ -566,8 +565,8 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
* deletions.
*/
- dbg_gen("dent '%.*s' from ino %lu (nlink %d) in dir ino %lu",
- dentry->d_name.len, dentry->d_name.name, inode->i_ino,
+ dbg_gen("dent '%pd' from ino %lu (nlink %d) in dir ino %lu",
+ dentry, inode->i_ino,
inode->i_nlink, dir->i_ino);
ubifs_assert(mutex_is_locked(&dir->i_mutex));
ubifs_assert(mutex_is_locked(&inode->i_mutex));
@@ -656,8 +655,8 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
* because we have extra space reserved for deletions.
*/
- dbg_gen("directory '%.*s', ino %lu in dir ino %lu", dentry->d_name.len,
- dentry->d_name.name, inode->i_ino, dir->i_ino);
+ dbg_gen("directory '%pd', ino %lu in dir ino %lu", dentry,
+ inode->i_ino, dir->i_ino);
ubifs_assert(mutex_is_locked(&dir->i_mutex));
ubifs_assert(mutex_is_locked(&inode->i_mutex));
err = check_dir_empty(c, dentry->d_inode);
@@ -716,8 +715,8 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
* directory inode.
*/
- dbg_gen("dent '%.*s', mode %#hx in dir ino %lu",
- dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino);
+ dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+ dentry, mode, dir->i_ino);
err = ubifs_budget_space(c, &req);
if (err)
@@ -778,8 +777,7 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
* directory inode.
*/
- dbg_gen("dent '%.*s' in dir ino %lu",
- dentry->d_name.len, dentry->d_name.name, dir->i_ino);
+ dbg_gen("dent '%pd' in dir ino %lu", dentry, dir->i_ino);
if (!new_valid_dev(rdev))
return -EINVAL;
@@ -853,8 +851,8 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
* directory inode.
*/
- dbg_gen("dent '%.*s', target '%s' in dir ino %lu", dentry->d_name.len,
- dentry->d_name.name, symname, dir->i_ino);
+ dbg_gen("dent '%pd', target '%s' in dir ino %lu", dentry,
+ symname, dir->i_ino);
if (len > UBIFS_MAX_INO_DATA)
return -ENAMETOOLONG;
@@ -979,10 +977,9 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
* separately.
*/
- dbg_gen("dent '%.*s' ino %lu in dir ino %lu to dent '%.*s' in dir ino %lu",
- old_dentry->d_name.len, old_dentry->d_name.name,
- old_inode->i_ino, old_dir->i_ino, new_dentry->d_name.len,
- new_dentry->d_name.name, new_dir->i_ino);
+ dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu",
+ old_dentry, old_inode->i_ino, old_dir->i_ino,
+ new_dentry, new_dir->i_ino);
ubifs_assert(mutex_is_locked(&old_dir->i_mutex));
ubifs_assert(mutex_is_locked(&new_dir->i_mutex));
if (unlink)
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 123c79b7261e..22924e048ac0 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -44,7 +44,7 @@
* 'ubifs_writepage()' we are only guaranteed that the page is locked.
*
* Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
- * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
+ * read-ahead path does not lock it ("sys_read -> generic_file_read_iter ->
* ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not
* set as well. However, UBIFS disables readahead.
*/
@@ -1396,8 +1396,8 @@ static int update_mctime(struct ubifs_info *c, struct inode *inode)
return 0;
}
-static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos)
{
int err;
struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -1407,7 +1407,7 @@ static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (err)
return err;
- return generic_file_aio_write(iocb, iov, nr_segs, pos);
+ return generic_file_write_iter(iocb, iter, pos);
}
static int ubifs_set_page_dirty(struct page *page)
@@ -1583,8 +1583,8 @@ const struct file_operations ubifs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = generic_file_aio_read,
- .aio_write = ubifs_aio_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = ubifs_write_iter,
.mmap = ubifs_file_mmap,
.fsync = ubifs_fsync,
.unlocked_ioctl = ubifs_ioctl,
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 76ca53cd3eee..9718da86ad01 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -668,8 +668,7 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
ubifs_assert(!wbuf->used);
for (i = 0; ; i++) {
- int space_before = c->leb_size - wbuf->offs - wbuf->used;
- int space_after;
+ int space_before, space_after;
cond_resched();
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index afaad07f3b29..0e045e75abd8 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -933,10 +933,8 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
int move = (old_dir != new_dir);
struct ubifs_inode *uninitialized_var(new_ui);
- dbg_jnl("dent '%.*s' in dir ino %lu to dent '%.*s' in dir ino %lu",
- old_dentry->d_name.len, old_dentry->d_name.name,
- old_dir->i_ino, new_dentry->d_name.len,
- new_dentry->d_name.name, new_dir->i_ino);
+ dbg_jnl("dent '%pd' in dir ino %lu to dent '%pd' in dir ino %lu",
+ old_dentry, old_dir->i_ino, new_dentry, new_dir->i_ino);
ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 3e4aa7281e04..f69daa514a57 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1630,8 +1630,10 @@ static int ubifs_remount_rw(struct ubifs_info *c)
}
c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ, GFP_KERNEL);
- if (!c->write_reserve_buf)
+ if (!c->write_reserve_buf) {
+ err = -ENOMEM;
goto out;
+ }
err = ubifs_lpt_init(c, 0, 1);
if (err)
@@ -2064,8 +2066,10 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
}
sb->s_root = d_make_root(root);
- if (!sb->s_root)
+ if (!sb->s_root) {
+ err = -ENOMEM;
goto out_umount;
+ }
mutex_unlock(&c->umount_mutex);
return 0;
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 0f7139bdb2c2..5e0a63b1b0d5 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -303,8 +303,8 @@ int ubifs_setxattr(struct dentry *dentry, const char *name,
union ubifs_key key;
int err, type;
- dbg_gen("xattr '%s', host ino %lu ('%.*s'), size %zd", name,
- host->i_ino, dentry->d_name.len, dentry->d_name.name, size);
+ dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", name,
+ host->i_ino, dentry, size);
ubifs_assert(mutex_is_locked(&host->i_mutex));
if (size > UBIFS_MAX_INO_DATA)
@@ -367,8 +367,8 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
union ubifs_key key;
int err;
- dbg_gen("xattr '%s', ino %lu ('%.*s'), buf size %zd", name,
- host->i_ino, dentry->d_name.len, dentry->d_name.name, size);
+ dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name,
+ host->i_ino, dentry, size);
err = check_namespace(&nm);
if (err < 0)
@@ -426,8 +426,8 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
int err, len, written = 0;
struct qstr nm = { .name = NULL };
- dbg_gen("ino %lu ('%.*s'), buffer size %zd", host->i_ino,
- dentry->d_name.len, dentry->d_name.name, size);
+ dbg_gen("ino %lu ('%pd'), buffer size %zd", host->i_ino,
+ dentry, size);
len = host_ui->xattr_names + host_ui->xattr_cnt;
if (!buffer)
@@ -529,8 +529,8 @@ int ubifs_removexattr(struct dentry *dentry, const char *name)
union ubifs_key key;
int err;
- dbg_gen("xattr '%s', ino %lu ('%.*s')", name,
- host->i_ino, dentry->d_name.len, dentry->d_name.name);
+ dbg_gen("xattr '%s', ino %lu ('%pd')", name,
+ host->i_ino, dentry);
ubifs_assert(mutex_is_locked(&host->i_mutex));
err = check_namespace(&nm);
diff --git a/fs/udf/file.c b/fs/udf/file.c
index c02a27a19c6d..9985beecffca 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -119,8 +119,7 @@ static int udf_adinicb_write_end(struct file *file,
}
static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov,
- loff_t offset, unsigned long nr_segs)
+ struct iov_iter *iter, loff_t offset)
{
/* Fallback to buffered I/O. */
return 0;
@@ -134,8 +133,8 @@ const struct address_space_operations udf_adinicb_aops = {
.direct_IO = udf_adinicb_direct_IO,
};
-static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t ppos)
+static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t ppos)
{
ssize_t retval;
struct file *file = iocb->ki_filp;
@@ -169,7 +168,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
} else
up_write(&iinfo->i_data_sem);
- retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
+ retval = generic_file_write_iter(iocb, iter, ppos);
if (retval > 0)
mark_inode_dirty(inode);
@@ -243,12 +242,12 @@ static int udf_release_file(struct inode *inode, struct file *filp)
const struct file_operations udf_file_operations = {
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.unlocked_ioctl = udf_ioctl,
.open = generic_file_open,
.mmap = generic_file_mmap,
.write = do_sync_write,
- .aio_write = udf_file_aio_write,
+ .write_iter = udf_file_write_iter,
.release = udf_release_file,
.fsync = generic_file_fsync,
.splice_read = generic_file_splice_read,
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 062b7925bca0..986e11ad176b 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -216,19 +216,17 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
return ret;
}
-static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov,
- loff_t offset, unsigned long nr_segs)
+static ssize_t udf_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
- udf_get_block);
+ ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block);
if (unlikely(ret < 0 && (rw & WRITE)))
- udf_write_failed(mapping, offset + iov_length(iov, nr_segs));
+ udf_write_failed(mapping, offset + iov_iter_count(iter));
return ret;
}
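The udf hunks above are part of the wider switch from the (const struct iovec *, nr_segs) calling convention to struct iov_iter: ->direct_IO and the file_operations read/write paths now take an iterator, and the total request length comes from iov_iter_count() instead of iov_length(). A condensed sketch of the converted pattern, using only names that appear in the hunks (error handling trimmed):

	size_t count = iov_iter_count(iter);    /* was iov_length(iov, nr_segs) */

	ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block);
	if (unlikely(ret < 0 && (rw & WRITE)))
		udf_write_failed(mapping, offset + count);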
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 91219385691d..3306b9f69bed 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -76,6 +76,9 @@
#define UDF_DEFAULT_BLOCKSIZE 2048
+#define VSD_FIRST_SECTOR_OFFSET 32768
+#define VSD_MAX_SECTOR_OFFSET 0x800000
+
enum { UDF_MAX_LINKS = 0xffff };
/* These are the "meat" - everything else is stuffing */
@@ -685,7 +688,7 @@ out_unlock:
static loff_t udf_check_vsd(struct super_block *sb)
{
struct volStructDesc *vsd = NULL;
- loff_t sector = 32768;
+ loff_t sector = VSD_FIRST_SECTOR_OFFSET;
int sectorsize;
struct buffer_head *bh = NULL;
int nsr02 = 0;
@@ -703,8 +706,18 @@ static loff_t udf_check_vsd(struct super_block *sb)
udf_debug("Starting at sector %u (%ld byte sectors)\n",
(unsigned int)(sector >> sb->s_blocksize_bits),
sb->s_blocksize);
- /* Process the sequence (if applicable) */
- for (; !nsr02 && !nsr03; sector += sectorsize) {
+ /* Process the sequence (if applicable). The hard limit on the sector
+ * offset is arbitrary, hopefully large enough so that all valid UDF
+ * filesystems will be recognised. There is no mention of an upper
+ * bound to the size of the volume recognition area in the standard.
+ * The limit will prevent the code from reading all the sectors of a
+ * specially crafted image (like a bluray disc full of CD001 sectors),
+ * potentially causing minutes or even hours of uninterruptible I/O
+ * activity. This actually happened with uninitialised SSD partitions
+ * (all 0xFF) before this limit and the full list of valid IDs were
+ * added. */
+ for (; !nsr02 && !nsr03 && sector < VSD_MAX_SECTOR_OFFSET;
+ sector += sectorsize) {
/* Read a block */
bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
if (!bh)
@@ -714,10 +727,7 @@ static loff_t udf_check_vsd(struct super_block *sb)
vsd = (struct volStructDesc *)(bh->b_data +
(sector & (sb->s_blocksize - 1)));
- if (vsd->stdIdent[0] == 0) {
- brelse(bh);
- break;
- } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
+ if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
VSD_STD_ID_LEN)) {
switch (vsd->structType) {
case 0:
@@ -753,6 +763,17 @@ static loff_t udf_check_vsd(struct super_block *sb)
else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03,
VSD_STD_ID_LEN))
nsr03 = sector;
+ else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BOOT2,
+ VSD_STD_ID_LEN))
+ ; /* nothing */
+ else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CDW02,
+ VSD_STD_ID_LEN))
+ ; /* nothing */
+ else {
+ /* invalid id : end of volume recognition area */
+ brelse(bh);
+ break;
+ }
brelse(bh);
}
@@ -760,7 +781,8 @@ static loff_t udf_check_vsd(struct super_block *sb)
return nsr03;
else if (nsr02)
return nsr02;
- else if (sector - (sbi->s_session << sb->s_blocksize_bits) == 32768)
+ else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
+ VSD_FIRST_SECTOR_OFFSET)
return -1;
else
return 0;
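For scale: VSD_MAX_SECTOR_OFFSET is 0x800000 bytes, i.e. at most 8 MiB of volume recognition area, which at the default 2048-byte sector size caps the scan at 4096 reads. Previously an image full of valid CD001 descriptors, or an all-0xFF device that never matched the removed stdIdent[0] == 0 test, could keep the loop reading to the end of the device. A condensed sketch of the bounded loop as modified above (names from the hunks, descriptor handling elided):

	for (; !nsr02 && !nsr03 && sector < VSD_MAX_SECTOR_OFFSET;
	     sector += sectorsize) {
		bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
		if (!bh)
			break;
		/* known IDs (CD001, BEA01, NSR02/03, BOOT2, CDW02) keep the
		 * scan going; NSR02/NSR03 also record the offset; any other
		 * ID ends the volume recognition area. */
		brelse(bh);
	}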
@@ -1270,6 +1292,9 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
* PHYSICAL partitions are already set up
*/
type1_idx = i;
+#ifdef UDFFS_DEBUG
+ map = NULL; /* suppress 'maybe used uninitialized' warning */
+#endif
for (i = 0; i < sbi->s_partitions; i++) {
map = &sbi->s_partmaps[i];
@@ -1891,7 +1916,9 @@ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
return 0;
}
if (nsr_off == -1)
- udf_debug("Failed to read byte 32768. Assuming open disc. Skipping validity check\n");
+ udf_debug("Failed to read sector at offset %d. "
+ "Assuming open disc. Skipping validity "
+ "check\n", VSD_FIRST_SECTOR_OFFSET);
if (!sbi->s_last_block)
sbi->s_last_block = udf_get_last_block(sb);
} else {
diff --git a/fs/ufs/file.c b/fs/ufs/file.c
index 33afa20d4509..e155e4c4af87 100644
--- a/fs/ufs/file.c
+++ b/fs/ufs/file.c
@@ -36,9 +36,9 @@
const struct file_operations ufs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read_iter = generic_file_read_iter,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.open = generic_file_open,
.fsync = generic_file_fsync,
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 0719e4db93f2..c21f43506661 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -66,12 +66,14 @@ xfs-y += xfs_alloc.o \
xfs_bmap_btree.o \
xfs_btree.o \
xfs_da_btree.o \
+ xfs_da_format.o \
xfs_dir2.o \
xfs_dir2_block.o \
xfs_dir2_data.o \
xfs_dir2_leaf.o \
xfs_dir2_node.o \
xfs_dir2_sf.o \
+ xfs_dquot_buf.o \
xfs_ialloc.o \
xfs_ialloc_btree.o \
xfs_icreate_item.o \
@@ -103,7 +105,11 @@ xfs-$(CONFIG_XFS_QUOTA) += xfs_dquot.o \
xfs_qm_bhv.o \
xfs_qm.o \
xfs_quotaops.o
-xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
+
+# xfs_rtbitmap is shared with libxfs
+xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o \
+ xfs_rtbitmap.o
+
xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
xfs-$(CONFIG_PROC_FS) += xfs_stats.o
xfs-$(CONFIG_SYSCTL) += xfs_sysctl.o
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 0e2f37efedd0..370eb3e121d1 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -16,15 +16,15 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
+#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inode.h"
#include "xfs_ag.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_acl.h"
+#include "xfs_attr.h"
#include "xfs_trace.h"
#include <linux/slab.h>
#include <linux/xattr.h>
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 1cb740afd674..3fc109819c34 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -128,8 +128,6 @@ typedef struct xfs_agf {
extern int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
-extern const struct xfs_buf_ops xfs_agf_buf_ops;
-
/*
* Size of the unlinked inode hash table in the agi.
*/
@@ -191,8 +189,6 @@ typedef struct xfs_agi {
extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, struct xfs_buf **bpp);
-extern const struct xfs_buf_ops xfs_agi_buf_ops;
-
/*
* The third a.g. block contains the a.g. freelist, an array
* of block pointers to blocks owned by the allocation btree code.
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 5a1393f5e020..bcf16528bac5 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -17,25 +17,25 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
+#include "xfs_log.h"
struct workqueue_struct *xfs_alloc_wq;
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 99d0a6101558..feacb061bab7 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -231,7 +231,4 @@ xfs_alloc_get_rec(
xfs_extlen_t *len, /* output: length of extent */
int *stat); /* output: success/failure */
-extern const struct xfs_buf_ops xfs_agf_buf_ops;
-extern const struct xfs_buf_ops xfs_agfl_buf_ops;
-
#endif /* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index cafc90251d19..13085429e523 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -17,23 +17,21 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
#include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
+#include "xfs_trans.h"
STATIC struct xfs_btree_cur *
diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h
index e3a3f7424192..45e189e7e81c 100644
--- a/fs/xfs/xfs_alloc_btree.h
+++ b/fs/xfs/xfs_alloc_btree.h
@@ -27,39 +27,6 @@ struct xfs_btree_cur;
struct xfs_mount;
/*
- * There are two on-disk btrees, one sorted by blockno and one sorted
- * by blockcount and blockno. All blocks look the same to make the code
- * simpler; if we have time later, we'll make the optimizations.
- */
-#define XFS_ABTB_MAGIC 0x41425442 /* 'ABTB' for bno tree */
-#define XFS_ABTB_CRC_MAGIC 0x41423342 /* 'AB3B' */
-#define XFS_ABTC_MAGIC 0x41425443 /* 'ABTC' for cnt tree */
-#define XFS_ABTC_CRC_MAGIC 0x41423343 /* 'AB3C' */
-
-/*
- * Data record/key structure
- */
-typedef struct xfs_alloc_rec {
- __be32 ar_startblock; /* starting block number */
- __be32 ar_blockcount; /* count of free blocks */
-} xfs_alloc_rec_t, xfs_alloc_key_t;
-
-typedef struct xfs_alloc_rec_incore {
- xfs_agblock_t ar_startblock; /* starting block number */
- xfs_extlen_t ar_blockcount; /* count of free blocks */
-} xfs_alloc_rec_incore_t;
-
-/* btree pointer type */
-typedef __be32 xfs_alloc_ptr_t;
-
-/*
- * Block numbers in the AG:
- * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
- */
-#define XFS_BNO_BLOCK(mp) ((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1))
-#define XFS_CNT_BLOCK(mp) ((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1))
-
-/*
* Btree block header size depends on a superblock flag.
*/
#define XFS_ALLOC_BLOCK_LEN(mp) \
@@ -95,6 +62,4 @@ extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *,
xfs_agnumber_t, xfs_btnum_t);
extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
-extern const struct xfs_buf_ops xfs_allocbt_buf_ops;
-
#endif /* __XFS_ALLOC_BTREE_H__ */
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index e51e581454e9..e52525b25618 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -16,14 +16,15 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
-#include "xfs_log.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_trans.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
@@ -31,6 +32,8 @@
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_dinode.h"
#include <linux/aio.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
@@ -333,7 +336,7 @@ xfs_map_blocks(
if (type == XFS_IO_DELALLOC &&
(!nimaps || isnullstartblock(imap->br_startblock))) {
- error = xfs_iomap_write_allocate(ip, offset, count, imap);
+ error = xfs_iomap_write_allocate(ip, offset, imap);
if (!error)
trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
return -XFS_ERROR(error);
@@ -404,7 +407,7 @@ xfs_alloc_ioend_bio(
struct bio *bio = bio_alloc(GFP_NOIO, nvecs);
ASSERT(bio->bi_private == NULL);
- bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+ bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
return bio;
}
@@ -1413,9 +1416,8 @@ STATIC ssize_t
xfs_vm_direct_IO(
int rw,
struct kiocb *iocb,
- const struct iovec *iov,
- loff_t offset,
- unsigned long nr_segs)
+ struct iov_iter *iter,
+ loff_t offset)
{
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct block_device *bdev = xfs_find_bdev_for_inode(inode);
@@ -1423,7 +1425,7 @@ xfs_vm_direct_IO(
ssize_t ret;
if (rw & WRITE) {
- size_t size = iov_length(iov, nr_segs);
+ size_t size = iov_iter_count(iter);
/*
* We cannot preallocate a size update transaction here as we
@@ -1435,15 +1437,13 @@ xfs_vm_direct_IO(
if (offset + size > XFS_I(inode)->i_d.di_size)
ioend->io_isdirect = 1;
- ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
- offset, nr_segs,
+ ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
xfs_get_blocks_direct,
xfs_end_io_direct_write, NULL, 0);
if (ret != -EIOCBQUEUED && iocb->private)
goto out_destroy_ioend;
} else {
- ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
- offset, nr_segs,
+ ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
xfs_get_blocks_direct,
NULL, NULL, 0);
}
@@ -1569,8 +1569,7 @@ xfs_vm_write_begin(
ASSERT(len <= PAGE_CACHE_SIZE);
- page = grab_cache_page_write_begin(mapping, index,
- flags | AOP_FLAG_NOFS);
+ page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index ddcf2267ffa6..b86127072ac3 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -17,23 +17,24 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_attr_remote.h"
@@ -41,6 +42,7 @@
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
+#include "xfs_dinode.h"
/*
* xfs_attr.c
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index bb24b07cbedb..09480c57f069 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -18,22 +18,20 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
+#include "xfs_inode.h"
#include "xfs_alloc.h"
-#include "xfs_btree.h"
#include "xfs_attr_remote.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
@@ -41,7 +39,8 @@
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
-#include "xfs_trans_priv.h"
+#include "xfs_dinode.h"
+#include "xfs_dir2.h"
/*
* Look at all the extents for this logical region,
@@ -232,13 +231,13 @@ xfs_attr3_node_inactive(
}
node = bp->b_addr;
- xfs_da3_node_hdr_from_disk(&ichdr, node);
+ dp->d_ops->node_hdr_from_disk(&ichdr, node);
parent_blkno = bp->b_bn;
if (!ichdr.count) {
xfs_trans_brelse(*trans, bp);
return 0;
}
- btree = xfs_da3_node_tree_p(node);
+ btree = dp->d_ops->node_tree_p(node);
child_fsb = be32_to_cpu(btree[0].before);
xfs_trans_brelse(*trans, bp); /* no locks for later trans */
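The attr hunks here and below replace direct calls to xfs_da3_node_hdr_from_disk()/xfs_da3_node_tree_p() with calls through the inode's directory operations vector, so the on-disk format variant is resolved once via the ops attached to the inode instead of being re-detected from the block magic inside each helper (as the removed versions in xfs_da_btree.c do). The shape of the change as it appears in these hunks; the xfs_dir_ops vector itself is set up elsewhere in the series and is assumed here:

	dp->d_ops->node_hdr_from_disk(&ichdr, node); /* was xfs_da3_node_hdr_from_disk(&ichdr, node) */
	btree = dp->d_ops->node_tree_p(node);        /* was xfs_da3_node_tree_p(node) */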
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 86db20a9cc02..7b126f46a2f9 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -18,32 +18,31 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_attr_remote.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
+#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
+#include "xfs_attr_sf.h"
+#include "xfs_attr_remote.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
+#include "xfs_dinode.h"
+#include "xfs_dir2.h"
/*
@@ -918,8 +917,8 @@ xfs_attr3_leaf_to_node(
if (error)
goto out;
node = bp1->b_addr;
- xfs_da3_node_hdr_from_disk(&icnodehdr, node);
- btree = xfs_da3_node_tree_p(node);
+ dp->d_ops->node_hdr_from_disk(&icnodehdr, node);
+ btree = dp->d_ops->node_tree_p(node);
leaf = bp2->b_addr;
xfs_attr3_leaf_hdr_from_disk(&icleafhdr, leaf);
@@ -929,7 +928,7 @@ xfs_attr3_leaf_to_node(
btree[0].hashval = entries[icleafhdr.count - 1].hashval;
btree[0].before = cpu_to_be32(blkno);
icnodehdr.count = 1;
- xfs_da3_node_hdr_to_disk(node, &icnodehdr);
+ dp->d_ops->node_hdr_to_disk(node, &icnodehdr);
xfs_trans_log_buf(args->trans, bp1, 0, XFS_LBSIZE(mp) - 1);
error = 0;
out:
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index c1022138c7e6..3ec5ec0b8678 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -19,16 +19,6 @@
#ifndef __XFS_ATTR_LEAF_H__
#define __XFS_ATTR_LEAF_H__
-/*
- * Attribute storage layout, internal structure, access macros, etc.
- *
- * Attribute lists are structured around Btrees where all the data
- * elements are in the leaf nodes. Attribute names are hashed into an int,
- * then that int is used as the index into the Btree. Since the hashval
- * of an attribute name may not be unique, we may have duplicate keys. The
- * internal links in the Btree are logical block offsets into the file.
- */
-
struct attrlist;
struct attrlist_cursor_kern;
struct xfs_attr_list_context;
@@ -38,226 +28,6 @@ struct xfs_da_state_blk;
struct xfs_inode;
struct xfs_trans;
-/*========================================================================
- * Attribute structure when equal to XFS_LBSIZE(mp) bytes.
- *========================================================================*/
-
-/*
- * This is the structure of the leaf nodes in the Btree.
- *
- * Struct leaf_entry's are packed from the top. Name/values grow from the
- * bottom but are not packed. The freemap contains run-length-encoded entries
- * for the free bytes after the leaf_entry's, but only the N largest such,
- * smaller runs are dropped. When the freemap doesn't show enough space
- * for an allocation, we compact the name/value area and try again. If we
- * still don't have enough space, then we have to split the block. The
- * name/value structs (both local and remote versions) must be 32bit aligned.
- *
- * Since we have duplicate hash keys, for each key that matches, compare
- * the actual name string. The root and intermediate node search always
- * takes the first-in-the-block key match found, so we should only have
- * to work "forw"ard. If none matches, continue with the "forw"ard leaf
- * nodes until the hash key changes or the attribute name is found.
- *
- * We store the fact that an attribute is a ROOT/USER/SECURE attribute in
- * the leaf_entry. The namespaces are independent only because we also look
- * at the namespace bit when we are looking for a matching attribute name.
- *
- * We also store an "incomplete" bit in the leaf_entry. It shows that an
- * attribute is in the middle of being created and should not be shown to
- * the user if we crash during the time that the bit is set. We clear the
- * bit when we have finished setting up the attribute. We do this because
- * we cannot create some large attributes inside a single transaction, and we
- * need some indication that we weren't finished if we crash in the middle.
- */
-#define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */
-
-typedef struct xfs_attr_leaf_map { /* RLE map of free bytes */
- __be16 base; /* base of free region */
- __be16 size; /* length of free region */
-} xfs_attr_leaf_map_t;
-
-typedef struct xfs_attr_leaf_hdr { /* constant-structure header block */
- xfs_da_blkinfo_t info; /* block type, links, etc. */
- __be16 count; /* count of active leaf_entry's */
- __be16 usedbytes; /* num bytes of names/values stored */
- __be16 firstused; /* first used byte in name area */
- __u8 holes; /* != 0 if blk needs compaction */
- __u8 pad1;
- xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
- /* N largest free regions */
-} xfs_attr_leaf_hdr_t;
-
-typedef struct xfs_attr_leaf_entry { /* sorted on key, not name */
- __be32 hashval; /* hash value of name */
- __be16 nameidx; /* index into buffer of name/value */
- __u8 flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
- __u8 pad2; /* unused pad byte */
-} xfs_attr_leaf_entry_t;
-
-typedef struct xfs_attr_leaf_name_local {
- __be16 valuelen; /* number of bytes in value */
- __u8 namelen; /* length of name bytes */
- __u8 nameval[1]; /* name/value bytes */
-} xfs_attr_leaf_name_local_t;
-
-typedef struct xfs_attr_leaf_name_remote {
- __be32 valueblk; /* block number of value bytes */
- __be32 valuelen; /* number of bytes in value */
- __u8 namelen; /* length of name bytes */
- __u8 name[1]; /* name bytes */
-} xfs_attr_leaf_name_remote_t;
-
-typedef struct xfs_attr_leafblock {
- xfs_attr_leaf_hdr_t hdr; /* constant-structure header block */
- xfs_attr_leaf_entry_t entries[1]; /* sorted on key, not name */
- xfs_attr_leaf_name_local_t namelist; /* grows from bottom of buf */
- xfs_attr_leaf_name_remote_t valuelist; /* grows from bottom of buf */
-} xfs_attr_leafblock_t;
-
-/*
- * CRC enabled leaf structures. Called "version 3" structures to match the
- * version number of the directory and dablk structures for this feature, and
- * attr2 is already taken by the variable inode attribute fork size feature.
- */
-struct xfs_attr3_leaf_hdr {
- struct xfs_da3_blkinfo info;
- __be16 count;
- __be16 usedbytes;
- __be16 firstused;
- __u8 holes;
- __u8 pad1;
- struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE];
- __be32 pad2; /* 64 bit alignment */
-};
-
-#define XFS_ATTR3_LEAF_CRC_OFF (offsetof(struct xfs_attr3_leaf_hdr, info.crc))
-
-struct xfs_attr3_leafblock {
- struct xfs_attr3_leaf_hdr hdr;
- struct xfs_attr_leaf_entry entries[1];
-
- /*
- * The rest of the block contains the following structures after the
- * leaf entries, growing from the bottom up. The variables are never
- * referenced, the locations accessed purely from helper functions.
- *
- * struct xfs_attr_leaf_name_local
- * struct xfs_attr_leaf_name_remote
- */
-};
-
-/*
- * incore, neutral version of the attribute leaf header
- */
-struct xfs_attr3_icleaf_hdr {
- __uint32_t forw;
- __uint32_t back;
- __uint16_t magic;
- __uint16_t count;
- __uint16_t usedbytes;
- __uint16_t firstused;
- __u8 holes;
- struct {
- __uint16_t base;
- __uint16_t size;
- } freemap[XFS_ATTR_LEAF_MAPSIZE];
-};
-
-/*
- * Flags used in the leaf_entry[i].flags field.
- * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
- * on the system call, they are "or"ed together for various operations.
- */
-#define XFS_ATTR_LOCAL_BIT 0 /* attr is stored locally */
-#define XFS_ATTR_ROOT_BIT 1 /* limit access to trusted attrs */
-#define XFS_ATTR_SECURE_BIT 2 /* limit access to secure attrs */
-#define XFS_ATTR_INCOMPLETE_BIT 7 /* attr in middle of create/delete */
-#define XFS_ATTR_LOCAL (1 << XFS_ATTR_LOCAL_BIT)
-#define XFS_ATTR_ROOT (1 << XFS_ATTR_ROOT_BIT)
-#define XFS_ATTR_SECURE (1 << XFS_ATTR_SECURE_BIT)
-#define XFS_ATTR_INCOMPLETE (1 << XFS_ATTR_INCOMPLETE_BIT)
-
-/*
- * Conversion macros for converting namespace bits from argument flags
- * to ondisk flags.
- */
-#define XFS_ATTR_NSP_ARGS_MASK (ATTR_ROOT | ATTR_SECURE)
-#define XFS_ATTR_NSP_ONDISK_MASK (XFS_ATTR_ROOT | XFS_ATTR_SECURE)
-#define XFS_ATTR_NSP_ONDISK(flags) ((flags) & XFS_ATTR_NSP_ONDISK_MASK)
-#define XFS_ATTR_NSP_ARGS(flags) ((flags) & XFS_ATTR_NSP_ARGS_MASK)
-#define XFS_ATTR_NSP_ARGS_TO_ONDISK(x) (((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) |\
- ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))
-#define XFS_ATTR_NSP_ONDISK_TO_ARGS(x) (((x) & XFS_ATTR_ROOT ? ATTR_ROOT : 0) |\
- ((x) & XFS_ATTR_SECURE ? ATTR_SECURE : 0))
-
-/*
- * Alignment for namelist and valuelist entries (since they are mixed
- * there can be only one alignment value)
- */
-#define XFS_ATTR_LEAF_NAME_ALIGN ((uint)sizeof(xfs_dablk_t))
-
-static inline int
-xfs_attr3_leaf_hdr_size(struct xfs_attr_leafblock *leafp)
-{
- if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
- return sizeof(struct xfs_attr3_leaf_hdr);
- return sizeof(struct xfs_attr_leaf_hdr);
-}
-
-static inline struct xfs_attr_leaf_entry *
-xfs_attr3_leaf_entryp(xfs_attr_leafblock_t *leafp)
-{
- if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
- return &((struct xfs_attr3_leafblock *)leafp)->entries[0];
- return &leafp->entries[0];
-}
-
-/*
- * Cast typed pointers for "local" and "remote" name/value structs.
- */
-static inline char *
-xfs_attr3_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
-{
- struct xfs_attr_leaf_entry *entries = xfs_attr3_leaf_entryp(leafp);
-
- return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)];
-}
-
-static inline xfs_attr_leaf_name_remote_t *
-xfs_attr3_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
-{
- return (xfs_attr_leaf_name_remote_t *)xfs_attr3_leaf_name(leafp, idx);
-}
-
-static inline xfs_attr_leaf_name_local_t *
-xfs_attr3_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
-{
- return (xfs_attr_leaf_name_local_t *)xfs_attr3_leaf_name(leafp, idx);
-}
-
-/*
- * Calculate total bytes used (including trailing pad for alignment) for
- * a "local" name/value structure, a "remote" name/value structure, and
- * a pointer which might be either.
- */
-static inline int xfs_attr_leaf_entsize_remote(int nlen)
-{
- return ((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \
- XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
-}
-
-static inline int xfs_attr_leaf_entsize_local(int nlen, int vlen)
-{
- return ((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) +
- XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
-}
-
-static inline int xfs_attr_leaf_entsize_local_max(int bsize)
-{
- return (((bsize) >> 1) + ((bsize) >> 2));
-}
-
/*
* Used to keep a list of "remote value" extents when unlinking an inode.
*/
@@ -336,6 +106,4 @@ void xfs_attr3_leaf_hdr_from_disk(struct xfs_attr3_icleaf_hdr *to,
void xfs_attr3_leaf_hdr_to_disk(struct xfs_attr_leafblock *to,
struct xfs_attr3_icleaf_hdr *from);
-extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
-
#endif /* __XFS_ATTR_LEAF_H__ */
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index cbc80d485177..2d174b128153 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -18,31 +18,29 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_attr_remote.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
+#include "xfs_attr_sf.h"
+#include "xfs_attr_remote.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
+#include "xfs_dinode.h"
+#include "xfs_dir2.h"
STATIC int
xfs_attr_shortform_compare(const void *a, const void *b)
@@ -229,6 +227,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
struct xfs_da_node_entry *btree;
int error, i;
struct xfs_buf *bp;
+ struct xfs_inode *dp = context->dp;
trace_xfs_attr_node_list(context);
@@ -242,7 +241,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
*/
bp = NULL;
if (cursor->blkno > 0) {
- error = xfs_da3_node_read(NULL, context->dp, cursor->blkno, -1,
+ error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1,
&bp, XFS_ATTR_FORK);
if ((error != 0) && (error != EFSCORRUPTED))
return(error);
@@ -292,7 +291,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
for (;;) {
__uint16_t magic;
- error = xfs_da3_node_read(NULL, context->dp,
+ error = xfs_da3_node_read(NULL, dp,
cursor->blkno, -1, &bp,
XFS_ATTR_FORK);
if (error)
@@ -312,8 +311,8 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
return XFS_ERROR(EFSCORRUPTED);
}
- xfs_da3_node_hdr_from_disk(&nodehdr, node);
- btree = xfs_da3_node_tree_p(node);
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ btree = dp->d_ops->node_tree_p(node);
for (i = 0; i < nodehdr.count; btree++, i++) {
if (cursor->hashval
<= be32_to_cpu(btree->hashval)) {
@@ -349,8 +348,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
break;
cursor->blkno = leafhdr.forw;
xfs_trans_brelse(NULL, bp);
- error = xfs_attr3_leaf_read(NULL, context->dp, cursor->blkno, -1,
- &bp);
+ error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp);
if (error)
return error;
}
diff --git a/fs/xfs/xfs_attr_remote.c b/fs/xfs/xfs_attr_remote.c
index 712a502de619..739e0a52deda 100644
--- a/fs/xfs/xfs_attr_remote.c
+++ b/fs/xfs/xfs_attr_remote.c
@@ -18,20 +18,19 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_error.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
@@ -42,6 +41,7 @@
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"
+#include "xfs_error.h"
#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */
diff --git a/fs/xfs/xfs_attr_remote.h b/fs/xfs/xfs_attr_remote.h
index 92a8fd7977cc..5a9acfa156d7 100644
--- a/fs/xfs/xfs_attr_remote.h
+++ b/fs/xfs/xfs_attr_remote.h
@@ -18,35 +18,6 @@
#ifndef __XFS_ATTR_REMOTE_H__
#define __XFS_ATTR_REMOTE_H__
-#define XFS_ATTR3_RMT_MAGIC 0x5841524d /* XARM */
-
-/*
- * There is one of these headers per filesystem block in a remote attribute.
- * This is done to ensure there is a 1:1 mapping between the attribute value
- * length and the number of blocks needed to store the attribute. This makes the
- * verification of a buffer a little more complex, but greatly simplifies the
- * allocation, reading and writing of these attributes as we don't have to guess
- * the number of blocks needed to store the attribute data.
- */
-struct xfs_attr3_rmt_hdr {
- __be32 rm_magic;
- __be32 rm_offset;
- __be32 rm_bytes;
- __be32 rm_crc;
- uuid_t rm_uuid;
- __be64 rm_owner;
- __be64 rm_blkno;
- __be64 rm_lsn;
-};
-
-#define XFS_ATTR3_RMT_CRC_OFF offsetof(struct xfs_attr3_rmt_hdr, rm_crc)
-
-#define XFS_ATTR3_RMT_BUF_SPACE(mp, bufsize) \
- ((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
- sizeof(struct xfs_attr3_rmt_hdr) : 0))
-
-extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
-
int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
int xfs_attr_rmtval_get(struct xfs_da_args *args);
diff --git a/fs/xfs/xfs_bit.c b/fs/xfs/xfs_bit.c
index 48228848f5ae..0e8885a59646 100644
--- a/fs/xfs/xfs_bit.c
+++ b/fs/xfs/xfs_bit.c
@@ -16,10 +16,8 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
+#include "xfs_log_format.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_buf_item.h"
/*
* XFS bit manipulation routines, used in non-realtime code.
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index f47e65c30be6..1c02da8bb7df 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -17,39 +17,37 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "xfs_mount.h"
-#include "xfs_itable.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
-#include "xfs_attr_leaf.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
-#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_dinode.h"
+#include "xfs_filestream.h"
kmem_zone_t *xfs_bmap_free_item_zone;
@@ -1482,7 +1480,7 @@ xfs_bmap_search_extents(
xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
"Access to block zero in inode %llu "
"start_block: %llx start_off: %llx "
- "blkcnt: %llx extent-state: %x lastx: %x\n",
+ "blkcnt: %llx extent-state: %x lastx: %x",
(unsigned long long)ip->i_ino,
(unsigned long long)gotp->br_startblock,
(unsigned long long)gotp->br_startoff,
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index bb8de8e399c4..706bc3f777cb 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -17,27 +17,26 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
-#include "xfs_itable.h"
+#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
+#include "xfs_dinode.h"
/*
* Determine the extent state.
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
index e367461a638e..6e42e1e50b89 100644
--- a/fs/xfs/xfs_bmap_btree.h
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -18,9 +18,6 @@
#ifndef __XFS_BMAP_BTREE_H__
#define __XFS_BMAP_BTREE_H__
-#define XFS_BMAP_MAGIC 0x424d4150 /* 'BMAP' */
-#define XFS_BMAP_CRC_MAGIC 0x424d4133 /* 'BMA3' */
-
struct xfs_btree_cur;
struct xfs_btree_block;
struct xfs_mount;
@@ -28,85 +25,6 @@ struct xfs_inode;
struct xfs_trans;
/*
- * Bmap root header, on-disk form only.
- */
-typedef struct xfs_bmdr_block {
- __be16 bb_level; /* 0 is a leaf */
- __be16 bb_numrecs; /* current # of data records */
-} xfs_bmdr_block_t;
-
-/*
- * Bmap btree record and extent descriptor.
- * l0:63 is an extent flag (value 1 indicates non-normal).
- * l0:9-62 are startoff.
- * l0:0-8 and l1:21-63 are startblock.
- * l1:0-20 are blockcount.
- */
-#define BMBT_EXNTFLAG_BITLEN 1
-#define BMBT_STARTOFF_BITLEN 54
-#define BMBT_STARTBLOCK_BITLEN 52
-#define BMBT_BLOCKCOUNT_BITLEN 21
-
-typedef struct xfs_bmbt_rec {
- __be64 l0, l1;
-} xfs_bmbt_rec_t;
-
-typedef __uint64_t xfs_bmbt_rec_base_t; /* use this for casts */
-typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;
-
-typedef struct xfs_bmbt_rec_host {
- __uint64_t l0, l1;
-} xfs_bmbt_rec_host_t;
-
-/*
- * Values and macros for delayed-allocation startblock fields.
- */
-#define STARTBLOCKVALBITS 17
-#define STARTBLOCKMASKBITS (15 + XFS_BIG_BLKNOS * 20)
-#define DSTARTBLOCKMASKBITS (15 + 20)
-#define STARTBLOCKMASK \
- (((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
-#define DSTARTBLOCKMASK \
- (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
-
-static inline int isnullstartblock(xfs_fsblock_t x)
-{
- return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK;
-}
-
-static inline int isnulldstartblock(xfs_dfsbno_t x)
-{
- return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK;
-}
-
-static inline xfs_fsblock_t nullstartblock(int k)
-{
- ASSERT(k < (1 << STARTBLOCKVALBITS));
- return STARTBLOCKMASK | (k);
-}
-
-static inline xfs_filblks_t startblockval(xfs_fsblock_t x)
-{
- return (xfs_filblks_t)((x) & ~STARTBLOCKMASK);
-}
-
-/*
- * Possible extent formats.
- */
-typedef enum {
- XFS_EXTFMT_NOSTATE = 0,
- XFS_EXTFMT_HASSTATE
-} xfs_exntfmt_t;
-
-/*
- * Possible extent states.
- */
-typedef enum {
- XFS_EXT_NORM, XFS_EXT_UNWRITTEN,
- XFS_EXT_DMAPI_OFFLINE, XFS_EXT_INVALID
-} xfs_exntst_t;
-
-/*
* Extent state and extent format macros.
*/
#define XFS_EXTFMT_INODE(x) \
@@ -115,27 +33,6 @@ typedef enum {
#define ISUNWRITTEN(x) ((x)->br_state == XFS_EXT_UNWRITTEN)
/*
- * Incore version of above.
- */
-typedef struct xfs_bmbt_irec
-{
- xfs_fileoff_t br_startoff; /* starting file offset */
- xfs_fsblock_t br_startblock; /* starting block number */
- xfs_filblks_t br_blockcount; /* number of blocks */
- xfs_exntst_t br_state; /* extent state */
-} xfs_bmbt_irec_t;
-
-/*
- * Key structure for non-leaf levels of the tree.
- */
-typedef struct xfs_bmbt_key {
- __be64 br_startoff; /* starting file offset */
-} xfs_bmbt_key_t, xfs_bmdr_key_t;
-
-/* btree pointer type */
-typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
-
-/*
* Btree block header size depends on a superblock flag.
*/
#define XFS_BMBT_BLOCK_LEN(mp) \
@@ -243,6 +140,4 @@ extern int xfs_bmbt_change_owner(struct xfs_trans *tp, struct xfs_inode *ip,
extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
struct xfs_trans *, struct xfs_inode *, int);
-extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
-
#endif /* __XFS_BMAP_BTREE_H__ */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 97f952caea74..5887e41c0323 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -18,31 +18,31 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
+#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
/* Kernel only BMAP related definitions and functions */
@@ -965,32 +965,12 @@ xfs_free_eofblocks(
return error;
}
-/*
- * xfs_alloc_file_space()
- * This routine allocates disk space for the given file.
- *
- * If alloc_type == 0, this request is for an ALLOCSP type
- * request which will change the file size. In this case, no
- * DMAPI event will be generated by the call. A TRUNCATE event
- * will be generated later by xfs_setattr.
- *
- * If alloc_type != 0, this request is for a RESVSP type
- * request, and a DMAPI DM_EVENT_WRITE will be generated if the
- * lower block boundary byte address is less than the file's
- * length.
- *
- * RETURNS:
- * 0 on success
- * errno on error
- *
- */
-STATIC int
+int
xfs_alloc_file_space(
- xfs_inode_t *ip,
+ struct xfs_inode *ip,
xfs_off_t offset,
xfs_off_t len,
- int alloc_type,
- int attr_flags)
+ int alloc_type)
{
xfs_mount_t *mp = ip->i_mount;
xfs_off_t count;
@@ -1232,24 +1212,11 @@ xfs_zero_remaining_bytes(
return error;
}
-/*
- * xfs_free_file_space()
- * This routine frees disk space for the given file.
- *
- * This routine is only called by xfs_change_file_space
- * for an UNRESVSP type call.
- *
- * RETURNS:
- * 0 on success
- * errno on error
- *
- */
-STATIC int
+int
xfs_free_file_space(
- xfs_inode_t *ip,
+ struct xfs_inode *ip,
xfs_off_t offset,
- xfs_off_t len,
- int attr_flags)
+ xfs_off_t len)
{
int committed;
int done;
@@ -1267,7 +1234,6 @@ xfs_free_file_space(
int rt;
xfs_fileoff_t startoffset_fsb;
xfs_trans_t *tp;
- int need_iolock = 1;
mp = ip->i_mount;
@@ -1284,20 +1250,15 @@ xfs_free_file_space(
startoffset_fsb = XFS_B_TO_FSB(mp, offset);
endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
- if (attr_flags & XFS_ATTR_NOLOCK)
- need_iolock = 0;
- if (need_iolock) {
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
- /* wait for the completion of any pending DIOs */
- inode_dio_wait(VFS_I(ip));
- }
+ /* wait for the completion of any pending DIOs */
+ inode_dio_wait(VFS_I(ip));
rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
ioffset = offset & ~(rounding - 1);
error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
ioffset, -1);
if (error)
- goto out_unlock_iolock;
+ goto out;
truncate_pagecache_range(VFS_I(ip), ioffset, -1);
/*
@@ -1311,7 +1272,7 @@ xfs_free_file_space(
error = xfs_bmapi_read(ip, startoffset_fsb, 1,
&imap, &nimap, 0);
if (error)
- goto out_unlock_iolock;
+ goto out;
ASSERT(nimap == 0 || nimap == 1);
if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
xfs_daddr_t block;
@@ -1326,7 +1287,7 @@ xfs_free_file_space(
error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
&imap, &nimap, 0);
if (error)
- goto out_unlock_iolock;
+ goto out;
ASSERT(nimap == 0 || nimap == 1);
if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
@@ -1412,27 +1373,23 @@ xfs_free_file_space(
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
- out_unlock_iolock:
- if (need_iolock)
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ out:
return error;
error0:
xfs_bmap_cancel(&free_list);
error1:
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
- xfs_iunlock(ip, need_iolock ? (XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) :
- XFS_ILOCK_EXCL);
- return error;
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ goto out;
}
-STATIC int
+int
xfs_zero_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
- xfs_off_t len,
- int attr_flags)
+ xfs_off_t len)
{
struct xfs_mount *mp = ip->i_mount;
uint granularity;
@@ -1453,9 +1410,6 @@ xfs_zero_file_space(
ASSERT(start_boundary >= offset);
ASSERT(end_boundary <= offset + len);
- if (!(attr_flags & XFS_ATTR_NOLOCK))
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
-
if (start_boundary < end_boundary - 1) {
/* punch out the page cache over the conversion range */
truncate_pagecache_range(VFS_I(ip), start_boundary,
@@ -1463,16 +1417,16 @@ xfs_zero_file_space(
/* convert the blocks */
error = xfs_alloc_file_space(ip, start_boundary,
end_boundary - start_boundary - 1,
- XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT,
- attr_flags);
+ XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
if (error)
- goto out_unlock;
+ goto out;
/* We've handled the interior of the range, now for the edges */
- if (start_boundary != offset)
+ if (start_boundary != offset) {
error = xfs_iozero(ip, offset, start_boundary - offset);
- if (error)
- goto out_unlock;
+ if (error)
+ goto out;
+ }
if (end_boundary != offset + len)
error = xfs_iozero(ip, end_boundary,
@@ -1486,197 +1440,12 @@ xfs_zero_file_space(
error = xfs_iozero(ip, offset, len);
}
-out_unlock:
- if (!(attr_flags & XFS_ATTR_NOLOCK))
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+out:
return error;
}
/*
- * xfs_change_file_space()
- * This routine allocates or frees disk space for the given file.
- * The user specified parameters are checked for alignment and size
- * limitations.
- *
- * RETURNS:
- * 0 on success
- * errno on error
- *
- */
-int
-xfs_change_file_space(
- xfs_inode_t *ip,
- int cmd,
- xfs_flock64_t *bf,
- xfs_off_t offset,
- int attr_flags)
-{
- xfs_mount_t *mp = ip->i_mount;
- int clrprealloc;
- int error;
- xfs_fsize_t fsize;
- int setprealloc;
- xfs_off_t startoffset;
- xfs_trans_t *tp;
- struct iattr iattr;
-
- if (!S_ISREG(ip->i_d.di_mode))
- return XFS_ERROR(EINVAL);
-
- switch (bf->l_whence) {
- case 0: /*SEEK_SET*/
- break;
- case 1: /*SEEK_CUR*/
- bf->l_start += offset;
- break;
- case 2: /*SEEK_END*/
- bf->l_start += XFS_ISIZE(ip);
- break;
- default:
- return XFS_ERROR(EINVAL);
- }
-
- /*
- * length of <= 0 for resv/unresv/zero is invalid. length for
- * alloc/free is ignored completely and we have no idea what userspace
- * might have set it to, so set it to zero to allow range
- * checks to pass.
- */
- switch (cmd) {
- case XFS_IOC_ZERO_RANGE:
- case XFS_IOC_RESVSP:
- case XFS_IOC_RESVSP64:
- case XFS_IOC_UNRESVSP:
- case XFS_IOC_UNRESVSP64:
- if (bf->l_len <= 0)
- return XFS_ERROR(EINVAL);
- break;
- default:
- bf->l_len = 0;
- break;
- }
-
- if (bf->l_start < 0 ||
- bf->l_start > mp->m_super->s_maxbytes ||
- bf->l_start + bf->l_len < 0 ||
- bf->l_start + bf->l_len >= mp->m_super->s_maxbytes)
- return XFS_ERROR(EINVAL);
-
- bf->l_whence = 0;
-
- startoffset = bf->l_start;
- fsize = XFS_ISIZE(ip);
-
- setprealloc = clrprealloc = 0;
- switch (cmd) {
- case XFS_IOC_ZERO_RANGE:
- error = xfs_zero_file_space(ip, startoffset, bf->l_len,
- attr_flags);
- if (error)
- return error;
- setprealloc = 1;
- break;
-
- case XFS_IOC_RESVSP:
- case XFS_IOC_RESVSP64:
- error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
- XFS_BMAPI_PREALLOC, attr_flags);
- if (error)
- return error;
- setprealloc = 1;
- break;
-
- case XFS_IOC_UNRESVSP:
- case XFS_IOC_UNRESVSP64:
- if ((error = xfs_free_file_space(ip, startoffset, bf->l_len,
- attr_flags)))
- return error;
- break;
-
- case XFS_IOC_ALLOCSP:
- case XFS_IOC_ALLOCSP64:
- case XFS_IOC_FREESP:
- case XFS_IOC_FREESP64:
- /*
- * These operations actually do IO when extending the file, but
- * the allocation is done seperately to the zeroing that is
- * done. This set of operations need to be serialised against
- * other IO operations, such as truncate and buffered IO. We
- * need to take the IOLOCK here to serialise the allocation and
- * zeroing IO to prevent other IOLOCK holders (e.g. getbmap,
- * truncate, direct IO) from racing against the transient
- * allocated but not written state we can have here.
- */
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
- if (startoffset > fsize) {
- error = xfs_alloc_file_space(ip, fsize,
- startoffset - fsize, 0,
- attr_flags | XFS_ATTR_NOLOCK);
- if (error) {
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- break;
- }
- }
-
- iattr.ia_valid = ATTR_SIZE;
- iattr.ia_size = startoffset;
-
- error = xfs_setattr_size(ip, &iattr,
- attr_flags | XFS_ATTR_NOLOCK);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-
- if (error)
- return error;
-
- clrprealloc = 1;
- break;
-
- default:
- ASSERT(0);
- return XFS_ERROR(EINVAL);
- }
-
- /*
- * update the inode timestamp, mode, and prealloc flag bits
- */
- tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
- if ((attr_flags & XFS_ATTR_DMI) == 0) {
- ip->i_d.di_mode &= ~S_ISUID;
-
- /*
- * Note that we don't have to worry about mandatory
- * file locking being disabled here because we only
- * clear the S_ISGID bit if the Group execute bit is
- * on, but if it was on then mandatory locking wouldn't
- * have been enabled.
- */
- if (ip->i_d.di_mode & S_IXGRP)
- ip->i_d.di_mode &= ~S_ISGID;
-
- xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- }
- if (setprealloc)
- ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
- else if (clrprealloc)
- ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
-
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- if (attr_flags & XFS_ATTR_SYNC)
- xfs_trans_set_sync(tp);
- return xfs_trans_commit(tp, 0);
-}
-
-/*
* We need to check that the format of the data fork in the temporary inode is
* valid for the target inode before doing the swap. This is not a problem with
* attr1 because of the fixed fork offset, but attr2 has a dynamically sized
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 061260946f7a..900747b25772 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -93,9 +93,12 @@ int xfs_bmap_last_extent(struct xfs_trans *tp, struct xfs_inode *ip,
int *is_empty);
/* preallocation and hole punch interface */
-int xfs_change_file_space(struct xfs_inode *ip, int cmd,
- xfs_flock64_t *bf, xfs_off_t offset,
- int attr_flags);
+int xfs_alloc_file_space(struct xfs_inode *ip, xfs_off_t offset,
+ xfs_off_t len, int alloc_type);
+int xfs_free_file_space(struct xfs_inode *ip, xfs_off_t offset,
+ xfs_off_t len);
+int xfs_zero_file_space(struct xfs_inode *ip, xfs_off_t offset,
+ xfs_off_t len);
/* EOF block manipulation functions */
bool xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
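With xfs_change_file_space() gone and the three space helpers exported above without the attr_flags argument, the IOLOCK handling that used to live inside them (see the removed XFS_ATTR_NOLOCK branches) now falls to the caller. A hedged sketch of a caller under the new prototypes; the punch_hole flag is purely illustrative and the real call sites are elsewhere in this series:

	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	if (punch_hole)         /* illustrative flag, not from this patch */
		error = xfs_free_file_space(ip, offset, len);
	else
		error = xfs_alloc_file_space(ip, offset, len, XFS_BMAPI_PREALLOC);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);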
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 5690e102243d..9adaae4f3e2f 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -17,18 +17,16 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_btree.h"
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 06729b67ad58..91e34f21bace 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -27,73 +27,6 @@ struct xfs_trans;
extern kmem_zone_t *xfs_btree_cur_zone;
/*
- * This nonsense is to make -wlint happy.
- */
-#define XFS_LOOKUP_EQ ((xfs_lookup_t)XFS_LOOKUP_EQi)
-#define XFS_LOOKUP_LE ((xfs_lookup_t)XFS_LOOKUP_LEi)
-#define XFS_LOOKUP_GE ((xfs_lookup_t)XFS_LOOKUP_GEi)
-
-#define XFS_BTNUM_BNO ((xfs_btnum_t)XFS_BTNUM_BNOi)
-#define XFS_BTNUM_CNT ((xfs_btnum_t)XFS_BTNUM_CNTi)
-#define XFS_BTNUM_BMAP ((xfs_btnum_t)XFS_BTNUM_BMAPi)
-#define XFS_BTNUM_INO ((xfs_btnum_t)XFS_BTNUM_INOi)
-
-/*
- * Generic btree header.
- *
- * This is a combination of the actual format used on disk for short and long
- * format btrees. The first three fields are shared by both format, but the
- * pointers are different and should be used with care.
- *
- * To get the size of the actual short or long form headers please use the size
- * macros below. Never use sizeof(xfs_btree_block).
- *
- * The blkno, crc, lsn, owner and uuid fields are only available in filesystems
- * with the crc feature bit, and all accesses to them must be conditional on
- * that flag.
- */
-struct xfs_btree_block {
- __be32 bb_magic; /* magic number for block type */
- __be16 bb_level; /* 0 is a leaf */
- __be16 bb_numrecs; /* current # of data records */
- union {
- struct {
- __be32 bb_leftsib;
- __be32 bb_rightsib;
-
- __be64 bb_blkno;
- __be64 bb_lsn;
- uuid_t bb_uuid;
- __be32 bb_owner;
- __le32 bb_crc;
- } s; /* short form pointers */
- struct {
- __be64 bb_leftsib;
- __be64 bb_rightsib;
-
- __be64 bb_blkno;
- __be64 bb_lsn;
- uuid_t bb_uuid;
- __be64 bb_owner;
- __le32 bb_crc;
- __be32 bb_pad; /* padding for alignment */
- } l; /* long form pointers */
- } bb_u; /* rest */
-};
-
-#define XFS_BTREE_SBLOCK_LEN 16 /* size of a short form block */
-#define XFS_BTREE_LBLOCK_LEN 24 /* size of a long form block */
-
-/* sizes of CRC enabled btree blocks */
-#define XFS_BTREE_SBLOCK_CRC_LEN (XFS_BTREE_SBLOCK_LEN + 40)
-#define XFS_BTREE_LBLOCK_CRC_LEN (XFS_BTREE_LBLOCK_LEN + 48)
-
-#define XFS_BTREE_SBLOCK_CRC_OFF \
- offsetof(struct xfs_btree_block, bb_u.s.bb_crc)
-#define XFS_BTREE_LBLOCK_CRC_OFF \
- offsetof(struct xfs_btree_block, bb_u.l.bb_crc)
-
-/*
* Generic key, ptr and record wrapper structures.
*
* These are disk format structures, and are converted where necessary
@@ -119,6 +52,18 @@ union xfs_btree_rec {
};
/*
+ * This nonsense is to make -wlint happy.
+ */
+#define XFS_LOOKUP_EQ ((xfs_lookup_t)XFS_LOOKUP_EQi)
+#define XFS_LOOKUP_LE ((xfs_lookup_t)XFS_LOOKUP_LEi)
+#define XFS_LOOKUP_GE ((xfs_lookup_t)XFS_LOOKUP_GEi)
+
+#define XFS_BTNUM_BNO ((xfs_btnum_t)XFS_BTNUM_BNOi)
+#define XFS_BTNUM_CNT ((xfs_btnum_t)XFS_BTNUM_CNTi)
+#define XFS_BTNUM_BMAP ((xfs_btnum_t)XFS_BTNUM_BMAPi)
+#define XFS_BTNUM_INO ((xfs_btnum_t)XFS_BTNUM_INOi)
+
+/*
* For logging record fields.
*/
#define XFS_BB_MAGIC (1 << 0)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 263470075ea2..5f3ea443ebbe 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -34,12 +34,13 @@
#include <linux/backing-dev.h>
#include <linux/freezer.h>
-#include "xfs_sb.h"
+#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_log.h"
+#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
+#include "xfs_log.h"
static kmem_zone_t *xfs_buf_zone;
@@ -590,7 +591,7 @@ found:
error = _xfs_buf_map_pages(bp, flags);
if (unlikely(error)) {
xfs_warn(target->bt_mount,
- "%s: failed to map pages\n", __func__);
+ "%s: failed to map pagesn", __func__);
xfs_buf_relse(bp);
return NULL;
}
@@ -809,7 +810,7 @@ xfs_buf_get_uncached(
error = _xfs_buf_map_pages(bp, 0);
if (unlikely(error)) {
xfs_warn(target->bt_mount,
- "%s: failed to map pages\n", __func__);
+ "%s: failed to map pages", __func__);
goto fail_free_mem;
}
@@ -1254,7 +1255,7 @@ next_chunk:
bio = bio_alloc(GFP_NOIO, nr_pages);
bio->bi_bdev = bp->b_target->bt_bdev;
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = bp;
@@ -1276,7 +1277,7 @@ next_chunk:
total_nr_pages--;
}
- if (likely(bio->bi_size)) {
+ if (likely(bio->bi_iter.bi_size)) {
if (xfs_buf_is_vmapped(bp)) {
flush_kernel_vmap_range(bp->b_addr,
xfs_buf_vmap_len(bp));
@@ -1618,7 +1619,7 @@ xfs_setsize_buftarg_flags(
bdevname(btp->bt_bdev, name);
xfs_warn(btp->bt_mount,
- "Cannot set_blocksize to %u on device %s\n",
+ "Cannot set_blocksize to %u on device %s",
sectorsize, name);
return EINVAL;
}
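The xfs_buf.c and xfs_aops.c hunks also pick up the block-layer change that moves a bio's position fields into an embedded iterator: bi_sector and bi_size are now reached through bio->bi_iter. Old and new spellings side by side, using the same assignment that appears above:

	bio->bi_sector = sector;          /* old field layout */
	bio->bi_iter.bi_sector = sector;  /* after the bvec-iterator conversion */
	/* likewise bio->bi_size becomes bio->bi_iter.bi_size */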
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index f1d85cfc0a54..a64f67ba25d3 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -17,17 +17,18 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
+#include "xfs_log.h"
kmem_zone_t *xfs_buf_item_zone;
@@ -808,7 +809,7 @@ xfs_buf_item_init(
* Mark bytes first through last inclusive as dirty in the buf
* item's bitmap.
*/
-void
+static void
xfs_buf_item_log_segment(
struct xfs_buf_log_item *bip,
uint first,
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index db6371087fe8..3f3455a41510 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -71,10 +71,6 @@ void xfs_buf_attach_iodone(struct xfs_buf *,
void xfs_buf_iodone_callbacks(struct xfs_buf *);
void xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
-void xfs_trans_buf_set_type(struct xfs_trans *, struct xfs_buf *,
- enum xfs_blft);
-void xfs_trans_buf_copy_type(struct xfs_buf *dst_bp, struct xfs_buf *src_bp);
-
extern kmem_zone_t *xfs_buf_item_zone;
#endif /* __XFS_BUF_ITEM_H__ */
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 20bf8e8002d6..796272a2e129 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -18,20 +18,20 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
@@ -129,56 +129,6 @@ xfs_da_state_free(xfs_da_state_t *state)
kmem_zone_free(xfs_da_state_zone, state);
}
-void
-xfs_da3_node_hdr_from_disk(
- struct xfs_da3_icnode_hdr *to,
- struct xfs_da_intnode *from)
-{
- ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
- from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
-
- if (from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
- struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)from;
-
- to->forw = be32_to_cpu(hdr3->info.hdr.forw);
- to->back = be32_to_cpu(hdr3->info.hdr.back);
- to->magic = be16_to_cpu(hdr3->info.hdr.magic);
- to->count = be16_to_cpu(hdr3->__count);
- to->level = be16_to_cpu(hdr3->__level);
- return;
- }
- to->forw = be32_to_cpu(from->hdr.info.forw);
- to->back = be32_to_cpu(from->hdr.info.back);
- to->magic = be16_to_cpu(from->hdr.info.magic);
- to->count = be16_to_cpu(from->hdr.__count);
- to->level = be16_to_cpu(from->hdr.__level);
-}
-
-void
-xfs_da3_node_hdr_to_disk(
- struct xfs_da_intnode *to,
- struct xfs_da3_icnode_hdr *from)
-{
- ASSERT(from->magic == XFS_DA_NODE_MAGIC ||
- from->magic == XFS_DA3_NODE_MAGIC);
-
- if (from->magic == XFS_DA3_NODE_MAGIC) {
- struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)to;
-
- hdr3->info.hdr.forw = cpu_to_be32(from->forw);
- hdr3->info.hdr.back = cpu_to_be32(from->back);
- hdr3->info.hdr.magic = cpu_to_be16(from->magic);
- hdr3->__count = cpu_to_be16(from->count);
- hdr3->__level = cpu_to_be16(from->level);
- return;
- }
- to->hdr.info.forw = cpu_to_be32(from->forw);
- to->hdr.info.back = cpu_to_be32(from->back);
- to->hdr.info.magic = cpu_to_be16(from->magic);
- to->hdr.__count = cpu_to_be16(from->count);
- to->hdr.__level = cpu_to_be16(from->level);
-}
-
static bool
xfs_da3_node_verify(
struct xfs_buf *bp)
@@ -186,8 +136,11 @@ xfs_da3_node_verify(
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_da_intnode *hdr = bp->b_addr;
struct xfs_da3_icnode_hdr ichdr;
+ const struct xfs_dir_ops *ops;
- xfs_da3_node_hdr_from_disk(&ichdr, hdr);
+ ops = xfs_dir_get_ops(mp, NULL);
+
+ ops->node_hdr_from_disk(&ichdr, hdr);
if (xfs_sb_version_hascrc(&mp->m_sb)) {
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
@@ -354,11 +307,12 @@ xfs_da3_node_create(
struct xfs_da3_icnode_hdr ichdr = {0};
struct xfs_buf *bp;
int error;
+ struct xfs_inode *dp = args->dp;
trace_xfs_da_node_create(args);
ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
- error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
+ error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
if (error)
return(error);
bp->b_ops = &xfs_da3_node_buf_ops;
@@ -377,9 +331,9 @@ xfs_da3_node_create(
}
ichdr.level = level;
- xfs_da3_node_hdr_to_disk(node, &ichdr);
+ dp->d_ops->node_hdr_to_disk(node, &ichdr);
xfs_trans_log_buf(tp, bp,
- XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));
+ XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
*bpp = bp;
return(0);
@@ -589,8 +543,8 @@ xfs_da3_root_split(
oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
struct xfs_da3_icnode_hdr nodehdr;
- xfs_da3_node_hdr_from_disk(&nodehdr, oldroot);
- btree = xfs_da3_node_tree_p(oldroot);
+ dp->d_ops->node_hdr_from_disk(&nodehdr, oldroot);
+ btree = dp->d_ops->node_tree_p(oldroot);
size = (int)((char *)&btree[nodehdr.count] - (char *)oldroot);
level = nodehdr.level;
@@ -604,8 +558,8 @@ xfs_da3_root_split(
struct xfs_dir2_leaf_entry *ents;
leaf = (xfs_dir2_leaf_t *)oldroot;
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
- ents = xfs_dir3_leaf_ents_p(leaf);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
@@ -649,14 +603,14 @@ xfs_da3_root_split(
return error;
node = bp->b_addr;
- xfs_da3_node_hdr_from_disk(&nodehdr, node);
- btree = xfs_da3_node_tree_p(node);
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ btree = dp->d_ops->node_tree_p(node);
btree[0].hashval = cpu_to_be32(blk1->hashval);
btree[0].before = cpu_to_be32(blk1->blkno);
btree[1].hashval = cpu_to_be32(blk2->hashval);
btree[1].before = cpu_to_be32(blk2->blkno);
nodehdr.count = 2;
- xfs_da3_node_hdr_to_disk(node, &nodehdr);
+ dp->d_ops->node_hdr_to_disk(node, &nodehdr);
#ifdef DEBUG
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
@@ -693,11 +647,12 @@ xfs_da3_node_split(
int newcount;
int error;
int useextra;
+ struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_split(state->args);
node = oldblk->bp->b_addr;
- xfs_da3_node_hdr_from_disk(&nodehdr, node);
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
/*
* With V2 dirs the extra block is data or freespace.
@@ -744,7 +699,7 @@ xfs_da3_node_split(
* If we had double-split op below us, then add the extra block too.
*/
node = oldblk->bp->b_addr;
- xfs_da3_node_hdr_from_disk(&nodehdr, node);
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
if (oldblk->index <= nodehdr.count) {
oldblk->index++;
xfs_da3_node_add(state, oldblk, addblk);
@@ -793,15 +748,16 @@ xfs_da3_node_rebalance(
int count;
int tmp;
int swap = 0;
+ struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_rebalance(state->args);
node1 = blk1->bp->b_addr;
node2 = blk2->bp->b_addr;
- xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
- xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
- btree1 = xfs_da3_node_tree_p(node1);
- btree2 = xfs_da3_node_tree_p(node2);
+ dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
+ dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
+ btree1 = dp->d_ops->node_tree_p(node1);
+ btree2 = dp->d_ops->node_tree_p(node2);
/*
* Figure out how many entries need to move, and in which direction.
@@ -814,10 +770,10 @@ xfs_da3_node_rebalance(
tmpnode = node1;
node1 = node2;
node2 = tmpnode;
- xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
- xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
- btree1 = xfs_da3_node_tree_p(node1);
- btree2 = xfs_da3_node_tree_p(node2);
+ dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
+ dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
+ btree1 = dp->d_ops->node_tree_p(node1);
+ btree2 = dp->d_ops->node_tree_p(node2);
swap = 1;
}
@@ -879,15 +835,14 @@ xfs_da3_node_rebalance(
/*
* Log header of node 1 and all current bits of node 2.
*/
- xfs_da3_node_hdr_to_disk(node1, &nodehdr1);
+ dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
xfs_trans_log_buf(tp, blk1->bp,
- XFS_DA_LOGRANGE(node1, &node1->hdr,
- xfs_da3_node_hdr_size(node1)));
+ XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));
- xfs_da3_node_hdr_to_disk(node2, &nodehdr2);
+ dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
xfs_trans_log_buf(tp, blk2->bp,
XFS_DA_LOGRANGE(node2, &node2->hdr,
- xfs_da3_node_hdr_size(node2) +
+ dp->d_ops->node_hdr_size +
(sizeof(btree2[0]) * nodehdr2.count)));
/*
@@ -897,10 +852,10 @@ xfs_da3_node_rebalance(
if (swap) {
node1 = blk1->bp->b_addr;
node2 = blk2->bp->b_addr;
- xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
- xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
- btree1 = xfs_da3_node_tree_p(node1);
- btree2 = xfs_da3_node_tree_p(node2);
+ dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
+ dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
+ btree1 = dp->d_ops->node_tree_p(node1);
+ btree2 = dp->d_ops->node_tree_p(node2);
}
blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
@@ -927,12 +882,13 @@ xfs_da3_node_add(
struct xfs_da3_icnode_hdr nodehdr;
struct xfs_da_node_entry *btree;
int tmp;
+ struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_add(state->args);
node = oldblk->bp->b_addr;
- xfs_da3_node_hdr_from_disk(&nodehdr, node);
- btree = xfs_da3_node_tree_p(node);
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ btree = dp->d_ops->node_tree_p(node);
ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
ASSERT(newblk->blkno != 0);
@@ -955,9 +911,9 @@ xfs_da3_node_add(
tmp + sizeof(*btree)));
nodehdr.count += 1;
- xfs_da3_node_hdr_to_disk(node, &nodehdr);
+ dp->d_ops->node_hdr_to_disk(node, &nodehdr);
xfs_trans_log_buf(state->args->trans, oldblk->bp,
- XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));
+ XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
/*
* Copy the last hash value from the oldblk to propagate upwards.
@@ -1094,6 +1050,7 @@ xfs_da3_root_join(
struct xfs_da3_icnode_hdr oldroothdr;
struct xfs_da_node_entry *btree;
int error;
+ struct xfs_inode *dp = state->args->dp;
trace_xfs_da_root_join(state->args);
@@ -1101,7 +1058,7 @@ xfs_da3_root_join(
args = state->args;
oldroot = root_blk->bp->b_addr;
- xfs_da3_node_hdr_from_disk(&oldroothdr, oldroot);
+ dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
ASSERT(oldroothdr.forw == 0);
ASSERT(oldroothdr.back == 0);
@@ -1115,10 +1072,10 @@ xfs_da3_root_join(
* Read in the (only) child block, then copy those bytes into
* the root block's buffer and free the original child block.
*/
- btree = xfs_da3_node_tree_p(oldroot);
+ btree = dp->d_ops->node_tree_p(oldroot);
child = be32_to_cpu(btree[0].before);
ASSERT(child != 0);
- error = xfs_da3_node_read(args->trans, args->dp, child, -1, &bp,
+ error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
args->whichfork);
if (error)
return error;
@@ -1168,6 +1125,7 @@ xfs_da3_node_toosmall(
int error;
int retval;
int i;
+ struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_toosmall(state->args);
@@ -1179,7 +1137,7 @@ xfs_da3_node_toosmall(
blk = &state->path.blk[ state->path.active-1 ];
info = blk->bp->b_addr;
node = (xfs_da_intnode_t *)info;
- xfs_da3_node_hdr_from_disk(&nodehdr, node);
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
if (nodehdr.count > (state->node_ents >> 1)) {
*action = 0; /* blk over 50%, don't try to join */
return(0); /* blk over 50%, don't try to join */
@@ -1231,13 +1189,13 @@ xfs_da3_node_toosmall(
blkno = nodehdr.back;
if (blkno == 0)
continue;
- error = xfs_da3_node_read(state->args->trans, state->args->dp,
+ error = xfs_da3_node_read(state->args->trans, dp,
blkno, -1, &bp, state->args->whichfork);
if (error)
return(error);
node = bp->b_addr;
- xfs_da3_node_hdr_from_disk(&thdr, node);
+ dp->d_ops->node_hdr_from_disk(&thdr, node);
xfs_trans_brelse(state->args->trans, bp);
if (count - thdr.count >= 0)
@@ -1275,6 +1233,7 @@ xfs_da3_node_toosmall(
*/
STATIC uint
xfs_da3_node_lasthash(
+ struct xfs_inode *dp,
struct xfs_buf *bp,
int *count)
{
@@ -1283,12 +1242,12 @@ xfs_da3_node_lasthash(
struct xfs_da3_icnode_hdr nodehdr;
node = bp->b_addr;
- xfs_da3_node_hdr_from_disk(&nodehdr, node);
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
if (count)
*count = nodehdr.count;
if (!nodehdr.count)
return 0;
- btree = xfs_da3_node_tree_p(node);
+ btree = dp->d_ops->node_tree_p(node);
return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
@@ -1307,6 +1266,7 @@ xfs_da3_fixhashpath(
xfs_dahash_t lasthash=0;
int level;
int count;
+ struct xfs_inode *dp = state->args->dp;
trace_xfs_da_fixhashpath(state->args);
@@ -1319,12 +1279,12 @@ xfs_da3_fixhashpath(
return;
break;
case XFS_DIR2_LEAFN_MAGIC:
- lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
+ lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
if (count == 0)
return;
break;
case XFS_DA_NODE_MAGIC:
- lasthash = xfs_da3_node_lasthash(blk->bp, &count);
+ lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
if (count == 0)
return;
break;
@@ -1333,8 +1293,8 @@ xfs_da3_fixhashpath(
struct xfs_da3_icnode_hdr nodehdr;
node = blk->bp->b_addr;
- xfs_da3_node_hdr_from_disk(&nodehdr, node);
- btree = xfs_da3_node_tree_p(node);
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ btree = dp->d_ops->node_tree_p(node);
if (be32_to_cpu(btree->hashval) == lasthash)
break;
blk->hashval = lasthash;
@@ -1360,11 +1320,12 @@ xfs_da3_node_remove(
struct xfs_da_node_entry *btree;
int index;
int tmp;
+ struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_remove(state->args);
node = drop_blk->bp->b_addr;
- xfs_da3_node_hdr_from_disk(&nodehdr, node);
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
ASSERT(drop_blk->index < nodehdr.count);
ASSERT(drop_blk->index >= 0);
@@ -1372,7 +1333,7 @@ xfs_da3_node_remove(
* Copy over the offending entry, or just zero it out.
*/
index = drop_blk->index;
- btree = xfs_da3_node_tree_p(node);
+ btree = dp->d_ops->node_tree_p(node);
if (index < nodehdr.count - 1) {
tmp = nodehdr.count - index - 1;
tmp *= (uint)sizeof(xfs_da_node_entry_t);
@@ -1385,9 +1346,9 @@ xfs_da3_node_remove(
xfs_trans_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
nodehdr.count -= 1;
- xfs_da3_node_hdr_to_disk(node, &nodehdr);
+ dp->d_ops->node_hdr_to_disk(node, &nodehdr);
xfs_trans_log_buf(state->args->trans, drop_blk->bp,
- XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));
+ XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
/*
* Copy the last hash value from the block to propagate upwards.
@@ -1414,15 +1375,16 @@ xfs_da3_node_unbalance(
struct xfs_trans *tp;
int sindex;
int tmp;
+ struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_unbalance(state->args);
drop_node = drop_blk->bp->b_addr;
save_node = save_blk->bp->b_addr;
- xfs_da3_node_hdr_from_disk(&drop_hdr, drop_node);
- xfs_da3_node_hdr_from_disk(&save_hdr, save_node);
- drop_btree = xfs_da3_node_tree_p(drop_node);
- save_btree = xfs_da3_node_tree_p(save_node);
+ dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
+ dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
+ drop_btree = dp->d_ops->node_tree_p(drop_node);
+ save_btree = dp->d_ops->node_tree_p(save_node);
tp = state->args->trans;
/*
@@ -1456,10 +1418,10 @@ xfs_da3_node_unbalance(
memcpy(&save_btree[sindex], &drop_btree[0], tmp);
save_hdr.count += drop_hdr.count;
- xfs_da3_node_hdr_to_disk(save_node, &save_hdr);
+ dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
xfs_trans_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, &save_node->hdr,
- xfs_da3_node_hdr_size(save_node)));
+ dp->d_ops->node_hdr_size));
/*
* Save the last hashval in the remaining block for upward propagation.
@@ -1501,6 +1463,7 @@ xfs_da3_node_lookup_int(
int max;
int error;
int retval;
+ struct xfs_inode *dp = state->args->dp;
args = state->args;
@@ -1536,7 +1499,8 @@ xfs_da3_node_lookup_int(
if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
blk->magic == XFS_DIR3_LEAFN_MAGIC) {
blk->magic = XFS_DIR2_LEAFN_MAGIC;
- blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
+ blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
+ blk->bp, NULL);
break;
}
@@ -1547,8 +1511,8 @@ xfs_da3_node_lookup_int(
* Search an intermediate node for a match.
*/
node = blk->bp->b_addr;
- xfs_da3_node_hdr_from_disk(&nodehdr, node);
- btree = xfs_da3_node_tree_p(node);
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ btree = dp->d_ops->node_tree_p(node);
max = nodehdr.count;
blk->hashval = be32_to_cpu(btree[max - 1].hashval);
@@ -1643,6 +1607,7 @@ xfs_da3_node_lookup_int(
*/
STATIC int
xfs_da3_node_order(
+ struct xfs_inode *dp,
struct xfs_buf *node1_bp,
struct xfs_buf *node2_bp)
{
@@ -1655,10 +1620,10 @@ xfs_da3_node_order(
node1 = node1_bp->b_addr;
node2 = node2_bp->b_addr;
- xfs_da3_node_hdr_from_disk(&node1hdr, node1);
- xfs_da3_node_hdr_from_disk(&node2hdr, node2);
- btree1 = xfs_da3_node_tree_p(node1);
- btree2 = xfs_da3_node_tree_p(node2);
+ dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
+ dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
+ btree1 = dp->d_ops->node_tree_p(node1);
+ btree2 = dp->d_ops->node_tree_p(node2);
if (node1hdr.count > 0 && node2hdr.count > 0 &&
((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
@@ -1685,6 +1650,7 @@ xfs_da3_blk_link(
struct xfs_buf *bp;
int before = 0;
int error;
+ struct xfs_inode *dp = state->args->dp;
/*
* Set up environment.
@@ -1702,10 +1668,10 @@ xfs_da3_blk_link(
before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
break;
case XFS_DIR2_LEAFN_MAGIC:
- before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
+ before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
break;
case XFS_DA_NODE_MAGIC:
- before = xfs_da3_node_order(old_blk->bp, new_blk->bp);
+ before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
break;
}
@@ -1720,7 +1686,7 @@ xfs_da3_blk_link(
new_info->forw = cpu_to_be32(old_blk->blkno);
new_info->back = old_info->back;
if (old_info->back) {
- error = xfs_da3_node_read(args->trans, args->dp,
+ error = xfs_da3_node_read(args->trans, dp,
be32_to_cpu(old_info->back),
-1, &bp, args->whichfork);
if (error)
@@ -1741,7 +1707,7 @@ xfs_da3_blk_link(
new_info->forw = old_info->forw;
new_info->back = cpu_to_be32(old_blk->blkno);
if (old_info->forw) {
- error = xfs_da3_node_read(args->trans, args->dp,
+ error = xfs_da3_node_read(args->trans, dp,
be32_to_cpu(old_info->forw),
-1, &bp, args->whichfork);
if (error)
@@ -1861,6 +1827,7 @@ xfs_da3_path_shift(
xfs_dablk_t blkno = 0;
int level;
int error;
+ struct xfs_inode *dp = state->args->dp;
trace_xfs_da_path_shift(state->args);
@@ -1876,8 +1843,8 @@ xfs_da3_path_shift(
level = (path->active-1) - 1; /* skip bottom layer in path */
for (blk = &path->blk[level]; level >= 0; blk--, level--) {
node = blk->bp->b_addr;
- xfs_da3_node_hdr_from_disk(&nodehdr, node);
- btree = xfs_da3_node_tree_p(node);
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ btree = dp->d_ops->node_tree_p(node);
if (forward && (blk->index < nodehdr.count - 1)) {
blk->index++;
@@ -1911,7 +1878,7 @@ xfs_da3_path_shift(
* Read the next child block.
*/
blk->blkno = blkno;
- error = xfs_da3_node_read(args->trans, args->dp, blkno, -1,
+ error = xfs_da3_node_read(args->trans, dp, blkno, -1,
&blk->bp, args->whichfork);
if (error)
return(error);
@@ -1933,8 +1900,8 @@ xfs_da3_path_shift(
case XFS_DA3_NODE_MAGIC:
blk->magic = XFS_DA_NODE_MAGIC;
node = (xfs_da_intnode_t *)info;
- xfs_da3_node_hdr_from_disk(&nodehdr, node);
- btree = xfs_da3_node_tree_p(node);
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ btree = dp->d_ops->node_tree_p(node);
blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
if (forward)
blk->index = 0;
@@ -1947,16 +1914,15 @@ xfs_da3_path_shift(
blk->magic = XFS_ATTR_LEAF_MAGIC;
ASSERT(level == path->active-1);
blk->index = 0;
- blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
- NULL);
+ blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
break;
case XFS_DIR2_LEAFN_MAGIC:
case XFS_DIR3_LEAFN_MAGIC:
blk->magic = XFS_DIR2_LEAFN_MAGIC;
ASSERT(level == path->active-1);
blk->index = 0;
- blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
- NULL);
+ blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
+ blk->bp, NULL);
break;
default:
ASSERT(0);
@@ -2163,7 +2129,7 @@ xfs_da3_swap_lastblock(
struct xfs_dir2_leaf *dead_leaf2;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr par_hdr;
- struct xfs_inode *ip;
+ struct xfs_inode *dp;
struct xfs_trans *tp;
struct xfs_mount *mp;
struct xfs_buf *dead_buf;
@@ -2187,12 +2153,12 @@ xfs_da3_swap_lastblock(
dead_buf = *dead_bufp;
dead_blkno = *dead_blknop;
tp = args->trans;
- ip = args->dp;
+ dp = args->dp;
w = args->whichfork;
ASSERT(w == XFS_DATA_FORK);
- mp = ip->i_mount;
+ mp = dp->i_mount;
lastoff = mp->m_dirfreeblk;
- error = xfs_bmap_last_before(tp, ip, &lastoff, w);
+ error = xfs_bmap_last_before(tp, dp, &lastoff, w);
if (error)
return error;
if (unlikely(lastoff == 0)) {
@@ -2204,7 +2170,7 @@ xfs_da3_swap_lastblock(
* Read the last block in the btree space.
*/
last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
- error = xfs_da3_node_read(tp, ip, last_blkno, -1, &last_buf, w);
+ error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
if (error)
return error;
/*
@@ -2222,16 +2188,16 @@ xfs_da3_swap_lastblock(
struct xfs_dir2_leaf_entry *ents;
dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, dead_leaf2);
- ents = xfs_dir3_leaf_ents_p(dead_leaf2);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
+ ents = dp->d_ops->leaf_ents_p(dead_leaf2);
dead_level = 0;
dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
} else {
struct xfs_da3_icnode_hdr deadhdr;
dead_node = (xfs_da_intnode_t *)dead_info;
- xfs_da3_node_hdr_from_disk(&deadhdr, dead_node);
- btree = xfs_da3_node_tree_p(dead_node);
+ dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
+ btree = dp->d_ops->node_tree_p(dead_node);
dead_level = deadhdr.level;
dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
}
@@ -2240,7 +2206,7 @@ xfs_da3_swap_lastblock(
* If the moved block has a left sibling, fix up the pointers.
*/
if ((sib_blkno = be32_to_cpu(dead_info->back))) {
- error = xfs_da3_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
+ error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
if (error)
goto done;
sib_info = sib_buf->b_addr;
@@ -2262,7 +2228,7 @@ xfs_da3_swap_lastblock(
* If the moved block has a right sibling, fix up the pointers.
*/
if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
- error = xfs_da3_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
+ error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
if (error)
goto done;
sib_info = sib_buf->b_addr;
@@ -2286,11 +2252,11 @@ xfs_da3_swap_lastblock(
* Walk down the tree looking for the parent of the moved block.
*/
for (;;) {
- error = xfs_da3_node_read(tp, ip, par_blkno, -1, &par_buf, w);
+ error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
if (error)
goto done;
par_node = par_buf->b_addr;
- xfs_da3_node_hdr_from_disk(&par_hdr, par_node);
+ dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
if (level >= 0 && level != par_hdr.level + 1) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
XFS_ERRLEVEL_LOW, mp);
@@ -2298,7 +2264,7 @@ xfs_da3_swap_lastblock(
goto done;
}
level = par_hdr.level;
- btree = xfs_da3_node_tree_p(par_node);
+ btree = dp->d_ops->node_tree_p(par_node);
for (entno = 0;
entno < par_hdr.count &&
be32_to_cpu(btree[entno].hashval) < dead_hash;
@@ -2337,18 +2303,18 @@ xfs_da3_swap_lastblock(
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
- error = xfs_da3_node_read(tp, ip, par_blkno, -1, &par_buf, w);
+ error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
if (error)
goto done;
par_node = par_buf->b_addr;
- xfs_da3_node_hdr_from_disk(&par_hdr, par_node);
+ dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
if (par_hdr.level != level) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
- btree = xfs_da3_node_tree_p(par_node);
+ btree = dp->d_ops->node_tree_p(par_node);
entno = 0;
}
/*
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index b1f267995dea..6e95ea79f5d7 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -23,146 +23,7 @@ struct xfs_bmap_free;
struct xfs_inode;
struct xfs_trans;
struct zone;
-
-/*========================================================================
- * Directory Structure when greater than XFS_LBSIZE(mp) bytes.
- *========================================================================*/
-
-/*
- * This structure is common to both leaf nodes and non-leaf nodes in the Btree.
- *
- * It is used to manage a doubly linked list of all blocks at the same
- * level in the Btree, and to identify which type of block this is.
- */
-#define XFS_DA_NODE_MAGIC 0xfebe /* magic number: non-leaf blocks */
-#define XFS_ATTR_LEAF_MAGIC 0xfbee /* magic number: attribute leaf blks */
-#define XFS_DIR2_LEAF1_MAGIC 0xd2f1 /* magic number: v2 dirlf single blks */
-#define XFS_DIR2_LEAFN_MAGIC 0xd2ff /* magic number: v2 dirlf multi blks */
-
-typedef struct xfs_da_blkinfo {
- __be32 forw; /* previous block in list */
- __be32 back; /* following block in list */
- __be16 magic; /* validity check on block */
- __be16 pad; /* unused */
-} xfs_da_blkinfo_t;
-
-/*
- * CRC enabled directory structure types
- *
- * The headers change size for the additional verification information, but
- * otherwise the tree layouts and contents are unchanged. Hence the da btree
- * code can use the struct xfs_da_blkinfo for manipulating the tree links and
- * magic numbers without modification for both v2 and v3 nodes.
- */
-#define XFS_DA3_NODE_MAGIC 0x3ebe /* magic number: non-leaf blocks */
-#define XFS_ATTR3_LEAF_MAGIC 0x3bee /* magic number: attribute leaf blks */
-#define XFS_DIR3_LEAF1_MAGIC 0x3df1 /* magic number: v2 dirlf single blks */
-#define XFS_DIR3_LEAFN_MAGIC 0x3dff /* magic number: v2 dirlf multi blks */
-
-struct xfs_da3_blkinfo {
- /*
- * the node link manipulation code relies on the fact that the first
- * element of this structure is the struct xfs_da_blkinfo so it can
- * ignore the differences in the rest of the structures.
- */
- struct xfs_da_blkinfo hdr;
- __be32 crc; /* CRC of block */
- __be64 blkno; /* first block of the buffer */
- __be64 lsn; /* sequence number of last write */
- uuid_t uuid; /* filesystem we belong to */
- __be64 owner; /* inode that owns the block */
-};
-
-/*
- * This is the structure of the root and intermediate nodes in the Btree.
- * The leaf nodes are defined above.
- *
- * Entries are not packed.
- *
- * Since we have duplicate keys, use a binary search but always follow
- * all match in the block, not just the first match found.
- */
-#define XFS_DA_NODE_MAXDEPTH 5 /* max depth of Btree */
-
-typedef struct xfs_da_node_hdr {
- struct xfs_da_blkinfo info; /* block type, links, etc. */
- __be16 __count; /* count of active entries */
- __be16 __level; /* level above leaves (leaf == 0) */
-} xfs_da_node_hdr_t;
-
-struct xfs_da3_node_hdr {
- struct xfs_da3_blkinfo info; /* block type, links, etc. */
- __be16 __count; /* count of active entries */
- __be16 __level; /* level above leaves (leaf == 0) */
- __be32 __pad32;
-};
-
-#define XFS_DA3_NODE_CRC_OFF (offsetof(struct xfs_da3_node_hdr, info.crc))
-
-typedef struct xfs_da_node_entry {
- __be32 hashval; /* hash value for this descendant */
- __be32 before; /* Btree block before this key */
-} xfs_da_node_entry_t;
-
-typedef struct xfs_da_intnode {
- struct xfs_da_node_hdr hdr;
- struct xfs_da_node_entry __btree[];
-} xfs_da_intnode_t;
-
-struct xfs_da3_intnode {
- struct xfs_da3_node_hdr hdr;
- struct xfs_da_node_entry __btree[];
-};
-
-/*
- * In-core version of the node header to abstract the differences in the v2 and
- * v3 disk format of the headers. Callers need to convert to/from disk format as
- * appropriate.
- */
-struct xfs_da3_icnode_hdr {
- __uint32_t forw;
- __uint32_t back;
- __uint16_t magic;
- __uint16_t count;
- __uint16_t level;
-};
-
-extern void xfs_da3_node_hdr_from_disk(struct xfs_da3_icnode_hdr *to,
- struct xfs_da_intnode *from);
-extern void xfs_da3_node_hdr_to_disk(struct xfs_da_intnode *to,
- struct xfs_da3_icnode_hdr *from);
-
-static inline int
-__xfs_da3_node_hdr_size(bool v3)
-{
- if (v3)
- return sizeof(struct xfs_da3_node_hdr);
- return sizeof(struct xfs_da_node_hdr);
-}
-static inline int
-xfs_da3_node_hdr_size(struct xfs_da_intnode *dap)
-{
- bool v3 = dap->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC);
-
- return __xfs_da3_node_hdr_size(v3);
-}
-
-static inline struct xfs_da_node_entry *
-xfs_da3_node_tree_p(struct xfs_da_intnode *dap)
-{
- if (dap->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
- struct xfs_da3_intnode *dap3 = (struct xfs_da3_intnode *)dap;
- return dap3->__btree;
- }
- return dap->__btree;
-}
-
-extern void xfs_da3_intnode_from_disk(struct xfs_da3_icnode_hdr *to,
- struct xfs_da_intnode *from);
-extern void xfs_da3_intnode_to_disk(struct xfs_da_intnode *to,
- struct xfs_da3_icnode_hdr *from);
-
-#define XFS_LBSIZE(mp) (mp)->m_sb.sb_blocksize
+struct xfs_dir_ops;
/*========================================================================
* Btree searching and modification structure definitions.
@@ -309,8 +170,6 @@ int xfs_da3_node_read(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mappedbno,
struct xfs_buf **bpp, int which_fork);
-extern const struct xfs_buf_ops xfs_da3_node_buf_ops;
-
/*
* Utility routines.
*/
diff --git a/fs/xfs/xfs_da_format.c b/fs/xfs/xfs_da_format.c
new file mode 100644
index 000000000000..e6c83e1fbc8a
--- /dev/null
+++ b/fs/xfs/xfs_da_format.c
@@ -0,0 +1,907 @@
+/*
+ * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
+#include "xfs_inode.h"
+#include "xfs_dir2.h"
+
+/*
+ * Shortform directory ops
+ */
+static int
+xfs_dir2_sf_entsize(
+ struct xfs_dir2_sf_hdr *hdr,
+ int len)
+{
+ int count = sizeof(struct xfs_dir2_sf_entry); /* namelen + offset */
+
+ count += len; /* name */
+ count += hdr->i8count ? sizeof(xfs_dir2_ino8_t) :
+ sizeof(xfs_dir2_ino4_t); /* ino # */
+ return count;
+}
+
+static int
+xfs_dir3_sf_entsize(
+ struct xfs_dir2_sf_hdr *hdr,
+ int len)
+{
+ return xfs_dir2_sf_entsize(hdr, len) + sizeof(__uint8_t);
+}
+
+static struct xfs_dir2_sf_entry *
+xfs_dir2_sf_nextentry(
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep)
+{
+ return (struct xfs_dir2_sf_entry *)
+ ((char *)sfep + xfs_dir2_sf_entsize(hdr, sfep->namelen));
+}
+
+static struct xfs_dir2_sf_entry *
+xfs_dir3_sf_nextentry(
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep)
+{
+ return (struct xfs_dir2_sf_entry *)
+ ((char *)sfep + xfs_dir3_sf_entsize(hdr, sfep->namelen));
+}
+
+
+/*
+ * For filetype enabled shortform directories, the file type field is stored at
+ * the end of the name. Because it's only a single byte, endian conversion is
+ * not necessary. For non-filetype enabled directories, the type is always
+ * unknown and we never store the value.
+ */
+static __uint8_t
+xfs_dir2_sfe_get_ftype(
+ struct xfs_dir2_sf_entry *sfep)
+{
+ return XFS_DIR3_FT_UNKNOWN;
+}
+
+static void
+xfs_dir2_sfe_put_ftype(
+ struct xfs_dir2_sf_entry *sfep,
+ __uint8_t ftype)
+{
+ ASSERT(ftype < XFS_DIR3_FT_MAX);
+}
+
+static __uint8_t
+xfs_dir3_sfe_get_ftype(
+ struct xfs_dir2_sf_entry *sfep)
+{
+ __uint8_t ftype;
+
+ ftype = sfep->name[sfep->namelen];
+ if (ftype >= XFS_DIR3_FT_MAX)
+ return XFS_DIR3_FT_UNKNOWN;
+ return ftype;
+}
+
+static void
+xfs_dir3_sfe_put_ftype(
+ struct xfs_dir2_sf_entry *sfep,
+ __uint8_t ftype)
+{
+ ASSERT(ftype < XFS_DIR3_FT_MAX);
+
+ sfep->name[sfep->namelen] = ftype;
+}
+
+/*
+ * Inode numbers in short-form directories can come in two versions,
+ * either 4 bytes or 8 bytes wide. These helpers deal with the
+ * two forms transparently by looking at the header's i8count field.
+ *
+ * For 64-bit inode numbers the most significant byte must be zero.
+ */
+static xfs_ino_t
+xfs_dir2_sf_get_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ xfs_dir2_inou_t *from)
+{
+ if (hdr->i8count)
+ return get_unaligned_be64(&from->i8.i) & 0x00ffffffffffffffULL;
+ else
+ return get_unaligned_be32(&from->i4.i);
+}
+
+static void
+xfs_dir2_sf_put_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ xfs_dir2_inou_t *to,
+ xfs_ino_t ino)
+{
+ ASSERT((ino & 0xff00000000000000ULL) == 0);
+
+ if (hdr->i8count)
+ put_unaligned_be64(ino, &to->i8.i);
+ else
+ put_unaligned_be32(ino, &to->i4.i);
+}
+
+static xfs_ino_t
+xfs_dir2_sf_get_parent_ino(
+ struct xfs_dir2_sf_hdr *hdr)
+{
+ return xfs_dir2_sf_get_ino(hdr, &hdr->parent);
+}
+
+static void
+xfs_dir2_sf_put_parent_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ xfs_ino_t ino)
+{
+ xfs_dir2_sf_put_ino(hdr, &hdr->parent, ino);
+}
+
+/*
+ * In short-form directory entries the inode numbers are stored at a variable
+ * offset behind the entry name. If the entry stores a filetype value, then it
+ * sits between the name and the inode number. Hence the inode numbers may only
+ * be accessed through the helpers below.
+ */
+static xfs_ino_t
+xfs_dir2_sfe_get_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep)
+{
+ return xfs_dir2_sf_get_ino(hdr,
+ (xfs_dir2_inou_t *)&sfep->name[sfep->namelen]);
+}
+
+static void
+xfs_dir2_sfe_put_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep,
+ xfs_ino_t ino)
+{
+ xfs_dir2_sf_put_ino(hdr,
+ (xfs_dir2_inou_t *)&sfep->name[sfep->namelen], ino);
+}
+
+static xfs_ino_t
+xfs_dir3_sfe_get_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep)
+{
+ return xfs_dir2_sf_get_ino(hdr,
+ (xfs_dir2_inou_t *)&sfep->name[sfep->namelen + 1]);
+}
+
+static void
+xfs_dir3_sfe_put_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep,
+ xfs_ino_t ino)
+{
+ xfs_dir2_sf_put_ino(hdr,
+ (xfs_dir2_inou_t *)&sfep->name[sfep->namelen + 1], ino);
+}
+
+
+/*
+ * Directory data block operations
+ */
+
+/*
+ * For special situations, the dirent size ends up fixed because we always know
+ * what the size of the entry is. That's true for the "." and "..", and
+ * therefore we know that they are a fixed size and hence their offsets are
+ * constant, as is the first entry.
+ *
+ * Hence, this calculation is written as a macro so that it can be evaluated at
+ * compile time and so certain offsets can be calculated directly in the
+ * structure initialiser via the macro. There are two macros - one for dirents
+ * with ftype and one without, so there are no unresolvable conditionals in the
+ * calculations. We also use round_up() as XFS_DIR2_DATA_ALIGN is always a power
+ * of 2 and the compiler doesn't reject it (unlike roundup()).
+ */
+#define XFS_DIR2_DATA_ENTSIZE(n) \
+ round_up((offsetof(struct xfs_dir2_data_entry, name[0]) + (n) + \
+ sizeof(xfs_dir2_data_off_t)), XFS_DIR2_DATA_ALIGN)
+
+#define XFS_DIR3_DATA_ENTSIZE(n) \
+ round_up((offsetof(struct xfs_dir2_data_entry, name[0]) + (n) + \
+ sizeof(xfs_dir2_data_off_t) + sizeof(__uint8_t)), \
+ XFS_DIR2_DATA_ALIGN)
+
+static int
+xfs_dir2_data_entsize(
+ int n)
+{
+ return XFS_DIR2_DATA_ENTSIZE(n);
+}
+
+static int
+xfs_dir3_data_entsize(
+ int n)
+{
+ return XFS_DIR3_DATA_ENTSIZE(n);
+}
+
+static __uint8_t
+xfs_dir2_data_get_ftype(
+ struct xfs_dir2_data_entry *dep)
+{
+ return XFS_DIR3_FT_UNKNOWN;
+}
+
+static void
+xfs_dir2_data_put_ftype(
+ struct xfs_dir2_data_entry *dep,
+ __uint8_t ftype)
+{
+ ASSERT(ftype < XFS_DIR3_FT_MAX);
+}
+
+static __uint8_t
+xfs_dir3_data_get_ftype(
+ struct xfs_dir2_data_entry *dep)
+{
+ __uint8_t ftype = dep->name[dep->namelen];
+
+ ASSERT(ftype < XFS_DIR3_FT_MAX);
+ if (ftype >= XFS_DIR3_FT_MAX)
+ return XFS_DIR3_FT_UNKNOWN;
+ return ftype;
+}
+
+static void
+xfs_dir3_data_put_ftype(
+ struct xfs_dir2_data_entry *dep,
+ __uint8_t type)
+{
+ ASSERT(type < XFS_DIR3_FT_MAX);
+ ASSERT(dep->namelen != 0);
+
+ dep->name[dep->namelen] = type;
+}
+
+/*
+ * Pointer to an entry's tag word.
+ */
+static __be16 *
+xfs_dir2_data_entry_tag_p(
+ struct xfs_dir2_data_entry *dep)
+{
+ return (__be16 *)((char *)dep +
+ xfs_dir2_data_entsize(dep->namelen) - sizeof(__be16));
+}
+
+static __be16 *
+xfs_dir3_data_entry_tag_p(
+ struct xfs_dir2_data_entry *dep)
+{
+ return (__be16 *)((char *)dep +
+ xfs_dir3_data_entsize(dep->namelen) - sizeof(__be16));
+}
+
+/*
+ * location of . and .. in data space (always block 0)
+ */
+static struct xfs_dir2_data_entry *
+xfs_dir2_data_dot_entry_p(
+ struct xfs_dir2_data_hdr *hdr)
+{
+ return (struct xfs_dir2_data_entry *)
+ ((char *)hdr + sizeof(struct xfs_dir2_data_hdr));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir2_data_dotdot_entry_p(
+ struct xfs_dir2_data_hdr *hdr)
+{
+ return (struct xfs_dir2_data_entry *)
+ ((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
+ XFS_DIR2_DATA_ENTSIZE(1));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir2_data_first_entry_p(
+ struct xfs_dir2_data_hdr *hdr)
+{
+ return (struct xfs_dir2_data_entry *)
+ ((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
+ XFS_DIR2_DATA_ENTSIZE(1) +
+ XFS_DIR2_DATA_ENTSIZE(2));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir2_ftype_data_dotdot_entry_p(
+ struct xfs_dir2_data_hdr *hdr)
+{
+ return (struct xfs_dir2_data_entry *)
+ ((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
+ XFS_DIR3_DATA_ENTSIZE(1));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir2_ftype_data_first_entry_p(
+ struct xfs_dir2_data_hdr *hdr)
+{
+ return (struct xfs_dir2_data_entry *)
+ ((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
+ XFS_DIR3_DATA_ENTSIZE(1) +
+ XFS_DIR3_DATA_ENTSIZE(2));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir3_data_dot_entry_p(
+ struct xfs_dir2_data_hdr *hdr)
+{
+ return (struct xfs_dir2_data_entry *)
+ ((char *)hdr + sizeof(struct xfs_dir3_data_hdr));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir3_data_dotdot_entry_p(
+ struct xfs_dir2_data_hdr *hdr)
+{
+ return (struct xfs_dir2_data_entry *)
+ ((char *)hdr + sizeof(struct xfs_dir3_data_hdr) +
+ XFS_DIR3_DATA_ENTSIZE(1));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir3_data_first_entry_p(
+ struct xfs_dir2_data_hdr *hdr)
+{
+ return (struct xfs_dir2_data_entry *)
+ ((char *)hdr + sizeof(struct xfs_dir3_data_hdr) +
+ XFS_DIR3_DATA_ENTSIZE(1) +
+ XFS_DIR3_DATA_ENTSIZE(2));
+}
+
+static struct xfs_dir2_data_free *
+xfs_dir2_data_bestfree_p(struct xfs_dir2_data_hdr *hdr)
+{
+ return hdr->bestfree;
+}
+
+static struct xfs_dir2_data_free *
+xfs_dir3_data_bestfree_p(struct xfs_dir2_data_hdr *hdr)
+{
+ return ((struct xfs_dir3_data_hdr *)hdr)->best_free;
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir2_data_entry_p(struct xfs_dir2_data_hdr *hdr)
+{
+ return (struct xfs_dir2_data_entry *)
+ ((char *)hdr + sizeof(struct xfs_dir2_data_hdr));
+}
+
+static struct xfs_dir2_data_unused *
+xfs_dir2_data_unused_p(struct xfs_dir2_data_hdr *hdr)
+{
+ return (struct xfs_dir2_data_unused *)
+ ((char *)hdr + sizeof(struct xfs_dir2_data_hdr));
+}
+
+static struct xfs_dir2_data_entry *
+xfs_dir3_data_entry_p(struct xfs_dir2_data_hdr *hdr)
+{
+ return (struct xfs_dir2_data_entry *)
+ ((char *)hdr + sizeof(struct xfs_dir3_data_hdr));
+}
+
+static struct xfs_dir2_data_unused *
+xfs_dir3_data_unused_p(struct xfs_dir2_data_hdr *hdr)
+{
+ return (struct xfs_dir2_data_unused *)
+ ((char *)hdr + sizeof(struct xfs_dir3_data_hdr));
+}
+
+
+/*
+ * Directory Leaf block operations
+ */
+static int
+xfs_dir2_max_leaf_ents(struct xfs_mount *mp)
+{
+ return (mp->m_dirblksize - sizeof(struct xfs_dir2_leaf_hdr)) /
+ (uint)sizeof(struct xfs_dir2_leaf_entry);
+}
+
+static struct xfs_dir2_leaf_entry *
+xfs_dir2_leaf_ents_p(struct xfs_dir2_leaf *lp)
+{
+ return lp->__ents;
+}
+
+static int
+xfs_dir3_max_leaf_ents(struct xfs_mount *mp)
+{
+ return (mp->m_dirblksize - sizeof(struct xfs_dir3_leaf_hdr)) /
+ (uint)sizeof(struct xfs_dir2_leaf_entry);
+}
+
+static struct xfs_dir2_leaf_entry *
+xfs_dir3_leaf_ents_p(struct xfs_dir2_leaf *lp)
+{
+ return ((struct xfs_dir3_leaf *)lp)->__ents;
+}
+
+static void
+xfs_dir2_leaf_hdr_from_disk(
+ struct xfs_dir3_icleaf_hdr *to,
+ struct xfs_dir2_leaf *from)
+{
+ to->forw = be32_to_cpu(from->hdr.info.forw);
+ to->back = be32_to_cpu(from->hdr.info.back);
+ to->magic = be16_to_cpu(from->hdr.info.magic);
+ to->count = be16_to_cpu(from->hdr.count);
+ to->stale = be16_to_cpu(from->hdr.stale);
+
+ ASSERT(to->magic == XFS_DIR2_LEAF1_MAGIC ||
+ to->magic == XFS_DIR2_LEAFN_MAGIC);
+}
+
+static void
+xfs_dir2_leaf_hdr_to_disk(
+ struct xfs_dir2_leaf *to,
+ struct xfs_dir3_icleaf_hdr *from)
+{
+ ASSERT(from->magic == XFS_DIR2_LEAF1_MAGIC ||
+ from->magic == XFS_DIR2_LEAFN_MAGIC);
+
+ to->hdr.info.forw = cpu_to_be32(from->forw);
+ to->hdr.info.back = cpu_to_be32(from->back);
+ to->hdr.info.magic = cpu_to_be16(from->magic);
+ to->hdr.count = cpu_to_be16(from->count);
+ to->hdr.stale = cpu_to_be16(from->stale);
+}
+
+static void
+xfs_dir3_leaf_hdr_from_disk(
+ struct xfs_dir3_icleaf_hdr *to,
+ struct xfs_dir2_leaf *from)
+{
+ struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)from;
+
+ to->forw = be32_to_cpu(hdr3->info.hdr.forw);
+ to->back = be32_to_cpu(hdr3->info.hdr.back);
+ to->magic = be16_to_cpu(hdr3->info.hdr.magic);
+ to->count = be16_to_cpu(hdr3->count);
+ to->stale = be16_to_cpu(hdr3->stale);
+
+ ASSERT(to->magic == XFS_DIR3_LEAF1_MAGIC ||
+ to->magic == XFS_DIR3_LEAFN_MAGIC);
+}
+
+static void
+xfs_dir3_leaf_hdr_to_disk(
+ struct xfs_dir2_leaf *to,
+ struct xfs_dir3_icleaf_hdr *from)
+{
+ struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)to;
+
+ ASSERT(from->magic == XFS_DIR3_LEAF1_MAGIC ||
+ from->magic == XFS_DIR3_LEAFN_MAGIC);
+
+ hdr3->info.hdr.forw = cpu_to_be32(from->forw);
+ hdr3->info.hdr.back = cpu_to_be32(from->back);
+ hdr3->info.hdr.magic = cpu_to_be16(from->magic);
+ hdr3->count = cpu_to_be16(from->count);
+ hdr3->stale = cpu_to_be16(from->stale);
+}
+
+
+/*
+ * Directory/Attribute Node block operations
+ */
+static struct xfs_da_node_entry *
+xfs_da2_node_tree_p(struct xfs_da_intnode *dap)
+{
+ return dap->__btree;
+}
+
+static struct xfs_da_node_entry *
+xfs_da3_node_tree_p(struct xfs_da_intnode *dap)
+{
+ return ((struct xfs_da3_intnode *)dap)->__btree;
+}
+
+static void
+xfs_da2_node_hdr_from_disk(
+ struct xfs_da3_icnode_hdr *to,
+ struct xfs_da_intnode *from)
+{
+ ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
+ to->forw = be32_to_cpu(from->hdr.info.forw);
+ to->back = be32_to_cpu(from->hdr.info.back);
+ to->magic = be16_to_cpu(from->hdr.info.magic);
+ to->count = be16_to_cpu(from->hdr.__count);
+ to->level = be16_to_cpu(from->hdr.__level);
+}
+
+static void
+xfs_da2_node_hdr_to_disk(
+ struct xfs_da_intnode *to,
+ struct xfs_da3_icnode_hdr *from)
+{
+ ASSERT(from->magic == XFS_DA_NODE_MAGIC);
+ to->hdr.info.forw = cpu_to_be32(from->forw);
+ to->hdr.info.back = cpu_to_be32(from->back);
+ to->hdr.info.magic = cpu_to_be16(from->magic);
+ to->hdr.__count = cpu_to_be16(from->count);
+ to->hdr.__level = cpu_to_be16(from->level);
+}
+
+static void
+xfs_da3_node_hdr_from_disk(
+ struct xfs_da3_icnode_hdr *to,
+ struct xfs_da_intnode *from)
+{
+ struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)from;
+
+ ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
+ to->forw = be32_to_cpu(hdr3->info.hdr.forw);
+ to->back = be32_to_cpu(hdr3->info.hdr.back);
+ to->magic = be16_to_cpu(hdr3->info.hdr.magic);
+ to->count = be16_to_cpu(hdr3->__count);
+ to->level = be16_to_cpu(hdr3->__level);
+}
+
+static void
+xfs_da3_node_hdr_to_disk(
+ struct xfs_da_intnode *to,
+ struct xfs_da3_icnode_hdr *from)
+{
+ struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)to;
+
+ ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
+ hdr3->info.hdr.forw = cpu_to_be32(from->forw);
+ hdr3->info.hdr.back = cpu_to_be32(from->back);
+ hdr3->info.hdr.magic = cpu_to_be16(from->magic);
+ hdr3->__count = cpu_to_be16(from->count);
+ hdr3->__level = cpu_to_be16(from->level);
+}
+
+
+/*
+ * Directory free space block operations
+ */
+static int
+xfs_dir2_free_max_bests(struct xfs_mount *mp)
+{
+ return (mp->m_dirblksize - sizeof(struct xfs_dir2_free_hdr)) /
+ sizeof(xfs_dir2_data_off_t);
+}
+
+static __be16 *
+xfs_dir2_free_bests_p(struct xfs_dir2_free *free)
+{
+ return (__be16 *)((char *)free + sizeof(struct xfs_dir2_free_hdr));
+}
+
+/*
+ * Convert data space db to the corresponding free db.
+ */
+static xfs_dir2_db_t
+xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
+{
+ return XFS_DIR2_FREE_FIRSTDB(mp) + db / xfs_dir2_free_max_bests(mp);
+}
+
+/*
+ * Convert data space db to the corresponding index in a free db.
+ */
+static int
+xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
+{
+ return db % xfs_dir2_free_max_bests(mp);
+}
+
+static int
+xfs_dir3_free_max_bests(struct xfs_mount *mp)
+{
+ return (mp->m_dirblksize - sizeof(struct xfs_dir3_free_hdr)) /
+ sizeof(xfs_dir2_data_off_t);
+}
+
+static __be16 *
+xfs_dir3_free_bests_p(struct xfs_dir2_free *free)
+{
+ return (__be16 *)((char *)free + sizeof(struct xfs_dir3_free_hdr));
+}
+
+/*
+ * Convert data space db to the corresponding free db.
+ */
+static xfs_dir2_db_t
+xfs_dir3_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
+{
+ return XFS_DIR2_FREE_FIRSTDB(mp) + db / xfs_dir3_free_max_bests(mp);
+}
+
+/*
+ * Convert data space db to the corresponding index in a free db.
+ */
+static int
+xfs_dir3_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
+{
+ return db % xfs_dir3_free_max_bests(mp);
+}
+
+static void
+xfs_dir2_free_hdr_from_disk(
+ struct xfs_dir3_icfree_hdr *to,
+ struct xfs_dir2_free *from)
+{
+ to->magic = be32_to_cpu(from->hdr.magic);
+ to->firstdb = be32_to_cpu(from->hdr.firstdb);
+ to->nvalid = be32_to_cpu(from->hdr.nvalid);
+ to->nused = be32_to_cpu(from->hdr.nused);
+ ASSERT(to->magic == XFS_DIR2_FREE_MAGIC);
+}
+
+static void
+xfs_dir2_free_hdr_to_disk(
+ struct xfs_dir2_free *to,
+ struct xfs_dir3_icfree_hdr *from)
+{
+ ASSERT(from->magic == XFS_DIR2_FREE_MAGIC);
+
+ to->hdr.magic = cpu_to_be32(from->magic);
+ to->hdr.firstdb = cpu_to_be32(from->firstdb);
+ to->hdr.nvalid = cpu_to_be32(from->nvalid);
+ to->hdr.nused = cpu_to_be32(from->nused);
+}
+
+static void
+xfs_dir3_free_hdr_from_disk(
+ struct xfs_dir3_icfree_hdr *to,
+ struct xfs_dir2_free *from)
+{
+ struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)from;
+
+ to->magic = be32_to_cpu(hdr3->hdr.magic);
+ to->firstdb = be32_to_cpu(hdr3->firstdb);
+ to->nvalid = be32_to_cpu(hdr3->nvalid);
+ to->nused = be32_to_cpu(hdr3->nused);
+
+ ASSERT(to->magic == XFS_DIR3_FREE_MAGIC);
+}
+
+static void
+xfs_dir3_free_hdr_to_disk(
+ struct xfs_dir2_free *to,
+ struct xfs_dir3_icfree_hdr *from)
+{
+ struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)to;
+
+ ASSERT(from->magic == XFS_DIR3_FREE_MAGIC);
+
+ hdr3->hdr.magic = cpu_to_be32(from->magic);
+ hdr3->firstdb = cpu_to_be32(from->firstdb);
+ hdr3->nvalid = cpu_to_be32(from->nvalid);
+ hdr3->nused = cpu_to_be32(from->nused);
+}
+
+static const struct xfs_dir_ops xfs_dir2_ops = {
+ .sf_entsize = xfs_dir2_sf_entsize,
+ .sf_nextentry = xfs_dir2_sf_nextentry,
+ .sf_get_ftype = xfs_dir2_sfe_get_ftype,
+ .sf_put_ftype = xfs_dir2_sfe_put_ftype,
+ .sf_get_ino = xfs_dir2_sfe_get_ino,
+ .sf_put_ino = xfs_dir2_sfe_put_ino,
+ .sf_get_parent_ino = xfs_dir2_sf_get_parent_ino,
+ .sf_put_parent_ino = xfs_dir2_sf_put_parent_ino,
+
+ .data_entsize = xfs_dir2_data_entsize,
+ .data_get_ftype = xfs_dir2_data_get_ftype,
+ .data_put_ftype = xfs_dir2_data_put_ftype,
+ .data_entry_tag_p = xfs_dir2_data_entry_tag_p,
+ .data_bestfree_p = xfs_dir2_data_bestfree_p,
+
+ .data_dot_offset = sizeof(struct xfs_dir2_data_hdr),
+ .data_dotdot_offset = sizeof(struct xfs_dir2_data_hdr) +
+ XFS_DIR2_DATA_ENTSIZE(1),
+ .data_first_offset = sizeof(struct xfs_dir2_data_hdr) +
+ XFS_DIR2_DATA_ENTSIZE(1) +
+ XFS_DIR2_DATA_ENTSIZE(2),
+ .data_entry_offset = sizeof(struct xfs_dir2_data_hdr),
+
+ .data_dot_entry_p = xfs_dir2_data_dot_entry_p,
+ .data_dotdot_entry_p = xfs_dir2_data_dotdot_entry_p,
+ .data_first_entry_p = xfs_dir2_data_first_entry_p,
+ .data_entry_p = xfs_dir2_data_entry_p,
+ .data_unused_p = xfs_dir2_data_unused_p,
+
+ .leaf_hdr_size = sizeof(struct xfs_dir2_leaf_hdr),
+ .leaf_hdr_to_disk = xfs_dir2_leaf_hdr_to_disk,
+ .leaf_hdr_from_disk = xfs_dir2_leaf_hdr_from_disk,
+ .leaf_max_ents = xfs_dir2_max_leaf_ents,
+ .leaf_ents_p = xfs_dir2_leaf_ents_p,
+
+ .node_hdr_size = sizeof(struct xfs_da_node_hdr),
+ .node_hdr_to_disk = xfs_da2_node_hdr_to_disk,
+ .node_hdr_from_disk = xfs_da2_node_hdr_from_disk,
+ .node_tree_p = xfs_da2_node_tree_p,
+
+ .free_hdr_size = sizeof(struct xfs_dir2_free_hdr),
+ .free_hdr_to_disk = xfs_dir2_free_hdr_to_disk,
+ .free_hdr_from_disk = xfs_dir2_free_hdr_from_disk,
+ .free_max_bests = xfs_dir2_free_max_bests,
+ .free_bests_p = xfs_dir2_free_bests_p,
+ .db_to_fdb = xfs_dir2_db_to_fdb,
+ .db_to_fdindex = xfs_dir2_db_to_fdindex,
+};
+
+static const struct xfs_dir_ops xfs_dir2_ftype_ops = {
+ .sf_entsize = xfs_dir3_sf_entsize,
+ .sf_nextentry = xfs_dir3_sf_nextentry,
+ .sf_get_ftype = xfs_dir3_sfe_get_ftype,
+ .sf_put_ftype = xfs_dir3_sfe_put_ftype,
+ .sf_get_ino = xfs_dir3_sfe_get_ino,
+ .sf_put_ino = xfs_dir3_sfe_put_ino,
+ .sf_get_parent_ino = xfs_dir2_sf_get_parent_ino,
+ .sf_put_parent_ino = xfs_dir2_sf_put_parent_ino,
+
+ .data_entsize = xfs_dir3_data_entsize,
+ .data_get_ftype = xfs_dir3_data_get_ftype,
+ .data_put_ftype = xfs_dir3_data_put_ftype,
+ .data_entry_tag_p = xfs_dir3_data_entry_tag_p,
+ .data_bestfree_p = xfs_dir2_data_bestfree_p,
+
+ .data_dot_offset = sizeof(struct xfs_dir2_data_hdr),
+ .data_dotdot_offset = sizeof(struct xfs_dir2_data_hdr) +
+ XFS_DIR3_DATA_ENTSIZE(1),
+ .data_first_offset = sizeof(struct xfs_dir2_data_hdr) +
+ XFS_DIR3_DATA_ENTSIZE(1) +
+ XFS_DIR3_DATA_ENTSIZE(2),
+ .data_entry_offset = sizeof(struct xfs_dir2_data_hdr),
+
+ .data_dot_entry_p = xfs_dir2_data_dot_entry_p,
+ .data_dotdot_entry_p = xfs_dir2_ftype_data_dotdot_entry_p,
+ .data_first_entry_p = xfs_dir2_ftype_data_first_entry_p,
+ .data_entry_p = xfs_dir2_data_entry_p,
+ .data_unused_p = xfs_dir2_data_unused_p,
+
+ .leaf_hdr_size = sizeof(struct xfs_dir2_leaf_hdr),
+ .leaf_hdr_to_disk = xfs_dir2_leaf_hdr_to_disk,
+ .leaf_hdr_from_disk = xfs_dir2_leaf_hdr_from_disk,
+ .leaf_max_ents = xfs_dir2_max_leaf_ents,
+ .leaf_ents_p = xfs_dir2_leaf_ents_p,
+
+ .node_hdr_size = sizeof(struct xfs_da_node_hdr),
+ .node_hdr_to_disk = xfs_da2_node_hdr_to_disk,
+ .node_hdr_from_disk = xfs_da2_node_hdr_from_disk,
+ .node_tree_p = xfs_da2_node_tree_p,
+
+ .free_hdr_size = sizeof(struct xfs_dir2_free_hdr),
+ .free_hdr_to_disk = xfs_dir2_free_hdr_to_disk,
+ .free_hdr_from_disk = xfs_dir2_free_hdr_from_disk,
+ .free_max_bests = xfs_dir2_free_max_bests,
+ .free_bests_p = xfs_dir2_free_bests_p,
+ .db_to_fdb = xfs_dir2_db_to_fdb,
+ .db_to_fdindex = xfs_dir2_db_to_fdindex,
+};
+
+static const struct xfs_dir_ops xfs_dir3_ops = {
+ .sf_entsize = xfs_dir3_sf_entsize,
+ .sf_nextentry = xfs_dir3_sf_nextentry,
+ .sf_get_ftype = xfs_dir3_sfe_get_ftype,
+ .sf_put_ftype = xfs_dir3_sfe_put_ftype,
+ .sf_get_ino = xfs_dir3_sfe_get_ino,
+ .sf_put_ino = xfs_dir3_sfe_put_ino,
+ .sf_get_parent_ino = xfs_dir2_sf_get_parent_ino,
+ .sf_put_parent_ino = xfs_dir2_sf_put_parent_ino,
+
+ .data_entsize = xfs_dir3_data_entsize,
+ .data_get_ftype = xfs_dir3_data_get_ftype,
+ .data_put_ftype = xfs_dir3_data_put_ftype,
+ .data_entry_tag_p = xfs_dir3_data_entry_tag_p,
+ .data_bestfree_p = xfs_dir3_data_bestfree_p,
+
+ .data_dot_offset = sizeof(struct xfs_dir3_data_hdr),
+ .data_dotdot_offset = sizeof(struct xfs_dir3_data_hdr) +
+ XFS_DIR3_DATA_ENTSIZE(1),
+ .data_first_offset = sizeof(struct xfs_dir3_data_hdr) +
+ XFS_DIR3_DATA_ENTSIZE(1) +
+ XFS_DIR3_DATA_ENTSIZE(2),
+ .data_entry_offset = sizeof(struct xfs_dir3_data_hdr),
+
+ .data_dot_entry_p = xfs_dir3_data_dot_entry_p,
+ .data_dotdot_entry_p = xfs_dir3_data_dotdot_entry_p,
+ .data_first_entry_p = xfs_dir3_data_first_entry_p,
+ .data_entry_p = xfs_dir3_data_entry_p,
+ .data_unused_p = xfs_dir3_data_unused_p,
+
+ .leaf_hdr_size = sizeof(struct xfs_dir3_leaf_hdr),
+ .leaf_hdr_to_disk = xfs_dir3_leaf_hdr_to_disk,
+ .leaf_hdr_from_disk = xfs_dir3_leaf_hdr_from_disk,
+ .leaf_max_ents = xfs_dir3_max_leaf_ents,
+ .leaf_ents_p = xfs_dir3_leaf_ents_p,
+
+ .node_hdr_size = sizeof(struct xfs_da3_node_hdr),
+ .node_hdr_to_disk = xfs_da3_node_hdr_to_disk,
+ .node_hdr_from_disk = xfs_da3_node_hdr_from_disk,
+ .node_tree_p = xfs_da3_node_tree_p,
+
+ .free_hdr_size = sizeof(struct xfs_dir3_free_hdr),
+ .free_hdr_to_disk = xfs_dir3_free_hdr_to_disk,
+ .free_hdr_from_disk = xfs_dir3_free_hdr_from_disk,
+ .free_max_bests = xfs_dir3_free_max_bests,
+ .free_bests_p = xfs_dir3_free_bests_p,
+ .db_to_fdb = xfs_dir3_db_to_fdb,
+ .db_to_fdindex = xfs_dir3_db_to_fdindex,
+};
+
+static const struct xfs_dir_ops xfs_dir2_nondir_ops = {
+ .node_hdr_size = sizeof(struct xfs_da_node_hdr),
+ .node_hdr_to_disk = xfs_da2_node_hdr_to_disk,
+ .node_hdr_from_disk = xfs_da2_node_hdr_from_disk,
+ .node_tree_p = xfs_da2_node_tree_p,
+};
+
+static const struct xfs_dir_ops xfs_dir3_nondir_ops = {
+ .node_hdr_size = sizeof(struct xfs_da3_node_hdr),
+ .node_hdr_to_disk = xfs_da3_node_hdr_to_disk,
+ .node_hdr_from_disk = xfs_da3_node_hdr_from_disk,
+ .node_tree_p = xfs_da3_node_tree_p,
+};
+
+/*
+ * Return the ops structure according to the current config. If we are passed
+ * an inode, then that overrides the default config we use which is based on
+ * feature bits.
+ */
+const struct xfs_dir_ops *
+xfs_dir_get_ops(
+ struct xfs_mount *mp,
+ struct xfs_inode *dp)
+{
+ if (dp)
+ return dp->d_ops;
+ if (mp->m_dir_inode_ops)
+ return mp->m_dir_inode_ops;
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ return &xfs_dir3_ops;
+ if (xfs_sb_version_hasftype(&mp->m_sb))
+ return &xfs_dir2_ftype_ops;
+ return &xfs_dir2_ops;
+}
+
+const struct xfs_dir_ops *
+xfs_nondir_get_ops(
+ struct xfs_mount *mp,
+ struct xfs_inode *dp)
+{
+ if (dp)
+ return dp->d_ops;
+ if (mp->m_nondir_inode_ops)
+ return mp->m_nondir_inode_ops;
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ return &xfs_dir3_nondir_ops;
+ return &xfs_dir2_nondir_ops;
+}
diff --git a/fs/xfs/xfs_dir2_format.h b/fs/xfs/xfs_da_format.h
index 9cf67381adf6..a19d3f8f639c 100644
--- a/fs/xfs/xfs_dir2_format.h
+++ b/fs/xfs/xfs_da_format.h
@@ -16,8 +16,113 @@
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#ifndef __XFS_DIR2_FORMAT_H__
-#define __XFS_DIR2_FORMAT_H__
+#ifndef __XFS_DA_FORMAT_H__
+#define __XFS_DA_FORMAT_H__
+
+/*========================================================================
+ * Directory Structure when greater than XFS_LBSIZE(mp) bytes.
+ *========================================================================*/
+
+/*
+ * This structure is common to both leaf nodes and non-leaf nodes in the Btree.
+ *
+ * It is used to manage a doubly linked list of all blocks at the same
+ * level in the Btree, and to identify which type of block this is.
+ */
+#define XFS_DA_NODE_MAGIC 0xfebe /* magic number: non-leaf blocks */
+#define XFS_ATTR_LEAF_MAGIC 0xfbee /* magic number: attribute leaf blks */
+#define XFS_DIR2_LEAF1_MAGIC 0xd2f1 /* magic number: v2 dirlf single blks */
+#define XFS_DIR2_LEAFN_MAGIC 0xd2ff /* magic number: v2 dirlf multi blks */
+
+typedef struct xfs_da_blkinfo {
+ __be32 forw; /* previous block in list */
+ __be32 back; /* following block in list */
+ __be16 magic; /* validity check on block */
+ __be16 pad; /* unused */
+} xfs_da_blkinfo_t;
+
+/*
+ * CRC enabled directory structure types
+ *
+ * The headers change size for the additional verification information, but
+ * otherwise the tree layouts and contents are unchanged. Hence the da btree
+ * code can use the struct xfs_da_blkinfo for manipulating the tree links and
+ * magic numbers without modification for both v2 and v3 nodes.
+ */
+#define XFS_DA3_NODE_MAGIC 0x3ebe /* magic number: non-leaf blocks */
+#define XFS_ATTR3_LEAF_MAGIC 0x3bee /* magic number: attribute leaf blks */
+#define XFS_DIR3_LEAF1_MAGIC 0x3df1 /* magic number: v2 dirlf single blks */
+#define XFS_DIR3_LEAFN_MAGIC 0x3dff /* magic number: v2 dirlf multi blks */
+
+struct xfs_da3_blkinfo {
+ /*
+ * the node link manipulation code relies on the fact that the first
+ * element of this structure is the struct xfs_da_blkinfo so it can
+ * ignore the differences in the rest of the structures.
+ */
+ struct xfs_da_blkinfo hdr;
+ __be32 crc; /* CRC of block */
+ __be64 blkno; /* first block of the buffer */
+ __be64 lsn; /* sequence number of last write */
+ uuid_t uuid; /* filesystem we belong to */
+ __be64 owner; /* inode that owns the block */
+};
+
+/*
+ * This is the structure of the root and intermediate nodes in the Btree.
+ * The leaf nodes are defined above.
+ *
+ * Entries are not packed.
+ *
+ * Since we have duplicate keys, use a binary search but always follow
+ * all match in the block, not just the first match found.
+ */
+#define XFS_DA_NODE_MAXDEPTH 5 /* max depth of Btree */
+
+typedef struct xfs_da_node_hdr {
+ struct xfs_da_blkinfo info; /* block type, links, etc. */
+ __be16 __count; /* count of active entries */
+ __be16 __level; /* level above leaves (leaf == 0) */
+} xfs_da_node_hdr_t;
+
+struct xfs_da3_node_hdr {
+ struct xfs_da3_blkinfo info; /* block type, links, etc. */
+ __be16 __count; /* count of active entries */
+ __be16 __level; /* level above leaves (leaf == 0) */
+ __be32 __pad32;
+};
+
+#define XFS_DA3_NODE_CRC_OFF (offsetof(struct xfs_da3_node_hdr, info.crc))
+
+typedef struct xfs_da_node_entry {
+ __be32 hashval; /* hash value for this descendant */
+ __be32 before; /* Btree block before this key */
+} xfs_da_node_entry_t;
+
+typedef struct xfs_da_intnode {
+ struct xfs_da_node_hdr hdr;
+ struct xfs_da_node_entry __btree[];
+} xfs_da_intnode_t;
+
+struct xfs_da3_intnode {
+ struct xfs_da3_node_hdr hdr;
+ struct xfs_da_node_entry __btree[];
+};
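
The duplicate-key rule described above boils down to: binary-search for the first entry whose hashval matches, then keep scanning forward while the hash stays the same. A standalone sketch of that pattern over the node entries defined here (illustrative only, not the kernel's lookup code):

#include <stdint.h>
#include <stdio.h>

struct node_entry {             /* models struct xfs_da_node_entry */
        uint32_t hashval;
        uint32_t before;
};

/* Lower-bound binary search for the wanted hash, then visit every entry
 * that shares it, rather than stopping at the first match. */
static void visit_matches(const struct node_entry *btree, int count,
                          uint32_t want)
{
        int lo = 0, hi = count;

        while (lo < hi) {
                int mid = (lo + hi) / 2;

                if (btree[mid].hashval < want)
                        lo = mid + 1;
                else
                        hi = mid;
        }
        for (; lo < count && btree[lo].hashval == want; lo++)
                printf("descend into block %u\n", btree[lo].before);
}

int main(void)
{
        struct node_entry e[] = {
                { 10, 1 }, { 20, 2 }, { 20, 3 }, { 30, 4 },
        };

        visit_matches(e, 4, 20);        /* visits blocks 2 and 3 */
        return 0;
}
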
+
+/*
+ * In-core version of the node header to abstract the differences in the v2 and
+ * v3 disk format of the headers. Callers need to convert to/from disk format as
+ * appropriate.
+ */
+struct xfs_da3_icnode_hdr {
+ __uint32_t forw;
+ __uint32_t back;
+ __uint16_t magic;
+ __uint16_t count;
+ __uint16_t level;
+};
+
+#define XFS_LBSIZE(mp) (mp)->m_sb.sb_blocksize
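
The in-core header above is what shields callers from the two disk layouts; the actual conversion helpers are routed through the xfs_dir_ops vector added to xfs_dir2.h later in this patch. As a rough sketch, a v2 "from disk" conversion amounts to a field-by-field copy (userspace model; the kernel additionally converts the big-endian fields with be16_to_cpu()/be32_to_cpu()):

#include <stdint.h>
#include <stdio.h>

struct node_hdr_v2 {            /* models struct xfs_da_node_hdr */
        uint32_t forw, back;
        uint16_t magic, pad;
        uint16_t count;
        uint16_t level;
};

struct icnode_hdr {             /* models struct xfs_da3_icnode_hdr */
        uint32_t forw, back;
        uint16_t magic;
        uint16_t count;
        uint16_t level;
};

static void node_hdr_from_disk_v2(struct icnode_hdr *to,
                                  const struct node_hdr_v2 *from)
{
        to->forw  = from->forw;
        to->back  = from->back;
        to->magic = from->magic;
        to->count = from->count;
        to->level = from->level;
}

int main(void)
{
        struct node_hdr_v2 disk = { .magic = 0xfebe, .count = 3, .level = 1 };
        struct icnode_hdr ic;

        node_hdr_from_disk_v2(&ic, &disk);
        printf("count=%u level=%u\n", ic.count, ic.level);
        return 0;
}
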
/*
* Directory version 2.
@@ -189,79 +294,6 @@ xfs_dir2_sf_firstentry(struct xfs_dir2_sf_hdr *hdr)
((char *)hdr + xfs_dir2_sf_hdr_size(hdr->i8count));
}
-static inline int
-xfs_dir3_sf_entsize(
- struct xfs_mount *mp,
- struct xfs_dir2_sf_hdr *hdr,
- int len)
-{
- int count = sizeof(struct xfs_dir2_sf_entry); /* namelen + offset */
-
- count += len; /* name */
- count += hdr->i8count ? sizeof(xfs_dir2_ino8_t) :
- sizeof(xfs_dir2_ino4_t); /* ino # */
- if (xfs_sb_version_hasftype(&mp->m_sb))
- count += sizeof(__uint8_t); /* file type */
- return count;
-}
-
-static inline struct xfs_dir2_sf_entry *
-xfs_dir3_sf_nextentry(
- struct xfs_mount *mp,
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep)
-{
- return (struct xfs_dir2_sf_entry *)
- ((char *)sfep + xfs_dir3_sf_entsize(mp, hdr, sfep->namelen));
-}
-
-/*
- * in dir3 shortform directories, the file type field is stored at a variable
- * offset after the inode number. Because it's only a single byte, endian
- * conversion is not necessary.
- */
-static inline __uint8_t *
-xfs_dir3_sfe_ftypep(
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep)
-{
- return (__uint8_t *)&sfep->name[sfep->namelen];
-}
-
-static inline __uint8_t
-xfs_dir3_sfe_get_ftype(
- struct xfs_mount *mp,
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep)
-{
- __uint8_t *ftp;
-
- if (!xfs_sb_version_hasftype(&mp->m_sb))
- return XFS_DIR3_FT_UNKNOWN;
-
- ftp = xfs_dir3_sfe_ftypep(hdr, sfep);
- if (*ftp >= XFS_DIR3_FT_MAX)
- return XFS_DIR3_FT_UNKNOWN;
- return *ftp;
-}
-
-static inline void
-xfs_dir3_sfe_put_ftype(
- struct xfs_mount *mp,
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep,
- __uint8_t ftype)
-{
- __uint8_t *ftp;
-
- ASSERT(ftype < XFS_DIR3_FT_MAX);
-
- if (!xfs_sb_version_hasftype(&mp->m_sb))
- return;
- ftp = xfs_dir3_sfe_ftypep(hdr, sfep);
- *ftp = ftype;
-}
-
/*
* Data block structures.
*
@@ -345,17 +377,6 @@ struct xfs_dir3_data_hdr {
#define XFS_DIR3_DATA_CRC_OFF offsetof(struct xfs_dir3_data_hdr, hdr.crc)
-static inline struct xfs_dir2_data_free *
-xfs_dir3_data_bestfree_p(struct xfs_dir2_data_hdr *hdr)
-{
- if (hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
- hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) {
- struct xfs_dir3_data_hdr *hdr3 = (struct xfs_dir3_data_hdr *)hdr;
- return hdr3->best_free;
- }
- return hdr->bestfree;
-}
-
/*
* Active entry in a data block.
*
@@ -389,72 +410,6 @@ typedef struct xfs_dir2_data_unused {
} xfs_dir2_data_unused_t;
/*
- * Size of a data entry.
- */
-static inline int
-__xfs_dir3_data_entsize(
- bool ftype,
- int n)
-{
- int size = offsetof(struct xfs_dir2_data_entry, name[0]);
-
- size += n;
- size += sizeof(xfs_dir2_data_off_t);
- if (ftype)
- size += sizeof(__uint8_t);
- return roundup(size, XFS_DIR2_DATA_ALIGN);
-}
-static inline int
-xfs_dir3_data_entsize(
- struct xfs_mount *mp,
- int n)
-{
- bool ftype = xfs_sb_version_hasftype(&mp->m_sb) ? true : false;
- return __xfs_dir3_data_entsize(ftype, n);
-}
-
-static inline __uint8_t
-xfs_dir3_dirent_get_ftype(
- struct xfs_mount *mp,
- struct xfs_dir2_data_entry *dep)
-{
- if (xfs_sb_version_hasftype(&mp->m_sb)) {
- __uint8_t type = dep->name[dep->namelen];
-
- ASSERT(type < XFS_DIR3_FT_MAX);
- if (type < XFS_DIR3_FT_MAX)
- return type;
-
- }
- return XFS_DIR3_FT_UNKNOWN;
-}
-
-static inline void
-xfs_dir3_dirent_put_ftype(
- struct xfs_mount *mp,
- struct xfs_dir2_data_entry *dep,
- __uint8_t type)
-{
- ASSERT(type < XFS_DIR3_FT_MAX);
- ASSERT(dep->namelen != 0);
-
- if (xfs_sb_version_hasftype(&mp->m_sb))
- dep->name[dep->namelen] = type;
-}
-
-/*
- * Pointer to an entry's tag word.
- */
-static inline __be16 *
-xfs_dir3_data_entry_tag_p(
- struct xfs_mount *mp,
- struct xfs_dir2_data_entry *dep)
-{
- return (__be16 *)((char *)dep +
- xfs_dir3_data_entsize(mp, dep->namelen) - sizeof(__be16));
-}
-
-/*
* Pointer to a freespace's tag word.
*/
static inline __be16 *
@@ -464,93 +419,6 @@ xfs_dir2_data_unused_tag_p(struct xfs_dir2_data_unused *dup)
be16_to_cpu(dup->length) - sizeof(__be16));
}
-static inline size_t
-xfs_dir3_data_hdr_size(bool dir3)
-{
- if (dir3)
- return sizeof(struct xfs_dir3_data_hdr);
- return sizeof(struct xfs_dir2_data_hdr);
-}
-
-static inline size_t
-xfs_dir3_data_entry_offset(struct xfs_dir2_data_hdr *hdr)
-{
- bool dir3 = hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
- hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC);
- return xfs_dir3_data_hdr_size(dir3);
-}
-
-static inline struct xfs_dir2_data_entry *
-xfs_dir3_data_entry_p(struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + xfs_dir3_data_entry_offset(hdr));
-}
-
-static inline struct xfs_dir2_data_unused *
-xfs_dir3_data_unused_p(struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_unused *)
- ((char *)hdr + xfs_dir3_data_entry_offset(hdr));
-}
-
-/*
- * Offsets of . and .. in data space (always block 0)
- *
- * XXX: there is scope for significant optimisation of the logic here. Right
- * now we are checking for "dir3 format" over and over again. Ideally we should
- * only do it once for each operation.
- */
-static inline xfs_dir2_data_aoff_t
-xfs_dir3_data_dot_offset(struct xfs_mount *mp)
-{
- return xfs_dir3_data_hdr_size(xfs_sb_version_hascrc(&mp->m_sb));
-}
-
-static inline xfs_dir2_data_aoff_t
-xfs_dir3_data_dotdot_offset(struct xfs_mount *mp)
-{
- return xfs_dir3_data_dot_offset(mp) +
- xfs_dir3_data_entsize(mp, 1);
-}
-
-static inline xfs_dir2_data_aoff_t
-xfs_dir3_data_first_offset(struct xfs_mount *mp)
-{
- return xfs_dir3_data_dotdot_offset(mp) +
- xfs_dir3_data_entsize(mp, 2);
-}
-
-/*
- * location of . and .. in data space (always block 0)
- */
-static inline struct xfs_dir2_data_entry *
-xfs_dir3_data_dot_entry_p(
- struct xfs_mount *mp,
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + xfs_dir3_data_dot_offset(mp));
-}
-
-static inline struct xfs_dir2_data_entry *
-xfs_dir3_data_dotdot_entry_p(
- struct xfs_mount *mp,
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + xfs_dir3_data_dotdot_offset(mp));
-}
-
-static inline struct xfs_dir2_data_entry *
-xfs_dir3_data_first_entry_p(
- struct xfs_mount *mp,
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + xfs_dir3_data_first_offset(mp));
-}
-
/*
* Leaf block structures.
*
@@ -645,39 +513,6 @@ struct xfs_dir3_leaf {
#define XFS_DIR3_LEAF_CRC_OFF offsetof(struct xfs_dir3_leaf_hdr, info.crc)
-extern void xfs_dir3_leaf_hdr_from_disk(struct xfs_dir3_icleaf_hdr *to,
- struct xfs_dir2_leaf *from);
-
-static inline int
-xfs_dir3_leaf_hdr_size(struct xfs_dir2_leaf *lp)
-{
- if (lp->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAF1_MAGIC) ||
- lp->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC))
- return sizeof(struct xfs_dir3_leaf_hdr);
- return sizeof(struct xfs_dir2_leaf_hdr);
-}
-
-static inline int
-xfs_dir3_max_leaf_ents(struct xfs_mount *mp, struct xfs_dir2_leaf *lp)
-{
- return (mp->m_dirblksize - xfs_dir3_leaf_hdr_size(lp)) /
- (uint)sizeof(struct xfs_dir2_leaf_entry);
-}
-
-/*
- * Get address of the bestcount field in the single-leaf block.
- */
-static inline struct xfs_dir2_leaf_entry *
-xfs_dir3_leaf_ents_p(struct xfs_dir2_leaf *lp)
-{
- if (lp->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAF1_MAGIC) ||
- lp->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
- struct xfs_dir3_leaf *lp3 = (struct xfs_dir3_leaf *)lp;
- return lp3->__ents;
- }
- return lp->__ents;
-}
-
/*
* Get address of the bestcount field in the single-leaf block.
*/
@@ -869,48 +704,6 @@ struct xfs_dir3_icfree_hdr {
};
-void xfs_dir3_free_hdr_from_disk(struct xfs_dir3_icfree_hdr *to,
- struct xfs_dir2_free *from);
-
-static inline int
-xfs_dir3_free_hdr_size(struct xfs_mount *mp)
-{
- if (xfs_sb_version_hascrc(&mp->m_sb))
- return sizeof(struct xfs_dir3_free_hdr);
- return sizeof(struct xfs_dir2_free_hdr);
-}
-
-static inline int
-xfs_dir3_free_max_bests(struct xfs_mount *mp)
-{
- return (mp->m_dirblksize - xfs_dir3_free_hdr_size(mp)) /
- sizeof(xfs_dir2_data_off_t);
-}
-
-static inline __be16 *
-xfs_dir3_free_bests_p(struct xfs_mount *mp, struct xfs_dir2_free *free)
-{
- return (__be16 *)((char *)free + xfs_dir3_free_hdr_size(mp));
-}
-
-/*
- * Convert data space db to the corresponding free db.
- */
-static inline xfs_dir2_db_t
-xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
-{
- return XFS_DIR2_FREE_FIRSTDB(mp) + db / xfs_dir3_free_max_bests(mp);
-}
-
-/*
- * Convert data space db to the corresponding index in a free db.
- */
-static inline int
-xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
-{
- return db % xfs_dir3_free_max_bests(mp);
-}
-
/*
* Single block format.
*
@@ -961,4 +754,262 @@ xfs_dir2_block_leaf_p(struct xfs_dir2_block_tail *btp)
return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count);
}
-#endif /* __XFS_DIR2_FORMAT_H__ */
+
+/*
+ * Attribute storage layout
+ *
+ * Attribute lists are structured around Btrees where all the data
+ * elements are in the leaf nodes. Attribute names are hashed into an int,
+ * then that int is used as the index into the Btree. Since the hashval
+ * of an attribute name may not be unique, we may have duplicate keys. The
+ * internal links in the Btree are logical block offsets into the file.
+ *
+ *========================================================================
+ * Attribute structure when equal to XFS_LBSIZE(mp) bytes.
+ *========================================================================
+ *
+ * Struct leaf_entry's are packed from the top. Name/values grow from the
+ * bottom but are not packed. The freemap contains run-length-encoded entries
+ * for the free bytes after the leaf_entry's, but only the N largest such
+ * regions; smaller runs are dropped. When the freemap doesn't show enough space
+ * for an allocation, we compact the name/value area and try again. If we
+ * still don't have enough space, then we have to split the block. The
+ * name/value structs (both local and remote versions) must be 32bit aligned.
+ *
+ * Since we have duplicate hash keys, for each key that matches, compare
+ * the actual name string. The root and intermediate node search always
+ * takes the first-in-the-block key match found, so we should only have
+ * to work "forw"ard. If none matches, continue with the "forw"ard leaf
+ * nodes until the hash key changes or the attribute name is found.
+ *
+ * We store the fact that an attribute is a ROOT/USER/SECURE attribute in
+ * the leaf_entry. The namespaces are independent only because we also look
+ * at the namespace bit when we are looking for a matching attribute name.
+ *
+ * We also store an "incomplete" bit in the leaf_entry. It shows that an
+ * attribute is in the middle of being created and should not be shown to
+ * the user if we crash during the time that the bit is set. We clear the
+ * bit when we have finished setting up the attribute. We do this because
+ * we cannot create some large attributes inside a single transaction, and we
+ * need some indication that we weren't finished if we crash in the middle.
+ */
+#define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */
+
+typedef struct xfs_attr_leaf_map { /* RLE map of free bytes */
+ __be16 base; /* base of free region */
+ __be16 size; /* length of free region */
+} xfs_attr_leaf_map_t;
+
+typedef struct xfs_attr_leaf_hdr { /* constant-structure header block */
+ xfs_da_blkinfo_t info; /* block type, links, etc. */
+ __be16 count; /* count of active leaf_entry's */
+ __be16 usedbytes; /* num bytes of names/values stored */
+ __be16 firstused; /* first used byte in name area */
+ __u8 holes; /* != 0 if blk needs compaction */
+ __u8 pad1;
+ xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
+ /* N largest free regions */
+} xfs_attr_leaf_hdr_t;
+
+typedef struct xfs_attr_leaf_entry { /* sorted on key, not name */
+ __be32 hashval; /* hash value of name */
+ __be16 nameidx; /* index into buffer of name/value */
+ __u8 flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
+ __u8 pad2; /* unused pad byte */
+} xfs_attr_leaf_entry_t;
+
+typedef struct xfs_attr_leaf_name_local {
+ __be16 valuelen; /* number of bytes in value */
+ __u8 namelen; /* length of name bytes */
+ __u8 nameval[1]; /* name/value bytes */
+} xfs_attr_leaf_name_local_t;
+
+typedef struct xfs_attr_leaf_name_remote {
+ __be32 valueblk; /* block number of value bytes */
+ __be32 valuelen; /* number of bytes in value */
+ __u8 namelen; /* length of name bytes */
+ __u8 name[1]; /* name bytes */
+} xfs_attr_leaf_name_remote_t;
+
+typedef struct xfs_attr_leafblock {
+ xfs_attr_leaf_hdr_t hdr; /* constant-structure header block */
+ xfs_attr_leaf_entry_t entries[1]; /* sorted on key, not name */
+ xfs_attr_leaf_name_local_t namelist; /* grows from bottom of buf */
+ xfs_attr_leaf_name_remote_t valuelist; /* grows from bottom of buf */
+} xfs_attr_leafblock_t;
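
The comment at the top of this section describes a three-step policy when adding a name: use a tracked free run if one is large enough, otherwise compact the name/value area, otherwise split the block. A standalone sketch of that decision with toy inputs (not the kernel's actual insertion path):

#include <stdio.h>

enum outcome { USED_FREEMAP, COMPACTED, SPLIT_BLOCK };

/* need: bytes required; freemap_best: largest run the freemap tracks;
 * total_free: all free bytes in the block, tracked or not. */
static enum outcome place_entry(int need, int freemap_best, int total_free)
{
        if (freemap_best >= need)
                return USED_FREEMAP;    /* a tracked run is big enough */
        if (total_free >= need)
                return COMPACTED;       /* space exists but is fragmented */
        return SPLIT_BLOCK;             /* block genuinely full */
}

int main(void)
{
        /* 40 bytes needed, best tracked run is 24, 56 free in total */
        printf("%d\n", place_entry(40, 24, 56));        /* prints 1: COMPACTED */
        return 0;
}
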
+
+/*
+ * CRC enabled leaf structures. Called "version 3" structures to match the
+ * version number of the directory and dablk structures for this feature, and
+ * attr2 is already taken by the variable inode attribute fork size feature.
+ */
+struct xfs_attr3_leaf_hdr {
+ struct xfs_da3_blkinfo info;
+ __be16 count;
+ __be16 usedbytes;
+ __be16 firstused;
+ __u8 holes;
+ __u8 pad1;
+ struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE];
+ __be32 pad2; /* 64 bit alignment */
+};
+
+#define XFS_ATTR3_LEAF_CRC_OFF (offsetof(struct xfs_attr3_leaf_hdr, info.crc))
+
+struct xfs_attr3_leafblock {
+ struct xfs_attr3_leaf_hdr hdr;
+ struct xfs_attr_leaf_entry entries[1];
+
+ /*
+ * The rest of the block contains the following structures after the
+ * leaf entries, growing from the bottom up. The variables are never
+ * referenced, the locations accessed purely from helper functions.
+ *
+ * struct xfs_attr_leaf_name_local
+ * struct xfs_attr_leaf_name_remote
+ */
+};
+
+/*
+ * incore, neutral version of the attribute leaf header
+ */
+struct xfs_attr3_icleaf_hdr {
+ __uint32_t forw;
+ __uint32_t back;
+ __uint16_t magic;
+ __uint16_t count;
+ __uint16_t usedbytes;
+ __uint16_t firstused;
+ __u8 holes;
+ struct {
+ __uint16_t base;
+ __uint16_t size;
+ } freemap[XFS_ATTR_LEAF_MAPSIZE];
+};
+
+/*
+ * Flags used in the leaf_entry[i].flags field.
+ * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
+ * on the system call, they are "or"ed together for various operations.
+ */
+#define XFS_ATTR_LOCAL_BIT 0 /* attr is stored locally */
+#define XFS_ATTR_ROOT_BIT 1 /* limit access to trusted attrs */
+#define XFS_ATTR_SECURE_BIT 2 /* limit access to secure attrs */
+#define XFS_ATTR_INCOMPLETE_BIT 7 /* attr in middle of create/delete */
+#define XFS_ATTR_LOCAL (1 << XFS_ATTR_LOCAL_BIT)
+#define XFS_ATTR_ROOT (1 << XFS_ATTR_ROOT_BIT)
+#define XFS_ATTR_SECURE (1 << XFS_ATTR_SECURE_BIT)
+#define XFS_ATTR_INCOMPLETE (1 << XFS_ATTR_INCOMPLETE_BIT)
+
+/*
+ * Conversion macros for converting namespace bits from argument flags
+ * to ondisk flags.
+ */
+#define XFS_ATTR_NSP_ARGS_MASK (ATTR_ROOT | ATTR_SECURE)
+#define XFS_ATTR_NSP_ONDISK_MASK (XFS_ATTR_ROOT | XFS_ATTR_SECURE)
+#define XFS_ATTR_NSP_ONDISK(flags) ((flags) & XFS_ATTR_NSP_ONDISK_MASK)
+#define XFS_ATTR_NSP_ARGS(flags) ((flags) & XFS_ATTR_NSP_ARGS_MASK)
+#define XFS_ATTR_NSP_ARGS_TO_ONDISK(x) (((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) |\
+ ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))
+#define XFS_ATTR_NSP_ONDISK_TO_ARGS(x) (((x) & XFS_ATTR_ROOT ? ATTR_ROOT : 0) |\
+ ((x) & XFS_ATTR_SECURE ? ATTR_SECURE : 0))
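
A quick standalone check of the round trip these macros perform. ATTR_ROOT and ATTR_SECURE are the userspace API flags from xfs_fs.h; the numeric values below are what that header used around the time of this patch and should be verified against your tree, so treat them as assumptions for the example:

#include <stdio.h>

#define ATTR_ROOT               0x0002  /* assumed userspace flag values */
#define ATTR_SECURE             0x0008

#define XFS_ATTR_ROOT           (1 << 1)        /* ondisk bits, as above */
#define XFS_ATTR_SECURE         (1 << 2)

static int args_to_ondisk(int x)
{
        return (x & ATTR_ROOT ? XFS_ATTR_ROOT : 0) |
               (x & ATTR_SECURE ? XFS_ATTR_SECURE : 0);
}

static int ondisk_to_args(int x)
{
        return (x & XFS_ATTR_ROOT ? ATTR_ROOT : 0) |
               (x & XFS_ATTR_SECURE ? ATTR_SECURE : 0);
}

int main(void)
{
        int disk = args_to_ondisk(ATTR_SECURE);

        printf("ondisk=0x%x back=0x%x\n", disk, ondisk_to_args(disk));
        return 0;
}
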
+
+/*
+ * Alignment for namelist and valuelist entries (since they are mixed
+ * there can be only one alignment value)
+ */
+#define XFS_ATTR_LEAF_NAME_ALIGN ((uint)sizeof(xfs_dablk_t))
+
+static inline int
+xfs_attr3_leaf_hdr_size(struct xfs_attr_leafblock *leafp)
+{
+ if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
+ return sizeof(struct xfs_attr3_leaf_hdr);
+ return sizeof(struct xfs_attr_leaf_hdr);
+}
+
+static inline struct xfs_attr_leaf_entry *
+xfs_attr3_leaf_entryp(xfs_attr_leafblock_t *leafp)
+{
+ if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
+ return &((struct xfs_attr3_leafblock *)leafp)->entries[0];
+ return &leafp->entries[0];
+}
+
+/*
+ * Cast typed pointers for "local" and "remote" name/value structs.
+ */
+static inline char *
+xfs_attr3_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
+{
+ struct xfs_attr_leaf_entry *entries = xfs_attr3_leaf_entryp(leafp);
+
+ return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)];
+}
+
+static inline xfs_attr_leaf_name_remote_t *
+xfs_attr3_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
+{
+ return (xfs_attr_leaf_name_remote_t *)xfs_attr3_leaf_name(leafp, idx);
+}
+
+static inline xfs_attr_leaf_name_local_t *
+xfs_attr3_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
+{
+ return (xfs_attr_leaf_name_local_t *)xfs_attr3_leaf_name(leafp, idx);
+}
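
The nameidx stored in each leaf_entry is a byte offset from the start of the block, and the casts above simply overlay the local or remote layout at that offset. A standalone sketch of pulling a local name/value pair out of a block image (endian and flag handling omitted; the block contents here are invented for the example):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct name_local {             /* models xfs_attr_leaf_name_local_t */
        uint16_t valuelen;
        uint8_t  namelen;
        uint8_t  nameval[1];    /* name bytes followed by value bytes */
};

int main(void)
{
        uint8_t block[64] = { 0 };
        uint16_t nameidx = 16;  /* as found in leaf_entry[i].nameidx */
        struct name_local *l = (struct name_local *)&block[nameidx];

        l->namelen  = 4;
        l->valuelen = 5;
        memcpy(l->nameval, "userhello", 9);     /* "user" + "hello" */

        printf("name=%.*s value=%.*s\n",
               l->namelen, (char *)l->nameval,
               l->valuelen, (char *)l->nameval + l->namelen);
        return 0;
}
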
+
+/*
+ * Calculate total bytes used (including trailing pad for alignment) for
+ * a "local" name/value structure, a "remote" name/value structure, and
+ * a pointer which might be either.
+ */
+static inline int xfs_attr_leaf_entsize_remote(int nlen)
+{
+ return ((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \
+ XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
+}
+
+static inline int xfs_attr_leaf_entsize_local(int nlen, int vlen)
+{
+ return ((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) +
+ XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
+}
+
+static inline int xfs_attr_leaf_entsize_local_max(int bsize)
+{
+ return (((bsize) >> 1) + ((bsize) >> 2));
+}
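
XFS_ATTR_LEAF_NAME_ALIGN is sizeof(xfs_dablk_t), i.e. 4 bytes, so every local or remote name/value struct is rounded up to a 4-byte boundary. A standalone check of the local-entry arithmetic, assuming the 4-byte header of the local struct shown earlier:

#include <stdio.h>

#define ALIGN           4       /* XFS_ATTR_LEAF_NAME_ALIGN in this format */
#define LOCAL_HDR       4       /* sizeof(xfs_attr_leaf_name_local_t): 2+1+1 */

/* Mirrors xfs_attr_leaf_entsize_local(): header + name + value, minus the
 * one nameval[1] byte already counted in the header, rounded up. */
static int entsize_local(int nlen, int vlen)
{
        return (LOCAL_HDR - 1 + nlen + vlen + ALIGN - 1) & ~(ALIGN - 1);
}

int main(void)
{
        /* "user" (4 bytes) plus a 10-byte value: 3 + 4 + 10 = 17, rounds to 20 */
        printf("%d\n", entsize_local(4, 10));
        return 0;
}
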
+
+
+
+/*
+ * Remote attribute block format definition
+ *
+ * There is one of these headers per filesystem block in a remote attribute.
+ * This is done to ensure there is a 1:1 mapping between the attribute value
+ * length and the number of blocks needed to store the attribute. This makes the
+ * verification of a buffer a little more complex, but greatly simplifies the
+ * allocation, reading and writing of these attributes as we don't have to guess
+ * the number of blocks needed to store the attribute data.
+ */
+#define XFS_ATTR3_RMT_MAGIC 0x5841524d /* XARM */
+
+struct xfs_attr3_rmt_hdr {
+ __be32 rm_magic;
+ __be32 rm_offset;
+ __be32 rm_bytes;
+ __be32 rm_crc;
+ uuid_t rm_uuid;
+ __be64 rm_owner;
+ __be64 rm_blkno;
+ __be64 rm_lsn;
+};
+
+#define XFS_ATTR3_RMT_CRC_OFF offsetof(struct xfs_attr3_rmt_hdr, rm_crc)
+
+#define XFS_ATTR3_RMT_BUF_SPACE(mp, bufsize) \
+ ((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
+ sizeof(struct xfs_attr3_rmt_hdr) : 0))
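
Since every remote-attribute block on a CRC filesystem carries one of these headers, the usable payload per block shrinks by the header size and the number of blocks for a value follows directly. A small sketch of that arithmetic; the 4096-byte block size and the 56-byte header (4*4 + 16 + 3*8 bytes, per the struct above) are assumptions for the example:

#include <stdio.h>

#define BLOCKSIZE       4096    /* assumed fs block size */
#define RMT_HDR_SIZE    56      /* size of the xfs_attr3_rmt_hdr above */

static int rmt_blocks(int valuelen, int has_crc)
{
        int space = BLOCKSIZE - (has_crc ? RMT_HDR_SIZE : 0);

        return (valuelen + space - 1) / space;  /* round up */
}

int main(void)
{
        /* an 8100-byte value needs 2 blocks without CRCs, 3 with them */
        printf("%d %d\n", rmt_blocks(8100, 0), rmt_blocks(8100, 1));
        return 0;
}
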
+
+#endif /* __XFS_DA_FORMAT_H__ */
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index edf203ab50af..ce16ef02997a 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -17,25 +17,24 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
-#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
+#include "xfs_dinode.h"
struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
@@ -96,13 +95,17 @@ xfs_dir_mount(
ASSERT(xfs_sb_version_hasdirv2(&mp->m_sb));
ASSERT((1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) <=
XFS_MAX_BLOCKSIZE);
+
+ mp->m_dir_inode_ops = xfs_dir_get_ops(mp, NULL);
+ mp->m_nondir_inode_ops = xfs_nondir_get_ops(mp, NULL);
+
mp->m_dirblksize = 1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog);
mp->m_dirblkfsbs = 1 << mp->m_sb.sb_dirblklog;
mp->m_dirdatablk = xfs_dir2_db_to_da(mp, XFS_DIR2_DATA_FIRSTDB(mp));
mp->m_dirleafblk = xfs_dir2_db_to_da(mp, XFS_DIR2_LEAF_FIRSTDB(mp));
mp->m_dirfreeblk = xfs_dir2_db_to_da(mp, XFS_DIR2_FREE_FIRSTDB(mp));
- nodehdr_size = __xfs_da3_node_hdr_size(xfs_sb_version_hascrc(&mp->m_sb));
+ nodehdr_size = mp->m_dir_inode_ops->node_hdr_size;
mp->m_attr_node_ents = (mp->m_sb.sb_blocksize - nodehdr_size) /
(uint)sizeof(xfs_da_node_entry_t);
mp->m_dir_node_ents = (mp->m_dirblksize - nodehdr_size) /
@@ -113,6 +116,7 @@ xfs_dir_mount(
mp->m_dirnameops = &xfs_ascii_ci_nameops;
else
mp->m_dirnameops = &xfs_default_nameops;
+
}
/*
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h
index 9910401327d4..cec70e0781ab 100644
--- a/fs/xfs/xfs_dir2.h
+++ b/fs/xfs/xfs_dir2.h
@@ -32,6 +32,83 @@ struct xfs_dir2_data_unused;
extern struct xfs_name xfs_name_dotdot;
/*
+ * directory operations vector for encode/decode routines
+ */
+struct xfs_dir_ops {
+ int (*sf_entsize)(struct xfs_dir2_sf_hdr *hdr, int len);
+ struct xfs_dir2_sf_entry *
+ (*sf_nextentry)(struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep);
+ __uint8_t (*sf_get_ftype)(struct xfs_dir2_sf_entry *sfep);
+ void (*sf_put_ftype)(struct xfs_dir2_sf_entry *sfep,
+ __uint8_t ftype);
+ xfs_ino_t (*sf_get_ino)(struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep);
+ void (*sf_put_ino)(struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep,
+ xfs_ino_t ino);
+ xfs_ino_t (*sf_get_parent_ino)(struct xfs_dir2_sf_hdr *hdr);
+ void (*sf_put_parent_ino)(struct xfs_dir2_sf_hdr *hdr,
+ xfs_ino_t ino);
+
+ int (*data_entsize)(int len);
+ __uint8_t (*data_get_ftype)(struct xfs_dir2_data_entry *dep);
+ void (*data_put_ftype)(struct xfs_dir2_data_entry *dep,
+ __uint8_t ftype);
+ __be16 * (*data_entry_tag_p)(struct xfs_dir2_data_entry *dep);
+ struct xfs_dir2_data_free *
+ (*data_bestfree_p)(struct xfs_dir2_data_hdr *hdr);
+
+ xfs_dir2_data_aoff_t data_dot_offset;
+ xfs_dir2_data_aoff_t data_dotdot_offset;
+ xfs_dir2_data_aoff_t data_first_offset;
+ size_t data_entry_offset;
+
+ struct xfs_dir2_data_entry *
+ (*data_dot_entry_p)(struct xfs_dir2_data_hdr *hdr);
+ struct xfs_dir2_data_entry *
+ (*data_dotdot_entry_p)(struct xfs_dir2_data_hdr *hdr);
+ struct xfs_dir2_data_entry *
+ (*data_first_entry_p)(struct xfs_dir2_data_hdr *hdr);
+ struct xfs_dir2_data_entry *
+ (*data_entry_p)(struct xfs_dir2_data_hdr *hdr);
+ struct xfs_dir2_data_unused *
+ (*data_unused_p)(struct xfs_dir2_data_hdr *hdr);
+
+ int leaf_hdr_size;
+ void (*leaf_hdr_to_disk)(struct xfs_dir2_leaf *to,
+ struct xfs_dir3_icleaf_hdr *from);
+ void (*leaf_hdr_from_disk)(struct xfs_dir3_icleaf_hdr *to,
+ struct xfs_dir2_leaf *from);
+ int (*leaf_max_ents)(struct xfs_mount *mp);
+ struct xfs_dir2_leaf_entry *
+ (*leaf_ents_p)(struct xfs_dir2_leaf *lp);
+
+ int node_hdr_size;
+ void (*node_hdr_to_disk)(struct xfs_da_intnode *to,
+ struct xfs_da3_icnode_hdr *from);
+ void (*node_hdr_from_disk)(struct xfs_da3_icnode_hdr *to,
+ struct xfs_da_intnode *from);
+ struct xfs_da_node_entry *
+ (*node_tree_p)(struct xfs_da_intnode *dap);
+
+ int free_hdr_size;
+ void (*free_hdr_to_disk)(struct xfs_dir2_free *to,
+ struct xfs_dir3_icfree_hdr *from);
+ void (*free_hdr_from_disk)(struct xfs_dir3_icfree_hdr *to,
+ struct xfs_dir2_free *from);
+ int (*free_max_bests)(struct xfs_mount *mp);
+ __be16 * (*free_bests_p)(struct xfs_dir2_free *free);
+ xfs_dir2_db_t (*db_to_fdb)(struct xfs_mount *mp, xfs_dir2_db_t db);
+ int (*db_to_fdindex)(struct xfs_mount *mp, xfs_dir2_db_t db);
+};
+
+extern const struct xfs_dir_ops *
+ xfs_dir_get_ops(struct xfs_mount *mp, struct xfs_inode *dp);
+extern const struct xfs_dir_ops *
+ xfs_nondir_get_ops(struct xfs_mount *mp, struct xfs_inode *dp);
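
The vector above replaces the per-call xfs_sb_version_hasftype()/hascrc() branching that the deleted inline helpers performed: the on-disk format is decided once and the matching table of function pointers is cached on the inode/mount. A standalone sketch of that pattern with toy types and toy arithmetic (not the real on-disk sizes, and not xfs_dir_get_ops() itself):

#include <stdio.h>

struct dir_ops {
        int (*data_entsize)(int namelen);
};

/* Two encodings, chosen once instead of testing a feature bit every call. */
static int entsize_v2(int namelen) { return namelen + 11; }
static int entsize_v3(int namelen) { return namelen + 12; }  /* + ftype byte */

static const struct dir_ops v2_ops = { .data_entsize = entsize_v2 };
static const struct dir_ops v3_ops = { .data_entsize = entsize_v3 };

static const struct dir_ops *get_ops(int has_ftype)
{
        return has_ftype ? &v3_ops : &v2_ops;
}

int main(void)
{
        const struct dir_ops *d_ops = get_ops(1);       /* cached at "mount" */

        printf("%d\n", d_ops->data_entsize(4));
        return 0;
}
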
+
+/*
* Generic directory interface routines
*/
extern void xfs_dir_startup(void);
@@ -65,37 +142,30 @@ extern int xfs_dir2_sf_to_block(struct xfs_da_args *args);
/*
* Interface routines used by userspace utilities
*/
-extern xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf_hdr *sfp);
-extern void xfs_dir2_sf_put_parent_ino(struct xfs_dir2_sf_hdr *sfp,
- xfs_ino_t ino);
-extern xfs_ino_t xfs_dir3_sfe_get_ino(struct xfs_mount *mp,
- struct xfs_dir2_sf_hdr *sfp, struct xfs_dir2_sf_entry *sfep);
-extern void xfs_dir3_sfe_put_ino(struct xfs_mount *mp,
- struct xfs_dir2_sf_hdr *hdr, struct xfs_dir2_sf_entry *sfep,
- xfs_ino_t ino);
-
extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
struct xfs_buf *bp);
-extern void xfs_dir2_data_freescan(struct xfs_mount *mp,
+extern void xfs_dir2_data_freescan(struct xfs_inode *dp,
struct xfs_dir2_data_hdr *hdr, int *loghead);
-extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_buf *bp,
- struct xfs_dir2_data_entry *dep);
-extern void xfs_dir2_data_log_header(struct xfs_trans *tp,
+extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_inode *dp,
+ struct xfs_buf *bp, struct xfs_dir2_data_entry *dep);
+extern void xfs_dir2_data_log_header(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_buf *bp);
extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_dir2_data_unused *dup);
-extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_buf *bp,
+extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_inode *dp,
+ struct xfs_buf *bp, xfs_dir2_data_aoff_t offset,
+ xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp);
+extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_inode *dp,
+ struct xfs_buf *bp, struct xfs_dir2_data_unused *dup,
xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len,
int *needlogp, int *needscanp);
-extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_buf *bp,
- struct xfs_dir2_data_unused *dup, xfs_dir2_data_aoff_t offset,
- xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp);
extern struct xfs_dir2_data_free *xfs_dir2_data_freefind(
- struct xfs_dir2_data_hdr *hdr, struct xfs_dir2_data_unused *dup);
+ struct xfs_dir2_data_hdr *hdr, struct xfs_dir2_data_free *bf,
+ struct xfs_dir2_data_unused *dup);
extern const struct xfs_buf_ops xfs_dir3_block_buf_ops;
extern const struct xfs_buf_ops xfs_dir3_leafn_buf_ops;
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 12dad188939d..90cdbf4b5f19 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -18,25 +18,25 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_buf_item.h"
-#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
+#include "xfs_dinode.h"
/*
* Local function prototypes.
@@ -168,6 +168,7 @@ xfs_dir3_block_init(
static void
xfs_dir2_block_need_space(
+ struct xfs_inode *dp,
struct xfs_dir2_data_hdr *hdr,
struct xfs_dir2_block_tail *btp,
struct xfs_dir2_leaf_entry *blp,
@@ -183,7 +184,7 @@ xfs_dir2_block_need_space(
struct xfs_dir2_data_unused *enddup = NULL;
*compact = 0;
- bf = xfs_dir3_data_bestfree_p(hdr);
+ bf = dp->d_ops->data_bestfree_p(hdr);
/*
* If there are stale entries we'll use one for the leaf.
@@ -280,6 +281,7 @@ out:
static void
xfs_dir2_block_compact(
struct xfs_trans *tp,
+ struct xfs_inode *dp,
struct xfs_buf *bp,
struct xfs_dir2_data_hdr *hdr,
struct xfs_dir2_block_tail *btp,
@@ -312,7 +314,7 @@ xfs_dir2_block_compact(
*lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1);
*lfloghigh -= be32_to_cpu(btp->stale) - 1;
be32_add_cpu(&btp->count, -(be32_to_cpu(btp->stale) - 1));
- xfs_dir2_data_make_free(tp, bp,
+ xfs_dir2_data_make_free(tp, dp, bp,
(xfs_dir2_data_aoff_t)((char *)blp - (char *)hdr),
(xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)),
needlog, &needscan);
@@ -323,7 +325,7 @@ xfs_dir2_block_compact(
* This needs to happen before the next call to use_free.
*/
if (needscan)
- xfs_dir2_data_freescan(tp->t_mountp, hdr, needlog);
+ xfs_dir2_data_freescan(dp, hdr, needlog);
}
/*
@@ -369,7 +371,7 @@ xfs_dir2_block_addname(
if (error)
return error;
- len = xfs_dir3_data_entsize(mp, args->namelen);
+ len = dp->d_ops->data_entsize(args->namelen);
/*
* Set up pointers to parts of the block.
@@ -382,7 +384,7 @@ xfs_dir2_block_addname(
* Find out if we can reuse stale entries or whether we need extra
* space for entry and new leaf.
*/
- xfs_dir2_block_need_space(hdr, btp, blp, &tagp, &dup,
+ xfs_dir2_block_need_space(dp, hdr, btp, blp, &tagp, &dup,
&enddup, &compact, len);
/*
@@ -418,7 +420,7 @@ xfs_dir2_block_addname(
* If need to compact the leaf entries, do it now.
*/
if (compact) {
- xfs_dir2_block_compact(tp, bp, hdr, btp, blp, &needlog,
+ xfs_dir2_block_compact(tp, dp, bp, hdr, btp, blp, &needlog,
&lfloghigh, &lfloglow);
/* recalculate blp post-compaction */
blp = xfs_dir2_block_leaf_p(btp);
@@ -453,7 +455,7 @@ xfs_dir2_block_addname(
/*
* Mark the space needed for the new leaf entry, now in use.
*/
- xfs_dir2_data_use_free(tp, bp, enddup,
+ xfs_dir2_data_use_free(tp, dp, bp, enddup,
(xfs_dir2_data_aoff_t)
((char *)enddup - (char *)hdr + be16_to_cpu(enddup->length) -
sizeof(*blp)),
@@ -468,7 +470,7 @@ xfs_dir2_block_addname(
* This needs to happen before the next call to use_free.
*/
if (needscan) {
- xfs_dir2_data_freescan(mp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp, hdr, &needlog);
needscan = 0;
}
/*
@@ -540,7 +542,7 @@ xfs_dir2_block_addname(
/*
* Mark space for the data entry used.
*/
- xfs_dir2_data_use_free(tp, bp, dup,
+ xfs_dir2_data_use_free(tp, dp, bp, dup,
(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr),
(xfs_dir2_data_aoff_t)len, &needlog, &needscan);
/*
@@ -549,18 +551,18 @@ xfs_dir2_block_addname(
dep->inumber = cpu_to_be64(args->inumber);
dep->namelen = args->namelen;
memcpy(dep->name, args->name, args->namelen);
- xfs_dir3_dirent_put_ftype(mp, dep, args->filetype);
- tagp = xfs_dir3_data_entry_tag_p(mp, dep);
+ dp->d_ops->data_put_ftype(dep, args->filetype);
+ tagp = dp->d_ops->data_entry_tag_p(dep);
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
/*
* Clean up the bestfree array and log the header, tail, and entry.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp, hdr, &needlog);
if (needlog)
- xfs_dir2_data_log_header(tp, bp);
+ xfs_dir2_data_log_header(tp, dp, bp);
xfs_dir2_block_log_tail(tp, bp);
- xfs_dir2_data_log_entry(tp, bp, dep);
+ xfs_dir2_data_log_entry(tp, dp, bp, dep);
xfs_dir3_data_check(dp, bp);
return 0;
}
@@ -642,7 +644,7 @@ xfs_dir2_block_lookup(
* Fill in inode number, CI name if appropriate, release the block.
*/
args->inumber = be64_to_cpu(dep->inumber);
- args->filetype = xfs_dir3_dirent_get_ftype(mp, dep);
+ args->filetype = dp->d_ops->data_get_ftype(dep);
error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
xfs_trans_brelse(args->trans, bp);
return XFS_ERROR(error);
@@ -799,9 +801,9 @@ xfs_dir2_block_removename(
* Mark the data entry's space free.
*/
needlog = needscan = 0;
- xfs_dir2_data_make_free(tp, bp,
+ xfs_dir2_data_make_free(tp, dp, bp,
(xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr),
- xfs_dir3_data_entsize(mp, dep->namelen), &needlog, &needscan);
+ dp->d_ops->data_entsize(dep->namelen), &needlog, &needscan);
/*
* Fix up the block tail.
*/
@@ -816,9 +818,9 @@ xfs_dir2_block_removename(
* Fix up bestfree, log the header if necessary.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp, hdr, &needlog);
if (needlog)
- xfs_dir2_data_log_header(tp, bp);
+ xfs_dir2_data_log_header(tp, dp, bp);
xfs_dir3_data_check(dp, bp);
/*
* See if the size as a shortform is good enough.
@@ -875,8 +877,8 @@ xfs_dir2_block_replace(
* Change the inode number to the new value.
*/
dep->inumber = cpu_to_be64(args->inumber);
- xfs_dir3_dirent_put_ftype(mp, dep, args->filetype);
- xfs_dir2_data_log_entry(args->trans, bp, dep);
+ dp->d_ops->data_put_ftype(dep, args->filetype);
+ xfs_dir2_data_log_entry(args->trans, dp, bp, dep);
xfs_dir3_data_check(dp, bp);
return 0;
}
@@ -934,8 +936,8 @@ xfs_dir2_leaf_to_block(
tp = args->trans;
mp = dp->i_mount;
leaf = lbp->b_addr;
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
- ents = xfs_dir3_leaf_ents_p(leaf);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
ASSERT(leafhdr.magic == XFS_DIR2_LEAF1_MAGIC ||
@@ -949,7 +951,7 @@ xfs_dir2_leaf_to_block(
while (dp->i_d.di_size > mp->m_dirblksize) {
int hdrsz;
- hdrsz = xfs_dir3_data_hdr_size(xfs_sb_version_hascrc(&mp->m_sb));
+ hdrsz = dp->d_ops->data_entry_offset;
bestsp = xfs_dir2_leaf_bests_p(ltp);
if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) ==
mp->m_dirblksize - hdrsz) {
@@ -999,7 +1001,7 @@ xfs_dir2_leaf_to_block(
/*
* Use up the space at the end of the block (blp/btp).
*/
- xfs_dir2_data_use_free(tp, dbp, dup, mp->m_dirblksize - size, size,
+ xfs_dir2_data_use_free(tp, dp, dbp, dup, mp->m_dirblksize - size, size,
&needlog, &needscan);
/*
* Initialize the block tail.
@@ -1023,9 +1025,9 @@ xfs_dir2_leaf_to_block(
* Scan the bestfree if we need it and log the data block header.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp, hdr, &needlog);
if (needlog)
- xfs_dir2_data_log_header(tp, dbp);
+ xfs_dir2_data_log_header(tp, dp, dbp);
/*
* Pitch the old leaf block.
*/
@@ -1136,9 +1138,9 @@ xfs_dir2_sf_to_block(
* The whole thing is initialized to free by the init routine.
* Say we're using the leaf and tail area.
*/
- dup = xfs_dir3_data_unused_p(hdr);
+ dup = dp->d_ops->data_unused_p(hdr);
needlog = needscan = 0;
- xfs_dir2_data_use_free(tp, bp, dup, mp->m_dirblksize - i, i, &needlog,
+ xfs_dir2_data_use_free(tp, dp, bp, dup, mp->m_dirblksize - i, i, &needlog,
&needscan);
ASSERT(needscan == 0);
/*
@@ -1152,38 +1154,38 @@ xfs_dir2_sf_to_block(
/*
* Remove the freespace, we'll manage it.
*/
- xfs_dir2_data_use_free(tp, bp, dup,
+ xfs_dir2_data_use_free(tp, dp, bp, dup,
(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr),
be16_to_cpu(dup->length), &needlog, &needscan);
/*
* Create entry for .
*/
- dep = xfs_dir3_data_dot_entry_p(mp, hdr);
+ dep = dp->d_ops->data_dot_entry_p(hdr);
dep->inumber = cpu_to_be64(dp->i_ino);
dep->namelen = 1;
dep->name[0] = '.';
- xfs_dir3_dirent_put_ftype(mp, dep, XFS_DIR3_FT_DIR);
- tagp = xfs_dir3_data_entry_tag_p(mp, dep);
+ dp->d_ops->data_put_ftype(dep, XFS_DIR3_FT_DIR);
+ tagp = dp->d_ops->data_entry_tag_p(dep);
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
- xfs_dir2_data_log_entry(tp, bp, dep);
+ xfs_dir2_data_log_entry(tp, dp, bp, dep);
blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot);
blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
(char *)dep - (char *)hdr));
/*
* Create entry for ..
*/
- dep = xfs_dir3_data_dotdot_entry_p(mp, hdr);
- dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp));
+ dep = dp->d_ops->data_dotdot_entry_p(hdr);
+ dep->inumber = cpu_to_be64(dp->d_ops->sf_get_parent_ino(sfp));
dep->namelen = 2;
dep->name[0] = dep->name[1] = '.';
- xfs_dir3_dirent_put_ftype(mp, dep, XFS_DIR3_FT_DIR);
- tagp = xfs_dir3_data_entry_tag_p(mp, dep);
+ dp->d_ops->data_put_ftype(dep, XFS_DIR3_FT_DIR);
+ tagp = dp->d_ops->data_entry_tag_p(dep);
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
- xfs_dir2_data_log_entry(tp, bp, dep);
+ xfs_dir2_data_log_entry(tp, dp, bp, dep);
blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
(char *)dep - (char *)hdr));
- offset = xfs_dir3_data_first_offset(mp);
+ offset = dp->d_ops->data_first_offset;
/*
* Loop over existing entries, stuff them in.
*/
@@ -1214,7 +1216,9 @@ xfs_dir2_sf_to_block(
*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(
((char *)dup - (char *)hdr));
xfs_dir2_data_log_unused(tp, bp, dup);
- xfs_dir2_data_freeinsert(hdr, dup, &dummy);
+ xfs_dir2_data_freeinsert(hdr,
+ dp->d_ops->data_bestfree_p(hdr),
+ dup, &dummy);
offset += be16_to_cpu(dup->length);
continue;
}
@@ -1222,14 +1226,13 @@ xfs_dir2_sf_to_block(
* Copy a real entry.
*/
dep = (xfs_dir2_data_entry_t *)((char *)hdr + newoffset);
- dep->inumber = cpu_to_be64(xfs_dir3_sfe_get_ino(mp, sfp, sfep));
+ dep->inumber = cpu_to_be64(dp->d_ops->sf_get_ino(sfp, sfep));
dep->namelen = sfep->namelen;
- xfs_dir3_dirent_put_ftype(mp, dep,
- xfs_dir3_sfe_get_ftype(mp, sfp, sfep));
+ dp->d_ops->data_put_ftype(dep, dp->d_ops->sf_get_ftype(sfep));
memcpy(dep->name, sfep->name, dep->namelen);
- tagp = xfs_dir3_data_entry_tag_p(mp, dep);
+ tagp = dp->d_ops->data_entry_tag_p(dep);
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
- xfs_dir2_data_log_entry(tp, bp, dep);
+ xfs_dir2_data_log_entry(tp, dp, bp, dep);
name.name = sfep->name;
name.len = sfep->namelen;
blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops->
@@ -1240,7 +1243,7 @@ xfs_dir2_sf_to_block(
if (++i == sfp->count)
sfep = NULL;
else
- sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
+ sfep = dp->d_ops->sf_nextentry(sfp, sfep);
}
/* Done with the temporary buffer */
kmem_free(sfp);
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index 47e1326c169a..70acff4ee173 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -18,20 +18,19 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_error.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
@@ -63,11 +62,18 @@ __xfs_dir3_data_check(
char *p; /* current data position */
int stale; /* count of stale leaves */
struct xfs_name name;
+ const struct xfs_dir_ops *ops;
mp = bp->b_target->bt_mount;
+
+ /*
+ * We can be passed a null dp here from a verifier, so we need to go the
+ * hard way to get them.
+ */
+ ops = xfs_dir_get_ops(mp, dp);
+
hdr = bp->b_addr;
- bf = xfs_dir3_data_bestfree_p(hdr);
- p = (char *)xfs_dir3_data_entry_p(hdr);
+ p = (char *)ops->data_entry_p(hdr);
switch (hdr->magic) {
case cpu_to_be32(XFS_DIR3_BLOCK_MAGIC):
@@ -75,6 +81,16 @@ __xfs_dir3_data_check(
btp = xfs_dir2_block_tail_p(mp, hdr);
lep = xfs_dir2_block_leaf_p(btp);
endp = (char *)lep;
+
+ /*
+ * The number of leaf entries is limited by the size of the
+ * block and the amount of space used by the data entries.
+ * We don't know how much space is used by the data entries yet,
+ * so just ensure that the count falls somewhere inside the
+ * block right now.
+ */
+ XFS_WANT_CORRUPTED_RETURN(be32_to_cpu(btp->count) <
+ ((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry));
break;
case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
case cpu_to_be32(XFS_DIR2_DATA_MAGIC):
@@ -85,10 +101,11 @@ __xfs_dir3_data_check(
return EFSCORRUPTED;
}
- count = lastfree = freeseen = 0;
/*
* Account for zero bestfree entries.
*/
+ bf = ops->data_bestfree_p(hdr);
+ count = lastfree = freeseen = 0;
if (!bf[0].length) {
XFS_WANT_CORRUPTED_RETURN(!bf[0].offset);
freeseen |= 1 << 0;
@@ -121,7 +138,7 @@ __xfs_dir3_data_check(
XFS_WANT_CORRUPTED_RETURN(
be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) ==
(char *)dup - (char *)hdr);
- dfp = xfs_dir2_data_freefind(hdr, dup);
+ dfp = xfs_dir2_data_freefind(hdr, bf, dup);
if (dfp) {
i = (int)(dfp - bf);
XFS_WANT_CORRUPTED_RETURN(
@@ -147,10 +164,10 @@ __xfs_dir3_data_check(
XFS_WANT_CORRUPTED_RETURN(
!xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)));
XFS_WANT_CORRUPTED_RETURN(
- be16_to_cpu(*xfs_dir3_data_entry_tag_p(mp, dep)) ==
+ be16_to_cpu(*ops->data_entry_tag_p(dep)) ==
(char *)dep - (char *)hdr);
XFS_WANT_CORRUPTED_RETURN(
- xfs_dir3_dirent_get_ftype(mp, dep) < XFS_DIR3_FT_MAX);
+ ops->data_get_ftype(dep) < XFS_DIR3_FT_MAX);
count++;
lastfree = 0;
if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
@@ -168,7 +185,7 @@ __xfs_dir3_data_check(
}
XFS_WANT_CORRUPTED_RETURN(i < be32_to_cpu(btp->count));
}
- p += xfs_dir3_data_entsize(mp, dep->namelen);
+ p += ops->data_entsize(dep->namelen);
}
/*
* Need to have seen all the entries and all the bestfree slots.
@@ -327,19 +344,18 @@ xfs_dir3_data_readahead(
*/
xfs_dir2_data_free_t *
xfs_dir2_data_freefind(
- xfs_dir2_data_hdr_t *hdr, /* data block */
- xfs_dir2_data_unused_t *dup) /* data unused entry */
+ struct xfs_dir2_data_hdr *hdr, /* data block header */
+ struct xfs_dir2_data_free *bf, /* bestfree table pointer */
+ struct xfs_dir2_data_unused *dup) /* unused space */
{
xfs_dir2_data_free_t *dfp; /* bestfree entry */
xfs_dir2_data_aoff_t off; /* offset value needed */
- struct xfs_dir2_data_free *bf;
#ifdef DEBUG
int matched; /* matched the value */
int seenzero; /* saw a 0 bestfree entry */
#endif
off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr);
- bf = xfs_dir3_data_bestfree_p(hdr);
#ifdef DEBUG
/*
@@ -399,11 +415,11 @@ xfs_dir2_data_freefind(
*/
xfs_dir2_data_free_t * /* entry inserted */
xfs_dir2_data_freeinsert(
- xfs_dir2_data_hdr_t *hdr, /* data block pointer */
- xfs_dir2_data_unused_t *dup, /* unused space */
+ struct xfs_dir2_data_hdr *hdr, /* data block pointer */
+ struct xfs_dir2_data_free *dfp, /* bestfree table pointer */
+ struct xfs_dir2_data_unused *dup, /* unused space */
int *loghead) /* log the data header (out) */
{
- xfs_dir2_data_free_t *dfp; /* bestfree table pointer */
xfs_dir2_data_free_t new; /* new bestfree entry */
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
@@ -411,7 +427,6 @@ xfs_dir2_data_freeinsert(
hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));
- dfp = xfs_dir3_data_bestfree_p(hdr);
new.length = dup->length;
new.offset = cpu_to_be16((char *)dup - (char *)hdr);
@@ -444,11 +459,11 @@ xfs_dir2_data_freeinsert(
*/
STATIC void
xfs_dir2_data_freeremove(
- xfs_dir2_data_hdr_t *hdr, /* data block header */
- xfs_dir2_data_free_t *dfp, /* bestfree entry pointer */
+ struct xfs_dir2_data_hdr *hdr, /* data block header */
+ struct xfs_dir2_data_free *bf, /* bestfree table pointer */
+ struct xfs_dir2_data_free *dfp, /* bestfree entry pointer */
int *loghead) /* out: log data header */
{
- struct xfs_dir2_data_free *bf;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
@@ -458,7 +473,6 @@ xfs_dir2_data_freeremove(
/*
* It's the first entry, slide the next 2 up.
*/
- bf = xfs_dir3_data_bestfree_p(hdr);
if (dfp == &bf[0]) {
bf[0] = bf[1];
bf[1] = bf[2];
@@ -486,9 +500,9 @@ xfs_dir2_data_freeremove(
*/
void
xfs_dir2_data_freescan(
- xfs_mount_t *mp, /* filesystem mount point */
- xfs_dir2_data_hdr_t *hdr, /* data block header */
- int *loghead) /* out: log data header */
+ struct xfs_inode *dp,
+ struct xfs_dir2_data_hdr *hdr,
+ int *loghead)
{
xfs_dir2_block_tail_t *btp; /* block tail */
xfs_dir2_data_entry_t *dep; /* active data entry */
@@ -505,19 +519,19 @@ xfs_dir2_data_freescan(
/*
* Start by clearing the table.
*/
- bf = xfs_dir3_data_bestfree_p(hdr);
+ bf = dp->d_ops->data_bestfree_p(hdr);
memset(bf, 0, sizeof(*bf) * XFS_DIR2_DATA_FD_COUNT);
*loghead = 1;
/*
* Set up pointers.
*/
- p = (char *)xfs_dir3_data_entry_p(hdr);
+ p = (char *)dp->d_ops->data_entry_p(hdr);
if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) {
- btp = xfs_dir2_block_tail_p(mp, hdr);
+ btp = xfs_dir2_block_tail_p(dp->i_mount, hdr);
endp = (char *)xfs_dir2_block_leaf_p(btp);
} else
- endp = (char *)hdr + mp->m_dirblksize;
+ endp = (char *)hdr + dp->i_mount->m_dirblksize;
/*
* Loop over the block's entries.
*/
@@ -529,7 +543,7 @@ xfs_dir2_data_freescan(
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
ASSERT((char *)dup - (char *)hdr ==
be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
- xfs_dir2_data_freeinsert(hdr, dup, loghead);
+ xfs_dir2_data_freeinsert(hdr, bf, dup, loghead);
p += be16_to_cpu(dup->length);
}
/*
@@ -538,8 +552,8 @@ xfs_dir2_data_freescan(
else {
dep = (xfs_dir2_data_entry_t *)p;
ASSERT((char *)dep - (char *)hdr ==
- be16_to_cpu(*xfs_dir3_data_entry_tag_p(mp, dep)));
- p += xfs_dir3_data_entsize(mp, dep->namelen);
+ be16_to_cpu(*dp->d_ops->data_entry_tag_p(dep)));
+ p += dp->d_ops->data_entsize(dep->namelen);
}
}
}
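
xfs_dir2_data_freescan() above rebuilds the bestfree table by walking every entry in the block and handing each unused region to xfs_dir2_data_freeinsert(), which keeps the table at XFS_DIR2_DATA_FD_COUNT entries (three in this format) in decreasing-length order and drops anything smaller. A standalone sketch of that keep-the-largest-N insertion (simplified fields, no endian handling, not the kernel routine):

#include <stdint.h>
#include <stdio.h>

#define FD_COUNT 3              /* models XFS_DIR2_DATA_FD_COUNT */

struct freereg {
        uint16_t length;
        uint16_t offset;
};

/* Insert a free region, keeping the table sorted by decreasing length
 * and letting whatever falls off the end be forgotten. */
static void freeinsert(struct freereg *bf, uint16_t length, uint16_t offset)
{
        struct freereg new = { length, offset };
        int i;

        for (i = 0; i < FD_COUNT; i++) {
                if (new.length > bf[i].length) {
                        struct freereg tmp = bf[i];

                        bf[i] = new;
                        new = tmp;      /* push the rest down */
                }
        }
}

int main(void)
{
        struct freereg bf[FD_COUNT] = { { 0 } };
        int i;

        freeinsert(bf, 16, 64);
        freeinsert(bf, 48, 128);
        freeinsert(bf, 24, 512);
        freeinsert(bf, 8, 900);         /* too small, dropped */

        for (i = 0; i < FD_COUNT; i++)
                printf("len %u at %u\n", bf[i].length, bf[i].offset);
        return 0;
}
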
@@ -594,8 +608,8 @@ xfs_dir3_data_init(
} else
hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
- bf = xfs_dir3_data_bestfree_p(hdr);
- bf[0].offset = cpu_to_be16(xfs_dir3_data_entry_offset(hdr));
+ bf = dp->d_ops->data_bestfree_p(hdr);
+ bf[0].offset = cpu_to_be16(dp->d_ops->data_entry_offset);
for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) {
bf[i].length = 0;
bf[i].offset = 0;
@@ -604,17 +618,17 @@ xfs_dir3_data_init(
/*
* Set up an unused entry for the block's body.
*/
- dup = xfs_dir3_data_unused_p(hdr);
+ dup = dp->d_ops->data_unused_p(hdr);
dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
- t = mp->m_dirblksize - (uint)xfs_dir3_data_entry_offset(hdr);
+ t = mp->m_dirblksize - (uint)dp->d_ops->data_entry_offset;
bf[0].length = cpu_to_be16(t);
dup->length = cpu_to_be16(t);
*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)hdr);
/*
* Log it and return it.
*/
- xfs_dir2_data_log_header(tp, bp);
+ xfs_dir2_data_log_header(tp, dp, bp);
xfs_dir2_data_log_unused(tp, bp, dup);
*bpp = bp;
return 0;
@@ -626,11 +640,11 @@ xfs_dir3_data_init(
void
xfs_dir2_data_log_entry(
struct xfs_trans *tp,
+ struct xfs_inode *dp,
struct xfs_buf *bp,
xfs_dir2_data_entry_t *dep) /* data entry pointer */
{
struct xfs_dir2_data_hdr *hdr = bp->b_addr;
- struct xfs_mount *mp = tp->t_mountp;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
@@ -638,7 +652,7 @@ xfs_dir2_data_log_entry(
hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));
xfs_trans_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr),
- (uint)((char *)(xfs_dir3_data_entry_tag_p(mp, dep) + 1) -
+ (uint)((char *)(dp->d_ops->data_entry_tag_p(dep) + 1) -
(char *)hdr - 1));
}
@@ -648,16 +662,19 @@ xfs_dir2_data_log_entry(
void
xfs_dir2_data_log_header(
struct xfs_trans *tp,
+ struct xfs_inode *dp,
struct xfs_buf *bp)
{
- xfs_dir2_data_hdr_t *hdr = bp->b_addr;
+#ifdef DEBUG
+ struct xfs_dir2_data_hdr *hdr = bp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));
+#endif
- xfs_trans_log_buf(tp, bp, 0, xfs_dir3_data_entry_offset(hdr) - 1);
+ xfs_trans_log_buf(tp, bp, 0, dp->d_ops->data_entry_offset - 1);
}
/*
@@ -698,6 +715,7 @@ xfs_dir2_data_log_unused(
void
xfs_dir2_data_make_free(
struct xfs_trans *tp,
+ struct xfs_inode *dp,
struct xfs_buf *bp,
xfs_dir2_data_aoff_t offset, /* starting byte offset */
xfs_dir2_data_aoff_t len, /* length in bytes */
@@ -735,7 +753,7 @@ xfs_dir2_data_make_free(
* If this isn't the start of the block, then back up to
* the previous entry and see if it's free.
*/
- if (offset > xfs_dir3_data_entry_offset(hdr)) {
+ if (offset > dp->d_ops->data_entry_offset) {
__be16 *tagp; /* tag just before us */
tagp = (__be16 *)((char *)hdr + offset) - 1;
@@ -761,15 +779,15 @@ xfs_dir2_data_make_free(
* Previous and following entries are both free,
* merge everything into a single free entry.
*/
- bf = xfs_dir3_data_bestfree_p(hdr);
+ bf = dp->d_ops->data_bestfree_p(hdr);
if (prevdup && postdup) {
xfs_dir2_data_free_t *dfp2; /* another bestfree pointer */
/*
* See if prevdup and/or postdup are in bestfree table.
*/
- dfp = xfs_dir2_data_freefind(hdr, prevdup);
- dfp2 = xfs_dir2_data_freefind(hdr, postdup);
+ dfp = xfs_dir2_data_freefind(hdr, bf, prevdup);
+ dfp2 = xfs_dir2_data_freefind(hdr, bf, postdup);
/*
* We need a rescan unless there are exactly 2 free entries
* namely our two. Then we know what's happening, otherwise
@@ -797,12 +815,13 @@ xfs_dir2_data_make_free(
ASSERT(dfp2 == dfp);
dfp2 = &bf[1];
}
- xfs_dir2_data_freeremove(hdr, dfp2, needlogp);
- xfs_dir2_data_freeremove(hdr, dfp, needlogp);
+ xfs_dir2_data_freeremove(hdr, bf, dfp2, needlogp);
+ xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp);
/*
* Now insert the new entry.
*/
- dfp = xfs_dir2_data_freeinsert(hdr, prevdup, needlogp);
+ dfp = xfs_dir2_data_freeinsert(hdr, bf, prevdup,
+ needlogp);
ASSERT(dfp == &bf[0]);
ASSERT(dfp->length == prevdup->length);
ASSERT(!dfp[1].length);
@@ -813,7 +832,7 @@ xfs_dir2_data_make_free(
* The entry before us is free, merge with it.
*/
else if (prevdup) {
- dfp = xfs_dir2_data_freefind(hdr, prevdup);
+ dfp = xfs_dir2_data_freefind(hdr, bf, prevdup);
be16_add_cpu(&prevdup->length, len);
*xfs_dir2_data_unused_tag_p(prevdup) =
cpu_to_be16((char *)prevdup - (char *)hdr);
@@ -824,8 +843,8 @@ xfs_dir2_data_make_free(
* the old one and add the new one.
*/
if (dfp) {
- xfs_dir2_data_freeremove(hdr, dfp, needlogp);
- xfs_dir2_data_freeinsert(hdr, prevdup, needlogp);
+ xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp);
+ xfs_dir2_data_freeinsert(hdr, bf, prevdup, needlogp);
}
/*
* Otherwise we need a scan if the new entry is big enough.
@@ -839,7 +858,7 @@ xfs_dir2_data_make_free(
* The following entry is free, merge with it.
*/
else if (postdup) {
- dfp = xfs_dir2_data_freefind(hdr, postdup);
+ dfp = xfs_dir2_data_freefind(hdr, bf, postdup);
newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset);
newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length));
@@ -852,8 +871,8 @@ xfs_dir2_data_make_free(
* the old one and add the new one.
*/
if (dfp) {
- xfs_dir2_data_freeremove(hdr, dfp, needlogp);
- xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
+ xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp);
+ xfs_dir2_data_freeinsert(hdr, bf, newdup, needlogp);
}
/*
* Otherwise we need a scan if the new entry is big enough.
@@ -873,7 +892,7 @@ xfs_dir2_data_make_free(
*xfs_dir2_data_unused_tag_p(newdup) =
cpu_to_be16((char *)newdup - (char *)hdr);
xfs_dir2_data_log_unused(tp, bp, newdup);
- xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
+ xfs_dir2_data_freeinsert(hdr, bf, newdup, needlogp);
}
*needscanp = needscan;
}
@@ -884,6 +903,7 @@ xfs_dir2_data_make_free(
void
xfs_dir2_data_use_free(
struct xfs_trans *tp,
+ struct xfs_inode *dp,
struct xfs_buf *bp,
xfs_dir2_data_unused_t *dup, /* unused entry */
xfs_dir2_data_aoff_t offset, /* starting offset to use */
@@ -913,9 +933,9 @@ xfs_dir2_data_use_free(
/*
* Look up the entry in the bestfree table.
*/
- dfp = xfs_dir2_data_freefind(hdr, dup);
oldlen = be16_to_cpu(dup->length);
- bf = xfs_dir3_data_bestfree_p(hdr);
+ bf = dp->d_ops->data_bestfree_p(hdr);
+ dfp = xfs_dir2_data_freefind(hdr, bf, dup);
ASSERT(dfp || oldlen <= be16_to_cpu(bf[2].length));
/*
* Check for alignment with front and back of the entry.
@@ -932,7 +952,8 @@ xfs_dir2_data_use_free(
if (dfp) {
needscan = (bf[2].offset != 0);
if (!needscan)
- xfs_dir2_data_freeremove(hdr, dfp, needlogp);
+ xfs_dir2_data_freeremove(hdr, bf, dfp,
+ needlogp);
}
}
/*
@@ -950,8 +971,9 @@ xfs_dir2_data_use_free(
* If it was in the table, remove it and add the new one.
*/
if (dfp) {
- xfs_dir2_data_freeremove(hdr, dfp, needlogp);
- dfp = xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
+ xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp);
+ dfp = xfs_dir2_data_freeinsert(hdr, bf, newdup,
+ needlogp);
ASSERT(dfp != NULL);
ASSERT(dfp->length == newdup->length);
ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr);
@@ -977,8 +999,9 @@ xfs_dir2_data_use_free(
* If it was in the table, remove it and add the new one.
*/
if (dfp) {
- xfs_dir2_data_freeremove(hdr, dfp, needlogp);
- dfp = xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
+ xfs_dir2_data_freeremove(hdr, bf, dfp, needlogp);
+ dfp = xfs_dir2_data_freeinsert(hdr, bf, newdup,
+ needlogp);
ASSERT(dfp != NULL);
ASSERT(dfp->length == newdup->length);
ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr);
@@ -1017,9 +1040,11 @@ xfs_dir2_data_use_free(
if (dfp) {
needscan = (bf[2].length != 0);
if (!needscan) {
- xfs_dir2_data_freeremove(hdr, dfp, needlogp);
- xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
- xfs_dir2_data_freeinsert(hdr, newdup2,
+ xfs_dir2_data_freeremove(hdr, bf, dfp,
+ needlogp);
+ xfs_dir2_data_freeinsert(hdr, bf, newdup,
+ needlogp);
+ xfs_dir2_data_freeinsert(hdr, bf, newdup2,
needlogp);
}
}
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 1021c8356d08..ae47ec6e16c4 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -18,23 +18,21 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
-#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
@@ -52,21 +50,21 @@ static void xfs_dir3_leaf_log_tail(struct xfs_trans *tp, struct xfs_buf *bp);
* Pop an assert if something is wrong.
*/
#ifdef DEBUG
-#define xfs_dir3_leaf_check(mp, bp) \
+#define xfs_dir3_leaf_check(dp, bp) \
do { \
- if (!xfs_dir3_leaf1_check((mp), (bp))) \
+ if (!xfs_dir3_leaf1_check((dp), (bp))) \
ASSERT(0); \
} while (0);
STATIC bool
xfs_dir3_leaf1_check(
- struct xfs_mount *mp,
+ struct xfs_inode *dp,
struct xfs_buf *bp)
{
struct xfs_dir2_leaf *leaf = bp->b_addr;
struct xfs_dir3_icleaf_hdr leafhdr;
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
if (leafhdr.magic == XFS_DIR3_LEAF1_MAGIC) {
struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
@@ -75,71 +73,16 @@ xfs_dir3_leaf1_check(
} else if (leafhdr.magic != XFS_DIR2_LEAF1_MAGIC)
return false;
- return xfs_dir3_leaf_check_int(mp, &leafhdr, leaf);
+ return xfs_dir3_leaf_check_int(dp->i_mount, dp, &leafhdr, leaf);
}
#else
-#define xfs_dir3_leaf_check(mp, bp)
+#define xfs_dir3_leaf_check(dp, bp)
#endif
-void
-xfs_dir3_leaf_hdr_from_disk(
- struct xfs_dir3_icleaf_hdr *to,
- struct xfs_dir2_leaf *from)
-{
- if (from->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) ||
- from->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
- to->forw = be32_to_cpu(from->hdr.info.forw);
- to->back = be32_to_cpu(from->hdr.info.back);
- to->magic = be16_to_cpu(from->hdr.info.magic);
- to->count = be16_to_cpu(from->hdr.count);
- to->stale = be16_to_cpu(from->hdr.stale);
- } else {
- struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)from;
-
- to->forw = be32_to_cpu(hdr3->info.hdr.forw);
- to->back = be32_to_cpu(hdr3->info.hdr.back);
- to->magic = be16_to_cpu(hdr3->info.hdr.magic);
- to->count = be16_to_cpu(hdr3->count);
- to->stale = be16_to_cpu(hdr3->stale);
- }
-
- ASSERT(to->magic == XFS_DIR2_LEAF1_MAGIC ||
- to->magic == XFS_DIR3_LEAF1_MAGIC ||
- to->magic == XFS_DIR2_LEAFN_MAGIC ||
- to->magic == XFS_DIR3_LEAFN_MAGIC);
-}
-
-void
-xfs_dir3_leaf_hdr_to_disk(
- struct xfs_dir2_leaf *to,
- struct xfs_dir3_icleaf_hdr *from)
-{
- ASSERT(from->magic == XFS_DIR2_LEAF1_MAGIC ||
- from->magic == XFS_DIR3_LEAF1_MAGIC ||
- from->magic == XFS_DIR2_LEAFN_MAGIC ||
- from->magic == XFS_DIR3_LEAFN_MAGIC);
-
- if (from->magic == XFS_DIR2_LEAF1_MAGIC ||
- from->magic == XFS_DIR2_LEAFN_MAGIC) {
- to->hdr.info.forw = cpu_to_be32(from->forw);
- to->hdr.info.back = cpu_to_be32(from->back);
- to->hdr.info.magic = cpu_to_be16(from->magic);
- to->hdr.count = cpu_to_be16(from->count);
- to->hdr.stale = cpu_to_be16(from->stale);
- } else {
- struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)to;
-
- hdr3->info.hdr.forw = cpu_to_be32(from->forw);
- hdr3->info.hdr.back = cpu_to_be32(from->back);
- hdr3->info.hdr.magic = cpu_to_be16(from->magic);
- hdr3->count = cpu_to_be16(from->count);
- hdr3->stale = cpu_to_be16(from->stale);
- }
-}
-
bool
xfs_dir3_leaf_check_int(
struct xfs_mount *mp,
+ struct xfs_inode *dp,
struct xfs_dir3_icleaf_hdr *hdr,
struct xfs_dir2_leaf *leaf)
{
@@ -147,8 +90,21 @@ xfs_dir3_leaf_check_int(
xfs_dir2_leaf_tail_t *ltp;
int stale;
int i;
+ const struct xfs_dir_ops *ops;
+ struct xfs_dir3_icleaf_hdr leafhdr;
- ents = xfs_dir3_leaf_ents_p(leaf);
+ /*
+ * we can be passed a null dp here from a verifier, so we need to go the
+ * hard way to get them.
+ */
+ ops = xfs_dir_get_ops(mp, dp);
+
+ if (!hdr) {
+ ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ hdr = &leafhdr;
+ }
+
+ ents = ops->leaf_ents_p(leaf);
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
/*
@@ -156,7 +112,7 @@ xfs_dir3_leaf_check_int(
* Should factor in the size of the bests table as well.
* We can deduce a value for that from di_size.
*/
- if (hdr->count > xfs_dir3_max_leaf_ents(mp, leaf))
+ if (hdr->count > ops->leaf_max_ents(mp))
return false;
/* Leaves and bests don't overlap in leaf format. */
@@ -192,7 +148,6 @@ xfs_dir3_leaf_verify(
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_dir2_leaf *leaf = bp->b_addr;
- struct xfs_dir3_icleaf_hdr leafhdr;
ASSERT(magic == XFS_DIR2_LEAF1_MAGIC || magic == XFS_DIR2_LEAFN_MAGIC);
@@ -214,8 +169,7 @@ xfs_dir3_leaf_verify(
return false;
}
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
- return xfs_dir3_leaf_check_int(mp, &leafhdr, leaf);
+ return xfs_dir3_leaf_check_int(mp, NULL, NULL, leaf);
}
static void
@@ -401,7 +355,7 @@ xfs_dir3_leaf_get_buf(
return error;
xfs_dir3_leaf_init(mp, tp, bp, dp->i_ino, magic);
- xfs_dir3_leaf_log_header(tp, bp);
+ xfs_dir3_leaf_log_header(tp, dp, bp);
if (magic == XFS_DIR2_LEAF1_MAGIC)
xfs_dir3_leaf_log_tail(tp, bp);
*bpp = bp;
@@ -462,31 +416,31 @@ xfs_dir2_block_to_leaf(
xfs_dir3_data_check(dp, dbp);
btp = xfs_dir2_block_tail_p(mp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
- bf = xfs_dir3_data_bestfree_p(hdr);
- ents = xfs_dir3_leaf_ents_p(leaf);
+ bf = dp->d_ops->data_bestfree_p(hdr);
+ ents = dp->d_ops->leaf_ents_p(leaf);
/*
* Set the counts in the leaf header.
*/
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
leafhdr.count = be32_to_cpu(btp->count);
leafhdr.stale = be32_to_cpu(btp->stale);
- xfs_dir3_leaf_hdr_to_disk(leaf, &leafhdr);
- xfs_dir3_leaf_log_header(tp, lbp);
+ dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir3_leaf_log_header(tp, dp, lbp);
/*
* Could compact these but I think we always do the conversion
* after squeezing out stale entries.
*/
memcpy(ents, blp, be32_to_cpu(btp->count) * sizeof(xfs_dir2_leaf_entry_t));
- xfs_dir3_leaf_log_ents(tp, lbp, 0, leafhdr.count - 1);
+ xfs_dir3_leaf_log_ents(tp, dp, lbp, 0, leafhdr.count - 1);
needscan = 0;
needlog = 1;
/*
* Make the space formerly occupied by the leaf entries and block
* tail be free.
*/
- xfs_dir2_data_make_free(tp, dbp,
+ xfs_dir2_data_make_free(tp, dp, dbp,
(xfs_dir2_data_aoff_t)((char *)blp - (char *)hdr),
(xfs_dir2_data_aoff_t)((char *)hdr + mp->m_dirblksize -
(char *)blp),
@@ -502,7 +456,7 @@ xfs_dir2_block_to_leaf(
hdr->magic = cpu_to_be32(XFS_DIR3_DATA_MAGIC);
if (needscan)
- xfs_dir2_data_freescan(mp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp, hdr, &needlog);
/*
* Set up leaf tail and bests table.
*/
@@ -514,8 +468,8 @@ xfs_dir2_block_to_leaf(
* Log the data header and leaf bests table.
*/
if (needlog)
- xfs_dir2_data_log_header(tp, dbp);
- xfs_dir3_leaf_check(mp, lbp);
+ xfs_dir2_data_log_header(tp, dp, dbp);
+ xfs_dir3_leaf_check(dp, lbp);
xfs_dir3_data_check(dp, dbp);
xfs_dir3_leaf_log_bests(tp, lbp, 0, 0);
return 0;
@@ -699,10 +653,10 @@ xfs_dir2_leaf_addname(
index = xfs_dir2_leaf_search_hash(args, lbp);
leaf = lbp->b_addr;
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
- ents = xfs_dir3_leaf_ents_p(leaf);
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
bestsp = xfs_dir2_leaf_bests_p(ltp);
- length = xfs_dir3_data_entsize(mp, args->namelen);
+ length = dp->d_ops->data_entsize(args->namelen);
/*
* See if there are any entries with the same hash value
@@ -864,7 +818,7 @@ xfs_dir2_leaf_addname(
else
xfs_dir3_leaf_log_bests(tp, lbp, use_block, use_block);
hdr = dbp->b_addr;
- bf = xfs_dir3_data_bestfree_p(hdr);
+ bf = dp->d_ops->data_bestfree_p(hdr);
bestsp[use_block] = bf[0].length;
grown = 1;
} else {
@@ -880,7 +834,7 @@ xfs_dir2_leaf_addname(
return error;
}
hdr = dbp->b_addr;
- bf = xfs_dir3_data_bestfree_p(hdr);
+ bf = dp->d_ops->data_bestfree_p(hdr);
grown = 0;
}
/*
@@ -893,7 +847,7 @@ xfs_dir2_leaf_addname(
/*
* Mark the initial part of our freespace in use for the new entry.
*/
- xfs_dir2_data_use_free(tp, dbp, dup,
+ xfs_dir2_data_use_free(tp, dp, dbp, dup,
(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr), length,
&needlog, &needscan);
/*
@@ -903,20 +857,20 @@ xfs_dir2_leaf_addname(
dep->inumber = cpu_to_be64(args->inumber);
dep->namelen = args->namelen;
memcpy(dep->name, args->name, dep->namelen);
- xfs_dir3_dirent_put_ftype(mp, dep, args->filetype);
- tagp = xfs_dir3_data_entry_tag_p(mp, dep);
+ dp->d_ops->data_put_ftype(dep, args->filetype);
+ tagp = dp->d_ops->data_entry_tag_p(dep);
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
/*
* Need to scan fix up the bestfree table.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp, hdr, &needlog);
/*
* Need to log the data block's header.
*/
if (needlog)
- xfs_dir2_data_log_header(tp, dbp);
- xfs_dir2_data_log_entry(tp, dbp, dep);
+ xfs_dir2_data_log_header(tp, dp, dbp);
+ xfs_dir2_data_log_entry(tp, dp, dbp, dep);
/*
* If the bests table needs to be changed, do it.
* Log the change unless we've already done that.
@@ -939,10 +893,10 @@ xfs_dir2_leaf_addname(
/*
* Log the leaf fields and give up the buffers.
*/
- xfs_dir3_leaf_hdr_to_disk(leaf, &leafhdr);
- xfs_dir3_leaf_log_header(tp, lbp);
- xfs_dir3_leaf_log_ents(tp, lbp, lfloglow, lfloghigh);
- xfs_dir3_leaf_check(mp, lbp);
+ dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir3_leaf_log_header(tp, dp, lbp);
+ xfs_dir3_leaf_log_ents(tp, dp, lbp, lfloglow, lfloghigh);
+ xfs_dir3_leaf_check(dp, lbp);
xfs_dir3_data_check(dp, dbp);
return 0;
}
@@ -962,6 +916,7 @@ xfs_dir3_leaf_compact(
int loglow; /* first leaf entry to log */
int to; /* target leaf index */
struct xfs_dir2_leaf_entry *ents;
+ struct xfs_inode *dp = args->dp;
leaf = bp->b_addr;
if (!leafhdr->stale)
@@ -970,7 +925,7 @@ xfs_dir3_leaf_compact(
/*
* Compress out the stale entries in place.
*/
- ents = xfs_dir3_leaf_ents_p(leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
for (from = to = 0, loglow = -1; from < leafhdr->count; from++) {
if (ents[from].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
continue;
@@ -991,10 +946,10 @@ xfs_dir3_leaf_compact(
leafhdr->count -= leafhdr->stale;
leafhdr->stale = 0;
- xfs_dir3_leaf_hdr_to_disk(leaf, leafhdr);
- xfs_dir3_leaf_log_header(args->trans, bp);
+ dp->d_ops->leaf_hdr_to_disk(leaf, leafhdr);
+ xfs_dir3_leaf_log_header(args->trans, dp, bp);
if (loglow != -1)
- xfs_dir3_leaf_log_ents(args->trans, bp, loglow, to - 1);
+ xfs_dir3_leaf_log_ents(args->trans, dp, bp, loglow, to - 1);
}
/*
@@ -1121,10 +1076,11 @@ xfs_dir3_leaf_log_bests(
*/
void
xfs_dir3_leaf_log_ents(
- xfs_trans_t *tp, /* transaction pointer */
- struct xfs_buf *bp, /* leaf buffer */
- int first, /* first entry to log */
- int last) /* last entry to log */
+ struct xfs_trans *tp,
+ struct xfs_inode *dp,
+ struct xfs_buf *bp,
+ int first,
+ int last)
{
xfs_dir2_leaf_entry_t *firstlep; /* pointer to first entry */
xfs_dir2_leaf_entry_t *lastlep; /* pointer to last entry */
@@ -1136,7 +1092,7 @@ xfs_dir3_leaf_log_ents(
leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
leaf->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC));
- ents = xfs_dir3_leaf_ents_p(leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
firstlep = &ents[first];
lastlep = &ents[last];
xfs_trans_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf),
@@ -1149,6 +1105,7 @@ xfs_dir3_leaf_log_ents(
void
xfs_dir3_leaf_log_header(
struct xfs_trans *tp,
+ struct xfs_inode *dp,
struct xfs_buf *bp)
{
struct xfs_dir2_leaf *leaf = bp->b_addr;
@@ -1159,7 +1116,7 @@ xfs_dir3_leaf_log_header(
leaf->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC));
xfs_trans_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf),
- xfs_dir3_leaf_hdr_size(leaf) - 1);
+ dp->d_ops->leaf_hdr_size - 1);
}
/*
@@ -1214,9 +1171,9 @@ xfs_dir2_leaf_lookup(
}
tp = args->trans;
dp = args->dp;
- xfs_dir3_leaf_check(dp->i_mount, lbp);
+ xfs_dir3_leaf_check(dp, lbp);
leaf = lbp->b_addr;
- ents = xfs_dir3_leaf_ents_p(leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
/*
* Get to the leaf entry and contained data entry address.
*/
@@ -1232,7 +1189,7 @@ xfs_dir2_leaf_lookup(
* Return the found inode number & CI name if appropriate
*/
args->inumber = be64_to_cpu(dep->inumber);
- args->filetype = xfs_dir3_dirent_get_ftype(dp->i_mount, dep);
+ args->filetype = dp->d_ops->data_get_ftype(dep);
error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
xfs_trans_brelse(tp, dbp);
xfs_trans_brelse(tp, lbp);
@@ -1279,9 +1236,9 @@ xfs_dir2_leaf_lookup_int(
*lbpp = lbp;
leaf = lbp->b_addr;
- xfs_dir3_leaf_check(mp, lbp);
- ents = xfs_dir3_leaf_ents_p(leaf);
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir3_leaf_check(dp, lbp);
+ ents = dp->d_ops->leaf_ents_p(leaf);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
/*
* Look for the first leaf entry with our hash value.
@@ -1415,9 +1372,9 @@ xfs_dir2_leaf_removename(
leaf = lbp->b_addr;
hdr = dbp->b_addr;
xfs_dir3_data_check(dp, dbp);
- bf = xfs_dir3_data_bestfree_p(hdr);
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
- ents = xfs_dir3_leaf_ents_p(leaf);
+ bf = dp->d_ops->data_bestfree_p(hdr);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
/*
* Point to the leaf entry, use that to point to the data entry.
*/
@@ -1433,27 +1390,27 @@ xfs_dir2_leaf_removename(
/*
* Mark the former data entry unused.
*/
- xfs_dir2_data_make_free(tp, dbp,
+ xfs_dir2_data_make_free(tp, dp, dbp,
(xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr),
- xfs_dir3_data_entsize(mp, dep->namelen), &needlog, &needscan);
+ dp->d_ops->data_entsize(dep->namelen), &needlog, &needscan);
/*
* We just mark the leaf entry stale by putting a null in it.
*/
leafhdr.stale++;
- xfs_dir3_leaf_hdr_to_disk(leaf, &leafhdr);
- xfs_dir3_leaf_log_header(tp, lbp);
+ dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir3_leaf_log_header(tp, dp, lbp);
lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
- xfs_dir3_leaf_log_ents(tp, lbp, index, index);
+ xfs_dir3_leaf_log_ents(tp, dp, lbp, index, index);
/*
* Scan the freespace in the data block again if necessary,
* log the data block header if necessary.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp, hdr, &needlog);
if (needlog)
- xfs_dir2_data_log_header(tp, dbp);
+ xfs_dir2_data_log_header(tp, dp, dbp);
/*
* If the longest freespace in the data block has changed,
* put the new value in the bests table and log that.
@@ -1467,7 +1424,7 @@ xfs_dir2_leaf_removename(
* If the data block is now empty then get rid of the data block.
*/
if (be16_to_cpu(bf[0].length) ==
- mp->m_dirblksize - xfs_dir3_data_entry_offset(hdr)) {
+ mp->m_dirblksize - dp->d_ops->data_entry_offset) {
ASSERT(db != mp->m_dirdatablk);
if ((error = xfs_dir2_shrink_inode(args, db, dbp))) {
/*
@@ -1478,7 +1435,7 @@ xfs_dir2_leaf_removename(
*/
if (error == ENOSPC && args->total == 0)
error = 0;
- xfs_dir3_leaf_check(mp, lbp);
+ xfs_dir3_leaf_check(dp, lbp);
return error;
}
dbp = NULL;
@@ -1512,7 +1469,7 @@ xfs_dir2_leaf_removename(
else if (db != mp->m_dirdatablk)
dbp = NULL;
- xfs_dir3_leaf_check(mp, lbp);
+ xfs_dir3_leaf_check(dp, lbp);
/*
* See if we can convert to block form.
*/
@@ -1547,7 +1504,7 @@ xfs_dir2_leaf_replace(
}
dp = args->dp;
leaf = lbp->b_addr;
- ents = xfs_dir3_leaf_ents_p(leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
/*
* Point to the leaf entry, get data address from it.
*/
@@ -1563,10 +1520,10 @@ xfs_dir2_leaf_replace(
* Put the new inode number in, log it.
*/
dep->inumber = cpu_to_be64(args->inumber);
- xfs_dir3_dirent_put_ftype(dp->i_mount, dep, args->filetype);
+ dp->d_ops->data_put_ftype(dep, args->filetype);
tp = args->trans;
- xfs_dir2_data_log_entry(tp, dbp, dep);
- xfs_dir3_leaf_check(dp->i_mount, lbp);
+ xfs_dir2_data_log_entry(tp, dp, dbp, dep);
+ xfs_dir3_leaf_check(dp, lbp);
xfs_trans_brelse(tp, lbp);
return 0;
}
@@ -1592,8 +1549,8 @@ xfs_dir2_leaf_search_hash(
struct xfs_dir3_icleaf_hdr leafhdr;
leaf = lbp->b_addr;
- ents = xfs_dir3_leaf_ents_p(leaf);
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+ ents = args->dp->d_ops->leaf_ents_p(leaf);
+ args->dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
/*
* Note, the table cannot be empty, so we have to go through the loop.
@@ -1661,12 +1618,12 @@ xfs_dir2_leaf_trim_data(
#ifdef DEBUG
{
struct xfs_dir2_data_hdr *hdr = dbp->b_addr;
- struct xfs_dir2_data_free *bf = xfs_dir3_data_bestfree_p(hdr);
+ struct xfs_dir2_data_free *bf = dp->d_ops->data_bestfree_p(hdr);
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC));
ASSERT(be16_to_cpu(bf[0].length) ==
- mp->m_dirblksize - xfs_dir3_data_entry_offset(hdr));
+ mp->m_dirblksize - dp->d_ops->data_entry_offset);
ASSERT(db == be32_to_cpu(ltp->bestcount) - 1);
}
#endif
@@ -1782,7 +1739,7 @@ xfs_dir2_node_to_leaf(
return 0;
lbp = state->path.blk[0].bp;
leaf = lbp->b_addr;
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
@@ -1794,7 +1751,7 @@ xfs_dir2_node_to_leaf(
if (error)
return error;
free = fbp->b_addr;
- xfs_dir3_free_hdr_from_disk(&freehdr, free);
+ dp->d_ops->free_hdr_from_disk(&freehdr, free);
ASSERT(!freehdr.firstdb);
@@ -1828,14 +1785,14 @@ xfs_dir2_node_to_leaf(
/*
* Set up the leaf bests table.
*/
- memcpy(xfs_dir2_leaf_bests_p(ltp), xfs_dir3_free_bests_p(mp, free),
+ memcpy(xfs_dir2_leaf_bests_p(ltp), dp->d_ops->free_bests_p(free),
freehdr.nvalid * sizeof(xfs_dir2_data_off_t));
- xfs_dir3_leaf_hdr_to_disk(leaf, &leafhdr);
- xfs_dir3_leaf_log_header(tp, lbp);
+ dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir3_leaf_log_header(tp, dp, lbp);
xfs_dir3_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
xfs_dir3_leaf_log_tail(tp, lbp);
- xfs_dir3_leaf_check(mp, lbp);
+ xfs_dir3_leaf_check(dp, lbp);
/*
* Get rid of the freespace block.
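The xfs_dir2_leaf.c hunks above all follow one pattern: helpers that took a struct xfs_mount and picked the on-disk layout internally (xfs_dir3_leaf_ents_p, xfs_dir3_data_entsize, xfs_dir3_leaf_hdr_from_disk, ...) are replaced by calls through the directory inode's operations vector, dp->d_ops, and the inode is threaded into the logging helpers so they can reach that vector. The standalone sketch below is purely illustrative — simplified names, placeholder sizes, not the kernel's struct xfs_dir_ops — but it shows the dispatch idea: one call site serves both the v2 layout and the v3 layout that carries a file-type byte.

/*
 * Illustrative sketch only: a per-inode ops vector selects the
 * format-specific size calculations, so callers never test the
 * filesystem feature bits themselves.
 */
#include <stdio.h>

struct dir_ops {
	int (*data_entsize)(int namelen);	/* bytes one data entry needs */
	int leaf_hdr_size;			/* placeholder header size */
};

/* "v2"-style entry: inumber + namelen + name + tag, 8-byte aligned */
static int v2_data_entsize(int namelen)
{
	int raw = 8 + 1 + namelen + 2;

	return (raw + 7) & ~7;
}

/* "v3"-style entry adds a one-byte file type after the name */
static int v3_data_entsize(int namelen)
{
	int raw = 8 + 1 + namelen + 1 + 2;

	return (raw + 7) & ~7;
}

static const struct dir_ops v2_ops = { v2_data_entsize, 16 };
static const struct dir_ops v3_ops = { v3_data_entsize, 64 };

/* stand-in for struct xfs_inode: each inode carries its ops vector */
struct inode {
	const struct dir_ops *d_ops;
};

static void add_entry(struct inode *dp, const char *name, int namelen)
{
	/* one call site works for both formats */
	printf("'%s' needs %d bytes, leaf header is %d bytes\n",
	       name, dp->d_ops->data_entsize(namelen),
	       dp->d_ops->leaf_hdr_size);
}

int main(void)
{
	struct inode v2 = { &v2_ops }, v3 = { &v3_ops };

	add_entry(&v2, "foo", 3);
	add_entry(&v3, "foo", 3);
	return 0;
}

Binding the choice to the inode once, instead of re-deciding per call from the mount, is what lets hunks like xfs_dir3_leaf_check_int() accept either a dp or (for verifiers) fall back to looking the ops up from the mount.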
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 4c3dba7ffb74..56369d4509d5 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -18,22 +18,21 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
-#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
@@ -55,21 +54,21 @@ static int xfs_dir2_node_addname_int(xfs_da_args_t *args,
* Check internal consistency of a leafn block.
*/
#ifdef DEBUG
-#define xfs_dir3_leaf_check(mp, bp) \
+#define xfs_dir3_leaf_check(dp, bp) \
do { \
- if (!xfs_dir3_leafn_check((mp), (bp))) \
+ if (!xfs_dir3_leafn_check((dp), (bp))) \
ASSERT(0); \
} while (0);
static bool
xfs_dir3_leafn_check(
- struct xfs_mount *mp,
+ struct xfs_inode *dp,
struct xfs_buf *bp)
{
struct xfs_dir2_leaf *leaf = bp->b_addr;
struct xfs_dir3_icleaf_hdr leafhdr;
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
if (leafhdr.magic == XFS_DIR3_LEAFN_MAGIC) {
struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
@@ -78,10 +77,10 @@ xfs_dir3_leafn_check(
} else if (leafhdr.magic != XFS_DIR2_LEAFN_MAGIC)
return false;
- return xfs_dir3_leaf_check_int(mp, &leafhdr, leaf);
+ return xfs_dir3_leaf_check_int(dp->i_mount, dp, &leafhdr, leaf);
}
#else
-#define xfs_dir3_leaf_check(mp, bp)
+#define xfs_dir3_leaf_check(dp, bp)
#endif
static bool
@@ -193,53 +192,6 @@ xfs_dir2_free_try_read(
return __xfs_dir3_free_read(tp, dp, fbno, -2, bpp);
}
-
-void
-xfs_dir3_free_hdr_from_disk(
- struct xfs_dir3_icfree_hdr *to,
- struct xfs_dir2_free *from)
-{
- if (from->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)) {
- to->magic = be32_to_cpu(from->hdr.magic);
- to->firstdb = be32_to_cpu(from->hdr.firstdb);
- to->nvalid = be32_to_cpu(from->hdr.nvalid);
- to->nused = be32_to_cpu(from->hdr.nused);
- } else {
- struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)from;
-
- to->magic = be32_to_cpu(hdr3->hdr.magic);
- to->firstdb = be32_to_cpu(hdr3->firstdb);
- to->nvalid = be32_to_cpu(hdr3->nvalid);
- to->nused = be32_to_cpu(hdr3->nused);
- }
-
- ASSERT(to->magic == XFS_DIR2_FREE_MAGIC ||
- to->magic == XFS_DIR3_FREE_MAGIC);
-}
-
-static void
-xfs_dir3_free_hdr_to_disk(
- struct xfs_dir2_free *to,
- struct xfs_dir3_icfree_hdr *from)
-{
- ASSERT(from->magic == XFS_DIR2_FREE_MAGIC ||
- from->magic == XFS_DIR3_FREE_MAGIC);
-
- if (from->magic == XFS_DIR2_FREE_MAGIC) {
- to->hdr.magic = cpu_to_be32(from->magic);
- to->hdr.firstdb = cpu_to_be32(from->firstdb);
- to->hdr.nvalid = cpu_to_be32(from->nvalid);
- to->hdr.nused = cpu_to_be32(from->nused);
- } else {
- struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)to;
-
- hdr3->hdr.magic = cpu_to_be32(from->magic);
- hdr3->firstdb = cpu_to_be32(from->firstdb);
- hdr3->nvalid = cpu_to_be32(from->nvalid);
- hdr3->nused = cpu_to_be32(from->nused);
- }
-}
-
static int
xfs_dir3_free_get_buf(
struct xfs_trans *tp,
@@ -277,7 +229,7 @@ xfs_dir3_free_get_buf(
uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_uuid);
} else
hdr.magic = XFS_DIR2_FREE_MAGIC;
- xfs_dir3_free_hdr_to_disk(bp->b_addr, &hdr);
+ dp->d_ops->free_hdr_to_disk(bp->b_addr, &hdr);
*bpp = bp;
return 0;
}
@@ -288,6 +240,7 @@ xfs_dir3_free_get_buf(
STATIC void
xfs_dir2_free_log_bests(
struct xfs_trans *tp,
+ struct xfs_inode *dp,
struct xfs_buf *bp,
int first, /* first entry to log */
int last) /* last entry to log */
@@ -296,7 +249,7 @@ xfs_dir2_free_log_bests(
__be16 *bests;
free = bp->b_addr;
- bests = xfs_dir3_free_bests_p(tp->t_mountp, free);
+ bests = dp->d_ops->free_bests_p(free);
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC) ||
free->hdr.magic == cpu_to_be32(XFS_DIR3_FREE_MAGIC));
xfs_trans_log_buf(tp, bp,
@@ -311,6 +264,7 @@ xfs_dir2_free_log_bests(
static void
xfs_dir2_free_log_header(
struct xfs_trans *tp,
+ struct xfs_inode *dp,
struct xfs_buf *bp)
{
#ifdef DEBUG
@@ -320,7 +274,7 @@ xfs_dir2_free_log_header(
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC) ||
free->hdr.magic == cpu_to_be32(XFS_DIR3_FREE_MAGIC));
#endif
- xfs_trans_log_buf(tp, bp, 0, xfs_dir3_free_hdr_size(tp->t_mountp) - 1);
+ xfs_trans_log_buf(tp, bp, 0, dp->d_ops->free_hdr_size - 1);
}
/*
@@ -369,7 +323,7 @@ xfs_dir2_leaf_to_node(
return error;
free = fbp->b_addr;
- xfs_dir3_free_hdr_from_disk(&freehdr, free);
+ dp->d_ops->free_hdr_from_disk(&freehdr, free);
leaf = lbp->b_addr;
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
ASSERT(be32_to_cpu(ltp->bestcount) <=
@@ -380,7 +334,7 @@ xfs_dir2_leaf_to_node(
* Count active entries.
*/
from = xfs_dir2_leaf_bests_p(ltp);
- to = xfs_dir3_free_bests_p(mp, free);
+ to = dp->d_ops->free_bests_p(free);
for (i = n = 0; i < be32_to_cpu(ltp->bestcount); i++, from++, to++) {
if ((off = be16_to_cpu(*from)) != NULLDATAOFF)
n++;
@@ -393,9 +347,9 @@ xfs_dir2_leaf_to_node(
freehdr.nused = n;
freehdr.nvalid = be32_to_cpu(ltp->bestcount);
- xfs_dir3_free_hdr_to_disk(fbp->b_addr, &freehdr);
- xfs_dir2_free_log_bests(tp, fbp, 0, freehdr.nvalid - 1);
- xfs_dir2_free_log_header(tp, fbp);
+ dp->d_ops->free_hdr_to_disk(fbp->b_addr, &freehdr);
+ xfs_dir2_free_log_bests(tp, dp, fbp, 0, freehdr.nvalid - 1);
+ xfs_dir2_free_log_header(tp, dp, fbp);
/*
* Converting the leaf to a leafnode is just a matter of changing the
@@ -409,8 +363,8 @@ xfs_dir2_leaf_to_node(
leaf->hdr.info.magic = cpu_to_be16(XFS_DIR3_LEAFN_MAGIC);
lbp->b_ops = &xfs_dir3_leafn_buf_ops;
xfs_trans_buf_set_type(tp, lbp, XFS_BLFT_DIR_LEAFN_BUF);
- xfs_dir3_leaf_log_header(tp, lbp);
- xfs_dir3_leaf_check(mp, lbp);
+ xfs_dir3_leaf_log_header(tp, dp, lbp);
+ xfs_dir3_leaf_check(dp, lbp);
return 0;
}
@@ -443,8 +397,8 @@ xfs_dir2_leafn_add(
mp = dp->i_mount;
tp = args->trans;
leaf = bp->b_addr;
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
- ents = xfs_dir3_leaf_ents_p(leaf);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
/*
* Quick check just to make sure we are not going to index
@@ -460,7 +414,7 @@ xfs_dir2_leafn_add(
* a compact.
*/
- if (leafhdr.count == xfs_dir3_max_leaf_ents(mp, leaf)) {
+ if (leafhdr.count == dp->d_ops->leaf_max_ents(mp)) {
if (!leafhdr.stale)
return XFS_ERROR(ENOSPC);
compact = leafhdr.stale > 1;
@@ -498,30 +452,30 @@ xfs_dir2_leafn_add(
lep->address = cpu_to_be32(xfs_dir2_db_off_to_dataptr(mp,
args->blkno, args->index));
- xfs_dir3_leaf_hdr_to_disk(leaf, &leafhdr);
- xfs_dir3_leaf_log_header(tp, bp);
- xfs_dir3_leaf_log_ents(tp, bp, lfloglow, lfloghigh);
- xfs_dir3_leaf_check(mp, bp);
+ dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir3_leaf_log_header(tp, dp, bp);
+ xfs_dir3_leaf_log_ents(tp, dp, bp, lfloglow, lfloghigh);
+ xfs_dir3_leaf_check(dp, bp);
return 0;
}
#ifdef DEBUG
static void
xfs_dir2_free_hdr_check(
- struct xfs_mount *mp,
+ struct xfs_inode *dp,
struct xfs_buf *bp,
xfs_dir2_db_t db)
{
struct xfs_dir3_icfree_hdr hdr;
- xfs_dir3_free_hdr_from_disk(&hdr, bp->b_addr);
+ dp->d_ops->free_hdr_from_disk(&hdr, bp->b_addr);
- ASSERT((hdr.firstdb % xfs_dir3_free_max_bests(mp)) == 0);
+ ASSERT((hdr.firstdb % dp->d_ops->free_max_bests(dp->i_mount)) == 0);
ASSERT(hdr.firstdb <= db);
ASSERT(db < hdr.firstdb + hdr.nvalid);
}
#else
-#define xfs_dir2_free_hdr_check(mp, dp, db)
+#define xfs_dir2_free_hdr_check(dp, bp, db)
#endif /* DEBUG */
/*
@@ -530,6 +484,7 @@ xfs_dir2_free_hdr_check(
*/
xfs_dahash_t /* hash value */
xfs_dir2_leafn_lasthash(
+ struct xfs_inode *dp,
struct xfs_buf *bp, /* leaf buffer */
int *count) /* count of entries in leaf */
{
@@ -537,7 +492,7 @@ xfs_dir2_leafn_lasthash(
struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
@@ -547,7 +502,7 @@ xfs_dir2_leafn_lasthash(
if (!leafhdr.count)
return 0;
- ents = xfs_dir3_leaf_ents_p(leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
return be32_to_cpu(ents[leafhdr.count - 1].hashval);
}
@@ -584,10 +539,10 @@ xfs_dir2_leafn_lookup_for_addname(
tp = args->trans;
mp = dp->i_mount;
leaf = bp->b_addr;
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
- ents = xfs_dir3_leaf_ents_p(leaf);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
- xfs_dir3_leaf_check(mp, bp);
+ xfs_dir3_leaf_check(dp, bp);
ASSERT(leafhdr.count > 0);
/*
@@ -605,7 +560,7 @@ xfs_dir2_leafn_lookup_for_addname(
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC) ||
free->hdr.magic == cpu_to_be32(XFS_DIR3_FREE_MAGIC));
}
- length = xfs_dir3_data_entsize(mp, args->namelen);
+ length = dp->d_ops->data_entsize(args->namelen);
/*
* Loop over leaf entries with the right hash value.
*/
@@ -637,7 +592,7 @@ xfs_dir2_leafn_lookup_for_addname(
* Convert the data block to the free block
* holding its freespace information.
*/
- newfdb = xfs_dir2_db_to_fdb(mp, newdb);
+ newfdb = dp->d_ops->db_to_fdb(mp, newdb);
/*
* If it's not the one we have in hand, read it in.
*/
@@ -655,16 +610,16 @@ xfs_dir2_leafn_lookup_for_addname(
return error;
free = curbp->b_addr;
- xfs_dir2_free_hdr_check(mp, curbp, curdb);
+ xfs_dir2_free_hdr_check(dp, curbp, curdb);
}
/*
* Get the index for our entry.
*/
- fi = xfs_dir2_db_to_fdindex(mp, curdb);
+ fi = dp->d_ops->db_to_fdindex(mp, curdb);
/*
* If it has room, return it.
*/
- bests = xfs_dir3_free_bests_p(mp, free);
+ bests = dp->d_ops->free_bests_p(free);
if (unlikely(bests[fi] == cpu_to_be16(NULLDATAOFF))) {
XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int",
XFS_ERRLEVEL_LOW, mp);
@@ -734,10 +689,10 @@ xfs_dir2_leafn_lookup_for_entry(
tp = args->trans;
mp = dp->i_mount;
leaf = bp->b_addr;
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
- ents = xfs_dir3_leaf_ents_p(leaf);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
- xfs_dir3_leaf_check(mp, bp);
+ xfs_dir3_leaf_check(dp, bp);
ASSERT(leafhdr.count > 0);
/*
@@ -816,7 +771,7 @@ xfs_dir2_leafn_lookup_for_entry(
xfs_trans_brelse(tp, state->extrablk.bp);
args->cmpresult = cmp;
args->inumber = be64_to_cpu(dep->inumber);
- args->filetype = xfs_dir3_dirent_get_ftype(mp, dep);
+ args->filetype = dp->d_ops->data_get_ftype(dep);
*indexp = index;
state->extravalid = 1;
state->extrablk.bp = curbp;
@@ -907,7 +862,7 @@ xfs_dir3_leafn_moveents(
if (start_d < dhdr->count) {
memmove(&dents[start_d + count], &dents[start_d],
(dhdr->count - start_d) * sizeof(xfs_dir2_leaf_entry_t));
- xfs_dir3_leaf_log_ents(tp, bp_d, start_d + count,
+ xfs_dir3_leaf_log_ents(tp, args->dp, bp_d, start_d + count,
count + dhdr->count - 1);
}
/*
@@ -929,7 +884,8 @@ xfs_dir3_leafn_moveents(
*/
memcpy(&dents[start_d], &sents[start_s],
count * sizeof(xfs_dir2_leaf_entry_t));
- xfs_dir3_leaf_log_ents(tp, bp_d, start_d, start_d + count - 1);
+ xfs_dir3_leaf_log_ents(tp, args->dp, bp_d,
+ start_d, start_d + count - 1);
/*
* If there are source entries after the ones we copied,
@@ -938,7 +894,8 @@ xfs_dir3_leafn_moveents(
if (start_s + count < shdr->count) {
memmove(&sents[start_s], &sents[start_s + count],
count * sizeof(xfs_dir2_leaf_entry_t));
- xfs_dir3_leaf_log_ents(tp, bp_s, start_s, start_s + count - 1);
+ xfs_dir3_leaf_log_ents(tp, args->dp, bp_s,
+ start_s, start_s + count - 1);
}
/*
@@ -956,6 +913,7 @@ xfs_dir3_leafn_moveents(
*/
int /* sort order */
xfs_dir2_leafn_order(
+ struct xfs_inode *dp,
struct xfs_buf *leaf1_bp, /* leaf1 buffer */
struct xfs_buf *leaf2_bp) /* leaf2 buffer */
{
@@ -966,10 +924,10 @@ xfs_dir2_leafn_order(
struct xfs_dir3_icleaf_hdr hdr1;
struct xfs_dir3_icleaf_hdr hdr2;
- xfs_dir3_leaf_hdr_from_disk(&hdr1, leaf1);
- xfs_dir3_leaf_hdr_from_disk(&hdr2, leaf2);
- ents1 = xfs_dir3_leaf_ents_p(leaf1);
- ents2 = xfs_dir3_leaf_ents_p(leaf2);
+ dp->d_ops->leaf_hdr_from_disk(&hdr1, leaf1);
+ dp->d_ops->leaf_hdr_from_disk(&hdr2, leaf2);
+ ents1 = dp->d_ops->leaf_ents_p(leaf1);
+ ents2 = dp->d_ops->leaf_ents_p(leaf2);
if (hdr1.count > 0 && hdr2.count > 0 &&
(be32_to_cpu(ents2[0].hashval) < be32_to_cpu(ents1[0].hashval) ||
@@ -1007,12 +965,13 @@ xfs_dir2_leafn_rebalance(
struct xfs_dir2_leaf_entry *ents2;
struct xfs_dir3_icleaf_hdr hdr1;
struct xfs_dir3_icleaf_hdr hdr2;
+ struct xfs_inode *dp = state->args->dp;
args = state->args;
/*
* If the block order is wrong, swap the arguments.
*/
- if ((swap = xfs_dir2_leafn_order(blk1->bp, blk2->bp))) {
+ if ((swap = xfs_dir2_leafn_order(dp, blk1->bp, blk2->bp))) {
xfs_da_state_blk_t *tmp; /* temp for block swap */
tmp = blk1;
@@ -1021,10 +980,10 @@ xfs_dir2_leafn_rebalance(
}
leaf1 = blk1->bp->b_addr;
leaf2 = blk2->bp->b_addr;
- xfs_dir3_leaf_hdr_from_disk(&hdr1, leaf1);
- xfs_dir3_leaf_hdr_from_disk(&hdr2, leaf2);
- ents1 = xfs_dir3_leaf_ents_p(leaf1);
- ents2 = xfs_dir3_leaf_ents_p(leaf2);
+ dp->d_ops->leaf_hdr_from_disk(&hdr1, leaf1);
+ dp->d_ops->leaf_hdr_from_disk(&hdr2, leaf2);
+ ents1 = dp->d_ops->leaf_ents_p(leaf1);
+ ents2 = dp->d_ops->leaf_ents_p(leaf2);
oldsum = hdr1.count + hdr2.count;
#if defined(DEBUG) || defined(XFS_WARN)
@@ -1070,13 +1029,13 @@ xfs_dir2_leafn_rebalance(
ASSERT(hdr1.stale + hdr2.stale == oldstale);
/* log the changes made when moving the entries */
- xfs_dir3_leaf_hdr_to_disk(leaf1, &hdr1);
- xfs_dir3_leaf_hdr_to_disk(leaf2, &hdr2);
- xfs_dir3_leaf_log_header(args->trans, blk1->bp);
- xfs_dir3_leaf_log_header(args->trans, blk2->bp);
+ dp->d_ops->leaf_hdr_to_disk(leaf1, &hdr1);
+ dp->d_ops->leaf_hdr_to_disk(leaf2, &hdr2);
+ xfs_dir3_leaf_log_header(args->trans, dp, blk1->bp);
+ xfs_dir3_leaf_log_header(args->trans, dp, blk2->bp);
- xfs_dir3_leaf_check(args->dp->i_mount, blk1->bp);
- xfs_dir3_leaf_check(args->dp->i_mount, blk2->bp);
+ xfs_dir3_leaf_check(dp, blk1->bp);
+ xfs_dir3_leaf_check(dp, blk2->bp);
/*
* Mark whether we're inserting into the old or new leaf.
@@ -1097,11 +1056,11 @@ xfs_dir2_leafn_rebalance(
* Finally sanity check just to make sure we are not returning a
* negative index
*/
- if(blk2->index < 0) {
+ if (blk2->index < 0) {
state->inleaf = 1;
blk2->index = 0;
- xfs_alert(args->dp->i_mount,
- "%s: picked the wrong leaf? reverting original leaf: blk1->index %d\n",
+ xfs_alert(dp->i_mount,
+ "%s: picked the wrong leaf? reverting original leaf: blk1->index %d",
__func__, blk1->index);
}
}
@@ -1120,17 +1079,17 @@ xfs_dir3_data_block_free(
int logfree = 0;
__be16 *bests;
struct xfs_dir3_icfree_hdr freehdr;
+ struct xfs_inode *dp = args->dp;
- xfs_dir3_free_hdr_from_disk(&freehdr, free);
-
- bests = xfs_dir3_free_bests_p(tp->t_mountp, free);
+ dp->d_ops->free_hdr_from_disk(&freehdr, free);
+ bests = dp->d_ops->free_bests_p(free);
if (hdr) {
/*
* Data block is not empty, just set the free entry to the new
* value.
*/
bests[findex] = cpu_to_be16(longest);
- xfs_dir2_free_log_bests(tp, fbp, findex, findex);
+ xfs_dir2_free_log_bests(tp, dp, fbp, findex, findex);
return 0;
}
@@ -1157,8 +1116,8 @@ xfs_dir3_data_block_free(
logfree = 1;
}
- xfs_dir3_free_hdr_to_disk(free, &freehdr);
- xfs_dir2_free_log_header(tp, fbp);
+ dp->d_ops->free_hdr_to_disk(free, &freehdr);
+ xfs_dir2_free_log_header(tp, dp, fbp);
/*
* If there are no useful entries left in the block, get rid of the
@@ -1182,7 +1141,7 @@ xfs_dir3_data_block_free(
/* Log the free entry that changed, unless we got rid of it. */
if (logfree)
- xfs_dir2_free_log_bests(tp, fbp, findex, findex);
+ xfs_dir2_free_log_bests(tp, dp, fbp, findex, findex);
return 0;
}
@@ -1222,8 +1181,8 @@ xfs_dir2_leafn_remove(
tp = args->trans;
mp = dp->i_mount;
leaf = bp->b_addr;
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
- ents = xfs_dir3_leaf_ents_p(leaf);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
/*
* Point to the entry we're removing.
@@ -1243,11 +1202,11 @@ xfs_dir2_leafn_remove(
* Log the leaf block changes.
*/
leafhdr.stale++;
- xfs_dir3_leaf_hdr_to_disk(leaf, &leafhdr);
- xfs_dir3_leaf_log_header(tp, bp);
+ dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir3_leaf_log_header(tp, dp, bp);
lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
- xfs_dir3_leaf_log_ents(tp, bp, index, index);
+ xfs_dir3_leaf_log_ents(tp, dp, bp, index, index);
/*
* Make the data entry free. Keep track of the longest freespace
@@ -1256,19 +1215,19 @@ xfs_dir2_leafn_remove(
dbp = dblk->bp;
hdr = dbp->b_addr;
dep = (xfs_dir2_data_entry_t *)((char *)hdr + off);
- bf = xfs_dir3_data_bestfree_p(hdr);
+ bf = dp->d_ops->data_bestfree_p(hdr);
longest = be16_to_cpu(bf[0].length);
needlog = needscan = 0;
- xfs_dir2_data_make_free(tp, dbp, off,
- xfs_dir3_data_entsize(mp, dep->namelen), &needlog, &needscan);
+ xfs_dir2_data_make_free(tp, dp, dbp, off,
+ dp->d_ops->data_entsize(dep->namelen), &needlog, &needscan);
/*
* Rescan the data block freespaces for bestfree.
* Log the data block header if needed.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp, hdr, &needlog);
if (needlog)
- xfs_dir2_data_log_header(tp, dbp);
+ xfs_dir2_data_log_header(tp, dp, dbp);
xfs_dir3_data_check(dp, dbp);
/*
* If the longest data block freespace changes, need to update
@@ -1285,7 +1244,7 @@ xfs_dir2_leafn_remove(
* Convert the data block number to a free block,
* read in the free block.
*/
- fdb = xfs_dir2_db_to_fdb(mp, db);
+ fdb = dp->d_ops->db_to_fdb(mp, db);
error = xfs_dir2_free_read(tp, dp, xfs_dir2_db_to_da(mp, fdb),
&fbp);
if (error)
@@ -1294,22 +1253,22 @@ xfs_dir2_leafn_remove(
#ifdef DEBUG
{
struct xfs_dir3_icfree_hdr freehdr;
- xfs_dir3_free_hdr_from_disk(&freehdr, free);
- ASSERT(freehdr.firstdb == xfs_dir3_free_max_bests(mp) *
+ dp->d_ops->free_hdr_from_disk(&freehdr, free);
+ ASSERT(freehdr.firstdb == dp->d_ops->free_max_bests(mp) *
(fdb - XFS_DIR2_FREE_FIRSTDB(mp)));
}
#endif
/*
* Calculate which entry we need to fix.
*/
- findex = xfs_dir2_db_to_fdindex(mp, db);
+ findex = dp->d_ops->db_to_fdindex(mp, db);
longest = be16_to_cpu(bf[0].length);
/*
* If the data block is now empty we can get rid of it
* (usually).
*/
if (longest == mp->m_dirblksize -
- xfs_dir3_data_entry_offset(hdr)) {
+ dp->d_ops->data_entry_offset) {
/*
* Try to punch out the data block.
*/
@@ -1336,12 +1295,12 @@ xfs_dir2_leafn_remove(
return error;
}
- xfs_dir3_leaf_check(mp, bp);
+ xfs_dir3_leaf_check(dp, bp);
/*
* Return indication of whether this leaf block is empty enough
* to justify trying to join it with a neighbor.
*/
- *rval = (xfs_dir3_leaf_hdr_size(leaf) +
+ *rval = (dp->d_ops->leaf_hdr_size +
(uint)sizeof(ents[0]) * (leafhdr.count - leafhdr.stale)) <
mp->m_dir_magicpct;
return 0;
@@ -1360,13 +1319,14 @@ xfs_dir2_leafn_split(
xfs_dablk_t blkno; /* new leaf block number */
int error; /* error return value */
xfs_mount_t *mp; /* filesystem mount point */
+ struct xfs_inode *dp;
/*
* Allocate space for a new leaf node.
*/
args = state->args;
- mp = args->dp->i_mount;
- ASSERT(args != NULL);
+ dp = args->dp;
+ mp = dp->i_mount;
ASSERT(oldblk->magic == XFS_DIR2_LEAFN_MAGIC);
error = xfs_da_grow_inode(args, &blkno);
if (error) {
@@ -1401,10 +1361,10 @@ xfs_dir2_leafn_split(
/*
* Update last hashval in each block since we added the name.
*/
- oldblk->hashval = xfs_dir2_leafn_lasthash(oldblk->bp, NULL);
- newblk->hashval = xfs_dir2_leafn_lasthash(newblk->bp, NULL);
- xfs_dir3_leaf_check(mp, oldblk->bp);
- xfs_dir3_leaf_check(mp, newblk->bp);
+ oldblk->hashval = xfs_dir2_leafn_lasthash(dp, oldblk->bp, NULL);
+ newblk->hashval = xfs_dir2_leafn_lasthash(dp, newblk->bp, NULL);
+ xfs_dir3_leaf_check(dp, oldblk->bp);
+ xfs_dir3_leaf_check(dp, newblk->bp);
return error;
}
@@ -1434,6 +1394,7 @@ xfs_dir2_leafn_toosmall(
int rval; /* result from path_shift */
struct xfs_dir3_icleaf_hdr leafhdr;
struct xfs_dir2_leaf_entry *ents;
+ struct xfs_inode *dp = state->args->dp;
/*
* Check for the degenerate case of the block being over 50% full.
@@ -1442,12 +1403,12 @@ xfs_dir2_leafn_toosmall(
*/
blk = &state->path.blk[state->path.active - 1];
leaf = blk->bp->b_addr;
- xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
- ents = xfs_dir3_leaf_ents_p(leaf);
- xfs_dir3_leaf_check(state->args->dp->i_mount, blk->bp);
+ dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir3_leaf_check(dp, blk->bp);
count = leafhdr.count - leafhdr.stale;
- bytes = xfs_dir3_leaf_hdr_size(leaf) + count * sizeof(ents[0]);
+ bytes = dp->d_ops->leaf_hdr_size + count * sizeof(ents[0]);
if (bytes > (state->blocksize >> 1)) {
/*
* Blk over 50%, don't try to join.
@@ -1492,7 +1453,7 @@ xfs_dir2_leafn_toosmall(
/*
* Read the sibling leaf block.
*/
- error = xfs_dir3_leafn_read(state->args->trans, state->args->dp,
+ error = xfs_dir3_leafn_read(state->args->trans, dp,
blkno, -1, &bp);
if (error)
return error;
@@ -1504,8 +1465,8 @@ xfs_dir2_leafn_toosmall(
bytes = state->blocksize - (state->blocksize >> 2);
leaf = bp->b_addr;
- xfs_dir3_leaf_hdr_from_disk(&hdr2, leaf);
- ents = xfs_dir3_leaf_ents_p(leaf);
+ dp->d_ops->leaf_hdr_from_disk(&hdr2, leaf);
+ ents = dp->d_ops->leaf_ents_p(leaf);
count += hdr2.count - hdr2.stale;
bytes -= count * sizeof(ents[0]);
@@ -1559,6 +1520,7 @@ xfs_dir2_leafn_unbalance(
struct xfs_dir3_icleaf_hdr drophdr;
struct xfs_dir2_leaf_entry *sents;
struct xfs_dir2_leaf_entry *dents;
+ struct xfs_inode *dp = state->args->dp;
args = state->args;
ASSERT(drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
@@ -1566,10 +1528,10 @@ xfs_dir2_leafn_unbalance(
drop_leaf = drop_blk->bp->b_addr;
save_leaf = save_blk->bp->b_addr;
- xfs_dir3_leaf_hdr_from_disk(&savehdr, save_leaf);
- xfs_dir3_leaf_hdr_from_disk(&drophdr, drop_leaf);
- sents = xfs_dir3_leaf_ents_p(save_leaf);
- dents = xfs_dir3_leaf_ents_p(drop_leaf);
+ dp->d_ops->leaf_hdr_from_disk(&savehdr, save_leaf);
+ dp->d_ops->leaf_hdr_from_disk(&drophdr, drop_leaf);
+ sents = dp->d_ops->leaf_ents_p(save_leaf);
+ dents = dp->d_ops->leaf_ents_p(drop_leaf);
/*
* If there are any stale leaf entries, take this opportunity
@@ -1584,7 +1546,7 @@ xfs_dir2_leafn_unbalance(
* Move the entries from drop to the appropriate end of save.
*/
drop_blk->hashval = be32_to_cpu(dents[drophdr.count - 1].hashval);
- if (xfs_dir2_leafn_order(save_blk->bp, drop_blk->bp))
+ if (xfs_dir2_leafn_order(dp, save_blk->bp, drop_blk->bp))
xfs_dir3_leafn_moveents(args, drop_blk->bp, &drophdr, dents, 0,
save_blk->bp, &savehdr, sents, 0,
drophdr.count);
@@ -1595,13 +1557,13 @@ xfs_dir2_leafn_unbalance(
save_blk->hashval = be32_to_cpu(sents[savehdr.count - 1].hashval);
/* log the changes made when moving the entries */
- xfs_dir3_leaf_hdr_to_disk(save_leaf, &savehdr);
- xfs_dir3_leaf_hdr_to_disk(drop_leaf, &drophdr);
- xfs_dir3_leaf_log_header(args->trans, save_blk->bp);
- xfs_dir3_leaf_log_header(args->trans, drop_blk->bp);
+ dp->d_ops->leaf_hdr_to_disk(save_leaf, &savehdr);
+ dp->d_ops->leaf_hdr_to_disk(drop_leaf, &drophdr);
+ xfs_dir3_leaf_log_header(args->trans, dp, save_blk->bp);
+ xfs_dir3_leaf_log_header(args->trans, dp, drop_blk->bp);
- xfs_dir3_leaf_check(args->dp->i_mount, save_blk->bp);
- xfs_dir3_leaf_check(args->dp->i_mount, drop_blk->bp);
+ xfs_dir3_leaf_check(dp, save_blk->bp);
+ xfs_dir3_leaf_check(dp, drop_blk->bp);
}
/*
@@ -1712,7 +1674,7 @@ xfs_dir2_node_addname_int(
dp = args->dp;
mp = dp->i_mount;
tp = args->trans;
- length = xfs_dir3_data_entsize(mp, args->namelen);
+ length = dp->d_ops->data_entsize(args->namelen);
/*
* If we came in with a freespace block that means that lookup
* found an entry with our hash value. This is the freespace
@@ -1726,8 +1688,8 @@ xfs_dir2_node_addname_int(
ifbno = fblk->blkno;
free = fbp->b_addr;
findex = fblk->index;
- bests = xfs_dir3_free_bests_p(mp, free);
- xfs_dir3_free_hdr_from_disk(&freehdr, free);
+ bests = dp->d_ops->free_bests_p(free);
+ dp->d_ops->free_hdr_from_disk(&freehdr, free);
/*
* This means the free entry showed that the data block had
@@ -1819,8 +1781,8 @@ xfs_dir2_node_addname_int(
* and the freehdr are actually initialised if they are placed
* there, so we have to do it here to avoid warnings. Blech.
*/
- bests = xfs_dir3_free_bests_p(mp, free);
- xfs_dir3_free_hdr_from_disk(&freehdr, free);
+ bests = dp->d_ops->free_bests_p(free);
+ dp->d_ops->free_hdr_from_disk(&freehdr, free);
if (be16_to_cpu(bests[findex]) != NULLDATAOFF &&
be16_to_cpu(bests[findex]) >= length)
dbno = freehdr.firstdb + findex;
@@ -1871,7 +1833,7 @@ xfs_dir2_node_addname_int(
* Get the freespace block corresponding to the data block
* that was just allocated.
*/
- fbno = xfs_dir2_db_to_fdb(mp, dbno);
+ fbno = dp->d_ops->db_to_fdb(mp, dbno);
error = xfs_dir2_free_try_read(tp, dp,
xfs_dir2_db_to_da(mp, fbno),
&fbp);
@@ -1888,12 +1850,12 @@ xfs_dir2_node_addname_int(
if (error)
return error;
- if (unlikely(xfs_dir2_db_to_fdb(mp, dbno) != fbno)) {
+ if (unlikely(dp->d_ops->db_to_fdb(mp, dbno) != fbno)) {
xfs_alert(mp,
"%s: dir ino %llu needed freesp block %lld for\n"
" data block %lld, got %lld ifbno %llu lastfbno %d",
__func__, (unsigned long long)dp->i_ino,
- (long long)xfs_dir2_db_to_fdb(mp, dbno),
+ (long long)dp->d_ops->db_to_fdb(mp, dbno),
(long long)dbno, (long long)fbno,
(unsigned long long)ifbno, lastfbno);
if (fblk) {
@@ -1918,30 +1880,30 @@ xfs_dir2_node_addname_int(
if (error)
return error;
free = fbp->b_addr;
- bests = xfs_dir3_free_bests_p(mp, free);
- xfs_dir3_free_hdr_from_disk(&freehdr, free);
+ bests = dp->d_ops->free_bests_p(free);
+ dp->d_ops->free_hdr_from_disk(&freehdr, free);
/*
* Remember the first slot as our empty slot.
*/
freehdr.firstdb = (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) *
- xfs_dir3_free_max_bests(mp);
+ dp->d_ops->free_max_bests(mp);
} else {
free = fbp->b_addr;
- bests = xfs_dir3_free_bests_p(mp, free);
- xfs_dir3_free_hdr_from_disk(&freehdr, free);
+ bests = dp->d_ops->free_bests_p(free);
+ dp->d_ops->free_hdr_from_disk(&freehdr, free);
}
/*
* Set the freespace block index from the data block number.
*/
- findex = xfs_dir2_db_to_fdindex(mp, dbno);
+ findex = dp->d_ops->db_to_fdindex(mp, dbno);
/*
* If it's after the end of the current entries in the
* freespace block, extend that table.
*/
if (findex >= freehdr.nvalid) {
- ASSERT(findex < xfs_dir3_free_max_bests(mp));
+ ASSERT(findex < dp->d_ops->free_max_bests(mp));
freehdr.nvalid = findex + 1;
/*
* Tag new entry so nused will go up.
@@ -1954,8 +1916,8 @@ xfs_dir2_node_addname_int(
*/
if (bests[findex] == cpu_to_be16(NULLDATAOFF)) {
freehdr.nused++;
- xfs_dir3_free_hdr_to_disk(fbp->b_addr, &freehdr);
- xfs_dir2_free_log_header(tp, fbp);
+ dp->d_ops->free_hdr_to_disk(fbp->b_addr, &freehdr);
+ xfs_dir2_free_log_header(tp, dp, fbp);
}
/*
* Update the real value in the table.
@@ -1963,7 +1925,7 @@ xfs_dir2_node_addname_int(
* change again.
*/
hdr = dbp->b_addr;
- bf = xfs_dir3_data_bestfree_p(hdr);
+ bf = dp->d_ops->data_bestfree_p(hdr);
bests[findex] = bf[0].length;
logfree = 1;
}
@@ -1985,7 +1947,7 @@ xfs_dir2_node_addname_int(
if (error)
return error;
hdr = dbp->b_addr;
- bf = xfs_dir3_data_bestfree_p(hdr);
+ bf = dp->d_ops->data_bestfree_p(hdr);
logfree = 0;
}
ASSERT(be16_to_cpu(bf[0].length) >= length);
@@ -1998,7 +1960,7 @@ xfs_dir2_node_addname_int(
/*
* Mark the first part of the unused space, inuse for us.
*/
- xfs_dir2_data_use_free(tp, dbp, dup,
+ xfs_dir2_data_use_free(tp, dp, dbp, dup,
(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr), length,
&needlog, &needscan);
/*
@@ -2008,24 +1970,24 @@ xfs_dir2_node_addname_int(
dep->inumber = cpu_to_be64(args->inumber);
dep->namelen = args->namelen;
memcpy(dep->name, args->name, dep->namelen);
- xfs_dir3_dirent_put_ftype(mp, dep, args->filetype);
- tagp = xfs_dir3_data_entry_tag_p(mp, dep);
+ dp->d_ops->data_put_ftype(dep, args->filetype);
+ tagp = dp->d_ops->data_entry_tag_p(dep);
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
- xfs_dir2_data_log_entry(tp, dbp, dep);
+ xfs_dir2_data_log_entry(tp, dp, dbp, dep);
/*
* Rescan the block for bestfree if needed.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp, hdr, &needlog);
/*
* Log the data block header if needed.
*/
if (needlog)
- xfs_dir2_data_log_header(tp, dbp);
+ xfs_dir2_data_log_header(tp, dp, dbp);
/*
* If the freespace entry is now wrong, update it.
*/
- bests = xfs_dir3_free_bests_p(mp, free); /* gcc is so stupid */
+ bests = dp->d_ops->free_bests_p(free); /* gcc is so stupid */
if (be16_to_cpu(bests[findex]) != be16_to_cpu(bf[0].length)) {
bests[findex] = bf[0].length;
logfree = 1;
@@ -2034,7 +1996,7 @@ xfs_dir2_node_addname_int(
* Log the freespace entry if needed.
*/
if (logfree)
- xfs_dir2_free_log_bests(tp, fbp, findex, findex);
+ xfs_dir2_free_log_bests(tp, dp, fbp, findex, findex);
/*
* Return the data block and offset in args, then drop the data block.
*/
@@ -2212,7 +2174,7 @@ xfs_dir2_node_replace(
blk = &state->path.blk[state->path.active - 1];
ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
leaf = blk->bp->b_addr;
- ents = xfs_dir3_leaf_ents_p(leaf);
+ ents = args->dp->d_ops->leaf_ents_p(leaf);
lep = &ents[blk->index];
ASSERT(state->extravalid);
/*
@@ -2229,8 +2191,9 @@ xfs_dir2_node_replace(
* Fill in the new inode number and log the entry.
*/
dep->inumber = cpu_to_be64(inum);
- xfs_dir3_dirent_put_ftype(state->mp, dep, args->filetype);
- xfs_dir2_data_log_entry(args->trans, state->extrablk.bp, dep);
+ args->dp->d_ops->data_put_ftype(dep, args->filetype);
+ xfs_dir2_data_log_entry(args->trans, args->dp,
+ state->extrablk.bp, dep);
rval = 0;
}
/*
@@ -2285,7 +2248,7 @@ xfs_dir2_node_trim_free(
if (!bp)
return 0;
free = bp->b_addr;
- xfs_dir3_free_hdr_from_disk(&freehdr, free);
+ dp->d_ops->free_hdr_from_disk(&freehdr, free);
/*
* If there are used entries, there's nothing to do.
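Besides the same dp->d_ops conversion, xfs_dir2_node.c drops its open-coded free-block header converters (xfs_dir3_free_hdr_from_disk/_to_disk) in favour of dp->d_ops->free_hdr_*. The conversion itself is a plain endian swap between the big-endian on-disk header and a native-endian in-core copy. The userspace sketch below mirrors that shape only; the struct layouts, the <endian.h> helpers, and the magic value are assumptions for illustration — the kernel code uses be32_to_cpu()/cpu_to_be32() on the real xfs_dir2_free and xfs_dir3_icfree_hdr types.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* on-disk header: fixed-width big-endian fields */
struct disk_free_hdr {
	uint32_t magic;
	uint32_t firstdb;
	uint32_t nvalid;
	uint32_t nused;
};

/* in-core header: native-endian, convenient for arithmetic */
struct icfree_hdr {
	uint32_t magic;
	uint32_t firstdb;
	uint32_t nvalid;
	uint32_t nused;
};

static void free_hdr_from_disk(struct icfree_hdr *to,
			       const struct disk_free_hdr *from)
{
	to->magic   = be32toh(from->magic);
	to->firstdb = be32toh(from->firstdb);
	to->nvalid  = be32toh(from->nvalid);
	to->nused   = be32toh(from->nused);
}

static void free_hdr_to_disk(struct disk_free_hdr *to,
			     const struct icfree_hdr *from)
{
	to->magic   = htobe32(from->magic);
	to->firstdb = htobe32(from->firstdb);
	to->nvalid  = htobe32(from->nvalid);
	to->nused   = htobe32(from->nused);
}

int main(void)
{
	/* illustrative magic and counters, stored in disk (BE) order */
	struct disk_free_hdr disk = { htobe32(0x58443246), 0,
				      htobe32(4), htobe32(3) };
	struct icfree_hdr ic;

	free_hdr_from_disk(&ic, &disk);
	printf("magic 0x%x, nvalid %u, nused %u\n",
	       ic.magic, ic.nvalid, ic.nused);

	ic.nused++;			/* e.g. a new entry went in */
	free_hdr_to_disk(&disk, &ic);	/* write back in disk order */
	return 0;
}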
diff --git a/fs/xfs/xfs_dir2_priv.h b/fs/xfs/xfs_dir2_priv.h
index 1bad84c40829..8b9d2281f85b 100644
--- a/fs/xfs/xfs_dir2_priv.h
+++ b/fs/xfs/xfs_dir2_priv.h
@@ -59,7 +59,8 @@ extern int xfs_dir3_data_readahead(struct xfs_trans *tp, struct xfs_inode *dp,
extern struct xfs_dir2_data_free *
xfs_dir2_data_freeinsert(struct xfs_dir2_data_hdr *hdr,
- struct xfs_dir2_data_unused *dup, int *loghead);
+ struct xfs_dir2_data_free *bf, struct xfs_dir2_data_unused *dup,
+ int *loghead);
extern int xfs_dir3_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
struct xfs_buf **bpp);
@@ -76,9 +77,9 @@ extern void xfs_dir3_leaf_compact_x1(struct xfs_dir3_icleaf_hdr *leafhdr,
int *lowstalep, int *highstalep, int *lowlogp, int *highlogp);
extern int xfs_dir3_leaf_get_buf(struct xfs_da_args *args, xfs_dir2_db_t bno,
struct xfs_buf **bpp, __uint16_t magic);
-extern void xfs_dir3_leaf_log_ents(struct xfs_trans *tp, struct xfs_buf *bp,
- int first, int last);
-extern void xfs_dir3_leaf_log_header(struct xfs_trans *tp,
+extern void xfs_dir3_leaf_log_ents(struct xfs_trans *tp, struct xfs_inode *dp,
+ struct xfs_buf *bp, int first, int last);
+extern void xfs_dir3_leaf_log_header(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_buf *bp);
extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args);
extern int xfs_dir2_leaf_removename(struct xfs_da_args *args);
@@ -93,21 +94,18 @@ xfs_dir3_leaf_find_entry(struct xfs_dir3_icleaf_hdr *leafhdr,
int lowstale, int highstale, int *lfloglow, int *lfloghigh);
extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);
-extern void xfs_dir3_leaf_hdr_from_disk(struct xfs_dir3_icleaf_hdr *to,
- struct xfs_dir2_leaf *from);
-extern void xfs_dir3_leaf_hdr_to_disk(struct xfs_dir2_leaf *to,
- struct xfs_dir3_icleaf_hdr *from);
-extern bool xfs_dir3_leaf_check_int(struct xfs_mount *mp,
+extern bool xfs_dir3_leaf_check_int(struct xfs_mount *mp, struct xfs_inode *dp,
struct xfs_dir3_icleaf_hdr *hdr, struct xfs_dir2_leaf *leaf);
/* xfs_dir2_node.c */
extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
struct xfs_buf *lbp);
-extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_buf *bp, int *count);
+extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_inode *dp,
+ struct xfs_buf *bp, int *count);
extern int xfs_dir2_leafn_lookup_int(struct xfs_buf *bp,
struct xfs_da_args *args, int *indexp,
struct xfs_da_state *state);
-extern int xfs_dir2_leafn_order(struct xfs_buf *leaf1_bp,
+extern int xfs_dir2_leafn_order(struct xfs_inode *dp, struct xfs_buf *leaf1_bp,
struct xfs_buf *leaf2_bp);
extern int xfs_dir2_leafn_split(struct xfs_da_state *state,
struct xfs_da_state_blk *oldblk, struct xfs_da_state_blk *newblk);
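The xfs_dir2_priv.h changes are the prototype side of the same conversion: xfs_dir3_leaf_log_ents(), xfs_dir3_leaf_log_header(), xfs_dir2_leafn_lasthash() and xfs_dir2_leafn_order() gain a struct xfs_inode argument because the byte ranges and entry layouts they operate on now come from dp->d_ops. A minimal sketch of that calling shape follows, with hypothetical stand-in types; it models the free-header case, where the logged range is simply 0 .. hdr_size - 1.

#include <stdio.h>

struct dir_ops { int free_hdr_size; };
struct inode   { const struct dir_ops *d_ops; };
struct buf     { int dummy; /* buffer state elided */ };
struct trans   { int dummy; /* transaction state elided */ };

/* stand-in for xfs_trans_log_buf(): record a dirty byte range */
static void trans_log_buf(struct trans *tp, struct buf *bp, int first, int last)
{
	(void)tp; (void)bp;
	printf("log bytes %d..%d\n", first, last);
}

/* mirrors the new (tp, dp, bp) shape of the logging helpers */
static void free_log_header(struct trans *tp, struct inode *dp, struct buf *bp)
{
	trans_log_buf(tp, bp, 0, dp->d_ops->free_hdr_size - 1);
}

int main(void)
{
	struct dir_ops ops = { 64 };	/* placeholder header size */
	struct inode dp = { &ops };
	struct trans tp = { 0 };
	struct buf bp = { 0 };

	free_log_header(&tp, &dp, &bp);
	return 0;
}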
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 8f84153e98a8..c4e50c6ed584 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -18,23 +18,23 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
+#include "xfs_trans.h"
+#include "xfs_dinode.h"
/*
* Directory file type support functions
@@ -119,9 +119,9 @@ xfs_dir2_sf_getdents(
* mp->m_dirdatablk.
*/
dot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
- xfs_dir3_data_dot_offset(mp));
+ dp->d_ops->data_dot_offset);
dotdot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
- xfs_dir3_data_dotdot_offset(mp));
+ dp->d_ops->data_dotdot_offset);
/*
* Put . entry unless we're starting past it.
@@ -136,7 +136,7 @@ xfs_dir2_sf_getdents(
* Put .. entry unless we're starting past it.
*/
if (ctx->pos <= dotdot_offset) {
- ino = xfs_dir2_sf_get_parent_ino(sfp);
+ ino = dp->d_ops->sf_get_parent_ino(sfp);
ctx->pos = dotdot_offset & 0x7fffffff;
if (!dir_emit(ctx, "..", 2, ino, DT_DIR))
return 0;
@@ -153,17 +153,17 @@ xfs_dir2_sf_getdents(
xfs_dir2_sf_get_offset(sfep));
if (ctx->pos > off) {
- sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
+ sfep = dp->d_ops->sf_nextentry(sfp, sfep);
continue;
}
- ino = xfs_dir3_sfe_get_ino(mp, sfp, sfep);
- filetype = xfs_dir3_sfe_get_ftype(mp, sfp, sfep);
+ ino = dp->d_ops->sf_get_ino(sfp, sfep);
+ filetype = dp->d_ops->sf_get_ftype(sfep);
ctx->pos = off & 0x7fffffff;
if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
xfs_dir3_get_dtype(mp, filetype)))
return 0;
- sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
+ sfep = dp->d_ops->sf_nextentry(sfp, sfep);
}
ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
@@ -213,7 +213,7 @@ xfs_dir2_block_getdents(
* Set up values for the loop.
*/
btp = xfs_dir2_block_tail_p(mp, hdr);
- ptr = (char *)xfs_dir3_data_entry_p(hdr);
+ ptr = (char *)dp->d_ops->data_entry_p(hdr);
endptr = (char *)xfs_dir2_block_leaf_p(btp);
/*
@@ -237,7 +237,7 @@ xfs_dir2_block_getdents(
/*
* Bump pointer for the next iteration.
*/
- ptr += xfs_dir3_data_entsize(mp, dep->namelen);
+ ptr += dp->d_ops->data_entsize(dep->namelen);
/*
* The entry is before the desired starting point, skip it.
*/
@@ -248,7 +248,7 @@ xfs_dir2_block_getdents(
(char *)dep - (char *)hdr);
ctx->pos = cook & 0x7fffffff;
- filetype = xfs_dir3_dirent_get_ftype(mp, dep);
+ filetype = dp->d_ops->data_get_ftype(dep);
/*
* If it didn't fit, set the final offset to here & return.
*/
@@ -578,13 +578,13 @@ xfs_dir2_leaf_getdents(
/*
* Find our position in the block.
*/
- ptr = (char *)xfs_dir3_data_entry_p(hdr);
+ ptr = (char *)dp->d_ops->data_entry_p(hdr);
byteoff = xfs_dir2_byte_to_off(mp, curoff);
/*
* Skip past the header.
*/
if (byteoff == 0)
- curoff += xfs_dir3_data_entry_offset(hdr);
+ curoff += dp->d_ops->data_entry_offset;
/*
* Skip past entries until we reach our offset.
*/
@@ -601,7 +601,7 @@ xfs_dir2_leaf_getdents(
}
dep = (xfs_dir2_data_entry_t *)ptr;
length =
- xfs_dir3_data_entsize(mp, dep->namelen);
+ dp->d_ops->data_entsize(dep->namelen);
ptr += length;
}
/*
@@ -632,8 +632,8 @@ xfs_dir2_leaf_getdents(
}
dep = (xfs_dir2_data_entry_t *)ptr;
- length = xfs_dir3_data_entsize(mp, dep->namelen);
- filetype = xfs_dir3_dirent_get_ftype(mp, dep);
+ length = dp->d_ops->data_entsize(dep->namelen);
+ filetype = dp->d_ops->data_get_ftype(dep);
ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
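xfs_dir2_readdir.c switches the getdents paths to the same per-inode ops, including the shortform helpers (sf_nextentry, sf_get_ino, sf_get_ftype) whose open-coded versions are removed from xfs_dir2_sf.c below. Underneath, those helpers walk packed variable-length records whose size depends on whether the directory stores a file-type byte after the name. The sketch below is a simplified, endian-neutral illustration of that walk — one-byte inode numbers and ad-hoc names, not the real on-disk shortform layout.

#include <stdint.h>
#include <stdio.h>

struct sf_entry {			/* packed, variable length */
	uint8_t namelen;
	uint8_t name[];			/* followed by [ftype] + ino byte */
};

static int has_ftype;			/* 0 here: no file-type byte stored */

static unsigned sf_entsize(const struct sf_entry *e)
{
	return 1 + e->namelen + (has_ftype ? 1 : 0) + 1;
}

static const struct sf_entry *sf_nextentry(const struct sf_entry *e)
{
	return (const struct sf_entry *)((const uint8_t *)e + sf_entsize(e));
}

static unsigned sf_get_ino(const struct sf_entry *e)
{
	/* inode number sits after the name and the optional ftype byte */
	return e->name[e->namelen + (has_ftype ? 1 : 0)];
}

int main(void)
{
	/* two packed entries: "a" -> ino 7, "bc" -> ino 9 */
	uint8_t buf[] = { 1, 'a', 7,  2, 'b', 'c', 9 };
	const struct sf_entry *e = (const struct sf_entry *)buf;
	int i;

	for (i = 0; i < 2; i++, e = sf_nextentry(e))
		printf("%.*s -> %u\n", e->namelen,
		       (const char *)e->name, sf_get_ino(e));
	return 0;
}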
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index 3ef6d402084c..aafc6e46cb58 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -17,22 +17,22 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_error.h"
-#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_trace.h"
+#include "xfs_dinode.h"
/*
* Prototypes for internal functions.
@@ -57,89 +57,6 @@ static void xfs_dir2_sf_toino8(xfs_da_args_t *args);
#endif /* XFS_BIG_INUMS */
/*
- * Inode numbers in short-form directories can come in two versions,
- * either 4 bytes or 8 bytes wide. These helpers deal with the
- * two forms transparently by looking at the headers i8count field.
- *
- * For 64-bit inode number the most significant byte must be zero.
- */
-static xfs_ino_t
-xfs_dir2_sf_get_ino(
- struct xfs_dir2_sf_hdr *hdr,
- xfs_dir2_inou_t *from)
-{
- if (hdr->i8count)
- return get_unaligned_be64(&from->i8.i) & 0x00ffffffffffffffULL;
- else
- return get_unaligned_be32(&from->i4.i);
-}
-
-static void
-xfs_dir2_sf_put_ino(
- struct xfs_dir2_sf_hdr *hdr,
- xfs_dir2_inou_t *to,
- xfs_ino_t ino)
-{
- ASSERT((ino & 0xff00000000000000ULL) == 0);
-
- if (hdr->i8count)
- put_unaligned_be64(ino, &to->i8.i);
- else
- put_unaligned_be32(ino, &to->i4.i);
-}
-
-xfs_ino_t
-xfs_dir2_sf_get_parent_ino(
- struct xfs_dir2_sf_hdr *hdr)
-{
- return xfs_dir2_sf_get_ino(hdr, &hdr->parent);
-}
-
-void
-xfs_dir2_sf_put_parent_ino(
- struct xfs_dir2_sf_hdr *hdr,
- xfs_ino_t ino)
-{
- xfs_dir2_sf_put_ino(hdr, &hdr->parent, ino);
-}
-
-/*
- * In short-form directory entries the inode numbers are stored at variable
- * offset behind the entry name. If the entry stores a filetype value, then it
- * sits between the name and the inode number. Hence the inode numbers may only
- * be accessed through the helpers below.
- */
-static xfs_dir2_inou_t *
-xfs_dir3_sfe_inop(
- struct xfs_mount *mp,
- struct xfs_dir2_sf_entry *sfep)
-{
- __uint8_t *ptr = &sfep->name[sfep->namelen];
- if (xfs_sb_version_hasftype(&mp->m_sb))
- ptr++;
- return (xfs_dir2_inou_t *)ptr;
-}
-
-xfs_ino_t
-xfs_dir3_sfe_get_ino(
- struct xfs_mount *mp,
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep)
-{
- return xfs_dir2_sf_get_ino(hdr, xfs_dir3_sfe_inop(mp, sfep));
-}
-
-void
-xfs_dir3_sfe_put_ino(
- struct xfs_mount *mp,
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep,
- xfs_ino_t ino)
-{
- xfs_dir2_sf_put_ino(hdr, xfs_dir3_sfe_inop(mp, sfep), ino);
-}
-
-/*
* Given a block directory (dp/block), calculate its size as a shortform (sf)
* directory and a header for the sf directory, if it will fit it the
* space currently present in the inode. If it won't fit, the output
@@ -226,7 +143,7 @@ xfs_dir2_block_sfsize(
*/
sfhp->count = count;
sfhp->i8count = i8count;
- xfs_dir2_sf_put_parent_ino(sfhp, parent);
+ dp->d_ops->sf_put_parent_ino(sfhp, parent);
return size;
}
@@ -293,7 +210,7 @@ xfs_dir2_block_to_sf(
* Set up to loop over the block's entries.
*/
btp = xfs_dir2_block_tail_p(mp, hdr);
- ptr = (char *)xfs_dir3_data_entry_p(hdr);
+ ptr = (char *)dp->d_ops->data_entry_p(hdr);
endptr = (char *)xfs_dir2_block_leaf_p(btp);
sfep = xfs_dir2_sf_firstentry(sfp);
/*
@@ -321,7 +238,7 @@ xfs_dir2_block_to_sf(
else if (dep->namelen == 2 &&
dep->name[0] == '.' && dep->name[1] == '.')
ASSERT(be64_to_cpu(dep->inumber) ==
- xfs_dir2_sf_get_parent_ino(sfp));
+ dp->d_ops->sf_get_parent_ino(sfp));
/*
* Normal entry, copy it into shortform.
*/
@@ -331,14 +248,14 @@ xfs_dir2_block_to_sf(
(xfs_dir2_data_aoff_t)
((char *)dep - (char *)hdr));
memcpy(sfep->name, dep->name, dep->namelen);
- xfs_dir3_sfe_put_ino(mp, sfp, sfep,
- be64_to_cpu(dep->inumber));
- xfs_dir3_sfe_put_ftype(mp, sfp, sfep,
- xfs_dir3_dirent_get_ftype(mp, dep));
+ dp->d_ops->sf_put_ino(sfp, sfep,
+ be64_to_cpu(dep->inumber));
+ dp->d_ops->sf_put_ftype(sfep,
+ dp->d_ops->data_get_ftype(dep));
- sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
+ sfep = dp->d_ops->sf_nextentry(sfp, sfep);
}
- ptr += xfs_dir3_data_entsize(mp, dep->namelen);
+ ptr += dp->d_ops->data_entsize(dep->namelen);
}
ASSERT((char *)sfep - (char *)sfp == size);
xfs_dir2_sf_check(args);
@@ -389,7 +306,7 @@ xfs_dir2_sf_addname(
/*
* Compute entry (and change in) size.
*/
- add_entsize = xfs_dir3_sf_entsize(dp->i_mount, sfp, args->namelen);
+ add_entsize = dp->d_ops->sf_entsize(sfp, args->namelen);
incr_isize = add_entsize;
objchange = 0;
#if XFS_BIG_INUMS
@@ -483,8 +400,7 @@ xfs_dir2_sf_addname_easy(
/*
* Grow the in-inode space.
*/
- xfs_idata_realloc(dp,
- xfs_dir3_sf_entsize(dp->i_mount, sfp, args->namelen),
+ xfs_idata_realloc(dp, dp->d_ops->sf_entsize(sfp, args->namelen),
XFS_DATA_FORK);
/*
* Need to set up again due to realloc of the inode data.
@@ -497,8 +413,8 @@ xfs_dir2_sf_addname_easy(
sfep->namelen = args->namelen;
xfs_dir2_sf_put_offset(sfep, offset);
memcpy(sfep->name, args->name, sfep->namelen);
- xfs_dir3_sfe_put_ino(dp->i_mount, sfp, sfep, args->inumber);
- xfs_dir3_sfe_put_ftype(dp->i_mount, sfp, sfep, args->filetype);
+ dp->d_ops->sf_put_ino(sfp, sfep, args->inumber);
+ dp->d_ops->sf_put_ftype(sfep, args->filetype);
/*
* Update the header and inode.
@@ -557,13 +473,13 @@ xfs_dir2_sf_addname_hard(
* to insert the new entry.
* If it's going to end up at the end then oldsfep will point there.
*/
- for (offset = xfs_dir3_data_first_offset(mp),
+ for (offset = dp->d_ops->data_first_offset,
oldsfep = xfs_dir2_sf_firstentry(oldsfp),
- add_datasize = xfs_dir3_data_entsize(mp, args->namelen),
+ add_datasize = dp->d_ops->data_entsize(args->namelen),
eof = (char *)oldsfep == &buf[old_isize];
!eof;
- offset = new_offset + xfs_dir3_data_entsize(mp, oldsfep->namelen),
- oldsfep = xfs_dir3_sf_nextentry(mp, oldsfp, oldsfep),
+ offset = new_offset + dp->d_ops->data_entsize(oldsfep->namelen),
+ oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep),
eof = (char *)oldsfep == &buf[old_isize]) {
new_offset = xfs_dir2_sf_get_offset(oldsfep);
if (offset + add_datasize <= new_offset)
@@ -592,8 +508,8 @@ xfs_dir2_sf_addname_hard(
sfep->namelen = args->namelen;
xfs_dir2_sf_put_offset(sfep, offset);
memcpy(sfep->name, args->name, sfep->namelen);
- xfs_dir3_sfe_put_ino(mp, sfp, sfep, args->inumber);
- xfs_dir3_sfe_put_ftype(mp, sfp, sfep, args->filetype);
+ dp->d_ops->sf_put_ino(sfp, sfep, args->inumber);
+ dp->d_ops->sf_put_ftype(sfep, args->filetype);
sfp->count++;
#if XFS_BIG_INUMS
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange)
@@ -603,7 +519,7 @@ xfs_dir2_sf_addname_hard(
* If there's more left to copy, do that.
*/
if (!eof) {
- sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
+ sfep = dp->d_ops->sf_nextentry(sfp, sfep);
memcpy(sfep, oldsfep, old_isize - nbytes);
}
kmem_free(buf);
@@ -639,8 +555,8 @@ xfs_dir2_sf_addname_pick(
mp = dp->i_mount;
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
- size = xfs_dir3_data_entsize(mp, args->namelen);
- offset = xfs_dir3_data_first_offset(mp);
+ size = dp->d_ops->data_entsize(args->namelen);
+ offset = dp->d_ops->data_first_offset;
sfep = xfs_dir2_sf_firstentry(sfp);
holefit = 0;
/*
@@ -652,8 +568,8 @@ xfs_dir2_sf_addname_pick(
if (!holefit)
holefit = offset + size <= xfs_dir2_sf_get_offset(sfep);
offset = xfs_dir2_sf_get_offset(sfep) +
- xfs_dir3_data_entsize(mp, sfep->namelen);
- sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
+ dp->d_ops->data_entsize(sfep->namelen);
+ sfep = dp->d_ops->sf_nextentry(sfp, sfep);
}
/*
* Calculate data bytes used excluding the new entry, if this
@@ -713,21 +629,20 @@ xfs_dir2_sf_check(
mp = dp->i_mount;
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
- offset = xfs_dir3_data_first_offset(mp);
- ino = xfs_dir2_sf_get_parent_ino(sfp);
+ offset = dp->d_ops->data_first_offset;
+ ino = dp->d_ops->sf_get_parent_ino(sfp);
i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
i < sfp->count;
- i++, sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep)) {
+ i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
ASSERT(xfs_dir2_sf_get_offset(sfep) >= offset);
- ino = xfs_dir3_sfe_get_ino(mp, sfp, sfep);
+ ino = dp->d_ops->sf_get_ino(sfp, sfep);
i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
offset =
xfs_dir2_sf_get_offset(sfep) +
- xfs_dir3_data_entsize(mp, sfep->namelen);
- ASSERT(xfs_dir3_sfe_get_ftype(mp, sfp, sfep) <
- XFS_DIR3_FT_MAX);
+ dp->d_ops->data_entsize(sfep->namelen);
+ ASSERT(dp->d_ops->sf_get_ftype(sfep) < XFS_DIR3_FT_MAX);
}
ASSERT(i8count == sfp->i8count);
ASSERT(XFS_BIG_INUMS || i8count == 0);
@@ -783,7 +698,7 @@ xfs_dir2_sf_create(
/*
* Now can put in the inode number, since i8count is set.
*/
- xfs_dir2_sf_put_parent_ino(sfp, pino);
+ dp->d_ops->sf_put_parent_ino(sfp, pino);
sfp->count = 0;
dp->i_d.di_size = size;
xfs_dir2_sf_check(args);
@@ -838,7 +753,7 @@ xfs_dir2_sf_lookup(
*/
if (args->namelen == 2 &&
args->name[0] == '.' && args->name[1] == '.') {
- args->inumber = xfs_dir2_sf_get_parent_ino(sfp);
+ args->inumber = dp->d_ops->sf_get_parent_ino(sfp);
args->cmpresult = XFS_CMP_EXACT;
args->filetype = XFS_DIR3_FT_DIR;
return XFS_ERROR(EEXIST);
@@ -848,7 +763,7 @@ xfs_dir2_sf_lookup(
*/
ci_sfep = NULL;
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
- i++, sfep = xfs_dir3_sf_nextentry(dp->i_mount, sfp, sfep)) {
+ i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
/*
* Compare name and if it's an exact match, return the inode
* number. If it's the first case-insensitive match, store the
@@ -858,10 +773,8 @@ xfs_dir2_sf_lookup(
sfep->namelen);
if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
args->cmpresult = cmp;
- args->inumber = xfs_dir3_sfe_get_ino(dp->i_mount,
- sfp, sfep);
- args->filetype = xfs_dir3_sfe_get_ftype(dp->i_mount,
- sfp, sfep);
+ args->inumber = dp->d_ops->sf_get_ino(sfp, sfep);
+ args->filetype = dp->d_ops->sf_get_ftype(sfep);
if (cmp == XFS_CMP_EXACT)
return XFS_ERROR(EEXIST);
ci_sfep = sfep;
@@ -917,10 +830,10 @@ xfs_dir2_sf_removename(
* Find the one we're deleting.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
- i++, sfep = xfs_dir3_sf_nextentry(dp->i_mount, sfp, sfep)) {
+ i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
XFS_CMP_EXACT) {
- ASSERT(xfs_dir3_sfe_get_ino(dp->i_mount, sfp, sfep) ==
+ ASSERT(dp->d_ops->sf_get_ino(sfp, sfep) ==
args->inumber);
break;
}
@@ -934,7 +847,7 @@ xfs_dir2_sf_removename(
* Calculate sizes.
*/
byteoff = (int)((char *)sfep - (char *)sfp);
- entsize = xfs_dir3_sf_entsize(dp->i_mount, sfp, args->namelen);
+ entsize = dp->d_ops->sf_entsize(sfp, args->namelen);
newsize = oldsize - entsize;
/*
* Copy the part if any after the removed entry, sliding it down.
@@ -1041,28 +954,25 @@ xfs_dir2_sf_replace(
if (args->namelen == 2 &&
args->name[0] == '.' && args->name[1] == '.') {
#if XFS_BIG_INUMS || defined(DEBUG)
- ino = xfs_dir2_sf_get_parent_ino(sfp);
+ ino = dp->d_ops->sf_get_parent_ino(sfp);
ASSERT(args->inumber != ino);
#endif
- xfs_dir2_sf_put_parent_ino(sfp, args->inumber);
+ dp->d_ops->sf_put_parent_ino(sfp, args->inumber);
}
/*
* Normal entry, look for the name.
*/
else {
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
- i++, sfep = xfs_dir3_sf_nextentry(dp->i_mount, sfp, sfep)) {
+ i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
XFS_CMP_EXACT) {
#if XFS_BIG_INUMS || defined(DEBUG)
- ino = xfs_dir3_sfe_get_ino(dp->i_mount,
- sfp, sfep);
+ ino = dp->d_ops->sf_get_ino(sfp, sfep);
ASSERT(args->inumber != ino);
#endif
- xfs_dir3_sfe_put_ino(dp->i_mount, sfp, sfep,
- args->inumber);
- xfs_dir3_sfe_put_ftype(dp->i_mount, sfp, sfep,
- args->filetype);
+ dp->d_ops->sf_put_ino(sfp, sfep, args->inumber);
+ dp->d_ops->sf_put_ftype(sfep, args->filetype);
break;
}
}
@@ -1165,22 +1075,21 @@ xfs_dir2_sf_toino4(
*/
sfp->count = oldsfp->count;
sfp->i8count = 0;
- xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp));
+ dp->d_ops->sf_put_parent_ino(sfp, dp->d_ops->sf_get_parent_ino(oldsfp));
/*
* Copy the entries field by field.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
oldsfep = xfs_dir2_sf_firstentry(oldsfp);
i < sfp->count;
- i++, sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep),
- oldsfep = xfs_dir3_sf_nextentry(mp, oldsfp, oldsfep)) {
+ i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep),
+ oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
sfep->offset = oldsfep->offset;
memcpy(sfep->name, oldsfep->name, sfep->namelen);
- xfs_dir3_sfe_put_ino(mp, sfp, sfep,
- xfs_dir3_sfe_get_ino(mp, oldsfp, oldsfep));
- xfs_dir3_sfe_put_ftype(mp, sfp, sfep,
- xfs_dir3_sfe_get_ftype(mp, oldsfp, oldsfep));
+ dp->d_ops->sf_put_ino(sfp, sfep,
+ dp->d_ops->sf_get_ino(oldsfp, oldsfep));
+ dp->d_ops->sf_put_ftype(sfep, dp->d_ops->sf_get_ftype(oldsfep));
}
/*
* Clean up the inode.
@@ -1244,22 +1153,21 @@ xfs_dir2_sf_toino8(
*/
sfp->count = oldsfp->count;
sfp->i8count = 1;
- xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp));
+ dp->d_ops->sf_put_parent_ino(sfp, dp->d_ops->sf_get_parent_ino(oldsfp));
/*
* Copy the entries field by field.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
oldsfep = xfs_dir2_sf_firstentry(oldsfp);
i < sfp->count;
- i++, sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep),
- oldsfep = xfs_dir3_sf_nextentry(mp, oldsfp, oldsfep)) {
+ i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep),
+ oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
sfep->offset = oldsfep->offset;
memcpy(sfep->name, oldsfep->name, sfep->namelen);
- xfs_dir3_sfe_put_ino(mp, sfp, sfep,
- xfs_dir3_sfe_get_ino(mp, oldsfp, oldsfep));
- xfs_dir3_sfe_put_ftype(mp, sfp, sfep,
- xfs_dir3_sfe_get_ftype(mp, oldsfp, oldsfep));
+ dp->d_ops->sf_put_ino(sfp, sfep,
+ dp->d_ops->sf_get_ino(oldsfp, oldsfep));
+ dp->d_ops->sf_put_ftype(sfep, dp->d_ops->sf_get_ftype(oldsfep));
}
/*
* Clean up the inode.
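
The fs/xfs/xfs_dir2_sf.c hunks above replace the mount-based shortform helpers
(xfs_dir3_sfe_get_ino, xfs_dir3_sf_nextentry, and friends), which tested the
ftype feature bit on every call, with calls through a per-inode operations
vector, dp->d_ops. A minimal user-space sketch of that dispatch pattern
follows; the struct and function names (dir_ops, fmt_v2, fmt_v3) and the entry
sizes are illustrative, not the kernel's.

/*
 * Sketch of the ops-vector pattern: the format decision is made once
 * and captured in a table of function pointers, instead of every
 * helper re-checking a feature bit.  Names and sizes are illustrative.
 */
#include <stdio.h>

struct dir_ops {
	int (*entsize)(int namelen);	/* bytes used by one sf entry */
};

/* v2-style entry: header + name + inode number (sizes illustrative) */
static int v2_entsize(int namelen) { return 1 + 2 + namelen + 4; }

/* v3-style entry: one extra byte for the file type */
static int v3_entsize(int namelen) { return 1 + 2 + namelen + 1 + 4; }

static const struct dir_ops fmt_v2 = { .entsize = v2_entsize };
static const struct dir_ops fmt_v3 = { .entsize = v3_entsize };

int main(void)
{
	int has_ftype = 1;		/* stand-in for the sb feature bit */
	const struct dir_ops *ops = has_ftype ? &fmt_v3 : &fmt_v2;

	/* callers no longer need the mount or the feature bit */
	printf("entry size for a 5-char name: %d\n", ops->entsize(5));
	return 0;
}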
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 45560ee1a4ba..8367d6dc18c9 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -17,22 +17,21 @@
*/
#include "xfs.h"
#include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_quota.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_btree.h"
#include "xfs_inode.h"
+#include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
#include "xfs_trace.h"
+#include "xfs_log.h"
STATIC int
xfs_trim_extents(
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 1ee776d477c3..6b1e695caf0e 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -18,28 +18,28 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
+#include "xfs_alloc.h"
+#include "xfs_quota.h"
#include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_attr.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_bmap_btree.h"
/*
* Lock order:
@@ -292,118 +292,6 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
-STATIC bool
-xfs_dquot_buf_verify_crc(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
-{
- struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
- int ndquots;
- int i;
-
- if (!xfs_sb_version_hascrc(&mp->m_sb))
- return true;
-
- /*
- * if we are in log recovery, the quota subsystem has not been
- * initialised so we have no quotainfo structure. In that case, we need
- * to manually calculate the number of dquots in the buffer.
- */
- if (mp->m_quotainfo)
- ndquots = mp->m_quotainfo->qi_dqperchunk;
- else
- ndquots = xfs_qm_calc_dquots_per_chunk(mp,
- XFS_BB_TO_FSB(mp, bp->b_length));
-
- for (i = 0; i < ndquots; i++, d++) {
- if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
- XFS_DQUOT_CRC_OFF))
- return false;
- if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
- return false;
- }
- return true;
-}
-
-STATIC bool
-xfs_dquot_buf_verify(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
-{
- struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
- xfs_dqid_t id = 0;
- int ndquots;
- int i;
-
- /*
- * if we are in log recovery, the quota subsystem has not been
- * initialised so we have no quotainfo structure. In that case, we need
- * to manually calculate the number of dquots in the buffer.
- */
- if (mp->m_quotainfo)
- ndquots = mp->m_quotainfo->qi_dqperchunk;
- else
- ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);
-
- /*
- * On the first read of the buffer, verify that each dquot is valid.
- * We don't know what the id of the dquot is supposed to be, just that
- * they should be increasing monotonically within the buffer. If the
- * first id is corrupt, then it will fail on the second dquot in the
- * buffer so corruptions could point to the wrong dquot in this case.
- */
- for (i = 0; i < ndquots; i++) {
- struct xfs_disk_dquot *ddq;
- int error;
-
- ddq = &d[i].dd_diskdq;
-
- if (i == 0)
- id = be32_to_cpu(ddq->d_id);
-
- error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
- "xfs_dquot_buf_verify");
- if (error)
- return false;
- }
- return true;
-}
-
-static void
-xfs_dquot_buf_read_verify(
- struct xfs_buf *bp)
-{
- struct xfs_mount *mp = bp->b_target->bt_mount;
-
- if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
- XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
- xfs_buf_ioerror(bp, EFSCORRUPTED);
- }
-}
-
-/*
- * we don't calculate the CRC here as that is done when the dquot is flushed to
- * the buffer after the update is done. This ensures that the dquot in the
- * buffer always has an up-to-date CRC value.
- */
-void
-xfs_dquot_buf_write_verify(
- struct xfs_buf *bp)
-{
- struct xfs_mount *mp = bp->b_target->bt_mount;
-
- if (!xfs_dquot_buf_verify(mp, bp)) {
- XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
- xfs_buf_ioerror(bp, EFSCORRUPTED);
- return;
- }
-}
-
-const struct xfs_buf_ops xfs_dquot_buf_ops = {
- .verify_read = xfs_dquot_buf_read_verify,
- .verify_write = xfs_dquot_buf_write_verify,
-};
-
/*
* Allocate a block and fill it with dquots.
* This is called when the bmapi finds a hole.
@@ -514,6 +402,7 @@ xfs_qm_dqalloc(
return (error);
}
+
STATIC int
xfs_qm_dqrepair(
struct xfs_mount *mp,
@@ -547,7 +436,7 @@ xfs_qm_dqrepair(
/* Do the actual repair of dquots in this buffer */
for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
ddq = &d[i].dd_diskdq;
- error = xfs_qm_dqcheck(mp, ddq, firstid + i,
+ error = xfs_dqcheck(mp, ddq, firstid + i,
dqp->dq_flags & XFS_DQ_ALLTYPES,
XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
if (error) {
@@ -1133,7 +1022,7 @@ xfs_qm_dqflush(
/*
* A simple sanity check in case we got a corrupted dquot..
*/
- error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
+ error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
XFS_QMOPT_DOWARN, "dqflush (incore copy)");
if (error) {
xfs_buf_relse(bp);
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index 55abbca2883d..d22ed0053c32 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -172,6 +172,4 @@ static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
return dqp;
}
-extern const struct xfs_buf_ops xfs_dquot_buf_ops;
-
#endif /* __XFS_DQUOT_H__ */
diff --git a/fs/xfs/xfs_dquot_buf.c b/fs/xfs/xfs_dquot_buf.c
new file mode 100644
index 000000000000..d401457d2f25
--- /dev/null
+++ b/fs/xfs/xfs_dquot_buf.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_quota.h"
+#include "xfs_trans.h"
+#include "xfs_qm.h"
+#include "xfs_error.h"
+#include "xfs_cksum.h"
+#include "xfs_trace.h"
+
+int
+xfs_calc_dquots_per_chunk(
+ struct xfs_mount *mp,
+ unsigned int nbblks) /* basic block units */
+{
+ unsigned int ndquots;
+
+ ASSERT(nbblks > 0);
+ ndquots = BBTOB(nbblks);
+ do_div(ndquots, sizeof(xfs_dqblk_t));
+
+ return ndquots;
+}
+
+/*
+ * Do some primitive error checking on ondisk dquot data structures.
+ */
+int
+xfs_dqcheck(
+ struct xfs_mount *mp,
+ xfs_disk_dquot_t *ddq,
+ xfs_dqid_t id,
+ uint type, /* used only when IO_dorepair is true */
+ uint flags,
+ char *str)
+{
+ xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
+ int errs = 0;
+
+ /*
+ * We can encounter an uninitialized dquot buffer for 2 reasons:
+ * 1. If we crash while deleting the quotainode(s), and those blks got
+ * used for user data. This is because we take the path of regular
+ * file deletion; however, the size field of quotainodes is never
+ * updated, so all the tricks that we play in itruncate_finish
+ * don't quite matter.
+ *
+ * 2. We don't play the quota buffers when there's a quotaoff logitem.
+ * But the allocation will be replayed so we'll end up with an
+ * uninitialized quota block.
+ *
+ * This is all fine; things are still consistent, and we haven't lost
+ * any quota information. Just don't complain about bad dquot blks.
+ */
+ if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
+ if (flags & XFS_QMOPT_DOWARN)
+ xfs_alert(mp,
+ "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
+ str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
+ errs++;
+ }
+ if (ddq->d_version != XFS_DQUOT_VERSION) {
+ if (flags & XFS_QMOPT_DOWARN)
+ xfs_alert(mp,
+ "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
+ str, id, ddq->d_version, XFS_DQUOT_VERSION);
+ errs++;
+ }
+
+ if (ddq->d_flags != XFS_DQ_USER &&
+ ddq->d_flags != XFS_DQ_PROJ &&
+ ddq->d_flags != XFS_DQ_GROUP) {
+ if (flags & XFS_QMOPT_DOWARN)
+ xfs_alert(mp,
+ "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
+ str, id, ddq->d_flags);
+ errs++;
+ }
+
+ if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
+ if (flags & XFS_QMOPT_DOWARN)
+ xfs_alert(mp,
+ "%s : ondisk-dquot 0x%p, ID mismatch: "
+ "0x%x expected, found id 0x%x",
+ str, ddq, id, be32_to_cpu(ddq->d_id));
+ errs++;
+ }
+
+ if (!errs && ddq->d_id) {
+ if (ddq->d_blk_softlimit &&
+ be64_to_cpu(ddq->d_bcount) >
+ be64_to_cpu(ddq->d_blk_softlimit)) {
+ if (!ddq->d_btimer) {
+ if (flags & XFS_QMOPT_DOWARN)
+ xfs_alert(mp,
+ "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
+ str, (int)be32_to_cpu(ddq->d_id), ddq);
+ errs++;
+ }
+ }
+ if (ddq->d_ino_softlimit &&
+ be64_to_cpu(ddq->d_icount) >
+ be64_to_cpu(ddq->d_ino_softlimit)) {
+ if (!ddq->d_itimer) {
+ if (flags & XFS_QMOPT_DOWARN)
+ xfs_alert(mp,
+ "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
+ str, (int)be32_to_cpu(ddq->d_id), ddq);
+ errs++;
+ }
+ }
+ if (ddq->d_rtb_softlimit &&
+ be64_to_cpu(ddq->d_rtbcount) >
+ be64_to_cpu(ddq->d_rtb_softlimit)) {
+ if (!ddq->d_rtbtimer) {
+ if (flags & XFS_QMOPT_DOWARN)
+ xfs_alert(mp,
+ "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
+ str, (int)be32_to_cpu(ddq->d_id), ddq);
+ errs++;
+ }
+ }
+ }
+
+ if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
+ return errs;
+
+ if (flags & XFS_QMOPT_DOWARN)
+ xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
+
+ /*
+ * Typically, a repair is only requested by quotacheck.
+ */
+ ASSERT(id != -1);
+ ASSERT(flags & XFS_QMOPT_DQREPAIR);
+ memset(d, 0, sizeof(xfs_dqblk_t));
+
+ d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
+ d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
+ d->dd_diskdq.d_flags = type;
+ d->dd_diskdq.d_id = cpu_to_be32(id);
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+ xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
+ XFS_DQUOT_CRC_OFF);
+ }
+
+ return errs;
+}
+
+STATIC bool
+xfs_dquot_buf_verify_crc(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp)
+{
+ struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
+ int ndquots;
+ int i;
+
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return true;
+
+ /*
+ * if we are in log recovery, the quota subsystem has not been
+ * initialised so we have no quotainfo structure. In that case, we need
+ * to manually calculate the number of dquots in the buffer.
+ */
+ if (mp->m_quotainfo)
+ ndquots = mp->m_quotainfo->qi_dqperchunk;
+ else
+ ndquots = xfs_calc_dquots_per_chunk(mp,
+ XFS_BB_TO_FSB(mp, bp->b_length));
+
+ for (i = 0; i < ndquots; i++, d++) {
+ if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
+ XFS_DQUOT_CRC_OFF))
+ return false;
+ if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
+ return false;
+ }
+ return true;
+}
+
+STATIC bool
+xfs_dquot_buf_verify(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp)
+{
+ struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
+ xfs_dqid_t id = 0;
+ int ndquots;
+ int i;
+
+ /*
+ * if we are in log recovery, the quota subsystem has not been
+ * initialised so we have no quotainfo structure. In that case, we need
+ * to manually calculate the number of dquots in the buffer.
+ */
+ if (mp->m_quotainfo)
+ ndquots = mp->m_quotainfo->qi_dqperchunk;
+ else
+ ndquots = xfs_calc_dquots_per_chunk(mp, bp->b_length);
+
+ /*
+ * On the first read of the buffer, verify that each dquot is valid.
+ * We don't know what the id of the dquot is supposed to be, just that
+ * they should be increasing monotonically within the buffer. If the
+ * first id is corrupt, then it will fail on the second dquot in the
+ * buffer so corruptions could point to the wrong dquot in this case.
+ */
+ for (i = 0; i < ndquots; i++) {
+ struct xfs_disk_dquot *ddq;
+ int error;
+
+ ddq = &d[i].dd_diskdq;
+
+ if (i == 0)
+ id = be32_to_cpu(ddq->d_id);
+
+ error = xfs_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
+ "xfs_dquot_buf_verify");
+ if (error)
+ return false;
+ }
+ return true;
+}
+
+static void
+xfs_dquot_buf_read_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+
+ if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+ }
+}
+
+/*
+ * we don't calculate the CRC here as that is done when the dquot is flushed to
+ * the buffer after the update is done. This ensures that the dquot in the
+ * buffer always has an up-to-date CRC value.
+ */
+static void
+xfs_dquot_buf_write_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+
+ if (!xfs_dquot_buf_verify(mp, bp)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+ return;
+ }
+}
+
+const struct xfs_buf_ops xfs_dquot_buf_ops = {
+ .verify_read = xfs_dquot_buf_read_verify,
+ .verify_write = xfs_dquot_buf_write_verify,
+};
+
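
xfs_calc_dquots_per_chunk() above is plain arithmetic: convert the buffer
length from 512-byte basic blocks to bytes and divide by the on-disk dquot
record size. The verifiers need it because m_quotainfo is not yet set up
during log recovery. A standalone sketch of that calculation, with DQBLK_SIZE
as a placeholder for sizeof(xfs_dqblk_t) rather than the real structure size:

/*
 * Sketch of the dquots-per-chunk arithmetic.  DQBLK_SIZE is an assumed
 * record size used only to make the example runnable.
 */
#include <assert.h>
#include <stdio.h>

#define BBSHIFT		9			/* 512-byte basic blocks */
#define BBTOB(bbs)	((unsigned int)(bbs) << BBSHIFT)
#define DQBLK_SIZE	136u			/* assumed record size */

static unsigned int dquots_per_chunk(unsigned int nbblks)
{
	assert(nbblks > 0);
	return BBTOB(nbblks) / DQBLK_SIZE;
}

int main(void)
{
	/* e.g. an 8-block (4 KiB) chunk */
	printf("dquots per 8-block chunk: %u\n", dquots_per_chunk(8));
	return 0;
}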
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index e838d84b4e85..92e5f62eefc6 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -18,23 +18,19 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
+#include "xfs_quota.h"
#include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_attr.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
+#include "xfs_log.h"
static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
{
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 1123d93ff795..9995b807d627 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -16,16 +16,13 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
+#include "xfs_format.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
#include "xfs_error.h"
#ifdef DEBUG
@@ -159,7 +156,7 @@ xfs_error_report(
{
if (level <= xfs_error_level) {
xfs_alert_tag(mp, XFS_PTAG_ERROR_REPORT,
- "Internal error %s at line %d of file %s. Caller 0x%p\n",
+ "Internal error %s at line %d of file %s. Caller 0x%p",
tag, linenum, filename, ra);
xfs_stack_trace();
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 066df425c14f..1399e187d425 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -16,21 +16,21 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
+#include "xfs_da_format.h"
#include "xfs_dir2.h"
#include "xfs_export.h"
-#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
+#include "xfs_log.h"
/*
* Note that we only accept fileids which are long enough rather than allow
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index e43708e2f080..fd22f69049d4 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -19,17 +19,18 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
-#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
void
xfs_extent_busy_insert(
diff --git a/fs/xfs/xfs_extent_busy.h b/fs/xfs/xfs_extent_busy.h
index 985412d65ba5..bfff284d2dcc 100644
--- a/fs/xfs/xfs_extent_busy.h
+++ b/fs/xfs/xfs_extent_busy.h
@@ -20,6 +20,10 @@
#ifndef __XFS_EXTENT_BUSY_H__
#define __XFS_EXTENT_BUSY_H__
+struct xfs_mount;
+struct xfs_trans;
+struct xfs_alloc_arg;
+
/*
* Busy block/extent entry. Indexed by a rbtree in perag to mark blocks that
* have been freed but whose transactions aren't committed to disk yet.
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index dc53e8febbbe..3680d04f973f 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -17,14 +17,14 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_buf_item.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_trans.h"
#include "xfs_trans_priv.h"
+#include "xfs_buf_item.h"
#include "xfs_extfree_item.h"
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 4c749ab543d0..e6035bd58294 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -17,25 +17,27 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_log.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_trans.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
#include <linux/aio.h>
#include <linux/dcache.h>
@@ -227,10 +229,9 @@ xfs_file_fsync(
}
STATIC ssize_t
-xfs_file_aio_read(
+xfs_file_read_iter(
struct kiocb *iocb,
- const struct iovec *iovp,
- unsigned long nr_segs,
+ struct iov_iter *iter,
loff_t pos)
{
struct file *file = iocb->ki_filp;
@@ -251,9 +252,7 @@ xfs_file_aio_read(
if (file->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
- ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE);
- if (ret < 0)
- return ret;
+ size = iov_iter_count(iter);
if (unlikely(ioflags & IO_ISDIRECT)) {
xfs_buftarg_t *target =
@@ -306,7 +305,7 @@ xfs_file_aio_read(
trace_xfs_file_read(ip, size, pos, ioflags);
- ret = generic_file_aio_read(iocb, iovp, nr_segs, pos);
+ ret = generic_file_read_iter(iocb, iter, pos);
if (ret > 0)
XFS_STATS_ADD(xs_read_bytes, ret);
@@ -622,10 +621,9 @@ restart:
STATIC ssize_t
xfs_file_dio_aio_write(
struct kiocb *iocb,
- const struct iovec *iovp,
- unsigned long nr_segs,
+ struct iov_iter *iter,
loff_t pos,
- size_t ocount)
+ size_t count)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -633,7 +631,6 @@ xfs_file_dio_aio_write(
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
ssize_t ret = 0;
- size_t count = ocount;
int unaligned_io = 0;
int iolock;
struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ?
@@ -693,8 +690,8 @@ xfs_file_dio_aio_write(
}
trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
- ret = generic_file_direct_write(iocb, iovp,
- &nr_segs, pos, &iocb->ki_pos, count, ocount);
+ ret = generic_file_direct_write_iter(iocb, iter,
+ pos, &iocb->ki_pos, count);
out:
xfs_rw_iunlock(ip, iolock);
@@ -707,10 +704,9 @@ out:
STATIC ssize_t
xfs_file_buffered_aio_write(
struct kiocb *iocb,
- const struct iovec *iovp,
- unsigned long nr_segs,
+ struct iov_iter *iter,
loff_t pos,
- size_t ocount)
+ size_t count)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -719,7 +715,6 @@ xfs_file_buffered_aio_write(
ssize_t ret;
int enospc = 0;
int iolock = XFS_IOLOCK_EXCL;
- size_t count = ocount;
xfs_rw_ilock(ip, iolock);
@@ -732,7 +727,7 @@ xfs_file_buffered_aio_write(
write_retry:
trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
- ret = generic_file_buffered_write(iocb, iovp, nr_segs,
+ ret = generic_file_buffered_write_iter(iocb, iter,
pos, &iocb->ki_pos, count, 0);
/*
@@ -753,10 +748,9 @@ out:
}
STATIC ssize_t
-xfs_file_aio_write(
+xfs_file_write_iter(
struct kiocb *iocb,
- const struct iovec *iovp,
- unsigned long nr_segs,
+ struct iov_iter *iter,
loff_t pos)
{
struct file *file = iocb->ki_filp;
@@ -764,17 +758,15 @@ xfs_file_aio_write(
struct inode *inode = mapping->host;
struct xfs_inode *ip = XFS_I(inode);
ssize_t ret;
- size_t ocount = 0;
+ size_t count = 0;
XFS_STATS_INC(xs_write_calls);
BUG_ON(iocb->ki_pos != pos);
- ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
- if (ret)
- return ret;
+ count = iov_iter_count(iter);
- if (ocount == 0)
+ if (count == 0)
return 0;
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
@@ -783,10 +775,9 @@ xfs_file_aio_write(
}
if (unlikely(file->f_flags & O_DIRECT))
- ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
+ ret = xfs_file_dio_aio_write(iocb, iter, pos, count);
else
- ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
- ocount);
+ ret = xfs_file_buffered_aio_write(iocb, iter, pos, count);
if (ret > 0) {
ssize_t err;
@@ -805,44 +796,64 @@ out:
STATIC long
xfs_file_fallocate(
- struct file *file,
- int mode,
- loff_t offset,
- loff_t len)
+ struct file *file,
+ int mode,
+ loff_t offset,
+ loff_t len)
{
- struct inode *inode = file_inode(file);
- long error;
- loff_t new_size = 0;
- xfs_flock64_t bf;
- xfs_inode_t *ip = XFS_I(inode);
- int cmd = XFS_IOC_RESVSP;
- int attr_flags = XFS_ATTR_NOLOCK;
+ struct inode *inode = file_inode(file);
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_trans *tp;
+ long error;
+ loff_t new_size = 0;
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
- bf.l_whence = 0;
- bf.l_start = offset;
- bf.l_len = len;
-
xfs_ilock(ip, XFS_IOLOCK_EXCL);
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ error = xfs_free_file_space(ip, offset, len);
+ if (error)
+ goto out_unlock;
+ } else {
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ offset + len > i_size_read(inode)) {
+ new_size = offset + len;
+ error = -inode_newsize_ok(inode, new_size);
+ if (error)
+ goto out_unlock;
+ }
- if (mode & FALLOC_FL_PUNCH_HOLE)
- cmd = XFS_IOC_UNRESVSP;
-
- /* check the new inode size is valid before allocating */
- if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- offset + len > i_size_read(inode)) {
- new_size = offset + len;
- error = inode_newsize_ok(inode, new_size);
+ error = xfs_alloc_file_space(ip, offset, len,
+ XFS_BMAPI_PREALLOC);
if (error)
goto out_unlock;
}
- if (file->f_flags & O_DSYNC)
- attr_flags |= XFS_ATTR_SYNC;
+ tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
+ error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ goto out_unlock;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ ip->i_d.di_mode &= ~S_ISUID;
+ if (ip->i_d.di_mode & S_IXGRP)
+ ip->i_d.di_mode &= ~S_ISGID;
+
+ if (!(mode & FALLOC_FL_PUNCH_HOLE))
+ ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
- error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+ if (file->f_flags & O_DSYNC)
+ xfs_trans_set_sync(tp);
+ error = xfs_trans_commit(tp, 0);
if (error)
goto out_unlock;
@@ -852,12 +863,12 @@ xfs_file_fallocate(
iattr.ia_valid = ATTR_SIZE;
iattr.ia_size = new_size;
- error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
+ error = xfs_setattr_size(ip, &iattr);
}
out_unlock:
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return error;
+ return -error;
}
@@ -1411,8 +1422,8 @@ const struct file_operations xfs_file_operations = {
.llseek = xfs_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = xfs_file_aio_read,
- .aio_write = xfs_file_aio_write,
+ .read_iter = xfs_file_read_iter,
+ .write_iter = xfs_file_write_iter,
.splice_read = xfs_file_splice_read,
.splice_write = xfs_file_splice_write,
.unlocked_ioctl = xfs_file_ioctl,
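
The fs/xfs/xfs_file.c hunks above drop the iovec-array read/write paths (and
generic_segment_checks()) in favour of iov_iter, where the total request size
comes straight from iov_iter_count(iter). The user-space sketch below only
illustrates what that count is, the sum of the segment lengths; it is not the
kernel's iov_iter implementation.

/*
 * Illustration of the byte count the iov_iter conversion relies on:
 * the total of all segment lengths, which iov_iter_count() returns
 * directly instead of each caller walking the segments itself.
 */
#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

static size_t total_bytes(const struct iovec *iov, unsigned long nr_segs)
{
	size_t count = 0;

	for (unsigned long i = 0; i < nr_segs; i++)
		count += iov[i].iov_len;
	return count;
}

int main(void)
{
	char a[100], b[28];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};

	printf("total: %zu bytes\n", total_bytes(iov, 2));
	return 0;
}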
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index ce78e654d37b..12b6e7701985 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -16,19 +16,19 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
-#include "xfs_log.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inum.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_ag.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_inum.h"
+#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_alloc.h"
#include "xfs_mru_cache.h"
+#include "xfs_dinode.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
diff --git a/fs/xfs/xfs_format.h b/fs/xfs/xfs_format.h
index 35c08ff54ca0..b6ab5a3cfa12 100644
--- a/fs/xfs/xfs_format.h
+++ b/fs/xfs/xfs_format.h
@@ -156,14 +156,259 @@ struct xfs_dsymlink_hdr {
((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
sizeof(struct xfs_dsymlink_hdr) : 0))
-int xfs_symlink_blocks(struct xfs_mount *mp, int pathlen);
-int xfs_symlink_hdr_set(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
- uint32_t size, struct xfs_buf *bp);
-bool xfs_symlink_hdr_ok(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
- uint32_t size, struct xfs_buf *bp);
-void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
- struct xfs_inode *ip, struct xfs_ifork *ifp);
-
-extern const struct xfs_buf_ops xfs_symlink_buf_ops;
+
+/*
+ * Allocation Btree format definitions
+ *
+ * There are two on-disk btrees, one sorted by blockno and one sorted
+ * by blockcount and blockno. All blocks look the same to make the code
+ * simpler; if we have time later, we'll make the optimizations.
+ */
+#define XFS_ABTB_MAGIC 0x41425442 /* 'ABTB' for bno tree */
+#define XFS_ABTB_CRC_MAGIC 0x41423342 /* 'AB3B' */
+#define XFS_ABTC_MAGIC 0x41425443 /* 'ABTC' for cnt tree */
+#define XFS_ABTC_CRC_MAGIC 0x41423343 /* 'AB3C' */
+
+/*
+ * Data record/key structure
+ */
+typedef struct xfs_alloc_rec {
+ __be32 ar_startblock; /* starting block number */
+ __be32 ar_blockcount; /* count of free blocks */
+} xfs_alloc_rec_t, xfs_alloc_key_t;
+
+typedef struct xfs_alloc_rec_incore {
+ xfs_agblock_t ar_startblock; /* starting block number */
+ xfs_extlen_t ar_blockcount; /* count of free blocks */
+} xfs_alloc_rec_incore_t;
+
+/* btree pointer type */
+typedef __be32 xfs_alloc_ptr_t;
+
+/*
+ * Block numbers in the AG:
+ * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
+ */
+#define XFS_BNO_BLOCK(mp) ((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1))
+#define XFS_CNT_BLOCK(mp) ((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1))
+
+
+/*
+ * Inode Allocation Btree format definitions
+ *
+ * There is a btree for the inode map per allocation group.
+ */
+#define XFS_IBT_MAGIC 0x49414254 /* 'IABT' */
+#define XFS_IBT_CRC_MAGIC 0x49414233 /* 'IAB3' */
+
+typedef __uint64_t xfs_inofree_t;
+#define XFS_INODES_PER_CHUNK (NBBY * sizeof(xfs_inofree_t))
+#define XFS_INODES_PER_CHUNK_LOG (XFS_NBBYLOG + 3)
+#define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1)
+#define XFS_INOBT_MASK(i) ((xfs_inofree_t)1 << (i))
+
+static inline xfs_inofree_t xfs_inobt_maskn(int i, int n)
+{
+ return ((n >= XFS_INODES_PER_CHUNK ? 0 : XFS_INOBT_MASK(n)) - 1) << i;
+}
+
+/*
+ * Data record structure
+ */
+typedef struct xfs_inobt_rec {
+ __be32 ir_startino; /* starting inode number */
+ __be32 ir_freecount; /* count of free inodes (set bits) */
+ __be64 ir_free; /* free inode mask */
+} xfs_inobt_rec_t;
+
+typedef struct xfs_inobt_rec_incore {
+ xfs_agino_t ir_startino; /* starting inode number */
+ __int32_t ir_freecount; /* count of free inodes (set bits) */
+ xfs_inofree_t ir_free; /* free inode mask */
+} xfs_inobt_rec_incore_t;
+
+
+/*
+ * Key structure
+ */
+typedef struct xfs_inobt_key {
+ __be32 ir_startino; /* starting inode number */
+} xfs_inobt_key_t;
+
+/* btree pointer type */
+typedef __be32 xfs_inobt_ptr_t;
+
+/*
+ * block numbers in the AG.
+ */
+#define XFS_IBT_BLOCK(mp) ((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1))
+#define XFS_PREALLOC_BLOCKS(mp) ((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1))
+
+
+
+/*
+ * BMAP Btree format definitions
+ *
+ * This includes both the root block definition that sits inside an inode fork
+ * and the record/pointer formats for the leaf/node in the blocks.
+ */
+#define XFS_BMAP_MAGIC 0x424d4150 /* 'BMAP' */
+#define XFS_BMAP_CRC_MAGIC 0x424d4133 /* 'BMA3' */
+
+/*
+ * Bmap root header, on-disk form only.
+ */
+typedef struct xfs_bmdr_block {
+ __be16 bb_level; /* 0 is a leaf */
+ __be16 bb_numrecs; /* current # of data records */
+} xfs_bmdr_block_t;
+
+/*
+ * Bmap btree record and extent descriptor.
+ * l0:63 is an extent flag (value 1 indicates non-normal).
+ * l0:9-62 are startoff.
+ * l0:0-8 and l1:21-63 are startblock.
+ * l1:0-20 are blockcount.
+ */
+#define BMBT_EXNTFLAG_BITLEN 1
+#define BMBT_STARTOFF_BITLEN 54
+#define BMBT_STARTBLOCK_BITLEN 52
+#define BMBT_BLOCKCOUNT_BITLEN 21
+
+typedef struct xfs_bmbt_rec {
+ __be64 l0, l1;
+} xfs_bmbt_rec_t;
+
+typedef __uint64_t xfs_bmbt_rec_base_t; /* use this for casts */
+typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;
+
+typedef struct xfs_bmbt_rec_host {
+ __uint64_t l0, l1;
+} xfs_bmbt_rec_host_t;
+
+/*
+ * Values and macros for delayed-allocation startblock fields.
+ */
+#define STARTBLOCKVALBITS 17
+#define STARTBLOCKMASKBITS (15 + XFS_BIG_BLKNOS * 20)
+#define DSTARTBLOCKMASKBITS (15 + 20)
+#define STARTBLOCKMASK \
+ (((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
+#define DSTARTBLOCKMASK \
+ (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
+
+static inline int isnullstartblock(xfs_fsblock_t x)
+{
+ return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK;
+}
+
+static inline int isnulldstartblock(xfs_dfsbno_t x)
+{
+ return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK;
+}
+
+static inline xfs_fsblock_t nullstartblock(int k)
+{
+ ASSERT(k < (1 << STARTBLOCKVALBITS));
+ return STARTBLOCKMASK | (k);
+}
+
+static inline xfs_filblks_t startblockval(xfs_fsblock_t x)
+{
+ return (xfs_filblks_t)((x) & ~STARTBLOCKMASK);
+}
+
+/*
+ * Possible extent formats.
+ */
+typedef enum {
+ XFS_EXTFMT_NOSTATE = 0,
+ XFS_EXTFMT_HASSTATE
+} xfs_exntfmt_t;
+
+/*
+ * Possible extent states.
+ */
+typedef enum {
+ XFS_EXT_NORM, XFS_EXT_UNWRITTEN,
+ XFS_EXT_DMAPI_OFFLINE, XFS_EXT_INVALID
+} xfs_exntst_t;
+
+/*
+ * Incore version of above.
+ */
+typedef struct xfs_bmbt_irec
+{
+ xfs_fileoff_t br_startoff; /* starting file offset */
+ xfs_fsblock_t br_startblock; /* starting block number */
+ xfs_filblks_t br_blockcount; /* number of blocks */
+ xfs_exntst_t br_state; /* extent state */
+} xfs_bmbt_irec_t;
+
+/*
+ * Key structure for non-leaf levels of the tree.
+ */
+typedef struct xfs_bmbt_key {
+ __be64 br_startoff; /* starting file offset */
+} xfs_bmbt_key_t, xfs_bmdr_key_t;
+
+/* btree pointer type */
+typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
+
+
+/*
+ * Generic Btree block format definitions
+ *
+ * This is a combination of the actual format used on disk for short and long
+ * format btrees. The first three fields are shared by both formats, but the
+ * pointers are different and should be used with care.
+ *
+ * To get the size of the actual short or long form headers please use the size
+ * macros below. Never use sizeof(xfs_btree_block).
+ *
+ * The blkno, crc, lsn, owner and uuid fields are only available in filesystems
+ * with the crc feature bit, and all accesses to them must be conditional on
+ * that flag.
+ */
+struct xfs_btree_block {
+ __be32 bb_magic; /* magic number for block type */
+ __be16 bb_level; /* 0 is a leaf */
+ __be16 bb_numrecs; /* current # of data records */
+ union {
+ struct {
+ __be32 bb_leftsib;
+ __be32 bb_rightsib;
+
+ __be64 bb_blkno;
+ __be64 bb_lsn;
+ uuid_t bb_uuid;
+ __be32 bb_owner;
+ __le32 bb_crc;
+ } s; /* short form pointers */
+ struct {
+ __be64 bb_leftsib;
+ __be64 bb_rightsib;
+
+ __be64 bb_blkno;
+ __be64 bb_lsn;
+ uuid_t bb_uuid;
+ __be64 bb_owner;
+ __le32 bb_crc;
+ __be32 bb_pad; /* padding for alignment */
+ } l; /* long form pointers */
+ } bb_u; /* rest */
+};
+
+#define XFS_BTREE_SBLOCK_LEN 16 /* size of a short form block */
+#define XFS_BTREE_LBLOCK_LEN 24 /* size of a long form block */
+
+/* sizes of CRC enabled btree blocks */
+#define XFS_BTREE_SBLOCK_CRC_LEN (XFS_BTREE_SBLOCK_LEN + 40)
+#define XFS_BTREE_LBLOCK_CRC_LEN (XFS_BTREE_LBLOCK_LEN + 48)
+
+#define XFS_BTREE_SBLOCK_CRC_OFF \
+ offsetof(struct xfs_btree_block, bb_u.s.bb_crc)
+#define XFS_BTREE_LBLOCK_CRC_OFF \
+ offsetof(struct xfs_btree_block, bb_u.l.bb_crc)
#endif /* __XFS_FORMAT_H__ */
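
The bmbt record comment above packs a 54-bit startoff, a 52-bit startblock, a
21-bit blockcount and a 1-bit extent flag into two 64-bit words. A worked
example of that layout, done in host-endian arithmetic (the real fields are
big-endian __be64, which this sketch deliberately ignores):

/*
 * Worked example of the bmbt record bit layout: l0:63 flag,
 * l0:9-62 startoff, l0:0-8 + l1:21-63 startblock, l1:0-20 blockcount.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct rec { uint64_t l0, l1; };

static struct rec pack(unsigned flag, uint64_t startoff,
		       uint64_t startblock, uint32_t blockcount)
{
	struct rec r;

	r.l0 = ((uint64_t)(flag & 1) << 63) |
	       ((startoff & ((1ULL << 54) - 1)) << 9) |
	       (startblock >> 43);			/* top 9 of 52 bits */
	r.l1 = ((startblock & ((1ULL << 43) - 1)) << 21) |
	       (blockcount & ((1U << 21) - 1));
	return r;
}

int main(void)
{
	struct rec r = pack(0, 12345, 0xABCDEF012345ULL, 4096);

	/* unpack each field and check the round trip */
	assert((r.l0 >> 63) == 0);
	assert(((r.l0 >> 9) & ((1ULL << 54) - 1)) == 12345);
	assert((((r.l0 & 0x1FF) << 43) | (r.l1 >> 21)) == 0xABCDEF012345ULL);
	assert((r.l1 & ((1U << 21) - 1)) == 4096);
	printf("l0=%#llx l1=%#llx\n",
	       (unsigned long long)r.l0, (unsigned long long)r.l1);
	return 0;
}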
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 18272c766a50..c5fc116dfaa3 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -233,11 +233,11 @@ typedef struct xfs_fsop_resblks {
#define XFS_FSOP_GEOM_FLAGS_LOGV2 0x0100 /* log format version 2 */
#define XFS_FSOP_GEOM_FLAGS_SECTOR 0x0200 /* sector sizes >1BB */
#define XFS_FSOP_GEOM_FLAGS_ATTR2 0x0400 /* inline attributes rework */
-#define XFS_FSOP_GEOM_FLAGS_PROJID32 0x0800 /* 32-bit project IDs */
+#define XFS_FSOP_GEOM_FLAGS_PROJID32 0x0800 /* 32-bit project IDs */
#define XFS_FSOP_GEOM_FLAGS_DIRV2CI 0x1000 /* ASCII only CI names */
#define XFS_FSOP_GEOM_FLAGS_LAZYSB 0x4000 /* lazy superblock counters */
#define XFS_FSOP_GEOM_FLAGS_V5SB 0x8000 /* version 5 superblock */
-
+#define XFS_FSOP_GEOM_FLAGS_FTYPE 0x10000 /* inode directory types */
/*
* Minimum and maximum sizes needed for growth checks.
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index e64ee5288b86..a6e54b3319bd 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -17,28 +17,29 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
-#include "xfs_btree.h"
#include "xfs_error.h"
+#include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_fsops.h"
#include "xfs_itable.h"
#include "xfs_trans_space.h"
#include "xfs_rtalloc.h"
-#include "xfs_filestream.h"
#include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
+#include "xfs_filestream.h"
/*
* File system operations
@@ -101,7 +102,9 @@ xfs_fs_geometry(
(xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
(xfs_sb_version_hascrc(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_V5SB : 0);
+ XFS_FSOP_GEOM_FLAGS_V5SB : 0) |
+ (xfs_sb_version_hasftype(&mp->m_sb) ?
+ XFS_FSOP_GEOM_FLAGS_FTYPE : 0);
geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
mp->m_sb.sb_logsectsize : BBSIZE;
geo->rtsectsize = mp->m_sb.sb_blocksize;
@@ -153,7 +156,7 @@ xfs_growfs_data_private(
xfs_buf_t *bp;
int bucket;
int dpct;
- int error;
+ int error, saved_error = 0;
xfs_agnumber_t nagcount;
xfs_agnumber_t nagimax = 0;
xfs_rfsblock_t nb, nb_mod;
@@ -496,29 +499,33 @@ xfs_growfs_data_private(
error = ENOMEM;
}
+ /*
+ * If we get an error reading or writing alternate superblocks,
+ * continue. xfs_repair chooses the "best" superblock based
+ * on most matches; if we break early, we'll leave more
+ * superblocks un-updated than updated, and xfs_repair may
+ * pick them over the properly-updated primary.
+ */
if (error) {
xfs_warn(mp,
"error %d reading secondary superblock for ag %d",
error, agno);
- break;
+ saved_error = error;
+ continue;
}
xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);
- /*
- * If we get an error writing out the alternate superblocks,
- * just issue a warning and continue. The real work is
- * already done and committed.
- */
error = xfs_bwrite(bp);
xfs_buf_relse(bp);
if (error) {
xfs_warn(mp,
"write error %d updating secondary superblock for ag %d",
error, agno);
- break; /* no point in continuing */
+ saved_error = error;
+ continue;
}
}
- return error;
+ return saved_error ? saved_error : error;
error0:
xfs_trans_cancel(tp, XFS_TRANS_ABORT);
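
The xfs_growfs_data_private() hunk above stops breaking out of the
secondary-superblock loop on the first error; it records the first failure in
saved_error and keeps updating the remaining copies so xfs_repair sees as many
current superblocks as possible. A generic sketch of that pattern, with
update_item() standing in for the per-AG read/modify/write (nothing here is
kernel API):

/*
 * "Save the first error, keep going" loop: later items are still
 * processed, and the first failure is what the caller sees.
 */
#include <stdio.h>

static int update_item(int i)
{
	return (i == 2) ? -5 : 0;	/* pretend item 2 fails with -EIO */
}

static int update_all(int n)
{
	int saved_error = 0;

	for (int i = 0; i < n; i++) {
		int error = update_item(i);

		if (error) {
			fprintf(stderr, "item %d failed: %d, continuing\n",
				i, error);
			if (!saved_error)
				saved_error = error;
			continue;	/* don't abandon the rest */
		}
	}
	return saved_error;		/* first failure, or 0 */
}

int main(void)
{
	printf("update_all returned %d\n", update_all(5));
	return 0;
}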
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index ccf2fb143962..14d732f61a41 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -17,29 +17,29 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_cksum.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_icreate_item.h"
#include "xfs_icache.h"
+#include "xfs_dinode.h"
/*
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h
index 68c07320f096..a8f76a5ff418 100644
--- a/fs/xfs/xfs_ialloc.h
+++ b/fs/xfs/xfs_ialloc.h
@@ -23,6 +23,7 @@ struct xfs_dinode;
struct xfs_imap;
struct xfs_mount;
struct xfs_trans;
+struct xfs_btree_cur;
/*
* Allocation parameters for inode allocation.
@@ -42,7 +43,7 @@ struct xfs_trans;
static inline struct xfs_dinode *
xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o)
{
- return (xfs_dinode_t *)
+ return (struct xfs_dinode *)
(xfs_buf_offset(b, o << (mp)->m_sb.sb_inodelog));
}
@@ -158,6 +159,4 @@ int xfs_ialloc_inode_init(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, xfs_agblock_t agbno,
xfs_agblock_t length, unsigned int gen);
-extern const struct xfs_buf_ops xfs_agi_buf_ops;
-
#endif /* __XFS_IALLOC_H__ */
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index 5448eb6b8c12..c8fa5bbb36de 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -17,24 +17,23 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
+#include "xfs_trans.h"
STATIC int
diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h
index 3ac36b7642e9..f38b22011c4e 100644
--- a/fs/xfs/xfs_ialloc_btree.h
+++ b/fs/xfs/xfs_ialloc_btree.h
@@ -27,55 +27,6 @@ struct xfs_btree_cur;
struct xfs_mount;
/*
- * There is a btree for the inode map per allocation group.
- */
-#define XFS_IBT_MAGIC 0x49414254 /* 'IABT' */
-#define XFS_IBT_CRC_MAGIC 0x49414233 /* 'IAB3' */
-
-typedef __uint64_t xfs_inofree_t;
-#define XFS_INODES_PER_CHUNK (NBBY * sizeof(xfs_inofree_t))
-#define XFS_INODES_PER_CHUNK_LOG (XFS_NBBYLOG + 3)
-#define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1)
-#define XFS_INOBT_MASK(i) ((xfs_inofree_t)1 << (i))
-
-static inline xfs_inofree_t xfs_inobt_maskn(int i, int n)
-{
- return ((n >= XFS_INODES_PER_CHUNK ? 0 : XFS_INOBT_MASK(n)) - 1) << i;
-}
-
-/*
- * Data record structure
- */
-typedef struct xfs_inobt_rec {
- __be32 ir_startino; /* starting inode number */
- __be32 ir_freecount; /* count of free inodes (set bits) */
- __be64 ir_free; /* free inode mask */
-} xfs_inobt_rec_t;
-
-typedef struct xfs_inobt_rec_incore {
- xfs_agino_t ir_startino; /* starting inode number */
- __int32_t ir_freecount; /* count of free inodes (set bits) */
- xfs_inofree_t ir_free; /* free inode mask */
-} xfs_inobt_rec_incore_t;
-
-
-/*
- * Key structure
- */
-typedef struct xfs_inobt_key {
- __be32 ir_startino; /* starting inode number */
-} xfs_inobt_key_t;
-
-/* btree pointer type */
-typedef __be32 xfs_inobt_ptr_t;
-
-/*
- * block numbers in the AG.
- */
-#define XFS_IBT_BLOCK(mp) ((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1))
-#define XFS_PREALLOC_BLOCKS(mp) ((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1))
-
-/*
* Btree block header size depends on a superblock flag.
*/
#define XFS_INOBT_BLOCK_LEN(mp) \
@@ -110,6 +61,4 @@ extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *,
struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t);
extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int);
-extern const struct xfs_buf_ops xfs_inobt_buf_ops;
-
#endif /* __XFS_IALLOC_BTREE_H__ */
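
xfs_inobt_maskn(), removed from this header above (its new home is
xfs_format.h earlier in the diff), builds a 64-bit mask with n consecutive
bits set starting at bit i, one bit per inode in a 64-inode chunk, and relies
on unsigned wraparound for the n == 64 case. A standalone version of the same
arithmetic:

/*
 * maskn(i, n): n consecutive set bits starting at bit i.
 * (2^n - 1) gives n low bits set; shifting by i positions them.
 * When n covers the whole chunk, 0 - 1 wraps to the all-ones mask.
 */
#include <stdint.h>
#include <stdio.h>

#define INODES_PER_CHUNK	64		/* NBBY * sizeof(uint64_t) */
#define MASK(i)			((uint64_t)1 << (i))

static uint64_t maskn(int i, int n)
{
	return ((n >= INODES_PER_CHUNK ? 0 : MASK(n)) - 1) << i;
}

int main(void)
{
	/* 4 free inodes starting at inode index 8 of the chunk */
	printf("maskn(8, 4)  = %#llx\n", (unsigned long long)maskn(8, 4));
	/* the whole chunk free: n == 64 yields the all-ones mask */
	printf("maskn(0, 64) = %#llx\n", (unsigned long long)maskn(0, 64));
	return 0;
}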
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 474807a401c8..98d35244eecc 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -18,24 +18,19 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_log_priv.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
-#include "xfs_dinode.h"
#include "xfs_error.h"
-#include "xfs_filestream.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
-#include "xfs_fsops.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
@@ -500,11 +495,6 @@ xfs_inode_ag_walk_grab(
if (!igrab(inode))
return ENOENT;
- if (is_bad_inode(inode)) {
- IRELE(ip);
- return ENOENT;
- }
-
/* inode is valid */
return 0;
@@ -918,8 +908,6 @@ restart:
xfs_iflock(ip);
}
- if (is_bad_inode(VFS_I(ip)))
- goto reclaim;
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
xfs_iunpin_wait(ip);
xfs_iflush_abort(ip, false);
diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c
index 5a5a593994d4..d2eaccfa73f4 100644
--- a/fs/xfs/xfs_icreate_item.c
+++ b/fs/xfs/xfs_icreate_item.c
@@ -17,13 +17,14 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_icreate_item.h"
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index e3d75385aa76..326b94dbe159 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -19,39 +19,38 @@
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_space.h"
-#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_attr.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
+#include "xfs_trans_space.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
-#include "xfs_btree.h"
-#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_quota.h"
+#include "xfs_dinode.h"
#include "xfs_filestream.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
+#include "xfs_bmap_btree.h"
kmem_zone_t *xfs_inode_zone;
@@ -1663,6 +1662,126 @@ xfs_release(
}
/*
+ * xfs_inactive_truncate
+ *
+ * Called to perform a truncate when an inode becomes unlinked.
+ */
+STATIC int
+xfs_inactive_truncate(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+ if (error) {
+ ASSERT(XFS_FORCED_SHUTDOWN(mp));
+ xfs_trans_cancel(tp, 0);
+ return error;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+
+ /*
+ * Log the inode size first to prevent stale data exposure in the event
+ * of a system crash before the truncate completes. See the related
+ * comment in xfs_setattr_size() for details.
+ */
+ ip->i_d.di_size = 0;
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+ error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
+ if (error)
+ goto error_trans_cancel;
+
+ ASSERT(ip->i_d.di_nextents == 0);
+
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ if (error)
+ goto error_unlock;
+
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return 0;
+
+error_trans_cancel:
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+error_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+}
+
+/*
+ * xfs_inactive_ifree()
+ *
+ * Perform the inode free when an inode is unlinked.
+ */
+STATIC int
+xfs_inactive_ifree(
+ struct xfs_inode *ip)
+{
+ xfs_bmap_free_t free_list;
+ xfs_fsblock_t first_block;
+ int committed;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, 0, 0);
+ if (error) {
+ ASSERT(XFS_FORCED_SHUTDOWN(mp));
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
+ return error;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+
+ xfs_bmap_init(&free_list, &first_block);
+ error = xfs_ifree(tp, ip, &free_list);
+ if (error) {
+ /*
+ * If we fail to free the inode, shut down. The cancel
+ * might do that, we need to make sure. Otherwise the
+ * inode might be lost for a long time or forever.
+ */
+ if (!XFS_FORCED_SHUTDOWN(mp)) {
+ xfs_notice(mp, "%s: xfs_ifree returned error %d",
+ __func__, error);
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+ }
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+ }
+
+ /*
+ * Credit the quota account(s). The inode is gone.
+ */
+ xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
+
+ /*
+ * Just ignore errors at this point. There is nothing we can
+ * do except to try to keep going. Make sure it's not a silent
+ * error.
+ */
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
+ if (error)
+ xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
+ __func__, error);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ if (error)
+ xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
+ __func__, error);
+
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return 0;
+}
+
+/*
* xfs_inactive
*
* This is called when the vnode reference count for the vnode
@@ -1670,16 +1789,11 @@ xfs_release(
* now be truncated. Also, we clear all of the read-ahead state
* kept for the inode here since the file is now closed.
*/
-int
+void
xfs_inactive(
xfs_inode_t *ip)
{
- xfs_bmap_free_t free_list;
- xfs_fsblock_t first_block;
- int committed;
- struct xfs_trans *tp;
struct xfs_mount *mp;
- struct xfs_trans_res *resp;
int error;
int truncate = 0;
@@ -1687,19 +1801,17 @@ xfs_inactive(
* If the inode is already free, then there can be nothing
* to clean up here.
*/
- if (ip->i_d.di_mode == 0 || is_bad_inode(VFS_I(ip))) {
+ if (ip->i_d.di_mode == 0) {
ASSERT(ip->i_df.if_real_bytes == 0);
ASSERT(ip->i_df.if_broot_bytes == 0);
- return VN_INACTIVE_CACHE;
+ return;
}
mp = ip->i_mount;
- error = 0;
-
/* If this is a read-only mount, don't do this (would generate I/O) */
if (mp->m_flags & XFS_MOUNT_RDONLY)
- goto out;
+ return;
if (ip->i_d.di_nlink != 0) {
/*
@@ -1707,12 +1819,10 @@ xfs_inactive(
* cache. Post-eof blocks must be freed, lest we end up with
* broken free space accounting.
*/
- if (xfs_can_free_eofblocks(ip, true)) {
- error = xfs_free_eofblocks(mp, ip, false);
- if (error)
- return VN_INACTIVE_CACHE;
- }
- goto out;
+ if (xfs_can_free_eofblocks(ip, true))
+ xfs_free_eofblocks(mp, ip, false);
+
+ return;
}
if (S_ISREG(ip->i_d.di_mode) &&
@@ -1722,36 +1832,14 @@ xfs_inactive(
error = xfs_qm_dqattach(ip, 0);
if (error)
- return VN_INACTIVE_CACHE;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
- resp = (truncate || S_ISLNK(ip->i_d.di_mode)) ?
- &M_RES(mp)->tr_itruncate : &M_RES(mp)->tr_ifree;
+ return;
- error = xfs_trans_reserve(tp, resp, 0, 0);
- if (error) {
- ASSERT(XFS_FORCED_SHUTDOWN(mp));
- xfs_trans_cancel(tp, 0);
- return VN_INACTIVE_CACHE;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, 0);
-
- if (S_ISLNK(ip->i_d.di_mode)) {
- error = xfs_inactive_symlink(ip, &tp);
- if (error)
- goto out_cancel;
- } else if (truncate) {
- ip->i_d.di_size = 0;
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
- error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
- if (error)
- goto out_cancel;
-
- ASSERT(ip->i_d.di_nextents == 0);
- }
+ if (S_ISLNK(ip->i_d.di_mode))
+ error = xfs_inactive_symlink(ip);
+ else if (truncate)
+ error = xfs_inactive_truncate(ip);
+ if (error)
+ return;
/*
* If there are attributes associated with the file then blow them away
@@ -1762,25 +1850,9 @@ xfs_inactive(
if (ip->i_d.di_anextents > 0) {
ASSERT(ip->i_d.di_forkoff != 0);
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
- if (error)
- goto out_unlock;
-
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
error = xfs_attr_inactive(ip);
if (error)
- goto out;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- goto out;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, 0);
+ return;
}
if (ip->i_afp)
@@ -1791,52 +1863,14 @@ xfs_inactive(
/*
* Free the inode.
*/
- xfs_bmap_init(&free_list, &first_block);
- error = xfs_ifree(tp, ip, &free_list);
- if (error) {
- /*
- * If we fail to free the inode, shut down. The cancel
- * might do that, we need to make sure. Otherwise the
- * inode might be lost for a long time or forever.
- */
- if (!XFS_FORCED_SHUTDOWN(mp)) {
- xfs_notice(mp, "%s: xfs_ifree returned error %d",
- __func__, error);
- xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
- }
- xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
- } else {
- /*
- * Credit the quota account(s). The inode is gone.
- */
- xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
-
- /*
- * Just ignore errors at this point. There is nothing we can
- * do except to try to keep going. Make sure it's not a silent
- * error.
- */
- error = xfs_bmap_finish(&tp, &free_list, &committed);
- if (error)
- xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
- __func__, error);
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
- if (error)
- xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
- __func__, error);
- }
+ error = xfs_inactive_ifree(ip);
+ if (error)
+ return;
/*
* Release the dquots held by inode, if any.
*/
xfs_qm_dqdetach(ip);
-out_unlock:
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-out:
- return VN_INACTIVE_CACHE;
-out_cancel:
- xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
- goto out_unlock;
}
/*
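Both new helpers above (xfs_inactive_truncate and xfs_inactive_ifree) follow the same transaction shape: allocate, reserve, lock and join the inode, modify and log it, then commit, or cancel with XFS_TRANS_ABORT once the transaction is dirty. A distilled sketch of that shape, using only calls that already appear in this hunk; the helper name itself is invented for illustration and is not part of the patch:

STATIC int
xfs_example_trans_op(			/* hypothetical name, illustration only */
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error) {
		/* nothing has been dirtied yet, so a plain cancel is enough */
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* ... modify the inode here and log it via xfs_trans_log_inode() ... */

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}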
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 4a91358c1470..9e6efccbae04 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -24,7 +24,6 @@
/*
* Kernel only inode definitions
*/
-
struct xfs_dinode;
struct xfs_inode;
struct xfs_buf;
@@ -50,6 +49,9 @@ typedef struct xfs_inode {
xfs_ifork_t *i_afp; /* attribute fork pointer */
xfs_ifork_t i_df; /* data fork */
+ /* operations vectors */
+ const struct xfs_dir_ops *d_ops; /* directory ops vector */
+
/* Transaction and locking information. */
struct xfs_inode_log_item *i_itemp; /* logging information */
mrlock_t i_lock; /* inode lock */
@@ -316,7 +318,7 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
int xfs_release(struct xfs_inode *ip);
-int xfs_inactive(struct xfs_inode *ip);
+void xfs_inactive(struct xfs_inode *ip);
int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
struct xfs_inode **ipp, struct xfs_name *ci_name);
int xfs_create(struct xfs_inode *dp, struct xfs_name *name,
diff --git a/fs/xfs/xfs_inode_buf.c b/fs/xfs/xfs_inode_buf.c
index 63382d37f565..4fc9f39dd89e 100644
--- a/fs/xfs/xfs_inode_buf.c
+++ b/fs/xfs/xfs_inode_buf.c
@@ -17,20 +17,20 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_icache.h"
+#include "xfs_trans.h"
#include "xfs_ialloc.h"
+#include "xfs_dinode.h"
/*
* Check that none of the inode's in the buffer have a next
diff --git a/fs/xfs/xfs_inode_buf.h b/fs/xfs/xfs_inode_buf.h
index abba0ae8cf2d..9308c47f2a52 100644
--- a/fs/xfs/xfs_inode_buf.h
+++ b/fs/xfs/xfs_inode_buf.h
@@ -47,7 +47,4 @@ void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
#define xfs_inobp_check(mp, bp)
#endif /* DEBUG */
-extern const struct xfs_buf_ops xfs_inode_buf_ops;
-extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
-
#endif /* __XFS_INODE_BUF_H__ */
diff --git a/fs/xfs/xfs_inode_fork.c b/fs/xfs/xfs_inode_fork.c
index 02f1083955bb..22c9837c5d4b 100644
--- a/fs/xfs/xfs_inode_fork.c
+++ b/fs/xfs/xfs_inode_fork.c
@@ -20,31 +20,21 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
-#include "xfs_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_ialloc.h"
+#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
-#include "xfs_quota.h"
-#include "xfs_filestream.h"
-#include "xfs_cksum.h"
#include "xfs_trace.h"
-#include "xfs_icache.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dinode.h"
kmem_zone_t *xfs_ifork_zone;
@@ -1359,7 +1349,7 @@ xfs_iext_remove_indirect(
void
xfs_iext_realloc_direct(
xfs_ifork_t *ifp, /* inode fork pointer */
- int new_size) /* new size of extents */
+ int new_size) /* new size of extents after adding */
{
int rnew_size; /* real new size of extents */
@@ -1397,13 +1387,8 @@ xfs_iext_realloc_direct(
rnew_size - ifp->if_real_bytes);
}
}
- /*
- * Switch from the inline extent buffer to a direct
- * extent list. Be sure to include the inline extent
- * bytes in new_size.
- */
+ /* Switch from the inline extent buffer to a direct extent list */
else {
- new_size += ifp->if_bytes;
if (!is_power_of_2(new_size)) {
rnew_size = roundup_pow_of_two(new_size);
}
diff --git a/fs/xfs/xfs_inode_fork.h b/fs/xfs/xfs_inode_fork.h
index 28661a0d9058..eb329a1ea888 100644
--- a/fs/xfs/xfs_inode_fork.h
+++ b/fs/xfs/xfs_inode_fork.h
@@ -19,6 +19,7 @@
#define __XFS_INODE_FORK_H__
struct xfs_inode_log_item;
+struct xfs_dinode;
/*
* The following xfs_ext_irec_t struct introduces a second (top) level
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 378081109844..7c0d391f9a6e 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -17,19 +17,19 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_trans_priv.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_error.h"
#include "xfs_trace.h"
+#include "xfs_trans_priv.h"
+#include "xfs_dinode.h"
kmem_zone_t *xfs_ili_zone; /* inode log item zone */
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 668e8f4ccf5e..4d613401a5e0 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -17,32 +17,31 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_alloc.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ioctl.h"
+#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
-#include "xfs_buf_item.h"
#include "xfs_fsops.h"
#include "xfs_discard.h"
#include "xfs_quota.h"
-#include "xfs_inode_item.h"
#include "xfs_export.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
+#include "xfs_dinode.h"
+#include "xfs_trans.h"
#include <linux/capability.h>
#include <linux/dcache.h>
@@ -641,7 +640,11 @@ xfs_ioc_space(
unsigned int cmd,
xfs_flock64_t *bf)
{
- int attr_flags = 0;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ struct iattr iattr;
+ bool setprealloc = false;
+ bool clrprealloc = false;
int error;
/*
@@ -661,19 +664,128 @@ xfs_ioc_space(
if (!S_ISREG(inode->i_mode))
return -XFS_ERROR(EINVAL);
- if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
- attr_flags |= XFS_ATTR_NONBLOCK;
+ error = mnt_want_write_file(filp);
+ if (error)
+ return error;
- if (filp->f_flags & O_DSYNC)
- attr_flags |= XFS_ATTR_SYNC;
+ xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+ switch (bf->l_whence) {
+ case 0: /*SEEK_SET*/
+ break;
+ case 1: /*SEEK_CUR*/
+ bf->l_start += filp->f_pos;
+ break;
+ case 2: /*SEEK_END*/
+ bf->l_start += XFS_ISIZE(ip);
+ break;
+ default:
+ error = XFS_ERROR(EINVAL);
+ goto out_unlock;
+ }
- if (ioflags & IO_INVIS)
- attr_flags |= XFS_ATTR_DMI;
+ /*
+ * A length of <= 0 for resv/unresv/zero is invalid. The length for
+ * alloc/free is ignored completely and we have no idea what userspace
+ * might have set it to, so set it to zero to allow the range
+ * checks to pass.
+ */
+ switch (cmd) {
+ case XFS_IOC_ZERO_RANGE:
+ case XFS_IOC_RESVSP:
+ case XFS_IOC_RESVSP64:
+ case XFS_IOC_UNRESVSP:
+ case XFS_IOC_UNRESVSP64:
+ if (bf->l_len <= 0) {
+ error = XFS_ERROR(EINVAL);
+ goto out_unlock;
+ }
+ break;
+ default:
+ bf->l_len = 0;
+ break;
+ }
+
+ if (bf->l_start < 0 ||
+ bf->l_start > mp->m_super->s_maxbytes ||
+ bf->l_start + bf->l_len < 0 ||
+ bf->l_start + bf->l_len >= mp->m_super->s_maxbytes) {
+ error = XFS_ERROR(EINVAL);
+ goto out_unlock;
+ }
+
+ switch (cmd) {
+ case XFS_IOC_ZERO_RANGE:
+ error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
+ if (!error)
+ setprealloc = true;
+ break;
+ case XFS_IOC_RESVSP:
+ case XFS_IOC_RESVSP64:
+ error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
+ XFS_BMAPI_PREALLOC);
+ if (!error)
+ setprealloc = true;
+ break;
+ case XFS_IOC_UNRESVSP:
+ case XFS_IOC_UNRESVSP64:
+ error = xfs_free_file_space(ip, bf->l_start, bf->l_len);
+ break;
+ case XFS_IOC_ALLOCSP:
+ case XFS_IOC_ALLOCSP64:
+ case XFS_IOC_FREESP:
+ case XFS_IOC_FREESP64:
+ if (bf->l_start > XFS_ISIZE(ip)) {
+ error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
+ bf->l_start - XFS_ISIZE(ip), 0);
+ if (error)
+ goto out_unlock;
+ }
+
+ iattr.ia_valid = ATTR_SIZE;
+ iattr.ia_size = bf->l_start;
+
+ error = xfs_setattr_size(ip, &iattr);
+ if (!error)
+ clrprealloc = true;
+ break;
+ default:
+ ASSERT(0);
+ error = XFS_ERROR(EINVAL);
+ }
- error = mnt_want_write_file(filp);
if (error)
- return error;
- error = xfs_change_file_space(ip, cmd, bf, filp->f_pos, attr_flags);
+ goto out_unlock;
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ goto out_unlock;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ if (!(ioflags & IO_INVIS)) {
+ ip->i_d.di_mode &= ~S_ISUID;
+ if (ip->i_d.di_mode & S_IXGRP)
+ ip->i_d.di_mode &= ~S_ISGID;
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ }
+
+ if (setprealloc)
+ ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
+ else if (clrprealloc)
+ ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
+
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ if (filp->f_flags & O_DSYNC)
+ xfs_trans_set_sync(tp);
+ error = xfs_trans_commit(tp, 0);
+
+out_unlock:
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
mnt_drop_write_file(filp);
return -error;
}
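For context, XFS_IOC_RESVSP/XFS_IOC_RESVSP64 are the long-standing XFS preallocation ioctls that this rewritten handler serves. A minimal userspace sketch of driving it, assuming the xfsprogs headers (<xfs/xfs.h>) provide struct xfs_flock64 and the ioctl number; illustration only, with error handling kept minimal:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xfs/xfs.h>	/* assumed: xfsprogs header with XFS_IOC_RESVSP64 */

int main(int argc, char **argv)
{
	struct xfs_flock64 fl = { 0 };
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file-on-xfs>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	fl.l_whence = SEEK_SET;		/* interpret l_start as an absolute offset */
	fl.l_start = 0;
	fl.l_len = 16 * 1024 * 1024;	/* must be > 0 for RESVSP/UNRESVSP/ZERO_RANGE */

	/* on success the kernel sets XFS_DIFLAG_PREALLOC, as the handler above shows */
	if (ioctl(fd, XFS_IOC_RESVSP64, &fl) < 0)
		perror("XFS_IOC_RESVSP64");

	close(fd);
	return 0;
}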
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index f671f7e472ac..e8fb1231db81 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -22,14 +22,13 @@
#include <asm/uaccess.h>
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
#include "xfs_vnode.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_itable.h"
#include "xfs_error.h"
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 8d4d49b6fbf3..22d1cbea283d 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -17,34 +17,28 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_inode_item.h"
#include "xfs_btree.h"
+#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
#include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
+#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
+#include "xfs_dinode.h"
#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
@@ -110,7 +104,7 @@ xfs_alert_fsblock_zero(
xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
"Access to block zero in inode %llu "
"start_block: %llx start_off: %llx "
- "blkcnt: %llx extent-state: %x\n",
+ "blkcnt: %llx extent-state: %x",
(unsigned long long)ip->i_ino,
(unsigned long long)imap->br_startblock,
(unsigned long long)imap->br_startoff,
@@ -655,7 +649,6 @@ int
xfs_iomap_write_allocate(
xfs_inode_t *ip,
xfs_off_t offset,
- size_t count,
xfs_bmbt_irec_t *imap)
{
xfs_mount_t *mp = ip->i_mount;
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 80615760959a..411fbb8919ef 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -21,12 +21,12 @@
struct xfs_inode;
struct xfs_bmbt_irec;
-extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
+int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
struct xfs_bmbt_irec *, int);
-extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
+int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
struct xfs_bmbt_irec *);
-extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
+int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t,
struct xfs_bmbt_irec *);
-extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
+int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
#endif /* __XFS_IOMAP_H__*/
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 2b8952d9199b..27e0e544e963 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -17,32 +17,28 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
-#include "xfs_acl.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
+#include "xfs_acl.h"
+#include "xfs_quota.h"
#include "xfs_error.h"
-#include "xfs_itable.h"
#include "xfs_attr.h"
-#include "xfs_buf_item.h"
-#include "xfs_inode_item.h"
+#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
+#include "xfs_dinode.h"
#include <linux/capability.h>
#include <linux/xattr.h>
@@ -709,8 +705,7 @@ out_dqrele:
int
xfs_setattr_size(
struct xfs_inode *ip,
- struct iattr *iattr,
- int flags)
+ struct iattr *iattr)
{
struct xfs_mount *mp = ip->i_mount;
struct inode *inode = VFS_I(ip);
@@ -733,15 +728,11 @@ xfs_setattr_size(
if (error)
return XFS_ERROR(error);
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(S_ISREG(ip->i_d.di_mode));
ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
- if (!(flags & XFS_ATTR_NOLOCK)) {
- lock_flags |= XFS_IOLOCK_EXCL;
- xfs_ilock(ip, lock_flags);
- }
-
oldsize = inode->i_size;
newsize = iattr->ia_size;
@@ -750,12 +741,11 @@ xfs_setattr_size(
*/
if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {
if (!(mask & (ATTR_CTIME|ATTR_MTIME)))
- goto out_unlock;
+ return 0;
/*
* Use the regular setattr path to update the timestamps.
*/
- xfs_iunlock(ip, lock_flags);
iattr->ia_valid &= ~ATTR_SIZE;
return xfs_setattr_nonsize(ip, iattr, 0);
}
@@ -765,7 +755,7 @@ xfs_setattr_size(
*/
error = xfs_qm_dqattach(ip, 0);
if (error)
- goto out_unlock;
+ return error;
/*
* Now we can make the changes. Before we join the inode to the
@@ -783,7 +773,7 @@ xfs_setattr_size(
*/
error = xfs_zero_eof(ip, newsize, oldsize);
if (error)
- goto out_unlock;
+ return error;
}
/*
@@ -802,7 +792,7 @@ xfs_setattr_size(
error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
ip->i_d.di_size, newsize);
if (error)
- goto out_unlock;
+ return error;
}
/*
@@ -812,7 +802,7 @@ xfs_setattr_size(
error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks);
if (error)
- goto out_unlock;
+ return error;
tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
@@ -916,12 +906,21 @@ out_trans_cancel:
STATIC int
xfs_vn_setattr(
- struct dentry *dentry,
- struct iattr *iattr)
+ struct dentry *dentry,
+ struct iattr *iattr)
{
- if (iattr->ia_valid & ATTR_SIZE)
- return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0);
- return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0);
+ struct xfs_inode *ip = XFS_I(dentry->d_inode);
+ int error;
+
+ if (iattr->ia_valid & ATTR_SIZE) {
+ xfs_ilock(ip, XFS_IOLOCK_EXCL);
+ error = xfs_setattr_size(ip, iattr);
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ } else {
+ error = xfs_setattr_nonsize(ip, iattr, 0);
+ }
+
+ return -error;
}
STATIC int
@@ -1169,6 +1168,7 @@ xfs_setup_inode(
struct xfs_inode *ip)
{
struct inode *inode = &ip->i_vnode;
+ gfp_t gfp_mask;
inode->i_ino = ip->i_ino;
inode->i_state = I_NEW;
@@ -1204,6 +1204,7 @@ xfs_setup_inode(
inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
xfs_diflags_to_iflags(inode, ip);
+ ip->d_ops = ip->i_mount->m_nondir_inode_ops;
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_op = &xfs_inode_operations;
@@ -1216,6 +1217,7 @@ xfs_setup_inode(
else
inode->i_op = &xfs_dir_inode_operations;
inode->i_fop = &xfs_dir_file_operations;
+ ip->d_ops = ip->i_mount->m_dir_inode_ops;
break;
case S_IFLNK:
inode->i_op = &xfs_symlink_inode_operations;
@@ -1229,6 +1231,14 @@ xfs_setup_inode(
}
/*
+ * Ensure all page cache allocations are done from GFP_NOFS context to
+ * prevent direct reclaim recursion back into the filesystem and blowing
+ * stacks or deadlocking.
+ */
+ gfp_mask = mapping_gfp_mask(inode->i_mapping);
+ mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));
+
+ /*
* If there is no attribute fork no ACL can exist on this inode,
* and it can't have any file capabilities attached to it either.
*/
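With the XFS_ATTR_NOLOCK/NONBLOCK/SYNC flags gone (see the xfs_iops.h hunk below), xfs_setattr_size() now asserts that the caller already holds XFS_IOLOCK_EXCL, as xfs_vn_setattr() and the rewritten xfs_ioc_space() do. A minimal sketch of the new caller contract; the wrapper name is hypothetical, illustration only:

STATIC int
xfs_example_set_size(			/* hypothetical wrapper, illustration only */
	struct xfs_inode	*ip,
	xfs_off_t		newsize)
{
	struct iattr		iattr = {
		.ia_valid	= ATTR_SIZE,
		.ia_size	= newsize,
	};
	int			error;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);		/* the caller provides the IOLOCK now */
	error = xfs_setattr_size(ip, &iattr);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}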
diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h
index d81fb41205ec..d2c5057b5cc4 100644
--- a/fs/xfs/xfs_iops.h
+++ b/fs/xfs/xfs_iops.h
@@ -30,14 +30,10 @@ extern void xfs_setup_inode(struct xfs_inode *);
/*
* Internal setattr interfaces.
*/
-#define XFS_ATTR_DMI 0x01 /* invocation from a DMI function */
-#define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if op would block */
-#define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */
-#define XFS_ATTR_NOACL 0x08 /* Don't call xfs_acl_chmod */
-#define XFS_ATTR_SYNC 0x10 /* synchronous operation required */
+#define XFS_ATTR_NOACL 0x01 /* Don't call xfs_acl_chmod */
extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap,
int flags);
-extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap, int flags);
+extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap);
#endif /* __XFS_IOPS_H__ */
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 084b3e1741fd..c237ad15d500 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -17,24 +17,23 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_btree.h"
#include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
#include "xfs_itable.h"
#include "xfs_error.h"
-#include "xfs_btree.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
+#include "xfs_dinode.h"
STATIC int
xfs_internal_inum(
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a2dea108071a..49dd41e6a2dc 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -17,21 +17,19 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
#include "xfs_log_priv.h"
-#include "xfs_buf_item.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
#include "xfs_log_recover.h"
-#include "xfs_trans_priv.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"
@@ -1000,27 +998,34 @@ xfs_log_space_wake(
}
/*
- * Determine if we have a transaction that has gone to disk
- * that needs to be covered. To begin the transition to the idle state
- * firstly the log needs to be idle (no AIL and nothing in the iclogs).
- * If we are then in a state where covering is needed, the caller is informed
- * that dummy transactions are required to move the log into the idle state.
+ * Determine if we have a transaction that has gone to disk that needs to be
+ * covered. To begin the transition to the idle state firstly the log needs to
+ * be idle. That means the CIL, the AIL and the iclogs need to be empty before
+ * we start attempting to cover the log.
*
- * Because this is called as part of the sync process, we should also indicate
- * that dummy transactions should be issued in anything but the covered or
- * idle states. This ensures that the log tail is accurately reflected in
- * the log at the end of the sync, hence if a crash occurrs avoids replay
- * of transactions where the metadata is already on disk.
+ * Only if we are then in a state where covering is needed is the caller
+ * informed that dummy transactions are required to move the log into the
+ * idle state.
+ *
+ * If there are any items in the AIL or CIL, then we do not want to attempt to
+ * cover the log as we may be in a situation where there isn't log space
+ * available to run a dummy transaction and this can lead to deadlocks when the
+ * tail of the log is pinned by an item that is modified in the CIL. Hence
+ * there's no point in running a dummy transaction at this point because we
+ * can't start trying to idle the log until both the CIL and AIL are empty.
*/
int
xfs_log_need_covered(xfs_mount_t *mp)
{
- int needed = 0;
struct xlog *log = mp->m_log;
+ int needed = 0;
if (!xfs_fs_writable(mp))
return 0;
+ if (!xlog_cil_empty(log))
+ return 0;
+
spin_lock(&log->l_icloglock);
switch (log->l_covered_state) {
case XLOG_STATE_COVER_DONE:
@@ -1029,14 +1034,17 @@ xfs_log_need_covered(xfs_mount_t *mp)
break;
case XLOG_STATE_COVER_NEED:
case XLOG_STATE_COVER_NEED2:
- if (!xfs_ail_min_lsn(log->l_ailp) &&
- xlog_iclogs_empty(log)) {
- if (log->l_covered_state == XLOG_STATE_COVER_NEED)
- log->l_covered_state = XLOG_STATE_COVER_DONE;
- else
- log->l_covered_state = XLOG_STATE_COVER_DONE2;
- }
- /* FALLTHRU */
+ if (xfs_ail_min_lsn(log->l_ailp))
+ break;
+ if (!xlog_iclogs_empty(log))
+ break;
+
+ needed = 1;
+ if (log->l_covered_state == XLOG_STATE_COVER_NEED)
+ log->l_covered_state = XLOG_STATE_COVER_DONE;
+ else
+ log->l_covered_state = XLOG_STATE_COVER_DONE2;
+ break;
default:
needed = 1;
break;
@@ -1979,7 +1987,7 @@ xlog_print_tic_res(
for (i = 0; i < ticket->t_res_num; i++) {
uint r_type = ticket->t_res_arr[i].r_type;
- xfs_warn(mp, "region[%u]: %s - %u bytes\n", i,
+ xfs_warn(mp, "region[%u]: %s - %u bytes", i,
((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
"bad-rtype" : res_type_str[r_type-1]),
ticket->t_res_arr[i].r_len);
@@ -3702,11 +3710,9 @@ xlog_verify_iclog(
/* check validity of iclog pointers */
spin_lock(&log->l_icloglock);
icptr = log->l_iclog;
- for (i=0; i < log->l_iclog_bufs; i++) {
- if (icptr == NULL)
- xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
- icptr = icptr->ic_next;
- }
+ for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
+ ASSERT(icptr);
+
if (icptr != log->l_iclog)
xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
spin_unlock(&log->l_icloglock);
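The xlog_cil_empty() check added to xfs_log_need_covered() above is what the periodic log worker relies on when deciding whether a dummy transaction is worth issuing. A hedged reconstruction of that caller (it lives elsewhere in xfs_log.c and is not part of this hunk, so treat the details as illustrative rather than verbatim):

void
xfs_log_worker(
	struct work_struct	*work)
{
	struct xlog		*log = container_of(to_delayed_work(work),
						    struct xlog, l_work);
	struct xfs_mount	*mp = log->l_mp;

	if (xfs_log_need_covered(mp))
		xfs_fs_log_dummy(mp);	/* push a dummy transaction to advance the covered state */
	else
		xfs_log_force(mp, 0);	/* otherwise just flush anything pending to disk */

	xfs_log_work_queue(mp);		/* re-arm the periodic work */
}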
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 1c458487f000..e148719e0a5d 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -18,8 +18,6 @@
#ifndef __XFS_LOG_H__
#define __XFS_LOG_H__
-#include "xfs_log_format.h"
-
struct xfs_log_vec {
struct xfs_log_vec *lv_next; /* next lv in build list */
int lv_niovecs; /* number of iovecs in lv */
@@ -82,11 +80,7 @@ struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;
-
-void xfs_log_item_init(struct xfs_mount *mp,
- struct xfs_log_item *item,
- int type,
- const struct xfs_item_ops *ops);
+struct xfs_log_callback;
xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
struct xlog_ticket *ticket,
@@ -114,7 +108,7 @@ xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void xfs_log_space_wake(struct xfs_mount *mp);
int xfs_log_notify(struct xfs_mount *mp,
struct xlog_in_core *iclog,
- xfs_log_callback_t *callback_entry);
+ struct xfs_log_callback *callback_entry);
int xfs_log_release_iclog(struct xfs_mount *mp,
struct xlog_in_core *iclog);
int xfs_log_reserve(struct xfs_mount *mp,
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index cfe97973ba36..5eb51fc5eb84 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -17,11 +17,9 @@
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
-#include "xfs_log_priv.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
@@ -29,6 +27,10 @@
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
+#include "xfs_log_priv.h"
/*
* Allocate a new ticket. Failing to get a new ticket makes it really hard to
@@ -711,6 +713,20 @@ xlog_cil_push_foreground(
xlog_cil_push(log);
}
+bool
+xlog_cil_empty(
+ struct xlog *log)
+{
+ struct xfs_cil *cil = log->l_cilp;
+ bool empty = false;
+
+ spin_lock(&cil->xc_push_lock);
+ if (list_empty(&cil->xc_cil))
+ empty = true;
+ spin_unlock(&cil->xc_push_lock);
+ return empty;
+}
+
/*
* Commit a transaction with the given vector to the Committed Item List.
*
diff --git a/fs/xfs/xfs_log_format.h b/fs/xfs/xfs_log_format.h
index ca7e28a8ed31..f0969c77bdbe 100644
--- a/fs/xfs/xfs_log_format.h
+++ b/fs/xfs/xfs_log_format.h
@@ -234,178 +234,6 @@ typedef struct xfs_trans_header {
{ XFS_LI_ICREATE, "XFS_LI_ICREATE" }
/*
- * Transaction types. Used to distinguish types of buffers.
- */
-#define XFS_TRANS_SETATTR_NOT_SIZE 1
-#define XFS_TRANS_SETATTR_SIZE 2
-#define XFS_TRANS_INACTIVE 3
-#define XFS_TRANS_CREATE 4
-#define XFS_TRANS_CREATE_TRUNC 5
-#define XFS_TRANS_TRUNCATE_FILE 6
-#define XFS_TRANS_REMOVE 7
-#define XFS_TRANS_LINK 8
-#define XFS_TRANS_RENAME 9
-#define XFS_TRANS_MKDIR 10
-#define XFS_TRANS_RMDIR 11
-#define XFS_TRANS_SYMLINK 12
-#define XFS_TRANS_SET_DMATTRS 13
-#define XFS_TRANS_GROWFS 14
-#define XFS_TRANS_STRAT_WRITE 15
-#define XFS_TRANS_DIOSTRAT 16
-/* 17 was XFS_TRANS_WRITE_SYNC */
-#define XFS_TRANS_WRITEID 18
-#define XFS_TRANS_ADDAFORK 19
-#define XFS_TRANS_ATTRINVAL 20
-#define XFS_TRANS_ATRUNCATE 21
-#define XFS_TRANS_ATTR_SET 22
-#define XFS_TRANS_ATTR_RM 23
-#define XFS_TRANS_ATTR_FLAG 24
-#define XFS_TRANS_CLEAR_AGI_BUCKET 25
-#define XFS_TRANS_QM_SBCHANGE 26
-/*
- * Dummy entries since we use the transaction type to index into the
- * trans_type[] in xlog_recover_print_trans_head()
- */
-#define XFS_TRANS_DUMMY1 27
-#define XFS_TRANS_DUMMY2 28
-#define XFS_TRANS_QM_QUOTAOFF 29
-#define XFS_TRANS_QM_DQALLOC 30
-#define XFS_TRANS_QM_SETQLIM 31
-#define XFS_TRANS_QM_DQCLUSTER 32
-#define XFS_TRANS_QM_QINOCREATE 33
-#define XFS_TRANS_QM_QUOTAOFF_END 34
-#define XFS_TRANS_SB_UNIT 35
-#define XFS_TRANS_FSYNC_TS 36
-#define XFS_TRANS_GROWFSRT_ALLOC 37
-#define XFS_TRANS_GROWFSRT_ZERO 38
-#define XFS_TRANS_GROWFSRT_FREE 39
-#define XFS_TRANS_SWAPEXT 40
-#define XFS_TRANS_SB_COUNT 41
-#define XFS_TRANS_CHECKPOINT 42
-#define XFS_TRANS_ICREATE 43
-#define XFS_TRANS_TYPE_MAX 43
-/* new transaction types need to be reflected in xfs_logprint(8) */
-
-#define XFS_TRANS_TYPES \
- { XFS_TRANS_SETATTR_NOT_SIZE, "SETATTR_NOT_SIZE" }, \
- { XFS_TRANS_SETATTR_SIZE, "SETATTR_SIZE" }, \
- { XFS_TRANS_INACTIVE, "INACTIVE" }, \
- { XFS_TRANS_CREATE, "CREATE" }, \
- { XFS_TRANS_CREATE_TRUNC, "CREATE_TRUNC" }, \
- { XFS_TRANS_TRUNCATE_FILE, "TRUNCATE_FILE" }, \
- { XFS_TRANS_REMOVE, "REMOVE" }, \
- { XFS_TRANS_LINK, "LINK" }, \
- { XFS_TRANS_RENAME, "RENAME" }, \
- { XFS_TRANS_MKDIR, "MKDIR" }, \
- { XFS_TRANS_RMDIR, "RMDIR" }, \
- { XFS_TRANS_SYMLINK, "SYMLINK" }, \
- { XFS_TRANS_SET_DMATTRS, "SET_DMATTRS" }, \
- { XFS_TRANS_GROWFS, "GROWFS" }, \
- { XFS_TRANS_STRAT_WRITE, "STRAT_WRITE" }, \
- { XFS_TRANS_DIOSTRAT, "DIOSTRAT" }, \
- { XFS_TRANS_WRITEID, "WRITEID" }, \
- { XFS_TRANS_ADDAFORK, "ADDAFORK" }, \
- { XFS_TRANS_ATTRINVAL, "ATTRINVAL" }, \
- { XFS_TRANS_ATRUNCATE, "ATRUNCATE" }, \
- { XFS_TRANS_ATTR_SET, "ATTR_SET" }, \
- { XFS_TRANS_ATTR_RM, "ATTR_RM" }, \
- { XFS_TRANS_ATTR_FLAG, "ATTR_FLAG" }, \
- { XFS_TRANS_CLEAR_AGI_BUCKET, "CLEAR_AGI_BUCKET" }, \
- { XFS_TRANS_QM_SBCHANGE, "QM_SBCHANGE" }, \
- { XFS_TRANS_QM_QUOTAOFF, "QM_QUOTAOFF" }, \
- { XFS_TRANS_QM_DQALLOC, "QM_DQALLOC" }, \
- { XFS_TRANS_QM_SETQLIM, "QM_SETQLIM" }, \
- { XFS_TRANS_QM_DQCLUSTER, "QM_DQCLUSTER" }, \
- { XFS_TRANS_QM_QINOCREATE, "QM_QINOCREATE" }, \
- { XFS_TRANS_QM_QUOTAOFF_END, "QM_QOFF_END" }, \
- { XFS_TRANS_SB_UNIT, "SB_UNIT" }, \
- { XFS_TRANS_FSYNC_TS, "FSYNC_TS" }, \
- { XFS_TRANS_GROWFSRT_ALLOC, "GROWFSRT_ALLOC" }, \
- { XFS_TRANS_GROWFSRT_ZERO, "GROWFSRT_ZERO" }, \
- { XFS_TRANS_GROWFSRT_FREE, "GROWFSRT_FREE" }, \
- { XFS_TRANS_SWAPEXT, "SWAPEXT" }, \
- { XFS_TRANS_SB_COUNT, "SB_COUNT" }, \
- { XFS_TRANS_CHECKPOINT, "CHECKPOINT" }, \
- { XFS_TRANS_DUMMY1, "DUMMY1" }, \
- { XFS_TRANS_DUMMY2, "DUMMY2" }, \
- { XLOG_UNMOUNT_REC_TYPE, "UNMOUNT" }
-
-/*
- * This structure is used to track log items associated with
- * a transaction. It points to the log item and keeps some
- * flags to track the state of the log item. It also tracks
- * the amount of space needed to log the item it describes
- * once we get to commit processing (see xfs_trans_commit()).
- */
-struct xfs_log_item_desc {
- struct xfs_log_item *lid_item;
- struct list_head lid_trans;
- unsigned char lid_flags;
-};
-
-#define XFS_LID_DIRTY 0x1
-
-/*
- * Values for t_flags.
- */
-#define XFS_TRANS_DIRTY 0x01 /* something needs to be logged */
-#define XFS_TRANS_SB_DIRTY 0x02 /* superblock is modified */
-#define XFS_TRANS_PERM_LOG_RES 0x04 /* xact took a permanent log res */
-#define XFS_TRANS_SYNC 0x08 /* make commit synchronous */
-#define XFS_TRANS_DQ_DIRTY 0x10 /* at least one dquot in trx dirty */
-#define XFS_TRANS_RESERVE 0x20 /* OK to use reserved data blocks */
-#define XFS_TRANS_FREEZE_PROT 0x40 /* Transaction has elevated writer
- count in superblock */
-
-/*
- * Values for call flags parameter.
- */
-#define XFS_TRANS_RELEASE_LOG_RES 0x4
-#define XFS_TRANS_ABORT 0x8
-
-/*
- * Field values for xfs_trans_mod_sb.
- */
-#define XFS_TRANS_SB_ICOUNT 0x00000001
-#define XFS_TRANS_SB_IFREE 0x00000002
-#define XFS_TRANS_SB_FDBLOCKS 0x00000004
-#define XFS_TRANS_SB_RES_FDBLOCKS 0x00000008
-#define XFS_TRANS_SB_FREXTENTS 0x00000010
-#define XFS_TRANS_SB_RES_FREXTENTS 0x00000020
-#define XFS_TRANS_SB_DBLOCKS 0x00000040
-#define XFS_TRANS_SB_AGCOUNT 0x00000080
-#define XFS_TRANS_SB_IMAXPCT 0x00000100
-#define XFS_TRANS_SB_REXTSIZE 0x00000200
-#define XFS_TRANS_SB_RBMBLOCKS 0x00000400
-#define XFS_TRANS_SB_RBLOCKS 0x00000800
-#define XFS_TRANS_SB_REXTENTS 0x00001000
-#define XFS_TRANS_SB_REXTSLOG 0x00002000
-
-/*
- * Here we centralize the specification of XFS meta-data buffer
- * reference count values. This determine how hard the buffer
- * cache tries to hold onto the buffer.
- */
-#define XFS_AGF_REF 4
-#define XFS_AGI_REF 4
-#define XFS_AGFL_REF 3
-#define XFS_INO_BTREE_REF 3
-#define XFS_ALLOC_BTREE_REF 2
-#define XFS_BMAP_BTREE_REF 2
-#define XFS_DIR_BTREE_REF 2
-#define XFS_INO_REF 2
-#define XFS_ATTR_BTREE_REF 1
-#define XFS_DQUOT_REF 1
-
-/*
- * Flags for xfs_trans_ichgtime().
- */
-#define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */
-#define XFS_ICHGTIME_CHG 0x2 /* inode field change timestamp */
-#define XFS_ICHGTIME_CREATE 0x4 /* inode create timestamp */
-
-
-/*
* Inode Log Item Format definitions.
*
* This is the structure used to lay out an inode log item in the
@@ -797,7 +625,6 @@ typedef struct xfs_qoff_logformat {
char qf_pad[12]; /* padding for future */
} xfs_qoff_logformat_t;
-
/*
* Disk quotas status in m_qflags, and also sb_qflags. 16 bits.
*/
@@ -849,8 +676,4 @@ struct xfs_icreate_log {
__be32 icl_gen; /* inode generation number to use */
};
-int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
-int xfs_log_calc_minimum_size(struct xfs_mount *);
-
-
#endif /* __XFS_LOG_FORMAT_H__ */
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 136654b9400d..9bc403a9e54f 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -22,6 +22,7 @@ struct xfs_buf;
struct xlog;
struct xlog_ticket;
struct xfs_mount;
+struct xfs_log_callback;
/*
* Flags for log structure
@@ -227,8 +228,8 @@ typedef struct xlog_in_core {
/* Callback structures need their own cacheline */
spinlock_t ic_callback_lock ____cacheline_aligned_in_smp;
- xfs_log_callback_t *ic_callback;
- xfs_log_callback_t **ic_callback_tail;
+ struct xfs_log_callback *ic_callback;
+ struct xfs_log_callback **ic_callback_tail;
/* reference counts need their own cacheline */
atomic_t ic_refcnt ____cacheline_aligned_in_smp;
@@ -254,7 +255,7 @@ struct xfs_cil_ctx {
int space_used; /* aggregate size of regions */
struct list_head busy_extents; /* busy extents in chkpt */
struct xfs_log_vec *lv_chain; /* logvecs being pushed */
- xfs_log_callback_t log_cb; /* completion callback hook. */
+ struct xfs_log_callback log_cb; /* completion callback hook. */
struct list_head committing; /* ctx committing list */
};
@@ -514,12 +515,10 @@ xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
/*
* Committed Item List interfaces
*/
-int
-xlog_cil_init(struct xlog *log);
-void
-xlog_cil_init_post_recovery(struct xlog *log);
-void
-xlog_cil_destroy(struct xlog *log);
+int xlog_cil_init(struct xlog *log);
+void xlog_cil_init_post_recovery(struct xlog *log);
+void xlog_cil_destroy(struct xlog *log);
+bool xlog_cil_empty(struct xlog *log);
/*
* CIL force routines
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 39797490a1f1..b6b669df40f3 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -17,42 +17,34 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_error.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
#include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_alloc.h"
-#include "xfs_ialloc.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
#include "xfs_log_priv.h"
-#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
+#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
-#include "xfs_icreate_item.h"
-
-/* Need all the magic numbers and buffer ops structures from these headers */
-#include "xfs_symlink.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_dinode.h"
+#include "xfs_error.h"
#include "xfs_dir2.h"
-#include "xfs_attr_leaf.h"
-#include "xfs_attr_remote.h"
#define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
@@ -305,9 +297,9 @@ xlog_header_check_dump(
xfs_mount_t *mp,
xlog_rec_header_t *head)
{
- xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d\n",
+ xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d",
__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
- xfs_debug(mp, " log : uuid = %pU, fmt = %d\n",
+ xfs_debug(mp, " log : uuid = %pU, fmt = %d",
&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
@@ -2362,7 +2354,7 @@ xlog_recover_do_reg_buffer(
item->ri_buf[i].i_len, __func__);
goto next;
}
- error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
+ error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
-1, 0, XFS_QMOPT_DOWARN,
"dquot_buf_recover");
if (error)
@@ -2394,133 +2386,6 @@ xlog_recover_do_reg_buffer(
}
/*
- * Do some primitive error checking on ondisk dquot data structures.
- */
-int
-xfs_qm_dqcheck(
- struct xfs_mount *mp,
- xfs_disk_dquot_t *ddq,
- xfs_dqid_t id,
- uint type, /* used only when IO_dorepair is true */
- uint flags,
- char *str)
-{
- xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
- int errs = 0;
-
- /*
- * We can encounter an uninitialized dquot buffer for 2 reasons:
- * 1. If we crash while deleting the quotainode(s), and those blks got
- * used for user data. This is because we take the path of regular
- * file deletion; however, the size field of quotainodes is never
- * updated, so all the tricks that we play in itruncate_finish
- * don't quite matter.
- *
- * 2. We don't play the quota buffers when there's a quotaoff logitem.
- * But the allocation will be replayed so we'll end up with an
- * uninitialized quota block.
- *
- * This is all fine; things are still consistent, and we haven't lost
- * any quota information. Just don't complain about bad dquot blks.
- */
- if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
- str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
- errs++;
- }
- if (ddq->d_version != XFS_DQUOT_VERSION) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
- str, id, ddq->d_version, XFS_DQUOT_VERSION);
- errs++;
- }
-
- if (ddq->d_flags != XFS_DQ_USER &&
- ddq->d_flags != XFS_DQ_PROJ &&
- ddq->d_flags != XFS_DQ_GROUP) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
- str, id, ddq->d_flags);
- errs++;
- }
-
- if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : ondisk-dquot 0x%p, ID mismatch: "
- "0x%x expected, found id 0x%x",
- str, ddq, id, be32_to_cpu(ddq->d_id));
- errs++;
- }
-
- if (!errs && ddq->d_id) {
- if (ddq->d_blk_softlimit &&
- be64_to_cpu(ddq->d_bcount) >
- be64_to_cpu(ddq->d_blk_softlimit)) {
- if (!ddq->d_btimer) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
- str, (int)be32_to_cpu(ddq->d_id), ddq);
- errs++;
- }
- }
- if (ddq->d_ino_softlimit &&
- be64_to_cpu(ddq->d_icount) >
- be64_to_cpu(ddq->d_ino_softlimit)) {
- if (!ddq->d_itimer) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
- str, (int)be32_to_cpu(ddq->d_id), ddq);
- errs++;
- }
- }
- if (ddq->d_rtb_softlimit &&
- be64_to_cpu(ddq->d_rtbcount) >
- be64_to_cpu(ddq->d_rtb_softlimit)) {
- if (!ddq->d_rtbtimer) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
- str, (int)be32_to_cpu(ddq->d_id), ddq);
- errs++;
- }
- }
- }
-
- if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
- return errs;
-
- if (flags & XFS_QMOPT_DOWARN)
- xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
-
- /*
- * Typically, a repair is only requested by quotacheck.
- */
- ASSERT(id != -1);
- ASSERT(flags & XFS_QMOPT_DQREPAIR);
- memset(d, 0, sizeof(xfs_dqblk_t));
-
- d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
- d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
- d->dd_diskdq.d_flags = type;
- d->dd_diskdq.d_id = cpu_to_be32(id);
-
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
- uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
- xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
- XFS_DQUOT_CRC_OFF);
- }
-
- return errs;
-}
-
-/*
* Perform a dquot buffer recovery.
* Simple algorithm: if we have found a QUOTAOFF log item of the same type
* (ie. USR or GRP), then just toss this buffer away; don't recover it.
@@ -3125,7 +2990,7 @@ xlog_recover_dquot_pass2(
*/
dq_f = item->ri_buf[0].i_addr;
ASSERT(dq_f);
- error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
+ error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
"xlog_recover_dquot_pass2 (log copy)");
if (error)
return XFS_ERROR(EIO);
@@ -3145,7 +3010,7 @@ xlog_recover_dquot_pass2(
* was among a chunk of dquots created earlier, and we did some
* minimal initialization then.
*/
- error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
+ error = xfs_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
"xlog_recover_dquot_pass2");
if (error) {
xfs_buf_relse(bp);
@@ -4077,7 +3942,7 @@ xlog_unpack_data_crc(
if (crc != rhead->h_crc) {
if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
xfs_alert(log->l_mp,
- "log record CRC mismatch: found 0x%x, expected 0x%x.\n",
+ "log record CRC mismatch: found 0x%x, expected 0x%x.",
le32_to_cpu(rhead->h_crc),
le32_to_cpu(crc));
xfs_hex_dump(dp, 32);
diff --git a/fs/xfs/xfs_log_rlimit.c b/fs/xfs/xfs_log_rlimit.c
index bbcec0bbc12d..2af1a0a4d0f1 100644
--- a/fs/xfs/xfs_log_rlimit.c
+++ b/fs/xfs/xfs_log_rlimit.c
@@ -17,16 +17,19 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_ag.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_trans_space.h"
-#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_da_btree.h"
#include "xfs_attr_leaf.h"
+#include "xfs_bmap_btree.h"
/*
* Calculate the maximum length in bytes that would be required for a local
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
index 9163dc140532..63ca2f0420b1 100644
--- a/fs/xfs/xfs_message.c
+++ b/fs/xfs/xfs_message.c
@@ -17,9 +17,8 @@
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 5dcc68019d1b..da88f167af78 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -17,35 +17,30 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
-#include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
#include "xfs_inode.h"
-#include "xfs_btree.h"
+#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
-#include "xfs_cksum.h"
-#include "xfs_buf_item.h"
#ifdef HAVE_PERCPU_SB
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1fa0584b5627..1d8101a10d8e 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -26,6 +26,7 @@ struct xfs_mru_cache;
struct xfs_nameops;
struct xfs_ail;
struct xfs_quotainfo;
+struct xfs_dir_ops;
#ifdef HAVE_PERCPU_SB
@@ -148,6 +149,8 @@ typedef struct xfs_mount {
int m_dir_magicpct; /* 37% of the dir blocksize */
__uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
const struct xfs_nameops *m_dirnameops; /* vector of dir name ops */
+ const struct xfs_dir_ops *m_dir_inode_ops; /* vector of dir inode ops */
+ const struct xfs_dir_ops *m_nondir_inode_ops; /* !dir inode ops */
int m_dirblksize; /* directory block sz--bytes */
int m_dirblkfsbs; /* directory block sz--fsbs */
xfs_dablk_t m_dirdatablk; /* blockno of dir data v2 */
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 3e6c2e6c9cd2..14a4996cfec6 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -17,31 +17,28 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
-#include "xfs_rtalloc.h"
+#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"
+#include "xfs_dinode.h"
/*
* The global quota manager. There is only one of these for the entire
@@ -664,20 +661,6 @@ xfs_qm_dqdetach(
}
}
-int
-xfs_qm_calc_dquots_per_chunk(
- struct xfs_mount *mp,
- unsigned int nbblks) /* basic block units */
-{
- unsigned int ndquots;
-
- ASSERT(nbblks > 0);
- ndquots = BBTOB(nbblks);
- do_div(ndquots, sizeof(xfs_dqblk_t));
-
- return ndquots;
-}
-
struct xfs_qm_isolate {
struct list_head buffers;
struct list_head dispose;
@@ -858,7 +841,7 @@ xfs_qm_init_quotainfo(
/* Precalc some constants */
qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
- qinf->qi_dqperchunk = xfs_qm_calc_dquots_per_chunk(mp,
+ qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(mp,
qinf->qi_dqchunklen);
mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
@@ -1092,10 +1075,10 @@ xfs_qm_reset_dqcounts(
/*
* Do a sanity check, and if needed, repair the dqblk. Don't
* output any warnings because it's perfectly possible to
- * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
+ * find uninitialised dquot blks. See comment in xfs_dqcheck.
*/
- (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
- "xfs_quotacheck");
+ xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
+ "xfs_quotacheck");
ddq->d_bcount = 0;
ddq->d_icount = 0;
ddq->d_rtbcount = 0;
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 2b602df9c242..a788b66a5cb1 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -103,8 +103,6 @@ xfs_dq_to_quota_inode(struct xfs_dquot *dqp)
return NULL;
}
-extern int xfs_qm_calc_dquots_per_chunk(struct xfs_mount *mp,
- unsigned int nbblks);
extern void xfs_trans_mod_dquot(struct xfs_trans *,
struct xfs_dquot *, uint, long);
extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index 3af50ccdfac1..e9be63abd8d2 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -18,21 +18,15 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
#include "xfs_error.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
#include "xfs_qm.h"
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 8174aad0b388..437c9198031a 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -20,24 +20,18 @@
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
+#include "xfs_trans.h"
#include "xfs_error.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
@@ -287,7 +281,7 @@ xfs_qm_scall_trunc_qfiles(
int error = 0, error2 = 0;
if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
- xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
+ xfs_debug(mp, "%s: flags=%x m_qflags=%x",
__func__, flags, mp->m_qflags);
return XFS_ERROR(EINVAL);
}
@@ -325,7 +319,7 @@ xfs_qm_scall_quotaon(
sbflags = 0;
if (flags == 0) {
- xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
+ xfs_debug(mp, "%s: zero flags, m_qflags=%x",
__func__, mp->m_qflags);
return XFS_ERROR(EINVAL);
}
@@ -348,7 +342,7 @@ xfs_qm_scall_quotaon(
(mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
(flags & XFS_PQUOTA_ENFD))) {
xfs_debug(mp,
- "%s: Can't enforce without acct, flags=%x sbflags=%x\n",
+ "%s: Can't enforce without acct, flags=%x sbflags=%x",
__func__, flags, mp->m_sb.sb_qflags);
return XFS_ERROR(EINVAL);
}
@@ -648,7 +642,7 @@ xfs_qm_scall_setqlim(
q->qi_bsoftlimit = soft;
}
} else {
- xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
+ xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
}
hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
@@ -664,7 +658,7 @@ xfs_qm_scall_setqlim(
q->qi_rtbsoftlimit = soft;
}
} else {
- xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
+ xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
}
hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
@@ -681,7 +675,7 @@ xfs_qm_scall_setqlim(
q->qi_isoftlimit = soft;
}
} else {
- xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
+ xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
}
/*
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index e7d84d2d8683..5376dd406ba2 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -150,10 +150,6 @@ static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, pd, nb, ni, \
f | XFS_QMOPT_RES_REGBLKS)
-extern int xfs_qm_dqcheck(struct xfs_mount *, xfs_disk_dquot_t *,
- xfs_dqid_t, uint, uint, char *);
extern int xfs_mount_reset_sbqflags(struct xfs_mount *);
-extern const struct xfs_buf_ops xfs_dquot_buf_ops;
-
#endif /* __XFS_QUOTA_H__ */
diff --git a/fs/xfs/xfs_quota_defs.h b/fs/xfs/xfs_quota_defs.h
index e6b0d6e1f4f2..b3b2b1065c0f 100644
--- a/fs/xfs/xfs_quota_defs.h
+++ b/fs/xfs/xfs_quota_defs.h
@@ -154,4 +154,8 @@ typedef __uint16_t xfs_qwarncnt_t;
(XFS_QMOPT_UQUOTA | XFS_QMOPT_PQUOTA | XFS_QMOPT_GQUOTA)
#define XFS_QMOPT_RESBLK_MASK (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
+extern int xfs_dqcheck(struct xfs_mount *mp, xfs_disk_dquot_t *ddq,
+ xfs_dqid_t id, uint type, uint flags, char *str);
+extern int xfs_calc_dquots_per_chunk(struct xfs_mount *mp, unsigned int nbblks);
+
#endif /* __XFS_QUOTA_H__ */
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 1326d81596c2..af33cafe69b6 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -17,15 +17,14 @@
*/
#include "xfs.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inode.h"
#include "xfs_qm.h"
#include <linux/quota.h>
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 6f9e63c9fc26..a6a76b2b6a85 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -17,172 +17,260 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
-#include "xfs_fsops.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc.h"
#include "xfs_error.h"
-#include "xfs_inode_item.h"
+#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_buf.h"
#include "xfs_icache.h"
+#include "xfs_dinode.h"
+#include "xfs_rtalloc.h"
/*
- * Prototypes for internal functions.
+ * Read and return the summary information for a given extent size,
+ * bitmap block combination.
+ * Keeps track of a current summary block, so we don't keep reading
+ * it from the buffer cache.
*/
+STATIC int /* error */
+xfs_rtget_summary(
+ xfs_mount_t *mp, /* file system mount structure */
+ xfs_trans_t *tp, /* transaction pointer */
+ int log, /* log2 of extent size */
+ xfs_rtblock_t bbno, /* bitmap block number */
+ xfs_buf_t **rbpp, /* in/out: summary block buffer */
+ xfs_fsblock_t *rsb, /* in/out: summary block number */
+ xfs_suminfo_t *sum) /* out: summary info for this block */
+{
+ xfs_buf_t *bp; /* buffer for summary block */
+ int error; /* error value */
+ xfs_fsblock_t sb; /* summary fsblock */
+ int so; /* index into the summary file */
+ xfs_suminfo_t *sp; /* pointer to returned data */
+ /*
+ * Compute entry number in the summary file.
+ */
+ so = XFS_SUMOFFS(mp, log, bbno);
+ /*
+ * Compute the block number in the summary file.
+ */
+ sb = XFS_SUMOFFSTOBLOCK(mp, so);
+ /*
+ * If we have an old buffer, and the block number matches, use that.
+ */
+ if (rbpp && *rbpp && *rsb == sb)
+ bp = *rbpp;
+ /*
+ * Otherwise we have to get the buffer.
+ */
+ else {
+ /*
+ * If there was an old one, get rid of it first.
+ */
+ if (rbpp && *rbpp)
+ xfs_trans_brelse(tp, *rbpp);
+ error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
+ if (error) {
+ return error;
+ }
+ /*
+ * Remember this buffer and block for the next call.
+ */
+ if (rbpp) {
+ *rbpp = bp;
+ *rsb = sb;
+ }
+ }
+ /*
+ * Point to the summary information & copy it out.
+ */
+ sp = XFS_SUMPTR(mp, bp, so);
+ *sum = *sp;
+ /*
+ * Drop the buffer if we're not asked to remember it.
+ */
+ if (!rbpp)
+ xfs_trans_brelse(tp, bp);
+ return 0;
+}
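For readers new to the realtime summary file: it is effectively a two-dimensional table of free-extent counts indexed by (log2 of extent size, bitmap block number). The sketch below is a hedged userspace model of the index arithmetic behind the XFS_SUMOFFS / XFS_SUMOFFSTOBLOCK macros used above; the struct and function names are hypothetical, and the 4-byte entry size is an assumption carried over from xfs_suminfo_t.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical geometry, standing in for the mount fields the macros use. */
struct rtsum_geom {
	unsigned int	rbmblocks;	/* bitmap blocks in the rt bitmap file */
	unsigned int	blocksize;	/* filesystem block size in bytes */
};

/* Entry number in the summary file: one row per log2 size class. */
static unsigned int
rtsum_offset(const struct rtsum_geom *g, int log, unsigned int bbno)
{
	return (unsigned int)log * g->rbmblocks + bbno;
}

/* Which summary-file block holds that entry (assuming 4-byte entries). */
static unsigned int
rtsum_block(const struct rtsum_geom *g, unsigned int so)
{
	return so / (g->blocksize / (unsigned int)sizeof(uint32_t));
}

int
main(void)
{
	struct rtsum_geom g = { .rbmblocks = 8, .blocksize = 4096 };
	unsigned int so = rtsum_offset(&g, 3, 5);	/* size class 2^3, bitmap block 5 */

	printf("entry %u lives in summary block %u\n", so, rtsum_block(&g, so));
	return 0;
}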
-STATIC int xfs_rtallocate_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
- xfs_extlen_t, xfs_buf_t **, xfs_fsblock_t *);
-STATIC int xfs_rtany_summary(xfs_mount_t *, xfs_trans_t *, int, int,
- xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, int *);
-STATIC int xfs_rtcheck_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
- xfs_extlen_t, int, xfs_rtblock_t *, int *);
-STATIC int xfs_rtfind_back(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
- xfs_rtblock_t, xfs_rtblock_t *);
-STATIC int xfs_rtfind_forw(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
- xfs_rtblock_t, xfs_rtblock_t *);
-STATIC int xfs_rtget_summary( xfs_mount_t *, xfs_trans_t *, int,
- xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, xfs_suminfo_t *);
-STATIC int xfs_rtmodify_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
- xfs_extlen_t, int);
-STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int,
- xfs_rtblock_t, int, xfs_buf_t **, xfs_fsblock_t *);
-
-/*
- * Internal functions.
- */
/*
- * Allocate space to the bitmap or summary file, and zero it, for growfs.
+ * Return whether there are any free extents in the size range given
+ * by low and high, for the bitmap block bbno.
*/
STATIC int /* error */
-xfs_growfs_rt_alloc(
- xfs_mount_t *mp, /* file system mount point */
- xfs_extlen_t oblocks, /* old count of blocks */
- xfs_extlen_t nblocks, /* new count of blocks */
- xfs_inode_t *ip) /* inode (bitmap/summary) */
+xfs_rtany_summary(
+ xfs_mount_t *mp, /* file system mount structure */
+ xfs_trans_t *tp, /* transaction pointer */
+ int low, /* low log2 extent size */
+ int high, /* high log2 extent size */
+ xfs_rtblock_t bbno, /* bitmap block number */
+ xfs_buf_t **rbpp, /* in/out: summary block buffer */
+ xfs_fsblock_t *rsb, /* in/out: summary block number */
+ int *stat) /* out: any good extents here? */
{
- xfs_fileoff_t bno; /* block number in file */
- xfs_buf_t *bp; /* temporary buffer for zeroing */
- int committed; /* transaction committed flag */
- xfs_daddr_t d; /* disk block address */
- int error; /* error return value */
- xfs_fsblock_t firstblock; /* first block allocated in xaction */
- xfs_bmap_free_t flist; /* list of freed blocks */
- xfs_fsblock_t fsbno; /* filesystem block for bno */
- xfs_bmbt_irec_t map; /* block map output */
- int nmap; /* number of block maps */
- int resblks; /* space reservation */
+ int error; /* error value */
+ int log; /* loop counter, log2 of ext. size */
+ xfs_suminfo_t sum; /* summary data */
/*
- * Allocate space to the file, as necessary.
+ * Loop over logs of extent sizes. Order is irrelevant.
*/
- while (oblocks < nblocks) {
- int cancelflags = 0;
- xfs_trans_t *tp;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC);
- resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
+ for (log = low; log <= high; log++) {
/*
- * Reserve space & log for one extent added to the file.
+ * Get one summary datum.
*/
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
- resblks, 0);
- if (error)
- goto error_cancel;
- cancelflags = XFS_TRANS_RELEASE_LOG_RES;
+ error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum);
+ if (error) {
+ return error;
+ }
/*
- * Lock the inode.
+ * If there are any, return success.
*/
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ if (sum) {
+ *stat = 1;
+ return 0;
+ }
+ }
+ /*
+ * Found nothing, return failure.
+ */
+ *stat = 0;
+ return 0;
+}
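As a usage illustration, the loop above reduces to: walk the size classes from low to high and stop at the first non-zero count. A minimal in-memory stand-in (hypothetical names, the per-block summary modelled as a flat array) might look like:

#include <stdio.h>

/* sum[log] holds the free-extent count for size class 2^log in one bitmap block. */
static int
any_summary(const int *sum, int low, int high, int *stat)
{
	int log;

	for (log = low; log <= high; log++) {
		if (sum[log]) {
			*stat = 1;	/* something usable in this size range */
			return 0;
		}
	}
	*stat = 0;			/* nothing free in [2^low, 2^high] */
	return 0;
}

int
main(void)
{
	int sum[8] = { 0, 0, 2, 0, 1, 0, 0, 0 };
	int stat;

	any_summary(sum, 3, 7, &stat);
	printf("free extent of size >= 2^3 available: %s\n", stat ? "yes" : "no");
	return 0;
}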
- xfs_bmap_init(&flist, &firstblock);
- /*
- * Allocate blocks to the bitmap file.
- */
- nmap = 1;
- cancelflags |= XFS_TRANS_ABORT;
- error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
- XFS_BMAPI_METADATA, &firstblock,
- resblks, &map, &nmap, &flist);
- if (!error && nmap < 1)
- error = XFS_ERROR(ENOSPC);
- if (error)
- goto error_cancel;
- /*
- * Free any blocks freed up in the transaction, then commit.
- */
- error = xfs_bmap_finish(&tp, &flist, &committed);
- if (error)
- goto error_cancel;
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
- if (error)
- goto error;
- /*
- * Now we need to clear the allocated blocks.
- * Do this one block per transaction, to keep it simple.
- */
- cancelflags = 0;
- for (bno = map.br_startoff, fsbno = map.br_startblock;
- bno < map.br_startoff + map.br_blockcount;
- bno++, fsbno++) {
- tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO);
- /*
- * Reserve log for one block zeroing.
- */
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtzero,
- 0, 0);
+
+/*
+ * Copy and transform the summary file, given the old and new
+ * parameters in the mount structures.
+ */
+STATIC int /* error */
+xfs_rtcopy_summary(
+ xfs_mount_t *omp, /* old file system mount point */
+ xfs_mount_t *nmp, /* new file system mount point */
+ xfs_trans_t *tp) /* transaction pointer */
+{
+ xfs_rtblock_t bbno; /* bitmap block number */
+ xfs_buf_t *bp; /* summary buffer */
+ int error; /* error return value */
+ int log; /* summary level number (log length) */
+ xfs_suminfo_t sum; /* summary data */
+ xfs_fsblock_t sumbno; /* summary block number */
+
+ bp = NULL;
+ for (log = omp->m_rsumlevels - 1; log >= 0; log--) {
+ for (bbno = omp->m_sb.sb_rbmblocks - 1;
+ (xfs_srtblock_t)bbno >= 0;
+ bbno--) {
+ error = xfs_rtget_summary(omp, tp, log, bbno, &bp,
+ &sumbno, &sum);
if (error)
- goto error_cancel;
- /*
- * Lock the bitmap inode.
- */
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- /*
- * Get a buffer for the block.
- */
- d = XFS_FSB_TO_DADDR(mp, fsbno);
- bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
- mp->m_bsize, 0);
- if (bp == NULL) {
- error = XFS_ERROR(EIO);
-error_cancel:
- xfs_trans_cancel(tp, cancelflags);
- goto error;
- }
- memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
- xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
- /*
- * Commit the transaction.
- */
- error = xfs_trans_commit(tp, 0);
+ return error;
+ if (sum == 0)
+ continue;
+ error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum,
+ &bp, &sumbno);
if (error)
- goto error;
+ return error;
+ error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum,
+ &bp, &sumbno);
+ if (error)
+ return error;
+ ASSERT(sum > 0);
}
- /*
- * Go on to the next extent, if any.
- */
- oblocks = map.br_startoff + map.br_blockcount;
}
return 0;
+}
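A hedged way to picture xfs_rtcopy_summary: for every (size class, bitmap block) cell with a non-zero count, subtract that count under the old geometry and add it under the new one. A toy model over two flat arrays (hypothetical names, ignoring the buffer and transaction plumbing):

#include <stdio.h>

#define NLEVELS	4
#define NBLOCKS	3

/* Move every non-zero count from osum[][] into nsum[][]. */
static void
copy_summary(int osum[NLEVELS][NBLOCKS], int nsum[NLEVELS][NBLOCKS])
{
	int log, bbno;

	for (log = NLEVELS - 1; log >= 0; log--) {
		for (bbno = NBLOCKS - 1; bbno >= 0; bbno--) {
			int sum = osum[log][bbno];

			if (sum == 0)
				continue;
			osum[log][bbno] -= sum;		/* -sum against the old fs */
			nsum[log][bbno] += sum;		/* +sum against the new fs */
		}
	}
}

int
main(void)
{
	int osum[NLEVELS][NBLOCKS] = { { 1, 0, 0 }, { 0, 2, 0 }, { 0, 0, 0 }, { 0, 0, 3 } };
	int nsum[NLEVELS][NBLOCKS] = { { 0 } };

	copy_summary(osum, nsum);
	printf("nsum[3][2] = %d\n", nsum[3][2]);	/* the count of 3 moved over */
	return 0;
}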
+/*
+ * Mark an extent specified by start and len allocated.
+ * Updates all the summary information as well as the bitmap.
+ */
+STATIC int /* error */
+xfs_rtallocate_range(
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_rtblock_t start, /* start block to allocate */
+ xfs_extlen_t len, /* length to allocate */
+ xfs_buf_t **rbpp, /* in/out: summary block buffer */
+ xfs_fsblock_t *rsb) /* in/out: summary block number */
+{
+ xfs_rtblock_t end; /* end of the allocated extent */
+ int error; /* error value */
+ xfs_rtblock_t postblock = 0; /* first block allocated > end */
+ xfs_rtblock_t preblock = 0; /* first block allocated < start */
-error:
+ end = start + len - 1;
+ /*
+ * Assume we're allocating out of the middle of a free extent.
+ * We need to find the beginning and end of the extent so we can
+ * properly update the summary.
+ */
+ error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+ if (error) {
+ return error;
+ }
+ /*
+ * Find the next allocated block (end of free extent).
+ */
+ error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
+ &postblock);
+ if (error) {
+ return error;
+ }
+ /*
+ * Decrement the summary information corresponding to the entire
+ * (old) free extent.
+ */
+ error = xfs_rtmodify_summary(mp, tp,
+ XFS_RTBLOCKLOG(postblock + 1 - preblock),
+ XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+ if (error) {
+ return error;
+ }
+ /*
+ * If there are blocks not being allocated at the front of the
+ * old extent, add summary data for them to be free.
+ */
+ if (preblock < start) {
+ error = xfs_rtmodify_summary(mp, tp,
+ XFS_RTBLOCKLOG(start - preblock),
+ XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+ if (error) {
+ return error;
+ }
+ }
+ /*
+ * If there are blocks not being allocated at the end of the
+ * old extent, add summary data for them to be free.
+ */
+ if (postblock > end) {
+ error = xfs_rtmodify_summary(mp, tp,
+ XFS_RTBLOCKLOG(postblock - end),
+ XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb);
+ if (error) {
+ return error;
+ }
+ }
+ /*
+ * Modify the bitmap to mark this extent allocated.
+ */
+ error = xfs_rtmodify_range(mp, tp, start, len, 0);
return error;
}
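The summary bookkeeping above can be hard to see through the buffer handling. In outline: the whole surrounding free extent [preblock, postblock] loses one entry in its size class, and any leftover front or back fragment gains one in its own class. A hedged in-memory model, with a flat summary indexed only by log2 of extent length and hypothetical helper names:

#include <stdio.h>

#define NLEVELS 32

/* Highest set bit of n, i.e. floor(log2(n)); n must be > 0. */
static int
rtblocklog(unsigned long n)
{
	int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

/*
 * Carve [start, start + len) out of the free extent [preblock, postblock],
 * updating sum[] the way xfs_rtallocate_range updates the summary file.
 */
static void
allocate_range(int *sum, unsigned long preblock, unsigned long postblock,
	       unsigned long start, unsigned long len)
{
	unsigned long end = start + len - 1;

	sum[rtblocklog(postblock + 1 - preblock)]--;	/* old free extent gone */
	if (preblock < start)
		sum[rtblocklog(start - preblock)]++;	/* leftover at the front */
	if (postblock > end)
		sum[rtblocklog(postblock - end)]++;	/* leftover at the back */
}

int
main(void)
{
	int sum[NLEVELS] = { 0 };

	sum[rtblocklog(100)] = 1;		/* one free extent of 100 blocks: 0..99 */
	allocate_range(sum, 0, 99, 40, 20);	/* allocate blocks 40..59 */
	printf("size class 2^5: %d extents, size class 2^3: %d extents\n",
	       sum[rtblocklog(40)], sum[rtblocklog(8)]);
	return 0;
}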
@@ -721,1112 +809,126 @@ xfs_rtallocate_extent_size(
}
/*
- * Mark an extent specified by start and len allocated.
- * Updates all the summary information as well as the bitmap.
+ * Allocate space to the bitmap or summary file, and zero it, for growfs.
*/
STATIC int /* error */
-xfs_rtallocate_range(
+xfs_growfs_rt_alloc(
xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t start, /* start block to allocate */
- xfs_extlen_t len, /* length to allocate */
- xfs_buf_t **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb) /* in/out: summary block number */
+ xfs_extlen_t oblocks, /* old count of blocks */
+ xfs_extlen_t nblocks, /* new count of blocks */
+ xfs_inode_t *ip) /* inode (bitmap/summary) */
{
- xfs_rtblock_t end; /* end of the allocated extent */
- int error; /* error value */
- xfs_rtblock_t postblock = 0; /* first block allocated > end */
- xfs_rtblock_t preblock = 0; /* first block allocated < start */
+ xfs_fileoff_t bno; /* block number in file */
+ xfs_buf_t *bp; /* temporary buffer for zeroing */
+ int committed; /* transaction committed flag */
+ xfs_daddr_t d; /* disk block address */
+ int error; /* error return value */
+ xfs_fsblock_t firstblock; /* first block allocated in xaction */
+ xfs_bmap_free_t flist; /* list of freed blocks */
+ xfs_fsblock_t fsbno; /* filesystem block for bno */
+ xfs_bmbt_irec_t map; /* block map output */
+ int nmap; /* number of block maps */
+ int resblks; /* space reservation */
- end = start + len - 1;
- /*
- * Assume we're allocating out of the middle of a free extent.
- * We need to find the beginning and end of the extent so we can
- * properly update the summary.
- */
- error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
- if (error) {
- return error;
- }
- /*
- * Find the next allocated block (end of free extent).
- */
- error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
- &postblock);
- if (error) {
- return error;
- }
- /*
- * Decrement the summary information corresponding to the entire
- * (old) free extent.
- */
- error = xfs_rtmodify_summary(mp, tp,
- XFS_RTBLOCKLOG(postblock + 1 - preblock),
- XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
- if (error) {
- return error;
- }
- /*
- * If there are blocks not being allocated at the front of the
- * old extent, add summary data for them to be free.
- */
- if (preblock < start) {
- error = xfs_rtmodify_summary(mp, tp,
- XFS_RTBLOCKLOG(start - preblock),
- XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
- if (error) {
- return error;
- }
- }
- /*
- * If there are blocks not being allocated at the end of the
- * old extent, add summary data for them to be free.
- */
- if (postblock > end) {
- error = xfs_rtmodify_summary(mp, tp,
- XFS_RTBLOCKLOG(postblock - end),
- XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb);
- if (error) {
- return error;
- }
- }
/*
- * Modify the bitmap to mark this extent allocated.
+ * Allocate space to the file, as necessary.
*/
- error = xfs_rtmodify_range(mp, tp, start, len, 0);
- return error;
-}
-
-/*
- * Return whether there are any free extents in the size range given
- * by low and high, for the bitmap block bbno.
- */
-STATIC int /* error */
-xfs_rtany_summary(
- xfs_mount_t *mp, /* file system mount structure */
- xfs_trans_t *tp, /* transaction pointer */
- int low, /* low log2 extent size */
- int high, /* high log2 extent size */
- xfs_rtblock_t bbno, /* bitmap block number */
- xfs_buf_t **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb, /* in/out: summary block number */
- int *stat) /* out: any good extents here? */
-{
- int error; /* error value */
- int log; /* loop counter, log2 of ext. size */
- xfs_suminfo_t sum; /* summary data */
+ while (oblocks < nblocks) {
+ int cancelflags = 0;
+ xfs_trans_t *tp;
- /*
- * Loop over logs of extent sizes. Order is irrelevant.
- */
- for (log = low; log <= high; log++) {
+ tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC);
+ resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
/*
- * Get one summary datum.
+ * Reserve space & log for one extent added to the file.
*/
- error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum);
- if (error) {
- return error;
- }
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
+ resblks, 0);
+ if (error)
+ goto error_cancel;
+ cancelflags = XFS_TRANS_RELEASE_LOG_RES;
/*
- * If there are any, return success.
+ * Lock the inode.
*/
- if (sum) {
- *stat = 1;
- return 0;
- }
- }
- /*
- * Found nothing, return failure.
- */
- *stat = 0;
- return 0;
-}
-
-/*
- * Get a buffer for the bitmap or summary file block specified.
- * The buffer is returned read and locked.
- */
-STATIC int /* error */
-xfs_rtbuf_get(
- xfs_mount_t *mp, /* file system mount structure */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t block, /* block number in bitmap or summary */
- int issum, /* is summary not bitmap */
- xfs_buf_t **bpp) /* output: buffer for the block */
-{
- xfs_buf_t *bp; /* block buffer, result */
- xfs_inode_t *ip; /* bitmap or summary inode */
- xfs_bmbt_irec_t map;
- int nmap = 1;
- int error; /* error value */
-
- ip = issum ? mp->m_rsumip : mp->m_rbmip;
-
- error = xfs_bmapi_read(ip, block, 1, &map, &nmap, XFS_DATA_FORK);
- if (error)
- return error;
-
- ASSERT(map.br_startblock != NULLFSBLOCK);
- error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
- XFS_FSB_TO_DADDR(mp, map.br_startblock),
- mp->m_bsize, 0, &bp, NULL);
- if (error)
- return error;
- ASSERT(!xfs_buf_geterror(bp));
- *bpp = bp;
- return 0;
-}
-
-#ifdef DEBUG
-/*
- * Check that the given extent (block range) is allocated already.
- */
-STATIC int /* error */
-xfs_rtcheck_alloc_range(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t bno, /* starting block number of extent */
- xfs_extlen_t len, /* length of extent */
- int *stat) /* out: 1 for allocated, 0 for not */
-{
- xfs_rtblock_t new; /* dummy for xfs_rtcheck_range */
-
- return xfs_rtcheck_range(mp, tp, bno, len, 0, &new, stat);
-}
-#endif
-
-/*
- * Check that the given range is either all allocated (val = 0) or
- * all free (val = 1).
- */
-STATIC int /* error */
-xfs_rtcheck_range(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t start, /* starting block number of extent */
- xfs_extlen_t len, /* length of extent */
- int val, /* 1 for free, 0 for allocated */
- xfs_rtblock_t *new, /* out: first block not matching */
- int *stat) /* out: 1 for matches, 0 for not */
-{
- xfs_rtword_t *b; /* current word in buffer */
- int bit; /* bit number in the word */
- xfs_rtblock_t block; /* bitmap block number */
- xfs_buf_t *bp; /* buf for the block */
- xfs_rtword_t *bufp; /* starting word in buffer */
- int error; /* error value */
- xfs_rtblock_t i; /* current bit number rel. to start */
- xfs_rtblock_t lastbit; /* last useful bit in word */
- xfs_rtword_t mask; /* mask of relevant bits for value */
- xfs_rtword_t wdiff; /* difference from wanted value */
- int word; /* word number in the buffer */
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- /*
- * Compute starting bitmap block number
- */
- block = XFS_BITTOBLOCK(mp, start);
- /*
- * Read the bitmap block.
- */
- error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
- if (error) {
- return error;
- }
- bufp = bp->b_addr;
- /*
- * Compute the starting word's address, and starting bit.
- */
- word = XFS_BITTOWORD(mp, start);
- b = &bufp[word];
- bit = (int)(start & (XFS_NBWORD - 1));
- /*
- * 0 (allocated) => all zero's; 1 (free) => all one's.
- */
- val = -val;
- /*
- * If not starting on a word boundary, deal with the first
- * (partial) word.
- */
- if (bit) {
- /*
- * Compute first bit not examined.
- */
- lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
- /*
- * Mask of relevant bits.
- */
- mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
- /*
- * Compute difference between actual and desired value.
- */
- if ((wdiff = (*b ^ val) & mask)) {
- /*
- * Different, compute first wrong bit and return.
- */
- xfs_trans_brelse(tp, bp);
- i = XFS_RTLOBIT(wdiff) - bit;
- *new = start + i;
- *stat = 0;
- return 0;
- }
- i = lastbit - bit;
- /*
- * Go on to next block if that's where the next word is
- * and we need the next word.
- */
- if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
- /*
- * If done with this block, get the next one.
- */
- xfs_trans_brelse(tp, bp);
- error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
- if (error) {
- return error;
- }
- b = bufp = bp->b_addr;
- word = 0;
- } else {
- /*
- * Go on to the next word in the buffer.
- */
- b++;
- }
- } else {
- /*
- * Starting on a word boundary, no partial word.
- */
- i = 0;
- }
- /*
- * Loop over whole words in buffers. When we use up one buffer
- * we move on to the next one.
- */
- while (len - i >= XFS_NBWORD) {
- /*
- * Compute difference between actual and desired value.
- */
- if ((wdiff = *b ^ val)) {
- /*
- * Different, compute first wrong bit and return.
- */
- xfs_trans_brelse(tp, bp);
- i += XFS_RTLOBIT(wdiff);
- *new = start + i;
- *stat = 0;
- return 0;
- }
- i += XFS_NBWORD;
+ xfs_bmap_init(&flist, &firstblock);
/*
- * Go on to next block if that's where the next word is
- * and we need the next word.
+ * Allocate blocks to the bitmap file.
*/
- if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
- /*
- * If done with this block, get the next one.
- */
- xfs_trans_brelse(tp, bp);
- error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
- if (error) {
- return error;
- }
- b = bufp = bp->b_addr;
- word = 0;
- } else {
- /*
- * Go on to the next word in the buffer.
- */
- b++;
- }
- }
- /*
- * If not ending on a word boundary, deal with the last
- * (partial) word.
- */
- if ((lastbit = len - i)) {
+ nmap = 1;
+ cancelflags |= XFS_TRANS_ABORT;
+ error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
+ XFS_BMAPI_METADATA, &firstblock,
+ resblks, &map, &nmap, &flist);
+ if (!error && nmap < 1)
+ error = XFS_ERROR(ENOSPC);
+ if (error)
+ goto error_cancel;
/*
- * Mask of relevant bits.
+ * Free any blocks freed up in the transaction, then commit.
*/
- mask = ((xfs_rtword_t)1 << lastbit) - 1;
+ error = xfs_bmap_finish(&tp, &flist, &committed);
+ if (error)
+ goto error_cancel;
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ if (error)
+ goto error;
/*
- * Compute difference between actual and desired value.
+ * Now we need to clear the allocated blocks.
+ * Do this one block per transaction, to keep it simple.
*/
- if ((wdiff = (*b ^ val) & mask)) {
+ cancelflags = 0;
+ for (bno = map.br_startoff, fsbno = map.br_startblock;
+ bno < map.br_startoff + map.br_blockcount;
+ bno++, fsbno++) {
+ tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO);
/*
- * Different, compute first wrong bit and return.
+ * Reserve log for one block zeroing.
*/
- xfs_trans_brelse(tp, bp);
- i += XFS_RTLOBIT(wdiff);
- *new = start + i;
- *stat = 0;
- return 0;
- } else
- i = len;
- }
- /*
- * Successful, return.
- */
- xfs_trans_brelse(tp, bp);
- *new = start + i;
- *stat = 1;
- return 0;
-}
-
-/*
- * Copy and transform the summary file, given the old and new
- * parameters in the mount structures.
- */
-STATIC int /* error */
-xfs_rtcopy_summary(
- xfs_mount_t *omp, /* old file system mount point */
- xfs_mount_t *nmp, /* new file system mount point */
- xfs_trans_t *tp) /* transaction pointer */
-{
- xfs_rtblock_t bbno; /* bitmap block number */
- xfs_buf_t *bp; /* summary buffer */
- int error; /* error return value */
- int log; /* summary level number (log length) */
- xfs_suminfo_t sum; /* summary data */
- xfs_fsblock_t sumbno; /* summary block number */
-
- bp = NULL;
- for (log = omp->m_rsumlevels - 1; log >= 0; log--) {
- for (bbno = omp->m_sb.sb_rbmblocks - 1;
- (xfs_srtblock_t)bbno >= 0;
- bbno--) {
- error = xfs_rtget_summary(omp, tp, log, bbno, &bp,
- &sumbno, &sum);
- if (error)
- return error;
- if (sum == 0)
- continue;
- error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum,
- &bp, &sumbno);
- if (error)
- return error;
- error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum,
- &bp, &sumbno);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtzero,
+ 0, 0);
if (error)
- return error;
- ASSERT(sum > 0);
- }
- }
- return 0;
-}
-
-/*
- * Searching backward from start to limit, find the first block whose
- * allocated/free state is different from start's.
- */
-STATIC int /* error */
-xfs_rtfind_back(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t start, /* starting block to look at */
- xfs_rtblock_t limit, /* last block to look at */
- xfs_rtblock_t *rtblock) /* out: start block found */
-{
- xfs_rtword_t *b; /* current word in buffer */
- int bit; /* bit number in the word */
- xfs_rtblock_t block; /* bitmap block number */
- xfs_buf_t *bp; /* buf for the block */
- xfs_rtword_t *bufp; /* starting word in buffer */
- int error; /* error value */
- xfs_rtblock_t firstbit; /* first useful bit in the word */
- xfs_rtblock_t i; /* current bit number rel. to start */
- xfs_rtblock_t len; /* length of inspected area */
- xfs_rtword_t mask; /* mask of relevant bits for value */
- xfs_rtword_t want; /* mask for "good" values */
- xfs_rtword_t wdiff; /* difference from wanted value */
- int word; /* word number in the buffer */
-
- /*
- * Compute and read in starting bitmap block for starting block.
- */
- block = XFS_BITTOBLOCK(mp, start);
- error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
- if (error) {
- return error;
- }
- bufp = bp->b_addr;
- /*
- * Get the first word's index & point to it.
- */
- word = XFS_BITTOWORD(mp, start);
- b = &bufp[word];
- bit = (int)(start & (XFS_NBWORD - 1));
- len = start - limit + 1;
- /*
- * Compute match value, based on the bit at start: if 1 (free)
- * then all-ones, else all-zeroes.
- */
- want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
- /*
- * If the starting position is not word-aligned, deal with the
- * partial word.
- */
- if (bit < XFS_NBWORD - 1) {
- /*
- * Calculate first (leftmost) bit number to look at,
- * and mask for all the relevant bits in this word.
- */
- firstbit = XFS_RTMAX((xfs_srtblock_t)(bit - len + 1), 0);
- mask = (((xfs_rtword_t)1 << (bit - firstbit + 1)) - 1) <<
- firstbit;
- /*
- * Calculate the difference between the value there
- * and what we're looking for.
- */
- if ((wdiff = (*b ^ want) & mask)) {
- /*
- * Different. Mark where we are and return.
- */
- xfs_trans_brelse(tp, bp);
- i = bit - XFS_RTHIBIT(wdiff);
- *rtblock = start - i + 1;
- return 0;
- }
- i = bit - firstbit + 1;
- /*
- * Go on to previous block if that's where the previous word is
- * and we need the previous word.
- */
- if (--word == -1 && i < len) {
- /*
- * If done with this block, get the previous one.
- */
- xfs_trans_brelse(tp, bp);
- error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
- if (error) {
- return error;
- }
- bufp = bp->b_addr;
- word = XFS_BLOCKWMASK(mp);
- b = &bufp[word];
- } else {
- /*
- * Go on to the previous word in the buffer.
- */
- b--;
- }
- } else {
- /*
- * Starting on a word boundary, no partial word.
- */
- i = 0;
- }
- /*
- * Loop over whole words in buffers. When we use up one buffer
- * we move on to the previous one.
- */
- while (len - i >= XFS_NBWORD) {
- /*
- * Compute difference between actual and desired value.
- */
- if ((wdiff = *b ^ want)) {
- /*
- * Different, mark where we are and return.
- */
- xfs_trans_brelse(tp, bp);
- i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
- *rtblock = start - i + 1;
- return 0;
- }
- i += XFS_NBWORD;
- /*
- * Go on to previous block if that's where the previous word is
- * and we need the previous word.
- */
- if (--word == -1 && i < len) {
- /*
- * If done with this block, get the previous one.
- */
- xfs_trans_brelse(tp, bp);
- error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
- if (error) {
- return error;
- }
- bufp = bp->b_addr;
- word = XFS_BLOCKWMASK(mp);
- b = &bufp[word];
- } else {
- /*
- * Go on to the previous word in the buffer.
- */
- b--;
- }
- }
- /*
- * If not ending on a word boundary, deal with the last
- * (partial) word.
- */
- if (len - i) {
- /*
- * Calculate first (leftmost) bit number to look at,
- * and mask for all the relevant bits in this word.
- */
- firstbit = XFS_NBWORD - (len - i);
- mask = (((xfs_rtword_t)1 << (len - i)) - 1) << firstbit;
- /*
- * Compute difference between actual and desired value.
- */
- if ((wdiff = (*b ^ want) & mask)) {
- /*
- * Different, mark where we are and return.
- */
- xfs_trans_brelse(tp, bp);
- i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
- *rtblock = start - i + 1;
- return 0;
- } else
- i = len;
- }
- /*
- * No match, return that we scanned the whole area.
- */
- xfs_trans_brelse(tp, bp);
- *rtblock = start - i + 1;
- return 0;
-}
-
-/*
- * Searching forward from start to limit, find the first block whose
- * allocated/free state is different from start's.
- */
-STATIC int /* error */
-xfs_rtfind_forw(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t start, /* starting block to look at */
- xfs_rtblock_t limit, /* last block to look at */
- xfs_rtblock_t *rtblock) /* out: start block found */
-{
- xfs_rtword_t *b; /* current word in buffer */
- int bit; /* bit number in the word */
- xfs_rtblock_t block; /* bitmap block number */
- xfs_buf_t *bp; /* buf for the block */
- xfs_rtword_t *bufp; /* starting word in buffer */
- int error; /* error value */
- xfs_rtblock_t i; /* current bit number rel. to start */
- xfs_rtblock_t lastbit; /* last useful bit in the word */
- xfs_rtblock_t len; /* length of inspected area */
- xfs_rtword_t mask; /* mask of relevant bits for value */
- xfs_rtword_t want; /* mask for "good" values */
- xfs_rtword_t wdiff; /* difference from wanted value */
- int word; /* word number in the buffer */
-
- /*
- * Compute and read in starting bitmap block for starting block.
- */
- block = XFS_BITTOBLOCK(mp, start);
- error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
- if (error) {
- return error;
- }
- bufp = bp->b_addr;
- /*
- * Get the first word's index & point to it.
- */
- word = XFS_BITTOWORD(mp, start);
- b = &bufp[word];
- bit = (int)(start & (XFS_NBWORD - 1));
- len = limit - start + 1;
- /*
- * Compute match value, based on the bit at start: if 1 (free)
- * then all-ones, else all-zeroes.
- */
- want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
- /*
- * If the starting position is not word-aligned, deal with the
- * partial word.
- */
- if (bit) {
- /*
- * Calculate last (rightmost) bit number to look at,
- * and mask for all the relevant bits in this word.
- */
- lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
- mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
- /*
- * Calculate the difference between the value there
- * and what we're looking for.
- */
- if ((wdiff = (*b ^ want) & mask)) {
- /*
- * Different. Mark where we are and return.
- */
- xfs_trans_brelse(tp, bp);
- i = XFS_RTLOBIT(wdiff) - bit;
- *rtblock = start + i - 1;
- return 0;
- }
- i = lastbit - bit;
- /*
- * Go on to next block if that's where the next word is
- * and we need the next word.
- */
- if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
- /*
- * If done with this block, get the previous one.
- */
- xfs_trans_brelse(tp, bp);
- error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
- if (error) {
- return error;
- }
- b = bufp = bp->b_addr;
- word = 0;
- } else {
- /*
- * Go on to the previous word in the buffer.
- */
- b++;
- }
- } else {
- /*
- * Starting on a word boundary, no partial word.
- */
- i = 0;
- }
- /*
- * Loop over whole words in buffers. When we use up one buffer
- * we move on to the next one.
- */
- while (len - i >= XFS_NBWORD) {
- /*
- * Compute difference between actual and desired value.
- */
- if ((wdiff = *b ^ want)) {
+ goto error_cancel;
/*
- * Different, mark where we are and return.
+ * Lock the bitmap inode.
*/
- xfs_trans_brelse(tp, bp);
- i += XFS_RTLOBIT(wdiff);
- *rtblock = start + i - 1;
- return 0;
- }
- i += XFS_NBWORD;
- /*
- * Go on to next block if that's where the next word is
- * and we need the next word.
- */
- if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
/*
- * If done with this block, get the next one.
+ * Get a buffer for the block.
*/
- xfs_trans_brelse(tp, bp);
- error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
- if (error) {
- return error;
+ d = XFS_FSB_TO_DADDR(mp, fsbno);
+ bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+ mp->m_bsize, 0);
+ if (bp == NULL) {
+ error = XFS_ERROR(EIO);
+error_cancel:
+ xfs_trans_cancel(tp, cancelflags);
+ goto error;
}
- b = bufp = bp->b_addr;
- word = 0;
- } else {
+ memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
+ xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
/*
- * Go on to the next word in the buffer.
+ * Commit the transaction.
*/
- b++;
+ error = xfs_trans_commit(tp, 0);
+ if (error)
+ goto error;
}
- }
- /*
- * If not ending on a word boundary, deal with the last
- * (partial) word.
- */
- if ((lastbit = len - i)) {
- /*
- * Calculate mask for all the relevant bits in this word.
- */
- mask = ((xfs_rtword_t)1 << lastbit) - 1;
/*
- * Compute difference between actual and desired value.
+ * Go on to the next extent, if any.
*/
- if ((wdiff = (*b ^ want) & mask)) {
- /*
- * Different, mark where we are and return.
- */
- xfs_trans_brelse(tp, bp);
- i += XFS_RTLOBIT(wdiff);
- *rtblock = start + i - 1;
- return 0;
- } else
- i = len;
+ oblocks = map.br_startoff + map.br_blockcount;
}
- /*
- * No match, return that we scanned the whole area.
- */
- xfs_trans_brelse(tp, bp);
- *rtblock = start + i - 1;
return 0;
-}
-/*
- * Mark an extent specified by start and len freed.
- * Updates all the summary information as well as the bitmap.
- */
-STATIC int /* error */
-xfs_rtfree_range(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t start, /* starting block to free */
- xfs_extlen_t len, /* length to free */
- xfs_buf_t **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb) /* in/out: summary block number */
-{
- xfs_rtblock_t end; /* end of the freed extent */
- int error; /* error value */
- xfs_rtblock_t postblock; /* first block freed > end */
- xfs_rtblock_t preblock; /* first block freed < start */
-
- end = start + len - 1;
- /*
- * Modify the bitmap to mark this extent freed.
- */
- error = xfs_rtmodify_range(mp, tp, start, len, 1);
- if (error) {
- return error;
- }
- /*
- * Assume we're freeing out of the middle of an allocated extent.
- * We need to find the beginning and end of the extent so we can
- * properly update the summary.
- */
- error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
- if (error) {
- return error;
- }
- /*
- * Find the next allocated block (end of allocated extent).
- */
- error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
- &postblock);
- if (error)
- return error;
- /*
- * If there are blocks not being freed at the front of the
- * old extent, add summary data for them to be allocated.
- */
- if (preblock < start) {
- error = xfs_rtmodify_summary(mp, tp,
- XFS_RTBLOCKLOG(start - preblock),
- XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
- if (error) {
- return error;
- }
- }
- /*
- * If there are blocks not being freed at the end of the
- * old extent, add summary data for them to be allocated.
- */
- if (postblock > end) {
- error = xfs_rtmodify_summary(mp, tp,
- XFS_RTBLOCKLOG(postblock - end),
- XFS_BITTOBLOCK(mp, end + 1), -1, rbpp, rsb);
- if (error) {
- return error;
- }
- }
- /*
- * Increment the summary information corresponding to the entire
- * (new) free extent.
- */
- error = xfs_rtmodify_summary(mp, tp,
- XFS_RTBLOCKLOG(postblock + 1 - preblock),
- XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+error:
return error;
}
/*
- * Read and return the summary information for a given extent size,
- * bitmap block combination.
- * Keeps track of a current summary block, so we don't keep reading
- * it from the buffer cache.
- */
-STATIC int /* error */
-xfs_rtget_summary(
- xfs_mount_t *mp, /* file system mount structure */
- xfs_trans_t *tp, /* transaction pointer */
- int log, /* log2 of extent size */
- xfs_rtblock_t bbno, /* bitmap block number */
- xfs_buf_t **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb, /* in/out: summary block number */
- xfs_suminfo_t *sum) /* out: summary info for this block */
-{
- xfs_buf_t *bp; /* buffer for summary block */
- int error; /* error value */
- xfs_fsblock_t sb; /* summary fsblock */
- int so; /* index into the summary file */
- xfs_suminfo_t *sp; /* pointer to returned data */
-
- /*
- * Compute entry number in the summary file.
- */
- so = XFS_SUMOFFS(mp, log, bbno);
- /*
- * Compute the block number in the summary file.
- */
- sb = XFS_SUMOFFSTOBLOCK(mp, so);
- /*
- * If we have an old buffer, and the block number matches, use that.
- */
- if (rbpp && *rbpp && *rsb == sb)
- bp = *rbpp;
- /*
- * Otherwise we have to get the buffer.
- */
- else {
- /*
- * If there was an old one, get rid of it first.
- */
- if (rbpp && *rbpp)
- xfs_trans_brelse(tp, *rbpp);
- error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
- if (error) {
- return error;
- }
- /*
- * Remember this buffer and block for the next call.
- */
- if (rbpp) {
- *rbpp = bp;
- *rsb = sb;
- }
- }
- /*
- * Point to the summary information & copy it out.
- */
- sp = XFS_SUMPTR(mp, bp, so);
- *sum = *sp;
- /*
- * Drop the buffer if we're not asked to remember it.
- */
- if (!rbpp)
- xfs_trans_brelse(tp, bp);
- return 0;
-}
-
-/*
- * Set the given range of bitmap bits to the given value.
- * Do whatever I/O and logging is required.
- */
-STATIC int /* error */
-xfs_rtmodify_range(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t start, /* starting block to modify */
- xfs_extlen_t len, /* length of extent to modify */
- int val) /* 1 for free, 0 for allocated */
-{
- xfs_rtword_t *b; /* current word in buffer */
- int bit; /* bit number in the word */
- xfs_rtblock_t block; /* bitmap block number */
- xfs_buf_t *bp; /* buf for the block */
- xfs_rtword_t *bufp; /* starting word in buffer */
- int error; /* error value */
- xfs_rtword_t *first; /* first used word in the buffer */
- int i; /* current bit number rel. to start */
- int lastbit; /* last useful bit in word */
- xfs_rtword_t mask; /* mask o frelevant bits for value */
- int word; /* word number in the buffer */
-
- /*
- * Compute starting bitmap block number.
- */
- block = XFS_BITTOBLOCK(mp, start);
- /*
- * Read the bitmap block, and point to its data.
- */
- error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
- if (error) {
- return error;
- }
- bufp = bp->b_addr;
- /*
- * Compute the starting word's address, and starting bit.
- */
- word = XFS_BITTOWORD(mp, start);
- first = b = &bufp[word];
- bit = (int)(start & (XFS_NBWORD - 1));
- /*
- * 0 (allocated) => all zeroes; 1 (free) => all ones.
- */
- val = -val;
- /*
- * If not starting on a word boundary, deal with the first
- * (partial) word.
- */
- if (bit) {
- /*
- * Compute first bit not changed and mask of relevant bits.
- */
- lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
- mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
- /*
- * Set/clear the active bits.
- */
- if (val)
- *b |= mask;
- else
- *b &= ~mask;
- i = lastbit - bit;
- /*
- * Go on to the next block if that's where the next word is
- * and we need the next word.
- */
- if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
- /*
- * Log the changed part of this block.
- * Get the next one.
- */
- xfs_trans_log_buf(tp, bp,
- (uint)((char *)first - (char *)bufp),
- (uint)((char *)b - (char *)bufp));
- error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
- if (error) {
- return error;
- }
- first = b = bufp = bp->b_addr;
- word = 0;
- } else {
- /*
- * Go on to the next word in the buffer
- */
- b++;
- }
- } else {
- /*
- * Starting on a word boundary, no partial word.
- */
- i = 0;
- }
- /*
- * Loop over whole words in buffers. When we use up one buffer
- * we move on to the next one.
- */
- while (len - i >= XFS_NBWORD) {
- /*
- * Set the word value correctly.
- */
- *b = val;
- i += XFS_NBWORD;
- /*
- * Go on to the next block if that's where the next word is
- * and we need the next word.
- */
- if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
- /*
- * Log the changed part of this block.
- * Get the next one.
- */
- xfs_trans_log_buf(tp, bp,
- (uint)((char *)first - (char *)bufp),
- (uint)((char *)b - (char *)bufp));
- error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
- if (error) {
- return error;
- }
- first = b = bufp = bp->b_addr;
- word = 0;
- } else {
- /*
- * Go on to the next word in the buffer
- */
- b++;
- }
- }
- /*
- * If not ending on a word boundary, deal with the last
- * (partial) word.
- */
- if ((lastbit = len - i)) {
- /*
- * Compute a mask of relevant bits.
- */
- bit = 0;
- mask = ((xfs_rtword_t)1 << lastbit) - 1;
- /*
- * Set/clear the active bits.
- */
- if (val)
- *b |= mask;
- else
- *b &= ~mask;
- b++;
- }
- /*
- * Log any remaining changed bytes.
- */
- if (b > first)
- xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp),
- (uint)((char *)b - (char *)bufp - 1));
- return 0;
-}
-
-/*
- * Read and modify the summary information for a given extent size,
- * bitmap block combination.
- * Keeps track of a current summary block, so we don't keep reading
- * it from the buffer cache.
- */
-STATIC int /* error */
-xfs_rtmodify_summary(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- int log, /* log2 of extent size */
- xfs_rtblock_t bbno, /* bitmap block number */
- int delta, /* change to make to summary info */
- xfs_buf_t **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb) /* in/out: summary block number */
-{
- xfs_buf_t *bp; /* buffer for the summary block */
- int error; /* error value */
- xfs_fsblock_t sb; /* summary fsblock */
- int so; /* index into the summary file */
- xfs_suminfo_t *sp; /* pointer to returned data */
-
- /*
- * Compute entry number in the summary file.
- */
- so = XFS_SUMOFFS(mp, log, bbno);
- /*
- * Compute the block number in the summary file.
- */
- sb = XFS_SUMOFFSTOBLOCK(mp, so);
- /*
- * If we have an old buffer, and the block number matches, use that.
- */
- if (rbpp && *rbpp && *rsb == sb)
- bp = *rbpp;
- /*
- * Otherwise we have to get the buffer.
- */
- else {
- /*
- * If there was an old one, get rid of it first.
- */
- if (rbpp && *rbpp)
- xfs_trans_brelse(tp, *rbpp);
- error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
- if (error) {
- return error;
- }
- /*
- * Remember this buffer and block for the next call.
- */
- if (rbpp) {
- *rbpp = bp;
- *rsb = sb;
- }
- }
- /*
- * Point to the summary information, modify and log it.
- */
- sp = XFS_SUMPTR(mp, bp, so);
- *sp += delta;
- xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)bp->b_addr),
- (uint)((char *)sp - (char *)bp->b_addr + sizeof(*sp) - 1));
- return 0;
-}
-
-/*
* Visible (exported) functions.
*/
@@ -2129,66 +1231,6 @@ xfs_rtallocate_extent(
}
/*
- * Free an extent in the realtime subvolume. Length is expressed in
- * realtime extents, as is the block number.
- */
-int /* error */
-xfs_rtfree_extent(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_rtblock_t bno, /* starting block number to free */
- xfs_extlen_t len) /* length of extent freed */
-{
- int error; /* error value */
- xfs_mount_t *mp; /* file system mount structure */
- xfs_fsblock_t sb; /* summary file block number */
- xfs_buf_t *sumbp; /* summary file block buffer */
-
- mp = tp->t_mountp;
-
- ASSERT(mp->m_rbmip->i_itemp != NULL);
- ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
-
-#ifdef DEBUG
- /*
- * Check to see that this whole range is currently allocated.
- */
- {
- int stat; /* result from checking range */
-
- error = xfs_rtcheck_alloc_range(mp, tp, bno, len, &stat);
- if (error) {
- return error;
- }
- ASSERT(stat);
- }
-#endif
- sumbp = NULL;
- /*
- * Free the range of realtime blocks.
- */
- error = xfs_rtfree_range(mp, tp, bno, len, &sumbp, &sb);
- if (error) {
- return error;
- }
- /*
- * Mark more blocks free in the superblock.
- */
- xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, (long)len);
- /*
- * If we've now freed all the blocks, reset the file sequence
- * number to 0.
- */
- if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
- mp->m_sb.sb_rextents) {
- if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
- mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
- *(__uint64_t *)&mp->m_rbmip->i_d.di_atime = 0;
- xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
- }
- return 0;
-}
-
-/*
* Initialize realtime fields in the mount structure.
*/
int /* error */
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index b2a1a24c0e2f..752b63d10300 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -95,6 +95,30 @@ xfs_growfs_rt(
struct xfs_mount *mp, /* file system mount structure */
xfs_growfs_rt_t *in); /* user supplied growfs struct */
+/*
+ * From xfs_rtbitmap.c
+ */
+int xfs_rtbuf_get(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_rtblock_t block, int issum, struct xfs_buf **bpp);
+int xfs_rtcheck_range(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_rtblock_t start, xfs_extlen_t len, int val,
+ xfs_rtblock_t *new, int *stat);
+int xfs_rtfind_back(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_rtblock_t start, xfs_rtblock_t limit,
+ xfs_rtblock_t *rtblock);
+int xfs_rtfind_forw(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_rtblock_t start, xfs_rtblock_t limit,
+ xfs_rtblock_t *rtblock);
+int xfs_rtmodify_range(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_rtblock_t start, xfs_extlen_t len, int val);
+int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log,
+ xfs_rtblock_t bbno, int delta, xfs_buf_t **rbpp,
+ xfs_fsblock_t *rsb);
+int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_rtblock_t start, xfs_extlen_t len,
+ struct xfs_buf **rbpp, xfs_fsblock_t *rsb);
+
+
#else
# define xfs_rtallocate_extent(t,b,min,max,l,a,f,p,rb) (ENOSYS)
# define xfs_rtfree_extent(t,b,l) (ENOSYS)
diff --git a/fs/xfs/xfs_rtbitmap.c b/fs/xfs/xfs_rtbitmap.c
new file mode 100644
index 000000000000..b1f2fe8af4a8
--- /dev/null
+++ b/fs/xfs/xfs_rtbitmap.c
@@ -0,0 +1,974 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+#include "xfs_trans.h"
+#include "xfs_trans_space.h"
+#include "xfs_trace.h"
+#include "xfs_buf.h"
+#include "xfs_icache.h"
+#include "xfs_dinode.h"
+#include "xfs_rtalloc.h"
+
+
+/*
+ * Realtime allocator bitmap functions shared with userspace.
+ */
+
+/*
+ * Get a buffer for the bitmap or summary file block specified.
+ * The buffer is returned read and locked.
+ */
+int
+xfs_rtbuf_get(
+ xfs_mount_t *mp, /* file system mount structure */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_rtblock_t block, /* block number in bitmap or summary */
+ int issum, /* is summary not bitmap */
+ xfs_buf_t **bpp) /* output: buffer for the block */
+{
+ xfs_buf_t *bp; /* block buffer, result */
+ xfs_inode_t *ip; /* bitmap or summary inode */
+ xfs_bmbt_irec_t map;
+ int nmap = 1;
+ int error; /* error value */
+
+ ip = issum ? mp->m_rsumip : mp->m_rbmip;
+
+ error = xfs_bmapi_read(ip, block, 1, &map, &nmap, XFS_DATA_FORK);
+ if (error)
+ return error;
+
+ ASSERT(map.br_startblock != NULLFSBLOCK);
+ error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(mp, map.br_startblock),
+ mp->m_bsize, 0, &bp, NULL);
+ if (error)
+ return error;
+ ASSERT(!xfs_buf_geterror(bp));
+ *bpp = bp;
+ return 0;
+}
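xfs_rtbuf_get is a two-step lookup: map the bitmap/summary-file block to a physical block through the inode's extent map (xfs_bmapi_read), then read that physical block. Below is a hedged userspace model of just the mapping step, with a toy extent-record array standing in for the bmap btree; all names are hypothetical.

#include <stdio.h>

/* One extent record: file blocks [startoff, startoff + blockcount) map to
 * physical blocks starting at startblock. */
struct extent {
	unsigned long	startoff;
	unsigned long	startblock;
	unsigned long	blockcount;
};

/* Find the physical block backing logical block `block`, or -1 if unmapped. */
static long
map_block(const struct extent *map, int nextents, unsigned long block)
{
	int i;

	for (i = 0; i < nextents; i++) {
		if (block >= map[i].startoff &&
		    block < map[i].startoff + map[i].blockcount)
			return (long)(map[i].startblock + (block - map[i].startoff));
	}
	return -1;	/* hole: the real code asserts this never happens */
}

int
main(void)
{
	struct extent bitmap_map[] = {
		{ .startoff = 0, .startblock = 1000, .blockcount = 4 },
		{ .startoff = 4, .startblock = 2000, .blockcount = 4 },
	};

	printf("bitmap block 5 -> physical block %ld\n",
	       map_block(bitmap_map, 2, 5));	/* expect 2001 */
	return 0;
}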
+
+/*
+ * Searching backward from start to limit, find the first block whose
+ * allocated/free state is different from start's.
+ */
+int
+xfs_rtfind_back(
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_rtblock_t start, /* starting block to look at */
+ xfs_rtblock_t limit, /* last block to look at */
+ xfs_rtblock_t *rtblock) /* out: start block found */
+{
+ xfs_rtword_t *b; /* current word in buffer */
+ int bit; /* bit number in the word */
+ xfs_rtblock_t block; /* bitmap block number */
+ xfs_buf_t *bp; /* buf for the block */
+ xfs_rtword_t *bufp; /* starting word in buffer */
+ int error; /* error value */
+ xfs_rtblock_t firstbit; /* first useful bit in the word */
+ xfs_rtblock_t i; /* current bit number rel. to start */
+ xfs_rtblock_t len; /* length of inspected area */
+ xfs_rtword_t mask; /* mask of relevant bits for value */
+ xfs_rtword_t want; /* mask for "good" values */
+ xfs_rtword_t wdiff; /* difference from wanted value */
+ int word; /* word number in the buffer */
+
+ /*
+ * Compute and read in starting bitmap block for starting block.
+ */
+ block = XFS_BITTOBLOCK(mp, start);
+ error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ bufp = bp->b_addr;
+ /*
+ * Get the first word's index & point to it.
+ */
+ word = XFS_BITTOWORD(mp, start);
+ b = &bufp[word];
+ bit = (int)(start & (XFS_NBWORD - 1));
+ len = start - limit + 1;
+ /*
+ * Compute match value, based on the bit at start: if 1 (free)
+ * then all-ones, else all-zeroes.
+ */
+ want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+ /*
+ * If the starting position is not word-aligned, deal with the
+ * partial word.
+ */
+ if (bit < XFS_NBWORD - 1) {
+ /*
+ * Calculate first (leftmost) bit number to look at,
+ * and mask for all the relevant bits in this word.
+ */
+ firstbit = XFS_RTMAX((xfs_srtblock_t)(bit - len + 1), 0);
+ mask = (((xfs_rtword_t)1 << (bit - firstbit + 1)) - 1) <<
+ firstbit;
+ /*
+ * Calculate the difference between the value there
+ * and what we're looking for.
+ */
+ if ((wdiff = (*b ^ want) & mask)) {
+ /*
+ * Different. Mark where we are and return.
+ */
+ xfs_trans_brelse(tp, bp);
+ i = bit - XFS_RTHIBIT(wdiff);
+ *rtblock = start - i + 1;
+ return 0;
+ }
+ i = bit - firstbit + 1;
+ /*
+ * Go on to previous block if that's where the previous word is
+ * and we need the previous word.
+ */
+ if (--word == -1 && i < len) {
+ /*
+ * If done with this block, get the previous one.
+ */
+ xfs_trans_brelse(tp, bp);
+ error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ bufp = bp->b_addr;
+ word = XFS_BLOCKWMASK(mp);
+ b = &bufp[word];
+ } else {
+ /*
+ * Go on to the previous word in the buffer.
+ */
+ b--;
+ }
+ } else {
+ /*
+ * Starting on a word boundary, no partial word.
+ */
+ i = 0;
+ }
+ /*
+ * Loop over whole words in buffers. When we use up one buffer
+ * we move on to the previous one.
+ */
+ while (len - i >= XFS_NBWORD) {
+ /*
+ * Compute difference between actual and desired value.
+ */
+ if ((wdiff = *b ^ want)) {
+ /*
+ * Different, mark where we are and return.
+ */
+ xfs_trans_brelse(tp, bp);
+ i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
+ *rtblock = start - i + 1;
+ return 0;
+ }
+ i += XFS_NBWORD;
+ /*
+ * Go on to previous block if that's where the previous word is
+ * and we need the previous word.
+ */
+ if (--word == -1 && i < len) {
+ /*
+ * If done with this block, get the previous one.
+ */
+ xfs_trans_brelse(tp, bp);
+ error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ bufp = bp->b_addr;
+ word = XFS_BLOCKWMASK(mp);
+ b = &bufp[word];
+ } else {
+ /*
+ * Go on to the previous word in the buffer.
+ */
+ b--;
+ }
+ }
+ /*
+ * If not ending on a word boundary, deal with the last
+ * (partial) word.
+ */
+ if (len - i) {
+ /*
+ * Calculate first (leftmost) bit number to look at,
+ * and mask for all the relevant bits in this word.
+ */
+ firstbit = XFS_NBWORD - (len - i);
+ mask = (((xfs_rtword_t)1 << (len - i)) - 1) << firstbit;
+ /*
+ * Compute difference between actual and desired value.
+ */
+ if ((wdiff = (*b ^ want) & mask)) {
+ /*
+ * Different, mark where we are and return.
+ */
+ xfs_trans_brelse(tp, bp);
+ i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
+ *rtblock = start - i + 1;
+ return 0;
+ } else
+ i = len;
+ }
+ /*
+ * No match, return that we scanned the whole area.
+ */
+ xfs_trans_brelse(tp, bp);
+ *rtblock = start - i + 1;
+ return 0;
+}
+
+/*
+ * Searching forward from start to limit, find the first block whose
+ * allocated/free state is different from start's.
+ */
+int
+xfs_rtfind_forw(
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_rtblock_t start, /* starting block to look at */
+ xfs_rtblock_t limit, /* last block to look at */
+ xfs_rtblock_t *rtblock) /* out: start block found */
+{
+ xfs_rtword_t *b; /* current word in buffer */
+ int bit; /* bit number in the word */
+ xfs_rtblock_t block; /* bitmap block number */
+ xfs_buf_t *bp; /* buf for the block */
+ xfs_rtword_t *bufp; /* starting word in buffer */
+ int error; /* error value */
+ xfs_rtblock_t i; /* current bit number rel. to start */
+ xfs_rtblock_t lastbit; /* last useful bit in the word */
+ xfs_rtblock_t len; /* length of inspected area */
+ xfs_rtword_t mask; /* mask of relevant bits for value */
+ xfs_rtword_t want; /* mask for "good" values */
+ xfs_rtword_t wdiff; /* difference from wanted value */
+ int word; /* word number in the buffer */
+
+ /*
+ * Compute and read in starting bitmap block for starting block.
+ */
+ block = XFS_BITTOBLOCK(mp, start);
+ error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ bufp = bp->b_addr;
+ /*
+ * Get the first word's index & point to it.
+ */
+ word = XFS_BITTOWORD(mp, start);
+ b = &bufp[word];
+ bit = (int)(start & (XFS_NBWORD - 1));
+ len = limit - start + 1;
+ /*
+ * Compute match value, based on the bit at start: if 1 (free)
+ * then all-ones, else all-zeroes.
+ */
+ want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+ /*
+ * If the starting position is not word-aligned, deal with the
+ * partial word.
+ */
+ if (bit) {
+ /*
+ * Calculate last (rightmost) bit number to look at,
+ * and mask for all the relevant bits in this word.
+ */
+ lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+ mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+ /*
+ * Calculate the difference between the value there
+ * and what we're looking for.
+ */
+ if ((wdiff = (*b ^ want) & mask)) {
+ /*
+ * Different. Mark where we are and return.
+ */
+ xfs_trans_brelse(tp, bp);
+ i = XFS_RTLOBIT(wdiff) - bit;
+ *rtblock = start + i - 1;
+ return 0;
+ }
+ i = lastbit - bit;
+ /*
+ * Go on to next block if that's where the next word is
+ * and we need the next word.
+ */
+ if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ /*
+			 * If done with this block, get the next one.
+ */
+ xfs_trans_brelse(tp, bp);
+ error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ b = bufp = bp->b_addr;
+ word = 0;
+ } else {
+ /*
+ * Go on to the previous word in the buffer.
+			 * Go on to the next word in the buffer.
+ b++;
+ }
+ } else {
+ /*
+ * Starting on a word boundary, no partial word.
+ */
+ i = 0;
+ }
+ /*
+ * Loop over whole words in buffers. When we use up one buffer
+ * we move on to the next one.
+ */
+ while (len - i >= XFS_NBWORD) {
+ /*
+ * Compute difference between actual and desired value.
+ */
+ if ((wdiff = *b ^ want)) {
+ /*
+ * Different, mark where we are and return.
+ */
+ xfs_trans_brelse(tp, bp);
+ i += XFS_RTLOBIT(wdiff);
+ *rtblock = start + i - 1;
+ return 0;
+ }
+ i += XFS_NBWORD;
+ /*
+ * Go on to next block if that's where the next word is
+ * and we need the next word.
+ */
+ if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ /*
+ * If done with this block, get the next one.
+ */
+ xfs_trans_brelse(tp, bp);
+ error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ b = bufp = bp->b_addr;
+ word = 0;
+ } else {
+ /*
+ * Go on to the next word in the buffer.
+ */
+ b++;
+ }
+ }
+ /*
+ * If not ending on a word boundary, deal with the last
+ * (partial) word.
+ */
+ if ((lastbit = len - i)) {
+ /*
+ * Calculate mask for all the relevant bits in this word.
+ */
+ mask = ((xfs_rtword_t)1 << lastbit) - 1;
+ /*
+ * Compute difference between actual and desired value.
+ */
+ if ((wdiff = (*b ^ want) & mask)) {
+ /*
+ * Different, mark where we are and return.
+ */
+ xfs_trans_brelse(tp, bp);
+ i += XFS_RTLOBIT(wdiff);
+ *rtblock = start + i - 1;
+ return 0;
+ } else
+ i = len;
+ }
+ /*
+ * No match, return that we scanned the whole area.
+ */
+ xfs_trans_brelse(tp, bp);
+ *rtblock = start + i - 1;
+ return 0;
+}
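Both xfs_rtfind_back and xfs_rtfind_forw rely on the same trick: build a "want" word that is all-ones if the starting block is free and all-zeroes if it is allocated, XOR each bitmap word against it, and the first set bit of a non-zero result pinpoints the first block whose state differs. A hedged sketch of the forward case, restricted to word-aligned starts and a single in-memory array (no partial words or buffer switching):

#include <stdint.h>
#include <stdio.h>

#define NBWORD 32	/* bits per bitmap word */

/* Lowest set bit index of a non-zero word (like XFS_RTLOBIT). */
static int
lobit(uint32_t w)
{
	int i = 0;

	while (!(w & 1)) {
		w >>= 1;
		i++;
	}
	return i;
}

/*
 * Starting at bit `start` (assumed word-aligned here), return the index of
 * the first bit whose value differs from bit `start`, or `nbits` if none.
 */
static unsigned long
find_forw(const uint32_t *bmp, unsigned long start, unsigned long nbits)
{
	uint32_t want = (bmp[start / NBWORD] >> (start % NBWORD)) & 1 ? ~0u : 0u;
	unsigned long i;

	for (i = start; i < nbits; i += NBWORD) {
		uint32_t wdiff = bmp[i / NBWORD] ^ want;

		if (wdiff)
			return i + lobit(wdiff);	/* first differing block */
	}
	return nbits;
}

int
main(void)
{
	uint32_t bmp[2] = { 0xffffffffu, 0x0000ffffu };	/* blocks 0..47 free, rest allocated */

	printf("first state change at bit %lu\n", find_forw(bmp, 0, 64));	/* 48 */
	return 0;
}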
+
+/*
+ * Read and modify the summary information for a given extent size,
+ * bitmap block combination.
+ * Keeps track of a current summary block, so we don't keep reading
+ * it from the buffer cache.
+ */
+int
+xfs_rtmodify_summary(
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ int log, /* log2 of extent size */
+ xfs_rtblock_t bbno, /* bitmap block number */
+ int delta, /* change to make to summary info */
+ xfs_buf_t **rbpp, /* in/out: summary block buffer */
+ xfs_fsblock_t *rsb) /* in/out: summary block number */
+{
+ xfs_buf_t *bp; /* buffer for the summary block */
+ int error; /* error value */
+ xfs_fsblock_t sb; /* summary fsblock */
+ int so; /* index into the summary file */
+ xfs_suminfo_t *sp; /* pointer to returned data */
+
+ /*
+ * Compute entry number in the summary file.
+ */
+ so = XFS_SUMOFFS(mp, log, bbno);
+ /*
+ * Compute the block number in the summary file.
+ */
+ sb = XFS_SUMOFFSTOBLOCK(mp, so);
+ /*
+ * If we have an old buffer, and the block number matches, use that.
+ */
+ if (rbpp && *rbpp && *rsb == sb)
+ bp = *rbpp;
+ /*
+ * Otherwise we have to get the buffer.
+ */
+ else {
+ /*
+ * If there was an old one, get rid of it first.
+ */
+ if (rbpp && *rbpp)
+ xfs_trans_brelse(tp, *rbpp);
+ error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
+ if (error) {
+ return error;
+ }
+ /*
+ * Remember this buffer and block for the next call.
+ */
+ if (rbpp) {
+ *rbpp = bp;
+ *rsb = sb;
+ }
+ }
+ /*
+ * Point to the summary information, modify and log it.
+ */
+ sp = XFS_SUMPTR(mp, bp, so);
+ *sp += delta;
+ xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)bp->b_addr),
+ (uint)((char *)sp - (char *)bp->b_addr + sizeof(*sp) - 1));
+ return 0;
+}
+
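xfs_rtmodify_summary() treats the realtime summary file as a two-dimensional table of free-extent counters indexed by log2 of the extent size and by bitmap block, and it caches the last summary buffer in *rbpp/*rsb so back-to-back updates to the same block skip the buffer-cache lookup. A toy in-memory model of the indexing only (the real XFS_SUMOFFS()/XFS_SUMOFFSTOBLOCK() layout depends on the mount geometry, so the row-major layout below is an assumption):

#include <stdio.h>

#define RSUMLEVELS 4	/* assumed number of log2 size buckets */
#define RBMBLOCKS  8	/* assumed number of rt bitmap blocks */

static int summary[RSUMLEVELS * RBMBLOCKS];	/* flat summary "file" */

/* Analogue of XFS_SUMOFFS(): entry for (log2 size, bitmap block). */
static int sumoffs(int log, int bbno)
{
	return log * RBMBLOCKS + bbno;
}

/* Analogue of xfs_rtmodify_summary(): bump one counter by delta. */
static void modify_summary(int log, int bbno, int delta)
{
	summary[sumoffs(log, bbno)] += delta;
}

int main(void)
{
	modify_summary(2, 5, +1);	/* a free extent of size ~2^2 appears in block 5 */
	modify_summary(2, 5, -1);	/* ...and later disappears again */
	printf("summary[2][5] = %d\n", summary[sumoffs(2, 5)]);
	return 0;
}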
+/*
+ * Set the given range of bitmap bits to the given value.
+ * Do whatever I/O and logging is required.
+ */
+int
+xfs_rtmodify_range(
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_rtblock_t start, /* starting block to modify */
+ xfs_extlen_t len, /* length of extent to modify */
+ int val) /* 1 for free, 0 for allocated */
+{
+ xfs_rtword_t *b; /* current word in buffer */
+ int bit; /* bit number in the word */
+ xfs_rtblock_t block; /* bitmap block number */
+ xfs_buf_t *bp; /* buf for the block */
+ xfs_rtword_t *bufp; /* starting word in buffer */
+ int error; /* error value */
+ xfs_rtword_t *first; /* first used word in the buffer */
+ int i; /* current bit number rel. to start */
+ int lastbit; /* last useful bit in word */
+ xfs_rtword_t mask; /* mask of relevant bits for value */
+ int word; /* word number in the buffer */
+
+ /*
+ * Compute starting bitmap block number.
+ */
+ block = XFS_BITTOBLOCK(mp, start);
+ /*
+ * Read the bitmap block, and point to its data.
+ */
+ error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ bufp = bp->b_addr;
+ /*
+ * Compute the starting word's address, and starting bit.
+ */
+ word = XFS_BITTOWORD(mp, start);
+ first = b = &bufp[word];
+ bit = (int)(start & (XFS_NBWORD - 1));
+ /*
+ * 0 (allocated) => all zeroes; 1 (free) => all ones.
+ */
+ val = -val;
+ /*
+ * If not starting on a word boundary, deal with the first
+ * (partial) word.
+ */
+ if (bit) {
+ /*
+ * Compute first bit not changed and mask of relevant bits.
+ */
+ lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+ mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+ /*
+ * Set/clear the active bits.
+ */
+ if (val)
+ *b |= mask;
+ else
+ *b &= ~mask;
+ i = lastbit - bit;
+ /*
+ * Go on to the next block if that's where the next word is
+ * and we need the next word.
+ */
+ if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ /*
+ * Log the changed part of this block.
+ * Get the next one.
+ */
+ xfs_trans_log_buf(tp, bp,
+ (uint)((char *)first - (char *)bufp),
+ (uint)((char *)b - (char *)bufp));
+ error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ first = b = bufp = bp->b_addr;
+ word = 0;
+ } else {
+ /*
+ * Go on to the next word in the buffer
+ */
+ b++;
+ }
+ } else {
+ /*
+ * Starting on a word boundary, no partial word.
+ */
+ i = 0;
+ }
+ /*
+ * Loop over whole words in buffers. When we use up one buffer
+ * we move on to the next one.
+ */
+ while (len - i >= XFS_NBWORD) {
+ /*
+ * Set the word value correctly.
+ */
+ *b = val;
+ i += XFS_NBWORD;
+ /*
+ * Go on to the next block if that's where the next word is
+ * and we need the next word.
+ */
+ if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ /*
+ * Log the changed part of this block.
+ * Get the next one.
+ */
+ xfs_trans_log_buf(tp, bp,
+ (uint)((char *)first - (char *)bufp),
+ (uint)((char *)b - (char *)bufp));
+ error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ first = b = bufp = bp->b_addr;
+ word = 0;
+ } else {
+ /*
+ * Go on to the next word in the buffer
+ */
+ b++;
+ }
+ }
+ /*
+ * If not ending on a word boundary, deal with the last
+ * (partial) word.
+ */
+ if ((lastbit = len - i)) {
+ /*
+ * Compute a mask of relevant bits.
+ */
+ bit = 0;
+ mask = ((xfs_rtword_t)1 << lastbit) - 1;
+ /*
+ * Set/clear the active bits.
+ */
+ if (val)
+ *b |= mask;
+ else
+ *b &= ~mask;
+ b++;
+ }
+ /*
+ * Log any remaining changed bytes.
+ */
+ if (b > first)
+ xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp),
+ (uint)((char *)b - (char *)bufp - 1));
+ return 0;
+}
+
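The `val = -val` step above turns the caller's 0/1 into an all-zeroes or all-ones word, so whole words inside the range can be stored directly and only the two partial words need masking. The same idea in a self-contained form (uint32_t words, hypothetical helper name):

#include <stdint.h>
#include <stdio.h>

#define NBWORD 32

/* Set (val=1) or clear (val=0) bits [start, start+len) in a word array. */
static void modify_range(uint32_t *words, int start, int len, int val)
{
	uint32_t fill = (uint32_t)-val;	/* 0 -> 0x00000000, 1 -> 0xffffffff */
	int i = start;
	int end = start + len;

	while (i < end) {
		int bit = i % NBWORD;
		int n = end - i < NBWORD - bit ? end - i : NBWORD - bit;

		if (n == NBWORD) {
			/* Whole word: store the all-zeroes/all-ones value. */
			words[i / NBWORD] = fill;
		} else {
			uint32_t mask = (((uint32_t)1 << n) - 1) << bit;

			if (val)
				words[i / NBWORD] |= mask;
			else
				words[i / NBWORD] &= ~mask;
		}
		i += n;
	}
}

int main(void)
{
	uint32_t bitmap[2] = { 0, 0 };

	modify_range(bitmap, 0, 32, 1);		/* whole first word goes free */
	modify_range(bitmap, 28, 8, 0);		/* allocate 8 blocks spanning both words */
	printf("%08x %08x\n", bitmap[0], bitmap[1]);	/* prints: 0fffffff 00000000 */
	return 0;
}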
+/*
+ * Mark an extent specified by start and len freed.
+ * Updates all the summary information as well as the bitmap.
+ */
+int
+xfs_rtfree_range(
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_rtblock_t start, /* starting block to free */
+ xfs_extlen_t len, /* length to free */
+ xfs_buf_t **rbpp, /* in/out: summary block buffer */
+ xfs_fsblock_t *rsb) /* in/out: summary block number */
+{
+ xfs_rtblock_t end; /* end of the freed extent */
+ int error; /* error value */
+ xfs_rtblock_t postblock; /* first block freed > end */
+ xfs_rtblock_t preblock; /* first block freed < start */
+
+ end = start + len - 1;
+ /*
+ * Modify the bitmap to mark this extent freed.
+ */
+ error = xfs_rtmodify_range(mp, tp, start, len, 1);
+ if (error) {
+ return error;
+ }
+ /*
+ * Assume we're freeing out of the middle of an allocated extent.
+ * We need to find the beginning and end of the extent so we can
+ * properly update the summary.
+ */
+ error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+ if (error) {
+ return error;
+ }
+ /*
+ * Find the next allocated block (end of allocated extent).
+ */
+ error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
+ &postblock);
+ if (error)
+ return error;
+ /*
+ * If there are blocks not being freed at the front of the
+ * old extent, add summary data for them to be allocated.
+ */
+ if (preblock < start) {
+ error = xfs_rtmodify_summary(mp, tp,
+ XFS_RTBLOCKLOG(start - preblock),
+ XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+ if (error) {
+ return error;
+ }
+ }
+ /*
+ * If there are blocks not being freed at the end of the
+ * old extent, add summary data for them to be allocated.
+ */
+ if (postblock > end) {
+ error = xfs_rtmodify_summary(mp, tp,
+ XFS_RTBLOCKLOG(postblock - end),
+ XFS_BITTOBLOCK(mp, end + 1), -1, rbpp, rsb);
+ if (error) {
+ return error;
+ }
+ }
+ /*
+ * Increment the summary information corresponding to the entire
+ * (new) free extent.
+ */
+ error = xfs_rtmodify_summary(mp, tp,
+ XFS_RTBLOCKLOG(postblock + 1 - preblock),
+ XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+ return error;
+}
+
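The summary handling above is coalescing bookkeeping: once the bits are flipped, xfs_rtfind_back()/xfs_rtfind_forw() find how far the merged free extent now reaches, the two smaller neighbouring entries are dropped (delta -1) and a single entry for the whole merged extent is added (delta +1). A toy model of that flow, reusing the flat summary-array idea from the earlier sketch (all helper names here are assumptions standing in for the XFS macros):

#include <stdio.h>

#define RSUMLEVELS 8
#define RBMBLOCKS  4
#define BLOCKBITS  64	/* assumed bits per bitmap block */

static int summary[RSUMLEVELS * RBMBLOCKS];

static int blocklog(long len)	/* analogue of XFS_RTBLOCKLOG(): floor(log2(len)) */
{
	int log = 0;

	while (len >>= 1)
		log++;
	return log;
}

static int bittoblock(long bit)	/* analogue of XFS_BITTOBLOCK() */
{
	return (int)(bit / BLOCKBITS);
}

static void mod_summary(int log, int bbno, int delta)
{
	summary[log * RBMBLOCKS + bbno] += delta;
}

/* Merge bookkeeping for freeing [start, end] when the surrounding free
 * extent turns out to span [preblock, postblock]. */
static void free_range_summary(long start, long end, long preblock, long postblock)
{
	if (preblock < start)		/* old free piece in front is subsumed */
		mod_summary(blocklog(start - preblock), bittoblock(preblock), -1);
	if (postblock > end)		/* old free piece behind is subsumed */
		mod_summary(blocklog(postblock - end), bittoblock(end + 1), -1);
	/* one entry for the whole merged free extent */
	mod_summary(blocklog(postblock + 1 - preblock), bittoblock(preblock), +1);
}

int main(void)
{
	/* Freeing blocks 40..49, with free neighbours 32..39 and 50..63. */
	mod_summary(blocklog(8), bittoblock(32), +1);	/* pre-existing entries */
	mod_summary(blocklog(14), bittoblock(50), +1);
	free_range_summary(40, 49, 32, 63);
	printf("entry for the merged 32-block class: %d\n",
	       summary[blocklog(32) * RBMBLOCKS + 0]);
	return 0;
}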
+/*
+ * Check that the given range is either all allocated (val = 0) or
+ * all free (val = 1).
+ */
+int
+xfs_rtcheck_range(
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_rtblock_t start, /* starting block number of extent */
+ xfs_extlen_t len, /* length of extent */
+ int val, /* 1 for free, 0 for allocated */
+ xfs_rtblock_t *new, /* out: first block not matching */
+ int *stat) /* out: 1 for matches, 0 for not */
+{
+ xfs_rtword_t *b; /* current word in buffer */
+ int bit; /* bit number in the word */
+ xfs_rtblock_t block; /* bitmap block number */
+ xfs_buf_t *bp; /* buf for the block */
+ xfs_rtword_t *bufp; /* starting word in buffer */
+ int error; /* error value */
+ xfs_rtblock_t i; /* current bit number rel. to start */
+ xfs_rtblock_t lastbit; /* last useful bit in word */
+ xfs_rtword_t mask; /* mask of relevant bits for value */
+ xfs_rtword_t wdiff; /* difference from wanted value */
+ int word; /* word number in the buffer */
+
+ /*
+ * Compute starting bitmap block number.
+ */
+ block = XFS_BITTOBLOCK(mp, start);
+ /*
+ * Read the bitmap block.
+ */
+ error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ bufp = bp->b_addr;
+ /*
+ * Compute the starting word's address, and starting bit.
+ */
+ word = XFS_BITTOWORD(mp, start);
+ b = &bufp[word];
+ bit = (int)(start & (XFS_NBWORD - 1));
+ /*
+ * 0 (allocated) => all zeroes; 1 (free) => all ones.
+ */
+ val = -val;
+ /*
+ * If not starting on a word boundary, deal with the first
+ * (partial) word.
+ */
+ if (bit) {
+ /*
+ * Compute first bit not examined.
+ */
+ lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+ /*
+ * Mask of relevant bits.
+ */
+ mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+ /*
+ * Compute difference between actual and desired value.
+ */
+ if ((wdiff = (*b ^ val) & mask)) {
+ /*
+ * Different, compute first wrong bit and return.
+ */
+ xfs_trans_brelse(tp, bp);
+ i = XFS_RTLOBIT(wdiff) - bit;
+ *new = start + i;
+ *stat = 0;
+ return 0;
+ }
+ i = lastbit - bit;
+ /*
+ * Go on to next block if that's where the next word is
+ * and we need the next word.
+ */
+ if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ /*
+ * If done with this block, get the next one.
+ */
+ xfs_trans_brelse(tp, bp);
+ error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ b = bufp = bp->b_addr;
+ word = 0;
+ } else {
+ /*
+ * Go on to the next word in the buffer.
+ */
+ b++;
+ }
+ } else {
+ /*
+ * Starting on a word boundary, no partial word.
+ */
+ i = 0;
+ }
+ /*
+ * Loop over whole words in buffers. When we use up one buffer
+ * we move on to the next one.
+ */
+ while (len - i >= XFS_NBWORD) {
+ /*
+ * Compute difference between actual and desired value.
+ */
+ if ((wdiff = *b ^ val)) {
+ /*
+ * Different, compute first wrong bit and return.
+ */
+ xfs_trans_brelse(tp, bp);
+ i += XFS_RTLOBIT(wdiff);
+ *new = start + i;
+ *stat = 0;
+ return 0;
+ }
+ i += XFS_NBWORD;
+ /*
+ * Go on to next block if that's where the next word is
+ * and we need the next word.
+ */
+ if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+ /*
+ * If done with this block, get the next one.
+ */
+ xfs_trans_brelse(tp, bp);
+ error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+ if (error) {
+ return error;
+ }
+ b = bufp = bp->b_addr;
+ word = 0;
+ } else {
+ /*
+ * Go on to the next word in the buffer.
+ */
+ b++;
+ }
+ }
+ /*
+ * If not ending on a word boundary, deal with the last
+ * (partial) word.
+ */
+ if ((lastbit = len - i)) {
+ /*
+ * Mask of relevant bits.
+ */
+ mask = ((xfs_rtword_t)1 << lastbit) - 1;
+ /*
+ * Compute difference between actual and desired value.
+ */
+ if ((wdiff = (*b ^ val) & mask)) {
+ /*
+ * Different, compute first wrong bit and return.
+ */
+ xfs_trans_brelse(tp, bp);
+ i += XFS_RTLOBIT(wdiff);
+ *new = start + i;
+ *stat = 0;
+ return 0;
+ } else
+ i = len;
+ }
+ /*
+ * Successful, return.
+ */
+ xfs_trans_brelse(tp, bp);
+ *new = start + i;
+ *stat = 1;
+ return 0;
+}
+
+#ifdef DEBUG
+/*
+ * Check that the given extent (block range) is allocated already.
+ */
+STATIC int /* error */
+xfs_rtcheck_alloc_range(
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_rtblock_t bno, /* starting block number of extent */
+ xfs_extlen_t len) /* length of extent */
+{
+ xfs_rtblock_t new; /* dummy for xfs_rtcheck_range */
+ int stat;
+ int error;
+
+ error = xfs_rtcheck_range(mp, tp, bno, len, 0, &new, &stat);
+ if (error)
+ return error;
+ ASSERT(stat);
+ return 0;
+}
+#else
+#define xfs_rtcheck_alloc_range(m,t,b,l) (0)
+#endif
+/*
+ * Free an extent in the realtime subvolume. Length is expressed in
+ * realtime extents, as is the block number.
+ */
+int /* error */
+xfs_rtfree_extent(
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_rtblock_t bno, /* starting block number to free */
+ xfs_extlen_t len) /* length of extent freed */
+{
+ int error; /* error value */
+ xfs_mount_t *mp; /* file system mount structure */
+ xfs_fsblock_t sb; /* summary file block number */
+ xfs_buf_t *sumbp = NULL; /* summary file block buffer */
+
+ mp = tp->t_mountp;
+
+ ASSERT(mp->m_rbmip->i_itemp != NULL);
+ ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
+
+ error = xfs_rtcheck_alloc_range(mp, tp, bno, len);
+ if (error)
+ return error;
+
+ /*
+ * Free the range of realtime blocks.
+ */
+ error = xfs_rtfree_range(mp, tp, bno, len, &sumbp, &sb);
+ if (error) {
+ return error;
+ }
+ /*
+ * Mark more blocks free in the superblock.
+ */
+ xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, (long)len);
+ /*
+ * If we've now freed all the blocks, reset the file sequence
+ * number to 0.
+ */
+ if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
+ mp->m_sb.sb_rextents) {
+ if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
+ mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
+ *(__uint64_t *)&mp->m_rbmip->i_d.di_atime = 0;
+ xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
+ }
+ return 0;
+}
+
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index a5b59d92eb70..b7c9aea77f8f 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -17,34 +17,26 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
-#include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
-#include "xfs_rtalloc.h"
-#include "xfs_bmap.h"
#include "xfs_error.h"
-#include "xfs_quota.h"
-#include "xfs_fsops.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
+#include "xfs_dinode.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
/*
* Physical superblock buffer manipulations. Shared with libxfs in userspace.
@@ -249,13 +241,13 @@ xfs_mount_validate_sb(
if (xfs_sb_version_has_pquotino(sbp)) {
if (sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) {
xfs_notice(mp,
- "Version 5 of Super block has XFS_OQUOTA bits.\n");
+ "Version 5 of Super block has XFS_OQUOTA bits.");
return XFS_ERROR(EFSCORRUPTED);
}
} else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD |
XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) {
xfs_notice(mp,
-"Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits.\n");
+"Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits.");
return XFS_ERROR(EFSCORRUPTED);
}
@@ -596,6 +588,11 @@ xfs_sb_verify(
* single bit error could clear the feature bit and unused parts of the
* superblock are supposed to be zero. Hence a non-null crc field indicates that
* we've potentially lost a feature bit and we should check it anyway.
+ *
+ * However, past bugs (e.g. in growfs) left non-zeroed regions beyond the
+ * last field in V4 secondary superblocks. So for secondary superblocks,
+ * we are more forgiving, and ignore CRC failures if the primary doesn't
+ * indicate that the fs version is V5.
*/
static void
xfs_sb_read_verify(
@@ -616,16 +613,21 @@ xfs_sb_read_verify(
if (!xfs_verify_cksum(bp->b_addr, be16_to_cpu(dsb->sb_sectsize),
offsetof(struct xfs_sb, sb_crc))) {
- error = EFSCORRUPTED;
- goto out_error;
+ /* Only fail bad secondaries on a known V5 filesystem */
+ if (bp->b_bn != XFS_SB_DADDR &&
+ xfs_sb_version_hascrc(&mp->m_sb)) {
+ error = EFSCORRUPTED;
+ goto out_error;
+ }
}
}
error = xfs_sb_verify(bp, true);
out_error:
if (error) {
- XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
- mp, bp->b_addr);
+ if (error != EWRONGFS)
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
+ mp, bp->b_addr);
xfs_buf_ioerror(bp, error);
}
}
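The verifier change above only treats a CRC mismatch as corruption for a secondary superblock once the primary has already established a V5 filesystem; V4 secondaries left with stale bytes by old growfs bugs fall through to the structural checks in xfs_sb_verify() instead. The decision reduces to a small predicate (standalone sketch, simplified stand-in names for the buffer/mount state consulted above):

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the committed condition: a CRC mismatch only counts as
 * corruption for a non-primary superblock once the primary has already
 * identified the filesystem as V5; everything else still goes through
 * the structural verification step. */
static bool crc_mismatch_fatal(bool is_primary, bool mount_knows_v5)
{
	return !is_primary && mount_knows_v5;
}

int main(void)
{
	printf("V4 secondary, bad CRC -> fatal? %d\n", crc_mismatch_fatal(false, false));
	printf("V5 secondary, bad CRC -> fatal? %d\n", crc_mismatch_fatal(false, true));
	printf("primary,      bad CRC -> fatal here? %d\n", crc_mismatch_fatal(true, true));
	return 0;
}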
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h
index 6835b44f850e..35061d4b614c 100644
--- a/fs/xfs/xfs_sb.h
+++ b/fs/xfs/xfs_sb.h
@@ -699,7 +699,4 @@ extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *);
extern void xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t);
extern void xfs_sb_quota_from_disk(struct xfs_sb *sbp);
-extern const struct xfs_buf_ops xfs_sb_buf_ops;
-extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
-
#endif /* __XFS_SB_H__ */
diff --git a/fs/xfs/xfs_shared.h b/fs/xfs/xfs_shared.h
new file mode 100644
index 000000000000..8c5035a13df1
--- /dev/null
+++ b/fs/xfs/xfs_shared.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __XFS_SHARED_H__
+#define __XFS_SHARED_H__
+
+/*
+ * Definitions shared between kernel and userspace that don't fit into any other
+ * header file that is shared with userspace.
+ */
+struct xfs_ifork;
+struct xfs_buf;
+struct xfs_buf_ops;
+struct xfs_mount;
+struct xfs_trans;
+struct xfs_inode;
+
+/*
+ * Buffer verifier operations are widely used, including userspace tools
+ */
+extern const struct xfs_buf_ops xfs_agf_buf_ops;
+extern const struct xfs_buf_ops xfs_agi_buf_ops;
+extern const struct xfs_buf_ops xfs_agfl_buf_ops;
+extern const struct xfs_buf_ops xfs_allocbt_buf_ops;
+extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
+extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
+extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
+extern const struct xfs_buf_ops xfs_da3_node_buf_ops;
+extern const struct xfs_buf_ops xfs_dquot_buf_ops;
+extern const struct xfs_buf_ops xfs_symlink_buf_ops;
+extern const struct xfs_buf_ops xfs_inobt_buf_ops;
+extern const struct xfs_buf_ops xfs_inode_buf_ops;
+extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
+extern const struct xfs_buf_ops xfs_sb_buf_ops;
+extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
+
+/*
+ * Transaction types. Used to distinguish types of buffers. These never reach
+ * the log.
+ */
+#define XFS_TRANS_SETATTR_NOT_SIZE 1
+#define XFS_TRANS_SETATTR_SIZE 2
+#define XFS_TRANS_INACTIVE 3
+#define XFS_TRANS_CREATE 4
+#define XFS_TRANS_CREATE_TRUNC 5
+#define XFS_TRANS_TRUNCATE_FILE 6
+#define XFS_TRANS_REMOVE 7
+#define XFS_TRANS_LINK 8
+#define XFS_TRANS_RENAME 9
+#define XFS_TRANS_MKDIR 10
+#define XFS_TRANS_RMDIR 11
+#define XFS_TRANS_SYMLINK 12
+#define XFS_TRANS_SET_DMATTRS 13
+#define XFS_TRANS_GROWFS 14
+#define XFS_TRANS_STRAT_WRITE 15
+#define XFS_TRANS_DIOSTRAT 16
+/* 17 was XFS_TRANS_WRITE_SYNC */
+#define XFS_TRANS_WRITEID 18
+#define XFS_TRANS_ADDAFORK 19
+#define XFS_TRANS_ATTRINVAL 20
+#define XFS_TRANS_ATRUNCATE 21
+#define XFS_TRANS_ATTR_SET 22
+#define XFS_TRANS_ATTR_RM 23
+#define XFS_TRANS_ATTR_FLAG 24
+#define XFS_TRANS_CLEAR_AGI_BUCKET 25
+#define XFS_TRANS_QM_SBCHANGE 26
+/*
+ * Dummy entries since we use the transaction type to index into the
+ * trans_type[] in xlog_recover_print_trans_head()
+ */
+#define XFS_TRANS_DUMMY1 27
+#define XFS_TRANS_DUMMY2 28
+#define XFS_TRANS_QM_QUOTAOFF 29
+#define XFS_TRANS_QM_DQALLOC 30
+#define XFS_TRANS_QM_SETQLIM 31
+#define XFS_TRANS_QM_DQCLUSTER 32
+#define XFS_TRANS_QM_QINOCREATE 33
+#define XFS_TRANS_QM_QUOTAOFF_END 34
+#define XFS_TRANS_SB_UNIT 35
+#define XFS_TRANS_FSYNC_TS 36
+#define XFS_TRANS_GROWFSRT_ALLOC 37
+#define XFS_TRANS_GROWFSRT_ZERO 38
+#define XFS_TRANS_GROWFSRT_FREE 39
+#define XFS_TRANS_SWAPEXT 40
+#define XFS_TRANS_SB_COUNT 41
+#define XFS_TRANS_CHECKPOINT 42
+#define XFS_TRANS_ICREATE 43
+#define XFS_TRANS_TYPE_MAX 43
+/* new transaction types need to be reflected in xfs_logprint(8) */
+
+#define XFS_TRANS_TYPES \
+ { XFS_TRANS_SETATTR_NOT_SIZE, "SETATTR_NOT_SIZE" }, \
+ { XFS_TRANS_SETATTR_SIZE, "SETATTR_SIZE" }, \
+ { XFS_TRANS_INACTIVE, "INACTIVE" }, \
+ { XFS_TRANS_CREATE, "CREATE" }, \
+ { XFS_TRANS_CREATE_TRUNC, "CREATE_TRUNC" }, \
+ { XFS_TRANS_TRUNCATE_FILE, "TRUNCATE_FILE" }, \
+ { XFS_TRANS_REMOVE, "REMOVE" }, \
+ { XFS_TRANS_LINK, "LINK" }, \
+ { XFS_TRANS_RENAME, "RENAME" }, \
+ { XFS_TRANS_MKDIR, "MKDIR" }, \
+ { XFS_TRANS_RMDIR, "RMDIR" }, \
+ { XFS_TRANS_SYMLINK, "SYMLINK" }, \
+ { XFS_TRANS_SET_DMATTRS, "SET_DMATTRS" }, \
+ { XFS_TRANS_GROWFS, "GROWFS" }, \
+ { XFS_TRANS_STRAT_WRITE, "STRAT_WRITE" }, \
+ { XFS_TRANS_DIOSTRAT, "DIOSTRAT" }, \
+ { XFS_TRANS_WRITEID, "WRITEID" }, \
+ { XFS_TRANS_ADDAFORK, "ADDAFORK" }, \
+ { XFS_TRANS_ATTRINVAL, "ATTRINVAL" }, \
+ { XFS_TRANS_ATRUNCATE, "ATRUNCATE" }, \
+ { XFS_TRANS_ATTR_SET, "ATTR_SET" }, \
+ { XFS_TRANS_ATTR_RM, "ATTR_RM" }, \
+ { XFS_TRANS_ATTR_FLAG, "ATTR_FLAG" }, \
+ { XFS_TRANS_CLEAR_AGI_BUCKET, "CLEAR_AGI_BUCKET" }, \
+ { XFS_TRANS_QM_SBCHANGE, "QM_SBCHANGE" }, \
+ { XFS_TRANS_QM_QUOTAOFF, "QM_QUOTAOFF" }, \
+ { XFS_TRANS_QM_DQALLOC, "QM_DQALLOC" }, \
+ { XFS_TRANS_QM_SETQLIM, "QM_SETQLIM" }, \
+ { XFS_TRANS_QM_DQCLUSTER, "QM_DQCLUSTER" }, \
+ { XFS_TRANS_QM_QINOCREATE, "QM_QINOCREATE" }, \
+ { XFS_TRANS_QM_QUOTAOFF_END, "QM_QOFF_END" }, \
+ { XFS_TRANS_SB_UNIT, "SB_UNIT" }, \
+ { XFS_TRANS_FSYNC_TS, "FSYNC_TS" }, \
+ { XFS_TRANS_GROWFSRT_ALLOC, "GROWFSRT_ALLOC" }, \
+ { XFS_TRANS_GROWFSRT_ZERO, "GROWFSRT_ZERO" }, \
+ { XFS_TRANS_GROWFSRT_FREE, "GROWFSRT_FREE" }, \
+ { XFS_TRANS_SWAPEXT, "SWAPEXT" }, \
+ { XFS_TRANS_SB_COUNT, "SB_COUNT" }, \
+ { XFS_TRANS_CHECKPOINT, "CHECKPOINT" }, \
+ { XFS_TRANS_DUMMY1, "DUMMY1" }, \
+ { XFS_TRANS_DUMMY2, "DUMMY2" }, \
+ { XLOG_UNMOUNT_REC_TYPE, "UNMOUNT" }
+
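XFS_TRANS_TYPES pairs each numeric transaction type with a printable name so tracing and log-print tooling can decode t_type. The pattern in isolation looks like the sketch below; this is just a generic value/name table walked by a lookup helper, not the kernel's actual tracepoint plumbing:

#include <stdio.h>

struct type_name {
	int		val;
	const char	*name;
};

#define TRANS_CREATE	4
#define TRANS_REMOVE	7
#define TRANS_GROWFS	14

static const struct type_name trans_names[] = {
	{ TRANS_CREATE, "CREATE" },
	{ TRANS_REMOVE, "REMOVE" },
	{ TRANS_GROWFS, "GROWFS" },
};

static const char *trans_type_name(int type)
{
	size_t i;

	for (i = 0; i < sizeof(trans_names) / sizeof(trans_names[0]); i++)
		if (trans_names[i].val == type)
			return trans_names[i].name;
	return "UNKNOWN";
}

int main(void)
{
	printf("type 14 is %s\n", trans_type_name(14));	/* GROWFS */
	return 0;
}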
+/*
+ * This structure is used to track log items associated with
+ * a transaction. It points to the log item and keeps some
+ * flags to track the state of the log item. It also tracks
+ * the amount of space needed to log the item it describes
+ * once we get to commit processing (see xfs_trans_commit()).
+ */
+struct xfs_log_item_desc {
+ struct xfs_log_item *lid_item;
+ struct list_head lid_trans;
+ unsigned char lid_flags;
+};
+
+#define XFS_LID_DIRTY 0x1
+
+/* log size calculation functions */
+int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
+int xfs_log_calc_minimum_size(struct xfs_mount *);
+
+
+/*
+ * Values for t_flags.
+ */
+#define XFS_TRANS_DIRTY 0x01 /* something needs to be logged */
+#define XFS_TRANS_SB_DIRTY 0x02 /* superblock is modified */
+#define XFS_TRANS_PERM_LOG_RES 0x04 /* xact took a permanent log res */
+#define XFS_TRANS_SYNC 0x08 /* make commit synchronous */
+#define XFS_TRANS_DQ_DIRTY 0x10 /* at least one dquot in trx dirty */
+#define XFS_TRANS_RESERVE 0x20 /* OK to use reserved data blocks */
+#define XFS_TRANS_FREEZE_PROT 0x40 /* Transaction has elevated writer
+ count in superblock */
+/*
+ * Values for call flags parameter.
+ */
+#define XFS_TRANS_RELEASE_LOG_RES 0x4
+#define XFS_TRANS_ABORT 0x8
+
+/*
+ * Field values for xfs_trans_mod_sb.
+ */
+#define XFS_TRANS_SB_ICOUNT 0x00000001
+#define XFS_TRANS_SB_IFREE 0x00000002
+#define XFS_TRANS_SB_FDBLOCKS 0x00000004
+#define XFS_TRANS_SB_RES_FDBLOCKS 0x00000008
+#define XFS_TRANS_SB_FREXTENTS 0x00000010
+#define XFS_TRANS_SB_RES_FREXTENTS 0x00000020
+#define XFS_TRANS_SB_DBLOCKS 0x00000040
+#define XFS_TRANS_SB_AGCOUNT 0x00000080
+#define XFS_TRANS_SB_IMAXPCT 0x00000100
+#define XFS_TRANS_SB_REXTSIZE 0x00000200
+#define XFS_TRANS_SB_RBMBLOCKS 0x00000400
+#define XFS_TRANS_SB_RBLOCKS 0x00000800
+#define XFS_TRANS_SB_REXTENTS 0x00001000
+#define XFS_TRANS_SB_REXTSLOG 0x00002000
+
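The XFS_TRANS_SB_* bits record which superblock counters a transaction has touched; a caller such as the xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, len) call in xfs_rtfree_extent() above accumulates a signed delta that is only folded into the superblock at commit time. A minimal model of that accumulate-then-apply pattern (field set cut down to two counters, all names assumed):

#include <stdio.h>

#define SB_FDBLOCKS	0x1	/* data blocks free-count changed */
#define SB_FREXTENTS	0x2	/* realtime extents free-count changed */

struct sb {		/* the "on disk" counters */
	long fdblocks;
	long frextents;
};

struct trans {		/* per-transaction deltas, applied at commit */
	unsigned int fields;
	long fdblocks_delta;
	long frextents_delta;
};

static void trans_mod_sb(struct trans *tp, unsigned int field, long delta)
{
	if (field == SB_FDBLOCKS)
		tp->fdblocks_delta += delta;
	else if (field == SB_FREXTENTS)
		tp->frextents_delta += delta;
	tp->fields |= field;
}

static void trans_commit(struct trans *tp, struct sb *sb)
{
	if (tp->fields & SB_FDBLOCKS)
		sb->fdblocks += tp->fdblocks_delta;
	if (tp->fields & SB_FREXTENTS)
		sb->frextents += tp->frextents_delta;
}

int main(void)
{
	struct sb sb = { 1000, 500 };
	struct trans tp = { 0, 0, 0 };

	trans_mod_sb(&tp, SB_FREXTENTS, 16);	/* e.g. freed 16 rt extents */
	trans_commit(&tp, &sb);
	printf("frextents now %ld\n", sb.frextents);	/* 516 */
	return 0;
}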
+/*
+ * Here we centralize the specification of XFS meta-data buffer reference count
+ * values. This determines how hard the buffer cache tries to hold onto the
+ * buffer.
+ */
+#define XFS_AGF_REF 4
+#define XFS_AGI_REF 4
+#define XFS_AGFL_REF 3
+#define XFS_INO_BTREE_REF 3
+#define XFS_ALLOC_BTREE_REF 2
+#define XFS_BMAP_BTREE_REF 2
+#define XFS_DIR_BTREE_REF 2
+#define XFS_INO_REF 2
+#define XFS_ATTR_BTREE_REF 1
+#define XFS_DQUOT_REF 1
+
+/*
+ * Flags for xfs_trans_ichgtime().
+ */
+#define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */
+#define XFS_ICHGTIME_CHG 0x2 /* inode field change timestamp */
+#define XFS_ICHGTIME_CREATE 0x4 /* inode create timestamp */
+
+
+/*
+ * Symlink decoding/encoding functions
+ */
+int xfs_symlink_blocks(struct xfs_mount *mp, int pathlen);
+int xfs_symlink_hdr_set(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
+ uint32_t size, struct xfs_buf *bp);
+bool xfs_symlink_hdr_ok(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
+ uint32_t size, struct xfs_buf *bp);
+void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
+ struct xfs_inode *ip, struct xfs_ifork *ifp);
+
+#endif /* __XFS_SHARED_H__ */
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 15188cc99449..d971f4932b5d 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -17,34 +17,26 @@
*/
#include "xfs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "xfs_ialloc.h"
#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
+#include "xfs_alloc.h"
#include "xfs_error.h"
-#include "xfs_itable.h"
#include "xfs_fsops.h"
-#include "xfs_attr.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
+#include "xfs_log.h"
#include "xfs_log_priv.h"
-#include "xfs_trans_priv.h"
-#include "xfs_filestream.h"
#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
@@ -52,6 +44,9 @@
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
+#include "xfs_dinode.h"
+#include "xfs_filestream.h"
+#include "xfs_quota.h"
#include <linux/namei.h>
#include <linux/init.h>
@@ -946,10 +941,6 @@ xfs_fs_destroy_inode(
XFS_STATS_INC(vn_reclaim);
- /* bad inode, get out here ASAP */
- if (is_bad_inode(inode))
- goto out_reclaim;
-
ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
/*
@@ -965,7 +956,6 @@ xfs_fs_destroy_inode(
* this more efficiently than we can here, so simply let background
* reclaim tear down all inodes.
*/
-out_reclaim:
xfs_inode_set_reclaim_tag(ip);
}
@@ -1165,7 +1155,7 @@ xfs_restore_resvblks(struct xfs_mount *mp)
* Note: xfs_log_quiesce() stops background log work - the callers must ensure
* it is started again when appropriate.
*/
-void
+static void
xfs_quiesce_attr(
struct xfs_mount *mp)
{
@@ -1246,7 +1236,7 @@ xfs_fs_remount(
*/
#if 0
xfs_info(mp,
- "mount option \"%s\" not supported for remount\n", p);
+ "mount option \"%s\" not supported for remount", p);
return -EINVAL;
#else
break;
@@ -1491,10 +1481,6 @@ xfs_fs_fill_super(
error = ENOENT;
goto out_unmount;
}
- if (is_bad_inode(root)) {
- error = EINVAL;
- goto out_unmount;
- }
sb->s_root = d_make_root(root);
if (!sb->s_root) {
error = ENOMEM;
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index f622a97a7e33..14e58f2c96bd 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -17,31 +17,31 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
+#include "xfs_shared.h"
#include "xfs_fs.h"
#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
+#include "xfs_da_format.h"
#include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
+#include "xfs_bmap_btree.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
/* ----- Kernel only functions below ----- */
STATIC int
@@ -424,8 +424,7 @@ xfs_symlink(
*/
STATIC int
xfs_inactive_symlink_rmt(
- xfs_inode_t *ip,
- xfs_trans_t **tpp)
+ struct xfs_inode *ip)
{
xfs_buf_t *bp;
int committed;
@@ -437,11 +436,9 @@ xfs_inactive_symlink_rmt(
xfs_mount_t *mp;
xfs_bmbt_irec_t mval[XFS_SYMLINK_MAPS];
int nmaps;
- xfs_trans_t *ntp;
int size;
xfs_trans_t *tp;
- tp = *tpp;
mp = ip->i_mount;
ASSERT(ip->i_df.if_flags & XFS_IFEXTENTS);
/*
@@ -453,6 +450,16 @@ xfs_inactive_symlink_rmt(
*/
ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
+ tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return error;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+
/*
* Lock the inode, fix the size, and join it to the transaction.
* Hold it so in the normal path, we still have it locked for
@@ -471,7 +478,7 @@ xfs_inactive_symlink_rmt(
error = xfs_bmapi_read(ip, 0, xfs_symlink_blocks(mp, size),
mval, &nmaps, 0);
if (error)
- goto error0;
+ goto error_trans_cancel;
/*
* Invalidate the block(s). No validation is done.
*/
@@ -481,22 +488,24 @@ xfs_inactive_symlink_rmt(
XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
if (!bp) {
error = ENOMEM;
- goto error1;
+ goto error_bmap_cancel;
}
xfs_trans_binval(tp, bp);
}
/*
* Unmap the dead block(s) to the free_list.
*/
- if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
- &first_block, &free_list, &done)))
- goto error1;
+ error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
+ &first_block, &free_list, &done);
+ if (error)
+ goto error_bmap_cancel;
ASSERT(done);
/*
* Commit the first transaction. This logs the EFI and the inode.
*/
- if ((error = xfs_bmap_finish(&tp, &free_list, &committed)))
- goto error1;
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
+ if (error)
+ goto error_bmap_cancel;
/*
* The transaction must have been committed, since there were
* actually extents freed by xfs_bunmapi. See xfs_bmap_finish.
@@ -511,26 +520,13 @@ xfs_inactive_symlink_rmt(
xfs_trans_ijoin(tp, ip, 0);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
/*
- * Get a new, empty transaction to return to our caller.
- */
- ntp = xfs_trans_dup(tp);
- /*
* Commit the transaction containing extent freeing and EFDs.
- * If we get an error on the commit here or on the reserve below,
- * we need to unlock the inode since the new transaction doesn't
- * have the inode attached.
*/
- error = xfs_trans_commit(tp, 0);
- tp = ntp;
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
if (error) {
ASSERT(XFS_FORCED_SHUTDOWN(mp));
- goto error0;
+ goto error_unlock;
}
- /*
- * transaction commit worked ok so we can drop the extra ticket
- * reference that we gained in xfs_trans_dup()
- */
- xfs_log_ticket_put(tp->t_ticket);
/*
* Remove the memory for extent descriptions (just bookkeeping).
@@ -538,23 +534,16 @@ xfs_inactive_symlink_rmt(
if (ip->i_df.if_bytes)
xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK);
ASSERT(ip->i_df.if_bytes == 0);
- /*
- * Put an itruncate log reservation in the new transaction
- * for our caller.
- */
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
- if (error) {
- ASSERT(XFS_FORCED_SHUTDOWN(mp));
- goto error0;
- }
- xfs_trans_ijoin(tp, ip, 0);
- *tpp = tp;
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return 0;
- error1:
+error_bmap_cancel:
xfs_bmap_cancel(&free_list);
- error0:
+error_trans_cancel:
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+error_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
@@ -563,41 +552,46 @@ xfs_inactive_symlink_rmt(
*/
int
xfs_inactive_symlink(
- struct xfs_inode *ip,
- struct xfs_trans **tp)
+ struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
int pathlen;
trace_xfs_inactive_symlink(ip);
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
/*
* Zero length symlinks _can_ exist.
*/
pathlen = (int)ip->i_d.di_size;
- if (!pathlen)
+ if (!pathlen) {
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return 0;
+ }
if (pathlen < 0 || pathlen > MAXPATHLEN) {
xfs_alert(mp, "%s: inode (0x%llx) bad symlink length (%d)",
__func__, (unsigned long long)ip->i_ino, pathlen);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
ASSERT(0);
return XFS_ERROR(EFSCORRUPTED);
}
if (ip->i_df.if_flags & XFS_IFINLINE) {
- if (ip->i_df.if_bytes > 0)
+ if (ip->i_df.if_bytes > 0)
xfs_idata_realloc(ip, -(ip->i_df.if_bytes),
XFS_DATA_FORK);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
ASSERT(ip->i_df.if_bytes == 0);
return 0;
}
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
/* remove the remote symlink */
- return xfs_inactive_symlink_rmt(ip, tp);
+ return xfs_inactive_symlink_rmt(ip);
}
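The symlink rework above also replaces the old error0/error1 exits with named unwind labels (error_bmap_cancel, error_trans_cancel, error_unlock) so each failure point releases exactly what has been acquired so far and nothing more. The shape of that pattern, reduced to a self-contained sketch with placeholder resources:

#include <stdio.h>

/* Placeholder resources; each stage must be undone if a later one fails. */
static int take_lock(void)      { return 0; }
static void drop_lock(void)     { puts("unlock inode"); }
static int start_trans(void)    { return 0; }
static void cancel_trans(void)  { puts("cancel transaction"); }
static int map_extents(void)    { return 0; }
static void cancel_bmap(void)   { puts("cancel bmap free list"); }
static int unmap_extents(void)  { return -1; }	/* pretend this step fails */

static int inactive_remote(void)
{
	int error;

	error = take_lock();
	if (error)
		return error;
	error = start_trans();
	if (error)
		goto error_unlock;
	error = map_extents();
	if (error)
		goto error_trans_cancel;
	error = unmap_extents();
	if (error)
		goto error_bmap_cancel;
	/* ...commit and return on the success path... */
	drop_lock();
	return 0;

error_bmap_cancel:
	cancel_bmap();
error_trans_cancel:
	cancel_trans();
error_unlock:
	drop_lock();
	return error;
}

int main(void)
{
	printf("result: %d\n", inactive_remote());
	return 0;
}

Each label falls through to the next, so a late failure unwinds everything while an early failure skips the cleanups for resources it never took.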
diff --git a/fs/xfs/xfs_symlink.h b/fs/xfs/xfs_symlink.h
index 99338ba666ac..e75245d09116 100644
--- a/fs/xfs/xfs_symlink.h
+++ b/fs/xfs/xfs_symlink.h
@@ -22,6 +22,6 @@
int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
const char *target_path, umode_t mode, struct xfs_inode **ipp);
int xfs_readlink(struct xfs_inode *ip, char *link);
-int xfs_inactive_symlink(struct xfs_inode *ip, struct xfs_trans **tpp);
+int xfs_inactive_symlink(struct xfs_inode *ip);
#endif /* __XFS_SYMLINK_H */
diff --git a/fs/xfs/xfs_symlink_remote.c b/fs/xfs/xfs_symlink_remote.c
index 01c85e3f6470..bf59a2b45f8c 100644
--- a/fs/xfs/xfs_symlink_remote.c
+++ b/fs/xfs/xfs_symlink_remote.c
@@ -19,8 +19,9 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
#include "xfs_ag.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
@@ -30,6 +31,7 @@
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_cksum.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 5d7b3e40705f..dee3279c095e 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -17,19 +17,16 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
@@ -37,6 +34,8 @@
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_quota.h"
@@ -46,6 +45,7 @@
#include "xfs_dquot.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
+#include "xfs_bmap_btree.h"
/*
* We include this last to have the helpers above available for the trace
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 5411e01ab452..c812c5c060de 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -18,32 +18,21 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_error.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
-#include "xfs_bmap.h"
#include "xfs_quota.h"
-#include "xfs_qm.h"
+#include "xfs_trans.h"
#include "xfs_trans_priv.h"
-#include "xfs_trans_space.h"
-#include "xfs_inode_item.h"
-#include "xfs_log_priv.h"
-#include "xfs_buf_item.h"
+#include "xfs_log.h"
#include "xfs_trace.h"
+#include "xfs_error.h"
kmem_zone_t *xfs_trans_zone;
kmem_zone_t *xfs_log_item_desc_zone;
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 09cf40b89e8c..9b96d35e483d 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -18,10 +18,6 @@
#ifndef __XFS_TRANS_H__
#define __XFS_TRANS_H__
-struct xfs_log_item;
-
-#include "xfs_trans_resv.h"
-
/* kernel only transaction subsystem defines */
struct xfs_buf;
@@ -77,6 +73,9 @@ struct xfs_item_ops {
void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
};
+void xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
+ int type, const struct xfs_item_ops *ops);
+
/*
* Return values for the iop_push() routines.
*/
@@ -85,18 +84,12 @@ struct xfs_item_ops {
#define XFS_ITEM_LOCKED 2
#define XFS_ITEM_FLUSHING 3
-/*
- * This is the type of function which can be given to xfs_trans_callback()
- * to be called upon the transaction's commit to disk.
- */
-typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *);
/*
* This is the structure maintained for every active transaction.
*/
typedef struct xfs_trans {
unsigned int t_magic; /* magic number */
- xfs_log_callback_t t_logcb; /* log callback struct */
unsigned int t_type; /* transaction type */
unsigned int t_log_res; /* amt of log space resvd */
unsigned int t_log_count; /* count for perm log res */
@@ -132,7 +125,6 @@ typedef struct xfs_trans {
int64_t t_rextents_delta;/* superblocks rextents chg */
int64_t t_rextslog_delta;/* superblocks rextslog chg */
struct list_head t_items; /* log item descriptors */
- xfs_trans_header_t t_header; /* header for in-log trans */
struct list_head t_busy; /* list of busy extents */
unsigned long t_pflags; /* saved process flags state */
} xfs_trans_t;
@@ -237,10 +229,16 @@ void xfs_trans_log_efd_extent(xfs_trans_t *,
xfs_fsblock_t,
xfs_extlen_t);
int xfs_trans_commit(xfs_trans_t *, uint flags);
+int xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
void xfs_trans_cancel(xfs_trans_t *, int);
int xfs_trans_ail_init(struct xfs_mount *);
void xfs_trans_ail_destroy(struct xfs_mount *);
+void xfs_trans_buf_set_type(struct xfs_trans *, struct xfs_buf *,
+ enum xfs_blft);
+void xfs_trans_buf_copy_type(struct xfs_buf *dst_bp,
+ struct xfs_buf *src_bp);
+
extern kmem_zone_t *xfs_trans_zone;
extern kmem_zone_t *xfs_log_item_desc_zone;
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 21c6d7ddbc06..4b47cfebd25b 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -18,15 +18,16 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_error.h"
+#include "xfs_log.h"
#ifdef DEBUG
/*
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 8c75b8f67270..c035d11b7734 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -17,17 +17,15 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 54ee3c5dee76..cd2a10e15d3a 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -17,23 +17,18 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
#include "xfs_error.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
#include "xfs_trans_priv.h"
+#include "xfs_quota.h"
#include "xfs_qm.h"
STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *);
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index 8d71b16eccae..47978ba89dae 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -17,12 +17,13 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
+#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_extfree_item.h"
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 53dfe46f3680..1bba7f60d94c 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -17,18 +17,15 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_btree.h"
+#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index c52def0b441c..12e86af9d9b9 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -27,7 +27,6 @@ struct xfs_log_vec;
void xfs_trans_init(struct xfs_mount *);
-int xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
void xfs_trans_del_item(struct xfs_log_item *);
void xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
diff --git a/fs/xfs/xfs_trans_resv.c b/fs/xfs/xfs_trans_resv.c
index a65a3cc40610..d53d9f0627a7 100644
--- a/fs/xfs/xfs_trans_resv.c
+++ b/fs/xfs/xfs_trans_resv.c
@@ -18,27 +18,19 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
-#include "xfs_error.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
#include "xfs_inode.h"
-#include "xfs_btree.h"
+#include "xfs_bmap_btree.h"
#include "xfs_ialloc.h"
-#include "xfs_alloc.h"
-#include "xfs_extent_busy.h"
-#include "xfs_bmap.h"
-#include "xfs_bmap_util.h"
#include "xfs_quota.h"
+#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
diff --git a/fs/xfs/xfs_vnode.h b/fs/xfs/xfs_vnode.h
index db14d0c08682..3e8e797c6d11 100644
--- a/fs/xfs/xfs_vnode.h
+++ b/fs/xfs/xfs_vnode.h
@@ -25,14 +25,6 @@ struct xfs_inode;
struct attrlist_cursor_kern;
/*
- * Return values for xfs_inactive. A return value of
- * VN_INACTIVE_NOCACHE implies that the file system behavior
- * has disassociated its state and bhv_desc_t from the vnode.
- */
-#define VN_INACTIVE_CACHE 0
-#define VN_INACTIVE_NOCACHE 1
-
-/*
* Flags for read/write calls - same values as IRIX
*/
#define IO_ISDIRECT 0x00004 /* bypass page cache */
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index e01f35ea76ba..9d479073ba41 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -17,9 +17,13 @@
*/
#include "xfs.h"
+#include "xfs_format.h"
#include "xfs_log_format.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 1c16f821434f..d98c67001840 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -100,7 +100,9 @@
* ACPI PM timer
* FACS table (Waking vectors and Global Lock)
*/
+#ifndef ACPI_REDUCED_HARDWARE
#define ACPI_REDUCED_HARDWARE FALSE
+#endif
/******************************************************************************
*
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index cf051e05a8fe..4e280bd226dd 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -125,8 +125,9 @@ struct acpi_exception_info {
#define AE_NO_HANDLER EXCEP_ENV (0x001A)
#define AE_OWNER_ID_LIMIT EXCEP_ENV (0x001B)
#define AE_NOT_CONFIGURED EXCEP_ENV (0x001C)
+#define AE_ACCESS EXCEP_ENV (0x001D)
-#define AE_CODE_ENV_MAX 0x001C
+#define AE_CODE_ENV_MAX 0x001D
/*
* Programmer exceptions
@@ -227,7 +228,7 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
EXCEP_TXT("AE_NO_ACPI_TABLES", "ACPI tables could not be found"),
EXCEP_TXT("AE_NO_NAMESPACE", "A namespace has not been loaded"),
EXCEP_TXT("AE_NO_MEMORY", "Insufficient dynamic memory"),
- EXCEP_TXT("AE_NOT_FOUND", "The name was not found in the namespace"),
+ EXCEP_TXT("AE_NOT_FOUND", "A requested entity is not found"),
EXCEP_TXT("AE_NOT_EXIST", "A required entity does not exist"),
EXCEP_TXT("AE_ALREADY_EXISTS", "An entity already exists"),
EXCEP_TXT("AE_TYPE", "The object type is incorrect"),
@@ -259,7 +260,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
EXCEP_TXT("AE_OWNER_ID_LIMIT",
"There are no more Owner IDs available for ACPI tables or control methods"),
EXCEP_TXT("AE_NOT_CONFIGURED",
- "The interface is not part of the current subsystem configuration")
+ "The interface is not part of the current subsystem configuration"),
+ EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation")
};
static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index ce08ef7d969c..1f36777e26fe 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -46,25 +46,25 @@
/* Method names - these methods can appear anywhere in the namespace */
-#define METHOD_NAME__SB_ "_SB_"
-#define METHOD_NAME__HID "_HID"
-#define METHOD_NAME__CID "_CID"
-#define METHOD_NAME__UID "_UID"
-#define METHOD_NAME__SUB "_SUB"
#define METHOD_NAME__ADR "_ADR"
-#define METHOD_NAME__INI "_INI"
-#define METHOD_NAME__STA "_STA"
-#define METHOD_NAME__REG "_REG"
-#define METHOD_NAME__SEG "_SEG"
+#define METHOD_NAME__AEI "_AEI"
#define METHOD_NAME__BBN "_BBN"
-#define METHOD_NAME__PRT "_PRT"
+#define METHOD_NAME__CBA "_CBA"
+#define METHOD_NAME__CID "_CID"
#define METHOD_NAME__CRS "_CRS"
+#define METHOD_NAME__HID "_HID"
+#define METHOD_NAME__INI "_INI"
+#define METHOD_NAME__PLD "_PLD"
#define METHOD_NAME__PRS "_PRS"
-#define METHOD_NAME__AEI "_AEI"
+#define METHOD_NAME__PRT "_PRT"
#define METHOD_NAME__PRW "_PRW"
+#define METHOD_NAME__REG "_REG"
+#define METHOD_NAME__SB_ "_SB_"
+#define METHOD_NAME__SEG "_SEG"
#define METHOD_NAME__SRS "_SRS"
-#define METHOD_NAME__CBA "_CBA"
-#define METHOD_NAME__PLD "_PLD"
+#define METHOD_NAME__STA "_STA"
+#define METHOD_NAME__SUB "_SUB"
+#define METHOD_NAME__UID "_UID"
/* Method names - these methods must appear at the namespace root */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index d9019821aa60..15100f625e65 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -222,7 +222,8 @@ struct acpi_device_power_flags {
u32 power_resources:1; /* Power resources */
u32 inrush_current:1; /* Serialize Dx->D0 */
u32 power_removed:1; /* Optimize Dx->D0 */
- u32 reserved:28;
+ u32 ignore_parent:1; /* Power is independent of parent power state */
+ u32 reserved:27;
};
struct acpi_device_power_state {
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 64b8c7639520..01e6c6d8b7e1 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -77,54 +77,80 @@ struct acpi_signal_fatal_info {
/*
* OSL Initialization and shutdown primitives
*/
-acpi_status __init acpi_os_initialize(void);
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize
+acpi_status acpi_os_initialize(void);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate
acpi_status acpi_os_terminate(void);
+#endif
/*
* ACPI Table interfaces
*/
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_root_pointer
acpi_physical_address acpi_os_get_root_pointer(void);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_predefined_override
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
acpi_string * new_val);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_table_override
acpi_status
acpi_os_table_override(struct acpi_table_header *existing_table,
struct acpi_table_header **new_table);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_physical_table_override
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
acpi_physical_address * new_address,
u32 *new_table_length);
+#endif
/*
* Spinlock primitives
*/
-#ifndef acpi_os_create_lock
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
acpi_status acpi_os_create_lock(acpi_spinlock * out_handle);
#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_lock
void acpi_os_delete_lock(acpi_spinlock handle);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_lock
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_lock
void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags);
+#endif
/*
* Semaphore primitives
*/
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_semaphore
acpi_status
acpi_os_create_semaphore(u32 max_units,
u32 initial_units, acpi_semaphore * out_handle);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_semaphore
acpi_status acpi_os_delete_semaphore(acpi_semaphore handle);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_semaphore
acpi_status
acpi_os_wait_semaphore(acpi_semaphore handle, u32 units, u16 timeout);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_signal_semaphore
acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units);
+#endif
/*
* Mutex primitives. May be configured to use semaphores instead via
@@ -132,29 +158,48 @@ acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units);
*/
#if (ACPI_MUTEX_TYPE != ACPI_BINARY_SEMAPHORE)
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_mutex
acpi_status acpi_os_create_mutex(acpi_mutex * out_handle);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_mutex
void acpi_os_delete_mutex(acpi_mutex handle);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_mutex
acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_mutex
void acpi_os_release_mutex(acpi_mutex handle);
#endif
+#endif
+
/*
* Memory allocation and mapping
*/
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate
void *acpi_os_allocate(acpi_size size);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate_zeroed
+void *acpi_os_allocate_zeroed(acpi_size size);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_free
void acpi_os_free(void *memory);
+#endif
-void __iomem *acpi_os_map_memory(acpi_physical_address where,
- acpi_size length);
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_map_memory
+void *acpi_os_map_memory(acpi_physical_address where, acpi_size length);
+#endif
-void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size);
-void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size);
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_unmap_memory
+void acpi_os_unmap_memory(void *logical_address, acpi_size size);
+#endif
-#ifdef ACPI_FUTURE_USAGE
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_physical_address
acpi_status
acpi_os_get_physical_address(void *logical_address,
acpi_physical_address * physical_address);
@@ -163,117 +208,195 @@ acpi_os_get_physical_address(void *logical_address,
/*
* Memory/Object Cache
*/
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_cache
acpi_status
acpi_os_create_cache(char *cache_name,
u16 object_size,
u16 max_depth, acpi_cache_t ** return_cache);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_cache
acpi_status acpi_os_delete_cache(acpi_cache_t * cache);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_purge_cache
acpi_status acpi_os_purge_cache(acpi_cache_t * cache);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_object
void *acpi_os_acquire_object(acpi_cache_t * cache);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_object
acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object);
+#endif
/*
* Interrupt handlers
*/
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_install_interrupt_handler
acpi_status
acpi_os_install_interrupt_handler(u32 interrupt_number,
acpi_osd_handler service_routine,
void *context);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_remove_interrupt_handler
acpi_status
acpi_os_remove_interrupt_handler(u32 interrupt_number,
acpi_osd_handler service_routine);
-
-void acpi_os_gpe_count(u32 gpe_number);
-void acpi_os_fixed_event_count(u32 fixed_event_number);
+#endif
/*
* Threads and Scheduling
*/
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
acpi_thread_id acpi_os_get_thread_id(void);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_execute
acpi_status
acpi_os_execute(acpi_execute_type type,
acpi_osd_exec_callback function, void *context);
+#endif
-acpi_status
-acpi_os_hotplug_execute(acpi_osd_exec_callback function, void *context);
-
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_events_complete
void acpi_os_wait_events_complete(void);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_sleep
void acpi_os_sleep(u64 milliseconds);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_stall
void acpi_os_stall(u32 microseconds);
+#endif
/*
* Platform and hardware-independent I/O interfaces
*/
-acpi_status acpi_os_read_port(acpi_io_address address, u32 * value, u32 width);
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_port
+acpi_status acpi_os_read_port(acpi_io_address address, u32 *value, u32 width);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_port
acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width);
+#endif
/*
* Platform and hardware-independent physical memory interfaces
*/
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_memory
acpi_status
acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_memory
acpi_status
acpi_os_write_memory(acpi_physical_address address, u64 value, u32 width);
+#endif
/*
* Platform and hardware-independent PCI configuration space access
* Note: Can't use "Register" as a parameter, changed to "Reg" --
* certain compilers complain.
*/
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_pci_configuration
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id,
u32 reg, u64 *value, u32 width);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_pci_configuration
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id,
u32 reg, u64 value, u32 width);
+#endif
/*
* Miscellaneous
*/
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_readable
+u8 acpi_os_readable(void *pointer, acpi_size length);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_writable
+u8 acpi_os_writable(void *pointer, acpi_size length);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_timer
u64 acpi_os_get_timer(void);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_signal
acpi_status acpi_os_signal(u32 function, void *info);
+#endif
/*
* Debug print routines
*/
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_printf
void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *format, ...);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_vprintf
void acpi_os_vprintf(const char *format, va_list args);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_redirect_output
void acpi_os_redirect_output(void *destination);
+#endif
-#ifdef ACPI_FUTURE_USAGE
/*
* Debug input
*/
-u32 acpi_os_get_line(char *buffer);
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_line
+acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read);
+#endif
+
+/*
+ * Obtain ACPI table(s)
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_name
+acpi_status
+acpi_os_get_table_by_name(char *signature,
+ u32 instance,
+ struct acpi_table_header **table,
+ acpi_physical_address * address);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_index
+acpi_status
+acpi_os_get_table_by_index(u32 index,
+ struct acpi_table_header **table,
+ u32 *instance, acpi_physical_address * address);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_address
+acpi_status
+acpi_os_get_table_by_address(acpi_physical_address address,
+ struct acpi_table_header **table);
#endif
/*
* Directory manipulation
*/
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_open_directory
void *acpi_os_open_directory(char *pathname,
char *wildcard_spec, char requested_file_type);
+#endif
/* requested_file_type values */
#define REQUEST_FILE_ONLY 0
#define REQUEST_DIR_ONLY 1
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_next_filename
char *acpi_os_get_next_filename(void *dir_handle);
+#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_directory
void acpi_os_close_directory(void *dir_handle);
+#endif
#endif /* __ACPIOSXF_H__ */
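Each OSL prototype above is now wrapped in an ACPI_USE_ALTERNATE_PROTOTYPE_* guard so a host can suppress the default declaration and supply its own, typically as an inline in its platform header. A hypothetical host-header sketch of the pattern (my_host_msleep() is a made-up host primitive, not part of this patch):

static inline void acpi_os_sleep(u64 milliseconds)
{
	my_host_msleep(milliseconds);	/* whatever sleep primitive the host provides */
}
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_sleep

With the macro defined before acpiosxf.h is pulled in, its own acpi_os_sleep() prototype is skipped and the inline above is the only declaration the rest of ACPICA sees; the aclinux.h hunk later in this patch uses exactly this pattern for acpi_os_allocate(), acpi_os_free() and acpi_os_get_thread_id().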
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 85bfdbe17805..d8f9457755b4 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20130725
+#define ACPI_CA_VERSION 0x20130927
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -54,6 +54,7 @@
#include <acpi/acbuffer.h>
extern u8 acpi_gbl_permanent_mmap;
+extern u32 acpi_rsdt_forced;
/*
* Globals that are publically available
@@ -106,39 +107,41 @@ extern u8 acpi_gbl_disable_ssdt_table_load;
static ACPI_INLINE prototype {return(AE_OK);}
#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
- static ACPI_INLINE prototype {}
+ static ACPI_INLINE prototype {return;}
#endif /* !ACPI_REDUCED_HARDWARE */
-extern u32 acpi_rsdt_forced;
/*
* Initialization
*/
-acpi_status
+acpi_status __init
acpi_initialize_tables(struct acpi_table_desc *initial_storage,
u32 initial_table_count, u8 allow_resize);
acpi_status __init acpi_initialize_subsystem(void);
-acpi_status acpi_enable_subsystem(u32 flags);
+acpi_status __init acpi_enable_subsystem(u32 flags);
-acpi_status acpi_initialize_objects(u32 flags);
+acpi_status __init acpi_initialize_objects(u32 flags);
-acpi_status acpi_terminate(void);
+acpi_status __init acpi_terminate(void);
/*
* Miscellaneous global interfaces
*/
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable(void))
+
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable(void))
#ifdef ACPI_FUTURE_USAGE
-acpi_status acpi_subsystem_status(void);
+ acpi_status acpi_subsystem_status(void);
#endif
#ifdef ACPI_FUTURE_USAGE
acpi_status acpi_get_system_info(struct acpi_buffer *ret_buffer);
#endif
+acpi_status acpi_get_statistics(struct acpi_statistics *stats);
+
const char *acpi_format_exception(acpi_status exception);
acpi_status acpi_purge_cached_objects(void);
@@ -159,29 +162,20 @@ acpi_decode_pld_buffer(u8 *in_buffer,
acpi_size length, struct acpi_pld_info **return_buffer);
/*
- * ACPI Memory management
- */
-void *acpi_allocate(u32 size);
-
-void *acpi_callocate(u32 size);
-
-void acpi_free(void *address);
-
-/*
* ACPI table load/unload interfaces
*/
acpi_status acpi_load_table(struct acpi_table_header *table);
acpi_status acpi_unload_parent_table(acpi_handle object);
-acpi_status acpi_load_tables(void);
+acpi_status __init acpi_load_tables(void);
/*
* ACPI table manipulation interfaces
*/
-acpi_status acpi_reallocate_root_table(void);
+acpi_status __init acpi_reallocate_root_table(void);
-acpi_status acpi_find_root_pointer(acpi_size *rsdp_address);
+acpi_status __init acpi_find_root_pointer(acpi_size *rsdp_address);
acpi_status acpi_unload_table_id(acpi_owner_id id);
@@ -193,6 +187,7 @@ acpi_status
acpi_get_table_with_size(acpi_string signature,
u32 instance, struct acpi_table_header **out_table,
acpi_size *tbl_size);
+
acpi_status
acpi_get_table(acpi_string signature,
u32 instance, struct acpi_table_header **out_table);
@@ -280,8 +275,18 @@ acpi_status
acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_install_global_event_handler
- (acpi_gbl_event_handler handler, void *context))
+ acpi_install_sci_handler(acpi_sci_handler
+ address,
+ void *context))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_remove_sci_handler(acpi_sci_handler
+ address))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_install_global_event_handler
+ (acpi_gbl_event_handler handler,
+ void *context))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_install_fixed_event_handler(u32
@@ -290,10 +295,12 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
handler,
void
*context))
+
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_remove_fixed_event_handler(u32 acpi_event,
acpi_event_handler
handler))
+
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_install_gpe_handler(acpi_handle
gpe_device,
@@ -302,6 +309,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_gpe_handler
address,
void *context))
+
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_remove_gpe_handler(acpi_handle gpe_device,
u32 gpe_number,
@@ -338,6 +346,7 @@ acpi_status acpi_install_interface_handler(acpi_interface_handler handler);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_acquire_global_lock(u16 timeout,
u32 *handle))
+
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_release_global_lock(u32 handle))
@@ -364,6 +373,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_get_event_status(u32 event,
acpi_event_status
*event_status))
+
/*
* General Purpose Event (GPE) Interfaces
*/
@@ -394,10 +404,12 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
parent_device,
acpi_handle gpe_device,
u32 gpe_number))
+
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_set_gpe_wake_mask(acpi_handle gpe_device,
u32 gpe_number,
u8 action))
+
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_get_gpe_status(acpi_handle gpe_device,
u32 gpe_number,
@@ -419,6 +431,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
*gpe_block_address,
u32 register_count,
u32 interrupt_number))
+
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_remove_gpe_block(acpi_handle gpe_device))
@@ -493,13 +506,13 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
* Sleep/Wake interfaces
*/
acpi_status
-acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);
+acpi_get_sleep_type_data(u8 sleep_state, u8 *slp_typ_a, u8 *slp_typ_b);
acpi_status acpi_enter_sleep_state_prep(u8 sleep_state);
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state);
+acpi_status acpi_enter_sleep_state(u8 sleep_state);
-ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enter_sleep_state_s4bios(void))
acpi_status acpi_leave_sleep_state_prep(u8 sleep_state);
@@ -508,7 +521,6 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_set_firmware_waking_vector(u32
physical_address))
-
#if ACPI_MACHINE_WIDTH == 64
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_set_firmware_waking_vector64(u64
@@ -532,50 +544,53 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
/*
* Error/Warning output
*/
+ACPI_PRINTF_LIKE(3)
void ACPI_INTERNAL_VAR_XFACE
-acpi_error(const char *module_name,
- u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+acpi_error(const char *module_name, u32 line_number, const char *format, ...);
+ACPI_PRINTF_LIKE(4)
void ACPI_INTERNAL_VAR_XFACE
acpi_exception(const char *module_name,
- u32 line_number,
- acpi_status status, const char *format, ...) ACPI_PRINTF_LIKE(4);
+ u32 line_number, acpi_status status, const char *format, ...);
+ACPI_PRINTF_LIKE(3)
void ACPI_INTERNAL_VAR_XFACE
-acpi_warning(const char *module_name,
- u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+acpi_warning(const char *module_name, u32 line_number, const char *format, ...);
+ACPI_PRINTF_LIKE(3)
void ACPI_INTERNAL_VAR_XFACE
-acpi_info(const char *module_name,
- u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+acpi_info(const char *module_name, u32 line_number, const char *format, ...);
+ACPI_PRINTF_LIKE(3)
void ACPI_INTERNAL_VAR_XFACE
acpi_bios_error(const char *module_name,
- u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+ u32 line_number, const char *format, ...);
+ACPI_PRINTF_LIKE(3)
void ACPI_INTERNAL_VAR_XFACE
acpi_bios_warning(const char *module_name,
- u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+ u32 line_number, const char *format, ...);
/*
* Debug output
*/
#ifdef ACPI_DEBUG_OUTPUT
+ACPI_PRINTF_LIKE(6)
void ACPI_INTERNAL_VAR_XFACE
acpi_debug_print(u32 requested_debug_level,
u32 line_number,
const char *function_name,
const char *module_name,
- u32 component_id, const char *format, ...) ACPI_PRINTF_LIKE(6);
+ u32 component_id, const char *format, ...);
+ACPI_PRINTF_LIKE(6)
void ACPI_INTERNAL_VAR_XFACE
acpi_debug_print_raw(u32 requested_debug_level,
u32 line_number,
const char *function_name,
const char *module_name,
- u32 component_id,
- const char *format, ...) ACPI_PRINTF_LIKE(6);
+ u32 component_id, const char *format, ...);
#endif
#endif /* __ACXFACE_H__ */
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 9b58a8f43771..94970880126f 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -146,7 +146,24 @@ struct acpi_table_rsdp {
u8 reserved[3]; /* Reserved, must be zero */
};
-#define ACPI_RSDP_REV0_SIZE 20 /* Size of original ACPI 1.0 RSDP */
+/* Standalone struct for the ACPI 1.0 RSDP */
+
+struct acpi_rsdp_common {
+ char signature[8];
+ u8 checksum;
+ char oem_id[ACPI_OEM_ID_SIZE];
+ u8 revision;
+ u32 rsdt_physical_address;
+};
+
+/* Standalone struct for the extended part of the RSDP (ACPI 2.0+) */
+
+struct acpi_rsdp_extension {
+ u32 length;
+ u64 xsdt_physical_address;
+ u8 extended_checksum;
+ u8 reserved[3];
+};
/*******************************************************************************
*
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 0bd750ebeb49..556c83ee6b42 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -596,7 +596,7 @@ struct acpi_hest_generic {
/* Generic Error Status block */
-struct acpi_hest_generic_status {
+struct acpi_generic_status {
u32 block_status;
u32 raw_data_offset;
u32 raw_data_length;
@@ -606,15 +606,15 @@ struct acpi_hest_generic_status {
/* Values for block_status flags above */
-#define ACPI_HEST_UNCORRECTABLE (1)
-#define ACPI_HEST_CORRECTABLE (1<<1)
-#define ACPI_HEST_MULTIPLE_UNCORRECTABLE (1<<2)
-#define ACPI_HEST_MULTIPLE_CORRECTABLE (1<<3)
-#define ACPI_HEST_ERROR_ENTRY_COUNT (0xFF<<4) /* 8 bits, error count */
+#define ACPI_GEN_ERR_UC BIT(0)
+#define ACPI_GEN_ERR_CE BIT(1)
+#define ACPI_GEN_ERR_MULTI_UC BIT(2)
+#define ACPI_GEN_ERR_MULTI_CE BIT(3)
+#define ACPI_GEN_ERR_COUNT_SHIFT (0xFF<<4) /* 8 bits, error count */
/* Generic Error Data entry */
-struct acpi_hest_generic_data {
+struct acpi_generic_data {
u8 section_type[16];
u32 error_severity;
u16 revision;
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index ffaac0e7e0c6..40f7ed115452 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -444,8 +444,8 @@ enum acpi_dmar_scope_type {
};
struct acpi_dmar_pci_path {
- u8 dev;
- u8 fn;
+ u8 device;
+ u8 function;
};
/*
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index b748aefce929..809b1a0fee7f 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -299,13 +299,57 @@ typedef u32 acpi_physical_address;
#endif
/*
- * All ACPICA functions that are available to the rest of the kernel are
- * tagged with this macro which can be defined as appropriate for the host.
+ * All ACPICA external functions that are available to the rest of the kernel
+ * are tagged with these macros which can be defined as appropriate for the host.
+ *
+ * Notes:
+ * ACPI_EXPORT_SYMBOL_INIT is used for initialization and termination
+ * interfaces that may need special processing.
+ * ACPI_EXPORT_SYMBOL is used for all other public external functions.
*/
+#ifndef ACPI_EXPORT_SYMBOL_INIT
+#define ACPI_EXPORT_SYMBOL_INIT(symbol)
+#endif
+
#ifndef ACPI_EXPORT_SYMBOL
#define ACPI_EXPORT_SYMBOL(symbol)
#endif
+/*
+ * Compiler/Clibrary-dependent debug initialization. Used for ACPICA
+ * utilities only.
+ */
+#ifndef ACPI_DEBUG_INITIALIZE
+#define ACPI_DEBUG_INITIALIZE()
+#endif
+
+/*******************************************************************************
+ *
+ * Configuration
+ *
+ ******************************************************************************/
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+/*
+ * Memory allocation tracking (used by acpi_exec to detect memory leaks)
+ */
+#define ACPI_MEM_PARAMETERS _COMPONENT, _acpi_module_name, __LINE__
+#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track ((acpi_size) (a), ACPI_MEM_PARAMETERS)
+#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track ((acpi_size) (a), ACPI_MEM_PARAMETERS)
+#define ACPI_FREE(a) acpi_ut_free_and_track (a, ACPI_MEM_PARAMETERS)
+#define ACPI_MEM_TRACKING(a) a
+
+#else
+/*
+ * Normal memory allocation directly via the OS services layer
+ */
+#define ACPI_ALLOCATE(a) acpi_os_allocate ((acpi_size) (a))
+#define ACPI_ALLOCATE_ZEROED(a) acpi_os_allocate_zeroed ((acpi_size) (a))
+#define ACPI_FREE(a) acpi_os_free (a)
+#define ACPI_MEM_TRACKING(a)
+
+#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
+
/******************************************************************************
*
* ACPI Specification constants (Do not change unless the specification changes)
@@ -322,6 +366,7 @@ typedef u32 acpi_physical_address;
#define ACPI_PM1_REGISTER_WIDTH 16
#define ACPI_PM2_REGISTER_WIDTH 8
#define ACPI_PM_TIMER_WIDTH 32
+#define ACPI_RESET_REGISTER_WIDTH 8
/* Names within the namespace are 4 bytes long */
@@ -474,6 +519,11 @@ typedef u64 acpi_integer;
#define ACPI_MOVE_NAME(dest,src) (ACPI_STRNCPY (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE))
#endif
+/* Support for the special RSDP signature (8 characters) */
+
+#define ACPI_VALIDATE_RSDP_SIG(a) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
+#define ACPI_MAKE_RSDP_SIG(dest) (ACPI_MEMCPY (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
+
/*******************************************************************************
*
* Miscellaneous constants
@@ -886,9 +936,13 @@ struct acpi_buffer {
void *pointer; /* pointer to buffer */
};
-/* Free a buffer created in an struct acpi_buffer via ACPI_ALLOCATE_LOCAL_BUFFER */
-
-#define ACPI_FREE_BUFFER(b) ACPI_FREE(b.pointer)
+/*
+ * Free a buffer created in a struct acpi_buffer via ACPI_ALLOCATE_BUFFER.
+ * Note: We use acpi_os_free here because acpi_os_allocate was used to allocate
+ * the buffer. This purposefully bypasses the internal allocation tracking
+ * mechanism (if it is enabled).
+ */
+#define ACPI_FREE_BUFFER(b) acpi_os_free((b).pointer)
/*
* name_type for acpi_get_name
@@ -927,6 +981,16 @@ struct acpi_system_info {
u32 debug_layer;
};
+/*
+ * System statistics returned by acpi_get_statistics()
+ */
+struct acpi_statistics {
+ u32 sci_count;
+ u32 gpe_count;
+ u32 fixed_event_count[ACPI_NUM_FIXED_EVENTS];
+ u32 method_count;
+};
+
/* Table Event Types */
#define ACPI_TABLE_EVENT_LOAD 0x0
@@ -946,6 +1010,9 @@ typedef void
* Various handlers and callback procedures
*/
typedef
+u32 (*acpi_sci_handler) (void *context);
+
+typedef
void (*acpi_gbl_event_handler) (u32 event_type,
acpi_handle device,
u32 event_number, void *context);
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 720446cb243e..dfd60d0bfd27 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -14,7 +14,7 @@
struct ghes {
struct acpi_hest_generic *generic;
- struct acpi_hest_generic_status *estatus;
+ struct acpi_generic_status *estatus;
u64 buffer_paddr;
unsigned long flags;
union {
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index ef04b36ca6ed..974d3ef7c141 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -96,10 +96,11 @@
#endif
/*
- * acpi_bin/acpi_help/acpi_src configuration. All single threaded, with
- * no debug output.
+ * acpi_bin/acpi_dump/acpi_src/acpi_xtract configuration. All single
+ * threaded, with no debug output.
*/
#if (defined ACPI_BIN_APP) || \
+ (defined ACPI_DUMP_APP) || \
(defined ACPI_SRC_APP) || \
(defined ACPI_XTRACT_APP)
#define ACPI_APPLICATION
@@ -147,6 +148,9 @@
#if defined(_LINUX) || defined(__linux__)
#include <acpi/platform/aclinux.h>
+#elif defined(_APPLE) || defined(__APPLE__)
+#include "acmacosx.h"
+
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include "acfreebsd.h"
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 68534ef86ec8..ab57930794a5 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -50,7 +50,6 @@
#define ACPI_USE_DO_WHILE_0
#define ACPI_MUTEX_TYPE ACPI_BINARY_SEMAPHORE
-
#ifdef __KERNEL__
#include <linux/string.h>
@@ -58,11 +57,13 @@
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/atomic.h>
-#include <asm/div64.h>
-#include <asm/acpi.h>
+#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/spinlock_types.h>
-#include <asm/current.h>
+#ifdef EXPORT_ACPI_INTERFACES
+#include <linux/export.h>
+#endif
+#include <asm/acpi.h>
/* Host-dependent types and defines for in-kernel ACPICA */
@@ -74,7 +75,7 @@
#define acpi_spinlock spinlock_t *
#define acpi_cpu_flags unsigned long
-#else /* !__KERNEL__ */
+#else /* !__KERNEL__ */
#include <stdarg.h>
#include <string.h>
@@ -87,7 +88,7 @@
#define ACPI_FLUSH_CPU_CACHE()
#define ACPI_CAST_PTHREAD_T(pthread) ((acpi_thread_id) (pthread))
-#if defined(__ia64__) || defined(__x86_64__)
+#if defined(__ia64__) || defined(__x86_64__) || defined(__aarch64__)
#define ACPI_MACHINE_WIDTH 64
#define COMPILER_DEPENDENT_INT64 long
#define COMPILER_DEPENDENT_UINT64 unsigned long
@@ -102,21 +103,35 @@
#define __cdecl
#endif
-#endif /* __KERNEL__ */
+#endif /* __KERNEL__ */
/* Linux uses GCC */
#include <acpi/platform/acgcc.h>
#ifdef __KERNEL__
+
+/*
+ * FIXME: Inclusion of actypes.h
+ * The Linux kernel needs this before defining the inline OSL interfaces, as
+ * actypes.h must be included to find the ACPICA type definitions.
+ * Since, from ACPICA's perspective, actypes.h should be included after
+ * acenv.h (aclinux.h), this leads to an inclusion mis-ordering issue.
+ */
#include <acpi/actypes.h>
+
/*
* Overrides for in-kernel ACPICA
*/
-static inline acpi_thread_id acpi_os_get_thread_id(void)
-{
- return (acpi_thread_id)(unsigned long)current;
-}
+acpi_status __init acpi_os_initialize(void);
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize
+
+acpi_status acpi_os_terminate(void);
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate
+
+/*
+ * Memory allocation/deallocation
+ */
/*
* The irqs_disabled() check is for resume from RAM.
@@ -126,25 +141,45 @@ static inline acpi_thread_id acpi_os_get_thread_id(void)
*/
static inline void *acpi_os_allocate(acpi_size size)
{
- return kmalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
+ return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
}
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate
+
+/* Use native linux version of acpi_os_allocate_zeroed */
+
static inline void *acpi_os_allocate_zeroed(acpi_size size)
{
- return kzalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
+ return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
}
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate_zeroed
+#define USE_NATIVE_ALLOCATE_ZEROED
+
+static inline void acpi_os_free(void *memory)
+{
+ kfree(memory);
+}
+
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_free
+
static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
{
return kmem_cache_zalloc(cache,
- irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
+ irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
}
-#define ACPI_ALLOCATE(a) acpi_os_allocate(a)
-#define ACPI_ALLOCATE_ZEROED(a) acpi_os_allocate_zeroed(a)
-#define ACPI_FREE(a) kfree(a)
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_object
+
+static inline acpi_thread_id acpi_os_get_thread_id(void)
+{
+ return (acpi_thread_id) (unsigned long)current;
+}
+
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
#ifndef CONFIG_PREEMPT
+
/*
* Used within ACPICA to show where it is safe to preempt execution
* when CONFIG_PREEMPT=n
@@ -154,6 +189,7 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
if (!irqs_disabled()) \
cond_resched(); \
} while (0)
+
#endif
/*
@@ -163,17 +199,53 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
* all locks to the name of the argument of acpi_os_create_lock(), which
* prevents lockdep from reporting false positives for ACPICA locks.
*/
-#define acpi_os_create_lock(__handle) \
-({ \
- spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
- \
- if (lock) { \
- *(__handle) = lock; \
- spin_lock_init(*(__handle)); \
- } \
- lock ? AE_OK : AE_NO_MEMORY; \
-})
-
-#endif /* __KERNEL__ */
-
-#endif /* __ACLINUX_H__ */
+#define acpi_os_create_lock(__handle) \
+ ({ \
+ spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
+ if (lock) { \
+ *(__handle) = lock; \
+ spin_lock_init(*(__handle)); \
+ } \
+ lock ? AE_OK : AE_NO_MEMORY; \
+ })
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
+
+void __iomem *acpi_os_map_memory(acpi_physical_address where, acpi_size length);
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_map_memory
+
+void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size);
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_unmap_memory
+
+/*
+ * OSL interfaces used by debugger/disassembler
+ */
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_readable
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_writable
+
+/*
+ * OSL interfaces used by utilities
+ */
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_redirect_output
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_line
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_name
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_index
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_address
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_open_directory
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_next_filename
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_directory
+
+/*
+ * OSL interfaces added by Linux
+ */
+void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size);
+
+void acpi_os_gpe_count(u32 gpe_number);
+
+void acpi_os_fixed_event_count(u32 fixed_event_number);
+
+acpi_status
+acpi_os_hotplug_execute(acpi_osd_exec_callback function, void *context);
+
+#endif /* __KERNEL__ */
+
+#endif /* __ACLINUX_H__ */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 66096d06925e..6eb1d3cb5104 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -199,6 +199,7 @@ struct acpi_processor_flags {
struct acpi_processor {
acpi_handle handle;
u32 acpi_id;
+ u32 apic_id;
u32 id;
u32 pblk;
int performance_platform_limit;
@@ -224,7 +225,6 @@ struct acpi_processor_errata {
} piix4;
};
-extern void acpi_processor_load_module(struct acpi_processor *pr);
extern int acpi_processor_preregister_performance(struct
acpi_processor_performance
__percpu *performance);
@@ -314,6 +314,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
/* in processor_core.c */
void acpi_processor_set_pdc(acpi_handle handle);
+int acpi_get_apicid(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(int apic_id, u32 acpi_id);
int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
/* in processor_throttling.c */
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index aea9e45efce6..14909b0b9cae 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -53,7 +53,7 @@
#elif defined(CONFIG_SPARSEMEM)
/*
- * Note: section's mem_map is encorded to reflect its start_pfn.
+ * Note: section's mem_map is encoded to reflect its start_pfn.
* section[i].section_mem_map == mem_map's address - start_pfn;
*/
#define __page_to_pfn(pg) \
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
new file mode 100644
index 000000000000..ddf2b420ac8f
--- /dev/null
+++ b/include/asm-generic/preempt.h
@@ -0,0 +1,105 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <linux/thread_info.h>
+
+/*
+ * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
+ * that think a non-zero value indicates we cannot preempt.
+ */
+static __always_inline int preempt_count(void)
+{
+ return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
+}
+
+static __always_inline int *preempt_count_ptr(void)
+{
+ return &current_thread_info()->preempt_count;
+}
+
+/*
+ * We now lose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
+ * alternative is losing a reschedule. Better to schedule too often -- also this
+ * should be a very rare operation.
+ */
+static __always_inline void preempt_count_set(int pc)
+{
+ *preempt_count_ptr() = pc;
+}
+
+/*
+ * must be macros to avoid header recursion hell
+ */
+#define task_preempt_count(p) \
+ (task_thread_info(p)->preempt_count & ~PREEMPT_NEED_RESCHED)
+
+#define init_task_preempt_count(p) do { \
+ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+/*
+ * We fold the NEED_RESCHED bit into the preempt count such that
+ * preempt_enable() can decrement and test for needing to reschedule with a
+ * single instruction.
+ *
+ * We invert the actual bit, so that when the decrement hits 0 we know we both
+ * need to resched (the bit is cleared) and can resched (no preempt count).
+ */
+
+static __always_inline void set_preempt_need_resched(void)
+{
+ *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+ *preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+ return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+}
+
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+ *preempt_count_ptr() += val;
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+ *preempt_count_ptr() -= val;
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+ return !--*preempt_count_ptr();
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(void)
+{
+ return unlikely(!*preempt_count_ptr());
+}
+
+#ifdef CONFIG_PREEMPT
+extern asmlinkage void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+
+#ifdef CONFIG_CONTEXT_TRACKING
+extern asmlinkage void preempt_schedule_context(void);
+#define __preempt_schedule_context() preempt_schedule_context()
+#endif
+#endif /* CONFIG_PREEMPT */
+
+#endif /* __ASM_PREEMPT_H */
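The folding trick above is easiest to see with concrete values. Below is a hypothetical stand-alone user-space illustration (PREEMPT_NEED_RESCHED is assumed here to be the top bit of the count word; the names mirror the header, but this is not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u

/* bit set => no reschedule needed (the bit is stored inverted) */
static unsigned int pc = PREEMPT_NEED_RESCHED;	/* "preempt enabled" state */

static void set_need_resched(void) { pc &= ~PREEMPT_NEED_RESCHED; }
static bool dec_and_test(void)     { return !--pc; }

int main(void)
{
	pc += 1;		/* preempt_disable() */
	set_need_resched();	/* a wakeup marks the task for reschedule */
	if (dec_and_test())	/* preempt_enable(): the word hits 0 only when
				 * the count is zero AND resched is pending */
		puts("would call __preempt_schedule()");
	return 0;
}

Because the bit is inverted, the single decrement-and-test in preempt_enable() sees "no preempt count left and a reschedule pending" as the whole word reaching zero, which is the point of the scheme.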
diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h
new file mode 100644
index 000000000000..f57eb7b5c23b
--- /dev/null
+++ b/include/asm-generic/simd.h
@@ -0,0 +1,14 @@
+
+#include <linux/hardirq.h>
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ * instructions or access the SIMD register file
+ *
+ * As architectures typically don't preserve the SIMD register file when
+ * taking an interrupt, !in_interrupt() should be a reasonable default.
+ */
+static __must_check inline bool may_use_simd(void)
+{
+ return !in_interrupt();
+}
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 83e2c31e8b00..bc2121fa9132 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -473,6 +473,7 @@
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
*(.ctors) \
+ *(.init_array) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index 93b7f96f9c59..6d26b40cbf5d 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -33,6 +33,16 @@ enum arch_timer_reg {
#define ARCH_TIMER_MEM_PHYS_ACCESS 2
#define ARCH_TIMER_MEM_VIRT_ACCESS 3
+#define ARCH_TIMER_USR_PCT_ACCESS_EN (1 << 0) /* physical counter */
+#define ARCH_TIMER_USR_VCT_ACCESS_EN (1 << 1) /* virtual counter */
+#define ARCH_TIMER_VIRT_EVT_EN (1 << 2)
+#define ARCH_TIMER_EVT_TRIGGER_SHIFT (4)
+#define ARCH_TIMER_EVT_TRIGGER_MASK (0xF << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+#define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */
+#define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */
+
+#define ARCH_TIMER_EVT_STREAM_FREQ 10000 /* 100us */
+
#ifdef CONFIG_ARM_ARCH_TIMER
extern u32 arch_timer_get_rate(void);
diff --git a/arch/x86/include/asm/crypto/ablk_helper.h b/include/crypto/ablk_helper.h
index 4f93df50c23e..4f93df50c23e 100644
--- a/arch/x86/include/asm/crypto/ablk_helper.h
+++ b/include/crypto/ablk_helper.h
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 418d270e1806..e73c19e90e38 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -386,5 +386,21 @@ static inline int crypto_requires_sync(u32 type, u32 mask)
return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
}
-#endif /* _CRYPTO_ALGAPI_H */
+noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
+
+/**
+ * crypto_memneq - Compare two areas of memory without leaking
+ * timing information.
+ *
+ * @a: One area of memory
+ * @b: Another area of memory
+ * @size: The size of the area.
+ *
+ * Returns 0 when data is equal, 1 otherwise.
+ */
+static inline int crypto_memneq(const void *a, const void *b, size_t size)
+{
+ return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
+}
+#endif /* _CRYPTO_ALGAPI_H */
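The typical consumer of crypto_memneq() is authentication-tag verification, where an early-exit memcmp() would leak how many leading bytes of the expected tag were correct. A minimal hypothetical sketch (check_tag() is a made-up helper, not part of the API above):

static int check_tag(const u8 *computed, const u8 *received, size_t len)
{
	/* constant-time: runtime does not depend on where the first
	 * differing byte is, so no timing information about the tag leaks */
	return crypto_memneq(computed, received, len) ? -EBADMSG : 0;
}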
diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h
index e47b044929a8..6775059539b5 100644
--- a/include/crypto/authenc.h
+++ b/include/crypto/authenc.h
@@ -23,5 +23,15 @@ struct crypto_authenc_key_param {
__be32 enckeylen;
};
-#endif /* _CRYPTO_AUTHENC_H */
+struct crypto_authenc_keys {
+ const u8 *authkey;
+ const u8 *enckey;
+
+ unsigned int authkeylen;
+ unsigned int enckeylen;
+};
+int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ unsigned int keylen);
+
+#endif /* _CRYPTO_AUTHENC_H */
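crypto_authenc_extractkeys() splits the single key blob passed to an authenc-style setkey() into its authentication and encryption halves. A hypothetical setkey() fragment showing the intended pattern (my_aead_setkey() is illustrative only):

static int my_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		return -EINVAL;

	/* keys.authkey (authkeylen bytes) and keys.enckey (enckeylen bytes)
	 * now point into 'key' and can be handed to the hash and cipher
	 * transforms; that part is omitted here. */
	return 0;
}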
diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h
new file mode 100644
index 000000000000..e1e5a3e5dd1b
--- /dev/null
+++ b/include/crypto/hash_info.h
@@ -0,0 +1,40 @@
+/*
+ * Hash Info: Hash algorithms information
+ *
+ * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_HASH_INFO_H
+#define _CRYPTO_HASH_INFO_H
+
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+
+#include <uapi/linux/hash_info.h>
+
+/* not defined in include/crypto/ */
+#define RMD128_DIGEST_SIZE 16
+#define RMD160_DIGEST_SIZE 20
+#define RMD256_DIGEST_SIZE 32
+#define RMD320_DIGEST_SIZE 40
+
+/* not defined in include/crypto/ */
+#define WP512_DIGEST_SIZE 64
+#define WP384_DIGEST_SIZE 48
+#define WP256_DIGEST_SIZE 32
+
+/* not defined in include/crypto/ */
+#define TGR128_DIGEST_SIZE 16
+#define TGR160_DIGEST_SIZE 20
+#define TGR192_DIGEST_SIZE 24
+
+extern const char *const hash_algo_name[HASH_ALGO__LAST];
+extern const int hash_digest_size[HASH_ALGO__LAST];
+
+#endif /* _CRYPTO_HASH_INFO_H */
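The two lookup tables declared above are indexed by the shared enum hash_algo from <uapi/linux/hash_info.h>, so callers can translate a numeric algorithm id into its crypto-API name or digest length. A minimal hypothetical use:

	const char *name = hash_algo_name[HASH_ALGO_SHA256];	/* "sha256" */
	int dlen = hash_digest_size[HASH_ALGO_SHA256];		/* 32 */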
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index f5b0224c9967..fc09732613ad 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,6 +15,7 @@
#define _LINUX_PUBLIC_KEY_H
#include <linux/mpi.h>
+#include <crypto/hash_info.h>
enum pkey_algo {
PKEY_ALGO_DSA,
@@ -22,21 +23,11 @@ enum pkey_algo {
PKEY_ALGO__LAST
};
-extern const char *const pkey_algo[PKEY_ALGO__LAST];
+extern const char *const pkey_algo_name[PKEY_ALGO__LAST];
+extern const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST];
-enum pkey_hash_algo {
- PKEY_HASH_MD4,
- PKEY_HASH_MD5,
- PKEY_HASH_SHA1,
- PKEY_HASH_RIPE_MD_160,
- PKEY_HASH_SHA256,
- PKEY_HASH_SHA384,
- PKEY_HASH_SHA512,
- PKEY_HASH_SHA224,
- PKEY_HASH__LAST
-};
-
-extern const char *const pkey_hash_algo[PKEY_HASH__LAST];
+/* asymmetric key implementation supports only up to SHA224 */
+#define PKEY_HASH__LAST (HASH_ALGO_SHA224 + 1)
enum pkey_id_type {
PKEY_ID_PGP, /* OpenPGP generated key ID */
@@ -44,7 +35,7 @@ enum pkey_id_type {
PKEY_ID_TYPE__LAST
};
-extern const char *const pkey_id_type[PKEY_ID_TYPE__LAST];
+extern const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST];
/*
* Cryptographic data for the public-key subtype of the asymmetric key type.
@@ -59,6 +50,7 @@ struct public_key {
#define PKEY_CAN_DECRYPT 0x02
#define PKEY_CAN_SIGN 0x04
#define PKEY_CAN_VERIFY 0x08
+ enum pkey_algo pkey_algo : 8;
enum pkey_id_type id_type : 8;
union {
MPI mpi[5];
@@ -88,7 +80,8 @@ struct public_key_signature {
u8 *digest;
u8 digest_size; /* Number of bytes in digest */
u8 nr_mpi; /* Occupancy of mpi[] */
- enum pkey_hash_algo pkey_hash_algo : 8;
+ enum pkey_algo pkey_algo : 8;
+ enum hash_algo pkey_hash_algo : 8;
union {
MPI mpi[2];
struct {
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index b46fb45f2cca..e6d0cd9f518e 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -150,6 +150,7 @@ int drm_err(const char *func, const char *format, ...);
#define DRIVER_BUS_PCI 0x1
#define DRIVER_BUS_PLATFORM 0x2
#define DRIVER_BUS_USB 0x3
+#define DRIVER_BUS_HOST1X 0x4
/***********************************************************************/
/** \name Begin the DRM... */
@@ -433,6 +434,9 @@ struct drm_file {
struct drm_master *master; /* master this node is currently associated with
N.B. not always minor->master */
+ /* true when the client has asked us to expose stereo 3D mode flags */
+ bool stereo_allowed;
+
/**
* fbs - List of framebuffers associated with this file.
*
@@ -667,8 +671,6 @@ struct drm_gem_object {
uint32_t pending_read_domains;
uint32_t pending_write_domain;
- void *driver_private;
-
/**
* dma_buf - dma buf associated with this GEM object
*
@@ -922,7 +924,6 @@ struct drm_driver {
*
* Returns 0 on success.
*/
- int (*gem_init_object) (struct drm_gem_object *obj);
void (*gem_free_object) (struct drm_gem_object *obj);
int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
@@ -1046,7 +1047,7 @@ struct drm_minor {
int index; /**< Minor device number */
int type; /**< Control or render */
dev_t device; /**< Device number for mknod */
- struct device kdev; /**< Linux device */
+ struct device *kdev; /**< Linux device */
struct drm_device *dev;
struct dentry *debugfs_root;
@@ -1081,6 +1082,19 @@ struct drm_pending_vblank_event {
struct drm_event_vblank event;
};
+struct drm_vblank_crtc {
+ wait_queue_head_t queue; /**< VBLANK wait queue */
+ struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */
+ atomic_t count; /**< number of VBLANK interrupts */
+ atomic_t refcount; /* number of users of vblank interrupts per crtc */
+ u32 last; /* protected by dev->vbl_lock, used */
+ /* for wraparound handling */
+ u32 last_wait; /* Last vblank seqno waited per CRTC */
+ unsigned int inmodeset; /* Display driver is setting mode */
+ bool enabled; /* so we don't call enable more than
+ once per disable */
+};
+
/**
* DRM device structure. This structure represent a complete card that
* may contain multiple heads.
@@ -1105,25 +1119,16 @@ struct drm_device {
atomic_t buf_alloc; /**< Buffer allocation in progress */
/*@} */
- /** \name Performance counters */
- /*@{ */
- unsigned long counters;
- enum drm_stat_type types[15];
- atomic_t counts[15];
- /*@} */
-
struct list_head filelist;
/** \name Memory management */
/*@{ */
struct list_head maplist; /**< Linked list of regions */
- int map_count; /**< Number of mappable regions */
struct drm_open_hash map_hash; /**< User token hash table for maps */
/** \name Context handle management */
/*@{ */
struct list_head ctxlist; /**< Linked list of context handles */
- int ctx_count; /**< Number of context handles */
struct mutex ctxlist_mutex; /**< For ctxlist */
struct idr ctx_idr;
@@ -1139,12 +1144,11 @@ struct drm_device {
/** \name Context support */
/*@{ */
- int irq_enabled; /**< True if irq handler is enabled */
+ bool irq_enabled; /**< True if irq handler is enabled */
__volatile__ long context_flag; /**< Context swapping flag */
int last_context; /**< Last current context */
/*@} */
- struct work_struct work;
/** \name VBLANK IRQ support */
/*@{ */
@@ -1154,20 +1158,13 @@ struct drm_device {
* Once the modeset ioctl *has* been called though, we can safely
* disable them when unused.
*/
- int vblank_disable_allowed;
+ bool vblank_disable_allowed;
+
+ /* array of size num_crtcs */
+ struct drm_vblank_crtc *vblank;
- wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
- atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
- struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
spinlock_t vbl_lock;
- atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */
- u32 *last_vblank; /* protected by dev->vbl_lock, used */
- /* for wraparound handling */
- int *vblank_enabled; /* so we don't call enable more than
- once per disable */
- int *vblank_inmodeset; /* Display driver is setting mode */
- u32 *last_vblank_wait; /* Last vblank seqno waited per CRTC */
struct timer_list vblank_disable_timer;
u32 max_vblank_count; /**< size of vblank counter register */
@@ -1184,8 +1181,6 @@ struct drm_device {
struct device *dev; /**< Device structure */
struct pci_dev *pdev; /**< PCI device structure */
- int pci_vendor; /**< PCI vendor id */
- int pci_device; /**< PCI device id */
#ifdef __alpha__
struct pci_controller *hose;
#endif
@@ -1303,6 +1298,8 @@ extern int drm_getstats(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_getcap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int drm_setclientcap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern int drm_setversion(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_noop(struct drm_device *dev, void *data,
@@ -1556,8 +1553,6 @@ int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
-struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
- size_t size);
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
@@ -1645,9 +1640,11 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
#include <drm/drm_mem_util.h>
-extern int drm_fill_in_dev(struct drm_device *dev,
- const struct pci_device_id *ent,
- struct drm_driver *driver);
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent);
+void drm_dev_free(struct drm_device *dev);
+int drm_dev_register(struct drm_device *dev, unsigned long flags);
+void drm_dev_unregister(struct drm_device *dev);
int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
/*@}*/
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 24f499569a2f..d3a91ade1d37 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -108,6 +108,7 @@ enum drm_mode_status {
MODE_ONE_HEIGHT, /* only one height is supported */
MODE_ONE_SIZE, /* only one resolution is supported */
MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
+ MODE_NO_STEREO, /* stereo modes not supported */
MODE_UNVERIFIED = -3, /* mode needs to reverified */
MODE_BAD = -2, /* unspecified reason */
MODE_ERROR = -1 /* error condition */
@@ -124,7 +125,10 @@ enum drm_mode_status {
.vscan = (vs), .flags = (f), \
.base.type = DRM_MODE_OBJECT_MODE
-#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
+#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
+#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
+
+#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
struct drm_display_mode {
/* Header */
@@ -155,8 +159,7 @@ struct drm_display_mode {
int height_mm;
/* Actual mode we give to hw */
- int clock_index;
- int synth_clock;
+ int crtc_clock; /* in KHz */
int crtc_hdisplay;
int crtc_hblank_start;
int crtc_hblank_end;
@@ -180,6 +183,11 @@ struct drm_display_mode {
int hsync; /* in kHz */
};
+static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
+{
+ return mode->flags & DRM_MODE_FLAG_3D_MASK;
+}
+
enum drm_connector_status {
connector_status_connected = 1,
connector_status_disconnected = 2,
@@ -587,7 +595,7 @@ enum drm_connector_force {
*/
struct drm_connector {
struct drm_device *dev;
- struct device kdev;
+ struct device *kdev;
struct device_attribute *attr;
struct list_head head;
@@ -597,6 +605,7 @@ struct drm_connector {
int connector_type_id;
bool interlace_allowed;
bool doublescan_allowed;
+ bool stereo_allowed;
struct list_head modes; /* list of modes on this connector */
enum drm_connector_status status;
@@ -964,6 +973,7 @@ extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_m
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
+extern struct edid *drm_edid_duplicate(const struct edid *edid);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
@@ -975,7 +985,7 @@ extern void drm_mode_config_reset(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev);
extern void drm_mode_set_name(struct drm_display_mode *mode);
extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
-extern bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
extern int drm_mode_width(const struct drm_display_mode *mode);
extern int drm_mode_height(const struct drm_display_mode *mode);
@@ -1135,4 +1145,21 @@ extern int drm_format_horz_chroma_subsampling(uint32_t format);
extern int drm_format_vert_chroma_subsampling(uint32_t format);
extern const char *drm_get_format_name(uint32_t format);
+/* Helpers */
+static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
+ return mo ? obj_to_crtc(mo) : NULL;
+}
+
+static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+ return mo ? obj_to_encoder(mo) : NULL;
+}
+
#endif /* __DRM_CRTC_H__ */
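The new drm_crtc_find()/drm_encoder_find() helpers wrap the usual drm_mode_object_find() + obj_to_*() pair. A hypothetical caller showing the lookup-and-check pattern (my_get_gamma_size() is a made-up function, not part of the header above):

static int my_get_gamma_size(struct drm_device *dev, uint32_t crtc_id,
			     uint32_t *size)
{
	struct drm_crtc *crtc = drm_crtc_find(dev, crtc_id);

	if (!crtc)
		return -ENOENT;		/* no CRTC with that object id */

	*size = crtc->gamma_size;
	return 0;
}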
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index f43d556bf40b..ef6ad3a8e58e 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -163,7 +163,7 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
extern int drm_helper_resume_force_mode(struct drm_device *dev);
extern void drm_kms_helper_poll_init(struct drm_device *dev);
extern void drm_kms_helper_poll_fini(struct drm_device *dev);
-extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+extern bool drm_helper_hpd_irq_event(struct drm_device *dev);
extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index ae8dbfb1207c..a92c3754e3bb 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -77,10 +77,10 @@
#define DP_DOWNSTREAMPORT_PRESENT 0x005
# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
-/* 00b = DisplayPort */
-/* 01b = Analog */
-/* 10b = TMDS or HDMI */
-/* 11b = Other */
+# define DP_DWN_STRM_PORT_TYPE_DP (0 << 1)
+# define DP_DWN_STRM_PORT_TYPE_ANALOG (1 << 1)
+# define DP_DWN_STRM_PORT_TYPE_TMDS (2 << 1)
+# define DP_DWN_STRM_PORT_TYPE_OTHER (3 << 1)
# define DP_FORMAT_CONVERSION (1 << 3)
# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
@@ -333,20 +333,20 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
#define DP_LINK_STATUS_SIZE 6
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
#define DP_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
u8 drm_dp_link_rate_to_bw_code(int link_rate);
int drm_dp_bw_code_to_link_rate(u8 link_bw);
@@ -379,15 +379,22 @@ struct edp_vsc_psr {
#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2)
static inline int
-drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
}
static inline u8
-drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
}
+static inline bool
+drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 &&
+ (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
+}
+
#endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/dt-bindings/mfd/as3722.h b/include/dt-bindings/mfd/as3722.h
new file mode 100644
index 000000000000..0e692562d77b
--- /dev/null
+++ b/include/dt-bindings/mfd/as3722.h
@@ -0,0 +1,52 @@
+/*
+ * This header provides macros for ams AS3722 device bindings.
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ */
+
+#ifndef __DT_BINDINGS_AS3722_H__
+#define __DT_BINDINGS_AS3722_H__
+
+/* External control pins */
+#define AS3722_EXT_CONTROL_PIN_ENABLE1 1
+#define AS3722_EXT_CONTROL_PIN_ENABLE2 2
+#define AS3722_EXT_CONTROL_PIN_ENABLE3 3
+
+/* Interrupt numbers for AS3722 */
+#define AS3722_IRQ_LID 0
+#define AS3722_IRQ_ACOK 1
+#define AS3722_IRQ_ENABLE1 2
+#define AS3722_IRQ_OCCUR_ALARM_SD0 3
+#define AS3722_IRQ_ONKEY_LONG_PRESS 4
+#define AS3722_IRQ_ONKEY 5
+#define AS3722_IRQ_OVTMP 6
+#define AS3722_IRQ_LOWBAT 7
+#define AS3722_IRQ_SD0_LV 8
+#define AS3722_IRQ_SD1_LV 9
+#define AS3722_IRQ_SD2_LV 10
+#define AS3722_IRQ_PWM1_OV_PROT 11
+#define AS3722_IRQ_PWM2_OV_PROT 12
+#define AS3722_IRQ_ENABLE2 13
+#define AS3722_IRQ_SD6_LV 14
+#define AS3722_IRQ_RTC_REP 15
+#define AS3722_IRQ_RTC_ALARM 16
+#define AS3722_IRQ_GPIO1 17
+#define AS3722_IRQ_GPIO2 18
+#define AS3722_IRQ_GPIO3 19
+#define AS3722_IRQ_GPIO4 20
+#define AS3722_IRQ_GPIO5 21
+#define AS3722_IRQ_WATCHDOG 22
+#define AS3722_IRQ_ENABLE3 23
+#define AS3722_IRQ_TEMP_SD0_SHUTDOWN 24
+#define AS3722_IRQ_TEMP_SD1_SHUTDOWN 25
+#define AS3722_IRQ_TEMP_SD2_SHUTDOWN 26
+#define AS3722_IRQ_TEMP_SD0_ALARM 27
+#define AS3722_IRQ_TEMP_SD1_ALARM 28
+#define AS3722_IRQ_TEMP_SD6_ALARM 29
+#define AS3722_IRQ_OCCUR_ALARM_SD6 30
+#define AS3722_IRQ_ADC 31
+
+#endif /* __DT_BINDINGS_AS3722_H__ */
diff --git a/include/dt-bindings/mfd/dbx500-prcmu.h b/include/dt-bindings/mfd/dbx500-prcmu.h
new file mode 100644
index 000000000000..552a2d174f01
--- /dev/null
+++ b/include/dt-bindings/mfd/dbx500-prcmu.h
@@ -0,0 +1,83 @@
+/*
+ * This header provides constants for the PRCMU bindings.
+ *
+ */
+
+#ifndef _DT_BINDINGS_MFD_PRCMU_H
+#define _DT_BINDINGS_MFD_PRCMU_H
+
+/*
+ * Clock identifiers.
+ */
+#define ARMCLK 0
+#define PRCMU_ACLK 1
+#define PRCMU_SVAMMCSPCLK 2
+#define PRCMU_SDMMCHCLK 2 /* DBx540 only. */
+#define PRCMU_SIACLK 3
+#define PRCMU_SIAMMDSPCLK 3 /* DBx540 only. */
+#define PRCMU_SGACLK 4
+#define PRCMU_UARTCLK 5
+#define PRCMU_MSP02CLK 6
+#define PRCMU_MSP1CLK 7
+#define PRCMU_I2CCLK 8
+#define PRCMU_SDMMCCLK 9
+#define PRCMU_SLIMCLK 10
+#define PRCMU_CAMCLK 10 /* DBx540 only. */
+#define PRCMU_PER1CLK 11
+#define PRCMU_PER2CLK 12
+#define PRCMU_PER3CLK 13
+#define PRCMU_PER5CLK 14
+#define PRCMU_PER6CLK 15
+#define PRCMU_PER7CLK 16
+#define PRCMU_LCDCLK 17
+#define PRCMU_BMLCLK 18
+#define PRCMU_HSITXCLK 19
+#define PRCMU_HSIRXCLK 20
+#define PRCMU_HDMICLK 21
+#define PRCMU_APEATCLK 22
+#define PRCMU_APETRACECLK 23
+#define PRCMU_MCDECLK 24
+#define PRCMU_IPI2CCLK 25
+#define PRCMU_DSIALTCLK 26
+#define PRCMU_DMACLK 27
+#define PRCMU_B2R2CLK 28
+#define PRCMU_TVCLK 29
+#define SPARE_UNIPROCLK 30
+#define PRCMU_SSPCLK 31
+#define PRCMU_RNGCLK 32
+#define PRCMU_UICCCLK 33
+#define PRCMU_G1CLK 34 /* DBx540 only. */
+#define PRCMU_HVACLK 35 /* DBx540 only. */
+#define PRCMU_SPARE1CLK 36
+#define PRCMU_SPARE2CLK 37
+
+#define PRCMU_NUM_REG_CLOCKS 38
+
+#define PRCMU_RTCCLK PRCMU_NUM_REG_CLOCKS
+#define PRCMU_SYSCLK 39
+#define PRCMU_CDCLK 40
+#define PRCMU_TIMCLK 41
+#define PRCMU_PLLSOC0 42
+#define PRCMU_PLLSOC1 43
+#define PRCMU_ARMSS 44
+#define PRCMU_PLLDDR 45
+
+/* DSI Clocks */
+#define PRCMU_PLLDSI 46
+#define PRCMU_DSI0CLK 47
+#define PRCMU_DSI1CLK 48
+#define PRCMU_DSI0ESCCLK 49
+#define PRCMU_DSI1ESCCLK 50
+#define PRCMU_DSI2ESCCLK 51
+
+/* LCD DSI PLL - Ux540 only */
+#define PRCMU_PLLDSI_LCD 52
+#define PRCMU_DSI0CLK_LCD 53
+#define PRCMU_DSI1CLK_LCD 54
+#define PRCMU_DSI0ESCCLK_LCD 55
+#define PRCMU_DSI1ESCCLK_LCD 56
+#define PRCMU_DSI2ESCCLK_LCD 57
+
+#define PRCMU_NUM_CLKS 58
+
+#endif
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h
new file mode 100644
index 000000000000..eb6c366adfba
--- /dev/null
+++ b/include/dt-bindings/pinctrl/am43xx.h
@@ -0,0 +1,31 @@
+/*
+ * This header provides constants specific to AM43XX pinctrl bindings.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_AM43XX_H
+#define _DT_BINDINGS_PINCTRL_AM43XX_H
+
+#define MUX_MODE0 0
+#define MUX_MODE1 1
+#define MUX_MODE2 2
+#define MUX_MODE3 3
+#define MUX_MODE4 4
+#define MUX_MODE5 5
+#define MUX_MODE6 6
+#define MUX_MODE7 7
+
+#define PULL_DISABLE (1 << 16)
+#define PULL_UP (1 << 17)
+#define INPUT_EN (1 << 18)
+#define SLEWCTRL_FAST (1 << 19)
+#define DS0_PULL_UP_DOWN_EN (1 << 27)
+
+#define PIN_OUTPUT (PULL_DISABLE)
+#define PIN_OUTPUT_PULLUP (PULL_UP)
+#define PIN_OUTPUT_PULLDOWN 0
+#define PIN_INPUT (INPUT_EN | PULL_DISABLE)
+#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP)
+#define PIN_INPUT_PULLDOWN (INPUT_EN)
+
+#endif
+
diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
new file mode 100644
index 000000000000..002a2855c046
--- /dev/null
+++ b/include/dt-bindings/pinctrl/dra.h
@@ -0,0 +1,50 @@
+/*
+ * This header provides constants for DRA pinctrl bindings.
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_DRA_H
+#define _DT_BINDINGS_PINCTRL_DRA_H
+
+/* DRA7 mux mode options for each pin. See TRM for options */
+#define MUX_MODE0 0x0
+#define MUX_MODE1 0x1
+#define MUX_MODE2 0x2
+#define MUX_MODE3 0x3
+#define MUX_MODE4 0x4
+#define MUX_MODE5 0x5
+#define MUX_MODE6 0x6
+#define MUX_MODE7 0x7
+#define MUX_MODE8 0x8
+#define MUX_MODE9 0x9
+#define MUX_MODE10 0xa
+#define MUX_MODE11 0xb
+#define MUX_MODE12 0xc
+#define MUX_MODE13 0xd
+#define MUX_MODE14 0xe
+#define MUX_MODE15 0xf
+
+#define PULL_ENA (1 << 16)
+#define PULL_UP (1 << 17)
+#define INPUT_EN (1 << 18)
+#define SLEWCONTROL (1 << 19)
+#define WAKEUP_EN (1 << 24)
+#define WAKEUP_EVENT (1 << 25)
+
+/* Active pin states */
+#define PIN_OUTPUT 0
+#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP)
+#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA)
+#define PIN_INPUT INPUT_EN
+#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL)
+#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP)
+#define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN)
+
+#endif
+
diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h
new file mode 100644
index 000000000000..d69bc8af3292
--- /dev/null
+++ b/include/keys/big_key-type.h
@@ -0,0 +1,25 @@
+/* Big capacity key type.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _KEYS_BIG_KEY_TYPE_H
+#define _KEYS_BIG_KEY_TYPE_H
+
+#include <linux/key-type.h>
+
+extern struct key_type key_type_big_key;
+
+extern int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep);
+extern void big_key_revoke(struct key *key);
+extern void big_key_destroy(struct key *key);
+extern void big_key_describe(const struct key *big_key, struct seq_file *m);
+extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen);
+
+#endif /* _KEYS_BIG_KEY_TYPE_H */
diff --git a/include/keys/keyring-type.h b/include/keys/keyring-type.h
index cf49159b0e3a..fca5c62340a4 100644
--- a/include/keys/keyring-type.h
+++ b/include/keys/keyring-type.h
@@ -1,6 +1,6 @@
/* Keyring key type
*
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2008, 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -13,19 +13,6 @@
#define _KEYS_KEYRING_TYPE_H
#include <linux/key.h>
-#include <linux/rcupdate.h>
-
-/*
- * the keyring payload contains a list of the keys to which the keyring is
- * subscribed
- */
-struct keyring_list {
- struct rcu_head rcu; /* RCU deletion hook */
- unsigned short maxkeys; /* max keys this list can hold */
- unsigned short nkeys; /* number of keys currently held */
- unsigned short delkey; /* key to be unlinked by RCU */
- struct key __rcu *keys[0];
-};
-
+#include <linux/assoc_array.h>
#endif /* _KEYS_KEYRING_TYPE_H */
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
new file mode 100644
index 000000000000..8dabc399bd1d
--- /dev/null
+++ b/include/keys/system_keyring.h
@@ -0,0 +1,23 @@
+/* System keyring containing trusted public keys.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _KEYS_SYSTEM_KEYRING_H
+#define _KEYS_SYSTEM_KEYRING_H
+
+#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING
+
+#include <linux/key.h>
+
+extern struct key *system_trusted_keyring;
+
+#endif
+
+#endif /* _KEYS_SYSTEM_KEYRING_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index a5db4aeefa36..84bd2dc18b86 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -116,7 +116,7 @@ void acpi_numa_arch_fixup(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
-int acpi_map_lsapic(acpi_handle handle, int *pcpu);
+int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu);
int acpi_unmap_lsapic(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
@@ -294,58 +294,52 @@ void __init acpi_nvs_nosave_s3(void);
#endif /* CONFIG_PM_SLEEP */
struct acpi_osc_context {
- char *uuid_str; /* uuid string */
+ char *uuid_str; /* UUID string */
int rev;
- struct acpi_buffer cap; /* arg2/arg3 */
- struct acpi_buffer ret; /* free by caller if success */
+ struct acpi_buffer cap; /* list of DWORD capabilities */
+ struct acpi_buffer ret; /* free by caller if success */
};
-#define OSC_QUERY_TYPE 0
-#define OSC_SUPPORT_TYPE 1
-#define OSC_CONTROL_TYPE 2
-
-/* _OSC DW0 Definition */
-#define OSC_QUERY_ENABLE 1
-#define OSC_REQUEST_ERROR 2
-#define OSC_INVALID_UUID_ERROR 4
-#define OSC_INVALID_REVISION_ERROR 8
-#define OSC_CAPABILITIES_MASK_ERROR 16
-
+acpi_status acpi_str_to_uuid(char *str, u8 *uuid);
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
-/* platform-wide _OSC bits */
-#define OSC_SB_PAD_SUPPORT 1
-#define OSC_SB_PPC_OST_SUPPORT 2
-#define OSC_SB_PR3_SUPPORT 4
-#define OSC_SB_HOTPLUG_OST_SUPPORT 8
-#define OSC_SB_APEI_SUPPORT 16
+/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
+#define OSC_QUERY_DWORD 0 /* DWORD 1 */
+#define OSC_SUPPORT_DWORD 1 /* DWORD 2 */
+#define OSC_CONTROL_DWORD 2 /* DWORD 3 */
+
+/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */
+#define OSC_QUERY_ENABLE 0x00000001 /* input */
+#define OSC_REQUEST_ERROR 0x00000002 /* return */
+#define OSC_INVALID_UUID_ERROR 0x00000004 /* return */
+#define OSC_INVALID_REVISION_ERROR 0x00000008 /* return */
+#define OSC_CAPABILITIES_MASK_ERROR 0x00000010 /* return */
+
+/* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */
+#define OSC_SB_PAD_SUPPORT 0x00000001
+#define OSC_SB_PPC_OST_SUPPORT 0x00000002
+#define OSC_SB_PR3_SUPPORT 0x00000004
+#define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008
+#define OSC_SB_APEI_SUPPORT 0x00000010
+#define OSC_SB_CPC_SUPPORT 0x00000020
extern bool osc_sb_apei_support_acked;
-/* PCI defined _OSC bits */
-/* _OSC DW1 Definition (OS Support Fields) */
-#define OSC_EXT_PCI_CONFIG_SUPPORT 1
-#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
-#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
-#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
-#define OSC_MSI_SUPPORT 16
-#define OSC_PCI_SUPPORT_MASKS 0x1f
-
-/* _OSC DW1 Definition (OS Control Fields) */
-#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
-#define OSC_SHPC_NATIVE_HP_CONTROL 2
-#define OSC_PCI_EXPRESS_PME_CONTROL 4
-#define OSC_PCI_EXPRESS_AER_CONTROL 8
-#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
-
-#define OSC_PCI_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
- OSC_SHPC_NATIVE_HP_CONTROL | \
- OSC_PCI_EXPRESS_PME_CONTROL | \
- OSC_PCI_EXPRESS_AER_CONTROL | \
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
-
-#define OSC_PCI_NATIVE_HOTPLUG (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
- OSC_SHPC_NATIVE_HP_CONTROL)
+/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
+#define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001
+#define OSC_PCI_ASPM_SUPPORT 0x00000002
+#define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004
+#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008
+#define OSC_PCI_MSI_SUPPORT 0x00000010
+#define OSC_PCI_SUPPORT_MASKS 0x0000001f
+
+/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
+#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001
+#define OSC_PCI_SHPC_NATIVE_HP_CONTROL 0x00000002
+#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004
+#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008
+#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
+#define OSC_PCI_CONTROL_MASKS 0x0000001f
extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
u32 *mask, u32 req);
@@ -472,7 +466,17 @@ static inline bool acpi_driver_match_device(struct device *dev,
}
#define ACPI_PTR(_ptr) (NULL)
+typedef void * acpi_handle;
+
+struct acpi_device {
+ struct device dev;
+};
+static inline int acpi_bus_get_device(acpi_handle handle,
+ struct acpi_device **device)
+{
+ return -ENODEV;
+}
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI
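[Editor's note: the renamed OSC_*_DWORD indexes and bit values above are meant to be used together - the _OSC capabilities buffer is an array of DWORDs indexed by those constants. A sketch of a platform-wide query built on acpi_run_osc(); the function name is hypothetical and the UUID string is illustrative only.]

#include <linux/acpi.h>
#include <linux/slab.h>

static acpi_status query_sb_osc(acpi_handle handle, u32 support)
{
	static char sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
	u32 capbuf[3];
	struct acpi_osc_context context = {
		.uuid_str	= sb_uuid_str,
		.rev		= 1,
		.cap.length	= sizeof(capbuf),
		.cap.pointer	= capbuf,
	};
	acpi_status status;

	capbuf[OSC_QUERY_DWORD]   = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_DWORD] = support;	/* e.g. OSC_SB_APEI_SUPPORT */
	capbuf[OSC_CONTROL_DWORD] = 0;

	status = acpi_run_osc(handle, &context);
	if (ACPI_SUCCESS(status))
		kfree(context.ret.pointer);	/* "free by caller if success" */
	return status;
}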
diff --git a/include/linux/aio.h b/include/linux/aio.h
index d9c92daa3944..f01e7e370691 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -15,6 +15,14 @@ struct kiocb;
#define KIOCB_KEY 0
/*
+ * opcode values not exposed to user space
+ */
+enum {
+ IOCB_CMD_READ_ITER = 0x10000,
+ IOCB_CMD_WRITE_ITER = 0x10001,
+};
+
+/*
* We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
* cancelled or completed (this makes a certain amount of sense because
* successful cancellation - io_cancel() - does deliver the completion to
@@ -31,13 +39,15 @@ typedef int (kiocb_cancel_fn)(struct kiocb *);
struct kiocb {
struct file *ki_filp;
- struct kioctx *ki_ctx; /* NULL for sync ops */
+ struct kioctx *ki_ctx; /* NULL for sync ops,
+ * -1 for kernel caller */
kiocb_cancel_fn *ki_cancel;
void *private;
union {
void __user *user;
struct task_struct *tsk;
+ void (*complete)(u64 user_data, long res);
} ki_obj;
__u64 ki_user_data; /* user's data for completion */
@@ -59,6 +69,11 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
return kiocb->ki_ctx == NULL;
}
+static inline bool is_kernel_kiocb(struct kiocb *kiocb)
+{
+ return kiocb->ki_ctx == (void *)-1;
+}
+
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
@@ -77,6 +92,14 @@ extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb __user *__user *iocbpp, bool compat);
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
+struct kiocb *aio_kernel_alloc(gfp_t gfp);
+void aio_kernel_free(struct kiocb *iocb);
+void aio_kernel_init_rw(struct kiocb *iocb, struct file *filp, size_t nr,
+ loff_t off);
+void aio_kernel_init_callback(struct kiocb *iocb,
+ void (*complete)(u64 user_data, long res),
+ u64 user_data);
+int aio_kernel_submit(struct kiocb *iocb, unsigned op, void *ptr);
#else
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
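[Editor's note: the kernel-side aio hooks declared above suggest a submission pattern along these lines. This is a sketch only; the meaning of the size argument and of the opaque ptr passed to aio_kernel_submit() (assumed here to be an iov_iter-like object) is inferred from the declarations in this hunk, and the helper names are hypothetical.]

#include <linux/aio.h>
#include <linux/fs.h>
#include <linux/gfp.h>

/* Hypothetical completion handler for a kernel-issued read. */
static void my_read_done(u64 user_data, long res)
{
	/* user_data is whatever was passed to aio_kernel_init_callback() */
}

static int submit_kernel_read(struct file *filp, void *iter,
			      size_t bytes, loff_t pos, u64 cookie)
{
	struct kiocb *iocb = aio_kernel_alloc(GFP_KERNEL);

	if (!iocb)
		return -ENOMEM;

	aio_kernel_init_rw(iocb, filp, bytes, pos);
	aio_kernel_init_callback(iocb, my_read_done, cookie);

	return aio_kernel_submit(iocb, IOCB_CMD_READ_ITER, iter);
}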
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 43ec7e247a80..682df0e1954a 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -30,7 +30,6 @@ struct amba_device {
struct device dev;
struct resource res;
struct clk *pclk;
- u64 dma_mask;
unsigned int periphid;
unsigned int irq[AMBA_NR_IRQS];
};
@@ -131,7 +130,6 @@ struct amba_device name##_device = { \
struct amba_device name##_device = { \
.dev = __AMBA_DEV(busid, data, ~0ULL), \
.res = DEFINE_RES_MEM(base, SZ_4K), \
- .dma_mask = ~0ULL, \
.irq = irqs, \
.periphid = id, \
}
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 62d9303c2837..0ddb5c02ad8b 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -40,7 +40,7 @@
#define UART010_LCRL 0x10 /* Line control register, low byte. */
#define UART010_CR 0x14 /* Control register. */
#define UART01x_FR 0x18 /* Flag register (Read only). */
-#define UART010_IIR 0x1C /* Interrupt indentification register (Read). */
+#define UART010_IIR 0x1C /* Interrupt identification register (Read). */
#define UART010_ICR 0x1C /* Interrupt clear register (Write). */
#define ST_UART011_LCRH_RX 0x1C /* Rx line control register. */
#define UART01x_ILPR 0x20 /* IrDA low power counter register. */
diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h
new file mode 100644
index 000000000000..9a193b84238a
--- /dev/null
+++ b/include/linux/assoc_array.h
@@ -0,0 +1,92 @@
+/* Generic associative array implementation.
+ *
+ * See Documentation/assoc_array.txt for information.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_ASSOC_ARRAY_H
+#define _LINUX_ASSOC_ARRAY_H
+
+#ifdef CONFIG_ASSOCIATIVE_ARRAY
+
+#include <linux/types.h>
+
+#define ASSOC_ARRAY_KEY_CHUNK_SIZE BITS_PER_LONG /* Key data retrieved in chunks of this size */
+
+/*
+ * Generic associative array.
+ */
+struct assoc_array {
+ struct assoc_array_ptr *root; /* The node at the root of the tree */
+ unsigned long nr_leaves_on_tree;
+};
+
+/*
+ * Operations on objects and index keys for use by array manipulation routines.
+ */
+struct assoc_array_ops {
+ /* Method to get a chunk of an index key from caller-supplied data */
+ unsigned long (*get_key_chunk)(const void *index_key, int level);
+
+ /* Method to get a piece of an object's index key */
+ unsigned long (*get_object_key_chunk)(const void *object, int level);
+
+ /* Is this the object we're looking for? */
+ bool (*compare_object)(const void *object, const void *index_key);
+
+ /* How different are two objects, to a bit position in their keys? (or
+ * -1 if they're the same)
+ */
+ int (*diff_objects)(const void *a, const void *b);
+
+ /* Method to free an object. */
+ void (*free_object)(void *object);
+};
+
+/*
+ * Access and manipulation functions.
+ */
+struct assoc_array_edit;
+
+static inline void assoc_array_init(struct assoc_array *array)
+{
+ array->root = NULL;
+ array->nr_leaves_on_tree = 0;
+}
+
+extern int assoc_array_iterate(const struct assoc_array *array,
+ int (*iterator)(const void *object,
+ void *iterator_data),
+ void *iterator_data);
+extern void *assoc_array_find(const struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+extern void assoc_array_destroy(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+extern struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ void *object);
+extern void assoc_array_insert_set_object(struct assoc_array_edit *edit,
+ void *object);
+extern struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+extern struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+extern void assoc_array_apply_edit(struct assoc_array_edit *edit);
+extern void assoc_array_cancel_edit(struct assoc_array_edit *edit);
+extern int assoc_array_gc(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ bool (*iterator)(void *object, void *iterator_data),
+ void *iterator_data);
+
+#endif /* CONFIG_ASSOCIATIVE_ARRAY */
+#endif /* _LINUX_ASSOC_ARRAY_H */
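[Editor's note: the edit-based API above separates allocation from modification - assoc_array_insert() preallocates an edit script which the caller then applies or cancels. A minimal sketch with a toy object keyed by a single unsigned long, assuming CONFIG_ASSOCIATIVE_ARRAY is enabled and that assoc_array_insert() returns an ERR_PTR on failure; all names are hypothetical.]

#include <linux/assoc_array.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/slab.h>

struct my_obj {
	unsigned long	id;		/* the index key, one chunk long */
	/* payload ... */
};

static unsigned long my_get_key_chunk(const void *index_key, int level)
{
	/* Single-word key: only the first chunk carries data. */
	return level == 0 ? *(const unsigned long *)index_key : 0;
}

static unsigned long my_get_object_key_chunk(const void *object, int level)
{
	return level == 0 ? ((const struct my_obj *)object)->id : 0;
}

static bool my_compare_object(const void *object, const void *index_key)
{
	return ((const struct my_obj *)object)->id ==
		*(const unsigned long *)index_key;
}

static int my_diff_objects(const void *a, const void *b)
{
	unsigned long x = ((const struct my_obj *)a)->id ^
			  ((const struct my_obj *)b)->id;

	return x ? __ffs(x) : -1;	/* first differing bit, or -1 if same */
}

static void my_free_object(void *object)
{
	kfree(object);
}

static const struct assoc_array_ops my_ops = {
	.get_key_chunk		= my_get_key_chunk,
	.get_object_key_chunk	= my_get_object_key_chunk,
	.compare_object		= my_compare_object,
	.diff_objects		= my_diff_objects,
	.free_object		= my_free_object,
};

static int my_insert(struct assoc_array *array, struct my_obj *obj)
{
	struct assoc_array_edit *edit;

	edit = assoc_array_insert(array, &my_ops, &obj->id, obj);
	if (IS_ERR(edit))
		return PTR_ERR(edit);
	assoc_array_apply_edit(edit);	/* or assoc_array_cancel_edit() */
	return 0;
}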
diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h
new file mode 100644
index 000000000000..711275e6681c
--- /dev/null
+++ b/include/linux/assoc_array_priv.h
@@ -0,0 +1,182 @@
+/* Private definitions for the generic associative array implementation.
+ *
+ * See Documentation/assoc_array.txt for information.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_ASSOC_ARRAY_PRIV_H
+#define _LINUX_ASSOC_ARRAY_PRIV_H
+
+#ifdef CONFIG_ASSOCIATIVE_ARRAY
+
+#include <linux/assoc_array.h>
+
+#define ASSOC_ARRAY_FAN_OUT 16 /* Number of slots per node */
+#define ASSOC_ARRAY_FAN_MASK (ASSOC_ARRAY_FAN_OUT - 1)
+#define ASSOC_ARRAY_LEVEL_STEP (ilog2(ASSOC_ARRAY_FAN_OUT))
+#define ASSOC_ARRAY_LEVEL_STEP_MASK (ASSOC_ARRAY_LEVEL_STEP - 1)
+#define ASSOC_ARRAY_KEY_CHUNK_MASK (ASSOC_ARRAY_KEY_CHUNK_SIZE - 1)
+#define ASSOC_ARRAY_KEY_CHUNK_SHIFT (ilog2(BITS_PER_LONG))
+
+/*
+ * Undefined type representing a pointer with type information in the bottom
+ * two bits.
+ */
+struct assoc_array_ptr;
+
+/*
+ * An N-way node in the tree.
+ *
+ * Each slot contains one of four things:
+ *
+ * (1) Nothing (NULL).
+ *
+ * (2) A leaf object (pointer type 0).
+ *
+ * (3) A next-level node (pointer type 1, subtype 0).
+ *
+ * (4) A shortcut (pointer type 1, subtype 1).
+ *
+ * The tree is optimised for search-by-ID, but permits reasonable iteration
+ * also.
+ *
+ * The tree is navigated by constructing an index key consisting of an array of
+ * segments, where each segment is ilog2(ASSOC_ARRAY_FAN_OUT) bits in size.
+ *
+ * The segments correspond to levels of the tree (the first segment is used at
+ * level 0, the second at level 1, etc.).
+ */
+struct assoc_array_node {
+ struct assoc_array_ptr *back_pointer;
+ u8 parent_slot;
+ struct assoc_array_ptr *slots[ASSOC_ARRAY_FAN_OUT];
+ unsigned long nr_leaves_on_branch;
+};
+
+/*
+ * A shortcut through the index space out to where a collection of nodes/leaves
+ * with the same IDs live.
+ */
+struct assoc_array_shortcut {
+ struct assoc_array_ptr *back_pointer;
+ int parent_slot;
+ int skip_to_level;
+ struct assoc_array_ptr *next_node;
+ unsigned long index_key[];
+};
+
+/*
+ * Preallocation cache.
+ */
+struct assoc_array_edit {
+ struct rcu_head rcu;
+ struct assoc_array *array;
+ const struct assoc_array_ops *ops;
+ const struct assoc_array_ops *ops_for_excised_subtree;
+ struct assoc_array_ptr *leaf;
+ struct assoc_array_ptr **leaf_p;
+ struct assoc_array_ptr *dead_leaf;
+ struct assoc_array_ptr *new_meta[3];
+ struct assoc_array_ptr *excised_meta[1];
+ struct assoc_array_ptr *excised_subtree;
+ struct assoc_array_ptr **set_backpointers[ASSOC_ARRAY_FAN_OUT];
+ struct assoc_array_ptr *set_backpointers_to;
+ struct assoc_array_node *adjust_count_on;
+ long adjust_count_by;
+ struct {
+ struct assoc_array_ptr **ptr;
+ struct assoc_array_ptr *to;
+ } set[2];
+ struct {
+ u8 *p;
+ u8 to;
+ } set_parent_slot[1];
+ u8 segment_cache[ASSOC_ARRAY_FAN_OUT + 1];
+};
+
+/*
+ * Internal tree member pointers are marked in the bottom one or two bits to
+ * indicate what type they are so that we don't have to look behind every
+ * pointer to see what it points to.
+ *
+ * We provide functions to test type annotations and to create and translate
+ * the annotated pointers.
+ */
+#define ASSOC_ARRAY_PTR_TYPE_MASK 0x1UL
+#define ASSOC_ARRAY_PTR_LEAF_TYPE 0x0UL /* Points to leaf (or nowhere) */
+#define ASSOC_ARRAY_PTR_META_TYPE 0x1UL /* Points to node or shortcut */
+#define ASSOC_ARRAY_PTR_SUBTYPE_MASK 0x2UL
+#define ASSOC_ARRAY_PTR_NODE_SUBTYPE 0x0UL
+#define ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE 0x2UL
+
+static inline bool assoc_array_ptr_is_meta(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & ASSOC_ARRAY_PTR_TYPE_MASK;
+}
+static inline bool assoc_array_ptr_is_leaf(const struct assoc_array_ptr *x)
+{
+ return !assoc_array_ptr_is_meta(x);
+}
+static inline bool assoc_array_ptr_is_shortcut(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & ASSOC_ARRAY_PTR_SUBTYPE_MASK;
+}
+static inline bool assoc_array_ptr_is_node(const struct assoc_array_ptr *x)
+{
+ return !assoc_array_ptr_is_shortcut(x);
+}
+
+static inline void *assoc_array_ptr_to_leaf(const struct assoc_array_ptr *x)
+{
+ return (void *)((unsigned long)x & ~ASSOC_ARRAY_PTR_TYPE_MASK);
+}
+
+static inline
+unsigned long __assoc_array_ptr_to_meta(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x &
+ ~(ASSOC_ARRAY_PTR_SUBTYPE_MASK | ASSOC_ARRAY_PTR_TYPE_MASK);
+}
+static inline
+struct assoc_array_node *assoc_array_ptr_to_node(const struct assoc_array_ptr *x)
+{
+ return (struct assoc_array_node *)__assoc_array_ptr_to_meta(x);
+}
+static inline
+struct assoc_array_shortcut *assoc_array_ptr_to_shortcut(const struct assoc_array_ptr *x)
+{
+ return (struct assoc_array_shortcut *)__assoc_array_ptr_to_meta(x);
+}
+
+static inline
+struct assoc_array_ptr *__assoc_array_x_to_ptr(const void *p, unsigned long t)
+{
+ return (struct assoc_array_ptr *)((unsigned long)p | t);
+}
+static inline
+struct assoc_array_ptr *assoc_array_leaf_to_ptr(const void *p)
+{
+ return __assoc_array_x_to_ptr(p, ASSOC_ARRAY_PTR_LEAF_TYPE);
+}
+static inline
+struct assoc_array_ptr *assoc_array_node_to_ptr(const struct assoc_array_node *p)
+{
+ return __assoc_array_x_to_ptr(
+ p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_NODE_SUBTYPE);
+}
+static inline
+struct assoc_array_ptr *assoc_array_shortcut_to_ptr(const struct assoc_array_shortcut *p)
+{
+ return __assoc_array_x_to_ptr(
+ p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE);
+}
+
+#endif /* CONFIG_ASSOCIATIVE_ARRAY */
+#endif /* _LINUX_ASSOC_ARRAY_PRIV_H */
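[Editor's note: the tagging scheme above lets a tree walker discriminate slot contents without dereferencing first. A sketch of the dispatch a walk would do per slot, using only the helpers defined in this header; intended for code that already includes this private header, such as lib/assoc_array.c.]

static void classify_slot(const struct assoc_array_ptr *ptr)
{
	if (!ptr)
		return;					/* empty slot */

	if (assoc_array_ptr_is_leaf(ptr)) {
		void *object = assoc_array_ptr_to_leaf(ptr);
		/* ... hand object back to the caller ... */
	} else if (assoc_array_ptr_is_shortcut(ptr)) {
		struct assoc_array_shortcut *s = assoc_array_ptr_to_shortcut(ptr);
		/* ... jump ahead to s->skip_to_level and follow s->next_node ... */
	} else {
		struct assoc_array_node *n = assoc_array_ptr_to_node(ptr);
		/* ... descend into n->slots[0 .. ASSOC_ARRAY_FAN_OUT-1] ... */
	}
}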
diff --git a/include/linux/ata.h b/include/linux/ata.h
index bf4c69ca76df..f2f4d8da97c0 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -219,6 +219,7 @@ enum {
ATA_CMD_IDLE = 0xE3, /* place in idle power mode */
ATA_CMD_EDD = 0x90, /* execute device diagnostic */
ATA_CMD_DOWNLOAD_MICRO = 0x92,
+ ATA_CMD_DOWNLOAD_MICRO_DMA = 0x93,
ATA_CMD_NOP = 0x00,
ATA_CMD_FLUSH = 0xE7,
ATA_CMD_FLUSH_EXT = 0xEA,
@@ -268,12 +269,15 @@ enum {
ATA_CMD_WRITE_LOG_EXT = 0x3F,
ATA_CMD_READ_LOG_DMA_EXT = 0x47,
ATA_CMD_WRITE_LOG_DMA_EXT = 0x57,
+ ATA_CMD_TRUSTED_NONDATA = 0x5B,
ATA_CMD_TRUSTED_RCV = 0x5C,
ATA_CMD_TRUSTED_RCV_DMA = 0x5D,
ATA_CMD_TRUSTED_SND = 0x5E,
ATA_CMD_TRUSTED_SND_DMA = 0x5F,
ATA_CMD_PMP_READ = 0xE4,
+ ATA_CMD_PMP_READ_DMA = 0xE9,
ATA_CMD_PMP_WRITE = 0xE8,
+ ATA_CMD_PMP_WRITE_DMA = 0xEB,
ATA_CMD_CONF_OVERLAY = 0xB1,
ATA_CMD_SEC_SET_PASS = 0xF1,
ATA_CMD_SEC_UNLOCK = 0xF2,
@@ -292,6 +296,9 @@ enum {
ATA_CMD_CFA_TRANS_SECT = 0x87,
ATA_CMD_CFA_ERASE = 0xC0,
ATA_CMD_CFA_WRITE_MULT_NE = 0xCD,
+ ATA_CMD_REQ_SENSE_DATA = 0x0B,
+ ATA_CMD_SANITIZE_DEVICE = 0xB4,
+
/* marked obsolete in the ATA/ATAPI-7 spec */
ATA_CMD_RESTORE = 0x10,
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
index be201ca2990c..00beddf6be20 100644
--- a/include/linux/atmel_serial.h
+++ b/include/linux/atmel_serial.h
@@ -125,5 +125,6 @@
#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
#define ATMEL_US_NAME 0xf0 /* Ip Name */
+#define ATMEL_US_VERSION 0xfc /* Ip Version */
#endif
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5f66d519a726..24819001f5c8 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -109,7 +109,7 @@ struct backing_dev_info {
#endif
};
-int bdi_init(struct backing_dev_info *bdi);
+int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);
__printf(3, 4)
@@ -117,7 +117,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
-int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
+int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
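[Editor's note: with __must_check, callers can no longer silently drop the return value of bdi_init(); a short sketch of the expected pattern, with a hypothetical caller name.]

#include <linux/backing-dev.h>

static int myfs_init_bdi(struct backing_dev_info *bdi)
{
	int err = bdi_init(bdi);

	if (err)
		return err;
	/* ... further setup; undo with bdi_destroy() on a later failure ... */
	return 0;
}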
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 53b77949c79d..5f9cd963213d 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -100,6 +100,9 @@ struct backlight_device {
/* The framebuffer notifier block */
struct notifier_block fb_notif;
+ /* list entry of all registered backlight devices */
+ struct list_head entry;
+
struct device dev;
};
@@ -123,6 +126,7 @@ extern void devm_backlight_device_unregister(struct device *dev,
struct backlight_device *bd);
extern void backlight_force_update(struct backlight_device *bd,
enum backlight_update_reason reason);
+extern bool backlight_device_registered(enum backlight_type type);
#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ec48bac5b039..45926a27f4ff 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -61,25 +61,87 @@
* various member access, note that bio_data should of course not be used
* on highmem page vectors
*/
-#define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)]))
-#define bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_idx)
-#define bio_page(bio) bio_iovec((bio))->bv_page
-#define bio_offset(bio) bio_iovec((bio))->bv_offset
-#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
-#define bio_sectors(bio) ((bio)->bi_size >> 9)
-#define bio_end_sector(bio) ((bio)->bi_sector + bio_sectors((bio)))
+#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx])
+
+#define bvec_iter_page(bvec, iter) \
+ (__bvec_iter_bvec((bvec), (iter))->bv_page)
+
+#define bvec_iter_len(bvec, iter) \
+ min((iter).bi_size, \
+ __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
+
+#define bvec_iter_offset(bvec, iter) \
+ (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
+
+#define bvec_iter_bvec(bvec, iter) \
+((struct bio_vec) { \
+ .bv_page = bvec_iter_page((bvec), (iter)), \
+ .bv_len = bvec_iter_len((bvec), (iter)), \
+ .bv_offset = bvec_iter_offset((bvec), (iter)), \
+})
+
+#define bio_iter_iovec(bio, iter) \
+ bvec_iter_bvec((bio)->bi_io_vec, (iter))
+
+#define bio_iter_page(bio, iter) \
+ bvec_iter_page((bio)->bi_io_vec, (iter))
+#define bio_iter_len(bio, iter) \
+ bvec_iter_len((bio)->bi_io_vec, (iter))
+#define bio_iter_offset(bio, iter) \
+ bvec_iter_offset((bio)->bi_io_vec, (iter))
+
+#define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter)
+#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
+#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
+
+#define bio_multiple_segments(bio) \
+ ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
+#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
+#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+
+/*
+ * Check whether this bio carries any data or not. A NULL bio is allowed.
+ */
+static inline bool bio_has_data(struct bio *bio)
+{
+ if (bio &&
+ bio->bi_iter.bi_size &&
+ !(bio->bi_rw & REQ_DISCARD))
+ return true;
+
+ return false;
+}
+
+static inline bool bio_is_rw(struct bio *bio)
+{
+ if (!bio_has_data(bio))
+ return false;
+
+ if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+ return false;
+
+ return true;
+}
+
+static inline bool bio_mergeable(struct bio *bio)
+{
+ if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+ return false;
+
+ return true;
+}
static inline unsigned int bio_cur_bytes(struct bio *bio)
{
- if (bio->bi_vcnt)
- return bio_iovec(bio)->bv_len;
+ if (bio_has_data(bio))
+ return bio_iovec(bio).bv_len;
else /* dataless requests such as discard */
- return bio->bi_size;
+ return bio->bi_iter.bi_size;
}
static inline void *bio_data(struct bio *bio)
{
- if (bio->bi_vcnt)
+ if (bio_has_data(bio))
return page_address(bio_page(bio)) + bio_offset(bio);
return NULL;
@@ -97,19 +159,16 @@ static inline void *bio_data(struct bio *bio)
* permanent PIO fall back, user is probably better off disabling highmem
* I/O completely on that queue (see ide-dma for example)
*/
-#define __bio_kmap_atomic(bio, idx) \
- (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) + \
- bio_iovec_idx((bio), (idx))->bv_offset)
+#define __bio_kmap_atomic(bio, iter) \
+ (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) + \
+ bio_iter_iovec((bio), (iter)).bv_offset)
-#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)
+#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)
/*
* merge helpers etc
*/
-#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
-#define __BVEC_START(bio) bio_iovec_idx((bio), (bio)->bi_idx)
-
/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
@@ -126,33 +185,76 @@ static inline void *bio_data(struct bio *bio)
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
-#define BIO_SEG_BOUNDARY(q, b1, b2) \
- BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
#define bio_io_error(bio) bio_endio((bio), -EIO)
/*
- * drivers should not use the __ version unless they _really_ know what
- * they're doing
- */
-#define __bio_for_each_segment(bvl, bio, i, start_idx) \
- for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx); \
- i < (bio)->bi_vcnt; \
- bvl++, i++)
-
-/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
*/
#define bio_for_each_segment_all(bvl, bio, i) \
- for (i = 0; \
- bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \
- i++)
+ for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
+
+static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
+ unsigned bytes)
+{
+ WARN_ONCE(bytes > iter->bi_size,
+ "Attempted to advance past end of bvec iter\n");
+
+ while (bytes) {
+ unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+
+ bytes -= len;
+ iter->bi_size -= len;
+ iter->bi_bvec_done += len;
+
+ if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
+ iter->bi_bvec_done = 0;
+ iter->bi_idx++;
+ }
+ }
+}
+
+#define for_each_bvec(bvl, bio_vec, iter, start) \
+ for ((iter) = start; \
+ (bvl) = bvec_iter_bvec((bio_vec), (iter)), \
+ (iter).bi_size; \
+ bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+
+
+static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
+ unsigned bytes)
+{
+ iter->bi_sector += bytes >> 9;
+
+ if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+ iter->bi_size -= bytes;
+ else
+ bvec_iter_advance(bio->bi_io_vec, iter, bytes);
+}
+
+#define __bio_for_each_segment(bvl, bio, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = bio_iter_iovec((bio), (iter))), 1); \
+ bio_advance_iter((bio), &(iter), (bvl).bv_len))
-#define bio_for_each_segment(bvl, bio, i) \
- for (i = (bio)->bi_idx; \
- bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \
- i++)
+#define bio_for_each_segment(bvl, bio, iter) \
+ __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
+
+#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
+
+static inline unsigned bio_segments(struct bio *bio)
+{
+ unsigned segs = 0;
+ struct bio_vec bv;
+ struct bvec_iter iter;
+
+ bio_for_each_segment(bv, bio, iter)
+ segs++;
+
+ return segs;
+}
/*
* get a reference to a bio, so it won't disappear. the intended use is
@@ -177,16 +279,15 @@ static inline void *bio_data(struct bio *bio)
struct bio_integrity_payload {
struct bio *bip_bio; /* parent bio */
- sector_t bip_sector; /* virtual start sector */
+ struct bvec_iter bip_iter;
+ /* kill - should just use bip_vec */
void *bip_buf; /* generated integrity data */
- bio_end_io_t *bip_end_io; /* saved I/O completion fn */
- unsigned int bip_size;
+ bio_end_io_t *bip_end_io; /* saved I/O completion fn */
unsigned short bip_slab; /* slab the bip came from */
unsigned short bip_vcnt; /* # of integrity bio_vecs */
- unsigned short bip_idx; /* current bip_vec index */
unsigned bip_owns_buf:1; /* should free bip_buf */
struct work_struct bip_work; /* I/O completion */
@@ -196,28 +297,28 @@ struct bio_integrity_payload {
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */
-/*
- * A bio_pair is used when we need to split a bio.
- * This can only happen for a bio that refers to just one
- * page of data, and in the unusual situation when the
- * page crosses a chunk/device boundary
+extern void bio_trim(struct bio *bio, int offset, int size);
+extern struct bio *bio_split(struct bio *bio, int sectors,
+ gfp_t gfp, struct bio_set *bs);
+
+/**
+ * bio_next_split - get next @sectors from a bio, splitting if necessary
+ * @bio: bio to split
+ * @sectors: number of sectors to split from the front of @bio
+ * @gfp: gfp mask
+ * @bs: bio set to allocate from
*
- * The address of the master bio is stored in bio1.bi_private
- * The address of the pool the pair was allocated from is stored
- * in bio2.bi_private
+ * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+ * than @sectors, returns the original bio unchanged.
*/
-struct bio_pair {
- struct bio bio1, bio2;
- struct bio_vec bv1, bv2;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- struct bio_integrity_payload bip1, bip2;
- struct bio_vec iv1, iv2;
-#endif
- atomic_t cnt;
- int error;
-};
-extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
-extern void bio_pair_release(struct bio_pair *dbio);
+static inline struct bio *bio_next_split(struct bio *bio, int sectors,
+ gfp_t gfp, struct bio_set *bs)
+{
+ if (sectors >= bio_sectors(bio))
+ return bio;
+
+ return bio_split(bio, sectors, gfp, bs);
+}
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
@@ -228,6 +329,7 @@ extern void bio_put(struct bio *);
extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
+extern int bio_clone_biovec(struct bio *bio, gfp_t gfp_mask);
extern struct bio_set *fs_bio_set;
@@ -253,6 +355,7 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
}
extern void bio_endio(struct bio *, int);
+extern void bio_endio_nodec(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
@@ -261,12 +364,12 @@ extern void bio_advance(struct bio *, unsigned);
extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);
+void bio_chain(struct bio *, struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
-extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
@@ -307,6 +410,14 @@ extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
+static inline ssize_t bvec_length(const struct bio_vec *bvec, unsigned long nr)
+{
+ ssize_t bytes = 0;
+ while (nr--)
+ bytes += (bvec++)->bv_len;
+ return bytes;
+}
+
#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
@@ -356,48 +467,18 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
}
#endif
-static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
+static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
unsigned long *flags)
{
- return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
+ return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}
#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags)
#define bio_kmap_irq(bio, flags) \
- __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
+ __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
/*
- * Check whether this bio carries any data or not. A NULL bio is allowed.
- */
-static inline bool bio_has_data(struct bio *bio)
-{
- if (bio && bio->bi_vcnt)
- return true;
-
- return false;
-}
-
-static inline bool bio_is_rw(struct bio *bio)
-{
- if (!bio_has_data(bio))
- return false;
-
- if (bio->bi_rw & REQ_WRITE_SAME)
- return false;
-
- return true;
-}
-
-static inline bool bio_mergeable(struct bio *bio)
-{
- if (bio->bi_rw & REQ_NOMERGE_FLAGS)
- return false;
-
- return true;
-}
-
-/*
* BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
*
* A bio_list anchors a singly-linked list of bios chained through the bi_next
@@ -419,6 +500,8 @@ static inline void bio_list_init(struct bio_list *bl)
bl->head = bl->tail = NULL;
}
+#define BIO_EMPTY_LIST { NULL, NULL }
+
#define bio_list_for_each(bio, bl) \
for (bio = (bl)->head; bio; bio = bio->bi_next)
@@ -556,16 +639,12 @@ struct biovec_slab {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
-#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
-#define bip_vec(bip) bip_vec_idx(bip, 0)
-#define __bip_for_each_vec(bvl, bip, i, start_idx) \
- for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx); \
- i < (bip)->bip_vcnt; \
- bvl++, i++)
-#define bip_for_each_vec(bvl, bip, i) \
- __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
+#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
+
+#define bip_for_each_vec(bvl, bip, iter) \
+ for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \
for_each_bio(_bio) \
@@ -583,7 +662,6 @@ extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
-extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
@@ -627,12 +705,6 @@ static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
return 0;
}
-static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
- int sectors)
-{
- return;
-}
-
static inline void bio_integrity_advance(struct bio *bio,
unsigned int bytes_done)
{
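[Editor's note: with the iterator rework above, bio_for_each_segment() now yields a struct bio_vec by value and walks a struct bvec_iter instead of an integer index. A sketch of a converted driver loop, using only macros and fields introduced in this file.]

#include <linux/bio.h>

/* Sketch: count the payload bytes of a bio with the new bvec_iter API. */
static unsigned int count_bio_bytes(struct bio *bio)
{
	struct bio_vec bvec;		/* by value, not a pointer */
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_segment(bvec, bio, iter)
		bytes += bvec.bv_len;	/* bvec.bv_page/bv_offset also valid here */

	/* For a data-carrying bio this should match bio->bi_iter.bi_size. */
	return bytes;
}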
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3b6b82108b9..abc9ca778456 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -4,12 +4,23 @@
#ifdef __KERNEL__
#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
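[Editor's note: GENMASK()/GENMASK_ULL() are typically paired with a shift to extract or build a register field; a small illustration with a hypothetical 4-bit field occupying bits 7..4.]

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_FIELD_SHIFT	4
#define MY_FIELD_MASK	GENMASK(7, 4)		/* 0x000000f0 */

static inline u32 my_field_get(u32 reg)
{
	return (reg & MY_FIELD_MASK) >> MY_FIELD_SHIFT;
}

static inline u32 my_field_set(u32 reg, u32 val)
{
	return (reg & ~MY_FIELD_MASK) |
	       ((val << MY_FIELD_SHIFT) & MY_FIELD_MASK);
}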
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
new file mode 100644
index 000000000000..ab0e9b2025b3
--- /dev/null
+++ b/include/linux/blk-mq.h
@@ -0,0 +1,183 @@
+#ifndef BLK_MQ_H
+#define BLK_MQ_H
+
+#include <linux/blkdev.h>
+
+struct blk_mq_tags;
+
+struct blk_mq_cpu_notifier {
+ struct list_head list;
+ void *data;
+ void (*notify)(void *data, unsigned long action, unsigned int cpu);
+};
+
+struct blk_mq_hw_ctx {
+ struct {
+ spinlock_t lock;
+ struct list_head dispatch;
+ } ____cacheline_aligned_in_smp;
+
+ unsigned long state; /* BLK_MQ_S_* flags */
+ struct delayed_work delayed_work;
+
+ unsigned long flags; /* BLK_MQ_F_* flags */
+
+ struct request_queue *queue;
+ unsigned int queue_num;
+
+ void *driver_data;
+
+ unsigned int nr_ctx;
+ struct blk_mq_ctx **ctxs;
+ unsigned int nr_ctx_map;
+ unsigned long *ctx_map;
+
+ struct request **rqs;
+ struct list_head page_list;
+ struct blk_mq_tags *tags;
+
+ unsigned long queued;
+ unsigned long run;
+#define BLK_MQ_MAX_DISPATCH_ORDER 10
+ unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
+
+ unsigned int queue_depth;
+ unsigned int numa_node;
+ unsigned int cmd_size; /* per-request extra data */
+
+ struct blk_mq_cpu_notifier cpu_notifier;
+ struct kobject kobj;
+};
+
+struct blk_mq_reg {
+ struct blk_mq_ops *ops;
+ unsigned int nr_hw_queues;
+ unsigned int queue_depth;
+ unsigned int reserved_tags;
+ unsigned int cmd_size; /* per-request extra data */
+ int numa_node;
+ unsigned int timeout;
+ unsigned int flags; /* BLK_MQ_F_* */
+};
+
+typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
+typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
+typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *,unsigned int);
+typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
+typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
+
+struct blk_mq_ops {
+ /*
+ * Queue request
+ */
+ queue_rq_fn *queue_rq;
+
+ /*
+ * Map to specific hardware queue
+ */
+ map_queue_fn *map_queue;
+
+ /*
+ * Called on request timeout
+ */
+ rq_timed_out_fn *timeout;
+
+ /*
+ * Override for hctx allocations (should probably go)
+ */
+ alloc_hctx_fn *alloc_hctx;
+ free_hctx_fn *free_hctx;
+
+ /*
+ * Called when the block layer side of a hardware queue has been
+ * set up, allowing the driver to allocate/init matching structures.
+ * Ditto for exit/teardown.
+ */
+ init_hctx_fn *init_hctx;
+ exit_hctx_fn *exit_hctx;
+};
+
+enum {
+ BLK_MQ_RQ_QUEUE_OK = 0, /* queued fine */
+ BLK_MQ_RQ_QUEUE_BUSY = 1, /* requeue IO for later */
+ BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */
+
+ BLK_MQ_F_SHOULD_MERGE = 1 << 0,
+ BLK_MQ_F_SHOULD_SORT = 1 << 1,
+ BLK_MQ_F_SHOULD_IPI = 1 << 2,
+
+ BLK_MQ_S_STOPPED = 1 << 0,
+
+ BLK_MQ_MAX_DEPTH = 2048,
+};
+
+struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
+void blk_mq_free_queue(struct request_queue *);
+int blk_mq_register_disk(struct gendisk *);
+void blk_mq_unregister_disk(struct gendisk *);
+void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
+
+void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
+
+void blk_mq_insert_request(struct request_queue *, struct request *, bool);
+void blk_mq_run_queues(struct request_queue *q, bool async);
+void blk_mq_free_request(struct request *rq);
+bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
+struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
+
+struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
+struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
+void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
+
+void blk_mq_end_io(struct request *rq, int error);
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
+void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
+void blk_mq_stop_hw_queues(struct request_queue *q);
+void blk_mq_start_stopped_hw_queues(struct request_queue *q);
+
+/*
+ * Driver command data is immediately after the request. So subtract request
+ * size to get back to the original request.
+ */
+static inline struct request *blk_mq_rq_from_pdu(void *pdu)
+{
+ return pdu - sizeof(struct request);
+}
+static inline void *blk_mq_rq_to_pdu(struct request *rq)
+{
+ return (void *) rq + sizeof(*rq);
+}
+
+static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
+ unsigned int tag)
+{
+ return hctx->rqs[tag];
+}
+
+#define queue_for_each_hw_ctx(q, hctx, i) \
+ for ((i) = 0, hctx = (q)->queue_hw_ctx[0]; \
+ (i) < (q)->nr_hw_queues; (i)++, hctx = (q)->queue_hw_ctx[i])
+
+#define queue_for_each_ctx(q, ctx, i) \
+ for ((i) = 0, ctx = per_cpu_ptr((q)->queue_ctx, 0); \
+ (i) < (q)->nr_queues; (i)++, ctx = per_cpu_ptr(q->queue_ctx, (i)))
+
+#define hctx_for_each_ctx(hctx, ctx, i) \
+ for ((i) = 0, ctx = (hctx)->ctxs[0]; \
+ (i) < (hctx)->nr_ctx; (i)++, ctx = (hctx)->ctxs[(i)])
+
+#define blk_ctx_sum(q, sum) \
+({ \
+ struct blk_mq_ctx *__x; \
+ unsigned int __ret = 0, __i; \
+ \
+ queue_for_each_ctx((q), __x, __i) \
+ __ret += sum; \
+ __ret; \
+})
+
+#endif
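[Editor's note: the registration interface above is declared but not illustrated. A skeletal driver setup under these declarations might look as follows; queue depth, hardware hand-off and error handling are omitted, and all my_* names are hypothetical.]

#include <linux/blk-mq.h>
#include <linux/numa.h>

struct my_cmd {					/* per-request data, sized by cmd_size */
	u32 tag;
};

static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->tag = rq->tag;
	/* ... hand rq to hardware; complete later with blk_mq_end_io(rq, 0) ... */
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* default ctx -> hctx mapping */
};

static struct blk_mq_reg my_mq_reg = {
	.ops		= &my_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.cmd_size	= sizeof(struct my_cmd),
	.numa_node	= NUMA_NO_NODE,
	.flags		= BLK_MQ_F_SHOULD_MERGE,
};

static struct request_queue *my_create_queue(void *driver_data)
{
	/* May fail; the caller is assumed to check the result. */
	return blk_mq_init_queue(&my_mq_reg, driver_data);
}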
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index fa1abeb45b76..701c3e3f0d87 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -28,13 +28,22 @@ struct bio_vec {
unsigned int bv_offset;
};
+struct bvec_iter {
+ sector_t bi_sector; /* device address in 512 byte
+ sectors */
+ unsigned int bi_size; /* residual I/O count */
+
+ unsigned int bi_idx; /* current index into bvl_vec */
+
+ unsigned int bi_bvec_done; /* number of bytes completed in
+ current bvec */
+};
+
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
*/
struct bio {
- sector_t bi_sector; /* device address in 512 byte
- sectors */
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
unsigned long bi_flags; /* status, command, etc */
@@ -42,16 +51,13 @@ struct bio {
* top bits priority
*/
- unsigned short bi_vcnt; /* how many bio_vec's */
- unsigned short bi_idx; /* current index into bvl_vec */
+ struct bvec_iter bi_iter;
/* Number of segments in this BIO after
* physical address coalescing is performed.
*/
unsigned int bi_phys_segments;
- unsigned int bi_size; /* residual I/O count */
-
/*
* To keep track of the max segment size, we account for the
* sizes of the first and last mergeable segments in this bio.
@@ -59,6 +65,8 @@ struct bio {
unsigned int bi_seg_front_size;
unsigned int bi_seg_back_size;
+ atomic_t bi_remaining;
+
bio_end_io_t *bi_end_io;
void *bi_private;
@@ -74,11 +82,13 @@ struct bio {
struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
+ unsigned short bi_vcnt; /* how many bio_vec's */
+
/*
* Everything starting with bi_max_vecs will be preserved by bio_reset()
*/
- unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
+ unsigned short bi_max_vecs; /* max bvl_vecs we can hold */
atomic_t bi_cnt; /* pin count */
@@ -155,6 +165,8 @@ enum rq_flag_bits {
__REQ_FUA, /* forced unit access */
__REQ_FLUSH, /* request for cache flush */
+ __REQ_ATOMIC, /* atomic write */
+
/* bio only flags */
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_THROTTLED, /* This bio has already been subjected to
@@ -176,58 +188,60 @@ enum rq_flag_bits {
__REQ_FLUSH_SEQ, /* request for flush sequence */
__REQ_IO_STAT, /* account I/O stat */
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
- __REQ_KERNEL, /* direct IO to kernel pages */
__REQ_PM, /* runtime pm request */
+ __REQ_END, /* last of chain of requests */
__REQ_NR_BITS, /* stops here */
};
-#define REQ_WRITE (1 << __REQ_WRITE)
-#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
-#define REQ_SYNC (1 << __REQ_SYNC)
-#define REQ_META (1 << __REQ_META)
-#define REQ_PRIO (1 << __REQ_PRIO)
-#define REQ_DISCARD (1 << __REQ_DISCARD)
-#define REQ_WRITE_SAME (1 << __REQ_WRITE_SAME)
-#define REQ_NOIDLE (1 << __REQ_NOIDLE)
+#define REQ_WRITE (1ULL << __REQ_WRITE)
+#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER)
+#define REQ_SYNC (1ULL << __REQ_SYNC)
+#define REQ_META (1ULL << __REQ_META)
+#define REQ_PRIO (1ULL << __REQ_PRIO)
+#define REQ_DISCARD (1ULL << __REQ_DISCARD)
+#define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME)
+#define REQ_NOIDLE (1ULL << __REQ_NOIDLE)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
- REQ_SECURE)
+ REQ_SECURE | REQ_ATOMIC)
#define REQ_CLONE_MASK REQ_COMMON_MASK
#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME)
/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
- (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
-
-#define REQ_RAHEAD (1 << __REQ_RAHEAD)
-#define REQ_THROTTLED (1 << __REQ_THROTTLED)
-
-#define REQ_SORTED (1 << __REQ_SORTED)
-#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
-#define REQ_FUA (1 << __REQ_FUA)
-#define REQ_NOMERGE (1 << __REQ_NOMERGE)
-#define REQ_STARTED (1 << __REQ_STARTED)
-#define REQ_DONTPREP (1 << __REQ_DONTPREP)
-#define REQ_QUEUED (1 << __REQ_QUEUED)
-#define REQ_ELVPRIV (1 << __REQ_ELVPRIV)
-#define REQ_FAILED (1 << __REQ_FAILED)
-#define REQ_QUIET (1 << __REQ_QUIET)
-#define REQ_PREEMPT (1 << __REQ_PREEMPT)
-#define REQ_ALLOCED (1 << __REQ_ALLOCED)
-#define REQ_COPY_USER (1 << __REQ_COPY_USER)
-#define REQ_FLUSH (1 << __REQ_FLUSH)
-#define REQ_FLUSH_SEQ (1 << __REQ_FLUSH_SEQ)
-#define REQ_IO_STAT (1 << __REQ_IO_STAT)
-#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
-#define REQ_SECURE (1 << __REQ_SECURE)
-#define REQ_KERNEL (1 << __REQ_KERNEL)
-#define REQ_PM (1 << __REQ_PM)
+ (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | \
+ REQ_ATOMIC)
+
+#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
+#define REQ_THROTTLED (1ULL << __REQ_THROTTLED)
+
+#define REQ_SORTED (1ULL << __REQ_SORTED)
+#define REQ_SOFTBARRIER (1ULL << __REQ_SOFTBARRIER)
+#define REQ_FUA (1ULL << __REQ_FUA)
+#define REQ_ATOMIC (1ULL << __REQ_ATOMIC)
+#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
+#define REQ_STARTED (1ULL << __REQ_STARTED)
+#define REQ_DONTPREP (1ULL << __REQ_DONTPREP)
+#define REQ_QUEUED (1ULL << __REQ_QUEUED)
+#define REQ_ELVPRIV (1ULL << __REQ_ELVPRIV)
+#define REQ_FAILED (1ULL << __REQ_FAILED)
+#define REQ_QUIET (1ULL << __REQ_QUIET)
+#define REQ_PREEMPT (1ULL << __REQ_PREEMPT)
+#define REQ_ALLOCED (1ULL << __REQ_ALLOCED)
+#define REQ_COPY_USER (1ULL << __REQ_COPY_USER)
+#define REQ_FLUSH (1ULL << __REQ_FLUSH)
+#define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ)
+#define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
+#define REQ_SECURE (1ULL << __REQ_SECURE)
+#define REQ_PM (1ULL << __REQ_PM)
+#define REQ_END (1ULL << __REQ_END)
#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0e6f765aa1f5..ca0119dbfb82 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -8,6 +8,7 @@
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
+#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
@@ -94,12 +95,19 @@ enum rq_cmd_type_bits {
* as well!
*/
struct request {
- struct list_head queuelist;
- struct call_single_data csd;
+ union {
+ struct list_head queuelist;
+ struct llist_node ll_list;
+ };
+ union {
+ struct call_single_data csd;
+ struct work_struct mq_flush_data;
+ };
struct request_queue *q;
+ struct blk_mq_ctx *mq_ctx;
- unsigned int cmd_flags;
+ u64 cmd_flags;
enum rq_cmd_type_bits cmd_type;
unsigned long atomic_flags;
@@ -160,8 +168,6 @@ struct request {
unsigned short ioprio;
- int ref_count;
-
void *special; /* opaque pointer available for LLD use */
char *buffer; /* kaddr of the current segment if available */
@@ -215,6 +221,8 @@ struct request_pm_state
#include <linux/elevator.h>
+struct blk_queue_ctx;
+
typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
@@ -313,6 +321,18 @@ struct request_queue {
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
+ struct blk_mq_ops *mq_ops;
+
+ unsigned int *mq_map;
+
+ /* sw queues */
+ struct blk_mq_ctx *queue_ctx;
+ unsigned int nr_queues;
+
+ /* hw dispatch queues */
+ struct blk_mq_hw_ctx **queue_hw_ctx;
+ unsigned int nr_hw_queues;
+
/*
* Dispatch queue sorting
*/
@@ -361,6 +381,11 @@ struct request_queue {
*/
struct kobject kobj;
+ /*
+ * mq queue kobject
+ */
+ struct kobject mq_kobj;
+
#ifdef CONFIG_PM_RUNTIME
struct device *dev;
int rpm_status;
@@ -425,7 +450,13 @@ struct request_queue {
unsigned long flush_pending_since;
struct list_head flush_queue[2];
struct list_head flush_data_in_flight;
- struct request flush_rq;
+ union {
+ struct request flush_rq;
+ struct {
+ spinlock_t mq_flush_lock;
+ struct work_struct mq_flush_work;
+ };
+ };
struct mutex sysfs_lock;
@@ -437,14 +468,14 @@ struct request_queue {
struct bsg_class_device bsg_dev;
#endif
-#ifdef CONFIG_BLK_CGROUP
- struct list_head all_q_node;
-#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
+ wait_queue_head_t mq_freeze_wq;
+ struct percpu_counter mq_usage_counter;
+ struct list_head all_q_node;
};
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -467,6 +498,7 @@ struct request_queue {
#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
+#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -539,6 +571,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
+#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
@@ -570,7 +603,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_data_dir(rq) ((rq)->cmd_flags & 1)
+#define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0)
static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
@@ -699,7 +732,7 @@ struct rq_map_data {
};
struct req_iterator {
- int i;
+ struct bvec_iter iter;
struct bio *bio;
};
@@ -712,10 +745,11 @@ struct req_iterator {
#define rq_for_each_segment(bvl, _rq, _iter) \
__rq_for_each_bio(_iter.bio, _rq) \
- bio_for_each_segment(bvl, _iter.bio, _iter.i)
+ bio_for_each_segment(bvl, _iter.bio, _iter.iter)
-#define rq_iter_last(rq, _iter) \
- (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
+#define rq_iter_last(bvec, _iter) \
+ (_iter.bio->bi_next == NULL && \
+ bio_iter_last(bvec, _iter.iter))
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
@@ -1013,6 +1047,7 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
struct blk_plug {
unsigned long magic; /* detect uninitialized use-cases */
struct list_head list; /* requests */
+ struct list_head mq_list; /* blk-mq requests */
struct list_head cb_list; /* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16
@@ -1050,7 +1085,10 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
struct blk_plug *plug = tsk->plug;
- return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
+ return plug &&
+ (!list_empty(&plug->list) ||
+ !list_empty(&plug->mq_list) ||
+ !list_empty(&plug->cb_list));
}
/*
@@ -1325,6 +1363,7 @@ static inline void put_dev_sector(Sector p)
struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
/*
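[Editor's note: the req_iterator change mirrors the bio change - rq_for_each_segment() now hands back a struct bio_vec by value and tracks its position in a bvec_iter. A sketch of a converted loop; rq_iter_last(bvec, iter) can additionally be used inside the body to detect the final segment of the request.]

#include <linux/blkdev.h>

/* Sketch: sum the data bytes of a request with the reworked iterator. */
static unsigned int count_rq_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;		/* by value after the conversion */
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;

	return bytes;
}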
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7c2e030e72f1..afc1343df3c7 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -5,6 +5,7 @@
#include <linux/relay.h>
#include <linux/compat.h>
#include <uapi/linux/blktrace_api.h>
+#include <linux/list.h>
#if defined(CONFIG_BLK_DEV_IO_TRACE)
@@ -23,6 +24,7 @@ struct blk_trace {
struct dentry *dir;
struct dentry *dropped_file;
struct dentry *msg_file;
+ struct list_head running_list;
atomic_t dropped;
};
@@ -87,7 +89,7 @@ static inline int blk_trace_init_sysfs(struct device *dev)
#ifdef CONFIG_COMPAT
struct compat_blk_user_trace_setup {
- char name[32];
+ char name[BLKTRACE_BDEV_SIZE];
u16 act_mask;
u32 buf_size;
u32 buf_nr;
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 7c1420bb1dce..091fdb600d55 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -1,6 +1,7 @@
#ifndef __FS_CEPH_MESSENGER_H
#define __FS_CEPH_MESSENGER_H
+#include <linux/blk_types.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/net.h>
@@ -119,8 +120,7 @@ struct ceph_msg_data_cursor {
#ifdef CONFIG_BLOCK
struct { /* bio */
struct bio *bio; /* bio from list */
- unsigned int vector_index; /* vector from bio */
- unsigned int vector_offset; /* bytes from vector */
+ struct bvec_iter bvec_iter;
};
#endif /* CONFIG_BLOCK */
struct { /* pages */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 3561d305b1e0..39c1d9469677 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -612,11 +612,6 @@ struct cgroup_subsys {
int subsys_id;
int disabled;
int early_init;
- /*
- * True if this subsys uses ID. ID is not available before cgroup_init()
- * (not available in early_init time.)
- */
- bool use_id;
/*
* If %false, this subsystem is properly hierarchical -
@@ -642,9 +637,6 @@ struct cgroup_subsys {
*/
struct cgroupfs_root *root;
struct list_head sibling;
- /* used when use_id == true */
- struct idr idr;
- spinlock_t id_lock;
/* list of cftype_sets */
struct list_head cftsets;
@@ -875,35 +867,6 @@ int css_scan_tasks(struct cgroup_subsys_state *css,
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
-/*
- * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
- * if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
- * CSS ID is assigned at cgroup allocation (create) automatically
- * and removed when subsys calls free_css_id() function. This is because
- * the lifetime of cgroup_subsys_state is subsys's matter.
- *
- * Looking up and scanning function should be called under rcu_read_lock().
- * Taking cgroup_mutex is not necessary for following calls.
- * But the css returned by this routine can be "not populated yet" or "being
- * destroyed". The caller should check css and cgroup's status.
- */
-
-/*
- * Typically Called at ->destroy(), or somewhere the subsys frees
- * cgroup_subsys_state.
- */
-void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);
-
-/* Find a cgroup_subsys_state which has given ID */
-
-struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);
-
-/* Returns true if root is ancestor of cg */
-bool css_is_ancestor(struct cgroup_subsys_state *cg,
- const struct cgroup_subsys_state *root);
-
-/* Get id and depth of css */
-unsigned short css_id(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_dir(struct dentry *dentry,
struct cgroup_subsys *ss);
diff --git a/include/linux/clk/mxs.h b/include/linux/clk/mxs.h
index 90c30dc3efc7..5138a90e018c 100644
--- a/include/linux/clk/mxs.h
+++ b/include/linux/clk/mxs.h
@@ -9,8 +9,6 @@
#ifndef __LINUX_CLK_MXS_H
#define __LINUX_CLK_MXS_H
-int mx23_clocks_init(void);
-int mx28_clocks_init(void);
int mxs_saif_clkmux_select(unsigned int clkmux);
#endif
diff --git a/include/linux/clk/sunxi.h b/include/linux/clk/sunxi.h
deleted file mode 100644
index e074fdd5a236..000000000000
--- a/include/linux/clk/sunxi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2012 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LINUX_CLK_SUNXI_H_
-#define __LINUX_CLK_SUNXI_H_
-
-void __init sunxi_init_clocks(void);
-
-#endif
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 0857922e8ad0..493aa021c7a9 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -60,6 +60,7 @@ enum clock_event_mode {
* Core shall set the interrupt affinity dynamically in broadcast mode
*/
#define CLOCK_EVT_FEAT_DYNIRQ 0x000020
+#define CLOCK_EVT_FEAT_PERCPU 0x000040
/**
* struct clock_event_device - clock event device descriptor
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index dbbf8aa7731b..67301a405712 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -292,6 +292,8 @@ extern void clocksource_resume(void);
extern struct clocksource * __init __weak clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
+extern u64
+clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask);
extern void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
diff --git a/include/linux/cper.h b/include/linux/cper.h
index c23049496531..2fc0ec3d89cc 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -218,8 +218,8 @@ enum {
#define CPER_PROC_VALID_IP 0x1000
#define CPER_MEM_VALID_ERROR_STATUS 0x0001
-#define CPER_MEM_VALID_PHYSICAL_ADDRESS 0x0002
-#define CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK 0x0004
+#define CPER_MEM_VALID_PA 0x0002
+#define CPER_MEM_VALID_PA_MASK 0x0004
#define CPER_MEM_VALID_NODE 0x0008
#define CPER_MEM_VALID_CARD 0x0010
#define CPER_MEM_VALID_MODULE 0x0020
@@ -232,6 +232,9 @@ enum {
#define CPER_MEM_VALID_RESPONDER_ID 0x1000
#define CPER_MEM_VALID_TARGET_ID 0x2000
#define CPER_MEM_VALID_ERROR_TYPE 0x4000
+#define CPER_MEM_VALID_RANK_NUMBER 0x8000
+#define CPER_MEM_VALID_CARD_HANDLE 0x10000
+#define CPER_MEM_VALID_MODULE_HANDLE 0x20000
#define CPER_PCIE_VALID_PORT_TYPE 0x0001
#define CPER_PCIE_VALID_VERSION 0x0002
@@ -347,6 +350,10 @@ struct cper_sec_mem_err {
__u64 responder_id;
__u64 target_id;
__u8 error_type;
+ __u8 reserved;
+ __u16 rank;
+ __u16 mem_array_handle; /* card handle in UEFI 2.4 */
+ __u16 mem_dev_handle; /* module handle in UEFI 2.4 */
};
struct cper_sec_pcie {
@@ -389,6 +396,6 @@ struct cper_sec_pcie {
u64 cper_next_record_id(void);
void cper_print_bits(const char *prefix, unsigned int bits,
- const char *strs[], unsigned int strs_size);
+ const char * const strs[], unsigned int strs_size);
#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 801ff9e73679..03e235ad1bba 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -18,6 +18,7 @@
#include <linux/cpumask.h>
struct device;
+struct device_node;
struct cpu {
int node_id; /* The node which contains the CPU */
@@ -29,6 +30,8 @@ extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
+extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
+ int cpu, unsigned int *thread);
extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);
@@ -185,19 +188,6 @@ extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-extern void cpu_hotplug_driver_lock(void);
-extern void cpu_hotplug_driver_unlock(void);
-#else
-static inline void cpu_hotplug_driver_lock(void)
-{
-}
-
-static inline void cpu_hotplug_driver_unlock(void)
-{
-}
-#endif
-
#else /* CONFIG_HOTPLUG_CPU */
static inline void cpu_hotplug_begin(void) {}
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index fcabc42d66ab..dc196bbcf227 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -85,6 +85,20 @@ struct cpufreq_policy {
struct list_head policy_list;
struct kobject kobj;
struct completion kobj_unregister;
+
+ /*
+ * The rules for this semaphore:
+ * - Any routine that wants to read from the policy structure will
+ * do a down_read on this semaphore.
+ * - Any routine that will write to the policy structure and/or may take away
+ * the policy altogether (eg. CPU hotplug), will hold this lock in write
+ * mode before doing so.
+ *
+ * Additional rules:
+ * - Lock should not be held across
+ * __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+ */
+ struct rw_semaphore rwsem;
};
/* Only for ACPI */
@@ -93,8 +107,16 @@ struct cpufreq_policy {
#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
+#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
+#else
+static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+{
+ return NULL;
+}
+static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
+#endif
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
@@ -180,13 +202,6 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
struct cpufreq_driver {
char name[CPUFREQ_NAME_LEN];
u8 flags;
- /*
- * This should be set by platforms having multiple clock-domains, i.e.
- * supporting multiple policies. With this sysfs directories of governor
- * would be created in cpu/cpu<num>/cpufreq/ directory and so they can
- * use the same governor with different tunables for different clusters.
- */
- bool have_governor_per_policy;
/* needed by all drivers */
int (*init) (struct cpufreq_policy *policy);
@@ -194,9 +209,11 @@ struct cpufreq_driver {
/* define one out of two */
int (*setpolicy) (struct cpufreq_policy *policy);
- int (*target) (struct cpufreq_policy *policy,
+ int (*target) (struct cpufreq_policy *policy, /* Deprecated */
unsigned int target_freq,
unsigned int relation);
+ int (*target_index) (struct cpufreq_policy *policy,
+ unsigned int index);
/* should be defined, if possible */
unsigned int (*get) (unsigned int cpu);
@@ -211,13 +228,29 @@ struct cpufreq_driver {
};
/* flags */
-#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
- * all ->init() calls failed */
-#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
- * "constants" aren't affected by
- * frequency transitions */
-#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed
- * mismatches */
+#define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
+ all ->init() calls failed */
+#define CPUFREQ_CONST_LOOPS (1 << 1) /* loops_per_jiffy or other
+ kernel "constants" aren't
+ affected by frequency
+ transitions */
+#define CPUFREQ_PM_NO_WARN (1 << 2) /* don't warn on suspend/resume
+ speed mismatches */
+
+/*
+ * This should be set by platforms having multiple clock-domains, i.e.
+ * supporting multiple policies. With this sysfs directories of governor would
+ * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
+ * governor with different tunables for different clusters.
+ */
+#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
+
+/*
+ * Driver will do POSTCHANGE notifications from outside of their ->target()
+ * routine and so must set cpufreq_driver->flags with this flag, so that core
+ * can handle them specially.
+ */
+#define CPUFREQ_ASYNC_NOTIFICATION (1 << 4)
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
@@ -240,6 +273,13 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
return;
}
+static inline void
+cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
+{
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+}
+
/*********************************************************************
* CPUFREQ NOTIFIER INTERFACE *
*********************************************************************/
@@ -392,6 +432,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
@@ -407,8 +448,20 @@ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
+extern struct freq_attr *cpufreq_generic_attr[];
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu);
void cpufreq_frequency_table_put_attr(unsigned int cpu);
+int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table);
+
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table,
+ unsigned int transition_latency);
+static inline int cpufreq_generic_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
#endif /* _LINUX_CPUFREQ_H */
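The cpufreq changes above replace the per-driver have_governor_per_policy field with the CPUFREQ_HAVE_GOVERNOR_PER_POLICY flag and add a table-index based ->target_index() callback plus generic helpers (cpufreq_generic_init(), cpufreq_generic_frequency_table_verify(), cpufreq_generic_exit(), cpufreq_generic_attr). Below is a minimal sketch of a driver sitting on top of them; the "foo" platform, its frequency table and the transition latency are invented for illustration, and the actual clock programming is left as a comment.

#include <linux/cpufreq.h>

/* Hypothetical platform; frequencies are in kHz and purely illustrative. */
static struct cpufreq_frequency_table foo_freq_table[] = {
	{ .frequency = 500000 },
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* Validates the table and fills in the policy/cpuinfo limits. */
	return cpufreq_generic_init(policy, foo_freq_table, 300 * 1000 /* ns */);
}

static int foo_cpufreq_target_index(struct cpufreq_policy *policy,
				    unsigned int index)
{
	/* Program the CPU clock to foo_freq_table[index].frequency here. */
	return 0;
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name		= "foo-cpufreq",
	.flags		= CPUFREQ_STICKY,
	.init		= foo_cpufreq_init,
	.exit		= cpufreq_generic_exit,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_cpufreq_target_index,
	.attr		= cpufreq_generic_attr,
};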
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 781addc66f03..50fcbb0ac4e7 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -114,7 +114,7 @@ struct cpuidle_driver {
int safe_state_index;
/* the driver handles the cpus in cpumask */
- struct cpumask *cpumask;
+ struct cpumask *cpumask;
};
#ifdef CONFIG_CPU_IDLE
@@ -195,16 +195,10 @@ struct cpuidle_governor {
};
#ifdef CONFIG_CPU_IDLE
-
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-extern void cpuidle_unregister_governor(struct cpuidle_governor *gov);
-
#else
-
static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
-static inline void cpuidle_unregister_governor(struct cpuidle_governor *gov) { }
-
#endif
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
diff --git a/include/linux/crc32.h b/include/linux/crc32.h
index 68267b64bb98..7d275c4fc011 100644
--- a/include/linux/crc32.h
+++ b/include/linux/crc32.h
@@ -11,8 +11,48 @@
extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len);
extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len);
+/**
+ * crc32_le_combine - Combine two crc32 check values into one. For two
+ * sequences of bytes, seq1 and seq2 with lengths len1
+ * and len2, crc32_le() check values were calculated
+ * for each, crc1 and crc2.
+ *
+ * @crc1: crc32 of the first block
+ * @crc2: crc32 of the second block
+ * @len2: length of the second block
+ *
+ * Return: The crc32_le() check value of seq1 and seq2 concatenated,
+ * requiring only crc1, crc2, and len2. Note: If seq_full denotes
+ * the concatenated memory area of seq1 with seq2, and crc_full
+ * the crc32_le() value of seq_full, then crc_full ==
+ * crc32_le_combine(crc1, crc2, len2) when crc_full was seeded
+ * with the same initializer as crc1, and crc2 seed was 0. See
+ * also crc32_combine_test().
+ */
+extern u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2);
+
extern u32 __crc32c_le(u32 crc, unsigned char const *p, size_t len);
+/**
+ * __crc32c_le_combine - Combine two crc32c check values into one. For two
+ * sequences of bytes, seq1 and seq2 with lengths len1
+ * and len2, __crc32c_le() check values were calculated
+ * for each, crc1 and crc2.
+ *
+ * @crc1: crc32c of the first block
+ * @crc2: crc32c of the second block
+ * @len2: length of the second block
+ *
+ * Return: The __crc32c_le() check value of seq1 and seq2 concatenated,
+ * requiring only crc1, crc2, and len2. Note: If seq_full denotes
+ * the concatenated memory area of seq1 with seq2, and crc_full
+ * the __crc32c_le() value of seq_full, then crc_full ==
+ * __crc32c_le_combine(crc1, crc2, len2) when crc_full was
+ * seeded with the same initializer as crc1, and crc2 seed
+ * was 0. See also crc32c_combine_test().
+ */
+extern u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2);
+
#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length)
/*
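As documented above, crc32_le_combine() folds two independently computed CRCs into the CRC of the concatenation, provided the second CRC was seeded with 0. A small self-contained check of that property, using arbitrary example buffers and a ~0 seed:

#include <linux/crc32.h>
#include <linux/string.h>

static bool crc32_combine_demo(void)
{
	static const u8 seq1[] = "hello ";
	static const u8 seq2[] = "world";
	u8 full[sizeof(seq1) - 1 + sizeof(seq2) - 1];
	u32 crc1, crc2, crc_full;

	memcpy(full, seq1, sizeof(seq1) - 1);
	memcpy(full + sizeof(seq1) - 1, seq2, sizeof(seq2) - 1);

	crc1 = crc32_le(~0, seq1, sizeof(seq1) - 1);	/* any seed */
	crc2 = crc32_le(0, seq2, sizeof(seq2) - 1);	/* seed must be 0 */
	crc_full = crc32_le(~0, full, sizeof(full));	/* same seed as crc1 */

	return crc32_le_combine(crc1, crc2, sizeof(seq2) - 1) == crc_full;
}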
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 59066e0b4ff1..716c3760ee39 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -224,6 +224,7 @@ static inline int dname_external(const struct dentry *dentry)
extern void d_instantiate(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
+extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 263489d0788d..4d0b4d1aa132 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -206,6 +206,12 @@ static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mod
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent, atomic_t *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
struct dentry *parent,
u32 *value)
@@ -227,6 +233,12 @@ static inline struct dentry *debugfs_create_regset32(const char *name,
return ERR_PTR(-ENODEV);
}
+static inline int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
+ int nregs, void __iomem *base, char *prefix)
+{
+ return 0;
+}
+
static inline bool debugfs_initialized(void)
{
return false;
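debugfs_create_atomic_t() gains the no-op stub above so callers still build with DEBUG_FS disabled. A hedged sketch of typical usage, with made-up "foo" names:

#include <linux/debugfs.h>
#include <linux/atomic.h>

static atomic_t foo_errors = ATOMIC_INIT(0);
static struct dentry *foo_dir;

static void foo_debugfs_init(void)
{
	foo_dir = debugfs_create_dir("foo", NULL);
	/* With DEBUG_FS disabled the stub above just returns ERR_PTR(-ENODEV). */
	debugfs_create_atomic_t("errors", 0444, foo_dir, &foo_errors);
}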
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 5f1ab92107e6..d48dc00232a4 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -15,7 +15,7 @@
#include <linux/device.h>
#include <linux/notifier.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#define DEVFREQ_NAME_LEN 16
@@ -168,7 +168,7 @@ struct devfreq {
unsigned long max_freq;
bool stop_polling;
- /* information for device freqeuncy transition */
+ /* information for device frequency transition */
unsigned int total_trans;
unsigned int *trans_table;
unsigned long *time_in_state;
@@ -187,7 +187,7 @@ extern int devfreq_suspend_device(struct devfreq *devfreq);
extern int devfreq_resume_device(struct devfreq *devfreq);
/* Helper functions for devfreq user device driver with OPP. */
-extern struct opp *devfreq_recommended_opp(struct device *dev,
+extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq, u32 flags);
extern int devfreq_register_opp_notifier(struct device *dev,
struct devfreq *devfreq);
@@ -238,7 +238,7 @@ static inline int devfreq_resume_device(struct devfreq *devfreq)
return 0;
}
-static inline struct opp *devfreq_recommended_opp(struct device *dev,
+static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq, u32 flags)
{
return ERR_PTR(-EINVAL);
diff --git a/include/linux/device.h b/include/linux/device.h
index 2a9d6ed59579..b025925df7f7 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -26,6 +26,7 @@
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uidgid.h>
+#include <linux/gfp.h>
#include <asm/device.h>
struct device;
@@ -63,9 +64,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @name: The name of the bus.
* @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
* @dev_root: Default device to use as the parent.
- * @bus_attrs: Default attributes of the bus.
* @dev_attrs: Default attributes of the devices on the bus.
- * @drv_attrs: Default attributes of the device drivers on the bus.
* @bus_groups: Default attributes of the bus.
* @dev_groups: Default attributes of the devices on the bus.
* @drv_groups: Default attributes of the device drivers on the bus.
@@ -106,9 +105,7 @@ struct bus_type {
const char *name;
const char *dev_name;
struct device *dev_root;
- struct bus_attribute *bus_attrs; /* use bus_groups instead */
struct device_attribute *dev_attrs; /* use dev_groups instead */
- struct driver_attribute *drv_attrs; /* use drv_groups instead */
const struct attribute_group **bus_groups;
const struct attribute_group **dev_groups;
const struct attribute_group **drv_groups;
@@ -329,8 +326,6 @@ int subsys_virtual_register(struct bus_type *subsys,
* @owner: The module owner.
* @class_attrs: Default attributes of this class.
* @dev_groups: Default attributes of the devices that belong to the class.
- * @dev_attrs: Default attributes of the devices belong to the class.
- * @dev_bin_attrs: Default binary attributes of the devices belong to the class.
* @dev_kobj: The kobject that represents this class and links it into the hierarchy.
* @dev_uevent: Called when a device is added, removed from this class, or a
* few other things that generate uevents to add the environment
@@ -358,9 +353,7 @@ struct class {
struct module *owner;
struct class_attribute *class_attrs;
- struct device_attribute *dev_attrs; /* use dev_groups instead */
const struct attribute_group **dev_groups;
- struct bin_attribute *dev_bin_attrs;
struct kobject *dev_kobj;
int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
@@ -427,8 +420,6 @@ struct class_attribute {
char *buf);
ssize_t (*store)(struct class *class, struct class_attribute *attr,
const char *buf, size_t count);
- const void *(*namespace)(struct class *class,
- const struct class_attribute *attr);
};
#define CLASS_ATTR(_name, _mode, _show, _store) \
@@ -438,10 +429,24 @@ struct class_attribute {
#define CLASS_ATTR_RO(_name) \
struct class_attribute class_attr_##_name = __ATTR_RO(_name)
-extern int __must_check class_create_file(struct class *class,
- const struct class_attribute *attr);
-extern void class_remove_file(struct class *class,
- const struct class_attribute *attr);
+extern int __must_check class_create_file_ns(struct class *class,
+ const struct class_attribute *attr,
+ const void *ns);
+extern void class_remove_file_ns(struct class *class,
+ const struct class_attribute *attr,
+ const void *ns);
+
+static inline int __must_check class_create_file(struct class *class,
+ const struct class_attribute *attr)
+{
+ return class_create_file_ns(class, attr, NULL);
+}
+
+static inline void class_remove_file(struct class *class,
+ const struct class_attribute *attr)
+{
+ return class_remove_file_ns(class, attr, NULL);
+}
/* Simple class attribute that is just a static string */
struct class_attribute_string {
@@ -602,8 +607,24 @@ extern void devres_close_group(struct device *dev, void *id);
extern void devres_remove_group(struct device *dev, void *id);
extern int devres_release_group(struct device *dev, void *id);
-/* managed kzalloc/kfree for device drivers, no kmalloc, always use kzalloc */
-extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp);
+/* managed devm_k.alloc/kfree for device drivers */
+extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
+static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
+{
+ return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
+}
+static inline void *devm_kmalloc_array(struct device *dev,
+ size_t n, size_t size, gfp_t flags)
+{
+ if (size != 0 && n > SIZE_MAX / size)
+ return NULL;
+ return devm_kmalloc(dev, n * size, flags);
+}
+static inline void *devm_kcalloc(struct device *dev,
+ size_t n, size_t size, gfp_t flags)
+{
+ return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
+}
extern void devm_kfree(struct device *dev, void *p);
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
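devm_kzalloc() is now a wrapper around the new devm_kmalloc(), and devm_kmalloc_array()/devm_kcalloc() add overflow-checked array allocation. A sketch of a probe routine using them; the "foo" driver, its private struct and the ring size of 64 are illustrative only:

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/gfp.h>

struct foo_priv {
	u32 *ring;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Zeroed array; an n * size overflow returns NULL instead of wrapping. */
	priv->ring = devm_kcalloc(&pdev->dev, 64, sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;	/* both allocations are released automatically on unbind */
}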
@@ -1149,16 +1170,15 @@ do { \
#endif
/*
- * dev_WARN*() acts like dev_printk(), but with the key difference
- * of using a WARN/WARN_ON to get the message out, including the
- * file/line information and a backtrace.
+ * dev_WARN*() acts like dev_printk(), but with the key difference of
+ * using WARN/WARN_ONCE to include file/line information and a backtrace.
*/
#define dev_WARN(dev, format, arg...) \
- WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg);
+ WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
#define dev_WARN_ONCE(dev, condition, format, arg...) \
- WARN_ONCE(condition, "Device %s\n" format, \
- dev_driver_string(dev), ## arg)
+ WARN_ONCE(condition, "%s %s: " format, \
+ dev_driver_string(dev), dev_name(dev), ## arg)
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index f4b0aa3126f5..a68cbe59e6ad 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -29,7 +29,7 @@ typedef void (*io_notify_fn)(unsigned long error, void *context);
enum dm_io_mem_type {
DM_IO_PAGE_LIST,/* Page list */
- DM_IO_BVEC, /* Bio vector */
+ DM_IO_BIO, /* Bio vector */
DM_IO_VMA, /* Virtual memory area */
DM_IO_KMEM, /* Kernel memory */
};
@@ -41,7 +41,7 @@ struct dm_io_memory {
union {
struct page_list *pl;
- struct bio_vec *bvec;
+ struct bio *bio;
void *vma;
void *addr;
} ptr;
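dm-io switches its bio-vector memory type from a bare bio_vec pointer (DM_IO_BVEC) to a whole bio (DM_IO_BIO). A hedged sketch of a synchronous request using the new member; everything apart from .mem follows the usual dm_io() calling convention and the "foo" wrapper is hypothetical:

#include <linux/dm-io.h>
#include <linux/bio.h>
#include <linux/fs.h>

static int foo_read_region(struct dm_io_client *client, struct bio *bio,
			   struct dm_io_region *where)
{
	struct dm_io_request io_req = {
		.bi_rw		= READ,
		.mem.type	= DM_IO_BIO,	/* was DM_IO_BVEC + ptr.bvec */
		.mem.ptr.bio	= bio,
		.notify.fn	= NULL,
		.client		= client,
	};

	return dm_io(&io_req, 1, where, NULL);
}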
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 3a8d0a2af607..fd4aee29ad10 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -97,6 +97,30 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
}
#endif
+/*
+ * Set both the DMA mask and the coherent DMA mask to the same thing.
+ * Note that we don't check the return value from dma_set_coherent_mask()
+ * as the DMA API guarantees that the coherent DMA mask can be set to
+ * the same or smaller than the streaming DMA mask.
+ */
+static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+ int rc = dma_set_mask(dev, mask);
+ if (rc == 0)
+ dma_set_coherent_mask(dev, mask);
+ return rc;
+}
+
+/*
+ * Similar to the above, except it deals with the case where the device
+ * does not have dev->dma_mask appropriately setup.
+ */
+static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
+{
+ dev->dma_mask = &dev->coherent_dma_mask;
+ return dma_set_mask_and_coherent(dev, mask);
+}
+
extern u64 dma_get_required_mask(struct device *dev);
static inline unsigned int dma_get_max_seg_size(struct device *dev)
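dma_set_mask_and_coherent() collapses the common dma_set_mask() + dma_set_coherent_mask() pair into one call, and dma_coerce_mask_and_coherent() additionally points dev->dma_mask at the coherent mask for devices that lack one. A short illustrative probe fragment; the "foo" PCI driver and the 64/32-bit fallback policy are just an example:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	/* Try 64-bit DMA first, fall back to 32-bit; both masks set at once. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		return ret;
	}

	return 0;
}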
@@ -129,6 +153,13 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
return -EIO;
}
+#ifndef dma_max_pfn
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+ return *dev->dma_mask >> PAGE_SHIFT;
+}
+#endif
+
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 0bc727534108..4b460a683968 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie)
/**
* enum dma_status - DMA transaction status
- * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_COMPLETE: transaction completed
* @DMA_IN_PROGRESS: transaction not yet processed
* @DMA_PAUSED: transaction is paused
* @DMA_ERROR: transaction failed
*/
enum dma_status {
- DMA_SUCCESS,
+ DMA_COMPLETE,
DMA_IN_PROGRESS,
DMA_PAUSED,
DMA_ERROR,
@@ -979,10 +979,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
{
if (last_complete <= last_used) {
if ((cookie <= last_complete) || (cookie > last_used))
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
} else {
if ((cookie <= last_complete) && (cookie > last_used))
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
}
return DMA_IN_PROGRESS;
}
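DMA_SUCCESS is renamed to DMA_COMPLETE throughout, since a finished transfer is "complete" rather than necessarily successful. A minimal sketch of a caller polling for completion; the channel and cookie are assumed to come from an earlier dmaengine setup:

#include <linux/dmaengine.h>

static int foo_wait_for_copy(struct dma_chan *chan, dma_cookie_t cookie)
{
	/* dma_sync_wait() now reports DMA_COMPLETE for a finished descriptor. */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}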
@@ -1013,11 +1013,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index b6eb7a05d58e..f820f0a336c9 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -99,6 +99,7 @@ extern const char * dmi_get_system_info(int field);
extern const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from);
extern void dmi_scan_machine(void);
+extern void dmi_memdev_walk(void);
extern void dmi_set_dump_stack_arch_desc(void);
extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
extern int dmi_name_in_vendors(const char *str);
@@ -107,6 +108,7 @@ extern int dmi_available;
extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data);
extern bool dmi_match(enum dmi_field f, const char *str);
+extern void dmi_memdev_name(u16 handle, const char **bank, const char **device);
#else
@@ -115,6 +117,7 @@ static inline const char * dmi_get_system_info(int field) { return NULL; }
static inline const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from) { return NULL; }
static inline void dmi_scan_machine(void) { return; }
+static inline void dmi_memdev_walk(void) { }
static inline void dmi_set_dump_stack_arch_desc(void) { }
static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
{
@@ -133,6 +136,8 @@ static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data) { return -1; }
static inline bool dmi_match(enum dmi_field f, const char *str)
{ return false; }
+static inline void dmi_memdev_name(u16 handle, const char **bank,
+ const char **device) { }
static inline const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *list) { return NULL; }
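dmi_memdev_walk() and dmi_memdev_name() let error-reporting code translate an SMBIOS memory-device handle into the bank/device labels printed on the board. A hedged sketch of a consumer; the handle is assumed to come from a decoded hardware error record and "foo" is a placeholder:

#include <linux/dmi.h>
#include <linux/printk.h>

static void foo_report_dimm(u16 smbios_handle)
{
	const char *bank = NULL, *device = NULL;

	dmi_memdev_name(smbios_handle, &bank, &device);
	if (bank && device)
		pr_info("faulting DIMM: bank %s, device %s\n", bank, device);
	else
		pr_info("faulting DIMM: unknown (handle 0x%04x)\n",
			smbios_handle);
}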
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 5c6d7fbaf89e..dbdffe8d4469 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -51,7 +51,7 @@ static inline void opstate_init(void)
#define EDAC_MC_LABEL_LEN 31
/* Maximum size of the location string */
-#define LOCATION_SIZE 80
+#define LOCATION_SIZE 256
/* Defines the maximum number of labels that can be reported */
#define EDAC_MAX_LABELS 8
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 5f8f176154f7..bc5687d0f315 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -39,6 +39,8 @@
typedef unsigned long efi_status_t;
typedef u8 efi_bool_t;
typedef u16 efi_char16_t; /* UNICODE character */
+typedef u64 efi_physical_addr_t;
+typedef void *efi_handle_t;
typedef struct {
@@ -96,6 +98,7 @@ typedef struct {
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
#define EFI_PAGE_SHIFT 12
+#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
typedef struct {
u32 type;
@@ -157,11 +160,13 @@ typedef struct {
efi_table_hdr_t hdr;
void *raise_tpl;
void *restore_tpl;
- void *allocate_pages;
- void *free_pages;
- void *get_memory_map;
- void *allocate_pool;
- void *free_pool;
+ efi_status_t (*allocate_pages)(int, int, unsigned long,
+ efi_physical_addr_t *);
+ efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long);
+ efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *,
+ unsigned long *, u32 *);
+ efi_status_t (*allocate_pool)(int, unsigned long, void **);
+ efi_status_t (*free_pool)(void *);
void *create_event;
void *set_timer;
void *wait_for_event;
@@ -171,7 +176,7 @@ typedef struct {
void *install_protocol_interface;
void *reinstall_protocol_interface;
void *uninstall_protocol_interface;
- void *handle_protocol;
+ efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
void *__reserved;
void *register_protocol_notify;
void *locate_handle;
@@ -181,7 +186,7 @@ typedef struct {
void *start_image;
void *exit;
void *unload_image;
- void *exit_boot_services;
+ efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long);
void *get_next_monotonic_count;
void *stall;
void *set_watchdog_timer;
@@ -404,6 +409,12 @@ typedef struct {
unsigned long table;
} efi_config_table_t;
+typedef struct {
+ efi_guid_t guid;
+ const char *name;
+ unsigned long *ptr;
+} efi_config_table_type_t;
+
#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL)
#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30))
@@ -488,10 +499,6 @@ typedef struct {
unsigned long unload;
} efi_loaded_image_t;
-typedef struct {
- u64 revision;
- void *open_volume;
-} efi_file_io_interface_t;
typedef struct {
u64 size;
@@ -504,20 +511,30 @@ typedef struct {
efi_char16_t filename[1];
} efi_file_info_t;
-typedef struct {
+typedef struct _efi_file_handle {
u64 revision;
- void *open;
- void *close;
+ efi_status_t (*open)(struct _efi_file_handle *,
+ struct _efi_file_handle **,
+ efi_char16_t *, u64, u64);
+ efi_status_t (*close)(struct _efi_file_handle *);
void *delete;
- void *read;
+ efi_status_t (*read)(struct _efi_file_handle *, unsigned long *,
+ void *);
void *write;
void *get_position;
void *set_position;
- void *get_info;
+ efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *,
+ unsigned long *, void *);
void *set_info;
void *flush;
} efi_file_handle_t;
+typedef struct _efi_file_io_interface {
+ u64 revision;
+ int (*open_volume)(struct _efi_file_io_interface *,
+ efi_file_handle_t **);
+} efi_file_io_interface_t;
+
#define EFI_FILE_MODE_READ 0x0000000000000001
#define EFI_FILE_MODE_WRITE 0x0000000000000002
#define EFI_FILE_MODE_CREATE 0x8000000000000000
@@ -552,6 +569,7 @@ extern struct efi {
efi_get_next_high_mono_count_t *get_next_high_mono_count;
efi_reset_system_t *reset_system;
efi_set_virtual_address_map_t *set_virtual_address_map;
+ struct efi_memory_map *memmap;
} efi;
static inline int
@@ -587,6 +605,7 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
}
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
+extern int efi_config_init(efi_config_table_type_t *arch_tables);
extern u64 efi_get_iobase (void);
extern u32 efi_mem_type (unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
@@ -784,6 +803,13 @@ struct efivar_entry {
struct kobject kobj;
};
+
+struct efi_simple_text_output_protocol {
+ void *reset;
+ efi_status_t (*output_string)(void *, void *);
+ void *test_string;
+};
+
extern struct list_head efivar_sysfs_list;
static inline void
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index d8b512496e50..fc4a9aa7dd82 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -28,27 +28,24 @@
#include <asm/unaligned.h>
#ifdef __KERNEL__
-extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
+__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
-extern int eth_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type,
- const void *daddr, const void *saddr, unsigned len);
-extern int eth_rebuild_header(struct sk_buff *skb);
-extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
-extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
-extern void eth_header_cache_update(struct hh_cache *hh,
- const struct net_device *dev,
- const unsigned char *haddr);
-extern int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
-extern void eth_commit_mac_addr_change(struct net_device *dev, void *p);
-extern int eth_mac_addr(struct net_device *dev, void *p);
-extern int eth_change_mtu(struct net_device *dev, int new_mtu);
-extern int eth_validate_addr(struct net_device *dev);
-
-
-
-extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
+ const void *daddr, const void *saddr, unsigned len);
+int eth_rebuild_header(struct sk_buff *skb);
+int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
+ __be16 type);
+void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
+ const unsigned char *haddr);
+int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
+void eth_commit_mac_addr_change(struct net_device *dev, void *p);
+int eth_mac_addr(struct net_device *dev, void *p);
+int eth_change_mtu(struct net_device *dev, int new_mtu);
+int eth_validate_addr(struct net_device *dev);
+
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
unsigned int rxqs);
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index ffac70aab3e9..70c4836e4a9f 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -792,4 +792,16 @@ extern int fb_find_mode(struct fb_var_screeninfo *var,
const struct fb_videomode *default_mode,
unsigned int default_bpp);
+/* Convenience logging macros */
+#define fb_err(fb_info, fmt, ...) \
+ pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_notice(info, fmt, ...) \
+ pr_notice("fb%d: " fmt, (info)->node, ##__VA_ARGS__)
+#define fb_warn(fb_info, fmt, ...) \
+ pr_warn("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_info(fb_info, fmt, ...) \
+ pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_dbg(fb_info, fmt, ...) \
+ pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+
#endif /* _LINUX_FB_H */
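The new fb_err()/fb_warn()/fb_dbg() helpers prefix messages with the fbdev node number. An illustrative check_var() fragment using them; the driver, the supported depths and the 32bpp fallback are invented:

#include <linux/fb.h>

static int foo_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	if (var->bits_per_pixel != 16 && var->bits_per_pixel != 32) {
		fb_warn(info, "unsupported depth %u, forcing 32bpp\n",
			var->bits_per_pixel);
		var->bits_per_pixel = 32;
	}

	fb_dbg(info, "validated %ux%u-%u\n", var->xres, var->yres,
	       var->bits_per_pixel);
	return 0;
}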
diff --git a/include/linux/fcdevice.h b/include/linux/fcdevice.h
index e460ef831984..5009fa16b5d8 100644
--- a/include/linux/fcdevice.h
+++ b/include/linux/fcdevice.h
@@ -27,7 +27,7 @@
#include <linux/if_fc.h>
#ifdef __KERNEL__
-extern struct net_device *alloc_fcdev(int sizeof_priv);
+struct net_device *alloc_fcdev(int sizeof_priv);
#endif
#endif /* _LINUX_FCDEVICE_H */
diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h
index 155bafd9e886..9a79f0106da1 100644
--- a/include/linux/fddidevice.h
+++ b/include/linux/fddidevice.h
@@ -25,10 +25,9 @@
#include <linux/if_fddi.h>
#ifdef __KERNEL__
-extern __be16 fddi_type_trans(struct sk_buff *skb,
- struct net_device *dev);
-extern int fddi_change_mtu(struct net_device *dev, int new_mtu);
-extern struct net_device *alloc_fddidev(int sizeof_priv);
+__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int fddi_change_mtu(struct net_device *dev, int new_mtu);
+struct net_device *alloc_fddidev(int sizeof_priv);
#endif
#endif /* _LINUX_FDDIDEVICE_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a6ac84871d6d..ff4e40cd45b1 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -6,6 +6,7 @@
#include <linux/atomic.h>
#include <linux/compat.h>
+#include <linux/workqueue.h>
#include <uapi/linux/filter.h>
#ifdef CONFIG_COMPAT
@@ -25,15 +26,19 @@ struct sk_filter
{
atomic_t refcnt;
unsigned int len; /* Number of filter blocks */
+ struct rcu_head rcu;
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct sock_filter *filter);
- struct rcu_head rcu;
- struct sock_filter insns[0];
+ union {
+ struct sock_filter insns[0];
+ struct work_struct work;
+ };
};
-static inline unsigned int sk_filter_len(const struct sk_filter *fp)
+static inline unsigned int sk_filter_size(unsigned int proglen)
{
- return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
+ return max(sizeof(struct sk_filter),
+ offsetof(struct sk_filter, insns[proglen]));
}
extern int sk_filter(struct sock *sk, struct sk_buff *skb);
@@ -67,11 +72,13 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
}
#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
#else
+#include <linux/slab.h>
static inline void bpf_jit_compile(struct sk_filter *fp)
{
}
static inline void bpf_jit_free(struct sk_filter *fp)
{
+ kfree(fp);
}
#define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
#endif
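sk_filter_len() is replaced by sk_filter_size(), which sizes the allocation from the program length while accounting for insns[] now sharing a union with the deferred-free work_struct. A hedged allocation sketch; the "foo" helper and its calling convention are made up, and a real user would also run sk_chk_filter() and bpf_jit_compile():

#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/string.h>

static struct sk_filter *foo_alloc_filter(const struct sock_filter *insns,
					  unsigned int len)
{
	struct sk_filter *fp = kzalloc(sk_filter_size(len), GFP_KERNEL);

	if (!fp)
		return NULL;

	atomic_set(&fp->refcnt, 1);
	fp->len = len;
	memcpy(fp->insns, insns, len * sizeof(struct sock_filter));
	return fp;
}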
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3f40547ba191..4c743ed2e46e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -182,8 +182,6 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define READ 0
#define WRITE RW_MASK
#define READA RWA_MASK
-#define KERNEL_READ (READ|REQ_KERNEL)
-#define KERNEL_WRITE (WRITE|REQ_KERNEL)
#define READ_SYNC (READ | REQ_SYNC)
#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
@@ -291,25 +289,108 @@ struct address_space;
struct writeback_control;
struct iov_iter {
- const struct iovec *iov;
+ struct iov_iter_ops *ops;
+ unsigned long data;
unsigned long nr_segs;
size_t iov_offset;
size_t count;
};
-size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_from_user(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes);
-void iov_iter_advance(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-size_t iov_iter_single_seg_count(const struct iov_iter *i);
+struct iov_iter_ops {
+ size_t (*ii_copy_to_user_atomic)(struct page *, struct iov_iter *,
+ unsigned long, size_t);
+ size_t (*ii_copy_to_user)(struct page *, struct iov_iter *,
+ unsigned long, size_t, int);
+ size_t (*ii_copy_from_user_atomic)(struct page *, struct iov_iter *,
+ unsigned long, size_t);
+ size_t (*ii_copy_from_user)(struct page *, struct iov_iter *,
+ unsigned long, size_t);
+ void (*ii_advance)(struct iov_iter *, size_t);
+ int (*ii_fault_in_readable)(struct iov_iter *, size_t);
+ size_t (*ii_single_seg_count)(const struct iov_iter *);
+ int (*ii_shorten)(struct iov_iter *, size_t);
+};
+
+static inline size_t iov_iter_copy_to_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ return i->ops->ii_copy_to_user_atomic(page, i, offset, bytes);
+}
+static inline size_t __iov_iter_copy_to_user(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ return i->ops->ii_copy_to_user(page, i, offset, bytes, 0);
+}
+static inline size_t iov_iter_copy_to_user(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ return i->ops->ii_copy_to_user(page, i, offset, bytes, 1);
+}
+static inline size_t iov_iter_copy_from_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ return i->ops->ii_copy_from_user_atomic(page, i, offset, bytes);
+}
+static inline size_t iov_iter_copy_from_user(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ return i->ops->ii_copy_from_user(page, i, offset, bytes);
+}
+static inline void iov_iter_advance(struct iov_iter *i, size_t bytes)
+{
+ return i->ops->ii_advance(i, bytes);
+}
+static inline int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+ return i->ops->ii_fault_in_readable(i, bytes);
+}
+static inline size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+ return i->ops->ii_single_seg_count(i);
+}
+static inline int iov_iter_shorten(struct iov_iter *i, size_t count)
+{
+ return i->ops->ii_shorten(i, count);
+}
+
+#ifdef CONFIG_BLOCK
+extern struct iov_iter_ops ii_bvec_ops;
+
+struct bio_vec;
+static inline void iov_iter_init_bvec(struct iov_iter *i,
+ struct bio_vec *bvec,
+ unsigned long nr_segs,
+ size_t count, size_t written)
+{
+ i->ops = &ii_bvec_ops;
+ i->data = (unsigned long)bvec;
+ i->nr_segs = nr_segs;
+ i->iov_offset = 0;
+ i->count = count + written;
+
+ iov_iter_advance(i, written);
+}
+
+static inline int iov_iter_has_bvec(struct iov_iter *i)
+{
+ return i->ops == &ii_bvec_ops;
+}
+
+static inline struct bio_vec *iov_iter_bvec(struct iov_iter *i)
+{
+ BUG_ON(!iov_iter_has_bvec(i));
+ return (struct bio_vec *)i->data;
+}
+#endif
+
+extern struct iov_iter_ops ii_iovec_ops;
static inline void iov_iter_init(struct iov_iter *i,
const struct iovec *iov, unsigned long nr_segs,
size_t count, size_t written)
{
- i->iov = iov;
+ i->ops = &ii_iovec_ops;
+ i->data = (unsigned long)iov;
i->nr_segs = nr_segs;
i->iov_offset = 0;
i->count = count + written;
@@ -317,6 +398,17 @@ static inline void iov_iter_init(struct iov_iter *i,
iov_iter_advance(i, written);
}
+static inline int iov_iter_has_iovec(struct iov_iter *i)
+{
+ return i->ops == &ii_iovec_ops;
+}
+
+static inline struct iovec *iov_iter_iovec(struct iov_iter *i)
+{
+ BUG_ON(!iov_iter_has_iovec(i));
+ return (struct iovec *)i->data;
+}
+
static inline size_t iov_iter_count(struct iov_iter *i)
{
return i->count;
@@ -369,8 +461,8 @@ struct address_space_operations {
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, gfp_t);
void (*freepage)(struct page *);
- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs);
+ ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter,
+ loff_t offset);
int (*get_xip_mem)(struct address_space *, pgoff_t, int,
void **, unsigned long *);
/*
@@ -1529,7 +1621,9 @@ struct file_operations {
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ ssize_t (*read_iter) (struct kiocb *, struct iov_iter *, loff_t);
ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ ssize_t (*write_iter) (struct kiocb *, struct iov_iter *, loff_t);
int (*iterate) (struct file *, struct dir_context *);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
@@ -1554,6 +1648,18 @@ struct file_operations {
int (*show_fdinfo)(struct seq_file *m, struct file *f);
};
+static inline int file_readable(struct file *filp)
+{
+ return filp && (filp->f_op->read || filp->f_op->aio_read ||
+ filp->f_op->read_iter);
+}
+
+static inline int file_writable(struct file *filp)
+{
+ return filp && (filp->f_op->write || filp->f_op->aio_write ||
+ filp->f_op->write_iter);
+}
+
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
void * (*follow_link) (struct dentry *, struct nameidata *);
@@ -2292,6 +2398,11 @@ static inline void allow_write_access(struct file *file)
if (file)
atomic_inc(&file_inode(file)->i_writecount);
}
+static inline bool inode_is_open_for_write(const struct inode *inode)
+{
+ return atomic_read(&inode->i_writecount) > 0;
+}
+
#ifdef CONFIG_IMA
static inline void i_readcount_dec(struct inode *inode)
{
@@ -2398,25 +2509,36 @@ extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
- unsigned long size, pgoff_t pgoff);
-extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
+ unsigned long size, pgoff_t pgoff);
+extern int file_read_iter_actor(read_descriptor_t *desc, struct page *page,
+ unsigned long offset, unsigned long size);
int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *,
+ loff_t);
extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long,
loff_t *);
+extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *,
+ loff_t *);
extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *,
+ loff_t);
extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
unsigned long *, loff_t, loff_t *, size_t, size_t);
+extern ssize_t generic_file_direct_write_iter(struct kiocb *, struct iov_iter *,
+ loff_t, loff_t *, size_t);
extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
unsigned long, loff_t, loff_t *, size_t, ssize_t);
+extern ssize_t generic_file_buffered_write_iter(struct kiocb *,
+ struct iov_iter *, loff_t, loff_t *, size_t, ssize_t);
extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
extern int generic_segment_checks(const struct iovec *iov,
unsigned long *nr_segs, size_t *count, int access_flags);
/* fs/block_dev.c */
-extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos);
+extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos);
extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
int datasync);
extern void block_sync_page(struct page *page);
@@ -2473,16 +2595,16 @@ enum {
void dio_end_io(struct bio *bio, int error);
ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, const struct iovec *iov, loff_t offset,
- unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
- dio_submit_t submit_io, int flags);
+ struct block_device *bdev, struct iov_iter *iter, loff_t offset,
+ get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io,
+ int flags);
static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
- struct inode *inode, const struct iovec *iov, loff_t offset,
- unsigned long nr_segs, get_block_t get_block)
+ struct inode *inode, struct iov_iter *iter, loff_t offset,
+ get_block_t get_block)
{
- return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
- offset, nr_segs, get_block, NULL, NULL,
+ return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+ offset, get_block, NULL, NULL,
DIO_LOCKING | DIO_SKIP_HOLES);
}
#endif
@@ -2502,6 +2624,7 @@ extern int __page_symlink(struct inode *inode, const char *symname, int len,
int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
+extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
extern int generic_readlink(struct dentry *, char __user *, int);
extern void generic_fillattr(struct inode *, struct kstat *);
extern int vfs_getattr(struct path *, struct kstat *);
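The iov_iter above is turned into an ops-based abstraction so the same iterator code can walk either user iovecs or kernel bio_vecs, with ->read_iter()/->write_iter() file operations and iter-based direct_IO built on top. A minimal sketch of constructing an iovec-backed iterator and copying into a page; the offsets, lengths and "foo" helper are illustrative:

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/mm_types.h>

static size_t foo_fill_page(struct page *page, const struct iovec *iov,
			    unsigned long nr_segs, size_t count)
{
	struct iov_iter iter;

	/* Selects ii_iovec_ops; a bio_vec source would use iov_iter_init_bvec(). */
	iov_iter_init(&iter, iov, nr_segs, count, 0);

	return iov_iter_copy_from_user(page, &iter, 0, count);
}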
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 7823e9ef995e..771484993ca7 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -308,36 +308,6 @@ struct fscache_cache_ops {
void (*dissociate_pages)(struct fscache_cache *cache);
};
-/*
- * data file or index object cookie
- * - a file will only appear in one cache
- * - a request to cache a file may or may not be honoured, subject to
- * constraints such as disk space
- * - indices are created on disk just-in-time
- */
-struct fscache_cookie {
- atomic_t usage; /* number of users of this cookie */
- atomic_t n_children; /* number of children of this cookie */
- atomic_t n_active; /* number of active users of netfs ptrs */
- spinlock_t lock;
- spinlock_t stores_lock; /* lock on page store tree */
- struct hlist_head backing_objects; /* object(s) backing this file/index */
- const struct fscache_cookie_def *def; /* definition */
- struct fscache_cookie *parent; /* parent of this entry */
- void *netfs_data; /* back pointer to netfs */
- struct radix_tree_root stores; /* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
-
- unsigned long flags;
-#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
-#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
-#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
-#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
-#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
-#define FSCACHE_COOKIE_RETIRED 5 /* T if cookie was retired */
-};
-
extern struct fscache_cookie fscache_fsdef_index;
/*
@@ -400,6 +370,7 @@ struct fscache_object {
#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */
#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */
#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
+#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */
@@ -511,6 +482,11 @@ static inline void fscache_end_io(struct fscache_retrieval *op,
op->end_io_func(page, op->context, error);
}
+static inline void __fscache_use_cookie(struct fscache_cookie *cookie)
+{
+ atomic_inc(&cookie->n_active);
+}
+
/**
* fscache_use_cookie - Request usage of cookie attached to an object
* @object: Object description
@@ -524,6 +500,16 @@ static inline bool fscache_use_cookie(struct fscache_object *object)
return atomic_inc_not_zero(&cookie->n_active) != 0;
}
+static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
+{
+ return atomic_dec_and_test(&cookie->n_active);
+}
+
+static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
+{
+ wake_up_atomic_t(&cookie->n_active);
+}
+
/**
* fscache_unuse_cookie - Cease usage of cookie attached to an object
* @object: Object description
@@ -534,8 +520,8 @@ static inline bool fscache_use_cookie(struct fscache_object *object)
static inline void fscache_unuse_cookie(struct fscache_object *object)
{
struct fscache_cookie *cookie = object->cookie;
- if (atomic_dec_and_test(&cookie->n_active))
- wake_up_atomic_t(&cookie->n_active);
+ if (__fscache_unuse_cookie(cookie))
+ __fscache_wake_unused_cookie(cookie);
}
/*
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 19b46458e4e8..115bb81912cc 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -167,6 +167,42 @@ struct fscache_netfs {
};
/*
+ * data file or index object cookie
+ * - a file will only appear in one cache
+ * - a request to cache a file may or may not be honoured, subject to
+ * constraints such as disk space
+ * - indices are created on disk just-in-time
+ */
+struct fscache_cookie {
+ atomic_t usage; /* number of users of this cookie */
+ atomic_t n_children; /* number of children of this cookie */
+ atomic_t n_active; /* number of active users of netfs ptrs */
+ spinlock_t lock;
+ spinlock_t stores_lock; /* lock on page store tree */
+ struct hlist_head backing_objects; /* object(s) backing this file/index */
+ const struct fscache_cookie_def *def; /* definition */
+ struct fscache_cookie *parent; /* parent of this entry */
+ void *netfs_data; /* back pointer to netfs */
+ struct radix_tree_root stores; /* pages to be stored on this cookie */
+#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
+#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
+
+ unsigned long flags;
+#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
+#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
+#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
+#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
+#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
+#define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */
+#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */
+};
+
+static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
+{
+ return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+}
+
+/*
* slow-path functions for when there is actually caching available, and the
* netfs does actually have a valid token
* - these are not to be called directly
@@ -181,8 +217,8 @@ extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
extern struct fscache_cookie *__fscache_acquire_cookie(
struct fscache_cookie *,
const struct fscache_cookie_def *,
- void *);
-extern void __fscache_relinquish_cookie(struct fscache_cookie *, int);
+ void *, bool);
+extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
extern int __fscache_check_consistency(struct fscache_cookie *);
extern void __fscache_update_cookie(struct fscache_cookie *);
extern int __fscache_attr_changed(struct fscache_cookie *);
@@ -211,6 +247,9 @@ extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
struct inode *);
extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
struct list_head *pages);
+extern void __fscache_disable_cookie(struct fscache_cookie *, bool);
+extern void __fscache_enable_cookie(struct fscache_cookie *,
+ bool (*)(void *), void *);
/**
* fscache_register_netfs - Register a filesystem as desiring caching services
@@ -289,6 +328,7 @@ void fscache_release_cache_tag(struct fscache_cache_tag *tag)
* @def: A description of the cache object, including callback operations
* @netfs_data: An arbitrary piece of data to be kept in the cookie to
* represent the cache object to the netfs
+ * @enable: Whether or not to enable a data cookie immediately
*
* This function is used to inform FS-Cache about part of an index hierarchy
* that can be used to locate files. This is done by requesting a cookie for
@@ -301,10 +341,12 @@ static inline
struct fscache_cookie *fscache_acquire_cookie(
struct fscache_cookie *parent,
const struct fscache_cookie_def *def,
- void *netfs_data)
+ void *netfs_data,
+ bool enable)
{
- if (fscache_cookie_valid(parent))
- return __fscache_acquire_cookie(parent, def, netfs_data);
+ if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
+ return __fscache_acquire_cookie(parent, def, netfs_data,
+ enable);
else
return NULL;
}
@@ -322,7 +364,7 @@ struct fscache_cookie *fscache_acquire_cookie(
* description.
*/
static inline
-void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
if (fscache_cookie_valid(cookie))
__fscache_relinquish_cookie(cookie, retire);
@@ -341,7 +383,7 @@ void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
static inline
int fscache_check_consistency(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
return __fscache_check_consistency(cookie);
else
return 0;
@@ -360,7 +402,7 @@ int fscache_check_consistency(struct fscache_cookie *cookie)
static inline
void fscache_update_cookie(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
__fscache_update_cookie(cookie);
}
@@ -407,7 +449,7 @@ void fscache_unpin_cookie(struct fscache_cookie *cookie)
static inline
int fscache_attr_changed(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
return __fscache_attr_changed(cookie);
else
return -ENOBUFS;
@@ -429,7 +471,7 @@ int fscache_attr_changed(struct fscache_cookie *cookie)
static inline
void fscache_invalidate(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
__fscache_invalidate(cookie);
}
@@ -503,7 +545,7 @@ int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
void *context,
gfp_t gfp)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
return __fscache_read_or_alloc_page(cookie, page, end_io_func,
context, gfp);
else
@@ -554,7 +596,7 @@ int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
void *context,
gfp_t gfp)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
return __fscache_read_or_alloc_pages(cookie, mapping, pages,
nr_pages, end_io_func,
context, gfp);
@@ -585,7 +627,7 @@ int fscache_alloc_page(struct fscache_cookie *cookie,
struct page *page,
gfp_t gfp)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
return __fscache_alloc_page(cookie, page, gfp);
else
return -ENOBUFS;
@@ -634,7 +676,7 @@ int fscache_write_page(struct fscache_cookie *cookie,
struct page *page,
gfp_t gfp)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
return __fscache_write_page(cookie, page, gfp);
else
return -ENOBUFS;
@@ -744,4 +786,47 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
__fscache_uncache_all_inode_pages(cookie, inode);
}
+/**
+ * fscache_disable_cookie - Disable a cookie
+ * @cookie: The cookie representing the cache object
+ * @invalidate: Invalidate the backing object
+ *
+ * Disable a cookie from accepting further alloc, read, write, invalidate,
+ * update or acquire operations. Outstanding operations can still be waited
+ * upon, pages can still be uncached and the cookie can still be relinquished.
+ *
+ * This will not return until all outstanding operations have completed.
+ *
+ * If @invalidate is set, then the backing object will be invalidated and
+ * detached, otherwise it will just be detached.
+ */
+static inline
+void fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
+{
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+ __fscache_disable_cookie(cookie, invalidate);
+}
+
+/**
+ * fscache_enable_cookie - Reenable a cookie
+ * @cookie: The cookie representing the cache object
+ * @can_enable: A function to permit enablement once lock is held
+ * @data: Data for can_enable()
+ *
+ * Reenable a previously disabled cookie, allowing it to accept further alloc,
+ * read, write, invalidate, update or acquire operations. An attempt will be
+ * made to immediately reattach the cookie to a backing object.
+ *
+ * The can_enable() function is called (if not NULL) once the enablement lock
+ * is held to rule on whether enablement is still permitted to go ahead.
+ */
+static inline
+void fscache_enable_cookie(struct fscache_cookie *cookie,
+ bool (*can_enable)(void *data),
+ void *data)
+{
+ if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
+ __fscache_enable_cookie(cookie, can_enable, data);
+}
+
#endif /* _LINUX_FSCACHE_H */
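
A minimal sketch of how a netfs might pair the new enable/disable calls around a cookie it already holds. The my_netfs_* names, the inode structure and the can-enable test are placeholders; only the fscache_*_cookie() calls come from the declarations above.

/* Park the cache object while caching is not wanted, re-attach it later.
 * Assumes inode->cookie was obtained earlier via fscache_acquire_cookie().
 */
static bool my_netfs_can_enable(void *data)
{
	struct my_netfs_inode *inode = data;

	/* Placeholder check: only re-enable while the inode is still live. */
	return !my_netfs_inode_is_dying(inode);
}

static void my_netfs_stop_caching(struct my_netfs_inode *inode)
{
	/* Waits for outstanding ops; passing true would also invalidate
	 * the backing object before detaching it. */
	fscache_disable_cookie(inode->cookie, false);
}

static void my_netfs_start_caching(struct my_netfs_inode *inode)
{
	fscache_enable_cookie(inode->cookie, my_netfs_can_enable, inode);
}
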
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9f15c0064c50..ec85d48619e1 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -721,6 +721,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];
+#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 1e041063b226..d9cf963ac832 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -33,7 +33,7 @@ extern void rcu_nmi_exit(void);
#define __irq_enter() \
do { \
account_irq_enter_time(current); \
- add_preempt_count(HARDIRQ_OFFSET); \
+ preempt_count_add(HARDIRQ_OFFSET); \
trace_hardirq_enter(); \
} while (0)
@@ -49,7 +49,7 @@ extern void irq_enter(void);
do { \
trace_hardirq_exit(); \
account_irq_exit_time(current); \
- sub_preempt_count(HARDIRQ_OFFSET); \
+ preempt_count_sub(HARDIRQ_OFFSET); \
} while (0)
/*
@@ -62,7 +62,7 @@ extern void irq_exit(void);
lockdep_off(); \
ftrace_nmi_enter(); \
BUG_ON(in_nmi()); \
- add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+ preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
rcu_nmi_enter(); \
trace_hardirq_enter(); \
} while (0)
@@ -72,7 +72,7 @@ extern void irq_exit(void);
trace_hardirq_exit(); \
rcu_nmi_exit(); \
BUG_ON(!in_nmi()); \
- sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+ preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
ftrace_nmi_exit(); \
lockdep_on(); \
} while (0)
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index a9df51f5d54c..519b6e2d769e 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -174,6 +174,21 @@ static inline void hash_del_rcu(struct hlist_node *node)
member)
/**
+ * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
+ * to the same bucket in an RCU-enabled hashtable
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ *
+ * This is the same as hash_for_each_possible_rcu() except that it does
+ * not do any RCU debugging or tracing.
+ */
+#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \
+ hlist_for_each_entry_rcu_notrace(obj, \
+ &name[hash_min(key, HASH_BITS(name))], member)
+
+/**
* hash_for_each_possible_safe - iterate over all possible objects hashing to the
* same bucket safe against removals
* @name: hashtable to iterate
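
As a rough illustration of the new _notrace iterator (not part of the patch), a lookup that must stay invisible to the tracer could look like the sketch below; my_obj, my_table and my_lookup are invented names, and the caller is assumed to hold rcu_read_lock().

struct my_obj {
	int key;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(my_table, 4);	/* 16 buckets */

/* RCU lookup with no RCU debugging/tracing, e.g. usable from function
 * tracing paths where the traced variant could recurse.
 */
static struct my_obj *my_lookup(int key)
{
	struct my_obj *obj;

	hash_for_each_possible_rcu_notrace(my_table, obj, node, key)
		if (obj->key == key)
			return obj;
	return NULL;
}
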
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index f148e4908410..8ec23fb0b412 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -31,11 +31,11 @@ struct hippi_cb {
__u32 ifield;
};
-extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
-extern int hippi_change_mtu(struct net_device *dev, int new_mtu);
-extern int hippi_mac_addr(struct net_device *dev, void *p);
-extern int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
-extern struct net_device *alloc_hippi_dev(int sizeof_priv);
+__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int hippi_change_mtu(struct net_device *dev, int new_mtu);
+int hippi_mac_addr(struct net_device *dev, void *p);
+int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
+struct net_device *alloc_hippi_dev(int sizeof_priv);
#endif
#endif /* _LINUX_HIPPIDEVICE_H */
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
new file mode 100644
index 000000000000..f5b9b87ac9a9
--- /dev/null
+++ b/include/linux/host1x.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __LINUX_HOST1X_H
+#define __LINUX_HOST1X_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+enum host1x_class {
+ HOST1X_CLASS_HOST1X = 0x1,
+ HOST1X_CLASS_GR2D = 0x51,
+ HOST1X_CLASS_GR2D_SB = 0x52,
+ HOST1X_CLASS_GR3D = 0x60,
+};
+
+struct host1x_client;
+
+struct host1x_client_ops {
+ int (*init)(struct host1x_client *client);
+ int (*exit)(struct host1x_client *client);
+};
+
+struct host1x_client {
+ struct list_head list;
+ struct device *parent;
+ struct device *dev;
+
+ const struct host1x_client_ops *ops;
+
+ enum host1x_class class;
+ struct host1x_channel *channel;
+
+ struct host1x_syncpt **syncpts;
+ unsigned int num_syncpts;
+};
+
+/*
+ * host1x buffer objects
+ */
+
+struct host1x_bo;
+struct sg_table;
+
+struct host1x_bo_ops {
+ struct host1x_bo *(*get)(struct host1x_bo *bo);
+ void (*put)(struct host1x_bo *bo);
+ dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
+ void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+ void *(*mmap)(struct host1x_bo *bo);
+ void (*munmap)(struct host1x_bo *bo, void *addr);
+ void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
+ void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
+};
+
+struct host1x_bo {
+ const struct host1x_bo_ops *ops;
+};
+
+static inline void host1x_bo_init(struct host1x_bo *bo,
+ const struct host1x_bo_ops *ops)
+{
+ bo->ops = ops;
+}
+
+static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
+{
+ return bo->ops->get(bo);
+}
+
+static inline void host1x_bo_put(struct host1x_bo *bo)
+{
+ bo->ops->put(bo);
+}
+
+static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
+ struct sg_table **sgt)
+{
+ return bo->ops->pin(bo, sgt);
+}
+
+static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+{
+ bo->ops->unpin(bo, sgt);
+}
+
+static inline void *host1x_bo_mmap(struct host1x_bo *bo)
+{
+ return bo->ops->mmap(bo);
+}
+
+static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
+{
+ bo->ops->munmap(bo, addr);
+}
+
+static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
+{
+ return bo->ops->kmap(bo, pagenum);
+}
+
+static inline void host1x_bo_kunmap(struct host1x_bo *bo,
+ unsigned int pagenum, void *addr)
+{
+ bo->ops->kunmap(bo, pagenum, addr);
+}
+
+/*
+ * host1x syncpoints
+ */
+
+#define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0)
+#define HOST1X_SYNCPT_HAS_BASE (1 << 1)
+
+struct host1x_syncpt_base;
+struct host1x_syncpt;
+struct host1x;
+
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+u32 host1x_syncpt_id(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
+int host1x_syncpt_incr(struct host1x_syncpt *sp);
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
+ u32 *value);
+struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+ unsigned long flags);
+void host1x_syncpt_free(struct host1x_syncpt *sp);
+
+struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
+u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
+
+/*
+ * host1x channel
+ */
+
+struct host1x_channel;
+struct host1x_job;
+
+struct host1x_channel *host1x_channel_request(struct device *dev);
+void host1x_channel_free(struct host1x_channel *channel);
+struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_put(struct host1x_channel *channel);
+int host1x_job_submit(struct host1x_job *job);
+
+/*
+ * host1x job
+ */
+
+struct host1x_reloc {
+ struct host1x_bo *cmdbuf;
+ u32 cmdbuf_offset;
+ struct host1x_bo *target;
+ u32 target_offset;
+ u32 shift;
+ u32 pad;
+};
+
+struct host1x_job {
+ /* When refcount goes to zero, job can be freed */
+ struct kref ref;
+
+ /* List entry */
+ struct list_head list;
+
+ /* Channel where job is submitted to */
+ struct host1x_channel *channel;
+
+ u32 client;
+
+ /* Gathers and their memory */
+ struct host1x_job_gather *gathers;
+ unsigned int num_gathers;
+
+ /* Wait checks to be processed at submit time */
+ struct host1x_waitchk *waitchk;
+ unsigned int num_waitchk;
+ u32 waitchk_mask;
+
+ /* Array of handles to be pinned & unpinned */
+ struct host1x_reloc *relocarray;
+ unsigned int num_relocs;
+ struct host1x_job_unpin_data *unpins;
+ unsigned int num_unpins;
+
+ dma_addr_t *addr_phys;
+ dma_addr_t *gather_addr_phys;
+ dma_addr_t *reloc_addr_phys;
+
+ /* Sync point id, number of increments and end related to the submit */
+ u32 syncpt_id;
+ u32 syncpt_incrs;
+ u32 syncpt_end;
+
+ /* Maximum time to wait for this job */
+ unsigned int timeout;
+
+ /* Index and number of slots used in the push buffer */
+ unsigned int first_get;
+ unsigned int num_slots;
+
+ /* Copy of gathers */
+ size_t gather_copy_size;
+ dma_addr_t gather_copy;
+ u8 *gather_copy_mapped;
+
+ /* Check if register is marked as an address reg */
+ int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
+
+ /* Request a SETCLASS to this class */
+ u32 class;
+
+ /* Add a channel wait for previous ops to complete */
+ bool serialize;
+};
+
+struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
+ u32 num_cmdbufs, u32 num_relocs,
+ u32 num_waitchks);
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
+ u32 words, u32 offset);
+struct host1x_job *host1x_job_get(struct host1x_job *job);
+void host1x_job_put(struct host1x_job *job);
+int host1x_job_pin(struct host1x_job *job, struct device *dev);
+void host1x_job_unpin(struct host1x_job *job);
+
+/*
+ * subdevice probe infrastructure
+ */
+
+struct host1x_device;
+
+struct host1x_driver {
+ const struct of_device_id *subdevs;
+ struct list_head list;
+ const char *name;
+
+ int (*probe)(struct host1x_device *device);
+ int (*remove)(struct host1x_device *device);
+};
+
+int host1x_driver_register(struct host1x_driver *driver);
+void host1x_driver_unregister(struct host1x_driver *driver);
+
+struct host1x_device {
+ struct host1x_driver *driver;
+ struct list_head list;
+ struct device dev;
+
+ struct mutex subdevs_lock;
+ struct list_head subdevs;
+ struct list_head active;
+
+ struct mutex clients_lock;
+ struct list_head clients;
+};
+
+static inline struct host1x_device *to_host1x_device(struct device *dev)
+{
+ return container_of(dev, struct host1x_device, dev);
+}
+
+int host1x_device_init(struct host1x_device *device);
+int host1x_device_exit(struct host1x_device *device);
+
+int host1x_client_register(struct host1x_client *client);
+int host1x_client_unregister(struct host1x_client *client);
+
+#endif
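
A hedged sketch of how a client driver might combine the syncpoint and job interfaces declared in this new header; the my_* names, the 100ms timeout and the assumption that job->syncpt_end carries the fence value after submission are illustrative, not taken from the patch.

/* Request a client-managed syncpoint, submit one gather and wait for the
 * single increment the job is expected to perform.
 */
static int my_submit_and_wait(struct device *dev, struct host1x_channel *ch,
			      struct host1x_bo *cmdbuf, u32 words)
{
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	u32 value;
	int err;

	sp = host1x_syncpt_request(dev, HOST1X_SYNCPT_CLIENT_MANAGED);
	if (!sp)
		return -ENOMEM;

	job = host1x_job_alloc(ch, 1, 0, 0);	/* 1 cmdbuf, no relocs/waitchks */
	if (!job) {
		host1x_syncpt_free(sp);
		return -ENOMEM;
	}

	job->syncpt_id = host1x_syncpt_id(sp);
	job->syncpt_incrs = 1;
	host1x_job_add_gather(job, cmdbuf, words, 0);

	err = host1x_job_pin(job, dev);
	if (!err)
		err = host1x_job_submit(job);
	if (!err)
		/* Assumption: syncpt_end holds the threshold after submit. */
		err = host1x_syncpt_wait(sp, job->syncpt_end,
					 msecs_to_jiffies(100), &value);

	host1x_job_unpin(job);
	host1x_job_put(job);
	host1x_syncpt_free(sp);
	return err;
}
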
diff --git a/include/linux/hwmon-vid.h b/include/linux/hwmon-vid.h
index f346e4d5381c..da0a680e2f6d 100644
--- a/include/linux/hwmon-vid.h
+++ b/include/linux/hwmon-vid.h
@@ -38,7 +38,7 @@ static inline int vid_to_reg(int val, u8 vrm)
return ((val >= 1100) && (val <= 1850) ?
((18499 - val * 10) / 25 + 5) / 10 : -1);
default:
- return -1;
+ return -EINVAL;
}
}
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index b2514f70d591..09354f6c1d63 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -15,9 +15,19 @@
#define _HWMON_H_
struct device;
+struct attribute_group;
struct device *hwmon_device_register(struct device *dev);
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+ void *drvdata,
+ const struct attribute_group **groups);
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+ void *drvdata,
+ const struct attribute_group **groups);
void hwmon_device_unregister(struct device *dev);
+void devm_hwmon_device_unregister(struct device *dev);
#endif
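
For the new registration helpers, a probe function might look roughly like the following; my_sensor_data and my_groups (a NULL-terminated attribute_group pointer array built elsewhere) are invented for the example, and the sysfs attribute boilerplate is omitted.

/* Hypothetical I2C sensor probe.  The devm_ variant unregisters the hwmon
 * device automatically on driver detach, so no explicit
 * hwmon_device_unregister() call is needed.
 */
static int my_sensor_probe(struct i2c_client *client,
			   const struct i2c_device_id *id)
{
	struct my_sensor_data *data;
	struct device *hwmon_dev;

	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
							   "my_sensor", data,
							   my_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}
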
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 2ab11dc38077..eff50e062be8 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -205,7 +205,6 @@ struct i2c_driver {
* @name: Indicates the type of the device, usually a chip name that's
* generic enough to hide second-sourcing and compatible revisions.
* @adapter: manages the bus segment hosting this I2C device
- * @driver: device's driver, hence pointer to access routines
* @dev: Driver model device node for the slave.
* @irq: indicates the IRQ generated by this device (if any)
* @detected: member of an i2c_driver.clients list or i2c-core's
@@ -222,7 +221,6 @@ struct i2c_client {
/* _LOWER_ 7 bits */
char name[I2C_NAME_SIZE];
struct i2c_adapter *adapter; /* the adapter we sit on */
- struct i2c_driver *driver; /* and our access routines */
struct device dev; /* the device structure */
int irq; /* irq issued by device */
struct list_head detected;
diff --git a/include/linux/ide.h b/include/linux/ide.h
index b17974917dbf..46a14229a162 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1514,7 +1514,7 @@ static inline void ide_set_max_pio(ide_drive_t *drive)
char *ide_media_string(ide_drive_t *);
-extern struct device_attribute ide_dev_attrs[];
+extern const struct attribute_group *ide_dev_groups[];
extern struct bus_type ide_bus_type;
extern struct class *ide_port_class;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index a5b598a79bec..7c1e1ebc0e23 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1391,8 +1391,8 @@ struct ieee80211_vht_operation {
#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
-#define IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX 0x00006000
-#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX 0x00030000
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MAX 0x0000e000
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX 0x00070000
#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 79640e015a86..0d678aefe69d 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -147,25 +147,27 @@ struct in_ifaddr {
unsigned long ifa_tstamp; /* updated timestamp */
};
-extern int register_inetaddr_notifier(struct notifier_block *nb);
-extern int unregister_inetaddr_notifier(struct notifier_block *nb);
+int register_inetaddr_notifier(struct notifier_block *nb);
+int unregister_inetaddr_notifier(struct notifier_block *nb);
-extern void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
- struct ipv4_devconf *devconf);
+void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
+ struct ipv4_devconf *devconf);
-extern struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
{
return __ip_dev_find(net, addr, true);
}
-extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
-extern int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
-extern void devinet_init(void);
-extern struct in_device *inetdev_by_index(struct net *, int);
-extern __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
-extern __be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local, int scope);
-extern struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask);
+int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
+int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
+void devinet_init(void);
+struct in_device *inetdev_by_index(struct net *, int);
+__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
+__be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local,
+ int scope);
+struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
+ __be32 mask);
static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
{
@@ -218,7 +220,7 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
return rtnl_dereference(dev->ip_ptr);
}
-extern void in_dev_finish_destroy(struct in_device *idev);
+void in_dev_finish_destroy(struct in_device *idev);
static inline void in_dev_put(struct in_device *idev)
{
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5e865b554940..c9e831dc80bc 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -19,6 +19,7 @@
#include <linux/atomic.h>
#include <asm/ptrace.h>
+#include <asm/irq.h>
/*
* These correspond to the IORESOURCE_IRQ_* defines in
@@ -374,6 +375,16 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
+
+#ifdef __ARCH_HAS_DO_SOFTIRQ
+void do_softirq_own_stack(void);
+#else
+static inline void do_softirq_own_stack(void)
+{
+ __do_softirq();
+}
+#endif
+
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7ea319e95b47..a444c790fa72 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -22,6 +22,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/types.h>
+#include <trace/events/iommu.h>
#define IOMMU_READ (1)
#define IOMMU_WRITE (2)
@@ -227,6 +228,7 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
ret = domain->handler(domain, dev, iova, flags,
domain->handler_token);
+ trace_io_page_fault(dev, iova, flags);
return ret;
}
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 19c19a5eee29..f6c82de12541 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -34,9 +34,9 @@ struct ipc_namespace {
int sem_ctls[4];
int used_sems;
- int msg_ctlmax;
- int msg_ctlmnb;
- int msg_ctlmni;
+ unsigned int msg_ctlmax;
+ unsigned int msg_ctlmnb;
+ unsigned int msg_ctlmni;
atomic_t msg_bytes;
atomic_t msg_hdrs;
int auto_msgmni;
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 28ea38439313..5d89d1b808a6 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -21,13 +21,11 @@ struct ipv6_devconf {
__s32 force_mld_version;
__s32 mldv1_unsolicited_report_interval;
__s32 mldv2_unsolicited_report_interval;
-#ifdef CONFIG_IPV6_PRIVACY
__s32 use_tempaddr;
__s32 temp_valid_lft;
__s32 temp_prefered_lft;
__s32 regen_max_retry;
__s32 max_desync_factor;
-#endif
__s32 max_addresses;
__s32 accept_ra_defrtr;
__s32 accept_ra_pinfo;
@@ -115,16 +113,8 @@ static inline int inet6_iif(const struct sk_buff *skb)
return IP6CB(skb)->iif;
}
-struct inet6_request_sock {
- struct in6_addr loc_addr;
- struct in6_addr rmt_addr;
- struct sk_buff *pktopts;
- int iif;
-};
-
struct tcp6_request_sock {
struct tcp_request_sock tcp6rsk_tcp;
- struct inet6_request_sock tcp6rsk_inet6;
};
struct ipv6_mc_socklist;
@@ -141,8 +131,6 @@ struct ipv6_fl_socklist;
*/
struct ipv6_pinfo {
struct in6_addr saddr;
- struct in6_addr rcv_saddr;
- struct in6_addr daddr;
struct in6_pktinfo sticky_pktinfo;
const struct in6_addr *daddr_cache;
#ifdef CONFIG_IPV6_SUBTREES
@@ -256,48 +244,22 @@ struct tcp6_sock {
extern int inet6_sk_rebuild_header(struct sock *sk);
-struct inet6_timewait_sock {
- struct in6_addr tw_v6_daddr;
- struct in6_addr tw_v6_rcv_saddr;
-};
-
struct tcp6_timewait_sock {
struct tcp_timewait_sock tcp6tw_tcp;
- struct inet6_timewait_sock tcp6tw_inet6;
};
-static inline struct inet6_timewait_sock *inet6_twsk(const struct sock *sk)
-{
- return (struct inet6_timewait_sock *)(((u8 *)sk) +
- inet_twsk(sk)->tw_ipv6_offset);
-}
-
#if IS_ENABLED(CONFIG_IPV6)
static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
{
return inet_sk(__sk)->pinet6;
}
-static inline struct inet6_request_sock *
- inet6_rsk(const struct request_sock *rsk)
-{
- return (struct inet6_request_sock *)(((u8 *)rsk) +
- inet_rsk(rsk)->inet6_rsk_offset);
-}
-
-static inline u32 inet6_rsk_offset(struct request_sock *rsk)
-{
- return rsk->rsk_ops->obj_size - sizeof(struct inet6_request_sock);
-}
-
static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops)
{
struct request_sock *req = reqsk_alloc(ops);
- if (req != NULL) {
- inet_rsk(req)->inet6_rsk_offset = inet6_rsk_offset(req);
- inet6_rsk(req)->pktopts = NULL;
- }
+ if (req)
+ inet_rsk(req)->pktopts = NULL;
return req;
}
@@ -321,21 +283,11 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
#define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only)
#define ipv6_only_sock(sk) ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
-static inline u16 inet6_tw_offset(const struct proto *prot)
+static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
{
- return prot->twsk_prot->twsk_obj_size -
- sizeof(struct inet6_timewait_sock);
-}
-
-static inline struct in6_addr *__inet6_rcv_saddr(const struct sock *sk)
-{
- return likely(sk->sk_state != TCP_TIME_WAIT) ?
- &inet6_sk(sk)->rcv_saddr : &inet6_twsk(sk)->tw_v6_rcv_saddr;
-}
-
-static inline struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
-{
- return sk->sk_family == AF_INET6 ? __inet6_rcv_saddr(sk) : NULL;
+ if (sk->sk_family == AF_INET6)
+ return &sk->sk_v6_rcv_saddr;
+ return NULL;
}
static inline int inet_v6_ipv6only(const struct sock *sk)
@@ -363,28 +315,18 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
return NULL;
}
-#define __inet6_rcv_saddr(__sk) NULL
#define inet6_rcv_saddr(__sk) NULL
#define tcp_twsk_ipv6only(__sk) 0
#define inet_v6_ipv6only(__sk) 0
#endif /* IS_ENABLED(CONFIG_IPV6) */
#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
- ((inet_sk(__sk)->inet_portpair == (__ports)) && \
+ (((__sk)->sk_portpair == (__ports)) && \
((__sk)->sk_family == AF_INET6) && \
- ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \
- ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
+ ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
+ ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
(!(__sk)->sk_bound_dev_if || \
((__sk)->sk_bound_dev_if == (__dif))) && \
net_eq(sock_net(__sk), (__net)))
-#define INET6_TW_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
- ((inet_twsk(__sk)->tw_portpair == (__ports)) && \
- ((__sk)->sk_family == AF_INET6) && \
- ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_daddr, (__saddr)) && \
- ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_rcv_saddr, (__daddr)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif))) && \
- net_eq(sock_net(__sk), (__net)))
-
#endif /* _IPV6_H */
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 0e5d9ecdb2b6..cac496b1e279 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -31,6 +31,8 @@
#define GIC_DIST_TARGET 0x800
#define GIC_DIST_CONFIG 0xc00
#define GIC_DIST_SOFTINT 0xf00
+#define GIC_DIST_SGI_PENDING_CLEAR 0xf10
+#define GIC_DIST_SGI_PENDING_SET 0xf20
#define GICH_HCR 0x0
#define GICH_VTR 0x4
@@ -74,6 +76,11 @@ static inline void gic_init(unsigned int nr, int start,
gic_init_bases(nr, start, dist, cpu, 0, NULL);
}
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
+int gic_get_cpu_id(unsigned int cpu);
+void gic_migrate_target(unsigned int new_cpu_id);
+unsigned long gic_get_sgir_physaddr(void);
+
#endif /* __ASSEMBLY */
#endif
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index a5079072da66..e96be7245717 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -48,6 +48,13 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/bug.h>
+
+extern bool static_key_initialized;
+
+#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized, \
+ "%s used before call to jump_label_init", \
+ __func__)
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -128,6 +135,7 @@ struct static_key {
static __always_inline void jump_label_init(void)
{
+ static_key_initialized = true;
}
static __always_inline bool static_key_false(struct static_key *key)
@@ -146,11 +154,13 @@ static __always_inline bool static_key_true(struct static_key *key)
static inline void static_key_slow_inc(struct static_key *key)
{
+ STATIC_KEY_CHECK_USE();
atomic_inc(&key->enabled);
}
static inline void static_key_slow_dec(struct static_key *key)
{
+ STATIC_KEY_CHECK_USE();
atomic_dec(&key->enabled);
}
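
The new STATIC_KEY_CHECK_USE() only warns when a key is flipped before jump_label_init() has run; normal usage is unchanged and, as a reminder, looks like the sketch below (the key name and the slow-path helper are invented).

/* A default-off static key: the branch compiles to a NOP until someone
 * calls static_key_slow_inc(); doing so before jump_label_init() now
 * triggers the STATIC_KEY_CHECK_USE() warning added above.
 */
static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

void my_feature_enable(void)
{
	static_key_slow_inc(&my_feature_key);
}

void my_hot_path(void)
{
	if (static_key_false(&my_feature_key))
		my_feature_slow_path();	/* hypothetical slow path */
}
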
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index 113788389b3d..089f70f83e97 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -23,12 +23,14 @@ struct static_key_deferred {
};
static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
+ STATIC_KEY_CHECK_USE();
static_key_slow_dec(&key->key);
}
static inline void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
+ STATIC_KEY_CHECK_USE();
}
#endif /* HAVE_JUMP_LABEL */
#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 7f6fe6e015bc..290db1269c4c 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -109,6 +109,7 @@ typedef enum {
KDB_REASON_RECURSE, /* Recursive entry to kdb;
* regs probably valid */
KDB_REASON_SSTEP, /* Single Step trap. - regs valid */
+ KDB_REASON_SYSTEM_NMI, /* In NMI due to SYSTEM cmd; regs valid */
} kdb_reason_t;
extern int kdb_trap_printk;
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 518a53afb9ea..a74c3a84dfdd 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -45,6 +45,7 @@ struct key_preparsed_payload {
const void *data; /* Raw data */
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
+ bool trusted; /* True if key is trusted */
};
typedef int (*request_key_actor_t)(struct key_construction *key,
@@ -63,6 +64,11 @@ struct key_type {
*/
size_t def_datalen;
+ /* Default key search algorithm. */
+ unsigned def_lookup_type;
+#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
+#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
+
/* vet a description */
int (*vet_description)(const char *description);
diff --git a/include/linux/key.h b/include/linux/key.h
index 4dfde1161c5e..80d677483e31 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -22,6 +22,7 @@
#include <linux/sysctl.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
+#include <linux/assoc_array.h>
#ifdef __KERNEL__
#include <linux/uidgid.h>
@@ -82,6 +83,12 @@ struct key_owner;
struct keyring_list;
struct keyring_name;
+struct keyring_index_key {
+ struct key_type *type;
+ const char *description;
+ size_t desc_len;
+};
+
/*****************************************************************************/
/*
* key reference with possession attribute handling
@@ -99,7 +106,7 @@ struct keyring_name;
typedef struct __key_reference_with_attributes *key_ref_t;
static inline key_ref_t make_key_ref(const struct key *key,
- unsigned long possession)
+ bool possession)
{
return (key_ref_t) ((unsigned long) key | possession);
}
@@ -109,7 +116,7 @@ static inline struct key *key_ref_to_ptr(const key_ref_t key_ref)
return (struct key *) ((unsigned long) key_ref & ~1UL);
}
-static inline unsigned long is_key_possessed(const key_ref_t key_ref)
+static inline bool is_key_possessed(const key_ref_t key_ref)
{
return (unsigned long) key_ref & 1UL;
}
@@ -129,7 +136,6 @@ struct key {
struct list_head graveyard_link;
struct rb_node serial_node;
};
- struct key_type *type; /* type of key */
struct rw_semaphore sem; /* change vs change sem */
struct key_user *user; /* owner of this key */
void *security; /* security data for this key */
@@ -162,13 +168,21 @@ struct key {
#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
+#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */
+#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */
- /* the description string
- * - this is used to match a key against search criteria
- * - this should be a printable string
+ /* the key type and key description string
+ * - the desc is used to match a key against search criteria
+ * - it should be a printable string
* - eg: for krb5 AFS, this might be "afs@REDHAT.COM"
*/
- char *description;
+ union {
+ struct keyring_index_key index_key;
+ struct {
+ struct key_type *type; /* type of key */
+ char *description;
+ };
+ };
/* type specific data
* - this is used by the keyring type to index the name
@@ -185,11 +199,14 @@ struct key {
* whatever
*/
union {
- unsigned long value;
- void __rcu *rcudata;
- void *data;
- struct keyring_list __rcu *subscriptions;
- } payload;
+ union {
+ unsigned long value;
+ void __rcu *rcudata;
+ void *data;
+ void *data2[2];
+ } payload;
+ struct assoc_array keys;
+ };
};
extern struct key *key_alloc(struct key_type *type,
@@ -203,18 +220,23 @@ extern struct key *key_alloc(struct key_type *type,
#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
+#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
extern void key_put(struct key *key);
-static inline struct key *key_get(struct key *key)
+static inline struct key *__key_get(struct key *key)
{
- if (key)
- atomic_inc(&key->usage);
+ atomic_inc(&key->usage);
return key;
}
+static inline struct key *key_get(struct key *key)
+{
+ return key ? __key_get(key) : key;
+}
+
static inline void key_ref_put(key_ref_t key_ref)
{
key_put(key_ref_to_ptr(key_ref));
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index c6e091bf39a5..dfb4f2ffdaa2 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -310,6 +310,7 @@ extern int
kgdb_handle_exception(int ex_vector, int signo, int err_code,
struct pt_regs *regs);
extern int kgdb_nmicallback(int cpu, void *regs);
+extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, atomic_t *snd_rdy);
extern void gdbstub_exit(int status);
extern int kgdb_single_step;
diff --git a/include/linux/kobj_completion.h b/include/linux/kobj_completion.h
new file mode 100644
index 000000000000..a428f6436063
--- /dev/null
+++ b/include/linux/kobj_completion.h
@@ -0,0 +1,18 @@
+#ifndef _KOBJ_COMPLETION_H_
+#define _KOBJ_COMPLETION_H_
+
+#include <linux/kobject.h>
+#include <linux/completion.h>
+
+struct kobj_completion {
+ struct kobject kc_kobj;
+ struct completion kc_unregister;
+};
+
+#define kobj_to_kobj_completion(kobj) \
+ container_of(kobj, struct kobj_completion, kc_kobj)
+
+void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype);
+void kobj_completion_release(struct kobject *kobj);
+void kobj_completion_del_and_wait(struct kobj_completion *kc);
+#endif /* _KOBJ_COMPLETION_H_ */
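
The new header only declares the API; assuming it is meant to be used the obvious way (embed the struct, route the ktype's release through kobj_completion_release(), wait on teardown), a sketch might be:

/* Hypothetical object whose kobject teardown must be waited for. */
struct my_obj {
	struct kobj_completion kc;
};

static struct kobj_type my_ktype = {
	/* Assumption: release must reach kobj_completion_release() so that
	 * kobj_completion_del_and_wait() below is woken. */
	.release = kobj_completion_release,
};

static void my_obj_setup(struct my_obj *obj)
{
	kobj_completion_init(&obj->kc, &my_ktype);
	/* kobject_add(&obj->kc.kc_kobj, parent, "name") would follow here. */
}

static void my_obj_teardown(struct my_obj *obj)
{
	/* Removes the kobject and blocks until the last reference is
	 * dropped and release has run. */
	kobj_completion_del_and_wait(&obj->kc);
}
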
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index de6dcbcc6ef7..e7ba650086ce 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -107,6 +107,7 @@ extern int __must_check kobject_move(struct kobject *, struct kobject *);
extern struct kobject *kobject_get(struct kobject *kobj);
extern void kobject_put(struct kobject *kobj);
+extern const void *kobject_namespace(struct kobject *kobj);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
struct kobj_type {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0fbbc7aa02cb..9523d2ad7535 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -142,7 +142,7 @@ struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;
-extern raw_spinlock_t kvm_lock;
+extern spinlock_t kvm_lock;
extern struct list_head vm_list;
struct kvm_io_range {
@@ -189,8 +189,7 @@ struct kvm_async_pf {
gva_t gva;
unsigned long addr;
struct kvm_arch_async_pf arch;
- struct page *page;
- bool done;
+ bool wakeup_all;
};
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
@@ -508,9 +507,10 @@ int kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
@@ -671,6 +671,25 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
}
#endif
+#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
+void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
+void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
+bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
+#else
+static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
+{
+}
+
+static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
+{
+ return false;
+}
+#endif
+
static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
@@ -747,9 +766,6 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
-/* For vcpu->arch.iommu_flags */
-#define KVM_IOMMU_CACHE_COHERENCY 0x1
-
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
@@ -789,7 +805,7 @@ static inline void kvm_guest_enter(void)
/* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode
- * is very similar to exiting to userspase from rcu point of view. In
+ * is very similar to exiting to userspace from rcu point of view. In
* addition CPU may stay in a guest mode for quite a long time (up to
* one time slice). Lets treat guest mode as quiescent state, just like
* we do with user-mode execution.
@@ -842,13 +858,6 @@ static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
return gfn_to_memslot(kvm, gfn)->id;
}
-static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
-{
- /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
- return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
- (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
-}
-
static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
@@ -1066,6 +1075,7 @@ struct kvm_device *kvm_device_from_filp(struct file *filp);
extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
+extern struct kvm_device_ops kvm_vfio_ops;
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index f279ed9a9163..13dfd36a3294 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -36,4 +36,10 @@ extern int lockref_put_or_lock(struct lockref *);
extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);
+/* Must be called under spinlock for reliable results */
+static inline int __lockref_is_dead(const struct lockref *l)
+{
+ return ((int)l->count < 0);
+}
+
#endif /* __LINUX_LOCKREF_H */
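
A minimal sketch of the intended use of the new helper; the my_entry structure is invented, and the point is that the lockref's spinlock must be held or the answer can be stale.

/* Take a reference on a cached entry only if its lockref has not been
 * marked dead (count < 0).
 */
static bool my_entry_grab(struct my_entry *entry)
{
	bool alive;

	spin_lock(&entry->lockref.lock);
	alive = !__lockref_is_dead(&entry->lockref);
	if (alive)
		entry->lockref.count++;	/* safe: lock held and not dead */
	spin_unlock(&entry->lockref.lock);
	return alive;
}
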
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index da6716b9e3fe..ea4d2495c646 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -136,6 +136,7 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
struct mempolicy *get_vma_policy(struct task_struct *tsk,
struct vm_area_struct *vma, unsigned long addr);
+bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma);
extern void numa_default_policy(void);
extern void numa_policy_init(void);
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 4706d3d46e56..cb49417f8ba9 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -1908,7 +1908,7 @@
#define ARIZONA_FLL2_SYNC_GAIN_MASK 0x003c /* FLL2_SYNC_GAIN */
#define ARIZONA_FLL2_SYNC_GAIN_SHIFT 2 /* FLL2_SYNC_GAIN */
#define ARIZONA_FLL2_SYNC_GAIN_WIDTH 4 /* FLL2_SYNC_GAIN */
-#define ARIZONA_FLL2_SYNC_BW_MASK 0x0001 /* FLL2_SYNC_BW */
+#define ARIZONA_FLL2_SYNC_BW 0x0001 /* FLL2_SYNC_BW */
#define ARIZONA_FLL2_SYNC_BW_MASK 0x0001 /* FLL2_SYNC_BW */
#define ARIZONA_FLL2_SYNC_BW_SHIFT 0 /* FLL2_SYNC_BW */
#define ARIZONA_FLL2_SYNC_BW_WIDTH 1 /* FLL2_SYNC_BW */
diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h
new file mode 100644
index 000000000000..16bf8a0dcd97
--- /dev/null
+++ b/include/linux/mfd/as3722.h
@@ -0,0 +1,423 @@
+/*
+ * as3722 definitions
+ *
+ * Copyright (C) 2013 ams
+ * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Author: Florian Lobmaier <florian.lobmaier@ams.com>
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __LINUX_MFD_AS3722_H__
+#define __LINUX_MFD_AS3722_H__
+
+#include <linux/regmap.h>
+
+/* AS3722 registers */
+#define AS3722_SD0_VOLTAGE_REG 0x00
+#define AS3722_SD1_VOLTAGE_REG 0x01
+#define AS3722_SD2_VOLTAGE_REG 0x02
+#define AS3722_SD3_VOLTAGE_REG 0x03
+#define AS3722_SD4_VOLTAGE_REG 0x04
+#define AS3722_SD5_VOLTAGE_REG 0x05
+#define AS3722_SD6_VOLTAGE_REG 0x06
+#define AS3722_GPIO0_CONTROL_REG 0x08
+#define AS3722_GPIO1_CONTROL_REG 0x09
+#define AS3722_GPIO2_CONTROL_REG 0x0A
+#define AS3722_GPIO3_CONTROL_REG 0x0B
+#define AS3722_GPIO4_CONTROL_REG 0x0C
+#define AS3722_GPIO5_CONTROL_REG 0x0D
+#define AS3722_GPIO6_CONTROL_REG 0x0E
+#define AS3722_GPIO7_CONTROL_REG 0x0F
+#define AS3722_LDO0_VOLTAGE_REG 0x10
+#define AS3722_LDO1_VOLTAGE_REG 0x11
+#define AS3722_LDO2_VOLTAGE_REG 0x12
+#define AS3722_LDO3_VOLTAGE_REG 0x13
+#define AS3722_LDO4_VOLTAGE_REG 0x14
+#define AS3722_LDO5_VOLTAGE_REG 0x15
+#define AS3722_LDO6_VOLTAGE_REG 0x16
+#define AS3722_LDO7_VOLTAGE_REG 0x17
+#define AS3722_LDO9_VOLTAGE_REG 0x19
+#define AS3722_LDO10_VOLTAGE_REG 0x1A
+#define AS3722_LDO11_VOLTAGE_REG 0x1B
+#define AS3722_GPIO_DEB1_REG 0x1E
+#define AS3722_GPIO_DEB2_REG 0x1F
+#define AS3722_GPIO_SIGNAL_OUT_REG 0x20
+#define AS3722_GPIO_SIGNAL_IN_REG 0x21
+#define AS3722_REG_SEQU_MOD1_REG 0x22
+#define AS3722_REG_SEQU_MOD2_REG 0x23
+#define AS3722_REG_SEQU_MOD3_REG 0x24
+#define AS3722_SD_PHSW_CTRL_REG 0x27
+#define AS3722_SD_PHSW_STATUS 0x28
+#define AS3722_SD0_CONTROL_REG 0x29
+#define AS3722_SD1_CONTROL_REG 0x2A
+#define AS3722_SDmph_CONTROL_REG 0x2B
+#define AS3722_SD23_CONTROL_REG 0x2C
+#define AS3722_SD4_CONTROL_REG 0x2D
+#define AS3722_SD5_CONTROL_REG 0x2E
+#define AS3722_SD6_CONTROL_REG 0x2F
+#define AS3722_SD_DVM_REG 0x30
+#define AS3722_RESET_REASON_REG 0x31
+#define AS3722_BATTERY_VOLTAGE_MONITOR_REG 0x32
+#define AS3722_STARTUP_CONTROL_REG 0x33
+#define AS3722_RESET_TIMER_REG 0x34
+#define AS3722_REFERENCE_CONTROL_REG 0x35
+#define AS3722_RESET_CONTROL_REG 0x36
+#define AS3722_OVER_TEMP_CONTROL_REG 0x37
+#define AS3722_WATCHDOG_CONTROL_REG 0x38
+#define AS3722_REG_STANDBY_MOD1_REG 0x39
+#define AS3722_REG_STANDBY_MOD2_REG 0x3A
+#define AS3722_REG_STANDBY_MOD3_REG 0x3B
+#define AS3722_ENABLE_CTRL1_REG 0x3C
+#define AS3722_ENABLE_CTRL2_REG 0x3D
+#define AS3722_ENABLE_CTRL3_REG 0x3E
+#define AS3722_ENABLE_CTRL4_REG 0x3F
+#define AS3722_ENABLE_CTRL5_REG 0x40
+#define AS3722_PWM_CONTROL_L_REG 0x41
+#define AS3722_PWM_CONTROL_H_REG 0x42
+#define AS3722_WATCHDOG_TIMER_REG 0x46
+#define AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG 0x48
+#define AS3722_IOVOLTAGE_REG 0x49
+#define AS3722_BATTERY_VOLTAGE_MONITOR2_REG 0x4A
+#define AS3722_SD_CONTROL_REG 0x4D
+#define AS3722_LDOCONTROL0_REG 0x4E
+#define AS3722_LDOCONTROL1_REG 0x4F
+#define AS3722_SD0_PROTECT_REG 0x50
+#define AS3722_SD6_PROTECT_REG 0x51
+#define AS3722_PWM_VCONTROL1_REG 0x52
+#define AS3722_PWM_VCONTROL2_REG 0x53
+#define AS3722_PWM_VCONTROL3_REG 0x54
+#define AS3722_PWM_VCONTROL4_REG 0x55
+#define AS3722_BB_CHARGER_REG 0x57
+#define AS3722_CTRL_SEQU1_REG 0x58
+#define AS3722_CTRL_SEQU2_REG 0x59
+#define AS3722_OVCURRENT_REG 0x5A
+#define AS3722_OVCURRENT_DEB_REG 0x5B
+#define AS3722_SDLV_DEB_REG 0x5C
+#define AS3722_OC_PG_CTRL_REG 0x5D
+#define AS3722_OC_PG_CTRL2_REG 0x5E
+#define AS3722_CTRL_STATUS 0x5F
+#define AS3722_RTC_CONTROL_REG 0x60
+#define AS3722_RTC_SECOND_REG 0x61
+#define AS3722_RTC_MINUTE_REG 0x62
+#define AS3722_RTC_HOUR_REG 0x63
+#define AS3722_RTC_DAY_REG 0x64
+#define AS3722_RTC_MONTH_REG 0x65
+#define AS3722_RTC_YEAR_REG 0x66
+#define AS3722_RTC_ALARM_SECOND_REG 0x67
+#define AS3722_RTC_ALARM_MINUTE_REG 0x68
+#define AS3722_RTC_ALARM_HOUR_REG 0x69
+#define AS3722_RTC_ALARM_DAY_REG 0x6A
+#define AS3722_RTC_ALARM_MONTH_REG 0x6B
+#define AS3722_RTC_ALARM_YEAR_REG 0x6C
+#define AS3722_SRAM_REG 0x6D
+#define AS3722_RTC_ACCESS_REG 0x6F
+#define AS3722_RTC_STATUS_REG 0x73
+#define AS3722_INTERRUPT_MASK1_REG 0x74
+#define AS3722_INTERRUPT_MASK2_REG 0x75
+#define AS3722_INTERRUPT_MASK3_REG 0x76
+#define AS3722_INTERRUPT_MASK4_REG 0x77
+#define AS3722_INTERRUPT_STATUS1_REG 0x78
+#define AS3722_INTERRUPT_STATUS2_REG 0x79
+#define AS3722_INTERRUPT_STATUS3_REG 0x7A
+#define AS3722_INTERRUPT_STATUS4_REG 0x7B
+#define AS3722_TEMP_STATUS_REG 0x7D
+#define AS3722_ADC0_CONTROL_REG 0x80
+#define AS3722_ADC1_CONTROL_REG 0x81
+#define AS3722_ADC0_MSB_RESULT_REG 0x82
+#define AS3722_ADC0_LSB_RESULT_REG 0x83
+#define AS3722_ADC1_MSB_RESULT_REG 0x84
+#define AS3722_ADC1_LSB_RESULT_REG 0x85
+#define AS3722_ADC1_THRESHOLD_HI_MSB_REG 0x86
+#define AS3722_ADC1_THRESHOLD_HI_LSB_REG 0x87
+#define AS3722_ADC1_THRESHOLD_LO_MSB_REG 0x88
+#define AS3722_ADC1_THRESHOLD_LO_LSB_REG 0x89
+#define AS3722_ADC_CONFIGURATION_REG 0x8A
+#define AS3722_ASIC_ID1_REG 0x90
+#define AS3722_ASIC_ID2_REG 0x91
+#define AS3722_LOCK_REG 0x9E
+#define AS3722_MAX_REGISTER 0xF4
+
+#define AS3722_SD0_EXT_ENABLE_MASK 0x03
+#define AS3722_SD1_EXT_ENABLE_MASK 0x0C
+#define AS3722_SD2_EXT_ENABLE_MASK 0x30
+#define AS3722_SD3_EXT_ENABLE_MASK 0xC0
+#define AS3722_SD4_EXT_ENABLE_MASK 0x03
+#define AS3722_SD5_EXT_ENABLE_MASK 0x0C
+#define AS3722_SD6_EXT_ENABLE_MASK 0x30
+#define AS3722_LDO0_EXT_ENABLE_MASK 0x03
+#define AS3722_LDO1_EXT_ENABLE_MASK 0x0C
+#define AS3722_LDO2_EXT_ENABLE_MASK 0x30
+#define AS3722_LDO3_EXT_ENABLE_MASK 0xC0
+#define AS3722_LDO4_EXT_ENABLE_MASK 0x03
+#define AS3722_LDO5_EXT_ENABLE_MASK 0x0C
+#define AS3722_LDO6_EXT_ENABLE_MASK 0x30
+#define AS3722_LDO7_EXT_ENABLE_MASK 0xC0
+#define AS3722_LDO9_EXT_ENABLE_MASK 0x0C
+#define AS3722_LDO10_EXT_ENABLE_MASK 0x30
+#define AS3722_LDO11_EXT_ENABLE_MASK 0xC0
+
+#define AS3722_OVCURRENT_SD0_ALARM_MASK 0x07
+#define AS3722_OVCURRENT_SD0_ALARM_SHIFT 0x01
+#define AS3722_OVCURRENT_SD0_TRIP_MASK 0x18
+#define AS3722_OVCURRENT_SD0_TRIP_SHIFT 0x03
+#define AS3722_OVCURRENT_SD1_TRIP_MASK 0x60
+#define AS3722_OVCURRENT_SD1_TRIP_SHIFT 0x05
+
+#define AS3722_OVCURRENT_SD6_ALARM_MASK 0x07
+#define AS3722_OVCURRENT_SD6_ALARM_SHIFT 0x01
+#define AS3722_OVCURRENT_SD6_TRIP_MASK 0x18
+#define AS3722_OVCURRENT_SD6_TRIP_SHIFT 0x03
+
+/* AS3722 register bits and bit masks */
+#define AS3722_LDO_ILIMIT_MASK BIT(7)
+#define AS3722_LDO_ILIMIT_BIT BIT(7)
+#define AS3722_LDO0_VSEL_MASK 0x1F
+#define AS3722_LDO0_VSEL_MIN 0x01
+#define AS3722_LDO0_VSEL_MAX 0x12
+#define AS3722_LDO0_NUM_VOLT 0x12
+#define AS3722_LDO3_VSEL_MASK 0x3F
+#define AS3722_LDO3_VSEL_MIN 0x01
+#define AS3722_LDO3_VSEL_MAX 0x2D
+#define AS3722_LDO3_NUM_VOLT 0x2D
+#define AS3722_LDO_VSEL_MASK 0x7F
+#define AS3722_LDO_VSEL_MIN 0x01
+#define AS3722_LDO_VSEL_MAX 0x7F
+#define AS3722_LDO_VSEL_DNU_MIN 0x25
+#define AS3722_LDO_VSEL_DNU_MAX 0x3F
+#define AS3722_LDO_NUM_VOLT 0x80
+
+#define AS3722_LDO0_CTRL BIT(0)
+#define AS3722_LDO1_CTRL BIT(1)
+#define AS3722_LDO2_CTRL BIT(2)
+#define AS3722_LDO3_CTRL BIT(3)
+#define AS3722_LDO4_CTRL BIT(4)
+#define AS3722_LDO5_CTRL BIT(5)
+#define AS3722_LDO6_CTRL BIT(6)
+#define AS3722_LDO7_CTRL BIT(7)
+#define AS3722_LDO9_CTRL BIT(1)
+#define AS3722_LDO10_CTRL BIT(2)
+#define AS3722_LDO11_CTRL BIT(3)
+
+#define AS3722_LDO3_MODE_MASK (3 << 6)
+#define AS3722_LDO3_MODE_VAL(n) (((n) & 0x3) << 6)
+#define AS3722_LDO3_MODE_PMOS AS3722_LDO3_MODE_VAL(0)
+#define AS3722_LDO3_MODE_PMOS_TRACKING AS3722_LDO3_MODE_VAL(1)
+#define AS3722_LDO3_MODE_NMOS AS3722_LDO3_MODE_VAL(2)
+#define AS3722_LDO3_MODE_SWITCH AS3722_LDO3_MODE_VAL(3)
+
+#define AS3722_SD_VSEL_MASK 0x7F
+#define AS3722_SD0_VSEL_MIN 0x01
+#define AS3722_SD0_VSEL_MAX 0x5A
+#define AS3722_SD2_VSEL_MIN 0x01
+#define AS3722_SD2_VSEL_MAX 0x7F
+
+#define AS3722_SDn_CTRL(n) BIT(n)
+
+#define AS3722_SD0_MODE_FAST BIT(4)
+#define AS3722_SD1_MODE_FAST BIT(4)
+#define AS3722_SD2_MODE_FAST BIT(2)
+#define AS3722_SD3_MODE_FAST BIT(6)
+#define AS3722_SD4_MODE_FAST BIT(2)
+#define AS3722_SD5_MODE_FAST BIT(2)
+#define AS3722_SD6_MODE_FAST BIT(4)
+
+#define AS3722_POWER_OFF BIT(1)
+
+#define AS3722_INTERRUPT_MASK1_LID BIT(0)
+#define AS3722_INTERRUPT_MASK1_ACOK BIT(1)
+#define AS3722_INTERRUPT_MASK1_ENABLE1 BIT(2)
+#define AS3722_INTERRUPT_MASK1_OCURR_ALARM_SD0 BIT(3)
+#define AS3722_INTERRUPT_MASK1_ONKEY_LONG BIT(4)
+#define AS3722_INTERRUPT_MASK1_ONKEY BIT(5)
+#define AS3722_INTERRUPT_MASK1_OVTMP BIT(6)
+#define AS3722_INTERRUPT_MASK1_LOWBAT BIT(7)
+
+#define AS3722_INTERRUPT_MASK2_SD0_LV BIT(0)
+#define AS3722_INTERRUPT_MASK2_SD1_LV BIT(1)
+#define AS3722_INTERRUPT_MASK2_SD2345_LV BIT(2)
+#define AS3722_INTERRUPT_MASK2_PWM1_OV_PROT BIT(3)
+#define AS3722_INTERRUPT_MASK2_PWM2_OV_PROT BIT(4)
+#define AS3722_INTERRUPT_MASK2_ENABLE2 BIT(5)
+#define AS3722_INTERRUPT_MASK2_SD6_LV BIT(6)
+#define AS3722_INTERRUPT_MASK2_RTC_REP BIT(7)
+
+#define AS3722_INTERRUPT_MASK3_RTC_ALARM BIT(0)
+#define AS3722_INTERRUPT_MASK3_GPIO1 BIT(1)
+#define AS3722_INTERRUPT_MASK3_GPIO2 BIT(2)
+#define AS3722_INTERRUPT_MASK3_GPIO3 BIT(3)
+#define AS3722_INTERRUPT_MASK3_GPIO4 BIT(4)
+#define AS3722_INTERRUPT_MASK3_GPIO5 BIT(5)
+#define AS3722_INTERRUPT_MASK3_WATCHDOG BIT(6)
+#define AS3722_INTERRUPT_MASK3_ENABLE3 BIT(7)
+
+#define AS3722_INTERRUPT_MASK4_TEMP_SD0_SHUTDOWN BIT(0)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD1_SHUTDOWN BIT(1)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD6_SHUTDOWN BIT(2)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD0_ALARM BIT(3)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD1_ALARM BIT(4)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD6_ALARM BIT(5)
+#define AS3722_INTERRUPT_MASK4_OCCUR_ALARM_SD6 BIT(6)
+#define AS3722_INTERRUPT_MASK4_ADC BIT(7)
+
+#define AS3722_ADC1_INTERVAL_TIME BIT(0)
+#define AS3722_ADC1_INT_MODE_ON BIT(1)
+#define AS3722_ADC_BUF_ON BIT(2)
+#define AS3722_ADC1_LOW_VOLTAGE_RANGE BIT(5)
+#define AS3722_ADC1_INTEVAL_SCAN BIT(6)
+#define AS3722_ADC1_INT_MASK BIT(7)
+
+#define AS3722_ADC_MSB_VAL_MASK 0x7F
+#define AS3722_ADC_LSB_VAL_MASK 0x07
+
+#define AS3722_ADC0_CONV_START BIT(7)
+#define AS3722_ADC0_CONV_NOTREADY BIT(7)
+#define AS3722_ADC0_SOURCE_SELECT_MASK 0x1F
+
+#define AS3722_ADC1_CONV_START BIT(7)
+#define AS3722_ADC1_CONV_NOTREADY BIT(7)
+#define AS3722_ADC1_SOURCE_SELECT_MASK 0x1F
+
+/* GPIO modes */
+#define AS3722_GPIO_MODE_MASK 0x07
+#define AS3722_GPIO_MODE_INPUT 0x00
+#define AS3722_GPIO_MODE_OUTPUT_VDDH 0x01
+#define AS3722_GPIO_MODE_IO_OPEN_DRAIN 0x02
+#define AS3722_GPIO_MODE_ADC_IN 0x03
+#define AS3722_GPIO_MODE_INPUT_PULL_UP 0x04
+#define AS3722_GPIO_MODE_INPUT_PULL_DOWN 0x05
+#define AS3722_GPIO_MODE_IO_OPEN_DRAIN_PULL_UP 0x06
+#define AS3722_GPIO_MODE_OUTPUT_VDDL 0x07
+#define AS3722_GPIO_MODE_VAL(n) ((n) & AS3722_GPIO_MODE_MASK)
+
+#define AS3722_GPIO_INV BIT(7)
+#define AS3722_GPIO_IOSF_MASK 0x78
+#define AS3722_GPIO_IOSF_VAL(n) (((n) & 0xF) << 3)
+#define AS3722_GPIO_IOSF_NORMAL AS3722_GPIO_IOSF_VAL(0)
+#define AS3722_GPIO_IOSF_INTERRUPT_OUT AS3722_GPIO_IOSF_VAL(1)
+#define AS3722_GPIO_IOSF_VSUP_LOW_OUT AS3722_GPIO_IOSF_VAL(2)
+#define AS3722_GPIO_IOSF_GPIO_INTERRUPT_IN AS3722_GPIO_IOSF_VAL(3)
+#define AS3722_GPIO_IOSF_ISINK_PWM_IN AS3722_GPIO_IOSF_VAL(4)
+#define AS3722_GPIO_IOSF_VOLTAGE_STBY AS3722_GPIO_IOSF_VAL(5)
+#define AS3722_GPIO_IOSF_PWR_GOOD_OUT AS3722_GPIO_IOSF_VAL(7)
+#define AS3722_GPIO_IOSF_Q32K_OUT AS3722_GPIO_IOSF_VAL(8)
+#define AS3722_GPIO_IOSF_WATCHDOG_IN AS3722_GPIO_IOSF_VAL(9)
+#define AS3722_GPIO_IOSF_SOFT_RESET_IN AS3722_GPIO_IOSF_VAL(11)
+#define AS3722_GPIO_IOSF_PWM_OUT AS3722_GPIO_IOSF_VAL(12)
+#define AS3722_GPIO_IOSF_VSUP_LOW_DEB_OUT AS3722_GPIO_IOSF_VAL(13)
+#define AS3722_GPIO_IOSF_SD6_LOW_VOLT_LOW AS3722_GPIO_IOSF_VAL(14)
+
+#define AS3722_GPIOn_SIGNAL(n) BIT(n)
+#define AS3722_GPIOn_CONTROL_REG(n) (AS3722_GPIO0_CONTROL_REG + (n))
+#define AS3722_I2C_PULL_UP BIT(4)
+#define AS3722_INT_PULL_UP BIT(5)
+
+#define AS3722_RTC_REP_WAKEUP_EN BIT(0)
+#define AS3722_RTC_ALARM_WAKEUP_EN BIT(1)
+#define AS3722_RTC_ON BIT(2)
+#define AS3722_RTC_IRQMODE BIT(3)
+#define AS3722_RTC_CLK32K_OUT_EN BIT(5)
+
+#define AS3722_WATCHDOG_TIMER_MAX 0x7F
+#define AS3722_WATCHDOG_ON BIT(0)
+#define AS3722_WATCHDOG_SW_SIG BIT(0)
+
+#define AS3722_EXT_CONTROL_ENABLE1 0x1
+#define AS3722_EXT_CONTROL_ENABLE2 0x2
+#define AS3722_EXT_CONTROL_ENABLE3 0x3
+
+/* Interrupt IDs */
+enum as3722_irq {
+ AS3722_IRQ_LID,
+ AS3722_IRQ_ACOK,
+ AS3722_IRQ_ENABLE1,
+ AS3722_IRQ_OCCUR_ALARM_SD0,
+ AS3722_IRQ_ONKEY_LONG_PRESS,
+ AS3722_IRQ_ONKEY,
+ AS3722_IRQ_OVTMP,
+ AS3722_IRQ_LOWBAT,
+ AS3722_IRQ_SD0_LV,
+ AS3722_IRQ_SD1_LV,
+ AS3722_IRQ_SD2_LV,
+ AS3722_IRQ_PWM1_OV_PROT,
+ AS3722_IRQ_PWM2_OV_PROT,
+ AS3722_IRQ_ENABLE2,
+ AS3722_IRQ_SD6_LV,
+ AS3722_IRQ_RTC_REP,
+ AS3722_IRQ_RTC_ALARM,
+ AS3722_IRQ_GPIO1,
+ AS3722_IRQ_GPIO2,
+ AS3722_IRQ_GPIO3,
+ AS3722_IRQ_GPIO4,
+ AS3722_IRQ_GPIO5,
+ AS3722_IRQ_WATCHDOG,
+ AS3722_IRQ_ENABLE3,
+ AS3722_IRQ_TEMP_SD0_SHUTDOWN,
+ AS3722_IRQ_TEMP_SD1_SHUTDOWN,
+ AS3722_IRQ_TEMP_SD2_SHUTDOWN,
+ AS3722_IRQ_TEMP_SD0_ALARM,
+ AS3722_IRQ_TEMP_SD1_ALARM,
+ AS3722_IRQ_TEMP_SD6_ALARM,
+ AS3722_IRQ_OCCUR_ALARM_SD6,
+ AS3722_IRQ_ADC,
+ AS3722_IRQ_MAX,
+};
+
+struct as3722 {
+ struct device *dev;
+ struct regmap *regmap;
+ int chip_irq;
+ unsigned long irq_flags;
+ bool en_intern_int_pullup;
+ bool en_intern_i2c_pullup;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+static inline int as3722_read(struct as3722 *as3722, u32 reg, u32 *dest)
+{
+ return regmap_read(as3722->regmap, reg, dest);
+}
+
+static inline int as3722_write(struct as3722 *as3722, u32 reg, u32 value)
+{
+ return regmap_write(as3722->regmap, reg, value);
+}
+
+static inline int as3722_block_read(struct as3722 *as3722, u32 reg,
+ int count, u8 *buf)
+{
+ return regmap_bulk_read(as3722->regmap, reg, buf, count);
+}
+
+static inline int as3722_block_write(struct as3722 *as3722, u32 reg,
+ int count, u8 *data)
+{
+ return regmap_bulk_write(as3722->regmap, reg, data, count);
+}
+
+static inline int as3722_update_bits(struct as3722 *as3722, u32 reg,
+ u32 mask, u8 val)
+{
+ return regmap_update_bits(as3722->regmap, reg, mask, val);
+}
+
+static inline int as3722_irq_get_virq(struct as3722 *as3722, int irq)
+{
+ return regmap_irq_get_virq(as3722->irq_data, irq);
+}
+#endif /* __LINUX_MFD_AS3722_H__ */
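
To show how the regmap wrappers at the end of the header compose with the register and bit definitions, a hedged fragment from a hypothetical sub-driver (the function name and the choice of registers are illustrative only):

/* Switch SD0 into fast mode and read back the raw battery voltage
 * monitor register through the wrappers.
 */
static int my_as3722_tweak(struct as3722 *as3722)
{
	u32 val;
	int ret;

	ret = as3722_update_bits(as3722, AS3722_SD0_CONTROL_REG,
				 AS3722_SD0_MODE_FAST, AS3722_SD0_MODE_FAST);
	if (ret < 0)
		return ret;

	ret = as3722_read(as3722, AS3722_BATTERY_VOLTAGE_MONITOR_REG, &val);
	if (ret < 0)
		return ret;

	dev_dbg(as3722->dev, "battery monitor reg: 0x%02x\n", val);
	return 0;
}
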
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index cebe97ee98b8..7314fc4e6d25 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -59,6 +59,12 @@ struct mfd_cell {
* pm_runtime_no_callbacks().
*/
bool pm_runtime_no_callbacks;
+
+ /* A list of regulator supplies that should be mapped to the MFD
+ * device rather than the child device when requested
+ */
+ const char **parent_supplies;
+ int num_parent_supplies;
};
/*
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index 786d02eb79d2..21e21b81cc75 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -148,10 +148,15 @@ static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,
unsigned reg_cnt, unsigned char *val)
{
int ret;
+ unsigned int tmp;
+ int i;
- ret = regmap_bulk_read(da9052->regmap, reg, val, reg_cnt);
- if (ret < 0)
- return ret;
+ for (i = 0; i < reg_cnt; i++) {
+ ret = regmap_read(da9052->regmap, reg + i, &tmp);
+ val[i] = (unsigned char)tmp;
+ if (ret < 0)
+ return ret;
+ }
if (da9052->fix_io) {
ret = da9052->fix_io(da9052, reg);
@@ -166,10 +171,13 @@ static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
unsigned reg_cnt, unsigned char *val)
{
int ret;
+ int i;
- ret = regmap_raw_write(da9052->regmap, reg, val, reg_cnt);
- if (ret < 0)
- return ret;
+ for (i = 0; i < reg_cnt; i++) {
+ ret = regmap_write(da9052->regmap, reg + i, val[i]);
+ if (ret < 0)
+ return ret;
+ }
if (da9052->fix_io) {
ret = da9052->fix_io(da9052, reg);
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index ca0790fba2f5..060e11256fbc 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -12,6 +12,8 @@
#include <linux/notifier.h>
#include <linux/err.h>
+#include <dt-bindings/mfd/dbx500-prcmu.h> /* For clock identifiers */
+
/* Offset for the firmware version within the TCPM */
#define DB8500_PRCMU_FW_VERSION_OFFSET 0xA4
#define DBX540_PRCMU_FW_VERSION_OFFSET 0xA8
@@ -94,74 +96,6 @@ enum prcmu_wakeup_index {
#define PRCMU_CLKSRC_ARMCLKFIX 0x46
#define PRCMU_CLKSRC_HDMICLK 0x47
-/*
- * Clock identifiers.
- */
-enum prcmu_clock {
- PRCMU_SGACLK,
- PRCMU_UARTCLK,
- PRCMU_MSP02CLK,
- PRCMU_MSP1CLK,
- PRCMU_I2CCLK,
- PRCMU_SDMMCCLK,
- PRCMU_SPARE1CLK,
- PRCMU_SLIMCLK,
- PRCMU_PER1CLK,
- PRCMU_PER2CLK,
- PRCMU_PER3CLK,
- PRCMU_PER5CLK,
- PRCMU_PER6CLK,
- PRCMU_PER7CLK,
- PRCMU_LCDCLK,
- PRCMU_BMLCLK,
- PRCMU_HSITXCLK,
- PRCMU_HSIRXCLK,
- PRCMU_HDMICLK,
- PRCMU_APEATCLK,
- PRCMU_APETRACECLK,
- PRCMU_MCDECLK,
- PRCMU_IPI2CCLK,
- PRCMU_DSIALTCLK,
- PRCMU_DMACLK,
- PRCMU_B2R2CLK,
- PRCMU_TVCLK,
- PRCMU_SSPCLK,
- PRCMU_RNGCLK,
- PRCMU_UICCCLK,
- PRCMU_PWMCLK,
- PRCMU_IRDACLK,
- PRCMU_IRRCCLK,
- PRCMU_SIACLK,
- PRCMU_SVACLK,
- PRCMU_ACLK,
- PRCMU_HVACLK, /* Ux540 only */
- PRCMU_G1CLK, /* Ux540 only */
- PRCMU_SDMMCHCLK,
- PRCMU_CAMCLK,
- PRCMU_BML8580CLK,
- PRCMU_NUM_REG_CLOCKS,
- PRCMU_SYSCLK = PRCMU_NUM_REG_CLOCKS,
- PRCMU_CDCLK,
- PRCMU_TIMCLK,
- PRCMU_PLLSOC0,
- PRCMU_PLLSOC1,
- PRCMU_ARMSS,
- PRCMU_PLLDDR,
- PRCMU_PLLDSI,
- PRCMU_DSI0CLK,
- PRCMU_DSI1CLK,
- PRCMU_DSI0ESCCLK,
- PRCMU_DSI1ESCCLK,
- PRCMU_DSI2ESCCLK,
- /* LCD DSI PLL - Ux540 only */
- PRCMU_PLLDSI_LCD,
- PRCMU_DSI0CLK_LCD,
- PRCMU_DSI1CLK_LCD,
- PRCMU_DSI0ESCCLK_LCD,
- PRCMU_DSI1ESCCLK_LCD,
- PRCMU_DSI2ESCCLK_LCD,
-};
-
/**
* enum prcmu_wdog_id - PRCMU watchdog IDs
* @PRCMU_WDOG_ALL: use all timers
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 244fb0d51589..3e050b933dd0 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -323,7 +323,6 @@ struct max77693_dev {
int irq;
int irq_gpio;
- bool wakeup;
struct mutex irqlock;
int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index 676f0f388992..3f3dc45f93ee 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -64,8 +64,6 @@ struct max77693_muic_platform_data {
};
struct max77693_platform_data {
- int wakeup;
-
/* regulator data */
struct max77693_regulator_data *regulators;
int num_regulators;
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index 41ed59276c00..67c17b5a6f44 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -41,6 +41,13 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx,
unsigned int mode, unsigned int channel,
u8 ato, bool atox, unsigned int *sample);
+#define MC13783_AUDIO_RX0 36
+#define MC13783_AUDIO_RX1 37
+#define MC13783_AUDIO_TX 38
+#define MC13783_SSI_NETWORK 39
+#define MC13783_AUDIO_CODEC 40
+#define MC13783_AUDIO_DAC 41
+
#define MC13XXX_IRQ_ADCDONE 0
#define MC13XXX_IRQ_ADCBISDONE 1
#define MC13XXX_IRQ_TS 2
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index d1382dfbeff0..0ce772105508 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -756,6 +756,59 @@
#define PCR_SETTING_REG2 0x814
#define PCR_SETTING_REG3 0x747
+/* Phy bits */
+#define PHY_PCR_FORCE_CODE 0xB000
+#define PHY_PCR_OOBS_CALI_50 0x0800
+#define PHY_PCR_OOBS_VCM_08 0x0200
+#define PHY_PCR_OOBS_SEN_90 0x0040
+#define PHY_PCR_RSSI_EN 0x0002
+
+#define PHY_RCR1_ADP_TIME 0x0100
+#define PHY_RCR1_VCO_COARSE 0x001F
+
+#define PHY_RCR2_EMPHASE_EN 0x8000
+#define PHY_RCR2_NADJR 0x4000
+#define PHY_RCR2_CDR_CP_10 0x0400
+#define PHY_RCR2_CDR_SR_2 0x0100
+#define PHY_RCR2_FREQSEL_12 0x0040
+#define PHY_RCR2_CPADJEN 0x0020
+#define PHY_RCR2_CDR_SC_8 0x0008
+#define PHY_RCR2_CALIB_LATE 0x0002
+
+#define PHY_RDR_RXDSEL_1_9 0x4000
+
+#define PHY_TUNE_TUNEREF_1_0 0x4000
+#define PHY_TUNE_VBGSEL_1252 0x0C00
+#define PHY_TUNE_SDBUS_33 0x0200
+#define PHY_TUNE_TUNED18 0x01C0
+#define PHY_TUNE_TUNED12 0x0020
+
+#define PHY_BPCR_IBRXSEL 0x0400
+#define PHY_BPCR_IBTXSEL 0x0100
+#define PHY_BPCR_IB_FILTER 0x0080
+#define PHY_BPCR_CMIRROR_EN 0x0040
+
+#define PHY_REG_REV_RESV 0xE000
+#define PHY_REG_REV_RXIDLE_LATCHED 0x1000
+#define PHY_REG_REV_P1_EN 0x0800
+#define PHY_REG_REV_RXIDLE_EN 0x0400
+#define PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 0x0040
+#define PHY_REG_REV_STOP_CLKRD 0x0020
+#define PHY_REG_REV_RX_PWST 0x0008
+#define PHY_REG_REV_STOP_CLKWR 0x0004
+
+#define PHY_FLD3_TIMER_4 0x7800
+#define PHY_FLD3_TIMER_6 0x00E0
+#define PHY_FLD3_RXDELINK 0x0004
+
+#define PHY_FLD4_FLDEN_SEL 0x4000
+#define PHY_FLD4_REQ_REF 0x2000
+#define PHY_FLD4_RXAMP_OFF 0x1000
+#define PHY_FLD4_REQ_ADDA 0x0800
+#define PHY_FLD4_BER_COUNT 0x00E0
+#define PHY_FLD4_BER_TIMER 0x000A
+#define PHY_FLD4_BER_CHK_EN 0x0001
+
#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
struct rtsx_pcr;
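The new PHY_* macros are plain bit masks for the 16-bit PHY registers, so a register value is built by OR-ing the desired fields; an illustrative (not recommended-as-is) composition of a PHY_FLD4 value:

#include <linux/mfd/rtsx_pci.h>

static u16 rtsx_example_fld4_value(void)
{
        /* Combine several FLD4 fields into one 16-bit register value */
        return PHY_FLD4_FLDEN_SEL | PHY_FLD4_REQ_REF |
               PHY_FLD4_RXAMP_OFF | PHY_FLD4_REQ_ADDA |
               PHY_FLD4_BER_COUNT | PHY_FLD4_BER_TIMER |
               PHY_FLD4_BER_CHK_EN;
}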
diff --git a/include/linux/mfd/si476x-core.h b/include/linux/mfd/si476x-core.h
index ba89b94e4a56..674b45d5a757 100644
--- a/include/linux/mfd/si476x-core.h
+++ b/include/linux/mfd/si476x-core.h
@@ -316,7 +316,7 @@ enum si476x_smoothmetrics {
* response to 'FM_RD_STATUS' command
* @rdstpptyint: Traffic program flag(TP) and/or program type(PTY)
* code has changed.
- * @rdspiint: Program indentifiaction(PI) code has changed.
+ * @rdspiint: Program identification(PI) code has changed.
* @rdssyncint: RDS synchronization has changed.
* @rdsfifoint: RDS was received and the RDS FIFO has at least
* 'FM_RDS_INTERRUPT_FIFO_COUNT' elements in it.
diff --git a/include/linux/mfd/stw481x.h b/include/linux/mfd/stw481x.h
new file mode 100644
index 000000000000..eda121556e5d
--- /dev/null
+++ b/include/linux/mfd/stw481x.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef MFD_STW481X_H
+#define MFD_STW481X_H
+
+#include <linux/i2c.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+
+/* These registers are accessed from more than one driver */
+#define STW_CONF1 0x11U
+#define STW_CONF1_PDN_VMMC 0x01U
+#define STW_CONF1_VMMC_MASK 0x0eU
+#define STW_CONF1_VMMC_1_8V 0x02U
+#define STW_CONF1_VMMC_2_85V 0x04U
+#define STW_CONF1_VMMC_3V 0x06U
+#define STW_CONF1_VMMC_1_85V 0x08U
+#define STW_CONF1_VMMC_2_6V 0x0aU
+#define STW_CONF1_VMMC_2_7V 0x0cU
+#define STW_CONF1_VMMC_3_3V 0x0eU
+#define STW_CONF1_MMC_LS_STATUS 0x10U
+#define STW_PCTL_REG_LO 0x1eU
+#define STW_PCTL_REG_HI 0x1fU
+#define STW_CONF1_V_MONITORING 0x20U
+#define STW_CONF1_IT_WARN 0x40U
+#define STW_CONF1_PDN_VAUX 0x80U
+#define STW_CONF2 0x20U
+#define STW_CONF2_MASK_TWARN 0x01U
+#define STW_CONF2_VMMC_EXT 0x02U
+#define STW_CONF2_MASK_IT_WAKE_UP 0x04U
+#define STW_CONF2_GPO1 0x08U
+#define STW_CONF2_GPO2 0x10U
+#define STW_VCORE_SLEEP 0x21U
+
+/**
+ * struct stw481x - state holder for the Stw481x drivers
+ * @lock: mutex to serialize I2C accesses
+ * @client: corresponding I2C client
+ * @vmmc_regulator: regulator device for the VMMC regulator child
+ * @map: regmap handle to access device registers
+ */
+struct stw481x {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct regulator_dev *vmmc_regulator;
+ struct regmap *map;
+};
+
+#endif
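Because accesses go through the regmap handle in struct stw481x, selecting a VMMC voltage is a single masked update; a minimal sketch using only the definitions above:

#include <linux/mfd/stw481x.h>

/* Set VMMC to 1.8 V by rewriting only the voltage field of CONF1 */
static int stw481x_example_vmmc_1v8(struct stw481x *stw481x)
{
        return regmap_update_bits(stw481x->map, STW_CONF1,
                                  STW_CONF1_VMMC_MASK,
                                  STW_CONF1_VMMC_1_8V);
}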
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index b473577f36db..8789fa3c7fd9 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -17,10 +17,35 @@
struct device_node;
+#ifdef CONFIG_MFD_SYSCON
extern struct regmap *syscon_node_to_regmap(struct device_node *np);
extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
extern struct regmap *syscon_regmap_lookup_by_pdevname(const char *s);
extern struct regmap *syscon_regmap_lookup_by_phandle(
struct device_node *np,
const char *property);
+#else
+static inline struct regmap *syscon_node_to_regmap(struct device_node *np)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_phandle(
+ struct device_node *np,
+ const char *property)
+{
+ return ERR_PTR(-ENOSYS);
+}
+#endif
+
#endif /* __LINUX_MFD_SYSCON_H__ */
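Since the !CONFIG_MFD_SYSCON stubs return ERR_PTR(-ENOSYS), callers can be written unconditionally and only need the usual IS_ERR() check; a short sketch in which the phandle property name is hypothetical:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int example_get_syscon(struct device_node *np, struct regmap **map)
{
        /* "example,syscon" is an invented property name */
        *map = syscon_regmap_lookup_by_phandle(np, "example,syscon");
        if (IS_ERR(*map))
                return PTR_ERR(*map);

        return 0;
}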
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index b6bdcd66c07d..b6d36b38b99c 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -241,6 +241,12 @@
#define IMX6Q_GPR5_L2_CLK_STOP BIT(8)
+#define IMX6Q_GPR8_TX_SWING_LOW (0x7f << 25)
+#define IMX6Q_GPR8_TX_SWING_FULL (0x7f << 18)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB (0x3f << 12)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB (0x3f << 6)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN1 (0x3f << 0)
+
#define IMX6Q_GPR9_TZASC2_BYP BIT(1)
#define IMX6Q_GPR9_TZASC1_BYP BIT(0)
@@ -273,7 +279,9 @@
#define IMX6Q_GPR12_ARMP_AHB_CLK_EN BIT(26)
#define IMX6Q_GPR12_ARMP_ATB_CLK_EN BIT(25)
#define IMX6Q_GPR12_ARMP_APB_CLK_EN BIT(24)
+#define IMX6Q_GPR12_DEVICE_TYPE (0xf << 12)
#define IMX6Q_GPR12_PCIE_CTL_2 BIT(10)
+#define IMX6Q_GPR12_LOS_LEVEL (0x1f << 4)
#define IMX6Q_GPR13_SDMA_STOP_REQ BIT(30)
#define IMX6Q_GPR13_CAN2_STOP_REQ BIT(29)
@@ -363,4 +371,9 @@
#define IMX6Q_GPR13_SATA_TX_LVL_1_240_V (0x1f << 2)
#define IMX6Q_GPR13_SATA_MPLL_CLK_EN BIT(1)
#define IMX6Q_GPR13_SATA_TX_EDGE_RATE BIT(0)
+
+/* For imx6sl iomux gpr register field define */
+#define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17)
+#define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14)
+
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
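These GPR fields are normally programmed through the syscon regmap; a hedged sketch for GPR12, assuming the IOMUXC_GPR12 offset macro from the same header, with the LOS level value chosen purely as an example:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/regmap.h>

static int example_set_pcie_los_level(void)
{
        struct regmap *gpr;

        gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
        if (IS_ERR(gpr))
                return PTR_ERR(gpr);

        /* Write an example LOS level of 9 into the 5-bit field at bit 4 */
        return regmap_update_bits(gpr, IOMUXC_GPR12,
                                  IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
}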
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 25f2c611ab01..7b68a061cd60 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -125,13 +125,18 @@
#define TOTAL_CHANNELS 8
/*
-* ADC runs at 3MHz, and it takes
-* 15 cycles to latch one data output.
-* Hence the idle time for ADC to
-* process one sample data would be
-* around 5 micro seconds.
-*/
-#define IDLE_TIMEOUT 5 /* microsec */
+ * time in us for processing a single channel, calculated as follows:
+ *
+ * num cycles = open delay + (sample delay + conv time) * averaging
+ *
+ * num cycles: 152 + (1 + 13) * 16 = 376
+ *
+ * clock frequency: 26MHz / 8 = 3.25MHz
+ * clock period: 1 / 3.25MHz = 308ns
+ *
+ * processing time: 376 * 308ns = 116us
+ */
+#define IDLE_TIMEOUT 116 /* microsec */
#define TSCADC_CELLS 2
@@ -146,6 +151,7 @@ struct ti_tscadc_dev {
struct mfd_cell cells[TSCADC_CELLS];
u32 reg_se_cache;
spinlock_t reg_lock;
+ unsigned int clk_div;
/* tsc device */
struct titsc *tsc;
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 40854ac0ba3d..eefafa62d304 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -56,8 +56,6 @@ struct irq_domain;
#define WM8994_IRQ_GPIO(x) (x + WM8994_IRQ_TEMP_WARN)
struct wm8994 {
- struct mutex irq_lock;
-
struct wm8994_pdata pdata;
enum wm8994_type type;
@@ -85,16 +83,43 @@ struct wm8994 {
};
/* Device I/O API */
-int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg);
-int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
- unsigned short val);
-int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
- unsigned short mask, unsigned short val);
-int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
- int count, u16 *buf);
-int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
- int count, const u16 *buf);
+static inline int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(wm8994->regmap, reg, &val);
+
+ if (ret < 0)
+ return ret;
+ else
+ return val;
+}
+
+static inline int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
+ unsigned short val)
+{
+ return regmap_write(wm8994->regmap, reg, val);
+}
+
+static inline int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
+ int count, u16 *buf)
+{
+ return regmap_bulk_read(wm8994->regmap, reg, buf, count);
+}
+
+static inline int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
+ int count, const u16 *buf)
+{
+ return regmap_raw_write(wm8994->regmap, reg, buf, count * sizeof(u16));
+}
+
+static inline int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
+ unsigned short mask, unsigned short val)
+{
+ return regmap_update_bits(wm8994->regmap, reg, mask, val);
+}
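Note the read wrapper's return convention: a negative errno on failure, otherwise the 16-bit register value. A minimal caller sketch, with the register number a placeholder:

#include <linux/mfd/wm8994/core.h>

static int wm8994_example_read(struct wm8994 *wm8994)
{
        int ret;

        ret = wm8994_reg_read(wm8994, 0x00 /* placeholder register */);
        if (ret < 0)
                return ret;             /* error from regmap_read() */

        return ret & 0xffff;            /* register contents */
}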
/* Helper to save on boilerplate */
static inline int wm8994_request_irq(struct wm8994 *wm8994, int irq,
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 8d3c57fdf221..f5096b58b20d 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -90,11 +90,12 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
-extern int migrate_misplaced_page(struct page *page, int node);
-extern int migrate_misplaced_page(struct page *page, int node);
+extern int migrate_misplaced_page(struct page *page,
+ struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
#else
-static inline int migrate_misplaced_page(struct page *page, int node)
+static inline int migrate_misplaced_page(struct page *page,
+ struct vm_area_struct *vma, int node)
{
return -EAGAIN; /* can't migrate now */
}
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index cb358355ef43..f7eaf2d60083 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -31,6 +31,7 @@
#define I2O_MINOR 166
#define MICROCODE_MINOR 184
#define TUN_MINOR 200
+#define CUSE_MINOR 203
#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
#define MPT_MINOR 220
#define MPT2SAS_MINOR 221
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index cd1fdf75103b..8df61bc5da00 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -154,10 +154,6 @@ enum {
MLX4_CMD_QUERY_IF_STAT = 0X54,
MLX4_CMD_SET_IF_STAT = 0X55,
- /* set port opcode modifiers */
- MLX4_SET_PORT_PRIO2TC = 0x8,
- MLX4_SET_PORT_SCHEDULER = 0x9,
-
/* register/delete flow steering network rules */
MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
MLX4_QP_FLOW_STEERING_DETACH = 0x66,
@@ -182,6 +178,8 @@ enum {
MLX4_SET_PORT_VLAN_TABLE = 0x3,
MLX4_SET_PORT_PRIO_MAP = 0x4,
MLX4_SET_PORT_GID_TABLE = 0x5,
+ MLX4_SET_PORT_PRIO2TC = 0x8,
+ MLX4_SET_PORT_SCHEDULER = 0x9,
};
enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 24ce6bdd540e..f6f59271f857 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -54,6 +54,7 @@ enum {
MLX4_FLAG_MASTER = 1 << 2,
MLX4_FLAG_SLAVE = 1 << 3,
MLX4_FLAG_SRIOV = 1 << 4,
+ MLX4_FLAG_OLD_REG_MAC = 1 << 6,
};
enum {
@@ -155,7 +156,7 @@ enum {
MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1,
MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2,
MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3,
- MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4,
+ MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN = 1LL << 4,
MLX4_DEV_CAP_FLAG2_TS = 1LL << 5,
MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6,
MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7,
@@ -640,12 +641,23 @@ struct mlx4_counter {
__be64 tx_bytes;
};
+struct mlx4_quotas {
+ int qp;
+ int cq;
+ int srq;
+ int mpt;
+ int mtt;
+ int counter;
+ int xrcd;
+};
+
struct mlx4_dev {
struct pci_dev *pdev;
unsigned long flags;
unsigned long num_slaves;
struct mlx4_caps caps;
struct mlx4_phys_caps phys_caps;
+ struct mlx4_quotas quotas;
struct radix_tree_root qp_table_tree;
u8 rev_id;
char board_id[MLX4_BOARD_ID_LEN];
@@ -771,6 +783,12 @@ static inline int mlx4_is_master(struct mlx4_dev *dev)
return dev->flags & MLX4_FLAG_MASTER;
}
+static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
+{
+ return dev->phys_caps.base_sqpn + 8 +
+ 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev);
+}
+
static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
{
return (qpn < dev->phys_caps.base_sqpn + 8 +
@@ -1078,7 +1096,7 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
u8 *pg, u16 *ratelimit);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
int npages, u64 iova, u32 *lkey, u32 *rkey);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 5eb4e31af22b..da78875807fc 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -230,6 +230,15 @@ enum {
MLX5_MAX_PAGE_SHIFT = 31
};
+enum {
+ MLX5_ADAPTER_PAGE_SHIFT = 12
+};
+
+enum {
+ MLX5_CAP_OFF_DCT = 41,
+ MLX5_CAP_OFF_CMDIF_CSUM = 46,
+};
+
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
@@ -319,9 +328,9 @@ struct mlx5_hca_cap {
u8 rsvd25[42];
__be16 log_uar_page_sz;
u8 rsvd26[28];
- u8 log_msx_atomic_size_qp;
+ u8 log_max_atomic_size_qp;
u8 rsvd27[2];
- u8 log_msx_atomic_size_dc;
+ u8 log_max_atomic_size_dc;
u8 rsvd28[76];
};
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 6b8c496572c8..554548cd3dd4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -483,6 +483,7 @@ struct mlx5_priv {
struct rb_root page_root;
int fw_pages;
int reg_pages;
+ struct list_head free_list;
struct mlx5_core_health health;
@@ -557,9 +558,11 @@ typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
struct mlx5_cmd_msg *in;
struct mlx5_cmd_msg *out;
+ void *uout;
+ int uout_size;
mlx5_cmd_cbk_t callback;
void *context;
- int idx;
+ int idx;
struct completion done;
struct mlx5_cmd *cmd;
struct work_struct work;
@@ -570,6 +573,7 @@ struct mlx5_cmd_work_ent {
u8 token;
struct timespec ts1;
struct timespec ts2;
+ u16 op;
};
struct mlx5_pas {
@@ -653,6 +657,9 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
+int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size, mlx5_cmd_cbk_t callback,
+ void *context);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
@@ -676,7 +683,9 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
- struct mlx5_create_mkey_mbox_in *in, int inlen);
+ struct mlx5_create_mkey_mbox_in *in, int inlen,
+ mlx5_cmd_cbk_t callback, void *context,
+ struct mlx5_create_mkey_mbox_out *out);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
struct mlx5_query_mkey_mbox_out *out, int outlen);
@@ -745,6 +754,11 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
return mkey_idx << 8;
}
+static inline u8 mlx5_mkey_variant(u32 mkey)
+{
+ return mkey & 0xff;
+}
+
enum {
MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8b6e55ee8855..8aa4006b9636 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -297,12 +297,26 @@ static inline int put_page_testzero(struct page *page)
/*
* Try to grab a ref unless the page has a refcount of zero, return false if
* that is the case.
+ * This can be called when MMU is off so it must not access
+ * any of the virtual mappings.
*/
static inline int get_page_unless_zero(struct page *page)
{
return atomic_inc_not_zero(&page->_count);
}
+/*
+ * Try to drop a ref unless the page has a refcount of one, return false if
+ * that is the case.
+ * This is to make sure that the refcount won't become zero after this drop.
+ * This can be called when MMU is off so it must not access
+ * any of the virtual mappings.
+ */
+static inline int put_page_unless_one(struct page *page)
+{
+ return atomic_add_unless(&page->_count, -1, 1);
+}
+
extern int page_is_ram(unsigned long pfn);
/* Support for virtually mapped pages */
@@ -581,11 +595,11 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
* sets it, so none of the operations on it need to be atomic.
*/
-/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_NID] | ... | FLAGS | */
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
-#define LAST_NID_PGOFF (ZONES_PGOFF - LAST_NID_WIDTH)
+#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
/*
* Define the bit shifts to access each section. For non-existent
@@ -595,7 +609,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
-#define LAST_NID_PGSHIFT (LAST_NID_PGOFF * (LAST_NID_WIDTH != 0))
+#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -617,7 +631,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
-#define LAST_NID_MASK ((1UL << LAST_NID_WIDTH) - 1)
+#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_WIDTH) - 1)
#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
static inline enum zone_type page_zonenum(const struct page *page)
@@ -661,51 +675,117 @@ static inline int page_to_nid(const struct page *page)
#endif
#ifdef CONFIG_NUMA_BALANCING
-#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
-static inline int page_nid_xchg_last(struct page *page, int nid)
+static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
- return xchg(&page->_last_nid, nid);
+ return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}
-static inline int page_nid_last(struct page *page)
+static inline int cpupid_to_pid(int cpupid)
{
- return page->_last_nid;
+ return cpupid & LAST__PID_MASK;
}
-static inline void page_nid_reset_last(struct page *page)
+
+static inline int cpupid_to_cpu(int cpupid)
{
- page->_last_nid = -1;
+ return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}
-#else
-static inline int page_nid_last(struct page *page)
+
+static inline int cpupid_to_nid(int cpupid)
+{
+ return cpu_to_node(cpupid_to_cpu(cpupid));
+}
+
+static inline bool cpupid_pid_unset(int cpupid)
{
- return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK;
+ return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}
-extern int page_nid_xchg_last(struct page *page, int nid);
+static inline bool cpupid_cpu_unset(int cpupid)
+{
+ return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
+}
+
+static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
+{
+ return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
+}
-static inline void page_nid_reset_last(struct page *page)
+#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
- int nid = (1 << LAST_NID_SHIFT) - 1;
+ return xchg(&page->_last_cpupid, cpupid);
+}
- page->flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
- page->flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
+static inline int page_cpupid_last(struct page *page)
+{
+ return page->_last_cpupid;
+}
+static inline void page_cpupid_reset_last(struct page *page)
+{
+ page->_last_cpupid = -1;
}
-#endif /* LAST_NID_NOT_IN_PAGE_FLAGS */
#else
-static inline int page_nid_xchg_last(struct page *page, int nid)
+static inline int page_cpupid_last(struct page *page)
{
- return page_to_nid(page);
+ return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}
-static inline int page_nid_last(struct page *page)
+extern int page_cpupid_xchg_last(struct page *page, int cpupid);
+
+static inline void page_cpupid_reset_last(struct page *page)
{
- return page_to_nid(page);
+ int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
+
+ page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
+ page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
+}
+#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
+#else /* !CONFIG_NUMA_BALANCING */
+static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+{
+ return page_to_nid(page); /* XXX */
}
-static inline void page_nid_reset_last(struct page *page)
+static inline int page_cpupid_last(struct page *page)
{
+ return page_to_nid(page); /* XXX */
}
-#endif
+
+static inline int cpupid_to_nid(int cpupid)
+{
+ return -1;
+}
+
+static inline int cpupid_to_pid(int cpupid)
+{
+ return -1;
+}
+
+static inline int cpupid_to_cpu(int cpupid)
+{
+ return -1;
+}
+
+static inline int cpu_pid_to_cpupid(int nid, int pid)
+{
+ return -1;
+}
+
+static inline bool cpupid_pid_unset(int cpupid)
+{
+ return 1;
+}
+
+static inline void page_cpupid_reset_last(struct page *page)
+{
+}
+
+static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
+{
+ return false;
+}
+#endif /* CONFIG_NUMA_BALANCING */
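With CONFIG_NUMA_BALANCING enabled, the cpupid helpers pack a CPU number and a pid into one field and unpack them again; a small illustrative round trip (not kernel code, just showing how the encode/decode pair and masks relate):

static void cpupid_example_roundtrip(int cpu, pid_t pid)
{
        int cpupid = cpu_pid_to_cpupid(cpu, pid);

        /* Only the bits covered by LAST__CPU_MASK / LAST__PID_MASK survive */
        WARN_ON(cpupid_to_cpu(cpupid) != (cpu & LAST__CPU_MASK));
        WARN_ON(cpupid_to_pid(cpupid) != (pid & LAST__PID_MASK));

        /* The home node is derived from the stored CPU */
        (void)cpupid_to_nid(cpupid);
}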
static inline struct zone *page_zone(const struct page *page)
{
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d9851eeb6e1d..21a8bdf5668c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -42,18 +42,22 @@ struct page {
/* First double word block */
unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
- struct address_space *mapping; /* If low bit clear, points to
- * inode address_space, or NULL.
- * If page mapped as anonymous
- * memory, low bit is set, and
- * it points to anon_vma object:
- * see PAGE_MAPPING_ANON below.
- */
+ union {
+ struct address_space *mapping; /* If low bit clear, points to
+ * inode address_space, or NULL.
+ * If page mapped as anonymous
+ * memory, low bit is set, and
+ * it points to anon_vma object:
+ * see PAGE_MAPPING_ANON below.
+ */
+ void *s_mem; /* slab first object */
+ };
+
/* Second double word */
struct {
union {
pgoff_t index; /* Our offset within mapping. */
- void *freelist; /* slub/slob first free object */
+ void *freelist; /* sl[aou]b first free object */
bool pfmemalloc; /* If set by the page allocator,
* ALLOC_NO_WATERMARKS was set
* and the low watermark was not
@@ -109,6 +113,7 @@ struct page {
};
atomic_t _count; /* Usage count, see below. */
};
+ unsigned int active; /* SLAB */
};
};
@@ -130,6 +135,9 @@ struct page {
struct list_head list; /* slobs list of pages */
struct slab *slab_page; /* slab fields */
+ struct rcu_head rcu_head; /* Used by SLAB
+ * when destroying via RCU
+ */
};
/* Remainder is not double word aligned */
@@ -174,8 +182,8 @@ struct page {
void *shadow;
#endif
-#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
- int _last_nid;
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+ int _last_cpupid;
#endif
}
/*
@@ -420,28 +428,15 @@ struct mm_struct {
*/
unsigned long numa_next_scan;
- /* numa_next_reset is when the PTE scanner period will be reset */
- unsigned long numa_next_reset;
-
/* Restart point for scanning and setting pte_numa */
unsigned long numa_scan_offset;
/* numa_scan_seq prevents two threads setting pte_numa */
int numa_scan_seq;
-
- /*
- * The first node a task was scheduled on. If a task runs on
- * a different node than Make PTE Scan Go Now.
- */
- int first_nid;
#endif
struct uprobes_state uprobes_state;
};
-/* first nid will either be a valid NID or one of these values */
-#define NUMA_PTE_SCAN_INIT -1
-#define NUMA_PTE_SCAN_ACTIVE -2
-
static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 842de3e21e70..176fdf824b14 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -240,6 +240,7 @@ struct mmc_part {
struct mmc_card {
struct mmc_host *host; /* the host this device belongs to */
struct device dev; /* the device */
+ u32 ocr; /* the current OCR setting */
unsigned int rca; /* relative card address of device */
unsigned int type; /* card type */
#define MMC_TYPE_MMC 0 /* MMC card */
@@ -257,6 +258,7 @@ struct mmc_card {
#define MMC_CARD_REMOVED (1<<7) /* card has been removed */
#define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */
#define MMC_STATE_DOING_BKOPS (1<<10) /* card is doing BKOPS */
+#define MMC_STATE_SUSPENDED (1<<11) /* card is suspended */
unsigned int quirks; /* card quirks */
#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
@@ -420,10 +422,10 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
#define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR)
#define mmc_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
-#define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
#define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS)
+#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED)
#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
@@ -432,11 +434,12 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
#define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR)
#define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
-#define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
#define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS)
#define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS)
+#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
+#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
/*
* Quirk add/remove for MMC products.
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index da51bec578c3..87079fc38011 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -151,7 +151,8 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
struct mmc_command *, int);
extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
-extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool);
+extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool,
+ bool);
extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
@@ -188,7 +189,6 @@ extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int);
extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
extern void mmc_release_host(struct mmc_host *host);
-extern int mmc_try_claim_host(struct mmc_host *host);
extern void mmc_get_card(struct mmc_card *card);
extern void mmc_put_card(struct mmc_card *card);
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 198f0fa44e9f..6ce7d2cd3c7a 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -15,6 +15,7 @@
#define LINUX_MMC_DW_MMC_H
#include <linux/scatterlist.h>
+#include <linux/mmc/core.h>
#define MAX_MCI_SLOTS 2
@@ -129,6 +130,9 @@ struct dw_mci {
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
+ struct mmc_command stop_abort;
+ unsigned int prev_blksz;
+ unsigned char timing;
struct workqueue_struct *card_workqueue;
/* DMA interface members*/
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 3b0c33ae13e1..99f5709ac343 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -254,6 +254,7 @@ struct mmc_host {
#define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */
#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */
#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */
+#define MMC_CAP_RUNTIME_RESUME (1 << 20) /* Resume at runtime_resume. */
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
@@ -309,7 +310,6 @@ struct mmc_host {
spinlock_t lock; /* lock for claim and bus ops */
struct mmc_ios ios; /* current io bus settings */
- u32 ocr; /* the current OCR setting */
/* group bitfields together to minimize padding */
unsigned int use_spi_crc:1;
@@ -382,9 +382,6 @@ static inline void *mmc_priv(struct mmc_host *host)
#define mmc_classdev(x) (&(x)->class_dev)
#define mmc_hostname(x) (dev_name(&(x)->class_dev))
-int mmc_suspend_host(struct mmc_host *);
-int mmc_resume_host(struct mmc_host *);
-
int mmc_power_save_host(struct mmc_host *host);
int mmc_power_restore_host(struct mmc_host *host);
diff --git a/include/linux/module.h b/include/linux/module.h
index 05f2447f8c15..15cd6b1b211e 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -367,9 +367,6 @@ struct module
/* What modules do I depend on? */
struct list_head target_list;
- /* Who is waiting for us to be unloaded */
- struct task_struct *waiter;
-
/* Destruction function. */
void (*exit)(void);
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 95fc482cef36..36bb6a503f19 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -91,8 +91,6 @@ struct nand_bbt_descr {
* with NAND_BBT_CREATE.
*/
#define NAND_BBT_CREATE_EMPTY 0x00000400
-/* Search good / bad pattern through all pages of a block */
-#define NAND_BBT_SCANALLPAGES 0x00000800
/* Write bbt if necessary */
#define NAND_BBT_WRITE 0x00002000
/* Read and write back block contents when writing bbt */
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 4b02512e421c..5f487d776411 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -365,7 +365,7 @@ static inline map_word map_word_load_partial(struct map_info *map, map_word orig
bitpos = (map_bankwidth(map)-1-i)*8;
#endif
orig.x[0] &= ~(0xff << bitpos);
- orig.x[0] |= buf[i-start] << bitpos;
+ orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
}
}
return orig;
@@ -384,7 +384,7 @@ static inline map_word map_word_ff(struct map_info *map)
if (map_bankwidth(map) < MAP_FF_LIMIT) {
int bw = 8 * map_bankwidth(map);
- r.x[0] = (1 << bw) - 1;
+ r.x[0] = (1UL << bw) - 1;
} else {
for (i=0; i<map_words(map); i++)
r.x[i] = ~0UL;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index f9bfe526d310..8cc0e2fb6894 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -29,9 +29,6 @@
#include <asm/div64.h>
-#define MTD_CHAR_MAJOR 90
-#define MTD_BLOCK_MAJOR 31
-
#define MTD_ERASE_PENDING 0x01
#define MTD_ERASING 0x02
#define MTD_ERASE_SUSPEND 0x04
@@ -354,6 +351,11 @@ static inline int mtd_has_oob(const struct mtd_info *mtd)
return mtd->_read_oob && mtd->_write_oob;
}
+static inline int mtd_type_is_nand(const struct mtd_info *mtd)
+{
+ return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
+}
+
static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
return !!mtd->_block_isbad;
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index ac8e89d5a792..9e6c8f9f306e 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -198,6 +198,7 @@ typedef enum {
/* Cell info constants */
#define NAND_CI_CHIPNR_MSK 0x03
#define NAND_CI_CELLTYPE_MSK 0x0C
+#define NAND_CI_CELLTYPE_SHIFT 2
/* Keep gcc happy */
struct nand_chip;
@@ -477,7 +478,7 @@ struct nand_buffers {
* @badblockbits: [INTERN] minimum number of set bits in a good block's
* bad block marker position; i.e., BBM == 11110111b is
* not bad when badblockbits == 7
- * @cellinfo: [INTERN] MLC/multichip data from chip ident
+ * @bits_per_cell: [INTERN] number of bits per cell. i.e., 1 means SLC.
* @ecc_strength_ds: [INTERN] ECC correctability from the datasheet.
* Minimum amount of bit errors per @ecc_step_ds guaranteed
* to be correctable. If unknown, set to zero.
@@ -498,7 +499,6 @@ struct nand_buffers {
* supported, 0 otherwise.
* @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
* @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
- * @ecclayout: [REPLACEABLE] the default ECC placement scheme
* @bbt: [INTERN] bad block table pointer
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
* lookup.
@@ -559,7 +559,7 @@ struct nand_chip {
int pagebuf;
unsigned int pagebuf_bitflips;
int subpagesize;
- uint8_t cellinfo;
+ uint8_t bits_per_cell;
uint16_t ecc_strength_ds;
uint16_t ecc_step_ds;
int badblockpos;
@@ -572,7 +572,6 @@ struct nand_chip {
uint8_t *oob_poi;
struct nand_hw_control *controller;
- struct nand_ecclayout *ecclayout;
struct nand_ecc_ctrl ecc;
struct nand_buffers *buffers;
@@ -797,4 +796,13 @@ static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
}
+/*
+ * Check if this is an SLC NAND.
+ * Use !nand_is_slc() to check for MLC/TLC NAND chips; MLC and TLC are not
+ * distinguished for now.
+ */
+static inline bool nand_is_slc(struct nand_chip *chip)
+{
+ return chip->bits_per_cell == 1;
+}
#endif /* __LINUX_MTD_NAND_H */
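The bits_per_cell field replaces direct use of the raw cellinfo byte; one plausible way the identification code can fill it in, shown only to illustrate the mask and the new shift (not a verbatim quote of the detection code):

static void example_set_bits_per_cell(struct nand_chip *chip, u8 cellinfo)
{
        u8 bits = (cellinfo & NAND_CI_CELLTYPE_MSK) >> NAND_CI_CELLTYPE_SHIFT;

        chip->bits_per_cell = bits + 1; /* 1 => SLC, >1 => MLC/TLC */
}

After this, nand_is_slc() above reduces to a simple bits_per_cell == 1 test.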
diff --git a/include/linux/net.h b/include/linux/net.h
index 4f27575ce1d6..b292a0435571 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -24,6 +24,7 @@
#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
#include <linux/kmemcheck.h>
#include <linux/rcupdate.h>
+#include <linux/jump_label.h>
#include <uapi/linux/net.h>
struct poll_table_struct;
@@ -195,27 +196,23 @@ enum {
SOCK_WAKE_URG,
};
-extern int sock_wake_async(struct socket *sk, int how, int band);
-extern int sock_register(const struct net_proto_family *fam);
-extern void sock_unregister(int family);
-extern int __sock_create(struct net *net, int family, int type, int proto,
- struct socket **res, int kern);
-extern int sock_create(int family, int type, int proto,
- struct socket **res);
-extern int sock_create_kern(int family, int type, int proto,
- struct socket **res);
-extern int sock_create_lite(int family, int type, int proto,
- struct socket **res);
-extern void sock_release(struct socket *sock);
-extern int sock_sendmsg(struct socket *sock, struct msghdr *msg,
- size_t len);
-extern int sock_recvmsg(struct socket *sock, struct msghdr *msg,
- size_t size, int flags);
-extern struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
-extern struct socket *sockfd_lookup(int fd, int *err);
-extern struct socket *sock_from_file(struct file *file, int *err);
+int sock_wake_async(struct socket *sk, int how, int band);
+int sock_register(const struct net_proto_family *fam);
+void sock_unregister(int family);
+int __sock_create(struct net *net, int family, int type, int proto,
+ struct socket **res, int kern);
+int sock_create(int family, int type, int proto, struct socket **res);
+int sock_create_kern(int family, int type, int proto, struct socket **res);
+int sock_create_lite(int family, int type, int proto, struct socket **res);
+void sock_release(struct socket *sock);
+int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len);
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
+struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
+struct socket *sockfd_lookup(int fd, int *err);
+struct socket *sock_from_file(struct file *file, int *err);
#define sockfd_put(sock) fput(sock->file)
-extern int net_ratelimit(void);
+int net_ratelimit(void);
#define net_ratelimited_function(function, ...) \
do { \
@@ -243,32 +240,52 @@ do { \
#define net_random() prandom_u32()
#define net_srandom(seed) prandom_seed((__force u32)(seed))
-extern int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
- struct kvec *vec, size_t num, size_t len);
-extern int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
- struct kvec *vec, size_t num,
- size_t len, int flags);
-
-extern int kernel_bind(struct socket *sock, struct sockaddr *addr,
- int addrlen);
-extern int kernel_listen(struct socket *sock, int backlog);
-extern int kernel_accept(struct socket *sock, struct socket **newsock,
- int flags);
-extern int kernel_connect(struct socket *sock, struct sockaddr *addr,
- int addrlen, int flags);
-extern int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
- int *addrlen);
-extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
- int *addrlen);
-extern int kernel_getsockopt(struct socket *sock, int level, int optname,
- char *optval, int *optlen);
-extern int kernel_setsockopt(struct socket *sock, int level, int optname,
- char *optval, unsigned int optlen);
-extern int kernel_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
-extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
-extern int kernel_sock_shutdown(struct socket *sock,
- enum sock_shutdown_cmd how);
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+ struct static_key *done_key);
+
+#ifdef HAVE_JUMP_LABEL
+#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
+ { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
+#else /* !HAVE_JUMP_LABEL */
+#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#endif /* HAVE_JUMP_LABEL */
+
+#define net_get_random_once(buf, nbytes) \
+ ({ \
+ bool ___ret = false; \
+ static bool ___done = false; \
+ static struct static_key ___done_key = \
+ ___NET_RANDOM_STATIC_KEY_INIT; \
+ if (!static_key_true(&___done_key)) \
+ ___ret = __net_get_random_once(buf, \
+ nbytes, \
+ &___done, \
+ &___done_key); \
+ ___ret; \
+ })
+
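The macro fetches the random bytes exactly once and then short-circuits on the static key; a typical, hedged usage sketch for a per-boot hash secret:

#include <linux/net.h>

static u32 example_hash_secret;

static void example_init_secret(void)
{
        /* First caller fills the secret; later calls only test the key */
        net_get_random_once(&example_hash_secret,
                            sizeof(example_hash_secret));
}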
+int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+ size_t num, size_t len);
+int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+ size_t num, size_t len, int flags);
+
+int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen);
+int kernel_listen(struct socket *sock, int backlog);
+int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
+int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
+ int flags);
+int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
+ int *addrlen);
+int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
+ int *addrlen);
+int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval,
+ int *optlen);
+int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
+ unsigned int optlen);
+int kernel_sendpage(struct socket *sock, struct page *page, int offset,
+ size_t size, int flags);
+int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
+int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
#define MODULE_ALIAS_NETPROTO(proto) \
MODULE_ALIAS("net-pf-" __stringify(proto))
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index a2a89a5c7be5..b05a4b501ab5 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -42,6 +42,8 @@ enum {
NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */
NETIF_F_FSO_BIT, /* ... FCoE segmentation */
NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */
+ NETIF_F_GSO_IPIP_BIT, /* ... IPIP tunnel with TSO */
+ NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */
NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */
NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
@@ -107,6 +109,8 @@ enum {
#define NETIF_F_RXFCS __NETIF_F(RXFCS)
#define NETIF_F_RXALL __NETIF_F(RXALL)
#define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE)
+#define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP)
+#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT)
#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3de49aca4519..9da6a04b5975 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -60,8 +60,8 @@ struct wireless_dev;
#define SET_ETHTOOL_OPS(netdev,ops) \
( (netdev)->ethtool_ops = (ops) )
-extern void netdev_set_default_ethtool_ops(struct net_device *dev,
- const struct ethtool_ops *ops);
+void netdev_set_default_ethtool_ops(struct net_device *dev,
+ const struct ethtool_ops *ops);
/* hardware address assignment types */
#define NET_ADDR_PERM 0 /* address is permanent (default) */
@@ -298,7 +298,7 @@ struct netdev_boot_setup {
};
#define NETDEV_BOOT_SETUP_MAX 8
-extern int __init netdev_boot_setup(char *str);
+int __init netdev_boot_setup(char *str);
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
@@ -394,7 +394,7 @@ enum rx_handler_result {
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
-extern void __napi_schedule(struct napi_struct *n);
+void __napi_schedule(struct napi_struct *n);
static inline bool napi_disable_pending(struct napi_struct *n)
{
@@ -445,8 +445,8 @@ static inline bool napi_reschedule(struct napi_struct *napi)
*
* Mark NAPI processing as complete.
*/
-extern void __napi_complete(struct napi_struct *n);
-extern void napi_complete(struct napi_struct *n);
+void __napi_complete(struct napi_struct *n);
+void napi_complete(struct napi_struct *n);
/**
* napi_by_id - lookup a NAPI by napi_id
@@ -455,7 +455,7 @@ extern void napi_complete(struct napi_struct *n);
* lookup @napi_id in napi_hash table
* must be called under rcu_read_lock()
*/
-extern struct napi_struct *napi_by_id(unsigned int napi_id);
+struct napi_struct *napi_by_id(unsigned int napi_id);
/**
* napi_hash_add - add a NAPI to global hashtable
@@ -463,7 +463,7 @@ extern struct napi_struct *napi_by_id(unsigned int napi_id);
*
* generate a new napi_id and store a @napi under it in napi_hash
*/
-extern void napi_hash_add(struct napi_struct *napi);
+void napi_hash_add(struct napi_struct *napi);
/**
* napi_hash_del - remove a NAPI from global table
@@ -472,7 +472,7 @@ extern void napi_hash_add(struct napi_struct *napi);
* Warning: caller must observe rcu grace period
* before freeing memory containing @napi
*/
-extern void napi_hash_del(struct napi_struct *napi);
+void napi_hash_del(struct napi_struct *napi);
/**
* napi_disable - prevent NAPI from scheduling
@@ -483,6 +483,7 @@ extern void napi_hash_del(struct napi_struct *napi);
*/
static inline void napi_disable(struct napi_struct *n)
{
+ might_sleep();
set_bit(NAPI_STATE_DISABLE, &n->state);
while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
msleep(1);
@@ -664,8 +665,8 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
#ifdef CONFIG_RFS_ACCEL
-extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
- u32 flow_id, u16 filter_id);
+bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
+ u16 filter_id);
#endif
/* This structure contains an instance of an RX queue. */
@@ -1143,8 +1144,19 @@ struct net_device {
struct list_head dev_list;
struct list_head napi_list;
struct list_head unreg_list;
- struct list_head upper_dev_list; /* List of upper devices */
- struct list_head lower_dev_list;
+ struct list_head close_list;
+
+ /* directly linked devices, like slaves for bonding */
+ struct {
+ struct list_head upper;
+ struct list_head lower;
+ } adj_list;
+
+ /* all linked devices, *including* neighbours */
+ struct {
+ struct list_head upper;
+ struct list_head lower;
+ } all_adj_list;
/* currently active device features */
@@ -1487,9 +1499,9 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
f(dev, &dev->_tx[i], arg);
}
-extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb);
-extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+ struct sk_buff *skb);
+u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
/*
* Net namespace inlines
@@ -1546,7 +1558,7 @@ static inline void *netdev_priv(const struct net_device *dev)
#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
/* Set the sysfs device type for the network logical device to allow
- * fin grained indentification of different network device types. For
+ * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
*/
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
@@ -1673,8 +1685,8 @@ struct packet_offload {
#define NETDEV_CHANGEUPPER 0x0015
#define NETDEV_RESEND_IGMP 0x0016
-extern int register_netdevice_notifier(struct notifier_block *nb);
-extern int unregister_netdevice_notifier(struct notifier_block *nb);
+int register_netdevice_notifier(struct notifier_block *nb);
+int unregister_netdevice_notifier(struct notifier_block *nb);
struct netdev_notifier_info {
struct net_device *dev;
@@ -1697,9 +1709,9 @@ netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
return info->dev;
}
-extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
- struct netdev_notifier_info *info);
-extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
+int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
+ struct netdev_notifier_info *info);
+int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern rwlock_t dev_base_lock; /* Device list lock */
@@ -1754,54 +1766,53 @@ static inline struct net_device *first_net_device_rcu(struct net *net)
return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
-extern int netdev_boot_setup_check(struct net_device *dev);
-extern unsigned long netdev_boot_base(const char *prefix, int unit);
-extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
- const char *hwaddr);
-extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
-extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
-extern void dev_add_pack(struct packet_type *pt);
-extern void dev_remove_pack(struct packet_type *pt);
-extern void __dev_remove_pack(struct packet_type *pt);
-extern void dev_add_offload(struct packet_offload *po);
-extern void dev_remove_offload(struct packet_offload *po);
-extern void __dev_remove_offload(struct packet_offload *po);
-
-extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
- unsigned short mask);
-extern struct net_device *dev_get_by_name(struct net *net, const char *name);
-extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
-extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
-extern int dev_alloc_name(struct net_device *dev, const char *name);
-extern int dev_open(struct net_device *dev);
-extern int dev_close(struct net_device *dev);
-extern void dev_disable_lro(struct net_device *dev);
-extern int dev_loopback_xmit(struct sk_buff *newskb);
-extern int dev_queue_xmit(struct sk_buff *skb);
-extern int register_netdevice(struct net_device *dev);
-extern void unregister_netdevice_queue(struct net_device *dev,
- struct list_head *head);
-extern void unregister_netdevice_many(struct list_head *head);
+int netdev_boot_setup_check(struct net_device *dev);
+unsigned long netdev_boot_base(const char *prefix, int unit);
+struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+ const char *hwaddr);
+struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
+struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
+void dev_add_pack(struct packet_type *pt);
+void dev_remove_pack(struct packet_type *pt);
+void __dev_remove_pack(struct packet_type *pt);
+void dev_add_offload(struct packet_offload *po);
+void dev_remove_offload(struct packet_offload *po);
+void __dev_remove_offload(struct packet_offload *po);
+
+struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
+ unsigned short mask);
+struct net_device *dev_get_by_name(struct net *net, const char *name);
+struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
+struct net_device *__dev_get_by_name(struct net *net, const char *name);
+int dev_alloc_name(struct net_device *dev, const char *name);
+int dev_open(struct net_device *dev);
+int dev_close(struct net_device *dev);
+void dev_disable_lro(struct net_device *dev);
+int dev_loopback_xmit(struct sk_buff *newskb);
+int dev_queue_xmit(struct sk_buff *skb);
+int register_netdevice(struct net_device *dev);
+void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
+void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
unregister_netdevice_queue(dev, NULL);
}
-extern int netdev_refcnt_read(const struct net_device *dev);
-extern void free_netdev(struct net_device *dev);
-extern void synchronize_net(void);
-extern int init_dummy_netdev(struct net_device *dev);
+int netdev_refcnt_read(const struct net_device *dev);
+void free_netdev(struct net_device *dev);
+void netdev_freemem(struct net_device *dev);
+void synchronize_net(void);
+int init_dummy_netdev(struct net_device *dev);
-extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
-extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
-extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-extern int netdev_get_name(struct net *net, char *name, int ifindex);
-extern int dev_restart(struct net_device *dev);
+struct net_device *dev_get_by_index(struct net *net, int ifindex);
+struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
+int netdev_get_name(struct net *net, char *name, int ifindex);
+int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
-extern int netpoll_trap(void);
+int netpoll_trap(void);
#endif
-extern int skb_gro_receive(struct sk_buff **head,
- struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
@@ -1873,7 +1884,7 @@ static inline int dev_parse_header(const struct sk_buff *skb,
}
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
-extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
+int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
return register_gifconf(family, NULL);
@@ -1944,7 +1955,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
-extern void __netif_schedule(struct Qdisc *q);
+void __netif_schedule(struct Qdisc *q);
static inline void netif_schedule_queue(struct netdev_queue *txq)
{
@@ -2264,11 +2275,11 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
}
#ifdef CONFIG_XPS
-extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
- u16 index);
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+ u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
- struct cpumask *mask,
+ const struct cpumask *mask,
u16 index)
{
return 0;
@@ -2296,12 +2307,10 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
return dev->num_tx_queues > 1;
}
-extern int netif_set_real_num_tx_queues(struct net_device *dev,
- unsigned int txq);
+int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
#ifdef CONFIG_RPS
-extern int netif_set_real_num_rx_queues(struct net_device *dev,
- unsigned int rxq);
+int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
unsigned int rxq)
@@ -2328,28 +2337,27 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
}
#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
-extern int netif_get_num_default_rss_queues(void);
+int netif_get_num_default_rss_queues(void);
/* Use this variant when it is known for sure that it
* is executing from hardware interrupt context or with hardware interrupts
* disabled.
*/
-extern void dev_kfree_skb_irq(struct sk_buff *skb);
+void dev_kfree_skb_irq(struct sk_buff *skb);
/* Use this variant in places where it could be invoked
* from either hardware interrupt or other context, with hardware interrupts
* either disabled or enabled.
*/
-extern void dev_kfree_skb_any(struct sk_buff *skb);
+void dev_kfree_skb_any(struct sk_buff *skb);
-extern int netif_rx(struct sk_buff *skb);
-extern int netif_rx_ni(struct sk_buff *skb);
-extern int netif_receive_skb(struct sk_buff *skb);
-extern gro_result_t napi_gro_receive(struct napi_struct *napi,
- struct sk_buff *skb);
-extern void napi_gro_flush(struct napi_struct *napi, bool flush_old);
-extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
-extern gro_result_t napi_gro_frags(struct napi_struct *napi);
+int netif_rx(struct sk_buff *skb);
+int netif_rx_ni(struct sk_buff *skb);
+int netif_receive_skb(struct sk_buff *skb);
+gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
+void napi_gro_flush(struct napi_struct *napi, bool flush_old);
+struct sk_buff *napi_get_frags(struct napi_struct *napi);
+gro_result_t napi_gro_frags(struct napi_struct *napi);
static inline void napi_free_frags(struct napi_struct *napi)
{
@@ -2357,40 +2365,36 @@ static inline void napi_free_frags(struct napi_struct *napi)
napi->skb = NULL;
}
-extern int netdev_rx_handler_register(struct net_device *dev,
- rx_handler_func_t *rx_handler,
- void *rx_handler_data);
-extern void netdev_rx_handler_unregister(struct net_device *dev);
-
-extern bool dev_valid_name(const char *name);
-extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
-extern int dev_ethtool(struct net *net, struct ifreq *);
-extern unsigned int dev_get_flags(const struct net_device *);
-extern int __dev_change_flags(struct net_device *, unsigned int flags);
-extern int dev_change_flags(struct net_device *, unsigned int);
-extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
-extern int dev_change_name(struct net_device *, const char *);
-extern int dev_set_alias(struct net_device *, const char *, size_t);
-extern int dev_change_net_namespace(struct net_device *,
- struct net *, const char *);
-extern int dev_set_mtu(struct net_device *, int);
-extern void dev_set_group(struct net_device *, int);
-extern int dev_set_mac_address(struct net_device *,
- struct sockaddr *);
-extern int dev_change_carrier(struct net_device *,
- bool new_carrier);
-extern int dev_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_port_id *ppid);
-extern int dev_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev,
- struct netdev_queue *txq);
-extern int dev_forward_skb(struct net_device *dev,
- struct sk_buff *skb);
+int netdev_rx_handler_register(struct net_device *dev,
+ rx_handler_func_t *rx_handler,
+ void *rx_handler_data);
+void netdev_rx_handler_unregister(struct net_device *dev);
+
+bool dev_valid_name(const char *name);
+int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
+int dev_ethtool(struct net *net, struct ifreq *);
+unsigned int dev_get_flags(const struct net_device *);
+int __dev_change_flags(struct net_device *, unsigned int flags);
+int dev_change_flags(struct net_device *, unsigned int);
+void __dev_notify_flags(struct net_device *, unsigned int old_flags,
+ unsigned int gchanges);
+int dev_change_name(struct net_device *, const char *);
+int dev_set_alias(struct net_device *, const char *, size_t);
+int dev_change_net_namespace(struct net_device *, struct net *, const char *);
+int dev_set_mtu(struct net_device *, int);
+void dev_set_group(struct net_device *, int);
+int dev_set_mac_address(struct net_device *, struct sockaddr *);
+int dev_change_carrier(struct net_device *, bool new_carrier);
+int dev_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_port_id *ppid);
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq);
+int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
extern int netdev_budget;
/* Called by rtnetlink.c:rtnl_unlock() */
-extern void netdev_run_todo(void);
+void netdev_run_todo(void);
/**
* dev_put - release reference to device
@@ -2423,9 +2427,9 @@ static inline void dev_hold(struct net_device *dev)
* kind of lower layer not just hardware media.
*/
-extern void linkwatch_init_dev(struct net_device *dev);
-extern void linkwatch_fire_event(struct net_device *dev);
-extern void linkwatch_forget_dev(struct net_device *dev);
+void linkwatch_init_dev(struct net_device *dev);
+void linkwatch_fire_event(struct net_device *dev);
+void linkwatch_forget_dev(struct net_device *dev);
/**
* netif_carrier_ok - test if carrier present
@@ -2438,13 +2442,13 @@ static inline bool netif_carrier_ok(const struct net_device *dev)
return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
-extern unsigned long dev_trans_start(struct net_device *dev);
+unsigned long dev_trans_start(struct net_device *dev);
-extern void __netdev_watchdog_up(struct net_device *dev);
+void __netdev_watchdog_up(struct net_device *dev);
-extern void netif_carrier_on(struct net_device *dev);
+void netif_carrier_on(struct net_device *dev);
-extern void netif_carrier_off(struct net_device *dev);
+void netif_carrier_off(struct net_device *dev);
/**
* netif_dormant_on - mark device as dormant.
@@ -2512,9 +2516,9 @@ static inline bool netif_device_present(struct net_device *dev)
return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
-extern void netif_device_detach(struct net_device *dev);
+void netif_device_detach(struct net_device *dev);
-extern void netif_device_attach(struct net_device *dev);
+void netif_device_attach(struct net_device *dev);
/*
* Network interface message level settings
@@ -2723,119 +2727,138 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
-extern void ether_setup(struct net_device *dev);
+void ether_setup(struct net_device *dev);
/* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
- void (*setup)(struct net_device *),
- unsigned int txqs, unsigned int rxqs);
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ void (*setup)(struct net_device *),
+ unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
-extern int register_netdev(struct net_device *dev);
-extern void unregister_netdev(struct net_device *dev);
+int register_netdev(struct net_device *dev);
+void unregister_netdev(struct net_device *dev);
/* General hardware address lists handling functions */
-extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len, unsigned char addr_type);
-extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len, unsigned char addr_type);
-extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len);
-extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len);
-extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
-extern void __hw_addr_init(struct netdev_hw_addr_list *list);
+int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len, unsigned char addr_type);
+void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len, unsigned char addr_type);
+int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list, int addr_len);
+void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list, int addr_len);
+void __hw_addr_flush(struct netdev_hw_addr_list *list);
+void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
-extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
- unsigned char addr_type);
-extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
- unsigned char addr_type);
-extern int dev_addr_add_multiple(struct net_device *to_dev,
- struct net_device *from_dev,
- unsigned char addr_type);
-extern int dev_addr_del_multiple(struct net_device *to_dev,
- struct net_device *from_dev,
- unsigned char addr_type);
-extern void dev_addr_flush(struct net_device *dev);
-extern int dev_addr_init(struct net_device *dev);
+int dev_addr_add(struct net_device *dev, const unsigned char *addr,
+ unsigned char addr_type);
+int dev_addr_del(struct net_device *dev, const unsigned char *addr,
+ unsigned char addr_type);
+int dev_addr_add_multiple(struct net_device *to_dev,
+ struct net_device *from_dev, unsigned char addr_type);
+int dev_addr_del_multiple(struct net_device *to_dev,
+ struct net_device *from_dev, unsigned char addr_type);
+void dev_addr_flush(struct net_device *dev);
+int dev_addr_init(struct net_device *dev);
/* Functions used for unicast addresses handling */
-extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_sync(struct net_device *to, struct net_device *from);
-extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
-extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_uc_flush(struct net_device *dev);
-extern void dev_uc_init(struct net_device *dev);
+int dev_uc_add(struct net_device *dev, const unsigned char *addr);
+int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_uc_del(struct net_device *dev, const unsigned char *addr);
+int dev_uc_sync(struct net_device *to, struct net_device *from);
+int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_uc_unsync(struct net_device *to, struct net_device *from);
+void dev_uc_flush(struct net_device *dev);
+void dev_uc_init(struct net_device *dev);
/* Functions used for multicast addresses handling */
-extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_sync(struct net_device *to, struct net_device *from);
-extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
-extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_mc_flush(struct net_device *dev);
-extern void dev_mc_init(struct net_device *dev);
+int dev_mc_add(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_sync(struct net_device *to, struct net_device *from);
+int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_mc_unsync(struct net_device *to, struct net_device *from);
+void dev_mc_flush(struct net_device *dev);
+void dev_mc_init(struct net_device *dev);
/* Functions used for secondary unicast and multicast support */
-extern void dev_set_rx_mode(struct net_device *dev);
-extern void __dev_set_rx_mode(struct net_device *dev);
-extern int dev_set_promiscuity(struct net_device *dev, int inc);
-extern int dev_set_allmulti(struct net_device *dev, int inc);
-extern void netdev_state_change(struct net_device *dev);
-extern void netdev_notify_peers(struct net_device *dev);
-extern void netdev_features_change(struct net_device *dev);
+void dev_set_rx_mode(struct net_device *dev);
+void __dev_set_rx_mode(struct net_device *dev);
+int dev_set_promiscuity(struct net_device *dev, int inc);
+int dev_set_allmulti(struct net_device *dev, int inc);
+void netdev_state_change(struct net_device *dev);
+void netdev_notify_peers(struct net_device *dev);
+void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
-extern void dev_load(struct net *net, const char *name);
-extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *storage);
-extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
- const struct net_device_stats *netdev_stats);
+void dev_load(struct net *net, const char *name);
+struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *storage);
+void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+ const struct net_device_stats *netdev_stats);
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int bpf_jit_enable;
-extern bool netdev_has_upper_dev(struct net_device *dev,
- struct net_device *upper_dev);
-extern bool netdev_has_any_upper_dev(struct net_device *dev);
-extern struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
- struct list_head **iter);
+bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+bool netdev_has_any_upper_dev(struct net_device *dev);
+struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
+ struct list_head **iter);
/* iterate through upper list, must be called under RCU read lock */
-#define netdev_for_each_upper_dev_rcu(dev, upper, iter) \
- for (iter = &(dev)->upper_dev_list, \
- upper = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
- upper; \
- upper = netdev_upper_get_next_dev_rcu(dev, &(iter)))
-
-extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
-extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
-extern int netdev_upper_dev_link(struct net_device *dev,
+#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
+ for (iter = &(dev)->all_adj_list.upper, \
+ updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
+ updev; \
+ updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
+
+void *netdev_lower_get_next_private(struct net_device *dev,
+ struct list_head **iter);
+void *netdev_lower_get_next_private_rcu(struct net_device *dev,
+ struct list_head **iter);
+
+#define netdev_for_each_lower_private(dev, priv, iter) \
+ for (iter = (dev)->adj_list.lower.next, \
+ priv = netdev_lower_get_next_private(dev, &(iter)); \
+ priv; \
+ priv = netdev_lower_get_next_private(dev, &(iter)))
+
+#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
+ for (iter = &(dev)->adj_list.lower, \
+ priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
+ priv; \
+ priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
+
+void *netdev_adjacent_get_private(struct list_head *adj_list);
+struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
+struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
+int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
+int netdev_master_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev);
-extern int netdev_master_upper_dev_link(struct net_device *dev,
- struct net_device *upper_dev);
-extern void netdev_upper_dev_unlink(struct net_device *dev,
- struct net_device *upper_dev);
-extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
- netdev_features_t features, bool tx_path);
-extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
- netdev_features_t features);
+int netdev_master_upper_dev_link_private(struct net_device *dev,
+ struct net_device *upper_dev,
+ void *private);
+void netdev_upper_dev_unlink(struct net_device *dev,
+ struct net_device *upper_dev);
+void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
+ struct net_device *lower_dev);
+void *netdev_lower_dev_get_private(struct net_device *dev,
+ struct net_device *lower_dev);
+int skb_checksum_help(struct sk_buff *skb);
+struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features, bool tx_path);
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+ netdev_features_t features);
static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
@@ -2857,30 +2880,42 @@ static inline bool can_checksum_protocol(netdev_features_t features,
}
#ifdef CONFIG_BUG
-extern void netdev_rx_csum_fault(struct net_device *dev);
+void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
-extern void net_enable_timestamp(void);
-extern void net_disable_timestamp(void);
+void net_enable_timestamp(void);
+void net_disable_timestamp(void);
#ifdef CONFIG_PROC_FS
-extern int __init dev_proc_init(void);
+int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif
-extern int netdev_class_create_file(struct class_attribute *class_attr);
-extern void netdev_class_remove_file(struct class_attribute *class_attr);
+int netdev_class_create_file_ns(struct class_attribute *class_attr,
+ const void *ns);
+void netdev_class_remove_file_ns(struct class_attribute *class_attr,
+ const void *ns);
+
+static inline int netdev_class_create_file(struct class_attribute *class_attr)
+{
+ return netdev_class_create_file_ns(class_attr, NULL);
+}
+
+static inline void netdev_class_remove_file(struct class_attribute *class_attr)
+{
+ netdev_class_remove_file_ns(class_attr, NULL);
+}
extern struct kobj_ns_type_operations net_ns_type_operations;
-extern const char *netdev_drivername(const struct net_device *dev);
+const char *netdev_drivername(const struct net_device *dev);
-extern void linkwatch_run_queue(void);
+void linkwatch_run_queue(void);
static inline netdev_features_t netdev_get_wanted_features(
struct net_device *dev)
@@ -2972,22 +3007,22 @@ static inline const char *netdev_name(const struct net_device *dev)
return dev->name;
}
-extern __printf(3, 4)
+__printf(3, 4)
int netdev_printk(const char *level, const struct net_device *dev,
const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_info(const struct net_device *dev, const char *format, ...);
#define MODULE_ALIAS_NETDEV(device) \
@@ -3028,7 +3063,7 @@ do { \
* file/line information and a backtrace.
*/
#define netdev_WARN(dev, format, args...) \
- WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
+ WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
/* netif printk helpers, similar to netdev_printk */
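The adjacency rework in the netdevice.h hunks above replaces netdev_for_each_upper_dev_rcu with netdev_for_each_all_upper_dev_rcu, which walks dev->all_adj_list.upper under RCU and also gains lower-device private-data accessors. A minimal sketch of the new iterator follows; the counting helper is hypothetical and not part of this patch:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* Hypothetical helper: count every device stacked above @dev.
 * The iterator walks an RCU-protected list, so the read lock is required. */
static unsigned int count_all_upper_devs(struct net_device *dev)
{
	struct net_device *updev;
	struct list_head *iter;
	unsigned int n = 0;

	rcu_read_lock();
	netdev_for_each_all_upper_dev_rcu(dev, updev, iter)
		n++;
	rcu_read_unlock();

	return n;
}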
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 708fe72ab913..2077489f9887 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -35,14 +35,15 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
result->all[3] = a1->all[3] & mask->all[3];
}
-extern int netfilter_init(void);
+int netfilter_init(void);
/* Largest hook number + 1 */
#define NF_MAX_HOOKS 8
struct sk_buff;
-typedef unsigned int nf_hookfn(unsigned int hooknum,
+struct nf_hook_ops;
+typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -52,12 +53,13 @@ struct nf_hook_ops {
struct list_head list;
/* User fills in from here down. */
- nf_hookfn *hook;
- struct module *owner;
- u_int8_t pf;
- unsigned int hooknum;
+ nf_hookfn *hook;
+ struct module *owner;
+ void *priv;
+ u_int8_t pf;
+ unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
- int priority;
+ int priority;
};
struct nf_sockopt_ops {
@@ -208,7 +210,7 @@ int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
/* Call this before modifying an existing packet: ensures it is
modifiable and linear to the point you care about (writable_len).
Returns true or false. */
-extern int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
+int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
struct flowi;
struct nf_queue_entry;
@@ -269,8 +271,8 @@ nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
return csum;
}
-extern int nf_register_afinfo(const struct nf_afinfo *afinfo);
-extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
+int nf_register_afinfo(const struct nf_afinfo *afinfo);
+void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
#include <net/flow.h>
extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
@@ -315,7 +317,7 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
-extern void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
struct nf_conn;
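The netfilter.h hunk above changes the nf_hookfn typedef so a hook receives the registering nf_hook_ops, from which it can read hooknum and the new priv field. A minimal sketch of a hook and its ops built against the new layout; the state structure, counter and priority value are assumptions for illustration only:

#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>

/* Per-hook state kept behind the new ops->priv pointer (hypothetical). */
struct sample_hook_state {
	unsigned long packets;
};

static struct sample_hook_state sample_state;

/* A hook written against the new nf_hookfn signature: the hook number is
 * read from @ops instead of being passed as a bare unsigned int. */
static unsigned int sample_hook(const struct nf_hook_ops *ops,
				struct sk_buff *skb,
				const struct net_device *in,
				const struct net_device *out,
				int (*okfn)(struct sk_buff *))
{
	struct sample_hook_state *state = ops->priv;

	if (ops->hooknum == NF_INET_LOCAL_IN)
		state->packets++;
	return NF_ACCEPT;
}

static struct nf_hook_ops sample_ops = {
	.hook		= sample_hook,
	.owner		= THIS_MODULE,
	.priv		= &sample_state,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_LOCAL_IN,
	.priority	= 0,
};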
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 9ac9fbde7b61..7967516adc0d 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -49,31 +49,68 @@ enum ip_set_feature {
/* Set extensions */
enum ip_set_extension {
- IPSET_EXT_NONE = 0,
- IPSET_EXT_BIT_TIMEOUT = 1,
+ IPSET_EXT_BIT_TIMEOUT = 0,
IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT),
- IPSET_EXT_BIT_COUNTER = 2,
+ IPSET_EXT_BIT_COUNTER = 1,
IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
-};
-
-/* Extension offsets */
-enum ip_set_offset {
- IPSET_OFFSET_TIMEOUT = 0,
- IPSET_OFFSET_COUNTER,
- IPSET_OFFSET_MAX,
+ IPSET_EXT_BIT_COMMENT = 2,
+ IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
+ /* Mark set with an extension which needs to call destroy */
+ IPSET_EXT_BIT_DESTROY = 7,
+ IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
};
#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT)
#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER)
+#define SET_WITH_COMMENT(s) ((s)->extensions & IPSET_EXT_COMMENT)
+
+/* Extension id, in size order */
+enum ip_set_ext_id {
+ IPSET_EXT_ID_COUNTER = 0,
+ IPSET_EXT_ID_TIMEOUT,
+ IPSET_EXT_ID_COMMENT,
+ IPSET_EXT_ID_MAX,
+};
+
+/* Extension type */
+struct ip_set_ext_type {
+ /* Destroy extension private data (can be NULL) */
+ void (*destroy)(void *ext);
+ enum ip_set_extension type;
+ enum ipset_cadt_flags flag;
+ /* Size and minimal alignment */
+ u8 len;
+ u8 align;
+};
+
+extern const struct ip_set_ext_type ip_set_extensions[];
struct ip_set_ext {
- unsigned long timeout;
u64 packets;
u64 bytes;
+ u32 timeout;
+ char *comment;
+};
+
+struct ip_set_counter {
+ atomic64_t bytes;
+ atomic64_t packets;
+};
+
+struct ip_set_comment {
+ char *str;
};
struct ip_set;
+#define ext_timeout(e, s) \
+(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])
+#define ext_counter(e, s) \
+(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
+#define ext_comment(e, s) \
+(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
+
+
typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 cmdflags);
@@ -147,7 +184,8 @@ struct ip_set_type {
u8 revision_min, revision_max;
/* Create set */
- int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags);
+ int (*create)(struct net *net, struct ip_set *set,
+ struct nlattr *tb[], u32 flags);
/* Attribute policies */
const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
@@ -179,14 +217,45 @@ struct ip_set {
u8 revision;
/* Extensions */
u8 extensions;
+ /* Default timeout value, if enabled */
+ u32 timeout;
+ /* Element data size */
+ size_t dsize;
+ /* Offsets to extensions in elements */
+ size_t offset[IPSET_EXT_ID_MAX];
/* The type specific data */
void *data;
};
-struct ip_set_counter {
- atomic64_t bytes;
- atomic64_t packets;
-};
+static inline void
+ip_set_ext_destroy(struct ip_set *set, void *data)
+{
+ /* Check that the extension is enabled for the set and
+ * call its destroy function for its extension part in data.
+ */
+ if (SET_WITH_COMMENT(set))
+ ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
+ ext_comment(data, set));
+}
+
+static inline int
+ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
+{
+ u32 cadt_flags = 0;
+
+ if (SET_WITH_TIMEOUT(set))
+ if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(set->timeout))))
+ return -EMSGSIZE;
+ if (SET_WITH_COUNTER(set))
+ cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
+ if (SET_WITH_COMMENT(set))
+ cadt_flags |= IPSET_FLAG_WITH_COMMENT;
+
+ if (!cadt_flags)
+ return 0;
+ return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
+}
static inline void
ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
@@ -248,12 +317,13 @@ ip_set_init_counter(struct ip_set_counter *counter,
}
/* register and unregister set references */
-extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set);
-extern void ip_set_put_byindex(ip_set_id_t index);
-extern const char *ip_set_name_byindex(ip_set_id_t index);
-extern ip_set_id_t ip_set_nfnl_get(const char *name);
-extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index);
-extern void ip_set_nfnl_put(ip_set_id_t index);
+extern ip_set_id_t ip_set_get_byname(struct net *net,
+ const char *name, struct ip_set **set);
+extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
+extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern ip_set_id_t ip_set_nfnl_get(struct net *net, const char *name);
+extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
/* API for iptables set match, and SET target */
@@ -272,6 +342,8 @@ extern void *ip_set_alloc(size_t size);
extern void ip_set_free(void *members);
extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
+extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
+ size_t len);
extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext);
@@ -389,13 +461,40 @@ bitmap_bytes(u32 a, u32 b)
}
#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_comment.h>
-#define IP_SET_INIT_KEXT(skb, opt, map) \
+static inline int
+ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
+ const void *e, bool active)
+{
+ if (SET_WITH_TIMEOUT(set)) {
+ unsigned long *timeout = ext_timeout(e, set);
+
+ if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(active ? ip_set_timeout_get(timeout)
+ : *timeout)))
+ return -EMSGSIZE;
+ }
+ if (SET_WITH_COUNTER(set) &&
+ ip_set_put_counter(skb, ext_counter(e, set)))
+ return -EMSGSIZE;
+ if (SET_WITH_COMMENT(set) &&
+ ip_set_put_comment(skb, ext_comment(e, set)))
+ return -EMSGSIZE;
+ return 0;
+}
+
+#define IP_SET_INIT_KEXT(skb, opt, set) \
{ .bytes = (skb)->len, .packets = 1, \
- .timeout = ip_set_adt_opt_timeout(opt, map) }
+ .timeout = ip_set_adt_opt_timeout(opt, set) }
-#define IP_SET_INIT_UEXT(map) \
+#define IP_SET_INIT_UEXT(set) \
{ .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \
- .timeout = (map)->timeout }
+ .timeout = (set)->timeout }
+
+#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
+
+#define IPSET_CONCAT(a, b) a##b
+#define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b)
#endif /*_IP_SET_H */
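With the reworked extension handling above, each set records its element size (dsize) and per-extension offsets at create time, and the ext_timeout/ext_counter/ext_comment macros resolve an element's extension data from those offsets. A rough sketch, assuming @e points into an element of @set; the helper itself is hypothetical:

#include <linux/netfilter/ipset/ip_set.h>

/* Illustrative accessor: return the comment string attached to element @e,
 * or NULL when the set was created without the comment extension. */
static const char *sample_elem_comment(const struct ip_set *set, void *e)
{
	if (!SET_WITH_COMMENT(set))
		return NULL;
	return ext_comment(e, set)->str;
}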
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
new file mode 100644
index 000000000000..21217ea008d7
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -0,0 +1,57 @@
+#ifndef _IP_SET_COMMENT_H
+#define _IP_SET_COMMENT_H
+
+/* Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+static inline char*
+ip_set_comment_uget(struct nlattr *tb)
+{
+ return nla_data(tb);
+}
+
+static inline void
+ip_set_init_comment(struct ip_set_comment *comment,
+ const struct ip_set_ext *ext)
+{
+ size_t len = ext->comment ? strlen(ext->comment) : 0;
+
+ if (unlikely(comment->str)) {
+ kfree(comment->str);
+ comment->str = NULL;
+ }
+ if (!len)
+ return;
+ if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
+ len = IPSET_MAX_COMMENT_SIZE;
+ comment->str = kzalloc(len + 1, GFP_ATOMIC);
+ if (unlikely(!comment->str))
+ return;
+ strlcpy(comment->str, ext->comment, len + 1);
+}
+
+static inline int
+ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
+{
+ if (!comment->str)
+ return 0;
+ return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str);
+}
+
+static inline void
+ip_set_comment_free(struct ip_set_comment *comment)
+{
+ if (unlikely(!comment->str))
+ return;
+ kfree(comment->str);
+ comment->str = NULL;
+}
+
+#endif
+#endif
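The helpers in this new header cover the whole comment lifecycle: ip_set_init_comment duplicates and, if necessary, truncates the user-supplied string, ip_set_put_comment emits it as IPSET_ATTR_COMMENT, and ip_set_comment_free releases it when the element is destroyed. A sketch of how a set type might call the first two; the wrapper names are illustrative:

#include <linux/netfilter/ipset/ip_set.h>
#include <linux/skbuff.h>

/* On element add: copy the comment carried in @ext into the element. */
static void sample_comment_add(struct ip_set_comment *c,
			       const struct ip_set_ext *ext)
{
	ip_set_init_comment(c, ext);
}

/* On listing: emit the comment attribute; returns non-zero (-EMSGSIZE)
 * when the skb runs out of room. */
static int sample_comment_list(struct sk_buff *skb, struct ip_set_comment *c)
{
	return ip_set_put_comment(skb, c);
}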
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 3aac04167ca7..83c2f9e0886c 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -23,8 +23,8 @@
/* Set is defined with timeout support: timeout value may be 0 */
#define IPSET_NO_TIMEOUT UINT_MAX
-#define ip_set_adt_opt_timeout(opt, map) \
-((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (map)->timeout)
+#define ip_set_adt_opt_timeout(opt, set) \
+((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
static inline unsigned int
ip_set_timeout_uget(struct nlattr *tb)
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 127d0b90604f..275505792664 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -23,6 +23,6 @@ struct ip_conntrack_stat {
};
/* call to create an explicit dependency on nf_conntrack. */
-extern void need_conntrack(void);
+void need_conntrack(void);
#endif /* _NF_CONNTRACK_COMMON_H */
diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h
index f381020eee92..858d9b214053 100644
--- a/include/linux/netfilter/nf_conntrack_h323.h
+++ b/include/linux/netfilter/nf_conntrack_h323.h
@@ -29,13 +29,13 @@ struct nf_ct_h323_master {
struct nf_conn;
-extern int get_h225_addr(struct nf_conn *ct, unsigned char *data,
- TransportAddress *taddr,
- union nf_inet_addr *addr, __be16 *port);
-extern void nf_conntrack_h245_expect(struct nf_conn *new,
- struct nf_conntrack_expect *this);
-extern void nf_conntrack_q931_expect(struct nf_conn *new,
- struct nf_conntrack_expect *this);
+int get_h225_addr(struct nf_conn *ct, unsigned char *data,
+ TransportAddress *taddr, union nf_inet_addr *addr,
+ __be16 *port);
+void nf_conntrack_h245_expect(struct nf_conn *new,
+ struct nf_conntrack_expect *this);
+void nf_conntrack_q931_expect(struct nf_conn *new,
+ struct nf_conntrack_expect *this);
extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr,
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index 6a0664c0c451..ec2ffaf418c8 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -87,8 +87,8 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
/* delete keymap entries */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
-extern void nf_ct_gre_keymap_flush(struct net *net);
-extern void nf_nat_need_gre(void);
+void nf_ct_gre_keymap_flush(struct net *net);
+void nf_nat_need_gre(void);
#endif /* __KERNEL__ */
#endif /* _CONNTRACK_PROTO_GRE_H */
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index ba7f571a2b1c..d5af3c27fb7d 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -107,85 +107,93 @@ enum sdp_header_types {
SDP_HDR_MEDIA,
};
-extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen);
-extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb,
- unsigned int protoff, s16 off);
-extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- struct nf_conntrack_expect *exp,
- unsigned int matchoff,
- unsigned int matchlen);
-extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- unsigned int sdpoff,
- enum sdp_header_types type,
- enum sdp_header_types term,
- const union nf_inet_addr *addr);
-extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- unsigned int matchoff,
- unsigned int matchlen,
- u_int16_t port);
-extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- unsigned int sdpoff,
- const union nf_inet_addr *addr);
-extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- struct nf_conntrack_expect *rtp_exp,
- struct nf_conntrack_expect *rtcp_exp,
- unsigned int mediaoff,
- unsigned int medialen,
- union nf_inet_addr *rtp_addr);
-
-extern int ct_sip_parse_request(const struct nf_conn *ct,
- const char *dptr, unsigned int datalen,
- unsigned int *matchoff, unsigned int *matchlen,
- union nf_inet_addr *addr, __be16 *port);
-extern int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
- unsigned int dataoff, unsigned int datalen,
- enum sip_header_types type,
- unsigned int *matchoff, unsigned int *matchlen);
-extern int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
- unsigned int *dataoff, unsigned int datalen,
- enum sip_header_types type, int *in_header,
- unsigned int *matchoff, unsigned int *matchlen,
- union nf_inet_addr *addr, __be16 *port);
-extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
- unsigned int dataoff, unsigned int datalen,
- const char *name,
- unsigned int *matchoff, unsigned int *matchlen,
- union nf_inet_addr *addr, bool delim);
-extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
- unsigned int off, unsigned int datalen,
- const char *name,
- unsigned int *matchoff, unsigned int *matchen,
- unsigned int *val);
-
-extern int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
- unsigned int dataoff, unsigned int datalen,
+struct nf_nat_sip_hooks {
+ unsigned int (*msg)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen);
+
+ void (*seq_adjust)(struct sk_buff *skb,
+ unsigned int protoff, s16 off);
+
+ unsigned int (*expect)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ struct nf_conntrack_expect *exp,
+ unsigned int matchoff,
+ unsigned int matchlen);
+
+ unsigned int (*sdp_addr)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ unsigned int sdpoff,
enum sdp_header_types type,
enum sdp_header_types term,
- unsigned int *matchoff, unsigned int *matchlen);
+ const union nf_inet_addr *addr);
+
+ unsigned int (*sdp_port)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ u_int16_t port);
+
+ unsigned int (*sdp_session)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ unsigned int sdpoff,
+ const union nf_inet_addr *addr);
+
+ unsigned int (*sdp_media)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ struct nf_conntrack_expect *rtp_exp,
+ struct nf_conntrack_expect *rtcp_exp,
+ unsigned int mediaoff,
+ unsigned int medialen,
+ union nf_inet_addr *rtp_addr);
+};
+extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+
+int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
+ unsigned int datalen, unsigned int *matchoff,
+ unsigned int *matchlen, union nf_inet_addr *addr,
+ __be16 *port);
+int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
+ unsigned int dataoff, unsigned int datalen,
+ enum sip_header_types type, unsigned int *matchoff,
+ unsigned int *matchlen);
+int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
+ unsigned int *dataoff, unsigned int datalen,
+ enum sip_header_types type, int *in_header,
+ unsigned int *matchoff, unsigned int *matchlen,
+ union nf_inet_addr *addr, __be16 *port);
+int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
+ unsigned int dataoff, unsigned int datalen,
+ const char *name, unsigned int *matchoff,
+ unsigned int *matchlen, union nf_inet_addr *addr,
+ bool delim);
+int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
+ unsigned int off, unsigned int datalen,
+ const char *name, unsigned int *matchoff,
+ unsigned int *matchen, unsigned int *val);
+
+int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
+ unsigned int dataoff, unsigned int datalen,
+ enum sdp_header_types type,
+ enum sdp_header_types term,
+ unsigned int *matchoff, unsigned int *matchlen);
#endif /* __KERNEL__ */
#endif /* __NF_CONNTRACK_SIP_H__ */
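The SIP NAT hooks are now gathered into a single nf_nat_sip_hooks structure reached through one exported pointer instead of seven separate function pointers. A sketch of calling through the new table, assuming the pointer stays RCU-managed as the individual hooks were and that the caller already holds the RCU read lock:

#include <linux/netfilter.h>
#include <linux/netfilter/nf_conntrack_sip.h>
#include <linux/rcupdate.h>

/* Illustrative wrapper: forward a SIP message to the NAT helper if one is
 * loaded.  Caller is assumed to hold rcu_read_lock(), as conntrack helpers
 * do when they run. */
static unsigned int sample_nat_sip_msg(struct sk_buff *skb,
				       unsigned int protoff,
				       unsigned int dataoff,
				       const char **dptr,
				       unsigned int *datalen)
{
	const struct nf_nat_sip_hooks *hooks;

	hooks = rcu_dereference(nf_nat_sip_hooks);
	if (!hooks)
		return NF_ACCEPT;
	return hooks->msg(skb, protoff, dataoff, dptr, datalen);
}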
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index cadb7402d7a7..28c74367e900 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -14,6 +14,9 @@ struct nfnl_callback {
int (*call_rcu)(struct sock *nl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[]);
+ int (*call_batch)(struct sock *nl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[]);
const struct nla_policy *policy; /* netlink attribute policy */
const u_int16_t attr_count; /* number of nlattr's */
};
@@ -23,22 +26,24 @@ struct nfnetlink_subsystem {
__u8 subsys_id; /* nfnetlink subsystem ID */
__u8 cb_count; /* number of callbacks */
const struct nfnl_callback *cb; /* callback for individual types */
+ int (*commit)(struct sk_buff *skb);
+ int (*abort)(struct sk_buff *skb);
};
-extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
-extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
+int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
+int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
-extern int nfnetlink_has_listeners(struct net *net, unsigned int group);
-extern struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
- u32 dst_portid, gfp_t gfp_mask);
-extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
- unsigned int group, int echo, gfp_t flags);
-extern int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net,
- u32 portid, int flags);
+int nfnetlink_has_listeners(struct net *net, unsigned int group);
+struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
+ u32 dst_portid, gfp_t gfp_mask);
+int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
+ unsigned int group, int echo, gfp_t flags);
+int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
+ int flags);
-extern void nfnl_lock(__u8 subsys_id);
-extern void nfnl_unlock(__u8 subsys_id);
+void nfnl_lock(__u8 subsys_id);
+void nfnl_unlock(__u8 subsys_id);
#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
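The new call_batch, commit and abort members let a subsystem take part in batched transactions: commit runs when a batch is accepted, abort when it is rolled back. A bare-bones sketch of wiring them up; every value below is a placeholder rather than a real subsystem definition:

#include <linux/netfilter/nfnetlink.h>
#include <linux/skbuff.h>

static int sample_commit(struct sk_buff *skb)
{
	return 0;	/* make the queued changes visible */
}

static int sample_abort(struct sk_buff *skb)
{
	return 0;	/* throw the queued changes away */
}

static const struct nfnetlink_subsystem sample_subsys = {
	.subsys_id	= 0,		/* placeholder id */
	.cb_count	= 0,
	.cb		= NULL,
	.commit		= sample_commit,
	.abort		= sample_abort,
};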
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index bb4bbc9b7a18..b2e85e59f760 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -6,8 +6,8 @@
struct nf_acct;
-extern struct nf_acct *nfnl_acct_find_get(const char *filter_name);
-extern void nfnl_acct_put(struct nf_acct *acct);
-extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
+struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+void nfnl_acct_put(struct nf_acct *acct);
+void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
#endif /* _NFNL_ACCT_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index dd49566315c6..a3e215bb0241 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -229,50 +229,48 @@ struct xt_table_info {
#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
+ nr_cpu_ids * sizeof(char *))
-extern int xt_register_target(struct xt_target *target);
-extern void xt_unregister_target(struct xt_target *target);
-extern int xt_register_targets(struct xt_target *target, unsigned int n);
-extern void xt_unregister_targets(struct xt_target *target, unsigned int n);
-
-extern int xt_register_match(struct xt_match *target);
-extern void xt_unregister_match(struct xt_match *target);
-extern int xt_register_matches(struct xt_match *match, unsigned int n);
-extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
-
-extern int xt_check_match(struct xt_mtchk_param *,
- unsigned int size, u_int8_t proto, bool inv_proto);
-extern int xt_check_target(struct xt_tgchk_param *,
- unsigned int size, u_int8_t proto, bool inv_proto);
-
-extern struct xt_table *xt_register_table(struct net *net,
- const struct xt_table *table,
- struct xt_table_info *bootstrap,
- struct xt_table_info *newinfo);
-extern void *xt_unregister_table(struct xt_table *table);
-
-extern struct xt_table_info *xt_replace_table(struct xt_table *table,
- unsigned int num_counters,
- struct xt_table_info *newinfo,
- int *error);
-
-extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
-extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
-extern struct xt_match *xt_request_find_match(u8 af, const char *name,
- u8 revision);
-extern struct xt_target *xt_request_find_target(u8 af, const char *name,
- u8 revision);
-extern int xt_find_revision(u8 af, const char *name, u8 revision,
- int target, int *err);
-
-extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
- const char *name);
-extern void xt_table_unlock(struct xt_table *t);
-
-extern int xt_proto_init(struct net *net, u_int8_t af);
-extern void xt_proto_fini(struct net *net, u_int8_t af);
-
-extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
-extern void xt_free_table_info(struct xt_table_info *info);
+int xt_register_target(struct xt_target *target);
+void xt_unregister_target(struct xt_target *target);
+int xt_register_targets(struct xt_target *target, unsigned int n);
+void xt_unregister_targets(struct xt_target *target, unsigned int n);
+
+int xt_register_match(struct xt_match *target);
+void xt_unregister_match(struct xt_match *target);
+int xt_register_matches(struct xt_match *match, unsigned int n);
+void xt_unregister_matches(struct xt_match *match, unsigned int n);
+
+int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+ bool inv_proto);
+int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+ bool inv_proto);
+
+struct xt_table *xt_register_table(struct net *net,
+ const struct xt_table *table,
+ struct xt_table_info *bootstrap,
+ struct xt_table_info *newinfo);
+void *xt_unregister_table(struct xt_table *table);
+
+struct xt_table_info *xt_replace_table(struct xt_table *table,
+ unsigned int num_counters,
+ struct xt_table_info *newinfo,
+ int *error);
+
+struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
+struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
+int xt_find_revision(u8 af, const char *name, u8 revision, int target,
+ int *err);
+
+struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
+ const char *name);
+void xt_table_unlock(struct xt_table *t);
+
+int xt_proto_init(struct net *net, u_int8_t af);
+void xt_proto_fini(struct net *net, u_int8_t af);
+
+struct xt_table_info *xt_alloc_table_info(unsigned int size);
+void xt_free_table_info(struct xt_table_info *info);
/**
* xt_recseq - recursive seqcount for netfilter use
@@ -353,8 +351,8 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
return ret;
}
-extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
-extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
+struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
+void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
#ifdef CONFIG_COMPAT
#include <net/compat.h>
@@ -414,25 +412,25 @@ struct _compat_xt_align {
#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
-extern void xt_compat_lock(u_int8_t af);
-extern void xt_compat_unlock(u_int8_t af);
-
-extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
-extern void xt_compat_flush_offsets(u_int8_t af);
-extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
-extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
-
-extern int xt_compat_match_offset(const struct xt_match *match);
-extern int xt_compat_match_from_user(struct xt_entry_match *m,
- void **dstptr, unsigned int *size);
-extern int xt_compat_match_to_user(const struct xt_entry_match *m,
- void __user **dstptr, unsigned int *size);
-
-extern int xt_compat_target_offset(const struct xt_target *target);
-extern void xt_compat_target_from_user(struct xt_entry_target *t,
- void **dstptr, unsigned int *size);
-extern int xt_compat_target_to_user(const struct xt_entry_target *t,
- void __user **dstptr, unsigned int *size);
+void xt_compat_lock(u_int8_t af);
+void xt_compat_unlock(u_int8_t af);
+
+int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
+void xt_compat_flush_offsets(u_int8_t af);
+void xt_compat_init_offsets(u_int8_t af, unsigned int number);
+int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
+
+int xt_compat_match_offset(const struct xt_match *match);
+int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+ unsigned int *size);
+int xt_compat_match_to_user(const struct xt_entry_match *m,
+ void __user **dstptr, unsigned int *size);
+
+int xt_compat_target_offset(const struct xt_target *target);
+void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+ unsigned int *size);
+int xt_compat_target_to_user(const struct xt_entry_target *t,
+ void __user **dstptr, unsigned int *size);
#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index dfb4d9e52bcb..8ab1c278b66d 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -25,7 +25,7 @@ enum nf_br_hook_priorities {
#define BRNF_PPPoE 0x20
/* Only used in br_forward.c */
-extern int nf_bridge_copy_header(struct sk_buff *skb);
+int nf_bridge_copy_header(struct sk_buff *skb);
static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
{
if (skb->nf_bridge &&
@@ -53,7 +53,7 @@ static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
return 0;
}
-extern int br_handle_frame_finish(struct sk_buff *skb);
+int br_handle_frame_finish(struct sk_buff *skb);
/* Only used in br_device.c */
static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index dfaf116b3e81..6e4591bb54d4 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -6,7 +6,7 @@
#include <uapi/linux/netfilter_ipv4.h>
-extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
-extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
+int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
+__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
#endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 2d4df6ce043e..64dad1cc1a4b 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -11,12 +11,12 @@
#ifdef CONFIG_NETFILTER
-extern int ip6_route_me_harder(struct sk_buff *skb);
-extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
+int ip6_route_me_harder(struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
-extern int ipv6_netfilter_init(void);
-extern void ipv6_netfilter_fini(void);
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
/*
* Hook functions for ipv6 to allow xt_* modules to be built-in even
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index f3c7c24bec1c..fbfdb9d8d3a7 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -24,7 +24,8 @@ struct netpoll {
struct net_device *dev;
char dev_name[IFNAMSIZ];
const char *name;
- void (*rx_hook)(struct netpoll *, int, char *, int);
+ void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
+ int offset, int len);
union inet_addr local_ip, remote_ip;
bool ipv6;
@@ -41,7 +42,7 @@ struct netpoll_info {
unsigned long rx_flags;
spinlock_t rx_lock;
struct semaphore dev_lock;
- struct list_head rx_np; /* netpolls that registered an rx_hook */
+ struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
struct sk_buff_head txq;
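netpoll's receive callback is renamed to rx_skb_hook and now hands the consumer the sk_buff itself plus an offset and length, rather than a flat character buffer. A sketch of a hook with the new signature; the body only logs and is purely illustrative:

#include <linux/netpoll.h>
#include <linux/printk.h>
#include <linux/skbuff.h>

/* Hypothetical receive hook matching the new rx_skb_hook signature. */
static void sample_rx_skb_hook(struct netpoll *np, int source,
			       struct sk_buff *skb, int offset, int len)
{
	pr_debug("netpoll %s: %d byte(s) from source %d\n",
		 np->name, len, source);
}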
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index e36dee52f224..95777690039d 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -118,6 +118,9 @@ Needs to be updated if more operations are defined in future.*/
#define FIRST_NFS4_OP OP_ACCESS
#define LAST_NFS4_OP OP_RECLAIM_COMPLETE
+#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER
+#define LAST_NFS41_OP OP_RECLAIM_COMPLETE
+#define LAST_NFS42_OP OP_RECLAIM_COMPLETE
enum nfsstat4 {
NFS4_OK = 0,
@@ -460,6 +463,7 @@ enum {
NFSPROC4_CLNT_FS_LOCATIONS,
NFSPROC4_CLNT_RELEASE_LOCKOWNER,
NFSPROC4_CLNT_SECINFO,
+ NFSPROC4_CLNT_FSID_PRESENT,
/* nfs41 */
NFSPROC4_CLNT_EXCHANGE_ID,
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 3ea4cde8701c..96235b53a3fd 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -269,9 +269,13 @@ static inline int NFS_STALE(const struct inode *inode)
return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
}
-static inline int NFS_FSCACHE(const struct inode *inode)
+static inline struct fscache_cookie *nfs_i_fscache(struct inode *inode)
{
- return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
+#ifdef CONFIG_NFS_FSCACHE
+ return NFS_I(inode)->fscache;
+#else
+ return NULL;
+#endif
}
static inline __u64 NFS_FILEID(const struct inode *inode)
@@ -457,14 +461,11 @@ extern int nfs3_removexattr (struct dentry *, const char *name);
/*
* linux/fs/nfs/direct.c
*/
-extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t,
- unsigned long);
-extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
- const struct iovec *iov, unsigned long nr_segs,
- loff_t pos, bool uio);
-extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
- const struct iovec *iov, unsigned long nr_segs,
- loff_t pos, bool uio);
+extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos);
+extern ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos);
/*
* linux/fs/nfs/dir.c
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index b8cedced50c9..1150ea41b626 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -41,6 +41,7 @@ struct nfs_client {
#define NFS_CS_DISCRTRY 1 /* - disconnect on RPC retry */
#define NFS_CS_MIGRATION 2 /* - transparent state migr */
#define NFS_CS_INFINITE_SLOTS 3 /* - don't limit TCP slots */
+#define NFS_CS_NO_RETRANS_TIMEOUT 4 /* - Disable retransmit timeouts */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -78,6 +79,7 @@ struct nfs_client {
char cl_ipaddr[48];
u32 cl_cb_ident; /* v4.0 callback identifier */
const struct nfs4_minor_version_ops *cl_mvops;
+ unsigned long cl_mig_gen;
/* NFSv4.0 transport blocking */
struct nfs4_slot_table *cl_slot_tbl;
@@ -147,7 +149,9 @@ struct nfs_server {
__u64 maxfilesize; /* maximum file size */
struct timespec time_delta; /* smallest time granularity */
unsigned long mount_time; /* when this fs was mounted */
+ struct super_block *super; /* VFS super block */
dev_t s_dev; /* superblock dev numbers */
+ struct nfs_auth_info auth_info; /* parsed auth flavors */
#ifdef CONFIG_NFS_FSCACHE
struct nfs_fscache_key *fscache_key; /* unique key for superblock */
@@ -187,6 +191,12 @@ struct nfs_server {
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
+
+ unsigned long mig_gen;
+ unsigned long mig_status;
+#define NFS_MIG_IN_TRANSITION (1)
+#define NFS_MIG_FAILED (2)
+
void (*destroy)(struct nfs_server *);
atomic_t active; /* Keep track of any activity to this server */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 49f52c8f4422..3ccfcecf8999 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -591,6 +591,13 @@ struct nfs_renameres {
struct nfs_fattr *new_fattr;
};
+/* parsed sec= options */
+#define NFS_AUTH_INFO_MAX_FLAVORS 12 /* see fs/nfs/super.c */
+struct nfs_auth_info {
+ unsigned int flavor_len;
+ rpc_authflavor_t flavors[NFS_AUTH_INFO_MAX_FLAVORS];
+};
+
/*
* Argument struct for decode_entry function
*/
@@ -1053,14 +1060,18 @@ struct nfs4_fs_locations {
struct nfs4_fs_locations_arg {
struct nfs4_sequence_args seq_args;
const struct nfs_fh *dir_fh;
+ const struct nfs_fh *fh;
const struct qstr *name;
struct page *page;
const u32 *bitmask;
+ clientid4 clientid;
+ unsigned char migration:1, renew:1;
};
struct nfs4_fs_locations_res {
struct nfs4_sequence_res seq_res;
struct nfs4_fs_locations *fs_locations;
+ unsigned char migration:1, renew:1;
};
struct nfs4_secinfo4 {
@@ -1084,6 +1095,19 @@ struct nfs4_secinfo_res {
struct nfs4_secinfo_flavors *flavors;
};
+struct nfs4_fsid_present_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh *fh;
+ clientid4 clientid;
+ unsigned char renew:1;
+};
+
+struct nfs4_fsid_present_res {
+ struct nfs4_sequence_res seq_res;
+ struct nfs_fh *fh;
+ unsigned char renew:1;
+};
+
#endif /* CONFIG_NFS_V4 */
struct nfstime4 {
diff --git a/include/linux/of.h b/include/linux/of.h
index f95aee391e30..276c546980d8 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -136,7 +136,9 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
return of_read_number(cell, size);
}
+#if defined(CONFIG_SPARC)
#include <asm/prom.h>
+#endif
/* Default #address and #size cells. Allow arch asm/prom.h to override */
#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT)
@@ -226,6 +228,19 @@ static inline int of_get_child_count(const struct device_node *np)
return num;
}
+static inline int of_get_available_child_count(const struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_available_child_of_node(np, child)
+ num++;
+
+ return num;
+}
+
+/* cache lookup */
+extern struct device_node *of_find_next_cache_node(const struct device_node *);
extern struct device_node *of_find_node_with_property(
struct device_node *from, const char *prop_name);
#define for_each_node_with_property(dn, prop_name) \
@@ -275,6 +290,7 @@ extern int of_n_size_cells(struct device_node *np);
extern const struct of_device_id *of_match_node(
const struct of_device_id *matches, const struct device_node *node);
extern int of_modalias_node(struct device_node *node, char *modalias, int len);
+extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args);
extern struct device_node *of_parse_phandle(const struct device_node *np,
const char *phandle_name,
int index);
@@ -364,6 +380,9 @@ static inline bool of_have_populated_dt(void)
#define for_each_child_of_node(parent, child) \
while (0)
+#define for_each_available_child_of_node(parent, child) \
+ while (0)
+
static inline struct device_node *of_get_child_by_name(
const struct device_node *node,
const char *name)
@@ -376,6 +395,11 @@ static inline int of_get_child_count(const struct device_node *np)
return 0;
}
+static inline int of_get_available_child_count(const struct device_node *np)
+{
+ return 0;
+}
+
static inline int of_device_is_compatible(const struct device_node *device,
const char *name)
{
@@ -534,13 +558,10 @@ static inline const char *of_prop_next_string(struct property *prop,
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
-#ifndef of_node_to_nid
-static inline int of_node_to_nid(struct device_node *np)
-{
- return numa_node_id();
-}
-
-#define of_node_to_nid of_node_to_nid
+#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
+extern int of_node_to_nid(struct device_node *np);
+#else
+static inline int of_node_to_nid(struct device_node *device) { return 0; }
#endif
/**
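
A minimal usage sketch for the new of_get_available_child_count() helper (illustration only, not part of the patch; the function and variable names below are hypothetical):

#include <linux/of.h>

/* Count only children whose status is "okay"/"ok". */
static int example_count_enabled_children(struct device_node *np)
{
        /* of_get_child_count() would also count disabled nodes */
        return of_get_available_child_count(np);
}

With !CONFIG_OF the stub added above simply returns 0, so callers need no #ifdef.
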
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 4c2e6f26432c..5f6ed6b182b8 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -34,6 +34,10 @@ static inline void of_pci_range_to_resource(struct of_pci_range *range,
res->name = np->full_name;
}
+/* Translate a DMA address from device space to CPU space */
+extern u64 of_translate_dma_address(struct device_node *dev,
+ const __be32 *in_addr);
+
#ifdef CONFIG_OF_ADDRESS
extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
extern bool of_can_translate_address(struct device_node *dev);
@@ -52,10 +56,7 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
extern const __be32 *of_get_address(struct device_node *dev, int index,
u64 *size, unsigned int *flags);
-#ifndef pci_address_to_pio
-static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
-#define pci_address_to_pio pci_address_to_pio
-#endif
+extern unsigned long pci_address_to_pio(phys_addr_t addr);
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node);
@@ -63,13 +64,6 @@ extern struct of_pci_range *of_pci_range_parser_one(
struct of_pci_range_parser *parser,
struct of_pci_range *range);
#else /* CONFIG_OF_ADDRESS */
-#ifndef of_address_to_resource
-static inline int of_address_to_resource(struct device_node *dev, int index,
- struct resource *r)
-{
- return -EINVAL;
-}
-#endif
static inline struct device_node *of_find_matching_node_by_address(
struct device_node *from,
const struct of_device_id *matches,
@@ -77,12 +71,7 @@ static inline struct device_node *of_find_matching_node_by_address(
{
return NULL;
}
-#ifndef of_iomap
-static inline void __iomem *of_iomap(struct device_node *device, int index)
-{
- return NULL;
-}
-#endif
+
static inline const __be32 *of_get_address(struct device_node *dev, int index,
u64 *size, unsigned int *flags)
{
@@ -103,6 +92,22 @@ static inline struct of_pci_range *of_pci_range_parser_one(
}
#endif /* CONFIG_OF_ADDRESS */
+#ifdef CONFIG_OF
+extern int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+void __iomem *of_iomap(struct device_node *node, int index);
+#else
+static inline int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r)
+{
+ return -EINVAL;
+}
+
+static inline void __iomem *of_iomap(struct device_node *device, int index)
+{
+ return NULL;
+}
+#endif
#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index a478c62a2aab..0beaee9dac1f 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -96,31 +96,30 @@ extern int of_scan_flat_dt_by_path(const char *path,
extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
int depth, void *data);
-extern void early_init_dt_check_for_initrd(unsigned long node);
extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
extern u64 dt_mem_next_cell(int s, __be32 **cellp);
-/*
- * If BLK_DEV_INITRD, the fdt early init code will call this function,
- * to be provided by the arch code. start and end are specified as
- * physical addresses.
- */
-#ifdef CONFIG_BLK_DEV_INITRD
-extern void early_init_dt_setup_initrd_arch(u64 start, u64 end);
-#endif
-
/* Early flat tree scan hooks */
extern int early_init_dt_scan_root(unsigned long node, const char *uname,
int depth, void *data);
+extern bool early_init_dt_scan(void *params);
+
+extern const char *of_flat_dt_get_machine_name(void);
+extern const void *of_flat_dt_match_machine(const void *default_match,
+ const void * (*get_next_compat)(const char * const**));
+
/* Other Prototypes */
extern void unflatten_device_tree(void);
+extern void unflatten_and_copy_device_tree(void);
extern void early_init_devtree(void *);
#else /* CONFIG_OF_FLATTREE */
+static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
static inline void unflatten_device_tree(void) {}
+static inline void unflatten_and_copy_device_tree(void) {}
#endif /* CONFIG_OF_FLATTREE */
#endif /* __ASSEMBLY__ */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index fcd63baee5f2..c0d6dfe80895 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -8,22 +8,6 @@
#include <linux/ioport.h>
#include <linux/of.h>
-/**
- * of_irq - container for device_node/irq_specifier pair for an irq controller
- * @controller: pointer to interrupt controller device tree node
- * @size: size of interrupt specifier
- * @specifier: array of cells @size long specifing the specific interrupt
- *
- * This structure is returned when an interrupt is mapped. The controller
- * field needs to be put() after use
- */
-#define OF_MAX_IRQ_SPEC 4 /* We handle specifiers of at most 4 cells */
-struct of_irq {
- struct device_node *controller; /* Interrupt controller node */
- u32 size; /* Specifier size */
- u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */
-};
-
typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
/*
@@ -35,27 +19,22 @@ typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
extern unsigned int of_irq_workarounds;
extern struct device_node *of_irq_dflt_pic;
-extern int of_irq_map_oldworld(struct device_node *device, int index,
- struct of_irq *out_irq);
+extern int of_irq_parse_oldworld(struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
#define of_irq_workarounds (0)
#define of_irq_dflt_pic (NULL)
-static inline int of_irq_map_oldworld(struct device_node *device, int index,
- struct of_irq *out_irq)
+static inline int of_irq_parse_oldworld(struct device_node *device, int index,
+ struct of_phandle_args *out_irq)
{
return -EINVAL;
}
#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
-
-extern int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
- u32 ointsize, const __be32 *addr,
- struct of_irq *out_irq);
-extern int of_irq_map_one(struct device_node *device, int index,
- struct of_irq *out_irq);
-extern unsigned int irq_create_of_mapping(struct device_node *controller,
- const u32 *intspec,
- unsigned int intsize);
+extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq);
+extern int of_irq_parse_one(struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
+extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
extern int of_irq_to_resource(struct device_node *dev, int index,
struct resource *r);
extern int of_irq_count(struct device_node *dev);
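
To illustrate the of_irq_map_*() to of_irq_parse_*() rename (a sketch assuming a valid device node, not taken from this patch), a caller now parses into a struct of_phandle_args and passes that directly to irq_create_of_mapping():

#include <linux/of.h>
#include <linux/of_irq.h>

static unsigned int example_map_first_irq(struct device_node *np)
{
        struct of_phandle_args oirq;

        if (of_irq_parse_one(np, 0, &oirq))
                return 0;               /* no usable interrupt found */

        /* irq_create_of_mapping() now takes the parsed specifier */
        return irq_create_of_mapping(&oirq);
}
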
diff --git a/include/linux/of_mtd.h b/include/linux/of_mtd.h
index ed7f267e6389..6f10e938ff7e 100644
--- a/include/linux/of_mtd.h
+++ b/include/linux/of_mtd.h
@@ -10,10 +10,29 @@
#define __LINUX_OF_NET_H
#ifdef CONFIG_OF_MTD
+
#include <linux/of.h>
int of_get_nand_ecc_mode(struct device_node *np);
int of_get_nand_bus_width(struct device_node *np);
bool of_get_nand_on_flash_bbt(struct device_node *np);
-#endif
+
+#else /* CONFIG_OF_MTD */
+
+static inline int of_get_nand_ecc_mode(struct device_node *np)
+{
+ return -ENOSYS;
+}
+
+static inline int of_get_nand_bus_width(struct device_node *np)
+{
+ return -ENOSYS;
+}
+
+static inline bool of_get_nand_on_flash_bbt(struct device_node *np)
+{
+ return false;
+}
+
+#endif /* CONFIG_OF_MTD */
#endif /* __LINUX_OF_MTD_H */
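
The new !CONFIG_OF_MTD stubs let callers drop their #ifdefs; a hedged sketch of the resulting pattern (the driver function and the soft-ECC fallback are assumptions):

#include <linux/mtd/nand.h>
#include <linux/of.h>
#include <linux/of_mtd.h>

static int example_pick_ecc_mode(struct device_node *np)
{
        int ecc = of_get_nand_ecc_mode(np);

        /* the stub returns -ENOSYS when CONFIG_OF_MTD is off */
        return ecc < 0 ? NAND_ECC_SOFT : ecc;
}
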
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index fd9c408631a0..1a1f5ffd5288 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -5,8 +5,9 @@
#include <linux/msi.h>
struct pci_dev;
-struct of_irq;
-int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq);
+struct of_phandle_args;
+int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
+int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
struct device_node;
struct device_node *of_pci_find_child_device(struct device_node *parent,
diff --git a/include/linux/opp.h b/include/linux/opp.h
deleted file mode 100644
index 3aca2b8def33..000000000000
--- a/include/linux/opp.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Generic OPP Interface
- *
- * Copyright (C) 2009-2010 Texas Instruments Incorporated.
- * Nishanth Menon
- * Romit Dasgupta
- * Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __LINUX_OPP_H__
-#define __LINUX_OPP_H__
-
-#include <linux/err.h>
-#include <linux/cpufreq.h>
-#include <linux/notifier.h>
-
-struct opp;
-struct device;
-
-enum opp_event {
- OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
-};
-
-#if defined(CONFIG_PM_OPP)
-
-unsigned long opp_get_voltage(struct opp *opp);
-
-unsigned long opp_get_freq(struct opp *opp);
-
-int opp_get_opp_count(struct device *dev);
-
-struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
- bool available);
-
-struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq);
-
-struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq);
-
-int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt);
-
-int opp_enable(struct device *dev, unsigned long freq);
-
-int opp_disable(struct device *dev, unsigned long freq);
-
-struct srcu_notifier_head *opp_get_notifier(struct device *dev);
-#else
-static inline unsigned long opp_get_voltage(struct opp *opp)
-{
- return 0;
-}
-
-static inline unsigned long opp_get_freq(struct opp *opp)
-{
- return 0;
-}
-
-static inline int opp_get_opp_count(struct device *dev)
-{
- return 0;
-}
-
-static inline struct opp *opp_find_freq_exact(struct device *dev,
- unsigned long freq, bool available)
-{
- return ERR_PTR(-EINVAL);
-}
-
-static inline struct opp *opp_find_freq_floor(struct device *dev,
- unsigned long *freq)
-{
- return ERR_PTR(-EINVAL);
-}
-
-static inline struct opp *opp_find_freq_ceil(struct device *dev,
- unsigned long *freq)
-{
- return ERR_PTR(-EINVAL);
-}
-
-static inline int opp_add(struct device *dev, unsigned long freq,
- unsigned long u_volt)
-{
- return -EINVAL;
-}
-
-static inline int opp_enable(struct device *dev, unsigned long freq)
-{
- return 0;
-}
-
-static inline int opp_disable(struct device *dev, unsigned long freq)
-{
- return 0;
-}
-
-static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev)
-{
- return ERR_PTR(-EINVAL);
-}
-#endif /* CONFIG_PM_OPP */
-
-#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
-int of_init_opp_table(struct device *dev);
-#else
-static inline int of_init_opp_table(struct device *dev)
-{
- return -EINVAL;
-}
-#endif
-
-#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
-int opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table);
-void opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table);
-#else
-static inline int opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
-{
- return -EINVAL;
-}
-
-static inline
-void opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
-{
-}
-#endif /* CONFIG_CPU_FREQ */
-
-#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 86292beebfe2..438694650471 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -129,10 +129,9 @@ struct parallel_data {
struct padata_serial_queue __percpu *squeue;
atomic_t reorder_objects;
atomic_t refcnt;
+ atomic_t seq_nr;
struct padata_cpumask cpumask;
spinlock_t lock ____cacheline_aligned;
- spinlock_t seq_lock;
- unsigned int seq_nr;
unsigned int processed;
struct timer_list timer;
};
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index 93506a114034..da523661500a 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -38,10 +38,10 @@
* The last is when there is insufficient space in page->flags and a separate
* lookup is necessary.
*
- * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS |
- * " plus space for last_nid: | NODE | ZONE | LAST_NID ... | FLAGS |
- * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
- * " plus space for last_nid: | SECTION | NODE | ZONE | LAST_NID ... | FLAGS |
+ * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS |
+ * " plus space for last_cpupid: | NODE | ZONE | LAST_CPUPID ... | FLAGS |
+ * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
+ * " plus space for last_cpupid: | SECTION | NODE | ZONE | LAST_CPUPID ... | FLAGS |
* classic sparse no space for node: | SECTION | ZONE | ... | FLAGS |
*/
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -62,15 +62,21 @@
#endif
#ifdef CONFIG_NUMA_BALANCING
-#define LAST_NID_SHIFT NODES_SHIFT
+#define LAST__PID_SHIFT 8
+#define LAST__PID_MASK ((1 << LAST__PID_SHIFT)-1)
+
+#define LAST__CPU_SHIFT NR_CPUS_BITS
+#define LAST__CPU_MASK ((1 << LAST__CPU_SHIFT)-1)
+
+#define LAST_CPUPID_SHIFT (LAST__PID_SHIFT+LAST__CPU_SHIFT)
#else
-#define LAST_NID_SHIFT 0
+#define LAST_CPUPID_SHIFT 0
#endif
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_NID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
-#define LAST_NID_WIDTH LAST_NID_SHIFT
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
-#define LAST_NID_WIDTH 0
+#define LAST_CPUPID_WIDTH 0
#endif
/*
@@ -81,8 +87,8 @@
#define NODE_NOT_IN_PAGE_FLAGS
#endif
-#if defined(CONFIG_NUMA_BALANCING) && LAST_NID_WIDTH == 0
-#define LAST_NID_NOT_IN_PAGE_FLAGS
+#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0
+#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif
#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6d53675c2b54..98ada58f9942 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -329,7 +329,9 @@ static inline void set_page_writeback(struct page *page)
* System with lots of page flags available. This allows separate
* flags for PageHead() and PageTail() checks of compound pages so that bit
* tests can be used in performance sensitive paths. PageCompound is
- * generally not used in hot code paths.
+ * generally not used in hot code paths except arch/powerpc/mm/init_64.c
+ * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
+ * and avoid handling those in real mode.
*/
__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
__PAGEFLAG(Tail, tail)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index da172f956ad6..d3a888ae4b2e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -330,8 +330,6 @@ struct pci_dev {
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */
unsigned int is_managed:1;
- unsigned int is_pcie:1; /* Obsolete. Will be removed.
- Use pci_is_pcie() instead */
unsigned int needs_freset:1; /* Dev requires fundamental reset */
unsigned int state_saved:1;
unsigned int is_physfn:1;
@@ -472,6 +470,10 @@ struct pci_bus {
/*
* Returns true if the pci bus is root (behind host-pci bridge),
* false otherwise
+ *
+ * Some code assumes that "bus->self == NULL" means that bus is a root bus.
+ * This is incorrect because "virtual" buses added for SR-IOV (via
+ * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
*/
static inline bool pci_is_root_bus(struct pci_bus *pbus)
{
@@ -1749,11 +1751,11 @@ static inline int pci_pcie_cap(struct pci_dev *dev)
* pci_is_pcie - check if the PCI device is PCI Express capable
* @dev: PCI device
*
- * Retrun true if the PCI device is PCI Express capable, false otherwise.
+ * Returns: true if the PCI device is PCI Express capable, false otherwise.
*/
static inline bool pci_is_pcie(struct pci_dev *dev)
{
- return !!pci_pcie_cap(dev);
+ return pci_pcie_cap(dev);
}
/**
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index cc88172c7d9a..9e4761caa80c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -332,7 +332,7 @@ do { \
#endif
#ifndef this_cpu_sub
-# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val))
+# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val))
#endif
#ifndef this_cpu_inc
@@ -375,22 +375,6 @@ do { \
# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif
-#ifndef this_cpu_xor
-# ifndef this_cpu_xor_1
-# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef this_cpu_xor_2
-# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef this_cpu_xor_4
-# define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef this_cpu_xor_8
-# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
-#endif
-
#define _this_cpu_generic_add_return(pcp, val) \
({ \
typeof(pcp) ret__; \
@@ -418,7 +402,7 @@ do { \
# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif
-#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val))
+#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
@@ -586,7 +570,7 @@ do { \
#endif
#ifndef __this_cpu_sub
-# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val))
+# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val))
#endif
#ifndef __this_cpu_inc
@@ -629,22 +613,6 @@ do { \
# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif
-#ifndef __this_cpu_xor
-# ifndef __this_cpu_xor_1
-# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef __this_cpu_xor_2
-# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef __this_cpu_xor_4
-# define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef __this_cpu_xor_8
-# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
-#endif
-
#define __this_cpu_generic_add_return(pcp, val) \
({ \
__this_cpu_add(pcp, val); \
@@ -668,7 +636,7 @@ do { \
__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif
-#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val))
+#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
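
The typeof() casts above matter when the value being subtracted is a narrower unsigned type than the per-cpu variable; a small sketch of the case they fix (names hypothetical):

#include <linux/percpu.h>

static DEFINE_PER_CPU(long, example_counter);

static void example_uncharge(unsigned int nr_pages)
{
        /*
         * Without the cast, -(nr_pages) wraps in unsigned int and is
         * then zero-extended to long, adding a huge positive value
         * instead of subtracting nr_pages.
         */
        this_cpu_sub(example_counter, nr_pages);
}
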
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
index 0b23edbee309..1900bd0fa639 100644
--- a/include/linux/percpu_ida.h
+++ b/include/linux/percpu_ida.h
@@ -16,6 +16,8 @@ struct percpu_ida {
* percpu_ida_init()
*/
unsigned nr_tags;
+ unsigned percpu_max_size;
+ unsigned percpu_batch_size;
struct percpu_ida_cpu __percpu *tag_cpu;
@@ -51,10 +53,29 @@ struct percpu_ida {
} ____cacheline_aligned_in_smp;
};
+/*
+ * Number of tags we move between the percpu freelist and the global freelist at
+ * a time
+ */
+#define IDA_DEFAULT_PCPU_BATCH_MOVE 32U
+/* Max size of the percpu freelist */
+#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
+
int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
void percpu_ida_destroy(struct percpu_ida *pool);
-int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags);
+int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
+ unsigned long max_size, unsigned long batch_size);
+static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
+{
+ return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
+ IDA_DEFAULT_PCPU_BATCH_MOVE);
+}
+
+typedef int (*percpu_ida_cb)(unsigned, void *);
+int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
+ void *data);
+unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
#endif /* __PERCPU_IDA_H__ */
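
A usage sketch of the extended pool API (assumed driver code, not from the patch): the existing percpu_ida_init() wrapper keeps the default per-cpu cache sizes, while __percpu_ida_init() lets callers tune them.

#include <linux/gfp.h>
#include <linux/percpu_ida.h>

static struct percpu_ida example_pool;

static int example_pool_setup(void)
{
        /* 256 tags, default IDA_DEFAULT_PCPU_SIZE/BATCH_MOVE caching */
        return percpu_ida_init(&example_pool, 256);
}

static void example_pool_use(void)
{
        int tag = percpu_ida_alloc(&example_pool, GFP_KERNEL);

        if (tag >= 0)
                percpu_ida_free(&example_pool, tag);
}
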
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c8ba627c1d60..2e069d1288df 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -584,6 +584,10 @@ struct perf_sample_data {
struct perf_regs_user regs_user;
u64 stack_user_size;
u64 weight;
+ /*
+ * Transaction flags for abort events:
+ */
+ u64 txn;
};
static inline void perf_sample_data_init(struct perf_sample_data *data,
@@ -599,6 +603,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
data->stack_user_size = 0;
data->weight = 0;
data->data_src.val = 0;
+ data->txn = 0;
}
extern void perf_output_sample(struct perf_output_handle *handle,
diff --git a/include/linux/i2c/at24.h b/include/linux/platform_data/at24.h
index 285025a9cdc9..c42aa89d34ee 100644
--- a/include/linux/i2c/at24.h
+++ b/include/linux/platform_data/at24.h
@@ -28,7 +28,7 @@
*
* void get_mac_addr(struct memory_accessor *mem_acc, void *context)
* {
- * u8 *mac_addr = ethernet_pdata->mac_addr;
+ * u8 *mac_addr = ethernet_pdata->mac_addr;
* off_t offset = context;
*
* // Read MAC addr from EEPROM
diff --git a/include/linux/platform_data/clk-nomadik.h b/include/linux/platform_data/clk-nomadik.h
deleted file mode 100644
index 5713c87b2477..000000000000
--- a/include/linux/platform_data/clk-nomadik.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* Minimal platform data header */
-void nomadik_clk_init(void);
diff --git a/include/linux/platform_data/clk-ux500.h b/include/linux/platform_data/clk-ux500.h
index 9d98f3aaa16c..97baf831e071 100644
--- a/include/linux/platform_data/clk-ux500.h
+++ b/include/linux/platform_data/clk-ux500.h
@@ -10,6 +10,9 @@
#ifndef __CLK_UX500_H
#define __CLK_UX500_H
+void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
+ u32 clkrst5_base, u32 clkrst6_base);
+
void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
u32 clkrst5_base, u32 clkrst6_base);
void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index 8db5ae03b6e3..689a856b86f9 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -84,6 +84,8 @@ struct snd_platform_data {
u8 version;
u8 txnumevt;
u8 rxnumevt;
+ int tx_dma_channel;
+ int rx_dma_channel;
};
enum {
diff --git a/include/linux/platform_data/dma-s3c24xx.h b/include/linux/platform_data/dma-s3c24xx.h
new file mode 100644
index 000000000000..89ba1b0c90e4
--- /dev/null
+++ b/include/linux/platform_data/dma-s3c24xx.h
@@ -0,0 +1,46 @@
+/*
+ * S3C24XX DMA handling
+ *
+ * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/* Helper to encode the source selection constraints for early s3c socs. */
+#define S3C24XX_DMA_CHANREQ(src, chan) ((BIT(3) | src) << chan * 4)
+
+enum s3c24xx_dma_bus {
+ S3C24XX_DMA_APB,
+ S3C24XX_DMA_AHB,
+};
+
+/**
+ * @bus: on which bus does the peripheral reside - AHB or APB.
+ * @handshake: is a handshake with the peripheral necessary
+ * @chansel: channel selection information, depending on variant; reqsel for
+ * s3c2443 and later and channel-selection map for earlier SoCs
+ * see CHANSEL doc in s3c2443-dma.c
+ */
+struct s3c24xx_dma_channel {
+ enum s3c24xx_dma_bus bus;
+ bool handshake;
+ u16 chansel;
+};
+
+/**
+ * struct s3c24xx_dma_platdata - platform specific settings
+ * @num_phy_channels: number of physical channels
+ * @channels: array of virtual channel descriptions
+ * @num_channels: number of virtual channels
+ */
+struct s3c24xx_dma_platdata {
+ int num_phy_channels;
+ struct s3c24xx_dma_channel *channels;
+ int num_channels;
+};
+
+struct dma_chan;
+bool s3c24xx_dma_filter(struct dma_chan *chan, void *param);
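
A hypothetical board-file sketch for the new S3C24XX DMA platform data (channel numbers and counts are made up for illustration):

#include <linux/kernel.h>
#include <linux/platform_data/dma-s3c24xx.h>

static struct s3c24xx_dma_channel example_dma_channels[] = {
        { .bus = S3C24XX_DMA_APB, .handshake = true,  .chansel = 3 },
        { .bus = S3C24XX_DMA_AHB, .handshake = false, .chansel = 0 },
};

static struct s3c24xx_dma_platdata example_dma_pdata = {
        .num_phy_channels = 4,
        .channels         = example_dma_channels,
        .num_channels     = ARRAY_SIZE(example_dma_channels),
};
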
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index 179fb91bb5f2..f50821cb64be 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -67,10 +67,10 @@ struct edmacc_param {
#define ITCCHEN BIT(23)
/* ch_status parameter of the callback function - possible values */
-#define DMA_COMPLETE 1
-#define DMA_CC_ERROR 2
-#define DMA_TC1_ERROR 3
-#define DMA_TC2_ERROR 4
+#define EDMA_DMA_COMPLETE 1
+#define EDMA_DMA_CC_ERROR 2
+#define EDMA_DMA_TC1_ERROR 3
+#define EDMA_DMA_TC2_ERROR 4
enum address_mode {
INCR = 0,
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
new file mode 100644
index 000000000000..6efd20264585
--- /dev/null
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -0,0 +1,60 @@
+/*
+ * DaVinci GPIO Platform Related Defines
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DAVINCI_GPIO_PLATFORM_H
+#define __DAVINCI_GPIO_PLATFORM_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#include <asm-generic/gpio.h>
+
+enum davinci_gpio_type {
+ GPIO_TYPE_TNETV107X = 0,
+};
+
+struct davinci_gpio_platform_data {
+ u32 ngpio;
+ u32 gpio_unbanked;
+ u32 intc_irq_num;
+};
+
+
+struct davinci_gpio_controller {
+ struct gpio_chip chip;
+ int irq_base;
+ /* Serialize access to GPIO registers */
+ spinlock_t lock;
+ void __iomem *regs;
+ void __iomem *set_data;
+ void __iomem *clr_data;
+ void __iomem *in_data;
+ int gpio_unbanked;
+ unsigned gpio_irq;
+};
+
+/*
+ * basic gpio routines
+ */
+#define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */
+
+/* Convert GPIO signal to GPIO pin number */
+#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
+
+static inline u32 __gpio_mask(unsigned gpio)
+{
+ return 1 << (gpio % 32);
+}
+#endif
diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h
index 51a2ff579d60..624ff9edad6f 100644
--- a/include/linux/platform_data/leds-lp55xx.h
+++ b/include/linux/platform_data/leds-lp55xx.h
@@ -22,6 +22,7 @@
struct lp55xx_led_config {
const char *name;
+ const char *default_trigger;
u8 chan_nr;
u8 led_current; /* mA x10, 0 if led is not connected */
u8 max_current;
@@ -66,10 +67,8 @@ struct lp55xx_platform_data {
/* Clock configuration */
u8 clock_mode;
- /* Platform specific functions */
- int (*setup_resources)(void);
- void (*release_resources)(void);
- void (*enable)(bool state);
+ /* optional enable GPIO */
+ int enable_gpio;
/* Predefined pattern data */
struct lp55xx_predef_pattern *patterns;
diff --git a/include/linux/platform_data/leds-pca9685.h b/include/linux/platform_data/leds-pca9685.h
new file mode 100644
index 000000000000..778e9e4249cc
--- /dev/null
+++ b/include/linux/platform_data/leds-pca9685.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2013 Maximilian Güntner <maximilian.guentner@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Based on leds-pca963x.h by Peter Meerwald <p.meerwald@bct-electronic.com>
+ *
+ * LED driver for the NXP PCA9685 PWM chip
+ *
+ */
+
+#ifndef __LINUX_PCA9685_H
+#define __LINUX_PCA9685_H
+
+#include <linux/leds.h>
+
+enum pca9685_outdrv {
+ PCA9685_OPEN_DRAIN,
+ PCA9685_TOTEM_POLE,
+};
+
+enum pca9685_inverted {
+ PCA9685_NOT_INVERTED,
+ PCA9685_INVERTED,
+};
+
+struct pca9685_platform_data {
+ struct led_platform_data leds;
+ enum pca9685_outdrv outdrv;
+ enum pca9685_inverted inverted;
+};
+
+#endif /* __LINUX_PCA9685_H */
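
An illustrative platform-data instance for the new PCA9685 header (LED names and counts are placeholders):

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/platform_data/leds-pca9685.h>

static struct led_info example_pca9685_leds[] = {
        { .name = "example:green:status" },
};

static struct pca9685_platform_data example_pca9685_pdata = {
        .leds = {
                .num_leds = ARRAY_SIZE(example_pca9685_leds),
                .leds     = example_pca9685_leds,
        },
        .outdrv   = PCA9685_TOTEM_POLE,
        .inverted = PCA9685_NOT_INVERTED,
};
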
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index d44912d81578..75f70f6ac137 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -10,6 +10,8 @@
#ifndef __ASM_ARCH_IMX_ESDHC_H
#define __ASM_ARCH_IMX_ESDHC_H
+#include <linux/types.h>
+
enum wp_types {
ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
@@ -32,6 +34,7 @@ enum cd_types {
* @cd_gpio: gpio for card_detect interrupt
* @wp_type: type of write_protect method (see wp_types enum above)
* @cd_type: type of card_detect method (see cd_types enum above)
+ * @support_vsel: indicates that 1.8V voltage switching is supported
*/
struct esdhc_platform_data {
@@ -41,5 +44,7 @@ struct esdhc_platform_data {
enum cd_types cd_type;
int max_bus_width;
unsigned int f_max;
+ bool support_vsel;
+ unsigned int delay_line;
};
#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index 6bf9ef43ddb1..4da5bfa2147f 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -23,13 +23,16 @@ enum nand_io {
};
enum omap_ecc {
- /* 1-bit ecc: stored at end of spare area */
- OMAP_ECC_HAMMING_CODE_DEFAULT = 0, /* Default, s/w method */
- OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */
- /* 1-bit ecc: stored at beginning of spare area as romcode */
- OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */
- OMAP_ECC_BCH4_CODE_HW, /* 4-bit BCH ecc code */
- OMAP_ECC_BCH8_CODE_HW, /* 8-bit BCH ecc code */
+ /* 1-bit ECC calculation by GPMC, Error detection by Software */
+ OMAP_ECC_HAM1_CODE_HW = 0,
+ /* 4-bit ECC calculation by GPMC, Error detection by Software */
+ OMAP_ECC_BCH4_CODE_HW_DETECTION_SW,
+ /* 4-bit ECC calculation by GPMC, Error detection by ELM */
+ OMAP_ECC_BCH4_CODE_HW,
+ /* 8-bit ECC calculation by GPMC, Error detection by Software */
+ OMAP_ECC_BCH8_CODE_HW_DETECTION_SW,
+ /* 8-bit ECC calculation by GPMC, Error detection by ELM */
+ OMAP_ECC_BCH8_CODE_HW,
};
struct gpmc_nand_regs {
@@ -63,5 +66,6 @@ struct omap_nand_platform_data {
/* for passing the partitions */
struct device_node *of_node;
+ struct device_node *elm_of_node;
};
#endif
diff --git a/include/linux/platform_data/pinctrl-single.h b/include/linux/platform_data/pinctrl-single.h
new file mode 100644
index 000000000000..72eacda9b360
--- /dev/null
+++ b/include/linux/platform_data/pinctrl-single.h
@@ -0,0 +1,12 @@
+/**
+ * irq: optional wake-up interrupt
+ * rearm: optional soc specific rearm function
+ *
+ * Note that the irq and rearm setup should come from device
+ * tree except for omap where there are still some dependencies
+ * to the legacy PRM code.
+ */
+struct pcs_pdata {
+ int irq;
+ void (*rearm)(void);
+};
diff --git a/arch/arm/mach-tegra/board-paz00.h b/include/linux/platform_data/zforce_ts.h
index 25c08ecef52f..0472ab2f6ede 100644
--- a/arch/arm/mach-tegra/board-paz00.h
+++ b/include/linux/platform_data/zforce_ts.h
@@ -1,7 +1,6 @@
-/*
- * arch/arm/mach-tegra/board-paz00.h
+/* drivers/input/touchscreen/zforce.c
*
- * Copyright (C) 2010 Marc Dietrich <marvin24@gmx.de>
+ * Copyright (C) 2012-2013 MundoReader S.L.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -11,15 +10,17 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
-#ifndef _MACH_TEGRA_BOARD_PAZ00_H
-#define _MACH_TEGRA_BOARD_PAZ00_H
+#ifndef _LINUX_INPUT_ZFORCE_TS_H
+#define _LINUX_INPUT_ZFORCE_TS_H
-#include "gpio-names.h"
+struct zforce_ts_platdata {
+ int gpio_int;
+ int gpio_rst;
-#define TEGRA_WIFI_PWRN TEGRA_GPIO_PK5
-#define TEGRA_WIFI_RST TEGRA_GPIO_PD1
+ unsigned int x_max;
+ unsigned int y_max;
+};
-#endif
+#endif /* _LINUX_INPUT_ZFORCE_TS_H */
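
A board-file sketch for the relocated zforce platform data (GPIO numbers and panel resolution are placeholders):

#include <linux/platform_data/zforce_ts.h>

static struct zforce_ts_platdata example_zforce_pdata = {
        .gpio_int = 52,
        .gpio_rst = 53,
        .x_max    = 1280,
        .y_max    = 800,
};
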
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index ce8e4ffd78c7..16f6654082dd 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -178,6 +178,7 @@ struct platform_driver {
int (*resume)(struct platform_device *);
struct device_driver driver;
const struct platform_device_id *id_table;
+ bool prevent_deferred_probe;
};
#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
new file mode 100644
index 000000000000..5151b0059585
--- /dev/null
+++ b/include/linux/pm_opp.h
@@ -0,0 +1,139 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_OPP_H__
+#define __LINUX_OPP_H__
+
+#include <linux/err.h>
+#include <linux/cpufreq.h>
+#include <linux/notifier.h>
+
+struct dev_pm_opp;
+struct device;
+
+enum dev_pm_opp_event {
+ OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+};
+
+#if defined(CONFIG_PM_OPP)
+
+unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
+
+unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+
+int dev_pm_opp_get_opp_count(struct device *dev);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+ unsigned long freq,
+ bool available);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+ unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+ unsigned long *freq);
+
+int dev_pm_opp_add(struct device *dev, unsigned long freq,
+ unsigned long u_volt);
+
+int dev_pm_opp_enable(struct device *dev, unsigned long freq);
+
+int dev_pm_opp_disable(struct device *dev, unsigned long freq);
+
+struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev);
+#else
+static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
+{
+ return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+{
+ return 0;
+}
+
+static inline int dev_pm_opp_get_opp_count(struct device *dev)
+{
+ return 0;
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+ unsigned long freq, bool available)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+ unsigned long *freq)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+ unsigned long *freq)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
+ unsigned long u_volt)
+{
+ return -EINVAL;
+}
+
+static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
+{
+ return 0;
+}
+
+static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
+{
+ return 0;
+}
+
+static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
+ struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_PM_OPP */
+
+#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
+int of_init_opp_table(struct device *dev);
+#else
+static inline int of_init_opp_table(struct device *dev)
+{
+ return -EINVAL;
+}
+#endif
+
+#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table);
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table);
+#else
+static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ return -EINVAL;
+}
+
+static inline
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+}
+#endif /* CONFIG_CPU_FREQ */
+
+#endif /* __LINUX_OPP_H__ */
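
A hedged sketch of the renamed dev_pm_opp_* calls (the RCU locking around the lookup follows the usual OPP convention and is an assumption, since this header does not spell it out; names are hypothetical):

#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static int example_pick_opp(struct device *dev, unsigned long *freq,
                            unsigned long *u_volt)
{
        struct dev_pm_opp *opp;

        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(dev, freq);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                return PTR_ERR(opp);
        }
        *u_volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();

        return 0;
}
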
diff --git a/arch/arm/mach-versatile/include/mach/timex.h b/include/linux/power/bq24735-charger.h
index 426199b1add5..f536164a6069 100644
--- a/arch/arm/mach-versatile/include/mach/timex.h
+++ b/include/linux/power/bq24735-charger.h
@@ -1,9 +1,4 @@
/*
- * arch/arm/mach-versatile/include/mach/timex.h
- *
- * Versatile architecture timex specifications
- *
- * Copyright (C) 2003 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,4 +15,25 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define CLOCK_TICK_RATE (50000000 / 16)
+#ifndef __CHARGER_BQ24735_H_
+#define __CHARGER_BQ24735_H_
+
+#include <linux/types.h>
+#include <linux/power_supply.h>
+
+struct bq24735_platform {
+ uint32_t charge_current;
+ uint32_t charge_voltage;
+ uint32_t input_current;
+
+ const char *name;
+
+ int status_gpio;
+ int status_gpio_active_low;
+ bool status_gpio_valid;
+
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+#endif /* __CHARGER_BQ24735_H_ */
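
An illustrative platform-data instance for the new bq24735 header (all values are placeholders):

#include <linux/power/bq24735-charger.h>

static struct bq24735_platform example_bq24735_pdata = {
        .charge_current    = 1024,
        .charge_voltage    = 12600,
        .input_current     = 2048,
        .name              = "example-charger",
        .status_gpio_valid = false,
};
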
diff --git a/include/linux/powercap.h b/include/linux/powercap.h
new file mode 100644
index 000000000000..4e250417ee30
--- /dev/null
+++ b/include/linux/powercap.h
@@ -0,0 +1,325 @@
+/*
+ * powercap.h: Data types and headers for sysfs power capping interface
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#ifndef __POWERCAP_H__
+#define __POWERCAP_H__
+
+#include <linux/device.h>
+#include <linux/idr.h>
+
+/*
+ * A power cap class device can contain multiple powercap control_types.
+ * Each control_type can have multiple power zones, which can be independently
+ * controlled. Each power zone can have one or more constraints.
+ */
+
+struct powercap_control_type;
+struct powercap_zone;
+struct powercap_zone_constraint;
+
+/**
+ * struct powercap_control_type_ops - Define control type callbacks
+ * @set_enable: Enable/Disable whole control type.
+ * Default is enabled. This callback allows all zones
+ * to be put in a disabled state, removing any applied
+ * power limits. If disabled, power zones can only be
+ * monitored, not controlled.
+ * @get_enable: get Enable/Disable status.
+ * @release: Callback to inform that last reference to this
+ * control type is closed. So it is safe to free data
+ * structure associated with this control type.
+ * This callback is mandatory if the client owns the memory
+ * for the control type.
+ *
+ * This structure defines control type callbacks to be implemented by client
+ * drivers
+ */
+struct powercap_control_type_ops {
+ int (*set_enable) (struct powercap_control_type *, bool mode);
+ int (*get_enable) (struct powercap_control_type *, bool *mode);
+ int (*release) (struct powercap_control_type *);
+};
+
+/**
+ * struct powercap_control_type- Defines a powercap control_type
+ * @name: name of control_type
+ * @dev: device for this control_type
+ * @idr: idr to have unique id for its child
+ * @root_node: Root holding power zones for this control_type
+ * @ops: Pointer to callback struct
+ * @node_lock: mutex for control type
+ * @allocated: It is possible that the client owns the memory
+ * used by this structure. In this case
+ * this flag is set to false by the framework to
+ * prevent deallocation during the release process.
+ * Otherwise this flag is set to true.
+ * @ctrl_inst: link to the control_type list
+ *
+ * Defines powercap control_type. This acts as a container for power
+ * zones, which use the same method to control power, e.g. RAPL, RAPL-PCI etc.
+ * All fields are private and should not be used by client drivers.
+ */
+struct powercap_control_type {
+ struct device dev;
+ struct idr idr;
+ int nr_zones;
+ const struct powercap_control_type_ops *ops;
+ struct mutex lock;
+ bool allocated;
+ struct list_head node;
+};
+
+/**
+ * struct powercap_zone_ops - Define power zone callbacks
+ * @get_max_energy_range_uj: Get maximum range of energy counter in
+ * micro-joules.
+ * @get_energy_uj: Get current energy counter in micro-joules.
+ * @reset_energy_uj: Reset micro-joules energy counter.
+ * @get_max_power_range_uw: Get maximum range of power counter in
+ * micro-watts.
+ * @get_power_uw: Get current power counter in micro-watts.
+ * @set_enable: Enable/Disable power zone controls.
+ * Default is enabled.
+ * @get_enable: get Enable/Disable status.
+ * @release: Callback to inform that last reference to this
+ * control type is closed. So it is safe to free
+ * data structure associated with this
+ * control type. Mandatory, if client driver owns
+ * the power_zone memory.
+ *
+ * This structure defines zone callbacks to be implemented by client drivers.
+ * Client drivers can define both energy and power related callbacks, but at
+ * least one type (either power or energy) is mandatory. Client drivers
+ * should handle mutual exclusion, if required in callbacks.
+ */
+struct powercap_zone_ops {
+ int (*get_max_energy_range_uj) (struct powercap_zone *, u64 *);
+ int (*get_energy_uj) (struct powercap_zone *, u64 *);
+ int (*reset_energy_uj) (struct powercap_zone *);
+ int (*get_max_power_range_uw) (struct powercap_zone *, u64 *);
+ int (*get_power_uw) (struct powercap_zone *, u64 *);
+ int (*set_enable) (struct powercap_zone *, bool mode);
+ int (*get_enable) (struct powercap_zone *, bool *mode);
+ int (*release) (struct powercap_zone *);
+};
+
+#define POWERCAP_ZONE_MAX_ATTRS 6
+#define POWERCAP_CONSTRAINTS_ATTRS 8
+#define MAX_CONSTRAINTS_PER_ZONE 10
+/**
+ * struct powercap_zone- Defines instance of a power cap zone
+ * @id: Unique id
+ * @name: Power zone name.
+ * @control_type_inst: Control type instance for this zone.
+ * @ops: Pointer to the zone operation structure.
+ * @dev: Instance of a device.
+ * @const_id_cnt: Number of constraints defined.
+ * @idr: Instance to an idr entry for children zones.
+ * @parent_idr: To remove reference from the parent idr.
+ * @private_data: Private data pointer if any for this zone.
+ * @zone_dev_attrs: Attributes associated with this device.
+ * @zone_attr_count: Attribute count.
+ * @dev_zone_attr_group: Attribute group for attributes.
+ * @dev_attr_groups: Attribute group store to register with device.
+ * @allocated: It is possible that the client owns the memory
+ * used by this structure. In this case
+ * this flag is set to false by the framework to
+ * prevent deallocation during the release process.
+ * Otherwise this flag is set to true.
+ * @constraints: List of constraints for this zone.
+ *
+ * This defines a power zone instance. The fields of this structure are
+ * private, and should not be used by client drivers.
+ */
+struct powercap_zone {
+ int id;
+ char *name;
+ void *control_type_inst;
+ const struct powercap_zone_ops *ops;
+ struct device dev;
+ int const_id_cnt;
+ struct idr idr;
+ struct idr *parent_idr;
+ void *private_data;
+ struct attribute **zone_dev_attrs;
+ int zone_attr_count;
+ struct attribute_group dev_zone_attr_group;
+ const struct attribute_group *dev_attr_groups[2]; /* 1 group + NULL */
+ bool allocated;
+ struct powercap_zone_constraint *constraints;
+};
+
+/**
+ * struct powercap_zone_constraint_ops - Define constraint callbacks
+ * @set_power_limit_uw: Set power limit in micro-watts.
+ * @get_power_limit_uw: Get power limit in micro-watts.
+ * @set_time_window_us: Set time window in micro-seconds.
+ * @get_time_window_us: Get time window in micro-seconds.
+ * @get_max_power_uw: Get max power allowed in micro-watts.
+ * @get_min_power_uw: Get min power allowed in micro-watts.
+ * @get_max_time_window_us: Get max time window allowed in micro-seconds.
+ * @get_min_time_window_us: Get min time window allowed in micro-seconds.
+ * @get_name: Get the name of constraint
+ *
+ * This structure is used to define the constraint callbacks for the client
+ * drivers. The following callbacks are mandatory and can't be NULL:
+ * set_power_limit_uw
+ * get_power_limit_uw
+ * set_time_window_us
+ * get_time_window_us
+ * get_name
+ * Client drivers should handle mutual exclusion, if required in callbacks.
+ */
+struct powercap_zone_constraint_ops {
+ int (*set_power_limit_uw) (struct powercap_zone *, int, u64);
+ int (*get_power_limit_uw) (struct powercap_zone *, int, u64 *);
+ int (*set_time_window_us) (struct powercap_zone *, int, u64);
+ int (*get_time_window_us) (struct powercap_zone *, int, u64 *);
+ int (*get_max_power_uw) (struct powercap_zone *, int, u64 *);
+ int (*get_min_power_uw) (struct powercap_zone *, int, u64 *);
+ int (*get_max_time_window_us) (struct powercap_zone *, int, u64 *);
+ int (*get_min_time_window_us) (struct powercap_zone *, int, u64 *);
+ const char *(*get_name) (struct powercap_zone *, int);
+};
+
+/**
+ * struct powercap_zone_constraint- Defines instance of a constraint
+ * @id: Instance Id of this constraint.
+ * @power_zone: Pointer to the power zone for this constraint.
+ * @ops: Pointer to the constraint callbacks.
+ *
+ * This defines a constraint instance.
+ */
+struct powercap_zone_constraint {
+ int id;
+ struct powercap_zone *power_zone;
+ struct powercap_zone_constraint_ops *ops;
+};
+
+
+/* For clients to get their device pointer, may be used for dev_dbgs */
+#define POWERCAP_GET_DEV(power_zone) (&power_zone->dev)
+
+/**
+* powercap_set_zone_data() - Set private data for a zone
+* @power_zone: A pointer to the valid zone instance.
+* @pdata: A pointer to the user private data.
+*
+* Allows client drivers to associate private data with a zone instance.
+*/
+static inline void powercap_set_zone_data(struct powercap_zone *power_zone,
+ void *pdata)
+{
+ if (power_zone)
+ power_zone->private_data = pdata;
+}
+
+/**
+* powercap_get_zone_data() - Get private data for a zone
+* @power_zone: A pointer to the valid zone instance.
+*
+* Allows client drivers to get the private data associated with a zone,
+* as set by a previous call to powercap_set_zone_data().
+*/
+static inline void *powercap_get_zone_data(struct powercap_zone *power_zone)
+{
+ if (power_zone)
+ return power_zone->private_data;
+ return NULL;
+}
+
+/**
+* powercap_register_control_type() - Register a control_type with framework
+* @control_type: Pointer to client allocated memory for the control type
+* structure storage. If this is NULL, powercap framework
+* will allocate memory and own it.
+* Advantage of this parameter is that client can embed
+* this data in its data structures and allocate in a
+* single call, preventing multiple allocations.
+* @control_type_name: The Name of this control_type, which will be shown
+* in the sysfs Interface.
+* @ops: Callbacks for control type. This parameter is optional.
+*
+* Used to create a control_type with the power capping class. Here control_type
+* can represent a type of technology, which can control a range of power zones.
+* For example a control_type can be RAPL (Running Average Power Limit)
+* Intel® 64 and IA-32 Processor Architectures. The name can be any string
+* which must be unique, otherwise this function returns NULL.
+* A pointer to the control_type instance is returned on success.
+*/
+struct powercap_control_type *powercap_register_control_type(
+ struct powercap_control_type *control_type,
+ const char *name,
+ const struct powercap_control_type_ops *ops);
+
+/**
+* powercap_unregister_control_type() - Unregister a control_type from framework
+* @instance: A pointer to the valid control_type instance.
+*
+* Used to unregister a control_type with the power capping class.
+* All power zones registered under this control type have to be unregistered
+* before calling this function, or it will fail with an error code.
+*/
+int powercap_unregister_control_type(struct powercap_control_type *instance);
+
+/* Zone register/unregister API */
+
+/**
+* powercap_register_zone() - Register a power zone
+* @power_zone: Pointer to client allocated memory for the power zone structure
+* storage. If this is NULL, powercap framework will allocate
+* memory and own it. Advantage of this parameter is that client
+* can embed this data in its data structures and allocate in a
+* single call, preventing multiple allocations.
+* @control_type: A control_type instance under which this zone operates.
+* @name: A name for this zone.
+* @parent: A pointer to the parent power zone instance if any or NULL
+* @ops: Pointer to zone operation callback structure.
+* @no_constraints: Number of constraints for this zone
+* @const_ops: Pointer to constraint callback structure
+*
+* Register a power zone under a given control type. A power zone must register
+* a pointer to a structure representing zone callbacks.
+* A power zone can be located under a parent power zone, in which case @parent
+* should point to it. Otherwise, if @parent is NULL, the new power zone will
+* be located directly under the given control type.
+* For each power zone there may be a number of constraints that appear in the
+* sysfs under that zone as attributes with unique numeric IDs.
+* Returns pointer to the power_zone on success.
+*/
+struct powercap_zone *powercap_register_zone(
+ struct powercap_zone *power_zone,
+ struct powercap_control_type *control_type,
+ const char *name,
+ struct powercap_zone *parent,
+ const struct powercap_zone_ops *ops,
+ int nr_constraints,
+ struct powercap_zone_constraint_ops *const_ops);
+
+/**
+* powercap_unregister_zone() - Unregister a zone device
+* @control_type: A pointer to the valid instance of a control_type.
+* @power_zone: A pointer to the valid zone instance for a control_type
+*
+* Used to unregister a zone device for a control_type. Caller should
+* make sure that children for this zone are unregistered first.
+*/
+int powercap_unregister_zone(struct powercap_control_type *control_type,
+ struct powercap_zone *power_zone);
+
+#endif
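
A minimal client-side registration sketch for the powercap framework (the zone name and energy callback are hypothetical; the IS_ERR_OR_NULL checks are a hedge, since the kerneldoc above only mentions a NULL return on failure):

#include <linux/err.h>
#include <linux/powercap.h>

static int example_get_energy_uj(struct powercap_zone *pz, u64 *energy)
{
        *energy = 0;            /* placeholder reading */
        return 0;
}

static const struct powercap_zone_ops example_zone_ops = {
        .get_energy_uj = example_get_energy_uj,
};

static struct powercap_control_type *example_ct;
static struct powercap_zone *example_zone;

static int example_powercap_register(void)
{
        example_ct = powercap_register_control_type(NULL, "example", NULL);
        if (IS_ERR_OR_NULL(example_ct))
                return -ENODEV;

        /* one monitoring-only zone, no constraints */
        example_zone = powercap_register_zone(NULL, example_ct, "zone0",
                                              NULL, &example_zone_ops,
                                              0, NULL);
        return IS_ERR_OR_NULL(example_zone) ? -ENODEV : 0;
}
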
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index f5d4723cdb3d..a3d9dc8c2c00 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -6,106 +6,95 @@
* preempt_count (used for kernel preemption, interrupt count, etc.)
*/
-#include <linux/thread_info.h>
#include <linux/linkage.h>
#include <linux/list.h>
-#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
- extern void add_preempt_count(int val);
- extern void sub_preempt_count(int val);
-#else
-# define add_preempt_count(val) do { preempt_count() += (val); } while (0)
-# define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
-#endif
-
-#define inc_preempt_count() add_preempt_count(1)
-#define dec_preempt_count() sub_preempt_count(1)
-
-#define preempt_count() (current_thread_info()->preempt_count)
-
-#ifdef CONFIG_PREEMPT
-
-asmlinkage void preempt_schedule(void);
-
-#define preempt_check_resched() \
-do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
- preempt_schedule(); \
-} while (0)
-
-#ifdef CONFIG_CONTEXT_TRACKING
+/*
+ * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
+ * the other bits -- can't include that header due to inclusion hell.
+ */
+#define PREEMPT_NEED_RESCHED 0x80000000
-void preempt_schedule_context(void);
+#include <asm/preempt.h>
-#define preempt_check_resched_context() \
-do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
- preempt_schedule_context(); \
-} while (0)
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+extern void preempt_count_add(int val);
+extern void preempt_count_sub(int val);
+#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
+#define preempt_count_add(val) __preempt_count_add(val)
+#define preempt_count_sub(val) __preempt_count_sub(val)
+#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
+#endif
-#define preempt_check_resched_context() preempt_check_resched()
-
-#endif /* CONFIG_CONTEXT_TRACKING */
-
-#else /* !CONFIG_PREEMPT */
-
-#define preempt_check_resched() do { } while (0)
-#define preempt_check_resched_context() do { } while (0)
-
-#endif /* CONFIG_PREEMPT */
+#define __preempt_count_inc() __preempt_count_add(1)
+#define __preempt_count_dec() __preempt_count_sub(1)
+#define preempt_count_inc() preempt_count_add(1)
+#define preempt_count_dec() preempt_count_sub(1)
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
do { \
- inc_preempt_count(); \
+ preempt_count_inc(); \
barrier(); \
} while (0)
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
- dec_preempt_count(); \
+ preempt_count_dec(); \
} while (0)
-#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
- preempt_enable_no_resched(); \
barrier(); \
- preempt_check_resched(); \
+ if (unlikely(preempt_count_dec_and_test())) \
+ __preempt_schedule(); \
+} while (0)
+
+#define preempt_check_resched() \
+do { \
+ if (should_resched()) \
+ __preempt_schedule(); \
} while (0)
-/* For debugging and tracer internals only! */
-#define add_preempt_count_notrace(val) \
- do { preempt_count() += (val); } while (0)
-#define sub_preempt_count_notrace(val) \
- do { preempt_count() -= (val); } while (0)
-#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
-#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)
+#else
+#define preempt_enable() preempt_enable_no_resched()
+#define preempt_check_resched() do { } while (0)
+#endif
#define preempt_disable_notrace() \
do { \
- inc_preempt_count_notrace(); \
+ __preempt_count_inc(); \
barrier(); \
} while (0)
#define preempt_enable_no_resched_notrace() \
do { \
barrier(); \
- dec_preempt_count_notrace(); \
+ __preempt_count_dec(); \
} while (0)
-/* preempt_check_resched is OK to trace */
+#ifdef CONFIG_PREEMPT
+
+#ifndef CONFIG_CONTEXT_TRACKING
+#define __preempt_schedule_context() __preempt_schedule()
+#endif
+
#define preempt_enable_notrace() \
do { \
- preempt_enable_no_resched_notrace(); \
barrier(); \
- preempt_check_resched_context(); \
+ if (unlikely(__preempt_count_dec_and_test())) \
+ __preempt_schedule_context(); \
} while (0)
+#else
+#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
+#endif
#else /* !CONFIG_PREEMPT_COUNT */
@@ -115,10 +104,11 @@ do { \
* that can cause faults and scheduling migrate into our preempt-protected
* region.
*/
-#define preempt_disable() barrier()
+#define preempt_disable() barrier()
#define sched_preempt_enable_no_resched() barrier()
-#define preempt_enable_no_resched() barrier()
-#define preempt_enable() barrier()
+#define preempt_enable_no_resched() barrier()
+#define preempt_enable() barrier()
+#define preempt_check_resched() do { } while (0)
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
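
The reworked preempt_enable() above folds the need-resched test into preempt_count_dec_and_test(), so a plain disable/enable pair now reschedules on the final decrement. A minimal sketch of the usage pattern these macros serve; the per-CPU counter and function names are illustrative, not part of the patch:

static DEFINE_PER_CPU(unsigned long, my_hits);

static void my_account_hit(void)
{
	preempt_disable();		/* preempt_count_inc() + barrier() */
	__this_cpu_inc(my_hits);	/* cannot migrate to another CPU here */
	preempt_enable();		/* calls __preempt_schedule() if the count
					 * hits zero with NEED_RESCHED set */
}
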
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4106721c4e5e..45a0a9e81478 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -19,6 +19,21 @@
*/
/*
+ * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
+ * @list: list to be initialized
+ *
+ * You should instead use INIT_LIST_HEAD() for normal initialization and
+ * cleanup tasks, when readers have no access to the list being initialized.
+ * However, if the list being initialized is visible to readers, you
+ * need to keep the compiler from being too mischievous.
+ */
+static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
+{
+ ACCESS_ONCE(list->next) = list;
+ ACCESS_ONCE(list->prev) = list;
+}
+
+/*
* return the ->next pointer of a list_head in an rcu safe
* way, we must not access it directly
*/
@@ -191,9 +206,13 @@ static inline void list_splice_init_rcu(struct list_head *list,
if (list_empty(list))
return;
- /* "first" and "last" tracking list, so initialize it. */
+ /*
+ * "first" and "last" tracking list, so initialize it. RCU readers
+ * have access to this list, so we must use INIT_LIST_HEAD_RCU()
+ * instead of INIT_LIST_HEAD().
+ */
- INIT_LIST_HEAD(list);
+ INIT_LIST_HEAD_RCU(list);
/*
* At this point, the list body still points to the source list.
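
INIT_LIST_HEAD_RCU() matters exactly in the list_splice_init_rcu() case shown above, where readers may still traverse the head being re-initialized. A hedged sketch of that updater/reader pairing; struct foo, gbl_list and the debug printout are hypothetical:

struct foo {				/* hypothetical element type */
	int data;
	struct list_head node;
};

static LIST_HEAD(gbl_list);
static DEFINE_MUTEX(gbl_lock);

static void show_all(void)
{
	struct foo *p;

	rcu_read_lock();
	list_for_each_entry_rcu(p, &gbl_list, node)
		pr_debug("foo %d\n", p->data);
	rcu_read_unlock();
}

static void splice_all_to(struct list_head *dst)
{
	mutex_lock(&gbl_lock);
	/* re-initializes gbl_list via INIT_LIST_HEAD_RCU() internally */
	list_splice_init_rcu(&gbl_list, dst, synchronize_rcu);
	mutex_unlock(&gbl_lock);
}
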
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f1f1bc39346b..39cbb889e20d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -261,6 +261,10 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
rcu_irq_exit(); \
} while (0)
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
+extern bool __rcu_is_watching(void);
+#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
+
/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -297,10 +301,6 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
-extern int rcu_is_cpu_idle(void);
-#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
-
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
@@ -351,7 +351,7 @@ static inline int rcu_read_lock_held(void)
{
if (!debug_lockdep_rcu_enabled())
return 1;
- if (rcu_is_cpu_idle())
+ if (!rcu_is_watching())
return 0;
if (!rcu_lockdep_current_cpu_online())
return 0;
@@ -402,7 +402,7 @@ static inline int rcu_read_lock_sched_held(void)
if (!debug_lockdep_rcu_enabled())
return 1;
- if (rcu_is_cpu_idle())
+ if (!rcu_is_watching())
return 0;
if (!rcu_lockdep_current_cpu_online())
return 0;
@@ -771,7 +771,7 @@ static inline void rcu_read_lock(void)
__rcu_read_lock();
__acquire(RCU);
rcu_lock_acquire(&rcu_lock_map);
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_lock() used illegally while idle");
}
@@ -792,7 +792,7 @@ static inline void rcu_read_lock(void)
*/
static inline void rcu_read_unlock(void)
{
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
rcu_lock_release(&rcu_lock_map);
__release(RCU);
@@ -821,7 +821,7 @@ static inline void rcu_read_lock_bh(void)
local_bh_disable();
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_lock_bh() used illegally while idle");
}
@@ -832,7 +832,7 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
@@ -857,7 +857,7 @@ static inline void rcu_read_lock_sched(void)
preempt_disable();
__acquire(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_lock_sched() used illegally while idle");
}
@@ -875,7 +875,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
*/
static inline void rcu_read_unlock_sched(void)
{
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED);
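
With rcu_is_cpu_idle() gone, callers test the positive condition rcu_is_watching() instead. A sketch, assuming a tracing-style callsite, of guarding an RCU read side on a CPU that may be idle; my_event and report_event() are made-up names:

static void maybe_report(struct my_event *ev)
{
	if (!rcu_is_watching())
		return;			/* e.g. idle loop: RCU readers illegal */

	rcu_read_lock();
	report_event(ev);		/* hypothetical RCU-protected consumer */
	rcu_read_unlock();
}
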
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e31005ee339e..09ebcbe9fd78 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -132,4 +132,21 @@ static inline void rcu_scheduler_starting(void)
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
+
+static inline bool rcu_is_watching(void)
+{
+ return __rcu_is_watching();
+}
+
+#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
+static inline bool rcu_is_watching(void)
+{
+ return true;
+}
+
+#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 226169d1bd2b..4b9c81548742 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -90,4 +90,6 @@ extern void exit_rcu(void);
extern void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
+extern bool rcu_is_watching(void);
+
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index a10380bfbeac..e55907804d39 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -23,6 +23,7 @@ struct device;
struct i2c_client;
struct irq_domain;
struct spi_device;
+struct spmi_device;
struct regmap;
struct regmap_range_cfg;
struct regmap_field;
@@ -70,6 +71,8 @@ struct regmap_range {
unsigned int range_max;
};
+#define regmap_reg_range(low, high) { .range_min = low, .range_max = high, }
+
/*
* A table of ranges including some yes ranges and some no ranges.
* If a register belongs to a no_range, the corresponding check function
@@ -318,6 +321,8 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config);
struct regmap *regmap_init_spi(struct spi_device *dev,
const struct regmap_config *config);
+struct regmap *regmap_init_spmi(struct spmi_device *dev,
+ const struct regmap_config *config);
struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
void __iomem *regs,
const struct regmap_config *config);
@@ -330,6 +335,8 @@ struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config);
struct regmap *devm_regmap_init_spi(struct spi_device *dev,
const struct regmap_config *config);
+struct regmap *devm_regmap_init_spmi(struct spmi_device *dev,
+ const struct regmap_config *config);
struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
void __iomem *regs,
const struct regmap_config *config);
@@ -374,10 +381,13 @@ int regmap_reinit_cache(struct regmap *map,
const struct regmap_config *config);
struct regmap *dev_get_regmap(struct device *dev, const char *name);
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val);
int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
size_t val_count);
+int regmap_multi_reg_write(struct regmap *map, struct reg_default *regs,
+ int num_regs);
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
@@ -387,9 +397,14 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val);
+int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val);
int regmap_update_bits_check(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change);
+int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change);
int regmap_get_val_bytes(struct regmap *map);
int regmap_async_complete(struct regmap *map);
bool regmap_can_raw_write(struct regmap *map);
@@ -425,11 +440,15 @@ bool regmap_reg_in_ranges(unsigned int reg,
* @reg: Offset of the register within the regmap bank
* @lsb: lsb of the register field.
* @msb: msb of the register field.
+ * @id_size: port size if the field spans multiple ports
+ * @id_offset: address offset for each port
*/
struct reg_field {
unsigned int reg;
unsigned int lsb;
unsigned int msb;
+ unsigned int id_size;
+ unsigned int id_offset;
};
#define REG_FIELD(_reg, _lsb, _msb) { \
@@ -448,6 +467,15 @@ void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
int regmap_field_read(struct regmap_field *field, unsigned int *val);
int regmap_field_write(struct regmap_field *field, unsigned int val);
+int regmap_field_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val);
+
+int regmap_fields_write(struct regmap_field *field, unsigned int id,
+ unsigned int val);
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+ unsigned int *val);
+int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val);
/**
* Description of an IRQ for the generic regmap irq_chip.
@@ -527,6 +555,13 @@ static inline int regmap_write(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_write_async(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
@@ -576,6 +611,14 @@ static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_update_bits_async(struct regmap *map,
+ unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_update_bits_check(struct regmap *map,
unsigned int reg,
unsigned int mask, unsigned int val,
@@ -585,6 +628,16 @@ static inline int regmap_update_bits_check(struct regmap *map,
return -EINVAL;
}
+static inline int regmap_update_bits_check_async(struct regmap *map,
+ unsigned int reg,
+ unsigned int mask,
+ unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_get_val_bytes(struct regmap *map)
{
WARN_ONCE(1, "regmap API is disabled");
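
The regmap additions above (regmap_reg_range() and the *_async variants) compose roughly as in this sketch for a hypothetical chip; the register addresses and masks are illustrative only:

static const struct regmap_range my_rd_ranges[] = {
	regmap_reg_range(0x00, 0x3f),
};

static const struct regmap_access_table my_rd_table = {
	.yes_ranges	= my_rd_ranges,
	.n_yes_ranges	= ARRAY_SIZE(my_rd_ranges),
};

static int my_chip_enable(struct regmap *map)
{
	int ret;

	ret = regmap_update_bits_async(map, 0x10, BIT(0), BIT(0));
	if (ret)
		return ret;

	return regmap_async_complete(map);	/* flush the queued async write */
}
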
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 27be915caa96..e530681bea70 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -146,6 +146,32 @@ struct regulator *__must_check devm_regulator_get_optional(struct device *dev,
void regulator_put(struct regulator *regulator);
void devm_regulator_put(struct regulator *regulator);
+int regulator_register_supply_alias(struct device *dev, const char *id,
+ struct device *alias_dev,
+ const char *alias_id);
+void regulator_unregister_supply_alias(struct device *dev, const char *id);
+
+int regulator_bulk_register_supply_alias(struct device *dev, const char **id,
+ struct device *alias_dev,
+ const char **alias_id, int num_id);
+void regulator_bulk_unregister_supply_alias(struct device *dev,
+ const char **id, int num_id);
+
+int devm_regulator_register_supply_alias(struct device *dev, const char *id,
+ struct device *alias_dev,
+ const char *alias_id);
+void devm_regulator_unregister_supply_alias(struct device *dev,
+ const char *id);
+
+int devm_regulator_bulk_register_supply_alias(struct device *dev,
+ const char **id,
+ struct device *alias_dev,
+ const char **alias_id,
+ int num_id);
+void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
+ const char **id,
+ int num_id);
+
/* regulator output control and status */
int __must_check regulator_enable(struct regulator *regulator);
int regulator_disable(struct regulator *regulator);
@@ -250,6 +276,59 @@ static inline void devm_regulator_put(struct regulator *regulator)
{
}
+static inline int regulator_register_supply_alias(struct device *dev,
+ const char *id,
+ struct device *alias_dev,
+ const char *alias_id)
+{
+ return 0;
+}
+
+static inline void regulator_unregister_supply_alias(struct device *dev,
+ const char *id)
+{
+}
+
+static inline int regulator_bulk_register_supply_alias(struct device *dev,
+ const char **id,
+ struct device *alias_dev,
+ const char **alias_id,
+ int num_id)
+{
+ return 0;
+}
+
+static inline void regulator_bulk_unregister_supply_alias(struct device *dev,
+ const char **id,
+ int num_id)
+{
+}
+
+static inline int devm_regulator_register_supply_alias(struct device *dev,
+ const char *id,
+ struct device *alias_dev,
+ const char *alias_id)
+{
+ return 0;
+}
+
+static inline void devm_regulator_unregister_supply_alias(struct device *dev,
+ const char *id)
+{
+}
+
+static inline int devm_regulator_bulk_register_supply_alias(
+ struct device *dev, const char **id, struct device *alias_dev,
+ const char **alias_id, int num_id)
+{
+ return 0;
+}
+
+static inline void devm_regulator_bulk_unregister_supply_alias(
+ struct device *dev, const char **id, int num_id)
+{
+}
+
static inline int regulator_enable(struct regulator *regulator)
{
return 0;
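
The new supply-alias API lets one device answer regulator lookups on behalf of another. A sketch of the intended MFD-style use, assuming a parent device that actually owns the "vdd-core" supply; all names here are illustrative:

static int my_mfd_bind_child(struct device *parent, struct device *child)
{
	int ret;

	ret = devm_regulator_register_supply_alias(child, "vdd",
						   parent, "vdd-core");
	if (ret)
		return ret;

	/* regulator_get(child, "vdd") now resolves via parent's "vdd-core" */
	return 0;
}
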
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 9bdad43ad228..9370e65348a4 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -46,19 +46,26 @@ enum regulator_status {
* regulator_list_linear_range().
*
* @min_uV: Lowest voltage in range
- * @max_uV: Highest voltage in range
* @min_sel: Lowest selector for range
* @max_sel: Highest selector for range
* @uV_step: Step size
*/
struct regulator_linear_range {
unsigned int min_uV;
- unsigned int max_uV;
unsigned int min_sel;
unsigned int max_sel;
unsigned int uV_step;
};
+/* Initialize struct regulator_linear_range */
+#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
+{ \
+ .min_uV = _min_uV, \
+ .min_sel = _min_sel, \
+ .max_sel = _max_sel, \
+ .uV_step = _step_uV, \
+}
+
/**
* struct regulator_ops - regulator operations.
*
@@ -209,6 +216,7 @@ enum regulator_type {
* @min_uV: Voltage given by the lowest selector (if linear mapping)
* @uV_step: Voltage increase with each selector (if linear mapping)
* @linear_min_sel: Minimal selector for starting linear mapping
+ * @fixed_uV: Fixed voltage of rails.
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
* @volt_table: Voltage mapping table (if table based mapping)
*
@@ -241,6 +249,7 @@ struct regulator_desc {
unsigned int min_uV;
unsigned int uV_step;
unsigned int linear_min_sel;
+ int fixed_uV;
unsigned int ramp_delay;
const struct regulator_linear_range *linear_ranges;
@@ -336,7 +345,12 @@ struct regulator_dev {
struct regulator_dev *
regulator_register(const struct regulator_desc *regulator_desc,
const struct regulator_config *config);
+struct regulator_dev *
+devm_regulator_register(struct device *dev,
+ const struct regulator_desc *regulator_desc,
+ const struct regulator_config *config);
void regulator_unregister(struct regulator_dev *rdev);
+void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
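
REGULATOR_LINEAR_RANGE() and devm_regulator_register() combine roughly as below for a made-up buck converter with two selector ranges; the voltages, steps and .n_linear_ranges bookkeeping are illustrative assumptions, not taken from this patch:

static const struct regulator_linear_range my_buck_ranges[] = {
	REGULATOR_LINEAR_RANGE(800000,   0, 15, 25000),	/* 0.800 - 1.175 V */
	REGULATOR_LINEAR_RANGE(1200000, 16, 31, 50000),	/* 1.200 - 1.950 V */
};

static const struct regulator_desc my_buck_desc = {
	.name		 = "my-buck",
	.linear_ranges	 = my_buck_ranges,
	.n_linear_ranges = ARRAY_SIZE(my_buck_ranges),
	/* .ops, .vsel_reg, .vsel_mask etc. omitted from this sketch */
};

static int my_buck_probe(struct platform_device *pdev)
{
	struct regulator_config cfg = { .dev = &pdev->dev };
	struct regulator_dev *rdev;

	rdev = devm_regulator_register(&pdev->dev, &my_buck_desc, &cfg);
	return IS_ERR(rdev) ? PTR_ERR(rdev) : 0;
}
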
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 999b20ce06cf..730e638c5589 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -95,6 +95,7 @@ struct regulator_state {
* @initial_state: Suspend state to set by default.
* @initial_mode: Mode to set at startup.
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @enable_time: Turn-on time of the rails (unit: microseconds)
*/
struct regulation_constraints {
@@ -129,6 +130,7 @@ struct regulation_constraints {
unsigned int initial_mode;
unsigned int ramp_delay;
+ unsigned int enable_time;
/* constraint flags */
unsigned always_on:1; /* regulator never off when system is on */
@@ -193,15 +195,10 @@ int regulator_suspend_finish(void);
#ifdef CONFIG_REGULATOR
void regulator_has_full_constraints(void);
-void regulator_use_dummy_regulator(void);
#else
static inline void regulator_has_full_constraints(void)
{
}
-
-static inline void regulator_use_dummy_regulator(void)
-{
-}
#endif
#endif
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index f28544b2f9af..939428ad25ac 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -15,7 +15,7 @@ extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
u32 id, long expires, u32 error);
-extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change);
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
/* RTNL is used as a global lock for all changes to network configuration */
extern void rtnl_lock(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e27baeeda3f4..045b0d227846 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -22,6 +22,7 @@ struct sched_param {
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
+#include <linux/preempt.h>
#include <asm/page.h>
#include <asm/ptrace.h>
@@ -427,6 +428,14 @@ struct task_cputime {
.sum_exec_runtime = 0, \
}
+#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
+
+#ifdef CONFIG_PREEMPT_COUNT
+#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
+#else
+#define PREEMPT_DISABLED PREEMPT_ENABLED
+#endif
+
/*
* Disable preemption until the scheduler is running.
* Reset by start_kernel()->sched_init()->init_idle().
@@ -434,7 +443,7 @@ struct task_cputime {
* We include PREEMPT_ACTIVE to keep cond_resched() from working
* before the scheduler is active -- see should_resched().
*/
-#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE)
+#define INIT_PREEMPT_COUNT (PREEMPT_DISABLED + PREEMPT_ACTIVE)
/**
* struct thread_group_cputimer - thread group interval timer counts
@@ -768,6 +777,7 @@ enum cpu_idle_type {
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
+#define SD_NUMA 0x4000 /* cross-node balancing */
extern int __weak arch_sd_sibiling_asym_packing(void);
@@ -811,6 +821,10 @@ struct sched_domain {
u64 last_update;
+ /* idle_balance() stats */
+ u64 max_newidle_lb_cost;
+ unsigned long next_decay_max_lb_cost;
+
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
@@ -1029,6 +1043,8 @@ struct task_struct {
struct task_struct *last_wakee;
unsigned long wakee_flips;
unsigned long wakee_flip_decay_ts;
+
+ int wake_cpu;
#endif
int on_rq;
@@ -1324,10 +1340,41 @@ struct task_struct {
#endif
#ifdef CONFIG_NUMA_BALANCING
int numa_scan_seq;
- int numa_migrate_seq;
unsigned int numa_scan_period;
+ unsigned int numa_scan_period_max;
+ int numa_preferred_nid;
+ int numa_migrate_deferred;
+ unsigned long numa_migrate_retry;
u64 node_stamp; /* migration stamp */
struct callback_head numa_work;
+
+ struct list_head numa_entry;
+ struct numa_group *numa_group;
+
+ /*
+ * Exponential decaying average of faults on a per-node basis.
+ * Scheduling placement decisions are made based on these counts.
+ * The values remain static for the duration of a PTE scan.
+ */
+ unsigned long *numa_faults;
+ unsigned long total_numa_faults;
+
+ /*
+ * numa_faults_buffer records faults per node during the current
+ * scan window. When the scan completes, the counts in numa_faults
+ * decay and these values are copied.
+ */
+ unsigned long *numa_faults_buffer;
+
+ /*
+ * numa_faults_locality tracks if faults recorded during the last
+ * scan window were remote/local. The task scan period is adapted
+ * based on the locality of the faults with different weights
+ * depending on whether they were shared or private faults
+ */
+ unsigned long numa_faults_locality[2];
+
+ unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
struct rcu_head rcu;
@@ -1412,16 +1459,33 @@ struct task_struct {
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+#define TNF_MIGRATED 0x01
+#define TNF_NO_GROUP 0x02
+#define TNF_SHARED 0x04
+#define TNF_FAULT_LOCAL 0x08
+
#ifdef CONFIG_NUMA_BALANCING
-extern void task_numa_fault(int node, int pages, bool migrated);
+extern void task_numa_fault(int last_node, int node, int pages, int flags);
+extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
+extern void task_numa_free(struct task_struct *p);
+
+extern unsigned int sysctl_numa_balancing_migrate_deferred;
#else
-static inline void task_numa_fault(int node, int pages, bool migrated)
+static inline void task_numa_fault(int last_node, int node, int pages,
+ int flags)
{
}
+static inline pid_t task_numa_group_id(struct task_struct *p)
+{
+ return 0;
+}
static inline void set_numabalancing_state(bool enabled)
{
}
+static inline void task_numa_free(struct task_struct *p)
+{
+}
#endif
static inline struct pid *task_pid(struct task_struct *task)
@@ -1974,7 +2038,7 @@ extern void wake_up_new_task(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
-extern void sched_fork(struct task_struct *p);
+extern void sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
@@ -2401,11 +2465,6 @@ static inline int signal_pending_state(long state, struct task_struct *p)
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
-static inline int need_resched(void)
-{
- return unlikely(test_thread_flag(TIF_NEED_RESCHED));
-}
-
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
@@ -2474,36 +2533,105 @@ static inline int tsk_is_polling(struct task_struct *p)
{
return task_thread_info(p)->status & TS_POLLING;
}
-static inline void current_set_polling(void)
+static inline void __current_set_polling(void)
{
current_thread_info()->status |= TS_POLLING;
}
-static inline void current_clr_polling(void)
+static inline bool __must_check current_set_polling_and_test(void)
+{
+ __current_set_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_task()
+ */
+ smp_mb();
+
+ return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
{
current_thread_info()->status &= ~TS_POLLING;
- smp_mb__after_clear_bit();
+}
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_task()
+ */
+ smp_mb();
+
+ return unlikely(tif_need_resched());
}
#elif defined(TIF_POLLING_NRFLAG)
static inline int tsk_is_polling(struct task_struct *p)
{
return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}
-static inline void current_set_polling(void)
+
+static inline void __current_set_polling(void)
{
set_thread_flag(TIF_POLLING_NRFLAG);
}
-static inline void current_clr_polling(void)
+static inline bool __must_check current_set_polling_and_test(void)
+{
+ __current_set_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_task()
+ *
+ * XXX: assumes set/clear bit are identical barrier wise.
+ */
+ smp_mb__after_clear_bit();
+
+ return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
{
clear_thread_flag(TIF_POLLING_NRFLAG);
}
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_task()
+ */
+ smp_mb__after_clear_bit();
+
+ return unlikely(tif_need_resched());
+}
+
#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
-static inline void current_set_polling(void) { }
-static inline void current_clr_polling(void) { }
+static inline void __current_set_polling(void) { }
+static inline void __current_clr_polling(void) { }
+
+static inline bool __must_check current_set_polling_and_test(void)
+{
+ return unlikely(tif_need_resched());
+}
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+ return unlikely(tif_need_resched());
+}
#endif
+static __always_inline bool need_resched(void)
+{
+ return unlikely(tif_need_resched());
+}
+
/*
* Thread group CPU time accounting.
*/
@@ -2545,6 +2673,11 @@ static inline unsigned int task_cpu(const struct task_struct *p)
return task_thread_info(p)->cpu;
}
+static inline int task_node(const struct task_struct *p)
+{
+ return cpu_to_node(task_cpu(p));
+}
+
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
#else
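
current_set_polling_and_test()/current_clr_polling_and_test() are aimed at idle loops that poll on NEED_RESCHED. A rough sketch of that pattern, not an actual cpuidle driver:

static void my_poll_idle(void)
{
	if (current_set_polling_and_test()) {
		__current_clr_polling();
		return;			/* a reschedule was already pending */
	}

	while (!need_resched())
		cpu_relax();		/* polling on the TIF flag, so
					 * resched_task() can skip the IPI */

	__current_clr_polling();
}
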
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index bf8086b2506e..41467f8ff8ec 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -2,8 +2,8 @@
#define _SCHED_SYSCTL_H
#ifdef CONFIG_DETECT_HUNG_TASK
+extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
-extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
@@ -47,7 +47,6 @@ extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
-extern unsigned int sysctl_numa_balancing_scan_period_reset;
extern unsigned int sysctl_numa_balancing_scan_size;
extern unsigned int sysctl_numa_balancing_settle_count;
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index fa7922c80a41..cddf0c2940b6 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -15,7 +15,7 @@ static inline void sched_clock_postinit(void) { }
#endif
extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
-
-extern unsigned long long (*sched_clock_func)(void);
+extern void sched_clock_register(u64 (*read)(void), int bits,
+ unsigned long rate);
#endif
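
sched_clock_register() takes a 64-bit read callback where setup_sched_clock() took a 32-bit one. A sketch of a clocksource-style caller, assuming a memory-mapped down-counter at a made-up offset and a 24 MHz rate:

static void __iomem *my_timer_base;

static u64 notrace my_sched_clock_read(void)
{
	return ~readl_relaxed(my_timer_base + 0x08);	/* down-counting timer */
}

static void __init my_timer_init(void)
{
	sched_clock_register(my_sched_clock_read, 32, 24000000);
}
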
diff --git a/include/linux/security.h b/include/linux/security.h
index 9d37e2b9d3ec..5623a7f965b7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1052,17 +1052,25 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @xfrm_policy_delete_security:
* @ctx contains the xfrm_sec_ctx.
* Authorize deletion of xp->security.
- * @xfrm_state_alloc_security:
+ * @xfrm_state_alloc:
* @x contains the xfrm_state being added to the Security Association
* Database by the XFRM system.
* @sec_ctx contains the security context information being provided by
* the user-level SA generation program (e.g., setkey or racoon).
- * @secid contains the secid from which to take the mls portion of the context.
* Allocate a security structure to the x->security field; the security
* field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to either sec_ctx or polsec, with the mls portion
- * taken from secid in the latter case.
- * Return 0 if operation was successful (memory to allocate, legal context).
+ * context to correspond to sec_ctx. Return 0 if operation was successful
+ * (memory to allocate, legal context).
+ * @xfrm_state_alloc_acquire:
+ * @x contains the xfrm_state being added to the Security Association
+ * Database by the XFRM system.
+ * @polsec contains the policy's security context.
+ * @secid contains the secid from which to take the mls portion of the
+ * context.
+ * Allocate a security structure to the x->security field; the security
+ * field is initialized to NULL when the xfrm_state is allocated. Set the
+ * context to correspond to secid. Return 0 if operation was successful
+ * (memory to allocate, legal context).
* @xfrm_state_free_security:
* @x contains the xfrm_state.
* Deallocate x->security.
@@ -1679,9 +1687,11 @@ struct security_operations {
int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
- int (*xfrm_state_alloc_security) (struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx,
- u32 secid);
+ int (*xfrm_state_alloc) (struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx);
+ int (*xfrm_state_alloc_acquire) (struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec,
+ u32 secid);
void (*xfrm_state_free_security) (struct xfrm_state *x);
int (*xfrm_state_delete_security) (struct xfrm_state *x);
int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index b98291ac7f14..f729be981da0 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -66,7 +66,6 @@ struct uart_ops {
void (*set_ldisc)(struct uart_port *, int new);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
- int (*set_wake)(struct uart_port *, unsigned int state);
/*
* Return a string describing the type of the port
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index d34049712a4d..3dbdf7e53dcc 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -5,18 +5,22 @@
#include <linux/sh_dma.h>
/*
- * Generic header for SuperH (H)SCI(F) (used by sh/sh64/h8300 and related parts)
+ * Generic header for SuperH (H)SCI(F) (used by sh/sh64 and related parts)
*/
#define SCIx_NOT_SUPPORTED (-1)
enum {
+ SCBRR_ALGO_INVALID,
+
SCBRR_ALGO_1, /* ((clk + 16 * bps) / (16 * bps) - 1) */
SCBRR_ALGO_2, /* ((clk + 16 * bps) / (32 * bps) - 1) */
SCBRR_ALGO_3, /* (((clk * 2) + 16 * bps) / (16 * bps) - 1) */
SCBRR_ALGO_4, /* (((clk * 2) + 16 * bps) / (32 * bps) - 1) */
SCBRR_ALGO_5, /* (((clk * 1000 / 32) / bps) - 1) */
SCBRR_ALGO_6, /* HSCIF variable sample rate algorithm */
+
+ SCBRR_NR_ALGOS,
};
#define SCSCR_TIE (1 << 7)
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
index fe817918b30e..d9b436f09925 100644
--- a/include/linux/sfi.h
+++ b/include/linux/sfi.h
@@ -59,6 +59,9 @@
#ifndef _LINUX_SFI_H
#define _LINUX_SFI_H
+#include <linux/init.h>
+#include <linux/types.h>
+
/* Table signatures reserved by the SFI specification */
#define SFI_SIG_SYST "SYST"
#define SFI_SIG_FREQ "FREQ"
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c2d89335f637..44727b5d4981 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -318,9 +318,13 @@ enum {
SKB_GSO_GRE = 1 << 6,
- SKB_GSO_UDP_TUNNEL = 1 << 7,
+ SKB_GSO_IPIP = 1 << 7,
- SKB_GSO_MPLS = 1 << 8,
+ SKB_GSO_SIT = 1 << 8,
+
+ SKB_GSO_UDP_TUNNEL = 1 << 9,
+
+ SKB_GSO_MPLS = 1 << 10,
};
#if BITS_PER_LONG > 32
@@ -585,8 +589,8 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
skb->_skb_refdst = (unsigned long)dst;
}
-extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
- bool force);
+void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
+ bool force);
/**
* skb_dst_set_noref - sets skb dst, hopefully, without taking reference
@@ -634,20 +638,20 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
return (struct rtable *)skb_dst(skb);
}
-extern void kfree_skb(struct sk_buff *skb);
-extern void kfree_skb_list(struct sk_buff *segs);
-extern void skb_tx_error(struct sk_buff *skb);
-extern void consume_skb(struct sk_buff *skb);
-extern void __kfree_skb(struct sk_buff *skb);
+void kfree_skb(struct sk_buff *skb);
+void kfree_skb_list(struct sk_buff *segs);
+void skb_tx_error(struct sk_buff *skb);
+void consume_skb(struct sk_buff *skb);
+void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;
-extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
-extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
- bool *fragstolen, int *delta_truesize);
+void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
+bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+ bool *fragstolen, int *delta_truesize);
-extern struct sk_buff *__alloc_skb(unsigned int size,
- gfp_t priority, int flags, int node);
-extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
+struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+ int node);
+struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
{
@@ -660,41 +664,33 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}
-extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
+struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
static inline struct sk_buff *alloc_skb_head(gfp_t priority)
{
return __alloc_skb_head(priority, -1);
}
-extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
-extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
-extern struct sk_buff *skb_clone(struct sk_buff *skb,
- gfp_t priority);
-extern struct sk_buff *skb_copy(const struct sk_buff *skb,
- gfp_t priority);
-extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
- int headroom, gfp_t gfp_mask);
-
-extern int pskb_expand_head(struct sk_buff *skb,
- int nhead, int ntail,
- gfp_t gfp_mask);
-extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
- unsigned int headroom);
-extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
- int newheadroom, int newtailroom,
- gfp_t priority);
-extern int skb_to_sgvec(struct sk_buff *skb,
- struct scatterlist *sg, int offset,
- int len);
-extern int skb_cow_data(struct sk_buff *skb, int tailbits,
- struct sk_buff **trailer);
-extern int skb_pad(struct sk_buff *skb, int pad);
+struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
+struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
+struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
+struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask);
+
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
+struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+ unsigned int headroom);
+struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
+ int newtailroom, gfp_t priority);
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
+ int len);
+int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
+int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a) consume_skb(a)
-extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
- int getfrag(void *from, char *to, int offset,
- int len,int odd, struct sk_buff *skb),
- void *from, int length);
+int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
+ int getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb),
+ void *from, int length);
struct skb_seq_state {
__u32 lower_offset;
@@ -706,18 +702,17 @@ struct skb_seq_state {
__u8 *frag_data;
};
-extern void skb_prepare_seq_read(struct sk_buff *skb,
- unsigned int from, unsigned int to,
- struct skb_seq_state *st);
-extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
- struct skb_seq_state *st);
-extern void skb_abort_seq_read(struct skb_seq_state *st);
+void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
+ unsigned int to, struct skb_seq_state *st);
+unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
+ struct skb_seq_state *st);
+void skb_abort_seq_read(struct skb_seq_state *st);
-extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
- unsigned int to, struct ts_config *config,
- struct ts_state *state);
+unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ unsigned int to, struct ts_config *config,
+ struct ts_state *state);
-extern void __skb_get_rxhash(struct sk_buff *skb);
+void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
if (!skb->l4_rxhash)
@@ -1095,7 +1090,8 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
* The "__skb_xxxx()" functions are the non-atomic ones that
* can only be called with interrupts disabled.
*/
-extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
+ struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
@@ -1201,8 +1197,8 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
__skb_insert(newsk, prev, prev->next, list);
}
-extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
- struct sk_buff_head *list);
+void skb_append(struct sk_buff *old, struct sk_buff *newsk,
+ struct sk_buff_head *list);
static inline void __skb_queue_before(struct sk_buff_head *list,
struct sk_buff *next,
@@ -1221,7 +1217,7 @@ static inline void __skb_queue_before(struct sk_buff_head *list,
*
* A buffer cannot be placed on two lists at the same time.
*/
-extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
struct sk_buff *newsk)
{
@@ -1238,7 +1234,7 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
*
* A buffer cannot be placed on two lists at the same time.
*/
-extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
struct sk_buff *newsk)
{
@@ -1249,7 +1245,7 @@ static inline void __skb_queue_tail(struct sk_buff_head *list,
* remove sk_buff from list. _Must_ be called atomically, and with
* the list known..
*/
-extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
struct sk_buff *next, *prev;
@@ -1270,7 +1266,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
* so must be used with appropriate locks held only. The head item is
* returned or %NULL if the list is empty.
*/
-extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek(list);
@@ -1287,7 +1283,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
* so must be used with appropriate locks held only. The tail item is
* returned or %NULL if the list is empty.
*/
-extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek_tail(list);
@@ -1373,8 +1369,8 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
skb_shinfo(skb)->nr_frags = i + 1;
}
-extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
- int off, int size, unsigned int truesize);
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+ int size, unsigned int truesize);
#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
@@ -1418,7 +1414,7 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
/*
* Add data to an sk_buff
*/
-extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
unsigned char *tmp = skb_tail_pointer(skb);
@@ -1428,7 +1424,7 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
return tmp;
}
-extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
@@ -1436,7 +1432,7 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
return skb->data;
}
-extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
skb->len -= len;
@@ -1449,7 +1445,7 @@ static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int l
return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
-extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
+unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
@@ -1753,7 +1749,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
#endif
-extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+int ___pskb_trim(struct sk_buff *skb, unsigned int len);
static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
@@ -1765,7 +1761,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
skb_set_tail_pointer(skb, len);
}
-extern void skb_trim(struct sk_buff *skb, unsigned int len);
+void skb_trim(struct sk_buff *skb, unsigned int len);
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
@@ -1838,7 +1834,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
* the list and one reference dropped. This function does not take the
* list lock and the caller must hold the relevant locks to use it.
*/
-extern void skb_queue_purge(struct sk_buff_head *list);
+void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
@@ -1850,11 +1846,10 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
-extern void *netdev_alloc_frag(unsigned int fragsz);
+void *netdev_alloc_frag(unsigned int fragsz);
-extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
- unsigned int length,
- gfp_t gfp_mask);
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
+ gfp_t gfp_mask);
/**
* netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -2071,6 +2066,8 @@ static inline void skb_frag_set_page(struct sk_buff *skb, int f,
__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
+
/**
* skb_frag_dma_map - maps a paged fragment via the DMA API
* @dev: the device to map the fragment to
@@ -2342,60 +2339,49 @@ static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
#define skb_walk_frags(skb, iter) \
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
-extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
- int *peeked, int *off, int *err);
-extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
- int noblock, int *err);
-extern unsigned int datagram_poll(struct file *file, struct socket *sock,
- struct poll_table_struct *wait);
-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
- int offset, struct iovec *to,
- int size);
-extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
- int hlen,
- struct iovec *iov);
-extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
- int offset,
- const struct iovec *from,
- int from_offset,
- int len);
-extern int zerocopy_sg_from_iovec(struct sk_buff *skb,
- const struct iovec *frm,
- int offset,
- size_t count);
-extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
- int offset,
- const struct iovec *to,
- int to_offset,
- int size);
-extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-extern void skb_free_datagram_locked(struct sock *sk,
- struct sk_buff *skb);
-extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
- unsigned int flags);
-extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
- int len, __wsum csum);
-extern int skb_copy_bits(const struct sk_buff *skb, int offset,
- void *to, int len);
-extern int skb_store_bits(struct sk_buff *skb, int offset,
- const void *from, int len);
-extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
- int offset, u8 *to, int len,
- __wsum csum);
-extern int skb_splice_bits(struct sk_buff *skb,
- unsigned int offset,
- struct pipe_inode_info *pipe,
- unsigned int len,
- unsigned int flags);
-extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
-extern void skb_split(struct sk_buff *skb,
- struct sk_buff *skb1, const u32 len);
-extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
- int shiftlen);
-extern void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-
-extern struct sk_buff *skb_segment(struct sk_buff *skb,
- netdev_features_t features);
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+ int *peeked, int *off, int *err);
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+ int *err);
+unsigned int datagram_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
+int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
+ struct iovec *to, int size);
+int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
+ struct iovec *iov);
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+ const struct iovec *from, int from_offset,
+ int len);
+int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
+ int offset, size_t count);
+int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
+ const struct iovec *to, int to_offset,
+ int size);
+void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
+int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
+int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
+__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
+ int len, __wsum csum);
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ struct pipe_inode_info *pipe, unsigned int len,
+ unsigned int flags);
+void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
+int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
+void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+
+struct skb_checksum_ops {
+ __wsum (*update)(const void *mem, int len, __wsum wsum);
+ __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
+};
+
+__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
+ __wsum csum, const struct skb_checksum_ops *ops);
+__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
+ __wsum csum);
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
@@ -2440,7 +2426,7 @@ static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
memcpy(skb->data + offset, from, len);
}
-extern void skb_init(void);
+void skb_init(void);
static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
@@ -2483,12 +2469,12 @@ static inline ktime_t net_invalid_timestamp(void)
return ktime_set(0, 0);
}
-extern void skb_timestamping_init(void);
+void skb_timestamping_init(void);
#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
-extern void skb_clone_tx_timestamp(struct sk_buff *skb);
-extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
+void skb_clone_tx_timestamp(struct sk_buff *skb);
+bool skb_defer_rx_timestamp(struct sk_buff *skb);
#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
@@ -2529,8 +2515,8 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
* generates a software time stamp (otherwise), then queues the clone
* to the error queue of the socket. Errors are silently ignored.
*/
-extern void skb_tstamp_tx(struct sk_buff *orig_skb,
- struct skb_shared_hwtstamps *hwtstamps);
+void skb_tstamp_tx(struct sk_buff *orig_skb,
+ struct skb_shared_hwtstamps *hwtstamps);
static inline void sw_tx_timestamp(struct sk_buff *skb)
{
@@ -2562,8 +2548,8 @@ static inline void skb_tx_timestamp(struct sk_buff *skb)
*/
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
-extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
-extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
+__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
+__sum16 __skb_checksum_complete(struct sk_buff *skb);
static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
@@ -2593,7 +2579,7 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
+void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
if (nfct && atomic_dec_and_test(&nfct->use))
@@ -2732,28 +2718,27 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
return skb->queue_mapping != 0;
}
-extern u16 __skb_tx_hash(const struct net_device *dev,
- const struct sk_buff *skb,
- unsigned int num_tx_queues);
+u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+ unsigned int num_tx_queues);
-#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
+#ifdef CONFIG_XFRM
return skb->sp;
-}
#else
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
-{
return NULL;
-}
#endif
+}
/* Keeps track of mac header offset relative to skb->head.
* It is useful for TSO of Tunneling protocol. e.g. GRE.
* For non-tunnel skb it points to skb_mac_header() and for
- * tunnel skb it points to outer mac header. */
+ * tunnel skb it points to outer mac header.
+ * Keeps track of level of encapsulation of network headers.
+ */
struct skb_gso_cb {
- int mac_offset;
+ int mac_offset;
+ int encap_level;
};
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
@@ -2783,12 +2768,13 @@ static inline bool skb_is_gso(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_size;
}
+/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
-extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
+void __skb_warn_lro_forwarding(const struct sk_buff *skb);
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 74f105847d13..c2bba248fa63 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -53,7 +53,14 @@
* }
* rcu_read_unlock();
*
- * See also the comment on struct slab_rcu in mm/slab.c.
+ * This is useful if we need to approach a kernel structure obliquely,
+ * from its address obtained without the usual locking. We can lock
+ * the structure to stabilize it and check it's still at the given address,
+ * only if we can be sure that the memory has not been meanwhile reused
+ * for some other kind of object (which our subsystem's lock might corrupt).
+ *
+ * rcu_read_lock before reading the address, then rcu_read_unlock after
+ * taking the spinlock within the structure expected at that address.
*/
#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
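
The expanded SLAB_DESTROY_BY_RCU comment describes a lookup that revalidates the object after locking it, since the memory may have been recycled for another object of the same type. A sketch of that lock-and-recheck pattern; my_obj and my_hash_find() are hypothetical:

struct my_obj {				/* backed by a SLAB_DESTROY_BY_RCU cache */
	spinlock_t	lock;
	unsigned long	key;
};

static struct my_obj *my_lookup(unsigned long key)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = my_hash_find(key);	/* hypothetical; may return recycled memory */
	if (obj) {
		spin_lock(&obj->lock);
		if (obj->key != key) {	/* reused for another key meanwhile? */
			spin_unlock(&obj->lock);
			obj = NULL;
		}
	}
	rcu_read_unlock();
	return obj;			/* locked if non-NULL */
}
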
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index e9346b4f1ef4..09bfffb08a56 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -27,8 +27,8 @@ struct kmem_cache {
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
- struct kmem_cache *slabp_cache;
- unsigned int slab_size;
+ struct kmem_cache *freelist_cache;
+ unsigned int freelist_size;
/* constructor func */
void (*ctor)(void *obj);
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
index 900f0e328235..a25bd6f65e7f 100644
--- a/include/linux/spi/rspi.h
+++ b/include/linux/spi/rspi.h
@@ -26,6 +26,8 @@ struct rspi_plat_data {
unsigned int dma_rx_id;
unsigned dma_width_16bit:1; /* DMAC read/write width = 16-bit */
+
+ u16 num_chipselect;
};
#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 887116dbce2c..8c62ba74dd91 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -23,6 +23,7 @@
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/kthread.h>
+#include <linux/completion.h>
/*
* INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -150,8 +151,7 @@ static inline void *spi_get_drvdata(struct spi_device *spi)
}
struct spi_message;
-
-
+struct spi_transfer;
/**
* struct spi_driver - Host side "protocol" driver
@@ -257,6 +257,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @queue_lock: spinlock to synchronise access to message queue
* @queue: message queue
* @cur_msg: the currently in-flight message
+ * @cur_msg_prepared: spi_prepare_message was called for the currently
+ * in-flight message
+ * @xfer_completion: used by core transfer_one_message()
* @busy: message pump is busy
* @running: message pump is running
* @rt: whether this queue is set to run as a realtime task
@@ -274,6 +277,16 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @unprepare_transfer_hardware: there are currently no more messages on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
+ * @set_cs: assert or deassert chip select, true to assert. May be called
+ * from interrupt context.
+ * @prepare_message: set up the controller to transfer a single message,
+ * for example doing DMA mapping. Called from threaded
+ * context.
+ * @transfer_one: transfer a single spi_transfer. When the
+ * driver is finished with this transfer it must call
+ * spi_finalize_current_transfer() so the subsystem can issue
+ * the next transfer
+ * @unprepare_message: undo any work done by prepare_message().
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself).
@@ -388,11 +401,25 @@ struct spi_master {
bool running;
bool rt;
bool auto_runtime_pm;
+ bool cur_msg_prepared;
+ struct completion xfer_completion;
int (*prepare_transfer_hardware)(struct spi_master *master);
int (*transfer_one_message)(struct spi_master *master,
struct spi_message *mesg);
int (*unprepare_transfer_hardware)(struct spi_master *master);
+ int (*prepare_message)(struct spi_master *master,
+ struct spi_message *message);
+ int (*unprepare_message)(struct spi_master *master,
+ struct spi_message *message);
+
+ /*
+ * These hooks are for drivers that use a generic implementation
+ * of transfer_one_message() provided by the core.
+ */
+ void (*set_cs)(struct spi_device *spi, bool enable);
+ int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *transfer);
/* gpio chip select */
int *cs_gpios;
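/*
 * A sketch of a controller driver wired up to the new per-transfer hooks
 * instead of providing its own transfer_one_message().  struct foo_priv
 * and the foo_* hardware helpers are hypothetical.
 */
#include <linux/spi/spi.h>

struct foo_priv;					/* hypothetical driver state */
void foo_write_cs(struct foo_priv *priv, u8 cs, bool enable);
void foo_start_transfer(struct foo_priv *priv, struct spi_transfer *xfer);

static void foo_set_cs(struct spi_device *spi, bool enable)
{
	/* may be called from interrupt context */
	foo_write_cs(spi_master_get_devdata(spi->master),
		     spi->chip_select, enable);
}

static int foo_transfer_one(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	foo_start_transfer(spi_master_get_devdata(master), xfer);

	/*
	 * Transfer left in flight: the completion interrupt is expected to
	 * call spi_finalize_current_transfer(master) once the hardware is
	 * done, so the core can move on to the next spi_transfer.
	 */
	return 1;
}

/*
 * In probe(), before spi_register_master():
 *	master->set_cs       = foo_set_cs;
 *	master->transfer_one = foo_transfer_one;
 */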
@@ -428,12 +455,15 @@ extern int spi_master_resume(struct spi_master *master);
/* Calls the driver make to interact with the message queue */
extern struct spi_message *spi_get_next_queued_message(struct spi_master *master);
extern void spi_finalize_current_message(struct spi_master *master);
+extern void spi_finalize_current_transfer(struct spi_master *master);
/* the spi driver core manages memory for the spi_master classdev */
extern struct spi_master *
spi_alloc_master(struct device *host, unsigned size);
extern int spi_register_master(struct spi_master *master);
+extern int devm_spi_register_master(struct device *dev,
+ struct spi_master *master);
extern void spi_unregister_master(struct spi_master *master);
extern struct spi_master *spi_busnum_to_master(u16 busnum);
@@ -823,6 +853,33 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
return (status < 0) ? status : result;
}
+/**
+ * spi_w8r16be - SPI synchronous 8 bit write followed by 16 bit big-endian read
+ * @spi: device with which data will be exchanged
+ * @cmd: command to be written before data is read back
+ * Context: can sleep
+ *
+ * This returns the (unsigned) sixteen bit number returned by the device in cpu
+ * endianness, or else a negative error code. Callable only from contexts that
+ * can sleep.
+ *
+ * This function is similar to spi_w8r16, with the exception that it will
+ * convert the read 16 bit data word from big-endian to native endianness.
+ *
+ */
+static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
+
+{
+ ssize_t status;
+ __be16 result;
+
+ status = spi_write_then_read(spi, &cmd, 1, &result, 2);
+ if (status < 0)
+ return status;
+
+ return be16_to_cpu(result);
+}
+
/*---------------------------------------------------------------------------*/
/*
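/*
 * Illustrative caller of spi_w8r16be() from a hypothetical sensor driver:
 * one command byte out, one big-endian 16-bit register back, already
 * converted to CPU endianness.
 */
#include <linux/spi/spi.h>

#define FOO_CMD_READ_TEMP	0x05	/* hypothetical command byte */

static int foo_read_temp(struct spi_device *spi)
{
	ssize_t val = spi_w8r16be(spi, FOO_CMD_READ_TEMP);

	return (val < 0) ? val : (u16)val;
}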
diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h
index 86a12b0cb239..0688472500bb 100644
--- a/include/linux/ssb/ssb_driver_gige.h
+++ b/include/linux/ssb/ssb_driver_gige.h
@@ -108,6 +108,16 @@ static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
return 0;
}
+/* Get the device phy address */
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+ struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ return dev->dev->bus->sprom.et0phyaddr;
+}
+
extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
struct pci_dev *pdev);
extern int ssb_gige_map_irq(struct ssb_device *sdev,
@@ -174,6 +184,10 @@ static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
{
return -ENODEV;
}
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_SSB_DRIVER_GIGE */
#endif /* LINUX_SSB_DRIVER_GIGE_H_ */
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 3b5e910d14ca..d2abbdb8c6aa 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -28,6 +28,7 @@ struct cpu_stop_work {
};
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
+int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
struct cpu_stop_work *work_buf);
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
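/*
 * Sketch of the new stop_two_cpus() helper.  foo_swap() and struct
 * swap_arg are illustrative; the callback runs with both CPUs pinned in
 * their stopper threads, so it must be short and must not sleep.
 */
#include <linux/stop_machine.h>

struct swap_arg {
	unsigned int src_cpu;
	unsigned int dst_cpu;
};

static int foo_swap(void *data)
{
	struct swap_arg *arg = data;

	/* both arg->src_cpu and arg->dst_cpu are quiesced here */
	return 0;
}

static int foo_swap_cpus(unsigned int src, unsigned int dst)
{
	struct swap_arg arg = { .src_cpu = src, .dst_cpu = dst };

	return stop_two_cpus(src, dst, foo_swap, &arg);
}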
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 6740801aa71a..8af2804bab16 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -49,6 +49,7 @@ struct rpc_clnt {
unsigned int cl_softrtry : 1,/* soft timeouts */
cl_discrtry : 1,/* disconnect before retry */
+ cl_noretranstimeo: 1,/* No retransmit timeouts */
cl_autobind : 1,/* use getport() */
cl_chatty : 1;/* be verbose */
@@ -126,6 +127,7 @@ struct rpc_create_args {
#define RPC_CLNT_CREATE_QUIET (1UL << 6)
#define RPC_CLNT_CREATE_INFINITE_SLOTS (1UL << 7)
#define RPC_CLNT_CREATE_NO_IDLE_TIMEOUT (1UL << 8)
+#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
@@ -134,6 +136,10 @@ void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
struct rpc_clnt *rpc_clone_client_set_auth(struct rpc_clnt *,
rpc_authflavor_t);
+int rpc_switch_client_transport(struct rpc_clnt *,
+ struct xprt_create *,
+ const struct rpc_timeout *);
+
void rpc_shutdown_client(struct rpc_clnt *);
void rpc_release_client(struct rpc_clnt *);
void rpc_task_release_client(struct rpc_task *);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 096ee58be11a..3a847de83fab 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -122,6 +122,7 @@ struct rpc_task_setup {
#define RPC_TASK_SENT 0x0800 /* message was sent */
#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */
+#define RPC_TASK_NO_RETRANS_TIMEOUT 0x4000 /* wait forever for a reply */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index cec7b9b5e1bf..8097b9df6773 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -288,7 +288,7 @@ int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
-int xprt_prepare_transmit(struct rpc_task *task);
+bool xprt_prepare_transmit(struct rpc_task *task);
void xprt_transmit(struct rpc_task *task);
void xprt_end_transmit(struct rpc_task *task);
int xprt_adjust_timeout(struct rpc_rqst *req);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 11baec7c9b26..6695040a0317 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -173,7 +173,6 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size)
struct sysfs_ops {
ssize_t (*show)(struct kobject *, struct attribute *, char *);
ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
- const void *(*namespace)(struct kobject *, const struct attribute *);
};
struct sysfs_dirent;
@@ -183,19 +182,23 @@ struct sysfs_dirent;
int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
void *data, struct module *owner);
-int __must_check sysfs_create_dir(struct kobject *kobj);
+int __must_check sysfs_create_dir_ns(struct kobject *kobj, const void *ns);
void sysfs_remove_dir(struct kobject *kobj);
-int __must_check sysfs_rename_dir(struct kobject *kobj, const char *new_name);
-int __must_check sysfs_move_dir(struct kobject *kobj,
- struct kobject *new_parent_kobj);
-
-int __must_check sysfs_create_file(struct kobject *kobj,
- const struct attribute *attr);
+int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
+ const void *new_ns);
+int __must_check sysfs_move_dir_ns(struct kobject *kobj,
+ struct kobject *new_parent_kobj,
+ const void *new_ns);
+
+int __must_check sysfs_create_file_ns(struct kobject *kobj,
+ const struct attribute *attr,
+ const void *ns);
int __must_check sysfs_create_files(struct kobject *kobj,
const struct attribute **attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
-void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr);
+void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
+ const void *ns);
void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
int __must_check sysfs_create_bin_file(struct kobject *kobj,
@@ -210,8 +213,9 @@ int __must_check sysfs_create_link_nowarn(struct kobject *kobj,
const char *name);
void sysfs_remove_link(struct kobject *kobj, const char *name);
-int sysfs_rename_link(struct kobject *kobj, struct kobject *target,
- const char *old_name, const char *new_name);
+int sysfs_rename_link_ns(struct kobject *kobj, struct kobject *target,
+ const char *old_name, const char *new_name,
+ const void *new_ns);
void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
const char *name);
@@ -241,9 +245,9 @@ void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
void sysfs_notify_dirent(struct sysfs_dirent *sd);
-struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
- const void *ns,
- const unsigned char *name);
+struct sysfs_dirent *sysfs_get_dirent_ns(struct sysfs_dirent *parent_sd,
+ const unsigned char *name,
+ const void *ns);
struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
void sysfs_put(struct sysfs_dirent *sd);
@@ -257,7 +261,7 @@ static inline int sysfs_schedule_callback(struct kobject *kobj,
return -ENOSYS;
}
-static inline int sysfs_create_dir(struct kobject *kobj)
+static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
{
return 0;
}
@@ -266,19 +270,22 @@ static inline void sysfs_remove_dir(struct kobject *kobj)
{
}
-static inline int sysfs_rename_dir(struct kobject *kobj, const char *new_name)
+static inline int sysfs_rename_dir_ns(struct kobject *kobj,
+ const char *new_name, const void *new_ns)
{
return 0;
}
-static inline int sysfs_move_dir(struct kobject *kobj,
- struct kobject *new_parent_kobj)
+static inline int sysfs_move_dir_ns(struct kobject *kobj,
+ struct kobject *new_parent_kobj,
+ const void *new_ns)
{
return 0;
}
-static inline int sysfs_create_file(struct kobject *kobj,
- const struct attribute *attr)
+static inline int sysfs_create_file_ns(struct kobject *kobj,
+ const struct attribute *attr,
+ const void *ns)
{
return 0;
}
@@ -295,8 +302,9 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
return 0;
}
-static inline void sysfs_remove_file(struct kobject *kobj,
- const struct attribute *attr)
+static inline void sysfs_remove_file_ns(struct kobject *kobj,
+ const struct attribute *attr,
+ const void *ns)
{
}
@@ -333,8 +341,9 @@ static inline void sysfs_remove_link(struct kobject *kobj, const char *name)
{
}
-static inline int sysfs_rename_link(struct kobject *k, struct kobject *t,
- const char *old_name, const char *new_name)
+static inline int sysfs_rename_link_ns(struct kobject *k, struct kobject *t,
+ const char *old_name,
+ const char *new_name, const void *ns)
{
return 0;
}
@@ -413,10 +422,9 @@ static inline void sysfs_notify(struct kobject *kobj, const char *dir,
static inline void sysfs_notify_dirent(struct sysfs_dirent *sd)
{
}
-static inline
-struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
- const void *ns,
- const unsigned char *name)
+static inline struct sysfs_dirent *
+sysfs_get_dirent_ns(struct sysfs_dirent *parent_sd, const unsigned char *name,
+ const void *ns)
{
return NULL;
}
@@ -435,4 +443,28 @@ static inline int __must_check sysfs_init(void)
#endif /* CONFIG_SYSFS */
+static inline int __must_check sysfs_create_file(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ return sysfs_create_file_ns(kobj, attr, NULL);
+}
+
+static inline void sysfs_remove_file(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ return sysfs_remove_file_ns(kobj, attr, NULL);
+}
+
+static inline int sysfs_rename_link(struct kobject *kobj, struct kobject *target,
+ const char *old_name, const char *new_name)
+{
+ return sysfs_rename_link_ns(kobj, target, old_name, new_name, NULL);
+}
+
+static inline struct sysfs_dirent *
+sysfs_get_dirent(struct sysfs_dirent *parent_sd, const unsigned char *name)
+{
+ return sysfs_get_dirent_ns(parent_sd, name, NULL);
+}
+
#endif /* _SYSFS_H_ */
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 7faf933cced7..387fa7d05c98 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -17,9 +17,6 @@
#include <linux/errno.h>
#include <linux/types.h>
-/* Enable/disable SYSRQ support by default (0==no, 1==yes). */
-#define SYSRQ_DEFAULT_ENABLE 1
-
/* Possible values of bitmask for enabling sysrq functions */
/* 0x0001 is reserved for enable everything */
#define SYSRQ_ENABLE_LOG 0x0002
diff --git a/include/linux/tegra-powergate.h b/include/linux/tegra-powergate.h
index 55c29a8d5015..c98cfa406952 100644
--- a/include/linux/tegra-powergate.h
+++ b/include/linux/tegra-powergate.h
@@ -34,8 +34,15 @@ struct clk;
#define TEGRA_POWERGATE_CPU3 11
#define TEGRA_POWERGATE_CELP 12
#define TEGRA_POWERGATE_3D1 13
+#define TEGRA_POWERGATE_CPU0 14
+#define TEGRA_POWERGATE_C0NC 15
+#define TEGRA_POWERGATE_C1NC 16
+#define TEGRA_POWERGATE_DIS 18
+#define TEGRA_POWERGATE_DISB 19
+#define TEGRA_POWERGATE_XUSBA 20
+#define TEGRA_POWERGATE_XUSBB 21
+#define TEGRA_POWERGATE_XUSBC 22
-#define TEGRA_POWERGATE_CPU0 TEGRA_POWERGATE_CPU
#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D
int tegra_powergate_is_powered(int id);
diff --git a/include/linux/thinkpad_acpi.h b/include/linux/thinkpad_acpi.h
new file mode 100644
index 000000000000..361de59a2285
--- /dev/null
+++ b/include/linux/thinkpad_acpi.h
@@ -0,0 +1,15 @@
+#ifndef __THINKPAD_ACPI_H__
+#define __THINKPAD_ACPI_H__
+
+/* This function returns 0 on success, or a negative error code
+ (e.g. -ENODEV if no LED is present) */
+
+enum {
+ TPACPI_LED_MUTE,
+ TPACPI_LED_MICMUTE,
+ TPACPI_LED_MAX,
+};
+
+int tpacpi_led_set(int whichled, bool on);
+
+#endif
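/*
 * Illustrative caller of tpacpi_led_set(), e.g. an audio driver
 * mirroring its mute state onto the LED (the foo_ name is hypothetical).
 */
#include <linux/thinkpad_acpi.h>
#include <linux/printk.h>
#include <linux/errno.h>

static void foo_update_mute_led(bool muted)
{
	int err = tpacpi_led_set(TPACPI_LED_MUTE, muted);

	if (err && err != -ENODEV)
		pr_warn("foo: failed to set mute LED: %d\n", err);
}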
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index e7e04736802f..fddbe2023a5d 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -104,8 +104,21 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
-#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED)
-#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED)
+static inline __deprecated void set_need_resched(void)
+{
+ /*
+ * Use of this function is deprecated.
+ *
+ * As of this writing there are only a few users in the DRM tree left
+ * all of which are wrong and can be removed without causing too much
+ * grief.
+ *
+ * The DRM people are aware and are working on removing the last few
+ * instances.
+ */
+}
+
+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
/*
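/*
 * tif_need_resched() sketch: a long-running loop that hands the CPU back
 * when the scheduler has flagged the current task.  struct foo_item and
 * foo_handle() are hypothetical; in most drivers cond_resched() is the
 * higher-level way to express the same thing.
 */
#include <linux/thread_info.h>
#include <linux/sched.h>

struct foo_item;
void foo_handle(struct foo_item *item);		/* hypothetical work */

static void foo_process(struct foo_item **items, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		foo_handle(items[i]);
		if (tif_need_resched())
			schedule();
	}
}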
diff --git a/include/linux/topology.h b/include/linux/topology.h
index d3cf0d6e7712..12ae6ce997d6 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -106,6 +106,8 @@ int arch_update_cpu_topology(void);
.last_balance = jiffies, \
.balance_interval = 1, \
.smt_gain = 1178, /* 15% */ \
+ .max_newidle_lb_cost = 0, \
+ .next_decay_max_lb_cost = jiffies, \
}
#endif
#endif /* CONFIG_SCHED_SMT */
@@ -135,6 +137,8 @@ int arch_update_cpu_topology(void);
, \
.last_balance = jiffies, \
.balance_interval = 1, \
+ .max_newidle_lb_cost = 0, \
+ .next_decay_max_lb_cost = jiffies, \
}
#endif
#endif /* CONFIG_SCHED_MC */
@@ -166,6 +170,8 @@ int arch_update_cpu_topology(void);
, \
.last_balance = jiffies, \
.balance_interval = 1, \
+ .max_newidle_lb_cost = 0, \
+ .next_decay_max_lb_cost = jiffies, \
}
#endif
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 64f864651d86..97d660ed70c1 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -180,7 +180,6 @@ struct tty_port_operations {
IFF the port was initialized. Do not use to free resources. Called
under the port mutex to serialize against activate/shutdowns */
void (*shutdown)(struct tty_port *port);
- void (*drop)(struct tty_port *port);
/* Called under the port mutex from tty_port_open, serialized using
the port mutex */
/* FIXME: long term getting the tty argument *out* of this would be
@@ -672,31 +671,17 @@ static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
#define wait_event_interruptible_tty(tty, wq, condition) \
({ \
int __ret = 0; \
- if (!(condition)) { \
- __wait_event_interruptible_tty(tty, wq, condition, __ret); \
- } \
+ if (!(condition)) \
+ __ret = __wait_event_interruptible_tty(tty, wq, \
+ condition); \
__ret; \
})
-#define __wait_event_interruptible_tty(tty, wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (!signal_pending(current)) { \
- tty_unlock(tty); \
+#define __wait_event_interruptible_tty(tty, wq, condition) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+ tty_unlock(tty); \
schedule(); \
- tty_lock(tty); \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- break; \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
+ tty_lock(tty))
#ifdef CONFIG_PROC_FS
extern void proc_tty_register_driver(struct tty_driver *);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5ca0951e1855..9d8cf056e661 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -15,7 +15,7 @@
*/
static inline void pagefault_disable(void)
{
- inc_preempt_count();
+ preempt_count_inc();
/*
* make sure to have issued the store before a pagefault
* can hit.
@@ -30,11 +30,7 @@ static inline void pagefault_enable(void)
* the pagefault handler again.
*/
barrier();
- dec_preempt_count();
- /*
- * make sure we do..
- */
- barrier();
+ preempt_count_dec();
preempt_check_resched();
}
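/*
 * Typical pagefault_disable()/pagefault_enable() bracket around an
 * atomic user access, sketched for illustration only.
 */
#include <linux/uaccess.h>
#include <linux/errno.h>

static int foo_peek_user(const void __user *uaddr, u32 *val)
{
	int left;

	pagefault_disable();
	/* must not fault: returns the number of bytes left uncopied */
	left = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return left ? -EFAULT : 0;
}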
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 06f28beed7c2..9e0d5a6fe7a8 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -117,13 +117,13 @@ extern void uprobe_start_dup_mmap(void);
extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
-extern void uprobe_copy_process(struct task_struct *t);
+extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
extern bool uprobe_deny_signal(void);
-extern bool __weak arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
+extern bool arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
extern void uprobe_clear_state(struct mm_struct *mm);
#else /* !CONFIG_UPROBES */
struct uprobes_state {
@@ -174,7 +174,7 @@ static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
static inline void uprobe_free_utask(struct task_struct *t)
{
}
-static inline void uprobe_copy_process(struct task_struct *t)
+static inline void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
}
static inline void uprobe_clear_state(struct mm_struct *mm)
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index cc25b70af33c..2300f7492927 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -36,6 +36,9 @@
* SUCH DAMAGE.
*/
+#ifndef __LINUX_USB_CDC_NCM_H
+#define __LINUX_USB_CDC_NCM_H
+
#define CDC_NCM_COMM_ALTSETTING_NCM 0
#define CDC_NCM_COMM_ALTSETTING_MBIM 1
@@ -85,22 +88,13 @@
#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
struct cdc_ncm_ctx {
- struct usb_cdc_ncm_ntb_parameters ncm_parm;
struct hrtimer tx_timer;
struct tasklet_struct bh;
const struct usb_cdc_ncm_desc *func_desc;
- const struct usb_cdc_mbim_desc *mbim_desc;
- const struct usb_cdc_header_desc *header_desc;
- const struct usb_cdc_union_desc *union_desc;
+ const struct usb_cdc_mbim_desc *mbim_desc;
const struct usb_cdc_ether_desc *ether_desc;
- struct net_device *netdev;
- struct usb_device *udev;
- struct usb_host_endpoint *in_ep;
- struct usb_host_endpoint *out_ep;
- struct usb_host_endpoint *status_ep;
- struct usb_interface *intf;
struct usb_interface *control;
struct usb_interface *data;
@@ -113,8 +107,6 @@ struct cdc_ncm_ctx {
u32 tx_timer_pending;
u32 tx_curr_frame_num;
- u32 rx_speed;
- u32 tx_speed;
u32 rx_max;
u32 tx_max;
u32 max_datagram_size;
@@ -127,9 +119,11 @@ struct cdc_ncm_ctx {
u16 connected;
};
-extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
-extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
-extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
-extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign);
-extern int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
-extern int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset);
+u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
+void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
+struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
+int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
+int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset);
+
+#endif /* __LINUX_USB_CDC_NCM_H */
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 4db29859464f..4836ba3c1cd8 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -27,6 +27,12 @@ struct user_namespace {
kuid_t owner;
kgid_t group;
unsigned int proc_inum;
+
+ /* Register of per-UID persistent keyrings for this namespace */
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ struct key *persistent_keyring_register;
+ struct rw_semaphore persistent_keyring_register_sem;
+#endif
};
extern struct user_namespace init_user_ns;
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 36d36cc89329..e4abb84199be 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -51,11 +51,11 @@ int virtqueue_add_sgs(struct virtqueue *vq,
void *data,
gfp_t gfp);
-void virtqueue_kick(struct virtqueue *vq);
+bool virtqueue_kick(struct virtqueue *vq);
bool virtqueue_kick_prepare(struct virtqueue *vq);
-void virtqueue_notify(struct virtqueue *vq);
+bool virtqueue_notify(struct virtqueue *vq);
void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
@@ -73,6 +73,8 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq);
unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
+bool virtqueue_is_broken(struct virtqueue *vq);
+
/**
* virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 29b9104232b4..e8f8f71e843c 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -96,33 +96,6 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
return test_bit(fbit, vdev->features);
}
-/**
- * virtio_config_val - look for a feature and get a virtio config entry.
- * @vdev: the virtio device
- * @fbit: the feature bit
- * @offset: the type to search for.
- * @v: a pointer to the value to fill in.
- *
- * The return value is -ENOENT if the feature doesn't exist. Otherwise
- * the config value is copied into whatever is pointed to by v. */
-#define virtio_config_val(vdev, fbit, offset, v) \
- virtio_config_buf((vdev), (fbit), (offset), (v), sizeof(*v))
-
-#define virtio_config_val_len(vdev, fbit, offset, v, len) \
- virtio_config_buf((vdev), (fbit), (offset), (v), (len))
-
-static inline int virtio_config_buf(struct virtio_device *vdev,
- unsigned int fbit,
- unsigned int offset,
- void *buf, unsigned len)
-{
- if (!virtio_has_feature(vdev, fbit))
- return -ENOENT;
-
- vdev->config->get(vdev, offset, buf, len);
- return 0;
-}
-
static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
vq_callback_t *c, const char *n)
@@ -162,5 +135,139 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
return 0;
}
+/* Config space accessors. */
+#define virtio_cread(vdev, structname, member, ptr) \
+ do { \
+ /* Must match the member's type, and be integer */ \
+ if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
+ (*ptr) = 1; \
+ \
+ switch (sizeof(*ptr)) { \
+ case 1: \
+ *(ptr) = virtio_cread8(vdev, \
+ offsetof(structname, member)); \
+ break; \
+ case 2: \
+ *(ptr) = virtio_cread16(vdev, \
+ offsetof(structname, member)); \
+ break; \
+ case 4: \
+ *(ptr) = virtio_cread32(vdev, \
+ offsetof(structname, member)); \
+ break; \
+ case 8: \
+ *(ptr) = virtio_cread64(vdev, \
+ offsetof(structname, member)); \
+ break; \
+ default: \
+ BUG(); \
+ } \
+ } while(0)
+
+/* Config space accessors. */
+#define virtio_cwrite(vdev, structname, member, ptr) \
+ do { \
+ /* Must match the member's type, and be integer */ \
+ if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
+ BUG_ON((*ptr) == 1); \
+ \
+ switch (sizeof(*ptr)) { \
+ case 1: \
+ virtio_cwrite8(vdev, \
+ offsetof(structname, member), \
+ *(ptr)); \
+ break; \
+ case 2: \
+ virtio_cwrite16(vdev, \
+ offsetof(structname, member), \
+ *(ptr)); \
+ break; \
+ case 4: \
+ virtio_cwrite32(vdev, \
+ offsetof(structname, member), \
+ *(ptr)); \
+ break; \
+ case 8: \
+ virtio_cwrite64(vdev, \
+ offsetof(structname, member), \
+ *(ptr)); \
+ break; \
+ default: \
+ BUG(); \
+ } \
+ } while(0)
+
+static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
+{
+ u8 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return ret;
+}
+
+static inline void virtio_cread_bytes(struct virtio_device *vdev,
+ unsigned int offset,
+ void *buf, size_t len)
+{
+ vdev->config->get(vdev, offset, buf, len);
+}
+
+static inline void virtio_cwrite8(struct virtio_device *vdev,
+ unsigned int offset, u8 val)
+{
+ vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u16 virtio_cread16(struct virtio_device *vdev,
+ unsigned int offset)
+{
+ u16 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return ret;
+}
+
+static inline void virtio_cwrite16(struct virtio_device *vdev,
+ unsigned int offset, u16 val)
+{
+ vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u32 virtio_cread32(struct virtio_device *vdev,
+ unsigned int offset)
+{
+ u32 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return ret;
+}
+
+static inline void virtio_cwrite32(struct virtio_device *vdev,
+ unsigned int offset, u32 val)
+{
+ vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u64 virtio_cread64(struct virtio_device *vdev,
+ unsigned int offset)
+{
+ u64 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return ret;
+}
+
+static inline void virtio_cwrite64(struct virtio_device *vdev,
+ unsigned int offset, u64 val)
+{
+ vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+/* Conditional config space accessors. */
+#define virtio_cread_feature(vdev, fbit, structname, member, ptr) \
+ ({ \
+ int _r = 0; \
+ if (!virtio_has_feature(vdev, fbit)) \
+ _r = -ENOENT; \
+ else \
+ virtio_cread((vdev), structname, member, ptr); \
+ _r; \
+ })
#endif /* _LINUX_VIRTIO_CONFIG_H */
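/*
 * Sketch of a virtio driver using the new config space accessors: read
 * the link status from struct virtio_net_config if (and only if) the
 * VIRTIO_NET_F_STATUS feature was negotiated, otherwise assume the link
 * is up.  Illustrative only.
 */
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_net.h>

static bool foo_link_is_up(struct virtio_device *vdev)
{
	u16 status = VIRTIO_NET_S_LINK_UP;	/* default when feature absent */

	virtio_cread_feature(vdev, VIRTIO_NET_F_STATUS,
			     struct virtio_net_config, status, &status);

	return status & VIRTIO_NET_S_LINK_UP;
}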
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index b300787af8e0..67e06fe18c03 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -71,7 +71,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
struct virtio_device *vdev,
bool weak_barriers,
void *pages,
- void (*notify)(struct virtqueue *vq),
+ bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
const char *name);
void vring_del_virtqueue(struct virtqueue *vq);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a67fc1635592..d3d033ec5313 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -1,7 +1,8 @@
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
-
-
+/*
+ * Linux wait queue related types and methods
+ */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
@@ -13,27 +14,27 @@ typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, v
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
struct __wait_queue {
- unsigned int flags;
+ unsigned int flags;
#define WQ_FLAG_EXCLUSIVE 0x01
- void *private;
- wait_queue_func_t func;
- struct list_head task_list;
+ void *private;
+ wait_queue_func_t func;
+ struct list_head task_list;
};
struct wait_bit_key {
- void *flags;
- int bit_nr;
-#define WAIT_ATOMIC_T_BIT_NR -1
+ void *flags;
+ int bit_nr;
+#define WAIT_ATOMIC_T_BIT_NR -1
};
struct wait_bit_queue {
- struct wait_bit_key key;
- wait_queue_t wait;
+ struct wait_bit_key key;
+ wait_queue_t wait;
};
struct __wait_queue_head {
- spinlock_t lock;
- struct list_head task_list;
+ spinlock_t lock;
+ struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
@@ -84,17 +85,17 @@ extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
- q->flags = 0;
- q->private = p;
- q->func = default_wake_function;
+ q->flags = 0;
+ q->private = p;
+ q->func = default_wake_function;
}
-static inline void init_waitqueue_func_entry(wait_queue_t *q,
- wait_queue_func_t func)
+static inline void
+init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
- q->flags = 0;
- q->private = NULL;
- q->func = func;
+ q->flags = 0;
+ q->private = NULL;
+ q->func = func;
}
static inline int waitqueue_active(wait_queue_head_t *q)
@@ -114,8 +115,8 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
/*
* Used for wake-one threads:
*/
-static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
- wait_queue_t *wait)
+static inline void
+__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
wait->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue(q, wait);
@@ -127,23 +128,22 @@ static inline void __add_wait_queue_tail(wait_queue_head_t *head,
list_add_tail(&new->task_list, &head->task_list);
}
-static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
- wait_queue_t *wait)
+static inline void
+__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
wait->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue_tail(q, wait);
}
-static inline void __remove_wait_queue(wait_queue_head_t *head,
- wait_queue_t *old)
+static inline void
+__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
list_del(&old->task_list);
}
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
- void *key);
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
@@ -170,27 +170,64 @@ wait_queue_head_t *bit_waitqueue(void *, int);
/*
* Wakeup macros to be used to report events to the targets.
*/
-#define wake_up_poll(x, m) \
+#define wake_up_poll(x, m) \
__wake_up(x, TASK_NORMAL, 1, (void *) (m))
-#define wake_up_locked_poll(x, m) \
+#define wake_up_locked_poll(x, m) \
__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
-#define wake_up_interruptible_poll(x, m) \
+#define wake_up_interruptible_poll(x, m) \
__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
-#define __wait_event(wq, condition) \
-do { \
- DEFINE_WAIT(__wait); \
+#define ___wait_cond_timeout(condition) \
+({ \
+ bool __cond = (condition); \
+ if (__cond && !__ret) \
+ __ret = 1; \
+ __cond || !__ret; \
+})
+
+#define ___wait_is_interruptible(state) \
+ (!__builtin_constant_p(state) || \
+ state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
+
+#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
+({ \
+ __label__ __out; \
+ wait_queue_t __wait; \
+ long __ret = ret; \
+ \
+ INIT_LIST_HEAD(&__wait.task_list); \
+ if (exclusive) \
+ __wait.flags = WQ_FLAG_EXCLUSIVE; \
+ else \
+ __wait.flags = 0; \
\
for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
+ long __int = prepare_to_wait_event(&wq, &__wait, state);\
+ \
if (condition) \
break; \
- schedule(); \
+ \
+ if (___wait_is_interruptible(state) && __int) { \
+ __ret = __int; \
+ if (exclusive) { \
+ abort_exclusive_wait(&wq, &__wait, \
+ state, NULL); \
+ goto __out; \
+ } \
+ break; \
+ } \
+ \
+ cmd; \
} \
finish_wait(&wq, &__wait); \
-} while (0)
+__out: __ret; \
+})
+
+#define __wait_event(wq, condition) \
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ schedule())
/**
* wait_event - sleep until a condition gets true
@@ -204,29 +241,17 @@ do { \
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*/
-#define wait_event(wq, condition) \
+#define wait_event(wq, condition) \
do { \
- if (condition) \
+ if (condition) \
break; \
__wait_event(wq, condition); \
} while (0)
-#define __wait_event_timeout(wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- } \
- if (!ret && (condition)) \
- ret = 1; \
- finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_timeout(wq, condition, timeout) \
+ ___wait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_UNINTERRUPTIBLE, 0, timeout, \
+ __ret = schedule_timeout(__ret))
/**
* wait_event_timeout - sleep until a condition gets true or a timeout elapses
@@ -248,30 +273,52 @@ do { \
#define wait_event_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
- if (!(condition)) \
- __wait_event_timeout(wq, condition, __ret); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_timeout(wq, condition, timeout); \
__ret; \
})
-#define __wait_event_interruptible(wq, condition, ret) \
+#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
do { \
DEFINE_WAIT(__wait); \
\
for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
+ prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
if (condition) \
break; \
- if (!signal_pending(current)) { \
- schedule(); \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- break; \
+ cmd1; \
+ schedule(); \
+ cmd2; \
} \
finish_wait(&wq, &__wait); \
} while (0)
/**
+ * wait_event_cmd - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @cmd1: the command to be executed before sleep
+ * @cmd2: the command to be executed after sleep
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define wait_event_cmd(wq, condition, cmd1, cmd2) \
+do { \
+ if (condition) \
+ break; \
+ __wait_event_cmd(wq, condition, cmd1, cmd2); \
+} while (0)
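/*
 * Usage sketch for wait_event_cmd(): drop a lock around the sleep and
 * retake it before re-checking the condition.  foo_lock, foo_wq and
 * foo_done are illustrative; the waker sets foo_done under foo_lock and
 * calls wake_up(&foo_wq).
 */
#include <linux/wait.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_lock);
static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
static bool foo_done;

static void foo_wait_for_done(void)
{
	spin_lock_irq(&foo_lock);
	wait_event_cmd(foo_wq, foo_done,
		       spin_unlock_irq(&foo_lock),
		       spin_lock_irq(&foo_lock));
	spin_unlock_irq(&foo_lock);
}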
+
+#define __wait_event_interruptible(wq, condition) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+ schedule())
+
+/**
* wait_event_interruptible - sleep until a condition gets true
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
@@ -290,31 +337,14 @@ do { \
({ \
int __ret = 0; \
if (!(condition)) \
- __wait_event_interruptible(wq, condition, __ret); \
+ __ret = __wait_event_interruptible(wq, condition); \
__ret; \
})
-#define __wait_event_interruptible_timeout(wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (!signal_pending(current)) { \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- break; \
- } \
- if (!ret && (condition)) \
- ret = 1; \
- finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_interruptible_timeout(wq, condition, timeout) \
+ ___wait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_INTERRUPTIBLE, 0, timeout, \
+ __ret = schedule_timeout(__ret))
/**
* wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
@@ -337,15 +367,15 @@ do { \
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
- if (!(condition)) \
- __wait_event_interruptible_timeout(wq, condition, __ret); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_interruptible_timeout(wq, \
+ condition, timeout); \
__ret; \
})
#define __wait_event_hrtimeout(wq, condition, timeout, state) \
({ \
int __ret = 0; \
- DEFINE_WAIT(__wait); \
struct hrtimer_sleeper __t; \
\
hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
@@ -356,25 +386,15 @@ do { \
current->timer_slack_ns, \
HRTIMER_MODE_REL); \
\
- for (;;) { \
- prepare_to_wait(&wq, &__wait, state); \
- if (condition) \
- break; \
- if (state == TASK_INTERRUPTIBLE && \
- signal_pending(current)) { \
- __ret = -ERESTARTSYS; \
- break; \
- } \
+ __ret = ___wait_event(wq, condition, state, 0, 0, \
if (!__t.task) { \
__ret = -ETIME; \
break; \
} \
- schedule(); \
- } \
+ schedule()); \
\
hrtimer_cancel(&__t.timer); \
destroy_hrtimer_on_stack(&__t.timer); \
- finish_wait(&wq, &__wait); \
__ret; \
})
@@ -428,33 +448,15 @@ do { \
__ret; \
})
-#define __wait_event_interruptible_exclusive(wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait_exclusive(&wq, &__wait, \
- TASK_INTERRUPTIBLE); \
- if (condition) { \
- finish_wait(&wq, &__wait); \
- break; \
- } \
- if (!signal_pending(current)) { \
- schedule(); \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- abort_exclusive_wait(&wq, &__wait, \
- TASK_INTERRUPTIBLE, NULL); \
- break; \
- } \
-} while (0)
+#define __wait_event_interruptible_exclusive(wq, condition) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
+ schedule())
#define wait_event_interruptible_exclusive(wq, condition) \
({ \
int __ret = 0; \
if (!(condition)) \
- __wait_event_interruptible_exclusive(wq, condition, __ret);\
+ __ret = __wait_event_interruptible_exclusive(wq, condition);\
__ret; \
})
@@ -606,24 +608,8 @@ do { \
? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
-
-#define __wait_event_killable(wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
- if (condition) \
- break; \
- if (!fatal_signal_pending(current)) { \
- schedule(); \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- break; \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_killable(wq, condition) \
+ ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
/**
* wait_event_killable - sleep until a condition gets true
@@ -644,26 +630,17 @@ do { \
({ \
int __ret = 0; \
if (!(condition)) \
- __wait_event_killable(wq, condition, __ret); \
+ __ret = __wait_event_killable(wq, condition); \
__ret; \
})
#define __wait_event_lock_irq(wq, condition, lock, cmd) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- spin_unlock_irq(&lock); \
- cmd; \
- schedule(); \
- spin_lock_irq(&lock); \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ spin_unlock_irq(&lock); \
+ cmd; \
+ schedule(); \
+ spin_lock_irq(&lock))
/**
* wait_event_lock_irq_cmd - sleep until a condition gets true. The
@@ -723,26 +700,12 @@ do { \
} while (0)
-#define __wait_event_interruptible_lock_irq(wq, condition, \
- lock, ret, cmd) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (signal_pending(current)) { \
- ret = -ERESTARTSYS; \
- break; \
- } \
- spin_unlock_irq(&lock); \
- cmd; \
- schedule(); \
- spin_lock_irq(&lock); \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+ spin_unlock_irq(&lock); \
+ cmd; \
+ schedule(); \
+ spin_lock_irq(&lock))
/**
* wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
@@ -772,10 +735,9 @@ do { \
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({ \
int __ret = 0; \
- \
if (!(condition)) \
- __wait_event_interruptible_lock_irq(wq, condition, \
- lock, __ret, cmd); \
+ __ret = __wait_event_interruptible_lock_irq(wq, \
+ condition, lock, cmd); \
__ret; \
})
@@ -804,39 +766,24 @@ do { \
#define wait_event_interruptible_lock_irq(wq, condition, lock) \
({ \
int __ret = 0; \
- \
if (!(condition)) \
- __wait_event_interruptible_lock_irq(wq, condition, \
- lock, __ret, ); \
+ __ret = __wait_event_interruptible_lock_irq(wq, \
+ condition, lock,); \
__ret; \
})
#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
- lock, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (signal_pending(current)) { \
- ret = -ERESTARTSYS; \
- break; \
- } \
- spin_unlock_irq(&lock); \
- ret = schedule_timeout(ret); \
- spin_lock_irq(&lock); \
- if (!ret) \
- break; \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
+ lock, timeout) \
+ ___wait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_INTERRUPTIBLE, 0, timeout, \
+ spin_unlock_irq(&lock); \
+ __ret = schedule_timeout(__ret); \
+ spin_lock_irq(&lock));
/**
- * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
- * The condition is checked under the lock. This is expected
- * to be called with the lock taken.
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
+ * true or a timeout elapses. The condition is checked under
+ * the lock. This is expected to be called with the lock taken.
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @lock: a locked spinlock_t, which will be released before schedule()
@@ -860,11 +807,10 @@ do { \
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
timeout) \
({ \
- int __ret = timeout; \
- \
- if (!(condition)) \
- __wait_event_interruptible_lock_irq_timeout( \
- wq, condition, lock, __ret); \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_interruptible_lock_irq_timeout( \
+ wq, condition, lock, timeout); \
__ret; \
})
@@ -875,20 +821,18 @@ do { \
* We plan to remove these interfaces.
*/
extern void sleep_on(wait_queue_head_t *q);
-extern long sleep_on_timeout(wait_queue_head_t *q,
- signed long timeout);
+extern long sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
-extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
- signed long timeout);
+extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
/*
* Waitqueues which are removed from the waitqueue_head at wakeup time
*/
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
+long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
-void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
- unsigned int mode, void *key);
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
@@ -934,8 +878,8 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
* One uses wait_on_bit() where one is waiting for the bit to clear,
* but has no intention of setting it.
*/
-static inline int wait_on_bit(void *word, int bit,
- int (*action)(void *), unsigned mode)
+static inline int
+wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
{
if (!test_bit(bit, word))
return 0;
@@ -958,8 +902,8 @@ static inline int wait_on_bit(void *word, int bit,
* One uses wait_on_bit_lock() where one is waiting for the bit to
* clear with the intention of setting it, and when done, clearing it.
*/
-static inline int wait_on_bit_lock(void *word, int bit,
- int (*action)(void *), unsigned mode)
+static inline int
+wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
{
if (!test_and_set_bit(bit, word))
return 0;
@@ -983,5 +927,5 @@ int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
return 0;
return out_of_line_wait_on_atomic_t(val, action, mode);
}
-
-#endif
+
+#endif /* _LINUX_WAIT_H */
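/*
 * Return-value handling for the reworked timeout helpers, as a sketch:
 * 0 means the timeout elapsed, a negative value means the wait was
 * interrupted by a signal, and a positive value is the remaining jiffies
 * with the condition true.  foo_wq and foo_ready are illustrative.
 */
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
static bool foo_ready;

static int foo_wait_ready(void)
{
	long ret = wait_event_interruptible_timeout(foo_wq, foo_ready,
						    msecs_to_jiffies(100));

	if (ret == 0)
		return -ETIMEDOUT;
	if (ret < 0)
		return ret;		/* typically -ERESTARTSYS */
	return 0;			/* condition became true in time */
}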
diff --git a/include/linux/yam.h b/include/linux/yam.h
index 7fe28228b274..512cdc2fb80f 100644
--- a/include/linux/yam.h
+++ b/include/linux/yam.h
@@ -77,6 +77,6 @@ struct yamdrv_ioctl_cfg {
struct yamdrv_ioctl_mcs {
int cmd;
- int bitrate;
+ unsigned int bitrate;
unsigned char bits[YAM_FPGA_SIZE];
};
diff --git a/include/media/lm3560.h b/include/media/lm3560.h
new file mode 100644
index 000000000000..46670706d6f8
--- /dev/null
+++ b/include/media/lm3560.h
@@ -0,0 +1,97 @@
+/*
+ * include/media/lm3560.h
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * Contact: Daniel Jeong <gshark.jeong@gmail.com>
+ * Ldd-Mlp <ldd-mlp@list.ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __LM3560_H__
+#define __LM3560_H__
+
+#include <media/v4l2-subdev.h>
+
+#define LM3560_NAME "lm3560"
+#define LM3560_I2C_ADDR (0x53)
+
+/* FLASH Brightness
+ * min 62500uA, step 62500uA, max 1000000uA
+ */
+#define LM3560_FLASH_BRT_MIN 62500
+#define LM3560_FLASH_BRT_STEP 62500
+#define LM3560_FLASH_BRT_MAX 1000000
+#define LM3560_FLASH_BRT_uA_TO_REG(a) \
+ ((a) < LM3560_FLASH_BRT_MIN ? 0 : \
+ (((a) - LM3560_FLASH_BRT_MIN) / LM3560_FLASH_BRT_STEP))
+#define LM3560_FLASH_BRT_REG_TO_uA(a) \
+ ((a) * LM3560_FLASH_BRT_STEP + LM3560_FLASH_BRT_MIN)
+
+/* FLASH TIMEOUT DURATION
+ * min 32ms, step 32ms, max 1024ms
+ */
+#define LM3560_FLASH_TOUT_MIN 32
+#define LM3560_FLASH_TOUT_STEP 32
+#define LM3560_FLASH_TOUT_MAX 1024
+#define LM3560_FLASH_TOUT_ms_TO_REG(a) \
+ ((a) < LM3560_FLASH_TOUT_MIN ? 0 : \
+ (((a) - LM3560_FLASH_TOUT_MIN) / LM3560_FLASH_TOUT_STEP))
+#define LM3560_FLASH_TOUT_REG_TO_ms(a) \
+ ((a) * LM3560_FLASH_TOUT_STEP + LM3560_FLASH_TOUT_MIN)
+
+/* TORCH BRT
+ * min 31250uA, step 31250uA, max 250000uA
+ */
+#define LM3560_TORCH_BRT_MIN 31250
+#define LM3560_TORCH_BRT_STEP 31250
+#define LM3560_TORCH_BRT_MAX 250000
+#define LM3560_TORCH_BRT_uA_TO_REG(a) \
+ ((a) < LM3560_TORCH_BRT_MIN ? 0 : \
+ (((a) - LM3560_TORCH_BRT_MIN) / LM3560_TORCH_BRT_STEP))
+#define LM3560_TORCH_BRT_REG_TO_uA(a) \
+ ((a) * LM3560_TORCH_BRT_STEP + LM3560_TORCH_BRT_MIN)
+
+enum lm3560_led_id {
+ LM3560_LED0 = 0,
+ LM3560_LED1,
+ LM3560_LED_MAX
+};
+
+enum lm3560_peak_current {
+ LM3560_PEAK_1600mA = 0x00,
+ LM3560_PEAK_2300mA = 0x20,
+ LM3560_PEAK_3000mA = 0x40,
+ LM3560_PEAK_3600mA = 0x60
+};
+
+/* struct lm3560_platform_data
+ *
+ * @peak : peak current
+ * @max_flash_timeout: flash timeout
+ * @max_flash_brt: flash mode led brightness
+ * @max_torch_brt: torch mode led brightness
+ */
+struct lm3560_platform_data {
+ enum lm3560_peak_current peak;
+
+ u32 max_flash_timeout;
+ u32 max_flash_brt[LM3560_LED_MAX];
+ u32 max_torch_brt[LM3560_LED_MAX];
+};
+
+#endif /* __LM3560_H__ */
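/*
 * Worked example of the conversion macros above, using the flash
 * brightness range (62500uA step starting at 62500uA):
 *
 *	LM3560_FLASH_BRT_uA_TO_REG(500000)
 *		= (500000 - 62500) / 62500 = 7
 *	LM3560_FLASH_BRT_REG_TO_uA(7)
 *		= 7 * 62500 + 62500 = 500000
 *
 * Requests below the minimum clamp to register value 0 (62500uA).
 */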
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 34d2414f2b8c..865246b00127 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -146,9 +146,14 @@ struct soc_camera_subdev_desc {
/* sensor driver private platform data */
void *drv_priv;
- /* Optional regulators that have to be managed on power on/off events */
- struct regulator_bulk_data *regulators;
- int num_regulators;
+ /*
+ * Set unbalanced_power to true to deal with legacy drivers that fail to
+ * balance their calls to the subdevice's .s_power() method. clock_state is
+ * then used internally by helper functions; it shouldn't be touched by
+ * drivers or the platform code.
+ */
+ bool unbalanced_power;
+ unsigned long clock_state;
/* Optional callbacks to power on or off and reset the sensor */
int (*power)(struct device *, int);
@@ -162,6 +167,9 @@ struct soc_camera_subdev_desc {
int (*set_bus_param)(struct soc_camera_subdev_desc *, unsigned long flags);
unsigned long (*query_bus_param)(struct soc_camera_subdev_desc *);
void (*free_bus)(struct soc_camera_subdev_desc *);
+
+ /* Optional regulators that have to be managed on power on/off events */
+ struct v4l2_subdev_platform_data sd_pdata;
};
struct soc_camera_host_desc {
@@ -202,9 +210,10 @@ struct soc_camera_link {
void *priv;
- /* Optional regulators that have to be managed on power on/off events */
- struct regulator_bulk_data *regulators;
- int num_regulators;
+ /* Set by platforms to handle misbehaving drivers */
+ bool unbalanced_power;
+ /* Used by soc-camera helper functions */
+ unsigned long clock_state;
/* Optional callbacks to power on or off and reset the sensor */
int (*power)(struct device *, int);
@@ -218,6 +227,12 @@ struct soc_camera_link {
unsigned long (*query_bus_param)(struct soc_camera_link *);
void (*free_bus)(struct soc_camera_link *);
+ /* Optional regulators that have to be managed on power on/off events */
+ struct regulator_bulk_data *regulators;
+ int num_regulators;
+
+ void *host_priv;
+
/*
* Host part - keep at bottom and compatible to
* struct soc_camera_host_desc
diff --git a/include/media/v4l2-clk.h b/include/media/v4l2-clk.h
index 0503a90b48bb..0b36cc138304 100644
--- a/include/media/v4l2-clk.h
+++ b/include/media/v4l2-clk.h
@@ -15,6 +15,7 @@
#define MEDIA_V4L2_CLK_H
#include <linux/atomic.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -51,4 +52,20 @@ void v4l2_clk_disable(struct v4l2_clk *clk);
unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk);
int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate);
+struct module;
+
+struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
+ const char *id, unsigned long rate, struct module *owner);
+void v4l2_clk_unregister_fixed(struct v4l2_clk *clk);
+
+static inline struct v4l2_clk *v4l2_clk_register_fixed(const char *dev_id,
+ const char *id,
+ unsigned long rate)
+{
+ return __v4l2_clk_register_fixed(dev_id, id, rate, THIS_MODULE);
+}
+
+#define v4l2_clk_name_i2c(name, size, adap, client) snprintf(name, size, \
+ "%d-%04x", adap, client)
+
#endif
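/*
 * Sketch of registering a fixed-rate sensor clock for an I2C subdevice,
 * using v4l2_clk_name_i2c() to build the "adapter-address" consumer name.
 * The 24 MHz rate and the foo_ prefix are illustrative only.
 */
#include <media/v4l2-clk.h>
#include <linux/i2c.h>

static struct v4l2_clk *foo_register_mclk(struct i2c_client *client)
{
	char clk_name[32];

	v4l2_clk_name_i2c(clk_name, sizeof(clk_name),
			  i2c_adapter_id(client->adapter), client->addr);

	return v4l2_clk_register_fixed(clk_name, "mclk", 24000000);
}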
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 16550c439008..48f974866f13 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -35,7 +35,7 @@
printk(level "%s %d-%04x: " fmt, name, i2c_adapter_id(adapter), addr , ## arg)
#define v4l_client_printk(level, client, fmt, arg...) \
- v4l_printk(level, (client)->driver->driver.name, (client)->adapter, \
+ v4l_printk(level, (client)->dev.driver->name, (client)->adapter, \
(client)->addr, fmt , ## arg)
#define v4l_err(client, fmt, arg...) \
@@ -86,7 +86,7 @@ int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
const char * const *menu_items);
const char *v4l2_ctrl_get_name(u32 id);
const char * const *v4l2_ctrl_get_menu(u32 id);
-const s64 const *v4l2_ctrl_get_int_menu(u32 id, u32 *len);
+const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len);
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def);
int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu,
struct v4l2_queryctrl *qctrl, const char * const *menu_items);
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 47ada23345a1..16f7f2606516 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -571,7 +571,7 @@ static inline void v4l2_ctrl_lock(struct v4l2_ctrl *ctrl)
mutex_lock(ctrl->handler->lock);
}
-/** v4l2_ctrl_lock() - Helper function to unlock the handler
+/** v4l2_ctrl_unlock() - Helper function to unlock the handler
* associated with the control.
* @ctrl: The control to unlock.
*/
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index a62ee18cb7b7..528cdaf622e1 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -26,7 +26,9 @@
#ifndef V4L2_FH_H
#define V4L2_FH_H
+#include <linux/fs.h>
#include <linux/list.h>
+#include <linux/videodev2.h>
struct video_device;
struct v4l2_ctrl_handler;
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index bfda0fe9aeb0..d67210a37ef3 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -559,6 +559,17 @@ struct v4l2_subdev_internal_ops {
/* Set this flag if this subdev generates events. */
#define V4L2_SUBDEV_FL_HAS_EVENTS (1U << 3)
+struct regulator_bulk_data;
+
+struct v4l2_subdev_platform_data {
+	/* Optional regulators used to power on/off the subdevice */
+ struct regulator_bulk_data *regulators;
+ int num_regulators;
+
+ /* Per-subdevice data, specific for a certain video host device */
+ void *host_priv;
+};
+
/* Each instance of a subdev driver should create this struct, either
stand-alone or embedded in a larger struct.
*/
@@ -592,6 +603,8 @@ struct v4l2_subdev {
struct v4l2_async_subdev *asd;
/* Pointer to the managing notifier. */
struct v4l2_async_notifier *notifier;
+ /* common part of subdevice platform data */
+ struct v4l2_subdev_platform_data *pdata;
};
#define media_entity_to_v4l2_subdev(ent) \
@@ -622,13 +635,13 @@ struct v4l2_subdev_fh {
v4l2_subdev_get_try_##fun_name(struct v4l2_subdev_fh *fh, \
unsigned int pad) \
{ \
- BUG_ON(unlikely(pad >= vdev_to_v4l2_subdev( \
- fh->vfh.vdev)->entity.num_pads)); \
+ BUG_ON(pad >= vdev_to_v4l2_subdev( \
+ fh->vfh.vdev)->entity.num_pads); \
return &fh->pad[pad].field_name; \
}
__V4L2_SUBDEV_MK_GET_TRY(v4l2_mbus_framefmt, format, try_fmt)
-__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, crop, try_compose)
+__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, crop, try_crop)
__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, compose, try_compose)
#endif
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 6781258d0b67..bd8218b15009 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -391,7 +391,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
loff_t *ppos, int nonblock);
-size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
loff_t *ppos, int nonblock);
/**
@@ -491,7 +491,7 @@ int vb2_ioctl_expbuf(struct file *file, void *priv,
int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma);
int vb2_fop_release(struct file *file);
-ssize_t vb2_fop_write(struct file *file, char __user *buf,
+ssize_t vb2_fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos);
ssize_t vb2_fop_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
diff --git a/include/media/videobuf2-dma-sg.h b/include/media/videobuf2-dma-sg.h
index 0038526b8ef7..7b89852779af 100644
--- a/include/media/videobuf2-dma-sg.h
+++ b/include/media/videobuf2-dma-sg.h
@@ -15,16 +15,10 @@
#include <media/videobuf2-core.h>
-struct vb2_dma_sg_desc {
- unsigned long size;
- unsigned int num_pages;
- struct scatterlist *sglist;
-};
-
-static inline struct vb2_dma_sg_desc *vb2_dma_sg_plane_desc(
+static inline struct sg_table *vb2_dma_sg_plane_desc(
struct vb2_buffer *vb, unsigned int plane_no)
{
- return (struct vb2_dma_sg_desc *)vb2_plane_cookie(vb, plane_no);
+ return (struct sg_table *)vb2_plane_cookie(vb, plane_no);
}
extern const struct vb2_mem_ops vb2_dma_sg_memops;
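A hypothetical driver fragment (assuming a single-plane buffer and a valid dev) showing why returning a plain sg_table is convenient: it can be handed straight to the DMA mapping API without the old private descriptor:
static int drv_map_plane(struct device *dev, struct vb2_buffer *vb)
{
	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);

	if (!sgt)
		return -EINVAL;

	/* Map the scatterlist for device-to-memory DMA. */
	if (!dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE))
		return -EIO;

	return 0;
}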
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 10d43d8c7037..2a628b28249f 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -197,8 +197,8 @@ static inline bool bdaddr_type_is_le(__u8 type)
return false;
}
-#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0} })
-#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} })
+#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
+#define BDADDR_NONE (&(bdaddr_t) {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}})
/* Copy, swap, convert BD Address */
static inline int bacmp(const bdaddr_t *ba1, const bdaddr_t *ba2)
@@ -218,11 +218,10 @@ void baswap(bdaddr_t *dst, bdaddr_t *src);
struct bt_sock {
struct sock sk;
- bdaddr_t src;
- bdaddr_t dst;
struct list_head accept_q;
struct sock *parent;
unsigned long flags;
+ void (*skb_msg_name)(struct sk_buff *, void *, int *);
};
enum {
@@ -249,6 +248,7 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
+int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
void bt_accept_enqueue(struct sock *parent, struct sock *sk);
void bt_accept_unlink(struct sock *sk);
@@ -282,8 +282,11 @@ struct bt_skb_cb {
__u8 incoming;
__u16 expect;
__u8 force_active;
+ struct l2cap_chan *chan;
struct l2cap_ctrl control;
struct hci_req_ctrl req;
+ bdaddr_t bdaddr;
+ __le16 psm;
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
@@ -331,16 +334,16 @@ out:
int bt_to_errno(__u16 code);
-extern int hci_sock_init(void);
-extern void hci_sock_cleanup(void);
+int hci_sock_init(void);
+void hci_sock_cleanup(void);
-extern int bt_sysfs_init(void);
-extern void bt_sysfs_cleanup(void);
+int bt_sysfs_init(void);
+void bt_sysfs_cleanup(void);
-extern int bt_procfs_init(struct net *net, const char *name,
- struct bt_sock_list* sk_list,
- int (* seq_show)(struct seq_file *, void *));
-extern void bt_procfs_cleanup(struct net *net, const char *name);
+int bt_procfs_init(struct net *net, const char *name,
+ struct bt_sock_list *sk_list,
+ int (*seq_show)(struct seq_file *, void *));
+void bt_procfs_cleanup(struct net *net, const char *name);
extern struct dentry *bt_debugfs;
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 15f10841e2b5..1784c48699f0 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -35,6 +35,8 @@
#define HCI_MAX_AMP_ASSOC_SIZE 672
+#define HCI_MAX_CSB_DATA_SIZE 252
+
/* HCI dev events */
#define HCI_DEV_REG 1
#define HCI_DEV_UNREG 2
@@ -62,16 +64,20 @@
#define HCI_AMP 0x01
/* First BR/EDR Controller shall have ID = 0 */
-#define HCI_BREDR_ID 0
+#define AMP_ID_BREDR 0x00
+
+/* AMP controller types */
+#define AMP_TYPE_BREDR 0x00
+#define AMP_TYPE_80211 0x01
/* AMP controller status */
-#define AMP_CTRL_POWERED_DOWN 0x00
-#define AMP_CTRL_BLUETOOTH_ONLY 0x01
-#define AMP_CTRL_NO_CAPACITY 0x02
-#define AMP_CTRL_LOW_CAPACITY 0x03
-#define AMP_CTRL_MEDIUM_CAPACITY 0x04
-#define AMP_CTRL_HIGH_CAPACITY 0x05
-#define AMP_CTRL_FULL_CAPACITY 0x06
+#define AMP_STATUS_POWERED_DOWN 0x00
+#define AMP_STATUS_BLUETOOTH_ONLY 0x01
+#define AMP_STATUS_NO_CAPACITY 0x02
+#define AMP_STATUS_LOW_CAPACITY 0x03
+#define AMP_STATUS_MEDIUM_CAPACITY 0x04
+#define AMP_STATUS_HIGH_CAPACITY 0x05
+#define AMP_STATUS_FULL_CAPACITY 0x06
/* HCI device quirks */
enum {
@@ -109,18 +115,22 @@ enum {
HCI_PAIRABLE,
HCI_SERVICE_CACHE,
HCI_DEBUG_KEYS,
+ HCI_DUT_MODE,
HCI_UNREGISTER,
+ HCI_USER_CHANNEL,
HCI_LE_SCAN,
HCI_SSP_ENABLED,
HCI_HS_ENABLED,
HCI_LE_ENABLED,
- HCI_LE_PERIPHERAL,
+ HCI_ADVERTISING,
HCI_CONNECTABLE,
HCI_DISCOVERABLE,
+ HCI_LIMITED_DISCOVERABLE,
HCI_LINK_SECURITY,
HCI_PERIODIC_INQ,
HCI_FAST_CONNECTABLE,
+ HCI_BREDR_ENABLED,
};
/* A mask for the flags that are supposed to remain when a reset happens
@@ -624,6 +634,24 @@ struct hci_rp_logical_link_cancel {
__u8 flow_spec_id;
} __packed;
+#define HCI_OP_SET_CSB 0x0441
+struct hci_cp_set_csb {
+ __u8 enable;
+ __u8 lt_addr;
+ __u8 lpo_allowed;
+ __le16 packet_type;
+ __le16 interval_min;
+ __le16 interval_max;
+ __le16 csb_sv_tout;
+} __packed;
+struct hci_rp_set_csb {
+ __u8 status;
+ __u8 lt_addr;
+ __le16 interval;
+} __packed;
+
+#define HCI_OP_START_SYNC_TRAIN 0x0443
+
#define HCI_OP_SNIFF_MODE 0x0803
struct hci_cp_sniff_mode {
__le16 handle;
@@ -694,9 +722,6 @@ struct hci_cp_sniff_subrate {
} __packed;
#define HCI_OP_SET_EVENT_MASK 0x0c01
-struct hci_cp_set_event_mask {
- __u8 mask[8];
-} __packed;
#define HCI_OP_RESET 0x0c03
@@ -792,6 +817,20 @@ struct hci_cp_host_buffer_size {
__le16 sco_max_pkt;
} __packed;
+#define HCI_OP_READ_NUM_SUPPORTED_IAC 0x0c38
+struct hci_rp_read_num_supported_iac {
+ __u8 status;
+ __u8 num_iac;
+} __packed;
+
+#define HCI_OP_READ_CURRENT_IAC_LAP 0x0c39
+
+#define HCI_OP_WRITE_CURRENT_IAC_LAP 0x0c3a
+struct hci_cp_write_current_iac_lap {
+ __u8 num_iac;
+ __u8 iac_lap[6];
+} __packed;
+
#define HCI_OP_WRITE_INQUIRY_MODE 0x0c45
#define HCI_MAX_EIR_LENGTH 240
@@ -826,6 +865,10 @@ struct hci_rp_read_inq_rsp_tx_power {
__s8 tx_power;
} __packed;
+#define HCI_OP_SET_EVENT_MASK_PAGE_2 0x0c63
+
+#define HCI_OP_READ_LOCATION_DATA 0x0c64
+
#define HCI_OP_READ_FLOW_CONTROL_MODE 0x0c66
struct hci_rp_read_flow_control_mode {
__u8 status;
@@ -838,6 +881,50 @@ struct hci_cp_write_le_host_supported {
__u8 simul;
} __packed;
+#define HCI_OP_SET_RESERVED_LT_ADDR 0x0c74
+struct hci_cp_set_reserved_lt_addr {
+ __u8 lt_addr;
+} __packed;
+struct hci_rp_set_reserved_lt_addr {
+ __u8 status;
+ __u8 lt_addr;
+} __packed;
+
+#define HCI_OP_DELETE_RESERVED_LT_ADDR 0x0c75
+struct hci_cp_delete_reserved_lt_addr {
+ __u8 lt_addr;
+} __packed;
+struct hci_rp_delete_reserved_lt_addr {
+ __u8 status;
+ __u8 lt_addr;
+} __packed;
+
+#define HCI_OP_SET_CSB_DATA 0x0c76
+struct hci_cp_set_csb_data {
+ __u8 lt_addr;
+ __u8 fragment;
+ __u8 data_length;
+ __u8 data[HCI_MAX_CSB_DATA_SIZE];
+} __packed;
+struct hci_rp_set_csb_data {
+ __u8 status;
+ __u8 lt_addr;
+} __packed;
+
+#define HCI_OP_READ_SYNC_TRAIN_PARAMS 0x0c77
+
+#define HCI_OP_WRITE_SYNC_TRAIN_PARAMS 0x0c78
+struct hci_cp_write_sync_train_params {
+ __le16 interval_min;
+ __le16 interval_max;
+ __le32 sync_train_tout;
+ __u8 service_data;
+} __packed;
+struct hci_rp_write_sync_train_params {
+ __u8 status;
+ __le16 sync_train_int;
+} __packed;
+
#define HCI_OP_READ_LOCAL_VERSION 0x1001
struct hci_rp_read_local_version {
__u8 status;
@@ -957,6 +1044,10 @@ struct hci_rp_write_remote_amp_assoc {
__u8 phy_handle;
} __packed;
+#define HCI_OP_ENABLE_DUT_MODE 0x1803
+
+#define HCI_OP_WRITE_SSP_DEBUG_MODE 0x1804
+
#define HCI_OP_LE_SET_EVENT_MASK 0x2001
struct hci_cp_le_set_event_mask {
__u8 mask[8];
@@ -975,6 +1066,20 @@ struct hci_rp_le_read_local_features {
__u8 features[8];
} __packed;
+#define HCI_OP_LE_SET_RANDOM_ADDR 0x2005
+
+#define HCI_OP_LE_SET_ADV_PARAM 0x2006
+struct hci_cp_le_set_adv_param {
+ __le16 min_interval;
+ __le16 max_interval;
+ __u8 type;
+ __u8 own_address_type;
+ __u8 direct_addr_type;
+ bdaddr_t direct_addr;
+ __u8 channel_map;
+ __u8 filter_policy;
+} __packed;
+
#define HCI_OP_LE_READ_ADV_TX_POWER 0x2007
struct hci_rp_le_read_adv_tx_power {
__u8 status;
@@ -989,6 +1094,12 @@ struct hci_cp_le_set_adv_data {
__u8 data[HCI_MAX_AD_LENGTH];
} __packed;
+#define HCI_OP_LE_SET_SCAN_RSP_DATA 0x2009
+struct hci_cp_le_set_scan_rsp_data {
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
#define HCI_OP_LE_SET_ADV_ENABLE 0x200a
#define LE_SCAN_PASSIVE 0x00
@@ -1438,6 +1549,13 @@ struct hci_ev_num_comp_blocks {
struct hci_comp_blocks_info handles[0];
} __packed;
+#define HCI_EV_SYNC_TRAIN_COMPLETE 0x4F
+struct hci_ev_sync_train_complete {
+ __u8 status;
+} __packed;
+
+#define HCI_EV_SLAVE_PAGE_RESP_TIMEOUT 0x54
+
/* Low energy meta events */
#define LE_CONN_ROLE_MASTER 0x00
@@ -1462,11 +1580,11 @@ struct hci_ev_le_ltk_req {
} __packed;
/* Advertising report event types */
-#define ADV_IND 0x00
-#define ADV_DIRECT_IND 0x01
-#define ADV_SCAN_IND 0x02
-#define ADV_NONCONN_IND 0x03
-#define ADV_SCAN_RSP 0x04
+#define LE_ADV_IND 0x00
+#define LE_ADV_DIRECT_IND 0x01
+#define LE_ADV_SCAN_IND 0x02
+#define LE_ADV_NONCONN_IND 0x03
+#define LE_ADV_SCAN_RSP 0x04
#define ADDR_LE_DEV_PUBLIC 0x00
#define ADDR_LE_DEV_RANDOM 0x01
@@ -1571,6 +1689,7 @@ struct sockaddr_hci {
#define HCI_DEV_NONE 0xffff
#define HCI_CHANNEL_RAW 0
+#define HCI_CHANNEL_USER 1
#define HCI_CHANNEL_MONITOR 2
#define HCI_CHANNEL_CONTROL 3
@@ -1673,6 +1792,4 @@ struct hci_inquiry_req {
};
#define IREQ_CACHE_FLUSH 0x0001
-extern bool enable_hs;
-
#endif /* __HCI_H */
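Purely as an illustration of the new LE advertising parameter command (the helper name and the interval values are invented for this sketch):
static void build_adv_param(struct hci_request *req)
{
	struct hci_cp_le_set_adv_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(0x0800);	/* 1.28 s, illustrative */
	cp.max_interval = cpu_to_le16(0x0800);
	cp.own_address_type = ADDR_LE_DEV_PUBLIC;
	cp.channel_map = 0x07;			/* advertising channels 37-39 */

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
}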
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 3ede820d328f..f8555ad7b104 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -81,6 +81,7 @@ struct hci_conn_hash {
struct bdaddr_list {
struct list_head list;
bdaddr_t bdaddr;
+ u8 bdaddr_type;
};
struct bt_uuid {
@@ -140,6 +141,8 @@ struct hci_dev {
__u8 bus;
__u8 dev_type;
bdaddr_t bdaddr;
+ bdaddr_t static_addr;
+ __u8 own_addr_type;
__u8 dev_name[HCI_MAX_NAME_LENGTH];
__u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
__u8 eir[HCI_MAX_EIR_LENGTH];
@@ -158,11 +161,17 @@ struct hci_dev {
__u16 manufacturer;
__u16 lmp_subver;
__u16 voice_setting;
+ __u8 num_iac;
__u8 io_capability;
__s8 inq_tx_power;
__u16 page_scan_interval;
__u16 page_scan_window;
__u8 page_scan_type;
+ __u16 le_scan_interval;
+ __u16 le_scan_window;
+ __u16 le_conn_min_interval;
+ __u16 le_conn_max_interval;
+ __u8 ssp_debug_mode;
__u16 devid_source;
__u16 devid_vendor;
@@ -279,14 +288,15 @@ struct hci_dev {
__s8 adv_tx_power;
__u8 adv_data[HCI_MAX_AD_LENGTH];
__u8 adv_data_len;
+ __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+ __u8 scan_rsp_data_len;
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
int (*setup)(struct hci_dev *hdev);
- int (*send)(struct sk_buff *skb);
+ int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
void (*notify)(struct hci_dev *hdev, unsigned int evt);
- int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
@@ -298,6 +308,8 @@ struct hci_conn {
bdaddr_t dst;
__u8 dst_type;
+ bdaddr_t src;
+ __u8 src_type;
__u16 handle;
__u16 state;
__u8 mode;
@@ -306,7 +318,6 @@ struct hci_conn {
__u8 attempt;
__u8 dev_class[3];
__u8 features[HCI_MAX_PAGES][8];
- __u16 interval;
__u16 pkt_type;
__u16 link_policy;
__u32 link_mode;
@@ -334,8 +345,8 @@ struct hci_conn {
struct list_head chan_list;
struct delayed_work disc_work;
- struct timer_list idle_timer;
- struct timer_list auto_accept_timer;
+ struct delayed_work auto_accept_work;
+ struct delayed_work idle_work;
struct device dev;
@@ -367,18 +378,17 @@ extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;
/* ----- HCI interface to upper protocols ----- */
-extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-extern void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
-extern int l2cap_disconn_ind(struct hci_conn *hcon);
-extern void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
-extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
-extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
- u16 flags);
-
-extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
-extern void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
-extern void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
-extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
+void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
+int l2cap_disconn_ind(struct hci_conn *hcon);
+void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
+int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
+int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+
+int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
+void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
+void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
+int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
@@ -644,7 +654,7 @@ static inline void hci_conn_drop(struct hci_conn *conn)
switch (conn->type) {
case ACL_LINK:
case LE_LINK:
- del_timer(&conn->idle_timer);
+ cancel_delayed_work(&conn->idle_work);
if (conn->state == BT_CONNECTED) {
timeo = conn->disc_timeout;
if (!conn->out)
@@ -703,19 +713,6 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
dev_set_drvdata(&hdev->dev, data);
}
-/* hci_dev_list shall be locked */
-static inline uint8_t __hci_num_ctrl(void)
-{
- uint8_t count = 0;
- struct list_head *p;
-
- list_for_each(p, &hci_dev_list) {
- count++;
- }
-
- return count;
-}
-
struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src);
@@ -738,7 +735,7 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
- bdaddr_t *bdaddr);
+ bdaddr_t *bdaddr, u8 type);
int hci_blacklist_clear(struct hci_dev *hdev);
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
@@ -768,13 +765,11 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
-int hci_recv_frame(struct sk_buff *skb);
+int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
void hci_init_sysfs(struct hci_dev *hdev);
-int hci_add_sysfs(struct hci_dev *hdev);
-void hci_del_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);
@@ -807,22 +802,6 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
-/* returns true if at least one AMP active */
-static inline bool hci_amp_capable(void)
-{
- struct hci_dev *hdev;
- bool ret = false;
-
- read_lock(&hci_dev_list_lock);
- list_for_each_entry(hdev, &hci_dev_list, list)
- if (hdev->amp_type == HCI_AMP &&
- test_bit(HCI_UP, &hdev->flags))
- ret = true;
- read_unlock(&hci_dev_list_lock);
-
- return ret;
-}
-
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1033,34 +1012,6 @@ static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
return false;
}
-static inline size_t eir_get_length(u8 *eir, size_t eir_len)
-{
- size_t parsed = 0;
-
- while (parsed < eir_len) {
- u8 field_len = eir[0];
-
- if (field_len == 0)
- return parsed;
-
- parsed += field_len + 1;
- eir += field_len + 1;
- }
-
- return eir_len;
-}
-
-static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
- u8 data_len)
-{
- eir[eir_len++] = sizeof(type) + data_len;
- eir[eir_len++] = type;
- memcpy(&eir[eir_len], data, data_len);
- eir_len += data_len;
-
- return eir_len;
-}
-
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
@@ -1120,29 +1071,30 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
#define DISCOV_BREDR_INQUIRY_LEN 0x08
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
-int mgmt_index_added(struct hci_dev *hdev);
-int mgmt_index_removed(struct hci_dev *hdev);
-int mgmt_set_powered_failed(struct hci_dev *hdev, int err);
+void mgmt_index_added(struct hci_dev *hdev);
+void mgmt_index_removed(struct hci_dev *hdev);
+void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
int mgmt_powered(struct hci_dev *hdev, u8 powered);
-int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
-int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
-int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
- bool persistent);
-int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u32 flags, u8 *name, u8 name_len,
- u8 *dev_class);
-int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type, u8 reason);
-int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type, u8 status);
-int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status);
-int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
-int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status);
-int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status);
+void mgmt_discoverable_timeout(struct hci_dev *hdev);
+void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
+void mgmt_connectable(struct hci_dev *hdev, u8 connectable);
+void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
+void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ bool persistent);
+void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u32 flags, u8 *name, u8 name_len,
+ u8 *dev_class);
+void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 reason);
+void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status);
+void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
+void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
+void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, __le32 value,
u8 confirm_hint);
@@ -1159,26 +1111,25 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u32 passkey,
u8 entered);
-int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status);
-int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
-int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
-int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
- u8 status);
-int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
-int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
- u8 *randomizer, u8 status);
-int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
-int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
- u8 ssp, u8 *eir, u16 eir_len);
-int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, s8 rssi, u8 *name, u8 name_len);
-int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status);
+void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
+void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
+void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+ u8 status);
+void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
+void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+ u8 *randomizer, u8 status);
+void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
+ u8 ssp, u8 *eir, u16 eir_len);
+void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, s8 rssi, u8 *name, u8 name_len);
+void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-bool mgmt_valid_hdev(struct hci_dev *hdev);
-int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
+void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
+void mgmt_reenable_advertising(struct hci_dev *hdev);
/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)
@@ -1208,15 +1159,11 @@ struct hci_sec_filter {
#define hci_req_lock(d) mutex_lock(&d->req_lock)
#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
-void hci_update_ad(struct hci_request *req);
-
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
u16 latency, u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
__u8 ltk[16]);
-u8 bdaddr_to_le(u8 bdaddr_type);
-
#define SCO_AIRMODE_MASK 0x0003
#define SCO_AIRMODE_CVSD 0x0000
#define SCO_AIRMODE_TRANSP 0x0003
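As a sketch only, a transport driver delivering received data would now pass the originating hdev explicitly to the core; drv_rx_complete is a hypothetical callback:
static int drv_rx_complete(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Tag the frame type and hand it to the core for this device. */
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	return hci_recv_frame(hdev, skb);
}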
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 1a966afbbfa8..c853b16de4ef 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -131,6 +131,7 @@ struct l2cap_conninfo {
/* L2CAP fixed channels */
#define L2CAP_FC_L2CAP 0x02
+#define L2CAP_FC_CONNLESS 0x04
#define L2CAP_FC_A2MP 0x08
/* L2CAP Control Field bit masks */
@@ -237,8 +238,9 @@ struct l2cap_conn_rsp {
/* protocol/service multiplexer (PSM) */
#define L2CAP_PSM_SDP 0x0001
#define L2CAP_PSM_RFCOMM 0x0003
+#define L2CAP_PSM_3DSP 0x0021
-/* channel indentifier */
+/* channel identifier */
#define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002
#define L2CAP_CID_A2MP 0x0003
@@ -433,8 +435,6 @@ struct l2cap_seq_list {
#define L2CAP_SEQ_LIST_TAIL 0x8000
struct l2cap_chan {
- struct sock *sk;
-
struct l2cap_conn *conn;
struct hci_conn *hs_hcon;
struct hci_chan *hs_hchan;
@@ -442,7 +442,12 @@ struct l2cap_chan {
__u8 state;
+ bdaddr_t dst;
+ __u8 dst_type;
+ bdaddr_t src;
+ __u8 src_type;
__le16 psm;
+ __le16 sport;
__u16 dcid;
__u16 scid;
@@ -453,8 +458,6 @@ struct l2cap_chan {
__u8 chan_type;
__u8 chan_policy;
- __le16 sport;
-
__u8 sec_level;
__u8 ident;
@@ -546,9 +549,12 @@ struct l2cap_ops {
void (*teardown) (struct l2cap_chan *chan, int err);
void (*close) (struct l2cap_chan *chan);
void (*state_change) (struct l2cap_chan *chan,
- int state);
+ int state, int err);
void (*ready) (struct l2cap_chan *chan);
void (*defer) (struct l2cap_chan *chan);
+ void (*resume) (struct l2cap_chan *chan);
+ void (*set_shutdown) (struct l2cap_chan *chan);
+ long (*get_sndtimeo) (struct l2cap_chan *chan);
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
unsigned long len, int nb);
};
@@ -557,13 +563,11 @@ struct l2cap_conn {
struct hci_conn *hcon;
struct hci_chan *hchan;
- bdaddr_t *dst;
- bdaddr_t *src;
-
unsigned int mtu;
__u32 feat_mask;
__u8 fixed_chan_mask;
+ bool hs_enabled;
__u8 info_state;
__u8 info_ident;
@@ -649,6 +653,7 @@ enum {
FLAG_FLUSHABLE,
FLAG_EXT_CTRL,
FLAG_EFS_ENABLE,
+ FLAG_DEFER_SETUP,
};
enum {
@@ -790,6 +795,19 @@ static inline void l2cap_chan_no_defer(struct l2cap_chan *chan)
{
}
+static inline void l2cap_chan_no_resume(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_set_shutdown(struct l2cap_chan *chan)
+{
+}
+
+static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
+{
+ return 0;
+}
+
extern bool disable_ertm;
int l2cap_init_sockets(void);
@@ -797,7 +815,6 @@ void l2cap_cleanup_sockets(void);
bool l2cap_is_socket(struct socket *sock);
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
-int __l2cap_wait_ack(struct sock *sk);
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
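Illustrative only: a channel owner with no special resume/shutdown handling could wire the new no-op helpers into its ops table; drv_l2cap_ops is hypothetical and the mandatory callbacks are omitted from this sketch:
static const struct l2cap_ops drv_l2cap_ops = {
	.name		= "drv",
	/* ... mandatory callbacks omitted in this sketch ... */
	.resume		= l2cap_chan_no_resume,
	.set_shutdown	= l2cap_chan_no_set_shutdown,
	.get_sndtimeo	= l2cap_chan_no_get_sndtimeo,
};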
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 9944c3e68c5d..518c5c84e39a 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -93,6 +93,7 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_BREDR 0x00000080
#define MGMT_SETTING_HS 0x00000100
#define MGMT_SETTING_LE 0x00000200
+#define MGMT_SETTING_ADVERTISING 0x00000400
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -351,6 +352,23 @@ struct mgmt_cp_set_device_id {
} __packed;
#define MGMT_SET_DEVICE_ID_SIZE 8
+#define MGMT_OP_SET_ADVERTISING 0x0029
+
+#define MGMT_OP_SET_BREDR 0x002A
+
+#define MGMT_OP_SET_STATIC_ADDRESS 0x002B
+struct mgmt_cp_set_static_address {
+ bdaddr_t bdaddr;
+} __packed;
+#define MGMT_SET_STATIC_ADDRESS_SIZE 6
+
+#define MGMT_OP_SET_SCAN_PARAMS 0x002C
+struct mgmt_cp_set_scan_params {
+ __le16 interval;
+ __le16 window;
+} __packed;
+#define MGMT_SET_SCAN_PARAMS_SIZE 4
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 7afd4199d6b6..486213a1aed8 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -256,8 +256,8 @@ static inline void rfcomm_dlc_put(struct rfcomm_dlc *d)
rfcomm_dlc_free(d);
}
-extern void __rfcomm_dlc_throttle(struct rfcomm_dlc *d);
-extern void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d);
+void __rfcomm_dlc_throttle(struct rfcomm_dlc *d);
+void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d);
static inline void rfcomm_dlc_throttle(struct rfcomm_dlc *d)
{
@@ -300,6 +300,8 @@ struct rfcomm_conninfo {
struct rfcomm_pinfo {
struct bt_sock bt;
+ bdaddr_t src;
+ bdaddr_t dst;
struct rfcomm_dlc *dlc;
u8 channel;
u8 sec_level;
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index e252a31ee6b6..2019d1a0996a 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -55,9 +55,6 @@ struct sco_conninfo {
struct sco_conn {
struct hci_conn *hcon;
- bdaddr_t *dst;
- bdaddr_t *src;
-
spinlock_t lock;
struct sock *sk;
@@ -72,6 +69,8 @@ struct sco_conn {
struct sco_pinfo {
struct bt_sock bt;
+ bdaddr_t src;
+ bdaddr_t dst;
__u32 flags;
__u16 setting;
struct sco_conn *conn;
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
index 4795e817afe5..097f69cfaa75 100644
--- a/include/net/caif/caif_hsi.h
+++ b/include/net/caif/caif_hsi.h
@@ -195,6 +195,6 @@ enum ifla_caif_hsi {
__IFLA_CAIF_HSI_MAX
};
-extern struct cfhsi_ops *cfhsi_get_ops(void);
+struct cfhsi_ops *cfhsi_get_ops(void);
#endif /* CAIF_HSI_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index cb710913d5c8..419202ce3f95 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -437,6 +437,15 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
u32 prohibited_flags);
/**
+ * cfg80211_chandef_dfs_required - checks if radar detection is required
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ * Return: 1 if radar detection is required, 0 if it is not, < 0 on error
+ */
+int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef);
+
+/**
* ieee80211_chandef_rate_flags - returns rate flags for a channel
*
* In some channel types, not all rates may be used - for example CCK
@@ -3474,6 +3483,15 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
u32 center_freq);
+/**
+ * reg_initiator_name - map regulatory request initiator enum to name
+ * @initiator: the regulatory request initiator
+ *
+ * You can use this to map the regulatory request initiator enum to a
+ * proper string representation.
+ */
+const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
+
/*
* callbacks for asynchronous cfg80211 methods, notification
* functions and BSS handling helpers
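A possible use of the new helper in a driver's regulatory notifier (sketch; drv_reg_notifier and the log text are invented):
static void drv_reg_notifier(struct wiphy *wiphy,
			     struct regulatory_request *request)
{
	wiphy_info(wiphy, "regulatory update by %s to %c%c\n",
		   reg_initiator_name(request->initiator),
		   request->alpha2[0], request->alpha2[1]);
}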
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 8f59ca50477c..37a0e24adbe7 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -79,6 +79,12 @@ csum_block_add(__wsum csum, __wsum csum2, int offset)
}
static inline __wsum
+csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
+{
+ return csum_block_add(csum, csum2, offset);
+}
+
+static inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
u32 sum = (__force u32)csum2;
@@ -92,6 +98,11 @@ static inline __wsum csum_unfold(__sum16 n)
return (__force __wsum)n;
}
+static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
+{
+ return csum_partial(buff, len, sum);
+}
+
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index a7a683e30b64..a8c2ef6d3b93 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -290,6 +290,7 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
unsigned char err_offset = 0;
u8 opt_len = opt[1];
u8 opt_iter;
+ u8 tag_len;
if (opt_len < 8) {
err_offset = 1;
@@ -302,11 +303,12 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
}
for (opt_iter = 6; opt_iter < opt_len;) {
- if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
+ tag_len = opt[opt_iter + 1];
+ if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
err_offset = opt_iter + 1;
goto out;
}
- opt_iter += opt[opt_iter + 1];
+ opt_iter += tag_len;
}
out:
diff --git a/include/net/compat.h b/include/net/compat.h
index 6e9565324989..3b603b199c01 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -29,8 +29,8 @@ struct compat_cmsghdr {
compat_int_t cmsg_type;
};
-extern int compat_sock_get_timestamp(struct sock *, struct timeval __user *);
-extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
+int compat_sock_get_timestamp(struct sock *, struct timeval __user *);
+int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
#else /* defined(CONFIG_COMPAT) */
/*
@@ -40,24 +40,30 @@ extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
#define compat_mmsghdr mmsghdr
#endif /* defined(CONFIG_COMPAT) */
-extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
-extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr_storage *, int);
-extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned int);
-extern asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
- unsigned int, unsigned int);
-extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned int);
-extern asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
- unsigned int, unsigned int,
- struct compat_timespec __user *);
-extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *);
-extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
-
-extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int);
-
-extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
- int (*)(struct sock *, int, int, char __user *, unsigned int));
-extern int compat_mc_getsockopt(struct sock *, int, int, char __user *,
- int __user *, int (*)(struct sock *, int, int, char __user *,
- int __user *));
+int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
+int verify_compat_iovec(struct msghdr *, struct iovec *,
+ struct sockaddr_storage *, int);
+asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
+ unsigned int);
+asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
+ unsigned int, unsigned int);
+asmlinkage long compat_sys_recvmsg(int, struct compat_msghdr __user *,
+ unsigned int);
+asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
+ unsigned int, unsigned int,
+ struct compat_timespec __user *);
+asmlinkage long compat_sys_getsockopt(int, int, int, char __user *,
+ int __user *);
+int put_cmsg_compat(struct msghdr*, int, int, int, void *);
+
+int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *,
+ unsigned char *, int);
+
+int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
+ int (*)(struct sock *, int, int, char __user *,
+ unsigned int));
+int compat_mc_getsockopt(struct sock *, int, int, char __user *, int __user *,
+ int (*)(struct sock *, int, int, char __user *,
+ int __user *));
#endif /* NET_COMPAT_H */
diff --git a/include/net/dcbevent.h b/include/net/dcbevent.h
index 443626ed4cbc..d2f3041c0dfa 100644
--- a/include/net/dcbevent.h
+++ b/include/net/dcbevent.h
@@ -25,9 +25,9 @@ enum dcbevent_notif_type {
};
#ifdef CONFIG_DCB
-extern int register_dcbevent_notifier(struct notifier_block *nb);
-extern int unregister_dcbevent_notifier(struct notifier_block *nb);
-extern int call_dcbevent_notifiers(unsigned long val, void *v);
+int register_dcbevent_notifier(struct notifier_block *nb);
+int unregister_dcbevent_notifier(struct notifier_block *nb);
+int call_dcbevent_notifiers(unsigned long val, void *v);
#else
static inline int
register_dcbevent_notifier(struct notifier_block *nb)
diff --git a/include/net/dn.h b/include/net/dn.h
index c88bf4ebd330..ccc15588d108 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -199,24 +199,26 @@ static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
fld->fld_dport = scp->addrrem;
}
-extern unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
+unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
#define DN_MENUVER_ACC 0x01
#define DN_MENUVER_USR 0x02
#define DN_MENUVER_PRX 0x04
#define DN_MENUVER_UIC 0x08
-extern struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
-extern struct sock *dn_find_by_skb(struct sk_buff *skb);
+struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
+struct sock *dn_find_by_skb(struct sk_buff *skb);
#define DN_ASCBUF_LEN 9
-extern char *dn_addr2asc(__u16, char *);
-extern int dn_destroy_timer(struct sock *sk);
+char *dn_addr2asc(__u16, char *);
+int dn_destroy_timer(struct sock *sk);
-extern int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf, unsigned char type);
-extern int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr, unsigned char *type);
+int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf,
+ unsigned char type);
+int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr,
+ unsigned char *type);
-extern void dn_start_slow_timer(struct sock *sk);
-extern void dn_stop_slow_timer(struct sock *sk);
+void dn_start_slow_timer(struct sock *sk);
+void dn_stop_slow_timer(struct sock *sk);
extern __le16 decnet_address;
extern int decnet_debug_level;
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
index b9e32db03f20..20b5ab06032d 100644
--- a/include/net/dn_dev.h
+++ b/include/net/dn_dev.h
@@ -148,27 +148,27 @@ struct rtnode_hello_message {
} __packed;
-extern void dn_dev_init(void);
-extern void dn_dev_cleanup(void);
+void dn_dev_init(void);
+void dn_dev_cleanup(void);
-extern int dn_dev_ioctl(unsigned int cmd, void __user *arg);
+int dn_dev_ioctl(unsigned int cmd, void __user *arg);
-extern void dn_dev_devices_off(void);
-extern void dn_dev_devices_on(void);
+void dn_dev_devices_off(void);
+void dn_dev_devices_on(void);
-extern void dn_dev_init_pkt(struct sk_buff *skb);
-extern void dn_dev_veri_pkt(struct sk_buff *skb);
-extern void dn_dev_hello(struct sk_buff *skb);
+void dn_dev_init_pkt(struct sk_buff *skb);
+void dn_dev_veri_pkt(struct sk_buff *skb);
+void dn_dev_hello(struct sk_buff *skb);
-extern void dn_dev_up(struct net_device *);
-extern void dn_dev_down(struct net_device *);
+void dn_dev_up(struct net_device *);
+void dn_dev_down(struct net_device *);
-extern int dn_dev_set_default(struct net_device *dev, int force);
-extern struct net_device *dn_dev_get_default(void);
-extern int dn_dev_bind_default(__le16 *addr);
+int dn_dev_set_default(struct net_device *dev, int force);
+struct net_device *dn_dev_get_default(void);
+int dn_dev_bind_default(__le16 *addr);
-extern int register_dnaddr_notifier(struct notifier_block *nb);
-extern int unregister_dnaddr_notifier(struct notifier_block *nb);
+int register_dnaddr_notifier(struct notifier_block *nb);
+int unregister_dnaddr_notifier(struct notifier_block *nb);
static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
{
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
index 74004af31c48..f2ca135ddcc9 100644
--- a/include/net/dn_fib.h
+++ b/include/net/dn_fib.h
@@ -95,41 +95,38 @@ struct dn_fib_table {
/*
* dn_fib.c
*/
-extern void dn_fib_init(void);
-extern void dn_fib_cleanup(void);
-
-extern int dn_fib_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg);
-extern struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
- struct nlattr *attrs[],
- const struct nlmsghdr *nlh, int *errp);
-extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
- const struct flowidn *fld,
- struct dn_fib_res *res);
-extern void dn_fib_release_info(struct dn_fib_info *fi);
-extern void dn_fib_flush(void);
-extern void dn_fib_select_multipath(const struct flowidn *fld,
- struct dn_fib_res *res);
+void dn_fib_init(void);
+void dn_fib_cleanup(void);
+
+int dn_fib_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
+ struct nlattr *attrs[],
+ const struct nlmsghdr *nlh, int *errp);
+int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
+ const struct flowidn *fld, struct dn_fib_res *res);
+void dn_fib_release_info(struct dn_fib_info *fi);
+void dn_fib_flush(void);
+void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res);
/*
* dn_tables.c
*/
-extern struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
-extern struct dn_fib_table *dn_fib_empty_table(void);
-extern void dn_fib_table_init(void);
-extern void dn_fib_table_cleanup(void);
+struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
+struct dn_fib_table *dn_fib_empty_table(void);
+void dn_fib_table_init(void);
+void dn_fib_table_cleanup(void);
/*
* dn_rules.c
*/
-extern void dn_fib_rules_init(void);
-extern void dn_fib_rules_cleanup(void);
-extern unsigned int dnet_addr_type(__le16 addr);
-extern int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
+void dn_fib_rules_init(void);
+void dn_fib_rules_cleanup(void);
+unsigned int dnet_addr_type(__le16 addr);
+int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
-extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
-extern void dn_fib_free_info(struct dn_fib_info *fi);
+void dn_fib_free_info(struct dn_fib_info *fi);
static inline void dn_fib_info_put(struct dn_fib_info *fi)
{
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
index 4cb4ae7fb81f..fac4e3f4a6d3 100644
--- a/include/net/dn_neigh.h
+++ b/include/net/dn_neigh.h
@@ -16,12 +16,12 @@ struct dn_neigh {
__u8 priority;
};
-extern void dn_neigh_init(void);
-extern void dn_neigh_cleanup(void);
-extern int dn_neigh_router_hello(struct sk_buff *skb);
-extern int dn_neigh_endnode_hello(struct sk_buff *skb);
-extern void dn_neigh_pointopoint_hello(struct sk_buff *skb);
-extern int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
+void dn_neigh_init(void);
+void dn_neigh_cleanup(void);
+int dn_neigh_router_hello(struct sk_buff *skb);
+int dn_neigh_endnode_hello(struct sk_buff *skb);
+void dn_neigh_pointopoint_hello(struct sk_buff *skb);
+int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
extern struct neigh_table dn_neigh_table;
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
index e43a2893f132..3a3e33d18456 100644
--- a/include/net/dn_nsp.h
+++ b/include/net/dn_nsp.h
@@ -15,29 +15,32 @@
*******************************************************************************/
/* dn_nsp.c functions prototyping */
-extern void dn_nsp_send_data_ack(struct sock *sk);
-extern void dn_nsp_send_oth_ack(struct sock *sk);
-extern void dn_nsp_delayed_ack(struct sock *sk);
-extern void dn_send_conn_ack(struct sock *sk);
-extern void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
-extern void dn_nsp_send_disc(struct sock *sk, unsigned char type,
- unsigned short reason, gfp_t gfp);
-extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
- unsigned short reason);
-extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
-extern void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
-
-extern void dn_nsp_output(struct sock *sk);
-extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum);
-extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp, int oob);
-extern unsigned long dn_nsp_persist(struct sock *sk);
-extern int dn_nsp_xmit_timeout(struct sock *sk);
-
-extern int dn_nsp_rx(struct sk_buff *);
-extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
-
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err);
+void dn_nsp_send_data_ack(struct sock *sk);
+void dn_nsp_send_oth_ack(struct sock *sk);
+void dn_nsp_delayed_ack(struct sock *sk);
+void dn_send_conn_ack(struct sock *sk);
+void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
+void dn_nsp_send_disc(struct sock *sk, unsigned char type,
+ unsigned short reason, gfp_t gfp);
+void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
+ unsigned short reason);
+void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
+void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
+
+void dn_nsp_output(struct sock *sk);
+int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb,
+ struct sk_buff_head *q, unsigned short acknum);
+void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp,
+ int oob);
+unsigned long dn_nsp_persist(struct sock *sk);
+int dn_nsp_xmit_timeout(struct sock *sk);
+
+int dn_nsp_rx(struct sk_buff *);
+int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+
+struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
+struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock,
+ long timeo, int *err);
#define NSP_REASON_OK 0 /* No error */
#define NSP_REASON_NR 1 /* No resources */
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 2e9d317c82dc..b409ad6b8d7a 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -15,10 +15,11 @@
GNU General Public License for more details.
*******************************************************************************/
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-extern int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *, struct sock *sk, int flags);
-extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
-extern void dn_rt_cache_flush(int delay);
+struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
+int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *,
+ struct sock *sk, int flags);
+int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
+void dn_rt_cache_flush(int delay);
/* Masks for flags field */
#define DN_RT_F_PID 0x07 /* Mask for packet type */
@@ -92,8 +93,8 @@ static inline bool dn_is_output_route(struct dn_route *rt)
return rt->fld.flowidn_iif == 0;
}
-extern void dn_route_init(void);
-extern void dn_route_cleanup(void);
+void dn_route_init(void);
+void dn_route_cleanup(void);
#include <net/sock.h>
#include <linux/if_arp.h>
diff --git a/include/net/dst.h b/include/net/dst.h
index 3bc4865f8267..44995c13e941 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -106,7 +106,7 @@ struct dst_entry {
};
};
-extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
+u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[];
#define DST_METRICS_READ_ONLY 0x1UL
@@ -119,7 +119,7 @@ static inline bool dst_metrics_read_only(const struct dst_entry *dst)
return dst->_metrics & DST_METRICS_READ_ONLY;
}
-extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
+void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
@@ -262,7 +262,7 @@ static inline struct dst_entry *dst_clone(struct dst_entry *dst)
return dst;
}
-extern void dst_release(struct dst_entry *dst);
+void dst_release(struct dst_entry *dst);
static inline void refdst_drop(unsigned long refdst)
{
@@ -362,12 +362,11 @@ static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
return child;
}
-extern int dst_discard(struct sk_buff *skb);
-extern void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
- int initial_ref, int initial_obsolete,
- unsigned short flags);
-extern void __dst_free(struct dst_entry *dst);
-extern struct dst_entry *dst_destroy(struct dst_entry *dst);
+int dst_discard(struct sk_buff *skb);
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
+ int initial_obsolete, unsigned short flags);
+void __dst_free(struct dst_entry *dst);
+struct dst_entry *dst_destroy(struct dst_entry *dst);
static inline void dst_free(struct dst_entry *dst)
{
@@ -463,7 +462,7 @@ static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
return dst;
}
-extern void dst_init(void);
+void dst_init(void);
/* Flags for xfrm_lookup flags argument. */
enum {
@@ -479,10 +478,22 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
{
return dst_orig;
}
+
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+ return NULL;
+}
+
#else
-extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
- const struct flowi *fl, struct sock *sk,
- int flags);
+struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl, struct sock *sk,
+ int flags);
+
+/* skb attached with this dst needs transformation if dst->xfrm is valid */
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+ return dst->xfrm;
+}
#endif
#endif /* _NET_DST_H */
diff --git a/include/net/esp.h b/include/net/esp.h
index d58451331dbd..c92213c38312 100644
--- a/include/net/esp.h
+++ b/include/net/esp.h
@@ -3,17 +3,7 @@
#include <linux/skbuff.h>
-struct crypto_aead;
-
-struct esp_data {
- /* 0..255 */
- int padlen;
-
- /* Confidentiality & Integrity */
- struct crypto_aead *aead;
-};
-
-extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
+void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
struct ip_esp_hdr;
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 4b2b557fb0e8..e584de16e4c3 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -115,14 +115,13 @@ static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
return frh->table;
}
-extern struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, struct net *);
-extern void fib_rules_unregister(struct fib_rules_ops *);
+struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *,
+ struct net *);
+void fib_rules_unregister(struct fib_rules_ops *);
-extern int fib_rules_lookup(struct fib_rules_ops *,
- struct flowi *, int flags,
- struct fib_lookup_arg *);
-extern int fib_default_rule_add(struct fib_rules_ops *,
- u32 pref, u32 table,
- u32 flags);
-extern u32 fib_default_rule_pref(struct fib_rules_ops *ops);
+int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
+ struct fib_lookup_arg *);
+int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
+ u32 flags);
+u32 fib_default_rule_pref(struct fib_rules_ops *ops);
#endif
diff --git a/include/net/flow.h b/include/net/flow.h
index 628e11b98c58..65ce471d2ab5 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -215,12 +215,13 @@ typedef struct flow_cache_object *(*flow_resolve_t)(
struct net *net, const struct flowi *key, u16 family,
u8 dir, struct flow_cache_object *oldobj, void *ctx);
-extern struct flow_cache_object *flow_cache_lookup(
- struct net *net, const struct flowi *key, u16 family,
- u8 dir, flow_resolve_t resolver, void *ctx);
+struct flow_cache_object *flow_cache_lookup(struct net *net,
+ const struct flowi *key, u16 family,
+ u8 dir, flow_resolve_t resolver,
+ void *ctx);
-extern void flow_cache_flush(void);
-extern void flow_cache_flush_deferred(void);
+void flow_cache_flush(void);
+void flow_cache_flush_deferred(void);
extern atomic_t flow_cache_genid;
#endif
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
index bb8271d487b7..7e64bd8bbda9 100644
--- a/include/net/flow_keys.h
+++ b/include/net/flow_keys.h
@@ -13,5 +13,6 @@ struct flow_keys {
u8 ip_proto;
};
-extern bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto);
#endif
diff --git a/include/net/garp.h b/include/net/garp.h
index 834d8add9e5f..abf33bbd2e6a 100644
--- a/include/net/garp.h
+++ b/include/net/garp.h
@@ -112,19 +112,18 @@ struct garp_port {
struct rcu_head rcu;
};
-extern int garp_register_application(struct garp_application *app);
-extern void garp_unregister_application(struct garp_application *app);
-
-extern int garp_init_applicant(struct net_device *dev,
- struct garp_application *app);
-extern void garp_uninit_applicant(struct net_device *dev,
- struct garp_application *app);
-
-extern int garp_request_join(const struct net_device *dev,
- const struct garp_application *app,
- const void *data, u8 len, u8 type);
-extern void garp_request_leave(const struct net_device *dev,
- const struct garp_application *app,
- const void *data, u8 len, u8 type);
+int garp_register_application(struct garp_application *app);
+void garp_unregister_application(struct garp_application *app);
+
+int garp_init_applicant(struct net_device *dev, struct garp_application *app);
+void garp_uninit_applicant(struct net_device *dev,
+ struct garp_application *app);
+
+int garp_request_join(const struct net_device *dev,
+ const struct garp_application *app, const void *data,
+ u8 len, u8 type);
+void garp_request_leave(const struct net_device *dev,
+ const struct garp_application *app,
+ const void *data, u8 len, u8 type);
#endif /* _NET_GARP_H */
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index cf8439ba4d11..ea4271dceff0 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -19,32 +19,31 @@ struct gnet_dump {
struct tc_stats tc_stats;
};
-extern int gnet_stats_start_copy(struct sk_buff *skb, int type,
+int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
+ struct gnet_dump *d);
+
+int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
+ int tc_stats_type, int xstats_type,
spinlock_t *lock, struct gnet_dump *d);
-extern int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
- int tc_stats_type,int xstats_type,
- spinlock_t *lock, struct gnet_dump *d);
-
-extern int gnet_stats_copy_basic(struct gnet_dump *d,
- struct gnet_stats_basic_packed *b);
-extern int gnet_stats_copy_rate_est(struct gnet_dump *d,
- const struct gnet_stats_basic_packed *b,
- struct gnet_stats_rate_est64 *r);
-extern int gnet_stats_copy_queue(struct gnet_dump *d,
- struct gnet_stats_queue *q);
-extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
-
-extern int gnet_stats_finish_copy(struct gnet_dump *d);
-
-extern int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_rate_est64 *rate_est,
- spinlock_t *stats_lock, struct nlattr *opt);
-extern void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_rate_est64 *rate_est);
-extern int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_rate_est64 *rate_est,
- spinlock_t *stats_lock, struct nlattr *opt);
-extern bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
- const struct gnet_stats_rate_est64 *rate_est);
+int gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_rate_est(struct gnet_dump *d,
+ const struct gnet_stats_basic_packed *b,
+ struct gnet_stats_rate_est64 *r);
+int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q);
+int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
+
+int gnet_stats_finish_copy(struct gnet_dump *d);
+
+int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_rate_est64 *rate_est,
+ spinlock_t *stats_lock, struct nlattr *opt);
+void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_rate_est64 *rate_est);
+int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_rate_est64 *rate_est,
+ spinlock_t *stats_lock, struct nlattr *opt);
+bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
+ const struct gnet_stats_rate_est64 *rate_est);
#endif
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 8e0b6c856a13..9b787b62cf16 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -122,7 +122,7 @@ struct genl_ops {
struct list_head ops_list;
};
-extern int __genl_register_family(struct genl_family *family);
+int __genl_register_family(struct genl_family *family);
static inline int genl_register_family(struct genl_family *family)
{
@@ -130,8 +130,8 @@ static inline int genl_register_family(struct genl_family *family)
return __genl_register_family(family);
}
-extern int __genl_register_family_with_ops(struct genl_family *family,
- struct genl_ops *ops, size_t n_ops);
+int __genl_register_family_with_ops(struct genl_family *family,
+ struct genl_ops *ops, size_t n_ops);
static inline int genl_register_family_with_ops(struct genl_family *family,
struct genl_ops *ops, size_t n_ops)
@@ -140,18 +140,18 @@ static inline int genl_register_family_with_ops(struct genl_family *family,
return __genl_register_family_with_ops(family, ops, n_ops);
}
-extern int genl_unregister_family(struct genl_family *family);
-extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
-extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
-extern int genl_register_mc_group(struct genl_family *family,
- struct genl_multicast_group *grp);
-extern void genl_unregister_mc_group(struct genl_family *family,
- struct genl_multicast_group *grp);
-extern void genl_notify(struct sk_buff *skb, struct net *net, u32 portid,
- u32 group, struct nlmsghdr *nlh, gfp_t flags);
+int genl_unregister_family(struct genl_family *family);
+int genl_register_ops(struct genl_family *, struct genl_ops *ops);
+int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
+int genl_register_mc_group(struct genl_family *family,
+ struct genl_multicast_group *grp);
+void genl_unregister_mc_group(struct genl_family *family,
+ struct genl_multicast_group *grp);
+void genl_notify(struct sk_buff *skb, struct net *net, u32 portid,
+ u32 group, struct nlmsghdr *nlh, gfp_t flags);
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
- struct genl_family *family, int flags, u8 cmd);
+ struct genl_family *family, int flags, u8 cmd);
/**
* genlmsg_nlhdr - Obtain netlink header from user specified header
diff --git a/include/net/gre.h b/include/net/gre.h
index 57e4afdf7879..dcd9ae3270d3 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -38,7 +38,13 @@ void gre_offload_exit(void);
void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
int hdr_len);
-struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum);
+
+static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
+ bool gre_csum)
+{
+ return iptunnel_handle_offloads(skb, gre_csum, SKB_GSO_GRE);
+}
+
static inline int ip_gre_calc_hlen(__be16 o_flags)
{
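
The hunk above turns gre_handle_offloads() into a thin inline over the generic iptunnel_handle_offloads() helper declared in ip_tunnels.h. As a rough illustration only (not part of this patch; the ERR_PTR() convention is assumed from how iptunnel_handle_offloads() is normally used), a transmit path could prepare an skb like this:

#include <linux/err.h>
#include <net/gre.h>

/* Illustrative sketch: set up GSO/checksum state for a GRE skb before
 * building the tunnel header; assumes the helper returns an ERR_PTR()
 * on failure.
 */
static int example_gre_xmit_prep(struct sk_buff **pskb, bool csum)
{
	struct sk_buff *skb = gre_handle_offloads(*pskb, csum);

	if (IS_ERR(skb))
		return PTR_ERR(skb);
	*pskb = skb;
	return 0;
}
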
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 081439fd070e..970028e13382 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -39,10 +39,10 @@ struct net_proto_family;
struct sk_buff;
struct net;
-extern void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
-extern int icmp_rcv(struct sk_buff *skb);
-extern void icmp_err(struct sk_buff *, u32 info);
-extern int icmp_init(void);
-extern void icmp_out_count(struct net *net, unsigned char type);
+void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+int icmp_rcv(struct sk_buff *skb);
+void icmp_err(struct sk_buff *skb, u32 info);
+int icmp_init(void);
+void icmp_out_count(struct net *net, unsigned char type);
#endif /* _ICMP_H */
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 02ef7727bb55..76d54270f2e2 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -66,11 +66,10 @@ struct inet6_ifaddr {
struct hlist_node addr_lst;
struct list_head if_list;
-#ifdef CONFIG_IPV6_PRIVACY
struct list_head tmp_list;
struct inet6_ifaddr *ifpub;
int regen_count;
-#endif
+
bool tokenized;
struct rcu_head rcu;
@@ -192,11 +191,9 @@ struct inet6_dev {
__u32 if_flags;
int dead;
-#ifdef CONFIG_IPV6_PRIVACY
u8 rndid[8];
struct timer_list regen_timer;
struct list_head tempaddr_list;
-#endif
struct in6_addr token;
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 04642c920431..f981ba7adeed 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -22,27 +22,25 @@ struct sk_buff;
struct sock;
struct sockaddr;
-extern int inet6_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax);
+int inet6_csk_bind_conflict(const struct sock *sk,
+ const struct inet_bind_bucket *tb, bool relax);
-extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
- struct flowi6 *fl6,
- const struct request_sock *req);
+struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6,
+ const struct request_sock *req);
-extern struct request_sock *inet6_csk_search_req(const struct sock *sk,
- struct request_sock ***prevp,
- const __be16 rport,
- const struct in6_addr *raddr,
- const struct in6_addr *laddr,
- const int iif);
+struct request_sock *inet6_csk_search_req(const struct sock *sk,
+ struct request_sock ***prevp,
+ const __be16 rport,
+ const struct in6_addr *raddr,
+ const struct in6_addr *laddr,
+ const int iif);
-extern void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
- struct request_sock *req,
- const unsigned long timeout);
+void inet6_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+ const unsigned long timeout);
-extern void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
+void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
-extern int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl);
+int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl);
-extern struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
+struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
#endif /* _INET6_CONNECTION_SOCK_H */
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index fd4ee016ba5c..ae0613544308 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -28,32 +28,17 @@
struct inet_hashinfo;
-static inline unsigned int inet6_ehashfn(struct net *net,
- const struct in6_addr *laddr, const u16 lport,
- const struct in6_addr *faddr, const __be16 fport)
+static inline unsigned int __inet6_ehashfn(const u32 lhash,
+ const u16 lport,
+ const u32 fhash,
+ const __be16 fport,
+ const u32 initval)
{
- u32 ports = (((u32)lport) << 16) | (__force u32)fport;
-
- return jhash_3words((__force u32)laddr->s6_addr32[3],
- ipv6_addr_jhash(faddr),
- ports,
- inet_ehash_secret + net_hash_mix(net));
-}
-
-static inline int inet6_sk_ehashfn(const struct sock *sk)
-{
- const struct inet_sock *inet = inet_sk(sk);
- const struct ipv6_pinfo *np = inet6_sk(sk);
- const struct in6_addr *laddr = &np->rcv_saddr;
- const struct in6_addr *faddr = &np->daddr;
- const __u16 lport = inet->inet_num;
- const __be16 fport = inet->inet_dport;
- struct net *net = sock_net(sk);
-
- return inet6_ehashfn(net, laddr, lport, faddr, fport);
+ const u32 ports = (((u32)lport) << 16) | (__force u32)fport;
+ return jhash_3words(lhash, fhash, ports, initval);
}
-extern int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
+int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
/*
* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
@@ -61,21 +46,19 @@ extern int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
*
* The sockhash lock must be held as a reader here.
*/
-extern struct sock *__inet6_lookup_established(struct net *net,
- struct inet_hashinfo *hashinfo,
- const struct in6_addr *saddr,
- const __be16 sport,
- const struct in6_addr *daddr,
- const u16 hnum,
- const int dif);
-
-extern struct sock *inet6_lookup_listener(struct net *net,
- struct inet_hashinfo *hashinfo,
- const struct in6_addr *saddr,
- const __be16 sport,
- const struct in6_addr *daddr,
- const unsigned short hnum,
- const int dif);
+struct sock *__inet6_lookup_established(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const u16 hnum, const int dif);
+
+struct sock *inet6_lookup_listener(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const unsigned short hnum, const int dif);
static inline struct sock *__inet6_lookup(struct net *net,
struct inet_hashinfo *hashinfo,
@@ -110,9 +93,9 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
inet6_iif(skb));
}
-extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
- const struct in6_addr *saddr, const __be16 sport,
- const struct in6_addr *daddr, const __be16 dport,
- const int dif);
+struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
+ const struct in6_addr *saddr, const __be16 sport,
+ const struct in6_addr *daddr, const __be16 dport,
+ const int dif);
#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif /* _INET6_HASHTABLES_H */
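
With inet6_ehashfn() reduced to the secret-agnostic __inet6_ehashfn() above, the per-net secret must now be supplied by the caller. A plausible out-of-line counterpart is sketched below; the static secrets, the net_get_random_once() seeding and the __ipv6_addr_jhash() call are assumptions based on the surrounding series, not shown in this hunk.

#include <linux/net.h>
#include <net/ipv6.h>
#include <net/inet6_hashtables.h>

static unsigned int example_inet6_ehashfn(struct net *net,
					  const struct in6_addr *laddr, u16 lport,
					  const struct in6_addr *faddr, __be16 fport)
{
	static u32 ehash6_secret __read_mostly;
	static u32 hash6_secret __read_mostly;
	u32 lhash, fhash;

	/* Seed the secrets lazily, on first use. */
	net_get_random_once(&ehash6_secret, sizeof(ehash6_secret));
	net_get_random_once(&hash6_secret, sizeof(hash6_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, hash6_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       ehash6_secret + net_hash_mix(net));
}
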
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 234008782c8c..fe7994c48b75 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -13,30 +13,30 @@ struct sock;
struct sockaddr;
struct socket;
-extern int inet_release(struct socket *sock);
-extern int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
- int addr_len, int flags);
-extern int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
- int addr_len, int flags);
-extern int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
- int addr_len, int flags);
-extern int inet_accept(struct socket *sock, struct socket *newsock, int flags);
-extern int inet_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size);
-extern ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
-extern int inet_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags);
-extern int inet_shutdown(struct socket *sock, int how);
-extern int inet_listen(struct socket *sock, int backlog);
-extern void inet_sock_destruct(struct sock *sk);
-extern int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
-extern int inet_getname(struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer);
-extern int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
-extern int inet_ctl_sock_create(struct sock **sk, unsigned short family,
- unsigned short type, unsigned char protocol,
- struct net *net);
+int inet_release(struct socket *sock);
+int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
+int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
+int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
+int inet_accept(struct socket *sock, struct socket *newsock, int flags);
+int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ size_t size);
+ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
+ size_t size, int flags);
+int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ size_t size, int flags);
+int inet_shutdown(struct socket *sock, int how);
+int inet_listen(struct socket *sock, int backlog);
+void inet_sock_destruct(struct sock *sk);
+int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
+ int peer);
+int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+int inet_ctl_sock_create(struct sock **sk, unsigned short family,
+ unsigned short type, unsigned char protocol,
+ struct net *net);
static inline void inet_ctl_sock_destroy(struct sock *sk)
{
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index de2c78529afa..c55aeed41ace 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -146,9 +146,9 @@ static inline void *inet_csk_ca(const struct sock *sk)
return (void *)inet_csk(sk)->icsk_ca_priv;
}
-extern struct sock *inet_csk_clone_lock(const struct sock *sk,
- const struct request_sock *req,
- const gfp_t priority);
+struct sock *inet_csk_clone_lock(const struct sock *sk,
+ const struct request_sock *req,
+ const gfp_t priority);
enum inet_csk_ack_state_t {
ICSK_ACK_SCHED = 1,
@@ -157,11 +157,11 @@ enum inet_csk_ack_state_t {
ICSK_ACK_PUSHED2 = 8
};
-extern void inet_csk_init_xmit_timers(struct sock *sk,
- void (*retransmit_handler)(unsigned long),
- void (*delack_handler)(unsigned long),
- void (*keepalive_handler)(unsigned long));
-extern void inet_csk_clear_xmit_timers(struct sock *sk);
+void inet_csk_init_xmit_timers(struct sock *sk,
+ void (*retransmit_handler)(unsigned long),
+ void (*delack_handler)(unsigned long),
+ void (*keepalive_handler)(unsigned long));
+void inet_csk_clear_xmit_timers(struct sock *sk);
static inline void inet_csk_schedule_ack(struct sock *sk)
{
@@ -178,8 +178,8 @@ static inline void inet_csk_delack_init(struct sock *sk)
memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
}
-extern void inet_csk_delete_keepalive_timer(struct sock *sk);
-extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
+void inet_csk_delete_keepalive_timer(struct sock *sk);
+void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
#ifdef INET_CSK_DEBUG
extern const char inet_csk_timer_bug_msg[];
@@ -241,23 +241,21 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
#endif
}
-extern struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
-extern struct request_sock *inet_csk_search_req(const struct sock *sk,
- struct request_sock ***prevp,
- const __be16 rport,
- const __be32 raddr,
- const __be32 laddr);
-extern int inet_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax);
-extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
+struct request_sock *inet_csk_search_req(const struct sock *sk,
+ struct request_sock ***prevp,
+ const __be16 rport,
+ const __be32 raddr,
+ const __be32 laddr);
+int inet_csk_bind_conflict(const struct sock *sk,
+ const struct inet_bind_bucket *tb, bool relax);
+int inet_csk_get_port(struct sock *sk, unsigned short snum);
-extern struct dst_entry* inet_csk_route_req(struct sock *sk,
- struct flowi4 *fl4,
+struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4,
+ const struct request_sock *req);
+struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk,
const struct request_sock *req);
-extern struct dst_entry* inet_csk_route_child_sock(struct sock *sk,
- struct sock *newsk,
- const struct request_sock *req);
static inline void inet_csk_reqsk_queue_add(struct sock *sk,
struct request_sock *req,
@@ -266,9 +264,8 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk,
reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
}
-extern void inet_csk_reqsk_queue_hash_add(struct sock *sk,
- struct request_sock *req,
- unsigned long timeout);
+void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+ unsigned long timeout);
static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
struct request_sock *req)
@@ -315,13 +312,13 @@ static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
reqsk_free(req);
}
-extern void inet_csk_reqsk_queue_prune(struct sock *parent,
- const unsigned long interval,
- const unsigned long timeout,
- const unsigned long max_rto);
+void inet_csk_reqsk_queue_prune(struct sock *parent,
+ const unsigned long interval,
+ const unsigned long timeout,
+ const unsigned long max_rto);
-extern void inet_csk_destroy_sock(struct sock *sk);
-extern void inet_csk_prepare_forced_close(struct sock *sk);
+void inet_csk_destroy_sock(struct sock *sk);
+void inet_csk_prepare_forced_close(struct sock *sk);
/*
* LISTEN is a special case for poll..
@@ -332,15 +329,15 @@ static inline unsigned int inet_csk_listen_poll(const struct sock *sk)
(POLLIN | POLLRDNORM) : 0;
}
-extern int inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
-extern void inet_csk_listen_stop(struct sock *sk);
+int inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
+void inet_csk_listen_stop(struct sock *sk);
-extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
+void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
-extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
-extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
+int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
-extern struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
+struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
#endif /* _INET_CONNECTION_SOCK_H */
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index bfcbc0017950..6f59de98dabd 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -64,6 +64,10 @@ struct inet_frags {
rwlock_t lock ____cacheline_aligned_in_smp;
int secret_interval;
struct timer_list secret_timer;
+
+ /* The first call to hashfn is responsible for initializing
+ * rnd. This is best done with net_get_random_once.
+ */
u32 rnd;
int qsize;
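
The new comment documents the lazy seeding contract for rnd. A minimal sketch of a hash function honouring it follows; the names are hypothetical, while jhash_3words() and net_get_random_once() are the helpers the comment alludes to.

#include <linux/jhash.h>
#include <linux/net.h>
#include <net/inet_frag.h>

static struct inet_frags example_frags;

/* Seed example_frags.rnd exactly once, on the first hash computation. */
static unsigned int example_frag_hashfn(__be32 saddr, __be32 daddr, u32 id)
{
	net_get_random_once(&example_frags.rnd, sizeof(example_frags.rnd));
	return jhash_3words((__force u32)saddr, (__force u32)daddr, id,
			    example_frags.rnd);
}
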
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index ef83d9e844b5..1bdb47715def 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -37,12 +37,11 @@
#include <asm/byteorder.h>
/* This is for all connections with a full identity, no wildcards.
- * One chain is dedicated to TIME_WAIT sockets.
- * I'll experiment with dynamic table growth later.
+ * The 'e' prefix stands for Established, but the table really holds
+ * all sockets except those in LISTEN state.
*/
struct inet_ehash_bucket {
struct hlist_nulls_head chain;
- struct hlist_nulls_head twchain;
};
/* There are a few simple rules, which allow for local port reuse by
@@ -123,7 +122,6 @@ struct inet_hashinfo {
*
* TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
*
- * TIME_WAIT sockets use a separate chain (twchain).
*/
struct inet_ehash_bucket *ehash;
spinlock_t *ehash_locks;
@@ -218,22 +216,21 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
}
}
-extern struct inet_bind_bucket *
- inet_bind_bucket_create(struct kmem_cache *cachep,
- struct net *net,
- struct inet_bind_hashbucket *head,
- const unsigned short snum);
-extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
- struct inet_bind_bucket *tb);
+struct inet_bind_bucket *
+inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
+ struct inet_bind_hashbucket *head,
+ const unsigned short snum);
+void inet_bind_bucket_destroy(struct kmem_cache *cachep,
+ struct inet_bind_bucket *tb);
-static inline int inet_bhashfn(struct net *net,
- const __u16 lport, const int bhash_size)
+static inline int inet_bhashfn(struct net *net, const __u16 lport,
+ const int bhash_size)
{
return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
-extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
- const unsigned short snum);
+void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+ const unsigned short snum);
/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(struct net *net, const unsigned short num)
@@ -247,23 +244,22 @@ static inline int inet_sk_listen_hashfn(const struct sock *sk)
}
/* Caller must disable local BH processing. */
-extern int __inet_inherit_port(struct sock *sk, struct sock *child);
+int __inet_inherit_port(struct sock *sk, struct sock *child);
-extern void inet_put_port(struct sock *sk);
+void inet_put_port(struct sock *sk);
void inet_hashinfo_init(struct inet_hashinfo *h);
-extern int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
-extern void inet_hash(struct sock *sk);
-extern void inet_unhash(struct sock *sk);
+int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
+void inet_hash(struct sock *sk);
+void inet_unhash(struct sock *sk);
-extern struct sock *__inet_lookup_listener(struct net *net,
- struct inet_hashinfo *hashinfo,
- const __be32 saddr,
- const __be16 sport,
- const __be32 daddr,
- const unsigned short hnum,
- const int dif);
+struct sock *__inet_lookup_listener(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr,
+ const unsigned short hnum,
+ const int dif);
static inline struct sock *inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
@@ -304,30 +300,17 @@ static inline struct sock *inet_lookup_listener(struct net *net,
((__force __u64)(__be32)(__saddr)));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
- ((inet_sk(__sk)->inet_portpair == (__ports)) && \
- (inet_sk(__sk)->inet_addrpair == (__cookie)) && \
+ (((__sk)->sk_portpair == (__ports)) && \
+ ((__sk)->sk_addrpair == (__cookie)) && \
(!(__sk)->sk_bound_dev_if || \
((__sk)->sk_bound_dev_if == (__dif))) && \
net_eq(sock_net(__sk), (__net)))
-#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)\
- ((inet_twsk(__sk)->tw_portpair == (__ports)) && \
- (inet_twsk(__sk)->tw_addrpair == (__cookie)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif))) && \
- net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
- ((inet_sk(__sk)->inet_portpair == (__ports)) && \
- (inet_sk(__sk)->inet_daddr == (__saddr)) && \
- (inet_sk(__sk)->inet_rcv_saddr == (__daddr)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif))) && \
- net_eq(sock_net(__sk), (__net)))
-#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
- ((inet_twsk(__sk)->tw_portpair == (__ports)) && \
- (inet_twsk(__sk)->tw_daddr == (__saddr)) && \
- (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \
+ (((__sk)->sk_portpair == (__ports)) && \
+ ((__sk)->sk_daddr == (__saddr)) && \
+ ((__sk)->sk_rcv_saddr == (__daddr)) && \
(!(__sk)->sk_bound_dev_if || \
((__sk)->sk_bound_dev_if == (__dif))) && \
net_eq(sock_net(__sk), (__net)))
@@ -339,10 +322,11 @@ static inline struct sock *inet_lookup_listener(struct net *net,
*
* Local BH must be disabled here.
*/
-extern struct sock * __inet_lookup_established(struct net *net,
- struct inet_hashinfo *hashinfo,
- const __be32 saddr, const __be16 sport,
- const __be32 daddr, const u16 hnum, const int dif);
+struct sock *__inet_lookup_established(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const u16 hnum,
+ const int dif);
static inline struct sock *
inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
@@ -399,13 +383,14 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
iph->daddr, dport, inet_iif(skb));
}
-extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
- struct sock *sk,
- u32 port_offset,
- int (*check_established)(struct inet_timewait_death_row *,
- struct sock *, __u16, struct inet_timewait_sock **),
- int (*hash)(struct sock *sk, struct inet_timewait_sock *twp));
+int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk, u32 port_offset,
+ int (*check_established)(struct inet_timewait_death_row *,
+ struct sock *, __u16,
+ struct inet_timewait_sock **),
+ int (*hash)(struct sock *sk,
+ struct inet_timewait_sock *twp));
-extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
- struct sock *sk);
+int inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk);
#endif /* _INET_HASHTABLES_H */
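
Because the dedicated twchain is gone, ESTABLISHED and TIME_WAIT sockets now share the single ehash chain and are matched by the same socket-level INET_MATCH(). A simplified lookup loop is sketched below for illustration only; RCU re-validation, refcounting and the nulls restart logic are deliberately omitted.

#include <net/inet_hashtables.h>

static struct sock *example_lookup(struct net *net,
				   struct inet_ehash_bucket *head,
				   __addrpair cookie, __be32 saddr,
				   __be32 daddr, __portpair ports, int dif)
{
	struct sock *sk;
	struct hlist_nulls_node *node;

	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		/* A hit may be an established or a timewait socket. */
		if (INET_MATCH(sk, net, cookie, saddr, daddr, ports, dif))
			return sk;
	}
	return NULL;
}
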
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index b21a7f06d6a4..1833c3f389ee 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -70,13 +70,14 @@ struct ip_options_data {
struct inet_request_sock {
struct request_sock req;
-#if IS_ENABLED(CONFIG_IPV6)
- u16 inet6_rsk_offset;
-#endif
- __be16 loc_port;
- __be32 loc_addr;
- __be32 rmt_addr;
- __be16 rmt_port;
+#define ir_loc_addr req.__req_common.skc_rcv_saddr
+#define ir_rmt_addr req.__req_common.skc_daddr
+#define ir_num req.__req_common.skc_num
+#define ir_rmt_port req.__req_common.skc_dport
+#define ir_v6_rmt_addr req.__req_common.skc_v6_daddr
+#define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr
+#define ir_iif req.__req_common.skc_bound_dev_if
+
kmemcheck_bitfield_begin(flags);
u16 snd_wscale : 4,
rcv_wscale : 4,
@@ -88,6 +89,7 @@ struct inet_request_sock {
no_srccheck: 1;
kmemcheck_bitfield_end(flags);
struct ip_options_rcu *opt;
+ struct sk_buff *pktopts;
};
static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
@@ -103,6 +105,9 @@ struct inet_cork {
int length; /* Total length of all frames */
struct dst_entry *dst;
u8 tx_flags;
+ __u8 ttl;
+ __s16 tos;
+ char priority;
};
struct inet_cork_full {
@@ -143,10 +148,8 @@ struct inet_sock {
/* Socket demultiplex comparisons on incoming packets. */
#define inet_daddr sk.__sk_common.skc_daddr
#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr
-#define inet_addrpair sk.__sk_common.skc_addrpair
#define inet_dport sk.__sk_common.skc_dport
#define inet_num sk.__sk_common.skc_num
-#define inet_portpair sk.__sk_common.skc_portpair
__be32 inet_saddr;
__s16 uc_ttl;
@@ -199,32 +202,18 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
}
#endif
-extern int inet_sk_rebuild_header(struct sock *sk);
-
-extern u32 inet_ehash_secret;
-extern u32 ipv6_hash_secret;
-extern void build_ehash_secret(void);
+int inet_sk_rebuild_header(struct sock *sk);
-static inline unsigned int inet_ehashfn(struct net *net,
- const __be32 laddr, const __u16 lport,
- const __be32 faddr, const __be16 fport)
+static inline unsigned int __inet_ehashfn(const __be32 laddr,
+ const __u16 lport,
+ const __be32 faddr,
+ const __be16 fport,
+ u32 initval)
{
return jhash_3words((__force __u32) laddr,
(__force __u32) faddr,
((__u32) lport) << 16 | (__force __u32)fport,
- inet_ehash_secret + net_hash_mix(net));
-}
-
-static inline int inet_sk_ehashfn(const struct sock *sk)
-{
- const struct inet_sock *inet = inet_sk(sk);
- const __be32 laddr = inet->inet_rcv_saddr;
- const __u16 lport = inet->inet_num;
- const __be32 faddr = inet->inet_daddr;
- const __be16 fport = inet->inet_dport;
- struct net *net = sock_net(sk);
-
- return inet_ehashfn(net, laddr, lport, faddr, fport);
+ initval);
}
static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
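
As with the IPv6 variant, __inet_ehashfn() now takes the secret as initval, so inet_sk_ehashfn() presumably moves out of line next to a lazily seeded secret. The sketch below is written under that assumption; the secret's name and placement are hypothetical.

#include <linux/net.h>
#include <net/inet_sock.h>

static u32 example_ehash_secret __read_mostly;

static unsigned int example_sk_ehashfn(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	/* Seeded on first use instead of via build_ehash_secret(). */
	net_get_random_once(&example_ehash_secret,
			    sizeof(example_ehash_secret));
	return __inet_ehashfn(inet->inet_rcv_saddr, inet->inet_num,
			      inet->inet_daddr, inet->inet_dport,
			      example_ehash_secret + net_hash_mix(sock_net(sk)));
}
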
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index f908dfc06505..71c6e264e5b5 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -58,6 +58,11 @@ struct inet_hashinfo;
# define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
#endif
+static inline u32 inet_tw_time_stamp(void)
+{
+ return jiffies;
+}
+
/* TIME_WAIT reaping mechanism. */
#define INET_TWDR_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
@@ -83,9 +88,9 @@ struct inet_timewait_death_row {
int sysctl_max_tw_buckets;
};
-extern void inet_twdr_hangman(unsigned long data);
-extern void inet_twdr_twkill_work(struct work_struct *work);
-extern void inet_twdr_twcal_tick(unsigned long data);
+void inet_twdr_hangman(unsigned long data);
+void inet_twdr_twkill_work(struct work_struct *work);
+void inet_twdr_twcal_tick(unsigned long data);
struct inet_bind_bucket;
@@ -111,11 +116,11 @@ struct inet_timewait_sock {
#define tw_prot __tw_common.skc_prot
#define tw_net __tw_common.skc_net
#define tw_daddr __tw_common.skc_daddr
+#define tw_v6_daddr __tw_common.skc_v6_daddr
#define tw_rcv_saddr __tw_common.skc_rcv_saddr
-#define tw_addrpair __tw_common.skc_addrpair
+#define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr
#define tw_dport __tw_common.skc_dport
#define tw_num __tw_common.skc_num
-#define tw_portpair __tw_common.skc_portpair
int tw_timeout;
volatile unsigned char tw_substate;
@@ -130,26 +135,14 @@ struct inet_timewait_sock {
tw_transparent : 1,
tw_pad : 6, /* 6 bits hole */
tw_tos : 8,
- tw_ipv6_offset : 16;
+ tw_pad2 : 16; /* 16 bits hole */
kmemcheck_bitfield_end(flags);
- unsigned long tw_ttd;
+ u32 tw_ttd;
struct inet_bind_bucket *tw_tb;
struct hlist_node tw_death_node;
};
#define tw_tclass tw_tos
-static inline void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
- struct hlist_nulls_head *list)
-{
- hlist_nulls_add_head_rcu(&tw->tw_node, list);
-}
-
-static inline void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
- struct hlist_head *list)
-{
- hlist_add_head(&tw->tw_bind_node, list);
-}
-
static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
{
return !hlist_unhashed(&tw->tw_death_node);
@@ -189,34 +182,28 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
return (struct inet_timewait_sock *)sk;
}
-static inline __be32 sk_rcv_saddr(const struct sock *sk)
-{
-/* both inet_sk() and inet_twsk() store rcv_saddr in skc_rcv_saddr */
- return sk->__sk_common.skc_rcv_saddr;
-}
-
-extern void inet_twsk_put(struct inet_timewait_sock *tw);
+void inet_twsk_free(struct inet_timewait_sock *tw);
+void inet_twsk_put(struct inet_timewait_sock *tw);
-extern int inet_twsk_unhash(struct inet_timewait_sock *tw);
+int inet_twsk_unhash(struct inet_timewait_sock *tw);
-extern int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
- struct inet_hashinfo *hashinfo);
+int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+ struct inet_hashinfo *hashinfo);
-extern struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
- const int state);
+struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+ const int state);
-extern void __inet_twsk_hashdance(struct inet_timewait_sock *tw,
- struct sock *sk,
- struct inet_hashinfo *hashinfo);
+void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+ struct inet_hashinfo *hashinfo);
-extern void inet_twsk_schedule(struct inet_timewait_sock *tw,
- struct inet_timewait_death_row *twdr,
- const int timeo, const int timewait_len);
-extern void inet_twsk_deschedule(struct inet_timewait_sock *tw,
- struct inet_timewait_death_row *twdr);
+void inet_twsk_schedule(struct inet_timewait_sock *tw,
+ struct inet_timewait_death_row *twdr,
+ const int timeo, const int timewait_len);
+void inet_twsk_deschedule(struct inet_timewait_sock *tw,
+ struct inet_timewait_death_row *twdr);
-extern void inet_twsk_purge(struct inet_hashinfo *hashinfo,
- struct inet_timewait_death_row *twdr, int family);
+void inet_twsk_purge(struct inet_hashinfo *hashinfo,
+ struct inet_timewait_death_row *twdr, int family);
static inline
struct net *twsk_net(const struct inet_timewait_sock *twsk)
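
tw_ttd shrinks to a u32 jiffies snapshot taken with the new inet_tw_time_stamp() helper. One wrap-safe way a caller could test expiry against it is sketched below; this helper is hypothetical and not part of the patch.

#include <net/inet_timewait_sock.h>

static bool example_tw_expired(const struct inet_timewait_sock *tw)
{
	/* Signed difference keeps the comparison correct across
	 * 32-bit jiffies wrap-around.
	 */
	return (s32)(inet_tw_time_stamp() - tw->tw_ttd) >= 0;
}
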
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 53f464d7cddc..f4e127af4e17 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -120,9 +120,9 @@ static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from
}
}
-extern void inet_peer_base_init(struct inet_peer_base *);
+void inet_peer_base_init(struct inet_peer_base *);
-void inet_initpeers(void) __init;
+void inet_initpeers(void) __init;
#define INETPEER_METRICS_NEW (~(u32) 0)
@@ -159,11 +159,11 @@ static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
}
/* can be called from BH context or outside */
-extern void inet_putpeer(struct inet_peer *p);
-extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+void inet_putpeer(struct inet_peer *p);
+bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
-extern void inetpeer_invalidate_tree(struct inet_peer_base *);
-extern void inetpeer_invalidate_family(int family);
+void inetpeer_invalidate_tree(struct inet_peer_base *);
+void inetpeer_invalidate_family(int family);
/*
* temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
diff --git a/include/net/ip.h b/include/net/ip.h
index 5e5268807a1c..217bc5bfc6c6 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -28,6 +28,7 @@
#include <linux/skbuff.h>
#include <net/inet_sock.h>
+#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
@@ -56,6 +57,9 @@ struct ipcm_cookie {
int oif;
struct ip_options_rcu *opt;
__u8 tx_flags;
+ __u8 ttl;
+ __s16 tos;
+ char priority;
};
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
@@ -86,64 +90,71 @@ struct packet_type;
struct rtable;
struct sockaddr;
-extern int igmp_mc_proc_init(void);
+int igmp_mc_proc_init(void);
/*
* Functions provided by ip.c
*/
-extern int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
- __be32 saddr, __be32 daddr,
- struct ip_options_rcu *opt);
-extern int ip_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev);
-extern int ip_local_deliver(struct sk_buff *skb);
-extern int ip_mr_input(struct sk_buff *skb);
-extern int ip_output(struct sk_buff *skb);
-extern int ip_mc_output(struct sk_buff *skb);
-extern int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
-extern int ip_do_nat(struct sk_buff *skb);
-extern void ip_send_check(struct iphdr *ip);
-extern int __ip_local_out(struct sk_buff *skb);
-extern int ip_local_out(struct sk_buff *skb);
-extern int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl);
-extern void ip_init(void);
-extern int ip_append_data(struct sock *sk, struct flowi4 *fl4,
- int getfrag(void *from, char *to, int offset, int len,
- int odd, struct sk_buff *skb),
- void *from, int len, int protolen,
- struct ipcm_cookie *ipc,
- struct rtable **rt,
- unsigned int flags);
-extern int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
-extern ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
- int offset, size_t size, int flags);
-extern struct sk_buff *__ip_make_skb(struct sock *sk,
- struct flowi4 *fl4,
- struct sk_buff_head *queue,
- struct inet_cork *cork);
-extern int ip_send_skb(struct net *net, struct sk_buff *skb);
-extern int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
-extern void ip_flush_pending_frames(struct sock *sk);
-extern struct sk_buff *ip_make_skb(struct sock *sk,
- struct flowi4 *fl4,
- int getfrag(void *from, char *to, int offset, int len,
- int odd, struct sk_buff *skb),
- void *from, int length, int transhdrlen,
- struct ipcm_cookie *ipc,
- struct rtable **rtp,
- unsigned int flags);
+int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+ __be32 saddr, __be32 daddr,
+ struct ip_options_rcu *opt);
+int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+ struct net_device *orig_dev);
+int ip_local_deliver(struct sk_buff *skb);
+int ip_mr_input(struct sk_buff *skb);
+int ip_output(struct sk_buff *skb);
+int ip_mc_output(struct sk_buff *skb);
+int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+int ip_do_nat(struct sk_buff *skb);
+void ip_send_check(struct iphdr *ip);
+int __ip_local_out(struct sk_buff *skb);
+int ip_local_out(struct sk_buff *skb);
+int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl);
+void ip_init(void);
+int ip_append_data(struct sock *sk, struct flowi4 *fl4,
+ int getfrag(void *from, char *to, int offset, int len,
+ int odd, struct sk_buff *skb),
+ void *from, int len, int protolen,
+ struct ipcm_cookie *ipc,
+ struct rtable **rt,
+ unsigned int flags);
+int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
+ struct sk_buff *skb);
+ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
+ int offset, size_t size, int flags);
+struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
+ struct sk_buff_head *queue,
+ struct inet_cork *cork);
+int ip_send_skb(struct net *net, struct sk_buff *skb);
+int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
+void ip_flush_pending_frames(struct sock *sk);
+struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
+ int getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen,
+ struct ipcm_cookie *ipc, struct rtable **rtp,
+ unsigned int flags);
static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
+static inline __u8 get_rttos(struct ipcm_cookie *ipc, struct inet_sock *inet)
+{
+ return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
+}
+
+static inline __u8 get_rtconn_flags(struct ipcm_cookie *ipc, struct sock *sk)
+{
+ return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
+}
+
/* datagram.c */
-extern int ip4_datagram_connect(struct sock *sk,
- struct sockaddr *uaddr, int addr_len);
+int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
-extern void ip4_datagram_release_cb(struct sock *sk);
+void ip4_datagram_release_cb(struct sock *sk);
struct ip_reply_arg {
struct kvec iov[1];
@@ -184,16 +195,16 @@ extern struct ipv4_config ipv4_config;
#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
-extern unsigned long snmp_fold_field(void __percpu *mib[], int offt);
+unsigned long snmp_fold_field(void __percpu *mib[], int offt);
#if BITS_PER_LONG==32
-extern u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
+u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
#else
static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
{
return snmp_fold_field(mib, offt);
}
#endif
-extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
+int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
{
@@ -206,11 +217,7 @@ static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
}
}
-extern struct local_ports {
- seqlock_t lock;
- int range[2];
-} sysctl_local_ports;
-extern void inet_get_local_port_range(int *low, int *high);
+void inet_get_local_port_range(struct net *net, int *low, int *high);
extern unsigned long *sysctl_local_reserved_ports;
static inline int inet_is_reserved_local_port(int port)
@@ -231,9 +238,9 @@ extern int sysctl_ip_early_demux;
/* From ip_output.c */
extern int sysctl_ip_dynaddr;
-extern void ipfrag_init(void);
+void ipfrag_init(void);
-extern void ip_static_sysctl_init(void);
+void ip_static_sysctl_init(void);
static inline bool ip_is_fragment(const struct iphdr *iph)
{
@@ -262,7 +269,7 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
!(dst_metric_locked(dst, RTAX_MTU)));
}
-extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
+void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
{
@@ -367,7 +374,7 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
struct ipv6_pinfo *np = inet6_sk(sk);
memset(&np->saddr, 0, sizeof(np->saddr));
- memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
+ memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
}
#endif
}
@@ -390,7 +397,7 @@ static inline int sk_mc_loop(struct sock *sk)
return 1;
}
-extern bool ip_call_ra_chain(struct sk_buff *skb);
+bool ip_call_ra_chain(struct sk_buff *skb);
/*
* Functions provided by ip_fragment.c
@@ -428,50 +435,52 @@ int ip_frag_nqueues(struct net *net);
* Functions provided by ip_forward.c
*/
-extern int ip_forward(struct sk_buff *skb);
+int ip_forward(struct sk_buff *skb);
/*
* Functions provided by ip_options.c
*/
-extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
- __be32 daddr, struct rtable *rt, int is_frag);
-extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
-extern void ip_options_fragment(struct sk_buff *skb);
-extern int ip_options_compile(struct net *net,
- struct ip_options *opt, struct sk_buff *skb);
-extern int ip_options_get(struct net *net, struct ip_options_rcu **optp,
- unsigned char *data, int optlen);
-extern int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
- unsigned char __user *data, int optlen);
-extern void ip_options_undo(struct ip_options * opt);
-extern void ip_forward_options(struct sk_buff *skb);
-extern int ip_options_rcv_srr(struct sk_buff *skb);
+void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
+ __be32 daddr, struct rtable *rt, int is_frag);
+int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
+void ip_options_fragment(struct sk_buff *skb);
+int ip_options_compile(struct net *net, struct ip_options *opt,
+ struct sk_buff *skb);
+int ip_options_get(struct net *net, struct ip_options_rcu **optp,
+ unsigned char *data, int optlen);
+int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
+ unsigned char __user *data, int optlen);
+void ip_options_undo(struct ip_options *opt);
+void ip_forward_options(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb);
/*
* Functions provided by ip_sockglue.c
*/
-extern void ipv4_pktinfo_prepare(struct sk_buff *skb);
-extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
-extern int ip_cmsg_send(struct net *net,
- struct msghdr *msg, struct ipcm_cookie *ipc);
-extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen);
-extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);
-extern int compat_ip_setsockopt(struct sock *sk, int level,
- int optname, char __user *optval, unsigned int optlen);
-extern int compat_ip_getsockopt(struct sock *sk, int level,
- int optname, char __user *optval, int __user *optlen);
-extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
-
-extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
-extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
- __be16 port, u32 info, u8 *payload);
-extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
- u32 info);
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
+void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
+int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc);
+int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
+ unsigned int optlen);
+int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
+ int __user *optlen);
+int compat_ip_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
+int compat_ip_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int ip_ra_control(struct sock *sk, unsigned char on,
+ void (*destructor)(struct sock *));
+
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
+void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+ u32 info, u8 *payload);
+void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
+ u32 info);
#ifdef CONFIG_PROC_FS
-extern int ip_misc_proc_init(void);
+int ip_misc_proc_init(void);
#endif
#endif /* _IP_H */
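
The new ttl/tos/priority fields in ipcm_cookie let a per-packet IP_TOS travel from control-message parsing down to route lookup, with get_rttos() falling back to the socket TOS when none was supplied. A sketch of a sender using that path follows; the helper is hypothetical, and the "-1 means unset" convention is taken from get_rttos() above.

#include <net/ip.h>

static int example_prepare_send(struct sock *sk, struct msghdr *msg,
				struct ipcm_cookie *ipc, __u8 *rt_tos)
{
	int err;

	memset(ipc, 0, sizeof(*ipc));
	ipc->tos = -1;		/* no per-packet override yet */

	if (msg->msg_controllen) {
		err = ip_cmsg_send(sock_net(sk), msg, ipc);
		if (err)
			return err;
	}

	*rt_tos = get_rttos(ipc, inet_sk(sk));	/* cmsg TOS or socket TOS */
	return 0;
}
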
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index 7686e3f5033d..9e3c540c1b11 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -66,12 +66,14 @@ static inline void __tcp_v6_send_check(struct sk_buff *skb,
}
}
+#if IS_ENABLED(CONFIG_IPV6)
static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
- __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
+ __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
}
+#endif
int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto);
#endif
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 48ec25a7fcb6..2182525e4d74 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -165,6 +165,7 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
static inline void rt6_clean_expires(struct rt6_info *rt)
{
rt->rt6i_flags &= ~RTF_EXPIRES;
+ rt->dst.expires = 0;
}
static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
@@ -267,48 +268,39 @@ typedef struct rt6_info *(*pol_lookup_t)(struct net *,
* exported functions
*/
-extern struct fib6_table *fib6_get_table(struct net *net, u32 id);
-extern struct fib6_table *fib6_new_table(struct net *net, u32 id);
-extern struct dst_entry *fib6_rule_lookup(struct net *net,
- struct flowi6 *fl6, int flags,
- pol_lookup_t lookup);
+struct fib6_table *fib6_get_table(struct net *net, u32 id);
+struct fib6_table *fib6_new_table(struct net *net, u32 id);
+struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
+ int flags, pol_lookup_t lookup);
-extern struct fib6_node *fib6_lookup(struct fib6_node *root,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr);
+struct fib6_node *fib6_lookup(struct fib6_node *root,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
-struct fib6_node *fib6_locate(struct fib6_node *root,
- const struct in6_addr *daddr, int dst_len,
- const struct in6_addr *saddr, int src_len);
+struct fib6_node *fib6_locate(struct fib6_node *root,
+ const struct in6_addr *daddr, int dst_len,
+ const struct in6_addr *saddr, int src_len);
-extern void fib6_clean_all_ro(struct net *net,
- int (*func)(struct rt6_info *, void *arg),
- int prune, void *arg);
+void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
+ int prune, void *arg);
-extern void fib6_clean_all(struct net *net,
- int (*func)(struct rt6_info *, void *arg),
- int prune, void *arg);
+int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info);
-extern int fib6_add(struct fib6_node *root,
- struct rt6_info *rt,
- struct nl_info *info);
+int fib6_del(struct rt6_info *rt, struct nl_info *info);
-extern int fib6_del(struct rt6_info *rt,
- struct nl_info *info);
+void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info);
-extern void inet6_rt_notify(int event, struct rt6_info *rt,
- struct nl_info *info);
+void fib6_run_gc(unsigned long expires, struct net *net, bool force);
-extern void fib6_run_gc(unsigned long expires,
- struct net *net, bool force);
+void fib6_gc_cleanup(void);
-extern void fib6_gc_cleanup(void);
+int fib6_init(void);
-extern int fib6_init(void);
+int ipv6_route_open(struct inode *inode, struct file *file);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-extern int fib6_rules_init(void);
-extern void fib6_rules_cleanup(void);
+int fib6_rules_init(void);
+void fib6_rules_cleanup(void);
#else
static inline int fib6_rules_init(void)
{
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f525e7038cca..733747ce163c 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -51,7 +51,7 @@ static inline unsigned int rt6_flags2srcprefs(int flags)
return (flags >> 3) & 7;
}
-extern void rt6_bind_peer(struct rt6_info *rt, int create);
+void rt6_bind_peer(struct rt6_info *rt, int create);
static inline struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
{
@@ -72,70 +72,58 @@ static inline struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
return __rt6_get_peer(rt, 1);
}
-extern void ip6_route_input(struct sk_buff *skb);
+void ip6_route_input(struct sk_buff *skb);
-extern struct dst_entry * ip6_route_output(struct net *net,
- const struct sock *sk,
- struct flowi6 *fl6);
-extern struct dst_entry * ip6_route_lookup(struct net *net,
- struct flowi6 *fl6, int flags);
+struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+ struct flowi6 *fl6);
+struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+ int flags);
-extern int ip6_route_init(void);
-extern void ip6_route_cleanup(void);
+int ip6_route_init(void);
+void ip6_route_cleanup(void);
-extern int ipv6_route_ioctl(struct net *net,
- unsigned int cmd,
- void __user *arg);
+int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg);
-extern int ip6_route_add(struct fib6_config *cfg);
-extern int ip6_ins_rt(struct rt6_info *);
-extern int ip6_del_rt(struct rt6_info *);
+int ip6_route_add(struct fib6_config *cfg);
+int ip6_ins_rt(struct rt6_info *);
+int ip6_del_rt(struct rt6_info *);
-extern int ip6_route_get_saddr(struct net *net,
- struct rt6_info *rt,
- const struct in6_addr *daddr,
- unsigned int prefs,
- struct in6_addr *saddr);
+int ip6_route_get_saddr(struct net *net, struct rt6_info *rt,
+ const struct in6_addr *daddr, unsigned int prefs,
+ struct in6_addr *saddr);
-extern struct rt6_info *rt6_lookup(struct net *net,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr,
- int oif, int flags);
+struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
+ const struct in6_addr *saddr, int oif, int flags);
-extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
- struct flowi6 *fl6);
-extern int icmp6_dst_gc(void);
+struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6);
+int icmp6_dst_gc(void);
-extern void fib6_force_start_gc(struct net *net);
+void fib6_force_start_gc(struct net *net);
-extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
- const struct in6_addr *addr,
- bool anycast);
+struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
+ const struct in6_addr *addr, bool anycast);
/*
* support functions for ND
*
*/
-extern struct rt6_info * rt6_get_dflt_router(const struct in6_addr *addr,
- struct net_device *dev);
-extern struct rt6_info * rt6_add_dflt_router(const struct in6_addr *gwaddr,
- struct net_device *dev,
- unsigned int pref);
-
-extern void rt6_purge_dflt_routers(struct net *net);
-
-extern int rt6_route_rcv(struct net_device *dev,
- u8 *opt, int len,
- const struct in6_addr *gwaddr);
-
-extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
- int oif, u32 mark);
-extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
- __be32 mtu);
-extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
-extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
- u32 mark);
-extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
+struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr,
+ struct net_device *dev);
+struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
+ struct net_device *dev, unsigned int pref);
+
+void rt6_purge_dflt_routers(struct net *net);
+
+int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
+ const struct in6_addr *gwaddr);
+
+void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
+ u32 mark);
+void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
+void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+ u32 mark);
+void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
struct netlink_callback;
@@ -145,10 +133,10 @@ struct rt6_rtnl_dump_arg {
struct net *net;
};
-extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
-extern void rt6_ifdown(struct net *net, struct net_device *dev);
-extern void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
-extern void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
+int rt6_dump_route(struct rt6_info *rt, void *p_arg);
+void rt6_ifdown(struct net *net, struct net_device *dev);
+void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
+void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
/*
@@ -194,11 +182,9 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}
-static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, struct in6_addr *dest)
+static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
{
- if (rt->rt6i_flags & RTF_GATEWAY)
- return &rt->rt6i_gateway;
- return dest;
+ return &rt->rt6i_gateway;
}
#endif
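
rt6_nexthop() loses its fallback argument because rt6i_gateway is now expected to always carry the next hop: the gateway, or the destination itself for on-link routes. Callers therefore simplify to something like the sketch below; the __ipv6_neigh_lookup() usage is an assumption, not part of this hunk.

#include <net/ip6_route.h>
#include <net/ndisc.h>

static struct neighbour *example_nexthop_neigh(struct rt6_info *rt,
					       struct net_device *dev)
{
	return __ipv6_neigh_lookup(dev, rt6_nexthop(rt));
}
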
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index cbf2be37c91a..9922093f575e 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -165,7 +165,7 @@ struct fib_result_nl {
#define FIB_TABLE_HASHSZ 2
#endif
-extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
#define FIB_RES_SADDR(net, res) \
((FIB_RES_NH(res).nh_saddr_genid == \
@@ -187,14 +187,14 @@ struct fib_table {
unsigned long tb_data[0];
};
-extern int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
- struct fib_result *res, int fib_flags);
-extern int fib_table_insert(struct fib_table *, struct fib_config *);
-extern int fib_table_delete(struct fib_table *, struct fib_config *);
-extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
- struct netlink_callback *cb);
-extern int fib_table_flush(struct fib_table *table);
-extern void fib_free_table(struct fib_table *tb);
+int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
+ struct fib_result *res, int fib_flags);
+int fib_table_insert(struct fib_table *, struct fib_config *);
+int fib_table_delete(struct fib_table *, struct fib_config *);
+int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
+ struct netlink_callback *cb);
+int fib_table_flush(struct fib_table *table);
+void fib_free_table(struct fib_table *tb);
@@ -234,14 +234,13 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
}
#else /* CONFIG_IP_MULTIPLE_TABLES */
-extern int __net_init fib4_rules_init(struct net *net);
-extern void __net_exit fib4_rules_exit(struct net *net);
+int __net_init fib4_rules_init(struct net *net);
+void __net_exit fib4_rules_exit(struct net *net);
-extern struct fib_table *fib_new_table(struct net *net, u32 id);
-extern struct fib_table *fib_get_table(struct net *net, u32 id);
+struct fib_table *fib_new_table(struct net *net, u32 id);
+struct fib_table *fib_get_table(struct net *net, u32 id);
-extern int __fib_lookup(struct net *net, struct flowi4 *flp,
- struct fib_result *res);
+int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res);
static inline int fib_lookup(struct net *net, struct flowi4 *flp,
struct fib_result *res)
@@ -269,12 +268,12 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
/* Exported by fib_frontend.c */
extern const struct nla_policy rtm_ipv4_policy[];
-extern void ip_fib_init(void);
-extern __be32 fib_compute_spec_dst(struct sk_buff *skb);
-extern int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
- u8 tos, int oif, struct net_device *dev,
- struct in_device *idev, u32 *itag);
-extern void fib_select_default(struct fib_result *res);
+void ip_fib_init(void);
+__be32 fib_compute_spec_dst(struct sk_buff *skb);
+int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+ u8 tos, int oif, struct net_device *dev,
+ struct in_device *idev, u32 *itag);
+void fib_select_default(struct fib_result *res);
#ifdef CONFIG_IP_ROUTE_CLASSID
static inline int fib_num_tclassid_users(struct net *net)
{
@@ -288,15 +287,15 @@ static inline int fib_num_tclassid_users(struct net *net)
#endif
/* Exported by fib_semantics.c */
-extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
-extern int fib_sync_down_dev(struct net_device *dev, int force);
-extern int fib_sync_down_addr(struct net *net, __be32 local);
-extern int fib_sync_up(struct net_device *dev);
-extern void fib_select_multipath(struct fib_result *res);
+int ip_fib_check_default(__be32 gw, struct net_device *dev);
+int fib_sync_down_dev(struct net_device *dev, int force);
+int fib_sync_down_addr(struct net *net, __be32 local);
+int fib_sync_up(struct net_device *dev);
+void fib_select_multipath(struct fib_result *res);
/* Exported by fib_trie.c */
-extern void fib_trie_init(void);
-extern struct fib_table *fib_trie_table(u32 id);
+void fib_trie_init(void);
+struct fib_table *fib_trie_table(u32 id);
static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
{
@@ -314,7 +313,7 @@ static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
#endif
}
-extern void free_fib_info(struct fib_info *fi);
+void free_fib_info(struct fib_info *fi);
static inline void fib_info_put(struct fib_info *fi)
{
@@ -323,8 +322,8 @@ static inline void fib_info_put(struct fib_info *fi)
}
#ifdef CONFIG_PROC_FS
-extern int __net_init fib_proc_init(struct net *net);
-extern void __net_exit fib_proc_exit(struct net *net);
+int __net_init fib_proc_init(struct net *net);
+void __net_exit fib_proc_exit(struct net *net);
#else
static inline int fib_proc_init(struct net *net)
{
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index a0a4a100f5c9..732f8c6ae975 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -150,6 +150,9 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 proto,
__u8 tos, __u8 ttl, __be16 df, bool xnet);
+struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
+ int gso_type_mask);
+
static inline void iptunnel_xmit_stats(int err,
struct net_device_stats *err_stats,
struct pcpu_tstats __percpu *stats)
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 9c4d37ec45a1..1c2e1b9f6b86 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -236,7 +236,7 @@ static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
#ifdef CONFIG_IP_VS_DEBUG
#include <linux/net.h>
-extern int ip_vs_get_debug_level(void);
+int ip_vs_get_debug_level(void);
static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
const union nf_inet_addr *addr,
@@ -532,9 +532,9 @@ struct ip_vs_proto_data {
struct tcp_states_t *tcp_state_table;
};
-extern struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto);
-extern struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
- unsigned short proto);
+struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto);
+struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
+ unsigned short proto);
struct ip_vs_conn_param {
struct net *net;
@@ -1173,8 +1173,8 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
* IPVS core functions
* (from ip_vs_core.c)
*/
-extern const char *ip_vs_proto_name(unsigned int proto);
-extern void ip_vs_init_hash_table(struct list_head *table, int rows);
+const char *ip_vs_proto_name(unsigned int proto);
+void ip_vs_init_hash_table(struct list_head *table, int rows);
#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
#define IP_VS_APP_TYPE_FTP 1
@@ -1237,22 +1237,22 @@ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
smp_mb__before_atomic_dec();
atomic_dec(&cp->refcnt);
}
-extern void ip_vs_conn_put(struct ip_vs_conn *cp);
-extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
+void ip_vs_conn_put(struct ip_vs_conn *cp);
+void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
const union nf_inet_addr *daddr,
__be16 dport, unsigned int flags,
struct ip_vs_dest *dest, __u32 fwmark);
-extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
+void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
-extern const char * ip_vs_state_name(__u16 proto, int state);
+const char *ip_vs_state_name(__u16 proto, int state);
-extern void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
-extern int ip_vs_check_template(struct ip_vs_conn *ct);
-extern void ip_vs_random_dropentry(struct net *net);
-extern int ip_vs_conn_init(void);
-extern void ip_vs_conn_cleanup(void);
+void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
+int ip_vs_check_template(struct ip_vs_conn *ct);
+void ip_vs_random_dropentry(struct net *net);
+int ip_vs_conn_init(void);
+void ip_vs_conn_cleanup(void);
static inline void ip_vs_control_del(struct ip_vs_conn *cp)
{
@@ -1317,37 +1317,36 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
/*
* IPVS netns init & cleanup functions
*/
-extern int ip_vs_estimator_net_init(struct net *net);
-extern int ip_vs_control_net_init(struct net *net);
-extern int ip_vs_protocol_net_init(struct net *net);
-extern int ip_vs_app_net_init(struct net *net);
-extern int ip_vs_conn_net_init(struct net *net);
-extern int ip_vs_sync_net_init(struct net *net);
-extern void ip_vs_conn_net_cleanup(struct net *net);
-extern void ip_vs_app_net_cleanup(struct net *net);
-extern void ip_vs_protocol_net_cleanup(struct net *net);
-extern void ip_vs_control_net_cleanup(struct net *net);
-extern void ip_vs_estimator_net_cleanup(struct net *net);
-extern void ip_vs_sync_net_cleanup(struct net *net);
-extern void ip_vs_service_net_cleanup(struct net *net);
+int ip_vs_estimator_net_init(struct net *net);
+int ip_vs_control_net_init(struct net *net);
+int ip_vs_protocol_net_init(struct net *net);
+int ip_vs_app_net_init(struct net *net);
+int ip_vs_conn_net_init(struct net *net);
+int ip_vs_sync_net_init(struct net *net);
+void ip_vs_conn_net_cleanup(struct net *net);
+void ip_vs_app_net_cleanup(struct net *net);
+void ip_vs_protocol_net_cleanup(struct net *net);
+void ip_vs_control_net_cleanup(struct net *net);
+void ip_vs_estimator_net_cleanup(struct net *net);
+void ip_vs_sync_net_cleanup(struct net *net);
+void ip_vs_service_net_cleanup(struct net *net);
/*
* IPVS application functions
* (from ip_vs_app.c)
*/
#define IP_VS_APP_MAX_PORTS 8
-extern struct ip_vs_app *register_ip_vs_app(struct net *net,
- struct ip_vs_app *app);
-extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
-extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
-extern int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app,
- __u16 proto, __u16 port);
-extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
-extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
-
-extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
-extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
+struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
+int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+void ip_vs_unbind_app(struct ip_vs_conn *cp);
+int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+ __u16 port);
+int ip_vs_app_inc_get(struct ip_vs_app *inc);
+void ip_vs_app_inc_put(struct ip_vs_app *inc);
+
+int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
+int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
int register_ip_vs_pe(struct ip_vs_pe *pe);
int unregister_ip_vs_pe(struct ip_vs_pe *pe);
@@ -1368,17 +1367,15 @@ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
/*
* IPVS protocol functions (from ip_vs_proto.c)
*/
-extern int ip_vs_protocol_init(void);
-extern void ip_vs_protocol_cleanup(void);
-extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
-extern int *ip_vs_create_timeout_table(int *table, int size);
-extern int
-ip_vs_set_state_timeout(int *table, int num, const char *const *names,
- const char *name, int to);
-extern void
-ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
- const struct sk_buff *skb,
- int offset, const char *msg);
+int ip_vs_protocol_init(void);
+void ip_vs_protocol_cleanup(void);
+void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
+int *ip_vs_create_timeout_table(int *table, int size);
+int ip_vs_set_state_timeout(int *table, int num, const char *const *names,
+ const char *name, int to);
+void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
+ const struct sk_buff *skb, int offset,
+ const char *msg);
extern struct ip_vs_protocol ip_vs_protocol_tcp;
extern struct ip_vs_protocol ip_vs_protocol_udp;
@@ -1391,22 +1388,22 @@ extern struct ip_vs_protocol ip_vs_protocol_sctp;
* Registering/unregistering scheduler functions
* (from ip_vs_sched.c)
*/
-extern int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
-extern int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
-extern int ip_vs_bind_scheduler(struct ip_vs_service *svc,
- struct ip_vs_scheduler *scheduler);
-extern void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
- struct ip_vs_scheduler *sched);
-extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
-extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
-extern struct ip_vs_conn *
+int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+int ip_vs_bind_scheduler(struct ip_vs_service *svc,
+ struct ip_vs_scheduler *scheduler);
+void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
+ struct ip_vs_scheduler *sched);
+struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
+void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
+struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
struct ip_vs_proto_data *pd, int *ignored,
struct ip_vs_iphdr *iph);
-extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);
+int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
+ struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);
-extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
+void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
/*
@@ -1415,25 +1412,24 @@ extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
extern struct ip_vs_stats ip_vs_stats;
extern int sysctl_ip_vs_sync_ver;
-extern struct ip_vs_service *
+struct ip_vs_service *
ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport);
-extern bool
-ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
- const union nf_inet_addr *daddr, __be16 dport);
-
-extern int ip_vs_use_count_inc(void);
-extern void ip_vs_use_count_dec(void);
-extern int ip_vs_register_nl_ioctl(void);
-extern void ip_vs_unregister_nl_ioctl(void);
-extern int ip_vs_control_init(void);
-extern void ip_vs_control_cleanup(void);
-extern struct ip_vs_dest *
+bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
+ const union nf_inet_addr *daddr, __be16 dport);
+
+int ip_vs_use_count_inc(void);
+void ip_vs_use_count_dec(void);
+int ip_vs_register_nl_ioctl(void);
+void ip_vs_unregister_nl_ioctl(void);
+int ip_vs_control_init(void);
+void ip_vs_control_cleanup(void);
+struct ip_vs_dest *
ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
__be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
__u16 protocol, __u32 fwmark, __u32 flags);
-extern void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
+void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
{
@@ -1450,56 +1446,49 @@ static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
* IPVS sync daemon data and function prototypes
* (from ip_vs_sync.c)
*/
-extern int start_sync_thread(struct net *net, int state, char *mcast_ifn,
- __u8 syncid);
-extern int stop_sync_thread(struct net *net, int state);
-extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
-
+int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
+int stop_sync_thread(struct net *net, int state);
+void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
/*
* IPVS rate estimator prototypes (from ip_vs_est.c)
*/
-extern void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
-extern void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
-extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
-extern void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
- struct ip_vs_stats *stats);
+void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
+void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
+void ip_vs_zero_estimator(struct ip_vs_stats *stats);
+void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
+ struct ip_vs_stats *stats);
/*
* Various IPVS packet transmitters (from ip_vs_xmit.c)
*/
-extern int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
- struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
- struct ip_vs_protocol *pp,
- struct ip_vs_iphdr *iph);
-extern int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
- struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
- struct ip_vs_protocol *pp,
- struct ip_vs_iphdr *iph);
-extern int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
- struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
- struct ip_vs_protocol *pp, int offset,
- unsigned int hooknum, struct ip_vs_iphdr *iph);
-extern void ip_vs_dest_dst_rcu_free(struct rcu_head *head);
+int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, int offset,
+ unsigned int hooknum, struct ip_vs_iphdr *iph);
+void ip_vs_dest_dst_rcu_free(struct rcu_head *head);
#ifdef CONFIG_IP_VS_IPV6
-extern int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
- struct ip_vs_protocol *pp,
- struct ip_vs_iphdr *iph);
-extern int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
- struct ip_vs_protocol *pp,
- struct ip_vs_iphdr *iph);
-extern int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
- struct ip_vs_protocol *pp,
- struct ip_vs_iphdr *iph);
-extern int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
- struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
- struct ip_vs_protocol *pp, int offset,
- unsigned int hooknum, struct ip_vs_iphdr *iph);
+int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, int offset,
+ unsigned int hooknum, struct ip_vs_iphdr *iph);
#endif
#ifdef CONFIG_SYSCTL
@@ -1548,15 +1537,15 @@ static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
return fwd;
}
-extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
- struct ip_vs_conn *cp, int dir);
+void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, int dir);
#ifdef CONFIG_IP_VS_IPV6
-extern void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
- struct ip_vs_conn *cp, int dir);
+void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, int dir);
#endif
-extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
+__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
{
@@ -1615,13 +1604,13 @@ static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
#endif
}
-extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
- int outin);
-extern int ip_vs_confirm_conntrack(struct sk_buff *skb);
-extern void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
- struct ip_vs_conn *cp, u_int8_t proto,
- const __be16 port, int from_rs);
-extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
+void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
+ int outin);
+int ip_vs_confirm_conntrack(struct sk_buff *skb);
+void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
+ struct ip_vs_conn *cp, u_int8_t proto,
+ const __be16 port, int from_rs);
+void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
#else
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index bbf1c8fb8511..dd96638ab8ff 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -244,14 +244,14 @@ struct ipv6_fl_socklist {
struct rcu_head rcu;
};
-extern struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
-extern struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
- struct ip6_flowlabel * fl,
- struct ipv6_txoptions * fopt);
-extern void fl6_free_socklist(struct sock *sk);
-extern int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
-extern int ip6_flowlabel_init(void);
-extern void ip6_flowlabel_cleanup(void);
+struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
+struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
+ struct ip6_flowlabel *fl,
+ struct ipv6_txoptions *fopt);
+void fl6_free_socklist(struct sock *sk);
+int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
+int ip6_flowlabel_init(void);
+void ip6_flowlabel_cleanup(void);
static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{
@@ -259,7 +259,7 @@ static inline void fl6_sock_release(struct ip6_flowlabel *fl)
atomic_dec(&fl->users);
}
-extern void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
+void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
struct icmp6hdr *thdr, int len);
@@ -267,19 +267,21 @@ int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
struct sock *sk, struct flowi6 *fl6);
-extern int ip6_ra_control(struct sock *sk, int sel);
+int ip6_ra_control(struct sock *sk, int sel);
-extern int ipv6_parse_hopopts(struct sk_buff *skb);
+int ipv6_parse_hopopts(struct sk_buff *skb);
-extern struct ipv6_txoptions * ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt);
-extern struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
- int newtype,
- struct ipv6_opt_hdr __user *newopt,
- int newoptlen);
+struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
+ struct ipv6_txoptions *opt);
+struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
+ struct ipv6_txoptions *opt,
+ int newtype,
+ struct ipv6_opt_hdr __user *newopt,
+ int newoptlen);
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
struct ipv6_txoptions *opt);
-extern bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
+bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
static inline bool ipv6_accept_ra(struct inet6_dev *idev)
{
@@ -306,7 +308,7 @@ static inline int ip6_frag_mem(struct net *net)
#define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */
#define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */
-extern int __ipv6_addr_type(const struct in6_addr *addr);
+int __ipv6_addr_type(const struct in6_addr *addr);
static inline int ipv6_addr_type(const struct in6_addr *addr)
{
return __ipv6_addr_type(addr) & 0xffff;
@@ -537,14 +539,14 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a)
}
/* more secured version of ipv6_addr_hash() */
-static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
+static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
{
u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
return jhash_3words(v,
(__force u32)a->s6_addr32[2],
(__force u32)a->s6_addr32[3],
- ipv6_hash_secret);
+ initval);
}
static inline bool ipv6_addr_loopback(const struct in6_addr *a)
@@ -656,9 +658,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}
-extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
-extern int ip6_dst_hoplimit(struct dst_entry *dst);
+int ip6_dst_hoplimit(struct dst_entry *dst);
/*
* Header manipulation
@@ -682,83 +684,65 @@ static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
* rcv function (called from netdevice level)
*/
-extern int ipv6_rcv(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *pt,
- struct net_device *orig_dev);
+int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
-extern int ip6_rcv_finish(struct sk_buff *skb);
+int ip6_rcv_finish(struct sk_buff *skb);
/*
* upper-layer output functions
*/
-extern int ip6_xmit(struct sock *sk,
- struct sk_buff *skb,
- struct flowi6 *fl6,
- struct ipv6_txoptions *opt,
- int tclass);
-
-extern int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
-
-extern int ip6_append_data(struct sock *sk,
- int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb),
- void *from,
- int length,
- int transhdrlen,
- int hlimit,
- int tclass,
- struct ipv6_txoptions *opt,
- struct flowi6 *fl6,
- struct rt6_info *rt,
- unsigned int flags,
- int dontfrag);
-
-extern int ip6_push_pending_frames(struct sock *sk);
-
-extern void ip6_flush_pending_frames(struct sock *sk);
-
-extern int ip6_dst_lookup(struct sock *sk,
- struct dst_entry **dst,
- struct flowi6 *fl6);
-extern struct dst_entry * ip6_dst_lookup_flow(struct sock *sk,
- struct flowi6 *fl6,
- const struct in6_addr *final_dst,
- bool can_sleep);
-extern struct dst_entry * ip6_sk_dst_lookup_flow(struct sock *sk,
- struct flowi6 *fl6,
- const struct in6_addr *final_dst,
- bool can_sleep);
-extern struct dst_entry * ip6_blackhole_route(struct net *net,
- struct dst_entry *orig_dst);
+int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+ struct ipv6_txoptions *opt, int tclass);
+
+int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
+
+int ip6_append_data(struct sock *sk,
+ int getfrag(void *from, char *to, int offset, int len,
+ int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen, int hlimit,
+ int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
+ struct rt6_info *rt, unsigned int flags, int dontfrag);
+
+int ip6_push_pending_frames(struct sock *sk);
+
+void ip6_flush_pending_frames(struct sock *sk);
+
+int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
+struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+ const struct in6_addr *final_dst,
+ bool can_sleep);
+struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+ const struct in6_addr *final_dst,
+ bool can_sleep);
+struct dst_entry *ip6_blackhole_route(struct net *net,
+ struct dst_entry *orig_dst);
/*
* skb processing functions
*/
-extern int ip6_output(struct sk_buff *skb);
-extern int ip6_forward(struct sk_buff *skb);
-extern int ip6_input(struct sk_buff *skb);
-extern int ip6_mc_input(struct sk_buff *skb);
+int ip6_output(struct sk_buff *skb);
+int ip6_forward(struct sk_buff *skb);
+int ip6_input(struct sk_buff *skb);
+int ip6_mc_input(struct sk_buff *skb);
-extern int __ip6_local_out(struct sk_buff *skb);
-extern int ip6_local_out(struct sk_buff *skb);
+int __ip6_local_out(struct sk_buff *skb);
+int ip6_local_out(struct sk_buff *skb);
/*
* Extension header (options) processing
*/
-extern void ipv6_push_nfrag_opts(struct sk_buff *skb,
- struct ipv6_txoptions *opt,
- u8 *proto,
- struct in6_addr **daddr_p);
-extern void ipv6_push_frag_opts(struct sk_buff *skb,
- struct ipv6_txoptions *opt,
- u8 *proto);
+void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+ u8 *proto, struct in6_addr **daddr_p);
+void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+ u8 *proto);
-extern int ipv6_skip_exthdr(const struct sk_buff *, int start,
- u8 *nexthdrp, __be16 *frag_offp);
+int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp,
+ __be16 *frag_offp);
-extern bool ipv6_ext_hdr(u8 nexthdr);
+bool ipv6_ext_hdr(u8 nexthdr);
enum {
IP6_FH_F_FRAG = (1 << 0),
@@ -767,57 +751,44 @@ enum {
};
/* find specified header and get offset to it */
-extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
- int target, unsigned short *fragoff, int *fragflg);
+int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target,
+ unsigned short *fragoff, int *fragflg);
-extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
+int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
-extern struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
- const struct ipv6_txoptions *opt,
- struct in6_addr *orig);
+struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
+ const struct ipv6_txoptions *opt,
+ struct in6_addr *orig);
/*
* socket options (ipv6_sockglue.c)
*/
-extern int ipv6_setsockopt(struct sock *sk, int level,
- int optname,
- char __user *optval,
- unsigned int optlen);
-extern int ipv6_getsockopt(struct sock *sk, int level,
- int optname,
- char __user *optval,
- int __user *optlen);
-extern int compat_ipv6_setsockopt(struct sock *sk,
- int level,
- int optname,
- char __user *optval,
- unsigned int optlen);
-extern int compat_ipv6_getsockopt(struct sock *sk,
- int level,
- int optname,
- char __user *optval,
- int __user *optlen);
-
-extern int ip6_datagram_connect(struct sock *sk,
- struct sockaddr *addr, int addr_len);
-
-extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
-extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
-extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
- u32 info, u8 *payload);
-extern void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
-extern void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
-
-extern int inet6_release(struct socket *sock);
-extern int inet6_bind(struct socket *sock, struct sockaddr *uaddr,
- int addr_len);
-extern int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer);
-extern int inet6_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg);
-
-extern int inet6_hash_connect(struct inet_timewait_death_row *death_row,
+int ipv6_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
+int ipv6_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
+int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+
+int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
+
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
+void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+ u32 info, u8 *payload);
+void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
+void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
+
+int inet6_release(struct socket *sock);
+int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
+ int peer);
+int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+int inet6_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
/*
@@ -829,30 +800,27 @@ extern const struct proto_ops inet6_dgram_ops;
struct group_source_req;
struct group_filter;
-extern int ip6_mc_source(int add, int omode, struct sock *sk,
- struct group_source_req *pgsr);
-extern int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
-extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
- struct group_filter __user *optval,
- int __user *optlen);
-extern unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
- const struct in6_addr *daddr, u32 rnd);
+int ip6_mc_source(int add, int omode, struct sock *sk,
+ struct group_source_req *pgsr);
+int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
+int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
+ struct group_filter __user *optval, int __user *optlen);
#ifdef CONFIG_PROC_FS
-extern int ac6_proc_init(struct net *net);
-extern void ac6_proc_exit(struct net *net);
-extern int raw6_proc_init(void);
-extern void raw6_proc_exit(void);
-extern int tcp6_proc_init(struct net *net);
-extern void tcp6_proc_exit(struct net *net);
-extern int udp6_proc_init(struct net *net);
-extern void udp6_proc_exit(struct net *net);
-extern int udplite6_proc_init(void);
-extern void udplite6_proc_exit(void);
-extern int ipv6_misc_proc_init(void);
-extern void ipv6_misc_proc_exit(void);
-extern int snmp6_register_dev(struct inet6_dev *idev);
-extern int snmp6_unregister_dev(struct inet6_dev *idev);
+int ac6_proc_init(struct net *net);
+void ac6_proc_exit(struct net *net);
+int raw6_proc_init(void);
+void raw6_proc_exit(void);
+int tcp6_proc_init(struct net *net);
+void tcp6_proc_exit(struct net *net);
+int udp6_proc_init(struct net *net);
+void udp6_proc_exit(struct net *net);
+int udplite6_proc_init(void);
+void udplite6_proc_exit(void);
+int ipv6_misc_proc_init(void);
+void ipv6_misc_proc_exit(void);
+int snmp6_register_dev(struct inet6_dev *idev);
+int snmp6_unregister_dev(struct inet6_dev *idev);
#else
static inline int ac6_proc_init(struct net *net) { return 0; }
@@ -865,10 +833,10 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
extern struct ctl_table ipv6_route_table_template[];
extern struct ctl_table ipv6_icmp_table_template[];
-extern struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
-extern struct ctl_table *ipv6_route_sysctl_init(struct net *net);
-extern int ipv6_sysctl_register(void);
-extern void ipv6_sysctl_unregister(void);
+struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
+struct ctl_table *ipv6_route_sysctl_init(struct net *net);
+int ipv6_sysctl_register(void);
+void ipv6_sysctl_unregister(void);
#endif
#endif /* _NET_IPV6_H */
diff --git a/include/net/ipx.h b/include/net/ipx.h
index c1fec6b464cc..9e9e35465baf 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -123,23 +123,23 @@ extern struct list_head ipx_routes;
extern rwlock_t ipx_routes_lock;
extern struct list_head ipx_interfaces;
-extern struct ipx_interface *ipx_interfaces_head(void);
+struct ipx_interface *ipx_interfaces_head(void);
extern spinlock_t ipx_interfaces_lock;
extern struct ipx_interface *ipx_primary_net;
-extern int ipx_proc_init(void);
-extern void ipx_proc_exit(void);
+int ipx_proc_init(void);
+void ipx_proc_exit(void);
-extern const char *ipx_frame_name(__be16);
-extern const char *ipx_device_name(struct ipx_interface *intrfc);
+const char *ipx_frame_name(__be16);
+const char *ipx_device_name(struct ipx_interface *intrfc);
static __inline__ void ipxitf_hold(struct ipx_interface *intrfc)
{
atomic_inc(&intrfc->refcnt);
}
-extern void ipxitf_down(struct ipx_interface *intrfc);
+void ipxitf_down(struct ipx_interface *intrfc);
static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
{
diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
index 80ffde3bb164..0224402260a7 100644
--- a/include/net/irda/ircomm_tty.h
+++ b/include/net/irda/ircomm_tty.h
@@ -105,13 +105,13 @@ struct ircomm_tty_cb {
void ircomm_tty_start(struct tty_struct *tty);
void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self);
-extern int ircomm_tty_tiocmget(struct tty_struct *tty);
-extern int ircomm_tty_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear);
-extern int ircomm_tty_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
-extern void ircomm_tty_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios);
+int ircomm_tty_tiocmget(struct tty_struct *tty);
+int ircomm_tty_tiocmset(struct tty_struct *tty, unsigned int set,
+ unsigned int clear);
+int ircomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
+void ircomm_tty_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios);
#endif
diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h
index 3bed61d379a8..a059465101ff 100644
--- a/include/net/irda/irda.h
+++ b/include/net/irda/irda.h
@@ -112,20 +112,19 @@ do { if(!(expr)) { \
struct net_device;
struct packet_type;
-extern void irda_proc_register(void);
-extern void irda_proc_unregister(void);
+void irda_proc_register(void);
+void irda_proc_unregister(void);
-extern int irda_sysctl_register(void);
-extern void irda_sysctl_unregister(void);
+int irda_sysctl_register(void);
+void irda_sysctl_unregister(void);
-extern int irsock_init(void);
-extern void irsock_cleanup(void);
+int irsock_init(void);
+void irsock_cleanup(void);
-extern int irda_nl_register(void);
-extern void irda_nl_unregister(void);
+int irda_nl_register(void);
+void irda_nl_unregister(void);
-extern int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype,
- struct net_device *orig_dev);
+int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *orig_dev);
#endif /* NET_IRDA_H */
diff --git a/include/net/irda/irda_device.h b/include/net/irda/irda_device.h
index 94c852d47d0f..11417475a6c3 100644
--- a/include/net/irda/irda_device.h
+++ b/include/net/irda/irda_device.h
@@ -162,7 +162,7 @@ typedef struct {
int irq, irq2; /* Interrupts used */
int dma, dma2; /* DMA channel(s) used */
int fifo_size; /* FIFO size */
- int irqflags; /* interrupt flags (ie, IRQF_SHARED|IRQF_DISABLED) */
+ int irqflags; /* interrupt flags (ie, IRQF_SHARED) */
int direction; /* Link direction, used by some FIR drivers */
int enabled; /* Powered on? */
int suspended; /* Suspended by APM */
diff --git a/include/net/irda/irlap_event.h b/include/net/irda/irlap_event.h
index 4c90824c50fb..f9d88da97af2 100644
--- a/include/net/irda/irlap_event.h
+++ b/include/net/irda/irlap_event.h
@@ -126,6 +126,6 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
void irlap_print_event(IRLAP_EVENT event);
-extern int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb);
+int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb);
#endif
diff --git a/include/net/irda/irlap_frame.h b/include/net/irda/irlap_frame.h
index 6b1dc4f8eca5..57173ae398ae 100644
--- a/include/net/irda/irlap_frame.h
+++ b/include/net/irda/irlap_frame.h
@@ -163,7 +163,7 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command);
void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
__u8 caddr, int command);
-extern int irlap_insert_qos_negotiation_params(struct irlap_cb *self,
- struct sk_buff *skb);
+int irlap_insert_qos_negotiation_params(struct irlap_cb *self,
+ struct sk_buff *skb);
#endif
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index 5d5a6a4732ef..a830b01baba4 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -432,44 +432,32 @@ struct iw_public_data {
/* First : function strictly used inside the kernel */
/* Handle /proc/net/wireless, called in net/code/dev.c */
-extern int dev_get_wireless_info(char * buffer, char **start, off_t offset,
- int length);
+int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
/* Second : functions that may be called by driver modules */
/* Send a single event to user space */
-extern void wireless_send_event(struct net_device * dev,
- unsigned int cmd,
- union iwreq_data * wrqu,
- const char * extra);
+void wireless_send_event(struct net_device *dev, unsigned int cmd,
+ union iwreq_data *wrqu, const char *extra);
/* We may need a function to send a stream of events to user space.
* More on that later... */
/* Standard handler for SIOCSIWSPY */
-extern int iw_handler_set_spy(struct net_device * dev,
- struct iw_request_info * info,
- union iwreq_data * wrqu,
- char * extra);
+int iw_handler_set_spy(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
/* Standard handler for SIOCGIWSPY */
-extern int iw_handler_get_spy(struct net_device * dev,
- struct iw_request_info * info,
- union iwreq_data * wrqu,
- char * extra);
+int iw_handler_get_spy(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
/* Standard handler for SIOCSIWTHRSPY */
-extern int iw_handler_set_thrspy(struct net_device * dev,
- struct iw_request_info *info,
- union iwreq_data * wrqu,
- char * extra);
+int iw_handler_set_thrspy(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
/* Standard handler for SIOCGIWTHRSPY */
-extern int iw_handler_get_thrspy(struct net_device * dev,
- struct iw_request_info *info,
- union iwreq_data * wrqu,
- char * extra);
+int iw_handler_get_thrspy(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
/* Driver call to update spy records */
-extern void wireless_spy_update(struct net_device * dev,
- unsigned char * address,
- struct iw_quality * wstats);
+void wireless_spy_update(struct net_device *dev, unsigned char *address,
+ struct iw_quality *wstats);
/************************* INLINE FUNTIONS *************************/
/*
diff --git a/include/net/lapb.h b/include/net/lapb.h
index df892a94f2c6..9510f8725f03 100644
--- a/include/net/lapb.h
+++ b/include/net/lapb.h
@@ -105,40 +105,40 @@ struct lapb_cb {
};
/* lapb_iface.c */
-extern void lapb_connect_confirmation(struct lapb_cb *lapb, int);
-extern void lapb_connect_indication(struct lapb_cb *lapb, int);
-extern void lapb_disconnect_confirmation(struct lapb_cb *lapb, int);
-extern void lapb_disconnect_indication(struct lapb_cb *lapb, int);
-extern int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *);
-extern int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *);
+void lapb_connect_confirmation(struct lapb_cb *lapb, int);
+void lapb_connect_indication(struct lapb_cb *lapb, int);
+void lapb_disconnect_confirmation(struct lapb_cb *lapb, int);
+void lapb_disconnect_indication(struct lapb_cb *lapb, int);
+int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *);
+int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *);
/* lapb_in.c */
-extern void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *);
+void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *);
/* lapb_out.c */
-extern void lapb_kick(struct lapb_cb *lapb);
-extern void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int);
-extern void lapb_establish_data_link(struct lapb_cb *lapb);
-extern void lapb_enquiry_response(struct lapb_cb *lapb);
-extern void lapb_timeout_response(struct lapb_cb *lapb);
-extern void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short);
-extern void lapb_check_need_response(struct lapb_cb *lapb, int, int);
+void lapb_kick(struct lapb_cb *lapb);
+void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int);
+void lapb_establish_data_link(struct lapb_cb *lapb);
+void lapb_enquiry_response(struct lapb_cb *lapb);
+void lapb_timeout_response(struct lapb_cb *lapb);
+void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short);
+void lapb_check_need_response(struct lapb_cb *lapb, int, int);
/* lapb_subr.c */
-extern void lapb_clear_queues(struct lapb_cb *lapb);
-extern void lapb_frames_acked(struct lapb_cb *lapb, unsigned short);
-extern void lapb_requeue_frames(struct lapb_cb *lapb);
-extern int lapb_validate_nr(struct lapb_cb *lapb, unsigned short);
-extern int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *);
-extern void lapb_send_control(struct lapb_cb *lapb, int, int, int);
-extern void lapb_transmit_frmr(struct lapb_cb *lapb);
+void lapb_clear_queues(struct lapb_cb *lapb);
+void lapb_frames_acked(struct lapb_cb *lapb, unsigned short);
+void lapb_requeue_frames(struct lapb_cb *lapb);
+int lapb_validate_nr(struct lapb_cb *lapb, unsigned short);
+int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *);
+void lapb_send_control(struct lapb_cb *lapb, int, int, int);
+void lapb_transmit_frmr(struct lapb_cb *lapb);
/* lapb_timer.c */
-extern void lapb_start_t1timer(struct lapb_cb *lapb);
-extern void lapb_start_t2timer(struct lapb_cb *lapb);
-extern void lapb_stop_t1timer(struct lapb_cb *lapb);
-extern void lapb_stop_t2timer(struct lapb_cb *lapb);
-extern int lapb_t1timer_running(struct lapb_cb *lapb);
+void lapb_start_t1timer(struct lapb_cb *lapb);
+void lapb_start_t2timer(struct lapb_cb *lapb);
+void lapb_stop_t1timer(struct lapb_cb *lapb);
+void lapb_stop_t2timer(struct lapb_cb *lapb);
+int lapb_t1timer_running(struct lapb_cb *lapb);
/*
* Debug levels.
diff --git a/include/net/llc.h b/include/net/llc.h
index 9e7d7f08ef77..68490cbc8a65 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -95,29 +95,29 @@ struct hlist_nulls_head *llc_sk_laddr_hash(struct llc_sap *sap,
extern struct list_head llc_sap_list;
extern spinlock_t llc_sap_list_lock;
-extern int llc_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev);
+int llc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+ struct net_device *orig_dev);
-extern int llc_mac_hdr_init(struct sk_buff *skb,
- const unsigned char *sa, const unsigned char *da);
+int llc_mac_hdr_init(struct sk_buff *skb, const unsigned char *sa,
+ const unsigned char *da);
-extern void llc_add_pack(int type, void (*handler)(struct llc_sap *sap,
- struct sk_buff *skb));
-extern void llc_remove_pack(int type);
+void llc_add_pack(int type,
+ void (*handler)(struct llc_sap *sap, struct sk_buff *skb));
+void llc_remove_pack(int type);
-extern void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
+void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
-extern struct llc_sap *llc_sap_open(unsigned char lsap,
- int (*rcv)(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *pt,
- struct net_device *orig_dev));
+struct llc_sap *llc_sap_open(unsigned char lsap,
+ int (*rcv)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt,
+ struct net_device *orig_dev));
static inline void llc_sap_hold(struct llc_sap *sap)
{
atomic_inc(&sap->refcnt);
}
-extern void llc_sap_close(struct llc_sap *sap);
+void llc_sap_close(struct llc_sap *sap);
static inline void llc_sap_put(struct llc_sap *sap)
{
@@ -125,27 +125,27 @@ static inline void llc_sap_put(struct llc_sap *sap)
llc_sap_close(sap);
}
-extern struct llc_sap *llc_sap_find(unsigned char sap_value);
+struct llc_sap *llc_sap_find(unsigned char sap_value);
-extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
- unsigned char *dmac, unsigned char dsap);
+int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
+ unsigned char *dmac, unsigned char dsap);
-extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
-extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
+void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
+void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
-extern void llc_station_init(void);
-extern void llc_station_exit(void);
+void llc_station_init(void);
+void llc_station_exit(void);
#ifdef CONFIG_PROC_FS
-extern int llc_proc_init(void);
-extern void llc_proc_exit(void);
+int llc_proc_init(void);
+void llc_proc_exit(void);
#else
#define llc_proc_init() (0)
#define llc_proc_exit() do { } while(0)
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SYSCTL
-extern int llc_sysctl_init(void);
-extern void llc_sysctl_exit(void);
+int llc_sysctl_init(void);
+void llc_sysctl_exit(void);
extern int sysctl_llc2_ack_timeout;
extern int sysctl_llc2_busy_timeout;
diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
index df83f69d2de4..f3be818e73c1 100644
--- a/include/net/llc_c_ac.h
+++ b/include/net/llc_c_ac.h
@@ -89,114 +89,92 @@
typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ac_conn_confirm(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_data_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_disc_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_confirm(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_disc_cmd_p_set_x(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_dm_rsp_f_set_p(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_dm_rsp_f_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_i_cmd_p_set_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_xxx_x_set_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_xxx_x_set_0(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_rsp_f_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_cmd_p_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_rsp_f_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_xxx_x_set_0(struct sock* sk,
+int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk,
struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_set_remote_busy(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock* sk,
+int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk,
struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_cmd_p_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_rsp_f_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_ack_rsp_f_set_1(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_xxx_x_set_0(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_ack_xxx_x_set_0(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_send_ua_rsp_f_set_p(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_set_s_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_s_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_p_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_ack_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_rej_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_ack_tmr_if_not_running(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_stop_ack_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_p_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_rej_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_all_timers(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_other_timers(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_nr_received(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_tx_win_size(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_dec_tx_win_size(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_p_flag(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_2(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock* sk,
- struct sk_buff *skb);
-extern int llc_conn_ac_set_p_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_remote_busy_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_retry_cnt_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_cause_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_cause_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_retry_cnt_by_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vr_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_vr_by_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vs_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vs_nr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_vs(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_vs(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_disc(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_reset(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_disc_confirm(struct sock* sk, struct sk_buff *skb);
-extern u8 llc_circular_between(u8 a, u8 b, u8 c);
-extern int llc_conn_ac_send_ack_if_needed(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_adjust_npta_by_rr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_adjust_npta_by_rnr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_sendack_flag(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_rsp_as_ack(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_as_ack(struct sock* sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb);
+int llc_conn_disc(struct sock *sk, struct sk_buff *skb);
+int llc_conn_reset(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb);
+u8 llc_circular_between(u8 a, u8 b, u8 c);
+int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_busy_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_ack_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_rej_tmr_cb(unsigned long timeout_data);
+void llc_conn_busy_tmr_cb(unsigned long timeout_data);
+void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data);
+void llc_conn_ack_tmr_cb(unsigned long timeout_data);
+void llc_conn_rej_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_set_p_flag(struct sock *sk, u8 value);
+void llc_conn_set_p_flag(struct sock *sk, u8 value);
#endif /* LLC_C_AC_H */
diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
index 6ca3113df39e..3948cf111dd0 100644
--- a/include/net/llc_c_ev.h
+++ b/include/net/llc_c_ev.h
@@ -128,138 +128,93 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_local_busy_detected(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk,
+int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
struct sk_buff *skb);
-extern int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk,
+int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
struct sk_buff *skb);
-extern int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
/* NOT_USED functions and their variations */
-extern int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
+ struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
/* Available connection action qualifiers */
-extern int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk,
+int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_conn(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_disc(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_failed(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk,
- struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk,
- struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb);
static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
{
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index 2f97d8ddce92..0134681acc4c 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -95,28 +95,24 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
return skb->cb[sizeof(skb->cb) - 1];
}
-extern struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
- struct proto *prot);
-extern void llc_sk_free(struct sock *sk);
+struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
+ struct proto *prot);
+void llc_sk_free(struct sock *sk);
-extern void llc_sk_reset(struct sock *sk);
+void llc_sk_reset(struct sock *sk);
/* Access to a connection */
-extern int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr,
- u8 first_p_bit);
-extern void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr,
- u8 first_f_bit);
-extern int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr,
- u16 *how_many_unacked);
-extern struct sock *llc_lookup_established(struct llc_sap *sap,
- struct llc_addr *daddr,
- struct llc_addr *laddr);
-extern void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
-extern void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
+int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
+void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
+int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr, u16 *how_many_unacked);
+struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr,
+ struct llc_addr *laddr);
+void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
+void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
-extern u8 llc_data_accept_state(u8 state);
-extern void llc_build_offset_table(void);
+u8 llc_data_accept_state(u8 state);
+void llc_build_offset_table(void);
#endif /* LLC_CONN_H */
diff --git a/include/net/llc_if.h b/include/net/llc_if.h
index f0cb909b60eb..8d5c543cd620 100644
--- a/include/net/llc_if.h
+++ b/include/net/llc_if.h
@@ -62,8 +62,7 @@
#define LLC_STATUS_CONFLICT 7 /* disconnect conn */
#define LLC_STATUS_RESET_DONE 8 /* */
-extern int llc_establish_connection(struct sock *sk, u8 *lmac,
- u8 *dmac, u8 dsap);
-extern int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
-extern int llc_send_disc(struct sock *sk);
+int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap);
+int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
+int llc_send_disc(struct sock *sk);
#endif /* LLC_IF_H */
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 5a93d13ac95c..31e2de7d57c5 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -410,21 +410,20 @@ struct llc_frmr_info {
u8 ind_bits; /* indicator bits set with macro */
} __packed;
-extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
-extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
-extern void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
-extern void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
-extern void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
-extern void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
-extern void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
-extern void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb,
- struct llc_pdu_sn *prev_pdu,
- u8 f_bit, u8 vs, u8 vr, u8 vzyxw);
-extern void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
+void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
+void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
+void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
+void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
+void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
+void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
+void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
+void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, struct llc_pdu_sn *prev_pdu,
+ u8 f_bit, u8 vs, u8 vr, u8 vzyxw);
+void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
#endif /* LLC_PDU_H */
diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
index 37a3bbd02394..a61b98c108ee 100644
--- a/include/net/llc_s_ac.h
+++ b/include/net/llc_s_ac.h
@@ -25,15 +25,13 @@
/* All action functions must look like this */
typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
- struct sk_buff *skb);
-extern int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_report_status(struct llc_sap *sap,
- struct sk_buff *skb);
-extern int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
#endif /* LLC_S_AC_H */
diff --git a/include/net/llc_s_ev.h b/include/net/llc_s_ev.h
index e3acb9329e4a..84db3a59ed28 100644
--- a/include/net/llc_s_ev.h
+++ b/include/net/llc_s_ev.h
@@ -53,15 +53,14 @@ struct llc_sap;
typedef int (*llc_sap_ev_t)(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_deactivation_req(struct llc_sap *sap,
- struct sk_buff *skb);
+int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb);
#endif /* LLC_S_EV_H */
diff --git a/include/net/llc_sap.h b/include/net/llc_sap.h
index ed25bec2f648..1e4df9fd9fb2 100644
--- a/include/net/llc_sap.h
+++ b/include/net/llc_sap.h
@@ -19,18 +19,14 @@ struct net_device;
struct sk_buff;
struct sock;
-extern void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
-extern void llc_save_primitive(struct sock *sk, struct sk_buff* skb,
- unsigned char prim);
-extern struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
- u8 type, u32 data_size);
+void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
+void llc_save_primitive(struct sock *sk, struct sk_buff *skb,
+ unsigned char prim);
+struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
+ u8 type, u32 data_size);
-extern void llc_build_and_send_test_pkt(struct llc_sap *sap,
- struct sk_buff *skb,
- unsigned char *dmac,
- unsigned char dsap);
-extern void llc_build_and_send_xid_pkt(struct llc_sap *sap,
- struct sk_buff *skb,
- unsigned char *dmac,
- unsigned char dsap);
+void llc_build_and_send_test_pkt(struct llc_sap *sap, struct sk_buff *skb,
+ unsigned char *dmac, unsigned char dsap);
+void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb,
+ unsigned char *dmac, unsigned char dsap);
#endif /* LLC_SAP_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cc6035f1a2f1..f386c480e134 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -829,6 +829,15 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
* @RX_FLAG_10MHZ: 10 MHz (half channel) was used
* @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
+ * @RX_FLAG_AMSDU_MORE: Some drivers may prefer to report separate A-MSDU
+ * subframes instead of one huge frame for performance reasons.
+ * All but the last MSDU from an A-MSDU should have this flag set. E.g.
+ * if an A-MSDU has 3 frames, the first 2 must have the flag set, while
+ * the 3rd (last) one must not have this flag set. The flag is used to
+ * deal with retransmission/duplication recovery properly since A-MSDU
+ * subframes share the same sequence number. Reported subframes can be
+ * either regular MSDUs or single A-MSDUs. Subframes must not be
+ * interleaved with other frames.
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -859,6 +868,7 @@ enum mac80211_rx_flags {
RX_FLAG_STBC_MASK = BIT(26) | BIT(27),
RX_FLAG_10MHZ = BIT(28),
RX_FLAG_5MHZ = BIT(29),
+ RX_FLAG_AMSDU_MORE = BIT(30),
};
#define RX_FLAG_STBC_SHIFT 26
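A quick illustration of the new flag (not part of the diff above): a hypothetical driver RX helper that hands A-MSDU subframes to mac80211 one at a time, tagging every subframe except the last with RX_FLAG_AMSDU_MORE as the kernel-doc describes. The helper name drv_rx_amsdu and the per-A-MSDU queue are made up for this sketch.

#include <net/mac80211.h>

/* Illustrative sketch only: feed decoded A-MSDU subframes to mac80211,
 * marking all but the last one with RX_FLAG_AMSDU_MORE. */
static void drv_rx_amsdu(struct ieee80211_hw *hw, struct sk_buff_head *subframes)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(subframes)) != NULL) {
		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

		/* more subframes of this A-MSDU still queued? */
		if (!skb_queue_empty(subframes))
			status->flag |= RX_FLAG_AMSDU_MORE;

		ieee80211_rx(hw, skb);
	}
}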
@@ -1492,6 +1502,11 @@ struct ieee80211_tx_control {
*
* @IEEE80211_HW_TIMING_BEACON_ONLY: Use sync timing from beacon frames
* only, to allow getting TBTT of a DTIM beacon.
+ *
+ * @IEEE80211_HW_CHANCTX_STA_CSA: Support 802.11h based channel-switch (CSA)
+ * for a single active channel while using channel contexts. When support
+ * is not enabled, the default action is to disconnect when receiving the
+ * CSA frame.
*/
enum ieee80211_hw_flags {
IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -1522,6 +1537,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25,
IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26,
IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27,
+ IEEE80211_HW_CHANCTX_STA_CSA = 1<<28,
};
/**
@@ -2666,6 +2682,10 @@ enum ieee80211_roc_type {
* zero using ieee80211_csa_is_complete() after the beacon has been
* transmitted and then call ieee80211_csa_finish().
*
+ * @join_ibss: Join an IBSS (on an IBSS interface); this is called after all
+ * information in bss_conf is set up and the beacon can be retrieved. A
+ * channel context is bound before this is called.
+ * @leave_ibss: Leave the IBSS again.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -2857,6 +2877,9 @@ struct ieee80211_ops {
void (*channel_switch_beacon)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef);
+
+ int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+ void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
};
/**
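To make the new ops concrete, here is a hedged sketch (not part of the diff) of a driver wiring up the IBSS callbacks added above; drv_join_ibss, drv_leave_ibss and the trimmed-down ops table are hypothetical, and the mandatory callbacks a real driver must provide are elided.

#include <net/mac80211.h>

/* Illustrative sketch only: hypothetical driver IBSS callbacks. */
static int drv_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	/* bss_conf is fully set up and a channel context is already bound;
	 * program the hardware for the IBSS here. */
	return 0;
}

static void drv_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	/* undo whatever drv_join_ibss() configured */
}

static const struct ieee80211_ops drv_ops = {
	/* mandatory callbacks such as .tx, .start and .stop omitted here */
	.join_ibss  = drv_join_ibss,
	.leave_ibss = drv_leave_ibss,
};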
@@ -3920,6 +3943,25 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
void *data);
/**
+ * ieee80211_iterate_active_interfaces_rtnl - iterate active interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware that are currently active and calls the callback for them.
+ * This version can only be used while holding the RTNL.
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
+ * @iterator: the iterator function to call, cannot sleep
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw,
+ u32 iter_flags,
+ void (*iterator)(void *data,
+ u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data);
+
+/**
* ieee80211_queue_work - add work onto the mac80211 workqueue
*
* Drivers and mac80211 use this to add work onto the mac80211 workqueue.
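As a usage note for the new RTNL-protected iterator declared above, the following sketch (not part of the diff) counts a driver's active interfaces; drv_count_vif() and drv_count_active_vifs() are hypothetical helpers, and the sketch assumes the IEEE80211_IFACE_ITER_NORMAL flag from enum ieee80211_interface_iteration_flags referenced in the kernel-doc.

#include <linux/rtnetlink.h>
#include <net/mac80211.h>

/* Illustrative sketch only: count active vifs under the RTNL. */
static void drv_count_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	unsigned int *count = data;

	(*count)++;	/* the iterator callback must not sleep */
}

static unsigned int drv_count_active_vifs(struct ieee80211_hw *hw)
{
	unsigned int count = 0;

	ASSERT_RTNL();	/* this variant may only be called while holding the RTNL */
	ieee80211_iterate_active_interfaces_rtnl(hw, IEEE80211_IFACE_ITER_NORMAL,
						 drv_count_vif, &count);
	return count;
}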
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index d0d11df9cba1..807d6b7a943f 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -133,7 +133,7 @@ struct ieee802154_ops {
/* Basic interface to register ieee802154 device */
struct ieee802154_dev *
-ieee802154_alloc_device(size_t priv_data_lex, struct ieee802154_ops *ops);
+ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops);
void ieee802154_free_device(struct ieee802154_dev *dev);
int ieee802154_register_device(struct ieee802154_dev *dev);
void ieee802154_unregister_device(struct ieee802154_dev *dev);
diff --git a/include/net/mrp.h b/include/net/mrp.h
index 0f7558b638ae..31912c3be772 100644
--- a/include/net/mrp.h
+++ b/include/net/mrp.h
@@ -126,19 +126,17 @@ struct mrp_port {
struct rcu_head rcu;
};
-extern int mrp_register_application(struct mrp_application *app);
-extern void mrp_unregister_application(struct mrp_application *app);
-
-extern int mrp_init_applicant(struct net_device *dev,
- struct mrp_application *app);
-extern void mrp_uninit_applicant(struct net_device *dev,
- struct mrp_application *app);
-
-extern int mrp_request_join(const struct net_device *dev,
- const struct mrp_application *app,
- const void *value, u8 len, u8 type);
-extern void mrp_request_leave(const struct net_device *dev,
- const struct mrp_application *app,
- const void *value, u8 len, u8 type);
+int mrp_register_application(struct mrp_application *app);
+void mrp_unregister_application(struct mrp_application *app);
+
+int mrp_init_applicant(struct net_device *dev, struct mrp_application *app);
+void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *app);
+
+int mrp_request_join(const struct net_device *dev,
+ const struct mrp_application *app,
+ const void *value, u8 len, u8 type);
+void mrp_request_leave(const struct net_device *dev,
+ const struct mrp_application *app,
+ const void *value, u8 len, u8 type);
#endif /* _NET_MRP_H */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index ea0cc26ab70e..6bbda34d5e59 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -110,8 +110,8 @@ struct ndisc_options {
#define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
-extern struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
- struct ndisc_options *ndopts);
+struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
+ struct ndisc_options *ndopts);
/*
* Return the padding between the option length and the start of the
@@ -189,60 +189,51 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons
return n;
}
-extern int ndisc_init(void);
-extern int ndisc_late_init(void);
+int ndisc_init(void);
+int ndisc_late_init(void);
-extern void ndisc_late_cleanup(void);
-extern void ndisc_cleanup(void);
+void ndisc_late_cleanup(void);
+void ndisc_cleanup(void);
-extern int ndisc_rcv(struct sk_buff *skb);
+int ndisc_rcv(struct sk_buff *skb);
-extern void ndisc_send_ns(struct net_device *dev,
- struct neighbour *neigh,
- const struct in6_addr *solicit,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr);
+void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
+ const struct in6_addr *solicit,
+ const struct in6_addr *daddr, const struct in6_addr *saddr);
-extern void ndisc_send_rs(struct net_device *dev,
- const struct in6_addr *saddr,
- const struct in6_addr *daddr);
-extern void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
- const struct in6_addr *daddr,
- const struct in6_addr *solicited_addr,
- bool router, bool solicited, bool override,
- bool inc_opt);
+void ndisc_send_rs(struct net_device *dev,
+ const struct in6_addr *saddr, const struct in6_addr *daddr);
+void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
+ const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+ bool router, bool solicited, bool override, bool inc_opt);
-extern void ndisc_send_redirect(struct sk_buff *skb,
- const struct in6_addr *target);
+void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target);
-extern int ndisc_mc_map(const struct in6_addr *addr, char *buf,
- struct net_device *dev, int dir);
+int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev,
+ int dir);
/*
* IGMP
*/
-extern int igmp6_init(void);
+int igmp6_init(void);
-extern void igmp6_cleanup(void);
+void igmp6_cleanup(void);
-extern int igmp6_event_query(struct sk_buff *skb);
+int igmp6_event_query(struct sk_buff *skb);
-extern int igmp6_event_report(struct sk_buff *skb);
+int igmp6_event_report(struct sk_buff *skb);
#ifdef CONFIG_SYSCTL
-extern int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl,
- int write,
- void __user *buffer,
- size_t *lenp,
- loff_t *ppos);
+int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen);
#endif
-extern void inet6_ifinfo_notify(int event,
- struct inet6_dev *idev);
+void inet6_ifinfo_notify(int event, struct inet6_dev *idev);
#endif
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 9d22f08896c6..da68c9a90ac5 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -22,6 +22,7 @@
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netns/conntrack.h>
#endif
+#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
struct user_namespace;
@@ -101,6 +102,9 @@ struct net {
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct netns_ct ct;
#endif
+#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
+ struct netns_nftables nft;
+#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
struct netns_nf_frag nf_frag;
#endif
@@ -137,8 +141,8 @@ struct net {
extern struct net init_net;
#ifdef CONFIG_NET_NS
-extern struct net *copy_net_ns(unsigned long flags,
- struct user_namespace *user_ns, struct net *old_net);
+struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
+ struct net *old_net);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
@@ -155,11 +159,11 @@ static inline struct net *copy_net_ns(unsigned long flags,
extern struct list_head net_namespace_list;
-extern struct net *get_net_ns_by_pid(pid_t pid);
-extern struct net *get_net_ns_by_fd(int pid);
+struct net *get_net_ns_by_pid(pid_t pid);
+struct net *get_net_ns_by_fd(int pid);
#ifdef CONFIG_NET_NS
-extern void __put_net(struct net *net);
+void __put_net(struct net *net);
static inline struct net *get_net(struct net *net)
{
@@ -191,7 +195,7 @@ int net_eq(const struct net *net1, const struct net *net2)
return net1 == net2;
}
-extern void net_drop_ns(void *);
+void net_drop_ns(void *);
#else
@@ -308,19 +312,19 @@ struct pernet_operations {
* device which caused kernel oops, and panics during network
* namespace cleanup. So please don't get this wrong.
*/
-extern int register_pernet_subsys(struct pernet_operations *);
-extern void unregister_pernet_subsys(struct pernet_operations *);
-extern int register_pernet_device(struct pernet_operations *);
-extern void unregister_pernet_device(struct pernet_operations *);
+int register_pernet_subsys(struct pernet_operations *);
+void unregister_pernet_subsys(struct pernet_operations *);
+int register_pernet_device(struct pernet_operations *);
+void unregister_pernet_device(struct pernet_operations *);
struct ctl_table;
struct ctl_table_header;
#ifdef CONFIG_SYSCTL
-extern int net_sysctl_init(void);
-extern struct ctl_table_header *register_net_sysctl(struct net *net,
- const char *path, struct ctl_table *table);
-extern void unregister_net_sysctl_table(struct ctl_table_header *header);
+int net_sysctl_init(void);
+struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
+ struct ctl_table *table);
+void unregister_net_sysctl_table(struct ctl_table_header *header);
#else
static inline int net_sysctl_init(void) { return 0; }
static inline struct ctl_table_header *register_net_sysctl(struct net *net,
diff --git a/include/net/netevent.h b/include/net/netevent.h
index fe630dde35c3..d8bbb38584b6 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -26,8 +26,8 @@ enum netevent_notif_type {
NETEVENT_REDIRECT, /* arg is struct netevent_redirect ptr */
};
-extern int register_netevent_notifier(struct notifier_block *nb);
-extern int unregister_netevent_notifier(struct notifier_block *nb);
-extern int call_netevent_notifiers(unsigned long val, void *v);
+int register_netevent_notifier(struct notifier_block *nb);
+int unregister_netevent_notifier(struct notifier_block *nb);
+int call_netevent_notifiers(unsigned long val, void *v);
#endif
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 7573d52a4346..6c3d12e2949f 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -16,9 +16,9 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
-extern int nf_conntrack_ipv4_compat_init(void);
-extern void nf_conntrack_ipv4_compat_fini(void);
+int nf_conntrack_ipv4_compat_init(void);
+void nf_conntrack_ipv4_compat_fini(void);
-extern void need_ipv4_conntrack(void);
+void need_ipv4_conntrack(void);
#endif /*_NF_CONNTRACK_IPV4_H*/
diff --git a/include/net/netfilter/ipv4/nf_defrag_ipv4.h b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
index 6b00ea38546b..f01ef208dff6 100644
--- a/include/net/netfilter/ipv4/nf_defrag_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
@@ -1,6 +1,6 @@
#ifndef _NF_DEFRAG_IPV4_H
#define _NF_DEFRAG_IPV4_H
-extern void nf_defrag_ipv4_enable(void);
+void nf_defrag_ipv4_enable(void);
#endif /* _NF_DEFRAG_IPV4_H */
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
index fd79c9a1779d..5613412e7dc2 100644
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -1,15 +1,14 @@
#ifndef _NF_DEFRAG_IPV6_H
#define _NF_DEFRAG_IPV6_H
-extern void nf_defrag_ipv6_enable(void);
-
-extern int nf_ct_frag6_init(void);
-extern void nf_ct_frag6_cleanup(void);
-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
-extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
- struct net_device *in,
- struct net_device *out,
- int (*okfn)(struct sk_buff *));
+void nf_defrag_ipv6_enable(void);
+
+int nf_ct_frag6_init(void);
+void nf_ct_frag6_cleanup(void);
+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+ struct net_device *in, struct net_device *out,
+ int (*okfn)(struct sk_buff *));
struct inet_frags_ctl;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 0c1288a50e8b..01ea6eed1bb1 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -139,15 +139,13 @@ static inline struct net *nf_ct_net(const struct nf_conn *ct)
}
/* Alter reply tuple (maybe alter helper). */
-extern void
-nf_conntrack_alter_reply(struct nf_conn *ct,
- const struct nf_conntrack_tuple *newreply);
+void nf_conntrack_alter_reply(struct nf_conn *ct,
+ const struct nf_conntrack_tuple *newreply);
/* Is this tuple taken? (ignoring any belonging to the given
conntrack). */
-extern int
-nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
- const struct nf_conn *ignored_conntrack);
+int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_conntrack);
/* Return conntrack_info and tuple hash for given skb. */
static inline struct nf_conn *
@@ -165,37 +163,34 @@ static inline void nf_ct_put(struct nf_conn *ct)
}
/* Protocol module loading */
-extern int nf_ct_l3proto_try_module_get(unsigned short l3proto);
-extern void nf_ct_l3proto_module_put(unsigned short l3proto);
+int nf_ct_l3proto_try_module_get(unsigned short l3proto);
+void nf_ct_l3proto_module_put(unsigned short l3proto);
/*
* Allocate a hashtable of hlist_head (if nulls == 0),
* or hlist_nulls_head (if nulls == 1)
*/
-extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
-extern void nf_ct_free_hashtable(void *hash, unsigned int size);
+void nf_ct_free_hashtable(void *hash, unsigned int size);
-extern struct nf_conntrack_tuple_hash *
+struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple);
-extern int nf_conntrack_hash_check_insert(struct nf_conn *ct);
+int nf_conntrack_hash_check_insert(struct nf_conn *ct);
bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
-extern void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
+void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
-extern bool nf_ct_get_tuplepr(const struct sk_buff *skb,
- unsigned int nhoff, u_int16_t l3num,
- struct nf_conntrack_tuple *tuple);
-extern bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig);
+bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
+ u_int16_t l3num, struct nf_conntrack_tuple *tuple);
+bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
+ const struct nf_conntrack_tuple *orig);
-extern void __nf_ct_refresh_acct(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb,
- unsigned long extra_jiffies,
- int do_acct);
+void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb,
+ unsigned long extra_jiffies, int do_acct);
/* Refresh conntrack for this many jiffies and do accounting */
static inline void nf_ct_refresh_acct(struct nf_conn *ct,
@@ -214,10 +209,8 @@ static inline void nf_ct_refresh(struct nf_conn *ct,
__nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
}
-extern bool __nf_ct_kill_acct(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb,
- int do_acct);
+bool __nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb, int do_acct);
/* kill conntrack and do accounting */
static inline bool nf_ct_kill_acct(struct nf_conn *ct,
@@ -244,19 +237,17 @@ static inline struct nf_conn *nf_ct_untracked_get(void)
{
return &__raw_get_cpu_var(nf_conntrack_untracked);
}
-extern void nf_ct_untracked_status_or(unsigned long bits);
+void nf_ct_untracked_status_or(unsigned long bits);
/* Iterate over all conntracks: if iter returns true, it's deleted. */
-extern void
-nf_ct_iterate_cleanup(struct net *net,
- int (*iter)(struct nf_conn *i, void *data),
- void *data, u32 portid, int report);
-extern void nf_conntrack_free(struct nf_conn *ct);
-extern struct nf_conn *
-nf_conntrack_alloc(struct net *net, u16 zone,
- const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_tuple *repl,
- gfp_t gfp);
+void nf_ct_iterate_cleanup(struct net *net,
+ int (*iter)(struct nf_conn *i, void *data),
+ void *data, u32 portid, int report);
+void nf_conntrack_free(struct nf_conn *ct);
+struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+ const struct nf_conntrack_tuple *orig,
+ const struct nf_conntrack_tuple *repl,
+ gfp_t gfp);
static inline int nf_ct_is_template(const struct nf_conn *ct)
{
@@ -287,7 +278,7 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
struct kernel_param;
-extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
+int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
extern unsigned int nf_conntrack_htable_size;
extern unsigned int nf_conntrack_max;
extern unsigned int nf_conntrack_hash_rnd;
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index 2bdb7a15fe06..fef44edf49c1 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -42,8 +42,8 @@ struct nf_conn_counter *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
return acct;
};
-extern unsigned int
-seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir);
+unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct,
+ int dir);
/* Check if connection tracking accounting is enabled */
static inline bool nf_ct_acct_enabled(struct net *net)
@@ -57,9 +57,9 @@ static inline void nf_ct_set_acct(struct net *net, bool enable)
net->ct.sysctl_acct = enable;
}
-extern int nf_conntrack_acct_pernet_init(struct net *net);
-extern void nf_conntrack_acct_pernet_fini(struct net *net);
+int nf_conntrack_acct_pernet_init(struct net *net);
+void nf_conntrack_acct_pernet_fini(struct net *net);
-extern int nf_conntrack_acct_init(void);
-extern void nf_conntrack_acct_fini(void);
+int nf_conntrack_acct_init(void);
+void nf_conntrack_acct_fini(void);
#endif /* _NF_CONNTRACK_ACCT_H */
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index fb2b6234e937..15308b8eb5b5 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -20,49 +20,42 @@
/* This header is used to share core functionality between the
standalone connection tracking module, and the compatibility layer's use
of connection tracking. */
-extern unsigned int nf_conntrack_in(struct net *net,
- u_int8_t pf,
- unsigned int hooknum,
- struct sk_buff *skb);
-
-extern int nf_conntrack_init_net(struct net *net);
-extern void nf_conntrack_cleanup_net(struct net *net);
-extern void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
-
-extern int nf_conntrack_proto_pernet_init(struct net *net);
-extern void nf_conntrack_proto_pernet_fini(struct net *net);
-
-extern int nf_conntrack_proto_init(void);
-extern void nf_conntrack_proto_fini(void);
-
-extern int nf_conntrack_init_start(void);
-extern void nf_conntrack_cleanup_start(void);
-
-extern void nf_conntrack_init_end(void);
-extern void nf_conntrack_cleanup_end(void);
-
-extern bool
-nf_ct_get_tuple(const struct sk_buff *skb,
- unsigned int nhoff,
- unsigned int dataoff,
- u_int16_t l3num,
- u_int8_t protonum,
- struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l3proto *l3proto,
- const struct nf_conntrack_l4proto *l4proto);
-
-extern bool
-nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_l3proto *l3proto,
- const struct nf_conntrack_l4proto *l4proto);
+unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
+ struct sk_buff *skb);
+
+int nf_conntrack_init_net(struct net *net);
+void nf_conntrack_cleanup_net(struct net *net);
+void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
+
+int nf_conntrack_proto_pernet_init(struct net *net);
+void nf_conntrack_proto_pernet_fini(struct net *net);
+
+int nf_conntrack_proto_init(void);
+void nf_conntrack_proto_fini(void);
+
+int nf_conntrack_init_start(void);
+void nf_conntrack_cleanup_start(void);
+
+void nf_conntrack_init_end(void);
+void nf_conntrack_cleanup_end(void);
+
+bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff,
+ unsigned int dataoff, u_int16_t l3num, u_int8_t protonum,
+ struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_l3proto *l3proto,
+ const struct nf_conntrack_l4proto *l4proto);
+
+bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
+ const struct nf_conntrack_tuple *orig,
+ const struct nf_conntrack_l3proto *l3proto,
+ const struct nf_conntrack_l4proto *l4proto);
/* Find a connection corresponding to a tuple. */
-extern struct nf_conntrack_tuple_hash *
+struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple);
-extern int __nf_conntrack_confirm(struct sk_buff *skb);
+int __nf_conntrack_confirm(struct sk_buff *skb);
/* Confirm a connection: returns NF_DROP if packet must be dropped. */
static inline int nf_conntrack_confirm(struct sk_buff *skb)
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 092dc651689f..0e3d08e4b1d3 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -68,10 +68,12 @@ struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
};
-extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
+int nf_conntrack_register_notifier(struct net *net,
+ struct nf_ct_event_notifier *nb);
+void nf_conntrack_unregister_notifier(struct net *net,
+ struct nf_ct_event_notifier *nb);
-extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
+void nf_ct_deliver_cached_events(struct nf_conn *ct);
static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
@@ -166,8 +168,10 @@ struct nf_exp_event_notifier {
int (*fcn)(unsigned int events, struct nf_exp_event *item);
};
-extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
+int nf_ct_expect_register_notifier(struct net *net,
+ struct nf_exp_event_notifier *nb);
+void nf_ct_expect_unregister_notifier(struct net *net,
+ struct nf_exp_event_notifier *nb);
static inline void
nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -207,11 +211,11 @@ nf_ct_expect_event(enum ip_conntrack_expect_events event,
nf_ct_expect_event_report(event, exp, 0, 0);
}
-extern int nf_conntrack_ecache_pernet_init(struct net *net);
-extern void nf_conntrack_ecache_pernet_fini(struct net *net);
+int nf_conntrack_ecache_pernet_init(struct net *net);
+void nf_conntrack_ecache_pernet_fini(struct net *net);
-extern int nf_conntrack_ecache_init(void);
-extern void nf_conntrack_ecache_fini(void);
+int nf_conntrack_ecache_init(void);
+void nf_conntrack_ecache_fini(void);
#else /* CONFIG_NF_CONNTRACK_EVENTS */
static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 88a1d4060d52..86372ae0ee84 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -73,7 +73,7 @@ static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
((id##_TYPE *)__nf_ct_ext_find((ext), (id)))
/* Destroy all relationships */
-extern void __nf_ct_ext_destroy(struct nf_conn *ct);
+void __nf_ct_ext_destroy(struct nf_conn *ct);
static inline void nf_ct_ext_destroy(struct nf_conn *ct)
{
if (ct->ext)
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 26c4ae5bfbb8..6cf614bc0029 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -52,21 +52,24 @@ struct nf_conntrack_helper {
unsigned int queue_num; /* For user-space helpers. */
};
-extern struct nf_conntrack_helper *
-__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum);
+struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name,
+ u16 l3num, u8 protonum);
-extern struct nf_conntrack_helper *
-nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum);
+struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name,
+ u16 l3num,
+ u8 protonum);
-extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
-extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
+int nf_conntrack_helper_register(struct nf_conntrack_helper *);
+void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
-extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, struct nf_conntrack_helper *helper, gfp_t gfp);
+struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct,
+ struct nf_conntrack_helper *helper,
+ gfp_t gfp);
-extern int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
- gfp_t flags);
+int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
+ gfp_t flags);
-extern void nf_ct_helper_destroy(struct nf_conn *ct);
+void nf_ct_helper_destroy(struct nf_conn *ct);
static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
{
@@ -82,17 +85,16 @@ static inline void *nfct_help_data(const struct nf_conn *ct)
return (void *)help->data;
}
-extern int nf_conntrack_helper_pernet_init(struct net *net);
-extern void nf_conntrack_helper_pernet_fini(struct net *net);
+int nf_conntrack_helper_pernet_init(struct net *net);
+void nf_conntrack_helper_pernet_fini(struct net *net);
-extern int nf_conntrack_helper_init(void);
-extern void nf_conntrack_helper_fini(void);
+int nf_conntrack_helper_init(void);
+void nf_conntrack_helper_fini(void);
-extern int nf_conntrack_broadcast_help(struct sk_buff *skb,
- unsigned int protoff,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int timeout);
+int nf_conntrack_broadcast_help(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int timeout);
struct nf_ct_helper_expectfn {
struct list_head head;
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 3bb89eac3fa1..3efab704b7eb 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -77,17 +77,17 @@ struct nf_conntrack_l3proto {
extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
/* Protocol pernet registration. */
-extern int nf_ct_l3proto_pernet_register(struct net *net,
- struct nf_conntrack_l3proto *proto);
-extern void nf_ct_l3proto_pernet_unregister(struct net *net,
- struct nf_conntrack_l3proto *proto);
+int nf_ct_l3proto_pernet_register(struct net *net,
+ struct nf_conntrack_l3proto *proto);
+void nf_ct_l3proto_pernet_unregister(struct net *net,
+ struct nf_conntrack_l3proto *proto);
/* Protocol global registration. */
-extern int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto);
-extern void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto);
+int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto);
+void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto);
-extern struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
-extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
+struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
+void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
/* Existing built-in protocols */
extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index b411d7b17dec..4c8d573830b7 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -114,22 +114,22 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
#define MAX_NF_CT_PROTO 256
-extern struct nf_conntrack_l4proto *
-__nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto);
+struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto,
+ u_int8_t l4proto);
-extern struct nf_conntrack_l4proto *
-nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto);
-extern void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
+struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto,
+ u_int8_t l4proto);
+void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
/* Protocol pernet registration. */
-extern int nf_ct_l4proto_pernet_register(struct net *net,
- struct nf_conntrack_l4proto *proto);
-extern void nf_ct_l4proto_pernet_unregister(struct net *net,
- struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_pernet_register(struct net *net,
+ struct nf_conntrack_l4proto *proto);
+void nf_ct_l4proto_pernet_unregister(struct net *net,
+ struct nf_conntrack_l4proto *proto);
/* Protocol global registration. */
-extern int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
-extern void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
+void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
{
@@ -140,11 +140,11 @@ static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
}
/* Generic netlink helpers */
-extern int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
- const struct nf_conntrack_tuple *tuple);
-extern int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
- struct nf_conntrack_tuple *t);
-extern int nf_ct_port_nlattr_tuple_size(void);
+int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
+ const struct nf_conntrack_tuple *tuple);
+int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
+ struct nf_conntrack_tuple *t);
+int nf_ct_port_nlattr_tuple_size(void);
extern const struct nla_policy nf_ct_port_nla_policy[];
#ifdef CONFIG_SYSCTL
diff --git a/include/net/netfilter/nf_conntrack_seqadj.h b/include/net/netfilter/nf_conntrack_seqadj.h
index f6177a5fe0ca..4b3362991a25 100644
--- a/include/net/netfilter/nf_conntrack_seqadj.h
+++ b/include/net/netfilter/nf_conntrack_seqadj.h
@@ -30,22 +30,18 @@ static inline struct nf_conn_seqadj *nfct_seqadj_ext_add(struct nf_conn *ct)
return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC);
}
-extern int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- s32 off);
-extern int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- __be32 seq, s32 off);
-extern void nf_ct_tcp_seqadj_set(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- s32 off);
-
-extern int nf_ct_seq_adjust(struct sk_buff *skb,
- struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- unsigned int protoff);
-extern s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir,
- u32 seq);
-
-extern int nf_conntrack_seqadj_init(void);
-extern void nf_conntrack_seqadj_fini(void);
+int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ s32 off);
+int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ __be32 seq, s32 off);
+void nf_ct_tcp_seqadj_set(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, s32 off);
+
+int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, unsigned int protoff);
+s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, u32 seq);
+
+int nf_conntrack_seqadj_init(void);
+void nf_conntrack_seqadj_fini(void);
#endif /* _NF_CONNTRACK_SEQADJ_H */
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index f572f313d6f1..6793614e6502 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -56,22 +56,20 @@ struct synproxy_options {
struct tcphdr;
struct xt_synproxy_info;
-extern bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
- const struct tcphdr *th,
- struct synproxy_options *opts);
-extern unsigned int synproxy_options_size(const struct synproxy_options *opts);
-extern void synproxy_build_options(struct tcphdr *th,
- const struct synproxy_options *opts);
+bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+ const struct tcphdr *th,
+ struct synproxy_options *opts);
+unsigned int synproxy_options_size(const struct synproxy_options *opts);
+void synproxy_build_options(struct tcphdr *th,
+ const struct synproxy_options *opts);
-extern void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
- struct synproxy_options *opts);
-extern void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
+void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
+ struct synproxy_options *opts);
+void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
-extern unsigned int synproxy_tstamp_adjust(struct sk_buff *skb,
- unsigned int protoff,
- struct tcphdr *th,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- const struct nf_conn_synproxy *synproxy);
+unsigned int synproxy_tstamp_adjust(struct sk_buff *skb, unsigned int protoff,
+ struct tcphdr *th, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_conn_synproxy *synproxy);
#endif /* _NF_CONNTRACK_SYNPROXY_H */
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index d23aceb16d94..62308713dd7f 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -76,8 +76,8 @@ nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-extern int nf_conntrack_timeout_init(void);
-extern void nf_conntrack_timeout_fini(void);
+int nf_conntrack_timeout_init(void);
+void nf_conntrack_timeout_fini(void);
#else
static inline int nf_conntrack_timeout_init(void)
{
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
index b00461413efd..300ae2209f25 100644
--- a/include/net/netfilter/nf_conntrack_timestamp.h
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -48,11 +48,11 @@ static inline void nf_ct_set_tstamp(struct net *net, bool enable)
}
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
-extern int nf_conntrack_tstamp_pernet_init(struct net *net);
-extern void nf_conntrack_tstamp_pernet_fini(struct net *net);
+int nf_conntrack_tstamp_pernet_init(struct net *net);
+void nf_conntrack_tstamp_pernet_fini(struct net *net);
-extern int nf_conntrack_tstamp_init(void);
-extern void nf_conntrack_tstamp_fini(void);
+int nf_conntrack_tstamp_init(void);
+void nf_conntrack_tstamp_fini(void);
#else
static inline int nf_conntrack_tstamp_pernet_init(struct net *net)
{
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index 59a192420053..07eaaf604092 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -41,13 +41,16 @@ struct nf_conn_nat {
};
/* Set up the info structure to map into this range. */
-extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
- const struct nf_nat_range *range,
- enum nf_nat_manip_type maniptype);
+unsigned int nf_nat_setup_info(struct nf_conn *ct,
+ const struct nf_nat_range *range,
+ enum nf_nat_manip_type maniptype);
+
+extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct,
+ unsigned int hooknum);
/* Is this tuple already taken? (not by us)*/
-extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
- const struct nf_conn *ignored_conntrack);
+int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_conntrack);
static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
{
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index 972e1e47ec79..fbfd1ba4254e 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -7,12 +7,10 @@
/* This header used to share core functionality between the standalone
NAT module, and the compatibility layer's use of NAT for masquerading. */
-extern unsigned int nf_nat_packet(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int hooknum,
- struct sk_buff *skb);
+unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int hooknum, struct sk_buff *skb);
-extern int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family);
+int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family);
static inline int nf_nat_initialized(struct nf_conn *ct,
enum nf_nat_manip_type manip)
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
index 404324d1d0c4..01bcc6bfbcc9 100644
--- a/include/net/netfilter/nf_nat_helper.h
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -7,14 +7,11 @@
struct sk_buff;
/* These return true or false. */
-extern int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned int match_offset,
- unsigned int match_len,
- const char *rep_buffer,
- unsigned int rep_len, bool adjust);
+int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned int match_offset,
+ unsigned int match_len, const char *rep_buffer,
+ unsigned int rep_len, bool adjust);
static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
struct nf_conn *ct,
@@ -30,18 +27,14 @@ static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
rep_buffer, rep_len, true);
}
-extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned int match_offset,
- unsigned int match_len,
- const char *rep_buffer,
- unsigned int rep_len);
+int nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned int match_offset,
+ unsigned int match_len, const char *rep_buffer,
+ unsigned int rep_len);
/* Setup NAT on this expected conntrack so it follows master, but goes
* to port ct->master->saved_proto. */
-extern void nf_nat_follow_master(struct nf_conn *ct,
- struct nf_conntrack_expect *this);
+void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this);
#endif
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index bd3b97e02c82..5a2919b2e09a 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -35,18 +35,15 @@ struct nf_nat_l3proto {
struct nf_nat_range *range);
};
-extern int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
-extern void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *);
-extern const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
-
-extern int nf_nat_icmp_reply_translation(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int hooknum);
-extern int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int hooknum,
- unsigned int hdrlen);
+int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
+void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *);
+const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
+
+int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int hooknum);
+int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int hooknum, unsigned int hdrlen);
#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
index 24feb68d1bcc..12f4cc841b6e 100644
--- a/include/net/netfilter/nf_nat_l4proto.h
+++ b/include/net/netfilter/nf_nat_l4proto.h
@@ -42,10 +42,11 @@ struct nf_nat_l4proto {
};
/* Protocol registration. */
-extern int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
-extern void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto);
+int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
+void nf_nat_l4proto_unregister(u8 l3proto,
+ const struct nf_nat_l4proto *l4proto);
-extern const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
+const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
/* Built-in protocols. */
extern const struct nf_nat_l4proto nf_nat_l4proto_tcp;
@@ -54,19 +55,18 @@ extern const struct nf_nat_l4proto nf_nat_l4proto_icmp;
extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6;
extern const struct nf_nat_l4proto nf_nat_l4proto_unknown;
-extern bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype,
- const union nf_conntrack_man_proto *min,
- const union nf_conntrack_man_proto *max);
+bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype,
+ const union nf_conntrack_man_proto *min,
+ const union nf_conntrack_man_proto *max);
-extern void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct,
- u16 *rover);
+void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
+ struct nf_conntrack_tuple *tuple,
+ const struct nf_nat_range *range,
+ enum nf_nat_manip_type maniptype,
+ const struct nf_conn *ct, u16 *rover);
-extern int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
- struct nf_nat_range *range);
+int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
+ struct nf_nat_range *range);
#endif /*_NF_NAT_L4PROTO_H*/
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index aaba4bbcdda0..c1d5b3e34a21 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -28,7 +28,7 @@ struct nf_queue_handler {
void nf_register_queue_handler(const struct nf_queue_handler *qh);
void nf_unregister_queue_handler(void);
-extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
+void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
new file mode 100644
index 000000000000..5a91abfc0c30
--- /dev/null
+++ b/include/net/netfilter/nf_tables.h
@@ -0,0 +1,519 @@
+#ifndef _NET_NF_TABLES_H
+#define _NET_NF_TABLES_H
+
+#include <linux/list.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netlink.h>
+
+#define NFT_JUMP_STACK_SIZE 16
+
+struct nft_pktinfo {
+ struct sk_buff *skb;
+ const struct net_device *in;
+ const struct net_device *out;
+ u8 hooknum;
+ u8 nhoff;
+ u8 thoff;
+ /* for x_tables compatibility */
+ struct xt_action_param xt;
+};
+
+static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out)
+{
+ pkt->skb = skb;
+ pkt->in = pkt->xt.in = in;
+ pkt->out = pkt->xt.out = out;
+ pkt->hooknum = pkt->xt.hooknum = ops->hooknum;
+ pkt->xt.family = ops->pf;
+}
+
+struct nft_data {
+ union {
+ u32 data[4];
+ struct {
+ u32 verdict;
+ struct nft_chain *chain;
+ };
+ };
+} __attribute__((aligned(__alignof__(u64))));
+
+static inline int nft_data_cmp(const struct nft_data *d1,
+ const struct nft_data *d2,
+ unsigned int len)
+{
+ return memcmp(d1->data, d2->data, len);
+}
+
+static inline void nft_data_copy(struct nft_data *dst,
+ const struct nft_data *src)
+{
+ BUILD_BUG_ON(__alignof__(*dst) != __alignof__(u64));
+ *(u64 *)&dst->data[0] = *(u64 *)&src->data[0];
+ *(u64 *)&dst->data[2] = *(u64 *)&src->data[2];
+}
+
+static inline void nft_data_debug(const struct nft_data *data)
+{
+ pr_debug("data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n",
+ data->data[0], data->data[1],
+ data->data[2], data->data[3]);
+}
+
+/**
+ * struct nft_ctx - nf_tables rule/set context
+ *
+ * @net: net namespace
+ * @skb: netlink skb
+ * @nlh: netlink message header
+ * @afi: address family info
+ * @table: the table the chain is contained in
+ * @chain: the chain the rule is contained in
+ * @nla: netlink attributes
+ */
+struct nft_ctx {
+ struct net *net;
+ const struct sk_buff *skb;
+ const struct nlmsghdr *nlh;
+ const struct nft_af_info *afi;
+ const struct nft_table *table;
+ const struct nft_chain *chain;
+ const struct nlattr * const *nla;
+};
+
+struct nft_data_desc {
+ enum nft_data_types type;
+ unsigned int len;
+};
+
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+ struct nft_data_desc *desc, const struct nlattr *nla);
+void nft_data_uninit(const struct nft_data *data, enum nft_data_types type);
+int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
+ enum nft_data_types type, unsigned int len);
+
+static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
+{
+ return reg == NFT_REG_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE;
+}
+
+static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
+{
+ return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
+}
+
+int nft_validate_input_register(enum nft_registers reg);
+int nft_validate_output_register(enum nft_registers reg);
+int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
+ const struct nft_data *data,
+ enum nft_data_types type);
+
+/**
+ * struct nft_set_elem - generic representation of set elements
+ *
+ * @cookie: implementation specific element cookie
+ * @key: element key
+ * @data: element data (maps only)
+ * @flags: element flags (end of interval)
+ *
+ * The cookie can be used to store a handle to the element for subsequent
+ * removal.
+ */
+struct nft_set_elem {
+ void *cookie;
+ struct nft_data key;
+ struct nft_data data;
+ u32 flags;
+};
+
+struct nft_set;
+struct nft_set_iter {
+ unsigned int count;
+ unsigned int skip;
+ int err;
+ int (*fn)(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ const struct nft_set_iter *iter,
+ const struct nft_set_elem *elem);
+};
+
+/**
+ * struct nft_set_ops - nf_tables set operations
+ *
+ * @lookup: look up an element within the set
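+ * @get: get an individual element, identified by its key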
+ * @insert: insert new element into set
+ * @remove: remove element from set
+ * @walk: iterate over all set elements
+ * @privsize: function to return size of set private data
+ * @init: initialize private data of new set instance
+ * @destroy: destroy private data of set instance
+ * @list: nf_tables_set_ops list node
+ * @owner: module reference
+ * @features: features supported by the implementation
+ */
+struct nft_set_ops {
+ bool (*lookup)(const struct nft_set *set,
+ const struct nft_data *key,
+ struct nft_data *data);
+ int (*get)(const struct nft_set *set,
+ struct nft_set_elem *elem);
+ int (*insert)(const struct nft_set *set,
+ const struct nft_set_elem *elem);
+ void (*remove)(const struct nft_set *set,
+ const struct nft_set_elem *elem);
+ void (*walk)(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ struct nft_set_iter *iter);
+
+ unsigned int (*privsize)(const struct nlattr * const nla[]);
+ int (*init)(const struct nft_set *set,
+ const struct nlattr * const nla[]);
+ void (*destroy)(const struct nft_set *set);
+
+ struct list_head list;
+ struct module *owner;
+ u32 features;
+};
+
+int nft_register_set(struct nft_set_ops *ops);
+void nft_unregister_set(struct nft_set_ops *ops);
+
+/**
+ * struct nft_set - nf_tables set instance
+ *
+ * @list: table set list node
+ * @bindings: list of set bindings
+ * @name: name of the set
+ * @ktype: key type (numeric type defined by userspace, not used in the kernel)
+ * @dtype: data type (verdict or numeric type defined by userspace)
+ * @ops: set ops
+ * @flags: set flags
+ * @klen: key length
+ * @dlen: data length
+ * @data: private set data
+ */
+struct nft_set {
+ struct list_head list;
+ struct list_head bindings;
+ char name[IFNAMSIZ];
+ u32 ktype;
+ u32 dtype;
+ /* runtime data below here */
+ const struct nft_set_ops *ops ____cacheline_aligned;
+ u16 flags;
+ u8 klen;
+ u8 dlen;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(u64))));
+};
+
+static inline void *nft_set_priv(const struct nft_set *set)
+{
+ return (void *)set->data;
+}
+
+struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
+ const struct nlattr *nla);
+
+/**
+ * struct nft_set_binding - nf_tables set binding
+ *
+ * @list: set bindings list node
+ * @chain: chain containing the rule bound to the set
+ *
+ * A set binding contains all information necessary for validation
+ * of new elements added to a bound set.
+ */
+struct nft_set_binding {
+ struct list_head list;
+ const struct nft_chain *chain;
+};
+
+int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding);
+void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding);
+
+
+/**
+ * struct nft_expr_type - nf_tables expression type
+ *
+ * @select_ops: function to select nft_expr_ops
+ * @ops: default ops, used when no select_ops function is present
+ * @list: used internally
+ * @name: Identifier
+ * @owner: module reference
+ * @policy: netlink attribute policy
+ * @maxattr: highest netlink attribute number
+ */
+struct nft_expr_type {
+ const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
+ const struct nlattr * const tb[]);
+ const struct nft_expr_ops *ops;
+ struct list_head list;
+ const char *name;
+ struct module *owner;
+ const struct nla_policy *policy;
+ unsigned int maxattr;
+};
+
+/**
+ * struct nft_expr_ops - nf_tables expression operations
+ *
+ * @eval: Expression evaluation function
+ * @size: full expression size, including private data size
+ * @init: initialization function
+ * @destroy: destruction function
+ * @dump: function to dump parameters
+ * @type: expression type
+ * @validate: validate expression, called during loop detection
+ * @data: extra data to attach to this expression operation
+ */
+struct nft_expr;
+struct nft_expr_ops {
+ void (*eval)(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt);
+ unsigned int size;
+
+ int (*init)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+ void (*destroy)(const struct nft_expr *expr);
+ int (*dump)(struct sk_buff *skb,
+ const struct nft_expr *expr);
+ int (*validate)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data);
+ const struct nft_expr_type *type;
+ void *data;
+};
+
+#define NFT_EXPR_MAXATTR 16
+#define NFT_EXPR_SIZE(size) (sizeof(struct nft_expr) + \
+ ALIGN(size, __alignof__(struct nft_expr)))
+
+/**
+ * struct nft_expr - nf_tables expression
+ *
+ * @ops: expression ops
+ * @data: expression private data
+ */
+struct nft_expr {
+ const struct nft_expr_ops *ops;
+ unsigned char data[];
+};
+
+static inline void *nft_expr_priv(const struct nft_expr *expr)
+{
+ return (void *)expr->data;
+}
+
+/**
+ * struct nft_rule - nf_tables rule
+ *
+ * @list: used internally
+ * @rcu_head: used internally for rcu
+ * @handle: rule handle
+ * @genmask: generation mask
+ * @dlen: length of expression data
+ * @data: expression data
+ */
+struct nft_rule {
+ struct list_head list;
+ struct rcu_head rcu_head;
+ u64 handle:46,
+ genmask:2,
+ dlen:16;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(struct nft_expr))));
+};
+
+/**
+ * struct nft_rule_trans - nf_tables rule update in transaction
+ *
+ * @list: used internally
+ * @rule: rule that needs to be updated
+ * @chain: chain that this rule belongs to
+ * @table: table for which this chain applies
+ * @nlh: netlink header of the message that contains this update
+ * @family: family expressed as AF_*
+ */
+struct nft_rule_trans {
+ struct list_head list;
+ struct nft_rule *rule;
+ const struct nft_chain *chain;
+ const struct nft_table *table;
+ const struct nlmsghdr *nlh;
+ u8 family;
+};
+
+static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
+{
+ return (struct nft_expr *)&rule->data[0];
+}
+
+static inline struct nft_expr *nft_expr_next(const struct nft_expr *expr)
+{
+ return ((void *)expr) + expr->ops->size;
+}
+
+static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
+{
+ return (struct nft_expr *)&rule->data[rule->dlen];
+}
+
+/*
+ * The last pointer isn't really necessary, but the compiler isn't able to
+ * determine that the result of nft_expr_last() is always the same since it
+ * can't assume that the dlen value wasn't changed within calls in the loop.
+ */
+#define nft_rule_for_each_expr(expr, last, rule) \
+ for ((expr) = nft_expr_first(rule), (last) = nft_expr_last(rule); \
+ (expr) != (last); \
+ (expr) = nft_expr_next(expr))
+
+enum nft_chain_flags {
+ NFT_BASE_CHAIN = 0x1,
+};
+
+/**
+ * struct nft_chain - nf_tables chain
+ *
+ * @rules: list of rules in the chain
+ * @list: used internally
+ * @rcu_head: used internally
+ * @net: net namespace that this chain belongs to
+ * @table: table that this chain belongs to
+ * @handle: chain handle
+ * @flags: bitmask of enum nft_chain_flags
+ * @use: number of jump references to this chain
+ * @level: length of longest path to this chain
+ * @name: name of the chain
+ */
+struct nft_chain {
+ struct list_head rules;
+ struct list_head list;
+ struct rcu_head rcu_head;
+ struct net *net;
+ struct nft_table *table;
+ u64 handle;
+ u8 flags;
+ u16 use;
+ u16 level;
+ char name[NFT_CHAIN_MAXNAMELEN];
+};
+
+enum nft_chain_type {
+ NFT_CHAIN_T_DEFAULT = 0,
+ NFT_CHAIN_T_ROUTE,
+ NFT_CHAIN_T_NAT,
+ NFT_CHAIN_T_MAX
+};
+
+struct nft_stats {
+ u64 bytes;
+ u64 pkts;
+};
+
+/**
+ * struct nft_base_chain - nf_tables base chain
+ *
+ * @ops: netfilter hook ops
+ * @type: chain type
+ * @policy: default policy
+ * @stats: per-cpu chain stats
+ * @chain: the chain
+ */
+struct nft_base_chain {
+ struct nf_hook_ops ops;
+ enum nft_chain_type type;
+ u8 policy;
+ struct nft_stats __percpu *stats;
+ struct nft_chain chain;
+};
+
+static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
+{
+ return container_of(chain, struct nft_base_chain, chain);
+}
+
+unsigned int nft_do_chain_pktinfo(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops);
+
+/**
+ * struct nft_table - nf_tables table
+ *
+ * @list: used internally
+ * @chains: chains in the table
+ * @sets: sets in the table
+ * @hgenerator: handle generator state
+ * @use: number of chain references to this table
+ * @flags: table flags (see enum nft_table_flags)
+ * @name: name of the table
+ */
+struct nft_table {
+ struct list_head list;
+ struct list_head chains;
+ struct list_head sets;
+ u64 hgenerator;
+ u32 use;
+ u16 flags;
+ char name[];
+};
+
+/**
+ * struct nft_af_info - nf_tables address family info
+ *
+ * @list: used internally
+ * @family: address family
+ * @nhooks: number of hooks in this family
+ * @owner: module owner
+ * @tables: used internally
+ * @hooks: hookfn overrides for packet validation
+ */
+struct nft_af_info {
+ struct list_head list;
+ int family;
+ unsigned int nhooks;
+ struct module *owner;
+ struct list_head tables;
+ nf_hookfn *hooks[NF_MAX_HOOKS];
+};
+
+int nft_register_afinfo(struct net *, struct nft_af_info *);
+void nft_unregister_afinfo(struct nft_af_info *);
+
+struct nf_chain_type {
+ unsigned int hook_mask;
+ const char *name;
+ enum nft_chain_type type;
+ nf_hookfn *fn[NF_MAX_HOOKS];
+ struct module *me;
+ int family;
+};
+
+int nft_register_chain_type(struct nf_chain_type *);
+void nft_unregister_chain_type(struct nf_chain_type *);
+
+int nft_register_expr(struct nft_expr_type *);
+void nft_unregister_expr(struct nft_expr_type *);
+
+#define MODULE_ALIAS_NFT_FAMILY(family) \
+ MODULE_ALIAS("nft-afinfo-" __stringify(family))
+
+#define MODULE_ALIAS_NFT_CHAIN(family, name) \
+ MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
+
+#define MODULE_ALIAS_NFT_EXPR(name) \
+ MODULE_ALIAS("nft-expr-" name)
+
+#define MODULE_ALIAS_NFT_SET() \
+ MODULE_ALIAS("nft-set")
+
+#endif /* _NET_NF_TABLES_H */
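
To make the expression API above concrete, here is a minimal sketch of how a module could register an expression type with nft_register_expr(). The expression name "noop", its private struct, and the omission of init/dump callbacks are illustrative choices, not part of this patch:

    /* Sketch of a do-nothing nf_tables expression module. */
    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/netfilter/nf_tables.h>
    #include <net/netfilter/nf_tables.h>

    struct nft_noop {
            u32 unused;                     /* hypothetical private data */
    };

    static void nft_noop_eval(const struct nft_expr *expr,
                              struct nft_data data[NFT_REG_MAX + 1],
                              const struct nft_pktinfo *pkt)
    {
            /* A real expression inspects pkt and the register file here. */
    }

    static struct nft_expr_type nft_noop_type;
    static const struct nft_expr_ops nft_noop_ops = {
            .type    = &nft_noop_type,
            .size    = NFT_EXPR_SIZE(sizeof(struct nft_noop)),
            .eval    = nft_noop_eval,
    };

    static struct nft_expr_type nft_noop_type = {
            .name    = "noop",
            .ops     = &nft_noop_ops,
            .maxattr = 0,
            .owner   = THIS_MODULE,
    };

    static int __init nft_noop_module_init(void)
    {
            return nft_register_expr(&nft_noop_type);
    }

    static void __exit nft_noop_module_exit(void)
    {
            nft_unregister_expr(&nft_noop_type);
    }

    module_init(nft_noop_module_init);
    module_exit(nft_noop_module_exit);
    MODULE_LICENSE("GPL");
    MODULE_ALIAS_NFT_EXPR("noop");
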
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
new file mode 100644
index 000000000000..cf2b7ae2b9d8
--- /dev/null
+++ b/include/net/netfilter/nf_tables_core.h
@@ -0,0 +1,42 @@
+#ifndef _NET_NF_TABLES_CORE_H
+#define _NET_NF_TABLES_CORE_H
+
+int nf_tables_core_module_init(void);
+void nf_tables_core_module_exit(void);
+
+int nft_immediate_module_init(void);
+void nft_immediate_module_exit(void);
+
+struct nft_cmp_fast_expr {
+ u32 data;
+ enum nft_registers sreg:8;
+ u8 len;
+};
+
+extern const struct nft_expr_ops nft_cmp_fast_ops;
+
+int nft_cmp_module_init(void);
+void nft_cmp_module_exit(void);
+
+int nft_lookup_module_init(void);
+void nft_lookup_module_exit(void);
+
+int nft_bitwise_module_init(void);
+void nft_bitwise_module_exit(void);
+
+int nft_byteorder_module_init(void);
+void nft_byteorder_module_exit(void);
+
+struct nft_payload {
+ enum nft_payload_bases base:8;
+ u8 offset;
+ u8 len;
+ enum nft_registers dreg:8;
+};
+
+extern const struct nft_expr_ops nft_payload_fast_ops;
+
+int nft_payload_module_init(void);
+void nft_payload_module_exit(void);
+
+#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
new file mode 100644
index 000000000000..1be1c2c197ee
--- /dev/null
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -0,0 +1,23 @@
+#ifndef _NF_TABLES_IPV4_H_
+#define _NF_TABLES_IPV4_H_
+
+#include <net/netfilter/nf_tables.h>
+#include <net/ip.h>
+
+static inline void
+nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out)
+{
+ struct iphdr *ip;
+
+ nft_set_pktinfo(pkt, ops, skb, in, out);
+
+ pkt->xt.thoff = ip_hdrlen(pkt->skb);
+ ip = ip_hdr(pkt->skb);
+ pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+}
+
+#endif
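
A sketch of how an IPv4 base chain hook would use the helper above. Only nft_set_pktinfo_ipv4() and nft_do_chain_pktinfo() come from the new headers; the hook function itself is a placeholder that assumes the nf_hookfn prototype used elsewhere in this tree (hook ops passed as the first argument):

    #include <linux/netfilter.h>
    #include <net/netfilter/nf_tables.h>
    #include <net/netfilter/nf_tables_ipv4.h>

    static unsigned int nft_ipv4_hook_sketch(const struct nf_hook_ops *ops,
                                             struct sk_buff *skb,
                                             const struct net_device *in,
                                             const struct net_device *out,
                                             int (*okfn)(struct sk_buff *))
    {
            struct nft_pktinfo pkt;

            /* Record hook, devices, and the IPv4 transport/fragment offsets. */
            nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);

            /* Run the rules of the base chain attached to these hook ops. */
            return nft_do_chain_pktinfo(&pkt, ops);
    }
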
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
new file mode 100644
index 000000000000..4a9b88a65963
--- /dev/null
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -0,0 +1,30 @@
+#ifndef _NF_TABLES_IPV6_H_
+#define _NF_TABLES_IPV6_H_
+
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/ipv6.h>
+
+static inline int
+nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out)
+{
+ int protohdr, thoff = 0;
+ unsigned short frag_off;
+
+ nft_set_pktinfo(pkt, ops, skb, in, out);
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+ /* If malformed, drop it */
+ if (protohdr < 0)
+ return -1;
+
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = frag_off;
+
+ return 0;
+}
+
+#endif
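
The IPv6 variant differs in one respect: nft_set_pktinfo_ipv6() can fail on a malformed extension-header chain, so a hypothetical hook (names illustrative, as above) drops the packet instead of running the chain:

    #include <linux/netfilter.h>
    #include <net/netfilter/nf_tables.h>
    #include <net/netfilter/nf_tables_ipv6.h>

    static unsigned int nft_ipv6_hook_sketch(const struct nf_hook_ops *ops,
                                             struct sk_buff *skb,
                                             const struct net_device *in,
                                             const struct net_device *out,
                                             int (*okfn)(struct sk_buff *))
    {
            struct nft_pktinfo pkt;

            /* A malformed extension header chain makes the packet unusable. */
            if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
                    return NF_DROP;

            return nft_do_chain_pktinfo(&pkt, ops);
    }
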
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 495c71f66e7e..79f45e19f31e 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -16,7 +16,7 @@ struct xt_rateest {
struct rcu_head rcu;
};
-extern struct xt_rateest *xt_rateest_lookup(const char *name);
-extern void xt_rateest_put(struct xt_rateest *est);
+struct xt_rateest *xt_rateest_lookup(const char *name);
+void xt_rateest_put(struct xt_rateest *est);
#endif /* _XT_RATEEST_H */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 9690b0f6698a..2b47eaadba8f 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -225,44 +225,31 @@ struct nl_info {
u32 portid;
};
-extern int netlink_rcv_skb(struct sk_buff *skb,
- int (*cb)(struct sk_buff *,
- struct nlmsghdr *));
-extern int nlmsg_notify(struct sock *sk, struct sk_buff *skb,
- u32 portid, unsigned int group, int report,
- gfp_t flags);
-
-extern int nla_validate(const struct nlattr *head,
- int len, int maxtype,
- const struct nla_policy *policy);
-extern int nla_parse(struct nlattr **tb, int maxtype,
- const struct nlattr *head, int len,
- const struct nla_policy *policy);
-extern int nla_policy_len(const struct nla_policy *, int);
-extern struct nlattr * nla_find(const struct nlattr *head,
- int len, int attrtype);
-extern size_t nla_strlcpy(char *dst, const struct nlattr *nla,
- size_t dstsize);
-extern int nla_memcpy(void *dest, const struct nlattr *src, int count);
-extern int nla_memcmp(const struct nlattr *nla, const void *data,
- size_t size);
-extern int nla_strcmp(const struct nlattr *nla, const char *str);
-extern struct nlattr * __nla_reserve(struct sk_buff *skb, int attrtype,
- int attrlen);
-extern void * __nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
-extern struct nlattr * nla_reserve(struct sk_buff *skb, int attrtype,
- int attrlen);
-extern void * nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
-extern void __nla_put(struct sk_buff *skb, int attrtype,
- int attrlen, const void *data);
-extern void __nla_put_nohdr(struct sk_buff *skb, int attrlen,
- const void *data);
-extern int nla_put(struct sk_buff *skb, int attrtype,
- int attrlen, const void *data);
-extern int nla_put_nohdr(struct sk_buff *skb, int attrlen,
- const void *data);
-extern int nla_append(struct sk_buff *skb, int attrlen,
- const void *data);
+int netlink_rcv_skb(struct sk_buff *skb,
+ int (*cb)(struct sk_buff *, struct nlmsghdr *));
+int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
+ unsigned int group, int report, gfp_t flags);
+
+int nla_validate(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy);
+int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+ int len, const struct nla_policy *policy);
+int nla_policy_len(const struct nla_policy *, int);
+struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
+size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
+int nla_memcpy(void *dest, const struct nlattr *src, int count);
+int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
+int nla_strcmp(const struct nlattr *nla, const char *str);
+struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
+struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
+void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data);
+void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
+int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
+int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
+int nla_append(struct sk_buff *skb, int attrlen, const void *data);
/**************************************************************************
* Netlink Messages
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index bf2ec2202c56..ee520cba2ec2 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -15,6 +15,10 @@ struct fib_rules_ops;
struct hlist_head;
struct fib_table;
struct sock;
+struct local_ports {
+ seqlock_t lock;
+ int range[2];
+};
struct netns_ipv4 {
#ifdef CONFIG_SYSCTL
@@ -62,10 +66,11 @@ struct netns_ipv4 {
int sysctl_icmp_ratemask;
int sysctl_icmp_errors_use_inbound_ifaddr;
+ struct local_ports sysctl_local_ports;
+
int sysctl_tcp_ecn;
kgid_t sysctl_ping_group_range[2];
- long sysctl_tcp_mem[3];
atomic_t dev_addr_genid;
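
The new local_ports member wraps the ephemeral port range in a seqlock so readers never observe a half-updated pair while a sysctl write changes both bounds. A reader would follow the usual seqlock retry pattern, roughly as below (a sketch; the helper name is illustrative):

    #include <linux/seqlock.h>
    #include <net/net_namespace.h>
    #include <net/netns/ipv4.h>

    /* Read both bounds consistently, retrying if a sysctl write raced with us. */
    static void example_get_local_port_range(struct net *net, int *low, int *high)
    {
            unsigned int seq;

            do {
                    seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
                    *low = net->ipv4.sysctl_local_ports.range[0];
                    *high = net->ipv4.sysctl_local_ports.range[1];
            } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
    }
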
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
new file mode 100644
index 000000000000..15d056d534e3
--- /dev/null
+++ b/include/net/netns/nftables.h
@@ -0,0 +1,19 @@
+#ifndef _NETNS_NFTABLES_H_
+#define _NETNS_NFTABLES_H_
+
+#include <linux/list.h>
+
+struct nft_af_info;
+
+struct netns_nftables {
+ struct list_head af_info;
+ struct list_head commit_list;
+ struct nft_af_info *ipv4;
+ struct nft_af_info *ipv6;
+ struct nft_af_info *arp;
+ struct nft_af_info *bridge;
+ u8 gencursor;
+ u8 genctr;
+};
+
+#endif
diff --git a/include/net/netrom.h b/include/net/netrom.h
index 121dcf854db5..110350aca3df 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -183,51 +183,50 @@ extern int sysctl_netrom_routing_control;
extern int sysctl_netrom_link_fails_count;
extern int sysctl_netrom_reset_circuit;
-extern int nr_rx_frame(struct sk_buff *, struct net_device *);
-extern void nr_destroy_socket(struct sock *);
+int nr_rx_frame(struct sk_buff *, struct net_device *);
+void nr_destroy_socket(struct sock *);
/* nr_dev.c */
-extern int nr_rx_ip(struct sk_buff *, struct net_device *);
-extern void nr_setup(struct net_device *);
+int nr_rx_ip(struct sk_buff *, struct net_device *);
+void nr_setup(struct net_device *);
/* nr_in.c */
-extern int nr_process_rx_frame(struct sock *, struct sk_buff *);
+int nr_process_rx_frame(struct sock *, struct sk_buff *);
/* nr_loopback.c */
-extern void nr_loopback_init(void);
-extern void nr_loopback_clear(void);
-extern int nr_loopback_queue(struct sk_buff *);
+void nr_loopback_init(void);
+void nr_loopback_clear(void);
+int nr_loopback_queue(struct sk_buff *);
/* nr_out.c */
-extern void nr_output(struct sock *, struct sk_buff *);
-extern void nr_send_nak_frame(struct sock *);
-extern void nr_kick(struct sock *);
-extern void nr_transmit_buffer(struct sock *, struct sk_buff *);
-extern void nr_establish_data_link(struct sock *);
-extern void nr_enquiry_response(struct sock *);
-extern void nr_check_iframes_acked(struct sock *, unsigned short);
+void nr_output(struct sock *, struct sk_buff *);
+void nr_send_nak_frame(struct sock *);
+void nr_kick(struct sock *);
+void nr_transmit_buffer(struct sock *, struct sk_buff *);
+void nr_establish_data_link(struct sock *);
+void nr_enquiry_response(struct sock *);
+void nr_check_iframes_acked(struct sock *, unsigned short);
/* nr_route.c */
-extern void nr_rt_device_down(struct net_device *);
-extern struct net_device *nr_dev_first(void);
-extern struct net_device *nr_dev_get(ax25_address *);
-extern int nr_rt_ioctl(unsigned int, void __user *);
-extern void nr_link_failed(ax25_cb *, int);
-extern int nr_route_frame(struct sk_buff *, ax25_cb *);
+void nr_rt_device_down(struct net_device *);
+struct net_device *nr_dev_first(void);
+struct net_device *nr_dev_get(ax25_address *);
+int nr_rt_ioctl(unsigned int, void __user *);
+void nr_link_failed(ax25_cb *, int);
+int nr_route_frame(struct sk_buff *, ax25_cb *);
extern const struct file_operations nr_nodes_fops;
extern const struct file_operations nr_neigh_fops;
-extern void nr_rt_free(void);
+void nr_rt_free(void);
/* nr_subr.c */
-extern void nr_clear_queues(struct sock *);
-extern void nr_frames_acked(struct sock *, unsigned short);
-extern void nr_requeue_frames(struct sock *);
-extern int nr_validate_nr(struct sock *, unsigned short);
-extern int nr_in_rx_window(struct sock *, unsigned short);
-extern void nr_write_internal(struct sock *, int);
+void nr_clear_queues(struct sock *);
+void nr_frames_acked(struct sock *, unsigned short);
+void nr_requeue_frames(struct sock *);
+int nr_validate_nr(struct sock *, unsigned short);
+int nr_in_rx_window(struct sock *, unsigned short);
+void nr_write_internal(struct sock *, int);
-extern void __nr_transmit_reply(struct sk_buff *skb, int mine,
- unsigned char cmdflags);
+void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags);
/*
* This routine is called when a Connect Acknowledge with the Choke Flag
@@ -247,24 +246,24 @@ do { \
__nr_transmit_reply((skb), (mine), NR_RESET); \
} while (0)
-extern void nr_disconnect(struct sock *, int);
+void nr_disconnect(struct sock *, int);
/* nr_timer.c */
-extern void nr_init_timers(struct sock *sk);
-extern void nr_start_heartbeat(struct sock *);
-extern void nr_start_t1timer(struct sock *);
-extern void nr_start_t2timer(struct sock *);
-extern void nr_start_t4timer(struct sock *);
-extern void nr_start_idletimer(struct sock *);
-extern void nr_stop_heartbeat(struct sock *);
-extern void nr_stop_t1timer(struct sock *);
-extern void nr_stop_t2timer(struct sock *);
-extern void nr_stop_t4timer(struct sock *);
-extern void nr_stop_idletimer(struct sock *);
-extern int nr_t1timer_running(struct sock *);
+void nr_init_timers(struct sock *sk);
+void nr_start_heartbeat(struct sock *);
+void nr_start_t1timer(struct sock *);
+void nr_start_t2timer(struct sock *);
+void nr_start_t4timer(struct sock *);
+void nr_start_idletimer(struct sock *);
+void nr_stop_heartbeat(struct sock *);
+void nr_stop_t1timer(struct sock *);
+void nr_stop_t2timer(struct sock *);
+void nr_stop_t4timer(struct sock *);
+void nr_stop_idletimer(struct sock *);
+int nr_t1timer_running(struct sock *);
/* sysctl_net_netrom.c */
-extern void nr_register_sysctl(void);
-extern void nr_unregister_sysctl(void);
+void nr_register_sysctl(void);
+void nr_unregister_sysctl(void);
#endif
diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h
new file mode 100644
index 000000000000..36acecd5f06c
--- /dev/null
+++ b/include/net/nfc/digital.h
@@ -0,0 +1,227 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __NFC_DIGITAL_H
+#define __NFC_DIGITAL_H
+
+#include <linux/skbuff.h>
+#include <net/nfc/nfc.h>
+
+/**
+ * Configuration types for in_configure_hw and tg_configure_hw.
+ */
+enum {
+ NFC_DIGITAL_CONFIG_RF_TECH = 0,
+ NFC_DIGITAL_CONFIG_FRAMING,
+};
+
+/**
+ * RF technology values passed as param argument to in_configure_hw and
+ * tg_configure_hw for NFC_DIGITAL_CONFIG_RF_TECH configuration type.
+ */
+enum {
+ NFC_DIGITAL_RF_TECH_106A = 0,
+ NFC_DIGITAL_RF_TECH_212F,
+ NFC_DIGITAL_RF_TECH_424F,
+
+ NFC_DIGITAL_RF_TECH_LAST,
+};
+
+/**
+ * Framing configuration passed as param argument to in_configure_hw and
+ * tg_configure_hw for NFC_DIGITAL_CONFIG_FRAMING configuration type.
+ */
+enum {
+ NFC_DIGITAL_FRAMING_NFCA_SHORT = 0,
+ NFC_DIGITAL_FRAMING_NFCA_STANDARD,
+ NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A,
+
+ NFC_DIGITAL_FRAMING_NFCA_T1T,
+ NFC_DIGITAL_FRAMING_NFCA_T2T,
+ NFC_DIGITAL_FRAMING_NFCA_NFC_DEP,
+
+ NFC_DIGITAL_FRAMING_NFCF,
+ NFC_DIGITAL_FRAMING_NFCF_T3T,
+ NFC_DIGITAL_FRAMING_NFCF_NFC_DEP,
+ NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED,
+
+ NFC_DIGITAL_FRAMING_LAST,
+};
+
+#define DIGITAL_MDAA_NFCID1_SIZE 3
+
+struct digital_tg_mdaa_params {
+ u16 sens_res;
+ u8 nfcid1[DIGITAL_MDAA_NFCID1_SIZE];
+ u8 sel_res;
+
+ u8 nfcid2[NFC_NFCID2_MAXSIZE];
+ u16 sc;
+};
+
+struct nfc_digital_dev;
+
+/**
+ * nfc_digital_cmd_complete_t - Definition of command result callback
+ *
+ * @ddev: nfc_digital_device ref
+ * @arg: user data
+ * @resp: response data
+ *
+ * The resp pointer can be an error code and will be checked with the IS_ERR()
+ * macro. The callback is responsible for freeing the resp sk_buff.
+ */
+typedef void (*nfc_digital_cmd_complete_t)(struct nfc_digital_dev *ddev,
+ void *arg, struct sk_buff *resp);
+
+/**
+ * Device side NFC Digital operations
+ *
+ * Initiator mode:
+ * @in_configure_hw: Hardware configuration for RF technology and communication
+ * framing in initiator mode. This is a synchronous function.
+ * @in_send_cmd: Initiator mode data exchange using RF technology and framing
+ * previously set with in_configure_hw. The peer response is returned
+ * through callback cb. If an io error occurs or the peer didn't reply
+ * within the specified timeout (ms), the error code is passed back through
+ * the resp pointer. This is an asynchronous function.
+ *
+ * Target mode: Only NFC-DEP protocol is supported in target mode.
+ * @tg_configure_hw: Hardware configuration for RF technology and communication
+ * framing in target mode. This is a synchronous function.
+ * @tg_send_cmd: Target mode data exchange using RF technology and framing
+ * previously set with tg_configure_hw. The peer next command is returned
+ * through callback cb. If an io error occurs or the peer didn't reply
+ * within the specified timeout (ms), the error code is passed back through
+ * the resp pointer. This is an asynchronous function.
+ * @tg_listen: Put the device in listen mode waiting for data from the peer
+ * device. This is an asynchronous function.
+ * @tg_listen_mdaa: If supported, put the device in automatic listen mode with
+ * mode detection and automatic anti-collision. In this mode, the device
+ * automatically detects the RF technology and executes the anti-collision
+ * detection using the command responses specified in mdaa_params. The
+ * mdaa_params structure contains SENS_RES, NFCID1, and SEL_RES for 106A RF
+ * tech. NFCID2 and system code (sc) for 212F and 424F. The driver returns
+ * the NFC-DEP ATR_REQ command through cb. The digital stack deducts the RF
+ * tech by analyzing the SoD of the frame containing the ATR_REQ command.
+ * This is an asynchronous function.
+ *
+ * @switch_rf: Turns the device radio on or off. The stack does not explicitly
+ * call switch_rf to turn the radio on. A call to in|tg_configure_hw must turn
+ * the device radio on.
+ * @abort_cmd: Discard the last sent command.
+ */
+struct nfc_digital_ops {
+ int (*in_configure_hw)(struct nfc_digital_dev *ddev, int type,
+ int param);
+ int (*in_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb,
+ u16 timeout, nfc_digital_cmd_complete_t cb,
+ void *arg);
+
+ int (*tg_configure_hw)(struct nfc_digital_dev *ddev, int type,
+ int param);
+ int (*tg_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb,
+ u16 timeout, nfc_digital_cmd_complete_t cb,
+ void *arg);
+ int (*tg_listen)(struct nfc_digital_dev *ddev, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg);
+ int (*tg_listen_mdaa)(struct nfc_digital_dev *ddev,
+ struct digital_tg_mdaa_params *mdaa_params,
+ u16 timeout, nfc_digital_cmd_complete_t cb,
+ void *arg);
+
+ int (*switch_rf)(struct nfc_digital_dev *ddev, bool on);
+ void (*abort_cmd)(struct nfc_digital_dev *ddev);
+};
+
+#define NFC_DIGITAL_POLL_MODE_COUNT_MAX 6 /* 106A, 212F, and 424F in & tg */
+
+typedef int (*digital_poll_t)(struct nfc_digital_dev *ddev, u8 rf_tech);
+
+struct digital_poll_tech {
+ u8 rf_tech;
+ digital_poll_t poll_func;
+};
+
+/**
+ * Driver capabilities - bit mask made of the following values
+ *
+ * @NFC_DIGITAL_DRV_CAPS_IN_CRC: The driver handles CRC calculation in initiator
+ * mode.
+ * @NFC_DIGITAL_DRV_CAPS_TG_CRC: The driver handles CRC calculation in target
+ * mode.
+ */
+#define NFC_DIGITAL_DRV_CAPS_IN_CRC 0x0001
+#define NFC_DIGITAL_DRV_CAPS_TG_CRC 0x0002
+
+struct nfc_digital_dev {
+ struct nfc_dev *nfc_dev;
+ struct nfc_digital_ops *ops;
+
+ u32 protocols;
+
+ int tx_headroom;
+ int tx_tailroom;
+
+ u32 driver_capabilities;
+ void *driver_data;
+
+ struct digital_poll_tech poll_techs[NFC_DIGITAL_POLL_MODE_COUNT_MAX];
+ u8 poll_tech_count;
+ u8 poll_tech_index;
+ struct mutex poll_lock;
+
+ struct work_struct cmd_work;
+ struct work_struct cmd_complete_work;
+ struct list_head cmd_queue;
+ struct mutex cmd_lock;
+
+ struct work_struct poll_work;
+
+ u8 curr_protocol;
+ u8 curr_rf_tech;
+ u8 curr_nfc_dep_pni;
+
+ int (*skb_check_crc)(struct sk_buff *skb);
+ void (*skb_add_crc)(struct sk_buff *skb);
+};
+
+struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
+ __u32 supported_protocols,
+ __u32 driver_capabilities,
+ int tx_headroom,
+ int tx_tailroom);
+void nfc_digital_free_device(struct nfc_digital_dev *ndev);
+int nfc_digital_register_device(struct nfc_digital_dev *ndev);
+void nfc_digital_unregister_device(struct nfc_digital_dev *ndev);
+
+static inline void nfc_digital_set_parent_dev(struct nfc_digital_dev *ndev,
+ struct device *dev)
+{
+ nfc_set_parent_dev(ndev->nfc_dev, dev);
+}
+
+static inline void nfc_digital_set_drvdata(struct nfc_digital_dev *dev,
+ void *data)
+{
+ dev->driver_data = data;
+}
+
+static inline void *nfc_digital_get_drvdata(struct nfc_digital_dev *dev)
+{
+ return dev->driver_data;
+}
+
+#endif /* __NFC_DIGITAL_H */
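
For orientation, a sketch of how a driver would sit on top of this new digital stack: allocate a device with its nfc_digital_ops, attach driver data and the parent device, then register. Everything prefixed my_ is a placeholder, and the stub callbacks only mark where real hardware access would go:

    #include <linux/nfc.h>
    #include <linux/skbuff.h>
    #include <net/nfc/digital.h>

    static int my_configure_hw(struct nfc_digital_dev *ddev, int type, int param)
    {
            return 0;       /* program RF tech or framing into real hardware */
    }

    static int my_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb,
                           u16 timeout, nfc_digital_cmd_complete_t cb, void *arg)
    {
            return -EOPNOTSUPP;     /* a real driver queues the frame here */
    }

    static int my_tg_listen(struct nfc_digital_dev *ddev, u16 timeout,
                            nfc_digital_cmd_complete_t cb, void *arg)
    {
            return -EOPNOTSUPP;     /* arm the hardware to wait for an initiator */
    }

    static int my_switch_rf(struct nfc_digital_dev *ddev, bool on)
    {
            return 0;
    }

    static void my_abort_cmd(struct nfc_digital_dev *ddev)
    {
    }

    static struct nfc_digital_ops my_digital_ops = {
            .in_configure_hw = my_configure_hw,
            .in_send_cmd     = my_send_cmd,
            .tg_configure_hw = my_configure_hw,
            .tg_send_cmd     = my_send_cmd,
            .tg_listen       = my_tg_listen,
            .switch_rf       = my_switch_rf,
            .abort_cmd       = my_abort_cmd,
    };

    static struct nfc_digital_dev *my_register(struct device *parent, void *priv)
    {
            struct nfc_digital_dev *ddev;

            ddev = nfc_digital_allocate_device(&my_digital_ops,
                                               NFC_PROTO_NFC_DEP_MASK,
                                               NFC_DIGITAL_DRV_CAPS_IN_CRC, 0, 0);
            if (!ddev)
                    return NULL;

            nfc_digital_set_parent_dev(ddev, parent);
            nfc_digital_set_drvdata(ddev, priv);

            if (nfc_digital_register_device(ddev)) {
                    nfc_digital_free_device(ddev);
                    return NULL;
            }
            return ddev;
    }
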
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index b64b7bce4b94..2eca2960ca9c 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -24,12 +24,6 @@
#include <net/nfc/nfc.h>
-struct nfc_phy_ops {
- int (*write)(void *dev_id, struct sk_buff *skb);
- int (*enable)(void *dev_id);
- void (*disable)(void *dev_id);
-};
-
struct nfc_hci_dev;
struct nfc_hci_ops {
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index 88785e5c6b2c..e5aa5acafea0 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -166,6 +166,10 @@
#define NCI_GID_NFCEE_MGMT 0x2
#define NCI_GID_PROPRIETARY 0xf
+/* ----- NCI over SPI head/crc(tail) room needed for outgoing frames ----- */
+#define NCI_SPI_HDR_LEN 4
+#define NCI_SPI_CRC_LEN 2
+
/* ---- NCI Packet structures ---- */
#define NCI_CTRL_HDR_SIZE 3
#define NCI_DATA_HDR_SIZE 3
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 99fc1f3a392a..6126f1f992b4 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -207,19 +207,9 @@ int nci_to_errno(__u8 code);
#define NCI_SPI_CRC_ENABLED 0x01
/* ----- NCI SPI structures ----- */
-struct nci_spi_dev;
-
-struct nci_spi_ops {
- int (*open)(struct nci_spi_dev *ndev);
- int (*close)(struct nci_spi_dev *ndev);
- void (*assert_int)(struct nci_spi_dev *ndev);
- void (*deassert_int)(struct nci_spi_dev *ndev);
-};
-
-struct nci_spi_dev {
- struct nci_dev *nci_dev;
+struct nci_spi {
+ struct nci_dev *ndev;
struct spi_device *spi;
- struct nci_spi_ops *ops;
unsigned int xfer_udelay; /* microseconds delay between
transactions */
@@ -227,31 +217,15 @@ struct nci_spi_dev {
struct completion req_completion;
u8 req_result;
-
- void *driver_data;
};
-/* ----- NCI SPI Devices ----- */
-struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi,
- struct nci_spi_ops *ops,
- u32 supported_protocols,
- u32 supported_se,
- u8 acknowledge_mode,
- unsigned int delay);
-void nci_spi_free_device(struct nci_spi_dev *ndev);
-int nci_spi_register_device(struct nci_spi_dev *ndev);
-void nci_spi_unregister_device(struct nci_spi_dev *ndev);
-int nci_spi_recv_frame(struct nci_spi_dev *ndev);
-
-static inline void nci_spi_set_drvdata(struct nci_spi_dev *ndev,
- void *data)
-{
- ndev->driver_data = data;
-}
-
-static inline void *nci_spi_get_drvdata(struct nci_spi_dev *ndev)
-{
- return ndev->driver_data;
-}
+/* ----- NCI SPI ----- */
+struct nci_spi *nci_spi_allocate_spi(struct spi_device *spi,
+ u8 acknowledge_mode, unsigned int delay,
+ struct nci_dev *ndev);
+int nci_spi_send(struct nci_spi *nspi,
+ struct completion *write_handshake_completion,
+ struct sk_buff *skb);
+struct sk_buff *nci_spi_read(struct nci_spi *nspi);
#endif /* __NCI_CORE_H */
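
With the rework above, the NCI-over-SPI code no longer owns an nci_dev; a SPI driver allocates an nci_spi companion for an existing nci_dev and shuttles frames with nci_spi_send()/nci_spi_read(). A sketch, with hypothetical driver glue (attach/IRQ names, priv struct) around the three helpers declared in this header:

    #include <linux/interrupt.h>
    #include <linux/spi/spi.h>
    #include <net/nfc/nci_core.h>

    struct my_spi_priv {
            struct nci_dev *ndev;   /* allocated and registered elsewhere */
            struct nci_spi *nspi;
    };

    static int my_spi_attach(struct spi_device *spi, struct my_spi_priv *priv)
    {
            /* CRC-protected framing, 10us between transfers (illustrative). */
            priv->nspi = nci_spi_allocate_spi(spi, NCI_SPI_CRC_ENABLED, 10,
                                              priv->ndev);
            return priv->nspi ? 0 : -ENOMEM;
    }

    static int my_spi_xmit(struct my_spi_priv *priv, struct sk_buff *skb)
    {
            /* This hypothetical hardware has no write-handshake line. */
            return nci_spi_send(priv->nspi, NULL, skb);
    }

    static irqreturn_t my_spi_irq_thread(int irq, void *data)
    {
            struct my_spi_priv *priv = data;
            struct sk_buff *skb;

            skb = nci_spi_read(priv->nspi);
            if (!skb)
                    return IRQ_NONE;

            /* Hand the decoded NCI frame to the NCI core here. */
            kfree_skb(skb); /* placeholder: a real driver would not drop it */
            return IRQ_HANDLED;
    }
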
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index f68ee68e4e3e..82fc4e43fc6e 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -28,9 +28,14 @@
#include <linux/device.h>
#include <linux/skbuff.h>
-#define nfc_dev_info(dev, fmt, arg...) dev_info((dev), "NFC: " fmt "\n", ## arg)
-#define nfc_dev_err(dev, fmt, arg...) dev_err((dev), "NFC: " fmt "\n", ## arg)
-#define nfc_dev_dbg(dev, fmt, arg...) dev_dbg((dev), fmt "\n", ## arg)
+#define nfc_info(dev, fmt, ...) dev_info((dev), "NFC: " fmt, ##__VA_ARGS__)
+#define nfc_err(dev, fmt, ...) dev_err((dev), "NFC: " fmt, ##__VA_ARGS__)
+
+struct nfc_phy_ops {
+ int (*write)(void *dev_id, struct sk_buff *skb);
+ int (*enable)(void *dev_id);
+ void (*disable)(void *dev_id);
+};
struct nfc_dev;
@@ -48,6 +53,8 @@ struct nfc_dev;
typedef void (*data_exchange_cb_t)(void *context, struct sk_buff *skb,
int err);
+typedef void (*se_io_cb_t)(void *context, u8 *apdu, size_t apdu_len, int err);
+
struct nfc_target;
struct nfc_ops {
@@ -74,12 +81,23 @@ struct nfc_ops {
int (*discover_se)(struct nfc_dev *dev);
int (*enable_se)(struct nfc_dev *dev, u32 se_idx);
int (*disable_se)(struct nfc_dev *dev, u32 se_idx);
+ int (*se_io) (struct nfc_dev *dev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context);
};
#define NFC_TARGET_IDX_ANY -1
#define NFC_MAX_GT_LEN 48
#define NFC_ATR_RES_GT_OFFSET 15
+/**
+ * struct nfc_target - NFC target description
+ *
+ * @sens_res: 2 bytes describing the target SENS_RES response, if the target
+ * is a type A one. The %sens_res most significant byte must be byte 2
+ * as described by the NFC Forum digital specification (i.e. the platform
+ * configuration one) while %sens_res least significant byte is byte 1.
+ */
struct nfc_target {
u32 idx;
u32 supported_protocols;
@@ -243,5 +261,6 @@ void nfc_driver_failure(struct nfc_dev *dev, int err);
int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type);
int nfc_remove_se(struct nfc_dev *dev, u32 se_idx);
+struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx);
#endif /* __NET_NFC_H */
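
Since struct nfc_phy_ops now lives in this header, both HCI and NCI drivers describe their physical link the same way. A sketch of a placeholder implementation (all my_ names are illustrative):

    #include <linux/skbuff.h>
    #include <net/nfc/nfc.h>

    static int my_phy_write(void *dev_id, struct sk_buff *skb)
    {
            /* dev_id is whatever the driver registered, e.g. its i2c client. */
            return 0;       /* push skb->data over the wire in a real driver */
    }

    static int my_phy_enable(void *dev_id)
    {
            return 0;       /* power the chip up, request its IRQ, ... */
    }

    static void my_phy_disable(void *dev_id)
    {
            /* power the chip down */
    }

    static struct nfc_phy_ops my_phy_ops = {
            .write   = my_phy_write,
            .enable  = my_phy_enable,
            .disable = my_phy_disable,
    };
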
diff --git a/include/net/p8022.h b/include/net/p8022.h
index 42e9fac51b31..05e41383856b 100644
--- a/include/net/p8022.h
+++ b/include/net/p8022.h
@@ -1,13 +1,13 @@
#ifndef _NET_P8022_H
#define _NET_P8022_H
-extern struct datalink_proto *
- register_8022_client(unsigned char type,
- int (*func)(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *pt,
- struct net_device *orig_dev));
-extern void unregister_8022_client(struct datalink_proto *proto);
+struct datalink_proto *
+register_8022_client(unsigned char type,
+ int (*func)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt,
+ struct net_device *orig_dev));
+void unregister_8022_client(struct datalink_proto *proto);
-extern struct datalink_proto *make_8023_client(void);
-extern void destroy_8023_client(struct datalink_proto *dl);
+struct datalink_proto *make_8023_client(void);
+void destroy_8023_client(struct datalink_proto *dl);
#endif
diff --git a/include/net/ping.h b/include/net/ping.h
index 5db0224b73ac..3f67704f3747 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -103,8 +103,8 @@ void ping_seq_stop(struct seq_file *seq, void *v);
int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo);
void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo);
-extern int __init ping_proc_init(void);
-extern void ping_proc_exit(void);
+int __init ping_proc_init(void);
+void ping_proc_exit(void);
#endif
void __init ping_init(void);
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 047c0476c0a0..fbf7676c9a02 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -96,20 +96,20 @@ extern const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS];
extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
#endif
-extern int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
-extern int inet_del_protocol(const struct net_protocol *prot, unsigned char num);
-extern int inet_add_offload(const struct net_offload *prot, unsigned char num);
-extern int inet_del_offload(const struct net_offload *prot, unsigned char num);
-extern void inet_register_protosw(struct inet_protosw *p);
-extern void inet_unregister_protosw(struct inet_protosw *p);
+int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
+int inet_del_protocol(const struct net_protocol *prot, unsigned char num);
+int inet_add_offload(const struct net_offload *prot, unsigned char num);
+int inet_del_offload(const struct net_offload *prot, unsigned char num);
+void inet_register_protosw(struct inet_protosw *p);
+void inet_unregister_protosw(struct inet_protosw *p);
#if IS_ENABLED(CONFIG_IPV6)
-extern int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
-extern int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
-extern int inet6_register_protosw(struct inet_protosw *p);
-extern void inet6_unregister_protosw(struct inet_protosw *p);
+int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
+int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
+int inet6_register_protosw(struct inet_protosw *p);
+void inet6_unregister_protosw(struct inet_protosw *p);
#endif
-extern int inet6_add_offload(const struct net_offload *prot, unsigned char num);
-extern int inet6_del_offload(const struct net_offload *prot, unsigned char num);
+int inet6_add_offload(const struct net_offload *prot, unsigned char num);
+int inet6_del_offload(const struct net_offload *prot, unsigned char num);
#endif /* _PROTOCOL_H */
diff --git a/include/net/psnap.h b/include/net/psnap.h
index fe456c295b04..78db4cc1306a 100644
--- a/include/net/psnap.h
+++ b/include/net/psnap.h
@@ -1,11 +1,11 @@
#ifndef _NET_PSNAP_H
#define _NET_PSNAP_H
-extern struct datalink_proto *
+struct datalink_proto *
register_snap_client(const unsigned char *desc,
int (*rcvfunc)(struct sk_buff *, struct net_device *,
struct packet_type *,
struct net_device *orig_dev));
-extern void unregister_snap_client(struct datalink_proto *proto);
+void unregister_snap_client(struct datalink_proto *proto);
#endif
diff --git a/include/net/raw.h b/include/net/raw.h
index 42ce6fe7a2d5..6a40c6562dd2 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -26,7 +26,7 @@ extern struct proto raw_prot;
void raw_icmp_error(struct sk_buff *, int, u32);
int raw_local_deliver(struct sk_buff *, int);
-extern int raw_rcv(struct sock *, struct sk_buff *);
+int raw_rcv(struct sock *, struct sk_buff *);
#define RAW_HTABLE_SIZE MAX_INET_PROTOS
@@ -36,8 +36,8 @@ struct raw_hashinfo {
};
#ifdef CONFIG_PROC_FS
-extern int raw_proc_init(void);
-extern void raw_proc_exit(void);
+int raw_proc_init(void);
+void raw_proc_exit(void);
struct raw_iter_state {
struct seq_net_private p;
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
index e7ea660e4db6..87783dea0791 100644
--- a/include/net/rawv6.h
+++ b/include/net/rawv6.h
@@ -7,8 +7,7 @@ void raw6_icmp_error(struct sk_buff *, int nexthdr,
u8 type, u8 code, int inner_offset, __be32);
bool raw6_local_deliver(struct sk_buff *, int);
-extern int rawv6_rcv(struct sock *sk,
- struct sk_buff *skb);
+int rawv6_rcv(struct sock *sk, struct sk_buff *skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 59795e42c8b6..7f830ff67f08 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -43,11 +43,12 @@ struct request_sock_ops {
struct request_sock *req);
};
-extern int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
+int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
/* struct request_sock - mini sock to represent a connection request
*/
struct request_sock {
+ struct sock_common __req_common;
struct request_sock *dl_next;
u16 mss;
u8 num_retrans; /* number of retransmits */
@@ -162,13 +163,13 @@ struct request_sock_queue {
*/
};
-extern int reqsk_queue_alloc(struct request_sock_queue *queue,
- unsigned int nr_table_entries);
+int reqsk_queue_alloc(struct request_sock_queue *queue,
+ unsigned int nr_table_entries);
-extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
-extern void reqsk_queue_destroy(struct request_sock_queue *queue);
-extern void reqsk_fastopen_remove(struct sock *sk,
- struct request_sock *req, bool reset);
+void __reqsk_queue_destroy(struct request_sock_queue *queue);
+void reqsk_queue_destroy(struct request_sock_queue *queue);
+void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
+ bool reset);
static inline struct request_sock *
reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
diff --git a/include/net/rose.h b/include/net/rose.h
index 555dd198aab7..50811fe2c585 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -160,38 +160,42 @@ extern int sysctl_rose_routing_control;
extern int sysctl_rose_link_fail_timeout;
extern int sysctl_rose_maximum_vcs;
extern int sysctl_rose_window_size;
-extern int rosecmp(rose_address *, rose_address *);
-extern int rosecmpm(rose_address *, rose_address *, unsigned short);
-extern char *rose2asc(char *buf, const rose_address *);
-extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
-extern void rose_kill_by_neigh(struct rose_neigh *);
-extern unsigned int rose_new_lci(struct rose_neigh *);
-extern int rose_rx_call_request(struct sk_buff *, struct net_device *, struct rose_neigh *, unsigned int);
-extern void rose_destroy_socket(struct sock *);
+
+int rosecmp(rose_address *, rose_address *);
+int rosecmpm(rose_address *, rose_address *, unsigned short);
+char *rose2asc(char *buf, const rose_address *);
+struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
+void rose_kill_by_neigh(struct rose_neigh *);
+unsigned int rose_new_lci(struct rose_neigh *);
+int rose_rx_call_request(struct sk_buff *, struct net_device *,
+ struct rose_neigh *, unsigned int);
+void rose_destroy_socket(struct sock *);
/* rose_dev.c */
-extern void rose_setup(struct net_device *);
+void rose_setup(struct net_device *);
/* rose_in.c */
-extern int rose_process_rx_frame(struct sock *, struct sk_buff *);
+int rose_process_rx_frame(struct sock *, struct sk_buff *);
/* rose_link.c */
-extern void rose_start_ftimer(struct rose_neigh *);
-extern void rose_stop_ftimer(struct rose_neigh *);
-extern void rose_stop_t0timer(struct rose_neigh *);
-extern int rose_ftimer_running(struct rose_neigh *);
-extern void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *, unsigned short);
-extern void rose_transmit_clear_request(struct rose_neigh *, unsigned int, unsigned char, unsigned char);
-extern void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
+void rose_start_ftimer(struct rose_neigh *);
+void rose_stop_ftimer(struct rose_neigh *);
+void rose_stop_t0timer(struct rose_neigh *);
+int rose_ftimer_running(struct rose_neigh *);
+void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *,
+ unsigned short);
+void rose_transmit_clear_request(struct rose_neigh *, unsigned int,
+ unsigned char, unsigned char);
+void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
/* rose_loopback.c */
-extern void rose_loopback_init(void);
-extern void rose_loopback_clear(void);
-extern int rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
+void rose_loopback_init(void);
+void rose_loopback_clear(void);
+int rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
/* rose_out.c */
-extern void rose_kick(struct sock *);
-extern void rose_enquiry_response(struct sock *);
+void rose_kick(struct sock *);
+void rose_enquiry_response(struct sock *);
/* rose_route.c */
extern struct rose_neigh *rose_loopback_neigh;
@@ -199,43 +203,45 @@ extern const struct file_operations rose_neigh_fops;
extern const struct file_operations rose_nodes_fops;
extern const struct file_operations rose_routes_fops;
-extern void rose_add_loopback_neigh(void);
-extern int __must_check rose_add_loopback_node(rose_address *);
-extern void rose_del_loopback_node(rose_address *);
-extern void rose_rt_device_down(struct net_device *);
-extern void rose_link_device_down(struct net_device *);
-extern struct net_device *rose_dev_first(void);
-extern struct net_device *rose_dev_get(rose_address *);
-extern struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
-extern struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *, unsigned char *, int);
-extern int rose_rt_ioctl(unsigned int, void __user *);
-extern void rose_link_failed(ax25_cb *, int);
-extern int rose_route_frame(struct sk_buff *, ax25_cb *);
-extern void rose_rt_free(void);
+void rose_add_loopback_neigh(void);
+int __must_check rose_add_loopback_node(rose_address *);
+void rose_del_loopback_node(rose_address *);
+void rose_rt_device_down(struct net_device *);
+void rose_link_device_down(struct net_device *);
+struct net_device *rose_dev_first(void);
+struct net_device *rose_dev_get(rose_address *);
+struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
+struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *,
+ unsigned char *, int);
+int rose_rt_ioctl(unsigned int, void __user *);
+void rose_link_failed(ax25_cb *, int);
+int rose_route_frame(struct sk_buff *, ax25_cb *);
+void rose_rt_free(void);
/* rose_subr.c */
-extern void rose_clear_queues(struct sock *);
-extern void rose_frames_acked(struct sock *, unsigned short);
-extern void rose_requeue_frames(struct sock *);
-extern int rose_validate_nr(struct sock *, unsigned short);
-extern void rose_write_internal(struct sock *, int);
-extern int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
-extern int rose_parse_facilities(unsigned char *, unsigned int, struct rose_facilities_struct *);
-extern void rose_disconnect(struct sock *, int, int, int);
+void rose_clear_queues(struct sock *);
+void rose_frames_acked(struct sock *, unsigned short);
+void rose_requeue_frames(struct sock *);
+int rose_validate_nr(struct sock *, unsigned short);
+void rose_write_internal(struct sock *, int);
+int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
+int rose_parse_facilities(unsigned char *, unsigned int,
+ struct rose_facilities_struct *);
+void rose_disconnect(struct sock *, int, int, int);
/* rose_timer.c */
-extern void rose_start_heartbeat(struct sock *);
-extern void rose_start_t1timer(struct sock *);
-extern void rose_start_t2timer(struct sock *);
-extern void rose_start_t3timer(struct sock *);
-extern void rose_start_hbtimer(struct sock *);
-extern void rose_start_idletimer(struct sock *);
-extern void rose_stop_heartbeat(struct sock *);
-extern void rose_stop_timer(struct sock *);
-extern void rose_stop_idletimer(struct sock *);
+void rose_start_heartbeat(struct sock *);
+void rose_start_t1timer(struct sock *);
+void rose_start_t2timer(struct sock *);
+void rose_start_t3timer(struct sock *);
+void rose_start_hbtimer(struct sock *);
+void rose_start_idletimer(struct sock *);
+void rose_stop_heartbeat(struct sock *);
+void rose_stop_timer(struct sock *);
+void rose_stop_idletimer(struct sock *);
/* sysctl_net_rose.c */
-extern void rose_register_sysctl(void);
-extern void rose_unregister_sysctl(void);
+void rose_register_sysctl(void);
+void rose_unregister_sysctl(void);
#endif
diff --git a/include/net/route.h b/include/net/route.h
index afdeeb5bec25..dd4ae0029fd8 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -39,6 +39,7 @@
#define RTO_ONLINK 0x01
#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
+#define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
struct fib_nh;
struct fib_info;
@@ -87,34 +88,28 @@ struct ip_rt_acct {
};
struct rt_cache_stat {
- unsigned int in_hit;
unsigned int in_slow_tot;
unsigned int in_slow_mc;
unsigned int in_no_route;
unsigned int in_brd;
unsigned int in_martian_dst;
unsigned int in_martian_src;
- unsigned int out_hit;
unsigned int out_slow_tot;
unsigned int out_slow_mc;
- unsigned int gc_total;
- unsigned int gc_ignored;
- unsigned int gc_goal_miss;
- unsigned int gc_dst_overflow;
- unsigned int in_hlist_search;
- unsigned int out_hlist_search;
};
extern struct ip_rt_acct __percpu *ip_rt_acct;
struct in_device;
-extern int ip_rt_init(void);
-extern void rt_cache_flush(struct net *net);
-extern void rt_flush_dev(struct net_device *dev);
-extern struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
-extern struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
- struct sock *sk);
-extern struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig);
+
+int ip_rt_init(void);
+void rt_cache_flush(struct net *net);
+void rt_flush_dev(struct net_device *dev);
+struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
+struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
+ struct sock *sk);
+struct dst_entry *ipv4_blackhole_route(struct net *net,
+ struct dst_entry *dst_orig);
static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4 *flp)
{
@@ -162,8 +157,8 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
return ip_route_output_key(net, fl4);
}
-extern int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin);
+int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
+ u8 tos, struct net_device *devin);
static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin)
@@ -179,24 +174,25 @@ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
return err;
}
-extern void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
- int oif, u32 mark, u8 protocol, int flow_flags);
-extern void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
-extern void ipv4_redirect(struct sk_buff *skb, struct net *net,
- int oif, u32 mark, u8 protocol, int flow_flags);
-extern void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
-extern void ip_rt_send_redirect(struct sk_buff *skb);
-
-extern unsigned int inet_addr_type(struct net *net, __be32 addr);
-extern unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr);
-extern void ip_rt_multicast_event(struct in_device *);
-extern int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
-extern void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
-extern int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb);
+void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
+ u32 mark, u8 protocol, int flow_flags);
+void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
+void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
+ u8 protocol, int flow_flags);
+void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
+void ip_rt_send_redirect(struct sk_buff *skb);
+
+unsigned int inet_addr_type(struct net *net, __be32 addr);
+unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
+ __be32 addr);
+void ip_rt_multicast_event(struct in_device *);
+int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
+void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
+int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb);
struct in_ifaddr;
-extern void fib_add_ifaddr(struct in_ifaddr *);
-extern void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
+void fib_add_ifaddr(struct in_ifaddr *);
+void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
static inline void ip_rt_put(struct rtable *rt)
{
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 702664833a53..bb13a182fba6 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -8,14 +8,12 @@ typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *);
typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *);
-extern int __rtnl_register(int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func,
- rtnl_calcit_func);
-extern void rtnl_register(int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func,
- rtnl_calcit_func);
-extern int rtnl_unregister(int protocol, int msgtype);
-extern void rtnl_unregister_all(int protocol);
+int __rtnl_register(int protocol, int msgtype,
+ rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func);
+void rtnl_register(int protocol, int msgtype,
+ rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func);
+int rtnl_unregister(int protocol, int msgtype);
+void rtnl_unregister_all(int protocol);
static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
{
@@ -83,11 +81,11 @@ struct rtnl_link_ops {
unsigned int (*get_num_rx_queues)(void);
};
-extern int __rtnl_link_register(struct rtnl_link_ops *ops);
-extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
+int __rtnl_link_register(struct rtnl_link_ops *ops);
+void __rtnl_link_unregister(struct rtnl_link_ops *ops);
-extern int rtnl_link_register(struct rtnl_link_ops *ops);
-extern void rtnl_link_unregister(struct rtnl_link_ops *ops);
+int rtnl_link_register(struct rtnl_link_ops *ops);
+void rtnl_link_unregister(struct rtnl_link_ops *ops);
/**
* struct rtnl_af_ops - rtnetlink address family operations
@@ -117,18 +115,18 @@ struct rtnl_af_ops {
const struct nlattr *attr);
};
-extern int __rtnl_af_register(struct rtnl_af_ops *ops);
-extern void __rtnl_af_unregister(struct rtnl_af_ops *ops);
+int __rtnl_af_register(struct rtnl_af_ops *ops);
+void __rtnl_af_unregister(struct rtnl_af_ops *ops);
-extern int rtnl_af_register(struct rtnl_af_ops *ops);
-extern void rtnl_af_unregister(struct rtnl_af_ops *ops);
+int rtnl_af_register(struct rtnl_af_ops *ops);
+void rtnl_af_unregister(struct rtnl_af_ops *ops);
+struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
+struct net_device *rtnl_create_link(struct net *net, char *ifname,
+ const struct rtnl_link_ops *ops,
+ struct nlattr *tb[]);
+int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
-extern struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
-extern struct net_device *rtnl_create_link(struct net *net,
- char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]);
-extern int rtnl_configure_link(struct net_device *dev,
- const struct ifinfomsg *ifm);
extern const struct nla_policy ifla_policy[IFLA_MAX+1];
#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f4eb365f7dcd..d0a6321c302e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -702,13 +702,20 @@ static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
}
void psched_ratecfg_precompute(struct psched_ratecfg *r,
- const struct tc_ratespec *conf);
+ const struct tc_ratespec *conf,
+ u64 rate64);
static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
const struct psched_ratecfg *r)
{
memset(res, 0, sizeof(*res));
- res->rate = r->rate_bytes_ps;
+
+ /* The legacy struct tc_ratespec has a 32bit @rate field.
+ * Qdiscs using a 64bit rate should add new attributes
+ * in order to maintain compatibility.
+ */
+ res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
+
res->overhead = r->overhead;
res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
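
The min_t() clamp above keeps the legacy 32bit @rate field meaningful: a rate past 0xffffffff bytes per second (roughly 34 Gbit/s) simply saturates, and qdiscs that need the true value are expected to export it through a new 64bit attribute. A minimal user-space sketch of that clamp; legacy_rate() is an illustrative name, not a kernel helper:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the min_t(u64, rate_bytes_ps, ~0U) clamp above. */
static uint32_t legacy_rate(uint64_t rate_bytes_ps)
{
	return rate_bytes_ps < UINT32_MAX ? (uint32_t)rate_bytes_ps : UINT32_MAX;
}

int main(void)
{
	uint64_t r1g  = 1000000000ULL / 8;	/*  1 Gbit/s ->  125000000 B/s, fits   */
	uint64_t r40g = 40000000000ULL / 8;	/* 40 Gbit/s -> 5000000000 B/s, clamps */

	printf("%u %u\n", legacy_rate(r1g), legacy_rate(r40g));
	/* prints: 125000000 4294967295 */
	return 0;
}
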
diff --git a/include/net/scm.h b/include/net/scm.h
index 8de2d37d2077..262532d111f5 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -33,11 +33,11 @@ struct scm_cookie {
#endif
};
-extern void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
-extern void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
-extern int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
-extern void __scm_destroy(struct scm_cookie *scm);
-extern struct scm_fp_list * scm_fp_dup(struct scm_fp_list *fpl);
+void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
+void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
+int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
+void __scm_destroy(struct scm_cookie *scm);
+struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl);
#ifdef CONFIG_SECURITY_NETWORK
static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm)
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 259924d63ba6..6bd44fe94c26 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -42,56 +42,38 @@
#include <linux/types.h>
#include <net/sctp/sctp.h>
#include <linux/crc32c.h>
+#include <linux/crc32.h>
-static inline __u32 sctp_crc32c(__u32 crc, u8 *buffer, u16 length)
+static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum)
{
- return crc32c(crc, buffer, length);
-}
-
-static inline __u32 sctp_start_cksum(__u8 *buffer, __u16 length)
-{
- __u32 crc = ~(__u32)0;
- __u8 zero[sizeof(__u32)] = {0};
-
- /* Optimize this routine to be SCTP specific, knowing how
- * to skip the checksum field of the SCTP header.
+ /* This uses the crypto implementation of crc32c, which either is
+ * implemented with hardware support or resolves to __crc32c_le().
*/
-
- /* Calculate CRC up to the checksum. */
- crc = sctp_crc32c(crc, buffer, sizeof(struct sctphdr) - sizeof(__u32));
-
- /* Skip checksum field of the header. */
- crc = sctp_crc32c(crc, zero, sizeof(__u32));
-
- /* Calculate the rest of the CRC. */
- crc = sctp_crc32c(crc, &buffer[sizeof(struct sctphdr)],
- length - sizeof(struct sctphdr));
- return crc;
-}
-
-static inline __u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32)
-{
- return sctp_crc32c(crc32, buffer, length);
+ return crc32c(sum, buff, len);
}
-static inline __le32 sctp_end_cksum(__u32 crc32)
+static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
+ int offset, int len)
{
- return cpu_to_le32(~crc32);
+ return __crc32c_le_combine(csum, csum2, len);
}
-/* Calculate the CRC32C checksum of an SCTP packet. */
static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
unsigned int offset)
{
- const struct sk_buff *iter;
+ struct sctphdr *sh = sctp_hdr(skb);
+ __le32 ret, old = sh->checksum;
+ const struct skb_checksum_ops ops = {
+ .update = sctp_csum_update,
+ .combine = sctp_csum_combine,
+ };
- __u32 crc32 = sctp_start_cksum(skb->data + offset,
- skb_headlen(skb) - offset);
- skb_walk_frags(skb, iter)
- crc32 = sctp_update_cksum((__u8 *) iter->data,
- skb_headlen(iter), crc32);
+ sh->checksum = 0;
+ ret = cpu_to_le32(~__skb_checksum(skb, offset, skb->len - offset,
+ ~(__u32)0, &ops));
+ sh->checksum = old;
- return sctp_end_cksum(crc32);
+ return ret;
}
#endif /* __sctp_checksum_h__ */
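
The rewritten sctp_compute_cksum() above zeroes the checksum field, lets __skb_checksum() walk the skb (including fragments) with the CRC32c update/combine ops, then restores the field and returns the complemented result as a little-endian value. A user-space sketch of the same computation over a flat buffer, assuming the usual 12-byte SCTP common header with the checksum at byte offset 8; crc32c_update() and sctp_cksum_flat() are illustrative names, not kernel helpers:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Bitwise CRC32c (Castagnoli, reflected polynomial 0x82f63b78); the kernel
 * uses the crc32c library / crypto transform instead of this loop. */
static uint32_t crc32c_update(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x82f63b78U : crc >> 1;
	}
	return crc;
}

/* Same end result as sctp_compute_cksum() for an unfragmented packet:
 * zero the checksum field, CRC the whole packet, restore the field.
 * The kernel stores the returned value little-endian (cpu_to_le32). */
static uint32_t sctp_cksum_flat(uint8_t *pkt, size_t len)
{
	uint8_t saved[4];
	uint32_t crc;

	memcpy(saved, pkt + 8, sizeof(saved));
	memset(pkt + 8, 0, sizeof(saved));
	crc = ~crc32c_update(~(uint32_t)0, pkt, len);
	memcpy(pkt + 8, saved, sizeof(saved));
	return crc;
}
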
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 3794c5ad20fe..c5fe80697f8d 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -90,12 +90,11 @@
/*
* sctp/protocol.c
*/
-extern int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
- sctp_scope_t, gfp_t gfp,
- int flags);
-extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
-extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
-extern void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
+int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
+ sctp_scope_t, gfp_t gfp, int flags);
+struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
+int sctp_register_pf(struct sctp_pf *, sa_family_t);
+void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
/*
* sctp/socket.c
@@ -110,7 +109,7 @@ void sctp_sock_rfree(struct sk_buff *skb);
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct sctp_association *asoc);
extern struct percpu_counter sctp_sockets_allocated;
-extern int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
+int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
/*
* sctp/primitive.c
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index c2e542b27a5a..f257486f17be 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -3,18 +3,18 @@
#include <linux/types.h>
-extern __u32 secure_ip_id(__be32 daddr);
-extern __u32 secure_ipv6_id(const __be32 daddr[4]);
-extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
-extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
- __be16 dport);
-extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport);
-extern __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
- __be16 sport, __be16 dport);
-extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport);
-extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
- __be16 sport, __be16 dport);
+__u32 secure_ip_id(__be32 daddr);
+__u32 secure_ipv6_id(const __be32 daddr[4]);
+u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport);
+__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport);
+__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
+ __be16 sport, __be16 dport);
+u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport);
+u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+ __be16 sport, __be16 dport);
#endif /* _NET_SECURE_SEQ */
diff --git a/include/net/sock.h b/include/net/sock.h
index 1d37a8086bed..e3a18ff0c38b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -156,7 +156,7 @@ typedef __u64 __bitwise __addrpair;
*/
struct sock_common {
/* skc_daddr and skc_rcv_saddr must be grouped on an 8-byte aligned
- * address on 64bit arches : cf INET_MATCH() and INET_TW_MATCH()
+ * address on 64bit arches : cf INET_MATCH()
*/
union {
__addrpair skc_addrpair;
@@ -191,6 +191,12 @@ struct sock_common {
#ifdef CONFIG_NET_NS
struct net *skc_net;
#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr skc_v6_daddr;
+ struct in6_addr skc_v6_rcv_saddr;
+#endif
+
/*
* fields between dontcopy_begin/dontcopy_end
* are not copied in sock_copy()
@@ -218,7 +224,7 @@ struct cg_proto;
* @sk_lock: synchronizer
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
- * @sk_rx_dst: receive input route used by early tcp demux
+ * @sk_rx_dst: receive input route used by early demux
* @sk_dst_cache: destination cache
* @sk_dst_lock: destination cache lock
* @sk_policy: flow policy
@@ -233,6 +239,7 @@ struct cg_proto;
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
* @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
+ * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
* @sk_sndbuf: size of send buffer in bytes
* @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
* %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -299,6 +306,12 @@ struct sock {
#define sk_dontcopy_begin __sk_common.skc_dontcopy_begin
#define sk_dontcopy_end __sk_common.skc_dontcopy_end
#define sk_hash __sk_common.skc_hash
+#define sk_portpair __sk_common.skc_portpair
+#define sk_num __sk_common.skc_num
+#define sk_dport __sk_common.skc_dport
+#define sk_addrpair __sk_common.skc_addrpair
+#define sk_daddr __sk_common.skc_daddr
+#define sk_rcv_saddr __sk_common.skc_rcv_saddr
#define sk_family __sk_common.skc_family
#define sk_state __sk_common.skc_state
#define sk_reuse __sk_common.skc_reuse
@@ -307,6 +320,9 @@ struct sock {
#define sk_bind_node __sk_common.skc_bind_node
#define sk_prot __sk_common.skc_prot
#define sk_net __sk_common.skc_net
+#define sk_v6_daddr __sk_common.skc_v6_daddr
+#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
+
socket_lock_t sk_lock;
struct sk_buff_head sk_receive_queue;
/*
@@ -363,6 +379,7 @@ struct sock {
int sk_wmem_queued;
gfp_t sk_allocation;
u32 sk_pacing_rate; /* bytes per second */
+ u32 sk_max_pacing_rate;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
int sk_gso_type;
@@ -751,7 +768,7 @@ static inline int sk_stream_wspace(const struct sock *sk)
return sk->sk_sndbuf - sk->sk_wmem_queued;
}
-extern void sk_stream_write_space(struct sock *sk);
+void sk_stream_write_space(struct sock *sk);
/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
@@ -793,7 +810,7 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
return 0;
}
-extern int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
@@ -858,15 +875,15 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
__rc; \
})
-extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
-extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
-extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
-extern int sk_stream_error(struct sock *sk, int flags, int err);
-extern void sk_stream_kill_queues(struct sock *sk);
-extern void sk_set_memalloc(struct sock *sk);
-extern void sk_clear_memalloc(struct sock *sk);
+int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
+int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
+void sk_stream_wait_close(struct sock *sk, long timeo_p);
+int sk_stream_error(struct sock *sk, int flags, int err);
+void sk_stream_kill_queues(struct sock *sk);
+void sk_set_memalloc(struct sock *sk);
+void sk_clear_memalloc(struct sock *sk);
-extern int sk_wait_data(struct sock *sk, long *timeo);
+int sk_wait_data(struct sock *sk, long *timeo);
struct request_sock_ops;
struct timewait_sock_ops;
@@ -1019,10 +1036,10 @@ enum cg_proto_flags {
struct cg_proto {
void (*enter_memory_pressure)(struct sock *sk);
- struct res_counter *memory_allocated; /* Current allocated memory. */
- struct percpu_counter *sockets_allocated; /* Current number of sockets. */
- int *memory_pressure;
- long *sysctl_mem;
+ struct res_counter memory_allocated; /* Current allocated memory. */
+ struct percpu_counter sockets_allocated; /* Current number of sockets. */
+ int memory_pressure;
+ long sysctl_mem[3];
unsigned long flags;
/*
* memcg field is used to find which memcg we belong directly
@@ -1036,8 +1053,8 @@ struct cg_proto {
struct mem_cgroup *memcg;
};
-extern int proto_register(struct proto *prot, int alloc_slab);
-extern void proto_unregister(struct proto *prot);
+int proto_register(struct proto *prot, int alloc_slab);
+void proto_unregister(struct proto *prot);
static inline bool memcg_proto_active(struct cg_proto *cg_proto)
{
@@ -1118,7 +1135,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
return false;
if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- return !!*sk->sk_cgrp->memory_pressure;
+ return !!sk->sk_cgrp->memory_pressure;
return !!*sk->sk_prot->memory_pressure;
}
@@ -1138,8 +1155,8 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
struct proto *prot = sk->sk_prot;
for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
- if (*cg_proto->memory_pressure)
- *cg_proto->memory_pressure = 0;
+ if (cg_proto->memory_pressure)
+ cg_proto->memory_pressure = 0;
}
}
@@ -1175,7 +1192,7 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
struct res_counter *fail;
int ret;
- ret = res_counter_charge_nofail(prot->memory_allocated,
+ ret = res_counter_charge_nofail(&prot->memory_allocated,
amt << PAGE_SHIFT, &fail);
if (ret < 0)
*parent_status = OVER_LIMIT;
@@ -1184,13 +1201,13 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
unsigned long amt)
{
- res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
+ res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT);
}
static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
{
u64 ret;
- ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
+ ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE);
return ret >> PAGE_SHIFT;
}
@@ -1238,7 +1255,7 @@ static inline void sk_sockets_allocated_dec(struct sock *sk)
struct cg_proto *cg_proto = sk->sk_cgrp;
for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
- percpu_counter_dec(cg_proto->sockets_allocated);
+ percpu_counter_dec(&cg_proto->sockets_allocated);
}
percpu_counter_dec(prot->sockets_allocated);
@@ -1252,7 +1269,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
struct cg_proto *cg_proto = sk->sk_cgrp;
for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
- percpu_counter_inc(cg_proto->sockets_allocated);
+ percpu_counter_inc(&cg_proto->sockets_allocated);
}
percpu_counter_inc(prot->sockets_allocated);
@@ -1264,7 +1281,7 @@ sk_sockets_allocated_read_positive(struct sock *sk)
struct proto *prot = sk->sk_prot;
if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated);
+ return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated);
return percpu_counter_read_positive(prot->sockets_allocated);
}
@@ -1292,8 +1309,8 @@ proto_memory_pressure(struct proto *prot)
#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
-extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
-extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
+int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
int inc)
@@ -1369,8 +1386,8 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
/*
* Functions for memory accounting
*/
-extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
-extern void __sk_mem_reclaim(struct sock *sk);
+int __sk_mem_schedule(struct sock *sk, int size, int kind);
+void __sk_mem_reclaim(struct sock *sk);
#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
@@ -1478,14 +1495,14 @@ do { \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
-extern void lock_sock_nested(struct sock *sk, int subclass);
+void lock_sock_nested(struct sock *sk, int subclass);
static inline void lock_sock(struct sock *sk)
{
lock_sock_nested(sk, 0);
}
-extern void release_sock(struct sock *sk);
+void release_sock(struct sock *sk);
/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
@@ -1494,7 +1511,7 @@ extern void release_sock(struct sock *sk);
SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
-extern bool lock_sock_fast(struct sock *sk);
+bool lock_sock_fast(struct sock *sk);
/**
* unlock_sock_fast - complement of lock_sock_fast
* @sk: socket
@@ -1512,108 +1529,84 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
}
-extern struct sock *sk_alloc(struct net *net, int family,
- gfp_t priority,
- struct proto *prot);
-extern void sk_free(struct sock *sk);
-extern void sk_release_kernel(struct sock *sk);
-extern struct sock *sk_clone_lock(const struct sock *sk,
- const gfp_t priority);
-
-extern struct sk_buff *sock_wmalloc(struct sock *sk,
- unsigned long size, int force,
- gfp_t priority);
-extern struct sk_buff *sock_rmalloc(struct sock *sk,
- unsigned long size, int force,
- gfp_t priority);
-extern void sock_wfree(struct sk_buff *skb);
-extern void skb_orphan_partial(struct sk_buff *skb);
-extern void sock_rfree(struct sk_buff *skb);
-extern void sock_edemux(struct sk_buff *skb);
-
-extern int sock_setsockopt(struct socket *sock, int level,
- int op, char __user *optval,
- unsigned int optlen);
-
-extern int sock_getsockopt(struct socket *sock, int level,
- int op, char __user *optval,
- int __user *optlen);
-extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
- unsigned long size,
- int noblock,
- int *errcode);
-extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
- unsigned long header_len,
- unsigned long data_len,
- int noblock,
- int *errcode,
- int max_page_order);
-extern void *sock_kmalloc(struct sock *sk, int size,
- gfp_t priority);
-extern void sock_kfree_s(struct sock *sk, void *mem, int size);
-extern void sk_send_sigurg(struct sock *sk);
+struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+ struct proto *prot);
+void sk_free(struct sock *sk);
+void sk_release_kernel(struct sock *sk);
+struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
+
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+ gfp_t priority);
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
+ gfp_t priority);
+void sock_wfree(struct sk_buff *skb);
+void skb_orphan_partial(struct sk_buff *skb);
+void sock_rfree(struct sk_buff *skb);
+void sock_edemux(struct sk_buff *skb);
+
+int sock_setsockopt(struct socket *sock, int level, int op,
+ char __user *optval, unsigned int optlen);
+
+int sock_getsockopt(struct socket *sock, int level, int op,
+ char __user *optval, int __user *optlen);
+struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
+ int noblock, int *errcode);
+struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+ unsigned long data_len, int noblock,
+ int *errcode, int max_page_order);
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
+void sock_kfree_s(struct sock *sk, void *mem, int size);
+void sk_send_sigurg(struct sock *sk);
/*
* Functions to fill in entries in struct proto_ops when a protocol
* does not implement a particular function.
*/
-extern int sock_no_bind(struct socket *,
- struct sockaddr *, int);
-extern int sock_no_connect(struct socket *,
- struct sockaddr *, int, int);
-extern int sock_no_socketpair(struct socket *,
- struct socket *);
-extern int sock_no_accept(struct socket *,
- struct socket *, int);
-extern int sock_no_getname(struct socket *,
- struct sockaddr *, int *, int);
-extern unsigned int sock_no_poll(struct file *, struct socket *,
- struct poll_table_struct *);
-extern int sock_no_ioctl(struct socket *, unsigned int,
- unsigned long);
-extern int sock_no_listen(struct socket *, int);
-extern int sock_no_shutdown(struct socket *, int);
-extern int sock_no_getsockopt(struct socket *, int , int,
- char __user *, int __user *);
-extern int sock_no_setsockopt(struct socket *, int, int,
- char __user *, unsigned int);
-extern int sock_no_sendmsg(struct kiocb *, struct socket *,
- struct msghdr *, size_t);
-extern int sock_no_recvmsg(struct kiocb *, struct socket *,
- struct msghdr *, size_t, int);
-extern int sock_no_mmap(struct file *file,
- struct socket *sock,
- struct vm_area_struct *vma);
-extern ssize_t sock_no_sendpage(struct socket *sock,
- struct page *page,
- int offset, size_t size,
- int flags);
+int sock_no_bind(struct socket *, struct sockaddr *, int);
+int sock_no_connect(struct socket *, struct sockaddr *, int, int);
+int sock_no_socketpair(struct socket *, struct socket *);
+int sock_no_accept(struct socket *, struct socket *, int);
+int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
+unsigned int sock_no_poll(struct file *, struct socket *,
+ struct poll_table_struct *);
+int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
+int sock_no_listen(struct socket *, int);
+int sock_no_shutdown(struct socket *, int);
+int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
+int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
+int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t);
+int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
+ int);
+int sock_no_mmap(struct file *file, struct socket *sock,
+ struct vm_area_struct *vma);
+ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
+ size_t size, int flags);
/*
* Functions to fill in entries in struct proto_ops when a protocol
* uses the inet style.
*/
-extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
+int sock_common_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen);
-extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
+int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags);
-extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
+int sock_common_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen);
-extern int compat_sock_common_getsockopt(struct socket *sock, int level,
+int compat_sock_common_getsockopt(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
-extern int compat_sock_common_setsockopt(struct socket *sock, int level,
+int compat_sock_common_setsockopt(struct socket *sock, int level,
int optname, char __user *optval, unsigned int optlen);
-extern void sk_common_release(struct sock *sk);
+void sk_common_release(struct sock *sk);
/*
* Default socket callbacks and setup code
*/
/* Initialise core socket variables */
-extern void sock_init_data(struct socket *sock, struct sock *sk);
+void sock_init_data(struct socket *sock, struct sock *sk);
-extern void sk_filter_release_rcu(struct rcu_head *rcu);
+void sk_filter_release_rcu(struct rcu_head *rcu);
/**
* sk_filter_release - release a socket filter
@@ -1630,16 +1623,14 @@ static inline void sk_filter_release(struct sk_filter *fp)
static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
- unsigned int size = sk_filter_len(fp);
-
- atomic_sub(size, &sk->sk_omem_alloc);
+ atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
sk_filter_release(fp);
}
static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
atomic_inc(&fp->refcnt);
- atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
+ atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
}
/*
@@ -1673,9 +1664,12 @@ static inline void sock_put(struct sock *sk)
if (atomic_dec_and_test(&sk->sk_refcnt))
sk_free(sk);
}
+/* Generic version of sock_put(), dealing with all sockets
+ * (TCP_TIMEWAIT, ESTABLISHED...)
+ */
+void sock_gen_put(struct sock *sk);
-extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
- const int nested);
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
@@ -1729,8 +1723,8 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
write_unlock_bh(&sk->sk_callback_lock);
}
-extern kuid_t sock_i_uid(struct sock *sk);
-extern unsigned long sock_i_ino(struct sock *sk);
+kuid_t sock_i_uid(struct sock *sk);
+unsigned long sock_i_ino(struct sock *sk);
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
@@ -1752,8 +1746,6 @@ sk_dst_get(struct sock *sk)
return dst;
}
-extern void sk_reset_txq(struct sock *sk);
-
static inline void dst_negative_advice(struct sock *sk)
{
struct dst_entry *ndst, *dst = __sk_dst_get(sk);
@@ -1763,7 +1755,7 @@ static inline void dst_negative_advice(struct sock *sk)
if (ndst != dst) {
rcu_assign_pointer(sk->sk_dst_cache, ndst);
- sk_reset_txq(sk);
+ sk_tx_queue_clear(sk);
}
}
}
@@ -1805,16 +1797,16 @@ sk_dst_reset(struct sock *sk)
spin_unlock(&sk->sk_dst_lock);
}
-extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
-extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
static inline bool sk_can_gso(const struct sock *sk)
{
return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}
-extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
+void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
{
@@ -2027,14 +2019,14 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
sk_mem_charge(sk, skb->truesize);
}
-extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
- unsigned long expires);
+void sk_reset_timer(struct sock *sk, struct timer_list *timer,
+ unsigned long expires);
-extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+void sk_stop_timer(struct sock *sk, struct timer_list *timer);
-extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
/*
* Recover an error report and clear atomically
@@ -2102,7 +2094,7 @@ static inline struct page_frag *sk_page_frag(struct sock *sk)
return &sk->sk_frag;
}
-extern bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
/*
* Default write policy as shown to user space via poll/select/SIGIO
@@ -2140,10 +2132,10 @@ static inline int sock_intr_errno(long timeo)
return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
-extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb);
-extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb);
+void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
+void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
@@ -2176,8 +2168,8 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
__sock_recv_wifi_status(msg, sk, skb);
}
-extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
- struct sk_buff *skb);
+void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
@@ -2202,7 +2194,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
*
* Currently only depends on SOCK_TIMESTAMPING* flags.
*/
-extern void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
+void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
/**
* sk_eat_skb - Release a skb if it is no longer needed
@@ -2266,11 +2258,11 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
return NULL;
}
-extern void sock_enable_timestamp(struct sock *sk, int flag);
-extern int sock_get_timestamp(struct sock *, struct timeval __user *);
-extern int sock_get_timestampns(struct sock *, struct timespec __user *);
-extern int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
- int level, int type);
+void sock_enable_timestamp(struct sock *sk, int flag);
+int sock_get_timestamp(struct sock *, struct timeval __user *);
+int sock_get_timestampns(struct sock *, struct timespec __user *);
+int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
+ int type);
/*
* Enable debug/info messages
diff --git a/include/net/stp.h b/include/net/stp.h
index ad447f105417..3af174d70d9e 100644
--- a/include/net/stp.h
+++ b/include/net/stp.h
@@ -8,7 +8,7 @@ struct stp_proto {
void *data;
};
-extern int stp_proto_register(const struct stp_proto *proto);
-extern void stp_proto_unregister(const struct stp_proto *proto);
+int stp_proto_register(const struct stp_proto *proto);
+void stp_proto_unregister(const struct stp_proto *proto);
#endif /* _NET_STP_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b1aa324c5e65..2d7b4bdc972f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -50,7 +50,7 @@
extern struct inet_hashinfo tcp_hashinfo;
extern struct percpu_counter tcp_orphan_count;
-extern void tcp_time_wait(struct sock *sk, int state, int timeo);
+void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
@@ -259,6 +259,7 @@ extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_dsack;
+extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
@@ -325,7 +326,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
return false;
}
-extern bool tcp_check_oom(struct sock *sk, int shift);
+bool tcp_check_oom(struct sock *sk, int shift);
/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
@@ -348,38 +349,36 @@ extern struct proto tcp_prot;
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
-extern void tcp_init_mem(struct net *net);
-
-extern void tcp_tasklet_init(void);
-
-extern void tcp_v4_err(struct sk_buff *skb, u32);
-
-extern void tcp_shutdown (struct sock *sk, int how);
-
-extern void tcp_v4_early_demux(struct sk_buff *skb);
-extern int tcp_v4_rcv(struct sk_buff *skb);
-
-extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
-extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t size);
-extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
- size_t size, int flags);
-extern void tcp_release_cb(struct sock *sk);
-extern void tcp_wfree(struct sk_buff *skb);
-extern void tcp_write_timer_handler(struct sock *sk);
-extern void tcp_delack_timer_handler(struct sock *sk);
-extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- const struct tcphdr *th, unsigned int len);
-extern void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- const struct tcphdr *th, unsigned int len);
-extern void tcp_rcv_space_adjust(struct sock *sk);
-extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
-extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
-extern void tcp_twsk_destructor(struct sock *sk);
-extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags);
+void tcp_tasklet_init(void);
+
+void tcp_v4_err(struct sk_buff *skb, u32);
+
+void tcp_shutdown(struct sock *sk, int how);
+
+void tcp_v4_early_demux(struct sk_buff *skb);
+int tcp_v4_rcv(struct sk_buff *skb);
+
+int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
+int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t size);
+int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
+ int flags);
+void tcp_release_cb(struct sock *sk);
+void tcp_wfree(struct sk_buff *skb);
+void tcp_write_timer_handler(struct sock *sk);
+void tcp_delack_timer_handler(struct sock *sk);
+int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, unsigned int len);
+void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, unsigned int len);
+void tcp_rcv_space_adjust(struct sock *sk);
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
+void tcp_twsk_destructor(struct sock *sk);
+ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
@@ -409,66 +408,65 @@ enum tcp_tw_status {
};
-extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
- struct sk_buff *skb,
- const struct tcphdr *th);
-extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
- struct request_sock *req,
- struct request_sock **prev,
- bool fastopen);
-extern int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb);
-extern void tcp_enter_loss(struct sock *sk, int how);
-extern void tcp_clear_retrans(struct tcp_sock *tp);
-extern void tcp_update_metrics(struct sock *sk);
-extern void tcp_init_metrics(struct sock *sk);
-extern void tcp_metrics_init(void);
-extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
-extern bool tcp_remember_stamp(struct sock *sk);
-extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
-extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
-extern void tcp_disable_fack(struct tcp_sock *tp);
-extern void tcp_close(struct sock *sk, long timeout);
-extern void tcp_init_sock(struct sock *sk);
-extern unsigned int tcp_poll(struct file * file, struct socket *sock,
- struct poll_table_struct *wait);
-extern int tcp_getsockopt(struct sock *sk, int level, int optname,
+enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
+ struct sk_buff *skb,
+ const struct tcphdr *th);
+struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req, struct request_sock **prev,
+ bool fastopen);
+int tcp_child_process(struct sock *parent, struct sock *child,
+ struct sk_buff *skb);
+void tcp_enter_loss(struct sock *sk, int how);
+void tcp_clear_retrans(struct tcp_sock *tp);
+void tcp_update_metrics(struct sock *sk);
+void tcp_init_metrics(struct sock *sk);
+void tcp_metrics_init(void);
+bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
+ bool paws_check);
+bool tcp_remember_stamp(struct sock *sk);
+bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
+void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
+void tcp_disable_fack(struct tcp_sock *tp);
+void tcp_close(struct sock *sk, long timeout);
+void tcp_init_sock(struct sock *sk);
+unsigned int tcp_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
+int tcp_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int tcp_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
+int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
-extern int tcp_setsockopt(struct sock *sk, int level, int optname,
+int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
-extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
-extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
-extern void tcp_set_keepalive(struct sock *sk, int val);
-extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
-extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags, int *addr_len);
-extern void tcp_parse_options(const struct sk_buff *skb,
- struct tcp_options_received *opt_rx,
- int estab, struct tcp_fastopen_cookie *foc);
-extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
+void tcp_set_keepalive(struct sock *sk, int val);
+void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
+int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len, int nonblock, int flags, int *addr_len);
+void tcp_parse_options(const struct sk_buff *skb,
+ struct tcp_options_received *opt_rx,
+ int estab, struct tcp_fastopen_cookie *foc);
+const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
/*
* TCP v4 functions exported for the inet6 API
*/
-extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
-extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
-extern struct sock * tcp_create_openreq_child(struct sock *sk,
- struct request_sock *req,
- struct sk_buff *skb);
-extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req,
- struct dst_entry *dst);
-extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
-extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
- int addr_len);
-extern int tcp_connect(struct sock *sk);
-extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
- struct request_sock *req,
- struct tcp_fastopen_cookie *foc);
-extern int tcp_disconnect(struct sock *sk, int flags);
+void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+struct sock *tcp_create_openreq_child(struct sock *sk,
+ struct request_sock *req,
+ struct sk_buff *skb);
+struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst);
+int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
+int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int tcp_connect(struct sock *sk);
+struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+ struct request_sock *req,
+ struct tcp_fastopen_cookie *foc);
+int tcp_disconnect(struct sock *sk, int flags);
void tcp_connect_init(struct sock *sk);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -476,16 +474,32 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
-extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
-extern int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
- u32 cookie);
-extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
- struct ip_options *opt);
+int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+ u32 cookie);
+struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES
-extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
- const struct tcphdr *th, u16 *mssp);
-extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
- __u16 *mss);
+#include <linux/ktime.h>
+
+/* Syncookies use a monotonic timer which increments every 64 seconds.
+ * This counter is used both as a hash input and partially encoded into
+ * the cookie value. A cookie is only validated further if the delta
+ * between the current counter value and the encoded one is less than this,
+ * i.e. a sent cookie is valid for at most 128 seconds (or less if
+ * the counter advances immediately after a cookie is generated).
+ */
+#define MAX_SYNCOOKIE_AGE 2
+
+static inline u32 tcp_cookie_time(void)
+{
+ struct timespec now;
+ getnstimeofday(&now);
+ return now.tv_sec >> 6; /* 64 seconds granularity */
+}
+
+u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
+ u16 *mssp);
+__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss);
#else
static inline __u32 cookie_v4_init_sequence(struct sock *sk,
struct sk_buff *skb,
@@ -495,19 +509,19 @@ static inline __u32 cookie_v4_init_sequence(struct sock *sk,
}
#endif
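
To make the window described in the comment above concrete: a counter tick is 64 seconds, so with MAX_SYNCOOKIE_AGE == 2 a cookie that encodes counter value c is still considered fresh while the current counter reads c or c + 1, i.e. for somewhere between 64 and 128 seconds of real time depending on where inside its tick it was generated. A hedged user-space sketch of just that freshness check; cookie_time_now() and cookie_counter_recent() are illustrative names, not the kernel's validation path:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define MAX_SYNCOOKIE_AGE 2	/* two 64 second ticks, as above */

/* Stand-in for tcp_cookie_time(): wall clock at 64 second granularity. */
static uint32_t cookie_time_now(void)
{
	return (uint32_t)(time(NULL) >> 6);
}

/* Accept a cookie only while the counter has advanced by less than
 * MAX_SYNCOOKIE_AGE since the value that was encoded into it. */
static bool cookie_counter_recent(uint32_t encoded)
{
	return cookie_time_now() - encoded < MAX_SYNCOOKIE_AGE;
}
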
-extern __u32 cookie_init_timestamp(struct request_sock *req);
-extern bool cookie_check_timestamp(struct tcp_options_received *opt,
- struct net *net, bool *ecn_ok);
+__u32 cookie_init_timestamp(struct request_sock *req);
+bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net,
+ bool *ecn_ok);
/* From net/ipv6/syncookies.c */
-extern int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
- u32 cookie);
-extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
+int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
+ u32 cookie);
+struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
-extern u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
- const struct tcphdr *th, u16 *mssp);
-extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
- __u16 *mss);
+u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
+ const struct tcphdr *th, u16 *mssp);
+__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
+ __u16 *mss);
#else
static inline __u32 cookie_v6_init_sequence(struct sock *sk,
struct sk_buff *skb,
@@ -518,47 +532,46 @@ static inline __u32 cookie_v6_init_sequence(struct sock *sk,
#endif
/* tcp_output.c */
-extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
- int nonagle);
-extern bool tcp_may_send_now(struct sock *sk);
-extern int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
-extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
-extern void tcp_retransmit_timer(struct sock *sk);
-extern void tcp_xmit_retransmit_queue(struct sock *);
-extern void tcp_simple_retransmit(struct sock *);
-extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
-
-extern void tcp_send_probe0(struct sock *);
-extern void tcp_send_partial(struct sock *);
-extern int tcp_write_wakeup(struct sock *);
-extern void tcp_send_fin(struct sock *sk);
-extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
-extern int tcp_send_synack(struct sock *);
-extern bool tcp_syn_flood_action(struct sock *sk,
- const struct sk_buff *skb,
- const char *proto);
-extern void tcp_push_one(struct sock *, unsigned int mss_now);
-extern void tcp_send_ack(struct sock *sk);
-extern void tcp_send_delayed_ack(struct sock *sk);
-extern void tcp_send_loss_probe(struct sock *sk);
-extern bool tcp_schedule_loss_probe(struct sock *sk);
+void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+ int nonagle);
+bool tcp_may_send_now(struct sock *sk);
+int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
+int tcp_retransmit_skb(struct sock *, struct sk_buff *);
+void tcp_retransmit_timer(struct sock *sk);
+void tcp_xmit_retransmit_queue(struct sock *);
+void tcp_simple_retransmit(struct sock *);
+int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
+
+void tcp_send_probe0(struct sock *);
+void tcp_send_partial(struct sock *);
+int tcp_write_wakeup(struct sock *);
+void tcp_send_fin(struct sock *sk);
+void tcp_send_active_reset(struct sock *sk, gfp_t priority);
+int tcp_send_synack(struct sock *);
+bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
+ const char *proto);
+void tcp_push_one(struct sock *, unsigned int mss_now);
+void tcp_send_ack(struct sock *sk);
+void tcp_send_delayed_ack(struct sock *sk);
+void tcp_send_loss_probe(struct sock *sk);
+bool tcp_schedule_loss_probe(struct sock *sk);
/* tcp_input.c */
-extern void tcp_cwnd_application_limited(struct sock *sk);
-extern void tcp_resume_early_retransmit(struct sock *sk);
-extern void tcp_rearm_rto(struct sock *sk);
-extern void tcp_reset(struct sock *sk);
+void tcp_cwnd_application_limited(struct sock *sk);
+void tcp_resume_early_retransmit(struct sock *sk);
+void tcp_rearm_rto(struct sock *sk);
+void tcp_reset(struct sock *sk);
/* tcp_timer.c */
-extern void tcp_init_xmit_timers(struct sock *);
+void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
inet_csk_clear_xmit_timers(sk);
}
-extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
-extern unsigned int tcp_current_mss(struct sock *sk);
+unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
+unsigned int tcp_current_mss(struct sock *sk);
/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
@@ -584,20 +597,20 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
}
/* tcp.c */
-extern void tcp_get_info(const struct sock *, struct tcp_info *);
+void tcp_get_info(const struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
unsigned int, size_t);
-extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
- sk_read_actor_t recv_actor);
+int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor);
-extern void tcp_initialize_rcv_mss(struct sock *sk);
+void tcp_initialize_rcv_mss(struct sock *sk);
-extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
-extern int tcp_mss_to_mtu(struct sock *sk, int mss);
-extern void tcp_mtup_init(struct sock *sk);
-extern void tcp_init_buffer_space(struct sock *sk);
+int tcp_mtu_to_mss(struct sock *sk, int pmtu);
+int tcp_mss_to_mtu(struct sock *sk, int mss);
+void tcp_mtup_init(struct sock *sk);
+void tcp_init_buffer_space(struct sock *sk);
static inline void tcp_bound_rto(const struct sock *sk)
{
@@ -610,7 +623,7 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
return (tp->srtt >> 3) + tp->rttvar;
}
-extern void tcp_set_rto(struct sock *sk);
+void tcp_set_rto(struct sock *sk);
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
@@ -663,7 +676,7 @@ static inline u32 tcp_receive_window(const struct tcp_sock *tp)
* scaling applied to the result. The caller does these things
* if necessary. This is a "raw" window selection.
*/
-extern u32 __tcp_select_window(struct sock *sk);
+u32 __tcp_select_window(struct sock *sk);
void tcp_send_window_probe(struct sock *sk);
@@ -800,24 +813,24 @@ struct tcp_congestion_ops {
struct module *owner;
};
-extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
-extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
+int tcp_register_congestion_control(struct tcp_congestion_ops *type);
+void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
-extern void tcp_init_congestion_control(struct sock *sk);
-extern void tcp_cleanup_congestion_control(struct sock *sk);
-extern int tcp_set_default_congestion_control(const char *name);
-extern void tcp_get_default_congestion_control(char *name);
-extern void tcp_get_available_congestion_control(char *buf, size_t len);
-extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
-extern int tcp_set_allowed_congestion_control(char *allowed);
-extern int tcp_set_congestion_control(struct sock *sk, const char *name);
-extern void tcp_slow_start(struct tcp_sock *tp);
-extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
+void tcp_init_congestion_control(struct sock *sk);
+void tcp_cleanup_congestion_control(struct sock *sk);
+int tcp_set_default_congestion_control(const char *name);
+void tcp_get_default_congestion_control(char *name);
+void tcp_get_available_congestion_control(char *buf, size_t len);
+void tcp_get_allowed_congestion_control(char *buf, size_t len);
+int tcp_set_allowed_congestion_control(char *allowed);
+int tcp_set_congestion_control(struct sock *sk, const char *name);
+void tcp_slow_start(struct tcp_sock *tp);
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
extern struct tcp_congestion_ops tcp_init_congestion_ops;
-extern u32 tcp_reno_ssthresh(struct sock *sk);
-extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
-extern u32 tcp_reno_min_cwnd(const struct sock *sk);
+u32 tcp_reno_ssthresh(struct sock *sk);
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
+u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@@ -936,8 +949,8 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
-extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
-extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
+__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
/* The maximum number of MSS of available cwnd for which TSO defers
* sending if not using sysctl_tcp_tso_win_divisor.
@@ -963,7 +976,7 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
return tp->snd_una + tp->snd_wnd;
}
-extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
+bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
const struct sk_buff *skb)
@@ -1028,7 +1041,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
#endif
}
-extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
+bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
#undef STATE_TRACE
@@ -1039,9 +1052,9 @@ static const char *statename[]={
"Close Wait","Last ACK","Listen","Closing"
};
#endif
-extern void tcp_set_state(struct sock *sk, int state);
+void tcp_set_state(struct sock *sk, int state);
-extern void tcp_done(struct sock *sk);
+void tcp_done(struct sock *sk);
static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
@@ -1049,13 +1062,12 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
rx_opt->num_sacks = 0;
}
-extern u32 tcp_default_init_rwnd(u32 mss);
+u32 tcp_default_init_rwnd(u32 mss);
/* Determine a window scaling and initial window to offer. */
-extern void tcp_select_initial_window(int __space, __u32 mss,
- __u32 *rcv_wnd, __u32 *window_clamp,
- int wscale_ok, __u8 *rcv_wscale,
- __u32 init_rcv_wnd);
+void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
+ __u32 *window_clamp, int wscale_ok,
+ __u8 *rcv_wscale, __u32 init_rcv_wnd);
static inline int tcp_win_from_space(int space)
{
@@ -1095,11 +1107,11 @@ static inline void tcp_openreq_init(struct request_sock *req,
ireq->wscale_ok = rx_opt->wscale_ok;
ireq->acked = 0;
ireq->ecn_ok = 0;
- ireq->rmt_port = tcp_hdr(skb)->source;
- ireq->loc_port = tcp_hdr(skb)->dest;
+ ireq->ir_rmt_port = tcp_hdr(skb)->source;
+ ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
}
-extern void tcp_enter_memory_pressure(struct sock *sk);
+void tcp_enter_memory_pressure(struct sock *sk);
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
@@ -1252,21 +1264,20 @@ struct tcp_md5sig_pool {
};
/* - functions */
-extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
- const struct sock *sk,
- const struct request_sock *req,
- const struct sk_buff *skb);
-extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
- int family, const u8 *newkey,
- u8 newkeylen, gfp_t gfp);
-extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
- int family);
-extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
+ const struct sock *sk, const struct request_sock *req,
+ const struct sk_buff *skb);
+int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
+ int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
+int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
+ int family);
+struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
-extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
- const union tcp_md5_addr *addr, int family);
+struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family);
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
@@ -1278,27 +1289,26 @@ static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
#define tcp_twsk_md5_key(twsk) NULL
#endif
-extern bool tcp_alloc_md5sig_pool(void);
+bool tcp_alloc_md5sig_pool(void);
-extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
static inline void tcp_put_md5sig_pool(void)
{
local_bh_enable();
}
-extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
-extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
- unsigned int header_len);
-extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
- const struct tcp_md5sig_key *key);
+int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
+int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
+ unsigned int header_len);
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
+ const struct tcp_md5sig_key *key);
/* From tcp_fastopen.c */
-extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
- struct tcp_fastopen_cookie *cookie,
- int *syn_loss, unsigned long *last_syn_loss);
-extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
- struct tcp_fastopen_cookie *cookie,
- bool syn_lost);
+void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+ struct tcp_fastopen_cookie *cookie, int *syn_loss,
+ unsigned long *last_syn_loss);
+void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+ struct tcp_fastopen_cookie *cookie, bool syn_lost);
struct tcp_fastopen_request {
/* Fast Open cookie. Size 0 means a cookie request */
struct tcp_fastopen_cookie cookie;
@@ -1309,9 +1319,9 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
int tcp_fastopen_reset_cipher(void *key, unsigned int len);
-extern void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
- struct tcp_fastopen_cookie *foc);
-
+void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
+ struct tcp_fastopen_cookie *foc);
+void tcp_fastopen_init_key_once(bool publish);
#define TCP_FASTOPEN_KEY_LENGTH 16
/* Fastopen key context */
@@ -1507,7 +1517,6 @@ enum tcp_seq_states {
TCP_SEQ_STATE_LISTENING,
TCP_SEQ_STATE_OPENREQ,
TCP_SEQ_STATE_ESTABLISHED,
- TCP_SEQ_STATE_TIME_WAIT,
};
int tcp_seq_open(struct inode *inode, struct file *file);
@@ -1529,22 +1538,20 @@ struct tcp_iter_state {
loff_t last_pos;
};
-extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
-extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
+int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
+void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;
-extern void tcp_v4_destroy_sock(struct sock *sk);
+void tcp_v4_destroy_sock(struct sock *sk);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
- netdev_features_t features);
-extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
- struct sk_buff *skb);
-extern int tcp_gro_complete(struct sk_buff *skb);
+struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ netdev_features_t features);
+struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int tcp_gro_complete(struct sk_buff *skb);
-extern void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
- __be32 daddr);
+void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
@@ -1560,8 +1567,8 @@ static inline bool tcp_stream_memory_free(const struct sock *sk)
}
#ifdef CONFIG_PROC_FS
-extern int tcp4_proc_init(void);
-extern void tcp4_proc_exit(void);
+int tcp4_proc_init(void);
+void tcp4_proc_exit(void);
#endif
/* TCP af-specific functions */
@@ -1592,9 +1599,9 @@ struct tcp_request_sock_ops {
#endif
};
-extern int tcpv4_offload_init(void);
+int tcpv4_offload_init(void);
-extern void tcp_v4_init(void);
-extern void tcp_init(void);
+void tcp_v4_init(void);
+void tcp_init(void);
#endif /* _TCP_H */
diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h
index 7df18bc43a97..05b94d9453de 100644
--- a/include/net/tcp_memcontrol.h
+++ b/include/net/tcp_memcontrol.h
@@ -1,19 +1,7 @@
#ifndef _TCP_MEMCG_H
#define _TCP_MEMCG_H
-struct tcp_memcontrol {
- struct cg_proto cg_proto;
- /* per-cgroup tcp memory pressure knobs */
- struct res_counter tcp_memory_allocated;
- struct percpu_counter tcp_sockets_allocated;
- /* those two are read-mostly, leave them at the end */
- long tcp_prot_mem[3];
- int tcp_memory_pressure;
-};
-
struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
void tcp_destroy_cgroup(struct mem_cgroup *memcg);
-unsigned long long tcp_max_memory(const struct mem_cgroup *memcg);
-void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx);
#endif /* _TCP_MEMCG_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index ef2e0b7843a0..fe4ba9f32429 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -79,7 +79,7 @@ struct udp_table {
unsigned int log;
};
extern struct udp_table udp_table;
-extern void udp_table_init(struct udp_table *, const char *);
+void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
struct net *net, unsigned int num)
{
@@ -162,52 +162,53 @@ static inline void udp_lib_hash(struct sock *sk)
BUG();
}
-extern void udp_lib_unhash(struct sock *sk);
-extern void udp_lib_rehash(struct sock *sk, u16 new_hash);
+void udp_lib_unhash(struct sock *sk);
+void udp_lib_rehash(struct sock *sk, u16 new_hash);
static inline void udp_lib_close(struct sock *sk, long timeout)
{
sk_common_release(sk);
}
-extern int udp_lib_get_port(struct sock *sk, unsigned short snum,
- int (*)(const struct sock *,const struct sock *),
- unsigned int hash2_nulladdr);
+int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ int (*)(const struct sock *, const struct sock *),
+ unsigned int hash2_nulladdr);
/* net/ipv4/udp.c */
-extern int udp_get_port(struct sock *sk, unsigned short snum,
- int (*saddr_cmp)(const struct sock *,
- const struct sock *));
-extern void udp_err(struct sk_buff *, u32);
-extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len);
-extern int udp_push_pending_frames(struct sock *sk);
-extern void udp_flush_pending_frames(struct sock *sk);
-extern void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
-extern int udp_rcv(struct sk_buff *skb);
-extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int udp_disconnect(struct sock *sk, int flags);
-extern unsigned int udp_poll(struct file *file, struct socket *sock,
- poll_table *wait);
-extern struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
- netdev_features_t features);
-extern int udp_lib_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
-extern int udp_lib_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen,
- int (*push_pending_frames)(struct sock *));
-extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
- __be32 daddr, __be16 dport,
- int dif);
-extern struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
- __be32 daddr, __be16 dport,
- int dif, struct udp_table *tbl);
-extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
- const struct in6_addr *daddr, __be16 dport,
- int dif);
-extern struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
- const struct in6_addr *daddr, __be16 dport,
- int dif, struct udp_table *tbl);
+void udp_v4_early_demux(struct sk_buff *skb);
+int udp_get_port(struct sock *sk, unsigned short snum,
+ int (*saddr_cmp)(const struct sock *,
+ const struct sock *));
+void udp_err(struct sk_buff *, u32);
+int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len);
+int udp_push_pending_frames(struct sock *sk);
+void udp_flush_pending_frames(struct sock *sk);
+void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
+int udp_rcv(struct sk_buff *skb);
+int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int udp_disconnect(struct sock *sk, int flags);
+unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
+struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+ netdev_features_t features);
+int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen,
+ int (*push_pending_frames)(struct sock *));
+struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+ __be32 daddr, __be16 dport, int dif);
+struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+ __be32 daddr, __be16 dport, int dif,
+ struct udp_table *tbl);
+struct sock *udp6_lib_lookup(struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, __be16 dport,
+ int dif);
+struct sock *__udp6_lib_lookup(struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, __be16 dport,
+ int dif, struct udp_table *tbl);
/*
* SNMP statistics for UDP and UDP-Lite
@@ -259,19 +260,19 @@ struct udp_iter_state {
};
#ifdef CONFIG_PROC_FS
-extern int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
-extern void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);
+int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
+void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);
-extern int udp4_proc_init(void);
-extern void udp4_proc_exit(void);
+int udp4_proc_init(void);
+void udp4_proc_exit(void);
#endif
-extern int udpv4_offload_init(void);
+int udpv4_offload_init(void);
-extern void udp_init(void);
+void udp_init(void);
-extern void udp_encap_enable(void);
+void udp_encap_enable(void);
#if IS_ENABLED(CONFIG_IPV6)
-extern void udpv6_encap_enable(void);
+void udpv6_encap_enable(void);
#endif
#endif /* _UDP_H */
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 71375459a884..2caadabcd07b 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -126,7 +126,7 @@ static inline __wsum udplite_csum(struct sk_buff *skb)
return skb_checksum(skb, off, len, 0);
}
-extern void udplite4_register(void);
-extern int udplite_get_port(struct sock *sk, unsigned short snum,
- int (*scmp)(const struct sock *, const struct sock *));
+void udplite4_register(void);
+int udplite_get_port(struct sock *sk, unsigned short snum,
+ int (*scmp)(const struct sock *, const struct sock *));
#endif /* _UDPLITE_H */
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 2d64d3cd4999..6b6d180fb91a 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -36,5 +36,16 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
+/* IP header + UDP + VXLAN + Ethernet header */
+#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
+/* IPv6 header + UDP + VXLAN + Ethernet header */
+#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
+
+#if IS_ENABLED(CONFIG_VXLAN)
void vxlan_get_rx_port(struct net_device *netdev);
+#else
+static inline void vxlan_get_rx_port(struct net_device *netdev)
+{
+}
+#endif
#endif
diff --git a/include/net/wext.h b/include/net/wext.h
index 4f6e7423174c..345911965dbb 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -6,13 +6,13 @@
struct net;
#ifdef CONFIG_WEXT_CORE
-extern int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
- void __user *arg);
-extern int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
- unsigned long arg);
+int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+ void __user *arg);
+int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
+ unsigned long arg);
-extern struct iw_statistics *get_wireless_stats(struct net_device *dev);
-extern int call_commit_handler(struct net_device *dev);
+struct iw_statistics *get_wireless_stats(struct net_device *dev);
+int call_commit_handler(struct net_device *dev);
#else
static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
void __user *arg)
@@ -27,8 +27,8 @@ static inline int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
#endif
#ifdef CONFIG_WEXT_PROC
-extern int wext_proc_init(struct net *net);
-extern void wext_proc_exit(struct net *net);
+int wext_proc_init(struct net *net);
+void wext_proc_exit(struct net *net);
#else
static inline int wext_proc_init(struct net *net)
{
diff --git a/include/net/wimax.h b/include/net/wimax.h
index bbb74f990cab..98498e1daa06 100644
--- a/include/net/wimax.h
+++ b/include/net/wimax.h
@@ -438,9 +438,9 @@ struct wimax_dev {
*
* These functions are not exported to user space.
*/
-extern void wimax_dev_init(struct wimax_dev *);
-extern int wimax_dev_add(struct wimax_dev *, struct net_device *);
-extern void wimax_dev_rm(struct wimax_dev *);
+void wimax_dev_init(struct wimax_dev *);
+int wimax_dev_add(struct wimax_dev *, struct net_device *);
+void wimax_dev_rm(struct wimax_dev *);
static inline
struct wimax_dev *net_dev_to_wimax(struct net_device *net_dev)
@@ -454,8 +454,8 @@ struct device *wimax_dev_to_dev(struct wimax_dev *wimax_dev)
return wimax_dev->net_dev->dev.parent;
}
-extern void wimax_state_change(struct wimax_dev *, enum wimax_st);
-extern enum wimax_st wimax_state_get(struct wimax_dev *);
+void wimax_state_change(struct wimax_dev *, enum wimax_st);
+enum wimax_st wimax_state_get(struct wimax_dev *);
/*
* Radio Switch state reporting.
@@ -463,8 +463,8 @@ extern enum wimax_st wimax_state_get(struct wimax_dev *);
* enum wimax_rf_state is declared in linux/wimax.h so the exports
* to user space can use it.
*/
-extern void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
-extern void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
+void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
+void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
/*
@@ -490,15 +490,14 @@ extern void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
* send diagnostics information that a device-specific diagnostics
* tool would be interested in.
*/
-extern struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *,
- const void *, size_t, gfp_t);
-extern int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
-extern int wimax_msg(struct wimax_dev *, const char *,
- const void *, size_t, gfp_t);
+struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *, const void *,
+ size_t, gfp_t);
+int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
+int wimax_msg(struct wimax_dev *, const char *, const void *, size_t, gfp_t);
-extern const void *wimax_msg_data_len(struct sk_buff *, size_t *);
-extern const void *wimax_msg_data(struct sk_buff *);
-extern ssize_t wimax_msg_len(struct sk_buff *);
+const void *wimax_msg_data_len(struct sk_buff *, size_t *);
+const void *wimax_msg_data(struct sk_buff *);
+ssize_t wimax_msg_len(struct sk_buff *);
/*
@@ -513,7 +512,7 @@ extern ssize_t wimax_msg_len(struct sk_buff *);
* device's control structure and (as such) the 'struct wimax_dev' is
* referenced by the caller.
*/
-extern int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
-extern int wimax_reset(struct wimax_dev *);
+int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
+int wimax_reset(struct wimax_dev *);
#endif /* #ifndef __NET__WIMAX_H__ */
diff --git a/include/net/x25.h b/include/net/x25.h
index b4a8a8923128..c383aa4edbf0 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -187,57 +187,57 @@ extern int sysctl_x25_clear_request_timeout;
extern int sysctl_x25_ack_holdback_timeout;
extern int sysctl_x25_forward;
-extern int x25_parse_address_block(struct sk_buff *skb,
- struct x25_address *called_addr,
- struct x25_address *calling_addr);
-
-extern int x25_addr_ntoa(unsigned char *, struct x25_address *,
- struct x25_address *);
-extern int x25_addr_aton(unsigned char *, struct x25_address *,
- struct x25_address *);
-extern struct sock *x25_find_socket(unsigned int, struct x25_neigh *);
-extern void x25_destroy_socket_from_timer(struct sock *);
-extern int x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int);
-extern void x25_kill_by_neigh(struct x25_neigh *);
+int x25_parse_address_block(struct sk_buff *skb,
+ struct x25_address *called_addr,
+ struct x25_address *calling_addr);
+
+int x25_addr_ntoa(unsigned char *, struct x25_address *, struct x25_address *);
+int x25_addr_aton(unsigned char *, struct x25_address *, struct x25_address *);
+struct sock *x25_find_socket(unsigned int, struct x25_neigh *);
+void x25_destroy_socket_from_timer(struct sock *);
+int x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int);
+void x25_kill_by_neigh(struct x25_neigh *);
/* x25_dev.c */
-extern void x25_send_frame(struct sk_buff *, struct x25_neigh *);
-extern int x25_lapb_receive_frame(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
-extern void x25_establish_link(struct x25_neigh *);
-extern void x25_terminate_link(struct x25_neigh *);
+void x25_send_frame(struct sk_buff *, struct x25_neigh *);
+int x25_lapb_receive_frame(struct sk_buff *, struct net_device *,
+ struct packet_type *, struct net_device *);
+void x25_establish_link(struct x25_neigh *);
+void x25_terminate_link(struct x25_neigh *);
/* x25_facilities.c */
-extern int x25_parse_facilities(struct sk_buff *, struct x25_facilities *,
- struct x25_dte_facilities *, unsigned long *);
-extern int x25_create_facilities(unsigned char *, struct x25_facilities *,
- struct x25_dte_facilities *, unsigned long);
-extern int x25_negotiate_facilities(struct sk_buff *, struct sock *,
- struct x25_facilities *,
- struct x25_dte_facilities *);
-extern void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
+int x25_parse_facilities(struct sk_buff *, struct x25_facilities *,
+ struct x25_dte_facilities *, unsigned long *);
+int x25_create_facilities(unsigned char *, struct x25_facilities *,
+ struct x25_dte_facilities *, unsigned long);
+int x25_negotiate_facilities(struct sk_buff *, struct sock *,
+ struct x25_facilities *,
+ struct x25_dte_facilities *);
+void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
/* x25_forward.c */
-extern void x25_clear_forward_by_lci(unsigned int lci);
-extern void x25_clear_forward_by_dev(struct net_device *);
-extern int x25_forward_data(int, struct x25_neigh *, struct sk_buff *);
-extern int x25_forward_call(struct x25_address *, struct x25_neigh *,
- struct sk_buff *, int);
+void x25_clear_forward_by_lci(unsigned int lci);
+void x25_clear_forward_by_dev(struct net_device *);
+int x25_forward_data(int, struct x25_neigh *, struct sk_buff *);
+int x25_forward_call(struct x25_address *, struct x25_neigh *, struct sk_buff *,
+ int);
/* x25_in.c */
-extern int x25_process_rx_frame(struct sock *, struct sk_buff *);
-extern int x25_backlog_rcv(struct sock *, struct sk_buff *);
+int x25_process_rx_frame(struct sock *, struct sk_buff *);
+int x25_backlog_rcv(struct sock *, struct sk_buff *);
/* x25_link.c */
-extern void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short);
-extern void x25_link_device_up(struct net_device *);
-extern void x25_link_device_down(struct net_device *);
-extern void x25_link_established(struct x25_neigh *);
-extern void x25_link_terminated(struct x25_neigh *);
-extern void x25_transmit_clear_request(struct x25_neigh *, unsigned int, unsigned char);
-extern void x25_transmit_link(struct sk_buff *, struct x25_neigh *);
-extern int x25_subscr_ioctl(unsigned int, void __user *);
-extern struct x25_neigh *x25_get_neigh(struct net_device *);
-extern void x25_link_free(void);
+void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short);
+void x25_link_device_up(struct net_device *);
+void x25_link_device_down(struct net_device *);
+void x25_link_established(struct x25_neigh *);
+void x25_link_terminated(struct x25_neigh *);
+void x25_transmit_clear_request(struct x25_neigh *, unsigned int,
+ unsigned char);
+void x25_transmit_link(struct sk_buff *, struct x25_neigh *);
+int x25_subscr_ioctl(unsigned int, void __user *);
+struct x25_neigh *x25_get_neigh(struct net_device *);
+void x25_link_free(void);
/* x25_neigh.c */
static __inline__ void x25_neigh_hold(struct x25_neigh *nb)
@@ -252,16 +252,16 @@ static __inline__ void x25_neigh_put(struct x25_neigh *nb)
}
/* x25_out.c */
-extern int x25_output(struct sock *, struct sk_buff *);
-extern void x25_kick(struct sock *);
-extern void x25_enquiry_response(struct sock *);
+int x25_output(struct sock *, struct sk_buff *);
+void x25_kick(struct sock *);
+void x25_enquiry_response(struct sock *);
/* x25_route.c */
-extern struct x25_route *x25_get_route(struct x25_address *addr);
-extern struct net_device *x25_dev_get(char *);
-extern void x25_route_device_down(struct net_device *dev);
-extern int x25_route_ioctl(unsigned int, void __user *);
-extern void x25_route_free(void);
+struct x25_route *x25_get_route(struct x25_address *addr);
+struct net_device *x25_dev_get(char *);
+void x25_route_device_down(struct net_device *dev);
+int x25_route_ioctl(unsigned int, void __user *);
+void x25_route_free(void);
static __inline__ void x25_route_hold(struct x25_route *rt)
{
@@ -275,30 +275,31 @@ static __inline__ void x25_route_put(struct x25_route *rt)
}
/* x25_subr.c */
-extern void x25_clear_queues(struct sock *);
-extern void x25_frames_acked(struct sock *, unsigned short);
-extern void x25_requeue_frames(struct sock *);
-extern int x25_validate_nr(struct sock *, unsigned short);
-extern void x25_write_internal(struct sock *, int);
-extern int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *, int *);
-extern void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
+void x25_clear_queues(struct sock *);
+void x25_frames_acked(struct sock *, unsigned short);
+void x25_requeue_frames(struct sock *);
+int x25_validate_nr(struct sock *, unsigned short);
+void x25_write_internal(struct sock *, int);
+int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *,
+ int *);
+void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
/* x25_timer.c */
-extern void x25_init_timers(struct sock *sk);
-extern void x25_start_heartbeat(struct sock *);
-extern void x25_start_t2timer(struct sock *);
-extern void x25_start_t21timer(struct sock *);
-extern void x25_start_t22timer(struct sock *);
-extern void x25_start_t23timer(struct sock *);
-extern void x25_stop_heartbeat(struct sock *);
-extern void x25_stop_timer(struct sock *);
-extern unsigned long x25_display_timer(struct sock *);
-extern void x25_check_rbuf(struct sock *);
+void x25_init_timers(struct sock *sk);
+void x25_start_heartbeat(struct sock *);
+void x25_start_t2timer(struct sock *);
+void x25_start_t21timer(struct sock *);
+void x25_start_t22timer(struct sock *);
+void x25_start_t23timer(struct sock *);
+void x25_stop_heartbeat(struct sock *);
+void x25_stop_timer(struct sock *);
+unsigned long x25_display_timer(struct sock *);
+void x25_check_rbuf(struct sock *);
/* sysctl_net_x25.c */
#ifdef CONFIG_SYSCTL
-extern void x25_register_sysctl(void);
-extern void x25_unregister_sysctl(void);
+void x25_register_sysctl(void);
+void x25_unregister_sysctl(void);
#else
static inline void x25_register_sysctl(void) {};
static inline void x25_unregister_sysctl(void) {};
@@ -318,6 +319,6 @@ extern rwlock_t x25_forward_list_lock;
extern struct list_head x25_neigh_list;
extern rwlock_t x25_neigh_list_lock;
-extern int x25_proc_init(void);
-extern void x25_proc_exit(void);
+int x25_proc_init(void);
+void x25_proc_exit(void);
#endif
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index e253bf0cc7ef..6b82fdf4ba71 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -307,15 +307,17 @@ struct xfrm_policy_afinfo {
struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
};
-extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
-extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
-extern void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c);
-extern void km_state_notify(struct xfrm_state *x, const struct km_event *c);
+int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
+int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+void km_policy_notify(struct xfrm_policy *xp, int dir,
+ const struct km_event *c);
+void km_state_notify(struct xfrm_state *x, const struct km_event *c);
struct xfrm_tmpl;
-extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
-extern void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
-extern int __xfrm_state_delete(struct xfrm_state *x);
+int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
+ struct xfrm_policy *pol);
+void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
+int __xfrm_state_delete(struct xfrm_state *x);
struct xfrm_state_afinfo {
unsigned int family;
@@ -344,12 +346,12 @@ struct xfrm_state_afinfo {
void (*local_error)(struct sk_buff *skb, u32 mtu);
};
-extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
-extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
-extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
-extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
+int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
+int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
+void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
-extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
+void xfrm_state_delete_tunnel(struct xfrm_state *x);
struct xfrm_type {
char *description;
@@ -372,8 +374,8 @@ struct xfrm_type {
u32 (*get_mtu)(struct xfrm_state *, int size);
};
-extern int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
-extern int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
+int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
+int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
struct xfrm_mode {
/*
@@ -434,8 +436,8 @@ enum {
XFRM_MODE_FLAG_TUNNEL = 1,
};
-extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
-extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
+int xfrm_register_mode(struct xfrm_mode *mode, int family);
+int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
static inline int xfrm_af2proto(unsigned int family)
{
@@ -595,8 +597,8 @@ struct xfrm_mgr {
const struct xfrm_kmaddress *k);
};
-extern int xfrm_register_km(struct xfrm_mgr *km);
-extern int xfrm_unregister_km(struct xfrm_mgr *km);
+int xfrm_register_km(struct xfrm_mgr *km);
+int xfrm_unregister_km(struct xfrm_mgr *km);
/*
* This structure is used for the duration where packets are being
@@ -713,23 +715,23 @@ static inline void xfrm_audit_helper_usrinfo(kuid_t auid, u32 ses, u32 secid,
audit_log_task_context(audit_buf);
}
-extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
- kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
- kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
- kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
- kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
- struct sk_buff *skb);
-extern void xfrm_audit_state_replay(struct xfrm_state *x,
- struct sk_buff *skb, __be32 net_seq);
-extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
-extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
- __be32 net_spi, __be32 net_seq);
-extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
- struct sk_buff *skb, u8 proto);
+void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, kuid_t auid,
+ u32 ses, u32 secid);
+void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, kuid_t auid,
+ u32 ses, u32 secid);
+void xfrm_audit_state_add(struct xfrm_state *x, int result, kuid_t auid,
+ u32 ses, u32 secid);
+void xfrm_audit_state_delete(struct xfrm_state *x, int result, kuid_t auid,
+ u32 ses, u32 secid);
+void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
+ struct sk_buff *skb);
+void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
+ __be32 net_seq);
+void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
+void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
+ __be32 net_seq);
+void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
+ u8 proto);
#else
static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
@@ -784,7 +786,7 @@ static inline void xfrm_pol_hold(struct xfrm_policy *policy)
atomic_inc(&policy->refcnt);
}
-extern void xfrm_policy_destroy(struct xfrm_policy *policy);
+void xfrm_policy_destroy(struct xfrm_policy *policy);
static inline void xfrm_pol_put(struct xfrm_policy *policy)
{
@@ -799,7 +801,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
xfrm_pol_put(pols[i]);
}
-extern void __xfrm_state_destroy(struct xfrm_state *);
+void __xfrm_state_destroy(struct xfrm_state *);
static inline void __xfrm_state_put(struct xfrm_state *x)
{
@@ -903,9 +905,8 @@ __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
return port;
}
-extern bool xfrm_selector_match(const struct xfrm_selector *sel,
- const struct flowi *fl,
- unsigned short family);
+bool xfrm_selector_match(const struct xfrm_selector *sel,
+ const struct flowi *fl, unsigned short family);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* If neither has a context --> match
@@ -975,7 +976,7 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
}
#endif
-extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
+void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
struct sec_path {
atomic_t refcnt;
@@ -1000,7 +1001,7 @@ secpath_get(struct sec_path *sp)
return sp;
}
-extern void __secpath_destroy(struct sec_path *sp);
+void __secpath_destroy(struct sec_path *sp);
static inline void
secpath_put(struct sec_path *sp)
@@ -1009,7 +1010,7 @@ secpath_put(struct sec_path *sp)
__secpath_destroy(sp);
}
-extern struct sec_path *secpath_dup(struct sec_path *src);
+struct sec_path *secpath_dup(struct sec_path *src);
static inline void
secpath_reset(struct sk_buff *skb)
@@ -1059,7 +1060,8 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un
}
#ifdef CONFIG_XFRM
-extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
+int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
+ unsigned short family);
static inline int __xfrm_policy_check2(struct sock *sk, int dir,
struct sk_buff *skb,
@@ -1103,8 +1105,8 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
}
-extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
- unsigned int family, int reverse);
+int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
+ unsigned int family, int reverse);
static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
unsigned int family)
@@ -1119,7 +1121,7 @@ static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
return __xfrm_decode_session(skb, fl, family, 1);
}
-extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
+int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
@@ -1140,7 +1142,7 @@ static inline int xfrm6_route_forward(struct sk_buff *skb)
return xfrm_route_forward(skb, AF_INET6);
}
-extern int __xfrm_sk_clone_policy(struct sock *sk);
+int __xfrm_sk_clone_policy(struct sock *sk);
static inline int xfrm_sk_clone_policy(struct sock *sk)
{
@@ -1149,7 +1151,7 @@ static inline int xfrm_sk_clone_policy(struct sock *sk)
return 0;
}
-extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
+int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
static inline void xfrm_sk_free_policy(struct sock *sk)
{
@@ -1163,7 +1165,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
}
}
-extern void xfrm_garbage_collect(struct net *net);
+void xfrm_garbage_collect(struct net *net);
#else
@@ -1355,6 +1357,12 @@ struct xfrm_tunnel {
int priority;
};
+struct xfrm_tunnel_notifier {
+ int (*handler)(struct sk_buff *skb);
+ struct xfrm_tunnel_notifier __rcu *next;
+ int priority;
+};
+
struct xfrm6_tunnel {
int (*handler)(struct sk_buff *skb);
int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -1363,16 +1371,16 @@ struct xfrm6_tunnel {
int priority;
};
-extern void xfrm_init(void);
-extern void xfrm4_init(void);
-extern int xfrm_state_init(struct net *net);
-extern void xfrm_state_fini(struct net *net);
-extern void xfrm4_state_init(void);
+void xfrm_init(void);
+void xfrm4_init(void);
+int xfrm_state_init(struct net *net);
+void xfrm_state_fini(struct net *net);
+void xfrm4_state_init(void);
#ifdef CONFIG_XFRM
-extern int xfrm6_init(void);
-extern void xfrm6_fini(void);
-extern int xfrm6_state_init(void);
-extern void xfrm6_state_fini(void);
+int xfrm6_init(void);
+void xfrm6_fini(void);
+int xfrm6_state_init(void);
+void xfrm6_state_fini(void);
#else
static inline int xfrm6_init(void)
{
@@ -1385,52 +1393,52 @@ static inline void xfrm6_fini(void)
#endif
#ifdef CONFIG_XFRM_STATISTICS
-extern int xfrm_proc_init(struct net *net);
-extern void xfrm_proc_fini(struct net *net);
+int xfrm_proc_init(struct net *net);
+void xfrm_proc_fini(struct net *net);
#endif
-extern int xfrm_sysctl_init(struct net *net);
+int xfrm_sysctl_init(struct net *net);
#ifdef CONFIG_SYSCTL
-extern void xfrm_sysctl_fini(struct net *net);
+void xfrm_sysctl_fini(struct net *net);
#else
static inline void xfrm_sysctl_fini(struct net *net)
{
}
#endif
-extern void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
-extern int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
- int (*func)(struct xfrm_state *, int, void*), void *);
-extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
-extern struct xfrm_state *xfrm_state_alloc(struct net *net);
-extern struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
- const xfrm_address_t *saddr,
- const struct flowi *fl,
- struct xfrm_tmpl *tmpl,
- struct xfrm_policy *pol, int *err,
- unsigned short family);
-extern struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
- xfrm_address_t *daddr,
- xfrm_address_t *saddr,
- unsigned short family,
- u8 mode, u8 proto, u32 reqid);
-extern int xfrm_state_check_expire(struct xfrm_state *x);
-extern void xfrm_state_insert(struct xfrm_state *x);
-extern int xfrm_state_add(struct xfrm_state *x);
-extern int xfrm_state_update(struct xfrm_state *x);
-extern struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
- const xfrm_address_t *daddr, __be32 spi,
- u8 proto, unsigned short family);
-extern struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
- const xfrm_address_t *daddr,
- const xfrm_address_t *saddr,
- u8 proto,
- unsigned short family);
+void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
+int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
+ int (*func)(struct xfrm_state *, int, void*), void *);
+void xfrm_state_walk_done(struct xfrm_state_walk *walk);
+struct xfrm_state *xfrm_state_alloc(struct net *net);
+struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ const struct flowi *fl,
+ struct xfrm_tmpl *tmpl,
+ struct xfrm_policy *pol, int *err,
+ unsigned short family);
+struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
+ xfrm_address_t *daddr,
+ xfrm_address_t *saddr,
+ unsigned short family,
+ u8 mode, u8 proto, u32 reqid);
+int xfrm_state_check_expire(struct xfrm_state *x);
+void xfrm_state_insert(struct xfrm_state *x);
+int xfrm_state_add(struct xfrm_state *x);
+int xfrm_state_update(struct xfrm_state *x);
+struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
+ const xfrm_address_t *daddr, __be32 spi,
+ u8 proto, unsigned short family);
+struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+ const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ u8 proto,
+ unsigned short family);
#ifdef CONFIG_XFRM_SUB_POLICY
-extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
- int n, unsigned short family);
-extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
- int n, unsigned short family);
+int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
+ unsigned short family);
+int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
+ unsigned short family);
#else
static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
int n, unsigned short family)
@@ -1462,68 +1470,69 @@ struct xfrmk_spdinfo {
u32 spdhmcnt;
};
-extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark,
- u32 seq);
-extern int xfrm_state_delete(struct xfrm_state *x);
-extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
-extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
-extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
-extern u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
-extern int xfrm_init_replay(struct xfrm_state *x);
-extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
-extern int __xfrm_init_state(struct xfrm_state *x, bool init_replay);
-extern int xfrm_init_state(struct xfrm_state *x);
-extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi,
- int encap_type);
-extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
-extern int xfrm_output_resume(struct sk_buff *skb, int err);
-extern int xfrm_output(struct sk_buff *skb);
-extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern void xfrm_local_error(struct sk_buff *skb, int mtu);
-extern int xfrm4_extract_header(struct sk_buff *skb);
-extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
- int encap_type);
-extern int xfrm4_transport_finish(struct sk_buff *skb, int async);
-extern int xfrm4_rcv(struct sk_buff *skb);
+struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
+int xfrm_state_delete(struct xfrm_state *x);
+int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
+void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
+void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
+u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
+int xfrm_init_replay(struct xfrm_state *x);
+int xfrm_state_mtu(struct xfrm_state *x, int mtu);
+int __xfrm_init_state(struct xfrm_state *x, bool init_replay);
+int xfrm_init_state(struct xfrm_state *x);
+int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
+int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
+int xfrm_output_resume(struct sk_buff *skb, int err);
+int xfrm_output(struct sk_buff *skb);
+int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+void xfrm_local_error(struct sk_buff *skb, int mtu);
+int xfrm4_extract_header(struct sk_buff *skb);
+int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type);
+int xfrm4_transport_finish(struct sk_buff *skb, int async);
+int xfrm4_rcv(struct sk_buff *skb);
static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
{
return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
}
-extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_output(struct sk_buff *skb);
-extern int xfrm4_output_finish(struct sk_buff *skb);
-extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
-extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
-extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
-extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
-extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
-extern int xfrm6_extract_header(struct sk_buff *skb);
-extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
-extern int xfrm6_transport_finish(struct sk_buff *skb, int async);
-extern int xfrm6_rcv(struct sk_buff *skb);
-extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
- xfrm_address_t *saddr, u8 proto);
-extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
-extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
-extern __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
-extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
-extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_output(struct sk_buff *skb);
-extern int xfrm6_output_finish(struct sk_buff *skb);
-extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
- u8 **prevhdr);
-extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_output(struct sk_buff *skb);
+int xfrm4_output_finish(struct sk_buff *skb);
+int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
+int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
+void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
+int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
+int xfrm6_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
+int xfrm6_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
+int xfrm6_extract_header(struct sk_buff *skb);
+int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
+int xfrm6_transport_finish(struct sk_buff *skb, int async);
+int xfrm6_rcv(struct sk_buff *skb);
+int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
+ xfrm_address_t *saddr, u8 proto);
+void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
+int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
+__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
+__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
+int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_output(struct sk_buff *skb);
+int xfrm6_output_finish(struct sk_buff *skb);
+int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
+ u8 **prevhdr);
#ifdef CONFIG_XFRM
-extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
-extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
+int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
+int xfrm_user_policy(struct sock *sk, int optname,
+ u8 __user *optval, int optlen);
#else
static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
@@ -1540,59 +1549,62 @@ static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
-extern void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
-extern int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
- int (*func)(struct xfrm_policy *, int, int, void*), void *);
-extern void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
+void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
+int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
+ int (*func)(struct xfrm_policy *, int, int, void*),
+ void *);
+void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
u8 type, int dir,
struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32 id, int delete, int *err);
+struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
+ u32 id, int delete, int *err);
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
u32 xfrm_get_acqseq(void);
-extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
+int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
u8 mode, u32 reqid, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create,
unsigned short family);
-extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
+int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
#ifdef CONFIG_XFRM_MIGRATE
-extern int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
- const struct xfrm_migrate *m, int num_bundles,
- const struct xfrm_kmaddress *k);
-extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
-extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
- struct xfrm_migrate *m);
-extern int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
- struct xfrm_migrate *m, int num_bundles,
- struct xfrm_kmaddress *k);
+int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ const struct xfrm_migrate *m, int num_bundles,
+ const struct xfrm_kmaddress *k);
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m);
+struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
+ struct xfrm_migrate *m);
+int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_migrate *m, int num_bundles,
+ struct xfrm_kmaddress *k);
#endif
-extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
-extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
-extern int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
-
-extern void xfrm_input_init(void);
-extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
-
-extern void xfrm_probe_algs(void);
-extern int xfrm_count_pfkey_auth_supported(void);
-extern int xfrm_count_pfkey_enc_supported(void);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
- int probe);
+int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
+void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
+int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
+ xfrm_address_t *addr);
+
+void xfrm_input_init(void);
+int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
+
+void xfrm_probe_algs(void);
+int xfrm_count_pfkey_auth_supported(void);
+int xfrm_count_pfkey_enc_supported(void);
+struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
+struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
+struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
+ int probe);
static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
const xfrm_address_t *b)
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index d65fbec2533d..bd7f00ecd844 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -405,11 +405,11 @@ extern int scsi_is_target_device(const struct device *);
extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, int timeout, int retries,
- int flag, int *resid);
+ u64 flags, int *resid);
extern int scsi_execute_req_flags(struct scsi_device *sdev,
const unsigned char *cmd, int data_direction, void *buffer,
unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
- int retries, int *resid, int flags);
+ int retries, int *resid, u64 flags);
static inline int scsi_execute_req(struct scsi_device *sdev,
const unsigned char *cmd, int data_direction, void *buffer,
unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index ff0f04ac91aa..4ebf6913b7b2 100644
--- a/include/scsi/scsi_transport_srp.h
+++ b/include/scsi/scsi_transport_srp.h
@@ -13,6 +13,27 @@ struct srp_rport_identifiers {
u8 roles;
};
+/**
+ * enum srp_rport_state - SRP transport layer state
+ * @SRP_RPORT_RUNNING: Transport layer operational.
+ * @SRP_RPORT_BLOCKED: Transport layer not operational; fast I/O fail timer
+ * is running and I/O has been blocked.
+ * @SRP_RPORT_FAIL_FAST: Fast I/O fail timer has expired; fail I/O fast.
+ * @SRP_RPORT_LOST: Device loss timer has expired; port is being removed.
+ */
+enum srp_rport_state {
+ SRP_RPORT_RUNNING,
+ SRP_RPORT_BLOCKED,
+ SRP_RPORT_FAIL_FAST,
+ SRP_RPORT_LOST,
+};
+
+/**
+ * struct srp_rport
+ * @lld_data: LLD private data.
+ * @mutex: Protects against concurrent rport reconnect / fast_io_fail /
+ * dev_loss_tmo activity.
+ */
struct srp_rport {
/* for initiator and target drivers */
@@ -23,11 +44,43 @@ struct srp_rport {
/* for initiator drivers */
- void *lld_data; /* LLD private data */
+ void *lld_data;
+
+ struct mutex mutex;
+ enum srp_rport_state state;
+ bool deleted;
+ int reconnect_delay;
+ int failed_reconnects;
+ struct delayed_work reconnect_work;
+ int fast_io_fail_tmo;
+ int dev_loss_tmo;
+ struct delayed_work fast_io_fail_work;
+ struct delayed_work dev_loss_work;
};
+/**
+ * struct srp_function_template
+ * @has_rport_state: Whether or not to create the state, fast_io_fail_tmo and
+ * dev_loss_tmo sysfs attributes for an rport.
+ * @reset_timer_if_blocked: Whether or not srp_timed_out() should reset the
+ * command timer if the device on which it has been queued is blocked.
+ * @reconnect_delay: If not NULL, points to the default reconnect_delay value.
+ * @fast_io_fail_tmo: If not NULL, points to the default fast_io_fail_tmo value.
+ * @dev_loss_tmo: If not NULL, points to the default dev_loss_tmo value.
+ * @reconnect: Callback function for reconnecting to the target. See also
+ * srp_reconnect_rport().
+ * @terminate_rport_io: Callback function for terminating all outstanding I/O
+ * requests for an rport.
+ */
struct srp_function_template {
/* for initiator drivers */
+ bool has_rport_state;
+ bool reset_timer_if_blocked;
+ int *reconnect_delay;
+ int *fast_io_fail_tmo;
+ int *dev_loss_tmo;
+ int (*reconnect)(struct srp_rport *rport);
+ void (*terminate_rport_io)(struct srp_rport *rport);
void (*rport_delete)(struct srp_rport *rport);
/* for target drivers */
int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int);
@@ -38,10 +91,36 @@ extern struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *);
extern void srp_release_transport(struct scsi_transport_template *);
+extern void srp_rport_get(struct srp_rport *rport);
+extern void srp_rport_put(struct srp_rport *rport);
extern struct srp_rport *srp_rport_add(struct Scsi_Host *,
struct srp_rport_identifiers *);
extern void srp_rport_del(struct srp_rport *);
-
+extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
+ int dev_loss_tmo);
+extern int srp_reconnect_rport(struct srp_rport *rport);
+extern void srp_start_tl_fail_timers(struct srp_rport *rport);
extern void srp_remove_host(struct Scsi_Host *);
+/**
+ * srp_chkready() - evaluate the transport layer state before I/O
+ *
+ * Returns a SCSI result code that can be returned by the LLD queuecommand()
+ * implementation. The role of this function is similar to that of
+ * fc_remote_port_chkready().
+ */
+static inline int srp_chkready(struct srp_rport *rport)
+{
+ switch (rport->state) {
+ case SRP_RPORT_RUNNING:
+ case SRP_RPORT_BLOCKED:
+ default:
+ return 0;
+ case SRP_RPORT_FAIL_FAST:
+ return DID_TRANSPORT_FAILFAST << 16;
+ case SRP_RPORT_LOST:
+ return DID_NO_CONNECT << 16;
+ }
+}
+
#endif
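
Not part of the patch, but as a reading aid for the new srp_rport state machine and srp_function_template callbacks above, a minimal sketch of how an SRP initiator driver might wire them up. Everything prefixed my_ is hypothetical; only the srp_* and SCSI midlayer symbols come from the headers shown here.

/* Illustrative sketch only -- the my_* symbols are made up. */
static int my_srp_reconnect(struct srp_rport *rport)
{
	struct my_srp_target *target = rport->lld_data;

	/* Re-establish the connection; return 0 on success. */
	return my_srp_connect(target);
}

static void my_srp_terminate_io(struct srp_rport *rport)
{
	/* Fail every outstanding request for this rport. */
	my_srp_fail_all_requests(rport->lld_data);
}

static struct srp_function_template my_srp_ft = {
	.has_rport_state	= true,
	.reconnect		= my_srp_reconnect,
	.terminate_rport_io	= my_srp_terminate_io,
};

static int my_srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct my_srp_target *target = shost_priv(shost);
	int result = srp_chkready(target->rport);

	if (result) {
		/* FAIL_FAST or LOST: complete the command right away. */
		cmd->result = result;
		cmd->scsi_done(cmd);
		return 0;
	}
	return my_srp_submit(target, cmd);
}
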
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
index 3ce69fd92523..52f02a60dba7 100644
--- a/include/sound/ak4114.h
+++ b/include/sound/ak4114.h
@@ -170,7 +170,7 @@ struct ak4114 {
void * private_data;
unsigned int init: 1;
spinlock_t lock;
- unsigned char regmap[7];
+ unsigned char regmap[6];
unsigned char txcsb[5];
struct snd_kcontrol *kctls[AK4114_CONTROLS];
struct snd_pcm_substream *playback_substream;
@@ -189,7 +189,7 @@ struct ak4114 {
int snd_ak4114_create(struct snd_card *card,
ak4114_read_t *read, ak4114_write_t *write,
- const unsigned char pgm[7], const unsigned char txcsb[5],
+ const unsigned char pgm[6], const unsigned char txcsb[5],
void *private_data, struct ak4114 **r_ak4114);
void snd_ak4114_reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char mask, unsigned char val);
void snd_ak4114_reinit(struct ak4114 *ak4114);
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 9031a26249b5..175ab3237b58 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -48,6 +48,8 @@ struct snd_compr_ops;
* the ring buffer
* @total_bytes_transferred: cumulative bytes transferred by offload DSP
* @sleep: poll sleep
+ * @wait: drain wait queue
+ * @drain_wake: condition for drain wake
*/
struct snd_compr_runtime {
snd_pcm_state_t state;
@@ -59,6 +61,8 @@ struct snd_compr_runtime {
u64 total_bytes_available;
u64 total_bytes_transferred;
wait_queue_head_t sleep;
+ wait_queue_head_t wait;
+ unsigned int drain_wake;
void *private_data;
};
@@ -171,4 +175,12 @@ static inline void snd_compr_fragment_elapsed(struct snd_compr_stream *stream)
wake_up(&stream->runtime->sleep);
}
+static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
+{
+ snd_BUG_ON(!stream);
+
+ stream->runtime->drain_wake = 1;
+ wake_up(&stream->runtime->wait);
+}
+
#endif
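
Purely illustrative (no such code is in this patch): one side signals drain completion through the new snd_compr_drain_notify() helper, while the other blocks on runtime->wait until drain_wake is set. The my_dsp_* names are invented.

/* Interrupt handler path: last queued frame has been rendered. */
static void my_dsp_drain_done_irq(struct my_dsp *dsp)
{
	snd_compr_drain_notify(dsp->cstream);
}

/* Drain path: wait until the notify above fires. */
static int my_dsp_wait_for_drain(struct snd_compr_stream *cstream)
{
	struct snd_compr_runtime *runtime = cstream->runtime;

	runtime->drain_wake = 0;
	return wait_event_interruptible(runtime->wait, runtime->drain_wake);
}
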
diff --git a/include/sound/cs42l52.h b/include/sound/cs42l52.h
index 4c68955f7330..7c2be4a51894 100644
--- a/include/sound/cs42l52.h
+++ b/include/sound/cs42l52.h
@@ -31,6 +31,8 @@ struct cs42l52_platform_data {
/* Charge Pump Freq. Check datasheet Pg73 */
unsigned int chgfreq;
+ /* Reset GPIO */
+ unsigned int reset_gpio;
};
#endif /* __CS42L52_H */
diff --git a/include/sound/cs42l73.h b/include/sound/cs42l73.h
new file mode 100644
index 000000000000..f354be4cdc9e
--- /dev/null
+++ b/include/sound/cs42l73.h
@@ -0,0 +1,22 @@
+/*
+ * linux/sound/cs42l73.h -- Platform data for CS42L73
+ *
+ * Copyright (c) 2012 Cirrus Logic Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CS42L73_H
+#define __CS42L73_H
+
+struct cs42l73_platform_data {
+ /* RST GPIO */
+ unsigned int reset_gpio;
+ unsigned int chgfreq;
+ int jack_detection;
+ unsigned int mclk_freq;
+};
+
+#endif /* __CS42L73_H */
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index f11c35cd5532..15017311f2e9 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -61,6 +61,8 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
* @slave_id: Slave requester id for the DMA channel.
* @filter_data: Custom DMA channel filter data, this will usually be used when
* requesting the DMA channel.
+ * @chan_name: Custom channel name to use when requesting DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes
*/
struct snd_dmaengine_dai_dma_data {
dma_addr_t addr;
@@ -68,6 +70,8 @@ struct snd_dmaengine_dai_dma_data {
u32 maxburst;
unsigned int slave_id;
void *filter_data;
+ const char *chan_name;
+ unsigned int fifo_size;
};
void snd_dmaengine_pcm_set_config_from_dai_data(
@@ -96,6 +100,10 @@ void snd_dmaengine_pcm_set_config_from_dai_data(
* playback.
*/
#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
+/*
+ * The PCM streams have custom channel names specified.
+ */
+#define SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME BIT(4)
/**
* struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
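
A hypothetical DAI driver could hand the new chan_name/fifo_size fields to the dmaengine PCM layer roughly like this (MY_TX_FIFO and the my_* structures are made up; snd_soc_dai_init_dma_data() is the helper added in the soc-dai.h hunk further down):

static int my_dai_probe(struct snd_soc_dai *dai)
{
	struct my_dai_priv *priv = snd_soc_dai_get_drvdata(dai);

	priv->dma_tx.addr      = priv->phys_base + MY_TX_FIFO;
	priv->dma_tx.maxburst  = 4;
	priv->dma_tx.chan_name = "tx";	/* paired with ..._FLAG_CUSTOM_CHANNEL_NAME */
	priv->dma_tx.fifo_size = 64;	/* bytes */

	snd_soc_dai_init_dma_data(dai, &priv->dma_tx, &priv->dma_rx);
	return 0;
}
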
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index cf15b8213df7..af9983970417 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -52,6 +52,11 @@ struct snd_dma_device {
#else
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
#endif
+#ifdef CONFIG_GENERIC_ALLOCATOR
+#define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */
+#else
+#define SNDRV_DMA_TYPE_DEV_IRAM SNDRV_DMA_TYPE_DEV
+#endif
/*
* info for buffer allocation
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index fe66533e9b7a..12afab18945d 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -36,7 +36,6 @@
#define RSND_SSI_CLK_PIN_SHARE (1 << 31)
#define RSND_SSI_CLK_FROM_ADG (1 << 30) /* clock parent is master */
#define RSND_SSI_SYNC (1 << 29) /* SSI34_sync etc */
-#define RSND_SSI_DEPENDENT (1 << 28) /* SSI needs SRU/SCU */
#define RSND_SSI_PLAY (1 << 24)
@@ -68,6 +67,7 @@ struct rsnd_scu_platform_info {
*
* A : generation
*/
+#define RSND_GEN_MASK (0xF << 0)
#define RSND_GEN1 (1 << 0) /* fixme */
#define RSND_GEN2 (2 << 0) /* fixme */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index ae9a227d35d3..800c101bb096 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -105,6 +105,8 @@ int snd_soc_dai_set_clkdiv(struct snd_soc_dai *dai,
int snd_soc_dai_set_pll(struct snd_soc_dai *dai,
int pll_id, int source, unsigned int freq_in, unsigned int freq_out);
+int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio);
+
/* Digital Audio interface formatting */
int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt);
@@ -131,6 +133,7 @@ struct snd_soc_dai_ops {
int (*set_pll)(struct snd_soc_dai *dai, int pll_id, int source,
unsigned int freq_in, unsigned int freq_out);
int (*set_clkdiv)(struct snd_soc_dai *dai, int div_id, int div);
+ int (*set_bclk_ratio)(struct snd_soc_dai *dai, unsigned int ratio);
/*
* DAI format configuration
@@ -166,6 +169,13 @@ struct snd_soc_dai_ops {
struct snd_soc_dai *);
int (*prepare)(struct snd_pcm_substream *,
struct snd_soc_dai *);
+ /*
+ * NOTE: Commands passed to the trigger function are not necessarily
+ * compatible with the current state of the dai. For example this
+ * sequence of commands is possible: START STOP STOP.
+ * So do not unconditionally use refcounting functions in the trigger
+ * function, e.g. clk_enable/disable.
+ */
int (*trigger)(struct snd_pcm_substream *, int,
struct snd_soc_dai *);
int (*bespoke_trigger)(struct snd_pcm_substream *, int,
@@ -276,6 +286,13 @@ static inline void snd_soc_dai_set_dma_data(struct snd_soc_dai *dai,
dai->capture_dma_data = data;
}
+static inline void snd_soc_dai_init_dma_data(struct snd_soc_dai *dai,
+ void *playback, void *capture)
+{
+ dai->playback_dma_data = playback;
+ dai->capture_dma_data = capture;
+}
+
static inline void snd_soc_dai_set_drvdata(struct snd_soc_dai *dai,
void *data)
{
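
For orientation only (not part of the patch), the new set_bclk_ratio hook and snd_soc_dai_set_bclk_ratio() helper would typically be used as below; all my_* names are hypothetical:

/* Codec side: only a 64fs bit clock is supported in this example. */
static int my_codec_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
{
	if (ratio != 64)
		return -EINVAL;
	return my_codec_set_clock_mode(dai, MY_BCLK_64FS);
}

static const struct snd_soc_dai_ops my_codec_dai_ops = {
	.set_bclk_ratio = my_codec_set_bclk_ratio,
};

/* Machine side: request 64 BCLKs per frame from hw_params(). */
static int my_card_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;

	return snd_soc_dai_set_bclk_ratio(rtd->codec_dai, 64);
}
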
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 27a72d5d4b00..2037c45adfe6 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -286,6 +286,8 @@ struct device;
.info = snd_soc_info_volsw, \
.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
.private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
+#define SOC_DAPM_SINGLE_VIRT(xname, max) \
+ SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0)
#define SOC_DAPM_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, \
@@ -300,6 +302,8 @@ struct device;
.tlv.p = (tlv_array), \
.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
.private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+#define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
+ SOC_DAPM_SINGLE_TLV(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
#define SOC_DAPM_ENUM(xname, xenum) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_enum_double, \
diff --git a/include/sound/soc.h b/include/sound/soc.h
index d22cb0a06feb..1f741cb24f33 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -13,6 +13,7 @@
#ifndef __LINUX_SND_SOC_H
#define __LINUX_SND_SOC_H
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/notifier.h>
@@ -330,7 +331,6 @@ struct soc_enum;
struct snd_soc_jack;
struct snd_soc_jack_zone;
struct snd_soc_jack_pin;
-struct snd_soc_cache_ops;
#include <sound/soc-dapm.h>
#include <sound/soc-dpcm.h>
@@ -348,10 +348,6 @@ enum snd_soc_control_type {
SND_SOC_REGMAP,
};
-enum snd_soc_compress_type {
- SND_SOC_FLAT_COMPRESSION = 1,
-};
-
enum snd_soc_pcm_subclass {
SND_SOC_PCM_CLASS_PCM = 0,
SND_SOC_PCM_CLASS_BE = 1,
@@ -369,6 +365,7 @@ int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
int snd_soc_register_card(struct snd_soc_card *card);
int snd_soc_unregister_card(struct snd_soc_card *card);
+int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
int snd_soc_suspend(struct device *dev);
int snd_soc_resume(struct device *dev);
int snd_soc_poweroff(struct device *dev);
@@ -386,6 +383,9 @@ void snd_soc_unregister_codec(struct device *dev);
int snd_soc_register_component(struct device *dev,
const struct snd_soc_component_driver *cmpnt_drv,
struct snd_soc_dai_driver *dai_drv, int num_dai);
+int devm_snd_soc_register_component(struct device *dev,
+ const struct snd_soc_component_driver *cmpnt_drv,
+ struct snd_soc_dai_driver *dai_drv, int num_dai);
void snd_soc_unregister_component(struct device *dev);
int snd_soc_codec_volatile_register(struct snd_soc_codec *codec,
unsigned int reg);
@@ -403,12 +403,6 @@ int snd_soc_cache_write(struct snd_soc_codec *codec,
unsigned int reg, unsigned int value);
int snd_soc_cache_read(struct snd_soc_codec *codec,
unsigned int reg, unsigned int *value);
-int snd_soc_default_volatile_register(struct snd_soc_codec *codec,
- unsigned int reg);
-int snd_soc_default_readable_register(struct snd_soc_codec *codec,
- unsigned int reg);
-int snd_soc_default_writable_register(struct snd_soc_codec *codec,
- unsigned int reg);
int snd_soc_platform_read(struct snd_soc_platform *platform,
unsigned int reg);
int snd_soc_platform_write(struct snd_soc_platform *platform,
@@ -542,22 +536,6 @@ int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
/**
- * struct snd_soc_reg_access - Describes whether a given register is
- * readable, writable or volatile.
- *
- * @reg: the register number
- * @read: whether this register is readable
- * @write: whether this register is writable
- * @vol: whether this register is volatile
- */
-struct snd_soc_reg_access {
- u16 reg;
- u16 read;
- u16 write;
- u16 vol;
-};
-
-/**
* struct snd_soc_jack_pin - Describes a pin to update based on jack detection
*
* @pin: name of the pin to update
@@ -657,17 +635,26 @@ struct snd_soc_compr_ops {
int (*trigger)(struct snd_compr_stream *);
};
-/* SoC cache ops */
-struct snd_soc_cache_ops {
+/* component interface */
+struct snd_soc_component_driver {
+ const char *name;
+
+ /* DT */
+ int (*of_xlate_dai_name)(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+ const char **dai_name);
+};
+
+struct snd_soc_component {
const char *name;
- enum snd_soc_compress_type id;
- int (*init)(struct snd_soc_codec *codec);
- int (*exit)(struct snd_soc_codec *codec);
- int (*read)(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int *value);
- int (*write)(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value);
- int (*sync)(struct snd_soc_codec *codec);
+ int id;
+ struct device *dev;
+ struct list_head list;
+
+ struct snd_soc_dai_driver *dai_drv;
+ int num_dai;
+
+ const struct snd_soc_component_driver *driver;
};
/* SoC Audio Codec device */
@@ -683,8 +670,6 @@ struct snd_soc_codec {
struct list_head list;
struct list_head card_list;
int num_dai;
- enum snd_soc_compress_type compress_type;
- size_t reg_size; /* reg_cache_size * reg_word_size */
int (*volatile_register)(struct snd_soc_codec *, unsigned int);
int (*readable_register)(struct snd_soc_codec *, unsigned int);
int (*writable_register)(struct snd_soc_codec *, unsigned int);
@@ -708,13 +693,13 @@ struct snd_soc_codec {
unsigned int (*hw_read)(struct snd_soc_codec *, unsigned int);
unsigned int (*read)(struct snd_soc_codec *, unsigned int);
int (*write)(struct snd_soc_codec *, unsigned int, unsigned int);
- int (*bulk_write_raw)(struct snd_soc_codec *, unsigned int, const void *, size_t);
void *reg_cache;
- const void *reg_def_copy;
- const struct snd_soc_cache_ops *cache_ops;
struct mutex cache_rw_mutex;
int val_bytes;
+ /* component */
+ struct snd_soc_component component;
+
/* dapm */
struct snd_soc_dapm_context dapm;
unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
@@ -733,6 +718,7 @@ struct snd_soc_codec_driver {
int (*remove)(struct snd_soc_codec *);
int (*suspend)(struct snd_soc_codec *);
int (*resume)(struct snd_soc_codec *);
+ struct snd_soc_component_driver component_driver;
/* Default control and setup, added after probe() is run */
const struct snd_kcontrol_new *controls;
@@ -760,9 +746,6 @@ struct snd_soc_codec_driver {
short reg_cache_step;
short reg_word_size;
const void *reg_cache_default;
- short reg_access_size;
- const struct snd_soc_reg_access *reg_access_default;
- enum snd_soc_compress_type compress_type;
/* codec bias level */
int (*set_bias_level)(struct snd_soc_codec *,
@@ -849,20 +832,6 @@ struct snd_soc_platform {
#endif
};
-struct snd_soc_component_driver {
- const char *name;
-};
-
-struct snd_soc_component {
- const char *name;
- int id;
- int num_dai;
- struct device *dev;
- struct list_head list;
-
- const struct snd_soc_component_driver *driver;
-};
-
struct snd_soc_dai_link {
/* config - must be set by machine driver */
const char *name; /* Codec name */
@@ -944,12 +913,6 @@ struct snd_soc_codec_conf {
* associated per device
*/
const char *name_prefix;
-
- /*
- * set this to the desired compression type if you want to
- * override the one supplied in codec->driver->compress_type
- */
- enum snd_soc_compress_type compress_type;
};
struct snd_soc_aux_dev {
@@ -1088,7 +1051,8 @@ struct snd_soc_pcm_runtime {
/* mixer control */
struct soc_mixer_control {
int min, max, platform_max;
- unsigned int reg, rreg, shift, rshift;
+ int reg, rreg;
+ unsigned int shift, rshift;
unsigned int invert:1;
unsigned int autodisable:1;
};
@@ -1121,8 +1085,6 @@ struct soc_enum {
unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg);
unsigned int snd_soc_write(struct snd_soc_codec *codec,
unsigned int reg, unsigned int val);
-unsigned int snd_soc_bulk_write_raw(struct snd_soc_codec *codec,
- unsigned int reg, const void *data, size_t len);
/* device driver data */
@@ -1201,6 +1163,8 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname);
unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
const char *prefix);
+int snd_soc_of_get_dai_name(struct device_node *of_node,
+ const char **dai_name);
#include <sound/soc-dai.h>
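
The devm_ variants added above follow the usual managed-resource pattern; a hypothetical probe function (my_card, my_component_driver and my_dais are invented) would simply be:

static int my_machine_probe(struct platform_device *pdev)
{
	my_card.dev = &pdev->dev;
	/* Unregistered automatically when the driver is unbound. */
	return devm_snd_soc_register_card(&pdev->dev, &my_card);
}

static int my_cpu_dai_probe(struct platform_device *pdev)
{
	return devm_snd_soc_register_component(&pdev->dev,
					       &my_component_driver,
					       my_dais, ARRAY_SIZE(my_dais));
}
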
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 5fc2dcdd21cd..03996b2bb04f 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -14,6 +14,7 @@ struct snd_soc_codec;
struct snd_soc_platform;
struct snd_soc_card;
struct snd_soc_dapm_widget;
+struct snd_soc_dapm_path;
/*
* Log register events
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 5ebda976ea93..095c6e4fe1e8 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -6,11 +6,9 @@
#include <linux/tracepoint.h>
-struct search;
-
DECLARE_EVENT_CLASS(bcache_request,
- TP_PROTO(struct search *s, struct bio *bio),
- TP_ARGS(s, bio),
+ TP_PROTO(struct bcache_device *d, struct bio *bio),
+ TP_ARGS(d, bio),
TP_STRUCT__entry(
__field(dev_t, dev )
@@ -24,12 +22,12 @@ DECLARE_EVENT_CLASS(bcache_request,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
- __entry->orig_major = s->d->disk->major;
- __entry->orig_minor = s->d->disk->first_minor;
- __entry->sector = bio->bi_sector;
- __entry->orig_sector = bio->bi_sector - 16;
- __entry->nr_sector = bio->bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ __entry->orig_major = d->disk->major;
+ __entry->orig_minor = d->disk->first_minor;
+ __entry->sector = bio->bi_iter.bi_sector;
+ __entry->orig_sector = bio->bi_iter.bi_sector - 16;
+ __entry->nr_sector = bio->bi_iter.bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -79,13 +77,13 @@ DECLARE_EVENT_CLASS(btree_node,
/* request.c */
DEFINE_EVENT(bcache_request, bcache_request_start,
- TP_PROTO(struct search *s, struct bio *bio),
- TP_ARGS(s, bio)
+ TP_PROTO(struct bcache_device *d, struct bio *bio),
+ TP_ARGS(d, bio)
);
DEFINE_EVENT(bcache_request, bcache_request_end,
- TP_PROTO(struct search *s, struct bio *bio),
- TP_ARGS(s, bio)
+ TP_PROTO(struct bcache_device *d, struct bio *bio),
+ TP_ARGS(d, bio)
);
DECLARE_EVENT_CLASS(bcache_bio,
@@ -101,9 +99,9 @@ DECLARE_EVENT_CLASS(bcache_bio,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
- __entry->sector = bio->bi_sector;
- __entry->nr_sector = bio->bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ __entry->sector = bio->bi_iter.bi_sector;
+ __entry->nr_sector = bio->bi_iter.bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u",
@@ -136,9 +134,9 @@ TRACE_EVENT(bcache_read,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
- __entry->sector = bio->bi_sector;
- __entry->nr_sector = bio->bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ __entry->sector = bio->bi_iter.bi_sector;
+ __entry->nr_sector = bio->bi_iter.bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
__entry->cache_hit = hit;
__entry->bypass = bypass;
),
@@ -164,9 +162,9 @@ TRACE_EVENT(bcache_write,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
- __entry->sector = bio->bi_sector;
- __entry->nr_sector = bio->bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ __entry->sector = bio->bi_iter.bi_sector;
+ __entry->nr_sector = bio->bi_iter.bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
__entry->writeback = writeback;
__entry->bypass = bypass;
),
@@ -370,6 +368,35 @@ DEFINE_EVENT(btree_node, bcache_btree_set_root,
TP_ARGS(b)
);
+TRACE_EVENT(bcache_keyscan,
+ TP_PROTO(unsigned nr_found,
+ unsigned start_inode, uint64_t start_offset,
+ unsigned end_inode, uint64_t end_offset),
+ TP_ARGS(nr_found,
+ start_inode, start_offset,
+ end_inode, end_offset),
+
+ TP_STRUCT__entry(
+ __field(__u32, nr_found )
+ __field(__u32, start_inode )
+ __field(__u64, start_offset )
+ __field(__u32, end_inode )
+ __field(__u64, end_offset )
+ ),
+
+ TP_fast_assign(
+ __entry->nr_found = nr_found;
+ __entry->start_inode = start_inode;
+ __entry->start_offset = start_offset;
+ __entry->end_inode = end_inode;
+ __entry->end_offset = end_offset;
+ ),
+
+ TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
+ __entry->start_inode, __entry->start_offset,
+ __entry->end_inode, __entry->end_offset)
+);
+
/* Allocator */
TRACE_EVENT(bcache_alloc_invalidate,
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 4c2301d2ef1a..e76ae19a8d6f 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -243,9 +243,9 @@ TRACE_EVENT(block_bio_bounce,
TP_fast_assign(
__entry->dev = bio->bi_bdev ?
bio->bi_bdev->bd_dev : 0;
- __entry->sector = bio->bi_sector;
+ __entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -280,10 +280,10 @@ TRACE_EVENT(block_bio_complete,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
- __entry->sector = bio->bi_sector;
+ __entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
__entry->error = error;
- blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u [%d]",
@@ -308,9 +308,9 @@ DECLARE_EVENT_CLASS(block_bio_merge,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
- __entry->sector = bio->bi_sector;
+ __entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -375,9 +375,9 @@ TRACE_EVENT(block_bio_queue,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
- __entry->sector = bio->bi_sector;
+ __entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -403,7 +403,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
TP_fast_assign(
__entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
- __entry->sector = bio ? bio->bi_sector : 0;
+ __entry->sector = bio ? bio->bi_iter.bi_sector : 0;
__entry->nr_sector = bio ? bio_sectors(bio) : 0;
blk_fill_rwbs(__entry->rwbs,
bio ? bio->bi_rw : 0, __entry->nr_sector);
@@ -538,9 +538,9 @@ TRACE_EVENT(block_split,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
- __entry->sector = bio->bi_sector;
+ __entry->sector = bio->bi_iter.bi_sector;
__entry->new_sector = new_sector;
- blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -579,11 +579,11 @@ TRACE_EVENT(block_bio_remap,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
- __entry->sector = bio->bi_sector;
+ __entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
__entry->old_dev = dev;
__entry->old_sector = from;
- blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 52ae54828eda..bd3ee4fbe7a7 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -36,6 +36,11 @@
{ CURSEG_COLD_NODE, "Cold NODE" }, \
{ NO_CHECK_TYPE, "No TYPE" })
+#define show_file_type(type) \
+ __print_symbolic(type, \
+ { 0, "FILE" }, \
+ { 1, "DIR" })
+
#define show_gc_type(type) \
__print_symbolic(type, \
{ FG_GC, "Foreground GC" }, \
@@ -611,8 +616,8 @@ TRACE_EVENT(f2fs_do_submit_bio,
__entry->dev = sb->s_dev;
__entry->btype = btype;
__entry->sync = sync;
- __entry->sector = bio->bi_sector;
- __entry->size = bio->bi_size;
+ __entry->sector = bio->bi_iter.bi_sector;
+ __entry->size = bio->bi_iter.bi_size;
),
TP_printk("dev = (%d,%d), type = %s, io = %s, sector = %lld, size = %u",
@@ -623,6 +628,52 @@ TRACE_EVENT(f2fs_do_submit_bio,
__entry->size)
);
+DECLARE_EVENT_CLASS(f2fs__page,
+
+ TP_PROTO(struct page *page, int type),
+
+ TP_ARGS(page, type),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(int, type)
+ __field(int, dir)
+ __field(pgoff_t, index)
+ __field(int, dirty)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = page->mapping->host->i_sb->s_dev;
+ __entry->ino = page->mapping->host->i_ino;
+ __entry->type = type;
+ __entry->dir = S_ISDIR(page->mapping->host->i_mode);
+ __entry->index = page->index;
+ __entry->dirty = PageDirty(page);
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, %s, %s, index = %lu, dirty = %d",
+ show_dev_ino(__entry),
+ show_block_type(__entry->type),
+ show_file_type(__entry->dir),
+ (unsigned long)__entry->index,
+ __entry->dirty)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty,
+
+ TP_PROTO(struct page *page, int type),
+
+ TP_ARGS(page, type)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
+
+ TP_PROTO(struct page *page, int type),
+
+ TP_ARGS(page, type)
+);
+
TRACE_EVENT(f2fs_submit_write_page,
TP_PROTO(struct page *page, block_t blk_addr, int type),
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
new file mode 100644
index 000000000000..a8f5c32d174b
--- /dev/null
+++ b/include/trace/events/iommu.h
@@ -0,0 +1,162 @@
+/*
+ * iommu trace points
+ *
+ * Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com>
+ *
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iommu
+
+#if !defined(_TRACE_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IOMMU_H
+
+#include <linux/tracepoint.h>
+#include <linux/pci.h>
+
+struct device;
+
+DECLARE_EVENT_CLASS(iommu_group_event,
+
+ TP_PROTO(int group_id, struct device *dev),
+
+ TP_ARGS(group_id, dev),
+
+ TP_STRUCT__entry(
+ __field(int, gid)
+ __string(device, dev_name(dev))
+ ),
+
+ TP_fast_assign(
+ __entry->gid = group_id;
+ __assign_str(device, dev_name(dev));
+ ),
+
+ TP_printk("IOMMU: groupID=%d device=%s",
+ __entry->gid, __get_str(device)
+ )
+);
+
+DEFINE_EVENT(iommu_group_event, add_device_to_group,
+
+ TP_PROTO(int group_id, struct device *dev),
+
+ TP_ARGS(group_id, dev)
+
+);
+
+DEFINE_EVENT(iommu_group_event, remove_device_from_group,
+
+ TP_PROTO(int group_id, struct device *dev),
+
+ TP_ARGS(group_id, dev)
+);
+
+DECLARE_EVENT_CLASS(iommu_device_event,
+
+ TP_PROTO(struct device *dev),
+
+ TP_ARGS(dev),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(dev));
+ ),
+
+ TP_printk("IOMMU: device=%s", __get_str(device)
+ )
+);
+
+DEFINE_EVENT(iommu_device_event, attach_device_to_domain,
+
+ TP_PROTO(struct device *dev),
+
+ TP_ARGS(dev)
+);
+
+DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
+
+ TP_PROTO(struct device *dev),
+
+ TP_ARGS(dev)
+);
+
+DECLARE_EVENT_CLASS(iommu_map_unmap,
+
+ TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+ TP_ARGS(iova, paddr, size),
+
+ TP_STRUCT__entry(
+ __field(u64, iova)
+ __field(u64, paddr)
+ __field(int, size)
+ ),
+
+ TP_fast_assign(
+ __entry->iova = iova;
+ __entry->paddr = paddr;
+ __entry->size = size;
+ ),
+
+ TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x",
+ __entry->iova, __entry->paddr, __entry->size
+ )
+);
+
+DEFINE_EVENT(iommu_map_unmap, map,
+
+ TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+ TP_ARGS(iova, paddr, size)
+);
+
+DEFINE_EVENT_PRINT(iommu_map_unmap, unmap,
+
+ TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+ TP_ARGS(iova, paddr, size),
+
+ TP_printk("IOMMU: iova=0x%016llx size=0x%x",
+ __entry->iova, __entry->size
+ )
+);
+
+DECLARE_EVENT_CLASS(iommu_error,
+
+ TP_PROTO(struct device *dev, unsigned long iova, int flags),
+
+ TP_ARGS(dev, iova, flags),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __string(driver, dev_driver_string(dev))
+ __field(u64, iova)
+ __field(int, flags)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(dev));
+ __assign_str(driver, dev_driver_string(dev));
+ __entry->iova = iova;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("IOMMU:%s %s iova=0x%016llx flags=0x%04x",
+ __get_str(driver), __get_str(device),
+ __entry->iova, __entry->flags
+ )
+);
+
+DEFINE_EVENT(iommu_error, io_page_fault,
+
+ TP_PROTO(struct device *dev, unsigned long iova, int flags),
+
+ TP_ARGS(dev, iova, flags)
+);
+#endif /* _TRACE_IOMMU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
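
As with any TRACE_EVENT header, the events above become trace_<name>() calls once one compilation unit defines CREATE_TRACE_POINTS before including the header. A hedged sketch of possible call sites (the surrounding function is invented; only the tracepoints come from this file):

#define CREATE_TRACE_POINTS
#include <trace/events/iommu.h>

static void example_iommu_trace_usage(struct device *dev, int group_id,
				      unsigned long iova, phys_addr_t paddr,
				      size_t size)
{
	trace_add_device_to_group(group_id, dev);
	trace_attach_device_to_domain(dev);
	trace_map(iova, paddr, size);
	trace_unmap(iova, paddr, size);
}
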
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 7005d1109ec9..131a0bda7aec 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -296,23 +296,21 @@ DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
TRACE_EVENT(
kvm_async_pf_completed,
- TP_PROTO(unsigned long address, struct page *page, u64 gva),
- TP_ARGS(address, page, gva),
+ TP_PROTO(unsigned long address, u64 gva),
+ TP_ARGS(address, gva),
TP_STRUCT__entry(
__field(unsigned long, address)
- __field(pfn_t, pfn)
__field(u64, gva)
),
TP_fast_assign(
__entry->address = address;
- __entry->pfn = page ? page_to_pfn(page) : 0;
__entry->gva = gva;
),
- TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
- __entry->address, __entry->pfn)
+ TP_printk("gva %#llx address %#lx", __entry->gva,
+ __entry->address)
);
#endif
diff --git a/include/trace/events/power_cpu_migrate.h b/include/trace/events/power_cpu_migrate.h
new file mode 100644
index 000000000000..f76dd4de625e
--- /dev/null
+++ b/include/trace/events/power_cpu_migrate.h
@@ -0,0 +1,67 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+#if !defined(_TRACE_POWER_CPU_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_POWER_CPU_MIGRATE_H
+
+#include <linux/tracepoint.h>
+
+#define __cpu_migrate_proto \
+ TP_PROTO(u64 timestamp, \
+ u32 cpu_hwid)
+#define __cpu_migrate_args \
+ TP_ARGS(timestamp, \
+ cpu_hwid)
+
+DECLARE_EVENT_CLASS(cpu_migrate,
+
+ __cpu_migrate_proto,
+ __cpu_migrate_args,
+
+ TP_STRUCT__entry(
+ __field(u64, timestamp )
+ __field(u32, cpu_hwid )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __entry->cpu_hwid = cpu_hwid;
+ ),
+
+ TP_printk("timestamp=%llu cpu_hwid=0x%08lX",
+ (unsigned long long)__entry->timestamp,
+ (unsigned long)__entry->cpu_hwid
+ )
+);
+
+#define __define_cpu_migrate_event(name) \
+ DEFINE_EVENT(cpu_migrate, cpu_migrate_##name, \
+ __cpu_migrate_proto, \
+ __cpu_migrate_args \
+ )
+
+__define_cpu_migrate_event(begin);
+__define_cpu_migrate_event(finish);
+__define_cpu_migrate_event(current);
+
+#undef __define_cpu_migrate
+#undef __cpu_migrate_proto
+#undef __cpu_migrate_args
+
+/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */
+#ifndef _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
+#define _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
+
+/*
+ * Set from_phys_cpu and to_phys_cpu to CPU_MIGRATE_ALL_CPUS to indicate
+ * a whole-cluster migration:
+ */
+#define CPU_MIGRATE_ALL_CPUS 0x80000000U
+#endif
+
+#endif /* _TRACE_POWER_CPU_MIGRATE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE power_cpu_migrate
+#include <trace/define_trace.h>
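
Illustrative only: a platform's cluster-switch code could bracket the hand-over with the begin/finish events, using CPU_MIGRATE_ALL_CPUS for a whole-cluster migration (example_switch_cluster is invented):

static void example_switch_cluster(u64 timestamp_ns, u32 target_cpu_hwid)
{
	trace_cpu_migrate_begin(timestamp_ns, CPU_MIGRATE_ALL_CPUS);
	/* ... save state, switch clusters, restore state ... */
	trace_cpu_migrate_finish(timestamp_ns, target_cpu_hwid);
}
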
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index ee2376cfaab3..aca382266411 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -39,15 +39,26 @@ TRACE_EVENT(rcu_utilization,
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
/*
- * Tracepoint for grace-period events: starting and ending a grace
- * period ("start" and "end", respectively), a CPU noting the start
- * of a new grace period or the end of an old grace period ("cpustart"
- * and "cpuend", respectively), a CPU passing through a quiescent
- * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
- * and "cpuofl", respectively), a CPU being kicked for being too
- * long in dyntick-idle mode ("kick"), a CPU accelerating its new
- * callbacks to RCU_NEXT_READY_TAIL ("AccReadyCB"), and a CPU
- * accelerating its new callbacks to RCU_WAIT_TAIL ("AccWaitCB").
+ * Tracepoint for grace-period events. Takes a string identifying the
+ * RCU flavor, the grace-period number, and a string identifying the
+ * grace-period-related event as follows:
+ *
+ * "AccReadyCB": CPU acclerates new callbacks to RCU_NEXT_READY_TAIL.
+ * "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL.
+ * "newreq": Request a new grace period.
+ * "start": Start a grace period.
+ * "cpustart": CPU first notices a grace-period start.
+ * "cpuqs": CPU passes through a quiescent state.
+ * "cpuonl": CPU comes online.
+ * "cpuofl": CPU goes offline.
+ * "reqwait": GP kthread sleeps waiting for grace-period request.
+ * "reqwaitsig": GP kthread awakened by signal from reqwait state.
+ * "fqswait": GP kthread waiting until time to force quiescent states.
+ * "fqsstart": GP kthread starts forcing quiescent states.
+ * "fqsend": GP kthread done forcing quiescent states.
+ * "fqswaitsig": GP kthread awakened by signal from fqswait state.
+ * "end": End a grace period.
+ * "cpuend": CPU first notices a grace-period end.
*/
TRACE_EVENT(rcu_grace_period,
@@ -161,6 +172,46 @@ TRACE_EVENT(rcu_grace_period_init,
);
/*
+ * Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended
+ * to assist debugging of these handoffs.
+ *
+ * The first argument is the name of the RCU flavor, the second is the
+ * number of the offloaded CPU, and the third and final argument is a
+ * string as follows:
+ *
+ * "WakeEmpty": Wake rcuo kthread, first CB to empty list.
+ * "WakeOvf": Wake rcuo kthread, CB list is huge.
+ * "WakeNot": Don't wake rcuo kthread.
+ * "WakeNotPoll": Don't wake rcuo kthread because it is polling.
+ * "Poll": Start of new polling cycle for rcu_nocb_poll.
+ * "Sleep": Sleep waiting for CBs for !rcu_nocb_poll.
+ * "WokeEmpty": rcuo kthread woke to find empty list.
+ * "WokeNonEmpty": rcuo kthread woke to find non-empty list.
+ * "WaitQueue": Enqueue partially done, timed wait for it to complete.
+ * "WokeQueue": Partial enqueue now complete.
+ */
+TRACE_EVENT(rcu_nocb_wake,
+
+ TP_PROTO(const char *rcuname, int cpu, const char *reason),
+
+ TP_ARGS(rcuname, cpu, reason),
+
+ TP_STRUCT__entry(
+ __field(const char *, rcuname)
+ __field(int, cpu)
+ __field(const char *, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->cpu = cpu;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
+);
+
+/*
* Tracepoint for tasks blocking within preemptible-RCU read-side
* critical sections. Track the type of RCU (which one day might
* include SRCU), the grace-period number that the task is blocking
@@ -540,17 +591,17 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
TRACE_EVENT(rcu_batch_end,
TP_PROTO(const char *rcuname, int callbacks_invoked,
- bool cb, bool nr, bool iit, bool risk),
+ char cb, char nr, char iit, char risk),
TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(int, callbacks_invoked)
- __field(bool, cb)
- __field(bool, nr)
- __field(bool, iit)
- __field(bool, risk)
+ __field(char, cb)
+ __field(char, nr)
+ __field(char, iit)
+ __field(char, risk)
),
TP_fast_assign(
@@ -656,6 +707,7 @@ TRACE_EVENT(rcu_barrier,
#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
level, grplo, grphi, event) \
do { } while (0)
+#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 2e7d9947a10d..04c308413a5d 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -100,7 +100,7 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
/*
* For all intents and purposes a preempted task is a running task.
*/
- if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
+ if (task_preempt_count(p) & PREEMPT_ACTIVE)
state = TASK_RUNNING | TASK_STATE_MAX;
#endif
@@ -424,6 +424,25 @@ TRACE_EVENT(sched_pi_setprio,
__entry->oldprio, __entry->newprio)
);
+#ifdef CONFIG_DETECT_HUNG_TASK
+TRACE_EVENT(sched_process_hang,
+ TP_PROTO(struct task_struct *tsk),
+ TP_ARGS(tsk),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ ),
+
+ TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
+);
+#endif /* CONFIG_DETECT_HUNG_TASK */
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
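
The new event is only compiled in under CONFIG_DETECT_HUNG_TASK; a plausible (not authoritative) emitter in the hung-task watchdog would look like:

static void example_report_hung_task(struct task_struct *t)
{
	trace_sched_process_hang(t);
	/* existing reporting (printk, sysctl_hung_task_panic, ...) follows */
}
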
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
new file mode 100644
index 000000000000..7e02c983bbe2
--- /dev/null
+++ b/include/trace/events/spi.h
@@ -0,0 +1,156 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM spi
+
+#if !defined(_TRACE_SPI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SPI_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(spi_master,
+
+ TP_PROTO(struct spi_master *master),
+
+ TP_ARGS(master),
+
+ TP_STRUCT__entry(
+ __field( int, bus_num )
+ ),
+
+ TP_fast_assign(
+ __entry->bus_num = master->bus_num;
+ ),
+
+ TP_printk("spi%d", (int)__entry->bus_num)
+
+);
+
+DEFINE_EVENT(spi_master, spi_master_idle,
+
+ TP_PROTO(struct spi_master *master),
+
+ TP_ARGS(master)
+
+);
+
+DEFINE_EVENT(spi_master, spi_master_busy,
+
+ TP_PROTO(struct spi_master *master),
+
+ TP_ARGS(master)
+
+);
+
+DECLARE_EVENT_CLASS(spi_message,
+
+ TP_PROTO(struct spi_message *msg),
+
+ TP_ARGS(msg),
+
+ TP_STRUCT__entry(
+ __field( int, bus_num )
+ __field( int, chip_select )
+ __field( struct spi_message *, msg )
+ ),
+
+ TP_fast_assign(
+ __entry->bus_num = msg->spi->master->bus_num;
+ __entry->chip_select = msg->spi->chip_select;
+ __entry->msg = msg;
+ ),
+
+ TP_printk("spi%d.%d %p", (int)__entry->bus_num,
+ (int)__entry->chip_select,
+ (struct spi_message *)__entry->msg)
+);
+
+DEFINE_EVENT(spi_message, spi_message_submit,
+
+ TP_PROTO(struct spi_message *msg),
+
+ TP_ARGS(msg)
+
+);
+
+DEFINE_EVENT(spi_message, spi_message_start,
+
+ TP_PROTO(struct spi_message *msg),
+
+ TP_ARGS(msg)
+
+);
+
+TRACE_EVENT(spi_message_done,
+
+ TP_PROTO(struct spi_message *msg),
+
+ TP_ARGS(msg),
+
+ TP_STRUCT__entry(
+ __field( int, bus_num )
+ __field( int, chip_select )
+ __field( struct spi_message *, msg )
+ __field( unsigned, frame )
+ __field( unsigned, actual )
+ ),
+
+ TP_fast_assign(
+ __entry->bus_num = msg->spi->master->bus_num;
+ __entry->chip_select = msg->spi->chip_select;
+ __entry->msg = msg;
+ __entry->frame = msg->frame_length;
+ __entry->actual = msg->actual_length;
+ ),
+
+ TP_printk("spi%d.%d %p len=%u/%u", (int)__entry->bus_num,
+ (int)__entry->chip_select,
+ (struct spi_message *)__entry->msg,
+ (unsigned)__entry->actual, (unsigned)__entry->frame)
+);
+
+DECLARE_EVENT_CLASS(spi_transfer,
+
+ TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer),
+
+ TP_ARGS(msg, xfer),
+
+ TP_STRUCT__entry(
+ __field( int, bus_num )
+ __field( int, chip_select )
+ __field( struct spi_transfer *, xfer )
+ __field( int, len )
+ ),
+
+ TP_fast_assign(
+ __entry->bus_num = msg->spi->master->bus_num;
+ __entry->chip_select = msg->spi->chip_select;
+ __entry->xfer = xfer;
+ __entry->len = xfer->len;
+ ),
+
+ TP_printk("spi%d.%d %p len=%d", (int)__entry->bus_num,
+ (int)__entry->chip_select,
+ (struct spi_message *)__entry->xfer,
+ (int)__entry->len)
+);
+
+DEFINE_EVENT(spi_transfer, spi_transfer_start,
+
+ TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer),
+
+ TP_ARGS(msg, xfer)
+
+);
+
+DEFINE_EVENT(spi_transfer, spi_transfer_stop,
+
+ TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer),
+
+ TP_ARGS(msg, xfer)
+
+);
+
+#endif /* _TRACE_SPI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
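
A sketch of where an SPI message pump might emit these events (example_pump_one_message is invented; the trace_* names follow directly from the definitions above):

static void example_pump_one_message(struct spi_master *master,
				     struct spi_message *msg)
{
	struct spi_transfer *xfer;

	trace_spi_master_busy(master);
	trace_spi_message_start(msg);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);
		/* ... program the controller, wait for completion ... */
		trace_spi_transfer_stop(msg, xfer);
	}

	trace_spi_message_done(msg);
	trace_spi_master_idle(master);
}
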
diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h
new file mode 100644
index 000000000000..7ea4c5e7c448
--- /dev/null
+++ b/include/trace/events/swiotlb.h
@@ -0,0 +1,46 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM swiotlb
+
+#if !defined(_TRACE_SWIOTLB_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SWIOTLB_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(swiotlb_bounced,
+
+ TP_PROTO(struct device *dev,
+ dma_addr_t dev_addr,
+ size_t size,
+ int swiotlb_force),
+
+ TP_ARGS(dev, dev_addr, size, swiotlb_force),
+
+ TP_STRUCT__entry(
+ __string( dev_name, dev_name(dev) )
+ __field( u64, dma_mask )
+ __field( dma_addr_t, dev_addr )
+ __field( size_t, size )
+ __field( int, swiotlb_force )
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name(dev));
+ __entry->dma_mask = (dev->dma_mask ? *dev->dma_mask : 0);
+ __entry->dev_addr = dev_addr;
+ __entry->size = size;
+ __entry->swiotlb_force = swiotlb_force;
+ ),
+
+ TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx "
+ "size=%zu %s",
+ __get_str(dev_name),
+ __entry->dma_mask,
+ (unsigned long long)__entry->dev_addr,
+ __entry->size,
+ __entry->swiotlb_force ? "swiotlb_force" : "" )
+);
+
+#endif /* _TRACE_SWIOTLB_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
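
A hedged sketch of the kind of map path that would emit swiotlb_bounced, i.e. when a physical address is not reachable by the device and a bounce buffer has to be used (example_map_page and example_bounce are invented):

static dma_addr_t example_map_page(struct device *dev, phys_addr_t phys,
				   size_t size)
{
	dma_addr_t dev_addr = phys_to_dma(dev, phys);

	if (!dma_capable(dev, dev_addr, size)) {
		trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
		dev_addr = example_bounce(dev, phys, size);
	}
	return dev_addr;
}
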
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index aef8fc354025..da9cc0f05c93 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -144,7 +144,7 @@ TRACE_EVENT(target_sequencer_start,
),
TP_fast_assign(
- __entry->unpacked_lun = cmd->se_lun->unpacked_lun;
+ __entry->unpacked_lun = cmd->orig_fe_lun;
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
@@ -182,7 +182,7 @@ TRACE_EVENT(target_cmd_complete,
),
TP_fast_assign(
- __entry->unpacked_lun = cmd->se_lun->unpacked_lun;
+ __entry->unpacked_lun = cmd->orig_fe_lun;
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index f04b69b6abf2..38f14d0264c3 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -78,4 +78,6 @@
#define SO_BUSY_POLL 46
+#define SO_MAX_PACING_RATE 47
+
#endif /* __ASM_GENERIC_SOCKET_H */
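
From userspace the new option is set like any other SOL_SOCKET option; the rate is expressed in bytes per second:

#include <sys/socket.h>

/* Cap this socket's transmit rate to roughly 1 MB/s. */
static int example_set_pacing(int fd)
{
	unsigned int rate = 1000000;	/* bytes per second */

	return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
			  &rate, sizeof(rate));
}
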
diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h
new file mode 100644
index 000000000000..8dec3fdc99c7
--- /dev/null
+++ b/include/uapi/drm/armada_drm.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * With inspiration from the i915 driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef DRM_ARMADA_IOCTL_H
+#define DRM_ARMADA_IOCTL_H
+
+#define DRM_ARMADA_GEM_CREATE 0x00
+#define DRM_ARMADA_GEM_MMAP 0x02
+#define DRM_ARMADA_GEM_PWRITE 0x03
+
+#define ARMADA_IOCTL(dir, name, str) \
+ DRM_##dir(DRM_COMMAND_BASE + DRM_ARMADA_##name, struct drm_armada_##str)
+
+struct drm_armada_gem_create {
+ uint32_t handle;
+ uint32_t size;
+};
+#define DRM_IOCTL_ARMADA_GEM_CREATE \
+ ARMADA_IOCTL(IOWR, GEM_CREATE, gem_create)
+
+struct drm_armada_gem_mmap {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t addr;
+};
+#define DRM_IOCTL_ARMADA_GEM_MMAP \
+ ARMADA_IOCTL(IOWR, GEM_MMAP, gem_mmap)
+
+struct drm_armada_gem_pwrite {
+ uint64_t ptr;
+ uint32_t handle;
+ uint32_t offset;
+ uint32_t size;
+};
+#define DRM_IOCTL_ARMADA_GEM_PWRITE \
+ ARMADA_IOCTL(IOW, GEM_PWRITE, gem_pwrite)
+
+#endif
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index ece867889cc7..9b24d65fed72 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -611,12 +611,37 @@ struct drm_gem_open {
__u64 size;
};
+#define DRM_CAP_DUMB_BUFFER 0x1
+#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+#define DRM_CAP_PRIME 0x5
+#define DRM_PRIME_CAP_IMPORT 0x1
+#define DRM_PRIME_CAP_EXPORT 0x2
+#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
+
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
__u64 capability;
__u64 value;
};
+/**
+ * DRM_CLIENT_CAP_STEREO_3D
+ *
+ * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * monitor by advertising the supported 3D layouts in the flags of struct
+ * drm_mode_modeinfo.
+ */
+#define DRM_CLIENT_CAP_STEREO_3D 1
+
+/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+struct drm_set_client_cap {
+ __u64 capability;
+ __u64 value;
+};
+
#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
__u32 handle;
@@ -649,6 +674,7 @@ struct drm_prime_handle {
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
+#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
@@ -774,17 +800,6 @@ struct drm_event_vblank {
__u32 reserved;
};
-#define DRM_CAP_DUMB_BUFFER 0x1
-#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
-#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
-#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
-#define DRM_CAP_PRIME 0x5
-#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
-#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
-
-#define DRM_PRIME_CAP_IMPORT 0x1
-#define DRM_PRIME_CAP_EXPORT 0x2
-
/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
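
Userspace opts in to the new stereo 3D mode flags with the SET_CLIENT_CAP ioctl added above; a minimal sketch using the raw ioctl (a libdrm wrapper would work equally well):

#include <sys/ioctl.h>
#include <drm/drm.h>

static int example_enable_stereo_3d(int drm_fd)
{
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_STEREO_3D,
		.value      = 1,
	};

	return ioctl(drm_fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
}
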
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 550811712f78..f104c2603ebe 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -44,20 +44,35 @@
/* Video mode flags */
/* bit compatible with the xorg definitions. */
-#define DRM_MODE_FLAG_PHSYNC (1<<0)
-#define DRM_MODE_FLAG_NHSYNC (1<<1)
-#define DRM_MODE_FLAG_PVSYNC (1<<2)
-#define DRM_MODE_FLAG_NVSYNC (1<<3)
-#define DRM_MODE_FLAG_INTERLACE (1<<4)
-#define DRM_MODE_FLAG_DBLSCAN (1<<5)
-#define DRM_MODE_FLAG_CSYNC (1<<6)
-#define DRM_MODE_FLAG_PCSYNC (1<<7)
-#define DRM_MODE_FLAG_NCSYNC (1<<8)
-#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
-#define DRM_MODE_FLAG_BCAST (1<<10)
-#define DRM_MODE_FLAG_PIXMUX (1<<11)
-#define DRM_MODE_FLAG_DBLCLK (1<<12)
-#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
+#define DRM_MODE_FLAG_PHSYNC (1<<0)
+#define DRM_MODE_FLAG_NHSYNC (1<<1)
+#define DRM_MODE_FLAG_PVSYNC (1<<2)
+#define DRM_MODE_FLAG_NVSYNC (1<<3)
+#define DRM_MODE_FLAG_INTERLACE (1<<4)
+#define DRM_MODE_FLAG_DBLSCAN (1<<5)
+#define DRM_MODE_FLAG_CSYNC (1<<6)
+#define DRM_MODE_FLAG_PCSYNC (1<<7)
+#define DRM_MODE_FLAG_NCSYNC (1<<8)
+#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST (1<<10)
+#define DRM_MODE_FLAG_PIXMUX (1<<11)
+#define DRM_MODE_FLAG_DBLCLK (1<<12)
+#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
+ /*
+ * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
+ * (define not exposed to user space).
+ */
+#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
+#define DRM_MODE_FLAG_3D_NONE (0<<14)
+#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
+#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
+#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
+#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
+
/* DPMS flags */
/* bit compatible with the xorg definitions. */
@@ -165,6 +180,7 @@ struct drm_mode_get_plane_res {
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
#define DRM_MODE_ENCODER_VIRTUAL 5
+#define DRM_MODE_ENCODER_DSI 6
struct drm_mode_get_encoder {
__u32 encoder_id;
@@ -203,6 +219,7 @@ struct drm_mode_get_encoder {
#define DRM_MODE_CONNECTOR_TV 13
#define DRM_MODE_CONNECTOR_eDP 14
#define DRM_MODE_CONNECTOR_VIRTUAL 15
+#define DRM_MODE_CONNECTOR_DSI 16
struct drm_mode_get_connector {
@@ -223,6 +240,8 @@ struct drm_mode_get_connector {
__u32 connection;
__u32 mm_width, mm_height; /**< HxW in millimeters */
__u32 subpixel;
+
+ __u32 pad;
};
#define DRM_MODE_PROP_PENDING (1<<0)
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 55bb5729bd78..3a4e97bd8607 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -38,10 +38,10 @@
*
* I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
* event from the gpu l3 cache. Additional information supplied is ROW,
- * BANK, SUBBANK of the affected cacheline. Userspace should keep track of
- * these events and if a specific cache-line seems to have a persistent
- * error remap it with the l3 remapping tool supplied in intel-gpu-tools.
- * The value supplied with the event is always 1.
+ * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
+ * track of these events and if a specific cache-line seems to have a
+ * persistent error remap it with the l3 remapping tool supplied in
+ * intel-gpu-tools. The value supplied with the event is always 1.
*
* I915_ERROR_UEVENT - Generated upon error detection, currently only via
* hangcheck. The error detection event is a good indicator of when things
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index 73bde4eaf16c..5e1ab552cbed 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -19,6 +19,9 @@
#include <drm/drm.h>
+#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
+#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
+
struct drm_tegra_gem_create {
__u64 size;
__u32 flags;
@@ -65,6 +68,12 @@ struct drm_tegra_get_syncpt {
__u32 id;
};
+struct drm_tegra_get_syncpt_base {
+ __u64 context;
+ __u32 syncpt;
+ __u32 id;
+};
+
struct drm_tegra_syncpt {
__u32 id;
__u32 incrs;
@@ -115,15 +124,16 @@ struct drm_tegra_submit {
__u32 reserved[5]; /* future expansion */
};
-#define DRM_TEGRA_GEM_CREATE 0x00
-#define DRM_TEGRA_GEM_MMAP 0x01
-#define DRM_TEGRA_SYNCPT_READ 0x02
-#define DRM_TEGRA_SYNCPT_INCR 0x03
-#define DRM_TEGRA_SYNCPT_WAIT 0x04
-#define DRM_TEGRA_OPEN_CHANNEL 0x05
-#define DRM_TEGRA_CLOSE_CHANNEL 0x06
-#define DRM_TEGRA_GET_SYNCPT 0x07
-#define DRM_TEGRA_SUBMIT 0x08
+#define DRM_TEGRA_GEM_CREATE 0x00
+#define DRM_TEGRA_GEM_MMAP 0x01
+#define DRM_TEGRA_SYNCPT_READ 0x02
+#define DRM_TEGRA_SYNCPT_INCR 0x03
+#define DRM_TEGRA_SYNCPT_WAIT 0x04
+#define DRM_TEGRA_OPEN_CHANNEL 0x05
+#define DRM_TEGRA_CLOSE_CHANNEL 0x06
+#define DRM_TEGRA_GET_SYNCPT 0x07
+#define DRM_TEGRA_SUBMIT 0x08
+#define DRM_TEGRA_GET_SYNCPT_BASE 0x09
#define DRM_IOCTL_TEGRA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_CREATE, struct drm_tegra_gem_create)
#define DRM_IOCTL_TEGRA_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_MMAP, struct drm_tegra_gem_mmap)
@@ -134,5 +144,6 @@ struct drm_tegra_submit {
#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_open_channel)
#define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
#define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
+#define DRM_IOCTL_TEGRA_GET_SYNCPT_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT_BASE, struct drm_tegra_get_syncpt_base)
#endif
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 75cef3fd97ad..db0b825b4810 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -329,7 +329,6 @@ enum {
#define AUDIT_ARCH_ARMEB (EM_ARM)
#define AUDIT_ARCH_CRIS (EM_CRIS|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_FRV (EM_FRV)
-#define AUDIT_ARCH_H8300 (EM_H8_300)
#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_IA64 (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_M32R (EM_M32R)
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
new file mode 100644
index 000000000000..164a7e263988
--- /dev/null
+++ b/include/uapi/linux/bcache.h
@@ -0,0 +1,373 @@
+#ifndef _LINUX_BCACHE_H
+#define _LINUX_BCACHE_H
+
+/*
+ * Bcache on disk data structures
+ */
+
+#include <asm/types.h>
+
+#define BITMASK(name, type, field, offset, size) \
+static inline __u64 name(const type *k) \
+{ return (k->field >> offset) & ~(~0ULL << size); } \
+ \
+static inline void SET_##name(type *k, __u64 v) \
+{ \
+ k->field &= ~(~(~0ULL << size) << offset); \
+ k->field |= (v & ~(~0ULL << size)) << offset; \
+}
+
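Illustrative sketch (not part of the patch): BITMASK(name, type, field, offset, size) above expands into a getter name() and a setter SET_name() over a size-bit field starting at bit offset within field. A minimal usage example, assuming the macro is in scope and using a made-up struct demo:

    #include <asm/types.h>

    struct demo { __u64 flags; };

    BITMASK(DEMO_MODE, struct demo, flags, 4, 3)   /* bits 4..6 of flags */

    static inline int demo_selftest(void)
    {
            struct demo d = { .flags = 0 };

            SET_DEMO_MODE(&d, 5);        /* writes 0b101 into bits 4..6 */
            return DEMO_MODE(&d) == 5;   /* getter shifts and masks it back out */
    }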
+/* Btree keys - all units are in sectors */
+
+struct bkey {
+ __u64 high;
+ __u64 low;
+ __u64 ptr[];
+};
+
+#define KEY_FIELD(name, field, offset, size) \
+ BITMASK(name, struct bkey, field, offset, size)
+
+#define PTR_FIELD(name, offset, size) \
+static inline __u64 name(const struct bkey *k, unsigned i) \
+{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
+ \
+static inline void SET_##name(struct bkey *k, unsigned i, __u64 v) \
+{ \
+ k->ptr[i] &= ~(~(~0ULL << size) << offset); \
+ k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
+}
+
+#define KEY_SIZE_BITS 16
+
+KEY_FIELD(KEY_PTRS, high, 60, 3)
+KEY_FIELD(HEADER_SIZE, high, 58, 2)
+KEY_FIELD(KEY_CSUM, high, 56, 2)
+KEY_FIELD(KEY_PINNED, high, 55, 1)
+KEY_FIELD(KEY_DIRTY, high, 36, 1)
+
+KEY_FIELD(KEY_SIZE, high, 20, KEY_SIZE_BITS)
+KEY_FIELD(KEY_INODE, high, 0, 20)
+
+/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
+
+static inline __u64 KEY_OFFSET(const struct bkey *k)
+{
+ return k->low;
+}
+
+static inline void SET_KEY_OFFSET(struct bkey *k, __u64 v)
+{
+ k->low = v;
+}
+
+/*
+ * The high bit being set is a relic from when we used it to do binary
+ * searches - it told you where a key started. It's not used anymore,
+ * and can probably be safely dropped.
+ */
+#define KEY(inode, offset, size) \
+((struct bkey) { \
+ .high = (1ULL << 63) | ((__u64) (size) << 20) | (inode), \
+ .low = (offset) \
+})
+
+#define ZERO_KEY KEY(0, 0, 0)
+
+#define MAX_KEY_INODE (~(~0 << 20))
+#define MAX_KEY_OFFSET (~0ULL >> 1)
+#define MAX_KEY KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)
+
+#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
+#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
+
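Illustrative sketch (not part of the patch, assuming the KEY()/KEY_* helpers above are in scope): a bcache key describes an extent by its end offset and size, so a key built with KEY() decomposes like this:

    static inline __u64 key_example(void)
    {
            /* inode 7, extent of 32 sectors ending at sector 1024 */
            struct bkey k = KEY(7, 1024, 32);

            /* KEY_INODE(&k) == 7, KEY_SIZE(&k) == 32, KEY_START(&k) == 992 */
            return KEY_OFFSET(&k);       /* 1024 */
    }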
+#define PTR_DEV_BITS 12
+
+PTR_FIELD(PTR_DEV, 51, PTR_DEV_BITS)
+PTR_FIELD(PTR_OFFSET, 8, 43)
+PTR_FIELD(PTR_GEN, 0, 8)
+
+#define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
+
+#define PTR(gen, offset, dev) \
+ ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
+
+/* Bkey utility code */
+
+static inline unsigned long bkey_u64s(const struct bkey *k)
+{
+ return (sizeof(struct bkey) / sizeof(__u64)) + KEY_PTRS(k);
+}
+
+static inline unsigned long bkey_bytes(const struct bkey *k)
+{
+ return bkey_u64s(k) * sizeof(__u64);
+}
+
+#define bkey_copy(_dest, _src) memcpy(_dest, _src, bkey_bytes(_src))
+
+static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
+{
+ SET_KEY_INODE(dest, KEY_INODE(src));
+ SET_KEY_OFFSET(dest, KEY_OFFSET(src));
+}
+
+static inline struct bkey *bkey_next(const struct bkey *k)
+{
+ __u64 *d = (void *) k;
+ return (struct bkey *) (d + bkey_u64s(k));
+}
+
+static inline struct bkey *bkey_last(const struct bkey *k, unsigned nr_keys)
+{
+ __u64 *d = (void *) k;
+ return (struct bkey *) (d + nr_keys);
+}
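Illustrative sketch (not part of the patch): since each key occupies bkey_u64s() 64-bit words and keys are packed back to back, a key list is walked with bkey_next() up to the end marker returned by bkey_last(); count_keys() is a hypothetical helper:

    static inline unsigned count_keys(const struct bkey *start, unsigned nr_u64s)
    {
            const struct bkey *end = bkey_last(start, nr_u64s);
            const struct bkey *k;
            unsigned n = 0;

            for (k = start; k < end; k = bkey_next(k))
                    n++;                 /* each step skips the header plus KEY_PTRS() pointers */
            return n;
    }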
+/* Enough for a key with 6 pointers */
+#define BKEY_PAD 8
+
+#define BKEY_PADDED(key) \
+ union { struct bkey key; __u64 key ## _pad[BKEY_PAD]; }
+
+/* Superblock */
+
+/* Version 0: Cache device
+ * Version 1: Backing device
+ * Version 2: Seed pointer into btree node checksum
+ * Version 3: Cache device with new UUID format
+ * Version 4: Backing device with data offset
+ */
+#define BCACHE_SB_VERSION_CDEV 0
+#define BCACHE_SB_VERSION_BDEV 1
+#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
+#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
+#define BCACHE_SB_MAX_VERSION 4
+
+#define SB_SECTOR 8
+#define SB_SIZE 4096
+#define SB_LABEL_SIZE 32
+#define SB_JOURNAL_BUCKETS 256U
+/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
+#define MAX_CACHES_PER_SET 8
+
+#define BDEV_DATA_START_DEFAULT 16 /* sectors */
+
+struct cache_sb {
+ __u64 csum;
+ __u64 offset; /* sector where this sb was written */
+ __u64 version;
+
+ __u8 magic[16];
+
+ __u8 uuid[16];
+ union {
+ __u8 set_uuid[16];
+ __u64 set_magic;
+ };
+ __u8 label[SB_LABEL_SIZE];
+
+ __u64 flags;
+ __u64 seq;
+ __u64 pad[8];
+
+ union {
+ struct {
+ /* Cache devices */
+ __u64 nbuckets; /* device size */
+
+ __u16 block_size; /* sectors */
+ __u16 bucket_size; /* sectors */
+
+ __u16 nr_in_set;
+ __u16 nr_this_dev;
+ };
+ struct {
+ /* Backing devices */
+ __u64 data_offset;
+
+ /*
+ * block_size from the cache device section is still used by
+ * backing devices, so don't add anything here until we fix
+ * things to not need it for backing devices anymore
+ */
+ };
+ };
+
+ __u32 last_mount; /* time_t */
+
+ __u16 first_bucket;
+ union {
+ __u16 njournal_buckets;
+ __u16 keys;
+ };
+ __u64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
+};
+
+static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
+{
+ return sb->version == BCACHE_SB_VERSION_BDEV
+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
+}
+
+BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
+BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
+BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
+#define CACHE_REPLACEMENT_LRU 0U
+#define CACHE_REPLACEMENT_FIFO 1U
+#define CACHE_REPLACEMENT_RANDOM 2U
+
+BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
+#define CACHE_MODE_WRITETHROUGH 0U
+#define CACHE_MODE_WRITEBACK 1U
+#define CACHE_MODE_WRITEAROUND 2U
+#define CACHE_MODE_NONE 3U
+BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
+#define BDEV_STATE_NONE 0U
+#define BDEV_STATE_CLEAN 1U
+#define BDEV_STATE_DIRTY 2U
+#define BDEV_STATE_STALE 3U
+
+/*
+ * Magic numbers
+ *
+ * The various other data structures have their own magic numbers, which are
+ * xored with the first part of the cache set's UUID
+ */
+
+#define JSET_MAGIC 0x245235c1a3625032ULL
+#define PSET_MAGIC 0x6750e15f87337f91ULL
+#define BSET_MAGIC 0x90135c78b99e07f5ULL
+
+static inline __u64 jset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ JSET_MAGIC;
+}
+
+static inline __u64 pset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ PSET_MAGIC;
+}
+
+static inline __u64 bset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ BSET_MAGIC;
+}
+
+/*
+ * Journal
+ *
+ * On disk format for a journal entry:
+ * seq is monotonically increasing; every journal entry has its own unique
+ * sequence number.
+ *
+ * last_seq is the oldest journal entry that still has keys the btree hasn't
+ * flushed to disk yet.
+ *
+ * version is for on disk format changes.
+ */
+
+#define BCACHE_JSET_VERSION_UUIDv1 1
+#define BCACHE_JSET_VERSION_UUID 1 /* Always latest UUID format */
+#define BCACHE_JSET_VERSION 1
+
+struct jset {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 keys;
+
+ __u64 last_seq;
+
+ BKEY_PADDED(uuid_bucket);
+ BKEY_PADDED(btree_root);
+ __u16 btree_level;
+ __u16 pad[3];
+
+ __u64 prio_bucket[MAX_CACHES_PER_SET];
+
+ union {
+ struct bkey start[0];
+ __u64 d[0];
+ };
+};
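Illustrative sketch (not part of the patch) of the retention rule described in the journal comment above; jset_still_needed() is a hypothetical helper: an entry whose seq is below the newest entry's last_seq only carries keys the btree has already flushed, so it can be dropped.

    static inline _Bool jset_still_needed(const struct jset *newest,
                                          const struct jset *j)
    {
            return j->seq >= newest->last_seq;
    }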
+
+/* Bucket prios/gens */
+
+struct prio_set {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 pad;
+
+ __u64 next_bucket;
+
+ struct bucket_disk {
+ __u16 prio;
+ __u8 gen;
+ } __attribute((packed)) data[];
+};
+
+/* UUIDS - per backing device/flash only volume metadata */
+
+struct uuid_entry {
+ union {
+ struct {
+ __u8 uuid[16];
+ __u8 label[32];
+ __u32 first_reg;
+ __u32 last_reg;
+ __u32 invalidated;
+
+ __u32 flags;
+ /* Size of flash only volumes */
+ __u64 sectors;
+ };
+
+ __u8 pad[128];
+ };
+};
+
+BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
+
+/* Btree nodes */
+
+/* Version 1: Seed pointer into btree node checksum
+ */
+#define BCACHE_BSET_CSUM 1
+#define BCACHE_BSET_VERSION 1
+
+/*
+ * Btree nodes
+ *
+ * On disk a btree node is a list/log of these; within each set the keys are
+ * sorted
+ */
+struct bset {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 keys;
+
+ union {
+ struct bkey start[0];
+ __u64 d[0];
+ };
+};
+
+/* OBSOLETE */
+
+/* UUIDS - per backing device/flash only volume metadata */
+
+struct uuid_entry_v0 {
+ __u8 uuid[16];
+ __u8 label[32];
+ __u32 first_reg;
+ __u32 last_reg;
+ __u32 invalidated;
+ __u32 pad;
+};
+
+#endif /* _LINUX_BCACHE_H */
diff --git a/include/uapi/linux/can/bcm.h b/include/uapi/linux/can/bcm.h
index 3ebe387fea4d..382251a1d214 100644
--- a/include/uapi/linux/can/bcm.h
+++ b/include/uapi/linux/can/bcm.h
@@ -7,6 +7,38 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
*/
#ifndef CAN_BCM_H
diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h
index 7b7148bded71..b63204545320 100644
--- a/include/uapi/linux/can/error.h
+++ b/include/uapi/linux/can/error.h
@@ -7,6 +7,38 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
*/
#ifndef CAN_ERROR_H
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index 4e27c82b564a..844c8964bdfe 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -7,6 +7,38 @@
* Copyright (c) 2011 Volkswagen Group Electronic Research
* All rights reserved.
*
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
*/
#ifndef CAN_GW_H
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 14966ddb7df1..df944ed206a8 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -5,6 +5,14 @@
*
* Copyright (c) 2009 Wolfgang Grandegger <wg@grandegger.com>
*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef CAN_NETLINK_H
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index a814062b0719..c7d8c334e0ce 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -8,6 +8,38 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
*/
#ifndef CAN_RAW_H
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index f1e12bd40b3b..c8a4302093a3 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 26
+#define DM_VERSION_MINOR 27
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2013-08-15)"
+#define DM_VERSION_EXTRA "-ioctl (2013-10-30)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -341,4 +341,15 @@ enum {
*/
#define DM_DATA_OUT_FLAG (1 << 16) /* Out */
+/*
+ * If set with DM_DEV_REMOVE or DM_REMOVE_ALL, this indicates that, if
+ * the device cannot be removed immediately because it is still in use,
+ * it should instead be scheduled for removal when it gets closed.
+ *
+ * On return from DM_DEV_REMOVE, DM_DEV_STATUS or other ioctls, this
+ * flag indicates that the device is scheduled to be removed when it
+ * gets closed.
+ */
+#define DM_DEFERRED_REMOVE (1 << 17) /* In/Out */
+
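Illustrative userspace sketch (not part of the patch; real code would go through libdevmapper and also check the returned flags): requesting deferred removal so the device disappears when its last opener closes it instead of failing with EBUSY. ctl_fd is assumed to be an open descriptor on /dev/mapper/control.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/dm-ioctl.h>

    static int dm_remove_when_closed(int ctl_fd, const char *name)
    {
            struct dm_ioctl io;

            memset(&io, 0, sizeof(io));
            io.version[0] = DM_VERSION_MAJOR;
            io.version[1] = DM_VERSION_MINOR;
            io.version[2] = DM_VERSION_PATCHLEVEL;
            io.data_size  = sizeof(io);
            io.flags      = DM_DEFERRED_REMOVE;   /* defer removal while still in use */
            strncpy(io.name, name, sizeof(io.name) - 1);

            return ioctl(ctl_fd, DM_DEV_REMOVE, &io);
    }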
#endif /* _LINUX_DM_IOCTL_H */
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index 59c17a2d38ad..01529bd96438 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -31,7 +31,6 @@
#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */
#define EM_V850 87 /* NEC v850 */
#define EM_M32R 88 /* Renesas M32R */
-#define EM_H8_300 46 /* Renesas H8/300,300H,H8S */
#define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */
#define EM_BLACKFIN 106 /* ADI Blackfin Processor */
#define EM_TI_C6000 140 /* TI C6X DSPs */
diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h
new file mode 100644
index 000000000000..ca18c45f8304
--- /dev/null
+++ b/include/uapi/linux/hash_info.h
@@ -0,0 +1,37 @@
+/*
+ * Hash Info: Hash algorithms information
+ *
+ * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _UAPI_LINUX_HASH_INFO_H
+#define _UAPI_LINUX_HASH_INFO_H
+
+enum hash_algo {
+ HASH_ALGO_MD4,
+ HASH_ALGO_MD5,
+ HASH_ALGO_SHA1,
+ HASH_ALGO_RIPE_MD_160,
+ HASH_ALGO_SHA256,
+ HASH_ALGO_SHA384,
+ HASH_ALGO_SHA512,
+ HASH_ALGO_SHA224,
+ HASH_ALGO_RIPE_MD_128,
+ HASH_ALGO_RIPE_MD_256,
+ HASH_ALGO_RIPE_MD_320,
+ HASH_ALGO_WP_256,
+ HASH_ALGO_WP_384,
+ HASH_ALGO_WP_512,
+ HASH_ALGO_TGR_128,
+ HASH_ALGO_TGR_160,
+ HASH_ALGO_TGR_192,
+ HASH_ALGO__LAST
+};
+
+#endif /* _UAPI_LINUX_HASH_INFO_H */
diff --git a/include/uapi/linux/hsr_netlink.h b/include/uapi/linux/hsr_netlink.h
new file mode 100644
index 000000000000..2475cb8a53af
--- /dev/null
+++ b/include/uapi/linux/hsr_netlink.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ * 2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ */
+
+#ifndef __UAPI_HSR_NETLINK_H
+#define __UAPI_HSR_NETLINK_H
+
+/* Generic Netlink HSR family definition
+ */
+
+/* attributes */
+enum {
+ HSR_A_UNSPEC,
+ HSR_A_NODE_ADDR,
+ HSR_A_IFINDEX,
+ HSR_A_IF1_AGE,
+ HSR_A_IF2_AGE,
+ HSR_A_NODE_ADDR_B,
+ HSR_A_IF1_SEQ,
+ HSR_A_IF2_SEQ,
+ HSR_A_IF1_IFINDEX,
+ HSR_A_IF2_IFINDEX,
+ HSR_A_ADDR_B_IFINDEX,
+ __HSR_A_MAX,
+};
+#define HSR_A_MAX (__HSR_A_MAX - 1)
+
+
+/* commands */
+enum {
+ HSR_C_UNSPEC,
+ HSR_C_RING_ERROR,
+ HSR_C_NODE_DOWN,
+ HSR_C_GET_NODE_STATUS,
+ HSR_C_SET_NODE_STATUS,
+ HSR_C_GET_NODE_LIST,
+ HSR_C_SET_NODE_LIST,
+ __HSR_C_MAX,
+};
+#define HSR_C_MAX (__HSR_C_MAX - 1)
+
+#endif /* __UAPI_HSR_NETLINK_H */
diff --git a/include/uapi/linux/if_bonding.h b/include/uapi/linux/if_bonding.h
index a17edda8a781..9635a62f6f89 100644
--- a/include/uapi/linux/if_bonding.h
+++ b/include/uapi/linux/if_bonding.h
@@ -91,6 +91,8 @@
#define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */
#define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ (TCP || UDP)) */
#define BOND_XMIT_POLICY_LAYER23 2 /* layer 2+3 (IP ^ MAC) */
+#define BOND_XMIT_POLICY_ENCAP23 3 /* encapsulated layer 2+3 */
+#define BOND_XMIT_POLICY_ENCAP34 4 /* encapsulated layer 3+4 */
typedef struct ifbond {
__s32 bond_mode;
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index ade07f1c491a..2ce0f6a78fa5 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -85,6 +85,7 @@
#define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */
#define ETH_P_MVRP 0x88F5 /* 802.1Q MVRP */
#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */
+#define ETH_P_PRP 0x88FB /* IEC 62439-3 PRP/HSRv0 */
#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
#define ETH_P_TDLS 0x890D /* TDLS */
#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 80394e8dc3a3..b78566f59aba 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -325,6 +325,17 @@ struct ifla_vxlan_port_range {
__be16 high;
};
+/* Bonding section */
+
+enum {
+ IFLA_BOND_UNSPEC,
+ IFLA_BOND_MODE,
+ IFLA_BOND_ACTIVE_SLAVE,
+ __IFLA_BOND_MAX,
+};
+
+#define IFLA_BOND_MAX (__IFLA_BOND_MAX - 1)
+
/* SR-IOV virtual function management section */
enum {
@@ -470,4 +481,17 @@ enum {
#define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)
+
+/* HSR section */
+
+enum {
+ IFLA_HSR_UNSPEC,
+ IFLA_HSR_SLAVE1,
+ IFLA_HSR_SLAVE2,
+ IFLA_HSR_MULTICAST_SPEC,
+ __IFLA_HSR_MAX,
+};
+
+#define IFLA_HSR_MAX (__IFLA_HSR_MAX - 1)
+
#endif /* _UAPI_LINUX_IF_LINK_H */
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index c9b7f4faf97a..840cb990abe2 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -56,5 +56,6 @@
#define KEYCTL_REJECT 19 /* reject a partially constructed key */
#define KEYCTL_INSTANTIATE_IOV 20 /* instantiate a partially constructed key */
#define KEYCTL_INVALIDATE 21 /* invalidate a key */
+#define KEYCTL_GET_PERSISTENT 22 /* get a user's persistent keyring */
#endif /* _LINUX_KEYCTL_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 99c25338ede8..902f12461873 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -518,6 +518,10 @@ struct kvm_ppc_smmu_info {
/* machine type bits, to be used as argument to KVM_CREATE_VM */
#define KVM_VM_S390_UCONTROL 1
+/* on ppc, 0 indicates the default, 1 forces HV and 2 forces PR */
+#define KVM_VM_PPC_HV 1
+#define KVM_VM_PPC_PR 2
+
#define KVM_S390_SIE_PAGE_OFFSET 1
/*
@@ -541,6 +545,7 @@ struct kvm_ppc_smmu_info {
#define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06
#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
+#define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
/*
* Extension capability list.
@@ -668,6 +673,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_IRQ_XICS 92
#define KVM_CAP_ARM_EL1_32BIT 93
#define KVM_CAP_SPAPR_MULTITCE 94
+#define KVM_CAP_EXT_EMUL_CPUID 95
#ifdef KVM_CAP_IRQ_ROUTING
@@ -843,6 +849,10 @@ struct kvm_device_attr {
#define KVM_DEV_TYPE_FSL_MPIC_20 1
#define KVM_DEV_TYPE_FSL_MPIC_42 2
#define KVM_DEV_TYPE_XICS 3
+#define KVM_DEV_TYPE_VFIO 4
+#define KVM_DEV_VFIO_GROUP 1
+#define KVM_DEV_VFIO_GROUP_ADD 1
+#define KVM_DEV_VFIO_GROUP_DEL 2
/*
* ioctls for VM fds
@@ -1012,6 +1022,7 @@ struct kvm_s390_ucas_mapping {
/* VM is being stopped by host */
#define KVM_KVMCLOCK_CTRL _IO(KVMIO, 0xad)
#define KVM_ARM_VCPU_INIT _IOW(KVMIO, 0xae, struct kvm_vcpu_init)
+#define KVM_ARM_PREFERRED_TARGET _IOR(KVMIO, 0xaf, struct kvm_vcpu_init)
#define KVM_GET_REG_LIST _IOWR(KVMIO, 0xb0, struct kvm_reg_list)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h
index e0cecd2eabdc..6edc6b68badd 100644
--- a/include/uapi/linux/loop.h
+++ b/include/uapi/linux/loop.h
@@ -21,6 +21,7 @@ enum {
LO_FLAGS_READ_ONLY = 1,
LO_FLAGS_AUTOCLEAR = 4,
LO_FLAGS_PARTSCAN = 8,
+ LO_FLAGS_USE_AIO = 16,
};
#include <asm/posix_types.h> /* for __kernel_old_dev_t */
diff --git a/include/uapi/linux/major.h b/include/uapi/linux/major.h
index 6a8ca98c9a96..620252e69b44 100644
--- a/include/uapi/linux/major.h
+++ b/include/uapi/linux/major.h
@@ -54,6 +54,7 @@
#define ACSI_MAJOR 28
#define AZTECH_CDROM_MAJOR 29
#define FB_MAJOR 29 /* /dev/fb* framebuffers */
+#define MTD_BLOCK_MAJOR 31
#define CM206_CDROM_MAJOR 32
#define IDE2_MAJOR 33
#define IDE3_MAJOR 34
@@ -105,6 +106,7 @@
#define IDE6_MAJOR 88
#define IDE7_MAJOR 89
#define IDE8_MAJOR 90
+#define MTD_CHAR_MAJOR 90
#define IDE9_MAJOR 91
#define DASD_MAJOR 94
diff --git a/include/uapi/linux/netfilter/Kbuild b/include/uapi/linux/netfilter/Kbuild
index 174915420d3f..17c3af2c4bb9 100644
--- a/include/uapi/linux/netfilter/Kbuild
+++ b/include/uapi/linux/netfilter/Kbuild
@@ -5,6 +5,8 @@ header-y += nf_conntrack_ftp.h
header-y += nf_conntrack_sctp.h
header-y += nf_conntrack_tcp.h
header-y += nf_conntrack_tuple_common.h
+header-y += nf_tables.h
+header-y += nf_tables_compat.h
header-y += nf_nat.h
header-y += nfnetlink.h
header-y += nfnetlink_acct.h
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 8024cdf13b70..25d3b2f79c02 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -10,12 +10,14 @@
#ifndef _UAPI_IP_SET_H
#define _UAPI_IP_SET_H
-
#include <linux/types.h>
/* The protocol version */
#define IPSET_PROTOCOL 6
+/* The maximum permissible comment length we will accept over netlink */
+#define IPSET_MAX_COMMENT_SIZE 255
+
/* The max length of strings including NUL: set and type identifiers */
#define IPSET_MAXNAMELEN 32
@@ -110,6 +112,7 @@ enum {
IPSET_ATTR_IFACE,
IPSET_ATTR_BYTES,
IPSET_ATTR_PACKETS,
+ IPSET_ATTR_COMMENT,
__IPSET_ATTR_ADT_MAX,
};
#define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1)
@@ -140,6 +143,7 @@ enum ipset_errno {
IPSET_ERR_IPADDR_IPV4,
IPSET_ERR_IPADDR_IPV6,
IPSET_ERR_COUNTER,
+ IPSET_ERR_COMMENT,
/* Type specific error codes */
IPSET_ERR_TYPE_SPECIFIC = 4352,
@@ -176,6 +180,8 @@ enum ipset_cadt_flags {
IPSET_FLAG_NOMATCH = (1 << IPSET_FLAG_BIT_NOMATCH),
IPSET_FLAG_BIT_WITH_COUNTERS = 3,
IPSET_FLAG_WITH_COUNTERS = (1 << IPSET_FLAG_BIT_WITH_COUNTERS),
+ IPSET_FLAG_BIT_WITH_COMMENT = 4,
+ IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT),
IPSET_FLAG_CADT_MAX = 15,
};
@@ -250,6 +256,14 @@ struct ip_set_req_get_set {
#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
/* Uses ip_set_req_get_set */
+#define IP_SET_OP_GET_FNAME 0x00000008 /* Get set index and family */
+struct ip_set_req_get_set_family {
+ unsigned int op;
+ unsigned int version;
+ unsigned int family;
+ union ip_set_name_index set;
+};
+
#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
struct ip_set_req_version {
unsigned int op;
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 8dd803818ebe..319f47128db8 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -25,6 +25,10 @@ enum ip_conntrack_info {
IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
};
+#define NF_CT_STATE_INVALID_BIT (1 << 0)
+#define NF_CT_STATE_BIT(ctinfo) (1 << ((ctinfo) % IP_CT_IS_REPLY + 1))
+#define NF_CT_STATE_UNTRACKED_BIT (1 << (IP_CT_NUMBER + 1))
+
/* Bitset representing status of connection. */
enum ip_conntrack_status {
/* It's an expected connection: bit 0 set. This bit never changed */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
new file mode 100644
index 000000000000..fbfd229a8e99
--- /dev/null
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -0,0 +1,718 @@
+#ifndef _LINUX_NF_TABLES_H
+#define _LINUX_NF_TABLES_H
+
+#define NFT_CHAIN_MAXNAMELEN 32
+
+enum nft_registers {
+ NFT_REG_VERDICT,
+ NFT_REG_1,
+ NFT_REG_2,
+ NFT_REG_3,
+ NFT_REG_4,
+ __NFT_REG_MAX
+};
+#define NFT_REG_MAX (__NFT_REG_MAX - 1)
+
+/**
+ * enum nft_verdicts - nf_tables internal verdicts
+ *
+ * @NFT_CONTINUE: continue evaluation of the current rule
+ * @NFT_BREAK: terminate evaluation of the current rule
+ * @NFT_JUMP: push the current chain on the jump stack and jump to a chain
+ * @NFT_GOTO: jump to a chain without pushing the current chain on the jump stack
+ * @NFT_RETURN: return to the topmost chain on the jump stack
+ *
+ * The nf_tables verdicts share their numeric space with the netfilter verdicts.
+ */
+enum nft_verdicts {
+ NFT_CONTINUE = -1,
+ NFT_BREAK = -2,
+ NFT_JUMP = -3,
+ NFT_GOTO = -4,
+ NFT_RETURN = -5,
+};
+
+/**
+ * enum nf_tables_msg_types - nf_tables netlink message types
+ *
+ * @NFT_MSG_NEWTABLE: create a new table (enum nft_table_attributes)
+ * @NFT_MSG_GETTABLE: get a table (enum nft_table_attributes)
+ * @NFT_MSG_DELTABLE: delete a table (enum nft_table_attributes)
+ * @NFT_MSG_NEWCHAIN: create a new chain (enum nft_chain_attributes)
+ * @NFT_MSG_GETCHAIN: get a chain (enum nft_chain_attributes)
+ * @NFT_MSG_DELCHAIN: delete a chain (enum nft_chain_attributes)
+ * @NFT_MSG_NEWRULE: create a new rule (enum nft_rule_attributes)
+ * @NFT_MSG_GETRULE: get a rule (enum nft_rule_attributes)
+ * @NFT_MSG_DELRULE: delete a rule (enum nft_rule_attributes)
+ * @NFT_MSG_NEWSET: create a new set (enum nft_set_attributes)
+ * @NFT_MSG_GETSET: get a set (enum nft_set_attributes)
+ * @NFT_MSG_DELSET: delete a set (enum nft_set_attributes)
+ * @NFT_MSG_NEWSETELEM: create a new set element (enum nft_set_elem_attributes)
+ * @NFT_MSG_GETSETELEM: get a set element (enum nft_set_elem_attributes)
+ * @NFT_MSG_DELSETELEM: delete a set element (enum nft_set_elem_attributes)
+ */
+enum nf_tables_msg_types {
+ NFT_MSG_NEWTABLE,
+ NFT_MSG_GETTABLE,
+ NFT_MSG_DELTABLE,
+ NFT_MSG_NEWCHAIN,
+ NFT_MSG_GETCHAIN,
+ NFT_MSG_DELCHAIN,
+ NFT_MSG_NEWRULE,
+ NFT_MSG_GETRULE,
+ NFT_MSG_DELRULE,
+ NFT_MSG_NEWSET,
+ NFT_MSG_GETSET,
+ NFT_MSG_DELSET,
+ NFT_MSG_NEWSETELEM,
+ NFT_MSG_GETSETELEM,
+ NFT_MSG_DELSETELEM,
+ NFT_MSG_MAX,
+};
+
+/**
+ * enum nft_list_attributes - nf_tables generic list netlink attributes
+ *
+ * @NFTA_LIST_ELEM: list element (NLA_NESTED)
+ */
+enum nft_list_attributes {
+ NFTA_LIST_UNPEC,
+ NFTA_LIST_ELEM,
+ __NFTA_LIST_MAX
+};
+#define NFTA_LIST_MAX (__NFTA_LIST_MAX - 1)
+
+/**
+ * enum nft_hook_attributes - nf_tables netfilter hook netlink attributes
+ *
+ * @NFTA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
+ * @NFTA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ */
+enum nft_hook_attributes {
+ NFTA_HOOK_UNSPEC,
+ NFTA_HOOK_HOOKNUM,
+ NFTA_HOOK_PRIORITY,
+ __NFTA_HOOK_MAX
+};
+#define NFTA_HOOK_MAX (__NFTA_HOOK_MAX - 1)
+
+/**
+ * enum nft_table_flags - nf_tables table flags
+ *
+ * @NFT_TABLE_F_DORMANT: this table is not active
+ */
+enum nft_table_flags {
+ NFT_TABLE_F_DORMANT = 0x1,
+};
+
+/**
+ * enum nft_table_attributes - nf_tables table netlink attributes
+ *
+ * @NFTA_TABLE_NAME: name of the table (NLA_STRING)
+ * @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32)
+ */
+enum nft_table_attributes {
+ NFTA_TABLE_UNSPEC,
+ NFTA_TABLE_NAME,
+ NFTA_TABLE_FLAGS,
+ __NFTA_TABLE_MAX
+};
+#define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1)
+
+/**
+ * enum nft_chain_attributes - nf_tables chain netlink attributes
+ *
+ * @NFTA_CHAIN_TABLE: name of the table containing the chain (NLA_STRING)
+ * @NFTA_CHAIN_HANDLE: numeric handle of the chain (NLA_U64)
+ * @NFTA_CHAIN_NAME: name of the chain (NLA_STRING)
+ * @NFTA_CHAIN_HOOK: hook specification for basechains (NLA_NESTED: nft_hook_attributes)
+ * @NFTA_CHAIN_POLICY: numeric policy of the chain (NLA_U32)
+ * @NFTA_CHAIN_USE: number of references to this chain (NLA_U32)
+ * @NFTA_CHAIN_TYPE: type name of the chain (NLA_NUL_STRING)
+ * @NFTA_CHAIN_COUNTERS: counter specification of the chain (NLA_NESTED: nft_counter_attributes)
+ */
+enum nft_chain_attributes {
+ NFTA_CHAIN_UNSPEC,
+ NFTA_CHAIN_TABLE,
+ NFTA_CHAIN_HANDLE,
+ NFTA_CHAIN_NAME,
+ NFTA_CHAIN_HOOK,
+ NFTA_CHAIN_POLICY,
+ NFTA_CHAIN_USE,
+ NFTA_CHAIN_TYPE,
+ NFTA_CHAIN_COUNTERS,
+ __NFTA_CHAIN_MAX
+};
+#define NFTA_CHAIN_MAX (__NFTA_CHAIN_MAX - 1)
+
+/**
+ * enum nft_rule_attributes - nf_tables rule netlink attributes
+ *
+ * @NFTA_RULE_TABLE: name of the table containing the rule (NLA_STRING)
+ * @NFTA_RULE_CHAIN: name of the chain containing the rule (NLA_STRING)
+ * @NFTA_RULE_HANDLE: numeric handle of the rule (NLA_U64)
+ * @NFTA_RULE_EXPRESSIONS: list of expressions (NLA_NESTED: nft_expr_attributes)
+ * @NFTA_RULE_COMPAT: compatibility specifications of the rule (NLA_NESTED: nft_rule_compat_attributes)
+ * @NFTA_RULE_POSITION: numeric handle of the previous rule (NLA_U64)
+ */
+enum nft_rule_attributes {
+ NFTA_RULE_UNSPEC,
+ NFTA_RULE_TABLE,
+ NFTA_RULE_CHAIN,
+ NFTA_RULE_HANDLE,
+ NFTA_RULE_EXPRESSIONS,
+ NFTA_RULE_COMPAT,
+ NFTA_RULE_POSITION,
+ __NFTA_RULE_MAX
+};
+#define NFTA_RULE_MAX (__NFTA_RULE_MAX - 1)
+
+/**
+ * enum nft_rule_compat_flags - nf_tables rule compat flags
+ *
+ * @NFT_RULE_COMPAT_F_INV: invert the check result
+ */
+enum nft_rule_compat_flags {
+ NFT_RULE_COMPAT_F_INV = (1 << 1),
+ NFT_RULE_COMPAT_F_MASK = NFT_RULE_COMPAT_F_INV,
+};
+
+/**
+ * enum nft_rule_compat_attributes - nf_tables rule compat attributes
+ *
+ * @NFTA_RULE_COMPAT_PROTO: numeric value of handled protocol (NLA_U32)
+ * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32)
+ */
+enum nft_rule_compat_attributes {
+ NFTA_RULE_COMPAT_UNSPEC,
+ NFTA_RULE_COMPAT_PROTO,
+ NFTA_RULE_COMPAT_FLAGS,
+ __NFTA_RULE_COMPAT_MAX
+};
+#define NFTA_RULE_COMPAT_MAX (__NFTA_RULE_COMPAT_MAX - 1)
+
+/**
+ * enum nft_set_flags - nf_tables set flags
+ *
+ * @NFT_SET_ANONYMOUS: name allocation, automatic cleanup on unlink
+ * @NFT_SET_CONSTANT: set contents may not change while bound
+ * @NFT_SET_INTERVAL: set contains intervals
+ * @NFT_SET_MAP: set is used as a dictionary
+ */
+enum nft_set_flags {
+ NFT_SET_ANONYMOUS = 0x1,
+ NFT_SET_CONSTANT = 0x2,
+ NFT_SET_INTERVAL = 0x4,
+ NFT_SET_MAP = 0x8,
+};
+
+/**
+ * enum nft_set_attributes - nf_tables set netlink attributes
+ *
+ * @NFTA_SET_TABLE: table name (NLA_STRING)
+ * @NFTA_SET_NAME: set name (NLA_STRING)
+ * @NFTA_SET_FLAGS: bitmask of enum nft_set_flags (NLA_U32)
+ * @NFTA_SET_KEY_TYPE: key data type, informational purpose only (NLA_U32)
+ * @NFTA_SET_KEY_LEN: key data length (NLA_U32)
+ * @NFTA_SET_DATA_TYPE: mapping data type (NLA_U32)
+ * @NFTA_SET_DATA_LEN: mapping data length (NLA_U32)
+ */
+enum nft_set_attributes {
+ NFTA_SET_UNSPEC,
+ NFTA_SET_TABLE,
+ NFTA_SET_NAME,
+ NFTA_SET_FLAGS,
+ NFTA_SET_KEY_TYPE,
+ NFTA_SET_KEY_LEN,
+ NFTA_SET_DATA_TYPE,
+ NFTA_SET_DATA_LEN,
+ __NFTA_SET_MAX
+};
+#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
+
+/**
+ * enum nft_set_elem_flags - nf_tables set element flags
+ *
+ * @NFT_SET_ELEM_INTERVAL_END: element ends the previous interval
+ */
+enum nft_set_elem_flags {
+ NFT_SET_ELEM_INTERVAL_END = 0x1,
+};
+
+/**
+ * enum nft_set_elem_attributes - nf_tables set element netlink attributes
+ *
+ * @NFTA_SET_ELEM_KEY: key value (NLA_NESTED: nft_data)
+ * @NFTA_SET_ELEM_DATA: data value of mapping (NLA_NESTED: nft_data_attributes)
+ * @NFTA_SET_ELEM_FLAGS: bitmask of nft_set_elem_flags (NLA_U32)
+ */
+enum nft_set_elem_attributes {
+ NFTA_SET_ELEM_UNSPEC,
+ NFTA_SET_ELEM_KEY,
+ NFTA_SET_ELEM_DATA,
+ NFTA_SET_ELEM_FLAGS,
+ __NFTA_SET_ELEM_MAX
+};
+#define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1)
+
+/**
+ * enum nft_set_elem_list_attributes - nf_tables set element list netlink attributes
+ *
+ * @NFTA_SET_ELEM_LIST_TABLE: table of the set to be changed (NLA_STRING)
+ * @NFTA_SET_ELEM_LIST_SET: name of the set to be changed (NLA_STRING)
+ * @NFTA_SET_ELEM_LIST_ELEMENTS: list of set elements (NLA_NESTED: nft_set_elem_attributes)
+ */
+enum nft_set_elem_list_attributes {
+ NFTA_SET_ELEM_LIST_UNSPEC,
+ NFTA_SET_ELEM_LIST_TABLE,
+ NFTA_SET_ELEM_LIST_SET,
+ NFTA_SET_ELEM_LIST_ELEMENTS,
+ __NFTA_SET_ELEM_LIST_MAX
+};
+#define NFTA_SET_ELEM_LIST_MAX (__NFTA_SET_ELEM_LIST_MAX - 1)
+
+/**
+ * enum nft_data_types - nf_tables data types
+ *
+ * @NFT_DATA_VALUE: generic data
+ * @NFT_DATA_VERDICT: netfilter verdict
+ *
+ * The type of data is usually determined by the kernel directly and is not
+ * explicitly specified by userspace. The only exception is sets, where
+ * userspace specifies the key and mapping data types.
+ *
+ * The values 0xffffff00-0xffffffff are reserved for internally used types.
+ * The remaining range can be freely used by userspace to encode types; all
+ * values are equivalent to NFT_DATA_VALUE.
+ */
+enum nft_data_types {
+ NFT_DATA_VALUE,
+ NFT_DATA_VERDICT = 0xffffff00U,
+};
+
+#define NFT_DATA_RESERVED_MASK 0xffffff00U
+
+/**
+ * enum nft_data_attributes - nf_tables data netlink attributes
+ *
+ * @NFTA_DATA_VALUE: generic data (NLA_BINARY)
+ * @NFTA_DATA_VERDICT: nf_tables verdict (NLA_NESTED: nft_verdict_attributes)
+ */
+enum nft_data_attributes {
+ NFTA_DATA_UNSPEC,
+ NFTA_DATA_VALUE,
+ NFTA_DATA_VERDICT,
+ __NFTA_DATA_MAX
+};
+#define NFTA_DATA_MAX (__NFTA_DATA_MAX - 1)
+
+/**
+ * enum nft_verdict_attributes - nf_tables verdict netlink attributes
+ *
+ * @NFTA_VERDICT_CODE: nf_tables verdict (NLA_U32: enum nft_verdicts)
+ * @NFTA_VERDICT_CHAIN: jump target chain name (NLA_STRING)
+ */
+enum nft_verdict_attributes {
+ NFTA_VERDICT_UNSPEC,
+ NFTA_VERDICT_CODE,
+ NFTA_VERDICT_CHAIN,
+ __NFTA_VERDICT_MAX
+};
+#define NFTA_VERDICT_MAX (__NFTA_VERDICT_MAX - 1)
+
+/**
+ * enum nft_expr_attributes - nf_tables expression netlink attributes
+ *
+ * @NFTA_EXPR_NAME: name of the expression type (NLA_STRING)
+ * @NFTA_EXPR_DATA: type specific data (NLA_NESTED)
+ */
+enum nft_expr_attributes {
+ NFTA_EXPR_UNSPEC,
+ NFTA_EXPR_NAME,
+ NFTA_EXPR_DATA,
+ __NFTA_EXPR_MAX
+};
+#define NFTA_EXPR_MAX (__NFTA_EXPR_MAX - 1)
+
+/**
+ * enum nft_immediate_attributes - nf_tables immediate expression netlink attributes
+ *
+ * @NFTA_IMMEDIATE_DREG: destination register to load data into (NLA_U32)
+ * @NFTA_IMMEDIATE_DATA: data to load (NLA_NESTED: nft_data_attributes)
+ */
+enum nft_immediate_attributes {
+ NFTA_IMMEDIATE_UNSPEC,
+ NFTA_IMMEDIATE_DREG,
+ NFTA_IMMEDIATE_DATA,
+ __NFTA_IMMEDIATE_MAX
+};
+#define NFTA_IMMEDIATE_MAX (__NFTA_IMMEDIATE_MAX - 1)
+
+/**
+ * enum nft_bitwise_attributes - nf_tables bitwise expression netlink attributes
+ *
+ * @NFTA_BITWISE_SREG: source register (NLA_U32: nft_registers)
+ * @NFTA_BITWISE_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_BITWISE_LEN: length of operands (NLA_U32)
+ * @NFTA_BITWISE_MASK: mask value (NLA_NESTED: nft_data_attributes)
+ * @NFTA_BITWISE_XOR: xor value (NLA_NESTED: nft_data_attributes)
+ *
+ * The bitwise expression performs the following operation:
+ *
+ * dreg = (sreg & mask) ^ xor
+ *
+ * which allows expressing all bitwise operations:
+ *
+ * mask xor
+ * NOT: 1 1
+ * OR: 0 x
+ * XOR: 1 x
+ * AND: x 0
+ */
+enum nft_bitwise_attributes {
+ NFTA_BITWISE_UNSPEC,
+ NFTA_BITWISE_SREG,
+ NFTA_BITWISE_DREG,
+ NFTA_BITWISE_LEN,
+ NFTA_BITWISE_MASK,
+ NFTA_BITWISE_XOR,
+ __NFTA_BITWISE_MAX
+};
+#define NFTA_BITWISE_MAX (__NFTA_BITWISE_MAX - 1)
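Illustrative sketch (not part of the patch, values made up): with dreg = (sreg & mask) ^ xor, clearing the low nibble and then setting bit 7 of a one-byte operand is encoded as mask 0xf0 and xor 0x80:

    static inline __u8 bitwise_example(__u8 sreg)
    {
            const __u8 m = 0xf0;         /* AND step: keep only the high nibble */
            const __u8 x = 0x80;         /* XOR step: set bit 7 */

            return (sreg & m) ^ x;       /* e.g. 0x3a -> 0x30 -> 0xb0 */
    }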
+
+/**
+ * enum nft_byteorder_ops - nf_tables byteorder operators
+ *
+ * @NFT_BYTEORDER_NTOH: network to host operator
+ * @NFT_BYTEORDER_HTON: host to network operator
+ */
+enum nft_byteorder_ops {
+ NFT_BYTEORDER_NTOH,
+ NFT_BYTEORDER_HTON,
+};
+
+/**
+ * enum nft_byteorder_attributes - nf_tables byteorder expression netlink attributes
+ *
+ * @NFTA_BYTEORDER_SREG: source register (NLA_U32: nft_registers)
+ * @NFTA_BYTEORDER_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_BYTEORDER_OP: operator (NLA_U32: enum nft_byteorder_ops)
+ * @NFTA_BYTEORDER_LEN: length of the data (NLA_U32)
+ * @NFTA_BYTEORDER_SIZE: data size in bytes (NLA_U32: 2 or 4)
+ */
+enum nft_byteorder_attributes {
+ NFTA_BYTEORDER_UNSPEC,
+ NFTA_BYTEORDER_SREG,
+ NFTA_BYTEORDER_DREG,
+ NFTA_BYTEORDER_OP,
+ NFTA_BYTEORDER_LEN,
+ NFTA_BYTEORDER_SIZE,
+ __NFTA_BYTEORDER_MAX
+};
+#define NFTA_BYTEORDER_MAX (__NFTA_BYTEORDER_MAX - 1)
+
+/**
+ * enum nft_cmp_ops - nf_tables relational operator
+ *
+ * @NFT_CMP_EQ: equal
+ * @NFT_CMP_NEQ: not equal
+ * @NFT_CMP_LT: less than
+ * @NFT_CMP_LTE: less than or equal to
+ * @NFT_CMP_GT: greater than
+ * @NFT_CMP_GTE: greater than or equal to
+ */
+enum nft_cmp_ops {
+ NFT_CMP_EQ,
+ NFT_CMP_NEQ,
+ NFT_CMP_LT,
+ NFT_CMP_LTE,
+ NFT_CMP_GT,
+ NFT_CMP_GTE,
+};
+
+/**
+ * enum nft_cmp_attributes - nf_tables cmp expression netlink attributes
+ *
+ * @NFTA_CMP_SREG: source register of data to compare (NLA_U32: nft_registers)
+ * @NFTA_CMP_OP: cmp operation (NLA_U32: nft_cmp_ops)
+ * @NFTA_CMP_DATA: data to compare against (NLA_NESTED: nft_data_attributes)
+ */
+enum nft_cmp_attributes {
+ NFTA_CMP_UNSPEC,
+ NFTA_CMP_SREG,
+ NFTA_CMP_OP,
+ NFTA_CMP_DATA,
+ __NFTA_CMP_MAX
+};
+#define NFTA_CMP_MAX (__NFTA_CMP_MAX - 1)
+
+/**
+ * enum nft_lookup_attributes - nf_tables set lookup expression netlink attributes
+ *
+ * @NFTA_LOOKUP_SET: name of the set to look up in (NLA_STRING)
+ * @NFTA_LOOKUP_SREG: source register of the data to look for (NLA_U32: nft_registers)
+ * @NFTA_LOOKUP_DREG: destination register (NLA_U32: nft_registers)
+ */
+enum nft_lookup_attributes {
+ NFTA_LOOKUP_UNSPEC,
+ NFTA_LOOKUP_SET,
+ NFTA_LOOKUP_SREG,
+ NFTA_LOOKUP_DREG,
+ __NFTA_LOOKUP_MAX
+};
+#define NFTA_LOOKUP_MAX (__NFTA_LOOKUP_MAX - 1)
+
+/**
+ * enum nft_payload_bases - nf_tables payload expression offset bases
+ *
+ * @NFT_PAYLOAD_LL_HEADER: link layer header
+ * @NFT_PAYLOAD_NETWORK_HEADER: network header
+ * @NFT_PAYLOAD_TRANSPORT_HEADER: transport header
+ */
+enum nft_payload_bases {
+ NFT_PAYLOAD_LL_HEADER,
+ NFT_PAYLOAD_NETWORK_HEADER,
+ NFT_PAYLOAD_TRANSPORT_HEADER,
+};
+
+/**
+ * enum nft_payload_attributes - nf_tables payload expression netlink attributes
+ *
+ * @NFTA_PAYLOAD_DREG: destination register to load data into (NLA_U32: nft_registers)
+ * @NFTA_PAYLOAD_BASE: payload base (NLA_U32: nft_payload_bases)
+ * @NFTA_PAYLOAD_OFFSET: payload offset relative to base (NLA_U32)
+ * @NFTA_PAYLOAD_LEN: payload length (NLA_U32)
+ */
+enum nft_payload_attributes {
+ NFTA_PAYLOAD_UNSPEC,
+ NFTA_PAYLOAD_DREG,
+ NFTA_PAYLOAD_BASE,
+ NFTA_PAYLOAD_OFFSET,
+ NFTA_PAYLOAD_LEN,
+ __NFTA_PAYLOAD_MAX
+};
+#define NFTA_PAYLOAD_MAX (__NFTA_PAYLOAD_MAX - 1)
+
+/**
+ * enum nft_exthdr_attributes - nf_tables IPv6 extension header expression netlink attributes
+ *
+ * @NFTA_EXTHDR_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_EXTHDR_TYPE: extension header type (NLA_U8)
+ * @NFTA_EXTHDR_OFFSET: extension header offset (NLA_U32)
+ * @NFTA_EXTHDR_LEN: extension header length (NLA_U32)
+ */
+enum nft_exthdr_attributes {
+ NFTA_EXTHDR_UNSPEC,
+ NFTA_EXTHDR_DREG,
+ NFTA_EXTHDR_TYPE,
+ NFTA_EXTHDR_OFFSET,
+ NFTA_EXTHDR_LEN,
+ __NFTA_EXTHDR_MAX
+};
+#define NFTA_EXTHDR_MAX (__NFTA_EXTHDR_MAX - 1)
+
+/**
+ * enum nft_meta_keys - nf_tables meta expression keys
+ *
+ * @NFT_META_LEN: packet length (skb->len)
+ * @NFT_META_PROTOCOL: packet ethertype protocol (skb->protocol), invalid in OUTPUT
+ * @NFT_META_PRIORITY: packet priority (skb->priority)
+ * @NFT_META_MARK: packet mark (skb->mark)
+ * @NFT_META_IIF: packet input interface index (dev->ifindex)
+ * @NFT_META_OIF: packet output interface index (dev->ifindex)
+ * @NFT_META_IIFNAME: packet input interface name (dev->name)
+ * @NFT_META_OIFNAME: packet output interface name (dev->name)
+ * @NFT_META_IIFTYPE: packet input interface type (dev->type)
+ * @NFT_META_OIFTYPE: packet output interface type (dev->type)
+ * @NFT_META_SKUID: originating socket UID (fsuid)
+ * @NFT_META_SKGID: originating socket GID (fsgid)
+ * @NFT_META_NFTRACE: packet nftrace bit
+ * @NFT_META_RTCLASSID: realm value of packet's route (skb->dst->tclassid)
+ * @NFT_META_SECMARK: packet secmark (skb->secmark)
+ */
+enum nft_meta_keys {
+ NFT_META_LEN,
+ NFT_META_PROTOCOL,
+ NFT_META_PRIORITY,
+ NFT_META_MARK,
+ NFT_META_IIF,
+ NFT_META_OIF,
+ NFT_META_IIFNAME,
+ NFT_META_OIFNAME,
+ NFT_META_IIFTYPE,
+ NFT_META_OIFTYPE,
+ NFT_META_SKUID,
+ NFT_META_SKGID,
+ NFT_META_NFTRACE,
+ NFT_META_RTCLASSID,
+ NFT_META_SECMARK,
+};
+
+/**
+ * enum nft_meta_attributes - nf_tables meta expression netlink attributes
+ *
+ * @NFTA_META_DREG: destination register (NLA_U32)
+ * @NFTA_META_KEY: meta data item to load (NLA_U32: nft_meta_keys)
+ */
+enum nft_meta_attributes {
+ NFTA_META_UNSPEC,
+ NFTA_META_DREG,
+ NFTA_META_KEY,
+ __NFTA_META_MAX
+};
+#define NFTA_META_MAX (__NFTA_META_MAX - 1)
+
+/**
+ * enum nft_ct_keys - nf_tables ct expression keys
+ *
+ * @NFT_CT_STATE: conntrack state (bitmask of enum ip_conntrack_info)
+ * @NFT_CT_DIRECTION: conntrack direction (enum ip_conntrack_dir)
+ * @NFT_CT_STATUS: conntrack status (bitmask of enum ip_conntrack_status)
+ * @NFT_CT_MARK: conntrack mark value
+ * @NFT_CT_SECMARK: conntrack secmark value
+ * @NFT_CT_EXPIRATION: relative conntrack expiration time in ms
+ * @NFT_CT_HELPER: connection tracking helper assigned to conntrack
+ * @NFT_CT_L3PROTOCOL: conntrack layer 3 protocol
+ * @NFT_CT_SRC: conntrack layer 3 protocol source (IPv4/IPv6 address)
+ * @NFT_CT_DST: conntrack layer 3 protocol destination (IPv4/IPv6 address)
+ * @NFT_CT_PROTOCOL: conntrack layer 4 protocol
+ * @NFT_CT_PROTO_SRC: conntrack layer 4 protocol source
+ * @NFT_CT_PROTO_DST: conntrack layer 4 protocol destination
+ */
+enum nft_ct_keys {
+ NFT_CT_STATE,
+ NFT_CT_DIRECTION,
+ NFT_CT_STATUS,
+ NFT_CT_MARK,
+ NFT_CT_SECMARK,
+ NFT_CT_EXPIRATION,
+ NFT_CT_HELPER,
+ NFT_CT_L3PROTOCOL,
+ NFT_CT_SRC,
+ NFT_CT_DST,
+ NFT_CT_PROTOCOL,
+ NFT_CT_PROTO_SRC,
+ NFT_CT_PROTO_DST,
+};
+
+/**
+ * enum nft_ct_attributes - nf_tables ct expression netlink attributes
+ *
+ * @NFTA_CT_DREG: destination register (NLA_U32)
+ * @NFTA_CT_KEY: conntrack data item to load (NLA_U32: nft_ct_keys)
+ * @NFTA_CT_DIRECTION: direction in case of directional keys (NLA_U8)
+ */
+enum nft_ct_attributes {
+ NFTA_CT_UNSPEC,
+ NFTA_CT_DREG,
+ NFTA_CT_KEY,
+ NFTA_CT_DIRECTION,
+ __NFTA_CT_MAX
+};
+#define NFTA_CT_MAX (__NFTA_CT_MAX - 1)
+
+/**
+ * enum nft_limit_attributes - nf_tables limit expression netlink attributes
+ *
+ * @NFTA_LIMIT_RATE: refill rate (NLA_U64)
+ * @NFTA_LIMIT_UNIT: refill unit (NLA_U64)
+ */
+enum nft_limit_attributes {
+ NFTA_LIMIT_UNSPEC,
+ NFTA_LIMIT_RATE,
+ NFTA_LIMIT_UNIT,
+ __NFTA_LIMIT_MAX
+};
+#define NFTA_LIMIT_MAX (__NFTA_LIMIT_MAX - 1)
+
+/**
+ * enum nft_counter_attributes - nf_tables counter expression netlink attributes
+ *
+ * @NFTA_COUNTER_BYTES: number of bytes (NLA_U64)
+ * @NFTA_COUNTER_PACKETS: number of packets (NLA_U64)
+ */
+enum nft_counter_attributes {
+ NFTA_COUNTER_UNSPEC,
+ NFTA_COUNTER_BYTES,
+ NFTA_COUNTER_PACKETS,
+ __NFTA_COUNTER_MAX
+};
+#define NFTA_COUNTER_MAX (__NFTA_COUNTER_MAX - 1)
+
+/**
+ * enum nft_log_attributes - nf_tables log expression netlink attributes
+ *
+ * @NFTA_LOG_GROUP: netlink group to send messages to (NLA_U32)
+ * @NFTA_LOG_PREFIX: prefix to prepend to log messages (NLA_STRING)
+ * @NFTA_LOG_SNAPLEN: length of payload to include in netlink message (NLA_U32)
+ * @NFTA_LOG_QTHRESHOLD: queue threshold (NLA_U32)
+ */
+enum nft_log_attributes {
+ NFTA_LOG_UNSPEC,
+ NFTA_LOG_GROUP,
+ NFTA_LOG_PREFIX,
+ NFTA_LOG_SNAPLEN,
+ NFTA_LOG_QTHRESHOLD,
+ __NFTA_LOG_MAX
+};
+#define NFTA_LOG_MAX (__NFTA_LOG_MAX - 1)
+
+/**
+ * enum nft_reject_types - nf_tables reject expression reject types
+ *
+ * @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable
+ * @NFT_REJECT_TCP_RST: reject using TCP RST
+ */
+enum nft_reject_types {
+ NFT_REJECT_ICMP_UNREACH,
+ NFT_REJECT_TCP_RST,
+};
+
+/**
+ * enum nft_reject_attributes - nf_tables reject expression netlink attributes
+ *
+ * @NFTA_REJECT_TYPE: packet type to use (NLA_U32: nft_reject_types)
+ * @NFTA_REJECT_ICMP_CODE: ICMP code to use (NLA_U8)
+ */
+enum nft_reject_attributes {
+ NFTA_REJECT_UNSPEC,
+ NFTA_REJECT_TYPE,
+ NFTA_REJECT_ICMP_CODE,
+ __NFTA_REJECT_MAX
+};
+#define NFTA_REJECT_MAX (__NFTA_REJECT_MAX - 1)
+
+/**
+ * enum nft_nat_types - nf_tables nat expression NAT types
+ *
+ * @NFT_NAT_SNAT: source NAT
+ * @NFT_NAT_DNAT: destination NAT
+ */
+enum nft_nat_types {
+ NFT_NAT_SNAT,
+ NFT_NAT_DNAT,
+};
+
+/**
+ * enum nft_nat_attributes - nf_tables nat expression netlink attributes
+ *
+ * @NFTA_NAT_TYPE: NAT type (NLA_U32: nft_nat_types)
+ * @NFTA_NAT_FAMILY: NAT family (NLA_U32)
+ * @NFTA_NAT_REG_ADDR_MIN: source register of address range start (NLA_U32: nft_registers)
+ * @NFTA_NAT_REG_ADDR_MAX: source register of address range end (NLA_U32: nft_registers)
+ * @NFTA_NAT_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers)
+ * @NFTA_NAT_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers)
+ */
+enum nft_nat_attributes {
+ NFTA_NAT_UNSPEC,
+ NFTA_NAT_TYPE,
+ NFTA_NAT_FAMILY,
+ NFTA_NAT_REG_ADDR_MIN,
+ NFTA_NAT_REG_ADDR_MAX,
+ NFTA_NAT_REG_PROTO_MIN,
+ NFTA_NAT_REG_PROTO_MAX,
+ __NFTA_NAT_MAX
+};
+#define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1)
+
+#endif /* _LINUX_NF_TABLES_H */
diff --git a/include/uapi/linux/netfilter/nf_tables_compat.h b/include/uapi/linux/netfilter/nf_tables_compat.h
new file mode 100644
index 000000000000..8310f5f76551
--- /dev/null
+++ b/include/uapi/linux/netfilter/nf_tables_compat.h
@@ -0,0 +1,38 @@
+#ifndef _NFT_COMPAT_NFNETLINK_H_
+#define _NFT_COMPAT_NFNETLINK_H_
+
+enum nft_target_attributes {
+ NFTA_TARGET_UNSPEC,
+ NFTA_TARGET_NAME,
+ NFTA_TARGET_REV,
+ NFTA_TARGET_INFO,
+ __NFTA_TARGET_MAX
+};
+#define NFTA_TARGET_MAX (__NFTA_TARGET_MAX - 1)
+
+enum nft_match_attributes {
+ NFTA_MATCH_UNSPEC,
+ NFTA_MATCH_NAME,
+ NFTA_MATCH_REV,
+ NFTA_MATCH_INFO,
+ __NFTA_MATCH_MAX
+};
+#define NFTA_MATCH_MAX (__NFTA_MATCH_MAX - 1)
+
+#define NFT_COMPAT_NAME_MAX 32
+
+enum {
+ NFNL_MSG_COMPAT_GET,
+ NFNL_MSG_COMPAT_MAX
+};
+
+enum {
+ NFTA_COMPAT_UNSPEC = 0,
+ NFTA_COMPAT_NAME,
+ NFTA_COMPAT_REV,
+ NFTA_COMPAT_TYPE,
+ __NFTA_COMPAT_MAX,
+};
+#define NFTA_COMPAT_MAX (__NFTA_COMPAT_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/netfilter/nfnetlink.h b/include/uapi/linux/netfilter/nfnetlink.h
index 4a4efafad5f4..596ddd45253c 100644
--- a/include/uapi/linux/netfilter/nfnetlink.h
+++ b/include/uapi/linux/netfilter/nfnetlink.h
@@ -18,6 +18,8 @@ enum nfnetlink_groups {
#define NFNLGRP_CONNTRACK_EXP_UPDATE NFNLGRP_CONNTRACK_EXP_UPDATE
NFNLGRP_CONNTRACK_EXP_DESTROY,
#define NFNLGRP_CONNTRACK_EXP_DESTROY NFNLGRP_CONNTRACK_EXP_DESTROY
+ NFNLGRP_NFTABLES,
+#define NFNLGRP_NFTABLES NFNLGRP_NFTABLES
__NFNLGRP_MAX,
};
#define NFNLGRP_MAX (__NFNLGRP_MAX - 1)
@@ -51,6 +53,12 @@ struct nfgenmsg {
#define NFNL_SUBSYS_ACCT 7
#define NFNL_SUBSYS_CTNETLINK_TIMEOUT 8
#define NFNL_SUBSYS_CTHELPER 9
-#define NFNL_SUBSYS_COUNT 10
+#define NFNL_SUBSYS_NFTABLES 10
+#define NFNL_SUBSYS_NFT_COMPAT 11
+#define NFNL_SUBSYS_COUNT 12
+
+/* Reserved control nfnetlink messages */
+#define NFNL_MSG_BATCH_BEGIN NLMSG_MIN_TYPE
+#define NFNL_MSG_BATCH_END NLMSG_MIN_TYPE+1
#endif /* _UAPI_NFNETLINK_H */
diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
index a2810a7c5e30..1ab0b97b3a1e 100644
--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
@@ -6,6 +6,8 @@ enum ctnl_timeout_msg_types {
IPCTNL_MSG_TIMEOUT_NEW,
IPCTNL_MSG_TIMEOUT_GET,
IPCTNL_MSG_TIMEOUT_DELETE,
+ IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
+ IPCTNL_MSG_TIMEOUT_DEFAULT_GET,
IPCTNL_MSG_TIMEOUT_MAX
};
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index 29bed72a4ac4..6ad6cc03ccd3 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -85,6 +85,7 @@
* a specific SE notifies us about the end of a transaction. The parameter
* for this event is the application ID (AID).
* @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller.
+ * @NFC_CMD_SE_IO: Send/Receive APDUs to/from the selected secure element.
*/
enum nfc_commands {
NFC_CMD_UNSPEC,
@@ -114,6 +115,7 @@ enum nfc_commands {
NFC_EVENT_SE_CONNECTIVITY,
NFC_EVENT_SE_TRANSACTION,
NFC_CMD_GET_SE,
+ NFC_CMD_SE_IO,
/* private: internal use only */
__NFC_CMD_AFTER_LAST
};
@@ -147,6 +149,7 @@ enum nfc_commands {
* @NFC_ATTR_SE_INDEX: Secure element index
* @NFC_ATTR_SE_TYPE: Secure element type (UICC or EMBEDDED)
* @NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS: Firmware download operation status
+ * @NFC_ATTR_APDU: Secure element APDU
*/
enum nfc_attrs {
NFC_ATTR_UNSPEC,
@@ -174,6 +177,7 @@ enum nfc_attrs {
NFC_ATTR_SE_TYPE,
NFC_ATTR_SE_AID,
NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS,
+ NFC_ATTR_SE_APDU,
/* private: internal use only */
__NFC_ATTR_AFTER_LAST
};
diff --git a/include/uapi/linux/nfs_mount.h b/include/uapi/linux/nfs_mount.h
index 576bddd72e04..64b0f22f5c4c 100644
--- a/include/uapi/linux/nfs_mount.h
+++ b/include/uapi/linux/nfs_mount.h
@@ -60,7 +60,7 @@ struct nfs_mount_data {
#define NFS_MOUNT_BROKEN_SUID 0x0400 /* 4 */
#define NFS_MOUNT_NOACL 0x0800 /* 4 */
#define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */
-#define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */
+#define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 non-text parsed mount data only */
#define NFS_MOUNT_NORDIRPLUS 0x4000 /* 5 */
#define NFS_MOUNT_UNSHARED 0x8000 /* 5 */
#define NFS_MOUNT_FLAGMASK 0xFFFF
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index a74d375b439b..d120f9fe0017 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -63,15 +63,18 @@ enum ovs_datapath_cmd {
* not be sent.
* @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the
* datapath. Always present in notifications.
+ * @OVS_DP_ATTR_MEGAFLOW_STATS: Statistics about mega flow masks usage for the
+ * datapath. Always present in notifications.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_DP_* commands.
*/
enum ovs_datapath_attr {
OVS_DP_ATTR_UNSPEC,
- OVS_DP_ATTR_NAME, /* name of dp_ifindex netdev */
- OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */
- OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */
+ OVS_DP_ATTR_NAME, /* name of dp_ifindex netdev */
+ OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */
+ OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */
+ OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */
__OVS_DP_ATTR_MAX
};
@@ -84,6 +87,14 @@ struct ovs_dp_stats {
__u64 n_flows; /* Number of flows present */
};
+struct ovs_dp_megaflow_stats {
+ __u64 n_mask_hit; /* Number of masks used for flow lookups. */
+ __u32 n_masks; /* Number of masks for the datapath. */
+ __u32 pad0; /* Pad for future expansion. */
+ __u64 pad1; /* Pad for future expansion. */
+ __u64 pad2; /* Pad for future expansion. */
+};
+
struct ovs_vport_stats {
__u64 rx_packets; /* total packets received */
__u64 tx_packets; /* total packets transmitted */
@@ -260,6 +271,7 @@ enum ovs_key_attr {
OVS_KEY_ATTR_SKB_MARK, /* u32 skb mark */
OVS_KEY_ATTR_TUNNEL, /* Nested set of ovs_tunnel attributes */
OVS_KEY_ATTR_SCTP, /* struct ovs_key_sctp */
+ OVS_KEY_ATTR_TCP_FLAGS, /* be16 TCP flags. */
#ifdef __KERNEL__
OVS_KEY_ATTR_IPV4_TUNNEL, /* struct ovs_key_ipv4_tunnel */
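The new ovs_dp_megaflow_stats structure is most useful next to the existing ovs_dp_stats counters. A small sketch of how a monitoring tool might combine the two once it has parsed them out of a datapath dump reply (attribute parsing is omitted; the function name report() is illustrative only):

#include <stdio.h>
#include <linux/openvswitch.h>

/* Derive the average number of mask lookups per packet from the two
 * stats structures a dump reply may carry. */
static void report(const struct ovs_dp_stats *s,
		   const struct ovs_dp_megaflow_stats *ms)
{
	__u64 pkts = s->n_hit + s->n_missed;

	printf("%u masks in use\n", ms->n_masks);
	if (pkts)
		printf("%.2f mask lookups per packet\n",
		       (double)ms->n_mask_hit / pkts);
}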
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index baa7852468ef..0890556f779e 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -319,7 +319,6 @@
#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
-#define PCI_MSIX_FLAGS_BIRMASK (7 << 0) /* deprecated */
#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
/* MSI-X entry's format */
@@ -558,7 +557,8 @@
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
-#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */
+#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
+#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
#define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x0100 /* Allow IDO for requests */
#define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x0200 /* Allow IDO for completions */
#define PCI_EXP_DEVCTL2_LTR_EN 0x0400 /* Enable LTR mechanism */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 009a655a5d35..e1802d6153ae 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -136,8 +136,9 @@ enum perf_event_sample_format {
PERF_SAMPLE_WEIGHT = 1U << 14,
PERF_SAMPLE_DATA_SRC = 1U << 15,
PERF_SAMPLE_IDENTIFIER = 1U << 16,
+ PERF_SAMPLE_TRANSACTION = 1U << 17,
- PERF_SAMPLE_MAX = 1U << 17, /* non-ABI */
+ PERF_SAMPLE_MAX = 1U << 18, /* non-ABI */
};
/*
@@ -181,6 +182,28 @@ enum perf_sample_regs_abi {
};
/*
+ * Values for the memory transaction event qualifier, mostly for
+ * abort events. Multiple bits can be set.
+ */
+enum {
+ PERF_TXN_ELISION = (1 << 0), /* From elision */
+ PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */
+ PERF_TXN_SYNC = (1 << 2), /* Instruction is related */
+ PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */
+ PERF_TXN_RETRY = (1 << 4), /* Retry possible */
+ PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */
+ PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
+ PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */
+
+ PERF_TXN_MAX = (1 << 8), /* non-ABI */
+
+ /* bits 32..63 are reserved for the abort code */
+
+ PERF_TXN_ABORT_MASK = (0xffffffffULL << 32),
+ PERF_TXN_ABORT_SHIFT = 32,
+};
+
+/*
* The format of the data returned by read() on a perf event fd,
* as specified by attr.read_format:
*
@@ -456,13 +479,15 @@ struct perf_event_mmap_page {
/*
* Control data for the mmap() data buffer.
*
- * User-space reading the @data_head value should issue an rmb(), on
- * SMP capable platforms, after reading this value -- see
- * perf_event_wakeup().
+ * User-space reading the @data_head value should issue an smp_rmb(),
+ * after reading this value.
*
* When the mapping is PROT_WRITE the @data_tail value should be
- * written by userspace to reflect the last read data. In this case
- * the kernel will not over-write unread data.
+ * written by userspace to reflect the last read data, after issuing
+ * an smp_mb() to separate the data read from the ->data_tail store.
+ * In this case the kernel will not over-write unread data.
+ *
+ * See perf_output_put_handle() for the data ordering.
*/
__u64 data_head; /* head in the data section */
__u64 data_tail; /* user-space written tail */
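The PERF_TXN_* qualifier added above is delivered as a single 64-bit word with flag bits in the low half and the architecture-specific abort code in bits 32..63. A sketch of how a consuming tool might decode it; the printed labels are illustrative and not defined by this patch:

#include <stdio.h>
#include <stdint.h>
#include <linux/perf_event.h>

/* Illustrative decoder for a PERF_SAMPLE_TRANSACTION qualifier word. */
static void decode_txn(uint64_t txn)
{
	if (txn & PERF_TXN_ELISION)
		printf(" elision");
	if (txn & PERF_TXN_TRANSACTION)
		printf(" transaction");
	if (txn & PERF_TXN_RETRY)
		printf(" retry-possible");
	if (txn & PERF_TXN_CONFLICT)
		printf(" conflict");
	if (txn & (PERF_TXN_CAPACITY_READ | PERF_TXN_CAPACITY_WRITE))
		printf(" capacity");

	/* Bits 32..63 carry the abort code. */
	printf(" abort-code=%u\n",
	       (unsigned int)((txn & PERF_TXN_ABORT_MASK) >> PERF_TXN_ABORT_SHIFT));
}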
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 082eafaf026b..25731dfb3fcc 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -388,6 +388,20 @@ enum {
#define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1)
+/* BPF classifier */
+
+enum {
+ TCA_BPF_UNSPEC,
+ TCA_BPF_ACT,
+ TCA_BPF_POLICE,
+ TCA_BPF_CLASSID,
+ TCA_BPF_OPS_LEN,
+ TCA_BPF_OPS,
+ __TCA_BPF_MAX,
+};
+
+#define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
+
/* Extended Matches */
struct tcf_ematch_tree_hdr {
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 9b829134d422..f2624b549e61 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -357,6 +357,8 @@ enum {
TCA_HTB_CTAB,
TCA_HTB_RTAB,
TCA_HTB_DIRECT_QLEN,
+ TCA_HTB_RATE64,
+ TCA_HTB_CEIL64,
__TCA_HTB_MAX,
};
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index 0623ec4e728f..56f121605c99 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -1,5 +1,6 @@
# UAPI Header export list
header-y += tc_csum.h
+header-y += tc_defact.h
header-y += tc_gact.h
header-y += tc_ipt.h
header-y += tc_mirred.h
diff --git a/include/linux/tc_act/tc_defact.h b/include/uapi/linux/tc_act/tc_defact.h
index 6f65d07c7ce2..17dddb40f740 100644
--- a/include/linux/tc_act/tc_defact.h
+++ b/include/uapi/linux/tc_act/tc_defact.h
@@ -6,7 +6,7 @@
struct tc_defact {
tc_gen;
};
-
+
enum {
TCA_DEF_UNSPEC,
TCA_DEF_TM,
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 083bb5a5aae2..1666aabbbb86 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -160,6 +160,10 @@ enum v4l2_colorfx {
* of controls. Total of 16 controls is reserved for this driver */
#define V4L2_CID_USER_SI476X_BASE (V4L2_CID_USER_BASE + 0x1040)
+/* The base for the TI VPE driver controls. Total of 16 controls is reserved for
+ * this driver */
+#define V4L2_CID_USER_TI_VPE_BASE (V4L2_CID_USER_BASE + 0x1050)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index 36eace03b2ac..e272ea060e38 100644
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -94,10 +94,10 @@ struct mtd_write_req {
#define MTD_RAM 1
#define MTD_ROM 2
#define MTD_NORFLASH 3
-#define MTD_NANDFLASH 4
+#define MTD_NANDFLASH 4 /* SLC NAND */
#define MTD_DATAFLASH 6
#define MTD_UBIVOLUME 7
-#define MTD_MLCNANDFLASH 8
+#define MTD_MLCNANDFLASH 8 /* MLC NAND (including TLC) */
#define MTD_WRITEABLE 0x400 /* Device is writeable */
#define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */
@@ -275,4 +275,9 @@ enum mtd_file_modes {
MTD_FILE_MODE_RAW,
};
+static inline int mtd_type_is_nand_user(const struct mtd_info_user *mtd)
+{
+ return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
+}
+
#endif /* __MTD_ABI_H__ */
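The new mtd_type_is_nand_user() helper is meant to be used on the mtd_info_user structure returned by MEMGETINFO. A minimal userspace sketch, assuming the usual <mtd/mtd-user.h> wrapper header and a /dev/mtd0 device node (both assumptions, not mandated by this patch):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>	/* pulls in mtd-abi.h */

int main(void)
{
	struct mtd_info_user info;
	int fd = open("/dev/mtd0", O_RDONLY);	/* device path is an assumption */

	if (fd < 0 || ioctl(fd, MEMGETINFO, &info) < 0)
		return 1;

	/* Covers both MTD_NANDFLASH (SLC) and MTD_MLCNANDFLASH (MLC/TLC). */
	printf("%s, erase block %u bytes\n",
	       mtd_type_is_nand_user(&info) ? "NAND" : "not NAND",
	       info.erasesize);
	close(fd);
	return 0;
}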
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 0b233c56b0e4..e3ddd86c90a6 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -87,8 +87,10 @@ enum {
IB_USER_VERBS_CMD_CLOSE_XRCD,
IB_USER_VERBS_CMD_CREATE_XSRQ,
IB_USER_VERBS_CMD_OPEN_QP,
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
IB_USER_VERBS_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
IB_USER_VERBS_CMD_DESTROY_FLOW
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
};
/*
@@ -126,6 +128,7 @@ struct ib_uverbs_cmd_hdr {
__u16 out_words;
};
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
struct ib_uverbs_cmd_hdr_ex {
__u32 command;
__u16 in_words;
@@ -134,6 +137,7 @@ struct ib_uverbs_cmd_hdr_ex {
__u16 provider_out_words;
__u32 cmd_hdr_reserved;
};
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
struct ib_uverbs_get_context {
__u64 response;
@@ -696,6 +700,7 @@ struct ib_uverbs_detach_mcast {
__u64 driver_data[0];
};
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
struct ib_kern_eth_filter {
__u8 dst_mac[6];
__u8 src_mac[6];
@@ -780,6 +785,7 @@ struct ib_uverbs_destroy_flow {
__u32 comp_mask;
__u32 flow_handle;
};
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
struct ib_uverbs_create_srq {
__u64 response;
diff --git a/include/uapi/sound/Kbuild b/include/uapi/sound/Kbuild
index 0f7d279ebde3..a7f27704f980 100644
--- a/include/uapi/sound/Kbuild
+++ b/include/uapi/sound/Kbuild
@@ -5,6 +5,7 @@ header-y += asound_fm.h
header-y += compress_offload.h
header-y += compress_params.h
header-y += emu10k1.h
+header-y += firewire.h
header-y += hdsp.h
header-y += hdspm.h
header-y += sb16_csp.h
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 041203f20f6d..9fc6219d3848 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -93,9 +93,10 @@ enum {
SNDRV_HWDEP_IFACE_SB_RC, /* SB Extigy/Audigy2NX remote control */
SNDRV_HWDEP_IFACE_HDA, /* HD-audio */
SNDRV_HWDEP_IFACE_USB_STREAM, /* direct access to usb stream */
+ SNDRV_HWDEP_IFACE_FW_DICE, /* TC DICE FireWire device */
/* Don't forget to change the following: */
- SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_USB_STREAM
+ SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_DICE
};
struct snd_hwdep_info {
diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h
new file mode 100644
index 000000000000..e86131ca49e5
--- /dev/null
+++ b/include/uapi/sound/firewire.h
@@ -0,0 +1,51 @@
+#ifndef UAPI_SOUND_FIREWIRE_H_INCLUDED
+#define UAPI_SOUND_FIREWIRE_H_INCLUDED
+
+#include <linux/ioctl.h>
+
+/* events can be read() from the hwdep device */
+
+#define SNDRV_FIREWIRE_EVENT_LOCK_STATUS 0x000010cc
+#define SNDRV_FIREWIRE_EVENT_DICE_NOTIFICATION 0xd1ce004e
+
+struct snd_firewire_event_common {
+ unsigned int type; /* SNDRV_FIREWIRE_EVENT_xxx */
+};
+
+struct snd_firewire_event_lock_status {
+ unsigned int type;
+ unsigned int status; /* 0/1 = unlocked/locked */
+};
+
+struct snd_firewire_event_dice_notification {
+ unsigned int type;
+ unsigned int notification; /* DICE-specific bits */
+};
+
+union snd_firewire_event {
+ struct snd_firewire_event_common common;
+ struct snd_firewire_event_lock_status lock_status;
+ struct snd_firewire_event_dice_notification dice_notification;
+};
+
+
+#define SNDRV_FIREWIRE_IOCTL_GET_INFO _IOR('H', 0xf8, struct snd_firewire_get_info)
+#define SNDRV_FIREWIRE_IOCTL_LOCK _IO('H', 0xf9)
+#define SNDRV_FIREWIRE_IOCTL_UNLOCK _IO('H', 0xfa)
+
+#define SNDRV_FIREWIRE_TYPE_DICE 1
+/* Fireworks, AV/C, RME, MOTU, ... */
+
+struct snd_firewire_get_info {
+ unsigned int type; /* SNDRV_FIREWIRE_TYPE_xxx */
+ unsigned int card; /* same as fw_cdev_get_info.card */
+ unsigned char guid[8];
+ char device_name[16]; /* device node in /dev */
+};
+
+/*
+ * SNDRV_FIREWIRE_IOCTL_LOCK prevents the driver from streaming.
+ * Returns -EBUSY if the driver is already streaming.
+ */
+
+#endif
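A short sketch of how userspace might drive the new hwdep interface: query the device with SNDRV_FIREWIRE_IOCTL_GET_INFO, take the streaming lock, and read one event from the hwdep node. The /dev/snd/hwC0D0 path and the single-event handling are assumptions for illustration.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/firewire.h>

int main(void)
{
	union snd_firewire_event ev;
	struct snd_firewire_get_info info;
	int fd = open("/dev/snd/hwC0D0", O_RDWR);	/* hwdep node path is an assumption */

	if (fd < 0 || ioctl(fd, SNDRV_FIREWIRE_IOCTL_GET_INFO, &info) < 0)
		return 1;
	printf("type %u on card %u (%s)\n", info.type, info.card, info.device_name);

	/* LOCK returns -EBUSY if the driver is already streaming. */
	if (ioctl(fd, SNDRV_FIREWIRE_IOCTL_LOCK) == 0 &&
	    read(fd, &ev, sizeof(ev)) >= (ssize_t)sizeof(ev.common)) {
		if (ev.common.type == SNDRV_FIREWIRE_EVENT_LOCK_STATUS)
			printf("lock status: %u\n", ev.lock_status.status);
		ioctl(fd, SNDRV_FIREWIRE_IOCTL_UNLOCK);
	}
	close(fd);
	return 0;
}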
diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h
index 0f5a2fc69af9..c79f38131926 100644
--- a/include/video/atmel_lcdc.h
+++ b/include/video/atmel_lcdc.h
@@ -31,39 +31,20 @@
#define ATMEL_LCDC_WIRING_BGR 0
#define ATMEL_LCDC_WIRING_RGB 1
-struct atmel_lcdfb_config;
/* LCD Controller info data structure, stored in device platform_data */
-struct atmel_lcdfb_info {
- spinlock_t lock;
- struct fb_info *info;
- void __iomem *mmio;
- int irq_base;
- struct work_struct task;
-
+struct atmel_lcdfb_pdata {
unsigned int guard_time;
- unsigned int smem_len;
- struct platform_device *pdev;
- struct clk *bus_clk;
- struct clk *lcdc_clk;
-
-#ifdef CONFIG_BACKLIGHT_ATMEL_LCDC
- struct backlight_device *backlight;
- u8 bl_power;
-#endif
bool lcdcon_is_backlight;
bool lcdcon_pol_negative;
- u8 saved_lcdcon;
-
u8 default_bpp;
u8 lcd_wiring_mode;
unsigned int default_lcdcon2;
unsigned int default_dmacon;
- void (*atmel_lcdfb_power_control)(int on);
+ void (*atmel_lcdfb_power_control)(struct atmel_lcdfb_pdata *pdata, int on);
struct fb_monspecs *default_monspecs;
- u32 pseudo_palette[16];
- struct atmel_lcdfb_config *config;
+ struct list_head pwr_gpios;
};
#define ATMEL_LCDC_DMABADDR1 0x00
diff --git a/include/video/mmp_disp.h b/include/video/mmp_disp.h
index b9dd1fbb0082..9fd9398368d5 100644
--- a/include/video/mmp_disp.h
+++ b/include/video/mmp_disp.h
@@ -91,6 +91,11 @@ struct mmp_win {
u16 up_crop;
u16 bottom_crop;
int pix_fmt;
+ /*
+ * pitch[0]: graphics/video layer line length or y pitch
+ * pitch[1]/pitch[2]: video u/v pitch if non-zero
+ */
+ u32 pitch[3];
};
struct mmp_addr {
@@ -334,6 +339,7 @@ struct mmp_mach_path_config {
int output_type;
u32 path_config;
u32 link_config;
+ u32 dsi_rbswap;
};
struct mmp_mach_plat_info {
diff --git a/include/video/omap-panel-data.h b/include/video/omap-panel-data.h
index f7ac8d972af0..69279c013ac4 100644
--- a/include/video/omap-panel-data.h
+++ b/include/video/omap-panel-data.h
@@ -238,4 +238,17 @@ struct panel_nec_nl8048hl11_platform_data {
int qvga_gpio;
};
+/**
+ * panel-tpo-td028ttec1 platform data
+ * @name: name for display entity
+ * @source: name of the display entity used as a video source
+ * @data_lines: number of DPI datalines
+ */
+struct panel_tpo_td028ttec1_platform_data {
+ const char *name;
+ const char *source;
+
+ int data_lines;
+};
+
#endif /* __OMAP_PANEL_DATA_H */
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index eb262e3324d2..c50061db6098 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -51,6 +51,20 @@
*/
/*
+ * "feature-no-csum-offload" should be used to turn IPv4 TCP/UDP checksum
+ * offload off or on. If it is missing then the feature is assumed to be on.
+ * "feature-ipv6-csum-offload" should be used to turn IPv6 TCP/UDP checksum
+ * offload on or off. If it is missing then the feature is assumed to be off.
+ */
+
+/*
+ * "feature-gso-tcpv4" and "feature-gso-tcpv6" advertise the capability to
+ * handle large TCP packets (in IPv4 or IPv6 form respectively). Neither
+ * frontends nor backends are assumed to be capable unless the flags are
+ * present.
+ */
+
+/*
* This is the 'wire' format for packets:
* Request 1: xen_netif_tx_request -- XEN_NETTXF_* (any flags)
* [Request 2: xen_netif_extra_info] (only if request 1 has XEN_NETTXF_extra_info)
@@ -95,8 +109,10 @@ struct xen_netif_tx_request {
#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
-/* GSO types - only TCPv4 currently supported. */
+/* GSO types */
+#define XEN_NETIF_GSO_TYPE_NONE (0)
#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
+#define XEN_NETIF_GSO_TYPE_TCPV6 (2)
/*
* This structure needs to fit within both netif_tx_request and
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index de8bcc641c49..7b644650d968 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -55,4 +55,6 @@ xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
extern int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
+extern int
+xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
#endif /* __LINUX_SWIOTLB_XEN_H */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index d6fe062cad6b..fb2ea8f26552 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -19,10 +19,11 @@ void xen_arch_resume(void);
int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap;
-int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
- unsigned int address_bits);
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+ unsigned int address_bits,
+ dma_addr_t *dma_handle);
-void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
struct vm_area_struct;
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
diff --git a/init/Kconfig b/init/Kconfig
index 3ecd8a1178f1..6ba4329410a3 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -284,7 +284,7 @@ config AUDIT
config AUDITSYSCALL
bool "Enable system-call auditing support"
- depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT))
+ depends on AUDIT && (X86 || PARISC || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT))
default y if SECURITY_SELINUX
help
Enable low-overhead system-call auditing infrastructure that
@@ -354,7 +354,8 @@ config VIRT_CPU_ACCOUNTING_NATIVE
config VIRT_CPU_ACCOUNTING_GEN
bool "Full dynticks CPU time accounting"
- depends on HAVE_CONTEXT_TRACKING && 64BIT
+ depends on HAVE_CONTEXT_TRACKING
+ depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
select VIRT_CPU_ACCOUNTING
select CONTEXT_TRACKING
help
@@ -844,7 +845,7 @@ config NUMA_BALANCING_DEFAULT_ENABLED
default y
depends on NUMA_BALANCING
help
- If set, autonumic NUMA balancing will be enabled if running on a NUMA
+ If set, automatic NUMA balancing will be enabled if running on a NUMA
machine.
config NUMA_BALANCING
@@ -855,7 +856,7 @@ config NUMA_BALANCING
help
This option adds support for automatic NUMA aware memory/task placement.
The mechanism is quite primitive and is based on migrating memory when
- it is references to the node the task is running on.
+ it has references to the node the task is running on.
This system will be inactive on UMA systems.
@@ -1668,6 +1669,18 @@ config BASE_SMALL
default 0 if BASE_FULL
default 1 if !BASE_FULL
+config SYSTEM_TRUSTED_KEYRING
+ bool "Provide system-wide ring of trusted keys"
+ depends on KEYS
+ help
+ Provide a system keyring to which trusted keys can be added. Keys in
+ the keyring are considered to be trusted. Keys may be added at will
+ by the kernel from compiled-in data and from hardware key stores, but
+ userspace may only add extra keys if those keys can be verified by
+ keys already in the keyring.
+
+ Keys in this keyring are used by module signature checking.
+
menuconfig MODULES
bool "Enable loadable module support"
option modules
@@ -1741,6 +1754,7 @@ config MODULE_SRCVERSION_ALL
config MODULE_SIG
bool "Module signature verification"
depends on MODULES
+ select SYSTEM_TRUSTED_KEYRING
select KEYS
select CRYPTO
select ASYMMETRIC_KEY_TYPE
diff --git a/init/main.c b/init/main.c
index 63d3e8f2970c..dad19cf42dba 100644
--- a/init/main.c
+++ b/init/main.c
@@ -137,6 +137,13 @@ static char *execute_command;
static char *ramdisk_execute_command;
/*
+ * Used to generate warnings if static_key manipulation functions are used
+ * before jump_label_init is called.
+ */
+bool static_key_initialized __read_mostly = false;
+EXPORT_SYMBOL_GPL(static_key_initialized);
+
+/*
* If set, this is an indication to the drivers that reset the underlying
* device before going ahead with the initialization otherwise driver might
* rely on the BIOS and skip the reset operation.
@@ -693,7 +700,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
if (preempt_count() != count) {
sprintf(msgbuf, "preemption imbalance ");
- preempt_count() = count;
+ preempt_count_set(count);
}
if (irqs_disabled()) {
strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 130dfece27ac..b0e99deb6d05 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -62,7 +62,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
return err;
}
-static int proc_ipc_callback_dointvec(ctl_table *table, int write,
+static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
@@ -72,7 +72,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
memcpy(&ipc_table, table, sizeof(ipc_table));
ipc_table.data = get_ipc(table);
- rc = proc_dointvec(&ipc_table, write, buffer, lenp, ppos);
+ rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
if (write && !rc && lenp_bef == *lenp)
/*
@@ -152,15 +152,13 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
#define proc_ipc_dointvec NULL
#define proc_ipc_dointvec_minmax NULL
#define proc_ipc_dointvec_minmax_orphans NULL
-#define proc_ipc_callback_dointvec NULL
+#define proc_ipc_callback_dointvec_minmax NULL
#define proc_ipcauto_dointvec_minmax NULL
#endif
static int zero;
static int one = 1;
-#ifdef CONFIG_CHECKPOINT_RESTORE
static int int_max = INT_MAX;
-#endif
static struct ctl_table ipc_kern_table[] = {
{
@@ -198,21 +196,27 @@ static struct ctl_table ipc_kern_table[] = {
.data = &init_ipc_ns.msg_ctlmax,
.maxlen = sizeof (init_ipc_ns.msg_ctlmax),
.mode = 0644,
- .proc_handler = proc_ipc_dointvec,
+ .proc_handler = proc_ipc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &int_max,
},
{
.procname = "msgmni",
.data = &init_ipc_ns.msg_ctlmni,
.maxlen = sizeof (init_ipc_ns.msg_ctlmni),
.mode = 0644,
- .proc_handler = proc_ipc_callback_dointvec,
+ .proc_handler = proc_ipc_callback_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &int_max,
},
{
.procname = "msgmnb",
.data = &init_ipc_ns.msg_ctlmnb,
.maxlen = sizeof (init_ipc_ns.msg_ctlmnb),
.mode = 0644,
- .proc_handler = proc_ipc_dointvec,
+ .proc_handler = proc_ipc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &int_max,
},
{
.procname = "sem",
diff --git a/kernel/Makefile b/kernel/Makefile
index 1ce47553fb02..9a52eb5bf689 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -6,9 +6,9 @@ obj-y = fork.o exec_domain.o panic.o \
cpu.o exit.o itimer.o time.o softirq.o resource.o \
sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
- rcupdate.o extable.o params.o posix-timers.o \
+ extable.o params.o posix-timers.o \
kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \
- hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
+ hrtimer.o rwsem.o nsproxy.o semaphore.o \
notifier.o ksysfs.o cred.o reboot.o \
async.o range.o groups.o lglock.o smpboot.o
@@ -27,6 +27,7 @@ obj-y += power/
obj-y += printk/
obj-y += cpu/
obj-y += irq/
+obj-y += rcu/
obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
obj-$(CONFIG_FREEZER) += freezer.o
@@ -54,8 +55,9 @@ obj-$(CONFIG_SMP) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
obj-$(CONFIG_UID16) += uid16.o
+obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_MODULE_SIG) += module_signing.o modsign_pubkey.o modsign_certificate.o
+obj-$(CONFIG_MODULE_SIG) += module_signing.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o
obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
obj-$(CONFIG_KEXEC) += kexec.o
@@ -81,12 +83,6 @@ obj-$(CONFIG_KGDB) += debug/
obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
obj-$(CONFIG_SECCOMP) += seccomp.o
-obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
-obj-$(CONFIG_TREE_RCU) += rcutree.o
-obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
-obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
-obj-$(CONFIG_TINY_RCU) += rcutiny.o
-obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o
obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
@@ -141,19 +137,52 @@ targets += timeconst.h
$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
$(call if_changed,bc)
-ifeq ($(CONFIG_MODULE_SIG),y)
+###############################################################################
+#
+# Roll all the X.509 certificates that we can find together and pull them into
+# the kernel so that they get loaded into the system trusted keyring during
+# boot.
#
-# Pull the signing certificate and any extra certificates into the kernel
+# We look in the source root and the build root for all files whose name ends
+# in ".x509". Unfortunately, this will generate duplicate filenames, so we
+# have make canonicalise the pathnames and then sort them to discard the
+# duplicates.
#
+###############################################################################
+ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
+X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509)
+X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509
+X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
+ $(or $(realpath $(CERT)),$(CERT))))
+
+ifeq ($(X509_CERTIFICATES),)
+$(warning *** No X.509 certificates found ***)
+endif
+
+ifneq ($(wildcard $(obj)/.x509.list),)
+ifneq ($(shell cat $(obj)/.x509.list),$(X509_CERTIFICATES))
+$(info X.509 certificate list changed)
+$(shell rm $(obj)/.x509.list)
+endif
+endif
+
+kernel/system_certificates.o: $(obj)/x509_certificate_list
-quiet_cmd_touch = TOUCH $@
- cmd_touch = touch $@
+quiet_cmd_x509certs = CERTS $@
+ cmd_x509certs = cat $(X509_CERTIFICATES) /dev/null >$@ $(foreach X509,$(X509_CERTIFICATES),; echo " - Including cert $(X509)")
-extra_certificates:
- $(call cmd,touch)
+targets += $(obj)/x509_certificate_list
+$(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list
+ $(call if_changed,x509certs)
-kernel/modsign_certificate.o: signing_key.x509 extra_certificates
+targets += $(obj)/.x509.list
+$(obj)/.x509.list:
+ @echo $(X509_CERTIFICATES) >$@
+clean-files := x509_certificate_list .x509.list
+endif
+
+ifeq ($(CONFIG_MODULE_SIG),y)
###############################################################################
#
# If module signing is requested, say by allyesconfig, but a key has not been
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 0c9b862292b2..e8ca97b5c386 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -10,6 +10,7 @@
#include <linux/mmzone.h>
#include <linux/kbuild.h>
#include <linux/page_cgroup.h>
+#include <linux/log2.h>
void foo(void)
{
@@ -17,5 +18,8 @@ void foo(void)
DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
DEFINE(NR_PCG_FLAGS, __NR_PCG_FLAGS);
+#ifdef CONFIG_SMP
+ DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
+#endif
/* End of constants */
}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2418b6e71a85..e0839bcd48c8 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -125,38 +125,6 @@ struct cfent {
};
/*
- * CSS ID -- ID per subsys's Cgroup Subsys State(CSS). used only when
- * cgroup_subsys->use_id != 0.
- */
-#define CSS_ID_MAX (65535)
-struct css_id {
- /*
- * The css to which this ID points. This pointer is set to valid value
- * after cgroup is populated. If cgroup is removed, this will be NULL.
- * This pointer is expected to be RCU-safe because destroy()
- * is called after synchronize_rcu(). But for safe use, css_tryget()
- * should be used for avoiding race.
- */
- struct cgroup_subsys_state __rcu *css;
- /*
- * ID of this css.
- */
- unsigned short id;
- /*
- * Depth in hierarchy which this ID belongs to.
- */
- unsigned short depth;
- /*
- * ID is freed by RCU. (and lookup routine is RCU safe.)
- */
- struct rcu_head rcu_head;
- /*
- * Hierarchy of CSS ID belongs to.
- */
- unsigned short stack[0]; /* Array of Length (depth+1) */
-};
-
-/*
* cgroup_event represents events which userspace want to receive.
*/
struct cgroup_event {
@@ -387,9 +355,6 @@ struct cgrp_cset_link {
static struct css_set init_css_set;
static struct cgrp_cset_link init_cgrp_cset_link;
-static int cgroup_init_idr(struct cgroup_subsys *ss,
- struct cgroup_subsys_state *css);
-
/*
* css_set_lock protects the list of css_set objects, and the chain of
* tasks off each css_set. Nests outside task->alloc_lock due to
@@ -841,8 +806,6 @@ static struct backing_dev_info cgroup_backing_dev_info = {
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
-static int alloc_css_id(struct cgroup_subsys_state *child_css);
-
static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
{
struct inode *inode = new_inode(sb);
@@ -2039,7 +2002,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
/* @tsk either already exited or can't exit until the end */
if (tsk->flags & PF_EXITING)
- continue;
+ goto next;
/* as per above, nr_threads may decrease, but not increase. */
BUG_ON(i >= group_size);
@@ -2047,7 +2010,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
ent.cgrp = task_cgroup_from_root(tsk, root);
/* nothing to do if this task is already in the cgroup */
if (ent.cgrp == cgrp)
- continue;
+ goto next;
/*
* saying GFP_ATOMIC has no effect here because we did prealloc
* earlier, but it's good form to communicate our expectations.
@@ -2055,7 +2018,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
BUG_ON(retval != 0);
i++;
-
+ next:
if (!threadgroup)
break;
} while_each_thread(leader, tsk);
@@ -3188,11 +3151,9 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
WARN_ON_ONCE(!rcu_read_lock_held());
- /* if first iteration, visit the leftmost descendant */
- if (!pos) {
- next = css_leftmost_descendant(root);
- return next != root ? next : NULL;
- }
+ /* if first iteration, visit leftmost descendant which may be @root */
+ if (!pos)
+ return css_leftmost_descendant(root);
/* if we visited @root, we're done */
if (pos == root)
@@ -4242,21 +4203,6 @@ static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
goto err;
}
}
-
- /* This cgroup is ready now */
- for_each_root_subsys(cgrp->root, ss) {
- struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
- struct css_id *id = rcu_dereference_protected(css->id, true);
-
- /*
- * Update id->css pointer and make this css visible from
- * CSS ID functions. This pointer will be dereferened
- * from RCU-read-side without locks.
- */
- if (id)
- rcu_assign_pointer(id->css, css);
- }
-
return 0;
err:
cgroup_clear_dir(cgrp, subsys_mask);
@@ -4325,7 +4271,6 @@ static void init_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss,
css->cgroup = cgrp;
css->ss = ss;
css->flags = 0;
- css->id = NULL;
if (cgrp->parent)
css->parent = cgroup_css(cgrp->parent, ss);
@@ -4457,12 +4402,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
goto err_free_all;
init_css(css, ss, cgrp);
-
- if (ss->use_id) {
- err = alloc_css_id(css);
- if (err)
- goto err_free_all;
- }
}
/*
@@ -4927,12 +4866,6 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
/* our new subsystem will be attached to the dummy hierarchy. */
init_css(css, ss, cgroup_dummy_top);
- /* init_idr must be after init_css() because it sets css->id. */
- if (ss->use_id) {
- ret = cgroup_init_idr(ss, css);
- if (ret)
- goto err_unload;
- }
/*
* Now we need to entangle the css into the existing css_sets. unlike
@@ -4998,9 +4931,6 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
offline_css(cgroup_css(cgroup_dummy_top, ss));
- if (ss->use_id)
- idr_destroy(&ss->idr);
-
/* deassign the subsys_id */
cgroup_subsys[ss->subsys_id] = NULL;
@@ -5027,8 +4957,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
/*
* remove subsystem's css from the cgroup_dummy_top and free it -
* need to free before marking as null because ss->css_free needs
- * the cgrp->subsys pointer to find their state. note that this
- * also takes care of freeing the css_id.
+ * the cgrp->subsys pointer to find their state.
*/
ss->css_free(cgroup_css(cgroup_dummy_top, ss));
RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL);
@@ -5099,8 +5028,6 @@ int __init cgroup_init(void)
for_each_builtin_subsys(ss, i) {
if (!ss->early_init)
cgroup_init_subsys(ss);
- if (ss->use_id)
- cgroup_init_idr(ss, init_css_set.subsys[ss->subsys_id]);
}
/* allocate id for the dummy hierarchy */
@@ -5520,181 +5447,6 @@ static int __init cgroup_disable(char *str)
}
__setup("cgroup_disable=", cgroup_disable);
-/*
- * Functons for CSS ID.
- */
-
-/* to get ID other than 0, this should be called when !cgroup_is_dead() */
-unsigned short css_id(struct cgroup_subsys_state *css)
-{
- struct css_id *cssid;
-
- /*
- * This css_id() can return correct value when somone has refcnt
- * on this or this is under rcu_read_lock(). Once css->id is allocated,
- * it's unchanged until freed.
- */
- cssid = rcu_dereference_raw(css->id);
-
- if (cssid)
- return cssid->id;
- return 0;
-}
-EXPORT_SYMBOL_GPL(css_id);
-
-/**
- * css_is_ancestor - test "root" css is an ancestor of "child"
- * @child: the css to be tested.
- * @root: the css supporsed to be an ancestor of the child.
- *
- * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
- * this function reads css->id, the caller must hold rcu_read_lock().
- * But, considering usual usage, the csses should be valid objects after test.
- * Assuming that the caller will do some action to the child if this returns
- * returns true, the caller must take "child";s reference count.
- * If "child" is valid object and this returns true, "root" is valid, too.
- */
-
-bool css_is_ancestor(struct cgroup_subsys_state *child,
- const struct cgroup_subsys_state *root)
-{
- struct css_id *child_id;
- struct css_id *root_id;
-
- child_id = rcu_dereference(child->id);
- if (!child_id)
- return false;
- root_id = rcu_dereference(root->id);
- if (!root_id)
- return false;
- if (child_id->depth < root_id->depth)
- return false;
- if (child_id->stack[root_id->depth] != root_id->id)
- return false;
- return true;
-}
-
-void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
-{
- struct css_id *id = rcu_dereference_protected(css->id, true);
-
- /* When this is called before css_id initialization, id can be NULL */
- if (!id)
- return;
-
- BUG_ON(!ss->use_id);
-
- rcu_assign_pointer(id->css, NULL);
- rcu_assign_pointer(css->id, NULL);
- spin_lock(&ss->id_lock);
- idr_remove(&ss->idr, id->id);
- spin_unlock(&ss->id_lock);
- kfree_rcu(id, rcu_head);
-}
-EXPORT_SYMBOL_GPL(free_css_id);
-
-/*
- * This is called by init or create(). Then, calls to this function are
- * always serialized (By cgroup_mutex() at create()).
- */
-
-static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
-{
- struct css_id *newid;
- int ret, size;
-
- BUG_ON(!ss->use_id);
-
- size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
- newid = kzalloc(size, GFP_KERNEL);
- if (!newid)
- return ERR_PTR(-ENOMEM);
-
- idr_preload(GFP_KERNEL);
- spin_lock(&ss->id_lock);
- /* Don't use 0. allocates an ID of 1-65535 */
- ret = idr_alloc(&ss->idr, newid, 1, CSS_ID_MAX + 1, GFP_NOWAIT);
- spin_unlock(&ss->id_lock);
- idr_preload_end();
-
- /* Returns error when there are no free spaces for new ID.*/
- if (ret < 0)
- goto err_out;
-
- newid->id = ret;
- newid->depth = depth;
- return newid;
-err_out:
- kfree(newid);
- return ERR_PTR(ret);
-
-}
-
-static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
- struct cgroup_subsys_state *rootcss)
-{
- struct css_id *newid;
-
- spin_lock_init(&ss->id_lock);
- idr_init(&ss->idr);
-
- newid = get_new_cssid(ss, 0);
- if (IS_ERR(newid))
- return PTR_ERR(newid);
-
- newid->stack[0] = newid->id;
- RCU_INIT_POINTER(newid->css, rootcss);
- RCU_INIT_POINTER(rootcss->id, newid);
- return 0;
-}
-
-static int alloc_css_id(struct cgroup_subsys_state *child_css)
-{
- struct cgroup_subsys_state *parent_css = css_parent(child_css);
- struct css_id *child_id, *parent_id;
- int i, depth;
-
- parent_id = rcu_dereference_protected(parent_css->id, true);
- depth = parent_id->depth + 1;
-
- child_id = get_new_cssid(child_css->ss, depth);
- if (IS_ERR(child_id))
- return PTR_ERR(child_id);
-
- for (i = 0; i < depth; i++)
- child_id->stack[i] = parent_id->stack[i];
- child_id->stack[depth] = child_id->id;
- /*
- * child_id->css pointer will be set after this cgroup is available
- * see cgroup_populate_dir()
- */
- rcu_assign_pointer(child_css->id, child_id);
-
- return 0;
-}
-
-/**
- * css_lookup - lookup css by id
- * @ss: cgroup subsys to be looked into.
- * @id: the id
- *
- * Returns pointer to cgroup_subsys_state if there is valid one with id.
- * NULL if not. Should be called under rcu_read_lock()
- */
-struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
-{
- struct css_id *cssid = NULL;
-
- BUG_ON(!ss->use_id);
- cssid = idr_find(&ss->idr, id);
-
- if (unlikely(!cssid))
- return NULL;
-
- return rcu_dereference(cssid->css);
-}
-EXPORT_SYMBOL_GPL(css_lookup);
-
/**
* css_from_dir - get corresponding css from the dentry of a cgroup dir
* @dentry: directory dentry of interest
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 859c8dfd78a1..e5f3917aa05b 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -120,7 +120,7 @@ void context_tracking_user_enter(void)
* instead of preempt_schedule() to exit user context if needed before
* calling the scheduler.
*/
-void __sched notrace preempt_schedule_context(void)
+asmlinkage void __sched notrace preempt_schedule_context(void)
{
enum ctx_state prev_ctx;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d7f07a2da5a6..63aa50d7ce1e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -308,6 +308,23 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
}
smpboot_park_threads(cpu);
+ /*
+ * By now we've cleared cpu_active_mask, wait for all preempt-disabled
+ * and RCU users of this state to go away such that all new such users
+ * will observe it.
+ *
+ * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
+ * not imply sync_sched(), so explicitly call both.
+ */
+#ifdef CONFIG_PREEMPT
+ synchronize_sched();
+#endif
+ synchronize_rcu();
+
+ /*
+ * So now all preempt/rcu users must observe !cpu_active().
+ */
+
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
if (err) {
/* CPU didn't die: tell everyone. Can't complain. */
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
index e695c0a0bcb5..988573a9a387 100644
--- a/kernel/cpu/idle.c
+++ b/kernel/cpu/idle.c
@@ -44,7 +44,7 @@ static inline int cpu_idle_poll(void)
rcu_idle_enter();
trace_cpu_idle_rcuidle(0, smp_processor_id());
local_irq_enable();
- while (!need_resched())
+ while (!tif_need_resched())
cpu_relax();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
rcu_idle_exit();
@@ -92,8 +92,7 @@ static void cpu_idle_loop(void)
if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
cpu_idle_poll();
} else {
- current_clr_polling();
- if (!need_resched()) {
+ if (!current_clr_polling_and_test()) {
stop_critical_timings();
rcu_idle_enter();
arch_cpu_idle();
@@ -103,9 +102,16 @@ static void cpu_idle_loop(void)
} else {
local_irq_enable();
}
- current_set_polling();
+ __current_set_polling();
}
arch_cpu_idle_exit();
+ /*
+ * We need to test and propagate the TIF_NEED_RESCHED
+ * bit here because we might not have sent the
+ * reschedule IPI to idle tasks.
+ */
+ if (tif_need_resched())
+ set_preempt_need_resched();
}
tick_nohz_idle_exit();
schedule_preempt_disabled();
@@ -129,7 +135,7 @@ void cpu_startup_entry(enum cpuhp_state state)
*/
boot_init_stack_canary();
#endif
- current_set_polling();
+ __current_set_polling();
arch_cpu_idle_prepare();
cpu_idle_loop();
}
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 0506d447aed2..7d2f35e5df2f 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -575,8 +575,12 @@ return_normal:
raw_spin_lock(&dbg_slave_lock);
#ifdef CONFIG_SMP
+ /* If send_ready set, slaves are already waiting */
+ if (ks->send_ready)
+ atomic_set(ks->send_ready, 1);
+
/* Signal the other CPUs to enter kgdb_wait() */
- if ((!kgdb_single_step) && kgdb_do_roundup)
+ else if ((!kgdb_single_step) && kgdb_do_roundup)
kgdb_roundup_cpus(flags);
#endif
@@ -678,11 +682,11 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
if (arch_kgdb_ops.enable_nmi)
arch_kgdb_ops.enable_nmi(0);
+ memset(ks, 0, sizeof(struct kgdb_state));
ks->cpu = raw_smp_processor_id();
ks->ex_vector = evector;
ks->signo = signo;
ks->err_code = ecode;
- ks->kgdb_usethreadid = 0;
ks->linux_regs = regs;
if (kgdb_reenter_check(ks))
@@ -732,6 +736,30 @@ int kgdb_nmicallback(int cpu, void *regs)
return 1;
}
+int kgdb_nmicallin(int cpu, int trapnr, void *regs, atomic_t *send_ready)
+{
+#ifdef CONFIG_SMP
+ if (!kgdb_io_ready(0) || !send_ready)
+ return 1;
+
+ if (kgdb_info[cpu].enter_kgdb == 0) {
+ struct kgdb_state kgdb_var;
+ struct kgdb_state *ks = &kgdb_var;
+
+ memset(ks, 0, sizeof(struct kgdb_state));
+ ks->cpu = cpu;
+ ks->ex_vector = trapnr;
+ ks->signo = SIGTRAP;
+ ks->err_code = KGDB_KDB_REASON_SYSTEM_NMI;
+ ks->linux_regs = regs;
+ ks->send_ready = send_ready;
+ kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
+ return 0;
+ }
+#endif
+ return 1;
+}
+
static void kgdb_console_write(struct console *co, const char *s,
unsigned count)
{
diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h
index 2235967e78b0..572aa4f5677c 100644
--- a/kernel/debug/debug_core.h
+++ b/kernel/debug/debug_core.h
@@ -26,6 +26,7 @@ struct kgdb_state {
unsigned long threadid;
long kgdb_usethreadid;
struct pt_regs *linux_regs;
+ atomic_t *send_ready;
};
/* Exception state values */
@@ -74,11 +75,13 @@ extern int kdb_stub(struct kgdb_state *ks);
extern int kdb_parse(const char *cmdstr);
extern int kdb_common_init_state(struct kgdb_state *ks);
extern int kdb_common_deinit_state(void);
+#define KGDB_KDB_REASON_SYSTEM_NMI KDB_REASON_SYSTEM_NMI
#else /* ! CONFIG_KGDB_KDB */
static inline int kdb_stub(struct kgdb_state *ks)
{
return DBG_PASS_EVENT;
}
+#define KGDB_KDB_REASON_SYSTEM_NMI 0
#endif /* CONFIG_KGDB_KDB */
#endif /* _DEBUG_CORE_H_ */
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index 328d18ef31e4..8859ca34dcfe 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -69,7 +69,10 @@ int kdb_stub(struct kgdb_state *ks)
if (atomic_read(&kgdb_setting_breakpoint))
reason = KDB_REASON_KEYBOARD;
- if (in_nmi())
+ if (ks->err_code == KDB_REASON_SYSTEM_NMI && ks->signo == SIGTRAP)
+ reason = KDB_REASON_SYSTEM_NMI;
+
+ else if (in_nmi())
reason = KDB_REASON_NMI;
for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 00eb8f7fbf41..0b097c8a1e50 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1200,6 +1200,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
instruction_pointer(regs));
kdb_dumpregs(regs);
break;
+ case KDB_REASON_SYSTEM_NMI:
+ kdb_printf("due to System NonMaskable Interrupt\n");
+ break;
case KDB_REASON_NMI:
kdb_printf("due to NonMaskable Interrupt @ "
kdb_machreg_fmt "\n",
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d49a9d29334c..65f8ec97285c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -175,8 +175,8 @@ int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
-static atomic_t perf_sample_allowed_ns __read_mostly =
- ATOMIC_INIT( DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100);
+static int perf_sample_allowed_ns __read_mostly =
+ DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
void update_perf_cpu_limits(void)
{
@@ -184,7 +184,7 @@ void update_perf_cpu_limits(void)
tmp *= sysctl_perf_cpu_time_max_percent;
do_div(tmp, 100);
- atomic_set(&perf_sample_allowed_ns, tmp);
+ ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
}
static int perf_rotate_context(struct perf_cpu_context *cpuctx);
@@ -193,7 +193,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
- int ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
@@ -228,14 +228,15 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
* we detect that events are taking too long.
*/
#define NR_ACCUMULATED_SAMPLES 128
-DEFINE_PER_CPU(u64, running_sample_length);
+static DEFINE_PER_CPU(u64, running_sample_length);
void perf_sample_event_took(u64 sample_len_ns)
{
u64 avg_local_sample_len;
u64 local_samples_len;
+ u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
- if (atomic_read(&perf_sample_allowed_ns) == 0)
+ if (allowed_ns == 0)
return;
/* decay the counter by 1 average sample */
@@ -251,7 +252,7 @@ void perf_sample_event_took(u64 sample_len_ns)
*/
avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
- if (avg_local_sample_len <= atomic_read(&perf_sample_allowed_ns))
+ if (avg_local_sample_len <= allowed_ns)
return;
if (max_samples_per_tick <= 1)
@@ -262,10 +263,9 @@ void perf_sample_event_took(u64 sample_len_ns)
perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
printk_ratelimited(KERN_WARNING
- "perf samples too long (%lld > %d), lowering "
+ "perf samples too long (%lld > %lld), lowering "
"kernel.perf_event_max_sample_rate to %d\n",
- avg_local_sample_len,
- atomic_read(&perf_sample_allowed_ns),
+ avg_local_sample_len, allowed_ns,
sysctl_perf_event_sample_rate);
update_perf_cpu_limits();
@@ -899,6 +899,7 @@ static void unclone_ctx(struct perf_event_context *ctx)
put_ctx(ctx->parent_ctx);
ctx->parent_ctx = NULL;
}
+ ctx->generation++;
}
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
@@ -1136,6 +1137,8 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
ctx->nr_events++;
if (event->attr.inherit_stat)
ctx->nr_stat++;
+
+ ctx->generation++;
}
/*
@@ -1201,6 +1204,9 @@ static void perf_event__header_size(struct perf_event *event)
if (sample_type & PERF_SAMPLE_DATA_SRC)
size += sizeof(data->data_src.val);
+ if (sample_type & PERF_SAMPLE_TRANSACTION)
+ size += sizeof(data->txn);
+
event->header_size = size;
}
@@ -1310,6 +1316,8 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
*/
if (event->state > PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_OFF;
+
+ ctx->generation++;
}
static void perf_group_detach(struct perf_event *event)
@@ -2146,22 +2154,38 @@ static void ctx_sched_out(struct perf_event_context *ctx,
}
/*
- * Test whether two contexts are equivalent, i.e. whether they
- * have both been cloned from the same version of the same context
- * and they both have the same number of enabled events.
- * If the number of enabled events is the same, then the set
- * of enabled events should be the same, because these are both
- * inherited contexts, therefore we can't access individual events
- * in them directly with an fd; we can only enable/disable all
- * events via prctl, or enable/disable all events in a family
- * via ioctl, which will have the same effect on both contexts.
+ * Test whether two contexts are equivalent, i.e. whether they have both been
+ * cloned from the same version of the same context.
+ *
+ * Equivalence is measured using a generation number in the context that is
+ * incremented on each modification to it; see unclone_ctx(), list_add_event()
+ * and list_del_event().
*/
static int context_equiv(struct perf_event_context *ctx1,
struct perf_event_context *ctx2)
{
- return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
- && ctx1->parent_gen == ctx2->parent_gen
- && !ctx1->pin_count && !ctx2->pin_count;
+ /* Pinning disables the swap optimization */
+ if (ctx1->pin_count || ctx2->pin_count)
+ return 0;
+
+ /* If ctx1 is the parent of ctx2 */
+ if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
+ return 1;
+
+ /* If ctx2 is the parent of ctx1 */
+ if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
+ return 1;
+
+ /*
+ * If ctx1 and ctx2 have the same parent; we flatten the parent
+ * hierarchy, see perf_event_init_context().
+ */
+ if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
+ ctx1->parent_gen == ctx2->parent_gen)
+ return 1;
+
+ /* Unmatched */
+ return 0;
}
static void __perf_event_sync_stat(struct perf_event *event,
@@ -2244,7 +2268,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
{
struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
struct perf_event_context *next_ctx;
- struct perf_event_context *parent;
+ struct perf_event_context *parent, *next_parent;
struct perf_cpu_context *cpuctx;
int do_switch = 1;
@@ -2256,10 +2280,18 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
return;
rcu_read_lock();
- parent = rcu_dereference(ctx->parent_ctx);
next_ctx = next->perf_event_ctxp[ctxn];
- if (parent && next_ctx &&
- rcu_dereference(next_ctx->parent_ctx) == parent) {
+ if (!next_ctx)
+ goto unlock;
+
+ parent = rcu_dereference(ctx->parent_ctx);
+ next_parent = rcu_dereference(next_ctx->parent_ctx);
+
+ /* If neither context have a parent context; they cannot be clones. */
+ if (!parent && !next_parent)
+ goto unlock;
+
+ if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
/*
* Looks like the two contexts are clones, so we might be
* able to optimize the context switch. We lock both
@@ -2287,6 +2319,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
raw_spin_unlock(&next_ctx->lock);
raw_spin_unlock(&ctx->lock);
}
+unlock:
rcu_read_unlock();
if (do_switch) {
@@ -4572,6 +4605,9 @@ void perf_output_sample(struct perf_output_handle *handle,
if (sample_type & PERF_SAMPLE_DATA_SRC)
perf_output_put(handle, data->data_src.val);
+ if (sample_type & PERF_SAMPLE_TRANSACTION)
+ perf_output_put(handle, data->txn);
+
if (!event->attr.watermark) {
int wakeup_events = event->attr.wakeup_events;
@@ -5100,24 +5136,23 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
unsigned int size;
char tmp[16];
char *buf = NULL;
- const char *name;
-
- memset(tmp, 0, sizeof(tmp));
+ char *name;
if (file) {
struct inode *inode;
dev_t dev;
- /*
- * d_path works from the end of the rb backwards, so we
- * need to add enough zero bytes after the string to handle
- * the 64bit alignment we do later.
- */
- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
+
+ buf = kmalloc(PATH_MAX, GFP_KERNEL);
if (!buf) {
name = strncpy(tmp, "//enomem", sizeof(tmp));
goto got_name;
}
- name = d_path(&file->f_path, buf, PATH_MAX);
+ /*
+ * d_path() works from the end of the rb backwards, so we
+ * need to add enough zero bytes after the string to handle
+ * the 64bit alignment we do later.
+ */
+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
if (IS_ERR(name)) {
name = strncpy(tmp, "//toolong", sizeof(tmp));
goto got_name;
@@ -5130,21 +5165,19 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
min = MINOR(dev);
} else {
- if (arch_vma_name(mmap_event->vma)) {
- name = strncpy(tmp, arch_vma_name(mmap_event->vma),
- sizeof(tmp) - 1);
+ name = (char *)arch_vma_name(vma);
+ if (name) {
+ name = strncpy(tmp, name, sizeof(tmp) - 1);
tmp[sizeof(tmp) - 1] = '\0';
goto got_name;
}
- if (!vma->vm_mm) {
- name = strncpy(tmp, "[vdso]", sizeof(tmp));
- goto got_name;
- } else if (vma->vm_start <= vma->vm_mm->start_brk &&
+ if (vma->vm_start <= vma->vm_mm->start_brk &&
vma->vm_end >= vma->vm_mm->brk) {
name = strncpy(tmp, "[heap]", sizeof(tmp));
goto got_name;
- } else if (vma->vm_start <= vma->vm_mm->start_stack &&
+ }
+ if (vma->vm_start <= vma->vm_mm->start_stack &&
vma->vm_end >= vma->vm_mm->start_stack) {
name = strncpy(tmp, "[stack]", sizeof(tmp));
goto got_name;
@@ -5155,7 +5188,14 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
}
got_name:
- size = ALIGN(strlen(name)+1, sizeof(u64));
+ /*
+ * Since our buffer works in 8 byte units we need to align our string
+ * size to a multiple of 8. However, we must guarantee the tail end is
+ * zero'd out to avoid leaking random bits to userspace.
+ */
+ size = strlen(name)+1;
+ while (!IS_ALIGNED(size, sizeof(u64)))
+ name[size++] = '\0';
mmap_event->file_name = name;
mmap_event->file_size = size;
@@ -6292,6 +6332,7 @@ type_show(struct device *dev, struct device_attribute *attr, char *page)
return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}
+static DEVICE_ATTR_RO(type);
static ssize_t
perf_event_mux_interval_ms_show(struct device *dev,
@@ -6336,17 +6377,19 @@ perf_event_mux_interval_ms_store(struct device *dev,
return count;
}
+static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
-static struct device_attribute pmu_dev_attrs[] = {
- __ATTR_RO(type),
- __ATTR_RW(perf_event_mux_interval_ms),
- __ATTR_NULL,
+static struct attribute *pmu_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_perf_event_mux_interval_ms.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(pmu_dev);
static int pmu_bus_running;
static struct bus_type pmu_bus = {
.name = "event_source",
- .dev_attrs = pmu_dev_attrs,
+ .dev_groups = pmu_dev_groups,
};
static void pmu_dev_release(struct device *dev)
@@ -6767,6 +6810,10 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
if (ret)
return -EFAULT;
+ /* disabled for now */
+ if (attr->mmap2)
+ return -EINVAL;
+
if (attr->__reserved_1)
return -EINVAL;
@@ -7122,7 +7169,6 @@ SYSCALL_DEFINE5(perf_event_open,
}
perf_install_in_context(ctx, event, event->cpu);
- ++ctx->generation;
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
@@ -7205,7 +7251,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
perf_install_in_context(ctx, event, cpu);
- ++ctx->generation;
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index cd55144270b5..9c2ddfbf4525 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -87,10 +87,31 @@ again:
goto out;
/*
- * Publish the known good head. Rely on the full barrier implied
- * by atomic_dec_and_test() order the rb->head read and this
- * write.
+ * Since the mmap() consumer (userspace) can run on a different CPU:
+ *
+ * kernel user
+ *
+ * READ ->data_tail READ ->data_head
+ * smp_mb() (A) smp_rmb() (C)
+ * WRITE $data READ $data
+ * smp_wmb() (B) smp_mb() (D)
+ * STORE ->data_head WRITE ->data_tail
+ *
+ * Where A pairs with D, and B pairs with C.
+ *
+ * I don't think A needs to be a full barrier because we won't in fact
+ * write data until we see the store from userspace. So we simply don't
+ * issue the data WRITE until we observe it. Be conservative for now.
+ *
+ * OTOH, D needs to be a full barrier since it separates the data READ
+ * from the tail WRITE.
+ *
+ * For B a WMB is sufficient since it separates two WRITEs, and for C
+ * an RMB is sufficient since it separates two READs.
+ *
+ * See perf_output_begin().
*/
+ smp_wmb();
rb->user_page->data_head = head;
/*
@@ -154,9 +175,11 @@ int perf_output_begin(struct perf_output_handle *handle,
* Userspace could choose to issue a mb() before updating the
* tail pointer. So that all reads will be completed before the
* write is issued.
+ *
+ * See perf_output_put_handle().
*/
tail = ACCESS_ONCE(rb->user_page->data_tail);
- smp_rmb();
+ smp_mb();
offset = head = local_read(&rb->head);
head += size;
if (unlikely(!perf_output_space(rb, tail, offset, head)))
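The userspace half of the ordering table documented above pairs (C) with the kernel's wmb (B) and (D) with the kernel's mb (A). A minimal consumer sketch under stated assumptions: __sync_synchronize() stands in for the architecture-specific barriers a real tool would use, and wrapping/indexing into the mmap'd data area is left out.

#include <stdint.h>
#include <linux/perf_event.h>

/* Consumer side of the pairing described in perf_output_put_handle(). */
static void drain(struct perf_event_mmap_page *pg,
		  void (*consume)(uint64_t tail, uint64_t head))
{
	uint64_t head = pg->data_head;	/* READ ->data_head		*/
	__sync_synchronize();		/* (C) pairs with kernel (B)	*/

	consume(pg->data_tail, head);	/* READ $data			*/

	__sync_synchronize();		/* (D) pairs with kernel (A)	*/
	pg->data_tail = head;		/* WRITE ->data_tail		*/
}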
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index ad8e1bdca70e..ae9e1d2ef256 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -35,6 +35,7 @@
#include <linux/kdebug.h> /* notifier mechanism */
#include "../../mm/internal.h" /* munlock_vma_page */
#include <linux/percpu-rwsem.h>
+#include <linux/task_work.h>
#include <linux/uprobes.h>
@@ -1096,21 +1097,22 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
}
/* Slot allocation for XOL */
-static int xol_add_vma(struct xol_area *area)
+static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
- struct mm_struct *mm = current->mm;
int ret = -EALREADY;
down_write(&mm->mmap_sem);
if (mm->uprobes_state.xol_area)
goto fail;
- ret = -ENOMEM;
- /* Try to map as high as possible, this is only a hint. */
- area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
- if (area->vaddr & ~PAGE_MASK) {
- ret = area->vaddr;
- goto fail;
+ if (!area->vaddr) {
+ /* Try to map as high as possible, this is only a hint. */
+ area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
+ PAGE_SIZE, 0, 0);
+ if (area->vaddr & ~PAGE_MASK) {
+ ret = area->vaddr;
+ goto fail;
+ }
}
ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
@@ -1120,30 +1122,19 @@ static int xol_add_vma(struct xol_area *area)
smp_wmb(); /* pairs with get_xol_area() */
mm->uprobes_state.xol_area = area;
- ret = 0;
fail:
up_write(&mm->mmap_sem);
return ret;
}
-/*
- * get_xol_area - Allocate process's xol_area if necessary.
- * This area will be used for storing instructions for execution out of line.
- *
- * Returns the allocated area or NULL.
- */
-static struct xol_area *get_xol_area(void)
+static struct xol_area *__create_xol_area(unsigned long vaddr)
{
struct mm_struct *mm = current->mm;
- struct xol_area *area;
uprobe_opcode_t insn = UPROBE_SWBP_INSN;
+ struct xol_area *area;
- area = mm->uprobes_state.xol_area;
- if (area)
- goto ret;
-
- area = kzalloc(sizeof(*area), GFP_KERNEL);
+ area = kmalloc(sizeof(*area), GFP_KERNEL);
if (unlikely(!area))
goto out;
@@ -1155,13 +1146,14 @@ static struct xol_area *get_xol_area(void)
if (!area->page)
goto free_bitmap;
- /* allocate first slot of task's xol_area for the return probes */
+ area->vaddr = vaddr;
+ init_waitqueue_head(&area->wq);
+ /* Reserve the 1st slot for get_trampoline_vaddr() */
set_bit(0, area->bitmap);
- copy_to_page(area->page, 0, &insn, UPROBE_SWBP_INSN_SIZE);
atomic_set(&area->slot_count, 1);
- init_waitqueue_head(&area->wq);
+ copy_to_page(area->page, 0, &insn, UPROBE_SWBP_INSN_SIZE);
- if (!xol_add_vma(area))
+ if (!xol_add_vma(mm, area))
return area;
__free_page(area->page);
@@ -1170,9 +1162,25 @@ static struct xol_area *get_xol_area(void)
free_area:
kfree(area);
out:
+ return NULL;
+}
+
+/*
+ * get_xol_area - Allocate process's xol_area if necessary.
+ * This area will be used for storing instructions for execution out of line.
+ *
+ * Returns the allocated area or NULL.
+ */
+static struct xol_area *get_xol_area(void)
+{
+ struct mm_struct *mm = current->mm;
+ struct xol_area *area;
+
+ if (!mm->uprobes_state.xol_area)
+ __create_xol_area(0);
+
area = mm->uprobes_state.xol_area;
- ret:
- smp_read_barrier_depends(); /* pairs with wmb in xol_add_vma() */
+ smp_read_barrier_depends(); /* pairs with wmb in xol_add_vma() */
return area;
}
@@ -1345,14 +1353,6 @@ void uprobe_free_utask(struct task_struct *t)
}
/*
- * Called in context of a new clone/fork from copy_process.
- */
-void uprobe_copy_process(struct task_struct *t)
-{
- t->utask = NULL;
-}
-
-/*
* Allocate a uprobe_task object for the task if necessary.
* Called when the thread hits a breakpoint.
*
@@ -1367,6 +1367,90 @@ static struct uprobe_task *get_utask(void)
return current->utask;
}
+static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
+{
+ struct uprobe_task *n_utask;
+ struct return_instance **p, *o, *n;
+
+ n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
+ if (!n_utask)
+ return -ENOMEM;
+ t->utask = n_utask;
+
+ p = &n_utask->return_instances;
+ for (o = o_utask->return_instances; o; o = o->next) {
+ n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ *n = *o;
+ atomic_inc(&n->uprobe->ref);
+ n->next = NULL;
+
+ *p = n;
+ p = &n->next;
+ n_utask->depth++;
+ }
+
+ return 0;
+}
+
+static void uprobe_warn(struct task_struct *t, const char *msg)
+{
+ pr_warn("uprobe: %s:%d failed to %s\n",
+ current->comm, current->pid, msg);
+}
+
+static void dup_xol_work(struct callback_head *work)
+{
+ kfree(work);
+
+ if (current->flags & PF_EXITING)
+ return;
+
+ if (!__create_xol_area(current->utask->vaddr))
+ uprobe_warn(current, "dup xol area");
+}
+
+/*
+ * Called in context of a new clone/fork from copy_process.
+ */
+void uprobe_copy_process(struct task_struct *t, unsigned long flags)
+{
+ struct uprobe_task *utask = current->utask;
+ struct mm_struct *mm = current->mm;
+ struct callback_head *work;
+ struct xol_area *area;
+
+ t->utask = NULL;
+
+ if (!utask || !utask->return_instances)
+ return;
+
+ if (mm == t->mm && !(flags & CLONE_VFORK))
+ return;
+
+ if (dup_utask(t, utask))
+ return uprobe_warn(t, "dup ret instances");
+
+ /* The task can fork() after dup_xol_work() fails */
+ area = mm->uprobes_state.xol_area;
+ if (!area)
+ return uprobe_warn(t, "dup xol area");
+
+ if (mm == t->mm)
+ return;
+
+ /* TODO: move it into the union in uprobe_task */
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return uprobe_warn(t, "dup xol area");
+
+ utask->vaddr = area->vaddr;
+ init_task_work(work, dup_xol_work);
+ task_work_add(t, work, true);
+}
+
/*
* Current area->vaddr notion assumes the trampoline address is always
* equal to area->vaddr.
diff --git a/kernel/fork.c b/kernel/fork.c
index 086fe73ad6bd..f6d11fc67f72 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -817,9 +817,6 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
mm->pmd_huge_pte = NULL;
#endif
-#ifdef CONFIG_NUMA_BALANCING
- mm->first_nid = NUMA_PTE_SCAN_INIT;
-#endif
if (!mm_init(mm, tsk))
goto fail_nomem;
@@ -1313,7 +1310,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#endif
/* Perform scheduler related setup. Assign this task to a CPU. */
- sched_fork(p);
+ sched_fork(clone_flags, p);
retval = perf_event_init_task(p);
if (retval)
@@ -1373,7 +1370,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
#endif
- uprobe_copy_process(p);
/*
* sigaltstack should be cleared when sharing the same VM
*/
@@ -1490,6 +1486,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
perf_event_fork(p);
trace_task_newtask(p, clone_flags);
+ uprobe_copy_process(p, clone_flags);
return p;
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 3e97fb126e6b..8807061ca004 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -16,11 +16,12 @@
#include <linux/export.h>
#include <linux/sysctl.h>
#include <linux/utsname.h>
+#include <trace/events/sched.h>
/*
* The number of tasks checked:
*/
-unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
+int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
/*
* Limit number of tasks checked in a batch.
@@ -92,6 +93,9 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
t->last_switch_count = switch_count;
return;
}
+
+ trace_sched_process_hang(t);
+
if (!sysctl_hung_task_warnings)
return;
sysctl_hung_task_warnings--;
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index a3bb14fbe5c6..dc04c166c54d 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -214,7 +214,7 @@ void irq_enable(struct irq_desc *desc)
}
/**
- * irq_disable - Mark interupt disabled
+ * irq_disable - Mark interrupt disabled
* @desc: irq descriptor which should be disabled
*
* If the chip does not implement the irq_disable callback, we
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 706724e9835d..cf68bb36fe58 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -465,27 +465,26 @@ int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
-unsigned int irq_create_of_mapping(struct device_node *controller,
- const u32 *intspec, unsigned int intsize)
+unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
{
struct irq_domain *domain;
irq_hw_number_t hwirq;
unsigned int type = IRQ_TYPE_NONE;
unsigned int virq;
- domain = controller ? irq_find_host(controller) : irq_default_domain;
+ domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain;
if (!domain) {
pr_warn("no irq domain found for %s !\n",
- of_node_full_name(controller));
+ of_node_full_name(irq_data->np));
return 0;
}
/* If domain has no translation, then we assume interrupt line */
if (domain->ops->xlate == NULL)
- hwirq = intspec[0];
+ hwirq = irq_data->args[0];
else {
- if (domain->ops->xlate(domain, controller, intspec, intsize,
- &hwirq, &type))
+ if (domain->ops->xlate(domain, irq_data->np, irq_data->args,
+ irq_data->args_count, &hwirq, &type))
return 0;
}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 514bcfd855a8..481a13c43b17 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -786,7 +786,7 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
}
/*
- * Interrupts explicitely requested as threaded interupts want to be
+ * Interrupts explicitly requested as threaded interrupts want to be
* preemptible - many of them need to sleep and wait for slow busses to
* complete.
*/
@@ -956,7 +956,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
goto out_mput;
}
- sched_setscheduler(t, SCHED_FIFO, &param);
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
/*
* We keep the reference to the task struct even if
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 297a9247a3b3..9019f15deab2 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -58,6 +58,7 @@ static void jump_label_update(struct static_key *key, int enable);
void static_key_slow_inc(struct static_key *key)
{
+ STATIC_KEY_CHECK_USE();
if (atomic_inc_not_zero(&key->enabled))
return;
@@ -103,12 +104,14 @@ static void jump_label_update_timeout(struct work_struct *work)
void static_key_slow_dec(struct static_key *key)
{
+ STATIC_KEY_CHECK_USE();
__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);
void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
+ STATIC_KEY_CHECK_USE();
__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
@@ -116,6 +119,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
void jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
+ STATIC_KEY_CHECK_USE();
key->timeout = rl;
INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
@@ -212,6 +216,7 @@ void __init jump_label_init(void)
key->next = NULL;
#endif
}
+ static_key_initialized = true;
jump_label_unlock();
}
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 2a74f307c5ec..490afc03627e 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -921,7 +921,7 @@ static int kimage_load_segment(struct kimage *image,
* reinitialize them.
*
* - A machine specific part that includes the syscall number
- * and the copies the image to it's final destination. And
+ * and then copies the image to its final destination. And
* jumps into the image at entry.
*
* kexec does not sync, or unmount filesystems so if you need
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e16c45b9ee77..4e8e14c34e42 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4224,7 +4224,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
!rcu_lockdep_current_cpu_online()
? "RCU used illegally from offline CPU!\n"
- : rcu_is_cpu_idle()
+ : !rcu_is_watching()
? "RCU used illegally from idle CPU!\n"
: "",
rcu_scheduler_active, debug_locks);
@@ -4247,7 +4247,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
* So complain bitterly if someone does call rcu_read_lock(),
* rcu_read_lock_bh() and so on from extended quiescent states.
*/
- if (rcu_is_cpu_idle())
+ if (!rcu_is_watching())
printk("RCU used illegally from extended quiescent state!\n");
lockdep_print_held_locks(curr);
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index b2c71c5873e4..09220656d888 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -421,6 +421,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
seq_time(m, lt->min);
seq_time(m, lt->max);
seq_time(m, lt->total);
+ seq_time(m, lt->nr ? do_div(lt->total, lt->nr) : 0);
}
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
@@ -518,20 +519,20 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
}
if (i) {
seq_puts(m, "\n");
- seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
+ seq_line(m, '.', 0, 40 + 1 + 12 * (14 + 1));
seq_puts(m, "\n");
}
}
static void seq_header(struct seq_file *m)
{
- seq_printf(m, "lock_stat version 0.3\n");
+ seq_puts(m, "lock_stat version 0.4\n");
if (unlikely(!debug_locks))
seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
- seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
- seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
+ seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
+ seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
"%14s %14s\n",
"class name",
"con-bounces",
@@ -539,12 +540,14 @@ static void seq_header(struct seq_file *m)
"waittime-min",
"waittime-max",
"waittime-total",
+ "waittime-avg",
"acq-bounces",
"acquisitions",
"holdtime-min",
"holdtime-max",
- "holdtime-total");
- seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
+ "holdtime-total",
+ "holdtime-avg");
+ seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
seq_printf(m, "\n");
}
diff --git a/kernel/modsign_certificate.S b/kernel/modsign_certificate.S
deleted file mode 100644
index 4a9a86d12c8b..000000000000
--- a/kernel/modsign_certificate.S
+++ /dev/null
@@ -1,12 +0,0 @@
-#include <linux/export.h>
-
-#define GLOBAL(name) \
- .globl VMLINUX_SYMBOL(name); \
- VMLINUX_SYMBOL(name):
-
- .section ".init.data","aw"
-
-GLOBAL(modsign_certificate_list)
- .incbin "signing_key.x509"
- .incbin "extra_certificates"
-GLOBAL(modsign_certificate_list_end)
diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c
deleted file mode 100644
index 7cbd4507a7e6..000000000000
--- a/kernel/modsign_pubkey.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/* Public keys for module signature verification
- *
- * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cred.h>
-#include <linux/err.h>
-#include <keys/asymmetric-type.h>
-#include "module-internal.h"
-
-struct key *modsign_keyring;
-
-extern __initconst const u8 modsign_certificate_list[];
-extern __initconst const u8 modsign_certificate_list_end[];
-
-/*
- * We need to make sure ccache doesn't cache the .o file as it doesn't notice
- * if modsign.pub changes.
- */
-static __initconst const char annoy_ccache[] = __TIME__ "foo";
-
-/*
- * Load the compiled-in keys
- */
-static __init int module_verify_init(void)
-{
- pr_notice("Initialise module verification\n");
-
- modsign_keyring = keyring_alloc(".module_sign",
- KUIDT_INIT(0), KGIDT_INIT(0),
- current_cred(),
- ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
- KEY_USR_VIEW | KEY_USR_READ),
- KEY_ALLOC_NOT_IN_QUOTA, NULL);
- if (IS_ERR(modsign_keyring))
- panic("Can't allocate module signing keyring\n");
-
- return 0;
-}
-
-/*
- * Must be initialised before we try and load the keys into the keyring.
- */
-device_initcall(module_verify_init);
-
-/*
- * Load the compiled-in keys
- */
-static __init int load_module_signing_keys(void)
-{
- key_ref_t key;
- const u8 *p, *end;
- size_t plen;
-
- pr_notice("Loading module verification certificates\n");
-
- end = modsign_certificate_list_end;
- p = modsign_certificate_list;
- while (p < end) {
- /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
- * than 256 bytes in size.
- */
- if (end - p < 4)
- goto dodgy_cert;
- if (p[0] != 0x30 &&
- p[1] != 0x82)
- goto dodgy_cert;
- plen = (p[2] << 8) | p[3];
- plen += 4;
- if (plen > end - p)
- goto dodgy_cert;
-
- key = key_create_or_update(make_key_ref(modsign_keyring, 1),
- "asymmetric",
- NULL,
- p,
- plen,
- (KEY_POS_ALL & ~KEY_POS_SETATTR) |
- KEY_USR_VIEW,
- KEY_ALLOC_NOT_IN_QUOTA);
- if (IS_ERR(key))
- pr_err("MODSIGN: Problem loading in-kernel X.509 certificate (%ld)\n",
- PTR_ERR(key));
- else
- pr_notice("MODSIGN: Loaded cert '%s'\n",
- key_ref_to_ptr(key)->description);
- p += plen;
- }
-
- return 0;
-
-dodgy_cert:
- pr_err("MODSIGN: Problem parsing in-kernel X.509 certificate list\n");
- return 0;
-}
-late_initcall(load_module_signing_keys);
diff --git a/kernel/module-internal.h b/kernel/module-internal.h
index 24f9247b7d02..915e123a430f 100644
--- a/kernel/module-internal.h
+++ b/kernel/module-internal.h
@@ -9,6 +9,4 @@
* 2 of the Licence, or (at your option) any later version.
*/
-extern struct key *modsign_keyring;
-
extern int mod_verify_sig(const void *mod, unsigned long *_modlen);
diff --git a/kernel/module.c b/kernel/module.c
index dc582749fa13..5c9cf84017d5 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -644,8 +644,6 @@ static int module_unload_init(struct module *mod)
/* Hold reference count during initialization. */
__this_cpu_write(mod->refptr->incs, 1);
- /* Backwards compatibility macros put refcount during init. */
- mod->waiter = current;
return 0;
}
@@ -771,16 +769,9 @@ static int __try_stop_module(void *_sref)
static int try_stop_module(struct module *mod, int flags, int *forced)
{
- if (flags & O_NONBLOCK) {
- struct stopref sref = { mod, flags, forced };
+ struct stopref sref = { mod, flags, forced };
- return stop_machine(__try_stop_module, &sref, NULL);
- } else {
- /* We don't need to stop the machine for this. */
- mod->state = MODULE_STATE_GOING;
- synchronize_sched();
- return 0;
- }
+ return stop_machine(__try_stop_module, &sref, NULL);
}
unsigned long module_refcount(struct module *mod)
@@ -813,21 +804,6 @@ EXPORT_SYMBOL(module_refcount);
/* This exists whether we can unload or not */
static void free_module(struct module *mod);
-static void wait_for_zero_refcount(struct module *mod)
-{
- /* Since we might sleep for some time, release the mutex first */
- mutex_unlock(&module_mutex);
- for (;;) {
- pr_debug("Looking at refcount...\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (module_refcount(mod) == 0)
- break;
- schedule();
- }
- current->state = TASK_RUNNING;
- mutex_lock(&module_mutex);
-}
-
SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
unsigned int, flags)
{
@@ -842,6 +818,11 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
return -EFAULT;
name[MODULE_NAME_LEN-1] = '\0';
+ if (!(flags & O_NONBLOCK)) {
+ printk(KERN_WARNING
+ "waiting module removal not supported: please upgrade");
+ }
+
if (mutex_lock_interruptible(&module_mutex) != 0)
return -EINTR;
@@ -859,8 +840,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
/* Doing init or already dying? */
if (mod->state != MODULE_STATE_LIVE) {
- /* FIXME: if (force), slam module count and wake up
- waiter --RR */
+ /* FIXME: if (force), slam module count damn the torpedoes */
pr_debug("%s already dying\n", mod->name);
ret = -EBUSY;
goto out;
@@ -876,18 +856,11 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
}
}
- /* Set this up before setting mod->state */
- mod->waiter = current;
-
/* Stop the machine so refcounts can't move and disable module. */
ret = try_stop_module(mod, flags, &forced);
if (ret != 0)
goto out;
- /* Never wait if forced. */
- if (!forced && module_refcount(mod) != 0)
- wait_for_zero_refcount(mod);
-
mutex_unlock(&module_mutex);
/* Final destruction now no one is using it. */
if (mod->exit != NULL)
@@ -1005,9 +978,6 @@ void module_put(struct module *module)
__this_cpu_inc(module->refptr->decs);
trace_module_put(module, _RET_IP_);
- /* Maybe they're waiting for us to drop reference? */
- if (unlikely(!module_is_live(module)))
- wake_up_process(module->waiter);
preempt_enable();
}
}
@@ -2738,7 +2708,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
return 0;
}
-static void find_module_sections(struct module *mod, struct load_info *info)
+static int find_module_sections(struct module *mod, struct load_info *info)
{
mod->kp = section_objs(info, "__param",
sizeof(*mod->kp), &mod->num_kp);
@@ -2768,6 +2738,18 @@ static void find_module_sections(struct module *mod, struct load_info *info)
#ifdef CONFIG_CONSTRUCTORS
mod->ctors = section_objs(info, ".ctors",
sizeof(*mod->ctors), &mod->num_ctors);
+ if (!mod->ctors)
+ mod->ctors = section_objs(info, ".init_array",
+ sizeof(*mod->ctors), &mod->num_ctors);
+ else if (find_sec(info, ".init_array")) {
+ /*
+ * This shouldn't happen with the same compiler and binutils
+ * building all parts of the module.
+ */
+ printk(KERN_WARNING "%s: has both .ctors and .init_array.\n",
+ mod->name);
+ return -EINVAL;
+ }
#endif
#ifdef CONFIG_TRACEPOINTS
@@ -2806,6 +2788,8 @@ static void find_module_sections(struct module *mod, struct load_info *info)
info->debug = section_objs(info, "__verbose",
sizeof(*info->debug), &info->num_debug);
+
+ return 0;
}
static int move_module(struct module *mod, struct load_info *info)
@@ -3263,7 +3247,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
/* Now we've got everything in the final locations, we can
* find optional sections. */
- find_module_sections(mod, info);
+ err = find_module_sections(mod, info);
+ if (err)
+ goto free_unload;
err = check_module_license_and_versions(mod);
if (err)
diff --git a/kernel/module_signing.c b/kernel/module_signing.c
index f2970bddc5ea..be5b8fac4bd0 100644
--- a/kernel/module_signing.c
+++ b/kernel/module_signing.c
@@ -14,6 +14,7 @@
#include <crypto/public_key.h>
#include <crypto/hash.h>
#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
#include "module-internal.h"
/*
@@ -28,7 +29,7 @@
*/
struct module_signature {
u8 algo; /* Public-key crypto algorithm [enum pkey_algo] */
- u8 hash; /* Digest algorithm [enum pkey_hash_algo] */
+ u8 hash; /* Digest algorithm [enum hash_algo] */
u8 id_type; /* Key identifier type [enum pkey_id_type] */
u8 signer_len; /* Length of signer's name */
u8 key_id_len; /* Length of key identifier */
@@ -39,7 +40,7 @@ struct module_signature {
/*
* Digest the module contents.
*/
-static struct public_key_signature *mod_make_digest(enum pkey_hash_algo hash,
+static struct public_key_signature *mod_make_digest(enum hash_algo hash,
const void *mod,
unsigned long modlen)
{
@@ -54,7 +55,7 @@ static struct public_key_signature *mod_make_digest(enum pkey_hash_algo hash,
/* Allocate the hashing algorithm we're going to need and find out how
* big the hash operational data will be.
*/
- tfm = crypto_alloc_shash(pkey_hash_algo[hash], 0, 0);
+ tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0);
if (IS_ERR(tfm))
return (PTR_ERR(tfm) == -ENOENT) ? ERR_PTR(-ENOPKG) : ERR_CAST(tfm);
@@ -157,7 +158,7 @@ static struct key *request_asymmetric_key(const char *signer, size_t signer_len,
pr_debug("Look up: \"%s\"\n", id);
- key = keyring_search(make_key_ref(modsign_keyring, 1),
+ key = keyring_search(make_key_ref(system_trusted_keyring, 1),
&key_type_asymmetric, id);
if (IS_ERR(key))
pr_warn("Request for unknown module key '%s' err %ld\n",
@@ -217,7 +218,7 @@ int mod_verify_sig(const void *mod, unsigned long *_modlen)
return -ENOPKG;
if (ms.hash >= PKEY_HASH__LAST ||
- !pkey_hash_algo[ms.hash])
+ !hash_algo_name[ms.hash])
return -ENOPKG;
key = request_asymmetric_key(sig, ms.signer_len,
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 6d647aedffea..d24105b1b794 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -410,7 +410,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip,
- struct ww_acquire_ctx *ww_ctx)
+ struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
struct task_struct *task = current;
struct mutex_waiter waiter;
@@ -450,7 +450,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct task_struct *owner;
struct mspin_node node;
- if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+ if (use_ww_ctx && ww_ctx->acquired > 0) {
struct ww_mutex *ww;
ww = container_of(lock, struct ww_mutex, base);
@@ -480,7 +480,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
if ((atomic_read(&lock->count) == 1) &&
(atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
lock_acquired(&lock->dep_map, ip);
- if (!__builtin_constant_p(ww_ctx == NULL)) {
+ if (use_ww_ctx) {
struct ww_mutex *ww;
ww = container_of(lock, struct ww_mutex, base);
@@ -551,7 +551,7 @@ slowpath:
goto err;
}
- if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+ if (use_ww_ctx && ww_ctx->acquired > 0) {
ret = __mutex_lock_check_stamp(lock, ww_ctx);
if (ret)
goto err;
@@ -575,7 +575,7 @@ skip_wait:
lock_acquired(&lock->dep_map, ip);
mutex_set_owner(lock);
- if (!__builtin_constant_p(ww_ctx == NULL)) {
+ if (use_ww_ctx) {
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
struct mutex_waiter *cur;
@@ -615,7 +615,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
- subclass, NULL, _RET_IP_, NULL);
+ subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -625,7 +625,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
might_sleep();
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
- 0, nest, _RET_IP_, NULL);
+ 0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -635,7 +635,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
return __mutex_lock_common(lock, TASK_KILLABLE,
- subclass, NULL, _RET_IP_, NULL);
+ subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
@@ -644,7 +644,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
- subclass, NULL, _RET_IP_, NULL);
+ subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -682,7 +682,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
might_sleep();
ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
- 0, &ctx->dep_map, _RET_IP_, ctx);
+ 0, &ctx->dep_map, _RET_IP_, ctx, 1);
if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
@@ -697,7 +697,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
might_sleep();
ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
- 0, &ctx->dep_map, _RET_IP_, ctx);
+ 0, &ctx->dep_map, _RET_IP_, ctx, 1);
if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
@@ -809,28 +809,28 @@ __mutex_lock_slowpath(atomic_t *lock_count)
struct mutex *lock = container_of(lock_count, struct mutex, count);
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
- NULL, _RET_IP_, NULL);
+ NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
return __mutex_lock_common(lock, TASK_KILLABLE, 0,
- NULL, _RET_IP_, NULL);
+ NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
- NULL, _RET_IP_, NULL);
+ NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
- NULL, _RET_IP_, ctx);
+ NULL, _RET_IP_, ctx, 1);
}
static noinline int __sched
@@ -838,7 +838,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
- NULL, _RET_IP_, ctx);
+ NULL, _RET_IP_, ctx, 1);
}
#endif
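The new use_ww_ctx parameter replaces the __builtin_constant_p(ww_ctx == NULL) test: since __mutex_lock_common() is __always_inline, a constant bool argument lets the compiler drop the wait/wound branches at the non-ww call sites entirely. A minimal illustration of that effect with made-up helpers (pick() and pick_a() are not part of this patch):

static __always_inline int pick(int a, int b, const bool use_b)
{
	if (use_b)		/* folded away when use_b is a constant false */
		return b;
	return a;
}

static int pick_a(int a, int b)
{
	return pick(a, b, false);	/* compiles down to "return a;" */
}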
diff --git a/kernel/padata.c b/kernel/padata.c
index 07af2c95dcfe..2abd25d79cc8 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -46,6 +46,7 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
static int padata_cpu_hash(struct parallel_data *pd)
{
+ unsigned int seq_nr;
int cpu_index;
/*
@@ -53,10 +54,8 @@ static int padata_cpu_hash(struct parallel_data *pd)
* seq_nr mod. number of cpus in use.
*/
- spin_lock(&pd->seq_lock);
- cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu);
- pd->seq_nr++;
- spin_unlock(&pd->seq_lock);
+ seq_nr = atomic_inc_return(&pd->seq_nr);
+ cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
return padata_index_to_cpu(pd, cpu_index);
}
@@ -429,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
padata_init_pqueues(pd);
padata_init_squeues(pd);
setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
- pd->seq_nr = 0;
+ atomic_set(&pd->seq_nr, -1);
atomic_set(&pd->reorder_objects, 0);
atomic_set(&pd->refcnt, 0);
pd->pinst = pinst;
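The seq_lock spinlock can go away because a single atomic_inc_return() both bumps and reads the counter; initializing it to -1 keeps the first returned value at 0, preserving the old round-robin order. A hedged sketch of the same lockless pattern outside padata (seq, pick_cpu and nr_cpus are illustrative names):

static atomic_t seq = ATOMIC_INIT(-1);

static unsigned int pick_cpu(unsigned int nr_cpus)
{
	unsigned int n = atomic_inc_return(&seq);

	return n % nr_cpus;		/* wraps harmlessly on overflow */
}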
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index d444c4e834f4..2fac9cc79b3d 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -178,6 +178,22 @@ config PM_SLEEP_DEBUG
def_bool y
depends on PM_DEBUG && PM_SLEEP
+config DPM_WATCHDOG
+ bool "Device suspend/resume watchdog"
+ depends on PM_DEBUG && PSTORE
+ ---help---
+ Sets up a watchdog timer to capture drivers that are
+ locked up attempting to suspend/resume a device.
+ A detected lockup causes a system panic with the message
+ captured in the pstore device for inspection in a subsequent
+ boot session.
+
+config DPM_WATCHDOG_TIMEOUT
+ int "Watchdog timeout in seconds"
+ range 1 120
+ default 12
+ depends on DPM_WATCHDOG
+
config PM_TRACE
bool
help
diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c
index d09dd10c5a5e..9a58bc258810 100644
--- a/kernel/power/block_io.c
+++ b/kernel/power/block_io.c
@@ -32,7 +32,7 @@ static int submit(int rw, struct block_device *bdev, sector_t sector,
struct bio *bio;
bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_end_io = end_swap_bio_read;
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c9c759d5a15c..0121dab83f43 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -846,7 +846,7 @@ static int software_resume(void)
goto Finish;
}
-late_initcall(software_resume);
+late_initcall_sync(software_resume);
static const char * const hibernation_modes[] = {
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index a394297f8b2f..8dff9b48075a 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -558,30 +558,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
if (count == sizeof(s32)) {
if (copy_from_user(&value, buf, sizeof(s32)))
return -EFAULT;
- } else if (count <= 11) { /* ASCII perhaps? */
- char ascii_value[11];
- unsigned long int ulval;
+ } else {
int ret;
- if (copy_from_user(ascii_value, buf, count))
- return -EFAULT;
-
- if (count > 10) {
- if (ascii_value[10] == '\n')
- ascii_value[10] = '\0';
- else
- return -EINVAL;
- } else {
- ascii_value[count] = '\0';
- }
- ret = kstrtoul(ascii_value, 16, &ulval);
- if (ret) {
- pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
- return -EINVAL;
- }
- value = (s32)lower_32_bits(ulval);
- } else {
- return -EINVAL;
+ ret = kstrtos32_from_user(buf, count, 16, &value);
+ if (ret)
+ return ret;
}
req = filp->private_data;
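kstrtos32_from_user() copies the user buffer and parses it in one call, which is what lets the hand-rolled ASCII handling above disappear. A minimal sketch of the helper in an unrelated write handler (foo_write is a made-up name; base 16 matches the pm_qos usage):

static ssize_t foo_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	s32 value;
	int ret;

	ret = kstrtos32_from_user(buf, count, 16, &value);
	if (ret)
		return ret;

	/* ... apply value ... */
	return count;
}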
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 957f06164ad1..24850270c802 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -36,9 +36,9 @@ static struct snapshot_data {
struct snapshot_handle handle;
int swap;
int mode;
- char frozen;
- char ready;
- char platform_support;
+ bool frozen;
+ bool ready;
+ bool platform_support;
bool free_bitmaps;
} snapshot_state;
@@ -93,9 +93,9 @@ static int snapshot_open(struct inode *inode, struct file *filp)
if (error)
atomic_inc(&snapshot_device_available);
- data->frozen = 0;
- data->ready = 0;
- data->platform_support = 0;
+ data->frozen = false;
+ data->ready = false;
+ data->platform_support = false;
Unlock:
unlock_system_sleep();
@@ -229,7 +229,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
if (error)
thaw_processes();
else
- data->frozen = 1;
+ data->frozen = true;
break;
@@ -240,7 +240,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
free_basic_memory_bitmaps();
data->free_bitmaps = false;
thaw_processes();
- data->frozen = 0;
+ data->frozen = false;
break;
case SNAPSHOT_CREATE_IMAGE:
@@ -270,7 +270,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
case SNAPSHOT_FREE:
swsusp_free();
memset(&data->handle, 0, sizeof(struct snapshot_handle));
- data->ready = 0;
+ data->ready = false;
/*
* It is necessary to thaw kernel threads here, because
* SNAPSHOT_CREATE_IMAGE may be invoked directly after
@@ -334,7 +334,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
* PM_HIBERNATION_PREPARE
*/
error = suspend_devices_and_enter(PM_SUSPEND_MEM);
- data->ready = 0;
+ data->ready = false;
break;
case SNAPSHOT_PLATFORM_SUPPORT:
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
new file mode 100644
index 000000000000..01e9ec37a3e3
--- /dev/null
+++ b/kernel/rcu/Makefile
@@ -0,0 +1,6 @@
+obj-y += update.o srcu.o
+obj-$(CONFIG_RCU_TORTURE_TEST) += torture.o
+obj-$(CONFIG_TREE_RCU) += tree.o
+obj-$(CONFIG_TREE_PREEMPT_RCU) += tree.o
+obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
+obj-$(CONFIG_TINY_RCU) += tiny.o
diff --git a/kernel/rcu.h b/kernel/rcu/rcu.h
index 77131966c4ad..7859a0a3951e 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -122,4 +122,11 @@ int rcu_jiffies_till_stall_check(void);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
+/*
+ * Strings used in tracepoints need to be exported via the
+ * tracing system such that tools like perf and trace-cmd can
+ * translate the string address pointers to actual text.
+ */
+#define TPS(x) tracepoint_string(x)
+
#endif /* __LINUX_RCU_H */
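tracepoint_string() registers the literal so tools can map the pointer back to text; callers then simply wrap their string arguments in TPS(), as the tree.c and tiny.c hunks below do, for example:

	trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum), TPS("newreq"));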
diff --git a/kernel/srcu.c b/kernel/rcu/srcu.c
index 01d5ccb8bfe3..01d5ccb8bfe3 100644
--- a/kernel/srcu.c
+++ b/kernel/rcu/srcu.c
diff --git a/kernel/rcutiny.c b/kernel/rcu/tiny.c
index 9ed6075dc562..0c9a934cfec1 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcu/tiny.c
@@ -35,6 +35,7 @@
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
+#include <linux/ftrace_event.h>
#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
@@ -42,7 +43,7 @@
#include "rcu.h"
-/* Forward declarations for rcutiny_plugin.h. */
+/* Forward declarations for tiny_plugin.h. */
struct rcu_ctrlblk;
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
@@ -52,22 +53,23 @@ static void __call_rcu(struct rcu_head *head,
static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-#include "rcutiny_plugin.h"
+#include "tiny_plugin.h"
/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long newval)
{
if (newval) {
- RCU_TRACE(trace_rcu_dyntick("--=",
+ RCU_TRACE(trace_rcu_dyntick(TPS("--="),
rcu_dynticks_nesting, newval));
rcu_dynticks_nesting = newval;
return;
}
- RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting, newval));
+ RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
+ rcu_dynticks_nesting, newval));
if (!is_idle_task(current)) {
- struct task_struct *idle = idle_task(smp_processor_id());
+ struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
- RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
+ RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
rcu_dynticks_nesting, newval));
ftrace_dump(DUMP_ALL);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
@@ -120,15 +122,15 @@ EXPORT_SYMBOL_GPL(rcu_irq_exit);
static void rcu_idle_exit_common(long long oldval)
{
if (oldval) {
- RCU_TRACE(trace_rcu_dyntick("++=",
+ RCU_TRACE(trace_rcu_dyntick(TPS("++="),
oldval, rcu_dynticks_nesting));
return;
}
- RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
+ RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
if (!is_idle_task(current)) {
- struct task_struct *idle = idle_task(smp_processor_id());
+ struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
- RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
+ RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
oldval, rcu_dynticks_nesting));
ftrace_dump(DUMP_ALL);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
@@ -174,18 +176,18 @@ void rcu_irq_enter(void)
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
/*
* Test whether RCU thinks that the current CPU is idle.
*/
-int rcu_is_cpu_idle(void)
+bool __rcu_is_watching(void)
{
- return !rcu_dynticks_nesting;
+ return rcu_dynticks_nesting;
}
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL(__rcu_is_watching);
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
/*
* Test whether the current CPU was interrupted from idle. Nested
@@ -273,7 +275,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
if (&rcp->rcucblist == rcp->donetail) {
RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
- ACCESS_ONCE(rcp->rcucblist),
+ !!ACCESS_ONCE(rcp->rcucblist),
need_resched(),
is_idle_task(current),
false));
@@ -304,7 +306,8 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
RCU_TRACE(cb_count++);
}
RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
- RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
+ RCU_TRACE(trace_rcu_batch_end(rcp->name,
+ cb_count, 0, need_resched(),
is_idle_task(current),
false));
}
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcu/tiny_plugin.h
index 280d06cae352..280d06cae352 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
diff --git a/kernel/rcutorture.c b/kernel/rcu/torture.c
index be63101c6175..3929cd451511 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcu/torture.c
@@ -52,6 +52,12 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
+MODULE_ALIAS("rcutorture");
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "rcutorture."
+
static int fqs_duration;
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us), 0 to disable");
diff --git a/kernel/rcutree.c b/kernel/rcu/tree.c
index 32618b3fe4e6..4c06ddfea7cd 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcu/tree.c
@@ -41,6 +41,7 @@
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
+#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
@@ -56,17 +57,16 @@
#include <linux/ftrace_event.h>
#include <linux/suspend.h>
-#include "rcutree.h"
+#include "tree.h"
#include <trace/events/rcu.h>
#include "rcu.h"
-/*
- * Strings used in tracepoints need to be exported via the
- * tracing system such that tools like perf and trace-cmd can
- * translate the string address pointers to actual text.
- */
-#define TPS(x) tracepoint_string(x)
+MODULE_ALIAS("rcutree");
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "rcutree."
/* Data structures. */
@@ -222,7 +222,7 @@ void rcu_note_context_switch(int cpu)
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
-DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
.dynticks = ATOMIC_INIT(1),
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
@@ -371,7 +371,8 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
{
trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
if (!user && !is_idle_task(current)) {
- struct task_struct *idle = idle_task(smp_processor_id());
+ struct task_struct *idle __maybe_unused =
+ idle_task(smp_processor_id());
trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
ftrace_dump(DUMP_ORIG);
@@ -407,7 +408,7 @@ static void rcu_eqs_enter(bool user)
long long oldval;
struct rcu_dynticks *rdtp;
- rdtp = &__get_cpu_var(rcu_dynticks);
+ rdtp = this_cpu_ptr(&rcu_dynticks);
oldval = rdtp->dynticks_nesting;
WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
@@ -435,7 +436,7 @@ void rcu_idle_enter(void)
local_irq_save(flags);
rcu_eqs_enter(false);
- rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0);
+ rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -478,7 +479,7 @@ void rcu_irq_exit(void)
struct rcu_dynticks *rdtp;
local_irq_save(flags);
- rdtp = &__get_cpu_var(rcu_dynticks);
+ rdtp = this_cpu_ptr(&rcu_dynticks);
oldval = rdtp->dynticks_nesting;
rdtp->dynticks_nesting--;
WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
@@ -508,7 +509,8 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
rcu_cleanup_after_idle(smp_processor_id());
trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
if (!user && !is_idle_task(current)) {
- struct task_struct *idle = idle_task(smp_processor_id());
+ struct task_struct *idle __maybe_unused =
+ idle_task(smp_processor_id());
trace_rcu_dyntick(TPS("Error on exit: not idle task"),
oldval, rdtp->dynticks_nesting);
@@ -528,7 +530,7 @@ static void rcu_eqs_exit(bool user)
struct rcu_dynticks *rdtp;
long long oldval;
- rdtp = &__get_cpu_var(rcu_dynticks);
+ rdtp = this_cpu_ptr(&rcu_dynticks);
oldval = rdtp->dynticks_nesting;
WARN_ON_ONCE(oldval < 0);
if (oldval & DYNTICK_TASK_NEST_MASK)
@@ -555,7 +557,7 @@ void rcu_idle_exit(void)
local_irq_save(flags);
rcu_eqs_exit(false);
- rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0);
+ rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -599,7 +601,7 @@ void rcu_irq_enter(void)
long long oldval;
local_irq_save(flags);
- rdtp = &__get_cpu_var(rcu_dynticks);
+ rdtp = this_cpu_ptr(&rcu_dynticks);
oldval = rdtp->dynticks_nesting;
rdtp->dynticks_nesting++;
WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
@@ -620,7 +622,7 @@ void rcu_irq_enter(void)
*/
void rcu_nmi_enter(void)
{
- struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+ struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
if (rdtp->dynticks_nmi_nesting == 0 &&
(atomic_read(&rdtp->dynticks) & 0x1))
@@ -642,7 +644,7 @@ void rcu_nmi_enter(void)
*/
void rcu_nmi_exit(void)
{
- struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+ struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
if (rdtp->dynticks_nmi_nesting == 0 ||
--rdtp->dynticks_nmi_nesting != 0)
@@ -655,21 +657,34 @@ void rcu_nmi_exit(void)
}
/**
- * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
+ * __rcu_is_watching - are RCU read-side critical sections safe?
+ *
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections. Unlike
+ * rcu_is_watching(), the caller of __rcu_is_watching() must have at
+ * least disabled preemption.
+ */
+bool __rcu_is_watching(void)
+{
+ return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+}
+
+/**
+ * rcu_is_watching - see if RCU thinks that the current CPU is idle
*
* If the current CPU is in its idle loop and is neither in an interrupt
* or NMI handler, return true.
*/
-int rcu_is_cpu_idle(void)
+bool rcu_is_watching(void)
{
int ret;
preempt_disable();
- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+ ret = __rcu_is_watching();
preempt_enable();
return ret;
}
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL_GPL(rcu_is_watching);
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
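rcu_is_watching() is the drop-in (but inverted) replacement for rcu_is_cpu_idle(), as the lockdep.c and __call_rcu_core() hunks show. A hedged sketch of a typical caller (debug_peek is a made-up name):

static void debug_peek(void)
{
	if (!rcu_is_watching())
		return;		/* idle or offline: RCU read side not legal */

	rcu_read_lock();
	/* ... dereference RCU-protected state ... */
	rcu_read_unlock();
}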
@@ -703,7 +718,7 @@ bool rcu_lockdep_current_cpu_online(void)
if (in_nmi())
return 1;
preempt_disable();
- rdp = &__get_cpu_var(rcu_sched_data);
+ rdp = this_cpu_ptr(&rcu_sched_data);
rnp = rdp->mynode;
ret = (rdp->grpmask & rnp->qsmaskinit) ||
!rcu_scheduler_fully_active;
@@ -723,7 +738,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
*/
static int rcu_is_cpu_rrupt_from_idle(void)
{
- return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
+ return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
}
/*
@@ -802,8 +817,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
static void record_gp_stall_check_time(struct rcu_state *rsp)
{
- rsp->gp_start = jiffies;
- rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
+ unsigned long j = ACCESS_ONCE(jiffies);
+
+ rsp->gp_start = j;
+ smp_wmb(); /* Record start time before stall time. */
+ rsp->jiffies_stall = j + rcu_jiffies_till_stall_check();
}
/*
@@ -898,6 +916,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
force_quiescent_state(rsp); /* Kick them all. */
}
+/*
+ * This function really isn't for public consumption, but RCU is special in
+ * that context switches can allow the state machine to make progress.
+ */
+extern void resched_cpu(int cpu);
+
static void print_cpu_stall(struct rcu_state *rsp)
{
int cpu;
@@ -927,22 +951,60 @@ static void print_cpu_stall(struct rcu_state *rsp)
3 * rcu_jiffies_till_stall_check() + 3;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- set_need_resched(); /* kick ourselves to get things going. */
+ /*
+ * Attempt to revive the RCU machinery by forcing a context switch.
+ *
+ * A context switch would normally allow the RCU state machine to make
+ * progress and it could be we're stuck in kernel space without context
+ * switches for an entirely unreasonable amount of time.
+ */
+ resched_cpu(smp_processor_id());
}
static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
+ unsigned long completed;
+ unsigned long gpnum;
+ unsigned long gps;
unsigned long j;
unsigned long js;
struct rcu_node *rnp;
- if (rcu_cpu_stall_suppress)
+ if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
return;
j = ACCESS_ONCE(jiffies);
+
+ /*
+ * Lots of memory barriers to reject false positives.
+ *
+ * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
+ * then rsp->gp_start, and finally rsp->completed. These values
+ * are updated in the opposite order with memory barriers (or
+ * equivalent) during grace-period initialization and cleanup.
+ * Now, a false positive can occur if we get a new value of
+ * rsp->gp_start and an old value of rsp->jiffies_stall. But given
+ * the memory barriers, the only way that this can happen is if one
+ * grace period ends and another starts between these two fetches.
+ * Detect this by comparing rsp->completed with the previous fetch
+ * from rsp->gpnum.
+ *
+ * Given this check, comparisons of jiffies, rsp->jiffies_stall,
+ * and rsp->gp_start suffice to forestall false positives.
+ */
+ gpnum = ACCESS_ONCE(rsp->gpnum);
+ smp_rmb(); /* Pick up ->gpnum first... */
js = ACCESS_ONCE(rsp->jiffies_stall);
+ smp_rmb(); /* ...then ->jiffies_stall before the rest... */
+ gps = ACCESS_ONCE(rsp->gp_start);
+ smp_rmb(); /* ...and finally ->gp_start before ->completed. */
+ completed = ACCESS_ONCE(rsp->completed);
+ if (ULONG_CMP_GE(completed, gpnum) ||
+ ULONG_CMP_LT(j, js) ||
+ ULONG_CMP_GE(gps, js))
+ return; /* No stall or GP completed since entering function. */
rnp = rdp->mynode;
if (rcu_gp_in_progress(rsp) &&
- (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
+ (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
/* We haven't checked in, so go dump stack. */
print_cpu_stall(rsp);
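The reasoning in the comment above leans on the usual smp_wmb()/smp_rmb() pairing guarantee; a minimal illustration with made-up variables (not RCU code):

static int x, y;

static void writer(void)
{
	ACCESS_ONCE(x) = 1;
	smp_wmb();			/* order the two stores */
	ACCESS_ONCE(y) = 1;
}

static void reader(void)
{
	int b = ACCESS_ONCE(y);

	smp_rmb();			/* order the two loads */
	if (b)
		WARN_ON(ACCESS_ONCE(x) != 1);	/* cannot fire */
}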
@@ -1297,7 +1359,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
}
/*
- * Initialize a new grace period.
+ * Initialize a new grace period. Return 0 if no grace period required.
*/
static int rcu_gp_init(struct rcu_state *rsp)
{
@@ -1306,18 +1368,27 @@ static int rcu_gp_init(struct rcu_state *rsp)
rcu_bind_gp_kthread();
raw_spin_lock_irq(&rnp->lock);
+ if (rsp->gp_flags == 0) {
+ /* Spurious wakeup, tell caller to go back to sleep. */
+ raw_spin_unlock_irq(&rnp->lock);
+ return 0;
+ }
rsp->gp_flags = 0; /* Clear all flags: New grace period. */
- if (rcu_gp_in_progress(rsp)) {
- /* Grace period already in progress, don't start another. */
+ if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
+ /*
+ * Grace period already in progress, don't start another.
+ * Not supposed to be able to happen.
+ */
raw_spin_unlock_irq(&rnp->lock);
return 0;
}
/* Advance to a new grace period and initialize state. */
+ record_gp_stall_check_time(rsp);
+ smp_wmb(); /* Record GP times before starting GP. */
rsp->gpnum++;
trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
- record_gp_stall_check_time(rsp);
raw_spin_unlock_irq(&rnp->lock);
/* Exclude any concurrent CPU-hotplug operations. */
@@ -1366,7 +1437,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
/*
* Do one round of quiescent-state forcing.
*/
-int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
+static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
{
int fqs_state = fqs_state_in;
bool isidle = false;
@@ -1451,8 +1522,12 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
rsp->fqs_state = RCU_GP_IDLE;
rdp = this_cpu_ptr(rsp->rda);
rcu_advance_cbs(rsp, rnp, rdp); /* Reduce false positives below. */
- if (cpu_needs_another_gp(rsp, rdp))
- rsp->gp_flags = 1;
+ if (cpu_needs_another_gp(rsp, rdp)) {
+ rsp->gp_flags = RCU_GP_FLAG_INIT;
+ trace_rcu_grace_period(rsp->name,
+ ACCESS_ONCE(rsp->gpnum),
+ TPS("newreq"));
+ }
raw_spin_unlock_irq(&rnp->lock);
}
@@ -1462,6 +1537,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
static int __noreturn rcu_gp_kthread(void *arg)
{
int fqs_state;
+ int gf;
unsigned long j;
int ret;
struct rcu_state *rsp = arg;
@@ -1471,14 +1547,19 @@ static int __noreturn rcu_gp_kthread(void *arg)
/* Handle grace-period start. */
for (;;) {
+ trace_rcu_grace_period(rsp->name,
+ ACCESS_ONCE(rsp->gpnum),
+ TPS("reqwait"));
wait_event_interruptible(rsp->gp_wq,
- rsp->gp_flags &
+ ACCESS_ONCE(rsp->gp_flags) &
RCU_GP_FLAG_INIT);
- if ((rsp->gp_flags & RCU_GP_FLAG_INIT) &&
- rcu_gp_init(rsp))
+ if (rcu_gp_init(rsp))
break;
cond_resched();
flush_signals(current);
+ trace_rcu_grace_period(rsp->name,
+ ACCESS_ONCE(rsp->gpnum),
+ TPS("reqwaitsig"));
}
/* Handle quiescent-state forcing. */
@@ -1488,10 +1569,16 @@ static int __noreturn rcu_gp_kthread(void *arg)
j = HZ;
jiffies_till_first_fqs = HZ;
}
+ ret = 0;
for (;;) {
- rsp->jiffies_force_qs = jiffies + j;
+ if (!ret)
+ rsp->jiffies_force_qs = jiffies + j;
+ trace_rcu_grace_period(rsp->name,
+ ACCESS_ONCE(rsp->gpnum),
+ TPS("fqswait"));
ret = wait_event_interruptible_timeout(rsp->gp_wq,
- (rsp->gp_flags & RCU_GP_FLAG_FQS) ||
+ ((gf = ACCESS_ONCE(rsp->gp_flags)) &
+ RCU_GP_FLAG_FQS) ||
(!ACCESS_ONCE(rnp->qsmask) &&
!rcu_preempt_blocked_readers_cgp(rnp)),
j);
@@ -1500,13 +1587,23 @@ static int __noreturn rcu_gp_kthread(void *arg)
!rcu_preempt_blocked_readers_cgp(rnp))
break;
/* If time for quiescent-state forcing, do it. */
- if (ret == 0 || (rsp->gp_flags & RCU_GP_FLAG_FQS)) {
+ if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
+ (gf & RCU_GP_FLAG_FQS)) {
+ trace_rcu_grace_period(rsp->name,
+ ACCESS_ONCE(rsp->gpnum),
+ TPS("fqsstart"));
fqs_state = rcu_gp_fqs(rsp, fqs_state);
+ trace_rcu_grace_period(rsp->name,
+ ACCESS_ONCE(rsp->gpnum),
+ TPS("fqsend"));
cond_resched();
} else {
/* Deal with stray signal. */
cond_resched();
flush_signals(current);
+ trace_rcu_grace_period(rsp->name,
+ ACCESS_ONCE(rsp->gpnum),
+ TPS("fqswaitsig"));
}
j = jiffies_till_next_fqs;
if (j > HZ) {
@@ -1554,6 +1651,8 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
return;
}
rsp->gp_flags = RCU_GP_FLAG_INIT;
+ trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
+ TPS("newreq"));
/*
* We can't do wakeups while holding the rnp->lock, as that
@@ -2255,7 +2354,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
* If called from an extended quiescent state, invoke the RCU
* core in order to force a re-evaluation of RCU's idleness.
*/
- if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+ if (!rcu_is_watching() && cpu_online(smp_processor_id()))
invoke_rcu_core();
/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
@@ -2725,10 +2824,13 @@ static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
for_each_rcu_flavor(rsp) {
rdp = per_cpu_ptr(rsp->rda, cpu);
- if (rdp->qlen != rdp->qlen_lazy)
+ if (!rdp->nxtlist)
+ continue;
+ hc = true;
+ if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
al = false;
- if (rdp->nxtlist)
- hc = true;
+ break;
+ }
}
if (all_lazy)
*all_lazy = al;
@@ -3216,7 +3318,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
/*
* Compute the rcu_node tree geometry from kernel parameters. This cannot
- * replace the definitions in rcutree.h because those are needed to size
+ * replace the definitions in tree.h because those are needed to size
* the ->node array in the rcu_state structure.
*/
static void __init rcu_init_geometry(void)
@@ -3295,8 +3397,8 @@ void __init rcu_init(void)
rcu_bootup_announce();
rcu_init_geometry();
- rcu_init_one(&rcu_sched_state, &rcu_sched_data);
rcu_init_one(&rcu_bh_state, &rcu_bh_data);
+ rcu_init_one(&rcu_sched_state, &rcu_sched_data);
__rcu_init_preempt();
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
@@ -3311,4 +3413,4 @@ void __init rcu_init(void)
rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
}
-#include "rcutree_plugin.h"
+#include "tree_plugin.h"
diff --git a/kernel/rcutree.h b/kernel/rcu/tree.h
index 5f97eab602cd..52be957c9fe2 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcu/tree.h
@@ -104,6 +104,8 @@ struct rcu_dynticks {
/* idle-period nonlazy_posted snapshot. */
unsigned long last_accelerate;
/* Last jiffy CBs were accelerated. */
+ unsigned long last_advance_all;
+ /* Last jiffy CBs were all advanced. */
int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};
diff --git a/kernel/rcutree_plugin.h b/kernel/rcu/tree_plugin.h
index 130c97b027f2..3822ac0c4b27 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -28,7 +28,7 @@
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
-#include "time/tick-internal.h"
+#include "../time/tick-internal.h"
#define RCU_KTHREAD_PRIO 1
@@ -96,10 +96,15 @@ static void __init rcu_bootup_announce_oddness(void)
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
#ifdef CONFIG_RCU_NOCB_CPU_ALL
pr_info("\tOffload RCU callbacks from all CPUs\n");
- cpumask_setall(rcu_nocb_mask);
+ cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
if (have_rcu_nocb_mask) {
+ if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
+ pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
+ cpumask_and(rcu_nocb_mask, cpu_possible_mask,
+ rcu_nocb_mask);
+ }
cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
if (rcu_nocb_poll)
@@ -660,7 +665,7 @@ static void rcu_preempt_check_callbacks(int cpu)
static void rcu_preempt_do_callbacks(void)
{
- rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
+ rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
}
#endif /* #ifdef CONFIG_RCU_BOOST */
@@ -1128,7 +1133,7 @@ void exit_rcu(void)
#ifdef CONFIG_RCU_BOOST
-#include "rtmutex_common.h"
+#include "../rtmutex_common.h"
#ifdef CONFIG_RCU_TRACE
@@ -1332,7 +1337,7 @@ static void invoke_rcu_callbacks_kthread(void)
*/
static bool rcu_is_callbacks_kthread(void)
{
- return __get_cpu_var(rcu_cpu_kthread_task) == current;
+ return __this_cpu_read(rcu_cpu_kthread_task) == current;
}
#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
@@ -1382,8 +1387,8 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
static void rcu_kthread_do_work(void)
{
- rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
- rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+ rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
+ rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
rcu_preempt_do_callbacks();
}
@@ -1402,7 +1407,7 @@ static void rcu_cpu_kthread_park(unsigned int cpu)
static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
- return __get_cpu_var(rcu_cpu_has_work);
+ return __this_cpu_read(rcu_cpu_has_work);
}
/*
@@ -1412,8 +1417,8 @@ static int rcu_cpu_kthread_should_run(unsigned int cpu)
*/
static void rcu_cpu_kthread(unsigned int cpu)
{
- unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
- char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
+ unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
+ char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
int spincnt;
for (spincnt = 0; spincnt < 10; spincnt++) {
@@ -1630,17 +1635,23 @@ module_param(rcu_idle_lazy_gp_delay, int, 0644);
extern int tick_nohz_enabled;
/*
- * Try to advance callbacks for all flavors of RCU on the current CPU.
- * Afterwards, if there are any callbacks ready for immediate invocation,
- * return true.
+ * Try to advance callbacks for all flavors of RCU on the current CPU, but
+ * only if it has been a while since the last time we did so. Afterwards,
+ * if there are any callbacks ready for immediate invocation, return true.
*/
static bool rcu_try_advance_all_cbs(void)
{
bool cbs_ready = false;
struct rcu_data *rdp;
+ struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
struct rcu_node *rnp;
struct rcu_state *rsp;
+ /* Exit early if we advanced recently. */
+ if (jiffies == rdtp->last_advance_all)
+ return 0;
+ rdtp->last_advance_all = jiffies;
+
for_each_rcu_flavor(rsp) {
rdp = this_cpu_ptr(rsp->rda);
rnp = rdp->mynode;
@@ -1739,6 +1750,8 @@ static void rcu_prepare_for_idle(int cpu)
*/
if (rdtp->all_lazy &&
rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
+ rdtp->all_lazy = false;
+ rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
invoke_rcu_core();
return;
}
@@ -1768,17 +1781,11 @@ static void rcu_prepare_for_idle(int cpu)
*/
static void rcu_cleanup_after_idle(int cpu)
{
- struct rcu_data *rdp;
- struct rcu_state *rsp;
if (rcu_is_nocb_cpu(cpu))
return;
- rcu_try_advance_all_cbs();
- for_each_rcu_flavor(rsp) {
- rdp = per_cpu_ptr(rsp->rda, cpu);
- if (cpu_has_callbacks_ready_to_invoke(rdp))
- invoke_rcu_core();
- }
+ if (rcu_try_advance_all_cbs())
+ invoke_rcu_core();
}
/*
@@ -2108,15 +2115,22 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
/* If we are not being polled and there is a kthread, awaken it ... */
t = ACCESS_ONCE(rdp->nocb_kthread);
- if (rcu_nocb_poll | !t)
+ if (rcu_nocb_poll || !t) {
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ TPS("WakeNotPoll"));
return;
+ }
len = atomic_long_read(&rdp->nocb_q_count);
if (old_rhpp == &rdp->nocb_head) {
wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
rdp->qlen_last_fqs_check = 0;
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeEmpty"));
} else if (len > rdp->qlen_last_fqs_check + qhimark) {
wake_up_process(t); /* ... or if many callbacks queued. */
rdp->qlen_last_fqs_check = LONG_MAX / 2;
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeOvf"));
+ } else {
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
}
return;
}
@@ -2140,10 +2154,12 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
if (__is_kfree_rcu_offset((unsigned long)rhp->func))
trace_rcu_kfree_callback(rdp->rsp->name, rhp,
(unsigned long)rhp->func,
- rdp->qlen_lazy, rdp->qlen);
+ -atomic_long_read(&rdp->nocb_q_count_lazy),
+ -atomic_long_read(&rdp->nocb_q_count));
else
trace_rcu_callback(rdp->rsp->name, rhp,
- rdp->qlen_lazy, rdp->qlen);
+ -atomic_long_read(&rdp->nocb_q_count_lazy),
+ -atomic_long_read(&rdp->nocb_q_count));
return 1;
}
@@ -2221,6 +2237,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
static int rcu_nocb_kthread(void *arg)
{
int c, cl;
+ bool firsttime = 1;
struct rcu_head *list;
struct rcu_head *next;
struct rcu_head **tail;
@@ -2229,14 +2246,27 @@ static int rcu_nocb_kthread(void *arg)
/* Each pass through this loop invokes one batch of callbacks */
for (;;) {
/* If not polling, wait for next batch of callbacks. */
- if (!rcu_nocb_poll)
+ if (!rcu_nocb_poll) {
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ TPS("Sleep"));
wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
+ } else if (firsttime) {
+ firsttime = 0;
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ TPS("Poll"));
+ }
list = ACCESS_ONCE(rdp->nocb_head);
if (!list) {
+ if (!rcu_nocb_poll)
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ TPS("WokeEmpty"));
schedule_timeout_interruptible(1);
flush_signals(current);
continue;
}
+ firsttime = 1;
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ TPS("WokeNonEmpty"));
/*
* Extract queued callbacks, update counts, and wait
@@ -2257,7 +2287,11 @@ static int rcu_nocb_kthread(void *arg)
next = list->next;
/* Wait for enqueuing to complete, if needed. */
while (next == NULL && &list->next != tail) {
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ TPS("WaitQueue"));
schedule_timeout_interruptible(1);
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ TPS("WokeQueue"));
next = list->next;
}
debug_rcu_head_unqueue(list);
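For reference, the this_cpu_ptr()/__this_cpu_read() accessors used throughout this file follow the pattern sketched below; the per-CPU variables are hypothetical, and both forms assume the caller already runs with preemption disabled (softirq or a CPU-bound kthread):

#include <linux/percpu.h>

struct example_pcpu {
	int busy;
};

static DEFINE_PER_CPU(int, example_flag);
static DEFINE_PER_CPU(struct example_pcpu, example_pcpu);

static void example_use(void)
{
	struct example_pcpu *ep;

	/* Read a per-CPU scalar without taking its address. */
	if (!__this_cpu_read(example_flag))
		return;

	/* Take a pointer to this CPU's instance of a per-CPU structure. */
	ep = this_cpu_ptr(&example_pcpu);
	ep->busy = 1;
}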
diff --git a/kernel/rcutree_trace.c b/kernel/rcu/tree_trace.c
index cf6c17412932..3596797b7e46 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -44,7 +44,7 @@
#include <linux/seq_file.h>
#define RCU_TREE_NONCORE
-#include "rcutree.h"
+#include "tree.h"
static int r_open(struct inode *inode, struct file *file,
const struct seq_operations *op)
diff --git a/kernel/rcupdate.c b/kernel/rcu/update.c
index b02a339836b4..6cb3dff89e2b 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcu/update.c
@@ -53,6 +53,12 @@
#include "rcu.h"
+MODULE_ALIAS("rcupdate");
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "rcupdate."
+
module_param(rcu_expedited, int, 0);
#ifdef CONFIG_PREEMPT_RCU
@@ -148,7 +154,7 @@ int rcu_read_lock_bh_held(void)
{
if (!debug_lockdep_rcu_enabled())
return 1;
- if (rcu_is_cpu_idle())
+ if (!rcu_is_watching())
return 0;
if (!rcu_lockdep_current_cpu_online())
return 0;
@@ -298,7 +304,7 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#endif
int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
-int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
+static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);
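For reference, the MODULE_PARAM_PREFIX defined above namespaces built-in parameters; a minimal sketch with a hypothetical parameter shows the effect (set at boot as rcupdate.example=1 and visible under /sys/module/rcupdate/parameters/):

#include <linux/moduleparam.h>

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

/* Hypothetical built-in parameter, named "rcupdate.example" externally. */
static int example;
module_param(example, int, 0444);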
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5ac63c9a995a..450a34b2a637 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -513,12 +513,11 @@ static inline void init_hrtick(void)
* might also involve a cross-CPU call to trigger the scheduler on
* the target CPU.
*/
-#ifdef CONFIG_SMP
void resched_task(struct task_struct *p)
{
int cpu;
- assert_raw_spin_locked(&task_rq(p)->lock);
+ lockdep_assert_held(&task_rq(p)->lock);
if (test_tsk_need_resched(p))
return;
@@ -526,8 +525,10 @@ void resched_task(struct task_struct *p)
set_tsk_need_resched(p);
cpu = task_cpu(p);
- if (cpu == smp_processor_id())
+ if (cpu == smp_processor_id()) {
+ set_preempt_need_resched();
return;
+ }
/* NEED_RESCHED must be visible before we test polling */
smp_mb();
@@ -546,6 +547,7 @@ void resched_cpu(int cpu)
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
+#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
* In the semi idle case, use the nearest busy cpu for migrating timers
@@ -693,12 +695,6 @@ void sched_avg_update(struct rq *rq)
}
}
-#else /* !CONFIG_SMP */
-void resched_task(struct task_struct *p)
-{
- assert_raw_spin_locked(&task_rq(p)->lock);
- set_tsk_need_resched(p);
-}
#endif /* CONFIG_SMP */
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
@@ -767,14 +763,14 @@ static void set_load_weight(struct task_struct *p)
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
update_rq_clock(rq);
- sched_info_queued(p);
+ sched_info_queued(rq, p);
p->sched_class->enqueue_task(rq, p, flags);
}
static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
update_rq_clock(rq);
- sched_info_dequeued(p);
+ sched_info_dequeued(rq, p);
p->sched_class->dequeue_task(rq, p, flags);
}
@@ -987,7 +983,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
* ttwu() will sort out the placement.
*/
WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
- !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
+ !(task_preempt_count(p) & PREEMPT_ACTIVE));
#ifdef CONFIG_LOCKDEP
/*
@@ -1017,6 +1013,107 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
__set_task_cpu(p, new_cpu);
}
+static void __migrate_swap_task(struct task_struct *p, int cpu)
+{
+ if (p->on_rq) {
+ struct rq *src_rq, *dst_rq;
+
+ src_rq = task_rq(p);
+ dst_rq = cpu_rq(cpu);
+
+ deactivate_task(src_rq, p, 0);
+ set_task_cpu(p, cpu);
+ activate_task(dst_rq, p, 0);
+ check_preempt_curr(dst_rq, p, 0);
+ } else {
+ /*
+ * Task isn't running anymore; make it appear like we migrated
+ * it before it went to sleep. This means on wakeup we make the
+ * previous cpu our target instead of where it really is.
+ */
+ p->wake_cpu = cpu;
+ }
+}
+
+struct migration_swap_arg {
+ struct task_struct *src_task, *dst_task;
+ int src_cpu, dst_cpu;
+};
+
+static int migrate_swap_stop(void *data)
+{
+ struct migration_swap_arg *arg = data;
+ struct rq *src_rq, *dst_rq;
+ int ret = -EAGAIN;
+
+ src_rq = cpu_rq(arg->src_cpu);
+ dst_rq = cpu_rq(arg->dst_cpu);
+
+ double_raw_lock(&arg->src_task->pi_lock,
+ &arg->dst_task->pi_lock);
+ double_rq_lock(src_rq, dst_rq);
+ if (task_cpu(arg->dst_task) != arg->dst_cpu)
+ goto unlock;
+
+ if (task_cpu(arg->src_task) != arg->src_cpu)
+ goto unlock;
+
+ if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
+ goto unlock;
+
+ if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
+ goto unlock;
+
+ __migrate_swap_task(arg->src_task, arg->dst_cpu);
+ __migrate_swap_task(arg->dst_task, arg->src_cpu);
+
+ ret = 0;
+
+unlock:
+ double_rq_unlock(src_rq, dst_rq);
+ raw_spin_unlock(&arg->dst_task->pi_lock);
+ raw_spin_unlock(&arg->src_task->pi_lock);
+
+ return ret;
+}
+
+/*
+ * Cross migrate two tasks
+ */
+int migrate_swap(struct task_struct *cur, struct task_struct *p)
+{
+ struct migration_swap_arg arg;
+ int ret = -EINVAL;
+
+ arg = (struct migration_swap_arg){
+ .src_task = cur,
+ .src_cpu = task_cpu(cur),
+ .dst_task = p,
+ .dst_cpu = task_cpu(p),
+ };
+
+ if (arg.src_cpu == arg.dst_cpu)
+ goto out;
+
+ /*
+ * These three tests are all lockless; this is OK since all of them
+ * will be re-checked with proper locks held further down the line.
+ */
+ if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
+ goto out;
+
+ if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
+ goto out;
+
+ if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
+ goto out;
+
+ ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
+
+out:
+ return ret;
+}
+
struct migration_arg {
struct task_struct *task;
int dest_cpu;
@@ -1236,9 +1333,9 @@ out:
* The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
*/
static inline
-int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{
- int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+ cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
/*
* In order not to call set_task_cpu() on a blocking task we need
@@ -1330,12 +1427,13 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
if (rq->idle_stamp) {
u64 delta = rq_clock(rq) - rq->idle_stamp;
- u64 max = 2*sysctl_sched_migration_cost;
+ u64 max = 2*rq->max_idle_balance_cost;
- if (delta > max)
+ update_avg(&rq->avg_idle, delta);
+
+ if (rq->avg_idle > max)
rq->avg_idle = max;
- else
- update_avg(&rq->avg_idle, delta);
+
rq->idle_stamp = 0;
}
#endif
@@ -1396,6 +1494,14 @@ static void sched_ttwu_pending(void)
void scheduler_ipi(void)
{
+ /*
+ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
+ * TIF_NEED_RESCHED remotely (for the first time) will also send
+ * this IPI.
+ */
+ if (tif_need_resched())
+ set_preempt_need_resched();
+
if (llist_empty(&this_rq()->wake_list)
&& !tick_nohz_full_cpu(smp_processor_id())
&& !got_nohz_idle_kick())
@@ -1513,7 +1619,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
if (p->sched_class->task_waking)
p->sched_class->task_waking(p);
- cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+ cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
if (task_cpu(p) != cpu) {
wake_flags |= WF_MIGRATED;
set_task_cpu(p, cpu);
@@ -1595,7 +1701,7 @@ int wake_up_state(struct task_struct *p, unsigned int state)
*
* __sched_fork() is basic setup used by init_idle() too:
*/
-static void __sched_fork(struct task_struct *p)
+static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
{
p->on_rq = 0;
@@ -1619,16 +1725,24 @@ static void __sched_fork(struct task_struct *p)
#ifdef CONFIG_NUMA_BALANCING
if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
- p->mm->numa_next_scan = jiffies;
- p->mm->numa_next_reset = jiffies;
+ p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
p->mm->numa_scan_seq = 0;
}
+ if (clone_flags & CLONE_VM)
+ p->numa_preferred_nid = current->numa_preferred_nid;
+ else
+ p->numa_preferred_nid = -1;
+
p->node_stamp = 0ULL;
p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
- p->numa_migrate_seq = p->mm ? p->mm->numa_scan_seq - 1 : 0;
p->numa_scan_period = sysctl_numa_balancing_scan_delay;
p->numa_work.next = &p->numa_work;
+ p->numa_faults = NULL;
+ p->numa_faults_buffer = NULL;
+
+ INIT_LIST_HEAD(&p->numa_entry);
+ p->numa_group = NULL;
#endif /* CONFIG_NUMA_BALANCING */
}
@@ -1654,12 +1768,12 @@ void set_numabalancing_state(bool enabled)
/*
* fork()/clone()-time setup:
*/
-void sched_fork(struct task_struct *p)
+void sched_fork(unsigned long clone_flags, struct task_struct *p)
{
unsigned long flags;
int cpu = get_cpu();
- __sched_fork(p);
+ __sched_fork(clone_flags, p);
/*
* We mark the process as running here. This guarantees that
* nobody will actually run it, and a signal or other external
@@ -1717,10 +1831,7 @@ void sched_fork(struct task_struct *p)
#if defined(CONFIG_SMP)
p->on_cpu = 0;
#endif
-#ifdef CONFIG_PREEMPT_COUNT
- /* Want to start with kernel preemption disabled. */
- task_thread_info(p)->preempt_count = 1;
-#endif
+ init_task_preempt_count(p);
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
#endif
@@ -1747,7 +1858,7 @@ void wake_up_new_task(struct task_struct *p)
* - cpus_allowed can change in the fork path
* - any previously selected cpu might disappear through hotplug
*/
- set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
+ set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
#endif
/* Initialize new task's runnable average */
@@ -1838,7 +1949,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
trace_sched_switch(prev, next);
- sched_info_switch(prev, next);
+ sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
fire_sched_out_preempt_notifiers(prev, next);
prepare_lock_switch(rq, next);
@@ -1890,6 +2001,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
if (mm)
mmdrop(mm);
if (unlikely(prev_state == TASK_DEAD)) {
+ task_numa_free(prev);
+
/*
* Remove function-return probe instances associated with this
* task and put them back on the free list.
@@ -2073,7 +2186,7 @@ void sched_exec(void)
int dest_cpu;
raw_spin_lock_irqsave(&p->pi_lock, flags);
- dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
+ dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
if (dest_cpu == smp_processor_id())
goto unlock;
@@ -2215,7 +2328,7 @@ notrace unsigned long get_parent_ip(unsigned long addr)
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
defined(CONFIG_PREEMPT_TRACER))
-void __kprobes add_preempt_count(int val)
+void __kprobes preempt_count_add(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
/*
@@ -2224,7 +2337,7 @@ void __kprobes add_preempt_count(int val)
if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
return;
#endif
- preempt_count() += val;
+ __preempt_count_add(val);
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Spinlock count overflowing soon?
@@ -2235,9 +2348,9 @@ void __kprobes add_preempt_count(int val)
if (preempt_count() == val)
trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
-EXPORT_SYMBOL(add_preempt_count);
+EXPORT_SYMBOL(preempt_count_add);
-void __kprobes sub_preempt_count(int val)
+void __kprobes preempt_count_sub(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
/*
@@ -2255,9 +2368,9 @@ void __kprobes sub_preempt_count(int val)
if (preempt_count() == val)
trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
- preempt_count() -= val;
+ __preempt_count_sub(val);
}
-EXPORT_SYMBOL(sub_preempt_count);
+EXPORT_SYMBOL(preempt_count_sub);
#endif
@@ -2430,6 +2543,7 @@ need_resched:
put_prev_task(rq, prev);
next = pick_next_task(rq);
clear_tsk_need_resched(prev);
+ clear_preempt_need_resched();
rq->skip_clock_update = 0;
if (likely(prev != next)) {
@@ -2520,9 +2634,9 @@ asmlinkage void __sched notrace preempt_schedule(void)
return;
do {
- add_preempt_count_notrace(PREEMPT_ACTIVE);
+ __preempt_count_add(PREEMPT_ACTIVE);
__schedule();
- sub_preempt_count_notrace(PREEMPT_ACTIVE);
+ __preempt_count_sub(PREEMPT_ACTIVE);
/*
* Check again in case we missed a preemption opportunity
@@ -2541,20 +2655,19 @@ EXPORT_SYMBOL(preempt_schedule);
*/
asmlinkage void __sched preempt_schedule_irq(void)
{
- struct thread_info *ti = current_thread_info();
enum ctx_state prev_state;
/* Catch callers which need to be fixed */
- BUG_ON(ti->preempt_count || !irqs_disabled());
+ BUG_ON(preempt_count() || !irqs_disabled());
prev_state = exception_enter();
do {
- add_preempt_count(PREEMPT_ACTIVE);
+ __preempt_count_add(PREEMPT_ACTIVE);
local_irq_enable();
__schedule();
local_irq_disable();
- sub_preempt_count(PREEMPT_ACTIVE);
+ __preempt_count_sub(PREEMPT_ACTIVE);
/*
* Check again in case we missed a preemption opportunity
@@ -3598,13 +3711,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
struct task_struct *p;
int retval;
- get_online_cpus();
rcu_read_lock();
p = find_process_by_pid(pid);
if (!p) {
rcu_read_unlock();
- put_online_cpus();
return -ESRCH;
}
@@ -3661,7 +3772,6 @@ out_free_cpus_allowed:
free_cpumask_var(cpus_allowed);
out_put_task:
put_task_struct(p);
- put_online_cpus();
return retval;
}
@@ -3706,7 +3816,6 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
unsigned long flags;
int retval;
- get_online_cpus();
rcu_read_lock();
retval = -ESRCH;
@@ -3719,12 +3828,11 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
goto out_unlock;
raw_spin_lock_irqsave(&p->pi_lock, flags);
- cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
+ cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
rcu_read_unlock();
- put_online_cpus();
return retval;
}
@@ -3794,16 +3902,11 @@ SYSCALL_DEFINE0(sched_yield)
return 0;
}
-static inline int should_resched(void)
-{
- return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
-}
-
static void __cond_resched(void)
{
- add_preempt_count(PREEMPT_ACTIVE);
+ __preempt_count_add(PREEMPT_ACTIVE);
__schedule();
- sub_preempt_count(PREEMPT_ACTIVE);
+ __preempt_count_sub(PREEMPT_ACTIVE);
}
int __sched _cond_resched(void)
@@ -4186,7 +4289,7 @@ void init_idle(struct task_struct *idle, int cpu)
raw_spin_lock_irqsave(&rq->lock, flags);
- __sched_fork(idle);
+ __sched_fork(0, idle);
idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
@@ -4212,7 +4315,7 @@ void init_idle(struct task_struct *idle, int cpu)
raw_spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
- task_thread_info(idle)->preempt_count = 0;
+ init_idle_preempt_count(idle, cpu);
/*
* The idle tasks have their own, simple scheduling class:
@@ -4346,6 +4449,53 @@ fail:
return ret;
}
+#ifdef CONFIG_NUMA_BALANCING
+/* Migrate current task p to target_cpu */
+int migrate_task_to(struct task_struct *p, int target_cpu)
+{
+ struct migration_arg arg = { p, target_cpu };
+ int curr_cpu = task_cpu(p);
+
+ if (curr_cpu == target_cpu)
+ return 0;
+
+ if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
+ return -EINVAL;
+
+ /* TODO: This is not properly updating schedstats */
+
+ return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
+}
+
+/*
+ * Requeue a task on a given node and accurately track the number of NUMA
+ * tasks on the runqueues
+ */
+void sched_setnuma(struct task_struct *p, int nid)
+{
+ struct rq *rq;
+ unsigned long flags;
+ bool on_rq, running;
+
+ rq = task_rq_lock(p, &flags);
+ on_rq = p->on_rq;
+ running = task_current(rq, p);
+
+ if (on_rq)
+ dequeue_task(rq, p, 0);
+ if (running)
+ p->sched_class->put_prev_task(rq, p);
+
+ p->numa_preferred_nid = nid;
+
+ if (running)
+ p->sched_class->set_curr_task(rq);
+ if (on_rq)
+ enqueue_task(rq, p, 0);
+ task_rq_unlock(rq, p, &flags);
+}
+#endif
+
/*
* migration_cpu_stop - this will be executed by a highprio stopper thread
* and performs thread migration by bumping thread off CPU then
@@ -5119,6 +5269,7 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
+DEFINE_PER_CPU(struct sched_domain *, sd_numa);
static void update_top_cache_domain(int cpu)
{
@@ -5135,6 +5286,9 @@ static void update_top_cache_domain(int cpu)
rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
per_cpu(sd_llc_size, cpu) = size;
per_cpu(sd_llc_id, cpu) = id;
+
+ sd = lowest_flag_domain(cpu, SD_NUMA);
+ rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
}
/*
@@ -5654,6 +5808,7 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
| 0*SD_SHARE_PKG_RESOURCES
| 1*SD_SERIALIZE
| 0*SD_PREFER_SIBLING
+ | 1*SD_NUMA
| sd_local_flags(level)
,
.last_balance = jiffies,
@@ -6335,14 +6490,17 @@ void __init sched_init_smp(void)
sched_init_numa();
- get_online_cpus();
+ /*
+ * There's no userspace yet to cause hotplug operations; hence all the
+ * cpu masks are stable and all blatant races in the below code cannot
+ * happen.
+ */
mutex_lock(&sched_domains_mutex);
init_sched_domains(cpu_active_mask);
cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
if (cpumask_empty(non_isolated_cpus))
cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
mutex_unlock(&sched_domains_mutex);
- put_online_cpus();
hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
@@ -6505,6 +6663,7 @@ void __init sched_init(void)
rq->online = 0;
rq->idle_stamp = 0;
rq->avg_idle = 2*sysctl_sched_migration_cost;
+ rq->max_idle_balance_cost = sysctl_sched_migration_cost;
INIT_LIST_HEAD(&rq->cfs_tasks);
@@ -7277,7 +7436,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
runtime_enabled = quota != RUNTIME_INF;
runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
- account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
+ /*
+ * If we need to toggle cfs_bandwidth_used, off->on must occur
+ * before making related changes, and on->off must occur afterwards
+ */
+ if (runtime_enabled && !runtime_was_enabled)
+ cfs_bandwidth_usage_inc();
raw_spin_lock_irq(&cfs_b->lock);
cfs_b->period = ns_to_ktime(period);
cfs_b->quota = quota;
@@ -7303,6 +7467,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
unthrottle_cfs_rq(cfs_rq);
raw_spin_unlock_irq(&rq->lock);
}
+ if (runtime_was_enabled && !runtime_enabled)
+ cfs_bandwidth_usage_dec();
out_unlock:
mutex_unlock(&cfs_constraints_mutex);
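For reference, migrate_swap() above relies on cheap lockless pre-checks that are re-validated under the proper locks in migrate_swap_stop(); a minimal sketch of that check-then-recheck pattern, with hypothetical names, is:

#include <linux/errno.h>
#include <linux/spinlock.h>

struct example_obj {
	bool active;
	int users;
};

static DEFINE_SPINLOCK(example_lock);

static int example_op(struct example_obj *obj)
{
	int ret = -EAGAIN;

	/* Lockless fast-path filter; may race, so it is only a hint. */
	if (!ACCESS_ONCE(obj->active))
		return -EINVAL;

	spin_lock(&example_lock);
	/* Re-check the same condition now that the lock is held. */
	if (!ACCESS_ONCE(obj->active))
		goto unlock;

	obj->users++;		/* the actual protected update */
	ret = 0;
unlock:
	spin_unlock(&example_lock);
	return ret;
}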
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 196559994f7c..5c34d1817e8f 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -15,6 +15,7 @@
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
+#include <linux/mempolicy.h>
#include "sched.h"
@@ -137,6 +138,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ SEQ_printf(m, " %d", cpu_to_node(task_cpu(p)));
+#endif
#ifdef CONFIG_CGROUP_SCHED
SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif
@@ -159,7 +163,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
read_lock_irqsave(&tasklist_lock, flags);
do_each_thread(g, p) {
- if (!p->on_rq || task_cpu(p) != rq_cpu)
+ if (task_cpu(p) != rq_cpu)
continue;
print_task(m, rq, p);
@@ -225,6 +229,14 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
atomic_read(&cfs_rq->tg->runnable_avg));
#endif
#endif
+#ifdef CONFIG_CFS_BANDWIDTH
+ SEQ_printf(m, " .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
+ cfs_rq->tg->cfs_bandwidth.timer_active);
+ SEQ_printf(m, " .%-30s: %d\n", "throttled",
+ cfs_rq->throttled);
+ SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
+ cfs_rq->throttle_count);
+#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
print_cfs_group_stats(m, cpu, cfs_rq->tg);
@@ -345,7 +357,7 @@ static void sched_debug_header(struct seq_file *m)
cpu_clk = local_clock();
local_irq_restore(flags);
- SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
+ SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
@@ -488,6 +500,56 @@ static int __init init_sched_debug_procfs(void)
__initcall(init_sched_debug_procfs);
+#define __P(F) \
+ SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
+#define P(F) \
+ SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
+#define __PN(F) \
+ SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
+#define PN(F) \
+ SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
+
+
+static void sched_show_numa(struct task_struct *p, struct seq_file *m)
+{
+#ifdef CONFIG_NUMA_BALANCING
+ struct mempolicy *pol;
+ int node, i;
+
+ if (p->mm)
+ P(mm->numa_scan_seq);
+
+ task_lock(p);
+ pol = p->mempolicy;
+ if (pol && !(pol->flags & MPOL_F_MORON))
+ pol = NULL;
+ mpol_get(pol);
+ task_unlock(p);
+
+ SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0));
+
+ for_each_online_node(node) {
+ for (i = 0; i < 2; i++) {
+ unsigned long nr_faults = -1;
+ int cpu_current, home_node;
+
+ if (p->numa_faults)
+ nr_faults = p->numa_faults[2*node + i];
+
+ cpu_current = !i ? (task_node(p) == node) :
+ (pol && node_isset(node, pol->v.nodes));
+
+ home_node = (p->numa_preferred_nid == node);
+
+ SEQ_printf(m, "numa_faults, %d, %d, %d, %d, %ld\n",
+ i, node, cpu_current, home_node, nr_faults);
+ }
+ }
+
+ mpol_put(pol);
+#endif
+}
+
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
unsigned long nr_switches;
@@ -591,6 +653,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
SEQ_printf(m, "%-45s:%21Ld\n",
"clock-delta", (long long)(t1-t0));
}
+
+ sched_show_numa(p, m);
}
void proc_sched_set_task(struct task_struct *p)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7c70201fbc61..41c02b6b090e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -681,6 +681,8 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
#ifdef CONFIG_SMP
+static unsigned long task_h_load(struct task_struct *p);
+
static inline void __update_task_entity_contrib(struct sched_entity *se);
/* Give new task start runnable values to heavy its load in infant time */
@@ -818,11 +820,12 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
#ifdef CONFIG_NUMA_BALANCING
/*
- * numa task sample period in ms
+ * Approximate time to scan a full NUMA task in ms. The task scan period is
+ * calculated based on the task's virtual memory size and
+ * numa_balancing_scan_size.
*/
-unsigned int sysctl_numa_balancing_scan_period_min = 100;
-unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
-unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;
+unsigned int sysctl_numa_balancing_scan_period_min = 1000;
+unsigned int sysctl_numa_balancing_scan_period_max = 60000;
/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;
@@ -830,41 +833,810 @@ unsigned int sysctl_numa_balancing_scan_size = 256;
/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;
-static void task_numa_placement(struct task_struct *p)
+/*
+ * After skipping a page migration on a shared page, skip N more numa page
+ * migrations unconditionally. This reduces the number of NUMA migrations
+ * in shared memory workloads, and has the effect of pulling tasks towards
+ * where their memory lives, over pulling the memory towards the task.
+ */
+unsigned int sysctl_numa_balancing_migrate_deferred = 16;
+
+static unsigned int task_nr_scan_windows(struct task_struct *p)
+{
+ unsigned long rss = 0;
+ unsigned long nr_scan_pages;
+
+ /*
+ * Calculations based on RSS as non-present and empty pages are skipped
+ * by the PTE scanner and NUMA hinting faults should be trapped based
+ * on resident pages
+ */
+ nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
+ rss = get_mm_rss(p->mm);
+ if (!rss)
+ rss = nr_scan_pages;
+
+ rss = round_up(rss, nr_scan_pages);
+ return rss / nr_scan_pages;
+}
+
+/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
+#define MAX_SCAN_WINDOW 2560
+
+static unsigned int task_scan_min(struct task_struct *p)
+{
+ unsigned int scan, floor;
+ unsigned int windows = 1;
+
+ if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
+ windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
+ floor = 1000 / windows;
+
+ scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
+ return max_t(unsigned int, floor, scan);
+}
+
+static unsigned int task_scan_max(struct task_struct *p)
+{
+ unsigned int smin = task_scan_min(p);
+ unsigned int smax;
+
+ /* Watch for min being lower than max due to floor calculations */
+ smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
+ return max(smin, smax);
+}
+
+/*
+ * Once a preferred node is selected the scheduler balancer will prefer moving
+ * a task to that node for sysctl_numa_balancing_settle_count number of PTE
+ * scans. This will give the process the chance to accumulate more faults on
+ * the preferred node but still allow the scheduler to move the task again if
+ * the nodes CPUs are overloaded.
+ */
+unsigned int sysctl_numa_balancing_settle_count __read_mostly = 4;
+
+static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
+{
+ rq->nr_numa_running += (p->numa_preferred_nid != -1);
+ rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
+}
+
+static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
+{
+ rq->nr_numa_running -= (p->numa_preferred_nid != -1);
+ rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
+}
+
+struct numa_group {
+ atomic_t refcount;
+
+ spinlock_t lock; /* nr_tasks, tasks */
+ int nr_tasks;
+ pid_t gid;
+ struct list_head task_list;
+
+ struct rcu_head rcu;
+ unsigned long total_faults;
+ unsigned long faults[0];
+};
+
+pid_t task_numa_group_id(struct task_struct *p)
+{
+ return p->numa_group ? p->numa_group->gid : 0;
+}
+
+static inline int task_faults_idx(int nid, int priv)
+{
+ return 2 * nid + priv;
+}
+
+static inline unsigned long task_faults(struct task_struct *p, int nid)
+{
+ if (!p->numa_faults)
+ return 0;
+
+ return p->numa_faults[task_faults_idx(nid, 0)] +
+ p->numa_faults[task_faults_idx(nid, 1)];
+}
+
+static inline unsigned long group_faults(struct task_struct *p, int nid)
+{
+ if (!p->numa_group)
+ return 0;
+
+ return p->numa_group->faults[2*nid] + p->numa_group->faults[2*nid+1];
+}
+
+/*
+ * These return the fraction of accesses done by a particular task, or
+ * task group, on a particular numa node. The group weight is given a
+ * larger multiplier, in order to group tasks together that are almost
+ * evenly spread out between numa nodes.
+ */
+static inline unsigned long task_weight(struct task_struct *p, int nid)
+{
+ unsigned long total_faults;
+
+ if (!p->numa_faults)
+ return 0;
+
+ total_faults = p->total_numa_faults;
+
+ if (!total_faults)
+ return 0;
+
+ return 1000 * task_faults(p, nid) / total_faults;
+}
+
+static inline unsigned long group_weight(struct task_struct *p, int nid)
{
- int seq;
+ if (!p->numa_group || !p->numa_group->total_faults)
+ return 0;
+
+ return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
+}
+
+static unsigned long weighted_cpuload(const int cpu);
+static unsigned long source_load(int cpu, int type);
+static unsigned long target_load(int cpu, int type);
+static unsigned long power_of(int cpu);
+static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
+
+/* Cached statistics for all CPUs within a node */
+struct numa_stats {
+ unsigned long nr_running;
+ unsigned long load;
+
+ /* Total compute capacity of CPUs on a node */
+ unsigned long power;
+
+ /* Approximate capacity in terms of runnable tasks on a node */
+ unsigned long capacity;
+ int has_capacity;
+};
+
+/*
+ * XXX borrowed from update_sg_lb_stats
+ */
+static void update_numa_stats(struct numa_stats *ns, int nid)
+{
+ int cpu;
+
+ memset(ns, 0, sizeof(*ns));
+ for_each_cpu(cpu, cpumask_of_node(nid)) {
+ struct rq *rq = cpu_rq(cpu);
+
+ ns->nr_running += rq->nr_running;
+ ns->load += weighted_cpuload(cpu);
+ ns->power += power_of(cpu);
+ }
+
+ ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
+ ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
+ ns->has_capacity = (ns->nr_running < ns->capacity);
+}
+
+struct task_numa_env {
+ struct task_struct *p;
+
+ int src_cpu, src_nid;
+ int dst_cpu, dst_nid;
+
+ struct numa_stats src_stats, dst_stats;
+
+ int imbalance_pct, idx;
+
+ struct task_struct *best_task;
+ long best_imp;
+ int best_cpu;
+};
+
+static void task_numa_assign(struct task_numa_env *env,
+ struct task_struct *p, long imp)
+{
+ if (env->best_task)
+ put_task_struct(env->best_task);
+ if (p)
+ get_task_struct(p);
+
+ env->best_task = p;
+ env->best_imp = imp;
+ env->best_cpu = env->dst_cpu;
+}
+
+/*
+ * This checks if the overall compute and NUMA accesses of the system would
+ * be improved if the source tasks was migrated to the target dst_cpu taking
+ * into account that it might be best if task running on the dst_cpu should
+ * be exchanged with the source task
+ */
+static void task_numa_compare(struct task_numa_env *env,
+ long taskimp, long groupimp)
+{
+ struct rq *src_rq = cpu_rq(env->src_cpu);
+ struct rq *dst_rq = cpu_rq(env->dst_cpu);
+ struct task_struct *cur;
+ long dst_load, src_load;
+ long load;
+ long imp = (groupimp > 0) ? groupimp : taskimp;
+
+ rcu_read_lock();
+ cur = ACCESS_ONCE(dst_rq->curr);
+ if (cur->pid == 0) /* idle */
+ cur = NULL;
+
+ /*
+ * "imp" is the fault differential for the source task between the
+ * source and destination node. Calculate the total differential for
+ * the source task and potential destination task. The more negative
+ * the value is, the more remote accesses that would be expected to
+ * be incurred if the tasks were swapped.
+ */
+ if (cur) {
+ /* Skip this swap candidate if cannot move to the source cpu */
+ if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
+ goto unlock;
+
+ /*
+ * If dst and source tasks are in the same NUMA group, or not
+ * in any group then look only at task weights.
+ */
+ if (cur->numa_group == env->p->numa_group) {
+ imp = taskimp + task_weight(cur, env->src_nid) -
+ task_weight(cur, env->dst_nid);
+ /*
+ * Add some hysteresis to prevent swapping the
+ * tasks within a group over tiny differences.
+ */
+ if (cur->numa_group)
+ imp -= imp/16;
+ } else {
+ /*
+ * Compare the group weights. If a task is all by
+ * itself (not part of a group), use the task weight
+ * instead.
+ */
+ if (env->p->numa_group)
+ imp = groupimp;
+ else
+ imp = taskimp;
+
+ if (cur->numa_group)
+ imp += group_weight(cur, env->src_nid) -
+ group_weight(cur, env->dst_nid);
+ else
+ imp += task_weight(cur, env->src_nid) -
+ task_weight(cur, env->dst_nid);
+ }
+ }
+
+ if (imp < env->best_imp)
+ goto unlock;
+
+ if (!cur) {
+ /* Is there capacity at our destination? */
+ if (env->src_stats.has_capacity &&
+ !env->dst_stats.has_capacity)
+ goto unlock;
+
+ goto balance;
+ }
+
+ /* Balance doesn't matter much if we're running a task per cpu */
+ if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
+ goto assign;
+
+ /*
+ * In the overloaded case, try and keep the load balanced.
+ */
+balance:
+ dst_load = env->dst_stats.load;
+ src_load = env->src_stats.load;
+
+ /* XXX missing power terms */
+ load = task_h_load(env->p);
+ dst_load += load;
+ src_load -= load;
+
+ if (cur) {
+ load = task_h_load(cur);
+ dst_load -= load;
+ src_load += load;
+ }
+
+ /* make src_load the smaller */
+ if (dst_load < src_load)
+ swap(dst_load, src_load);
+
+ if (src_load * env->imbalance_pct < dst_load * 100)
+ goto unlock;
+
+assign:
+ task_numa_assign(env, cur, imp);
+unlock:
+ rcu_read_unlock();
+}
+
+static void task_numa_find_cpu(struct task_numa_env *env,
+ long taskimp, long groupimp)
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
+ /* Skip this CPU if the source task cannot migrate */
+ if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
+ continue;
+
+ env->dst_cpu = cpu;
+ task_numa_compare(env, taskimp, groupimp);
+ }
+}
+
+static int task_numa_migrate(struct task_struct *p)
+{
+ struct task_numa_env env = {
+ .p = p,
+
+ .src_cpu = task_cpu(p),
+ .src_nid = task_node(p),
+
+ .imbalance_pct = 112,
+
+ .best_task = NULL,
+ .best_imp = 0,
+ .best_cpu = -1
+ };
+ struct sched_domain *sd;
+ unsigned long taskweight, groupweight;
+ int nid, ret;
+ long taskimp, groupimp;
+
+ /*
+ * Pick the lowest SD_NUMA domain, as that would have the smallest
+ * imbalance and would be the first to start moving tasks about.
+ *
+ * And we want to avoid any moving of tasks about, as that would create
+ * random movement of tasks -- counter to the numa conditions we're trying
+ * to satisfy here.
+ */
+ rcu_read_lock();
+ sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
+ env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
+ rcu_read_unlock();
+
+ taskweight = task_weight(p, env.src_nid);
+ groupweight = group_weight(p, env.src_nid);
+ update_numa_stats(&env.src_stats, env.src_nid);
+ env.dst_nid = p->numa_preferred_nid;
+ taskimp = task_weight(p, env.dst_nid) - taskweight;
+ groupimp = group_weight(p, env.dst_nid) - groupweight;
+ update_numa_stats(&env.dst_stats, env.dst_nid);
+
+ /* If the preferred nid has capacity, try to use it. */
+ if (env.dst_stats.has_capacity)
+ task_numa_find_cpu(&env, taskimp, groupimp);
+
+ /* No space available on the preferred nid. Look elsewhere. */
+ if (env.best_cpu == -1) {
+ for_each_online_node(nid) {
+ if (nid == env.src_nid || nid == p->numa_preferred_nid)
+ continue;
+
+ /* Only consider nodes where both task and groups benefit */
+ taskimp = task_weight(p, nid) - taskweight;
+ groupimp = group_weight(p, nid) - groupweight;
+ if (taskimp < 0 && groupimp < 0)
+ continue;
+
+ env.dst_nid = nid;
+ update_numa_stats(&env.dst_stats, env.dst_nid);
+ task_numa_find_cpu(&env, taskimp, groupimp);
+ }
+ }
+
+ /* No better CPU than the current one was found. */
+ if (env.best_cpu == -1)
+ return -EAGAIN;
+
+ sched_setnuma(p, env.dst_nid);
+
+ /*
+ * Reset the scan period if the task is being rescheduled on an
+ * alternative node to recheck if the task is now properly placed.
+ */
+ p->numa_scan_period = task_scan_min(p);
+
+ if (env.best_task == NULL) {
+ int ret = migrate_task_to(p, env.best_cpu);
+ return ret;
+ }
+
+ ret = migrate_swap(p, env.best_task);
+ put_task_struct(env.best_task);
+ return ret;
+}
+
+/* Attempt to migrate a task to a CPU on the preferred node. */
+static void numa_migrate_preferred(struct task_struct *p)
+{
+ /* This task has no NUMA fault statistics yet */
+ if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
+ return;
+
+ /* Periodically retry migrating the task to the preferred node */
+ p->numa_migrate_retry = jiffies + HZ;
+
+ /* Success if task is already running on preferred CPU */
+ if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid)
+ return;
+
+ /* Otherwise, try migrate to a CPU on the preferred node */
+ task_numa_migrate(p);
+}
+
+/*
+ * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
+ * increments. The more local the fault statistics are, the higher the scan
+ * period will be for the next scan window. If local/remote ratio is below
+ * NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS) the
+ * scan period will decrease
+ */
+#define NUMA_PERIOD_SLOTS 10
+#define NUMA_PERIOD_THRESHOLD 3
+
+/*
+ * Increase the scan period (slow down scanning) if the majority of
+ * our memory is already on our local node, or if the majority of
+ * the page accesses are shared with other processes.
+ * Otherwise, decrease the scan period.
+ */
+static void update_task_scan_period(struct task_struct *p,
+ unsigned long shared, unsigned long private)
+{
+ unsigned int period_slot;
+ int ratio;
+ int diff;
+
+ unsigned long remote = p->numa_faults_locality[0];
+ unsigned long local = p->numa_faults_locality[1];
+
+ /*
+ * If there were no recorded hinting faults then either the task is
+ * completely idle or all activity is in areas that are not of interest
+ * to automatic numa balancing. Scan slower
+ */
+ if (local + shared == 0) {
+ p->numa_scan_period = min(p->numa_scan_period_max,
+ p->numa_scan_period << 1);
+
+ p->mm->numa_next_scan = jiffies +
+ msecs_to_jiffies(p->numa_scan_period);
- if (!p->mm) /* for example, ksmd faulting in a user's mm */
return;
+ }
+
+ /*
+ * Prepare to scale scan period relative to the current period.
+ * == NUMA_PERIOD_THRESHOLD scan period stays the same
+ * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
+ * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
+ */
+ period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
+ ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
+ if (ratio >= NUMA_PERIOD_THRESHOLD) {
+ int slot = ratio - NUMA_PERIOD_THRESHOLD;
+ if (!slot)
+ slot = 1;
+ diff = slot * period_slot;
+ } else {
+ diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
+
+ /*
+ * Scale scan rate increases based on sharing. There is an
+ * inverse relationship between the degree of sharing and
+ * the adjustment made to the scanning period. Broadly
+ * speaking the intent is that there is little point
+ * scanning faster if shared accesses dominate as it may
+ * simply bounce migrations uselessly
+ */
+ period_slot = DIV_ROUND_UP(diff, NUMA_PERIOD_SLOTS);
+ ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
+ diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
+ }
+
+ p->numa_scan_period = clamp(p->numa_scan_period + diff,
+ task_scan_min(p), task_scan_max(p));
+ memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
+}
+
+static void task_numa_placement(struct task_struct *p)
+{
+ int seq, nid, max_nid = -1, max_group_nid = -1;
+ unsigned long max_faults = 0, max_group_faults = 0;
+ unsigned long fault_types[2] = { 0, 0 };
+ spinlock_t *group_lock = NULL;
+
seq = ACCESS_ONCE(p->mm->numa_scan_seq);
if (p->numa_scan_seq == seq)
return;
p->numa_scan_seq = seq;
+ p->numa_scan_period_max = task_scan_max(p);
+
+ /* If the task is part of a group prevent parallel updates to group stats */
+ if (p->numa_group) {
+ group_lock = &p->numa_group->lock;
+ spin_lock(group_lock);
+ }
+
+ /* Find the node with the highest number of faults */
+ for_each_online_node(nid) {
+ unsigned long faults = 0, group_faults = 0;
+ int priv, i;
+
+ for (priv = 0; priv < 2; priv++) {
+ long diff;
+
+ i = task_faults_idx(nid, priv);
+ diff = -p->numa_faults[i];
+
+ /* Decay existing window, copy faults since last scan */
+ p->numa_faults[i] >>= 1;
+ p->numa_faults[i] += p->numa_faults_buffer[i];
+ fault_types[priv] += p->numa_faults_buffer[i];
+ p->numa_faults_buffer[i] = 0;
+
+ faults += p->numa_faults[i];
+ diff += p->numa_faults[i];
+ p->total_numa_faults += diff;
+ if (p->numa_group) {
+ /* safe because we can only change our own group */
+ p->numa_group->faults[i] += diff;
+ p->numa_group->total_faults += diff;
+ group_faults += p->numa_group->faults[i];
+ }
+ }
+
+ if (faults > max_faults) {
+ max_faults = faults;
+ max_nid = nid;
+ }
+
+ if (group_faults > max_group_faults) {
+ max_group_faults = group_faults;
+ max_group_nid = nid;
+ }
+ }
+
+ update_task_scan_period(p, fault_types[0], fault_types[1]);
+
+ if (p->numa_group) {
+ /*
+ * If the preferred task and group nids are different,
+ * iterate over the nodes again to find the best place.
+ */
+ if (max_nid != max_group_nid) {
+ unsigned long weight, max_weight = 0;
+
+ for_each_online_node(nid) {
+ weight = task_weight(p, nid) + group_weight(p, nid);
+ if (weight > max_weight) {
+ max_weight = weight;
+ max_nid = nid;
+ }
+ }
+ }
+
+ spin_unlock(group_lock);
+ }
- /* FIXME: Scheduling placement policy hints go here */
+ /* Preferred node as the node with the most faults */
+ if (max_faults && max_nid != p->numa_preferred_nid) {
+ /* Update the preferred nid and migrate task if possible */
+ sched_setnuma(p, max_nid);
+ numa_migrate_preferred(p);
+ }
+}
+
+static inline int get_numa_group(struct numa_group *grp)
+{
+ return atomic_inc_not_zero(&grp->refcount);
+}
+
+static inline void put_numa_group(struct numa_group *grp)
+{
+ if (atomic_dec_and_test(&grp->refcount))
+ kfree_rcu(grp, rcu);
+}
+
+static void task_numa_group(struct task_struct *p, int cpupid, int flags,
+ int *priv)
+{
+ struct numa_group *grp, *my_grp;
+ struct task_struct *tsk;
+ bool join = false;
+ int cpu = cpupid_to_cpu(cpupid);
+ int i;
+
+ if (unlikely(!p->numa_group)) {
+ unsigned int size = sizeof(struct numa_group) +
+ 2*nr_node_ids*sizeof(unsigned long);
+
+ grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (!grp)
+ return;
+
+ atomic_set(&grp->refcount, 1);
+ spin_lock_init(&grp->lock);
+ INIT_LIST_HEAD(&grp->task_list);
+ grp->gid = p->pid;
+
+ for (i = 0; i < 2*nr_node_ids; i++)
+ grp->faults[i] = p->numa_faults[i];
+
+ grp->total_faults = p->total_numa_faults;
+
+ list_add(&p->numa_entry, &grp->task_list);
+ grp->nr_tasks++;
+ rcu_assign_pointer(p->numa_group, grp);
+ }
+
+ rcu_read_lock();
+ tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
+
+ if (!cpupid_match_pid(tsk, cpupid))
+ goto no_join;
+
+ grp = rcu_dereference(tsk->numa_group);
+ if (!grp)
+ goto no_join;
+
+ my_grp = p->numa_group;
+ if (grp == my_grp)
+ goto no_join;
+
+ /*
+ * Only join the other group if it's bigger; if we're the bigger group,
+ * the other task will join us.
+ */
+ if (my_grp->nr_tasks > grp->nr_tasks)
+ goto no_join;
+
+ /*
+ * Tie-break on the grp address.
+ */
+ if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
+ goto no_join;
+
+ /* Always join threads in the same process. */
+ if (tsk->mm == current->mm)
+ join = true;
+
+ /* Simple filter to avoid false positives due to PID collisions */
+ if (flags & TNF_SHARED)
+ join = true;
+
+ /* Update priv based on whether false sharing was detected */
+ *priv = !join;
+
+ if (join && !get_numa_group(grp))
+ goto no_join;
+
+ rcu_read_unlock();
+
+ if (!join)
+ return;
+
+ double_lock(&my_grp->lock, &grp->lock);
+
+ for (i = 0; i < 2*nr_node_ids; i++) {
+ my_grp->faults[i] -= p->numa_faults[i];
+ grp->faults[i] += p->numa_faults[i];
+ }
+ my_grp->total_faults -= p->total_numa_faults;
+ grp->total_faults += p->total_numa_faults;
+
+ list_move(&p->numa_entry, &grp->task_list);
+ my_grp->nr_tasks--;
+ grp->nr_tasks++;
+
+ spin_unlock(&my_grp->lock);
+ spin_unlock(&grp->lock);
+
+ rcu_assign_pointer(p->numa_group, grp);
+
+ put_numa_group(my_grp);
+ return;
+
+no_join:
+ rcu_read_unlock();
+ return;
+}
+
+void task_numa_free(struct task_struct *p)
+{
+ struct numa_group *grp = p->numa_group;
+ int i;
+ void *numa_faults = p->numa_faults;
+
+ if (grp) {
+ spin_lock(&grp->lock);
+ for (i = 0; i < 2*nr_node_ids; i++)
+ grp->faults[i] -= p->numa_faults[i];
+ grp->total_faults -= p->total_numa_faults;
+
+ list_del(&p->numa_entry);
+ grp->nr_tasks--;
+ spin_unlock(&grp->lock);
+ rcu_assign_pointer(p->numa_group, NULL);
+ put_numa_group(grp);
+ }
+
+ p->numa_faults = NULL;
+ p->numa_faults_buffer = NULL;
+ kfree(numa_faults);
}
/*
* Got a PROT_NONE fault for a page on @node.
*/
-void task_numa_fault(int node, int pages, bool migrated)
+void task_numa_fault(int last_cpupid, int node, int pages, int flags)
{
struct task_struct *p = current;
+ bool migrated = flags & TNF_MIGRATED;
+ int priv;
if (!numabalancing_enabled)
return;
- /* FIXME: Allocate task-specific structure for placement policy here */
+ /* for example, ksmd faulting in a user's mm */
+ if (!p->mm)
+ return;
+
+ /* Do not worry about placement if exiting */
+ if (p->state == TASK_DEAD)
+ return;
+
+ /* Allocate buffer to track faults on a per-node basis */
+ if (unlikely(!p->numa_faults)) {
+ int size = sizeof(*p->numa_faults) * 2 * nr_node_ids;
+
+ /* numa_faults and numa_faults_buffer share the allocation */
+ p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
+ if (!p->numa_faults)
+ return;
+
+ BUG_ON(p->numa_faults_buffer);
+ p->numa_faults_buffer = p->numa_faults + (2 * nr_node_ids);
+ p->total_numa_faults = 0;
+ memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
+ }
/*
- * If pages are properly placed (did not migrate) then scan slower.
- * This is reset periodically in case of phase changes
+ * First accesses are treated as private, otherwise consider accesses
+ * to be private if the accessing pid has not changed
*/
- if (!migrated)
- p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
- p->numa_scan_period + jiffies_to_msecs(10));
+ if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
+ priv = 1;
+ } else {
+ priv = cpupid_match_pid(p, last_cpupid);
+ if (!priv && !(flags & TNF_NO_GROUP))
+ task_numa_group(p, last_cpupid, flags, &priv);
+ }
task_numa_placement(p);
+
+ /*
+ * Retry task to preferred node migration periodically, in case it
+ * previously failed, or the scheduler moved us.
+ */
+ if (time_after(jiffies, p->numa_migrate_retry))
+ numa_migrate_preferred(p);
+
+ if (migrated)
+ p->numa_pages_migrated += pages;
+
+ p->numa_faults_buffer[task_faults_idx(node, priv)] += pages;
+ p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages;
}
static void reset_ptenuma_scan(struct task_struct *p)
@@ -884,6 +1656,7 @@ void task_numa_work(struct callback_head *work)
struct mm_struct *mm = p->mm;
struct vm_area_struct *vma;
unsigned long start, end;
+ unsigned long nr_pte_updates = 0;
long pages;
WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
@@ -900,35 +1673,9 @@ void task_numa_work(struct callback_head *work)
if (p->flags & PF_EXITING)
return;
- /*
- * We do not care about task placement until a task runs on a node
- * other than the first one used by the address space. This is
- * largely because migrations are driven by what CPU the task
- * is running on. If it's never scheduled on another node, it'll
- * not migrate so why bother trapping the fault.
- */
- if (mm->first_nid == NUMA_PTE_SCAN_INIT)
- mm->first_nid = numa_node_id();
- if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
- /* Are we running on a new node yet? */
- if (numa_node_id() == mm->first_nid &&
- !sched_feat_numa(NUMA_FORCE))
- return;
-
- mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
- }
-
- /*
- * Reset the scan period if enough time has gone by. Objective is that
- * scanning will be reduced if pages are properly placed. As tasks
- * can enter different phases this needs to be re-examined. Lacking
- * proper tracking of reference behaviour, this blunt hammer is used.
- */
- migrate = mm->numa_next_reset;
- if (time_after(now, migrate)) {
- p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
- next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
- xchg(&mm->numa_next_reset, next_scan);
+ if (!mm->numa_next_scan) {
+ mm->numa_next_scan = now +
+ msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
}
/*
@@ -938,20 +1685,20 @@ void task_numa_work(struct callback_head *work)
if (time_before(now, migrate))
return;
- if (p->numa_scan_period == 0)
- p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
+ if (p->numa_scan_period == 0) {
+ p->numa_scan_period_max = task_scan_max(p);
+ p->numa_scan_period = task_scan_min(p);
+ }
next_scan = now + msecs_to_jiffies(p->numa_scan_period);
if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
return;
/*
- * Do not set pte_numa if the current running node is rate-limited.
- * This loses statistics on the fault but if we are unwilling to
- * migrate to this node, it is less likely we can do useful work
+ * Delay this task enough that another task of this mm will likely win
+ * the next time around.
*/
- if (migrate_ratelimited(numa_node_id()))
- return;
+ p->node_stamp += 2 * TICK_NSEC;
start = mm->numa_scan_offset;
pages = sysctl_numa_balancing_scan_size;
@@ -967,18 +1714,32 @@ void task_numa_work(struct callback_head *work)
vma = mm->mmap;
}
for (; vma; vma = vma->vm_next) {
- if (!vma_migratable(vma))
+ if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
continue;
- /* Skip small VMAs. They are not likely to be of relevance */
- if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
+ /*
+ * Shared library pages mapped by multiple processes are not
+ * migrated as it is expected they are cache replicated. Avoid
+ * hinting faults in read-only file-backed mappings or the vdso
+ * as migrating the pages will be of marginal benefit.
+ */
+ if (!vma->vm_mm ||
+ (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
continue;
do {
start = max(start, vma->vm_start);
end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
end = min(end, vma->vm_end);
- pages -= change_prot_numa(vma, start, end);
+ nr_pte_updates += change_prot_numa(vma, start, end);
+
+ /*
+ * Scan sysctl_numa_balancing_scan_size but ensure that
+ * at least one PTE is updated so that unused virtual
+ * address space is quickly skipped.
+ */
+ if (nr_pte_updates)
+ pages -= (end - start) >> PAGE_SHIFT;
start = end;
if (pages <= 0)
@@ -988,10 +1749,10 @@ void task_numa_work(struct callback_head *work)
out:
/*
- * It is possible to reach the end of the VMA list but the last few VMAs are
- * not guaranteed to the vma_migratable. If they are not, we would find the
- * !migratable VMA on the next scan but not reset the scanner to the start
- * so check it now.
+ * It is possible to reach the end of the VMA list but the last few
+ * VMAs are not guaranteed to be vma_migratable. If they are not, we
+ * would find the !migratable VMA on the next scan but not reset the
+ * scanner to the start so check it now.
*/
if (vma)
mm->numa_scan_offset = start;
@@ -1025,8 +1786,8 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
if (now - curr->node_stamp > period) {
if (!curr->node_stamp)
- curr->numa_scan_period = sysctl_numa_balancing_scan_period_min;
- curr->node_stamp = now;
+ curr->numa_scan_period = task_scan_min(curr);
+ curr->node_stamp += period;
if (!time_before(jiffies, curr->mm->numa_next_scan)) {
init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
@@ -1038,6 +1799,14 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
}
+
+static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
+{
+}
#endif /* CONFIG_NUMA_BALANCING */
static void
@@ -1047,8 +1816,12 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (!parent_entity(se))
update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
- if (entity_is_task(se))
- list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
+ if (entity_is_task(se)) {
+ struct rq *rq = rq_of(cfs_rq);
+
+ account_numa_enqueue(rq, task_of(se));
+ list_add(&se->group_node, &rq->cfs_tasks);
+ }
#endif
cfs_rq->nr_running++;
}
@@ -1059,8 +1832,10 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
update_load_sub(&cfs_rq->load, se->load.weight);
if (!parent_entity(se))
update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
- if (entity_is_task(se))
+ if (entity_is_task(se)) {
+ account_numa_dequeue(rq_of(cfs_rq), task_of(se));
list_del_init(&se->group_node);
+ }
cfs_rq->nr_running--;
}
@@ -2070,13 +2845,14 @@ static inline bool cfs_bandwidth_used(void)
return static_key_false(&__cfs_bandwidth_used);
}
-void account_cfs_bandwidth_used(int enabled, int was_enabled)
+void cfs_bandwidth_usage_inc(void)
{
- /* only need to count groups transitioning between enabled/!enabled */
- if (enabled && !was_enabled)
- static_key_slow_inc(&__cfs_bandwidth_used);
- else if (!enabled && was_enabled)
- static_key_slow_dec(&__cfs_bandwidth_used);
+ static_key_slow_inc(&__cfs_bandwidth_used);
+}
+
+void cfs_bandwidth_usage_dec(void)
+{
+ static_key_slow_dec(&__cfs_bandwidth_used);
}
#else /* HAVE_JUMP_LABEL */
static bool cfs_bandwidth_used(void)
@@ -2084,7 +2860,8 @@ static bool cfs_bandwidth_used(void)
return true;
}
-void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
+void cfs_bandwidth_usage_inc(void) {}
+void cfs_bandwidth_usage_dec(void) {}
#endif /* HAVE_JUMP_LABEL */
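
The API change above leans on static_key_slow_inc()/dec() already being reference counted, so callers no longer have to describe the enabled/!enabled transition themselves. A user-space analogue with an explicit refcount (plain C, not the jump-label machinery; key_enabled() stands in for static_key_false(&__cfs_bandwidth_used)):

/* User-space analogue of the refcounted static key used above. */
#include <stdio.h>
#include <stdbool.h>

static int refcount;

static bool key_enabled(void) { return refcount > 0; }
static void usage_inc(void)   { refcount++; }	/* cfs_bandwidth_usage_inc() */
static void usage_dec(void)   { refcount--; }	/* cfs_bandwidth_usage_dec() */

int main(void)
{
	usage_inc();			/* first group enables bandwidth control */
	usage_inc();			/* second group: no transition logic needed */
	printf("used: %d\n", key_enabled());
	usage_dec();
	printf("after one dec: %d\n", key_enabled());	/* still enabled */
	usage_dec();
	printf("after both dec: %d\n", key_enabled());	/* back to disabled */
	return 0;
}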
/*
@@ -2335,6 +3112,8 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq->throttled_clock = rq_clock(rq);
raw_spin_lock(&cfs_b->lock);
list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+ if (!cfs_b->timer_active)
+ __start_cfs_bandwidth(cfs_b);
raw_spin_unlock(&cfs_b->lock);
}
@@ -2448,6 +3227,13 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
if (idle)
goto out_unlock;
+ /*
+ * if we have relooped after returning idle once, we need to update our
+ * status as actually running, so that other cpus doing
+ * __start_cfs_bandwidth will stop trying to cancel us.
+ */
+ cfs_b->timer_active = 1;
+
__refill_cfs_bandwidth_runtime(cfs_b);
if (!throttled) {
@@ -2508,7 +3294,13 @@ static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
/* how long we wait to gather additional slack before distributing */
static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
-/* are we near the end of the current quota period? */
+/*
+ * Are we near the end of the current quota period?
+ *
+ * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
+ * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
+ * migrate_hrtimers, base is never cleared, so we are fine.
+ */
static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
{
struct hrtimer *refresh_timer = &cfs_b->period_timer;
@@ -2584,10 +3376,12 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
u64 expires;
/* confirm we're still not at a refresh boundary */
- if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
+ raw_spin_lock(&cfs_b->lock);
+ if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
+ raw_spin_unlock(&cfs_b->lock);
return;
+ }
- raw_spin_lock(&cfs_b->lock);
if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
runtime = cfs_b->runtime;
cfs_b->runtime = 0;
@@ -2708,11 +3502,11 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
* (timer_active==0 becomes visible before the hrtimer call-back
* terminates). In either case we ensure that it's re-programmed
*/
- while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
+ while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
+ hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
+ /* bounce the lock to allow do_sched_cfs_period_timer to run */
raw_spin_unlock(&cfs_b->lock);
- /* ensure cfs_b->lock is available while we wait */
- hrtimer_cancel(&cfs_b->period_timer);
-
+ cpu_relax();
raw_spin_lock(&cfs_b->lock);
/* if someone else restarted the timer then we're done */
if (cfs_b->timer_active)
@@ -3113,7 +3907,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
struct sched_entity *se = tg->se[cpu];
- if (!tg->parent) /* the trivial, non-cgroup case */
+ if (!tg->parent || !wl) /* the trivial, non-cgroup case */
return wl;
for_each_sched_entity(se) {
@@ -3166,8 +3960,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
}
#else
-static inline unsigned long effective_load(struct task_group *tg, int cpu,
- unsigned long wl, unsigned long wg)
+static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
return wl;
}
@@ -3420,11 +4213,10 @@ done:
* preempt must be disabled.
*/
static int
-select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
+select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
{
struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
int cpu = smp_processor_id();
- int prev_cpu = task_cpu(p);
int new_cpu = cpu;
int want_affine = 0;
int sync = wake_flags & WF_SYNC;
@@ -3904,9 +4696,12 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp
static unsigned long __read_mostly max_load_balance_interval = HZ/10;
+enum fbq_type { regular, remote, all };
+
#define LBF_ALL_PINNED 0x01
#define LBF_NEED_BREAK 0x02
-#define LBF_SOME_PINNED 0x04
+#define LBF_DST_PINNED 0x04
+#define LBF_SOME_PINNED 0x08
struct lb_env {
struct sched_domain *sd;
@@ -3929,6 +4724,8 @@ struct lb_env {
unsigned int loop;
unsigned int loop_break;
unsigned int loop_max;
+
+ enum fbq_type fbq_type;
};
/*
@@ -3975,6 +4772,78 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
return delta < (s64)sysctl_sched_migration_cost;
}
+#ifdef CONFIG_NUMA_BALANCING
+/* Returns true if the destination node has incurred more faults */
+static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
+{
+ int src_nid, dst_nid;
+
+ if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
+ !(env->sd->flags & SD_NUMA)) {
+ return false;
+ }
+
+ src_nid = cpu_to_node(env->src_cpu);
+ dst_nid = cpu_to_node(env->dst_cpu);
+
+ if (src_nid == dst_nid)
+ return false;
+
+ /* Always encourage migration to the preferred node. */
+ if (dst_nid == p->numa_preferred_nid)
+ return true;
+
+ /* If both task and group weight improve, this move is a winner. */
+ if (task_weight(p, dst_nid) > task_weight(p, src_nid) &&
+ group_weight(p, dst_nid) > group_weight(p, src_nid))
+ return true;
+
+ return false;
+}
+
+
+static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
+{
+ int src_nid, dst_nid;
+
+ if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
+ return false;
+
+ if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
+ return false;
+
+ src_nid = cpu_to_node(env->src_cpu);
+ dst_nid = cpu_to_node(env->dst_cpu);
+
+ if (src_nid == dst_nid)
+ return false;
+
+ /* Migrating away from the preferred node is always bad. */
+ if (src_nid == p->numa_preferred_nid)
+ return true;
+
+ /* If either task or group weight get worse, don't do it. */
+ if (task_weight(p, dst_nid) < task_weight(p, src_nid) ||
+ group_weight(p, dst_nid) < group_weight(p, src_nid))
+ return true;
+
+ return false;
+}
+
+#else
+static inline bool migrate_improves_locality(struct task_struct *p,
+ struct lb_env *env)
+{
+ return false;
+}
+
+static inline bool migrate_degrades_locality(struct task_struct *p,
+ struct lb_env *env)
+{
+ return false;
+}
+#endif
+
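Note the asymmetry between the two helpers added above: migrate_improves_locality() requires both the task and the group weight to improve, while migrate_degrades_locality() fires if either gets worse, leaving a neutral band (for example, one weight unchanged) where neither applies; and since NUMA_RESIST_LOWER defaults to false in the features.h hunk below, only the "improves" side acts by default. A user-space sketch of that decision table with made-up weights (the real ones come from task_weight()/group_weight()):

/* Sketch of the improves/degrades asymmetry; all weights are invented. */
#include <stdio.h>
#include <stdbool.h>

struct w { int task_src, task_dst, group_src, group_dst; };

static bool improves(struct w v)
{
	return v.task_dst > v.task_src && v.group_dst > v.group_src;
}

static bool degrades(struct w v)
{
	return v.task_dst < v.task_src || v.group_dst < v.group_src;
}

int main(void)
{
	struct w cases[] = {
		{ 10, 20, 10, 20 },	/* both better: improves */
		{ 10, 20, 20, 10 },	/* group worse: degrades */
		{ 10, 20, 10, 10 },	/* group unchanged: neither fires */
	};

	for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("case %u: improves=%d degrades=%d\n",
		       i, improves(cases[i]), degrades(cases[i]));
	return 0;
}
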
/*
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
*/
@@ -3997,6 +4866,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
+ env->flags |= LBF_SOME_PINNED;
+
/*
* Remember if this task can be migrated to any other cpu in
* our sched_group. We may want to revisit it if we couldn't
@@ -4005,13 +4876,13 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
* Also avoid computing new_dst_cpu if we have already computed
* one in current iteration.
*/
- if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
+ if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
return 0;
/* Prevent to re-select dst_cpu via env's cpus */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
- env->flags |= LBF_SOME_PINNED;
+ env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
}
@@ -4030,11 +4901,24 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/*
* Aggressive migration if:
- * 1) task is cache cold, or
- * 2) too many balance attempts have failed.
+ * 1) destination numa is preferred
+ * 2) task is cache cold, or
+ * 3) too many balance attempts have failed.
*/
-
tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
+ if (!tsk_cache_hot)
+ tsk_cache_hot = migrate_degrades_locality(p, env);
+
+ if (migrate_improves_locality(p, env)) {
+#ifdef CONFIG_SCHEDSTATS
+ if (tsk_cache_hot) {
+ schedstat_inc(env->sd, lb_hot_gained[env->idle]);
+ schedstat_inc(p, se.statistics.nr_forced_migrations);
+ }
+#endif
+ return 1;
+ }
+
if (!tsk_cache_hot ||
env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
@@ -4077,8 +4961,6 @@ static int move_one_task(struct lb_env *env)
return 0;
}
-static unsigned long task_h_load(struct task_struct *p);
-
static const unsigned int sched_nr_migrate_break = 32;
/*
@@ -4291,6 +5173,10 @@ struct sg_lb_stats {
unsigned int group_weight;
int group_imb; /* Is there an imbalance in the group ? */
int group_has_capacity; /* Is there extra capacity in the group? */
+#ifdef CONFIG_NUMA_BALANCING
+ unsigned int nr_numa_running;
+ unsigned int nr_preferred_running;
+#endif
};
/*
@@ -4330,7 +5216,7 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
/**
* get_sd_load_idx - Obtain the load index for a given sched domain.
* @sd: The sched_domain whose load_idx is to be obtained.
- * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
+ * @idle: The idle status of the CPU for whose sd load_idx is obtained.
*
* Return: The load index.
*/
@@ -4447,7 +5333,7 @@ void update_group_power(struct sched_domain *sd, int cpu)
{
struct sched_domain *child = sd->child;
struct sched_group *group, *sdg = sd->groups;
- unsigned long power;
+ unsigned long power, power_orig;
unsigned long interval;
interval = msecs_to_jiffies(sd->balance_interval);
@@ -4459,7 +5345,7 @@ void update_group_power(struct sched_domain *sd, int cpu)
return;
}
- power = 0;
+ power_orig = power = 0;
if (child->flags & SD_OVERLAP) {
/*
@@ -4467,8 +5353,12 @@ void update_group_power(struct sched_domain *sd, int cpu)
* span the current group.
*/
- for_each_cpu(cpu, sched_group_cpus(sdg))
- power += power_of(cpu);
+ for_each_cpu(cpu, sched_group_cpus(sdg)) {
+ struct sched_group *sg = cpu_rq(cpu)->sd->groups;
+
+ power_orig += sg->sgp->power_orig;
+ power += sg->sgp->power;
+ }
} else {
/*
* !SD_OVERLAP domains can assume that child groups
@@ -4477,12 +5367,14 @@ void update_group_power(struct sched_domain *sd, int cpu)
group = child->groups;
do {
+ power_orig += group->sgp->power_orig;
power += group->sgp->power;
group = group->next;
} while (group != child->groups);
}
- sdg->sgp->power_orig = sdg->sgp->power = power;
+ sdg->sgp->power_orig = power_orig;
+ sdg->sgp->power = power;
}
/*
@@ -4526,13 +5418,12 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
* cpu 3 and leave one of the cpus in the second group unused.
*
* The current solution to this issue is detecting the skew in the first group
- * by noticing it has a cpu that is overloaded while the remaining cpus are
- * idle -- or rather, there's a distinct imbalance in the cpus; see
- * sg_imbalanced().
+ * by noticing the lower domain failed to reach balance and had difficulty
+ * moving tasks due to affinity constraints.
*
* When this is so detected; this group becomes a candidate for busiest; see
- * update_sd_pick_busiest(). And calculcate_imbalance() and
- * find_busiest_group() avoid some of the usual balance conditional to allow it
+ * update_sd_pick_busiest(). And calculate_imbalance() and
+ * find_busiest_group() avoid some of the usual balance conditions to allow it
* to create an effective group imbalance.
*
* This is a somewhat tricky proposition since the next run might not find the
@@ -4540,49 +5431,36 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
* subtle and fragile situation.
*/
-struct sg_imb_stats {
- unsigned long max_nr_running, min_nr_running;
- unsigned long max_cpu_load, min_cpu_load;
-};
-
-static inline void init_sg_imb_stats(struct sg_imb_stats *sgi)
+static inline int sg_imbalanced(struct sched_group *group)
{
- sgi->max_cpu_load = sgi->max_nr_running = 0UL;
- sgi->min_cpu_load = sgi->min_nr_running = ~0UL;
+ return group->sgp->imbalance;
}
-static inline void
-update_sg_imb_stats(struct sg_imb_stats *sgi,
- unsigned long load, unsigned long nr_running)
+/*
+ * Compute the group capacity.
+ *
+ * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
+ * first dividing out the smt factor and computing the actual number of cores
+ * and limiting the power-unit capacity by that.
+ */
+static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
{
- if (load > sgi->max_cpu_load)
- sgi->max_cpu_load = load;
- if (sgi->min_cpu_load > load)
- sgi->min_cpu_load = load;
+ unsigned int capacity, smt, cpus;
+ unsigned int power, power_orig;
- if (nr_running > sgi->max_nr_running)
- sgi->max_nr_running = nr_running;
- if (sgi->min_nr_running > nr_running)
- sgi->min_nr_running = nr_running;
-}
+ power = group->sgp->power;
+ power_orig = group->sgp->power_orig;
+ cpus = group->group_weight;
-static inline int
-sg_imbalanced(struct sg_lb_stats *sgs, struct sg_imb_stats *sgi)
-{
- /*
- * Consider the group unbalanced when the imbalance is larger
- * than the average weight of a task.
- *
- * APZ: with cgroup the avg task weight can vary wildly and
- * might not be a suitable number - should we keep a
- * normalized nr_running number somewhere that negates
- * the hierarchy?
- */
- if ((sgi->max_cpu_load - sgi->min_cpu_load) >= sgs->load_per_task &&
- (sgi->max_nr_running - sgi->min_nr_running) > 1)
- return 1;
+ /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
+ smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
+ capacity = cpus / smt; /* cores */
- return 0;
+ capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
+ if (!capacity)
+ capacity = fix_small_capacity(env->sd, group);
+
+ return capacity;
}
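
To see the "phantom core" the comment above refers to, take a hypothetical group of 4 cores / 8 SMT threads where each thread reports power 589 (with SCHED_POWER_SCALE at 1024): the old DIV_ROUND_CLOSEST(8*589, 1024) yields 5 "cores", while the new two-step calculation lands on 4. A small user-space re-run of both formulas (the per-thread power is illustrative only):

/* Re-running the old and new group-capacity math with invented numbers. */
#include <stdio.h>

#define SCHED_POWER_SCALE	1024U
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))

int main(void)
{
	unsigned int cpus = 8;			/* 4 cores, 2 SMT threads each */
	unsigned int power_orig = cpus * 589;	/* per-thread power ~589 */
	unsigned int power = power_orig;	/* assume no RT/IRQ scaling */

	/* old: straight division, frac(smt_power) piles up into a phantom core */
	unsigned int old = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);

	/* new: divide out the SMT factor first, then clamp by scaled power */
	unsigned int smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
	unsigned int capacity = cpus / smt;
	if (capacity > DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE))
		capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);

	printf("old capacity: %u, new capacity: %u\n", old, capacity);	/* 5 vs 4 */
	return 0;
}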
/**
@@ -4597,12 +5475,11 @@ static inline void update_sg_lb_stats(struct lb_env *env,
struct sched_group *group, int load_idx,
int local_group, struct sg_lb_stats *sgs)
{
- struct sg_imb_stats sgi;
unsigned long nr_running;
unsigned long load;
int i;
- init_sg_imb_stats(&sgi);
+ memset(sgs, 0, sizeof(*sgs));
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
struct rq *rq = cpu_rq(i);
@@ -4610,24 +5487,22 @@ static inline void update_sg_lb_stats(struct lb_env *env,
nr_running = rq->nr_running;
/* Bias balancing toward cpus of our domain */
- if (local_group) {
+ if (local_group)
load = target_load(i, load_idx);
- } else {
+ else
load = source_load(i, load_idx);
- update_sg_imb_stats(&sgi, load, nr_running);
- }
sgs->group_load += load;
sgs->sum_nr_running += nr_running;
+#ifdef CONFIG_NUMA_BALANCING
+ sgs->nr_numa_running += rq->nr_numa_running;
+ sgs->nr_preferred_running += rq->nr_preferred_running;
+#endif
sgs->sum_weighted_load += weighted_cpuload(i);
if (idle_cpu(i))
sgs->idle_cpus++;
}
- if (local_group && (env->idle != CPU_NEWLY_IDLE ||
- time_after_eq(jiffies, group->sgp->next_update)))
- update_group_power(env->sd, env->dst_cpu);
-
/* Adjust by relative CPU power of the group */
sgs->group_power = group->sgp->power;
sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
@@ -4635,16 +5510,11 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if (sgs->sum_nr_running)
sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
- sgs->group_imb = sg_imbalanced(sgs, &sgi);
-
- sgs->group_capacity =
- DIV_ROUND_CLOSEST(sgs->group_power, SCHED_POWER_SCALE);
-
- if (!sgs->group_capacity)
- sgs->group_capacity = fix_small_capacity(env->sd, group);
-
sgs->group_weight = group->group_weight;
+ sgs->group_imb = sg_imbalanced(group);
+ sgs->group_capacity = sg_capacity(env, group);
+
if (sgs->group_capacity > sgs->sum_nr_running)
sgs->group_has_capacity = 1;
}
@@ -4693,14 +5563,42 @@ static bool update_sd_pick_busiest(struct lb_env *env,
return false;
}
+#ifdef CONFIG_NUMA_BALANCING
+static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
+{
+ if (sgs->sum_nr_running > sgs->nr_numa_running)
+ return regular;
+ if (sgs->sum_nr_running > sgs->nr_preferred_running)
+ return remote;
+ return all;
+}
+
+static inline enum fbq_type fbq_classify_rq(struct rq *rq)
+{
+ if (rq->nr_running > rq->nr_numa_running)
+ return regular;
+ if (rq->nr_running > rq->nr_preferred_running)
+ return remote;
+ return all;
+}
+#else
+static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
+{
+ return all;
+}
+
+static inline enum fbq_type fbq_classify_rq(struct rq *rq)
+{
+ return regular;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
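The enum ordering introduced earlier (regular < remote < all) is what makes these classifiers composable: find_busiest_queue() below skips any runqueue whose own classification is "better placed" than the busiest group allows (rt > env->fbq_type). A user-space sketch of that filter, with invented task counts:

/* Sketch of the fbq_type filter; the nr_* counts are made up. */
#include <stdio.h>

enum fbq_type { regular, remote, all };

static enum fbq_type classify(unsigned running, unsigned numa, unsigned pref)
{
	if (running > numa)
		return regular;	/* some !numa tasks: always fair game */
	if (running > pref)
		return remote;	/* numa tasks running on the wrong node */
	return all;		/* everything is ideally placed */
}

int main(void)
{
	enum fbq_type group = classify(4, 2, 1);	/* group still has !numa tasks */
	unsigned rq_running[3] = { 2, 2, 2 };
	unsigned rq_numa[3]    = { 1, 2, 2 };
	unsigned rq_pref[3]    = { 0, 1, 2 };

	for (int i = 0; i < 3; i++) {
		enum fbq_type rt = classify(rq_running[i], rq_numa[i], rq_pref[i]);
		printf("rq %d: type %d -> %s\n", i, rt,
		       rt > group ? "skipped" : "considered");
	}
	return 0;
}

With the group classified as regular, only the runqueue that still holds a !numa task is considered, leaving ideally placed NUMA tasks alone exactly as the comment in find_busiest_queue() describes.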
/**
* update_sd_lb_stats - Update sched_domain's statistics for load balancing.
* @env: The load balancing environment.
- * @balance: Should we balance.
* @sds: variable to hold the statistics for this sched_domain.
*/
-static inline void update_sd_lb_stats(struct lb_env *env,
- struct sd_lb_stats *sds)
+static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
{
struct sched_domain *child = env->sd->child;
struct sched_group *sg = env->sd->groups;
@@ -4720,11 +5618,17 @@ static inline void update_sd_lb_stats(struct lb_env *env,
if (local_group) {
sds->local = sg;
sgs = &sds->local_stat;
+
+ if (env->idle != CPU_NEWLY_IDLE ||
+ time_after_eq(jiffies, sg->sgp->next_update))
+ update_group_power(env->sd, env->dst_cpu);
}
- memset(sgs, 0, sizeof(*sgs));
update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
+ if (local_group)
+ goto next_group;
+
/*
* In case the child domain prefers tasks go to siblings
* first, lower the sg capacity to one so that we'll try
@@ -4735,21 +5639,25 @@ static inline void update_sd_lb_stats(struct lb_env *env,
* heaviest group when it is already under-utilized (possible
* with a large weight task outweighs the tasks on the system).
*/
- if (prefer_sibling && !local_group &&
- sds->local && sds->local_stat.group_has_capacity)
+ if (prefer_sibling && sds->local &&
+ sds->local_stat.group_has_capacity)
sgs->group_capacity = min(sgs->group_capacity, 1U);
- /* Now, start updating sd_lb_stats */
- sds->total_load += sgs->group_load;
- sds->total_pwr += sgs->group_power;
-
- if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) {
+ if (update_sd_pick_busiest(env, sds, sg, sgs)) {
sds->busiest = sg;
sds->busiest_stat = *sgs;
}
+next_group:
+ /* Now, start updating sd_lb_stats */
+ sds->total_load += sgs->group_load;
+ sds->total_pwr += sgs->group_power;
+
sg = sg->next;
} while (sg != env->sd->groups);
+
+ if (env->sd->flags & SD_NUMA)
+ env->fbq_type = fbq_classify_group(&sds->busiest_stat);
}
/**
@@ -5053,15 +5961,39 @@ static struct rq *find_busiest_queue(struct lb_env *env,
int i;
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
- unsigned long power = power_of(i);
- unsigned long capacity = DIV_ROUND_CLOSEST(power,
- SCHED_POWER_SCALE);
- unsigned long wl;
+ unsigned long power, capacity, wl;
+ enum fbq_type rt;
+ rq = cpu_rq(i);
+ rt = fbq_classify_rq(rq);
+
+ /*
+ * We classify groups/runqueues into three groups:
+ * - regular: there are !numa tasks
+ * - remote: there are numa tasks that run on the 'wrong' node
+ * - all: there is no distinction
+ *
+ * In order to avoid migrating ideally placed numa tasks,
+	 * ignore those when there are better options.
+ *
+ * If we ignore the actual busiest queue to migrate another
+ * task, the next balance pass can still reduce the busiest
+ * queue by moving tasks around inside the node.
+ *
+ * If we cannot move enough load due to this classification
+ * the next pass will adjust the group classification and
+ * allow migration of more tasks.
+ *
+ * Both cases only affect the total convergence complexity.
+ */
+ if (rt > env->fbq_type)
+ continue;
+
+ power = power_of(i);
+ capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
if (!capacity)
capacity = fix_small_capacity(env->sd, group);
- rq = cpu_rq(i);
wl = weighted_cpuload(i);
/*
@@ -5164,6 +6096,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
int *continue_balancing)
{
int ld_moved, cur_ld_moved, active_balance = 0;
+ struct sched_domain *sd_parent = sd->parent;
struct sched_group *group;
struct rq *busiest;
unsigned long flags;
@@ -5177,6 +6110,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
.idle = idle,
.loop_break = sched_nr_migrate_break,
.cpus = cpus,
+ .fbq_type = all,
};
/*
@@ -5268,17 +6202,17 @@ more_balance:
* moreover subsequent load balance cycles should correct the
* excess load moved.
*/
- if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
+ if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
+
+ /* Prevent to re-select dst_cpu via env's cpus */
+ cpumask_clear_cpu(env.dst_cpu, env.cpus);
env.dst_rq = cpu_rq(env.new_dst_cpu);
env.dst_cpu = env.new_dst_cpu;
- env.flags &= ~LBF_SOME_PINNED;
+ env.flags &= ~LBF_DST_PINNED;
env.loop = 0;
env.loop_break = sched_nr_migrate_break;
- /* Prevent to re-select dst_cpu via env's cpus */
- cpumask_clear_cpu(env.dst_cpu, env.cpus);
-
/*
* Go back to "more_balance" rather than "redo" since we
* need to continue with same src_cpu.
@@ -5286,6 +6220,18 @@ more_balance:
goto more_balance;
}
+ /*
+ * We failed to reach balance because of affinity.
+ */
+ if (sd_parent) {
+ int *group_imbalance = &sd_parent->groups->sgp->imbalance;
+
+ if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
+ *group_imbalance = 1;
+ } else if (*group_imbalance)
+ *group_imbalance = 0;
+ }
+
/* All tasks on this runqueue were pinned by CPU affinity */
if (unlikely(env.flags & LBF_ALL_PINNED)) {
cpumask_clear_cpu(cpu_of(busiest), cpus);
@@ -5393,6 +6339,7 @@ void idle_balance(int this_cpu, struct rq *this_rq)
struct sched_domain *sd;
int pulled_task = 0;
unsigned long next_balance = jiffies + HZ;
+ u64 curr_cost = 0;
this_rq->idle_stamp = rq_clock(this_rq);
@@ -5409,15 +6356,27 @@ void idle_balance(int this_cpu, struct rq *this_rq)
for_each_domain(this_cpu, sd) {
unsigned long interval;
int continue_balancing = 1;
+ u64 t0, domain_cost;
if (!(sd->flags & SD_LOAD_BALANCE))
continue;
+ if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
+ break;
+
if (sd->flags & SD_BALANCE_NEWIDLE) {
+ t0 = sched_clock_cpu(this_cpu);
+
/* If we've pulled tasks over stop searching: */
pulled_task = load_balance(this_cpu, this_rq,
sd, CPU_NEWLY_IDLE,
&continue_balancing);
+
+ domain_cost = sched_clock_cpu(this_cpu) - t0;
+ if (domain_cost > sd->max_newidle_lb_cost)
+ sd->max_newidle_lb_cost = domain_cost;
+
+ curr_cost += domain_cost;
}
interval = msecs_to_jiffies(sd->balance_interval);
@@ -5439,6 +6398,9 @@ void idle_balance(int this_cpu, struct rq *this_rq)
*/
this_rq->next_balance = next_balance;
}
+
+ if (curr_cost > this_rq->max_idle_balance_cost)
+ this_rq->max_idle_balance_cost = curr_cost;
}
/*
@@ -5662,15 +6624,39 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
/* Earliest time when we have to do rebalance again */
unsigned long next_balance = jiffies + 60*HZ;
int update_next_balance = 0;
- int need_serialize;
+ int need_serialize, need_decay = 0;
+ u64 max_cost = 0;
update_blocked_averages(cpu);
rcu_read_lock();
for_each_domain(cpu, sd) {
+ /*
+ * Decay the newidle max times here because this is a regular
+ * visit to all the domains. Decay ~1% per second.
+ */
+ if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
+ sd->max_newidle_lb_cost =
+ (sd->max_newidle_lb_cost * 253) / 256;
+ sd->next_decay_max_lb_cost = jiffies + HZ;
+ need_decay = 1;
+ }
+ max_cost += sd->max_newidle_lb_cost;
+
if (!(sd->flags & SD_LOAD_BALANCE))
continue;
+ /*
+ * Stop the load balance at this level. There is another
+ * CPU in our sched group which is doing load balancing more
+ * actively.
+ */
+ if (!continue_balancing) {
+ if (need_decay)
+ continue;
+ break;
+ }
+
interval = sd->balance_interval;
if (idle != CPU_IDLE)
interval *= sd->busy_factor;
@@ -5689,7 +6675,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
if (time_after_eq(jiffies, sd->last_balance + interval)) {
if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
/*
- * The LBF_SOME_PINNED logic could have changed
+ * The LBF_DST_PINNED logic could have changed
* env->dst_cpu, so we can't know our idle
* state even if we migrated tasks. Update it.
*/
@@ -5704,14 +6690,14 @@ out:
next_balance = sd->last_balance + interval;
update_next_balance = 1;
}
-
+ }
+ if (need_decay) {
/*
- * Stop the load balance at this level. There is another
- * CPU in our sched group which is doing load balancing more
- * actively.
+ * Ensure the rq-wide value also decays but keep it at a
+ * reasonable floor to avoid funnies with rq->avg_idle.
*/
- if (!continue_balancing)
- break;
+ rq->max_idle_balance_cost =
+ max((u64)sysctl_sched_migration_cost, max_cost);
}
rcu_read_unlock();
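
The 253/256 factor applied above shaves roughly 1.2% off max_newidle_lb_cost on every pass that is at least a second apart, giving the estimate a half-life of about a minute. A quick user-space check of that rate (the starting cost is arbitrary):

/* How fast the newidle balance cost estimate decays at 253/256 per second. */
#include <stdio.h>

int main(void)
{
	unsigned long long cost = 1000000;	/* arbitrary starting cost in ns */
	unsigned long long start = cost;

	for (int sec = 1; sec <= 120; sec++) {
		cost = cost * 253 / 256;	/* same integer math as the patch */
		if (cost * 2 <= start) {
			printf("dropped below half after ~%d seconds\n", sec);
			break;
		}
	}
	return 0;
}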
@@ -6214,7 +7200,8 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
se->cfs_rq = parent->my_q;
se->my_q = cfs_rq;
- update_load_set(&se->load, 0);
+ /* guarantee group entities always have weight */
+ update_load_set(&se->load, NICE_0_LOAD);
se->parent = parent;
}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 99399f8e4799..5716929a2e3a 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -63,10 +63,23 @@ SCHED_FEAT(LB_MIN, false)
/*
* Apply the automatic NUMA scheduling policy. Enabled automatically
* at runtime if running on a NUMA machine. Can be controlled via
- * numa_balancing=. Allow PTE scanning to be forced on UMA machines
- * for debugging the core machinery.
+ * numa_balancing=
*/
#ifdef CONFIG_NUMA_BALANCING
SCHED_FEAT(NUMA, false)
-SCHED_FEAT(NUMA_FORCE, false)
+
+/*
+ * NUMA_FAVOUR_HIGHER will favor moving tasks towards nodes where a
+ * higher number of hinting faults are recorded during active load
+ * balancing.
+ */
+SCHED_FEAT(NUMA_FAVOUR_HIGHER, true)
+
+/*
+ * NUMA_RESIST_LOWER will resist moving tasks towards nodes where a
+ * lower number of hinting faults have been recorded. As this has
+ * the potential to prevent a task ever migrating to a new node
+ * due to CPU overload it is disabled by default.
+ */
+SCHED_FEAT(NUMA_RESIST_LOWER, false)
#endif
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index d8da01008d39..516c3d9ceea1 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -9,7 +9,7 @@
#ifdef CONFIG_SMP
static int
-select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
+select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
return task_cpu(p); /* IDLE tasks as never migrated */
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 01970c8e64df..7d57275fc396 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -246,8 +246,10 @@ static inline void rt_set_overload(struct rq *rq)
* if we should look at the mask. It would be a shame
* if we looked at the mask, but the mask was not
* updated yet.
+ *
+ * Matched by the barrier in pull_rt_task().
*/
- wmb();
+ smp_wmb();
atomic_inc(&rq->rd->rto_count);
}
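
The smp_wmb() here pairs with the smp_rmb() added to pull_rt_task() further down: the writer publishes the rto_mask bit before bumping rto_count, and the reader must observe the two in that order. A user-space analogue using C11 fences (explicit atomics standing in for the kernel's barriers and cpumask):

/* User-space analogue of the wmb/rmb pairing: publish the data, then the flag. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int rto_mask;	/* stands in for the rto_mask bit */
static atomic_int rto_count;	/* stands in for rq->rd->rto_count */

static void *writer(void *arg)
{
	atomic_store_explicit(&rto_mask, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* smp_wmb() */
	atomic_store_explicit(&rto_count, 1, memory_order_relaxed);
	return arg;
}

static void *reader(void *arg)
{
	while (!atomic_load_explicit(&rto_count, memory_order_relaxed))
		;					/* rt_overloaded() */
	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() */
	printf("mask seen as %d\n",
	       atomic_load_explicit(&rto_mask, memory_order_relaxed));
	return arg;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;	/* with both fences, "mask seen as 1" is guaranteed */
}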
@@ -1169,13 +1171,10 @@ static void yield_task_rt(struct rq *rq)
static int find_lowest_rq(struct task_struct *task);
static int
-select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
struct task_struct *curr;
struct rq *rq;
- int cpu;
-
- cpu = task_cpu(p);
if (p->nr_cpus_allowed == 1)
goto out;
@@ -1213,8 +1212,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
*/
if (curr && unlikely(rt_task(curr)) &&
(curr->nr_cpus_allowed < 2 ||
- curr->prio <= p->prio) &&
- (p->nr_cpus_allowed > 1)) {
+ curr->prio <= p->prio)) {
int target = find_lowest_rq(p);
if (target != -1)
@@ -1630,6 +1628,12 @@ static int pull_rt_task(struct rq *this_rq)
if (likely(!rt_overloaded(this_rq)))
return 0;
+ /*
+	 * Match the barrier from rt_set_overload(); this guarantees that if we
+ * see overloaded we must also see the rto_mask bit.
+ */
+ smp_rmb();
+
for_each_cpu(cpu, this_rq->rd->rto_mask) {
if (this_cpu == cpu)
continue;
@@ -1931,8 +1935,8 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
p->rt.time_slice = sched_rr_timeslice;
/*
- * Requeue to the end of queue if we (and all of our ancestors) are the
- * only element on the queue
+ * Requeue to the end of queue if we (and all of our ancestors) are not
+ * the only element on the queue
*/
for_each_sched_rt_entity(rt_se) {
if (rt_se->run_list.prev != rt_se->run_list.next) {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b3c5653e1dca..4e650acffed7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -6,6 +6,7 @@
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/tick.h>
+#include <linux/slab.h>
#include "cpupri.h"
#include "cpuacct.h"
@@ -408,6 +409,10 @@ struct rq {
* remote CPUs use both these fields when doing load calculation.
*/
unsigned int nr_running;
+#ifdef CONFIG_NUMA_BALANCING
+ unsigned int nr_numa_running;
+ unsigned int nr_preferred_running;
+#endif
#define CPU_LOAD_IDX_MAX 5
unsigned long cpu_load[CPU_LOAD_IDX_MAX];
unsigned long last_load_update_tick;
@@ -476,6 +481,9 @@ struct rq {
u64 age_stamp;
u64 idle_stamp;
u64 avg_idle;
+
+ /* This is used to determine avg_idle's max value */
+ u64 max_idle_balance_cost;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -552,6 +560,12 @@ static inline u64 rq_clock_task(struct rq *rq)
return rq->clock_task;
}
+#ifdef CONFIG_NUMA_BALANCING
+extern void sched_setnuma(struct task_struct *p, int node);
+extern int migrate_task_to(struct task_struct *p, int cpu);
+extern int migrate_swap(struct task_struct *, struct task_struct *);
+#endif /* CONFIG_NUMA_BALANCING */
+
#ifdef CONFIG_SMP
#define rcu_dereference_check_sched_domain(p) \
@@ -593,9 +607,22 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
return hsd;
}
+static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
+{
+ struct sched_domain *sd;
+
+ for_each_domain(cpu, sd) {
+ if (sd->flags & flag)
+ break;
+ }
+
+ return sd;
+}
+
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
+DECLARE_PER_CPU(struct sched_domain *, sd_numa);
struct sched_group_power {
atomic_t ref;
@@ -605,6 +632,7 @@ struct sched_group_power {
*/
unsigned int power, power_orig;
unsigned long next_update;
+ int imbalance; /* XXX unrelated to power but shared group state */
/*
* Number of busy cpus in this group.
*/
@@ -719,6 +747,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
*/
smp_wmb();
task_thread_info(p)->cpu = cpu;
+ p->wake_cpu = cpu;
#endif
}
@@ -974,7 +1003,7 @@ struct sched_class {
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
- int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+ int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
@@ -1220,6 +1249,24 @@ static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
+static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
+{
+ if (l1 > l2)
+ swap(l1, l2);
+
+ spin_lock(l1);
+ spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
+}
+
+static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
+{
+ if (l1 > l2)
+ swap(l1, l2);
+
+ raw_spin_lock(l1);
+ raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
+}
+
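The double_lock()/double_raw_lock() helpers above avoid ABBA deadlocks by always taking the lower-addressed lock first, so two CPUs locking the same pair can never cross. A user-space pthread sketch of the same address-ordering trick:

/* Address-ordered locking of two mutexes, the same idea as double_lock(). */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static void double_lock(pthread_mutex_t *l1, pthread_mutex_t *l2)
{
	if ((uintptr_t)l1 > (uintptr_t)l2) {	/* lower address always first */
		pthread_mutex_t *tmp = l1;
		l1 = l2;
		l2 = tmp;
	}
	pthread_mutex_lock(l1);
	pthread_mutex_lock(l2);
}

static void double_unlock(pthread_mutex_t *l1, pthread_mutex_t *l2)
{
	pthread_mutex_unlock(l1);
	pthread_mutex_unlock(l2);
}

int main(void)
{
	pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

	double_lock(&a, &b);	/* the caller's argument order doesn't matter ... */
	double_unlock(&a, &b);
	double_lock(&b, &a);	/* ... both calls lock in the same fixed order */
	double_unlock(&b, &a);
	printf("lock order is fixed by address, so no ABBA deadlock\n");
	return 0;
}
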
/*
* double_rq_lock - safely lock two runqueues
*
@@ -1305,7 +1352,8 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
-extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
+extern void cfs_bandwidth_usage_inc(void);
+extern void cfs_bandwidth_usage_dec(void);
#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index c7edee71bce8..4ab704339656 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -59,9 +59,9 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
* from dequeue_task() to account for possible rq->clock skew across cpus. The
* delta taken on each cpu would annul the skew.
*/
-static inline void sched_info_dequeued(struct task_struct *t)
+static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
- unsigned long long now = rq_clock(task_rq(t)), delta = 0;
+ unsigned long long now = rq_clock(rq), delta = 0;
if (unlikely(sched_info_on()))
if (t->sched_info.last_queued)
@@ -69,7 +69,7 @@ static inline void sched_info_dequeued(struct task_struct *t)
sched_info_reset_dequeued(t);
t->sched_info.run_delay += delta;
- rq_sched_info_dequeued(task_rq(t), delta);
+ rq_sched_info_dequeued(rq, delta);
}
/*
@@ -77,9 +77,9 @@ static inline void sched_info_dequeued(struct task_struct *t)
* long it was waiting to run. We also note when it began so that we
* can keep stats on how long its timeslice is.
*/
-static void sched_info_arrive(struct task_struct *t)
+static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
- unsigned long long now = rq_clock(task_rq(t)), delta = 0;
+ unsigned long long now = rq_clock(rq), delta = 0;
if (t->sched_info.last_queued)
delta = now - t->sched_info.last_queued;
@@ -88,7 +88,7 @@ static void sched_info_arrive(struct task_struct *t)
t->sched_info.last_arrival = now;
t->sched_info.pcount++;
- rq_sched_info_arrive(task_rq(t), delta);
+ rq_sched_info_arrive(rq, delta);
}
/*
@@ -96,11 +96,11 @@ static void sched_info_arrive(struct task_struct *t)
* the timestamp if it is already not set. It's assumed that
* sched_info_dequeued() will clear that stamp when appropriate.
*/
-static inline void sched_info_queued(struct task_struct *t)
+static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
if (unlikely(sched_info_on()))
if (!t->sched_info.last_queued)
- t->sched_info.last_queued = rq_clock(task_rq(t));
+ t->sched_info.last_queued = rq_clock(rq);
}
/*
@@ -111,15 +111,15 @@ static inline void sched_info_queued(struct task_struct *t)
* sched_info_queued() to mark that it has now again started waiting on
* the runqueue.
*/
-static inline void sched_info_depart(struct task_struct *t)
+static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
- unsigned long long delta = rq_clock(task_rq(t)) -
+ unsigned long long delta = rq_clock(rq) -
t->sched_info.last_arrival;
- rq_sched_info_depart(task_rq(t), delta);
+ rq_sched_info_depart(rq, delta);
if (t->state == TASK_RUNNING)
- sched_info_queued(t);
+ sched_info_queued(rq, t);
}
/*
@@ -128,32 +128,34 @@ static inline void sched_info_depart(struct task_struct *t)
* the idle task.) We are only called when prev != next.
*/
static inline void
-__sched_info_switch(struct task_struct *prev, struct task_struct *next)
+__sched_info_switch(struct rq *rq,
+ struct task_struct *prev, struct task_struct *next)
{
- struct rq *rq = task_rq(prev);
-
/*
* prev now departs the cpu. It's not interesting to record
* stats about how efficient we were at scheduling the idle
* process, however.
*/
if (prev != rq->idle)
- sched_info_depart(prev);
+ sched_info_depart(rq, prev);
if (next != rq->idle)
- sched_info_arrive(next);
+ sched_info_arrive(rq, next);
}
static inline void
-sched_info_switch(struct task_struct *prev, struct task_struct *next)
+sched_info_switch(struct rq *rq,
+ struct task_struct *prev, struct task_struct *next)
{
if (unlikely(sched_info_on()))
- __sched_info_switch(prev, next);
+ __sched_info_switch(rq, prev, next);
}
#else
-#define sched_info_queued(t) do { } while (0)
+#define sched_info_queued(rq, t) do { } while (0)
#define sched_info_reset_dequeued(t) do { } while (0)
-#define sched_info_dequeued(t) do { } while (0)
-#define sched_info_switch(t, next) do { } while (0)
+#define sched_info_dequeued(rq, t) do { } while (0)
+#define sched_info_depart(rq, t) do { } while (0)
+#define sched_info_arrive(rq, next) do { } while (0)
+#define sched_info_switch(rq, t, next) do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
/*
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index e08fbeeb54b9..47197de8abd9 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -11,7 +11,7 @@
#ifdef CONFIG_SMP
static int
-select_task_rq_stop(struct task_struct *p, int sd_flag, int flags)
+select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
{
return task_cpu(p); /* stop tasks as never migrate */
}
diff --git a/kernel/smp.c b/kernel/smp.c
index 0564571dcdf7..46116100f0ee 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -18,6 +18,7 @@
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
enum {
CSD_FLAG_LOCK = 0x01,
+ CSD_FLAG_WAIT = 0x02,
};
struct call_function_data {
@@ -124,7 +125,7 @@ static void csd_lock(struct call_single_data *csd)
static void csd_unlock(struct call_single_data *csd)
{
- WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
+ WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));
/*
* ensure we're all done before releasing data:
@@ -146,6 +147,9 @@ void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
unsigned long flags;
int ipi;
+ if (wait)
+ csd->flags |= CSD_FLAG_WAIT;
+
raw_spin_lock_irqsave(&dst->lock, flags);
ipi = list_empty(&dst->list);
list_add_tail(&csd->list, &dst->list);
@@ -340,6 +344,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *csd,
}
put_cpu();
}
+EXPORT_SYMBOL_GPL(__smp_call_function_single);
/**
* smp_call_function_many(): Run a function on a set of other CPUs.
@@ -524,6 +529,11 @@ void __init setup_nr_cpu_ids(void)
nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
}
+void __weak smp_announce(void)
+{
+ printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
+}
+
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
@@ -540,7 +550,7 @@ void __init smp_init(void)
}
/* Any cleanup work */
- printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
+ smp_announce();
smp_cpus_done(setup_max_cpus);
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d7d498d8cc4f..b24988353458 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,7 +29,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
-#include <asm/irq.h>
/*
- No shared variables, all the data are CPU local.
- If a softirq needs serialization, let it serialize itself
@@ -100,13 +99,13 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
raw_local_irq_save(flags);
/*
- * The preempt tracer hooks into add_preempt_count and will break
+ * The preempt tracer hooks into preempt_count_add and will break
* lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
* is set and before current->softirq_enabled is cleared.
* We must manually increment preempt_count here and manually
* call the trace_preempt_off later.
*/
- preempt_count() += cnt;
+ __preempt_count_add(cnt);
/*
* Were softirqs turned off above:
*/
@@ -120,7 +119,7 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
- add_preempt_count(cnt);
+ preempt_count_add(cnt);
barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
@@ -134,12 +133,11 @@ EXPORT_SYMBOL(local_bh_disable);
static void __local_bh_enable(unsigned int cnt)
{
- WARN_ON_ONCE(in_irq());
WARN_ON_ONCE(!irqs_disabled());
if (softirq_count() == cnt)
trace_softirqs_on(_RET_IP_);
- sub_preempt_count(cnt);
+ preempt_count_sub(cnt);
}
/*
@@ -149,6 +147,7 @@ static void __local_bh_enable(unsigned int cnt)
*/
void _local_bh_enable(void)
{
+ WARN_ON_ONCE(in_irq());
__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
@@ -169,12 +168,17 @@ static inline void _local_bh_enable_ip(unsigned long ip)
* Keep preemption disabled until we are done with
* softirq processing:
*/
- sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
+ preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
- if (unlikely(!in_interrupt() && local_softirq_pending()))
+ if (unlikely(!in_interrupt() && local_softirq_pending())) {
+ /*
+ * Run softirq if any pending. And do it in its own stack
+ * as we may be calling this deep in a task call stack already.
+ */
do_softirq();
+ }
- dec_preempt_count();
+ preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
local_irq_enable();
#endif
@@ -256,7 +260,7 @@ restart:
" exited with %08x?\n", vec_nr,
softirq_to_name[vec_nr], h->action,
prev_count, preempt_count());
- preempt_count() = prev_count;
+ preempt_count_set(prev_count);
}
rcu_bh_qs(cpu);
@@ -280,10 +284,11 @@ restart:
account_irq_exit_time(current);
__local_bh_enable(SOFTIRQ_OFFSET);
+ WARN_ON_ONCE(in_interrupt());
tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
-#ifndef __ARCH_HAS_DO_SOFTIRQ
+
asmlinkage void do_softirq(void)
{
@@ -298,13 +303,11 @@ asmlinkage void do_softirq(void)
pending = local_softirq_pending();
if (pending)
- __do_softirq();
+ do_softirq_own_stack();
local_irq_restore(flags);
}
-#endif
-
/*
* Enter an interrupt context.
*/
@@ -329,15 +332,21 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
if (!force_irqthreads) {
+#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/*
* We can safely execute softirq on the current stack if
* it is the irq stack, because it should be near empty
- * at this stage. But we have no way to know if the arch
- * calls irq_exit() on the irq stack. So call softirq
- * in its own stack to prevent from any overrun on top
- * of a potentially deep task stack.
+ * at this stage.
*/
- do_softirq();
+ __do_softirq();
+#else
+ /*
+ * Otherwise, irq_exit() is called on the task stack that can
+ * be potentially deep already. So call softirq in its own stack
+ * to prevent from any overrun.
+ */
+ do_softirq_own_stack();
+#endif
} else {
wakeup_softirqd();
}
@@ -369,7 +378,7 @@ void irq_exit(void)
account_irq_exit_time(current);
trace_hardirq_exit();
- sub_preempt_count(HARDIRQ_OFFSET);
+ preempt_count_sub(HARDIRQ_OFFSET);
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
@@ -771,6 +780,10 @@ static void run_ksoftirqd(unsigned int cpu)
{
local_irq_disable();
if (local_softirq_pending()) {
+ /*
+ * We can safely run softirq on inline stack, as we are not deep
+ * in the task stack here.
+ */
__do_softirq();
rcu_note_context_switch(cpu);
local_irq_enable();
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index c09f2955ae30..c530bc5be7cf 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -115,6 +115,182 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
return done.executed ? done.ret : -ENOENT;
}
+/* This controls the threads on each CPU. */
+enum multi_stop_state {
+ /* Dummy starting state for thread. */
+ MULTI_STOP_NONE,
+ /* Awaiting everyone to be scheduled. */
+ MULTI_STOP_PREPARE,
+ /* Disable interrupts. */
+ MULTI_STOP_DISABLE_IRQ,
+ /* Run the function */
+ MULTI_STOP_RUN,
+ /* Exit */
+ MULTI_STOP_EXIT,
+};
+
+struct multi_stop_data {
+ int (*fn)(void *);
+ void *data;
+ /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
+ unsigned int num_threads;
+ const struct cpumask *active_cpus;
+
+ enum multi_stop_state state;
+ atomic_t thread_ack;
+};
+
+static void set_state(struct multi_stop_data *msdata,
+ enum multi_stop_state newstate)
+{
+ /* Reset ack counter. */
+ atomic_set(&msdata->thread_ack, msdata->num_threads);
+ smp_wmb();
+ msdata->state = newstate;
+}
+
+/* Last one to ack a state moves to the next state. */
+static void ack_state(struct multi_stop_data *msdata)
+{
+ if (atomic_dec_and_test(&msdata->thread_ack))
+ set_state(msdata, msdata->state + 1);
+}
+
+/* This is the cpu_stop function which stops the CPU. */
+static int multi_cpu_stop(void *data)
+{
+ struct multi_stop_data *msdata = data;
+ enum multi_stop_state curstate = MULTI_STOP_NONE;
+ int cpu = smp_processor_id(), err = 0;
+ unsigned long flags;
+ bool is_active;
+
+ /*
+ * When called from stop_machine_from_inactive_cpu(), irq might
+ * already be disabled. Save the state and restore it on exit.
+ */
+ local_save_flags(flags);
+
+ if (!msdata->active_cpus)
+ is_active = cpu == cpumask_first(cpu_online_mask);
+ else
+ is_active = cpumask_test_cpu(cpu, msdata->active_cpus);
+
+ /* Simple state machine */
+ do {
+ /* Chill out and ensure we re-read multi_stop_state. */
+ cpu_relax();
+ if (msdata->state != curstate) {
+ curstate = msdata->state;
+ switch (curstate) {
+ case MULTI_STOP_DISABLE_IRQ:
+ local_irq_disable();
+ hard_irq_disable();
+ break;
+ case MULTI_STOP_RUN:
+ if (is_active)
+ err = msdata->fn(msdata->data);
+ break;
+ default:
+ break;
+ }
+ ack_state(msdata);
+ }
+ } while (curstate != MULTI_STOP_EXIT);
+
+ local_irq_restore(flags);
+ return err;
+}
+
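The state machine above only advances when the last participating thread acknowledges the current state, which is what keeps all CPUs in lock-step through PREPARE, DISABLE_IRQ, RUN and EXIT. A user-space pthread sketch of that ack counter (two threads standing in for two CPUs; the state names are simplified):

/* User-space sketch of the "last ack advances the state" pattern above. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum state { NONE, PREPARE, RUN, EXIT_STATE };

static atomic_int state = NONE;
static atomic_int thread_ack;
static const int num_threads = 2;

static void set_state(int newstate)
{
	atomic_store(&thread_ack, num_threads);	/* reset the ack counter */
	atomic_store(&state, newstate);
}

static void ack_state(void)
{
	if (atomic_fetch_sub(&thread_ack, 1) == 1)	/* last one to ack */
		set_state(atomic_load(&state) + 1);
}

static void *stopper(void *arg)
{
	int curstate = NONE;

	do {
		if (atomic_load(&state) != curstate) {
			curstate = atomic_load(&state);
			if (curstate == RUN)
				printf("thread %ld: run\n", (long)arg);
			ack_state();
		}
	} while (curstate != EXIT_STATE);
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	set_state(PREPARE);
	for (long i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, stopper, (void *)i);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}
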
+struct irq_cpu_stop_queue_work_info {
+ int cpu1;
+ int cpu2;
+ struct cpu_stop_work *work1;
+ struct cpu_stop_work *work2;
+};
+
+/*
+ * This function is always run with irqs and preemption disabled.
+ * This guarantees that both work1 and work2 get queued, before
+ * our local migrate thread gets the chance to preempt us.
+ */
+static void irq_cpu_stop_queue_work(void *arg)
+{
+ struct irq_cpu_stop_queue_work_info *info = arg;
+ cpu_stop_queue_work(info->cpu1, info->work1);
+ cpu_stop_queue_work(info->cpu2, info->work2);
+}
+
+/**
+ * stop_two_cpus - stops two cpus
+ * @cpu1: the cpu to stop
+ * @cpu2: the other cpu to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ *
+ * Stops both the current and specified CPU and runs @fn on one of them.
+ *
+ * returns when both are completed.
+ */
+int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
+{
+ struct cpu_stop_done done;
+ struct cpu_stop_work work1, work2;
+ struct irq_cpu_stop_queue_work_info call_args;
+ struct multi_stop_data msdata;
+
+ preempt_disable();
+ msdata = (struct multi_stop_data){
+ .fn = fn,
+ .data = arg,
+ .num_threads = 2,
+ .active_cpus = cpumask_of(cpu1),
+ };
+
+ work1 = work2 = (struct cpu_stop_work){
+ .fn = multi_cpu_stop,
+ .arg = &msdata,
+ .done = &done
+ };
+
+ call_args = (struct irq_cpu_stop_queue_work_info){
+ .cpu1 = cpu1,
+ .cpu2 = cpu2,
+ .work1 = &work1,
+ .work2 = &work2,
+ };
+
+ cpu_stop_init_done(&done, 2);
+ set_state(&msdata, MULTI_STOP_PREPARE);
+
+ /*
+ * If we observe both CPUs active we know _cpu_down() cannot yet have
+ * queued its stop_machine works and therefore ours will get executed
+	 * first. Or it's not either one of our CPUs that's getting unplugged,
+ * in which case we don't care.
+ *
+ * This relies on the stopper workqueues to be FIFO.
+ */
+ if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
+ preempt_enable();
+ return -ENOENT;
+ }
+
+ /*
+ * Queuing needs to be done by the lowest numbered CPU, to ensure
+ * that works are always queued in the same order on every CPU.
+ * This prevents deadlocks.
+ */
+ smp_call_function_single(min(cpu1, cpu2),
+ &irq_cpu_stop_queue_work,
+ &call_args, 0);
+ preempt_enable();
+
+ wait_for_completion(&done.completion);
+
+ return done.executed ? done.ret : -ENOENT;
+}
+
/**
* stop_one_cpu_nowait - stop a cpu but don't wait for completion
* @cpu: cpu to stop
@@ -359,98 +535,14 @@ early_initcall(cpu_stop_init);
#ifdef CONFIG_STOP_MACHINE
-/* This controls the threads on each CPU. */
-enum stopmachine_state {
- /* Dummy starting state for thread. */
- STOPMACHINE_NONE,
- /* Awaiting everyone to be scheduled. */
- STOPMACHINE_PREPARE,
- /* Disable interrupts. */
- STOPMACHINE_DISABLE_IRQ,
- /* Run the function */
- STOPMACHINE_RUN,
- /* Exit */
- STOPMACHINE_EXIT,
-};
-
-struct stop_machine_data {
- int (*fn)(void *);
- void *data;
- /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
- unsigned int num_threads;
- const struct cpumask *active_cpus;
-
- enum stopmachine_state state;
- atomic_t thread_ack;
-};
-
-static void set_state(struct stop_machine_data *smdata,
- enum stopmachine_state newstate)
-{
- /* Reset ack counter. */
- atomic_set(&smdata->thread_ack, smdata->num_threads);
- smp_wmb();
- smdata->state = newstate;
-}
-
-/* Last one to ack a state moves to the next state. */
-static void ack_state(struct stop_machine_data *smdata)
-{
- if (atomic_dec_and_test(&smdata->thread_ack))
- set_state(smdata, smdata->state + 1);
-}
-
-/* This is the cpu_stop function which stops the CPU. */
-static int stop_machine_cpu_stop(void *data)
-{
- struct stop_machine_data *smdata = data;
- enum stopmachine_state curstate = STOPMACHINE_NONE;
- int cpu = smp_processor_id(), err = 0;
- unsigned long flags;
- bool is_active;
-
- /*
- * When called from stop_machine_from_inactive_cpu(), irq might
- * already be disabled. Save the state and restore it on exit.
- */
- local_save_flags(flags);
-
- if (!smdata->active_cpus)
- is_active = cpu == cpumask_first(cpu_online_mask);
- else
- is_active = cpumask_test_cpu(cpu, smdata->active_cpus);
-
- /* Simple state machine */
- do {
- /* Chill out and ensure we re-read stopmachine_state. */
- cpu_relax();
- if (smdata->state != curstate) {
- curstate = smdata->state;
- switch (curstate) {
- case STOPMACHINE_DISABLE_IRQ:
- local_irq_disable();
- hard_irq_disable();
- break;
- case STOPMACHINE_RUN:
- if (is_active)
- err = smdata->fn(smdata->data);
- break;
- default:
- break;
- }
- ack_state(smdata);
- }
- } while (curstate != STOPMACHINE_EXIT);
-
- local_irq_restore(flags);
- return err;
-}
-
int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
- struct stop_machine_data smdata = { .fn = fn, .data = data,
- .num_threads = num_online_cpus(),
- .active_cpus = cpus };
+ struct multi_stop_data msdata = {
+ .fn = fn,
+ .data = data,
+ .num_threads = num_online_cpus(),
+ .active_cpus = cpus,
+ };
if (!stop_machine_initialized) {
/*
@@ -461,7 +553,7 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
unsigned long flags;
int ret;
- WARN_ON_ONCE(smdata.num_threads != 1);
+ WARN_ON_ONCE(msdata.num_threads != 1);
local_irq_save(flags);
hard_irq_disable();
@@ -472,8 +564,8 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
}
/* Set the initial state and stop all online cpus. */
- set_state(&smdata, STOPMACHINE_PREPARE);
- return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
+ set_state(&msdata, MULTI_STOP_PREPARE);
+ return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}
int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
@@ -513,25 +605,25 @@ EXPORT_SYMBOL_GPL(stop_machine);
int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
const struct cpumask *cpus)
{
- struct stop_machine_data smdata = { .fn = fn, .data = data,
+ struct multi_stop_data msdata = { .fn = fn, .data = data,
.active_cpus = cpus };
struct cpu_stop_done done;
int ret;
/* Local CPU must be inactive and CPU hotplug in progress. */
BUG_ON(cpu_active(raw_smp_processor_id()));
- smdata.num_threads = num_active_cpus() + 1; /* +1 for local */
+ msdata.num_threads = num_active_cpus() + 1; /* +1 for local */
/* No proper task established and can't sleep - busy wait for lock. */
while (!mutex_trylock(&stop_cpus_mutex))
cpu_relax();
/* Schedule work on other CPUs and execute directly for local CPU */
- set_state(&smdata, STOPMACHINE_PREPARE);
+ set_state(&msdata, MULTI_STOP_PREPARE);
cpu_stop_init_done(&done, num_active_cpus());
- queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
+ queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
&done);
- ret = stop_machine_cpu_stop(&smdata);
+ ret = multi_cpu_stop(&msdata);
/* Busy wait for completion. */
while (!completion_done(&done.completion))
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b2f06f3c6a3f..9aa71cec9e0e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -190,7 +190,7 @@ static int proc_dostring_coredump(struct ctl_table *table, int write,
#ifdef CONFIG_MAGIC_SYSRQ
/* Note: sysrq code uses it's own private copy */
-static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
+static int __sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
static int sysrq_sysctl_handler(ctl_table *table, int write,
void __user *buffer, size_t *lenp,
@@ -371,13 +371,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
{
- .procname = "numa_balancing_scan_period_reset",
- .data = &sysctl_numa_balancing_scan_period_reset,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
.procname = "numa_balancing_scan_period_max_ms",
.data = &sysctl_numa_balancing_scan_period_max,
.maxlen = sizeof(unsigned int),
@@ -391,6 +384,20 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "numa_balancing_settle_count",
+ .data = &sysctl_numa_balancing_settle_count,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "numa_balancing_migrate_deferred",
+ .data = &sysctl_numa_balancing_migrate_deferred,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */
{
@@ -962,9 +969,10 @@ static struct ctl_table kern_table[] = {
{
.procname = "hung_task_check_count",
.data = &sysctl_hung_task_check_count,
- .maxlen = sizeof(unsigned long),
+ .maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_doulongvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
},
{
.procname = "hung_task_timeout_secs",
@@ -1049,6 +1057,7 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(sysctl_perf_event_sample_rate),
.mode = 0644,
.proc_handler = perf_proc_update_handler,
+ .extra1 = &one,
},
{
.procname = "perf_cpu_time_max_percent",
diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S
new file mode 100644
index 000000000000..4aef390671cb
--- /dev/null
+++ b/kernel/system_certificates.S
@@ -0,0 +1,10 @@
+#include <linux/export.h>
+#include <linux/init.h>
+
+ __INITRODATA
+
+ .globl VMLINUX_SYMBOL(system_certificate_list)
+VMLINUX_SYMBOL(system_certificate_list):
+ .incbin "kernel/x509_certificate_list"
+ .globl VMLINUX_SYMBOL(system_certificate_list_end)
+VMLINUX_SYMBOL(system_certificate_list_end):
diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c
new file mode 100644
index 000000000000..564dd93430a2
--- /dev/null
+++ b/kernel/system_keyring.c
@@ -0,0 +1,105 @@
+/* System trusted keyring for trusted public keys
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+#include "module-internal.h"
+
+struct key *system_trusted_keyring;
+EXPORT_SYMBOL_GPL(system_trusted_keyring);
+
+extern __initconst const u8 system_certificate_list[];
+extern __initconst const u8 system_certificate_list_end[];
+
+/*
+ * Load the compiled-in keys
+ */
+static __init int system_trusted_keyring_init(void)
+{
+ pr_notice("Initialise system trusted keyring\n");
+
+ system_trusted_keyring =
+ keyring_alloc(".system_keyring",
+ KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL);
+ if (IS_ERR(system_trusted_keyring))
+ panic("Can't allocate system trusted keyring\n");
+
+ set_bit(KEY_FLAG_TRUSTED_ONLY, &system_trusted_keyring->flags);
+ return 0;
+}
+
+/*
+ * Must be initialised before we try and load the keys into the keyring.
+ */
+device_initcall(system_trusted_keyring_init);
+
+/*
+ * Load the compiled-in list of X.509 certificates.
+ */
+static __init int load_system_certificate_list(void)
+{
+ key_ref_t key;
+ const u8 *p, *end;
+ size_t plen;
+
+ pr_notice("Loading compiled-in X.509 certificates\n");
+
+ end = system_certificate_list_end;
+ p = system_certificate_list;
+ while (p < end) {
+ /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
+ * than 256 bytes in size.
+ */
+ if (end - p < 4)
+ goto dodgy_cert;
+ if (p[0] != 0x30 &&
+ p[1] != 0x82)
+ goto dodgy_cert;
+ plen = (p[2] << 8) | p[3];
+ plen += 4;
+ if (plen > end - p)
+ goto dodgy_cert;
+
+ key = key_create_or_update(make_key_ref(system_trusted_keyring, 1),
+ "asymmetric",
+ NULL,
+ p,
+ plen,
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA |
+ KEY_ALLOC_TRUSTED);
+ if (IS_ERR(key)) {
+ pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
+ PTR_ERR(key));
+ } else {
+ pr_notice("Loaded X.509 cert '%s'\n",
+ key_ref_to_ptr(key)->description);
+ key_ref_put(key);
+ }
+ p += plen;
+ }
+
+ return 0;
+
+dodgy_cert:
+ pr_err("Problem parsing in-kernel X.509 certificate list\n");
+ return 0;
+}
+late_initcall(load_system_certificate_list);
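For illustration only, a minimal standalone sketch of the length arithmetic the loader above applies to each blob entry. It assumes the same layout (SEQUENCE tag 0x30 followed by a two-byte long-form length, 0x82 hi lo) and treats either unexpected tag byte as an error; the function and variable names are made up for the example and are not part of the patch.

#include <stddef.h>
#include <stdio.h>

static size_t der_cert_len(const unsigned char *p, size_t avail)
{
	size_t plen;

	if (avail < 4)
		return 0;                       /* truncated header */
	if (p[0] != 0x30 || p[1] != 0x82)
		return 0;                       /* not SEQUENCE + 2-byte length */
	plen = ((size_t)p[2] << 8) | p[3];      /* body length, 256..65535 */
	plen += 4;                              /* count the header itself */
	return plen <= avail ? plen : 0;        /* must fit in the blob */
}

int main(void)
{
	/* a header announcing a 0x0123-byte body */
	unsigned char hdr[4] = { 0x30, 0x82, 0x01, 0x23 };

	printf("record size: %zu bytes\n", der_cert_len(hdr, 0x123 + 4));
	return 0;
}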
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 2b62fe86f9ec..3ce6e8c5f3fc 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -100,7 +100,7 @@ config NO_HZ_FULL
# RCU_USER_QS dependency
depends on HAVE_CONTEXT_TRACKING
# VIRT_CPU_ACCOUNTING_GEN dependency
- depends on 64BIT
+ depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
select NO_HZ_COMMON
select RCU_USER_QS
select RCU_NOCB_CPU
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index eec50fcef9e4..88c9c65a430d 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -490,7 +490,7 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
if (!alarmtimer_get_rtcdev())
- return -ENOTSUPP;
+ return -EINVAL;
return hrtimer_get_res(baseid, tp);
}
@@ -507,7 +507,7 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
if (!alarmtimer_get_rtcdev())
- return -ENOTSUPP;
+ return -EINVAL;
*tp = ktime_to_timespec(base->gettime());
return 0;
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 38959c866789..086ad6043bcb 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -33,29 +33,64 @@ struct ce_unbind {
int res;
};
-/**
- * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
- * @latch: value to convert
- * @evt: pointer to clock event device descriptor
- *
- * Math helper, returns latch value converted to nanoseconds (bound checked)
- */
-u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
+ bool ismax)
{
u64 clc = (u64) latch << evt->shift;
+ u64 rnd;
if (unlikely(!evt->mult)) {
evt->mult = 1;
WARN_ON(1);
}
+ rnd = (u64) evt->mult - 1;
+
+ /*
+ * Upper bound sanity check. If the backwards conversion is
+ * not equal to latch, we know that the above shift overflowed.
+ */
+ if ((clc >> evt->shift) != (u64)latch)
+ clc = ~0ULL;
+
+ /*
+ * Scaled math oddities:
+ *
+ * For mult <= (1 << shift) we can safely add mult - 1 to
+ * prevent integer rounding loss. So the backwards conversion
+ * from nsec to device ticks will be correct.
+ *
+ * For mult > (1 << shift), i.e. device frequency is > 1GHz, we
+ * need to be careful. Adding mult - 1 will result in a value
+ * which when converted back to device ticks can be larger
+ * than latch by up to (mult - 1) >> shift. For the min_delta
+ * calculation we still want to apply this in order to stay
+ * above the minimum device ticks limit. For the upper limit
+ * we would end up with a latch value larger than the upper
+ * limit of the device, so we omit the add to stay below the
+ * device upper boundary.
+ *
+ * Also omit the add if it would overflow the u64 boundary.
+ */
+ if ((~0ULL - clc > rnd) &&
+ (!ismax || evt->mult <= (1U << evt->shift)))
+ clc += rnd;
do_div(clc, evt->mult);
- if (clc < 1000)
- clc = 1000;
- if (clc > KTIME_MAX)
- clc = KTIME_MAX;
- return clc;
+ /* Deltas less than 1usec are pointless noise */
+ return clc > 1000 ? clc : 1000;
+}
+
+/**
+ * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
+ * @latch: value to convert
+ * @evt: pointer to clock event device descriptor
+ *
+ * Math helper, returns latch value converted to nanoseconds (bound checked)
+ */
+u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+{
+ return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
@@ -380,8 +415,8 @@ void clockevents_config(struct clock_event_device *dev, u32 freq)
sec = 600;
clockevents_calc_mult_shift(dev, freq, sec);
- dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
- dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
+ dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
+ dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
/**
@@ -584,7 +619,7 @@ static ssize_t sysfs_unbind_tick_dev(struct device *dev,
const char *buf, size_t count)
{
char name[CS_NAME_LEN];
- size_t ret = sysfs_get_uname(buf, name, count);
+ ssize_t ret = sysfs_get_uname(buf, name, count);
struct clock_event_device *ce;
if (ret < 0)
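As a rough illustration of the rounding rules described in the comment block above, the following standalone sketch mirrors the cev_delta2ns() arithmetic in plain userspace C. The mult/shift values are illustrative (roughly a 10 MHz event device) and do_div() is replaced by an ordinary 64-bit division; none of these names come from the patch itself.

#include <stdint.h>
#include <stdio.h>

static uint64_t delta2ns(uint64_t latch, uint32_t mult, uint32_t shift, int ismax)
{
	uint64_t clc = latch << shift;
	uint64_t rnd = (uint64_t)mult - 1;

	if ((clc >> shift) != latch)            /* the shift overflowed */
		clc = ~0ULL;
	if (~0ULL - clc > rnd &&                /* adding rnd cannot overflow */
	    (!ismax || mult <= (1U << shift)))  /* and is wanted for this bound */
		clc += rnd;
	clc /= mult;
	return clc > 1000 ? clc : 1000;         /* sub-usec deltas are noise */
}

int main(void)
{
	/* e.g. a 10 MHz device: mult ~= 0.01 * 2^24, shift = 24 */
	uint32_t mult = 167772, shift = 24;

	printf("100 device ticks -> %llu ns\n",
	       (unsigned long long)delta2ns(100, mult, shift, 0));
	return 0;
}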
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 50a8736757f3..ba3e502c955a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -479,6 +479,7 @@ static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
+void clocksource_mark_unstable(struct clocksource *cs) { }
#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
@@ -537,40 +538,55 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
}
/**
- * clocksource_max_deferment - Returns max time the clocksource can be deferred
- * @cs: Pointer to clocksource
- *
+ * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
+ * @maxadj: maximum adjustment value to mult (~11%)
+ * @mask: bitmask for two's complement subtraction of non 64 bit counters
*/
-static u64 clocksource_max_deferment(struct clocksource *cs)
+u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
{
u64 max_nsecs, max_cycles;
/*
* Calculate the maximum number of cycles that we can pass to the
* cyc2ns function without overflowing a 64-bit signed result. The
- * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
+ * maximum number of cycles is equal to ULLONG_MAX/(mult+maxadj)
* which is equivalent to the below.
- * max_cycles < (2^63)/(cs->mult + cs->maxadj)
- * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
- * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
- * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
- * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
+ * max_cycles < (2^63)/(mult + maxadj)
+ * max_cycles < 2^(log2((2^63)/(mult + maxadj)))
+ * max_cycles < 2^(log2(2^63) - log2(mult + maxadj))
+ * max_cycles < 2^(63 - log2(mult + maxadj))
+ * max_cycles < 1 << (63 - log2(mult + maxadj))
* Please note that we add 1 to the result of the log2 to account for
* any rounding errors, ensure the above inequality is satisfied and
* no overflow will occur.
*/
- max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
+ max_cycles = 1ULL << (63 - (ilog2(mult + maxadj) + 1));
/*
* The actual maximum number of cycles we can defer the clocksource is
- * determined by the minimum of max_cycles and cs->mask.
+ * determined by the minimum of max_cycles and mask.
* Note: Here we subtract the maxadj to make sure we don't sleep for
* too long if there's a large negative adjustment.
*/
- max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
- max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
- cs->shift);
+ max_cycles = min(max_cycles, mask);
+ max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
+
+ return max_nsecs;
+}
+
+/**
+ * clocksource_max_deferment - Returns max time the clocksource can be deferred
+ * @cs: Pointer to clocksource
+ *
+ */
+static u64 clocksource_max_deferment(struct clocksource *cs)
+{
+ u64 max_nsecs;
+ max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj,
+ cs->mask);
/*
* To ensure that the clocksource does not wrap whilst we are idle,
* limit the time the clocksource can be deferred by 12.5%. Please
@@ -893,7 +909,7 @@ sysfs_show_current_clocksources(struct device *dev,
return count;
}
-size_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
+ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
size_t ret = cnt;
@@ -924,7 +940,7 @@ static ssize_t sysfs_override_clocksource(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- size_t ret;
+ ssize_t ret;
mutex_lock(&clocksource_mutex);
@@ -952,7 +968,7 @@ static ssize_t sysfs_unbind_clocksource(struct device *dev,
{
struct clocksource *cs;
char name[CS_NAME_LEN];
- size_t ret;
+ ssize_t ret;
ret = sysfs_get_uname(buf, name, count);
if (ret < 0)
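To make the bound above concrete, here is a standalone sketch of the same calculation (illustrative userspace C, not the kernel helper): cap the cycle count so that cycles * (mult + maxadj) cannot overflow 63 bits, then convert the capped count with the smallest plausible multiplier. ilog2() is open-coded and the example constants (a 32-bit counter, mult = 2^24, a 12.5% maxadj) are assumptions for demonstration only.

#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2_u32(uint32_t v)       /* floor(log2(v)), v > 0 */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static uint64_t max_nsecs(uint32_t mult, uint32_t shift, uint32_t maxadj, uint64_t mask)
{
	uint64_t max_cycles = 1ULL << (63 - (ilog2_u32(mult + maxadj) + 1));

	if (max_cycles > mask)
		max_cycles = mask;
	return (max_cycles * (mult - maxadj)) >> shift; /* cyc2ns() */
}

int main(void)
{
	printf("max deferment: %llu ns\n",
	       (unsigned long long)max_nsecs(1 << 24, 24, 1 << 21, 0xffffffffULL));
	return 0;
}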
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index bb2215174f05..af8d1d4f3d55 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -475,6 +475,7 @@ static void sync_cmos_clock(struct work_struct *work)
* called as close as possible to 500 ms before the new second starts.
* This code is run on a timer. If the clock is set, that timer
* may not expire at the correct time. Thus, we adjust...
+ * We want the clock to be within a couple of ticks from the target.
*/
if (!ntp_synced()) {
/*
@@ -485,7 +486,7 @@ static void sync_cmos_clock(struct work_struct *work)
}
getnstimeofday(&now);
- if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) {
+ if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
struct timespec adjust = now;
fail = -ENODEV;
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 0b479a6a22bb..68b799375981 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -8,25 +8,28 @@
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
+#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
+#include <linux/seqlock.h>
+#include <linux/bitops.h>
struct clock_data {
+ ktime_t wrap_kt;
u64 epoch_ns;
- u32 epoch_cyc;
- u32 epoch_cyc_copy;
+ u64 epoch_cyc;
+ seqcount_t seq;
unsigned long rate;
u32 mult;
u32 shift;
bool suspended;
};
-static void sched_clock_poll(unsigned long wrap_ticks);
-static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
+static struct hrtimer sched_clock_timer;
static int irqtime = -1;
core_param(irqtime, irqtime, int, 0400);
@@ -35,42 +38,46 @@ static struct clock_data cd = {
.mult = NSEC_PER_SEC / HZ,
};
-static u32 __read_mostly sched_clock_mask = 0xffffffff;
+static u64 __read_mostly sched_clock_mask;
-static u32 notrace jiffy_sched_clock_read(void)
+static u64 notrace jiffy_sched_clock_read(void)
{
- return (u32)(jiffies - INITIAL_JIFFIES);
+ /*
+ * We don't need to use get_jiffies_64 on 32-bit arches here
+ * because we register with BITS_PER_LONG
+ */
+ return (u64)(jiffies - INITIAL_JIFFIES);
}
-static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
+static u32 __read_mostly (*read_sched_clock_32)(void);
+
+static u64 notrace read_sched_clock_32_wrapper(void)
+{
+ return read_sched_clock_32();
+}
+
+static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
return (cyc * mult) >> shift;
}
-static unsigned long long notrace sched_clock_32(void)
+unsigned long long notrace sched_clock(void)
{
u64 epoch_ns;
- u32 epoch_cyc;
- u32 cyc;
+ u64 epoch_cyc;
+ u64 cyc;
+ unsigned long seq;
if (cd.suspended)
return cd.epoch_ns;
- /*
- * Load the epoch_cyc and epoch_ns atomically. We do this by
- * ensuring that we always write epoch_cyc, epoch_ns and
- * epoch_cyc_copy in strict order, and read them in strict order.
- * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
- * the middle of an update, and we should repeat the load.
- */
do {
+ seq = read_seqcount_begin(&cd.seq);
epoch_cyc = cd.epoch_cyc;
- smp_rmb();
epoch_ns = cd.epoch_ns;
- smp_rmb();
- } while (epoch_cyc != cd.epoch_cyc_copy);
+ } while (read_seqcount_retry(&cd.seq, seq));
cyc = read_sched_clock();
cyc = (cyc - epoch_cyc) & sched_clock_mask;
@@ -83,49 +90,46 @@ static unsigned long long notrace sched_clock_32(void)
static void notrace update_sched_clock(void)
{
unsigned long flags;
- u32 cyc;
+ u64 cyc;
u64 ns;
cyc = read_sched_clock();
ns = cd.epoch_ns +
cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
cd.mult, cd.shift);
- /*
- * Write epoch_cyc and epoch_ns in a way that the update is
- * detectable in cyc_to_fixed_sched_clock().
- */
+
raw_local_irq_save(flags);
- cd.epoch_cyc_copy = cyc;
- smp_wmb();
+ write_seqcount_begin(&cd.seq);
cd.epoch_ns = ns;
- smp_wmb();
cd.epoch_cyc = cyc;
+ write_seqcount_end(&cd.seq);
raw_local_irq_restore(flags);
}
-static void sched_clock_poll(unsigned long wrap_ticks)
+static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
- mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
update_sched_clock();
+ hrtimer_forward_now(hrt, cd.wrap_kt);
+ return HRTIMER_RESTART;
}
-void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
+void __init sched_clock_register(u64 (*read)(void), int bits,
+ unsigned long rate)
{
- unsigned long r, w;
+ unsigned long r;
u64 res, wrap;
char r_unit;
if (cd.rate > rate)
return;
- BUG_ON(bits > 32);
WARN_ON(!irqs_disabled());
read_sched_clock = read;
- sched_clock_mask = (1ULL << bits) - 1;
+ sched_clock_mask = CLOCKSOURCE_MASK(bits);
cd.rate = rate;
/* calculate the mult/shift to convert counter ticks to ns. */
- clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
+ clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600);
r = rate;
if (r >= 4000000) {
@@ -138,20 +142,14 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
r_unit = ' ';
/* calculate how many ns until we wrap */
- wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
- do_div(wrap, NSEC_PER_MSEC);
- w = wrap;
+ wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask);
+ cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
/* calculate the ns resolution of this counter */
res = cyc_to_ns(1ULL, cd.mult, cd.shift);
- pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
- bits, r, r_unit, res, w);
+ pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
+ bits, r, r_unit, res, wrap);
- /*
- * Start the timer to keep sched_clock() properly updated and
- * sets the initial epoch.
- */
- sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
update_sched_clock();
/*
@@ -166,11 +164,10 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
pr_debug("Registered %pF as sched_clock source\n", read);
}
-unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
-
-unsigned long long notrace sched_clock(void)
+void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
- return sched_clock_func();
+ read_sched_clock_32 = read;
+ sched_clock_register(read_sched_clock_32_wrapper, bits, rate);
}
void __init sched_clock_postinit(void)
@@ -180,14 +177,22 @@ void __init sched_clock_postinit(void)
* make it the final one.
*/
if (read_sched_clock == jiffy_sched_clock_read)
- setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
+ sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
- sched_clock_poll(sched_clock_timer.data);
+ update_sched_clock();
+
+ /*
+ * Start the timer to keep sched_clock() properly updated and
+ * sets the initial epoch.
+ */
+ hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ sched_clock_timer.function = sched_clock_poll;
+ hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}
static int sched_clock_suspend(void)
{
- sched_clock_poll(sched_clock_timer.data);
+ sched_clock_poll(&sched_clock_timer);
cd.suspended = true;
return 0;
}
@@ -195,7 +200,6 @@ static int sched_clock_suspend(void)
static void sched_clock_resume(void)
{
cd.epoch_cyc = read_sched_clock();
- cd.epoch_cyc_copy = cd.epoch_cyc;
cd.suspended = false;
}
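The epoch arithmetic itself is unchanged by the 64-bit/seqcount conversion above; the seqcount only guarantees that epoch_cyc and epoch_ns are read as a consistent pair. A standalone sketch of that arithmetic, with purely illustrative constants and names, looks like this:

#include <stdint.h>
#include <stdio.h>

struct epoch {
	uint64_t cyc;   /* counter value when the epoch was taken */
	uint64_t ns;    /* nanoseconds accumulated up to that point */
};

static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

static uint64_t sched_clock_ns(uint64_t cyc, const struct epoch *e,
			       uint64_t mask, uint32_t mult, uint32_t shift)
{
	/* wrap-safe delta against the last epoch, then scale by mult/shift */
	return e->ns + cyc_to_ns((cyc - e->cyc) & mask, mult, shift);
}

int main(void)
{
	/* a 56-bit counter where 1 cycle == 4 ns (mult = 4 << 8, shift = 8) */
	struct epoch e = { .cyc = 1000, .ns = 5000 };
	uint64_t mask = (1ULL << 56) - 1;

	printf("%llu ns\n", (unsigned long long)
	       sched_clock_ns(1250, &e, mask, 4 << 8, 8)); /* 5000 + 250*4 */
	return 0;
}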
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 218bcb565fed..9532690daaa9 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -70,6 +70,7 @@ static bool tick_check_broadcast_device(struct clock_event_device *curdev,
struct clock_event_device *newdev)
{
if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
+ (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
(newdev->features & CLOCK_EVT_FEAT_C3STOP))
return false;
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index bc906cad709b..18e71f7fbc2a 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -31,7 +31,7 @@ extern void tick_install_replacement(struct clock_event_device *dev);
extern void clockevents_shutdown(struct clock_event_device *dev);
-extern size_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
+extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
/*
* NO_HZ / high resolution timer shared code
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 947ba25a95a0..3abf53418b67 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1613,9 +1613,10 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
* ktime_get_update_offsets - hrtimer helper
* @offs_real: pointer to storage for monotonic -> realtime offset
* @offs_boot: pointer to storage for monotonic -> boottime offset
+ * @offs_tai: pointer to storage for monotonic -> clock tai offset
*
* Returns current monotonic time and updates the offsets
- * Called from hrtimer_interupt() or retrigger_next_event()
+ * Called from hrtimer_interrupt() or retrigger_next_event()
*/
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
ktime_t *offs_tai)
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 0b537f27b559..1fb08f21302e 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -298,15 +298,15 @@ static int tstats_show(struct seq_file *m, void *v)
period = ktime_to_timespec(time);
ms = period.tv_nsec / 1000000;
- seq_puts(m, "Timer Stats Version: v0.2\n");
+ seq_puts(m, "Timer Stats Version: v0.3\n");
seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
if (atomic_read(&overflow_count))
- seq_printf(m, "Overflow: %d entries\n",
- atomic_read(&overflow_count));
+ seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
+ seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
for (i = 0; i < nr_entries; i++) {
entry = entries + i;
- if (entry->timer_flag & TIMER_STATS_FLAG_DEFERRABLE) {
+ if (entry->timer_flag & TIMER_STATS_FLAG_DEFERRABLE) {
seq_printf(m, "%4luD, %5d %-16s ",
entry->count, entry->pid, entry->comm);
} else {
diff --git a/kernel/timer.c b/kernel/timer.c
index 4296d13db3d1..6582b82fa966 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1092,7 +1092,7 @@ static int cascade(struct tvec_base *base, struct tvec *tv, int index)
static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
unsigned long data)
{
- int preempt_count = preempt_count();
+ int count = preempt_count();
#ifdef CONFIG_LOCKDEP
/*
@@ -1119,16 +1119,16 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
lock_map_release(&lockdep_map);
- if (preempt_count != preempt_count()) {
+ if (count != preempt_count()) {
WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
- fn, preempt_count, preempt_count());
+ fn, count, preempt_count());
/*
* Restore the preempt count. That gives us a decent
* chance to survive and extract information. If the
* callback kept a lock held, bad luck, but not worse
* than the BUG() we had.
*/
- preempt_count() = preempt_count;
+ preempt_count_set(count);
}
}
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b8b8560bfb95..b418cb0d7242 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -26,6 +26,7 @@
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
+#include <linux/list.h>
#include <trace/events/block.h>
@@ -38,6 +39,9 @@ static unsigned int blktrace_seq __read_mostly = 1;
static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;
+static LIST_HEAD(running_trace_list);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
+
/* Select an alternative, minimalistic output than the original one */
#define TRACE_BLK_OPT_CLASSIC 0x1
@@ -107,10 +111,18 @@ record_it:
* Send out a notify for this process, if we haven't done so since a trace
* started
*/
-static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
+static void trace_note_tsk(struct task_struct *tsk)
{
+ unsigned long flags;
+ struct blk_trace *bt;
+
tsk->btrace_seq = blktrace_seq;
- trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
+ spin_lock_irqsave(&running_trace_lock, flags);
+ list_for_each_entry(bt, &running_trace_list, running_list) {
+ trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
+ sizeof(tsk->comm));
+ }
+ spin_unlock_irqrestore(&running_trace_lock, flags);
}
static void trace_note_time(struct blk_trace *bt)
@@ -229,16 +241,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
goto record_it;
}
+ if (unlikely(tsk->btrace_seq != blktrace_seq))
+ trace_note_tsk(tsk);
+
/*
* A word about the locking here - we disable interrupts to reserve
* some space in the relay per-cpu buffer, to prevent an irq
* from coming in and stepping on our toes.
*/
local_irq_save(flags);
-
- if (unlikely(tsk->btrace_seq != blktrace_seq))
- trace_note_tsk(bt, tsk);
-
t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
if (t) {
sequence = per_cpu_ptr(bt->sequence, cpu);
@@ -477,6 +488,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
bt->dir = dir;
bt->dev = dev;
atomic_set(&bt->dropped, 0);
+ INIT_LIST_HEAD(&bt->running_list);
ret = -EIO;
bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
@@ -567,13 +579,12 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
.end_lba = cbuts.end_lba,
.pid = cbuts.pid,
};
- memcpy(&buts.name, &cbuts.name, 32);
ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
if (ret)
return ret;
- if (copy_to_user(arg, &buts.name, 32)) {
+ if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
blk_trace_remove(q);
return -EFAULT;
}
@@ -601,6 +612,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
blktrace_seq++;
smp_mb();
bt->trace_state = Blktrace_running;
+ spin_lock_irq(&running_trace_lock);
+ list_add(&bt->running_list, &running_trace_list);
+ spin_unlock_irq(&running_trace_lock);
trace_note_time(bt);
ret = 0;
@@ -608,6 +622,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
} else {
if (bt->trace_state == Blktrace_running) {
bt->trace_state = Blktrace_stopped;
+ spin_lock_irq(&running_trace_lock);
+ list_del_init(&bt->running_list);
+ spin_unlock_irq(&running_trace_lock);
relay_flush(bt->rchan);
ret = 0;
}
@@ -764,8 +781,8 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
if (!error && !bio_flagged(bio, BIO_UPTODATE))
error = EIO;
- __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
- error, 0, NULL);
+ __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+ bio->bi_rw, what, error, 0, NULL);
}
static void blk_add_trace_bio_bounce(void *ignore,
@@ -868,8 +885,9 @@ static void blk_add_trace_split(void *ignore,
if (bt) {
__be64 rpdu = cpu_to_be64(pdu);
- __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
- BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+ __blk_add_trace(bt, bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
+ !bio_flagged(bio, BIO_UPTODATE),
sizeof(rpdu), &rpdu);
}
}
@@ -901,9 +919,9 @@ static void blk_add_trace_bio_remap(void *ignore,
r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev);
r.sector_from = cpu_to_be64(from);
- __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
- BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
- sizeof(r), &r);
+ __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+ bio->bi_rw, BLK_TA_REMAP,
+ !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
/**
@@ -1472,6 +1490,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
if (atomic_dec_and_test(&blk_probes_ref))
blk_unregister_tracepoints();
+ spin_lock_irq(&running_trace_lock);
+ list_del(&bt->running_list);
+ spin_unlock_irq(&running_trace_lock);
blk_trace_free(bt);
return 0;
}
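A rough standalone sketch of the once-per-generation logic that trace_note_tsk() now relies on: a global sequence number bumps whenever tracing (re)starts, and each task remembers the last generation it announced itself for, so the note runs at most once per task per generation however many traces sit on the running list. The names below are illustrative; the real code walks running_trace_list under running_trace_lock instead of printing.

#include <stdio.h>

static unsigned int blktrace_seq = 1;   /* bumped on every trace start */

struct task {
	const char *comm;
	unsigned int btrace_seq;        /* last generation announced */
};

static void note_tsk_if_needed(struct task *tsk)
{
	if (tsk->btrace_seq == blktrace_seq)
		return;                 /* already announced this generation */
	tsk->btrace_seq = blktrace_seq;
	printf("note process %s (gen %u)\n", tsk->comm, blktrace_seq);
}

int main(void)
{
	struct task t = { "dd", 0 };

	note_tsk_if_needed(&t);         /* announces */
	note_tsk_if_needed(&t);         /* silent: same generation */
	blktrace_seq++;                 /* a new trace starts */
	note_tsk_if_needed(&t);         /* announces again */
	return 0;
}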
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 03cf44ac54d3..44e826a79665 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3641,7 +3641,7 @@ __setup("ftrace_filter=", set_ftrace_filter);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
-static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
+static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
static int __init set_graph_function(char *str)
{
@@ -3659,7 +3659,7 @@ static void __init set_ftrace_early_graph(char *buf)
func = strsep(&buf, ",");
/* we allow only one expression at a time */
ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
- func);
+ FTRACE_GRAPH_MAX_FUNCS, func);
if (ret)
printk(KERN_DEBUG "ftrace: function %s not "
"traceable\n", func);
@@ -3776,15 +3776,25 @@ static const struct file_operations ftrace_notrace_fops = {
static DEFINE_MUTEX(graph_lock);
int ftrace_graph_count;
-int ftrace_graph_filter_enabled;
+int ftrace_graph_notrace_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+
+struct ftrace_graph_data {
+ unsigned long *table;
+ size_t size;
+ int *count;
+ const struct seq_operations *seq_ops;
+};
static void *
__g_next(struct seq_file *m, loff_t *pos)
{
- if (*pos >= ftrace_graph_count)
+ struct ftrace_graph_data *fgd = m->private;
+
+ if (*pos >= *fgd->count)
return NULL;
- return &ftrace_graph_funcs[*pos];
+ return &fgd->table[*pos];
}
static void *
@@ -3796,10 +3806,12 @@ g_next(struct seq_file *m, void *v, loff_t *pos)
static void *g_start(struct seq_file *m, loff_t *pos)
{
+ struct ftrace_graph_data *fgd = m->private;
+
mutex_lock(&graph_lock);
/* Nothing, tell g_show to print all functions are enabled */
- if (!ftrace_graph_filter_enabled && !*pos)
+ if (!*fgd->count && !*pos)
return (void *)1;
return __g_next(m, pos);
@@ -3835,38 +3847,88 @@ static const struct seq_operations ftrace_graph_seq_ops = {
};
static int
-ftrace_graph_open(struct inode *inode, struct file *file)
+__ftrace_graph_open(struct inode *inode, struct file *file,
+ struct ftrace_graph_data *fgd)
{
int ret = 0;
- if (unlikely(ftrace_disabled))
- return -ENODEV;
-
mutex_lock(&graph_lock);
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC)) {
- ftrace_graph_filter_enabled = 0;
- ftrace_graph_count = 0;
- memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
+ *fgd->count = 0;
+ memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
}
mutex_unlock(&graph_lock);
- if (file->f_mode & FMODE_READ)
- ret = seq_open(file, &ftrace_graph_seq_ops);
+ if (file->f_mode & FMODE_READ) {
+ ret = seq_open(file, fgd->seq_ops);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ m->private = fgd;
+ }
+ } else
+ file->private_data = fgd;
return ret;
}
static int
+ftrace_graph_open(struct inode *inode, struct file *file)
+{
+ struct ftrace_graph_data *fgd;
+
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
+ fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+ if (fgd == NULL)
+ return -ENOMEM;
+
+ fgd->table = ftrace_graph_funcs;
+ fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+ fgd->count = &ftrace_graph_count;
+ fgd->seq_ops = &ftrace_graph_seq_ops;
+
+ return __ftrace_graph_open(inode, file, fgd);
+}
+
+static int
+ftrace_graph_notrace_open(struct inode *inode, struct file *file)
+{
+ struct ftrace_graph_data *fgd;
+
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
+ fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+ if (fgd == NULL)
+ return -ENOMEM;
+
+ fgd->table = ftrace_graph_notrace_funcs;
+ fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+ fgd->count = &ftrace_graph_notrace_count;
+ fgd->seq_ops = &ftrace_graph_seq_ops;
+
+ return __ftrace_graph_open(inode, file, fgd);
+}
+
+static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
- if (file->f_mode & FMODE_READ)
+ if (file->f_mode & FMODE_READ) {
+ struct seq_file *m = file->private_data;
+
+ kfree(m->private);
seq_release(inode, file);
+ } else {
+ kfree(file->private_data);
+ }
+
return 0;
}
static int
-ftrace_set_func(unsigned long *array, int *idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
{
struct dyn_ftrace *rec;
struct ftrace_page *pg;
@@ -3879,7 +3941,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
/* decode regex */
type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
- if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
+ if (!not && *idx >= size)
return -EBUSY;
search_len = strlen(search);
@@ -3907,7 +3969,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
fail = 0;
if (!exists) {
array[(*idx)++] = rec->ip;
- if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+ if (*idx >= size)
goto out;
}
} else {
@@ -3925,8 +3987,6 @@ out:
if (fail)
return -EINVAL;
- ftrace_graph_filter_enabled = !!(*idx);
-
return 0;
}
@@ -3935,36 +3995,33 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_parser parser;
- ssize_t read, ret;
+ ssize_t read, ret = 0;
+ struct ftrace_graph_data *fgd = file->private_data;
if (!cnt)
return 0;
- mutex_lock(&graph_lock);
-
- if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
- ret = -ENOMEM;
- goto out_unlock;
- }
+ if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
+ return -ENOMEM;
read = trace_get_user(&parser, ubuf, cnt, ppos);
if (read >= 0 && trace_parser_loaded((&parser))) {
parser.buffer[parser.idx] = 0;
+ mutex_lock(&graph_lock);
+
/* we allow only one expression at a time */
- ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
- parser.buffer);
- if (ret)
- goto out_free;
+ ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
+ parser.buffer);
+
+ mutex_unlock(&graph_lock);
}
- ret = read;
+ if (!ret)
+ ret = read;
-out_free:
trace_parser_put(&parser);
-out_unlock:
- mutex_unlock(&graph_lock);
return ret;
}
@@ -3976,6 +4033,14 @@ static const struct file_operations ftrace_graph_fops = {
.llseek = ftrace_filter_lseek,
.release = ftrace_graph_release,
};
+
+static const struct file_operations ftrace_graph_notrace_fops = {
+ .open = ftrace_graph_notrace_open,
+ .read = seq_read,
+ .write = ftrace_graph_write,
+ .llseek = ftrace_filter_lseek,
+ .release = ftrace_graph_release,
+};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
@@ -3997,6 +4062,9 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
trace_create_file("set_graph_function", 0444, d_tracer,
NULL,
&ftrace_graph_fops);
+ trace_create_file("set_graph_notrace", 0444, d_tracer,
+ NULL,
+ &ftrace_graph_notrace_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
return 0;
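The point of the new ftrace_graph_data descriptor above is that a single write path can serve both filter files, because the open routine hands it a descriptor saying which table to fill. A minimal userspace sketch of that shape (illustrative only, not the tracer code):

#include <stddef.h>
#include <stdio.h>

struct graph_data {
	unsigned long *table;   /* which array of addresses to fill */
	size_t size;            /* its capacity */
	int *count;             /* how many entries are in use */
};

static int add_func(struct graph_data *gd, unsigned long ip)
{
	if (*gd->count >= (int)gd->size)
		return -1;              /* table full (-EBUSY in the kernel) */
	gd->table[(*gd->count)++] = ip;
	return 0;
}

int main(void)
{
	unsigned long filter_tbl[32], notrace_tbl[32];
	int filter_cnt = 0, notrace_cnt = 0;
	struct graph_data filter  = { filter_tbl,  32, &filter_cnt };
	struct graph_data notrace = { notrace_tbl, 32, &notrace_cnt };

	add_func(&filter, 0xc0ffee);    /* the "set_graph_function" path */
	add_func(&notrace, 0xbadcafe);  /* the "set_graph_notrace" path */
	printf("filter=%d notrace=%d\n", filter_cnt, notrace_cnt);
	return 0;
}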
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7974ba20557d..063a92bad578 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -843,9 +843,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
if (isspace(ch)) {
parser->buffer[parser->idx] = 0;
parser->cont = false;
- } else {
+ } else if (parser->idx < parser->size - 1) {
parser->cont = true;
parser->buffer[parser->idx++] = ch;
+ } else {
+ ret = -EINVAL;
+ goto out;
}
*ppos += read;
@@ -2760,7 +2763,7 @@ static void show_snapshot_main_help(struct seq_file *m)
seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
seq_printf(m, "# Takes a snapshot of the main buffer.\n");
- seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+ seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
seq_printf(m, "# is not a '0' or '1')\n");
}
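For reference, a standalone sketch of the bounds behaviour the trace_get_user() hunk above introduces: a character is appended only while room remains for the terminating NUL, and an over-long token fails instead of being silently truncated. This is illustrative userspace code with made-up names, not the tracer's parser.

#include <stdio.h>
#include <string.h>

struct parser {
	char buf[8];
	size_t idx;
	size_t size;
};

static int parser_add(struct parser *p, char ch)
{
	if (ch == ' ') {                        /* token ends: terminate it */
		p->buf[p->idx] = '\0';
		return 0;
	}
	if (p->idx < p->size - 1) {             /* room left for ch + NUL */
		p->buf[p->idx++] = ch;
		return 0;
	}
	return -1;                              /* -EINVAL in the kernel */
}

int main(void)
{
	struct parser p = { .idx = 0, .size = sizeof(p.buf) };
	const char *input = "do_sys_open_with_a_long_name ";

	for (size_t i = 0; i < strlen(input); i++)
		if (parser_add(&p, input[i]) < 0) {
			puts("token too long");
			return 1;
		}
	printf("token: %s\n", p.buf);
	return 0;
}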
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 10c86fb7a2b4..d1cf5159bec0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -730,15 +730,16 @@ extern void __trace_graph_return(struct trace_array *tr,
#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS 32
-extern int ftrace_graph_filter_enabled;
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
+extern int ftrace_graph_notrace_count;
+extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
static inline int ftrace_graph_addr(unsigned long addr)
{
int i;
- if (!ftrace_graph_filter_enabled)
+ if (!ftrace_graph_count)
return 1;
for (i = 0; i < ftrace_graph_count; i++) {
@@ -758,11 +759,31 @@ static inline int ftrace_graph_addr(unsigned long addr)
return 0;
}
+
+static inline int ftrace_graph_notrace_addr(unsigned long addr)
+{
+ int i;
+
+ if (!ftrace_graph_notrace_count)
+ return 0;
+
+ for (i = 0; i < ftrace_graph_notrace_count; i++) {
+ if (addr == ftrace_graph_notrace_funcs[i])
+ return 1;
+ }
+
+ return 0;
+}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
return 1;
}
+
+static inline int ftrace_graph_notrace_addr(unsigned long addr)
+{
+ return 0;
+}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b5c09242683d..e08c030b8f38 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -114,16 +114,37 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
return -EBUSY;
}
+ /*
+ * The curr_ret_stack is an index into the ftrace return stack of the
+ * current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
+ * when the function graph tracer is used. To support filtering out
+ * specific functions, it makes the index negative by subtracting a
+ * huge value (FTRACE_NOTRACE_DEPTH), so when ftrace sees a negative
+ * index it will ignore the record. The index is recovered when
+ * returning from the filtered function by adding FTRACE_NOTRACE_DEPTH
+ * back, and recording then continues normally.
+ *
+ * The curr_ret_stack is initialized to -1 and gets increased in this
+ * function, so it can be less than -1 only if it was filtered out via
+ * ftrace_graph_notrace_addr(), which can be set from the
+ * set_graph_notrace file in debugfs by the user.
+ */
+ if (current->curr_ret_stack < -1)
+ return -EBUSY;
+
calltime = trace_clock_local();
index = ++current->curr_ret_stack;
+ if (ftrace_graph_notrace_addr(func))
+ current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
barrier();
current->ret_stack[index].ret = ret;
current->ret_stack[index].func = func;
current->ret_stack[index].calltime = calltime;
current->ret_stack[index].subtime = 0;
current->ret_stack[index].fp = frame_pointer;
- *depth = index;
+ *depth = current->curr_ret_stack;
return 0;
}
@@ -137,7 +158,17 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
index = current->curr_ret_stack;
- if (unlikely(index < 0)) {
+ /*
+ * A negative index here means that it's just returned from a
+ * notrace'd function. Recover index to get an original
+ * return address. See ftrace_push_return_trace().
+ *
+ * TODO: Need to check whether the stack gets corrupted.
+ */
+ if (index < 0)
+ index += FTRACE_NOTRACE_DEPTH;
+
+ if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
ftrace_graph_stop();
WARN_ON(1);
/* Might as well panic, otherwise we have no where to go */
@@ -193,6 +224,15 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
trace.rettime = trace_clock_local();
barrier();
current->curr_ret_stack--;
+ /*
+ * The curr_ret_stack can be less than -1 only if it was
+ * filtered out and it's about to return from the function.
+ * Recover the index and continue to trace normal functions.
+ */
+ if (current->curr_ret_stack < -1) {
+ current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
+ return ret;
+ }
/*
* The trace should run after decrementing the ret counter
@@ -259,10 +299,20 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
/* trace it when it is-nested-in or is a function enabled. */
if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
- ftrace_graph_ignore_irqs()) ||
+ ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
(max_depth && trace->depth >= max_depth))
return 0;
+ /*
+ * Do not trace a function if it's filtered by set_graph_notrace.
+ * Make the index of ret stack negative to indicate that it should
+ * ignore further functions. But it needs its own ret stack entry
+ * to recover the original index in order to continue tracing after
+ * returning from the function.
+ */
+ if (ftrace_graph_notrace_addr(trace->func))
+ return 1;
+
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
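The index trick described in the comments above can be seen in isolation in the sketch below (userspace, illustrative; the bias constant merely stands in for FTRACE_NOTRACE_DEPTH and its value here is an assumption for demonstration, not taken from the patch).

#include <stdio.h>

#define NOTRACE_BIAS 65536      /* stand-in for FTRACE_NOTRACE_DEPTH */

int main(void)
{
	int curr_ret_stack = -1;                /* initial value, as in the kernel */

	curr_ret_stack++;                       /* push: enter a function */
	curr_ret_stack -= NOTRACE_BIAS;         /* it matched set_graph_notrace */
	printf("while filtered: %d (negative => records ignored)\n",
	       curr_ret_stack);

	curr_ret_stack--;                       /* pop on return ... */
	curr_ret_stack += NOTRACE_BIAS;         /* ... and drop the bias again */
	printf("after return:   %d (tracing continues)\n", curr_ret_stack);
	return 0;
}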
diff --git a/kernel/user.c b/kernel/user.c
index 5bbb91988e69..a3a0dbfda329 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -51,6 +51,10 @@ struct user_namespace init_user_ns = {
.owner = GLOBAL_ROOT_UID,
.group = GLOBAL_ROOT_GID,
.proc_inum = PROC_USER_INIT_INO,
+#ifdef CONFIG_KEYS_KERBEROS_CACHE
+ .krb_cache_register_sem =
+ __RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem),
+#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 13fb1134ba58..240fb62cf394 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -101,6 +101,9 @@ int create_user_ns(struct cred *new)
set_cred_user_ns(new, ns);
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ init_rwsem(&ns->persistent_keyring_register_sem);
+#endif
return 0;
}
@@ -130,6 +133,9 @@ void free_user_ns(struct user_namespace *ns)
do {
parent = ns->parent;
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ key_put(ns->persistent_keyring_register);
+#endif
proc_free_inum(ns->proc_inum);
kmem_cache_free(user_ns_cachep, ns);
ns = parent;
diff --git a/kernel/wait.c b/kernel/wait.c
index d550920e040c..de21c6305a44 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -92,6 +92,30 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
+long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
+{
+ unsigned long flags;
+
+ if (signal_pending_state(state, current))
+ return -ERESTARTSYS;
+
+ wait->private = current;
+ wait->func = autoremove_wake_function;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (list_empty(&wait->task_list)) {
+ if (wait->flags & WQ_FLAG_EXCLUSIVE)
+ __add_wait_queue_tail(q, wait);
+ else
+ __add_wait_queue(q, wait);
+ }
+ set_current_state(state);
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(prepare_to_wait_event);
+
/**
* finish_wait - clean up after waiting in a queue
* @q: waitqueue waited on
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 987293d03ebc..98a937eb9f7a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -305,6 +305,9 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
+/* I: attributes used when instantiating ordered pools on demand */
+static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
+
struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
@@ -518,14 +521,21 @@ static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
-/* allocate ID and assign it to @pool */
+/**
+ * worker_pool_assign_id - allocate ID and assign it to @pool
+ * @pool: the pool pointer of interest
+ *
+ * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
+ * successfully, -errno on failure.
+ */
static int worker_pool_assign_id(struct worker_pool *pool)
{
int ret;
lockdep_assert_held(&wq_pool_mutex);
- ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+ ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
+ GFP_KERNEL);
if (ret >= 0) {
pool->id = ret;
return 0;
@@ -1320,7 +1330,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
debug_work_activate(work);
- /* if dying, only works from the same workqueue are allowed */
+ /* if draining, only works from the same workqueue are allowed */
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
@@ -4106,7 +4116,7 @@ out_unlock:
static int alloc_and_link_pwqs(struct workqueue_struct *wq)
{
bool highpri = wq->flags & WQ_HIGHPRI;
- int cpu;
+ int cpu, ret;
if (!(wq->flags & WQ_UNBOUND)) {
wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
@@ -4126,6 +4136,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
mutex_unlock(&wq->mutex);
}
return 0;
+ } else if (wq->flags & __WQ_ORDERED) {
+ ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
+ /* there should only be single pwq for ordering guarantee */
+ WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
+ wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
+ "ordering guarantee broken for workqueue %s\n", wq->name);
+ return ret;
} else {
return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
}
@@ -5009,10 +5026,6 @@ static int __init init_workqueues(void)
int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
int i, cpu;
- /* make sure we have enough bits for OFFQ pool ID */
- BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
- WORK_CPU_END * NR_STD_WORKER_POOLS);
-
WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
@@ -5051,13 +5064,23 @@ static int __init init_workqueues(void)
}
}
- /* create default unbound wq attrs */
+ /* create default unbound and ordered wq attrs */
for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
struct workqueue_attrs *attrs;
BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
attrs->nice = std_nice[i];
unbound_std_wq_attrs[i] = attrs;
+
+ /*
+ * An ordered wq should have only one pwq as ordering is
+ * guaranteed by max_active which is enforced by pwqs.
+ * Turn off NUMA so that dfl_pwq is used for all nodes.
+ */
+ BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+ attrs->nice = std_nice[i];
+ attrs->no_numa = true;
+ ordered_wq_attrs[i] = attrs;
}
system_wq = alloc_workqueue("events", 0, 0);
diff --git a/lib/Kconfig b/lib/Kconfig
index b3c8be0da17f..3cb879b1f282 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -322,6 +322,20 @@ config TEXTSEARCH_FSM
config BTREE
boolean
+config ASSOCIATIVE_ARRAY
+ bool
+ help
+ Generic associative array. Can be searched and iterated over whilst
+ it is being modified. It is also reasonably quick to search and
+ modify. The algorithms are non-recursive, and the trees are highly
+ capacious.
+
+ See:
+
+ Documentation/assoc_array.txt
+
+ for more information.
+
config HAS_IOMEM
boolean
depends on !NO_IOMEM
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 06344d986eb9..ebef88f61b7d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -312,6 +312,15 @@ config MAGIC_SYSRQ
keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
+config MAGIC_SYSRQ_DEFAULT_ENABLE
+ hex "Enable magic SysRq key functions by default"
+ depends on MAGIC_SYSRQ
+ default 0x1
+ help
+ Specifies which SysRq key functions are enabled by default.
+ This may be set to 1 or 0 to enable or disable them all, or
+ to a bitmask as described in Documentation/sysrq.txt.
+
config DEBUG_KERNEL
bool "Kernel debugging"
help
@@ -983,7 +992,7 @@ config DEBUG_KOBJECT
config DEBUG_KOBJECT_RELEASE
bool "kobject release debugging"
- depends on DEBUG_KERNEL
+ depends on DEBUG_OBJECTS_TIMERS
help
kobjects are reference counted objects. This means that their
last reference count put is not predictable, and the kobject can
diff --git a/lib/Makefile b/lib/Makefile
index f3bb2cb98adf..1e806477e472 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -51,6 +51,7 @@ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o
+obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
new file mode 100644
index 000000000000..17edeaf19180
--- /dev/null
+++ b/lib/assoc_array.c
@@ -0,0 +1,1746 @@
+/* Generic associative array implementation.
+ *
+ * See Documentation/assoc_array.txt for information.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+//#define DEBUG
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/assoc_array_priv.h>
+
+/*
+ * Iterate over an associative array. The caller must hold the RCU read lock
+ * or better.
+ */
+static int assoc_array_subtree_iterate(const struct assoc_array_ptr *root,
+ const struct assoc_array_ptr *stop,
+ int (*iterator)(const void *leaf,
+ void *iterator_data),
+ void *iterator_data)
+{
+ const struct assoc_array_shortcut *shortcut;
+ const struct assoc_array_node *node;
+ const struct assoc_array_ptr *cursor, *ptr, *parent;
+ unsigned long has_meta;
+ int slot, ret;
+
+ cursor = root;
+
+begin_node:
+ if (assoc_array_ptr_is_shortcut(cursor)) {
+ /* Descend through a shortcut */
+ shortcut = assoc_array_ptr_to_shortcut(cursor);
+ smp_read_barrier_depends();
+ cursor = ACCESS_ONCE(shortcut->next_node);
+ }
+
+ node = assoc_array_ptr_to_node(cursor);
+ smp_read_barrier_depends();
+ slot = 0;
+
+ /* We perform two passes of each node.
+ *
+ * The first pass does all the leaves in this node. This means we
+ * don't miss any leaves if the node is split up by insertion whilst
+ * we're iterating over the branches rooted here (we may, however, see
+ * some leaves twice).
+ */
+ has_meta = 0;
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = ACCESS_ONCE(node->slots[slot]);
+ has_meta |= (unsigned long)ptr;
+ if (ptr && assoc_array_ptr_is_leaf(ptr)) {
+ /* We need a barrier between the read of the pointer
+ * and dereferencing the pointer - but only if we are
+ * actually going to dereference it.
+ */
+ smp_read_barrier_depends();
+
+ /* Invoke the callback */
+ ret = iterator(assoc_array_ptr_to_leaf(ptr),
+ iterator_data);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* The second pass attends to all the metadata pointers. If we follow
+ * one of these we may find that we don't come back here, but rather go
+ * back to a replacement node with the leaves in a different layout.
+ *
+ * We are guaranteed to make progress, however, as the slot number for
+ * a particular portion of the key space cannot change - and we
+ * continue at the back pointer + 1.
+ */
+ if (!(has_meta & ASSOC_ARRAY_PTR_META_TYPE))
+ goto finished_node;
+ slot = 0;
+
+continue_node:
+ node = assoc_array_ptr_to_node(cursor);
+ smp_read_barrier_depends();
+
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = ACCESS_ONCE(node->slots[slot]);
+ if (assoc_array_ptr_is_meta(ptr)) {
+ cursor = ptr;
+ goto begin_node;
+ }
+ }
+
+finished_node:
+ /* Move up to the parent (may need to skip back over a shortcut) */
+ parent = ACCESS_ONCE(node->back_pointer);
+ slot = node->parent_slot;
+ if (parent == stop)
+ return 0;
+
+ if (assoc_array_ptr_is_shortcut(parent)) {
+ shortcut = assoc_array_ptr_to_shortcut(parent);
+ smp_read_barrier_depends();
+ cursor = parent;
+ parent = ACCESS_ONCE(shortcut->back_pointer);
+ slot = shortcut->parent_slot;
+ if (parent == stop)
+ return 0;
+ }
+
+ /* Ascend to next slot in parent node */
+ cursor = parent;
+ slot++;
+ goto continue_node;
+}
+
+/**
+ * assoc_array_iterate - Pass all objects in the array to a callback
+ * @array: The array to iterate over.
+ * @iterator: The callback function.
+ * @iterator_data: Private data for the callback function.
+ *
+ * Iterate over all the objects in an associative array. Each one will be
+ * presented to the iterator function.
+ *
+ * If the array is being modified concurrently with the iteration then it is
+ * possible that some objects in the array will be passed to the iterator
+ * callback more than once - though every object should be passed at least
+ * once. If this is undesirable then the caller must lock against modification
+ * for the duration of this function.
+ *
+ * The function will return 0 if no objects were in the array or else it will
+ * return the result of the last iterator function called. Iteration stops
+ * immediately if any call to the iteration function results in a non-zero
+ * return.
+ *
+ * The caller should hold the RCU read lock or better if concurrent
+ * modification is possible.
+ */
+int assoc_array_iterate(const struct assoc_array *array,
+ int (*iterator)(const void *object,
+ void *iterator_data),
+ void *iterator_data)
+{
+ struct assoc_array_ptr *root = ACCESS_ONCE(array->root);
+
+ if (!root)
+ return 0;
+ return assoc_array_subtree_iterate(root, NULL, iterator, iterator_data);
+}
+
+enum assoc_array_walk_status {
+ assoc_array_walk_tree_empty,
+ assoc_array_walk_found_terminal_node,
+ assoc_array_walk_found_wrong_shortcut,
+} status;
+
+struct assoc_array_walk_result {
+ struct {
+ struct assoc_array_node *node; /* Node in which leaf might be found */
+ int level;
+ int slot;
+ } terminal_node;
+ struct {
+ struct assoc_array_shortcut *shortcut;
+ int level;
+ int sc_level;
+ unsigned long sc_segments;
+ unsigned long dissimilarity;
+ } wrong_shortcut;
+};
+
+/*
+ * Navigate through the internal tree looking for the closest node to the key.
+ */
+static enum assoc_array_walk_status
+assoc_array_walk(const struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ struct assoc_array_walk_result *result)
+{
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *cursor, *ptr;
+ unsigned long sc_segments, dissimilarity;
+ unsigned long segments;
+ int level, sc_level, next_sc_level;
+ int slot;
+
+ pr_devel("-->%s()\n", __func__);
+
+ cursor = ACCESS_ONCE(array->root);
+ if (!cursor)
+ return assoc_array_walk_tree_empty;
+
+ level = 0;
+
+ /* Use segments from the key for the new leaf to navigate through the
+ * internal tree, skipping through nodes and shortcuts that are on
+ * route to the destination. Eventually we'll come to a slot that is
+ * either empty or contains a leaf at which point we've found a node in
+ * which the leaf we're looking for might be found or into which it
+ * should be inserted.
+ */
+jumped:
+ segments = ops->get_key_chunk(index_key, level);
+ pr_devel("segments[%d]: %lx\n", level, segments);
+
+ if (assoc_array_ptr_is_shortcut(cursor))
+ goto follow_shortcut;
+
+consider_node:
+ node = assoc_array_ptr_to_node(cursor);
+ smp_read_barrier_depends();
+
+ slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ slot &= ASSOC_ARRAY_FAN_MASK;
+ ptr = ACCESS_ONCE(node->slots[slot]);
+
+ pr_devel("consider slot %x [ix=%d type=%lu]\n",
+ slot, level, (unsigned long)ptr & 3);
+
+ if (!assoc_array_ptr_is_meta(ptr)) {
+ /* The node doesn't have a node/shortcut pointer in the slot
+ * corresponding to the index key that we have to follow.
+ */
+ result->terminal_node.node = node;
+ result->terminal_node.level = level;
+ result->terminal_node.slot = slot;
+ pr_devel("<--%s() = terminal_node\n", __func__);
+ return assoc_array_walk_found_terminal_node;
+ }
+
+ if (assoc_array_ptr_is_node(ptr)) {
+ /* There is a pointer to a node in the slot corresponding to
+ * this index key segment, so we need to follow it.
+ */
+ cursor = ptr;
+ level += ASSOC_ARRAY_LEVEL_STEP;
+ if ((level & ASSOC_ARRAY_KEY_CHUNK_MASK) != 0)
+ goto consider_node;
+ goto jumped;
+ }
+
+ /* There is a shortcut in the slot corresponding to the index key
+ * segment. We follow the shortcut if its partial index key matches
+ * this leaf's. Otherwise we need to split the shortcut.
+ */
+ cursor = ptr;
+follow_shortcut:
+ shortcut = assoc_array_ptr_to_shortcut(cursor);
+ smp_read_barrier_depends();
+ pr_devel("shortcut to %d\n", shortcut->skip_to_level);
+ sc_level = level + ASSOC_ARRAY_LEVEL_STEP;
+ BUG_ON(sc_level > shortcut->skip_to_level);
+
+ do {
+ /* Check the leaf against the shortcut's index key a word at a
+ * time, trimming the final word (the shortcut stores the index
+ * key completely from the root to the shortcut's target).
+ */
+ if ((sc_level & ASSOC_ARRAY_KEY_CHUNK_MASK) == 0)
+ segments = ops->get_key_chunk(index_key, sc_level);
+
+ sc_segments = shortcut->index_key[sc_level >> ASSOC_ARRAY_KEY_CHUNK_SHIFT];
+ dissimilarity = segments ^ sc_segments;
+
+ if (round_up(sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE) > shortcut->skip_to_level) {
+ /* Trim segments that are beyond the shortcut */
+ int shift = shortcut->skip_to_level & ASSOC_ARRAY_KEY_CHUNK_MASK;
+ dissimilarity &= ~(ULONG_MAX << shift);
+ next_sc_level = shortcut->skip_to_level;
+ } else {
+ next_sc_level = sc_level + ASSOC_ARRAY_KEY_CHUNK_SIZE;
+ next_sc_level = round_down(next_sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ }
+
+ if (dissimilarity != 0) {
+ /* This shortcut points elsewhere */
+ result->wrong_shortcut.shortcut = shortcut;
+ result->wrong_shortcut.level = level;
+ result->wrong_shortcut.sc_level = sc_level;
+ result->wrong_shortcut.sc_segments = sc_segments;
+ result->wrong_shortcut.dissimilarity = dissimilarity;
+ return assoc_array_walk_found_wrong_shortcut;
+ }
+
+ sc_level = next_sc_level;
+ } while (sc_level < shortcut->skip_to_level);
+
+ /* The shortcut matches the leaf's index to this point. */
+ cursor = ACCESS_ONCE(shortcut->next_node);
+ if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
+ level = sc_level;
+ goto jumped;
+ } else {
+ level = sc_level;
+ goto consider_node;
+ }
+}
+
+/**
+ * assoc_array_find - Find an object by index key
+ * @array: The associative array to search.
+ * @ops: The operations to use.
+ * @index_key: The key to the object.
+ *
+ * Find an object in an associative array by walking through the internal tree
+ * to the node that should contain the object and then searching the leaves
+ * there. NULL is returned if the requested object was not found in the array.
+ *
+ * The caller must hold the RCU read lock or better.
+ */
+void *assoc_array_find(const struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key)
+{
+ struct assoc_array_walk_result result;
+ const struct assoc_array_node *node;
+ const struct assoc_array_ptr *ptr;
+ const void *leaf;
+ int slot;
+
+ if (assoc_array_walk(array, ops, index_key, &result) !=
+ assoc_array_walk_found_terminal_node)
+ return NULL;
+
+ node = result.terminal_node.node;
+ smp_read_barrier_depends();
+
+ /* If the target key is available to us, it's has to be pointed to by
+ * the terminal node.
+ */
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = ACCESS_ONCE(node->slots[slot]);
+ if (ptr && assoc_array_ptr_is_leaf(ptr)) {
+ /* We need a barrier between the read of the pointer
+ * and dereferencing the pointer - but only if we are
+ * actually going to dereference it.
+ */
+ leaf = assoc_array_ptr_to_leaf(ptr);
+ smp_read_barrier_depends();
+ if (ops->compare_object(leaf, index_key))
+ return (void *)leaf;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Destructively iterate over an associative array. The caller must prevent
+ * other simultaneous accesses.
+ */
+static void assoc_array_destroy_subtree(struct assoc_array_ptr *root,
+ const struct assoc_array_ops *ops)
+{
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *cursor, *parent = NULL;
+ int slot = -1;
+
+ pr_devel("-->%s()\n", __func__);
+
+ cursor = root;
+ if (!cursor) {
+ pr_devel("empty\n");
+ return;
+ }
+
+move_to_meta:
+ if (assoc_array_ptr_is_shortcut(cursor)) {
+ /* Descend through a shortcut */
+ pr_devel("[%d] shortcut\n", slot);
+ BUG_ON(!assoc_array_ptr_is_shortcut(cursor));
+ shortcut = assoc_array_ptr_to_shortcut(cursor);
+ BUG_ON(shortcut->back_pointer != parent);
+ BUG_ON(slot != -1 && shortcut->parent_slot != slot);
+ parent = cursor;
+ cursor = shortcut->next_node;
+ slot = -1;
+ BUG_ON(!assoc_array_ptr_is_node(cursor));
+ }
+
+ pr_devel("[%d] node\n", slot);
+ node = assoc_array_ptr_to_node(cursor);
+ BUG_ON(node->back_pointer != parent);
+ BUG_ON(slot != -1 && node->parent_slot != slot);
+ slot = 0;
+
+continue_node:
+ pr_devel("Node %p [back=%p]\n", node, node->back_pointer);
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ struct assoc_array_ptr *ptr = node->slots[slot];
+ if (!ptr)
+ continue;
+ if (assoc_array_ptr_is_meta(ptr)) {
+ parent = cursor;
+ cursor = ptr;
+ goto move_to_meta;
+ }
+
+ if (ops) {
+ pr_devel("[%d] free leaf\n", slot);
+ ops->free_object(assoc_array_ptr_to_leaf(ptr));
+ }
+ }
+
+ parent = node->back_pointer;
+ slot = node->parent_slot;
+ pr_devel("free node\n");
+ kfree(node);
+ if (!parent)
+ return; /* Done */
+
+ /* Move back up to the parent (may need to free a shortcut on
+ * the way up) */
+ if (assoc_array_ptr_is_shortcut(parent)) {
+ shortcut = assoc_array_ptr_to_shortcut(parent);
+ BUG_ON(shortcut->next_node != cursor);
+ cursor = parent;
+ parent = shortcut->back_pointer;
+ slot = shortcut->parent_slot;
+ pr_devel("free shortcut\n");
+ kfree(shortcut);
+ if (!parent)
+ return;
+
+ BUG_ON(!assoc_array_ptr_is_node(parent));
+ }
+
+ /* Ascend to next slot in parent node */
+ pr_devel("ascend to %p[%d]\n", parent, slot);
+ cursor = parent;
+ node = assoc_array_ptr_to_node(cursor);
+ slot++;
+ goto continue_node;
+}
+
+/**
+ * assoc_array_destroy - Destroy an associative array
+ * @array: The array to destroy.
+ * @ops: The operations to use.
+ *
+ * Discard all metadata and free all objects in an associative array. The
+ * array will be empty and ready to use again upon completion. This function
+ * cannot fail.
+ *
+ * The caller must prevent all other accesses whilst this takes place as no
+ * attempt is made to adjust pointers gracefully to permit RCU readlock-holding
+ * accesses to continue. On the other hand, no memory allocation is required.
+ */
+void assoc_array_destroy(struct assoc_array *array,
+ const struct assoc_array_ops *ops)
+{
+ assoc_array_destroy_subtree(array->root, ops);
+ array->root = NULL;
+}
+
+/*
+ * Handle insertion into an empty tree.
+ */
+static bool assoc_array_insert_in_empty_tree(struct assoc_array_edit *edit)
+{
+ struct assoc_array_node *new_n0;
+
+ pr_devel("-->%s()\n", __func__);
+
+ new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n0)
+ return false;
+
+ edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
+ edit->leaf_p = &new_n0->slots[0];
+ edit->adjust_count_on = new_n0;
+ edit->set[0].ptr = &edit->array->root;
+ edit->set[0].to = assoc_array_node_to_ptr(new_n0);
+
+ pr_devel("<--%s() = ok [no root]\n", __func__);
+ return true;
+}
+
+/*
+ * Handle insertion into a terminal node.
+ */
+static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ struct assoc_array_walk_result *result)
+{
+ struct assoc_array_shortcut *shortcut, *new_s0;
+ struct assoc_array_node *node, *new_n0, *new_n1, *side;
+ struct assoc_array_ptr *ptr;
+ unsigned long dissimilarity, base_seg, blank;
+ size_t keylen;
+ bool have_meta;
+ int level, diff;
+ int slot, next_slot, free_slot, i, j;
+
+ node = result->terminal_node.node;
+ level = result->terminal_node.level;
+ edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot;
+
+ pr_devel("-->%s()\n", __func__);
+
+ /* We arrived at a node which doesn't have an onward node or shortcut
+ * pointer that we have to follow. This means that (a) the leaf we
+ * want must go here (either by insertion or replacement) or (b) we
+ * need to split this node and insert in one of the fragments.
+ */
+ free_slot = -1;
+
+ /* Firstly, we have to check the leaves in this node to see if there's
+ * a matching one we should replace in place.
+ */
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ ptr = node->slots[i];
+ if (!ptr) {
+ free_slot = i;
+ continue;
+ }
+ if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
+ pr_devel("replace in slot %d\n", i);
+ edit->leaf_p = &node->slots[i];
+ edit->dead_leaf = node->slots[i];
+ pr_devel("<--%s() = ok [replace]\n", __func__);
+ return true;
+ }
+ }
+
+ /* If there is a free slot in this node then we can just insert the
+ * leaf here.
+ */
+ if (free_slot >= 0) {
+ pr_devel("insert in free slot %d\n", free_slot);
+ edit->leaf_p = &node->slots[free_slot];
+ edit->adjust_count_on = node;
+ pr_devel("<--%s() = ok [insert]\n", __func__);
+ return true;
+ }
+
+ /* The node has no spare slots - so we're either going to have to split
+ * it or insert another node before it.
+ *
+ * Whatever, we're going to need at least two new nodes - so allocate
+ * those now. We may also need a new shortcut, but we deal with that
+ * when we need it.
+ */
+ new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n0)
+ return false;
+ edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
+ new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n1)
+ return false;
+ edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
+
+ /* We need to find out how similar the leaves are. */
+ pr_devel("no spare slots\n");
+ have_meta = false;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ ptr = node->slots[i];
+ if (assoc_array_ptr_is_meta(ptr)) {
+ edit->segment_cache[i] = 0xff;
+ have_meta = true;
+ continue;
+ }
+ base_seg = ops->get_object_key_chunk(
+ assoc_array_ptr_to_leaf(ptr), level);
+ base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
+ edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
+ }
+
+ if (have_meta) {
+ pr_devel("have meta\n");
+ goto split_node;
+ }
+
+ /* The node contains only leaves */
+ dissimilarity = 0;
+ base_seg = edit->segment_cache[0];
+ for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
+ dissimilarity |= edit->segment_cache[i] ^ base_seg;
+
+ pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity);
+
+ if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) {
+ /* The old leaves all cluster in the same slot. We will need
+ * to insert a shortcut if the new leaf wants to cluster with them.
+ */
+ if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
+ goto all_leaves_cluster_together;
+
+ /* Otherwise we can just insert a new node ahead of the old
+ * one.
+ */
+ goto present_leaves_cluster_but_not_new_leaf;
+ }
+
+split_node:
+ pr_devel("split node\n");
+
+ /* We need to split the current node; we know that the node doesn't
+ * simply contain a full set of leaves that cluster together (it
+ * contains meta pointers and/or non-clustering leaves).
+ *
+ * We need to expel at least two leaves out of a set consisting of the
+ * leaves in the node and the new leaf.
+ *
+ * We need a new node (n0) to replace the current one and a new node to
+ * take the expelled nodes (n1).
+ */
+ edit->set[0].to = assoc_array_node_to_ptr(new_n0);
+ new_n0->back_pointer = node->back_pointer;
+ new_n0->parent_slot = node->parent_slot;
+ new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
+ new_n1->parent_slot = -1; /* Need to calculate this */
+
+do_split_node:
+ pr_devel("do_split_node\n");
+
+ new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
+ new_n1->nr_leaves_on_branch = 0;
+
+ /* Begin by finding two matching leaves. There have to be at least two
+ * that match - even if there are meta pointers - because any leaf that
+ * would match a slot with a meta pointer in it must be somewhere
+ * behind that meta pointer and cannot be here. Further, given N
+ * remaining leaf slots, we now have N+1 leaves to go in them.
+ */
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ slot = edit->segment_cache[i];
+ if (slot != 0xff)
+ for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++)
+ if (edit->segment_cache[j] == slot)
+ goto found_slot_for_multiple_occupancy;
+ }
+found_slot_for_multiple_occupancy:
+ pr_devel("same slot: %x %x [%02x]\n", i, j, slot);
+ BUG_ON(i >= ASSOC_ARRAY_FAN_OUT);
+ BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1);
+ BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT);
+
+ new_n1->parent_slot = slot;
+
+ /* Metadata pointers cannot change slot */
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
+ if (assoc_array_ptr_is_meta(node->slots[i]))
+ new_n0->slots[i] = node->slots[i];
+ else
+ new_n0->slots[i] = NULL;
+ BUG_ON(new_n0->slots[slot] != NULL);
+ new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1);
+
+ /* Filter the leaf pointers between the new nodes */
+ free_slot = -1;
+ next_slot = 0;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ if (assoc_array_ptr_is_meta(node->slots[i]))
+ continue;
+ if (edit->segment_cache[i] == slot) {
+ new_n1->slots[next_slot++] = node->slots[i];
+ new_n1->nr_leaves_on_branch++;
+ } else {
+ do {
+ free_slot++;
+ } while (new_n0->slots[free_slot] != NULL);
+ new_n0->slots[free_slot] = node->slots[i];
+ }
+ }
+
+ pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot);
+
+ if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) {
+ do {
+ free_slot++;
+ } while (new_n0->slots[free_slot] != NULL);
+ edit->leaf_p = &new_n0->slots[free_slot];
+ edit->adjust_count_on = new_n0;
+ } else {
+ edit->leaf_p = &new_n1->slots[next_slot++];
+ edit->adjust_count_on = new_n1;
+ }
+
+ BUG_ON(next_slot <= 1);
+
+ edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0);
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ if (edit->segment_cache[i] == 0xff) {
+ ptr = node->slots[i];
+ BUG_ON(assoc_array_ptr_is_leaf(ptr));
+ if (assoc_array_ptr_is_node(ptr)) {
+ side = assoc_array_ptr_to_node(ptr);
+ edit->set_backpointers[i] = &side->back_pointer;
+ } else {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ edit->set_backpointers[i] = &shortcut->back_pointer;
+ }
+ }
+ }
+
+ ptr = node->back_pointer;
+ if (!ptr)
+ edit->set[0].ptr = &edit->array->root;
+ else if (assoc_array_ptr_is_node(ptr))
+ edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
+ else
+ edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node;
+ edit->excised_meta[0] = assoc_array_node_to_ptr(node);
+ pr_devel("<--%s() = ok [split node]\n", __func__);
+ return true;
+
+present_leaves_cluster_but_not_new_leaf:
+ /* All the old leaves cluster in the same slot, but the new leaf wants
+ * to go into a different slot, so we create a new node to hold the new
+ * leaf and a pointer to a new node holding all the old leaves.
+ */
+ pr_devel("present leaves cluster but not new leaf\n");
+
+ new_n0->back_pointer = node->back_pointer;
+ new_n0->parent_slot = node->parent_slot;
+ new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
+ new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
+ new_n1->parent_slot = edit->segment_cache[0];
+ new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
+ edit->adjust_count_on = new_n0;
+
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
+ new_n1->slots[i] = node->slots[i];
+
+ new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n1);
+ edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
+
+ edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
+ edit->set[0].to = assoc_array_node_to_ptr(new_n0);
+ edit->excised_meta[0] = assoc_array_node_to_ptr(node);
+ pr_devel("<--%s() = ok [insert node before]\n", __func__);
+ return true;
+
+all_leaves_cluster_together:
+ /* All the leaves, new and old, want to cluster together in this node
+ * in the same slot, so we have to replace this node with a shortcut to
+ * skip over the identical parts of the key and then place a pair of
+ * nodes, one inside the other, at the end of the shortcut and
+ * distribute the keys between them.
+ *
+ * Firstly we need to work out where the leaves start diverging as a
+ * bit position into their keys so that we know how big the shortcut
+ * needs to be.
+ *
+ * We only need to make a single pass of N of the N+1 leaves because if
+ * any keys differ between themselves at bit X then at least one of
+ * them must also differ with the base key at bit X or before.
+ */
+ pr_devel("all leaves cluster together\n");
+ diff = INT_MAX;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ int x = ops->diff_objects(assoc_array_ptr_to_leaf(edit->leaf),
+ assoc_array_ptr_to_leaf(node->slots[i]));
+ if (x < diff) {
+ BUG_ON(x < 0);
+ diff = x;
+ }
+ }
+ BUG_ON(diff == INT_MAX);
+ BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP);
+
+ keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
+
+ new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
+ keylen * sizeof(unsigned long), GFP_KERNEL);
+ if (!new_s0)
+ return false;
+ edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
+
+ edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
+ new_s0->back_pointer = node->back_pointer;
+ new_s0->parent_slot = node->parent_slot;
+ new_s0->next_node = assoc_array_node_to_ptr(new_n0);
+ new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
+ new_n0->parent_slot = 0;
+ new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
+ new_n1->parent_slot = -1; /* Need to calculate this */
+
+ new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK;
+ pr_devel("skip_to_level = %d [diff %d]\n", level, diff);
+ BUG_ON(level <= 0);
+
+ for (i = 0; i < keylen; i++)
+ new_s0->index_key[i] =
+ ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
+
+ blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
+ new_s0->index_key[keylen - 1] &= ~blank;
+
+ /* This now reduces to a node splitting exercise for which we'll need
+ * to regenerate the disparity table.
+ */
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ ptr = node->slots[i];
+ base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr),
+ level);
+ base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
+ edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
+ }
+
+ base_seg = ops->get_key_chunk(index_key, level);
+ base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
+ edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK;
+ goto do_split_node;
+}
+
+/*
+ * Handle insertion into the middle of a shortcut.
+ */
+static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
+ const struct assoc_array_ops *ops,
+ struct assoc_array_walk_result *result)
+{
+ struct assoc_array_shortcut *shortcut, *new_s0, *new_s1;
+ struct assoc_array_node *node, *new_n0, *side;
+ unsigned long sc_segments, dissimilarity, blank;
+ size_t keylen;
+ int level, sc_level, diff;
+ int sc_slot;
+
+ shortcut = result->wrong_shortcut.shortcut;
+ level = result->wrong_shortcut.level;
+ sc_level = result->wrong_shortcut.sc_level;
+ sc_segments = result->wrong_shortcut.sc_segments;
+ dissimilarity = result->wrong_shortcut.dissimilarity;
+
+ pr_devel("-->%s(ix=%d dis=%lx scix=%d)\n",
+ __func__, level, dissimilarity, sc_level);
+
+ /* We need to split a shortcut and insert a node between the two
+ * pieces. Zero-length pieces will be dispensed with entirely.
+ *
+ * First of all, we need to find out in which level the first
+ * difference was.
+ */
+ diff = __ffs(dissimilarity);
+ diff &= ~ASSOC_ARRAY_LEVEL_STEP_MASK;
+ diff += sc_level & ~ASSOC_ARRAY_KEY_CHUNK_MASK;
+ pr_devel("diff=%d\n", diff);
+
+ if (!shortcut->back_pointer) {
+ edit->set[0].ptr = &edit->array->root;
+ } else if (assoc_array_ptr_is_node(shortcut->back_pointer)) {
+ node = assoc_array_ptr_to_node(shortcut->back_pointer);
+ edit->set[0].ptr = &node->slots[shortcut->parent_slot];
+ } else {
+ BUG();
+ }
+
+ edit->excised_meta[0] = assoc_array_shortcut_to_ptr(shortcut);
+
+ /* Create a new node now since we're going to need it anyway */
+ new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n0)
+ return false;
+ edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
+ edit->adjust_count_on = new_n0;
+
+ /* Insert a new shortcut before the new node if this segment isn't of
+ * zero length - otherwise we just connect the new node directly to the
+ * parent.
+ */
+ level += ASSOC_ARRAY_LEVEL_STEP;
+ if (diff > level) {
+ pr_devel("pre-shortcut %d...%d\n", level, diff);
+ keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
+
+ new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
+ keylen * sizeof(unsigned long), GFP_KERNEL);
+ if (!new_s0)
+ return false;
+ edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0);
+ edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
+ new_s0->back_pointer = shortcut->back_pointer;
+ new_s0->parent_slot = shortcut->parent_slot;
+ new_s0->next_node = assoc_array_node_to_ptr(new_n0);
+ new_s0->skip_to_level = diff;
+
+ new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
+ new_n0->parent_slot = 0;
+
+ memcpy(new_s0->index_key, shortcut->index_key,
+ keylen * sizeof(unsigned long));
+
+ blank = ULONG_MAX << (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, diff, blank);
+ new_s0->index_key[keylen - 1] &= ~blank;
+ } else {
+ pr_devel("no pre-shortcut\n");
+ edit->set[0].to = assoc_array_node_to_ptr(new_n0);
+ new_n0->back_pointer = shortcut->back_pointer;
+ new_n0->parent_slot = shortcut->parent_slot;
+ }
+
+ side = assoc_array_ptr_to_node(shortcut->next_node);
+ new_n0->nr_leaves_on_branch = side->nr_leaves_on_branch;
+
+ /* We need to know which slot in the new node is going to take a
+ * metadata pointer.
+ */
+ sc_slot = sc_segments >> (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ sc_slot &= ASSOC_ARRAY_FAN_MASK;
+
+ pr_devel("new slot %lx >> %d -> %d\n",
+ sc_segments, diff & ASSOC_ARRAY_KEY_CHUNK_MASK, sc_slot);
+
+ /* Determine whether we need to follow the new node with a replacement
+ * for the current shortcut. We could in theory reuse the current
+ * shortcut if its parent slot number doesn't change - but that's a
+ * 1-in-16 chance so not worth expending the code upon.
+ */
+ level = diff + ASSOC_ARRAY_LEVEL_STEP;
+ if (level < shortcut->skip_to_level) {
+ pr_devel("post-shortcut %d...%d\n", level, shortcut->skip_to_level);
+ keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
+
+ new_s1 = kzalloc(sizeof(struct assoc_array_shortcut) +
+ keylen * sizeof(unsigned long), GFP_KERNEL);
+ if (!new_s1)
+ return false;
+ edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1);
+
+ new_s1->back_pointer = assoc_array_node_to_ptr(new_n0);
+ new_s1->parent_slot = sc_slot;
+ new_s1->next_node = shortcut->next_node;
+ new_s1->skip_to_level = shortcut->skip_to_level;
+
+ new_n0->slots[sc_slot] = assoc_array_shortcut_to_ptr(new_s1);
+
+ memcpy(new_s1->index_key, shortcut->index_key,
+ keylen * sizeof(unsigned long));
+
+ edit->set[1].ptr = &side->back_pointer;
+ edit->set[1].to = assoc_array_shortcut_to_ptr(new_s1);
+ } else {
+ pr_devel("no post-shortcut\n");
+
+ /* We don't have to replace the pointed-to node as long as we
+ * use memory barriers to make sure the parent slot number is
+ * changed before the back pointer (the parent slot number is
+ * irrelevant to the old parent shortcut).
+ */
+ new_n0->slots[sc_slot] = shortcut->next_node;
+ edit->set_parent_slot[0].p = &side->parent_slot;
+ edit->set_parent_slot[0].to = sc_slot;
+ edit->set[1].ptr = &side->back_pointer;
+ edit->set[1].to = assoc_array_node_to_ptr(new_n0);
+ }
+
+ /* Install the new leaf in a spare slot in the new node. */
+ if (sc_slot == 0)
+ edit->leaf_p = &new_n0->slots[1];
+ else
+ edit->leaf_p = &new_n0->slots[0];
+
+ pr_devel("<--%s() = ok [split shortcut]\n", __func__);
+ return true;
+}
+
+/**
+ * assoc_array_insert - Script insertion of an object into an associative array
+ * @array: The array to insert into.
+ * @ops: The operations to use.
+ * @index_key: The key to insert at.
+ * @object: The object to insert.
+ *
+ * Precalculate and preallocate a script for the insertion or replacement of an
+ * object in an associative array. This results in an edit script that can
+ * either be applied or cancelled.
+ *
+ * The function returns a pointer to an edit script or -ENOMEM.
+ *
+ * The caller should lock against other modifications and must continue to hold
+ * the lock until assoc_array_apply_edit() has been called.
+ *
+ * Accesses to the tree may take place concurrently with this function,
+ * provided they hold the RCU read lock.
+ */
+struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ void *object)
+{
+ struct assoc_array_walk_result result;
+ struct assoc_array_edit *edit;
+
+ pr_devel("-->%s()\n", __func__);
+
+ /* The leaf pointer we're given must not have the bottom bit set as we
+ * use those for type-marking the pointer. A NULL pointer would normally
+ * indicate an empty slot, but we do allow it here as the real object can
+ * be attached later with assoc_array_insert_set_object().
+ */
+ BUG_ON(assoc_array_ptr_is_meta(object));
+
+ edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ if (!edit)
+ return ERR_PTR(-ENOMEM);
+ edit->array = array;
+ edit->ops = ops;
+ edit->leaf = assoc_array_leaf_to_ptr(object);
+ edit->adjust_count_by = 1;
+
+ switch (assoc_array_walk(array, ops, index_key, &result)) {
+ case assoc_array_walk_tree_empty:
+ /* Allocate a root node if there isn't one yet */
+ if (!assoc_array_insert_in_empty_tree(edit))
+ goto enomem;
+ return edit;
+
+ case assoc_array_walk_found_terminal_node:
+ /* We found a node that doesn't have a node/shortcut pointer in
+ * the slot corresponding to the index key that we have to
+ * follow.
+ */
+ if (!assoc_array_insert_into_terminal_node(edit, ops, index_key,
+ &result))
+ goto enomem;
+ return edit;
+
+ case assoc_array_walk_found_wrong_shortcut:
+ /* We found a shortcut that didn't match our key in a slot we
+ * needed to follow.
+ */
+ if (!assoc_array_insert_mid_shortcut(edit, ops, &result))
+ goto enomem;
+ return edit;
+ }
+
+enomem:
+ /* Clean up after an out of memory error */
+ pr_devel("enomem\n");
+ assoc_array_cancel_edit(edit);
+ return ERR_PTR(-ENOMEM);
+}
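Continuing the earlier hypothetical my_obj/my_ops sketch, the edit-script lifecycle looks roughly like this (the locking arrangement and error policy are assumptions, not part of the patch):

#include <linux/err.h>

static int my_insert(struct assoc_array *array, struct my_obj *obj)
{
	struct assoc_array_edit *edit;

	/* The caller serialises modifications (e.g. holds a mutex) and keeps
	 * holding the lock until the edit has been applied or cancelled.
	 */
	edit = assoc_array_insert(array, &my_ops, &obj->key, obj);
	if (IS_ERR(edit))
		return PTR_ERR(edit);

	/* Nothing is visible to readers yet; assoc_array_cancel_edit(edit)
	 * could still abandon the operation here (it would not free obj).
	 */
	assoc_array_apply_edit(edit);
	return 0;
}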
+
+/**
+ * assoc_array_insert_set_object - Set the new object pointer in an edit script
+ * @edit: The edit script to modify.
+ * @object: The object pointer to set.
+ *
+ * Change the object to be inserted in an edit script. The object previously
+ * attached to the script is not freed. This must be done prior to applying
+ * the script.
+ */
+void assoc_array_insert_set_object(struct assoc_array_edit *edit, void *object)
+{
+ BUG_ON(!object);
+ edit->leaf = assoc_array_leaf_to_ptr(object);
+}
+
+struct assoc_array_delete_collapse_context {
+ struct assoc_array_node *node;
+ const void *skip_leaf;
+ int slot;
+};
+
+/*
+ * Subtree collapse to node iterator.
+ */
+static int assoc_array_delete_collapse_iterator(const void *leaf,
+ void *iterator_data)
+{
+ struct assoc_array_delete_collapse_context *collapse = iterator_data;
+
+ if (leaf == collapse->skip_leaf)
+ return 0;
+
+ BUG_ON(collapse->slot >= ASSOC_ARRAY_FAN_OUT);
+
+ collapse->node->slots[collapse->slot++] = assoc_array_leaf_to_ptr(leaf);
+ return 0;
+}
+
+/**
+ * assoc_array_delete - Script deletion of an object from an associative array
+ * @array: The array to search.
+ * @ops: The operations to use.
+ * @index_key: The key to the object.
+ *
+ * Precalculate and preallocate a script for the deletion of an object from an
+ * associative array. This results in an edit script that can either be
+ * applied or cancelled.
+ *
+ * The function returns a pointer to an edit script if the object was found,
+ * NULL if the object was not found or -ENOMEM.
+ *
+ * The caller should lock against other modifications and must continue to hold
+ * the lock until assoc_array_apply_edit() has been called.
+ *
+ * Accesses to the tree may take place concurrently with this function,
+ * provided they hold the RCU read lock.
+ */
+struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key)
+{
+ struct assoc_array_delete_collapse_context collapse;
+ struct assoc_array_walk_result result;
+ struct assoc_array_node *node, *new_n0;
+ struct assoc_array_edit *edit;
+ struct assoc_array_ptr *ptr;
+ bool has_meta;
+ int slot, i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ if (!edit)
+ return ERR_PTR(-ENOMEM);
+ edit->array = array;
+ edit->ops = ops;
+ edit->adjust_count_by = -1;
+
+ switch (assoc_array_walk(array, ops, index_key, &result)) {
+ case assoc_array_walk_found_terminal_node:
+ /* We found a node that should contain the leaf we've been
+ * asked to remove - *if* it's in the tree.
+ */
+ pr_devel("terminal_node\n");
+ node = result.terminal_node.node;
+
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = node->slots[slot];
+ if (ptr &&
+ assoc_array_ptr_is_leaf(ptr) &&
+ ops->compare_object(assoc_array_ptr_to_leaf(ptr),
+ index_key))
+ goto found_leaf;
+ }
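+ /* Fall through - the leaf wasn't found in the terminal node */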
+ case assoc_array_walk_tree_empty:
+ case assoc_array_walk_found_wrong_shortcut:
+ default:
+ assoc_array_cancel_edit(edit);
+ pr_devel("not found\n");
+ return NULL;
+ }
+
+found_leaf:
+ BUG_ON(array->nr_leaves_on_tree <= 0);
+
+ /* In the simplest form of deletion we just clear the slot and release
+ * the leaf after a suitable interval.
+ */
+ edit->dead_leaf = node->slots[slot];
+ edit->set[0].ptr = &node->slots[slot];
+ edit->set[0].to = NULL;
+ edit->adjust_count_on = node;
+
+ /* If that concludes erasure of the last leaf, then delete the entire
+ * internal array.
+ */
+ if (array->nr_leaves_on_tree == 1) {
+ edit->set[1].ptr = &array->root;
+ edit->set[1].to = NULL;
+ edit->adjust_count_on = NULL;
+ edit->excised_subtree = array->root;
+ pr_devel("all gone\n");
+ return edit;
+ }
+
+ /* However, we'd also like to clear up some metadata blocks if we
+ * possibly can.
+ *
+ * We go for a simple algorithm of: if this node has FAN_OUT or fewer
+ * leaves in it, then attempt to collapse it - and attempt to
+ * recursively collapse up the tree.
+ *
+ * We could also try and collapse in partially filled subtrees to take
+ * up space in this node.
+ */
+ if (node->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
+ struct assoc_array_node *parent, *grandparent;
+ struct assoc_array_ptr *ptr;
+
+ /* First of all, we need to know if this node has metadata so
+ * that we don't try collapsing if all the leaves are already
+ * here.
+ */
+ has_meta = false;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ ptr = node->slots[i];
+ if (assoc_array_ptr_is_meta(ptr)) {
+ has_meta = true;
+ break;
+ }
+ }
+
+ pr_devel("leaves: %ld [m=%d]\n",
+ node->nr_leaves_on_branch - 1, has_meta);
+
+ /* Look further up the tree to see if we can collapse this node
+ * into a more proximal node too.
+ */
+ parent = node;
+ collapse_up:
+ pr_devel("collapse subtree: %ld\n", parent->nr_leaves_on_branch);
+
+ ptr = parent->back_pointer;
+ if (!ptr)
+ goto do_collapse;
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ struct assoc_array_shortcut *s = assoc_array_ptr_to_shortcut(ptr);
+ ptr = s->back_pointer;
+ if (!ptr)
+ goto do_collapse;
+ }
+
+ grandparent = assoc_array_ptr_to_node(ptr);
+ if (grandparent->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
+ parent = grandparent;
+ goto collapse_up;
+ }
+
+ do_collapse:
+ /* There's no point collapsing if the original node has no meta
+ * pointers to discard and we didn't merge into one of that
+ * node's ancestors.
+ */
+ if (has_meta || parent != node) {
+ node = parent;
+
+ /* Create a new node to collapse into */
+ new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n0)
+ goto enomem;
+ edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
+
+ new_n0->back_pointer = node->back_pointer;
+ new_n0->parent_slot = node->parent_slot;
+ new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
+ edit->adjust_count_on = new_n0;
+
+ collapse.node = new_n0;
+ collapse.skip_leaf = assoc_array_ptr_to_leaf(edit->dead_leaf);
+ collapse.slot = 0;
+ assoc_array_subtree_iterate(assoc_array_node_to_ptr(node),
+ node->back_pointer,
+ assoc_array_delete_collapse_iterator,
+ &collapse);
+ pr_devel("collapsed %d,%lu\n", collapse.slot, new_n0->nr_leaves_on_branch);
+ BUG_ON(collapse.slot != new_n0->nr_leaves_on_branch - 1);
+
+ if (!node->back_pointer) {
+ edit->set[1].ptr = &array->root;
+ } else if (assoc_array_ptr_is_leaf(node->back_pointer)) {
+ BUG();
+ } else if (assoc_array_ptr_is_node(node->back_pointer)) {
+ struct assoc_array_node *p =
+ assoc_array_ptr_to_node(node->back_pointer);
+ edit->set[1].ptr = &p->slots[node->parent_slot];
+ } else if (assoc_array_ptr_is_shortcut(node->back_pointer)) {
+ struct assoc_array_shortcut *s =
+ assoc_array_ptr_to_shortcut(node->back_pointer);
+ edit->set[1].ptr = &s->next_node;
+ }
+ edit->set[1].to = assoc_array_node_to_ptr(new_n0);
+ edit->excised_subtree = assoc_array_node_to_ptr(node);
+ }
+ }
+
+ return edit;
+
+enomem:
+ /* Clean up after an out of memory error */
+ pr_devel("enomem\n");
+ assoc_array_cancel_edit(edit);
+ return ERR_PTR(-ENOMEM);
+}
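Again purely illustrative, reusing the hypothetical my_ops/my_obj sketch: handling the three possible returns of assoc_array_delete().

static int my_remove(struct assoc_array *array, unsigned long key)
{
	struct assoc_array_edit *edit;

	/* Caller holds the modification lock. */
	edit = assoc_array_delete(array, &my_ops, &key);
	if (!edit)
		return -ENOENT;		/* no such object in the array */
	if (IS_ERR(edit))
		return PTR_ERR(edit);	/* -ENOMEM */

	assoc_array_apply_edit(edit);
	/* The dead leaf is handed to ->free_object() only after an RCU grace
	 * period, so concurrent readers remain safe.
	 */
	return 0;
}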
+
+/**
+ * assoc_array_clear - Script deletion of all objects from an associative array
+ * @array: The array to clear.
+ * @ops: The operations to use.
+ *
+ * Precalculate and preallocate a script for the deletion of all the objects
+ * from an associative array. This results in an edit script that can either
+ * be applied or cancelled.
+ *
+ * The function returns a pointer to an edit script if there are objects to be
+ * deleted, NULL if there are no objects in the array or -ENOMEM.
+ *
+ * The caller should lock against other modifications and must continue to hold
+ * the lock until assoc_array_apply_edit() has been called.
+ *
+ * Accesses to the tree may take place concurrently with this function,
+ * provided they hold the RCU read lock.
+ */
+struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
+ const struct assoc_array_ops *ops)
+{
+ struct assoc_array_edit *edit;
+
+ pr_devel("-->%s()\n", __func__);
+
+ if (!array->root)
+ return NULL;
+
+ edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ if (!edit)
+ return ERR_PTR(-ENOMEM);
+ edit->array = array;
+ edit->ops = ops;
+ edit->set[1].ptr = &array->root;
+ edit->set[1].to = NULL;
+ edit->excised_subtree = array->root;
+ edit->ops_for_excised_subtree = ops;
+ pr_devel("all gone\n");
+ return edit;
+}
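A similar hypothetical sketch for emptying the array: the whole excised subtree, and therefore every object in it, is released through ->free_object() once the RCU grace period has elapsed.

static int my_flush(struct assoc_array *array)
{
	struct assoc_array_edit *edit;

	/* Caller holds the modification lock. */
	edit = assoc_array_clear(array, &my_ops);
	if (!edit)
		return 0;		/* already empty */
	if (IS_ERR(edit))
		return PTR_ERR(edit);	/* -ENOMEM; array left untouched */

	assoc_array_apply_edit(edit);
	return 0;
}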
+
+/*
+ * Handle the deferred destruction after an applied edit.
+ */
+static void assoc_array_rcu_cleanup(struct rcu_head *head)
+{
+ struct assoc_array_edit *edit =
+ container_of(head, struct assoc_array_edit, rcu);
+ int i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ if (edit->dead_leaf)
+ edit->ops->free_object(assoc_array_ptr_to_leaf(edit->dead_leaf));
+ for (i = 0; i < ARRAY_SIZE(edit->excised_meta); i++)
+ if (edit->excised_meta[i])
+ kfree(assoc_array_ptr_to_node(edit->excised_meta[i]));
+
+ if (edit->excised_subtree) {
+ BUG_ON(assoc_array_ptr_is_leaf(edit->excised_subtree));
+ if (assoc_array_ptr_is_node(edit->excised_subtree)) {
+ struct assoc_array_node *n =
+ assoc_array_ptr_to_node(edit->excised_subtree);
+ n->back_pointer = NULL;
+ } else {
+ struct assoc_array_shortcut *s =
+ assoc_array_ptr_to_shortcut(edit->excised_subtree);
+ s->back_pointer = NULL;
+ }
+ assoc_array_destroy_subtree(edit->excised_subtree,
+ edit->ops_for_excised_subtree);
+ }
+
+ kfree(edit);
+}
+
+/**
+ * assoc_array_apply_edit - Apply an edit script to an associative array
+ * @edit: The script to apply.
+ *
+ * Apply an edit script to an associative array to effect an insertion,
+ * deletion or clearance. As the edit script includes preallocated memory,
+ * this is guaranteed not to fail.
+ *
+ * The edit script, dead objects and dead metadata will be scheduled for
+ * destruction after an RCU grace period to permit those doing read-only
+ * accesses on the array to continue to do so under the RCU read lock whilst
+ * the edit is taking place.
+ */
+void assoc_array_apply_edit(struct assoc_array_edit *edit)
+{
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *ptr;
+ int i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ smp_wmb();
+ if (edit->leaf_p)
+ *edit->leaf_p = edit->leaf;
+
+ smp_wmb();
+ for (i = 0; i < ARRAY_SIZE(edit->set_parent_slot); i++)
+ if (edit->set_parent_slot[i].p)
+ *edit->set_parent_slot[i].p = edit->set_parent_slot[i].to;
+
+ smp_wmb();
+ for (i = 0; i < ARRAY_SIZE(edit->set_backpointers); i++)
+ if (edit->set_backpointers[i])
+ *edit->set_backpointers[i] = edit->set_backpointers_to;
+
+ smp_wmb();
+ for (i = 0; i < ARRAY_SIZE(edit->set); i++)
+ if (edit->set[i].ptr)
+ *edit->set[i].ptr = edit->set[i].to;
+
+ if (edit->array->root == NULL) {
+ edit->array->nr_leaves_on_tree = 0;
+ } else if (edit->adjust_count_on) {
+ node = edit->adjust_count_on;
+ for (;;) {
+ node->nr_leaves_on_branch += edit->adjust_count_by;
+
+ ptr = node->back_pointer;
+ if (!ptr)
+ break;
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ ptr = shortcut->back_pointer;
+ if (!ptr)
+ break;
+ }
+ BUG_ON(!assoc_array_ptr_is_node(ptr));
+ node = assoc_array_ptr_to_node(ptr);
+ }
+
+ edit->array->nr_leaves_on_tree += edit->adjust_count_by;
+ }
+
+ call_rcu(&edit->rcu, assoc_array_rcu_cleanup);
+}
+
+/**
+ * assoc_array_cancel_edit - Discard an edit script.
+ * @edit: The script to discard.
+ *
+ * Free an edit script and all the preallocated data it holds without making
+ * any changes to the associative array it was intended for.
+ *
+ * NOTE! In the case of an insertion script, this does _not_ release the leaf
+ * that was to be inserted. That is left to the caller.
+ */
+void assoc_array_cancel_edit(struct assoc_array_edit *edit)
+{
+ struct assoc_array_ptr *ptr;
+ int i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ /* Clean up after an out of memory error */
+ for (i = 0; i < ARRAY_SIZE(edit->new_meta); i++) {
+ ptr = edit->new_meta[i];
+ if (ptr) {
+ if (assoc_array_ptr_is_node(ptr))
+ kfree(assoc_array_ptr_to_node(ptr));
+ else
+ kfree(assoc_array_ptr_to_shortcut(ptr));
+ }
+ }
+ kfree(edit);
+}
+
+/**
+ * assoc_array_gc - Garbage collect an associative array.
+ * @array: The array to clean.
+ * @ops: The operations to use.
+ * @iterator: A callback function to pass judgement on each object.
+ * @iterator_data: Private data for the callback function.
+ *
+ * Collect garbage from an associative array and pack down the internal tree to
+ * save memory.
+ *
+ * The iterator function is asked to pass judgement upon each object in the
+ * array. If it returns false, the object is discarded; if it returns true,
+ * the object is kept, and the iterator must have incremented the object's
+ * usage count (or done whatever else is needed to retain it) before returning.
+ *
+ * This function returns 0 if successful or -ENOMEM if out of memory. In the
+ * latter case, the array is not changed.
+ *
+ * The caller should lock against other modifications and must continue to hold
+ * the lock until assoc_array_apply_edit() has been called.
+ *
+ * Accesses to the tree may take place concurrently with this function,
+ * provided they hold the RCU read lock.
+ */
+int assoc_array_gc(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ bool (*iterator)(void *object, void *iterator_data),
+ void *iterator_data)
+{
+ struct assoc_array_shortcut *shortcut, *new_s;
+ struct assoc_array_node *node, *new_n;
+ struct assoc_array_edit *edit;
+ struct assoc_array_ptr *cursor, *ptr;
+ struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
+ unsigned long nr_leaves_on_tree;
+ int keylen, slot, nr_free, next_slot, i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ if (!array->root)
+ return 0;
+
+ edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ if (!edit)
+ return -ENOMEM;
+ edit->array = array;
+ edit->ops = ops;
+ edit->ops_for_excised_subtree = ops;
+ edit->set[0].ptr = &array->root;
+ edit->excised_subtree = array->root;
+
+ new_root = new_parent = NULL;
+ new_ptr_pp = &new_root;
+ cursor = array->root;
+
+descend:
+ /* If this point is a shortcut, then we need to duplicate it and
+ * advance the target cursor.
+ */
+ if (assoc_array_ptr_is_shortcut(cursor)) {
+ shortcut = assoc_array_ptr_to_shortcut(cursor);
+ keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
+ new_s = kmalloc(sizeof(struct assoc_array_shortcut) +
+ keylen * sizeof(unsigned long), GFP_KERNEL);
+ if (!new_s)
+ goto enomem;
+ pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
+ memcpy(new_s, shortcut, (sizeof(struct assoc_array_shortcut) +
+ keylen * sizeof(unsigned long)));
+ new_s->back_pointer = new_parent;
+ new_s->parent_slot = shortcut->parent_slot;
+ *new_ptr_pp = new_parent = assoc_array_shortcut_to_ptr(new_s);
+ new_ptr_pp = &new_s->next_node;
+ cursor = shortcut->next_node;
+ }
+
+ /* Duplicate the node at this position */
+ node = assoc_array_ptr_to_node(cursor);
+ new_n = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n)
+ goto enomem;
+ pr_devel("dup node %p -> %p\n", node, new_n);
+ new_n->back_pointer = new_parent;
+ new_n->parent_slot = node->parent_slot;
+ *new_ptr_pp = new_parent = assoc_array_node_to_ptr(new_n);
+ new_ptr_pp = NULL;
+ slot = 0;
+
+continue_node:
+ /* Filter across any leaves and gc any subtrees */
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = node->slots[slot];
+ if (!ptr)
+ continue;
+
+ if (assoc_array_ptr_is_leaf(ptr)) {
+ if (iterator(assoc_array_ptr_to_leaf(ptr),
+ iterator_data))
+ /* The iterator will have done any reference
+ * counting on the object for us.
+ */
+ new_n->slots[slot] = ptr;
+ continue;
+ }
+
+ new_ptr_pp = &new_n->slots[slot];
+ cursor = ptr;
+ goto descend;
+ }
+
+ pr_devel("-- compress node %p --\n", new_n);
+
+ /* Count up the number of empty slots in this node and work out the
+ * subtree leaf count.
+ */
+ new_n->nr_leaves_on_branch = 0;
+ nr_free = 0;
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = new_n->slots[slot];
+ if (!ptr)
+ nr_free++;
+ else if (assoc_array_ptr_is_leaf(ptr))
+ new_n->nr_leaves_on_branch++;
+ }
+ pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
+
+ /* See what we can fold in */
+ next_slot = 0;
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ struct assoc_array_shortcut *s;
+ struct assoc_array_node *child;
+
+ ptr = new_n->slots[slot];
+ if (!ptr || assoc_array_ptr_is_leaf(ptr))
+ continue;
+
+ s = NULL;
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ s = assoc_array_ptr_to_shortcut(ptr);
+ ptr = s->next_node;
+ }
+
+ child = assoc_array_ptr_to_node(ptr);
+ new_n->nr_leaves_on_branch += child->nr_leaves_on_branch;
+
+ if (child->nr_leaves_on_branch <= nr_free + 1) {
+ /* Fold the child node into this one */
+ pr_devel("[%d] fold node %lu/%d [nx %d]\n",
+ slot, child->nr_leaves_on_branch, nr_free + 1,
+ next_slot);
+
+ /* We would already have reaped an intervening shortcut
+ * on the way back up the tree.
+ */
+ BUG_ON(s);
+
+ new_n->slots[slot] = NULL;
+ nr_free++;
+ if (slot < next_slot)
+ next_slot = slot;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ struct assoc_array_ptr *p = child->slots[i];
+ if (!p)
+ continue;
+ BUG_ON(assoc_array_ptr_is_meta(p));
+ while (new_n->slots[next_slot])
+ next_slot++;
+ BUG_ON(next_slot >= ASSOC_ARRAY_FAN_OUT);
+ new_n->slots[next_slot++] = p;
+ nr_free--;
+ }
+ kfree(child);
+ } else {
+ pr_devel("[%d] retain node %lu/%d [nx %d]\n",
+ slot, child->nr_leaves_on_branch, nr_free + 1,
+ next_slot);
+ }
+ }
+
+ pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
+
+ nr_leaves_on_tree = new_n->nr_leaves_on_branch;
+
+ /* Excise this node if it is singly occupied by a shortcut */
+ if (nr_free == ASSOC_ARRAY_FAN_OUT - 1) {
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++)
+ if ((ptr = new_n->slots[slot]))
+ break;
+
+ if (assoc_array_ptr_is_meta(ptr) &&
+ assoc_array_ptr_is_shortcut(ptr)) {
+ pr_devel("excise node %p with 1 shortcut\n", new_n);
+ new_s = assoc_array_ptr_to_shortcut(ptr);
+ new_parent = new_n->back_pointer;
+ slot = new_n->parent_slot;
+ kfree(new_n);
+ if (!new_parent) {
+ new_s->back_pointer = NULL;
+ new_s->parent_slot = 0;
+ new_root = ptr;
+ goto gc_complete;
+ }
+
+ if (assoc_array_ptr_is_shortcut(new_parent)) {
+ /* We can discard any preceding shortcut also */
+ struct assoc_array_shortcut *s =
+ assoc_array_ptr_to_shortcut(new_parent);
+
+ pr_devel("excise preceding shortcut\n");
+
+ new_parent = new_s->back_pointer = s->back_pointer;
+ slot = new_s->parent_slot = s->parent_slot;
+ kfree(s);
+ if (!new_parent) {
+ new_s->back_pointer = NULL;
+ new_s->parent_slot = 0;
+ new_root = ptr;
+ goto gc_complete;
+ }
+ }
+
+ new_s->back_pointer = new_parent;
+ new_s->parent_slot = slot;
+ new_n = assoc_array_ptr_to_node(new_parent);
+ new_n->slots[slot] = ptr;
+ goto ascend_old_tree;
+ }
+ }
+
+ /* Excise any shortcuts we might encounter that point to nodes that
+ * only contain leaves.
+ */
+ ptr = new_n->back_pointer;
+ if (!ptr)
+ goto gc_complete;
+
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ new_s = assoc_array_ptr_to_shortcut(ptr);
+ new_parent = new_s->back_pointer;
+ slot = new_s->parent_slot;
+
+ if (new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
+ struct assoc_array_node *n;
+
+ pr_devel("excise shortcut\n");
+ new_n->back_pointer = new_parent;
+ new_n->parent_slot = slot;
+ kfree(new_s);
+ if (!new_parent) {
+ new_root = assoc_array_node_to_ptr(new_n);
+ goto gc_complete;
+ }
+
+ n = assoc_array_ptr_to_node(new_parent);
+ n->slots[slot] = assoc_array_node_to_ptr(new_n);
+ }
+ } else {
+ new_parent = ptr;
+ }
+ new_n = assoc_array_ptr_to_node(new_parent);
+
+ascend_old_tree:
+ ptr = node->back_pointer;
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ slot = shortcut->parent_slot;
+ cursor = shortcut->back_pointer;
+ } else {
+ slot = node->parent_slot;
+ cursor = ptr;
+ }
+ BUG_ON(!ptr);
+ node = assoc_array_ptr_to_node(cursor);
+ slot++;
+ goto continue_node;
+
+gc_complete:
+ edit->set[0].to = new_root;
+ assoc_array_apply_edit(edit);
+ edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
+ return 0;
+
+enomem:
+ pr_devel("enomem\n");
+ assoc_array_destroy_subtree(new_root, edit->ops);
+ kfree(edit);
+ return -ENOMEM;
+}
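One more hypothetical sketch (the my_refobj layout and its flag are invented): a judgement callback for assoc_array_gc(). Because the old tree is torn down through ->free_object() after the grace period, a kept object must be pinned with an extra reference from inside the callback, as the kerneldoc above requires.

#include <linux/assoc_array.h>
#include <linux/atomic.h>

struct my_refobj {
	atomic_t	usage;
	bool		dead;		/* hypothetical "discard me" flag */
	unsigned long	key;
};

static bool my_gc_keep(void *object, void *iterator_data)
{
	struct my_refobj *obj = object;

	if (obj->dead)
		return false;	/* old tree's pointer dropped via ->free_object() */

	/* The new tree now also points to obj, while the old tree's pointer
	 * will still be put through ->free_object() when the excised subtree
	 * is destroyed - so take the extra reference here.
	 */
	atomic_inc(&obj->usage);
	return true;
}

static int my_gc(struct assoc_array *array, const struct assoc_array_ops *ops)
{
	/* Caller holds the modification lock; ops->free_object() for this
	 * type would drop a reference rather than kfree() directly.
	 */
	return assoc_array_gc(array, ops, my_gc_keep, NULL);
}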
diff --git a/lib/crc32.c b/lib/crc32.c
index 410093dbe51c..70f00ca5ef1e 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -29,6 +29,7 @@
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/sched.h>
#include "crc32defs.h"
#if CRC_LE_BITS > 8
@@ -49,6 +50,30 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Various CRC32 calculations");
MODULE_LICENSE("GPL");
+#define GF2_DIM 32
+
+static u32 gf2_matrix_times(u32 *mat, u32 vec)
+{
+ u32 sum = 0;
+
+ while (vec) {
+ if (vec & 1)
+ sum ^= *mat;
+ vec >>= 1;
+ mat++;
+ }
+
+ return sum;
+}
+
+static void gf2_matrix_square(u32 *square, u32 *mat)
+{
+ int i;
+
+ for (i = 0; i < GF2_DIM; i++)
+ square[i] = gf2_matrix_times(mat, mat[i]);
+}
+
#if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
/* implements slicing-by-4 or slicing-by-8 algorithm */
@@ -130,6 +155,52 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
}
#endif
+/* For conditions of distribution and use, see copyright notice in zlib.h */
+static u32 crc32_generic_combine(u32 crc1, u32 crc2, size_t len2,
+ u32 polynomial)
+{
+ u32 even[GF2_DIM]; /* Even-power-of-two zeros operator */
+ u32 odd[GF2_DIM]; /* Odd-power-of-two zeros operator */
+ u32 row;
+ int i;
+
+ if (len2 <= 0)
+ return crc1;
+
+ /* Put operator for one zero bit in odd */
+ odd[0] = polynomial;
+ row = 1;
+ for (i = 1; i < GF2_DIM; i++) {
+ odd[i] = row;
+ row <<= 1;
+ }
+
+ gf2_matrix_square(even, odd); /* Put operator for two zero bits in even */
+ gf2_matrix_square(odd, even); /* Put operator for four zero bits in odd */
+
+ /* Apply len2 zeros to crc1 (first square will put the operator for one
+ * zero byte, eight zero bits, in even).
+ */
+ do {
+ /* Apply zeros operator for this bit of len2 */
+ gf2_matrix_square(even, odd);
+ if (len2 & 1)
+ crc1 = gf2_matrix_times(even, crc1);
+ len2 >>= 1;
+ /* If no more bits set, then done */
+ if (len2 == 0)
+ break;
+ /* Another iteration of the loop with odd and even swapped */
+ gf2_matrix_square(odd, even);
+ if (len2 & 1)
+ crc1 = gf2_matrix_times(odd, crc1);
+ len2 >>= 1;
+ } while (len2 != 0);
+
+ crc1 ^= crc2;
+ return crc1;
+}
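As a standalone (userspace) illustration of what these matrices encode, and not part of the patch: row i of the "odd" operator built in crc32_generic_combine() is the image of CRC-register bit i, so multiplying by it steps the register across one zero input bit. The polynomial below is the standard reflected CRC-32 polynomial that CRCPOLY_LE is assumed to expand to.

#include <assert.h>
#include <stdint.h>

#define GF2_DIM 32

/* Same operation as gf2_matrix_times() above. */
static uint32_t times(const uint32_t *mat, uint32_t vec)
{
	uint32_t sum = 0;

	while (vec) {
		if (vec & 1)
			sum ^= *mat;
		vec >>= 1;
		mat++;
	}
	return sum;
}

int main(void)
{
	const uint32_t poly = 0xedb88320;	/* reflected CRC-32 polynomial */
	uint32_t odd[GF2_DIM], row = 1, crc = 0xdeadbeef;
	int i;

	/* Operator for a single zero bit, as built in crc32_generic_combine() */
	odd[0] = poly;
	for (i = 1; i < GF2_DIM; i++) {
		odd[i] = row;
		row <<= 1;
	}

	/* One matrix multiply == one bitwise CRC step over a zero input bit. */
	assert(times(odd, crc) == ((crc >> 1) ^ ((crc & 1) ? poly : 0)));
	return 0;
}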
+
/**
* crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
* CRC32/CRC32C
@@ -200,8 +271,19 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
}
#endif
+u32 __pure crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
+{
+ return crc32_generic_combine(crc1, crc2, len2, CRCPOLY_LE);
+}
+
+u32 __pure __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
+{
+ return crc32_generic_combine(crc1, crc2, len2, CRC32C_POLY_LE);
+}
EXPORT_SYMBOL(crc32_le);
+EXPORT_SYMBOL(crc32_le_combine);
EXPORT_SYMBOL(__crc32c_le);
+EXPORT_SYMBOL(__crc32c_le_combine);
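An illustrative statement of the property the new exports provide (buf, len1 and len2 are hypothetical names): combining the CRC of a leading fragment, computed with the real seed, with the CRC of the trailing fragment, computed with a zero seed, over the trailing length yields the CRC of the whole buffer.

#include <linux/crc32.h>
#include <linux/kernel.h>

static void crc32_combine_demo(const u8 *buf, size_t len1, size_t len2)
{
	u32 crc_a = crc32_le(~0, buf, len1);		/* leading fragment  */
	u32 crc_b = crc32_le(0, buf + len1, len2);	/* trailing fragment */
	u32 whole = crc32_le(~0, buf, len1 + len2);	/* whole buffer      */

	WARN_ON(crc32_le_combine(crc_a, crc_b, len2) != whole);
}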
/**
* crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
@@ -795,206 +877,106 @@ static struct crc_test {
u32 crc32c_le; /* expected crc32c_le result */
} test[] =
{
- {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1,
- 0xf6e93d6c},
- {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad,
- 0x0fe92aca},
- {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f,
- 0x52e1ebb8},
- {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a,
- 0x0798af9a},
- {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2,
- 0x18eb3152},
- {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793,
- 0xd00d08c7},
- {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed,
- 0x8ba966bc},
- {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35,
- 0x11d694a2},
- {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2,
- 0x6ab3208d},
- {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10,
- 0xba4603c5},
- {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb,
- 0xe6071c6f},
- {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0,
- 0x179ec30a},
- {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb,
- 0x0903beb8},
- {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed,
- 0x6a7cb4fa},
- {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591,
- 0xdb535801},
- {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67,
- 0x92bed597},
- {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd,
- 0x192a3f1b},
- {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a,
- 0xccbaec1a},
- {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b,
- 0x7eabae4d},
- {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f,
- 0x28c72982},
- {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d,
- 0xc3cd4d18},
- {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a,
- 0xbca8f0e7},
- {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97,
- 0x713f60b3},
- {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2,
- 0xebd08fd5},
- {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138,
- 0x64406c59},
- {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032,
- 0x7421890e},
- {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f,
- 0xe9347603},
- {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f,
- 0x1bef9060},
- {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32,
- 0x34720072},
- {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef,
- 0x48310f59},
- {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0,
- 0x783a4213},
- {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59,
- 0x9e8efd41},
- {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4,
- 0xfc3d34a5},
- {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c,
- 0x17a52ae2},
- {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51,
- 0x886d935a},
- {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11,
- 0xeaaeaeb2},
- {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659,
- 0x8e900a4b},
- {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af,
- 0xd74662b1},
- {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99,
- 0xd26752ba},
- {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b,
- 0x8b1fcd62},
- {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521,
- 0xf54342fe},
- {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3,
- 0x5b95b988},
- {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d,
- 0x2e1176be},
- {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f,
- 0x66120546},
- {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b,
- 0xf256a5cc},
- {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0,
- 0x4af1dd69},
- {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195,
- 0x56f0a04a},
- {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d,
- 0x74f6b6b2},
- {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4,
- 0x085951fd},
- {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3,
- 0xc65387eb},
- {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643,
- 0x1ca9257b},
- {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10,
- 0xfd196d76},
- {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d,
- 0x5ef88339},
- {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5,
- 0x2c3714d9},
- {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b,
- 0x58576548},
- {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee,
- 0xfd7c57de},
- {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14,
- 0xd5fedd59},
- {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a,
- 0x1cc3b17b},
- {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b,
- 0x270eed73},
- {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3,
- 0x91ecbb11},
- {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826,
- 0x05ed8d0c},
- {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06,
- 0x0b09ad5b},
- {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35,
- 0xf8d511fb},
- {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801,
- 0x5ad832cc},
- {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2,
- 0x1214d196},
- {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d,
- 0x5747218a},
- {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c,
- 0xde8f14de},
- {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba,
- 0x3563b7b9},
- {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5,
- 0x071475d0},
- {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b,
- 0x54c79d60},
- {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178,
- 0x4c53eee6},
- {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3,
- 0x10137a3c},
- {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605,
- 0xaa9d6c73},
- {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1,
- 0xb63d23e7},
- {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9,
- 0x7f53e9cf},
- {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78,
- 0x13c1cd83},
- {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9,
- 0x49ff5867},
- {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd,
- 0x8467f211},
- {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab,
- 0x3f9683b2},
- {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb,
- 0x76a3f874},
- {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77,
- 0x863b702f},
- {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da,
- 0xdc6c58ff},
- {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39,
- 0x0622cc95},
- {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16,
- 0xe85605cd},
- {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208,
- 0x31da5f06},
- {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e,
- 0xa1f2e784},
- {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5,
- 0xb07cc616},
- {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892,
- 0xbf943b6c},
- {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db,
- 0x2c01af1c},
- {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43,
- 0x0fe5f56d},
- {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac,
- 0xf8943b2d},
- {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7,
- 0xe4d89272},
- {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2,
- 0x7c2f6bbb},
- {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2,
- 0xabbf388b},
- {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640,
- 0x1dca1f4e},
- {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f,
- 0x5c170e23},
- {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99,
- 0xc0e9d672},
- {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7,
- 0xc18bdc86},
- {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499,
- 0xa874fcdd},
- {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a,
- 0x9dc0bb48},
+ {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
+ {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
+ {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8},
+ {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a},
+ {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152},
+ {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7},
+ {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc},
+ {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2},
+ {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d},
+ {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5},
+ {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f},
+ {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a},
+ {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8},
+ {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa},
+ {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801},
+ {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597},
+ {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b},
+ {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a},
+ {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d},
+ {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982},
+ {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18},
+ {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7},
+ {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3},
+ {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5},
+ {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59},
+ {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e},
+ {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603},
+ {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060},
+ {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072},
+ {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59},
+ {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213},
+ {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41},
+ {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5},
+ {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2},
+ {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a},
+ {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2},
+ {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b},
+ {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1},
+ {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba},
+ {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62},
+ {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe},
+ {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988},
+ {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be},
+ {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546},
+ {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc},
+ {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69},
+ {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a},
+ {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2},
+ {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd},
+ {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb},
+ {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b},
+ {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76},
+ {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339},
+ {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9},
+ {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548},
+ {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de},
+ {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59},
+ {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b},
+ {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73},
+ {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11},
+ {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c},
+ {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b},
+ {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb},
+ {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc},
+ {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196},
+ {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a},
+ {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de},
+ {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9},
+ {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0},
+ {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60},
+ {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6},
+ {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c},
+ {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73},
+ {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7},
+ {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf},
+ {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83},
+ {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867},
+ {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211},
+ {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2},
+ {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874},
+ {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f},
+ {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff},
+ {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95},
+ {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd},
+ {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06},
+ {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784},
+ {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616},
+ {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c},
+ {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c},
+ {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d},
+ {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d},
+ {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272},
+ {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb},
+ {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b},
+ {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e},
+ {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23},
+ {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672},
+ {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86},
+ {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd},
+ {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48},
};
#include <linux/time.h>
@@ -1050,6 +1032,41 @@ static int __init crc32c_test(void)
return 0;
}
+static int __init crc32c_combine_test(void)
+{
+ int i, j;
+ int errors = 0, runs = 0;
+
+ for (i = 0; i < 10; i++) {
+ u32 crc_full;
+
+ crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start,
+ test[i].length);
+ for (j = 0; j <= test[i].length; ++j) {
+ u32 crc1, crc2;
+ u32 len1 = j, len2 = test[i].length - j;
+
+ crc1 = __crc32c_le(test[i].crc, test_buf +
+ test[i].start, len1);
+ crc2 = __crc32c_le(0, test_buf + test[i].start +
+ len1, len2);
+
+ if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) &&
+ crc_full == test[i].crc32c_le))
+ errors++;
+ runs++;
+ cond_resched();
+ }
+ }
+
+ if (errors)
+ pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs);
+ else
+ pr_info("crc32c_combine: %d self tests passed\n", runs);
+
+ return 0;
+}
+
static int __init crc32_test(void)
{
int i;
@@ -1109,10 +1126,49 @@ static int __init crc32_test(void)
return 0;
}
+static int __init crc32_combine_test(void)
+{
+ int i, j;
+ int errors = 0, runs = 0;
+
+ for (i = 0; i < 10; i++) {
+ u32 crc_full;
+
+ crc_full = crc32_le(test[i].crc, test_buf + test[i].start,
+ test[i].length);
+ for (j = 0; j <= test[i].length; ++j) {
+ u32 crc1, crc2;
+ u32 len1 = j, len2 = test[i].length - j;
+
+ crc1 = crc32_le(test[i].crc, test_buf +
+ test[i].start, len1);
+ crc2 = crc32_le(0, test_buf + test[i].start +
+ len1, len2);
+
+ if (!(crc_full == crc32_le_combine(crc1, crc2, len2) &&
+ crc_full == test[i].crc_le))
+ errors++;
+ runs++;
+ cond_resched();
+ }
+ }
+
+ if (errors)
+ pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs);
+ else
+ pr_info("crc32_combine: %d self tests passed\n", runs);
+
+ return 0;
+}
+
static int __init crc32test_init(void)
{
crc32_test();
crc32c_test();
+
+ crc32_combine_test();
+ crc32c_combine_test();
+
return 0;
}
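
The new self-tests exercise crc32_le_combine() and __crc32c_le_combine(), which compute the CRC of a concatenation from the CRCs of its two halves. A minimal sketch of the identity being checked, assuming only the crc32_le()/crc32_le_combine() helpers used by the test above:

#include <linux/crc32.h>

/*
 * Sketch, not part of the patch: CRC of buf[0..len1+len2) without
 * walking the concatenated buffer a second time.
 */
static u32 crc_of_concat(u32 seed, const u8 *buf, size_t len1, size_t len2)
{
        u32 crc1 = crc32_le(seed, buf, len1);      /* first half */
        u32 crc2 = crc32_le(0, buf + len1, len2);  /* second half, seed 0 */

        /* Equivalent to crc32_le(seed, buf, len1 + len2) */
        return crc32_le_combine(crc1, crc2, len2);
}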
diff --git a/lib/kobject.c b/lib/kobject.c
index 084f7b18d0c0..7a1c203083eb 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -13,11 +13,33 @@
*/
#include <linux/kobject.h>
+#include <linux/kobj_completion.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/stat.h>
#include <linux/slab.h>
+/**
+ * kobject_namespace - return @kobj's namespace tag
+ * @kobj: kobject in question
+ *
+ * Returns the namespace tag of @kobj if its parent has namespace ops enabled
+ * and thus @kobj should have a namespace tag associated with it. Returns
+ * %NULL otherwise.
+ */
+const void *kobject_namespace(struct kobject *kobj)
+{
+ const struct kobj_ns_type_operations *ns_ops = kobj_ns_ops(kobj);
+ const void *ns;
+
+ if (!ns_ops || ns_ops->type == KOBJ_NS_TYPE_NONE)
+ return NULL;
+
+ ns = kobj->ktype->namespace(kobj);
+ WARN_ON(!ns); /* @kobj in a namespace is required to have a non-NULL tag */
+ return ns;
+}
+
/*
* populate_dir - populate directory with attributes.
* @kobj: object we're working on.
@@ -46,13 +68,21 @@ static int populate_dir(struct kobject *kobj)
static int create_dir(struct kobject *kobj)
{
- int error = 0;
- error = sysfs_create_dir(kobj);
+ int error;
+
+ error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj));
if (!error) {
error = populate_dir(kobj);
if (error)
sysfs_remove_dir(kobj);
}
+
+ /*
+ * @kobj->sd may be deleted by an ancestor going away. Hold an
+ * extra reference so that it stays until @kobj is gone.
+ */
+ sysfs_get(kobj->sd);
+
return error;
}
@@ -428,7 +458,7 @@ int kobject_rename(struct kobject *kobj, const char *new_name)
goto out;
}
- error = sysfs_rename_dir(kobj, new_name);
+ error = sysfs_rename_dir_ns(kobj, new_name, kobject_namespace(kobj));
if (error)
goto out;
@@ -472,6 +502,7 @@ int kobject_move(struct kobject *kobj, struct kobject *new_parent)
if (kobj->kset)
new_parent = kobject_get(&kobj->kset->kobj);
}
+
/* old object path */
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
@@ -486,7 +517,7 @@ int kobject_move(struct kobject *kobj, struct kobject *new_parent)
sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
envp[0] = devpath_string;
envp[1] = NULL;
- error = sysfs_move_dir(kobj, new_parent);
+ error = sysfs_move_dir_ns(kobj, new_parent, kobject_namespace(kobj));
if (error)
goto out;
old_parent = kobj->parent;
@@ -508,10 +539,15 @@ out:
*/
void kobject_del(struct kobject *kobj)
{
+ struct sysfs_dirent *sd;
+
if (!kobj)
return;
+ sd = kobj->sd;
sysfs_remove_dir(kobj);
+ sysfs_put(sd);
+
kobj->state_in_sysfs = 0;
kobj_kset_leave(kobj);
kobject_put(kobj->parent);
@@ -727,6 +763,55 @@ const struct sysfs_ops kobj_sysfs_ops = {
};
/**
+ * kobj_completion_init - initialize a kobj_completion object.
+ * @kc: kobj_completion
+ * @ktype: type of kobject to initialize
+ *
+ * kobj_completion structures can be embedded within structures with different
+ * lifetime rules. During the release of the enclosing object, we can
+ * wait on the release of the kobject so that we don't free it while it's
+ * still busy.
+ */
+void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype)
+{
+ init_completion(&kc->kc_unregister);
+ kobject_init(&kc->kc_kobj, ktype);
+}
+EXPORT_SYMBOL_GPL(kobj_completion_init);
+
+/**
+ * kobj_completion_release - release a kobj_completion object
+ * @kobj: kobject embedded in kobj_completion
+ *
+ * Used with kobject_release to notify waiters that the kobject has been
+ * released.
+ */
+void kobj_completion_release(struct kobject *kobj)
+{
+ struct kobj_completion *kc = kobj_to_kobj_completion(kobj);
+ complete(&kc->kc_unregister);
+}
+EXPORT_SYMBOL_GPL(kobj_completion_release);
+
+/**
+ * kobj_completion_del_and_wait - release the kobject and wait for it
+ * @kc: kobj_completion object to release
+ *
+ * Delete the kobject from sysfs and drop the reference count. Then wait
+ * until any other outstanding references are also dropped. This routine
+ * is only necessary once other references may have been taken on the
+ * kobject. Typically this happens when the kobject has been published
+ * to sysfs via kobject_add.
+ */
+void kobj_completion_del_and_wait(struct kobj_completion *kc)
+{
+ kobject_del(&kc->kc_kobj);
+ kobject_put(&kc->kc_kobj);
+ wait_for_completion(&kc->kc_unregister);
+}
+EXPORT_SYMBOL_GPL(kobj_completion_del_and_wait);
+
+/**
* kset_register - initialize and add a kset.
* @k: kset.
*/
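
The kobj_completion helpers above are meant for objects whose teardown must wait until the embedded kobject has really been released. A usage sketch, assuming a made-up my_device structure and ktype (only the kobj_completion API and kobj_sysfs_ops come from the kernel):

#include <linux/kobj_completion.h>
#include <linux/slab.h>

struct my_device {                      /* hypothetical enclosing object */
        struct kobj_completion kc;
        /* ... driver state ... */
};

static struct kobj_type my_ktype = {
        .release   = kobj_completion_release,   /* completes kc_unregister */
        .sysfs_ops = &kobj_sysfs_ops,
};

static int my_device_publish(struct my_device *dev, struct kobject *parent)
{
        kobj_completion_init(&dev->kc, &my_ktype);
        /* on failure the caller still owns a ref and must kobject_put() it */
        return kobject_add(&dev->kc.kc_kobj, parent, "my_device");
}

static void my_device_teardown(struct my_device *dev)
{
        /* Drops our reference, then blocks until all others are gone. */
        kobj_completion_del_and_wait(&dev->kc);
        kfree(dev);     /* safe now: no outstanding kobject references */
}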
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 6dc09d8f4c24..872a15a2a637 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -1002,7 +1002,7 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
* Some tests (e.g. double-unlock) might corrupt the preemption
* count, so restore it:
*/
- preempt_count() = saved_preempt_count;
+ preempt_count_set(saved_preempt_count);
#ifdef CONFIG_TRACE_IRQFLAGS
if (softirq_count())
current->softirqs_enabled = 0;
diff --git a/lib/lockref.c b/lib/lockref.c
index 6f9d434c1521..af6e95d0bed6 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -153,6 +153,7 @@ void lockref_mark_dead(struct lockref *lockref)
assert_spin_locked(&lockref->lock);
lockref->count = -128;
}
+EXPORT_SYMBOL(lockref_mark_dead);
/**
* lockref_get_not_dead - Increments count unless the ref is dead
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 657979f71bef..bf076d281d40 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -121,3 +121,6 @@ void mpi_free(MPI a)
kfree(a);
}
EXPORT_SYMBOL_GPL(mpi_free);
+
+MODULE_DESCRIPTION("Multiprecision maths library");
+MODULE_LICENSE("GPL");
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 93c5d5ecff4e..7473ee3b4ee7 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -60,14 +60,15 @@ static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
int cpu;
+ unsigned long flags;
- raw_spin_lock(&fbc->lock);
+ raw_spin_lock_irqsave(&fbc->lock, flags);
for_each_possible_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
*pcount = 0;
}
fbc->count = amount;
- raw_spin_unlock(&fbc->lock);
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);
@@ -78,9 +79,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
preempt_disable();
count = __this_cpu_read(*fbc->counters) + amount;
if (count >= batch || count <= -batch) {
- raw_spin_lock(&fbc->lock);
+ unsigned long flags;
+ raw_spin_lock_irqsave(&fbc->lock, flags);
fbc->count += count;
- raw_spin_unlock(&fbc->lock);
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
__this_cpu_write(*fbc->counters, 0);
} else {
__this_cpu_write(*fbc->counters, count);
@@ -97,14 +99,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
s64 ret;
int cpu;
+ unsigned long flags;
- raw_spin_lock(&fbc->lock);
+ raw_spin_lock_irqsave(&fbc->lock, flags);
ret = fbc->count;
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
- raw_spin_unlock(&fbc->lock);
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index bab1ba2a4c71..b0698ea972c6 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -30,15 +30,6 @@
#include <linux/spinlock.h>
#include <linux/percpu_ida.h>
-/*
- * Number of tags we move between the percpu freelist and the global freelist at
- * a time
- */
-#define IDA_PCPU_BATCH_MOVE 32U
-
-/* Max size of percpu freelist, */
-#define IDA_PCPU_SIZE ((IDA_PCPU_BATCH_MOVE * 3) / 2)
-
struct percpu_ida_cpu {
/*
* Even though this is percpu, we need a lock for tag stealing by remote
@@ -78,7 +69,7 @@ static inline void steal_tags(struct percpu_ida *pool,
struct percpu_ida_cpu *remote;
for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
- cpus_have_tags * IDA_PCPU_SIZE > pool->nr_tags / 2;
+ cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
cpus_have_tags--) {
cpu = cpumask_next(cpu, &pool->cpus_have_tags);
@@ -123,7 +114,7 @@ static inline void alloc_global_tags(struct percpu_ida *pool,
{
move_tags(tags->freelist, &tags->nr_free,
pool->freelist, &pool->nr_free,
- min(pool->nr_free, IDA_PCPU_BATCH_MOVE));
+ min(pool->nr_free, pool->percpu_batch_size));
}
static inline unsigned alloc_local_tag(struct percpu_ida *pool,
@@ -245,17 +236,17 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
wake_up(&pool->wait);
}
- if (nr_free == IDA_PCPU_SIZE) {
+ if (nr_free == pool->percpu_max_size) {
spin_lock(&pool->lock);
/*
* Global lock held and irqs disabled, don't need percpu
* lock
*/
- if (tags->nr_free == IDA_PCPU_SIZE) {
+ if (tags->nr_free == pool->percpu_max_size) {
move_tags(pool->freelist, &pool->nr_free,
tags->freelist, &tags->nr_free,
- IDA_PCPU_BATCH_MOVE);
+ pool->percpu_batch_size);
wake_up(&pool->wait);
}
@@ -292,7 +283,8 @@ EXPORT_SYMBOL_GPL(percpu_ida_destroy);
* Allocation is percpu, but sharding is limited by nr_tags - for best
* performance, the workload should not span more cpus than nr_tags / 128.
*/
-int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
+int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
+ unsigned long max_size, unsigned long batch_size)
{
unsigned i, cpu, order;
@@ -301,6 +293,8 @@ int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
init_waitqueue_head(&pool->wait);
spin_lock_init(&pool->lock);
pool->nr_tags = nr_tags;
+ pool->percpu_max_size = max_size;
+ pool->percpu_batch_size = batch_size;
/* Guard against overflow */
if (nr_tags > (unsigned) INT_MAX + 1) {
@@ -319,7 +313,7 @@ int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
pool->nr_free = nr_tags;
pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
- IDA_PCPU_SIZE * sizeof(unsigned),
+ pool->percpu_max_size * sizeof(unsigned),
sizeof(unsigned));
if (!pool->tag_cpu)
goto err;
@@ -332,4 +326,65 @@ err:
percpu_ida_destroy(pool);
return -ENOMEM;
}
-EXPORT_SYMBOL_GPL(percpu_ida_init);
+EXPORT_SYMBOL_GPL(__percpu_ida_init);
+
+/**
+ * percpu_ida_for_each_free - iterate free ids of a pool
+ * @pool: pool to iterate
+ * @fn: iterate callback function
+ * @data: parameter for @fn
+ *
+ * Note, this doesn't guarantee to iterate all free ids strictly. Some free
+ * ids might be missed, some might be iterated more than once, and some
+ * might be iterated but no longer be free by the time the callback sees them.
+ */
+int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
+ void *data)
+{
+ unsigned long flags;
+ struct percpu_ida_cpu *remote;
+ unsigned cpu, i, err = 0;
+
+ local_irq_save(flags);
+ for_each_possible_cpu(cpu) {
+ remote = per_cpu_ptr(pool->tag_cpu, cpu);
+ spin_lock(&remote->lock);
+ for (i = 0; i < remote->nr_free; i++) {
+ err = fn(remote->freelist[i], data);
+ if (err)
+ break;
+ }
+ spin_unlock(&remote->lock);
+ if (err)
+ goto out;
+ }
+
+ spin_lock(&pool->lock);
+ for (i = 0; i < pool->nr_free; i++) {
+ err = fn(pool->freelist[i], data);
+ if (err)
+ break;
+ }
+ spin_unlock(&pool->lock);
+out:
+ local_irq_restore(flags);
+ return err;
+}
+EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
+
+/**
+ * percpu_ida_free_tags - return the number of free tags of a specific cpu or the global pool
+ * @pool: pool to check
+ * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
+ *
+ * Note: this just returns a snapshot of the number of free tags.
+ */
+unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
+{
+ struct percpu_ida_cpu *remote;
+ if (cpu == nr_cpu_ids)
+ return pool->nr_free;
+ remote = per_cpu_ptr(pool->tag_cpu, cpu);
+ return remote->nr_free;
+}
+EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
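
With the per-cpu freelist size and batch size now stored in the pool itself, the old percpu_ida_init() presumably becomes a thin header wrapper around __percpu_ida_init() with default sizes (not shown in this hunk). The two new helpers can be used roughly as follows; the callback signature follows the percpu_ida_cb typedef, and dump_free_tag()/dump_pool() are illustrative names only:

#include <linux/percpu_ida.h>
#include <linux/printk.h>

static int dump_free_tag(unsigned id, void *data)
{
        unsigned *seen = data;

        (*seen)++;
        pr_debug("tag %u looks free\n", id);
        return 0;       /* returning non-zero stops the walk */
}

static void dump_pool(struct percpu_ida *pool)
{
        unsigned seen = 0;

        /* Fuzzy walk: tags may be missed, repeated, or already re-allocated. */
        percpu_ida_for_each_free(pool, dump_free_tag, &seen);
        pr_debug("walked %u free tags; global freelist snapshot: %u\n",
                 seen, percpu_ida_free_tags(pool, nr_cpu_ids));
}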
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a685c8a79578..d16fa295ae1d 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -577,7 +577,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
miter->__offset += miter->consumed;
miter->__remaining -= miter->consumed;
- if (miter->__flags & SG_MITER_TO_SG)
+ if ((miter->__flags & SG_MITER_TO_SG) &&
+ !PageSlab(miter->page))
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 4c0d0e51d49e..04abe53f12a1 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -9,10 +9,9 @@
notrace unsigned int debug_smp_processor_id(void)
{
- unsigned long preempt_count = preempt_count();
int this_cpu = raw_smp_processor_id();
- if (likely(preempt_count))
+ if (likely(preempt_count()))
goto out;
if (irqs_disabled())
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 4e8686c7e5a4..e4399fa65ad6 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -38,6 +38,9 @@
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/swiotlb.h>
+
#define OFFSET(val,align) ((unsigned long) \
( (val) & ( (align) - 1)))
@@ -502,6 +505,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
+ dev_warn(hwdev, "swiotlb buffer is full\n");
return SWIOTLB_MAP_ERROR;
found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
@@ -726,6 +730,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
return dev_addr;
+ trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+
/* Oh well, have to allocate and map a bounce buffer. */
map = map_single(dev, phys, size, dir);
if (map == SWIOTLB_MAP_ERROR) {
diff --git a/mm/Kconfig b/mm/Kconfig
index 394838f489eb..fdd5ce227e5b 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -20,7 +20,7 @@ config FLATMEM_MANUAL
Some users of more advanced features like NUMA and
memory hotplug may have different options here.
- DISCONTIGMEM is an more mature, better tested system,
+ DISCONTIGMEM is a more mature, better tested system,
but is incompatible with memory hotplug and may suffer
decreased performance over SPARSEMEM. If unsure between
"Sparse Memory" and "Discontiguous Memory", choose
diff --git a/mm/bounce.c b/mm/bounce.c
index 5a7d58fb883b..d5873f21567d 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -98,27 +98,24 @@ int init_emergency_isa_pool(void)
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
unsigned char *vfrom;
- struct bio_vec *tovec, *fromvec;
- int i;
-
- bio_for_each_segment(tovec, to, i) {
- fromvec = from->bi_io_vec + i;
-
- /*
- * not bounced
- */
- if (tovec->bv_page == fromvec->bv_page)
- continue;
-
- /*
- * fromvec->bv_offset and fromvec->bv_len might have been
- * modified by the block layer, so use the original copy,
- * bounce_copy_vec already uses tovec->bv_len
- */
- vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
+ struct bio_vec tovec, *fromvec = from->bi_io_vec;
+ struct bvec_iter iter;
+
+ bio_for_each_segment(tovec, to, iter) {
+ if (tovec.bv_page != fromvec->bv_page) {
+ /*
+ * fromvec->bv_offset and fromvec->bv_len might have
+ * been modified by the block layer, so use the original
+ * copy, bounce_copy_vec already uses tovec->bv_len
+ */
+ vfrom = page_address(fromvec->bv_page) +
+ tovec.bv_offset;
+
+ bounce_copy_vec(&tovec, vfrom);
+ flush_dcache_page(tovec.bv_page);
+ }
- bounce_copy_vec(tovec, vfrom);
- flush_dcache_page(tovec->bv_page);
+ fromvec++;
}
}
@@ -201,18 +198,20 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
{
struct bio *bio;
int rw = bio_data_dir(*bio_orig);
- struct bio_vec *to, *from;
+ struct bio_vec *to, from;
+ struct bvec_iter iter;
unsigned i;
if (force)
goto bounce;
- bio_for_each_segment(from, *bio_orig, i)
- if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
+ bio_for_each_segment(from, *bio_orig, iter)
+ if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
goto bounce;
return;
bounce:
bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
+ bio_clone_biovec(bio, GFP_NOIO);
bio_for_each_segment_all(to, bio, i) {
struct page *page = to->bv_page;
diff --git a/mm/filemap.c b/mm/filemap.c
index ae4846ff4849..ccb87cc8f07c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1315,44 +1315,6 @@ out:
file_accessed(filp);
}
-int file_read_actor(read_descriptor_t *desc, struct page *page,
- unsigned long offset, unsigned long size)
-{
- char *kaddr;
- unsigned long left, count = desc->count;
-
- if (size > count)
- size = count;
-
- /*
- * Faults on the destination of a read are common, so do it before
- * taking the kmap.
- */
- if (!fault_in_pages_writeable(desc->arg.buf, size)) {
- kaddr = kmap_atomic(page);
- left = __copy_to_user_inatomic(desc->arg.buf,
- kaddr + offset, size);
- kunmap_atomic(kaddr);
- if (left == 0)
- goto success;
- }
-
- /* Do it the slow way */
- kaddr = kmap(page);
- left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
- kunmap(page);
-
- if (left) {
- size -= left;
- desc->error = -EFAULT;
- }
-success:
- desc->count = count - size;
- desc->written += size;
- desc->arg.buf += size;
- return size;
-}
-
/*
* Performs necessary checks before doing a write
* @iov: io vector request
@@ -1392,31 +1354,41 @@ int generic_segment_checks(const struct iovec *iov,
}
EXPORT_SYMBOL(generic_segment_checks);
+int file_read_iter_actor(read_descriptor_t *desc, struct page *page,
+ unsigned long offset, unsigned long size)
+{
+ struct iov_iter *iter = desc->arg.data;
+ unsigned long copied = 0;
+
+ if (size > desc->count)
+ size = desc->count;
+
+ copied = __iov_iter_copy_to_user(page, iter, offset, size);
+ if (copied < size)
+ desc->error = -EFAULT;
+
+ iov_iter_advance(iter, copied);
+ desc->count -= copied;
+ desc->written += copied;
+
+ return copied;
+}
+
/**
- * generic_file_aio_read - generic filesystem read routine
+ * generic_file_read_iter - generic filesystem read routine
* @iocb: kernel I/O control block
- * @iov: io vector request
- * @nr_segs: number of segments in the iovec
+ * @iter: memory vector
* @pos: current file position
- *
- * This is the "read()" routine for all filesystems
- * that can use the page cache directly.
*/
ssize_t
-generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
struct file *filp = iocb->ki_filp;
- ssize_t retval;
- unsigned long seg = 0;
- size_t count;
+ read_descriptor_t desc;
+ ssize_t retval = 0;
+ size_t count = iov_iter_count(iter);
loff_t *ppos = &iocb->ki_pos;
- count = 0;
- retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
- if (retval)
- return retval;
-
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (filp->f_flags & O_DIRECT) {
loff_t size;
@@ -1430,11 +1402,10 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
size = i_size_read(inode);
if (pos < size) {
retval = filemap_write_and_wait_range(mapping, pos,
- pos + iov_length(iov, nr_segs) - 1);
- if (!retval) {
+ pos + count - 1);
+ if (!retval)
retval = mapping->a_ops->direct_IO(READ, iocb,
- iov, pos, nr_segs);
- }
+ iter, pos);
if (retval > 0) {
*ppos = pos + retval;
count -= retval;
@@ -1455,42 +1426,47 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
}
}
- count = retval;
- for (seg = 0; seg < nr_segs; seg++) {
- read_descriptor_t desc;
- loff_t offset = 0;
-
- /*
- * If we did a short DIO read we need to skip the section of the
- * iov that we've already read data into.
- */
- if (count) {
- if (count > iov[seg].iov_len) {
- count -= iov[seg].iov_len;
- continue;
- }
- offset = count;
- count = 0;
- }
-
- desc.written = 0;
- desc.arg.buf = iov[seg].iov_base + offset;
- desc.count = iov[seg].iov_len - offset;
- if (desc.count == 0)
- continue;
- desc.error = 0;
- do_generic_file_read(filp, ppos, &desc, file_read_actor);
- retval += desc.written;
- if (desc.error) {
- retval = retval ?: desc.error;
- break;
- }
- if (desc.count > 0)
- break;
- }
+ desc.written = 0;
+ desc.arg.data = iter;
+ desc.count = count;
+ desc.error = 0;
+ do_generic_file_read(filp, ppos, &desc, file_read_iter_actor);
+ if (desc.written)
+ retval = desc.written;
+ else
+ retval = desc.error;
out:
return retval;
}
+EXPORT_SYMBOL(generic_file_read_iter);
+
+/**
+ * generic_file_aio_read - generic filesystem read routine
+ * @iocb: kernel I/O control block
+ * @iov: io vector request
+ * @nr_segs: number of segments in the iovec
+ * @pos: current file position
+ *
+ * This is the "read()" routine for all filesystems
+ * that can use the page cache directly.
+ */
+ssize_t
+generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct iov_iter iter;
+ int ret;
+ size_t count;
+
+ count = 0;
+ ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
+ if (ret)
+ return ret;
+
+ iov_iter_init(&iter, iov, nr_segs, count, 0);
+
+ return generic_file_read_iter(iocb, &iter, pos);
+}
EXPORT_SYMBOL(generic_file_aio_read);
#ifdef CONFIG_MMU
@@ -1943,150 +1919,6 @@ struct page *read_cache_page(struct address_space *mapping,
}
EXPORT_SYMBOL(read_cache_page);
-static size_t __iovec_copy_from_user_inatomic(char *vaddr,
- const struct iovec *iov, size_t base, size_t bytes)
-{
- size_t copied = 0, left = 0;
-
- while (bytes) {
- char __user *buf = iov->iov_base + base;
- int copy = min(bytes, iov->iov_len - base);
-
- base = 0;
- left = __copy_from_user_inatomic(vaddr, buf, copy);
- copied += copy;
- bytes -= copy;
- vaddr += copy;
- iov++;
-
- if (unlikely(left))
- break;
- }
- return copied - left;
-}
-
-/*
- * Copy as much as we can into the page and return the number of bytes which
- * were successfully copied. If a fault is encountered then return the number of
- * bytes which were copied.
- */
-size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes)
-{
- char *kaddr;
- size_t copied;
-
- BUG_ON(!in_atomic());
- kaddr = kmap_atomic(page);
- if (likely(i->nr_segs == 1)) {
- int left;
- char __user *buf = i->iov->iov_base + i->iov_offset;
- left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
- copied = bytes - left;
- } else {
- copied = __iovec_copy_from_user_inatomic(kaddr + offset,
- i->iov, i->iov_offset, bytes);
- }
- kunmap_atomic(kaddr);
-
- return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
-
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes)
-{
- char *kaddr;
- size_t copied;
-
- kaddr = kmap(page);
- if (likely(i->nr_segs == 1)) {
- int left;
- char __user *buf = i->iov->iov_base + i->iov_offset;
- left = __copy_from_user(kaddr + offset, buf, bytes);
- copied = bytes - left;
- } else {
- copied = __iovec_copy_from_user_inatomic(kaddr + offset,
- i->iov, i->iov_offset, bytes);
- }
- kunmap(page);
- return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
- BUG_ON(i->count < bytes);
-
- if (likely(i->nr_segs == 1)) {
- i->iov_offset += bytes;
- i->count -= bytes;
- } else {
- const struct iovec *iov = i->iov;
- size_t base = i->iov_offset;
- unsigned long nr_segs = i->nr_segs;
-
- /*
- * The !iov->iov_len check ensures we skip over unlikely
- * zero-length segments (without overruning the iovec).
- */
- while (bytes || unlikely(i->count && !iov->iov_len)) {
- int copy;
-
- copy = min(bytes, iov->iov_len - base);
- BUG_ON(!i->count || i->count < copy);
- i->count -= copy;
- bytes -= copy;
- base += copy;
- if (iov->iov_len == base) {
- iov++;
- nr_segs--;
- base = 0;
- }
- }
- i->iov = iov;
- i->iov_offset = base;
- i->nr_segs = nr_segs;
- }
-}
-EXPORT_SYMBOL(iov_iter_advance);
-
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
- char __user *buf = i->iov->iov_base + i->iov_offset;
- bytes = min(bytes, i->iov->iov_len - i->iov_offset);
- return fault_in_pages_readable(buf, bytes);
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
-/*
- * Return the count of just the current iov_iter segment.
- */
-size_t iov_iter_single_seg_count(const struct iov_iter *i)
-{
- const struct iovec *iov = i->iov;
- if (i->nr_segs == 1)
- return i->count;
- else
- return min(i->count, iov->iov_len - i->iov_offset);
-}
-EXPORT_SYMBOL(iov_iter_single_seg_count);
-
/*
* Performs necessary checks before doing a write
*
@@ -2192,9 +2024,8 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
EXPORT_SYMBOL(pagecache_write_end);
ssize_t
-generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long *nr_segs, loff_t pos, loff_t *ppos,
- size_t count, size_t ocount)
+generic_file_direct_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos, loff_t *ppos, size_t count)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -2203,10 +2034,13 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
size_t write_len;
pgoff_t end;
- if (count != ocount)
- *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
+ if (count != iov_iter_count(iter)) {
+ written = iov_iter_shorten(iter, count);
+ if (written)
+ goto out;
+ }
- write_len = iov_length(iov, *nr_segs);
+ write_len = count;
end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
@@ -2233,7 +2067,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
}
}
- written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+ written = mapping->a_ops->direct_IO(WRITE, iocb, iter, pos);
/*
* Finally, try again to invalidate clean pages which might have been
@@ -2259,6 +2093,23 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
out:
return written;
}
+EXPORT_SYMBOL(generic_file_direct_write_iter);
+
+ssize_t
+generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long *nr_segs, loff_t pos, loff_t *ppos,
+ size_t count, size_t ocount)
+{
+ struct iov_iter iter;
+ ssize_t ret;
+
+ iov_iter_init(&iter, iov, *nr_segs, ocount, 0);
+ ret = generic_file_direct_write_iter(iocb, &iter, pos, ppos, count);
+ /* generic_file_direct_write_iter() might have shortened the vec */
+ if (*nr_segs != iter.nr_segs)
+ *nr_segs = iter.nr_segs;
+ return ret;
+}
EXPORT_SYMBOL(generic_file_direct_write);
/*
@@ -2392,16 +2243,19 @@ again:
}
ssize_t
-generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos, loff_t *ppos,
- size_t count, ssize_t written)
+generic_file_buffered_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos, loff_t *ppos, size_t count, ssize_t written)
{
struct file *file = iocb->ki_filp;
ssize_t status;
- struct iov_iter i;
- iov_iter_init(&i, iov, nr_segs, count, written);
- status = generic_perform_write(file, &i, pos);
+ if ((count + written) != iov_iter_count(iter)) {
+ int rc = iov_iter_shorten(iter, count + written);
+ if (rc)
+ return rc;
+ }
+
+ status = generic_perform_write(file, iter, pos);
if (likely(status >= 0)) {
written += status;
@@ -2410,13 +2264,24 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
return written ? written : status;
}
+EXPORT_SYMBOL(generic_file_buffered_write_iter);
+
+ssize_t
+generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos, loff_t *ppos,
+ size_t count, ssize_t written)
+{
+ struct iov_iter iter;
+ iov_iter_init(&iter, iov, nr_segs, count, written);
+ return generic_file_buffered_write_iter(iocb, &iter, pos, ppos,
+ count, written);
+}
EXPORT_SYMBOL(generic_file_buffered_write);
/**
* __generic_file_aio_write - write data to a file
* @iocb: IO state structure (file, offset, etc.)
- * @iov: vector with data to write
- * @nr_segs: number of segments in the vector
+ * @iter: iov_iter specifying memory to write
* @ppos: position where to write
*
* This function does all the work needed for actually writing data to a
@@ -2431,24 +2296,18 @@ EXPORT_SYMBOL(generic_file_buffered_write);
* A caller has to handle it. This is mainly due to the fact that we want to
* avoid syncing under i_mutex.
*/
-ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t *ppos)
+ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t *ppos)
{
struct file *file = iocb->ki_filp;
struct address_space * mapping = file->f_mapping;
- size_t ocount; /* original count */
size_t count; /* after file limit checks */
struct inode *inode = mapping->host;
loff_t pos;
ssize_t written;
ssize_t err;
- ocount = 0;
- err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
- if (err)
- return err;
-
- count = ocount;
+ count = iov_iter_count(iter);
pos = *ppos;
/* We can write back this queue in page reclaim */
@@ -2475,8 +2334,8 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
loff_t endbyte;
ssize_t written_buffered;
- written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
- ppos, count, ocount);
+ written = generic_file_direct_write_iter(iocb, iter, pos,
+ ppos, count);
if (written < 0 || written == count)
goto out;
/*
@@ -2485,9 +2344,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
*/
pos += written;
count -= written;
- written_buffered = generic_file_buffered_write(iocb, iov,
- nr_segs, pos, ppos, count,
- written);
+ iov_iter_advance(iter, written);
+ written_buffered = generic_file_buffered_write_iter(iocb, iter,
+ pos, ppos, count, written);
/*
* If generic_file_buffered_write() returned a synchronous error
* then we want to return the number of bytes which were
@@ -2519,13 +2378,57 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
*/
}
} else {
- written = generic_file_buffered_write(iocb, iov, nr_segs,
+ iter->count = count;
+ written = generic_file_buffered_write_iter(iocb, iter,
pos, ppos, count, written);
}
out:
current->backing_dev_info = NULL;
return written ? written : err;
}
+EXPORT_SYMBOL(__generic_file_write_iter);
+
+ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ ssize_t ret;
+
+ mutex_lock(&inode->i_mutex);
+ ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
+ mutex_unlock(&inode->i_mutex);
+
+ if (ret > 0 || ret == -EIOCBQUEUED) {
+ ssize_t err;
+
+ err = generic_write_sync(file, pos, ret);
+ if (err < 0 && ret > 0)
+ ret = err;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(generic_file_write_iter);
+
+ssize_t
+__generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t *ppos)
+{
+ struct iov_iter iter;
+ size_t count;
+ int ret;
+
+ count = 0;
+ ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
+ if (ret)
+ goto out;
+
+ iov_iter_init(&iter, iov, nr_segs, count, 0);
+
+ ret = __generic_file_write_iter(iocb, &iter, ppos);
+out:
+ return ret;
+}
EXPORT_SYMBOL(__generic_file_aio_write);
/**
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 610e3df2768a..2612f60f53ee 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1278,64 +1278,105 @@ out:
int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
+ struct anon_vma *anon_vma = NULL;
struct page *page;
unsigned long haddr = addr & HPAGE_PMD_MASK;
- int target_nid;
- int current_nid = -1;
- bool migrated;
+ int page_nid = -1, this_nid = numa_node_id();
+ int target_nid, last_cpupid = -1;
+ bool page_locked;
+ bool migrated = false;
+ int flags = 0;
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(pmd, *pmdp)))
goto out_unlock;
page = pmd_page(pmd);
- get_page(page);
- current_nid = page_to_nid(page);
+ BUG_ON(is_huge_zero_page(page));
+ page_nid = page_to_nid(page);
+ last_cpupid = page_cpupid_last(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
- if (current_nid == numa_node_id())
+ if (page_nid == this_nid) {
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+ flags |= TNF_FAULT_LOCAL;
+ }
+
+ /*
+ * Avoid grouping on DSO/COW pages in particular and RO pages
+ * in general; RO pages shouldn't hurt as much anyway since
+ * they can be in shared cache state.
+ */
+ if (!pmd_write(pmd))
+ flags |= TNF_NO_GROUP;
+ /*
+ * Acquire the page lock to serialise THP migrations but avoid dropping
+ * page_table_lock if at all possible
+ */
+ page_locked = trylock_page(page);
target_nid = mpol_misplaced(page, vma, haddr);
if (target_nid == -1) {
- put_page(page);
- goto clear_pmdnuma;
+ /* If the page was locked, there are no parallel migrations */
+ if (page_locked)
+ goto clear_pmdnuma;
+
+ /*
+ * Otherwise wait for potential migrations and retry. We do
+ * relock and check_same as the page may no longer be mapped.
+ * As the fault is being retried, do not account for it.
+ */
+ spin_unlock(&mm->page_table_lock);
+ wait_on_page_locked(page);
+ page_nid = -1;
+ goto out;
}
- /* Acquire the page lock to serialise THP migrations */
+ /* Page is misplaced, serialise migrations and parallel THP splits */
+ get_page(page);
spin_unlock(&mm->page_table_lock);
- lock_page(page);
+ if (!page_locked)
+ lock_page(page);
+ anon_vma = page_lock_anon_vma_read(page);
- /* Confirm the PTE did not while locked */
+ /* Confirm the PMD did not change while page_table_lock was released */
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(pmd, *pmdp))) {
unlock_page(page);
put_page(page);
+ page_nid = -1;
goto out_unlock;
}
- spin_unlock(&mm->page_table_lock);
- /* Migrate the THP to the requested node */
+ /*
+ * Migrate the THP to the requested node, returns with page unlocked
+ * and pmd_numa cleared.
+ */
+ spin_unlock(&mm->page_table_lock);
migrated = migrate_misplaced_transhuge_page(mm, vma,
pmdp, pmd, addr, page, target_nid);
- if (!migrated)
- goto check_same;
-
- task_numa_fault(target_nid, HPAGE_PMD_NR, true);
- return 0;
+ if (migrated) {
+ flags |= TNF_MIGRATED;
+ page_nid = target_nid;
+ }
-check_same:
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_same(pmd, *pmdp)))
- goto out_unlock;
+ goto out;
clear_pmdnuma:
+ BUG_ON(!PageLocked(page));
pmd = pmd_mknonnuma(pmd);
set_pmd_at(mm, haddr, pmdp, pmd);
VM_BUG_ON(pmd_numa(*pmdp));
update_mmu_cache_pmd(vma, addr, pmdp);
+ unlock_page(page);
out_unlock:
spin_unlock(&mm->page_table_lock);
- if (current_nid != -1)
- task_numa_fault(current_nid, HPAGE_PMD_NR, false);
+
+out:
+ if (anon_vma)
+ page_unlock_anon_vma_read(anon_vma);
+
+ if (page_nid != -1)
+ task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
+
return 0;
}
@@ -1432,6 +1473,12 @@ out:
return ret;
}
+/*
+ * Returns
+ * - 0 if PMD could not be locked
+ * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
+ * - HPAGE_PMD_NR if protections changed and TLB flush necessary
+ */
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot, int prot_numa)
{
@@ -1440,22 +1487,34 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
if (__pmd_trans_huge_lock(pmd, vma) == 1) {
pmd_t entry;
- entry = pmdp_get_and_clear(mm, addr, pmd);
+ ret = 1;
if (!prot_numa) {
+ entry = pmdp_get_and_clear(mm, addr, pmd);
entry = pmd_modify(entry, newprot);
+ ret = HPAGE_PMD_NR;
BUG_ON(pmd_write(entry));
} else {
struct page *page = pmd_page(*pmd);
- /* only check non-shared pages */
- if (page_mapcount(page) == 1 &&
+ /*
+ * Do not trap faults against the zero page. The
+ * read-only data is likely to be read-cached on the
+ * local CPU cache and it is less useful to know about
+ * local vs remote hits on the zero page.
+ */
+ if (!is_huge_zero_page(page) &&
!pmd_numa(*pmd)) {
+ entry = pmdp_get_and_clear(mm, addr, pmd);
entry = pmd_mknuma(entry);
+ ret = HPAGE_PMD_NR;
}
}
- set_pmd_at(mm, addr, pmd, entry);
+
+ /* Set PMD if cleared earlier */
+ if (ret == HPAGE_PMD_NR)
+ set_pmd_at(mm, addr, pmd, entry);
+
spin_unlock(&vma->vm_mm->page_table_lock);
- ret = 1;
}
return ret;
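
The tri-state return value documented above lets the caller distinguish "not a huge PMD here" from "handled but nothing changed" from "changed, flush needed". A hypothetical caller sketch (the real mprotect-path caller is not part of this hunk; names are illustrative):

/* Returns the number of base pages whose protections actually changed. */
static unsigned long my_change_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                                   unsigned long addr, pgprot_t newprot,
                                   int prot_numa)
{
        int ret = change_huge_pmd(vma, pmd, addr, newprot, prot_numa);

        if (ret == HPAGE_PMD_NR)
                return HPAGE_PMD_NR;    /* changed: count it, TLB flush needed */
        if (ret == 1)
                return 0;               /* locked as huge, but nothing to flush */
        return 0;                       /* 0: fall back to the PTE-level loop */
}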
@@ -1636,7 +1695,7 @@ static void __split_huge_page_refcount(struct page *page,
page_tail->mapping = page->mapping;
page_tail->index = page->index + i;
- page_nid_xchg_last(page_tail, page_nid_last(page));
+ page_cpupid_xchg_last(page_tail, page_cpupid_last(page));
BUG_ON(!PageAnon(page_tail));
BUG_ON(!PageUptodate(page_tail));
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 72467914b856..72f9decb0104 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -81,8 +81,9 @@ restart:
* decrement nr_to_walk first so that we don't livelock if we
* get stuck on large numbers of LRU_RETRY items
*/
- if (--(*nr_to_walk) == 0)
+ if (!*nr_to_walk)
break;
+ --*nr_to_walk;
ret = isolate(item, &nlru->lock, cb_arg);
switch (ret) {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 34d3ca9572d6..f3ac76d903f0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -54,6 +54,7 @@
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
+#include <linux/lockdep.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
@@ -311,7 +312,7 @@ struct mem_cgroup {
atomic_t dead_count;
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
- struct tcp_memcontrol tcp_mem;
+ struct cg_proto tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
/* analogous to slab_common's slab_caches list. per-memcg */
@@ -498,6 +499,29 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
return (memcg == root_mem_cgroup);
}
+/*
+ * We restrict the id to the range [1, 65535], so it can fit into
+ * an unsigned short.
+ */
+#define MEM_CGROUP_ID_MAX USHRT_MAX
+
+static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+{
+ /*
+ * The ID of the root cgroup is 0, but memcg treats 0 as an
+ * invalid ID, so we return (cgroup_id + 1).
+ */
+ return memcg->css.cgroup->id + 1;
+}
+
+static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+ struct cgroup_subsys_state *css;
+
+ css = css_from_id(id - 1, &mem_cgroup_subsys);
+ return mem_cgroup_from_css(css);
+}
+
/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
@@ -550,13 +574,13 @@ struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
if (!memcg || mem_cgroup_is_root(memcg))
return NULL;
- return &memcg->tcp_mem.cg_proto;
+ return &memcg->tcp_mem;
}
EXPORT_SYMBOL(tcp_proto_cgroup);
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
- if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
+ if (!memcg_proto_activated(&memcg->tcp_mem))
return;
static_key_slow_dec(&memcg_socket_limit_enabled);
}
@@ -569,16 +593,11 @@ static void disarm_sock_keys(struct mem_cgroup *memcg)
#ifdef CONFIG_MEMCG_KMEM
/*
* This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
- * There are two main reasons for not using the css_id for this:
- * 1) this works better in sparse environments, where we have a lot of memcgs,
- * but only a few kmem-limited. Or also, if we have, for instance, 200
- * memcgs, and none but the 200th is kmem-limited, we'd have to have a
- * 200 entry array for that.
- *
- * 2) In order not to violate the cgroup API, we would like to do all memory
- * allocation in ->create(). At that point, we haven't yet allocated the
- * css_id. Having a separate index prevents us from messing with the cgroup
- * core for this
+ * The main reason for not using the cgroup id for this:
+ * it works better in sparse environments, where we have a lot of memcgs,
+ * but only a few kmem-limited. For instance, if we have 200
+ * memcgs, and none but the 200th is kmem-limited, we'd have to have a
+ * 200 entry array for that.
*
* The current size of the caches array is stored in
* memcg_limited_groups_array_size. It will double each time we have to
@@ -593,14 +612,14 @@ int memcg_limited_groups_array_size;
* cgroups is a reasonable guess. In the future, it could be a parameter or
* tunable, but that is strictly not necessary.
*
- * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get
+ * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
* this constant directly from cgroup, but it is understandable that this is
* better kept as an internal representation in cgroup.c. In any case, the
- * css_id space is not getting any smaller, and we don't have to necessarily
+ * cgrp_id space is not getting any smaller, and we don't have to necessarily
* increase ours as well if it increases.
*/
#define MEMCG_CACHES_MIN_SIZE 4
-#define MEMCG_CACHES_MAX_SIZE 65535
+#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
/*
* A lot of the calls to the cache allocation functions are expected to be
@@ -1407,7 +1426,7 @@ bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
return true;
if (!root_memcg->use_hierarchy || !memcg)
return false;
- return css_is_ancestor(&memcg->css, &root_memcg->css);
+ return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
}
static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
@@ -2046,6 +2065,12 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
return total;
}
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map memcg_oom_lock_dep_map = {
+ .name = "memcg_oom_lock",
+};
+#endif
+
static DEFINE_SPINLOCK(memcg_oom_lock);
/*
@@ -2083,7 +2108,8 @@ static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
}
iter->oom_lock = false;
}
- }
+ } else
+ mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
spin_unlock(&memcg_oom_lock);
@@ -2095,6 +2121,7 @@ static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
struct mem_cgroup *iter;
spin_lock(&memcg_oom_lock);
+ mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
for_each_mem_cgroup_tree(iter, memcg)
iter->oom_lock = false;
spin_unlock(&memcg_oom_lock);
@@ -2765,10 +2792,10 @@ done:
*ptr = memcg;
return 0;
nomem:
- *ptr = NULL;
- if (gfp_mask & __GFP_NOFAIL)
- return 0;
- return -ENOMEM;
+ if (!(gfp_mask & __GFP_NOFAIL)) {
+ *ptr = NULL;
+ return -ENOMEM;
+ }
bypass:
*ptr = root_mem_cgroup;
return -EINTR;
@@ -2817,15 +2844,10 @@ static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
*/
static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
{
- struct cgroup_subsys_state *css;
-
/* ID 0 is unused ID */
if (!id)
return NULL;
- css = css_lookup(&mem_cgroup_subsys, id);
- if (!css)
- return NULL;
- return mem_cgroup_from_css(css);
+ return mem_cgroup_from_id(id);
}
struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
@@ -3773,8 +3795,7 @@ void mem_cgroup_move_account_page_stat(struct mem_cgroup *from,
{
/* Update stat data for mem_cgroup */
preempt_disable();
- WARN_ON_ONCE(from->stat->count[idx] < nr_pages);
- __this_cpu_add(from->stat->count[idx], -nr_pages);
+ __this_cpu_sub(from->stat->count[idx], nr_pages);
__this_cpu_add(to->stat->count[idx], nr_pages);
preempt_enable();
}
@@ -4342,7 +4363,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
* css_get() was called in uncharge().
*/
if (do_swap_account && swapout && memcg)
- swap_cgroup_record(ent, css_id(&memcg->css));
+ swap_cgroup_record(ent, mem_cgroup_id(memcg));
}
#endif
@@ -4394,8 +4415,8 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
{
unsigned short old_id, new_id;
- old_id = css_id(&from->css);
- new_id = css_id(&to->css);
+ old_id = mem_cgroup_id(from);
+ new_id = mem_cgroup_id(to);
if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
mem_cgroup_swap_statistics(from, false);
@@ -4950,31 +4971,18 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
} while (usage > 0);
}
-/*
- * This mainly exists for tests during the setting of set of use_hierarchy.
- * Since this is the very setting we are changing, the current hierarchy value
- * is meaningless
- */
-static inline bool __memcg_has_children(struct mem_cgroup *memcg)
-{
- struct cgroup_subsys_state *pos;
-
- /* bounce at first found */
- css_for_each_child(pos, &memcg->css)
- return true;
- return false;
-}
-
-/*
- * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed
- * to be already dead (as in mem_cgroup_force_empty, for instance). This is
- * from mem_cgroup_count_children(), in the sense that we don't really care how
- * many children we have; we only need to know if we have any. It also counts
- * any memcg without hierarchy as infertile.
- */
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
- return memcg->use_hierarchy && __memcg_has_children(memcg);
+ lockdep_assert_held(&memcg_create_mutex);
+ /*
+ * The lock does not prevent additions to or deletions from the
+ * list of children, but it prevents a new child from being
+ * initialized based on this parent in css_online(), so it's
+ * enough to decide whether hierarchically inherited
+ * attributes can still be changed or not.
+ */
+ return memcg->use_hierarchy &&
+ !list_empty(&memcg->css.cgroup->children);
}
/*
@@ -5054,7 +5062,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
*/
if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
(val == 1 || val == 0)) {
- if (!__memcg_has_children(memcg))
+ if (list_empty(&memcg->css.cgroup->children))
memcg->use_hierarchy = val;
else
retval = -EBUSY;
@@ -6171,7 +6179,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
size_t size = memcg_size();
mem_cgroup_remove_from_trees(memcg);
- free_css_id(&mem_cgroup_subsys, &memcg->css);
for_each_node(node)
free_mem_cgroup_per_zone_info(memcg, node);
@@ -6274,6 +6281,9 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
int error = 0;
+ if (css->cgroup->id > MEM_CGROUP_ID_MAX)
+ return -ENOSPC;
+
if (!parent)
return 0;
@@ -6545,7 +6555,7 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
}
/* There is a swap entry and a page doesn't exist or isn't charged */
if (ent.val && !ret &&
- css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
+ mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
ret = MC_TARGET_SWAP;
if (target)
target->ent = ent;
@@ -6965,7 +6975,6 @@ struct cgroup_subsys mem_cgroup_subsys = {
.bind = mem_cgroup_bind,
.base_cftypes = mem_cgroup_files,
.early_init = 0,
- .use_id = 1,
};
#ifdef CONFIG_MEMCG_SWAP
diff --git a/mm/memory.c b/mm/memory.c
index 1311f26497e6..1f2287eaa88e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -69,8 +69,8 @@
#include "internal.h"
-#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
-#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nid.
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif
#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -2721,6 +2721,14 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
get_page(dirty_page);
reuse:
+ /*
+ * Clear the page's cpupid information as the existing
+ * information potentially belongs to a now completely
+ * unrelated process.
+ */
+ if (old_page)
+ page_cpupid_xchg_last(old_page, (1 << LAST_CPUPID_SHIFT) - 1);
+
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = pte_mkyoung(orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -3521,13 +3529,16 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
}
int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
- unsigned long addr, int current_nid)
+ unsigned long addr, int page_nid,
+ int *flags)
{
get_page(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
- if (current_nid == numa_node_id())
+ if (page_nid == numa_node_id()) {
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+ *flags |= TNF_FAULT_LOCAL;
+ }
return mpol_misplaced(page, vma, addr);
}
@@ -3537,9 +3548,11 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
{
struct page *page = NULL;
spinlock_t *ptl;
- int current_nid = -1;
+ int page_nid = -1;
+ int last_cpupid;
int target_nid;
bool migrated = false;
+ int flags = 0;
/*
* The "pte" at this point cannot be used safely without
@@ -3566,123 +3579,44 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
pte_unmap_unlock(ptep, ptl);
return 0;
}
+ BUG_ON(is_zero_pfn(page_to_pfn(page)));
+
+ /*
+ * Avoid grouping on DSO/COW pages in particular and RO pages
+ * in general; RO pages shouldn't hurt as much anyway since
+ * they can be in shared cache state.
+ */
+ if (!pte_write(pte))
+ flags |= TNF_NO_GROUP;
+
+ /*
+ * Flag if the page is shared between multiple address spaces. This
+ * is later used when determining whether to group tasks together
+ */
+ if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
+ flags |= TNF_SHARED;
- current_nid = page_to_nid(page);
- target_nid = numa_migrate_prep(page, vma, addr, current_nid);
+ last_cpupid = page_cpupid_last(page);
+ page_nid = page_to_nid(page);
+ target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags);
pte_unmap_unlock(ptep, ptl);
if (target_nid == -1) {
- /*
- * Account for the fault against the current node if it not
- * being replaced regardless of where the page is located.
- */
- current_nid = numa_node_id();
put_page(page);
goto out;
}
/* Migrate to the requested node */
- migrated = migrate_misplaced_page(page, target_nid);
- if (migrated)
- current_nid = target_nid;
-
-out:
- if (current_nid != -1)
- task_numa_fault(current_nid, 1, migrated);
- return 0;
-}
-
-/* NUMA hinting page fault entry point for regular pmds */
-#ifdef CONFIG_NUMA_BALANCING
-static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, pmd_t *pmdp)
-{
- pmd_t pmd;
- pte_t *pte, *orig_pte;
- unsigned long _addr = addr & PMD_MASK;
- unsigned long offset;
- spinlock_t *ptl;
- bool numa = false;
- int local_nid = numa_node_id();
-
- spin_lock(&mm->page_table_lock);
- pmd = *pmdp;
- if (pmd_numa(pmd)) {
- set_pmd_at(mm, _addr, pmdp, pmd_mknonnuma(pmd));
- numa = true;
- }
- spin_unlock(&mm->page_table_lock);
-
- if (!numa)
- return 0;
-
- /* we're in a page fault so some vma must be in the range */
- BUG_ON(!vma);
- BUG_ON(vma->vm_start >= _addr + PMD_SIZE);
- offset = max(_addr, vma->vm_start) & ~PMD_MASK;
- VM_BUG_ON(offset >= PMD_SIZE);
- orig_pte = pte = pte_offset_map_lock(mm, pmdp, _addr, &ptl);
- pte += offset >> PAGE_SHIFT;
- for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
- pte_t pteval = *pte;
- struct page *page;
- int curr_nid = local_nid;
- int target_nid;
- bool migrated;
- if (!pte_present(pteval))
- continue;
- if (!pte_numa(pteval))
- continue;
- if (addr >= vma->vm_end) {
- vma = find_vma(mm, addr);
- /* there's a pte present so there must be a vma */
- BUG_ON(!vma);
- BUG_ON(addr < vma->vm_start);
- }
- if (pte_numa(pteval)) {
- pteval = pte_mknonnuma(pteval);
- set_pte_at(mm, addr, pte, pteval);
- }
- page = vm_normal_page(vma, addr, pteval);
- if (unlikely(!page))
- continue;
- /* only check non-shared pages */
- if (unlikely(page_mapcount(page) != 1))
- continue;
-
- /*
- * Note that the NUMA fault is later accounted to either
- * the node that is currently running or where the page is
- * migrated to.
- */
- curr_nid = local_nid;
- target_nid = numa_migrate_prep(page, vma, addr,
- page_to_nid(page));
- if (target_nid == -1) {
- put_page(page);
- continue;
- }
-
- /* Migrate to the requested node */
- pte_unmap_unlock(pte, ptl);
- migrated = migrate_misplaced_page(page, target_nid);
- if (migrated)
- curr_nid = target_nid;
- task_numa_fault(curr_nid, 1, migrated);
-
- pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+ migrated = migrate_misplaced_page(page, vma, target_nid);
+ if (migrated) {
+ page_nid = target_nid;
+ flags |= TNF_MIGRATED;
}
- pte_unmap_unlock(orig_pte, ptl);
+out:
+ if (page_nid != -1)
+ task_numa_fault(last_cpupid, page_nid, 1, flags);
return 0;
}
-#else
-static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, pmd_t *pmdp)
-{
- BUG();
- return 0;
-}
-#endif /* CONFIG_NUMA_BALANCING */
/*
* These routines also need to handle stuff like marking pages dirty
@@ -3822,8 +3756,8 @@ retry:
}
}
- if (pmd_numa(*pmd))
- return do_pmd_numa_page(mm, vma, address, pmd);
+ /* THP should already have been handled */
+ BUG_ON(pmd_numa(*pmd));
/*
* Use __pte_alloc instead of pte_alloc_map, because we can't
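The memory.c changes above replace the per-page "last nid" tracking with a "last cpupid" that packs the CPU and the low pid bits of the last accessor into one value. A rough standalone illustration of that packing (not part of the patch; the field width below is an arbitrary assumption, whereas the kernel sizes the fields from NR_CPUS and the pid width):

	#include <stdio.h>

	/* Illustrative field width only; the kernel derives the real
	 * shifts from NR_CPUS and the pid width. */
	#define PID_BITS	8
	#define PID_MASK	((1 << PID_BITS) - 1)

	static int make_cpupid(int cpu, int pid)
	{
		/* Pack the CPU number and the low pid bits into one integer. */
		return (cpu << PID_BITS) | (pid & PID_MASK);
	}

	static int cpupid_to_cpu(int cpupid)
	{
		return cpupid >> PID_BITS;
	}

	static int cpupid_to_pid(int cpupid)
	{
		return cpupid & PID_MASK;
	}

	int main(void)
	{
		int cpupid = make_cpupid(3, 4242);

		printf("cpu=%d pid(low bits)=%d\n",
		       cpupid_to_cpu(cpupid), cpupid_to_pid(cpupid));
		return 0;
	}

Comparing such a value against the current cpu/pid is what lets the fault path distinguish "same task came back" from "a different task on another node touched this page".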
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 04729647f359..71cb253368cb 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1679,6 +1679,30 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
return pol;
}
+bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
+{
+ struct mempolicy *pol = get_task_policy(task);
+ if (vma) {
+ if (vma->vm_ops && vma->vm_ops->get_policy) {
+ bool ret = false;
+
+ pol = vma->vm_ops->get_policy(vma, vma->vm_start);
+ if (pol && (pol->flags & MPOL_F_MOF))
+ ret = true;
+ mpol_cond_put(pol);
+
+ return ret;
+ } else if (vma->vm_policy) {
+ pol = vma->vm_policy;
+ }
+ }
+
+ if (!pol)
+ return default_policy.flags & MPOL_F_MOF;
+
+ return pol->flags & MPOL_F_MOF;
+}
+
static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
enum zone_type dynamic_policy_zone = policy_zone;
@@ -2277,6 +2301,35 @@ static void sp_free(struct sp_node *n)
kmem_cache_free(sn_cache, n);
}
+#ifdef CONFIG_NUMA_BALANCING
+static bool numa_migrate_deferred(struct task_struct *p, int last_cpupid)
+{
+ /* Never defer a private fault */
+ if (cpupid_match_pid(p, last_cpupid))
+ return false;
+
+ if (p->numa_migrate_deferred) {
+ p->numa_migrate_deferred--;
+ return true;
+ }
+ return false;
+}
+
+static inline void defer_numa_migrate(struct task_struct *p)
+{
+ p->numa_migrate_deferred = sysctl_numa_balancing_migrate_deferred;
+}
+#else
+static inline bool numa_migrate_deferred(struct task_struct *p, int last_cpupid)
+{
+ return false;
+}
+
+static inline void defer_numa_migrate(struct task_struct *p)
+{
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
/**
* mpol_misplaced - check whether current page node is valid in policy
*
@@ -2300,6 +2353,8 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
struct zone *zone;
int curnid = page_to_nid(page);
unsigned long pgoff;
+ int thiscpu = raw_smp_processor_id();
+ int thisnid = cpu_to_node(thiscpu);
int polnid = -1;
int ret = -1;
@@ -2348,9 +2403,11 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
/* Migrate the page towards the node whose CPU is referencing it */
if (pol->flags & MPOL_F_MORON) {
- int last_nid;
+ int last_cpupid;
+ int this_cpupid;
- polnid = numa_node_id();
+ polnid = thisnid;
+ this_cpupid = cpu_pid_to_cpupid(thiscpu, current->pid);
/*
* Multi-stage node selection is used in conjunction
@@ -2373,8 +2430,25 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
* it less likely we act on an unlikely task<->page
* relation.
*/
- last_nid = page_nid_xchg_last(page, polnid);
- if (last_nid != polnid)
+ last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
+ if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid) {
+
+ /* See sysctl_numa_balancing_migrate_deferred comment */
+ if (!cpupid_match_pid(current, last_cpupid))
+ defer_numa_migrate(current);
+
+ goto out;
+ }
+
+ /*
+ * The quadratic filter above reduces extraneous migration
+ * of shared pages somewhat. This code reduces it even more,
+ * reducing the overhead of page migrations of shared pages.
+ * This makes workloads with shared pages rely more on
+ * "move task near its memory", and less on "move memory
+ * towards its task", which is exactly what we want.
+ */
+ if (numa_migrate_deferred(current, last_cpupid))
goto out;
}
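The numa_migrate_deferred()/defer_numa_migrate() pair above amounts to a per-task countdown: once the two-stage filter rejects a migration, the next sysctl_numa_balancing_migrate_deferred shared faults skip migration entirely. A standalone sketch of that counter, with a hypothetical struct task and budget variable standing in for task_struct and the sysctl:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for task_struct and the sysctl knob. */
	struct task {
		int numa_migrate_deferred;
	};

	static int migrate_deferred_budget = 16;

	/* Arm the counter: the next 'budget' faults skip migration. */
	static void defer_migrate(struct task *t)
	{
		t->numa_migrate_deferred = migrate_deferred_budget;
	}

	/* Consume one pending deferral, if any. */
	static bool migration_deferred(struct task *t)
	{
		if (t->numa_migrate_deferred) {
			t->numa_migrate_deferred--;
			return true;
		}
		return false;
	}

	int main(void)
	{
		struct task t = { 0 };
		int skipped = 0, i;

		defer_migrate(&t);
		for (i = 0; i < 20; i++)
			if (migration_deferred(&t))
				skipped++;

		printf("skipped %d of 20 faults\n", skipped); /* 16 */
		return 0;
	}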
diff --git a/mm/migrate.c b/mm/migrate.c
index 7a7325ee1d08..dfc8300ecbb2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -445,6 +445,8 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
*/
void migrate_page_copy(struct page *newpage, struct page *page)
{
+ int cpupid;
+
if (PageHuge(page) || PageTransHuge(page))
copy_huge_page(newpage, page);
else
@@ -481,6 +483,13 @@ void migrate_page_copy(struct page *newpage, struct page *page)
__set_page_dirty_nobuffers(newpage);
}
+ /*
+ * Copy NUMA information to the new page, to prevent over-eager
+ * future migrations of this same page.
+ */
+ cpupid = page_cpupid_xchg_last(page, -1);
+ page_cpupid_xchg_last(newpage, cpupid);
+
mlock_migrate_page(newpage, page);
ksm_migrate_page(newpage, page);
/*
@@ -1500,7 +1509,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
__GFP_NOWARN) &
~GFP_IOFS, 0);
if (newpage)
- page_nid_xchg_last(newpage, page_nid_last(page));
+ page_cpupid_xchg_last(newpage, page_cpupid_last(page));
return newpage;
}
@@ -1601,7 +1610,8 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
* node. Caller is expected to have an elevated reference count on
* the page that will be dropped by this function before returning.
*/
-int migrate_misplaced_page(struct page *page, int node)
+int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
+ int node)
{
pg_data_t *pgdat = NODE_DATA(node);
int isolated;
@@ -1609,10 +1619,11 @@ int migrate_misplaced_page(struct page *page, int node)
LIST_HEAD(migratepages);
/*
- * Don't migrate pages that are mapped in multiple processes.
- * TODO: Handle false sharing detection instead of this hammer
+ * Don't migrate file pages that are mapped in multiple processes
+ * with execute permissions as they are probably shared libraries.
*/
- if (page_mapcount(page) != 1)
+ if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
+ (vma->vm_flags & VM_EXEC))
goto out;
/*
@@ -1663,13 +1674,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
int page_lru = page_is_file_cache(page);
/*
- * Don't migrate pages that are mapped in multiple processes.
- * TODO: Handle false sharing detection instead of this hammer
- */
- if (page_mapcount(page) != 1)
- goto out_dropref;
-
- /*
* Rate-limit the amount of data that is being migrated to a node.
* Optimal placement is no good if the memory bus is saturated and
* all the time is being spent migrating!
@@ -1682,7 +1686,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
if (!new_page)
goto out_fail;
- page_nid_xchg_last(new_page, page_nid_last(page));
+ page_cpupid_xchg_last(new_page, page_cpupid_last(page));
isolated = numamigrate_isolate_page(pgdat, page);
if (!isolated) {
@@ -1715,12 +1719,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
unlock_page(new_page);
put_page(new_page); /* Free it */
- unlock_page(page);
+ /* Retake the caller's reference and put the page back on the LRU */
+ get_page(page);
putback_lru_page(page);
-
- count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
- isolated = 0;
- goto out;
+ mod_zone_page_state(page_zone(page),
+ NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
+ goto out_fail;
}
/*
@@ -1737,9 +1741,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry);
- page_add_new_anon_rmap(new_page, vma, haddr);
-
+ pmdp_clear_flush(vma, haddr, pmd);
set_pmd_at(mm, haddr, pmd, entry);
+ page_add_new_anon_rmap(new_page, vma, haddr);
update_mmu_cache_pmd(vma, address, &entry);
page_remove_rmap(page);
/*
@@ -1758,7 +1762,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
-out:
mod_zone_page_state(page_zone(page),
NR_ISOLATED_ANON + page_lru,
-HPAGE_PMD_NR);
@@ -1767,6 +1770,10 @@ out:
out_fail:
count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
out_dropref:
+ entry = pmd_mknonnuma(entry);
+ set_pmd_at(mm, haddr, pmd, entry);
+ update_mmu_cache_pmd(vma, address, &entry);
+
unlock_page(page);
put_page(page);
return 0;
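migrate_misplaced_page() above now receives the VMA so the mapcount check can be narrowed to shared, file-backed, executable mappings (most likely shared library text) instead of rejecting every multiply-mapped page. The new condition as a standalone predicate (illustrative only; plain parameters stand in for the page and vma state):

	#include <stdbool.h>
	#include <stdio.h>

	#define VM_EXEC	0x4	/* same bit value the kernel uses */

	/* Mirror of the new filter: refuse migration only when the page
	 * is mapped more than once, file backed, and mapped executable. */
	static bool skip_migration(int mapcount, bool file_backed,
				   unsigned long vm_flags)
	{
		return mapcount != 1 && file_backed && (vm_flags & VM_EXEC);
	}

	int main(void)
	{
		/* Shared library text: shared, file backed, executable. */
		printf("shared lib page skipped: %d\n",
		       skip_migration(4, true, VM_EXEC));

		/* A shared anonymous page is now eligible for migration. */
		printf("shared anon page skipped: %d\n",
		       skip_migration(4, false, 0));
		return 0;
	}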
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 633c08863fd8..68562e92d50c 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -71,26 +71,26 @@ void __init mminit_verify_pageflags_layout(void)
unsigned long or_mask, add_mask;
shift = 8 * sizeof(unsigned long);
- width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_NID_SHIFT;
+ width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_CPUPID_SHIFT;
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
- "Section %d Node %d Zone %d Lastnid %d Flags %d\n",
+ "Section %d Node %d Zone %d Lastcpupid %d Flags %d\n",
SECTIONS_WIDTH,
NODES_WIDTH,
ZONES_WIDTH,
- LAST_NID_WIDTH,
+ LAST_CPUPID_WIDTH,
NR_PAGEFLAGS);
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
- "Section %d Node %d Zone %d Lastnid %d\n",
+ "Section %d Node %d Zone %d Lastcpupid %d\n",
SECTIONS_SHIFT,
NODES_SHIFT,
ZONES_SHIFT,
- LAST_NID_SHIFT);
+ LAST_CPUPID_SHIFT);
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
- "Section %lu Node %lu Zone %lu Lastnid %lu\n",
+ "Section %lu Node %lu Zone %lu Lastcpupid %lu\n",
(unsigned long)SECTIONS_PGSHIFT,
(unsigned long)NODES_PGSHIFT,
(unsigned long)ZONES_PGSHIFT,
- (unsigned long)LAST_NID_PGSHIFT);
+ (unsigned long)LAST_CPUPID_PGSHIFT);
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
"Node/Zone ID: %lu -> %lu\n",
(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
@@ -102,9 +102,9 @@ void __init mminit_verify_pageflags_layout(void)
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
"Node not in page flags");
#endif
-#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
- "Last nid not in page flags");
+ "Last cpupid not in page flags");
#endif
if (SECTIONS_WIDTH) {
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 2ac0afbd68f3..bf34fb8556db 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -97,20 +97,20 @@ void lruvec_init(struct lruvec *lruvec)
INIT_LIST_HEAD(&lruvec->lists[lru]);
}
-#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_NID_NOT_IN_PAGE_FLAGS)
-int page_nid_xchg_last(struct page *page, int nid)
+#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
+int page_cpupid_xchg_last(struct page *page, int cpupid)
{
unsigned long old_flags, flags;
- int last_nid;
+ int last_cpupid;
do {
old_flags = flags = page->flags;
- last_nid = page_nid_last(page);
+ last_cpupid = page_cpupid_last(page);
- flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
- flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
+ flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
+ flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
- return last_nid;
+ return last_cpupid;
}
#endif
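page_cpupid_xchg_last() above keeps the usual lock-free read-modify-write pattern on page->flags: recompute the word with the field replaced and retry the compare-and-swap until no other updater raced. A userspace analogue of that loop, using GCC's __sync_val_compare_and_swap and made-up shift/mask values in place of LAST_CPUPID_PGSHIFT/LAST_CPUPID_MASK:

	#include <stdio.h>

	/* Made-up field layout; the kernel computes these from the
	 * page-flags layout at build time. */
	#define FIELD_SHIFT	8UL
	#define FIELD_MASK	0xffUL

	static unsigned long flags_word;

	/* Atomically replace the 8-bit field inside flags_word and return
	 * the previous field value, in the style of page_cpupid_xchg_last(). */
	static unsigned long field_xchg_last(unsigned long newval)
	{
		unsigned long old, updated, last;

		do {
			old = flags_word;
			last = (old >> FIELD_SHIFT) & FIELD_MASK;

			updated = old & ~(FIELD_MASK << FIELD_SHIFT);
			updated |= (newval & FIELD_MASK) << FIELD_SHIFT;
		} while (__sync_val_compare_and_swap(&flags_word, old,
						     updated) != old);

		return last;
	}

	int main(void)
	{
		field_xchg_last(0x2a);
		printf("previous field: %#lx\n", field_xchg_last(0x07));
		return 0;
	}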
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a3af058f68e4..a597f2ffcd6f 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -37,14 +37,12 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end, pgprot_t newprot,
- int dirty_accountable, int prot_numa, bool *ret_all_same_node)
+ int dirty_accountable, int prot_numa)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *pte, oldpte;
spinlock_t *ptl;
unsigned long pages = 0;
- bool all_same_node = true;
- int last_nid = -1;
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
arch_enter_lazy_mmu_mode();
@@ -63,15 +61,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
page = vm_normal_page(vma, addr, oldpte);
if (page) {
- int this_nid = page_to_nid(page);
- if (last_nid == -1)
- last_nid = this_nid;
- if (last_nid != this_nid)
- all_same_node = false;
-
- /* only check non-shared pages */
- if (!pte_numa(oldpte) &&
- page_mapcount(page) == 1) {
+ if (!pte_numa(oldpte)) {
ptent = pte_mknuma(ptent);
updated = true;
}
@@ -104,33 +94,17 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (pte_swp_soft_dirty(oldpte))
newpte = pte_swp_mksoft_dirty(newpte);
set_pte_at(mm, addr, pte, newpte);
+
+ pages++;
}
- pages++;
}
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
- *ret_all_same_node = all_same_node;
return pages;
}
-#ifdef CONFIG_NUMA_BALANCING
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmd)
-{
- spin_lock(&mm->page_table_lock);
- set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
- spin_unlock(&mm->page_table_lock);
-}
-#else
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmd)
-{
- BUG();
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
pud_t *pud, unsigned long addr, unsigned long end,
pgprot_t newprot, int dirty_accountable, int prot_numa)
@@ -138,34 +112,33 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
pmd_t *pmd;
unsigned long next;
unsigned long pages = 0;
- bool all_same_node;
pmd = pmd_offset(pud, addr);
do {
+ unsigned long this_pages;
+
next = pmd_addr_end(addr, end);
if (pmd_trans_huge(*pmd)) {
if (next - addr != HPAGE_PMD_SIZE)
split_huge_page_pmd(vma, addr, pmd);
- else if (change_huge_pmd(vma, pmd, addr, newprot,
- prot_numa)) {
- pages += HPAGE_PMD_NR;
- continue;
+ else {
+ int nr_ptes = change_huge_pmd(vma, pmd, addr,
+ newprot, prot_numa);
+
+ if (nr_ptes) {
+ if (nr_ptes == HPAGE_PMD_NR)
+ pages++;
+
+ continue;
+ }
}
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
continue;
- pages += change_pte_range(vma, pmd, addr, next, newprot,
- dirty_accountable, prot_numa, &all_same_node);
-
- /*
- * If we are changing protections for NUMA hinting faults then
- * set pmd_numa if the examined pages were all on the same
- * node. This allows a regular PMD to be handled as one fault
- * and effectively batches the taking of the PTL
- */
- if (prot_numa && all_same_node)
- change_pmd_protnuma(vma->vm_mm, addr, pmd);
+ this_pages = change_pte_range(vma, pmd, addr, next, newprot,
+ dirty_accountable, prot_numa);
+ pages += this_pages;
} while (pmd++, addr = next, addr != end);
return pages;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dd886fac451a..73d812f16dde 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -626,7 +626,7 @@ static inline int free_pages_check(struct page *page)
bad_page(page);
return 1;
}
- page_nid_reset_last(page);
+ page_cpupid_reset_last(page);
if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
return 0;
@@ -4015,7 +4015,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
mminit_verify_page_links(page, zone, nid, pfn);
init_page_count(page);
page_mapcount_reset(page);
- page_nid_reset_last(page);
+ page_cpupid_reset_last(page);
SetPageReserved(page);
/*
* Mark the block movable so that blocks are reserved for
diff --git a/mm/page_io.c b/mm/page_io.c
index 8c79a4764be0..b8608c944989 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -31,13 +31,13 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
bio = bio_alloc(gfp_flags, 1);
if (bio) {
- bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
- bio->bi_sector <<= PAGE_SHIFT - 9;
+ bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
+ bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
bio->bi_io_vec[0].bv_page = page;
bio->bi_io_vec[0].bv_len = PAGE_SIZE;
bio->bi_io_vec[0].bv_offset = 0;
bio->bi_vcnt = 1;
- bio->bi_size = PAGE_SIZE;
+ bio->bi_iter.bi_size = PAGE_SIZE;
bio->bi_end_io = end_io;
}
return bio;
@@ -62,7 +62,7 @@ void end_swap_bio_write(struct bio *bio, int err)
printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
imajor(bio->bi_bdev->bd_inode),
iminor(bio->bi_bdev->bd_inode),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
ClearPageReclaim(page);
}
end_page_writeback(page);
@@ -80,7 +80,7 @@ void end_swap_bio_read(struct bio *bio, int err)
printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
imajor(bio->bi_bdev->bd_inode),
iminor(bio->bi_bdev->bd_inode),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
goto out;
}
@@ -258,11 +258,14 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
if (sis->flags & SWP_FILE) {
struct kiocb kiocb;
struct file *swap_file = sis->swap_file;
- struct address_space *mapping = swap_file->f_mapping;
- struct iovec iov = {
- .iov_base = kmap(page),
- .iov_len = PAGE_SIZE,
+ struct bio_vec bvec = {
+ .bv_page = kmap(page),
+ .bv_len = PAGE_SIZE,
+ .bv_offset = 0,
};
+ struct iov_iter iter;
+
+ iov_iter_init_bvec(&iter, &bvec, 1, PAGE_SIZE, 0);
init_sync_kiocb(&kiocb, swap_file);
kiocb.ki_pos = page_file_offset(page);
@@ -270,9 +273,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
set_page_writeback(page);
unlock_page(page);
- ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
- &kiocb, &iov,
- kiocb.ki_pos, 1);
+ ret = swap_file->f_op->write_iter(&kiocb, &iter, kiocb.ki_pos);
kunmap(page);
if (ret == PAGE_SIZE) {
count_vm_event(PSWPOUT);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 5da2cbcfdbb5..2beeabf502c5 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -242,7 +242,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
if (err)
break;
pgd++;
- } while (addr = next, addr != end);
+ } while (addr = next, addr < end);
return err;
}
diff --git a/mm/percpu.c b/mm/percpu.c
index 8c8e08f3a692..0d10defe951e 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1706,8 +1706,9 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
out_free_areas:
for (group = 0; group < ai->nr_groups; group++)
- free_fn(areas[group],
- ai->groups[group].nr_units * ai->unit_size);
+ if (areas[group])
+ free_fn(areas[group],
+ ai->groups[group].nr_units * ai->unit_size);
out_free:
pcpu_free_alloc_info(ai);
if (areas)
diff --git a/mm/shmem.c b/mm/shmem.c
index 8297623fcaed..8612a95d7d7e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1464,14 +1464,23 @@ shmem_write_end(struct file *file, struct address_space *mapping,
return copied;
}
-static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
+static ssize_t shmem_file_read_iter(struct kiocb *iocb,
+ struct iov_iter *iter, loff_t pos)
{
+ read_descriptor_t desc;
+ loff_t *ppos = &iocb->ki_pos;
+ struct file *filp = iocb->ki_filp;
struct inode *inode = file_inode(filp);
struct address_space *mapping = inode->i_mapping;
pgoff_t index;
unsigned long offset;
enum sgp_type sgp = SGP_READ;
+ desc.written = 0;
+ desc.count = iov_iter_count(iter);
+ desc.arg.data = iter;
+ desc.error = 0;
+
/*
* Might this read be for a stacking filesystem? Then when reading
* holes of a sparse file, we actually need to allocate those pages,
@@ -1498,10 +1507,10 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
break;
}
- desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
- if (desc->error) {
- if (desc->error == -EINVAL)
- desc->error = 0;
+ desc.error = shmem_getpage(inode, index, &page, sgp, NULL);
+ if (desc.error) {
+ if (desc.error == -EINVAL)
+ desc.error = 0;
break;
}
if (page)
@@ -1552,13 +1561,13 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
* "pos" here (the actor routine has to update the user buffer
* pointers and the remaining count).
*/
- ret = actor(desc, page, offset, nr);
+ ret = file_read_iter_actor(&desc, page, offset, nr);
offset += ret;
index += offset >> PAGE_CACHE_SHIFT;
offset &= ~PAGE_CACHE_MASK;
page_cache_release(page);
- if (ret != nr || !desc->count)
+ if (ret != nr || !desc.count)
break;
cond_resched();
@@ -1566,40 +1575,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
file_accessed(filp);
-}
-
-static ssize_t shmem_file_aio_read(struct kiocb *iocb,
- const struct iovec *iov, unsigned long nr_segs, loff_t pos)
-{
- struct file *filp = iocb->ki_filp;
- ssize_t retval;
- unsigned long seg;
- size_t count;
- loff_t *ppos = &iocb->ki_pos;
- retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
- if (retval)
- return retval;
-
- for (seg = 0; seg < nr_segs; seg++) {
- read_descriptor_t desc;
-
- desc.written = 0;
- desc.arg.buf = iov[seg].iov_base;
- desc.count = iov[seg].iov_len;
- if (desc.count == 0)
- continue;
- desc.error = 0;
- do_shmem_file_read(filp, ppos, &desc, file_read_actor);
- retval += desc.written;
- if (desc.error) {
- retval = retval ?: desc.error;
- break;
- }
- if (desc.count > 0)
- break;
- }
- return retval;
+ return desc.written ? desc.written : desc.error;
}
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
@@ -2724,8 +2701,8 @@ static const struct file_operations shmem_file_operations = {
.llseek = shmem_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = shmem_file_aio_read,
- .aio_write = generic_file_aio_write,
+ .read_iter = shmem_file_read_iter,
+ .write_iter = generic_file_write_iter,
.fsync = noop_fsync,
.splice_read = shmem_file_splice_read,
.splice_write = generic_file_splice_write,
diff --git a/mm/slab.c b/mm/slab.c
index 2580db062df9..a983e3084332 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -164,72 +164,6 @@
static bool pfmemalloc_active __read_mostly;
/*
- * kmem_bufctl_t:
- *
- * Bufctl's are used for linking objs within a slab
- * linked offsets.
- *
- * This implementation relies on "struct page" for locating the cache &
- * slab an object belongs to.
- * This allows the bufctl structure to be small (one int), but limits
- * the number of objects a slab (not a cache) can contain when off-slab
- * bufctls are used. The limit is the size of the largest general cache
- * that does not use off-slab slabs.
- * For 32bit archs with 4 kB pages, is this 56.
- * This is not serious, as it is only for large objects, when it is unwise
- * to have too many per slab.
- * Note: This limit can be raised by introducing a general cache whose size
- * is less than 512 (PAGE_SIZE<<3), but greater than 256.
- */
-
-typedef unsigned int kmem_bufctl_t;
-#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
-#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
-#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
-#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
-
-/*
- * struct slab_rcu
- *
- * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
- * arrange for kmem_freepages to be called via RCU. This is useful if
- * we need to approach a kernel structure obliquely, from its address
- * obtained without the usual locking. We can lock the structure to
- * stabilize it and check it's still at the given address, only if we
- * can be sure that the memory has not been meanwhile reused for some
- * other kind of object (which our subsystem's lock might corrupt).
- *
- * rcu_read_lock before reading the address, then rcu_read_unlock after
- * taking the spinlock within the structure expected at that address.
- */
-struct slab_rcu {
- struct rcu_head head;
- struct kmem_cache *cachep;
- void *addr;
-};
-
-/*
- * struct slab
- *
- * Manages the objs in a slab. Placed either at the beginning of mem allocated
- * for a slab, or allocated from an general cache.
- * Slabs are chained into three list: fully used, partial, fully free slabs.
- */
-struct slab {
- union {
- struct {
- struct list_head list;
- unsigned long colouroff;
- void *s_mem; /* including colour offset */
- unsigned int inuse; /* num of objs active in slab */
- kmem_bufctl_t free;
- unsigned short nodeid;
- };
- struct slab_rcu __slab_cover_slab_rcu;
- };
-};
-
-/*
* struct array_cache
*
* Purpose:
@@ -456,18 +390,10 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
return page->slab_cache;
}
-static inline struct slab *virt_to_slab(const void *obj)
-{
- struct page *page = virt_to_head_page(obj);
-
- VM_BUG_ON(!PageSlab(page));
- return page->slab_page;
-}
-
-static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
+static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
unsigned int idx)
{
- return slab->s_mem + cache->size * idx;
+ return page->s_mem + cache->size * idx;
}
/*
@@ -477,9 +403,9 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
* reciprocal_divide(offset, cache->reciprocal_buffer_size)
*/
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
+ const struct page *page, void *obj)
{
- u32 offset = (obj - slab->s_mem);
+ u32 offset = (obj - page->s_mem);
return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
@@ -641,7 +567,7 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
- return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+ return ALIGN(nr_objs * sizeof(unsigned int), align);
}
/*
@@ -660,8 +586,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
* on it. For the latter case, the memory allocated for a
* slab is used for:
*
- * - The struct slab
- * - One kmem_bufctl_t for each object
+ * - One unsigned int for each object
* - Padding to respect alignment of @align
* - @buffer_size bytes for each object
*
@@ -674,8 +599,6 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
mgmt_size = 0;
nr_objs = slab_size / buffer_size;
- if (nr_objs > SLAB_LIMIT)
- nr_objs = SLAB_LIMIT;
} else {
/*
* Ignore padding for the initial guess. The padding
@@ -685,8 +608,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
* into the memory allocation when taking the padding
* into account.
*/
- nr_objs = (slab_size - sizeof(struct slab)) /
- (buffer_size + sizeof(kmem_bufctl_t));
+ nr_objs = (slab_size) / (buffer_size + sizeof(unsigned int));
/*
* This calculated number will be either the right
@@ -696,9 +618,6 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
> slab_size)
nr_objs--;
- if (nr_objs > SLAB_LIMIT)
- nr_objs = SLAB_LIMIT;
-
mgmt_size = slab_mgmt_size(nr_objs, align);
}
*num = nr_objs;
@@ -829,10 +748,8 @@ static struct array_cache *alloc_arraycache(int node, int entries,
return nc;
}
-static inline bool is_slab_pfmemalloc(struct slab *slabp)
+static inline bool is_slab_pfmemalloc(struct page *page)
{
- struct page *page = virt_to_page(slabp->s_mem);
-
return PageSlabPfmemalloc(page);
}
@@ -841,23 +758,23 @@ static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
struct array_cache *ac)
{
struct kmem_cache_node *n = cachep->node[numa_mem_id()];
- struct slab *slabp;
+ struct page *page;
unsigned long flags;
if (!pfmemalloc_active)
return;
spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(slabp, &n->slabs_full, list)
- if (is_slab_pfmemalloc(slabp))
+ list_for_each_entry(page, &n->slabs_full, lru)
+ if (is_slab_pfmemalloc(page))
goto out;
- list_for_each_entry(slabp, &n->slabs_partial, list)
- if (is_slab_pfmemalloc(slabp))
+ list_for_each_entry(page, &n->slabs_partial, lru)
+ if (is_slab_pfmemalloc(page))
goto out;
- list_for_each_entry(slabp, &n->slabs_free, list)
- if (is_slab_pfmemalloc(slabp))
+ list_for_each_entry(page, &n->slabs_free, lru)
+ if (is_slab_pfmemalloc(page))
goto out;
pfmemalloc_active = false;
@@ -897,8 +814,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
*/
n = cachep->node[numa_mem_id()];
if (!list_empty(&n->slabs_free) && force_refill) {
- struct slab *slabp = virt_to_slab(objp);
- ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
+ struct page *page = virt_to_head_page(objp);
+ ClearPageSlabPfmemalloc(page);
clear_obj_pfmemalloc(&objp);
recheck_pfmemalloc_active(cachep, ac);
return objp;
@@ -1099,8 +1016,7 @@ static void drain_alien_cache(struct kmem_cache *cachep,
static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
- struct slab *slabp = virt_to_slab(objp);
- int nodeid = slabp->nodeid;
+ int nodeid = page_to_nid(virt_to_page(objp));
struct kmem_cache_node *n;
struct array_cache *alien = NULL;
int node;
@@ -1111,7 +1027,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
* Make sure we are not freeing a object from another node to the array
* cache on this cpu.
*/
- if (likely(slabp->nodeid == node))
+ if (likely(nodeid == node))
return 0;
n = cachep->node[node];
@@ -1512,6 +1428,8 @@ void __init kmem_cache_init(void)
{
int i;
+ BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
+ sizeof(struct rcu_head));
kmem_cache = &kmem_cache_boot;
setup_node_pointer(kmem_cache);
@@ -1687,7 +1605,7 @@ static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
struct kmem_cache_node *n;
- struct slab *slabp;
+ struct page *page;
unsigned long flags;
int node;
@@ -1706,15 +1624,15 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
continue;
spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(slabp, &n->slabs_full, list) {
+ list_for_each_entry(page, &n->slabs_full, lru) {
active_objs += cachep->num;
active_slabs++;
}
- list_for_each_entry(slabp, &n->slabs_partial, list) {
- active_objs += slabp->inuse;
+ list_for_each_entry(page, &n->slabs_partial, lru) {
+ active_objs += page->active;
active_slabs++;
}
- list_for_each_entry(slabp, &n->slabs_free, list)
+ list_for_each_entry(page, &n->slabs_free, lru)
num_slabs++;
free_objects += n->free_objects;
@@ -1736,19 +1654,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
* did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable.
*/
-static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+ int nodeid)
{
struct page *page;
int nr_pages;
- int i;
-
-#ifndef CONFIG_MMU
- /*
- * Nommu uses slab's for process anonymous memory allocations, and thus
- * requires __GFP_COMP to properly refcount higher order allocations
- */
- flags |= __GFP_COMP;
-#endif
flags |= cachep->allocflags;
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
@@ -1772,12 +1682,9 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
else
add_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_pages);
- for (i = 0; i < nr_pages; i++) {
- __SetPageSlab(page + i);
-
- if (page->pfmemalloc)
- SetPageSlabPfmemalloc(page + i);
- }
+ __SetPageSlab(page);
+ if (page->pfmemalloc)
+ SetPageSlabPfmemalloc(page);
memcg_bind_pages(cachep, cachep->gfporder);
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
@@ -1789,17 +1696,15 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
kmemcheck_mark_unallocated_pages(page, nr_pages);
}
- return page_address(page);
+ return page;
}
/*
* Interface to system's page release.
*/
-static void kmem_freepages(struct kmem_cache *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
- unsigned long i = (1 << cachep->gfporder);
- struct page *page = virt_to_page(addr);
- const unsigned long nr_freed = i;
+ const unsigned long nr_freed = (1 << cachep->gfporder);
kmemcheck_free_shadow(page, cachep->gfporder);
@@ -1809,27 +1714,28 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
else
sub_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_freed);
- while (i--) {
- BUG_ON(!PageSlab(page));
- __ClearPageSlabPfmemalloc(page);
- __ClearPageSlab(page);
- page++;
- }
+
+ BUG_ON(!PageSlab(page));
+ __ClearPageSlabPfmemalloc(page);
+ __ClearPageSlab(page);
+ page_mapcount_reset(page);
+ page->mapping = NULL;
memcg_release_pages(cachep, cachep->gfporder);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed;
- free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
+ __free_memcg_kmem_pages(page, cachep->gfporder);
}
static void kmem_rcu_free(struct rcu_head *head)
{
- struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
- struct kmem_cache *cachep = slab_rcu->cachep;
+ struct kmem_cache *cachep;
+ struct page *page;
- kmem_freepages(cachep, slab_rcu->addr);
- if (OFF_SLAB(cachep))
- kmem_cache_free(cachep->slabp_cache, slab_rcu);
+ page = container_of(head, struct page, rcu_head);
+ cachep = page->slab_cache;
+
+ kmem_freepages(cachep, page);
}
#if DEBUG
@@ -1978,19 +1884,19 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
/* Print some data about the neighboring objects, if they
* exist:
*/
- struct slab *slabp = virt_to_slab(objp);
+ struct page *page = virt_to_head_page(objp);
unsigned int objnr;
- objnr = obj_to_index(cachep, slabp, objp);
+ objnr = obj_to_index(cachep, page, objp);
if (objnr) {
- objp = index_to_obj(cachep, slabp, objnr - 1);
+ objp = index_to_obj(cachep, page, objnr - 1);
realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 2);
}
if (objnr + 1 < cachep->num) {
- objp = index_to_obj(cachep, slabp, objnr + 1);
+ objp = index_to_obj(cachep, page, objnr + 1);
realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Next obj: start=%p, len=%d\n",
realobj, size);
@@ -2001,11 +1907,12 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
#endif
#if DEBUG
-static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep,
+ struct page *page)
{
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = index_to_obj(cachep, slabp, i);
+ void *objp = index_to_obj(cachep, page, i);
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
@@ -2030,7 +1937,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
}
}
#else
-static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep,
+ struct page *page)
{
}
#endif
@@ -2044,23 +1952,34 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
* Before calling the slab must have been unlinked from the cache. The
* cache-lock is not held/needed.
*/
-static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
- void *addr = slabp->s_mem - slabp->colouroff;
+ void *freelist;
- slab_destroy_debugcheck(cachep, slabp);
+ freelist = page->freelist;
+ slab_destroy_debugcheck(cachep, page);
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
- struct slab_rcu *slab_rcu;
+ struct rcu_head *head;
+
+ /*
+ * RCU free overloads the RCU head over the LRU.
+ * slab_page has been overloaded over the LRU,
+ * but it is not used from now on, so we can
+ * reuse it safely.
+ */
+ head = (void *)&page->rcu_head;
+ call_rcu(head, kmem_rcu_free);
- slab_rcu = (struct slab_rcu *)slabp;
- slab_rcu->cachep = cachep;
- slab_rcu->addr = addr;
- call_rcu(&slab_rcu->head, kmem_rcu_free);
} else {
- kmem_freepages(cachep, addr);
- if (OFF_SLAB(cachep))
- kmem_cache_free(cachep->slabp_cache, slabp);
+ kmem_freepages(cachep, page);
}
+
+ /*
+ * From now on, we don't use the freelist,
+ * although the actual page can be freed in RCU context.
+ */
+ if (OFF_SLAB(cachep))
+ kmem_cache_free(cachep->freelist_cache, freelist);
}
/**
@@ -2097,8 +2016,8 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
* use off-slab slabs. Needed to avoid a possible
* looping condition in cache_grow().
*/
- offslab_limit = size - sizeof(struct slab);
- offslab_limit /= sizeof(kmem_bufctl_t);
+ offslab_limit = size;
+ offslab_limit /= sizeof(unsigned int);
if (num > offslab_limit)
break;
@@ -2220,7 +2139,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
int
__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
{
- size_t left_over, slab_size, ralign;
+ size_t left_over, freelist_size, ralign;
gfp_t gfp;
int err;
size_t size = cachep->size;
@@ -2339,22 +2258,21 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (!cachep->num)
return -E2BIG;
- slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
- + sizeof(struct slab), cachep->align);
+ freelist_size =
+ ALIGN(cachep->num * sizeof(unsigned int), cachep->align);
/*
* If the slab has been placed off-slab, and we have enough space then
* move it on-slab. This is at the expense of any extra colouring.
*/
- if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
+ if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
flags &= ~CFLGS_OFF_SLAB;
- left_over -= slab_size;
+ left_over -= freelist_size;
}
if (flags & CFLGS_OFF_SLAB) {
/* really off slab. No need for manual alignment */
- slab_size =
- cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
+ freelist_size = cachep->num * sizeof(unsigned int);
#ifdef CONFIG_PAGE_POISONING
/* If we're going to use the generic kernel_map_pages()
@@ -2371,16 +2289,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (cachep->colour_off < cachep->align)
cachep->colour_off = cachep->align;
cachep->colour = left_over / cachep->colour_off;
- cachep->slab_size = slab_size;
+ cachep->freelist_size = freelist_size;
cachep->flags = flags;
- cachep->allocflags = 0;
+ cachep->allocflags = __GFP_COMP;
if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
cachep->allocflags |= GFP_DMA;
cachep->size = size;
cachep->reciprocal_buffer_size = reciprocal_value(size);
if (flags & CFLGS_OFF_SLAB) {
- cachep->slabp_cache = kmalloc_slab(slab_size, 0u);
+ cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
/*
* This is a possibility for one of the malloc_sizes caches.
* But since we go off slab only for object size greater than
@@ -2388,7 +2306,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
* this should not happen at all.
* But leave a BUG_ON for some lucky dude.
*/
- BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
+ BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
}
err = setup_cpu_cache(cachep, gfp);
@@ -2494,7 +2412,7 @@ static int drain_freelist(struct kmem_cache *cache,
{
struct list_head *p;
int nr_freed;
- struct slab *slabp;
+ struct page *page;
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
@@ -2506,18 +2424,18 @@ static int drain_freelist(struct kmem_cache *cache,
goto out;
}
- slabp = list_entry(p, struct slab, list);
+ page = list_entry(p, struct page, lru);
#if DEBUG
- BUG_ON(slabp->inuse);
+ BUG_ON(page->active);
#endif
- list_del(&slabp->list);
+ list_del(&page->lru);
/*
* Safe to drop the lock. The slab is no longer linked
* to the cache.
*/
n->free_objects -= cache->num;
spin_unlock_irq(&n->list_lock);
- slab_destroy(cache, slabp);
+ slab_destroy(cache, page);
nr_freed++;
}
out:
@@ -2600,52 +2518,42 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
* descriptors in kmem_cache_create, we search through the malloc_sizes array.
* If we are creating a malloc_sizes cache here it would not be visible to
* kmem_find_general_cachep till the initialization is complete.
- * Hence we cannot have slabp_cache same as the original cache.
+ * Hence we cannot have freelist_cache same as the original cache.
*/
-static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
- int colour_off, gfp_t local_flags,
- int nodeid)
+static void *alloc_slabmgmt(struct kmem_cache *cachep,
+ struct page *page, int colour_off,
+ gfp_t local_flags, int nodeid)
{
- struct slab *slabp;
+ void *freelist;
+ void *addr = page_address(page);
if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab. */
- slabp = kmem_cache_alloc_node(cachep->slabp_cache,
+ freelist = kmem_cache_alloc_node(cachep->freelist_cache,
local_flags, nodeid);
- /*
- * If the first object in the slab is leaked (it's allocated
- * but no one has a reference to it), we want to make sure
- * kmemleak does not treat the ->s_mem pointer as a reference
- * to the object. Otherwise we will not report the leak.
- */
- kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
- local_flags);
- if (!slabp)
+ if (!freelist)
return NULL;
} else {
- slabp = objp + colour_off;
- colour_off += cachep->slab_size;
+ freelist = addr + colour_off;
+ colour_off += cachep->freelist_size;
}
- slabp->inuse = 0;
- slabp->colouroff = colour_off;
- slabp->s_mem = objp + colour_off;
- slabp->nodeid = nodeid;
- slabp->free = 0;
- return slabp;
+ page->active = 0;
+ page->s_mem = addr + colour_off;
+ return freelist;
}
-static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
+static inline unsigned int *slab_freelist(struct page *page)
{
- return (kmem_bufctl_t *) (slabp + 1);
+ return (unsigned int *)(page->freelist);
}
static void cache_init_objs(struct kmem_cache *cachep,
- struct slab *slabp)
+ struct page *page)
{
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = index_to_obj(cachep, slabp, i);
+ void *objp = index_to_obj(cachep, page, i);
#if DEBUG
/* need to poison the objs? */
if (cachep->flags & SLAB_POISON)
@@ -2681,9 +2589,8 @@ static void cache_init_objs(struct kmem_cache *cachep,
if (cachep->ctor)
cachep->ctor(objp);
#endif
- slab_bufctl(slabp)[i] = i + 1;
+ slab_freelist(page)[i] = i;
}
- slab_bufctl(slabp)[i - 1] = BUFCTL_END;
}
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
@@ -2696,41 +2603,41 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
}
}
-static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
+static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
int nodeid)
{
- void *objp = index_to_obj(cachep, slabp, slabp->free);
- kmem_bufctl_t next;
+ void *objp;
- slabp->inuse++;
- next = slab_bufctl(slabp)[slabp->free];
+ objp = index_to_obj(cachep, page, slab_freelist(page)[page->active]);
+ page->active++;
#if DEBUG
- slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
- WARN_ON(slabp->nodeid != nodeid);
+ WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
#endif
- slabp->free = next;
return objp;
}
-static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
+static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
void *objp, int nodeid)
{
- unsigned int objnr = obj_to_index(cachep, slabp, objp);
-
+ unsigned int objnr = obj_to_index(cachep, page, objp);
#if DEBUG
+ unsigned int i;
+
/* Verify that the slab belongs to the intended node */
- WARN_ON(slabp->nodeid != nodeid);
+ WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
- if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
- printk(KERN_ERR "slab: double free detected in cache "
- "'%s', objp %p\n", cachep->name, objp);
- BUG();
+ /* Verify double free bug */
+ for (i = page->active; i < cachep->num; i++) {
+ if (slab_freelist(page)[i] == objnr) {
+ printk(KERN_ERR "slab: double free detected in cache "
+ "'%s', objp %p\n", cachep->name, objp);
+ BUG();
+ }
}
#endif
- slab_bufctl(slabp)[objnr] = slabp->free;
- slabp->free = objnr;
- slabp->inuse--;
+ page->active--;
+ slab_freelist(page)[page->active] = objnr;
}
/*
@@ -2738,23 +2645,11 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
* for the slab allocator to be able to lookup the cache and slab of a
* virtual address for kfree, ksize, and slab debugging.
*/
-static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
- void *addr)
+static void slab_map_pages(struct kmem_cache *cache, struct page *page,
+ void *freelist)
{
- int nr_pages;
- struct page *page;
-
- page = virt_to_page(addr);
-
- nr_pages = 1;
- if (likely(!PageCompound(page)))
- nr_pages <<= cache->gfporder;
-
- do {
- page->slab_cache = cache;
- page->slab_page = slab;
- page++;
- } while (--nr_pages);
+ page->slab_cache = cache;
+ page->freelist = freelist;
}
/*
@@ -2762,9 +2657,9 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
static int cache_grow(struct kmem_cache *cachep,
- gfp_t flags, int nodeid, void *objp)
+ gfp_t flags, int nodeid, struct page *page)
{
- struct slab *slabp;
+ void *freelist;
size_t offset;
gfp_t local_flags;
struct kmem_cache_node *n;
@@ -2805,20 +2700,20 @@ static int cache_grow(struct kmem_cache *cachep,
* Get mem for the objs. Attempt to allocate a physical page from
* 'nodeid'.
*/
- if (!objp)
- objp = kmem_getpages(cachep, local_flags, nodeid);
- if (!objp)
+ if (!page)
+ page = kmem_getpages(cachep, local_flags, nodeid);
+ if (!page)
goto failed;
/* Get slab management. */
- slabp = alloc_slabmgmt(cachep, objp, offset,
+ freelist = alloc_slabmgmt(cachep, page, offset,
local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
- if (!slabp)
+ if (!freelist)
goto opps1;
- slab_map_pages(cachep, slabp, objp);
+ slab_map_pages(cachep, page, freelist);
- cache_init_objs(cachep, slabp);
+ cache_init_objs(cachep, page);
if (local_flags & __GFP_WAIT)
local_irq_disable();
@@ -2826,13 +2721,13 @@ static int cache_grow(struct kmem_cache *cachep,
spin_lock(&n->list_lock);
/* Make slab active. */
- list_add_tail(&slabp->list, &(n->slabs_free));
+ list_add_tail(&page->lru, &(n->slabs_free));
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num;
spin_unlock(&n->list_lock);
return 1;
opps1:
- kmem_freepages(cachep, objp);
+ kmem_freepages(cachep, page);
failed:
if (local_flags & __GFP_WAIT)
local_irq_disable();
@@ -2880,9 +2775,8 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
unsigned long caller)
{
- struct page *page;
unsigned int objnr;
- struct slab *slabp;
+ struct page *page;
BUG_ON(virt_to_cache(objp) != cachep);
@@ -2890,8 +2784,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
kfree_debugcheck(objp);
page = virt_to_head_page(objp);
- slabp = page->slab_page;
-
if (cachep->flags & SLAB_RED_ZONE) {
verify_redzone_free(cachep, objp);
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
@@ -2900,14 +2792,11 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = (void *)caller;
- objnr = obj_to_index(cachep, slabp, objp);
+ objnr = obj_to_index(cachep, page, objp);
BUG_ON(objnr >= cachep->num);
- BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
+ BUG_ON(objp != index_to_obj(cachep, page, objnr));
-#ifdef CONFIG_DEBUG_SLAB_LEAK
- slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
-#endif
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2924,33 +2813,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
return objp;
}
-static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
-{
- kmem_bufctl_t i;
- int entries = 0;
-
- /* Check slab's freelist to see if this obj is there. */
- for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
- entries++;
- if (entries > cachep->num || i >= cachep->num)
- goto bad;
- }
- if (entries != cachep->num - slabp->inuse) {
-bad:
- printk(KERN_ERR "slab: Internal list corruption detected in "
- "cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
- cachep->name, cachep->num, slabp, slabp->inuse,
- print_tainted());
- print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
- sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
- 1);
- BUG();
- }
-}
#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
-#define check_slabp(x,y) do { } while(0)
#endif
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
@@ -2989,7 +2854,7 @@ retry:
while (batchcount > 0) {
struct list_head *entry;
- struct slab *slabp;
+ struct page *page;
/* Get slab alloc is to come from. */
entry = n->slabs_partial.next;
if (entry == &n->slabs_partial) {
@@ -2999,8 +2864,7 @@ retry:
goto must_grow;
}
- slabp = list_entry(entry, struct slab, list);
- check_slabp(cachep, slabp);
+ page = list_entry(entry, struct page, lru);
check_spinlock_acquired(cachep);
/*
@@ -3008,24 +2872,23 @@ retry:
* there must be at least one object available for
* allocation.
*/
- BUG_ON(slabp->inuse >= cachep->num);
+ BUG_ON(page->active >= cachep->num);
- while (slabp->inuse < cachep->num && batchcount--) {
+ while (page->active < cachep->num && batchcount--) {
STATS_INC_ALLOCED(cachep);
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
- ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp,
+ ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
node));
}
- check_slabp(cachep, slabp);
/* move slabp to correct slabp list: */
- list_del(&slabp->list);
- if (slabp->free == BUFCTL_END)
- list_add(&slabp->list, &n->slabs_full);
+ list_del(&page->lru);
+ if (page->active == cachep->num)
+ list_add(&page->lru, &n->slabs_full);
else
- list_add(&slabp->list, &n->slabs_partial);
+ list_add(&page->lru, &n->slabs_partial);
}
must_grow:
@@ -3097,16 +2960,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
*dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE;
}
-#ifdef CONFIG_DEBUG_SLAB_LEAK
- {
- struct slab *slabp;
- unsigned objnr;
-
- slabp = virt_to_head_page(objp)->slab_page;
- objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
- slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
- }
-#endif
objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON)
cachep->ctor(objp);
@@ -3248,18 +3101,20 @@ retry:
* We may trigger various forms of reclaim on the allowed
* set and go into memory reserves if necessary.
*/
+ struct page *page;
+
if (local_flags & __GFP_WAIT)
local_irq_enable();
kmem_flagcheck(cache, flags);
- obj = kmem_getpages(cache, local_flags, numa_mem_id());
+ page = kmem_getpages(cache, local_flags, numa_mem_id());
if (local_flags & __GFP_WAIT)
local_irq_disable();
- if (obj) {
+ if (page) {
/*
* Insert into the appropriate per node queues
*/
- nid = page_to_nid(virt_to_page(obj));
- if (cache_grow(cache, flags, nid, obj)) {
+ nid = page_to_nid(page);
+ if (cache_grow(cache, flags, nid, page)) {
obj = ____cache_alloc_node(cache,
flags | GFP_THISNODE, nid);
if (!obj)
@@ -3288,7 +3143,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
int nodeid)
{
struct list_head *entry;
- struct slab *slabp;
+ struct page *page;
struct kmem_cache_node *n;
void *obj;
int x;
@@ -3308,26 +3163,24 @@ retry:
goto must_grow;
}
- slabp = list_entry(entry, struct slab, list);
+ page = list_entry(entry, struct page, lru);
check_spinlock_acquired_node(cachep, nodeid);
- check_slabp(cachep, slabp);
STATS_INC_NODEALLOCS(cachep);
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
- BUG_ON(slabp->inuse == cachep->num);
+ BUG_ON(page->active == cachep->num);
- obj = slab_get_obj(cachep, slabp, nodeid);
- check_slabp(cachep, slabp);
+ obj = slab_get_obj(cachep, page, nodeid);
n->free_objects--;
/* move slabp to correct slabp list: */
- list_del(&slabp->list);
+ list_del(&page->lru);
- if (slabp->free == BUFCTL_END)
- list_add(&slabp->list, &n->slabs_full);
+ if (page->active == cachep->num)
+ list_add(&page->lru, &n->slabs_full);
else
- list_add(&slabp->list, &n->slabs_partial);
+ list_add(&page->lru, &n->slabs_partial);
spin_unlock(&n->list_lock);
goto done;
@@ -3477,23 +3330,21 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
for (i = 0; i < nr_objects; i++) {
void *objp;
- struct slab *slabp;
+ struct page *page;
clear_obj_pfmemalloc(&objpp[i]);
objp = objpp[i];
- slabp = virt_to_slab(objp);
+ page = virt_to_head_page(objp);
n = cachep->node[node];
- list_del(&slabp->list);
+ list_del(&page->lru);
check_spinlock_acquired_node(cachep, node);
- check_slabp(cachep, slabp);
- slab_put_obj(cachep, slabp, objp, node);
+ slab_put_obj(cachep, page, objp, node);
STATS_DEC_ACTIVE(cachep);
n->free_objects++;
- check_slabp(cachep, slabp);
/* fixup slab chains */
- if (slabp->inuse == 0) {
+ if (page->active == 0) {
if (n->free_objects > n->free_limit) {
n->free_objects -= cachep->num;
/* No need to drop any previously held
@@ -3502,16 +3353,16 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
* a different cache, refer to comments before
* alloc_slabmgmt.
*/
- slab_destroy(cachep, slabp);
+ slab_destroy(cachep, page);
} else {
- list_add(&slabp->list, &n->slabs_free);
+ list_add(&page->lru, &n->slabs_free);
}
} else {
/* Unconditionally move a slab to the end of the
* partial list on free - maximum time for the
* other objects to be freed, too.
*/
- list_add_tail(&slabp->list, &n->slabs_partial);
+ list_add_tail(&page->lru, &n->slabs_partial);
}
}
}
@@ -3551,10 +3402,10 @@ free_done:
p = n->slabs_free.next;
while (p != &(n->slabs_free)) {
- struct slab *slabp;
+ struct page *page;
- slabp = list_entry(p, struct slab, list);
- BUG_ON(slabp->inuse);
+ page = list_entry(p, struct page, lru);
+ BUG_ON(page->active);
i++;
p = p->next;
@@ -4158,7 +4009,7 @@ out:
#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
- struct slab *slabp;
+ struct page *page;
unsigned long active_objs;
unsigned long num_objs;
unsigned long active_slabs = 0;
@@ -4178,23 +4029,23 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
check_irq_on();
spin_lock_irq(&n->list_lock);
- list_for_each_entry(slabp, &n->slabs_full, list) {
- if (slabp->inuse != cachep->num && !error)
+ list_for_each_entry(page, &n->slabs_full, lru) {
+ if (page->active != cachep->num && !error)
error = "slabs_full accounting error";
active_objs += cachep->num;
active_slabs++;
}
- list_for_each_entry(slabp, &n->slabs_partial, list) {
- if (slabp->inuse == cachep->num && !error)
- error = "slabs_partial inuse accounting error";
- if (!slabp->inuse && !error)
- error = "slabs_partial/inuse accounting error";
- active_objs += slabp->inuse;
+ list_for_each_entry(page, &n->slabs_partial, lru) {
+ if (page->active == cachep->num && !error)
+ error = "slabs_partial accounting error";
+ if (!page->active && !error)
+ error = "slabs_partial accounting error";
+ active_objs += page->active;
active_slabs++;
}
- list_for_each_entry(slabp, &n->slabs_free, list) {
- if (slabp->inuse && !error)
- error = "slabs_free/inuse accounting error";
+ list_for_each_entry(page, &n->slabs_free, lru) {
+ if (page->active && !error)
+ error = "slabs_free accounting error";
num_slabs++;
}
free_objects += n->free_objects;
@@ -4346,15 +4197,27 @@ static inline int add_caller(unsigned long *n, unsigned long v)
return 1;
}
-static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
+static void handle_slab(unsigned long *n, struct kmem_cache *c,
+ struct page *page)
{
void *p;
- int i;
+ int i, j;
+
if (n[0] == n[1])
return;
- for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
- if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
+ for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
+ bool active = true;
+
+ for (j = page->active; j < c->num; j++) {
+ /* Skip freed item */
+ if (slab_freelist(page)[j] == i) {
+ active = false;
+ break;
+ }
+ }
+ if (!active)
continue;
+
if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
return;
}
@@ -4379,7 +4242,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
static int leaks_show(struct seq_file *m, void *p)
{
struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
- struct slab *slabp;
+ struct page *page;
struct kmem_cache_node *n;
const char *name;
unsigned long *x = m->private;
@@ -4403,10 +4266,10 @@ static int leaks_show(struct seq_file *m, void *p)
check_irq_on();
spin_lock_irq(&n->list_lock);
- list_for_each_entry(slabp, &n->slabs_full, list)
- handle_slab(x, cachep, slabp);
- list_for_each_entry(slabp, &n->slabs_partial, list)
- handle_slab(x, cachep, slabp);
+ list_for_each_entry(page, &n->slabs_full, lru)
+ handle_slab(x, cachep, page);
+ list_for_each_entry(page, &n->slabs_partial, lru)
+ handle_slab(x, cachep, page);
spin_unlock_irq(&n->list_lock);
}
name = cachep->name;
diff --git a/mm/slub.c b/mm/slub.c
index c3eb3d3ca835..04657a9903b0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -933,6 +933,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
*/
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+ kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+ kmemleak_free(x);
+}
+
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
flags &= gfp_allowed_mask;
@@ -955,7 +965,7 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
kmemleak_free_recursive(x, s->flags);
/*
- * Trouble is that we may no longer disable interupts in the fast path
+ * Trouble is that we may no longer disable interrupts in the fast path
* So in order to make the debug calls that expect irqs to be
* disabled we need to disable interrupts temporarily.
*/
@@ -1260,13 +1270,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {}
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+ kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+ kmemleak_free(x);
+}
+
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{ return 0; }
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
- void *object) {}
+ void *object)
+{
+ kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+ flags & gfp_allowed_mask);
+}
-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+ kmemleak_free_recursive(x, s->flags);
+}
#endif /* CONFIG_SLUB_DEBUG */
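
This hunk keeps both the debug and the non-debug configuration behind the same set of static inline hooks, so the call sites in slub.c stay free of #ifdefs while the kmemleak calls move into the hooks. A small userspace sketch of that pattern, with invented hook names:

#include <stdio.h>
#include <stdlib.h>

#ifdef DEBUG_HOOKS
static inline void alloc_hook(void *ptr, size_t size)
{
	fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);
}
static inline void free_hook(void *ptr)
{
	fprintf(stderr, "free %p\n", ptr);
}
#else
/* production build: same signatures, no code generated at the call sites */
static inline void alloc_hook(void *ptr, size_t size) { (void)ptr; (void)size; }
static inline void free_hook(void *ptr) { (void)ptr; }
#endif

int main(void)
{
	void *p = malloc(64);

	alloc_hook(p, 64);
	free_hook(p);
	free(p);
	return 0;
}
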
@@ -3272,7 +3299,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
if (page)
ptr = page_address(page);
- kmemleak_alloc(ptr, size, 1, flags);
+ kmalloc_large_node_hook(ptr, size, flags);
return ptr;
}
@@ -3336,7 +3363,7 @@ void kfree(const void *x)
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
BUG_ON(!PageCompound(page));
- kmemleak_free(x);
+ kfree_hook(x);
__free_memcg_kmem_pages(page, compound_order(page));
return;
}
diff --git a/mm/swap.c b/mm/swap.c
index 759c3caf44bd..7a9f80d451f5 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -934,7 +934,8 @@ void __init swap_setup(void)
#ifdef CONFIG_SWAP
int i;
- bdi_init(swapper_spaces[0].backing_dev_info);
+ if (bdi_init(swapper_spaces[0].backing_dev_info))
+ panic("Failed to init swap bdi");
for (i = 0; i < MAX_SWAPFILES; i++) {
spin_lock_init(&swapper_spaces[i].tree_lock);
INIT_LIST_HEAD(&swapper_spaces[i].i_mmap_nonlinear);

diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 61fc573f1142..b3d17d1c49c3 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -98,14 +98,14 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
vlan_gvrp_request_leave(dev);
vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);
+
+ netdev_upper_dev_unlink(real_dev, dev);
/* Because unregister_netdevice_queue() makes sure at least one rcu
* grace period is respected before device freeing,
* we don't need to call synchronize_net() here.
*/
unregister_netdevice_queue(dev, head);
- netdev_upper_dev_unlink(real_dev, dev);
-
if (grp->nr_vlan_devs == 0) {
vlan_mvrp_uninit_applicant(real_dev);
vlan_gvrp_uninit_applicant(real_dev);
@@ -169,13 +169,13 @@ int register_vlan_dev(struct net_device *dev)
if (err < 0)
goto out_uninit_mvrp;
- err = netdev_upper_dev_link(real_dev, dev);
- if (err)
- goto out_uninit_mvrp;
-
err = register_netdevice(dev);
if (err < 0)
- goto out_upper_dev_unlink;
+ goto out_uninit_mvrp;
+
+ err = netdev_upper_dev_link(real_dev, dev);
+ if (err)
+ goto out_unregister_netdev;
/* Account for reference in struct vlan_dev_priv */
dev_hold(real_dev);
@@ -191,8 +191,8 @@ int register_vlan_dev(struct net_device *dev)
return 0;
-out_upper_dev_unlink:
- netdev_upper_dev_unlink(real_dev, dev);
+out_unregister_netdev:
+ unregister_netdevice(dev);
out_uninit_mvrp:
if (grp->nr_vlan_devs == 0)
vlan_mvrp_uninit_applicant(real_dev);
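
The register_vlan_dev() reordering also reshuffles the error labels so that each failure point unwinds exactly what was set up before it, in reverse order (register_netdevice() now precedes netdev_upper_dev_link(), and a link failure unregisters the device again). A minimal sketch of that goto-unwind idiom, with made-up step names:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* pretend the last step fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int setup(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto out_undo_a;
	err = step_c();
	if (err)
		goto out_undo_b;
	return 0;

out_undo_b:
	undo_b();
out_undo_a:
	undo_a();
out:
	return err;
}

int main(void)
{
	printf("setup() = %d\n", setup());
	return 0;
}
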
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index ba5983f34c42..a2caf00b82cc 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -196,12 +196,12 @@ static inline u32 vlan_get_ingress_priority(struct net_device *dev,
}
#ifdef CONFIG_VLAN_8021Q_GVRP
-extern int vlan_gvrp_request_join(const struct net_device *dev);
-extern void vlan_gvrp_request_leave(const struct net_device *dev);
-extern int vlan_gvrp_init_applicant(struct net_device *dev);
-extern void vlan_gvrp_uninit_applicant(struct net_device *dev);
-extern int vlan_gvrp_init(void);
-extern void vlan_gvrp_uninit(void);
+int vlan_gvrp_request_join(const struct net_device *dev);
+void vlan_gvrp_request_leave(const struct net_device *dev);
+int vlan_gvrp_init_applicant(struct net_device *dev);
+void vlan_gvrp_uninit_applicant(struct net_device *dev);
+int vlan_gvrp_init(void);
+void vlan_gvrp_uninit(void);
#else
static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; }
static inline void vlan_gvrp_request_leave(const struct net_device *dev) {}
@@ -212,12 +212,12 @@ static inline void vlan_gvrp_uninit(void) {}
#endif
#ifdef CONFIG_VLAN_8021Q_MVRP
-extern int vlan_mvrp_request_join(const struct net_device *dev);
-extern void vlan_mvrp_request_leave(const struct net_device *dev);
-extern int vlan_mvrp_init_applicant(struct net_device *dev);
-extern void vlan_mvrp_uninit_applicant(struct net_device *dev);
-extern int vlan_mvrp_init(void);
-extern void vlan_mvrp_uninit(void);
+int vlan_mvrp_request_join(const struct net_device *dev);
+void vlan_mvrp_request_leave(const struct net_device *dev);
+int vlan_mvrp_init_applicant(struct net_device *dev);
+void vlan_mvrp_uninit_applicant(struct net_device *dev);
+int vlan_mvrp_init(void);
+void vlan_mvrp_uninit(void);
#else
static inline int vlan_mvrp_request_join(const struct net_device *dev) { return 0; }
static inline void vlan_mvrp_request_leave(const struct net_device *dev) {}
@@ -229,8 +229,8 @@ static inline void vlan_mvrp_uninit(void) {}
extern const char vlan_fullname[];
extern const char vlan_version[];
-extern int vlan_netlink_init(void);
-extern void vlan_netlink_fini(void);
+int vlan_netlink_init(void);
+void vlan_netlink_fini(void);
extern struct rtnl_link_ops vlan_link_ops;
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 309129732285..c7e634af8516 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -171,7 +171,7 @@ static size_t vlan_get_size(const struct net_device *dev)
return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */
nla_total_size(2) + /* IFLA_VLAN_ID */
- sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
+ nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
vlan_qos_map_size(vlan->nr_ingress_mappings) +
vlan_qos_map_size(vlan->nr_egress_mappings);
}
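
The fix replaces the raw struct size with the netlink attribute size, which also accounts for the attribute header and padding. To the best of my knowledge nla_total_size(payload) evaluates to NLA_ALIGN(NLA_HDRLEN + payload); the sketch below mirrors that arithmetic, with the constants reproduced here as assumptions (check include/uapi/linux/netlink.h):

#include <stdio.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	NLA_ALIGN(4)	/* assumed: aligned sizeof(struct nlattr) */

static int nla_total_size_sketch(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

struct ifla_vlan_flags_sketch {	/* stand-in for struct ifla_vlan_flags */
	unsigned int flags;
	unsigned int mask;
};

int main(void)
{
	int raw = sizeof(struct ifla_vlan_flags_sketch);

	printf("raw struct size: %d, attribute size: %d\n",
	       raw, nla_total_size_sketch(raw));
	return 0;
}
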
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 990afab2be1b..9c5a1aa34d12 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -544,9 +544,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
chan->inuse = false;
if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
- vdev->config->get(vdev,
- offsetof(struct virtio_9p_config, tag_len),
- &tag_len, sizeof(tag_len));
+ virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len);
} else {
err = -EINVAL;
goto out_free_vq;
@@ -556,8 +554,9 @@ static int p9_virtio_probe(struct virtio_device *vdev)
err = -ENOMEM;
goto out_free_vq;
}
- vdev->config->get(vdev, offsetof(struct virtio_9p_config, tag),
- tag, tag_len);
+
+ virtio_cread_bytes(vdev, offsetof(struct virtio_9p_config, tag),
+ tag, tag_len);
chan->tag = tag;
chan->tag_len = tag_len;
err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
diff --git a/net/Kconfig b/net/Kconfig
index b50dacc072f0..0715db64a5c3 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -220,6 +220,7 @@ source "net/openvswitch/Kconfig"
source "net/vmw_vsock/Kconfig"
source "net/netlink/Kconfig"
source "net/mpls/Kconfig"
+source "net/hsr/Kconfig"
config RPS
boolean
diff --git a/net/Makefile b/net/Makefile
index 9492e8cb64e9..8fa2f91517f1 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -71,3 +71,4 @@ obj-$(CONFIG_NFC) += nfc/
obj-$(CONFIG_OPENVSWITCH) += openvswitch/
obj-$(CONFIG_VSOCKETS) += vmw_vsock/
obj-$(CONFIG_NET_MPLS_GSO) += mpls/
+obj-$(CONFIG_HSR) += hsr/
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 4b4d2b779ec1..a00123ebb0ae 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1735,7 +1735,7 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
res = -EFAULT;
break;
}
- if (amount > AX25_NOUID_BLOCK) {
+ if (amount < 0 || amount > AX25_NOUID_BLOCK) {
res = -EINVAL;
break;
}
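
The added lower-bound test matters because the value arrives from userspace as a signed int, so a plain upper-bound comparison would accept negative input. A tiny sketch of the validation, with AX25_NOUID_BLOCK replaced by an arbitrary stand-in:

#include <stdbool.h>
#include <stdio.h>

#define BLOCK_MAX 100	/* stand-in for AX25_NOUID_BLOCK */

static bool valid_amount(int amount)
{
	return amount >= 0 && amount <= BLOCK_MAX;
}

int main(void)
{
	int samples[] = { -1, 0, 50, 101 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%d -> %s\n", samples[i],
		       valid_amount(samples[i]) ? "ok" : "-EINVAL");
	return 0;
}
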
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 489bb36f1b94..4f4aabbd8eab 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -24,6 +24,7 @@ batman-adv-y += bitarray.o
batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
batman-adv-y += debugfs.o
batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
+batman-adv-y += fragmentation.o
batman-adv-y += gateway_client.o
batman-adv-y += gateway_common.o
batman-adv-y += hard-interface.o
@@ -37,5 +38,3 @@ batman-adv-y += send.o
batman-adv-y += soft-interface.o
batman-adv-y += sysfs.o
batman-adv-y += translation-table.o
-batman-adv-y += unicast.o
-batman-adv-y += vis.o
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 0a8a80cd4bf1..a2b480a90872 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -87,22 +87,198 @@ static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
return (uint8_t)(sum / count);
}
+/**
+ * batadv_iv_ogm_orig_free - free the private resources allocated for this
+ * orig_node
+ * @orig_node: the orig_node for which the resources have to be free'd
+ */
+static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
+{
+ kfree(orig_node->bat_iv.bcast_own);
+ kfree(orig_node->bat_iv.bcast_own_sum);
+}
+
+/**
+ * batadv_iv_ogm_orig_add_if - change the private structures of the orig_node to
+ * include the new hard-interface
+ * @orig_node: the orig_node that has to be changed
+ * @max_if_num: the current number of interfaces
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
+ int max_if_num)
+{
+ void *data_ptr;
+ size_t data_size, old_size;
+ int ret = -ENOMEM;
+
+ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+ data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
+ old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
+ data_ptr = kmalloc(data_size, GFP_ATOMIC);
+ if (!data_ptr)
+ goto unlock;
+
+ memcpy(data_ptr, orig_node->bat_iv.bcast_own, old_size);
+ kfree(orig_node->bat_iv.bcast_own);
+ orig_node->bat_iv.bcast_own = data_ptr;
+
+ data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
+ if (!data_ptr) {
+ kfree(orig_node->bat_iv.bcast_own);
+ goto unlock;
+ }
+
+ memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
+ (max_if_num - 1) * sizeof(uint8_t));
+ kfree(orig_node->bat_iv.bcast_own_sum);
+ orig_node->bat_iv.bcast_own_sum = data_ptr;
+
+ ret = 0;
+
+unlock:
+ spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+ return ret;
+}
+
+/**
+ * batadv_iv_ogm_orig_del_if - change the private structures of the orig_node to
+ * exclude the removed interface
+ * @orig_node: the orig_node that has to be changed
+ * @max_if_num: the current number of interfaces
+ * @del_if_num: the index of the interface being removed
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
+ int max_if_num, int del_if_num)
+{
+ int chunk_size, ret = -ENOMEM, if_offset;
+ void *data_ptr = NULL;
+
+ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+ /* last interface was removed */
+ if (max_if_num == 0)
+ goto free_bcast_own;
+
+ chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
+ data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
+ if (!data_ptr)
+ goto unlock;
+
+ /* copy first part */
+ memcpy(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
+
+ /* copy second part */
+ memcpy((char *)data_ptr + del_if_num * chunk_size,
+ orig_node->bat_iv.bcast_own + ((del_if_num + 1) * chunk_size),
+ (max_if_num - del_if_num) * chunk_size);
+
+free_bcast_own:
+ kfree(orig_node->bat_iv.bcast_own);
+ orig_node->bat_iv.bcast_own = data_ptr;
+
+ if (max_if_num == 0)
+ goto free_own_sum;
+
+ data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
+ if (!data_ptr) {
+ kfree(orig_node->bat_iv.bcast_own);
+ goto unlock;
+ }
+
+ memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
+ del_if_num * sizeof(uint8_t));
+
+ if_offset = (del_if_num + 1) * sizeof(uint8_t);
+ memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
+ orig_node->bat_iv.bcast_own_sum + if_offset,
+ (max_if_num - del_if_num) * sizeof(uint8_t));
+
+free_own_sum:
+ kfree(orig_node->bat_iv.bcast_own_sum);
+ orig_node->bat_iv.bcast_own_sum = data_ptr;
+
+ ret = 0;
+unlock:
+ spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+ return ret;
+}
+
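
batadv_iv_ogm_orig_del_if() above shrinks the per-interface counter arrays by allocating a smaller buffer and copying the slots before and after the removed interface with two memcpy() calls. A userspace sketch of that element-removal pattern (names and element type are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Remove element del from an array of n elements by copying the part before
 * it and the part after it into a freshly allocated, smaller buffer. */
static unsigned char *shrink_array(const unsigned char *old, size_t n,
				   size_t del)
{
	unsigned char *buf;

	if (n <= 1)
		return NULL;	/* last slot removed: no buffer left */

	buf = malloc(n - 1);
	if (!buf)
		return NULL;

	memcpy(buf, old, del);
	memcpy(buf + del, old + del + 1, n - 1 - del);
	return buf;
}

int main(void)
{
	unsigned char counts[] = { 10, 20, 30, 40 };
	unsigned char *shrunk = shrink_array(counts, 4, 1);	/* drop slot 1 */

	if (shrunk) {
		for (size_t i = 0; i < 3; i++)
			printf("%d ", shrunk[i]);
		putchar('\n');
		free(shrunk);
	}
	return 0;
}
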
+/**
+ * batadv_iv_ogm_orig_get - retrieve or create (if it does not exist) an originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: mac address of the originator
+ *
+ * Returns the originator object corresponding to the passed mac address or NULL
+ * on failure.
+ * If the object does not exist, it is created and initialised.
+ */
+static struct batadv_orig_node *
+batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
+{
+ struct batadv_orig_node *orig_node;
+ int size, hash_added;
+
+ orig_node = batadv_orig_hash_find(bat_priv, addr);
+ if (orig_node)
+ return orig_node;
+
+ orig_node = batadv_orig_node_new(bat_priv, addr);
+ if (!orig_node)
+ return NULL;
+
+ spin_lock_init(&orig_node->bat_iv.ogm_cnt_lock);
+
+ size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
+ orig_node->bat_iv.bcast_own = kzalloc(size, GFP_ATOMIC);
+ if (!orig_node->bat_iv.bcast_own)
+ goto free_orig_node;
+
+ size = bat_priv->num_ifaces * sizeof(uint8_t);
+ orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
+ if (!orig_node->bat_iv.bcast_own_sum)
+ goto free_bcast_own;
+
+ hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
+ batadv_choose_orig, orig_node,
+ &orig_node->hash_entry);
+ if (hash_added != 0)
+ goto free_bcast_own;
+
+ return orig_node;
+
+free_bcast_own:
+ kfree(orig_node->bat_iv.bcast_own);
+free_orig_node:
+ batadv_orig_node_free_ref(orig_node);
+
+ return NULL;
+}
+
static struct batadv_neigh_node *
batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
const uint8_t *neigh_addr,
struct batadv_orig_node *orig_node,
struct batadv_orig_node *orig_neigh)
{
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_neigh_node *neigh_node;
- neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr);
+ neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
if (!neigh_node)
goto out;
- INIT_LIST_HEAD(&neigh_node->bonding_list);
+ spin_lock_init(&neigh_node->bat_iv.lq_update_lock);
- neigh_node->orig_node = orig_neigh;
- neigh_node->if_incoming = hard_iface;
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+ neigh_addr, orig_node->orig, hard_iface->net_dev->name);
spin_lock_bh(&orig_node->neigh_list_lock);
hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
@@ -135,9 +311,8 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
batadv_ogm_packet->header.ttl = 2;
batadv_ogm_packet->flags = BATADV_NO_FLAGS;
+ batadv_ogm_packet->reserved = 0;
batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
- batadv_ogm_packet->tt_num_changes = 0;
- batadv_ogm_packet->ttvn = 0;
res = 0;
@@ -207,12 +382,12 @@ static uint8_t batadv_hop_penalty(uint8_t tq,
/* is there another aggregated packet here? */
static int batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
- int tt_num_changes)
+ __be16 tvlv_len)
{
int next_buff_pos = 0;
next_buff_pos += buff_pos + BATADV_OGM_HLEN;
- next_buff_pos += batadv_tt_len(tt_num_changes);
+ next_buff_pos += ntohs(tvlv_len);
return (next_buff_pos <= packet_len) &&
(next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
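
batadv_iv_ogm_aggr_packet() now derives each aggregated OGM's length from its TVLV length field, and the loop below it advances by BATADV_OGM_HLEN + ntohs(tvlv_len) until the next offset would overrun the buffer or the aggregation limit. A userspace sketch of that walk over a packed buffer, using an invented 4-byte mini header in place of the real OGM header:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_LEN		4	/* stand-in for BATADV_OGM_HLEN */
#define MAX_AGGR	64	/* stand-in for BATADV_MAX_AGGREGATION_BYTES */

/* Same check as the patched helper: does the packet starting at pos, whose
 * TVLV block is tvlv_len_be bytes (network order), still fit in the buffer? */
static int aggr_packet(int pos, int packet_len, uint16_t tvlv_len_be)
{
	int next = pos + HDR_LEN + ntohs(tvlv_len_be);

	return next <= packet_len && next <= MAX_AGGR;
}

int main(void)
{
	unsigned char buf[64] = { 0 };
	int pos = 0, packet_len = 24;
	uint16_t tvlv_len_be = htons(8);

	/* two fake packets, each with an 8-byte TVLV block after the header */
	memcpy(buf + 2, &tvlv_len_be, 2);		/* tvlv_len of packet 0 */
	memcpy(buf + 12 + 2, &tvlv_len_be, 2);		/* tvlv_len of packet 1 */

	memcpy(&tvlv_len_be, buf + pos + 2, 2);
	while (aggr_packet(pos, packet_len, tvlv_len_be)) {
		printf("aggregated packet at offset %d\n", pos);
		pos += HDR_LEN + ntohs(tvlv_len_be);
		memcpy(&tvlv_len_be, buf + pos + 2, 2);
	}
	return 0;
}
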
@@ -240,7 +415,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
/* adjust all flags and log packets */
while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
- batadv_ogm_packet->tt_num_changes)) {
+ batadv_ogm_packet->tvlv_len)) {
/* we might have aggregated direct link packets with an
* ordinary base packet
*/
@@ -256,18 +431,18 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
fwd_str = "Sending own";
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
+ "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s) on interface %s [%pM]\n",
fwd_str, (packet_num > 0 ? "aggregated " : ""),
batadv_ogm_packet->orig,
ntohl(batadv_ogm_packet->seqno),
batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
(batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
"on" : "off"),
- batadv_ogm_packet->ttvn, hard_iface->net_dev->name,
+ hard_iface->net_dev->name,
hard_iface->net_dev->dev_addr);
buff_pos += BATADV_OGM_HLEN;
- buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
+ buff_pos += ntohs(batadv_ogm_packet->tvlv_len);
packet_num++;
packet_pos = forw_packet->skb->data + buff_pos;
batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
@@ -601,7 +776,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
struct batadv_hard_iface *if_incoming)
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- uint8_t tt_num_changes;
+ uint16_t tvlv_len;
if (batadv_ogm_packet->header.ttl <= 1) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
@@ -621,7 +796,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
return;
}
- tt_num_changes = batadv_ogm_packet->tt_num_changes;
+ tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
batadv_ogm_packet->header.ttl--;
memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
@@ -642,7 +817,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
batadv_iv_ogm_queue_add(bat_priv, (unsigned char *)batadv_ogm_packet,
- BATADV_OGM_HLEN + batadv_tt_len(tt_num_changes),
+ BATADV_OGM_HLEN + tvlv_len,
if_incoming, 0, batadv_iv_ogm_fwd_send_time());
}
@@ -662,20 +837,22 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
uint32_t i;
size_t word_index;
uint8_t *w;
+ int if_num;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- spin_lock_bh(&orig_node->ogm_cnt_lock);
+ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
word_index = hard_iface->if_num * BATADV_NUM_WORDS;
- word = &(orig_node->bcast_own[word_index]);
+ word = &(orig_node->bat_iv.bcast_own[word_index]);
batadv_bit_get_packet(bat_priv, word, 1, 0);
- w = &orig_node->bcast_own_sum[hard_iface->if_num];
+ if_num = hard_iface->if_num;
+ w = &orig_node->bat_iv.bcast_own_sum[if_num];
*w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE);
- spin_unlock_bh(&orig_node->ogm_cnt_lock);
+ spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
}
rcu_read_unlock();
}
@@ -688,43 +865,29 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
struct batadv_ogm_packet *batadv_ogm_packet;
struct batadv_hard_iface *primary_if;
int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
- int vis_server, tt_num_changes = 0;
uint32_t seqno;
- uint8_t bandwidth;
+ uint16_t tvlv_len = 0;
- vis_server = atomic_read(&bat_priv->vis_mode);
primary_if = batadv_primary_if_get_selected(bat_priv);
- if (hard_iface == primary_if)
- tt_num_changes = batadv_tt_append_diff(bat_priv, ogm_buff,
- ogm_buff_len,
- BATADV_OGM_HLEN);
+ if (hard_iface == primary_if) {
+ /* tt changes have to be committed before the tvlv data is
+ * appended as it may alter the tt tvlv container
+ */
+ batadv_tt_local_commit_changes(bat_priv);
+ tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, ogm_buff,
+ ogm_buff_len,
+ BATADV_OGM_HLEN);
+ }
batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
+ batadv_ogm_packet->tvlv_len = htons(tvlv_len);
/* change sequence number to network order */
seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
batadv_ogm_packet->seqno = htonl(seqno);
atomic_inc(&hard_iface->bat_iv.ogm_seqno);
- batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
- batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
- if (tt_num_changes >= 0)
- batadv_ogm_packet->tt_num_changes = tt_num_changes;
-
- if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC)
- batadv_ogm_packet->flags |= BATADV_VIS_SERVER;
- else
- batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
-
- if (hard_iface == primary_if &&
- atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER) {
- bandwidth = (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
- batadv_ogm_packet->gw_flags = bandwidth;
- } else {
- batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
- }
-
batadv_iv_ogm_slide_own_bcast_window(hard_iface);
batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff,
hard_iface->bat_iv.ogm_buff_len, hard_iface, 1,
@@ -770,18 +933,18 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
if (dup_status != BATADV_NO_DUP)
continue;
- spin_lock_bh(&tmp_neigh_node->lq_update_lock);
- batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
- &tmp_neigh_node->tq_index, 0);
- tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
- tmp_neigh_node->tq_avg = tq_avg;
- spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
+ spin_lock_bh(&tmp_neigh_node->bat_iv.lq_update_lock);
+ batadv_ring_buffer_set(tmp_neigh_node->bat_iv.tq_recv,
+ &tmp_neigh_node->bat_iv.tq_index, 0);
+ tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->bat_iv.tq_recv);
+ tmp_neigh_node->bat_iv.tq_avg = tq_avg;
+ spin_unlock_bh(&tmp_neigh_node->bat_iv.lq_update_lock);
}
if (!neigh_node) {
struct batadv_orig_node *orig_tmp;
- orig_tmp = batadv_get_orig_node(bat_priv, ethhdr->h_source);
+ orig_tmp = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source);
if (!orig_tmp)
goto unlock;
@@ -798,80 +961,55 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
rcu_read_unlock();
- orig_node->flags = batadv_ogm_packet->flags;
neigh_node->last_seen = jiffies;
- spin_lock_bh(&neigh_node->lq_update_lock);
- batadv_ring_buffer_set(neigh_node->tq_recv,
- &neigh_node->tq_index,
+ spin_lock_bh(&neigh_node->bat_iv.lq_update_lock);
+ batadv_ring_buffer_set(neigh_node->bat_iv.tq_recv,
+ &neigh_node->bat_iv.tq_index,
batadv_ogm_packet->tq);
- neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv);
- spin_unlock_bh(&neigh_node->lq_update_lock);
+ tq_avg = batadv_ring_buffer_avg(neigh_node->bat_iv.tq_recv);
+ neigh_node->bat_iv.tq_avg = tq_avg;
+ spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock);
if (dup_status == BATADV_NO_DUP) {
orig_node->last_ttl = batadv_ogm_packet->header.ttl;
neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
}
- batadv_bonding_candidate_add(orig_node, neigh_node);
+ batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node);
/* if this neighbor already is our next hop there is nothing
* to change
*/
router = batadv_orig_node_get_router(orig_node);
if (router == neigh_node)
- goto update_tt;
+ goto out;
/* if this neighbor does not offer a better TQ we won't consider it */
- if (router && (router->tq_avg > neigh_node->tq_avg))
- goto update_tt;
+ if (router && (router->bat_iv.tq_avg > neigh_node->bat_iv.tq_avg))
+ goto out;
/* if the TQ is the same and the link not more symmetric we
* won't consider it either
*/
- if (router && (neigh_node->tq_avg == router->tq_avg)) {
+ if (router && (neigh_node->bat_iv.tq_avg == router->bat_iv.tq_avg)) {
orig_node_tmp = router->orig_node;
- spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+ spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
if_num = router->if_incoming->if_num;
- sum_orig = orig_node_tmp->bcast_own_sum[if_num];
- spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+ sum_orig = orig_node_tmp->bat_iv.bcast_own_sum[if_num];
+ spin_unlock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
orig_node_tmp = neigh_node->orig_node;
- spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+ spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
if_num = neigh_node->if_incoming->if_num;
- sum_neigh = orig_node_tmp->bcast_own_sum[if_num];
- spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+ sum_neigh = orig_node_tmp->bat_iv.bcast_own_sum[if_num];
+ spin_unlock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
if (sum_orig >= sum_neigh)
- goto update_tt;
+ goto out;
}
batadv_update_route(bat_priv, orig_node, neigh_node);
-
-update_tt:
- /* I have to check for transtable changes only if the OGM has been
- * sent through a primary interface
- */
- if (((batadv_ogm_packet->orig != ethhdr->h_source) &&
- (batadv_ogm_packet->header.ttl > 2)) ||
- (batadv_ogm_packet->flags & BATADV_PRIMARIES_FIRST_HOP))
- batadv_tt_update_orig(bat_priv, orig_node, tt_buff,
- batadv_ogm_packet->tt_num_changes,
- batadv_ogm_packet->ttvn,
- ntohs(batadv_ogm_packet->tt_crc));
-
- if (orig_node->gw_flags != batadv_ogm_packet->gw_flags)
- batadv_gw_node_update(bat_priv, orig_node,
- batadv_ogm_packet->gw_flags);
-
- orig_node->gw_flags = batadv_ogm_packet->gw_flags;
-
- /* restart gateway selection if fast or late switching was enabled */
- if ((orig_node->gw_flags) &&
- (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_CLIENT) &&
- (atomic_read(&bat_priv->gw_sel_class) > 2))
- batadv_gw_check_election(bat_priv, orig_node);
-
goto out;
unlock:
@@ -893,7 +1031,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
uint8_t total_count;
uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
- int tq_asym_penalty, inv_asym_penalty, ret = 0;
+ int tq_asym_penalty, inv_asym_penalty, if_num, ret = 0;
unsigned int combined_tq;
/* find corresponding one hop neighbor */
@@ -931,10 +1069,11 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
orig_node->last_seen = jiffies;
/* find packet count of corresponding one hop neighbor */
- spin_lock_bh(&orig_node->ogm_cnt_lock);
- orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
- neigh_rq_count = neigh_node->real_packet_count;
- spin_unlock_bh(&orig_node->ogm_cnt_lock);
+ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ if_num = if_incoming->if_num;
+ orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num];
+ neigh_rq_count = neigh_node->bat_iv.real_packet_count;
+ spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
/* pay attention to not get a value bigger than 100 % */
if (orig_eq_count > neigh_rq_count)
@@ -1016,12 +1155,13 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
uint8_t *neigh_addr;
uint8_t packet_count;
+ unsigned long *bitmap;
- orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
+ orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig);
if (!orig_node)
return BATADV_NO_DUP;
- spin_lock_bh(&orig_node->ogm_cnt_lock);
+ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
seq_diff = seqno - orig_node->last_real_seqno;
/* signalize caller that the packet is to be dropped. */
@@ -1036,7 +1176,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
hlist_for_each_entry_rcu(tmp_neigh_node,
&orig_node->neigh_list, list) {
neigh_addr = tmp_neigh_node->addr;
- is_dup = batadv_test_bit(tmp_neigh_node->real_bits,
+ is_dup = batadv_test_bit(tmp_neigh_node->bat_iv.real_bits,
orig_node->last_real_seqno,
seqno);
@@ -1052,13 +1192,13 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
}
/* if the window moved, set the update flag. */
- need_update |= batadv_bit_get_packet(bat_priv,
- tmp_neigh_node->real_bits,
+ bitmap = tmp_neigh_node->bat_iv.real_bits;
+ need_update |= batadv_bit_get_packet(bat_priv, bitmap,
seq_diff, set_mark);
- packet_count = bitmap_weight(tmp_neigh_node->real_bits,
+ packet_count = bitmap_weight(tmp_neigh_node->bat_iv.real_bits,
BATADV_TQ_LOCAL_WINDOW_SIZE);
- tmp_neigh_node->real_packet_count = packet_count;
+ tmp_neigh_node->bat_iv.real_packet_count = packet_count;
}
rcu_read_unlock();
@@ -1070,7 +1210,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
}
out:
- spin_unlock_bh(&orig_node->ogm_cnt_lock);
+ spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
batadv_orig_node_free_ref(orig_node);
return ret;
}
@@ -1082,7 +1222,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_hard_iface *hard_iface;
- struct batadv_orig_node *orig_neigh_node, *orig_node;
+ struct batadv_orig_node *orig_neigh_node, *orig_node, *orig_node_tmp;
struct batadv_neigh_node *router = NULL, *router_router = NULL;
struct batadv_neigh_node *orig_neigh_router = NULL;
int has_directlink_flag;
@@ -1122,13 +1262,11 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
is_single_hop_neigh = true;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %#.4x, changes %u, tq %d, TTL %d, V %d, IDF %d)\n",
+ "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, tq %d, TTL %d, V %d, IDF %d)\n",
ethhdr->h_source, if_incoming->net_dev->name,
if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
batadv_ogm_packet->prev_sender,
- ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->ttvn,
- ntohs(batadv_ogm_packet->tt_crc),
- batadv_ogm_packet->tt_num_changes, batadv_ogm_packet->tq,
+ ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq,
batadv_ogm_packet->header.ttl,
batadv_ogm_packet->header.version, has_directlink_flag);
@@ -1168,8 +1306,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
int16_t if_num;
uint8_t *weight;
- orig_neigh_node = batadv_get_orig_node(bat_priv,
- ethhdr->h_source);
+ orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
+ ethhdr->h_source);
if (!orig_neigh_node)
return;
@@ -1183,15 +1321,15 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
if_num = if_incoming->if_num;
offset = if_num * BATADV_NUM_WORDS;
- spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
- word = &(orig_neigh_node->bcast_own[offset]);
+ spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
+ word = &(orig_neigh_node->bat_iv.bcast_own[offset]);
bit_pos = if_incoming_seqno - 2;
bit_pos -= ntohl(batadv_ogm_packet->seqno);
batadv_set_bit(word, bit_pos);
- weight = &orig_neigh_node->bcast_own_sum[if_num];
+ weight = &orig_neigh_node->bat_iv.bcast_own_sum[if_num];
*weight = bitmap_weight(word,
BATADV_TQ_LOCAL_WINDOW_SIZE);
- spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
+ spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
}
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -1214,7 +1352,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
return;
}
- orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
+ orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig);
if (!orig_node)
return;
@@ -1235,10 +1373,12 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
}
router = batadv_orig_node_get_router(orig_node);
- if (router)
- router_router = batadv_orig_node_get_router(router->orig_node);
+ if (router) {
+ orig_node_tmp = router->orig_node;
+ router_router = batadv_orig_node_get_router(orig_node_tmp);
+ }
- if ((router && router->tq_avg != 0) &&
+ if ((router && router->bat_iv.tq_avg != 0) &&
(batadv_compare_eth(router->addr, ethhdr->h_source)))
is_from_best_next_hop = true;
@@ -1254,14 +1394,16 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
goto out;
}
+ batadv_tvlv_ogm_receive(bat_priv, batadv_ogm_packet, orig_node);
+
/* if sender is a direct neighbor the sender mac equals
* originator mac
*/
if (is_single_hop_neigh)
orig_neigh_node = orig_node;
else
- orig_neigh_node = batadv_get_orig_node(bat_priv,
- ethhdr->h_source);
+ orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
+ ethhdr->h_source);
if (!orig_neigh_node)
goto out;
@@ -1350,9 +1492,9 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
struct batadv_ogm_packet *batadv_ogm_packet;
struct ethhdr *ethhdr;
int buff_pos = 0, packet_len;
- unsigned char *tt_buff, *packet_buff;
- bool ret;
+ unsigned char *tvlv_buff, *packet_buff;
uint8_t *packet_pos;
+ bool ret;
ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
if (!ret)
@@ -1375,14 +1517,14 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
/* unpack the aggregated packets and process them one by one */
while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
- batadv_ogm_packet->tt_num_changes)) {
- tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
+ batadv_ogm_packet->tvlv_len)) {
+ tvlv_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
- batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff,
- if_incoming);
+ batadv_iv_ogm_process(ethhdr, batadv_ogm_packet,
+ tvlv_buff, if_incoming);
buff_pos += BATADV_OGM_HLEN;
- buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
+ buff_pos += ntohs(batadv_ogm_packet->tvlv_len);
packet_pos = packet_buff + buff_pos;
batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
@@ -1392,6 +1534,106 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
return NET_RX_SUCCESS;
}
+/**
+ * batadv_iv_ogm_orig_print - print the originator table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @seq: debugfs table seq_file struct
+ */
+static void batadv_iv_ogm_orig_print(struct batadv_priv *bat_priv,
+ struct seq_file *seq)
+{
+ struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ int last_seen_msecs, last_seen_secs;
+ struct batadv_orig_node *orig_node;
+ unsigned long last_seen_jiffies;
+ struct hlist_head *head;
+ int batman_count = 0;
+ uint32_t i;
+
+ seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
+ "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
+ "Nexthop", "outgoingIF", "Potential nexthops");
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+ neigh_node = batadv_orig_node_get_router(orig_node);
+ if (!neigh_node)
+ continue;
+
+ if (neigh_node->bat_iv.tq_avg == 0)
+ goto next;
+
+ last_seen_jiffies = jiffies - orig_node->last_seen;
+ last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
+ last_seen_secs = last_seen_msecs / 1000;
+ last_seen_msecs = last_seen_msecs % 1000;
+
+ seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
+ orig_node->orig, last_seen_secs,
+ last_seen_msecs, neigh_node->bat_iv.tq_avg,
+ neigh_node->addr,
+ neigh_node->if_incoming->net_dev->name);
+
+ hlist_for_each_entry_rcu(neigh_node_tmp,
+ &orig_node->neigh_list, list) {
+ seq_printf(seq, " %pM (%3i)",
+ neigh_node_tmp->addr,
+ neigh_node_tmp->bat_iv.tq_avg);
+ }
+
+ seq_puts(seq, "\n");
+ batman_count++;
+
+next:
+ batadv_neigh_node_free_ref(neigh_node);
+ }
+ rcu_read_unlock();
+ }
+
+ if (batman_count == 0)
+ seq_puts(seq, "No batman nodes in range ...\n");
+}
+
+/**
+ * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
+ * @neigh1: the first neighbor object of the comparison
+ * @neigh2: the second neighbor object of the comparison
+ *
+ * Returns a value less than, equal to or greater than 0 if the metric via
+ * neigh1 is lower than, the same as or higher than the metric via neigh2.
+ */
+static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
+ struct batadv_neigh_node *neigh2)
+{
+ uint8_t tq1, tq2;
+
+ tq1 = neigh1->bat_iv.tq_avg;
+ tq2 = neigh2->bat_iv.tq_avg;
+
+ return tq1 - tq2;
+}
+
+/**
+ * batadv_iv_ogm_neigh_is_eob - check if neigh1 is equally good or better than
+ * neigh2 from the metric perspective
+ * @neigh1: the first neighbor object of the comparison
+ * @neigh2: the second neighbor object of the comparison
+ *
+ * Returns true if the metric via neigh1 is equally good or better than the
+ * metric via neigh2, false otherwise.
+ */
+static bool batadv_iv_ogm_neigh_is_eob(struct batadv_neigh_node *neigh1,
+ struct batadv_neigh_node *neigh2)
+{
+ int diff = batadv_iv_ogm_neigh_cmp(neigh1, neigh2);
+
+ return diff > -BATADV_TQ_SIMILARITY_THRESHOLD;
+}
+
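
bat_neigh_is_equiv_or_better treats two neighbors as equivalent while their TQ averages differ by less than a similarity threshold, which keeps routes from flapping on small metric changes. A minimal sketch of the comparison; the threshold value below is an assumption standing in for BATADV_TQ_SIMILARITY_THRESHOLD:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TQ_SIMILARITY_THRESHOLD 50	/* assumed value for illustration */

static int neigh_cmp(uint8_t tq1, uint8_t tq2)
{
	return tq1 - tq2;
}

static bool neigh_is_equiv_or_better(uint8_t tq1, uint8_t tq2)
{
	return neigh_cmp(tq1, tq2) > -TQ_SIMILARITY_THRESHOLD;
}

int main(void)
{
	printf("200 vs 180: %d\n", neigh_is_equiv_or_better(200, 180)); /* better */
	printf("180 vs 200: %d\n", neigh_is_equiv_or_better(180, 200)); /* close enough */
	printf("100 vs 200: %d\n", neigh_is_equiv_or_better(100, 200)); /* clearly worse */
	return 0;
}
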
static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
.name = "BATMAN_IV",
.bat_iface_enable = batadv_iv_ogm_iface_enable,
@@ -1400,6 +1642,12 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
.bat_primary_iface_set = batadv_iv_ogm_primary_iface_set,
.bat_ogm_schedule = batadv_iv_ogm_schedule,
.bat_ogm_emit = batadv_iv_ogm_emit,
+ .bat_neigh_cmp = batadv_iv_ogm_neigh_cmp,
+ .bat_neigh_is_equiv_or_better = batadv_iv_ogm_neigh_is_eob,
+ .bat_orig_print = batadv_iv_ogm_orig_print,
+ .bat_orig_free = batadv_iv_ogm_orig_free,
+ .bat_orig_add_if = batadv_iv_ogm_orig_add_if,
+ .bat_orig_del_if = batadv_iv_ogm_orig_del_if,
};
int __init batadv_iv_init(void)
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 264de88db320..28eb5e6d0a02 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -411,10 +411,10 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
return NULL;
}
- /* this is a gateway now, remove any tt entries */
+ /* this is a gateway now, remove any TT entry on this VLAN */
orig_node = batadv_orig_hash_find(bat_priv, orig);
if (orig_node) {
- batadv_tt_global_del_orig(bat_priv, orig_node,
+ batadv_tt_global_del_orig(bat_priv, orig_node, vid,
"became a backbone gateway");
batadv_orig_node_free_ref(orig_node);
}
@@ -858,30 +858,28 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
struct sk_buff *skb)
{
- struct ethhdr *ethhdr;
+ struct batadv_bla_claim_dst *bla_dst;
+ uint8_t *hw_src, *hw_dst;
struct vlan_ethhdr *vhdr;
+ struct ethhdr *ethhdr;
struct arphdr *arphdr;
- uint8_t *hw_src, *hw_dst;
- struct batadv_bla_claim_dst *bla_dst;
- uint16_t proto;
+ unsigned short vid;
+ __be16 proto;
int headlen;
- unsigned short vid = BATADV_NO_FLAGS;
int ret;
+ vid = batadv_get_vid(skb, 0);
ethhdr = eth_hdr(skb);
- if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+ proto = ethhdr->h_proto;
+ headlen = ETH_HLEN;
+ if (vid & BATADV_VLAN_HAS_TAG) {
vhdr = (struct vlan_ethhdr *)ethhdr;
- vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
- vid |= BATADV_VLAN_HAS_TAG;
- proto = ntohs(vhdr->h_vlan_encapsulated_proto);
- headlen = sizeof(*vhdr);
- } else {
- proto = ntohs(ethhdr->h_proto);
- headlen = ETH_HLEN;
+ proto = vhdr->h_vlan_encapsulated_proto;
+ headlen += VLAN_HLEN;
}
- if (proto != ETH_P_ARP)
+ if (proto != htons(ETH_P_ARP))
return 0; /* not a claim frame */
/* this must be a ARP frame. check if it is a claim. */
@@ -1317,12 +1315,14 @@ out:
/* @bat_priv: the bat priv with all the soft interface information
* @orig: originator mac address
+ * @vid: VLAN identifier
*
- * check if the originator is a gateway for any VLAN ID.
+ * Check if the originator is a gateway for the VLAN identified by vid.
*
- * returns 1 if it is found, 0 otherwise
+ * Returns true if orig is a backbone for this vid, false otherwise.
*/
-int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+ unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
struct hlist_head *head;
@@ -1330,25 +1330,26 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
int i;
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
- return 0;
+ return false;
if (!hash)
- return 0;
+ return false;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
- if (batadv_compare_eth(backbone_gw->orig, orig)) {
+ if (batadv_compare_eth(backbone_gw->orig, orig) &&
+ backbone_gw->vid == vid) {
rcu_read_unlock();
- return 1;
+ return true;
}
}
rcu_read_unlock();
}
- return 0;
+ return false;
}
@@ -1365,10 +1366,8 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
int batadv_bla_is_backbone_gw(struct sk_buff *skb,
struct batadv_orig_node *orig_node, int hdr_size)
{
- struct ethhdr *ethhdr;
- struct vlan_ethhdr *vhdr;
struct batadv_bla_backbone_gw *backbone_gw;
- unsigned short vid = BATADV_NO_FLAGS;
+ unsigned short vid;
if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
return 0;
@@ -1377,16 +1376,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
return 0;
- ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
-
- if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
- if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
- return 0;
-
- vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
- vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
- vid |= BATADV_VLAN_HAS_TAG;
- }
+ vid = batadv_get_vid(skb, hdr_size);
/* see if this originator is a backbone gw for this VLAN */
backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 4b102e71e5bd..da173e760e77 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -30,7 +30,8 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
void *offset);
-int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+ unsigned short vid);
int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
struct sk_buff *skb);
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
@@ -74,10 +75,11 @@ static inline int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
return 0;
}
-static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
- uint8_t *orig)
+static inline bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
+ uint8_t *orig,
+ unsigned short vid)
{
- return 0;
+ return false;
}
static inline int
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index f186a55b23c3..049a7a2ac5b6 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -28,7 +28,6 @@
#include "gateway_common.h"
#include "gateway_client.h"
#include "soft-interface.h"
-#include "vis.h"
#include "icmp_socket.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
@@ -300,12 +299,6 @@ static int batadv_transtable_local_open(struct inode *inode, struct file *file)
return single_open(file, batadv_tt_local_seq_print_text, net_dev);
}
-static int batadv_vis_data_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, batadv_vis_seq_print_text, net_dev);
-}
-
struct batadv_debuginfo {
struct attribute attr;
const struct file_operations fops;
@@ -356,7 +349,6 @@ static BATADV_DEBUGINFO(dat_cache, S_IRUGO, batadv_dat_cache_open);
#endif
static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
batadv_transtable_local_open);
-static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open);
#ifdef CONFIG_BATMAN_ADV_NC
static BATADV_DEBUGINFO(nc_nodes, S_IRUGO, batadv_nc_nodes_open);
#endif
@@ -373,7 +365,6 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
&batadv_debuginfo_dat_cache,
#endif
&batadv_debuginfo_transtable_local,
- &batadv_debuginfo_vis_data,
#ifdef CONFIG_BATMAN_ADV_NC
&batadv_debuginfo_nc_nodes,
#endif
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 06345d401588..6c8c3934bd7b 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -19,6 +19,7 @@
#include <linux/if_ether.h>
#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
#include <net/arp.h>
#include "main.h"
@@ -29,7 +30,6 @@
#include "send.h"
#include "types.h"
#include "translation-table.h"
-#include "unicast.h"
static void batadv_dat_purge(struct work_struct *work);
@@ -206,15 +206,11 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
*/
static uint32_t batadv_hash_dat(const void *data, uint32_t size)
{
- const unsigned char *key = data;
uint32_t hash = 0;
- size_t i;
+ const struct batadv_dat_entry *dat = data;
- for (i = 0; i < 4; i++) {
- hash += key[i];
- hash += (hash << 10);
- hash ^= (hash >> 6);
- }
+ hash = batadv_hash_bytes(hash, &dat->ip, sizeof(dat->ip));
+ hash = batadv_hash_bytes(hash, &dat->vid, sizeof(dat->vid));
hash += (hash << 3);
hash ^= (hash >> 11);
@@ -228,21 +224,26 @@ static uint32_t batadv_hash_dat(const void *data, uint32_t size)
* table
* @bat_priv: the bat priv with all the soft interface information
* @ip: search key
+ * @vid: VLAN identifier
*
* Returns the dat_entry if found, NULL otherwise.
*/
static struct batadv_dat_entry *
-batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
+batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
+ unsigned short vid)
{
struct hlist_head *head;
- struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL;
+ struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL;
struct batadv_hashtable *hash = bat_priv->dat.hash;
uint32_t index;
if (!hash)
return NULL;
- index = batadv_hash_dat(&ip, hash->size);
+ to_find.ip = ip;
+ to_find.vid = vid;
+
+ index = batadv_hash_dat(&to_find, hash->size);
head = &hash->table[index];
rcu_read_lock();
@@ -266,22 +267,24 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
* @bat_priv: the bat priv with all the soft interface information
* @ip: ipv4 to add/edit
* @mac_addr: mac address to assign to the given ipv4
+ * @vid: VLAN identifier
*/
static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
- uint8_t *mac_addr)
+ uint8_t *mac_addr, unsigned short vid)
{
struct batadv_dat_entry *dat_entry;
int hash_added;
- dat_entry = batadv_dat_entry_hash_find(bat_priv, ip);
+ dat_entry = batadv_dat_entry_hash_find(bat_priv, ip, vid);
/* if this entry is already known, just update it */
if (dat_entry) {
if (!batadv_compare_eth(dat_entry->mac_addr, mac_addr))
memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
dat_entry->last_update = jiffies;
batadv_dbg(BATADV_DBG_DAT, bat_priv,
- "Entry updated: %pI4 %pM\n", &dat_entry->ip,
- dat_entry->mac_addr);
+ "Entry updated: %pI4 %pM (vid: %d)\n",
+ &dat_entry->ip, dat_entry->mac_addr,
+ BATADV_PRINT_VID(vid));
goto out;
}
@@ -290,12 +293,13 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
goto out;
dat_entry->ip = ip;
+ dat_entry->vid = vid;
memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
dat_entry->last_update = jiffies;
atomic_set(&dat_entry->refcount, 2);
hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat,
- batadv_hash_dat, &dat_entry->ip,
+ batadv_hash_dat, dat_entry,
&dat_entry->hash_entry);
if (unlikely(hash_added != 0)) {
@@ -304,8 +308,8 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
goto out;
}
- batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM\n",
- &dat_entry->ip, dat_entry->mac_addr);
+ batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM (vid: %d)\n",
+ &dat_entry->ip, dat_entry->mac_addr, BATADV_PRINT_VID(vid));
out:
if (dat_entry)
@@ -419,6 +423,10 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
bool ret = false;
int j;
+ /* check if orig node candidate is running DAT */
+ if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT))
+ goto out;
+
/* Check if this node has already been selected... */
for (j = 0; j < select; j++)
if (res[j].orig_node == candidate)
@@ -588,9 +596,9 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
goto free_orig;
tmp_skb = pskb_copy(skb, GFP_ATOMIC);
- if (!batadv_unicast_4addr_prepare_skb(bat_priv, tmp_skb,
- cand[i].orig_node,
- packet_subtype)) {
+ if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb,
+ cand[i].orig_node,
+ packet_subtype)) {
kfree_skb(tmp_skb);
goto free_neigh;
}
@@ -626,6 +634,59 @@ out:
}
/**
+ * batadv_dat_tvlv_container_update - update the dat tvlv container after dat
+ * setting change
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+ char dat_mode;
+
+ dat_mode = atomic_read(&bat_priv->distributed_arp_table);
+
+ switch (dat_mode) {
+ case 0:
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+ break;
+ case 1:
+ batadv_tvlv_container_register(bat_priv, BATADV_TVLV_DAT, 1,
+ NULL, 0);
+ break;
+ }
+}
+
+/**
+ * batadv_dat_status_update - update the dat tvlv container after dat
+ * setting change
+ * @net_dev: the soft interface net device
+ */
+void batadv_dat_status_update(struct net_device *net_dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ batadv_dat_tvlv_container_update(bat_priv);
+}
+
+/**
+ * batadv_dat_tvlv_ogm_handler_v1 - process incoming dat tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the dat data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ uint8_t flags,
+ void *tvlv_value,
+ uint16_t tvlv_value_len)
+{
+ if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
+ orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;
+ else
+ orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;
+}
+
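
The OGM handler records whether the originator advertised the DAT TVLV by setting or clearing a capability bit, keyed off the CIFNOTFND ("container not found") handler flag. A tiny sketch of that set/clear bookkeeping, with invented flag values:

#include <stdint.h>
#include <stdio.h>

#define CAPA_HAS_DAT	(1U << 0)	/* stand-in for BATADV_ORIG_CAPA_HAS_DAT */
#define FLAG_NOT_FOUND	(1U << 1)	/* stand-in for ..._OGM_CIFNOTFND */

static void update_capability(uint32_t *capabilities, uint32_t handler_flags)
{
	if (handler_flags & FLAG_NOT_FOUND)
		*capabilities &= ~CAPA_HAS_DAT;	/* peer stopped advertising DAT */
	else
		*capabilities |= CAPA_HAS_DAT;
}

int main(void)
{
	uint32_t capa = 0;

	update_capability(&capa, 0);		/* TVLV seen: set the bit */
	printf("after OGM with DAT tvlv:    %#x\n", capa);
	update_capability(&capa, FLAG_NOT_FOUND);
	printf("after OGM without DAT tvlv: %#x\n", capa);
	return 0;
}
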
+/**
* batadv_dat_hash_free - free the local DAT hash table
* @bat_priv: the bat priv with all the soft interface information
*/
@@ -657,6 +718,10 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
batadv_dat_start_timer(bat_priv);
+ batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
+ NULL, BATADV_TVLV_DAT, 1,
+ BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+ batadv_dat_tvlv_container_update(bat_priv);
return 0;
}
@@ -666,6 +731,9 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
*/
void batadv_dat_free(struct batadv_priv *bat_priv)
{
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+
cancel_delayed_work_sync(&bat_priv->dat.work);
batadv_dat_hash_free(bat_priv);
@@ -693,8 +761,8 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
goto out;
seq_printf(seq, "Distributed ARP Table (%s):\n", net_dev->name);
- seq_printf(seq, " %-7s %-13s %5s\n", "IPv4", "MAC",
- "last-seen");
+ seq_printf(seq, " %-7s %-9s %4s %11s\n", "IPv4",
+ "MAC", "VID", "last-seen");
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -707,8 +775,9 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
last_seen_msecs = last_seen_msecs % 60000;
last_seen_secs = last_seen_msecs / 1000;
- seq_printf(seq, " * %15pI4 %14pM %6i:%02i\n",
+ seq_printf(seq, " * %15pI4 %14pM %4i %6i:%02i\n",
&dat_entry->ip, dat_entry->mac_addr,
+ BATADV_PRINT_VID(dat_entry->vid),
last_seen_mins, last_seen_secs);
}
rcu_read_unlock();
@@ -795,6 +864,31 @@ out:
}
/**
+ * batadv_dat_get_vid - extract the VLAN identifier from skb if any
+ * @skb: the buffer containing the packet to extract the VID from
+ * @hdr_size: the size of the batman-adv header encapsulating the packet
+ *
+ * If the packet embedded in the skb is vlan tagged this function returns the
+ * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
+ */
+static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
+{
+ unsigned short vid;
+
+ vid = batadv_get_vid(skb, *hdr_size);
+
+ /* ARP parsing functions skip forward by hdr_size + ETH_HLEN.
+ * If the header contained in the packet is a VLAN one (which is longer),
+ * hdr_size is updated so that those functions still skip the
+ * correct number of bytes.
+ */
+ if (vid & BATADV_VLAN_HAS_TAG)
+ *hdr_size += VLAN_HLEN;
+
+ return vid;
+}
+
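
batadv_dat_get_vid() reads the VID only when the encapsulated Ethernet frame carries an 802.1Q tag, and then widens hdr_size by VLAN_HLEN so the ARP accessors keep pointing at the right offsets. A userspace sketch of the same decision; the frame layout and flag values below are assumptions for illustration:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN_	14
#define VLAN_HLEN_	4
#define HAS_TAG		(1U << 15)	/* stand-in for BATADV_VLAN_HAS_TAG */
#define NO_FLAGS	0		/* stand-in for BATADV_NO_FLAGS */

/* Return the VID (with HAS_TAG set) if the Ethernet frame at offset *hdr_size
 * is 802.1Q tagged, and widen *hdr_size accordingly; otherwise NO_FLAGS. */
static unsigned short get_vid(const uint8_t *buf, int *hdr_size)
{
	uint16_t proto, tci;

	memcpy(&proto, buf + *hdr_size + 12, 2);	/* EtherType field */
	if (ntohs(proto) != 0x8100)			/* not VLAN tagged */
		return NO_FLAGS;

	memcpy(&tci, buf + *hdr_size + ETH_HLEN_, 2);
	*hdr_size += VLAN_HLEN_;
	return (ntohs(tci) & 0x0fff) | HAS_TAG;
}

int main(void)
{
	uint8_t frame[ETH_HLEN_ + VLAN_HLEN_ + 28] = { 0 };
	uint16_t ethertype = htons(0x8100), tci = htons(5);
	int hdr_size = 0;
	unsigned short vid;

	memcpy(frame + 12, &ethertype, 2);
	memcpy(frame + ETH_HLEN_, &tci, 2);

	vid = get_vid(frame, &hdr_size);
	printf("vid = %#x, hdr_size = %d\n", (unsigned int)vid, hdr_size);
	return 0;
}
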
+/**
* batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
* answer using DAT
* @bat_priv: the bat priv with all the soft interface information
@@ -813,26 +907,31 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
bool ret = false;
struct batadv_dat_entry *dat_entry = NULL;
struct sk_buff *skb_new;
+ int hdr_size = 0;
+ unsigned short vid;
if (!atomic_read(&bat_priv->distributed_arp_table))
goto out;
- type = batadv_arp_get_type(bat_priv, skb, 0);
+ vid = batadv_dat_get_vid(skb, &hdr_size);
+
+ type = batadv_arp_get_type(bat_priv, skb, hdr_size);
/* If the node gets an ARP_REQUEST it has to send a DHT_GET unicast
* message to the selected DHT candidates
*/
if (type != ARPOP_REQUEST)
goto out;
- batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REQUEST");
+ batadv_dbg_arp(bat_priv, skb, type, hdr_size,
+ "Parsing outgoing ARP REQUEST");
- ip_src = batadv_arp_ip_src(skb, 0);
- hw_src = batadv_arp_hw_src(skb, 0);
- ip_dst = batadv_arp_ip_dst(skb, 0);
+ ip_src = batadv_arp_ip_src(skb, hdr_size);
+ hw_src = batadv_arp_hw_src(skb, hdr_size);
+ ip_dst = batadv_arp_ip_dst(skb, hdr_size);
- batadv_dat_entry_add(bat_priv, ip_src, hw_src);
+ batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
- dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+ dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
if (dat_entry) {
/* If the ARP request is destined for a local client the local
* client will answer itself. DAT would only generate a
@@ -842,7 +941,8 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
* additional DAT answer may trigger kernel warnings about
* a packet coming from the wrong port.
*/
- if (batadv_is_my_client(bat_priv, dat_entry->mac_addr)) {
+ if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
+ BATADV_NO_FLAGS)) {
ret = true;
goto out;
}
@@ -853,11 +953,15 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
if (!skb_new)
goto out;
+ if (vid & BATADV_VLAN_HAS_TAG)
+ skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
+ vid & VLAN_VID_MASK);
+
skb_reset_mac_header(skb_new);
skb_new->protocol = eth_type_trans(skb_new,
bat_priv->soft_iface);
bat_priv->stats.rx_packets++;
- bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+ bat_priv->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
bat_priv->soft_iface->last_rx = jiffies;
netif_rx(skb_new);
@@ -892,11 +996,14 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
struct sk_buff *skb_new;
struct batadv_dat_entry *dat_entry = NULL;
bool ret = false;
+ unsigned short vid;
int err;
if (!atomic_read(&bat_priv->distributed_arp_table))
goto out;
+ vid = batadv_dat_get_vid(skb, &hdr_size);
+
type = batadv_arp_get_type(bat_priv, skb, hdr_size);
if (type != ARPOP_REQUEST)
goto out;
@@ -908,9 +1015,9 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
batadv_dbg_arp(bat_priv, skb, type, hdr_size,
"Parsing incoming ARP REQUEST");
- batadv_dat_entry_add(bat_priv, ip_src, hw_src);
+ batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
- dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+ dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
if (!dat_entry)
goto out;
@@ -921,17 +1028,22 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
if (!skb_new)
goto out;
+ if (vid & BATADV_VLAN_HAS_TAG)
+ skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
+ vid & VLAN_VID_MASK);
+
+ /* To preserve backwards compatibility, the node has to choose the outgoing
* format based on the incoming request packet type. The assumption is
* that a node not using the 4addr packet format doesn't support it.
*/
if (hdr_size == sizeof(struct batadv_unicast_4addr_packet))
- err = batadv_unicast_4addr_send_skb(bat_priv, skb_new,
- BATADV_P_DAT_CACHE_REPLY);
+ err = batadv_send_skb_via_tt_4addr(bat_priv, skb_new,
+ BATADV_P_DAT_CACHE_REPLY,
+ vid);
else
- err = batadv_unicast_send_skb(bat_priv, skb_new);
+ err = batadv_send_skb_via_tt(bat_priv, skb_new, vid);
- if (!err) {
+ if (err != NET_XMIT_DROP) {
batadv_inc_counter(bat_priv, BATADV_CNT_DAT_CACHED_REPLY_TX);
ret = true;
}
@@ -954,23 +1066,28 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
uint16_t type;
__be32 ip_src, ip_dst;
uint8_t *hw_src, *hw_dst;
+ int hdr_size = 0;
+ unsigned short vid;
if (!atomic_read(&bat_priv->distributed_arp_table))
return;
- type = batadv_arp_get_type(bat_priv, skb, 0);
+ vid = batadv_dat_get_vid(skb, &hdr_size);
+
+ type = batadv_arp_get_type(bat_priv, skb, hdr_size);
if (type != ARPOP_REPLY)
return;
- batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REPLY");
+ batadv_dbg_arp(bat_priv, skb, type, hdr_size,
+ "Parsing outgoing ARP REPLY");
- hw_src = batadv_arp_hw_src(skb, 0);
- ip_src = batadv_arp_ip_src(skb, 0);
- hw_dst = batadv_arp_hw_dst(skb, 0);
- ip_dst = batadv_arp_ip_dst(skb, 0);
+ hw_src = batadv_arp_hw_src(skb, hdr_size);
+ ip_src = batadv_arp_ip_src(skb, hdr_size);
+ hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+ ip_dst = batadv_arp_ip_dst(skb, hdr_size);
- batadv_dat_entry_add(bat_priv, ip_src, hw_src);
- batadv_dat_entry_add(bat_priv, ip_dst, hw_dst);
+ batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+ batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
/* Send the ARP reply to the candidates for both the IP addresses that
* the node obtained from the ARP reply
@@ -992,10 +1109,13 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
__be32 ip_src, ip_dst;
uint8_t *hw_src, *hw_dst;
bool ret = false;
+ unsigned short vid;
if (!atomic_read(&bat_priv->distributed_arp_table))
goto out;
+ vid = batadv_dat_get_vid(skb, &hdr_size);
+
type = batadv_arp_get_type(bat_priv, skb, hdr_size);
if (type != ARPOP_REPLY)
goto out;
@@ -1011,13 +1131,13 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
/* Update our internal cache with both the IP addresses the node got
* within the ARP reply
*/
- batadv_dat_entry_add(bat_priv, ip_src, hw_src);
- batadv_dat_entry_add(bat_priv, ip_dst, hw_dst);
+ batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+ batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
/* if this REPLY is directed to a client of mine, let's deliver the
* packet to the interface
*/
- ret = !batadv_is_my_client(bat_priv, hw_dst);
+ ret = !batadv_is_my_client(bat_priv, hw_dst, vid);
out:
if (ret)
kfree_skb(skb);
@@ -1040,7 +1160,8 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
__be32 ip_dst;
struct batadv_dat_entry *dat_entry = NULL;
bool ret = false;
- const size_t bcast_len = sizeof(struct batadv_bcast_packet);
+ int hdr_size = sizeof(struct batadv_bcast_packet);
+ unsigned short vid;
if (!atomic_read(&bat_priv->distributed_arp_table))
goto out;
@@ -1051,12 +1172,14 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
if (forw_packet->num_packets)
goto out;
- type = batadv_arp_get_type(bat_priv, forw_packet->skb, bcast_len);
+ vid = batadv_dat_get_vid(forw_packet->skb, &hdr_size);
+
+ type = batadv_arp_get_type(bat_priv, forw_packet->skb, hdr_size);
if (type != ARPOP_REQUEST)
goto out;
- ip_dst = batadv_arp_ip_dst(forw_packet->skb, bcast_len);
- dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+ ip_dst = batadv_arp_ip_dst(forw_packet->skb, hdr_size);
+ dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
/* check if the node already got this entry */
if (!dat_entry) {
batadv_dbg(BATADV_DBG_DAT, bat_priv,
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index 125c8c6fcfad..60d853beb8d8 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -29,6 +29,7 @@
#define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
+void batadv_dat_status_update(struct net_device *net_dev);
bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
struct sk_buff *skb);
bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
@@ -98,6 +99,10 @@ static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
#else
+static inline void batadv_dat_status_update(struct net_device *net_dev)
+{
+}
+
static inline bool
batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
struct sk_buff *skb)
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
new file mode 100644
index 000000000000..271d321b3a04
--- /dev/null
+++ b/net/batman-adv/fragmentation.c
@@ -0,0 +1,491 @@
+/* Copyright (C) 2013 B.A.T.M.A.N. contributors:
+ *
+ * Martin Hundebøll <martin@hundeboll.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "main.h"
+#include "fragmentation.h"
+#include "send.h"
+#include "originator.h"
+#include "routing.h"
+#include "hard-interface.h"
+#include "soft-interface.h"
+
+
+/**
+ * batadv_frag_clear_chain - delete entries in the fragment buffer chain
+ * @head: head of chain with entries.
+ *
+ * Free fragments in the passed hlist. Should be called with appropriate lock.
+ */
+static void batadv_frag_clear_chain(struct hlist_head *head)
+{
+ struct batadv_frag_list_entry *entry;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(entry, node, head, list) {
+ hlist_del(&entry->list);
+ kfree_skb(entry->skb);
+ kfree(entry);
+ }
+}
+
+/**
+ * batadv_frag_purge_orig - free fragments associated to an orig
+ * @orig_node: originator to free fragments from
+ * @check_cb: optional function to tell if an entry should be purged
+ */
+void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
+ bool (*check_cb)(struct batadv_frag_table_entry *))
+{
+ struct batadv_frag_table_entry *chain;
+ uint8_t i;
+
+ for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
+ chain = &orig_node->fragments[i];
+ spin_lock_bh(&orig_node->fragments[i].lock);
+
+ if (!check_cb || check_cb(chain)) {
+ batadv_frag_clear_chain(&orig_node->fragments[i].head);
+ orig_node->fragments[i].size = 0;
+ }
+
+ spin_unlock_bh(&orig_node->fragments[i].lock);
+ }
+}
+
+/**
+ * batadv_frag_size_limit - maximum possible size of packet to be fragmented
+ *
+ * Returns the maximum size of payload that can be fragmented.
+ */
+static int batadv_frag_size_limit(void)
+{
+ int limit = BATADV_FRAG_MAX_FRAG_SIZE;
+
+ limit -= sizeof(struct batadv_frag_packet);
+ limit *= BATADV_FRAG_MAX_FRAGMENTS;
+
+ return limit;
+}
+
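A worked example of the limit, assuming the constants used by this series (a 1400-byte BATADV_FRAG_MAX_FRAG_SIZE, 16 for BATADV_FRAG_MAX_FRAGMENTS and a 20-byte struct batadv_frag_packet; treat these exact numbers as assumptions):

/* Example (assumed constants):
 *   per-fragment payload:  1400 - 20 = 1380 bytes
 *   reassembly limit:      1380 * 16 = 22080 bytes
 * total_size values above this limit cause the chain to be cleared.
 */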
+/**
+ * batadv_frag_init_chain - check and prepare fragment chain for new fragment
+ * @chain: chain in fragments table to init
+ * @seqno: sequence number of the received fragment
+ *
+ * Make chain ready for a fragment with sequence number "seqno". Delete existing
+ * entries if they have an "old" sequence number.
+ *
+ * Caller must hold chain->lock.
+ *
+ * Returns true if chain is empty and caller can just insert the new fragment
+ * without searching for the right position.
+ */
+static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
+ uint16_t seqno)
+{
+ if (chain->seqno == seqno)
+ return false;
+
+ if (!hlist_empty(&chain->head))
+ batadv_frag_clear_chain(&chain->head);
+
+ chain->size = 0;
+ chain->seqno = seqno;
+
+ return true;
+}
+
+/**
+ * batadv_frag_insert_packet - insert a fragment into a fragment chain
+ * @orig_node: originator that the fragment was received from
+ * @skb: skb to insert
+ * @chain_out: list head to attach complete chains of fragments to
+ *
+ * Insert a new fragment into the reverse ordered chain in the right table
+ * entry. The hash table entry is cleared if "old" fragments exist in it.
+ *
+ * Returns true if skb is buffered, false on error. If the chain has all the
+ * fragments needed to merge the packet, the chain is moved to the passed head
+ * to avoid locking the chain in the table.
+ */
+static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
+ struct sk_buff *skb,
+ struct hlist_head *chain_out)
+{
+ struct batadv_frag_table_entry *chain;
+ struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
+ struct batadv_frag_list_entry *frag_entry_last = NULL;
+ struct batadv_frag_packet *frag_packet;
+ uint8_t bucket;
+ uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
+ bool ret = false;
+
+ /* Linearize packet to avoid linearizing 16 packets in a row when doing
+ * the later merge. Non-linear merge should be added to remove this
+ * linearization.
+ */
+ if (skb_linearize(skb) < 0)
+ goto err;
+
+ frag_packet = (struct batadv_frag_packet *)skb->data;
+ seqno = ntohs(frag_packet->seqno);
+ bucket = seqno % BATADV_FRAG_BUFFER_COUNT;
+
+ frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
+ if (!frag_entry_new)
+ goto err;
+
+ frag_entry_new->skb = skb;
+ frag_entry_new->no = frag_packet->no;
+
+ /* Select entry in the "chain table" and delete any prior fragments
+ * with another sequence number. batadv_frag_init_chain() returns true
+ * if the list is empty at return.
+ */
+ chain = &orig_node->fragments[bucket];
+ spin_lock_bh(&chain->lock);
+ if (batadv_frag_init_chain(chain, seqno)) {
+ hlist_add_head(&frag_entry_new->list, &chain->head);
+ chain->size = skb->len - hdr_size;
+ chain->timestamp = jiffies;
+ ret = true;
+ goto out;
+ }
+
+ /* Find the position for the new fragment. */
+ hlist_for_each_entry(frag_entry_curr, &chain->head, list) {
+ /* Drop packet if fragment already exists. */
+ if (frag_entry_curr->no == frag_entry_new->no)
+ goto err_unlock;
+
+ /* Order fragments from highest to lowest. */
+ if (frag_entry_curr->no < frag_entry_new->no) {
+ hlist_add_before(&frag_entry_new->list,
+ &frag_entry_curr->list);
+ chain->size += skb->len - hdr_size;
+ chain->timestamp = jiffies;
+ ret = true;
+ goto out;
+ }
+
+ /* store the current entry because it could be the last in the list */
+ frag_entry_last = frag_entry_curr;
+ }
+
+ /* Reached the end of the list, so insert after 'frag_entry_last'. */
+ if (likely(frag_entry_last)) {
+ hlist_add_after(&frag_entry_last->list, &frag_entry_new->list);
+ chain->size += skb->len - hdr_size;
+ chain->timestamp = jiffies;
+ ret = true;
+ }
+
+out:
+ if (chain->size > batadv_frag_size_limit() ||
+ ntohs(frag_packet->total_size) > batadv_frag_size_limit()) {
+ /* Clear chain if total size of either the list or the packet
+ * exceeds the maximum size of one merged packet.
+ */
+ batadv_frag_clear_chain(&chain->head);
+ chain->size = 0;
+ } else if (ntohs(frag_packet->total_size) == chain->size) {
+ /* All fragments received. Hand over chain to caller. */
+ hlist_move_list(&chain->head, chain_out);
+ chain->size = 0;
+ }
+
+err_unlock:
+ spin_unlock_bh(&chain->lock);
+
+err:
+ if (!ret)
+ kfree(frag_entry_new);
+
+ return ret;
+}
+
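As an illustration of the ordering maintained above, a complete chain for a packet split into three fragments would look like this (fragment numbers only):

/* chain->head -> no=2 -> no=1 -> no=0
 * (highest fragment number first; a duplicate "no" is dropped and a new
 *  seqno resets the chain via batadv_frag_init_chain())
 */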
+/**
+ * batadv_frag_merge_packets - merge a chain of fragments
+ * @chain: head of chain with fragments
+ * @skb: packet with total size of skb after merging
+ *
+ * Expand the first skb in the chain and copy the content of the remaining
+ * skb's into the expanded one. After doing so, clear the chain.
+ *
+ * Returns the merged skb or NULL on error.
+ */
+static struct sk_buff *
+batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
+{
+ struct batadv_frag_packet *packet;
+ struct batadv_frag_list_entry *entry;
+ struct sk_buff *skb_out = NULL;
+ int size, hdr_size = sizeof(struct batadv_frag_packet);
+
+ /* Make sure incoming skb has non-bogus data. */
+ packet = (struct batadv_frag_packet *)skb->data;
+ size = ntohs(packet->total_size);
+ if (size > batadv_frag_size_limit())
+ goto free;
+
+ /* Remove first entry, as this is the destination for the rest of the
+ * fragments.
+ */
+ entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
+ hlist_del(&entry->list);
+ skb_out = entry->skb;
+ kfree(entry);
+
+ /* Make room for the rest of the fragments. */
+ if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
+ kfree_skb(skb_out);
+ skb_out = NULL;
+ goto free;
+ }
+
+ /* Move the existing MAC header to just before the payload. (Override
+ * the fragment header.)
+ */
+ skb_pull_rcsum(skb_out, hdr_size);
+ memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
+ skb_set_mac_header(skb_out, -ETH_HLEN);
+ skb_reset_network_header(skb_out);
+ skb_reset_transport_header(skb_out);
+
+ /* Copy the payload of each fragment into the last skb */
+ hlist_for_each_entry(entry, chain, list) {
+ size = entry->skb->len - hdr_size;
+ memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
+ size);
+ }
+
+free:
+ /* Locking is not needed, because 'chain' is not part of any orig. */
+ batadv_frag_clear_chain(chain);
+ return skb_out;
+}
+
+/**
+ * batadv_frag_skb_buffer - buffer fragment for later merge
+ * @skb: skb to buffer
+ * @orig_node_src: originator that the skb is received from
+ *
+ * Add fragment to buffer and merge fragments if possible.
+ *
+ * There are three possible outcomes: 1) Packet is merged: Return true and
+ * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
+ * to NULL; 3) Error: Return false and leave skb as is.
+ */
+bool batadv_frag_skb_buffer(struct sk_buff **skb,
+ struct batadv_orig_node *orig_node_src)
+{
+ struct sk_buff *skb_out = NULL;
+ struct hlist_head head = HLIST_HEAD_INIT;
+ bool ret = false;
+
+ /* Add packet to buffer and table entry if merge is possible. */
+ if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
+ goto out_err;
+
+ /* Leave if more fragments are needed to merge. */
+ if (hlist_empty(&head))
+ goto out;
+
+ skb_out = batadv_frag_merge_packets(&head, *skb);
+ if (!skb_out)
+ goto out_err;
+
+out:
+ *skb = skb_out;
+ ret = true;
+out_err:
+ return ret;
+}
+
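A minimal sketch of how a receive-path caller could act on the three outcomes documented above; example_deliver_merged() is a hypothetical placeholder, not a function from this patch:

static int example_recv_frag(struct sk_buff *skb,
			     struct batadv_orig_node *orig_node_src)
{
	/* error: the skb is left untouched, so drop it here */
	if (!batadv_frag_skb_buffer(&skb, orig_node_src)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* buffered: more fragments are needed before a merge is possible */
	if (!skb)
		return NET_RX_SUCCESS;

	/* merged: hand the reassembled packet to the next stage */
	return example_deliver_merged(skb);
}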
+/**
+ * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
+ * @skb: skb to forward
+ * @recv_if: interface that the skb is received on
+ * @orig_node_src: originator that the skb is received from
+ *
+ * Look up the next-hop of the fragment's payload and check if the merged packet
+ * will exceed the MTU towards the next-hop. If so, the fragment is forwarded
+ * without merging it.
+ *
+ * Returns true if the fragment is consumed/forwarded, false otherwise.
+ */
+bool batadv_frag_skb_fwd(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if,
+ struct batadv_orig_node *orig_node_src)
+{
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_orig_node *orig_node_dst = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_frag_packet *packet;
+ uint16_t total_size;
+ bool ret = false;
+
+ packet = (struct batadv_frag_packet *)skb->data;
+ orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
+ if (!orig_node_dst)
+ goto out;
+
+ neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
+ if (!neigh_node)
+ goto out;
+
+ /* Forward the fragment, if the merged packet would be too big to
+ * be assembled.
+ */
+ total_size = ntohs(packet->total_size);
+ if (total_size > neigh_node->if_incoming->net_dev->mtu) {
+ batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
+ batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
+ skb->len + ETH_HLEN);
+
+ packet->header.ttl--;
+ batadv_send_skb_packet(skb, neigh_node->if_incoming,
+ neigh_node->addr);
+ ret = true;
+ }
+
+out:
+ if (orig_node_dst)
+ batadv_orig_node_free_ref(orig_node_dst);
+ if (neigh_node)
+ batadv_neigh_node_free_ref(neigh_node);
+ return ret;
+}
+
+/**
+ * batadv_frag_create - create a fragment from skb
+ * @skb: skb to create fragment from
+ * @frag_head: header to use in new fragment
+ * @mtu: size of new fragment
+ *
+ * Split the passed skb into two fragments: A new one with size matching the
+ * passed mtu and the old one with the rest. The new skb contains data from the
+ * tail of the old skb.
+ *
+ * Returns the new fragment, NULL on error.
+ */
+static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
+ struct batadv_frag_packet *frag_head,
+ unsigned int mtu)
+{
+ struct sk_buff *skb_fragment;
+ unsigned header_size = sizeof(*frag_head);
+ unsigned fragment_size = mtu - header_size;
+
+ skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
+ if (!skb_fragment)
+ goto err;
+
+ skb->priority = TC_PRIO_CONTROL;
+
+ /* Eat the last mtu-bytes of the skb */
+ skb_reserve(skb_fragment, header_size + ETH_HLEN);
+ skb_split(skb, skb_fragment, skb->len - fragment_size);
+
+ /* Add the header */
+ skb_push(skb_fragment, header_size);
+ memcpy(skb_fragment->data, frag_head, header_size);
+
+err:
+ return skb_fragment;
+}
+
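To make the tail-eating split concrete, a worked example with round numbers (purely illustrative):

/* Example: mtu = 1400, header_size = 20 -> fragment_size = 1380.
 * For a 3000-byte skb, skb_split() moves bytes [1620..2999] into
 * skb_fragment, the fragment header is pushed in front of them and the
 * original skb shrinks to 1620 bytes for the next iteration.
 */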
+/**
+ * batadv_frag_send_packet - create up to 16 fragments from the passed skb
+ * @skb: skb to create fragments from
+ * @orig_node: final destination of the created fragments
+ * @neigh_node: next-hop of the created fragments
+ *
+ * Returns true on success, false otherwise.
+ */
+bool batadv_frag_send_packet(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node)
+{
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if;
+ struct batadv_frag_packet frag_header;
+ struct sk_buff *skb_fragment;
+ unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
+ unsigned header_size = sizeof(frag_header);
+ unsigned max_fragment_size, max_packet_size;
+
+ /* To avoid merge and refragmentation at next-hops we never send
+ * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
+ */
+ mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+ max_fragment_size = (mtu - header_size - ETH_HLEN);
+ max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
+
+ /* Don't even try to fragment if we need more than 16 fragments */
+ if (skb->len > max_packet_size)
+ goto out_err;
+
+ bat_priv = orig_node->bat_priv;
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out_err;
+
+ /* Create one header to be copied to all fragments */
+ frag_header.header.packet_type = BATADV_UNICAST_FRAG;
+ frag_header.header.version = BATADV_COMPAT_VERSION;
+ frag_header.header.ttl = BATADV_TTL;
+ frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
+ frag_header.reserved = 0;
+ frag_header.no = 0;
+ frag_header.total_size = htons(skb->len);
+ memcpy(frag_header.orig, primary_if->net_dev->dev_addr, ETH_ALEN);
+ memcpy(frag_header.dest, orig_node->orig, ETH_ALEN);
+
+ /* Eat and send fragments from the tail of skb */
+ while (skb->len > max_fragment_size) {
+ skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
+ if (!skb_fragment)
+ goto out_err;
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
+ batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
+ skb_fragment->len + ETH_HLEN);
+ batadv_send_skb_packet(skb_fragment, neigh_node->if_incoming,
+ neigh_node->addr);
+ frag_header.no++;
+
+ /* The initial check in this function should cover this case */
+ if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)
+ goto out_err;
+ }
+
+ /* Make room for the fragment header. */
+ if (batadv_skb_head_push(skb, header_size) < 0 ||
+ pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
+ goto out_err;
+
+ memcpy(skb->data, &frag_header, header_size);
+
+ /* Send the last fragment */
+ batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
+ batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
+ skb->len + ETH_HLEN);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+
+ return true;
+out_err:
+ return false;
+}
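For orientation, the sizing arithmetic of batadv_frag_send_packet() with an assumed 1500-byte next-hop MTU and the same assumed constants as above:

/* Example: mtu               = min(1500, 1400)      = 1400
 *          max_fragment_size = 1400 - 20 - ETH_HLEN  = 1366 bytes
 *          max_packet_size   = 1366 * 16             = 21856 bytes
 * Larger skbs are rejected before any fragment is created.
 */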
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
new file mode 100644
index 000000000000..ca029e2676e7
--- /dev/null
+++ b/net/batman-adv/fragmentation.h
@@ -0,0 +1,50 @@
+/* Copyright (C) 2013 B.A.T.M.A.N. contributors:
+ *
+ * Martin Hundebøll <martin@hundeboll.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef _NET_BATMAN_ADV_FRAGMENTATION_H_
+#define _NET_BATMAN_ADV_FRAGMENTATION_H_
+
+void batadv_frag_purge_orig(struct batadv_orig_node *orig,
+ bool (*check_cb)(struct batadv_frag_table_entry *));
+bool batadv_frag_skb_fwd(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if,
+ struct batadv_orig_node *orig_node_src);
+bool batadv_frag_skb_buffer(struct sk_buff **skb,
+ struct batadv_orig_node *orig_node);
+bool batadv_frag_send_packet(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node);
+
+/**
+ * batadv_frag_check_entry - check if a list of fragments has timed out
+ * @frags_entry: table entry to check
+ *
+ * Returns true if the frags entry has timed out, false otherwise.
+ */
+static inline bool
+batadv_frag_check_entry(struct batadv_frag_table_entry *frags_entry)
+{
+ if (!hlist_empty(&frags_entry->head) &&
+ batadv_has_timed_out(frags_entry->timestamp, BATADV_FRAG_TIMEOUT))
+ return true;
+ else
+ return false;
+}
+
+#endif /* _NET_BATMAN_ADV_FRAGMENTATION_H_ */
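batadv_frag_check_entry() is intended as the optional callback of batadv_frag_purge_orig(); a short usage sketch (the periodic-purge context is an assumption):

/* Drop only chains older than BATADV_FRAG_TIMEOUT, e.g. from a periodic
 * purge work item:
 */
batadv_frag_purge_orig(orig_node, batadv_frag_check_entry);

/* Passing NULL instead clears every chain unconditionally, e.g. when the
 * originator itself is freed:
 */
batadv_frag_purge_orig(orig_node, NULL);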
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 1ce4b8763ef2..2449afaa7638 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -118,7 +118,6 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
uint32_t gw_divisor;
uint8_t max_tq = 0;
- int down, up;
uint8_t tq_avg;
struct batadv_orig_node *orig_node;
@@ -138,14 +137,13 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
if (!atomic_inc_not_zero(&gw_node->refcount))
goto next;
- tq_avg = router->tq_avg;
+ tq_avg = router->bat_iv.tq_avg;
switch (atomic_read(&bat_priv->gw_sel_class)) {
case 1: /* fast connection */
- batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
- &down, &up);
-
- tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100;
+ tmp_gw_factor = tq_avg * tq_avg;
+ tmp_gw_factor *= gw_node->bandwidth_down;
+ tmp_gw_factor *= 100 * 100;
tmp_gw_factor /= gw_divisor;
if ((tmp_gw_factor > max_gw_factor) ||
@@ -223,11 +221,6 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
struct batadv_neigh_node *router = NULL;
char gw_addr[18] = { '\0' };
- /* The batman daemon checks here if we already passed a full originator
- * cycle in order to make sure we don't choose the first gateway we
- * hear about. This check is based on the daemon's uptime which we
- * don't have.
- */
if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
goto out;
@@ -258,16 +251,22 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
NULL);
} else if ((!curr_gw) && (next_gw)) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
+ "Adding route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
next_gw->orig_node->orig,
- next_gw->orig_node->gw_flags, router->tq_avg);
+ next_gw->bandwidth_down / 10,
+ next_gw->bandwidth_down % 10,
+ next_gw->bandwidth_up / 10,
+ next_gw->bandwidth_up % 10, router->bat_iv.tq_avg);
batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
gw_addr);
} else {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
+ "Changing route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
next_gw->orig_node->orig,
- next_gw->orig_node->gw_flags, router->tq_avg);
+ next_gw->bandwidth_down / 10,
+ next_gw->bandwidth_down % 10,
+ next_gw->bandwidth_up / 10,
+ next_gw->bandwidth_up % 10, router->bat_iv.tq_avg);
batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE,
gw_addr);
}
@@ -306,8 +305,8 @@ void batadv_gw_check_election(struct batadv_priv *bat_priv,
if (!router_orig)
goto out;
- gw_tq_avg = router_gw->tq_avg;
- orig_tq_avg = router_orig->tq_avg;
+ gw_tq_avg = router_gw->bat_iv.tq_avg;
+ orig_tq_avg = router_orig->bat_iv.tq_avg;
/* the TQ value has to be better */
if (orig_tq_avg < gw_tq_avg)
@@ -337,12 +336,20 @@ out:
return;
}
+/**
+ * batadv_gw_node_add - add gateway node to list of available gateways
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ * @gateway: announced bandwidth information
+ */
static void batadv_gw_node_add(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
- uint8_t new_gwflags)
+ struct batadv_tvlv_gateway_data *gateway)
{
struct batadv_gw_node *gw_node;
- int down, up;
+
+ if (gateway->bandwidth_down == 0)
+ return;
gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
if (!gw_node)
@@ -356,73 +363,116 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
spin_unlock_bh(&bat_priv->gw.list_lock);
- batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
- orig_node->orig, new_gwflags,
- (down > 2048 ? down / 1024 : down),
- (down > 2048 ? "MBit" : "KBit"),
- (up > 2048 ? up / 1024 : up),
- (up > 2048 ? "MBit" : "KBit"));
+ "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
+ orig_node->orig,
+ ntohl(gateway->bandwidth_down) / 10,
+ ntohl(gateway->bandwidth_down) % 10,
+ ntohl(gateway->bandwidth_up) / 10,
+ ntohl(gateway->bandwidth_up) % 10);
}
-void batadv_gw_node_update(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- uint8_t new_gwflags)
+/**
+ * batadv_gw_node_get - retrieve gateway node from list of available gateways
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ *
+ * Returns gateway node if found or NULL otherwise.
+ */
+static struct batadv_gw_node *
+batadv_gw_node_get(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
{
- struct batadv_gw_node *gw_node, *curr_gw;
-
- /* Note: We don't need a NULL check here, since curr_gw never gets
- * dereferenced. If curr_gw is NULL we also should not exit as we may
- * have this gateway in our list (duplication check!) even though we
- * have no currently selected gateway.
- */
- curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+ struct batadv_gw_node *gw_node_tmp, *gw_node = NULL;
rcu_read_lock();
- hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
- if (gw_node->orig_node != orig_node)
+ hlist_for_each_entry_rcu(gw_node_tmp, &bat_priv->gw.list, list) {
+ if (gw_node_tmp->orig_node != orig_node)
continue;
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Gateway class of originator %pM changed from %i to %i\n",
- orig_node->orig, gw_node->orig_node->gw_flags,
- new_gwflags);
+ if (gw_node_tmp->deleted)
+ continue;
- gw_node->deleted = 0;
+ if (!atomic_inc_not_zero(&gw_node_tmp->refcount))
+ continue;
- if (new_gwflags == BATADV_NO_FLAGS) {
- gw_node->deleted = jiffies;
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Gateway %pM removed from gateway list\n",
- orig_node->orig);
+ gw_node = gw_node_tmp;
+ break;
+ }
+ rcu_read_unlock();
- if (gw_node == curr_gw)
- goto deselect;
- }
+ return gw_node;
+}
- goto unlock;
+/**
+ * batadv_gw_node_update - update list of available gateways with changed
+ * bandwidth information
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ * @gateway: announced bandwidth information
+ */
+void batadv_gw_node_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_tvlv_gateway_data *gateway)
+{
+ struct batadv_gw_node *gw_node, *curr_gw = NULL;
+
+ gw_node = batadv_gw_node_get(bat_priv, orig_node);
+ if (!gw_node) {
+ batadv_gw_node_add(bat_priv, orig_node, gateway);
+ goto out;
}
- if (new_gwflags == BATADV_NO_FLAGS)
- goto unlock;
+ if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) &&
+ (gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)))
+ goto out;
- batadv_gw_node_add(bat_priv, orig_node, new_gwflags);
- goto unlock;
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Gateway bandwidth of originator %pM changed from %u.%u/%u.%u MBit to %u.%u/%u.%u MBit\n",
+ orig_node->orig,
+ gw_node->bandwidth_down / 10,
+ gw_node->bandwidth_down % 10,
+ gw_node->bandwidth_up / 10,
+ gw_node->bandwidth_up % 10,
+ ntohl(gateway->bandwidth_down) / 10,
+ ntohl(gateway->bandwidth_down) % 10,
+ ntohl(gateway->bandwidth_up) / 10,
+ ntohl(gateway->bandwidth_up) % 10);
+
+ gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
+ gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
+
+ gw_node->deleted = 0;
+ if (ntohl(gateway->bandwidth_down) == 0) {
+ gw_node->deleted = jiffies;
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Gateway %pM removed from gateway list\n",
+ orig_node->orig);
-deselect:
- batadv_gw_deselect(bat_priv);
-unlock:
- rcu_read_unlock();
+ /* Note: We don't need a NULL check here, since curr_gw never
+ * gets dereferenced.
+ */
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+ if (gw_node == curr_gw)
+ batadv_gw_deselect(bat_priv);
+ }
+out:
if (curr_gw)
batadv_gw_node_free_ref(curr_gw);
+ if (gw_node)
+ batadv_gw_node_free_ref(gw_node);
}
void batadv_gw_node_delete(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
- batadv_gw_node_update(bat_priv, orig_node, 0);
+ struct batadv_tvlv_gateway_data gateway;
+
+ gateway.bandwidth_down = 0;
+ gateway.bandwidth_up = 0;
+
+ batadv_gw_node_update(bat_priv, orig_node, &gateway);
}
void batadv_gw_node_purge(struct batadv_priv *bat_priv)
@@ -467,9 +517,7 @@ static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
{
struct batadv_gw_node *curr_gw;
struct batadv_neigh_node *router;
- int down, up, ret = -1;
-
- batadv_gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
+ int ret = -1;
router = batadv_orig_node_get_router(gw_node->orig_node);
if (!router)
@@ -477,16 +525,15 @@ static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
- ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
+ ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
(curr_gw == gw_node ? "=>" : " "),
gw_node->orig_node->orig,
- router->tq_avg, router->addr,
+ router->bat_iv.tq_avg, router->addr,
router->if_incoming->net_dev->name,
- gw_node->orig_node->gw_flags,
- (down > 2048 ? down / 1024 : down),
- (down > 2048 ? "MBit" : "KBit"),
- (up > 2048 ? up / 1024 : up),
- (up > 2048 ? "MBit" : "KBit"));
+ gw_node->bandwidth_down / 10,
+ gw_node->bandwidth_down % 10,
+ gw_node->bandwidth_up / 10,
+ gw_node->bandwidth_up % 10);
batadv_neigh_node_free_ref(router);
if (curr_gw)
@@ -508,7 +555,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
goto out;
seq_printf(seq,
- " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
+ " %-12s (%s/%i) %17s [%10s]: advertised uplink bandwidth ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
"Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF",
BATADV_SOURCE_VERSION, primary_if->net_dev->name,
primary_if->net_dev->dev_addr, net_dev->name);
@@ -603,24 +650,29 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
struct iphdr *iphdr;
struct ipv6hdr *ipv6hdr;
struct udphdr *udphdr;
+ struct vlan_ethhdr *vhdr;
+ __be16 proto;
/* check for ethernet header */
if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
return false;
ethhdr = (struct ethhdr *)skb->data;
+ proto = ethhdr->h_proto;
*header_len += ETH_HLEN;
/* check for initial vlan header */
- if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+ if (proto == htons(ETH_P_8021Q)) {
if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
return false;
- ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ proto = vhdr->h_vlan_encapsulated_proto;
*header_len += VLAN_HLEN;
}
/* check for ip header */
- switch (ntohs(ethhdr->h_proto)) {
- case ETH_P_IP:
+ switch (proto) {
+ case htons(ETH_P_IP):
if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
return false;
iphdr = (struct iphdr *)(skb->data + *header_len);
@@ -631,7 +683,7 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
return false;
break;
- case ETH_P_IPV6:
+ case htons(ETH_P_IPV6):
if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
return false;
ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
@@ -658,28 +710,44 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
*header_len += sizeof(*udphdr);
/* check for bootp port */
- if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
- (ntohs(udphdr->dest) != 67))
+ if ((proto == htons(ETH_P_IP)) &&
+ (udphdr->dest != htons(67)))
return false;
- if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
- (ntohs(udphdr->dest) != 547))
+ if ((proto == htons(ETH_P_IPV6)) &&
+ (udphdr->dest != htons(547)))
return false;
return true;
}
-/* this call might reallocate skb data */
+/**
+ * batadv_gw_out_of_range - check if the dhcp request destination is the best gw
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the outgoing packet
+ *
+ * Check if the skb is a DHCP request and if it is sent to the current best GW
+ * server. Due to topology changes it may be the case that the GW server
+ * previously selected is not the best one anymore.
+ *
+ * Returns true if the packet destination is unicast and it is not the best gw,
+ * false otherwise.
+ *
+ * This call might reallocate skb data.
+ */
bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
struct batadv_orig_node *orig_dst_node = NULL;
- struct batadv_gw_node *curr_gw = NULL;
+ struct batadv_gw_node *gw_node = NULL, *curr_gw = NULL;
struct ethhdr *ethhdr;
bool ret, out_of_range = false;
unsigned int header_len = 0;
uint8_t curr_tq_avg;
+ unsigned short vid;
+
+ vid = batadv_get_vid(skb, 0);
ret = batadv_gw_is_dhcp_target(skb, &header_len);
if (!ret)
@@ -687,11 +755,12 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
ethhdr = (struct ethhdr *)skb->data;
orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
- ethhdr->h_dest);
+ ethhdr->h_dest, vid);
if (!orig_dst_node)
goto out;
- if (!orig_dst_node->gw_flags)
+ gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
+ if (!gw_node || gw_node->bandwidth_down == 0)
goto out;
ret = batadv_is_type_dhcprequest(skb, header_len);
@@ -723,7 +792,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
if (!neigh_curr)
goto out;
- curr_tq_avg = neigh_curr->tq_avg;
+ curr_tq_avg = neigh_curr->bat_iv.tq_avg;
break;
case BATADV_GW_MODE_OFF:
default:
@@ -734,7 +803,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
if (!neigh_old)
goto out;
- if (curr_tq_avg - neigh_old->tq_avg > BATADV_GW_THRESHOLD)
+ if (curr_tq_avg - neigh_old->bat_iv.tq_avg > BATADV_GW_THRESHOLD)
out_of_range = true;
out:
@@ -742,6 +811,8 @@ out:
batadv_orig_node_free_ref(orig_dst_node);
if (curr_gw)
batadv_gw_node_free_ref(curr_gw);
+ if (gw_node)
+ batadv_gw_node_free_ref(gw_node);
if (neigh_old)
batadv_neigh_node_free_ref(neigh_old);
if (neigh_curr)
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index ceef4ebe8bcd..d95c2d23195e 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -29,7 +29,7 @@ void batadv_gw_check_election(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node);
void batadv_gw_node_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
- uint8_t new_gwflags);
+ struct batadv_tvlv_gateway_data *gateway);
void batadv_gw_node_delete(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node);
void batadv_gw_node_purge(struct batadv_priv *bat_priv);
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 84bb2b18d711..b211b0f9cb78 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -21,64 +21,23 @@
#include "gateway_common.h"
#include "gateway_client.h"
-/* calculates the gateway class from kbit */
-static void batadv_kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
-{
- int mdown = 0, tdown, tup, difference;
- uint8_t sbit, part;
-
- *gw_srv_class = 0;
- difference = 0x0FFFFFFF;
-
- /* test all downspeeds */
- for (sbit = 0; sbit < 2; sbit++) {
- for (part = 0; part < 16; part++) {
- tdown = 32 * (sbit + 2) * (1 << part);
-
- if (abs(tdown - down) < difference) {
- *gw_srv_class = (sbit << 7) + (part << 3);
- difference = abs(tdown - down);
- mdown = tdown;
- }
- }
- }
-
- /* test all upspeeds */
- difference = 0x0FFFFFFF;
-
- for (part = 0; part < 8; part++) {
- tup = ((part + 1) * (mdown)) / 8;
-
- if (abs(tup - up) < difference) {
- *gw_srv_class = (*gw_srv_class & 0xF8) | part;
- difference = abs(tup - up);
- }
- }
-}
-
-/* returns the up and downspeeds in kbit, calculated from the class */
-void batadv_gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
-{
- int sbit = (gw_srv_class & 0x80) >> 7;
- int dpart = (gw_srv_class & 0x78) >> 3;
- int upart = (gw_srv_class & 0x07);
-
- if (!gw_srv_class) {
- *down = 0;
- *up = 0;
- return;
- }
-
- *down = 32 * (sbit + 2) * (1 << dpart);
- *up = ((upart + 1) * (*down)) / 8;
-}
-
+/**
+ * batadv_parse_gw_bandwidth - parse supplied string buffer to extract download
+ * and upload bandwidth information
+ * @net_dev: the soft interface net device
+ * @buff: string buffer to parse
+ * @down: pointer holding the returned download bandwidth information
+ * @up: pointer holding the returned upload bandwidth information
+ *
+ * Returns false on parse error and true otherwise.
+ */
static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
- int *up, int *down)
+ uint32_t *down, uint32_t *up)
{
- int ret, multi = 1;
+ enum batadv_bandwidth_units bw_unit_type = BATADV_BW_UNIT_KBIT;
char *slash_ptr, *tmp_ptr;
long ldown, lup;
+ int ret;
slash_ptr = strchr(buff, '/');
if (slash_ptr)
@@ -88,10 +47,10 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
tmp_ptr = buff + strlen(buff) - 4;
if (strnicmp(tmp_ptr, "mbit", 4) == 0)
- multi = 1024;
+ bw_unit_type = BATADV_BW_UNIT_MBIT;
if ((strnicmp(tmp_ptr, "kbit", 4) == 0) ||
- (multi > 1))
+ (bw_unit_type == BATADV_BW_UNIT_MBIT))
*tmp_ptr = '\0';
}
@@ -103,20 +62,28 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
return false;
}
- *down = ldown * multi;
+ switch (bw_unit_type) {
+ case BATADV_BW_UNIT_MBIT:
+ *down = ldown * 10;
+ break;
+ case BATADV_BW_UNIT_KBIT:
+ default:
+ *down = ldown / 100;
+ break;
+ }
/* we also got some upload info */
if (slash_ptr) {
- multi = 1;
+ bw_unit_type = BATADV_BW_UNIT_KBIT;
if (strlen(slash_ptr + 1) > 4) {
tmp_ptr = slash_ptr + 1 - 4 + strlen(slash_ptr + 1);
if (strnicmp(tmp_ptr, "mbit", 4) == 0)
- multi = 1024;
+ bw_unit_type = BATADV_BW_UNIT_MBIT;
if ((strnicmp(tmp_ptr, "kbit", 4) == 0) ||
- (multi > 1))
+ (bw_unit_type == BATADV_BW_UNIT_MBIT))
*tmp_ptr = '\0';
}
@@ -128,52 +95,149 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
return false;
}
- *up = lup * multi;
+ switch (bw_unit_type) {
+ case BATADV_BW_UNIT_MBIT:
+ *up = lup * 10;
+ break;
+ case BATADV_BW_UNIT_KBIT:
+ default:
+ *up = lup / 100;
+ break;
+ }
}
return true;
}
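The parser now stores bandwidth in units of 100 kbit/s, which is what the value/10 and value%10 pretty-printing elsewhere in this patch relies on; a worked example (the input strings are illustrative):

/* Resulting internal values (units of 100 kbit/s):
 *   "10mbit/2mbit" -> down = 10 * 10     = 100, up = 2 * 10     = 20
 *   "10000/2048"   -> down = 10000 / 100 = 100, up = 2048 / 100 = 20
 * Both are later printed as "10.0/2.0 MBit" via value / 10 and value % 10.
 */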
+/**
+ * batadv_gw_tvlv_container_update - update the gw tvlv container after gateway
+ * setting change
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+ struct batadv_tvlv_gateway_data gw;
+ uint32_t down, up;
+ char gw_mode;
+
+ gw_mode = atomic_read(&bat_priv->gw_mode);
+
+ switch (gw_mode) {
+ case BATADV_GW_MODE_OFF:
+ case BATADV_GW_MODE_CLIENT:
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_GW, 1);
+ break;
+ case BATADV_GW_MODE_SERVER:
+ down = atomic_read(&bat_priv->gw.bandwidth_down);
+ up = atomic_read(&bat_priv->gw.bandwidth_up);
+ gw.bandwidth_down = htonl(down);
+ gw.bandwidth_up = htonl(up);
+ batadv_tvlv_container_register(bat_priv, BATADV_TVLV_GW, 1,
+ &gw, sizeof(gw));
+ break;
+ }
+}
+
ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
size_t count)
{
struct batadv_priv *bat_priv = netdev_priv(net_dev);
- long gw_bandwidth_tmp = 0;
- int up = 0, down = 0;
+ uint32_t down_curr, up_curr, down_new = 0, up_new = 0;
bool ret;
- ret = batadv_parse_gw_bandwidth(net_dev, buff, &up, &down);
+ down_curr = (unsigned int)atomic_read(&bat_priv->gw.bandwidth_down);
+ up_curr = (unsigned int)atomic_read(&bat_priv->gw.bandwidth_up);
+
+ ret = batadv_parse_gw_bandwidth(net_dev, buff, &down_new, &up_new);
if (!ret)
goto end;
- if ((!down) || (down < 256))
- down = 2000;
-
- if (!up)
- up = down / 5;
+ if (!down_new)
+ down_new = 1;
- batadv_kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp);
+ if (!up_new)
+ up_new = down_new / 5;
- /* the gw bandwidth we guessed above might not match the given
- * speeds, hence we need to calculate it back to show the number
- * that is going to be propagated
- */
- batadv_gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
+ if (!up_new)
+ up_new = 1;
- if (atomic_read(&bat_priv->gw_bandwidth) == gw_bandwidth_tmp)
+ if ((down_curr == down_new) && (up_curr == up_new))
return count;
batadv_gw_deselect(bat_priv);
batadv_info(net_dev,
- "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
- atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
- (down > 2048 ? down / 1024 : down),
- (down > 2048 ? "MBit" : "KBit"),
- (up > 2048 ? up / 1024 : up),
- (up > 2048 ? "MBit" : "KBit"));
+ "Changing gateway bandwidth from: '%u.%u/%u.%u MBit' to: '%u.%u/%u.%u MBit'\n",
+ down_curr / 10, down_curr % 10, up_curr / 10, up_curr % 10,
+ down_new / 10, down_new % 10, up_new / 10, up_new % 10);
- atomic_set(&bat_priv->gw_bandwidth, gw_bandwidth_tmp);
+ atomic_set(&bat_priv->gw.bandwidth_down, down_new);
+ atomic_set(&bat_priv->gw.bandwidth_up, up_new);
+ batadv_gw_tvlv_container_update(bat_priv);
end:
return count;
}
+
+/**
+ * batadv_gw_tvlv_ogm_handler_v1 - process incoming gateway tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the gateway data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ uint8_t flags,
+ void *tvlv_value,
+ uint16_t tvlv_value_len)
+{
+ struct batadv_tvlv_gateway_data gateway, *gateway_ptr;
+
+ /* only fetch the tvlv value if the handler wasn't called via the
+ * CIFNOTFND flag and if there is data to fetch
+ */
+ if ((flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) ||
+ (tvlv_value_len < sizeof(gateway))) {
+ gateway.bandwidth_down = 0;
+ gateway.bandwidth_up = 0;
+ } else {
+ gateway_ptr = tvlv_value;
+ gateway.bandwidth_down = gateway_ptr->bandwidth_down;
+ gateway.bandwidth_up = gateway_ptr->bandwidth_up;
+ if ((gateway.bandwidth_down == 0) ||
+ (gateway.bandwidth_up == 0)) {
+ gateway.bandwidth_down = 0;
+ gateway.bandwidth_up = 0;
+ }
+ }
+
+ batadv_gw_node_update(bat_priv, orig, &gateway);
+
+ /* restart gateway selection if fast or late switching was enabled */
+ if ((gateway.bandwidth_down != 0) &&
+ (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_CLIENT) &&
+ (atomic_read(&bat_priv->gw_sel_class) > 2))
+ batadv_gw_check_election(bat_priv, orig);
+}
+
+/**
+ * batadv_gw_init - initialise the gateway handling internals
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_init(struct batadv_priv *bat_priv)
+{
+ batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
+ NULL, BATADV_TVLV_GW, 1,
+ BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+}
+
+/**
+ * batadv_gw_free - free the gateway handling internals
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_free(struct batadv_priv *bat_priv)
+{
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_GW, 1);
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_GW, 1);
+}
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 509b2bf8c2f4..56384a4cd18c 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -26,12 +26,24 @@ enum batadv_gw_modes {
BATADV_GW_MODE_SERVER,
};
+/**
+ * enum batadv_bandwidth_units - bandwidth unit types
+ * @BATADV_BW_UNIT_KBIT: unit type kbit
+ * @BATADV_BW_UNIT_MBIT: unit type mbit
+ */
+enum batadv_bandwidth_units {
+ BATADV_BW_UNIT_KBIT,
+ BATADV_BW_UNIT_MBIT,
+};
+
#define BATADV_GW_MODE_OFF_NAME "off"
#define BATADV_GW_MODE_CLIENT_NAME "client"
#define BATADV_GW_MODE_SERVER_NAME "server"
-void batadv_gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up);
ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
size_t count);
+void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv);
+void batadv_gw_init(struct batadv_priv *bat_priv);
+void batadv_gw_free(struct batadv_priv *bat_priv);
#endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index c478e6bcf89b..57c2a19dcb5c 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -28,6 +28,7 @@
#include "originator.h"
#include "hash.h"
#include "bridge_loop_avoidance.h"
+#include "gateway_client.h"
#include <linux/if_arp.h>
#include <linux/if_ether.h>
@@ -124,8 +125,11 @@ static int batadv_is_valid_iface(const struct net_device *net_dev)
*
* Returns true if the net device is a 802.11 wireless device, false otherwise.
*/
-static bool batadv_is_wifi_netdev(struct net_device *net_device)
+bool batadv_is_wifi_netdev(struct net_device *net_device)
{
+ if (!net_device)
+ return false;
+
#ifdef CONFIG_WIRELESS_EXT
/* pre-cfg80211 drivers have to implement WEXT, so it is possible to
* check for wireless_handlers != NULL
@@ -141,34 +145,6 @@ static bool batadv_is_wifi_netdev(struct net_device *net_device)
return false;
}
-/**
- * batadv_is_wifi_iface - check if the given interface represented by ifindex
- * is a wifi interface
- * @ifindex: interface index to check
- *
- * Returns true if the interface represented by ifindex is a 802.11 wireless
- * device, false otherwise.
- */
-bool batadv_is_wifi_iface(int ifindex)
-{
- struct net_device *net_device = NULL;
- bool ret = false;
-
- if (ifindex == BATADV_NULL_IFINDEX)
- goto out;
-
- net_device = dev_get_by_index(&init_net, ifindex);
- if (!net_device)
- goto out;
-
- ret = batadv_is_wifi_netdev(net_device);
-
-out:
- if (net_device)
- dev_put(net_device);
- return ret;
-}
-
static struct batadv_hard_iface *
batadv_hardif_get_active(const struct net_device *soft_iface)
{
@@ -194,22 +170,13 @@ out:
static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
struct batadv_hard_iface *oldif)
{
- struct batadv_vis_packet *vis_packet;
struct batadv_hard_iface *primary_if;
- struct sk_buff *skb;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
batadv_dat_init_own_addr(bat_priv, primary_if);
-
- skb = bat_priv->vis.my_info->skb_packet;
- vis_packet = (struct batadv_vis_packet *)skb->data;
- memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
- memcpy(vis_packet->sender_orig,
- primary_if->net_dev->dev_addr, ETH_ALEN);
-
batadv_bla_update_orig_address(bat_priv, primary_if, oldif);
out:
if (primary_if)
@@ -275,16 +242,10 @@ static void batadv_check_known_mac_addr(const struct net_device *net_dev)
int batadv_hardif_min_mtu(struct net_device *soft_iface)
{
- const struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
const struct batadv_hard_iface *hard_iface;
- /* allow big frames if all devices are capable to do so
- * (have MTU > 1500 + BAT_HEADER_LEN)
- */
int min_mtu = ETH_DATA_LEN;
- if (atomic_read(&bat_priv->fragmentation))
- goto out;
-
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
@@ -294,23 +255,40 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
if (hard_iface->soft_iface != soft_iface)
continue;
- min_mtu = min_t(int,
- hard_iface->net_dev->mtu - BATADV_HEADER_LEN,
- min_mtu);
+ min_mtu = min_t(int, hard_iface->net_dev->mtu, min_mtu);
}
rcu_read_unlock();
+
+ atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+ if (atomic_read(&bat_priv->fragmentation) == 0)
+ goto out;
+
+ /* with fragmentation enabled the maximum size of internally generated
+ * packets such as translation table exchanges or tvlv containers, etc
+ * has to be calculated
+ */
+ min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+ min_mtu -= sizeof(struct batadv_frag_packet);
+ min_mtu *= BATADV_FRAG_MAX_FRAGMENTS;
+ atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+ /* with fragmentation enabled we can fragment external packets easily */
+ min_mtu = min_t(int, min_mtu, ETH_DATA_LEN);
+
out:
- return min_mtu;
+ return min_mtu - batadv_max_header_len();
}
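A hedged walk-through of the new MTU computation for a soft interface whose only slave device has a 1500-byte MTU, using the same assumed fragment constants as above:

/* Example with fragmentation enabled:
 *   packet_size_max = (min(1500, 1400) - 20) * 16 = 22080 bytes
 *   min_mtu is then capped back to ETH_DATA_LEN   = 1500 bytes
 *   returned soft-interface MTU = 1500 - batadv_max_header_len()
 * With fragmentation disabled, packet_size_max stays at 1500 and the
 * header length is subtracted from that instead.
 */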
/* adjusts the MTU if a new interface with a smaller MTU appeared. */
void batadv_update_min_mtu(struct net_device *soft_iface)
{
- int min_mtu;
+ soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
- min_mtu = batadv_hardif_min_mtu(soft_iface);
- if (soft_iface->mtu != min_mtu)
- soft_iface->mtu = min_mtu;
+ /* Check if the local translate table should be cleaned up to match a
+ * new (and smaller) MTU.
+ */
+ batadv_tt_local_resize_to_mtu(soft_iface);
}
static void
@@ -388,7 +366,8 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
{
struct batadv_priv *bat_priv;
struct net_device *soft_iface, *master;
- __be16 ethertype = __constant_htons(ETH_P_BATMAN);
+ __be16 ethertype = htons(ETH_P_BATMAN);
+ int max_header_len = batadv_max_header_len();
int ret;
if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
@@ -453,23 +432,22 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
dev_add_pack(&hard_iface->batman_adv_ptype);
- atomic_set(&hard_iface->frag_seqno, 1);
batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
hard_iface->net_dev->name);
if (atomic_read(&bat_priv->fragmentation) &&
- hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+ hard_iface->net_dev->mtu < ETH_DATA_LEN + max_header_len)
batadv_info(hard_iface->soft_iface,
- "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
+ "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %i would solve the problem.\n",
hard_iface->net_dev->name, hard_iface->net_dev->mtu,
- ETH_DATA_LEN + BATADV_HEADER_LEN);
+ ETH_DATA_LEN + max_header_len);
if (!atomic_read(&bat_priv->fragmentation) &&
- hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+ hard_iface->net_dev->mtu < ETH_DATA_LEN + max_header_len)
batadv_info(hard_iface->soft_iface,
- "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
+ "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %i.\n",
hard_iface->net_dev->name, hard_iface->net_dev->mtu,
- ETH_DATA_LEN + BATADV_HEADER_LEN);
+ ETH_DATA_LEN + max_header_len);
if (batadv_hardif_is_iface_up(hard_iface))
batadv_hardif_activate_interface(hard_iface);
@@ -533,8 +511,12 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
dev_put(hard_iface->soft_iface);
/* nobody uses this interface anymore */
- if (!bat_priv->num_ifaces && autodel == BATADV_IF_CLEANUP_AUTO)
- batadv_softif_destroy_sysfs(hard_iface->soft_iface);
+ if (!bat_priv->num_ifaces) {
+ batadv_gw_check_client_stop(bat_priv);
+
+ if (autodel == BATADV_IF_CLEANUP_AUTO)
+ batadv_softif_destroy_sysfs(hard_iface->soft_iface);
+ }
netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface);
hard_iface->soft_iface = NULL;
@@ -652,6 +634,8 @@ static int batadv_hard_if_event(struct notifier_block *this,
if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) {
batadv_sysfs_add_meshif(net_dev);
+ bat_priv = netdev_priv(net_dev);
+ batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
return NOTIFY_DONE;
}
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 49892881a7c5..df4c8bd45c40 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -41,6 +41,7 @@ enum batadv_hard_if_cleanup {
extern struct notifier_block batadv_hard_if_notifier;
+bool batadv_is_wifi_netdev(struct net_device *net_device);
struct batadv_hard_iface*
batadv_hardif_get_by_netdev(const struct net_device *net_dev);
int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
@@ -51,7 +52,6 @@ void batadv_hardif_remove_interfaces(void);
int batadv_hardif_min_mtu(struct net_device *soft_iface);
void batadv_update_min_mtu(struct net_device *soft_iface);
void batadv_hardif_free_rcu(struct rcu_head *rcu);
-bool batadv_is_wifi_iface(int ifindex);
static inline void
batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 5a99bb4b6b82..29ae4efe3543 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -29,7 +29,7 @@
static struct batadv_socket_client *batadv_socket_client_hash[256];
static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
- struct batadv_icmp_packet_rr *icmp_packet,
+ struct batadv_icmp_header *icmph,
size_t icmp_len);
void batadv_socket_init(void)
@@ -155,13 +155,13 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
struct batadv_priv *bat_priv = socket_client->bat_priv;
struct batadv_hard_iface *primary_if = NULL;
struct sk_buff *skb;
- struct batadv_icmp_packet_rr *icmp_packet;
-
+ struct batadv_icmp_packet_rr *icmp_packet_rr;
+ struct batadv_icmp_header *icmp_header;
struct batadv_orig_node *orig_node = NULL;
struct batadv_neigh_node *neigh_node = NULL;
size_t packet_len = sizeof(struct batadv_icmp_packet);
- if (len < sizeof(struct batadv_icmp_packet)) {
+ if (len < sizeof(struct batadv_icmp_header)) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Error - can't send packet from char device: invalid packet size\n");
return -EINVAL;
@@ -174,8 +174,10 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
goto out;
}
- if (len >= sizeof(struct batadv_icmp_packet_rr))
- packet_len = sizeof(struct batadv_icmp_packet_rr);
+ if (len >= BATADV_ICMP_MAX_PACKET_SIZE)
+ packet_len = BATADV_ICMP_MAX_PACKET_SIZE;
+ else
+ packet_len = len;
skb = netdev_alloc_skb_ip_align(NULL, packet_len + ETH_HLEN);
if (!skb) {
@@ -185,67 +187,78 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
- icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len);
+ icmp_header = (struct batadv_icmp_header *)skb_put(skb, packet_len);
- if (copy_from_user(icmp_packet, buff, packet_len)) {
+ if (copy_from_user(icmp_header, buff, packet_len)) {
len = -EFAULT;
goto free_skb;
}
- if (icmp_packet->header.packet_type != BATADV_ICMP) {
+ if (icmp_header->header.packet_type != BATADV_ICMP) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
len = -EINVAL;
goto free_skb;
}
- if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+ switch (icmp_header->msg_type) {
+ case BATADV_ECHO_REQUEST:
+ if (len < sizeof(struct batadv_icmp_packet)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Error - can't send packet from char device: invalid packet size\n");
+ len = -EINVAL;
+ goto free_skb;
+ }
+
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
+ goto dst_unreach;
+
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_header->dst);
+ if (!orig_node)
+ goto dst_unreach;
+
+ neigh_node = batadv_orig_node_get_router(orig_node);
+ if (!neigh_node)
+ goto dst_unreach;
+
+ if (!neigh_node->if_incoming)
+ goto dst_unreach;
+
+ if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
+ goto dst_unreach;
+
+ icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmp_header;
+ if (packet_len == sizeof(*icmp_packet_rr))
+ memcpy(icmp_packet_rr->rr,
+ neigh_node->if_incoming->net_dev->dev_addr,
+ ETH_ALEN);
+
+ break;
+ default:
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
+ "Error - can't send packet from char device: got unknown message type\n");
len = -EINVAL;
goto free_skb;
}
- icmp_packet->uid = socket_client->index;
+ icmp_header->uid = socket_client->index;
- if (icmp_packet->header.version != BATADV_COMPAT_VERSION) {
- icmp_packet->msg_type = BATADV_PARAMETER_PROBLEM;
- icmp_packet->header.version = BATADV_COMPAT_VERSION;
- batadv_socket_add_packet(socket_client, icmp_packet,
+ if (icmp_header->header.version != BATADV_COMPAT_VERSION) {
+ icmp_header->msg_type = BATADV_PARAMETER_PROBLEM;
+ icmp_header->header.version = BATADV_COMPAT_VERSION;
+ batadv_socket_add_packet(socket_client, icmp_header,
packet_len);
goto free_skb;
}
- if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
- goto dst_unreach;
-
- orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
- if (!orig_node)
- goto dst_unreach;
-
- neigh_node = batadv_orig_node_get_router(orig_node);
- if (!neigh_node)
- goto dst_unreach;
-
- if (!neigh_node->if_incoming)
- goto dst_unreach;
-
- if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
- goto dst_unreach;
-
- memcpy(icmp_packet->orig,
- primary_if->net_dev->dev_addr, ETH_ALEN);
-
- if (packet_len == sizeof(struct batadv_icmp_packet_rr))
- memcpy(icmp_packet->rr,
- neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
+ memcpy(icmp_header->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
goto out;
dst_unreach:
- icmp_packet->msg_type = BATADV_DESTINATION_UNREACHABLE;
- batadv_socket_add_packet(socket_client, icmp_packet, packet_len);
+ icmp_header->msg_type = BATADV_DESTINATION_UNREACHABLE;
+ batadv_socket_add_packet(socket_client, icmp_header, packet_len);
free_skb:
kfree_skb(skb);
out:
@@ -298,27 +311,40 @@ err:
return -ENOMEM;
}
+/**
+ * batadv_socket_add_packet - schedule an icmp packet to be sent to userspace
+ * on an icmp socket.
+ * @socket_client: the socket this packet belongs to
+ * @icmph: pointer to the header of the icmp packet
+ * @icmp_len: total length of the icmp packet
+ */
static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
- struct batadv_icmp_packet_rr *icmp_packet,
+ struct batadv_icmp_header *icmph,
size_t icmp_len)
{
struct batadv_socket_packet *socket_packet;
+ size_t len;
socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC);
if (!socket_packet)
return;
+ len = icmp_len;
+ /* check the maximum length before filling the buffer */
+ if (len > sizeof(socket_packet->icmp_packet))
+ len = sizeof(socket_packet->icmp_packet);
+
INIT_LIST_HEAD(&socket_packet->list);
- memcpy(&socket_packet->icmp_packet, icmp_packet, icmp_len);
- socket_packet->icmp_len = icmp_len;
+ memcpy(&socket_packet->icmp_packet, icmph, len);
+ socket_packet->icmp_len = len;
spin_lock_bh(&socket_client->lock);
/* while waiting for the lock the socket_client could have been
* deleted
*/
- if (!batadv_socket_client_hash[icmp_packet->uid]) {
+ if (!batadv_socket_client_hash[icmph->uid]) {
spin_unlock_bh(&socket_client->lock);
kfree(socket_packet);
return;
@@ -342,12 +368,18 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
wake_up(&socket_client->queue_wait);
}
-void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
+/**
+ * batadv_socket_receive_packet - schedule an icmp packet to be received
+ * locally and sent to userspace.
+ * @icmph: pointer to the header of the icmp packet
+ * @icmp_len: total length of the icmp packet
+ */
+void batadv_socket_receive_packet(struct batadv_icmp_header *icmph,
size_t icmp_len)
{
struct batadv_socket_client *hash;
- hash = batadv_socket_client_hash[icmp_packet->uid];
+ hash = batadv_socket_client_hash[icmph->uid];
if (hash)
- batadv_socket_add_packet(hash, icmp_packet, icmp_len);
+ batadv_socket_add_packet(hash, icmph, icmp_len);
}
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 1fcca37b6223..6665080dff79 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -24,7 +24,7 @@
void batadv_socket_init(void);
int batadv_socket_setup(struct batadv_priv *bat_priv);
-void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
+void batadv_socket_receive_packet(struct batadv_icmp_header *icmph,
size_t icmp_len);
#endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */
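The socket changes above replace the fixed-size batadv_icmp_packet_rr handling with the generic batadv_icmp_header: the write path accepts anything at least as large as the generic header, clamps the copy to the largest known ICMP packet, and only then dispatches on msg_type. A stand-alone sketch of that length handling (illustrative only; icmp_accept_len is a hypothetical helper, the real bound being BATADV_ICMP_MAX_PACKET_SIZE from packet.h):

/* Hypothetical helper mirroring the validation order used in
 * batadv_socket_write(): reject undersized buffers, truncate oversized ones.
 */
static ssize_t icmp_accept_len(size_t user_len, size_t hdr_len, size_t max_len)
{
        /* shorter than the generic ICMP header: cannot be parsed at all */
        if (user_len < hdr_len)
                return -EINVAL;

        /* never copy more than the largest packet format we understand */
        return user_len >= max_len ? max_len : user_len;
}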
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index c72d1bcdcf49..c51a5e568f0a 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -36,10 +36,11 @@
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
-#include "vis.h"
+#include "gateway_common.h"
#include "hash.h"
#include "bat_algo.h"
#include "network-coding.h"
+#include "fragmentation.h"
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
@@ -65,6 +66,7 @@ static int __init batadv_init(void)
batadv_recv_handler_init();
batadv_iv_init();
+ batadv_nc_init();
batadv_event_workqueue = create_singlethread_workqueue("bat_events");
@@ -108,9 +110,11 @@ int batadv_mesh_init(struct net_device *soft_iface)
spin_lock_init(&bat_priv->tt.req_list_lock);
spin_lock_init(&bat_priv->tt.roam_list_lock);
spin_lock_init(&bat_priv->tt.last_changeset_lock);
+ spin_lock_init(&bat_priv->tt.commit_lock);
spin_lock_init(&bat_priv->gw.list_lock);
- spin_lock_init(&bat_priv->vis.hash_lock);
- spin_lock_init(&bat_priv->vis.list_lock);
+ spin_lock_init(&bat_priv->tvlv.container_list_lock);
+ spin_lock_init(&bat_priv->tvlv.handler_list_lock);
+ spin_lock_init(&bat_priv->softif_vlan_list_lock);
INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
@@ -118,6 +122,9 @@ int batadv_mesh_init(struct net_device *soft_iface)
INIT_LIST_HEAD(&bat_priv->tt.changes_list);
INIT_LIST_HEAD(&bat_priv->tt.req_list);
INIT_LIST_HEAD(&bat_priv->tt.roam_list);
+ INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
+ INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
+ INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);
ret = batadv_originator_init(bat_priv);
if (ret < 0)
@@ -127,13 +134,6 @@ int batadv_mesh_init(struct net_device *soft_iface)
if (ret < 0)
goto err;
- batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
- BATADV_NULL_IFINDEX);
-
- ret = batadv_vis_init(bat_priv);
- if (ret < 0)
- goto err;
-
ret = batadv_bla_init(bat_priv);
if (ret < 0)
goto err;
@@ -142,10 +142,12 @@ int batadv_mesh_init(struct net_device *soft_iface)
if (ret < 0)
goto err;
- ret = batadv_nc_init(bat_priv);
+ ret = batadv_nc_mesh_init(bat_priv);
if (ret < 0)
goto err;
+ batadv_gw_init(bat_priv);
+
atomic_set(&bat_priv->gw.reselect, 0);
atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
@@ -164,10 +166,8 @@ void batadv_mesh_free(struct net_device *soft_iface)
batadv_purge_outstanding_packets(bat_priv, NULL);
- batadv_vis_quit(bat_priv);
-
batadv_gw_node_purge(bat_priv);
- batadv_nc_free(bat_priv);
+ batadv_nc_mesh_free(bat_priv);
batadv_dat_free(bat_priv);
batadv_bla_free(bat_priv);
@@ -184,6 +184,8 @@ void batadv_mesh_free(struct net_device *soft_iface)
*/
batadv_originator_free(bat_priv);
+ batadv_gw_free(bat_priv);
+
free_percpu(bat_priv->bat_counters);
bat_priv->bat_counters = NULL;
@@ -254,6 +256,31 @@ out:
}
/**
+ * batadv_max_header_len - calculate maximum encapsulation overhead for a
+ * payload packet
+ *
+ * Return the maximum encapsulation overhead in bytes.
+ */
+int batadv_max_header_len(void)
+{
+ int header_len = 0;
+
+ header_len = max_t(int, header_len,
+ sizeof(struct batadv_unicast_packet));
+ header_len = max_t(int, header_len,
+ sizeof(struct batadv_unicast_4addr_packet));
+ header_len = max_t(int, header_len,
+ sizeof(struct batadv_bcast_packet));
+
+#ifdef CONFIG_BATMAN_ADV_NC
+ header_len = max_t(int, header_len,
+ sizeof(struct batadv_coded_packet));
+#endif
+
+ return header_len;
+}
+
+/**
* batadv_skb_set_priority - sets skb priority according to packet content
* @skb: the packet to be sent
* @offset: offset to the packet content
@@ -391,22 +418,31 @@ static void batadv_recv_handler_init(void)
for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
batadv_rx_handler[i] = batadv_recv_unhandled_packet;
- /* batman icmp packet */
- batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
+ for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
+ batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;
+
+ /* compile time checks for struct member offsets */
+ BUILD_BUG_ON(offsetof(struct batadv_unicast_4addr_packet, src) != 10);
+ BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
+ BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
+ BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
+ BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4);
+ BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4);
+
+ /* broadcast packet */
+ batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
+
+ /* unicast packets ... */
/* unicast with 4 addresses packet */
batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
/* unicast packet */
batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
- /* fragmented unicast packet */
- batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
- /* broadcast packet */
- batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
- /* vis packet */
- batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
- /* Translation table query (request or response) */
- batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
- /* Roaming advertisement */
- batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
+ /* unicast tvlv packet */
+ batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
+ /* batman icmp packet */
+ batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
+ /* Fragmented packets */
+ batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}
int
@@ -414,7 +450,12 @@ batadv_recv_handler_register(uint8_t packet_type,
int (*recv_handler)(struct sk_buff *,
struct batadv_hard_iface *))
{
- if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
+ int (*curr)(struct sk_buff *,
+ struct batadv_hard_iface *);
+ curr = batadv_rx_handler[packet_type];
+
+ if ((curr != batadv_recv_unhandled_packet) &&
+ (curr != batadv_recv_unhandled_unicast_packet))
return -EBUSY;
batadv_rx_handler[packet_type] = recv_handler;
@@ -460,7 +501,9 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
!bat_algo_ops->bat_iface_update_mac ||
!bat_algo_ops->bat_primary_iface_set ||
!bat_algo_ops->bat_ogm_schedule ||
- !bat_algo_ops->bat_ogm_emit) {
+ !bat_algo_ops->bat_ogm_emit ||
+ !bat_algo_ops->bat_neigh_cmp ||
+ !bat_algo_ops->bat_neigh_is_equiv_or_better) {
pr_info("Routing algo '%s' does not implement required ops\n",
bat_algo_ops->name);
ret = -EINVAL;
@@ -535,6 +578,601 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
return htonl(crc);
}
+/**
+ * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
+ * possibly free it
+ * @tvlv_handler: the tvlv handler to free
+ */
+static void
+batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
+{
+ if (atomic_dec_and_test(&tvlv_handler->refcount))
+ kfree_rcu(tvlv_handler, rcu);
+}
+
+/**
+ * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
+ * based on the provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv handler type to look for
+ * @version: tvlv handler version to look for
+ *
+ * Returns tvlv handler if found or NULL otherwise.
+ */
+static struct batadv_tvlv_handler
+*batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
+ uint8_t type, uint8_t version)
+{
+ struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tvlv_handler_tmp,
+ &bat_priv->tvlv.handler_list, list) {
+ if (tvlv_handler_tmp->type != type)
+ continue;
+
+ if (tvlv_handler_tmp->version != version)
+ continue;
+
+ if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
+ continue;
+
+ tvlv_handler = tvlv_handler_tmp;
+ break;
+ }
+ rcu_read_unlock();
+
+ return tvlv_handler;
+}
+
+/**
+ * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
+ * possibly free it
+ * @tvlv: the tvlv container to free
+ */
+static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
+{
+ if (atomic_dec_and_test(&tvlv->refcount))
+ kfree(tvlv);
+}
+
+/**
+ * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
+ * list based on the provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type to look for
+ * @version: tvlv container version to look for
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ *
+ * Returns tvlv container if found or NULL otherwise.
+ */
+static struct batadv_tvlv_container
+*batadv_tvlv_container_get(struct batadv_priv *bat_priv,
+ uint8_t type, uint8_t version)
+{
+ struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
+
+ hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
+ if (tvlv_tmp->tvlv_hdr.type != type)
+ continue;
+
+ if (tvlv_tmp->tvlv_hdr.version != version)
+ continue;
+
+ if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
+ continue;
+
+ tvlv = tvlv_tmp;
+ break;
+ }
+
+ return tvlv;
+}
+
+/**
+ * batadv_tvlv_container_list_size - calculate the size of the tvlv container
+ * list entries
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ *
+ * Returns size of all currently registered tvlv containers in bytes.
+ */
+static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
+{
+ struct batadv_tvlv_container *tvlv;
+ uint16_t tvlv_len = 0;
+
+ hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
+ tvlv_len += sizeof(struct batadv_tvlv_hdr);
+ tvlv_len += ntohs(tvlv->tvlv_hdr.len);
+ }
+
+ return tvlv_len;
+}
+
+/**
+ * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
+ * list
+ * @tvlv: the tvlv container to be removed
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ */
+static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
+{
+ if (!tvlv)
+ return;
+
+ hlist_del(&tvlv->list);
+
+ /* first call to decrement the counter, second call to free */
+ batadv_tvlv_container_free_ref(tvlv);
+ batadv_tvlv_container_free_ref(tvlv);
+}
+
+/**
+ * batadv_tvlv_container_unregister - unregister tvlv container based on the
+ * provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type to unregister
+ * @version: tvlv container version to unregister
+ */
+void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
+ uint8_t type, uint8_t version)
+{
+ struct batadv_tvlv_container *tvlv;
+
+ spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+ tvlv = batadv_tvlv_container_get(bat_priv, type, version);
+ batadv_tvlv_container_remove(tvlv);
+ spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+}
+
+/**
+ * batadv_tvlv_container_register - register tvlv type, version and content
+ * to be propagated with each (primary interface) OGM
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type
+ * @version: tvlv container version
+ * @tvlv_value: tvlv container content
+ * @tvlv_value_len: tvlv container content length
+ *
+ * If a container of the same type and version was already registered the new
+ * content is going to replace the old one.
+ */
+void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
+ uint8_t type, uint8_t version,
+ void *tvlv_value, uint16_t tvlv_value_len)
+{
+ struct batadv_tvlv_container *tvlv_old, *tvlv_new;
+
+ if (!tvlv_value)
+ tvlv_value_len = 0;
+
+ tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
+ if (!tvlv_new)
+ return;
+
+ tvlv_new->tvlv_hdr.version = version;
+ tvlv_new->tvlv_hdr.type = type;
+ tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);
+
+ memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
+ INIT_HLIST_NODE(&tvlv_new->list);
+ atomic_set(&tvlv_new->refcount, 1);
+
+ spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+ tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
+ batadv_tvlv_container_remove(tvlv_old);
+ hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
+ spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+}
+
+/**
+ * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
+ * requested packet size
+ * @packet_buff: packet buffer
+ * @packet_buff_len: packet buffer size
+ * @min_packet_len: requested packet minimum size
+ * @additional_packet_len: requested additional packet size on top of minimum
+ * size
+ *
+ * Returns true if the packet buffer could be changed to the requested size,
+ * false otherwise.
+ */
+static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
+ int *packet_buff_len,
+ int min_packet_len,
+ int additional_packet_len)
+{
+ unsigned char *new_buff;
+
+ new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
+
+ /* keep old buffer if kmalloc should fail */
+ if (new_buff) {
+ memcpy(new_buff, *packet_buff, min_packet_len);
+ kfree(*packet_buff);
+ *packet_buff = new_buff;
+ *packet_buff_len = min_packet_len + additional_packet_len;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * batadv_tvlv_container_ogm_append - append tvlv container content to given
+ * OGM packet buffer
+ * @bat_priv: the bat priv with all the soft interface information
+ * @packet_buff: ogm packet buffer
+ * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
+ * content
+ * @packet_min_len: ogm header size to be preserved for the OGM itself
+ *
+ * The ogm packet might be enlarged or shrunk depending on the current size
+ * and the size of the to-be-appended tvlv containers.
+ *
+ * Returns size of all appended tvlv containers in bytes.
+ */
+uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff,
+ int *packet_buff_len,
+ int packet_min_len)
+{
+ struct batadv_tvlv_container *tvlv;
+ struct batadv_tvlv_hdr *tvlv_hdr;
+ uint16_t tvlv_value_len;
+ void *tvlv_value;
+ bool ret;
+
+ spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+ tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);
+
+ ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
+ packet_min_len, tvlv_value_len);
+
+ if (!ret)
+ goto end;
+
+ if (!tvlv_value_len)
+ goto end;
+
+ tvlv_value = (*packet_buff) + packet_min_len;
+
+ hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
+ tvlv_hdr = tvlv_value;
+ tvlv_hdr->type = tvlv->tvlv_hdr.type;
+ tvlv_hdr->version = tvlv->tvlv_hdr.version;
+ tvlv_hdr->len = tvlv->tvlv_hdr.len;
+ tvlv_value = tvlv_hdr + 1;
+ memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
+ tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
+ }
+
+end:
+ spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+ return tvlv_value_len;
+}
+
+/**
+ * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
+ * appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tvlv_handler: tvlv callback function handling the tvlv content
+ * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
+ * @orig_node: orig node emitting the ogm packet
+ * @src: source mac address of the unicast packet
+ * @dst: destination mac address of the unicast packet
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ *
+ * Returns NET_RX_SUCCESS if no handler was found, otherwise the return value
+ * of the handler callback.
+ */
+static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_handler *tvlv_handler,
+ bool ogm_source,
+ struct batadv_orig_node *orig_node,
+ uint8_t *src, uint8_t *dst,
+ void *tvlv_value, uint16_t tvlv_value_len)
+{
+ if (!tvlv_handler)
+ return NET_RX_SUCCESS;
+
+ if (ogm_source) {
+ if (!tvlv_handler->ogm_handler)
+ return NET_RX_SUCCESS;
+
+ if (!orig_node)
+ return NET_RX_SUCCESS;
+
+ tvlv_handler->ogm_handler(bat_priv, orig_node,
+ BATADV_NO_FLAGS,
+ tvlv_value, tvlv_value_len);
+ tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
+ } else {
+ if (!src)
+ return NET_RX_SUCCESS;
+
+ if (!dst)
+ return NET_RX_SUCCESS;
+
+ if (!tvlv_handler->unicast_handler)
+ return NET_RX_SUCCESS;
+
+ return tvlv_handler->unicast_handler(bat_priv, src,
+ dst, tvlv_value,
+ tvlv_value_len);
+ }
+
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
+ * appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
+ * @orig_node: orig node emitting the ogm packet
+ * @src: source mac address of the unicast packet
+ * @dst: destination mac address of the unicast packet
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ *
+ * Returns NET_RX_SUCCESS when processing an OGM, otherwise the combined
+ * return value of all called handler callbacks.
+ */
+int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
+ bool ogm_source,
+ struct batadv_orig_node *orig_node,
+ uint8_t *src, uint8_t *dst,
+ void *tvlv_value, uint16_t tvlv_value_len)
+{
+ struct batadv_tvlv_handler *tvlv_handler;
+ struct batadv_tvlv_hdr *tvlv_hdr;
+ uint16_t tvlv_value_cont_len;
+ uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
+ int ret = NET_RX_SUCCESS;
+
+ while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
+ tvlv_hdr = tvlv_value;
+ tvlv_value_cont_len = ntohs(tvlv_hdr->len);
+ tvlv_value = tvlv_hdr + 1;
+ tvlv_value_len -= sizeof(*tvlv_hdr);
+
+ if (tvlv_value_cont_len > tvlv_value_len)
+ break;
+
+ tvlv_handler = batadv_tvlv_handler_get(bat_priv,
+ tvlv_hdr->type,
+ tvlv_hdr->version);
+
+ ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
+ ogm_source, orig_node,
+ src, dst, tvlv_value,
+ tvlv_value_cont_len);
+ if (tvlv_handler)
+ batadv_tvlv_handler_free_ref(tvlv_handler);
+ tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
+ tvlv_value_len -= tvlv_value_cont_len;
+ }
+
+ if (!ogm_source)
+ return ret;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tvlv_handler,
+ &bat_priv->tvlv.handler_list, list) {
+ if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
+ !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
+ tvlv_handler->ogm_handler(bat_priv, orig_node,
+ cifnotfound, NULL, 0);
+
+ tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
+ }
+ rcu_read_unlock();
+
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
+ * handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @batadv_ogm_packet: ogm packet containing the tvlv containers
+ * @orig_node: orig node emitting the ogm packet
+ */
+void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
+ struct batadv_ogm_packet *batadv_ogm_packet,
+ struct batadv_orig_node *orig_node)
+{
+ void *tvlv_value;
+ uint16_t tvlv_value_len;
+
+ if (!batadv_ogm_packet)
+ return;
+
+ tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
+ if (!tvlv_value_len)
+ return;
+
+ tvlv_value = batadv_ogm_packet + 1;
+
+ batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
+ tvlv_value, tvlv_value_len);
+}
+
+/**
+ * batadv_tvlv_handler_register - register tvlv handler based on the provided
+ * type and version (both need to match) for ogm tvlv payload and/or unicast
+ * payload
+ * @bat_priv: the bat priv with all the soft interface information
+ * @optr: ogm tvlv handler callback function. This function receives the orig
+ * node, flags and the tvlv content as argument to process.
+ * @uptr: unicast tvlv handler callback function. This function receives the
+ * source & destination of the unicast packet as well as the tvlv content
+ * to process.
+ * @type: tvlv handler type to be registered
+ * @version: tvlv handler version to be registered
+ * @flags: flags to enable or disable TVLV API behavior
+ */
+void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
+ void (*optr)(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ uint8_t flags,
+ void *tvlv_value,
+ uint16_t tvlv_value_len),
+ int (*uptr)(struct batadv_priv *bat_priv,
+ uint8_t *src, uint8_t *dst,
+ void *tvlv_value,
+ uint16_t tvlv_value_len),
+ uint8_t type, uint8_t version, uint8_t flags)
+{
+ struct batadv_tvlv_handler *tvlv_handler;
+
+ tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
+ if (tvlv_handler) {
+ batadv_tvlv_handler_free_ref(tvlv_handler);
+ return;
+ }
+
+ tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
+ if (!tvlv_handler)
+ return;
+
+ tvlv_handler->ogm_handler = optr;
+ tvlv_handler->unicast_handler = uptr;
+ tvlv_handler->type = type;
+ tvlv_handler->version = version;
+ tvlv_handler->flags = flags;
+ atomic_set(&tvlv_handler->refcount, 1);
+ INIT_HLIST_NODE(&tvlv_handler->list);
+
+ spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+ hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+}
+
+/**
+ * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
+ * provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv handler type to be unregistered
+ * @version: tvlv handler version to be unregistered
+ */
+void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+ uint8_t type, uint8_t version)
+{
+ struct batadv_tvlv_handler *tvlv_handler;
+
+ tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
+ if (!tvlv_handler)
+ return;
+
+ batadv_tvlv_handler_free_ref(tvlv_handler);
+ spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+ hlist_del_rcu(&tvlv_handler->list);
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+ batadv_tvlv_handler_free_ref(tvlv_handler);
+}
+
+/**
+ * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
+ * specified host
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: source mac address of the unicast packet
+ * @dst: destination mac address of the unicast packet
+ * @type: tvlv type
+ * @version: tvlv version
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ */
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
+ uint8_t *dst, uint8_t type, uint8_t version,
+ void *tvlv_value, uint16_t tvlv_value_len)
+{
+ struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
+ struct batadv_tvlv_hdr *tvlv_hdr;
+ struct batadv_orig_node *orig_node;
+ struct sk_buff *skb = NULL;
+ unsigned char *tvlv_buff;
+ unsigned int tvlv_len;
+ ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
+ bool ret = false;
+
+ orig_node = batadv_orig_hash_find(bat_priv, dst);
+ if (!orig_node)
+ goto out;
+
+ tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;
+
+ skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
+ if (!skb)
+ goto out;
+
+ skb->priority = TC_PRIO_CONTROL;
+ skb_reserve(skb, ETH_HLEN);
+ tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
+ unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
+ unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV;
+ unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION;
+ unicast_tvlv_packet->header.ttl = BATADV_TTL;
+ unicast_tvlv_packet->reserved = 0;
+ unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
+ unicast_tvlv_packet->align = 0;
+ memcpy(unicast_tvlv_packet->src, src, ETH_ALEN);
+ memcpy(unicast_tvlv_packet->dst, dst, ETH_ALEN);
+
+ tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
+ tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
+ tvlv_hdr->version = version;
+ tvlv_hdr->type = type;
+ tvlv_hdr->len = htons(tvlv_value_len);
+ tvlv_buff += sizeof(*tvlv_hdr);
+ memcpy(tvlv_buff, tvlv_value, tvlv_value_len);
+
+ if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
+ ret = true;
+
+out:
+ if (skb && !ret)
+ kfree_skb(skb);
+ if (orig_node)
+ batadv_orig_node_free_ref(orig_node);
+}
+
+/**
+ * batadv_get_vid - extract the VLAN identifier from skb if any
+ * @skb: the buffer containing the packet
+ * @header_len: length of the batman header preceding the ethernet header
+ *
+ * If the packet embedded in the skb is vlan tagged this function returns the
+ * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
+ */
+unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
+{
+ struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
+ struct vlan_ethhdr *vhdr;
+ unsigned short vid;
+
+ if (ethhdr->h_proto != htons(ETH_P_8021Q))
+ return BATADV_NO_FLAGS;
+
+ if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
+ return BATADV_NO_FLAGS;
+
+ vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
+ vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+ vid |= BATADV_VLAN_HAS_TAG;
+
+ return vid;
+}
+
static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
struct batadv_algo_ops *bat_algo_ops;
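batadv_get_vid() above folds the VLAN information into a single unsigned short: untagged frames yield BATADV_NO_FLAGS, tagged frames yield the 12-bit VID with BATADV_VLAN_HAS_TAG set. A hedged caller sketch (the function name is hypothetical; the flag handling mirrors the BATADV_PRINT_VID macro in main.h and assumes the batman-adv headers are included):

/* Illustrative only: classify a received frame by the VID returned from
 * batadv_get_vid(). header_len is the batman-adv header preceding the
 * ethernet header, as in the helper above.
 */
static void example_classify_frame(struct sk_buff *skb, size_t header_len)
{
        unsigned short vid = batadv_get_vid(skb, header_len);

        if (vid & BATADV_VLAN_HAS_TAG)
                pr_debug("tagged frame, VLAN %d\n",
                         (int)(vid & VLAN_VID_MASK));
        else
                pr_debug("untagged frame\n");
}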
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 24675523930f..f94f287b8670 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -20,13 +20,13 @@
#ifndef _NET_BATMAN_ADV_MAIN_H_
#define _NET_BATMAN_ADV_MAIN_H_
-#define BATADV_DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
- "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
+#define BATADV_DRIVER_AUTHOR "Marek Lindner <mareklindner@neomailbox.ch>, " \
+ "Simon Wunderlich <sw@simonwunderlich.de>"
#define BATADV_DRIVER_DESC "B.A.T.M.A.N. advanced"
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2013.4.0"
+#define BATADV_SOURCE_VERSION "2013.5.0"
#endif
/* B.A.T.M.A.N. parameters */
@@ -86,7 +86,11 @@
/* numbers of originator to contact for any PUT/GET DHT operation */
#define BATADV_DAT_CANDIDATES_NUM 3
-#define BATADV_VIS_INTERVAL 5000 /* 5 seconds */
+/**
+ * BATADV_TQ_SIMILARITY_THRESHOLD - maximum number of TQ points a secondary
+ * metric may differ from the primary one and still be considered acceptable
+ */
+#define BATADV_TQ_SIMILARITY_THRESHOLD 50
/* how much worse secondary interfaces may be to be considered as bonding
* candidates
@@ -133,6 +137,15 @@ enum batadv_uev_type {
#define BATADV_GW_THRESHOLD 50
+/* Number of fragment chains for each orig_node */
+#define BATADV_FRAG_BUFFER_COUNT 8
+/* Maximum number of fragments for one packet */
+#define BATADV_FRAG_MAX_FRAGMENTS 16
+/* Maximum size of each fragment */
+#define BATADV_FRAG_MAX_FRAG_SIZE 1400
+/* Time to keep fragments while waiting for the rest of the fragments */
+#define BATADV_FRAG_TIMEOUT 10000
+
#define BATADV_DAT_CANDIDATE_NOT_FOUND 0
#define BATADV_DAT_CANDIDATE_ORIG 1
@@ -160,15 +173,9 @@ enum batadv_uev_type {
#include <net/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/seq_file.h>
-#include "types.h"
+#include <linux/if_vlan.h>
-/**
- * batadv_vlan_flags - flags for the four MSB of any vlan ID field
- * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
- */
-enum batadv_vlan_flags {
- BATADV_VLAN_HAS_TAG = BIT(15),
-};
+#include "types.h"
#define BATADV_PRINT_VID(vid) (vid & BATADV_VLAN_HAS_TAG ? \
(int)(vid & VLAN_VID_MASK) : -1)
@@ -184,6 +191,7 @@ void batadv_mesh_free(struct net_device *soft_iface);
int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq);
+int batadv_max_header_len(void);
void batadv_skb_set_priority(struct sk_buff *skb, int offset);
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype,
@@ -326,4 +334,40 @@ static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv,
*/
#define BATADV_SKB_CB(__skb) ((struct batadv_skb_cb *)&((__skb)->cb[0]))
+void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
+ uint8_t type, uint8_t version,
+ void *tvlv_value, uint16_t tvlv_value_len);
+uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff,
+ int *packet_buff_len,
+ int packet_min_len);
+void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
+ struct batadv_ogm_packet *batadv_ogm_packet,
+ struct batadv_orig_node *orig_node);
+void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
+ uint8_t type, uint8_t version);
+
+void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
+ void (*optr)(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ uint8_t flags,
+ void *tvlv_value,
+ uint16_t tvlv_value_len),
+ int (*uptr)(struct batadv_priv *bat_priv,
+ uint8_t *src, uint8_t *dst,
+ void *tvlv_value,
+ uint16_t tvlv_value_len),
+ uint8_t type, uint8_t version, uint8_t flags);
+void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+ uint8_t type, uint8_t version);
+int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
+ bool ogm_source,
+ struct batadv_orig_node *orig_node,
+ uint8_t *src, uint8_t *dst,
+ void *tvlv_buff, uint16_t tvlv_buff_len);
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
+ uint8_t *dst, uint8_t type, uint8_t version,
+ void *tvlv_value, uint16_t tvlv_value_len);
+unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len);
+
#endif /* _NET_BATMAN_ADV_MAIN_H_ */
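The prototypes above form the new TVLV API: a feature registers a container to announce its state in every OGM and a handler to parse the same TVLV when received. A usage sketch under stated assumptions (BATADV_TVLV_EXAMPLE and the my_* functions are hypothetical; the network coding code further down follows exactly this pattern with BATADV_TVLV_NC):

/* Hypothetical OGM handler matching the optr signature declared above. */
static void my_tvlv_ogm_handler(struct batadv_priv *bat_priv,
                                struct batadv_orig_node *orig,
                                uint8_t flags,
                                void *tvlv_value, uint16_t tvlv_value_len)
{
        /* record whether orig announced the capability, parse payload, ... */
}

static void my_feature_init(struct batadv_priv *bat_priv)
{
        /* announce the (empty) capability container in every OGM ... */
        batadv_tvlv_container_register(bat_priv, BATADV_TVLV_EXAMPLE, 1,
                                       NULL, 0);

        /* ... and be told when other originators announce (or stop
         * announcing) the same TVLV
         */
        batadv_tvlv_handler_register(bat_priv, my_tvlv_ogm_handler, NULL,
                                     BATADV_TVLV_EXAMPLE, 1,
                                     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
}

static void my_feature_free(struct batadv_priv *bat_priv)
{
        batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_EXAMPLE, 1);
        batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_EXAMPLE, 1);
}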
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index a487d46e0aec..351e199bc0af 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -35,6 +35,20 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
/**
+ * batadv_nc_init - one-time initialization for network coding
+ */
+int __init batadv_nc_init(void)
+{
+ int ret;
+
+ /* Register our packet type */
+ ret = batadv_recv_handler_register(BATADV_CODED,
+ batadv_nc_recv_coded_packet);
+
+ return ret;
+}
+
+/**
* batadv_nc_start_timer - initialise the nc periodic worker
* @bat_priv: the bat priv with all the soft interface information
*/
@@ -45,10 +59,63 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
}
/**
- * batadv_nc_init - initialise coding hash table and start house keeping
+ * batadv_nc_tvlv_container_update - update the network coding tvlv container
+ * after network coding setting change
* @bat_priv: the bat priv with all the soft interface information
*/
-int batadv_nc_init(struct batadv_priv *bat_priv)
+static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+ char nc_mode;
+
+ nc_mode = atomic_read(&bat_priv->network_coding);
+
+ switch (nc_mode) {
+ case 0:
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
+ break;
+ case 1:
+ batadv_tvlv_container_register(bat_priv, BATADV_TVLV_NC, 1,
+ NULL, 0);
+ break;
+ }
+}
+
+/**
+ * batadv_nc_status_update - update the network coding tvlv container after
+ * network coding setting change
+ * @net_dev: the soft interface net device
+ */
+void batadv_nc_status_update(struct net_device *net_dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ batadv_nc_tvlv_container_update(bat_priv);
+}
+
+/**
+ * batadv_nc_tvlv_ogm_handler_v1 - process incoming nc tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the network coding data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ uint8_t flags,
+ void *tvlv_value,
+ uint16_t tvlv_value_len)
+{
+ if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
+ orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_NC;
+ else
+ orig->capabilities |= BATADV_ORIG_CAPA_HAS_NC;
+}
+
+/**
+ * batadv_nc_mesh_init - initialise coding hash table and start house keeping
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
{
bat_priv->nc.timestamp_fwd_flush = jiffies;
bat_priv->nc.timestamp_sniffed_purge = jiffies;
@@ -70,14 +137,13 @@ int batadv_nc_init(struct batadv_priv *bat_priv)
batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
&batadv_nc_decoding_hash_lock_class_key);
- /* Register our packet type */
- if (batadv_recv_handler_register(BATADV_CODED,
- batadv_nc_recv_coded_packet) < 0)
- goto err;
-
INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
batadv_nc_start_timer(bat_priv);
+ batadv_tvlv_handler_register(bat_priv, batadv_nc_tvlv_ogm_handler_v1,
+ NULL, BATADV_TVLV_NC, 1,
+ BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+ batadv_nc_tvlv_container_update(bat_priv);
return 0;
err:
@@ -793,6 +859,10 @@ void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
if (!atomic_read(&bat_priv->network_coding))
goto out;
+ /* check if orig node is network coding enabled */
+ if (!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC))
+ goto out;
+
/* accept ogms from 'good' neighbors and single hop neighbors */
if (!batadv_can_nc_with_orig(bat_priv, orig_node, ogm_packet) &&
!is_single_hop_neigh)
@@ -933,7 +1003,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
struct batadv_nc_packet *nc_packet,
struct batadv_neigh_node *neigh_node)
{
- uint8_t tq_weighted_neigh, tq_weighted_coding;
+ uint8_t tq_weighted_neigh, tq_weighted_coding, tq_tmp;
struct sk_buff *skb_dest, *skb_src;
struct batadv_unicast_packet *packet1;
struct batadv_unicast_packet *packet2;
@@ -958,8 +1028,10 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
if (!router_coding)
goto out;
- tq_weighted_neigh = batadv_nc_random_weight_tq(router_neigh->tq_avg);
- tq_weighted_coding = batadv_nc_random_weight_tq(router_coding->tq_avg);
+ tq_tmp = batadv_nc_random_weight_tq(router_neigh->bat_iv.tq_avg);
+ tq_weighted_neigh = tq_tmp;
+ tq_tmp = batadv_nc_random_weight_tq(router_coding->bat_iv.tq_avg);
+ tq_weighted_coding = tq_tmp;
/* Select one destination for the MAC-header dst-field based on
* weighted TQ-values.
@@ -1721,12 +1793,13 @@ free_nc_packet:
}
/**
- * batadv_nc_free - clean up network coding memory
+ * batadv_nc_mesh_free - clean up network coding memory
* @bat_priv: the bat priv with all the soft interface information
*/
-void batadv_nc_free(struct batadv_priv *bat_priv)
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
{
- batadv_recv_handler_unregister(BATADV_CODED);
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_NC, 1);
cancel_delayed_work_sync(&bat_priv->nc.work);
batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index 85a4ec81ad50..d4fd315b5261 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -22,8 +22,10 @@
#ifdef CONFIG_BATMAN_ADV_NC
-int batadv_nc_init(struct batadv_priv *bat_priv);
-void batadv_nc_free(struct batadv_priv *bat_priv);
+void batadv_nc_status_update(struct net_device *net_dev);
+int batadv_nc_init(void);
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv);
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv);
void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_orig_node *orig_neigh_node,
@@ -46,12 +48,21 @@ int batadv_nc_init_debugfs(struct batadv_priv *bat_priv);
#else /* ifdef CONFIG_BATMAN_ADV_NC */
-static inline int batadv_nc_init(struct batadv_priv *bat_priv)
+static inline void batadv_nc_status_update(struct net_device *net_dev)
+{
+}
+
+static inline int batadv_nc_init(void)
+{
+ return 0;
+}
+
+static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
{
return 0;
}
-static inline void batadv_nc_free(struct batadv_priv *bat_priv)
+static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
{
return;
}
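Taken together, the network coding hunks show the capability pattern enabled by the TVLV API: the presence (or CIFNOTFND absence) of the BATADV_TVLV_NC container toggles BATADV_ORIG_CAPA_HAS_NC in orig_node->capabilities, and the data path checks that bit before coding with an originator. A minimal sketch of such a check (hypothetical helper; the real check sits in batadv_nc_update_nc_node above):

/* Illustrative only: gate a feature on the capability bit learned via TVLV. */
static bool example_orig_supports_nc(struct batadv_orig_node *orig_node)
{
        return !!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC);
}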
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index f50553a7de62..8ab14340d10f 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -25,10 +25,10 @@
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
-#include "unicast.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
#include "network-coding.h"
+#include "fragmentation.h"
/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;
@@ -36,7 +36,7 @@ static struct lock_class_key batadv_orig_hash_lock_class_key;
static void batadv_purge_orig(struct work_struct *work);
/* returns 1 if they are the same originator */
-static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
+int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
const void *data1 = container_of(node, struct batadv_orig_node,
hash_entry);
@@ -44,6 +44,88 @@ static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}
+/**
+ * batadv_orig_node_vlan_get - get an orig_node_vlan object
+ * @orig_node: the originator serving the VLAN
+ * @vid: the VLAN identifier
+ *
+ * Returns the vlan object identified by vid and belonging to orig_node or NULL
+ * if it does not exist.
+ */
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
+ unsigned short vid)
+{
+ struct batadv_orig_node_vlan *vlan = NULL, *tmp;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
+ if (tmp->vid != vid)
+ continue;
+
+ if (!atomic_inc_not_zero(&tmp->refcount))
+ continue;
+
+ vlan = tmp;
+
+ break;
+ }
+ rcu_read_unlock();
+
+ return vlan;
+}
+
+/**
+ * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
+ * object
+ * @orig_node: the originator serving the VLAN
+ * @vid: the VLAN identifier
+ *
+ * Returns NULL in case of failure or the vlan object identified by vid and
+ * belonging to orig_node otherwise. The object is created and added to the list
+ * if it does not exist.
+ *
+ * The object is returned with refcounter increased by 1.
+ */
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+ unsigned short vid)
+{
+ struct batadv_orig_node_vlan *vlan;
+
+ spin_lock_bh(&orig_node->vlan_list_lock);
+
+ /* first look if an object for this vid already exists */
+ vlan = batadv_orig_node_vlan_get(orig_node, vid);
+ if (vlan)
+ goto out;
+
+ vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
+ if (!vlan)
+ goto out;
+
+ atomic_set(&vlan->refcount, 2);
+ vlan->vid = vid;
+
+ list_add_rcu(&vlan->list, &orig_node->vlan_list);
+
+out:
+ spin_unlock_bh(&orig_node->vlan_list_lock);
+
+ return vlan;
+}
+
+/**
+ * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
+ * the originator-vlan object
+ * @orig_vlan: the originator-vlan object to release
+ */
+void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
+{
+ if (atomic_dec_and_test(&orig_vlan->refcount))
+ kfree_rcu(orig_vlan, rcu);
+}
+
int batadv_originator_init(struct batadv_priv *bat_priv)
{
if (bat_priv->orig_hash)
@@ -90,11 +172,20 @@ batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
return router;
}
+/**
+ * batadv_neigh_node_new - create and init a new neigh_node object
+ * @hard_iface: the interface the neighbour is connected to
+ * @neigh_addr: the mac address of the neighbour interface
+ * @orig_node: originator object representing the neighbour
+ *
+ * Allocates a new neigh_node object and initialises all the generic fields.
+ * Returns the new object or NULL on failure.
+ */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
- const uint8_t *neigh_addr)
+ const uint8_t *neigh_addr,
+ struct batadv_orig_node *orig_node)
{
- struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_neigh_node *neigh_node;
neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
@@ -104,15 +195,14 @@ batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
INIT_HLIST_NODE(&neigh_node->list);
memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
- spin_lock_init(&neigh_node->lq_update_lock);
+ neigh_node->if_incoming = hard_iface;
+ neigh_node->orig_node = orig_node;
+
+ INIT_LIST_HEAD(&neigh_node->bonding_list);
/* extra reference for return */
atomic_set(&neigh_node->refcount, 2);
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Creating new neighbor %pM on interface %s\n", neigh_addr,
- hard_iface->net_dev->name);
-
out:
return neigh_node;
}
@@ -146,13 +236,15 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
/* Free nc_nodes */
batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
- batadv_frag_list_free(&orig_node->frag_list);
- batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
+ batadv_frag_purge_orig(orig_node, NULL);
+
+ batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
"originator timed out");
+ if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
+ orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
+
kfree(orig_node->tt_buff);
- kfree(orig_node->bcast_own);
- kfree(orig_node->bcast_own_sum);
kfree(orig_node);
}
@@ -210,20 +302,22 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
batadv_hash_destroy(hash);
}
-/* this function finds or creates an originator entry for the given
- * address if it does not exits
+/**
+ * batadv_orig_node_new - creates a new orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the originator
+ *
+ * Creates a new originator object and initialises all the generic fields.
+ * The new object is not added to the originator list.
+ * Returns the newly created object or NULL on failure.
*/
-struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
+struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
const uint8_t *addr)
{
struct batadv_orig_node *orig_node;
- int size;
- int hash_added;
+ struct batadv_orig_node_vlan *vlan;
unsigned long reset_time;
-
- orig_node = batadv_orig_hash_find(bat_priv, addr);
- if (orig_node)
- return orig_node;
+ int i;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Creating new originator: %pM\n", addr);
@@ -234,10 +328,12 @@ struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
INIT_HLIST_HEAD(&orig_node->neigh_list);
INIT_LIST_HEAD(&orig_node->bond_list);
- spin_lock_init(&orig_node->ogm_cnt_lock);
+ INIT_LIST_HEAD(&orig_node->vlan_list);
spin_lock_init(&orig_node->bcast_seqno_lock);
spin_lock_init(&orig_node->neigh_list_lock);
spin_lock_init(&orig_node->tt_buff_lock);
+ spin_lock_init(&orig_node->tt_lock);
+ spin_lock_init(&orig_node->vlan_list_lock);
batadv_nc_init_orig(orig_node);
@@ -249,43 +345,32 @@ struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
memcpy(orig_node->orig, addr, ETH_ALEN);
batadv_dat_init_orig_node_addr(orig_node);
orig_node->router = NULL;
- orig_node->tt_crc = 0;
atomic_set(&orig_node->last_ttvn, 0);
orig_node->tt_buff = NULL;
orig_node->tt_buff_len = 0;
- atomic_set(&orig_node->tt_size, 0);
reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
orig_node->bcast_seqno_reset = reset_time;
orig_node->batman_seqno_reset = reset_time;
atomic_set(&orig_node->bond_candidates, 0);
- size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
-
- orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
- if (!orig_node->bcast_own)
+ /* create a vlan object for the "untagged" LAN */
+ vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
+ if (!vlan)
goto free_orig_node;
+ /* batadv_orig_node_vlan_new() increases the refcounter.
+ * Immediately release vlan since it is not needed anymore in this
+ * context
+ */
+ batadv_orig_node_vlan_free_ref(vlan);
- size = bat_priv->num_ifaces * sizeof(uint8_t);
- orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
-
- INIT_LIST_HEAD(&orig_node->frag_list);
- orig_node->last_frag_packet = 0;
-
- if (!orig_node->bcast_own_sum)
- goto free_bcast_own;
-
- hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
- batadv_choose_orig, orig_node,
- &orig_node->hash_entry);
- if (hash_added != 0)
- goto free_bcast_own_sum;
+ for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
+ INIT_HLIST_HEAD(&orig_node->fragments[i].head);
+ spin_lock_init(&orig_node->fragments[i].lock);
+ orig_node->fragments[i].size = 0;
+ }
return orig_node;
-free_bcast_own_sum:
- kfree(orig_node->bcast_own_sum);
-free_bcast_own:
- kfree(orig_node->bcast_own);
free_orig_node:
kfree(orig_node);
return NULL;
@@ -294,15 +379,16 @@ free_orig_node:
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
- struct batadv_neigh_node **best_neigh_node)
+ struct batadv_neigh_node **best_neigh)
{
+ struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
struct hlist_node *node_tmp;
struct batadv_neigh_node *neigh_node;
bool neigh_purged = false;
unsigned long last_seen;
struct batadv_hard_iface *if_incoming;
- *best_neigh_node = NULL;
+ *best_neigh = NULL;
spin_lock_bh(&orig_node->neigh_list_lock);
@@ -335,9 +421,12 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
batadv_bonding_candidate_del(orig_node, neigh_node);
batadv_neigh_node_free_ref(neigh_node);
} else {
- if ((!*best_neigh_node) ||
- (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
- *best_neigh_node = neigh_node;
+ /* store the best_neighbour if this is the first
+ * iteration or if a better neighbor has been found
+ */
+ if (!*best_neigh ||
+ bao->bat_neigh_cmp(neigh_node, *best_neigh) > 0)
+ *best_neigh = neigh_node;
}
}
@@ -388,17 +477,14 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
hlist_for_each_entry_safe(orig_node, node_tmp,
head, hash_entry) {
if (batadv_purge_orig_node(bat_priv, orig_node)) {
- if (orig_node->gw_flags)
- batadv_gw_node_delete(bat_priv,
- orig_node);
+ batadv_gw_node_delete(bat_priv, orig_node);
hlist_del_rcu(&orig_node->hash_entry);
batadv_orig_node_free_ref(orig_node);
continue;
}
- if (batadv_has_timed_out(orig_node->last_frag_packet,
- BATADV_FRAG_TIMEOUT))
- batadv_frag_list_free(&orig_node->frag_list);
+ batadv_frag_purge_orig(orig_node,
+ batadv_frag_check_entry);
}
spin_unlock_bh(list_lock);
}
@@ -429,100 +515,26 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_head *head;
struct batadv_hard_iface *primary_if;
- struct batadv_orig_node *orig_node;
- struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
- int batman_count = 0;
- int last_seen_secs;
- int last_seen_msecs;
- unsigned long last_seen_jiffies;
- uint32_t i;
primary_if = batadv_seq_print_text_primary_if_get(seq);
if (!primary_if)
- goto out;
+ return 0;
- seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
+ seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
BATADV_SOURCE_VERSION, primary_if->net_dev->name,
- primary_if->net_dev->dev_addr, net_dev->name);
- seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
- "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
- "Nexthop", "outgoingIF", "Potential nexthops");
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- neigh_node = batadv_orig_node_get_router(orig_node);
- if (!neigh_node)
- continue;
-
- if (neigh_node->tq_avg == 0)
- goto next;
-
- last_seen_jiffies = jiffies - orig_node->last_seen;
- last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
- last_seen_secs = last_seen_msecs / 1000;
- last_seen_msecs = last_seen_msecs % 1000;
-
- seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
- orig_node->orig, last_seen_secs,
- last_seen_msecs, neigh_node->tq_avg,
- neigh_node->addr,
- neigh_node->if_incoming->net_dev->name);
-
- hlist_for_each_entry_rcu(neigh_node_tmp,
- &orig_node->neigh_list, list) {
- seq_printf(seq, " %pM (%3i)",
- neigh_node_tmp->addr,
- neigh_node_tmp->tq_avg);
- }
+ primary_if->net_dev->dev_addr, net_dev->name,
+ bat_priv->bat_algo_ops->name);
- seq_puts(seq, "\n");
- batman_count++;
+ batadv_hardif_free_ref(primary_if);
-next:
- batadv_neigh_node_free_ref(neigh_node);
- }
- rcu_read_unlock();
+ if (!bat_priv->bat_algo_ops->bat_orig_print) {
+ seq_puts(seq,
+ "No printing function for this routing protocol\n");
+ return 0;
}
- if (batman_count == 0)
- seq_puts(seq, "No batman nodes in range ...\n");
-
-out:
- if (primary_if)
- batadv_hardif_free_ref(primary_if);
- return 0;
-}
-
-static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
- int max_if_num)
-{
- void *data_ptr;
- size_t data_size, old_size;
-
- data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
- old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
- data_ptr = kmalloc(data_size, GFP_ATOMIC);
- if (!data_ptr)
- return -ENOMEM;
-
- memcpy(data_ptr, orig_node->bcast_own, old_size);
- kfree(orig_node->bcast_own);
- orig_node->bcast_own = data_ptr;
-
- data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
- if (!data_ptr)
- return -ENOMEM;
-
- memcpy(data_ptr, orig_node->bcast_own_sum,
- (max_if_num - 1) * sizeof(uint8_t));
- kfree(orig_node->bcast_own_sum);
- orig_node->bcast_own_sum = data_ptr;
+ bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq);
return 0;
}
@@ -531,6 +543,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
int max_if_num)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
struct batadv_orig_node *orig_node;
@@ -545,10 +558,10 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- spin_lock_bh(&orig_node->ogm_cnt_lock);
- ret = batadv_orig_node_add_if(orig_node, max_if_num);
- spin_unlock_bh(&orig_node->ogm_cnt_lock);
-
+ ret = 0;
+ if (bao->bat_orig_add_if)
+ ret = bao->bat_orig_add_if(orig_node,
+ max_if_num);
if (ret == -ENOMEM)
goto err;
}
@@ -562,54 +575,6 @@ err:
return -ENOMEM;
}
-static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
- int max_if_num, int del_if_num)
-{
- void *data_ptr = NULL;
- int chunk_size;
-
- /* last interface was removed */
- if (max_if_num == 0)
- goto free_bcast_own;
-
- chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
- data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
- if (!data_ptr)
- return -ENOMEM;
-
- /* copy first part */
- memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
-
- /* copy second part */
- memcpy((char *)data_ptr + del_if_num * chunk_size,
- orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
- (max_if_num - del_if_num) * chunk_size);
-
-free_bcast_own:
- kfree(orig_node->bcast_own);
- orig_node->bcast_own = data_ptr;
-
- if (max_if_num == 0)
- goto free_own_sum;
-
- data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
- if (!data_ptr)
- return -ENOMEM;
-
- memcpy(data_ptr, orig_node->bcast_own_sum,
- del_if_num * sizeof(uint8_t));
-
- memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
- orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
- (max_if_num - del_if_num) * sizeof(uint8_t));
-
-free_own_sum:
- kfree(orig_node->bcast_own_sum);
- orig_node->bcast_own_sum = data_ptr;
-
- return 0;
-}
-
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
int max_if_num)
{
@@ -618,6 +583,7 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
struct hlist_head *head;
struct batadv_hard_iface *hard_iface_tmp;
struct batadv_orig_node *orig_node;
+ struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
uint32_t i;
int ret;
@@ -629,11 +595,11 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- spin_lock_bh(&orig_node->ogm_cnt_lock);
- ret = batadv_orig_node_del_if(orig_node, max_if_num,
- hard_iface->if_num);
- spin_unlock_bh(&orig_node->ogm_cnt_lock);
-
+ ret = 0;
+ if (bao->bat_orig_del_if)
+ ret = bao->bat_orig_del_if(orig_node,
+ max_if_num,
+ hard_iface->if_num);
if (ret == -ENOMEM)
goto err;
}
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 7887b84a9af4..6f77d808a916 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -22,16 +22,18 @@
#include "hash.h"
+int batadv_compare_orig(const struct hlist_node *node, const void *data2);
int batadv_originator_init(struct batadv_priv *bat_priv);
void batadv_originator_free(struct batadv_priv *bat_priv);
void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
-struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
+struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
const uint8_t *addr);
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
- const uint8_t *neigh_addr);
+ const uint8_t *neigh_addr,
+ struct batadv_orig_node *orig_node);
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node);
struct batadv_neigh_node *
batadv_orig_node_get_router(struct batadv_orig_node *orig_node);
@@ -40,6 +42,13 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
int max_if_num);
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
int max_if_num);
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+ unsigned short vid);
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
+ unsigned short vid);
+void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan);
/* hashfunction to choose an entry in a hash table of given size
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index a51ccfc39da4..207459b62966 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -20,17 +20,34 @@
#ifndef _NET_BATMAN_ADV_PACKET_H_
#define _NET_BATMAN_ADV_PACKET_H_
+/**
+ * enum batadv_packettype - types for batman-adv encapsulated packets
+ * @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV
+ * @BATADV_BCAST: broadcast packets carrying broadcast payload
+ * @BATADV_CODED: network coded packets
+ *
+ * @BATADV_UNICAST: unicast packets carrying unicast payload traffic
+ * @BATADV_UNICAST_FRAG: unicast packets carrying a fragment of the original
+ * payload packet
+ * @BATADV_UNICAST_4ADDR: unicast packet including the originator address of
+ * the sender
+ * @BATADV_ICMP: unicast packet like IP ICMP used for ping or traceroute
+ * @BATADV_UNICAST_TVLV: unicast packet carrying TVLV containers
+ */
enum batadv_packettype {
- BATADV_IV_OGM = 0x01,
- BATADV_ICMP = 0x02,
- BATADV_UNICAST = 0x03,
- BATADV_BCAST = 0x04,
- BATADV_VIS = 0x05,
- BATADV_UNICAST_FRAG = 0x06,
- BATADV_TT_QUERY = 0x07,
- BATADV_ROAM_ADV = 0x08,
- BATADV_UNICAST_4ADDR = 0x09,
- BATADV_CODED = 0x0a,
+ /* 0x00 - 0x3f: local packets or special rules for handling */
+ BATADV_IV_OGM = 0x00,
+ BATADV_BCAST = 0x01,
+ BATADV_CODED = 0x02,
+ /* 0x40 - 0x7f: unicast */
+#define BATADV_UNICAST_MIN 0x40
+ BATADV_UNICAST = 0x40,
+ BATADV_UNICAST_FRAG = 0x41,
+ BATADV_UNICAST_4ADDR = 0x42,
+ BATADV_ICMP = 0x43,
+ BATADV_UNICAST_TVLV = 0x44,
+#define BATADV_UNICAST_MAX 0x7f
+ /* 0x80 - 0xff: reserved */
};
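Grouping every unicast type into the 0x40 - 0x7f range is what later lets a node keep forwarding unicast packets whose exact type it does not implement yet (see batadv_recv_unhandled_unicast_packet in routing.c further down). A minimal user-space sketch of that range test, with the two constants restated and the helper name purely hypothetical:

	#include <stdbool.h>
	#include <stdint.h>

	/* restated from the enum above so the sketch is self-contained */
	#define BATADV_UNICAST_MIN 0x40
	#define BATADV_UNICAST_MAX 0x7f

	/* hypothetical helper, not part of batman-adv */
	static bool batadv_is_unicast_type(uint8_t packet_type)
	{
		return packet_type >= BATADV_UNICAST_MIN &&
		       packet_type <= BATADV_UNICAST_MAX;
	}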
/**
@@ -48,13 +65,21 @@ enum batadv_subtype {
};
/* this file is included by batctl which needs these defines */
-#define BATADV_COMPAT_VERSION 14
+#define BATADV_COMPAT_VERSION 15
+/**
+ * enum batadv_iv_flags - flags used in B.A.T.M.A.N. IV OGM packets
+ * @BATADV_NOT_BEST_NEXT_HOP: flag is set when ogm packet is forwarded and was
+ * previously received from someone other than the best neighbor.
+ * @BATADV_PRIMARIES_FIRST_HOP: flag is set when the primary interface address
+ * is used, and the packet travels its first hop.
+ * @BATADV_DIRECTLINK: flag is for the first hop or if rebroadcast from a
+ * one hop neighbor on the interface where it was originally received.
+ */
enum batadv_iv_flags {
- BATADV_NOT_BEST_NEXT_HOP = BIT(3),
- BATADV_PRIMARIES_FIRST_HOP = BIT(4),
- BATADV_VIS_SERVER = BIT(5),
- BATADV_DIRECTLINK = BIT(6),
+ BATADV_NOT_BEST_NEXT_HOP = BIT(0),
+ BATADV_PRIMARIES_FIRST_HOP = BIT(1),
+ BATADV_DIRECTLINK = BIT(2),
};
/* ICMP message types */
@@ -66,43 +91,44 @@ enum batadv_icmp_packettype {
BATADV_PARAMETER_PROBLEM = 12,
};
-/* vis defines */
-enum batadv_vis_packettype {
- BATADV_VIS_TYPE_SERVER_SYNC = 0,
- BATADV_VIS_TYPE_CLIENT_UPDATE = 1,
-};
-
-/* fragmentation defines */
-enum batadv_unicast_frag_flags {
- BATADV_UNI_FRAG_HEAD = BIT(0),
- BATADV_UNI_FRAG_LARGETAIL = BIT(1),
-};
+/* tt data subtypes */
+#define BATADV_TT_DATA_TYPE_MASK 0x0F
-/* TT_QUERY subtypes */
-#define BATADV_TT_QUERY_TYPE_MASK 0x3
-
-enum batadv_tt_query_packettype {
- BATADV_TT_REQUEST = 0,
- BATADV_TT_RESPONSE = 1,
-};
-
-/* TT_QUERY flags */
-enum batadv_tt_query_flags {
- BATADV_TT_FULL_TABLE = BIT(2),
+/**
+ * enum batadv_tt_data_flags - flags for tt data tvlv
+ * @BATADV_TT_OGM_DIFF: TT diff propagated through OGM
+ * @BATADV_TT_REQUEST: TT request message
+ * @BATADV_TT_RESPONSE: TT response message
+ * @BATADV_TT_FULL_TABLE: contains full table to replace existing table
+ */
+enum batadv_tt_data_flags {
+ BATADV_TT_OGM_DIFF = BIT(0),
+ BATADV_TT_REQUEST = BIT(1),
+ BATADV_TT_RESPONSE = BIT(2),
+ BATADV_TT_FULL_TABLE = BIT(4),
};
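The message kind and its modifiers now share one flags byte: the diff/request/response kind lives in the low nibble covered by BATADV_TT_DATA_TYPE_MASK, while BATADV_TT_FULL_TABLE sits above it and only qualifies a response. A hedged sketch of how a receiver might classify such a byte (the helper name is invented for illustration):

	#include <stdbool.h>
	#include <stdint.h>

	/* flag values restated from above for a self-contained sketch */
	#define BATADV_TT_DATA_TYPE_MASK 0x0F
	#define BATADV_TT_RESPONSE   (1U << 2)
	#define BATADV_TT_FULL_TABLE (1U << 4)

	/* hypothetical helper: true for a response that replaces the table */
	static bool tt_is_full_table_response(uint8_t flags)
	{
		return (flags & BATADV_TT_DATA_TYPE_MASK) == BATADV_TT_RESPONSE &&
		       (flags & BATADV_TT_FULL_TABLE);
	}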
/* BATADV_TT_CLIENT flags.
* Flags from BIT(0) to BIT(7) are sent on the wire, while flags from BIT(8) to
- * BIT(15) are used for local computation only
+ * BIT(15) are used for local computation only.
+ * Flags from BIT(4) to BIT(7) are kept in sync with the rest of the network.
*/
enum batadv_tt_client_flags {
BATADV_TT_CLIENT_DEL = BIT(0),
BATADV_TT_CLIENT_ROAM = BIT(1),
- BATADV_TT_CLIENT_WIFI = BIT(2),
- BATADV_TT_CLIENT_TEMP = BIT(3),
+ BATADV_TT_CLIENT_WIFI = BIT(4),
BATADV_TT_CLIENT_NOPURGE = BIT(8),
BATADV_TT_CLIENT_NEW = BIT(9),
BATADV_TT_CLIENT_PENDING = BIT(10),
+ BATADV_TT_CLIENT_TEMP = BIT(11),
+};
+
+/**
+ * enum batadv_vlan_flags - flags for the four MSB of any vlan ID field
+ * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
+ */
+enum batadv_vlan_flags {
+ BATADV_VLAN_HAS_TAG = BIT(15),
};
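Since the upper four bits of every vid field are reserved for flags, a tagged frame carries its 802.1Q VLAN ID in the lower bits with BIT(15) set. A small sketch of the packing, under the assumption that the remaining flag bits stay zero (helper names are hypothetical):

	#include <stdint.h>

	#define BATADV_VLAN_HAS_TAG (1U << 15)
	#define VLAN_VID_MASK 0x0fff /* usual 12-bit 802.1Q VLAN ID mask */

	/* combine a VLAN ID with the "has tag" marker */
	static uint16_t batadv_vid_pack(uint16_t vlan_id)
	{
		return (uint16_t)(BATADV_VLAN_HAS_TAG | (vlan_id & VLAN_VID_MASK));
	}

	/* recover the plain VLAN ID again */
	static uint16_t batadv_vid_unpack(uint16_t vid)
	{
		return vid & VLAN_VID_MASK;
	}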
/* claim frame types for the bridge loop avoidance */
@@ -113,6 +139,22 @@ enum batadv_bla_claimframe {
BATADV_CLAIM_TYPE_REQUEST = 0x03,
};
+/**
+ * enum batadv_tvlv_type - tvlv type definitions
+ * @BATADV_TVLV_GW: gateway tvlv
+ * @BATADV_TVLV_DAT: distributed arp table tvlv
+ * @BATADV_TVLV_NC: network coding tvlv
+ * @BATADV_TVLV_TT: translation table tvlv
+ * @BATADV_TVLV_ROAM: roaming advertisement tvlv
+ */
+enum batadv_tvlv_type {
+ BATADV_TVLV_GW = 0x01,
+ BATADV_TVLV_DAT = 0x02,
+ BATADV_TVLV_NC = 0x03,
+ BATADV_TVLV_TT = 0x04,
+ BATADV_TVLV_ROAM = 0x05,
+};
+
/* the destination hardware field in the ARP frame is used to
* transport the claim type and the group id
*/
@@ -131,47 +173,74 @@ struct batadv_header {
*/
};
+/**
+ * struct batadv_ogm_packet - ogm (routing protocol) packet
+ * @header: common batman packet header
+ * @flags: contains routing relevant flags - see enum batadv_iv_flags
+ * @tvlv_len: length of tvlv data following the ogm header
+ */
struct batadv_ogm_packet {
struct batadv_header header;
- uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
+ uint8_t flags;
__be32 seqno;
uint8_t orig[ETH_ALEN];
uint8_t prev_sender[ETH_ALEN];
- uint8_t gw_flags; /* flags related to gateway class */
+ uint8_t reserved;
uint8_t tq;
- uint8_t tt_num_changes;
- uint8_t ttvn; /* translation table version number */
- __be16 tt_crc;
-} __packed;
+ __be16 tvlv_len;
+ /* __packed is not needed as the struct size is divisible by 4,
+ * and the largest data type in this struct has a size of 4.
+ */
+};
#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
-struct batadv_icmp_packet {
+/**
+ * struct batadv_icmp_header - common ICMP header
+ * @header: common batman header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
+ */
+struct batadv_icmp_header {
struct batadv_header header;
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[ETH_ALEN];
uint8_t orig[ETH_ALEN];
- __be16 seqno;
uint8_t uid;
+};
+
+/**
+ * struct batadv_icmp_packet - ICMP packet
+ * @icmph: common ICMP header
+ * @reserved: not used - useful for alignment
+ * @seqno: ICMP sequence number
+ */
+struct batadv_icmp_packet {
+ struct batadv_icmp_header icmph;
uint8_t reserved;
+ __be16 seqno;
};
#define BATADV_RR_LEN 16
-/* icmp_packet_rr must start with all fields from imcp_packet
- * as this is assumed by code that handles ICMP packets
+/**
+ * struct batadv_icmp_packet_rr - ICMP RouteRecord packet
+ * @icmph: common ICMP header
+ * @rr_cur: number of entries in the rr array
+ * @seqno: ICMP sequence number
+ * @rr: route record array
*/
struct batadv_icmp_packet_rr {
- struct batadv_header header;
- uint8_t msg_type; /* see ICMP message types above */
- uint8_t dst[ETH_ALEN];
- uint8_t orig[ETH_ALEN];
- __be16 seqno;
- uint8_t uid;
+ struct batadv_icmp_header icmph;
uint8_t rr_cur;
+ __be16 seqno;
uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
};
+#define BATADV_ICMP_MAX_PACKET_SIZE sizeof(struct batadv_icmp_packet_rr)
+
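Both ICMP variants now start with struct batadv_icmp_header as their first member, so the receive path can inspect msg_type and dst through the common header before it knows whether the record-route layout follows. A simplified illustration of that first-member pattern (these structs are stand-ins, not the kernel definitions):

	#include <stdint.h>

	struct icmp_hdr    { uint8_t msg_type; uint8_t dst[6]; };
	struct icmp_pkt    { struct icmp_hdr icmph; uint8_t reserved; uint16_t seqno; };
	struct icmp_pkt_rr { struct icmp_hdr icmph; uint8_t rr_cur;   uint16_t seqno; };

	/* icmph being the first member makes a cast to the common header
	 * valid for either variant
	 */
	static uint8_t peek_msg_type(const void *buf)
	{
		return ((const struct icmp_hdr *)buf)->msg_type;
	}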
/* All packet headers in front of an ethernet header have to be completely
* divisible by 2 but not by 4 to make the payload after the ethernet
* header again 4 bytes boundary aligned.
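The arithmetic behind this rule: the inner Ethernet header is 14 bytes, i.e. 2 mod 4, so a batman-adv header that is itself 2 mod 4 brings the combined prefix back to a multiple of 4 and keeps the encapsulated payload 4-byte aligned. A throwaway check of that reasoning (the 10-byte value is only an example header size, not a definition from this file):

	#include <assert.h>

	int main(void)
	{
		const unsigned int eth_hlen = 14;    /* inner Ethernet header */
		const unsigned int bat_hdr_len = 10; /* example batman-adv header */

		assert(bat_hdr_len % 4 == 2);
		assert((bat_hdr_len + eth_hlen) % 4 == 0);
		return 0;
	}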
@@ -209,15 +278,32 @@ struct batadv_unicast_4addr_packet {
*/
};
-struct batadv_unicast_frag_packet {
- struct batadv_header header;
- uint8_t ttvn; /* destination translation table version number */
- uint8_t dest[ETH_ALEN];
- uint8_t flags;
- uint8_t align;
- uint8_t orig[ETH_ALEN];
- __be16 seqno;
-} __packed;
+/**
+ * struct batadv_frag_packet - fragmented packet
+ * @header: common batman packet header with type, compatversion, and ttl
+ * @dest: final destination used when routing fragments
+ * @orig: originator of the fragment used when merging the packet
+ * @no: fragment number within this sequence
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @total_size: size of the merged packet
+ */
+struct batadv_frag_packet {
+ struct batadv_header header;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ uint8_t no:4;
+ uint8_t reserved:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ uint8_t reserved:4;
+ uint8_t no:4;
+#else
+#error "unknown bitfield endianness"
+#endif
+ uint8_t dest[ETH_ALEN];
+ uint8_t orig[ETH_ALEN];
+ __be16 seqno;
+ __be16 total_size;
+};
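The endianness-specific bitfield declarations above keep the on-wire layout identical: the fragment number always ends up in the four most significant bits of that byte. An equivalent, bitfield-free reading of the raw byte (helper names invented; treat the nibble assignment as an assumption of this sketch):

	#include <stdint.h>

	/* fragment number: high nibble of the shared byte */
	static uint8_t frag_no(uint8_t raw)
	{
		return raw >> 4;
	}

	/* reserved bits: low nibble of the shared byte */
	static uint8_t frag_reserved(uint8_t raw)
	{
		return raw & 0x0f;
	}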
struct batadv_bcast_packet {
struct batadv_header header;
@@ -231,54 +317,6 @@ struct batadv_bcast_packet {
#pragma pack()
-struct batadv_vis_packet {
- struct batadv_header header;
- uint8_t vis_type; /* which type of vis-participant sent this? */
- __be32 seqno; /* sequence number */
- uint8_t entries; /* number of entries behind this struct */
- uint8_t reserved;
- uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */
- uint8_t target_orig[ETH_ALEN]; /* who should receive this packet */
- uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
-};
-
-struct batadv_tt_query_packet {
- struct batadv_header header;
- /* the flag field is a combination of:
- * - TT_REQUEST or TT_RESPONSE
- * - TT_FULL_TABLE
- */
- uint8_t flags;
- uint8_t dst[ETH_ALEN];
- uint8_t src[ETH_ALEN];
- /* the ttvn field is:
- * if TT_REQUEST: ttvn that triggered the
- * request
- * if TT_RESPONSE: new ttvn for the src
- * orig_node
- */
- uint8_t ttvn;
- /* tt_data field is:
- * if TT_REQUEST: crc associated with the
- * ttvn
- * if TT_RESPONSE: table_size
- */
- __be16 tt_data;
-} __packed;
-
-struct batadv_roam_adv_packet {
- struct batadv_header header;
- uint8_t reserved;
- uint8_t dst[ETH_ALEN];
- uint8_t src[ETH_ALEN];
- uint8_t client[ETH_ALEN];
-} __packed;
-
-struct batadv_tt_change {
- uint8_t flags;
- uint8_t addr[ETH_ALEN];
-} __packed;
-
/**
* struct batadv_coded_packet - network coded packet
* @header: common batman packet header and ttl of first included packet
@@ -311,4 +349,96 @@ struct batadv_coded_packet {
__be16 coded_len;
};
+/**
+ * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload
+ * @header: common batman packet header
+ * @reserved: reserved field (for packet alignment)
+ * @src: address of the source
+ * @dst: address of the destination
+ * @tvlv_len: length of tvlv data following the unicast tvlv header
+ * @align: 2 bytes to align the header to a 4 byte boundary
+ */
+struct batadv_unicast_tvlv_packet {
+ struct batadv_header header;
+ uint8_t reserved;
+ uint8_t dst[ETH_ALEN];
+ uint8_t src[ETH_ALEN];
+ __be16 tvlv_len;
+ uint16_t align;
+};
+
+/**
+ * struct batadv_tvlv_hdr - base tvlv header struct
+ * @type: tvlv container type (see batadv_tvlv_type)
+ * @version: tvlv container version
+ * @len: tvlv container length
+ */
+struct batadv_tvlv_hdr {
+ uint8_t type;
+ uint8_t version;
+ __be16 len;
+};
+
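Every TVLV container is this fixed 4-byte header followed by len bytes of type-specific payload, and containers are simply concatenated inside an OGM or unicast tvlv packet. The kernel parses them in batadv_tvlv_containers_process(); the user-space walker below is only a sketch of the framing (struct and function names invented):

	#include <arpa/inet.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	struct tvlv_hdr {
		uint8_t type;
		uint8_t version;
		uint16_t len; /* big endian on the wire, like __be16 above */
	};

	static void walk_tvlv(const uint8_t *buff, size_t buff_len,
			      void (*cb)(uint8_t type, uint8_t version,
					 const uint8_t *value, uint16_t value_len))
	{
		struct tvlv_hdr hdr;
		uint16_t value_len;

		while (buff_len >= sizeof(hdr)) {
			memcpy(&hdr, buff, sizeof(hdr));
			value_len = ntohs(hdr.len);
			if (sizeof(hdr) + value_len > buff_len)
				break; /* truncated container, stop parsing */

			cb(hdr.type, hdr.version, buff + sizeof(hdr), value_len);
			buff += sizeof(hdr) + value_len;
			buff_len -= sizeof(hdr) + value_len;
		}
	}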
+/**
+ * struct batadv_tvlv_gateway_data - gateway data propagated through gw tvlv
+ * container
+ * @bandwidth_down: advertised uplink download bandwidth
+ * @bandwidth_up: advertised uplink upload bandwidth
+ */
+struct batadv_tvlv_gateway_data {
+ __be32 bandwidth_down;
+ __be32 bandwidth_up;
+};
+
+/**
+ * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container
+ * @flags: translation table flags (see batadv_tt_data_flags)
+ * @ttvn: translation table version number
+ * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by
+ * one batadv_tvlv_tt_vlan_data object per announced vlan
+ */
+struct batadv_tvlv_tt_data {
+ uint8_t flags;
+ uint8_t ttvn;
+ __be16 num_vlan;
+};
+
+/**
+ * struct batadv_tvlv_tt_vlan_data - vlan specific tt data propagated through
+ * the tt tvlv container
+ * @crc: crc32 checksum of the entries belonging to this vlan
+ * @vid: vlan identifier
+ * @reserved: unused, useful for alignment purposes
+ */
+struct batadv_tvlv_tt_vlan_data {
+ __be32 crc;
+ __be16 vid;
+ uint16_t reserved;
+};
+
+/**
+ * struct batadv_tvlv_tt_change - translation table diff data
+ * @flags: status indicators concerning the non-mesh client (see
+ * batadv_tt_client_flags)
+ * @reserved: reserved field
+ * @addr: mac address of non-mesh client that triggered this tt change
+ * @vid: VLAN identifier
+ */
+struct batadv_tvlv_tt_change {
+ uint8_t flags;
+ uint8_t reserved;
+ uint8_t addr[ETH_ALEN];
+ __be16 vid;
+};
+
+/**
+ * struct batadv_tvlv_roam_adv - roaming advertisement
+ * @client: mac address of roaming client
+ * @vid: VLAN identifier
+ */
+struct batadv_tvlv_roam_adv {
+ uint8_t client[ETH_ALEN];
+ __be16 vid;
+};
+
#endif /* _NET_BATMAN_ADV_PACKET_H_ */
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 0439395d7ba5..d4114d775ad6 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -25,11 +25,12 @@
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
-#include "vis.h"
-#include "unicast.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "network-coding.h"
+#include "fragmentation.h"
+
+#include <linux/if_vlan.h>
static int batadv_route_unicast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
@@ -46,7 +47,7 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
if ((curr_router) && (!neigh_node)) {
batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
"Deleting route towards: %pM\n", orig_node->orig);
- batadv_tt_global_del_orig(bat_priv, orig_node,
+ batadv_tt_global_del_orig(bat_priv, orig_node, -1,
"Deleted route towards originator");
/* route added */
@@ -114,9 +115,19 @@ out:
return;
}
-void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
+/**
+ * batadv_bonding_candidate_add - consider a new link for bonding mode towards
+ * the given originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the target node
+ * @neigh_node: the neighbor representing the new link to consider for bonding
+ * mode
+ */
+void batadv_bonding_candidate_add(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
struct batadv_neigh_node *neigh_node)
{
+ struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
struct batadv_neigh_node *tmp_neigh_node, *router = NULL;
uint8_t interference_candidate = 0;
@@ -131,8 +142,9 @@ void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
if (!router)
goto candidate_del;
+
/* ... and is good enough to be considered */
- if (neigh_node->tq_avg < router->tq_avg - BATADV_BONDING_TQ_THRESHOLD)
+ if (bao->bat_neigh_is_equiv_or_better(neigh_node, router))
goto candidate_del;
/* check if we have another candidate with the same mac address or
@@ -248,46 +260,65 @@ bool batadv_check_management_packet(struct sk_buff *skb,
return true;
}
+/**
+ * batadv_recv_my_icmp_packet - receive an icmp packet locally
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: icmp packet to process
+ *
+ * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
- struct sk_buff *skb, size_t icmp_len)
+ struct sk_buff *skb)
{
struct batadv_hard_iface *primary_if = NULL;
struct batadv_orig_node *orig_node = NULL;
- struct batadv_icmp_packet_rr *icmp_packet;
- int ret = NET_RX_DROP;
+ struct batadv_icmp_header *icmph;
+ int res, ret = NET_RX_DROP;
- icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+ icmph = (struct batadv_icmp_header *)skb->data;
- /* add data to device queue */
- if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
- batadv_socket_receive_packet(icmp_packet, icmp_len);
- goto out;
- }
+ switch (icmph->msg_type) {
+ case BATADV_ECHO_REPLY:
+ case BATADV_DESTINATION_UNREACHABLE:
+ case BATADV_TTL_EXCEEDED:
+ /* receive the packet */
+ if (skb_linearize(skb) < 0)
+ break;
- primary_if = batadv_primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
+ batadv_socket_receive_packet(icmph, skb->len);
+ break;
+ case BATADV_ECHO_REQUEST:
+ /* answer echo request (ping) */
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
- /* answer echo request (ping) */
- /* get routing information */
- orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
- if (!orig_node)
- goto out;
+ /* get routing information */
+ orig_node = batadv_orig_hash_find(bat_priv, icmph->orig);
+ if (!orig_node)
+ goto out;
- /* create a copy of the skb, if needed, to modify it. */
- if (skb_cow(skb, ETH_HLEN) < 0)
- goto out;
+ /* create a copy of the skb, if needed, to modify it. */
+ if (skb_cow(skb, ETH_HLEN) < 0)
+ goto out;
- icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+ icmph = (struct batadv_icmp_header *)skb->data;
- memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
- memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
- icmp_packet->msg_type = BATADV_ECHO_REPLY;
- icmp_packet->header.ttl = BATADV_TTL;
+ memcpy(icmph->dst, icmph->orig, ETH_ALEN);
+ memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
+ icmph->msg_type = BATADV_ECHO_REPLY;
+ icmph->header.ttl = BATADV_TTL;
- if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
- ret = NET_RX_SUCCESS;
+ res = batadv_send_skb_to_orig(skb, orig_node, NULL);
+ if (res != NET_XMIT_DROP)
+ ret = NET_RX_SUCCESS;
+ break;
+ default:
+ /* drop unknown type */
+ goto out;
+ }
out:
if (primary_if)
batadv_hardif_free_ref(primary_if);
@@ -307,9 +338,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
icmp_packet = (struct batadv_icmp_packet *)skb->data;
/* send TTL exceeded if packet is an echo request (traceroute) */
- if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+ if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) {
pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
- icmp_packet->orig, icmp_packet->dst);
+ icmp_packet->icmph.orig, icmp_packet->icmph.dst);
goto out;
}
@@ -318,7 +349,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
goto out;
/* get routing information */
- orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig);
if (!orig_node)
goto out;
@@ -328,10 +359,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
icmp_packet = (struct batadv_icmp_packet *)skb->data;
- memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
- memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
- icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
- icmp_packet->header.ttl = BATADV_TTL;
+ memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN);
+ memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr,
+ ETH_ALEN);
+ icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED;
+ icmp_packet->icmph.header.ttl = BATADV_TTL;
if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
ret = NET_RX_SUCCESS;
@@ -349,16 +381,13 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct batadv_icmp_packet_rr *icmp_packet;
+ struct batadv_icmp_header *icmph;
+ struct batadv_icmp_packet_rr *icmp_packet_rr;
struct ethhdr *ethhdr;
struct batadv_orig_node *orig_node = NULL;
- int hdr_size = sizeof(struct batadv_icmp_packet);
+ int hdr_size = sizeof(struct batadv_icmp_header);
int ret = NET_RX_DROP;
- /* we truncate all incoming icmp packets if they don't match our size */
- if (skb->len >= sizeof(struct batadv_icmp_packet_rr))
- hdr_size = sizeof(struct batadv_icmp_packet_rr);
-
/* drop packet if it has not necessary minimum size */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
goto out;
@@ -377,26 +406,39 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
goto out;
- icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+ icmph = (struct batadv_icmp_header *)skb->data;
/* add record route information if not full */
- if ((hdr_size == sizeof(struct batadv_icmp_packet_rr)) &&
- (icmp_packet->rr_cur < BATADV_RR_LEN)) {
- memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
+ if ((icmph->msg_type == BATADV_ECHO_REPLY ||
+ icmph->msg_type == BATADV_ECHO_REQUEST) &&
+ (skb->len >= sizeof(struct batadv_icmp_packet_rr))) {
+ if (skb_linearize(skb) < 0)
+ goto out;
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (skb_cow(skb, ETH_HLEN) < 0)
+ goto out;
+
+ icmph = (struct batadv_icmp_header *)skb->data;
+ icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
+ if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
+ goto out;
+
+ memcpy(&(icmp_packet_rr->rr[icmp_packet_rr->rr_cur]),
ethhdr->h_dest, ETH_ALEN);
- icmp_packet->rr_cur++;
+ icmp_packet_rr->rr_cur++;
}
/* packet for me */
- if (batadv_is_my_mac(bat_priv, icmp_packet->dst))
- return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
+ if (batadv_is_my_mac(bat_priv, icmph->dst))
+ return batadv_recv_my_icmp_packet(bat_priv, skb);
/* TTL exceeded */
- if (icmp_packet->header.ttl < 2)
+ if (icmph->header.ttl < 2)
return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
/* get routing information */
- orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
+ orig_node = batadv_orig_hash_find(bat_priv, icmph->dst);
if (!orig_node)
goto out;
@@ -404,10 +446,10 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
- icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+ icmph = (struct batadv_icmp_header *)skb->data;
/* decrement ttl */
- icmp_packet->header.ttl--;
+ icmph->header.ttl--;
/* route it */
if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
@@ -474,18 +516,25 @@ out:
return router;
}
-/* Interface Alternating: Use the best of the
- * remaining candidates which are not using
- * this interface.
+/**
+ * batadv_find_ifalter_router - find the best of the remaining candidates which
+ * are not using this interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_orig: the destination
+ * @recv_if: the interface that the returned router must not use
*
- * Increases the returned router's refcount
+ * Returns the best candidate towards primary_orig that is not using recv_if.
+ * Increases the returned neighbor's refcount
*/
static struct batadv_neigh_node *
-batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
+batadv_find_ifalter_router(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *primary_orig,
const struct batadv_hard_iface *recv_if)
{
- struct batadv_neigh_node *tmp_neigh_node;
struct batadv_neigh_node *router = NULL, *first_candidate = NULL;
+ struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
+ struct batadv_neigh_node *tmp_neigh_node;
rcu_read_lock();
list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
@@ -497,7 +546,7 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
if (tmp_neigh_node->if_incoming == recv_if)
continue;
- if (router && tmp_neigh_node->tq_avg <= router->tq_avg)
+ if (router && bao->bat_neigh_cmp(tmp_neigh_node, router))
continue;
if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
@@ -557,126 +606,6 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
return 0;
}
-int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
-{
- struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct batadv_tt_query_packet *tt_query;
- uint16_t tt_size;
- int hdr_size = sizeof(*tt_query);
- char tt_flag;
- size_t packet_size;
-
- if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
- return NET_RX_DROP;
-
- /* I could need to modify it */
- if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
- goto out;
-
- tt_query = (struct batadv_tt_query_packet *)skb->data;
-
- switch (tt_query->flags & BATADV_TT_QUERY_TYPE_MASK) {
- case BATADV_TT_REQUEST:
- batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
-
- /* If we cannot provide an answer the tt_request is
- * forwarded
- */
- if (!batadv_send_tt_response(bat_priv, tt_query)) {
- if (tt_query->flags & BATADV_TT_FULL_TABLE)
- tt_flag = 'F';
- else
- tt_flag = '.';
-
- batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Routing TT_REQUEST to %pM [%c]\n",
- tt_query->dst,
- tt_flag);
- return batadv_route_unicast_packet(skb, recv_if);
- }
- break;
- case BATADV_TT_RESPONSE:
- batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
-
- if (batadv_is_my_mac(bat_priv, tt_query->dst)) {
- /* packet needs to be linearized to access the TT
- * changes
- */
- if (skb_linearize(skb) < 0)
- goto out;
- /* skb_linearize() possibly changed skb->data */
- tt_query = (struct batadv_tt_query_packet *)skb->data;
-
- tt_size = batadv_tt_len(ntohs(tt_query->tt_data));
-
- /* Ensure we have all the claimed data */
- packet_size = sizeof(struct batadv_tt_query_packet);
- packet_size += tt_size;
- if (unlikely(skb_headlen(skb) < packet_size))
- goto out;
-
- batadv_handle_tt_response(bat_priv, tt_query);
- } else {
- if (tt_query->flags & BATADV_TT_FULL_TABLE)
- tt_flag = 'F';
- else
- tt_flag = '.';
- batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Routing TT_RESPONSE to %pM [%c]\n",
- tt_query->dst,
- tt_flag);
- return batadv_route_unicast_packet(skb, recv_if);
- }
- break;
- }
-
-out:
- /* returning NET_RX_DROP will make the caller function kfree the skb */
- return NET_RX_DROP;
-}
-
-int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
-{
- struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct batadv_roam_adv_packet *roam_adv_packet;
- struct batadv_orig_node *orig_node;
-
- if (batadv_check_unicast_packet(bat_priv, skb,
- sizeof(*roam_adv_packet)) < 0)
- goto out;
-
- batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
-
- roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data;
-
- if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst))
- return batadv_route_unicast_packet(skb, recv_if);
-
- /* check if it is a backbone gateway. we don't accept
- * roaming advertisement from it, as it has the same
- * entries as we have.
- */
- if (batadv_bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
- goto out;
-
- orig_node = batadv_orig_hash_find(bat_priv, roam_adv_packet->src);
- if (!orig_node)
- goto out;
-
- batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Received ROAMING_ADV from %pM (client %pM)\n",
- roam_adv_packet->src, roam_adv_packet->client);
-
- batadv_tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
- BATADV_TT_CLIENT_ROAM,
- atomic_read(&orig_node->last_ttvn) + 1);
-
- batadv_orig_node_free_ref(orig_node);
-out:
- /* returning NET_RX_DROP will make the caller function kfree the skb */
- return NET_RX_DROP;
-}
-
/* find a suitable router for this originator, and use
* bonding if possible. increases the found neighbors
* refcount.
@@ -751,7 +680,8 @@ batadv_find_router(struct batadv_priv *bat_priv,
if (bonding_enabled)
router = batadv_find_bond_router(primary_orig_node, recv_if);
else
- router = batadv_find_ifalter_router(primary_orig_node, recv_if);
+ router = batadv_find_ifalter_router(bat_priv, primary_orig_node,
+ recv_if);
return_router:
if (router && router->if_incoming->if_status != BATADV_IF_ACTIVE)
@@ -772,11 +702,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
{
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct batadv_orig_node *orig_node = NULL;
- struct batadv_neigh_node *neigh_node = NULL;
struct batadv_unicast_packet *unicast_packet;
struct ethhdr *ethhdr = eth_hdr(skb);
int res, hdr_len, ret = NET_RX_DROP;
- struct sk_buff *new_skb;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
@@ -793,46 +721,12 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
if (!orig_node)
goto out;
- /* find_router() increases neigh_nodes refcount if found. */
- neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
-
- if (!neigh_node)
- goto out;
-
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
- unicast_packet = (struct batadv_unicast_packet *)skb->data;
-
- if (unicast_packet->header.packet_type == BATADV_UNICAST &&
- atomic_read(&bat_priv->fragmentation) &&
- skb->len > neigh_node->if_incoming->net_dev->mtu) {
- ret = batadv_frag_send_skb(skb, bat_priv,
- neigh_node->if_incoming,
- neigh_node->addr);
- goto out;
- }
-
- if (unicast_packet->header.packet_type == BATADV_UNICAST_FRAG &&
- batadv_frag_can_reassemble(skb,
- neigh_node->if_incoming->net_dev->mtu)) {
- ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
-
- if (ret == NET_RX_DROP)
- goto out;
-
- /* packet was buffered for late merge */
- if (!new_skb) {
- ret = NET_RX_SUCCESS;
- goto out;
- }
-
- skb = new_skb;
- unicast_packet = (struct batadv_unicast_packet *)skb->data;
- }
-
/* decrement ttl */
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
unicast_packet->header.ttl--;
switch (unicast_packet->header.packet_type) {
@@ -867,8 +761,6 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
}
out:
- if (neigh_node)
- batadv_neigh_node_free_ref(neigh_node);
if (orig_node)
batadv_orig_node_free_ref(orig_node);
return ret;
@@ -879,6 +771,7 @@ out:
* @bat_priv: the bat priv with all the soft interface information
* @unicast_packet: the unicast header to be updated
* @dst_addr: the payload destination
+ * @vid: VLAN identifier
*
* Search the translation table for dst_addr and update the unicast header with
* the new corresponding information (originator address where the destination
@@ -889,21 +782,22 @@ out:
static bool
batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
struct batadv_unicast_packet *unicast_packet,
- uint8_t *dst_addr)
+ uint8_t *dst_addr, unsigned short vid)
{
struct batadv_orig_node *orig_node = NULL;
struct batadv_hard_iface *primary_if = NULL;
bool ret = false;
uint8_t *orig_addr, orig_ttvn;
- if (batadv_is_my_client(bat_priv, dst_addr)) {
+ if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
orig_addr = primary_if->net_dev->dev_addr;
orig_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
} else {
- orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr);
+ orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
+ vid);
if (!orig_node)
goto out;
@@ -930,11 +824,12 @@ out:
static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
struct sk_buff *skb, int hdr_len) {
- uint8_t curr_ttvn, old_ttvn;
+ struct batadv_unicast_packet *unicast_packet;
+ struct batadv_hard_iface *primary_if;
struct batadv_orig_node *orig_node;
+ uint8_t curr_ttvn, old_ttvn;
struct ethhdr *ethhdr;
- struct batadv_hard_iface *primary_if;
- struct batadv_unicast_packet *unicast_packet;
+ unsigned short vid;
int is_old_ttvn;
/* check if there is enough data before accessing it */
@@ -946,6 +841,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
return 0;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
+ vid = batadv_get_vid(skb, hdr_len);
ethhdr = (struct ethhdr *)(skb->data + hdr_len);
/* check if the destination client was served by this node and it is now
@@ -953,9 +849,9 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
* message and that it knows the new destination in the mesh to re-route
* the packet to
*/
- if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest)) {
+ if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
- ethhdr->h_dest))
+ ethhdr->h_dest, vid))
net_ratelimited_function(batadv_dbg, BATADV_DBG_TT,
bat_priv,
"Rerouting unicast packet to %pM (dst=%pM): Local Roaming\n",
@@ -1001,7 +897,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
* target host
*/
if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
- ethhdr->h_dest)) {
+ ethhdr->h_dest, vid)) {
net_ratelimited_function(batadv_dbg, BATADV_DBG_TT, bat_priv,
"Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
unicast_packet->dest, ethhdr->h_dest,
@@ -1013,7 +909,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
* currently served by this node or there is no destination at all and
* it is possible to drop the packet
*/
- if (!batadv_is_my_client(bat_priv, ethhdr->h_dest))
+ if (!batadv_is_my_client(bat_priv, ethhdr->h_dest, vid))
return 0;
/* update the header in order to let the packet be delivered to this
@@ -1032,6 +928,34 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
return 1;
}
+/**
+ * batadv_recv_unhandled_unicast_packet - receive and process packets which
+ * are in the unicast number space but not yet known to the implementation
+ * @skb: unicast packet to process
+ * @recv_if: pointer to interface this packet was received on
+ *
+ * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
+int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
+{
+ struct batadv_unicast_packet *unicast_packet;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ int check, hdr_size = sizeof(*unicast_packet);
+
+ check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
+ if (check < 0)
+ return NET_RX_DROP;
+
+	/* we don't know about this type; drop it if it is addressed to us */
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+ if (batadv_is_my_mac(bat_priv, unicast_packet->dest))
+ return NET_RX_DROP;
+
+ return batadv_route_unicast_packet(skb, recv_if);
+}
+
int batadv_recv_unicast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
@@ -1094,51 +1018,112 @@ rx_success:
return batadv_route_unicast_packet(skb, recv_if);
}
-int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
- struct batadv_hard_iface *recv_if)
+/**
+ * batadv_recv_unicast_tvlv - receive and process unicast tvlv packets
+ * @skb: unicast tvlv packet to process
+ * @recv_if: pointer to interface this packet was received on
+ *
+ * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
+int batadv_recv_unicast_tvlv(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct batadv_unicast_frag_packet *unicast_packet;
- int hdr_size = sizeof(*unicast_packet);
- struct sk_buff *new_skb = NULL;
- int ret;
+ struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
+ unsigned char *tvlv_buff;
+ uint16_t tvlv_buff_len;
+ int hdr_size = sizeof(*unicast_tvlv_packet);
+ int ret = NET_RX_DROP;
if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
return NET_RX_DROP;
- if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
+ /* the header is likely to be modified while forwarding */
+ if (skb_cow(skb, hdr_size) < 0)
return NET_RX_DROP;
- unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
+ /* packet needs to be linearized to access the tvlv content */
+ if (skb_linearize(skb) < 0)
+ return NET_RX_DROP;
- /* packet for me */
- if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
- ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
+ unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)skb->data;
- if (ret == NET_RX_DROP)
- return NET_RX_DROP;
+ tvlv_buff = (unsigned char *)(skb->data + hdr_size);
+ tvlv_buff_len = ntohs(unicast_tvlv_packet->tvlv_len);
- /* packet was buffered for late merge */
- if (!new_skb)
- return NET_RX_SUCCESS;
+ if (tvlv_buff_len > skb->len - hdr_size)
+ return NET_RX_DROP;
- if (batadv_dat_snoop_incoming_arp_request(bat_priv, new_skb,
- hdr_size))
- goto rx_success;
- if (batadv_dat_snoop_incoming_arp_reply(bat_priv, new_skb,
- hdr_size))
- goto rx_success;
+ ret = batadv_tvlv_containers_process(bat_priv, false, NULL,
+ unicast_tvlv_packet->src,
+ unicast_tvlv_packet->dst,
+ tvlv_buff, tvlv_buff_len);
- batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
- sizeof(struct batadv_unicast_packet), NULL);
+ if (ret != NET_RX_SUCCESS)
+ ret = batadv_route_unicast_packet(skb, recv_if);
-rx_success:
- return NET_RX_SUCCESS;
+ return ret;
+}
+
+/**
+ * batadv_recv_frag_packet - process received fragment
+ * @skb: the received fragment
+ * @recv_if: interface that the skb is received on
+ *
+ * This function does one of three things: 1) Forward the fragment, if the
+ * assembled packet will exceed our MTU; 2) Buffer the fragment, if we still
+ * lack further fragments; 3) Merge the fragments, if we have all needed parts.
+ *
+ * Returns NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise.
+ */
+int batadv_recv_frag_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
+{
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_orig_node *orig_node_src = NULL;
+ struct batadv_frag_packet *frag_packet;
+ int ret = NET_RX_DROP;
+
+ if (batadv_check_unicast_packet(bat_priv, skb,
+ sizeof(*frag_packet)) < 0)
+ goto out;
+
+ frag_packet = (struct batadv_frag_packet *)skb->data;
+ orig_node_src = batadv_orig_hash_find(bat_priv, frag_packet->orig);
+ if (!orig_node_src)
+ goto out;
+
+ /* Route the fragment if it is not for us and too big to be merged. */
+ if (!batadv_is_my_mac(bat_priv, frag_packet->dest) &&
+ batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) {
+ ret = NET_RX_SUCCESS;
+ goto out;
}
- return batadv_route_unicast_packet(skb, recv_if);
-}
+ batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);
+
+ /* Add fragment to buffer and merge if possible. */
+ if (!batadv_frag_skb_buffer(&skb, orig_node_src))
+ goto out;
+ /* Deliver merged packet to the appropriate handler, if it was
+ * merged
+ */
+ if (skb)
+ batadv_batman_skb_recv(skb, recv_if->net_dev,
+ &recv_if->batman_adv_ptype, NULL);
+
+ ret = NET_RX_SUCCESS;
+
+out:
+ if (orig_node_src)
+ batadv_orig_node_free_ref(orig_node_src);
+
+ return ret;
+}
int batadv_recv_bcast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
@@ -1240,53 +1225,3 @@ out:
batadv_orig_node_free_ref(orig_node);
return ret;
}
-
-int batadv_recv_vis_packet(struct sk_buff *skb,
- struct batadv_hard_iface *recv_if)
-{
- struct batadv_vis_packet *vis_packet;
- struct ethhdr *ethhdr;
- struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- int hdr_size = sizeof(*vis_packet);
-
- /* keep skb linear */
- if (skb_linearize(skb) < 0)
- return NET_RX_DROP;
-
- if (unlikely(!pskb_may_pull(skb, hdr_size)))
- return NET_RX_DROP;
-
- vis_packet = (struct batadv_vis_packet *)skb->data;
- ethhdr = eth_hdr(skb);
-
- /* not for me */
- if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
- return NET_RX_DROP;
-
- /* ignore own packets */
- if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig))
- return NET_RX_DROP;
-
- if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig))
- return NET_RX_DROP;
-
- switch (vis_packet->vis_type) {
- case BATADV_VIS_TYPE_SERVER_SYNC:
- batadv_receive_server_sync_packet(bat_priv, vis_packet,
- skb_headlen(skb));
- break;
-
- case BATADV_VIS_TYPE_CLIENT_UPDATE:
- batadv_receive_client_update_packet(bat_priv, vis_packet,
- skb_headlen(skb));
- break;
-
- default: /* ignore unknown packet */
- break;
- }
-
- /* We take a copy of the data in the packet, so we should
- * always free the skbuf.
- */
- return NET_RX_DROP;
-}
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 72a29bde2010..19544ddb81b5 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -30,23 +30,26 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
int batadv_recv_unicast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
-int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
- struct batadv_hard_iface *recv_if);
+int batadv_recv_frag_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *iface);
int batadv_recv_bcast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
-int batadv_recv_vis_packet(struct sk_buff *skb,
- struct batadv_hard_iface *recv_if);
int batadv_recv_tt_query(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
int batadv_recv_roam_adv(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
+int batadv_recv_unicast_tvlv(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
struct batadv_neigh_node *
batadv_find_router(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const struct batadv_hard_iface *recv_if);
void batadv_bonding_candidate_del(struct batadv_orig_node *orig_node,
struct batadv_neigh_node *neigh_node);
-void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
+void batadv_bonding_candidate_add(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
struct batadv_neigh_node *neigh_node);
void batadv_bonding_save_primary(const struct batadv_orig_node *orig_node,
struct batadv_orig_node *orig_neigh_node,
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 0266edd0fa7f..c83be5ebaa28 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -24,12 +24,11 @@
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
-#include "vis.h"
#include "gateway_common.h"
+#include "gateway_client.h"
#include "originator.h"
#include "network-coding.h"
-
-#include <linux/if_ether.h>
+#include "fragmentation.h"
static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
@@ -64,10 +63,10 @@ int batadv_send_skb_packet(struct sk_buff *skb,
ethhdr = eth_hdr(skb);
memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
- ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
+ ethhdr->h_proto = htons(ETH_P_BATMAN);
skb_set_network_header(skb, ETH_HLEN);
- skb->protocol = __constant_htons(ETH_P_BATMAN);
+ skb->protocol = htons(ETH_P_BATMAN);
skb->dev = hard_iface->net_dev;
@@ -109,7 +108,19 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
/* batadv_find_router() increases neigh_nodes refcount if found. */
neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
if (!neigh_node)
- return ret;
+ goto out;
+
+ /* Check if the skb is too large to send in one piece and fragment
+ * it if needed.
+ */
+ if (atomic_read(&bat_priv->fragmentation) &&
+ skb->len > neigh_node->if_incoming->net_dev->mtu) {
+ /* Fragment and send packet. */
+ if (batadv_frag_send_packet(skb, orig_node, neigh_node))
+ ret = NET_XMIT_SUCCESS;
+
+ goto out;
+ }
/* try to network code the packet, if it is received on an interface
* (i.e. being forwarded). If the packet originates from this node or if
@@ -123,11 +134,225 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
ret = NET_XMIT_SUCCESS;
}
- batadv_neigh_node_free_ref(neigh_node);
+out:
+ if (neigh_node)
+ batadv_neigh_node_free_ref(neigh_node);
+
+ return ret;
+}
+
+/**
+ * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
+ * common fields for unicast packets
+ * @skb: the skb carrying the unicast header to initialize
+ * @hdr_size: amount of bytes to push at the beginning of the skb
+ * @orig_node: the destination node
+ *
+ * Returns false if the buffer extension was not possible or true otherwise.
+ */
+static bool
+batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_unicast_packet *unicast_packet;
+ uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+
+ if (batadv_skb_head_push(skb, hdr_size) < 0)
+ return false;
+
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+ unicast_packet->header.version = BATADV_COMPAT_VERSION;
+ /* batman packet type: unicast */
+ unicast_packet->header.packet_type = BATADV_UNICAST;
+ /* set unicast ttl */
+ unicast_packet->header.ttl = BATADV_TTL;
+ /* copy the destination for faster routing */
+ memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
+ /* set the destination tt version number */
+ unicast_packet->ttvn = ttvn;
+
+ return true;
+}
+
+/**
+ * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
+ * @skb: the skb containing the payload to encapsulate
+ * @orig_node: the destination node
+ *
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ */
+static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node)
+{
+ size_t uni_size = sizeof(struct batadv_unicast_packet);
+
+ return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
+}
+
+/**
+ * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
+ * unicast 4addr header
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the skb containing the payload to encapsulate
+ * @orig_node: the destination node
+ * @packet_subtype: the unicast 4addr packet subtype to use
+ *
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ */
+bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ struct batadv_orig_node *orig,
+ int packet_subtype)
+{
+ struct batadv_hard_iface *primary_if;
+ struct batadv_unicast_4addr_packet *uc_4addr_packet;
+ bool ret = false;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ /* Pull the header space and fill the unicast_packet substructure.
+ * We can do that because the first member of the uc_4addr_packet
+ * is of type struct unicast_packet
+ */
+ if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
+ orig))
+ goto out;
+
+ uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
+ uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
+ memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
+ uc_4addr_packet->subtype = packet_subtype;
+ uc_4addr_packet->reserved = 0;
+
+ ret = true;
+out:
+ if (primary_if)
+ batadv_hardif_free_ref(primary_if);
+ return ret;
+}
+
+/**
+ * batadv_send_skb_unicast - encapsulate and send an skb via unicast
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @packet_type: the batman unicast packet type to use
+ * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
+ * 4addr packets)
+ * @orig_node: the originator to send the packet to
+ * @vid: the vid to be used to search the translation table
+ *
+ * Wrap the given skb into a batman-adv unicast or unicast-4addr header
+ * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
+ * as packet_type. Then send this frame to the given orig_node and release a
+ * reference to this orig_node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int packet_type,
+ int packet_subtype,
+ struct batadv_orig_node *orig_node,
+ unsigned short vid)
+{
+ struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+ struct batadv_unicast_packet *unicast_packet;
+ int ret = NET_XMIT_DROP;
+
+ if (!orig_node)
+ goto out;
+
+ switch (packet_type) {
+ case BATADV_UNICAST:
+ if (!batadv_send_skb_prepare_unicast(skb, orig_node))
+ goto out;
+ break;
+ case BATADV_UNICAST_4ADDR:
+ if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
+ orig_node,
+ packet_subtype))
+ goto out;
+ break;
+ default:
+ /* this function supports UNICAST and UNICAST_4ADDR only. It
+ * should never be invoked with any other packet type
+ */
+ goto out;
+ }
+
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+
+ /* inform the destination node that we are still missing a correct route
+ * for this client. The destination will receive this packet and will
+ * try to reroute it because the ttvn contained in the header is less
+ * than the current one
+ */
+ if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
+ unicast_packet->ttvn = unicast_packet->ttvn - 1;
+ if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
+ ret = NET_XMIT_SUCCESS;
+
+out:
+ if (orig_node)
+ batadv_orig_node_free_ref(orig_node);
+ if (ret == NET_XMIT_DROP)
+ kfree_skb(skb);
return ret;
}
+/**
+ * batadv_send_skb_via_tt_generic - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @packet_type: the batman unicast packet type to use
+ * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
+ * 4addr packets)
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
+ * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
+ * to the according destination node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int packet_type,
+ int packet_subtype, unsigned short vid)
+{
+ struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+ struct batadv_orig_node *orig_node;
+
+ orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest, vid);
+ return batadv_send_skb_unicast(bat_priv, skb, packet_type,
+ packet_subtype, orig_node, vid);
+}
+
+/**
+ * batadv_send_skb_via_gw - send an skb via gateway lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the currently selected gateway. Wrap the given skb into a batman-adv
+ * unicast header and send this frame to this gateway node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid)
+{
+ struct batadv_orig_node *orig_node;
+
+ orig_node = batadv_gw_get_selected_orig(bat_priv);
+ return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
+ orig_node, vid);
+}
+
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index e7b17880fca4..aa2e2537a739 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -34,5 +34,58 @@ void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work);
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
const struct batadv_hard_iface *hard_iface);
+bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ struct batadv_orig_node *orig_node,
+ int packet_subtype);
+int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int packet_type,
+ int packet_subtype, unsigned short vid);
+int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid);
+
+/**
+ * batadv_send_skb_via_tt - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the payload to send
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast header. Then send this frame to the according destination node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned short vid)
+{
+ return batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST, 0,
+ vid);
+}
+
+/**
+ * batadv_send_skb_via_tt_4addr - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the payload to send
+ * @packet_subtype: the unicast 4addr packet subtype to use
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast-4addr header. Then send this frame to the according destination
+ * node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static inline int batadv_send_skb_via_tt_4addr(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ int packet_subtype,
+ unsigned short vid)
+{
+ return batadv_send_skb_via_tt_generic(bat_priv, skb,
+ BATADV_UNICAST_4ADDR,
+ packet_subtype, vid);
+}
#endif /* _NET_BATMAN_ADV_SEND_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 813db4e64602..36f050876f82 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -34,8 +34,6 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
-#include <linux/if_ether.h>
-#include "unicast.h"
#include "bridge_loop_avoidance.h"
#include "network-coding.h"
@@ -120,9 +118,10 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
/* only modify transtable if it has been initialized before */
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
- batadv_tt_local_remove(bat_priv, old_addr,
+ batadv_tt_local_remove(bat_priv, old_addr, BATADV_NO_FLAGS,
"mac address changed", false);
- batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
+ batadv_tt_local_add(dev, addr->sa_data, BATADV_NO_FLAGS,
+ BATADV_NULL_IFINDEX);
}
return 0;
@@ -139,36 +138,48 @@ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+/**
+ * batadv_interface_set_rx_mode - set the rx mode of a device
+ * @dev: registered network device to modify
+ *
+ * We do not actually need to set any rx filters for the virtual batman
+ * soft interface. However, a dummy handler enables a user to set static
+ * multicast listeners, for instance.
+ */
+static void batadv_interface_set_rx_mode(struct net_device *dev)
+{
+}
+
static int batadv_interface_tx(struct sk_buff *skb,
struct net_device *soft_iface)
{
- struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+ struct ethhdr *ethhdr;
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct batadv_hard_iface *primary_if = NULL;
struct batadv_bcast_packet *bcast_packet;
- struct vlan_ethhdr *vhdr;
- __be16 ethertype = __constant_htons(ETH_P_BATMAN);
+ __be16 ethertype = htons(ETH_P_BATMAN);
static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
0x00, 0x00};
static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
0x00, 0x00};
+ struct vlan_ethhdr *vhdr;
unsigned int header_len = 0;
int data_len = skb->len, ret;
- unsigned short vid __maybe_unused = BATADV_NO_FLAGS;
- bool do_bcast = false;
- uint32_t seqno;
unsigned long brd_delay = 1;
+ bool do_bcast = false, client_added;
+ unsigned short vid;
+ uint32_t seqno;
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto dropped;
soft_iface->trans_start = jiffies;
+ vid = batadv_get_vid(skb, 0);
+ ethhdr = (struct ethhdr *)skb->data;
switch (ntohs(ethhdr->h_proto)) {
case ETH_P_8021Q:
vhdr = (struct vlan_ethhdr *)skb->data;
- vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
- vid |= BATADV_VLAN_HAS_TAG;
if (vhdr->h_vlan_encapsulated_proto != ethertype)
break;
@@ -185,8 +196,12 @@ static int batadv_interface_tx(struct sk_buff *skb,
ethhdr = (struct ethhdr *)skb->data;
/* Register the client MAC in the transtable */
- if (!is_multicast_ether_addr(ethhdr->h_source))
- batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
+ if (!is_multicast_ether_addr(ethhdr->h_source)) {
+ client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
+ vid, skb->skb_iif);
+ if (!client_added)
+ goto dropped;
+ }
/* don't accept stp packets. STP does not help in meshes.
* better use the bridge loop avoidance ...
@@ -286,8 +301,12 @@ static int batadv_interface_tx(struct sk_buff *skb,
batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
- ret = batadv_unicast_send_skb(bat_priv, skb);
- if (ret != 0)
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
+ else
+ ret = batadv_send_skb_via_tt(bat_priv, skb, vid);
+
+ if (ret == NET_XMIT_DROP)
goto dropped_freed;
}
@@ -309,12 +328,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
struct sk_buff *skb, struct batadv_hard_iface *recv_if,
int hdr_size, struct batadv_orig_node *orig_node)
{
+ struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
- struct ethhdr *ethhdr;
+ __be16 ethertype = htons(ETH_P_BATMAN);
struct vlan_ethhdr *vhdr;
- struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
- unsigned short vid __maybe_unused = BATADV_NO_FLAGS;
- __be16 ethertype = __constant_htons(ETH_P_BATMAN);
+ struct ethhdr *ethhdr;
+ unsigned short vid;
bool is_bcast;
is_bcast = (batadv_header->packet_type == BATADV_BCAST);
@@ -326,13 +345,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
skb_pull_rcsum(skb, hdr_size);
skb_reset_mac_header(skb);
+ vid = batadv_get_vid(skb, hdr_size);
ethhdr = eth_hdr(skb);
switch (ntohs(ethhdr->h_proto)) {
case ETH_P_8021Q:
vhdr = (struct vlan_ethhdr *)skb->data;
- vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
- vid |= BATADV_VLAN_HAS_TAG;
if (vhdr->h_vlan_encapsulated_proto != ethertype)
break;
@@ -368,9 +386,10 @@ void batadv_interface_rx(struct net_device *soft_iface,
if (orig_node)
batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
- ethhdr->h_source);
+ ethhdr->h_source, vid);
- if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
+ if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest,
+ vid))
goto dropped;
netif_rx(skb);
@@ -382,6 +401,177 @@ out:
return;
}
+/**
+ * batadv_softif_vlan_free_ref - decrease the vlan object refcounter and
+ * possibly free it
+ * @softif_vlan: the vlan object to release
+ */
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan)
+{
+ if (atomic_dec_and_test(&softif_vlan->refcount))
+ kfree_rcu(softif_vlan, rcu);
+}
+
+/**
+ * batadv_softif_vlan_get - get the vlan object for a specific vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the identifier of the vlan object to retrieve
+ *
+ * Returns the private data of the vlan matching the vid passed as argument or
+ * NULL otherwise. The refcounter of the returned object is incremented by 1.
+ */
+struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
+ unsigned short vid)
+{
+ struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
+ if (vlan_tmp->vid != vid)
+ continue;
+
+ if (!atomic_inc_not_zero(&vlan_tmp->refcount))
+ continue;
+
+ vlan = vlan_tmp;
+ break;
+ }
+ rcu_read_unlock();
+
+ return vlan;
+}
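
The lookup above returns the vlan with its refcount already raised, so every successful batadv_softif_vlan_get() must be paired with a batadv_softif_vlan_free_ref(), and an object whose refcount has already dropped to zero is never revived. A stand-alone C11 sketch of that get/put discipline, using a plain atomic counter instead of the kernel's RCU-protected list; the names are illustrative only.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_vlan {
	unsigned short vid;
	atomic_int refcount;
};

static struct sketch_vlan *sketch_vlan_get(struct sketch_vlan *v)
{
	int old = atomic_load(&v->refcount);

	/* mirror atomic_inc_not_zero(): never revive a dying object */
	do {
		if (old == 0)
			return NULL;
	} while (!atomic_compare_exchange_weak(&v->refcount, &old, old + 1));

	return v;
}

static void sketch_vlan_free_ref(struct sketch_vlan *v)
{
	if (atomic_fetch_sub(&v->refcount, 1) == 1)
		free(v);
}

int main(void)
{
	struct sketch_vlan *v = malloc(sizeof(*v));

	if (!v)
		return 1;
	v->vid = 5;
	atomic_init(&v->refcount, 1);

	if (sketch_vlan_get(v))
		sketch_vlan_free_ref(v);	/* put the lookup reference */

	sketch_vlan_free_ref(v);		/* put the initial reference, frees the object */
	return 0;
}
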
+
+/**
+ * batadv_softif_create_vlan - allocate the needed resources for a new vlan
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ *
+ * Returns 0 on success, a negative error otherwise.
+ */
+int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
+{
+ struct batadv_softif_vlan *vlan;
+ int err;
+
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (vlan) {
+ batadv_softif_vlan_free_ref(vlan);
+ return -EEXIST;
+ }
+
+ vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
+ if (!vlan)
+ return -ENOMEM;
+
+ vlan->vid = vid;
+ atomic_set(&vlan->refcount, 1);
+
+ atomic_set(&vlan->ap_isolation, 0);
+
+ err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+ if (err) {
+ kfree(vlan);
+ return err;
+ }
+
+ /* add a new TT local entry. This one will be marked with the NOPURGE
+ * flag
+ */
+ batadv_tt_local_add(bat_priv->soft_iface,
+ bat_priv->soft_iface->dev_addr, vid,
+ BATADV_NULL_IFINDEX);
+
+ spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+ hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+ return 0;
+}
+
+/**
+ * batadv_softif_destroy_vlan - remove and destroy a softif_vlan object
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vlan: the object to remove
+ */
+static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
+ struct batadv_softif_vlan *vlan)
+{
+ spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+ hlist_del_rcu(&vlan->list);
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+ batadv_sysfs_del_vlan(bat_priv, vlan);
+
+ /* explicitly remove the associated TT local entry because it is marked
+ * with the NOPURGE flag
+ */
+ batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
+ vlan->vid, "vlan interface destroyed", false);
+
+ batadv_softif_vlan_free_ref(vlan);
+}
+
+/**
+ * batadv_interface_add_vid - ndo_add_vid API implementation
+ * @dev: the netdev of the mesh interface
+ * @vid: identifier of the new vlan
+ *
+ * Set up all the internal structures for handling the new vlan on top of the
+ * mesh interface.
+ *
+ * Returns 0 on success or a negative error code in case of failure.
+ */
+static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
+ unsigned short vid)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+
+ /* only 802.1Q vlans are supported.
+ * batman-adv does not know how to handle other types
+ */
+ if (proto != htons(ETH_P_8021Q))
+ return -EINVAL;
+
+ vid |= BATADV_VLAN_HAS_TAG;
+
+ return batadv_softif_create_vlan(bat_priv, vid);
+}
+
+/**
+ * batadv_interface_kill_vid - ndo_kill_vid API implementation
+ * @dev: the netdev of the mesh interface
+ * @vid: identifier of the deleted vlan
+ *
+ * Destroy all the internal structures used to handle the vlan identified by vid
+ * on top of the mesh interface.
+ *
+ * Returns 0 on success, -EINVAL if the specified protocol is not ETH_P_8021Q
+ * or -ENOENT if the specified vlan id wasn't registered.
+ */
+static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
+ unsigned short vid)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_softif_vlan *vlan;
+
+ /* only 802.1Q vlans are supported. batman-adv does not know how to
+ * handle other types
+ */
+ if (proto != htons(ETH_P_8021Q))
+ return -EINVAL;
+
+ vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
+ if (!vlan)
+ return -ENOENT;
+
+ batadv_softif_destroy_vlan(bat_priv, vlan);
+
+ /* finally free the vlan object */
+ batadv_softif_vlan_free_ref(vlan);
+
+ return 0;
+}
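
Both handlers fold an internal tag bit into the vid before touching the vlan list, which keeps tagged VLANs from ever colliding with the untagged network (vid 0, BATADV_NO_FLAGS). A small sketch of that key construction; the flag value and the mask below are assumptions used only for illustration.

#include <stdio.h>

#define SKETCH_VLAN_HAS_TAG	(1u << 15)	/* assumed BATADV_VLAN_HAS_TAG */
#define SKETCH_VID_MASK		0x0fffu		/* VLAN_VID_MASK */

int main(void)
{
	unsigned short untagged = 0;			/* BATADV_NO_FLAGS */
	unsigned short tagged = 5 | SKETCH_VLAN_HAS_TAG;

	printf("untagged key: 0x%04x\n", (unsigned int)untagged);
	printf("vlan 5 key:   0x%04x (802.1Q id %u)\n",
	       (unsigned int)tagged, (unsigned int)(tagged & SKETCH_VID_MASK));
	return 0;
}
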
+
/* batman-adv network devices have devices nesting below it and are a special
* "super class" of normal network devices; split their locks off into a
* separate class since they always nest.
@@ -421,6 +611,7 @@ static void batadv_set_lockdep_class(struct net_device *dev)
*/
static void batadv_softif_destroy_finish(struct work_struct *work)
{
+ struct batadv_softif_vlan *vlan;
struct batadv_priv *bat_priv;
struct net_device *soft_iface;
@@ -428,6 +619,13 @@ static void batadv_softif_destroy_finish(struct work_struct *work)
cleanup_work);
soft_iface = bat_priv->soft_iface;
+ /* destroy the "untagged" VLAN */
+ vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+ if (vlan) {
+ batadv_softif_destroy_vlan(bat_priv, vlan);
+ batadv_softif_vlan_free_ref(vlan);
+ }
+
batadv_sysfs_del_meshif(soft_iface);
rtnl_lock();
@@ -444,6 +642,7 @@ static void batadv_softif_destroy_finish(struct work_struct *work)
static int batadv_softif_init_late(struct net_device *dev)
{
struct batadv_priv *bat_priv;
+ uint32_t random_seqno;
int ret;
size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
@@ -468,17 +667,17 @@ static int batadv_softif_init_late(struct net_device *dev)
#ifdef CONFIG_BATMAN_ADV_DAT
atomic_set(&bat_priv->distributed_arp_table, 1);
#endif
- atomic_set(&bat_priv->ap_isolation, 0);
- atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE);
atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
atomic_set(&bat_priv->gw_sel_class, 20);
- atomic_set(&bat_priv->gw_bandwidth, 41);
+ atomic_set(&bat_priv->gw.bandwidth_down, 100);
+ atomic_set(&bat_priv->gw.bandwidth_up, 20);
atomic_set(&bat_priv->orig_interval, 1000);
atomic_set(&bat_priv->hop_penalty, 30);
#ifdef CONFIG_BATMAN_ADV_DEBUG
atomic_set(&bat_priv->log_level, 0);
#endif
atomic_set(&bat_priv->fragmentation, 1);
+ atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN);
atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
@@ -493,6 +692,10 @@ static int batadv_softif_init_late(struct net_device *dev)
bat_priv->tt.last_changeset = NULL;
bat_priv->tt.last_changeset_len = 0;
+ /* randomize initial seqno to avoid collision */
+ get_random_bytes(&random_seqno, sizeof(random_seqno));
+ atomic_set(&bat_priv->frag_seqno, random_seqno);
+
bat_priv->primary_if = NULL;
bat_priv->num_ifaces = 0;
@@ -578,8 +781,11 @@ static const struct net_device_ops batadv_netdev_ops = {
.ndo_open = batadv_interface_open,
.ndo_stop = batadv_interface_release,
.ndo_get_stats = batadv_interface_stats,
+ .ndo_vlan_rx_add_vid = batadv_interface_add_vid,
+ .ndo_vlan_rx_kill_vid = batadv_interface_kill_vid,
.ndo_set_mac_address = batadv_interface_set_mac_addr,
.ndo_change_mtu = batadv_interface_change_mtu,
+ .ndo_set_rx_mode = batadv_interface_set_rx_mode,
.ndo_start_xmit = batadv_interface_tx,
.ndo_validate_addr = eth_validate_addr,
.ndo_add_slave = batadv_softif_slave_add,
@@ -616,6 +822,7 @@ static void batadv_softif_init_early(struct net_device *dev)
dev->netdev_ops = &batadv_netdev_ops;
dev->destructor = batadv_softif_free;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
dev->tx_queue_len = 0;
/* can't call min_mtu, because the needed variables
@@ -623,7 +830,7 @@ static void batadv_softif_init_early(struct net_device *dev)
*/
dev->mtu = ETH_DATA_LEN;
/* reserve more space in the skbuff for our header */
- dev->hard_header_len = BATADV_HEADER_LEN;
+ dev->hard_header_len = batadv_max_header_len();
/* generate random address */
eth_hw_addr_random(dev);
@@ -760,6 +967,12 @@ static const struct {
{ "mgmt_tx_bytes" },
{ "mgmt_rx" },
{ "mgmt_rx_bytes" },
+ { "frag_tx" },
+ { "frag_tx_bytes" },
+ { "frag_rx" },
+ { "frag_rx_bytes" },
+ { "frag_fwd" },
+ { "frag_fwd_bytes" },
{ "tt_request_tx" },
{ "tt_request_rx" },
{ "tt_response_tx" },
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 2f2472c2ea0d..06fc91ff5a02 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -28,5 +28,9 @@ struct net_device *batadv_softif_create(const char *name);
void batadv_softif_destroy_sysfs(struct net_device *soft_iface);
int batadv_softif_is_valid(const struct net_device *net_dev);
extern struct rtnl_link_ops batadv_link_ops;
+int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid);
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan);
+struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
+ unsigned short vid);
#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 4114b961bc2c..6335433310af 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -21,11 +21,12 @@
#include "sysfs.h"
#include "translation-table.h"
#include "distributed-arp-table.h"
+#include "network-coding.h"
#include "originator.h"
#include "hard-interface.h"
+#include "soft-interface.h"
#include "gateway_common.h"
#include "gateway_client.h"
-#include "vis.h"
static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
{
@@ -39,6 +40,53 @@ static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
return netdev_priv(net_dev);
}
+/**
+ * batadv_vlan_kobj_to_batpriv - convert a vlan kobj into the associated batpriv
+ * @obj: kobject to convert
+ *
+ * Returns the associated batadv_priv struct.
+ */
+static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
+{
+ /* VLAN specific attributes are located in the root sysfs folder if they
+ * refer to the untagged VLAN..
+ */
+ if (!strcmp(BATADV_SYSFS_IF_MESH_SUBDIR, obj->name))
+ return batadv_kobj_to_batpriv(obj);
+
+ /* ..while the attributes for the tagged vlans are located in
+ * the corresponding "vlan%VID" subfolder
+ */
+ return batadv_kobj_to_batpriv(obj->parent);
+}
+
+/**
+ * batadv_kobj_to_vlan - convert a kobj into the associated softif_vlan struct
+ * @obj: kobject to convert
+ *
+ * Returns the associated softif_vlan struct if found, NULL otherwise.
+ */
+static struct batadv_softif_vlan *
+batadv_kobj_to_vlan(struct batadv_priv *bat_priv, struct kobject *obj)
+{
+ struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
+ if (vlan_tmp->kobj != obj)
+ continue;
+
+ if (!atomic_inc_not_zero(&vlan_tmp->refcount))
+ continue;
+
+ vlan = vlan_tmp;
+ break;
+ }
+ rcu_read_unlock();
+
+ return vlan;
+}
+
#define BATADV_UEV_TYPE_VAR "BATTYPE="
#define BATADV_UEV_ACTION_VAR "BATACTION="
#define BATADV_UEV_DATA_VAR "BATDATA="
@@ -53,6 +101,15 @@ static char *batadv_uev_type_str[] = {
"gw"
};
+/* Use this, if you have customized show and store functions for vlan attrs */
+#define BATADV_ATTR_VLAN(_name, _mode, _show, _store) \
+struct batadv_attribute batadv_attr_vlan_##_name = { \
+ .attr = {.name = __stringify(_name), \
+ .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
/* Use this, if you have customized show and store functions */
#define BATADV_ATTR(_name, _mode, _show, _store) \
struct batadv_attribute batadv_attr_##_name = { \
@@ -122,6 +179,41 @@ ssize_t batadv_show_##_name(struct kobject *kobj, \
static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
batadv_store_##_name)
+#define BATADV_ATTR_VLAN_STORE_BOOL(_name, _post_func) \
+ssize_t batadv_store_vlan_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff, \
+ size_t count) \
+{ \
+ struct batadv_priv *bat_priv = batadv_vlan_kobj_to_batpriv(kobj);\
+ struct batadv_softif_vlan *vlan = batadv_kobj_to_vlan(bat_priv, \
+ kobj); \
+ size_t res = __batadv_store_bool_attr(buff, count, _post_func, \
+ attr, &vlan->_name, \
+ bat_priv->soft_iface); \
+ batadv_softif_vlan_free_ref(vlan); \
+ return res; \
+}
+
+#define BATADV_ATTR_VLAN_SHOW_BOOL(_name) \
+ssize_t batadv_show_vlan_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
+{ \
+ struct batadv_priv *bat_priv = batadv_vlan_kobj_to_batpriv(kobj);\
+ struct batadv_softif_vlan *vlan = batadv_kobj_to_vlan(bat_priv, \
+ kobj); \
+ size_t res = sprintf(buff, "%s\n", \
+ atomic_read(&vlan->_name) == 0 ? \
+ "disabled" : "enabled"); \
+ batadv_softif_vlan_free_ref(vlan); \
+ return res; \
+}
+
+/* Use this, if you are going to turn a [name] in the vlan struct on or off */
+#define BATADV_ATTR_VLAN_BOOL(_name, _mode, _post_func) \
+ static BATADV_ATTR_VLAN_STORE_BOOL(_name, _post_func) \
+ static BATADV_ATTR_VLAN_SHOW_BOOL(_name) \
+ static BATADV_ATTR_VLAN(_name, _mode, batadv_show_vlan_##_name, \
+ batadv_store_vlan_##_name)
static int batadv_store_bool_attr(char *buff, size_t count,
struct net_device *net_dev,
@@ -230,74 +322,6 @@ __batadv_store_uint_attr(const char *buff, size_t count,
return ret;
}
-static ssize_t batadv_show_vis_mode(struct kobject *kobj,
- struct attribute *attr, char *buff)
-{
- struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
- int vis_mode = atomic_read(&bat_priv->vis_mode);
- const char *mode;
-
- if (vis_mode == BATADV_VIS_TYPE_CLIENT_UPDATE)
- mode = "client";
- else
- mode = "server";
-
- return sprintf(buff, "%s\n", mode);
-}
-
-static ssize_t batadv_store_vis_mode(struct kobject *kobj,
- struct attribute *attr, char *buff,
- size_t count)
-{
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- unsigned long val;
- int ret, vis_mode_tmp = -1;
- const char *old_mode, *new_mode;
-
- ret = kstrtoul(buff, 10, &val);
-
- if (((count == 2) && (!ret) &&
- (val == BATADV_VIS_TYPE_CLIENT_UPDATE)) ||
- (strncmp(buff, "client", 6) == 0) ||
- (strncmp(buff, "off", 3) == 0))
- vis_mode_tmp = BATADV_VIS_TYPE_CLIENT_UPDATE;
-
- if (((count == 2) && (!ret) &&
- (val == BATADV_VIS_TYPE_SERVER_SYNC)) ||
- (strncmp(buff, "server", 6) == 0))
- vis_mode_tmp = BATADV_VIS_TYPE_SERVER_SYNC;
-
- if (vis_mode_tmp < 0) {
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- batadv_info(net_dev,
- "Invalid parameter for 'vis mode' setting received: %s\n",
- buff);
- return -EINVAL;
- }
-
- if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
- return count;
-
- if (atomic_read(&bat_priv->vis_mode) == BATADV_VIS_TYPE_CLIENT_UPDATE)
- old_mode = "client";
- else
- old_mode = "server";
-
- if (vis_mode_tmp == BATADV_VIS_TYPE_CLIENT_UPDATE)
- new_mode = "client";
- else
- new_mode = "server";
-
- batadv_info(net_dev, "Changing vis mode from: %s to: %s\n", old_mode,
- new_mode);
-
- atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
- return count;
-}
-
static ssize_t batadv_show_bat_algo(struct kobject *kobj,
struct attribute *attr, char *buff)
{
@@ -390,6 +414,7 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
*/
batadv_gw_check_client_stop(bat_priv);
atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
+ batadv_gw_tvlv_container_update(bat_priv);
return count;
}
@@ -397,15 +422,13 @@ static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
struct attribute *attr, char *buff)
{
struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
- int down, up;
- int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth);
-
- batadv_gw_bandwidth_to_kbit(gw_bandwidth, &down, &up);
- return sprintf(buff, "%i%s/%i%s\n",
- (down > 2048 ? down / 1024 : down),
- (down > 2048 ? "MBit" : "KBit"),
- (up > 2048 ? up / 1024 : up),
- (up > 2048 ? "MBit" : "KBit"));
+ uint32_t down, up;
+
+ down = atomic_read(&bat_priv->gw.bandwidth_down);
+ up = atomic_read(&bat_priv->gw.bandwidth_up);
+
+ return sprintf(buff, "%u.%u/%u.%u MBit\n", down / 10,
+ down % 10, up / 10, up % 10);
}
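
A quick worked example of the new output format: the gateway bandwidth values appear to be stored in units of 100 kbit/s, so the defaults set in batadv_softif_init_late() earlier in this patch (down 100, up 20) would be printed as "10.0/2.0 MBit".

#include <stdio.h>

int main(void)
{
	unsigned int down = 100, up = 20;	/* default values from softif_init_late */

	/* prints "10.0/2.0 MBit" */
	printf("%u.%u/%u.%u MBit\n", down / 10, down % 10, up / 10, up % 10);
	return 0;
}
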
static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
@@ -426,12 +449,10 @@ BATADV_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
#endif
#ifdef CONFIG_BATMAN_ADV_DAT
-BATADV_ATTR_SIF_BOOL(distributed_arp_table, S_IRUGO | S_IWUSR, NULL);
+BATADV_ATTR_SIF_BOOL(distributed_arp_table, S_IRUGO | S_IWUSR,
+ batadv_dat_status_update);
#endif
BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu);
-BATADV_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
-static BATADV_ATTR(vis_mode, S_IRUGO | S_IWUSR, batadv_show_vis_mode,
- batadv_store_vis_mode);
static BATADV_ATTR(routing_algo, S_IRUGO, batadv_show_bat_algo, NULL);
static BATADV_ATTR(gw_mode, S_IRUGO | S_IWUSR, batadv_show_gw_mode,
batadv_store_gw_mode);
@@ -447,7 +468,8 @@ static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
BATADV_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, BATADV_DBG_ALL, NULL);
#endif
#ifdef CONFIG_BATMAN_ADV_NC
-BATADV_ATTR_SIF_BOOL(network_coding, S_IRUGO | S_IWUSR, NULL);
+BATADV_ATTR_SIF_BOOL(network_coding, S_IRUGO | S_IWUSR,
+ batadv_nc_status_update);
#endif
static struct batadv_attribute *batadv_mesh_attrs[] = {
@@ -460,8 +482,6 @@ static struct batadv_attribute *batadv_mesh_attrs[] = {
&batadv_attr_distributed_arp_table,
#endif
&batadv_attr_fragmentation,
- &batadv_attr_ap_isolation,
- &batadv_attr_vis_mode,
&batadv_attr_routing_algo,
&batadv_attr_gw_mode,
&batadv_attr_orig_interval,
@@ -477,6 +497,16 @@ static struct batadv_attribute *batadv_mesh_attrs[] = {
NULL,
};
+BATADV_ATTR_VLAN_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
+
+/**
+ * batadv_vlan_attrs - array of vlan specific sysfs attributes
+ */
+static struct batadv_attribute *batadv_vlan_attrs[] = {
+ &batadv_attr_vlan_ap_isolation,
+ NULL,
+};
+
int batadv_sysfs_add_meshif(struct net_device *dev)
{
struct kobject *batif_kobject = &dev->dev.kobj;
@@ -527,6 +557,80 @@ void batadv_sysfs_del_meshif(struct net_device *dev)
bat_priv->mesh_obj = NULL;
}
+/**
+ * batadv_sysfs_add_vlan - add all the needed sysfs objects for the new vlan
+ * @dev: netdev of the mesh interface
+ * @vlan: private data of the newly added VLAN interface
+ *
+ * Returns 0 on success and -ENOMEM if any of the structure allocations fails.
+ */
+int batadv_sysfs_add_vlan(struct net_device *dev,
+ struct batadv_softif_vlan *vlan)
+{
+ char vlan_subdir[sizeof(BATADV_SYSFS_VLAN_SUBDIR_PREFIX) + 5];
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_attribute **bat_attr;
+ int err;
+
+ if (vlan->vid & BATADV_VLAN_HAS_TAG) {
+ sprintf(vlan_subdir, BATADV_SYSFS_VLAN_SUBDIR_PREFIX "%hu",
+ vlan->vid & VLAN_VID_MASK);
+
+ vlan->kobj = kobject_create_and_add(vlan_subdir,
+ bat_priv->mesh_obj);
+ if (!vlan->kobj) {
+ batadv_err(dev, "Can't add sysfs directory: %s/%s\n",
+ dev->name, vlan_subdir);
+ goto out;
+ }
+ } else {
+ /* the untagged VLAN uses the root folder to store its "VLAN
+ * specific attributes"
+ */
+ vlan->kobj = bat_priv->mesh_obj;
+ kobject_get(bat_priv->mesh_obj);
+ }
+
+ for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr) {
+ err = sysfs_create_file(vlan->kobj,
+ &((*bat_attr)->attr));
+ if (err) {
+ batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
+ dev->name, vlan_subdir,
+ ((*bat_attr)->attr).name);
+ goto rem_attr;
+ }
+ }
+
+ return 0;
+
+rem_attr:
+ for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
+ sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
+
+ kobject_put(vlan->kobj);
+ vlan->kobj = NULL;
+out:
+ return -ENOMEM;
+}
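
Under the conventions of this patch, every tagged VLAN therefore gets its own sysfs subfolder named after its 802.1Q id, while the untagged network keeps using the mesh root folder. The following sketch rebuilds the directory name produced by the sprintf() above; the tag bit and mask values are assumptions for illustration.

#include <stdio.h>

#define SKETCH_PREFIX	"vlan"			/* BATADV_SYSFS_VLAN_SUBDIR_PREFIX */
#define SKETCH_VID_MASK	0x0fffu			/* VLAN_VID_MASK */
#define SKETCH_HAS_TAG	(1u << 15)		/* assumed BATADV_VLAN_HAS_TAG */

int main(void)
{
	char subdir[sizeof(SKETCH_PREFIX) + 5];
	unsigned short vid = 5 | SKETCH_HAS_TAG;

	sprintf(subdir, SKETCH_PREFIX "%hu", (unsigned short)(vid & SKETCH_VID_MASK));
	/* would end up as e.g. /sys/class/net/bat0/mesh/vlan5/ap_isolation */
	printf("%s\n", subdir);
	return 0;
}
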
+
+/**
+ * batadv_sysfs_del_vlan - remove all the sysfs objects for a given VLAN
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vlan: the private data of the VLAN to destroy
+ */
+void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
+ struct batadv_softif_vlan *vlan)
+{
+ struct batadv_attribute **bat_attr;
+
+ for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
+ sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
+
+ kobject_put(vlan->kobj);
+ vlan->kobj = NULL;
+}
+
static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
struct attribute *attr, char *buff)
{
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
index 479acf4c16f4..c7d725de50ad 100644
--- a/net/batman-adv/sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -22,6 +22,12 @@
#define BATADV_SYSFS_IF_MESH_SUBDIR "mesh"
#define BATADV_SYSFS_IF_BAT_SUBDIR "batman_adv"
+/**
+ * BATADV_SYSFS_VLAN_SUBDIR_PREFIX - prefix of the subfolder that will be
+ * created in the sysfs hierarchy for each VLAN interface. The subfolder will
+ * be named "BATADV_SYSFS_VLAN_SUBDIR_PREFIX%vid".
+ */
+#define BATADV_SYSFS_VLAN_SUBDIR_PREFIX "vlan"
struct batadv_attribute {
struct attribute attr;
@@ -36,6 +42,10 @@ void batadv_sysfs_del_meshif(struct net_device *dev);
int batadv_sysfs_add_hardif(struct kobject **hardif_obj,
struct net_device *dev);
void batadv_sysfs_del_hardif(struct kobject **hardif_obj);
+int batadv_sysfs_add_vlan(struct net_device *dev,
+ struct batadv_softif_vlan *vlan);
+void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
+ struct batadv_softif_vlan *vlan);
int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
enum batadv_uev_action action, const char *data);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 34510f38708f..4add57d4857f 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -27,13 +27,14 @@
#include "routing.h"
#include "bridge_loop_avoidance.h"
-#include <linux/crc16.h>
+#include <linux/crc32c.h>
/* hash class keys */
static struct lock_class_key batadv_tt_local_hash_lock_class_key;
static struct lock_class_key batadv_tt_global_hash_lock_class_key;
static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+ unsigned short vid,
struct batadv_orig_node *orig_node);
static void batadv_tt_purge(struct work_struct *work);
static void
@@ -41,7 +42,8 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
static void batadv_tt_global_del(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const unsigned char *addr,
- const char *message, bool roaming);
+ unsigned short vid, const char *message,
+ bool roaming);
/* returns 1 if they are the same mac addr */
static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
@@ -52,43 +54,93 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}
+/**
+ * batadv_choose_tt - return the index of the tt entry in the hash table
+ * @data: pointer to the tt_common_entry object to map
+ * @size: the size of the hash table
+ *
+ * Returns the hash index where the object represented by 'data' should be
+ * stored.
+ */
+static inline uint32_t batadv_choose_tt(const void *data, uint32_t size)
+{
+ struct batadv_tt_common_entry *tt;
+ uint32_t hash = 0;
+
+ tt = (struct batadv_tt_common_entry *)data;
+ hash = batadv_hash_bytes(hash, &tt->addr, ETH_ALEN);
+ hash = batadv_hash_bytes(hash, &tt->vid, sizeof(tt->vid));
+
+ hash += (hash << 3);
+ hash ^= (hash >> 11);
+ hash += (hash << 15);
+
+ return hash % size;
+}
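
To show how the MAC address and the vid together select a bucket, here is a stand-alone re-implementation of the hash above. It assumes batadv_hash_bytes() is the usual Jenkins one-at-a-time accumulation step from batman-adv's hash.h; treat it as a sketch, not as the authoritative implementation.

#include <stdint.h>
#include <stdio.h>

static uint32_t sketch_hash_bytes(uint32_t hash, const void *data, size_t size)
{
	const unsigned char *key = data;
	size_t i;

	for (i = 0; i < size; i++) {
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}
	return hash;
}

static uint32_t sketch_choose_tt(const uint8_t *addr, uint16_t vid,
				 uint32_t size)
{
	uint32_t hash = 0;

	hash = sketch_hash_bytes(hash, addr, 6);
	hash = sketch_hash_bytes(hash, &vid, sizeof(vid));

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* the same client on two VLANs may land in different buckets */
	printf("bucket (vid 0): %u\n", sketch_choose_tt(mac, 0, 1024));
	printf("bucket (vid 5): %u\n", sketch_choose_tt(mac, 5, 1024));
	return 0;
}
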
+
+/**
+ * batadv_tt_hash_find - look for a client in the given hash table
+ * @hash: the hash table to search
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the tt_common struct belonging to the searched client if
+ * found, NULL otherwise.
+ */
static struct batadv_tt_common_entry *
-batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
+batadv_tt_hash_find(struct batadv_hashtable *hash, const uint8_t *addr,
+ unsigned short vid)
{
struct hlist_head *head;
- struct batadv_tt_common_entry *tt_common_entry;
- struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
+ struct batadv_tt_common_entry to_search, *tt, *tt_tmp = NULL;
uint32_t index;
if (!hash)
return NULL;
- index = batadv_choose_orig(data, hash->size);
+ memcpy(to_search.addr, addr, ETH_ALEN);
+ to_search.vid = vid;
+
+ index = batadv_choose_tt(&to_search, hash->size);
head = &hash->table[index];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common_entry, head, hash_entry) {
- if (!batadv_compare_eth(tt_common_entry, data))
+ hlist_for_each_entry_rcu(tt, head, hash_entry) {
+ if (!batadv_compare_eth(tt, addr))
continue;
- if (!atomic_inc_not_zero(&tt_common_entry->refcount))
+ if (tt->vid != vid)
continue;
- tt_common_entry_tmp = tt_common_entry;
+ if (!atomic_inc_not_zero(&tt->refcount))
+ continue;
+
+ tt_tmp = tt;
break;
}
rcu_read_unlock();
- return tt_common_entry_tmp;
+ return tt_tmp;
}
+/**
+ * batadv_tt_local_hash_find - search the local table for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the corresponding tt_local_entry struct if the client is
+ * found, NULL otherwise.
+ */
static struct batadv_tt_local_entry *
-batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
+batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+ unsigned short vid)
{
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_local_entry *tt_local_entry = NULL;
- tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
+ tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, addr,
+ vid);
if (tt_common_entry)
tt_local_entry = container_of(tt_common_entry,
struct batadv_tt_local_entry,
@@ -96,13 +148,24 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
return tt_local_entry;
}
+/**
+ * batadv_tt_global_hash_find - search the global table for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the corresponding tt_global_entry struct if the client
+ * is found, NULL otherwise.
+ */
static struct batadv_tt_global_entry *
-batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
+batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+ unsigned short vid)
{
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_global_entry *tt_global_entry = NULL;
- tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
+ tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, addr,
+ vid);
if (tt_common_entry)
tt_global_entry = container_of(tt_common_entry,
struct batadv_tt_global_entry,
@@ -117,25 +180,17 @@ batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
kfree_rcu(tt_local_entry, common.rcu);
}
-static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
-{
- struct batadv_tt_common_entry *tt_common_entry;
- struct batadv_tt_global_entry *tt_global_entry;
-
- tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
- tt_global_entry = container_of(tt_common_entry,
- struct batadv_tt_global_entry, common);
-
- kfree(tt_global_entry);
-}
-
+/**
+ * batadv_tt_global_entry_free_ref - decrement the refcounter for a
+ * tt_global_entry and possibly free it
+ * @tt_global_entry: the object to free
+ */
static void
batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
{
if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
batadv_tt_global_del_orig_list(tt_global_entry);
- call_rcu(&tt_global_entry->common.rcu,
- batadv_tt_global_entry_free_rcu);
+ kfree_rcu(tt_global_entry, common.rcu);
}
}
@@ -153,13 +208,107 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
kfree(orig_entry);
}
+/**
+ * batadv_tt_local_size_mod - change the size by v of the local table identified
+ * by vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier of the sub-table to change
+ * @v: the amount to add to the local table size
+ */
+static void batadv_tt_local_size_mod(struct batadv_priv *bat_priv,
+ unsigned short vid, int v)
+{
+ struct batadv_softif_vlan *vlan;
+
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (!vlan)
+ return;
+
+ atomic_add(v, &vlan->tt.num_entries);
+
+ batadv_softif_vlan_free_ref(vlan);
+}
+
+/**
+ * batadv_tt_local_size_inc - increase by one the local table size for the given
+ * vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ */
+static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv,
+ unsigned short vid)
+{
+ batadv_tt_local_size_mod(bat_priv, vid, 1);
+}
+
+/**
+ * batadv_tt_local_size_dec - decrease by one the local table size for the given
+ * vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ */
+static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv,
+ unsigned short vid)
+{
+ batadv_tt_local_size_mod(bat_priv, vid, -1);
+}
+
+/**
+ * batadv_tt_global_size_mod - change the size by v of the global table
+ * identified by vid
+ * @orig_node: the originator for which the global table size has to be changed
+ * @vid: the VLAN identifier
+ * @v: the amount to add to the global table size
+ */
+static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
+ unsigned short vid, int v)
+{
+ struct batadv_orig_node_vlan *vlan;
+
+ vlan = batadv_orig_node_vlan_new(orig_node, vid);
+ if (!vlan)
+ return;
+
+ if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
+ spin_lock_bh(&orig_node->vlan_list_lock);
+ list_del_rcu(&vlan->list);
+ spin_unlock_bh(&orig_node->vlan_list_lock);
+ batadv_orig_node_vlan_free_ref(vlan);
+ }
+
+ batadv_orig_node_vlan_free_ref(vlan);
+}
+
+/**
+ * batadv_tt_global_size_inc - increase by one the global table size for the
+ * given vid
+ * @orig_node: the originator whose global table size has to be increased
+ * @vid: the vlan identifier
+ */
+static void batadv_tt_global_size_inc(struct batadv_orig_node *orig_node,
+ unsigned short vid)
+{
+ batadv_tt_global_size_mod(orig_node, vid, 1);
+}
+
+/**
+ * batadv_tt_global_size_dec - decrease by one the global table size for the
+ * given vid
+ * @orig_node: the originator whose global table size has to be decreased
+ * @vid: the vlan identifier
+ */
+static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
+ unsigned short vid)
+{
+ batadv_tt_global_size_mod(orig_node, vid, -1);
+}
+
static void
batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
{
if (!atomic_dec_and_test(&orig_entry->refcount))
return;
- /* to avoid race conditions, immediately decrease the tt counter */
- atomic_dec(&orig_entry->orig_node->tt_size);
+
call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
}
@@ -180,12 +329,13 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
bool del_op_requested, del_op_entry;
tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
-
if (!tt_change_node)
return;
tt_change_node->change.flags = flags;
+ tt_change_node->change.reserved = 0;
memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
+ tt_change_node->change.vid = htons(common->vid);
del_op_requested = flags & BATADV_TT_CLIENT_DEL;
@@ -208,6 +358,13 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
goto del;
if (del_op_requested && !del_op_entry)
goto del;
+
+ /* this is a second add in the same originator interval. It
+ * means that flags have been changed: update them!
+ */
+ if (!del_op_requested && !del_op_entry)
+ entry->change.flags = flags;
+
continue;
del:
list_del(&entry->list);
@@ -229,9 +386,55 @@ unlock:
atomic_inc(&bat_priv->tt.local_changes);
}
-int batadv_tt_len(int changes_num)
+/**
+ * batadv_tt_len - compute length in bytes of given number of tt changes
+ * @changes_num: number of tt changes
+ *
+ * Returns computed length in bytes.
+ */
+static int batadv_tt_len(int changes_num)
{
- return changes_num * sizeof(struct batadv_tt_change);
+ return changes_num * sizeof(struct batadv_tvlv_tt_change);
+}
+
+/**
+ * batadv_tt_entries - compute the number of entries fitting in tt_len bytes
+ * @tt_len: available space
+ *
+ * Returns the number of entries.
+ */
+static uint16_t batadv_tt_entries(uint16_t tt_len)
+{
+ return tt_len / batadv_tt_len(1);
+}
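
batadv_tt_entries() simply inverts batadv_tt_len(): it divides the available space by the size of one change record. A tiny sanity check with an assumed record size (the real value is sizeof(struct batadv_tvlv_tt_change)).

#include <stdio.h>

#define SKETCH_TT_CHANGE_SIZE 10	/* assumed sizeof(struct batadv_tvlv_tt_change) */

static int sketch_tt_len(int changes_num)
{
	return changes_num * SKETCH_TT_CHANGE_SIZE;
}

static unsigned short sketch_tt_entries(unsigned short tt_len)
{
	return tt_len / sketch_tt_len(1);
}

int main(void)
{
	printf("len(7)  = %d bytes\n", sketch_tt_len(7));
	printf("entries = %u\n", sketch_tt_entries(sketch_tt_len(7)));
	return 0;
}
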
+
+/**
+ * batadv_tt_local_table_transmit_size - calculates the local translation table
+ * size when transmitted over the air
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Returns local translation table size in bytes.
+ */
+static int batadv_tt_local_table_transmit_size(struct batadv_priv *bat_priv)
+{
+ uint16_t num_vlan = 0, tt_local_entries = 0;
+ struct batadv_softif_vlan *vlan;
+ int hdr_size;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ num_vlan++;
+ tt_local_entries += atomic_read(&vlan->tt.num_entries);
+ }
+ rcu_read_unlock();
+
+ /* header size of tvlv encapsulated tt response payload */
+ hdr_size = sizeof(struct batadv_unicast_tvlv_packet);
+ hdr_size += sizeof(struct batadv_tvlv_hdr);
+ hdr_size += sizeof(struct batadv_tvlv_tt_data);
+ hdr_size += num_vlan * sizeof(struct batadv_tvlv_tt_vlan_data);
+
+ return hdr_size + batadv_tt_len(tt_local_entries);
}
static int batadv_tt_local_init(struct batadv_priv *bat_priv)
@@ -255,33 +458,51 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
const char *message)
{
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Deleting global tt entry %pM: %s\n",
- tt_global->common.addr, message);
+ "Deleting global tt entry %pM (vid: %d): %s\n",
+ tt_global->common.addr,
+ BATADV_PRINT_VID(tt_global->common.vid), message);
batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
- batadv_choose_orig, tt_global->common.addr);
+ batadv_choose_tt, &tt_global->common);
batadv_tt_global_entry_free_ref(tt_global);
}
-void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
- int ifindex)
+/**
+ * batadv_tt_local_add - add a new client to the local table or update an
+ * existing client
+ * @soft_iface: netdev struct of the mesh interface
+ * @addr: the mac address of the client to add
+ * @vid: VLAN identifier
+ * @ifindex: index of the interface where the client is connected to (useful to
+ * identify wireless clients)
+ *
+ * Returns true if the client was successfully added, false otherwise.
+ */
+bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+ unsigned short vid, int ifindex)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct batadv_tt_local_entry *tt_local;
struct batadv_tt_global_entry *tt_global;
+ struct net_device *in_dev = NULL;
struct hlist_head *head;
struct batadv_tt_orig_list_entry *orig_entry;
- int hash_added;
- bool roamed_back = false;
+ int hash_added, table_size, packet_size_max;
+ bool ret = false, roamed_back = false;
+ uint8_t remote_flags;
+
+ if (ifindex != BATADV_NULL_IFINDEX)
+ in_dev = dev_get_by_index(&init_net, ifindex);
- tt_local = batadv_tt_local_hash_find(bat_priv, addr);
- tt_global = batadv_tt_global_hash_find(bat_priv, addr);
+ tt_local = batadv_tt_local_hash_find(bat_priv, addr, vid);
+ tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
if (tt_local) {
tt_local->last_seen = jiffies;
if (tt_local->common.flags & BATADV_TT_CLIENT_PENDING) {
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Re-adding pending client %pM\n", addr);
+ "Re-adding pending client %pM (vid: %d)\n",
+ addr, BATADV_PRINT_VID(vid));
/* whatever the reason why the PENDING flag was set,
* this is a client which was enqueued to be removed in
* this orig_interval. Since it popped up again, the
@@ -293,8 +514,8 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
if (tt_local->common.flags & BATADV_TT_CLIENT_ROAM) {
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Roaming client %pM came back to its original location\n",
- addr);
+ "Roaming client %pM (vid: %d) came back to its original location\n",
+ addr, BATADV_PRINT_VID(vid));
/* the ROAM flag is set because this client roamed away
* and the node got a roaming_advertisement message. Now
* that the client popped up again at its original
@@ -306,12 +527,24 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
goto check_roaming;
}
+ /* Ignore the client if we cannot send it in a full table response. */
+ table_size = batadv_tt_local_table_transmit_size(bat_priv);
+ table_size += batadv_tt_len(1);
+ packet_size_max = atomic_read(&bat_priv->packet_size_max);
+ if (table_size > packet_size_max) {
+ net_ratelimited_function(batadv_info, soft_iface,
+ "Local translation table size (%i) exceeds maximum packet size (%i); Ignoring new local tt entry: %pM\n",
+ table_size, packet_size_max, addr);
+ goto out;
+ }
+
tt_local = kmalloc(sizeof(*tt_local), GFP_ATOMIC);
if (!tt_local)
goto out;
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
+ "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
+ addr, BATADV_PRINT_VID(vid),
(uint8_t)atomic_read(&bat_priv->tt.vn));
memcpy(tt_local->common.addr, addr, ETH_ALEN);
@@ -320,7 +553,8 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
* (consistency check)
*/
tt_local->common.flags = BATADV_TT_CLIENT_NEW;
- if (batadv_is_wifi_iface(ifindex))
+ tt_local->common.vid = vid;
+ if (batadv_is_wifi_netdev(in_dev))
tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
atomic_set(&tt_local->common.refcount, 2);
tt_local->last_seen = jiffies;
@@ -331,7 +565,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
- batadv_choose_orig, &tt_local->common,
+ batadv_choose_tt, &tt_local->common,
&tt_local->common.hash_entry);
if (unlikely(hash_added != 0)) {
@@ -353,6 +587,7 @@ check_roaming:
rcu_read_lock();
hlist_for_each_entry_rcu(orig_entry, head, list) {
batadv_send_roam_adv(bat_priv, tt_global->common.addr,
+ tt_global->common.vid,
orig_entry->orig_node);
}
rcu_read_unlock();
@@ -369,78 +604,219 @@ check_roaming:
}
}
+ /* store the current remote flags before altering them. This helps
+ * understand whether the flags are changing or not
+ */
+ remote_flags = tt_local->common.flags & BATADV_TT_REMOTE_MASK;
+
+ if (batadv_is_wifi_netdev(in_dev))
+ tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
+ else
+ tt_local->common.flags &= ~BATADV_TT_CLIENT_WIFI;
+
+ /* if any "dynamic" flag has been modified, resend an ADD event for this
+ * entry so that all the nodes can get the new flags
+ */
+ if (remote_flags ^ (tt_local->common.flags & BATADV_TT_REMOTE_MASK))
+ batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
+
+ ret = true;
out:
+ if (in_dev)
+ dev_put(in_dev);
if (tt_local)
batadv_tt_local_entry_free_ref(tt_local);
if (tt_global)
batadv_tt_global_entry_free_ref(tt_global);
+ return ret;
}
-static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
- int *packet_buff_len,
- int min_packet_len,
- int new_packet_len)
+/**
+ * batadv_tt_prepare_tvlv_global_data - prepare the TVLV TT header to send
+ * within a TT Response directed to another node
+ * @orig_node: originator for which the TT data has to be prepared
+ * @tt_data: uninitialised pointer to the address of the TVLV buffer
+ * @tt_change: uninitialised pointer to the address of the area where the TT
+ * changes can be stored
+ * @tt_len: pointer to the length to reserve for the tt_change. If -1, this
+ * function reserves the amount of space needed to send the entire global TT
+ * table. In case of success the value is updated with the real amount of
+ * reserved bytes.
+ *
+ * Allocate the needed amount of memory for the entire TT TVLV and write its
+ * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
+ * objects, one per active VLAN served by the originator node.
+ *
+ * Return the size of the allocated buffer or 0 in case of failure.
+ */
+static uint16_t
+batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
+ struct batadv_tvlv_tt_data **tt_data,
+ struct batadv_tvlv_tt_change **tt_change,
+ int32_t *tt_len)
{
- unsigned char *new_buff;
+ uint16_t num_vlan = 0, num_entries = 0, change_offset, tvlv_len;
+ struct batadv_tvlv_tt_vlan_data *tt_vlan;
+ struct batadv_orig_node_vlan *vlan;
+ uint8_t *tt_change_ptr;
- new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
+ rcu_read_lock();
+ list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+ num_vlan++;
+ num_entries += atomic_read(&vlan->tt.num_entries);
+ }
- /* keep old buffer if kmalloc should fail */
- if (new_buff) {
- memcpy(new_buff, *packet_buff, min_packet_len);
- kfree(*packet_buff);
- *packet_buff = new_buff;
- *packet_buff_len = new_packet_len;
+ change_offset = sizeof(**tt_data);
+ change_offset += num_vlan * sizeof(*tt_vlan);
+
+ /* if tt_len is negative, allocate the space needed by the full table */
+ if (*tt_len < 0)
+ *tt_len = batadv_tt_len(num_entries);
+
+ tvlv_len = *tt_len;
+ tvlv_len += change_offset;
+
+ *tt_data = kmalloc(tvlv_len, GFP_ATOMIC);
+ if (!*tt_data) {
+ *tt_len = 0;
+ goto out;
}
+
+ (*tt_data)->flags = BATADV_NO_FLAGS;
+ (*tt_data)->ttvn = atomic_read(&orig_node->last_ttvn);
+ (*tt_data)->num_vlan = htons(num_vlan);
+
+ tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+ list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+ tt_vlan->vid = htons(vlan->vid);
+ tt_vlan->crc = htonl(vlan->tt.crc);
+
+ tt_vlan++;
+ }
+
+ tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+ *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
+
+out:
+ rcu_read_unlock();
+ return tvlv_len;
}
-static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
- unsigned char **packet_buff,
- int *packet_buff_len,
- int min_packet_len)
-{
- int req_len;
+/**
+ * batadv_tt_prepare_tvlv_local_data - allocate and prepare the TT TVLV for this
+ * node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: uninitialised pointer to the address of the TVLV buffer
+ * @tt_change: uninitialised pointer to the address of the area where the TT
+ * changes can be stored
+ * @tt_len: pointer to the length to reserve for the tt_change. If -1, this
+ * function reserves the amount of space needed to send the entire local TT
+ * table. In case of success the value is updated with the real amount of
+ * reserved bytes.
+ *
+ * Allocate the needed amount of memory for the entire TT TVLV and write its
+ * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
+ * objects, one per active VLAN.
+ *
+ * Return the size of the allocated buffer or 0 in case of failure.
+ */
+static uint16_t
+batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_tt_data **tt_data,
+ struct batadv_tvlv_tt_change **tt_change,
+ int32_t *tt_len)
+{
+ struct batadv_tvlv_tt_vlan_data *tt_vlan;
+ struct batadv_softif_vlan *vlan;
+ uint16_t num_vlan = 0, num_entries = 0, tvlv_len;
+ uint8_t *tt_change_ptr;
+ int change_offset;
- req_len = min_packet_len;
- req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ num_vlan++;
+ num_entries += atomic_read(&vlan->tt.num_entries);
+ }
- /* if we have too many changes for one packet don't send any
- * and wait for the tt table request which will be fragmented
- */
- if (req_len > bat_priv->soft_iface->mtu)
- req_len = min_packet_len;
+ change_offset = sizeof(**tt_data);
+ change_offset += num_vlan * sizeof(*tt_vlan);
+
+ /* if tt_len is negative, allocate the space needed by the full table */
+ if (*tt_len < 0)
+ *tt_len = batadv_tt_len(num_entries);
+
+ tvlv_len = *tt_len;
+ tvlv_len += change_offset;
+
+ *tt_data = kmalloc(tvlv_len, GFP_ATOMIC);
+ if (!*tt_data) {
+ tvlv_len = 0;
+ goto out;
+ }
+
+ (*tt_data)->flags = BATADV_NO_FLAGS;
+ (*tt_data)->ttvn = atomic_read(&bat_priv->tt.vn);
+ (*tt_data)->num_vlan = htons(num_vlan);
+
+ tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+ hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ tt_vlan->vid = htons(vlan->vid);
+ tt_vlan->crc = htonl(vlan->tt.crc);
- batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
- min_packet_len, req_len);
+ tt_vlan++;
+ }
+
+ tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+ *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
+
+out:
+ rcu_read_unlock();
+ return tvlv_len;
}
-static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
- unsigned char **packet_buff,
- int *packet_buff_len,
- int min_packet_len)
+/**
+ * batadv_tt_tvlv_container_update - update the translation table tvlv container
+ * after local tt changes have been committed
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
{
struct batadv_tt_change_node *entry, *safe;
- int count = 0, tot_changes = 0, new_len;
- unsigned char *tt_buff;
+ struct batadv_tvlv_tt_data *tt_data;
+ struct batadv_tvlv_tt_change *tt_change;
+ int tt_diff_len, tt_change_len = 0;
+ int tt_diff_entries_num = 0, tt_diff_entries_count = 0;
+ uint16_t tvlv_len;
- batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
- packet_buff_len, min_packet_len);
+ tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
+ tt_diff_len = batadv_tt_len(tt_diff_entries_num);
- new_len = *packet_buff_len - min_packet_len;
- tt_buff = *packet_buff + min_packet_len;
+ /* if we have too many changes for one packet don't send any
+ * and wait for the tt table request which will be fragmented
+ */
+ if (tt_diff_len > bat_priv->soft_iface->mtu)
+ tt_diff_len = 0;
+
+ tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data,
+ &tt_change, &tt_diff_len);
+ if (!tvlv_len)
+ return;
- if (new_len > 0)
- tot_changes = new_len / batadv_tt_len(1);
+ tt_data->flags = BATADV_TT_OGM_DIFF;
+
+ if (tt_diff_len == 0)
+ goto container_register;
spin_lock_bh(&bat_priv->tt.changes_list_lock);
atomic_set(&bat_priv->tt.local_changes, 0);
list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
list) {
- if (count < tot_changes) {
- memcpy(tt_buff + batadv_tt_len(count),
- &entry->change, sizeof(struct batadv_tt_change));
- count++;
+ if (tt_diff_entries_count < tt_diff_entries_num) {
+ memcpy(tt_change + tt_diff_entries_count,
+ &entry->change,
+ sizeof(struct batadv_tvlv_tt_change));
+ tt_diff_entries_count++;
}
list_del(&entry->list);
kfree(entry);
@@ -452,20 +828,25 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
kfree(bat_priv->tt.last_changeset);
bat_priv->tt.last_changeset_len = 0;
bat_priv->tt.last_changeset = NULL;
+ tt_change_len = batadv_tt_len(tt_diff_entries_count);
/* check whether this new OGM has no changes due to size problems */
- if (new_len > 0) {
+ if (tt_diff_entries_count > 0) {
/* if kmalloc() fails we will reply with the full table
* instead of providing the diff
*/
- bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
+ bat_priv->tt.last_changeset = kzalloc(tt_diff_len, GFP_ATOMIC);
if (bat_priv->tt.last_changeset) {
- memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
- bat_priv->tt.last_changeset_len = new_len;
+ memcpy(bat_priv->tt.last_changeset,
+ tt_change, tt_change_len);
+ bat_priv->tt.last_changeset_len = tt_diff_len;
}
}
spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
- return count;
+container_register:
+ batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data,
+ tvlv_len);
+ kfree(tt_data);
}
int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
@@ -476,7 +857,9 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_local_entry *tt_local;
struct batadv_hard_iface *primary_if;
+ struct batadv_softif_vlan *vlan;
struct hlist_head *head;
+ unsigned short vid;
uint32_t i;
int last_seen_secs;
int last_seen_msecs;
@@ -489,11 +872,10 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
goto out;
seq_printf(seq,
- "Locally retrieved addresses (from %s) announced via TT (TTVN: %u CRC: %#.4x):\n",
- net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn),
- bat_priv->tt.local_crc);
- seq_printf(seq, " %-13s %-7s %-10s\n", "Client", "Flags",
- "Last seen");
+ "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
+ net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
+ seq_printf(seq, " %-13s %s %-7s %-9s (%-10s)\n", "Client", "VID",
+ "Flags", "Last seen", "CRC");
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -504,6 +886,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
tt_local = container_of(tt_common_entry,
struct batadv_tt_local_entry,
common);
+ vid = tt_common_entry->vid;
last_seen_jiffies = jiffies - tt_local->last_seen;
last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
last_seen_secs = last_seen_msecs / 1000;
@@ -511,8 +894,17 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
no_purge = tt_common_entry->flags & np_flag;
- seq_printf(seq, " * %pM [%c%c%c%c%c] %3u.%03u\n",
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (!vlan) {
+ seq_printf(seq, "Cannot retrieve VLAN %d\n",
+ BATADV_PRINT_VID(vid));
+ continue;
+ }
+
+ seq_printf(seq,
+ " * %pM %4i [%c%c%c%c%c] %3u.%03u (%#.8x)\n",
tt_common_entry->addr,
+ BATADV_PRINT_VID(tt_common_entry->vid),
(tt_common_entry->flags &
BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
no_purge ? 'P' : '.',
@@ -523,7 +915,10 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
(tt_common_entry->flags &
BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
no_purge ? 0 : last_seen_secs,
- no_purge ? 0 : last_seen_msecs);
+ no_purge ? 0 : last_seen_msecs,
+ vlan->tt.crc);
+
+ batadv_softif_vlan_free_ref(vlan);
}
rcu_read_unlock();
}
@@ -547,27 +942,29 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Local tt entry (%pM) pending to be removed: %s\n",
- tt_local_entry->common.addr, message);
+ "Local tt entry (%pM, vid: %d) pending to be removed: %s\n",
+ tt_local_entry->common.addr,
+ BATADV_PRINT_VID(tt_local_entry->common.vid), message);
}
/**
* batadv_tt_local_remove - logically remove an entry from the local table
* @bat_priv: the bat priv with all the soft interface information
* @addr: the MAC address of the client to remove
+ * @vid: VLAN identifier
* @message: message to append to the log on deletion
* @roaming: true if the deletion is due to a roaming event
*
* Returns the flags assigned to the local entry before being deleted
*/
uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
- const uint8_t *addr, const char *message,
- bool roaming)
+ const uint8_t *addr, unsigned short vid,
+ const char *message, bool roaming)
{
struct batadv_tt_local_entry *tt_local_entry;
uint16_t flags, curr_flags = BATADV_NO_FLAGS;
- tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
goto out;
@@ -603,8 +1000,16 @@ out:
return curr_flags;
}
+/**
+ * batadv_tt_local_purge_list - purge inactive tt local entries
+ * @bat_priv: the bat priv with all the soft interface information
+ * @head: pointer to the list containing the local tt entries
+ * @timeout: parameter deciding whether a given tt local entry is considered
+ * inactive or not
+ */
static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
- struct hlist_head *head)
+ struct hlist_head *head,
+ int timeout)
{
struct batadv_tt_local_entry *tt_local_entry;
struct batadv_tt_common_entry *tt_common_entry;
@@ -622,8 +1027,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
continue;
- if (!batadv_has_timed_out(tt_local_entry->last_seen,
- BATADV_TT_LOCAL_TIMEOUT))
+ if (!batadv_has_timed_out(tt_local_entry->last_seen, timeout))
continue;
batadv_tt_local_set_pending(bat_priv, tt_local_entry,
@@ -631,7 +1035,14 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
}
}
-static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
+/**
+ * batadv_tt_local_purge - purge inactive tt local entries
+ * @bat_priv: the bat priv with all the soft interface information
+ * @timeout: timeout after which a tt local entry is considered inactive
+ */
+static void batadv_tt_local_purge(struct batadv_priv *bat_priv,
+ int timeout)
{
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct hlist_head *head;
@@ -643,7 +1054,7 @@ static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- batadv_tt_local_purge_list(bat_priv, head);
+ batadv_tt_local_purge_list(bat_priv, head, timeout);
spin_unlock_bh(list_lock);
}
}
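The hunks above turn the purge timeout into a caller-supplied parameter instead of always using BATADV_TT_LOCAL_TIMEOUT, presumably so the same helper can be driven with different entry lifetimes. A minimal userspace sketch of the same pattern follows; the entry type, time source and names are illustrative stand-ins, not batman-adv code:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct entry {
	const char *name;
	time_t last_seen;		/* wall-clock seconds for the sketch */
};

/* true once more than 'timeout' seconds have passed since last_seen */
static bool has_timed_out(time_t last_seen, time_t timeout, time_t now)
{
	return now - last_seen > timeout;
}

/* purge helper: the lifetime is decided by the caller, not hard-coded */
static void purge_list(struct entry *entries, size_t num, time_t timeout)
{
	time_t now = time(NULL);
	size_t i;

	for (i = 0; i < num; i++) {
		if (!has_timed_out(entries[i].last_seen, timeout, now))
			continue;
		printf("purging %s (timeout %lds)\n", entries[i].name,
		       (long)timeout);
	}
}

int main(void)
{
	struct entry table[] = {
		{ "client-a", time(NULL) - 700 },
		{ "client-b", time(NULL) - 10 },
	};

	purge_list(table, 2, 600);	/* only client-a is expired */
	purge_list(table, 2, 5);	/* both entries are expired */
	return 0;
}

In the kernel the check is done by batadv_has_timed_out() on jiffies rather than on wall-clock seconds.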
@@ -784,7 +1195,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
INIT_HLIST_NODE(&orig_entry->list);
atomic_inc(&orig_node->refcount);
- atomic_inc(&orig_node->tt_size);
+ batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
orig_entry->orig_node = orig_node;
orig_entry->ttvn = ttvn;
atomic_set(&orig_entry->refcount, 2);
@@ -803,6 +1214,7 @@ out:
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the originator announcing the client
* @tt_addr: the mac address of the non-mesh client
+ * @vid: VLAN identifier
* @flags: TT flags that have to be set for this non-mesh client
* @ttvn: the tt version number ever announcing this non-mesh client
*
@@ -813,21 +1225,28 @@ out:
* If a TT local entry exists for this non-mesh client remove it.
*
* The caller must hold orig_node refcount.
+ *
+ * Returns true if the new entry has been added, false otherwise
*/
-int batadv_tt_global_add(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- const unsigned char *tt_addr, uint16_t flags,
- uint8_t ttvn)
+static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *tt_addr,
+ unsigned short vid, uint16_t flags,
+ uint8_t ttvn)
{
struct batadv_tt_global_entry *tt_global_entry;
struct batadv_tt_local_entry *tt_local_entry;
- int ret = 0;
+ bool ret = false;
int hash_added;
struct batadv_tt_common_entry *common;
uint16_t local_flags;
- tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
- tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr);
+ /* ignore global entries from backbone nodes */
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid))
+ return true;
+
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr, vid);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr, vid);
/* if the node already has a local client for this entry, it has to wait
* for a roaming advertisement instead of manually messing up the global
@@ -844,6 +1263,7 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
common = &tt_global_entry->common;
memcpy(common->addr, tt_addr, ETH_ALEN);
+ common->vid = vid;
common->flags = flags;
tt_global_entry->roam_at = 0;
@@ -861,7 +1281,7 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
hash_added = batadv_hash_add(bat_priv->tt.global_hash,
batadv_compare_tt,
- batadv_choose_orig, common,
+ batadv_choose_tt, common,
&common->hash_entry);
if (unlikely(hash_added != 0)) {
@@ -920,14 +1340,15 @@ add_orig_entry:
batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Creating new global tt entry: %pM (via %pM)\n",
- common->addr, orig_node->orig);
- ret = 1;
+ "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
+ common->addr, BATADV_PRINT_VID(common->vid),
+ orig_node->orig);
+ ret = true;
out_remove:
/* remove address from local hash if present */
- local_flags = batadv_tt_local_remove(bat_priv, tt_addr,
+ local_flags = batadv_tt_local_remove(bat_priv, tt_addr, vid,
"global tt received",
flags & BATADV_TT_CLIENT_ROAM);
tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI;
@@ -947,18 +1368,20 @@ out:
}
/* batadv_transtable_best_orig - Get best originator list entry from tt entry
+ * @bat_priv: the bat priv with all the soft interface information
* @tt_global_entry: global translation table entry to be analyzed
*
* This function assumes the caller holds rcu_read_lock().
* Returns best originator list entry or NULL on errors.
*/
static struct batadv_tt_orig_list_entry *
-batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry)
+batadv_transtable_best_orig(struct batadv_priv *bat_priv,
+ struct batadv_tt_global_entry *tt_global_entry)
{
- struct batadv_neigh_node *router = NULL;
+ struct batadv_neigh_node *router, *best_router = NULL;
+ struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
struct hlist_head *head;
struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL;
- int best_tq = 0;
head = &tt_global_entry->orig_list;
hlist_for_each_entry_rcu(orig_entry, head, list) {
@@ -966,64 +1389,104 @@ batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry)
if (!router)
continue;
- if (router->tq_avg > best_tq) {
- best_entry = orig_entry;
- best_tq = router->tq_avg;
+ if (best_router &&
+ bao->bat_neigh_cmp(router, best_router) <= 0) {
+ batadv_neigh_node_free_ref(router);
+ continue;
}
- batadv_neigh_node_free_ref(router);
+ /* release the refcount for the "old" best */
+ if (best_router)
+ batadv_neigh_node_free_ref(best_router);
+
+ best_entry = orig_entry;
+ best_router = router;
}
+ if (best_router)
+ batadv_neigh_node_free_ref(best_router);
+
return best_entry;
}
/* batadv_tt_global_print_entry - print all orig nodes who announce the address
* for this global entry
+ * @bat_priv: the bat priv with all the soft interface information
* @tt_global_entry: global translation table entry to be printed
* @seq: debugfs table seq_file struct
*
* This function assumes the caller holds rcu_read_lock().
*/
static void
-batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
+batadv_tt_global_print_entry(struct batadv_priv *bat_priv,
+ struct batadv_tt_global_entry *tt_global_entry,
struct seq_file *seq)
{
- struct hlist_head *head;
struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
struct batadv_tt_common_entry *tt_common_entry;
- uint16_t flags;
+ struct batadv_orig_node_vlan *vlan;
+ struct hlist_head *head;
uint8_t last_ttvn;
+ uint16_t flags;
tt_common_entry = &tt_global_entry->common;
flags = tt_common_entry->flags;
- best_entry = batadv_transtable_best_orig(tt_global_entry);
+ best_entry = batadv_transtable_best_orig(bat_priv, tt_global_entry);
if (best_entry) {
+ vlan = batadv_orig_node_vlan_get(best_entry->orig_node,
+ tt_common_entry->vid);
+ if (!vlan) {
+ seq_printf(seq,
+ " * Cannot retrieve VLAN %d for originator %pM\n",
+ BATADV_PRINT_VID(tt_common_entry->vid),
+ best_entry->orig_node->orig);
+ goto print_list;
+ }
+
last_ttvn = atomic_read(&best_entry->orig_node->last_ttvn);
seq_printf(seq,
- " %c %pM (%3u) via %pM (%3u) (%#.4x) [%c%c%c]\n",
+ " %c %pM %4i (%3u) via %pM (%3u) (%#.8x) [%c%c%c]\n",
'*', tt_global_entry->common.addr,
+ BATADV_PRINT_VID(tt_global_entry->common.vid),
best_entry->ttvn, best_entry->orig_node->orig,
- last_ttvn, best_entry->orig_node->tt_crc,
+ last_ttvn, vlan->tt.crc,
(flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
(flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
(flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
+
+ batadv_orig_node_vlan_free_ref(vlan);
}
+print_list:
head = &tt_global_entry->orig_list;
hlist_for_each_entry_rcu(orig_entry, head, list) {
if (best_entry == orig_entry)
continue;
+ vlan = batadv_orig_node_vlan_get(orig_entry->orig_node,
+ tt_common_entry->vid);
+ if (!vlan) {
+ seq_printf(seq,
+ " + Cannot retrieve VLAN %d for originator %pM\n",
+ BATADV_PRINT_VID(tt_common_entry->vid),
+ orig_entry->orig_node->orig);
+ continue;
+ }
+
last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
- seq_printf(seq, " %c %pM (%3u) via %pM (%3u) [%c%c%c]\n",
+ seq_printf(seq,
+ " %c %pM %4d (%3u) via %pM (%3u) (%#.8x) [%c%c%c]\n",
'+', tt_global_entry->common.addr,
+ BATADV_PRINT_VID(tt_global_entry->common.vid),
orig_entry->ttvn, orig_entry->orig_node->orig,
- last_ttvn,
+ last_ttvn, vlan->tt.crc,
(flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
(flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
(flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
+
+ batadv_orig_node_vlan_free_ref(vlan);
}
}
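batadv_transtable_best_orig() above now delegates the comparison to the routing algorithm's bat_neigh_cmp() callback and must drop the reference of every router that loses it, plus the reference of the previous best whenever a better one is found. The sketch below shows only that bookkeeping in plain userspace C; the refcounting helpers and the comparator are simplified stand-ins, not the batman-adv API, and unlike the kernel function this sketch hands the best neighbour (with its reference) back to the caller instead of returning the corresponding orig-list entry:

#include <stddef.h>
#include <stdio.h>

struct neigh {
	const char *name;
	int metric;
	int refcount;
};

static struct neigh *neigh_get(struct neigh *n)
{
	n->refcount++;
	return n;
}

static void neigh_put(struct neigh *n)
{
	n->refcount--;
}

/* stand-in for bat_neigh_cmp(): >0 if a is better than b, <=0 otherwise */
static int neigh_cmp(const struct neigh *a, const struct neigh *b)
{
	return a->metric - b->metric;
}

/* keep a reference on the current best only, drop every loser right away */
static struct neigh *best_neigh(struct neigh **list, size_t num)
{
	struct neigh *router, *best_router = NULL;
	size_t i;

	for (i = 0; i < num; i++) {
		router = neigh_get(list[i]);

		if (best_router && neigh_cmp(router, best_router) <= 0) {
			neigh_put(router);
			continue;
		}

		/* release the reference held on the previous best */
		if (best_router)
			neigh_put(best_router);

		best_router = router;
	}

	return best_router;	/* caller must neigh_put() it when done */
}

int main(void)
{
	struct neigh a = { "a", 10, 0 }, b = { "b", 42, 0 }, c = { "c", 7, 0 };
	struct neigh *list[] = { &a, &b, &c };
	struct neigh *best = best_neigh(list, 3);

	printf("best: %s (refcount %d)\n", best->name, best->refcount);
	neigh_put(best);
	return 0;
}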
@@ -1045,9 +1508,9 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq,
"Globally announced TT entries received via the mesh %s\n",
net_dev->name);
- seq_printf(seq, " %-13s %s %-15s %s (%-6s) %s\n",
- "Client", "(TTVN)", "Originator", "(Curr TTVN)", "CRC",
- "Flags");
+ seq_printf(seq, " %-13s %s %s %-15s %s (%-10s) %s\n",
+ "Client", "VID", "(TTVN)", "Originator", "(Curr TTVN)",
+ "CRC", "Flags");
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -1058,7 +1521,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
tt_global = container_of(tt_common_entry,
struct batadv_tt_global_entry,
common);
- batadv_tt_global_print_entry(tt_global, seq);
+ batadv_tt_global_print_entry(bat_priv, tt_global, seq);
}
rcu_read_unlock();
}
@@ -1080,6 +1543,8 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
head = &tt_global_entry->orig_list;
hlist_for_each_entry_safe(orig_entry, safe, head, list) {
hlist_del_rcu(&orig_entry->list);
+ batadv_tt_global_size_dec(orig_entry->orig_node,
+ tt_global_entry->common.vid);
batadv_tt_orig_list_entry_free_ref(orig_entry);
}
spin_unlock_bh(&tt_global_entry->list_lock);
@@ -1094,16 +1559,21 @@ batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
struct hlist_head *head;
struct hlist_node *safe;
struct batadv_tt_orig_list_entry *orig_entry;
+ unsigned short vid;
spin_lock_bh(&tt_global_entry->list_lock);
head = &tt_global_entry->orig_list;
hlist_for_each_entry_safe(orig_entry, safe, head, list) {
if (orig_entry->orig_node == orig_node) {
+ vid = tt_global_entry->common.vid;
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Deleting %pM from global tt entry %pM: %s\n",
+ "Deleting %pM from global tt entry %pM (vid: %d): %s\n",
orig_node->orig,
- tt_global_entry->common.addr, message);
+ tt_global_entry->common.addr,
+ BATADV_PRINT_VID(vid), message);
hlist_del_rcu(&orig_entry->list);
+ batadv_tt_global_size_dec(orig_node,
+ tt_global_entry->common.vid);
batadv_tt_orig_list_entry_free_ref(orig_entry);
}
}
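The deletion paths above keep per-originator, per-VLAN entry counters in sync via batadv_tt_global_size_inc()/_dec(); their bodies are not part of these hunks. Purely as an illustration of per-VLAN counting, a toy sketch follows; the data structure and helper names are invented for the example and do not mirror the kernel implementation:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* toy per-VLAN entry counter; batman-adv keeps this state in the per-VLAN
 * objects attached to the originator instead of a flat array
 */
struct vlan_count {
	uint16_t vid;
	int num_entries;
};

static struct vlan_count counts[8];
static size_t num_counts;

static struct vlan_count *vlan_count_get(uint16_t vid)
{
	size_t i;

	for (i = 0; i < num_counts; i++)
		if (counts[i].vid == vid)
			return &counts[i];

	if (num_counts == sizeof(counts) / sizeof(counts[0]))
		return NULL;

	counts[num_counts].vid = vid;
	counts[num_counts].num_entries = 0;
	return &counts[num_counts++];
}

static void tt_size_inc(uint16_t vid)
{
	struct vlan_count *c = vlan_count_get(vid);

	if (c)
		c->num_entries++;
}

static void tt_size_dec(uint16_t vid)
{
	struct vlan_count *c = vlan_count_get(vid);

	if (c)
		c->num_entries--;
}

int main(void)
{
	tt_size_inc(0);
	tt_size_inc(0);
	tt_size_inc(5);
	tt_size_dec(0);

	printf("vid 0: %d entries, vid 5: %d entries\n",
	       vlan_count_get(0)->num_entries,
	       vlan_count_get(5)->num_entries);
	return 0;
}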
@@ -1150,17 +1620,25 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
orig_node, message);
}
-
-
+/**
+ * batadv_tt_global_del - remove a client from the global table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: an originator serving this client
+ * @addr: the mac address of the client
+ * @vid: VLAN identifier
+ * @message: a message explaining the reason for deleting the client to print
+ * for debugging purposes
+ * @roaming: true if the deletion has been triggered by a roaming event
+ */
static void batadv_tt_global_del(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
- const unsigned char *addr,
+ const unsigned char *addr, unsigned short vid,
const char *message, bool roaming)
{
struct batadv_tt_global_entry *tt_global_entry;
struct batadv_tt_local_entry *local_entry = NULL;
- tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
if (!tt_global_entry)
goto out;
@@ -1189,7 +1667,8 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
* the global entry, since it is useless now.
*/
local_entry = batadv_tt_local_hash_find(bat_priv,
- tt_global_entry->common.addr);
+ tt_global_entry->common.addr,
+ vid);
if (local_entry) {
/* local entry exists, case 2: client roamed to us. */
batadv_tt_global_del_orig_list(tt_global_entry);
@@ -1207,8 +1686,18 @@ out:
batadv_tt_local_entry_free_ref(local_entry);
}
+/**
+ * batadv_tt_global_del_orig - remove all the TT global entries belonging to the
+ * given originator matching the provided vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the originator owning the entries to remove
+ * @match_vid: the VLAN identifier to match. If negative, all the entries will
+ * be removed
+ * @message: debug message to print as "reason"
+ */
void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
+ int32_t match_vid,
const char *message)
{
struct batadv_tt_global_entry *tt_global;
@@ -1218,6 +1707,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
struct hlist_node *safe;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
+ unsigned short vid;
if (!hash)
return;
@@ -1229,6 +1719,10 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(tt_common_entry, safe,
head, hash_entry) {
+ /* remove only matching entries */
+ if (match_vid >= 0 && tt_common_entry->vid != match_vid)
+ continue;
+
tt_global = container_of(tt_common_entry,
struct batadv_tt_global_entry,
common);
@@ -1237,9 +1731,11 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
orig_node, message);
if (hlist_empty(&tt_global->orig_list)) {
+ vid = tt_global->common.vid;
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Deleting global tt entry %pM: %s\n",
- tt_global->common.addr, message);
+ "Deleting global tt entry %pM (vid: %d): %s\n",
+ tt_global->common.addr,
+ BATADV_PRINT_VID(vid), message);
hlist_del_rcu(&tt_common_entry->hash_entry);
batadv_tt_global_entry_free_ref(tt_global);
}
@@ -1297,8 +1793,10 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
continue;
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Deleting global tt entry (%pM): %s\n",
- tt_global->common.addr, msg);
+ "Deleting global tt entry %pM (vid: %d): %s\n",
+ tt_global->common.addr,
+ BATADV_PRINT_VID(tt_global->common.vid),
+ msg);
hlist_del_rcu(&tt_common->hash_entry);
@@ -1357,23 +1855,49 @@ _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
return ret;
}
+/**
+ * batadv_transtable_search - get the mesh destination for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of the source client
+ * @addr: mac address of the destination client
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the originator that was selected as destination in the
+ * mesh for contacting the client 'addr', NULL otherwise.
+ * In case of multiple originators serving the same client, the function returns
+ * the best one (best in terms of metric towards the destination node).
+ *
+ * If the two clients are AP isolated the function returns NULL.
+ */
struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
const uint8_t *src,
- const uint8_t *addr)
+ const uint8_t *addr,
+ unsigned short vid)
{
struct batadv_tt_local_entry *tt_local_entry = NULL;
struct batadv_tt_global_entry *tt_global_entry = NULL;
struct batadv_orig_node *orig_node = NULL;
struct batadv_tt_orig_list_entry *best_entry;
+ bool ap_isolation_enabled = false;
+ struct batadv_softif_vlan *vlan;
- if (src && atomic_read(&bat_priv->ap_isolation)) {
- tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
+ /* if the AP isolation is requested on a VLAN, then check for its
+ * setting in the proper VLAN private data structure
+ */
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (vlan) {
+ ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
+ batadv_softif_vlan_free_ref(vlan);
+ }
+
+ if (src && ap_isolation_enabled) {
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, src, vid);
if (!tt_local_entry ||
(tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING))
goto out;
}
- tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
if (!tt_global_entry)
goto out;
@@ -1385,7 +1909,7 @@ struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
goto out;
rcu_read_lock();
- best_entry = batadv_transtable_best_orig(tt_global_entry);
+ best_entry = batadv_transtable_best_orig(bat_priv, tt_global_entry);
/* found anything? */
if (best_entry)
orig_node = best_entry->orig_node;
@@ -1402,17 +1926,40 @@ out:
return orig_node;
}
-/* Calculates the checksum of the local table of a given orig_node */
-static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node)
+/**
+ * batadv_tt_global_crc - calculates the checksum of the local table belonging
+ * to the given orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator for which the CRC should be computed
+ * @vid: VLAN identifier for which the CRC32 has to be computed
+ *
+ * This function computes the checksum for the global table corresponding to a
+ * specific originator. In particular, the checksum is computed as follows: For
+ * each client connected to the originator the CRC32C of the MAC address, the
+ * VID and the flags that have to be kept in sync among nodes is computed, and
+ * all the per-client CRC32Cs are then XOR'ed together.
+ *
+ * The idea is to use CRC32C as much as possible in order to produce a unique
+ * hash of the table, but since the order in which the entries are fed to the
+ * CRC32C function affects the result and since every node in the network
+ * probably sorts its clients differently, the hash cannot be computed over the
+ * table as a whole. Hence CRC32C is applied to each single client entry, and
+ * the per-entry results are then XOR'ed together: being order independent, the
+ * XOR can combine them all while keeping the noise introduced by the
+ * combination as low as possible.
+ *
+ * Returns the checksum of the global table of a given originator.
+ */
+static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ unsigned short vid)
{
- uint16_t total = 0, total_one;
struct batadv_hashtable *hash = bat_priv->tt.global_hash;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_global_entry *tt_global;
struct hlist_head *head;
- uint32_t i;
- int j;
+ uint32_t i, crc_tmp, crc = 0;
+ uint8_t flags;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -1422,6 +1969,12 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
tt_global = container_of(tt_common,
struct batadv_tt_global_entry,
common);
+ /* compute the CRC only for entries belonging to the
+ * VLAN identified by the vid passed as parameter
+ */
+ if (tt_common->vid != vid)
+ continue;
+
/* Roaming clients are in the global table for
* consistency only. They don't have to be
* taken into account while computing the
@@ -1443,48 +1996,74 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
orig_node))
continue;
- total_one = 0;
- for (j = 0; j < ETH_ALEN; j++)
- total_one = crc16_byte(total_one,
- tt_common->addr[j]);
- total ^= total_one;
+ crc_tmp = crc32c(0, &tt_common->vid,
+ sizeof(tt_common->vid));
+
+ /* compute the CRC on flags that have to be kept in sync
+ * among nodes
+ */
+ flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+ crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
+
+ crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
}
rcu_read_unlock();
}
- return total;
+ return crc;
}
-/* Calculates the checksum of the local table */
-static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
+/**
+ * batadv_tt_local_crc - calculates the checksum of the local table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: VLAN identifier for which the CRC32 has to be computed
+ *
+ * For details about the computation, please refer to the documentation for
+ * batadv_tt_global_crc().
+ *
+ * Returns the checksum of the local table
+ */
+static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
+ unsigned short vid)
{
- uint16_t total = 0, total_one;
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common;
struct hlist_head *head;
- uint32_t i;
- int j;
+ uint32_t i, crc_tmp, crc = 0;
+ uint8_t flags;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
+ /* compute the CRC only for entries belonging to the
+ * VLAN identified by vid
+ */
+ if (tt_common->vid != vid)
+ continue;
+
/* not yet committed clients have not to be taken into
* account while computing the CRC
*/
if (tt_common->flags & BATADV_TT_CLIENT_NEW)
continue;
- total_one = 0;
- for (j = 0; j < ETH_ALEN; j++)
- total_one = crc16_byte(total_one,
- tt_common->addr[j]);
- total ^= total_one;
+
+ crc_tmp = crc32c(0, &tt_common->vid,
+ sizeof(tt_common->vid));
+
+ /* compute the CRC on flags that have to be kept in sync
+ * among nodes
+ */
+ flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+ crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
+
+ crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
}
rcu_read_unlock();
}
- return total;
+ return crc;
}
static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
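The two CRC hunks above replace the 16-bit per-byte CRC with a per-VLAN CRC32C scheme: for every entry the CRC32C is seeded with 0, run over the VID, the synchronised flags and the MAC address, and the per-entry results are XOR'ed into the table checksum. The userspace sketch below mirrors that structure; crc32c_update() is a plain bitwise CRC-32C (Castagnoli) step written for this example and is not guaranteed to be bit-for-bit identical to the kernel's crc32c(), and the entry layout is simplified:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* plain bitwise CRC-32C (reflected polynomial 0x82F63B78) update step */
static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (crc & 1 ? 0x82F63B78 : 0);
	}
	return crc;
}

/* simplified client entry: MAC address, VLAN id, flags kept in sync */
struct tt_entry {
	uint8_t addr[6];
	uint16_t vid;
	uint8_t sync_flags;
};

/* per-entry CRC over (vid, synced flags, MAC), XOR'ed into the table CRC so
 * the result does not depend on the iteration order
 */
static uint32_t tt_table_crc(const struct tt_entry *entries, size_t num)
{
	uint32_t crc = 0, crc_tmp;
	size_t i;

	for (i = 0; i < num; i++) {
		crc_tmp = crc32c_update(0, &entries[i].vid,
					sizeof(entries[i].vid));
		crc_tmp = crc32c_update(crc_tmp, &entries[i].sync_flags,
					sizeof(entries[i].sync_flags));
		crc ^= crc32c_update(crc_tmp, entries[i].addr,
				     sizeof(entries[i].addr));
	}
	return crc;
}

int main(void)
{
	struct tt_entry table[] = {
		{ { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 }, 1, 0 },
		{ { 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee }, 1, 0 },
	};

	printf("table crc: 0x%08x\n",
	       (unsigned int)tt_table_crc(table, 2));
	return 0;
}

On the receiving side the patch simply compares these per-VLAN values against the ones advertised in the TT TVLV, see batadv_tt_global_check_crc() further down.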
@@ -1503,11 +2082,9 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
- const unsigned char *tt_buff,
- uint8_t tt_num_changes)
+ const void *tt_buff,
+ uint16_t tt_buff_len)
{
- uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
-
/* Replace the old buffer only if I received something in the
* last OGM (the OGM could carry no changes)
*/
@@ -1569,9 +2146,14 @@ unlock:
return tt_req_node;
}
-/* data_ptr is useless here, but has to be kept to respect the prototype */
-static int batadv_tt_local_valid_entry(const void *entry_ptr,
- const void *data_ptr)
+/**
+ * batadv_tt_local_valid - verify that the given tt entry is a valid one
+ * @entry_ptr: the local tt entry to be checked
+ * @data_ptr: not used but definition required to satisfy the callback prototype
+ *
+ * Returns 1 if the entry is valid, 0 otherwise.
+ */
+static int batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
{
const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
@@ -1598,41 +2180,30 @@ static int batadv_tt_global_valid(const void *entry_ptr,
return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
}
-static struct sk_buff *
-batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
- struct batadv_hashtable *hash,
- struct batadv_priv *bat_priv,
- int (*valid_cb)(const void *, const void *),
- void *cb_data)
+/**
+ * batadv_tt_tvlv_generate - fill the tvlv buff with the tt entries from the
+ * specified tt hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hash: hash table containing the tt entries
+ * @tt_len: expected tvlv tt data buffer length in number of bytes
+ * @tvlv_buff: pointer to the buffer to fill with the TT data
+ * @valid_cb: function to filter tt change entries
+ * @cb_data: data passed to the filter function as argument
+ */
+static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+ struct batadv_hashtable *hash,
+ void *tvlv_buff, uint16_t tt_len,
+ int (*valid_cb)(const void *, const void *),
+ void *cb_data)
{
struct batadv_tt_common_entry *tt_common_entry;
- struct batadv_tt_query_packet *tt_response;
- struct batadv_tt_change *tt_change;
+ struct batadv_tvlv_tt_change *tt_change;
struct hlist_head *head;
- struct sk_buff *skb = NULL;
- uint16_t tt_tot, tt_count;
- ssize_t tt_query_size = sizeof(struct batadv_tt_query_packet);
+ uint16_t tt_tot, tt_num_entries = 0;
uint32_t i;
- size_t len;
-
- if (tt_query_size + tt_len > bat_priv->soft_iface->mtu) {
- tt_len = bat_priv->soft_iface->mtu - tt_query_size;
- tt_len -= tt_len % sizeof(struct batadv_tt_change);
- }
- tt_tot = tt_len / sizeof(struct batadv_tt_change);
-
- len = tt_query_size + tt_len;
- skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
- if (!skb)
- goto out;
-
- skb->priority = TC_PRIO_CONTROL;
- skb_reserve(skb, ETH_HLEN);
- tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
- tt_response->ttvn = ttvn;
- tt_change = (struct batadv_tt_change *)(skb->data + tt_query_size);
- tt_count = 0;
+ tt_tot = batadv_tt_entries(tt_len);
+ tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
rcu_read_lock();
for (i = 0; i < hash->size; i++) {
@@ -1640,7 +2211,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
hlist_for_each_entry_rcu(tt_common_entry,
head, hash_entry) {
- if (tt_count == tt_tot)
+ if (tt_tot == tt_num_entries)
break;
if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
@@ -1649,33 +2220,123 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
memcpy(tt_change->addr, tt_common_entry->addr,
ETH_ALEN);
tt_change->flags = tt_common_entry->flags;
+ tt_change->vid = htons(tt_common_entry->vid);
+ tt_change->reserved = 0;
- tt_count++;
+ tt_num_entries++;
tt_change++;
}
}
rcu_read_unlock();
+}
- /* store in the message the number of entries we have successfully
- * copied
- */
- tt_response->tt_data = htons(tt_count);
+/**
+ * batadv_tt_global_check_crc - check if all the CRCs are correct
+ * @orig_node: originator for which the CRCs have to be checked
+ * @tt_vlan: pointer to the first tvlv VLAN entry
+ * @num_vlan: number of tvlv VLAN entries
+ *
+ * Returns true if all the received CRCs match the locally stored ones, false
+ * otherwise
+ */
+static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
+ struct batadv_tvlv_tt_vlan_data *tt_vlan,
+ uint16_t num_vlan)
+{
+ struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
+ struct batadv_orig_node_vlan *vlan;
+ int i;
-out:
- return skb;
+ /* check if each received CRC matches the locally stored one */
+ for (i = 0; i < num_vlan; i++) {
+ tt_vlan_tmp = tt_vlan + i;
+
+ /* if orig_node is a backbone node for this VLAN, don't check
+ * the CRC as we ignore all the global entries over it
+ */
+ if (batadv_bla_is_backbone_gw_orig(orig_node->bat_priv,
+ orig_node->orig,
+ ntohs(tt_vlan_tmp->vid)))
+ continue;
+
+ vlan = batadv_orig_node_vlan_get(orig_node,
+ ntohs(tt_vlan_tmp->vid));
+ if (!vlan)
+ return false;
+
+ if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * batadv_tt_local_update_crc - update all the local CRCs
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
+{
+ struct batadv_softif_vlan *vlan;
+
+ /* recompute the local CRC for each VLAN */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ vlan->tt.crc = batadv_tt_local_crc(bat_priv, vlan->vid);
+ }
+ rcu_read_unlock();
}
+/**
+ * batadv_tt_global_update_crc - update all the global CRCs for this orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the orig_node for which the CRCs have to be updated
+ */
+static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_orig_node_vlan *vlan;
+ uint32_t crc;
+
+ /* recompute the global CRC for each VLAN */
+ rcu_read_lock();
+ list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+ /* if orig_node is a backbone node for this VLAN, don't compute
+ * the CRC as we ignore all the global entries over it
+ */
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig,
+ vlan->vid))
+ continue;
+
+ crc = batadv_tt_global_crc(bat_priv, orig_node, vlan->vid);
+ vlan->tt.crc = crc;
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * batadv_send_tt_request - send a TT Request message to a given node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @dst_orig_node: the destination of the message
+ * @ttvn: the version number that the source of the message is looking for
+ * @tt_vlan: pointer to the first tvlv VLAN object to request
+ * @num_vlan: number of tvlv VLAN entries
+ * @full_table: ask for the entire translation table if true, or only for the
+ * last TT diff otherwise
+ */
static int batadv_send_tt_request(struct batadv_priv *bat_priv,
struct batadv_orig_node *dst_orig_node,
- uint8_t ttvn, uint16_t tt_crc,
- bool full_table)
+ uint8_t ttvn,
+ struct batadv_tvlv_tt_vlan_data *tt_vlan,
+ uint16_t num_vlan, bool full_table)
{
- struct sk_buff *skb = NULL;
- struct batadv_tt_query_packet *tt_request;
- struct batadv_hard_iface *primary_if;
+ struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
struct batadv_tt_req_node *tt_req_node = NULL;
- int ret = 1;
- size_t tt_req_len;
+ struct batadv_tvlv_tt_vlan_data *tt_vlan_req;
+ struct batadv_hard_iface *primary_if;
+ bool ret = false;
+ int i, size;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
@@ -1688,157 +2349,171 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv,
if (!tt_req_node)
goto out;
- skb = netdev_alloc_skb_ip_align(NULL, sizeof(*tt_request) + ETH_HLEN);
- if (!skb)
+ size = sizeof(*tvlv_tt_data) + sizeof(*tt_vlan_req) * num_vlan;
+ tvlv_tt_data = kzalloc(size, GFP_ATOMIC);
+ if (!tvlv_tt_data)
goto out;
- skb->priority = TC_PRIO_CONTROL;
- skb_reserve(skb, ETH_HLEN);
+ tvlv_tt_data->flags = BATADV_TT_REQUEST;
+ tvlv_tt_data->ttvn = ttvn;
+ tvlv_tt_data->num_vlan = htons(num_vlan);
- tt_req_len = sizeof(*tt_request);
- tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
+ /* send all the CRCs within the request. This is needed by intermediate
+ * nodes to ensure they have the correct table before replying
+ */
+ tt_vlan_req = (struct batadv_tvlv_tt_vlan_data *)(tvlv_tt_data + 1);
+ for (i = 0; i < num_vlan; i++) {
+ tt_vlan_req->vid = tt_vlan->vid;
+ tt_vlan_req->crc = tt_vlan->crc;
- tt_request->header.packet_type = BATADV_TT_QUERY;
- tt_request->header.version = BATADV_COMPAT_VERSION;
- memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
- memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
- tt_request->header.ttl = BATADV_TTL;
- tt_request->ttvn = ttvn;
- tt_request->tt_data = htons(tt_crc);
- tt_request->flags = BATADV_TT_REQUEST;
+ tt_vlan_req++;
+ tt_vlan++;
+ }
if (full_table)
- tt_request->flags |= BATADV_TT_FULL_TABLE;
+ tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending TT_REQUEST to %pM [%c]\n",
- dst_orig_node->orig, (full_table ? 'F' : '.'));
+ dst_orig_node->orig, full_table ? 'F' : '.');
batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
-
- if (batadv_send_skb_to_orig(skb, dst_orig_node, NULL) != NET_XMIT_DROP)
- ret = 0;
+ batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+ dst_orig_node->orig, BATADV_TVLV_TT, 1,
+ tvlv_tt_data, size);
+ ret = true;
out:
if (primary_if)
batadv_hardif_free_ref(primary_if);
- if (ret)
- kfree_skb(skb);
if (ret && tt_req_node) {
spin_lock_bh(&bat_priv->tt.req_list_lock);
list_del(&tt_req_node->list);
spin_unlock_bh(&bat_priv->tt.req_list_lock);
kfree(tt_req_node);
}
+ kfree(tvlv_tt_data);
return ret;
}
-static bool
-batadv_send_other_tt_response(struct batadv_priv *bat_priv,
- struct batadv_tt_query_packet *tt_request)
+/**
+ * batadv_send_other_tt_response - send reply to tt request concerning another
+ * node's translation table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ * @req_dst: mac address of tt request recipient
+ *
+ * Returns true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_tt_data *tt_data,
+ uint8_t *req_src, uint8_t *req_dst)
{
struct batadv_orig_node *req_dst_orig_node;
struct batadv_orig_node *res_dst_orig_node = NULL;
- uint8_t orig_ttvn, req_ttvn, ttvn;
- int res, ret = false;
- unsigned char *tt_buff;
- bool full_table;
- uint16_t tt_len, tt_tot;
- struct sk_buff *skb = NULL;
- struct batadv_tt_query_packet *tt_response;
- uint8_t *packet_pos;
- size_t len;
+ struct batadv_tvlv_tt_change *tt_change;
+ struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
+ struct batadv_tvlv_tt_vlan_data *tt_vlan;
+ bool ret = false, full_table;
+ uint8_t orig_ttvn, req_ttvn;
+ uint16_t tvlv_len;
+ int32_t tt_len;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
- tt_request->src, tt_request->ttvn, tt_request->dst,
- (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+ req_src, tt_data->ttvn, req_dst,
+ (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
/* Let's get the orig node of the REAL destination */
- req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
+ req_dst_orig_node = batadv_orig_hash_find(bat_priv, req_dst);
if (!req_dst_orig_node)
goto out;
- res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
+ res_dst_orig_node = batadv_orig_hash_find(bat_priv, req_src);
if (!res_dst_orig_node)
goto out;
orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
- req_ttvn = tt_request->ttvn;
+ req_ttvn = tt_data->ttvn;
- /* I don't have the requested data */
+ tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
+ /* this node doesn't have the requested data */
if (orig_ttvn != req_ttvn ||
- tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
+ !batadv_tt_global_check_crc(req_dst_orig_node, tt_vlan,
+ ntohs(tt_data->num_vlan)))
goto out;
/* If the full table has been explicitly requested */
- if (tt_request->flags & BATADV_TT_FULL_TABLE ||
+ if (tt_data->flags & BATADV_TT_FULL_TABLE ||
!req_dst_orig_node->tt_buff)
full_table = true;
else
full_table = false;
- /* In this version, fragmentation is not implemented, then
- * I'll send only one packet with as much TT entries as I can
+ /* TT fragmentation hasn't been implemented yet, so send only as many
+ * TT entries as fit into a single packet
*/
if (!full_table) {
spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
tt_len = req_dst_orig_node->tt_buff_len;
- tt_tot = tt_len / sizeof(struct batadv_tt_change);
- len = sizeof(*tt_response) + tt_len;
- skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
- if (!skb)
+ tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node,
+ &tvlv_tt_data,
+ &tt_change,
+ &tt_len);
+ if (!tt_len)
goto unlock;
- skb->priority = TC_PRIO_CONTROL;
- skb_reserve(skb, ETH_HLEN);
- packet_pos = skb_put(skb, len);
- tt_response = (struct batadv_tt_query_packet *)packet_pos;
- tt_response->ttvn = req_ttvn;
- tt_response->tt_data = htons(tt_tot);
-
- tt_buff = skb->data + sizeof(*tt_response);
/* Copy the last orig_node's OGM buffer */
- memcpy(tt_buff, req_dst_orig_node->tt_buff,
+ memcpy(tt_change, req_dst_orig_node->tt_buff,
req_dst_orig_node->tt_buff_len);
-
spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
} else {
- tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size);
- tt_len *= sizeof(struct batadv_tt_change);
- ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
-
- skb = batadv_tt_response_fill_table(tt_len, ttvn,
- bat_priv->tt.global_hash,
- bat_priv,
- batadv_tt_global_valid,
- req_dst_orig_node);
- if (!skb)
+ /* allocate the tvlv, put the tt_data and all the tt_vlan_data
+ * in the initial part
+ */
+ tt_len = -1;
+ tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node,
+ &tvlv_tt_data,
+ &tt_change,
+ &tt_len);
+ if (!tt_len)
goto out;
- tt_response = (struct batadv_tt_query_packet *)skb->data;
+ /* fill the rest of the tvlv with the real TT entries */
+ batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash,
+ tt_change, tt_len,
+ batadv_tt_global_valid,
+ req_dst_orig_node);
}
- tt_response->header.packet_type = BATADV_TT_QUERY;
- tt_response->header.version = BATADV_COMPAT_VERSION;
- tt_response->header.ttl = BATADV_TTL;
- memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
- memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
- tt_response->flags = BATADV_TT_RESPONSE;
+ /* Don't send the response if it exceeds the maximum packet size */
+ tt_len = sizeof(struct batadv_unicast_tvlv_packet) + tvlv_len;
+ if (tt_len > atomic_read(&bat_priv->packet_size_max)) {
+ net_ratelimited_function(batadv_info, bat_priv->soft_iface,
+ "Ignoring TT_REQUEST from %pM; Response size exceeds max packet size.\n",
+ res_dst_orig_node->orig);
+ goto out;
+ }
+
+ tvlv_tt_data->flags = BATADV_TT_RESPONSE;
+ tvlv_tt_data->ttvn = req_ttvn;
if (full_table)
- tt_response->flags |= BATADV_TT_FULL_TABLE;
+ tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Sending TT_RESPONSE %pM for %pM (ttvn: %u)\n",
- res_dst_orig_node->orig, req_dst_orig_node->orig, req_ttvn);
+ "Sending TT_RESPONSE %pM for %pM [%c] (ttvn: %u)\n",
+ res_dst_orig_node->orig, req_dst_orig_node->orig,
+ full_table ? 'F' : '.', req_ttvn);
batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
- res = batadv_send_skb_to_orig(skb, res_dst_orig_node, NULL);
- if (res != NET_XMIT_DROP)
- ret = true;
+ batadv_tvlv_unicast_send(bat_priv, req_dst_orig_node->orig,
+ req_src, BATADV_TVLV_TT, 1, tvlv_tt_data,
+ tvlv_len);
+ ret = true;
goto out;
unlock:
@@ -1849,37 +2524,43 @@ out:
batadv_orig_node_free_ref(res_dst_orig_node);
if (req_dst_orig_node)
batadv_orig_node_free_ref(req_dst_orig_node);
- if (!ret)
- kfree_skb(skb);
+ kfree(tvlv_tt_data);
return ret;
}
-static bool
-batadv_send_my_tt_response(struct batadv_priv *bat_priv,
- struct batadv_tt_query_packet *tt_request)
+/**
+ * batadv_send_my_tt_response - send reply to tt request concerning this node's
+ * translation table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ *
+ * Returns true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_tt_data *tt_data,
+ uint8_t *req_src)
{
- struct batadv_orig_node *orig_node;
+ struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
struct batadv_hard_iface *primary_if = NULL;
- uint8_t my_ttvn, req_ttvn, ttvn;
- int ret = false;
- unsigned char *tt_buff;
+ struct batadv_tvlv_tt_change *tt_change;
+ struct batadv_orig_node *orig_node;
+ uint8_t my_ttvn, req_ttvn;
+ uint16_t tvlv_len;
bool full_table;
- uint16_t tt_len, tt_tot;
- struct sk_buff *skb = NULL;
- struct batadv_tt_query_packet *tt_response;
- uint8_t *packet_pos;
- size_t len;
+ int32_t tt_len;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
- tt_request->src, tt_request->ttvn,
- (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+ req_src, tt_data->ttvn,
+ (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+ spin_lock_bh(&bat_priv->tt.commit_lock);
my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
- req_ttvn = tt_request->ttvn;
+ req_ttvn = tt_data->ttvn;
- orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
+ orig_node = batadv_orig_hash_find(bat_priv, req_src);
if (!orig_node)
goto out;
@@ -1890,103 +2571,104 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
/* If the full table has been explicitly requested or the gap
* is too big send the whole local translation table
*/
- if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
+ if (tt_data->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
!bat_priv->tt.last_changeset)
full_table = true;
else
full_table = false;
- /* In this version, fragmentation is not implemented, then
- * I'll send only one packet with as much TT entries as I can
+ /* TT fragmentation hasn't been implemented yet, so send only as many
+ * TT entries as fit into a single packet
*/
if (!full_table) {
spin_lock_bh(&bat_priv->tt.last_changeset_lock);
- tt_len = bat_priv->tt.last_changeset_len;
- tt_tot = tt_len / sizeof(struct batadv_tt_change);
- len = sizeof(*tt_response) + tt_len;
- skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
- if (!skb)
+ tt_len = bat_priv->tt.last_changeset_len;
+ tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv,
+ &tvlv_tt_data,
+ &tt_change,
+ &tt_len);
+ if (!tt_len)
goto unlock;
- skb->priority = TC_PRIO_CONTROL;
- skb_reserve(skb, ETH_HLEN);
- packet_pos = skb_put(skb, len);
- tt_response = (struct batadv_tt_query_packet *)packet_pos;
- tt_response->ttvn = req_ttvn;
- tt_response->tt_data = htons(tt_tot);
-
- tt_buff = skb->data + sizeof(*tt_response);
- memcpy(tt_buff, bat_priv->tt.last_changeset,
+ /* Copy the last local changeset */
+ memcpy(tt_change, bat_priv->tt.last_changeset,
bat_priv->tt.last_changeset_len);
spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
} else {
- tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
- tt_len *= sizeof(struct batadv_tt_change);
- ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
-
- skb = batadv_tt_response_fill_table(tt_len, ttvn,
- bat_priv->tt.local_hash,
- bat_priv,
- batadv_tt_local_valid_entry,
- NULL);
- if (!skb)
+ req_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+
+ /* allocate the tvlv, put the tt_data and all the tt_vlan_data
+ * in the initial part
+ */
+ tt_len = -1;
+ tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv,
+ &tvlv_tt_data,
+ &tt_change,
+ &tt_len);
+ if (!tt_len)
goto out;
- tt_response = (struct batadv_tt_query_packet *)skb->data;
+ /* fill the rest of the tvlv with the real TT entries */
+ batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash,
+ tt_change, tt_len,
+ batadv_tt_local_valid, NULL);
}
- tt_response->header.packet_type = BATADV_TT_QUERY;
- tt_response->header.version = BATADV_COMPAT_VERSION;
- tt_response->header.ttl = BATADV_TTL;
- memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
- memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
- tt_response->flags = BATADV_TT_RESPONSE;
+ tvlv_tt_data->flags = BATADV_TT_RESPONSE;
+ tvlv_tt_data->ttvn = req_ttvn;
if (full_table)
- tt_response->flags |= BATADV_TT_FULL_TABLE;
+ tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Sending TT_RESPONSE to %pM [%c]\n",
- orig_node->orig,
- (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+ "Sending TT_RESPONSE to %pM [%c] (ttvn: %u)\n",
+ orig_node->orig, full_table ? 'F' : '.', req_ttvn);
batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
- if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
- ret = true;
+ batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+ req_src, BATADV_TVLV_TT, 1, tvlv_tt_data,
+ tvlv_len);
+
goto out;
unlock:
spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
out:
+ spin_unlock_bh(&bat_priv->tt.commit_lock);
if (orig_node)
batadv_orig_node_free_ref(orig_node);
if (primary_if)
batadv_hardif_free_ref(primary_if);
- if (!ret)
- kfree_skb(skb);
- /* This packet was for me, so it doesn't need to be re-routed */
+ kfree(tvlv_tt_data);
+ /* The packet was for this host, so it doesn't need to be re-routed */
return true;
}
-bool batadv_send_tt_response(struct batadv_priv *bat_priv,
- struct batadv_tt_query_packet *tt_request)
+/**
+ * batadv_send_tt_response - send reply to tt request
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ * @req_dst: mac address of tt request recipient
+ *
+ * Returns true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_tt_data *tt_data,
+ uint8_t *req_src, uint8_t *req_dst)
{
- if (batadv_is_my_mac(bat_priv, tt_request->dst)) {
- /* don't answer backbone gws! */
- if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
- return true;
-
- return batadv_send_my_tt_response(bat_priv, tt_request);
- } else {
- return batadv_send_other_tt_response(bat_priv, tt_request);
- }
+ if (batadv_is_my_mac(bat_priv, req_dst))
+ return batadv_send_my_tt_response(bat_priv, tt_data, req_src);
+ else
+ return batadv_send_other_tt_response(bat_priv, tt_data,
+ req_src, req_dst);
}
static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
- struct batadv_tt_change *tt_change,
+ struct batadv_tvlv_tt_change *tt_change,
uint16_t tt_num_changes, uint8_t ttvn)
{
int i;
@@ -1997,11 +2679,13 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
batadv_tt_global_del(bat_priv, orig_node,
(tt_change + i)->addr,
+ ntohs((tt_change + i)->vid),
"tt removed by changes",
roams);
} else {
if (!batadv_tt_global_add(bat_priv, orig_node,
(tt_change + i)->addr,
+ ntohs((tt_change + i)->vid),
(tt_change + i)->flags, ttvn))
/* In case of problem while storing a
* global_entry, we stop the updating
@@ -2016,21 +2700,22 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
}
static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
- struct batadv_tt_query_packet *tt_response)
+ struct batadv_tvlv_tt_change *tt_change,
+ uint8_t ttvn, uint8_t *resp_src,
+ uint16_t num_entries)
{
struct batadv_orig_node *orig_node;
- orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
+ orig_node = batadv_orig_hash_find(bat_priv, resp_src);
if (!orig_node)
goto out;
/* Purge the old table first.. */
- batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
+ batadv_tt_global_del_orig(bat_priv, orig_node, -1,
+ "Received full table");
- _batadv_tt_update_changes(bat_priv, orig_node,
- (struct batadv_tt_change *)(tt_response + 1),
- ntohs(tt_response->tt_data),
- tt_response->ttvn);
+ _batadv_tt_update_changes(bat_priv, orig_node, tt_change, num_entries,
+ ttvn);
spin_lock_bh(&orig_node->tt_buff_lock);
kfree(orig_node->tt_buff);
@@ -2038,7 +2723,7 @@ static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
orig_node->tt_buff = NULL;
spin_unlock_bh(&orig_node->tt_buff_lock);
- atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
+ atomic_set(&orig_node->last_ttvn, ttvn);
out:
if (orig_node)
@@ -2048,22 +2733,31 @@ out:
static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
uint16_t tt_num_changes, uint8_t ttvn,
- struct batadv_tt_change *tt_change)
+ struct batadv_tvlv_tt_change *tt_change)
{
_batadv_tt_update_changes(bat_priv, orig_node, tt_change,
tt_num_changes, ttvn);
- batadv_tt_save_orig_buffer(bat_priv, orig_node,
- (unsigned char *)tt_change, tt_num_changes);
+ batadv_tt_save_orig_buffer(bat_priv, orig_node, tt_change,
+ batadv_tt_len(tt_num_changes));
atomic_set(&orig_node->last_ttvn, ttvn);
}
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
+/**
+ * batadv_is_my_client - check if a client is served by the local node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to check
+ * @vid: VLAN identifier
+ *
+ * Returns true if the client is served by this node, false otherwise.
+ */
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+ unsigned short vid)
{
struct batadv_tt_local_entry *tt_local_entry;
bool ret = false;
- tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
goto out;
/* Check if the client has been logically deleted (but is kept for
@@ -2079,72 +2773,68 @@ out:
return ret;
}
-void batadv_handle_tt_response(struct batadv_priv *bat_priv,
- struct batadv_tt_query_packet *tt_response)
+/**
+ * batadv_handle_tt_response - process incoming tt reply
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @resp_src: mac address of tt reply sender
+ * @num_entries: number of tt change entries appended to the tt data
+ */
+static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_tt_data *tt_data,
+ uint8_t *resp_src, uint16_t num_entries)
{
struct batadv_tt_req_node *node, *safe;
struct batadv_orig_node *orig_node = NULL;
- struct batadv_tt_change *tt_change;
+ struct batadv_tvlv_tt_change *tt_change;
+ uint8_t *tvlv_ptr = (uint8_t *)tt_data;
+ uint16_t change_offset;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
- tt_response->src, tt_response->ttvn,
- ntohs(tt_response->tt_data),
- (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
-
- /* we should have never asked a backbone gw */
- if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
- goto out;
+ resp_src, tt_data->ttvn, num_entries,
+ (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
- orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
+ orig_node = batadv_orig_hash_find(bat_priv, resp_src);
if (!orig_node)
goto out;
- if (tt_response->flags & BATADV_TT_FULL_TABLE) {
- batadv_tt_fill_gtable(bat_priv, tt_response);
+ spin_lock_bh(&orig_node->tt_lock);
+
+ change_offset = sizeof(struct batadv_tvlv_tt_vlan_data);
+ change_offset *= ntohs(tt_data->num_vlan);
+ change_offset += sizeof(*tt_data);
+ tvlv_ptr += change_offset;
+
+ tt_change = (struct batadv_tvlv_tt_change *)tvlv_ptr;
+ if (tt_data->flags & BATADV_TT_FULL_TABLE) {
+ batadv_tt_fill_gtable(bat_priv, tt_change, tt_data->ttvn,
+ resp_src, num_entries);
} else {
- tt_change = (struct batadv_tt_change *)(tt_response + 1);
- batadv_tt_update_changes(bat_priv, orig_node,
- ntohs(tt_response->tt_data),
- tt_response->ttvn, tt_change);
+ batadv_tt_update_changes(bat_priv, orig_node, num_entries,
+ tt_data->ttvn, tt_change);
}
+ /* Recalculate the CRC for this orig_node and store it */
+ batadv_tt_global_update_crc(bat_priv, orig_node);
+
+ spin_unlock_bh(&orig_node->tt_lock);
+
/* Delete the tt_req_node from pending tt_requests list */
spin_lock_bh(&bat_priv->tt.req_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
- if (!batadv_compare_eth(node->addr, tt_response->src))
+ if (!batadv_compare_eth(node->addr, resp_src))
continue;
list_del(&node->list);
kfree(node);
}
- spin_unlock_bh(&bat_priv->tt.req_list_lock);
- /* Recalculate the CRC for this orig_node and store it */
- orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
out:
if (orig_node)
batadv_orig_node_free_ref(orig_node);
}
-int batadv_tt_init(struct batadv_priv *bat_priv)
-{
- int ret;
-
- ret = batadv_tt_local_init(bat_priv);
- if (ret < 0)
- return ret;
-
- ret = batadv_tt_global_init(bat_priv);
- if (ret < 0)
- return ret;
-
- INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
- queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
- msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
-
- return 1;
-}
-
static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
{
struct batadv_tt_roam_node *node, *safe;
@@ -2225,14 +2915,28 @@ unlock:
return ret;
}
+/**
+ * batadv_send_roam_adv - send a roaming advertisement message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @client: mac address of the roaming client
+ * @vid: VLAN identifier
+ * @orig_node: message destination
+ *
+ * Send a ROAMING_ADV message to the node which was previously serving this
+ * client. This is done to inform the node that from now on all traffic destined
+ * for this particular roamed client has to be forwarded to the sender of the
+ * roaming message.
+ */
static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+ unsigned short vid,
struct batadv_orig_node *orig_node)
{
- struct sk_buff *skb = NULL;
- struct batadv_roam_adv_packet *roam_adv_packet;
- int ret = 1;
struct batadv_hard_iface *primary_if;
- size_t len = sizeof(*roam_adv_packet);
+ struct batadv_tvlv_roam_adv tvlv_roam;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
/* before going on we have to check whether the client has
* already roamed to us too many times
@@ -2240,40 +2944,22 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
if (!batadv_tt_check_roam_count(bat_priv, client))
goto out;
- skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
- if (!skb)
- goto out;
-
- skb->priority = TC_PRIO_CONTROL;
- skb_reserve(skb, ETH_HLEN);
-
- roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
-
- roam_adv_packet->header.packet_type = BATADV_ROAM_ADV;
- roam_adv_packet->header.version = BATADV_COMPAT_VERSION;
- roam_adv_packet->header.ttl = BATADV_TTL;
- roam_adv_packet->reserved = 0;
- primary_if = batadv_primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
- memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
- batadv_hardif_free_ref(primary_if);
- memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
- memcpy(roam_adv_packet->client, client, ETH_ALEN);
-
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Sending ROAMING_ADV to %pM (client %pM)\n",
- orig_node->orig, client);
+ "Sending ROAMING_ADV to %pM (client %pM, vid: %d)\n",
+ orig_node->orig, client, BATADV_PRINT_VID(vid));
batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
- if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
- ret = 0;
+ memcpy(tvlv_roam.client, client, sizeof(tvlv_roam.client));
+ tvlv_roam.vid = htons(vid);
+
+ batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+ orig_node->orig, BATADV_TVLV_ROAM, 1,
+ &tvlv_roam, sizeof(tvlv_roam));
out:
- if (ret && skb)
- kfree_skb(skb);
- return;
+ if (primary_if)
+ batadv_hardif_free_ref(primary_if);
}
static void batadv_tt_purge(struct work_struct *work)
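batadv_send_roam_adv() above no longer builds a dedicated BATADV_ROAM_ADV packet; it fills a small TVLV payload with the client MAC and the VID converted to network byte order and hands it to batadv_tvlv_unicast_send(). A userspace sketch of that kind of byte-order-aware packing follows; the 8-byte layout is only for the example and is not the on-wire batadv_tvlv_roam_adv format:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* sketch wire layout: 6 bytes client MAC followed by the VID in network
 * byte order (2 bytes)
 */
static size_t pack_roam_adv(uint8_t *buf, const uint8_t *client, uint16_t vid)
{
	uint16_t vid_be = htons(vid);

	memcpy(buf, client, 6);
	memcpy(buf + 6, &vid_be, sizeof(vid_be));
	return 6 + sizeof(vid_be);
}

int main(void)
{
	const uint8_t client[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t buf[8];
	size_t len, i;

	len = pack_roam_adv(buf, client, 100);
	for (i = 0; i < len; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}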
@@ -2286,7 +2972,7 @@ static void batadv_tt_purge(struct work_struct *work)
priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
bat_priv = container_of(priv_tt, struct batadv_priv, tt);
- batadv_tt_local_purge(bat_priv);
+ batadv_tt_local_purge(bat_priv, BATADV_TT_LOCAL_TIMEOUT);
batadv_tt_global_purge(bat_priv);
batadv_tt_req_purge(bat_priv);
batadv_tt_roam_purge(bat_priv);
@@ -2297,6 +2983,9 @@ static void batadv_tt_purge(struct work_struct *work)
void batadv_tt_free(struct batadv_priv *bat_priv)
{
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);
+
cancel_delayed_work_sync(&bat_priv->tt.work);
batadv_tt_local_table_free(bat_priv);
@@ -2308,19 +2997,25 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
kfree(bat_priv->tt.last_changeset);
}
-/* This function will enable or disable the specified flags for all the entries
- * in the given hash table and returns the number of modified entries
+/**
+ * batadv_tt_local_set_flags - set or unset the specified flags on the local
+ * table and possibly count them in the TT size
+ * @bat_priv: the bat priv with all the soft interface information
+ * @flags: the flag to switch
+ * @enable: whether to set or unset the flag
+ * @count: whether to increase the TT size by the number of changed entries
*/
-static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
- uint16_t flags, bool enable)
+static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv,
+ uint16_t flags, bool enable, bool count)
{
- uint32_t i;
+ struct batadv_hashtable *hash = bat_priv->tt.local_hash;
+ struct batadv_tt_common_entry *tt_common_entry;
uint16_t changed_num = 0;
struct hlist_head *head;
- struct batadv_tt_common_entry *tt_common_entry;
+ uint32_t i;
if (!hash)
- goto out;
+ return;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -2338,11 +3033,15 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
tt_common_entry->flags &= ~flags;
}
changed_num++;
+
+ if (!count)
+ continue;
+
+ batadv_tt_local_size_inc(bat_priv,
+ tt_common_entry->vid);
}
rcu_read_unlock();
}
-out:
- return changed_num;
}
/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
@@ -2370,10 +3069,11 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
continue;
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Deleting local tt entry (%pM): pending\n",
- tt_common->addr);
+ "Deleting local tt entry (%pM, vid: %d): pending\n",
+ tt_common->addr,
+ BATADV_PRINT_VID(tt_common->vid));
- atomic_dec(&bat_priv->tt.local_entry_num);
+ batadv_tt_local_size_dec(bat_priv, tt_common->vid);
hlist_del_rcu(&tt_common->hash_entry);
tt_local = container_of(tt_common,
struct batadv_tt_local_entry,
@@ -2384,22 +3084,25 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
}
}
-static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
- unsigned char **packet_buff,
- int *packet_buff_len, int packet_min_len)
+/**
+ * batadv_tt_local_commit_changes_nolock - commit all pending local tt changes
+ * which have been queued in the time since the last commit
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Caller must hold tt->commit_lock.
+ */
+static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
{
- uint16_t changed_num = 0;
-
- if (atomic_read(&bat_priv->tt.local_changes) < 1)
- return -ENOENT;
+ if (atomic_read(&bat_priv->tt.local_changes) < 1) {
+ if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))
+ batadv_tt_tvlv_container_update(bat_priv);
+ return;
+ }
- changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
- BATADV_TT_CLIENT_NEW, false);
+ batadv_tt_local_set_flags(bat_priv, BATADV_TT_CLIENT_NEW, false, true);
- /* all reset entries have to be counted as local entries */
- atomic_add(changed_num, &bat_priv->tt.local_entry_num);
batadv_tt_local_purge_pending_clients(bat_priv);
- bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
+ batadv_tt_local_update_crc(bat_priv);
/* Increment the TTVN only once per OGM interval */
atomic_inc(&bat_priv->tt.vn);
@@ -2409,49 +3112,38 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
/* reset the sending counter */
atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
-
- return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
- packet_buff_len, packet_min_len);
+ batadv_tt_tvlv_container_update(bat_priv);
}
-/* when calling this function (hard_iface == primary_if) has to be true */
-int batadv_tt_append_diff(struct batadv_priv *bat_priv,
- unsigned char **packet_buff, int *packet_buff_len,
- int packet_min_len)
+/**
+ * batadv_tt_local_commit_changes - commit all pending local tt changes which
+ * have been queued in the time since the last commit
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv)
{
- int tt_num_changes;
-
- /* if at least one change happened */
- tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
- packet_buff_len,
- packet_min_len);
-
- /* if the changes have been sent often enough */
- if ((tt_num_changes < 0) &&
- (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
- batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
- packet_min_len, packet_min_len);
- tt_num_changes = 0;
- }
-
- return tt_num_changes;
+ spin_lock_bh(&bat_priv->tt.commit_lock);
+ batadv_tt_local_commit_changes_nolock(bat_priv);
+ spin_unlock_bh(&bat_priv->tt.commit_lock);
}
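
Splitting the commit path into a _nolock worker plus a thin locked wrapper lets callers that already hold tt.commit_lock (such as the MTU resize handler added further below) run a purge-and-commit cycle without deadlocking. Condensed into a usage sketch:

	/* normal callers: the wrapper takes and releases the lock itself */
	batadv_tt_local_commit_changes(bat_priv);

	/* callers already holding tt.commit_lock must use the _nolock
	 * variant, mirroring batadv_tt_local_resize_to_mtu() below
	 */
	spin_lock_bh(&bat_priv->tt.commit_lock);
	/* ... purge local entries ... */
	batadv_tt_local_commit_changes_nolock(bat_priv);
	spin_unlock_bh(&bat_priv->tt.commit_lock);
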
bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
- uint8_t *dst)
+ uint8_t *dst, unsigned short vid)
{
struct batadv_tt_local_entry *tt_local_entry = NULL;
struct batadv_tt_global_entry *tt_global_entry = NULL;
+ struct batadv_softif_vlan *vlan;
bool ret = false;
- if (!atomic_read(&bat_priv->ap_isolation))
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (!vlan || !atomic_read(&vlan->ap_isolation))
goto out;
- tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst, vid);
if (!tt_local_entry)
goto out;
- tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, src, vid);
if (!tt_global_entry)
goto out;
@@ -2461,6 +3153,8 @@ bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
ret = true;
out:
+ if (vlan)
+ batadv_softif_vlan_free_ref(vlan);
if (tt_global_entry)
batadv_tt_global_entry_free_ref(tt_global_entry);
if (tt_local_entry)
@@ -2468,19 +3162,29 @@ out:
return ret;
}
-void batadv_tt_update_orig(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- const unsigned char *tt_buff, uint8_t tt_num_changes,
- uint8_t ttvn, uint16_t tt_crc)
+/**
+ * batadv_tt_update_orig - update global translation table with new tt
+ * information received via ogms
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @tt_vlan: pointer to the first tvlv VLAN entry
+ * @tt_num_vlan: number of tvlv VLAN entries
+ * @tt_change: pointer to the first entry in the TT buffer
+ * @tt_num_changes: number of tt changes inside the tt buffer
+ * @ttvn: translation table version number of this changeset
+ * @tt_crc: crc32 checksum of orig node's translation table
+ */
+static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const void *tt_buff, uint16_t tt_num_vlan,
+ struct batadv_tvlv_tt_change *tt_change,
+ uint16_t tt_num_changes, uint8_t ttvn)
{
uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+ struct batadv_tvlv_tt_vlan_data *tt_vlan;
bool full_table = true;
- struct batadv_tt_change *tt_change;
-
- /* don't care about a backbone gateways updates. */
- if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
- return;
+ tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
/* orig table not initialised AND first diff is in the OGM OR the ttvn
* increased by one -> we can apply the attached changes
*/
@@ -2496,7 +3200,9 @@ void batadv_tt_update_orig(struct batadv_priv *bat_priv,
goto request_table;
}
- tt_change = (struct batadv_tt_change *)tt_buff;
+ spin_lock_bh(&orig_node->tt_lock);
+
+ tt_change = (struct batadv_tvlv_tt_change *)tt_buff;
batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
ttvn, tt_change);
@@ -2504,7 +3210,9 @@ void batadv_tt_update_orig(struct batadv_priv *bat_priv,
* prefer to recompute it to spot any possible inconsistency
* in the global table
*/
- orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
+ batadv_tt_global_update_crc(bat_priv, orig_node);
+
+ spin_unlock_bh(&orig_node->tt_lock);
/* The ttvn alone is not enough to guarantee consistency
* because a single value could represent different states
@@ -2515,37 +3223,46 @@ void batadv_tt_update_orig(struct batadv_priv *bat_priv,
* checking the CRC value is mandatory to detect the
* inconsistency
*/
- if (orig_node->tt_crc != tt_crc)
+ if (!batadv_tt_global_check_crc(orig_node, tt_vlan,
+ tt_num_vlan))
goto request_table;
} else {
/* if we missed more than one change or our tables are not
* in sync anymore -> request fresh tt data
*/
if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
- orig_node->tt_crc != tt_crc) {
+ !batadv_tt_global_check_crc(orig_node, tt_vlan,
+ tt_num_vlan)) {
request_table:
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %#.4x last_crc: %#.4x num_changes: %u)\n",
- orig_node->orig, ttvn, orig_ttvn, tt_crc,
- orig_node->tt_crc, tt_num_changes);
+ "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u num_changes: %u)\n",
+ orig_node->orig, ttvn, orig_ttvn,
+ tt_num_changes);
batadv_send_tt_request(bat_priv, orig_node, ttvn,
- tt_crc, full_table);
+ tt_vlan, tt_num_vlan,
+ full_table);
return;
}
}
}
-/* returns true whether we know that the client has moved from its old
- * originator to another one. This entry is kept is still kept for consistency
- * purposes
+/**
+ * batadv_tt_global_client_is_roaming - check if a client is marked as roaming
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to check
+ * @vid: VLAN identifier
+ *
+ * Returns true if we know that the client has moved from its old originator
+ * to another one. This entry is still kept for consistency purposes and will be
+ * deleted later by a DEL or because of timeout
*/
bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
- uint8_t *addr)
+ uint8_t *addr, unsigned short vid)
{
struct batadv_tt_global_entry *tt_global_entry;
bool ret = false;
- tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
if (!tt_global_entry)
goto out;
@@ -2558,19 +3275,20 @@ out:
/**
* batadv_tt_local_client_is_roaming - tells whether the client is roaming
* @bat_priv: the bat priv with all the soft interface information
- * @addr: the MAC address of the local client to query
+ * @addr: the mac address of the local client to query
+ * @vid: VLAN identifier
*
* Returns true if the local client is known to be roaming (it is not served by
* this node anymore) or not. If yes, the client is still present in the table
* to keep the latter consistent with the node TTVN
*/
bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
- uint8_t *addr)
+ uint8_t *addr, unsigned short vid)
{
struct batadv_tt_local_entry *tt_local_entry;
bool ret = false;
- tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
goto out;
@@ -2582,26 +3300,268 @@ out:
bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
- const unsigned char *addr)
+ const unsigned char *addr,
+ unsigned short vid)
{
bool ret = false;
- /* if the originator is a backbone node (meaning it belongs to the same
- * LAN of this node) the temporary client must not be added because to
- * reach such destination the node must use the LAN instead of the mesh
- */
- if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
- goto out;
-
- if (!batadv_tt_global_add(bat_priv, orig_node, addr,
+ if (!batadv_tt_global_add(bat_priv, orig_node, addr, vid,
BATADV_TT_CLIENT_TEMP,
atomic_read(&orig_node->last_ttvn)))
goto out;
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "Added temporary global client (addr: %pM orig: %pM)\n",
- addr, orig_node->orig);
+ "Added temporary global client (addr: %pM, vid: %d, orig: %pM)\n",
+ addr, BATADV_PRINT_VID(vid), orig_node->orig);
ret = true;
out:
return ret;
}
+
+/**
+ * batadv_tt_local_resize_to_mtu - resize the local translation table to fit
+ * the
+ * maximum packet size that can be transported through the mesh
+ * @soft_iface: netdev struct of the mesh interface
+ *
+ * Remove entries older than 'timeout' and, if more entries still need to be
+ * removed, keep halving the timeout and purging again.
+ */
+void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ int packet_size_max = atomic_read(&bat_priv->packet_size_max);
+ int table_size, timeout = BATADV_TT_LOCAL_TIMEOUT / 2;
+ bool reduced = false;
+
+ spin_lock_bh(&bat_priv->tt.commit_lock);
+
+ while (true) {
+ table_size = batadv_tt_local_table_transmit_size(bat_priv);
+ if (packet_size_max >= table_size)
+ break;
+
+ batadv_tt_local_purge(bat_priv, timeout);
+ batadv_tt_local_purge_pending_clients(bat_priv);
+
+ timeout /= 2;
+ reduced = true;
+ net_ratelimited_function(batadv_info, soft_iface,
+ "Forced to purge local tt entries to fit new maximum fragment MTU (%i)\n",
+ packet_size_max);
+ }
+
+ /* commit these changes immediately, to avoid synchronization problems
+ * with the TTVN
+ */
+ if (reduced)
+ batadv_tt_local_commit_changes_nolock(bat_priv);
+
+ spin_unlock_bh(&bat_priv->tt.commit_lock);
+}
+
+/**
+ * batadv_tt_tvlv_ogm_handler_v1 - process incoming tt tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ uint8_t flags, void *tvlv_value,
+ uint16_t tvlv_value_len)
+{
+ struct batadv_tvlv_tt_vlan_data *tt_vlan;
+ struct batadv_tvlv_tt_change *tt_change;
+ struct batadv_tvlv_tt_data *tt_data;
+ uint16_t num_entries, num_vlan;
+
+ if (tvlv_value_len < sizeof(*tt_data))
+ return;
+
+ tt_data = (struct batadv_tvlv_tt_data *)tvlv_value;
+ tvlv_value_len -= sizeof(*tt_data);
+
+ num_vlan = ntohs(tt_data->num_vlan);
+
+ if (tvlv_value_len < sizeof(*tt_vlan) * num_vlan)
+ return;
+
+ tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
+ tt_change = (struct batadv_tvlv_tt_change *)(tt_vlan + num_vlan);
+ tvlv_value_len -= sizeof(*tt_vlan) * num_vlan;
+
+ num_entries = batadv_tt_entries(tvlv_value_len);
+
+ batadv_tt_update_orig(bat_priv, orig, tt_vlan, num_vlan, tt_change,
+ num_entries, tt_data->ttvn);
+}
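
The handler above doubles as documentation of the TT TVLV container layout: a fixed batadv_tvlv_tt_data header (flags, ttvn and the VLAN count), followed by num_vlan per-VLAN descriptors, followed by a variable number of TT change records that fill the rest of the buffer. A sketch of that layout and of the pointer arithmetic; member details of the per-VLAN and change records are left out because they are not part of this hunk:

/* on-wire layout as parsed by batadv_tt_tvlv_ogm_handler_v1():
 *
 *   struct batadv_tvlv_tt_data          (flags, ttvn, num_vlan)
 *   struct batadv_tvlv_tt_vlan_data[N]  N = ntohs(tt_data->num_vlan)
 *   struct batadv_tvlv_tt_change[M]     M = batadv_tt_entries(remaining len)
 */
tt_vlan   = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
tt_change = (struct batadv_tvlv_tt_change *)(tt_vlan + num_vlan);
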
+
+/**
+ * batadv_tt_tvlv_unicast_handler_v1 - process incoming (unicast) tt tvlv
+ * container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of tt tvlv sender
+ * @dst: mac address of tt tvlv recipient
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ *
+ * Returns NET_RX_DROP if the tt tvlv is to be re-routed, NET_RX_SUCCESS
+ * otherwise.
+ */
+static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
+ uint8_t *src, uint8_t *dst,
+ void *tvlv_value,
+ uint16_t tvlv_value_len)
+{
+ struct batadv_tvlv_tt_data *tt_data;
+ uint16_t tt_vlan_len, tt_num_entries;
+ char tt_flag;
+ bool ret;
+
+ if (tvlv_value_len < sizeof(*tt_data))
+ return NET_RX_SUCCESS;
+
+ tt_data = (struct batadv_tvlv_tt_data *)tvlv_value;
+ tvlv_value_len -= sizeof(*tt_data);
+
+ tt_vlan_len = sizeof(struct batadv_tvlv_tt_vlan_data);
+ tt_vlan_len *= ntohs(tt_data->num_vlan);
+
+ if (tvlv_value_len < tt_vlan_len)
+ return NET_RX_SUCCESS;
+
+ tvlv_value_len -= tt_vlan_len;
+ tt_num_entries = batadv_tt_entries(tvlv_value_len);
+
+ switch (tt_data->flags & BATADV_TT_DATA_TYPE_MASK) {
+ case BATADV_TT_REQUEST:
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
+
+ /* If this node cannot provide a TT response the tt_request is
+ * forwarded
+ */
+ ret = batadv_send_tt_response(bat_priv, tt_data, src, dst);
+ if (!ret) {
+ if (tt_data->flags & BATADV_TT_FULL_TABLE)
+ tt_flag = 'F';
+ else
+ tt_flag = '.';
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Routing TT_REQUEST to %pM [%c]\n",
+ dst, tt_flag);
+ /* tvlv API will re-route the packet */
+ return NET_RX_DROP;
+ }
+ break;
+ case BATADV_TT_RESPONSE:
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
+
+ if (batadv_is_my_mac(bat_priv, dst)) {
+ batadv_handle_tt_response(bat_priv, tt_data,
+ src, tt_num_entries);
+ return NET_RX_SUCCESS;
+ }
+
+ if (tt_data->flags & BATADV_TT_FULL_TABLE)
+ tt_flag = 'F';
+ else
+ tt_flag = '.';
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Routing TT_RESPONSE to %pM [%c]\n", dst, tt_flag);
+
+ /* tvlv API will re-route the packet */
+ return NET_RX_DROP;
+ }
+
+ return NET_RX_SUCCESS;
+}
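
A NET_RX_DROP return from a unicast TVLV handler is not a real drop: it tells the dispatching code that this node did not consume the packet and that it should keep travelling toward its destination. A heavily simplified sketch of that contract, assuming a dispatcher that walks batadv_priv_tvlv::handler_list (the real dispatch code belongs to the TVLV API and is not part of this diff; type/version matching is omitted):

	struct batadv_tvlv_handler *handler;
	int ret = NET_RX_SUCCESS;

	rcu_read_lock();
	hlist_for_each_entry_rcu(handler, &bat_priv->tvlv.handler_list, list) {
		if (!handler->unicast_handler)
			continue;
		ret = handler->unicast_handler(bat_priv, src, dst,
					       tvlv_value, tvlv_value_len);
	}
	rcu_read_unlock();

	if (ret == NET_RX_DROP) {
		/* not consumed locally: hand the skb back to the unicast
		 * routing path so it continues toward dst
		 */
	}
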
+
+/**
+ * batadv_roam_tvlv_unicast_handler_v1 - process incoming tt roam tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of tt tvlv sender
+ * @dst: mac address of tt tvlv recipient
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ *
+ * Returns NET_RX_DROP if the tt roam tvlv is to be re-routed, NET_RX_SUCCESS
+ * otherwise.
+ */
+static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
+ uint8_t *src, uint8_t *dst,
+ void *tvlv_value,
+ uint16_t tvlv_value_len)
+{
+ struct batadv_tvlv_roam_adv *roaming_adv;
+ struct batadv_orig_node *orig_node = NULL;
+
+ /* If this node is not the intended recipient of the
+ * roaming advertisement the packet is forwarded
+ * (the tvlv API will re-route the packet).
+ */
+ if (!batadv_is_my_mac(bat_priv, dst))
+ return NET_RX_DROP;
+
+ if (tvlv_value_len < sizeof(*roaming_adv))
+ goto out;
+
+ orig_node = batadv_orig_hash_find(bat_priv, src);
+ if (!orig_node)
+ goto out;
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
+ roaming_adv = (struct batadv_tvlv_roam_adv *)tvlv_value;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Received ROAMING_ADV from %pM (client %pM)\n",
+ src, roaming_adv->client);
+
+ batadv_tt_global_add(bat_priv, orig_node, roaming_adv->client,
+ ntohs(roaming_adv->vid), BATADV_TT_CLIENT_ROAM,
+ atomic_read(&orig_node->last_ttvn) + 1);
+
+out:
+ if (orig_node)
+ batadv_orig_node_free_ref(orig_node);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_tt_init - initialise the translation table internals
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return 1 on success or a negative error code in case of failure.
+ */
+int batadv_tt_init(struct batadv_priv *bat_priv)
+{
+ int ret;
+
+ /* synchronized flags must be remote */
+ BUILD_BUG_ON(!(BATADV_TT_SYNC_MASK & BATADV_TT_REMOTE_MASK));
+
+ ret = batadv_tt_local_init(bat_priv);
+ if (ret < 0)
+ return ret;
+
+ ret = batadv_tt_global_init(bat_priv);
+ if (ret < 0)
+ return ret;
+
+ batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
+ batadv_tt_tvlv_unicast_handler_v1,
+ BATADV_TVLV_TT, 1, BATADV_NO_FLAGS);
+
+ batadv_tvlv_handler_register(bat_priv, NULL,
+ batadv_roam_tvlv_unicast_handler_v1,
+ BATADV_TVLV_ROAM, 1, BATADV_NO_FLAGS);
+
+ INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
+ msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
+
+ return 1;
+}
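
batadv_tt_init() also shows the shape of the generic TVLV API introduced by this series: at init time a subsystem registers an OGM and/or unicast handler for its (type, version) pair, and on teardown it unregisters the handler and any container it advertised (see batadv_tt_free() above). A hypothetical registration/teardown pair for some other subsystem would look the same; the type constant, callbacks and data below are illustrative only, and batadv_tvlv_container_register() is assumed to be the counterpart of the unregister call used in batadv_tt_free():

/* init */
batadv_tvlv_handler_register(bat_priv, my_ogm_handler_v1,
			     my_unicast_handler_v1,
			     MY_TVLV_TYPE, 1, BATADV_NO_FLAGS);
batadv_tvlv_container_register(bat_priv, MY_TVLV_TYPE, 1,
			       &my_data, sizeof(my_data));

/* teardown, mirroring batadv_tt_free() */
batadv_tvlv_container_unregister(bat_priv, MY_TVLV_TYPE, 1);
batadv_tvlv_handler_unregister(bat_priv, MY_TVLV_TYPE, 1);
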
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 659a3bb759ce..026b1ffa6746 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -20,49 +20,35 @@
#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
-int batadv_tt_len(int changes_num);
int batadv_tt_init(struct batadv_priv *bat_priv);
-void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
- int ifindex);
+bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+ unsigned short vid, int ifindex);
uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
- const uint8_t *addr, const char *message,
- bool roaming);
+ const uint8_t *addr, unsigned short vid,
+ const char *message, bool roaming);
int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
-void batadv_tt_global_add_orig(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- const unsigned char *tt_buff, int tt_buff_len);
-int batadv_tt_global_add(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- const unsigned char *addr, uint16_t flags,
- uint8_t ttvn);
int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
- const char *message);
+ int32_t match_vid, const char *message);
struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
const uint8_t *src,
- const uint8_t *addr);
+ const uint8_t *addr,
+ unsigned short vid);
void batadv_tt_free(struct batadv_priv *bat_priv);
-bool batadv_send_tt_response(struct batadv_priv *bat_priv,
- struct batadv_tt_query_packet *tt_request);
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr);
-void batadv_handle_tt_response(struct batadv_priv *bat_priv,
- struct batadv_tt_query_packet *tt_response);
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+ unsigned short vid);
bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
- uint8_t *dst);
-void batadv_tt_update_orig(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- const unsigned char *tt_buff, uint8_t tt_num_changes,
- uint8_t ttvn, uint16_t tt_crc);
-int batadv_tt_append_diff(struct batadv_priv *bat_priv,
- unsigned char **packet_buff, int *packet_buff_len,
- int packet_min_len);
+ uint8_t *dst, unsigned short vid);
+void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv);
bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
- uint8_t *addr);
+ uint8_t *addr, unsigned short vid);
bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
- uint8_t *addr);
+ uint8_t *addr, unsigned short vid);
+void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface);
bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
- const unsigned char *addr);
+ const unsigned char *addr,
+ unsigned short vid);
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index b2c94e139319..91dd369b0ff2 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -24,13 +24,6 @@
#include "bitarray.h"
#include <linux/kernel.h>
-/**
- * Maximum overhead for the encapsulation for a payload packet
- */
-#define BATADV_HEADER_LEN \
- (ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \
- sizeof(struct batadv_bcast_packet)))
-
#ifdef CONFIG_BATMAN_ADV_DAT
/* batadv_dat_addr_t is the type used for all DHT addresses. If it is changed,
@@ -43,6 +36,18 @@
#endif /* CONFIG_BATMAN_ADV_DAT */
/**
+ * BATADV_TT_REMOTE_MASK - bitmask selecting the flags that are sent over the
+ * wire only
+ */
+#define BATADV_TT_REMOTE_MASK 0x00FF
+
+/**
+ * BATADV_TT_SYNC_MASK - bitmask of the flags that need to be kept in sync
+ * among the nodes. These flags are used to compute the global/local CRC
+ */
+#define BATADV_TT_SYNC_MASK 0x00F0
+
+/**
* struct batadv_hard_iface_bat_iv - per hard interface B.A.T.M.A.N. IV data
* @ogm_buff: buffer holding the OGM packet
* @ogm_buff_len: length of the OGM packet buffer
@@ -60,7 +65,6 @@ struct batadv_hard_iface_bat_iv {
* @if_num: identificator of the interface
* @if_status: status of the interface for batman-adv
* @net_dev: pointer to the net_device
- * @frag_seqno: last fragment sequence number sent by this interface
* @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
* @hardif_obj: kobject of the per interface sysfs "mesh" directory
* @refcount: number of contexts the object is used
@@ -76,7 +80,6 @@ struct batadv_hard_iface {
int16_t if_num;
char if_status;
struct net_device *net_dev;
- atomic_t frag_seqno;
uint8_t num_bcasts;
struct kobject *hardif_obj;
atomic_t refcount;
@@ -88,28 +91,97 @@ struct batadv_hard_iface {
};
/**
+ * struct batadv_frag_table_entry - head in the fragment buffer table
+ * @head: head of list with fragments
+ * @lock: lock to protect the list of fragments
+ * @timestamp: time (jiffie) of last received fragment
+ * @seqno: sequence number of the fragments in the list
+ * @size: accumulated size of packets in list
+ */
+struct batadv_frag_table_entry {
+ struct hlist_head head;
+ spinlock_t lock; /* protects head */
+ unsigned long timestamp;
+ uint16_t seqno;
+ uint16_t size;
+};
+
+/**
+ * struct batadv_frag_list_entry - entry in a list of fragments
+ * @list: list node information
+ * @skb: fragment
+ * @no: fragment number in the set
+ */
+struct batadv_frag_list_entry {
+ struct hlist_node list;
+ struct sk_buff *skb;
+ uint8_t no;
+};
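
These two structures replace the old per-originator frag_list: every originator now owns a small array of reassembly chains (see the fragments[BATADV_FRAG_BUFFER_COUNT] member added to batadv_orig_node below), each protected by its own spinlock and keyed by the sequence number of the fragments it currently buffers. A hedged sketch of how an incoming fragment could be slotted into such a chain; the helper name and the seqno-to-slot mapping are assumptions, not code from this diff:

/* illustrative only: select (and, if necessary, recycle) the chain that
 * should buffer a fragment with the given sequence number
 */
static struct batadv_frag_table_entry *
my_frag_chain_select(struct batadv_orig_node *orig_node, uint16_t seqno)
{
	struct batadv_frag_table_entry *chain;

	chain = &orig_node->fragments[seqno % BATADV_FRAG_BUFFER_COUNT];

	spin_lock_bh(&chain->lock);
	if (chain->seqno != seqno) {
		/* the slot holds fragments of another packet: flush them
		 * here before reusing the entry for the new seqno
		 */
		chain->seqno = seqno;
		chain->size = 0;
	}
	chain->timestamp = jiffies;

	/* caller inserts the fragment into chain->head sorted by ->no,
	 * updates chain->size and finally drops chain->lock
	 */
	return chain;
}
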
+
+/**
+ * struct batadv_vlan_tt - VLAN specific TT attributes
+ * @crc: CRC32 checksum of the entries belonging to this vlan
+ * @num_entries: number of TT entries for this VLAN
+ */
+struct batadv_vlan_tt {
+ uint32_t crc;
+ atomic_t num_entries;
+};
+
+/**
+ * struct batadv_orig_node_vlan - VLAN specific data per orig_node
+ * @vid: the VLAN identifier
+ * @tt: VLAN specific TT attributes
+ * @list: list node for orig_node::vlan_list
+ * @refcount: number of contexts where this object is currently in use
+ * @rcu: struct used for freeing in a RCU-safe manner
+ */
+struct batadv_orig_node_vlan {
+ unsigned short vid;
+ struct batadv_vlan_tt tt;
+ struct list_head list;
+ atomic_t refcount;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
+ * @bcast_own: bitfield containing the number of our OGMs this orig_node
+ * rebroadcasted "back" to us (relative to last_real_seqno)
+ * @bcast_own_sum: counted result of bcast_own
+ * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
+ * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
+ */
+struct batadv_orig_bat_iv {
+ unsigned long *bcast_own;
+ uint8_t *bcast_own_sum;
+ /* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
+ * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
+ */
+ spinlock_t ogm_cnt_lock;
+};
+
+/**
* struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
* @orig: originator ethernet address
* @primary_addr: hosts primary interface address
* @router: router that should be used to reach this originator
* @batadv_dat_addr_t: address of the orig node in the distributed hash
- * @bcast_own: bitfield containing the number of our OGMs this orig_node
- * rebroadcasted "back" to us (relative to last_real_seqno)
- * @bcast_own_sum: counted result of bcast_own
* @last_seen: time when last packet from this node was received
* @bcast_seqno_reset: time when the broadcast seqno window was reset
* @batman_seqno_reset: time when the batman seqno window was reset
- * @gw_flags: flags related to gateway class
- * @flags: for now only VIS_SERVER flag
+ * @capabilities: announced capabilities of this originator
* @last_ttvn: last seen translation table version number
- * @tt_crc: CRC of the translation table
* @tt_buff: last tt changeset this node received from the orig node
* @tt_buff_len: length of the last tt changeset this node received from the
* orig node
* @tt_buff_lock: lock that protects tt_buff and tt_buff_len
- * @tt_size: number of global TT entries announced by the orig node
* @tt_initialised: bool keeping track of whether or not this node has received
* any translation table information from the orig node yet
+ * @tt_lock: prevents the table from being updated while it is read. A table
+ * update is made up of two operations (data structure update and metadata
+ * -CRC/TTVN- recalculation) which have to be executed atomically in order to
+ * avoid another thread reading the table/metadata in between.
* @last_real_seqno: last and best known sequence number
* @last_ttl: ttl of last received packet
* @bcast_bits: bitfield containing the info which payload broadcast originated
@@ -117,14 +189,9 @@ struct batadv_hard_iface {
* last_bcast_seqno)
* @last_bcast_seqno: last broadcast sequence number received by this host
* @neigh_list: list of potential next hop neighbor towards this orig node
- * @frag_list: fragmentation buffer list for fragment re-assembly
- * @last_frag_packet: time when last fragmented packet from this node was
- * received
* @neigh_list_lock: lock protecting neigh_list, router and bonding_list
* @hash_entry: hlist node for batadv_priv::orig_hash
* @bat_priv: pointer to soft_iface this orig node belongs to
- * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
- * neigh_node->real_bits & neigh_node->real_packet_count
* @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno
* @bond_candidates: how many candidates are available
* @bond_list: list of bonding candidates
@@ -134,6 +201,11 @@ struct batadv_hard_iface {
* @out_coding_list: list of nodes that can hear this orig
* @in_coding_list_lock: protects in_coding_list
* @out_coding_list_lock: protects out_coding_list
+ * @fragments: array with heads for fragment chains
+ * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by the
+ * originator represented by this object
+ * @vlan_list_lock: lock protecting vlan_list
+ * @bat_iv: B.A.T.M.A.N. IV private structure
*/
struct batadv_orig_node {
uint8_t orig[ETH_ALEN];
@@ -142,35 +214,26 @@ struct batadv_orig_node {
#ifdef CONFIG_BATMAN_ADV_DAT
batadv_dat_addr_t dat_addr;
#endif
- unsigned long *bcast_own;
- uint8_t *bcast_own_sum;
unsigned long last_seen;
unsigned long bcast_seqno_reset;
unsigned long batman_seqno_reset;
- uint8_t gw_flags;
- uint8_t flags;
+ uint8_t capabilities;
atomic_t last_ttvn;
- uint16_t tt_crc;
unsigned char *tt_buff;
int16_t tt_buff_len;
spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
- atomic_t tt_size;
bool tt_initialised;
+ /* prevents from changing the table while reading it */
+ spinlock_t tt_lock;
uint32_t last_real_seqno;
uint8_t last_ttl;
DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
uint32_t last_bcast_seqno;
struct hlist_head neigh_list;
- struct list_head frag_list;
- unsigned long last_frag_packet;
/* neigh_list_lock protects: neigh_list, router & bonding_list */
spinlock_t neigh_list_lock;
struct hlist_node hash_entry;
struct batadv_priv *bat_priv;
- /* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
- * neigh_node->real_bits & neigh_node->real_packet_count
- */
- spinlock_t ogm_cnt_lock;
/* bcast_seqno_lock protects: bcast_bits & last_bcast_seqno */
spinlock_t bcast_seqno_lock;
atomic_t bond_candidates;
@@ -183,12 +246,28 @@ struct batadv_orig_node {
spinlock_t in_coding_list_lock; /* Protects in_coding_list */
spinlock_t out_coding_list_lock; /* Protects out_coding_list */
#endif
+ struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
+ struct list_head vlan_list;
+ spinlock_t vlan_list_lock; /* protects vlan_list */
+ struct batadv_orig_bat_iv bat_iv;
+};
+
+/**
+ * enum batadv_orig_capabilities - orig node capabilities
+ * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table enabled
+ * @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled
+ */
+enum batadv_orig_capabilities {
+ BATADV_ORIG_CAPA_HAS_DAT = BIT(0),
+ BATADV_ORIG_CAPA_HAS_NC = BIT(1),
};
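
The new capabilities bitfield condenses the old per-feature booleans into a single byte per originator. Usage is the plain bit-flag idiom; for example (hedged, the exact call sites are outside this diff):

	/* remember that this originator announced DAT support */
	orig_node->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;

	/* later, only coordinate network coding with capable neighbors */
	if (orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC) {
		/* ... set up coding opportunities with this node ... */
	}
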
/**
* struct batadv_gw_node - structure for orig nodes announcing gw capabilities
* @list: list node for batadv_priv_gw::list
* @orig_node: pointer to corresponding orig node
+ * @bandwidth_down: advertised uplink download bandwidth
+ * @bandwidth_up: advertised uplink upload bandwidth
* @deleted: this struct is scheduled for deletion
* @refcount: number of contexts the object is used
* @rcu: struct used for freeing in an RCU-safe manner
@@ -196,46 +275,57 @@ struct batadv_orig_node {
struct batadv_gw_node {
struct hlist_node list;
struct batadv_orig_node *orig_node;
+ uint32_t bandwidth_down;
+ uint32_t bandwidth_up;
unsigned long deleted;
atomic_t refcount;
struct rcu_head rcu;
};
/**
- * struct batadv_neigh_node - structure for single hop neighbors
- * @list: list node for batadv_orig_node::neigh_list
- * @addr: mac address of neigh node
+ * struct batadv_neigh_bat_iv - B.A.T.M.A.N. IV specific structure for single
+ * hop neighbors
* @tq_recv: ring buffer of received TQ values from this neigh node
* @tq_index: ring buffer index
* @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
- * @last_ttl: last received ttl from this neigh node
- * @bonding_list: list node for batadv_orig_node::bond_list
- * @last_seen: when last packet via this neighbor was received
* @real_bits: bitfield containing the number of OGMs received from this neigh
* node (relative to orig_node->last_real_seqno)
* @real_packet_count: counted result of real_bits
+ * @lq_update_lock: lock protecting tq_recv & tq_index
+ */
+struct batadv_neigh_bat_iv {
+ uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
+ uint8_t tq_index;
+ uint8_t tq_avg;
+ DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+ uint8_t real_packet_count;
+ spinlock_t lq_update_lock; /* protects tq_recv & tq_index */
+};
+
+/**
+ * struct batadv_neigh_node - structure for single hops neighbors
+ * @list: list node for batadv_orig_node::neigh_list
* @orig_node: pointer to corresponding orig_node
+ * @addr: the MAC address of the neighboring interface
* @if_incoming: pointer to incoming hard interface
- * @lq_update_lock: lock protecting tq_recv & tq_index
+ * @last_seen: when last packet via this neighbor was received
+ * @last_ttl: last received ttl from this neigh node
+ * @bonding_list: list node for batadv_orig_node::bond_list
* @refcount: number of contexts the object is used
* @rcu: struct used for freeing in an RCU-safe manner
+ * @bat_iv: B.A.T.M.A.N. IV private structure
*/
struct batadv_neigh_node {
struct hlist_node list;
+ struct batadv_orig_node *orig_node;
uint8_t addr[ETH_ALEN];
- uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
- uint8_t tq_index;
- uint8_t tq_avg;
+ struct batadv_hard_iface *if_incoming;
+ unsigned long last_seen;
uint8_t last_ttl;
struct list_head bonding_list;
- unsigned long last_seen;
- DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
- uint8_t real_packet_count;
- struct batadv_orig_node *orig_node;
- struct batadv_hard_iface *if_incoming;
- spinlock_t lq_update_lock; /* protects tq_recv & tq_index */
atomic_t refcount;
struct rcu_head rcu;
+ struct batadv_neigh_bat_iv bat_iv;
};
/**
@@ -265,6 +355,12 @@ struct batadv_bcast_duplist_entry {
* @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes counter
* @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
* @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes counter
+ * @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter
+ * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
+ * @BATADV_CNT_FRAG_RX: received fragment traffic packet counter
+ * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
+ * @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter
+ * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
* @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
* @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter
* @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet counter
@@ -302,6 +398,12 @@ enum batadv_counters {
BATADV_CNT_MGMT_TX_BYTES,
BATADV_CNT_MGMT_RX,
BATADV_CNT_MGMT_RX_BYTES,
+ BATADV_CNT_FRAG_TX,
+ BATADV_CNT_FRAG_TX_BYTES,
+ BATADV_CNT_FRAG_RX,
+ BATADV_CNT_FRAG_RX_BYTES,
+ BATADV_CNT_FRAG_FWD,
+ BATADV_CNT_FRAG_FWD_BYTES,
BATADV_CNT_TT_REQUEST_TX,
BATADV_CNT_TT_REQUEST_RX,
BATADV_CNT_TT_RESPONSE_TX,
@@ -343,11 +445,14 @@ enum batadv_counters {
* @changes_list_lock: lock protecting changes_list
* @req_list_lock: lock protecting req_list
* @roam_list_lock: lock protecting roam_list
- * @local_entry_num: number of entries in the local hash table
- * @local_crc: Checksum of the local table, recomputed before sending a new OGM
* @last_changeset: last tt changeset this host has generated
* @last_changeset_len: length of last tt changeset this host has generated
* @last_changeset_lock: lock protecting last_changeset & last_changeset_len
+ * @commit_lock: prevents a local TT commit from being executed while the
+ * local table is read. The local TT commit is made up of two operations (data
+ * structure update and metadata -CRC/TTVN- recalculation) which have to be
+ * executed atomically in order to avoid another thread reading the
+ * table/metadata in between.
* @work: work queue callback item for translation table purging
*/
struct batadv_priv_tt {
@@ -362,12 +467,12 @@ struct batadv_priv_tt {
spinlock_t changes_list_lock; /* protects changes */
spinlock_t req_list_lock; /* protects req_list */
spinlock_t roam_list_lock; /* protects roam_list */
- atomic_t local_entry_num;
- uint16_t local_crc;
unsigned char *last_changeset;
int16_t last_changeset_len;
/* protects last_changeset & last_changeset_len */
spinlock_t last_changeset_lock;
+ /* prevents from executing a commit while reading the table */
+ spinlock_t commit_lock;
struct delayed_work work;
};
@@ -420,31 +525,31 @@ struct batadv_priv_debug_log {
* @list: list of available gateway nodes
* @list_lock: lock protecting gw_list & curr_gw
* @curr_gw: pointer to currently selected gateway node
+ * @bandwidth_down: advertised uplink download bandwidth (if gw_mode server)
+ * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
* @reselect: bool indicating a gateway re-selection is in progress
*/
struct batadv_priv_gw {
struct hlist_head list;
spinlock_t list_lock; /* protects gw_list & curr_gw */
struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
+ atomic_t bandwidth_down;
+ atomic_t bandwidth_up;
atomic_t reselect;
};
/**
- * struct batadv_priv_vis - per mesh interface vis data
- * @send_list: list of batadv_vis_info packets to sent
- * @hash: hash table containing vis data from other nodes in the network
- * @hash_lock: lock protecting the hash table
- * @list_lock: lock protecting my_info::recv_list
- * @work: work queue callback item for vis packet sending
- * @my_info: holds this node's vis data sent on a regular basis
+ * struct batadv_priv_tvlv - per mesh interface tvlv data
+ * @container_list: list of registered tvlv containers to be sent with each OGM
+ * @handler_list: list of the various tvlv content handlers
+ * @container_list_lock: protects tvlv container list access
+ * @handler_list_lock: protects handler list access
*/
-struct batadv_priv_vis {
- struct list_head send_list;
- struct batadv_hashtable *hash;
- spinlock_t hash_lock; /* protects hash */
- spinlock_t list_lock; /* protects my_info::recv_list */
- struct delayed_work work;
- struct batadv_vis_info *my_info;
+struct batadv_priv_tvlv {
+ struct hlist_head container_list;
+ struct hlist_head handler_list;
+ spinlock_t container_list_lock; /* protects container_list */
+ spinlock_t handler_list_lock; /* protects handler_list */
};
/**
@@ -491,6 +596,26 @@ struct batadv_priv_nc {
};
/**
+ * struct batadv_softif_vlan - per VLAN attributes set
+ * @vid: VLAN identifier
+ * @kobj: kobject for sysfs vlan subdirectory
+ * @ap_isolation: AP isolation state
+ * @tt: TT private attributes (VLAN specific)
+ * @list: list node for bat_priv::softif_vlan_list
+ * @refcount: number of contexts where this object is currently in use
+ * @rcu: struct used for freeing in a RCU-safe manner
+ */
+struct batadv_softif_vlan {
+ unsigned short vid;
+ struct kobject *kobj;
+ atomic_t ap_isolation; /* boolean */
+ struct batadv_vlan_tt tt;
+ struct hlist_node list;
+ atomic_t refcount;
+ struct rcu_head rcu;
+};
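
struct batadv_softif_vlan moves attributes such as ap_isolation from the mesh interface to the individual VLAN. The lookup/refcount pattern used with it is already visible in batadv_is_ap_isolated() above; condensed into a sketch (the helper name below is hypothetical, the two batadv_softif_vlan_* calls are the ones used in this diff):

static bool my_vlan_ap_isolation_enabled(struct batadv_priv *bat_priv,
					 unsigned short vid)
{
	struct batadv_softif_vlan *vlan;
	bool isolated;

	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (!vlan)
		return false;	/* unknown VLAN on this soft interface */

	isolated = atomic_read(&vlan->ap_isolation);

	/* every successful batadv_softif_vlan_get() must be balanced by a
	 * batadv_softif_vlan_free_ref()
	 */
	batadv_softif_vlan_free_ref(vlan);

	return isolated;
}
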
+
+/**
* struct batadv_priv - per mesh interface data
* @mesh_state: current status of the mesh (inactive/active/deactivating)
* @soft_iface: net device which holds this struct as private data
@@ -499,15 +624,15 @@ struct batadv_priv_nc {
* @aggregated_ogms: bool indicating whether OGM aggregation is enabled
* @bonding: bool indicating whether traffic bonding is enabled
* @fragmentation: bool indicating whether traffic fragmentation is enabled
- * @ap_isolation: bool indicating whether ap isolation is enabled
+ * @packet_size_max: max packet size that can be transmitted via
+ * multiple fragmented skbs or a single frame if fragmentation is disabled
+ * @frag_seqno: incremental counter to identify chains of egress fragments
* @bridge_loop_avoidance: bool indicating whether bridge loop avoidance is
* enabled
* @distributed_arp_table: bool indicating whether distributed ARP table is
* enabled
- * @vis_mode: vis operation: client or server (see batadv_vis_packettype)
* @gw_mode: gateway operation: off, client or server (see batadv_gw_modes)
* @gw_sel_class: gateway selection class (applies if gw_mode client)
- * @gw_bandwidth: gateway announced bandwidth (applies if gw_mode server)
* @orig_interval: OGM broadcast interval in milliseconds
* @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop
* @log_level: configured log level (see batadv_dbg_level)
@@ -527,11 +652,14 @@ struct batadv_priv_nc {
* @primary_if: one of the hard interfaces assigned to this mesh interface
* becomes the primary interface
* @bat_algo_ops: routing algorithm used by this mesh interface
+ * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top
+ * of the mesh interface represented by this object
+ * @softif_vlan_list_lock: lock protecting softif_vlan_list
* @bla: bridge loop avoidance data
* @debug_log: holding debug logging relevant data
* @gw: gateway data
* @tt: translation table data
- * @vis: vis data
+ * @tvlv: type-version-length-value data
* @dat: distributed arp table data
* @network_coding: bool indicating whether network coding is enabled
* @batadv_priv_nc: network coding data
@@ -544,17 +672,16 @@ struct batadv_priv {
atomic_t aggregated_ogms;
atomic_t bonding;
atomic_t fragmentation;
- atomic_t ap_isolation;
+ atomic_t packet_size_max;
+ atomic_t frag_seqno;
#ifdef CONFIG_BATMAN_ADV_BLA
atomic_t bridge_loop_avoidance;
#endif
#ifdef CONFIG_BATMAN_ADV_DAT
atomic_t distributed_arp_table;
#endif
- atomic_t vis_mode;
atomic_t gw_mode;
atomic_t gw_sel_class;
- atomic_t gw_bandwidth;
atomic_t orig_interval;
atomic_t hop_penalty;
#ifdef CONFIG_BATMAN_ADV_DEBUG
@@ -575,6 +702,8 @@ struct batadv_priv {
struct work_struct cleanup_work;
struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
struct batadv_algo_ops *bat_algo_ops;
+ struct hlist_head softif_vlan_list;
+ spinlock_t softif_vlan_list_lock; /* protects softif_vlan_list */
#ifdef CONFIG_BATMAN_ADV_BLA
struct batadv_priv_bla bla;
#endif
@@ -583,7 +712,7 @@ struct batadv_priv {
#endif
struct batadv_priv_gw gw;
struct batadv_priv_tt tt;
- struct batadv_priv_vis vis;
+ struct batadv_priv_tvlv tvlv;
#ifdef CONFIG_BATMAN_ADV_DAT
struct batadv_priv_dat dat;
#endif
@@ -620,7 +749,7 @@ struct batadv_socket_client {
struct batadv_socket_packet {
struct list_head list;
size_t icmp_len;
- struct batadv_icmp_packet_rr icmp_packet;
+ uint8_t icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
};
/**
@@ -677,6 +806,7 @@ struct batadv_bla_claim {
/**
* struct batadv_tt_common_entry - tt local & tt global common data
* @addr: mac address of non-mesh client
+ * @vid: VLAN identifier
* @hash_entry: hlist node for batadv_priv_tt::local_hash or for
* batadv_priv_tt::global_hash
* @flags: various state handling flags (see batadv_tt_client_flags)
@@ -686,6 +816,7 @@ struct batadv_bla_claim {
*/
struct batadv_tt_common_entry {
uint8_t addr[ETH_ALEN];
+ unsigned short vid;
struct hlist_node hash_entry;
uint16_t flags;
unsigned long added_at;
@@ -740,7 +871,7 @@ struct batadv_tt_orig_list_entry {
*/
struct batadv_tt_change_node {
struct list_head list;
- struct batadv_tt_change change;
+ struct batadv_tvlv_tt_change change;
};
/**
@@ -866,78 +997,6 @@ struct batadv_forw_packet {
};
/**
- * struct batadv_frag_packet_list_entry - storage for fragment packet
- * @list: list node for orig_node::frag_list
- * @seqno: sequence number of the fragment
- * @skb: fragment's skb buffer
- */
-struct batadv_frag_packet_list_entry {
- struct list_head list;
- uint16_t seqno;
- struct sk_buff *skb;
-};
-
-/**
- * struct batadv_vis_info - local data for vis information
- * @first_seen: timestamp used for purging stale vis info entries
- * @recv_list: List of server-neighbors we have received this packet from. This
- * packet should not be re-forward to them again. List elements are struct
- * batadv_vis_recvlist_node
- * @send_list: list of packets to be forwarded
- * @refcount: number of contexts the object is used
- * @hash_entry: hlist node for batadv_priv_vis::hash
- * @bat_priv: pointer to soft_iface this orig node belongs to
- * @skb_packet: contains the vis packet
- */
-struct batadv_vis_info {
- unsigned long first_seen;
- struct list_head recv_list;
- struct list_head send_list;
- struct kref refcount;
- struct hlist_node hash_entry;
- struct batadv_priv *bat_priv;
- struct sk_buff *skb_packet;
-} __packed;
-
-/**
- * struct batadv_vis_info_entry - contains link information for vis
- * @src: source MAC of the link, all zero for local TT entry
- * @dst: destination MAC of the link, client mac address for local TT entry
- * @quality: transmission quality of the link, or 0 for local TT entry
- */
-struct batadv_vis_info_entry {
- uint8_t src[ETH_ALEN];
- uint8_t dest[ETH_ALEN];
- uint8_t quality;
-} __packed;
-
-/**
- * struct batadv_vis_recvlist_node - list entry for batadv_vis_info::recv_list
- * @list: list node for batadv_vis_info::recv_list
- * @mac: MAC address of the originator from where the vis_info was received
- */
-struct batadv_vis_recvlist_node {
- struct list_head list;
- uint8_t mac[ETH_ALEN];
-};
-
-/**
- * struct batadv_vis_if_list_entry - auxiliary data for vis data generation
- * @addr: MAC address of the interface
- * @primary: true if this interface is the primary interface
- * @list: list node the interface list
- *
- * While scanning for vis-entries of a particular vis-originator
- * this list collects its interfaces to create a subgraph/cluster
- * out of them later
- */
-struct batadv_vis_if_list_entry {
- uint8_t addr[ETH_ALEN];
- bool primary;
- struct hlist_node list;
-};
-
-/**
* struct batadv_algo_ops - mesh algorithm callbacks
* @list: list node for the batadv_algo_list
* @name: name of the algorithm
@@ -948,6 +1007,16 @@ struct batadv_vis_if_list_entry {
* @bat_primary_iface_set: called when primary interface is selected / changed
* @bat_ogm_schedule: prepare a new outgoing OGM for the send queue
* @bat_ogm_emit: send scheduled OGM
+ * @bat_neigh_cmp: compare the metrics of two neighbors
+ * @bat_neigh_is_equiv_or_better: check if neigh1 is equally good or
+ * better than neigh2 from the metric perspective
+ * @bat_orig_print: print the originator table (optional)
+ * @bat_orig_free: free the resources allocated by the routing algorithm for an
+ * orig_node object
+ * @bat_orig_add_if: ask the routing algorithm to apply the needed changes to
+ * the orig_node due to a new hard-interface being added into the mesh
+ * @bat_orig_del_if: ask the routing algorithm to apply the needed changes to
+ * the orig_node due to a hard-interface being removed from the mesh
*/
struct batadv_algo_ops {
struct hlist_node list;
@@ -958,6 +1027,17 @@ struct batadv_algo_ops {
void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface);
void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface);
void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
+ int (*bat_neigh_cmp)(struct batadv_neigh_node *neigh1,
+ struct batadv_neigh_node *neigh2);
+ bool (*bat_neigh_is_equiv_or_better)(struct batadv_neigh_node *neigh1,
+ struct batadv_neigh_node *neigh2);
+ /* orig_node handling API */
+ void (*bat_orig_print)(struct batadv_priv *priv, struct seq_file *seq);
+ void (*bat_orig_free)(struct batadv_orig_node *orig_node);
+ int (*bat_orig_add_if)(struct batadv_orig_node *orig_node,
+ int max_if_num);
+ int (*bat_orig_del_if)(struct batadv_orig_node *orig_node,
+ int max_if_num, int del_if_num);
};
/**
@@ -965,6 +1045,7 @@ struct batadv_algo_ops {
* is used to stored ARP entries needed for the global DAT cache
* @ip: the IPv4 corresponding to this DAT/ARP entry
* @mac_addr: the MAC address associated to the stored IPv4
+ * @vid: the vlan ID associated to this entry
* @last_update: time in jiffies when this entry was refreshed last time
* @hash_entry: hlist node for batadv_priv_dat::hash
* @refcount: number of contexts the object is used
@@ -973,6 +1054,7 @@ struct batadv_algo_ops {
struct batadv_dat_entry {
__be32 ip;
uint8_t mac_addr[ETH_ALEN];
+ unsigned short vid;
unsigned long last_update;
struct hlist_node hash_entry;
atomic_t refcount;
@@ -992,4 +1074,60 @@ struct batadv_dat_candidate {
struct batadv_orig_node *orig_node;
};
+/**
+ * struct batadv_tvlv_container - container for tvlv appended to OGMs
+ * @list: hlist node for batadv_priv_tvlv::container_list
+ * @tvlv_hdr: tvlv header information needed to construct the tvlv
+ * @value_len: length of the buffer following this struct which contains
+ * the actual tvlv payload
+ * @refcount: number of contexts the object is used
+ */
+struct batadv_tvlv_container {
+ struct hlist_node list;
+ struct batadv_tvlv_hdr tvlv_hdr;
+ atomic_t refcount;
+};
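
The kernel-doc above mentions a @value_len buffer even though struct batadv_tvlv_container itself only stores the header: the actual TVLV payload is expected to be allocated directly behind the structure, so header and value can later be copied into the OGM as one block. A hedged sketch of that allocation scheme (the tvlv_hdr member names are assumptions based on the usual type/version/len layout):

	struct batadv_tvlv_container *tvlv;

	/* one allocation for bookkeeping + payload; the payload starts right
	 * after the struct, i.e. at (tvlv + 1)
	 */
	tvlv = kzalloc(sizeof(*tvlv) + value_len, GFP_ATOMIC);
	if (!tvlv)
		return;

	tvlv->tvlv_hdr.type = type;
	tvlv->tvlv_hdr.version = version;
	tvlv->tvlv_hdr.len = htons(value_len);
	memcpy(tvlv + 1, value, value_len);
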
+
+/**
+ * struct batadv_tvlv_handler - handler for specific tvlv type and version
+ * @list: hlist node for batadv_priv_tvlv::handler_list
+ * @ogm_handler: handler callback which is given the tvlv payload to process on
+ * incoming OGM packets
+ * @unicast_handler: handler callback which is given the tvlv payload to process
+ * on incoming unicast tvlv packets
+ * @type: tvlv type this handler feels responsible for
+ * @version: tvlv version this handler feels responsible for
+ * @flags: tvlv handler flags
+ * @refcount: number of contexts the object is used
+ * @rcu: struct used for freeing in an RCU-safe manner
+ */
+struct batadv_tvlv_handler {
+ struct hlist_node list;
+ void (*ogm_handler)(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ uint8_t flags,
+ void *tvlv_value, uint16_t tvlv_value_len);
+ int (*unicast_handler)(struct batadv_priv *bat_priv,
+ uint8_t *src, uint8_t *dst,
+ void *tvlv_value, uint16_t tvlv_value_len);
+ uint8_t type;
+ uint8_t version;
+ uint8_t flags;
+ atomic_t refcount;
+ struct rcu_head rcu;
+};
+
+/**
+ * enum batadv_tvlv_handler_flags - tvlv handler flags definitions
+ * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function will call
+ * this handler even if its type was not found (with no data)
+ * @BATADV_TVLV_HANDLER_OGM_CALLED: internal tvlv handling flag - the API marks
+ * a handler as being called, so it won't be called if the
+ * BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
+ */
+enum batadv_tvlv_handler_flags {
+ BATADV_TVLV_HANDLER_OGM_CIFNOTFND = BIT(1),
+ BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2),
+};
+
#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
deleted file mode 100644
index 48b31d33ce6b..000000000000
--- a/net/batman-adv/unicast.c
+++ /dev/null
@@ -1,491 +0,0 @@
-/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
- *
- * Andreas Langer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#include "main.h"
-#include "unicast.h"
-#include "send.h"
-#include "soft-interface.h"
-#include "gateway_client.h"
-#include "originator.h"
-#include "hash.h"
-#include "translation-table.h"
-#include "routing.h"
-#include "hard-interface.h"
-
-
-static struct sk_buff *
-batadv_frag_merge_packet(struct list_head *head,
- struct batadv_frag_packet_list_entry *tfp,
- struct sk_buff *skb)
-{
- struct batadv_unicast_frag_packet *up;
- struct sk_buff *tmp_skb;
- struct batadv_unicast_packet *unicast_packet;
- int hdr_len = sizeof(*unicast_packet);
- int uni_diff = sizeof(*up) - hdr_len;
- uint8_t *packet_pos;
-
- up = (struct batadv_unicast_frag_packet *)skb->data;
- /* set skb to the first part and tmp_skb to the second part */
- if (up->flags & BATADV_UNI_FRAG_HEAD) {
- tmp_skb = tfp->skb;
- } else {
- tmp_skb = skb;
- skb = tfp->skb;
- }
-
- if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
- goto err;
-
- skb_pull(tmp_skb, sizeof(*up));
- if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
- goto err;
-
- /* move free entry to end */
- tfp->skb = NULL;
- tfp->seqno = 0;
- list_move_tail(&tfp->list, head);
-
- memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
- kfree_skb(tmp_skb);
-
- memmove(skb->data + uni_diff, skb->data, hdr_len);
- packet_pos = skb_pull(skb, uni_diff);
- unicast_packet = (struct batadv_unicast_packet *)packet_pos;
- unicast_packet->header.packet_type = BATADV_UNICAST;
-
- return skb;
-
-err:
- /* free buffered skb, skb will be freed later */
- kfree_skb(tfp->skb);
- return NULL;
-}
-
-static void batadv_frag_create_entry(struct list_head *head,
- struct sk_buff *skb)
-{
- struct batadv_frag_packet_list_entry *tfp;
- struct batadv_unicast_frag_packet *up;
-
- up = (struct batadv_unicast_frag_packet *)skb->data;
-
- /* free and oldest packets stand at the end */
- tfp = list_entry((head)->prev, typeof(*tfp), list);
- kfree_skb(tfp->skb);
-
- tfp->seqno = ntohs(up->seqno);
- tfp->skb = skb;
- list_move(&tfp->list, head);
- return;
-}
-
-static int batadv_frag_create_buffer(struct list_head *head)
-{
- int i;
- struct batadv_frag_packet_list_entry *tfp;
-
- for (i = 0; i < BATADV_FRAG_BUFFER_SIZE; i++) {
- tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
- if (!tfp) {
- batadv_frag_list_free(head);
- return -ENOMEM;
- }
- tfp->skb = NULL;
- tfp->seqno = 0;
- INIT_LIST_HEAD(&tfp->list);
- list_add(&tfp->list, head);
- }
-
- return 0;
-}
-
-static struct batadv_frag_packet_list_entry *
-batadv_frag_search_packet(struct list_head *head,
- const struct batadv_unicast_frag_packet *up)
-{
- struct batadv_frag_packet_list_entry *tfp;
- struct batadv_unicast_frag_packet *tmp_up = NULL;
- bool is_head_tmp, is_head;
- uint16_t search_seqno;
-
- if (up->flags & BATADV_UNI_FRAG_HEAD)
- search_seqno = ntohs(up->seqno)+1;
- else
- search_seqno = ntohs(up->seqno)-1;
-
- is_head = up->flags & BATADV_UNI_FRAG_HEAD;
-
- list_for_each_entry(tfp, head, list) {
- if (!tfp->skb)
- continue;
-
- if (tfp->seqno == ntohs(up->seqno))
- goto mov_tail;
-
- tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
-
- if (tfp->seqno == search_seqno) {
- is_head_tmp = tmp_up->flags & BATADV_UNI_FRAG_HEAD;
- if (is_head_tmp != is_head)
- return tfp;
- else
- goto mov_tail;
- }
- }
- return NULL;
-
-mov_tail:
- list_move_tail(&tfp->list, head);
- return NULL;
-}
-
-void batadv_frag_list_free(struct list_head *head)
-{
- struct batadv_frag_packet_list_entry *pf, *tmp_pf;
-
- if (!list_empty(head)) {
- list_for_each_entry_safe(pf, tmp_pf, head, list) {
- kfree_skb(pf->skb);
- list_del(&pf->list);
- kfree(pf);
- }
- }
- return;
-}
-
-/* frag_reassemble_skb():
- * returns NET_RX_DROP if the operation failed - skb is left intact
- * returns NET_RX_SUCCESS if the fragment was buffered (skb_new will be NULL)
- * or the skb could be reassembled (skb_new will point to the new packet and
- * skb was freed)
- */
-int batadv_frag_reassemble_skb(struct sk_buff *skb,
- struct batadv_priv *bat_priv,
- struct sk_buff **new_skb)
-{
- struct batadv_orig_node *orig_node;
- struct batadv_frag_packet_list_entry *tmp_frag_entry;
- int ret = NET_RX_DROP;
- struct batadv_unicast_frag_packet *unicast_packet;
-
- unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
- *new_skb = NULL;
-
- orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->orig);
- if (!orig_node)
- goto out;
-
- orig_node->last_frag_packet = jiffies;
-
- if (list_empty(&orig_node->frag_list) &&
- batadv_frag_create_buffer(&orig_node->frag_list)) {
- pr_debug("couldn't create frag buffer\n");
- goto out;
- }
-
- tmp_frag_entry = batadv_frag_search_packet(&orig_node->frag_list,
- unicast_packet);
-
- if (!tmp_frag_entry) {
- batadv_frag_create_entry(&orig_node->frag_list, skb);
- ret = NET_RX_SUCCESS;
- goto out;
- }
-
- *new_skb = batadv_frag_merge_packet(&orig_node->frag_list,
- tmp_frag_entry, skb);
- /* if not, merge failed */
- if (*new_skb)
- ret = NET_RX_SUCCESS;
-
-out:
- if (orig_node)
- batadv_orig_node_free_ref(orig_node);
- return ret;
-}
-
-int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
- struct batadv_hard_iface *hard_iface,
- const uint8_t dstaddr[])
-{
- struct batadv_unicast_packet tmp_uc, *unicast_packet;
- struct batadv_hard_iface *primary_if;
- struct sk_buff *frag_skb;
- struct batadv_unicast_frag_packet *frag1, *frag2;
- int uc_hdr_len = sizeof(*unicast_packet);
- int ucf_hdr_len = sizeof(*frag1);
- int data_len = skb->len - uc_hdr_len;
- int large_tail = 0, ret = NET_RX_DROP;
- uint16_t seqno;
-
- primary_if = batadv_primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto dropped;
-
- frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
- if (!frag_skb)
- goto dropped;
-
- skb->priority = TC_PRIO_CONTROL;
- skb_reserve(frag_skb, ucf_hdr_len);
-
- unicast_packet = (struct batadv_unicast_packet *)skb->data;
- memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
- skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
-
- if (batadv_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
- batadv_skb_head_push(frag_skb, ucf_hdr_len) < 0)
- goto drop_frag;
-
- frag1 = (struct batadv_unicast_frag_packet *)skb->data;
- frag2 = (struct batadv_unicast_frag_packet *)frag_skb->data;
-
- memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
-
- frag1->header.ttl--;
- frag1->header.version = BATADV_COMPAT_VERSION;
- frag1->header.packet_type = BATADV_UNICAST_FRAG;
-
- memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
- memcpy(frag2, frag1, sizeof(*frag2));
-
- if (data_len & 1)
- large_tail = BATADV_UNI_FRAG_LARGETAIL;
-
- frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
- frag2->flags = large_tail;
-
- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
- frag1->seqno = htons(seqno - 1);
- frag2->seqno = htons(seqno);
-
- batadv_send_skb_packet(skb, hard_iface, dstaddr);
- batadv_send_skb_packet(frag_skb, hard_iface, dstaddr);
- ret = NET_RX_SUCCESS;
- goto out;
-
-drop_frag:
- kfree_skb(frag_skb);
-dropped:
- kfree_skb(skb);
-out:
- if (primary_if)
- batadv_hardif_free_ref(primary_if);
- return ret;
-}
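For reference, a minimal user-space sketch (not part of this patch) of how the two fragment sequence numbers above are paired: a single atomic add of 2 reserves two consecutive numbers per call, so concurrent senders on the same interface never hand out overlapping seqnos. The counter name and the C11 atomics are illustrative assumptions, not kernel API.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* assumed per-interface counter, analogous to hard_iface->frag_seqno */
static atomic_uint frag_seqno;

static void assign_frag_seqnos(uint16_t *seq1, uint16_t *seq2)
{
	/* atomic_fetch_add() returns the old value; the kernel's
	 * atomic_add_return() returns the new one, hence the +2.
	 */
	unsigned int seqno = atomic_fetch_add(&frag_seqno, 2) + 2;

	*seq1 = (uint16_t)(seqno - 1);	/* first fragment */
	*seq2 = (uint16_t)seqno;	/* second fragment */
}

int main(void)
{
	uint16_t a, b;

	assign_frag_seqnos(&a, &b);
	printf("frag1 seqno=%u frag2 seqno=%u\n", a, b);
	return 0;
}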
-
-/**
- * batadv_unicast_push_and_fill_skb - extends the buffer and initializes the
- * common fields for unicast packets
- * @skb: packet
- * @hdr_size: amount of bytes to push at the beginning of the skb
- * @orig_node: the destination node
- *
- * Returns false if the buffer extension was not possible or true otherwise
- */
-static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
- struct batadv_orig_node *orig_node)
-{
- struct batadv_unicast_packet *unicast_packet;
- uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
-
- if (batadv_skb_head_push(skb, hdr_size) < 0)
- return false;
-
- unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_packet->header.version = BATADV_COMPAT_VERSION;
- /* batman packet type: unicast */
- unicast_packet->header.packet_type = BATADV_UNICAST;
- /* set unicast ttl */
- unicast_packet->header.ttl = BATADV_TTL;
- /* copy the destination for faster routing */
- memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
- /* set the destination tt version number */
- unicast_packet->ttvn = ttvn;
-
- return true;
-}
-
-/**
- * batadv_unicast_prepare_skb - encapsulate an skb with a unicast header
- * @skb: the skb containing the payload to encapsulate
- * @orig_node: the destination node
- *
- * Returns false if the payload could not be encapsulated or true otherwise.
- *
- * This call might reallocate skb data.
- */
-static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
- struct batadv_orig_node *orig_node)
-{
- size_t uni_size = sizeof(struct batadv_unicast_packet);
- return batadv_unicast_push_and_fill_skb(skb, uni_size, orig_node);
-}
-
-/**
- * batadv_unicast_4addr_prepare_skb - encapsulate an skb with a unicast4addr
- * header
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: the skb containing the payload to encapsulate
- * @orig: the destination node
- * @packet_subtype: the batman 4addr packet subtype to use
- *
- * Returns false if the payload could not be encapsulated or true otherwise.
- *
- * This call might reallocate skb data.
- */
-bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
- struct sk_buff *skb,
- struct batadv_orig_node *orig,
- int packet_subtype)
-{
- struct batadv_hard_iface *primary_if;
- struct batadv_unicast_4addr_packet *unicast_4addr_packet;
- bool ret = false;
-
- primary_if = batadv_primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- /* push the header space and fill the unicast_packet substructure.
- * We can do that because the first member of the unicast_4addr_packet
- * is of type struct unicast_packet
- */
- if (!batadv_unicast_push_and_fill_skb(skb,
- sizeof(*unicast_4addr_packet),
- orig))
- goto out;
-
- unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
- unicast_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
- memcpy(unicast_4addr_packet->src, primary_if->net_dev->dev_addr,
- ETH_ALEN);
- unicast_4addr_packet->subtype = packet_subtype;
- unicast_4addr_packet->reserved = 0;
-
- ret = true;
-out:
- if (primary_if)
- batadv_hardif_free_ref(primary_if);
- return ret;
-}
-
-/**
- * batadv_unicast_generic_send_skb - send an skb as unicast
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: payload to send
- * @packet_type: the batman unicast packet type to use
- * @packet_subtype: the batman packet subtype. It is ignored if packet_type is
- * not BATADV_UNICAST_4ADDR
- *
- * Returns 1 in case of error or 0 otherwise
- */
-int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
- struct sk_buff *skb, int packet_type,
- int packet_subtype)
-{
- struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
- struct batadv_unicast_packet *unicast_packet;
- struct batadv_orig_node *orig_node;
- struct batadv_neigh_node *neigh_node;
- int data_len = skb->len;
- int ret = NET_RX_DROP;
- unsigned int dev_mtu, header_len;
-
- /* get routing information */
- if (is_multicast_ether_addr(ethhdr->h_dest)) {
- orig_node = batadv_gw_get_selected_orig(bat_priv);
- if (orig_node)
- goto find_router;
- }
-
- /* check for tt host - increases orig_node refcount.
- * returns NULL in case of AP isolation
- */
- orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
- ethhdr->h_dest);
-
-find_router:
- /* find_router():
- * - if orig_node is NULL it returns NULL
- * - increases neigh_nodes refcount if found.
- */
- neigh_node = batadv_find_router(bat_priv, orig_node, NULL);
-
- if (!neigh_node)
- goto out;
-
- switch (packet_type) {
- case BATADV_UNICAST:
- if (!batadv_unicast_prepare_skb(skb, orig_node))
- goto out;
-
- header_len = sizeof(struct batadv_unicast_packet);
- break;
- case BATADV_UNICAST_4ADDR:
- if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
- packet_subtype))
- goto out;
-
- header_len = sizeof(struct batadv_unicast_4addr_packet);
- break;
- default:
- /* this function supports UNICAST and UNICAST_4ADDR only. It
- * should never be invoked with any other packet type
- */
- goto out;
- }
-
- ethhdr = (struct ethhdr *)(skb->data + header_len);
- unicast_packet = (struct batadv_unicast_packet *)skb->data;
-
- /* inform the destination node that we are still missing a correct route
- * for this client. The destination will receive this packet and will
- * try to reroute it because the ttvn contained in the header is less
- * than the current one
- */
- if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
- unicast_packet->ttvn = unicast_packet->ttvn - 1;
-
- dev_mtu = neigh_node->if_incoming->net_dev->mtu;
- /* fragmentation mechanism only works for UNICAST (now) */
- if (packet_type == BATADV_UNICAST &&
- atomic_read(&bat_priv->fragmentation) &&
- data_len + sizeof(*unicast_packet) > dev_mtu) {
- /* batadv_frag_send_skb() decrements the ttl, compensate for it here */
- unicast_packet->header.ttl++;
- ret = batadv_frag_send_skb(skb, bat_priv,
- neigh_node->if_incoming,
- neigh_node->addr);
- goto out;
- }
-
- if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
- ret = 0;
-
-out:
- if (neigh_node)
- batadv_neigh_node_free_ref(neigh_node);
- if (orig_node)
- batadv_orig_node_free_ref(orig_node);
- if (ret == NET_RX_DROP)
- kfree_skb(skb);
- return ret;
-}
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
deleted file mode 100644
index 429cf8a4a31e..000000000000
--- a/net/batman-adv/unicast.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
- *
- * Andreas Langer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#ifndef _NET_BATMAN_ADV_UNICAST_H_
-#define _NET_BATMAN_ADV_UNICAST_H_
-
-#include "packet.h"
-
-#define BATADV_FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
-#define BATADV_FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
-
-int batadv_frag_reassemble_skb(struct sk_buff *skb,
- struct batadv_priv *bat_priv,
- struct sk_buff **new_skb);
-void batadv_frag_list_free(struct list_head *head);
-int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
- struct batadv_hard_iface *hard_iface,
- const uint8_t dstaddr[]);
-bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
- struct sk_buff *skb,
- struct batadv_orig_node *orig_node,
- int packet_subtype);
-int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
- struct sk_buff *skb, int packet_type,
- int packet_subtype);
-
-
-/**
- * batadv_unicast_send_skb - send the skb encapsulated in a unicast packet
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: the payload to send
- */
-static inline int batadv_unicast_send_skb(struct batadv_priv *bat_priv,
- struct sk_buff *skb)
-{
- return batadv_unicast_generic_send_skb(bat_priv, skb, BATADV_UNICAST,
- 0);
-}
-
-/**
- * batadv_unicast_4addr_send_skb - send the skb encapsulated in a unicast4addr packet
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: the payload to send
- * @packet_subtype: the batman 4addr packet subtype to use
- */
-static inline int batadv_unicast_4addr_send_skb(struct batadv_priv *bat_priv,
- struct sk_buff *skb,
- int packet_subtype)
-{
- return batadv_unicast_generic_send_skb(bat_priv, skb,
- BATADV_UNICAST_4ADDR,
- packet_subtype);
-}
-
-static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu)
-{
- const struct batadv_unicast_frag_packet *unicast_packet;
- int uneven_correction = 0;
- unsigned int merged_size;
-
- unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
-
- if (unicast_packet->flags & BATADV_UNI_FRAG_LARGETAIL) {
- if (unicast_packet->flags & BATADV_UNI_FRAG_HEAD)
- uneven_correction = 1;
- else
- uneven_correction = -1;
- }
-
- merged_size = (skb->len - sizeof(*unicast_packet)) * 2;
- merged_size += sizeof(struct batadv_unicast_packet) + uneven_correction;
-
- return merged_size <= mtu;
-}
-
-#endif /* _NET_BATMAN_ADV_UNICAST_H_ */
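The batadv_frag_can_reassemble() check above doubles the per-fragment payload and applies a one byte correction when the original payload had odd length (BATADV_UNI_FRAG_LARGETAIL): the tail fragment then carries one byte more than the head, so doubling the head under-counts by one (+1) and doubling the tail over-counts by one (-1). A hedged user-space sketch of the same arithmetic, with placeholder header sizes rather than the real packet struct sizes:

#include <stdbool.h>
#include <stdio.h>

/* illustrative placeholder sizes, not the real packet struct sizes */
#define FRAG_HDR_LEN	30
#define UNICAST_HDR_LEN	10

static bool frag_can_reassemble(unsigned int frag_len, bool largetail,
				bool is_head, unsigned int mtu)
{
	int correction = 0;
	unsigned int merged;

	/* with an odd payload the tail is one byte larger than the head */
	if (largetail)
		correction = is_head ? 1 : -1;

	merged = (frag_len - FRAG_HDR_LEN) * 2 + UNICAST_HDR_LEN + correction;

	return merged <= mtu;
}

int main(void)
{
	/* e.g. a 760 byte head fragment of an odd-sized payload */
	printf("fits: %d\n", frag_can_reassemble(760, true, true, 1500));
	return 0;
}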
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
deleted file mode 100644
index d8ea31a58457..000000000000
--- a/net/batman-adv/vis.c
+++ /dev/null
@@ -1,938 +0,0 @@
-/* Copyright (C) 2008-2013 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#include "main.h"
-#include "send.h"
-#include "translation-table.h"
-#include "vis.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "hash.h"
-#include "originator.h"
-
-#define BATADV_MAX_VIS_PACKET_SIZE 1000
-
-/* hash class keys */
-static struct lock_class_key batadv_vis_hash_lock_class_key;
-
-/* free the info */
-static void batadv_free_info(struct kref *ref)
-{
- struct batadv_vis_info *info;
- struct batadv_priv *bat_priv;
- struct batadv_vis_recvlist_node *entry, *tmp;
-
- info = container_of(ref, struct batadv_vis_info, refcount);
- bat_priv = info->bat_priv;
-
- list_del_init(&info->send_list);
- spin_lock_bh(&bat_priv->vis.list_lock);
- list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-
- spin_unlock_bh(&bat_priv->vis.list_lock);
- kfree_skb(info->skb_packet);
- kfree(info);
-}
-
-/* Compare two vis packets, used by the hashing algorithm */
-static int batadv_vis_info_cmp(const struct hlist_node *node, const void *data2)
-{
- const struct batadv_vis_info *d1, *d2;
- const struct batadv_vis_packet *p1, *p2;
-
- d1 = container_of(node, struct batadv_vis_info, hash_entry);
- d2 = data2;
- p1 = (struct batadv_vis_packet *)d1->skb_packet->data;
- p2 = (struct batadv_vis_packet *)d2->skb_packet->data;
- return batadv_compare_eth(p1->vis_orig, p2->vis_orig);
-}
-
-/* hash function to choose an entry in a hash table of given size
- * hash algorithm (Jenkins one-at-a-time) from http://en.wikipedia.org/wiki/Hash_table
- */
-static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
-{
- const struct batadv_vis_info *vis_info = data;
- const struct batadv_vis_packet *packet;
- const unsigned char *key;
- uint32_t hash = 0;
- size_t i;
-
- packet = (struct batadv_vis_packet *)vis_info->skb_packet->data;
- key = packet->vis_orig;
- for (i = 0; i < ETH_ALEN; i++) {
- hash += key[i];
- hash += (hash << 10);
- hash ^= (hash >> 6);
- }
-
- hash += (hash << 3);
- hash ^= (hash >> 11);
- hash += (hash << 15);
-
- return hash % size;
-}
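The function above is the Jenkins one-at-a-time hash run over the 6 byte originator address, reduced modulo the table size. A self-contained user-space sketch of the same algorithm (illustrative only, not the kernel helper):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t one_at_a_time(const uint8_t *key, size_t len, uint32_t buckets)
{
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		hash += key[i];
		hash += hash << 10;
		hash ^= hash >> 6;
	}

	hash += hash << 3;
	hash ^= hash >> 11;
	hash += hash << 15;

	return hash % buckets;
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("bucket %u\n", one_at_a_time(mac, sizeof(mac), 256));
	return 0;
}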
-
-static struct batadv_vis_info *
-batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
-{
- struct batadv_hashtable *hash = bat_priv->vis.hash;
- struct hlist_head *head;
- struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
- uint32_t index;
-
- if (!hash)
- return NULL;
-
- index = batadv_vis_info_choose(data, hash->size);
- head = &hash->table[index];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(vis_info, head, hash_entry) {
- if (!batadv_vis_info_cmp(&vis_info->hash_entry, data))
- continue;
-
- vis_info_tmp = vis_info;
- break;
- }
- rcu_read_unlock();
-
- return vis_info_tmp;
-}
-
-/* insert an interface into the list of interfaces of one originator, if it
- * does not already exist in the list
- */
-static void batadv_vis_data_insert_interface(const uint8_t *interface,
- struct hlist_head *if_list,
- bool primary)
-{
- struct batadv_vis_if_list_entry *entry;
-
- hlist_for_each_entry(entry, if_list, list) {
- if (batadv_compare_eth(entry->addr, interface))
- return;
- }
-
- /* it's a new address, add it to the list */
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
- return;
- memcpy(entry->addr, interface, ETH_ALEN);
- entry->primary = primary;
- hlist_add_head(&entry->list, if_list);
-}
-
-static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
- const struct hlist_head *if_list)
-{
- struct batadv_vis_if_list_entry *entry;
-
- hlist_for_each_entry(entry, if_list, list) {
- if (entry->primary)
- seq_puts(seq, "PRIMARY, ");
- else
- seq_printf(seq, "SEC %pM, ", entry->addr);
- }
-}
-
-/* read an entry */
-static ssize_t
-batadv_vis_data_read_entry(struct seq_file *seq,
- const struct batadv_vis_info_entry *entry,
- const uint8_t *src, bool primary)
-{
- if (primary && entry->quality == 0)
- return seq_printf(seq, "TT %pM, ", entry->dest);
- else if (batadv_compare_eth(entry->src, src))
- return seq_printf(seq, "TQ %pM %d, ", entry->dest,
- entry->quality);
-
- return 0;
-}
-
-static void
-batadv_vis_data_insert_interfaces(struct hlist_head *list,
- struct batadv_vis_packet *packet,
- struct batadv_vis_info_entry *entries)
-{
- int i;
-
- for (i = 0; i < packet->entries; i++) {
- if (entries[i].quality == 0)
- continue;
-
- if (batadv_compare_eth(entries[i].src, packet->vis_orig))
- continue;
-
- batadv_vis_data_insert_interface(entries[i].src, list, false);
- }
-}
-
-static void batadv_vis_data_read_entries(struct seq_file *seq,
- struct hlist_head *list,
- struct batadv_vis_packet *packet,
- struct batadv_vis_info_entry *entries)
-{
- int i;
- struct batadv_vis_if_list_entry *entry;
-
- hlist_for_each_entry(entry, list, list) {
- seq_printf(seq, "%pM,", entry->addr);
-
- for (i = 0; i < packet->entries; i++)
- batadv_vis_data_read_entry(seq, &entries[i],
- entry->addr, entry->primary);
-
- /* add primary/secondary records */
- if (batadv_compare_eth(entry->addr, packet->vis_orig))
- batadv_vis_data_read_prim_sec(seq, list);
-
- seq_puts(seq, "\n");
- }
-}
-
-static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
- const struct hlist_head *head)
-{
- struct batadv_vis_info *info;
- struct batadv_vis_packet *packet;
- uint8_t *entries_pos;
- struct batadv_vis_info_entry *entries;
- struct batadv_vis_if_list_entry *entry;
- struct hlist_node *n;
-
- HLIST_HEAD(vis_if_list);
-
- hlist_for_each_entry_rcu(info, head, hash_entry) {
- packet = (struct batadv_vis_packet *)info->skb_packet->data;
- entries_pos = (uint8_t *)packet + sizeof(*packet);
- entries = (struct batadv_vis_info_entry *)entries_pos;
-
- batadv_vis_data_insert_interface(packet->vis_orig, &vis_if_list,
- true);
- batadv_vis_data_insert_interfaces(&vis_if_list, packet,
- entries);
- batadv_vis_data_read_entries(seq, &vis_if_list, packet,
- entries);
-
- hlist_for_each_entry_safe(entry, n, &vis_if_list, list) {
- hlist_del(&entry->list);
- kfree(entry);
- }
- }
-}
-
-int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct batadv_hard_iface *primary_if;
- struct hlist_head *head;
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hashtable *hash = bat_priv->vis.hash;
- uint32_t i;
- int ret = 0;
- int vis_server = atomic_read(&bat_priv->vis_mode);
-
- primary_if = batadv_primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
- goto out;
-
- spin_lock_bh(&bat_priv->vis.hash_lock);
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
- batadv_vis_seq_print_text_bucket(seq, head);
- }
- spin_unlock_bh(&bat_priv->vis.hash_lock);
-
-out:
- if (primary_if)
- batadv_hardif_free_ref(primary_if);
- return ret;
-}
-
-/* add the info packet to the send list, if it was not
- * already linked in.
- */
-static void batadv_send_list_add(struct batadv_priv *bat_priv,
- struct batadv_vis_info *info)
-{
- if (list_empty(&info->send_list)) {
- kref_get(&info->refcount);
- list_add_tail(&info->send_list, &bat_priv->vis.send_list);
- }
-}
-
-/* delete the info packet from the send list, if it was
- * linked in.
- */
-static void batadv_send_list_del(struct batadv_vis_info *info)
-{
- if (!list_empty(&info->send_list)) {
- list_del_init(&info->send_list);
- kref_put(&info->refcount, batadv_free_info);
- }
-}
-
-/* tries to add one entry to the receive list. */
-static void batadv_recv_list_add(struct batadv_priv *bat_priv,
- struct list_head *recv_list, const char *mac)
-{
- struct batadv_vis_recvlist_node *entry;
-
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
- return;
-
- memcpy(entry->mac, mac, ETH_ALEN);
- spin_lock_bh(&bat_priv->vis.list_lock);
- list_add_tail(&entry->list, recv_list);
- spin_unlock_bh(&bat_priv->vis.list_lock);
-}
-
-/* returns 1 if this mac is in the recv_list */
-static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
- const struct list_head *recv_list,
- const char *mac)
-{
- const struct batadv_vis_recvlist_node *entry;
-
- spin_lock_bh(&bat_priv->vis.list_lock);
- list_for_each_entry(entry, recv_list, list) {
- if (batadv_compare_eth(entry->mac, mac)) {
- spin_unlock_bh(&bat_priv->vis.list_lock);
- return 1;
- }
- }
- spin_unlock_bh(&bat_priv->vis.list_lock);
- return 0;
-}
-
-/* try to add the packet to the vis_hash. Returns NULL if invalid (e.g. too old
- * or broken). The vis hash must be locked by the caller. is_new is set when
- * the packet is newer than old entries in the hash.
- */
-static struct batadv_vis_info *
-batadv_add_packet(struct batadv_priv *bat_priv,
- struct batadv_vis_packet *vis_packet, int vis_info_len,
- int *is_new, int make_broadcast)
-{
- struct batadv_vis_info *info, *old_info;
- struct batadv_vis_packet *search_packet, *old_packet;
- struct batadv_vis_info search_elem;
- struct batadv_vis_packet *packet;
- struct sk_buff *tmp_skb;
- int hash_added;
- size_t len;
- size_t max_entries;
-
- *is_new = 0;
- /* sanity check */
- if (!bat_priv->vis.hash)
- return NULL;
-
- /* see if the packet is already in vis_hash */
- search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet));
- if (!search_elem.skb_packet)
- return NULL;
- len = sizeof(*search_packet);
- tmp_skb = search_elem.skb_packet;
- search_packet = (struct batadv_vis_packet *)skb_put(tmp_skb, len);
-
- memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
- old_info = batadv_vis_hash_find(bat_priv, &search_elem);
- kfree_skb(search_elem.skb_packet);
-
- if (old_info) {
- tmp_skb = old_info->skb_packet;
- old_packet = (struct batadv_vis_packet *)tmp_skb->data;
- if (!batadv_seq_after(ntohl(vis_packet->seqno),
- ntohl(old_packet->seqno))) {
- if (old_packet->seqno == vis_packet->seqno) {
- batadv_recv_list_add(bat_priv,
- &old_info->recv_list,
- vis_packet->sender_orig);
- return old_info;
- } else {
- /* newer packet is already in hash. */
- return NULL;
- }
- }
- /* remove old entry */
- batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp,
- batadv_vis_info_choose, old_info);
- batadv_send_list_del(old_info);
- kref_put(&old_info->refcount, batadv_free_info);
- }
-
- info = kmalloc(sizeof(*info), GFP_ATOMIC);
- if (!info)
- return NULL;
-
- len = sizeof(*packet) + vis_info_len;
- info->skb_packet = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
- if (!info->skb_packet) {
- kfree(info);
- return NULL;
- }
- info->skb_packet->priority = TC_PRIO_CONTROL;
- skb_reserve(info->skb_packet, ETH_HLEN);
- packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len);
-
- kref_init(&info->refcount);
- INIT_LIST_HEAD(&info->send_list);
- INIT_LIST_HEAD(&info->recv_list);
- info->first_seen = jiffies;
- info->bat_priv = bat_priv;
- memcpy(packet, vis_packet, len);
-
- /* initialize and add new packet. */
- *is_new = 1;
-
- /* Make it a broadcast packet, if required */
- if (make_broadcast)
- memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
-
- /* clamp the entry count if it exceeds what the packet can hold */
- max_entries = vis_info_len / sizeof(struct batadv_vis_info_entry);
- if (packet->entries > max_entries)
- packet->entries = max_entries;
-
- batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
-
- /* try to add it */
- hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
- batadv_vis_info_choose, info,
- &info->hash_entry);
- if (hash_added != 0) {
- /* did not work (for some reason) */
- kref_put(&info->refcount, batadv_free_info);
- info = NULL;
- }
-
- return info;
-}
-
-/* handle the server sync packet, forward if needed. */
-void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
- struct batadv_vis_packet *vis_packet,
- int vis_info_len)
-{
- struct batadv_vis_info *info;
- int is_new, make_broadcast;
- int vis_server = atomic_read(&bat_priv->vis_mode);
-
- make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
-
- spin_lock_bh(&bat_priv->vis.hash_lock);
- info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
- &is_new, make_broadcast);
- if (!info)
- goto end;
-
- /* only if we are server ourselves and packet is newer than the one in
- * hash.
- */
- if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
- batadv_send_list_add(bat_priv, info);
-end:
- spin_unlock_bh(&bat_priv->vis.hash_lock);
-}
-
-/* handle an incoming client update packet and schedule forward if needed. */
-void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
- struct batadv_vis_packet *vis_packet,
- int vis_info_len)
-{
- struct batadv_vis_info *info;
- struct batadv_vis_packet *packet;
- int is_new;
- int vis_server = atomic_read(&bat_priv->vis_mode);
- int are_target = 0;
-
- /* clients shall not broadcast. */
- if (is_broadcast_ether_addr(vis_packet->target_orig))
- return;
-
- /* Are we the target for this VIS packet? */
- if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC &&
- batadv_is_my_mac(bat_priv, vis_packet->target_orig))
- are_target = 1;
-
- spin_lock_bh(&bat_priv->vis.hash_lock);
- info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
- &is_new, are_target);
-
- if (!info)
- goto end;
- /* note that outdated packets will be dropped at this point. */
-
- packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
- /* send only if we're the target server or ... */
- if (are_target && is_new) {
- packet->vis_type = BATADV_VIS_TYPE_SERVER_SYNC; /* upgrade! */
- batadv_send_list_add(bat_priv, info);
-
- /* ... we're not the recipient (and thus need to forward). */
- } else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) {
- batadv_send_list_add(bat_priv, info);
- }
-
-end:
- spin_unlock_bh(&bat_priv->vis.hash_lock);
-}
-
-/* Walk the originators and find the VIS server with the best tq, set the
- * packet's target address to that server's address and return the best tq.
- *
- * Must be called with the originator hash locked
- */
-static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
- struct batadv_vis_info *info)
-{
- struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct batadv_neigh_node *router;
- struct hlist_head *head;
- struct batadv_orig_node *orig_node;
- struct batadv_vis_packet *packet;
- int best_tq = -1;
- uint32_t i;
-
- packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- router = batadv_orig_node_get_router(orig_node);
- if (!router)
- continue;
-
- if ((orig_node->flags & BATADV_VIS_SERVER) &&
- (router->tq_avg > best_tq)) {
- best_tq = router->tq_avg;
- memcpy(packet->target_orig, orig_node->orig,
- ETH_ALEN);
- }
- batadv_neigh_node_free_ref(router);
- }
- rcu_read_unlock();
- }
-
- return best_tq;
-}
-
-/* Return true if the vis packet is full. */
-static bool batadv_vis_packet_full(const struct batadv_vis_info *info)
-{
- const struct batadv_vis_packet *packet;
- size_t num;
-
- packet = (struct batadv_vis_packet *)info->skb_packet->data;
- num = BATADV_MAX_VIS_PACKET_SIZE / sizeof(struct batadv_vis_info_entry);
-
- if (num < packet->entries + 1)
- return true;
- return false;
-}
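As a rough worked example for the check above: assuming struct batadv_vis_info_entry packs to 13 bytes (two 6 byte MAC addresses plus one quality byte), BATADV_MAX_VIS_PACKET_SIZE / 13 = 1000 / 13 = 76, so the packet is reported full once 76 entries have been written.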
-
-/* generates a packet of own vis data,
- * returns 0 on success, -1 if no packet could be generated
- */
-static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
-{
- struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_head *head;
- struct batadv_orig_node *orig_node;
- struct batadv_neigh_node *router;
- struct batadv_vis_info *info = bat_priv->vis.my_info;
- struct batadv_vis_packet *packet;
- struct batadv_vis_info_entry *entry;
- struct batadv_tt_common_entry *tt_common_entry;
- uint8_t *packet_pos;
- int best_tq = -1;
- uint32_t i;
-
- info->first_seen = jiffies;
- packet = (struct batadv_vis_packet *)info->skb_packet->data;
- packet->vis_type = atomic_read(&bat_priv->vis_mode);
-
- memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
- packet->header.ttl = BATADV_TTL;
- packet->seqno = htonl(ntohl(packet->seqno) + 1);
- packet->entries = 0;
- packet->reserved = 0;
- skb_trim(info->skb_packet, sizeof(*packet));
-
- if (packet->vis_type == BATADV_VIS_TYPE_CLIENT_UPDATE) {
- best_tq = batadv_find_best_vis_server(bat_priv, info);
-
- if (best_tq < 0)
- return best_tq;
- }
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- router = batadv_orig_node_get_router(orig_node);
- if (!router)
- continue;
-
- if (!batadv_compare_eth(router->addr, orig_node->orig))
- goto next;
-
- if (router->if_incoming->if_status != BATADV_IF_ACTIVE)
- goto next;
-
- if (router->tq_avg < 1)
- goto next;
-
- /* fill one entry into buffer. */
- packet_pos = skb_put(info->skb_packet, sizeof(*entry));
- entry = (struct batadv_vis_info_entry *)packet_pos;
- memcpy(entry->src,
- router->if_incoming->net_dev->dev_addr,
- ETH_ALEN);
- memcpy(entry->dest, orig_node->orig, ETH_ALEN);
- entry->quality = router->tq_avg;
- packet->entries++;
-
-next:
- batadv_neigh_node_free_ref(router);
-
- if (batadv_vis_packet_full(info))
- goto unlock;
- }
- rcu_read_unlock();
- }
-
- hash = bat_priv->tt.local_hash;
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common_entry, head,
- hash_entry) {
- packet_pos = skb_put(info->skb_packet, sizeof(*entry));
- entry = (struct batadv_vis_info_entry *)packet_pos;
- memset(entry->src, 0, ETH_ALEN);
- memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
- entry->quality = 0; /* 0 means TT */
- packet->entries++;
-
- if (batadv_vis_packet_full(info))
- goto unlock;
- }
- rcu_read_unlock();
- }
-
- return 0;
-
-unlock:
- rcu_read_unlock();
- return 0;
-}
-
-/* free old vis packets. Must be called with the vis hash lock
- * held
- */
-static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
-{
- uint32_t i;
- struct batadv_hashtable *hash = bat_priv->vis.hash;
- struct hlist_node *node_tmp;
- struct hlist_head *head;
- struct batadv_vis_info *info;
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- hlist_for_each_entry_safe(info, node_tmp,
- head, hash_entry) {
- /* never purge own data. */
- if (info == bat_priv->vis.my_info)
- continue;
-
- if (batadv_has_timed_out(info->first_seen,
- BATADV_VIS_TIMEOUT)) {
- hlist_del(&info->hash_entry);
- batadv_send_list_del(info);
- kref_put(&info->refcount, batadv_free_info);
- }
- }
- }
-}
-
-static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
- struct batadv_vis_info *info)
-{
- struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_head *head;
- struct batadv_orig_node *orig_node;
- struct batadv_vis_packet *packet;
- struct sk_buff *skb;
- uint32_t i, res;
-
-
- packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
- /* send to all routers in range. */
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- /* if it's a vis server and reachable, send it. */
- if (!(orig_node->flags & BATADV_VIS_SERVER))
- continue;
-
- /* don't send it if we already received the packet from
- * this node.
- */
- if (batadv_recv_list_is_in(bat_priv, &info->recv_list,
- orig_node->orig))
- continue;
-
- memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
- skb = skb_clone(info->skb_packet, GFP_ATOMIC);
- if (!skb)
- continue;
-
- res = batadv_send_skb_to_orig(skb, orig_node, NULL);
- if (res == NET_XMIT_DROP)
- kfree_skb(skb);
- }
- rcu_read_unlock();
- }
-}
-
-static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv,
- struct batadv_vis_info *info)
-{
- struct batadv_orig_node *orig_node;
- struct sk_buff *skb;
- struct batadv_vis_packet *packet;
-
- packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
- orig_node = batadv_orig_hash_find(bat_priv, packet->target_orig);
- if (!orig_node)
- goto out;
-
- skb = skb_clone(info->skb_packet, GFP_ATOMIC);
- if (!skb)
- goto out;
-
- if (batadv_send_skb_to_orig(skb, orig_node, NULL) == NET_XMIT_DROP)
- kfree_skb(skb);
-
-out:
- if (orig_node)
- batadv_orig_node_free_ref(orig_node);
-}
-
-/* only send one vis packet. called from batadv_send_vis_packets() */
-static void batadv_send_vis_packet(struct batadv_priv *bat_priv,
- struct batadv_vis_info *info)
-{
- struct batadv_hard_iface *primary_if;
- struct batadv_vis_packet *packet;
-
- primary_if = batadv_primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- packet = (struct batadv_vis_packet *)info->skb_packet->data;
- if (packet->header.ttl < 2) {
- pr_debug("Error - can't send vis packet: ttl exceeded\n");
- goto out;
- }
-
- memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
- packet->header.ttl--;
-
- if (is_broadcast_ether_addr(packet->target_orig))
- batadv_broadcast_vis_packet(bat_priv, info);
- else
- batadv_unicast_vis_packet(bat_priv, info);
- packet->header.ttl++; /* restore TTL */
-
-out:
- if (primary_if)
- batadv_hardif_free_ref(primary_if);
-}
-
-/* called from timer; send (and maybe generate) vis packet. */
-static void batadv_send_vis_packets(struct work_struct *work)
-{
- struct delayed_work *delayed_work;
- struct batadv_priv *bat_priv;
- struct batadv_priv_vis *priv_vis;
- struct batadv_vis_info *info;
-
- delayed_work = container_of(work, struct delayed_work, work);
- priv_vis = container_of(delayed_work, struct batadv_priv_vis, work);
- bat_priv = container_of(priv_vis, struct batadv_priv, vis);
- spin_lock_bh(&bat_priv->vis.hash_lock);
- batadv_purge_vis_packets(bat_priv);
-
- if (batadv_generate_vis_packet(bat_priv) == 0) {
- /* schedule if generation was successful */
- batadv_send_list_add(bat_priv, bat_priv->vis.my_info);
- }
-
- while (!list_empty(&bat_priv->vis.send_list)) {
- info = list_first_entry(&bat_priv->vis.send_list,
- typeof(*info), send_list);
-
- kref_get(&info->refcount);
- spin_unlock_bh(&bat_priv->vis.hash_lock);
-
- batadv_send_vis_packet(bat_priv, info);
-
- spin_lock_bh(&bat_priv->vis.hash_lock);
- batadv_send_list_del(info);
- kref_put(&info->refcount, batadv_free_info);
- }
- spin_unlock_bh(&bat_priv->vis.hash_lock);
-
- queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
- msecs_to_jiffies(BATADV_VIS_INTERVAL));
-}
-
-/* init the vis server. this may only be called when if_list is already
- * initialized (e.g. bat0 is initialized, interfaces have been added)
- */
-int batadv_vis_init(struct batadv_priv *bat_priv)
-{
- struct batadv_vis_packet *packet;
- int hash_added;
- unsigned int len;
- unsigned long first_seen;
- struct sk_buff *tmp_skb;
-
- if (bat_priv->vis.hash)
- return 0;
-
- spin_lock_bh(&bat_priv->vis.hash_lock);
-
- bat_priv->vis.hash = batadv_hash_new(256);
- if (!bat_priv->vis.hash) {
- pr_err("Can't initialize vis_hash\n");
- goto err;
- }
-
- batadv_hash_set_lock_class(bat_priv->vis.hash,
- &batadv_vis_hash_lock_class_key);
-
- bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
- if (!bat_priv->vis.my_info)
- goto err;
-
- len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
- bat_priv->vis.my_info->skb_packet = netdev_alloc_skb_ip_align(NULL,
- len);
- if (!bat_priv->vis.my_info->skb_packet)
- goto free_info;
-
- bat_priv->vis.my_info->skb_packet->priority = TC_PRIO_CONTROL;
- skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
- tmp_skb = bat_priv->vis.my_info->skb_packet;
- packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
-
- /* prefill the vis info */
- first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
- bat_priv->vis.my_info->first_seen = first_seen;
- INIT_LIST_HEAD(&bat_priv->vis.my_info->recv_list);
- INIT_LIST_HEAD(&bat_priv->vis.my_info->send_list);
- kref_init(&bat_priv->vis.my_info->refcount);
- bat_priv->vis.my_info->bat_priv = bat_priv;
- packet->header.version = BATADV_COMPAT_VERSION;
- packet->header.packet_type = BATADV_VIS;
- packet->header.ttl = BATADV_TTL;
- packet->seqno = 0;
- packet->reserved = 0;
- packet->entries = 0;
-
- INIT_LIST_HEAD(&bat_priv->vis.send_list);
-
- hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
- batadv_vis_info_choose,
- bat_priv->vis.my_info,
- &bat_priv->vis.my_info->hash_entry);
- if (hash_added != 0) {
- pr_err("Can't add own vis packet into hash\n");
- /* not in hash, need to remove it manually. */
- kref_put(&bat_priv->vis.my_info->refcount, batadv_free_info);
- goto err;
- }
-
- spin_unlock_bh(&bat_priv->vis.hash_lock);
-
- INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
- queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
- msecs_to_jiffies(BATADV_VIS_INTERVAL));
-
- return 0;
-
-free_info:
- kfree(bat_priv->vis.my_info);
- bat_priv->vis.my_info = NULL;
-err:
- spin_unlock_bh(&bat_priv->vis.hash_lock);
- batadv_vis_quit(bat_priv);
- return -ENOMEM;
-}
-
-/* Decrease the reference count on a hash item info */
-static void batadv_free_info_ref(struct hlist_node *node, void *arg)
-{
- struct batadv_vis_info *info;
-
- info = container_of(node, struct batadv_vis_info, hash_entry);
- batadv_send_list_del(info);
- kref_put(&info->refcount, batadv_free_info);
-}
-
-/* shutdown vis-server */
-void batadv_vis_quit(struct batadv_priv *bat_priv)
-{
- if (!bat_priv->vis.hash)
- return;
-
- cancel_delayed_work_sync(&bat_priv->vis.work);
-
- spin_lock_bh(&bat_priv->vis.hash_lock);
- /* properly remove, kill timers ... */
- batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL);
- bat_priv->vis.hash = NULL;
- bat_priv->vis.my_info = NULL;
- spin_unlock_bh(&bat_priv->vis.hash_lock);
-}
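vis.c keeps each batadv_vis_info alive through a kref: the creator holds one reference, the send list takes another via kref_get(), and whichever kref_put() drops the count to zero calls batadv_free_info(). A minimal user-space sketch of that pattern, with illustrative names and C11 atomics standing in for the kernel kref API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct vis_info_like {
	atomic_int refcount;
	/* payload omitted */
};

static void info_get(struct vis_info_like *info)
{
	atomic_fetch_add(&info->refcount, 1);
}

static void info_put(struct vis_info_like *info,
		     void (*release)(struct vis_info_like *))
{
	/* free exactly once, when the last holder drops its reference */
	if (atomic_fetch_sub(&info->refcount, 1) == 1)
		release(info);
}

static void info_release(struct vis_info_like *info)
{
	printf("last reference dropped, freeing\n");
	free(info);
}

int main(void)
{
	struct vis_info_like *info = calloc(1, sizeof(*info));

	atomic_init(&info->refcount, 1);	/* creator's reference */
	info_get(info);				/* e.g. linked on send list */
	info_put(info, info_release);		/* unlinked from send list */
	info_put(info, info_release);		/* creator done: freed here */
	return 0;
}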
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
deleted file mode 100644
index ad92b0e3c230..000000000000
--- a/net/batman-adv/vis.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (C) 2008-2013 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#ifndef _NET_BATMAN_ADV_VIS_H_
-#define _NET_BATMAN_ADV_VIS_H_
-
-/* timeout of vis packets in milliseconds */
-#define BATADV_VIS_TIMEOUT 200000
-
-int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
-void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
- struct batadv_vis_packet *vis_packet,
- int vis_info_len);
-void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
- struct batadv_vis_packet *vis_packet,
- int vis_info_len);
-int batadv_vis_init(struct batadv_priv *bat_priv);
-void batadv_vis_quit(struct batadv_priv *bat_priv);
-
-#endif /* _NET_BATMAN_ADV_VIS_H_ */
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index dea6a287daca..6a791e73e39d 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -11,3 +11,5 @@ obj-$(CONFIG_BT_HIDP) += hidp/
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
a2mp.o amp.o
+
+subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 17f33a62f6db..efcd108822c4 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -15,8 +15,9 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/amp.h>
+
+#include "a2mp.h"
+#include "amp.h"
/* Global AMP Manager list */
LIST_HEAD(amp_mgr_list);
@@ -75,33 +76,26 @@ u8 __next_ident(struct amp_mgr *mgr)
return mgr->ident;
}
-static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
-{
- cl->id = 0;
- cl->type = 0;
- cl->status = 1;
-}
-
/* hci_dev_list shall be locked */
-static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl)
+static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl)
{
- int i = 0;
struct hci_dev *hdev;
+ int i = 1;
- __a2mp_cl_bredr(cl);
+ cl[0].id = AMP_ID_BREDR;
+ cl[0].type = AMP_TYPE_BREDR;
+ cl[0].status = AMP_STATUS_BLUETOOTH_ONLY;
list_for_each_entry(hdev, &hci_dev_list, list) {
- /* Iterate through AMP controllers */
- if (hdev->id == HCI_BREDR_ID)
- continue;
-
- /* Starting from second entry */
- if (++i >= num_ctrl)
- return;
-
- cl[i].id = hdev->id;
- cl[i].type = hdev->amp_type;
- cl[i].status = hdev->amp_status;
+ if (hdev->dev_type == HCI_AMP) {
+ cl[i].id = hdev->id;
+ cl[i].type = hdev->amp_type;
+ if (test_bit(HCI_UP, &hdev->flags))
+ cl[i].status = hdev->amp_status;
+ else
+ cl[i].status = AMP_STATUS_POWERED_DOWN;
+ i++;
+ }
}
}
@@ -129,6 +123,7 @@ static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_discov_rsp *rsp;
u16 ext_feat;
u8 num_ctrl;
+ struct hci_dev *hdev;
if (len < sizeof(*req))
return -EINVAL;
@@ -152,7 +147,14 @@ static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
read_lock(&hci_dev_list_lock);
- num_ctrl = __hci_num_ctrl();
+ /* at minimum the BR/EDR needs to be listed */
+ num_ctrl = 1;
+
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ if (hdev->dev_type == HCI_AMP)
+ num_ctrl++;
+ }
+
len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
rsp = kmalloc(len, GFP_ATOMIC);
if (!rsp) {
@@ -163,7 +165,7 @@ static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
rsp->ext_feat = 0;
- __a2mp_add_cl(mgr, rsp->cl, num_ctrl);
+ __a2mp_add_cl(mgr, rsp->cl);
read_unlock(&hci_dev_list_lock);
@@ -208,7 +210,7 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
BT_DBG("Remote AMP id %d type %d status %d", cl->id, cl->type,
cl->status);
- if (cl->id != HCI_BREDR_ID && cl->type == HCI_AMP) {
+ if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) {
struct a2mp_info_req req;
found = true;
@@ -344,7 +346,7 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
hdev = hci_dev_get(req->id);
- if (!hdev || hdev->amp_type == HCI_BREDR || tmp) {
+ if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
struct a2mp_amp_assoc_rsp rsp;
rsp.id = req->id;
@@ -451,7 +453,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
rsp.remote_id = req->local_id;
hdev = hci_dev_get(req->remote_id);
- if (!hdev || hdev->amp_type != HCI_AMP) {
+ if (!hdev || hdev->amp_type == AMP_TYPE_BREDR) {
rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
goto send_rsp;
}
@@ -535,7 +537,8 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
goto send_rsp;
}
- hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, mgr->l2cap_conn->dst);
+ hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
+ &mgr->l2cap_conn->hcon->dst);
if (!hcon) {
BT_ERR("No phys link exist");
rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS;
@@ -669,7 +672,8 @@ static void a2mp_chan_close_cb(struct l2cap_chan *chan)
l2cap_chan_put(chan);
}
-static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
+static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
+ int err)
{
struct amp_mgr *mgr = chan->data;
@@ -706,6 +710,9 @@ static struct l2cap_ops a2mp_chan_ops = {
.teardown = l2cap_chan_no_teardown,
.ready = l2cap_chan_no_ready,
.defer = l2cap_chan_no_defer,
+ .resume = l2cap_chan_no_resume,
+ .set_shutdown = l2cap_chan_no_set_shutdown,
+ .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
};
static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
@@ -829,6 +836,9 @@ struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
{
struct amp_mgr *mgr;
+ if (conn->hcon->type != ACL_LINK)
+ return NULL;
+
mgr = amp_mgr_create(conn, false);
if (!mgr) {
BT_ERR("Could not create AMP manager");
@@ -871,7 +881,7 @@ void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
rsp.id = hdev->id;
rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
- if (hdev->amp_type != HCI_BREDR) {
+ if (hdev->amp_type != AMP_TYPE_BREDR) {
rsp.status = 0;
rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
diff --git a/include/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
index 487b54c1308f..487b54c1308f 100644
--- a/include/net/bluetooth/a2mp.h
+++ b/net/bluetooth/a2mp.h
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 9096137c889c..f6a1671ea2ff 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -25,12 +25,13 @@
/* Bluetooth address family and sockets. */
#include <linux/module.h>
+#include <linux/debugfs.h>
#include <asm/ioctls.h>
#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>
-#define VERSION "2.16"
+#define VERSION "2.17"
/* Bluetooth sockets */
#define BT_MAX_PROTO 8
@@ -221,12 +222,12 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
- msg->msg_namelen = 0;
-
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb) {
- if (sk->sk_shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
+ msg->msg_namelen = 0;
return 0;
+ }
return err;
}
@@ -238,9 +239,16 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
skb_reset_transport_header(skb);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
- if (err == 0)
+ if (err == 0) {
sock_recv_ts_and_drops(msg, sk, skb);
+ if (bt_sk(sk)->skb_msg_name)
+ bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
+ &msg->msg_namelen);
+ else
+ msg->msg_namelen = 0;
+ }
+
skb_free_datagram(sk, skb);
return err ? : copied;
@@ -490,6 +498,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
EXPORT_SYMBOL(bt_sock_ioctl);
+/* This function expects the sk lock to be held when called */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
DECLARE_WAITQUEUE(wait, current);
@@ -525,6 +534,46 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
}
EXPORT_SYMBOL(bt_sock_wait_state);
+/* This function expects the sk lock to be held when called */
+int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long timeo;
+ int err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
+ if (!timeo) {
+ err = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ err = sock_error(sk);
+ if (err)
+ break;
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ return err;
+}
+EXPORT_SYMBOL(bt_sock_wait_ready);
+
#ifdef CONFIG_PROC_FS
struct bt_seq_state {
struct bt_sock_list *l;
@@ -563,7 +612,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
struct bt_sock_list *l = s->l;
if (v == SEQ_START_TOKEN) {
- seq_puts(seq ,"sk RefCnt Rmem Wmem User Inode Src Dst Parent");
+ seq_puts(seq ,"sk RefCnt Rmem Wmem User Inode Parent");
if (l->custom_seq_show) {
seq_putc(seq, ' ');
@@ -576,15 +625,13 @@ static int bt_seq_show(struct seq_file *seq, void *v)
struct bt_sock *bt = bt_sk(sk);
seq_printf(seq,
- "%pK %-6d %-6u %-6u %-6u %-6lu %pMR %pMR %-6lu",
+ "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
sk,
atomic_read(&sk->sk_refcnt),
sk_rmem_alloc_get(sk),
sk_wmem_alloc_get(sk),
from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
sock_i_ino(sk),
- &bt->src,
- &bt->dst,
bt->parent? sock_i_ino(bt->parent): 0LU);
if (l->custom_seq_show) {
@@ -662,12 +709,17 @@ static struct net_proto_family bt_sock_family_ops = {
.create = bt_sock_create,
};
+struct dentry *bt_debugfs;
+EXPORT_SYMBOL_GPL(bt_debugfs);
+
static int __init bt_init(void)
{
int err;
BT_INFO("Core ver %s", VERSION);
+ bt_debugfs = debugfs_create_dir("bluetooth", NULL);
+
err = bt_sysfs_init();
if (err < 0)
return err;
@@ -708,7 +760,6 @@ error:
static void __exit bt_exit(void)
{
-
sco_exit();
l2cap_exit();
@@ -718,6 +769,8 @@ static void __exit bt_exit(void)
sock_unregister(PF_BLUETOOTH);
bt_sysfs_cleanup();
+
+ debugfs_remove_recursive(bt_debugfs);
}
subsys_initcall(bt_init);
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index d459ed43c779..bb39509b3f06 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -14,10 +14,11 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/amp.h>
#include <crypto/hash.h>
+#include "a2mp.h"
+#include "amp.h"
+
/* Remote AMP Controllers interface */
void amp_ctrl_get(struct amp_ctrl *ctrl)
{
@@ -110,7 +111,7 @@ static u8 __next_handle(struct amp_mgr *mgr)
struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
u8 remote_id, bool out)
{
- bdaddr_t *dst = mgr->l2cap_conn->dst;
+ bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst;
struct hci_conn *hcon;
hcon = hci_conn_add(hdev, AMP_LINK, dst);
@@ -409,7 +410,8 @@ void amp_create_logical_link(struct l2cap_chan *chan)
struct hci_cp_create_accept_logical_link cp;
struct hci_dev *hdev;
- BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon, chan->conn->dst);
+ BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon,
+ &chan->conn->hcon->dst);
if (!hs_hcon)
return;
diff --git a/include/net/bluetooth/amp.h b/net/bluetooth/amp.h
index 7ea3db77ba89..7ea3db77ba89 100644
--- a/include/net/bluetooth/amp.h
+++ b/net/bluetooth/amp.h
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index e430b1abcd2f..a841d3e776c5 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -32,6 +32,7 @@
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/hci_core.h>
#include "bnep.h"
@@ -510,20 +511,13 @@ static int bnep_session(void *arg)
static struct device *bnep_get_device(struct bnep_session *session)
{
- bdaddr_t *src = &bt_sk(session->sock->sk)->src;
- bdaddr_t *dst = &bt_sk(session->sock->sk)->dst;
- struct hci_dev *hdev;
struct hci_conn *conn;
- hdev = hci_get_route(dst, src);
- if (!hdev)
+ conn = l2cap_pi(session->sock->sk)->chan->conn->hcon;
+ if (!conn)
return NULL;
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
-
- hci_dev_put(hdev);
-
- return conn ? &conn->dev : NULL;
+ return &conn->dev;
}
static struct device_type bnep_type = {
@@ -539,8 +533,8 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
BT_DBG("");
- baswap((void *) dst, &bt_sk(sock->sk)->dst);
- baswap((void *) src, &bt_sk(sock->sk)->src);
+ baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
+ baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
/* session struct allocated as private part of net_device */
dev = alloc_netdev(sizeof(struct bnep_session),
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index e0a6ebf2baa6..67fe5e84e68f 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -340,20 +340,20 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
down_write(&cmtp_session_sem);
- s = __cmtp_get_session(&bt_sk(sock->sk)->dst);
+ s = __cmtp_get_session(&l2cap_pi(sock->sk)->chan->dst);
if (s && s->state == BT_CONNECTED) {
err = -EEXIST;
goto failed;
}
- bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst);
+ bacpy(&session->bdaddr, &l2cap_pi(sock->sk)->chan->dst);
session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu,
l2cap_pi(sock->sk)->chan->imtu);
BT_DBG("mtu %d", session->mtu);
- sprintf(session->name, "%pMR", &bt_sk(sock->sk)->dst);
+ sprintf(session->name, "%pMR", &session->bdaddr);
session->sock = sock;
session->state = BT_CONFIG;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index f0817121ec5e..ba5366c320da 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -28,8 +28,9 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/smp.h>
+
+#include "smp.h"
+#include "a2mp.h"
struct sco_param {
u16 pkt_type;
@@ -49,30 +50,6 @@ static const struct sco_param sco_param_wideband[] = {
{ EDR_ESCO_MASK | ESCO_EV3, 0x0008 }, /* T1 */
};
-static void hci_le_create_connection(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
- struct hci_cp_le_create_conn cp;
-
- conn->state = BT_CONNECT;
- conn->out = true;
- conn->link_mode |= HCI_LM_MASTER;
- conn->sec_level = BT_SECURITY_LOW;
-
- memset(&cp, 0, sizeof(cp));
- cp.scan_interval = __constant_cpu_to_le16(0x0060);
- cp.scan_window = __constant_cpu_to_le16(0x0030);
- bacpy(&cp.peer_addr, &conn->dst);
- cp.peer_addr_type = conn->dst_type;
- cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
- cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
- cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
- cp.min_ce_len = __constant_cpu_to_le16(0x0000);
- cp.max_ce_len = __constant_cpu_to_le16(0x0000);
-
- hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
-}
-
static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
@@ -340,8 +317,10 @@ static void hci_conn_timeout(struct work_struct *work)
}
/* Enter sniff mode */
-static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
+static void hci_conn_idle(struct work_struct *work)
{
+ struct hci_conn *conn = container_of(work, struct hci_conn,
+ idle_work.work);
struct hci_dev *hdev = conn->hdev;
BT_DBG("hcon %p mode %d", conn, conn->mode);
@@ -375,21 +354,12 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
}
}
-static void hci_conn_idle(unsigned long arg)
-{
- struct hci_conn *conn = (void *) arg;
-
- BT_DBG("hcon %p mode %d", conn, conn->mode);
-
- hci_conn_enter_sniff_mode(conn);
-}
-
-static void hci_conn_auto_accept(unsigned long arg)
+static void hci_conn_auto_accept(struct work_struct *work)
{
- struct hci_conn *conn = (void *) arg;
- struct hci_dev *hdev = conn->hdev;
+ struct hci_conn *conn = container_of(work, struct hci_conn,
+ auto_accept_work.work);
- hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
+ hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
&conn->dst);
}
@@ -404,6 +374,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
return NULL;
bacpy(&conn->dst, dst);
+ bacpy(&conn->src, &hdev->bdaddr);
conn->hdev = hdev;
conn->type = type;
conn->mode = HCI_CM_ACTIVE;
@@ -437,9 +408,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
INIT_LIST_HEAD(&conn->chan_list);
INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
- setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
- setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
- (unsigned long) conn);
+ INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
+ INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
atomic_set(&conn->refcnt, 0);
@@ -460,11 +430,9 @@ int hci_conn_del(struct hci_conn *conn)
BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
- del_timer(&conn->idle_timer);
-
cancel_delayed_work_sync(&conn->disc_work);
-
- del_timer(&conn->auto_accept_timer);
+ cancel_delayed_work_sync(&conn->auto_accept_work);
+ cancel_delayed_work_sync(&conn->idle_work);
if (conn->type == ACL_LINK) {
struct hci_conn *sco = conn->link;
@@ -518,6 +486,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
list_for_each_entry(d, &hci_dev_list, list) {
if (!test_bit(HCI_UP, &d->flags) ||
test_bit(HCI_RAW, &d->flags) ||
+ test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
d->dev_type != HCI_BREDR)
continue;
@@ -545,34 +514,124 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
}
EXPORT_SYMBOL(hci_get_route);
+static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
+{
+ struct hci_conn *conn;
+
+ if (status == 0)
+ return;
+
+ BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
+ status);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (!conn)
+ goto done;
+
+ conn->state = BT_CLOSED;
+
+ mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
+ status);
+
+ hci_proto_connect_cfm(conn, status);
+
+ hci_conn_del(conn);
+
+done:
+ hci_dev_unlock(hdev);
+}
+
+static int hci_create_le_conn(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ struct hci_cp_le_create_conn cp;
+ struct hci_request req;
+ int err;
+
+ hci_req_init(&req, hdev);
+
+ memset(&cp, 0, sizeof(cp));
+ cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
+ cp.scan_window = cpu_to_le16(hdev->le_scan_window);
+ bacpy(&cp.peer_addr, &conn->dst);
+ cp.peer_addr_type = conn->dst_type;
+ cp.own_address_type = conn->src_type;
+ cp.conn_interval_min = cpu_to_le16(hdev->le_conn_min_interval);
+ cp.conn_interval_max = cpu_to_le16(hdev->le_conn_max_interval);
+ cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
+ cp.min_ce_len = __constant_cpu_to_le16(0x0000);
+ cp.max_ce_len = __constant_cpu_to_le16(0x0000);
+
+ hci_req_add(&req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
+
+ err = hci_req_run(&req, create_le_conn_complete);
+ if (err) {
+ hci_conn_del(conn);
+ return err;
+ }
+
+ return 0;
+}
+
static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u8 auth_type)
{
- struct hci_conn *le;
+ struct hci_conn *conn;
+ int err;
- if (test_bit(HCI_LE_PERIPHERAL, &hdev->flags))
+ if (test_bit(HCI_ADVERTISING, &hdev->flags))
return ERR_PTR(-ENOTSUPP);
- le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
- if (!le) {
- le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
- if (le)
- return ERR_PTR(-EBUSY);
+ /* Some devices send ATT messages as soon as the physical link is
+ * established. To be able to handle these ATT messages, the user-
+ * space first establishes the connection and then starts the pairing
+ * process.
+ *
+ * So if a hci_conn object already exists for the following connection
+ * attempt, we simply update pending_sec_level and auth_type fields
+ * and return the object found.
+ */
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+ if (conn) {
+ conn->pending_sec_level = sec_level;
+ conn->auth_type = auth_type;
+ goto done;
+ }
- le = hci_conn_add(hdev, LE_LINK, dst);
- if (!le)
- return ERR_PTR(-ENOMEM);
+ /* Since the controller supports only one LE connection attempt at a
+ * time, we return -EBUSY if there is any connection attempt running.
+ */
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (conn)
+ return ERR_PTR(-EBUSY);
- le->dst_type = bdaddr_to_le(dst_type);
- hci_le_create_connection(le);
- }
+ conn = hci_conn_add(hdev, LE_LINK, dst);
+ if (!conn)
+ return ERR_PTR(-ENOMEM);
- le->pending_sec_level = sec_level;
- le->auth_type = auth_type;
+ if (dst_type == BDADDR_LE_PUBLIC)
+ conn->dst_type = ADDR_LE_DEV_PUBLIC;
+ else
+ conn->dst_type = ADDR_LE_DEV_RANDOM;
- hci_conn_hold(le);
+ conn->src_type = hdev->own_addr_type;
- return le;
+ conn->state = BT_CONNECT;
+ conn->out = true;
+ conn->link_mode |= HCI_LM_MASTER;
+ conn->sec_level = BT_SECURITY_LOW;
+ conn->pending_sec_level = sec_level;
+ conn->auth_type = auth_type;
+
+ err = hci_create_le_conn(conn);
+ if (err)
+ return ERR_PTR(err);
+
+done:
+ hci_conn_hold(conn);
+ return conn;
}
static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
@@ -580,6 +639,9 @@ static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
{
struct hci_conn *acl;
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ return ERR_PTR(-ENOTSUPP);
+
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
acl = hci_conn_add(hdev, ACL_LINK, dst);
@@ -846,8 +908,8 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
timer:
if (hdev->idle_timeout > 0)
- mod_timer(&conn->idle_timer,
- jiffies + msecs_to_jiffies(hdev->idle_timeout));
+ queue_delayed_work(hdev->workqueue, &conn->idle_work,
+ msecs_to_jiffies(hdev->idle_timeout));
}
/* Drop all connection on the device */
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index fb7356fcfe51..03e83558a411 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -27,8 +27,9 @@
#include <linux/export.h>
#include <linux/idr.h>
-
#include <linux/rfkill.h>
+#include <linux/debugfs.h>
+#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -55,6 +56,586 @@ static void hci_notify(struct hci_dev *hdev, int event)
hci_sock_dev_event(hdev, event);
}
+/* ---- HCI debugfs entries ---- */
+
+static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ struct sk_buff *skb;
+ char buf[32];
+ size_t buf_size = min(count, (sizeof(buf)-1));
+ bool enable;
+ int err;
+
+ if (!test_bit(HCI_UP, &hdev->flags))
+ return -ENETDOWN;
+
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ if (strtobool(buf, &enable))
+ return -EINVAL;
+
+ if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
+ return -EALREADY;
+
+ hci_req_lock(hdev);
+ if (enable)
+ skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
+ HCI_CMD_TIMEOUT);
+ else
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
+ HCI_CMD_TIMEOUT);
+ hci_req_unlock(hdev);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ err = -bt_to_errno(skb->data[0]);
+ kfree_skb(skb);
+
+ if (err < 0)
+ return err;
+
+ change_bit(HCI_DUT_MODE, &hdev->dev_flags);
+
+ return count;
+}
+
+static const struct file_operations dut_mode_fops = {
+ .open = simple_open,
+ .read = dut_mode_read,
+ .write = dut_mode_write,
+ .llseek = default_llseek,
+};
+
+static int features_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ u8 p;
+
+ hci_dev_lock(hdev);
+ for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
+ seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
+ "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
+ hdev->features[p][0], hdev->features[p][1],
+ hdev->features[p][2], hdev->features[p][3],
+ hdev->features[p][4], hdev->features[p][5],
+ hdev->features[p][6], hdev->features[p][7]);
+ }
+ if (lmp_le_capable(hdev))
+ seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
+ "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
+ hdev->le_features[0], hdev->le_features[1],
+ hdev->le_features[2], hdev->le_features[3],
+ hdev->le_features[4], hdev->le_features[5],
+ hdev->le_features[6], hdev->le_features[7]);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int features_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, features_show, inode->i_private);
+}
+
+static const struct file_operations features_fops = {
+ .open = features_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int blacklist_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+ struct bdaddr_list *b;
+
+ hci_dev_lock(hdev);
+ list_for_each_entry(b, &hdev->blacklist, list)
+ seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int blacklist_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, blacklist_show, inode->i_private);
+}
+
+static const struct file_operations blacklist_fops = {
+ .open = blacklist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int uuids_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+ struct bt_uuid *uuid;
+
+ hci_dev_lock(hdev);
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ u8 i, val[16];
+
+ /* The Bluetooth UUID values are stored in big endian,
+ * but with reversed byte order. So convert them into
+ * the right order for the %pUb modifier.
+ */
+ for (i = 0; i < 16; i++)
+ val[i] = uuid->uuid[15 - i];
+
+ seq_printf(f, "%pUb\n", val);
+ }
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int uuids_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uuids_show, inode->i_private);
+}
+
+static const struct file_operations uuids_fops = {
+ .open = uuids_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int inquiry_cache_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+ struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ hci_dev_lock(hdev);
+
+ list_for_each_entry(e, &cache->all, all) {
+ struct inquiry_data *data = &e->data;
+ seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
+ &data->bdaddr,
+ data->pscan_rep_mode, data->pscan_period_mode,
+ data->pscan_mode, data->dev_class[2],
+ data->dev_class[1], data->dev_class[0],
+ __le16_to_cpu(data->clock_offset),
+ data->rssi, data->ssp_mode, e->timestamp);
+ }
+
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int inquiry_cache_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, inquiry_cache_show, inode->i_private);
+}
+
+static const struct file_operations inquiry_cache_fops = {
+ .open = inquiry_cache_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int link_keys_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct list_head *p, *n;
+
+ hci_dev_lock(hdev);
+ list_for_each_safe(p, n, &hdev->link_keys) {
+ struct link_key *key = list_entry(p, struct link_key, list);
+ seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
+ HCI_LINK_KEY_SIZE, key->val, key->pin_len);
+ }
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int link_keys_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, link_keys_show, inode->i_private);
+}
+
+static const struct file_operations link_keys_fops = {
+ .open = link_keys_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations use_debug_keys_fops = {
+ .open = simple_open,
+ .read = use_debug_keys_read,
+ .llseek = default_llseek,
+};
+
+static int dev_class_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+
+ hci_dev_lock(hdev);
+ seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
+ hdev->dev_class[1], hdev->dev_class[0]);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int dev_class_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dev_class_show, inode->i_private);
+}
+
+static const struct file_operations dev_class_fops = {
+ .open = dev_class_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int voice_setting_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->voice_setting;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
+ NULL, "0x%4.4llx\n");
+
+static int auto_accept_delay_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ hdev->auto_accept_delay = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int auto_accept_delay_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->auto_accept_delay;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
+ auto_accept_delay_set, "%llu\n");
+
+static int ssp_debug_mode_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+ struct sk_buff *skb;
+ __u8 mode;
+ int err;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ if (!test_bit(HCI_UP, &hdev->flags))
+ return -ENETDOWN;
+
+ hci_req_lock(hdev);
+ mode = val;
+ skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
+ &mode, HCI_CMD_TIMEOUT);
+ hci_req_unlock(hdev);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ err = -bt_to_errno(skb->data[0]);
+ kfree_skb(skb);
+
+ if (err < 0)
+ return err;
+
+ hci_dev_lock(hdev);
+ hdev->ssp_debug_mode = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int ssp_debug_mode_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->ssp_debug_mode;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
+ ssp_debug_mode_set, "%llu\n");
+
+static int idle_timeout_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val != 0 && (val < 500 || val > 3600000))
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->idle_timeout = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int idle_timeout_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->idle_timeout;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
+ idle_timeout_set, "%llu\n");
+
+static int sniff_min_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->sniff_min_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int sniff_min_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->sniff_min_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
+ sniff_min_interval_set, "%llu\n");
+
+static int sniff_max_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->sniff_max_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int sniff_max_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->sniff_max_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
+ sniff_max_interval_set, "%llu\n");
+
+static int static_address_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+
+ hci_dev_lock(hdev);
+ seq_printf(f, "%pMR\n", &hdev->static_addr);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int static_address_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, static_address_show, inode->i_private);
+}
+
+static const struct file_operations static_address_fops = {
+ .open = static_address_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int own_address_type_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->own_addr_type = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int own_address_type_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->own_addr_type;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
+ own_address_type_set, "%llu\n");
+
+static int long_term_keys_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct list_head *p, *n;
+
+ hci_dev_lock(hdev);
+ list_for_each_safe(p, n, &hdev->long_term_keys) {
+ struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
+ seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
+ &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
+ ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
+ 8, ltk->rand, 16, ltk->val);
+ }
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int long_term_keys_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, long_term_keys_show, inode->i_private);
+}
+
+static const struct file_operations long_term_keys_fops = {
+ .open = long_term_keys_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int conn_min_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_conn_min_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int conn_min_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_conn_min_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
+ conn_min_interval_set, "%llu\n");
+
+static int conn_max_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_conn_max_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int conn_max_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_conn_max_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
+ conn_max_interval_set, "%llu\n");
+
/* ---- HCI requests ---- */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
@@ -307,11 +888,23 @@ static void amp_init(struct hci_request *req)
/* Read Local Version */
hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+ /* Read Local Supported Commands */
+ hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
+
+ /* Read Local Supported Features */
+ hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
+
/* Read Local AMP Info */
hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
/* Read Data Blk size */
hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
+
+ /* Read Flow Control Mode */
+ hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
+
+ /* Read Location Data */
+ hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
static void hci_init1_req(struct hci_request *req, unsigned long opt)
@@ -341,6 +934,8 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
static void bredr_setup(struct hci_request *req)
{
+ struct hci_dev *hdev = req->hdev;
+
__le16 param;
__u8 flt_type;
@@ -356,6 +951,12 @@ static void bredr_setup(struct hci_request *req)
/* Read Voice Setting */
hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
+ /* Read Number of Supported IAC */
+ hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
+
+ /* Read Current IAC LAP */
+ hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
+
/* Clear Event Filters */
flt_type = HCI_FLT_CLEAR_ALL;
hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
@@ -364,8 +965,10 @@ static void bredr_setup(struct hci_request *req)
param = __constant_cpu_to_le16(0x7d00);
hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
- /* Read page scan parameters */
- if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
+ /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
+ * but it does not support page scan related HCI commands.
+ */
+ if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
}
@@ -519,6 +1122,8 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
if (lmp_bredr_capable(hdev))
bredr_setup(req);
+ else
+ clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
if (lmp_le_capable(hdev))
le_setup(req);
@@ -532,6 +1137,14 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
if (lmp_ssp_capable(hdev)) {
+ /* When SSP is available, then the host features page
+ * should also be available as well. However some
+ * controllers list the max_page as 0 as long as SSP
+ * has not been enabled. To achieve proper debugging
+ * output, force the minimum max_page to 1 at least.
+ */
+ hdev->max_page = 0x01;
+
if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
u8 mode = 0x01;
hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
@@ -607,6 +1220,34 @@ static void hci_set_le_support(struct hci_request *req)
&cp);
}
+static void hci_set_event_mask_page_2(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+ /* If Connectionless Slave Broadcast master role is supported
+ * enable all necessary events for it.
+ */
+ if (hdev->features[2][0] & 0x01) {
+ events[1] |= 0x40; /* Triggered Clock Capture */
+ events[1] |= 0x80; /* Synchronization Train Complete */
+ events[2] |= 0x10; /* Slave Page Response Timeout */
+ events[2] |= 0x20; /* CSB Channel Map Change */
+ }
+
+ /* If Connectionless Slave Broadcast slave role is supported
+ * enable all necessary events for it.
+ */
+ if (hdev->features[2][0] & 0x02) {
+ events[2] |= 0x01; /* Synchronization Train Received */
+ events[2] |= 0x02; /* CSB Receive */
+ events[2] |= 0x04; /* CSB Timeout */
+ events[2] |= 0x08; /* Truncated Page Complete */
+ }
+
+ hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
+}
+
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
@@ -634,8 +1275,19 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
hci_setup_link_policy(req);
if (lmp_le_capable(hdev)) {
+ if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ /* If the controller has a public BD_ADDR, then
+ * by default use that one. If this is a LE only
+ * controller without a public address, default
+ * to the random address.
+ */
+ if (bacmp(&hdev->bdaddr, BDADDR_ANY))
+ hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
+ else
+ hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
+ }
+
hci_set_le_support(req);
- hci_update_ad(req);
}
/* Read features beyond page 1 if available */
@@ -648,6 +1300,19 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
}
}
+static void hci_init4_req(struct hci_request *req, unsigned long opt)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ /* Set event mask page 2 if the HCI command for it is supported */
+ if (hdev->commands[22] & 0x04)
+ hci_set_event_mask_page_2(req);
+
+ /* Check for Synchronization Train support */
+ if (hdev->features[2][0] & 0x04)
+ hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
+}
+
static int __hci_init(struct hci_dev *hdev)
{
int err;
@@ -656,6 +1321,14 @@ static int __hci_init(struct hci_dev *hdev)
if (err < 0)
return err;
+ /* The Device Under Test (DUT) mode is special and available for
+ * all controller types. So just create it early on.
+ */
+ if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
+ &dut_mode_fops);
+ }
+
/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
* BR/EDR/LE type controllers. AMP controllers only need the
* first stage init.
@@ -667,7 +1340,75 @@ static int __hci_init(struct hci_dev *hdev)
if (err < 0)
return err;
- return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
+ err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ /* Only create debugfs entries during the initial setup
+ * phase and not every time the controller gets powered on.
+ */
+ if (!test_bit(HCI_SETUP, &hdev->dev_flags))
+ return 0;
+
+ debugfs_create_file("features", 0444, hdev->debugfs, hdev,
+ &features_fops);
+ debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
+ &hdev->manufacturer);
+ debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
+ debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
+ debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
+ &blacklist_fops);
+ debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
+
+ if (lmp_bredr_capable(hdev)) {
+ debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
+ hdev, &inquiry_cache_fops);
+ debugfs_create_file("link_keys", 0400, hdev->debugfs,
+ hdev, &link_keys_fops);
+ debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
+ hdev, &use_debug_keys_fops);
+ debugfs_create_file("dev_class", 0444, hdev->debugfs,
+ hdev, &dev_class_fops);
+ debugfs_create_file("voice_setting", 0444, hdev->debugfs,
+ hdev, &voice_setting_fops);
+ }
+
+ if (lmp_ssp_capable(hdev)) {
+ debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
+ hdev, &auto_accept_delay_fops);
+ debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
+ hdev, &ssp_debug_mode_fops);
+ }
+
+ if (lmp_sniff_capable(hdev)) {
+ debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
+ hdev, &idle_timeout_fops);
+ debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
+ hdev, &sniff_min_interval_fops);
+ debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
+ hdev, &sniff_max_interval_fops);
+ }
+
+ if (lmp_le_capable(hdev)) {
+ debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
+ &hdev->le_white_list_size);
+ debugfs_create_file("static_address", 0444, hdev->debugfs,
+ hdev, &static_address_fops);
+ debugfs_create_file("own_address_type", 0644, hdev->debugfs,
+ hdev, &own_address_type_fops);
+ debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
+ hdev, &long_term_keys_fops);
+ debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
+ hdev, &conn_min_interval_fops);
+ debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
+ hdev, &conn_max_interval_fops);
+ }
+
+ return 0;
}
static void hci_scan_req(struct hci_request *req, unsigned long opt)
@@ -984,6 +1725,21 @@ int hci_inquiry(void __user *arg)
if (!hdev)
return -ENODEV;
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ err = -EBUSY;
+ goto done;
+ }
+
+ if (hdev->dev_type != HCI_BREDR) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+
hci_dev_lock(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
@@ -1043,100 +1799,10 @@ done:
return err;
}
-static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
-{
- u8 ad_len = 0, flags = 0;
- size_t name_len;
-
- if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
- flags |= LE_AD_GENERAL;
-
- if (!lmp_bredr_capable(hdev))
- flags |= LE_AD_NO_BREDR;
-
- if (lmp_le_br_capable(hdev))
- flags |= LE_AD_SIM_LE_BREDR_CTRL;
-
- if (lmp_host_le_br_capable(hdev))
- flags |= LE_AD_SIM_LE_BREDR_HOST;
-
- if (flags) {
- BT_DBG("adv flags 0x%02x", flags);
-
- ptr[0] = 2;
- ptr[1] = EIR_FLAGS;
- ptr[2] = flags;
-
- ad_len += 3;
- ptr += 3;
- }
-
- if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
- ptr[0] = 2;
- ptr[1] = EIR_TX_POWER;
- ptr[2] = (u8) hdev->adv_tx_power;
-
- ad_len += 3;
- ptr += 3;
- }
-
- name_len = strlen(hdev->dev_name);
- if (name_len > 0) {
- size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
-
- if (name_len > max_len) {
- name_len = max_len;
- ptr[1] = EIR_NAME_SHORT;
- } else
- ptr[1] = EIR_NAME_COMPLETE;
-
- ptr[0] = name_len + 1;
-
- memcpy(ptr + 2, hdev->dev_name, name_len);
-
- ad_len += (name_len + 2);
- ptr += (name_len + 2);
- }
-
- return ad_len;
-}
-
-void hci_update_ad(struct hci_request *req)
+static int hci_dev_do_open(struct hci_dev *hdev)
{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_le_set_adv_data cp;
- u8 len;
-
- if (!lmp_le_capable(hdev))
- return;
-
- memset(&cp, 0, sizeof(cp));
-
- len = create_ad(hdev, cp.data);
-
- if (hdev->adv_data_len == len &&
- memcmp(cp.data, hdev->adv_data, len) == 0)
- return;
-
- memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
- hdev->adv_data_len = len;
-
- cp.length = len;
-
- hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
-}
-
-/* ---- HCI ioctl helpers ---- */
-
-int hci_dev_open(__u16 dev)
-{
- struct hci_dev *hdev;
int ret = 0;
- hdev = hci_dev_get(dev);
- if (!hdev)
- return -ENODEV;
-
BT_DBG("%s %p", hdev->name, hdev);
hci_req_lock(hdev);
@@ -1146,13 +1812,29 @@ int hci_dev_open(__u16 dev)
goto done;
}
- /* Check for rfkill but allow the HCI setup stage to proceed
- * (which in itself doesn't cause any RF activity).
- */
- if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
- !test_bit(HCI_SETUP, &hdev->dev_flags)) {
- ret = -ERFKILL;
- goto done;
+ if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ /* Check for rfkill but allow the HCI setup stage to
+ * proceed (which in itself doesn't cause any RF activity).
+ */
+ if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
+ ret = -ERFKILL;
+ goto done;
+ }
+
+ /* Check for valid public address or a configured static
+ * random address, but let the HCI setup proceed to
+ * be able to determine if there is a public address
+ * or not.
+ *
+ * This check is only valid for BR/EDR controllers
+ * since AMP controllers do not have an address.
+ */
+ if (hdev->dev_type == HCI_BREDR &&
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ !bacmp(&hdev->static_addr, BDADDR_ANY)) {
+ ret = -EADDRNOTAVAIL;
+ goto done;
+ }
}
if (test_bit(HCI_UP, &hdev->flags)) {
@@ -1172,16 +1854,11 @@ int hci_dev_open(__u16 dev)
ret = hdev->setup(hdev);
if (!ret) {
- /* Treat all non BR/EDR controllers as raw devices if
- * enable_hs is not set.
- */
- if (hdev->dev_type != HCI_BREDR && !enable_hs)
- set_bit(HCI_RAW, &hdev->flags);
-
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_RAW, &hdev->flags);
- if (!test_bit(HCI_RAW, &hdev->flags))
+ if (!test_bit(HCI_RAW, &hdev->flags) &&
+ !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
ret = __hci_init(hdev);
}
@@ -1192,7 +1869,8 @@ int hci_dev_open(__u16 dev)
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
- mgmt_valid_hdev(hdev)) {
+ !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
+ hdev->dev_type == HCI_BREDR) {
hci_dev_lock(hdev);
mgmt_powered(hdev, 1);
hci_dev_unlock(hdev);
@@ -1220,10 +1898,41 @@ int hci_dev_open(__u16 dev)
done:
hci_req_unlock(hdev);
- hci_dev_put(hdev);
return ret;
}
+/* ---- HCI ioctl helpers ---- */
+
+int hci_dev_open(__u16 dev)
+{
+ struct hci_dev *hdev;
+ int err;
+
+ hdev = hci_dev_get(dev);
+ if (!hdev)
+ return -ENODEV;
+
+ /* We need to ensure that no other power on/off work is pending
+ * before proceeding to call hci_dev_do_open. This is
+ * particularly important if the setup procedure has not yet
+ * completed.
+ */
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+ cancel_delayed_work(&hdev->power_off);
+
+ /* After this call it is guaranteed that the setup procedure
+ * has finished. This means that error conditions like RFKILL
+ * or no valid public or static random address apply.
+ */
+ flush_workqueue(hdev->req_workqueue);
+
+ err = hci_dev_do_open(hdev);
+
+ hci_dev_put(hdev);
+
+ return err;
+}
+
static int hci_dev_do_close(struct hci_dev *hdev)
{
BT_DBG("%s %p", hdev->name, hdev);
@@ -1247,6 +1956,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
cancel_delayed_work(&hdev->discov_off);
hdev->discov_timeout = 0;
clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
}
if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
@@ -1268,6 +1978,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
if (!test_bit(HCI_RAW, &hdev->flags) &&
+ !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
set_bit(HCI_INIT, &hdev->flags);
__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
@@ -1300,15 +2011,16 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hdev->flags = 0;
hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
- if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
- mgmt_valid_hdev(hdev)) {
- hci_dev_lock(hdev);
- mgmt_powered(hdev, 0);
- hci_dev_unlock(hdev);
+ if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ if (hdev->dev_type == HCI_BREDR) {
+ hci_dev_lock(hdev);
+ mgmt_powered(hdev, 0);
+ hci_dev_unlock(hdev);
+ }
}
/* Controller radio is available but is currently powered down */
- hdev->amp_status = 0;
+ hdev->amp_status = AMP_STATUS_POWERED_DOWN;
memset(hdev->eir, 0, sizeof(hdev->eir));
memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
@@ -1328,11 +2040,17 @@ int hci_dev_close(__u16 dev)
if (!hdev)
return -ENODEV;
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ err = -EBUSY;
+ goto done;
+ }
+
if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
cancel_delayed_work(&hdev->power_off);
err = hci_dev_do_close(hdev);
+done:
hci_dev_put(hdev);
return err;
}
@@ -1348,8 +2066,15 @@ int hci_dev_reset(__u16 dev)
hci_req_lock(hdev);
- if (!test_bit(HCI_UP, &hdev->flags))
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ ret = -ENETDOWN;
goto done;
+ }
+
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ ret = -EBUSY;
+ goto done;
+ }
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
@@ -1384,10 +2109,15 @@ int hci_dev_reset_stat(__u16 dev)
if (!hdev)
return -ENODEV;
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ ret = -EBUSY;
+ goto done;
+ }
+
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
+done:
hci_dev_put(hdev);
-
return ret;
}
@@ -1404,6 +2134,21 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
if (!hdev)
return -ENODEV;
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ err = -EBUSY;
+ goto done;
+ }
+
+ if (hdev->dev_type != HCI_BREDR) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+
switch (cmd) {
case HCISETAUTH:
err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
@@ -1462,6 +2207,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
break;
}
+done:
hci_dev_put(hdev);
return err;
}
@@ -1534,7 +2280,7 @@ int hci_get_dev_info(void __user *arg)
strcpy(di.name, hdev->name);
di.bdaddr = hdev->bdaddr;
- di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
+ di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
di.flags = hdev->flags;
di.pkt_type = hdev->pkt_type;
if (lmp_bredr_capable(hdev)) {
@@ -1570,6 +2316,9 @@ static int hci_rfkill_set_block(void *data, bool blocked)
BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+ return -EBUSY;
+
if (blocked) {
set_bit(HCI_RFKILLED, &hdev->dev_flags);
if (!test_bit(HCI_SETUP, &hdev->dev_flags))
@@ -1592,13 +2341,20 @@ static void hci_power_on(struct work_struct *work)
BT_DBG("%s", hdev->name);
- err = hci_dev_open(hdev->id);
+ err = hci_dev_do_open(hdev);
if (err < 0) {
mgmt_set_powered_failed(hdev, err);
return;
}
- if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
+ /* During the HCI setup phase, a few error conditions are
+ * ignored and they need to be checked now. If they are still
+ * valid, it is important to turn the device back off.
+ */
+ if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
+ (hdev->dev_type == HCI_BREDR &&
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ !bacmp(&hdev->static_addr, BDADDR_ANY))) {
clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
hci_dev_do_close(hdev);
} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
@@ -1623,19 +2379,12 @@ static void hci_power_off(struct work_struct *work)
static void hci_discov_off(struct work_struct *work)
{
struct hci_dev *hdev;
- u8 scan = SCAN_PAGE;
hdev = container_of(work, struct hci_dev, discov_off.work);
BT_DBG("%s", hdev->name);
- hci_dev_lock(hdev);
-
- hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
-
- hdev->discov_timeout = 0;
-
- hci_dev_unlock(hdev);
+ mgmt_discoverable_timeout(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
@@ -1958,13 +2707,15 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
return 0;
}
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 type)
{
struct bdaddr_list *b;
- list_for_each_entry(b, &hdev->blacklist, list)
- if (bacmp(bdaddr, &b->bdaddr) == 0)
+ list_for_each_entry(b, &hdev->blacklist, list) {
+ if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
return b;
+ }
return NULL;
}
@@ -1974,9 +2725,7 @@ int hci_blacklist_clear(struct hci_dev *hdev)
struct list_head *p, *n;
list_for_each_safe(p, n, &hdev->blacklist) {
- struct bdaddr_list *b;
-
- b = list_entry(p, struct bdaddr_list, list);
+ struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
list_del(p);
kfree(b);
@@ -1989,10 +2738,10 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
struct bdaddr_list *entry;
- if (bacmp(bdaddr, BDADDR_ANY) == 0)
+ if (!bacmp(bdaddr, BDADDR_ANY))
return -EBADF;
- if (hci_blacklist_lookup(hdev, bdaddr))
+ if (hci_blacklist_lookup(hdev, bdaddr, type))
return -EEXIST;
entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
@@ -2000,6 +2749,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
return -ENOMEM;
bacpy(&entry->bdaddr, bdaddr);
+ entry->bdaddr_type = type;
list_add(&entry->list, &hdev->blacklist);
@@ -2010,10 +2760,10 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
struct bdaddr_list *entry;
- if (bacmp(bdaddr, BDADDR_ANY) == 0)
+ if (!bacmp(bdaddr, BDADDR_ANY))
return hci_blacklist_clear(hdev);
- entry = hci_blacklist_lookup(hdev, bdaddr);
+ entry = hci_blacklist_lookup(hdev, bdaddr, type);
if (!entry)
return -ENOENT;
@@ -2111,13 +2861,19 @@ struct hci_dev *hci_alloc_dev(void)
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
- hdev->io_capability = 0x03; /* No Input No Output */
+ hdev->num_iac = 0x01; /* One IAC support is mandatory */
+ hdev->io_capability = 0x03; /* No Input No Output */
hdev->inq_tx_power = HCI_TX_POWER_INVALID;
hdev->adv_tx_power = HCI_TX_POWER_INVALID;
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
+ hdev->le_scan_interval = 0x0060;
+ hdev->le_scan_window = 0x0030;
+ hdev->le_conn_min_interval = 0x0028;
+ hdev->le_conn_max_interval = 0x0038;
+
mutex_init(&hdev->lock);
mutex_init(&hdev->req_lock);
@@ -2206,7 +2962,12 @@ int hci_register_dev(struct hci_dev *hdev)
goto err;
}
- error = hci_add_sysfs(hdev);
+ if (!IS_ERR_OR_NULL(bt_debugfs))
+ hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
+
+ dev_set_name(&hdev->dev, "%s", hdev->name);
+
+ error = device_add(&hdev->dev);
if (error < 0)
goto err_wqueue;
@@ -2224,9 +2985,14 @@ int hci_register_dev(struct hci_dev *hdev)
set_bit(HCI_RFKILLED, &hdev->dev_flags);
set_bit(HCI_SETUP, &hdev->dev_flags);
+ set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
- if (hdev->dev_type != HCI_AMP)
- set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+ if (hdev->dev_type == HCI_BREDR) {
+ /* Assume BR/EDR support until proven otherwise (such as
+ * through reading supported features during init.
+ */
+ set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+ }
write_lock(&hci_dev_list_lock);
list_add(&hdev->list, &hci_dev_list);
@@ -2289,7 +3055,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
rfkill_destroy(hdev->rfkill);
}
- hci_del_sysfs(hdev);
+ device_del(&hdev->dev);
+
+ debugfs_remove_recursive(hdev->debugfs);
destroy_workqueue(hdev->workqueue);
destroy_workqueue(hdev->req_workqueue);
@@ -2325,9 +3093,8 @@ int hci_resume_dev(struct hci_dev *hdev)
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
-int hci_recv_frame(struct sk_buff *skb)
+int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
&& !test_bit(HCI_INIT, &hdev->flags))) {
kfree_skb(skb);
@@ -2386,7 +3153,6 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
scb->expect = hlen;
scb->pkt_type = type;
- skb->dev = (void *) hdev;
hdev->reassembly[index] = skb;
}
@@ -2446,7 +3212,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
/* Complete frame */
bt_cb(skb)->pkt_type = type;
- hci_recv_frame(skb);
+ hci_recv_frame(hdev, skb);
hdev->reassembly[index] = NULL;
return remain;
@@ -2537,15 +3303,8 @@ int hci_unregister_cb(struct hci_cb *cb)
}
EXPORT_SYMBOL(hci_unregister_cb);
-static int hci_send_frame(struct sk_buff *skb)
+static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
-
- if (!hdev) {
- kfree_skb(skb);
- return -ENODEV;
- }
-
BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
/* Time stamp */
@@ -2562,7 +3321,8 @@ static int hci_send_frame(struct sk_buff *skb)
/* Get rid of skb owner, prior to sending to the driver. */
skb_orphan(skb);
- return hdev->send(skb);
+ if (hdev->send(hdev, skb) < 0)
+ BT_ERR("%s sending frame failed", hdev->name);
}
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
@@ -2625,7 +3385,6 @@ static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
BT_DBG("skb len %d", skb->len);
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
- skb->dev = (void *) hdev;
return skb;
}
@@ -2769,7 +3528,6 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
do {
skb = list; list = list->next;
- skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
hci_add_acl_hdr(skb, conn->handle, flags);
@@ -2788,8 +3546,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
- skb->dev = (void *) hdev;
-
hci_queue_acl(chan, &chan->data_q, skb, flags);
queue_work(hdev->workqueue, &hdev->tx_work);
@@ -2810,7 +3566,6 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
skb_reset_transport_header(skb);
memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
- skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
skb_queue_tail(&conn->data_q, skb);
@@ -3075,7 +3830,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)
hci_conn_enter_active_mode(chan->conn,
bt_cb(skb)->force_active);
- hci_send_frame(skb);
+ hci_send_frame(hdev, skb);
hdev->acl_last_tx = jiffies;
hdev->acl_cnt--;
@@ -3127,7 +3882,7 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)
hci_conn_enter_active_mode(chan->conn,
bt_cb(skb)->force_active);
- hci_send_frame(skb);
+ hci_send_frame(hdev, skb);
hdev->acl_last_tx = jiffies;
hdev->block_cnt -= blocks;
@@ -3180,7 +3935,7 @@ static void hci_sched_sco(struct hci_dev *hdev)
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
- hci_send_frame(skb);
+ hci_send_frame(hdev, skb);
conn->sent++;
if (conn->sent == ~0)
@@ -3204,7 +3959,7 @@ static void hci_sched_esco(struct hci_dev *hdev)
&quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
- hci_send_frame(skb);
+ hci_send_frame(hdev, skb);
conn->sent++;
if (conn->sent == ~0)
@@ -3246,7 +4001,7 @@ static void hci_sched_le(struct hci_dev *hdev)
skb = skb_dequeue(&chan->data_q);
- hci_send_frame(skb);
+ hci_send_frame(hdev, skb);
hdev->le_last_tx = jiffies;
cnt--;
@@ -3272,19 +4027,17 @@ static void hci_tx_work(struct work_struct *work)
BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
hdev->sco_cnt, hdev->le_cnt);
- /* Schedule queues and send stuff to HCI driver */
-
- hci_sched_acl(hdev);
-
- hci_sched_sco(hdev);
-
- hci_sched_esco(hdev);
-
- hci_sched_le(hdev);
+ if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ /* Schedule queues and send stuff to HCI driver */
+ hci_sched_acl(hdev);
+ hci_sched_sco(hdev);
+ hci_sched_esco(hdev);
+ hci_sched_le(hdev);
+ }
/* Send next queued raw (unknown type) packet */
while ((skb = skb_dequeue(&hdev->raw_q)))
- hci_send_frame(skb);
+ hci_send_frame(hdev, skb);
}
/* ----- HCI RX task (incoming data processing) ----- */
@@ -3471,7 +4224,8 @@ static void hci_rx_work(struct work_struct *work)
hci_send_to_sock(hdev, skb);
}
- if (test_bit(HCI_RAW, &hdev->flags)) {
+ if (test_bit(HCI_RAW, &hdev->flags) ||
+ test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
kfree_skb(skb);
continue;
}
@@ -3526,10 +4280,10 @@ static void hci_cmd_work(struct work_struct *work)
kfree_skb(hdev->sent_cmd);
- hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
+ hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
if (hdev->sent_cmd) {
atomic_dec(&hdev->cmd_cnt);
- hci_send_frame(skb);
+ hci_send_frame(hdev, skb);
if (test_bit(HCI_RESET, &hdev->flags))
del_timer(&hdev->cmd_timer);
else
@@ -3541,15 +4295,3 @@ static void hci_cmd_work(struct work_struct *work)
}
}
}
-
-u8 bdaddr_to_le(u8 bdaddr_type)
-{
- switch (bdaddr_type) {
- case BDADDR_LE_PUBLIC:
- return ADDR_LE_DEV_PUBLIC;
-
- default:
- /* Fallback to LE Random address type */
- return ADDR_LE_DEV_RANDOM;
- }
-}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 8db3e89fae35..142aa61f9c3f 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -29,8 +29,9 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/amp.h>
+
+#include "a2mp.h"
+#include "amp.h"
/* Handle HCI Event packets */
@@ -194,6 +195,11 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
hdev->adv_data_len = 0;
+
+ memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
+ hdev->scan_rsp_data_len = 0;
+
+ hdev->ssp_debug_mode = 0;
}
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -297,6 +303,11 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
goto done;
}
+ /* We need to ensure that we set this back on if someone changed
+ * the scan mode through a raw HCI socket.
+ */
+ set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
@@ -304,11 +315,6 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
set_bit(HCI_ISCAN, &hdev->flags);
if (!old_iscan)
mgmt_discoverable(hdev, 1);
- if (hdev->discov_timeout > 0) {
- int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
- queue_delayed_work(hdev->workqueue, &hdev->discov_off,
- to);
- }
} else if (old_iscan)
mgmt_discoverable(hdev, 0);
@@ -412,6 +418,21 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev,
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
+static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->num_iac = rp->num_iac;
+
+ BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
+}
+
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -449,14 +470,13 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
if (rp->status)
return;
- hdev->hci_ver = rp->hci_ver;
- hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
- hdev->lmp_ver = rp->lmp_ver;
- hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
- hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
-
- BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
- hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
+ if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ hdev->hci_ver = rp->hci_ver;
+ hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
+ hdev->lmp_ver = rp->lmp_ver;
+ hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
+ hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
+ }
}
static void hci_cc_read_local_commands(struct hci_dev *hdev,
@@ -466,7 +486,10 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev,
BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
- if (!rp->status)
+ if (rp->status)
+ return;
+
+ if (test_bit(HCI_SETUP, &hdev->dev_flags))
memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}
@@ -518,12 +541,6 @@ static void hci_cc_read_local_features(struct hci_dev *hdev,
if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
-
- BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
- hdev->features[0][0], hdev->features[0][1],
- hdev->features[0][2], hdev->features[0][3],
- hdev->features[0][4], hdev->features[0][5],
- hdev->features[0][6], hdev->features[0][7]);
}
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
@@ -536,7 +553,8 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
if (rp->status)
return;
- hdev->max_page = rp->max_page;
+ if (hdev->max_page < rp->max_page)
+ hdev->max_page = rp->max_page;
if (rp->page < HCI_MAX_PAGES)
memcpy(hdev->features[rp->page], rp->features, 8);
@@ -913,17 +931,9 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (!status) {
if (*sent)
- set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
+ set_bit(HCI_ADVERTISING, &hdev->dev_flags);
else
- clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
- }
-
- if (!test_bit(HCI_INIT, &hdev->flags)) {
- struct hci_request req;
-
- hci_req_init(&req, hdev);
- hci_update_ad(&req);
- hci_req_run(&req, NULL);
+ clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
}
hci_dev_unlock(hdev);
@@ -994,20 +1004,20 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
return;
if (!status) {
- if (sent->le)
+ if (sent->le) {
hdev->features[1][0] |= LMP_HOST_LE;
- else
+ set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+ } else {
hdev->features[1][0] &= ~LMP_HOST_LE;
+ clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+ clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ }
if (sent->simul)
hdev->features[1][0] |= LMP_HOST_LE_BREDR;
else
hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
}
-
- if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
- !test_bit(HCI_INIT, &hdev->flags))
- mgmt_le_enable_complete(hdev, sent->le, status);
}
static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
@@ -1291,9 +1301,11 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
goto unlock;
if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
- struct hci_cp_auth_requested cp;
- cp.handle = __cpu_to_le16(conn->handle);
- hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
+ struct hci_cp_auth_requested auth_cp;
+
+ auth_cp.handle = __cpu_to_le16(conn->handle);
+ hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
+ sizeof(auth_cp), &auth_cp);
}
unlock:
@@ -1465,33 +1477,6 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
hci_dev_unlock(hdev);
}
-static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
-{
- struct hci_conn *conn;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- if (status) {
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
- if (!conn) {
- hci_dev_unlock(hdev);
- return;
- }
-
- BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
-
- conn->state = BT_CLOSED;
- mgmt_connect_failed(hdev, &conn->dst, conn->type,
- conn->dst_type, status);
- hci_proto_connect_cfm(conn, status);
- hci_conn_del(conn);
-
- hci_dev_unlock(hdev);
- }
-}
-
static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
struct hci_cp_create_phy_link *cp;
@@ -1706,7 +1691,7 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
&flags);
if ((mask & HCI_LM_ACCEPT) &&
- !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
+ !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
/* Connection accepted */
struct inquiry_entry *ie;
struct hci_conn *conn;
@@ -1807,8 +1792,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (ev->status == 0)
conn->state = BT_CLOSED;
- if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
- (conn->type == ACL_LINK || conn->type == LE_LINK)) {
+ if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
if (ev->status) {
mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
conn->dst_type, ev->status);
@@ -1821,10 +1805,25 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
if (ev->status == 0) {
- if (conn->type == ACL_LINK && conn->flush_key)
+ u8 type = conn->type;
+
+ if (type == ACL_LINK && conn->flush_key)
hci_remove_link_key(hdev, &conn->dst);
hci_proto_disconn_cfm(conn, ev->reason);
hci_conn_del(conn);
+
+ /* Re-enable advertising if necessary, since it might
+ * have been disabled by the connection. From the
+ * HCI_LE_Set_Advertise_Enable command description in
+ * the core specification (v4.0):
+ * "The Controller shall continue advertising until the Host
+ * issues an LE_Set_Advertise_Enable command with
+ * Advertising_Enable set to 0x00 (Advertising is disabled)
+ * or until a connection is created or until the Advertising
+ * is timed out due to Directed Advertising."
+ */
+ if (type == LE_LINK)
+ mgmt_reenable_advertising(hdev);
}
unlock:
@@ -2139,6 +2138,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_cc_write_voice_setting(hdev, skb);
break;
+ case HCI_OP_READ_NUM_SUPPORTED_IAC:
+ hci_cc_read_num_supported_iac(hdev, skb);
+ break;
+
case HCI_OP_WRITE_SSP_MODE:
hci_cc_write_ssp_mode(hdev, skb);
break;
@@ -2342,10 +2345,6 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_cs_disconnect(hdev, ev->status);
break;
- case HCI_OP_LE_CREATE_CONN:
- hci_cs_le_create_conn(hdev, ev->status);
- break;
-
case HCI_OP_CREATE_PHY_LINK:
hci_cs_create_phylink(hdev, ev->status);
break;
@@ -2548,7 +2547,6 @@ static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (conn) {
conn->mode = ev->mode;
- conn->interval = __le16_to_cpu(ev->interval);
if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
&conn->flags)) {
@@ -2930,6 +2928,23 @@ unlock:
hci_dev_unlock(hdev);
}
+static inline size_t eir_get_length(u8 *eir, size_t eir_len)
+{
+ size_t parsed = 0;
+
+ while (parsed < eir_len) {
+ u8 field_len = eir[0];
+
+ if (field_len == 0)
+ return parsed;
+
+ parsed += field_len + 1;
+ eir += field_len + 1;
+ }
+
+ return eir_len;
+}
+
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -3170,7 +3185,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
if (hdev->auto_accept_delay > 0) {
int delay = msecs_to_jiffies(hdev->auto_accept_delay);
- mod_timer(&conn->auto_accept_timer, jiffies + delay);
+ queue_delayed_work(conn->hdev->workqueue,
+ &conn->auto_accept_work, delay);
goto unlock;
}
@@ -3485,6 +3501,17 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->dst_type = ev->bdaddr_type;
+ /* The advertising parameters for own address type
+ * define which source address and source address
+ * type this connection has.
+ */
+ if (bacmp(&conn->src, BDADDR_ANY)) {
+ conn->src_type = ADDR_LE_DEV_PUBLIC;
+ } else {
+ bacpy(&conn->src, &hdev->static_addr);
+ conn->src_type = ADDR_LE_DEV_RANDOM;
+ }
+
if (ev->role == LE_CONN_ROLE_MASTER) {
conn->out = true;
conn->link_mode |= HCI_LM_MASTER;
@@ -3640,8 +3667,8 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
skb_pull(skb, HCI_EVENT_HDR_SIZE);
if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
- struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
- u16 opcode = __le16_to_cpu(hdr->opcode);
+ struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
+ u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
hci_req_cmd_complete(hdev, opcode, 0);
}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 9bd7d959e384..71f0be173080 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -66,6 +66,46 @@ static struct bt_sock_list hci_sk_list = {
.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
+static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
+{
+ struct hci_filter *flt;
+ int flt_type, flt_event;
+
+ /* Apply filter */
+ flt = &hci_pi(sk)->filter;
+
+ if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
+ flt_type = 0;
+ else
+ flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
+
+ if (!test_bit(flt_type, &flt->type_mask))
+ return true;
+
+ /* Extra filter for event packets only */
+ if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
+ return false;
+
+ flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
+
+ if (!hci_test_bit(flt_event, &flt->event_mask))
+ return true;
+
+ /* Check filter only when opcode is set */
+ if (!flt->opcode)
+ return false;
+
+ if (flt_event == HCI_EV_CMD_COMPLETE &&
+ flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
+ return true;
+
+ if (flt_event == HCI_EV_CMD_STATUS &&
+ flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
+ return true;
+
+ return false;
+}
+
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
@@ -77,7 +117,6 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
read_lock(&hci_sk_list.lock);
sk_for_each(sk, &hci_sk_list.head) {
- struct hci_filter *flt;
struct sk_buff *nskb;
if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
@@ -87,31 +126,19 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
if (skb->sk == sk)
continue;
- if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
- continue;
-
- /* Apply filter */
- flt = &hci_pi(sk)->filter;
-
- if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
- 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
- &flt->type_mask))
- continue;
-
- if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
- int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
-
- if (!hci_test_bit(evt, &flt->event_mask))
+ if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
+ if (is_filtered_packet(sk, skb))
continue;
-
- if (flt->opcode &&
- ((evt == HCI_EV_CMD_COMPLETE &&
- flt->opcode !=
- get_unaligned((__le16 *)(skb->data + 3))) ||
- (evt == HCI_EV_CMD_STATUS &&
- flt->opcode !=
- get_unaligned((__le16 *)(skb->data + 4)))))
+ } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+ if (!bt_cb(skb)->incoming)
+ continue;
+ if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
+ bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+ bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
continue;
+ } else {
+ /* Don't send frame to other channel types */
+ continue;
}
if (!skb_copy) {
@@ -360,7 +387,6 @@ static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
__net_timestamp(skb);
bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
- skb->dev = (void *) hdev;
hci_send_to_sock(hdev, skb);
kfree_skb(skb);
}
@@ -426,6 +452,12 @@ static int hci_sock_release(struct socket *sock)
bt_sock_unlink(&hci_sk_list, sk);
if (hdev) {
+ if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+ mgmt_index_added(hdev);
+ clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
+ hci_dev_close(hdev->id);
+ }
+
atomic_dec(&hdev->promisc);
hci_dev_put(hdev);
}
@@ -449,7 +481,7 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
hci_dev_lock(hdev);
- err = hci_blacklist_add(hdev, &bdaddr, 0);
+ err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
hci_dev_unlock(hdev);
@@ -466,7 +498,7 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
hci_dev_lock(hdev);
- err = hci_blacklist_del(hdev, &bdaddr, 0);
+ err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
hci_dev_unlock(hdev);
@@ -482,6 +514,12 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
if (!hdev)
return -EBADFD;
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+ return -EBUSY;
+
+ if (hdev->dev_type != HCI_BREDR)
+ return -EOPNOTSUPP;
+
switch (cmd) {
case HCISETRAW:
if (!capable(CAP_NET_ADMIN))
@@ -512,23 +550,29 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
if (!capable(CAP_NET_ADMIN))
return -EPERM;
return hci_sock_blacklist_del(hdev, (void __user *) arg);
-
- default:
- if (hdev->ioctl)
- return hdev->ioctl(hdev, cmd, arg);
- return -EINVAL;
}
+
+ return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
- struct sock *sk = sock->sk;
void __user *argp = (void __user *) arg;
+ struct sock *sk = sock->sk;
int err;
BT_DBG("cmd %x arg %lx", cmd, arg);
+ lock_sock(sk);
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
+ err = -EBADFD;
+ goto done;
+ }
+
+ release_sock(sk);
+
switch (cmd) {
case HCIGETDEVLIST:
return hci_get_dev_list(argp);
@@ -573,13 +617,15 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
case HCIINQUIRY:
return hci_inquiry(argp);
-
- default:
- lock_sock(sk);
- err = hci_sock_bound_ioctl(sk, cmd, arg);
- release_sock(sk);
- return err;
}
+
+ lock_sock(sk);
+
+ err = hci_sock_bound_ioctl(sk, cmd, arg);
+
+done:
+ release_sock(sk);
+ return err;
}
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
@@ -629,6 +675,56 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
hci_pi(sk)->hdev = hdev;
break;
+ case HCI_CHANNEL_USER:
+ if (hci_pi(sk)->hdev) {
+ err = -EALREADY;
+ goto done;
+ }
+
+ if (haddr.hci_dev == HCI_DEV_NONE) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (!capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ goto done;
+ }
+
+ hdev = hci_dev_get(haddr.hci_dev);
+ if (!hdev) {
+ err = -ENODEV;
+ goto done;
+ }
+
+ if (test_bit(HCI_UP, &hdev->flags) ||
+ test_bit(HCI_INIT, &hdev->flags) ||
+ test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ err = -EBUSY;
+ hci_dev_put(hdev);
+ goto done;
+ }
+
+ if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ err = -EUSERS;
+ hci_dev_put(hdev);
+ goto done;
+ }
+
+ mgmt_index_removed(hdev);
+
+ err = hci_dev_open(hdev->id);
+ if (err) {
+ clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
+ hci_dev_put(hdev);
+ goto done;
+ }
+
+ atomic_inc(&hdev->promisc);
+
+ hci_pi(sk)->hdev = hdev;
+ break;
+
case HCI_CHANNEL_CONTROL:
if (haddr.hci_dev != HCI_DEV_NONE) {
err = -EINVAL;
@@ -677,22 +773,30 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
{
struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
struct sock *sk = sock->sk;
- struct hci_dev *hdev = hci_pi(sk)->hdev;
+ struct hci_dev *hdev;
+ int err = 0;
BT_DBG("sock %p sk %p", sock, sk);
- if (!hdev)
- return -EBADFD;
+ if (peer)
+ return -EOPNOTSUPP;
lock_sock(sk);
+ hdev = hci_pi(sk)->hdev;
+ if (!hdev) {
+ err = -EBADFD;
+ goto done;
+ }
+
*addr_len = sizeof(*haddr);
haddr->hci_family = AF_BLUETOOTH;
haddr->hci_dev = hdev->id;
- haddr->hci_channel= 0;
+ haddr->hci_channel= hci_pi(sk)->channel;
+done:
release_sock(sk);
- return 0;
+ return err;
}
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
@@ -767,6 +871,7 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
case HCI_CHANNEL_RAW:
hci_sock_cmsg(sk, msg, skb);
break;
+ case HCI_CHANNEL_USER:
case HCI_CHANNEL_CONTROL:
case HCI_CHANNEL_MONITOR:
sock_recv_timestamp(msg, sk, skb);
@@ -801,6 +906,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
switch (hci_pi(sk)->channel) {
case HCI_CHANNEL_RAW:
+ case HCI_CHANNEL_USER:
break;
case HCI_CHANNEL_CONTROL:
err = mgmt_control(sk, msg, len);
@@ -835,9 +941,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
skb_pull(skb, 1);
- skb->dev = (void *) hdev;
- if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
+ if (hci_pi(sk)->channel == HCI_CHANNEL_RAW &&
+ bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
u16 opcode = get_unaligned_le16(skb->data);
u16 ogf = hci_opcode_ogf(opcode);
u16 ocf = hci_opcode_ocf(opcode);
@@ -868,6 +974,14 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
goto drop;
}
+ if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
+ bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
+ bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+ bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+ err = -EINVAL;
+ goto drop;
+ }
+
skb_queue_tail(&hdev->raw_q, skb);
queue_work(hdev->workqueue, &hdev->tx_work);
}
@@ -895,7 +1009,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
lock_sock(sk);
if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
- err = -EINVAL;
+ err = -EBADFD;
goto done;
}
@@ -981,7 +1095,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
lock_sock(sk);
if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
- err = -EINVAL;
+ err = -EBADFD;
goto done;
}
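
The hci_sock.c changes above pull the per-socket RAW filter checks into is_filtered_packet(), so hci_send_to_sock() only branches on the channel type, and they add the HCI_CHANNEL_USER bind/release path. A stand-alone sketch of the drop-predicate pattern follows, with simplified filter fields and constants (assumptions, not struct hci_filter), and with only the command-complete opcode case modelled.

/* Stand-alone sketch of the "drop predicate" pattern used by
 * is_filtered_packet() above.  Field names and constants are simplified
 * assumptions, not the kernel's hci_filter layout.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PKT_EVENT        0x04
#define EVT_CMD_COMPLETE 0x0e

struct filter {
        uint32_t type_mask;     /* bit N set => packet type N passes */
        uint64_t event_mask;    /* bit N set => event code N passes  */
        uint16_t opcode;        /* 0 => do not match on opcode       */
};

/* Return true when the packet should be dropped for this filter. */
static bool is_filtered(const struct filter *flt, uint8_t pkt_type,
                        uint8_t event, uint16_t opcode)
{
        if (!(flt->type_mask & (1u << pkt_type)))
                return true;

        if (pkt_type != PKT_EVENT)
                return false;   /* extra checks apply to events only */

        if (!(flt->event_mask & (1ull << event)))
                return true;

        if (flt->opcode && event == EVT_CMD_COMPLETE &&
            flt->opcode != opcode)
                return true;

        return false;
}

int main(void)
{
        struct filter flt = {
                .type_mask  = 1u << PKT_EVENT,
                .event_mask = 1ull << EVT_CMD_COMPLETE,
                .opcode     = 0x0c03,
        };

        printf("drop? %d\n", is_filtered(&flt, PKT_EVENT,
                                         EVT_CMD_COMPLETE, 0x0c03));
        return 0;
}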
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index edf623a29043..0b61250cfdf9 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,17 +1,12 @@
/* Bluetooth HCI driver model support. */
-#include <linux/debugfs.h>
#include <linux/module.h>
-#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static struct class *bt_class;
-struct dentry *bt_debugfs;
-EXPORT_SYMBOL_GPL(bt_debugfs);
-
static inline char *link_typetostr(int type)
{
switch (type) {
@@ -42,29 +37,15 @@ static ssize_t show_link_address(struct device *dev,
return sprintf(buf, "%pMR\n", &conn->dst);
}
-static ssize_t show_link_features(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hci_conn *conn = to_hci_conn(dev);
-
- return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- conn->features[0][0], conn->features[0][1],
- conn->features[0][2], conn->features[0][3],
- conn->features[0][4], conn->features[0][5],
- conn->features[0][6], conn->features[0][7]);
-}
-
#define LINK_ATTR(_name, _mode, _show, _store) \
struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
-static LINK_ATTR(features, S_IRUGO, show_link_features, NULL);
static struct attribute *bt_link_attrs[] = {
&link_attr_type.attr,
&link_attr_address.attr,
- &link_attr_features.attr,
NULL
};
@@ -150,28 +131,6 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
hci_dev_put(hdev);
}
-static inline char *host_bustostr(int bus)
-{
- switch (bus) {
- case HCI_VIRTUAL:
- return "VIRTUAL";
- case HCI_USB:
- return "USB";
- case HCI_PCCARD:
- return "PCCARD";
- case HCI_UART:
- return "UART";
- case HCI_RS232:
- return "RS232";
- case HCI_PCI:
- return "PCI";
- case HCI_SDIO:
- return "SDIO";
- default:
- return "UNKNOWN";
- }
-}
-
static inline char *host_typetostr(int type)
{
switch (type) {
@@ -184,13 +143,6 @@ static inline char *host_typetostr(int type)
}
}
-static ssize_t show_bus(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = to_hci_dev(dev);
- return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
-}
-
static ssize_t show_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -212,14 +164,6 @@ static ssize_t show_name(struct device *dev,
return sprintf(buf, "%s\n", name);
}
-static ssize_t show_class(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = to_hci_dev(dev);
- return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
- hdev->dev_class[1], hdev->dev_class[0]);
-}
-
static ssize_t show_address(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -227,150 +171,14 @@ static ssize_t show_address(struct device *dev,
return sprintf(buf, "%pMR\n", &hdev->bdaddr);
}
-static ssize_t show_features(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = to_hci_dev(dev);
-
- return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- hdev->features[0][0], hdev->features[0][1],
- hdev->features[0][2], hdev->features[0][3],
- hdev->features[0][4], hdev->features[0][5],
- hdev->features[0][6], hdev->features[0][7]);
-}
-
-static ssize_t show_manufacturer(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = to_hci_dev(dev);
- return sprintf(buf, "%d\n", hdev->manufacturer);
-}
-
-static ssize_t show_hci_version(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = to_hci_dev(dev);
- return sprintf(buf, "%d\n", hdev->hci_ver);
-}
-
-static ssize_t show_hci_revision(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = to_hci_dev(dev);
- return sprintf(buf, "%d\n", hdev->hci_rev);
-}
-
-static ssize_t show_idle_timeout(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = to_hci_dev(dev);
- return sprintf(buf, "%d\n", hdev->idle_timeout);
-}
-
-static ssize_t store_idle_timeout(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct hci_dev *hdev = to_hci_dev(dev);
- unsigned int val;
- int rv;
-
- rv = kstrtouint(buf, 0, &val);
- if (rv < 0)
- return rv;
-
- if (val != 0 && (val < 500 || val > 3600000))
- return -EINVAL;
-
- hdev->idle_timeout = val;
-
- return count;
-}
-
-static ssize_t show_sniff_max_interval(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = to_hci_dev(dev);
- return sprintf(buf, "%d\n", hdev->sniff_max_interval);
-}
-
-static ssize_t store_sniff_max_interval(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct hci_dev *hdev = to_hci_dev(dev);
- u16 val;
- int rv;
-
- rv = kstrtou16(buf, 0, &val);
- if (rv < 0)
- return rv;
-
- if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
- return -EINVAL;
-
- hdev->sniff_max_interval = val;
-
- return count;
-}
-
-static ssize_t show_sniff_min_interval(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = to_hci_dev(dev);
- return sprintf(buf, "%d\n", hdev->sniff_min_interval);
-}
-
-static ssize_t store_sniff_min_interval(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct hci_dev *hdev = to_hci_dev(dev);
- u16 val;
- int rv;
-
- rv = kstrtou16(buf, 0, &val);
- if (rv < 0)
- return rv;
-
- if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
- return -EINVAL;
-
- hdev->sniff_min_interval = val;
-
- return count;
-}
-
-static DEVICE_ATTR(bus, S_IRUGO, show_bus, NULL);
static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);
static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
-static DEVICE_ATTR(features, S_IRUGO, show_features, NULL);
-static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
-static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
-static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
-
-static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
- show_idle_timeout, store_idle_timeout);
-static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
- show_sniff_max_interval, store_sniff_max_interval);
-static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
- show_sniff_min_interval, store_sniff_min_interval);
static struct attribute *bt_host_attrs[] = {
- &dev_attr_bus.attr,
&dev_attr_type.attr,
&dev_attr_name.attr,
- &dev_attr_class.attr,
&dev_attr_address.attr,
- &dev_attr_features.attr,
- &dev_attr_manufacturer.attr,
- &dev_attr_hci_version.attr,
- &dev_attr_hci_revision.attr,
- &dev_attr_idle_timeout.attr,
- &dev_attr_sniff_max_interval.attr,
- &dev_attr_sniff_min_interval.attr,
NULL
};
@@ -396,141 +204,6 @@ static struct device_type bt_host = {
.release = bt_host_release,
};
-static int inquiry_cache_show(struct seq_file *f, void *p)
-{
- struct hci_dev *hdev = f->private;
- struct discovery_state *cache = &hdev->discovery;
- struct inquiry_entry *e;
-
- hci_dev_lock(hdev);
-
- list_for_each_entry(e, &cache->all, all) {
- struct inquiry_data *data = &e->data;
- seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
- &data->bdaddr,
- data->pscan_rep_mode, data->pscan_period_mode,
- data->pscan_mode, data->dev_class[2],
- data->dev_class[1], data->dev_class[0],
- __le16_to_cpu(data->clock_offset),
- data->rssi, data->ssp_mode, e->timestamp);
- }
-
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
- return single_open(file, inquiry_cache_show, inode->i_private);
-}
-
-static const struct file_operations inquiry_cache_fops = {
- .open = inquiry_cache_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int blacklist_show(struct seq_file *f, void *p)
-{
- struct hci_dev *hdev = f->private;
- struct bdaddr_list *b;
-
- hci_dev_lock(hdev);
-
- list_for_each_entry(b, &hdev->blacklist, list)
- seq_printf(f, "%pMR\n", &b->bdaddr);
-
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int blacklist_open(struct inode *inode, struct file *file)
-{
- return single_open(file, blacklist_show, inode->i_private);
-}
-
-static const struct file_operations blacklist_fops = {
- .open = blacklist_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void print_bt_uuid(struct seq_file *f, u8 *uuid)
-{
- u32 data0, data5;
- u16 data1, data2, data3, data4;
-
- data5 = get_unaligned_le32(uuid);
- data4 = get_unaligned_le16(uuid + 4);
- data3 = get_unaligned_le16(uuid + 6);
- data2 = get_unaligned_le16(uuid + 8);
- data1 = get_unaligned_le16(uuid + 10);
- data0 = get_unaligned_le32(uuid + 12);
-
- seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
- data0, data1, data2, data3, data4, data5);
-}
-
-static int uuids_show(struct seq_file *f, void *p)
-{
- struct hci_dev *hdev = f->private;
- struct bt_uuid *uuid;
-
- hci_dev_lock(hdev);
-
- list_for_each_entry(uuid, &hdev->uuids, list)
- print_bt_uuid(f, uuid->uuid);
-
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int uuids_open(struct inode *inode, struct file *file)
-{
- return single_open(file, uuids_show, inode->i_private);
-}
-
-static const struct file_operations uuids_fops = {
- .open = uuids_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int auto_accept_delay_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
-
- hdev->auto_accept_delay = val;
-
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int auto_accept_delay_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock(hdev);
-
- *val = hdev->auto_accept_delay;
-
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
- auto_accept_delay_set, "%llu\n");
-
void hci_init_sysfs(struct hci_dev *hdev)
{
struct device *dev = &hdev->dev;
@@ -542,52 +215,8 @@ void hci_init_sysfs(struct hci_dev *hdev)
device_initialize(dev);
}
-int hci_add_sysfs(struct hci_dev *hdev)
-{
- struct device *dev = &hdev->dev;
- int err;
-
- BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
-
- dev_set_name(dev, "%s", hdev->name);
-
- err = device_add(dev);
- if (err < 0)
- return err;
-
- if (!bt_debugfs)
- return 0;
-
- hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
- if (!hdev->debugfs)
- return 0;
-
- debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
- hdev, &inquiry_cache_fops);
-
- debugfs_create_file("blacklist", 0444, hdev->debugfs,
- hdev, &blacklist_fops);
-
- debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
-
- debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
- &auto_accept_delay_fops);
- return 0;
-}
-
-void hci_del_sysfs(struct hci_dev *hdev)
-{
- BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
-
- debugfs_remove_recursive(hdev->debugfs);
-
- device_del(&hdev->dev);
-}
-
int __init bt_sysfs_init(void)
{
- bt_debugfs = debugfs_create_dir("bluetooth", NULL);
-
bt_class = class_create(THIS_MODULE, "bluetooth");
return PTR_ERR_OR_ZERO(bt_class);
@@ -596,6 +225,4 @@ int __init bt_sysfs_init(void)
void bt_sysfs_cleanup(void)
{
class_destroy(bt_class);
-
- debugfs_remove_recursive(bt_debugfs);
}
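
The hci_sysfs.c diff above drops the debugfs entries and most device attributes, leaving a short NULL-terminated attribute table. A user-space sketch of that table-plus-terminator pattern follows; the device struct and show callbacks are illustrative assumptions, not the driver-model API.

/* User-space sketch of the NULL-terminated attribute-table pattern that
 * bt_host_attrs[] above relies on.
 */
#include <stdio.h>

struct fake_dev {
        const char *name;
        const char *address;
};

struct attr {
        const char *name;
        int (*show)(const struct fake_dev *dev, char *buf, size_t len);
};

static int show_name(const struct fake_dev *dev, char *buf, size_t len)
{
        return snprintf(buf, len, "%s\n", dev->name);
}

static int show_address(const struct fake_dev *dev, char *buf, size_t len)
{
        return snprintf(buf, len, "%s\n", dev->address);
}

/* NULL terminator lets callers iterate without a separate count. */
static const struct attr dev_attrs[] = {
        { "name",    show_name },
        { "address", show_address },
        { NULL, NULL }
};

int main(void)
{
        const struct fake_dev dev = { "hci0", "00:11:22:33:44:55" };
        char buf[64];

        for (const struct attr *a = dev_attrs; a->name; a++) {
                a->show(&dev, buf, sizeof(buf));
                printf("%s: %s", a->name, buf);
        }
        return 0;
}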
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index bdc35a7a7fee..292e619db896 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -767,10 +767,10 @@ static int hidp_setup_hid(struct hidp_session *session,
strncpy(hid->name, req->name, sizeof(req->name) - 1);
snprintf(hid->phys, sizeof(hid->phys), "%pMR",
- &bt_sk(session->ctrl_sock->sk)->src);
+ &l2cap_pi(session->ctrl_sock->sk)->chan->src);
snprintf(hid->uniq, sizeof(hid->uniq), "%pMR",
- &bt_sk(session->ctrl_sock->sk)->dst);
+ &l2cap_pi(session->ctrl_sock->sk)->chan->dst);
hid->dev.parent = &session->conn->hcon->dev;
hid->ll_driver = &hidp_hid_driver;
@@ -1283,23 +1283,29 @@ static int hidp_session_thread(void *arg)
static int hidp_verify_sockets(struct socket *ctrl_sock,
struct socket *intr_sock)
{
+ struct l2cap_chan *ctrl_chan, *intr_chan;
struct bt_sock *ctrl, *intr;
struct hidp_session *session;
if (!l2cap_is_socket(ctrl_sock) || !l2cap_is_socket(intr_sock))
return -EINVAL;
+ ctrl_chan = l2cap_pi(ctrl_sock->sk)->chan;
+ intr_chan = l2cap_pi(intr_sock->sk)->chan;
+
+ if (bacmp(&ctrl_chan->src, &intr_chan->src) ||
+ bacmp(&ctrl_chan->dst, &intr_chan->dst))
+ return -ENOTUNIQ;
+
ctrl = bt_sk(ctrl_sock->sk);
intr = bt_sk(intr_sock->sk);
- if (bacmp(&ctrl->src, &intr->src) || bacmp(&ctrl->dst, &intr->dst))
- return -ENOTUNIQ;
if (ctrl->sk.sk_state != BT_CONNECTED ||
intr->sk.sk_state != BT_CONNECTED)
return -EBADFD;
/* early session check, we check again during session registration */
- session = hidp_session_find(&ctrl->dst);
+ session = hidp_session_find(&ctrl_chan->dst);
if (session) {
hidp_session_put(session);
return -EEXIST;
@@ -1332,7 +1338,7 @@ int hidp_connection_add(struct hidp_connadd_req *req,
if (!conn)
return -EBADFD;
- ret = hidp_session_new(&session, &bt_sk(ctrl_sock->sk)->dst, ctrl_sock,
+ ret = hidp_session_new(&session, &chan->dst, ctrl_sock,
intr_sock, req, conn);
if (ret)
goto out_conn;
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 9e6cc3553105..ab5241400cf7 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -182,7 +182,7 @@ struct hidp_session {
};
/* HIDP init defines */
-extern int __init hidp_init_sockets(void);
-extern void __exit hidp_cleanup_sockets(void);
+int __init hidp_init_sockets(void);
+void __exit hidp_cleanup_sockets(void);
#endif /* __HIDP_H */
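
The hidp changes above read the endpoint addresses from the L2CAP channel instead of the socket, and hidp_verify_sockets() now compares the control and interrupt channels' src/dst directly. Below is a small sketch of that endpoint check using stand-in types (assumptions, not the kernel bdaddr_t/bacmp definitions).

/* Sketch of the endpoint check done in hidp_verify_sockets() above:
 * control and interrupt channels must share both source and destination
 * addresses.  The comparison mirrors bacmp(), which is a memcmp().
 */
#include <stdio.h>
#include <string.h>

typedef struct { unsigned char b[6]; } bdaddr;

struct chan {
        bdaddr src;
        bdaddr dst;
};

static int bacmp_like(const bdaddr *a, const bdaddr *b)
{
        return memcmp(a, b, sizeof(*a));
}

/* Return 0 when both channels run between the same two devices. */
static int verify_endpoints(const struct chan *ctrl, const struct chan *intr)
{
        if (bacmp_like(&ctrl->src, &intr->src) ||
            bacmp_like(&ctrl->dst, &intr->dst))
                return -1;      /* -ENOTUNIQ in the kernel code above */
        return 0;
}

int main(void)
{
        struct chan ctrl = { {{1, 2, 3, 4, 5, 6}}, {{7, 8, 9, 10, 11, 12}} };
        struct chan intr = ctrl;        /* same endpoints */

        printf("match: %d\n", verify_endpoints(&ctrl, &intr) == 0);
        return 0;
}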
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 63fa11109a1c..0cef67707838 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -36,14 +36,15 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
-#include <net/bluetooth/smp.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/amp.h>
+
+#include "smp.h"
+#include "a2mp.h"
+#include "amp.h"
bool disable_ertm;
-static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
-static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
+static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
+static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);
@@ -58,6 +59,18 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff_head *skbs, u8 event);
+static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
+{
+ if (hcon->type == LE_LINK) {
+ if (type == ADDR_LE_DEV_PUBLIC)
+ return BDADDR_LE_PUBLIC;
+ else
+ return BDADDR_LE_RANDOM;
+ }
+
+ return BDADDR_BREDR;
+}
+
/* ---- L2CAP channels ---- */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
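
The new bdaddr_type() helper added above maps an HCI link type plus LE address type onto the exported BDADDR_* constants. A stand-alone sketch of that mapping follows; the enum values and function name are illustrative assumptions, only the mapping logic mirrors the helper.

/* Stand-alone sketch of the bdaddr_type() mapping introduced above:
 * non-LE links are BR/EDR, LE links split on public vs. random address.
 */
#include <stdio.h>

enum link_type   { ACL_LINK, LE_LINK };
enum le_addr     { ADDR_LE_DEV_PUBLIC, ADDR_LE_DEV_RANDOM };
enum bdaddr_kind { BDADDR_BREDR, BDADDR_LE_PUBLIC, BDADDR_LE_RANDOM };

static enum bdaddr_kind map_bdaddr_type(enum link_type link, enum le_addr addr)
{
        if (link == LE_LINK)
                return addr == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC
                                                  : BDADDR_LE_RANDOM;
        /* Any non-LE link is treated as BR/EDR. */
        return BDADDR_BREDR;
}

int main(void)
{
        printf("%d %d %d\n",
               map_bdaddr_type(ACL_LINK, ADDR_LE_DEV_PUBLIC),
               map_bdaddr_type(LE_LINK, ADDR_LE_DEV_PUBLIC),
               map_bdaddr_type(LE_LINK, ADDR_LE_DEV_RANDOM));
        return 0;
}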
@@ -148,7 +161,7 @@ static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
struct l2cap_chan *c;
list_for_each_entry(c, &chan_list, global_l) {
- if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
+ if (c->sport == psm && !bacmp(&c->src, src))
return c;
}
return NULL;
@@ -210,38 +223,25 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
return 0;
}
-static void __l2cap_state_change(struct l2cap_chan *chan, int state)
+static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
state_to_string(state));
chan->state = state;
- chan->ops->state_change(chan, state);
-}
-
-static void l2cap_state_change(struct l2cap_chan *chan, int state)
-{
- struct sock *sk = chan->sk;
-
- lock_sock(sk);
- __l2cap_state_change(chan, state);
- release_sock(sk);
+ chan->ops->state_change(chan, state, 0);
}
-static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
+static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
+ int state, int err)
{
- struct sock *sk = chan->sk;
-
- sk->sk_err = err;
+ chan->state = state;
+ chan->ops->state_change(chan, chan->state, err);
}
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
- struct sock *sk = chan->sk;
-
- lock_sock(sk);
- __l2cap_chan_set_err(chan, err);
- release_sock(sk);
+ chan->ops->state_change(chan, chan->state, err);
}
static void __set_retrans_timer(struct l2cap_chan *chan)
@@ -620,10 +620,8 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
struct l2cap_conn *conn = chan->conn;
- struct sock *sk = chan->sk;
- BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
- sk);
+ BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
switch (chan->state) {
case BT_LISTEN:
@@ -634,7 +632,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
case BT_CONFIG:
if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
conn->hcon->type == ACL_LINK) {
- __set_chan_timer(chan, sk->sk_sndtimeo);
+ __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
l2cap_send_disconn_req(chan, reason);
} else
l2cap_chan_del(chan, reason);
@@ -646,10 +644,11 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
struct l2cap_conn_rsp rsp;
__u16 result;
- if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
+ if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
result = L2CAP_CR_SEC_BLOCK;
else
result = L2CAP_CR_BAD_PSM;
+
l2cap_state_change(chan, BT_DISCONN);
rsp.scid = cpu_to_le16(chan->dcid);
@@ -676,7 +675,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
- if (chan->chan_type == L2CAP_CHAN_RAW) {
+ switch (chan->chan_type) {
+ case L2CAP_CHAN_RAW:
switch (chan->sec_level) {
case BT_SECURITY_HIGH:
return HCI_AT_DEDICATED_BONDING_MITM;
@@ -685,15 +685,29 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
default:
return HCI_AT_NO_BONDING;
}
- } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
- if (chan->sec_level == BT_SECURITY_LOW)
- chan->sec_level = BT_SECURITY_SDP;
-
+ break;
+ case L2CAP_CHAN_CONN_LESS:
+ if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
+ if (chan->sec_level == BT_SECURITY_LOW)
+ chan->sec_level = BT_SECURITY_SDP;
+ }
if (chan->sec_level == BT_SECURITY_HIGH)
return HCI_AT_NO_BONDING_MITM;
else
return HCI_AT_NO_BONDING;
- } else {
+ break;
+ case L2CAP_CHAN_CONN_ORIENTED:
+ if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
+ if (chan->sec_level == BT_SECURITY_LOW)
+ chan->sec_level = BT_SECURITY_SDP;
+
+ if (chan->sec_level == BT_SECURITY_HIGH)
+ return HCI_AT_NO_BONDING_MITM;
+ else
+ return HCI_AT_NO_BONDING;
+ }
+ /* fall through */
+ default:
switch (chan->sec_level) {
case BT_SECURITY_HIGH:
return HCI_AT_GENERAL_BONDING_MITM;
@@ -702,6 +716,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
default:
return HCI_AT_NO_BONDING;
}
+ break;
}
}
@@ -1015,14 +1030,29 @@ static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
static bool __amp_capable(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
+ struct hci_dev *hdev;
+ bool amp_available = false;
- if (enable_hs &&
- hci_amp_capable() &&
- chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
- conn->fixed_chan_mask & L2CAP_FC_A2MP)
- return true;
- else
+ if (!conn->hs_enabled)
return false;
+
+ if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
+ return false;
+
+ read_lock(&hci_dev_list_lock);
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ if (hdev->amp_type != AMP_TYPE_BREDR &&
+ test_bit(HCI_UP, &hdev->flags)) {
+ amp_available = true;
+ break;
+ }
+ }
+ read_unlock(&hci_dev_list_lock);
+
+ if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
+ return amp_available;
+
+ return false;
}
static bool l2cap_check_efs(struct l2cap_chan *chan)
@@ -1186,7 +1216,6 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
- struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
struct l2cap_disconn_req req;
@@ -1209,10 +1238,7 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
sizeof(req), &req);
- lock_sock(sk);
- __l2cap_state_change(chan, BT_DISCONN);
- __l2cap_chan_set_err(chan, err);
- release_sock(sk);
+ l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
/* ---- L2CAP connections ---- */
@@ -1225,8 +1251,6 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
mutex_lock(&conn->chan_lock);
list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
- struct sock *sk = chan->sk;
-
l2cap_chan_lock(chan);
if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
@@ -1258,19 +1282,16 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
rsp.dcid = cpu_to_le16(chan->scid);
if (l2cap_chan_check_security(chan)) {
- lock_sock(sk);
- if (test_bit(BT_SK_DEFER_SETUP,
- &bt_sk(sk)->flags)) {
+ if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
chan->ops->defer(chan);
} else {
- __l2cap_state_change(chan, BT_CONFIG);
+ l2cap_state_change(chan, BT_CONFIG);
rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
}
- release_sock(sk);
} else {
rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
@@ -1309,8 +1330,6 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
read_lock(&chan_list_lock);
list_for_each_entry(c, &chan_list, global_l) {
- struct sock *sk = c->sk;
-
if (state && c->state != state)
continue;
@@ -1319,16 +1338,16 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
int src_any, dst_any;
/* Exact match. */
- src_match = !bacmp(&bt_sk(sk)->src, src);
- dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+ src_match = !bacmp(&c->src, src);
+ dst_match = !bacmp(&c->dst, dst);
if (src_match && dst_match) {
read_unlock(&chan_list_lock);
return c;
}
/* Closest match */
- src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
- dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+ src_any = !bacmp(&c->src, BDADDR_ANY);
+ dst_any = !bacmp(&c->dst, BDADDR_ANY);
if ((src_match && dst_any) || (src_any && dst_match) ||
(src_any && dst_any))
c1 = c;
@@ -1342,14 +1361,15 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
- struct sock *parent;
+ struct hci_conn *hcon = conn->hcon;
struct l2cap_chan *chan, *pchan;
+ u8 dst_type;
BT_DBG("");
/* Check if we have socket listening on cid */
pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
- conn->src, conn->dst);
+ &hcon->src, &hcon->dst);
if (!pchan)
return;
@@ -1357,9 +1377,13 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
return;
- parent = pchan->sk;
+ dst_type = bdaddr_type(hcon, hcon->dst_type);
+
+ /* If device is blocked, do not create a channel for it */
+ if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
+ return;
- lock_sock(parent);
+ l2cap_chan_lock(pchan);
chan = pchan->ops->new_connection(pchan);
if (!chan)
@@ -1367,13 +1391,15 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
chan->dcid = L2CAP_CID_ATT;
- bacpy(&bt_sk(chan->sk)->src, conn->src);
- bacpy(&bt_sk(chan->sk)->dst, conn->dst);
+ bacpy(&chan->src, &hcon->src);
+ bacpy(&chan->dst, &hcon->dst);
+ chan->src_type = bdaddr_type(hcon, hcon->src_type);
+ chan->dst_type = dst_type;
__l2cap_chan_add(conn, chan);
clean:
- release_sock(parent);
+ l2cap_chan_unlock(pchan);
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
@@ -1408,12 +1434,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
l2cap_chan_ready(chan);
} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
- struct sock *sk = chan->sk;
- __clear_chan_timer(chan);
- lock_sock(sk);
- __l2cap_state_change(chan, BT_CONNECTED);
- sk->sk_state_change(sk);
- release_sock(sk);
+ l2cap_chan_ready(chan);
} else if (chan->state == BT_CONNECT) {
l2cap_do_start(chan);
@@ -1633,11 +1654,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
break;
}
- conn->src = &hcon->hdev->bdaddr;
- conn->dst = &hcon->dst;
-
conn->feat_mask = 0;
+ if (hcon->type == ACL_LINK)
+ conn->hs_enabled = test_bit(HCI_HS_ENABLED,
+ &hcon->hdev->dev_flags);
+
spin_lock_init(&conn->lock);
mutex_init(&conn->chan_lock);
@@ -1688,8 +1710,6 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
read_lock(&chan_list_lock);
list_for_each_entry(c, &chan_list, global_l) {
- struct sock *sk = c->sk;
-
if (state && c->state != state)
continue;
@@ -1698,16 +1718,16 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
int src_any, dst_any;
/* Exact match. */
- src_match = !bacmp(&bt_sk(sk)->src, src);
- dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+ src_match = !bacmp(&c->src, src);
+ dst_match = !bacmp(&c->dst, dst);
if (src_match && dst_match) {
read_unlock(&chan_list_lock);
return c;
}
/* Closest match */
- src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
- dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+ src_any = !bacmp(&c->src, BDADDR_ANY);
+ dst_any = !bacmp(&c->dst, BDADDR_ANY);
if ((src_match && dst_any) || (src_any && dst_match) ||
(src_any && dst_any))
c1 = c;
@@ -1722,18 +1742,16 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
bdaddr_t *dst, u8 dst_type)
{
- struct sock *sk = chan->sk;
- bdaddr_t *src = &bt_sk(sk)->src;
struct l2cap_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
__u8 auth_type;
int err;
- BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
+ BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
dst_type, __le16_to_cpu(psm));
- hdev = hci_get_route(dst, src);
+ hdev = hci_get_route(dst, &chan->src);
if (!hdev)
return -EHOSTUNREACH;
@@ -1790,9 +1808,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
}
/* Set destination address and psm */
- lock_sock(sk);
- bacpy(&bt_sk(sk)->dst, dst);
- release_sock(sk);
+ bacpy(&chan->dst, dst);
+ chan->dst_type = dst_type;
chan->psm = psm;
chan->dcid = cid;
@@ -1825,7 +1842,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
}
/* Update source addr of the socket */
- bacpy(src, conn->src);
+ bacpy(&chan->src, &hcon->src);
+ chan->src_type = bdaddr_type(hcon, hcon->src_type);
l2cap_chan_unlock(chan);
l2cap_chan_add(conn, chan);
@@ -1835,7 +1853,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
hci_conn_drop(hcon);
l2cap_state_change(chan, BT_CONNECT);
- __set_chan_timer(chan, sk->sk_sndtimeo);
+ __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
if (hcon->state == BT_CONNECTED) {
if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
@@ -1855,38 +1873,6 @@ done:
return err;
}
-int __l2cap_wait_ack(struct sock *sk)
-{
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- DECLARE_WAITQUEUE(wait, current);
- int err = 0;
- int timeo = HZ/5;
-
- add_wait_queue(sk_sleep(sk), &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- while (chan->unacked_frames > 0 && chan->conn) {
- if (!timeo)
- timeo = HZ/5;
-
- if (signal_pending(current)) {
- err = sock_intr_errno(timeo);
- break;
- }
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
- set_current_state(TASK_INTERRUPTIBLE);
-
- err = sock_error(sk);
- if (err)
- break;
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(sk_sleep(sk), &wait);
- return err;
-}
-
static void l2cap_monitor_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
@@ -2263,7 +2249,8 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
struct l2cap_hdr *lh;
- BT_DBG("chan %p len %zu priority %u", chan, len, priority);
+ BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
+ __le16_to_cpu(chan->psm), len, priority);
count = min_t(unsigned int, (conn->mtu - hlen), len);
@@ -2278,7 +2265,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
- put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
+ put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
if (unlikely(err < 0)) {
@@ -2826,17 +2813,16 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
mutex_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
- struct sock *sk = chan->sk;
if (chan->chan_type != L2CAP_CHAN_RAW)
continue;
- /* Don't send frame to the socket it came from */
- if (skb->sk == sk)
+ /* Don't send frame to the channel it came from */
+ if (bt_cb(skb)->chan == chan)
continue;
+
nskb = skb_clone(skb, GFP_KERNEL);
if (!nskb)
continue;
-
if (chan->ops->recv(chan, nskb))
kfree_skb(nskb);
}
@@ -3043,8 +3029,8 @@ int l2cap_ertm_init(struct l2cap_chan *chan)
skb_queue_head_init(&chan->tx_q);
- chan->local_amp_id = 0;
- chan->move_id = 0;
+ chan->local_amp_id = AMP_ID_BREDR;
+ chan->move_id = AMP_ID_BREDR;
chan->move_state = L2CAP_MOVE_STABLE;
chan->move_role = L2CAP_MOVE_ROLE_NONE;
@@ -3084,20 +3070,20 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
}
}
-static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
+static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
- return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
+ return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}
-static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
+static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
- return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
+ return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
struct l2cap_conf_rfc *rfc)
{
- if (chan->local_amp_id && chan->hs_hcon) {
+ if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
/* Class 1 devices must have ERTM timeouts
@@ -3135,7 +3121,7 @@ static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
- __l2cap_ews_supported(chan)) {
+ __l2cap_ews_supported(chan->conn)) {
/* use extended control field */
set_bit(FLAG_EXT_CTRL, &chan->flags);
chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
@@ -3165,7 +3151,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
break;
- if (__l2cap_efs_supported(chan))
+ if (__l2cap_efs_supported(chan->conn))
set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* fall through */
@@ -3317,7 +3303,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
break;
case L2CAP_CONF_EWS:
- if (!enable_hs)
+ if (!chan->conn->hs_enabled)
return -ECONNREFUSED;
set_bit(FLAG_EXT_CTRL, &chan->flags);
@@ -3349,7 +3335,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
}
if (remote_efs) {
- if (__l2cap_efs_supported(chan))
+ if (__l2cap_efs_supported(chan->conn))
set_bit(FLAG_EFS_ENABLE, &chan->flags);
else
return -ECONNREFUSED;
@@ -3715,7 +3701,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
struct l2cap_chan *chan = NULL, *pchan;
- struct sock *parent, *sk = NULL;
int result, status = L2CAP_CS_NO_INFO;
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
@@ -3724,16 +3709,15 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
/* Check if we have socket listening on psm */
- pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
+ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
+ &conn->hcon->dst);
if (!pchan) {
result = L2CAP_CR_BAD_PSM;
goto sendresp;
}
- parent = pchan->sk;
-
mutex_lock(&conn->chan_lock);
- lock_sock(parent);
+ l2cap_chan_lock(pchan);
/* Check if the ACL is secure enough (if not SDP) */
if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
@@ -3753,8 +3737,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
if (!chan)
goto response;
- sk = chan->sk;
-
/* For certain devices (ex: HID mouse), support for authentication,
 * pairing and bonding is optional. For such devices, in order to avoid
 * keeping the ACL alive for too long after L2CAP disconnection, reset the ACL
@@ -3762,8 +3744,10 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
*/
conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
- bacpy(&bt_sk(sk)->src, conn->src);
- bacpy(&bt_sk(sk)->dst, conn->dst);
+ bacpy(&chan->src, &conn->hcon->src);
+ bacpy(&chan->dst, &conn->hcon->dst);
+ chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
+ chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
chan->psm = psm;
chan->dcid = scid;
chan->local_amp_id = amp_id;
@@ -3772,14 +3756,14 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
dcid = chan->scid;
- __set_chan_timer(chan, sk->sk_sndtimeo);
+ __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
chan->ident = cmd->ident;
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
if (l2cap_chan_check_security(chan)) {
- if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
- __l2cap_state_change(chan, BT_CONNECT2);
+ if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
+ l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHOR_PEND;
chan->ops->defer(chan);
@@ -3788,28 +3772,28 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
* The connection will succeed after the
* physical link is up.
*/
- if (amp_id) {
- __l2cap_state_change(chan, BT_CONNECT2);
- result = L2CAP_CR_PEND;
- } else {
- __l2cap_state_change(chan, BT_CONFIG);
+ if (amp_id == AMP_ID_BREDR) {
+ l2cap_state_change(chan, BT_CONFIG);
result = L2CAP_CR_SUCCESS;
+ } else {
+ l2cap_state_change(chan, BT_CONNECT2);
+ result = L2CAP_CR_PEND;
}
status = L2CAP_CS_NO_INFO;
}
} else {
- __l2cap_state_change(chan, BT_CONNECT2);
+ l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
}
} else {
- __l2cap_state_change(chan, BT_CONNECT2);
+ l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_NO_INFO;
}
response:
- release_sock(parent);
+ l2cap_chan_unlock(pchan);
mutex_unlock(&conn->chan_lock);
sendresp:
@@ -3891,13 +3875,13 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
if (scid) {
chan = __l2cap_get_chan_by_scid(conn, scid);
if (!chan) {
- err = -EFAULT;
+ err = -EBADSLT;
goto unlock;
}
} else {
chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
if (!chan) {
- err = -EFAULT;
+ err = -EBADSLT;
goto unlock;
}
}
@@ -3965,6 +3949,18 @@ static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
L2CAP_CONF_SUCCESS, flags), data);
}
+static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
+ u16 scid, u16 dcid)
+{
+ struct l2cap_cmd_rej_cid rej;
+
+ rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
+ rej.scid = __cpu_to_le16(scid);
+ rej.dcid = __cpu_to_le16(dcid);
+
+ l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
+}
+
static inline int l2cap_config_req(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
u8 *data)
@@ -3984,18 +3980,14 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
chan = l2cap_get_chan_by_scid(conn, dcid);
- if (!chan)
- return -ENOENT;
+ if (!chan) {
+ cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
+ return 0;
+ }
if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
- struct l2cap_cmd_rej_cid rej;
-
- rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
- rej.scid = cpu_to_le16(chan->scid);
- rej.dcid = cpu_to_le16(chan->dcid);
-
- l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
- sizeof(rej), &rej);
+ cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
+ chan->dcid);
goto unlock;
}
@@ -4198,7 +4190,6 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
struct l2cap_disconn_rsp rsp;
u16 dcid, scid;
struct l2cap_chan *chan;
- struct sock *sk;
if (cmd_len != sizeof(*req))
return -EPROTO;
@@ -4213,20 +4204,17 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
chan = __l2cap_get_chan_by_scid(conn, dcid);
if (!chan) {
mutex_unlock(&conn->chan_lock);
+ cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
return 0;
}
l2cap_chan_lock(chan);
- sk = chan->sk;
-
rsp.dcid = cpu_to_le16(chan->scid);
rsp.scid = cpu_to_le16(chan->dcid);
l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
- lock_sock(sk);
- sk->sk_shutdown = SHUTDOWN_MASK;
- release_sock(sk);
+ chan->ops->set_shutdown(chan);
l2cap_chan_hold(chan);
l2cap_chan_del(chan, ECONNRESET);
@@ -4303,7 +4291,7 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
| L2CAP_FEAT_FCS;
- if (enable_hs)
+ if (conn->hs_enabled)
feat_mask |= L2CAP_FEAT_EXT_FLOW
| L2CAP_FEAT_EXT_WINDOW;
@@ -4314,7 +4302,7 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
u8 buf[12];
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
- if (enable_hs)
+ if (conn->hs_enabled)
l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
else
l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
@@ -4411,7 +4399,7 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,
if (cmd_len != sizeof(*req))
return -EPROTO;
- if (!enable_hs)
+ if (!conn->hs_enabled)
return -EINVAL;
psm = le16_to_cpu(req->psm);
@@ -4420,7 +4408,7 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,
BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
/* For controller id 0 make BR/EDR connection */
- if (req->amp_id == HCI_BREDR_ID) {
+ if (req->amp_id == AMP_ID_BREDR) {
l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
req->amp_id);
return 0;
@@ -4442,10 +4430,13 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,
struct amp_mgr *mgr = conn->hcon->amp_mgr;
struct hci_conn *hs_hcon;
- hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
+ hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
+ &conn->hcon->dst);
if (!hs_hcon) {
hci_dev_put(hdev);
- return -EFAULT;
+ cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
+ chan->dcid);
+ return 0;
}
BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
@@ -4469,7 +4460,7 @@ error:
l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
sizeof(rsp), &rsp);
- return -EFAULT;
+ return 0;
}
static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
@@ -4655,7 +4646,7 @@ void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
if (chan->state != BT_CONNECTED) {
/* Ignore logical link if channel is on BR/EDR */
- if (chan->local_amp_id)
+ if (chan->local_amp_id != AMP_ID_BREDR)
l2cap_logical_finish_create(chan, hchan);
} else {
l2cap_logical_finish_move(chan, hchan);
@@ -4666,7 +4657,7 @@ void l2cap_move_start(struct l2cap_chan *chan)
{
BT_DBG("chan %p", chan);
- if (chan->local_amp_id == HCI_BREDR_ID) {
+ if (chan->local_amp_id == AMP_ID_BREDR) {
if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
return;
chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
@@ -4723,7 +4714,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
sizeof(rsp), &rsp);
if (result == L2CAP_CR_SUCCESS) {
- __l2cap_state_change(chan, BT_CONFIG);
+ l2cap_state_change(chan, BT_CONFIG);
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
L2CAP_CONF_REQ,
@@ -4838,7 +4829,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
- if (!enable_hs)
+ if (!conn->hs_enabled)
return -EINVAL;
chan = l2cap_get_chan_by_dcid(conn, icid);
@@ -4865,7 +4856,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
goto send_move_response;
}
- if (req->dest_amp_id) {
+ if (req->dest_amp_id != AMP_ID_BREDR) {
struct hci_dev *hdev;
hdev = hci_dev_get(req->dest_amp_id);
if (!hdev || hdev->dev_type != HCI_AMP ||
@@ -4885,7 +4876,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
*/
if ((__chan_is_moving(chan) ||
chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
- bacmp(conn->src, conn->dst) > 0) {
+ bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
result = L2CAP_MR_COLLISION;
goto send_move_response;
}
@@ -4895,7 +4886,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
chan->move_id = req->dest_amp_id;
icid = chan->dcid;
- if (!req->dest_amp_id) {
+ if (req->dest_amp_id == AMP_ID_BREDR) {
/* Moving to BR/EDR */
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
@@ -5087,7 +5078,7 @@ static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
if (result == L2CAP_MC_CONFIRMED) {
chan->local_amp_id = chan->move_id;
- if (!chan->local_amp_id)
+ if (chan->local_amp_id == AMP_ID_BREDR)
__release_logical_link(chan);
} else {
chan->move_id = chan->local_amp_id;
@@ -5127,7 +5118,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
chan->local_amp_id = chan->move_id;
- if (!chan->local_amp_id && chan->hs_hchan)
+ if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
__release_logical_link(chan);
l2cap_move_done(chan);
@@ -5219,7 +5210,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
case L2CAP_CONN_RSP:
case L2CAP_CREATE_CHAN_RSP:
- err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
+ l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
break;
case L2CAP_CONF_REQ:
@@ -5227,7 +5218,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
break;
case L2CAP_CONF_RSP:
- err = l2cap_config_rsp(conn, cmd, cmd_len, data);
+ l2cap_config_rsp(conn, cmd, cmd_len, data);
break;
case L2CAP_DISCONN_REQ:
@@ -5235,7 +5226,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
break;
case L2CAP_DISCONN_RSP:
- err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
+ l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
break;
case L2CAP_ECHO_REQ:
@@ -5250,7 +5241,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
break;
case L2CAP_INFO_RSP:
- err = l2cap_information_rsp(conn, cmd, cmd_len, data);
+ l2cap_information_rsp(conn, cmd, cmd_len, data);
break;
case L2CAP_CREATE_CHAN_REQ:
@@ -5262,7 +5253,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
break;
case L2CAP_MOVE_CHAN_RSP:
- err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
+ l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
break;
case L2CAP_MOVE_CHAN_CFM:
@@ -5270,7 +5261,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
break;
case L2CAP_MOVE_CHAN_CFM_RSP:
- err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
+ l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
break;
default:
@@ -5304,51 +5295,48 @@ static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
struct sk_buff *skb)
{
- u8 *data = skb->data;
- int len = skb->len;
- struct l2cap_cmd_hdr cmd;
+ struct hci_conn *hcon = conn->hcon;
+ struct l2cap_cmd_hdr *cmd;
+ u16 len;
int err;
- l2cap_raw_recv(conn, skb);
+ if (hcon->type != LE_LINK)
+ goto drop;
- while (len >= L2CAP_CMD_HDR_SIZE) {
- u16 cmd_len;
- memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
- data += L2CAP_CMD_HDR_SIZE;
- len -= L2CAP_CMD_HDR_SIZE;
+ if (skb->len < L2CAP_CMD_HDR_SIZE)
+ goto drop;
- cmd_len = le16_to_cpu(cmd.len);
+ cmd = (void *) skb->data;
+ skb_pull(skb, L2CAP_CMD_HDR_SIZE);
- BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
- cmd.ident);
+ len = le16_to_cpu(cmd->len);
- if (cmd_len > len || !cmd.ident) {
- BT_DBG("corrupted command");
- break;
- }
+ BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
- err = l2cap_le_sig_cmd(conn, &cmd, data);
- if (err) {
- struct l2cap_cmd_rej_unk rej;
+ if (len != skb->len || !cmd->ident) {
+ BT_DBG("corrupted command");
+ goto drop;
+ }
- BT_ERR("Wrong link type (%d)", err);
+ err = l2cap_le_sig_cmd(conn, cmd, skb->data);
+ if (err) {
+ struct l2cap_cmd_rej_unk rej;
- /* FIXME: Map err to a valid reason */
- rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
- l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
- sizeof(rej), &rej);
- }
+ BT_ERR("Wrong link type (%d)", err);
- data += cmd_len;
- len -= cmd_len;
+ rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
+ sizeof(rej), &rej);
}
+drop:
kfree_skb(skb);
}
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
struct sk_buff *skb)
{
+ struct hci_conn *hcon = conn->hcon;
u8 *data = skb->data;
int len = skb->len;
struct l2cap_cmd_hdr cmd;
@@ -5356,6 +5344,9 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
l2cap_raw_recv(conn, skb);
+ if (hcon->type != ACL_LINK)
+ goto drop;
+
while (len >= L2CAP_CMD_HDR_SIZE) {
u16 cmd_len;
memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
@@ -5378,7 +5369,6 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
BT_ERR("Wrong link type (%d)", err);
- /* FIXME: Map err to a valid reason */
rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
sizeof(rej), &rej);
@@ -5388,6 +5378,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
len -= cmd_len;
}
+drop:
kfree_skb(skb);
}
@@ -5784,7 +5775,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
struct sk_buff *skb, u8 event)
{
int err = 0;
- bool skb_in_use = 0;
+ bool skb_in_use = false;
BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
event);
@@ -5805,7 +5796,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
control->txseq);
chan->buffer_seq = chan->expected_tx_seq;
- skb_in_use = 1;
+ skb_in_use = true;
err = l2cap_reassemble_sdu(chan, skb, control);
if (err)
@@ -5841,7 +5832,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
* current frame is stored for later use.
*/
skb_queue_tail(&chan->srej_q, skb);
- skb_in_use = 1;
+ skb_in_use = true;
BT_DBG("Queued %p (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
@@ -5919,7 +5910,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
{
int err = 0;
u16 txseq = control->txseq;
- bool skb_in_use = 0;
+ bool skb_in_use = false;
BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
event);
@@ -5931,7 +5922,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
/* Keep frame for reassembly later */
l2cap_pass_to_tx(chan, control);
skb_queue_tail(&chan->srej_q, skb);
- skb_in_use = 1;
+ skb_in_use = true;
BT_DBG("Queued %p (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
@@ -5942,7 +5933,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
l2cap_pass_to_tx(chan, control);
skb_queue_tail(&chan->srej_q, skb);
- skb_in_use = 1;
+ skb_in_use = true;
BT_DBG("Queued %p (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
@@ -5957,7 +5948,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
* the missing frames.
*/
skb_queue_tail(&chan->srej_q, skb);
- skb_in_use = 1;
+ skb_in_use = true;
BT_DBG("Queued %p (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
@@ -5971,7 +5962,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
* SREJ'd frames.
*/
skb_queue_tail(&chan->srej_q, skb);
- skb_in_use = 1;
+ skb_in_use = true;
BT_DBG("Queued %p (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
@@ -6380,9 +6371,13 @@ done:
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
struct sk_buff *skb)
{
+ struct hci_conn *hcon = conn->hcon;
struct l2cap_chan *chan;
- chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
+ if (hcon->type != ACL_LINK)
+ goto drop;
+
+ chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst);
if (!chan)
goto drop;
@@ -6394,6 +6389,10 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
if (chan->imtu < skb->len)
goto drop;
+ /* Store remote BD_ADDR and PSM for msg_name */
+ bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
+ bt_cb(skb)->psm = psm;
+
if (!chan->ops->recv(chan, skb))
return;
@@ -6404,15 +6403,22 @@ drop:
static void l2cap_att_channel(struct l2cap_conn *conn,
struct sk_buff *skb)
{
+ struct hci_conn *hcon = conn->hcon;
struct l2cap_chan *chan;
+ if (hcon->type != LE_LINK)
+ goto drop;
+
chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
- conn->src, conn->dst);
+ &hcon->src, &hcon->dst);
if (!chan)
goto drop;
BT_DBG("chan %p, len %d", chan, skb->len);
+ if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
+ goto drop;
+
if (chan->imtu < skb->len)
goto drop;
@@ -6441,9 +6447,6 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
BT_DBG("len %d, cid 0x%4.4x", len, cid);
switch (cid) {
- case L2CAP_CID_LE_SIGNALING:
- l2cap_le_sig_channel(conn, skb);
- break;
case L2CAP_CID_SIGNALING:
l2cap_sig_channel(conn, skb);
break;
@@ -6458,6 +6461,10 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
l2cap_att_channel(conn, skb);
break;
+ case L2CAP_CID_LE_SIGNALING:
+ l2cap_le_sig_channel(conn, skb);
+ break;
+
case L2CAP_CID_SMP:
if (smp_sig_channel(conn, skb))
l2cap_conn_del(conn->hcon, EACCES);
@@ -6481,17 +6488,15 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
/* Find listening sockets and check their link_mode */
read_lock(&chan_list_lock);
list_for_each_entry(c, &chan_list, global_l) {
- struct sock *sk = c->sk;
-
if (c->state != BT_LISTEN)
continue;
- if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
+ if (!bacmp(&c->src, &hdev->bdaddr)) {
lm1 |= HCI_LM_ACCEPT;
if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm1 |= HCI_LM_MASTER;
exact++;
- } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
+ } else if (!bacmp(&c->src, BDADDR_ANY)) {
lm2 |= HCI_LM_ACCEPT;
if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm2 |= HCI_LM_MASTER;
@@ -6597,11 +6602,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
if (!status && (chan->state == BT_CONNECTED ||
chan->state == BT_CONFIG)) {
- struct sock *sk = chan->sk;
-
- clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
- sk->sk_state_change(sk);
-
+ chan->ops->resume(chan);
l2cap_check_encryption(chan, encrypt);
l2cap_chan_unlock(chan);
continue;
@@ -6614,32 +6615,26 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
} else if (chan->state == BT_CONNECT2) {
- struct sock *sk = chan->sk;
struct l2cap_conn_rsp rsp;
__u16 res, stat;
- lock_sock(sk);
-
if (!status) {
- if (test_bit(BT_SK_DEFER_SETUP,
- &bt_sk(sk)->flags)) {
+ if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
res = L2CAP_CR_PEND;
stat = L2CAP_CS_AUTHOR_PEND;
chan->ops->defer(chan);
} else {
- __l2cap_state_change(chan, BT_CONFIG);
+ l2cap_state_change(chan, BT_CONFIG);
res = L2CAP_CR_SUCCESS;
stat = L2CAP_CS_NO_INFO;
}
} else {
- __l2cap_state_change(chan, BT_DISCONN);
+ l2cap_state_change(chan, BT_DISCONN);
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
res = L2CAP_CR_SEC_BLOCK;
stat = L2CAP_CS_NO_INFO;
}
- release_sock(sk);
-
rsp.scid = cpu_to_le16(chan->dcid);
rsp.dcid = cpu_to_le16(chan->scid);
rsp.result = cpu_to_le16(res);
@@ -6756,9 +6751,13 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
conn->rx_len -= skb->len;
if (!conn->rx_len) {
- /* Complete frame received */
- l2cap_recv_frame(conn, conn->rx_skb);
+ /* Complete frame received. l2cap_recv_frame
+ * takes ownership of the skb so set the global
+ * rx_skb pointer to NULL first.
+ */
+ struct sk_buff *rx_skb = conn->rx_skb;
conn->rx_skb = NULL;
+ l2cap_recv_frame(conn, rx_skb);
}
break;
}
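For reference, the hand-off pattern described in the new comment (detach the shared pointer before calling the consumer that takes ownership) can be sketched in isolation; the names below are hypothetical and only mirror the shape of the kernel change, they are not part of the patch.

#include <stdlib.h>

struct frame { int len; };

/* Hypothetical reassembly context, standing in for struct l2cap_conn. */
struct reasm {
	struct frame *rx_frame;	/* partially reassembled frame, if any */
};

/* Consumer that takes ownership of the frame; it may free it or
 * re-enter the reassembly code, just as l2cap_recv_frame() may. */
static void deliver_frame(struct frame *f)
{
	free(f);
}

static void frame_complete(struct reasm *r)
{
	/* Detach first, so no other path can see a frame that is already
	 * being consumed -- the same reason conn->rx_skb is cleared
	 * before l2cap_recv_frame() is called. */
	struct frame *f = r->rx_frame;

	r->rx_frame = NULL;
	deliver_frame(f);
}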
@@ -6775,10 +6774,8 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
read_lock(&chan_list_lock);
list_for_each_entry(c, &chan_list, global_l) {
- struct sock *sk = c->sk;
-
seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
- &bt_sk(sk)->src, &bt_sk(sk)->dst,
+ &c->src, &c->dst,
c->state, __le16_to_cpu(c->psm),
c->scid, c->dcid, c->imtu, c->omtu,
c->sec_level, c->mode);
@@ -6811,12 +6808,11 @@ int __init l2cap_init(void)
if (err < 0)
return err;
- if (bt_debugfs) {
- l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
- NULL, &l2cap_debugfs_fops);
- if (!l2cap_debugfs)
- BT_ERR("Failed to create L2CAP debug file");
- }
+ if (IS_ERR_OR_NULL(bt_debugfs))
+ return 0;
+
+ l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
+ NULL, &l2cap_debugfs_fops);
return 0;
}
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 0098af80b213..7cc24d263caa 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -32,7 +32,8 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
-#include <net/bluetooth/smp.h>
+
+#include "smp.h"
static struct bt_sock_list l2cap_sk_list = {
.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
@@ -68,6 +69,18 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
if (la.l2_cid && la.l2_psm)
return -EINVAL;
+ if (!bdaddr_type_is_valid(la.l2_bdaddr_type))
+ return -EINVAL;
+
+ if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
+ /* Connection oriented channels are not supported on LE */
+ if (la.l2_psm)
+ return -EINVAL;
+ /* We only allow ATT user space socket */
+ if (la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
+ return -EINVAL;
+ }
+
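The new checks only admit the ATT fixed channel (CID 0x0004) for LE address types and reject any PSM. As a hedged user-space usage sketch, assuming reasonably recent BlueZ headers that provide l2_bdaddr_type and BDADDR_LE_PUBLIC, binding such a socket looks roughly like this:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

int open_att_socket(void)
{
	struct sockaddr_l2 addr;
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.l2_family = AF_BLUETOOTH;
	addr.l2_cid = htobs(0x0004);		/* ATT fixed channel */
	addr.l2_bdaddr_type = BDADDR_LE_PUBLIC;
	bacpy(&addr.l2_bdaddr, BDADDR_ANY);

	/* With the checks above, setting l2_psm for an LE address type
	 * would fail with -EINVAL, as would any CID other than ATT. */
	if (bind(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(sk);
		return -1;
	}

	return sk;
}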
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
@@ -99,11 +112,20 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
if (err < 0)
goto done;
- if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
- __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
- chan->sec_level = BT_SECURITY_SDP;
+ switch (chan->chan_type) {
+ case L2CAP_CHAN_CONN_LESS:
+ if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_3DSP)
+ chan->sec_level = BT_SECURITY_SDP;
+ break;
+ case L2CAP_CHAN_CONN_ORIENTED:
+ if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
+ __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
+ chan->sec_level = BT_SECURITY_SDP;
+ break;
+ }
- bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+ bacpy(&chan->src, &la.l2_bdaddr);
+ chan->src_type = la.l2_bdaddr_type;
chan->state = BT_BOUND;
sk->sk_state = BT_BOUND;
@@ -134,6 +156,47 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
if (la.l2_cid && la.l2_psm)
return -EINVAL;
+ if (!bdaddr_type_is_valid(la.l2_bdaddr_type))
+ return -EINVAL;
+
+ /* Check that the socket wasn't bound to something that
+ * conflicts with the address given to connect(). If chan->src
+ * is BDADDR_ANY it means bind() was never used, in which case
+ * chan->src_type and la.l2_bdaddr_type do not need to match.
+ */
+ if (chan->src_type == BDADDR_BREDR && bacmp(&chan->src, BDADDR_ANY) &&
+ bdaddr_type_is_le(la.l2_bdaddr_type)) {
+ /* Old user space versions will try to incorrectly bind
+ * the ATT socket using BDADDR_BREDR. We need to accept
+ * this and fix up the source address type only when
+ * both the source CID and destination CID indicate
+ * ATT. Anything else is an invalid combination.
+ */
+ if (chan->scid != L2CAP_CID_ATT ||
+ la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
+ return -EINVAL;
+
+ /* We don't have the hdev available here to make a
+ * better decision on random vs public, but since all
+ * user space versions that exhibit this issue anyway do
+ * not support random local addresses assuming public

+ * here is good enough.
+ */
+ chan->src_type = BDADDR_LE_PUBLIC;
+ }
+
+ if (chan->src_type != BDADDR_BREDR && la.l2_bdaddr_type == BDADDR_BREDR)
+ return -EINVAL;
+
+ if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
+ /* Connection oriented channels are not supported on LE */
+ if (la.l2_psm)
+ return -EINVAL;
+ /* We only allow ATT user space socket */
+ if (la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
+ return -EINVAL;
+ }
+
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
&la.l2_bdaddr, la.l2_bdaddr_type);
if (err)
@@ -265,12 +328,14 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,
if (peer) {
la->l2_psm = chan->psm;
- bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
+ bacpy(&la->l2_bdaddr, &chan->dst);
la->l2_cid = cpu_to_le16(chan->dcid);
+ la->l2_bdaddr_type = chan->dst_type;
} else {
la->l2_psm = chan->sport;
- bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
+ bacpy(&la->l2_bdaddr, &chan->src);
la->l2_cid = cpu_to_le16(chan->scid);
+ la->l2_bdaddr_type = chan->src_type;
}
return 0;
@@ -445,11 +510,6 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
break;
case BT_CHANNEL_POLICY:
- if (!enable_hs) {
- err = -ENOPROTOOPT;
- break;
- }
-
if (put_user(chan->chan_policy, (u32 __user *) optval))
err = -EFAULT;
break;
@@ -665,10 +725,13 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (opt)
+ if (opt) {
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
- else
+ set_bit(FLAG_DEFER_SETUP, &chan->flags);
+ } else {
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ clear_bit(FLAG_DEFER_SETUP, &chan->flags);
+ }
break;
case BT_FLUSHABLE:
@@ -683,7 +746,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
}
if (opt == BT_FLUSHABLE_OFF) {
- struct l2cap_conn *conn = chan->conn;
+ conn = chan->conn;
/* proceed further only when we have l2cap_conn and
No Flush support in the LM */
if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
@@ -720,11 +783,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_CHANNEL_POLICY:
- if (!enable_hs) {
- err = -ENOPROTOOPT;
- break;
- }
-
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
@@ -777,6 +835,12 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (sk->sk_state != BT_CONNECTED)
return -ENOTCONN;
+ lock_sock(sk);
+ err = bt_sock_wait_ready(sk, msg->msg_flags);
+ release_sock(sk);
+ if (err)
+ return err;
+
l2cap_chan_lock(chan);
err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
l2cap_chan_unlock(chan);
@@ -799,8 +863,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
pi->chan->state = BT_CONFIG;
__l2cap_connect_rsp_defer(pi->chan);
- release_sock(sk);
- return 0;
+ err = 0;
+ goto done;
}
release_sock(sk);
@@ -856,6 +920,38 @@ static void l2cap_sock_kill(struct sock *sk)
sock_put(sk);
}
+static int __l2cap_wait_ack(struct sock *sk)
+{
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ DECLARE_WAITQUEUE(wait, current);
+ int err = 0;
+ int timeo = HZ/5;
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (chan->unacked_frames > 0 && chan->conn) {
+ if (!timeo)
+ timeo = HZ/5;
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ err = sock_error(sk);
+ if (err)
+ break;
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return err;
+}
+
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
@@ -946,6 +1042,8 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
{
struct sock *sk, *parent = chan->data;
+ lock_sock(parent);
+
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
@@ -963,18 +1061,19 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
bt_accept_enqueue(parent, sk);
+ release_sock(parent);
+
return l2cap_pi(sk)->chan;
}
static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
- int err;
struct sock *sk = chan->data;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
+ int err;
lock_sock(sk);
- if (pi->rx_busy_skb) {
+ if (l2cap_pi(sk)->rx_busy_skb) {
err = -ENOMEM;
goto done;
}
@@ -990,9 +1089,9 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
* acked and reassembled until there is buffer space
* available.
*/
- if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) {
- pi->rx_busy_skb = skb;
- l2cap_chan_busy(pi->chan, 1);
+ if (err < 0 && chan->mode == L2CAP_MODE_ERTM) {
+ l2cap_pi(sk)->rx_busy_skb = skb;
+ l2cap_chan_busy(chan, 1);
err = 0;
}
@@ -1050,26 +1149,33 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
release_sock(sk);
}
-static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state)
+static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
+ int err)
{
struct sock *sk = chan->data;
sk->sk_state = state;
+
+ if (err)
+ sk->sk_err = err;
}
static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
unsigned long len, int nb)
{
+ struct sock *sk = chan->data;
struct sk_buff *skb;
int err;
l2cap_chan_unlock(chan);
- skb = bt_skb_send_alloc(chan->sk, len, nb, &err);
+ skb = bt_skb_send_alloc(sk, len, nb, &err);
l2cap_chan_lock(chan);
if (!skb)
return ERR_PTR(err);
+ bt_cb(skb)->chan = chan;
+
return skb;
}
@@ -1095,11 +1201,39 @@ static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
static void l2cap_sock_defer_cb(struct l2cap_chan *chan)
{
- struct sock *sk = chan->data;
- struct sock *parent = bt_sk(sk)->parent;
+ struct sock *parent, *sk = chan->data;
+
+ lock_sock(sk);
+ parent = bt_sk(sk)->parent;
if (parent)
parent->sk_data_ready(parent, 0);
+
+ release_sock(sk);
+}
+
+static void l2cap_sock_resume_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+
+ clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
+ sk->sk_state_change(sk);
+}
+
+static void l2cap_sock_set_shutdown_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+
+ lock_sock(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ release_sock(sk);
+}
+
+static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+
+ return sk->sk_sndtimeo;
}
static struct l2cap_ops l2cap_chan_ops = {
@@ -1111,6 +1245,9 @@ static struct l2cap_ops l2cap_chan_ops = {
.state_change = l2cap_sock_state_change_cb,
.ready = l2cap_sock_ready_cb,
.defer = l2cap_sock_defer_cb,
+ .resume = l2cap_sock_resume_cb,
+ .set_shutdown = l2cap_sock_set_shutdown_cb,
+ .get_sndtimeo = l2cap_sock_get_sndtimeo_cb,
.alloc_skb = l2cap_sock_alloc_skb_cb,
};
@@ -1120,6 +1257,7 @@ static void l2cap_sock_destruct(struct sock *sk)
if (l2cap_pi(sk)->chan)
l2cap_chan_put(l2cap_pi(sk)->chan);
+
if (l2cap_pi(sk)->rx_busy_skb) {
kfree_skb(l2cap_pi(sk)->rx_busy_skb);
l2cap_pi(sk)->rx_busy_skb = NULL;
@@ -1129,10 +1267,22 @@ static void l2cap_sock_destruct(struct sock *sk)
skb_queue_purge(&sk->sk_write_queue);
}
+static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name,
+ int *msg_namelen)
+{
+ struct sockaddr_l2 *la = (struct sockaddr_l2 *) msg_name;
+
+ memset(la, 0, sizeof(struct sockaddr_l2));
+ la->l2_family = AF_BLUETOOTH;
+ la->l2_psm = bt_cb(skb)->psm;
+ bacpy(&la->l2_bdaddr, &bt_cb(skb)->bdaddr);
+
+ *msg_namelen = sizeof(struct sockaddr_l2);
+}
+
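The l2cap_skb_msg_name callback just defined (and installed for SOCK_DGRAM sockets a few lines below) is what allows a connectionless receiver to recover the remote BD_ADDR and PSM through recvmsg(); a hedged sketch of that user-space side, again assuming BlueZ socket headers, could look like:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

ssize_t recv_with_peer(int sk, void *buf, size_t len)
{
	struct sockaddr_l2 peer;
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name = &peer,
		.msg_namelen = sizeof(peer),
		.msg_iov = &iov,
		.msg_iovlen = 1,
	};
	char str[18];
	ssize_t ret;

	ret = recvmsg(sk, &msg, 0);
	if (ret < 0)
		return ret;

	/* Filled in by the kernel from bt_cb(skb)->bdaddr and ->psm. */
	ba2str(&peer.l2_bdaddr, str);
	printf("from %s psm 0x%4.4x\n", str, btohs(peer.l2_psm));

	return ret;
}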
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct l2cap_chan *chan = pi->chan;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
BT_DBG("sk %p", sk);
@@ -1156,13 +1306,13 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
security_sk_clone(parent, sk);
} else {
-
switch (sk->sk_type) {
case SOCK_RAW:
chan->chan_type = L2CAP_CHAN_RAW;
break;
case SOCK_DGRAM:
chan->chan_type = L2CAP_CHAN_CONN_LESS;
+ bt_sk(sk)->skb_msg_name = l2cap_skb_msg_name;
break;
case SOCK_SEQPACKET:
case SOCK_STREAM:
@@ -1224,8 +1374,6 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
l2cap_chan_hold(chan);
- chan->sk = sk;
-
l2cap_pi(sk)->chan = chan;
return sk;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index fedc5399d465..a03ca3ca91bf 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -30,12 +30,11 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
-#include <net/bluetooth/smp.h>
-bool enable_hs;
+#include "smp.h"
#define MGMT_VERSION 1
-#define MGMT_REVISION 3
+#define MGMT_REVISION 4
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -76,6 +75,10 @@ static const u16 mgmt_commands[] = {
MGMT_OP_BLOCK_DEVICE,
MGMT_OP_UNBLOCK_DEVICE,
MGMT_OP_SET_DEVICE_ID,
+ MGMT_OP_SET_ADVERTISING,
+ MGMT_OP_SET_BREDR,
+ MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_OP_SET_SCAN_PARAMS,
};
static const u16 mgmt_events[] = {
@@ -181,11 +184,6 @@ static u8 mgmt_status_table[] = {
MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
};
-bool mgmt_valid_hdev(struct hci_dev *hdev)
-{
- return hdev->dev_type == HCI_BREDR;
-}
-
static u8 mgmt_status(u8 hci_status)
{
if (hci_status < ARRAY_SIZE(mgmt_status_table))
@@ -321,10 +319,8 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
count = 0;
list_for_each_entry(d, &hci_dev_list, list) {
- if (!mgmt_valid_hdev(d))
- continue;
-
- count++;
+ if (d->dev_type == HCI_BREDR)
+ count++;
}
rp_len = sizeof(*rp) + (2 * count);
@@ -339,11 +335,13 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
if (test_bit(HCI_SETUP, &d->dev_flags))
continue;
- if (!mgmt_valid_hdev(d))
+ if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
continue;
- rp->index[count++] = cpu_to_le16(d->id);
- BT_DBG("Added hci%u", d->id);
+ if (d->dev_type == HCI_BREDR) {
+ rp->index[count++] = cpu_to_le16(d->id);
+ BT_DBG("Added hci%u", d->id);
+ }
}
rp->num_controllers = cpu_to_le16(count);
@@ -366,9 +364,6 @@ static u32 get_supported_settings(struct hci_dev *hdev)
settings |= MGMT_SETTING_POWERED;
settings |= MGMT_SETTING_PAIRABLE;
- if (lmp_ssp_capable(hdev))
- settings |= MGMT_SETTING_SSP;
-
if (lmp_bredr_capable(hdev)) {
settings |= MGMT_SETTING_CONNECTABLE;
if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
@@ -376,13 +371,17 @@ static u32 get_supported_settings(struct hci_dev *hdev)
settings |= MGMT_SETTING_DISCOVERABLE;
settings |= MGMT_SETTING_BREDR;
settings |= MGMT_SETTING_LINK_SECURITY;
- }
- if (enable_hs)
- settings |= MGMT_SETTING_HS;
+ if (lmp_ssp_capable(hdev)) {
+ settings |= MGMT_SETTING_SSP;
+ settings |= MGMT_SETTING_HS;
+ }
+ }
- if (lmp_le_capable(hdev))
+ if (lmp_le_capable(hdev)) {
settings |= MGMT_SETTING_LE;
+ settings |= MGMT_SETTING_ADVERTISING;
+ }
return settings;
}
@@ -406,7 +405,7 @@ static u32 get_current_settings(struct hci_dev *hdev)
if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
settings |= MGMT_SETTING_PAIRABLE;
- if (lmp_bredr_capable(hdev))
+ if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
settings |= MGMT_SETTING_BREDR;
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
@@ -421,6 +420,9 @@ static u32 get_current_settings(struct hci_dev *hdev)
if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
settings |= MGMT_SETTING_HS;
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ settings |= MGMT_SETTING_ADVERTISING;
+
return settings;
}
@@ -534,6 +536,156 @@ static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
return ptr;
}
+static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
+{
+ struct pending_cmd *cmd;
+
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+ if (cmd->opcode == opcode)
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+{
+ u8 ad_len = 0;
+ size_t name_len;
+
+ name_len = strlen(hdev->dev_name);
+ if (name_len > 0) {
+ size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
+
+ if (name_len > max_len) {
+ name_len = max_len;
+ ptr[1] = EIR_NAME_SHORT;
+ } else
+ ptr[1] = EIR_NAME_COMPLETE;
+
+ ptr[0] = name_len + 1;
+
+ memcpy(ptr + 2, hdev->dev_name, name_len);
+
+ ad_len += (name_len + 2);
+ ptr += (name_len + 2);
+ }
+
+ return ad_len;
+}
+
+static void update_scan_rsp_data(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_le_set_scan_rsp_data cp;
+ u8 len;
+
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ return;
+
+ memset(&cp, 0, sizeof(cp));
+
+ len = create_scan_rsp_data(hdev, cp.data);
+
+ if (hdev->scan_rsp_data_len == len &&
+ memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
+ return;
+
+ memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
+ hdev->scan_rsp_data_len = len;
+
+ cp.length = len;
+
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
+}
+
+static u8 get_adv_discov_flags(struct hci_dev *hdev)
+{
+ struct pending_cmd *cmd;
+
+ /* If there's a pending mgmt command the flags will not yet have
+ * their final values, so check for this first.
+ */
+ cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
+ if (cmd) {
+ struct mgmt_mode *cp = cmd->param;
+ if (cp->val == 0x01)
+ return LE_AD_GENERAL;
+ else if (cp->val == 0x02)
+ return LE_AD_LIMITED;
+ } else {
+ if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+ return LE_AD_LIMITED;
+ else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ return LE_AD_GENERAL;
+ }
+
+ return 0;
+}
+
+static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
+{
+ u8 ad_len = 0, flags = 0;
+
+ flags |= get_adv_discov_flags(hdev);
+
+ if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ if (lmp_le_br_capable(hdev))
+ flags |= LE_AD_SIM_LE_BREDR_CTRL;
+ if (lmp_host_le_br_capable(hdev))
+ flags |= LE_AD_SIM_LE_BREDR_HOST;
+ } else {
+ flags |= LE_AD_NO_BREDR;
+ }
+
+ if (flags) {
+ BT_DBG("adv flags 0x%02x", flags);
+
+ ptr[0] = 2;
+ ptr[1] = EIR_FLAGS;
+ ptr[2] = flags;
+
+ ad_len += 3;
+ ptr += 3;
+ }
+
+ if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
+ ptr[0] = 2;
+ ptr[1] = EIR_TX_POWER;
+ ptr[2] = (u8) hdev->adv_tx_power;
+
+ ad_len += 3;
+ ptr += 3;
+ }
+
+ return ad_len;
+}
+
+static void update_adv_data(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_le_set_adv_data cp;
+ u8 len;
+
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ return;
+
+ memset(&cp, 0, sizeof(cp));
+
+ len = create_adv_data(hdev, cp.data);
+
+ if (hdev->adv_data_len == len &&
+ memcmp(cp.data, hdev->adv_data, len) == 0)
+ return;
+
+ memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+ hdev->adv_data_len = len;
+
+ cp.length = len;
+
+ hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
+}
+
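Both create_adv_data() and create_scan_rsp_data() above emit standard AD structures: one length octet that covers the type plus payload, then the type octet, then the data (the flags entry uses type 0x01, EIR_FLAGS). A minimal standalone sketch of that encoding, with a hypothetical ad_put() helper:

#include <stdint.h>
#include <string.h>

/* Append one AD structure: [len = 1 + data_len][type][data...].
 * Returns the number of bytes written. */
static size_t ad_put(uint8_t *ptr, uint8_t type,
		     const void *data, uint8_t data_len)
{
	ptr[0] = data_len + 1;	/* length covers type octet + payload */
	ptr[1] = type;
	memcpy(ptr + 2, data, data_len);
	return data_len + 2;
}

/* The flags entry written by create_adv_data() is equivalent to
 * ad_put(ptr, 0x01, &flags, 1), and the complete-name entry in
 * create_scan_rsp_data() to ad_put(ptr, 0x09, name, name_len). */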
static void create_eir(struct hci_dev *hdev, u8 *data)
{
u8 *ptr = data;
@@ -632,6 +784,9 @@ static void update_class(struct hci_request *req)
if (!hdev_is_powered(hdev))
return;
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ return;
+
if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
return;
@@ -639,6 +794,9 @@ static void update_class(struct hci_request *req)
cod[1] = hdev->major_class;
cod[2] = get_service_classes(hdev);
+ if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+ cod[1] |= 0x20;
+
if (memcmp(cod, hdev->dev_class, 3) == 0)
return;
@@ -763,18 +921,6 @@ static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
}
}
-static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
-{
- struct pending_cmd *cmd;
-
- list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
- if (cmd->opcode == opcode)
- return cmd;
- }
-
- return NULL;
-}
-
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
list_del(&cmd->list);
@@ -804,6 +950,12 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
+ if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
cancel_delayed_work(&hdev->power_off);
@@ -820,12 +972,6 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
- MGMT_STATUS_BUSY);
- goto failed;
- }
-
cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
@@ -883,27 +1029,141 @@ static int new_settings(struct hci_dev *hdev, struct sock *skip)
return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}
+struct cmd_lookup {
+ struct sock *sk;
+ struct hci_dev *hdev;
+ u8 mgmt_status;
+};
+
+static void settings_rsp(struct pending_cmd *cmd, void *data)
+{
+ struct cmd_lookup *match = data;
+
+ send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
+
+ list_del(&cmd->list);
+
+ if (match->sk == NULL) {
+ match->sk = cmd->sk;
+ sock_hold(match->sk);
+ }
+
+ mgmt_pending_free(cmd);
+}
+
+static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
+{
+ u8 *status = data;
+
+ cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
+ mgmt_pending_remove(cmd);
+}
+
+static u8 mgmt_bredr_support(struct hci_dev *hdev)
+{
+ if (!lmp_bredr_capable(hdev))
+ return MGMT_STATUS_NOT_SUPPORTED;
+ else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ return MGMT_STATUS_REJECTED;
+ else
+ return MGMT_STATUS_SUCCESS;
+}
+
+static u8 mgmt_le_support(struct hci_dev *hdev)
+{
+ if (!lmp_le_capable(hdev))
+ return MGMT_STATUS_NOT_SUPPORTED;
+ else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ return MGMT_STATUS_REJECTED;
+ else
+ return MGMT_STATUS_SUCCESS;
+}
+
+static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_mode *cp;
+ struct hci_request req;
+ bool changed;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
+ if (!cmd)
+ goto unlock;
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+ cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ goto remove_cmd;
+ }
+
+ cp = cmd->param;
+ if (cp->val) {
+ changed = !test_and_set_bit(HCI_DISCOVERABLE,
+ &hdev->dev_flags);
+
+ if (hdev->discov_timeout > 0) {
+ int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+ queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+ to);
+ }
+ } else {
+ changed = test_and_clear_bit(HCI_DISCOVERABLE,
+ &hdev->dev_flags);
+ }
+
+ send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
+
+ if (changed)
+ new_settings(hdev, cmd->sk);
+
+ /* When the discoverable mode gets changed, make sure
+ * that the class of device has the limited discoverable
+ * bit correctly set.
+ */
+ hci_req_init(&req, hdev);
+ update_class(&req);
+ hci_req_run(&req, NULL);
+
+remove_cmd:
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_set_discoverable *cp = data;
struct pending_cmd *cmd;
+ struct hci_request req;
u16 timeout;
u8 scan;
int err;
BT_DBG("request for %s", hdev->name);
- if (!lmp_bredr_capable(hdev))
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+ !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_NOT_SUPPORTED);
+ MGMT_STATUS_REJECTED);
- if (cp->val != 0x00 && cp->val != 0x01)
+ if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
MGMT_STATUS_INVALID_PARAMS);
timeout = __le16_to_cpu(cp->timeout);
- if (!cp->val && timeout > 0)
+
+ /* Disabling discoverable requires that no timeout is set,
+ * and enabling limited discoverable requires a timeout.
+ */
+ if ((cp->val == 0x00 && timeout > 0) ||
+ (cp->val == 0x02 && timeout == 0))
return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
MGMT_STATUS_INVALID_PARAMS);
@@ -931,6 +1191,10 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
if (!hdev_is_powered(hdev)) {
bool changed = false;
+ /* Setting limited discoverable when powered off is
+ * not a valid operation since it requires a timeout
+ * and so no need to check HCI_LIMITED_DISCOVERABLE.
+ */
if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
changed = true;
@@ -946,16 +1210,20 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
- if (hdev->discov_timeout > 0) {
- cancel_delayed_work(&hdev->discov_off);
- hdev->discov_timeout = 0;
- }
+ /* If the current mode is the same, then just update the timeout
+ * value with the new value. And if only the timeout gets updated,
+ * then no need for any HCI transactions.
+ */
+ if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
+ (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
+ &hdev->dev_flags)) {
+ cancel_delayed_work(&hdev->discov_off);
+ hdev->discov_timeout = timeout;
- if (cp->val && timeout > 0) {
- hdev->discov_timeout = timeout;
+ if (cp->val && hdev->discov_timeout > 0) {
+ int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
queue_delayed_work(hdev->workqueue, &hdev->discov_off,
- msecs_to_jiffies(hdev->discov_timeout * 1000));
+ to);
}
err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
@@ -968,20 +1236,66 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
+ /* Cancel any potential discoverable timeout that might still
+ * be active and store the new timeout value. The arming of
+ * the timeout happens in the complete handler.
+ */
+ cancel_delayed_work(&hdev->discov_off);
+ hdev->discov_timeout = timeout;
+
+ /* Limited discoverable mode */
+ if (cp->val == 0x02)
+ set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ else
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+
+ hci_req_init(&req, hdev);
+
+ /* The procedure for LE-only controllers is much simpler - just
+ * update the advertising data.
+ */
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ goto update_ad;
+
scan = SCAN_PAGE;
- if (cp->val)
+ if (cp->val) {
+ struct hci_cp_write_current_iac_lap hci_cp;
+
+ if (cp->val == 0x02) {
+ /* Limited discoverable mode */
+ hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
+ hci_cp.iac_lap[0] = 0x00; /* LIAC */
+ hci_cp.iac_lap[1] = 0x8b;
+ hci_cp.iac_lap[2] = 0x9e;
+ hci_cp.iac_lap[3] = 0x33; /* GIAC */
+ hci_cp.iac_lap[4] = 0x8b;
+ hci_cp.iac_lap[5] = 0x9e;
+ } else {
+ /* General discoverable mode */
+ hci_cp.num_iac = 1;
+ hci_cp.iac_lap[0] = 0x33; /* GIAC */
+ hci_cp.iac_lap[1] = 0x8b;
+ hci_cp.iac_lap[2] = 0x9e;
+ }
+
+ hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
+ (hci_cp.num_iac * 3) + 1, &hci_cp);
+
scan |= SCAN_INQUIRY;
- else
- cancel_delayed_work(&hdev->discov_off);
+ } else {
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ }
+
+ hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
+
+update_ad:
+ update_adv_data(&req);
- err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ err = hci_req_run(&req, set_discoverable_complete);
if (err < 0)
mgmt_pending_remove(cmd);
- if (cp->val)
- hdev->discov_timeout = timeout;
-
failed:
hci_dev_unlock(hdev);
return err;
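The IAC LAPs in the hunk above are written least-significant octet first, so 0x00 0x8b 0x9e is the LIAC (0x9E8B00) and 0x33 0x8b 0x9e is the GIAC (0x9E8B33). A short sketch of that encoding, with a hypothetical put_lap() helper:

#include <stdint.h>

/* Store a 24-bit inquiry access code LAP little-endian, the layout
 * expected in the iac_lap array of the Write Current IAC LAP command. */
static void put_lap(uint8_t *dst, uint32_t lap)
{
	dst[0] = lap & 0xff;
	dst[1] = (lap >> 8) & 0xff;
	dst[2] = (lap >> 16) & 0xff;
}

/* put_lap(buf, 0x9e8b00) yields 0x00 0x8b 0x9e (LIAC);
 * put_lap(buf, 0x9e8b33) yields 0x33 0x8b 0x9e (GIAC). */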
@@ -993,6 +1307,9 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
struct hci_cp_write_page_scan_activity acp;
u8 type;
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ return;
+
if (hdev->hci_ver < BLUETOOTH_VER_1_2)
return;
@@ -1019,9 +1336,55 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
+static u8 get_adv_type(struct hci_dev *hdev)
+{
+ struct pending_cmd *cmd;
+ bool connectable;
+
+ /* If there's a pending mgmt command the flag will not yet have
+ * it's final value, so check for this first.
+ */
+ cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+ if (cmd) {
+ struct mgmt_mode *cp = cmd->param;
+ connectable = !!cp->val;
+ } else {
+ connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ }
+
+ return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
+}
+
+static void enable_advertising(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_le_set_adv_param cp;
+ u8 enable = 0x01;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.min_interval = __constant_cpu_to_le16(0x0800);
+ cp.max_interval = __constant_cpu_to_le16(0x0800);
+ cp.type = get_adv_type(hdev);
+ cp.own_address_type = hdev->own_addr_type;
+ cp.channel_map = 0x07;
+
+ hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
+
+ hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
+static void disable_advertising(struct hci_request *req)
+{
+ u8 enable = 0x00;
+
+ hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
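The 0x0800 min/max interval in enable_advertising() is expressed in the controller's 0.625 ms units, which works out to 1.28 s between advertising events; a one-line conversion for reference (the unit size is an assumption based on standard LE timing, not something this patch defines):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t interval = 0x0800;	/* units of 0.625 ms */

	/* 0x0800 * 0.625 ms = 1280 ms */
	printf("%u ms\n", (unsigned int)(interval * 625UL / 1000));
	return 0;
}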
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
struct pending_cmd *cmd;
+ struct mgmt_mode *cp;
+ bool changed;
BT_DBG("status 0x%02x", status);
@@ -1031,14 +1394,56 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status)
if (!cmd)
goto unlock;
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+ cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+ goto remove_cmd;
+ }
+
+ cp = cmd->param;
+ if (cp->val)
+ changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ else
+ changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+
send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
+ if (changed)
+ new_settings(hdev, cmd->sk);
+
+remove_cmd:
mgmt_pending_remove(cmd);
unlock:
hci_dev_unlock(hdev);
}
+static int set_connectable_update_settings(struct hci_dev *hdev,
+ struct sock *sk, u8 val)
+{
+ bool changed = false;
+ int err;
+
+ if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ changed = true;
+
+ if (val) {
+ set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ } else {
+ clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
+ if (err < 0)
+ return err;
+
+ if (changed)
+ return new_settings(hdev, sk);
+
+ return 0;
+}
+
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -1050,9 +1455,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("request for %s", hdev->name);
- if (!lmp_bredr_capable(hdev))
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+ !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
- MGMT_STATUS_NOT_SUPPORTED);
+ MGMT_STATUS_REJECTED);
if (cp->val != 0x00 && cp->val != 0x01)
return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
@@ -1061,25 +1467,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
- bool changed = false;
-
- if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
- changed = true;
-
- if (cp->val) {
- set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
- } else {
- clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
- clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
- }
-
- err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
- if (err < 0)
- goto failed;
-
- if (changed)
- err = new_settings(hdev, sk);
-
+ err = set_connectable_update_settings(hdev, sk, cp->val);
goto failed;
}
@@ -1090,30 +1478,37 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
- err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
- goto failed;
- }
-
cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- if (cp->val) {
- scan = SCAN_PAGE;
- } else {
- scan = 0;
+ hci_req_init(&req, hdev);
- if (test_bit(HCI_ISCAN, &hdev->flags) &&
- hdev->discov_timeout > 0)
- cancel_delayed_work(&hdev->discov_off);
- }
+ /* If BR/EDR is not enabled and we disable advertising as a
+ * by-product of disabling connectable, we need to update the
+ * advertising flags.
+ */
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ if (!cp->val) {
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ }
+ update_adv_data(&req);
+ } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
+ if (cp->val) {
+ scan = SCAN_PAGE;
+ } else {
+ scan = 0;
- hci_req_init(&req, hdev);
+ if (test_bit(HCI_ISCAN, &hdev->flags) &&
+ hdev->discov_timeout > 0)
+ cancel_delayed_work(&hdev->discov_off);
+ }
- hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ }
/* If we're going from non-connectable to connectable or
* vice-versa when fast connectable is enabled ensure that fast
@@ -1124,9 +1519,20 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
write_fast_connectable(&req, false);
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
+ hci_conn_num(hdev, LE_LINK) == 0) {
+ disable_advertising(&req);
+ enable_advertising(&req);
+ }
+
err = hci_req_run(&req, set_connectable_complete);
- if (err < 0)
+ if (err < 0) {
mgmt_pending_remove(cmd);
+ if (err == -ENODATA)
+ err = set_connectable_update_settings(hdev, sk,
+ cp->val);
+ goto failed;
+ }
failed:
hci_dev_unlock(hdev);
@@ -1137,6 +1543,7 @@ static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_mode *cp = data;
+ bool changed;
int err;
BT_DBG("request for %s", hdev->name);
@@ -1148,17 +1555,18 @@ static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
if (cp->val)
- set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+ changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
else
- clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
+ changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
if (err < 0)
- goto failed;
+ goto unlock;
- err = new_settings(hdev, sk);
+ if (changed)
+ err = new_settings(hdev, sk);
-failed:
+unlock:
hci_dev_unlock(hdev);
return err;
}
@@ -1168,14 +1576,15 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_mode *cp = data;
struct pending_cmd *cmd;
- u8 val;
+ u8 val, status;
int err;
BT_DBG("request for %s", hdev->name);
- if (!lmp_bredr_capable(hdev))
+ status = mgmt_bredr_support(hdev);
+ if (status)
return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
- MGMT_STATUS_NOT_SUPPORTED);
+ status);
if (cp->val != 0x00 && cp->val != 0x01)
return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
@@ -1236,11 +1645,15 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_mode *cp = data;
struct pending_cmd *cmd;
- u8 val;
+ u8 status;
int err;
BT_DBG("request for %s", hdev->name);
+ status = mgmt_bredr_support(hdev);
+ if (status)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
+
if (!lmp_ssp_capable(hdev))
return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
MGMT_STATUS_NOT_SUPPORTED);
@@ -1251,14 +1664,20 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_dev_lock(hdev);
- val = !!cp->val;
-
if (!hdev_is_powered(hdev)) {
- bool changed = false;
+ bool changed;
- if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
- change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
- changed = true;
+ if (cp->val) {
+ changed = !test_and_set_bit(HCI_SSP_ENABLED,
+ &hdev->dev_flags);
+ } else {
+ changed = test_and_clear_bit(HCI_SSP_ENABLED,
+ &hdev->dev_flags);
+ if (!changed)
+ changed = test_and_clear_bit(HCI_HS_ENABLED,
+ &hdev->dev_flags);
+ else
+ clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
}
err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
@@ -1271,13 +1690,14 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
+ if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
MGMT_STATUS_BUSY);
goto failed;
}
- if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
+ if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
goto failed;
}
@@ -1288,7 +1708,7 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto failed;
}
- err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
if (err < 0) {
mgmt_pending_remove(cmd);
goto failed;
@@ -1302,23 +1722,90 @@ failed:
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_mode *cp = data;
+ bool changed;
+ u8 status;
+ int err;
BT_DBG("request for %s", hdev->name);
- if (!enable_hs)
+ status = mgmt_bredr_support(hdev);
+ if (status)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
+
+ if (!lmp_ssp_capable(hdev))
return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
MGMT_STATUS_NOT_SUPPORTED);
+ if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_REJECTED);
+
if (cp->val != 0x00 && cp->val != 0x01)
return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
MGMT_STATUS_INVALID_PARAMS);
- if (cp->val)
- set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
- else
- clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ hci_dev_lock(hdev);
+
+ if (cp->val) {
+ changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ } else {
+ if (hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_REJECTED);
+ goto unlock;
+ }
+
+ changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
+ if (err < 0)
+ goto unlock;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static void le_enable_complete(struct hci_dev *hdev, u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev };
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+
+ mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
+ &mgmt_err);
+ return;
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
+
+ new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ /* Make sure the controller has a good default for
+ * advertising data. Restrict the update to when LE
+ * has actually been enabled. During power on, the
+ * update in powered_update_hci will take care of it.
+ */
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ struct hci_request req;
+
+ hci_dev_lock(hdev);
- return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
+ hci_req_init(&req, hdev);
+ update_adv_data(&req);
+ update_scan_rsp_data(&req);
+ hci_req_run(&req, NULL);
+
+ hci_dev_unlock(hdev);
+ }
}
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
@@ -1326,6 +1813,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
struct mgmt_mode *cp = data;
struct hci_cp_write_le_host_supported hci_cp;
struct pending_cmd *cmd;
+ struct hci_request req;
int err;
u8 val, enabled;
@@ -1340,7 +1828,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
MGMT_STATUS_INVALID_PARAMS);
/* LE-only devices do not allow toggling LE on/off */
- if (!lmp_bredr_capable(hdev))
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
MGMT_STATUS_REJECTED);
@@ -1357,6 +1845,11 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
changed = true;
}
+ if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+ clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ changed = true;
+ }
+
err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
if (err < 0)
goto unlock;
@@ -1367,7 +1860,8 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto unlock;
}
- if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
+ if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
MGMT_STATUS_BUSY);
goto unlock;
@@ -1379,15 +1873,22 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto unlock;
}
+ hci_req_init(&req, hdev);
+
memset(&hci_cp, 0, sizeof(hci_cp));
if (val) {
hci_cp.le = val;
hci_cp.simul = lmp_le_br_capable(hdev);
+ } else {
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ disable_advertising(&req);
}
- err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
- &hci_cp);
+ hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
+ &hci_cp);
+
+ err = hci_req_run(&req, le_enable_complete);
if (err < 0)
mgmt_pending_remove(cmd);
@@ -1706,6 +2207,12 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
u16 key_count, expected_len;
int i;
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_bredr_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
key_count = __le16_to_cpu(cp->key_count);
expected_len = sizeof(*cp) + key_count *
@@ -2515,8 +3022,11 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
update_eir(&req);
}
+ /* The name is stored in the scan response data and so
+ * no need to update the advertising data here.
+ */
if (lmp_le_capable(hdev))
- hci_update_ad(&req);
+ update_scan_rsp_data(&req);
err = hci_req_run(&req, set_name_complete);
if (err < 0)
@@ -2685,6 +3195,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
struct hci_request req;
/* General inquiry access code (GIAC) */
u8 lap[3] = { 0x33, 0x8b, 0x9e };
+ u8 status;
int err;
BT_DBG("%s", hdev->name);
@@ -2721,9 +3232,10 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
switch (hdev->discovery.type) {
case DISCOV_TYPE_BREDR:
- if (!lmp_bredr_capable(hdev)) {
+ status = mgmt_bredr_support(hdev);
+ if (status) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_NOT_SUPPORTED);
+ status);
mgmt_pending_remove(cmd);
goto failed;
}
@@ -2745,22 +3257,23 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
case DISCOV_TYPE_LE:
case DISCOV_TYPE_INTERLEAVED:
- if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ status = mgmt_le_support(hdev);
+ if (status) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_NOT_SUPPORTED);
+ status);
mgmt_pending_remove(cmd);
goto failed;
}
if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
- !lmp_bredr_capable(hdev)) {
+ !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_NOT_SUPPORTED);
mgmt_pending_remove(cmd);
goto failed;
}
- if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_REJECTED);
mgmt_pending_remove(cmd);
@@ -2778,6 +3291,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
param_cp.type = LE_SCAN_ACTIVE;
param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
+ param_cp.own_address_type = hdev->own_addr_type;
hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
&param_cp);
@@ -3065,6 +3579,186 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
return err;
}
+static void set_advertising_complete(struct hci_dev *hdev, u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev };
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+
+ mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
+ cmd_status_rsp, &mgmt_err);
+ return;
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
+ &match);
+
+ new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+}
+
+static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ u8 val, enabled, status;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ status = mgmt_le_support(hdev);
+ if (status)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+ status);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ val = !!cp->val;
+ enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
+
+ /* The following conditions are ones which mean that we should
+ * not do any HCI communication but directly send a mgmt
+ * response to user space (after toggling the flag if
+ * necessary).
+ */
+ if (!hdev_is_powered(hdev) || val == enabled ||
+ hci_conn_num(hdev, LE_LINK) > 0) {
+ bool changed = false;
+
+ if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+ change_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ changed = true;
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
+ if (err < 0)
+ goto unlock;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto unlock;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ hci_req_init(&req, hdev);
+
+ if (val)
+ enable_advertising(&req);
+ else
+ disable_advertising(&req);
+
+ err = hci_req_run(&req, set_advertising_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_static_address(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_set_static_address *cp = data;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!lmp_le_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (hdev_is_powered(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_STATUS_REJECTED);
+
+ if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
+ if (!bacmp(&cp->bdaddr, BDADDR_NONE))
+ return cmd_status(sk, hdev->id,
+ MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ /* Two most significant bits shall be set */
+ if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
+ return cmd_status(sk, hdev->id,
+ MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ hci_dev_lock(hdev);
+
+ bacpy(&hdev->static_addr, &cp->bdaddr);
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
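The (b[5] & 0xc0) != 0xc0 test reflects the rule that an LE static random address must have its two most significant bits set; since bdaddr_t stores the address least-significant octet first, b[5] is the top octet. The same check as a standalone sketch:

#include <stdbool.h>
#include <stdint.h>

/* bdaddr_t-style layout: b[0] is the least significant octet. */
static bool is_static_random_addr(const uint8_t b[6])
{
	/* Both top bits of the most significant octet must be set. */
	return (b[5] & 0xc0) == 0xc0;
}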
+static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_set_scan_params *cp = data;
+ __u16 interval, window;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!lmp_le_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ interval = __le16_to_cpu(cp->interval);
+
+ if (interval < 0x0004 || interval > 0x4000)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ window = __le16_to_cpu(cp->window);
+
+ if (window < 0x0004 || window > 0x4000)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ if (window > interval)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ hdev->le_scan_interval = interval;
+ hdev->le_scan_window = window;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
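set_scan_params() accepts intervals and windows between 0x0004 and 0x4000 (0.625 ms units, i.e. roughly 2.5 ms to 10.24 s) and additionally requires the window to fit inside the interval. The same validation as a standalone sketch, with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>

static bool le_scan_params_valid(uint16_t interval, uint16_t window)
{
	if (interval < 0x0004 || interval > 0x4000)
		return false;
	if (window < 0x0004 || window > 0x4000)
		return false;

	/* The scan window must not exceed the scan interval. */
	return window <= interval;
}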
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
struct pending_cmd *cmd;
@@ -3108,7 +3802,8 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
BT_DBG("%s", hdev->name);
- if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
+ hdev->hci_ver < BLUETOOTH_VER_1_2)
return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
MGMT_STATUS_NOT_SUPPORTED);
@@ -3162,6 +3857,148 @@ unlock:
return err;
}
+static void set_bredr_scan(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ u8 scan = 0;
+
+ /* Ensure that fast connectable is disabled. This function will
+ * not do anything if the page scan parameters are already what
+ * they should be.
+ */
+ write_fast_connectable(req, false);
+
+ if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ scan |= SCAN_PAGE;
+ if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ scan |= SCAN_INQUIRY;
+
+ if (scan)
+ hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+}
+
+static void set_bredr_complete(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
+ if (!cmd)
+ goto unlock;
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+
+ /* We need to restore the flag if related HCI commands
+ * failed.
+ */
+ clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+ cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+ } else {
+ send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
+ new_settings(hdev, cmd->sk);
+ }
+
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_REJECTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
+ goto unlock;
+ }
+
+ if (!hdev_is_powered(hdev)) {
+ if (!cp->val) {
+ clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+ clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
+ clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ }
+
+ change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
+ if (err < 0)
+ goto unlock;
+
+ err = new_settings(hdev, sk);
+ goto unlock;
+ }
+
+ /* Reject disabling when powered on */
+ if (!cp->val) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_REJECTED);
+ goto unlock;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ /* We need to flip the bit already here so that update_adv_data
+ * generates the correct flags.
+ */
+ set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+ hci_req_init(&req, hdev);
+
+ if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ set_bredr_scan(&req);
+
+ /* Since only the advertising data flags will change, there
+ * is no need to update the scan response data.
+ */
+ update_adv_data(&req);
+
+ err = hci_req_run(&req, set_bredr_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
if (key->authenticated != 0x00 && key->authenticated != 0x01)
@@ -3180,6 +4017,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
u16 key_count, expected_len;
int i, err;
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_le_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
key_count = __le16_to_cpu(cp->key_count);
expected_len = sizeof(*cp) + key_count *
@@ -3208,15 +4051,19 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
for (i = 0; i < key_count; i++) {
struct mgmt_ltk_info *key = &cp->keys[i];
- u8 type;
+ u8 type, addr_type;
+
+ if (key->addr.type == BDADDR_LE_PUBLIC)
+ addr_type = ADDR_LE_DEV_PUBLIC;
+ else
+ addr_type = ADDR_LE_DEV_RANDOM;
if (key->master)
type = HCI_SMP_LTK;
else
type = HCI_SMP_LTK_SLAVE;
- hci_add_ltk(hdev, &key->addr.bdaddr,
- bdaddr_to_le(key->addr.type),
+ hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
type, 0, key->authenticated, key->val,
key->enc_size, key->ediv, key->rand);
}
@@ -3276,6 +4123,10 @@ static const struct mgmt_handler {
{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
{ set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
+ { set_advertising, false, MGMT_SETTING_SIZE },
+ { set_bredr, false, MGMT_SETTING_SIZE },
+ { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
+ { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
};
@@ -3320,6 +4171,13 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
MGMT_STATUS_INVALID_INDEX);
goto done;
}
+
+ if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+ test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_INDEX);
+ goto done;
+ }
}
if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
@@ -3365,74 +4223,24 @@ done:
return err;
}
-static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
+void mgmt_index_added(struct hci_dev *hdev)
{
- u8 *status = data;
-
- cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
- mgmt_pending_remove(cmd);
-}
-
-int mgmt_index_added(struct hci_dev *hdev)
-{
- if (!mgmt_valid_hdev(hdev))
- return -ENOTSUPP;
+ if (hdev->dev_type != HCI_BREDR)
+ return;
- return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
+ mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
}
-int mgmt_index_removed(struct hci_dev *hdev)
+void mgmt_index_removed(struct hci_dev *hdev)
{
u8 status = MGMT_STATUS_INVALID_INDEX;
- if (!mgmt_valid_hdev(hdev))
- return -ENOTSUPP;
+ if (hdev->dev_type != HCI_BREDR)
+ return;
mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
- return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
-}
-
-struct cmd_lookup {
- struct sock *sk;
- struct hci_dev *hdev;
- u8 mgmt_status;
-};
-
-static void settings_rsp(struct pending_cmd *cmd, void *data)
-{
- struct cmd_lookup *match = data;
-
- send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
-
- list_del(&cmd->list);
-
- if (match->sk == NULL) {
- match->sk = cmd->sk;
- sock_hold(match->sk);
- }
-
- mgmt_pending_free(cmd);
-}
-
-static void set_bredr_scan(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- u8 scan = 0;
-
- /* Ensure that fast connectable is disabled. This function will
- * not do anything if the page scan parameters are already what
- * they should be.
- */
- write_fast_connectable(req, false);
-
- if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
- scan |= SCAN_PAGE;
- if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
- scan |= SCAN_INQUIRY;
-
- if (scan)
- hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}
static void powered_complete(struct hci_dev *hdev, u8 status)
@@ -3483,13 +4291,33 @@ static int powered_update_hci(struct hci_dev *hdev)
sizeof(cp), &cp);
}
+ if (lmp_le_capable(hdev)) {
+ /* Set random address to static address if configured */
+ if (bacmp(&hdev->static_addr, BDADDR_ANY))
+ hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+ &hdev->static_addr);
+
+ /* Make sure the controller has a good default for
+ * advertising data. This also applies to the case
+ * where BR/EDR was toggled during the AUTO_OFF phase.
+ */
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ update_adv_data(&req);
+ update_scan_rsp_data(&req);
+ }
+
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ enable_advertising(&req);
+ }
+
link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
sizeof(link_sec), &link_sec);
if (lmp_bredr_capable(hdev)) {
- set_bredr_scan(&req);
+ if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ set_bredr_scan(&req);
update_class(&req);
update_name(&req);
update_eir(&req);
@@ -3533,76 +4361,110 @@ new_settings:
return err;
}
-int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
+void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
struct pending_cmd *cmd;
u8 status;
cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
if (!cmd)
- return -ENOENT;
+ return;
if (err == -ERFKILL)
status = MGMT_STATUS_RFKILLED;
else
status = MGMT_STATUS_FAILED;
- err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
+ cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
mgmt_pending_remove(cmd);
+}
- return err;
+void mgmt_discoverable_timeout(struct hci_dev *hdev)
+{
+ struct hci_request req;
+
+ hci_dev_lock(hdev);
+
+ /* When the discoverable timeout triggers, just make sure
+ * the limited discoverable flag is cleared. Even in the case
+ * of a timeout triggered from general discoverable, it is
+ * safe to unconditionally clear the flag.
+ */
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+
+ hci_req_init(&req, hdev);
+ if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ u8 scan = SCAN_PAGE;
+ hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
+ sizeof(scan), &scan);
+ }
+ update_class(&req);
+ update_adv_data(&req);
+ hci_req_run(&req, NULL);
+
+ hdev->discov_timeout = 0;
+
+ new_settings(hdev, NULL);
+
+ hci_dev_unlock(hdev);
}
-int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
+void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
- struct cmd_lookup match = { NULL, hdev };
- bool changed = false;
- int err = 0;
+ bool changed;
+
+ /* Nothing needed here if there's a pending command since that
+ * command's request completion callback takes care of everything
+ * necessary.
+ */
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
+ return;
if (discoverable) {
- if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
- changed = true;
+ changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
} else {
- if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
- changed = true;
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
}
- mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
- &match);
-
- if (changed)
- err = new_settings(hdev, match.sk);
+ if (changed) {
+ struct hci_request req;
- if (match.sk)
- sock_put(match.sk);
+ /* If this change in discoverable state was triggered by
+ * disabling connectable mode, the advertising flags may
+ * need to be updated.
+ */
+ hci_req_init(&req, hdev);
+ update_adv_data(&req);
+ hci_req_run(&req, NULL);
- return err;
+ new_settings(hdev, NULL);
+ }
}
-int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
+void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
{
- struct pending_cmd *cmd;
- bool changed = false;
- int err = 0;
+ bool changed;
- if (connectable) {
- if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
- changed = true;
- } else {
- if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
- changed = true;
- }
+ /* Nothing needed here if there's a pending command since that
+ * command's request completion callback takes care of everything
+ * necessary.
+ */
+ if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
+ return;
- cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+ if (connectable)
+ changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ else
+ changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
if (changed)
- err = new_settings(hdev, cmd ? cmd->sk : NULL);
-
- return err;
+ new_settings(hdev, NULL);
}
-int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
+void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
u8 mgmt_err = mgmt_status(status);
@@ -3613,12 +4475,10 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
if (scan & SCAN_INQUIRY)
mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
cmd_status_rsp, &mgmt_err);
-
- return 0;
}
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
- bool persistent)
+void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ bool persistent)
{
struct mgmt_ev_new_link_key ev;
@@ -3631,10 +4491,10 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
ev.key.pin_len = key->pin_len;
- return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
+ mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
+void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
{
struct mgmt_ev_new_long_term_key ev;
@@ -3653,13 +4513,23 @@ int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
memcpy(ev.key.rand, key->rand, sizeof(key->rand));
memcpy(ev.key.val, key->val, sizeof(key->val));
- return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
- NULL);
+ mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
+}
+
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
+ u8 data_len)
+{
+ eir[eir_len++] = sizeof(type) + data_len;
+ eir[eir_len++] = type;
+ memcpy(&eir[eir_len], data, data_len);
+ eir_len += data_len;
+
+ return eir_len;
}
-int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u32 flags, u8 *name, u8 name_len,
- u8 *dev_class)
+void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u32 flags, u8 *name, u8 name_len,
+ u8 *dev_class)
{
char buf[512];
struct mgmt_ev_device_connected *ev = (void *) buf;
@@ -3680,8 +4550,8 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
ev->eir_len = cpu_to_le16(eir_len);
- return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
- sizeof(*ev) + eir_len, NULL);
+ mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
+ sizeof(*ev) + eir_len, NULL);
}
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -3719,12 +4589,14 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
mgmt_pending_remove(cmd);
}
-int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type, u8 reason)
+void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 reason)
{
struct mgmt_ev_device_disconnected ev;
struct sock *sk = NULL;
- int err;
+
+ if (link_type != ACL_LINK && link_type != LE_LINK)
+ return;
mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
@@ -3732,45 +4604,49 @@ int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.reason = reason;
- err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
- sk);
+ mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
if (sk)
sock_put(sk);
mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
hdev);
-
- return err;
}
-int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type, u8 status)
+void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
{
+ u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
+ struct mgmt_cp_disconnect *cp;
struct mgmt_rp_disconnect rp;
struct pending_cmd *cmd;
- int err;
mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
hdev);
cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
if (!cmd)
- return -ENOENT;
+ return;
+
+ cp = cmd->param;
+
+ if (bacmp(bdaddr, &cp->addr.bdaddr))
+ return;
+
+ if (cp->addr.type != bdaddr_type)
+ return;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = link_to_bdaddr(link_type, addr_type);
+ rp.addr.type = bdaddr_type;
- err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
- mgmt_status(status), &rp, sizeof(rp));
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
+ mgmt_status(status), &rp, sizeof(rp));
mgmt_pending_remove(cmd);
-
- return err;
}
-int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status)
+void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status)
{
struct mgmt_ev_connect_failed ev;
@@ -3778,10 +4654,10 @@ int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
- return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
+ mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
+void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
struct mgmt_ev_pin_code_request ev;
@@ -3789,52 +4665,45 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
ev.addr.type = BDADDR_BREDR;
ev.secure = secure;
- return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
- NULL);
+ mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status)
+void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
- int err;
cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
if (!cmd)
- return -ENOENT;
+ return;
bacpy(&rp.addr.bdaddr, bdaddr);
rp.addr.type = BDADDR_BREDR;
- err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
- mgmt_status(status), &rp, sizeof(rp));
+ cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ mgmt_status(status), &rp, sizeof(rp));
mgmt_pending_remove(cmd);
-
- return err;
}
-int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 status)
+void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
- int err;
cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
if (!cmd)
- return -ENOENT;
+ return;
bacpy(&rp.addr.bdaddr, bdaddr);
rp.addr.type = BDADDR_BREDR;
- err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
- mgmt_status(status), &rp, sizeof(rp));
+ cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
+ mgmt_status(status), &rp, sizeof(rp));
mgmt_pending_remove(cmd);
-
- return err;
}
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3936,8 +4805,8 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status)
+void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status)
{
struct mgmt_ev_auth_failed ev;
@@ -3945,40 +4814,36 @@ int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
- return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
+ mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
+void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
struct cmd_lookup match = { NULL, hdev };
- bool changed = false;
- int err = 0;
+ bool changed;
if (status) {
u8 mgmt_err = mgmt_status(status);
mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
cmd_status_rsp, &mgmt_err);
- return 0;
+ return;
}
- if (test_bit(HCI_AUTH, &hdev->flags)) {
- if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
- changed = true;
- } else {
- if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
- changed = true;
- }
+ if (test_bit(HCI_AUTH, &hdev->flags))
+ changed = !test_and_set_bit(HCI_LINK_SECURITY,
+ &hdev->dev_flags);
+ else
+ changed = test_and_clear_bit(HCI_LINK_SECURITY,
+ &hdev->dev_flags);
mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
&match);
if (changed)
- err = new_settings(hdev, match.sk);
+ new_settings(hdev, match.sk);
if (match.sk)
sock_put(match.sk);
-
- return err;
}
static void clear_eir(struct hci_request *req)
@@ -3996,38 +4861,41 @@ static void clear_eir(struct hci_request *req)
hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
-int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
+void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
struct cmd_lookup match = { NULL, hdev };
struct hci_request req;
bool changed = false;
- int err = 0;
if (status) {
u8 mgmt_err = mgmt_status(status);
if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
- &hdev->dev_flags))
- err = new_settings(hdev, NULL);
+ &hdev->dev_flags)) {
+ clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ new_settings(hdev, NULL);
+ }
mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
&mgmt_err);
-
- return err;
+ return;
}
if (enable) {
- if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
- changed = true;
+ changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
} else {
- if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
- changed = true;
+ changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ if (!changed)
+ changed = test_and_clear_bit(HCI_HS_ENABLED,
+ &hdev->dev_flags);
+ else
+ clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
}
mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
if (changed)
- err = new_settings(hdev, match.sk);
+ new_settings(hdev, match.sk);
if (match.sk)
sock_put(match.sk);
@@ -4040,8 +4908,6 @@ int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
clear_eir(&req);
hci_req_run(&req, NULL);
-
- return err;
}
static void sk_lookup(struct pending_cmd *cmd, void *data)
@@ -4054,33 +4920,30 @@ static void sk_lookup(struct pending_cmd *cmd, void *data)
}
}
-int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
- u8 status)
+void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+ u8 status)
{
struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
- int err = 0;
mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
if (!status)
- err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
- 3, NULL);
+ mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
+ NULL);
if (match.sk)
sock_put(match.sk);
-
- return err;
}
-int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
+void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
struct mgmt_cp_set_local_name ev;
struct pending_cmd *cmd;
if (status)
- return 0;
+ return;
memset(&ev, 0, sizeof(ev));
memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
@@ -4094,96 +4957,54 @@ int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
* HCI dev doesn't send any mgmt signals.
*/
if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
- return 0;
+ return;
}
- return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
}
-int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
- u8 *randomizer, u8 status)
+void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+ u8 *randomizer, u8 status)
{
struct pending_cmd *cmd;
- int err;
BT_DBG("%s status %u", hdev->name, status);
cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
if (!cmd)
- return -ENOENT;
+ return;
if (status) {
- err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
- mgmt_status(status));
+ cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ mgmt_status(status));
} else {
struct mgmt_rp_read_local_oob_data rp;
memcpy(rp.hash, hash, sizeof(rp.hash));
memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
- err = cmd_complete(cmd->sk, hdev->id,
- MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
- sizeof(rp));
+ cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ 0, &rp, sizeof(rp));
}
mgmt_pending_remove(cmd);
-
- return err;
-}
-
-int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
-{
- struct cmd_lookup match = { NULL, hdev };
- bool changed = false;
- int err = 0;
-
- if (status) {
- u8 mgmt_err = mgmt_status(status);
-
- if (enable && test_and_clear_bit(HCI_LE_ENABLED,
- &hdev->dev_flags))
- err = new_settings(hdev, NULL);
-
- mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
- &mgmt_err);
-
- return err;
- }
-
- if (enable) {
- if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
- changed = true;
- } else {
- if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
- changed = true;
- }
-
- mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
-
- if (changed)
- err = new_settings(hdev, match.sk);
-
- if (match.sk)
- sock_put(match.sk);
-
- return err;
}
-int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
- ssp, u8 *eir, u16 eir_len)
+void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
+ u8 ssp, u8 *eir, u16 eir_len)
{
char buf[512];
struct mgmt_ev_device_found *ev = (void *) buf;
size_t ev_size;
if (!hci_discovery_active(hdev))
- return -EPERM;
+ return;
/* Leave 5 bytes for a potential CoD field */
if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
- return -EINVAL;
+ return;
memset(buf, 0, sizeof(buf));
@@ -4205,11 +5026,11 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
ev->eir_len = cpu_to_le16(eir_len);
ev_size = sizeof(*ev) + eir_len;
- return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
+ mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
-int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, s8 rssi, u8 *name, u8 name_len)
+void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
struct mgmt_ev_device_found *ev;
char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
@@ -4228,11 +5049,10 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
ev->eir_len = cpu_to_le16(eir_len);
- return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
- sizeof(*ev) + eir_len, NULL);
+ mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
-int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
+void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
struct mgmt_ev_discovering ev;
struct pending_cmd *cmd;
@@ -4256,7 +5076,7 @@ int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
ev.type = hdev->discovery.type;
ev.discovering = discovering;
- return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
+ mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
@@ -4287,5 +5107,35 @@ int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
cmd ? cmd->sk : NULL);
}
-module_param(enable_hs, bool, 0644);
-MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
+static void adv_enable_complete(struct hci_dev *hdev, u8 status)
+{
+ BT_DBG("%s status %u", hdev->name, status);
+
+ /* Clear the advertising mgmt setting if we failed to re-enable it */
+ if (status) {
+ clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ new_settings(hdev, NULL);
+ }
+}
+
+void mgmt_reenable_advertising(struct hci_dev *hdev)
+{
+ struct hci_request req;
+
+ if (hci_conn_num(hdev, LE_LINK) > 0)
+ return;
+
+ if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ return;
+
+ hci_req_init(&req, hdev);
+ enable_advertising(&req);
+
+ /* If this fails we have no option but to let user space know
+ * that we've disabled advertising.
+ */
+ if (hci_req_run(&req, adv_enable_complete) < 0) {
+ clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ new_settings(hdev, NULL);
+ }
+}
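
The mgmt.c hunks above convert the mgmt_* notification helpers from int to void returns and add a small eir_append_data() helper for building EIR/advertising TLV entries. As a rough standalone illustration of the TLV layout that helper produces (a sketch only, not part of the patch; the explicit capacity check is an assumption added here, since the kernel helper leaves bounds checking to its callers):

#include <stdint.h>
#include <string.h>

/* Append one EIR/AD structure (length, type, data) to a buffer and
 * return the new payload length. The length octet covers the type
 * octet plus the data, matching eir_append_data() above. */
static uint16_t eir_append(uint8_t *eir, uint16_t eir_len, uint16_t cap,
                           uint8_t type, const uint8_t *data, uint8_t data_len)
{
	if ((uint32_t)eir_len + 2 + data_len > cap)
		return eir_len;			/* no room; leave buffer as-is */

	eir[eir_len++] = 1 + data_len;		/* length = type + data */
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	return eir_len + data_len;
}
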
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index ca957d34b0c8..94d06cbfbc18 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -641,13 +641,13 @@ static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst)
{
struct rfcomm_session *s;
struct list_head *p, *n;
- struct bt_sock *sk;
+ struct l2cap_chan *chan;
list_for_each_safe(p, n, &session_list) {
s = list_entry(p, struct rfcomm_session, list);
- sk = bt_sk(s->sock->sk);
+ chan = l2cap_pi(s->sock->sk)->chan;
- if ((!bacmp(src, BDADDR_ANY) || !bacmp(&sk->src, src)) &&
- !bacmp(&sk->dst, dst))
+ if ((!bacmp(src, BDADDR_ANY) || !bacmp(&chan->src, src)) &&
+ !bacmp(&chan->dst, dst))
return s;
}
return NULL;
@@ -732,11 +732,11 @@ failed:
void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst)
{
- struct sock *sk = s->sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan;
if (src)
- bacpy(src, &bt_sk(sk)->src);
+ bacpy(src, &chan->src);
if (dst)
- bacpy(dst, &bt_sk(sk)->dst);
+ bacpy(dst, &chan->dst);
}
/* ---- RFCOMM frame sending ---- */
@@ -2112,12 +2112,11 @@ static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
rfcomm_lock();
list_for_each_entry(s, &session_list, list) {
+ struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan;
struct rfcomm_dlc *d;
list_for_each_entry(d, &s->dlcs, list) {
- struct sock *sk = s->sock->sk;
-
seq_printf(f, "%pMR %pMR %ld %d %d %d %d\n",
- &bt_sk(sk)->src, &bt_sk(sk)->dst,
+ &chan->src, &chan->dst,
d->state, d->dlci, d->mtu,
d->rx_credits, d->tx_credits);
}
@@ -2155,13 +2154,6 @@ static int __init rfcomm_init(void)
goto unregister;
}
- if (bt_debugfs) {
- rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
- bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops);
- if (!rfcomm_dlc_debugfs)
- BT_ERR("Failed to create RFCOMM debug file");
- }
-
err = rfcomm_init_ttys();
if (err < 0)
goto stop;
@@ -2172,6 +2164,13 @@ static int __init rfcomm_init(void)
BT_INFO("RFCOMM ver %s", VERSION);
+ if (IS_ERR_OR_NULL(bt_debugfs))
+ return 0;
+
+ rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
+ bt_debugfs, NULL,
+ &rfcomm_dlc_debugfs_fops);
+
return 0;
cleanup:
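
The rfcomm_init() change above (repeated below for rfcomm/sock.c and sco.c) moves debugfs registration to the end of initialization and stops treating a failed debugfs_create_file() as an error. A condensed sketch of the resulting pattern, with illustrative names (example_*), assuming bt_debugfs is the Bluetooth debugfs root handed in by the caller:

#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *example_debugfs;

/* Register an optional debugfs file: bail out quietly when the debugfs
 * root is missing or broken, and never fail init because of it. */
static void example_register_debugfs(struct dentry *bt_debugfs,
                                     const struct file_operations *fops)
{
	if (IS_ERR_OR_NULL(bt_debugfs))
		return;

	example_debugfs = debugfs_create_file("example", 0444, bt_debugfs,
	                                      NULL, fops);
}
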
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 30b3721dc6d7..0be7619c5e5e 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -87,7 +87,8 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
parent->sk_data_ready(parent, 0);
} else {
if (d->state == BT_CONNECTED)
- rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL);
+ rfcomm_session_getaddr(d->session,
+ &rfcomm_pi(sk)->src, NULL);
sk->sk_state_change(sk);
}
@@ -110,7 +111,7 @@ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
sk_for_each(sk, &rfcomm_sk_list.head) {
if (rfcomm_pi(sk)->channel == channel &&
- !bacmp(&bt_sk(sk)->src, src))
+ !bacmp(&rfcomm_pi(sk)->src, src))
break;
}
@@ -132,11 +133,11 @@ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *
if (rfcomm_pi(sk)->channel == channel) {
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src))
+ if (!bacmp(&rfcomm_pi(sk)->src, src))
break;
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ if (!bacmp(&rfcomm_pi(sk)->src, BDADDR_ANY))
sk1 = sk;
}
}
@@ -355,7 +356,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
err = -EADDRINUSE;
} else {
/* Save source address */
- bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr);
+ bacpy(&rfcomm_pi(sk)->src, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
sk->sk_state = BT_BOUND;
}
@@ -393,13 +394,14 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
}
sk->sk_state = BT_CONNECT;
- bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr);
+ bacpy(&rfcomm_pi(sk)->dst, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
d->sec_level = rfcomm_pi(sk)->sec_level;
d->role_switch = rfcomm_pi(sk)->role_switch;
- err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel);
+ err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr,
+ sa->rc_channel);
if (!err)
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
@@ -429,7 +431,7 @@ static int rfcomm_sock_listen(struct socket *sock, int backlog)
}
if (!rfcomm_pi(sk)->channel) {
- bdaddr_t *src = &bt_sk(sk)->src;
+ bdaddr_t *src = &rfcomm_pi(sk)->src;
u8 channel;
err = -EINVAL;
@@ -530,9 +532,9 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
sa->rc_family = AF_BLUETOOTH;
sa->rc_channel = rfcomm_pi(sk)->channel;
if (peer)
- bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst);
+ bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->dst);
else
- bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src);
+ bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->src);
*len = sizeof(struct sockaddr_rc);
return 0;
@@ -544,7 +546,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
struct sk_buff *skb;
- int sent = 0;
+ int sent;
if (test_bit(RFCOMM_DEFER_SETUP, &d->flags))
return -ENOTCONN;
@@ -559,6 +561,10 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
lock_sock(sk);
+ sent = bt_sock_wait_ready(sk, msg->msg_flags);
+ if (sent)
+ goto done;
+
while (len) {
size_t size = min_t(size_t, len, d->mtu);
int err;
@@ -594,6 +600,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
len -= size;
}
+done:
release_sock(sk);
return sent;
@@ -732,8 +739,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
+ struct sock *l2cap_sk;
+ struct l2cap_conn *conn;
struct rfcomm_conninfo cinfo;
- struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
int len, err = 0;
u32 opt;
@@ -776,6 +784,9 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
break;
}
+ l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
+ conn = l2cap_pi(l2cap_sk)->chan->conn;
+
memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = conn->hcon->handle;
memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
@@ -946,8 +957,8 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);
rfcomm_sock_init(sk, parent);
- bacpy(&bt_sk(sk)->src, &src);
- bacpy(&bt_sk(sk)->dst, &dst);
+ bacpy(&rfcomm_pi(sk)->src, &src);
+ bacpy(&rfcomm_pi(sk)->dst, &dst);
rfcomm_pi(sk)->channel = channel;
sk->sk_state = BT_CONFIG;
@@ -974,7 +985,7 @@ static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
sk_for_each(sk, &rfcomm_sk_list.head) {
seq_printf(f, "%pMR %pMR %d %d\n",
- &bt_sk(sk)->src, &bt_sk(sk)->dst,
+ &rfcomm_pi(sk)->src, &rfcomm_pi(sk)->dst,
sk->sk_state, rfcomm_pi(sk)->channel);
}
@@ -1044,15 +1055,15 @@ int __init rfcomm_init_sockets(void)
goto error;
}
- if (bt_debugfs) {
- rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
- bt_debugfs, NULL, &rfcomm_sock_debugfs_fops);
- if (!rfcomm_sock_debugfs)
- BT_ERR("Failed to create RFCOMM debug file");
- }
-
BT_INFO("RFCOMM socket layer initialized");
+ if (IS_ERR_OR_NULL(bt_debugfs))
+ return 0;
+
+ rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
+ bt_debugfs, NULL,
+ &rfcomm_sock_debugfs_fops);
+
return 0;
error:
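
The rfcomm/sock.c hunks stop using the generic bt_sk(sk)->src/dst fields and keep the controller addresses in the protocol-private rfcomm_pinfo instead (the sco.c hunks below do the same with sco_pi). A schematic sketch of that pattern, with the structure reduced to its address-related fields (the real layout lives in the kernel's RFCOMM headers):

#include <stdint.h>
#include <string.h>

typedef struct { uint8_t b[6]; } bdaddr_t;	/* simplified for the sketch */

/* Per-protocol socket info carries its own copy of the source and
 * destination addresses, so the generic Bluetooth socket no longer
 * needs src/dst fields of its own. */
struct rfcomm_pinfo_sketch {
	bdaddr_t src;
	bdaddr_t dst;
	uint8_t  channel;
};

static void rfcomm_sketch_set_peer(struct rfcomm_pinfo_sketch *pi,
                                   const bdaddr_t *dst, uint8_t channel)
{
	memcpy(&pi->dst, dst, sizeof(*dst));
	pi->channel = channel;
}
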
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 96bd388d93a4..12a0e51e21e1 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -92,9 +92,6 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
hcon->sco_data = conn;
conn->hcon = hcon;
- conn->src = &hdev->bdaddr;
- conn->dst = &hcon->dst;
-
if (hdev->sco_mtu > 0)
conn->mtu = hdev->sco_mtu;
else
@@ -156,16 +153,14 @@ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
static int sco_connect(struct sock *sk)
{
- bdaddr_t *src = &bt_sk(sk)->src;
- bdaddr_t *dst = &bt_sk(sk)->dst;
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
int err, type;
- BT_DBG("%pMR -> %pMR", src, dst);
+ BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
- hdev = hci_get_route(dst, src);
+ hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src);
if (!hdev)
return -EHOSTUNREACH;
@@ -182,7 +177,8 @@ static int sco_connect(struct sock *sk)
goto done;
}
- hcon = hci_connect_sco(hdev, type, dst, sco_pi(sk)->setting);
+ hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
+ sco_pi(sk)->setting);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
@@ -196,7 +192,7 @@ static int sco_connect(struct sock *sk)
}
/* Update source addr of the socket */
- bacpy(src, conn->src);
+ bacpy(&sco_pi(sk)->src, &hcon->src);
err = sco_chan_add(conn, sk, NULL);
if (err)
@@ -270,7 +266,7 @@ static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
if (sk->sk_state != BT_LISTEN)
continue;
- if (!bacmp(&bt_sk(sk)->src, ba))
+ if (!bacmp(&sco_pi(sk)->src, ba))
return sk;
}
@@ -291,11 +287,11 @@ static struct sock *sco_get_sock_listen(bdaddr_t *src)
continue;
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src))
+ if (!bacmp(&sco_pi(sk)->src, src))
break;
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY))
sk1 = sk;
}
@@ -475,7 +471,7 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
goto done;
}
- bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+ bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr);
sk->sk_state = BT_BOUND;
@@ -505,7 +501,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
lock_sock(sk);
/* Set destination address and psm */
- bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
+ bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
err = sco_connect(sk);
if (err)
@@ -522,7 +518,7 @@ done:
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
- bdaddr_t *src = &bt_sk(sk)->src;
+ bdaddr_t *src = &sco_pi(sk)->src;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
@@ -626,9 +622,9 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len
*len = sizeof(struct sockaddr_sco);
if (peer)
- bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
+ bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst);
else
- bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
+ bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src);
return 0;
}
@@ -999,7 +995,7 @@ static void sco_conn_ready(struct sco_conn *conn)
} else {
sco_conn_lock(conn);
- parent = sco_get_sock_listen(conn->src);
+ parent = sco_get_sock_listen(&conn->hcon->src);
if (!parent) {
sco_conn_unlock(conn);
return;
@@ -1017,8 +1013,8 @@ static void sco_conn_ready(struct sco_conn *conn)
sco_sock_init(sk, parent);
- bacpy(&bt_sk(sk)->src, conn->src);
- bacpy(&bt_sk(sk)->dst, conn->dst);
+ bacpy(&sco_pi(sk)->src, &conn->hcon->src);
+ bacpy(&sco_pi(sk)->dst, &conn->hcon->dst);
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
@@ -1051,8 +1047,8 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
if (sk->sk_state != BT_LISTEN)
continue;
- if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
- !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
+ if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) ||
+ !bacmp(&sco_pi(sk)->src, BDADDR_ANY)) {
lm |= HCI_LM_ACCEPT;
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
@@ -1111,8 +1107,8 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
read_lock(&sco_sk_list.lock);
sk_for_each(sk, &sco_sk_list.head) {
- seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src,
- &bt_sk(sk)->dst, sk->sk_state);
+ seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src,
+ &sco_pi(sk)->dst, sk->sk_state);
}
read_unlock(&sco_sk_list.lock);
@@ -1181,15 +1177,14 @@ int __init sco_init(void)
goto error;
}
- if (bt_debugfs) {
- sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
- NULL, &sco_debugfs_fops);
- if (!sco_debugfs)
- BT_ERR("Failed to create SCO debug file");
- }
-
BT_INFO("SCO socket layer initialized");
+ if (IS_ERR_OR_NULL(bt_debugfs))
+ return 0;
+
+ sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
+ NULL, &sco_debugfs_fops);
+
return 0;
error:
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index b5562abdd6e0..85a2796cac61 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -28,7 +28,8 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
-#include <net/bluetooth/smp.h>
+
+#include "smp.h"
#define SMP_TIMEOUT msecs_to_jiffies(30000)
@@ -85,8 +86,8 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
}
static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
- u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia,
- u8 _rat, bdaddr_t *ra, u8 res[16])
+ u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia,
+ u8 _rat, bdaddr_t *ra, u8 res[16])
{
u8 p1[16], p2[16];
int err;
@@ -126,8 +127,8 @@ static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
return err;
}
-static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16],
- u8 r1[16], u8 r2[16], u8 _r[16])
+static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16], u8 r1[16],
+ u8 r2[16], u8 _r[16])
{
int err;
@@ -150,7 +151,7 @@ static int smp_rand(u8 *buf)
}
static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
- u16 dlen, void *data)
+ u16 dlen, void *data)
{
struct sk_buff *skb;
struct l2cap_hdr *lh;
@@ -213,9 +214,8 @@ static __u8 seclevel_to_authreq(__u8 sec_level)
}
static void build_pairing_cmd(struct l2cap_conn *conn,
- struct smp_cmd_pairing *req,
- struct smp_cmd_pairing *rsp,
- __u8 authreq)
+ struct smp_cmd_pairing *req,
+ struct smp_cmd_pairing *rsp, __u8 authreq)
{
u8 dist_keys = 0;
@@ -249,7 +249,7 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
struct smp_chan *smp = conn->smp_chan;
if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
- (max_key_size < SMP_MIN_ENC_KEY_SIZE))
+ (max_key_size < SMP_MIN_ENC_KEY_SIZE))
return SMP_ENC_KEY_SIZE;
smp->enc_key_size = max_key_size;
@@ -263,15 +263,15 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
if (send)
smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
- &reason);
+ &reason);
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags);
- mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
- hcon->dst_type, HCI_ERROR_AUTH_FAILURE);
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags);
+ mgmt_auth_failed(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type,
+ HCI_ERROR_AUTH_FAILURE);
cancel_delayed_work_sync(&conn->security_timer);
- if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
+ if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
smp_chan_destroy(conn);
}
@@ -309,8 +309,8 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
/* If either side has unknown io_caps, use JUST WORKS */
/* Otherwise, look up method from the table */
if (!(auth & SMP_AUTH_MITM) ||
- local_io > SMP_IO_KEYBOARD_DISPLAY ||
- remote_io > SMP_IO_KEYBOARD_DISPLAY)
+ local_io > SMP_IO_KEYBOARD_DISPLAY ||
+ remote_io > SMP_IO_KEYBOARD_DISPLAY)
method = JUST_WORKS;
else
method = gen_method[remote_io][local_io];
@@ -354,10 +354,10 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
hci_dev_lock(hcon->hdev);
if (method == REQ_PASSKEY)
- ret = mgmt_user_passkey_request(hcon->hdev, conn->dst,
+ ret = mgmt_user_passkey_request(hcon->hdev, &hcon->dst,
hcon->type, hcon->dst_type);
else
- ret = mgmt_user_confirm_request(hcon->hdev, conn->dst,
+ ret = mgmt_user_confirm_request(hcon->hdev, &hcon->dst,
hcon->type, hcon->dst_type,
cpu_to_le32(passkey), 0);
@@ -386,12 +386,13 @@ static void confirm_work(struct work_struct *work)
smp->tfm = tfm;
if (conn->hcon->out)
- ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0,
- conn->src, conn->hcon->dst_type, conn->dst, res);
+ ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
+ conn->hcon->src_type, &conn->hcon->src,
+ conn->hcon->dst_type, &conn->hcon->dst, res);
else
ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
- conn->hcon->dst_type, conn->dst, 0, conn->src,
- res);
+ conn->hcon->dst_type, &conn->hcon->dst,
+ conn->hcon->src_type, &conn->hcon->src, res);
if (ret) {
reason = SMP_UNSPECIFIED;
goto error;
@@ -425,11 +426,13 @@ static void random_work(struct work_struct *work)
BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
if (hcon->out)
- ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0,
- conn->src, hcon->dst_type, conn->dst, res);
+ ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
+ hcon->src_type, &hcon->src,
+ hcon->dst_type, &hcon->dst, res);
else
ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
- hcon->dst_type, conn->dst, 0, conn->src, res);
+ hcon->dst_type, &hcon->dst,
+ hcon->src_type, &hcon->src, res);
if (ret) {
reason = SMP_UNSPECIFIED;
goto error;
@@ -477,9 +480,9 @@ static void random_work(struct work_struct *work)
swap128(key, stk);
memset(stk + smp->enc_key_size, 0,
- SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
+ SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
- hci_add_ltk(hcon->hdev, conn->dst, hcon->dst_type,
+ hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
HCI_SMP_STK_SLAVE, 0, 0, stk, smp->enc_key_size,
ediv, rand);
}
@@ -494,7 +497,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
{
struct smp_chan *smp;
- smp = kzalloc(sizeof(struct smp_chan), GFP_ATOMIC);
+ smp = kzalloc(sizeof(*smp), GFP_ATOMIC);
if (!smp)
return NULL;
@@ -649,7 +652,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
if ((req->auth_req & SMP_AUTH_BONDING) &&
- (rsp->auth_req & SMP_AUTH_BONDING))
+ (rsp->auth_req & SMP_AUTH_BONDING))
auth = SMP_AUTH_BONDING;
auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
@@ -684,7 +687,7 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
swap128(smp->prnd, random);
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
- random);
+ random);
} else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) {
queue_work(hdev->workqueue, &smp->confirm);
} else {
@@ -714,7 +717,7 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
struct smp_ltk *key;
struct hci_conn *hcon = conn->hcon;
- key = hci_find_ltk_by_addr(hcon->hdev, conn->dst, hcon->dst_type);
+ key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type);
if (!key)
return 0;
@@ -728,8 +731,8 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
hcon->enc_key_size = key->enc_size;
return 1;
-
}
+
static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_security_req *rp = (void *) skb->data;
@@ -835,9 +838,9 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
skb_pull(skb, sizeof(*rp));
hci_dev_lock(hdev);
- authenticated = (conn->hcon->sec_level == BT_SECURITY_HIGH);
- hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
- HCI_SMP_LTK, 1, authenticated, smp->tk, smp->enc_key_size,
+ authenticated = (hcon->sec_level == BT_SECURITY_HIGH);
+ hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, HCI_SMP_LTK, 1,
+ authenticated, smp->tk, smp->enc_key_size,
rp->ediv, rp->rand);
smp_distribute_keys(conn, 1);
hci_dev_unlock(hdev);
@@ -847,16 +850,27 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
- __u8 code = skb->data[0];
- __u8 reason;
+ struct hci_conn *hcon = conn->hcon;
+ __u8 code, reason;
int err = 0;
- if (!test_bit(HCI_LE_ENABLED, &conn->hcon->hdev->dev_flags)) {
+ if (hcon->type != LE_LINK) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (skb->len < 1) {
+ kfree_skb(skb);
+ return -EILSEQ;
+ }
+
+ if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) {
err = -ENOTSUPP;
reason = SMP_PAIRING_NOTSUPP;
goto done;
}
+ code = skb->data[0];
skb_pull(skb, sizeof(code));
/*
@@ -974,7 +988,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
authenticated = hcon->sec_level == BT_SECURITY_HIGH;
- hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
+ hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
HCI_SMP_LTK_SLAVE, 1, authenticated,
enc.ltk, smp->enc_key_size, ediv, ident.rand);
@@ -996,10 +1010,10 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
/* Just public address */
memset(&addrinfo, 0, sizeof(addrinfo));
- bacpy(&addrinfo.bdaddr, conn->src);
+ bacpy(&addrinfo.bdaddr, &conn->hcon->src);
smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo),
- &addrinfo);
+ &addrinfo);
*keydist &= ~SMP_DIST_ID_KEY;
}
diff --git a/include/net/bluetooth/smp.h b/net/bluetooth/smp.h
index f8ba07f3e5fa..f8ba07f3e5fa 100644
--- a/include/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ca04163635da..e6b7fecb3af1 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -64,7 +64,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
br_flood_deliver(br, skb, false);
goto out;
}
- if (br_multicast_rcv(br, NULL, skb)) {
+ if (br_multicast_rcv(br, NULL, skb, vid)) {
kfree_skb(skb);
goto out;
}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index ffd5874f2592..33e8f23acddd 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -700,7 +700,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
vid = nla_get_u16(tb[NDA_VLAN]);
- if (vid >= VLAN_N_VID) {
+ if (!vid || vid >= VLAN_VID_MASK) {
pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
vid);
return -EINVAL;
@@ -794,7 +794,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
vid = nla_get_u16(tb[NDA_VLAN]);
- if (vid >= VLAN_N_VID) {
+ if (!vid || vid >= VLAN_VID_MASK) {
pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
vid);
return -EINVAL;
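
The br_fdb.c hunks above (and the br_netlink.c hunk further down) tighten VLAN id validation: VID 0 denotes priority-tagged traffic and 0xFFF (VLAN_VID_MASK, 4095) is reserved, so only 1-4094 may name a forwarding-database entry. A standalone sketch of the check that the new "!vid || vid >= VLAN_VID_MASK" test implements:

#include <stdbool.h>
#include <stdint.h>

#define VLAN_VID_MASK	0x0fff	/* 12-bit VLAN id field */

/* Reject the reserved VLAN ids 0 and 4095; everything in between is a
 * valid bridge VLAN. */
static bool bridge_vid_is_valid(uint16_t vid)
{
	return vid != 0 && vid < VLAN_VID_MASK;
}
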
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index a2fd37ec35f7..7e73c32e205d 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
- br_multicast_rcv(br, p, skb))
+ br_multicast_rcv(br, p, skb, vid))
goto drop;
if (p->state == BR_STATE_LEARNING)
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 85a09bb5ca51..b7b1914dfa25 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -453,7 +453,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
call_rcu_bh(&p->rcu, br_multicast_free_pg);
err = 0;
- if (!mp->ports && !mp->mglist && mp->timer_armed &&
+ if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
break;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index d1c578630678..4c214b2b88ef 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -272,7 +272,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
- if (!mp->ports && !mp->mglist && mp->timer_armed &&
+ if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
@@ -363,7 +363,7 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
- memcpy(eth->h_source, br->dev->dev_addr, 6);
+ memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
eth->h_dest[0] = 1;
eth->h_dest[1] = 0;
eth->h_dest[2] = 0x5e;
@@ -433,7 +433,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
- memcpy(eth->h_source, br->dev->dev_addr, 6);
+ memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
eth->h_proto = htons(ETH_P_IPV6);
skb_put(skb, sizeof(*eth));
@@ -620,7 +620,6 @@ rehash:
mp->br = br;
mp->addr = *group;
-
setup_timer(&mp->timer, br_multicast_group_expired,
(unsigned long)mp);
@@ -660,6 +659,7 @@ static int br_multicast_add_group(struct net_bridge *br,
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
+ unsigned long now = jiffies;
int err;
spin_lock(&br->multicast_lock);
@@ -674,6 +674,7 @@ static int br_multicast_add_group(struct net_bridge *br,
if (!port) {
mp->mglist = true;
+ mod_timer(&mp->timer, now + br->multicast_membership_interval);
goto out;
}
@@ -681,7 +682,7 @@ static int br_multicast_add_group(struct net_bridge *br,
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p->port == port)
- goto out;
+ goto found;
if ((unsigned long)p->port < (unsigned long)port)
break;
}
@@ -692,6 +693,8 @@ static int br_multicast_add_group(struct net_bridge *br,
rcu_assign_pointer(*pp, p);
br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
+found:
+ mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
err = 0;
@@ -944,7 +947,8 @@ void br_multicast_disable_port(struct net_bridge_port *port)
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
struct net_bridge_port *port,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u16 vid)
{
struct igmpv3_report *ih;
struct igmpv3_grec *grec;
@@ -954,12 +958,10 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
int type;
int err = 0;
__be32 group;
- u16 vid = 0;
if (!pskb_may_pull(skb, sizeof(*ih)))
return -EINVAL;
- br_vlan_get_tag(skb, &vid);
ih = igmpv3_report_hdr(skb);
num = ntohs(ih->ngrec);
len = sizeof(*ih);
@@ -1002,7 +1004,8 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
struct net_bridge_port *port,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u16 vid)
{
struct icmp6hdr *icmp6h;
struct mld2_grec *grec;
@@ -1010,12 +1013,10 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
int len;
int num;
int err = 0;
- u16 vid = 0;
if (!pskb_may_pull(skb, sizeof(*icmp6h)))
return -EINVAL;
- br_vlan_get_tag(skb, &vid);
icmp6h = icmp6_hdr(skb);
num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
len = sizeof(*icmp6h);
@@ -1138,7 +1139,8 @@ static void br_multicast_query_received(struct net_bridge *br,
static int br_ip4_multicast_query(struct net_bridge *br,
struct net_bridge_port *port,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u16 vid)
{
const struct iphdr *iph = ip_hdr(skb);
struct igmphdr *ih = igmp_hdr(skb);
@@ -1150,7 +1152,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
unsigned long now = jiffies;
__be32 group;
int err = 0;
- u16 vid = 0;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
@@ -1186,14 +1187,10 @@ static int br_ip4_multicast_query(struct net_bridge *br,
if (!group)
goto out;
- br_vlan_get_tag(skb, &vid);
mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
if (!mp)
goto out;
- mod_timer(&mp->timer, now + br->multicast_membership_interval);
- mp->timer_armed = true;
-
max_delay *= br->multicast_last_member_count;
if (mp->mglist &&
@@ -1219,7 +1216,8 @@ out:
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge *br,
struct net_bridge_port *port,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u16 vid)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct mld_msg *mld;
@@ -1231,7 +1229,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
unsigned long now = jiffies;
const struct in6_addr *group = NULL;
int err = 0;
- u16 vid = 0;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
@@ -1265,14 +1262,10 @@ static int br_ip6_multicast_query(struct net_bridge *br,
if (!group)
goto out;
- br_vlan_get_tag(skb, &vid);
mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
if (!mp)
goto out;
- mod_timer(&mp->timer, now + br->multicast_membership_interval);
- mp->timer_armed = true;
-
max_delay *= br->multicast_last_member_count;
if (mp->mglist &&
(timer_pending(&mp->timer) ?
@@ -1358,7 +1351,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
call_rcu_bh(&p->rcu, br_multicast_free_pg);
br_mdb_notify(br->dev, port, group, RTM_DELMDB);
- if (!mp->ports && !mp->mglist && mp->timer_armed &&
+ if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
}
@@ -1370,12 +1363,30 @@ static void br_multicast_leave_group(struct net_bridge *br,
br->multicast_last_member_interval;
if (!port) {
- if (mp->mglist && mp->timer_armed &&
+ if (mp->mglist &&
(timer_pending(&mp->timer) ?
time_after(mp->timer.expires, time) :
try_to_del_timer_sync(&mp->timer) >= 0)) {
mod_timer(&mp->timer, time);
}
+
+ goto out;
+ }
+
+ for (p = mlock_dereference(mp->ports, br);
+ p != NULL;
+ p = mlock_dereference(p->next, br)) {
+ if (p->port != port)
+ continue;
+
+ if (!hlist_unhashed(&p->mglist) &&
+ (timer_pending(&p->timer) ?
+ time_after(p->timer.expires, time) :
+ try_to_del_timer_sync(&p->timer) >= 0)) {
+ mod_timer(&p->timer, time);
+ }
+
+ break;
}
out:
spin_unlock(&br->multicast_lock);
@@ -1424,7 +1435,8 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
static int br_multicast_ipv4_rcv(struct net_bridge *br,
struct net_bridge_port *port,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u16 vid)
{
struct sk_buff *skb2 = skb;
const struct iphdr *iph;
@@ -1432,7 +1444,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
unsigned int len;
unsigned int offset;
int err;
- u16 vid = 0;
/* We treat OOM as packet loss for now. */
if (!pskb_may_pull(skb, sizeof(*iph)))
@@ -1493,7 +1504,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
err = 0;
- br_vlan_get_tag(skb2, &vid);
BR_INPUT_SKB_CB(skb)->igmp = 1;
ih = igmp_hdr(skb2);
@@ -1504,10 +1514,10 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
err = br_ip4_multicast_add_group(br, port, ih->group, vid);
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
- err = br_ip4_multicast_igmp3_report(br, port, skb2);
+ err = br_ip4_multicast_igmp3_report(br, port, skb2, vid);
break;
case IGMP_HOST_MEMBERSHIP_QUERY:
- err = br_ip4_multicast_query(br, port, skb2);
+ err = br_ip4_multicast_query(br, port, skb2, vid);
break;
case IGMP_HOST_LEAVE_MESSAGE:
br_ip4_multicast_leave_group(br, port, ih->group, vid);
@@ -1525,7 +1535,8 @@ err_out:
#if IS_ENABLED(CONFIG_IPV6)
static int br_multicast_ipv6_rcv(struct net_bridge *br,
struct net_bridge_port *port,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u16 vid)
{
struct sk_buff *skb2;
const struct ipv6hdr *ip6h;
@@ -1535,7 +1546,6 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
unsigned int len;
int offset;
int err;
- u16 vid = 0;
if (!pskb_may_pull(skb, sizeof(*ip6h)))
return -EINVAL;
@@ -1625,7 +1635,6 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
err = 0;
- br_vlan_get_tag(skb, &vid);
BR_INPUT_SKB_CB(skb)->igmp = 1;
switch (icmp6_type) {
@@ -1642,10 +1651,10 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
break;
}
case ICMPV6_MLD2_REPORT:
- err = br_ip6_multicast_mld2_report(br, port, skb2);
+ err = br_ip6_multicast_mld2_report(br, port, skb2, vid);
break;
case ICMPV6_MGM_QUERY:
- err = br_ip6_multicast_query(br, port, skb2);
+ err = br_ip6_multicast_query(br, port, skb2, vid);
break;
case ICMPV6_MGM_REDUCTION:
{
@@ -1666,7 +1675,7 @@ out:
#endif
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
- struct sk_buff *skb)
+ struct sk_buff *skb, u16 vid)
{
BR_INPUT_SKB_CB(skb)->igmp = 0;
BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
@@ -1676,10 +1685,10 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
switch (skb->protocol) {
case htons(ETH_P_IP):
- return br_multicast_ipv4_rcv(br, port, skb);
+ return br_multicast_ipv4_rcv(br, port, skb, vid);
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
- return br_multicast_ipv6_rcv(br, port, skb);
+ return br_multicast_ipv6_rcv(br, port, skb, vid);
#endif
}
@@ -1798,7 +1807,6 @@ void br_multicast_stop(struct net_bridge *br)
hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
hlist[ver]) {
del_timer(&mp->timer);
- mp->timer_armed = false;
call_rcu_bh(&mp->rcu, br_multicast_free_group);
}
}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index f87736270eaa..878f008afefa 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -619,7 +619,7 @@ bad:
/* Replicate the checks that IPv6 does on packet reception and pass the packet
* to ip6tables, which doesn't support NAT, so things are fairly simple. */
-static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
+static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -669,7 +669,8 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
* receiving device) to make netfilter happy, the REDIRECT
* target in particular. Save the original destination IP
* address to be able to detect DNAT afterwards. */
-static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
@@ -691,7 +692,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
return NF_ACCEPT;
nf_bridge_pull_encap_header_rcsum(skb);
- return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
+ return br_nf_pre_routing_ipv6(ops, skb, in, out, okfn);
}
if (!brnf_call_iptables && !br->nf_call_iptables)
@@ -727,7 +728,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
* took place when the packet entered the bridge), but we
* register an IPv4 PRE_ROUTING 'sabotage' hook that will
* prevent this from happening. */
-static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_local_in(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
@@ -765,7 +767,8 @@ static int br_nf_forward_finish(struct sk_buff *skb)
* but we are still able to filter on the 'real' indev/outdev
* because of the physdev module. For ARP, indev and outdev are the
* bridge ports. */
-static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
@@ -818,7 +821,8 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
return NF_STOLEN;
}
-static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
@@ -878,7 +882,8 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
#endif
/* PF_BRIDGE/POST_ROUTING ********************************************/
-static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
@@ -923,7 +928,8 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
* for the second time. */
-static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
+static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
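The hunks above convert every bridge netfilter hook from taking a bare unsigned int hook number to taking const struct nf_hook_ops *ops; the ebtables hunks further down recover the hook number as ops->hooknum. A minimal standalone C sketch of that calling convention follows; fake_hook_ops and demo_pre_routing are invented for illustration and are not kernel types.

/* Illustrative only: a registration record carries the hook number and is
 * handed to the callback instead of a bare integer (compare ebt_in_hook()
 * below, which reads ops->hooknum).
 */
#include <stdio.h>

struct fake_hook_ops {
        unsigned int hooknum;
        unsigned int (*hook)(const struct fake_hook_ops *ops, const char *pkt);
};

static unsigned int demo_pre_routing(const struct fake_hook_ops *ops,
                                     const char *pkt)
{
        printf("hook %u saw packet %s\n", ops->hooknum, pkt);
        return 1;                       /* "accept" */
}

int main(void)
{
        struct fake_hook_ops ops = { .hooknum = 0, .hook = demo_pre_routing };

        return ops.hook(&ops, "frame") == 1 ? 0 : 1;
}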
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e74ddc1c29a8..f75d92e4f96b 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -243,7 +243,7 @@ static int br_afspec(struct net_bridge *br,
vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
- if (vinfo->vid >= VLAN_N_VID)
+ if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
return -EINVAL;
switch (cmd) {
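The replacement check above rejects VID 0 and anything at or above VLAN_VID_MASK, so only 802.1Q VLAN IDs 1-4094 pass, matching the "1 to 4094 inclusive" comments added to br_vlan.c below. A standalone sketch of that range test; the mask value is written out locally rather than taken from kernel headers.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_VLAN_VID_MASK 0x0fff       /* 4095; VID 0 and 4095 are not usable */

static bool demo_vid_is_valid(unsigned int vid)
{
        return vid != 0 && vid < DEMO_VLAN_VID_MASK;    /* accepts 1..4094 */
}

int main(void)
{
        printf("%d %d %d %d\n",
               demo_vid_is_valid(0), demo_vid_is_valid(1),
               demo_vid_is_valid(4094), demo_vid_is_valid(4095));
        return 0;                       /* prints: 0 1 1 0 */
}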
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index efb57d911569..229d820bdf0b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -126,7 +126,6 @@ struct net_bridge_mdb_entry
struct timer_list timer;
struct br_ip addr;
bool mglist;
- bool timer_armed;
};
struct net_bridge_mdb_htable
@@ -344,10 +343,9 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
}
/* br_device.c */
-extern void br_dev_setup(struct net_device *dev);
-extern void br_dev_delete(struct net_device *dev, struct list_head *list);
-extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
- struct net_device *dev);
+void br_dev_setup(struct net_device *dev);
+void br_dev_delete(struct net_device *dev, struct list_head *list);
+netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
struct sk_buff *skb)
@@ -358,8 +356,8 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
netpoll_send_skb(np, skb);
}
-extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
-extern void br_netpoll_disable(struct net_bridge_port *p);
+int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
+void br_netpoll_disable(struct net_bridge_port *p);
#else
static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
struct sk_buff *skb)
@@ -377,116 +375,99 @@ static inline void br_netpoll_disable(struct net_bridge_port *p)
#endif
/* br_fdb.c */
-extern int br_fdb_init(void);
-extern void br_fdb_fini(void);
-extern void br_fdb_flush(struct net_bridge *br);
-extern void br_fdb_changeaddr(struct net_bridge_port *p,
- const unsigned char *newaddr);
-extern void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
-extern void br_fdb_cleanup(unsigned long arg);
-extern void br_fdb_delete_by_port(struct net_bridge *br,
- const struct net_bridge_port *p, int do_all);
-extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
- const unsigned char *addr,
- __u16 vid);
-extern int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
-extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
- unsigned long count, unsigned long off);
-extern int br_fdb_insert(struct net_bridge *br,
- struct net_bridge_port *source,
- const unsigned char *addr,
- u16 vid);
-extern void br_fdb_update(struct net_bridge *br,
- struct net_bridge_port *source,
- const unsigned char *addr,
- u16 vid);
-extern int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
-
-extern int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr);
-extern int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr,
- u16 nlh_flags);
-extern int br_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev,
- int idx);
+int br_fdb_init(void);
+void br_fdb_fini(void);
+void br_fdb_flush(struct net_bridge *br);
+void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
+void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
+void br_fdb_cleanup(unsigned long arg);
+void br_fdb_delete_by_port(struct net_bridge *br,
+ const struct net_bridge_port *p, int do_all);
+struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
+ const unsigned char *addr, __u16 vid);
+int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
+int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
+ unsigned long off);
+int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
+ const unsigned char *addr, u16 vid);
+void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
+ const unsigned char *addr, u16 vid);
+int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
+
+int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr);
+int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
+ const unsigned char *addr, u16 nlh_flags);
+int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev, int idx);
/* br_forward.c */
-extern void br_deliver(const struct net_bridge_port *to,
- struct sk_buff *skb);
-extern int br_dev_queue_push_xmit(struct sk_buff *skb);
-extern void br_forward(const struct net_bridge_port *to,
+void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
+int br_dev_queue_push_xmit(struct sk_buff *skb);
+void br_forward(const struct net_bridge_port *to,
struct sk_buff *skb, struct sk_buff *skb0);
-extern int br_forward_finish(struct sk_buff *skb);
-extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb,
- bool unicast);
-extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
- struct sk_buff *skb2, bool unicast);
+int br_forward_finish(struct sk_buff *skb);
+void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast);
+void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
+ struct sk_buff *skb2, bool unicast);
/* br_if.c */
-extern void br_port_carrier_check(struct net_bridge_port *p);
-extern int br_add_bridge(struct net *net, const char *name);
-extern int br_del_bridge(struct net *net, const char *name);
-extern void br_net_exit(struct net *net);
-extern int br_add_if(struct net_bridge *br,
- struct net_device *dev);
-extern int br_del_if(struct net_bridge *br,
- struct net_device *dev);
-extern int br_min_mtu(const struct net_bridge *br);
-extern netdev_features_t br_features_recompute(struct net_bridge *br,
- netdev_features_t features);
+void br_port_carrier_check(struct net_bridge_port *p);
+int br_add_bridge(struct net *net, const char *name);
+int br_del_bridge(struct net *net, const char *name);
+void br_net_exit(struct net *net);
+int br_add_if(struct net_bridge *br, struct net_device *dev);
+int br_del_if(struct net_bridge *br, struct net_device *dev);
+int br_min_mtu(const struct net_bridge *br);
+netdev_features_t br_features_recompute(struct net_bridge *br,
+ netdev_features_t features);
/* br_input.c */
-extern int br_handle_frame_finish(struct sk_buff *skb);
-extern rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
+int br_handle_frame_finish(struct sk_buff *skb);
+rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
/* br_ioctl.c */
-extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg);
+int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
+ void __user *arg);
/* br_multicast.c */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
extern unsigned int br_mdb_rehash_seq;
-extern int br_multicast_rcv(struct net_bridge *br,
- struct net_bridge_port *port,
- struct sk_buff *skb);
-extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
- struct sk_buff *skb, u16 vid);
-extern void br_multicast_add_port(struct net_bridge_port *port);
-extern void br_multicast_del_port(struct net_bridge_port *port);
-extern void br_multicast_enable_port(struct net_bridge_port *port);
-extern void br_multicast_disable_port(struct net_bridge_port *port);
-extern void br_multicast_init(struct net_bridge *br);
-extern void br_multicast_open(struct net_bridge *br);
-extern void br_multicast_stop(struct net_bridge *br);
-extern void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
- struct sk_buff *skb);
-extern void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
- struct sk_buff *skb, struct sk_buff *skb2);
-extern int br_multicast_set_router(struct net_bridge *br, unsigned long val);
-extern int br_multicast_set_port_router(struct net_bridge_port *p,
- unsigned long val);
-extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
-extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
-extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
-extern struct net_bridge_mdb_entry *br_mdb_ip_get(
- struct net_bridge_mdb_htable *mdb,
- struct br_ip *dst);
-extern struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
- struct net_bridge_port *port, struct br_ip *group);
-extern void br_multicast_free_pg(struct rcu_head *head);
-extern struct net_bridge_port_group *br_multicast_new_port_group(
- struct net_bridge_port *port,
- struct br_ip *group,
- struct net_bridge_port_group __rcu *next,
- unsigned char state);
-extern void br_mdb_init(void);
-extern void br_mdb_uninit(void);
-extern void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
- struct br_ip *group, int type);
+int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
+ struct sk_buff *skb, u16 vid);
+struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
+ struct sk_buff *skb, u16 vid);
+void br_multicast_add_port(struct net_bridge_port *port);
+void br_multicast_del_port(struct net_bridge_port *port);
+void br_multicast_enable_port(struct net_bridge_port *port);
+void br_multicast_disable_port(struct net_bridge_port *port);
+void br_multicast_init(struct net_bridge *br);
+void br_multicast_open(struct net_bridge *br);
+void br_multicast_stop(struct net_bridge *br);
+void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
+ struct sk_buff *skb);
+void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
+ struct sk_buff *skb, struct sk_buff *skb2);
+int br_multicast_set_router(struct net_bridge *br, unsigned long val);
+int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
+int br_multicast_toggle(struct net_bridge *br, unsigned long val);
+int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
+int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
+struct net_bridge_mdb_entry *
+br_mdb_ip_get(struct net_bridge_mdb_htable *mdb, struct br_ip *dst);
+struct net_bridge_mdb_entry *
+br_multicast_new_group(struct net_bridge *br, struct net_bridge_port *port,
+ struct br_ip *group);
+void br_multicast_free_pg(struct rcu_head *head);
+struct net_bridge_port_group *
+br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
+ struct net_bridge_port_group __rcu *next,
+ unsigned char state);
+void br_mdb_init(void);
+void br_mdb_uninit(void);
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+ struct br_ip *group, int type);
#define mlock_dereference(X, br) \
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -523,7 +504,8 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
#else
static inline int br_multicast_rcv(struct net_bridge *br,
struct net_bridge_port *port,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u16 vid)
{
return 0;
}
@@ -591,22 +573,21 @@ static inline void br_mdb_uninit(void)
/* br_vlan.c */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
-extern bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
- struct sk_buff *skb, u16 *vid);
-extern bool br_allowed_egress(struct net_bridge *br,
- const struct net_port_vlans *v,
- const struct sk_buff *skb);
-extern struct sk_buff *br_handle_vlan(struct net_bridge *br,
- const struct net_port_vlans *v,
- struct sk_buff *skb);
-extern int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
-extern int br_vlan_delete(struct net_bridge *br, u16 vid);
-extern void br_vlan_flush(struct net_bridge *br);
-extern int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
-extern int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
-extern int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
-extern void nbp_vlan_flush(struct net_bridge_port *port);
-extern bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
+bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+ struct sk_buff *skb, u16 *vid);
+bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v,
+ const struct sk_buff *skb);
+struct sk_buff *br_handle_vlan(struct net_bridge *br,
+ const struct net_port_vlans *v,
+ struct sk_buff *skb);
+int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
+int br_vlan_delete(struct net_bridge *br, u16 vid);
+void br_vlan_flush(struct net_bridge *br);
+int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
+int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
+int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
+void nbp_vlan_flush(struct net_bridge_port *port);
+bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
static inline struct net_port_vlans *br_get_vlan_info(
const struct net_bridge *br)
@@ -643,9 +624,7 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
* vid wasn't set
*/
smp_rmb();
- return (v->pvid & VLAN_TAG_PRESENT) ?
- (v->pvid & ~VLAN_TAG_PRESENT) :
- VLAN_N_VID;
+ return v->pvid ?: VLAN_N_VID;
}
#else
@@ -727,9 +706,9 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
/* br_netfilter.c */
#ifdef CONFIG_BRIDGE_NETFILTER
-extern int br_netfilter_init(void);
-extern void br_netfilter_fini(void);
-extern void br_netfilter_rtable_init(struct net_bridge *);
+int br_netfilter_init(void);
+void br_netfilter_fini(void);
+void br_netfilter_rtable_init(struct net_bridge *);
#else
#define br_netfilter_init() (0)
#define br_netfilter_fini() do { } while(0)
@@ -737,43 +716,39 @@ extern void br_netfilter_rtable_init(struct net_bridge *);
#endif
/* br_stp.c */
-extern void br_log_state(const struct net_bridge_port *p);
-extern struct net_bridge_port *br_get_port(struct net_bridge *br,
- u16 port_no);
-extern void br_init_port(struct net_bridge_port *p);
-extern void br_become_designated_port(struct net_bridge_port *p);
+void br_log_state(const struct net_bridge_port *p);
+struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no);
+void br_init_port(struct net_bridge_port *p);
+void br_become_designated_port(struct net_bridge_port *p);
-extern void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
-extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
-extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
-extern int br_set_max_age(struct net_bridge *br, unsigned long x);
+void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
+int br_set_forward_delay(struct net_bridge *br, unsigned long x);
+int br_set_hello_time(struct net_bridge *br, unsigned long x);
+int br_set_max_age(struct net_bridge *br, unsigned long x);
/* br_stp_if.c */
-extern void br_stp_enable_bridge(struct net_bridge *br);
-extern void br_stp_disable_bridge(struct net_bridge *br);
-extern void br_stp_set_enabled(struct net_bridge *br, unsigned long val);
-extern void br_stp_enable_port(struct net_bridge_port *p);
-extern void br_stp_disable_port(struct net_bridge_port *p);
-extern bool br_stp_recalculate_bridge_id(struct net_bridge *br);
-extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
-extern void br_stp_set_bridge_priority(struct net_bridge *br,
- u16 newprio);
-extern int br_stp_set_port_priority(struct net_bridge_port *p,
- unsigned long newprio);
-extern int br_stp_set_path_cost(struct net_bridge_port *p,
- unsigned long path_cost);
-extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
+void br_stp_enable_bridge(struct net_bridge *br);
+void br_stp_disable_bridge(struct net_bridge *br);
+void br_stp_set_enabled(struct net_bridge *br, unsigned long val);
+void br_stp_enable_port(struct net_bridge_port *p);
+void br_stp_disable_port(struct net_bridge_port *p);
+bool br_stp_recalculate_bridge_id(struct net_bridge *br);
+void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
+void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio);
+int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio);
+int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost);
+ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
/* br_stp_bpdu.c */
struct stp_proto;
-extern void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
- struct net_device *dev);
+void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
+ struct net_device *dev);
/* br_stp_timer.c */
-extern void br_stp_timer_init(struct net_bridge *br);
-extern void br_stp_port_timer_init(struct net_bridge_port *p);
-extern unsigned long br_timer_value(const struct timer_list *timer);
+void br_stp_timer_init(struct net_bridge *br);
+void br_stp_port_timer_init(struct net_bridge_port *p);
+unsigned long br_timer_value(const struct timer_list *timer);
/* br.c */
#if IS_ENABLED(CONFIG_ATM_LANE)
@@ -782,23 +757,23 @@ extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr)
/* br_netlink.c */
extern struct rtnl_link_ops br_link_ops;
-extern int br_netlink_init(void);
-extern void br_netlink_fini(void);
-extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
-extern int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
-extern int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
-extern int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u32 filter_mask);
+int br_netlink_init(void);
+void br_netlink_fini(void);
+void br_ifinfo_notify(int event, struct net_bridge_port *port);
+int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
+int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
+int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
+ u32 filter_mask);
#ifdef CONFIG_SYSFS
/* br_sysfs_if.c */
extern const struct sysfs_ops brport_sysfs_ops;
-extern int br_sysfs_addif(struct net_bridge_port *p);
-extern int br_sysfs_renameif(struct net_bridge_port *p);
+int br_sysfs_addif(struct net_bridge_port *p);
+int br_sysfs_renameif(struct net_bridge_port *p);
/* br_sysfs_br.c */
-extern int br_sysfs_addbr(struct net_device *dev);
-extern void br_sysfs_delbr(struct net_device *dev);
+int br_sysfs_addbr(struct net_device *dev);
+void br_sysfs_delbr(struct net_device *dev);
#else
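The simplified br_get_pvid() above returns v->pvid ?: VLAN_N_VID, relying on the pvid now being stored without the VLAN_TAG_PRESENT bit so that zero means "no pvid set". A standalone sketch of the a ?: b form it uses (a GNU C extension accepted by gcc and clang, equivalent to a ? a : b with a evaluated once); the 4096 sentinel here is defined locally and only mirrors VLAN_N_VID.

#include <stdio.h>

#define DEMO_VLAN_N_VID 4096            /* local "no pvid configured" sentinel */

static unsigned short demo_get_pvid(unsigned short pvid)
{
        return pvid ?: DEMO_VLAN_N_VID; /* pvid if non-zero, else the sentinel */
}

int main(void)
{
        printf("%u %u\n", demo_get_pvid(0), demo_get_pvid(100));
        return 0;                       /* prints: 4096 100 */
}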
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index 0c0fe36e7aa9..2fe910c4e170 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -51,19 +51,19 @@ static inline int br_is_designated_port(const struct net_bridge_port *p)
/* br_stp.c */
-extern void br_become_root_bridge(struct net_bridge *br);
-extern void br_config_bpdu_generation(struct net_bridge *);
-extern void br_configuration_update(struct net_bridge *);
-extern void br_port_state_selection(struct net_bridge *);
-extern void br_received_config_bpdu(struct net_bridge_port *p,
- const struct br_config_bpdu *bpdu);
-extern void br_received_tcn_bpdu(struct net_bridge_port *p);
-extern void br_transmit_config(struct net_bridge_port *p);
-extern void br_transmit_tcn(struct net_bridge *br);
-extern void br_topology_change_detection(struct net_bridge *br);
+void br_become_root_bridge(struct net_bridge *br);
+void br_config_bpdu_generation(struct net_bridge *);
+void br_configuration_update(struct net_bridge *);
+void br_port_state_selection(struct net_bridge *);
+void br_received_config_bpdu(struct net_bridge_port *p,
+ const struct br_config_bpdu *bpdu);
+void br_received_tcn_bpdu(struct net_bridge_port *p);
+void br_transmit_config(struct net_bridge_port *p);
+void br_transmit_tcn(struct net_bridge *br);
+void br_topology_change_detection(struct net_bridge *br);
/* br_stp_bpdu.c */
-extern void br_send_config_bpdu(struct net_bridge_port *, struct br_config_bpdu *);
-extern void br_send_tcn_bpdu(struct net_bridge_port *);
+void br_send_config_bpdu(struct net_bridge_port *, struct br_config_bpdu *);
+void br_send_tcn_bpdu(struct net_bridge_port *);
#endif
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 108084a04671..656a6f3e40de 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -134,7 +134,7 @@ static void br_stp_start(struct net_bridge *br)
if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
__br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
- else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY)
+ else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
__br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
if (r == 0) {
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 9a9ffe7e4019..53f0990eab58 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -45,37 +45,34 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
return 0;
}
- if (vid) {
- if (v->port_idx) {
- p = v->parent.port;
- br = p->br;
- dev = p->dev;
- } else {
- br = v->parent.br;
- dev = br->dev;
- }
- ops = dev->netdev_ops;
-
- if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
- /* Add VLAN to the device filter if it is supported.
- * Stricly speaking, this is not necessary now, since
- * devices are made promiscuous by the bridge, but if
- * that ever changes this code will allow tagged
- * traffic to enter the bridge.
- */
- err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q),
- vid);
- if (err)
- return err;
- }
-
- err = br_fdb_insert(br, p, dev->dev_addr, vid);
- if (err) {
- br_err(br, "failed insert local address into bridge "
- "forwarding table\n");
- goto out_filt;
- }
+ if (v->port_idx) {
+ p = v->parent.port;
+ br = p->br;
+ dev = p->dev;
+ } else {
+ br = v->parent.br;
+ dev = br->dev;
+ }
+ ops = dev->netdev_ops;
+
+ if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+ /* Add VLAN to the device filter if it is supported.
+ * Strictly speaking, this is not necessary now, since
+ * devices are made promiscuous by the bridge, but if
+ * that ever changes this code will allow tagged
+ * traffic to enter the bridge.
+ */
+ err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q),
+ vid);
+ if (err)
+ return err;
+ }
+ err = br_fdb_insert(br, p, dev->dev_addr, vid);
+ if (err) {
+ br_err(br, "failed insert local address into bridge "
+ "forwarding table\n");
+ goto out_filt;
}
set_bit(vid, v->vlan_bitmap);
@@ -98,7 +95,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
__vlan_delete_pvid(v, vid);
clear_bit(vid, v->untagged_bitmap);
- if (v->port_idx && vid) {
+ if (v->port_idx) {
struct net_device *dev = v->parent.port->dev;
const struct net_device_ops *ops = dev->netdev_ops;
@@ -192,6 +189,8 @@ out:
bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
struct sk_buff *skb, u16 *vid)
{
+ int err;
+
/* If VLAN filtering is disabled on the bridge, all packets are
* permitted.
*/
@@ -204,20 +203,32 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
if (!v)
return false;
- if (br_vlan_get_tag(skb, vid)) {
+ err = br_vlan_get_tag(skb, vid);
+ if (!*vid) {
u16 pvid = br_get_pvid(v);
- /* Frame did not have a tag. See if pvid is set
- * on this port. That tells us which vlan untagged
- * traffic belongs to.
+ /* Frame had a tag with VID 0 or did not have a tag.
+ * See if pvid is set on this port. That tells us which
+ * vlan untagged or priority-tagged traffic belongs to.
*/
if (pvid == VLAN_N_VID)
return false;
- /* PVID is set on this port. Any untagged ingress
- * frame is considered to belong to this vlan.
+ /* PVID is set on this port. Any untagged or priority-tagged
+ * ingress frame is considered to belong to this vlan.
*/
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
+ *vid = pvid;
+ if (likely(err))
+ /* Untagged Frame. */
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
+ else
+ /* Priority-tagged Frame.
+ * At this point, we know that skb->vlan_tci had
+ * VLAN_TAG_PRESENT bit and its VID field was 0x000.
+ * We update only VID field and preserve PCP field.
+ */
+ skb->vlan_tci |= pvid;
+
return true;
}
@@ -248,7 +259,9 @@ bool br_allowed_egress(struct net_bridge *br,
return false;
}
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
struct net_port_vlans *pv = NULL;
@@ -278,7 +291,9 @@ out:
return err;
}
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
struct net_port_vlans *pv;
@@ -289,14 +304,9 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
if (!pv)
return -EINVAL;
- if (vid) {
- /* If the VID !=0 remove fdb for this vid. VID 0 is special
- * in that it's the default and is always there in the fdb.
- */
- spin_lock_bh(&br->hash_lock);
- fdb_delete_by_addr(br, br->dev->dev_addr, vid);
- spin_unlock_bh(&br->hash_lock);
- }
+ spin_lock_bh(&br->hash_lock);
+ fdb_delete_by_addr(br, br->dev->dev_addr, vid);
+ spin_unlock_bh(&br->hash_lock);
__vlan_del(pv, vid);
return 0;
@@ -329,7 +339,9 @@ unlock:
return 0;
}
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
struct net_port_vlans *pv = NULL;
@@ -363,7 +375,9 @@ clean_up:
return err;
}
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
struct net_port_vlans *pv;
@@ -374,14 +388,9 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
if (!pv)
return -EINVAL;
- if (vid) {
- /* If the VID !=0 remove fdb for this vid. VID 0 is special
- * in that it's the default and is always there in the fdb.
- */
- spin_lock_bh(&port->br->hash_lock);
- fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
- spin_unlock_bh(&port->br->hash_lock);
- }
+ spin_lock_bh(&port->br->hash_lock);
+ fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
+ spin_unlock_bh(&port->br->hash_lock);
return __vlan_del(pv, vid);
}
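The ingress changes above handle priority-tagged frames (tag present, VID field 0) by OR-ing the pvid into skb->vlan_tci, which updates only the 12-bit VID field and leaves the PCP bits alone, as the new comment states. A userspace sketch of that tag-control-information update; the masks and shift are written out here instead of using kernel VLAN macros.

#include <stdio.h>

int main(void)
{
        unsigned short tci  = 0x6000;   /* PCP 3, VID 0: a priority-tagged frame */
        unsigned short pvid = 100;      /* the port's untagged/default VLAN      */

        tci |= pvid & 0x0fff;           /* set the VID field, keep PCP/DEI bits  */

        printf("pcp=%u vid=%u\n", (tci >> 13) & 0x7, tci & 0x0fff);
        return 0;                       /* prints: pcp=3 vid=100                 */
}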
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index a9aff9c7d027..68f8128147be 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -1,6 +1,9 @@
#
# Bridge netfilter configuration
#
+#
+config NF_TABLES_BRIDGE
+ tristate "Ethernet Bridge nf_tables support"
menuconfig BRIDGE_NF_EBTABLES
tristate "Ethernet Bridge tables (ebtables) support"
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 0718699540b0..ea7629f58b3d 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -2,6 +2,8 @@
# Makefile for the netfilter modules for Link Layer filtering on a bridge.
#
+obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
+
obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
# tables
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index 8b84c581be30..3fb3c848affe 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -28,7 +28,7 @@ static bool ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh,
uint32_t cmp[2] = { 0, 0 };
int key = ((const unsigned char *)mac)[5];
- memcpy(((char *) cmp) + 2, mac, 6);
+ memcpy(((char *) cmp) + 2, mac, ETH_ALEN);
start = wh->table[key];
limit = wh->table[key + 1];
if (ip) {
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 518093802d1d..7c470c371e14 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -181,6 +181,7 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
ub->qlen++;
pm = nlmsg_data(nlh);
+ memset(pm, 0, sizeof(*pm));
/* Fill in the ulog data */
pm->version = EBT_ULOG_VERSION;
@@ -193,8 +194,6 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
pm->hook = hooknr;
if (uloginfo->prefix != NULL)
strcpy(pm->prefix, uloginfo->prefix);
- else
- *(pm->prefix) = '\0';
if (in) {
strcpy(pm->physindev, in->name);
@@ -204,16 +203,14 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
else
strcpy(pm->indev, in->name);
- } else
- pm->indev[0] = pm->physindev[0] = '\0';
+ }
if (out) {
/* If out exists, then out is a bridge port */
strcpy(pm->physoutdev, out->name);
/* rcu_read_lock()ed by nf_hook_slow */
strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
- } else
- pm->outdev[0] = pm->physoutdev[0] = '\0';
+ }
if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
BUG();
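The memset() added above zeroes the whole ulog payload once, which is why the explicit '\0' assignments for prefix, indev/physindev and outdev/physoutdev can be dropped in the surrounding hunks: fields that a branch skips, and any structure padding, are already clear. A generic C sketch of that fill pattern; demo_msg is an invented structure, not the ebt_ulog layout.

#include <stdio.h>
#include <string.h>

struct demo_msg {
        char prefix[16];
        char indev[16];
        char outdev[16];
};

static void demo_fill(struct demo_msg *m, const char *prefix, const char *in)
{
        memset(m, 0, sizeof(*m));       /* replaces the per-field '\0' writes */
        if (prefix)
                snprintf(m->prefix, sizeof(m->prefix), "%s", prefix);
        if (in)
                snprintf(m->indev, sizeof(m->indev), "%s", in);
        /* outdev intentionally untouched: already zeroed above */
}

int main(void)
{
        struct demo_msg m;

        demo_fill(&m, "ulog:", "eth0");
        printf("'%s' '%s' '%s'\n", m.prefix, m.indev, m.outdev);
        return 0;                       /* prints: 'ulog:' 'eth0' ''          */
}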
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 94b2b700cff8..bb2da7b706e7 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -60,17 +60,21 @@ static const struct ebt_table frame_filter =
};
static unsigned int
-ebt_in_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in,
- const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_filter);
+ return ebt_do_table(ops->hooknum, skb, in, out,
+ dev_net(in)->xt.frame_filter);
}
static unsigned int
-ebt_out_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in,
- const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- return ebt_do_table(hook, skb, in, out, dev_net(out)->xt.frame_filter);
+ return ebt_do_table(ops->hooknum, skb, in, out,
+ dev_net(out)->xt.frame_filter);
}
static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 322555acdd40..bd238f1f105b 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -60,17 +60,21 @@ static struct ebt_table frame_nat =
};
static unsigned int
-ebt_nat_in(unsigned int hook, struct sk_buff *skb, const struct net_device *in
- , const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_nat);
+ return ebt_do_table(ops->hooknum, skb, in, out,
+ dev_net(in)->xt.frame_nat);
}
static unsigned int
-ebt_nat_out(unsigned int hook, struct sk_buff *skb, const struct net_device *in
- , const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- return ebt_do_table(hook, skb, in, out, dev_net(out)->xt.frame_nat);
+ return ebt_do_table(ops->hooknum, skb, in, out,
+ dev_net(out)->xt.frame_nat);
}
static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
new file mode 100644
index 000000000000..e8cb016fa34d
--- /dev/null
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter_bridge.h>
+#include <net/netfilter/nf_tables.h>
+
+static struct nft_af_info nft_af_bridge __read_mostly = {
+ .family = NFPROTO_BRIDGE,
+ .nhooks = NF_BR_NUMHOOKS,
+ .owner = THIS_MODULE,
+};
+
+static int nf_tables_bridge_init_net(struct net *net)
+{
+ net->nft.bridge = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+ if (net->nft.bridge == NULL)
+ return -ENOMEM;
+
+ memcpy(net->nft.bridge, &nft_af_bridge, sizeof(nft_af_bridge));
+
+ if (nft_register_afinfo(net, net->nft.bridge) < 0)
+ goto err;
+
+ return 0;
+err:
+ kfree(net->nft.bridge);
+ return -ENOMEM;
+}
+
+static void nf_tables_bridge_exit_net(struct net *net)
+{
+ nft_unregister_afinfo(net->nft.bridge);
+ kfree(net->nft.bridge);
+}
+
+static struct pernet_operations nf_tables_bridge_net_ops = {
+ .init = nf_tables_bridge_init_net,
+ .exit = nf_tables_bridge_exit_net,
+};
+
+static int __init nf_tables_bridge_init(void)
+{
+ return register_pernet_subsys(&nf_tables_bridge_net_ops);
+}
+
+static void __exit nf_tables_bridge_exit(void)
+{
+ return unregister_pernet_subsys(&nf_tables_bridge_net_ops);
+}
+
+module_init(nf_tables_bridge_init);
+module_exit(nf_tables_bridge_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(AF_BRIDGE);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3ab8dd2e1282..d249874a366d 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -420,7 +420,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
* @mask: CAN mask (see description)
* @func: callback function on filter match
* @data: returned parameter for callback function
- * @ident: string for calling module indentification
+ * @ident: string for calling module identification
*
* Description:
* Invokes the callback function with the received sk_buff and the given
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 1dccb4c33894..6de58b40535c 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -108,9 +108,9 @@ struct s_pstats {
extern struct dev_rcv_lists can_rx_alldev_list;
/* function prototypes for the CAN networklayer procfs (proc.c) */
-extern void can_init_proc(void);
-extern void can_remove_proc(void);
-extern void can_stat_update(unsigned long data);
+void can_init_proc(void);
+void can_remove_proc(void);
+void can_stat_update(unsigned long data);
/* structures and variables from af_can.c needed in proc.c for reading */
extern struct timer_list can_stattimer; /* timer for statistics update */
diff --git a/net/ceph/auth_none.h b/net/ceph/auth_none.h
index ed7d088b1bc9..059a3ce4b53f 100644
--- a/net/ceph/auth_none.h
+++ b/net/ceph/auth_none.h
@@ -23,7 +23,7 @@ struct ceph_auth_none_info {
struct ceph_none_authorizer au; /* we only need one; it's static */
};
-extern int ceph_auth_none_init(struct ceph_auth_client *ac);
+int ceph_auth_none_init(struct ceph_auth_client *ac);
#endif
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index c5a058da7ac8..65ee72082d99 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -45,7 +45,7 @@ struct ceph_x_info {
struct ceph_x_authorizer auth_authorizer;
};
-extern int ceph_x_init(struct ceph_auth_client *ac);
+int ceph_x_init(struct ceph_auth_client *ac);
#endif
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index 3572dc518bc9..d1498224c49d 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -20,34 +20,32 @@ static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
kfree(key->key);
}
-extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
- const struct ceph_crypto_key *src);
-extern int ceph_crypto_key_encode(struct ceph_crypto_key *key,
- void **p, void *end);
-extern int ceph_crypto_key_decode(struct ceph_crypto_key *key,
- void **p, void *end);
-extern int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
+int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
+ const struct ceph_crypto_key *src);
+int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end);
+int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end);
+int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
/* crypto.c */
-extern int ceph_decrypt(struct ceph_crypto_key *secret,
- void *dst, size_t *dst_len,
- const void *src, size_t src_len);
-extern int ceph_encrypt(struct ceph_crypto_key *secret,
- void *dst, size_t *dst_len,
- const void *src, size_t src_len);
-extern int ceph_decrypt2(struct ceph_crypto_key *secret,
- void *dst1, size_t *dst1_len,
- void *dst2, size_t *dst2_len,
- const void *src, size_t src_len);
-extern int ceph_encrypt2(struct ceph_crypto_key *secret,
- void *dst, size_t *dst_len,
- const void *src1, size_t src1_len,
- const void *src2, size_t src2_len);
-extern int ceph_crypto_init(void);
-extern void ceph_crypto_shutdown(void);
+int ceph_decrypt(struct ceph_crypto_key *secret,
+ void *dst, size_t *dst_len,
+ const void *src, size_t src_len);
+int ceph_encrypt(struct ceph_crypto_key *secret,
+ void *dst, size_t *dst_len,
+ const void *src, size_t src_len);
+int ceph_decrypt2(struct ceph_crypto_key *secret,
+ void *dst1, size_t *dst1_len,
+ void *dst2, size_t *dst2_len,
+ const void *src, size_t src_len);
+int ceph_encrypt2(struct ceph_crypto_key *secret,
+ void *dst, size_t *dst_len,
+ const void *src1, size_t src1_len,
+ const void *src2, size_t src2_len);
+int ceph_crypto_init(void);
+void ceph_crypto_shutdown(void);
/* armor.c */
-extern int ceph_armor(char *dst, const char *src, const char *end);
-extern int ceph_unarmor(char *dst, const char *src, const char *end);
+int ceph_armor(char *dst, const char *src, const char *end);
+int ceph_unarmor(char *dst, const char *src, const char *end);
#endif
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 4a5df7b1cc9f..18c039b95c22 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -777,13 +777,12 @@ static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
bio = data->bio;
BUG_ON(!bio);
- BUG_ON(!bio->bi_vcnt);
cursor->resid = min(length, data->bio_length);
cursor->bio = bio;
- cursor->vector_index = 0;
- cursor->vector_offset = 0;
- cursor->last_piece = length <= bio->bi_io_vec[0].bv_len;
+ cursor->bvec_iter = bio->bi_iter;
+ cursor->last_piece =
+ cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
}
static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
@@ -792,71 +791,63 @@ static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
{
struct ceph_msg_data *data = cursor->data;
struct bio *bio;
- struct bio_vec *bio_vec;
- unsigned int index;
+ struct bio_vec bio_vec;
BUG_ON(data->type != CEPH_MSG_DATA_BIO);
bio = cursor->bio;
BUG_ON(!bio);
- index = cursor->vector_index;
- BUG_ON(index >= (unsigned int) bio->bi_vcnt);
+ bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
- bio_vec = &bio->bi_io_vec[index];
- BUG_ON(cursor->vector_offset >= bio_vec->bv_len);
- *page_offset = (size_t) (bio_vec->bv_offset + cursor->vector_offset);
+ *page_offset = (size_t) bio_vec.bv_offset;
BUG_ON(*page_offset >= PAGE_SIZE);
if (cursor->last_piece) /* pagelist offset is always 0 */
*length = cursor->resid;
else
- *length = (size_t) (bio_vec->bv_len - cursor->vector_offset);
+ *length = (size_t) bio_vec.bv_len;
BUG_ON(*length > cursor->resid);
BUG_ON(*page_offset + *length > PAGE_SIZE);
- return bio_vec->bv_page;
+ return bio_vec.bv_page;
}
static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
size_t bytes)
{
struct bio *bio;
- struct bio_vec *bio_vec;
- unsigned int index;
+ struct bio_vec bio_vec;
BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);
bio = cursor->bio;
BUG_ON(!bio);
- index = cursor->vector_index;
- BUG_ON(index >= (unsigned int) bio->bi_vcnt);
- bio_vec = &bio->bi_io_vec[index];
+ bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
/* Advance the cursor offset */
BUG_ON(cursor->resid < bytes);
cursor->resid -= bytes;
- cursor->vector_offset += bytes;
- if (cursor->vector_offset < bio_vec->bv_len)
+
+ bio_advance_iter(bio, &cursor->bvec_iter, bytes);
+
+ if (bytes < bio_vec.bv_len)
return false; /* more bytes to process in this segment */
- BUG_ON(cursor->vector_offset != bio_vec->bv_len);
/* Move on to the next segment, and possibly the next bio */
- if (++index == (unsigned int) bio->bi_vcnt) {
+ if (!cursor->bvec_iter.bi_size) {
bio = bio->bi_next;
- index = 0;
+ cursor->bvec_iter = bio->bi_iter;
}
cursor->bio = bio;
- cursor->vector_index = index;
- cursor->vector_offset = 0;
if (!cursor->last_piece) {
BUG_ON(!cursor->resid);
BUG_ON(!bio);
/* A short read is OK, so use <= rather than == */
- if (cursor->resid <= bio->bi_io_vec[index].bv_len)
+ if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
cursor->last_piece = true;
}
diff --git a/net/compat.c b/net/compat.c
index f0a1ba6c8086..89032580bd1d 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
__get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
__get_user(kmsg->msg_flags, &umsg->msg_flags))
return -EFAULT;
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+ return -EINVAL;
kmsg->msg_name = compat_ptr(tmp1);
kmsg->msg_iov = compat_ptr(tmp2);
kmsg->msg_control = compat_ptr(tmp3);
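The added test bounds the user-supplied msg_namelen against sizeof(struct sockaddr_storage) before the address is ever used, rejecting oversized values with -EINVAL. A standalone sketch of the same validate-before-copy pattern; demo_storage merely stands in for sockaddr_storage and the sizes are illustrative.

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct demo_storage { char data[128]; };        /* stand-in for sockaddr_storage */

static int demo_copy_name(struct demo_storage *dst, const void *src,
                          size_t namelen)
{
        if (namelen > sizeof(*dst))
                return -EINVAL;         /* reject before touching the buffer */
        memcpy(dst, src, namelen);
        return 0;
}

int main(void)
{
        struct demo_storage st;
        char name[16] = "example";

        printf("%d %d\n",
               demo_copy_name(&st, name, sizeof(name)),
               demo_copy_name(&st, name, 4096));
        return 0;                       /* prints 0 and -EINVAL (-22 on Linux) */
}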
diff --git a/net/core/datagram.c b/net/core/datagram.c
index af814e764206..a16ed7bbe376 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -577,7 +577,7 @@ EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
/**
* zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec
* @skb: buffer to copy
- * @from: io vector to copy to
+ * @from: io vector to copy from
* @offset: offset in the io vector to start copying from
* @count: amount of vectors to copy to buffer from
*
diff --git a/net/core/dev.c b/net/core/dev.c
index 65f829cfd928..0e6136546a8c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1203,7 +1203,7 @@ void netdev_state_change(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
call_netdevice_notifiers(NETDEV_CHANGE, dev);
- rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
}
}
EXPORT_SYMBOL(netdev_state_change);
@@ -1293,7 +1293,7 @@ int dev_open(struct net_device *dev)
if (ret < 0)
return ret;
- rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
call_netdevice_notifiers(NETDEV_UP, dev);
return ret;
@@ -1307,7 +1307,7 @@ static int __dev_close_many(struct list_head *head)
ASSERT_RTNL();
might_sleep();
- list_for_each_entry(dev, head, unreg_list) {
+ list_for_each_entry(dev, head, close_list) {
call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
clear_bit(__LINK_STATE_START, &dev->state);
@@ -1323,7 +1323,7 @@ static int __dev_close_many(struct list_head *head)
dev_deactivate_many(head);
- list_for_each_entry(dev, head, unreg_list) {
+ list_for_each_entry(dev, head, close_list) {
const struct net_device_ops *ops = dev->netdev_ops;
/*
@@ -1351,7 +1351,7 @@ static int __dev_close(struct net_device *dev)
/* Temporarily disable netpoll until the interface is down */
netpoll_rx_disable(dev);
- list_add(&dev->unreg_list, &single);
+ list_add(&dev->close_list, &single);
retval = __dev_close_many(&single);
list_del(&single);
@@ -1362,21 +1362,20 @@ static int __dev_close(struct net_device *dev)
static int dev_close_many(struct list_head *head)
{
struct net_device *dev, *tmp;
- LIST_HEAD(tmp_list);
- list_for_each_entry_safe(dev, tmp, head, unreg_list)
+ /* Remove the devices that don't need to be closed */
+ list_for_each_entry_safe(dev, tmp, head, close_list)
if (!(dev->flags & IFF_UP))
- list_move(&dev->unreg_list, &tmp_list);
+ list_del_init(&dev->close_list);
__dev_close_many(head);
- list_for_each_entry(dev, head, unreg_list) {
- rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+ list_for_each_entry_safe(dev, tmp, head, close_list) {
+ rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
call_netdevice_notifiers(NETDEV_DOWN, dev);
+ list_del_init(&dev->close_list);
}
- /* rollback_registered_many needs the complete original list */
- list_splice(&tmp_list, head);
return 0;
}
@@ -1397,7 +1396,7 @@ int dev_close(struct net_device *dev)
/* Block netpoll rx while the interface is going down */
netpoll_rx_disable(dev);
- list_add(&dev->unreg_list, &single);
+ list_add(&dev->close_list, &single);
dev_close_many(&single);
list_del(&single);
@@ -1917,7 +1916,8 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
return new_map;
}
-int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+ u16 index)
{
struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
struct xps_map *map, *new_map;
@@ -2377,6 +2377,8 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
}
SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
+ SKB_GSO_CB(skb)->encap_level = 0;
+
skb_reset_mac_header(skb);
skb_reset_mac_len(skb);
@@ -4373,42 +4375,40 @@ struct netdev_adjacent {
/* upper master flag, there can only be one master device per list */
bool master;
- /* indicates that this dev is our first-level lower/upper device */
- bool neighbour;
-
/* counter for the number of times this device was added to us */
u16 ref_nr;
+ /* private field for the users */
+ void *private;
+
struct list_head list;
struct rcu_head rcu;
};
-static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
- struct net_device *adj_dev,
- bool upper)
+static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
+ struct net_device *adj_dev,
+ struct list_head *adj_list)
{
struct netdev_adjacent *adj;
- struct list_head *dev_list;
- dev_list = upper ? &dev->upper_dev_list : &dev->lower_dev_list;
-
- list_for_each_entry(adj, dev_list, list) {
+ list_for_each_entry_rcu(adj, adj_list, list) {
if (adj->dev == adj_dev)
return adj;
}
return NULL;
}
-static inline struct netdev_adjacent *__netdev_find_upper(struct net_device *dev,
- struct net_device *udev)
+static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
+ struct net_device *adj_dev,
+ struct list_head *adj_list)
{
- return __netdev_find_adj(dev, udev, true);
-}
+ struct netdev_adjacent *adj;
-static inline struct netdev_adjacent *__netdev_find_lower(struct net_device *dev,
- struct net_device *ldev)
-{
- return __netdev_find_adj(dev, ldev, false);
+ list_for_each_entry(adj, adj_list, list) {
+ if (adj->dev == adj_dev)
+ return adj;
+ }
+ return NULL;
}
/**
@@ -4425,7 +4425,7 @@ bool netdev_has_upper_dev(struct net_device *dev,
{
ASSERT_RTNL();
- return __netdev_find_upper(dev, upper_dev);
+ return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_upper_dev);
@@ -4440,7 +4440,7 @@ bool netdev_has_any_upper_dev(struct net_device *dev)
{
ASSERT_RTNL();
- return !list_empty(&dev->upper_dev_list);
+ return !list_empty(&dev->all_adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);
@@ -4457,10 +4457,10 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
ASSERT_RTNL();
- if (list_empty(&dev->upper_dev_list))
+ if (list_empty(&dev->adj_list.upper))
return NULL;
- upper = list_first_entry(&dev->upper_dev_list,
+ upper = list_first_entry(&dev->adj_list.upper,
struct netdev_adjacent, list);
if (likely(upper->master))
return upper->dev;
@@ -4468,15 +4468,26 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);
-/* netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+void *netdev_adjacent_get_private(struct list_head *adj_list)
+{
+ struct netdev_adjacent *adj;
+
+ adj = list_entry(adj_list, struct netdev_adjacent, list);
+
+ return adj->private;
+}
+EXPORT_SYMBOL(netdev_adjacent_get_private);
+
+/**
+ * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
* @dev: device
* @iter: list_head ** of the current position
*
* Gets the next device from the dev's upper list, starting from iter
* position. The caller must hold RCU read lock.
*/
-struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
- struct list_head **iter)
+struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
+ struct list_head **iter)
{
struct netdev_adjacent *upper;
@@ -4484,14 +4495,71 @@ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
- if (&upper->list == &dev->upper_dev_list)
+ if (&upper->list == &dev->all_adj_list.upper)
return NULL;
*iter = &upper->list;
return upper->dev;
}
-EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
+
+/**
+ * netdev_lower_get_next_private - Get the next ->private from the
+ * lower neighbour list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent->private from the dev's lower neighbour
+ * list, starting from iter position. The caller must either hold the
+ * RTNL lock or its own locking that guarantees that the neighbour lower
+ * list will remain unchanged.
+ */
+void *netdev_lower_get_next_private(struct net_device *dev,
+ struct list_head **iter)
+{
+ struct netdev_adjacent *lower;
+
+ lower = list_entry(*iter, struct netdev_adjacent, list);
+
+ if (&lower->list == &dev->adj_list.lower)
+ return NULL;
+
+ if (iter)
+ *iter = lower->list.next;
+
+ return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_get_next_private);
+
+/**
+ * netdev_lower_get_next_private_rcu - Get the next ->private from the
+ * lower neighbour list, RCU
+ * variant
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent->private from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RCU read lock.
+ */
+void *netdev_lower_get_next_private_rcu(struct net_device *dev,
+ struct list_head **iter)
+{
+ struct netdev_adjacent *lower;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+ if (&lower->list == &dev->adj_list.lower)
+ return NULL;
+
+ if (iter)
+ *iter = &lower->list;
+
+ return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
/**
* netdev_master_upper_dev_get_rcu - Get master upper device
@@ -4504,7 +4572,7 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
struct netdev_adjacent *upper;
- upper = list_first_or_null_rcu(&dev->upper_dev_list,
+ upper = list_first_or_null_rcu(&dev->adj_list.upper,
struct netdev_adjacent, list);
if (upper && likely(upper->master))
return upper->dev;
@@ -4514,15 +4582,16 @@ EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
static int __netdev_adjacent_dev_insert(struct net_device *dev,
struct net_device *adj_dev,
- bool neighbour, bool master,
- bool upper)
+ struct list_head *dev_list,
+ void *private, bool master)
{
struct netdev_adjacent *adj;
+ char linkname[IFNAMSIZ+7];
+ int ret;
- adj = __netdev_find_adj(dev, adj_dev, upper);
+ adj = __netdev_find_adj(dev, adj_dev, dev_list);
if (adj) {
- BUG_ON(neighbour);
adj->ref_nr++;
return 0;
}
@@ -4533,124 +4602,179 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
adj->dev = adj_dev;
adj->master = master;
- adj->neighbour = neighbour;
adj->ref_nr = 1;
-
+ adj->private = private;
dev_hold(adj_dev);
- pr_debug("dev_hold for %s, because of %s link added from %s to %s\n",
- adj_dev->name, upper ? "upper" : "lower", dev->name,
- adj_dev->name);
- if (!upper) {
- list_add_tail_rcu(&adj->list, &dev->lower_dev_list);
- return 0;
+ pr_debug("dev_hold for %s, because of link added from %s to %s\n",
+ adj_dev->name, dev->name, adj_dev->name);
+
+ if (dev_list == &dev->adj_list.lower) {
+ sprintf(linkname, "lower_%s", adj_dev->name);
+ ret = sysfs_create_link(&(dev->dev.kobj),
+ &(adj_dev->dev.kobj), linkname);
+ if (ret)
+ goto free_adj;
+ } else if (dev_list == &dev->adj_list.upper) {
+ sprintf(linkname, "upper_%s", adj_dev->name);
+ ret = sysfs_create_link(&(dev->dev.kobj),
+ &(adj_dev->dev.kobj), linkname);
+ if (ret)
+ goto free_adj;
}
- /* Ensure that master upper link is always the first item in list. */
- if (master)
- list_add_rcu(&adj->list, &dev->upper_dev_list);
- else
- list_add_tail_rcu(&adj->list, &dev->upper_dev_list);
+ /* Ensure that master link is always the first item in list. */
+ if (master) {
+ ret = sysfs_create_link(&(dev->dev.kobj),
+ &(adj_dev->dev.kobj), "master");
+ if (ret)
+ goto remove_symlinks;
+
+ list_add_rcu(&adj->list, dev_list);
+ } else {
+ list_add_tail_rcu(&adj->list, dev_list);
+ }
return 0;
-}
-static inline int __netdev_upper_dev_insert(struct net_device *dev,
- struct net_device *udev,
- bool master, bool neighbour)
-{
- return __netdev_adjacent_dev_insert(dev, udev, neighbour, master,
- true);
-}
+remove_symlinks:
+ if (dev_list == &dev->adj_list.lower) {
+ sprintf(linkname, "lower_%s", adj_dev->name);
+ sysfs_remove_link(&(dev->dev.kobj), linkname);
+ } else if (dev_list == &dev->adj_list.upper) {
+ sprintf(linkname, "upper_%s", adj_dev->name);
+ sysfs_remove_link(&(dev->dev.kobj), linkname);
+ }
-static inline int __netdev_lower_dev_insert(struct net_device *dev,
- struct net_device *ldev,
- bool neighbour)
-{
- return __netdev_adjacent_dev_insert(dev, ldev, neighbour, false,
- false);
+free_adj:
+ kfree(adj);
+ dev_put(adj_dev);
+
+ return ret;
}
void __netdev_adjacent_dev_remove(struct net_device *dev,
- struct net_device *adj_dev, bool upper)
+ struct net_device *adj_dev,
+ struct list_head *dev_list)
{
struct netdev_adjacent *adj;
+ char linkname[IFNAMSIZ+7];
- if (upper)
- adj = __netdev_find_upper(dev, adj_dev);
- else
- adj = __netdev_find_lower(dev, adj_dev);
+ adj = __netdev_find_adj(dev, adj_dev, dev_list);
- if (!adj)
+ if (!adj) {
+ pr_err("tried to remove device %s from %s\n",
+ dev->name, adj_dev->name);
BUG();
+ }
if (adj->ref_nr > 1) {
+ pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
+ adj->ref_nr-1);
adj->ref_nr--;
return;
}
+ if (adj->master)
+ sysfs_remove_link(&(dev->dev.kobj), "master");
+
+ if (dev_list == &dev->adj_list.lower) {
+ sprintf(linkname, "lower_%s", adj_dev->name);
+ sysfs_remove_link(&(dev->dev.kobj), linkname);
+ } else if (dev_list == &dev->adj_list.upper) {
+ sprintf(linkname, "upper_%s", adj_dev->name);
+ sysfs_remove_link(&(dev->dev.kobj), linkname);
+ }
+
list_del_rcu(&adj->list);
- pr_debug("dev_put for %s, because of %s link removed from %s to %s\n",
- adj_dev->name, upper ? "upper" : "lower", dev->name,
- adj_dev->name);
+ pr_debug("dev_put for %s, because link removed from %s to %s\n",
+ adj_dev->name, dev->name, adj_dev->name);
dev_put(adj_dev);
kfree_rcu(adj, rcu);
}
-static inline void __netdev_upper_dev_remove(struct net_device *dev,
- struct net_device *udev)
-{
- return __netdev_adjacent_dev_remove(dev, udev, true);
-}
-
-static inline void __netdev_lower_dev_remove(struct net_device *dev,
- struct net_device *ldev)
-{
- return __netdev_adjacent_dev_remove(dev, ldev, false);
-}
-
-int __netdev_adjacent_dev_insert_link(struct net_device *dev,
- struct net_device *upper_dev,
- bool master, bool neighbour)
+int __netdev_adjacent_dev_link_lists(struct net_device *dev,
+ struct net_device *upper_dev,
+ struct list_head *up_list,
+ struct list_head *down_list,
+ void *private, bool master)
{
int ret;
- ret = __netdev_upper_dev_insert(dev, upper_dev, master, neighbour);
+ ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
+ master);
if (ret)
return ret;
- ret = __netdev_lower_dev_insert(upper_dev, dev, neighbour);
+ ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
+ false);
if (ret) {
- __netdev_upper_dev_remove(dev, upper_dev);
+ __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
return ret;
}
return 0;
}
-static inline int __netdev_adjacent_dev_link(struct net_device *dev,
- struct net_device *udev)
+int __netdev_adjacent_dev_link(struct net_device *dev,
+ struct net_device *upper_dev)
{
- return __netdev_adjacent_dev_insert_link(dev, udev, false, false);
+ return __netdev_adjacent_dev_link_lists(dev, upper_dev,
+ &dev->all_adj_list.upper,
+ &upper_dev->all_adj_list.lower,
+ NULL, false);
}
-static inline int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
- struct net_device *udev,
- bool master)
+void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
+ struct net_device *upper_dev,
+ struct list_head *up_list,
+ struct list_head *down_list)
{
- return __netdev_adjacent_dev_insert_link(dev, udev, master, true);
+ __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
+ __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
}
void __netdev_adjacent_dev_unlink(struct net_device *dev,
struct net_device *upper_dev)
{
- __netdev_upper_dev_remove(dev, upper_dev);
- __netdev_lower_dev_remove(upper_dev, dev);
+ __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+ &dev->all_adj_list.upper,
+ &upper_dev->all_adj_list.lower);
+}
+
+int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+ struct net_device *upper_dev,
+ void *private, bool master)
+{
+ int ret = __netdev_adjacent_dev_link(dev, upper_dev);
+
+ if (ret)
+ return ret;
+
+ ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
+ &dev->adj_list.upper,
+ &upper_dev->adj_list.lower,
+ private, master);
+ if (ret) {
+ __netdev_adjacent_dev_unlink(dev, upper_dev);
+ return ret;
+ }
+
+ return 0;
}
+void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
+ struct net_device *upper_dev)
+{
+ __netdev_adjacent_dev_unlink(dev, upper_dev);
+ __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+ &dev->adj_list.upper,
+ &upper_dev->adj_list.lower);
+}
static int __netdev_upper_dev_link(struct net_device *dev,
- struct net_device *upper_dev, bool master)
+ struct net_device *upper_dev, bool master,
+ void *private)
{
struct netdev_adjacent *i, *j, *to_i, *to_j;
int ret = 0;
@@ -4661,26 +4785,29 @@ static int __netdev_upper_dev_link(struct net_device *dev,
return -EBUSY;
/* To prevent loops, check if dev is not upper device to upper_dev. */
- if (__netdev_find_upper(upper_dev, dev))
+ if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
return -EBUSY;
- if (__netdev_find_upper(dev, upper_dev))
+ if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
return -EEXIST;
if (master && netdev_master_upper_dev_get(dev))
return -EBUSY;
- ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, master);
+ ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
+ master);
if (ret)
return ret;
/* Now that we linked these devs, make all the upper_dev's
- * upper_dev_list visible to every dev's lower_dev_list and vice
+ * all_adj_list.upper visible to every dev's all_adj_list.lower and vice
* versa, and don't forget the devices itself. All of these
* links are non-neighbours.
*/
- list_for_each_entry(i, &dev->lower_dev_list, list) {
- list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+ list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+ list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
+ pr_debug("Interlinking %s with %s, non-neighbour\n",
+ i->dev->name, j->dev->name);
ret = __netdev_adjacent_dev_link(i->dev, j->dev);
if (ret)
goto rollback_mesh;
@@ -4688,14 +4815,18 @@ static int __netdev_upper_dev_link(struct net_device *dev,
}
/* add dev to every upper_dev's upper device */
- list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+ list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
+ pr_debug("linking %s's upper device %s with %s\n",
+ upper_dev->name, i->dev->name, dev->name);
ret = __netdev_adjacent_dev_link(dev, i->dev);
if (ret)
goto rollback_upper_mesh;
}
/* add upper_dev to every dev's lower device */
- list_for_each_entry(i, &dev->lower_dev_list, list) {
+ list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+ pr_debug("linking %s's lower device %s with %s\n", dev->name,
+ i->dev->name, upper_dev->name);
ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
if (ret)
goto rollback_lower_mesh;
@@ -4706,7 +4837,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
rollback_lower_mesh:
to_i = i;
- list_for_each_entry(i, &dev->lower_dev_list, list) {
+ list_for_each_entry(i, &dev->all_adj_list.lower, list) {
if (i == to_i)
break;
__netdev_adjacent_dev_unlink(i->dev, upper_dev);
@@ -4716,7 +4847,7 @@ rollback_lower_mesh:
rollback_upper_mesh:
to_i = i;
- list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+ list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
if (i == to_i)
break;
__netdev_adjacent_dev_unlink(dev, i->dev);
@@ -4727,8 +4858,8 @@ rollback_upper_mesh:
rollback_mesh:
to_i = i;
to_j = j;
- list_for_each_entry(i, &dev->lower_dev_list, list) {
- list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+ list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+ list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
if (i == to_i && j == to_j)
break;
__netdev_adjacent_dev_unlink(i->dev, j->dev);
@@ -4737,7 +4868,7 @@ rollback_mesh:
break;
}
- __netdev_adjacent_dev_unlink(dev, upper_dev);
+ __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
return ret;
}
@@ -4755,7 +4886,7 @@ rollback_mesh:
int netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev)
{
- return __netdev_upper_dev_link(dev, upper_dev, false);
+ return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);
@@ -4773,10 +4904,18 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
int netdev_master_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev)
{
- return __netdev_upper_dev_link(dev, upper_dev, true);
+ return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
+int netdev_master_upper_dev_link_private(struct net_device *dev,
+ struct net_device *upper_dev,
+ void *private)
+{
+ return __netdev_upper_dev_link(dev, upper_dev, true, private);
+}
+EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
+
/**
* netdev_upper_dev_unlink - Removes a link to upper device
* @dev: device
@@ -4791,29 +4930,59 @@ void netdev_upper_dev_unlink(struct net_device *dev,
struct netdev_adjacent *i, *j;
ASSERT_RTNL();
- __netdev_adjacent_dev_unlink(dev, upper_dev);
+ __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
/* Here is the tricky part. We must remove all dev's lower
* devices from all upper_dev's upper devices and vice
* versa, to maintain the graph relationship.
*/
- list_for_each_entry(i, &dev->lower_dev_list, list)
- list_for_each_entry(j, &upper_dev->upper_dev_list, list)
+ list_for_each_entry(i, &dev->all_adj_list.lower, list)
+ list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
__netdev_adjacent_dev_unlink(i->dev, j->dev);
/* remove also the devices itself from lower/upper device
* list
*/
- list_for_each_entry(i, &dev->lower_dev_list, list)
+ list_for_each_entry(i, &dev->all_adj_list.lower, list)
__netdev_adjacent_dev_unlink(i->dev, upper_dev);
- list_for_each_entry(i, &upper_dev->upper_dev_list, list)
+ list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
__netdev_adjacent_dev_unlink(dev, i->dev);
call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
+void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
+ struct net_device *lower_dev)
+{
+ struct netdev_adjacent *lower;
+
+ if (!lower_dev)
+ return NULL;
+ lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
+ if (!lower)
+ return NULL;
+
+ return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);
+
+void *netdev_lower_dev_get_private(struct net_device *dev,
+ struct net_device *lower_dev)
+{
+ struct netdev_adjacent *lower;
+
+ if (!lower_dev)
+ return NULL;
+ lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
+ if (!lower)
+ return NULL;
+
+ return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_dev_get_private);
+
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
const struct net_device_ops *ops = dev->netdev_ops;
@@ -4822,7 +4991,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
ops->ndo_change_rx_flags(dev, flags);
}
-static int __dev_set_promiscuity(struct net_device *dev, int inc)
+static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
unsigned int old_flags = dev->flags;
kuid_t uid;
@@ -4865,6 +5034,8 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
dev_change_rx_flags(dev, IFF_PROMISC);
}
+ if (notify)
+ __dev_notify_flags(dev, old_flags, IFF_PROMISC);
return 0;
}
@@ -4884,7 +5055,7 @@ int dev_set_promiscuity(struct net_device *dev, int inc)
unsigned int old_flags = dev->flags;
int err;
- err = __dev_set_promiscuity(dev, inc);
+ err = __dev_set_promiscuity(dev, inc, true);
if (err < 0)
return err;
if (dev->flags != old_flags)
@@ -4893,22 +5064,9 @@ int dev_set_promiscuity(struct net_device *dev, int inc)
}
EXPORT_SYMBOL(dev_set_promiscuity);
-/**
- * dev_set_allmulti - update allmulti count on a device
- * @dev: device
- * @inc: modifier
- *
- * Add or remove reception of all multicast frames to a device. While the
- * count in the device remains above zero the interface remains listening
- * to all interfaces. Once it hits zero the device reverts back to normal
- * filtering operation. A negative @inc value is used to drop the counter
- * when releasing a resource needing all multicasts.
- * Return 0 if successful or a negative errno code on error.
- */
-
-int dev_set_allmulti(struct net_device *dev, int inc)
+static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
- unsigned int old_flags = dev->flags;
+ unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
ASSERT_RTNL();
@@ -4931,9 +5089,30 @@ int dev_set_allmulti(struct net_device *dev, int inc)
if (dev->flags ^ old_flags) {
dev_change_rx_flags(dev, IFF_ALLMULTI);
dev_set_rx_mode(dev);
+ if (notify)
+ __dev_notify_flags(dev, old_flags,
+ dev->gflags ^ old_gflags);
}
return 0;
}
+
+/**
+ * dev_set_allmulti - update allmulti count on a device
+ * @dev: device
+ * @inc: modifier
+ *
+ * Add or remove reception of all multicast frames to a device. While the
+ * count in the device remains above zero the interface remains listening
+ * to all interfaces. Once it hits zero the device reverts back to normal
+ * filtering operation. A negative @inc value is used to drop the counter
+ * when releasing a resource needing all multicasts.
+ * Return 0 if successful or a negative errno code on error.
+ */
+
+int dev_set_allmulti(struct net_device *dev, int inc)
+{
+ return __dev_set_allmulti(dev, inc, true);
+}
EXPORT_SYMBOL(dev_set_allmulti);
/*
@@ -4958,10 +5137,10 @@ void __dev_set_rx_mode(struct net_device *dev)
* therefore calling __dev_set_promiscuity here is safe.
*/
if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
- __dev_set_promiscuity(dev, 1);
+ __dev_set_promiscuity(dev, 1, false);
dev->uc_promisc = true;
} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
- __dev_set_promiscuity(dev, -1);
+ __dev_set_promiscuity(dev, -1, false);
dev->uc_promisc = false;
}
}
@@ -5050,9 +5229,13 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
if ((flags ^ dev->gflags) & IFF_PROMISC) {
int inc = (flags & IFF_PROMISC) ? 1 : -1;
+ unsigned int old_flags = dev->flags;
dev->gflags ^= IFF_PROMISC;
- dev_set_promiscuity(dev, inc);
+
+ if (__dev_set_promiscuity(dev, inc, false) >= 0)
+ if (dev->flags != old_flags)
+ dev_set_rx_mode(dev);
}
/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
@@ -5063,16 +5246,20 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
dev->gflags ^= IFF_ALLMULTI;
- dev_set_allmulti(dev, inc);
+ __dev_set_allmulti(dev, inc, false);
}
return ret;
}
-void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
+void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
+ unsigned int gchanges)
{
unsigned int changes = dev->flags ^ old_flags;
+ if (gchanges)
+ rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
+
if (changes & IFF_UP) {
if (dev->flags & IFF_UP)
call_netdevice_notifiers(NETDEV_UP, dev);
@@ -5101,17 +5288,14 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
int ret;
- unsigned int changes, old_flags = dev->flags;
+ unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
ret = __dev_change_flags(dev, flags);
if (ret < 0)
return ret;
- changes = old_flags ^ dev->flags;
- if (changes)
- rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
-
- __dev_notify_flags(dev, old_flags);
+ changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
+ __dev_notify_flags(dev, old_flags, changes);
return ret;
}
EXPORT_SYMBOL(dev_change_flags);
@@ -5258,6 +5442,7 @@ static void net_set_todo(struct net_device *dev)
static void rollback_registered_many(struct list_head *head)
{
struct net_device *dev, *tmp;
+ LIST_HEAD(close_head);
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
@@ -5280,7 +5465,9 @@ static void rollback_registered_many(struct list_head *head)
}
/* If device is running, close it first. */
- dev_close_many(head);
+ list_for_each_entry(dev, head, unreg_list)
+ list_add_tail(&dev->close_list, &close_head);
+ dev_close_many(&close_head);
list_for_each_entry(dev, head, unreg_list) {
/* And unlink it from device chain. */
@@ -5303,7 +5490,7 @@ static void rollback_registered_many(struct list_head *head)
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
- rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
+ rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
/*
* Flush the unicast and multicast chains
@@ -5702,7 +5889,7 @@ int register_netdevice(struct net_device *dev)
*/
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
out:
return ret;
@@ -6009,6 +6196,16 @@ void netdev_set_default_ethtool_ops(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
+void netdev_freemem(struct net_device *dev)
+{
+ char *addr = (char *)dev - dev->padded;
+
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+
/**
* alloc_netdev_mqs - allocate network device
* @sizeof_priv: size of private data to allocate space for
@@ -6052,7 +6249,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
/* ensure 32-byte alignment of whole construct */
alloc_size += NETDEV_ALIGN - 1;
- p = kzalloc(alloc_size, GFP_KERNEL);
+ p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!p)
+ p = vzalloc(alloc_size);
if (!p)
return NULL;
@@ -6061,7 +6260,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->pcpu_refcnt = alloc_percpu(int);
if (!dev->pcpu_refcnt)
- goto free_p;
+ goto free_dev;
if (dev_addr_init(dev))
goto free_pcpu;
@@ -6076,9 +6275,12 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list);
+ INIT_LIST_HEAD(&dev->close_list);
INIT_LIST_HEAD(&dev->link_watch_list);
- INIT_LIST_HEAD(&dev->upper_dev_list);
- INIT_LIST_HEAD(&dev->lower_dev_list);
+ INIT_LIST_HEAD(&dev->adj_list.upper);
+ INIT_LIST_HEAD(&dev->adj_list.lower);
+ INIT_LIST_HEAD(&dev->all_adj_list.upper);
+ INIT_LIST_HEAD(&dev->all_adj_list.lower);
dev->priv_flags = IFF_XMIT_DST_RELEASE;
setup(dev);
@@ -6111,8 +6313,8 @@ free_pcpu:
kfree(dev->_rx);
#endif
-free_p:
- kfree(p);
+free_dev:
+ netdev_freemem(dev);
return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
@@ -6149,7 +6351,7 @@ void free_netdev(struct net_device *dev)
/* Compatibility with error handling in drivers */
if (dev->reg_state == NETREG_UNINITIALIZED) {
- kfree((char *)dev - dev->padded);
+ netdev_freemem(dev);
return;
}
@@ -6311,7 +6513,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
rcu_barrier();
call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
- rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
+ rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
/*
* Flush the unicast and multicast chains
@@ -6350,7 +6552,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
* Prevent userspace races by waiting until the network
* device is fully setup before sending notifications.
*/
- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
synchronize_net();
err = 0;
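
For readers of the adjacency-list rework above: the patch also adds per-link private data, attached with netdev_master_upper_dev_link_private() and read back with netdev_lower_dev_get_private()/netdev_lower_dev_get_private_rcu(). A minimal sketch of a caller follows; struct slave_priv and both function names are invented for illustration and are not code from this patch.

#include <linux/netdevice.h>
#include <linux/slab.h>

/* Illustrative sketch only: the struct and functions below are made up;
 * the helpers they call are the ones added in net/core/dev.c above.
 */
struct slave_priv {
	int id;
};

static int master_enslave(struct net_device *master, struct net_device *slave)
{
	struct slave_priv *priv;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Link slave below master (RTNL held) and stash priv in the
	 * netdev_adjacent entries created on both adj_list sides.
	 */
	err = netdev_master_upper_dev_link_private(slave, master, priv);
	if (err)
		kfree(priv);
	return err;
}

static void master_show_slave(struct net_device *master, struct net_device *slave)
{
	/* Under RTNL; RCU read-side callers would use
	 * netdev_lower_dev_get_private_rcu() instead.
	 */
	struct slave_priv *priv = netdev_lower_dev_get_private(master, slave);

	if (priv)
		pr_debug("%s: slave %s private id %d\n",
			 master->name, slave->name, priv->id);
}
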
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 6cda4e2c2132..ec40a849fc42 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -752,7 +752,7 @@ int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
EXPORT_SYMBOL(dev_mc_del_global);
/**
- * dev_mc_sync - Synchronize device's unicast list to another device
+ * dev_mc_sync - Synchronize device's multicast list to another device
* @to: destination device
* @from: source device
*
@@ -780,7 +780,7 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
EXPORT_SYMBOL(dev_mc_sync);
/**
- * dev_mc_sync_multiple - Synchronize device's unicast list to another
+ * dev_mc_sync_multiple - Synchronize device's multicast list to another
* device, but allow for multiple calls to sync to multiple devices.
* @to: destination device
* @from: source device
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 78e9d9223e40..862989898f61 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -81,6 +81,8 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation",
[NETIF_F_FSO_BIT] = "tx-fcoe-segmentation",
[NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation",
+ [NETIF_F_GSO_IPIP_BIT] = "tx-ipip-segmentation",
+ [NETIF_F_GSO_SIT_BIT] = "tx-sit-segmentation",
[NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
[NETIF_F_GSO_MPLS_BIT] = "tx-mpls-segmentation",
diff --git a/net/core/filter.c b/net/core/filter.c
index 6438f29ff266..01b780856db2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -644,7 +644,6 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
bpf_jit_free(fp);
- kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);
@@ -683,7 +682,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
if (fprog->filter == NULL)
return -EINVAL;
- fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
+ fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
if (!fp)
return -ENOMEM;
memcpy(fp->insns, fprog->filter, fsize);
@@ -723,6 +722,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
struct sk_filter *fp, *old_fp;
unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+ unsigned int sk_fsize = sk_filter_size(fprog->len);
int err;
if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -732,11 +732,11 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
if (fprog->filter == NULL)
return -EINVAL;
- fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
+ fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
if (!fp)
return -ENOMEM;
if (copy_from_user(fp->insns, fprog->filter, fsize)) {
- sock_kfree_s(sk, fp, fsize+sizeof(*fp));
+ sock_kfree_s(sk, fp, sk_fsize);
return -EFAULT;
}
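
The filter.c hunks above replace the open-coded `fsize + sizeof(*fp)` arithmetic with sk_filter_size(fprog->len), a helper added elsewhere in this series (its exact definition is not shown here) that sizes the allocation from the instruction array at the end of struct sk_filter. The stand-alone sketch below only demonstrates the underlying offsetof-over-a-flexible-array idiom, using simplified stand-in types rather than the kernel structures.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for illustration; not the kernel definitions. */
struct insn_sketch {
	unsigned short	code;
	unsigned char	jt, jf;
	unsigned int	k;
};

struct filter_sketch {
	unsigned int		len;
	struct insn_sketch	insns[];	/* flexible array member */
};

int main(void)
{
	unsigned int len = 4;

	/* offsetof(type, fam[n]) yields the bytes needed for the header
	 * plus n array entries, without repeating the element-size math
	 * at every allocation site.
	 */
	printf("bytes for %u insns: %zu\n",
	       len, offsetof(struct filter_sketch, insns[len]));
	return 0;
}
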
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 8d7d0dd72db2..0242035192f1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -25,9 +25,35 @@ static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *i
memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
+/**
+ * skb_flow_get_ports - extract the upper layer ports and return them
+ * @skb: buffer to extract the ports from
+ * @thoff: transport header offset
+ * @ip_proto: protocol for which to get port offset
+ *
+ * The function will try to retrieve the ports at offset thoff + poff where poff
+ * is the protocol port offset returned from proto_ports_offset
+ */
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
+{
+ int poff = proto_ports_offset(ip_proto);
+
+ if (poff >= 0) {
+ __be32 *ports, _ports;
+
+ ports = skb_header_pointer(skb, thoff + poff,
+ sizeof(_ports), &_ports);
+ if (ports)
+ return *ports;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(skb_flow_get_ports);
+
bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
- int poff, nhoff = skb_network_offset(skb);
+ int nhoff = skb_network_offset(skb);
u8 ip_proto;
__be16 proto = skb->protocol;
@@ -40,7 +66,7 @@ again:
struct iphdr _iph;
ip:
iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
- if (!iph)
+ if (!iph || iph->ihl < 5)
return false;
if (ip_is_fragment(iph))
@@ -150,16 +176,7 @@ ipv6:
}
flow->ip_proto = ip_proto;
- poff = proto_ports_offset(ip_proto);
- if (poff >= 0) {
- __be32 *ports, _ports;
-
- ports = skb_header_pointer(skb, nhoff + poff,
- sizeof(_ports), &_ports);
- if (ports)
- flow->ports = *ports;
- }
-
+ flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
flow->thoff = (u16) nhoff;
return true;
@@ -167,6 +184,22 @@ ipv6:
EXPORT_SYMBOL(skb_flow_dissect);
static u32 hashrnd __read_mostly;
+static __always_inline void __flow_hash_secret_init(void)
+{
+ net_get_random_once(&hashrnd, sizeof(hashrnd));
+}
+
+static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
+{
+ __flow_hash_secret_init();
+ return jhash_3words(a, b, c, hashrnd);
+}
+
+static __always_inline u32 __flow_hash_1word(u32 a)
+{
+ __flow_hash_secret_init();
+ return jhash_1word(a, hashrnd);
+}
/*
* __skb_get_rxhash: calculate a flow hash based on src/dst addresses
@@ -193,9 +226,9 @@ void __skb_get_rxhash(struct sk_buff *skb)
swap(keys.port16[0], keys.port16[1]);
}
- hash = jhash_3words((__force u32)keys.dst,
- (__force u32)keys.src,
- (__force u32)keys.ports, hashrnd);
+ hash = __flow_hash_3words((__force u32)keys.dst,
+ (__force u32)keys.src,
+ (__force u32)keys.ports);
if (!hash)
hash = 1;
@@ -231,7 +264,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
hash = skb->sk->sk_hash;
else
hash = (__force u16) skb->protocol;
- hash = jhash_1word(hash, hashrnd);
+ hash = __flow_hash_1word(hash);
return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
@@ -323,7 +356,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
else
hash = (__force u16) skb->protocol ^
skb->rxhash;
- hash = jhash_1word(hash, hashrnd);
+ hash = __flow_hash_1word(hash);
queue_index = map->queues[
((u64)hash * map->len) >> 32];
}
@@ -378,11 +411,3 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
skb_set_queue_mapping(skb, queue_index);
return netdev_get_tx_queue(dev, queue_index);
}
-
-static int __init initialize_hashrnd(void)
-{
- get_random_bytes(&hashrnd, sizeof(hashrnd));
- return 0;
-}
-
-late_initcall_sync(initialize_hashrnd);
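
skb_flow_get_ports() is now exported so that callers outside the dissector can pull the transport ports at an arbitrary header offset. A hedged sketch of one possible caller (not one added by this patch):

#include <linux/skbuff.h>
#include <net/flow_keys.h>

/* Illustrative only: dissect the flow, then re-read the ports at the
 * transport offset the dissector reported.  keys.ports already holds
 * the same value; the call is shown purely to demonstrate the
 * (skb, thoff, ip_proto) calling convention of the new export.
 */
static __be32 example_flow_ports(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	return skb_flow_get_ports(skb, keys.thoff, keys.ip_proto);
}
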
diff --git a/net/core/iovec.c b/net/core/iovec.c
index b77eeecc0011..4cdb7c48dad6 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -100,7 +100,7 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
EXPORT_SYMBOL(memcpy_toiovecend);
/*
- * Copy iovec from kernel. Returns -EFAULT on error.
+ * Copy iovec to kernel. Returns -EFAULT on error.
*/
int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 6072610a8672..ca15f32821fb 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -867,7 +867,7 @@ static void neigh_invalidate(struct neighbour *neigh)
static void neigh_probe(struct neighbour *neigh)
__releases(neigh->lock)
{
- struct sk_buff *skb = skb_peek(&neigh->arp_queue);
+ struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
/* keep skb alive even if arp_queue overflows */
if (skb)
skb = skb_copy(skb, GFP_ATOMIC);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index d954b56b4e47..f3edf9635e02 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1263,7 +1263,7 @@ static void netdev_release(struct device *d)
BUG_ON(dev->reg_state != NETREG_RELEASED);
kfree(dev->ifalias);
- kfree((char *)dev - dev->padded);
+ netdev_freemem(dev);
}
static const void *net_namespace(struct device *d)
@@ -1344,17 +1344,19 @@ int netdev_register_kobject(struct net_device *net)
return error;
}
-int netdev_class_create_file(struct class_attribute *class_attr)
+int netdev_class_create_file_ns(struct class_attribute *class_attr,
+ const void *ns)
{
- return class_create_file(&net_class, class_attr);
+ return class_create_file_ns(&net_class, class_attr, ns);
}
-EXPORT_SYMBOL(netdev_class_create_file);
+EXPORT_SYMBOL(netdev_class_create_file_ns);
-void netdev_class_remove_file(struct class_attribute *class_attr)
+void netdev_class_remove_file_ns(struct class_attribute *class_attr,
+ const void *ns)
{
- class_remove_file(&net_class, class_attr);
+ class_remove_file_ns(&net_class, class_attr, ns);
}
-EXPORT_SYMBOL(netdev_class_remove_file);
+EXPORT_SYMBOL(netdev_class_remove_file_ns);
int netdev_kobject_init(void)
{
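
netdev_class_create_file()/netdev_class_remove_file() grow _ns variants that forward a namespace tag to the underlying class-attribute helpers. A minimal sketch of a caller; the attribute name and show routine are invented for the example, and passing NULL as the tag preserves the behaviour of the old non-_ns calls.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>

static ssize_t example_show(struct class *cls, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "example\n");
}

/* Appears under /sys/class/net/ (illustrative attribute). */
static CLASS_ATTR(example, 0444, example_show, NULL);

static int example_register(void)
{
	return netdev_class_create_file_ns(&class_attr_example, NULL);
}

static void example_unregister(void)
{
	netdev_class_remove_file_ns(&class_attr_example, NULL);
}
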
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index fc75c9e461b8..8f971990677c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -636,8 +636,9 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
netpoll_send_skb(np, send_skb);
- /* If there are several rx_hooks for the same address,
- we're fine by sending a single reply */
+ /* If there are several rx_skb_hooks for the same
+ * address, we're fine by sending a single reply
+ */
break;
}
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
@@ -719,8 +720,9 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
netpoll_send_skb(np, send_skb);
- /* If there are several rx_hooks for the same address,
- we're fine by sending a single reply */
+ /* If there are several rx_skb_hooks for the same
+ * address, we're fine by sending a single reply
+ */
break;
}
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
@@ -756,11 +758,12 @@ static bool pkt_is_ns(struct sk_buff *skb)
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
- int proto, len, ulen;
- int hits = 0;
+ int proto, len, ulen, data_len;
+ int hits = 0, offset;
const struct iphdr *iph;
struct udphdr *uh;
struct netpoll *np, *tmp;
+ uint16_t source;
if (list_empty(&npinfo->rx_np))
goto out;
@@ -820,7 +823,10 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
len -= iph->ihl*4;
uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
+ offset = (unsigned char *)(uh + 1) - skb->data;
ulen = ntohs(uh->len);
+ data_len = skb->len - offset;
+ source = ntohs(uh->source);
if (ulen != len)
goto out;
@@ -834,9 +840,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
if (np->local_port && np->local_port != ntohs(uh->dest))
continue;
- np->rx_hook(np, ntohs(uh->source),
- (char *)(uh+1),
- ulen - sizeof(struct udphdr));
+ np->rx_skb_hook(np, source, skb, offset, data_len);
hits++;
}
} else {
@@ -859,7 +863,10 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto out;
uh = udp_hdr(skb);
+ offset = (unsigned char *)(uh + 1) - skb->data;
ulen = ntohs(uh->len);
+ data_len = skb->len - offset;
+ source = ntohs(uh->source);
if (ulen != skb->len)
goto out;
if (udp6_csum_init(skb, uh, IPPROTO_UDP))
@@ -872,9 +879,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
if (np->local_port && np->local_port != ntohs(uh->dest))
continue;
- np->rx_hook(np, ntohs(uh->source),
- (char *)(uh+1),
- ulen - sizeof(struct udphdr));
+ np->rx_skb_hook(np, source, skb, offset, data_len);
hits++;
}
#endif
@@ -1062,7 +1067,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
npinfo->netpoll = np;
- if (np->rx_hook) {
+ if (np->rx_skb_hook) {
spin_lock_irqsave(&npinfo->rx_lock, flags);
npinfo->rx_flags |= NETPOLL_RX_ENABLED;
list_add_tail(&np->rx, &npinfo->rx_np);
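
The netpoll receive path now hands the whole skb to the hook, together with the offset and length of the UDP payload, instead of a raw data pointer. A sketch of a hook written against the new interface; the prototype is inferred from the call sites above and the callback body is illustrative.

#include <linux/kernel.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>

static void example_rx_skb_hook(struct netpoll *np, int source,
				struct sk_buff *skb, int offset, int len)
{
	char buf[128];
	int copy = min_t(int, len, sizeof(buf));

	/* The payload may live in fragments, so copy it out of the skb
	 * before parsing rather than touching skb->data directly.
	 */
	if (skb_copy_bits(skb, offset, buf, copy) == 0)
		pr_debug("netpoll rx from port %d, %d payload bytes\n",
			 source, copy);
}
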
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index d9cd627e6a16..9b7cf6c85f82 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -222,11 +222,10 @@ static void net_prio_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *p;
- void *v;
+ void *v = (void *)(unsigned long)css->cgroup->id;
cgroup_taskset_for_each(p, css, tset) {
task_lock(p);
- v = (void *)(unsigned long)task_netprioidx(p);
iterate_fd(p->files, 0, update_netprio, v);
task_unlock(p);
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2a0e21de3060..cf67144d3e3c 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1647,9 +1647,8 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
}
dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
- __dev_notify_flags(dev, old_flags);
+ __dev_notify_flags(dev, old_flags, ~0U);
return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);
@@ -1985,14 +1984,15 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
-void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change)
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
+ gfp_t flags)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
int err = -ENOBUFS;
size_t if_info_size;
- skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
+ skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
if (skb == NULL)
goto errout;
@@ -2003,7 +2003,7 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change)
kfree_skb(skb);
goto errout;
}
- rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
+ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
return;
errout:
if (err < 0)
@@ -2717,7 +2717,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
case NETDEV_JOIN:
break;
default:
- rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
break;
}
return NOTIFY_DONE;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 3f1ec1586ae1..897da56f3aff 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -7,28 +7,20 @@
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/string.h>
+#include <linux/net.h>
#include <net/secure_seq.h>
+#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
-static void net_secret_init(void)
+static __always_inline void net_secret_init(void)
{
- u32 tmp;
- int i;
-
- if (likely(net_secret[0]))
- return;
-
- for (i = NET_SECRET_SIZE; i > 0;) {
- do {
- get_random_bytes(&tmp, sizeof(tmp));
- } while (!tmp);
- cmpxchg(&net_secret[--i], 0, tmp);
- }
+ net_get_random_once(net_secret, sizeof(net_secret));
}
+#endif
#ifdef CONFIG_INET
static u32 seq_scale(u32 seq)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d81cff119f73..e4115597b38b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -903,6 +903,9 @@ EXPORT_SYMBOL(skb_clone);
static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
+ /* Only adjust this if it actually is csum_start rather than csum */
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ skb->csum_start += off;
/* {transport,network,mac}_header and tail are relative to skb->head */
skb->transport_header += off;
skb->network_header += off;
@@ -1109,9 +1112,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
#endif
skb->tail += off;
skb_headers_offset_update(skb, nhead);
- /* Only adjust this if it actually is csum_start rather than csum */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- skb->csum_start += nhead;
skb->cloned = 0;
skb->hdr_len = 0;
skb->nohdr = 0;
@@ -1176,7 +1176,6 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
NUMA_NO_NODE);
int oldheadroom = skb_headroom(skb);
int head_copy_len, head_copy_off;
- int off;
if (!n)
return NULL;
@@ -1200,11 +1199,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
copy_skb_header(n, skb);
- off = newheadroom - oldheadroom;
- if (n->ip_summed == CHECKSUM_PARTIAL)
- n->csum_start += off;
-
- skb_headers_offset_update(n, off);
+ skb_headers_offset_update(n, newheadroom - oldheadroom);
return n;
}
@@ -1933,9 +1928,8 @@ fault:
EXPORT_SYMBOL(skb_store_bits);
/* Checksum skb data. */
-
-__wsum skb_checksum(const struct sk_buff *skb, int offset,
- int len, __wsum csum)
+__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
+ __wsum csum, const struct skb_checksum_ops *ops)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
@@ -1946,7 +1940,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
if (copy > 0) {
if (copy > len)
copy = len;
- csum = csum_partial(skb->data + offset, copy, csum);
+ csum = ops->update(skb->data + offset, copy, csum);
if ((len -= copy) == 0)
return csum;
offset += copy;
@@ -1967,10 +1961,10 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
if (copy > len)
copy = len;
vaddr = kmap_atomic(skb_frag_page(frag));
- csum2 = csum_partial(vaddr + frag->page_offset +
- offset - start, copy, 0);
+ csum2 = ops->update(vaddr + frag->page_offset +
+ offset - start, copy, 0);
kunmap_atomic(vaddr);
- csum = csum_block_add(csum, csum2, pos);
+ csum = ops->combine(csum, csum2, pos, copy);
if (!(len -= copy))
return csum;
offset += copy;
@@ -1989,9 +1983,9 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
__wsum csum2;
if (copy > len)
copy = len;
- csum2 = skb_checksum(frag_iter, offset - start,
- copy, 0);
- csum = csum_block_add(csum, csum2, pos);
+ csum2 = __skb_checksum(frag_iter, offset - start,
+ copy, 0, ops);
+ csum = ops->combine(csum, csum2, pos, copy);
if ((len -= copy) == 0)
return csum;
offset += copy;
@@ -2003,6 +1997,18 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
return csum;
}
+EXPORT_SYMBOL(__skb_checksum);
+
+__wsum skb_checksum(const struct sk_buff *skb, int offset,
+ int len, __wsum csum)
+{
+ const struct skb_checksum_ops ops = {
+ .update = csum_partial_ext,
+ .combine = csum_block_add_ext,
+ };
+
+ return __skb_checksum(skb, offset, len, csum, &ops);
+}
EXPORT_SYMBOL(skb_checksum);
/* Both of above in one bottle. */
@@ -2837,14 +2843,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
__copy_skb_header(nskb, skb);
nskb->mac_len = skb->mac_len;
- /* nskb and skb might have different headroom */
- if (nskb->ip_summed == CHECKSUM_PARTIAL)
- nskb->csum_start += skb_headroom(nskb) - headroom;
-
- skb_reset_mac_header(nskb);
- skb_set_network_header(nskb, skb->mac_len);
- nskb->transport_header = (nskb->network_header +
- skb_network_header_len(skb));
+ skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
skb_copy_from_linear_data_offset(skb, -tnl_hlen,
nskb->data - tnl_hlen,
@@ -2936,32 +2935,30 @@ EXPORT_SYMBOL_GPL(skb_segment);
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
- struct sk_buff *p = *head;
- struct sk_buff *nskb;
- struct skb_shared_info *skbinfo = skb_shinfo(skb);
- struct skb_shared_info *pinfo = skb_shinfo(p);
- unsigned int headroom;
- unsigned int len = skb_gro_len(skb);
+ struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
unsigned int offset = skb_gro_offset(skb);
unsigned int headlen = skb_headlen(skb);
+ struct sk_buff *nskb, *lp, *p = *head;
+ unsigned int len = skb_gro_len(skb);
unsigned int delta_truesize;
+ unsigned int headroom;
- if (p->len + len >= 65536)
+ if (unlikely(p->len + len >= 65536))
return -E2BIG;
- if (pinfo->frag_list)
- goto merge;
- else if (headlen <= offset) {
+ lp = NAPI_GRO_CB(p)->last ?: p;
+ pinfo = skb_shinfo(lp);
+
+ if (headlen <= offset) {
skb_frag_t *frag;
skb_frag_t *frag2;
int i = skbinfo->nr_frags;
int nr_frags = pinfo->nr_frags + i;
- offset -= headlen;
-
if (nr_frags > MAX_SKB_FRAGS)
- return -E2BIG;
+ goto merge;
+ offset -= headlen;
pinfo->nr_frags = nr_frags;
skbinfo->nr_frags = 0;
@@ -2992,7 +2989,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
unsigned int first_offset;
if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
- return -E2BIG;
+ goto merge;
first_offset = skb->data -
(unsigned char *)page_address(page) +
@@ -3010,7 +3007,10 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
goto done;
- } else if (skb_gro_len(p) != pinfo->gso_size)
+ }
+ if (pinfo->frag_list)
+ goto merge;
+ if (skb_gro_len(p) != pinfo->gso_size)
return -E2BIG;
headroom = skb_headroom(p);
@@ -3062,16 +3062,24 @@ merge:
__skb_pull(skb, offset);
- NAPI_GRO_CB(p)->last->next = skb;
+ if (!NAPI_GRO_CB(p)->last)
+ skb_shinfo(p)->frag_list = skb;
+ else
+ NAPI_GRO_CB(p)->last->next = skb;
NAPI_GRO_CB(p)->last = skb;
skb_header_release(skb);
+ lp = p;
done:
NAPI_GRO_CB(p)->count++;
p->data_len += len;
p->truesize += delta_truesize;
p->len += len;
-
+ if (lp != p) {
+ lp->data_len += len;
+ lp->truesize += delta_truesize;
+ lp->len += len;
+ }
NAPI_GRO_CB(skb)->same_flow = 1;
return 0;
}
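
skb_checksum() is now a thin wrapper around __skb_checksum(), which walks the skb once and lets the caller supply the per-chunk update/combine primitives through struct skb_checksum_ops. The sketch below wires in the default primitives again just to show the shape of a custom user; a real alternative user would plug in e.g. a CRC32c pair instead.

#include <linux/skbuff.h>
#include <net/checksum.h>

static __wsum example_update(const void *buff, int len, __wsum sum)
{
	/* Stand-in: fold one linear chunk into the running value. */
	return csum_partial(buff, len, sum);
}

static __wsum example_combine(__wsum csum, __wsum csum2, int offset, int len)
{
	/* Stand-in: merge a sub-range result at the given offset. */
	return csum_block_add(csum, csum2, offset);
}

static __wsum example_skb_sum(const struct sk_buff *skb)
{
	const struct skb_checksum_ops ops = {
		.update		= example_update,
		.combine	= example_combine,
	};

	return __skb_checksum(skb, 0, skb->len, 0, &ops);
}
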
diff --git a/net/core/sock.c b/net/core/sock.c
index 5b6beba494a3..ab20ed9b0f31 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -475,12 +475,6 @@ discard_and_relse:
}
EXPORT_SYMBOL(sk_receive_skb);
-void sk_reset_txq(struct sock *sk)
-{
- sk_tx_queue_clear(sk);
-}
-EXPORT_SYMBOL(sk_reset_txq);
-
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
struct dst_entry *dst = __sk_dst_get(sk);
@@ -914,6 +908,13 @@ set_rcvbuf:
}
break;
#endif
+
+ case SO_MAX_PACING_RATE:
+ sk->sk_max_pacing_rate = val;
+ sk->sk_pacing_rate = min(sk->sk_pacing_rate,
+ sk->sk_max_pacing_rate);
+ break;
+
default:
ret = -ENOPROTOOPT;
break;
@@ -1177,6 +1178,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
#endif
+ case SO_MAX_PACING_RATE:
+ v.val = sk->sk_max_pacing_rate;
+ break;
+
default:
return -ENOPROTOOPT;
}
@@ -1836,7 +1841,17 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER get_order(32768)
-bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+/**
+ * skb_page_frag_refill - check that a page_frag contains enough room
+ * @sz: minimum size of the fragment we want to get
+ * @pfrag: pointer to page_frag
+ * @prio: priority for memory allocation
+ *
+ * Note: While this allocator tries to use high order pages, there is
+ * no guarantee that allocations succeed. Therefore, @sz MUST be
+ * less than or equal to PAGE_SIZE.
+ */
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
{
int order;
@@ -1845,16 +1860,16 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
pfrag->offset = 0;
return true;
}
- if (pfrag->offset < pfrag->size)
+ if (pfrag->offset + sz <= pfrag->size)
return true;
put_page(pfrag->page);
}
/* We restrict high order allocations to users that can afford to wait */
- order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
+ order = (prio & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
do {
- gfp_t gfp = sk->sk_allocation;
+ gfp_t gfp = prio;
if (order)
gfp |= __GFP_COMP | __GFP_NOWARN;
@@ -1866,6 +1881,15 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
}
} while (--order >= 0);
+ return false;
+}
+EXPORT_SYMBOL(skb_page_frag_refill);
+
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+{
+ if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
+ return true;
+
sk_enter_memory_pressure(sk);
sk_stream_moderate_sndbuf(sk);
return false;
@@ -2319,6 +2343,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_ll_usec = sysctl_net_busy_read;
#endif
+ sk->sk_max_pacing_rate = ~0U;
+ sk->sk_pacing_rate = ~0U;
/*
* Before updating sk_refcnt, we must commit prior changes to memory
* (Documentation/RCU/rculist_nulls.txt for details)
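
SO_MAX_PACING_RATE is a new socket option that caps the socket's pacing rate (sk_pacing_rate, initialised to unlimited above). A userspace sketch of setting it; the option takes an unsigned int in bytes per second, and the numeric value 47 is the asm-generic definition (a few architectures use a different number).

#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47	/* asm-generic value; some arches differ */
#endif

/* Cap pacing of this socket to roughly 1 MB/s. */
static int cap_pacing(int fd)
{
	unsigned int rate = 1000000;	/* bytes per second */

	return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
			  &rate, sizeof(rate));
}
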
diff --git a/net/core/utils.c b/net/core/utils.c
index aa88e23fc87a..2f737bf90b3f 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -338,3 +338,52 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
csum_unfold(*sum)));
}
EXPORT_SYMBOL(inet_proto_csum_replace16);
+
+struct __net_random_once_work {
+ struct work_struct work;
+ struct static_key *key;
+};
+
+static void __net_random_once_deferred(struct work_struct *w)
+{
+ struct __net_random_once_work *work =
+ container_of(w, struct __net_random_once_work, work);
+ if (!static_key_enabled(work->key))
+ static_key_slow_inc(work->key);
+ kfree(work);
+}
+
+static void __net_random_once_disable_jump(struct static_key *key)
+{
+ struct __net_random_once_work *w;
+
+ w = kmalloc(sizeof(*w), GFP_ATOMIC);
+ if (!w)
+ return;
+
+ INIT_WORK(&w->work, __net_random_once_deferred);
+ w->key = key;
+ schedule_work(&w->work);
+}
+
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+ struct static_key *done_key)
+{
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lock, flags);
+ if (*done) {
+ spin_unlock_irqrestore(&lock, flags);
+ return false;
+ }
+
+ get_random_bytes(buf, nbytes);
+ *done = true;
+ spin_unlock_irqrestore(&lock, flags);
+
+ __net_random_once_disable_jump(done_key);
+
+ return true;
+}
+EXPORT_SYMBOL(__net_get_random_once);
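
__net_get_random_once() is the slow path behind the net_get_random_once() macro (the macro lives in include/linux/net.h, which secure_seq.c starts including above): the first caller seeds the buffer and schedules a work item that flips a static_key, so later calls reduce to a static-branch check. Typical usage, mirroring the flow-dissector and secure_seq call sites above:

#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/net.h>
#include <linux/types.h>

static u32 example_secret __read_mostly;

static u32 example_hash(u32 val)
{
	/* Seeds example_secret exactly once, lazily, on the first call
	 * from a hot path; the spinlock is only ever taken before the
	 * static_key has been enabled.
	 */
	net_get_random_once(&example_secret, sizeof(example_secret));

	return jhash_1word(val, example_secret);
}
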
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index a269aa7f7923..3284bfa988c0 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -101,16 +101,16 @@ struct dccp_ackvec_record {
u8 avr_ack_nonce:1;
};
-extern int dccp_ackvec_init(void);
-extern void dccp_ackvec_exit(void);
+int dccp_ackvec_init(void);
+void dccp_ackvec_exit(void);
-extern struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority);
-extern void dccp_ackvec_free(struct dccp_ackvec *av);
+struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority);
+void dccp_ackvec_free(struct dccp_ackvec *av);
-extern void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb);
-extern int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
-extern void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno);
-extern u16 dccp_ackvec_buflen(const struct dccp_ackvec *av);
+void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb);
+int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
+void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno);
+u16 dccp_ackvec_buflen(const struct dccp_ackvec *av);
static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
{
@@ -133,7 +133,6 @@ struct dccp_ackvec_parsed {
struct list_head node;
};
-extern int dccp_ackvec_parsed_add(struct list_head *head,
- u8 *vec, u8 len, u8 nonce);
-extern void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks);
+int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce);
+void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks);
#endif /* _ACKVEC_H */
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index fb85d371a8de..6eb837a47b5c 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -93,8 +93,8 @@ extern struct ccid_operations ccid2_ops;
extern struct ccid_operations ccid3_ops;
#endif
-extern int ccid_initialize_builtins(void);
-extern void ccid_cleanup_builtins(void);
+int ccid_initialize_builtins(void);
+void ccid_cleanup_builtins(void);
struct ccid {
struct ccid_operations *ccid_ops;
@@ -106,12 +106,12 @@ static inline void *ccid_priv(const struct ccid *ccid)
return (void *)ccid->ccid_priv;
}
-extern bool ccid_support_check(u8 const *ccid_array, u8 array_len);
-extern int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len);
-extern int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
- char __user *, int __user *);
+bool ccid_support_check(u8 const *ccid_array, u8 array_len);
+int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len);
+int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
+ char __user *, int __user *);
-extern struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx);
+struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx);
static inline int ccid_get_current_rx_ccid(struct dccp_sock *dp)
{
@@ -131,8 +131,8 @@ static inline int ccid_get_current_tx_ccid(struct dccp_sock *dp)
return ccid->ccid_ops->ccid_id;
}
-extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
-extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
+void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
+void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
/*
* Congestion control of queued data packets via CCID decision.
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index d1d2f5383b7d..57f631a86ccd 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -65,9 +65,9 @@ static inline u8 tfrc_lh_length(struct tfrc_loss_hist *lh)
struct tfrc_rx_hist;
-extern int tfrc_lh_interval_add(struct tfrc_loss_hist *, struct tfrc_rx_hist *,
- u32 (*first_li)(struct sock *), struct sock *);
-extern u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *);
-extern void tfrc_lh_cleanup(struct tfrc_loss_hist *lh);
+int tfrc_lh_interval_add(struct tfrc_loss_hist *, struct tfrc_rx_hist *,
+ u32 (*first_li)(struct sock *), struct sock *);
+u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *);
+void tfrc_lh_cleanup(struct tfrc_loss_hist *lh);
#endif /* _DCCP_LI_HIST_ */
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 7ee4a9d9d335..ee362b0b630d 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -60,8 +60,8 @@ static inline struct tfrc_tx_hist_entry *
return head;
}
-extern int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno);
-extern void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp);
+int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno);
+void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp);
/* Subtraction a-b modulo-16, respects circular wrap-around */
#define SUB16(a, b) (((a) + 16 - (b)) & 0xF)
@@ -139,20 +139,17 @@ static inline bool tfrc_rx_hist_loss_pending(const struct tfrc_rx_hist *h)
return h->loss_count > 0;
}
-extern void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h,
- const struct sk_buff *skb, const u64 ndp);
+void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, const struct sk_buff *skb,
+ const u64 ndp);
-extern int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb);
+int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb);
struct tfrc_loss_hist;
-extern int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
- struct tfrc_loss_hist *lh,
- struct sk_buff *skb, const u64 ndp,
- u32 (*first_li)(struct sock *sk),
- struct sock *sk);
-extern u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h,
- const struct sk_buff *skb);
-extern int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h);
-extern void tfrc_rx_hist_purge(struct tfrc_rx_hist *h);
+int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, struct tfrc_loss_hist *lh,
+ struct sk_buff *skb, const u64 ndp,
+ u32 (*first_li)(struct sock *sk), struct sock *sk);
+u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, const struct sk_buff *skb);
+int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h);
+void tfrc_rx_hist_purge(struct tfrc_rx_hist *h);
#endif /* _DCCP_PKT_HIST_ */
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index ed698c42a5fb..40ee7d62b652 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -55,21 +55,21 @@ static inline u32 tfrc_ewma(const u32 avg, const u32 newval, const u8 weight)
return avg ? (weight * avg + (10 - weight) * newval) / 10 : newval;
}
-extern u32 tfrc_calc_x(u16 s, u32 R, u32 p);
-extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
-extern u32 tfrc_invert_loss_event_rate(u32 loss_event_rate);
+u32 tfrc_calc_x(u16 s, u32 R, u32 p);
+u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
+u32 tfrc_invert_loss_event_rate(u32 loss_event_rate);
-extern int tfrc_tx_packet_history_init(void);
-extern void tfrc_tx_packet_history_exit(void);
-extern int tfrc_rx_packet_history_init(void);
-extern void tfrc_rx_packet_history_exit(void);
+int tfrc_tx_packet_history_init(void);
+void tfrc_tx_packet_history_exit(void);
+int tfrc_rx_packet_history_init(void);
+void tfrc_rx_packet_history_exit(void);
-extern int tfrc_li_init(void);
-extern void tfrc_li_exit(void);
+int tfrc_li_init(void);
+void tfrc_li_exit(void);
#ifdef CONFIG_IP_DCCP_TFRC_LIB
-extern int tfrc_lib_init(void);
-extern void tfrc_lib_exit(void);
+int tfrc_lib_init(void);
+void tfrc_lib_exit(void);
#else
#define tfrc_lib_init() (0)
#define tfrc_lib_exit()
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 708e75bf623d..30948784dd58 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -53,7 +53,7 @@ extern struct inet_hashinfo dccp_hashinfo;
extern struct percpu_counter dccp_orphan_count;
-extern void dccp_time_wait(struct sock *sk, int state, int timeo);
+void dccp_time_wait(struct sock *sk, int state, int timeo);
/*
* Set safe upper bounds for header and option length. Since Data Offset is 8
@@ -224,114 +224,108 @@ static inline void dccp_csum_outgoing(struct sk_buff *skb)
skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0);
}
-extern void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
-extern int dccp_retransmit_skb(struct sock *sk);
+int dccp_retransmit_skb(struct sock *sk);
-extern void dccp_send_ack(struct sock *sk);
-extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
- struct request_sock *rsk);
+void dccp_send_ack(struct sock *sk);
+void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *rsk);
-extern void dccp_send_sync(struct sock *sk, const u64 seq,
- const enum dccp_pkt_type pkt_type);
+void dccp_send_sync(struct sock *sk, const u64 seq,
+ const enum dccp_pkt_type pkt_type);
/*
* TX Packet Dequeueing Interface
*/
-extern void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
-extern bool dccp_qpolicy_full(struct sock *sk);
-extern void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
-extern struct sk_buff *dccp_qpolicy_top(struct sock *sk);
-extern struct sk_buff *dccp_qpolicy_pop(struct sock *sk);
-extern bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
+void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
+bool dccp_qpolicy_full(struct sock *sk);
+void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
+struct sk_buff *dccp_qpolicy_top(struct sock *sk);
+struct sk_buff *dccp_qpolicy_pop(struct sock *sk);
+bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
/*
* TX Packet Output and TX Timers
*/
-extern void dccp_write_xmit(struct sock *sk);
-extern void dccp_write_space(struct sock *sk);
-extern void dccp_flush_write_queue(struct sock *sk, long *time_budget);
+void dccp_write_xmit(struct sock *sk);
+void dccp_write_space(struct sock *sk);
+void dccp_flush_write_queue(struct sock *sk, long *time_budget);
-extern void dccp_init_xmit_timers(struct sock *sk);
+void dccp_init_xmit_timers(struct sock *sk);
static inline void dccp_clear_xmit_timers(struct sock *sk)
{
inet_csk_clear_xmit_timers(sk);
}
-extern unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);
+unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);
-extern const char *dccp_packet_name(const int type);
+const char *dccp_packet_name(const int type);
-extern void dccp_set_state(struct sock *sk, const int state);
-extern void dccp_done(struct sock *sk);
+void dccp_set_state(struct sock *sk, const int state);
+void dccp_done(struct sock *sk);
-extern int dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
- struct sk_buff const *skb);
+int dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
+ struct sk_buff const *skb);
-extern int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
-extern struct sock *dccp_create_openreq_child(struct sock *sk,
- const struct request_sock *req,
- const struct sk_buff *skb);
+struct sock *dccp_create_openreq_child(struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb);
-extern int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
+int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
-extern struct sock *dccp_v4_request_recv_sock(struct sock *sk,
- struct sk_buff *skb,
- struct request_sock *req,
- struct dst_entry *dst);
-extern struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req,
- struct request_sock **prev);
+struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst);
+struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct request_sock **prev);
-extern int dccp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb);
-extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- struct dccp_hdr *dh, unsigned int len);
-extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
- const struct dccp_hdr *dh, const unsigned int len);
+int dccp_child_process(struct sock *parent, struct sock *child,
+ struct sk_buff *skb);
+int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ struct dccp_hdr *dh, unsigned int len);
+int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ const struct dccp_hdr *dh, const unsigned int len);
-extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
-extern void dccp_destroy_sock(struct sock *sk);
+int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
+void dccp_destroy_sock(struct sock *sk);
-extern void dccp_close(struct sock *sk, long timeout);
-extern struct sk_buff *dccp_make_response(struct sock *sk,
- struct dst_entry *dst,
- struct request_sock *req);
+void dccp_close(struct sock *sk, long timeout);
+struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
+ struct request_sock *req);
-extern int dccp_connect(struct sock *sk);
-extern int dccp_disconnect(struct sock *sk, int flags);
-extern int dccp_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
-extern int dccp_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
+int dccp_connect(struct sock *sk);
+int dccp_disconnect(struct sock *sk, int flags);
+int dccp_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int dccp_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
#ifdef CONFIG_COMPAT
-extern int compat_dccp_getsockopt(struct sock *sk,
- int level, int optname,
- char __user *optval, int __user *optlen);
-extern int compat_dccp_setsockopt(struct sock *sk,
- int level, int optname,
- char __user *optval, unsigned int optlen);
+int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
#endif
-extern int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int dccp_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t size);
-extern int dccp_recvmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len, int nonblock,
- int flags, int *addr_len);
-extern void dccp_shutdown(struct sock *sk, int how);
-extern int inet_dccp_listen(struct socket *sock, int backlog);
-extern unsigned int dccp_poll(struct file *file, struct socket *sock,
- poll_table *wait);
-extern int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
- int addr_len);
-
-extern struct sk_buff *dccp_ctl_make_reset(struct sock *sk,
- struct sk_buff *skb);
-extern int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
-extern void dccp_send_close(struct sock *sk, const int active);
-extern int dccp_invalid_packet(struct sk_buff *skb);
-extern u32 dccp_sample_rtt(struct sock *sk, long delta);
+int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t size);
+int dccp_recvmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t len, int nonblock, int flags,
+ int *addr_len);
+void dccp_shutdown(struct sock *sk, int how);
+int inet_dccp_listen(struct socket *sock, int backlog);
+unsigned int dccp_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
+int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+
+struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *skb);
+int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
+void dccp_send_close(struct sock *sk, const int active);
+int dccp_invalid_packet(struct sk_buff *skb);
+u32 dccp_sample_rtt(struct sock *sk, long delta);
static inline int dccp_bad_service_code(const struct sock *sk,
const __be32 service)
@@ -475,25 +469,25 @@ static inline int dccp_ack_pending(const struct sock *sk)
return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
}
-extern int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
-extern int dccp_feat_finalise_settings(struct dccp_sock *dp);
-extern int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
-extern int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
- struct sk_buff *skb);
-extern int dccp_feat_activate_values(struct sock *sk, struct list_head *fn);
-extern void dccp_feat_list_purge(struct list_head *fn_list);
-
-extern int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
-extern int dccp_insert_options_rsk(struct dccp_request_sock*, struct sk_buff*);
-extern int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
-extern u32 dccp_timestamp(void);
-extern void dccp_timestamping_init(void);
-extern int dccp_insert_option(struct sk_buff *skb, unsigned char option,
- const void *value, unsigned char len);
+int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
+int dccp_feat_finalise_settings(struct dccp_sock *dp);
+int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
+int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
+ struct sk_buff *skb);
+int dccp_feat_activate_values(struct sock *sk, struct list_head *fn);
+void dccp_feat_list_purge(struct list_head *fn_list);
+
+int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
+int dccp_insert_options_rsk(struct dccp_request_sock *, struct sk_buff *);
+int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
+u32 dccp_timestamp(void);
+void dccp_timestamping_init(void);
+int dccp_insert_option(struct sk_buff *skb, unsigned char option,
+ const void *value, unsigned char len);
#ifdef CONFIG_SYSCTL
-extern int dccp_sysctl_init(void);
-extern void dccp_sysctl_exit(void);
+int dccp_sysctl_init(void);
+void dccp_sysctl_exit(void);
#else
static inline int dccp_sysctl_init(void)
{
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index 90b957d34d26..0e75cebb2187 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -107,13 +107,13 @@ extern unsigned long sysctl_dccp_sequence_window;
extern int sysctl_dccp_rx_ccid;
extern int sysctl_dccp_tx_ccid;
-extern int dccp_feat_init(struct sock *sk);
-extern void dccp_feat_initialise_sysctls(void);
-extern int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
- u8 const *list, u8 len);
-extern int dccp_feat_parse_options(struct sock *, struct dccp_request_sock *,
- u8 mand, u8 opt, u8 feat, u8 *val, u8 len);
-extern int dccp_feat_clone_list(struct list_head const *, struct list_head *);
+int dccp_feat_init(struct sock *sk);
+void dccp_feat_initialise_sysctls(void);
+int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
+ u8 const *list, u8 len);
+int dccp_feat_parse_options(struct sock *, struct dccp_request_sock *,
+ u8 mand, u8 opt, u8 feat, u8 *val, u8 len);
+int dccp_feat_clone_list(struct list_head const *, struct list_head *);
/*
* Encoding variable-length options and their maximum length.
@@ -127,11 +127,11 @@ extern int dccp_feat_clone_list(struct list_head const *, struct list_head *);
*/
#define DCCP_OPTVAL_MAXLEN 6
-extern void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
-extern u64 dccp_decode_value_var(const u8 *bf, const u8 len);
-extern u64 dccp_feat_nn_get(struct sock *sk, u8 feat);
+void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
+u64 dccp_decode_value_var(const u8 *bf, const u8 len);
+u64 dccp_feat_nn_get(struct sock *sk, u8 feat);
-extern int dccp_insert_option_mandatory(struct sk_buff *skb);
-extern int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
- u8 *val, u8 len, bool repeat_first);
+int dccp_insert_option_mandatory(struct sk_buff *skb);
+int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat, u8 *val, u8 len,
+ bool repeat_first);
#endif /* _DCCP_FEAT_H */
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ebc54fef85a5..720c36225ed9 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -409,9 +409,9 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
newinet = inet_sk(newsk);
ireq = inet_rsk(req);
- newinet->inet_daddr = ireq->rmt_addr;
- newinet->inet_rcv_saddr = ireq->loc_addr;
- newinet->inet_saddr = ireq->loc_addr;
+ newinet->inet_daddr = ireq->ir_rmt_addr;
+ newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+ newinet->inet_saddr = ireq->ir_loc_addr;
newinet->inet_opt = ireq->opt;
ireq->opt = NULL;
newinet->mc_index = inet_iif(skb);
@@ -516,10 +516,10 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req)
const struct inet_request_sock *ireq = inet_rsk(req);
struct dccp_hdr *dh = dccp_hdr(skb);
- dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr,
- ireq->rmt_addr);
- err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
- ireq->rmt_addr,
+ dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
+ ireq->ir_rmt_addr);
+ err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+ ireq->ir_rmt_addr,
ireq->opt);
err = net_xmit_eval(err);
}
@@ -641,8 +641,8 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;
ireq = inet_rsk(req);
- ireq->loc_addr = ip_hdr(skb)->daddr;
- ireq->rmt_addr = ip_hdr(skb)->saddr;
+ ireq->ir_loc_addr = ip_hdr(skb)->daddr;
+ ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
/*
* Step 3: Process LISTEN state
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6cf9f7782ad4..4ac71ff7c2e4 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -67,7 +67,7 @@ static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
struct dccp_hdr *dh = dccp_hdr(skb);
dccp_csum_outgoing(skb);
- dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
+ dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
}
static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
@@ -216,7 +216,7 @@ out:
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
{
- struct inet6_request_sock *ireq6 = inet6_rsk(req);
+ struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
struct in6_addr *final_p, final;
@@ -226,12 +226,12 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
- fl6.daddr = ireq6->rmt_addr;
- fl6.saddr = ireq6->loc_addr;
+ fl6.daddr = ireq->ir_v6_rmt_addr;
+ fl6.saddr = ireq->ir_v6_loc_addr;
fl6.flowlabel = 0;
- fl6.flowi6_oif = ireq6->iif;
- fl6.fl6_dport = inet_rsk(req)->rmt_port;
- fl6.fl6_sport = inet_rsk(req)->loc_port;
+ fl6.flowi6_oif = ireq->ir_iif;
+ fl6.fl6_dport = ireq->ir_rmt_port;
+ fl6.fl6_sport = htons(ireq->ir_num);
security_req_classify_flow(req, flowi6_to_flowi(&fl6));
@@ -249,9 +249,9 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
struct dccp_hdr *dh = dccp_hdr(skb);
dh->dccph_checksum = dccp_v6_csum_finish(skb,
- &ireq6->loc_addr,
- &ireq6->rmt_addr);
- fl6.daddr = ireq6->rmt_addr;
+ &ireq->ir_v6_loc_addr,
+ &ireq->ir_v6_rmt_addr);
+ fl6.daddr = ireq->ir_v6_rmt_addr;
err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
err = net_xmit_eval(err);
}
@@ -264,8 +264,7 @@ done:
static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
- if (inet6_rsk(req)->pktopts != NULL)
- kfree_skb(inet6_rsk(req)->pktopts);
+ kfree_skb(inet_rsk(req)->pktopts);
}
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
@@ -359,7 +358,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct request_sock *req;
struct dccp_request_sock *dreq;
- struct inet6_request_sock *ireq6;
+ struct inet_request_sock *ireq;
struct ipv6_pinfo *np = inet6_sk(sk);
const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
@@ -398,22 +397,22 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
- ireq6 = inet6_rsk(req);
- ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
- ireq6->loc_addr = ipv6_hdr(skb)->daddr;
+ ireq = inet_rsk(req);
+ ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
atomic_inc(&skb->users);
- ireq6->pktopts = skb;
+ ireq->pktopts = skb;
}
- ireq6->iif = sk->sk_bound_dev_if;
+ ireq->ir_iif = sk->sk_bound_dev_if;
/* So that link locals have meaning */
if (!sk->sk_bound_dev_if &&
- ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
- ireq6->iif = inet6_iif(skb);
+ ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+ ireq->ir_iif = inet6_iif(skb);
/*
* Step 3: Process LISTEN state
@@ -446,7 +445,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
struct request_sock *req,
struct dst_entry *dst)
{
- struct inet6_request_sock *ireq6 = inet6_rsk(req);
+ struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct inet_sock *newinet;
struct dccp6_sock *newdp6;
@@ -467,11 +466,11 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
- ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
+ ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
- newnp->rcv_saddr = newnp->saddr;
+ newsk->sk_v6_rcv_saddr = newnp->saddr;
inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
@@ -505,12 +504,12 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
- fl6.daddr = ireq6->rmt_addr;
+ fl6.daddr = ireq->ir_v6_rmt_addr;
final_p = fl6_update_dst(&fl6, np->opt, &final);
- fl6.saddr = ireq6->loc_addr;
+ fl6.saddr = ireq->ir_v6_loc_addr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
- fl6.fl6_dport = inet_rsk(req)->rmt_port;
- fl6.fl6_sport = inet_rsk(req)->loc_port;
+ fl6.fl6_dport = ireq->ir_rmt_port;
+ fl6.fl6_sport = htons(ireq->ir_num);
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
@@ -538,10 +537,10 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
- newnp->daddr = ireq6->rmt_addr;
- newnp->saddr = ireq6->loc_addr;
- newnp->rcv_saddr = ireq6->loc_addr;
- newsk->sk_bound_dev_if = ireq6->iif;
+ newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
+ newnp->saddr = ireq->ir_v6_loc_addr;
+ newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
+ newsk->sk_bound_dev_if = ireq->ir_iif;
/* Now IPv6 options...
@@ -554,10 +553,10 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
- if (ireq6->pktopts != NULL) {
- newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
- consume_skb(ireq6->pktopts);
- ireq6->pktopts = NULL;
+ if (ireq->pktopts != NULL) {
+ newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
+ consume_skb(ireq->pktopts);
+ ireq->pktopts = NULL;
if (newnp->pktoptions)
skb_set_owner_r(newnp->pktoptions, newsk);
}
@@ -885,7 +884,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
return -EINVAL;
}
- np->daddr = usin->sin6_addr;
+ sk->sk_v6_daddr = usin->sin6_addr;
np->flow_label = fl6.flowlabel;
/*
@@ -915,16 +914,16 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
goto failure;
}
ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
- ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr);
+ ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &sk->sk_v6_rcv_saddr);
return err;
}
- if (!ipv6_addr_any(&np->rcv_saddr))
- saddr = &np->rcv_saddr;
+ if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+ saddr = &sk->sk_v6_rcv_saddr;
fl6.flowi6_proto = IPPROTO_DCCP;
- fl6.daddr = np->daddr;
+ fl6.daddr = sk->sk_v6_daddr;
fl6.saddr = saddr ? *saddr : np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = usin->sin6_port;
@@ -941,7 +940,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (saddr == NULL) {
saddr = &fl6.saddr;
- np->rcv_saddr = *saddr;
+ sk->sk_v6_rcv_saddr = *saddr;
}
/* set the source address */
@@ -963,7 +962,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
goto late_failure;
dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
- np->daddr.s6_addr32,
+ sk->sk_v6_daddr.s6_addr32,
inet->inet_sport,
inet->inet_dport);
err = dccp_connect(sk);
diff --git a/net/dccp/ipv6.h b/net/dccp/ipv6.h
index 6eef81fdbe56..af259e15e7f0 100644
--- a/net/dccp/ipv6.h
+++ b/net/dccp/ipv6.h
@@ -25,12 +25,10 @@ struct dccp6_sock {
struct dccp6_request_sock {
struct dccp_request_sock dccp;
- struct inet6_request_sock inet6;
};
struct dccp6_timewait_sock {
struct inet_timewait_sock inet;
- struct inet6_timewait_sock tw6;
};
#endif /* _DCCP_IPV6_H */
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 662071b249cc..9e2f78bc1553 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -56,12 +56,9 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == PF_INET6) {
const struct ipv6_pinfo *np = inet6_sk(sk);
- struct inet6_timewait_sock *tw6;
- tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
- tw6 = inet6_twsk((struct sock *)tw);
- tw6->tw_v6_daddr = np->daddr;
- tw6->tw_v6_rcv_saddr = np->rcv_saddr;
+ tw->tw_v6_daddr = sk->sk_v6_daddr;
+ tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
tw->tw_ipv6only = np->ipv6only;
}
#endif
@@ -269,10 +266,10 @@ int dccp_reqsk_init(struct request_sock *req,
{
struct dccp_request_sock *dreq = dccp_rsk(req);
- inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport;
- inet_rsk(req)->loc_port = dccp_hdr(skb)->dccph_dport;
- inet_rsk(req)->acked = 0;
- dreq->dreq_timestamp_echo = 0;
+ inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
+ inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport);
+ inet_rsk(req)->acked = 0;
+ dreq->dreq_timestamp_echo = 0;
/* inherit feature negotiation options from listening socket */
return dccp_feat_clone_list(&dp->dccps_featneg, &dreq->dreq_featneg);
diff --git a/net/dccp/output.c b/net/dccp/output.c
index d17fc90a74b6..8876078859da 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -424,8 +424,8 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
/* Build and checksum header */
dh = dccp_zeroed_hdr(skb, dccp_header_size);
- dh->dccph_sport = inet_rsk(req)->loc_port;
- dh->dccph_dport = inet_rsk(req)->rmt_port;
+ dh->dccph_sport = htons(inet_rsk(req)->ir_num);
+ dh->dccph_dport = inet_rsk(req)->ir_rmt_port;
dh->dccph_doff = (dccp_header_size +
DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
dh->dccph_type = DCCP_PKT_RESPONSE;
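
Note on the request-sock rename above: ir_rmt_port keeps the remote port in network byte order, as rmt_port did, while ir_num holds the local port number in host byte order; that is why the minisocks.c hunk stores it via ntohs() and the output.c hunk converts it back with htons() when building the response header.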
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index ba64750f0387..eb892b4f4814 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1158,10 +1158,8 @@ static int __init dccp_init(void)
goto out_free_bind_bucket_cachep;
}
- for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) {
+ for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
- INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
- }
if (inet_ehash_locks_alloc(&dccp_hashinfo))
goto out_free_dccp_ehash;
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 2a7efe388344..e83015cecfa7 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -87,7 +87,7 @@ static void dnrmg_send_peer(struct sk_buff *skb)
}
-static unsigned int dnrmg_hook(unsigned int hook,
+static unsigned int dnrmg_hook(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index be1f64d35358..8f032bae60ad 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -58,7 +58,7 @@
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/dsa.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
__setup("ether=", netdev_boot_setup);
@@ -133,7 +133,7 @@ int eth_rebuild_header(struct sk_buff *skb)
return arp_find(eth->h_dest, skb);
#endif
default:
- printk(KERN_DEBUG
+ netdev_dbg(dev,
"%s: unable to resolve type %X addresses.\n",
dev->name, ntohs(eth->h_proto));
@@ -169,20 +169,9 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
else
skb->pkt_type = PACKET_MULTICAST;
}
-
- /*
- * This ALLMULTI check should be redundant by 1.4
- * so don't forget to remove it.
- *
- * Seems, you forgot to remove it. All silly devices
- * seems to set IFF_PROMISC.
- */
-
- else if (1 /*dev->flags&IFF_PROMISC */ ) {
- if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
- dev->dev_addr)))
- skb->pkt_type = PACKET_OTHERHOST;
- }
+ else if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+ dev->dev_addr)))
+ skb->pkt_type = PACKET_OTHERHOST;
/*
* Some variants of DSA tagging don't have an ethertype field
@@ -190,12 +179,13 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
* variants has been configured on the receiving interface,
* and if so, set skb->protocol without looking at the packet.
*/
- if (netdev_uses_dsa_tags(dev))
+ if (unlikely(netdev_uses_dsa_tags(dev)))
return htons(ETH_P_DSA);
- if (netdev_uses_trailer_tags(dev))
+
+ if (unlikely(netdev_uses_trailer_tags(dev)))
return htons(ETH_P_TRAILER);
- if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
+ if (likely(ntohs(eth->h_proto) >= ETH_P_802_3_MIN))
return eth->h_proto;
/*
@@ -204,7 +194,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
* layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
* won't work for fault tolerant netware but does for the rest.
*/
- if (skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF)
+ if (unlikely(skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF))
return htons(ETH_P_802_3);
/*
diff --git a/net/hsr/Kconfig b/net/hsr/Kconfig
new file mode 100644
index 000000000000..0d3d709052ca
--- /dev/null
+++ b/net/hsr/Kconfig
@@ -0,0 +1,27 @@
+#
+# IEC 62439-3 High-availability Seamless Redundancy
+#
+
+config HSR
+ tristate "High-availability Seamless Redundancy (HSR)"
+ ---help---
+ If you say Y here, then your Linux box will be able to act as a
+ DANH ("Doubly attached node implementing HSR"). For this to work,
+ your Linux box needs (at least) two physical Ethernet interfaces,
+ and it must be connected as a node in a ring network together with
+ other HSR capable nodes.
+
+ All Ethernet frames sent over the hsr device will be sent in both
+ directions on the ring (over both slave ports), giving a redundant,
+ instant fail-over network. Each HSR node in the ring acts like a
+ bridge for HSR frames, but filters frames that have been forwarded
+ earlier.
+
+ This code is a "best effort" to comply with the HSR standard as
+ described in IEC 62439-3:2010 (HSRv0), but no compliance tests have
+ been made.
+
+ You need to perform any and all necessary tests yourself before
+ relying on this code in a safety critical system!
+
+ If unsure, say N.
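
For illustration only: with CONFIG_HSR enabled and an iproute2 build that knows the HSR link type, a virtual HSR master over two physical slaves is typically created along these lines (interface names are placeholders):

  ip link add name hsr0 type hsr slave1 eth0 slave2 eth1
  ip link set hsr0 up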
diff --git a/net/hsr/Makefile b/net/hsr/Makefile
new file mode 100644
index 000000000000..b68359f181cc
--- /dev/null
+++ b/net/hsr/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for HSR
+#
+
+obj-$(CONFIG_HSR) += hsr.o
+
+hsr-y := hsr_main.o hsr_framereg.o hsr_device.o hsr_netlink.o
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
new file mode 100644
index 000000000000..cac505f166d5
--- /dev/null
+++ b/net/hsr/hsr_device.c
@@ -0,0 +1,596 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ * 2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *
+ * This file contains device methods for creating, using and destroying
+ * virtual HSR devices.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
+#include "hsr_device.h"
+#include "hsr_framereg.h"
+#include "hsr_main.h"
+
+
+static bool is_admin_up(struct net_device *dev)
+{
+ return dev && (dev->flags & IFF_UP);
+}
+
+static bool is_slave_up(struct net_device *dev)
+{
+ return dev && is_admin_up(dev) && netif_oper_up(dev);
+}
+
+static void __hsr_set_operstate(struct net_device *dev, int transition)
+{
+ write_lock_bh(&dev_base_lock);
+ if (dev->operstate != transition) {
+ dev->operstate = transition;
+ write_unlock_bh(&dev_base_lock);
+ netdev_state_change(dev);
+ } else {
+ write_unlock_bh(&dev_base_lock);
+ }
+}
+
+void hsr_set_operstate(struct net_device *hsr_dev, struct net_device *slave1,
+ struct net_device *slave2)
+{
+ if (!is_admin_up(hsr_dev)) {
+ __hsr_set_operstate(hsr_dev, IF_OPER_DOWN);
+ return;
+ }
+
+ if (is_slave_up(slave1) || is_slave_up(slave2))
+ __hsr_set_operstate(hsr_dev, IF_OPER_UP);
+ else
+ __hsr_set_operstate(hsr_dev, IF_OPER_LOWERLAYERDOWN);
+}
+
+void hsr_set_carrier(struct net_device *hsr_dev, struct net_device *slave1,
+ struct net_device *slave2)
+{
+ if (is_slave_up(slave1) || is_slave_up(slave2))
+ netif_carrier_on(hsr_dev);
+ else
+ netif_carrier_off(hsr_dev);
+}
+
+
+void hsr_check_announce(struct net_device *hsr_dev, int old_operstate)
+{
+ struct hsr_priv *hsr_priv;
+
+ hsr_priv = netdev_priv(hsr_dev);
+
+ if ((hsr_dev->operstate == IF_OPER_UP) && (old_operstate != IF_OPER_UP)) {
+ /* Went up */
+ hsr_priv->announce_count = 0;
+ hsr_priv->announce_timer.expires = jiffies +
+ msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+ add_timer(&hsr_priv->announce_timer);
+ }
+
+ if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
+ /* Went down */
+ del_timer(&hsr_priv->announce_timer);
+}
+
+
+int hsr_get_max_mtu(struct hsr_priv *hsr_priv)
+{
+ int mtu_max;
+
+ if (hsr_priv->slave[0] && hsr_priv->slave[1])
+ mtu_max = min(hsr_priv->slave[0]->mtu, hsr_priv->slave[1]->mtu);
+ else if (hsr_priv->slave[0])
+ mtu_max = hsr_priv->slave[0]->mtu;
+ else if (hsr_priv->slave[1])
+ mtu_max = hsr_priv->slave[1]->mtu;
+ else
+ mtu_max = HSR_TAGLEN;
+
+ return mtu_max - HSR_TAGLEN;
+}
+
+static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct hsr_priv *hsr_priv;
+
+ hsr_priv = netdev_priv(dev);
+
+ if (new_mtu > hsr_get_max_mtu(hsr_priv)) {
+ netdev_info(hsr_priv->dev, "A HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
+ HSR_TAGLEN);
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int hsr_dev_open(struct net_device *dev)
+{
+ struct hsr_priv *hsr_priv;
+ int i;
+ char *slave_name;
+
+ hsr_priv = netdev_priv(dev);
+
+ for (i = 0; i < HSR_MAX_SLAVE; i++) {
+ if (hsr_priv->slave[i])
+ slave_name = hsr_priv->slave[i]->name;
+ else
+ slave_name = "null";
+
+ if (!is_slave_up(hsr_priv->slave[i]))
+ netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a working HSR network\n",
+ 'A' + i, slave_name);
+ }
+
+ return 0;
+}
+
+static int hsr_dev_close(struct net_device *dev)
+{
+ /* Nothing to do here. We could try to restore the state of the slaves
+ * to what they were before being changed by the hsr master dev's state,
+ * but they might have been changed manually in the meantime too, so
+ * taking them up or down here might be confusing and is probably not a
+ * good idea.
+ */
+ return 0;
+}
+
+
+static void hsr_fill_tag(struct hsr_ethhdr *hsr_ethhdr, struct hsr_priv *hsr_priv)
+{
+ unsigned long irqflags;
+
+ /* IEC 62439-1:2010, p 48, says the 4-bit "path" field can take values
+ * between 0001-1001 ("ring identifier", for regular HSR frames),
+ * or 1111 ("HSR management", supervision frames). Unfortunately, the
+ * spec writers forgot to explain what a "ring identifier" is, or
+ * how it is used. So we just set this to 0001 for regular frames,
+ * and 1111 for supervision frames.
+ */
+ set_hsr_tag_path(&hsr_ethhdr->hsr_tag, 0x1);
+
+ /* IEC 62439-1:2010, p 12: "The link service data unit in an Ethernet
+ * frame is the content of the frame located between the Length/Type
+ * field and the Frame Check Sequence."
+ *
+ * IEC 62439-3, p 48, specifies the "original LPDU" to include the
+ * original "LT" field (what "LT" means is not explained anywhere as
+ * far as I can see - perhaps "Length/Type"?). So LSDU_size might
+ * equal original length + 2.
+ * Also, the fact that this field is not used anywhere (might be used
+ * by a RedBox connecting HSR and PRP nets?) means I cannot test its
+ * correctness. Instead of guessing, I set this to 0 here, to make any
+ * problems immediately apparent. Anyone using this driver with PRP/HSR
+ * RedBoxes might need to fix this...
+ */
+ set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, 0);
+
+ spin_lock_irqsave(&hsr_priv->seqnr_lock, irqflags);
+ hsr_ethhdr->hsr_tag.sequence_nr = htons(hsr_priv->sequence_nr);
+ hsr_priv->sequence_nr++;
+ spin_unlock_irqrestore(&hsr_priv->seqnr_lock, irqflags);
+
+ hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
+
+ hsr_ethhdr->ethhdr.h_proto = htons(ETH_P_PRP);
+}
+
+static int slave_xmit(struct sk_buff *skb, struct hsr_priv *hsr_priv,
+ enum hsr_dev_idx dev_idx)
+{
+ struct hsr_ethhdr *hsr_ethhdr;
+
+ hsr_ethhdr = (struct hsr_ethhdr *) skb->data;
+
+ skb->dev = hsr_priv->slave[dev_idx];
+
+ hsr_addr_subst_dest(hsr_priv, &hsr_ethhdr->ethhdr, dev_idx);
+
+ /* Address substitution (IEC62439-3 pp 26, 50): replace mac
+ * address of outgoing frame with that of the outgoing slave's.
+ */
+ memcpy(hsr_ethhdr->ethhdr.h_source, skb->dev->dev_addr, ETH_ALEN);
+
+ return dev_queue_xmit(skb);
+}
+
+
+static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct hsr_priv *hsr_priv;
+ struct hsr_ethhdr *hsr_ethhdr;
+ struct sk_buff *skb2;
+ int res1, res2;
+
+ hsr_priv = netdev_priv(dev);
+ hsr_ethhdr = (struct hsr_ethhdr *) skb->data;
+
+ if ((skb->protocol != htons(ETH_P_PRP)) ||
+ (hsr_ethhdr->ethhdr.h_proto != htons(ETH_P_PRP))) {
+ hsr_fill_tag(hsr_ethhdr, hsr_priv);
+ skb->protocol = htons(ETH_P_PRP);
+ }
+
+ skb2 = pskb_copy(skb, GFP_ATOMIC);
+
+ res1 = NET_XMIT_DROP;
+ if (likely(hsr_priv->slave[HSR_DEV_SLAVE_A]))
+ res1 = slave_xmit(skb, hsr_priv, HSR_DEV_SLAVE_A);
+
+ res2 = NET_XMIT_DROP;
+ if (likely(skb2 && hsr_priv->slave[HSR_DEV_SLAVE_B]))
+ res2 = slave_xmit(skb2, hsr_priv, HSR_DEV_SLAVE_B);
+
+ if (likely(res1 == NET_XMIT_SUCCESS || res1 == NET_XMIT_CN ||
+ res2 == NET_XMIT_SUCCESS || res2 == NET_XMIT_CN)) {
+ hsr_priv->dev->stats.tx_packets++;
+ hsr_priv->dev->stats.tx_bytes += skb->len;
+ } else {
+ hsr_priv->dev->stats.tx_dropped++;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+
+static int hsr_header_create(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
+{
+ int res;
+
+ /* Make room for the HSR tag now. We will fill it in later (in
+ * hsr_dev_xmit)
+ */
+ if (skb_headroom(skb) < HSR_TAGLEN + ETH_HLEN)
+ return -ENOBUFS;
+ skb_push(skb, HSR_TAGLEN);
+
+ /* To allow VLAN/HSR combos we should probably use
+ * res = dev_hard_header(skb, dev, type, daddr, saddr, len + HSR_TAGLEN);
+ * here instead. It would require other changes too, though - e.g.
+ * separate headers for each slave etc...
+ */
+ res = eth_header(skb, dev, type, daddr, saddr, len + HSR_TAGLEN);
+ if (res <= 0)
+ return res;
+ skb_reset_mac_header(skb);
+
+ return res + HSR_TAGLEN;
+}
+
+
+static const struct header_ops hsr_header_ops = {
+ .create = hsr_header_create,
+ .parse = eth_header_parse,
+};
+
+
+/* HSR:2010 supervision frames should be padded so that the whole frame,
+ * including headers and FCS, is 64 bytes (without VLAN).
+ */
+static int hsr_pad(int size)
+{
+ const int min_size = ETH_ZLEN - HSR_TAGLEN - ETH_HLEN;
+
+ if (size >= min_size)
+ return size;
+ return min_size;
+}
+
+static void send_hsr_supervision_frame(struct net_device *hsr_dev, u8 type)
+{
+ struct hsr_priv *hsr_priv;
+ struct sk_buff *skb;
+ int hlen, tlen;
+ struct hsr_sup_tag *hsr_stag;
+ struct hsr_sup_payload *hsr_sp;
+ unsigned long irqflags;
+
+ hlen = LL_RESERVED_SPACE(hsr_dev);
+ tlen = hsr_dev->needed_tailroom;
+ skb = alloc_skb(hsr_pad(sizeof(struct hsr_sup_payload)) + hlen + tlen,
+ GFP_ATOMIC);
+
+ if (skb == NULL)
+ return;
+
+ hsr_priv = netdev_priv(hsr_dev);
+
+ skb_reserve(skb, hlen);
+
+ skb->dev = hsr_dev;
+ skb->protocol = htons(ETH_P_PRP);
+ skb->priority = TC_PRIO_CONTROL;
+
+ if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
+ hsr_priv->sup_multicast_addr,
+ skb->dev->dev_addr, skb->len) < 0)
+ goto out;
+
+ skb_pull(skb, sizeof(struct ethhdr));
+ hsr_stag = (typeof(hsr_stag)) skb->data;
+
+ set_hsr_stag_path(hsr_stag, 0xf);
+ set_hsr_stag_HSR_Ver(hsr_stag, 0);
+
+ spin_lock_irqsave(&hsr_priv->seqnr_lock, irqflags);
+ hsr_stag->sequence_nr = htons(hsr_priv->sequence_nr);
+ hsr_priv->sequence_nr++;
+ spin_unlock_irqrestore(&hsr_priv->seqnr_lock, irqflags);
+
+ hsr_stag->HSR_TLV_Type = type;
+ hsr_stag->HSR_TLV_Length = 12;
+
+ skb_push(skb, sizeof(struct ethhdr));
+
+ /* Payload: MacAddressA */
+ hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp));
+ memcpy(hsr_sp->MacAddressA, hsr_dev->dev_addr, ETH_ALEN);
+
+ dev_queue_xmit(skb);
+ return;
+
+out:
+ kfree_skb(skb);
+}
+
+
+/* Announce (supervision frame) timer function
+ */
+static void hsr_announce(unsigned long data)
+{
+ struct hsr_priv *hsr_priv;
+
+ hsr_priv = (struct hsr_priv *) data;
+
+ if (hsr_priv->announce_count < 3) {
+ send_hsr_supervision_frame(hsr_priv->dev, HSR_TLV_ANNOUNCE);
+ hsr_priv->announce_count++;
+ } else {
+ send_hsr_supervision_frame(hsr_priv->dev, HSR_TLV_LIFE_CHECK);
+ }
+
+ if (hsr_priv->announce_count < 3)
+ hsr_priv->announce_timer.expires = jiffies +
+ msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+ else
+ hsr_priv->announce_timer.expires = jiffies +
+ msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+
+ if (is_admin_up(hsr_priv->dev))
+ add_timer(&hsr_priv->announce_timer);
+}
+
+
+static void restore_slaves(struct net_device *hsr_dev)
+{
+ struct hsr_priv *hsr_priv;
+ int i;
+ int res;
+
+ hsr_priv = netdev_priv(hsr_dev);
+
+ rtnl_lock();
+
+ /* Restore promiscuity */
+ for (i = 0; i < HSR_MAX_SLAVE; i++) {
+ if (!hsr_priv->slave[i])
+ continue;
+ res = dev_set_promiscuity(hsr_priv->slave[i], -1);
+ if (res)
+ netdev_info(hsr_dev,
+ "Cannot restore slave promiscuity (%s, %d)\n",
+ hsr_priv->slave[i]->name, res);
+ }
+
+ rtnl_unlock();
+}
+
+static void reclaim_hsr_dev(struct rcu_head *rh)
+{
+ struct hsr_priv *hsr_priv;
+
+ hsr_priv = container_of(rh, struct hsr_priv, rcu_head);
+ free_netdev(hsr_priv->dev);
+}
+
+
+/* According to comments in the declaration of struct net_device, this function
+ * is "Called from unregister, can be used to call free_netdev". Ok then...
+ */
+static void hsr_dev_destroy(struct net_device *hsr_dev)
+{
+ struct hsr_priv *hsr_priv;
+
+ hsr_priv = netdev_priv(hsr_dev);
+
+ del_timer(&hsr_priv->announce_timer);
+ unregister_hsr_master(hsr_priv); /* calls list_del_rcu on hsr_priv */
+ restore_slaves(hsr_dev);
+ call_rcu(&hsr_priv->rcu_head, reclaim_hsr_dev); /* reclaim hsr_priv */
+}
+
+static const struct net_device_ops hsr_device_ops = {
+ .ndo_change_mtu = hsr_dev_change_mtu,
+ .ndo_open = hsr_dev_open,
+ .ndo_stop = hsr_dev_close,
+ .ndo_start_xmit = hsr_dev_xmit,
+};
+
+
+void hsr_dev_setup(struct net_device *dev)
+{
+ random_ether_addr(dev->dev_addr);
+
+ ether_setup(dev);
+ dev->header_ops = &hsr_header_ops;
+ dev->netdev_ops = &hsr_device_ops;
+ dev->tx_queue_len = 0;
+
+ dev->destructor = hsr_dev_destroy;
+}
+
+
+/* Return true if dev is a HSR master; return false otherwise.
+ */
+bool is_hsr_master(struct net_device *dev)
+{
+ return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
+}
+
+static int check_slave_ok(struct net_device *dev)
+{
+ /* Don't allow HSR on non-ethernet like devices */
+ if ((dev->flags & IFF_LOOPBACK) || (dev->type != ARPHRD_ETHER) ||
+ (dev->addr_len != ETH_ALEN)) {
+ netdev_info(dev, "Cannot use loopback or non-ethernet device as HSR slave.\n");
+ return -EINVAL;
+ }
+
+ /* Don't allow enslaving hsr devices */
+ if (is_hsr_master(dev)) {
+ netdev_info(dev, "Cannot create trees of HSR devices.\n");
+ return -EINVAL;
+ }
+
+ if (is_hsr_slave(dev)) {
+ netdev_info(dev, "This device is already a HSR slave.\n");
+ return -EINVAL;
+ }
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ netdev_info(dev, "HSR on top of VLAN is not yet supported in this driver.\n");
+ return -EINVAL;
+ }
+
+ /* HSR over bonded devices has not been tested, but I'm not sure it
+ * won't work...
+ */
+
+ return 0;
+}
+
+
+/* Default multicast address for HSR Supervision frames */
+static const unsigned char def_multicast_addr[ETH_ALEN] = {
+ 0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
+};
+
+int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ unsigned char multicast_spec)
+{
+ struct hsr_priv *hsr_priv;
+ int i;
+ int res;
+
+ hsr_priv = netdev_priv(hsr_dev);
+ hsr_priv->dev = hsr_dev;
+ INIT_LIST_HEAD(&hsr_priv->node_db);
+ INIT_LIST_HEAD(&hsr_priv->self_node_db);
+ for (i = 0; i < HSR_MAX_SLAVE; i++)
+ hsr_priv->slave[i] = slave[i];
+
+ spin_lock_init(&hsr_priv->seqnr_lock);
+ /* Overflow soon to find bugs easier: */
+ hsr_priv->sequence_nr = USHRT_MAX - 1024;
+
+ init_timer(&hsr_priv->announce_timer);
+ hsr_priv->announce_timer.function = hsr_announce;
+ hsr_priv->announce_timer.data = (unsigned long) hsr_priv;
+
+ memcpy(hsr_priv->sup_multicast_addr, def_multicast_addr, ETH_ALEN);
+ hsr_priv->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
+
+/* FIXME: should I modify the value of these?
+ *
+ * - hsr_dev->flags - i.e.
+ * IFF_MASTER/SLAVE?
+ * - hsr_dev->priv_flags - i.e.
+ * IFF_EBRIDGE?
+ * IFF_TX_SKB_SHARING?
+ * IFF_HSR_MASTER/SLAVE?
+ */
+
+ for (i = 0; i < HSR_MAX_SLAVE; i++) {
+ res = check_slave_ok(slave[i]);
+ if (res)
+ return res;
+ }
+
+ hsr_dev->features = slave[0]->features & slave[1]->features;
+ /* Prevent recursive tx locking */
+ hsr_dev->features |= NETIF_F_LLTX;
+ /* VLAN on top of HSR needs testing and probably some work on
+ * hsr_header_create() etc.
+ */
+ hsr_dev->features |= NETIF_F_VLAN_CHALLENGED;
+
+ /* Set hsr_dev's MAC address to that of mac_slave1 */
+ memcpy(hsr_dev->dev_addr, hsr_priv->slave[0]->dev_addr, ETH_ALEN);
+
+ /* Set required header length */
+ for (i = 0; i < HSR_MAX_SLAVE; i++) {
+ if (slave[i]->hard_header_len + HSR_TAGLEN >
+ hsr_dev->hard_header_len)
+ hsr_dev->hard_header_len =
+ slave[i]->hard_header_len + HSR_TAGLEN;
+ }
+
+ /* MTU */
+ for (i = 0; i < HSR_MAX_SLAVE; i++)
+ if (slave[i]->mtu - HSR_TAGLEN < hsr_dev->mtu)
+ hsr_dev->mtu = slave[i]->mtu - HSR_TAGLEN;
+
+ /* Make sure the 1st call to netif_carrier_on() gets through */
+ netif_carrier_off(hsr_dev);
+
+ /* Promiscuity */
+ for (i = 0; i < HSR_MAX_SLAVE; i++) {
+ res = dev_set_promiscuity(slave[i], 1);
+ if (res) {
+ netdev_info(hsr_dev, "Cannot set slave promiscuity (%s, %d)\n",
+ slave[i]->name, res);
+ goto fail;
+ }
+ }
+
+ /* Make sure we recognize frames from ourselves in hsr_rcv() */
+ res = hsr_create_self_node(&hsr_priv->self_node_db,
+ hsr_dev->dev_addr,
+ hsr_priv->slave[1]->dev_addr);
+ if (res < 0)
+ goto fail;
+
+ res = register_netdevice(hsr_dev);
+ if (res)
+ goto fail;
+
+ register_hsr_master(hsr_priv);
+
+ return 0;
+
+fail:
+ restore_slaves(hsr_dev);
+ return res;
+}
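
To make the framing code above easier to follow: hsr_fill_tag() prepends a 6-byte HSR tag, and the sketch below is a plain userspace illustration in which the struct name and the sample values are made up, with only the field layout taken from the driver comments (4-bit path, 12-bit LSDU size, sequence number, encapsulated EtherType). On the padding side, with the conventional ETH_ZLEN of 60 and ETH_HLEN of 14 and the 6-byte tag, hsr_pad() works out to a minimum supervision payload of 60 - 14 - 6 = 40 bytes, so the finished frame reaches 64 bytes once the FCS is counted.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Illustrative 6-byte HSR tag, mirroring what hsr_fill_tag() writes:
 * the top 4 bits of the first field carry the path (0x1 for normal
 * frames, 0xf for supervision), the low 12 bits the LSDU size (left at
 * 0 by this driver), followed by the sequence number and the EtherType
 * of the encapsulated frame.
 */
struct hsr_tag_sketch {
	uint16_t path_and_LSDU_size;
	uint16_t sequence_nr;
	uint16_t encap_proto;
} __attribute__((packed));

int main(void)
{
	struct hsr_tag_sketch tag;

	tag.path_and_LSDU_size = htons(0x1 << 12);	/* path 0x1, LSDU size 0 */
	tag.sequence_nr = htons(42);
	tag.encap_proto = htons(0x0800);		/* e.g. an encapsulated IPv4 frame */

	printf("HSR tag on the wire: %04x %04x %04x\n",
	       ntohs(tag.path_and_LSDU_size), ntohs(tag.sequence_nr),
	       ntohs(tag.encap_proto));
	return 0;
}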
diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h
new file mode 100644
index 000000000000..2c7148e73914
--- /dev/null
+++ b/net/hsr/hsr_device.h
@@ -0,0 +1,29 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ * 2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ */
+
+#ifndef __HSR_DEVICE_H
+#define __HSR_DEVICE_H
+
+#include <linux/netdevice.h>
+#include "hsr_main.h"
+
+void hsr_dev_setup(struct net_device *dev);
+int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ unsigned char multicast_spec);
+void hsr_set_operstate(struct net_device *hsr_dev, struct net_device *slave1,
+ struct net_device *slave2);
+void hsr_set_carrier(struct net_device *hsr_dev, struct net_device *slave1,
+ struct net_device *slave2);
+void hsr_check_announce(struct net_device *hsr_dev, int old_operstate);
+bool is_hsr_master(struct net_device *dev);
+int hsr_get_max_mtu(struct hsr_priv *hsr_priv);
+
+#endif /* __HSR_DEVICE_H */
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
new file mode 100644
index 000000000000..003f5bb3acd2
--- /dev/null
+++ b/net/hsr/hsr_framereg.c
@@ -0,0 +1,503 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ * 2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *
+ * The HSR spec says never to forward the same frame twice on the same
+ * interface. A frame is identified by its source MAC address and its HSR
+ * sequence number. This code keeps track of senders and their sequence numbers
+ * to allow filtering of duplicate frames, and to detect HSR ring errors.
+ */
+
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/rculist.h>
+#include "hsr_main.h"
+#include "hsr_framereg.h"
+#include "hsr_netlink.h"
+
+
+struct node_entry {
+ struct list_head mac_list;
+ unsigned char MacAddressA[ETH_ALEN];
+ unsigned char MacAddressB[ETH_ALEN];
+ enum hsr_dev_idx AddrB_if; /* The local slave through which AddrB
+ * frames are received from this node
+ */
+ unsigned long time_in[HSR_MAX_SLAVE];
+ bool time_in_stale[HSR_MAX_SLAVE];
+ u16 seq_out[HSR_MAX_DEV];
+ struct rcu_head rcu_head;
+};
+
+/* TODO: use hash lists for mac addresses (linux/jhash.h)? */
+
+
+
+/* Search for mac entry. Caller must hold rcu read lock.
+ */
+static struct node_entry *find_node_by_AddrA(struct list_head *node_db,
+ const unsigned char addr[ETH_ALEN])
+{
+ struct node_entry *node;
+
+ list_for_each_entry_rcu(node, node_db, mac_list) {
+ if (ether_addr_equal(node->MacAddressA, addr))
+ return node;
+ }
+
+ return NULL;
+}
+
+
+/* Search for mac entry. Caller must hold rcu read lock.
+ */
+static struct node_entry *find_node_by_AddrB(struct list_head *node_db,
+ const unsigned char addr[ETH_ALEN])
+{
+ struct node_entry *node;
+
+ list_for_each_entry_rcu(node, node_db, mac_list) {
+ if (ether_addr_equal(node->MacAddressB, addr))
+ return node;
+ }
+
+ return NULL;
+}
+
+
+/* Search for mac entry. Caller must hold rcu read lock.
+ */
+struct node_entry *hsr_find_node(struct list_head *node_db, struct sk_buff *skb)
+{
+ struct node_entry *node;
+ struct ethhdr *ethhdr;
+
+ if (!skb_mac_header_was_set(skb))
+ return NULL;
+
+ ethhdr = (struct ethhdr *) skb_mac_header(skb);
+
+ list_for_each_entry_rcu(node, node_db, mac_list) {
+ if (ether_addr_equal(node->MacAddressA, ethhdr->h_source))
+ return node;
+ if (ether_addr_equal(node->MacAddressB, ethhdr->h_source))
+ return node;
+ }
+
+ return NULL;
+}
+
+
+/* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
+ * frames from self that have been looped over the HSR ring.
+ */
+int hsr_create_self_node(struct list_head *self_node_db,
+ unsigned char addr_a[ETH_ALEN],
+ unsigned char addr_b[ETH_ALEN])
+{
+ struct node_entry *node, *oldnode;
+
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ memcpy(node->MacAddressA, addr_a, ETH_ALEN);
+ memcpy(node->MacAddressB, addr_b, ETH_ALEN);
+
+ rcu_read_lock();
+ oldnode = list_first_or_null_rcu(self_node_db,
+ struct node_entry, mac_list);
+ if (oldnode) {
+ list_replace_rcu(&oldnode->mac_list, &node->mac_list);
+ rcu_read_unlock();
+ synchronize_rcu();
+ kfree(oldnode);
+ } else {
+ rcu_read_unlock();
+ list_add_tail_rcu(&node->mac_list, self_node_db);
+ }
+
+ return 0;
+}
+
+static void node_entry_reclaim(struct rcu_head *rh)
+{
+ kfree(container_of(rh, struct node_entry, rcu_head));
+}
+
+
+/* Add/merge node to the database of nodes. 'skb' must contain an HSR
+ * supervision frame.
+ * - If the supervision header's MacAddressA field is not yet in the database,
+ * this frame is from a hitherto unknown node - add it to the database.
+ * - If the sender's MAC address is not the same as its MacAddressA address,
+ * the node is using PICS_SUBS (address substitution). Record the sender's
+ * address as the node's MacAddressB.
+ *
+ * This function needs to work even if the sender node has changed one of its
+ * slaves' MAC addresses. In that case, there are four scenarios, described
+ * by (Addr-changed, received-from) pairs as follows. Note that changing the
+ * SlaveA address is equal to changing the node's own address:
+ *
+ * - (AddrB, SlaveB): The new AddrB will be recorded by PICS_SUBS code since
+ * node == NULL.
+ * - (AddrB, SlaveA): Will work as usual (the AddrB change won't be detected
+ * from this frame).
+ *
+ * - (AddrA, SlaveB): The old node will be found. We need to detect this and
+ * remove the node.
+ * - (AddrA, SlaveA): A new node will be registered (non-PICS_SUBS at first).
+ * The old one will be pruned after HSR_NODE_FORGET_TIME.
+ *
+ * We also need to detect if the sender's SlaveA and SlaveB cables have been
+ * swapped.
+ */
+struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
+ struct node_entry *node,
+ struct sk_buff *skb,
+ enum hsr_dev_idx dev_idx)
+{
+ struct hsr_sup_payload *hsr_sp;
+ struct hsr_ethhdr_sp *hsr_ethsup;
+ int i;
+ unsigned long now;
+
+ hsr_ethsup = (struct hsr_ethhdr_sp *) skb_mac_header(skb);
+ hsr_sp = (struct hsr_sup_payload *) skb->data;
+
+ if (node && !ether_addr_equal(node->MacAddressA, hsr_sp->MacAddressA)) {
+ /* Node has changed its AddrA, frame was received from SlaveB */
+ list_del_rcu(&node->mac_list);
+ call_rcu(&node->rcu_head, node_entry_reclaim);
+ node = NULL;
+ }
+
+ if (node && (dev_idx == node->AddrB_if) &&
+ !ether_addr_equal(node->MacAddressB, hsr_ethsup->ethhdr.h_source)) {
+ /* Cables have been swapped */
+ list_del_rcu(&node->mac_list);
+ call_rcu(&node->rcu_head, node_entry_reclaim);
+ node = NULL;
+ }
+
+ if (node && (dev_idx != node->AddrB_if) &&
+ (node->AddrB_if != HSR_DEV_NONE) &&
+ !ether_addr_equal(node->MacAddressA, hsr_ethsup->ethhdr.h_source)) {
+ /* Cables have been swapped */
+ list_del_rcu(&node->mac_list);
+ call_rcu(&node->rcu_head, node_entry_reclaim);
+ node = NULL;
+ }
+
+ if (node)
+ return node;
+
+ node = find_node_by_AddrA(&hsr_priv->node_db, hsr_sp->MacAddressA);
+ if (node) {
+ /* Node is known, but frame was received from an unknown
+ * address. Node is PICS_SUBS capable; merge its AddrB.
+ */
+ memcpy(node->MacAddressB, hsr_ethsup->ethhdr.h_source, ETH_ALEN);
+ node->AddrB_if = dev_idx;
+ return node;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_ATOMIC);
+ if (!node)
+ return NULL;
+
+ memcpy(node->MacAddressA, hsr_sp->MacAddressA, ETH_ALEN);
+ memcpy(node->MacAddressB, hsr_ethsup->ethhdr.h_source, ETH_ALEN);
+ if (!ether_addr_equal(hsr_sp->MacAddressA, hsr_ethsup->ethhdr.h_source))
+ node->AddrB_if = dev_idx;
+ else
+ node->AddrB_if = HSR_DEV_NONE;
+
+ /* We are only interested in time diffs here, so use current jiffies
+ * as initialization. (0 could trigger a spurious ring error warning).
+ */
+ now = jiffies;
+ for (i = 0; i < HSR_MAX_SLAVE; i++)
+ node->time_in[i] = now;
+ for (i = 0; i < HSR_MAX_DEV; i++)
+ node->seq_out[i] = ntohs(hsr_ethsup->hsr_sup.sequence_nr) - 1;
+
+ list_add_tail_rcu(&node->mac_list, &hsr_priv->node_db);
+
+ return node;
+}
+
+
+/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
+ *
+ * If the frame was sent by a node's B interface, replace the sender
+ * address with that node's "official" address (MacAddressA) so that upper
+ * layers recognize where it came from.
+ */
+void hsr_addr_subst_source(struct hsr_priv *hsr_priv, struct sk_buff *skb)
+{
+ struct ethhdr *ethhdr;
+ struct node_entry *node;
+
+ if (!skb_mac_header_was_set(skb)) {
+ WARN_ONCE(1, "%s: Mac header not set\n", __func__);
+ return;
+ }
+ ethhdr = (struct ethhdr *) skb_mac_header(skb);
+
+ rcu_read_lock();
+ node = find_node_by_AddrB(&hsr_priv->node_db, ethhdr->h_source);
+ if (node)
+ memcpy(ethhdr->h_source, node->MacAddressA, ETH_ALEN);
+ rcu_read_unlock();
+}
+
+
+/* 'skb' is a frame meant for another host.
+ * 'hsr_dev_idx' is the HSR index of the outgoing device
+ *
+ * Substitute the target (dest) MAC address if necessary, so that it matches the
+ * recipient interface MAC address, regardless of whether that is the
+ * recipient's A or B interface.
+ * This is needed to keep the packets flowing through switches that learn on
+ * which "side" the different interfaces are.
+ */
+void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
+ enum hsr_dev_idx dev_idx)
+{
+ struct node_entry *node;
+
+ rcu_read_lock();
+ node = find_node_by_AddrA(&hsr_priv->node_db, ethhdr->h_dest);
+ if (node && (node->AddrB_if == dev_idx))
+ memcpy(ethhdr->h_dest, node->MacAddressB, ETH_ALEN);
+ rcu_read_unlock();
+}
+
+
+/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
+ * false otherwise.
+ */
+static bool seq_nr_after(u16 a, u16 b)
+{
+ /* Remove inconsistency where
+ * seq_nr_after(a, b) == seq_nr_before(a, b) */
+ if ((int) b - a == 32768)
+ return false;
+
+ return (((s16) (b - a)) < 0);
+}
+#define seq_nr_before(a, b) seq_nr_after((b), (a))
+#define seq_nr_after_or_eq(a, b) (!seq_nr_before((a), (b)))
+#define seq_nr_before_or_eq(a, b) (!seq_nr_after((a), (b)))
+
+
+void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx)
+{
+ if ((dev_idx < 0) || (dev_idx >= HSR_MAX_DEV)) {
+ WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx);
+ return;
+ }
+ node->time_in[dev_idx] = jiffies;
+ node->time_in_stale[dev_idx] = false;
+}
+
+
+/* 'skb' is a HSR Ethernet frame (with a HSR tag inserted), with a valid
+ * ethhdr->h_source address and skb->mac_header set.
+ *
+ * Return:
+ * 1 if frame can be shown to have been sent recently on this interface,
+ * 0 otherwise, or
+ * negative error code on error
+ */
+int hsr_register_frame_out(struct node_entry *node, enum hsr_dev_idx dev_idx,
+ struct sk_buff *skb)
+{
+ struct hsr_ethhdr *hsr_ethhdr;
+ u16 sequence_nr;
+
+ if ((dev_idx < 0) || (dev_idx >= HSR_MAX_DEV)) {
+ WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx);
+ return -EINVAL;
+ }
+ if (!skb_mac_header_was_set(skb)) {
+ WARN_ONCE(1, "%s: Mac header not set\n", __func__);
+ return -EINVAL;
+ }
+ hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb);
+
+ sequence_nr = ntohs(hsr_ethhdr->hsr_tag.sequence_nr);
+ if (seq_nr_before_or_eq(sequence_nr, node->seq_out[dev_idx]))
+ return 1;
+
+ node->seq_out[dev_idx] = sequence_nr;
+ return 0;
+}
+
+
+
+static bool is_late(struct node_entry *node, enum hsr_dev_idx dev_idx)
+{
+ enum hsr_dev_idx other;
+
+ if (node->time_in_stale[dev_idx])
+ return true;
+
+ if (dev_idx == HSR_DEV_SLAVE_A)
+ other = HSR_DEV_SLAVE_B;
+ else
+ other = HSR_DEV_SLAVE_A;
+
+ if (node->time_in_stale[other])
+ return false;
+
+ if (time_after(node->time_in[other], node->time_in[dev_idx] +
+ msecs_to_jiffies(MAX_SLAVE_DIFF)))
+ return true;
+
+ return false;
+}
+
+
+/* Remove stale sequence_nr records. Called by timer every
+ * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
+ */
+void hsr_prune_nodes(struct hsr_priv *hsr_priv)
+{
+ struct node_entry *node;
+ unsigned long timestamp;
+ unsigned long time_a, time_b;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(node, &hsr_priv->node_db, mac_list) {
+ /* Shorthand */
+ time_a = node->time_in[HSR_DEV_SLAVE_A];
+ time_b = node->time_in[HSR_DEV_SLAVE_B];
+
+ /* Check for timestamps old enough to risk wrap-around */
+ if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET/2))
+ node->time_in_stale[HSR_DEV_SLAVE_A] = true;
+ if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET/2))
+ node->time_in_stale[HSR_DEV_SLAVE_B] = true;
+
+ /* Get age of newest frame from node.
+ * At least one time_in is OK here; nodes get pruned long
+ * before both time_ins can get stale
+ */
+ timestamp = time_a;
+ if (node->time_in_stale[HSR_DEV_SLAVE_A] ||
+ (!node->time_in_stale[HSR_DEV_SLAVE_B] &&
+ time_after(time_b, time_a)))
+ timestamp = time_b;
+
+ /* Warn of ring error only as long as we get frames at all */
+ if (time_is_after_jiffies(timestamp +
+ msecs_to_jiffies(1.5*MAX_SLAVE_DIFF))) {
+
+ if (is_late(node, HSR_DEV_SLAVE_A))
+ hsr_nl_ringerror(hsr_priv, node->MacAddressA,
+ HSR_DEV_SLAVE_A);
+ else if (is_late(node, HSR_DEV_SLAVE_B))
+ hsr_nl_ringerror(hsr_priv, node->MacAddressA,
+ HSR_DEV_SLAVE_B);
+ }
+
+ /* Prune old entries */
+ if (time_is_before_jiffies(timestamp +
+ msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
+ hsr_nl_nodedown(hsr_priv, node->MacAddressA);
+ list_del_rcu(&node->mac_list);
+ /* Note that we need to free this entry later: */
+ call_rcu(&node->rcu_head, node_entry_reclaim);
+ }
+ }
+ rcu_read_unlock();
+}
+
+
+void *hsr_get_next_node(struct hsr_priv *hsr_priv, void *_pos,
+ unsigned char addr[ETH_ALEN])
+{
+ struct node_entry *node;
+
+ if (!_pos) {
+ node = list_first_or_null_rcu(&hsr_priv->node_db,
+ struct node_entry, mac_list);
+ if (node)
+ memcpy(addr, node->MacAddressA, ETH_ALEN);
+ return node;
+ }
+
+ node = _pos;
+ list_for_each_entry_continue_rcu(node, &hsr_priv->node_db, mac_list) {
+ memcpy(addr, node->MacAddressA, ETH_ALEN);
+ return node;
+ }
+
+ return NULL;
+}
+
+
+int hsr_get_node_data(struct hsr_priv *hsr_priv,
+ const unsigned char *addr,
+ unsigned char addr_b[ETH_ALEN],
+ unsigned int *addr_b_ifindex,
+ int *if1_age,
+ u16 *if1_seq,
+ int *if2_age,
+ u16 *if2_seq)
+{
+ struct node_entry *node;
+ unsigned long tdiff;
+
+
+ rcu_read_lock();
+ node = find_node_by_AddrA(&hsr_priv->node_db, addr);
+ if (!node) {
+ rcu_read_unlock();
+ return -ENOENT; /* No such entry */
+ }
+
+ memcpy(addr_b, node->MacAddressB, ETH_ALEN);
+
+ tdiff = jiffies - node->time_in[HSR_DEV_SLAVE_A];
+ if (node->time_in_stale[HSR_DEV_SLAVE_A])
+ *if1_age = INT_MAX;
+#if HZ <= MSEC_PER_SEC
+ else if (tdiff > msecs_to_jiffies(INT_MAX))
+ *if1_age = INT_MAX;
+#endif
+ else
+ *if1_age = jiffies_to_msecs(tdiff);
+
+ tdiff = jiffies - node->time_in[HSR_DEV_SLAVE_B];
+ if (node->time_in_stale[HSR_DEV_SLAVE_B])
+ *if2_age = INT_MAX;
+#if HZ <= MSEC_PER_SEC
+ else if (tdiff > msecs_to_jiffies(INT_MAX))
+ *if2_age = INT_MAX;
+#endif
+ else
+ *if2_age = jiffies_to_msecs(tdiff);
+
+ /* Present sequence numbers as if they were incoming on interface */
+ *if1_seq = node->seq_out[HSR_DEV_SLAVE_B];
+ *if2_seq = node->seq_out[HSR_DEV_SLAVE_A];
+
+ if ((node->AddrB_if != HSR_DEV_NONE) && hsr_priv->slave[node->AddrB_if])
+ *addr_b_ifindex = hsr_priv->slave[node->AddrB_if]->ifindex;
+ else
+ *addr_b_ifindex = -1;
+
+ rcu_read_unlock();
+
+ return 0;
+}
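
The duplicate filtering above hinges on the modular comparison in seq_nr_after(); the small userspace mirror below (test values are arbitrary) shows how counter wrap-around and the ambiguous half-way point come out.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the driver's seq_nr_after(): 16-bit sequence numbers are
 * compared by the sign of their 16-bit difference, so values that have
 * wrapped past 65535 still compare as "newer"; a difference of exactly
 * 32768 is ambiguous and is resolved to "not after".
 */
static bool seq_nr_after(uint16_t a, uint16_t b)
{
	if ((int)b - (int)a == 32768)
		return false;
	return (int16_t)(b - a) < 0;
}

int main(void)
{
	printf("%d\n", seq_nr_after(5, 3));	/* 1: 5 comes after 3 */
	printf("%d\n", seq_nr_after(3, 5));	/* 0 */
	printf("%d\n", seq_nr_after(2, 65530));	/* 1: 2 is newer, counter wrapped */
	printf("%d\n", seq_nr_after(0, 32768));	/* 0: half-way point, resolved */
	return 0;
}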
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
new file mode 100644
index 000000000000..e6c4022030ad
--- /dev/null
+++ b/net/hsr/hsr_framereg.h
@@ -0,0 +1,53 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ * 2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ */
+
+#ifndef _HSR_FRAMEREG_H
+#define _HSR_FRAMEREG_H
+
+#include "hsr_main.h"
+
+struct node_entry;
+
+struct node_entry *hsr_find_node(struct list_head *node_db, struct sk_buff *skb);
+
+struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
+ struct node_entry *node,
+ struct sk_buff *skb,
+ enum hsr_dev_idx dev_idx);
+
+void hsr_addr_subst_source(struct hsr_priv *hsr_priv, struct sk_buff *skb);
+void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
+ enum hsr_dev_idx dev_idx);
+
+void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx);
+
+int hsr_register_frame_out(struct node_entry *node, enum hsr_dev_idx dev_idx,
+ struct sk_buff *skb);
+
+void hsr_prune_nodes(struct hsr_priv *hsr_priv);
+
+int hsr_create_self_node(struct list_head *self_node_db,
+ unsigned char addr_a[ETH_ALEN],
+ unsigned char addr_b[ETH_ALEN]);
+
+void *hsr_get_next_node(struct hsr_priv *hsr_priv, void *_pos,
+ unsigned char addr[ETH_ALEN]);
+
+int hsr_get_node_data(struct hsr_priv *hsr_priv,
+ const unsigned char *addr,
+ unsigned char addr_b[ETH_ALEN],
+ unsigned int *addr_b_ifindex,
+ int *if1_age,
+ u16 *if1_seq,
+ int *if2_age,
+ u16 *if2_seq);
+
+#endif /* _HSR_FRAMEREG_H */
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
new file mode 100644
index 000000000000..af68dd83a4e3
--- /dev/null
+++ b/net/hsr/hsr_main.c
@@ -0,0 +1,469 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ * 2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *
+ * In addition to routines for registering and unregistering HSR support, this
+ * file also contains the receive routine that handles all incoming frames with
+ * Ethertype (protocol) ETH_P_PRP (HSRv0), and network device event handling.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/timer.h>
+#include <linux/etherdevice.h>
+#include "hsr_main.h"
+#include "hsr_device.h"
+#include "hsr_netlink.h"
+#include "hsr_framereg.h"
+
+
+/* List of all registered virtual HSR devices */
+static LIST_HEAD(hsr_list);
+
+void register_hsr_master(struct hsr_priv *hsr_priv)
+{
+ list_add_tail_rcu(&hsr_priv->hsr_list, &hsr_list);
+}
+
+void unregister_hsr_master(struct hsr_priv *hsr_priv)
+{
+ struct hsr_priv *hsr_priv_it;
+
+ list_for_each_entry(hsr_priv_it, &hsr_list, hsr_list)
+ if (hsr_priv_it == hsr_priv) {
+ list_del_rcu(&hsr_priv_it->hsr_list);
+ return;
+ }
+}
+
+bool is_hsr_slave(struct net_device *dev)
+{
+ struct hsr_priv *hsr_priv_it;
+
+ list_for_each_entry_rcu(hsr_priv_it, &hsr_list, hsr_list) {
+ if (dev == hsr_priv_it->slave[0])
+ return true;
+ if (dev == hsr_priv_it->slave[1])
+ return true;
+ }
+
+ return false;
+}
+
+
+/* If dev is a HSR slave device, return the virtual master device. Return NULL
+ * otherwise.
+ */
+static struct hsr_priv *get_hsr_master(struct net_device *dev)
+{
+ struct hsr_priv *hsr_priv;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hsr_priv, &hsr_list, hsr_list)
+ if ((dev == hsr_priv->slave[0]) ||
+ (dev == hsr_priv->slave[1])) {
+ rcu_read_unlock();
+ return hsr_priv;
+ }
+
+ rcu_read_unlock();
+ return NULL;
+}
+
+
+/* If dev is a HSR slave device, return the other slave device. Return NULL
+ * otherwise.
+ */
+static struct net_device *get_other_slave(struct hsr_priv *hsr_priv,
+ struct net_device *dev)
+{
+ if (dev == hsr_priv->slave[0])
+ return hsr_priv->slave[1];
+ if (dev == hsr_priv->slave[1])
+ return hsr_priv->slave[0];
+
+ return NULL;
+}
+
+
+static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct net_device *slave, *other_slave;
+ struct hsr_priv *hsr_priv;
+ int old_operstate;
+ int mtu_max;
+ int res;
+ struct net_device *dev;
+
+ dev = netdev_notifier_info_to_dev(ptr);
+
+ hsr_priv = get_hsr_master(dev);
+ if (hsr_priv) {
+ /* dev is a slave device */
+ slave = dev;
+ other_slave = get_other_slave(hsr_priv, slave);
+ } else {
+ if (!is_hsr_master(dev))
+ return NOTIFY_DONE;
+ hsr_priv = netdev_priv(dev);
+ slave = hsr_priv->slave[0];
+ other_slave = hsr_priv->slave[1];
+ }
+
+ switch (event) {
+ case NETDEV_UP: /* Administrative state UP */
+ case NETDEV_DOWN: /* Administrative state DOWN */
+ case NETDEV_CHANGE: /* Link (carrier) state changes */
+ old_operstate = hsr_priv->dev->operstate;
+ hsr_set_carrier(hsr_priv->dev, slave, other_slave);
+ /* netif_stacked_transfer_operstate() cannot be used here since
+ * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
+ */
+ hsr_set_operstate(hsr_priv->dev, slave, other_slave);
+ hsr_check_announce(hsr_priv->dev, old_operstate);
+ break;
+ case NETDEV_CHANGEADDR:
+
+ /* This should not happen since there's no ndo_set_mac_address()
+ * for HSR devices - i.e. not supported.
+ */
+ if (dev == hsr_priv->dev)
+ break;
+
+ if (dev == hsr_priv->slave[0])
+ memcpy(hsr_priv->dev->dev_addr,
+ hsr_priv->slave[0]->dev_addr, ETH_ALEN);
+
+ /* Make sure we recognize frames from ourselves in hsr_rcv() */
+ res = hsr_create_self_node(&hsr_priv->self_node_db,
+ hsr_priv->dev->dev_addr,
+ hsr_priv->slave[1] ?
+ hsr_priv->slave[1]->dev_addr :
+ hsr_priv->dev->dev_addr);
+ if (res)
+ netdev_warn(hsr_priv->dev,
+ "Could not update HSR node address.\n");
+
+ if (dev == hsr_priv->slave[0])
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, hsr_priv->dev);
+ break;
+ case NETDEV_CHANGEMTU:
+ if (dev == hsr_priv->dev)
+ break; /* Handled in ndo_change_mtu() */
+ mtu_max = hsr_get_max_mtu(hsr_priv);
+ if (hsr_priv->dev->mtu > mtu_max)
+ dev_set_mtu(hsr_priv->dev, mtu_max);
+ break;
+ case NETDEV_UNREGISTER:
+ if (dev == hsr_priv->slave[0])
+ hsr_priv->slave[0] = NULL;
+ if (dev == hsr_priv->slave[1])
+ hsr_priv->slave[1] = NULL;
+
+ /* There should really be a way to set a new slave device... */
+
+ break;
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* HSR works only on Ethernet devices. Refuse to let a slave
+ * change its type.
+ */
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+
+
+static struct timer_list prune_timer;
+
+static void prune_nodes_all(unsigned long data)
+{
+ struct hsr_priv *hsr_priv;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hsr_priv, &hsr_list, hsr_list)
+ hsr_prune_nodes(hsr_priv);
+ rcu_read_unlock();
+
+ prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
+ add_timer(&prune_timer);
+}
+
+
+static struct sk_buff *hsr_pull_tag(struct sk_buff *skb)
+{
+ struct hsr_tag *hsr_tag;
+ struct sk_buff *skb2;
+
+ skb2 = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb2))
+ goto err_free;
+ skb = skb2;
+
+ if (unlikely(!pskb_may_pull(skb, HSR_TAGLEN)))
+ goto err_free;
+
+ hsr_tag = (struct hsr_tag *) skb->data;
+ skb->protocol = hsr_tag->encap_proto;
+ skb_pull(skb, HSR_TAGLEN);
+
+ return skb;
+
+err_free:
+ kfree_skb(skb);
+ return NULL;
+}
+
+
+/* The uses I can see for these HSR supervision frames are:
+ * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
+ * 22") to reset any sequence_nr counters belonging to that node. Useful if
+ * the other node's counter has been reset for some reason.
+ * --
+ * Or not - resetting the counter and bridging the frame would create a
+ * loop, unfortunately.
+ *
+ * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
+ * frame is received from a particular node, we know something is wrong.
+ * We just register these (as with normal frames) and throw them away.
+ *
+ * 3) Allow different MAC addresses for the two slave interfaces, using the
+ * MacAddressA field.
+ */
+static bool is_supervision_frame(struct hsr_priv *hsr_priv, struct sk_buff *skb)
+{
+ struct hsr_sup_tag *hsr_stag;
+
+ if (!ether_addr_equal(eth_hdr(skb)->h_dest,
+ hsr_priv->sup_multicast_addr))
+ return false;
+
+ hsr_stag = (struct hsr_sup_tag *) skb->data;
+ if (get_hsr_stag_path(hsr_stag) != 0x0f)
+ return false;
+ if ((hsr_stag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
+ (hsr_stag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
+ return false;
+ if (hsr_stag->HSR_TLV_Length != 12)
+ return false;
+
+ return true;
+}
+
+
+/* Implementation somewhat according to IEC-62439-3, p. 43
+ */
+static int hsr_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct hsr_priv *hsr_priv;
+ struct net_device *other_slave;
+ struct node_entry *node;
+ bool deliver_to_self;
+ struct sk_buff *skb_deliver;
+ enum hsr_dev_idx dev_in_idx, dev_other_idx;
+ bool dup_out;
+ int ret;
+
+ hsr_priv = get_hsr_master(dev);
+
+ if (!hsr_priv) {
+ /* Non-HSR-slave device 'dev' is connected to a HSR network */
+ kfree_skb(skb);
+ dev->stats.rx_errors++;
+ return NET_RX_SUCCESS;
+ }
+
+ if (dev == hsr_priv->slave[0]) {
+ dev_in_idx = HSR_DEV_SLAVE_A;
+ dev_other_idx = HSR_DEV_SLAVE_B;
+ } else {
+ dev_in_idx = HSR_DEV_SLAVE_B;
+ dev_other_idx = HSR_DEV_SLAVE_A;
+ }
+
+ node = hsr_find_node(&hsr_priv->self_node_db, skb);
+ if (node) {
+ /* Always kill frames sent by ourselves */
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
+ /* Is this frame a candidate for local reception? */
+ deliver_to_self = false;
+ if ((skb->pkt_type == PACKET_HOST) ||
+ (skb->pkt_type == PACKET_MULTICAST) ||
+ (skb->pkt_type == PACKET_BROADCAST))
+ deliver_to_self = true;
+ else if (ether_addr_equal(eth_hdr(skb)->h_dest,
+ hsr_priv->dev->dev_addr)) {
+ skb->pkt_type = PACKET_HOST;
+ deliver_to_self = true;
+ }
+
+
+ rcu_read_lock(); /* node_db */
+ node = hsr_find_node(&hsr_priv->node_db, skb);
+
+ if (is_supervision_frame(hsr_priv, skb)) {
+ skb_pull(skb, sizeof(struct hsr_sup_tag));
+ node = hsr_merge_node(hsr_priv, node, skb, dev_in_idx);
+ if (!node) {
+ rcu_read_unlock(); /* node_db */
+ kfree_skb(skb);
+ hsr_priv->dev->stats.rx_dropped++;
+ return NET_RX_DROP;
+ }
+ skb_push(skb, sizeof(struct hsr_sup_tag));
+ deliver_to_self = false;
+ }
+
+ if (!node) {
+ /* Source node unknown; this might be a HSR frame from
+ * another net (different multicast address). Ignore it.
+ */
+ rcu_read_unlock(); /* node_db */
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
+ /* Register ALL incoming frames as outgoing through the other interface.
+ * This allows us to register frames as incoming only if they are valid
+ * for the receiving interface, without using a specific counter for
+ * incoming frames.
+ */
+ dup_out = hsr_register_frame_out(node, dev_other_idx, skb);
+ if (!dup_out)
+ hsr_register_frame_in(node, dev_in_idx);
+
+ /* Forward this frame? */
+ if (!dup_out && (skb->pkt_type != PACKET_HOST))
+ other_slave = get_other_slave(hsr_priv, dev);
+ else
+ other_slave = NULL;
+
+ if (hsr_register_frame_out(node, HSR_DEV_MASTER, skb))
+ deliver_to_self = false;
+
+ rcu_read_unlock(); /* node_db */
+
+ if (!deliver_to_self && !other_slave) {
+ kfree_skb(skb);
+ /* Circulated frame; silently remove it. */
+ return NET_RX_SUCCESS;
+ }
+
+ skb_deliver = skb;
+ if (deliver_to_self && other_slave) {
+ /* skb_clone() is not enough since we will strip the hsr tag
+ * and do address substitution below
+ */
+ skb_deliver = pskb_copy(skb, GFP_ATOMIC);
+ if (!skb_deliver) {
+ deliver_to_self = false;
+ hsr_priv->dev->stats.rx_dropped++;
+ }
+ }
+
+ if (deliver_to_self) {
+ bool multicast_frame;
+
+ skb_deliver = hsr_pull_tag(skb_deliver);
+ if (!skb_deliver) {
+ hsr_priv->dev->stats.rx_dropped++;
+ goto forward;
+ }
+#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ /* Move everything in the header that is after the HSR tag,
+ * to work around alignment problems caused by the 6-byte HSR
+ * tag. In practice, this removes/overwrites the HSR tag in
+ * the header and restores a "standard" packet.
+ */
+ memmove(skb_deliver->data - HSR_TAGLEN, skb_deliver->data,
+ skb_headlen(skb_deliver));
+
+ /* Adjust skb members so they correspond with the move above.
+ * This cannot possibly underflow skb->data since hsr_pull_tag()
+ * above succeeded.
+ * At this point in the protocol stack, the transport and
+ * network headers have not been set yet, and we haven't touched
+ * the mac header nor the head. So we only need to adjust data
+ * and tail:
+ */
+ skb_deliver->data -= HSR_TAGLEN;
+ skb_deliver->tail -= HSR_TAGLEN;
+#endif
+ skb_deliver->dev = hsr_priv->dev;
+ hsr_addr_subst_source(hsr_priv, skb_deliver);
+ multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST);
+ ret = netif_rx(skb_deliver);
+ if (ret == NET_RX_DROP) {
+ hsr_priv->dev->stats.rx_dropped++;
+ } else {
+ hsr_priv->dev->stats.rx_packets++;
+ hsr_priv->dev->stats.rx_bytes += skb->len;
+ if (multicast_frame)
+ hsr_priv->dev->stats.multicast++;
+ }
+ }
+
+forward:
+ if (other_slave) {
+ skb_push(skb, ETH_HLEN);
+ skb->dev = other_slave;
+ dev_queue_xmit(skb);
+ }
+
+ return NET_RX_SUCCESS;
+}
+
+
+static struct packet_type hsr_pt __read_mostly = {
+ .type = htons(ETH_P_PRP),
+ .func = hsr_rcv,
+};
+
+static struct notifier_block hsr_nb = {
+ .notifier_call = hsr_netdev_notify, /* Slave event notifications */
+};
+
+
+static int __init hsr_init(void)
+{
+ int res;
+
+ BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_TAGLEN);
+
+ dev_add_pack(&hsr_pt);
+
+ init_timer(&prune_timer);
+ prune_timer.function = prune_nodes_all;
+ prune_timer.data = 0;
+ prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
+ add_timer(&prune_timer);
+
+ register_netdevice_notifier(&hsr_nb);
+
+ res = hsr_netlink_init();
+
+ return res;
+}
+
+static void __exit hsr_exit(void)
+{
+ unregister_netdevice_notifier(&hsr_nb);
+ del_timer(&prune_timer);
+ hsr_netlink_exit();
+ dev_remove_pack(&hsr_pt);
+}
+
+module_init(hsr_init);
+module_exit(hsr_exit);
+MODULE_LICENSE("GPL");
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
new file mode 100644
index 000000000000..56fe060c0ab1
--- /dev/null
+++ b/net/hsr/hsr_main.h
@@ -0,0 +1,166 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ * 2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ */
+
+#ifndef _HSR_PRIVATE_H
+#define _HSR_PRIVATE_H
+
+#include <linux/netdevice.h>
+#include <linux/list.h>
+
+
+/* Time constants as specified in the HSR specification (IEC 62439-3:2010)
+ * Table 8.
+ * All values in milliseconds.
+ */
+#define HSR_LIFE_CHECK_INTERVAL 2000 /* ms */
+#define HSR_NODE_FORGET_TIME 60000 /* ms */
+#define HSR_ANNOUNCE_INTERVAL 100 /* ms */
+
+
+/* By how much may the slave1 and slave2 timestamps of the latest received
+ * frame from each node differ before we notify of a communication problem?
+ */
+#define MAX_SLAVE_DIFF 3000 /* ms */
+
+
+/* How often shall we check for a broken ring and remove node entries older
+ * than HSR_NODE_FORGET_TIME?
+ */
+#define PRUNE_PERIOD 3000 /* ms */
+
+
+#define HSR_TLV_ANNOUNCE 22
+#define HSR_TLV_LIFE_CHECK 23
+
+
+/* HSR Tag.
+ * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
+ * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
+ * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
+ * encapsulated protocol } instead.
+ */
+#define HSR_TAGLEN 6
+
+/* Field names below as defined in the IEC 62439-3:2010 standard for HSR. */
+struct hsr_tag {
+ __be16 path_and_LSDU_size;
+ __be16 sequence_nr;
+ __be16 encap_proto;
+} __packed;
+
+
+/* The helper functions below assume that 'path' occupies the 4 most
+ * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or
+ * equivalently, the 4 most significant bits of HSR tag byte 14).
+ *
+ * This is unclear in the IEC specification; its definition of MAC addresses
+ * indicates the spec is written with the least significant bit first (to the
+ * left). This, however, would mean that the LSDU field would be split in two
+ * with the path field in-between, which seems strange. I'm guessing the MAC
+ * address definition is in error.
+ */
+static inline u16 get_hsr_tag_path(struct hsr_tag *ht)
+{
+ return ntohs(ht->path_and_LSDU_size) >> 12;
+}
+
+static inline u16 get_hsr_tag_LSDU_size(struct hsr_tag *ht)
+{
+ return ntohs(ht->path_and_LSDU_size) & 0x0FFF;
+}
+
+static inline void set_hsr_tag_path(struct hsr_tag *ht, u16 path)
+{
+ ht->path_and_LSDU_size = htons(
+ (ntohs(ht->path_and_LSDU_size) & 0x0FFF) | (path << 12));
+}
+
+static inline void set_hsr_tag_LSDU_size(struct hsr_tag *ht, u16 LSDU_size)
+{
+ ht->path_and_LSDU_size = htons(
+ (ntohs(ht->path_and_LSDU_size) & 0xF000) |
+ (LSDU_size & 0x0FFF));
+}
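
As a quick illustration of the bit layout described above, packing path 0 and an LSDU size of 60 into the shared 16-bit field yields htons(0x003C). The sketch below only exercises the accessors defined in this header; the example function name is made up for illustration and is not part of the patch.

/* Sketch: exercise the path/LSDU_size accessors defined above. */
static void hsr_tag_layout_example(void)
{
	struct hsr_tag tag = { 0 };

	set_hsr_tag_path(&tag, 0);		/* upper 4 bits of the shared field */
	set_hsr_tag_LSDU_size(&tag, 60);	/* lower 12 bits */

	/* tag.path_and_LSDU_size now equals htons(0x003C);
	 * get_hsr_tag_path(&tag) returns 0 and
	 * get_hsr_tag_LSDU_size(&tag) returns 60.
	 */
}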
+
+struct hsr_ethhdr {
+ struct ethhdr ethhdr;
+ struct hsr_tag hsr_tag;
+} __packed;
+
+
+/* HSR Supervision Frame data types.
+ * Field names as defined in the IEC 62439-3:2010 standard for HSR.
+ */
+struct hsr_sup_tag {
+ __be16 path_and_HSR_Ver;
+ __be16 sequence_nr;
+ __u8 HSR_TLV_Type;
+ __u8 HSR_TLV_Length;
+} __packed;
+
+struct hsr_sup_payload {
+ unsigned char MacAddressA[ETH_ALEN];
+} __packed;
+
+static inline u16 get_hsr_stag_path(struct hsr_sup_tag *hst)
+{
+ return get_hsr_tag_path((struct hsr_tag *) hst);
+}
+
+static inline u16 get_hsr_stag_HSR_ver(struct hsr_sup_tag *hst)
+{
+ return get_hsr_tag_LSDU_size((struct hsr_tag *) hst);
+}
+
+static inline void set_hsr_stag_path(struct hsr_sup_tag *hst, u16 path)
+{
+ set_hsr_tag_path((struct hsr_tag *) hst, path);
+}
+
+static inline void set_hsr_stag_HSR_Ver(struct hsr_sup_tag *hst, u16 HSR_Ver)
+{
+ set_hsr_tag_LSDU_size((struct hsr_tag *) hst, HSR_Ver);
+}
+
+struct hsr_ethhdr_sp {
+ struct ethhdr ethhdr;
+ struct hsr_sup_tag hsr_sup;
+} __packed;
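
Tying these types back to the receive path: is_supervision_frame() in hsr_main.c above accepts a frame only if it is addressed to the supervision multicast address, carries path 0xf, has HSR_TLV_Type equal to HSR_TLV_ANNOUNCE (22) or HSR_TLV_LIFE_CHECK (23), and has HSR_TLV_Length 12. A minimal sketch of filling a supervision tag that satisfies those checks follows; the helper name is illustrative only, and the actual transmit side lives elsewhere in this patch.

/* Sketch: fill a supervision tag that passes is_supervision_frame(). */
static void hsr_fill_sup_tag_example(struct hsr_sup_tag *hsr_stag,
				     u16 sequence_nr)
{
	set_hsr_stag_path(hsr_stag, 0xf);	/* supervision path */
	set_hsr_stag_HSR_Ver(hsr_stag, 0);	/* HSRv0 */
	hsr_stag->sequence_nr = htons(sequence_nr);
	hsr_stag->HSR_TLV_Type = HSR_TLV_LIFE_CHECK;
	hsr_stag->HSR_TLV_Length = 12;		/* value expected by the check */
}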
+
+
+enum hsr_dev_idx {
+ HSR_DEV_NONE = -1,
+ HSR_DEV_SLAVE_A = 0,
+ HSR_DEV_SLAVE_B,
+ HSR_DEV_MASTER,
+};
+#define HSR_MAX_SLAVE (HSR_DEV_SLAVE_B + 1)
+#define HSR_MAX_DEV (HSR_DEV_MASTER + 1)
+
+struct hsr_priv {
+ struct list_head hsr_list; /* List of hsr devices */
+ struct rcu_head rcu_head;
+ struct net_device *dev;
+ struct net_device *slave[HSR_MAX_SLAVE];
+ struct list_head node_db; /* Other HSR nodes */
+ struct list_head self_node_db; /* MACs of slaves */
+ struct timer_list announce_timer; /* Supervision frame dispatch */
+ int announce_count;
+ u16 sequence_nr;
+ spinlock_t seqnr_lock; /* locking for sequence_nr */
+ unsigned char sup_multicast_addr[ETH_ALEN];
+};
+
+void register_hsr_master(struct hsr_priv *hsr_priv);
+void unregister_hsr_master(struct hsr_priv *hsr_priv);
+bool is_hsr_slave(struct net_device *dev);
+
+#endif /* _HSR_PRIVATE_H */
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
new file mode 100644
index 000000000000..4e66bf61f585
--- /dev/null
+++ b/net/hsr/hsr_netlink.c
@@ -0,0 +1,457 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ * 2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *
+ * Routines for handling Netlink messages for HSR.
+ */
+
+#include "hsr_netlink.h"
+#include <linux/kernel.h>
+#include <net/rtnetlink.h>
+#include <net/genetlink.h>
+#include "hsr_main.h"
+#include "hsr_device.h"
+#include "hsr_framereg.h"
+
+static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
+ [IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
+ [IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
+ [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
+};
+
+
+/* Here, it seems a netdevice has already been allocated for us, and the
+ * hsr_dev_setup routine has been executed. Nice!
+ */
+static int hsr_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct net_device *link[2];
+ unsigned char multicast_spec;
+
+ if (!data[IFLA_HSR_SLAVE1]) {
+ netdev_info(dev, "IFLA_HSR_SLAVE1 missing!\n");
+ return -EINVAL;
+ }
+ link[0] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE1]));
+ if (!data[IFLA_HSR_SLAVE2]) {
+ netdev_info(dev, "IFLA_HSR_SLAVE2 missing!\n");
+ return -EINVAL;
+ }
+ link[1] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE2]));
+
+ if (!link[0] || !link[1])
+ return -ENODEV;
+ if (link[0] == link[1])
+ return -EINVAL;
+
+ if (!data[IFLA_HSR_MULTICAST_SPEC])
+ multicast_spec = 0;
+ else
+ multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
+
+ return hsr_dev_finalize(dev, link, multicast_spec);
+}
+
+static struct rtnl_link_ops hsr_link_ops __read_mostly = {
+ .kind = "hsr",
+ .maxtype = IFLA_HSR_MAX,
+ .policy = hsr_policy,
+ .priv_size = sizeof(struct hsr_priv),
+ .setup = hsr_dev_setup,
+ .newlink = hsr_newlink,
+};
+
+
+
+/* attribute policy */
+/* NLA_BINARY missing in libnl; use NLA_UNSPEC in userspace instead. */
+static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
+ [HSR_A_NODE_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN },
+ [HSR_A_NODE_ADDR_B] = { .type = NLA_BINARY, .len = ETH_ALEN },
+ [HSR_A_IFINDEX] = { .type = NLA_U32 },
+ [HSR_A_IF1_AGE] = { .type = NLA_U32 },
+ [HSR_A_IF2_AGE] = { .type = NLA_U32 },
+ [HSR_A_IF1_SEQ] = { .type = NLA_U16 },
+ [HSR_A_IF2_SEQ] = { .type = NLA_U16 },
+};
+
+static struct genl_family hsr_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = 0,
+ .name = "HSR",
+ .version = 1,
+ .maxattr = HSR_A_MAX,
+};
+
+static struct genl_multicast_group hsr_network_genl_mcgrp = {
+ .name = "hsr-network",
+};
+
+
+
+/* This is called if, for some node with MAC address addr, we only get frames
+ * over one of the slave interfaces. This would indicate an open network ring
+ * (i.e. a link has failed somewhere).
+ */
+void hsr_nl_ringerror(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN],
+ enum hsr_dev_idx dev_idx)
+{
+ struct sk_buff *skb;
+ void *msg_head;
+ int res;
+ int ifindex;
+
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (!skb)
+ goto fail;
+
+ msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_RING_ERROR);
+ if (!msg_head)
+ goto nla_put_failure;
+
+ res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+ if (res < 0)
+ goto nla_put_failure;
+
+ if (hsr_priv->slave[dev_idx])
+ ifindex = hsr_priv->slave[dev_idx]->ifindex;
+ else
+ ifindex = -1;
+ res = nla_put_u32(skb, HSR_A_IFINDEX, ifindex);
+ if (res < 0)
+ goto nla_put_failure;
+
+ genlmsg_end(skb, msg_head);
+ genlmsg_multicast(skb, 0, hsr_network_genl_mcgrp.id, GFP_ATOMIC);
+
+ return;
+
+nla_put_failure:
+ kfree_skb(skb);
+
+fail:
+ netdev_warn(hsr_priv->dev, "Could not send HSR ring error message\n");
+}
+
+/* This is called when we haven't heard from the node with MAC address addr for
+ * some time (just before the node is removed from the node table/list).
+ */
+void hsr_nl_nodedown(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+ void *msg_head;
+ int res;
+
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (!skb)
+ goto fail;
+
+ msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
+ if (!msg_head)
+ goto nla_put_failure;
+
+
+ res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+ if (res < 0)
+ goto nla_put_failure;
+
+ genlmsg_end(skb, msg_head);
+ genlmsg_multicast(skb, 0, hsr_network_genl_mcgrp.id, GFP_ATOMIC);
+
+ return;
+
+nla_put_failure:
+ kfree_skb(skb);
+
+fail:
+ netdev_warn(hsr_priv->dev, "Could not send HSR node down message\n");
+}
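
Both notifications above are multicast on the "hsr-network" generic netlink group registered in hsr_netlink_init() below. For orientation only, a userspace monitor could subscribe roughly as in the following libnl-3 sketch; the program is hypothetical (error handling trimmed) and assumes the uapi header added elsewhere in this series.

/* Sketch: listen for HSR_C_RING_ERROR / HSR_C_NODE_DOWN notifications. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/hsr_netlink.h>
#include <stdio.h>

static int hsr_event_cb(struct nl_msg *msg, void *arg)
{
	struct genlmsghdr *ghdr = nlmsg_data(nlmsg_hdr(msg));
	struct nlattr *attrs[HSR_A_MAX + 1];

	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, HSR_A_MAX, NULL) < 0)
		return NL_SKIP;

	if (attrs[HSR_A_NODE_ADDR]) {
		unsigned char *addr = nla_data(attrs[HSR_A_NODE_ADDR]);

		printf("cmd %u: node %02x:%02x:%02x:%02x:%02x:%02x\n",
		       ghdr->cmd, addr[0], addr[1], addr[2],
		       addr[3], addr[4], addr[5]);
	}
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	genl_connect(sk);
	grp = genl_ctrl_resolve_grp(sk, "HSR", "hsr-network");
	nl_socket_add_membership(sk, grp);
	nl_socket_disable_seq_check(sk);	/* notifications have no request seq */
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, hsr_event_cb, NULL);

	for (;;)
		nl_recvmsgs_default(sk);
}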
+
+
+/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
+ * about the status of a specific node in the network, defined by its MAC
+ * address.
+ *
+ * Input: hsr ifindex, node mac address
+ * Output: hsr ifindex, node mac address (copied from request),
+ * age of latest frame from node over slave 1, slave 2 [ms]
+ */
+static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
+{
+ /* For receiving */
+ struct nlattr *na;
+ struct net_device *hsr_dev;
+
+ /* For sending */
+ struct sk_buff *skb_out;
+ void *msg_head;
+ struct hsr_priv *hsr_priv;
+ unsigned char hsr_node_addr_b[ETH_ALEN];
+ int hsr_node_if1_age;
+ u16 hsr_node_if1_seq;
+ int hsr_node_if2_age;
+ u16 hsr_node_if2_seq;
+ int addr_b_ifindex;
+ int res;
+
+ if (!info)
+ goto invalid;
+
+ na = info->attrs[HSR_A_IFINDEX];
+ if (!na)
+ goto invalid;
+ na = info->attrs[HSR_A_NODE_ADDR];
+ if (!na)
+ goto invalid;
+
+ hsr_dev = __dev_get_by_index(genl_info_net(info),
+ nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+ if (!hsr_dev)
+ goto invalid;
+ if (!is_hsr_master(hsr_dev))
+ goto invalid;
+
+
+ /* Send reply */
+
+ skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb_out) {
+ res = -ENOMEM;
+ goto fail;
+ }
+
+ msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
+ info->snd_seq, &hsr_genl_family, 0,
+ HSR_C_SET_NODE_STATUS);
+ if (!msg_head) {
+ res = -ENOMEM;
+ goto nla_put_failure;
+ }
+
+ res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
+ if (res < 0)
+ goto nla_put_failure;
+
+ hsr_priv = netdev_priv(hsr_dev);
+ res = hsr_get_node_data(hsr_priv,
+ (unsigned char *) nla_data(info->attrs[HSR_A_NODE_ADDR]),
+ hsr_node_addr_b,
+ &addr_b_ifindex,
+ &hsr_node_if1_age,
+ &hsr_node_if1_seq,
+ &hsr_node_if2_age,
+ &hsr_node_if2_seq);
+ if (res < 0)
+ goto fail;
+
+ res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
+ nla_data(info->attrs[HSR_A_NODE_ADDR]));
+ if (res < 0)
+ goto nla_put_failure;
+
+ if (addr_b_ifindex > -1) {
+ res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
+ hsr_node_addr_b);
+ if (res < 0)
+ goto nla_put_failure;
+
+ res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX, addr_b_ifindex);
+ if (res < 0)
+ goto nla_put_failure;
+ }
+
+ res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
+ if (res < 0)
+ goto nla_put_failure;
+ res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
+ if (res < 0)
+ goto nla_put_failure;
+ if (hsr_priv->slave[0])
+ res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
+ hsr_priv->slave[0]->ifindex);
+ if (res < 0)
+ goto nla_put_failure;
+
+ res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
+ if (res < 0)
+ goto nla_put_failure;
+ res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
+ if (res < 0)
+ goto nla_put_failure;
+ if (hsr_priv->slave[1])
+ res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
+ hsr_priv->slave[1]->ifindex);
+
+ genlmsg_end(skb_out, msg_head);
+ genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
+
+ return 0;
+
+invalid:
+ netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
+ return 0;
+
+nla_put_failure:
+ kfree_skb(skb_out);
+ /* Fall through */
+
+fail:
+ return res;
+}
+
+static struct genl_ops hsr_ops_get_node_status = {
+ .cmd = HSR_C_GET_NODE_STATUS,
+ .flags = 0,
+ .policy = hsr_genl_policy,
+ .doit = hsr_get_node_status,
+ .dumpit = NULL,
+};
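
From userspace, HSR_C_GET_NODE_STATUS is an ordinary generic netlink request carrying HSR_A_IFINDEX and HSR_A_NODE_ADDR; the reply built above comes back as HSR_C_SET_NODE_STATUS. The libnl-3 sketch below shows the request side only; the helper is hypothetical and assumes a socket already connected with genl_connect() and the family id resolved with genl_ctrl_resolve(sk, "HSR").

/* Sketch: send a node-status query; the reply arrives asynchronously. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <linux/hsr_netlink.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <errno.h>

static int hsr_query_node(struct nl_sock *sk, int family,
			  const char *hsr_ifname,
			  const unsigned char node_mac[ETH_ALEN])
{
	struct nl_msg *msg = nlmsg_alloc();
	int err;

	if (!msg)
		return -ENOMEM;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    HSR_C_GET_NODE_STATUS, 1);
	nla_put_u32(msg, HSR_A_IFINDEX, if_nametoindex(hsr_ifname));
	nla_put(msg, HSR_A_NODE_ADDR, ETH_ALEN, node_mac);

	err = nl_send_auto(sk, msg);	/* kernel answers with HSR_C_SET_NODE_STATUS */
	nlmsg_free(msg);
	return err < 0 ? err : 0;
}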
+
+
+/* Get a list of MacAddressA of all nodes known to this node (other than self).
+ */
+static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
+{
+ /* For receiving */
+ struct nlattr *na;
+ struct net_device *hsr_dev;
+
+ /* For sending */
+ struct sk_buff *skb_out;
+ void *msg_head;
+ struct hsr_priv *hsr_priv;
+ void *pos;
+ unsigned char addr[ETH_ALEN];
+ int res;
+
+ if (!info)
+ goto invalid;
+
+ na = info->attrs[HSR_A_IFINDEX];
+ if (!na)
+ goto invalid;
+
+ hsr_dev = __dev_get_by_index(genl_info_net(info),
+ nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+ if (!hsr_dev)
+ goto invalid;
+ if (!is_hsr_master(hsr_dev))
+ goto invalid;
+
+
+ /* Send reply */
+
+ skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb_out) {
+ res = -ENOMEM;
+ goto fail;
+ }
+
+ msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
+ info->snd_seq, &hsr_genl_family, 0,
+ HSR_C_SET_NODE_LIST);
+ if (!msg_head) {
+ res = -ENOMEM;
+ goto nla_put_failure;
+ }
+
+ res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
+ if (res < 0)
+ goto nla_put_failure;
+
+ hsr_priv = netdev_priv(hsr_dev);
+
+ rcu_read_lock();
+ pos = hsr_get_next_node(hsr_priv, NULL, addr);
+ while (pos) {
+ res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+ if (res < 0) {
+ rcu_read_unlock();
+ goto nla_put_failure;
+ }
+ pos = hsr_get_next_node(hsr_priv, pos, addr);
+ }
+ rcu_read_unlock();
+
+ genlmsg_end(skb_out, msg_head);
+ genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
+
+ return 0;
+
+invalid:
+ netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
+ return 0;
+
+nla_put_failure:
+ kfree_skb(skb_out);
+ /* Fall through */
+
+fail:
+ return res;
+}
+
+
+static struct genl_ops hsr_ops_get_node_list = {
+ .cmd = HSR_C_GET_NODE_LIST,
+ .flags = 0,
+ .policy = hsr_genl_policy,
+ .doit = hsr_get_node_list,
+ .dumpit = NULL,
+};
+
+int __init hsr_netlink_init(void)
+{
+ int rc;
+
+ rc = rtnl_link_register(&hsr_link_ops);
+ if (rc)
+ goto fail_rtnl_link_register;
+
+ rc = genl_register_family(&hsr_genl_family);
+ if (rc)
+ goto fail_genl_register_family;
+
+ rc = genl_register_ops(&hsr_genl_family, &hsr_ops_get_node_status);
+ if (rc)
+ goto fail_genl_register_ops;
+
+ rc = genl_register_ops(&hsr_genl_family, &hsr_ops_get_node_list);
+ if (rc)
+ goto fail_genl_register_ops_node_list;
+
+ rc = genl_register_mc_group(&hsr_genl_family, &hsr_network_genl_mcgrp);
+ if (rc)
+ goto fail_genl_register_mc_group;
+
+ return 0;
+
+fail_genl_register_mc_group:
+ genl_unregister_ops(&hsr_genl_family, &hsr_ops_get_node_list);
+fail_genl_register_ops_node_list:
+ genl_unregister_ops(&hsr_genl_family, &hsr_ops_get_node_status);
+fail_genl_register_ops:
+ genl_unregister_family(&hsr_genl_family);
+fail_genl_register_family:
+ rtnl_link_unregister(&hsr_link_ops);
+fail_rtnl_link_register:
+
+ return rc;
+}
+
+void __exit hsr_netlink_exit(void)
+{
+ genl_unregister_mc_group(&hsr_genl_family, &hsr_network_genl_mcgrp);
+ genl_unregister_ops(&hsr_genl_family, &hsr_ops_get_node_status);
+ genl_unregister_family(&hsr_genl_family);
+
+ rtnl_link_unregister(&hsr_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("hsr");
diff --git a/net/hsr/hsr_netlink.h b/net/hsr/hsr_netlink.h
new file mode 100644
index 000000000000..d4579dcc3c7d
--- /dev/null
+++ b/net/hsr/hsr_netlink.h
@@ -0,0 +1,30 @@
+/* Copyright 2011-2013 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ * 2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ */
+
+#ifndef __HSR_NETLINK_H
+#define __HSR_NETLINK_H
+
+#include <linux/if_ether.h>
+#include <linux/module.h>
+#include <uapi/linux/hsr_netlink.h>
+
+struct hsr_priv;
+
+int __init hsr_netlink_init(void);
+void __exit hsr_netlink_exit(void);
+
+void hsr_nl_ringerror(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN],
+ int dev_idx);
+void hsr_nl_nodedown(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN]);
+void hsr_nl_framedrop(int dropcount, int dev_idx);
+void hsr_nl_linkdown(int dev_idx);
+
+#endif /* __HSR_NETLINK_H */
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index c85e71e0c7ff..9497c6f3276b 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -440,7 +440,6 @@ lowpan_uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
default:
pr_debug("ERROR: unknown UDP format\n");
goto err;
- break;
}
pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
@@ -655,7 +654,9 @@ static int lowpan_header_create(struct sk_buff *skb,
head[1] = iphc1;
skb_pull(skb, sizeof(struct ipv6hdr));
+ skb_reset_transport_header(skb);
memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
+ skb_reset_network_header(skb);
lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
skb->len);
@@ -738,7 +739,6 @@ static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
return -ENOMEM;
skb_push(new, sizeof(struct ipv6hdr));
- skb_reset_network_header(new);
skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));
new->protocol = htons(ETH_P_IPV6);
@@ -785,7 +785,6 @@ lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
goto skb_err;
frame->skb->priority = skb->priority;
- frame->skb->dev = skb->dev;
/* reserve headroom for uncompressed ipv6 header */
skb_reserve(frame->skb, sizeof(struct ipv6hdr));
@@ -1061,7 +1060,6 @@ lowpan_process_data(struct sk_buff *skb)
skb = new;
skb_push(skb, sizeof(struct udphdr));
- skb_reset_transport_header(skb);
skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
lowpan_raw_dump_table(__func__, "raw UDP header dump",
@@ -1104,50 +1102,40 @@ static int lowpan_set_address(struct net_device *dev, void *p)
return 0;
}
-static int lowpan_get_mac_header_length(struct sk_buff *skb)
-{
- /*
- * Currently long addressing mode is supported only, so the overall
- * header size is 21:
- * FC SeqNum DPAN DA SA Sec
- * 2 + 1 + 2 + 8 + 8 + 0 = 21
- */
- return 21;
-}
-
static int
lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
int mlen, int plen, int offset, int type)
{
struct sk_buff *frag;
- int hlen, ret;
+ int hlen;
hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
- frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE);
+ frag = netdev_alloc_skb(skb->dev,
+ hlen + mlen + plen + IEEE802154_MFR_SIZE);
if (!frag)
return -ENOMEM;
frag->priority = skb->priority;
- frag->dev = skb->dev;
/* copy header, MFR and payload */
- memcpy(skb_put(frag, mlen), skb->data, mlen);
- memcpy(skb_put(frag, hlen), head, hlen);
+ skb_put(frag, mlen);
+ skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);
+
+ skb_put(frag, hlen);
+ skb_copy_to_linear_data_offset(frag, mlen, head, hlen);
- if (plen)
- skb_copy_from_linear_data_offset(skb, offset + mlen,
- skb_put(frag, plen), plen);
+ skb_put(frag, plen);
+ skb_copy_to_linear_data_offset(frag, mlen + hlen,
+ skb_network_header(skb) + offset, plen);
lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data,
frag->len);
- ret = dev_queue_xmit(frag);
-
- return ret;
+ return dev_queue_xmit(frag);
}
static int
@@ -1156,7 +1144,7 @@ lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
int err, header_length, payload_length, tag, offset = 0;
u8 head[5];
- header_length = lowpan_get_mac_header_length(skb);
+ header_length = skb->mac_len;
payload_length = skb->len - header_length;
tag = lowpan_dev_info(dev)->fragment_tag++;
@@ -1181,7 +1169,7 @@ lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
head[0] &= ~LOWPAN_DISPATCH_FRAG1;
head[0] |= LOWPAN_DISPATCH_FRAGN;
- while ((payload_length - offset > 0) && (err >= 0)) {
+ while (payload_length - offset > 0) {
int len = LOWPAN_FRAG_SIZE;
head[4] = offset / 8;
@@ -1327,8 +1315,6 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
/* Pull off the 1-byte of 6lowpan header. */
skb_pull(local_skb, 1);
- skb_reset_network_header(local_skb);
- skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
lowpan_give_skb_to_devices(local_skb);
@@ -1372,6 +1358,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev)
return -ENODEV;
+ if (real_dev->type != ARPHRD_IEEE802154)
+ return -EINVAL;
lowpan_dev_info(dev)->real_dev = real_dev;
lowpan_dev_info(dev)->fragment_tag = 0;
@@ -1386,6 +1374,9 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
entry->ldev = dev;
+ /* Set the lowpan hardware address to the wpan hardware address. */
+ memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
+
mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
INIT_LIST_HEAD(&entry->list);
list_add_tail(&entry->list, &lowpan_devices);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index cfeb85cff4f0..09d78d4a3cff 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -245,29 +245,6 @@ out:
}
EXPORT_SYMBOL(inet_listen);
-u32 inet_ehash_secret __read_mostly;
-EXPORT_SYMBOL(inet_ehash_secret);
-
-u32 ipv6_hash_secret __read_mostly;
-EXPORT_SYMBOL(ipv6_hash_secret);
-
-/*
- * inet_ehash_secret must be set exactly once, and to a non nul value
- * ipv6_hash_secret must be set exactly once.
- */
-void build_ehash_secret(void)
-{
- u32 rnd;
-
- do {
- get_random_bytes(&rnd, sizeof(rnd));
- } while (rnd == 0);
-
- if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
- get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
-}
-EXPORT_SYMBOL(build_ehash_secret);
-
/*
* Create an inet socket.
*/
@@ -284,10 +261,6 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
int try_loading_module = 0;
int err;
- if (unlikely(!inet_ehash_secret))
- if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
- build_ehash_secret();
-
sock->state = SS_UNCONNECTED;
/* Look for the requested type/protocol pair. */
@@ -1254,36 +1227,36 @@ static int inet_gso_send_check(struct sk_buff *skb)
if (ihl < sizeof(*iph))
goto out;
+ proto = iph->protocol;
+
+ /* Warning: after this point, iph might be no longer valid */
if (unlikely(!pskb_may_pull(skb, ihl)))
goto out;
-
__skb_pull(skb, ihl);
+
skb_reset_transport_header(skb);
- iph = ip_hdr(skb);
- proto = iph->protocol;
err = -EPROTONOSUPPORT;
- rcu_read_lock();
ops = rcu_dereference(inet_offloads[proto]);
if (likely(ops && ops->callbacks.gso_send_check))
err = ops->callbacks.gso_send_check(skb);
- rcu_read_unlock();
out:
return err;
}
static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
- netdev_features_t features)
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
const struct net_offload *ops;
+ unsigned int offset = 0;
+ bool udpfrag, encap;
struct iphdr *iph;
int proto;
+ int nhoff;
int ihl;
int id;
- unsigned int offset = 0;
- bool tunnel;
if (unlikely(skb_shinfo(skb)->gso_type &
~(SKB_GSO_TCPV4 |
@@ -1291,12 +1264,16 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
SKB_GSO_DODGY |
SKB_GSO_TCP_ECN |
SKB_GSO_GRE |
+ SKB_GSO_IPIP |
+ SKB_GSO_SIT |
SKB_GSO_TCPV6 |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_MPLS |
0)))
goto out;
+ skb_reset_network_header(skb);
+ nhoff = skb_network_header(skb) - skb_mac_header(skb);
if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
goto out;
@@ -1305,42 +1282,48 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
if (ihl < sizeof(*iph))
goto out;
+ id = ntohs(iph->id);
+ proto = iph->protocol;
+
+ /* Warning: after this point, iph might be no longer valid */
if (unlikely(!pskb_may_pull(skb, ihl)))
goto out;
+ __skb_pull(skb, ihl);
- tunnel = !!skb->encapsulation;
+ encap = SKB_GSO_CB(skb)->encap_level > 0;
+ if (encap)
+ features = skb->dev->hw_enc_features & netif_skb_features(skb);
+ SKB_GSO_CB(skb)->encap_level += ihl;
- __skb_pull(skb, ihl);
skb_reset_transport_header(skb);
- iph = ip_hdr(skb);
- id = ntohs(iph->id);
- proto = iph->protocol;
+
segs = ERR_PTR(-EPROTONOSUPPORT);
- rcu_read_lock();
ops = rcu_dereference(inet_offloads[proto]);
if (likely(ops && ops->callbacks.gso_segment))
segs = ops->callbacks.gso_segment(skb, features);
- rcu_read_unlock();
if (IS_ERR_OR_NULL(segs))
goto out;
+ udpfrag = !!skb->encapsulation && proto == IPPROTO_UDP;
skb = segs;
do {
- iph = ip_hdr(skb);
- if (!tunnel && proto == IPPROTO_UDP) {
+ iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
+ if (udpfrag) {
iph->id = htons(id);
iph->frag_off = htons(offset >> 3);
if (skb->next != NULL)
iph->frag_off |= htons(IP_MF);
- offset += (skb->len - skb->mac_len - iph->ihl * 4);
- } else {
+ offset += skb->len - nhoff - ihl;
+ } else {
iph->id = htons(id++);
}
- iph->tot_len = htons(skb->len - skb->mac_len);
- iph->check = 0;
- iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
+ iph->tot_len = htons(skb->len - nhoff);
+ ip_send_check(iph);
+ if (encap)
+ skb_reset_inner_headers(skb);
+ skb->network_header = (u8 *)iph - skb->head;
} while ((skb = skb->next));
out:
@@ -1546,6 +1529,7 @@ static const struct net_protocol tcp_protocol = {
};
static const struct net_protocol udp_protocol = {
+ .early_demux = udp_v4_early_demux,
.handler = udp_rcv,
.err_handler = udp_err,
.no_policy = 1,
@@ -1646,6 +1630,13 @@ static struct packet_offload ip_packet_offload __read_mostly = {
},
};
+static const struct net_offload ipip_offload = {
+ .callbacks = {
+ .gso_send_check = inet_gso_send_check,
+ .gso_segment = inet_gso_segment,
+ },
+};
+
static int __init ipv4_offload_init(void)
{
/*
@@ -1657,6 +1648,7 @@ static int __init ipv4_offload_init(void)
pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
dev_add_offload(&ip_packet_offload);
+ inet_add_offload(&ipip_offload, IPPROTO_IPIP);
return 0;
}
@@ -1705,8 +1697,6 @@ static int __init inet_init(void)
ip_static_sysctl_init();
#endif
- tcp_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
-
/*
* Add all the base protocols.
*/
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 109ee89f123e..7785b28061ac 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -121,7 +121,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
struct aead_givcrypt_request *req;
struct scatterlist *sg;
struct scatterlist *asg;
- struct esp_data *esp;
struct sk_buff *trailer;
void *tmp;
u8 *iv;
@@ -139,8 +138,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
/* skb is pure payload to encrypt */
- esp = x->data;
- aead = esp->aead;
+ aead = x->data;
alen = crypto_aead_authsize(aead);
tfclen = 0;
@@ -154,8 +152,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
}
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
clen = ALIGN(skb->len + 2 + tfclen, blksize);
- if (esp->padlen)
- clen = ALIGN(clen, esp->padlen);
plen = clen - skb->len - tfclen;
err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
@@ -280,8 +276,7 @@ static int esp_input_done2(struct sk_buff *skb, int err)
{
const struct iphdr *iph;
struct xfrm_state *x = xfrm_input_state(skb);
- struct esp_data *esp = x->data;
- struct crypto_aead *aead = esp->aead;
+ struct crypto_aead *aead = x->data;
int alen = crypto_aead_authsize(aead);
int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
int elen = skb->len - hlen;
@@ -376,8 +371,7 @@ static void esp_input_done(struct crypto_async_request *base, int err)
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
struct ip_esp_hdr *esph;
- struct esp_data *esp = x->data;
- struct crypto_aead *aead = esp->aead;
+ struct crypto_aead *aead = x->data;
struct aead_request *req;
struct sk_buff *trailer;
int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
@@ -459,9 +453,8 @@ out:
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
- struct esp_data *esp = x->data;
- u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
- u32 align = max_t(u32, blksize, esp->padlen);
+ struct crypto_aead *aead = x->data;
+ u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
unsigned int net_adj;
switch (x->props.mode) {
@@ -476,8 +469,8 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
BUG();
}
- return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
- net_adj) & ~(align - 1)) + net_adj - 2;
+ return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
+ net_adj) & ~(blksize - 1)) + net_adj - 2;
}
static void esp4_err(struct sk_buff *skb, u32 info)
@@ -511,18 +504,16 @@ static void esp4_err(struct sk_buff *skb, u32 info)
static void esp_destroy(struct xfrm_state *x)
{
- struct esp_data *esp = x->data;
+ struct crypto_aead *aead = x->data;
- if (!esp)
+ if (!aead)
return;
- crypto_free_aead(esp->aead);
- kfree(esp);
+ crypto_free_aead(aead);
}
static int esp_init_aead(struct xfrm_state *x)
{
- struct esp_data *esp = x->data;
struct crypto_aead *aead;
int err;
@@ -531,7 +522,7 @@ static int esp_init_aead(struct xfrm_state *x)
if (IS_ERR(aead))
goto error;
- esp->aead = aead;
+ x->data = aead;
err = crypto_aead_setkey(aead, x->aead->alg_key,
(x->aead->alg_key_len + 7) / 8);
@@ -548,7 +539,6 @@ error:
static int esp_init_authenc(struct xfrm_state *x)
{
- struct esp_data *esp = x->data;
struct crypto_aead *aead;
struct crypto_authenc_key_param *param;
struct rtattr *rta;
@@ -583,7 +573,7 @@ static int esp_init_authenc(struct xfrm_state *x)
if (IS_ERR(aead))
goto error;
- esp->aead = aead;
+ x->data = aead;
keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
(x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
@@ -638,16 +628,11 @@ error:
static int esp_init_state(struct xfrm_state *x)
{
- struct esp_data *esp;
struct crypto_aead *aead;
u32 align;
int err;
- esp = kzalloc(sizeof(*esp), GFP_KERNEL);
- if (esp == NULL)
- return -ENOMEM;
-
- x->data = esp;
+ x->data = NULL;
if (x->aead)
err = esp_init_aead(x);
@@ -657,9 +642,7 @@ static int esp_init_state(struct xfrm_state *x)
if (err)
goto error;
- aead = esp->aead;
-
- esp->padlen = 0;
+ aead = x->data;
x->props.header_len = sizeof(struct ip_esp_hdr) +
crypto_aead_ivsize(aead);
@@ -683,9 +666,7 @@ static int esp_init_state(struct xfrm_state *x)
}
align = ALIGN(crypto_aead_blocksize(aead), 4);
- if (esp->padlen)
- align = max_t(u32, align, esp->padlen);
- x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
+ x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);
error:
return err;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index b3f627ac4ed8..d846304b7b89 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -933,7 +933,6 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
local_bh_disable();
frn->tb_id = tb->tb_id;
- rcu_read_lock();
frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
if (!frn->err) {
@@ -942,7 +941,6 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
frn->type = res.type;
frn->scope = res.scope;
}
- rcu_read_unlock();
local_bh_enable();
}
}
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index af0f14aba169..388d113fd289 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -24,21 +24,17 @@ static inline void fib_alias_accessed(struct fib_alias *fa)
}
/* Exported by fib_semantics.c */
-extern void fib_release_info(struct fib_info *);
-extern struct fib_info *fib_create_info(struct fib_config *cfg);
-extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
-extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
- u32 tb_id, u8 type, __be32 dst,
- int dst_len, u8 tos, struct fib_info *fi,
- unsigned int);
-extern void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
- int dst_len, u32 tb_id, struct nl_info *info,
- unsigned int nlm_flags);
-extern struct fib_alias *fib_find_alias(struct list_head *fah,
- u8 tos, u32 prio);
-extern int fib_detect_death(struct fib_info *fi, int order,
- struct fib_info **last_resort,
- int *last_idx, int dflt);
+void fib_release_info(struct fib_info *);
+struct fib_info *fib_create_info(struct fib_config *cfg);
+int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
+int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id,
+ u8 type, __be32 dst, int dst_len, u8 tos, struct fib_info *fi,
+ unsigned int);
+void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, int dst_len,
+ u32 tb_id, const struct nl_info *info, unsigned int nlm_flags);
+struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio);
+int fib_detect_death(struct fib_info *fi, int order,
+ struct fib_info **last_resort, int *last_idx, int dflt);
static inline void fib_result_assign(struct fib_result *res,
struct fib_info *fi)
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index d5dbca5ecf62..e63f47a4e651 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -380,7 +380,7 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
}
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
- int dst_len, u32 tb_id, struct nl_info *info,
+ int dst_len, u32 tb_id, const struct nl_info *info,
unsigned int nlm_flags)
{
struct sk_buff *skb;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 3df6d3edb2a1..ec9a9ef4ce50 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -762,12 +762,9 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
if (IS_LEAF(node) || ((struct tnode *) node)->pos >
tn->pos + tn->bits - 1) {
- if (tkey_extract_bits(node->key,
- oldtnode->pos + oldtnode->bits,
- 1) == 0)
- put_child(tn, 2*i, node);
- else
- put_child(tn, 2*i+1, node);
+ put_child(tn,
+ tkey_extract_bits(node->key, oldtnode->pos, oldtnode->bits + 1),
+ node);
continue;
}
@@ -1120,12 +1117,8 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
* first tnode need some special handling
*/
- if (tp)
- pos = tp->pos+tp->bits;
- else
- pos = 0;
-
if (n) {
+ pos = tp ? tp->pos+tp->bits : 0;
newpos = tkey_mismatch(key, pos, n->key);
tn = tnode_new(n->key, newpos, 1);
} else {
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 736c9fc3ef93..5893e99e8299 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -93,35 +93,6 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
}
EXPORT_SYMBOL_GPL(gre_build_header);
-struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
-{
- int err;
-
- if (likely(!skb->encapsulation)) {
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
- }
-
- if (skb_is_gso(skb)) {
- err = skb_unclone(skb, GFP_ATOMIC);
- if (unlikely(err))
- goto error;
- skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
- return skb;
- } else if (skb->ip_summed == CHECKSUM_PARTIAL && gre_csum) {
- err = skb_checksum_help(skb);
- if (unlikely(err))
- goto error;
- } else if (skb->ip_summed != CHECKSUM_PARTIAL)
- skb->ip_summed = CHECKSUM_NONE;
-
- return skb;
-error:
- kfree_skb(skb);
- return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(gre_handle_offloads);
-
static __sum16 check_checksum(struct sk_buff *skb)
{
__sum16 csum = 0;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 55e6bfb3a289..e5d436188464 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -39,7 +39,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
SKB_GSO_UDP |
SKB_GSO_DODGY |
SKB_GSO_TCP_ECN |
- SKB_GSO_GRE)))
+ SKB_GSO_GRE |
+ SKB_GSO_IPIP)))
goto out;
if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 5f7d11a45871..5c0e8bc6e5ba 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -353,6 +353,9 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
saddr = fib_compute_spec_dst(skb);
ipc.opt = NULL;
ipc.tx_flags = 0;
+ ipc.ttl = 0;
+ ipc.tos = -1;
+
if (icmp_param->replyopts.opt.opt.optlen) {
ipc.opt = &icmp_param->replyopts.opt;
if (ipc.opt->opt.srr)
@@ -608,6 +611,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
ipc.addr = iph->saddr;
ipc.opt = &icmp_param->replyopts.opt;
ipc.tx_flags = 0;
+ ipc.ttl = 0;
+ ipc.tos = -1;
rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
type, code, icmp_param);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6acb541c9091..fc0e649cc002 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -29,27 +29,19 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif
-/*
- * This struct holds the first and last local port number.
- */
-struct local_ports sysctl_local_ports __read_mostly = {
- .lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
- .range = { 32768, 61000 },
-};
-
unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);
-void inet_get_local_port_range(int *low, int *high)
+void inet_get_local_port_range(struct net *net, int *low, int *high)
{
unsigned int seq;
do {
- seq = read_seqbegin(&sysctl_local_ports.lock);
+ seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
- *low = sysctl_local_ports.range[0];
- *high = sysctl_local_ports.range[1];
- } while (read_seqretry(&sysctl_local_ports.lock, seq));
+ *low = net->ipv4.sysctl_local_ports.range[0];
+ *high = net->ipv4.sysctl_local_ports.range[1];
+ } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
@@ -79,17 +71,16 @@ int inet_csk_bind_conflict(const struct sock *sk,
(!reuseport || !sk2->sk_reuseport ||
(sk2->sk_state != TCP_TIME_WAIT &&
!uid_eq(uid, sock_i_uid(sk2))))) {
- const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
- if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
- sk2_rcv_saddr == sk_rcv_saddr(sk))
+
+ if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
+ sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
break;
}
if (!relax && reuse && sk2->sk_reuse &&
sk2->sk_state != TCP_LISTEN) {
- const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
- if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
- sk2_rcv_saddr == sk_rcv_saddr(sk))
+ if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
+ sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
break;
}
}
@@ -116,7 +107,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
int remaining, rover, low, high;
again:
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
smallest_rover = rover = net_random() % remaining + low;
@@ -421,8 +412,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol,
flags,
- (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
- ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+ (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
+ ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
security_req_classify_flow(req, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
@@ -457,8 +448,8 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol, inet_sk_flowi_flags(sk),
- (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
- ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+ (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
+ ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
security_req_classify_flow(req, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
@@ -504,9 +495,9 @@ struct request_sock *inet_csk_search_req(const struct sock *sk,
prev = &req->dl_next) {
const struct inet_request_sock *ireq = inet_rsk(req);
- if (ireq->rmt_port == rport &&
- ireq->rmt_addr == raddr &&
- ireq->loc_addr == laddr &&
+ if (ireq->ir_rmt_port == rport &&
+ ireq->ir_rmt_addr == raddr &&
+ ireq->ir_loc_addr == laddr &&
AF_INET_FAMILY(req->rsk_ops->family)) {
WARN_ON(req->sk);
*prevp = prev;
@@ -523,7 +514,8 @@ void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
- const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
+ const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
+ inet_rsk(req)->ir_rmt_port,
lopt->hash_rnd, lopt->nr_table_entries);
reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
@@ -683,9 +675,9 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
newsk->sk_state = TCP_SYN_RECV;
newicsk->icsk_bind_hash = NULL;
- inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
- inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
- inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
+ inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
+ inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
+ inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
newsk->sk_write_space = sk_stream_write_space;
newicsk->icsk_retransmits = 0;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 5f648751fce2..56a964a553d2 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -121,13 +121,13 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
#if IS_ENABLED(CONFIG_IPV6)
if (r->idiag_family == AF_INET6) {
- const struct ipv6_pinfo *np = inet6_sk(sk);
- *(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
- *(struct in6_addr *)r->id.idiag_dst = np->daddr;
+ *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
+ *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
if (ext & (1 << (INET_DIAG_TCLASS - 1)))
- if (nla_put_u8(skb, INET_DIAG_TCLASS, np->tclass) < 0)
+ if (nla_put_u8(skb, INET_DIAG_TCLASS,
+ inet6_sk(sk)->tclass) < 0)
goto errout;
}
#endif
@@ -222,7 +222,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
u32 portid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
- long tmo;
+ s32 tmo;
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
@@ -234,7 +234,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
r = nlmsg_data(nlh);
BUG_ON(tw->tw_state != TCP_TIME_WAIT);
- tmo = tw->tw_ttd - jiffies;
+ tmo = tw->tw_ttd - inet_tw_time_stamp();
if (tmo < 0)
tmo = 0;
@@ -248,18 +248,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
r->id.idiag_dst[0] = tw->tw_daddr;
r->idiag_state = tw->tw_substate;
r->idiag_timer = 3;
- r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
+ r->idiag_expires = jiffies_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
r->idiag_uid = 0;
r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == AF_INET6) {
- const struct inet6_timewait_sock *tw6 =
- inet6_twsk((struct sock *)tw);
-
- *(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
- *(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
+ *(struct in6_addr *)r->id.idiag_src = tw->tw_v6_rcv_saddr;
+ *(struct in6_addr *)r->id.idiag_dst = tw->tw_v6_daddr;
}
#endif
@@ -273,10 +270,11 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
const struct nlmsghdr *unlh)
{
if (sk->sk_state == TCP_TIME_WAIT)
- return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
- skb, r, portid, seq, nlmsg_flags,
- unlh);
- return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, nlmsg_flags, unlh);
+ return inet_twsk_diag_fill(inet_twsk(sk), skb, r, portid, seq,
+ nlmsg_flags, unlh);
+
+ return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
+ nlmsg_flags, unlh);
}
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
@@ -338,12 +336,9 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
err = 0;
out:
- if (sk) {
- if (sk->sk_state == TCP_TIME_WAIT)
- inet_twsk_put((struct inet_timewait_sock *)sk);
- else
- sock_put(sk);
- }
+ if (sk)
+ sock_gen_put(sk);
+
out_nosk:
return err;
}
@@ -489,10 +484,9 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
entry.family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
if (entry.family == AF_INET6) {
- struct ipv6_pinfo *np = inet6_sk(sk);
- entry.saddr = np->rcv_saddr.s6_addr32;
- entry.daddr = np->daddr.s6_addr32;
+ entry.saddr = sk->sk_v6_rcv_saddr.s6_addr32;
+ entry.daddr = sk->sk_v6_daddr.s6_addr32;
} else
#endif
{
@@ -635,22 +629,22 @@ static int inet_csk_diag_dump(struct sock *sk,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
-static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
+static int inet_twsk_diag_dump(struct sock *sk,
struct sk_buff *skb,
struct netlink_callback *cb,
struct inet_diag_req_v2 *r,
const struct nlattr *bc)
{
+ struct inet_timewait_sock *tw = inet_twsk(sk);
+
if (bc != NULL) {
struct inet_diag_entry entry;
entry.family = tw->tw_family;
#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == AF_INET6) {
- struct inet6_timewait_sock *tw6 =
- inet6_twsk((struct sock *)tw);
- entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
- entry.daddr = tw6->tw_v6_daddr.s6_addr32;
+ entry.saddr = tw->tw_v6_rcv_saddr.s6_addr32;
+ entry.daddr = tw->tw_v6_daddr.s6_addr32;
} else
#endif
{
@@ -682,12 +676,12 @@ static inline void inet_diag_req_addrs(const struct sock *sk,
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6) {
if (req->rsk_ops->family == AF_INET6) {
- entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32;
- entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32;
+ entry->saddr = ireq->ir_v6_loc_addr.s6_addr32;
+ entry->daddr = ireq->ir_v6_rmt_addr.s6_addr32;
} else if (req->rsk_ops->family == AF_INET) {
- ipv6_addr_set_v4mapped(ireq->loc_addr,
+ ipv6_addr_set_v4mapped(ireq->ir_loc_addr,
&entry->saddr_storage);
- ipv6_addr_set_v4mapped(ireq->rmt_addr,
+ ipv6_addr_set_v4mapped(ireq->ir_rmt_addr,
&entry->daddr_storage);
entry->saddr = entry->saddr_storage.s6_addr32;
entry->daddr = entry->daddr_storage.s6_addr32;
@@ -695,8 +689,8 @@ static inline void inet_diag_req_addrs(const struct sock *sk,
} else
#endif
{
- entry->saddr = &ireq->loc_addr;
- entry->daddr = &ireq->rmt_addr;
+ entry->saddr = &ireq->ir_loc_addr;
+ entry->daddr = &ireq->ir_rmt_addr;
}
}
@@ -731,9 +725,9 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
tmo = 0;
r->id.idiag_sport = inet->inet_sport;
- r->id.idiag_dport = ireq->rmt_port;
- r->id.idiag_src[0] = ireq->loc_addr;
- r->id.idiag_dst[0] = ireq->rmt_addr;
+ r->id.idiag_dport = ireq->ir_rmt_port;
+ r->id.idiag_src[0] = ireq->ir_loc_addr;
+ r->id.idiag_dst[0] = ireq->ir_rmt_addr;
r->idiag_expires = jiffies_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
@@ -792,13 +786,13 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
if (reqnum < s_reqnum)
continue;
- if (r->id.idiag_dport != ireq->rmt_port &&
+ if (r->id.idiag_dport != ireq->ir_rmt_port &&
r->id.idiag_dport)
continue;
if (bc) {
inet_diag_req_addrs(sk, req, &entry);
- entry.dport = ntohs(ireq->rmt_port);
+ entry.dport = ntohs(ireq->ir_rmt_port);
if (!inet_diag_bc_run(bc, &entry))
continue;
@@ -911,8 +905,7 @@ skip_listen_ht:
num = 0;
- if (hlist_nulls_empty(&head->chain) &&
- hlist_nulls_empty(&head->twchain))
+ if (hlist_nulls_empty(&head->chain))
continue;
if (i > s_i)
@@ -920,7 +913,7 @@ skip_listen_ht:
spin_lock_bh(lock);
sk_nulls_for_each(sk, node, &head->chain) {
- struct inet_sock *inet = inet_sk(sk);
+ int res;
if (!net_eq(sock_net(sk), net))
continue;
@@ -929,15 +922,19 @@ skip_listen_ht:
if (!(r->idiag_states & (1 << sk->sk_state)))
goto next_normal;
if (r->sdiag_family != AF_UNSPEC &&
- sk->sk_family != r->sdiag_family)
+ sk->sk_family != r->sdiag_family)
goto next_normal;
- if (r->id.idiag_sport != inet->inet_sport &&
+ if (r->id.idiag_sport != htons(sk->sk_num) &&
r->id.idiag_sport)
goto next_normal;
- if (r->id.idiag_dport != inet->inet_dport &&
+ if (r->id.idiag_dport != sk->sk_dport &&
r->id.idiag_dport)
goto next_normal;
- if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
+ if (sk->sk_state == TCP_TIME_WAIT)
+ res = inet_twsk_diag_dump(sk, skb, cb, r, bc);
+ else
+ res = inet_csk_diag_dump(sk, skb, cb, r, bc);
+ if (res < 0) {
spin_unlock_bh(lock);
goto done;
}
@@ -945,33 +942,6 @@ next_normal:
++num;
}
- if (r->idiag_states & TCPF_TIME_WAIT) {
- struct inet_timewait_sock *tw;
-
- inet_twsk_for_each(tw, node,
- &head->twchain) {
- if (!net_eq(twsk_net(tw), net))
- continue;
-
- if (num < s_num)
- goto next_dying;
- if (r->sdiag_family != AF_UNSPEC &&
- tw->tw_family != r->sdiag_family)
- goto next_dying;
- if (r->id.idiag_sport != tw->tw_sport &&
- r->id.idiag_sport)
- goto next_dying;
- if (r->id.idiag_dport != tw->tw_dport &&
- r->id.idiag_dport)
- goto next_dying;
- if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
- spin_unlock_bh(lock);
- goto done;
- }
-next_dying:
- ++num;
- }
- }
spin_unlock_bh(lock);
}
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index c5313a9c019b..bb075fc9a14f 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -93,9 +93,6 @@ void inet_frags_init(struct inet_frags *f)
}
rwlock_init(&f->lock);
- f->rnd = (u32) ((totalram_pages ^ (totalram_pages >> 7)) ^
- (jiffies ^ (jiffies >> 6)));
-
setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
(unsigned long)f);
f->secret_timer.expires = jiffies + f->secret_interval;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 7bd8983dbfcf..8b9cf279450d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -24,6 +24,31 @@
#include <net/secure_seq.h>
#include <net/ip.h>
+static unsigned int inet_ehashfn(struct net *net, const __be32 laddr,
+ const __u16 lport, const __be32 faddr,
+ const __be16 fport)
+{
+ static u32 inet_ehash_secret __read_mostly;
+
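+	/* pick the hashing secret lazily, the first time a socket is hashed */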
+ net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));
+
+ return __inet_ehashfn(laddr, lport, faddr, fport,
+ inet_ehash_secret + net_hash_mix(net));
+}
+
+
+static unsigned int inet_sk_ehashfn(const struct sock *sk)
+{
+ const struct inet_sock *inet = inet_sk(sk);
+ const __be32 laddr = inet->inet_rcv_saddr;
+ const __u16 lport = inet->inet_num;
+ const __be32 faddr = inet->inet_daddr;
+ const __be16 fport = inet->inet_dport;
+ struct net *net = sock_net(sk);
+
+ return inet_ehashfn(net, laddr, lport, faddr, fport);
+}
+
/*
* Allocate and initialize a new local port bind bucket.
* The bindhash mutex for snum's hash chain must be held here.
@@ -230,6 +255,19 @@ begin:
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
+/* All sockets share common refcount, but have different destructors */
+void sock_gen_put(struct sock *sk)
+{
+ if (!atomic_dec_and_test(&sk->sk_refcnt))
+ return;
+
+ if (sk->sk_state == TCP_TIME_WAIT)
+ inet_twsk_free(inet_twsk(sk));
+ else
+ sk_free(sk);
+}
+EXPORT_SYMBOL_GPL(sock_gen_put);
+
struct sock *__inet_lookup_established(struct net *net,
struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
@@ -255,13 +293,13 @@ begin:
if (likely(INET_MATCH(sk, net, acookie,
saddr, daddr, ports, dif))) {
if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
- goto begintw;
+ goto out;
if (unlikely(!INET_MATCH(sk, net, acookie,
saddr, daddr, ports, dif))) {
- sock_put(sk);
+ sock_gen_put(sk);
goto begin;
}
- goto out;
+ goto found;
}
}
/*
@@ -271,37 +309,9 @@ begin:
*/
if (get_nulls_value(node) != slot)
goto begin;
-
-begintw:
- /* Must check for a TIME_WAIT'er before going to listener hash. */
- sk_nulls_for_each_rcu(sk, node, &head->twchain) {
- if (sk->sk_hash != hash)
- continue;
- if (likely(INET_TW_MATCH(sk, net, acookie,
- saddr, daddr, ports,
- dif))) {
- if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
- sk = NULL;
- goto out;
- }
- if (unlikely(!INET_TW_MATCH(sk, net, acookie,
- saddr, daddr, ports,
- dif))) {
- sock_put(sk);
- goto begintw;
- }
- goto out;
- }
- }
- /*
- * if the nulls value we got at the end of this lookup is
- * not the expected one, we must restart lookup.
- * We probably met an item that was moved to another chain.
- */
- if (get_nulls_value(node) != slot)
- goto begintw;
- sk = NULL;
out:
+ sk = NULL;
+found:
rcu_read_unlock();
return sk;
}
@@ -326,39 +336,29 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
struct sock *sk2;
const struct hlist_nulls_node *node;
- struct inet_timewait_sock *tw;
+ struct inet_timewait_sock *tw = NULL;
int twrefcnt = 0;
spin_lock(lock);
- /* Check TIME-WAIT sockets first. */
- sk_nulls_for_each(sk2, node, &head->twchain) {
- if (sk2->sk_hash != hash)
- continue;
-
- if (likely(INET_TW_MATCH(sk2, net, acookie,
- saddr, daddr, ports, dif))) {
- tw = inet_twsk(sk2);
- if (twsk_unique(sk, sk2, twp))
- goto unique;
- else
- goto not_unique;
- }
- }
- tw = NULL;
-
- /* And established part... */
sk_nulls_for_each(sk2, node, &head->chain) {
if (sk2->sk_hash != hash)
continue;
+
if (likely(INET_MATCH(sk2, net, acookie,
- saddr, daddr, ports, dif)))
+ saddr, daddr, ports, dif))) {
+ if (sk2->sk_state == TCP_TIME_WAIT) {
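+				/* an existing TIME_WAIT entry may still be reusable; twsk_unique() decides */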
+ tw = inet_twsk(sk2);
+ if (twsk_unique(sk, sk2, twp))
+ break;
+ }
goto not_unique;
+ }
}
-unique:
/* Must record num and sport now. Otherwise we will see
- * in hash table socket with a funny identity. */
+ * in hash table socket with a funny identity.
+ */
inet->inet_num = lport;
inet->inet_sport = htons(lport);
sk->sk_hash = hash;
@@ -494,7 +494,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
u32 offset = hint + port_offset;
struct inet_timewait_sock *tw = NULL;
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
local_bh_disable();
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 1f27c9f4afd0..6d592f8555fb 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -87,19 +87,11 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
refcnt += inet_twsk_bind_unhash(tw, hashinfo);
spin_unlock(&bhead->lock);
-#ifdef SOCK_REFCNT_DEBUG
- if (atomic_read(&tw->tw_refcnt) != 1) {
- pr_debug("%s timewait_sock %p refcnt=%d\n",
- tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
- }
-#endif
- while (refcnt) {
- inet_twsk_put(tw);
- refcnt--;
- }
+ BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
+ atomic_sub(refcnt, &tw->tw_refcnt);
}
-static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
+void inet_twsk_free(struct inet_timewait_sock *tw)
{
struct module *owner = tw->tw_prot->owner;
twsk_destructor((struct sock *)tw);
@@ -118,6 +110,18 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
}
EXPORT_SYMBOL_GPL(inet_twsk_put);
+static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
+ struct hlist_nulls_head *list)
+{
+ hlist_nulls_add_head_rcu(&tw->tw_node, list);
+}
+
+static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
+ struct hlist_head *list)
+{
+ hlist_add_head(&tw->tw_bind_node, list);
+}
+
/*
* Enter the time wait state. This is called with locally disabled BH.
* Essentially we whip up a timewait bucket, copy the relevant info into it
@@ -146,26 +150,21 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
spin_lock(lock);
/*
- * Step 2: Hash TW into TIMEWAIT chain.
- * Should be done before removing sk from established chain
- * because readers are lockless and search established first.
+ * Step 2: Hash TW into tcp ehash chain.
+ * Notes :
+ * - tw_refcnt is set to 3 because :
+ * - We have one reference from bhash chain.
+ * - We have one reference from ehash chain.
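+	 * - One reference is held by our caller, which releases it once the timewait timer has been scheduled.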
+ * We can use atomic_set() because prior spin_lock()/spin_unlock()
+ * committed into memory all tw fields.
*/
- inet_twsk_add_node_rcu(tw, &ehead->twchain);
+ atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
+ inet_twsk_add_node_rcu(tw, &ehead->chain);
- /* Step 3: Remove SK from established hash. */
+ /* Step 3: Remove SK from hash chain */
if (__sk_nulls_del_node_init_rcu(sk))
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- /*
- * Notes :
- * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
- * - We add one reference for the bhash link
- * - We add one reference for the ehash link
- * - We want this refcnt update done before allowing other
- * threads to find this tw in ehash chain.
- */
- atomic_add(1 + 1 + 1, &tw->tw_refcnt);
-
spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
@@ -387,11 +386,11 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw,
if (slot >= INET_TWDR_TWKILL_SLOTS)
slot = INET_TWDR_TWKILL_SLOTS - 1;
}
- tw->tw_ttd = jiffies + timeo;
+ tw->tw_ttd = inet_tw_time_stamp() + timeo;
slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
list = &twdr->cells[slot];
} else {
- tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);
+ tw->tw_ttd = inet_tw_time_stamp() + (slot << INET_TWDR_RECYCLE_TICK);
if (twdr->twcal_hand < 0) {
twdr->twcal_hand = 0;
@@ -490,7 +489,9 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo,
restart_rcu:
rcu_read_lock();
restart:
- sk_nulls_for_each_rcu(sk, node, &head->twchain) {
+ sk_nulls_for_each_rcu(sk, node, &head->chain) {
+ if (sk->sk_state != TCP_TIME_WAIT)
+ continue;
tw = inet_twsk(sk);
if ((tw->tw_family != family) ||
atomic_read(&twsk_net(tw)->count))
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b66910aaef4d..2481993a4970 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -106,6 +106,7 @@ struct ip4_create_arg {
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
+ net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
return jhash_3words((__force u32)id << 16 | prot,
(__force u32)saddr, (__force u32)daddr,
ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index a04d872c54f9..51be64e18e32 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
- skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- /* specify the length of each IP datagram fragment */
- skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
__skb_queue_tail(queue, skb);
+ } else if (skb_is_gso(skb)) {
+ goto append;
}
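+	/* mark the (new or still non-GSO) skb for UDP fragmentation offload */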
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ /* specify the length of each IP datagram fragment */
+ skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
}
@@ -805,7 +810,7 @@ static int __ip_append_data(struct sock *sk,
int copy;
int err;
int offset = 0;
- unsigned int maxfraglen, fragheaderlen;
+ unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst;
@@ -818,8 +823,10 @@ static int __ip_append_data(struct sock *sk,
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
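+	/* when PMTU discovery is enforced, cap datagrams at the MTU instead of the 64KB IP limit */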
+ maxnonfragsize = (inet->pmtudisc >= IP_PMTUDISC_DO) ?
+ mtu : 0xFFFF;
- if (cork->length + length > 0xFFFF - fragheaderlen) {
+ if (cork->length + length > maxnonfragsize - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
mtu-exthdrlen);
return -EMSGSIZE;
@@ -1060,6 +1067,9 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
rt->dst.dev->mtu : dst_mtu(&rt->dst);
cork->dst = &rt->dst;
cork->length = 0;
+ cork->ttl = ipc->ttl;
+ cork->tos = ipc->tos;
+ cork->priority = ipc->priority;
cork->tx_flags = ipc->tx_flags;
return 0;
@@ -1114,7 +1124,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
int mtu;
int len;
int err;
- unsigned int maxfraglen, fragheaderlen, fraggap;
+ unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;
if (inet->hdrincl)
return -EPERM;
@@ -1138,8 +1148,10 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
+ maxnonfragsize = (inet->pmtudisc >= IP_PMTUDISC_DO) ?
+ mtu : 0xFFFF;
- if (cork->length + size > 0xFFFF - fragheaderlen) {
+ if (cork->length + size > maxnonfragsize - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
return -EMSGSIZE;
}
@@ -1311,7 +1323,9 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
if (cork->flags & IPCORK_OPT)
opt = cork->opt;
- if (rt->rt_type == RTN_MULTICAST)
+ if (cork->ttl != 0)
+ ttl = cork->ttl;
+ else if (rt->rt_type == RTN_MULTICAST)
ttl = inet->mc_ttl;
else
ttl = ip_select_ttl(inet, &rt->dst);
@@ -1319,7 +1333,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = 5;
- iph->tos = inet->tos;
+ iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
iph->frag_off = df;
iph->ttl = ttl;
iph->protocol = sk->sk_protocol;
@@ -1331,7 +1345,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
ip_options_build(skb, opt, cork->addr, rt, 0);
}
- skb->priority = sk->sk_priority;
+	skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
skb->mark = sk->sk_mark;
/*
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
@@ -1481,6 +1495,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
ipc.addr = daddr;
ipc.opt = NULL;
ipc.tx_flags = 0;
+ ipc.ttl = 0;
+ ipc.tos = -1;
if (replyopts.opt.opt.optlen) {
ipc.opt = &replyopts.opt;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index d9c4f113d709..0626f2cb192e 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -189,7 +189,7 @@ EXPORT_SYMBOL(ip_cmsg_recv);
int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
{
- int err;
+ int err, val;
struct cmsghdr *cmsg;
for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
@@ -215,6 +215,24 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
ipc->addr = info->ipi_spec_dst.s_addr;
break;
}
+ case IP_TTL:
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+ return -EINVAL;
+ val = *(int *)CMSG_DATA(cmsg);
+ if (val < 1 || val > 255)
+ return -EINVAL;
+ ipc->ttl = val;
+ break;
+ case IP_TOS:
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+ return -EINVAL;
+ val = *(int *)CMSG_DATA(cmsg);
+ if (val < 0 || val > 255)
+ return -EINVAL;
+ ipc->tos = val;
+ ipc->priority = rt_tos2priority(ipc->tos);
+ break;
+
default:
return -EINVAL;
}
@@ -1034,11 +1052,12 @@ e_inval:
* destination in skb->cb[] before dst drop.
* This way, receiver doesnt make cache line misses to read rtable.
*/
-void ipv4_pktinfo_prepare(struct sk_buff *skb)
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
{
struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
- if (skb_rtable(skb)) {
+ if ((inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) &&
+ skb_rtable(skb)) {
pktinfo->ipi_ifindex = inet_iif(skb);
pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
} else {
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index c31e3ad98ef2..42ffbc8d65c6 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -116,3 +116,36 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
return 0;
}
EXPORT_SYMBOL_GPL(iptunnel_pull_header);
+
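+/* Prepare an skb for tunnel transmission: record the inner headers, tag GSO
+ * skbs with the tunnel's gso_type, and settle the checksum state of non-GSO
+ * skbs (computing the checksum now when csum_help is set).
+ */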
+struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
+ bool csum_help,
+ int gso_type_mask)
+{
+ int err;
+
+ if (likely(!skb->encapsulation)) {
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
+
+ if (skb_is_gso(skb)) {
+ err = skb_unclone(skb, GFP_ATOMIC);
+ if (unlikely(err))
+ goto error;
+ skb_shinfo(skb)->gso_type |= gso_type_mask;
+ return skb;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
+ err = skb_checksum_help(skb);
+ if (unlikely(err))
+ goto error;
+ } else if (skb->ip_summed != CHECKSUM_PARTIAL)
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return skb;
+error:
+ kfree_skb(skb);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index e805e7b3030e..5d9c845d288a 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -49,70 +49,6 @@ static struct rtnl_link_ops vti_link_ops __read_mostly;
static int vti_net_id __read_mostly;
static int vti_tunnel_init(struct net_device *dev);
-static int vti_err(struct sk_buff *skb, u32 info)
-{
-
- /* All the routers (except for Linux) return only
- * 8 bytes of packet payload. It means, that precise relaying of
- * ICMP in the real Internet is absolutely infeasible.
- */
- struct net *net = dev_net(skb->dev);
- struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
- struct iphdr *iph = (struct iphdr *)skb->data;
- const int type = icmp_hdr(skb)->type;
- const int code = icmp_hdr(skb)->code;
- struct ip_tunnel *t;
- int err;
-
- switch (type) {
- default:
- case ICMP_PARAMETERPROB:
- return 0;
-
- case ICMP_DEST_UNREACH:
- switch (code) {
- case ICMP_SR_FAILED:
- case ICMP_PORT_UNREACH:
- /* Impossible event. */
- return 0;
- default:
- /* All others are translated to HOST_UNREACH. */
- break;
- }
- break;
- case ICMP_TIME_EXCEEDED:
- if (code != ICMP_EXC_TTL)
- return 0;
- break;
- }
-
- err = -ENOENT;
-
- t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
- iph->daddr, iph->saddr, 0);
- if (t == NULL)
- goto out;
-
- if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
- ipv4_update_pmtu(skb, dev_net(skb->dev), info,
- t->parms.link, 0, IPPROTO_IPIP, 0);
- err = 0;
- goto out;
- }
-
- err = 0;
- if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
- goto out;
-
- if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
- t->err_count++;
- else
- t->err_count = 1;
- t->err_time = jiffies;
-out:
- return err;
-}
-
/* We dont digest the packet therefore let the packet pass */
static int vti_rcv(struct sk_buff *skb)
{
@@ -125,8 +61,17 @@ static int vti_rcv(struct sk_buff *skb)
iph->saddr, iph->daddr, 0);
if (tunnel != NULL) {
struct pcpu_tstats *tstats;
+ u32 oldmark = skb->mark;
+ int ret;
+
- if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+ /* temporarily mark the skb with the tunnel o_key, to
+ * only match policies with this mark.
+ */
+ skb->mark = be32_to_cpu(tunnel->parms.o_key);
+ ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
+ skb->mark = oldmark;
+ if (!ret)
return -1;
tstats = this_cpu_ptr(tunnel->dev->tstats);
@@ -135,7 +80,6 @@ static int vti_rcv(struct sk_buff *skb)
tstats->rx_bytes += skb->len;
u64_stats_update_end(&tstats->syncp);
- skb->mark = 0;
secpath_reset(skb);
skb->dev = tunnel->dev;
return 1;
@@ -167,7 +111,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
memset(&fl4, 0, sizeof(fl4));
flowi4_init_output(&fl4, tunnel->parms.link,
- be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
+ be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
RT_SCOPE_UNIVERSE,
IPPROTO_IPIP, 0,
dst, tiph->saddr, 0, 0);
@@ -296,9 +240,8 @@ static void __net_init vti_fb_tunnel_init(struct net_device *dev)
iph->ihl = 5;
}
-static struct xfrm_tunnel vti_handler __read_mostly = {
+static struct xfrm_tunnel_notifier vti_handler __read_mostly = {
.handler = vti_rcv,
- .err_handler = vti_err,
.priority = 1,
};
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 7f80fb4b82d3..fe3e9f7f1f0b 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -220,17 +220,17 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->protocol != htons(ETH_P_IP)))
goto tx_error;
- if (likely(!skb->encapsulation)) {
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
- }
+ skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+ if (IS_ERR(skb))
+ goto out;
ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
return NETDEV_TX_OK;
tx_error:
- dev->stats.tx_errors++;
dev_kfree_skb(skb);
+out:
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
@@ -275,6 +275,7 @@ static const struct net_device_ops ipip_netdev_ops = {
#define IPIP_FEATURES (NETIF_F_SG | \
NETIF_F_FRAGLIST | \
NETIF_F_HIGHDMA | \
+ NETIF_F_GSO_SOFTWARE | \
NETIF_F_HW_CSUM)
static void ipip_tunnel_setup(struct net_device *dev)
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 1657e39b291f..40d56073cd19 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -36,6 +36,27 @@ config NF_CONNTRACK_PROC_COMPAT
If unsure, say Y.
+config NF_TABLES_IPV4
+ depends on NF_TABLES
+ tristate "IPv4 nf_tables support"
+
+config NFT_REJECT_IPV4
+ depends on NF_TABLES_IPV4
+ tristate "nf_tables IPv4 reject support"
+
+config NFT_CHAIN_ROUTE_IPV4
+ depends on NF_TABLES_IPV4
+ tristate "IPv4 nf_tables route chain support"
+
+config NFT_CHAIN_NAT_IPV4
+ depends on NF_TABLES_IPV4
+ depends on NF_NAT_IPV4 && NFT_NAT
+ tristate "IPv4 nf_tables nat chain support"
+
+config NF_TABLES_ARP
+ depends on NF_TABLES
+ tristate "ARP nf_tables support"
+
config IP_NF_IPTABLES
tristate "IP tables support (required for filtering/masq/NAT)"
default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 3622b248b6dd..19df72b7ba88 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -27,6 +27,12 @@ obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
# NAT protocols (nf_nat)
obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
+obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o
+obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
+obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
+obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
+obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
+
# generic IP tables
obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 85a4f21aac1a..59da7cde0724 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -271,6 +271,11 @@ unsigned int arpt_do_table(struct sk_buff *skb,
local_bh_disable();
addend = xt_write_recseq_begin();
private = table->private;
+ /*
+ * Ensure we load private-> members after we've fetched the base
+ * pointer.
+ */
+ smp_read_barrier_depends();
table_base = private->entries[smp_processor_id()];
e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index a865f6f94013..802ddecb30b8 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -27,13 +27,14 @@ static const struct xt_table packet_filter = {
/* The work comes in here from netfilter.c */
static unsigned int
-arptable_filter_hook(unsigned int hook, struct sk_buff *skb,
+arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
const struct net *net = dev_net((in != NULL) ? in : out);
- return arpt_do_table(skb, hook, in, out, net->ipv4.arptable_filter);
+ return arpt_do_table(skb, ops->hooknum, in, out,
+ net->ipv4.arptable_filter);
}
static struct nf_hook_ops *arpfilter_ops __read_mostly;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d23118d95ff9..718dfbd30cbe 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -327,6 +327,11 @@ ipt_do_table(struct sk_buff *skb,
addend = xt_write_recseq_begin();
private = table->private;
cpu = smp_processor_id();
+ /*
+ * Ensure we load private-> members after we've fetched the base
+ * pointer.
+ */
+ smp_read_barrier_depends();
table_base = private->entries[cpu];
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
stackptr = per_cpu_ptr(private->stackptr, cpu);
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 0b732efd32e2..a2e2b61cd7da 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -483,7 +483,7 @@ static void arp_print(struct arp_payload *payload)
#endif
static unsigned int
-arp_mangle(unsigned int hook,
+arp_mangle(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index b6346bf2fde3..01cffeaa0085 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -297,7 +297,7 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
-static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
+static unsigned int ipv4_synproxy_hook(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index cbc22158af49..9cb993cd224b 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -220,6 +220,7 @@ static void ipt_ulog_packet(struct net *net,
ub->qlen++;
pm = nlmsg_data(nlh);
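+	/* zero the whole message so the optional fields below need no explicit clearing */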
+ memset(pm, 0, sizeof(*pm));
/* We might not have a timestamp, get one */
if (skb->tstamp.tv64 == 0)
@@ -238,8 +239,6 @@ static void ipt_ulog_packet(struct net *net,
}
else if (loginfo->prefix[0] != '\0')
strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
- else
- *(pm->prefix) = '\0';
if (in && in->hard_header_len > 0 &&
skb->mac_header != skb->network_header &&
@@ -251,13 +250,9 @@ static void ipt_ulog_packet(struct net *net,
if (in)
strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
- else
- pm->indev_name[0] = '\0';
if (out)
strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
- else
- pm->outdev_name[0] = '\0';
/* copy_len <= skb->len, so can't fail. */
if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 50af5b45c050..e08a74a243a8 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -33,20 +33,21 @@ static const struct xt_table packet_filter = {
};
static unsigned int
-iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
+iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
const struct net *net;
- if (hook == NF_INET_LOCAL_OUT &&
+ if (ops->hooknum == NF_INET_LOCAL_OUT &&
(skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr)))
/* root is playing with raw sockets. */
return NF_ACCEPT;
net = dev_net((in != NULL) ? in : out);
- return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter);
+ return ipt_do_table(skb, ops->hooknum, in, out,
+ net->ipv4.iptable_filter);
}
static struct nf_hook_ops *filter_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 0d8cd82e0fad..6a5079c34bb3 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -79,19 +79,19 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
/* The work comes in here from netfilter.c. */
static unsigned int
-iptable_mangle_hook(unsigned int hook,
+iptable_mangle_hook(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- if (hook == NF_INET_LOCAL_OUT)
+ if (ops->hooknum == NF_INET_LOCAL_OUT)
return ipt_mangle_out(skb, out);
- if (hook == NF_INET_POST_ROUTING)
- return ipt_do_table(skb, hook, in, out,
+ if (ops->hooknum == NF_INET_POST_ROUTING)
+ return ipt_do_table(skb, ops->hooknum, in, out,
dev_net(out)->ipv4.iptable_mangle);
/* PREROUTING/INPUT/FORWARD: */
- return ipt_do_table(skb, hook, in, out,
+ return ipt_do_table(skb, ops->hooknum, in, out,
dev_net(in)->ipv4.iptable_mangle);
}
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 683bfaffed65..ee2886126e3d 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -61,7 +61,7 @@ static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
}
static unsigned int
-nf_nat_ipv4_fn(unsigned int hooknum,
+nf_nat_ipv4_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -71,7 +71,7 @@ nf_nat_ipv4_fn(unsigned int hooknum,
enum ip_conntrack_info ctinfo;
struct nf_conn_nat *nat;
/* maniptype == SRC for postrouting. */
- enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
+ enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
/* We never see fragments: conntrack defrags on pre-routing
* and local-out, and nf_nat_out protects post-routing.
@@ -108,7 +108,7 @@ nf_nat_ipv4_fn(unsigned int hooknum,
case IP_CT_RELATED_REPLY:
if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
- hooknum))
+ ops->hooknum))
return NF_DROP;
else
return NF_ACCEPT;
@@ -121,14 +121,14 @@ nf_nat_ipv4_fn(unsigned int hooknum,
if (!nf_nat_initialized(ct, maniptype)) {
unsigned int ret;
- ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
+ ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct);
if (ret != NF_ACCEPT)
return ret;
} else {
pr_debug("Already setup manip %s for ct %p\n",
maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
ct);
- if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+ if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
goto oif_changed;
}
break;
@@ -137,11 +137,11 @@ nf_nat_ipv4_fn(unsigned int hooknum,
/* ESTABLISHED */
NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
ctinfo == IP_CT_ESTABLISHED_REPLY);
- if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+ if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
goto oif_changed;
}
- return nf_nat_packet(ct, ctinfo, hooknum, skb);
+ return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
oif_changed:
nf_ct_kill_acct(ct, ctinfo, skb);
@@ -149,7 +149,7 @@ oif_changed:
}
static unsigned int
-nf_nat_ipv4_in(unsigned int hooknum,
+nf_nat_ipv4_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -158,7 +158,7 @@ nf_nat_ipv4_in(unsigned int hooknum,
unsigned int ret;
__be32 daddr = ip_hdr(skb)->daddr;
- ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+ ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
if (ret != NF_DROP && ret != NF_STOLEN &&
daddr != ip_hdr(skb)->daddr)
skb_dst_drop(skb);
@@ -167,7 +167,7 @@ nf_nat_ipv4_in(unsigned int hooknum,
}
static unsigned int
-nf_nat_ipv4_out(unsigned int hooknum,
+nf_nat_ipv4_out(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -185,7 +185,7 @@ nf_nat_ipv4_out(unsigned int hooknum,
ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+ ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
#ifdef CONFIG_XFRM
if (ret != NF_DROP && ret != NF_STOLEN &&
!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
@@ -207,7 +207,7 @@ nf_nat_ipv4_out(unsigned int hooknum,
}
static unsigned int
-nf_nat_ipv4_local_fn(unsigned int hooknum,
+nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -223,7 +223,7 @@ nf_nat_ipv4_local_fn(unsigned int hooknum,
ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+ ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
if (ret != NF_DROP && ret != NF_STOLEN &&
(ct = nf_ct_get(skb, &ctinfo)) != NULL) {
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 1f82aea11df6..b2f7e8f98316 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -20,20 +20,20 @@ static const struct xt_table packet_raw = {
/* The work comes in here from netfilter.c. */
static unsigned int
-iptable_raw_hook(unsigned int hook, struct sk_buff *skb,
+iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
const struct net *net;
- if (hook == NF_INET_LOCAL_OUT &&
+ if (ops->hooknum == NF_INET_LOCAL_OUT &&
(skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr)))
/* root is playing with raw sockets. */
return NF_ACCEPT;
net = dev_net((in != NULL) ? in : out);
- return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_raw);
+ return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.iptable_raw);
}
static struct nf_hook_ops *rawtable_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index f867a8d38bf7..c86647ed2078 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -37,21 +37,22 @@ static const struct xt_table security_table = {
};
static unsigned int
-iptable_security_hook(unsigned int hook, struct sk_buff *skb,
+iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
const struct net *net;
- if (hook == NF_INET_LOCAL_OUT &&
+ if (ops->hooknum == NF_INET_LOCAL_OUT &&
(skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr)))
/* Somebody is playing with raw sockets. */
return NF_ACCEPT;
net = dev_net((in != NULL) ? in : out);
- return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_security);
+ return ipt_do_table(skb, ops->hooknum, in, out,
+ net->ipv4.iptable_security);
}
static struct nf_hook_ops *sectbl_ops __read_mostly;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 86f5b34a4ed1..ecd8bec411c9 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -92,7 +92,7 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
return NF_ACCEPT;
}
-static unsigned int ipv4_helper(unsigned int hooknum,
+static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -121,7 +121,7 @@ static unsigned int ipv4_helper(unsigned int hooknum,
ct, ctinfo);
}
-static unsigned int ipv4_confirm(unsigned int hooknum,
+static unsigned int ipv4_confirm(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -147,16 +147,16 @@ out:
return nf_conntrack_confirm(skb);
}
-static unsigned int ipv4_conntrack_in(unsigned int hooknum,
+static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return nf_conntrack_in(dev_net(in), PF_INET, hooknum, skb);
+ return nf_conntrack_in(dev_net(in), PF_INET, ops->hooknum, skb);
}
-static unsigned int ipv4_conntrack_local(unsigned int hooknum,
+static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -166,7 +166,7 @@ static unsigned int ipv4_conntrack_local(unsigned int hooknum,
if (skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- return nf_conntrack_in(dev_net(out), PF_INET, hooknum, skb);
+ return nf_conntrack_in(dev_net(out), PF_INET, ops->hooknum, skb);
}
/* Connection tracking may drop packets, but never alters them, so
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 742815518b0f..12e13bd82b5b 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -60,7 +60,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
return IP_DEFRAG_CONNTRACK_OUT + zone;
}
-static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
+static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -83,7 +83,9 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
#endif
/* Gather fragments. */
if (ip_is_fragment(ip_hdr(skb))) {
- enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
+ enum ip_defrag_users user =
+ nf_ct_defrag_user(ops->hooknum, skb);
+
if (nf_ct_ipv4_gather_frags(skb, user))
return NF_STOLEN;
}
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
new file mode 100644
index 000000000000..3e67ef1c676f
--- /dev/null
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2008-2010 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2013 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netfilter_arp.h>
+#include <net/netfilter/nf_tables.h>
+
+static struct nft_af_info nft_af_arp __read_mostly = {
+ .family = NFPROTO_ARP,
+ .nhooks = NF_ARP_NUMHOOKS,
+ .owner = THIS_MODULE,
+};
+
+static int nf_tables_arp_init_net(struct net *net)
+{
+ net->nft.arp = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+	if (net->nft.arp == NULL)
+ return -ENOMEM;
+
+ memcpy(net->nft.arp, &nft_af_arp, sizeof(nft_af_arp));
+
+ if (nft_register_afinfo(net, net->nft.arp) < 0)
+ goto err;
+
+ return 0;
+err:
+ kfree(net->nft.arp);
+ return -ENOMEM;
+}
+
+static void nf_tables_arp_exit_net(struct net *net)
+{
+ nft_unregister_afinfo(net->nft.arp);
+ kfree(net->nft.arp);
+}
+
+static struct pernet_operations nf_tables_arp_net_ops = {
+ .init = nf_tables_arp_init_net,
+ .exit = nf_tables_arp_exit_net,
+};
+
+static unsigned int
+nft_do_chain_arp(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nft_pktinfo pkt;
+
+ nft_set_pktinfo(&pkt, ops, skb, in, out);
+
+ return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nf_chain_type filter_arp = {
+ .family = NFPROTO_ARP,
+ .name = "filter",
+ .type = NFT_CHAIN_T_DEFAULT,
+ .hook_mask = (1 << NF_ARP_IN) |
+ (1 << NF_ARP_OUT) |
+ (1 << NF_ARP_FORWARD),
+ .fn = {
+ [NF_ARP_IN] = nft_do_chain_arp,
+ [NF_ARP_OUT] = nft_do_chain_arp,
+ [NF_ARP_FORWARD] = nft_do_chain_arp,
+ },
+};
+
+static int __init nf_tables_arp_init(void)
+{
+ int ret;
+
+ nft_register_chain_type(&filter_arp);
+ ret = register_pernet_subsys(&nf_tables_arp_net_ops);
+ if (ret < 0)
+ nft_unregister_chain_type(&filter_arp);
+
+ return ret;
+}
+
+static void __exit nf_tables_arp_exit(void)
+{
+ unregister_pernet_subsys(&nf_tables_arp_net_ops);
+ nft_unregister_chain_type(&filter_arp);
+}
+
+module_init(nf_tables_arp_init);
+module_exit(nf_tables_arp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(3); /* NFPROTO_ARP */
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c
new file mode 100644
index 000000000000..8f7536be1322
--- /dev/null
+++ b/net/ipv4/netfilter/nf_tables_ipv4.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012-2013 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/net_namespace.h>
+#include <net/ip.h>
+#include <net/net_namespace.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+
+static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nft_pktinfo pkt;
+
+ if (unlikely(skb->len < sizeof(struct iphdr) ||
+ ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {
+ if (net_ratelimit())
+ pr_info("nf_tables_ipv4: ignoring short SOCK_RAW "
+ "packet\n");
+ return NF_ACCEPT;
+ }
+ nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+ return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nft_af_info nft_af_ipv4 __read_mostly = {
+ .family = NFPROTO_IPV4,
+ .nhooks = NF_INET_NUMHOOKS,
+ .owner = THIS_MODULE,
+ .hooks = {
+ [NF_INET_LOCAL_OUT] = nft_ipv4_output,
+ },
+};
+
+static int nf_tables_ipv4_init_net(struct net *net)
+{
+ net->nft.ipv4 = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+ if (net->nft.ipv4 == NULL)
+ return -ENOMEM;
+
+ memcpy(net->nft.ipv4, &nft_af_ipv4, sizeof(nft_af_ipv4));
+
+ if (nft_register_afinfo(net, net->nft.ipv4) < 0)
+ goto err;
+
+ return 0;
+err:
+ kfree(net->nft.ipv4);
+ return -ENOMEM;
+}
+
+static void nf_tables_ipv4_exit_net(struct net *net)
+{
+ nft_unregister_afinfo(net->nft.ipv4);
+ kfree(net->nft.ipv4);
+}
+
+static struct pernet_operations nf_tables_ipv4_net_ops = {
+ .init = nf_tables_ipv4_init_net,
+ .exit = nf_tables_ipv4_exit_net,
+};
+
+static unsigned int
+nft_do_chain_ipv4(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nft_pktinfo pkt;
+
+ nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+ return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nf_chain_type filter_ipv4 = {
+ .family = NFPROTO_IPV4,
+ .name = "filter",
+ .type = NFT_CHAIN_T_DEFAULT,
+ .hook_mask = (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_POST_ROUTING),
+ .fn = {
+ [NF_INET_LOCAL_IN] = nft_do_chain_ipv4,
+ [NF_INET_LOCAL_OUT] = nft_ipv4_output,
+ [NF_INET_FORWARD] = nft_do_chain_ipv4,
+ [NF_INET_PRE_ROUTING] = nft_do_chain_ipv4,
+ [NF_INET_POST_ROUTING] = nft_do_chain_ipv4,
+ },
+};
+
+static int __init nf_tables_ipv4_init(void)
+{
+ nft_register_chain_type(&filter_ipv4);
+ return register_pernet_subsys(&nf_tables_ipv4_net_ops);
+}
+
+static void __exit nf_tables_ipv4_exit(void)
+{
+ unregister_pernet_subsys(&nf_tables_ipv4_net_ops);
+ nft_unregister_chain_type(&filter_ipv4);
+}
+
+module_init(nf_tables_ipv4_init);
+module_exit(nf_tables_ipv4_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(AF_INET);
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
new file mode 100644
index 000000000000..cf2c792cd971
--- /dev/null
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ * Copyright (c) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/ip.h>
+
+/*
+ * NAT chains
+ */
+
+static unsigned int nf_nat_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+ struct nf_conn_nat *nat;
+ enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
+ struct nft_pktinfo pkt;
+ unsigned int ret;
+
+ if (ct == NULL || nf_ct_is_untracked(ct))
+ return NF_ACCEPT;
+
+ NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)));
+
+ nat = nfct_nat(ct);
+ if (nat == NULL) {
+ /* Conntrack module was loaded late, can't add extension. */
+ if (nf_ct_is_confirmed(ct))
+ return NF_ACCEPT;
+ nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+ if (nat == NULL)
+ return NF_ACCEPT;
+ }
+
+ switch (ctinfo) {
+ case IP_CT_RELATED:
+ case IP_CT_RELATED + IP_CT_IS_REPLY:
+ if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
+ if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
+ ops->hooknum))
+ return NF_DROP;
+ else
+ return NF_ACCEPT;
+ }
+ /* Fall through */
+ case IP_CT_NEW:
+ if (nf_nat_initialized(ct, maniptype))
+ break;
+
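+		/* let the nat chain pick a binding; fall back to a null binding if it did not */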
+ nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+ ret = nft_do_chain_pktinfo(&pkt, ops);
+ if (ret != NF_ACCEPT)
+ return ret;
+ if (!nf_nat_initialized(ct, maniptype)) {
+ ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
+ if (ret != NF_ACCEPT)
+ return ret;
+ }
+ default:
+ break;
+ }
+
+ return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+}
+
+static unsigned int nf_nat_prerouting(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ __be32 daddr = ip_hdr(skb)->daddr;
+ unsigned int ret;
+
+ ret = nf_nat_fn(ops, skb, in, out, okfn);
+ if (ret != NF_DROP && ret != NF_STOLEN &&
+ ip_hdr(skb)->daddr != daddr) {
+ skb_dst_drop(skb);
+ }
+ return ret;
+}
+
+static unsigned int nf_nat_postrouting(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ enum ip_conntrack_info ctinfo __maybe_unused;
+ const struct nf_conn *ct __maybe_unused;
+ unsigned int ret;
+
+ ret = nf_nat_fn(ops, skb, in, out, okfn);
+#ifdef CONFIG_XFRM
+ if (ret != NF_DROP && ret != NF_STOLEN &&
+ (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if (ct->tuplehash[dir].tuple.src.u3.ip !=
+ ct->tuplehash[!dir].tuple.dst.u3.ip ||
+ ct->tuplehash[dir].tuple.src.u.all !=
+ ct->tuplehash[!dir].tuple.dst.u.all)
+ return nf_xfrm_me_harder(skb, AF_INET) == 0 ?
+ ret : NF_DROP;
+ }
+#endif
+ return ret;
+}
+
+static unsigned int nf_nat_output(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ enum ip_conntrack_info ctinfo;
+ const struct nf_conn *ct;
+ unsigned int ret;
+
+ ret = nf_nat_fn(ops, skb, in, out, okfn);
+ if (ret != NF_DROP && ret != NF_STOLEN &&
+ (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if (ct->tuplehash[dir].tuple.dst.u3.ip !=
+ ct->tuplehash[!dir].tuple.src.u3.ip) {
+ if (ip_route_me_harder(skb, RTN_UNSPEC))
+ ret = NF_DROP;
+ }
+#ifdef CONFIG_XFRM
+ else if (ct->tuplehash[dir].tuple.dst.u.all !=
+ ct->tuplehash[!dir].tuple.src.u.all)
+ if (nf_xfrm_me_harder(skb, AF_INET))
+ ret = NF_DROP;
+#endif
+ }
+ return ret;
+}
+
+static struct nf_chain_type nft_chain_nat_ipv4 = {
+ .family = NFPROTO_IPV4,
+ .name = "nat",
+ .type = NFT_CHAIN_T_NAT,
+ .hook_mask = (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_POST_ROUTING) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_LOCAL_IN),
+ .fn = {
+ [NF_INET_PRE_ROUTING] = nf_nat_prerouting,
+ [NF_INET_POST_ROUTING] = nf_nat_postrouting,
+ [NF_INET_LOCAL_OUT] = nf_nat_output,
+ [NF_INET_LOCAL_IN] = nf_nat_fn,
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init nft_chain_nat_init(void)
+{
+ int err;
+
+ err = nft_register_chain_type(&nft_chain_nat_ipv4);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void __exit nft_chain_nat_exit(void)
+{
+ nft_unregister_chain_type(&nft_chain_nat_ipv4);
+}
+
+module_init(nft_chain_nat_init);
+module_exit(nft_chain_nat_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
new file mode 100644
index 000000000000..4e6bf9a3d7aa
--- /dev/null
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/route.h>
+#include <net/ip.h>
+
+static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ unsigned int ret;
+ struct nft_pktinfo pkt;
+ u32 mark;
+ __be32 saddr, daddr;
+ u_int8_t tos;
+ const struct iphdr *iph;
+
+ /* root is playing with raw sockets. */
+ if (skb->len < sizeof(struct iphdr) ||
+ ip_hdrlen(skb) < sizeof(struct iphdr))
+ return NF_ACCEPT;
+
+ nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+ mark = skb->mark;
+ iph = ip_hdr(skb);
+ saddr = iph->saddr;
+ daddr = iph->daddr;
+ tos = iph->tos;
+
+ ret = nft_do_chain_pktinfo(&pkt, ops);
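+	/* if the chain rewrote the addresses, mark or TOS, the route must be recomputed */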
+ if (ret != NF_DROP && ret != NF_QUEUE) {
+ iph = ip_hdr(skb);
+
+ if (iph->saddr != saddr ||
+ iph->daddr != daddr ||
+ skb->mark != mark ||
+ iph->tos != tos)
+ if (ip_route_me_harder(skb, RTN_UNSPEC))
+ ret = NF_DROP;
+ }
+ return ret;
+}
+
+static struct nf_chain_type nft_chain_route_ipv4 = {
+ .family = NFPROTO_IPV4,
+ .name = "route",
+ .type = NFT_CHAIN_T_ROUTE,
+ .hook_mask = (1 << NF_INET_LOCAL_OUT),
+ .fn = {
+ [NF_INET_LOCAL_OUT] = nf_route_table_hook,
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init nft_chain_route_init(void)
+{
+ return nft_register_chain_type(&nft_chain_route_ipv4);
+}
+
+static void __exit nft_chain_route_exit(void)
+{
+ nft_unregister_chain_type(&nft_chain_route_ipv4);
+}
+
+module_init(nft_chain_route_init);
+module_exit(nft_chain_route_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET, "route");
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
new file mode 100644
index 000000000000..fff5ba1a33b7
--- /dev/null
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/icmp.h>
+
+struct nft_reject {
+ enum nft_reject_types type:8;
+ u8 icmp_code;
+};
+
+static void nft_reject_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_reject *priv = nft_expr_priv(expr);
+
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ icmp_send(pkt->skb, ICMP_DEST_UNREACH, priv->icmp_code, 0);
+ break;
+ case NFT_REJECT_TCP_RST:
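+		/* no reset is generated here; the packet is simply dropped below */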
+ break;
+ }
+
+ data[NFT_REG_VERDICT].verdict = NF_DROP;
+}
+
+static const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
+ [NFTA_REJECT_TYPE] = { .type = NLA_U32 },
+ [NFTA_REJECT_ICMP_CODE] = { .type = NLA_U8 },
+};
+
+static int nft_reject_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_reject *priv = nft_expr_priv(expr);
+
+ if (tb[NFTA_REJECT_TYPE] == NULL)
+ return -EINVAL;
+
+ priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
+ return -EINVAL;
+ priv->icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
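+		/* fall through */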
+ case NFT_REJECT_TCP_RST:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_reject *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type))
+ goto nla_put_failure;
+
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
+ goto nla_put_failure;
+ break;
+ }
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_reject_type;
+static const struct nft_expr_ops nft_reject_ops = {
+ .type = &nft_reject_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+ .eval = nft_reject_eval,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_type __read_mostly = {
+ .name = "reject",
+ .ops = &nft_reject_ops,
+ .policy = nft_reject_policy,
+ .maxattr = NFTA_REJECT_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_reject_module_init(void)
+{
+ return nft_register_expr(&nft_reject_type);
+}
+
+static void __exit nft_reject_module_exit(void)
+{
+ nft_unregister_expr(&nft_reject_type);
+}
+
+module_init(nft_reject_module_init);
+module_exit(nft_reject_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("reject");
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index d7d9882d4cae..9afbdb19f4a2 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -202,15 +202,14 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
#if IS_ENABLED(CONFIG_IPV6)
} else if (skb->protocol == htons(ETH_P_IPV6) &&
sk->sk_family == AF_INET6) {
- struct ipv6_pinfo *np = inet6_sk(sk);
pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk,
(int) isk->inet_num,
- &inet6_sk(sk)->rcv_saddr,
+ &sk->sk_v6_rcv_saddr,
sk->sk_bound_dev_if);
- if (!ipv6_addr_any(&np->rcv_saddr) &&
- !ipv6_addr_equal(&np->rcv_saddr,
+ if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
+ !ipv6_addr_equal(&sk->sk_v6_rcv_saddr,
&ipv6_hdr(skb)->daddr))
continue;
#endif
@@ -237,11 +236,11 @@ static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
unsigned int seq;
do {
- seq = read_seqbegin(&sysctl_local_ports.lock);
+ seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
*low = data[0];
*high = data[1];
- } while (read_seqretry(&sysctl_local_ports.lock, seq));
+ } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
}
@@ -362,7 +361,7 @@ static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr)
} else if (saddr->sa_family == AF_INET6) {
struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr;
struct ipv6_pinfo *np = inet6_sk(sk);
- np->rcv_saddr = np->saddr = addr->sin6_addr;
+ sk->sk_v6_rcv_saddr = np->saddr = addr->sin6_addr;
#endif
}
}
@@ -376,7 +375,7 @@ static void ping_clear_saddr(struct sock *sk, int dif)
#if IS_ENABLED(CONFIG_IPV6)
} else if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
- memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
+ memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
memset(&np->saddr, 0, sizeof(np->saddr));
#endif
}
@@ -416,10 +415,12 @@ int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
(int)sk->sk_bound_dev_if);
err = 0;
- if ((sk->sk_family == AF_INET && isk->inet_rcv_saddr) ||
- (sk->sk_family == AF_INET6 &&
- !ipv6_addr_any(&inet6_sk(sk)->rcv_saddr)))
+ if (sk->sk_family == AF_INET && isk->inet_rcv_saddr)
sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6 && !ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+ sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
+#endif
if (snum)
sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
@@ -429,7 +430,7 @@ int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6)
- memset(&inet6_sk(sk)->daddr, 0, sizeof(inet6_sk(sk)->daddr));
+ memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
#endif
sk_dst_reset(sk);
@@ -713,6 +714,8 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.opt = NULL;
ipc.oif = sk->sk_bound_dev_if;
ipc.tx_flags = 0;
+ ipc.ttl = 0;
+ ipc.tos = -1;
sock_tx_timestamp(sk, &ipc.tx_flags);
@@ -744,7 +747,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
return -EINVAL;
faddr = ipc.opt->opt.faddr;
}
- tos = RT_TOS(inet->tos);
+ tos = get_rttos(&ipc, inet);
if (sock_flag(sk, SOCK_LOCALROUTE) ||
(msg->msg_flags & MSG_DONTROUTE) ||
(ipc.opt && ipc.opt->opt.is_strictroute)) {
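
The ping_v4_sendmsg() hunk initializes ipc.ttl to 0 and ipc.tos to -1 and routes TOS selection through get_rttos(), which suggests the ipcm cookie can now carry per-packet TTL/TOS overrides that fall back to the socket defaults when unset. For context, here is a small userspace sketch of the long-standing per-socket knobs those sentinels fall back to; the per-packet ancillary-data path itself is not shown.

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    int tos = 0x10; /* IPTOS_LOWDELAY */
    int ttl = 32;

    if (fd < 0)
        return 1;
    /* Per-socket defaults; the kernel change above additionally lets a
     * sender override these per packet via ancillary data carried in
     * the ipcm cookie, falling back to these values otherwise. */
    setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
    setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl));
    printf("socket %d configured\n", fd);
    close(fd);
    return 0;
}
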
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 193db03540ad..41e1d2845c8f 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -299,7 +299,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
/* Charge it to the socket. */
- ipv4_pktinfo_prepare(skb);
+ ipv4_pktinfo_prepare(sk, skb);
if (sock_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
@@ -519,6 +519,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.addr = inet->inet_saddr;
ipc.opt = NULL;
ipc.tx_flags = 0;
+ ipc.ttl = 0;
+ ipc.tos = -1;
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
@@ -558,7 +560,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
daddr = ipc.opt->opt.faddr;
}
}
- tos = RT_CONN_FLAGS(sk);
+ tos = get_rtconn_flags(&ipc, sk);
if (msg->msg_flags & MSG_DONTROUTE)
tos |= RTO_ONLINK;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 727f4365bcdf..d2d325382b13 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -295,7 +295,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
" %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
dst_entries_get_slow(&ipv4_dst_ops),
- st->in_hit,
+ 0, /* st->in_hit */
st->in_slow_tot,
st->in_slow_mc,
st->in_no_route,
@@ -303,16 +303,16 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
st->in_martian_dst,
st->in_martian_src,
- st->out_hit,
+ 0, /* st->out_hit */
st->out_slow_tot,
st->out_slow_mc,
- st->gc_total,
- st->gc_ignored,
- st->gc_goal_miss,
- st->gc_dst_overflow,
- st->in_hlist_search,
- st->out_hlist_search
+ 0, /* st->gc_total */
+ 0, /* st->gc_ignored */
+ 0, /* st->gc_goal_miss */
+ 0, /* st->gc_dst_overflow */
+ 0, /* st->in_hlist_search */
+ 0 /* st->out_hlist_search */
);
return 0;
}
@@ -2072,7 +2072,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
RT_SCOPE_LINK);
goto make_route;
}
- if (fl4->saddr) {
+ if (!fl4->saddr) {
if (ipv4_is_multicast(fl4->daddr))
fl4->saddr = inet_select_addr(dev_out, 0,
fl4->flowi4_scope);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 14a15c49129d..b95331e6c077 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -25,15 +25,7 @@
extern int sysctl_tcp_syncookies;
-__u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
-EXPORT_SYMBOL(syncookie_secret);
-
-static __init int init_syncookies(void)
-{
- get_random_bytes(syncookie_secret, sizeof(syncookie_secret));
- return 0;
-}
-__initcall(init_syncookies);
+static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
@@ -44,8 +36,11 @@ static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
u32 count, int c)
{
- __u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);
+ __u32 *tmp;
+
+ net_get_random_once(syncookie_secret, sizeof(syncookie_secret));
+ tmp = __get_cpu_var(ipv4_cookie_scratch);
memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
tmp[0] = (__force u32)saddr;
tmp[1] = (__force u32)daddr;
@@ -89,8 +84,7 @@ __u32 cookie_init_timestamp(struct request_sock *req)
static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
- __be16 dport, __u32 sseq, __u32 count,
- __u32 data)
+ __be16 dport, __u32 sseq, __u32 data)
{
/*
* Compute the secure sequence number.
@@ -102,7 +96,7 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
* As an extra hack, we add a small "data" value that encodes the
* MSS into the second hash value.
*/
-
+ u32 count = tcp_cookie_time();
return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
sseq + (count << COOKIEBITS) +
((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
@@ -114,22 +108,21 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
* If the syncookie is bad, the data returned will be out of
* range. This must be checked by the caller.
*
- * The count value used to generate the cookie must be within
- * "maxdiff" if the current (passed-in) "count". The return value
- * is (__u32)-1 if this test fails.
+ * The count value used to generate the cookie must be less than
+ * MAX_SYNCOOKIE_AGE minutes in the past.
+ * The return value is (__u32)-1 if this test fails.
*/
static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport, __u32 sseq,
- __u32 count, __u32 maxdiff)
+ __be16 sport, __be16 dport, __u32 sseq)
{
- __u32 diff;
+ u32 diff, count = tcp_cookie_time();
/* Strip away the layers from the cookie */
cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
/* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS);
- if (diff >= maxdiff)
+ if (diff >= MAX_SYNCOOKIE_AGE)
return (__u32)-1;
return (cookie -
@@ -138,22 +131,22 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
}
/*
- * MSS Values are taken from the 2009 paper
- * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
- * - values 1440 to 1460 accounted for 80% of observed mss values
- * - values outside the 536-1460 range are rare (<0.2%).
+ * MSS Values are chosen based on the 2011 paper
+ * 'An Analysis of TCP Maximum Segment Sizes' by S. Alcock and R. Nelson.
+ * Values ..
+ * .. lower than 536 are rare (< 0.2%)
+ * .. between 537 and 1299 account for less than 1.5% of observed values
+ * .. in the 1300-1349 range account for about 15 to 20% of observed mss values
+ * .. exceeding 1460 are very rare (< 0.04%)
*
- * Table must be sorted.
+ * 1460 is the single most frequently announced mss value (30 to 46% depending
+ * on monitor location). Table must be sorted.
*/
static __u16 const msstab[] = {
- 64,
- 512,
536,
- 1024,
- 1440,
+ 1300,
+ 1440, /* 1440, 1452: PPPoE */
1460,
- 4312,
- 8960,
};
/*
@@ -173,7 +166,7 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
th->source, th->dest, ntohl(th->seq),
- jiffies / (HZ * 60), mssind);
+ mssind);
}
EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
@@ -189,13 +182,6 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
}
/*
- * This (misnamed) value is the age of syncookie which is permitted.
- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
- * backoff) to compute at runtime so it's currently hardcoded here.
- */
-#define COUNTER_TRIES 4
-/*
* Check if a ack sequence number is a valid syncookie.
* Return the decoded mss if it is, or 0 if not.
*/
@@ -204,9 +190,7 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
{
__u32 seq = ntohl(th->seq) - 1;
__u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
- th->source, th->dest, seq,
- jiffies / (HZ * 60),
- COUNTER_TRIES);
+ th->source, th->dest, seq);
return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
@@ -315,10 +299,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
req->mss = mss;
- ireq->loc_port = th->dest;
- ireq->rmt_port = th->source;
- ireq->loc_addr = ip_hdr(skb)->daddr;
- ireq->rmt_addr = ip_hdr(skb)->saddr;
+ ireq->ir_num = ntohs(th->dest);
+ ireq->ir_rmt_port = th->source;
+ ireq->ir_loc_addr = ip_hdr(skb)->daddr;
+ ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
ireq->ecn_ok = ecn_ok;
ireq->snd_wscale = tcp_opt.snd_wscale;
ireq->sack_ok = tcp_opt.sack_ok;
@@ -358,8 +342,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
inet_sk_flowi_flags(sk),
- (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
- ireq->loc_addr, th->source, th->dest);
+ (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr,
+ ireq->ir_loc_addr, th->source, th->dest);
security_req_classify_flow(req, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(sock_net(sk), &fl4);
if (IS_ERR(rt)) {
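
The syncookie rework drops the caller-supplied count/maxdiff pair and derives freshness inside the helpers from tcp_cookie_time() against MAX_SYNCOOKIE_AGE. A self-contained model of the encode/decode arithmetic follows, with a toy hash in place of the keyed SHA1 mix and a placeholder MAX_AGE, showing how the MSS-table index survives the round trip while stale cookies are rejected.

#include <stdint.h>
#include <stdio.h>

#define COOKIEBITS 24
#define COOKIEMASK ((1u << COOKIEBITS) - 1)
#define MAX_AGE    2u /* accept cookies up to 2 time units old (placeholder) */

/* Toy stand-in for the keyed SHA1 mix in cookie_hash(); not secure. */
static uint32_t h(uint32_t saddr, uint32_t daddr, uint32_t c, uint32_t key)
{
    uint32_t x = saddr * 2654435761u ^ daddr * 40503u ^ (c + 1) * 2246822519u ^ key;
    x ^= x >> 15;
    return x * 2654435761u;
}

static uint32_t make_cookie(uint32_t s, uint32_t d, uint32_t sseq,
                            uint32_t count, uint32_t data)
{
    return h(s, d, 0, 1) + sseq + (count << COOKIEBITS) +
           ((h(s, d, count, 2) + data) & COOKIEMASK);
}

/* Returns the encoded data, or (uint32_t)-1 if the cookie is too old. */
static uint32_t check_cookie(uint32_t cookie, uint32_t s, uint32_t d,
                             uint32_t sseq, uint32_t now)
{
    uint32_t diff;

    cookie -= h(s, d, 0, 1) + sseq; /* now: count<<24 + (hash + data) % 2^24 */
    diff = (now - (cookie >> COOKIEBITS)) & ((uint32_t)-1 >> COOKIEBITS);
    if (diff >= MAX_AGE)
        return (uint32_t)-1;
    return (cookie - h(s, d, now - diff, 2)) & COOKIEMASK;
}

int main(void)
{
    uint32_t c = make_cookie(0x0a000001, 0x0a000002, 12345, 100, 3);

    printf("fresh, decoded index: %u\n",
           check_cookie(c, 0x0a000001, 0x0a000002, 12345, 101));
    printf("stale: %u\n",
           check_cookie(c, 0x0a000001, 0x0a000002, 12345, 105));
    return 0;
}
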
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 540279f4c531..d5b1390eebbe 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -43,12 +43,12 @@ static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
/* Update system visible IP port range */
-static void set_local_port_range(int range[2])
+static void set_local_port_range(struct net *net, int range[2])
{
- write_seqlock(&sysctl_local_ports.lock);
- sysctl_local_ports.range[0] = range[0];
- sysctl_local_ports.range[1] = range[1];
- write_sequnlock(&sysctl_local_ports.lock);
+ write_seqlock(&net->ipv4.sysctl_local_ports.lock);
+ net->ipv4.sysctl_local_ports.range[0] = range[0];
+ net->ipv4.sysctl_local_ports.range[1] = range[1];
+ write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
}
/* Validate changes from /proc interface. */
@@ -56,6 +56,8 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
+ struct net *net =
+ container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
int ret;
int range[2];
struct ctl_table tmp = {
@@ -66,14 +68,15 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
.extra2 = &ip_local_port_range_max,
};
- inet_get_local_port_range(range, range + 1);
+ inet_get_local_port_range(net, &range[0], &range[1]);
+
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
if (write && ret == 0) {
if (range[1] < range[0])
ret = -EINVAL;
else
- set_local_port_range(range);
+ set_local_port_range(net, range);
}
return ret;
@@ -83,23 +86,27 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
{
kgid_t *data = table->data;
+ struct net *net =
+ container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
unsigned int seq;
do {
- seq = read_seqbegin(&sysctl_local_ports.lock);
+ seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
*low = data[0];
*high = data[1];
- } while (read_seqretry(&sysctl_local_ports.lock, seq));
+ } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
}
/* Update system visible IP port range */
static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
{
kgid_t *data = table->data;
- write_seqlock(&sysctl_local_ports.lock);
+ struct net *net =
+ container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
+ write_seqlock(&net->ipv4.sysctl_local_ports.lock);
data[0] = low;
data[1] = high;
- write_sequnlock(&sysctl_local_ports.lock);
+ write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
}
/* Validate changes from /proc interface. */
@@ -193,49 +200,6 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
return ret;
}
-static int ipv4_tcp_mem(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
-{
- int ret;
- unsigned long vec[3];
- struct net *net = current->nsproxy->net_ns;
-#ifdef CONFIG_MEMCG_KMEM
- struct mem_cgroup *memcg;
-#endif
-
- struct ctl_table tmp = {
- .data = &vec,
- .maxlen = sizeof(vec),
- .mode = ctl->mode,
- };
-
- if (!write) {
- ctl->data = &net->ipv4.sysctl_tcp_mem;
- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
- }
-
- ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
- if (ret)
- return ret;
-
-#ifdef CONFIG_MEMCG_KMEM
- rcu_read_lock();
- memcg = mem_cgroup_from_task(current);
-
- tcp_prot_mem(memcg, vec[0], 0);
- tcp_prot_mem(memcg, vec[1], 1);
- tcp_prot_mem(memcg, vec[2], 2);
- rcu_read_unlock();
-#endif
-
- net->ipv4.sysctl_tcp_mem[0] = vec[0];
- net->ipv4.sysctl_tcp_mem[1] = vec[1];
- net->ipv4.sysctl_tcp_mem[2] = vec[2];
-
- return 0;
-}
-
static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
@@ -267,6 +231,11 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
ret = -EINVAL;
goto bad_key;
}
+ /* Generate a dummy secret but don't publish it. This
+ * is needed so we don't regenerate a new key on the
+ * first invocation of tcp_fastopen_cookie_gen
+ */
+ tcp_fastopen_init_key_once(false);
tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
}
@@ -475,13 +444,6 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec
},
{
- .procname = "ip_local_port_range",
- .data = &sysctl_local_ports.range,
- .maxlen = sizeof(sysctl_local_ports.range),
- .mode = 0644,
- .proc_handler = ipv4_local_port_range,
- },
- {
.procname = "ip_local_reserved_ports",
.data = NULL, /* initialized in sysctl_ipv4_init */
.maxlen = 65536,
@@ -552,6 +514,13 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "tcp_mem",
+ .maxlen = sizeof(sysctl_tcp_mem),
+ .data = &sysctl_tcp_mem,
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {
.procname = "tcp_wmem",
.data = &sysctl_tcp_wmem,
.maxlen = sizeof(sysctl_tcp_wmem),
@@ -854,10 +823,11 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec
},
{
- .procname = "tcp_mem",
- .maxlen = sizeof(init_net.ipv4.sysctl_tcp_mem),
+ .procname = "ip_local_port_range",
+ .maxlen = sizeof(init_net.ipv4.sysctl_local_ports.range),
+ .data = &init_net.ipv4.sysctl_local_ports.range,
.mode = 0644,
- .proc_handler = ipv4_tcp_mem,
+ .proc_handler = ipv4_local_port_range,
},
{ }
};
@@ -868,30 +838,15 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
table = ipv4_net_table;
if (!net_eq(net, &init_net)) {
+ int i;
+
table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
if (table == NULL)
goto err_alloc;
- table[0].data =
- &net->ipv4.sysctl_icmp_echo_ignore_all;
- table[1].data =
- &net->ipv4.sysctl_icmp_echo_ignore_broadcasts;
- table[2].data =
- &net->ipv4.sysctl_icmp_ignore_bogus_error_responses;
- table[3].data =
- &net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr;
- table[4].data =
- &net->ipv4.sysctl_icmp_ratelimit;
- table[5].data =
- &net->ipv4.sysctl_icmp_ratemask;
- table[6].data =
- &net->ipv4.sysctl_ping_group_range;
- table[7].data =
- &net->ipv4.sysctl_tcp_ecn;
-
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- table[0].procname = NULL;
+ /* Update the variables to point into the current struct net */
+ for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++)
+ table[i].data += (void *)net - (void *)&init_net;
}
/*
@@ -901,7 +856,12 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
- tcp_init_mem(net);
+ /*
+ * Set defaults for local port range
+ */
+ seqlock_init(&net->ipv4.sysctl_local_ports.lock);
+ net->ipv4.sysctl_local_ports.range[0] = 32768;
+ net->ipv4.sysctl_local_ports.range[1] = 61000;
net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
if (net->ipv4.ipv4_hdr == NULL)
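
Instead of patching each table entry's ->data pointer by hand, the per-netns setup now rebases every pointer by the byte offset between the new struct net and init_net. The same trick in miniature; note that, as in the kernel, this relies on subtracting pointers to two distinct objects, which ISO C leaves undefined but which expresses the layout-preserving intent exactly.

#include <stdio.h>
#include <string.h>

struct netns { int icmp_ratelimit; int tcp_ecn; };
struct ctl { const char *name; void *data; };

static struct netns init_ns = { .icmp_ratelimit = 1000, .tcp_ecn = 2 };

/* Template table: every data pointer points into init_ns. */
static const struct ctl template[] = {
    { "icmp_ratelimit", &init_ns.icmp_ratelimit },
    { "tcp_ecn",        &init_ns.tcp_ecn },
};

int main(void)
{
    struct netns other = { 0, 0 };
    struct ctl table[2];
    int i;

    memcpy(table, template, sizeof(template));
    /* Rebase each pointer from init_ns into other by the same offset,
     * mirroring: table[i].data += (void *)net - (void *)&init_net; */
    for (i = 0; i < 2; i++)
        table[i].data = (char *)table[i].data +
                        ((char *)&other - (char *)&init_ns);

    *(int *)table[0].data = 50; /* writes other.icmp_ratelimit */
    printf("%d %d\n", other.icmp_ratelimit, init_ns.icmp_ratelimit); /* 50 1000 */
    return 0;
}
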
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6e5617b9f9db..4f328544c075 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -288,9 +288,11 @@ int sysctl_tcp_min_tso_segs __read_mostly = 2;
struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);
+long sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;
+EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);
@@ -1429,7 +1431,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
do {
if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
last_issued, &done,
- &used) == DMA_SUCCESS) {
+ &used) == DMA_COMPLETE) {
/* Safe to free early-copied skbs now */
__skb_queue_purge(&sk->sk_async_wait_queue);
break;
@@ -1437,7 +1439,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
struct sk_buff *skb;
while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
(dma_async_is_complete(skb->dma_cookie, done,
- used) == DMA_SUCCESS)) {
+ used) == DMA_COMPLETE)) {
__skb_dequeue(&sk->sk_async_wait_queue);
kfree_skb(skb);
}
@@ -3097,13 +3099,13 @@ static int __init set_thash_entries(char *str)
}
__setup("thash_entries=", set_thash_entries);
-void tcp_init_mem(struct net *net)
+static void tcp_init_mem(void)
{
unsigned long limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
- net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
- net->ipv4.sysctl_tcp_mem[1] = limit;
- net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+ sysctl_tcp_mem[0] = limit / 4 * 3;
+ sysctl_tcp_mem[1] = limit;
+ sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
}
void __init tcp_init(void)
@@ -3137,10 +3139,9 @@ void __init tcp_init(void)
&tcp_hashinfo.ehash_mask,
0,
thash_entries ? 0 : 512 * 1024);
- for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
+ for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
- INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
- }
+
if (inet_ehash_locks_alloc(&tcp_hashinfo))
panic("TCP: failed to alloc ehash_locks");
tcp_hashinfo.bhash =
@@ -3166,7 +3167,7 @@ void __init tcp_init(void)
sysctl_tcp_max_orphans = cnt / 2;
sysctl_max_syn_backlog = max(128, cnt / 256);
- tcp_init_mem(&init_net);
+ tcp_init_mem();
/* Set per-socket limits to no more than 1/128 the pressure threshold */
limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
max_wshare = min(4UL*1024*1024, limit);
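
With tcp_mem back to a single global array, tcp_init_mem() derives the three thresholds from the page budget just as it did per netns before, minus the duplication. The arithmetic, spelled out with an arbitrary example page count:

#include <stdio.h>

int main(void)
{
    unsigned long nr_pages = 2UL * 1024 * 1024; /* e.g. 8 GB of 4 KiB pages */
    unsigned long limit = nr_pages / 8;
    unsigned long mem[3];

    if (limit < 128)
        limit = 128;
    mem[0] = limit / 4 * 3; /* below this: no memory pressure */
    mem[1] = limit;         /* pressure starts here */
    mem[2] = mem[0] * 2;    /* hard limit */
    printf("tcp_mem = %lu %lu %lu (pages)\n", mem[0], mem[1], mem[2]);
    return 0;
}
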
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index ab7bd35bb312..766032b4a6c3 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -14,6 +14,20 @@ struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
+void tcp_fastopen_init_key_once(bool publish)
+{
+ static u8 key[TCP_FASTOPEN_KEY_LENGTH];
+
+ /* tcp_fastopen_reset_cipher publishes the new context
+ * atomically, so we allow this race to happen here.
+ *
+ * All call sites of tcp_fastopen_cookie_gen also check
+ * for a valid cookie, so this is an acceptable risk.
+ */
+ if (net_get_random_once(key, sizeof(key)) && publish)
+ tcp_fastopen_reset_cipher(key, sizeof(key));
+}
+
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
struct tcp_fastopen_context *ctx =
@@ -70,6 +84,8 @@ void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
__be32 path[4] = { src, dst, 0, 0 };
struct tcp_fastopen_context *ctx;
+ tcp_fastopen_init_key_once(true);
+
rcu_read_lock();
ctx = rcu_dereference(tcp_fastopen_ctx);
if (ctx) {
@@ -78,14 +94,3 @@ void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
}
rcu_read_unlock();
}
-
-static int __init tcp_fastopen_init(void)
-{
- __u8 key[TCP_FASTOPEN_KEY_LENGTH];
-
- get_random_bytes(key, sizeof(key));
- tcp_fastopen_reset_cipher(key, sizeof(key));
- return 0;
-}
-
-late_initcall(tcp_fastopen_init);
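
The removed late_initcall meant every boot drew entropy for a Fast Open key even when the feature was never used; tcp_fastopen_init_key_once() defers that work to first use via net_get_random_once(). A userspace analogue of the lazy, run-once initialization, using pthread_once() with rand() standing in for get_random_bytes() (not cryptographically sound):

/* build with: cc -pthread lazy_key.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define KEY_LEN 16

static unsigned char key[KEY_LEN];
static pthread_once_t key_once = PTHREAD_ONCE_INIT;

static void init_key(void)
{
    int i;

    /* rand() is only a placeholder for get_random_bytes(); not secure. */
    for (i = 0; i < KEY_LEN; i++)
        key[i] = (unsigned char)(rand() & 0xff);
}

static const unsigned char *fastopen_key(void)
{
    /* Key material is generated on first use, not at startup, so no
     * entropy is consumed for a feature that may never be enabled. */
    pthread_once(&key_once, init_key);
    return key;
}

int main(void)
{
    printf("first key byte: %u\n", fastopen_key()[0]);
    return 0;
}
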
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 25a89eaa669d..63095b218b4a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -267,11 +267,31 @@ static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr
* 1. Tuning sk->sk_sndbuf, when connection enters established state.
*/
-static void tcp_fixup_sndbuf(struct sock *sk)
+static void tcp_sndbuf_expand(struct sock *sk)
{
- int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);
+ const struct tcp_sock *tp = tcp_sk(sk);
+ int sndmem, per_mss;
+ u32 nr_segs;
+
+ /* Worst case is non GSO/TSO : each frame consumes one skb
+ * and skb->head is kmalloced using power of two area of memory
+ */
+ per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
+ MAX_TCP_HEADER +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ per_mss = roundup_pow_of_two(per_mss) +
+ SKB_DATA_ALIGN(sizeof(struct sk_buff));
+
+ nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
+ nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
+
+ /* Fast Recovery (RFC 5681 3.2) :
+ * Cubic needs 1.7 factor, rounded to 2 to include
+ * extra cushion (application might react slowly to POLLOUT)
+ */
+ sndmem = 2 * nr_segs * per_mss;
- sndmem *= TCP_INIT_CWND;
if (sk->sk_sndbuf < sndmem)
sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
}
@@ -355,6 +375,12 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) *
tcp_default_init_rwnd(mss);
+ /* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency
+ * Allow enough cushion so that sender is not limited by our window
+ */
+ if (sysctl_tcp_moderate_rcvbuf)
+ rcvmem <<= 2;
+
if (sk->sk_rcvbuf < rcvmem)
sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
}
@@ -370,9 +396,11 @@ void tcp_init_buffer_space(struct sock *sk)
if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
tcp_fixup_rcvbuf(sk);
if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
- tcp_fixup_sndbuf(sk);
+ tcp_sndbuf_expand(sk);
tp->rcvq_space.space = tp->rcv_wnd;
+ tp->rcvq_space.time = tcp_time_stamp;
+ tp->rcvq_space.seq = tp->copied_seq;
maxwin = tcp_full_space(sk);
@@ -512,48 +540,62 @@ void tcp_rcv_space_adjust(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
int time;
- int space;
-
- if (tp->rcvq_space.time == 0)
- goto new_measure;
+ int copied;
time = tcp_time_stamp - tp->rcvq_space.time;
if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
return;
- space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
+ /* Number of bytes copied to user in last RTT */
+ copied = tp->copied_seq - tp->rcvq_space.seq;
+ if (copied <= tp->rcvq_space.space)
+ goto new_measure;
- space = max(tp->rcvq_space.space, space);
+ /* A bit of theory :
+ * copied = bytes received in previous RTT, our base window
+ * To cope with packet losses, we need a 2x factor
+ * To cope with slow start, where the sender grows its cwnd by 100%
+ * every RTT, we need a 4x factor, because the ACK we are sending
+ * now is for the next RTT, not the current one:
+ * <prev RTT . ><current RTT .. ><next RTT .... >
+ */
- if (tp->rcvq_space.space != space) {
- int rcvmem;
+ if (sysctl_tcp_moderate_rcvbuf &&
+ !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+ int rcvwin, rcvmem, rcvbuf;
- tp->rcvq_space.space = space;
+ /* minimal window to cope with packet losses, assuming
+ * steady state. Add some cushion because of small variations.
+ */
+ rcvwin = (copied << 1) + 16 * tp->advmss;
- if (sysctl_tcp_moderate_rcvbuf &&
- !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
- int new_clamp = space;
+ /* If rate increased by 25%,
+ * assume slow start, rcvwin = 3 * copied
+ * If rate increased by 50%,
+ * assume sender can use 2x growth, rcvwin = 4 * copied
+ */
+ if (copied >=
+ tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) {
+ if (copied >=
+ tp->rcvq_space.space + (tp->rcvq_space.space >> 1))
+ rcvwin <<= 1;
+ else
+ rcvwin += (rcvwin >> 1);
+ }
- /* Receive space grows, normalize in order to
- * take into account packet headers and sk_buff
- * structure overhead.
- */
- space /= tp->advmss;
- if (!space)
- space = 1;
- rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
- while (tcp_win_from_space(rcvmem) < tp->advmss)
- rcvmem += 128;
- space *= rcvmem;
- space = min(space, sysctl_tcp_rmem[2]);
- if (space > sk->sk_rcvbuf) {
- sk->sk_rcvbuf = space;
-
- /* Make the window clamp follow along. */
- tp->window_clamp = new_clamp;
- }
+ rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
+ while (tcp_win_from_space(rcvmem) < tp->advmss)
+ rcvmem += 128;
+
+ rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
+ if (rcvbuf > sk->sk_rcvbuf) {
+ sk->sk_rcvbuf = rcvbuf;
+
+ /* Make the window clamp follow along. */
+ tp->window_clamp = rcvwin;
}
}
+ tp->rcvq_space.space = copied;
new_measure:
tp->rcvq_space.seq = tp->copied_seq;
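
The rewritten tcp_rcv_space_adjust() sizes the advertised window from the bytes the application actually consumed in the last RTT, adding headroom when the consumption rate is still growing. A standalone sketch of just that sizing rule, lifted from the hunk above:

#include <stdio.h>

static unsigned int drs_rcvwin(unsigned int copied, unsigned int prev_space,
                               unsigned int advmss)
{
    unsigned int rcvwin = (copied << 1) + 16 * advmss; /* 2x + small cushion */

    if (copied >= prev_space + (prev_space >> 2)) {     /* rate grew >= 25% */
        if (copied >= prev_space + (prev_space >> 1))   /* rate grew >= 50% */
            rcvwin <<= 1;                               /* ~4x copied */
        else
            rcvwin += rcvwin >> 1;                      /* ~3x copied */
    }
    return rcvwin;
}

int main(void)
{
    printf("steady state: %u\n", drs_rcvwin(100000, 100000, 1460));
    printf("slow start:   %u\n", drs_rcvwin(200000, 100000, 1460));
    return 0;
}
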
@@ -713,7 +755,12 @@ static void tcp_update_pacing_rate(struct sock *sk)
if (tp->srtt > 8 + 2)
do_div(rate, tp->srtt);
- sk->sk_pacing_rate = min_t(u64, rate, ~0U);
+ /* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
+ * without any lock. We want to make sure the compiler won't store
+ * intermediate values in this location.
+ */
+ ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
+ sk->sk_max_pacing_rate);
}
/* Calculate rto without backoff. This is the second half of Van Jacobson's
@@ -1284,7 +1331,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
tp->lost_cnt_hint -= tcp_skb_pcount(prev);
}
- TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
+ TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+ TCP_SKB_CB(prev)->end_seq++;
+
if (skb == tcp_highest_sack(sk))
tcp_advance_highest_sack(sk, skb);
@@ -2853,7 +2903,8 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
* left edge of the send window.
* See draft-ietf-tcplw-high-performance-00, section 3.3.
*/
- if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
+ if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+ flag & FLAG_ACKED)
seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
if (seq_rtt < 0)
@@ -2868,14 +2919,19 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
}
/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
-static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
+static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
{
struct tcp_sock *tp = tcp_sk(sk);
s32 seq_rtt = -1;
- if (tp->lsndtime && !tp->total_retrans)
- seq_rtt = tcp_time_stamp - tp->lsndtime;
- tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
+ if (synack_stamp && !tp->total_retrans)
+ seq_rtt = tcp_time_stamp - synack_stamp;
+
+ /* If the ACK acks both the SYNACK and the (Fast Open'd) data packets
+ * sent in SYN_RECV, SYNACK RTT is the smooth RTT computed in tcp_ack()
+ */
+ if (!tp->srtt)
+ tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
}
static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
@@ -2970,7 +3026,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
const struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_buff *skb;
u32 now = tcp_time_stamp;
- int fully_acked = true;
+ bool fully_acked = true;
int flag = 0;
u32 pkts_acked = 0;
u32 reord = tp->packets_out;
@@ -2978,6 +3034,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
s32 seq_rtt = -1;
s32 ca_seq_rtt = -1;
ktime_t last_ackt = net_invalid_timestamp();
+ bool rtt_update;
while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
@@ -3054,14 +3111,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
flag |= FLAG_SACK_RENEGING;
- if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) ||
- (flag & FLAG_ACKED))
- tcp_rearm_rto(sk);
+ rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt);
if (flag & FLAG_ACKED) {
const struct tcp_congestion_ops *ca_ops
= inet_csk(sk)->icsk_ca_ops;
+ tcp_rearm_rto(sk);
if (unlikely(icsk->icsk_mtup.probe_size &&
!after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
tcp_mtup_probe_success(sk);
@@ -3100,6 +3156,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
}
+ } else if (skb && rtt_update && sack_rtt >= 0 &&
+ sack_rtt > (s32)(now - TCP_SKB_CB(skb)->when)) {
+ /* Do not re-arm RTO if the sack RTT is measured from data sent
+ * after when the head was last (re)transmitted. Otherwise the
+ * timeout may continue to extend in loss recovery.
+ */
+ tcp_rearm_rto(sk);
}
#if FASTRETRANS_DEBUG > 0
@@ -3288,7 +3351,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
tcp_init_cwnd_reduction(sk, true);
tcp_set_ca_state(sk, TCP_CA_CWR);
tcp_end_cwnd_reduction(sk);
- tcp_set_ca_state(sk, TCP_CA_Open);
+ tcp_try_keep_open(sk);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPLOSSPROBERECOVERY);
}
@@ -4701,15 +4764,7 @@ static void tcp_new_space(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
if (tcp_should_expand_sndbuf(sk)) {
- int sndmem = SKB_TRUESIZE(max_t(u32,
- tp->rx_opt.mss_clamp,
- tp->mss_cache) +
- MAX_TCP_HEADER);
- int demanded = max_t(unsigned int, tp->snd_cwnd,
- tp->reordering + 1);
- sndmem *= 2 * demanded;
- if (sndmem > sk->sk_sndbuf)
- sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
+ tcp_sndbuf_expand(sk);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
@@ -5584,6 +5639,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
struct request_sock *req;
int queued = 0;
bool acceptable;
+ u32 synack_stamp;
tp->rx_opt.saw_tstamp = 0;
@@ -5666,16 +5722,18 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* so release it.
*/
if (req) {
+ synack_stamp = tcp_rsk(req)->snt_synack;
tp->total_retrans = req->num_retrans;
reqsk_fastopen_remove(sk, req, false);
} else {
+ synack_stamp = tp->lsndtime;
/* Make sure socket is routed, for correct metrics. */
icsk->icsk_af_ops->rebuild_header(sk);
tcp_init_congestion_control(sk);
tcp_mtup_init(sk);
- tcp_init_buffer_space(sk);
tp->copied_seq = tp->rcv_nxt;
+ tcp_init_buffer_space(sk);
}
smp_mb();
tcp_set_state(sk, TCP_ESTABLISHED);
@@ -5691,7 +5749,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
- tcp_synack_rtt_meas(sk, req);
+ tcp_synack_rtt_meas(sk, synack_stamp);
if (tp->rx_opt.tstamp_ok)
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
@@ -5709,6 +5767,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
} else
tcp_init_metrics(sk);
+ tcp_update_pacing_rate(sk);
+
/* Prevent spurious tcp_cwnd_restart() on first data packet */
tp->lsndtime = tcp_time_stamp;
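
tcp_sndbuf_expand() now budgets for the true worst case: each in-flight MSS costs a power-of-two data allocation plus fixed per-packet overhead, and the whole thing is doubled as a recovery cushion. A simplified version of that estimate; the hdr_overhead and skb_overhead arguments are placeholders, not the real MAX_TCP_HEADER and skb/shared-info sizes.

#include <stdint.h>
#include <stdio.h>

/* Round v up to the next power of two (v > 0). */
static uint32_t roundup_pow2(uint32_t v)
{
    v--;
    v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
    return v + 1;
}

static uint32_t sndbuf_estimate(uint32_t mss, uint32_t cwnd, uint32_t reordering,
                                uint32_t hdr_overhead, uint32_t skb_overhead)
{
    uint32_t per_mss = roundup_pow2(mss + hdr_overhead) + skb_overhead;
    uint32_t nr_segs = cwnd > 10 ? cwnd : 10; /* at least the initial cwnd */

    if (nr_segs < reordering + 1)
        nr_segs = reordering + 1;
    return 2 * nr_segs * per_mss; /* 2x cushion for fast recovery */
}

int main(void)
{
    printf("%u bytes\n", sndbuf_estimate(1460, 10, 3, 320, 256));
    return 0;
}
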
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b14266bb91eb..300ab2c93f29 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -835,11 +835,11 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
skb = tcp_make_synack(sk, dst, req, NULL);
if (skb) {
- __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
+ __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
skb_set_queue_mapping(skb, queue_mapping);
- err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
- ireq->rmt_addr,
+ err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+ ireq->ir_rmt_addr,
ireq->opt);
err = net_xmit_eval(err);
if (!tcp_rsk(req)->snt_synack && !err)
@@ -972,7 +972,7 @@ static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
{
union tcp_md5_addr *addr;
- addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
+ addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
return tcp_md5_do_lookup(sk, addr, AF_INET);
}
@@ -1149,8 +1149,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
saddr = inet_sk(sk)->inet_saddr;
daddr = inet_sk(sk)->inet_daddr;
} else if (req) {
- saddr = inet_rsk(req)->loc_addr;
- daddr = inet_rsk(req)->rmt_addr;
+ saddr = inet_rsk(req)->ir_loc_addr;
+ daddr = inet_rsk(req)->ir_rmt_addr;
} else {
const struct iphdr *iph = ip_hdr(skb);
saddr = iph->saddr;
@@ -1366,8 +1366,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
kfree_skb(skb_synack);
return -1;
}
- err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
- ireq->rmt_addr, ireq->opt);
+ err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+ ireq->ir_rmt_addr, ireq->opt);
err = net_xmit_eval(err);
if (!err)
tcp_rsk(req)->snt_synack = tcp_time_stamp;
@@ -1410,8 +1410,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
inet_csk(child)->icsk_af_ops->rebuild_header(child);
tcp_init_congestion_control(child);
tcp_mtup_init(child);
- tcp_init_buffer_space(child);
tcp_init_metrics(child);
+ tcp_init_buffer_space(child);
/* Queue the data carried in the SYN packet. We need to first
* bump skb's refcnt because the caller will attempt to free it.
@@ -1502,8 +1502,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_openreq_init(req, &tmp_opt, skb);
ireq = inet_rsk(req);
- ireq->loc_addr = daddr;
- ireq->rmt_addr = saddr;
+ ireq->ir_loc_addr = daddr;
+ ireq->ir_rmt_addr = saddr;
ireq->no_srccheck = inet_sk(sk)->transparent;
ireq->opt = tcp_v4_save_options(skb);
@@ -1578,15 +1578,15 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
if (skb_synack) {
- __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
+ __tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
} else
goto drop_and_free;
if (likely(!do_fastopen)) {
int err;
- err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
- ireq->rmt_addr, ireq->opt);
+ err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+ ireq->ir_rmt_addr, ireq->opt);
err = net_xmit_eval(err);
if (err || want_cookie)
goto drop_and_free;
@@ -1644,9 +1644,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp = tcp_sk(newsk);
newinet = inet_sk(newsk);
ireq = inet_rsk(req);
- newinet->inet_daddr = ireq->rmt_addr;
- newinet->inet_rcv_saddr = ireq->loc_addr;
- newinet->inet_saddr = ireq->loc_addr;
+ newinet->inet_daddr = ireq->ir_rmt_addr;
+ newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+ newinet->inet_saddr = ireq->ir_loc_addr;
inet_opt = ireq->opt;
rcu_assign_pointer(newinet->inet_opt, inet_opt);
ireq->opt = NULL;
@@ -2194,18 +2194,6 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */
-static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
-{
- return hlist_nulls_empty(head) ? NULL :
- list_entry(head->first, struct inet_timewait_sock, tw_node);
-}
-
-static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
-{
- return !is_a_nulls(tw->tw_node.next) ?
- hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
-}
-
/*
* Get next listener socket follow cur. If cur is NULL, get first socket
* starting from bucket given in st->bucket; when st->bucket is zero the
@@ -2309,10 +2297,9 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
return rc;
}
-static inline bool empty_bucket(struct tcp_iter_state *st)
+static inline bool empty_bucket(const struct tcp_iter_state *st)
{
- return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
- hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
+ return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
/*
@@ -2329,7 +2316,6 @@ static void *established_get_first(struct seq_file *seq)
for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
struct sock *sk;
struct hlist_nulls_node *node;
- struct inet_timewait_sock *tw;
spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
/* Lockless fast path for the common case of empty buckets */
@@ -2345,18 +2331,7 @@ static void *established_get_first(struct seq_file *seq)
rc = sk;
goto out;
}
- st->state = TCP_SEQ_STATE_TIME_WAIT;
- inet_twsk_for_each(tw, node,
- &tcp_hashinfo.ehash[st->bucket].twchain) {
- if (tw->tw_family != st->family ||
- !net_eq(twsk_net(tw), net)) {
- continue;
- }
- rc = tw;
- goto out;
- }
spin_unlock_bh(lock);
- st->state = TCP_SEQ_STATE_ESTABLISHED;
}
out:
return rc;
@@ -2365,7 +2340,6 @@ out:
static void *established_get_next(struct seq_file *seq, void *cur)
{
struct sock *sk = cur;
- struct inet_timewait_sock *tw;
struct hlist_nulls_node *node;
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
@@ -2373,45 +2347,16 @@ static void *established_get_next(struct seq_file *seq, void *cur)
++st->num;
++st->offset;
- if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
- tw = cur;
- tw = tw_next(tw);
-get_tw:
- while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
- tw = tw_next(tw);
- }
- if (tw) {
- cur = tw;
- goto out;
- }
- spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
- st->state = TCP_SEQ_STATE_ESTABLISHED;
-
- /* Look for next non empty bucket */
- st->offset = 0;
- while (++st->bucket <= tcp_hashinfo.ehash_mask &&
- empty_bucket(st))
- ;
- if (st->bucket > tcp_hashinfo.ehash_mask)
- return NULL;
-
- spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
- sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
- } else
- sk = sk_nulls_next(sk);
+ sk = sk_nulls_next(sk);
sk_nulls_for_each_from(sk, node) {
if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
- goto found;
+ return sk;
}
- st->state = TCP_SEQ_STATE_TIME_WAIT;
- tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
- goto get_tw;
-found:
- cur = sk;
-out:
- return cur;
+ spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+ ++st->bucket;
+ return established_get_first(seq);
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
@@ -2464,10 +2409,9 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
if (rc)
break;
st->bucket = 0;
+ st->state = TCP_SEQ_STATE_ESTABLISHED;
/* Fallthrough */
case TCP_SEQ_STATE_ESTABLISHED:
- case TCP_SEQ_STATE_TIME_WAIT:
- st->state = TCP_SEQ_STATE_ESTABLISHED;
if (st->bucket > tcp_hashinfo.ehash_mask)
break;
rc = established_get_first(seq);
@@ -2524,7 +2468,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
break;
case TCP_SEQ_STATE_ESTABLISHED:
- case TCP_SEQ_STATE_TIME_WAIT:
rc = established_get_next(seq, v);
break;
}
@@ -2548,7 +2491,6 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
if (v != SEQ_START_TOKEN)
spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
break;
- case TCP_SEQ_STATE_TIME_WAIT:
case TCP_SEQ_STATE_ESTABLISHED:
if (v)
spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
@@ -2606,10 +2548,10 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
seq_printf(f, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n",
i,
- ireq->loc_addr,
+ ireq->ir_loc_addr,
ntohs(inet_sk(sk)->inet_sport),
- ireq->rmt_addr,
- ntohs(ireq->rmt_port),
+ ireq->ir_rmt_addr,
+ ntohs(ireq->ir_rmt_port),
TCP_SYN_RECV,
0, 0, /* could print option size, but that is af dependent. */
1, /* timers active (only the expire timer) */
@@ -2707,6 +2649,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
struct tcp_iter_state *st;
+ struct sock *sk = v;
int len;
if (v == SEQ_START_TOKEN) {
@@ -2721,14 +2664,14 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
switch (st->state) {
case TCP_SEQ_STATE_LISTENING:
case TCP_SEQ_STATE_ESTABLISHED:
- get_tcp4_sock(v, seq, st->num, &len);
+ if (sk->sk_state == TCP_TIME_WAIT)
+ get_timewait4_sock(v, seq, st->num, &len);
+ else
+ get_tcp4_sock(v, seq, st->num, &len);
break;
case TCP_SEQ_STATE_OPENREQ:
get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
break;
- case TCP_SEQ_STATE_TIME_WAIT:
- get_timewait4_sock(v, seq, st->num, &len);
- break;
}
seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
@@ -2806,6 +2749,7 @@ struct proto tcp_prot = {
.orphan_count = &tcp_orphan_count,
.memory_allocated = &tcp_memory_allocated,
.memory_pressure = &tcp_memory_pressure,
+ .sysctl_mem = sysctl_tcp_mem,
.sysctl_wmem = sysctl_tcp_wmem,
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
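
With time-wait sockets folded into the main ehash chain, the /proc walker no longer needs a separate TCP_SEQ_STATE_TIME_WAIT pass over a twchain; a single traversal dispatches on each entry's state, as tcp4_seq_show() now does. The shape of that change in a toy list walk:

#include <stdio.h>

enum state { ESTABLISHED, TIME_WAIT };

struct entry { enum state state; int id; struct entry *next; };

static void show(const struct entry *e)
{
    for (; e; e = e->next) {
        if (e->state == TIME_WAIT)
            printf("%d: time-wait\n", e->id);   /* ~ get_timewait4_sock() */
        else
            printf("%d: established\n", e->id); /* ~ get_tcp4_sock() */
    }
}

int main(void)
{
    struct entry c = { TIME_WAIT,   3, 0 };
    struct entry b = { ESTABLISHED, 2, &c };
    struct entry a = { ESTABLISHED, 1, &b };

    show(&a);
    return 0;
}
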
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 559d4ae6ebf4..03e9154f7e68 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -6,15 +6,10 @@
#include <linux/memcontrol.h>
#include <linux/module.h>
-static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
-{
- return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
-}
-
static void memcg_tcp_enter_memory_pressure(struct sock *sk)
{
if (sk->sk_cgrp->memory_pressure)
- *sk->sk_cgrp->memory_pressure = 1;
+ sk->sk_cgrp->memory_pressure = 1;
}
EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
@@ -27,34 +22,24 @@ int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
*/
struct res_counter *res_parent = NULL;
struct cg_proto *cg_proto, *parent_cg;
- struct tcp_memcontrol *tcp;
struct mem_cgroup *parent = parent_mem_cgroup(memcg);
- struct net *net = current->nsproxy->net_ns;
cg_proto = tcp_prot.proto_cgroup(memcg);
if (!cg_proto)
return 0;
- tcp = tcp_from_cgproto(cg_proto);
-
- tcp->tcp_prot_mem[0] = net->ipv4.sysctl_tcp_mem[0];
- tcp->tcp_prot_mem[1] = net->ipv4.sysctl_tcp_mem[1];
- tcp->tcp_prot_mem[2] = net->ipv4.sysctl_tcp_mem[2];
- tcp->tcp_memory_pressure = 0;
+ cg_proto->sysctl_mem[0] = sysctl_tcp_mem[0];
+ cg_proto->sysctl_mem[1] = sysctl_tcp_mem[1];
+ cg_proto->sysctl_mem[2] = sysctl_tcp_mem[2];
+ cg_proto->memory_pressure = 0;
+ cg_proto->memcg = memcg;
parent_cg = tcp_prot.proto_cgroup(parent);
if (parent_cg)
- res_parent = parent_cg->memory_allocated;
-
- res_counter_init(&tcp->tcp_memory_allocated, res_parent);
- percpu_counter_init(&tcp->tcp_sockets_allocated, 0);
+ res_parent = &parent_cg->memory_allocated;
- cg_proto->enter_memory_pressure = memcg_tcp_enter_memory_pressure;
- cg_proto->memory_pressure = &tcp->tcp_memory_pressure;
- cg_proto->sysctl_mem = tcp->tcp_prot_mem;
- cg_proto->memory_allocated = &tcp->tcp_memory_allocated;
- cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated;
- cg_proto->memcg = memcg;
+ res_counter_init(&cg_proto->memory_allocated, res_parent);
+ percpu_counter_init(&cg_proto->sockets_allocated, 0);
return 0;
}
@@ -63,21 +48,17 @@ EXPORT_SYMBOL(tcp_init_cgroup);
void tcp_destroy_cgroup(struct mem_cgroup *memcg)
{
struct cg_proto *cg_proto;
- struct tcp_memcontrol *tcp;
cg_proto = tcp_prot.proto_cgroup(memcg);
if (!cg_proto)
return;
- tcp = tcp_from_cgproto(cg_proto);
- percpu_counter_destroy(&tcp->tcp_sockets_allocated);
+ percpu_counter_destroy(&cg_proto->sockets_allocated);
}
EXPORT_SYMBOL(tcp_destroy_cgroup);
static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
{
- struct net *net = current->nsproxy->net_ns;
- struct tcp_memcontrol *tcp;
struct cg_proto *cg_proto;
u64 old_lim;
int i;
@@ -90,16 +71,14 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
if (val > RES_COUNTER_MAX)
val = RES_COUNTER_MAX;
- tcp = tcp_from_cgproto(cg_proto);
-
- old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
- ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val);
+ old_lim = res_counter_read_u64(&cg_proto->memory_allocated, RES_LIMIT);
+ ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
if (ret)
return ret;
for (i = 0; i < 3; i++)
- tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
- net->ipv4.sysctl_tcp_mem[i]);
+ cg_proto->sysctl_mem[i] = min_t(long, val >> PAGE_SHIFT,
+ sysctl_tcp_mem[i]);
if (val == RES_COUNTER_MAX)
clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
@@ -156,28 +135,24 @@ static int tcp_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
{
- struct tcp_memcontrol *tcp;
struct cg_proto *cg_proto;
cg_proto = tcp_prot.proto_cgroup(memcg);
if (!cg_proto)
return default_val;
- tcp = tcp_from_cgproto(cg_proto);
- return res_counter_read_u64(&tcp->tcp_memory_allocated, type);
+ return res_counter_read_u64(&cg_proto->memory_allocated, type);
}
static u64 tcp_read_usage(struct mem_cgroup *memcg)
{
- struct tcp_memcontrol *tcp;
struct cg_proto *cg_proto;
cg_proto = tcp_prot.proto_cgroup(memcg);
if (!cg_proto)
return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;
- tcp = tcp_from_cgproto(cg_proto);
- return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+ return res_counter_read_u64(&cg_proto->memory_allocated, RES_USAGE);
}
static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -205,54 +180,25 @@ static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
static int tcp_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
{
struct mem_cgroup *memcg;
- struct tcp_memcontrol *tcp;
struct cg_proto *cg_proto;
memcg = mem_cgroup_from_css(css);
cg_proto = tcp_prot.proto_cgroup(memcg);
if (!cg_proto)
return 0;
- tcp = tcp_from_cgproto(cg_proto);
switch (event) {
case RES_MAX_USAGE:
- res_counter_reset_max(&tcp->tcp_memory_allocated);
+ res_counter_reset_max(&cg_proto->memory_allocated);
break;
case RES_FAILCNT:
- res_counter_reset_failcnt(&tcp->tcp_memory_allocated);
+ res_counter_reset_failcnt(&cg_proto->memory_allocated);
break;
}
return 0;
}
-unsigned long long tcp_max_memory(const struct mem_cgroup *memcg)
-{
- struct tcp_memcontrol *tcp;
- struct cg_proto *cg_proto;
-
- cg_proto = tcp_prot.proto_cgroup((struct mem_cgroup *)memcg);
- if (!cg_proto)
- return 0;
-
- tcp = tcp_from_cgproto(cg_proto);
- return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-}
-
-void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx)
-{
- struct tcp_memcontrol *tcp;
- struct cg_proto *cg_proto;
-
- cg_proto = tcp_prot.proto_cgroup(memcg);
- if (!cg_proto)
- return;
-
- tcp = tcp_from_cgproto(cg_proto);
-
- tcp->tcp_prot_mem[idx] = val;
-}
-
static struct cftype tcp_files[] = {
{
.name = "kmem.tcp.limit_in_bytes",
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 52f3c6b971d2..2ab09cbae74d 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -215,13 +215,15 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
addr.family = req->rsk_ops->family;
switch (addr.family) {
case AF_INET:
- addr.addr.a4 = inet_rsk(req)->rmt_addr;
+ addr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
hash = (__force unsigned int) addr.addr.a4;
break;
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
- hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
+ *(struct in6_addr *)addr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
+ hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
break;
+#endif
default:
return NULL;
}
@@ -240,7 +242,6 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
- struct inet6_timewait_sock *tw6;
struct tcp_metrics_block *tm;
struct inetpeer_addr addr;
unsigned int hash;
@@ -252,11 +253,12 @@ static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock
addr.addr.a4 = tw->tw_daddr;
hash = (__force unsigned int) addr.addr.a4;
break;
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- tw6 = inet6_twsk((struct sock *)tw);
- *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
- hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
+ *(struct in6_addr *)addr.addr.a6 = tw->tw_v6_daddr;
+ hash = ipv6_addr_hash(&tw->tw_v6_daddr);
break;
+#endif
default:
return NULL;
}
@@ -288,10 +290,12 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
addr.addr.a4 = inet_sk(sk)->inet_daddr;
hash = (__force unsigned int) addr.addr.a4;
break;
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
- hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
+ *(struct in6_addr *)addr.addr.a6 = sk->sk_v6_daddr;
+ hash = ipv6_addr_hash(&sk->sk_v6_daddr);
break;
+#endif
default:
return NULL;
}
@@ -667,8 +671,9 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
write_seqlock_bh(&fastopen_seqlock);
- tfom->mss = mss;
- if (cookie->len > 0)
+ if (mss)
+ tfom->mss = mss;
+ if (cookie && cookie->len > 0)
tfom->cookie = *cookie;
if (syn_lost) {
++tfom->syn_loss;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 58a3e69aef64..97b684159861 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -293,12 +293,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
- struct inet6_timewait_sock *tw6;
- tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
- tw6 = inet6_twsk((struct sock *)tw);
- tw6->tw_v6_daddr = np->daddr;
- tw6->tw_v6_rcv_saddr = np->rcv_saddr;
+ tw->tw_v6_daddr = sk->sk_v6_daddr;
+ tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
tw->tw_tclass = np->tclass;
tw->tw_ipv6only = np->ipv6only;
}
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 3a7525e6c086..a2b68a108eae 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -14,10 +14,11 @@
#include <net/tcp.h>
#include <net/protocol.h>
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
+ unsigned int sum_truesize = 0;
struct tcphdr *th;
unsigned int thlen;
unsigned int seq;
@@ -56,6 +57,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
SKB_GSO_TCP_ECN |
SKB_GSO_TCPV6 |
SKB_GSO_GRE |
+ SKB_GSO_IPIP |
+ SKB_GSO_SIT |
SKB_GSO_MPLS |
SKB_GSO_UDP_TUNNEL |
0) ||
@@ -102,13 +105,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
if (copy_destructor) {
skb->destructor = gso_skb->destructor;
skb->sk = gso_skb->sk;
- /* {tcp|sock}_wfree() use exact truesize accounting :
- * sum(skb->truesize) MUST be exactly be gso_skb->truesize
- * So we account mss bytes of 'true size' for each segment.
- * The last segment will contain the remaining.
- */
- skb->truesize = mss;
- gso_skb->truesize -= mss;
+ sum_truesize += skb->truesize;
}
skb = skb->next;
th = tcp_hdr(skb);
@@ -125,7 +122,9 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
if (copy_destructor) {
swap(gso_skb->sk, skb->sk);
swap(gso_skb->destructor, skb->destructor);
- swap(gso_skb->truesize, skb->truesize);
+ sum_truesize += skb->truesize;
+ atomic_add(sum_truesize - gso_skb->truesize,
+ &skb->sk->sk_wmem_alloc);
}
delta = htonl(oldlen + (skb_tail_pointer(skb) -
@@ -139,7 +138,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
out:
return segs;
}
-EXPORT_SYMBOL(tcp_tso_segment);
+EXPORT_SYMBOL(tcp_gso_segment);
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
@@ -320,7 +319,7 @@ static int tcp4_gro_complete(struct sk_buff *skb)
static const struct net_offload tcpv4_offload = {
.callbacks = {
.gso_send_check = tcp_v4_gso_send_check,
- .gso_segment = tcp_tso_segment,
+ .gso_segment = tcp_gso_segment,
.gro_receive = tcp4_gro_receive,
.gro_complete = tcp4_gro_complete,
},
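
The tcp_gso_segment() change stops carving a fixed mss-sized slice of truesize off the parent for every segment and instead sums the real per-segment truesizes, adjusting sk_wmem_alloc by the difference once at the end. The bookkeeping in plain integers:

#include <stdio.h>

int main(void)
{
    int wmem_alloc = 4096;   /* socket write-memory charge holding the parent */
    int parent_truesize = 4096;
    int seg_truesize[] = { 1700, 1700, 900 };
    int sum = 0, i;

    for (i = 0; i < 3; i++)
        sum += seg_truesize[i];

    /* One adjustment at the end, like the atomic_add() in the hunk above. */
    wmem_alloc += sum - parent_truesize;
    printf("wmem_alloc = %d\n", wmem_alloc); /* 4300 */
    return 0;
}
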
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e6bb8256e59f..672854664ff5 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -637,6 +637,8 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
unsigned int size = 0;
unsigned int eff_sacks;
+ opts->options = 0;
+
#ifdef CONFIG_TCP_MD5SIG
*md5 = tp->af_specific->md5_lookup(sk, sk);
if (unlikely(*md5)) {
@@ -848,15 +850,15 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
BUG_ON(!skb || !tcp_skb_pcount(skb));
- /* If congestion control is doing timestamping, we must
- * take such a timestamp before we potentially clone/copy.
- */
- if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
- __net_timestamp(skb);
-
- if (likely(clone_it)) {
+ if (clone_it) {
const struct sk_buff *fclone = skb + 1;
+ /* If congestion control is doing timestamping, we must
+ * take such a timestamp before we potentially clone/copy.
+ */
+ if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
+ __net_timestamp(skb);
+
if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
fclone->fclone == SKB_FCLONE_CLONE))
NET_INC_STATS_BH(sock_net(sk),
@@ -984,8 +986,10 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
- if (skb->len <= mss_now || !sk_can_gso(sk) ||
- skb->ip_summed == CHECKSUM_NONE) {
+ /* Make sure we own this skb before messing gso_size/gso_segs */
+ WARN_ON_ONCE(skb_cloned(skb));
+
+ if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
/* Avoid the costly divide in the normal
* non-TSO case.
*/
@@ -1065,9 +1069,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
if (nsize < 0)
nsize = 0;
- if (skb_cloned(skb) &&
- skb_is_nonlinear(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_unclone(skb, GFP_ATOMIC))
return -ENOMEM;
/* Get a new skb... force flag on. */
@@ -2342,6 +2344,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
int oldpcount = tcp_skb_pcount(skb);
if (unlikely(oldpcount > 1)) {
+ if (skb_unclone(skb, GFP_ATOMIC))
+ return -ENOMEM;
tcp_init_tso_segs(sk, skb, cur_mss);
tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
}
@@ -2349,21 +2353,6 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
tcp_retrans_try_collapse(sk, skb, cur_mss);
- /* Some Solaris stacks overoptimize and ignore the FIN on a
- * retransmit when old data is attached. So strip it off
- * since it is cheap to do so and saves bytes on the network.
- */
- if (skb->len > 0 &&
- (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
- tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
- if (!pskb_trim(skb, 0)) {
- /* Reuse, even though it does some unnecessary work */
- tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
- TCP_SKB_CB(skb)->tcp_flags);
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
-
/* Make a copy, if the first transmission SKB clone we made
* is still in somebody's hands, else make a clone.
*/
@@ -2732,8 +2721,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
th->syn = 1;
th->ack = 1;
TCP_ECN_make_synack(req, th);
- th->source = ireq->loc_port;
- th->dest = ireq->rmt_port;
+ th->source = htons(ireq->ir_num);
+ th->dest = ireq->ir_rmt_port;
/* Setting of flags are superfluous here for callers (and ECE is
* not even correctly set)
*/
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 611beab38a00..8b97d71e193b 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -101,22 +101,6 @@ static inline int tcp_probe_avail(void)
si4.sin_addr.s_addr = inet->inet_##mem##addr; \
} while (0) \
-#if IS_ENABLED(CONFIG_IPV6)
-#define tcp_probe_copy_fl_to_si6(inet, si6, mem) \
- do { \
- struct ipv6_pinfo *pi6 = inet->pinet6; \
- si6.sin6_family = AF_INET6; \
- si6.sin6_port = inet->inet_##mem##port; \
- si6.sin6_addr = pi6->mem##addr; \
- si6.sin6_flowinfo = 0; /* No need here. */ \
- si6.sin6_scope_id = 0; /* No need here. */ \
- } while (0)
-#else
-#define tcp_probe_copy_fl_to_si6(fl, si6, mem) \
- do { \
- memset(&si6, 0, sizeof(si6)); \
- } while (0)
-#endif
/*
* Hook inserted to be called before each receive packet.
@@ -147,8 +131,17 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
break;
case AF_INET6:
- tcp_probe_copy_fl_to_si6(inet, p->src.v6, s);
- tcp_probe_copy_fl_to_si6(inet, p->dst.v6, d);
+ memset(&p->src.v6, 0, sizeof(p->src.v6));
+ memset(&p->dst.v6, 0, sizeof(p->dst.v6));
+#if IS_ENABLED(CONFIG_IPV6)
+ p->src.v6.sin6_family = AF_INET6;
+ p->src.v6.sin6_port = inet->inet_sport;
+ p->src.v6.sin6_addr = inet6_sk(sk)->saddr;
+
+ p->dst.v6.sin6_family = AF_INET6;
+ p->dst.v6.sin6_port = inet->inet_dport;
+ p->dst.v6.sin6_addr = sk->sk_v6_daddr;
+#endif
break;
default:
BUG();
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 4b85e6f636c9..64f0354c84c7 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -156,12 +156,16 @@ static bool retransmits_timed_out(struct sock *sk,
static int tcp_write_timeout(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
int retry_until;
bool do_reset, syn_set = false;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
- if (icsk->icsk_retransmits)
+ if (icsk->icsk_retransmits) {
dst_negative_advice(sk);
+ if (tp->syn_fastopen || tp->syn_data)
+ tcp_fastopen_cache_set(sk, 0, NULL, true);
+ }
retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
syn_set = true;
} else {
@@ -374,9 +378,8 @@ void tcp_retransmit_timer(struct sock *sk)
}
#if IS_ENABLED(CONFIG_IPV6)
else if (sk->sk_family == AF_INET6) {
- struct ipv6_pinfo *np = inet6_sk(sk);
LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
- &np->daddr,
+ &sk->sk_v6_daddr,
ntohs(inet->inet_dport), inet->inet_num,
tp->snd_una, tp->snd_nxt);
}
diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h
index 6c0eea2f8249..0531b99d8637 100644
--- a/net/ipv4/tcp_vegas.h
+++ b/net/ipv4/tcp_vegas.h
@@ -15,10 +15,10 @@ struct vegas {
u32 baseRTT; /* the min of all Vegas RTT measurements seen (in usec) */
};
-extern void tcp_vegas_init(struct sock *sk);
-extern void tcp_vegas_state(struct sock *sk, u8 ca_state);
-extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
-extern void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
-extern void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
+void tcp_vegas_init(struct sock *sk);
+void tcp_vegas_state(struct sock *sk, u8 ca_state);
+void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
+void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
+void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
#endif /* __TCP_VEGAS_H */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 0ca44df51ee9..89909dd730dd 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -103,6 +103,7 @@
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
+#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
@@ -219,7 +220,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
unsigned short first, last;
DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
rand = net_random();
@@ -406,6 +407,18 @@ static inline int compute_score2(struct sock *sk, struct net *net,
return score;
}
+static unsigned int udp_ehashfn(struct net *net, const __be32 laddr,
+ const __u16 lport, const __be32 faddr,
+ const __be16 fport)
+{
+ static u32 udp_ehash_secret __read_mostly;
+
+ net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
+
+ return __inet_ehashfn(laddr, lport, faddr, fport,
+ udp_ehash_secret + net_hash_mix(net));
+}
+
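The helper above seeds the UDP flow hash with a boot-time random secret that is initialised lazily on first use (net_get_random_once()) and perturbed per network namespace (net_hash_mix()). A minimal userspace sketch of the same initialise-once pattern — names and the mixing step below are illustrative only, not the kernel's implementation:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

static uint32_t hash_secret;
static pthread_once_t secret_once = PTHREAD_ONCE_INIT;

static void init_secret(void)
{
	/* stand-in for get_random_bytes(); not cryptographically strong */
	srandom((unsigned int)time(NULL) ^ (unsigned int)getpid());
	hash_secret = (uint32_t)random();
}

static uint32_t flow_hash(uint32_t laddr, uint16_t lport,
			  uint32_t faddr, uint16_t fport)
{
	uint32_t h;

	pthread_once(&secret_once, init_secret);	/* runs at most once */
	h = laddr ^ faddr ^ (((uint32_t)lport << 16) | fport);
	h ^= hash_secret;		/* make the hash unpredictable */
	h *= 0x9e3779b1u;		/* illustrative avalanche step */
	return h;
}

int main(void)
{
	printf("%08x\n", flow_hash(0x7f000001, 4242, 0x0a000001, 53));
	return 0;
}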
/* called with read_rcu_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
@@ -429,8 +442,8 @@ begin:
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
- hash = inet_ehashfn(net, daddr, hnum,
- saddr, sport);
+ hash = udp_ehashfn(net, daddr, hnum,
+ saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
@@ -510,8 +523,8 @@ begin:
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
- hash = inet_ehashfn(net, daddr, hnum,
- saddr, sport);
+ hash = udp_ehashfn(net, daddr, hnum,
+ saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
@@ -565,6 +578,26 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
+static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
+ __be16 loc_port, __be32 loc_addr,
+ __be16 rmt_port, __be32 rmt_addr,
+ int dif, unsigned short hnum)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ if (!net_eq(sock_net(sk), net) ||
+ udp_sk(sk)->udp_port_hash != hnum ||
+ (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
+ (inet->inet_dport != rmt_port && inet->inet_dport) ||
+ (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
+ ipv6_only_sock(sk) ||
+ (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+ return false;
+ if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
+ return false;
+ return true;
+}
+
static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
__be16 loc_port, __be32 loc_addr,
__be16 rmt_port, __be32 rmt_addr,
@@ -575,20 +608,11 @@ static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
unsigned short hnum = ntohs(loc_port);
sk_nulls_for_each_from(s, node) {
- struct inet_sock *inet = inet_sk(s);
-
- if (!net_eq(sock_net(s), net) ||
- udp_sk(s)->udp_port_hash != hnum ||
- (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
- (inet->inet_dport != rmt_port && inet->inet_dport) ||
- (inet->inet_rcv_saddr &&
- inet->inet_rcv_saddr != loc_addr) ||
- ipv6_only_sock(s) ||
- (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
- continue;
- if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
- continue;
- goto found;
+ if (__udp_is_mcast_sock(net, s,
+ loc_port, loc_addr,
+ rmt_port, rmt_addr,
+ dif, hnum))
+ goto found;
}
s = NULL;
found:
@@ -855,6 +879,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.opt = NULL;
ipc.tx_flags = 0;
+ ipc.ttl = 0;
+ ipc.tos = -1;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
@@ -938,7 +964,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
faddr = ipc.opt->opt.faddr;
connected = 0;
}
- tos = RT_TOS(inet->tos);
+ tos = get_rttos(&ipc, inet);
if (sock_flag(sk, SOCK_LOCALROUTE) ||
(msg->msg_flags & MSG_DONTROUTE) ||
(ipc.opt && ipc.opt->opt.is_strictroute)) {
@@ -1403,8 +1429,10 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
- if (inet_sk(sk)->inet_daddr)
+ if (inet_sk(sk)->inet_daddr) {
sock_rps_save_rxhash(sk, skb);
+ sk_mark_napi_id(sk, skb);
+ }
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0) {
@@ -1528,7 +1556,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
rc = 0;
- ipv4_pktinfo_prepare(skb);
+ ipv4_pktinfo_prepare(sk, skb);
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
rc = __udp_queue_rcv_skb(sk, skb);
@@ -1577,6 +1605,14 @@ static void flush_stack(struct sock **stack, unsigned int count,
kfree_skb(skb1);
}
+static void udp_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ dst_hold(dst);
+ sk->sk_rx_dst = dst;
+}
+
/*
* Multicasts and broadcasts go to each listener.
*
@@ -1705,16 +1741,32 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (udp4_csum_init(skb, uh, proto))
goto csum_error;
- if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
- return __udp4_lib_mcast_deliver(net, skb, uh,
- saddr, daddr, udptable);
+ if (skb->sk) {
+ int ret;
+ sk = skb->sk;
+
+ if (unlikely(sk->sk_rx_dst == NULL))
+ udp_sk_rx_dst_set(sk, skb);
- sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+ ret = udp_queue_rcv_skb(sk, skb);
+
+ /* a return value > 0 means we have to resubmit the input, but
+ * the caller wants the return value to be -protocol or 0
+ */
+ if (ret > 0)
+ return -ret;
+ return 0;
+ } else {
+ if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
+ return __udp4_lib_mcast_deliver(net, skb, uh,
+ saddr, daddr, udptable);
+
+ sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+ }
if (sk != NULL) {
int ret;
- sk_mark_napi_id(sk, skb);
ret = udp_queue_rcv_skb(sk, skb);
sock_put(sk);
@@ -1768,6 +1820,135 @@ drop:
return 0;
}
+/* We can only early demux multicast if there is a single matching socket.
+ * If more than one socket is found, return NULL.
+ */
+static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
+ __be16 loc_port, __be32 loc_addr,
+ __be16 rmt_port, __be32 rmt_addr,
+ int dif)
+{
+ struct sock *sk, *result;
+ struct hlist_nulls_node *node;
+ unsigned short hnum = ntohs(loc_port);
+ unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
+ struct udp_hslot *hslot = &udp_table.hash[slot];
+
+ rcu_read_lock();
+begin:
+ count = 0;
+ result = NULL;
+ sk_nulls_for_each_rcu(sk, node, &hslot->head) {
+ if (__udp_is_mcast_sock(net, sk,
+ loc_port, loc_addr,
+ rmt_port, rmt_addr,
+ dif, hnum)) {
+ result = sk;
+ ++count;
+ }
+ }
+ /*
+ * If the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart the lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (get_nulls_value(node) != slot)
+ goto begin;
+
+ if (result) {
+ if (count != 1 ||
+ unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
+ result = NULL;
+ else if (unlikely(!__udp_is_mcast_sock(net, result,
+ loc_port, loc_addr,
+ rmt_port, rmt_addr,
+ dif, hnum))) {
+ sock_put(result);
+ result = NULL;
+ }
+ }
+ rcu_read_unlock();
+ return result;
+}
+
+/* For unicast we should only early demux connected sockets or we can
+ * break forwarding setups. The chains here can be long so only check
+ * if the first socket is an exact match and, if not, move on.
+ */
+static struct sock *__udp4_lib_demux_lookup(struct net *net,
+ __be16 loc_port, __be32 loc_addr,
+ __be16 rmt_port, __be32 rmt_addr,
+ int dif)
+{
+ struct sock *sk, *result;
+ struct hlist_nulls_node *node;
+ unsigned short hnum = ntohs(loc_port);
+ unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
+ unsigned int slot2 = hash2 & udp_table.mask;
+ struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
+ INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr)
+ const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
+
+ rcu_read_lock();
+ result = NULL;
+ udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
+ if (INET_MATCH(sk, net, acookie,
+ rmt_addr, loc_addr, ports, dif))
+ result = sk;
+ /* Only check first socket in chain */
+ break;
+ }
+
+ if (result) {
+ if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
+ result = NULL;
+ else if (unlikely(!INET_MATCH(sk, net, acookie,
+ rmt_addr, loc_addr,
+ ports, dif))) {
+ sock_put(result);
+ result = NULL;
+ }
+ }
+ rcu_read_unlock();
+ return result;
+}
+
+void udp_v4_early_demux(struct sk_buff *skb)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ const struct udphdr *uh = udp_hdr(skb);
+ struct sock *sk;
+ struct dst_entry *dst;
+ struct net *net = dev_net(skb->dev);
+ int dif = skb->dev->ifindex;
+
+ /* validate the packet */
+ if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
+ return;
+
+ if (skb->pkt_type == PACKET_BROADCAST ||
+ skb->pkt_type == PACKET_MULTICAST)
+ sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
+ uh->source, iph->saddr, dif);
+ else if (skb->pkt_type == PACKET_HOST)
+ sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
+ uh->source, iph->saddr, dif);
+ else
+ return;
+
+ if (!sk)
+ return;
+
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+ dst = sk->sk_rx_dst;
+
+ if (dst)
+ dst = dst_check(dst, 0);
+ if (dst)
+ skb_dst_set_noref(skb, dst);
+}
+
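For context (not part of this hunk): udp_v4_early_demux() only takes effect once it is registered as the UDP entry in the af_inet protocol table, where ip_rcv_finish() invokes ->early_demux before the routing decision whenever net.ipv4.ip_early_demux is enabled. A hedged sketch of that registration — the actual net/ipv4/af_inet.c change is assumed rather than shown in this excerpt:

/* sketch of the expected net/ipv4/af_inet.c hookup; not in this diff */
static struct net_protocol udp_protocol = {
	.early_demux =	udp_v4_early_demux,	/* called from ip_rcv_finish() */
	.handler =	udp_rcv,
	.err_handler =	udp_err,
	.no_policy =	1,
	.netns_ok =	1,
};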
int udp_rcv(struct sk_buff *skb)
{
return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 5a681e298b90..f3c27899f62b 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -5,30 +5,30 @@
#include <net/protocol.h>
#include <net/inet_common.h>
-extern int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int );
-extern void __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
+int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int);
+void __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
-extern int udp_v4_get_port(struct sock *sk, unsigned short snum);
+int udp_v4_get_port(struct sock *sk, unsigned short snum);
-extern int udp_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
-extern int udp_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
+int udp_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
+int udp_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
#ifdef CONFIG_COMPAT
-extern int compat_udp_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
-extern int compat_udp_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
+int compat_udp_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
+int compat_udp_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
#endif
-extern int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags, int *addr_len);
-extern int udp_sendpage(struct sock *sk, struct page *page, int offset,
- size_t size, int flags);
-extern int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-extern void udp_destroy_sock(struct sock *sk);
+int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len, int noblock, int flags, int *addr_len);
+int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
+ int flags);
+int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+void udp_destroy_sock(struct sock *sk);
#ifdef CONFIG_PROC_FS
-extern int udp4_seq_show(struct seq_file *seq, void *v);
+int udp4_seq_show(struct seq_file *seq, void *v);
#endif
#endif /* _UDP4_IMPL_H */
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index f35eccaa855e..83206de2bc76 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -52,6 +52,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_IPIP |
SKB_GSO_GRE | SKB_GSO_MPLS) ||
!(type & (SKB_GSO_UDP))))
goto out;
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index b5663c37f089..31b18152528f 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -16,13 +16,13 @@
#include <net/xfrm.h>
/* Informational hook. The decap is still done here. */
-static struct xfrm_tunnel __rcu *rcv_notify_handlers __read_mostly;
+static struct xfrm_tunnel_notifier __rcu *rcv_notify_handlers __read_mostly;
static DEFINE_MUTEX(xfrm4_mode_tunnel_input_mutex);
-int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler)
+int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler)
{
- struct xfrm_tunnel __rcu **pprev;
- struct xfrm_tunnel *t;
+ struct xfrm_tunnel_notifier __rcu **pprev;
+ struct xfrm_tunnel_notifier *t;
int ret = -EEXIST;
int priority = handler->priority;
@@ -50,10 +50,10 @@ err:
}
EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_register);
-int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler)
+int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler)
{
- struct xfrm_tunnel __rcu **pprev;
- struct xfrm_tunnel *t;
+ struct xfrm_tunnel_notifier __rcu **pprev;
+ struct xfrm_tunnel_notifier *t;
int ret = -ENOENT;
mutex_lock(&xfrm4_mode_tunnel_input_mutex);
@@ -134,7 +134,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
- struct xfrm_tunnel *handler;
+ struct xfrm_tunnel_notifier *handler;
int err = -EINVAL;
if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 9a459be24af7..e1a63930a967 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -104,9 +104,14 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
const struct iphdr *iph = ip_hdr(skb);
u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
struct flowi4 *fl4 = &fl->u.ip4;
+ int oif = 0;
+
+ if (skb_dst(skb))
+ oif = skb_dst(skb)->dev->ifindex;
memset(fl4, 0, sizeof(struct flowi4));
fl4->flowi4_mark = skb->mark;
+ fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
if (!ip_is_fragment(iph)) {
switch (iph->protocol) {
@@ -235,7 +240,7 @@ static struct dst_ops xfrm4_dst_ops = {
.destroy = xfrm4_dst_destroy,
.ifdown = xfrm4_dst_ifdown,
.local_out = __ip_local_out,
- .gc_thresh = 1024,
+ .gc_thresh = 32768,
};
static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 11b13ea69db4..d92e5586783e 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -21,24 +21,6 @@ menuconfig IPV6
if IPV6
-config IPV6_PRIVACY
- bool "IPv6: Privacy Extensions (RFC 3041) support"
- ---help---
- Privacy Extensions for Stateless Address Autoconfiguration in IPv6
- support. With this option, additional periodically-altered
- pseudo-random global-scope unicast address(es) will be assigned to
- your interface(s).
-
- We use our standard pseudo-random algorithm to generate the
- randomized interface identifier, instead of one described in RFC 3041.
-
- By default the kernel does not generate temporary addresses.
- To use temporary addresses, do
-
- echo 2 >/proc/sys/net/ipv6/conf/all/use_tempaddr
-
- See <file:Documentation/networking/ip-sysctl.txt> for details.
-
config IPV6_ROUTER_PREF
bool "IPv6: Router Preference (RFC 4191) support"
---help---
@@ -153,6 +135,17 @@ config INET6_XFRM_MODE_ROUTEOPTIMIZATION
---help---
Support for MIPv6 route optimization mode.
+config IPV6_VTI
+ tristate "Virtual (secure) IPv6: tunneling"
+ select IPV6_TUNNEL
+ depends on INET6_XFRM_MODE_TUNNEL
+ ---help---
+ Tunneling means encapsulating data of one protocol type within
+ another protocol and sending it over a channel that understands the
+ encapsulating protocol. This can be used with xfrm mode tunnel to give
+ the notion of a secure tunnel for IPsec and then use a routing protocol
+ on top.
+
config IPV6_SIT
tristate "IPv6: IPv6-in-IPv4 tunnel (SIT driver)"
select INET_TUNNEL
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 470a9c008e9b..17bb830872db 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
obj-$(CONFIG_IPV6_MIP6) += mip6.o
obj-$(CONFIG_NETFILTER) += netfilter/
+obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
obj-$(CONFIG_IPV6_SIT) += sit.o
obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cd3fb301da38..542d09561ed6 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -83,11 +83,7 @@
#include <linux/if_tunnel.h>
#include <linux/rtnetlink.h>
#include <linux/netconf.h>
-
-#ifdef CONFIG_IPV6_PRIVACY
#include <linux/random.h>
-#endif
-
#include <linux/uaccess.h>
#include <asm/unaligned.h>
@@ -124,11 +120,9 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
}
#endif
-#ifdef CONFIG_IPV6_PRIVACY
static void __ipv6_regen_rndid(struct inet6_dev *idev);
static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
static void ipv6_regen_rndid(unsigned long data);
-#endif
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
static int ipv6_count_addresses(struct inet6_dev *idev);
@@ -183,13 +177,11 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.rtr_solicits = MAX_RTR_SOLICITATIONS,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
-#ifdef CONFIG_IPV6_PRIVACY
.use_tempaddr = 0,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
.regen_max_retry = REGEN_MAX_RETRY,
.max_desync_factor = MAX_DESYNC_FACTOR,
-#endif
.max_addresses = IPV6_MAX_ADDRESSES,
.accept_ra_defrtr = 1,
.accept_ra_pinfo = 1,
@@ -221,13 +213,11 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.rtr_solicits = MAX_RTR_SOLICITATIONS,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
-#ifdef CONFIG_IPV6_PRIVACY
.use_tempaddr = 0,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
.regen_max_retry = REGEN_MAX_RETRY,
.max_desync_factor = MAX_DESYNC_FACTOR,
-#endif
.max_addresses = IPV6_MAX_ADDRESSES,
.accept_ra_defrtr = 1,
.accept_ra_pinfo = 1,
@@ -371,7 +361,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
}
#endif
-#ifdef CONFIG_IPV6_PRIVACY
INIT_LIST_HEAD(&ndev->tempaddr_list);
setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
if ((dev->flags&IFF_LOOPBACK) ||
@@ -384,7 +373,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
in6_dev_hold(ndev);
ipv6_regen_rndid((unsigned long) ndev);
}
-#endif
+
ndev->token = in6addr_any;
if (netif_running(dev) && addrconf_qdisc_ok(dev))
@@ -865,12 +854,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
/* Add to inet6_dev unicast addr list. */
ipv6_link_dev_addr(idev, ifa);
-#ifdef CONFIG_IPV6_PRIVACY
if (ifa->flags&IFA_F_TEMPORARY) {
list_add(&ifa->tmp_list, &idev->tempaddr_list);
in6_ifa_hold(ifa);
}
-#endif
in6_ifa_hold(ifa);
write_unlock(&idev->lock);
@@ -913,7 +900,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
spin_unlock_bh(&addrconf_hash_lock);
write_lock_bh(&idev->lock);
-#ifdef CONFIG_IPV6_PRIVACY
+
if (ifp->flags&IFA_F_TEMPORARY) {
list_del(&ifp->tmp_list);
if (ifp->ifpub) {
@@ -922,7 +909,6 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
}
__in6_ifa_put(ifp);
}
-#endif
list_for_each_entry_safe(ifa, ifn, &idev->addr_list, if_list) {
if (ifa == ifp) {
@@ -1013,7 +999,6 @@ out:
in6_ifa_put(ifp);
}
-#ifdef CONFIG_IPV6_PRIVACY
static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *ift)
{
struct inet6_dev *idev = ifp->idev;
@@ -1116,7 +1101,6 @@ retry:
out:
return ret;
}
-#endif
/*
* Choose an appropriate source address (RFC3484)
@@ -1131,9 +1115,7 @@ enum {
#endif
IPV6_SADDR_RULE_OIF,
IPV6_SADDR_RULE_LABEL,
-#ifdef CONFIG_IPV6_PRIVACY
IPV6_SADDR_RULE_PRIVACY,
-#endif
IPV6_SADDR_RULE_ORCHID,
IPV6_SADDR_RULE_PREFIX,
IPV6_SADDR_RULE_MAX
@@ -1247,7 +1229,6 @@ static int ipv6_get_saddr_eval(struct net *net,
&score->ifa->addr, score->addr_type,
score->ifa->idev->dev->ifindex) == dst->label;
break;
-#ifdef CONFIG_IPV6_PRIVACY
case IPV6_SADDR_RULE_PRIVACY:
{
/* Rule 7: Prefer public address
@@ -1259,7 +1240,6 @@ static int ipv6_get_saddr_eval(struct net *net,
ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
break;
}
-#endif
case IPV6_SADDR_RULE_ORCHID:
/* Rule 8-: Prefer ORCHID vs ORCHID or
* non-ORCHID vs non-ORCHID
@@ -1588,7 +1568,6 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
if (dad_failed)
ipv6_ifa_notify(0, ifp);
in6_ifa_put(ifp);
-#ifdef CONFIG_IPV6_PRIVACY
} else if (ifp->flags&IFA_F_TEMPORARY) {
struct inet6_ifaddr *ifpub;
spin_lock_bh(&ifp->lock);
@@ -1602,7 +1581,6 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
spin_unlock_bh(&ifp->lock);
}
ipv6_del_addr(ifp);
-#endif
} else
ipv6_del_addr(ifp);
}
@@ -1851,7 +1829,6 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
return err;
}
-#ifdef CONFIG_IPV6_PRIVACY
/* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
static void __ipv6_regen_rndid(struct inet6_dev *idev)
{
@@ -1919,7 +1896,6 @@ static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmp
if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
__ipv6_regen_rndid(idev);
}
-#endif
/*
* Add prefix route.
@@ -2207,9 +2183,7 @@ ok:
if (ifp) {
int flags;
unsigned long now;
-#ifdef CONFIG_IPV6_PRIVACY
struct inet6_ifaddr *ift;
-#endif
u32 stored_lft;
/* update lifetime (RFC2462 5.5.3 e) */
@@ -2250,7 +2224,6 @@ ok:
} else
spin_unlock(&ifp->lock);
-#ifdef CONFIG_IPV6_PRIVACY
read_lock_bh(&in6_dev->lock);
/* update all temporary addresses in the list */
list_for_each_entry(ift, &in6_dev->tempaddr_list,
@@ -2315,7 +2288,7 @@ ok:
} else {
read_unlock_bh(&in6_dev->lock);
}
-#endif
+
in6_ifa_put(ifp);
addrconf_verify(0);
}
@@ -2995,7 +2968,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
if (!how)
idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
-#ifdef CONFIG_IPV6_PRIVACY
if (how && del_timer(&idev->regen_timer))
in6_dev_put(idev);
@@ -3015,7 +2987,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
in6_ifa_put(ifa);
write_lock_bh(&idev->lock);
}
-#endif
while (!list_empty(&idev->addr_list)) {
ifa = list_first_entry(&idev->addr_list,
@@ -3528,7 +3499,6 @@ restart:
in6_ifa_put(ifp);
goto restart;
}
-#ifdef CONFIG_IPV6_PRIVACY
} else if ((ifp->flags&IFA_F_TEMPORARY) &&
!(ifp->flags&IFA_F_TENTATIVE)) {
unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
@@ -3556,7 +3526,6 @@ restart:
} else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
spin_unlock(&ifp->lock);
-#endif
} else {
/* ifp->prefered_lft <= ifp->valid_lft */
if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
@@ -4128,13 +4097,11 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
-#ifdef CONFIG_IPV6_PRIVACY
array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
-#endif
array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
@@ -4828,7 +4795,6 @@ static struct addrconf_sysctl_table
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
-#ifdef CONFIG_IPV6_PRIVACY
{
.procname = "use_tempaddr",
.data = &ipv6_devconf.use_tempaddr,
@@ -4864,7 +4830,6 @@ static struct addrconf_sysctl_table
.mode = 0644,
.proc_handler = proc_dointvec,
},
-#endif
{
.procname = "max_addresses",
.data = &ipv6_devconf.max_addresses,
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 7c96100b021e..6468bda1f2b9 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -110,11 +110,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
int try_loading_module = 0;
int err;
- if (sock->type != SOCK_RAW &&
- sock->type != SOCK_DGRAM &&
- !inet_ehash_secret)
- build_ehash_secret();
-
/* Look for the requested type/protocol pair. */
lookup_protocol:
err = -ESOCKTNOSUPPORT;
@@ -364,7 +359,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
inet->inet_rcv_saddr = v4addr;
inet->inet_saddr = v4addr;
- np->rcv_saddr = addr->sin6_addr;
+ sk->sk_v6_rcv_saddr = addr->sin6_addr;
if (!(addr_type & IPV6_ADDR_MULTICAST))
np->saddr = addr->sin6_addr;
@@ -461,14 +456,14 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
peer == 1)
return -ENOTCONN;
sin->sin6_port = inet->inet_dport;
- sin->sin6_addr = np->daddr;
+ sin->sin6_addr = sk->sk_v6_daddr;
if (np->sndflow)
sin->sin6_flowinfo = np->flow_label;
} else {
- if (ipv6_addr_any(&np->rcv_saddr))
+ if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
sin->sin6_addr = np->saddr;
else
- sin->sin6_addr = np->rcv_saddr;
+ sin->sin6_addr = sk->sk_v6_rcv_saddr;
sin->sin6_port = inet->inet_sport;
}
@@ -655,7 +650,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = sk->sk_protocol;
- fl6.daddr = np->daddr;
+ fl6.daddr = sk->sk_v6_daddr;
fl6.saddr = np->saddr;
fl6.flowlabel = np->flow_label;
fl6.flowi6_oif = sk->sk_bound_dev_if;
@@ -870,8 +865,6 @@ static int __init inet6_init(void)
if (err)
goto out_sock_register_fail;
- tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
-
/*
* ipngwg API draft makes clear that the correct semantics
* for TCP and UDP is to consider one TCP and UDP instance
@@ -1028,52 +1021,4 @@ out_unregister_tcp_proto:
}
module_init(inet6_init);
-static void __exit inet6_exit(void)
-{
- if (disable_ipv6_mod)
- return;
-
- /* First of all disallow new sockets creation. */
- sock_unregister(PF_INET6);
- /* Disallow any further netlink messages */
- rtnl_unregister_all(PF_INET6);
-
- udpv6_exit();
- udplitev6_exit();
- tcpv6_exit();
-
- /* Cleanup code parts. */
- ipv6_packet_cleanup();
- ipv6_frag_exit();
- ipv6_exthdrs_exit();
- addrconf_cleanup();
- ip6_flowlabel_cleanup();
- ndisc_late_cleanup();
- ip6_route_cleanup();
-#ifdef CONFIG_PROC_FS
-
- /* Cleanup code parts. */
- if6_proc_exit();
- ipv6_misc_proc_exit();
- udplite6_proc_exit();
- raw6_proc_exit();
-#endif
- ipv6_netfilter_fini();
- ipv6_stub = NULL;
- igmp6_cleanup();
- ndisc_cleanup();
- ip6_mr_cleanup();
- icmpv6_cleanup();
- rawv6_exit();
-
- unregister_pernet_subsys(&inet6_net_ops);
- proto_unregister(&rawv6_prot);
- proto_unregister(&udplitev6_prot);
- proto_unregister(&udpv6_prot);
- proto_unregister(&tcpv6_prot);
-
- rcu_barrier(); /* Wait for completion of call_rcu()'s */
-}
-module_exit(inet6_exit);
-
MODULE_ALIAS_NETPROTO(PF_INET6);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 73784c3d4642..82e1da3a40b9 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -618,8 +618,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset);
struct xfrm_state *x;
- if (type != ICMPV6_DEST_UNREACH &&
- type != ICMPV6_PKT_TOOBIG &&
+ if (type != ICMPV6_PKT_TOOBIG &&
type != NDISC_REDIRECT)
return;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 48b6bd2a9a14..a454b0ff57c7 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -107,16 +107,16 @@ ipv4_connected:
if (err)
goto out;
- ipv6_addr_set_v4mapped(inet->inet_daddr, &np->daddr);
+ ipv6_addr_set_v4mapped(inet->inet_daddr, &sk->sk_v6_daddr);
if (ipv6_addr_any(&np->saddr) ||
ipv6_mapped_addr_any(&np->saddr))
ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
- if (ipv6_addr_any(&np->rcv_saddr) ||
- ipv6_mapped_addr_any(&np->rcv_saddr)) {
+ if (ipv6_addr_any(&sk->sk_v6_rcv_saddr) ||
+ ipv6_mapped_addr_any(&sk->sk_v6_rcv_saddr)) {
ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
- &np->rcv_saddr);
+ &sk->sk_v6_rcv_saddr);
if (sk->sk_prot->rehash)
sk->sk_prot->rehash(sk);
}
@@ -145,7 +145,7 @@ ipv4_connected:
}
}
- np->daddr = *daddr;
+ sk->sk_v6_daddr = *daddr;
np->flow_label = fl6.flowlabel;
inet->inet_dport = usin->sin6_port;
@@ -156,7 +156,7 @@ ipv4_connected:
*/
fl6.flowi6_proto = sk->sk_protocol;
- fl6.daddr = np->daddr;
+ fl6.daddr = sk->sk_v6_daddr;
fl6.saddr = np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
@@ -183,16 +183,16 @@ ipv4_connected:
if (ipv6_addr_any(&np->saddr))
np->saddr = fl6.saddr;
- if (ipv6_addr_any(&np->rcv_saddr)) {
- np->rcv_saddr = fl6.saddr;
+ if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+ sk->sk_v6_rcv_saddr = fl6.saddr;
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
if (sk->sk_prot->rehash)
sk->sk_prot->rehash(sk);
}
ip6_dst_store(sk, dst,
- ipv6_addr_equal(&fl6.daddr, &np->daddr) ?
- &np->daddr : NULL,
+ ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+ &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
&np->saddr :
@@ -883,11 +883,10 @@ EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
__u16 srcp, __u16 destp, int bucket)
{
- struct ipv6_pinfo *np = inet6_sk(sp);
const struct in6_addr *dest, *src;
- dest = &np->daddr;
- src = &np->rcv_saddr;
+ dest = &sp->sk_v6_daddr;
+ src = &sp->sk_v6_rcv_saddr;
seq_printf(seq,
"%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index d3618a78fcac..b8719df0366e 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -164,10 +164,9 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
u8 *iv;
u8 *tail;
__be32 *seqhi;
- struct esp_data *esp = x->data;
/* skb is pure payload to encrypt */
- aead = esp->aead;
+ aead = x->data;
alen = crypto_aead_authsize(aead);
tfclen = 0;
@@ -181,8 +180,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
}
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
clen = ALIGN(skb->len + 2 + tfclen, blksize);
- if (esp->padlen)
- clen = ALIGN(clen, esp->padlen);
plen = clen - skb->len - tfclen;
err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
@@ -271,8 +268,7 @@ error:
static int esp_input_done2(struct sk_buff *skb, int err)
{
struct xfrm_state *x = xfrm_input_state(skb);
- struct esp_data *esp = x->data;
- struct crypto_aead *aead = esp->aead;
+ struct crypto_aead *aead = x->data;
int alen = crypto_aead_authsize(aead);
int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
int elen = skb->len - hlen;
@@ -325,8 +321,7 @@ static void esp_input_done(struct crypto_async_request *base, int err)
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
struct ip_esp_hdr *esph;
- struct esp_data *esp = x->data;
- struct crypto_aead *aead = esp->aead;
+ struct crypto_aead *aead = x->data;
struct aead_request *req;
struct sk_buff *trailer;
int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
@@ -414,9 +409,8 @@ out:
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
- struct esp_data *esp = x->data;
- u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
- u32 align = max_t(u32, blksize, esp->padlen);
+ struct crypto_aead *aead = x->data;
+ u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
unsigned int net_adj;
if (x->props.mode != XFRM_MODE_TUNNEL)
@@ -424,8 +418,8 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
else
net_adj = 0;
- return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
- net_adj) & ~(align - 1)) + net_adj - 2;
+ return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
+ net_adj) & ~(blksize - 1)) + net_adj - 2;
}
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -436,8 +430,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
struct xfrm_state *x;
- if (type != ICMPV6_DEST_UNREACH &&
- type != ICMPV6_PKT_TOOBIG &&
+ if (type != ICMPV6_PKT_TOOBIG &&
type != NDISC_REDIRECT)
return;
@@ -455,18 +448,16 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
static void esp6_destroy(struct xfrm_state *x)
{
- struct esp_data *esp = x->data;
+ struct crypto_aead *aead = x->data;
- if (!esp)
+ if (!aead)
return;
- crypto_free_aead(esp->aead);
- kfree(esp);
+ crypto_free_aead(aead);
}
static int esp_init_aead(struct xfrm_state *x)
{
- struct esp_data *esp = x->data;
struct crypto_aead *aead;
int err;
@@ -475,7 +466,7 @@ static int esp_init_aead(struct xfrm_state *x)
if (IS_ERR(aead))
goto error;
- esp->aead = aead;
+ x->data = aead;
err = crypto_aead_setkey(aead, x->aead->alg_key,
(x->aead->alg_key_len + 7) / 8);
@@ -492,7 +483,6 @@ error:
static int esp_init_authenc(struct xfrm_state *x)
{
- struct esp_data *esp = x->data;
struct crypto_aead *aead;
struct crypto_authenc_key_param *param;
struct rtattr *rta;
@@ -527,7 +517,7 @@ static int esp_init_authenc(struct xfrm_state *x)
if (IS_ERR(aead))
goto error;
- esp->aead = aead;
+ x->data = aead;
keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
(x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
@@ -582,7 +572,6 @@ error:
static int esp6_init_state(struct xfrm_state *x)
{
- struct esp_data *esp;
struct crypto_aead *aead;
u32 align;
int err;
@@ -590,11 +579,7 @@ static int esp6_init_state(struct xfrm_state *x)
if (x->encap)
return -EINVAL;
- esp = kzalloc(sizeof(*esp), GFP_KERNEL);
- if (esp == NULL)
- return -ENOMEM;
-
- x->data = esp;
+ x->data = NULL;
if (x->aead)
err = esp_init_aead(x);
@@ -604,9 +589,7 @@ static int esp6_init_state(struct xfrm_state *x)
if (err)
goto error;
- aead = esp->aead;
-
- esp->padlen = 0;
+ aead = x->data;
x->props.header_len = sizeof(struct ip_esp_hdr) +
crypto_aead_ivsize(aead);
@@ -626,9 +609,7 @@ static int esp6_init_state(struct xfrm_state *x)
}
align = ALIGN(crypto_aead_blocksize(aead), 4);
- if (esp->padlen)
- align = max_t(u32, align, esp->padlen);
- x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
+ x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);
error:
return err;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e4311cbc8b4e..77bb8afb141d 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -70,20 +70,20 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
struct flowi6 *fl6,
const struct request_sock *req)
{
- struct inet6_request_sock *treq = inet6_rsk(req);
+ struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *final_p, final;
struct dst_entry *dst;
memset(fl6, 0, sizeof(*fl6));
fl6->flowi6_proto = IPPROTO_TCP;
- fl6->daddr = treq->rmt_addr;
+ fl6->daddr = ireq->ir_v6_rmt_addr;
final_p = fl6_update_dst(fl6, np->opt, &final);
- fl6->saddr = treq->loc_addr;
- fl6->flowi6_oif = treq->iif;
+ fl6->saddr = ireq->ir_v6_loc_addr;
+ fl6->flowi6_oif = ireq->ir_iif;
fl6->flowi6_mark = sk->sk_mark;
- fl6->fl6_dport = inet_rsk(req)->rmt_port;
- fl6->fl6_sport = inet_rsk(req)->loc_port;
+ fl6->fl6_dport = ireq->ir_rmt_port;
+ fl6->fl6_sport = htons(ireq->ir_num);
security_req_classify_flow(req, flowi6_to_flowi(fl6));
dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
@@ -129,13 +129,13 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk,
lopt->nr_table_entries)];
(req = *prev) != NULL;
prev = &req->dl_next) {
- const struct inet6_request_sock *treq = inet6_rsk(req);
+ const struct inet_request_sock *ireq = inet_rsk(req);
- if (inet_rsk(req)->rmt_port == rport &&
+ if (ireq->ir_rmt_port == rport &&
req->rsk_ops->family == AF_INET6 &&
- ipv6_addr_equal(&treq->rmt_addr, raddr) &&
- ipv6_addr_equal(&treq->loc_addr, laddr) &&
- (!treq->iif || treq->iif == iif)) {
+ ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
+ ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
+ (!ireq->ir_iif || ireq->ir_iif == iif)) {
WARN_ON(req->sk != NULL);
*prevp = prev;
return req;
@@ -153,8 +153,8 @@ void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
- const u32 h = inet6_synq_hash(&inet6_rsk(req)->rmt_addr,
- inet_rsk(req)->rmt_port,
+ const u32 h = inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr,
+ inet_rsk(req)->ir_rmt_port,
lopt->hash_rnd, lopt->nr_table_entries);
reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
@@ -165,11 +165,10 @@ EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);
void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
{
- struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
sin6->sin6_family = AF_INET6;
- sin6->sin6_addr = np->daddr;
+ sin6->sin6_addr = sk->sk_v6_daddr;
sin6->sin6_port = inet_sk(sk)->inet_dport;
/* We do not store received flowlabel for TCP */
sin6->sin6_flowinfo = 0;
@@ -203,7 +202,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
memset(fl6, 0, sizeof(*fl6));
fl6->flowi6_proto = sk->sk_protocol;
- fl6->daddr = np->daddr;
+ fl6->daddr = sk->sk_v6_daddr;
fl6->saddr = np->saddr;
fl6->flowlabel = np->flow_label;
IP6_ECN_flow_xmit(sk, fl6->flowlabel);
@@ -245,7 +244,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
skb_dst_set_noref(skb, dst);
/* Restore final destination back after routing done */
- fl6.daddr = np->daddr;
+ fl6.daddr = sk->sk_v6_daddr;
res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
rcu_read_unlock();
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 32b4a1675d82..262e13c02ec2 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -23,6 +23,39 @@
#include <net/secure_seq.h>
#include <net/ip.h>
+static unsigned int inet6_ehashfn(struct net *net,
+ const struct in6_addr *laddr,
+ const u16 lport,
+ const struct in6_addr *faddr,
+ const __be16 fport)
+{
+ static u32 inet6_ehash_secret __read_mostly;
+ static u32 ipv6_hash_secret __read_mostly;
+
+ u32 lhash, fhash;
+
+ net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
+ net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
+
+ lhash = (__force u32)laddr->s6_addr32[3];
+ fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);
+
+ return __inet6_ehashfn(lhash, lport, fhash, fport,
+ inet6_ehash_secret + net_hash_mix(net));
+}
+
+static int inet6_sk_ehashfn(const struct sock *sk)
+{
+ const struct inet_sock *inet = inet_sk(sk);
+ const struct in6_addr *laddr = &sk->sk_v6_rcv_saddr;
+ const struct in6_addr *faddr = &sk->sk_v6_daddr;
+ const __u16 lport = inet->inet_num;
+ const __be16 fport = inet->inet_dport;
+ struct net *net = sock_net(sk);
+
+ return inet6_ehashfn(net, laddr, lport, faddr, fport);
+}
+
int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
@@ -89,43 +122,22 @@ begin:
sk_nulls_for_each_rcu(sk, node, &head->chain) {
if (sk->sk_hash != hash)
continue;
- if (likely(INET6_MATCH(sk, net, saddr, daddr, ports, dif))) {
- if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
- goto begintw;
- if (unlikely(!INET6_MATCH(sk, net, saddr, daddr,
- ports, dif))) {
- sock_put(sk);
- goto begin;
- }
- goto out;
- }
- }
- if (get_nulls_value(node) != slot)
- goto begin;
-
-begintw:
- /* Must check for a TIME_WAIT'er before going to listener hash. */
- sk_nulls_for_each_rcu(sk, node, &head->twchain) {
- if (sk->sk_hash != hash)
+ if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif))
continue;
- if (likely(INET6_TW_MATCH(sk, net, saddr, daddr,
- ports, dif))) {
- if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
- sk = NULL;
- goto out;
- }
- if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
- ports, dif))) {
- sock_put(sk);
- goto begintw;
- }
+ if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
goto out;
+
+ if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif))) {
+ sock_gen_put(sk);
+ goto begin;
}
+ goto found;
}
if (get_nulls_value(node) != slot)
- goto begintw;
- sk = NULL;
+ goto begin;
out:
+ sk = NULL;
+found:
rcu_read_unlock();
return sk;
}
@@ -140,11 +152,10 @@ static inline int compute_score(struct sock *sk, struct net *net,
if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
sk->sk_family == PF_INET6) {
- const struct ipv6_pinfo *np = inet6_sk(sk);
score = 1;
- if (!ipv6_addr_any(&np->rcv_saddr)) {
- if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+ if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+ if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
return -1;
score++;
}
@@ -236,9 +247,8 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
{
struct inet_hashinfo *hinfo = death_row->hashinfo;
struct inet_sock *inet = inet_sk(sk);
- const struct ipv6_pinfo *np = inet6_sk(sk);
- const struct in6_addr *daddr = &np->rcv_saddr;
- const struct in6_addr *saddr = &np->daddr;
+ const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr;
+ const struct in6_addr *saddr = &sk->sk_v6_daddr;
const int dif = sk->sk_bound_dev_if;
const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
struct net *net = sock_net(sk);
@@ -248,38 +258,28 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
struct sock *sk2;
const struct hlist_nulls_node *node;
- struct inet_timewait_sock *tw;
+ struct inet_timewait_sock *tw = NULL;
int twrefcnt = 0;
spin_lock(lock);
- /* Check TIME-WAIT sockets first. */
- sk_nulls_for_each(sk2, node, &head->twchain) {
- if (sk2->sk_hash != hash)
- continue;
-
- if (likely(INET6_TW_MATCH(sk2, net, saddr, daddr,
- ports, dif))) {
- tw = inet_twsk(sk2);
- if (twsk_unique(sk, sk2, twp))
- goto unique;
- else
- goto not_unique;
- }
- }
- tw = NULL;
-
- /* And established part... */
sk_nulls_for_each(sk2, node, &head->chain) {
if (sk2->sk_hash != hash)
continue;
- if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif)))
+
+ if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif))) {
+ if (sk2->sk_state == TCP_TIME_WAIT) {
+ tw = inet_twsk(sk2);
+ if (twsk_unique(sk, sk2, twp))
+ break;
+ }
goto not_unique;
+ }
}
-unique:
/* Must record num and sport now. Otherwise we will see
- * in hash table socket with a funny identity. */
+ * in hash table socket with a funny identity.
+ */
inet->inet_num = lport;
inet->inet_sport = htons(lport);
sk->sk_hash = hash;
@@ -312,9 +312,9 @@ not_unique:
static inline u32 inet6_sk_port_offset(const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
- const struct ipv6_pinfo *np = inet6_sk(sk);
- return secure_ipv6_port_ephemeral(np->rcv_saddr.s6_addr32,
- np->daddr.s6_addr32,
+
+ return secure_ipv6_port_ephemeral(sk->sk_v6_rcv_saddr.s6_addr32,
+ sk->sk_v6_daddr.s6_addr32,
inet->inet_dport);
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5bec666aba61..5550a8113a6d 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1529,25 +1529,6 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
fib6_walk(&c.w);
}
-void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg),
- int prune, void *arg)
-{
- struct fib6_table *table;
- struct hlist_head *head;
- unsigned int h;
-
- rcu_read_lock();
- for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
- head = &net->ipv6.fib_table_hash[h];
- hlist_for_each_entry_rcu(table, head, tb6_hlist) {
- read_lock_bh(&table->tb6_lock);
- fib6_clean_tree(net, &table->tb6_root,
- func, prune, arg);
- read_unlock_bh(&table->tb6_lock);
- }
- }
- rcu_read_unlock();
-}
void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
int prune, void *arg)
{
@@ -1782,3 +1763,189 @@ void fib6_gc_cleanup(void)
unregister_pernet_subsys(&fib6_net_ops);
kmem_cache_destroy(fib6_node_kmem);
}
+
+#ifdef CONFIG_PROC_FS
+
+struct ipv6_route_iter {
+ struct seq_net_private p;
+ struct fib6_walker_t w;
+ loff_t skip;
+ struct fib6_table *tbl;
+ __u32 sernum;
+};
+
+static int ipv6_route_seq_show(struct seq_file *seq, void *v)
+{
+ struct rt6_info *rt = v;
+ struct ipv6_route_iter *iter = seq->private;
+
+ seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
+
+#ifdef CONFIG_IPV6_SUBTREES
+ seq_printf(seq, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
+#else
+ seq_puts(seq, "00000000000000000000000000000000 00 ");
+#endif
+ if (rt->rt6i_flags & RTF_GATEWAY)
+ seq_printf(seq, "%pi6", &rt->rt6i_gateway);
+ else
+ seq_puts(seq, "00000000000000000000000000000000");
+
+ seq_printf(seq, " %08x %08x %08x %08x %8s\n",
+ rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
+ rt->dst.__use, rt->rt6i_flags,
+ rt->dst.dev ? rt->dst.dev->name : "");
+ iter->w.leaf = NULL;
+ return 0;
+}
+
+static int ipv6_route_yield(struct fib6_walker_t *w)
+{
+ struct ipv6_route_iter *iter = w->args;
+
+ if (!iter->skip)
+ return 1;
+
+ do {
+ iter->w.leaf = iter->w.leaf->dst.rt6_next;
+ iter->skip--;
+ if (!iter->skip && iter->w.leaf)
+ return 1;
+ } while (iter->w.leaf);
+
+ return 0;
+}
+
+static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter)
+{
+ memset(&iter->w, 0, sizeof(iter->w));
+ iter->w.func = ipv6_route_yield;
+ iter->w.root = &iter->tbl->tb6_root;
+ iter->w.state = FWS_INIT;
+ iter->w.node = iter->w.root;
+ iter->w.args = iter;
+ iter->sernum = iter->w.root->fn_sernum;
+ INIT_LIST_HEAD(&iter->w.lh);
+ fib6_walker_link(&iter->w);
+}
+
+static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
+ struct net *net)
+{
+ unsigned int h;
+ struct hlist_node *node;
+
+ if (tbl) {
+ h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
+ node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
+ } else {
+ h = 0;
+ node = NULL;
+ }
+
+ while (!node && h < FIB6_TABLE_HASHSZ) {
+ node = rcu_dereference_bh(
+ hlist_first_rcu(&net->ipv6.fib_table_hash[h++]));
+ }
+ return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
+}
+
+static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
+{
+ if (iter->sernum != iter->w.root->fn_sernum) {
+ iter->sernum = iter->w.root->fn_sernum;
+ iter->w.state = FWS_INIT;
+ iter->w.node = iter->w.root;
+ WARN_ON(iter->w.skip);
+ iter->w.skip = iter->w.count;
+ }
+}
+
+static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ int r;
+ struct rt6_info *n;
+ struct net *net = seq_file_net(seq);
+ struct ipv6_route_iter *iter = seq->private;
+
+ if (!v)
+ goto iter_table;
+
+ n = ((struct rt6_info *)v)->dst.rt6_next;
+ if (n) {
+ ++*pos;
+ return n;
+ }
+
+iter_table:
+ ipv6_route_check_sernum(iter);
+ read_lock(&iter->tbl->tb6_lock);
+ r = fib6_walk_continue(&iter->w);
+ read_unlock(&iter->tbl->tb6_lock);
+ if (r > 0) {
+ if (v)
+ ++*pos;
+ return iter->w.leaf;
+ } else if (r < 0) {
+ fib6_walker_unlink(&iter->w);
+ return NULL;
+ }
+ fib6_walker_unlink(&iter->w);
+
+ iter->tbl = ipv6_route_seq_next_table(iter->tbl, net);
+ if (!iter->tbl)
+ return NULL;
+
+ ipv6_route_seq_setup_walk(iter);
+ goto iter_table;
+}
+
+static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(RCU_BH)
+{
+ struct net *net = seq_file_net(seq);
+ struct ipv6_route_iter *iter = seq->private;
+
+ rcu_read_lock_bh();
+ iter->tbl = ipv6_route_seq_next_table(NULL, net);
+ iter->skip = *pos;
+
+ if (iter->tbl) {
+ ipv6_route_seq_setup_walk(iter);
+ return ipv6_route_seq_next(seq, NULL, pos);
+ } else {
+ return NULL;
+ }
+}
+
+static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
+{
+ struct fib6_walker_t *w = &iter->w;
+ return w->node && !(w->state == FWS_U && w->node == w->root);
+}
+
+static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
+ __releases(RCU_BH)
+{
+ struct ipv6_route_iter *iter = seq->private;
+
+ if (ipv6_route_iter_active(iter))
+ fib6_walker_unlink(&iter->w);
+
+ rcu_read_unlock_bh();
+}
+
+static const struct seq_operations ipv6_route_seq_ops = {
+ .start = ipv6_route_seq_start,
+ .next = ipv6_route_seq_next,
+ .stop = ipv6_route_seq_stop,
+ .show = ipv6_route_seq_show
+};
+
+int ipv6_route_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &ipv6_route_seq_ops,
+ sizeof(struct ipv6_route_iter));
+}
+
+#endif /* CONFIG_PROC_FS */
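The iterator above backs a seq_file registered via ipv6_route_open(), conventionally exposed as /proc/net/ipv6_route. Given the field layout printed by ipv6_route_seq_show() — destination and prefix length, source and prefix length, gateway, then metric, refcount, use count, flags and device name — a minimal userspace reader could parse each line roughly as below (a sketch; the file path and variable names are assumptions):

#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/ipv6_route", "r");	/* assumed path */

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		char dst[33], src[33], gw[33], dev[17];
		unsigned int dst_plen, src_plen, metric, refcnt, use, flags;

		/* mirrors the seq_printf() format in ipv6_route_seq_show() */
		if (sscanf(line, "%32s %x %32s %x %32s %x %x %x %x %16s",
			   dst, &dst_plen, src, &src_plen, gw,
			   &metric, &refcnt, &use, &flags, dev) == 10)
			printf("%s/%u via %s dev %s metric %u flags %08x\n",
			       dst, dst_plen, gw, dev, metric, flags);
	}
	fclose(f);
	return 0;
}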
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 7bb5446b9d73..bf4a9a084de5 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -976,6 +976,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
if (t->parms.o_flags&GRE_SEQ)
addend += 4;
}
+ t->hlen = addend;
if (p->flags & IP6_TNL_F_CAP_XMIT) {
int strict = (ipv6_addr_type(&p->raddr) &
@@ -1002,8 +1003,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
}
ip6_rt_put(rt);
}
-
- t->hlen = addend;
}
static int ip6gre_tnl_change(struct ip6_tnl *t,
@@ -1173,9 +1172,8 @@ done:
static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
- struct ip6_tnl *tunnel = netdev_priv(dev);
if (new_mtu < 68 ||
- new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
+ new_mtu > 0xFFF8 - dev->hard_header_len)
return -EINVAL;
dev->mtu = new_mtu;
return 0;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index d82de7228100..4b851692b1f6 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -66,7 +66,6 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
__skb_pull(skb, sizeof(*ipv6h));
err = -EPROTONOSUPPORT;
- rcu_read_lock();
ops = rcu_dereference(inet6_offloads[
ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
@@ -74,7 +73,6 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
skb_reset_transport_header(skb);
err = ops->callbacks.gso_send_check(skb);
}
- rcu_read_unlock();
out:
return err;
@@ -92,46 +90,58 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
u8 *prevhdr;
int offset = 0;
bool tunnel;
+ int nhoff;
if (unlikely(skb_shinfo(skb)->gso_type &
~(SKB_GSO_UDP |
SKB_GSO_DODGY |
SKB_GSO_TCP_ECN |
SKB_GSO_GRE |
+ SKB_GSO_IPIP |
+ SKB_GSO_SIT |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_MPLS |
SKB_GSO_TCPV6 |
0)))
goto out;
+ skb_reset_network_header(skb);
+ nhoff = skb_network_header(skb) - skb_mac_header(skb);
if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
goto out;
- tunnel = skb->encapsulation;
+ tunnel = SKB_GSO_CB(skb)->encap_level > 0;
+ if (tunnel)
+ features = skb->dev->hw_enc_features & netif_skb_features(skb);
+ SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);
+
ipv6h = ipv6_hdr(skb);
__skb_pull(skb, sizeof(*ipv6h));
segs = ERR_PTR(-EPROTONOSUPPORT);
proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
- rcu_read_lock();
+
ops = rcu_dereference(inet6_offloads[proto]);
if (likely(ops && ops->callbacks.gso_segment)) {
skb_reset_transport_header(skb);
segs = ops->callbacks.gso_segment(skb, features);
}
- rcu_read_unlock();
if (IS_ERR(segs))
goto out;
for (skb = segs; skb; skb = skb->next) {
- ipv6h = ipv6_hdr(skb);
- ipv6h->payload_len = htons(skb->len - skb->mac_len -
- sizeof(*ipv6h));
+ ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
+ ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
+ if (tunnel) {
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
+ skb->network_header = (u8 *)ipv6h - skb->head;
+
if (!tunnel && proto == IPPROTO_UDP) {
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
- fptr = (struct frag_hdr *)(skb_network_header(skb) +
- unfrag_ip6hlen);
+ fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
fptr->frag_off = htons(offset);
if (skb->next != NULL)
fptr->frag_off |= htons(IP6_MF);
@@ -267,6 +277,13 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
},
};
+static const struct net_offload sit_offload = {
+ .callbacks = {
+ .gso_send_check = ipv6_gso_send_check,
+ .gso_segment = ipv6_gso_segment,
+ },
+};
+
static int __init ipv6_offload_init(void)
{
@@ -278,6 +295,9 @@ static int __init ipv6_offload_init(void)
pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
dev_add_offload(&ipv6_packet_offload);
+
+ inet_add_offload(&sit_offload, IPPROTO_IPV6);
+
return 0;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a54c45ce4a48..91fb4e8212f5 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -105,7 +105,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
}
rcu_read_lock_bh();
- nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
+ nexthop = rt6_nexthop((struct rt6_info *)dst);
neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
@@ -874,7 +874,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
*/
rt = (struct rt6_info *) *dst;
rcu_read_lock_bh();
- n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr));
+ n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
rcu_read_unlock_bh();
@@ -1008,6 +1008,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
{
struct sk_buff *skb;
+ struct frag_hdr fhdr;
int err;
/* There is support for UDP large send offload by network
* device, so create one single skb packet containing complete
* udp datagram
* udp datagram
*/
if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
- struct frag_hdr fhdr;
-
skb = sock_alloc_send_skb(sk,
hh_len + fragheaderlen + transhdrlen + 20,
(flags & MSG_DONTWAIT), &err);
@@ -1036,20 +1035,24 @@ static inline int ip6_ufo_append_data(struct sock *sk,
skb->transport_header = skb->network_header + fragheaderlen;
skb->protocol = htons(ETH_P_IPV6);
- skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- /* Specify the length of each IPv6 datagram fragment.
- * It has to be a multiple of 8.
- */
- skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
- sizeof(struct frag_hdr)) & ~7;
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
- ipv6_select_ident(&fhdr, rt);
- skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
__skb_queue_tail(&sk->sk_write_queue, skb);
+ } else if (skb_is_gso(skb)) {
+ goto append;
}
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ /* Specify the length of each IPv6 datagram fragment.
+ * It has to be a multiple of 8.
+ */
+ skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
+ sizeof(struct frag_hdr)) & ~7;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ ipv6_select_ident(&fhdr, rt);
+ skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
+
+append:
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a791552e0422..583b77e2f69b 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1430,9 +1430,17 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static int
ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
- if (new_mtu < IPV6_MIN_MTU) {
- return -EINVAL;
+ struct ip6_tnl *tnl = netdev_priv(dev);
+
+ if (tnl->parms.proto == IPPROTO_IPIP) {
+ if (new_mtu < 68)
+ return -EINVAL;
+ } else {
+ if (new_mtu < IPV6_MIN_MTU)
+ return -EINVAL;
}
+ if (new_mtu > 0xFFF8 - dev->hard_header_len)
+ return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
new file mode 100644
index 000000000000..ed94ba61dda0
--- /dev/null
+++ b/net/ipv6/ip6_vti.c
@@ -0,0 +1,1056 @@
+/*
+ * IPv6 virtual tunneling interface
+ *
+ * Copyright (C) 2013 secunet Security Networks AG
+ *
+ * Author:
+ * Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * Based on:
+ * net/ipv6/ip6_tunnel.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sockios.h>
+#include <linux/icmp.h>
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/if_tunnel.h>
+#include <linux/net.h>
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/icmpv6.h>
+#include <linux/init.h>
+#include <linux/route.h>
+#include <linux/rtnetlink.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/slab.h>
+#include <linux/hash.h>
+
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/ip_tunnels.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+#include <net/ip6_tunnel.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#define HASH_SIZE_SHIFT 5
+#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
+
+static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
+{
+ u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
+
+ return hash_32(hash, HASH_SIZE_SHIFT);
+}
+
+static int vti6_dev_init(struct net_device *dev);
+static void vti6_dev_setup(struct net_device *dev);
+static struct rtnl_link_ops vti6_link_ops __read_mostly;
+
+static int vti6_net_id __read_mostly;
+struct vti6_net {
+ /* the vti6 tunnel fallback device */
+ struct net_device *fb_tnl_dev;
+ /* lists for storing tunnels in use */
+ struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
+ struct ip6_tnl __rcu *tnls_wc[1];
+ struct ip6_tnl __rcu **tnls[2];
+};
+
+static struct net_device_stats *vti6_get_stats(struct net_device *dev)
+{
+ struct pcpu_tstats sum = { 0 };
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+ sum.rx_packets += tstats->rx_packets;
+ sum.rx_bytes += tstats->rx_bytes;
+ sum.tx_packets += tstats->tx_packets;
+ sum.tx_bytes += tstats->tx_bytes;
+ }
+ dev->stats.rx_packets = sum.rx_packets;
+ dev->stats.rx_bytes = sum.rx_bytes;
+ dev->stats.tx_packets = sum.tx_packets;
+ dev->stats.tx_bytes = sum.tx_bytes;
+ return &dev->stats;
+}
+
+#define for_each_vti6_tunnel_rcu(start) \
+ for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
+/**
+ * vti6_tnl_lookup - fetch tunnel matching the end-point addresses
+ * @net: network namespace
+ * @remote: the address of the tunnel exit-point
+ * @local: the address of the tunnel entry-point
+ *
+ * Return:
+ * tunnel matching given end-points if found,
+ * else fallback tunnel if its device is up,
+ * else %NULL
+ **/
+static struct ip6_tnl *
+vti6_tnl_lookup(struct net *net, const struct in6_addr *remote,
+ const struct in6_addr *local)
+{
+ unsigned int hash = HASH(remote, local);
+ struct ip6_tnl *t;
+ struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+ for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (ipv6_addr_equal(local, &t->parms.laddr) &&
+ ipv6_addr_equal(remote, &t->parms.raddr) &&
+ (t->dev->flags & IFF_UP))
+ return t;
+ }
+ t = rcu_dereference(ip6n->tnls_wc[0]);
+ if (t && (t->dev->flags & IFF_UP))
+ return t;
+
+ return NULL;
+}
+
+/**
+ * vti6_tnl_bucket - get head of list matching given tunnel parameters
+ * @p: parameters containing tunnel end-points
+ *
+ * Description:
+ * vti6_tnl_bucket() returns the head of the list matching the
+ * &struct in6_addr entries laddr and raddr in @p.
+ *
+ * Return: head of IPv6 tunnel list
+ **/
+static struct ip6_tnl __rcu **
+vti6_tnl_bucket(struct vti6_net *ip6n, const struct __ip6_tnl_parm *p)
+{
+ const struct in6_addr *remote = &p->raddr;
+ const struct in6_addr *local = &p->laddr;
+ unsigned int h = 0;
+ int prio = 0;
+
+ if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
+ prio = 1;
+ h = HASH(remote, local);
+ }
+ return &ip6n->tnls[prio][h];
+}
+
+static void
+vti6_tnl_link(struct vti6_net *ip6n, struct ip6_tnl *t)
+{
+ struct ip6_tnl __rcu **tp = vti6_tnl_bucket(ip6n, &t->parms);
+
+ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
+ rcu_assign_pointer(*tp, t);
+}
+
+static void
+vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
+{
+ struct ip6_tnl __rcu **tp;
+ struct ip6_tnl *iter;
+
+ for (tp = vti6_tnl_bucket(ip6n, &t->parms);
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+ rcu_assign_pointer(*tp, t->next);
+ break;
+ }
+ }
+}
+
+static void vti6_dev_free(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+ free_netdev(dev);
+}
+
+static int vti6_tnl_create2(struct net_device *dev)
+{
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct net *net = dev_net(dev);
+ struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+ int err;
+
+ err = vti6_dev_init(dev);
+ if (err < 0)
+ goto out;
+
+ err = register_netdevice(dev);
+ if (err < 0)
+ goto out;
+
+ strcpy(t->parms.name, dev->name);
+ dev->rtnl_link_ops = &vti6_link_ops;
+
+ dev_hold(dev);
+ vti6_tnl_link(ip6n, t);
+
+ return 0;
+
+out:
+ return err;
+}
+
+static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
+{
+ struct net_device *dev;
+ struct ip6_tnl *t;
+ char name[IFNAMSIZ];
+ int err;
+
+ if (p->name[0])
+ strlcpy(name, p->name, IFNAMSIZ);
+ else
+ sprintf(name, "ip6_vti%%d");
+
+ dev = alloc_netdev(sizeof(*t), name, vti6_dev_setup);
+ if (dev == NULL)
+ goto failed;
+
+ dev_net_set(dev, net);
+
+ t = netdev_priv(dev);
+ t->parms = *p;
+ t->net = dev_net(dev);
+
+ err = vti6_tnl_create2(dev);
+ if (err < 0)
+ goto failed_free;
+
+ return t;
+
+failed_free:
+ vti6_dev_free(dev);
+failed:
+ return NULL;
+}
+
+/**
+ * vti6_locate - find or create tunnel matching given parameters
+ * @net: network namespace
+ * @p: tunnel parameters
+ * @create: != 0 if allowed to create a new tunnel when no match is found
+ *
+ * Description:
+ * vti6_locate() first tries to locate an existing tunnel
+ * based on @p. If this is unsuccessful, but @create is set, a new
+ * tunnel device is created and registered for use.
+ *
+ * Return:
+ * matching tunnel or NULL
+ **/
+static struct ip6_tnl *vti6_locate(struct net *net, struct __ip6_tnl_parm *p,
+ int create)
+{
+ const struct in6_addr *remote = &p->raddr;
+ const struct in6_addr *local = &p->laddr;
+ struct ip6_tnl __rcu **tp;
+ struct ip6_tnl *t;
+ struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+ for (tp = vti6_tnl_bucket(ip6n, p);
+ (t = rtnl_dereference(*tp)) != NULL;
+ tp = &t->next) {
+ if (ipv6_addr_equal(local, &t->parms.laddr) &&
+ ipv6_addr_equal(remote, &t->parms.raddr))
+ return t;
+ }
+ if (!create)
+ return NULL;
+ return vti6_tnl_create(net, p);
+}
+
+/**
+ * vti6_dev_uninit - tunnel device uninitializer
+ * @dev: the device to be destroyed
+ *
+ * Description:
+ * vti6_dev_uninit() removes tunnel from its list
+ **/
+static void vti6_dev_uninit(struct net_device *dev)
+{
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct net *net = dev_net(dev);
+ struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+ if (dev == ip6n->fb_tnl_dev)
+ RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
+ else
+ vti6_tnl_unlink(ip6n, t);
+ ip6_tnl_dst_reset(t);
+ dev_put(dev);
+}
+
+static int vti6_rcv(struct sk_buff *skb)
+{
+ struct ip6_tnl *t;
+ const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+ rcu_read_lock();
+
+ if ((t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
+ &ipv6h->daddr)) != NULL) {
+ struct pcpu_tstats *tstats;
+
+ if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
+ rcu_read_unlock();
+ goto discard;
+ }
+
+ if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
+ t->dev->stats.rx_dropped++;
+ rcu_read_unlock();
+ goto discard;
+ }
+
+ tstats = this_cpu_ptr(t->dev->tstats);
+ tstats->rx_packets++;
+ tstats->rx_bytes += skb->len;
+
+ skb->mark = 0;
+ secpath_reset(skb);
+ skb->dev = t->dev;
+
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+ return 1;
+
+discard:
+ kfree_skb(skb);
+ return 0;
+}
+
+/**
+ * vti6_addr_conflict - compare packet addresses to tunnel's own
+ * @t: the outgoing tunnel device
+ * @hdr: IPv6 header from the incoming packet
+ *
+ * Description:
+ * Avoid trivial tunneling loop by checking that tunnel exit-point
+ * doesn't match source of incoming packet.
+ *
+ * Return:
+ * 1 if conflict,
+ * 0 otherwise
+ **/
+static inline bool
+vti6_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
+{
+ return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
+}
+
+/**
+ * vti6_xmit - send a packet
+ * @skb: the outgoing socket buffer
+ * @dev: the outgoing tunnel device
+ **/
+static int vti6_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net *net = dev_net(dev);
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct net_device_stats *stats = &t->dev->stats;
+ struct dst_entry *dst = NULL, *ndst = NULL;
+ struct flowi6 fl6;
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct net_device *tdev;
+ int err = -1;
+
+ if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
+ !ip6_tnl_xmit_ctl(t) || vti6_addr_conflict(t, ipv6h))
+ return err;
+
+ dst = ip6_tnl_dst_check(t);
+ if (!dst) {
+ memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+
+ ndst = ip6_route_output(net, NULL, &fl6);
+
+ if (ndst->error)
+ goto tx_err_link_failure;
+ ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(&fl6), NULL, 0);
+ if (IS_ERR(ndst)) {
+ err = PTR_ERR(ndst);
+ ndst = NULL;
+ goto tx_err_link_failure;
+ }
+ dst = ndst;
+ }
+
+ if (!dst->xfrm || dst->xfrm->props.mode != XFRM_MODE_TUNNEL)
+ goto tx_err_link_failure;
+
+ tdev = dst->dev;
+
+ if (tdev == dev) {
+ stats->collisions++;
+ net_warn_ratelimited("%s: Local routing loop detected!\n",
+ t->parms.name);
+ goto tx_err_dst_release;
+ }
+
+
+ skb_dst_drop(skb);
+ skb_dst_set_noref(skb, dst);
+
+ ip6tunnel_xmit(skb, dev);
+ if (ndst) {
+ dev->mtu = dst_mtu(ndst);
+ ip6_tnl_dst_store(t, ndst);
+ }
+
+ return 0;
+tx_err_link_failure:
+ stats->tx_carrier_errors++;
+ dst_link_failure(skb);
+tx_err_dst_release:
+ dst_release(ndst);
+ return err;
+}
+
+static netdev_tx_t
+vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct net_device_stats *stats = &t->dev->stats;
+ int ret;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IPV6):
+ ret = vti6_xmit(skb, dev);
+ break;
+ default:
+ goto tx_err;
+ }
+
+ if (ret < 0)
+ goto tx_err;
+
+ return NETDEV_TX_OK;
+
+tx_err:
+ stats->tx_errors++;
+ stats->tx_dropped++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void vti6_link_config(struct ip6_tnl *t)
+{
+ struct dst_entry *dst;
+ struct net_device *dev = t->dev;
+ struct __ip6_tnl_parm *p = &t->parms;
+ struct flowi6 *fl6 = &t->fl.u.ip6;
+
+ memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+ memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
+
+ /* Set up flowi template */
+ fl6->saddr = p->laddr;
+ fl6->daddr = p->raddr;
+ fl6->flowi6_oif = p->link;
+ fl6->flowi6_mark = be32_to_cpu(p->i_key);
+ fl6->flowi6_proto = p->proto;
+ fl6->flowlabel = 0;
+
+ p->flags &= ~(IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV |
+ IP6_TNL_F_CAP_PER_PACKET);
+ p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
+
+ if (p->flags & IP6_TNL_F_CAP_XMIT && p->flags & IP6_TNL_F_CAP_RCV)
+ dev->flags |= IFF_POINTOPOINT;
+ else
+ dev->flags &= ~IFF_POINTOPOINT;
+
+ dev->iflink = p->link;
+
+ if (p->flags & IP6_TNL_F_CAP_XMIT) {
+
+ dst = ip6_route_output(dev_net(dev), NULL, fl6);
+ if (dst->error)
+ return;
+
+ dst = xfrm_lookup(dev_net(dev), dst, flowi6_to_flowi(fl6),
+ NULL, 0);
+ if (IS_ERR(dst))
+ return;
+
+ if (dst->dev) {
+ dev->hard_header_len = dst->dev->hard_header_len;
+
+ dev->mtu = dst_mtu(dst);
+
+ if (dev->mtu < IPV6_MIN_MTU)
+ dev->mtu = IPV6_MIN_MTU;
+ }
+ dst_release(dst);
+ }
+}
+
+/**
+ * vti6_tnl_change - update the tunnel parameters
+ * @t: tunnel to be changed
+ * @p: tunnel configuration parameters
+ *
+ * Description:
+ * vti6_tnl_change() updates the tunnel parameters
+ **/
+static int
+vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
+{
+ t->parms.laddr = p->laddr;
+ t->parms.raddr = p->raddr;
+ t->parms.link = p->link;
+ t->parms.i_key = p->i_key;
+ t->parms.o_key = p->o_key;
+ t->parms.proto = p->proto;
+ ip6_tnl_dst_reset(t);
+ vti6_link_config(t);
+ return 0;
+}
+
+static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+{
+ struct net *net = dev_net(t->dev);
+ struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+ int err;
+
+ vti6_tnl_unlink(ip6n, t);
+ synchronize_net();
+ err = vti6_tnl_change(t, p);
+ vti6_tnl_link(ip6n, t);
+ netdev_state_change(t->dev);
+ return err;
+}
+
+static void
+vti6_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm2 *u)
+{
+ p->laddr = u->laddr;
+ p->raddr = u->raddr;
+ p->link = u->link;
+ p->i_key = u->i_key;
+ p->o_key = u->o_key;
+ p->proto = u->proto;
+
+ memcpy(p->name, u->name, sizeof(u->name));
+}
+
+static void
+vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p)
+{
+ u->laddr = p->laddr;
+ u->raddr = p->raddr;
+ u->link = p->link;
+ u->i_key = p->i_key;
+ u->o_key = p->o_key;
+ u->proto = p->proto;
+
+ memcpy(u->name, p->name, sizeof(u->name));
+}
+
+/**
+ * vti6_ioctl - configure vti6 tunnels from userspace
+ * @dev: virtual device associated with tunnel
+ * @ifr: parameters passed from userspace
+ * @cmd: command to be performed
+ *
+ * Description:
+ * vti6_ioctl() is used for managing vti6 tunnels
+ * from userspace.
+ *
+ * The possible commands are the following:
+ * %SIOCGETTUNNEL: get tunnel parameters for device
+ * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
+ * %SIOCCHGTUNNEL: change tunnel parameters to those given
+ * %SIOCDELTUNNEL: delete tunnel
+ *
+ * The fallback device "ip6_vti0", created during module
+ * initialization, can be used for creating other tunnel devices.
+ *
+ * Return:
+ * 0 on success,
+ * %-EFAULT if unable to copy data to or from userspace,
+ * %-EPERM if current process does not have %CAP_NET_ADMIN set,
+ * %-EINVAL if passed tunnel parameters are invalid,
+ * %-EEXIST if changing a tunnel's parameters would cause a conflict,
+ * %-ENODEV if attempting to change or delete a nonexistent device
+ **/
+static int
+vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ int err = 0;
+ struct ip6_tnl_parm2 p;
+ struct __ip6_tnl_parm p1;
+ struct ip6_tnl *t = NULL;
+ struct net *net = dev_net(dev);
+ struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+ switch (cmd) {
+ case SIOCGETTUNNEL:
+ if (dev == ip6n->fb_tnl_dev) {
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
+ err = -EFAULT;
+ break;
+ }
+ vti6_parm_from_user(&p1, &p);
+ t = vti6_locate(net, &p1, 0);
+ } else {
+ memset(&p, 0, sizeof(p));
+ }
+ if (t == NULL)
+ t = netdev_priv(dev);
+ vti6_parm_to_user(&p, &t->parms);
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+ err = -EFAULT;
+ break;
+ case SIOCADDTUNNEL:
+ case SIOCCHGTUNNEL:
+ err = -EPERM;
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ break;
+ err = -EFAULT;
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+ break;
+ err = -EINVAL;
+ if (p.proto != IPPROTO_IPV6 && p.proto != 0)
+ break;
+ vti6_parm_from_user(&p1, &p);
+ t = vti6_locate(net, &p1, cmd == SIOCADDTUNNEL);
+ if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
+ if (t != NULL) {
+ if (t->dev != dev) {
+ err = -EEXIST;
+ break;
+ }
+ } else
+ t = netdev_priv(dev);
+
+ err = vti6_update(t, &p1);
+ }
+ if (t) {
+ err = 0;
+ vti6_parm_to_user(&p, &t->parms);
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+ err = -EFAULT;
+
+ } else
+ err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+ break;
+ case SIOCDELTUNNEL:
+ err = -EPERM;
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ break;
+
+ if (dev == ip6n->fb_tnl_dev) {
+ err = -EFAULT;
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+ break;
+ err = -ENOENT;
+ vti6_parm_from_user(&p1, &p);
+ t = vti6_locate(net, &p1, 0);
+ if (t == NULL)
+ break;
+ err = -EPERM;
+ if (t->dev == ip6n->fb_tnl_dev)
+ break;
+ dev = t->dev;
+ }
+ err = 0;
+ unregister_netdevice(dev);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ return err;
+}
+
+/**
+ * vti6_change_mtu - change MTU manually for tunnel device
+ * @dev: virtual device associated with tunnel
+ * @new_mtu: the new mtu
+ *
+ * Return:
+ * 0 on success,
+ * %-EINVAL if mtu too small
+ **/
+static int vti6_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < IPV6_MIN_MTU)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static const struct net_device_ops vti6_netdev_ops = {
+ .ndo_uninit = vti6_dev_uninit,
+ .ndo_start_xmit = vti6_tnl_xmit,
+ .ndo_do_ioctl = vti6_ioctl,
+ .ndo_change_mtu = vti6_change_mtu,
+ .ndo_get_stats = vti6_get_stats,
+};
+
+/**
+ * vti6_dev_setup - setup virtual tunnel device
+ * @dev: virtual device associated with tunnel
+ *
+ * Description:
+ * Initialize function pointers and device parameters
+ **/
+static void vti6_dev_setup(struct net_device *dev)
+{
+ struct ip6_tnl *t;
+
+ dev->netdev_ops = &vti6_netdev_ops;
+ dev->destructor = vti6_dev_free;
+
+ dev->type = ARPHRD_TUNNEL6;
+ dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
+ dev->mtu = ETH_DATA_LEN;
+ t = netdev_priv(dev);
+ dev->flags |= IFF_NOARP;
+ dev->addr_len = sizeof(struct in6_addr);
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+}
+
+/**
+ * vti6_dev_init_gen - general initializer for all tunnel devices
+ * @dev: virtual device associated with tunnel
+ **/
+static inline int vti6_dev_init_gen(struct net_device *dev)
+{
+ struct ip6_tnl *t = netdev_priv(dev);
+
+ t->dev = dev;
+ t->net = dev_net(dev);
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * vti6_dev_init - initializer for all non fallback tunnel devices
+ * @dev: virtual device associated with tunnel
+ **/
+static int vti6_dev_init(struct net_device *dev)
+{
+ struct ip6_tnl *t = netdev_priv(dev);
+ int err = vti6_dev_init_gen(dev);
+
+ if (err)
+ return err;
+ vti6_link_config(t);
+ return 0;
+}
+
+/**
+ * vti6_fb_tnl_dev_init - initializer for fallback tunnel device
+ * @dev: fallback device
+ *
+ * Return: 0
+ **/
+static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
+{
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct net *net = dev_net(dev);
+ struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+ int err = vti6_dev_init_gen(dev);
+
+ if (err)
+ return err;
+
+ t->parms.proto = IPPROTO_IPV6;
+ dev_hold(dev);
+
+ vti6_link_config(t);
+
+ rcu_assign_pointer(ip6n->tnls_wc[0], t);
+ return 0;
+}
+
+static int vti6_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ return 0;
+}
+
+static void vti6_netlink_parms(struct nlattr *data[],
+ struct __ip6_tnl_parm *parms)
+{
+ memset(parms, 0, sizeof(*parms));
+
+ if (!data)
+ return;
+
+ if (data[IFLA_VTI_LINK])
+ parms->link = nla_get_u32(data[IFLA_VTI_LINK]);
+
+ if (data[IFLA_VTI_LOCAL])
+ nla_memcpy(&parms->laddr, data[IFLA_VTI_LOCAL],
+ sizeof(struct in6_addr));
+
+ if (data[IFLA_VTI_REMOTE])
+ nla_memcpy(&parms->raddr, data[IFLA_VTI_REMOTE],
+ sizeof(struct in6_addr));
+
+ if (data[IFLA_VTI_IKEY])
+ parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);
+
+ if (data[IFLA_VTI_OKEY])
+ parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);
+}
+
+static int vti6_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct net *net = dev_net(dev);
+ struct ip6_tnl *nt;
+
+ nt = netdev_priv(dev);
+ vti6_netlink_parms(data, &nt->parms);
+
+ nt->parms.proto = IPPROTO_IPV6;
+
+ if (vti6_locate(net, &nt->parms, 0))
+ return -EEXIST;
+
+ return vti6_tnl_create2(dev);
+}
+
+static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ struct ip6_tnl *t;
+ struct __ip6_tnl_parm p;
+ struct net *net = dev_net(dev);
+ struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+ if (dev == ip6n->fb_tnl_dev)
+ return -EINVAL;
+
+ vti6_netlink_parms(data, &p);
+
+ t = vti6_locate(net, &p, 0);
+
+ if (t) {
+ if (t->dev != dev)
+ return -EEXIST;
+ } else
+ t = netdev_priv(dev);
+
+ return vti6_update(t, &p);
+}
+
+static size_t vti6_get_size(const struct net_device *dev)
+{
+ return
+ /* IFLA_VTI_LINK */
+ nla_total_size(4) +
+ /* IFLA_VTI_LOCAL */
+ nla_total_size(sizeof(struct in6_addr)) +
+ /* IFLA_VTI_REMOTE */
+ nla_total_size(sizeof(struct in6_addr)) +
+ /* IFLA_VTI_IKEY */
+ nla_total_size(4) +
+ /* IFLA_VTI_OKEY */
+ nla_total_size(4) +
+ 0;
+}
+
+static int vti6_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct ip6_tnl *tunnel = netdev_priv(dev);
+ struct __ip6_tnl_parm *parm = &tunnel->parms;
+
+ if (nla_put_u32(skb, IFLA_VTI_LINK, parm->link) ||
+ nla_put(skb, IFLA_VTI_LOCAL, sizeof(struct in6_addr),
+ &parm->laddr) ||
+ nla_put(skb, IFLA_VTI_REMOTE, sizeof(struct in6_addr),
+ &parm->raddr) ||
+ nla_put_be32(skb, IFLA_VTI_IKEY, parm->i_key) ||
+ nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
+ [IFLA_VTI_LINK] = { .type = NLA_U32 },
+ [IFLA_VTI_LOCAL] = { .len = sizeof(struct in6_addr) },
+ [IFLA_VTI_REMOTE] = { .len = sizeof(struct in6_addr) },
+ [IFLA_VTI_IKEY] = { .type = NLA_U32 },
+ [IFLA_VTI_OKEY] = { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops vti6_link_ops __read_mostly = {
+ .kind = "vti6",
+ .maxtype = IFLA_VTI_MAX,
+ .policy = vti6_policy,
+ .priv_size = sizeof(struct ip6_tnl),
+ .setup = vti6_dev_setup,
+ .validate = vti6_validate,
+ .newlink = vti6_newlink,
+ .changelink = vti6_changelink,
+ .get_size = vti6_get_size,
+ .fill_info = vti6_fill_info,
+};
+
+static struct xfrm_tunnel_notifier vti6_handler __read_mostly = {
+ .handler = vti6_rcv,
+ .priority = 1,
+};
+
+static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n)
+{
+ int h;
+ struct ip6_tnl *t;
+ LIST_HEAD(list);
+
+ for (h = 0; h < HASH_SIZE; h++) {
+ t = rtnl_dereference(ip6n->tnls_r_l[h]);
+ while (t != NULL) {
+ unregister_netdevice_queue(t->dev, &list);
+ t = rtnl_dereference(t->next);
+ }
+ }
+
+ t = rtnl_dereference(ip6n->tnls_wc[0]);
+ unregister_netdevice_queue(t->dev, &list);
+ unregister_netdevice_many(&list);
+}
+
+static int __net_init vti6_init_net(struct net *net)
+{
+ struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+ struct ip6_tnl *t = NULL;
+ int err;
+
+ ip6n->tnls[0] = ip6n->tnls_wc;
+ ip6n->tnls[1] = ip6n->tnls_r_l;
+
+ err = -ENOMEM;
+ ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6_vti0",
+ vti6_dev_setup);
+
+ if (!ip6n->fb_tnl_dev)
+ goto err_alloc_dev;
+ dev_net_set(ip6n->fb_tnl_dev, net);
+
+ err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
+ if (err < 0)
+ goto err_register;
+
+ err = register_netdev(ip6n->fb_tnl_dev);
+ if (err < 0)
+ goto err_register;
+
+ t = netdev_priv(ip6n->fb_tnl_dev);
+
+ strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
+ return 0;
+
+err_register:
+ vti6_dev_free(ip6n->fb_tnl_dev);
+err_alloc_dev:
+ return err;
+}
+
+static void __net_exit vti6_exit_net(struct net *net)
+{
+ struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+ rtnl_lock();
+ vti6_destroy_tunnels(ip6n);
+ rtnl_unlock();
+}
+
+static struct pernet_operations vti6_net_ops = {
+ .init = vti6_init_net,
+ .exit = vti6_exit_net,
+ .id = &vti6_net_id,
+ .size = sizeof(struct vti6_net),
+};
+
+/**
+ * vti6_tunnel_init - register protocol and reserve needed resources
+ *
+ * Return: 0 on success
+ **/
+static int __init vti6_tunnel_init(void)
+{
+ int err;
+
+ err = register_pernet_device(&vti6_net_ops);
+ if (err < 0)
+ goto out_pernet;
+
+ err = xfrm6_mode_tunnel_input_register(&vti6_handler);
+ if (err < 0) {
+ pr_err("%s: can't register vti6\n", __func__);
+ goto out;
+ }
+ err = rtnl_link_register(&vti6_link_ops);
+ if (err < 0)
+ goto rtnl_link_failed;
+
+ return 0;
+
+rtnl_link_failed:
+ xfrm6_mode_tunnel_input_deregister(&vti6_handler);
+out:
+ unregister_pernet_device(&vti6_net_ops);
+out_pernet:
+ return err;
+}
+
+/**
+ * vti6_tunnel_cleanup - free resources and unregister protocol
+ **/
+static void __exit vti6_tunnel_cleanup(void)
+{
+ rtnl_link_unregister(&vti6_link_ops);
+ if (xfrm6_mode_tunnel_input_deregister(&vti6_handler))
+ pr_info("%s: can't deregister vti6\n", __func__);
+
+ unregister_pernet_device(&vti6_net_ops);
+}
+
+module_init(vti6_tunnel_init);
+module_exit(vti6_tunnel_cleanup);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("vti6");
+MODULE_ALIAS_NETDEV("ip6_vti0");
+MODULE_AUTHOR("Steffen Klassert");
+MODULE_DESCRIPTION("IPv6 virtual tunnel interface");
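
As the vti6_ioctl() comment above describes, tunnels can be created through the "ip6_vti0" fallback device with SIOCADDTUNNEL and a struct ip6_tnl_parm2, or over rtnetlink via the "vti6" link type. A minimal userspace sketch of the ioctl path follows; the addresses, the key value and the exact header set are assumptions (not taken from the patch) and the include order may need adjusting for a given libc:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/if_tunnel.h>	/* SIOCADDTUNNEL */
#include <linux/ip6_tunnel.h>	/* struct ip6_tnl_parm2, assumed UAPI header */

int main(void)
{
	struct ip6_tnl_parm2 p;
	struct ifreq ifr;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&p, 0, sizeof(p));
	strncpy(p.name, "vti6test", IFNAMSIZ - 1);	/* hypothetical name */
	p.proto = IPPROTO_IPV6;				/* only IPPROTO_IPV6 or 0 accepted */
	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
	/* i_key becomes fl6.flowi6_mark in vti6_link_config() above */
	p.i_key = htonl(42);
	p.o_key = htonl(42);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "ip6_vti0", IFNAMSIZ - 1);	/* fallback device */
	ifr.ifr_data = (void *)&p;

	if (ioctl(fd, SIOCADDTUNNEL, &ifr) < 0)
		perror("SIOCADDTUNNEL");

	close(fd);
	return 0;
}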
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 5636a912074a..ce507d9e1c90 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -64,8 +64,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
(struct ip_comp_hdr *)(skb->data + offset);
struct xfrm_state *x;
- if (type != ICMPV6_DEST_UNREACH &&
- type != ICMPV6_PKT_TOOBIG &&
+ if (type != ICMPV6_PKT_TOOBIG &&
type != NDISC_REDIRECT)
return;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index d1e2e8ef29c5..4919a8e6063e 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -174,7 +174,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
}
if (ipv6_only_sock(sk) ||
- !ipv6_addr_v4mapped(&np->daddr)) {
+ !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
retv = -EADDRNOTAVAIL;
break;
}
@@ -1011,7 +1011,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
np->sticky_pktinfo.ipi6_ifindex;
- src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
+ src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr : np->sticky_pktinfo.ipi6_addr;
put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
}
if (np->rxopt.bits.rxhlim) {
@@ -1026,7 +1026,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
np->sticky_pktinfo.ipi6_ifindex;
- src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
+ src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr :
+ np->sticky_pktinfo.ipi6_addr;
put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
}
if (np->rxopt.bits.rxohlim) {
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index a7f842b29b67..7702f9e90a04 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -25,6 +25,19 @@ config NF_CONNTRACK_IPV6
To compile it as a module, choose M here. If unsure, say N.
+config NF_TABLES_IPV6
+ depends on NF_TABLES
+ tristate "IPv6 nf_tables support"
+
+config NFT_CHAIN_ROUTE_IPV6
+ depends on NF_TABLES_IPV6
+ tristate "IPv6 nf_tables route chain support"
+
+config NFT_CHAIN_NAT_IPV6
+ depends on NF_TABLES_IPV6
+ depends on NF_NAT_IPV6 && NFT_NAT
+ tristate "IPv6 nf_tables nat chain support"
+
config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering)"
depends on INET && IPV6
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 2b53738f798c..d1b4928f34f7 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -23,6 +23,11 @@ obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
+# nf_tables
+obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
+obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
+obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
+
# matches
obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 44400c216dc6..710238f58aa9 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -349,6 +349,11 @@ ip6t_do_table(struct sk_buff *skb,
local_bh_disable();
addend = xt_write_recseq_begin();
private = table->private;
+ /*
+ * Ensure we load private-> members after we've fetched the base
+ * pointer.
+ */
+ smp_read_barrier_depends();
cpu = smp_processor_id();
table_base = private->entries[cpu];
jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 2748b042da72..bf9f612c1bc2 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -312,7 +312,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
-static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
+static unsigned int ipv6_synproxy_hook(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 29b44b14c5ea..ca7f6c128086 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -32,13 +32,14 @@ static const struct xt_table packet_filter = {
/* The work comes in here from netfilter.c. */
static unsigned int
-ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
const struct net *net = dev_net((in != NULL) ? in : out);
- return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter);
+ return ip6t_do_table(skb, ops->hooknum, in, out,
+ net->ipv6.ip6table_filter);
}
static struct nf_hook_ops *filter_ops __read_mostly;
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index c705907ae6ab..307bbb782d14 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -76,17 +76,17 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
/* The work comes in here from netfilter.c. */
static unsigned int
-ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- if (hook == NF_INET_LOCAL_OUT)
+ if (ops->hooknum == NF_INET_LOCAL_OUT)
return ip6t_mangle_out(skb, out);
- if (hook == NF_INET_POST_ROUTING)
- return ip6t_do_table(skb, hook, in, out,
+ if (ops->hooknum == NF_INET_POST_ROUTING)
+ return ip6t_do_table(skb, ops->hooknum, in, out,
dev_net(out)->ipv6.ip6table_mangle);
/* INPUT/FORWARD */
- return ip6t_do_table(skb, hook, in, out,
+ return ip6t_do_table(skb, ops->hooknum, in, out,
dev_net(in)->ipv6.ip6table_mangle);
}
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 9b076d2d3a7b..84c7f33d0cf8 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -63,7 +63,7 @@ static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
}
static unsigned int
-nf_nat_ipv6_fn(unsigned int hooknum,
+nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -72,7 +72,7 @@ nf_nat_ipv6_fn(unsigned int hooknum,
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
struct nf_conn_nat *nat;
- enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
+ enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
__be16 frag_off;
int hdrlen;
u8 nexthdr;
@@ -111,7 +111,8 @@ nf_nat_ipv6_fn(unsigned int hooknum,
if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
- hooknum, hdrlen))
+ ops->hooknum,
+ hdrlen))
return NF_DROP;
else
return NF_ACCEPT;
@@ -124,14 +125,14 @@ nf_nat_ipv6_fn(unsigned int hooknum,
if (!nf_nat_initialized(ct, maniptype)) {
unsigned int ret;
- ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
+ ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct);
if (ret != NF_ACCEPT)
return ret;
} else {
pr_debug("Already setup manip %s for ct %p\n",
maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
ct);
- if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+ if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
goto oif_changed;
}
break;
@@ -140,11 +141,11 @@ nf_nat_ipv6_fn(unsigned int hooknum,
/* ESTABLISHED */
NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
ctinfo == IP_CT_ESTABLISHED_REPLY);
- if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+ if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
goto oif_changed;
}
- return nf_nat_packet(ct, ctinfo, hooknum, skb);
+ return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
oif_changed:
nf_ct_kill_acct(ct, ctinfo, skb);
@@ -152,7 +153,7 @@ oif_changed:
}
static unsigned int
-nf_nat_ipv6_in(unsigned int hooknum,
+nf_nat_ipv6_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -161,7 +162,7 @@ nf_nat_ipv6_in(unsigned int hooknum,
unsigned int ret;
struct in6_addr daddr = ipv6_hdr(skb)->daddr;
- ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+ ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
if (ret != NF_DROP && ret != NF_STOLEN &&
ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
skb_dst_drop(skb);
@@ -170,7 +171,7 @@ nf_nat_ipv6_in(unsigned int hooknum,
}
static unsigned int
-nf_nat_ipv6_out(unsigned int hooknum,
+nf_nat_ipv6_out(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -187,7 +188,7 @@ nf_nat_ipv6_out(unsigned int hooknum,
if (skb->len < sizeof(struct ipv6hdr))
return NF_ACCEPT;
- ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+ ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
#ifdef CONFIG_XFRM
if (ret != NF_DROP && ret != NF_STOLEN &&
!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
@@ -209,7 +210,7 @@ nf_nat_ipv6_out(unsigned int hooknum,
}
static unsigned int
-nf_nat_ipv6_local_fn(unsigned int hooknum,
+nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -224,7 +225,7 @@ nf_nat_ipv6_local_fn(unsigned int hooknum,
if (skb->len < sizeof(struct ipv6hdr))
return NF_ACCEPT;
- ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+ ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
if (ret != NF_DROP && ret != NF_STOLEN &&
(ct = nf_ct_get(skb, &ctinfo)) != NULL) {
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 9a626d86720f..5274740acecc 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -19,13 +19,14 @@ static const struct xt_table packet_raw = {
/* The work comes in here from netfilter.c. */
static unsigned int
-ip6table_raw_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
const struct net *net = dev_net((in != NULL) ? in : out);
- return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw);
+ return ip6t_do_table(skb, ops->hooknum, in, out,
+ net->ipv6.ip6table_raw);
}
static struct nf_hook_ops *rawtable_ops __read_mostly;
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index ce88d1d7e525..ab3b0219ecfa 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -36,14 +36,15 @@ static const struct xt_table security_table = {
};
static unsigned int
-ip6table_security_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
const struct net *net = dev_net((in != NULL) ? in : out);
- return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security);
+ return ip6t_do_table(skb, ops->hooknum, in, out,
+ net->ipv6.ip6table_security);
}
static struct nf_hook_ops *sectbl_ops __read_mostly;
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index d6e4dd8b58df..486545eb42ce 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -95,7 +95,7 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
return NF_ACCEPT;
}
-static unsigned int ipv6_helper(unsigned int hooknum,
+static unsigned int ipv6_helper(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -133,7 +133,7 @@ static unsigned int ipv6_helper(unsigned int hooknum,
return helper->help(skb, protoff, ct, ctinfo);
}
-static unsigned int ipv6_confirm(unsigned int hooknum,
+static unsigned int ipv6_confirm(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -219,16 +219,17 @@ static unsigned int __ipv6_conntrack_in(struct net *net,
return nf_conntrack_in(net, PF_INET6, hooknum, skb);
}
-static unsigned int ipv6_conntrack_in(unsigned int hooknum,
+static unsigned int ipv6_conntrack_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return __ipv6_conntrack_in(dev_net(in), hooknum, skb, in, out, okfn);
+ return __ipv6_conntrack_in(dev_net(in), ops->hooknum, skb, in, out,
+ okfn);
}
-static unsigned int ipv6_conntrack_local(unsigned int hooknum,
+static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -239,7 +240,8 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
return NF_ACCEPT;
}
- return __ipv6_conntrack_in(dev_net(out), hooknum, skb, in, out, okfn);
+ return __ipv6_conntrack_in(dev_net(out), ops->hooknum, skb, in, out,
+ okfn);
}
static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
@@ -297,9 +299,9 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
struct nf_conn *ct;
- tuple.src.u3.in6 = inet6->rcv_saddr;
+ tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
tuple.src.u.tcp.port = inet->inet_sport;
- tuple.dst.u3.in6 = inet6->daddr;
+ tuple.dst.u3.in6 = sk->sk_v6_daddr;
tuple.dst.u.tcp.port = inet->inet_dport;
tuple.dst.protonum = sk->sk_protocol;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index dffdc1a389c5..4a258263d8ec 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -144,12 +144,24 @@ static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}
+static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
+ const struct in6_addr *daddr)
+{
+ u32 c;
+
+ net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
+ c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
+ (__force u32)id, nf_frags.rnd);
+ return c & (INETFRAGS_HASHSZ - 1);
+}
+
+
static unsigned int nf_hashfn(struct inet_frag_queue *q)
{
const struct frag_queue *nq;
nq = container_of(q, struct frag_queue, q);
- return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);
+ return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
}
static void nf_skb_free(struct sk_buff *skb)
@@ -185,7 +197,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
arg.ecn = ecn;
read_lock_bh(&nf_frags.lock);
- hash = inet6_hash_frag(id, src, dst, nf_frags.rnd);
+ hash = nf_hash_frag(id, src, dst);
q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
local_bh_enable();
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index aacd121fe8c5..ec483aa3f60f 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -52,7 +52,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
}
-static unsigned int ipv6_defrag(unsigned int hooknum,
+static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -66,7 +66,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
return NF_ACCEPT;
#endif
- reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
+ reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(ops->hooknum, skb));
/* queued */
if (reasm == NULL)
return NF_STOLEN;
@@ -75,7 +75,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
if (reasm == skb)
return NF_ACCEPT;
- nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in,
+ nf_ct_frag6_output(ops->hooknum, reasm, (struct net_device *)in,
(struct net_device *)out, okfn);
return NF_STOLEN;
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c
new file mode 100644
index 000000000000..d77db8a13505
--- /dev/null
+++ b/net/ipv6/netfilter/nf_tables_ipv6.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012-2013 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+
+static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nft_pktinfo pkt;
+
+ if (unlikely(skb->len < sizeof(struct ipv6hdr))) {
+ if (net_ratelimit())
+ pr_info("nf_tables_ipv6: ignoring short SOCK_RAW "
+ "packet\n");
+ return NF_ACCEPT;
+ }
+ if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+ return NF_DROP;
+
+ return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nft_af_info nft_af_ipv6 __read_mostly = {
+ .family = NFPROTO_IPV6,
+ .nhooks = NF_INET_NUMHOOKS,
+ .owner = THIS_MODULE,
+ .hooks = {
+ [NF_INET_LOCAL_OUT] = nft_ipv6_output,
+ },
+};
+
+static int nf_tables_ipv6_init_net(struct net *net)
+{
+ net->nft.ipv6 = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+ if (net->nft.ipv6 == NULL)
+ return -ENOMEM;
+
+ memcpy(net->nft.ipv6, &nft_af_ipv6, sizeof(nft_af_ipv6));
+
+ if (nft_register_afinfo(net, net->nft.ipv6) < 0)
+ goto err;
+
+ return 0;
+err:
+ kfree(net->nft.ipv6);
+ return -ENOMEM;
+}
+
+static void nf_tables_ipv6_exit_net(struct net *net)
+{
+ nft_unregister_afinfo(net->nft.ipv6);
+ kfree(net->nft.ipv6);
+}
+
+static struct pernet_operations nf_tables_ipv6_net_ops = {
+ .init = nf_tables_ipv6_init_net,
+ .exit = nf_tables_ipv6_exit_net,
+};
+
+static unsigned int
+nft_do_chain_ipv6(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nft_pktinfo pkt;
+
+ /* malformed packet, drop it */
+ if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+ return NF_DROP;
+
+ return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nf_chain_type filter_ipv6 = {
+ .family = NFPROTO_IPV6,
+ .name = "filter",
+ .type = NFT_CHAIN_T_DEFAULT,
+ .hook_mask = (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_POST_ROUTING),
+ .fn = {
+ [NF_INET_LOCAL_IN] = nft_do_chain_ipv6,
+ [NF_INET_LOCAL_OUT] = nft_ipv6_output,
+ [NF_INET_FORWARD] = nft_do_chain_ipv6,
+ [NF_INET_PRE_ROUTING] = nft_do_chain_ipv6,
+ [NF_INET_POST_ROUTING] = nft_do_chain_ipv6,
+ },
+};
+
+static int __init nf_tables_ipv6_init(void)
+{
+ nft_register_chain_type(&filter_ipv6);
+ return register_pernet_subsys(&nf_tables_ipv6_net_ops);
+}
+
+static void __exit nf_tables_ipv6_exit(void)
+{
+ unregister_pernet_subsys(&nf_tables_ipv6_net_ops);
+ nft_unregister_chain_type(&filter_ipv6);
+}
+
+module_init(nf_tables_ipv6_init);
+module_exit(nf_tables_ipv6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(AF_INET6);
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
new file mode 100644
index 000000000000..e86dcd70dc76
--- /dev/null
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/ipv6.h>
+
+/*
+ * IPv6 NAT chains
+ */
+
+static unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+ struct nf_conn_nat *nat;
+ enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
+ __be16 frag_off;
+ int hdrlen;
+ u8 nexthdr;
+ struct nft_pktinfo pkt;
+ unsigned int ret;
+
+ if (ct == NULL || nf_ct_is_untracked(ct))
+ return NF_ACCEPT;
+
+ nat = nfct_nat(ct);
+ if (nat == NULL) {
+ /* Conntrack module was loaded late, can't add extension. */
+ if (nf_ct_is_confirmed(ct))
+ return NF_ACCEPT;
+ nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+ if (nat == NULL)
+ return NF_ACCEPT;
+ }
+
+ switch (ctinfo) {
+ case IP_CT_RELATED:
+ case IP_CT_RELATED + IP_CT_IS_REPLY:
+ nexthdr = ipv6_hdr(skb)->nexthdr;
+ hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+ &nexthdr, &frag_off);
+
+ if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
+ if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
+ ops->hooknum,
+ hdrlen))
+ return NF_DROP;
+ else
+ return NF_ACCEPT;
+ }
+ /* Fall through */
+ case IP_CT_NEW:
+ if (nf_nat_initialized(ct, maniptype))
+ break;
+
+ nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
+
+ ret = nft_do_chain_pktinfo(&pkt, ops);
+ if (ret != NF_ACCEPT)
+ return ret;
+ if (!nf_nat_initialized(ct, maniptype)) {
+ ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
+ if (ret != NF_ACCEPT)
+ return ret;
+ }
+ default:
+ break;
+ }
+
+ return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+}
+
+static unsigned int nf_nat_ipv6_prerouting(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct in6_addr daddr = ipv6_hdr(skb)->daddr;
+ unsigned int ret;
+
+ ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
+ if (ret != NF_DROP && ret != NF_STOLEN &&
+ ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
+ skb_dst_drop(skb);
+
+ return ret;
+}
+
+static unsigned int nf_nat_ipv6_postrouting(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ enum ip_conntrack_info ctinfo __maybe_unused;
+ const struct nf_conn *ct __maybe_unused;
+ unsigned int ret;
+
+ ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
+#ifdef CONFIG_XFRM
+ if (ret != NF_DROP && ret != NF_STOLEN &&
+ !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+ (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
+ &ct->tuplehash[!dir].tuple.dst.u3) ||
+ (ct->tuplehash[dir].tuple.src.u.all !=
+ ct->tuplehash[!dir].tuple.dst.u.all))
+ if (nf_xfrm_me_harder(skb, AF_INET6) < 0)
+ ret = NF_DROP;
+ }
+#endif
+ return ret;
+}
+
+static unsigned int nf_nat_ipv6_output(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ enum ip_conntrack_info ctinfo;
+ const struct nf_conn *ct;
+ unsigned int ret;
+
+ ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
+ if (ret != NF_DROP && ret != NF_STOLEN &&
+ (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
+ &ct->tuplehash[!dir].tuple.src.u3)) {
+ if (ip6_route_me_harder(skb))
+ ret = NF_DROP;
+ }
+#ifdef CONFIG_XFRM
+ else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+ ct->tuplehash[dir].tuple.dst.u.all !=
+ ct->tuplehash[!dir].tuple.src.u.all)
+ if (nf_xfrm_me_harder(skb, AF_INET6))
+ ret = NF_DROP;
+#endif
+ }
+ return ret;
+}
+
+static struct nf_chain_type nft_chain_nat_ipv6 = {
+ .family = NFPROTO_IPV6,
+ .name = "nat",
+ .type = NFT_CHAIN_T_NAT,
+ .hook_mask = (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_POST_ROUTING) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_LOCAL_IN),
+ .fn = {
+ [NF_INET_PRE_ROUTING] = nf_nat_ipv6_prerouting,
+ [NF_INET_POST_ROUTING] = nf_nat_ipv6_postrouting,
+ [NF_INET_LOCAL_OUT] = nf_nat_ipv6_output,
+ [NF_INET_LOCAL_IN] = nf_nat_ipv6_fn,
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init nft_chain_nat_ipv6_init(void)
+{
+ int err;
+
+ err = nft_register_chain_type(&nft_chain_nat_ipv6);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void __exit nft_chain_nat_ipv6_exit(void)
+{
+ nft_unregister_chain_type(&nft_chain_nat_ipv6);
+}
+
+module_init(nft_chain_nat_ipv6_init);
+module_exit(nft_chain_nat_ipv6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
new file mode 100644
index 000000000000..3fe40f0456ad
--- /dev/null
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+#include <net/route.h>
+
+static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ unsigned int ret;
+ struct nft_pktinfo pkt;
+ struct in6_addr saddr, daddr;
+ u_int8_t hop_limit;
+ u32 mark, flowlabel;
+
+ /* malformed packet, drop it */
+ if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+ return NF_DROP;
+
+ /* save source/dest address, mark, hoplimit, flowlabel, priority */
+ memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
+ memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
+ mark = skb->mark;
+ hop_limit = ipv6_hdr(skb)->hop_limit;
+
+ /* flowlabel and prio (includes version, which shouldn't change either) */
+ flowlabel = *((u32 *)ipv6_hdr(skb));
+
+ ret = nft_do_chain_pktinfo(&pkt, ops);
+ if (ret != NF_DROP && ret != NF_QUEUE &&
+ (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
+ memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
+ skb->mark != mark ||
+ ipv6_hdr(skb)->hop_limit != hop_limit ||
+ flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
+ return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP;
+
+ return ret;
+}
+
+static struct nf_chain_type nft_chain_route_ipv6 = {
+ .family = NFPROTO_IPV6,
+ .name = "route",
+ .type = NFT_CHAIN_T_ROUTE,
+ .hook_mask = (1 << NF_INET_LOCAL_OUT),
+ .fn = {
+ [NF_INET_LOCAL_OUT] = nf_route_table_hook,
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init nft_chain_route_init(void)
+{
+ return nft_register_chain_type(&nft_chain_route_ipv6);
+}
+
+static void __exit nft_chain_route_exit(void)
+{
+ nft_unregister_chain_type(&nft_chain_route_ipv6);
+}
+
+module_init(nft_chain_route_init);
+module_exit(nft_chain_route_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET6, "route");
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 18f19df4189f..8815e31a87fe 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -116,7 +116,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
- daddr = &np->daddr;
+ daddr = &sk->sk_v6_daddr;
}
if (!iif)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index a4ed2416399e..3c00842b0079 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -77,20 +77,19 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
sk_for_each_from(sk)
if (inet_sk(sk)->inet_num == num) {
- struct ipv6_pinfo *np = inet6_sk(sk);
if (!net_eq(sock_net(sk), net))
continue;
- if (!ipv6_addr_any(&np->daddr) &&
- !ipv6_addr_equal(&np->daddr, rmt_addr))
+ if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
+ !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
continue;
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
continue;
- if (!ipv6_addr_any(&np->rcv_saddr)) {
- if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
+ if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+ if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
goto found;
if (is_multicast &&
inet6_mc_check(sk, loc_addr, rmt_addr))
@@ -302,7 +301,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
}
inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
- np->rcv_saddr = addr->sin6_addr;
+ sk->sk_v6_rcv_saddr = addr->sin6_addr;
if (!(addr_type & IPV6_ADDR_MULTICAST))
np->saddr = addr->sin6_addr;
err = 0;
@@ -804,8 +803,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
- ipv6_addr_equal(daddr, &np->daddr))
- daddr = &np->daddr;
+ ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
+ daddr = &sk->sk_v6_daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
@@ -816,7 +815,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
return -EDESTADDRREQ;
proto = inet->inet_num;
- daddr = &np->daddr;
+ daddr = &sk->sk_v6_daddr;
fl6.flowlabel = np->flow_label;
}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 1aeb473b2cc6..cc85a9ba5010 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -82,24 +82,24 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
* callers should be careful not to use the hash value outside the ipfrag_lock
* as doing so could race with ipfrag_hash_rnd being recalculated.
*/
-unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
- const struct in6_addr *daddr, u32 rnd)
+static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
+ const struct in6_addr *daddr)
{
u32 c;
+ net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
- (__force u32)id, rnd);
+ (__force u32)id, ip6_frags.rnd);
return c & (INETFRAGS_HASHSZ - 1);
}
-EXPORT_SYMBOL_GPL(inet6_hash_frag);
static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
struct frag_queue *fq;
fq = container_of(q, struct frag_queue, q);
- return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
+ return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
}
bool ip6_frag_match(struct inet_frag_queue *q, void *a)
@@ -193,7 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
arg.ecn = ecn;
read_lock(&ip6_frags.lock);
- hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
+ hash = inet6_hash_frag(id, src, dst);
q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
if (IS_ERR_OR_NULL(q)) {
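
The reassembly change above replaces boot-time seeding of the fragment hash with net_get_random_once() at first use. A minimal userspace sketch of that lazy, thread-safe one-time-initialization pattern, assuming pthread_once() as a stand-in for the kernel helper and a toy xor in place of jhash (all names below are illustrative, not kernel API):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint32_t frag_hash_rnd;                    /* secret, written exactly once */
static pthread_once_t frag_rnd_once = PTHREAD_ONCE_INIT;

static void frag_rnd_init(void)
{
        /* stand-in for get_random_bytes(); not cryptographically strong */
        srand((unsigned int)time(NULL));
        frag_hash_rnd = (uint32_t)rand();
}

static uint32_t frag_hash(uint32_t id, uint32_t saddr, uint32_t daddr)
{
        /* lazy, thread-safe seeding on the first lookup, as in the patch */
        pthread_once(&frag_rnd_once, frag_rnd_init);
        return (id ^ saddr ^ daddr ^ frag_hash_rnd) & 1023;   /* toy hash, not jhash */
}

int main(void)
{
        printf("bucket=%u\n", frag_hash(42, 0x0a000001, 0x0a000002));
        return 0;
}
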
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c979dd96d82a..fd399ac6c1f7 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -476,6 +476,24 @@ out:
}
#ifdef CONFIG_IPV6_ROUTER_PREF
+struct __rt6_probe_work {
+ struct work_struct work;
+ struct in6_addr target;
+ struct net_device *dev;
+};
+
+static void rt6_probe_deferred(struct work_struct *w)
+{
+ struct in6_addr mcaddr;
+ struct __rt6_probe_work *work =
+ container_of(w, struct __rt6_probe_work, work);
+
+ addrconf_addr_solict_mult(&work->target, &mcaddr);
+ ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
+ dev_put(work->dev);
+ kfree(w);
+}
+
static void rt6_probe(struct rt6_info *rt)
{
struct neighbour *neigh;
@@ -499,17 +517,23 @@ static void rt6_probe(struct rt6_info *rt)
if (!neigh ||
time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
- struct in6_addr mcaddr;
- struct in6_addr *target;
+ struct __rt6_probe_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (neigh) {
+ if (neigh && work)
neigh->updated = jiffies;
+
+ if (neigh)
write_unlock(&neigh->lock);
- }
- target = (struct in6_addr *)&rt->rt6i_gateway;
- addrconf_addr_solict_mult(target, &mcaddr);
- ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
+ if (work) {
+ INIT_WORK(&work->work, rt6_probe_deferred);
+ work->target = rt->rt6i_gateway;
+ dev_hold(rt->dst.dev);
+ work->dev = rt->dst.dev;
+ schedule_work(&work->work);
+ }
} else {
out:
write_unlock(&neigh->lock);
@@ -595,7 +619,7 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
goto out;
m = rt6_score_route(rt, oif, strict);
- if (m == RT6_NUD_FAIL_SOFT && !IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
+ if (m == RT6_NUD_FAIL_SOFT) {
match_do_rr = true;
m = 0; /* lowest valid score */
} else if (m < 0) {
@@ -851,7 +875,6 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
if (ort->rt6i_dst.plen != 128 &&
ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
rt->rt6i_flags |= RTF_ANYCAST;
- rt->rt6i_gateway = *daddr;
}
rt->rt6i_flags |= RTF_CACHE;
@@ -1064,10 +1087,13 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
return NULL;
- if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
- return dst;
+ if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+ return NULL;
- return NULL;
+ if (rt6_check_expired(rt))
+ return NULL;
+
+ return dst;
}
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
@@ -1137,7 +1163,6 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = oif;
fl6.flowi6_mark = mark;
- fl6.flowi6_flags = 0;
fl6.daddr = iph->daddr;
fl6.saddr = iph->saddr;
fl6.flowlabel = ip6_flowinfo(iph);
@@ -1236,7 +1261,6 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = oif;
fl6.flowi6_mark = mark;
- fl6.flowi6_flags = 0;
fl6.daddr = iph->daddr;
fl6.saddr = iph->saddr;
fl6.flowlabel = ip6_flowinfo(iph);
@@ -1258,7 +1282,6 @@ void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = oif;
fl6.flowi6_mark = mark;
- fl6.flowi6_flags = 0;
fl6.daddr = msg->dest;
fl6.saddr = iph->daddr;
@@ -1338,6 +1361,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
rt->dst.flags |= DST_HOST;
rt->dst.output = ip6_output;
atomic_set(&rt->dst.__refcnt, 1);
+ rt->rt6i_gateway = fl6->daddr;
rt->rt6i_dst.addr = fl6->daddr;
rt->rt6i_dst.plen = 128;
rt->rt6i_idev = idev;
@@ -1873,7 +1897,10 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
in6_dev_hold(rt->rt6i_idev);
rt->dst.lastuse = jiffies;
- rt->rt6i_gateway = ort->rt6i_gateway;
+ if (ort->rt6i_flags & RTF_GATEWAY)
+ rt->rt6i_gateway = ort->rt6i_gateway;
+ else
+ rt->rt6i_gateway = *dest;
rt->rt6i_flags = ort->rt6i_flags;
if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
(RTF_DEFAULT | RTF_ADDRCONF))
@@ -2160,6 +2187,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
else
rt->rt6i_flags |= RTF_LOCAL;
+ rt->rt6i_gateway = *addr;
rt->rt6i_dst.addr = *addr;
rt->rt6i_dst.plen = 128;
rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
@@ -2800,56 +2828,12 @@ static int ip6_route_dev_notify(struct notifier_block *this,
#ifdef CONFIG_PROC_FS
-struct rt6_proc_arg
-{
- char *buffer;
- int offset;
- int length;
- int skip;
- int len;
-};
-
-static int rt6_info_route(struct rt6_info *rt, void *p_arg)
-{
- struct seq_file *m = p_arg;
-
- seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
-
-#ifdef CONFIG_IPV6_SUBTREES
- seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
-#else
- seq_puts(m, "00000000000000000000000000000000 00 ");
-#endif
- if (rt->rt6i_flags & RTF_GATEWAY) {
- seq_printf(m, "%pi6", &rt->rt6i_gateway);
- } else {
- seq_puts(m, "00000000000000000000000000000000");
- }
- seq_printf(m, " %08x %08x %08x %08x %8s\n",
- rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
- rt->dst.__use, rt->rt6i_flags,
- rt->dst.dev ? rt->dst.dev->name : "");
- return 0;
-}
-
-static int ipv6_route_show(struct seq_file *m, void *v)
-{
- struct net *net = (struct net *)m->private;
- fib6_clean_all_ro(net, rt6_info_route, 0, m);
- return 0;
-}
-
-static int ipv6_route_open(struct inode *inode, struct file *file)
-{
- return single_open_net(inode, file, ipv6_route_show);
-}
-
static const struct file_operations ipv6_route_proc_fops = {
.owner = THIS_MODULE,
.open = ipv6_route_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release_net,
+ .release = seq_release_net,
};
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
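
The rt6_probe() rework above defers the neighbour solicitation to a work item that pins the device and frees itself when done, so the probe no longer runs under the neighbour lock. A rough userspace analogue, with a detached thread standing in for schedule_work() and a printf standing in for ndisc_send_ns() (names and structure below are assumptions for illustration only):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct probe_work {
        char target[64];        /* address to probe, as text for the sketch */
};

static void *probe_worker(void *arg)
{
        struct probe_work *w = arg;

        printf("probing %s\n", w->target);   /* stands in for the actual probe */
        free(w);                             /* the work item owns itself */
        return NULL;
}

static int schedule_probe(const char *target)
{
        struct probe_work *w = malloc(sizeof(*w));
        pthread_t tid;

        if (!w)
                return -1;      /* allocation failure: skip the probe, as in the patch */
        snprintf(w->target, sizeof(w->target), "%s", target);
        if (pthread_create(&tid, NULL, probe_worker, w)) {
                free(w);
                return -1;
        }
        return pthread_detach(tid);
}

int main(void)
{
        schedule_probe("fe80::1");
        pthread_exit(NULL);     /* let the detached worker finish */
}
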
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 19269453a8ea..3a9038dd818d 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -933,10 +933,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
ttl = iph6->hop_limit;
tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
- if (likely(!skb->encapsulation)) {
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
- }
+ skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
+ if (IS_ERR(skb))
+ goto out;
err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
ttl, df, !net_eq(tunnel->net, dev_net(dev)));
@@ -946,8 +945,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
tx_error_icmp:
dst_link_failure(skb);
tx_error:
- dev->stats.tx_errors++;
dev_kfree_skb(skb);
+out:
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
@@ -956,13 +956,15 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tiph = &tunnel->parms.iph;
- if (likely(!skb->encapsulation)) {
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
- }
+ skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+ if (IS_ERR(skb))
+ goto out;
ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP);
return NETDEV_TX_OK;
+out:
+ dev->stats.tx_errors++;
+ return NETDEV_TX_OK;
}
static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
@@ -1292,6 +1294,12 @@ static void ipip6_dev_free(struct net_device *dev)
free_netdev(dev);
}
+#define SIT_FEATURES (NETIF_F_SG | \
+ NETIF_F_FRAGLIST | \
+ NETIF_F_HIGHDMA | \
+ NETIF_F_GSO_SOFTWARE | \
+ NETIF_F_HW_CSUM)
+
static void ipip6_tunnel_setup(struct net_device *dev)
{
dev->netdev_ops = &ipip6_netdev_ops;
@@ -1305,6 +1313,8 @@ static void ipip6_tunnel_setup(struct net_device *dev)
dev->iflink = 0;
dev->addr_len = 4;
dev->features |= NETIF_F_LLTX;
+ dev->features |= SIT_FEATURES;
+ dev->hw_features |= SIT_FEATURES;
}
static int ipip6_tunnel_init(struct net_device *dev)
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index bf63ac8a49b9..535a3ad262f1 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -24,26 +24,23 @@
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
-/* Table must be sorted. */
+static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS];
+
+/* RFC 2460, Section 8.3:
+ * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
+ *
+ * Due to IPV6_MIN_MTU=1280 the lowest possible MSS is 1220, which allows
+ * using higher values than ipv4 tcp syncookies.
+ * The other values are chosen based on ethernet (1500 and 9k MTU), plus
+ * one that accounts for common encap (PPPoe) overhead. Table must be sorted.
+ */
static __u16 const msstab[] = {
- 64,
- 512,
- 536,
- 1280 - 60,
+ 1280 - 60, /* IPV6_MIN_MTU - 60 */
1480 - 60,
1500 - 60,
- 4460 - 60,
9000 - 60,
};
-/*
- * This (misnamed) value is the age of syncookie which is permitted.
- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
- * backoff) to compute at runtime so it's currently hardcoded here.
- */
-#define COUNTER_TRIES 4
-
static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
@@ -66,14 +63,18 @@ static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *daddr,
__be16 sport, __be16 dport, u32 count, int c)
{
- __u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
+ __u32 *tmp;
+
+ net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret));
+
+ tmp = __get_cpu_var(ipv6_cookie_scratch);
/*
* we have 320 bits of information to hash, copy in the remaining
- * 192 bits required for sha_transform, from the syncookie_secret
+ * 192 bits required for sha_transform, from the syncookie6_secret
* and overwrite the digest with the secret
*/
- memcpy(tmp + 10, syncookie_secret[c], 44);
+ memcpy(tmp + 10, syncookie6_secret[c], 44);
memcpy(tmp, saddr, 16);
memcpy(tmp + 4, daddr, 16);
tmp[8] = ((__force u32)sport << 16) + (__force u32)dport;
@@ -86,8 +87,9 @@ static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *dadd
static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__be16 sport, __be16 dport, __u32 sseq,
- __u32 count, __u32 data)
+ __u32 data)
{
+ u32 count = tcp_cookie_time();
return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
sseq + (count << COOKIEBITS) +
((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
@@ -96,15 +98,14 @@ static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
const struct in6_addr *daddr, __be16 sport,
- __be16 dport, __u32 sseq, __u32 count,
- __u32 maxdiff)
+ __be16 dport, __u32 sseq)
{
- __u32 diff;
+ __u32 diff, count = tcp_cookie_time();
cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
- if (diff >= maxdiff)
+ if (diff >= MAX_SYNCOOKIE_AGE)
return (__u32)-1;
return (cookie -
@@ -125,8 +126,7 @@ u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
*mssp = msstab[mssind];
return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
- th->dest, ntohl(th->seq),
- jiffies / (HZ * 60), mssind);
+ th->dest, ntohl(th->seq), mssind);
}
EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);
@@ -146,8 +146,7 @@ int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
{
__u32 seq = ntohl(th->seq) - 1;
__u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
- th->source, th->dest, seq,
- jiffies / (HZ * 60), COUNTER_TRIES);
+ th->source, th->dest, seq);
return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
@@ -157,7 +156,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
{
struct tcp_options_received tcp_opt;
struct inet_request_sock *ireq;
- struct inet6_request_sock *ireq6;
struct tcp_request_sock *treq;
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -194,7 +192,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
goto out;
ireq = inet_rsk(req);
- ireq6 = inet6_rsk(req);
treq = tcp_rsk(req);
treq->listener = NULL;
@@ -202,22 +199,22 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
goto out_free;
req->mss = mss;
- ireq->rmt_port = th->source;
- ireq->loc_port = th->dest;
- ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
- ireq6->loc_addr = ipv6_hdr(skb)->daddr;
+ ireq->ir_rmt_port = th->source;
+ ireq->ir_num = ntohs(th->dest);
+ ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
atomic_inc(&skb->users);
- ireq6->pktopts = skb;
+ ireq->pktopts = skb;
}
- ireq6->iif = sk->sk_bound_dev_if;
+ ireq->ir_iif = sk->sk_bound_dev_if;
/* So that link locals have meaning */
if (!sk->sk_bound_dev_if &&
- ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
- ireq6->iif = inet6_iif(skb);
+ ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+ ireq->ir_iif = inet6_iif(skb);
req->expires = 0UL;
req->num_retrans = 0;
@@ -241,12 +238,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
- fl6.daddr = ireq6->rmt_addr;
+ fl6.daddr = ireq->ir_v6_rmt_addr;
final_p = fl6_update_dst(&fl6, np->opt, &final);
- fl6.saddr = ireq6->loc_addr;
+ fl6.saddr = ireq->ir_v6_loc_addr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
- fl6.fl6_dport = inet_rsk(req)->rmt_port;
+ fl6.fl6_dport = ireq->ir_rmt_port;
fl6.fl6_sport = inet_sk(sk)->inet_sport;
security_req_classify_flow(req, flowi6_to_flowi(&fl6));
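
Two ideas drive the syncookies rework above: the MSS table now starts at the IPv6 minimum (1280 - 60 = 1220), and the hand-rolled COUNTER_TRIES window is replaced by a shared counter with MAX_SYNCOOKIE_AGE. A compact sketch of both; the MAX_SYNCOOKIE_AGE value here is chosen arbitrarily for illustration:

#include <stdint.h>
#include <stdio.h>

#define COOKIEBITS 24
#define MAX_SYNCOOKIE_AGE 2     /* counter ticks; value is an assumption */

static const uint16_t msstab[] = { 1280 - 60, 1480 - 60, 1500 - 60, 9000 - 60 };

static unsigned int mss_to_index(uint16_t mss)
{
        unsigned int i;

        /* largest table entry not exceeding the peer's advertised MSS */
        for (i = sizeof(msstab) / sizeof(msstab[0]) - 1; i > 0; i--)
                if (mss >= msstab[i])
                        break;
        return i;               /* encoded in the low bits of the cookie */
}

static int cookie_counter_ok(uint32_t cookie, uint32_t now)
{
        uint32_t diff = (now - (cookie >> COOKIEBITS)) &
                        ((uint32_t)-1 >> COOKIEBITS);

        return diff < MAX_SYNCOOKIE_AGE;
}

int main(void)
{
        printf("mss 1400 -> index %u (%u bytes)\n",
               mss_to_index(1400), msstab[mss_to_index(1400)]);
        printf("fresh cookie accepted: %d\n",
               cookie_counter_ok(5u << COOKIEBITS, 6));
        return 0;
}
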
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5c71501fc917..0740f93a114a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -192,13 +192,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
}
if (tp->rx_opt.ts_recent_stamp &&
- !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
+ !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
tp->write_seq = 0;
}
- np->daddr = usin->sin6_addr;
+ sk->sk_v6_daddr = usin->sin6_addr;
np->flow_label = fl6.flowlabel;
/*
@@ -237,17 +237,17 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
} else {
ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
- &np->rcv_saddr);
+ &sk->sk_v6_rcv_saddr);
}
return err;
}
- if (!ipv6_addr_any(&np->rcv_saddr))
- saddr = &np->rcv_saddr;
+ if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+ saddr = &sk->sk_v6_rcv_saddr;
fl6.flowi6_proto = IPPROTO_TCP;
- fl6.daddr = np->daddr;
+ fl6.daddr = sk->sk_v6_daddr;
fl6.saddr = saddr ? *saddr : np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
@@ -266,7 +266,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (saddr == NULL) {
saddr = &fl6.saddr;
- np->rcv_saddr = *saddr;
+ sk->sk_v6_rcv_saddr = *saddr;
}
/* set the source address */
@@ -279,7 +279,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
rt = (struct rt6_info *) dst;
if (tcp_death_row.sysctl_tw_recycle &&
!tp->rx_opt.ts_recent_stamp &&
- ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
+ ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
tcp_fetch_timewait_stamp(sk, dst);
icsk->icsk_ext_hdr_len = 0;
@@ -298,7 +298,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
- np->daddr.s6_addr32,
+ sk->sk_v6_daddr.s6_addr32,
inet->inet_sport,
inet->inet_dport);
@@ -465,7 +465,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
u16 queue_mapping)
{
- struct inet6_request_sock *treq = inet6_rsk(req);
+ struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff * skb;
int err = -ENOMEM;
@@ -477,9 +477,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
skb = tcp_make_synack(sk, dst, req, NULL);
if (skb) {
- __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
+ __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
+ &ireq->ir_v6_rmt_addr);
- fl6->daddr = treq->rmt_addr;
+ fl6->daddr = ireq->ir_v6_rmt_addr;
skb_set_queue_mapping(skb, queue_mapping);
err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
err = net_xmit_eval(err);
@@ -502,7 +503,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
- kfree_skb(inet6_rsk(req)->pktopts);
+ kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
@@ -515,13 +516,13 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
struct sock *addr_sk)
{
- return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
+ return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
struct request_sock *req)
{
- return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
+ return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
}
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
@@ -621,10 +622,10 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
if (sk) {
saddr = &inet6_sk(sk)->saddr;
- daddr = &inet6_sk(sk)->daddr;
+ daddr = &sk->sk_v6_daddr;
} else if (req) {
- saddr = &inet6_rsk(req)->loc_addr;
- daddr = &inet6_rsk(req)->rmt_addr;
+ saddr = &inet_rsk(req)->ir_v6_loc_addr;
+ daddr = &inet_rsk(req)->ir_v6_rmt_addr;
} else {
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
saddr = &ip6h->saddr;
@@ -949,7 +950,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct tcp_options_received tmp_opt;
struct request_sock *req;
- struct inet6_request_sock *treq;
+ struct inet_request_sock *ireq;
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
__u32 isn = TCP_SKB_CB(skb)->when;
@@ -994,25 +995,25 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb);
- treq = inet6_rsk(req);
- treq->rmt_addr = ipv6_hdr(skb)->saddr;
- treq->loc_addr = ipv6_hdr(skb)->daddr;
+ ireq = inet_rsk(req);
+ ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
if (!want_cookie || tmp_opt.tstamp_ok)
TCP_ECN_create_request(req, skb, sock_net(sk));
- treq->iif = sk->sk_bound_dev_if;
+ ireq->ir_iif = sk->sk_bound_dev_if;
/* So that link locals have meaning */
if (!sk->sk_bound_dev_if &&
- ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
- treq->iif = inet6_iif(skb);
+ ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+ ireq->ir_iif = inet6_iif(skb);
if (!isn) {
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
atomic_inc(&skb->users);
- treq->pktopts = skb;
+ ireq->pktopts = skb;
}
if (want_cookie) {
@@ -1051,7 +1052,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
* to the moment of synflood.
*/
LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
- &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
+ &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
goto drop_and_release;
}
@@ -1086,7 +1087,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
- struct inet6_request_sock *treq;
+ struct inet_request_sock *ireq;
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct tcp6_sock *newtcp6sk;
struct inet_sock *newinet;
@@ -1116,11 +1117,11 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
- ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
+ ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
- newnp->rcv_saddr = newnp->saddr;
+ newsk->sk_v6_rcv_saddr = newnp->saddr;
inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1151,7 +1152,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
return newsk;
}
- treq = inet6_rsk(req);
+ ireq = inet_rsk(req);
if (sk_acceptq_is_full(sk))
goto out_overflow;
@@ -1185,10 +1186,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
- newnp->daddr = treq->rmt_addr;
- newnp->saddr = treq->loc_addr;
- newnp->rcv_saddr = treq->loc_addr;
- newsk->sk_bound_dev_if = treq->iif;
+ newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
+ newnp->saddr = ireq->ir_v6_loc_addr;
+ newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
+ newsk->sk_bound_dev_if = ireq->ir_iif;
/* Now IPv6 options...
@@ -1203,11 +1204,11 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
- if (treq->pktopts != NULL) {
- newnp->pktoptions = skb_clone(treq->pktopts,
+ if (ireq->pktopts != NULL) {
+ newnp->pktoptions = skb_clone(ireq->pktopts,
sk_gfp_atomic(sk, GFP_ATOMIC));
- consume_skb(treq->pktopts);
- treq->pktopts = NULL;
+ consume_skb(ireq->pktopts);
+ ireq->pktopts = NULL;
if (newnp->pktoptions)
skb_set_owner_r(newnp->pktoptions, newsk);
}
@@ -1244,13 +1245,13 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
- if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
+ if ((key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr)) != NULL) {
/* We're using one, so create a matching key
* on the newsk structure. If we fail to get
* memory, then we end up not copying the key
* across. Shucks.
*/
- tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
+ tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
AF_INET6, key->key, key->keylen,
sk_gfp_atomic(sk, GFP_ATOMIC));
}
@@ -1722,8 +1723,8 @@ static void get_openreq6(struct seq_file *seq,
const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
int ttd = req->expires - jiffies;
- const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
- const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
+ const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
+ const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
if (ttd < 0)
ttd = 0;
@@ -1734,10 +1735,10 @@ static void get_openreq6(struct seq_file *seq,
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3],
- ntohs(inet_rsk(req)->loc_port),
+ inet_rsk(req)->ir_num,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3],
- ntohs(inet_rsk(req)->rmt_port),
+ ntohs(inet_rsk(req)->ir_rmt_port),
TCP_SYN_RECV,
0,0, /* could print option size, but that is af dependent. */
1, /* timers active (only the expire timer) */
@@ -1758,10 +1759,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
const struct inet_sock *inet = inet_sk(sp);
const struct tcp_sock *tp = tcp_sk(sp);
const struct inet_connection_sock *icsk = inet_csk(sp);
- const struct ipv6_pinfo *np = inet6_sk(sp);
- dest = &np->daddr;
- src = &np->rcv_saddr;
+ dest = &sp->sk_v6_daddr;
+ src = &sp->sk_v6_rcv_saddr;
destp = ntohs(inet->inet_dport);
srcp = ntohs(inet->inet_sport);
@@ -1810,11 +1810,10 @@ static void get_timewait6_sock(struct seq_file *seq,
{
const struct in6_addr *dest, *src;
__u16 destp, srcp;
- const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
- long delta = tw->tw_ttd - jiffies;
+ s32 delta = tw->tw_ttd - inet_tw_time_stamp();
- dest = &tw6->tw_v6_daddr;
- src = &tw6->tw_v6_rcv_saddr;
+ dest = &tw->tw_v6_daddr;
+ src = &tw->tw_v6_rcv_saddr;
destp = ntohs(tw->tw_dport);
srcp = ntohs(tw->tw_sport);
@@ -1834,6 +1833,7 @@ static void get_timewait6_sock(struct seq_file *seq,
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
struct tcp_iter_state *st;
+ struct sock *sk = v;
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
@@ -1849,14 +1849,14 @@ static int tcp6_seq_show(struct seq_file *seq, void *v)
switch (st->state) {
case TCP_SEQ_STATE_LISTENING:
case TCP_SEQ_STATE_ESTABLISHED:
- get_tcp6_sock(seq, v, st->num);
+ if (sk->sk_state == TCP_TIME_WAIT)
+ get_timewait6_sock(seq, v, st->num);
+ else
+ get_tcp6_sock(seq, v, st->num);
break;
case TCP_SEQ_STATE_OPENREQ:
get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
break;
- case TCP_SEQ_STATE_TIME_WAIT:
- get_timewait6_sock(seq, v, st->num);
- break;
}
out:
return 0;
@@ -1929,6 +1929,7 @@ struct proto tcpv6_prot = {
.memory_allocated = &tcp_memory_allocated,
.memory_pressure = &tcp_memory_pressure,
.orphan_count = &tcp_orphan_count,
+ .sysctl_mem = sysctl_tcp_mem,
.sysctl_wmem = sysctl_tcp_wmem,
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
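
Most of the tcp_ipv6.c churn above comes from one data-structure move: the IPv6 peer and bound addresses now live in the family-independent struct sock / inet_request_sock (sk_v6_daddr, ir_v6_rmt_addr, ...) rather than in the IPv6-private ipv6_pinfo / inet6_request_sock. A toy model of why that helps shared lookup code; the struct and field names below are invented for illustration:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* family-independent part of a socket, as common code would see it */
struct common_sock {
        int family;                 /* AF_INET or AF_INET6 */
        struct in6_addr v6_daddr;   /* valid when family == AF_INET6 */
        uint32_t v4_daddr;          /* valid when family == AF_INET */
};

/* generic comparison: no cast to a protocol-private structure needed */
static int daddr_equal_v6(const struct common_sock *sk, const struct in6_addr *a)
{
        return sk->family == AF_INET6 &&
               memcmp(&sk->v6_daddr, a, sizeof(*a)) == 0;
}

int main(void)
{
        struct common_sock sk = { .family = AF_INET6 };
        struct in6_addr peer;

        inet_pton(AF_INET6, "2001:db8::1", &sk.v6_daddr);
        inet_pton(AF_INET6, "2001:db8::1", &peer);
        printf("daddr match: %d\n", daddr_equal_v6(&sk, &peer));
        return 0;
}
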
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 2ec6bf6a0aa0..c1097c798900 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -83,7 +83,7 @@ static int tcp6_gro_complete(struct sk_buff *skb)
static const struct net_offload tcpv6_offload = {
.callbacks = {
.gso_send_check = tcp_v6_gso_send_check,
- .gso_segment = tcp_tso_segment,
+ .gso_segment = tcp_gso_segment,
.gro_receive = tcp6_gro_receive,
.gro_complete = tcp6_gro_complete,
},
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 72b7eaaf3ca0..f3893e897f72 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -53,22 +53,42 @@
#include <trace/events/skb.h>
#include "udp_impl.h"
+static unsigned int udp6_ehashfn(struct net *net,
+ const struct in6_addr *laddr,
+ const u16 lport,
+ const struct in6_addr *faddr,
+ const __be16 fport)
+{
+ static u32 udp6_ehash_secret __read_mostly;
+ static u32 udp_ipv6_hash_secret __read_mostly;
+
+ u32 lhash, fhash;
+
+ net_get_random_once(&udp6_ehash_secret,
+ sizeof(udp6_ehash_secret));
+ net_get_random_once(&udp_ipv6_hash_secret,
+ sizeof(udp_ipv6_hash_secret));
+
+ lhash = (__force u32)laddr->s6_addr32[3];
+ fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
+
+ return __inet6_ehashfn(lhash, lport, fhash, fport,
+ udp_ipv6_hash_secret + net_hash_mix(net));
+}
+
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
{
- const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
- __be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
- __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
int sk_ipv6only = ipv6_only_sock(sk);
int sk2_ipv6only = inet_v6_ipv6only(sk2);
- int addr_type = ipv6_addr_type(sk_rcv_saddr6);
+ int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
/* if both are mapped, treat as IPv4 */
if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
return (!sk2_ipv6only &&
- (!sk1_rcv_saddr || !sk2_rcv_saddr ||
- sk1_rcv_saddr == sk2_rcv_saddr));
+ (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr ||
+ sk->sk_rcv_saddr == sk2->sk_rcv_saddr));
if (addr_type2 == IPV6_ADDR_ANY &&
!(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
@@ -79,7 +99,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
return 1;
if (sk2_rcv_saddr6 &&
- ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
+ ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
return 1;
return 0;
@@ -107,7 +127,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
unsigned int hash2_nulladdr =
udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
unsigned int hash2_partial =
- udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
+ udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
@@ -117,7 +137,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
static void udp_v6_rehash(struct sock *sk)
{
u16 new_hash = udp6_portaddr_hash(sock_net(sk),
- &inet6_sk(sk)->rcv_saddr,
+ &sk->sk_v6_rcv_saddr,
inet_sk(sk)->inet_num);
udp_lib_rehash(sk, new_hash);
@@ -133,7 +153,6 @@ static inline int compute_score(struct sock *sk, struct net *net,
if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
sk->sk_family == PF_INET6) {
- struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
score = 0;
@@ -142,13 +161,13 @@ static inline int compute_score(struct sock *sk, struct net *net,
return -1;
score++;
}
- if (!ipv6_addr_any(&np->rcv_saddr)) {
- if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+ if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+ if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
return -1;
score++;
}
- if (!ipv6_addr_any(&np->daddr)) {
- if (!ipv6_addr_equal(&np->daddr, saddr))
+ if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
+ if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
return -1;
score++;
}
@@ -171,10 +190,9 @@ static inline int compute_score2(struct sock *sk, struct net *net,
if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
sk->sk_family == PF_INET6) {
- struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
- if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+ if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
return -1;
score = 0;
if (inet->inet_dport) {
@@ -182,8 +200,8 @@ static inline int compute_score2(struct sock *sk, struct net *net,
return -1;
score++;
}
- if (!ipv6_addr_any(&np->daddr)) {
- if (!ipv6_addr_equal(&np->daddr, saddr))
+ if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
+ if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
return -1;
score++;
}
@@ -219,8 +237,8 @@ begin:
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
- hash = inet6_ehashfn(net, daddr, hnum,
- saddr, sport);
+ hash = udp6_ehashfn(net, daddr, hnum,
+ saddr, sport);
matches = 1;
} else if (score == SCORE2_MAX)
goto exact_match;
@@ -300,8 +318,8 @@ begin:
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
- hash = inet6_ehashfn(net, daddr, hnum,
- saddr, sport);
+ hash = udp6_ehashfn(net, daddr, hnum,
+ saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
@@ -551,8 +569,10 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
- if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
+ if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
sock_rps_save_rxhash(sk, skb);
+ sk_mark_napi_id(sk, skb);
+ }
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0) {
@@ -690,20 +710,19 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
if (udp_sk(s)->udp_port_hash == num &&
s->sk_family == PF_INET6) {
- struct ipv6_pinfo *np = inet6_sk(s);
if (inet->inet_dport) {
if (inet->inet_dport != rmt_port)
continue;
}
- if (!ipv6_addr_any(&np->daddr) &&
- !ipv6_addr_equal(&np->daddr, rmt_addr))
+ if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
+ !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
continue;
if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
continue;
- if (!ipv6_addr_any(&np->rcv_saddr)) {
- if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr))
+ if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+ if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
continue;
}
if (!inet6_mc_check(s, loc_addr, rmt_addr))
@@ -846,7 +865,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (sk != NULL) {
int ret;
- sk_mark_napi_id(sk, skb);
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);
@@ -1064,7 +1082,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
} else if (!up->pending) {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
- daddr = &np->daddr;
+ daddr = &sk->sk_v6_daddr;
} else
daddr = NULL;
@@ -1134,8 +1152,8 @@ do_udp_sendmsg:
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
- ipv6_addr_equal(daddr, &np->daddr))
- daddr = &np->daddr;
+ ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
+ daddr = &sk->sk_v6_daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
@@ -1146,7 +1164,7 @@ do_udp_sendmsg:
return -EDESTADDRREQ;
fl6.fl6_dport = inet->inet_dport;
- daddr = &np->daddr;
+ daddr = &sk->sk_v6_daddr;
fl6.flowlabel = np->flow_label;
connected = 1;
}
@@ -1225,9 +1243,6 @@ do_udp_sendmsg:
if (tclass < 0)
tclass = np->tclass;
- if (dontfrag < 0)
- dontfrag = np->dontfrag;
-
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
@@ -1246,6 +1261,8 @@ back_from_confirm:
up->pending = AF_INET6;
do_append_data:
+ if (dontfrag < 0)
+ dontfrag = np->dontfrag;
up->len += ulen;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
@@ -1262,8 +1279,8 @@ do_append_data:
if (dst) {
if (connected) {
ip6_dst_store(sk, dst,
- ipv6_addr_equal(&fl6.daddr, &np->daddr) ?
- &np->daddr : NULL,
+ ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+ &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
&np->saddr :
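
The udp.c hunks above give UDPv6 its own lazily seeded flow hash (udp6_ehashfn) for the SO_REUSEPORT path, so equally scoring sockets in a group are picked by flow rather than arbitrarily. A hedged sketch of how such a hash can select one of N group members; the scaling trick below is an illustrative assumption, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

static unsigned int pick_socket(uint32_t flow_hash, unsigned int nr_socks)
{
        /* maps the hash uniformly onto [0, nr_socks) without a division */
        return (unsigned int)(((uint64_t)flow_hash * nr_socks) >> 32);
}

int main(void)
{
        uint32_t flow_hash = 0x9e3779b9;        /* pretend 4-tuple hash */

        printf("flow -> member %u of 4\n", pick_socket(flow_hash, 4));
        return 0;
}
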
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 4691ed50a928..c779c3c90b9d 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -7,33 +7,32 @@
#include <net/inet_common.h>
#include <net/transp_v6.h>
-extern int __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int );
-extern void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *,
- u8 , u8 , int , __be32 , struct udp_table *);
+int __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int);
+void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
+ __be32, struct udp_table *);
-extern int udp_v6_get_port(struct sock *sk, unsigned short snum);
+int udp_v6_get_port(struct sock *sk, unsigned short snum);
-extern int udpv6_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
-extern int udpv6_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
+int udpv6_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+int udpv6_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
#ifdef CONFIG_COMPAT
-extern int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
-extern int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
+int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
+int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
#endif
-extern int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len);
-extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len,
- int noblock, int flags, int *addr_len);
-extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
-extern void udpv6_destroy_sock(struct sock *sk);
+int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len);
+int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len, int noblock, int flags, int *addr_len);
+int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+void udpv6_destroy_sock(struct sock *sk);
-extern void udp_v6_clear_sk(struct sock *sk, int size);
+void udp_v6_clear_sk(struct sock *sk, int size);
#ifdef CONFIG_PROC_FS
-extern int udp6_seq_show(struct seq_file *seq, void *v);
+int udp6_seq_show(struct seq_file *seq, void *v);
#endif
#endif /* _UDP6_IMPL_H */
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 60559511bd9c..08e23b0bf302 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -64,6 +64,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
SKB_GSO_DODGY |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_GRE |
+ SKB_GSO_IPIP |
+ SKB_GSO_SIT |
SKB_GSO_MPLS) ||
!(type & (SKB_GSO_UDP))))
goto out;
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 4770d515c2c8..cb04f7a16b5e 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -18,6 +18,65 @@
#include <net/ipv6.h>
#include <net/xfrm.h>
+/* Informational hook. The decap is still done here. */
+static struct xfrm_tunnel_notifier __rcu *rcv_notify_handlers __read_mostly;
+static DEFINE_MUTEX(xfrm6_mode_tunnel_input_mutex);
+
+int xfrm6_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler)
+{
+ struct xfrm_tunnel_notifier __rcu **pprev;
+ struct xfrm_tunnel_notifier *t;
+ int ret = -EEXIST;
+ int priority = handler->priority;
+
+ mutex_lock(&xfrm6_mode_tunnel_input_mutex);
+
+ for (pprev = &rcv_notify_handlers;
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&xfrm6_mode_tunnel_input_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t->priority > priority)
+ break;
+ if (t->priority == priority)
+ goto err;
+
+ }
+
+ handler->next = *pprev;
+ rcu_assign_pointer(*pprev, handler);
+
+ ret = 0;
+
+err:
+ mutex_unlock(&xfrm6_mode_tunnel_input_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xfrm6_mode_tunnel_input_register);
+
+int xfrm6_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler)
+{
+ struct xfrm_tunnel_notifier __rcu **pprev;
+ struct xfrm_tunnel_notifier *t;
+ int ret = -ENOENT;
+
+ mutex_lock(&xfrm6_mode_tunnel_input_mutex);
+ for (pprev = &rcv_notify_handlers;
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&xfrm6_mode_tunnel_input_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t == handler) {
+ *pprev = handler->next;
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&xfrm6_mode_tunnel_input_mutex);
+ synchronize_net();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xfrm6_mode_tunnel_input_deregister);
+
static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
{
const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
@@ -63,8 +122,15 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
return 0;
}
+#define for_each_input_rcu(head, handler) \
+ for (handler = rcu_dereference(head); \
+ handler != NULL; \
+ handler = rcu_dereference(handler->next))
+
+
static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
+ struct xfrm_tunnel_notifier *handler;
int err = -EINVAL;
if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
@@ -72,6 +138,9 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto out;
+ for_each_input_rcu(rcv_notify_handlers, handler)
+ handler->handler(skb);
+
err = skb_unclone(skb, GFP_ATOMIC);
if (err)
goto out;
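
The new xfrm6 tunnel-input notifier above keeps handlers on a priority-sorted singly linked list and refuses a second registration at an already-used priority. A single-threaded sketch of that insert-by-priority logic, with the RCU publication and mutex deliberately left out:

#include <errno.h>
#include <stdio.h>

struct notifier {
        int priority;
        void (*handler)(const char *pkt);
        struct notifier *next;
};

static struct notifier *handlers;

static int notifier_register(struct notifier *n)
{
        struct notifier **pprev = &handlers;

        for (; *pprev; pprev = &(*pprev)->next) {
                if ((*pprev)->priority > n->priority)
                        break;                  /* insert before higher priority */
                if ((*pprev)->priority == n->priority)
                        return -EEXIST;         /* slot already taken */
        }
        n->next = *pprev;
        *pprev = n;
        return 0;
}

static void log_pkt(const char *pkt) { printf("saw %s\n", pkt); }

int main(void)
{
        struct notifier a = { .priority = 10, .handler = log_pkt };
        struct notifier b = { .priority = 10, .handler = log_pkt };

        printf("first:  %d\n", notifier_register(&a));   /* 0 */
        printf("second: %d\n", notifier_register(&b));   /* -EEXIST */
        for (struct notifier *n = handlers; n; n = n->next)
                n->handler("packet");
        return 0;
}
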
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 23ed03d786c8..5f8e128c512d 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -135,9 +135,14 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
struct ipv6_opt_hdr *exthdr;
const unsigned char *nh = skb_network_header(skb);
u8 nexthdr = nh[IP6CB(skb)->nhoff];
+ int oif = 0;
+
+ if (skb_dst(skb))
+ oif = skb_dst(skb)->dev->ifindex;
memset(fl6, 0, sizeof(struct flowi6));
fl6->flowi6_mark = skb->mark;
+ fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
@@ -284,7 +289,7 @@ static struct dst_ops xfrm6_dst_ops = {
.destroy = xfrm6_dst_destroy,
.ifdown = xfrm6_dst_ifdown,
.local_out = __ip6_local_out,
- .gc_thresh = 1024,
+ .gc_thresh = 32768,
};
static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 0578d4fa00a9..0f676908d15b 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2563,9 +2563,8 @@ bed:
jiffies + msecs_to_jiffies(val));
/* Wait for IR-LMP to call us back */
- __wait_event_interruptible(self->query_wait,
- (self->cachedaddr != 0 || self->errno == -ETIME),
- err);
+ err = __wait_event_interruptible(self->query_wait,
+ (self->cachedaddr != 0 || self->errno == -ETIME));
/* If watchdog is still activated, kill it! */
del_timer(&(self->watchdog));
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index 564eb0b8afa3..8d65bb9477fc 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -509,16 +509,11 @@ typedef struct irnet_ctrl_channel
*/
/* -------------------------- IRDA PART -------------------------- */
-extern int
- irda_irnet_create(irnet_socket *); /* Initialise a IrNET socket */
-extern int
- irda_irnet_connect(irnet_socket *); /* Try to connect over IrDA */
-extern void
- irda_irnet_destroy(irnet_socket *); /* Teardown a IrNET socket */
-extern int
- irda_irnet_init(void); /* Initialise IrDA part of IrNET */
-extern void
- irda_irnet_cleanup(void); /* Teardown IrDA part of IrNET */
+int irda_irnet_create(irnet_socket *); /* Initialise an IrNET socket */
+int irda_irnet_connect(irnet_socket *); /* Try to connect over IrDA */
+void irda_irnet_destroy(irnet_socket *); /* Teardown an IrNET socket */
+int irda_irnet_init(void); /* Initialise IrDA part of IrNET */
+void irda_irnet_cleanup(void); /* Teardown IrDA part of IrNET */
/**************************** VARIABLES ****************************/
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 9d585370c5b4..911ef03bf8fb 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1098,7 +1098,8 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
x->id.proto = proto;
x->id.spi = sa->sadb_sa_spi;
- x->props.replay_window = sa->sadb_sa_replay;
+ x->props.replay_window = min_t(unsigned int, sa->sadb_sa_replay,
+ (sizeof(x->replay.bitmap) * 8));
if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN)
x->props.flags |= XFRM_STATE_NOECN;
if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index feae495a0a30..9af77d9c0ec9 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -115,6 +115,11 @@ struct l2tp_net {
static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
+static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
+{
+ return sk->sk_user_data;
+}
+
static inline struct l2tp_net *l2tp_pernet(struct net *net)
{
BUG_ON(!net);
@@ -504,7 +509,7 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
return 0;
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == PF_INET6) {
+ if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
if (!uh->check) {
LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
return 1;
@@ -1128,7 +1133,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
/* Queue the packet to IP for output */
skb->local_df = 1;
#if IS_ENABLED(CONFIG_IPV6)
- if (skb->sk->sk_family == PF_INET6)
+ if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped)
error = inet6_csk_xmit(skb, NULL);
else
#endif
@@ -1176,7 +1181,7 @@ static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
!(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
__wsum csum = skb_checksum(skb, 0, udp_len, 0);
skb->ip_summed = CHECKSUM_UNNECESSARY;
- uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len,
+ uh->check = csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr, udp_len,
IPPROTO_UDP, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
@@ -1184,7 +1189,7 @@ static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
- uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr,
+ uh->check = ~csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr,
udp_len, IPPROTO_UDP, 0);
}
}
@@ -1255,7 +1260,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == PF_INET6)
+ if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
l2tp_xmit_ipv6_csum(sk, skb, udp_len);
else
#endif
@@ -1304,10 +1309,9 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
*/
static void l2tp_tunnel_destruct(struct sock *sk)
{
- struct l2tp_tunnel *tunnel;
+ struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
struct l2tp_net *pn;
- tunnel = sk->sk_user_data;
if (tunnel == NULL)
goto end;
@@ -1675,7 +1679,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
}
/* Check if this socket has already been prepped */
- tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
+ tunnel = l2tp_tunnel(sk);
if (tunnel != NULL) {
/* This socket has already been prepped */
err = -EBUSY;
@@ -1704,6 +1708,24 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
if (cfg != NULL)
tunnel->debug = cfg->debug;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == PF_INET6) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ if (ipv6_addr_v4mapped(&np->saddr) &&
+ ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
+ struct inet_sock *inet = inet_sk(sk);
+
+ tunnel->v4mapped = true;
+ inet->inet_saddr = np->saddr.s6_addr32[3];
+ inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
+ inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
+ } else {
+ tunnel->v4mapped = false;
+ }
+ }
+#endif
+
/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
tunnel->encap = encap;
if (encap == L2TP_ENCAPTYPE_UDP) {
@@ -1712,7 +1734,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == PF_INET6)
+ if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
udpv6_encap_enable();
else
#endif
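
The l2tp_tunnel_create() hunk above detects AF_INET6 tunnel sockets whose endpoints are IPv4-mapped and falls back to the IPv4 transmit path, copying the embedded address out of the last 32 bits. A small standalone illustration of that check (memcpy is used here instead of the kernel's s6_addr32[3] access):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct in6_addr a;
        struct in_addr v4;
        char buf[INET_ADDRSTRLEN];

        inet_pton(AF_INET6, "::ffff:192.0.2.1", &a);

        if (IN6_IS_ADDR_V4MAPPED(&a)) {
                /* the IPv4 address is carried in the final four bytes */
                memcpy(&v4.s_addr, &a.s6_addr[12], sizeof(v4.s_addr));
                printf("v4mapped -> %s\n",
                       inet_ntop(AF_INET, &v4, buf, sizeof(buf)));
        } else {
                printf("native IPv6 address\n");
        }
        return 0;
}
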
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 66a559b104b6..1ee9f6965d68 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -194,6 +194,9 @@ struct l2tp_tunnel {
struct sock *sock; /* Parent socket */
int fd; /* Parent fd, if tunnel socket
* was created by userspace */
+#if IS_ENABLED(CONFIG_IPV6)
+ bool v4mapped;
+#endif
struct work_struct del_work;
@@ -235,29 +238,40 @@ out:
return tunnel;
}
-extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
-extern void l2tp_tunnel_sock_put(struct sock *sk);
-extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
-extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
-extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
-extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
-extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
-
-extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
-extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
-extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
-extern void __l2tp_session_unhash(struct l2tp_session *session);
-extern int l2tp_session_delete(struct l2tp_session *session);
-extern void l2tp_session_free(struct l2tp_session *session);
-extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
-extern int l2tp_session_queue_purge(struct l2tp_session *session);
-extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
-
-extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
-
-extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
-extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
+struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
+void l2tp_tunnel_sock_put(struct sock *sk);
+struct l2tp_session *l2tp_session_find(struct net *net,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id);
+struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
+struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
+struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
+
+int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
+ u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
+ struct l2tp_tunnel **tunnelp);
+void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+struct l2tp_session *l2tp_session_create(int priv_size,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, u32 peer_session_id,
+ struct l2tp_session_cfg *cfg);
+void __l2tp_session_unhash(struct l2tp_session *session);
+int l2tp_session_delete(struct l2tp_session *session);
+void l2tp_session_free(struct l2tp_session *session);
+void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+ unsigned char *ptr, unsigned char *optr, u16 hdrflags,
+ int length, int (*payload_hook)(struct sk_buff *skb));
+int l2tp_session_queue_purge(struct l2tp_session *session);
+int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
+
+int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
+ int hdr_len);
+
+int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
+ const struct l2tp_nl_cmd_ops *ops);
+void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
/* Session reference counts. Incremented when code obtains a reference
* to a session.
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 072d7202e182..2d6760a2ae34 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -127,9 +127,10 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
#if IS_ENABLED(CONFIG_IPV6)
if (tunnel->sock->sk_family == AF_INET6) {
- struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+ const struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+
seq_printf(m, " from %pI6c to %pI6c\n",
- &np->saddr, &np->daddr);
+ &np->saddr, &tunnel->sock->sk_v6_daddr);
} else
#endif
seq_printf(m, " from %pI4 to %pI4\n",
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index b8a6039314e8..cfd65304be60 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -63,7 +63,7 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
struct sock *sk;
sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
- struct in6_addr *addr = inet6_rcv_saddr(sk);
+ const struct in6_addr *addr = inet6_rcv_saddr(sk);
struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
if (l2tp == NULL)
@@ -331,7 +331,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
rcu_read_unlock();
inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
- np->rcv_saddr = addr->l2tp_addr;
+ sk->sk_v6_rcv_saddr = addr->l2tp_addr;
np->saddr = addr->l2tp_addr;
l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
@@ -421,14 +421,14 @@ static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
if (!lsk->peer_conn_id)
return -ENOTCONN;
lsa->l2tp_conn_id = lsk->peer_conn_id;
- lsa->l2tp_addr = np->daddr;
+ lsa->l2tp_addr = sk->sk_v6_daddr;
if (np->sndflow)
lsa->l2tp_flowinfo = np->flow_label;
} else {
- if (ipv6_addr_any(&np->rcv_saddr))
+ if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
lsa->l2tp_addr = np->saddr;
else
- lsa->l2tp_addr = np->rcv_saddr;
+ lsa->l2tp_addr = sk->sk_v6_rcv_saddr;
lsa->l2tp_conn_id = lsk->conn_id;
}
@@ -537,8 +537,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
- ipv6_addr_equal(daddr, &np->daddr))
- daddr = &np->daddr;
+ ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
+ daddr = &sk->sk_v6_daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
lsa->l2tp_scope_id &&
@@ -548,7 +548,7 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
- daddr = &np->daddr;
+ daddr = &sk->sk_v6_daddr;
fl6.flowlabel = np->flow_label;
}
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 0825ff26e113..be446d517bc9 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -306,8 +306,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
if (np) {
if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr),
&np->saddr) ||
- nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr),
- &np->daddr))
+ nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(sk->sk_v6_daddr),
+ &sk->sk_v6_daddr))
goto nla_put_failure;
} else
#endif
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 5ebee2ded9e9..ffda81ef1a70 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -353,7 +353,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
goto error_put_sess_tun;
}
+ local_bh_disable();
l2tp_xmit_skb(session, skb, session->hdr_len);
+ local_bh_enable();
sock_put(ps->tunnel_sock);
sock_put(sk);
@@ -422,7 +424,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
skb->data[0] = ppph[0];
skb->data[1] = ppph[1];
+ local_bh_disable();
l2tp_xmit_skb(session, skb, session->hdr_len);
+ local_bh_enable();
sock_put(sk_tun);
sock_put(sk);
@@ -906,8 +910,8 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
#if IS_ENABLED(CONFIG_IPV6)
} else if ((tunnel->version == 2) &&
(tunnel->sock->sk_family == AF_INET6)) {
- struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
struct sockaddr_pppol2tpin6 sp;
+
len = sizeof(sp);
memset(&sp, 0, len);
sp.sa_family = AF_PPPOX;
@@ -920,13 +924,13 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
sp.pppol2tp.d_session = session->peer_session_id;
sp.pppol2tp.addr.sin6_family = AF_INET6;
sp.pppol2tp.addr.sin6_port = inet->inet_dport;
- memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
- sizeof(np->daddr));
+ memcpy(&sp.pppol2tp.addr.sin6_addr, &tunnel->sock->sk_v6_daddr,
+ sizeof(tunnel->sock->sk_v6_daddr));
memcpy(uaddr, &sp, len);
} else if ((tunnel->version == 3) &&
(tunnel->sock->sk_family == AF_INET6)) {
- struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
struct sockaddr_pppol2tpv3in6 sp;
+
len = sizeof(sp);
memset(&sp, 0, len);
sp.sa_family = AF_PPPOX;
@@ -939,8 +943,8 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
sp.pppol2tp.d_session = session->peer_session_id;
sp.pppol2tp.addr.sin6_family = AF_INET6;
sp.pppol2tp.addr.sin6_port = inet->inet_dport;
- memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
- sizeof(np->daddr));
+ memcpy(&sp.pppol2tp.addr.sin6_addr, &tunnel->sock->sk_v6_daddr,
+ sizeof(tunnel->sock->sk_v6_daddr));
memcpy(uaddr, &sp, len);
#endif
} else if (tunnel->version == 3) {
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 2e7855a1b10d..b0a651cc389f 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2865,30 +2865,43 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
if (!ieee80211_sdata_running(sdata))
return;
- if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
- return;
-
sdata->radar_required = sdata->csa_radar_required;
err = ieee80211_vif_change_channel(sdata, &local->csa_chandef,
&changed);
if (WARN_ON(err < 0))
return;
- err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
- if (err < 0)
- return;
+ if (!local->use_chanctx) {
+ local->_oper_chandef = local->csa_chandef;
+ ieee80211_hw_config(local, 0);
+ }
- changed |= err;
- kfree(sdata->u.ap.next_beacon);
- sdata->u.ap.next_beacon = NULL;
+ ieee80211_bss_info_change_notify(sdata, changed);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP:
+ err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
+ if (err < 0)
+ return;
+ changed |= err;
+ kfree(sdata->u.ap.next_beacon);
+ sdata->u.ap.next_beacon = NULL;
+
+ ieee80211_bss_info_change_notify(sdata, err);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ieee80211_ibss_finish_csa(sdata);
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
sdata->vif.csa_active = false;
ieee80211_wake_queues_by_reason(&sdata->local->hw,
IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_CSA);
- ieee80211_bss_info_change_notify(sdata, changed);
-
cfg80211_ch_switch_notify(sdata->dev, &local->csa_chandef);
}
@@ -2936,20 +2949,56 @@ static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
if (sdata->vif.csa_active)
return -EBUSY;
- /* only handle AP for now. */
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
+ sdata->csa_counter_offset_beacon =
+ params->counter_offset_beacon;
+ sdata->csa_counter_offset_presp = params->counter_offset_presp;
+ sdata->u.ap.next_beacon =
+ cfg80211_beacon_dup(&params->beacon_after);
+ if (!sdata->u.ap.next_beacon)
+ return -ENOMEM;
+
+ err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
+ if (err < 0) {
+ kfree(sdata->u.ap.next_beacon);
+ return err;
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (!sdata->vif.bss_conf.ibss_joined)
+ return -EINVAL;
+
+ if (params->chandef.width != sdata->u.ibss.chandef.width)
+ return -EINVAL;
+
+ switch (params->chandef.width) {
+ case NL80211_CHAN_WIDTH_40:
+ if (cfg80211_get_chandef_type(&params->chandef) !=
+ cfg80211_get_chandef_type(&sdata->u.ibss.chandef))
+ return -EINVAL;
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* changes into another band are not supported */
+ if (sdata->u.ibss.chandef.chan->band !=
+ params->chandef.chan->band)
+ return -EINVAL;
+
+ err = ieee80211_ibss_csa_beacon(sdata, params);
+ if (err < 0)
+ return err;
break;
default:
return -EOPNOTSUPP;
}
- sdata->u.ap.next_beacon = cfg80211_beacon_dup(&params->beacon_after);
- if (!sdata->u.ap.next_beacon)
- return -ENOMEM;
-
- sdata->csa_counter_offset_beacon = params->counter_offset_beacon;
- sdata->csa_counter_offset_presp = params->counter_offset_presp;
sdata->csa_radar_required = params->radar_required;
if (params->block_tx)
@@ -2957,10 +3006,6 @@ static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_CSA);
- err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
- if (err < 0)
- return err;
-
local->csa_chandef = params->chandef;
sdata->vif.csa_active = true;
@@ -3014,7 +3059,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
need_offchan = true;
if (!ieee80211_is_action(mgmt->frame_control) ||
mgmt->u.action.category == WLAN_CATEGORY_PUBLIC ||
- mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED)
+ mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED ||
+ mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
break;
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->da);
@@ -3518,7 +3564,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
return -EINVAL;
}
band = chanctx_conf->def.chan->band;
- sta = sta_info_get(sdata, peer);
+ sta = sta_info_get_bss(sdata, peer);
if (sta) {
qos = test_sta_flag(sta, WLAN_STA_WME);
} else {
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 3a4764b2869e..03ba6b5c5373 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -453,11 +453,6 @@ int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
drv_change_chanctx(local, ctx, chanctx_changed);
- if (!local->use_chanctx) {
- local->_oper_chandef = *chandef;
- ieee80211_hw_config(local, 0);
- }
-
ieee80211_recalc_chanctx_chantype(local, ctx);
ieee80211_recalc_smps_chanctx(local, ctx);
ieee80211_recalc_radar_chanctx(local, ctx);
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index b0e32d628114..5c090e41d9bb 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -103,54 +103,57 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
if (!buf)
return 0;
- sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
+ sf += scnprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
- sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
+ sf += scnprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
- sf += snprintf(buf + sf, mxln - sf, "RX_INCLUDES_FCS\n");
+ sf += scnprintf(buf + sf, mxln - sf, "RX_INCLUDES_FCS\n");
if (local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)
- sf += snprintf(buf + sf, mxln - sf,
- "HOST_BCAST_PS_BUFFERING\n");
+ sf += scnprintf(buf + sf, mxln - sf,
+ "HOST_BCAST_PS_BUFFERING\n");
if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)
- sf += snprintf(buf + sf, mxln - sf,
- "2GHZ_SHORT_SLOT_INCAPABLE\n");
+ sf += scnprintf(buf + sf, mxln - sf,
+ "2GHZ_SHORT_SLOT_INCAPABLE\n");
if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE)
- sf += snprintf(buf + sf, mxln - sf,
- "2GHZ_SHORT_PREAMBLE_INCAPABLE\n");
+ sf += scnprintf(buf + sf, mxln - sf,
+ "2GHZ_SHORT_PREAMBLE_INCAPABLE\n");
if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
- sf += snprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
+ sf += scnprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
- sf += snprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
+ sf += scnprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
if (local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC)
- sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_BEFORE_ASSOC\n");
+ sf += scnprintf(buf + sf, mxln - sf,
+ "NEED_DTIM_BEFORE_ASSOC\n");
if (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)
- sf += snprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
+ sf += scnprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
- sf += snprintf(buf + sf, mxln - sf, "AMPDU_AGGREGATION\n");
+ sf += scnprintf(buf + sf, mxln - sf, "AMPDU_AGGREGATION\n");
if (local->hw.flags & IEEE80211_HW_SUPPORTS_PS)
- sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PS\n");
+ sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_PS\n");
if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
- sf += snprintf(buf + sf, mxln - sf, "PS_NULLFUNC_STACK\n");
+ sf += scnprintf(buf + sf, mxln - sf, "PS_NULLFUNC_STACK\n");
if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
- sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
+ sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
- sf += snprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
+ sf += scnprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS)
- sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
+ sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
- sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_SMPS\n");
+ sf += scnprintf(buf + sf, mxln - sf,
+ "SUPPORTS_DYNAMIC_SMPS\n");
if (local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
- sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n");
+ sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n");
if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
- sf += snprintf(buf + sf, mxln - sf, "REPORTS_TX_ACK_STATUS\n");
+ sf += scnprintf(buf + sf, mxln - sf,
+ "REPORTS_TX_ACK_STATUS\n");
if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
- sf += snprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
+ sf += scnprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)
- sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
+ sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
- sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
+ sf += scnprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
- sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
+ sf += scnprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
kfree(buf);
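/*
 * Illustrative userspace sketch of why the hwflags_read() hunk above moves
 * from snprintf() to scnprintf(): snprintf() returns how many bytes it
 * *would* have written, so once the buffer fills the running offset "sf"
 * can exceed "mxln" and the next "mxln - sf" length underflows.  The
 * kernel's scnprintf() reports only what was actually stored.
 * bounded_append() below is a hypothetical stand-in for that contract,
 * not the kernel implementation.
 */
#include <stdarg.h>
#include <stdio.h>

static int bounded_append(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int would_be;

        if (size == 0)
                return 0;

        va_start(args, fmt);
        would_be = vsnprintf(buf, size, fmt, args);
        va_end(args);

        if (would_be < 0)
                return 0;
        /* vsnprintf() reports the untruncated length; clamp it */
        return (size_t)would_be >= size ? (int)(size - 1) : would_be;
}

int main(void)
{
        char buf[8];
        int sf = 0;

        /* with plain snprintf() this loop could push sf past sizeof(buf),
         * making the following "sizeof(buf) - sf" size argument bogus */
        sf += bounded_append(buf + sf, sizeof(buf) - sf, "FLAG_A\n");
        sf += bounded_append(buf + sf, sizeof(buf) - sf, "FLAG_B\n");
        printf("stored %d bytes: %s", sf, buf);
        return 0;
}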
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index b3ea11f3d526..5d03c47c0a4c 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1085,4 +1085,31 @@ drv_channel_switch_beacon(struct ieee80211_sub_if_data *sdata,
}
}
+static inline int drv_join_ibss(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ int ret = 0;
+
+ might_sleep();
+ check_sdata_in_driver(sdata);
+
+ trace_drv_join_ibss(local, sdata, &sdata->vif.bss_conf);
+ if (local->ops->join_ibss)
+ ret = local->ops->join_ibss(&local->hw, &sdata->vif);
+ trace_drv_return_int(local, ret);
+ return ret;
+}
+
+static inline void drv_leave_ibss(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ might_sleep();
+ check_sdata_in_driver(sdata);
+
+ trace_drv_leave_ibss(local, sdata);
+ if (local->ops->leave_ibss)
+ local->ops->leave_ibss(&local->hw, &sdata->vif);
+ trace_drv_return_void(local);
+}
+
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a12afe77bb26..21a0b8835cb3 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -39,7 +39,8 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
const int beacon_int, const u32 basic_rates,
const u16 capability, u64 tsf,
struct cfg80211_chan_def *chandef,
- bool *have_higher_than_11mbit)
+ bool *have_higher_than_11mbit,
+ struct cfg80211_csa_settings *csa_settings)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
@@ -59,6 +60,7 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
2 + 8 /* max Supported Rates */ +
3 /* max DS params */ +
4 /* IBSS params */ +
+ 5 /* Channel Switch Announcement */ +
2 + (IEEE80211_MAX_SUPP_RATES - 8) +
2 + sizeof(struct ieee80211_ht_cap) +
2 + sizeof(struct ieee80211_ht_operation) +
@@ -135,6 +137,16 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
*pos++ = 0;
*pos++ = 0;
+ if (csa_settings) {
+ *pos++ = WLAN_EID_CHANNEL_SWITCH;
+ *pos++ = 3;
+ *pos++ = csa_settings->block_tx ? 1 : 0;
+ *pos++ = ieee80211_frequency_to_channel(
+ csa_settings->chandef.chan->center_freq);
+ sdata->csa_counter_offset_beacon = (pos - presp->head);
+ *pos++ = csa_settings->count;
+ }
+
/* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */
if (rates_n > 8) {
*pos++ = WLAN_EID_EXT_SUPP_RATES;
@@ -217,6 +229,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
struct beacon_data *presp;
enum nl80211_bss_scan_width scan_width;
bool have_higher_than_11mbit;
+ int err;
sdata_assert_lock(sdata);
@@ -235,6 +248,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_IBSS |
BSS_CHANGED_BEACON_ENABLED);
+ drv_leave_ibss(local, sdata);
}
presp = rcu_dereference_protected(ifibss->presp,
@@ -276,7 +290,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates,
capability, tsf, &chandef,
- &have_higher_than_11mbit);
+ &have_higher_than_11mbit, NULL);
if (!presp)
return;
@@ -317,11 +331,26 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
else
sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
+ ieee80211_set_wmm_default(sdata, true);
+
sdata->vif.bss_conf.ibss_joined = true;
sdata->vif.bss_conf.ibss_creator = creator;
- ieee80211_bss_info_change_notify(sdata, bss_change);
- ieee80211_set_wmm_default(sdata, true);
+ err = drv_join_ibss(local, sdata);
+ if (err) {
+ sdata->vif.bss_conf.ibss_joined = false;
+ sdata->vif.bss_conf.ibss_creator = false;
+ sdata->vif.bss_conf.enable_beacon = false;
+ sdata->vif.bss_conf.ssid_len = 0;
+ RCU_INIT_POINTER(ifibss->presp, NULL);
+ kfree_rcu(presp, rcu_head);
+ ieee80211_vif_release_channel(sdata);
+ sdata_info(sdata, "Failed to join IBSS, driver failure: %d\n",
+ err);
+ return;
+ }
+
+ ieee80211_bss_info_change_notify(sdata, bss_change);
ifibss->state = IEEE80211_IBSS_MLME_JOINED;
mod_timer(&ifibss->timer,
@@ -416,6 +445,169 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
tsf, false);
}
+static int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_csa_settings *csa_settings)
+{
+ struct sk_buff *skb;
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ struct ieee80211_local *local = sdata->local;
+ int freq;
+ int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.chan_switch) +
+ sizeof(mgmt->u.action.u.chan_switch);
+ u8 *pos;
+
+ skb = dev_alloc_skb(local->tx_headroom + hdr_len +
+ 5 + /* channel switch announcement element */
+ 3); /* secondary channel offset element */
+ if (!skb)
+ return -1;
+
+ skb_reserve(skb, local->tx_headroom);
+ mgmt = (struct ieee80211_mgmt *)skb_put(skb, hdr_len);
+ memset(mgmt, 0, hdr_len);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+
+ eth_broadcast_addr(mgmt->da);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
+ mgmt->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
+ mgmt->u.action.u.chan_switch.action_code = WLAN_ACTION_SPCT_CHL_SWITCH;
+ pos = skb_put(skb, 5);
+ *pos++ = WLAN_EID_CHANNEL_SWITCH; /* EID */
+ *pos++ = 3; /* IE length */
+ *pos++ = csa_settings->block_tx ? 1 : 0; /* CSA mode */
+ freq = csa_settings->chandef.chan->center_freq;
+ *pos++ = ieee80211_frequency_to_channel(freq); /* channel */
+ *pos++ = csa_settings->count; /* count */
+
+ if (csa_settings->chandef.width == NL80211_CHAN_WIDTH_40) {
+ enum nl80211_channel_type ch_type;
+
+ skb_put(skb, 3);
+ *pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET; /* EID */
+ *pos++ = 1; /* IE length */
+ ch_type = cfg80211_get_chandef_type(&csa_settings->chandef);
+ if (ch_type == NL80211_CHAN_HT40PLUS)
+ *pos++ = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ else
+ *pos++ = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ }
+
+ ieee80211_tx_skb(sdata, skb);
+ return 0;
+}
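/*
 * Illustrative sketch of the element body that both the CSA action frame
 * above and the new IBSS beacon code emit: a 3-byte Channel Switch
 * Announcement payload (mode, new channel number, countdown), preceded by
 * the element ID and length.  The struct and helper names below are made
 * up for the example and are not mac80211 API.
 */
#include <stdint.h>
#include <stdio.h>

struct csa_ie {
        uint8_t mode;        /* 1: stop transmitting until the switch */
        uint8_t new_channel; /* target channel number */
        uint8_t count;       /* beacons/TBTTs until the switch */
};

static int parse_csa(const uint8_t *ie, size_t len, struct csa_ie *out)
{
        if (len < 5 || ie[1] != 3)
                return -1;   /* not a well-formed 3-byte CSA body */
        out->mode = ie[2];
        out->new_channel = ie[3];
        out->count = ie[4];
        return 0;
}

int main(void)
{
        /* 37 is the Channel Switch Announcement element ID in 802.11 */
        const uint8_t ie[] = { 37, 3, 1, 40, 5 };
        struct csa_ie csa;

        if (!parse_csa(ie, sizeof(ie), &csa))
                printf("switch to channel %u in %u beacons (block_tx=%u)\n",
                       csa.new_channel, csa.count, csa.mode);
        return 0;
}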
+
+int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_csa_settings *csa_settings)
+{
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ struct beacon_data *presp, *old_presp;
+ struct cfg80211_bss *cbss;
+ const struct cfg80211_bss_ies *ies;
+ u16 capability;
+ u64 tsf;
+ int ret = 0;
+
+ sdata_assert_lock(sdata);
+
+ capability = WLAN_CAPABILITY_IBSS;
+
+ if (ifibss->privacy)
+ capability |= WLAN_CAPABILITY_PRIVACY;
+
+ cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan,
+ ifibss->bssid, ifibss->ssid,
+ ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
+ WLAN_CAPABILITY_PRIVACY,
+ capability);
+
+ if (WARN_ON(!cbss)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ rcu_read_lock();
+ ies = rcu_dereference(cbss->ies);
+ tsf = ies->tsf;
+ rcu_read_unlock();
+ cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
+
+ old_presp = rcu_dereference_protected(ifibss->presp,
+ lockdep_is_held(&sdata->wdev.mtx));
+
+ presp = ieee80211_ibss_build_presp(sdata,
+ sdata->vif.bss_conf.beacon_int,
+ sdata->vif.bss_conf.basic_rates,
+ capability, tsf, &ifibss->chandef,
+ NULL, csa_settings);
+ if (!presp) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ rcu_assign_pointer(ifibss->presp, presp);
+ if (old_presp)
+ kfree_rcu(old_presp, rcu_head);
+
+ /* it might not send the beacon for a while. send an action frame
+ * immediately to announce the channel switch.
+ */
+ if (csa_settings)
+ ieee80211_send_action_csa(sdata, csa_settings);
+
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ out:
+ return ret;
+}
+
+int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ struct cfg80211_bss *cbss;
+ int err;
+ u16 capability;
+
+ sdata_lock(sdata);
+ /* update cfg80211 bss information with the new channel */
+ if (!is_zero_ether_addr(ifibss->bssid)) {
+ capability = WLAN_CAPABILITY_IBSS;
+
+ if (ifibss->privacy)
+ capability |= WLAN_CAPABILITY_PRIVACY;
+
+ cbss = cfg80211_get_bss(sdata->local->hw.wiphy,
+ ifibss->chandef.chan,
+ ifibss->bssid, ifibss->ssid,
+ ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
+ WLAN_CAPABILITY_PRIVACY,
+ capability);
+ /* XXX: should not really modify cfg80211 data */
+ if (cbss) {
+ cbss->channel = sdata->local->csa_chandef.chan;
+ cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
+ }
+ }
+
+ ifibss->chandef = sdata->local->csa_chandef;
+
+ /* generate the beacon */
+ err = ieee80211_ibss_csa_beacon(sdata, NULL);
+ sdata_unlock(sdata);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+
+ cancel_work_sync(&ifibss->csa_connection_drop_work);
+}
+
static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta)
__acquires(RCU)
{
@@ -499,6 +691,295 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
return ieee80211_ibss_finish_sta(sta);
}
+static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ int active = 0;
+ struct sta_info *sta;
+
+ sdata_assert_lock(sdata);
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ if (sta->sdata == sdata &&
+ time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL,
+ jiffies)) {
+ active++;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return active;
+}
+
+static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ struct ieee80211_local *local = sdata->local;
+ struct cfg80211_bss *cbss;
+ struct beacon_data *presp;
+ struct sta_info *sta;
+ int active_ibss;
+ u16 capability;
+
+ active_ibss = ieee80211_sta_active_ibss(sdata);
+
+ if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
+ capability = WLAN_CAPABILITY_IBSS;
+
+ if (ifibss->privacy)
+ capability |= WLAN_CAPABILITY_PRIVACY;
+
+ cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan,
+ ifibss->bssid, ifibss->ssid,
+ ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
+ WLAN_CAPABILITY_PRIVACY,
+ capability);
+
+ if (cbss) {
+ cfg80211_unlink_bss(local->hw.wiphy, cbss);
+ cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
+ }
+ }
+
+ ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
+
+ sta_info_flush(sdata);
+
+ spin_lock_bh(&ifibss->incomplete_lock);
+ while (!list_empty(&ifibss->incomplete_stations)) {
+ sta = list_first_entry(&ifibss->incomplete_stations,
+ struct sta_info, list);
+ list_del(&sta->list);
+ spin_unlock_bh(&ifibss->incomplete_lock);
+
+ sta_info_free(local, sta);
+ spin_lock_bh(&ifibss->incomplete_lock);
+ }
+ spin_unlock_bh(&ifibss->incomplete_lock);
+
+ netif_carrier_off(sdata->dev);
+
+ sdata->vif.bss_conf.ibss_joined = false;
+ sdata->vif.bss_conf.ibss_creator = false;
+ sdata->vif.bss_conf.enable_beacon = false;
+ sdata->vif.bss_conf.ssid_len = 0;
+
+ /* remove beacon */
+ presp = rcu_dereference_protected(ifibss->presp,
+ lockdep_is_held(&sdata->wdev.mtx));
+ RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
+ if (presp)
+ kfree_rcu(presp, rcu_head);
+
+ clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
+ BSS_CHANGED_IBSS);
+ drv_leave_ibss(local, sdata);
+ ieee80211_vif_release_channel(sdata);
+}
+
+static void ieee80211_csa_connection_drop_work(struct work_struct *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ u.ibss.csa_connection_drop_work);
+
+ ieee80211_ibss_disconnect(sdata);
+ synchronize_rcu();
+ skb_queue_purge(&sdata->skb_queue);
+
+ /* trigger a scan to find another IBSS network to join */
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+}
+
+static bool
+ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
+ struct ieee802_11_elems *elems,
+ bool beacon)
+{
+ struct cfg80211_csa_settings params;
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_chanctx *chanctx;
+ enum nl80211_channel_type ch_type;
+ int err, num_chanctx;
+ u32 sta_flags;
+ u8 mode;
+
+ if (sdata->vif.csa_active)
+ return true;
+
+ if (!sdata->vif.bss_conf.ibss_joined)
+ return false;
+
+ sta_flags = IEEE80211_STA_DISABLE_VHT;
+ switch (ifibss->chandef.width) {
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ sta_flags |= IEEE80211_STA_DISABLE_HT;
+ /* fall through */
+ case NL80211_CHAN_WIDTH_20:
+ sta_flags |= IEEE80211_STA_DISABLE_40MHZ;
+ break;
+ default:
+ break;
+ }
+
+ memset(&params, 0, sizeof(params));
+ err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon,
+ ifibss->chandef.chan->band,
+ sta_flags, ifibss->bssid,
+ &params.count, &mode,
+ &params.chandef);
+
+ /* can't switch to destination channel, fail */
+ if (err < 0)
+ goto disconnect;
+
+ /* did not contain a CSA */
+ if (err)
+ return false;
+
+ if (ifibss->chandef.chan->band != params.chandef.chan->band)
+ goto disconnect;
+
+ switch (ifibss->chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_40:
+ /* keep our current HT mode (HT20/HT40+/HT40-), even if
+ * another mode has been announced. The mode is not adopted
+ * within the beacon while doing CSA and we should therefore
+ * keep the mode which we announce.
+ */
+ ch_type = cfg80211_get_chandef_type(&ifibss->chandef);
+ cfg80211_chandef_create(&params.chandef, params.chandef.chan,
+ ch_type);
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ if (params.chandef.width != ifibss->chandef.width) {
+ sdata_info(sdata,
+ "IBSS %pM received channel switch from incompatible channel width (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
+ ifibss->bssid,
+ params.chandef.chan->center_freq,
+ params.chandef.width,
+ params.chandef.center_freq1,
+ params.chandef.center_freq2);
+ goto disconnect;
+ }
+ break;
+ default:
+ /* should not happen, sta_flags should prevent VHT modes. */
+ WARN_ON(1);
+ goto disconnect;
+ }
+
+ if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, &params.chandef,
+ IEEE80211_CHAN_DISABLED)) {
+ sdata_info(sdata,
+ "IBSS %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
+ ifibss->bssid,
+ params.chandef.chan->center_freq,
+ params.chandef.width,
+ params.chandef.center_freq1,
+ params.chandef.center_freq2);
+ goto disconnect;
+ }
+
+ err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
+ &params.chandef);
+ if (err < 0)
+ goto disconnect;
+ if (err) {
+ params.radar_required = true;
+
+ /* TODO: IBSS-DFS not (yet) supported, disconnect. */
+ goto disconnect;
+ }
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ goto disconnect;
+ }
+
+ /* don't handle for multi-VIF cases */
+ chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
+ if (chanctx->refcount > 1) {
+ rcu_read_unlock();
+ goto disconnect;
+ }
+ num_chanctx = 0;
+ list_for_each_entry_rcu(chanctx, &sdata->local->chanctx_list, list)
+ num_chanctx++;
+
+ if (num_chanctx > 1) {
+ rcu_read_unlock();
+ goto disconnect;
+ }
+ rcu_read_unlock();
+
+ /* all checks done, now perform the channel switch. */
+ ibss_dbg(sdata,
+ "received channel switch announcement to go to channel %d MHz\n",
+ params.chandef.chan->center_freq);
+
+ params.block_tx = !!mode;
+
+ ieee80211_ibss_csa_beacon(sdata, &params);
+ sdata->csa_radar_required = params.radar_required;
+
+ if (params.block_tx)
+ ieee80211_stop_queues_by_reason(&sdata->local->hw,
+ IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+
+ sdata->local->csa_chandef = params.chandef;
+ sdata->vif.csa_active = true;
+
+ ieee80211_bss_info_change_notify(sdata, err);
+ drv_channel_switch_beacon(sdata, &params.chandef);
+
+ return true;
+disconnect:
+ ibss_dbg(sdata, "Can't handle channel switch, disconnect\n");
+ ieee80211_queue_work(&sdata->local->hw,
+ &ifibss->csa_connection_drop_work);
+
+ return true;
+}
+
+static void
+ieee80211_rx_mgmt_spectrum_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee802_11_elems *elems)
+{
+ int required_len;
+
+ if (len < IEEE80211_MIN_ACTION_SIZE + 1)
+ return;
+
+ /* CSA is the only action we handle for now */
+ if (mgmt->u.action.u.measurement.action_code !=
+ WLAN_ACTION_SPCT_CHL_SWITCH)
+ return;
+
+ required_len = IEEE80211_MIN_ACTION_SIZE +
+ sizeof(mgmt->u.action.u.chan_switch);
+ if (len < required_len)
+ return;
+
+ ieee80211_ibss_process_chanswitch(sdata, elems, false);
+}
+
static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
size_t len)
@@ -661,10 +1142,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
/* check if we need to merge IBSS */
- /* we use a fixed BSSID */
- if (sdata->u.ibss.fixed_bssid)
- goto put_bss;
-
/* not an IBSS */
if (!(cbss->capability & WLAN_CAPABILITY_IBSS))
goto put_bss;
@@ -680,10 +1157,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
sdata->u.ibss.ssid_len))
goto put_bss;
+ /* process channel switch */
+ if (ieee80211_ibss_process_chanswitch(sdata, elems, true))
+ goto put_bss;
+
/* same BSSID */
if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid))
goto put_bss;
+ /* we use a fixed BSSID */
+ if (sdata->u.ibss.fixed_bssid)
+ goto put_bss;
+
if (ieee80211_have_rx_timestamp(rx_status)) {
/* time when timestamp field was received */
rx_timestamp =
@@ -775,30 +1260,6 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
ieee80211_queue_work(&local->hw, &sdata->work);
}
-static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
-{
- struct ieee80211_local *local = sdata->local;
- int active = 0;
- struct sta_info *sta;
-
- sdata_assert_lock(sdata);
-
- rcu_read_lock();
-
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
- if (sta->sdata == sdata &&
- time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL,
- jiffies)) {
- active++;
- break;
- }
- }
-
- rcu_read_unlock();
-
- return active;
-}
-
static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
@@ -1076,6 +1537,8 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct ieee80211_rx_status *rx_status;
struct ieee80211_mgmt *mgmt;
u16 fc;
+ struct ieee802_11_elems elems;
+ int ies_len;
rx_status = IEEE80211_SKB_RXCB(skb);
mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -1101,6 +1564,27 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
case IEEE80211_STYPE_DEAUTH:
ieee80211_rx_mgmt_deauth_ibss(sdata, mgmt, skb->len);
break;
+ case IEEE80211_STYPE_ACTION:
+ switch (mgmt->u.action.category) {
+ case WLAN_CATEGORY_SPECTRUM_MGMT:
+ ies_len = skb->len -
+ offsetof(struct ieee80211_mgmt,
+ u.action.u.chan_switch.variable);
+
+ if (ies_len < 0)
+ break;
+
+ ieee802_11_parse_elems(
+ mgmt->u.action.u.chan_switch.variable,
+ ies_len, true, &elems);
+
+ if (elems.parse_error)
+ break;
+
+ ieee80211_rx_mgmt_spectrum_mgmt(sdata, mgmt, skb->len,
+ rx_status, &elems);
+ break;
+ }
}
mgmt_out:
@@ -1167,6 +1651,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
(unsigned long) sdata);
INIT_LIST_HEAD(&ifibss->incomplete_stations);
spin_lock_init(&ifibss->incomplete_lock);
+ INIT_WORK(&ifibss->csa_connection_drop_work,
+ ieee80211_csa_connection_drop_work);
}
/* scan finished notification */
@@ -1265,73 +1751,19 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
- struct ieee80211_local *local = sdata->local;
- struct cfg80211_bss *cbss;
- u16 capability;
- int active_ibss;
- struct sta_info *sta;
- struct beacon_data *presp;
-
- active_ibss = ieee80211_sta_active_ibss(sdata);
-
- if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
- capability = WLAN_CAPABILITY_IBSS;
-
- if (ifibss->privacy)
- capability |= WLAN_CAPABILITY_PRIVACY;
-
- cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan,
- ifibss->bssid, ifibss->ssid,
- ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
- WLAN_CAPABILITY_PRIVACY,
- capability);
- if (cbss) {
- cfg80211_unlink_bss(local->hw.wiphy, cbss);
- cfg80211_put_bss(local->hw.wiphy, cbss);
- }
- }
-
- ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
- memset(ifibss->bssid, 0, ETH_ALEN);
+ ieee80211_ibss_disconnect(sdata);
ifibss->ssid_len = 0;
-
- sta_info_flush(sdata);
-
- spin_lock_bh(&ifibss->incomplete_lock);
- while (!list_empty(&ifibss->incomplete_stations)) {
- sta = list_first_entry(&ifibss->incomplete_stations,
- struct sta_info, list);
- list_del(&sta->list);
- spin_unlock_bh(&ifibss->incomplete_lock);
-
- sta_info_free(local, sta);
- spin_lock_bh(&ifibss->incomplete_lock);
- }
- spin_unlock_bh(&ifibss->incomplete_lock);
-
- netif_carrier_off(sdata->dev);
+ memset(ifibss->bssid, 0, ETH_ALEN);
/* remove beacon */
kfree(sdata->u.ibss.ie);
- presp = rcu_dereference_protected(ifibss->presp,
- lockdep_is_held(&sdata->wdev.mtx));
- RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
/* on the next join, re-program HT parameters */
memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
memset(&ifibss->ht_capa_mask, 0, sizeof(ifibss->ht_capa_mask));
- sdata->vif.bss_conf.ibss_joined = false;
- sdata->vif.bss_conf.ibss_creator = false;
- sdata->vif.bss_conf.enable_beacon = false;
- sdata->vif.bss_conf.ssid_len = 0;
- clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
- BSS_CHANGED_IBSS);
- ieee80211_vif_release_channel(sdata);
synchronize_rcu();
- kfree(presp);
skb_queue_purge(&sdata->skb_queue);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index b6186517ec56..fe48b093d4dc 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -322,7 +322,6 @@ struct ieee80211_roc_work {
/* flags used in struct ieee80211_if_managed.flags */
enum ieee80211_sta_flags {
- IEEE80211_STA_BEACON_POLL = BIT(0),
IEEE80211_STA_CONNECTION_POLL = BIT(1),
IEEE80211_STA_CONTROL_PORT = BIT(2),
IEEE80211_STA_DISABLE_HT = BIT(4),
@@ -335,6 +334,7 @@ enum ieee80211_sta_flags {
IEEE80211_STA_DISABLE_VHT = BIT(11),
IEEE80211_STA_DISABLE_80P80MHZ = BIT(12),
IEEE80211_STA_DISABLE_160MHZ = BIT(13),
+ IEEE80211_STA_DISABLE_WMM = BIT(14),
};
struct ieee80211_mgd_auth_data {
@@ -487,6 +487,7 @@ struct ieee80211_if_managed {
struct ieee80211_if_ibss {
struct timer_list timer;
+ struct work_struct csa_connection_drop_work;
unsigned long last_scan_completed;
@@ -893,6 +894,8 @@ struct tpt_led_trigger {
* that the scan completed.
* @SCAN_ABORTED: Set for our scan work function when the driver reported
* a scan complete for an aborted scan.
+ * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
+ * cancelled.
*/
enum {
SCAN_SW_SCANNING,
@@ -900,6 +903,7 @@ enum {
SCAN_ONCHANNEL_SCANNING,
SCAN_COMPLETED,
SCAN_ABORTED,
+ SCAN_HW_CANCELLED,
};
/**
@@ -1330,6 +1334,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata);
void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
+int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_csa_settings *csa_settings);
+int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata);
/* mesh code */
void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata);
@@ -1481,6 +1489,29 @@ void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
size_t len);
+/**
+ * ieee80211_parse_ch_switch_ie - parses channel switch IEs
+ * @sdata: the sdata of the interface which has received the frame
+ * @elems: parsed 802.11 elements received with the frame
+ * @beacon: indicates if the frame was a beacon or probe response
+ * @current_band: indicates the current band
+ * @sta_flags: contains information about own capabilities and restrictions
+ * to decide which channel switch announcements can be accepted. Only the
+ * following subset of &enum ieee80211_sta_flags are evaluated:
+ * %IEEE80211_STA_DISABLE_HT, %IEEE80211_STA_DISABLE_VHT,
+ * %IEEE80211_STA_DISABLE_40MHZ, %IEEE80211_STA_DISABLE_80P80MHZ,
+ * %IEEE80211_STA_DISABLE_160MHZ.
+ * @count: to be filled with the counter until the switch (on success only)
+ * @bssid: the currently connected bssid (for reporting)
+ * @mode: to be filled with CSA mode (on success only)
+ * @new_chandef: to be filled with destination chandef (on success only)
+ * Return: 0 on success, <0 on error and >0 if there is nothing to parse.
+ */
+int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
+ struct ieee802_11_elems *elems, bool beacon,
+ enum ieee80211_band current_band,
+ u32 sta_flags, u8 *bssid, u8 *count, u8 *mode,
+ struct cfg80211_chan_def *new_chandef);
/* Suspend/resume and hw reconfiguration */
int ieee80211_reconfig(struct ieee80211_local *local);
@@ -1654,6 +1685,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
const struct ieee80211_ht_operation *ht_oper,
struct cfg80211_chan_def *chandef);
+u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
int __must_check
ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index fcecd633514e..e48f103b9ade 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -766,6 +766,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
if (sdata->vif.type == NL80211_IFTYPE_STATION)
ieee80211_mgd_stop(sdata);
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+ ieee80211_ibss_stop(sdata);
+
+
/*
* Remove all stations associated with this interface.
*
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 620677e897bd..3e51dd7d98b3 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -879,7 +879,7 @@ ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
keyconf->keylen, keyconf->key,
0, NULL);
if (IS_ERR(key))
- return ERR_PTR(PTR_ERR(key));
+ return ERR_CAST(key);
if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
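/*
 * Illustrative sketch of the key.c one-liner above: ERR_PTR(PTR_ERR(key))
 * and ERR_CAST(key) produce the same encoded-error pointer; ERR_CAST just
 * skips the pointer -> long -> pointer round trip and states the intent.
 * The helpers below are simplified userspace stand-ins for the
 * <linux/err.h> macros, shown only for the encoding; the real kernel
 * versions differ (e.g. they range-check the value).
 */
#include <errno.h>
#include <stdio.h>

static void *err_ptr(long error)       { return (void *)error; }
static long  ptr_err(const void *ptr)  { return (long)ptr; }
static void *err_cast(const void *ptr) { return (void *)ptr; }

int main(void)
{
        void *key = err_ptr(-ENOMEM);        /* an "allocation failed" result */

        /* old style: decode to a long, re-encode as a pointer */
        void *a = err_ptr(ptr_err(key));
        /* new style: reinterpret the already-encoded pointer */
        void *b = err_cast(key);

        printf("same encoded error: %d (%ld)\n", a == b, ptr_err(b));
        return 0;
}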
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 86e4ad56b573..d7bdc4b97dde 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -145,66 +145,6 @@ static int ecw2cw(int ecw)
return (1 << ecw) - 1;
}
-static u32 chandef_downgrade(struct cfg80211_chan_def *c)
-{
- u32 ret;
- int tmp;
-
- switch (c->width) {
- case NL80211_CHAN_WIDTH_20:
- c->width = NL80211_CHAN_WIDTH_20_NOHT;
- ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
- break;
- case NL80211_CHAN_WIDTH_40:
- c->width = NL80211_CHAN_WIDTH_20;
- c->center_freq1 = c->chan->center_freq;
- ret = IEEE80211_STA_DISABLE_40MHZ |
- IEEE80211_STA_DISABLE_VHT;
- break;
- case NL80211_CHAN_WIDTH_80:
- tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
- c->width = NL80211_CHAN_WIDTH_40;
- ret = IEEE80211_STA_DISABLE_VHT;
- break;
- case NL80211_CHAN_WIDTH_80P80:
- c->center_freq2 = 0;
- c->width = NL80211_CHAN_WIDTH_80;
- ret = IEEE80211_STA_DISABLE_80P80MHZ |
- IEEE80211_STA_DISABLE_160MHZ;
- break;
- case NL80211_CHAN_WIDTH_160:
- /* n_P20 */
- tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P80 */
- tmp /= 4;
- c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
- c->width = NL80211_CHAN_WIDTH_80;
- ret = IEEE80211_STA_DISABLE_80P80MHZ |
- IEEE80211_STA_DISABLE_160MHZ;
- break;
- default:
- case NL80211_CHAN_WIDTH_20_NOHT:
- WARN_ON_ONCE(1);
- c->width = NL80211_CHAN_WIDTH_20_NOHT;
- ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
- break;
- case NL80211_CHAN_WIDTH_5:
- case NL80211_CHAN_WIDTH_10:
- WARN_ON_ONCE(1);
- /* keep c->width */
- ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
- break;
- }
-
- WARN_ON_ONCE(!cfg80211_chandef_valid(c));
-
- return ret;
-}
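/*
 * Worked example of the 80 MHz -> 40 MHz recentring done by the helper
 * removed above (callers now use the shared ieee80211_chandef_downgrade()
 * declared in ieee80211_i.h): pick the 40 MHz half of the 80 MHz block
 * that contains the primary channel.  downgrade_cf1_80_to_40() is a
 * throwaway name for illustration, assuming the usual 5 GHz numbering.
 */
#include <stdio.h>

static int downgrade_cf1_80_to_40(int primary_freq, int cf1_80)
{
        int tmp = (30 + primary_freq - cf1_80) / 20;  /* n_P20 */
        tmp /= 2;                                     /* n_P40 */
        return cf1_80 - 20 + 40 * tmp;                /* freq_P40 */
}

int main(void)
{
        /* channel 36 (5180 MHz) inside the 36-48 block centred at 5210:
         * (30 + 5180 - 5210)/20 = 0, /2 = 0, 5210 - 20 + 0 = 5190 MHz,
         * i.e. the HT40+ pair 36/40 centred at 5190 */
        printf("%d\n", downgrade_cf1_80_to_40(5180, 5210));
        return 0;
}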
-
static u32
ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
@@ -352,7 +292,7 @@ out:
break;
}
- ret |= chandef_downgrade(chandef);
+ ret |= ieee80211_chandef_downgrade(chandef);
}
if (chandef->width != vht_chandef.width && !tracking)
@@ -406,13 +346,13 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
*/
if (ifmgd->flags & IEEE80211_STA_DISABLE_80P80MHZ &&
chandef.width == NL80211_CHAN_WIDTH_80P80)
- flags |= chandef_downgrade(&chandef);
+ flags |= ieee80211_chandef_downgrade(&chandef);
if (ifmgd->flags & IEEE80211_STA_DISABLE_160MHZ &&
chandef.width == NL80211_CHAN_WIDTH_160)
- flags |= chandef_downgrade(&chandef);
+ flags |= ieee80211_chandef_downgrade(&chandef);
if (ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ &&
chandef.width > NL80211_CHAN_WIDTH_20)
- flags |= chandef_downgrade(&chandef);
+ flags |= ieee80211_chandef_downgrade(&chandef);
if (cfg80211_chandef_identical(&chandef, &sdata->vif.bss_conf.chandef))
return 0;
@@ -893,8 +833,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
- IEEE80211_STA_CONNECTION_POLL))
+ if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
ieee80211_tx_skb(sdata, skb);
@@ -937,6 +876,8 @@ static void ieee80211_chswitch_work(struct work_struct *work)
container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ u32 changed = 0;
+ int ret;
if (!ieee80211_sdata_running(sdata))
return;
@@ -945,24 +886,39 @@ static void ieee80211_chswitch_work(struct work_struct *work)
if (!ifmgd->associated)
goto out;
- local->_oper_chandef = local->csa_chandef;
+ ret = ieee80211_vif_change_channel(sdata, &local->csa_chandef,
+ &changed);
+ if (ret) {
+ sdata_info(sdata,
+ "vif channel switch failed, disconnecting\n");
+ ieee80211_queue_work(&sdata->local->hw,
+ &ifmgd->csa_connection_drop_work);
+ goto out;
+ }
- if (!local->ops->channel_switch) {
- /* call "hw_config" only if doing sw channel switch */
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- } else {
- /* update the device channel directly */
- local->hw.conf.chandef = local->_oper_chandef;
+ if (!local->use_chanctx) {
+ local->_oper_chandef = local->csa_chandef;
+ /* Call "hw_config" only if doing sw channel switch.
+ * Otherwise update the channel directly
+ */
+ if (!local->ops->channel_switch)
+ ieee80211_hw_config(local, 0);
+ else
+ local->hw.conf.chandef = local->_oper_chandef;
}
/* XXX: shouldn't really modify cfg80211-owned data! */
- ifmgd->associated->channel = local->_oper_chandef.chan;
+ ifmgd->associated->channel = local->csa_chandef.chan;
/* XXX: wait for a beacon first? */
ieee80211_wake_queues_by_reason(&local->hw,
IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_CSA);
+
+ ieee80211_bss_info_change_notify(sdata, changed);
+
out:
+ sdata->vif.csa_active = false;
ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
sdata_unlock(sdata);
}
@@ -1000,20 +956,12 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct cfg80211_bss *cbss = ifmgd->associated;
- struct ieee80211_bss *bss;
struct ieee80211_chanctx *chanctx;
- enum ieee80211_band new_band;
- int new_freq;
- u8 new_chan_no;
+ enum ieee80211_band current_band;
u8 count;
u8 mode;
- struct ieee80211_channel *new_chan;
struct cfg80211_chan_def new_chandef = {};
- struct cfg80211_chan_def new_vht_chandef = {};
- const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
- const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
- const struct ieee80211_ht_operation *ht_oper;
- int secondary_channel_offset = -1;
+ int res;
sdata_assert_lock(sdata);
@@ -1027,162 +975,23 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED)
return;
- sec_chan_offs = elems->sec_chan_offs;
- wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
- ht_oper = elems->ht_operation;
-
- if (ifmgd->flags & (IEEE80211_STA_DISABLE_HT |
- IEEE80211_STA_DISABLE_40MHZ)) {
- sec_chan_offs = NULL;
- wide_bw_chansw_ie = NULL;
- /* only used for bandwidth here */
- ht_oper = NULL;
- }
-
- if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT)
- wide_bw_chansw_ie = NULL;
-
- if (elems->ext_chansw_ie) {
- if (!ieee80211_operating_class_to_band(
- elems->ext_chansw_ie->new_operating_class,
- &new_band)) {
- sdata_info(sdata,
- "cannot understand ECSA IE operating class %d, disconnecting\n",
- elems->ext_chansw_ie->new_operating_class);
- ieee80211_queue_work(&local->hw,
- &ifmgd->csa_connection_drop_work);
- }
- new_chan_no = elems->ext_chansw_ie->new_ch_num;
- count = elems->ext_chansw_ie->count;
- mode = elems->ext_chansw_ie->mode;
- } else if (elems->ch_switch_ie) {
- new_band = cbss->channel->band;
- new_chan_no = elems->ch_switch_ie->new_ch_num;
- count = elems->ch_switch_ie->count;
- mode = elems->ch_switch_ie->mode;
- } else {
- /* nothing here we understand */
- return;
- }
-
- bss = (void *)cbss->priv;
-
- new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band);
- new_chan = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
- if (!new_chan || new_chan->flags & IEEE80211_CHAN_DISABLED) {
- sdata_info(sdata,
- "AP %pM switches to unsupported channel (%d MHz), disconnecting\n",
- ifmgd->associated->bssid, new_freq);
+ current_band = cbss->channel->band;
+ res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band,
+ ifmgd->flags,
+ ifmgd->associated->bssid, &count,
+ &mode, &new_chandef);
+ if (res < 0)
ieee80211_queue_work(&local->hw,
&ifmgd->csa_connection_drop_work);
+ if (res)
return;
- }
-
- if (!beacon && sec_chan_offs) {
- secondary_channel_offset = sec_chan_offs->sec_chan_offs;
- } else if (beacon && ht_oper) {
- secondary_channel_offset =
- ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
- } else if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
- /*
- * If it's not a beacon, HT is enabled and the IE not present,
- * it's 20 MHz, 802.11-2012 8.5.2.6:
- * This element [the Secondary Channel Offset Element] is
- * present when switching to a 40 MHz channel. It may be
- * present when switching to a 20 MHz channel (in which
- * case the secondary channel offset is set to SCN).
- */
- secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
- }
-
- switch (secondary_channel_offset) {
- default:
- /* secondary_channel_offset was present but is invalid */
- case IEEE80211_HT_PARAM_CHA_SEC_NONE:
- cfg80211_chandef_create(&new_chandef, new_chan,
- NL80211_CHAN_HT20);
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- cfg80211_chandef_create(&new_chandef, new_chan,
- NL80211_CHAN_HT40PLUS);
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- cfg80211_chandef_create(&new_chandef, new_chan,
- NL80211_CHAN_HT40MINUS);
- break;
- case -1:
- cfg80211_chandef_create(&new_chandef, new_chan,
- NL80211_CHAN_NO_HT);
- /* keep width for 5/10 MHz channels */
- switch (sdata->vif.bss_conf.chandef.width) {
- case NL80211_CHAN_WIDTH_5:
- case NL80211_CHAN_WIDTH_10:
- new_chandef.width = sdata->vif.bss_conf.chandef.width;
- break;
- default:
- break;
- }
- break;
- }
-
- if (wide_bw_chansw_ie) {
- new_vht_chandef.chan = new_chan;
- new_vht_chandef.center_freq1 =
- ieee80211_channel_to_frequency(
- wide_bw_chansw_ie->new_center_freq_seg0,
- new_band);
-
- switch (wide_bw_chansw_ie->new_channel_width) {
- default:
- /* hmmm, ignore VHT and use HT if present */
- case IEEE80211_VHT_CHANWIDTH_USE_HT:
- new_vht_chandef.chan = NULL;
- break;
- case IEEE80211_VHT_CHANWIDTH_80MHZ:
- new_vht_chandef.width = NL80211_CHAN_WIDTH_80;
- break;
- case IEEE80211_VHT_CHANWIDTH_160MHZ:
- new_vht_chandef.width = NL80211_CHAN_WIDTH_160;
- break;
- case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
- /* field is otherwise reserved */
- new_vht_chandef.center_freq2 =
- ieee80211_channel_to_frequency(
- wide_bw_chansw_ie->new_center_freq_seg1,
- new_band);
- new_vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
- break;
- }
- if (ifmgd->flags & IEEE80211_STA_DISABLE_80P80MHZ &&
- new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80)
- chandef_downgrade(&new_vht_chandef);
- if (ifmgd->flags & IEEE80211_STA_DISABLE_160MHZ &&
- new_vht_chandef.width == NL80211_CHAN_WIDTH_160)
- chandef_downgrade(&new_vht_chandef);
- if (ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ &&
- new_vht_chandef.width > NL80211_CHAN_WIDTH_20)
- chandef_downgrade(&new_vht_chandef);
- }
-
- /* if VHT data is there validate & use it */
- if (new_vht_chandef.chan) {
- if (!cfg80211_chandef_compatible(&new_vht_chandef,
- &new_chandef)) {
- sdata_info(sdata,
- "AP %pM CSA has inconsistent channel data, disconnecting\n",
- ifmgd->associated->bssid);
- ieee80211_queue_work(&local->hw,
- &ifmgd->csa_connection_drop_work);
- return;
- }
- new_chandef = new_vht_chandef;
- }
if (!cfg80211_chandef_usable(local->hw.wiphy, &new_chandef,
IEEE80211_CHAN_DISABLED)) {
sdata_info(sdata,
"AP %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
- ifmgd->associated->bssid, new_freq,
+ ifmgd->associated->bssid,
+ new_chandef.chan->center_freq,
new_chandef.width, new_chandef.center_freq1,
new_chandef.center_freq2);
ieee80211_queue_work(&local->hw,
@@ -1191,17 +1000,28 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
}
ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+ sdata->vif.csa_active = true;
+ mutex_lock(&local->chanctx_mtx);
if (local->use_chanctx) {
- sdata_info(sdata,
- "not handling channel switch with channel contexts\n");
- ieee80211_queue_work(&local->hw,
- &ifmgd->csa_connection_drop_work);
- return;
+ u32 num_chanctx = 0;
+ list_for_each_entry(chanctx, &local->chanctx_list, list)
+ num_chanctx++;
+
+ if (num_chanctx > 1 ||
+ !(local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)) {
+ sdata_info(sdata,
+ "not handling chan-switch with channel contexts\n");
+ ieee80211_queue_work(&local->hw,
+ &ifmgd->csa_connection_drop_work);
+ mutex_unlock(&local->chanctx_mtx);
+ return;
+ }
}
- mutex_lock(&local->chanctx_mtx);
if (WARN_ON(!rcu_access_pointer(sdata->vif.chanctx_conf))) {
+ ieee80211_queue_work(&local->hw,
+ &ifmgd->csa_connection_drop_work);
mutex_unlock(&local->chanctx_mtx);
return;
}
@@ -1374,8 +1194,7 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
if (!mgd->associated)
return false;
- if (mgd->flags & (IEEE80211_STA_BEACON_POLL |
- IEEE80211_STA_CONNECTION_POLL))
+ if (mgd->flags & IEEE80211_STA_CONNECTION_POLL)
return false;
if (!mgd->have_beacon)
@@ -1691,8 +1510,7 @@ static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
{
lockdep_assert_held(&sdata->local->mtx);
- sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
- IEEE80211_STA_BEACON_POLL);
+ sdata->u.mgd.flags &= ~IEEE80211_STA_CONNECTION_POLL;
ieee80211_run_deferred_scan(sdata->local);
}
@@ -1954,11 +1772,8 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
struct ieee80211_local *local = sdata->local;
mutex_lock(&local->mtx);
- if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
- IEEE80211_STA_CONNECTION_POLL))) {
- mutex_unlock(&local->mtx);
- return;
- }
+ if (!(ifmgd->flags & IEEE80211_STA_CONNECTION_POLL))
+ goto out;
__ieee80211_stop_poll(sdata);
@@ -2094,15 +1909,9 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
* because otherwise we would reset the timer every time and
* never check whether we received a probe response!
*/
- if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
- IEEE80211_STA_CONNECTION_POLL))
+ if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)
already = true;
- if (beacon)
- ifmgd->flags |= IEEE80211_STA_BEACON_POLL;
- else
- ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;
-
mutex_unlock(&sdata->local->mtx);
if (already)
@@ -2174,6 +1983,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
true, frame_buf);
ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
+ sdata->vif.csa_active = false;
ieee80211_wake_queues_by_reason(&sdata->local->hw,
IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -2717,7 +2527,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
*/
ifmgd->wmm_last_param_set = -1;
- if (elems.wmm_param)
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) && elems.wmm_param)
ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
elems.wmm_param_len);
else
@@ -3061,17 +2871,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
}
- if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
+ if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) {
mlme_dbg_ratelimited(sdata,
"cancelling AP probe due to a received beacon\n");
- mutex_lock(&local->mtx);
- ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
- ieee80211_run_deferred_scan(local);
- mutex_unlock(&local->mtx);
-
- mutex_lock(&local->iflist_mtx);
- ieee80211_recalc_ps(local, -1);
- mutex_unlock(&local->iflist_mtx);
+ ieee80211_reset_ap_probe(sdata);
}
/*
@@ -3152,7 +2955,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
&elems, true);
- if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) &&
+ ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
elems.wmm_param_len))
changed |= BSS_CHANGED_QOS;
@@ -3543,8 +3347,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
} else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started)
run_again(sdata, ifmgd->assoc_data->timeout);
- if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
- IEEE80211_STA_CONNECTION_POLL) &&
+ if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL &&
ifmgd->associated) {
u8 bssid[ETH_ALEN];
int max_tries;
@@ -3876,7 +3679,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
return ret;
while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
- ifmgd->flags |= chandef_downgrade(&chandef);
+ ifmgd->flags |= ieee80211_chandef_downgrade(&chandef);
ret = ieee80211_vif_use_channel(sdata, &chandef,
IEEE80211_CHANCTX_SHARED);
}
@@ -4135,6 +3938,44 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
return err;
}
+static bool ieee80211_usable_wmm_params(struct ieee80211_sub_if_data *sdata,
+ const u8 *wmm_param, int len)
+{
+ const u8 *pos;
+ size_t left;
+
+ if (len < 8)
+ return false;
+
+ if (wmm_param[5] != 1 /* version */)
+ return false;
+
+ pos = wmm_param + 8;
+ left = len - 8;
+
+ for (; left >= 4; left -= 4, pos += 4) {
+ u8 aifsn = pos[0] & 0x0f;
+ u8 ecwmin = pos[1] & 0x0f;
+ u8 ecwmax = (pos[1] & 0xf0) >> 4;
+ int aci = (pos[0] >> 5) & 0x03;
+
+ if (aifsn < 2) {
+ sdata_info(sdata,
+ "AP has invalid WMM params (AIFSN=%d for ACI %d), disabling WMM\n",
+ aifsn, aci);
+ return false;
+ }
+ if (ecwmin > ecwmax) {
+ sdata_info(sdata,
+ "AP has invalid WMM params (ECWmin/max=%d/%d for ACI %d), disabling WMM\n",
+ ecwmin, ecwmax, aci);
+ return false;
+ }
+ }
+
+ return true;
+}
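/*
 * Standalone sketch of the per-AC record layout that
 * ieee80211_usable_wmm_params() above walks: each 4-byte record packs
 * AIFSN (bits 0-3), ACM (bit 4) and ACI (bits 5-6) into byte 0 and the
 * ECWmin/ECWmax nibbles into byte 1, with CW = 2^ECW - 1 (the same
 * ecw2cw() conversion seen earlier in this file).  Struct and function
 * names here are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

struct wmm_ac {
        unsigned aci, aifsn, cw_min, cw_max;
};

static unsigned ecw2cw(unsigned ecw)
{
        return (1u << ecw) - 1;
}

static int decode_wmm_ac(const uint8_t rec[4], struct wmm_ac *ac)
{
        ac->aifsn  = rec[0] & 0x0f;
        ac->aci    = (rec[0] >> 5) & 0x03;
        ac->cw_min = ecw2cw(rec[1] & 0x0f);
        ac->cw_max = ecw2cw((rec[1] & 0xf0) >> 4);

        /* the sanity checks the patch adds before trusting the AP */
        if (ac->aifsn < 2 || ac->cw_min > ac->cw_max)
                return -1;
        return 0;
}

int main(void)
{
        /* typical best-effort defaults: AIFSN 3, ECWmin 4, ECWmax 10 */
        const uint8_t best_effort[4] = { 0x03, 0xa4, 0x00, 0x00 };
        struct wmm_ac ac;

        if (!decode_wmm_ac(best_effort, &ac))
                printf("ACI %u: AIFSN %u, CWmin %u, CWmax %u\n",
                       ac.aci, ac.aifsn, ac.cw_min, ac.cw_max);
        return 0;
}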
+
int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct cfg80211_assoc_request *req)
{
@@ -4192,9 +4033,45 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
}
/* prepare assoc data */
-
+
ifmgd->beacon_crc_valid = false;
+ assoc_data->wmm = bss->wmm_used &&
+ (local->hw.queues >= IEEE80211_NUM_ACS);
+ if (assoc_data->wmm) {
+ /* try to check validity of WMM params IE */
+ const struct cfg80211_bss_ies *ies;
+ const u8 *wp, *start, *end;
+
+ rcu_read_lock();
+ ies = rcu_dereference(req->bss->ies);
+ start = ies->data;
+ end = start + ies->len;
+
+ while (true) {
+ wp = cfg80211_find_vendor_ie(
+ WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WMM,
+ start, end - start);
+ if (!wp)
+ break;
+ start = wp + wp[1] + 2;
+ /* if this IE is too short, try the next */
+ if (wp[1] <= 4)
+ continue;
+ /* if this IE is WMM params, we found what we wanted */
+ if (wp[6] == 1)
+ break;
+ }
+
+ if (!wp || !ieee80211_usable_wmm_params(sdata, wp + 2,
+ wp[1] - 2)) {
+ assoc_data->wmm = false;
+ ifmgd->flags |= IEEE80211_STA_DISABLE_WMM;
+ }
+ rcu_read_unlock();
+ }
+
/*
* IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
* We still associate in non-HT mode (11a/b/g) if any one of these
@@ -4224,18 +4101,22 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
/* Also disable HT if we don't support it or the AP doesn't use WMM */
sband = local->hw.wiphy->bands[req->bss->channel->band];
if (!sband->ht_cap.ht_supported ||
- local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
+ local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used ||
+ ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
- if (!bss->wmm_used)
+ if (!bss->wmm_used &&
+ !(ifmgd->flags & IEEE80211_STA_DISABLE_WMM))
netdev_info(sdata->dev,
"disabling HT as WMM/QoS is not supported by the AP\n");
}
/* disable VHT if we don't support it or the AP doesn't use WMM */
if (!sband->vht_cap.vht_supported ||
- local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
+ local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used ||
+ ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
- if (!bss->wmm_used)
+ if (!bss->wmm_used &&
+ !(ifmgd->flags & IEEE80211_STA_DISABLE_WMM))
netdev_info(sdata->dev,
"disabling VHT as WMM/QoS is not supported by the AP\n");
}
@@ -4264,8 +4145,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
sdata->smps_mode = ifmgd->req_smps;
assoc_data->capability = req->bss->capability;
- assoc_data->wmm = bss->wmm_used &&
- (local->hw.queues >= IEEE80211_NUM_ACS);
assoc_data->supp_rates = bss->supp_rates;
assoc_data->supp_rates_len = bss->supp_rates_len;
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index acd1f71adc03..0c2a29484c07 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -394,6 +394,8 @@ void ieee80211_sw_roc_work(struct work_struct *work)
if (started)
ieee80211_start_next_roc(local);
+ else if (list_empty(&local->roc_list))
+ ieee80211_run_deferred_scan(local);
}
out_unlock:
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index e126605cec66..22b223f13c9f 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -235,7 +235,8 @@ static void rc_send_low_basicrate(s8 *idx, u32 basic_rates,
static void __rate_control_send_low(struct ieee80211_hw *hw,
struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta,
- struct ieee80211_tx_info *info)
+ struct ieee80211_tx_info *info,
+ u32 rate_mask)
{
int i;
u32 rate_flags =
@@ -247,6 +248,12 @@ static void __rate_control_send_low(struct ieee80211_hw *hw,
info->control.rates[0].idx = 0;
for (i = 0; i < sband->n_bitrates; i++) {
+ if (!(rate_mask & BIT(i)))
+ continue;
+
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
if (!rate_supported(sta, sband->band, i))
continue;
@@ -274,7 +281,8 @@ bool rate_control_send_low(struct ieee80211_sta *pubsta,
bool use_basicrate = false;
if (!pubsta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
- __rate_control_send_low(txrc->hw, sband, pubsta, info);
+ __rate_control_send_low(txrc->hw, sband, pubsta, info,
+ txrc->rate_idx_mask);
if (!pubsta && txrc->bss) {
mcast_rate = txrc->bss_conf->mcast_rate[sband->band];
@@ -656,7 +664,8 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
rate_control_apply_mask(sdata, sta, sband, info, dest, max_rates);
if (dest[0].idx < 0)
- __rate_control_send_low(&sdata->local->hw, sband, sta, info);
+ __rate_control_send_low(&sdata->local->hw, sband, sta, info,
+ sdata->rc_rateidx_mask[info->band]);
if (sta)
rate_fixup_ratelist(vif, sband, info, dest, max_rates);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 5dedc56c94db..505bc0dea074 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -144,8 +144,8 @@ void rate_control_deinitialize(struct ieee80211_local *local);
/* Rate control algorithms */
#ifdef CONFIG_MAC80211_RC_PID
-extern int rc80211_pid_init(void);
-extern void rc80211_pid_exit(void);
+int rc80211_pid_init(void);
+void rc80211_pid_exit(void);
#else
static inline int rc80211_pid_init(void)
{
@@ -157,8 +157,8 @@ static inline void rc80211_pid_exit(void)
#endif
#ifdef CONFIG_MAC80211_RC_MINSTREL
-extern int rc80211_minstrel_init(void);
-extern void rc80211_minstrel_exit(void);
+int rc80211_minstrel_init(void);
+void rc80211_minstrel_exit(void);
#else
static inline int rc80211_minstrel_init(void)
{
@@ -170,8 +170,8 @@ static inline void rc80211_minstrel_exit(void)
#endif
#ifdef CONFIG_MAC80211_RC_MINSTREL_HT
-extern int rc80211_minstrel_ht_init(void);
-extern void rc80211_minstrel_ht_exit(void);
+int rc80211_minstrel_ht_init(void);
+void rc80211_minstrel_ht_exit(void);
#else
static inline int rc80211_minstrel_ht_init(void)
{
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 8b5f7ef7c0c9..7fa1b36e6202 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -203,6 +203,15 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
memcpy(mi->max_tp_rate, tmp_tp_rate, sizeof(mi->max_tp_rate));
mi->max_prob_rate = tmp_prob_rate;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* use fixed index if set */
+ if (mp->fixed_rate_idx != -1) {
+ mi->max_tp_rate[0] = mp->fixed_rate_idx;
+ mi->max_tp_rate[1] = mp->fixed_rate_idx;
+ mi->max_prob_rate = mp->fixed_rate_idx;
+ }
+#endif
+
/* Reset update timer */
mi->stats_update = jiffies;
@@ -310,6 +319,11 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
/* increase sum packet counter */
mi->packet_count++;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ if (mp->fixed_rate_idx != -1)
+ return;
+#endif
+
delta = (mi->packet_count * sampling_ratio / 100) -
(mi->sample_count + mi->sample_deferred / 2);
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 7c323f27ba23..5d60779a0c1b 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -365,6 +365,14 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
}
}
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* use fixed index if set */
+ if (mp->fixed_rate_idx != -1) {
+ mi->max_tp_rate = mp->fixed_rate_idx;
+ mi->max_tp_rate2 = mp->fixed_rate_idx;
+ mi->max_prob_rate = mp->fixed_rate_idx;
+ }
+#endif
mi->stats_update = jiffies;
}
@@ -774,6 +782,11 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
info->flags |= mi->tx_flags;
minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble);
+#ifdef CONFIG_MAC80211_DEBUGFS
+ if (mp->fixed_rate_idx != -1)
+ return;
+#endif
+
/* Don't use EAPOL frames for sampling on non-mrr hw */
if (mp->hw->max_rates == 1 &&
(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
@@ -781,16 +794,6 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
else
sample_idx = minstrel_get_sample_rate(mp, mi);
-#ifdef CONFIG_MAC80211_DEBUGFS
- /* use fixed index if set */
- if (mp->fixed_rate_idx != -1) {
- mi->max_tp_rate = mp->fixed_rate_idx;
- mi->max_tp_rate2 = mp->fixed_rate_idx;
- mi->max_prob_rate = mp->fixed_rate_idx;
- sample_idx = -1;
- }
-#endif
-
mi->total_packets++;
/* wraparound */
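
The two minstrel hunks above move the debugfs fixed-rate override from the per-frame get_rate path into the periodic stats update, so the override is applied once when the best-rate slots are recomputed and the per-frame path simply skips sampling. A small standalone sketch of that pattern follows, with illustrative names (struct rc_state, update_stats) that are not part of the minstrel code.

#include <stdio.h>

struct rc_state { int max_tp_rate; int max_tp_rate2; int max_prob_rate; };

static void update_stats(struct rc_state *st, int best, int second,
                         int most_reliable, int fixed_rate_idx)
{
        st->max_tp_rate = best;
        st->max_tp_rate2 = second;
        st->max_prob_rate = most_reliable;

        /* debug override: pin every slot to the fixed index */
        if (fixed_rate_idx != -1) {
                st->max_tp_rate = fixed_rate_idx;
                st->max_tp_rate2 = fixed_rate_idx;
                st->max_prob_rate = fixed_rate_idx;
        }
}

int main(void)
{
        struct rc_state st;

        update_stats(&st, 7, 5, 2, 3);          /* force index 3 */
        printf("%d %d %d\n", st.max_tp_rate, st.max_tp_rate2, st.max_prob_rate);
        return 0;
}
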
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index c97a0657c043..6ff134650a84 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -167,29 +167,29 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
* provide large enough buffers. */
length = length < RC_PID_PRINT_BUF_SIZE ?
length : RC_PID_PRINT_BUF_SIZE;
- p = snprintf(pb, length, "%u %lu ", ev->id, ev->timestamp);
+ p = scnprintf(pb, length, "%u %lu ", ev->id, ev->timestamp);
switch (ev->type) {
case RC_PID_EVENT_TYPE_TX_STATUS:
- p += snprintf(pb + p, length - p, "tx_status %u %u",
- !(ev->data.flags & IEEE80211_TX_STAT_ACK),
- ev->data.tx_status.status.rates[0].idx);
+ p += scnprintf(pb + p, length - p, "tx_status %u %u",
+ !(ev->data.flags & IEEE80211_TX_STAT_ACK),
+ ev->data.tx_status.status.rates[0].idx);
break;
case RC_PID_EVENT_TYPE_RATE_CHANGE:
- p += snprintf(pb + p, length - p, "rate_change %d %d",
- ev->data.index, ev->data.rate);
+ p += scnprintf(pb + p, length - p, "rate_change %d %d",
+ ev->data.index, ev->data.rate);
break;
case RC_PID_EVENT_TYPE_TX_RATE:
- p += snprintf(pb + p, length - p, "tx_rate %d %d",
- ev->data.index, ev->data.rate);
+ p += scnprintf(pb + p, length - p, "tx_rate %d %d",
+ ev->data.index, ev->data.rate);
break;
case RC_PID_EVENT_TYPE_PF_SAMPLE:
- p += snprintf(pb + p, length - p,
- "pf_sample %d %d %d %d",
- ev->data.pf_sample, ev->data.prop_err,
- ev->data.int_err, ev->data.der_err);
+ p += scnprintf(pb + p, length - p,
+ "pf_sample %d %d %d %d",
+ ev->data.pf_sample, ev->data.prop_err,
+ ev->data.int_err, ev->data.der_err);
break;
}
- p += snprintf(pb + p, length - p, "\n");
+ p += scnprintf(pb + p, length - p, "\n");
spin_unlock_irqrestore(&events->lock, status);
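
The rc80211_pid_debugfs.c hunk replaces chained snprintf() calls with scnprintf(). The difference matters when building a string incrementally: snprintf() returns the length that would have been written, so on truncation "p += snprintf(...)" can push p past the buffer and make "length - p" bogus, while scnprintf() returns only what was actually stored. The userspace sketch below defines a scnprintf-style wrapper for illustration (in the kernel the helper comes from lib/vsprintf.c).

#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int i;

        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);

        if (i < (int)size)
                return i;                       /* fully written */
        if (size != 0)
                return (int)size - 1;           /* truncated: chars stored */
        return 0;
}

int main(void)
{
        char pb[16];
        int p;

        p = snprintf(pb, sizeof(pb), "%s", "0123456789abcdef-overflow");
        printf("snprintf reported %d (buffer holds %zu)\n", p, sizeof(pb));

        p = my_scnprintf(pb, sizeof(pb), "%s", "0123456789abcdef-overflow");
        printf("scnprintf reported %d\n", p);   /* bounded by the buffer */
        return 0;
}
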
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 54395d7583ba..0011ac815097 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -995,8 +995,9 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
rx->sta->num_duplicates++;
}
return RX_DROP_UNUSABLE;
- } else
+ } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
+ }
}
if (unlikely(rx->skb->len < 16)) {
@@ -2402,7 +2403,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
- mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED)
+ mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
+ mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
return RX_DROP_UNUSABLE;
if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
@@ -2566,31 +2568,46 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
goto queue;
case WLAN_CATEGORY_SPECTRUM_MGMT:
- if (status->band != IEEE80211_BAND_5GHZ)
- break;
-
- if (sdata->vif.type != NL80211_IFTYPE_STATION)
- break;
-
/* verify action_code is present */
if (len < IEEE80211_MIN_ACTION_SIZE + 1)
break;
switch (mgmt->u.action.u.measurement.action_code) {
case WLAN_ACTION_SPCT_MSR_REQ:
+ if (status->band != IEEE80211_BAND_5GHZ)
+ break;
+
if (len < (IEEE80211_MIN_ACTION_SIZE +
sizeof(mgmt->u.action.u.measurement)))
break;
+
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ break;
+
ieee80211_process_measurement_req(sdata, mgmt, len);
goto handled;
- case WLAN_ACTION_SPCT_CHL_SWITCH:
- if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ case WLAN_ACTION_SPCT_CHL_SWITCH: {
+ u8 *bssid;
+ if (len < (IEEE80211_MIN_ACTION_SIZE +
+ sizeof(mgmt->u.action.u.chan_switch)))
+ break;
+
+ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+ sdata->vif.type != NL80211_IFTYPE_ADHOC)
break;
- if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
+ if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ bssid = sdata->u.mgd.bssid;
+ else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+ bssid = sdata->u.ibss.bssid;
+ else
+ break;
+
+ if (!ether_addr_equal(mgmt->bssid, bssid))
break;
goto queue;
+ }
}
break;
case WLAN_CATEGORY_SA_QUERY:
@@ -3056,6 +3073,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
case NL80211_IFTYPE_ADHOC:
if (!bssid)
return 0;
+ if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
+ ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
+ return 0;
if (ieee80211_is_beacon(hdr->frame_control)) {
return 1;
} else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 08afe74b98f4..5ad66a83ef7f 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -238,6 +238,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
enum ieee80211_band band;
int i, ielen, n_chans;
+ if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
+ return false;
+
do {
if (local->hw_scan_band == IEEE80211_NUM_BANDS)
return false;
@@ -391,8 +394,7 @@ static bool ieee80211_can_scan(struct ieee80211_local *local,
return false;
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- sdata->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
- IEEE80211_STA_CONNECTION_POLL))
+ sdata->u.mgd.flags & IEEE80211_STA_CONNECTION_POLL)
return false;
return true;
@@ -940,7 +942,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
if (!local->scan_req)
goto out;
+ /*
+ * We have a scan running and the driver already reported completion,
+ * but the worker hasn't run yet or is stuck on the mutex - mark it as
+ * cancelled.
+ */
+ if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
+ test_bit(SCAN_COMPLETED, &local->scanning)) {
+ set_bit(SCAN_HW_CANCELLED, &local->scanning);
+ goto out;
+ }
+
if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
+ /*
+ * Make sure that __ieee80211_scan_completed doesn't trigger a
+ * scan on another band.
+ */
+ set_bit(SCAN_HW_CANCELLED, &local->scanning);
if (local->ops->cancel_hw_scan)
drv_cancel_hw_scan(local,
rcu_dereference_protected(local->scan_sdata,
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 578eea3fc04d..921597e279a3 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -21,6 +21,168 @@
#include "sta_info.h"
#include "wme.h"
+int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
+ struct ieee802_11_elems *elems, bool beacon,
+ enum ieee80211_band current_band,
+ u32 sta_flags, u8 *bssid, u8 *count, u8 *mode,
+ struct cfg80211_chan_def *new_chandef)
+{
+ enum ieee80211_band new_band;
+ int new_freq;
+ u8 new_chan_no;
+ struct ieee80211_channel *new_chan;
+ struct cfg80211_chan_def new_vht_chandef = {};
+ const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
+ const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
+ const struct ieee80211_ht_operation *ht_oper;
+ int secondary_channel_offset = -1;
+
+ sec_chan_offs = elems->sec_chan_offs;
+ wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
+ ht_oper = elems->ht_operation;
+
+ if (sta_flags & (IEEE80211_STA_DISABLE_HT |
+ IEEE80211_STA_DISABLE_40MHZ)) {
+ sec_chan_offs = NULL;
+ wide_bw_chansw_ie = NULL;
+ /* only used for bandwidth here */
+ ht_oper = NULL;
+ }
+
+ if (sta_flags & IEEE80211_STA_DISABLE_VHT)
+ wide_bw_chansw_ie = NULL;
+
+ if (elems->ext_chansw_ie) {
+ if (!ieee80211_operating_class_to_band(
+ elems->ext_chansw_ie->new_operating_class,
+ &new_band)) {
+ sdata_info(sdata,
+ "cannot understand ECSA IE operating class %d, disconnecting\n",
+ elems->ext_chansw_ie->new_operating_class);
+ return -EINVAL;
+ }
+ new_chan_no = elems->ext_chansw_ie->new_ch_num;
+ *count = elems->ext_chansw_ie->count;
+ *mode = elems->ext_chansw_ie->mode;
+ } else if (elems->ch_switch_ie) {
+ new_band = current_band;
+ new_chan_no = elems->ch_switch_ie->new_ch_num;
+ *count = elems->ch_switch_ie->count;
+ *mode = elems->ch_switch_ie->mode;
+ } else {
+ /* nothing here we understand */
+ return 1;
+ }
+
+ new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band);
+ new_chan = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
+ if (!new_chan || new_chan->flags & IEEE80211_CHAN_DISABLED) {
+ sdata_info(sdata,
+ "BSS %pM switches to unsupported channel (%d MHz), disconnecting\n",
+ bssid, new_freq);
+ return -EINVAL;
+ }
+
+ if (!beacon && sec_chan_offs) {
+ secondary_channel_offset = sec_chan_offs->sec_chan_offs;
+ } else if (beacon && ht_oper) {
+ secondary_channel_offset =
+ ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
+ } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) {
+ /* If it's not a beacon, HT is enabled and the IE not present,
+ * it's 20 MHz, 802.11-2012 8.5.2.6:
+ * This element [the Secondary Channel Offset Element] is
+ * present when switching to a 40 MHz channel. It may be
+ * present when switching to a 20 MHz channel (in which
+ * case the secondary channel offset is set to SCN).
+ */
+ secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ }
+
+ switch (secondary_channel_offset) {
+ default:
+ /* secondary_channel_offset was present but is invalid */
+ case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+ cfg80211_chandef_create(new_chandef, new_chan,
+ NL80211_CHAN_HT20);
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ cfg80211_chandef_create(new_chandef, new_chan,
+ NL80211_CHAN_HT40PLUS);
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ cfg80211_chandef_create(new_chandef, new_chan,
+ NL80211_CHAN_HT40MINUS);
+ break;
+ case -1:
+ cfg80211_chandef_create(new_chandef, new_chan,
+ NL80211_CHAN_NO_HT);
+ /* keep width for 5/10 MHz channels */
+ switch (sdata->vif.bss_conf.chandef.width) {
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ new_chandef->width = sdata->vif.bss_conf.chandef.width;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ if (wide_bw_chansw_ie) {
+ new_vht_chandef.chan = new_chan;
+ new_vht_chandef.center_freq1 =
+ ieee80211_channel_to_frequency(
+ wide_bw_chansw_ie->new_center_freq_seg0,
+ new_band);
+
+ switch (wide_bw_chansw_ie->new_channel_width) {
+ default:
+ /* hmmm, ignore VHT and use HT if present */
+ case IEEE80211_VHT_CHANWIDTH_USE_HT:
+ new_vht_chandef.chan = NULL;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ new_vht_chandef.width = NL80211_CHAN_WIDTH_80;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ new_vht_chandef.width = NL80211_CHAN_WIDTH_160;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ /* field is otherwise reserved */
+ new_vht_chandef.center_freq2 =
+ ieee80211_channel_to_frequency(
+ wide_bw_chansw_ie->new_center_freq_seg1,
+ new_band);
+ new_vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
+ break;
+ }
+ if (sta_flags & IEEE80211_STA_DISABLE_80P80MHZ &&
+ new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80)
+ ieee80211_chandef_downgrade(&new_vht_chandef);
+ if (sta_flags & IEEE80211_STA_DISABLE_160MHZ &&
+ new_vht_chandef.width == NL80211_CHAN_WIDTH_160)
+ ieee80211_chandef_downgrade(&new_vht_chandef);
+ if (sta_flags & IEEE80211_STA_DISABLE_40MHZ &&
+ new_vht_chandef.width > NL80211_CHAN_WIDTH_20)
+ ieee80211_chandef_downgrade(&new_vht_chandef);
+ }
+
+ /* if VHT data is there validate & use it */
+ if (new_vht_chandef.chan) {
+ if (!cfg80211_chandef_compatible(&new_vht_chandef,
+ new_chandef)) {
+ sdata_info(sdata,
+ "BSS %pM: CSA has inconsistent channel data, disconnecting\n",
+ bssid);
+ return -EINVAL;
+ }
+ *new_chandef = new_vht_chandef;
+ }
+
+ return 0;
+}
+
static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata,
struct ieee80211_msrment_ie *request_ie,
const u8 *da, const u8 *bssid,
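
The new ieee80211_parse_ch_switch_ie() above turns a (E)CSA announcement into a target chandef, converting the advertised channel number plus band into a centre frequency before validating it. The sketch below shows the channel-to-frequency mapping for the two classic bands in simplified form; the real ieee80211_channel_to_frequency() helper covers additional bands and edge cases, and the enum/function names here are illustrative only.

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

static int chan_to_freq(int chan, enum band band)
{
        if (band == BAND_2GHZ) {
                if (chan == 14)
                        return 2484;            /* Japan-only channel */
                if (chan >= 1 && chan <= 13)
                        return 2407 + chan * 5;
                return 0;                       /* invalid */
        }
        /* BAND_5GHZ */
        if (chan >= 1 && chan <= 200)
                return 5000 + chan * 5;
        return 0;
}

int main(void)
{
        printf("ch 6 (2.4 GHz) -> %d MHz\n", chan_to_freq(6, BAND_2GHZ));
        printf("ch 36 (5 GHz)  -> %d MHz\n", chan_to_freq(36, BAND_5GHZ));
        return 0;
}
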
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 368837fe3b80..78dc2e99027e 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -180,6 +180,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
+ if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+ sta->last_rx = jiffies;
+
if (ieee80211_is_data_qos(mgmt->frame_control)) {
struct ieee80211_hdr *hdr = (void *) skb->data;
u8 *qc = ieee80211_get_qos_ctl(hdr);
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 1aba645882bd..d4cee98533fd 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -77,13 +77,13 @@ DECLARE_EVENT_CLASS(local_sdata_addr_evt,
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
- __array(char, addr, 6)
+ __array(char, addr, ETH_ALEN)
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
- memcpy(__entry->addr, sdata->vif.addr, 6);
+ memcpy(__entry->addr, sdata->vif.addr, ETH_ALEN);
),
TP_printk(
@@ -1475,6 +1475,41 @@ DEFINE_EVENT(local_sdata_evt, drv_ipv6_addr_change,
);
#endif
+TRACE_EVENT(drv_join_ibss,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_bss_conf *info),
+
+ TP_ARGS(local, sdata, info),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(u8, dtimper)
+ __field(u16, bcnint)
+ __dynamic_array(u8, ssid, info->ssid_len);
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->dtimper = info->dtim_period;
+ __entry->bcnint = info->beacon_int;
+ memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len);
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG
+ )
+);
+
+DEFINE_EVENT(local_sdata_evt, drv_leave_ibss,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
+);
+
/*
* Tracing for API calls that drivers call.
*/
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3456c0486b48..9993fcb19ecd 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1120,7 +1120,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->sta = rcu_dereference(sdata->u.vlan.sta);
if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
return TX_DROP;
- } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
+ } else if (info->flags & (IEEE80211_TX_CTL_INJECTED |
+ IEEE80211_TX_INTFL_NL80211_FRAME_TX) ||
tx->sdata->control_port_protocol == tx->skb->protocol) {
tx->sta = sta_info_get_bss(sdata, hdr->addr1);
}
@@ -1981,7 +1982,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
* EAPOL frames from the local station.
*/
if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
- !is_multicast_ether_addr(hdr.addr1) && !authorized &&
+ !multicast && !authorized &&
(cpu_to_be16(ethertype) != sdata->control_port_protocol ||
!ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -2357,15 +2358,31 @@ static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
struct probe_resp *resp;
int counter_offset_beacon = sdata->csa_counter_offset_beacon;
int counter_offset_presp = sdata->csa_counter_offset_presp;
+ u8 *beacon_data;
+ size_t beacon_data_len;
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP:
+ beacon_data = beacon->tail;
+ beacon_data_len = beacon->tail_len;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ beacon_data = beacon->head;
+ beacon_data_len = beacon->head_len;
+ break;
+ default:
+ return;
+ }
+ if (WARN_ON(counter_offset_beacon >= beacon_data_len))
+ return;
/* warn if the driver did not check for/react to csa completeness */
- if (WARN_ON(((u8 *)beacon->tail)[counter_offset_beacon] == 0))
+ if (WARN_ON(beacon_data[counter_offset_beacon] == 0))
return;
- ((u8 *)beacon->tail)[counter_offset_beacon]--;
+ beacon_data[counter_offset_beacon]--;
- if (sdata->vif.type == NL80211_IFTYPE_AP &&
- counter_offset_presp) {
+ if (sdata->vif.type == NL80211_IFTYPE_AP && counter_offset_presp) {
rcu_read_lock();
resp = rcu_dereference(sdata->u.ap.probe_resp);
@@ -2400,6 +2417,15 @@ bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
goto out;
beacon_data = beacon->tail;
beacon_data_len = beacon->tail_len;
+ } else if (vif->type == NL80211_IFTYPE_ADHOC) {
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+
+ beacon = rcu_dereference(ifibss->presp);
+ if (!beacon)
+ goto out;
+
+ beacon_data = beacon->head;
+ beacon_data_len = beacon->head_len;
} else {
WARN_ON(1);
goto out;
@@ -2484,6 +2510,10 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
if (!presp)
goto out;
+ if (sdata->vif.csa_active)
+ ieee80211_update_csa(sdata, presp);
+
+
skb = dev_alloc_skb(local->tx_headroom + presp->head_len);
if (!skb)
goto out;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index e1b34a18b243..aefb9d5b9620 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -567,18 +567,15 @@ void ieee80211_flush_queues(struct ieee80211_local *local,
IEEE80211_QUEUE_STOP_REASON_FLUSH);
}
-void ieee80211_iterate_active_interfaces(
- struct ieee80211_hw *hw, u32 iter_flags,
- void (*iterator)(void *data, u8 *mac,
- struct ieee80211_vif *vif),
- void *data)
+static void __iterate_active_interfaces(struct ieee80211_local *local,
+ u32 iter_flags,
+ void (*iterator)(void *data, u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data)
{
- struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
- mutex_lock(&local->iflist_mtx);
-
- list_for_each_entry(sdata, &local->interfaces, list) {
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
@@ -597,13 +594,25 @@ void ieee80211_iterate_active_interfaces(
&sdata->vif);
}
- sdata = rcu_dereference_protected(local->monitor_sdata,
- lockdep_is_held(&local->iflist_mtx));
+ sdata = rcu_dereference_check(local->monitor_sdata,
+ lockdep_is_held(&local->iflist_mtx) ||
+ lockdep_rtnl_is_held());
if (sdata &&
(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL ||
sdata->flags & IEEE80211_SDATA_IN_DRIVER))
iterator(data, sdata->vif.addr, &sdata->vif);
+}
+void ieee80211_iterate_active_interfaces(
+ struct ieee80211_hw *hw, u32 iter_flags,
+ void (*iterator)(void *data, u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ mutex_lock(&local->iflist_mtx);
+ __iterate_active_interfaces(local, iter_flags, iterator, data);
mutex_unlock(&local->iflist_mtx);
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
@@ -615,38 +624,26 @@ void ieee80211_iterate_active_interfaces_atomic(
void *data)
{
struct ieee80211_local *local = hw_to_local(hw);
- struct ieee80211_sub_if_data *sdata;
rcu_read_lock();
+ __iterate_active_interfaces(local, iter_flags, iterator, data);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_MONITOR:
- if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
- continue;
- break;
- case NL80211_IFTYPE_AP_VLAN:
- continue;
- default:
- break;
- }
- if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) &&
- !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
- continue;
- if (ieee80211_sdata_running(sdata))
- iterator(data, sdata->vif.addr,
- &sdata->vif);
- }
+void ieee80211_iterate_active_interfaces_rtnl(
+ struct ieee80211_hw *hw, u32 iter_flags,
+ void (*iterator)(void *data, u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
- sdata = rcu_dereference(local->monitor_sdata);
- if (sdata &&
- (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL ||
- sdata->flags & IEEE80211_SDATA_IN_DRIVER))
- iterator(data, sdata->vif.addr, &sdata->vif);
+ ASSERT_RTNL();
- rcu_read_unlock();
+ __iterate_active_interfaces(local, iter_flags, iterator, data);
}
-EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
+EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_rtnl);
/*
* Nothing should have been stuffed into the workqueue during
@@ -1007,14 +1004,21 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
*/
enable_qos = (sdata->vif.type != NL80211_IFTYPE_STATION);
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- /* Set defaults according to 802.11-2007 Table 7-37 */
- aCWmax = 1023;
- if (use_11b)
- aCWmin = 31;
- else
- aCWmin = 15;
+ /* Set defaults according to 802.11-2007 Table 7-37 */
+ aCWmax = 1023;
+ if (use_11b)
+ aCWmin = 31;
+ else
+ aCWmin = 15;
+ /* Configure old 802.11b/g medium access rules. */
+ qparam.cw_max = aCWmax;
+ qparam.cw_min = aCWmin;
+ qparam.txop = 0;
+ qparam.aifs = 2;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ /* Update if QoS is enabled. */
if (enable_qos) {
switch (ac) {
case IEEE80211_AC_BK:
@@ -1050,12 +1054,6 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
qparam.aifs = 2;
break;
}
- } else {
- /* Confiure old 802.11b/g medium access rules. */
- qparam.cw_max = aCWmax;
- qparam.cw_min = aCWmin;
- qparam.txop = 0;
- qparam.aifs = 2;
}
qparam.uapsd = false;
@@ -1084,8 +1082,8 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt;
int err;
- skb = dev_alloc_skb(local->hw.extra_tx_headroom +
- sizeof(*mgmt) + 6 + extra_len);
+ /* 24 + 6 = header + auth_algo + auth_transaction + status_code */
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len);
if (!skb)
return;
@@ -2103,7 +2101,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
- int rate, skip, shift;
+ int rate, shift;
u8 i, exrates, *pos;
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
u32 rate_flags;
@@ -2131,14 +2129,11 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, exrates + 2);
*pos++ = WLAN_EID_EXT_SUPP_RATES;
*pos++ = exrates;
- skip = 0;
for (i = 8; i < sband->n_bitrates; i++) {
u8 basic = 0;
if ((rate_flags & sband->bitrates[i].flags)
!= rate_flags)
continue;
- if (skip++ < 8)
- continue;
if (need_basic && basic_rates & BIT(i))
basic = 0x80;
rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
@@ -2241,6 +2236,10 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
}
rate = cfg80211_calculate_bitrate(&ri);
+ if (WARN_ONCE(!rate,
+ "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n",
+ status->flag, status->rate_idx, status->vht_nss))
+ return 0;
/* rewind from end of MPDU */
if (status->flag & RX_FLAG_MACTIME_END)
@@ -2295,3 +2294,63 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw)
ieee80211_queue_work(hw, &local->radar_detected_work);
}
EXPORT_SYMBOL(ieee80211_radar_detected);
+
+u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c)
+{
+ u32 ret;
+ int tmp;
+
+ switch (c->width) {
+ case NL80211_CHAN_WIDTH_20:
+ c->width = NL80211_CHAN_WIDTH_20_NOHT;
+ ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ c->width = NL80211_CHAN_WIDTH_20;
+ c->center_freq1 = c->chan->center_freq;
+ ret = IEEE80211_STA_DISABLE_40MHZ |
+ IEEE80211_STA_DISABLE_VHT;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
+ /* n_P40 */
+ tmp /= 2;
+ /* freq_P40 */
+ c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
+ c->width = NL80211_CHAN_WIDTH_40;
+ ret = IEEE80211_STA_DISABLE_VHT;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ c->center_freq2 = 0;
+ c->width = NL80211_CHAN_WIDTH_80;
+ ret = IEEE80211_STA_DISABLE_80P80MHZ |
+ IEEE80211_STA_DISABLE_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ /* n_P20 */
+ tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
+ /* n_P80 */
+ tmp /= 4;
+ c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
+ c->width = NL80211_CHAN_WIDTH_80;
+ ret = IEEE80211_STA_DISABLE_80P80MHZ |
+ IEEE80211_STA_DISABLE_160MHZ;
+ break;
+ default:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ WARN_ON_ONCE(1);
+ c->width = NL80211_CHAN_WIDTH_20_NOHT;
+ ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ WARN_ON_ONCE(1);
+ /* keep c->width */
+ ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+ break;
+ }
+
+ WARN_ON_ONCE(!cfg80211_chandef_valid(c));
+
+ return ret;
+}
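
The exported ieee80211_chandef_downgrade() above narrows a channel definition one step at a time and, for the 80 MHz case, recomputes center_freq1 so the new 40 MHz segment still contains the control channel. A standalone sketch of just that arithmetic follows (hypothetical function name, 80-to-40 case only): locate the 20 MHz sub-channel carrying the control frequency, then pick the 40 MHz half that covers it.

#include <stdio.h>

static int downgrade_80_to_40_center(int control_freq, int center_freq1)
{
        int n_p20 = (30 + control_freq - center_freq1) / 20;    /* 0..3 */
        int n_p40 = n_p20 / 2;                                  /* 0..1 */

        return center_freq1 - 20 + 40 * n_p40;
}

int main(void)
{
        /* 80 MHz block centred at 5210 MHz, control channel 36 (5180 MHz) */
        printf("new 40 MHz centre: %d MHz\n",
               downgrade_80_to_40_center(5180, 5210));          /* 5190 */
        return 0;
}
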
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 97c289414e32..de0112785aae 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -185,13 +185,13 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
vht_cap->cap |= cap_info &
(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
- IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX |
IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX);
}
if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
vht_cap->cap |= cap_info &
- IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+ (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_BEAMFORMEE_STS_MAX);
if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
vht_cap->cap |= cap_info &
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
index b7c7f815deae..52ae6646a411 100644
--- a/net/mac802154/ieee802154_dev.c
+++ b/net/mac802154/ieee802154_dev.c
@@ -174,8 +174,7 @@ ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops)
if (!ops || !ops->xmit || !ops->ed || !ops->start ||
!ops->stop || !ops->set_channel) {
- printk(KERN_ERR
- "undefined IEEE802.15.4 device operations\n");
+ pr_err("undefined IEEE802.15.4 device operations\n");
return NULL;
}
@@ -201,8 +200,7 @@ ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops)
phy = wpan_phy_alloc(priv_size);
if (!phy) {
- printk(KERN_ERR
- "failure to allocate master IEEE802.15.4 device\n");
+ pr_err("failure to allocate master IEEE802.15.4 device\n");
return NULL;
}
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
index 2ca2f4dceab7..e24bcf977296 100644
--- a/net/mac802154/wpan.c
+++ b/net/mac802154/wpan.c
@@ -208,6 +208,8 @@ static int mac802154_header_create(struct sk_buff *skb,
head[1] = fc >> 8;
memcpy(skb_push(skb, pos), head, pos);
+ skb_reset_mac_header(skb);
+ skb->mac_len = pos;
return pos;
}
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 1bec1219ab81..851cd880b0c0 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -33,6 +33,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
SKB_GSO_DODGY |
SKB_GSO_TCP_ECN |
SKB_GSO_GRE |
+ SKB_GSO_IPIP |
SKB_GSO_MPLS)))
goto out;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 6e839b6dff2b..48acec17e27a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -413,6 +413,58 @@ config NETFILTER_SYNPROXY
endif # NF_CONNTRACK
+config NF_TABLES
+ depends on NETFILTER_NETLINK
+ tristate "Netfilter nf_tables support"
+
+config NFT_EXTHDR
+ depends on NF_TABLES
+ tristate "Netfilter nf_tables IPv6 exthdr module"
+
+config NFT_META
+ depends on NF_TABLES
+ tristate "Netfilter nf_tables meta module"
+
+config NFT_CT
+ depends on NF_TABLES
+ depends on NF_CONNTRACK
+ tristate "Netfilter nf_tables conntrack module"
+
+config NFT_RBTREE
+ depends on NF_TABLES
+ tristate "Netfilter nf_tables rbtree set module"
+
+config NFT_HASH
+ depends on NF_TABLES
+ tristate "Netfilter nf_tables hash set module"
+
+config NFT_COUNTER
+ depends on NF_TABLES
+ tristate "Netfilter nf_tables counter module"
+
+config NFT_LOG
+ depends on NF_TABLES
+ tristate "Netfilter nf_tables log module"
+
+config NFT_LIMIT
+ depends on NF_TABLES
+ tristate "Netfilter nf_tables limit module"
+
+config NFT_NAT
+ depends on NF_TABLES
+ depends on NF_CONNTRACK
+ depends on NF_NAT
+ tristate "Netfilter nf_tables nat module"
+
+config NFT_COMPAT
+ depends on NF_TABLES
+ depends on NETFILTER_XTABLES
+ tristate "Netfilter x_tables over nf_tables module"
+ help
+ This is required if you intend to use any of the existing
+ x_tables match/target extensions over the nf_tables
+ framework.
+
config NETFILTER_XTABLES
tristate "Netfilter Xtables support (required for ip_tables)"
default m if NETFILTER_ADVANCED=n
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index c3a0a12907f6..394483b2c193 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -64,6 +64,24 @@ obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
# SYNPROXY
obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o
+# nf_tables
+nf_tables-objs += nf_tables_core.o nf_tables_api.o
+nf_tables-objs += nft_immediate.o nft_cmp.o nft_lookup.o
+nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o
+
+obj-$(CONFIG_NF_TABLES) += nf_tables.o
+obj-$(CONFIG_NFT_COMPAT) += nft_compat.o
+obj-$(CONFIG_NFT_EXTHDR) += nft_exthdr.o
+obj-$(CONFIG_NFT_META) += nft_meta.o
+obj-$(CONFIG_NFT_CT) += nft_ct.o
+obj-$(CONFIG_NFT_LIMIT) += nft_limit.o
+obj-$(CONFIG_NFT_NAT) += nft_nat.o
+#nf_tables-objs += nft_meta_target.o
+obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o
+obj-$(CONFIG_NFT_HASH) += nft_hash.o
+obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
+obj-$(CONFIG_NFT_LOG) += nft_log.o
+
# generic X tables
obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 593b16ea45e0..1fbab0cdd302 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -146,7 +146,7 @@ unsigned int nf_iterate(struct list_head *head,
/* Optimization: we don't need to hold module
reference here, since function can't sleep. --RR */
repeat:
- verdict = (*elemp)->hook(hook, skb, indev, outdev, okfn);
+ verdict = (*elemp)->hook(*elemp, skb, indev, outdev, okfn);
if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
if (unlikely((verdict & NF_VERDICT_MASK)
diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig
index ba36c283d837..a2d6263b6c64 100644
--- a/net/netfilter/ipset/Kconfig
+++ b/net/netfilter/ipset/Kconfig
@@ -1,7 +1,7 @@
menuconfig IP_SET
tristate "IP set support"
depends on INET && NETFILTER
- depends on NETFILTER_NETLINK
+ select NETFILTER_NETLINK
help
This option adds IP set support to the kernel.
In order to define and use the sets, you need the userspace utility
@@ -90,6 +90,15 @@ config IP_SET_HASH_IPPORTNET
To compile it as a module, choose M here. If unsure, say N.
+config IP_SET_HASH_NETPORTNET
+ tristate "hash:net,port,net set support"
+ depends on IP_SET
+ help
+ This option adds the hash:net,port,net set type support, by which
+ one can store two IPv4/IPv6 subnets and a protocol/port in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config IP_SET_HASH_NET
tristate "hash:net set support"
depends on IP_SET
@@ -99,6 +108,15 @@ config IP_SET_HASH_NET
To compile it as a module, choose M here. If unsure, say N.
+config IP_SET_HASH_NETNET
+ tristate "hash:net,net set support"
+ depends on IP_SET
+ help
+ This option adds the hash:net,net set type support, by which
+ one can store IPv4/IPv6 network address/prefix pairs in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config IP_SET_HASH_NETPORT
tristate "hash:net,port set support"
depends on IP_SET
diff --git a/net/netfilter/ipset/Makefile b/net/netfilter/ipset/Makefile
index 6e965ecd5444..44b2d38476fa 100644
--- a/net/netfilter/ipset/Makefile
+++ b/net/netfilter/ipset/Makefile
@@ -20,6 +20,8 @@ obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o
obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o
obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o
obj-$(CONFIG_IP_SET_HASH_NETIFACE) += ip_set_hash_netiface.o
+obj-$(CONFIG_IP_SET_HASH_NETNET) += ip_set_hash_netnet.o
+obj-$(CONFIG_IP_SET_HASH_NETPORTNET) += ip_set_hash_netportnet.o
# list types
obj-$(CONFIG_IP_SET_LIST_SET) += ip_set_list_set.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index 25243379b887..a13e15be7911 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -8,38 +8,32 @@
#ifndef __IP_SET_BITMAP_IP_GEN_H
#define __IP_SET_BITMAP_IP_GEN_H
-#define CONCAT(a, b) a##b
-#define TOKEN(a,b) CONCAT(a, b)
-
-#define mtype_do_test TOKEN(MTYPE, _do_test)
-#define mtype_gc_test TOKEN(MTYPE, _gc_test)
-#define mtype_is_filled TOKEN(MTYPE, _is_filled)
-#define mtype_do_add TOKEN(MTYPE, _do_add)
-#define mtype_do_del TOKEN(MTYPE, _do_del)
-#define mtype_do_list TOKEN(MTYPE, _do_list)
-#define mtype_do_head TOKEN(MTYPE, _do_head)
-#define mtype_adt_elem TOKEN(MTYPE, _adt_elem)
-#define mtype_add_timeout TOKEN(MTYPE, _add_timeout)
-#define mtype_gc_init TOKEN(MTYPE, _gc_init)
-#define mtype_kadt TOKEN(MTYPE, _kadt)
-#define mtype_uadt TOKEN(MTYPE, _uadt)
-#define mtype_destroy TOKEN(MTYPE, _destroy)
-#define mtype_flush TOKEN(MTYPE, _flush)
-#define mtype_head TOKEN(MTYPE, _head)
-#define mtype_same_set TOKEN(MTYPE, _same_set)
-#define mtype_elem TOKEN(MTYPE, _elem)
-#define mtype_test TOKEN(MTYPE, _test)
-#define mtype_add TOKEN(MTYPE, _add)
-#define mtype_del TOKEN(MTYPE, _del)
-#define mtype_list TOKEN(MTYPE, _list)
-#define mtype_gc TOKEN(MTYPE, _gc)
+#define mtype_do_test IPSET_TOKEN(MTYPE, _do_test)
+#define mtype_gc_test IPSET_TOKEN(MTYPE, _gc_test)
+#define mtype_is_filled IPSET_TOKEN(MTYPE, _is_filled)
+#define mtype_do_add IPSET_TOKEN(MTYPE, _do_add)
+#define mtype_ext_cleanup IPSET_TOKEN(MTYPE, _ext_cleanup)
+#define mtype_do_del IPSET_TOKEN(MTYPE, _do_del)
+#define mtype_do_list IPSET_TOKEN(MTYPE, _do_list)
+#define mtype_do_head IPSET_TOKEN(MTYPE, _do_head)
+#define mtype_adt_elem IPSET_TOKEN(MTYPE, _adt_elem)
+#define mtype_add_timeout IPSET_TOKEN(MTYPE, _add_timeout)
+#define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init)
+#define mtype_kadt IPSET_TOKEN(MTYPE, _kadt)
+#define mtype_uadt IPSET_TOKEN(MTYPE, _uadt)
+#define mtype_destroy IPSET_TOKEN(MTYPE, _destroy)
+#define mtype_flush IPSET_TOKEN(MTYPE, _flush)
+#define mtype_head IPSET_TOKEN(MTYPE, _head)
+#define mtype_same_set IPSET_TOKEN(MTYPE, _same_set)
+#define mtype_elem IPSET_TOKEN(MTYPE, _elem)
+#define mtype_test IPSET_TOKEN(MTYPE, _test)
+#define mtype_add IPSET_TOKEN(MTYPE, _add)
+#define mtype_del IPSET_TOKEN(MTYPE, _del)
+#define mtype_list IPSET_TOKEN(MTYPE, _list)
+#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
#define mtype MTYPE
-#define ext_timeout(e, m) \
- (unsigned long *)((e) + (m)->offset[IPSET_OFFSET_TIMEOUT])
-#define ext_counter(e, m) \
- (struct ip_set_counter *)((e) + (m)->offset[IPSET_OFFSET_COUNTER])
-#define get_ext(map, id) ((map)->extensions + (map)->dsize * (id))
+#define get_ext(set, map, id) ((map)->extensions + (set)->dsize * (id))
static void
mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
@@ -49,11 +43,22 @@ mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
init_timer(&map->gc);
map->gc.data = (unsigned long) set;
map->gc.function = gc;
- map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
add_timer(&map->gc);
}
static void
+mtype_ext_cleanup(struct ip_set *set)
+{
+ struct mtype *map = set->data;
+ u32 id;
+
+ for (id = 0; id < map->elements; id++)
+ if (test_bit(id, map->members))
+ ip_set_ext_destroy(set, get_ext(set, map, id));
+}
+
+static void
mtype_destroy(struct ip_set *set)
{
struct mtype *map = set->data;
@@ -62,8 +67,11 @@ mtype_destroy(struct ip_set *set)
del_timer_sync(&map->gc);
ip_set_free(map->members);
- if (map->dsize)
+ if (set->dsize) {
+ if (set->extensions & IPSET_EXT_DESTROY)
+ mtype_ext_cleanup(set);
ip_set_free(map->extensions);
+ }
kfree(map);
set->data = NULL;
@@ -74,6 +82,8 @@ mtype_flush(struct ip_set *set)
{
struct mtype *map = set->data;
+ if (set->extensions & IPSET_EXT_DESTROY)
+ mtype_ext_cleanup(set);
memset(map->members, 0, map->memsize);
}
@@ -91,12 +101,9 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) +
map->memsize +
- map->dsize * map->elements)) ||
- (SET_WITH_TIMEOUT(set) &&
- nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
- (SET_WITH_COUNTER(set) &&
- nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
- htonl(IPSET_FLAG_WITH_COUNTERS))))
+ set->dsize * map->elements)))
+ goto nla_put_failure;
+ if (unlikely(ip_set_put_flags(skb, set)))
goto nla_put_failure;
ipset_nest_end(skb, nested);
@@ -111,16 +118,16 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
{
struct mtype *map = set->data;
const struct mtype_adt_elem *e = value;
- void *x = get_ext(map, e->id);
- int ret = mtype_do_test(e, map);
+ void *x = get_ext(set, map, e->id);
+ int ret = mtype_do_test(e, map, set->dsize);
if (ret <= 0)
return ret;
if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(x, map)))
+ ip_set_timeout_expired(ext_timeout(x, set)))
return 0;
if (SET_WITH_COUNTER(set))
- ip_set_update_counter(ext_counter(x, map), ext, mext, flags);
+ ip_set_update_counter(ext_counter(x, set), ext, mext, flags);
return 1;
}
@@ -130,26 +137,30 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
{
struct mtype *map = set->data;
const struct mtype_adt_elem *e = value;
- void *x = get_ext(map, e->id);
- int ret = mtype_do_add(e, map, flags);
+ void *x = get_ext(set, map, e->id);
+ int ret = mtype_do_add(e, map, flags, set->dsize);
if (ret == IPSET_ADD_FAILED) {
if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(x, map)))
+ ip_set_timeout_expired(ext_timeout(x, set)))
ret = 0;
else if (!(flags & IPSET_FLAG_EXIST))
return -IPSET_ERR_EXIST;
+ /* Element is re-added, cleanup extensions */
+ ip_set_ext_destroy(set, x);
}
if (SET_WITH_TIMEOUT(set))
#ifdef IP_SET_BITMAP_STORED_TIMEOUT
- mtype_add_timeout(ext_timeout(x, map), e, ext, map, ret);
+ mtype_add_timeout(ext_timeout(x, set), e, ext, set, map, ret);
#else
- ip_set_timeout_set(ext_timeout(x, map), ext->timeout);
+ ip_set_timeout_set(ext_timeout(x, set), ext->timeout);
#endif
if (SET_WITH_COUNTER(set))
- ip_set_init_counter(ext_counter(x, map), ext);
+ ip_set_init_counter(ext_counter(x, set), ext);
+ if (SET_WITH_COMMENT(set))
+ ip_set_init_comment(ext_comment(x, set), ext);
return 0;
}
@@ -159,16 +170,27 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
{
struct mtype *map = set->data;
const struct mtype_adt_elem *e = value;
- const void *x = get_ext(map, e->id);
+ void *x = get_ext(set, map, e->id);
- if (mtype_do_del(e, map) ||
- (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(x, map))))
+ if (mtype_do_del(e, map))
+ return -IPSET_ERR_EXIST;
+
+ ip_set_ext_destroy(set, x);
+ if (SET_WITH_TIMEOUT(set) &&
+ ip_set_timeout_expired(ext_timeout(x, set)))
return -IPSET_ERR_EXIST;
return 0;
}
+#ifndef IP_SET_BITMAP_STORED_TIMEOUT
+static inline bool
+mtype_is_filled(const struct mtype_elem *x)
+{
+ return true;
+}
+#endif
+
static int
mtype_list(const struct ip_set *set,
struct sk_buff *skb, struct netlink_callback *cb)
@@ -183,13 +205,13 @@ mtype_list(const struct ip_set *set,
return -EMSGSIZE;
for (; cb->args[2] < map->elements; cb->args[2]++) {
id = cb->args[2];
- x = get_ext(map, id);
+ x = get_ext(set, map, id);
if (!test_bit(id, map->members) ||
(SET_WITH_TIMEOUT(set) &&
#ifdef IP_SET_BITMAP_STORED_TIMEOUT
mtype_is_filled((const struct mtype_elem *) x) &&
#endif
- ip_set_timeout_expired(ext_timeout(x, map))))
+ ip_set_timeout_expired(ext_timeout(x, set))))
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
@@ -199,23 +221,10 @@ mtype_list(const struct ip_set *set,
} else
goto nla_put_failure;
}
- if (mtype_do_list(skb, map, id))
+ if (mtype_do_list(skb, map, id, set->dsize))
goto nla_put_failure;
- if (SET_WITH_TIMEOUT(set)) {
-#ifdef IP_SET_BITMAP_STORED_TIMEOUT
- if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_stored(map, id,
- ext_timeout(x, map)))))
- goto nla_put_failure;
-#else
- if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(
- ext_timeout(x, map)))))
- goto nla_put_failure;
-#endif
- }
- if (SET_WITH_COUNTER(set) &&
- ip_set_put_counter(skb, ext_counter(x, map)))
+ if (ip_set_put_extensions(skb, set, x,
+ mtype_is_filled((const struct mtype_elem *) x)))
goto nla_put_failure;
ipset_nest_end(skb, nested);
}
@@ -228,11 +237,11 @@ mtype_list(const struct ip_set *set,
nla_put_failure:
nla_nest_cancel(skb, nested);
- ipset_nest_end(skb, adt);
if (unlikely(id == first)) {
cb->args[2] = 0;
return -EMSGSIZE;
}
+ ipset_nest_end(skb, adt);
return 0;
}
@@ -241,21 +250,23 @@ mtype_gc(unsigned long ul_set)
{
struct ip_set *set = (struct ip_set *) ul_set;
struct mtype *map = set->data;
- const void *x;
+ void *x;
u32 id;
/* We run parallel with other readers (test element)
* but adding/deleting new entries is locked out */
read_lock_bh(&set->lock);
for (id = 0; id < map->elements; id++)
- if (mtype_gc_test(id, map)) {
- x = get_ext(map, id);
- if (ip_set_timeout_expired(ext_timeout(x, map)))
+ if (mtype_gc_test(id, map, set->dsize)) {
+ x = get_ext(set, map, id);
+ if (ip_set_timeout_expired(ext_timeout(x, set))) {
clear_bit(id, map->members);
+ ip_set_ext_destroy(set, x);
+ }
}
read_unlock_bh(&set->lock);
- map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
add_timer(&map->gc);
}
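
The ip_set_bitmap_gen.h hunk replaces the local CONCAT/TOKEN macros with a shared IPSET_TOKEN, but the underlying idiom is the same: a two-level expansion so MTYPE is expanded to its value before ## pastes the suffix on. The minimal demonstration below uses the CONCAT/TOKEN names from the removed lines; the bitmap_ip_do_test body is a placeholder, not the ipset implementation.

#include <stdio.h>

#define CONCAT(a, b)    a##b
#define TOKEN(a, b)     CONCAT(a, b)

#define MTYPE           bitmap_ip
#define mtype_do_test   TOKEN(MTYPE, _do_test)

/* placeholder implementation; the generated name is what matters */
static int bitmap_ip_do_test(int id) { return id == 42; }

int main(void)
{
        /* mtype_do_test expands to bitmap_ip_do_test */
        printf("%d\n", mtype_do_test(42));
        return 0;
}
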
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index f1a8128bef01..6f1f9f494808 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -25,12 +25,13 @@
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_bitmap.h>
-#define REVISION_MIN 0
-#define REVISION_MAX 1 /* Counter support added */
+#define IPSET_TYPE_REV_MIN 0
+/* 1 Counter support added */
+#define IPSET_TYPE_REV_MAX 2 /* Comment support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("bitmap:ip", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("bitmap:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_bitmap:ip");
#define MTYPE bitmap_ip
@@ -44,10 +45,7 @@ struct bitmap_ip {
u32 elements; /* number of max elements in the set */
u32 hosts; /* number of hosts in a subnet */
size_t memsize; /* members size */
- size_t dsize; /* extensions struct size */
- size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
u8 netmask; /* subnet netmask */
- u32 timeout; /* timeout parameter */
struct timer_list gc; /* garbage collection */
};
@@ -65,20 +63,21 @@ ip_to_id(const struct bitmap_ip *m, u32 ip)
/* Common functions */
static inline int
-bitmap_ip_do_test(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map)
+bitmap_ip_do_test(const struct bitmap_ip_adt_elem *e,
+ struct bitmap_ip *map, size_t dsize)
{
return !!test_bit(e->id, map->members);
}
static inline int
-bitmap_ip_gc_test(u16 id, const struct bitmap_ip *map)
+bitmap_ip_gc_test(u16 id, const struct bitmap_ip *map, size_t dsize)
{
return !!test_bit(id, map->members);
}
static inline int
bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map,
- u32 flags)
+ u32 flags, size_t dsize)
{
return !!test_and_set_bit(e->id, map->members);
}
@@ -90,7 +89,8 @@ bitmap_ip_do_del(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map)
}
static inline int
-bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id)
+bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id,
+ size_t dsize)
{
return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
htonl(map->first_ip + id * map->hosts));
@@ -113,7 +113,7 @@ bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
struct bitmap_ip *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_ip_adt_elem e = { };
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
u32 ip;
ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
@@ -131,9 +131,9 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
{
struct bitmap_ip *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- u32 ip, ip_to;
+ u32 ip = 0, ip_to = 0;
struct bitmap_ip_adt_elem e = { };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret = 0;
if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -200,7 +200,7 @@ bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
return x->first_ip == y->first_ip &&
x->last_ip == y->last_ip &&
x->netmask == y->netmask &&
- x->timeout == y->timeout &&
+ a->timeout == b->timeout &&
a->extensions == b->extensions;
}
@@ -209,25 +209,6 @@ bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
struct bitmap_ip_elem {
};
-/* Timeout variant */
-
-struct bitmap_ipt_elem {
- unsigned long timeout;
-};
-
-/* Plain variant with counter */
-
-struct bitmap_ipc_elem {
- struct ip_set_counter counter;
-};
-
-/* Timeout variant with counter */
-
-struct bitmap_ipct_elem {
- unsigned long timeout;
- struct ip_set_counter counter;
-};
-
#include "ip_set_bitmap_gen.h"
/* Create bitmap:ip type of sets */
@@ -240,8 +221,8 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
map->members = ip_set_alloc(map->memsize);
if (!map->members)
return false;
- if (map->dsize) {
- map->extensions = ip_set_alloc(map->dsize * elements);
+ if (set->dsize) {
+ map->extensions = ip_set_alloc(set->dsize * elements);
if (!map->extensions) {
kfree(map->members);
return false;
@@ -252,7 +233,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
map->elements = elements;
map->hosts = hosts;
map->netmask = netmask;
- map->timeout = IPSET_NO_TIMEOUT;
+ set->timeout = IPSET_NO_TIMEOUT;
set->data = map;
set->family = NFPROTO_IPV4;
@@ -261,10 +242,11 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
}
static int
-bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+ u32 flags)
{
struct bitmap_ip *map;
- u32 first_ip, last_ip, hosts, cadt_flags = 0;
+ u32 first_ip = 0, last_ip = 0, hosts;
u64 elements;
u8 netmask = 32;
int ret;
@@ -336,61 +318,15 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
map->memsize = bitmap_bytes(0, elements - 1);
set->variant = &bitmap_ip;
- if (tb[IPSET_ATTR_CADT_FLAGS])
- cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
- if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
- set->extensions |= IPSET_EXT_COUNTER;
- if (tb[IPSET_ATTR_TIMEOUT]) {
- map->dsize = sizeof(struct bitmap_ipct_elem);
- map->offset[IPSET_OFFSET_TIMEOUT] =
- offsetof(struct bitmap_ipct_elem, timeout);
- map->offset[IPSET_OFFSET_COUNTER] =
- offsetof(struct bitmap_ipct_elem, counter);
-
- if (!init_map_ip(set, map, first_ip, last_ip,
- elements, hosts, netmask)) {
- kfree(map);
- return -ENOMEM;
- }
-
- map->timeout = ip_set_timeout_uget(
- tb[IPSET_ATTR_TIMEOUT]);
- set->extensions |= IPSET_EXT_TIMEOUT;
-
- bitmap_ip_gc_init(set, bitmap_ip_gc);
- } else {
- map->dsize = sizeof(struct bitmap_ipc_elem);
- map->offset[IPSET_OFFSET_COUNTER] =
- offsetof(struct bitmap_ipc_elem, counter);
-
- if (!init_map_ip(set, map, first_ip, last_ip,
- elements, hosts, netmask)) {
- kfree(map);
- return -ENOMEM;
- }
- }
- } else if (tb[IPSET_ATTR_TIMEOUT]) {
- map->dsize = sizeof(struct bitmap_ipt_elem);
- map->offset[IPSET_OFFSET_TIMEOUT] =
- offsetof(struct bitmap_ipt_elem, timeout);
-
- if (!init_map_ip(set, map, first_ip, last_ip,
- elements, hosts, netmask)) {
- kfree(map);
- return -ENOMEM;
- }
-
- map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->extensions |= IPSET_EXT_TIMEOUT;
-
+ set->dsize = ip_set_elem_len(set, tb, 0);
+ if (!init_map_ip(set, map, first_ip, last_ip,
+ elements, hosts, netmask)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
bitmap_ip_gc_init(set, bitmap_ip_gc);
- } else {
- map->dsize = 0;
- if (!init_map_ip(set, map, first_ip, last_ip,
- elements, hosts, netmask)) {
- kfree(map);
- return -ENOMEM;
- }
}
return 0;
}
@@ -401,8 +337,8 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
.features = IPSET_TYPE_IP,
.dimension = IPSET_DIM_ONE,
.family = NFPROTO_IPV4,
- .revision_min = REVISION_MIN,
- .revision_max = REVISION_MAX,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
.create = bitmap_ip_create,
.create_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
@@ -420,6 +356,7 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 3b30e0bef890..740eabededd9 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -25,12 +25,13 @@
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_bitmap.h>
-#define REVISION_MIN 0
-#define REVISION_MAX 1 /* Counter support added */
+#define IPSET_TYPE_REV_MIN 0
+/* 1 Counter support added */
+#define IPSET_TYPE_REV_MAX 2 /* Comment support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("bitmap:ip,mac", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("bitmap:ip,mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_bitmap:ip,mac");
#define MTYPE bitmap_ipmac
@@ -48,11 +49,8 @@ struct bitmap_ipmac {
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
- u32 timeout; /* timeout value */
- struct timer_list gc; /* garbage collector */
size_t memsize; /* members size */
- size_t dsize; /* size of element */
- size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
+ struct timer_list gc; /* garbage collector */
};
/* ADT structure for generic function args */
@@ -82,13 +80,13 @@ get_elem(void *extensions, u16 id, size_t dsize)
static inline int
bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
- const struct bitmap_ipmac *map)
+ const struct bitmap_ipmac *map, size_t dsize)
{
const struct bitmap_ipmac_elem *elem;
if (!test_bit(e->id, map->members))
return 0;
- elem = get_elem(map->extensions, e->id, map->dsize);
+ elem = get_elem(map->extensions, e->id, dsize);
if (elem->filled == MAC_FILLED)
return e->ether == NULL ||
ether_addr_equal(e->ether, elem->ether);
@@ -97,13 +95,13 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
}
static inline int
-bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map)
+bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)
{
const struct bitmap_ipmac_elem *elem;
if (!test_bit(id, map->members))
return 0;
- elem = get_elem(map->extensions, id, map->dsize);
+ elem = get_elem(map->extensions, id, dsize);
/* Timer not started for the incomplete elements */
return elem->filled == MAC_FILLED;
}
@@ -117,13 +115,13 @@ bitmap_ipmac_is_filled(const struct bitmap_ipmac_elem *elem)
static inline int
bitmap_ipmac_add_timeout(unsigned long *timeout,
const struct bitmap_ipmac_adt_elem *e,
- const struct ip_set_ext *ext,
+ const struct ip_set_ext *ext, struct ip_set *set,
struct bitmap_ipmac *map, int mode)
{
u32 t = ext->timeout;
if (mode == IPSET_ADD_START_STORED_TIMEOUT) {
- if (t == map->timeout)
+ if (t == set->timeout)
/* Timeout was not specified, get stored one */
t = *timeout;
ip_set_timeout_set(timeout, t);
@@ -142,11 +140,11 @@ bitmap_ipmac_add_timeout(unsigned long *timeout,
static inline int
bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
- struct bitmap_ipmac *map, u32 flags)
+ struct bitmap_ipmac *map, u32 flags, size_t dsize)
{
struct bitmap_ipmac_elem *elem;
- elem = get_elem(map->extensions, e->id, map->dsize);
+ elem = get_elem(map->extensions, e->id, dsize);
if (test_and_set_bit(e->id, map->members)) {
if (elem->filled == MAC_FILLED) {
if (e->ether && (flags & IPSET_FLAG_EXIST))
@@ -178,22 +176,12 @@ bitmap_ipmac_do_del(const struct bitmap_ipmac_adt_elem *e,
return !test_and_clear_bit(e->id, map->members);
}
-static inline unsigned long
-ip_set_timeout_stored(struct bitmap_ipmac *map, u32 id, unsigned long *timeout)
-{
- const struct bitmap_ipmac_elem *elem =
- get_elem(map->extensions, id, map->dsize);
-
- return elem->filled == MAC_FILLED ? ip_set_timeout_get(timeout) :
- *timeout;
-}
-
static inline int
bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
- u32 id)
+ u32 id, size_t dsize)
{
const struct bitmap_ipmac_elem *elem =
- get_elem(map->extensions, id, map->dsize);
+ get_elem(map->extensions, id, dsize);
return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
htonl(map->first_ip + id)) ||
@@ -216,7 +204,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
struct bitmap_ipmac *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_ipmac_adt_elem e = {};
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
u32 ip;
/* MAC can be src only */
@@ -245,8 +233,8 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
const struct bitmap_ipmac *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_ipmac_adt_elem e = {};
- struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
- u32 ip;
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 ip = 0;
int ret = 0;
if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -285,43 +273,12 @@ bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
return x->first_ip == y->first_ip &&
x->last_ip == y->last_ip &&
- x->timeout == y->timeout &&
+ a->timeout == b->timeout &&
a->extensions == b->extensions;
}
/* Plain variant */
-/* Timeout variant */
-
-struct bitmap_ipmact_elem {
- struct {
- unsigned char ether[ETH_ALEN];
- unsigned char filled;
- } __attribute__ ((aligned));
- unsigned long timeout;
-};
-
-/* Plain variant with counter */
-
-struct bitmap_ipmacc_elem {
- struct {
- unsigned char ether[ETH_ALEN];
- unsigned char filled;
- } __attribute__ ((aligned));
- struct ip_set_counter counter;
-};
-
-/* Timeout variant with counter */
-
-struct bitmap_ipmacct_elem {
- struct {
- unsigned char ether[ETH_ALEN];
- unsigned char filled;
- } __attribute__ ((aligned));
- unsigned long timeout;
- struct ip_set_counter counter;
-};
-
#include "ip_set_bitmap_gen.h"
/* Create bitmap:ip,mac type of sets */
@@ -330,11 +287,11 @@ static bool
init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
u32 first_ip, u32 last_ip, u32 elements)
{
- map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize);
+ map->members = ip_set_alloc(map->memsize);
if (!map->members)
return false;
- if (map->dsize) {
- map->extensions = ip_set_alloc(map->dsize * elements);
+ if (set->dsize) {
+ map->extensions = ip_set_alloc(set->dsize * elements);
if (!map->extensions) {
kfree(map->members);
return false;
@@ -343,7 +300,7 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
map->first_ip = first_ip;
map->last_ip = last_ip;
map->elements = elements;
- map->timeout = IPSET_NO_TIMEOUT;
+ set->timeout = IPSET_NO_TIMEOUT;
set->data = map;
set->family = NFPROTO_IPV4;
@@ -352,10 +309,10 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
}
static int
-bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
+bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
u32 flags)
{
- u32 first_ip, last_ip, cadt_flags = 0;
+ u32 first_ip = 0, last_ip = 0;
u64 elements;
struct bitmap_ipmac *map;
int ret;
@@ -399,57 +356,15 @@ bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
map->memsize = bitmap_bytes(0, elements - 1);
set->variant = &bitmap_ipmac;
- if (tb[IPSET_ATTR_CADT_FLAGS])
- cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
- if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
- set->extensions |= IPSET_EXT_COUNTER;
- if (tb[IPSET_ATTR_TIMEOUT]) {
- map->dsize = sizeof(struct bitmap_ipmacct_elem);
- map->offset[IPSET_OFFSET_TIMEOUT] =
- offsetof(struct bitmap_ipmacct_elem, timeout);
- map->offset[IPSET_OFFSET_COUNTER] =
- offsetof(struct bitmap_ipmacct_elem, counter);
-
- if (!init_map_ipmac(set, map, first_ip, last_ip,
- elements)) {
- kfree(map);
- return -ENOMEM;
- }
- map->timeout = ip_set_timeout_uget(
- tb[IPSET_ATTR_TIMEOUT]);
- set->extensions |= IPSET_EXT_TIMEOUT;
- bitmap_ipmac_gc_init(set, bitmap_ipmac_gc);
- } else {
- map->dsize = sizeof(struct bitmap_ipmacc_elem);
- map->offset[IPSET_OFFSET_COUNTER] =
- offsetof(struct bitmap_ipmacc_elem, counter);
-
- if (!init_map_ipmac(set, map, first_ip, last_ip,
- elements)) {
- kfree(map);
- return -ENOMEM;
- }
- }
- } else if (tb[IPSET_ATTR_TIMEOUT]) {
- map->dsize = sizeof(struct bitmap_ipmact_elem);
- map->offset[IPSET_OFFSET_TIMEOUT] =
- offsetof(struct bitmap_ipmact_elem, timeout);
-
- if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
- kfree(map);
- return -ENOMEM;
- }
- map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->extensions |= IPSET_EXT_TIMEOUT;
+ set->dsize = ip_set_elem_len(set, tb,
+ sizeof(struct bitmap_ipmac_elem));
+ if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
bitmap_ipmac_gc_init(set, bitmap_ipmac_gc);
- } else {
- map->dsize = sizeof(struct bitmap_ipmac_elem);
-
- if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
- kfree(map);
- return -ENOMEM;
- }
- set->variant = &bitmap_ipmac;
}
return 0;
}
@@ -460,8 +375,8 @@ static struct ip_set_type bitmap_ipmac_type = {
.features = IPSET_TYPE_IP | IPSET_TYPE_MAC,
.dimension = IPSET_DIM_TWO,
.family = NFPROTO_IPV4,
- .revision_min = REVISION_MIN,
- .revision_max = REVISION_MAX,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
.create = bitmap_ipmac_create,
.create_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
@@ -478,6 +393,7 @@ static struct ip_set_type bitmap_ipmac_type = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
},
.me = THIS_MODULE,
};
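Several helpers above (bitmap_ipmac_do_add(), bitmap_ipmac_do_list()) now take an explicit dsize because the per-element size moved from the type-private map into struct ip_set. The indexing itself stays trivial; a minimal user-space sketch with a stand-in get_elem() and made-up sizes, not the kernel helper itself:

	/* Sketch only: a flat extension area indexed by element id,
	 * where the per-element size (dsize) is decided at create time. */
	#include <stdlib.h>
	#include <string.h>

	static inline void *get_elem(void *extensions, unsigned int id, size_t dsize)
	{
		return (char *)extensions + (size_t)id * dsize;
	}

	int main(void)
	{
		size_t dsize = 16;              /* e.g. MAC + filled + timeout */
		void *ext = calloc(256, dsize); /* one slot per ip id */

		if (!ext)
			return 1;
		memset(get_elem(ext, 42, dsize), 0xff, dsize); /* touch element 42 */
		free(ext);
		return 0;
	}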
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 8207d1fda528..e7603c5b53d7 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -20,12 +20,13 @@
#include <linux/netfilter/ipset/ip_set_bitmap.h>
#include <linux/netfilter/ipset/ip_set_getport.h>
-#define REVISION_MIN 0
-#define REVISION_MAX 1 /* Counter support added */
+#define IPSET_TYPE_REV_MIN 0
+/* 1 Counter support added */
+#define IPSET_TYPE_REV_MAX 2 /* Comment support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("bitmap:port", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("bitmap:port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_bitmap:port");
#define MTYPE bitmap_port
@@ -38,9 +39,6 @@ struct bitmap_port {
u16 last_port; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
size_t memsize; /* members size */
- size_t dsize; /* extensions struct size */
- size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
- u32 timeout; /* timeout parameter */
struct timer_list gc; /* garbage collection */
};
@@ -59,20 +57,20 @@ port_to_id(const struct bitmap_port *m, u16 port)
static inline int
bitmap_port_do_test(const struct bitmap_port_adt_elem *e,
- const struct bitmap_port *map)
+ const struct bitmap_port *map, size_t dsize)
{
return !!test_bit(e->id, map->members);
}
static inline int
-bitmap_port_gc_test(u16 id, const struct bitmap_port *map)
+bitmap_port_gc_test(u16 id, const struct bitmap_port *map, size_t dsize)
{
return !!test_bit(id, map->members);
}
static inline int
bitmap_port_do_add(const struct bitmap_port_adt_elem *e,
- struct bitmap_port *map, u32 flags)
+ struct bitmap_port *map, u32 flags, size_t dsize)
{
return !!test_and_set_bit(e->id, map->members);
}
@@ -85,7 +83,8 @@ bitmap_port_do_del(const struct bitmap_port_adt_elem *e,
}
static inline int
-bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id)
+bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id,
+ size_t dsize)
{
return nla_put_net16(skb, IPSET_ATTR_PORT,
htons(map->first_port + id));
@@ -106,7 +105,7 @@ bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
struct bitmap_port *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_port_adt_elem e = {};
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
__be16 __port;
u16 port = 0;
@@ -131,7 +130,7 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
struct bitmap_port *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_port_adt_elem e = {};
- struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port; /* wraparound */
u16 port_to;
int ret = 0;
@@ -191,7 +190,7 @@ bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
return x->first_port == y->first_port &&
x->last_port == y->last_port &&
- x->timeout == y->timeout &&
+ a->timeout == b->timeout &&
a->extensions == b->extensions;
}
@@ -200,25 +199,6 @@ bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
struct bitmap_port_elem {
};
-/* Timeout variant */
-
-struct bitmap_portt_elem {
- unsigned long timeout;
-};
-
-/* Plain variant with counter */
-
-struct bitmap_portc_elem {
- struct ip_set_counter counter;
-};
-
-/* Timeout variant with counter */
-
-struct bitmap_portct_elem {
- unsigned long timeout;
- struct ip_set_counter counter;
-};
-
#include "ip_set_bitmap_gen.h"
/* Create bitmap:ip type of sets */
/* Create bitmap:port type of sets */
@@ -230,8 +210,8 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
map->members = ip_set_alloc(map->memsize);
if (!map->members)
return false;
- if (map->dsize) {
- map->extensions = ip_set_alloc(map->dsize * map->elements);
+ if (set->dsize) {
+ map->extensions = ip_set_alloc(set->dsize * map->elements);
if (!map->extensions) {
kfree(map->members);
return false;
@@ -239,7 +219,7 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
}
map->first_port = first_port;
map->last_port = last_port;
- map->timeout = IPSET_NO_TIMEOUT;
+ set->timeout = IPSET_NO_TIMEOUT;
set->data = map;
set->family = NFPROTO_UNSPEC;
@@ -248,11 +228,11 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
}
static int
-bitmap_port_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+ u32 flags)
{
struct bitmap_port *map;
u16 first_port, last_port;
- u32 cadt_flags = 0;
if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
@@ -276,53 +256,14 @@ bitmap_port_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
map->elements = last_port - first_port + 1;
map->memsize = map->elements * sizeof(unsigned long);
set->variant = &bitmap_port;
- if (tb[IPSET_ATTR_CADT_FLAGS])
- cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
- if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
- set->extensions |= IPSET_EXT_COUNTER;
- if (tb[IPSET_ATTR_TIMEOUT]) {
- map->dsize = sizeof(struct bitmap_portct_elem);
- map->offset[IPSET_OFFSET_TIMEOUT] =
- offsetof(struct bitmap_portct_elem, timeout);
- map->offset[IPSET_OFFSET_COUNTER] =
- offsetof(struct bitmap_portct_elem, counter);
- if (!init_map_port(set, map, first_port, last_port)) {
- kfree(map);
- return -ENOMEM;
- }
-
- map->timeout =
- ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->extensions |= IPSET_EXT_TIMEOUT;
- bitmap_port_gc_init(set, bitmap_port_gc);
- } else {
- map->dsize = sizeof(struct bitmap_portc_elem);
- map->offset[IPSET_OFFSET_COUNTER] =
- offsetof(struct bitmap_portc_elem, counter);
- if (!init_map_port(set, map, first_port, last_port)) {
- kfree(map);
- return -ENOMEM;
- }
- }
- } else if (tb[IPSET_ATTR_TIMEOUT]) {
- map->dsize = sizeof(struct bitmap_portt_elem);
- map->offset[IPSET_OFFSET_TIMEOUT] =
- offsetof(struct bitmap_portt_elem, timeout);
- if (!init_map_port(set, map, first_port, last_port)) {
- kfree(map);
- return -ENOMEM;
- }
-
- map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->extensions |= IPSET_EXT_TIMEOUT;
+ set->dsize = ip_set_elem_len(set, tb, 0);
+ if (!init_map_port(set, map, first_port, last_port)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
bitmap_port_gc_init(set, bitmap_port_gc);
- } else {
- map->dsize = 0;
- if (!init_map_port(set, map, first_port, last_port)) {
- kfree(map);
- return -ENOMEM;
- }
-
}
return 0;
}
@@ -333,8 +274,8 @@ static struct ip_set_type bitmap_port_type = {
.features = IPSET_TYPE_PORT,
.dimension = IPSET_DIM_ONE,
.family = NFPROTO_UNSPEC,
- .revision_min = REVISION_MIN,
- .revision_max = REVISION_MAX,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
.create = bitmap_port_create,
.create_policy = {
[IPSET_ATTR_PORT] = { .type = NLA_U16 },
@@ -349,6 +290,7 @@ static struct ip_set_type bitmap_port_type = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
},
.me = THIS_MODULE,
};
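bitmap:port has no fixed per-element payload, so ip_set_elem_len(set, tb, 0) is called with a base length of 0: set->dsize ends up being just the size of the requested extensions, and when none are requested it stays 0 and init_map_port() skips the extensions array. A hedged user-space sketch of that branch (helper name and sizes are illustrative):

	#include <stdlib.h>

	/* Hypothetical stand-in for init_map_port(): with dsize == 0 only the
	 * members bitmap is allocated; the extensions area exists only when at
	 * least one extension (timeout/counter/comment) was requested. */
	static int alloc_port_map(size_t dsize, unsigned int elements,
				  unsigned long **members, void **extensions)
	{
		*members = calloc((elements + 63) / 64, sizeof(unsigned long));
		*extensions = NULL;
		if (!*members)
			return -1;
		if (dsize) {
			*extensions = calloc(elements, dsize);
			if (!*extensions) {
				free(*members);
				*members = NULL;
				return -1;
			}
		}
		return 0;
	}

	int main(void)
	{
		unsigned long *members;
		void *ext;
		int rc = alloc_port_map(0, 1024, &members, &ext);

		free(members);
		free(ext);
		return rc ? 1 : 0;
	}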
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index f2e30fb31e78..dc9284bdd2dd 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -17,6 +17,8 @@
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include <net/netlink.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
@@ -27,8 +29,17 @@ static LIST_HEAD(ip_set_type_list); /* all registered set types */
static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */
static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */
-static struct ip_set * __rcu *ip_set_list; /* all individual sets */
-static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
+struct ip_set_net {
+ struct ip_set * __rcu *ip_set_list; /* all individual sets */
+ ip_set_id_t ip_set_max; /* max number of sets */
+ int is_deleted; /* deleted by ip_set_net_exit */
+};
+static int ip_set_net_id __read_mostly;
+
+static inline struct ip_set_net *ip_set_pernet(struct net *net)
+{
+ return net_generic(net, ip_set_net_id);
+}
#define IP_SET_INC 64
#define STREQ(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
@@ -45,8 +56,8 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
/* When the nfnl mutex is held: */
#define nfnl_dereference(p) \
rcu_dereference_protected(p, 1)
-#define nfnl_set(id) \
- nfnl_dereference(ip_set_list)[id]
+#define nfnl_set(inst, id) \
+ nfnl_dereference((inst)->ip_set_list)[id]
/*
* The set types are implemented in modules and registered set types
@@ -315,6 +326,60 @@ ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
}
EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
+typedef void (*destroyer)(void *);
+/* ipset data extension types, in size order */
+
+const struct ip_set_ext_type ip_set_extensions[] = {
+ [IPSET_EXT_ID_COUNTER] = {
+ .type = IPSET_EXT_COUNTER,
+ .flag = IPSET_FLAG_WITH_COUNTERS,
+ .len = sizeof(struct ip_set_counter),
+ .align = __alignof__(struct ip_set_counter),
+ },
+ [IPSET_EXT_ID_TIMEOUT] = {
+ .type = IPSET_EXT_TIMEOUT,
+ .len = sizeof(unsigned long),
+ .align = __alignof__(unsigned long),
+ },
+ [IPSET_EXT_ID_COMMENT] = {
+ .type = IPSET_EXT_COMMENT | IPSET_EXT_DESTROY,
+ .flag = IPSET_FLAG_WITH_COMMENT,
+ .len = sizeof(struct ip_set_comment),
+ .align = __alignof__(struct ip_set_comment),
+ .destroy = (destroyer) ip_set_comment_free,
+ },
+};
+EXPORT_SYMBOL_GPL(ip_set_extensions);
+
+static inline bool
+add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
+{
+ return ip_set_extensions[id].flag ?
+ (flags & ip_set_extensions[id].flag) :
+ !!tb[IPSET_ATTR_TIMEOUT];
+}
+
+size_t
+ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
+{
+ enum ip_set_ext_id id;
+ size_t offset = 0;
+ u32 cadt_flags = 0;
+
+ if (tb[IPSET_ATTR_CADT_FLAGS])
+ cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
+ if (!add_extension(id, cadt_flags, tb))
+ continue;
+ offset += ALIGN(len + offset, ip_set_extensions[id].align);
+ set->offset[id] = offset;
+ set->extensions |= ip_set_extensions[id].type;
+ offset += ip_set_extensions[id].len;
+ }
+ return len + offset;
+}
+EXPORT_SYMBOL_GPL(ip_set_elem_len);
+
int
ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext)
@@ -334,6 +399,12 @@ ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
ext->packets = be64_to_cpu(nla_get_be64(
tb[IPSET_ATTR_PACKETS]));
}
+ if (tb[IPSET_ATTR_COMMENT]) {
+ if (!(set->extensions & IPSET_EXT_COMMENT))
+ return -IPSET_ERR_COMMENT;
+ ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(ip_set_get_extensions);
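ip_set_elem_len() above replaces the hand-written per-variant element structs: each requested extension is appended behind the fixed part of the element at a suitably aligned offset, and the offset is recorded in set->offset[] for the accessor macros. A simplified user-space sketch of the intent (the kernel's exact arithmetic differs slightly, and the concrete sizes below are assumptions):

	#include <stdio.h>
	#include <stddef.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

	/* Walk the extensions in a fixed order, align each one to its natural
	 * alignment, remember its offset and grow the element size. */
	static size_t elem_len(size_t base, int counter, int timeout, int comment,
			       size_t off[3])
	{
		size_t len = base;

		if (counter) {			/* struct ip_set_counter (assumed 16/8) */
			len = ALIGN_UP(len, 8);
			off[0] = len;
			len += 16;
		}
		if (timeout) {			/* unsigned long */
			len = ALIGN_UP(len, __alignof__(unsigned long));
			off[1] = len;
			len += sizeof(unsigned long);
		}
		if (comment) {			/* assumed: comment holds a string pointer */
			len = ALIGN_UP(len, __alignof__(char *));
			off[2] = len;
			len += sizeof(char *);
		}
		return len;
	}

	int main(void)
	{
		size_t off[3] = { 0, 0, 0 };
		size_t len = elem_len(7, 1, 1, 0, off);

		printf("dsize=%zu counter@%zu timeout@%zu\n", len, off[0], off[1]);
		return 0;
	}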
@@ -374,13 +445,14 @@ __ip_set_put(struct ip_set *set)
*/
static inline struct ip_set *
-ip_set_rcu_get(ip_set_id_t index)
+ip_set_rcu_get(struct net *net, ip_set_id_t index)
{
struct ip_set *set;
+ struct ip_set_net *inst = ip_set_pernet(net);
rcu_read_lock();
/* ip_set_list itself needs to be protected */
- set = rcu_dereference(ip_set_list)[index];
+ set = rcu_dereference(inst->ip_set_list)[index];
rcu_read_unlock();
return set;
@@ -390,7 +462,8 @@ int
ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par, struct ip_set_adt_opt *opt)
{
- struct ip_set *set = ip_set_rcu_get(index);
+ struct ip_set *set = ip_set_rcu_get(
+ dev_net(par->in ? par->in : par->out), index);
int ret = 0;
BUG_ON(set == NULL);
@@ -428,7 +501,8 @@ int
ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par, struct ip_set_adt_opt *opt)
{
- struct ip_set *set = ip_set_rcu_get(index);
+ struct ip_set *set = ip_set_rcu_get(
+ dev_net(par->in ? par->in : par->out), index);
int ret;
BUG_ON(set == NULL);
@@ -450,7 +524,8 @@ int
ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par, struct ip_set_adt_opt *opt)
{
- struct ip_set *set = ip_set_rcu_get(index);
+ struct ip_set *set = ip_set_rcu_get(
+ dev_net(par->in ? par->in : par->out), index);
int ret = 0;
BUG_ON(set == NULL);
@@ -474,14 +549,15 @@ EXPORT_SYMBOL_GPL(ip_set_del);
*
*/
ip_set_id_t
-ip_set_get_byname(const char *name, struct ip_set **set)
+ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
{
ip_set_id_t i, index = IPSET_INVALID_ID;
struct ip_set *s;
+ struct ip_set_net *inst = ip_set_pernet(net);
rcu_read_lock();
- for (i = 0; i < ip_set_max; i++) {
- s = rcu_dereference(ip_set_list)[i];
+ for (i = 0; i < inst->ip_set_max; i++) {
+ s = rcu_dereference(inst->ip_set_list)[i];
if (s != NULL && STREQ(s->name, name)) {
__ip_set_get(s);
index = i;
@@ -501,17 +577,26 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
* to be valid, after calling this function.
*
*/
-void
-ip_set_put_byindex(ip_set_id_t index)
+
+static inline void
+__ip_set_put_byindex(struct ip_set_net *inst, ip_set_id_t index)
{
struct ip_set *set;
rcu_read_lock();
- set = rcu_dereference(ip_set_list)[index];
+ set = rcu_dereference(inst->ip_set_list)[index];
if (set != NULL)
__ip_set_put(set);
rcu_read_unlock();
}
+
+void
+ip_set_put_byindex(struct net *net, ip_set_id_t index)
+{
+ struct ip_set_net *inst = ip_set_pernet(net);
+
+ __ip_set_put_byindex(inst, index);
+}
EXPORT_SYMBOL_GPL(ip_set_put_byindex);
/*
@@ -522,9 +607,9 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
*
*/
const char *
-ip_set_name_byindex(ip_set_id_t index)
+ip_set_name_byindex(struct net *net, ip_set_id_t index)
{
- const struct ip_set *set = ip_set_rcu_get(index);
+ const struct ip_set *set = ip_set_rcu_get(net, index);
BUG_ON(set == NULL);
BUG_ON(set->ref == 0);
@@ -546,14 +631,15 @@ EXPORT_SYMBOL_GPL(ip_set_name_byindex);
* The nfnl mutex is used in the function.
*/
ip_set_id_t
-ip_set_nfnl_get(const char *name)
+ip_set_nfnl_get(struct net *net, const char *name)
{
ip_set_id_t i, index = IPSET_INVALID_ID;
struct ip_set *s;
+ struct ip_set_net *inst = ip_set_pernet(net);
nfnl_lock(NFNL_SUBSYS_IPSET);
- for (i = 0; i < ip_set_max; i++) {
- s = nfnl_set(i);
+ for (i = 0; i < inst->ip_set_max; i++) {
+ s = nfnl_set(inst, i);
if (s != NULL && STREQ(s->name, name)) {
__ip_set_get(s);
index = i;
@@ -573,15 +659,16 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_get);
* The nfnl mutex is used in the function.
*/
ip_set_id_t
-ip_set_nfnl_get_byindex(ip_set_id_t index)
+ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
{
struct ip_set *set;
+ struct ip_set_net *inst = ip_set_pernet(net);
- if (index > ip_set_max)
+ if (index > inst->ip_set_max)
return IPSET_INVALID_ID;
nfnl_lock(NFNL_SUBSYS_IPSET);
- set = nfnl_set(index);
+ set = nfnl_set(inst, index);
if (set)
__ip_set_get(set);
else
@@ -600,13 +687,17 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
* The nfnl mutex is used in the function.
*/
void
-ip_set_nfnl_put(ip_set_id_t index)
+ip_set_nfnl_put(struct net *net, ip_set_id_t index)
{
struct ip_set *set;
+ struct ip_set_net *inst = ip_set_pernet(net);
+
nfnl_lock(NFNL_SUBSYS_IPSET);
- set = nfnl_set(index);
- if (set != NULL)
- __ip_set_put(set);
+ if (!inst->is_deleted) { /* already deleted from ip_set_net_exit() */
+ set = nfnl_set(inst, index);
+ if (set != NULL)
+ __ip_set_put(set);
+ }
nfnl_unlock(NFNL_SUBSYS_IPSET);
}
EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
@@ -664,14 +755,14 @@ static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
};
static struct ip_set *
-find_set_and_id(const char *name, ip_set_id_t *id)
+find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id)
{
struct ip_set *set = NULL;
ip_set_id_t i;
*id = IPSET_INVALID_ID;
- for (i = 0; i < ip_set_max; i++) {
- set = nfnl_set(i);
+ for (i = 0; i < inst->ip_set_max; i++) {
+ set = nfnl_set(inst, i);
if (set != NULL && STREQ(set->name, name)) {
*id = i;
break;
@@ -681,22 +772,23 @@ find_set_and_id(const char *name, ip_set_id_t *id)
}
static inline struct ip_set *
-find_set(const char *name)
+find_set(struct ip_set_net *inst, const char *name)
{
ip_set_id_t id;
- return find_set_and_id(name, &id);
+ return find_set_and_id(inst, name, &id);
}
static int
-find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
+find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index,
+ struct ip_set **set)
{
struct ip_set *s;
ip_set_id_t i;
*index = IPSET_INVALID_ID;
- for (i = 0; i < ip_set_max; i++) {
- s = nfnl_set(i);
+ for (i = 0; i < inst->ip_set_max; i++) {
+ s = nfnl_set(inst, i);
if (s == NULL) {
if (*index == IPSET_INVALID_ID)
*index = i;
@@ -725,6 +817,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[])
{
+ struct net *net = sock_net(ctnl);
+ struct ip_set_net *inst = ip_set_pernet(net);
struct ip_set *set, *clash = NULL;
ip_set_id_t index = IPSET_INVALID_ID;
struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {};
@@ -783,7 +877,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
goto put_out;
}
- ret = set->type->create(set, tb, flags);
+ ret = set->type->create(net, set, tb, flags);
if (ret != 0)
goto put_out;
@@ -794,7 +888,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
* by the nfnl mutex. Find the first free index in ip_set_list
* and check clashing.
*/
- ret = find_free_id(set->name, &index, &clash);
+ ret = find_free_id(inst, set->name, &index, &clash);
if (ret == -EEXIST) {
/* If this is the same set and requested, ignore error */
if ((flags & IPSET_FLAG_EXIST) &&
@@ -807,9 +901,9 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
goto cleanup;
} else if (ret == -IPSET_ERR_MAX_SETS) {
struct ip_set **list, **tmp;
- ip_set_id_t i = ip_set_max + IP_SET_INC;
+ ip_set_id_t i = inst->ip_set_max + IP_SET_INC;
- if (i < ip_set_max || i == IPSET_INVALID_ID)
+ if (i < inst->ip_set_max || i == IPSET_INVALID_ID)
/* Wraparound */
goto cleanup;
@@ -817,14 +911,14 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
if (!list)
goto cleanup;
/* nfnl mutex is held, both lists are valid */
- tmp = nfnl_dereference(ip_set_list);
- memcpy(list, tmp, sizeof(struct ip_set *) * ip_set_max);
- rcu_assign_pointer(ip_set_list, list);
+ tmp = nfnl_dereference(inst->ip_set_list);
+ memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
+ rcu_assign_pointer(inst->ip_set_list, list);
/* Make sure all current packets have passed through */
synchronize_net();
/* Use new list */
- index = ip_set_max;
- ip_set_max = i;
+ index = inst->ip_set_max;
+ inst->ip_set_max = i;
kfree(tmp);
ret = 0;
} else if (ret)
@@ -834,7 +928,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
* Finally! Add our shiny new set to the list, and be done.
*/
pr_debug("create: '%s' created with index %u!\n", set->name, index);
- nfnl_set(index) = set;
+ nfnl_set(inst, index) = set;
return ret;
@@ -857,12 +951,12 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
};
static void
-ip_set_destroy_set(ip_set_id_t index)
+ip_set_destroy_set(struct ip_set_net *inst, ip_set_id_t index)
{
- struct ip_set *set = nfnl_set(index);
+ struct ip_set *set = nfnl_set(inst, index);
pr_debug("set: %s\n", set->name);
- nfnl_set(index) = NULL;
+ nfnl_set(inst, index) = NULL;
/* Must call it without holding any lock */
set->variant->destroy(set);
@@ -875,6 +969,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[])
{
+ struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
struct ip_set *s;
ip_set_id_t i;
int ret = 0;
@@ -894,21 +989,22 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
*/
read_lock_bh(&ip_set_ref_lock);
if (!attr[IPSET_ATTR_SETNAME]) {
- for (i = 0; i < ip_set_max; i++) {
- s = nfnl_set(i);
+ for (i = 0; i < inst->ip_set_max; i++) {
+ s = nfnl_set(inst, i);
if (s != NULL && s->ref) {
ret = -IPSET_ERR_BUSY;
goto out;
}
}
read_unlock_bh(&ip_set_ref_lock);
- for (i = 0; i < ip_set_max; i++) {
- s = nfnl_set(i);
+ for (i = 0; i < inst->ip_set_max; i++) {
+ s = nfnl_set(inst, i);
if (s != NULL)
- ip_set_destroy_set(i);
+ ip_set_destroy_set(inst, i);
}
} else {
- s = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME]), &i);
+ s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
+ &i);
if (s == NULL) {
ret = -ENOENT;
goto out;
@@ -918,7 +1014,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
}
read_unlock_bh(&ip_set_ref_lock);
- ip_set_destroy_set(i);
+ ip_set_destroy_set(inst, i);
}
return 0;
out:
@@ -943,6 +1039,7 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[])
{
+ struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
struct ip_set *s;
ip_set_id_t i;
@@ -950,13 +1047,13 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
return -IPSET_ERR_PROTOCOL;
if (!attr[IPSET_ATTR_SETNAME]) {
- for (i = 0; i < ip_set_max; i++) {
- s = nfnl_set(i);
+ for (i = 0; i < inst->ip_set_max; i++) {
+ s = nfnl_set(inst, i);
if (s != NULL)
ip_set_flush_set(s);
}
} else {
- s = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ s = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
if (s == NULL)
return -ENOENT;
@@ -982,6 +1079,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[])
{
+ struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
struct ip_set *set, *s;
const char *name2;
ip_set_id_t i;
@@ -992,7 +1090,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
attr[IPSET_ATTR_SETNAME2] == NULL))
return -IPSET_ERR_PROTOCOL;
- set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
if (set == NULL)
return -ENOENT;
@@ -1003,8 +1101,8 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
}
name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
- for (i = 0; i < ip_set_max; i++) {
- s = nfnl_set(i);
+ for (i = 0; i < inst->ip_set_max; i++) {
+ s = nfnl_set(inst, i);
if (s != NULL && STREQ(s->name, name2)) {
ret = -IPSET_ERR_EXIST_SETNAME2;
goto out;
@@ -1031,6 +1129,7 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[])
{
+ struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
struct ip_set *from, *to;
ip_set_id_t from_id, to_id;
char from_name[IPSET_MAXNAMELEN];
@@ -1040,11 +1139,13 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
attr[IPSET_ATTR_SETNAME2] == NULL))
return -IPSET_ERR_PROTOCOL;
- from = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME]), &from_id);
+ from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
+ &from_id);
if (from == NULL)
return -ENOENT;
- to = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME2]), &to_id);
+ to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]),
+ &to_id);
if (to == NULL)
return -IPSET_ERR_EXIST_SETNAME2;
@@ -1061,8 +1162,8 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
write_lock_bh(&ip_set_ref_lock);
swap(from->ref, to->ref);
- nfnl_set(from_id) = to;
- nfnl_set(to_id) = from;
+ nfnl_set(inst, from_id) = to;
+ nfnl_set(inst, to_id) = from;
write_unlock_bh(&ip_set_ref_lock);
return 0;
@@ -1081,9 +1182,10 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
static int
ip_set_dump_done(struct netlink_callback *cb)
{
+ struct ip_set_net *inst = (struct ip_set_net *)cb->data;
if (cb->args[2]) {
- pr_debug("release set %s\n", nfnl_set(cb->args[1])->name);
- ip_set_put_byindex((ip_set_id_t) cb->args[1]);
+ pr_debug("release set %s\n", nfnl_set(inst, cb->args[1])->name);
+ __ip_set_put_byindex(inst, (ip_set_id_t) cb->args[1]);
}
return 0;
}
@@ -1109,6 +1211,7 @@ dump_init(struct netlink_callback *cb)
struct nlattr *attr = (void *)nlh + min_len;
u32 dump_type;
ip_set_id_t index;
+ struct ip_set_net *inst = (struct ip_set_net *)cb->data;
/* Second pass, so parser can't fail */
nla_parse(cda, IPSET_ATTR_CMD_MAX,
@@ -1122,7 +1225,7 @@ dump_init(struct netlink_callback *cb)
if (cda[IPSET_ATTR_SETNAME]) {
struct ip_set *set;
- set = find_set_and_id(nla_data(cda[IPSET_ATTR_SETNAME]),
+ set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
&index);
if (set == NULL)
return -ENOENT;
@@ -1150,6 +1253,7 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
u32 dump_type, dump_flags;
int ret = 0;
+ struct ip_set_net *inst = (struct ip_set_net *)cb->data;
if (!cb->args[0]) {
ret = dump_init(cb);
@@ -1163,18 +1267,18 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
}
}
- if (cb->args[1] >= ip_set_max)
+ if (cb->args[1] >= inst->ip_set_max)
goto out;
dump_type = DUMP_TYPE(cb->args[0]);
dump_flags = DUMP_FLAGS(cb->args[0]);
- max = dump_type == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+ max = dump_type == DUMP_ONE ? cb->args[1] + 1 : inst->ip_set_max;
dump_last:
pr_debug("args[0]: %u %u args[1]: %ld\n",
dump_type, dump_flags, cb->args[1]);
for (; cb->args[1] < max; cb->args[1]++) {
index = (ip_set_id_t) cb->args[1];
- set = nfnl_set(index);
+ set = nfnl_set(inst, index);
if (set == NULL) {
if (dump_type == DUMP_ONE) {
ret = -ENOENT;
@@ -1252,8 +1356,8 @@ next_set:
release_refcount:
/* If there was an error or set is done, release set */
if (ret || !cb->args[2]) {
- pr_debug("release set %s\n", nfnl_set(index)->name);
- ip_set_put_byindex(index);
+ pr_debug("release set %s\n", nfnl_set(inst, index)->name);
+ __ip_set_put_byindex(inst, index);
cb->args[2] = 0;
}
out:
@@ -1271,6 +1375,8 @@ ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[])
{
+ struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
+
if (unlikely(protocol_failed(attr)))
return -IPSET_ERR_PROTOCOL;
@@ -1278,6 +1384,7 @@ ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
struct netlink_dump_control c = {
.dump = ip_set_dump_start,
.done = ip_set_dump_done,
+ .data = (void *)inst
};
return netlink_dump_start(ctnl, skb, nlh, &c);
}
@@ -1356,6 +1463,7 @@ ip_set_uadd(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[])
{
+ struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
struct ip_set *set;
struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
const struct nlattr *nla;
@@ -1374,7 +1482,7 @@ ip_set_uadd(struct sock *ctnl, struct sk_buff *skb,
attr[IPSET_ATTR_LINENO] == NULL))))
return -IPSET_ERR_PROTOCOL;
- set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
if (set == NULL)
return -ENOENT;
@@ -1410,6 +1518,7 @@ ip_set_udel(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[])
{
+ struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
struct ip_set *set;
struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
const struct nlattr *nla;
@@ -1428,7 +1537,7 @@ ip_set_udel(struct sock *ctnl, struct sk_buff *skb,
attr[IPSET_ATTR_LINENO] == NULL))))
return -IPSET_ERR_PROTOCOL;
- set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
if (set == NULL)
return -ENOENT;
@@ -1464,6 +1573,7 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[])
{
+ struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
struct ip_set *set;
struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
int ret = 0;
@@ -1474,7 +1584,7 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
!flag_nested(attr[IPSET_ATTR_DATA])))
return -IPSET_ERR_PROTOCOL;
- set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
if (set == NULL)
return -ENOENT;
@@ -1499,6 +1609,7 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[])
{
+ struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
const struct ip_set *set;
struct sk_buff *skb2;
struct nlmsghdr *nlh2;
@@ -1508,7 +1619,7 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
attr[IPSET_ATTR_SETNAME] == NULL))
return -IPSET_ERR_PROTOCOL;
- set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
if (set == NULL)
return -ENOENT;
@@ -1733,8 +1844,10 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
unsigned int *op;
void *data;
int copylen = *len, ret = 0;
+ struct net *net = sock_net(sk);
+ struct ip_set_net *inst = ip_set_pernet(net);
- if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (optval != SO_IP_SET)
return -EBADF;
@@ -1783,22 +1896,39 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
}
req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
nfnl_lock(NFNL_SUBSYS_IPSET);
- find_set_and_id(req_get->set.name, &id);
+ find_set_and_id(inst, req_get->set.name, &id);
req_get->set.index = id;
nfnl_unlock(NFNL_SUBSYS_IPSET);
goto copy;
}
+ case IP_SET_OP_GET_FNAME: {
+ struct ip_set_req_get_set_family *req_get = data;
+ ip_set_id_t id;
+
+ if (*len != sizeof(struct ip_set_req_get_set_family)) {
+ ret = -EINVAL;
+ goto done;
+ }
+ req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
+ nfnl_lock(NFNL_SUBSYS_IPSET);
+ find_set_and_id(inst, req_get->set.name, &id);
+ req_get->set.index = id;
+ if (id != IPSET_INVALID_ID)
+ req_get->family = nfnl_set(inst, id)->family;
+ nfnl_unlock(NFNL_SUBSYS_IPSET);
+ goto copy;
+ }
case IP_SET_OP_GET_BYINDEX: {
struct ip_set_req_get_set *req_get = data;
struct ip_set *set;
if (*len != sizeof(struct ip_set_req_get_set) ||
- req_get->set.index >= ip_set_max) {
+ req_get->set.index >= inst->ip_set_max) {
ret = -EINVAL;
goto done;
}
nfnl_lock(NFNL_SUBSYS_IPSET);
- set = nfnl_set(req_get->set.index);
+ set = nfnl_set(inst, req_get->set.index);
strncpy(req_get->set.name, set ? set->name : "",
IPSET_MAXNAMELEN);
nfnl_unlock(NFNL_SUBSYS_IPSET);
@@ -1827,49 +1957,82 @@ static struct nf_sockopt_ops so_set __read_mostly = {
.owner = THIS_MODULE,
};
-static int __init
-ip_set_init(void)
+static int __net_init
+ip_set_net_init(struct net *net)
{
+ struct ip_set_net *inst = ip_set_pernet(net);
+
struct ip_set **list;
- int ret;
- if (max_sets)
- ip_set_max = max_sets;
- if (ip_set_max >= IPSET_INVALID_ID)
- ip_set_max = IPSET_INVALID_ID - 1;
+ inst->ip_set_max = max_sets ? max_sets : CONFIG_IP_SET_MAX;
+ if (inst->ip_set_max >= IPSET_INVALID_ID)
+ inst->ip_set_max = IPSET_INVALID_ID - 1;
- list = kzalloc(sizeof(struct ip_set *) * ip_set_max, GFP_KERNEL);
+ list = kzalloc(sizeof(struct ip_set *) * inst->ip_set_max, GFP_KERNEL);
if (!list)
return -ENOMEM;
+ inst->is_deleted = 0;
+ rcu_assign_pointer(inst->ip_set_list, list);
+ pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+ return 0;
+}
+
+static void __net_exit
+ip_set_net_exit(struct net *net)
+{
+ struct ip_set_net *inst = ip_set_pernet(net);
+
+ struct ip_set *set = NULL;
+ ip_set_id_t i;
+
+ inst->is_deleted = 1; /* flag for ip_set_nfnl_put */
+
+ for (i = 0; i < inst->ip_set_max; i++) {
+ set = nfnl_set(inst, i);
+ if (set != NULL)
+ ip_set_destroy_set(inst, i);
+ }
+ kfree(rcu_dereference_protected(inst->ip_set_list, 1));
+}
+
+static struct pernet_operations ip_set_net_ops = {
+ .init = ip_set_net_init,
+ .exit = ip_set_net_exit,
+ .id = &ip_set_net_id,
+ .size = sizeof(struct ip_set_net)
+};
+
- rcu_assign_pointer(ip_set_list, list);
- ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+static int __init
+ip_set_init(void)
+{
+ int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
if (ret != 0) {
pr_err("ip_set: cannot register with nfnetlink.\n");
- kfree(list);
return ret;
}
ret = nf_register_sockopt(&so_set);
if (ret != 0) {
pr_err("SO_SET registry failed: %d\n", ret);
nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
- kfree(list);
return ret;
}
-
- pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+ ret = register_pernet_subsys(&ip_set_net_ops);
+ if (ret) {
+ pr_err("ip_set: cannot register pernet_subsys.\n");
+ nf_unregister_sockopt(&so_set);
+ nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+ return ret;
+ }
return 0;
}
static void __exit
ip_set_fini(void)
{
- struct ip_set **list = rcu_dereference_protected(ip_set_list, 1);
-
- /* There can't be any existing set */
+ unregister_pernet_subsys(&ip_set_net_ops);
nf_unregister_sockopt(&so_set);
nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
- kfree(list);
pr_debug("these are the famous last words\n");
}
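The main change in ip_set_core.c is the move from module-global state (ip_set_list, ip_set_max) to per-network-namespace state. Condensed from the hunks above (kernel context, not compilable on its own), the pattern is the usual pernet one: the instance lives in net_generic() storage and every entry point first resolves it from a struct net.

	struct ip_set_net {
		struct ip_set * __rcu *ip_set_list;	/* all individual sets */
		ip_set_id_t ip_set_max;			/* max number of sets */
		int is_deleted;				/* set by ip_set_net_exit() */
	};

	static int ip_set_net_id __read_mostly;

	static inline struct ip_set_net *ip_set_pernet(struct net *net)
	{
		return net_generic(net, ip_set_net_id);
	}

	static struct pernet_operations ip_set_net_ops = {
		.init	= ip_set_net_init,	/* allocate ip_set_list for this netns */
		.exit	= ip_set_net_exit,	/* destroy remaining sets, free the list */
		.id	= &ip_set_net_id,
		.size	= sizeof(struct ip_set_net),
	};

	/* Every netlink/sockopt handler then resolves the instance, e.g.: */
	struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));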
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index dac156f819ac..29fb01ddff93 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -102,9 +102,25 @@ ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
int protocol = iph->protocol;
/* See comments at tcp_match in ip_tables.c */
- if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET))
+ if (protocol <= 0)
return false;
+ if (ntohs(iph->frag_off) & IP_OFFSET)
+ switch (protocol) {
+ case IPPROTO_TCP:
+ case IPPROTO_SCTP:
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ case IPPROTO_ICMP:
+ /* Port info not available for fragment offset > 0 */
+ return false;
+ default:
+ /* Other protocols don't have ports,
+ so we can match fragments */
+ *proto = protocol;
+ return true;
+ }
+
return get_port(skb, protocol, protooff, src, port, proto);
}
EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
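The rewritten fragment check above distinguishes port-carrying protocols (no L4 header in a non-initial fragment, so no match is possible) from port-less protocols (the protocol number alone is enough). A small user-space sketch of that decision; the helper name is made up and the protocol numbers are restated only to keep the snippet self-contained:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	enum { P_ICMP = 1, P_TCP = 6, P_UDP = 17, P_SCTP = 132, P_UDPLITE = 136 };

	/* For a non-initial fragment there is no transport header, so
	 * port-based protocols cannot match; port-less protocols still can. */
	static bool fragment_can_match(int protocol, bool nonfirst_fragment,
				       uint8_t *proto)
	{
		if (!nonfirst_fragment)
			return true;		/* full header present, go parse ports */
		switch (protocol) {
		case P_TCP: case P_SCTP: case P_UDP: case P_UDPLITE: case P_ICMP:
			return false;		/* port info not available */
		default:
			*proto = (uint8_t)protocol;
			return true;		/* match on protocol alone */
		}
	}

	int main(void)
	{
		uint8_t proto = 0;

		printf("%d %d\n", fragment_can_match(P_TCP, true, &proto),
		       fragment_can_match(47 /* GRE */, true, &proto));
		return 0;
	}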
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 707bc520d629..6a80dbd30df7 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -15,8 +15,7 @@
#define rcu_dereference_bh(p) rcu_dereference(p)
#endif
-#define CONCAT(a, b) a##b
-#define TOKEN(a, b) CONCAT(a, b)
+#define rcu_dereference_bh_nfnl(p) rcu_dereference_bh_check(p, 1)
/* Hashing which uses arrays to resolve clashing. The hash table is resized
* (doubled) when searching becomes too long.
@@ -78,10 +77,14 @@ struct htable {
#define hbucket(h, i) (&((h)->bucket[i]))
+#ifndef IPSET_NET_COUNT
+#define IPSET_NET_COUNT 1
+#endif
+
/* Book-keeping of the prefixes added to the set */
struct net_prefixes {
- u8 cidr; /* the different cidr values in the set */
- u32 nets; /* number of elements per cidr */
+ u32 nets[IPSET_NET_COUNT]; /* number of elements per cidr */
+ u8 cidr[IPSET_NET_COUNT]; /* the different cidr values in the set */
};
/* Compute the hash table size */
@@ -114,23 +117,6 @@ htable_bits(u32 hashsize)
return bits;
}
-/* Destroy the hashtable part of the set */
-static void
-ahash_destroy(struct htable *t)
-{
- struct hbucket *n;
- u32 i;
-
- for (i = 0; i < jhash_size(t->htable_bits); i++) {
- n = hbucket(t, i);
- if (n->size)
- /* FIXME: use slab cache */
- kfree(n->value);
- }
-
- ip_set_free(t);
-}
-
static int
hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
{
@@ -156,30 +142,30 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
}
#ifdef IP_SET_HASH_WITH_NETS
+#if IPSET_NET_COUNT > 1
+#define __CIDR(cidr, i) (cidr[i])
+#else
+#define __CIDR(cidr, i) (cidr)
+#endif
#ifdef IP_SET_HASH_WITH_NETS_PACKED
/* When cidr is packed with nomatch, cidr - 1 is stored in the entry */
-#define CIDR(cidr) (cidr + 1)
+#define CIDR(cidr, i) (__CIDR(cidr, i) + 1)
#else
-#define CIDR(cidr) (cidr)
+#define CIDR(cidr, i) (__CIDR(cidr, i))
#endif
#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
#ifdef IP_SET_HASH_WITH_MULTI
-#define NETS_LENGTH(family) (SET_HOST_MASK(family) + 1)
+#define NLEN(family) (SET_HOST_MASK(family) + 1)
#else
-#define NETS_LENGTH(family) SET_HOST_MASK(family)
+#define NLEN(family) SET_HOST_MASK(family)
#endif
#else
-#define NETS_LENGTH(family) 0
+#define NLEN(family) 0
#endif /* IP_SET_HASH_WITH_NETS */
-#define ext_timeout(e, h) \
-(unsigned long *)(((void *)(e)) + (h)->offset[IPSET_OFFSET_TIMEOUT])
-#define ext_counter(e, h) \
-(struct ip_set_counter *)(((void *)(e)) + (h)->offset[IPSET_OFFSET_COUNTER])
-
#endif /* _IP_SET_HASH_GEN_H */
/* Family dependent templates */
@@ -194,6 +180,8 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
#undef mtype_data_next
#undef mtype_elem
+#undef mtype_ahash_destroy
+#undef mtype_ext_cleanup
#undef mtype_add_cidr
#undef mtype_del_cidr
#undef mtype_ahash_memsize
@@ -220,41 +208,44 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
#undef HKEY
-#define mtype_data_equal TOKEN(MTYPE, _data_equal)
+#define mtype_data_equal IPSET_TOKEN(MTYPE, _data_equal)
#ifdef IP_SET_HASH_WITH_NETS
-#define mtype_do_data_match TOKEN(MTYPE, _do_data_match)
+#define mtype_do_data_match IPSET_TOKEN(MTYPE, _do_data_match)
#else
#define mtype_do_data_match(d) 1
#endif
-#define mtype_data_set_flags TOKEN(MTYPE, _data_set_flags)
-#define mtype_data_reset_flags TOKEN(MTYPE, _data_reset_flags)
-#define mtype_data_netmask TOKEN(MTYPE, _data_netmask)
-#define mtype_data_list TOKEN(MTYPE, _data_list)
-#define mtype_data_next TOKEN(MTYPE, _data_next)
-#define mtype_elem TOKEN(MTYPE, _elem)
-#define mtype_add_cidr TOKEN(MTYPE, _add_cidr)
-#define mtype_del_cidr TOKEN(MTYPE, _del_cidr)
-#define mtype_ahash_memsize TOKEN(MTYPE, _ahash_memsize)
-#define mtype_flush TOKEN(MTYPE, _flush)
-#define mtype_destroy TOKEN(MTYPE, _destroy)
-#define mtype_gc_init TOKEN(MTYPE, _gc_init)
-#define mtype_same_set TOKEN(MTYPE, _same_set)
-#define mtype_kadt TOKEN(MTYPE, _kadt)
-#define mtype_uadt TOKEN(MTYPE, _uadt)
+#define mtype_data_set_flags IPSET_TOKEN(MTYPE, _data_set_flags)
+#define mtype_data_reset_elem IPSET_TOKEN(MTYPE, _data_reset_elem)
+#define mtype_data_reset_flags IPSET_TOKEN(MTYPE, _data_reset_flags)
+#define mtype_data_netmask IPSET_TOKEN(MTYPE, _data_netmask)
+#define mtype_data_list IPSET_TOKEN(MTYPE, _data_list)
+#define mtype_data_next IPSET_TOKEN(MTYPE, _data_next)
+#define mtype_elem IPSET_TOKEN(MTYPE, _elem)
+#define mtype_ahash_destroy IPSET_TOKEN(MTYPE, _ahash_destroy)
+#define mtype_ext_cleanup IPSET_TOKEN(MTYPE, _ext_cleanup)
+#define mtype_add_cidr IPSET_TOKEN(MTYPE, _add_cidr)
+#define mtype_del_cidr IPSET_TOKEN(MTYPE, _del_cidr)
+#define mtype_ahash_memsize IPSET_TOKEN(MTYPE, _ahash_memsize)
+#define mtype_flush IPSET_TOKEN(MTYPE, _flush)
+#define mtype_destroy IPSET_TOKEN(MTYPE, _destroy)
+#define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init)
+#define mtype_same_set IPSET_TOKEN(MTYPE, _same_set)
+#define mtype_kadt IPSET_TOKEN(MTYPE, _kadt)
+#define mtype_uadt IPSET_TOKEN(MTYPE, _uadt)
#define mtype MTYPE
-#define mtype_elem TOKEN(MTYPE, _elem)
-#define mtype_add TOKEN(MTYPE, _add)
-#define mtype_del TOKEN(MTYPE, _del)
-#define mtype_test_cidrs TOKEN(MTYPE, _test_cidrs)
-#define mtype_test TOKEN(MTYPE, _test)
-#define mtype_expire TOKEN(MTYPE, _expire)
-#define mtype_resize TOKEN(MTYPE, _resize)
-#define mtype_head TOKEN(MTYPE, _head)
-#define mtype_list TOKEN(MTYPE, _list)
-#define mtype_gc TOKEN(MTYPE, _gc)
-#define mtype_variant TOKEN(MTYPE, _variant)
-#define mtype_data_match TOKEN(MTYPE, _data_match)
+#define mtype_elem IPSET_TOKEN(MTYPE, _elem)
+#define mtype_add IPSET_TOKEN(MTYPE, _add)
+#define mtype_del IPSET_TOKEN(MTYPE, _del)
+#define mtype_test_cidrs IPSET_TOKEN(MTYPE, _test_cidrs)
+#define mtype_test IPSET_TOKEN(MTYPE, _test)
+#define mtype_expire IPSET_TOKEN(MTYPE, _expire)
+#define mtype_resize IPSET_TOKEN(MTYPE, _resize)
+#define mtype_head IPSET_TOKEN(MTYPE, _head)
+#define mtype_list IPSET_TOKEN(MTYPE, _list)
+#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
+#define mtype_variant IPSET_TOKEN(MTYPE, _variant)
+#define mtype_data_match IPSET_TOKEN(MTYPE, _data_match)
#ifndef HKEY_DATALEN
#define HKEY_DATALEN sizeof(struct mtype_elem)
@@ -269,13 +260,10 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
/* The generic hash structure */
struct htype {
- struct htable *table; /* the hash table */
+ struct htable __rcu *table; /* the hash table */
u32 maxelem; /* max elements in the hash */
u32 elements; /* current element (vs timeout) */
u32 initval; /* random jhash init value */
- u32 timeout; /* timeout value, if enabled */
- size_t dsize; /* data struct size */
- size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
struct timer_list gc; /* garbage collection when timeout enabled */
struct mtype_elem next; /* temporary storage for uadd */
#ifdef IP_SET_HASH_WITH_MULTI
@@ -297,49 +285,49 @@ struct htype {
/* Network cidr size book keeping when the hash stores different
* sized networks */
static void
-mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length)
+mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
{
int i, j;
/* Add in increasing prefix order, so larger cidr first */
- for (i = 0, j = -1; i < nets_length && h->nets[i].nets; i++) {
+ for (i = 0, j = -1; i < nets_length && h->nets[i].nets[n]; i++) {
if (j != -1)
continue;
- else if (h->nets[i].cidr < cidr)
+ else if (h->nets[i].cidr[n] < cidr)
j = i;
- else if (h->nets[i].cidr == cidr) {
- h->nets[i].nets++;
+ else if (h->nets[i].cidr[n] == cidr) {
+ h->nets[i].nets[n]++;
return;
}
}
if (j != -1) {
for (; i > j; i--) {
- h->nets[i].cidr = h->nets[i - 1].cidr;
- h->nets[i].nets = h->nets[i - 1].nets;
+ h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
+ h->nets[i].nets[n] = h->nets[i - 1].nets[n];
}
}
- h->nets[i].cidr = cidr;
- h->nets[i].nets = 1;
+ h->nets[i].cidr[n] = cidr;
+ h->nets[i].nets[n] = 1;
}
static void
-mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length)
+mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
{
u8 i, j, net_end = nets_length - 1;
for (i = 0; i < nets_length; i++) {
- if (h->nets[i].cidr != cidr)
+ if (h->nets[i].cidr[n] != cidr)
continue;
- if (h->nets[i].nets > 1 || i == net_end ||
- h->nets[i + 1].nets == 0) {
- h->nets[i].nets--;
+ if (h->nets[i].nets[n] > 1 || i == net_end ||
+ h->nets[i + 1].nets[n] == 0) {
+ h->nets[i].nets[n]--;
return;
}
- for (j = i; j < net_end && h->nets[j].nets; j++) {
- h->nets[j].cidr = h->nets[j + 1].cidr;
- h->nets[j].nets = h->nets[j + 1].nets;
+ for (j = i; j < net_end && h->nets[j].nets[n]; j++) {
+ h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
+ h->nets[j].nets[n] = h->nets[j + 1].nets[n];
}
- h->nets[j].nets = 0;
+ h->nets[j].nets[n] = 0;
return;
}
}
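With two-address hash types each element carries two prefix lengths, so the cidr/nets bookkeeping above becomes per-dimension arrays indexed by n (0..IPSET_NET_COUNT-1). A simplified user-space sketch of the counting part only; the kernel version additionally keeps the list sorted by decreasing cidr, and the names here are illustrative:

	#include <stdio.h>

	#define NET_COUNT	2
	#define NLEN		33	/* 0..32 for IPv4 plus the "multi" slot */

	struct net_prefixes {
		unsigned int nets[NET_COUNT];
		unsigned char cidr[NET_COUNT];
	};

	static void add_cidr(struct net_prefixes *p, unsigned char cidr, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < NLEN && p[i].nets[n]; i++)
			if (p[i].cidr[n] == cidr) {
				p[i].nets[n]++;	/* another element with this prefix */
				return;
			}
		if (i == NLEN)
			return;			/* table full */
		p[i].cidr[n] = cidr;
		p[i].nets[n] = 1;
	}

	int main(void)
	{
		static struct net_prefixes nets[NLEN];

		add_cidr(nets, 24, 0);	/* first address of the pair */
		add_cidr(nets, 32, 1);	/* second address of the pair */
		printf("%u %u\n", nets[0].nets[0], nets[0].nets[1]);
		return 0;
	}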
@@ -347,10 +335,10 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length)
/* Calculate the actual memory size of the set data */
static size_t
-mtype_ahash_memsize(const struct htype *h, u8 nets_length)
+mtype_ahash_memsize(const struct htype *h, const struct htable *t,
+ u8 nets_length, size_t dsize)
{
u32 i;
- struct htable *t = h->table;
size_t memsize = sizeof(*h)
+ sizeof(*t)
#ifdef IP_SET_HASH_WITH_NETS
@@ -359,35 +347,70 @@ mtype_ahash_memsize(const struct htype *h, u8 nets_length)
+ jhash_size(t->htable_bits) * sizeof(struct hbucket);
for (i = 0; i < jhash_size(t->htable_bits); i++)
- memsize += t->bucket[i].size * h->dsize;
+ memsize += t->bucket[i].size * dsize;
return memsize;
}
+/* Get the ith element from the array block n */
+#define ahash_data(n, i, dsize) \
+ ((struct mtype_elem *)((n)->value + ((i) * (dsize))))
+
+static void
+mtype_ext_cleanup(struct ip_set *set, struct hbucket *n)
+{
+ int i;
+
+ for (i = 0; i < n->pos; i++)
+ ip_set_ext_destroy(set, ahash_data(n, i, set->dsize));
+}
+
/* Flush a hash type of set: destroy all elements */
static void
mtype_flush(struct ip_set *set)
{
struct htype *h = set->data;
- struct htable *t = h->table;
+ struct htable *t;
struct hbucket *n;
u32 i;
+ t = rcu_dereference_bh_nfnl(h->table);
for (i = 0; i < jhash_size(t->htable_bits); i++) {
n = hbucket(t, i);
if (n->size) {
+ if (set->extensions & IPSET_EXT_DESTROY)
+ mtype_ext_cleanup(set, n);
n->size = n->pos = 0;
/* FIXME: use slab cache */
kfree(n->value);
}
}
#ifdef IP_SET_HASH_WITH_NETS
- memset(h->nets, 0, sizeof(struct net_prefixes)
- * NETS_LENGTH(set->family));
+ memset(h->nets, 0, sizeof(struct net_prefixes) * NLEN(set->family));
#endif
h->elements = 0;
}
+/* Destroy the hashtable part of the set */
+static void
+mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
+{
+ struct hbucket *n;
+ u32 i;
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++) {
+ n = hbucket(t, i);
+ if (n->size) {
+ if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
+ mtype_ext_cleanup(set, n);
+ /* FIXME: use slab cache */
+ kfree(n->value);
+ }
+ }
+
+ ip_set_free(t);
+}
+
/* Destroy a hash type of set */
static void
mtype_destroy(struct ip_set *set)
@@ -397,7 +420,7 @@ mtype_destroy(struct ip_set *set)
if (set->extensions & IPSET_EXT_TIMEOUT)
del_timer_sync(&h->gc);
- ahash_destroy(h->table);
+ mtype_ahash_destroy(set, rcu_dereference_bh_nfnl(h->table), true);
#ifdef IP_SET_HASH_WITH_RBTREE
rbtree_destroy(&h->rbtree);
#endif
@@ -414,10 +437,10 @@ mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
init_timer(&h->gc);
h->gc.data = (unsigned long) set;
h->gc.function = gc;
- h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+ h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
add_timer(&h->gc);
pr_debug("gc initialized, run in every %u\n",
- IPSET_GC_PERIOD(h->timeout));
+ IPSET_GC_PERIOD(set->timeout));
}
static bool
@@ -428,37 +451,40 @@ mtype_same_set(const struct ip_set *a, const struct ip_set *b)
/* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem &&
- x->timeout == y->timeout &&
+ a->timeout == b->timeout &&
#ifdef IP_SET_HASH_WITH_NETMASK
x->netmask == y->netmask &&
#endif
a->extensions == b->extensions;
}
-/* Get the ith element from the array block n */
-#define ahash_data(n, i, dsize) \
- ((struct mtype_elem *)((n)->value + ((i) * (dsize))))
-
/* Delete expired elements from the hashtable */
static void
-mtype_expire(struct htype *h, u8 nets_length, size_t dsize)
+mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
{
- struct htable *t = h->table;
+ struct htable *t;
struct hbucket *n;
struct mtype_elem *data;
u32 i;
int j;
+#ifdef IP_SET_HASH_WITH_NETS
+ u8 k;
+#endif
+ rcu_read_lock_bh();
+ t = rcu_dereference_bh(h->table);
for (i = 0; i < jhash_size(t->htable_bits); i++) {
n = hbucket(t, i);
for (j = 0; j < n->pos; j++) {
data = ahash_data(n, j, dsize);
- if (ip_set_timeout_expired(ext_timeout(data, h))) {
+ if (ip_set_timeout_expired(ext_timeout(data, set))) {
pr_debug("expired %u/%u\n", i, j);
#ifdef IP_SET_HASH_WITH_NETS
- mtype_del_cidr(h, CIDR(data->cidr),
- nets_length);
+ for (k = 0; k < IPSET_NET_COUNT; k++)
+ mtype_del_cidr(h, CIDR(data->cidr, k),
+ nets_length, k);
#endif
+ ip_set_ext_destroy(set, data);
if (j != n->pos - 1)
/* Not last one */
memcpy(data,
@@ -481,6 +507,7 @@ mtype_expire(struct htype *h, u8 nets_length, size_t dsize)
n->value = tmp;
}
}
+ rcu_read_unlock_bh();
}
static void
@@ -491,10 +518,10 @@ mtype_gc(unsigned long ul_set)
pr_debug("called\n");
write_lock_bh(&set->lock);
- mtype_expire(h, NETS_LENGTH(set->family), h->dsize);
+ mtype_expire(set, h, NLEN(set->family), set->dsize);
write_unlock_bh(&set->lock);
- h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+ h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
add_timer(&h->gc);
}
@@ -505,7 +532,7 @@ static int
mtype_resize(struct ip_set *set, bool retried)
{
struct htype *h = set->data;
- struct htable *t, *orig = h->table;
+ struct htable *t, *orig = rcu_dereference_bh_nfnl(h->table);
u8 htable_bits = orig->htable_bits;
#ifdef IP_SET_HASH_WITH_NETS
u8 flags;
@@ -520,8 +547,7 @@ mtype_resize(struct ip_set *set, bool retried)
if (SET_WITH_TIMEOUT(set) && !retried) {
i = h->elements;
write_lock_bh(&set->lock);
- mtype_expire(set->data, NETS_LENGTH(set->family),
- h->dsize);
+ mtype_expire(set, set->data, NLEN(set->family), set->dsize);
write_unlock_bh(&set->lock);
if (h->elements < i)
return 0;
@@ -548,25 +574,25 @@ retry:
for (i = 0; i < jhash_size(orig->htable_bits); i++) {
n = hbucket(orig, i);
for (j = 0; j < n->pos; j++) {
- data = ahash_data(n, j, h->dsize);
+ data = ahash_data(n, j, set->dsize);
#ifdef IP_SET_HASH_WITH_NETS
flags = 0;
mtype_data_reset_flags(data, &flags);
#endif
m = hbucket(t, HKEY(data, h->initval, htable_bits));
- ret = hbucket_elem_add(m, AHASH_MAX(h), h->dsize);
+ ret = hbucket_elem_add(m, AHASH_MAX(h), set->dsize);
if (ret < 0) {
#ifdef IP_SET_HASH_WITH_NETS
mtype_data_reset_flags(data, &flags);
#endif
read_unlock_bh(&set->lock);
- ahash_destroy(t);
+ mtype_ahash_destroy(set, t, false);
if (ret == -EAGAIN)
goto retry;
return ret;
}
- d = ahash_data(m, m->pos++, h->dsize);
- memcpy(d, data, h->dsize);
+ d = ahash_data(m, m->pos++, set->dsize);
+ memcpy(d, data, set->dsize);
#ifdef IP_SET_HASH_WITH_NETS
mtype_data_reset_flags(d, &flags);
#endif
@@ -581,7 +607,7 @@ retry:
pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
orig->htable_bits, orig, t->htable_bits, t);
- ahash_destroy(orig);
+ mtype_ahash_destroy(set, orig, false);
return 0;
}
@@ -604,7 +630,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem)
/* FIXME: when set is full, we slow down here */
- mtype_expire(h, NETS_LENGTH(set->family), h->dsize);
+ mtype_expire(set, h, NLEN(set->family), set->dsize);
if (h->elements >= h->maxelem) {
if (net_ratelimit())
@@ -618,11 +644,11 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
key = HKEY(value, h->initval, t->htable_bits);
n = hbucket(t, key);
for (i = 0; i < n->pos; i++) {
- data = ahash_data(n, i, h->dsize);
+ data = ahash_data(n, i, set->dsize);
if (mtype_data_equal(data, d, &multi)) {
if (flag_exist ||
(SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(data, h)))) {
+ ip_set_timeout_expired(ext_timeout(data, set)))) {
/* Just the extensions could be overwritten */
j = i;
goto reuse_slot;
@@ -633,30 +659,37 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
}
/* Reuse first timed out entry */
if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(data, h)) &&
+ ip_set_timeout_expired(ext_timeout(data, set)) &&
j != AHASH_MAX(h) + 1)
j = i;
}
reuse_slot:
if (j != AHASH_MAX(h) + 1) {
/* Fill out reused slot */
- data = ahash_data(n, j, h->dsize);
+ data = ahash_data(n, j, set->dsize);
#ifdef IP_SET_HASH_WITH_NETS
- mtype_del_cidr(h, CIDR(data->cidr), NETS_LENGTH(set->family));
- mtype_add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
+ for (i = 0; i < IPSET_NET_COUNT; i++) {
+ mtype_del_cidr(h, CIDR(data->cidr, i),
+ NLEN(set->family), i);
+ mtype_add_cidr(h, CIDR(d->cidr, i),
+ NLEN(set->family), i);
+ }
#endif
+ ip_set_ext_destroy(set, data);
} else {
/* Use/create a new slot */
TUNE_AHASH_MAX(h, multi);
- ret = hbucket_elem_add(n, AHASH_MAX(h), h->dsize);
+ ret = hbucket_elem_add(n, AHASH_MAX(h), set->dsize);
if (ret != 0) {
if (ret == -EAGAIN)
mtype_data_next(&h->next, d);
goto out;
}
- data = ahash_data(n, n->pos++, h->dsize);
+ data = ahash_data(n, n->pos++, set->dsize);
#ifdef IP_SET_HASH_WITH_NETS
- mtype_add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
+ for (i = 0; i < IPSET_NET_COUNT; i++)
+ mtype_add_cidr(h, CIDR(d->cidr, i), NLEN(set->family),
+ i);
#endif
h->elements++;
}
@@ -665,9 +698,11 @@ reuse_slot:
mtype_data_set_flags(data, flags);
#endif
if (SET_WITH_TIMEOUT(set))
- ip_set_timeout_set(ext_timeout(data, h), ext->timeout);
+ ip_set_timeout_set(ext_timeout(data, set), ext->timeout);
if (SET_WITH_COUNTER(set))
- ip_set_init_counter(ext_counter(data, h), ext);
+ ip_set_init_counter(ext_counter(data, set), ext);
+ if (SET_WITH_COMMENT(set))
+ ip_set_init_comment(ext_comment(data, set), ext);
out:
rcu_read_unlock_bh();
@@ -682,47 +717,60 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{
struct htype *h = set->data;
- struct htable *t = h->table;
+ struct htable *t;
const struct mtype_elem *d = value;
struct mtype_elem *data;
struct hbucket *n;
- int i;
+ int i, ret = -IPSET_ERR_EXIST;
+#ifdef IP_SET_HASH_WITH_NETS
+ u8 j;
+#endif
u32 key, multi = 0;
+ rcu_read_lock_bh();
+ t = rcu_dereference_bh(h->table);
key = HKEY(value, h->initval, t->htable_bits);
n = hbucket(t, key);
for (i = 0; i < n->pos; i++) {
- data = ahash_data(n, i, h->dsize);
+ data = ahash_data(n, i, set->dsize);
if (!mtype_data_equal(data, d, &multi))
continue;
if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(data, h)))
- return -IPSET_ERR_EXIST;
+ ip_set_timeout_expired(ext_timeout(data, set)))
+ goto out;
if (i != n->pos - 1)
/* Not last one */
- memcpy(data, ahash_data(n, n->pos - 1, h->dsize),
- h->dsize);
+ memcpy(data, ahash_data(n, n->pos - 1, set->dsize),
+ set->dsize);
n->pos--;
h->elements--;
#ifdef IP_SET_HASH_WITH_NETS
- mtype_del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
+ for (j = 0; j < IPSET_NET_COUNT; j++)
+ mtype_del_cidr(h, CIDR(d->cidr, j), NLEN(set->family),
+ j);
#endif
+ ip_set_ext_destroy(set, data);
if (n->pos + AHASH_INIT_SIZE < n->size) {
void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
- * h->dsize,
+ * set->dsize,
GFP_ATOMIC);
- if (!tmp)
- return 0;
+ if (!tmp) {
+ ret = 0;
+ goto out;
+ }
n->size -= AHASH_INIT_SIZE;
- memcpy(tmp, n->value, n->size * h->dsize);
+ memcpy(tmp, n->value, n->size * set->dsize);
kfree(n->value);
n->value = tmp;
}
- return 0;
+ ret = 0;
+ goto out;
}
- return -IPSET_ERR_EXIST;
+out:
+ rcu_read_unlock_bh();
+ return ret;
}
static inline int
@@ -730,8 +778,7 @@ mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
struct ip_set_ext *mext, struct ip_set *set, u32 flags)
{
if (SET_WITH_COUNTER(set))
- ip_set_update_counter(ext_counter(data,
- (struct htype *)(set->data)),
+ ip_set_update_counter(ext_counter(data, set),
ext, mext, flags);
return mtype_do_data_match(data);
}
@@ -745,25 +792,38 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
struct ip_set_ext *mext, u32 flags)
{
struct htype *h = set->data;
- struct htable *t = h->table;
+ struct htable *t = rcu_dereference_bh(h->table);
struct hbucket *n;
struct mtype_elem *data;
+#if IPSET_NET_COUNT == 2
+ struct mtype_elem orig = *d;
+ int i, j = 0, k;
+#else
int i, j = 0;
+#endif
u32 key, multi = 0;
- u8 nets_length = NETS_LENGTH(set->family);
+ u8 nets_length = NLEN(set->family);
pr_debug("test by nets\n");
- for (; j < nets_length && h->nets[j].nets && !multi; j++) {
- mtype_data_netmask(d, h->nets[j].cidr);
+ for (; j < nets_length && h->nets[j].nets[0] && !multi; j++) {
+#if IPSET_NET_COUNT == 2
+ mtype_data_reset_elem(d, &orig);
+ mtype_data_netmask(d, h->nets[j].cidr[0], false);
+ for (k = 0; k < nets_length && h->nets[k].nets[1] && !multi;
+ k++) {
+ mtype_data_netmask(d, h->nets[k].cidr[1], true);
+#else
+ mtype_data_netmask(d, h->nets[j].cidr[0]);
+#endif
key = HKEY(d, h->initval, t->htable_bits);
n = hbucket(t, key);
for (i = 0; i < n->pos; i++) {
- data = ahash_data(n, i, h->dsize);
+ data = ahash_data(n, i, set->dsize);
if (!mtype_data_equal(data, d, &multi))
continue;
if (SET_WITH_TIMEOUT(set)) {
if (!ip_set_timeout_expired(
- ext_timeout(data, h)))
+ ext_timeout(data, set)))
return mtype_data_match(data, ext,
mext, set,
flags);
@@ -774,6 +834,9 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
return mtype_data_match(data, ext,
mext, set, flags);
}
+#if IPSET_NET_COUNT == 2
+ }
+#endif
}
return 0;
}
@@ -785,30 +848,41 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{
struct htype *h = set->data;
- struct htable *t = h->table;
+ struct htable *t;
struct mtype_elem *d = value;
struct hbucket *n;
struct mtype_elem *data;
- int i;
+ int i, ret = 0;
u32 key, multi = 0;
+ rcu_read_lock_bh();
+ t = rcu_dereference_bh(h->table);
#ifdef IP_SET_HASH_WITH_NETS
/* If we test an IP address and not a network address,
* try all possible network sizes */
- if (CIDR(d->cidr) == SET_HOST_MASK(set->family))
- return mtype_test_cidrs(set, d, ext, mext, flags);
+ for (i = 0; i < IPSET_NET_COUNT; i++)
+ if (CIDR(d->cidr, i) != SET_HOST_MASK(set->family))
+ break;
+ if (i == IPSET_NET_COUNT) {
+ ret = mtype_test_cidrs(set, d, ext, mext, flags);
+ goto out;
+ }
#endif
key = HKEY(d, h->initval, t->htable_bits);
n = hbucket(t, key);
for (i = 0; i < n->pos; i++) {
- data = ahash_data(n, i, h->dsize);
+ data = ahash_data(n, i, set->dsize);
if (mtype_data_equal(data, d, &multi) &&
!(SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(data, h))))
- return mtype_data_match(data, ext, mext, set, flags);
+ ip_set_timeout_expired(ext_timeout(data, set)))) {
+ ret = mtype_data_match(data, ext, mext, set, flags);
+ goto out;
+ }
}
- return 0;
+out:
+ rcu_read_unlock_bh();
+ return ret;
}
/* Reply a HEADER request: fill out the header part of the set */
@@ -816,18 +890,18 @@ static int
mtype_head(struct ip_set *set, struct sk_buff *skb)
{
const struct htype *h = set->data;
+ const struct htable *t;
struct nlattr *nested;
size_t memsize;
- read_lock_bh(&set->lock);
- memsize = mtype_ahash_memsize(h, NETS_LENGTH(set->family));
- read_unlock_bh(&set->lock);
+ t = rcu_dereference_bh_nfnl(h->table);
+ memsize = mtype_ahash_memsize(h, t, NLEN(set->family), set->dsize);
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
- htonl(jhash_size(h->table->htable_bits))) ||
+ htonl(jhash_size(t->htable_bits))) ||
nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
goto nla_put_failure;
#ifdef IP_SET_HASH_WITH_NETMASK
@@ -836,12 +910,9 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
goto nla_put_failure;
#endif
if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
- nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
- ((set->extensions & IPSET_EXT_TIMEOUT) &&
- nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout))) ||
- ((set->extensions & IPSET_EXT_COUNTER) &&
- nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
- htonl(IPSET_FLAG_WITH_COUNTERS))))
+ nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
+ goto nla_put_failure;
+ if (unlikely(ip_set_put_flags(skb, set)))
goto nla_put_failure;
ipset_nest_end(skb, nested);
@@ -856,7 +927,7 @@ mtype_list(const struct ip_set *set,
struct sk_buff *skb, struct netlink_callback *cb)
{
const struct htype *h = set->data;
- const struct htable *t = h->table;
+ const struct htable *t = rcu_dereference_bh_nfnl(h->table);
struct nlattr *atd, *nested;
const struct hbucket *n;
const struct mtype_elem *e;
@@ -874,9 +945,9 @@ mtype_list(const struct ip_set *set,
n = hbucket(t, cb->args[2]);
pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n);
for (i = 0; i < n->pos; i++) {
- e = ahash_data(n, i, h->dsize);
+ e = ahash_data(n, i, set->dsize);
if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(e, h)))
+ ip_set_timeout_expired(ext_timeout(e, set)))
continue;
pr_debug("list hash %lu hbucket %p i %u, data %p\n",
cb->args[2], n, i, e);
@@ -890,13 +961,7 @@ mtype_list(const struct ip_set *set,
}
if (mtype_data_list(skb, e))
goto nla_put_failure;
- if (SET_WITH_TIMEOUT(set) &&
- nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(
- ext_timeout(e, h)))))
- goto nla_put_failure;
- if (SET_WITH_COUNTER(set) &&
- ip_set_put_counter(skb, ext_counter(e, h)))
+ if (ip_set_put_extensions(skb, set, e, true))
goto nla_put_failure;
ipset_nest_end(skb, nested);
}
@@ -909,24 +974,24 @@ mtype_list(const struct ip_set *set,
nla_put_failure:
nlmsg_trim(skb, incomplete);
- ipset_nest_end(skb, atd);
if (unlikely(first == cb->args[2])) {
pr_warning("Can't list set %s: one bucket does not fit into "
"a message. Please report it!\n", set->name);
cb->args[2] = 0;
return -EMSGSIZE;
}
+ ipset_nest_end(skb, atd);
return 0;
}
static int
-TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
- const struct xt_action_param *par,
- enum ipset_adt adt, struct ip_set_adt_opt *opt);
+IPSET_TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+ enum ipset_adt adt, struct ip_set_adt_opt *opt);
static int
-TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
- enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);
+IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);
static const struct ip_set_type_variant mtype_variant = {
.kadt = mtype_kadt,
@@ -946,16 +1011,17 @@ static const struct ip_set_type_variant mtype_variant = {
#ifdef IP_SET_EMIT_CREATE
static int
-TOKEN(HTYPE, _create)(struct ip_set *set, struct nlattr *tb[], u32 flags)
+IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
+ struct nlattr *tb[], u32 flags)
{
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
- u32 cadt_flags = 0;
u8 hbits;
#ifdef IP_SET_HASH_WITH_NETMASK
u8 netmask;
#endif
size_t hsize;
struct HTYPE *h;
+ struct htable *t;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
@@ -1005,7 +1071,7 @@ TOKEN(HTYPE, _create)(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->netmask = netmask;
#endif
get_random_bytes(&h->initval, sizeof(h->initval));
- h->timeout = IPSET_NO_TIMEOUT;
+ set->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
hsize = htable_size(hbits);
@@ -1013,91 +1079,37 @@ TOKEN(HTYPE, _create)(struct ip_set *set, struct nlattr *tb[], u32 flags)
kfree(h);
return -ENOMEM;
}
- h->table = ip_set_alloc(hsize);
- if (!h->table) {
+ t = ip_set_alloc(hsize);
+ if (!t) {
kfree(h);
return -ENOMEM;
}
- h->table->htable_bits = hbits;
+ t->htable_bits = hbits;
+ rcu_assign_pointer(h->table, t);
set->data = h;
- if (set->family == NFPROTO_IPV4)
- set->variant = &TOKEN(HTYPE, 4_variant);
- else
- set->variant = &TOKEN(HTYPE, 6_variant);
-
- if (tb[IPSET_ATTR_CADT_FLAGS])
- cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
- if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
- set->extensions |= IPSET_EXT_COUNTER;
- if (tb[IPSET_ATTR_TIMEOUT]) {
- h->timeout =
- ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->extensions |= IPSET_EXT_TIMEOUT;
- if (set->family == NFPROTO_IPV4) {
- h->dsize =
- sizeof(struct TOKEN(HTYPE, 4ct_elem));
- h->offset[IPSET_OFFSET_TIMEOUT] =
- offsetof(struct TOKEN(HTYPE, 4ct_elem),
- timeout);
- h->offset[IPSET_OFFSET_COUNTER] =
- offsetof(struct TOKEN(HTYPE, 4ct_elem),
- counter);
- TOKEN(HTYPE, 4_gc_init)(set,
- TOKEN(HTYPE, 4_gc));
- } else {
- h->dsize =
- sizeof(struct TOKEN(HTYPE, 6ct_elem));
- h->offset[IPSET_OFFSET_TIMEOUT] =
- offsetof(struct TOKEN(HTYPE, 6ct_elem),
- timeout);
- h->offset[IPSET_OFFSET_COUNTER] =
- offsetof(struct TOKEN(HTYPE, 6ct_elem),
- counter);
- TOKEN(HTYPE, 6_gc_init)(set,
- TOKEN(HTYPE, 6_gc));
- }
- } else {
- if (set->family == NFPROTO_IPV4) {
- h->dsize =
- sizeof(struct TOKEN(HTYPE, 4c_elem));
- h->offset[IPSET_OFFSET_COUNTER] =
- offsetof(struct TOKEN(HTYPE, 4c_elem),
- counter);
- } else {
- h->dsize =
- sizeof(struct TOKEN(HTYPE, 6c_elem));
- h->offset[IPSET_OFFSET_COUNTER] =
- offsetof(struct TOKEN(HTYPE, 6c_elem),
- counter);
- }
- }
- } else if (tb[IPSET_ATTR_TIMEOUT]) {
- h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->extensions |= IPSET_EXT_TIMEOUT;
- if (set->family == NFPROTO_IPV4) {
- h->dsize = sizeof(struct TOKEN(HTYPE, 4t_elem));
- h->offset[IPSET_OFFSET_TIMEOUT] =
- offsetof(struct TOKEN(HTYPE, 4t_elem),
- timeout);
- TOKEN(HTYPE, 4_gc_init)(set, TOKEN(HTYPE, 4_gc));
- } else {
- h->dsize = sizeof(struct TOKEN(HTYPE, 6t_elem));
- h->offset[IPSET_OFFSET_TIMEOUT] =
- offsetof(struct TOKEN(HTYPE, 6t_elem),
- timeout);
- TOKEN(HTYPE, 6_gc_init)(set, TOKEN(HTYPE, 6_gc));
- }
+ if (set->family == NFPROTO_IPV4) {
+ set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
+ set->dsize = ip_set_elem_len(set, tb,
+ sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)));
} else {
+ set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
+ set->dsize = ip_set_elem_len(set, tb,
+ sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)));
+ }
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
if (set->family == NFPROTO_IPV4)
- h->dsize = sizeof(struct TOKEN(HTYPE, 4_elem));
+ IPSET_TOKEN(HTYPE, 4_gc_init)(set,
+ IPSET_TOKEN(HTYPE, 4_gc));
else
- h->dsize = sizeof(struct TOKEN(HTYPE, 6_elem));
+ IPSET_TOKEN(HTYPE, 6_gc_init)(set,
+ IPSET_TOKEN(HTYPE, 6_gc));
}
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
- set->name, jhash_size(h->table->htable_bits),
- h->table->htable_bits, h->maxelem, set->data, h->table);
+ set->name, jhash_size(t->htable_bits),
+ t->htable_bits, h->maxelem, set->data, t);
return 0;
}
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index c74e6e14cd93..e65fc2423d56 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -23,19 +23,20 @@
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
-#define REVISION_MIN 0
-#define REVISION_MAX 1 /* Counters support */
+#define IPSET_TYPE_REV_MIN 0
+/* 1 Counters support */
+#define IPSET_TYPE_REV_MAX 2 /* Comments support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip");
/* Type specific function prefix */
#define HTYPE hash_ip
#define IP_SET_HASH_WITH_NETMASK
-/* IPv4 variants */
+/* IPv4 variant */
/* Member elements */
struct hash_ip4_elem {
@@ -43,22 +44,6 @@ struct hash_ip4_elem {
__be32 ip;
};
-struct hash_ip4t_elem {
- __be32 ip;
- unsigned long timeout;
-};
-
-struct hash_ip4c_elem {
- __be32 ip;
- struct ip_set_counter counter;
-};
-
-struct hash_ip4ct_elem {
- __be32 ip;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
/* Common functions */
static inline bool
@@ -99,7 +84,7 @@ hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct hash_ip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ip4_elem e = {};
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
__be32 ip;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip);
@@ -118,8 +103,8 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct hash_ip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ip4_elem e = {};
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
- u32 ip, ip_to, hosts;
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 ip = 0, ip_to = 0, hosts;
int ret = 0;
if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -178,29 +163,13 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
}
-/* IPv6 variants */
+/* IPv6 variant */
/* Member elements */
struct hash_ip6_elem {
union nf_inet_addr ip;
};
-struct hash_ip6t_elem {
- union nf_inet_addr ip;
- unsigned long timeout;
-};
-
-struct hash_ip6c_elem {
- union nf_inet_addr ip;
- struct ip_set_counter counter;
-};
-
-struct hash_ip6ct_elem {
- union nf_inet_addr ip;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
/* Common functions */
static inline bool
@@ -253,7 +222,7 @@ hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct hash_ip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ip6_elem e = {};
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
hash_ip6_netmask(&e.ip, h->netmask);
@@ -270,7 +239,7 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
const struct hash_ip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ip6_elem e = {};
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -304,8 +273,8 @@ static struct ip_set_type hash_ip_type __read_mostly = {
.features = IPSET_TYPE_IP,
.dimension = IPSET_DIM_ONE,
.family = NFPROTO_UNSPEC,
- .revision_min = REVISION_MIN,
- .revision_max = REVISION_MAX,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
.create = hash_ip_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -324,6 +293,7 @@ static struct ip_set_type hash_ip_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 7a2d2bd98d04..525a595dd1fe 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -24,19 +24,20 @@
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
-#define REVISION_MIN 0
-/* 1 SCTP and UDPLITE support added */
-#define REVISION_MAX 2 /* Counters support added */
+#define IPSET_TYPE_REV_MIN 0
+/* 1 SCTP and UDPLITE support added */
+/* 2 Counters support added */
+#define IPSET_TYPE_REV_MAX 3 /* Comments support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip,port", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip,port");
/* Type specific function prefix */
#define HTYPE hash_ipport
-/* IPv4 variants */
+/* IPv4 variant */
/* Member elements */
struct hash_ipport4_elem {
@@ -46,31 +47,6 @@ struct hash_ipport4_elem {
u8 padding;
};
-struct hash_ipport4t_elem {
- __be32 ip;
- __be16 port;
- u8 proto;
- u8 padding;
- unsigned long timeout;
-};
-
-struct hash_ipport4c_elem {
- __be32 ip;
- __be16 port;
- u8 proto;
- u8 padding;
- struct ip_set_counter counter;
-};
-
-struct hash_ipport4ct_elem {
- __be32 ip;
- __be16 port;
- u8 proto;
- u8 padding;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
/* Common functions */
static inline bool
@@ -116,10 +92,9 @@ hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
- const struct hash_ipport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem e = { };
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
@@ -136,8 +111,8 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct hash_ipport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem e = { };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
- u32 ip, ip_to, p = 0, port, port_to;
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 ip, ip_to = 0, p = 0, port, port_to;
bool with_ports = false;
int ret;
@@ -222,7 +197,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
}
-/* IPv6 variants */
+/* IPv6 variant */
struct hash_ipport6_elem {
union nf_inet_addr ip;
@@ -231,31 +206,6 @@ struct hash_ipport6_elem {
u8 padding;
};
-struct hash_ipport6t_elem {
- union nf_inet_addr ip;
- __be16 port;
- u8 proto;
- u8 padding;
- unsigned long timeout;
-};
-
-struct hash_ipport6c_elem {
- union nf_inet_addr ip;
- __be16 port;
- u8 proto;
- u8 padding;
- struct ip_set_counter counter;
-};
-
-struct hash_ipport6ct_elem {
- union nf_inet_addr ip;
- __be16 port;
- u8 proto;
- u8 padding;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
/* Common functions */
static inline bool
@@ -306,10 +256,9 @@ hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
- const struct hash_ipport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport6_elem e = { };
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
@@ -326,7 +275,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
const struct hash_ipport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport6_elem e = { };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
int ret;
@@ -396,8 +345,8 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
.dimension = IPSET_DIM_TWO,
.family = NFPROTO_UNSPEC,
- .revision_min = REVISION_MIN,
- .revision_max = REVISION_MAX,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
.create = hash_ipport_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -419,6 +368,7 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 34e8a1acce42..f5636631466e 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -24,19 +24,20 @@
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
-#define REVISION_MIN 0
-/* 1 SCTP and UDPLITE support added */
-#define REVISION_MAX 2 /* Counters support added */
+#define IPSET_TYPE_REV_MIN 0
+/* 1 SCTP and UDPLITE support added */
+/* 2 Counters support added */
+#define IPSET_TYPE_REV_MAX 3 /* Comments support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip,port,ip", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip,port,ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip,port,ip");
/* Type specific function prefix */
#define HTYPE hash_ipportip
-/* IPv4 variants */
+/* IPv4 variant */
/* Member elements */
struct hash_ipportip4_elem {
@@ -47,34 +48,6 @@ struct hash_ipportip4_elem {
u8 padding;
};
-struct hash_ipportip4t_elem {
- __be32 ip;
- __be32 ip2;
- __be16 port;
- u8 proto;
- u8 padding;
- unsigned long timeout;
-};
-
-struct hash_ipportip4c_elem {
- __be32 ip;
- __be32 ip2;
- __be16 port;
- u8 proto;
- u8 padding;
- struct ip_set_counter counter;
-};
-
-struct hash_ipportip4ct_elem {
- __be32 ip;
- __be32 ip2;
- __be16 port;
- u8 proto;
- u8 padding;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
static inline bool
hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
const struct hash_ipportip4_elem *ip2,
@@ -120,10 +93,9 @@ hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
- const struct hash_ipportip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem e = { };
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
@@ -141,8 +113,8 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct hash_ipportip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem e = { };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
- u32 ip, ip_to, p = 0, port, port_to;
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 ip, ip_to = 0, p = 0, port, port_to;
bool with_ports = false;
int ret;
@@ -231,7 +203,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
}
-/* IPv6 variants */
+/* IPv6 variant */
struct hash_ipportip6_elem {
union nf_inet_addr ip;
@@ -241,34 +213,6 @@ struct hash_ipportip6_elem {
u8 padding;
};
-struct hash_ipportip6t_elem {
- union nf_inet_addr ip;
- union nf_inet_addr ip2;
- __be16 port;
- u8 proto;
- u8 padding;
- unsigned long timeout;
-};
-
-struct hash_ipportip6c_elem {
- union nf_inet_addr ip;
- union nf_inet_addr ip2;
- __be16 port;
- u8 proto;
- u8 padding;
- struct ip_set_counter counter;
-};
-
-struct hash_ipportip6ct_elem {
- union nf_inet_addr ip;
- union nf_inet_addr ip2;
- __be16 port;
- u8 proto;
- u8 padding;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
/* Common functions */
static inline bool
@@ -319,10 +263,9 @@ hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
- const struct hash_ipportip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip6_elem e = { };
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
@@ -340,7 +283,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
const struct hash_ipportip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip6_elem e = { };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
int ret;
@@ -414,8 +357,8 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
.dimension = IPSET_DIM_THREE,
.family = NFPROTO_UNSPEC,
- .revision_min = REVISION_MIN,
- .revision_max = REVISION_MAX,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
.create = hash_ipportip_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -437,6 +380,7 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index f15f3e28b9c3..5d87fe8a41ff 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -24,15 +24,16 @@
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
-#define REVISION_MIN 0
-/* 1 SCTP and UDPLITE support added */
-/* 2 Range as input support for IPv4 added */
-/* 3 nomatch flag support added */
-#define REVISION_MAX 4 /* Counters support added */
+#define IPSET_TYPE_REV_MIN 0
+/* 1 SCTP and UDPLITE support added */
+/* 2 Range as input support for IPv4 added */
+/* 3 nomatch flag support added */
+/* 4 Counters support added */
+#define IPSET_TYPE_REV_MAX 5 /* Comments support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip,port,net", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip,port,net");
/* Type specific function prefix */
@@ -46,7 +47,7 @@ MODULE_ALIAS("ip_set_hash:ip,port,net");
#define IP_SET_HASH_WITH_PROTO
#define IP_SET_HASH_WITH_NETS
-/* IPv4 variants */
+/* IPv4 variant */
/* Member elements */
struct hash_ipportnet4_elem {
@@ -58,37 +59,6 @@ struct hash_ipportnet4_elem {
u8 proto;
};
-struct hash_ipportnet4t_elem {
- __be32 ip;
- __be32 ip2;
- __be16 port;
- u8 cidr:7;
- u8 nomatch:1;
- u8 proto;
- unsigned long timeout;
-};
-
-struct hash_ipportnet4c_elem {
- __be32 ip;
- __be32 ip2;
- __be16 port;
- u8 cidr:7;
- u8 nomatch:1;
- u8 proto;
- struct ip_set_counter counter;
-};
-
-struct hash_ipportnet4ct_elem {
- __be32 ip;
- __be32 ip2;
- __be16 port;
- u8 cidr:7;
- u8 nomatch:1;
- u8 proto;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
/* Common functions */
static inline bool
@@ -170,9 +140,9 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct hash_ipportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem e = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
+ .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
};
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (adt == IPSET_TEST)
e.cidr = HOST_MASK - 1;
@@ -195,9 +165,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct hash_ipportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
- u32 ip, ip_to, p = 0, port, port_to;
- u32 ip2_from, ip2_to, ip2_last, ip2;
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 ip = 0, ip_to = 0, p = 0, port, port_to;
+ u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
bool with_ports = false;
u8 cidr;
int ret;
@@ -272,7 +242,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ip > ip_to)
swap(ip, ip_to);
} else if (tb[IPSET_ATTR_CIDR]) {
- u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > 32)
return -IPSET_ERR_INVALID_CIDR;
@@ -306,9 +276,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
: port;
for (; p <= port_to; p++) {
e.port = htons(p);
- ip2 = retried
- && ip == ntohl(h->next.ip)
- && p == ntohs(h->next.port)
+ ip2 = retried &&
+ ip == ntohl(h->next.ip) &&
+ p == ntohs(h->next.port)
? ntohl(h->next.ip2) : ip2_from;
while (!after(ip2, ip2_to)) {
e.ip2 = htonl(ip2);
@@ -328,7 +298,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
}
-/* IPv6 variants */
+/* IPv6 variant */
struct hash_ipportnet6_elem {
union nf_inet_addr ip;
@@ -339,37 +309,6 @@ struct hash_ipportnet6_elem {
u8 proto;
};
-struct hash_ipportnet6t_elem {
- union nf_inet_addr ip;
- union nf_inet_addr ip2;
- __be16 port;
- u8 cidr:7;
- u8 nomatch:1;
- u8 proto;
- unsigned long timeout;
-};
-
-struct hash_ipportnet6c_elem {
- union nf_inet_addr ip;
- union nf_inet_addr ip2;
- __be16 port;
- u8 cidr:7;
- u8 nomatch:1;
- u8 proto;
- struct ip_set_counter counter;
-};
-
-struct hash_ipportnet6ct_elem {
- union nf_inet_addr ip;
- union nf_inet_addr ip2;
- __be16 port;
- u8 cidr:7;
- u8 nomatch:1;
- u8 proto;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
/* Common functions */
static inline bool
@@ -454,9 +393,9 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct hash_ipportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet6_elem e = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
+ .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
};
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (adt == IPSET_TEST)
e.cidr = HOST_MASK - 1;
@@ -479,7 +418,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
const struct hash_ipportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet6_elem e = { .cidr = HOST_MASK - 1 };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
u8 cidr;
@@ -574,8 +513,8 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
IPSET_TYPE_NOMATCH,
.dimension = IPSET_DIM_THREE,
.family = NFPROTO_UNSPEC,
- .revision_min = REVISION_MIN,
- .revision_max = REVISION_MAX,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
.create = hash_ipportnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -600,6 +539,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 223e9f546d0f..8295cf4f9fdc 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -22,21 +22,22 @@
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
-#define REVISION_MIN 0
-/* 1 Range as input support for IPv4 added */
-/* 2 nomatch flag support added */
-#define REVISION_MAX 3 /* Counters support added */
+#define IPSET_TYPE_REV_MIN 0
+/* 1 Range as input support for IPv4 added */
+/* 2 nomatch flag support added */
+/* 3 Counters support added */
+#define IPSET_TYPE_REV_MAX 4 /* Comments support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:net", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net");
/* Type specific function prefix */
#define HTYPE hash_net
#define IP_SET_HASH_WITH_NETS
-/* IPv4 variants */
+/* IPv4 variant */
/* Member elements */
struct hash_net4_elem {
@@ -46,31 +47,6 @@ struct hash_net4_elem {
u8 cidr;
};
-struct hash_net4t_elem {
- __be32 ip;
- u16 padding0;
- u8 nomatch;
- u8 cidr;
- unsigned long timeout;
-};
-
-struct hash_net4c_elem {
- __be32 ip;
- u16 padding0;
- u8 nomatch;
- u8 cidr;
- struct ip_set_counter counter;
-};
-
-struct hash_net4ct_elem {
- __be32 ip;
- u16 padding0;
- u8 nomatch;
- u8 cidr;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
/* Common functions */
static inline bool
@@ -143,9 +119,9 @@ hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct hash_net *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net4_elem e = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
};
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (e.cidr == 0)
return -EINVAL;
@@ -165,8 +141,8 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct hash_net *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net4_elem e = { .cidr = HOST_MASK };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
- u32 ip = 0, ip_to, last;
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 ip = 0, ip_to = 0, last;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -228,7 +204,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
}
-/* IPv6 variants */
+/* IPv6 variant */
struct hash_net6_elem {
union nf_inet_addr ip;
@@ -237,31 +213,6 @@ struct hash_net6_elem {
u8 cidr;
};
-struct hash_net6t_elem {
- union nf_inet_addr ip;
- u16 padding0;
- u8 nomatch;
- u8 cidr;
- unsigned long timeout;
-};
-
-struct hash_net6c_elem {
- union nf_inet_addr ip;
- u16 padding0;
- u8 nomatch;
- u8 cidr;
- struct ip_set_counter counter;
-};
-
-struct hash_net6ct_elem {
- union nf_inet_addr ip;
- u16 padding0;
- u8 nomatch;
- u8 cidr;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
/* Common functions */
static inline bool
@@ -338,9 +289,9 @@ hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct hash_net *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net6_elem e = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
};
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (e.cidr == 0)
return -EINVAL;
@@ -357,10 +308,9 @@ static int
hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
- const struct hash_net *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net6_elem e = { .cidr = HOST_MASK };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -406,8 +356,8 @@ static struct ip_set_type hash_net_type __read_mostly = {
.features = IPSET_TYPE_IP | IPSET_TYPE_NOMATCH,
.dimension = IPSET_DIM_ONE,
.family = NFPROTO_UNSPEC,
- .revision_min = REVISION_MIN,
- .revision_max = REVISION_MAX,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
.create = hash_net_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -425,6 +375,7 @@ static struct ip_set_type hash_net_type __read_mostly = {
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 7d798d5d5cd3..3f64a66bf5d9 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -23,14 +23,15 @@
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
-#define REVISION_MIN 0
-/* 1 nomatch flag support added */
-/* 2 /0 support added */
-#define REVISION_MAX 3 /* Counters support added */
+#define IPSET_TYPE_REV_MIN 0
+/* 1 nomatch flag support added */
+/* 2 /0 support added */
+/* 3 Counters support added */
+#define IPSET_TYPE_REV_MAX 4 /* Comments support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:net,iface", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:net,iface", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net,iface");
/* Interface name rbtree */
@@ -134,7 +135,7 @@ iface_add(struct rb_root *root, const char **iface)
#define STREQ(a, b) (strcmp(a, b) == 0)
-/* IPv4 variants */
+/* IPv4 variant */
struct hash_netiface4_elem_hashed {
__be32 ip;
@@ -144,7 +145,7 @@ struct hash_netiface4_elem_hashed {
u8 elem;
};
-/* Member elements without timeout */
+/* Member elements */
struct hash_netiface4_elem {
__be32 ip;
u8 physdev;
@@ -154,37 +155,6 @@ struct hash_netiface4_elem {
const char *iface;
};
-struct hash_netiface4t_elem {
- __be32 ip;
- u8 physdev;
- u8 cidr;
- u8 nomatch;
- u8 elem;
- const char *iface;
- unsigned long timeout;
-};
-
-struct hash_netiface4c_elem {
- __be32 ip;
- u8 physdev;
- u8 cidr;
- u8 nomatch;
- u8 elem;
- const char *iface;
- struct ip_set_counter counter;
-};
-
-struct hash_netiface4ct_elem {
- __be32 ip;
- u8 physdev;
- u8 cidr;
- u8 nomatch;
- u8 elem;
- const char *iface;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
/* Common functions */
static inline bool
@@ -265,10 +235,10 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
struct hash_netiface *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface4_elem e = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
+ .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
.elem = 1,
};
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
int ret;
if (e.cidr == 0)
@@ -319,8 +289,8 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
struct hash_netiface *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
- u32 ip = 0, ip_to, last;
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 ip = 0, ip_to = 0, last;
char iface[IFNAMSIZ];
int ret;
@@ -399,7 +369,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
}
-/* IPv6 variants */
+/* IPv6 variant */
struct hash_netiface6_elem_hashed {
union nf_inet_addr ip;
@@ -418,37 +388,6 @@ struct hash_netiface6_elem {
const char *iface;
};
-struct hash_netiface6t_elem {
- union nf_inet_addr ip;
- u8 physdev;
- u8 cidr;
- u8 nomatch;
- u8 elem;
- const char *iface;
- unsigned long timeout;
-};
-
-struct hash_netiface6c_elem {
- union nf_inet_addr ip;
- u8 physdev;
- u8 cidr;
- u8 nomatch;
- u8 elem;
- const char *iface;
- struct ip_set_counter counter;
-};
-
-struct hash_netiface6ct_elem {
- union nf_inet_addr ip;
- u8 physdev;
- u8 cidr;
- u8 nomatch;
- u8 elem;
- const char *iface;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
/* Common functions */
static inline bool
@@ -534,10 +473,10 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
struct hash_netiface *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface6_elem e = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
+ .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
.elem = 1,
};
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
int ret;
if (e.cidr == 0)
@@ -584,7 +523,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
struct hash_netiface *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface6_elem e = { .cidr = HOST_MASK, .elem = 1 };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
char iface[IFNAMSIZ];
int ret;
@@ -645,8 +584,8 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
IPSET_TYPE_NOMATCH,
.dimension = IPSET_DIM_TWO,
.family = NFPROTO_UNSPEC,
- .revision_min = REVISION_MIN,
- .revision_max = REVISION_MAX,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
.create = hash_netiface_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -668,6 +607,7 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
new file mode 100644
index 000000000000..426032706ca9
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -0,0 +1,483 @@
+/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ * Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+#define IPSET_TYPE_REV_MIN 0
+#define IPSET_TYPE_REV_MAX 0
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
+IP_SET_MODULE_DESC("hash:net,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
+MODULE_ALIAS("ip_set_hash:net,net");
+
+/* Type specific function prefix */
+#define HTYPE hash_netnet
+#define IP_SET_HASH_WITH_NETS
+#define IPSET_NET_COUNT 2
+
+/* IPv4 variants */
+
+/* Member elements */
+struct hash_netnet4_elem {
+ union {
+ __be32 ip[2];
+ __be64 ipcmp;
+ };
+ u8 nomatch;
+ union {
+ u8 cidr[2];
+ u16 ccmp;
+ };
+};
+
+/* Common functions */
+
+static inline bool
+hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1,
+ const struct hash_netnet4_elem *ip2,
+ u32 *multi)
+{
+ return ip1->ipcmp == ip2->ipcmp &&
+ ip1->ccmp == ip2->ccmp;
+}
+
+static inline int
+hash_netnet4_do_data_match(const struct hash_netnet4_elem *elem)
+{
+ return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netnet4_data_set_flags(struct hash_netnet4_elem *elem, u32 flags)
+{
+ elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
+}
+
+static inline void
+hash_netnet4_data_reset_flags(struct hash_netnet4_elem *elem, u8 *flags)
+{
+ swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netnet4_data_reset_elem(struct hash_netnet4_elem *elem,
+ struct hash_netnet4_elem *orig)
+{
+ elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netnet4_data_netmask(struct hash_netnet4_elem *elem, u8 cidr, bool inner)
+{
+ if (inner) {
+ elem->ip[1] &= ip_set_netmask(cidr);
+ elem->cidr[1] = cidr;
+ } else {
+ elem->ip[0] &= ip_set_netmask(cidr);
+ elem->cidr[0] = cidr;
+ }
+}
+
+static bool
+hash_netnet4_data_list(struct sk_buff *skb,
+ const struct hash_netnet4_elem *data)
+{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) ||
+ nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static inline void
+hash_netnet4_data_next(struct hash_netnet4_elem *next,
+ const struct hash_netnet4_elem *d)
+{
+ next->ipcmp = d->ipcmp;
+}
+
+#define MTYPE hash_netnet4
+#define PF 4
+#define HOST_MASK 32
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+ enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+ const struct hash_netnet *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netnet4_elem e = {
+ .cidr[0] = h->nets[0].cidr[0] ? h->nets[0].cidr[0] : HOST_MASK,
+ .cidr[1] = h->nets[0].cidr[1] ? h->nets[0].cidr[1] : HOST_MASK,
+ };
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+ if (adt == IPSET_TEST)
+ e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
+
+ ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
+ ip4addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1]);
+ e.ip[0] &= ip_set_netmask(e.cidr[0]);
+ e.ip[1] &= ip_set_netmask(e.cidr[1]);
+
+ return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+ const struct hash_netnet *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netnet4_elem e = { .cidr[0] = HOST_MASK,
+ .cidr[1] = HOST_MASK };
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 ip = 0, ip_to = 0, last;
+ u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
+ u8 cidr, cidr2;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
+ ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
+ ip_set_get_extensions(set, tb, &ext);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR]) {
+ cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ if (!cidr || cidr > HOST_MASK)
+ return -IPSET_ERR_INVALID_CIDR;
+ e.cidr[0] = cidr;
+ }
+
+ if (tb[IPSET_ATTR_CIDR2]) {
+ cidr2 = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+ if (!cidr2 || cidr2 > HOST_MASK)
+ return -IPSET_ERR_INVALID_CIDR;
+ e.cidr[1] = cidr2;
+ }
+
+ if (tb[IPSET_ATTR_CADT_FLAGS]) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (IPSET_FLAG_NOMATCH << 16);
+ }
+
+ if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] &&
+ tb[IPSET_ATTR_IP2_TO])) {
+ e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0]));
+ e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1]));
+ ret = adtfn(set, &e, &ext, &ext, flags);
+ return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+ ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ ip_to = ip;
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip_to < ip)
+ swap(ip, ip_to);
+ if (ip + UINT_MAX == ip_to)
+ return -IPSET_ERR_HASH_RANGE;
+ }
+
+ ip2_to = ip2_from;
+ if (tb[IPSET_ATTR_IP2_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
+ if (ret)
+ return ret;
+ if (ip2_to < ip2_from)
+ swap(ip2_from, ip2_to);
+ if (ip2_from + UINT_MAX == ip2_to)
+ return -IPSET_ERR_HASH_RANGE;
+
+ }
+
+ if (retried)
+ ip = ntohl(h->next.ip[0]);
+
+ while (!after(ip, ip_to)) {
+ e.ip[0] = htonl(ip);
+ last = ip_set_range_to_cidr(ip, ip_to, &cidr);
+ e.cidr[0] = cidr;
+ ip2 = (retried &&
+ ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
+ : ip2_from;
+ while (!after(ip2, ip2_to)) {
+ e.ip[1] = htonl(ip2);
+ last2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr2);
+ e.cidr[1] = cidr2;
+ ret = adtfn(set, &e, &ext, &ext, flags);
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ ip2 = last2 + 1;
+ }
+ ip = last + 1;
+ }
+ return ret;
+}
+
+/* IPv6 variants */
+
+struct hash_netnet6_elem {
+ union nf_inet_addr ip[2];
+ u8 nomatch;
+ union {
+ u8 cidr[2];
+ u16 ccmp;
+ };
+};
+
+/* Common functions */
+
+static inline bool
+hash_netnet6_data_equal(const struct hash_netnet6_elem *ip1,
+ const struct hash_netnet6_elem *ip2,
+ u32 *multi)
+{
+ return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
+ ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
+ ip1->ccmp == ip2->ccmp;
+}
+
+static inline int
+hash_netnet6_do_data_match(const struct hash_netnet6_elem *elem)
+{
+ return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netnet6_data_set_flags(struct hash_netnet6_elem *elem, u32 flags)
+{
+ elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
+}
+
+static inline void
+hash_netnet6_data_reset_flags(struct hash_netnet6_elem *elem, u8 *flags)
+{
+ swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netnet6_data_reset_elem(struct hash_netnet6_elem *elem,
+ struct hash_netnet6_elem *orig)
+{
+ elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netnet6_data_netmask(struct hash_netnet6_elem *elem, u8 cidr, bool inner)
+{
+ if (inner) {
+ ip6_netmask(&elem->ip[1], cidr);
+ elem->cidr[1] = cidr;
+ } else {
+ ip6_netmask(&elem->ip[0], cidr);
+ elem->cidr[0] = cidr;
+ }
+}
+
+static bool
+hash_netnet6_data_list(struct sk_buff *skb,
+ const struct hash_netnet6_elem *data)
+{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) ||
+ nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static inline void
+hash_netnet6_data_next(struct hash_netnet4_elem *next,
+ const struct hash_netnet6_elem *d)
+{
+}
+
+#undef MTYPE
+#undef PF
+#undef HOST_MASK
+
+#define MTYPE hash_netnet6
+#define PF 6
+#define HOST_MASK 128
+#define IP_SET_EMIT_CREATE
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+ enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+ const struct hash_netnet *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netnet6_elem e = {
+ .cidr[0] = h->nets[0].cidr[0] ? h->nets[0].cidr[0] : HOST_MASK,
+ .cidr[1] = h->nets[0].cidr[1] ? h->nets[0].cidr[1] : HOST_MASK
+ };
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+ if (adt == IPSET_TEST)
+ e.ccmp = (HOST_MASK << (sizeof(u8)*8)) | HOST_MASK;
+
+ ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
+ ip6addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1].in6);
+ ip6_netmask(&e.ip[0], e.cidr[0]);
+ ip6_netmask(&e.ip[1], e.cidr[1]);
+
+ return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netnet6_elem e = { .cidr[0] = HOST_MASK,
+ .cidr[1] = HOST_MASK };
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ return -IPSET_ERR_PROTOCOL;
+ if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
+ return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
+ ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
+ ip_set_get_extensions(set, tb, &ext);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (tb[IPSET_ATTR_CIDR2])
+ e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+ if (!e.cidr[0] || e.cidr[0] > HOST_MASK || !e.cidr[1] ||
+ e.cidr[1] > HOST_MASK)
+ return -IPSET_ERR_INVALID_CIDR;
+
+ ip6_netmask(&e.ip[0], e.cidr[0]);
+ ip6_netmask(&e.ip[1], e.cidr[1]);
+
+ if (tb[IPSET_ATTR_CADT_FLAGS]) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (IPSET_FLAG_NOMATCH << 16);
+ }
+
+ ret = adtfn(set, &e, &ext, &ext, flags);
+
+ return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+ ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static struct ip_set_type hash_netnet_type __read_mostly = {
+ .name = "hash:net,net",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_IP2 | IPSET_TYPE_NOMATCH,
+ .dimension = IPSET_DIM_TWO,
+ .family = NFPROTO_UNSPEC,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
+ .create = hash_netnet_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP2] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP2_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+ [IPSET_ATTR_BYTES] = { .type = NLA_U64 },
+ [IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_netnet_init(void)
+{
+ return ip_set_type_register(&hash_netnet_type);
+}
+
+static void __exit
+hash_netnet_fini(void)
+{
+ ip_set_type_unregister(&hash_netnet_type);
+}
+
+module_init(hash_netnet_init);
+module_exit(hash_netnet_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 09d6690bee6f..7097fb0141bf 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -23,15 +23,16 @@
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
-#define REVISION_MIN 0
-/* 1 SCTP and UDPLITE support added */
-/* 2 Range as input support for IPv4 added */
-/* 3 nomatch flag support added */
-#define REVISION_MAX 4 /* Counters support added */
+#define IPSET_TYPE_REV_MIN 0
+/* 1 SCTP and UDPLITE support added */
+/* 2 Range as input support for IPv4 added */
+/* 3 nomatch flag support added */
+/* 4 Counters support added */
+#define IPSET_TYPE_REV_MAX 5 /* Comments support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:net,port", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:net,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net,port");
/* Type specific function prefix */
@@ -45,7 +46,7 @@ MODULE_ALIAS("ip_set_hash:net,port");
*/
#define IP_SET_HASH_WITH_NETS_PACKED
-/* IPv4 variants */
+/* IPv4 variant */
/* Member elements */
struct hash_netport4_elem {
@@ -56,34 +57,6 @@ struct hash_netport4_elem {
u8 nomatch:1;
};
-struct hash_netport4t_elem {
- __be32 ip;
- __be16 port;
- u8 proto;
- u8 cidr:7;
- u8 nomatch:1;
- unsigned long timeout;
-};
-
-struct hash_netport4c_elem {
- __be32 ip;
- __be16 port;
- u8 proto;
- u8 cidr:7;
- u8 nomatch:1;
- struct ip_set_counter counter;
-};
-
-struct hash_netport4ct_elem {
- __be32 ip;
- __be16 port;
- u8 proto;
- u8 cidr:7;
- u8 nomatch:1;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
/* Common functions */
static inline bool
@@ -162,9 +135,9 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct hash_netport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem e = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
+ .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
};
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (adt == IPSET_TEST)
e.cidr = HOST_MASK - 1;
@@ -186,8 +159,8 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct hash_netport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
- u32 port, port_to, p = 0, ip = 0, ip_to, last;
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 port, port_to, p = 0, ip = 0, ip_to = 0, last;
bool with_ports = false;
u8 cidr;
int ret;
@@ -287,7 +260,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
}
-/* IPv6 variants */
+/* IPv6 variant */
struct hash_netport6_elem {
union nf_inet_addr ip;
@@ -297,34 +270,6 @@ struct hash_netport6_elem {
u8 nomatch:1;
};
-struct hash_netport6t_elem {
- union nf_inet_addr ip;
- __be16 port;
- u8 proto;
- u8 cidr:7;
- u8 nomatch:1;
- unsigned long timeout;
-};
-
-struct hash_netport6c_elem {
- union nf_inet_addr ip;
- __be16 port;
- u8 proto;
- u8 cidr:7;
- u8 nomatch:1;
- struct ip_set_counter counter;
-};
-
-struct hash_netport6ct_elem {
- union nf_inet_addr ip;
- __be16 port;
- u8 proto;
- u8 cidr:7;
- u8 nomatch:1;
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
/* Common functions */
static inline bool
@@ -407,9 +352,9 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct hash_netport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport6_elem e = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1,
+ .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
};
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (adt == IPSET_TEST)
e.cidr = HOST_MASK - 1;
@@ -431,7 +376,7 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
const struct hash_netport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport6_elem e = { .cidr = HOST_MASK - 1 };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
u8 cidr;
@@ -518,8 +463,8 @@ static struct ip_set_type hash_netport_type __read_mostly = {
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_NOMATCH,
.dimension = IPSET_DIM_TWO,
.family = NFPROTO_UNSPEC,
- .revision_min = REVISION_MIN,
- .revision_max = REVISION_MAX,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
.create = hash_netport_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -542,6 +487,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
new file mode 100644
index 000000000000..363fab933d48
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -0,0 +1,588 @@
+/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+#define IPSET_TYPE_REV_MIN 0
+#define IPSET_TYPE_REV_MAX 0 /* Comments support added */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
+IP_SET_MODULE_DESC("hash:net,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
+MODULE_ALIAS("ip_set_hash:net,port,net");
+
+/* Type specific function prefix */
+#define HTYPE hash_netportnet
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+#define IPSET_NET_COUNT 2
+
+/* IPv4 variant */
+
+/* Member elements */
+struct hash_netportnet4_elem {
+ union {
+ __be32 ip[2];
+ __be64 ipcmp;
+ };
+ __be16 port;
+ union {
+ u8 cidr[2];
+ u16 ccmp;
+ };
+ u8 nomatch:1;
+ u8 proto;
+};
+
+/* Common functions */
+
+static inline bool
+hash_netportnet4_data_equal(const struct hash_netportnet4_elem *ip1,
+ const struct hash_netportnet4_elem *ip2,
+ u32 *multi)
+{
+ return ip1->ipcmp == ip2->ipcmp &&
+ ip1->ccmp == ip2->ccmp &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline int
+hash_netportnet4_do_data_match(const struct hash_netportnet4_elem *elem)
+{
+ return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netportnet4_data_set_flags(struct hash_netportnet4_elem *elem, u32 flags)
+{
+ elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_netportnet4_data_reset_flags(struct hash_netportnet4_elem *elem, u8 *flags)
+{
+ swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netportnet4_data_reset_elem(struct hash_netportnet4_elem *elem,
+ struct hash_netportnet4_elem *orig)
+{
+ elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netportnet4_data_netmask(struct hash_netportnet4_elem *elem,
+ u8 cidr, bool inner)
+{
+ if (inner) {
+ elem->ip[1] &= ip_set_netmask(cidr);
+ elem->cidr[1] = cidr;
+ } else {
+ elem->ip[0] &= ip_set_netmask(cidr);
+ elem->cidr[0] = cidr;
+ }
+}
+
+static bool
+hash_netportnet4_data_list(struct sk_buff *skb,
+ const struct hash_netportnet4_elem *data)
+{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) ||
+ nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static inline void
+hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
+ const struct hash_netportnet4_elem *d)
+{
+ next->ipcmp = d->ipcmp;
+ next->port = d->port;
+}
+
+#define MTYPE hash_netportnet4
+#define PF 4
+#define HOST_MASK 32
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+ enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+ const struct hash_netportnet *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netportnet4_elem e = {
+ .cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
+ .cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK),
+ };
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+ if (adt == IPSET_TEST)
+ e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
+
+ if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
+ &e.port, &e.proto))
+ return -EINVAL;
+
+ ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
+ ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1]);
+ e.ip[0] &= ip_set_netmask(e.cidr[0]);
+ e.ip[1] &= ip_set_netmask(e.cidr[1]);
+
+ return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+ const struct hash_netportnet *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netportnet4_elem e = { .cidr[0] = HOST_MASK,
+ .cidr[1] = HOST_MASK };
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
+ u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
+ bool with_ports = false;
+ u8 cidr, cidr2;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
+ ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
+ ip_set_get_extensions(set, tb, &ext);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR]) {
+ cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ if (!cidr || cidr > HOST_MASK)
+ return -IPSET_ERR_INVALID_CIDR;
+ e.cidr[0] = cidr;
+ }
+
+ if (tb[IPSET_ATTR_CIDR2]) {
+ cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+ if (!cidr || cidr > HOST_MASK)
+ return -IPSET_ERR_INVALID_CIDR;
+ e.cidr[1] = cidr;
+ }
+
+ if (tb[IPSET_ATTR_PORT])
+ e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+ with_ports = ip_set_proto_with_ports(e.proto);
+
+ if (e.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ if (!(with_ports || e.proto == IPPROTO_ICMP))
+ e.port = 0;
+
+ if (tb[IPSET_ATTR_CADT_FLAGS]) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (IPSET_FLAG_NOMATCH << 16);
+ }
+
+ with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
+ if (adt == IPSET_TEST ||
+ !(tb[IPSET_ATTR_IP_TO] || with_ports || tb[IPSET_ATTR_IP2_TO])) {
+ e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0]));
+ e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1]));
+ ret = adtfn(set, &e, &ext, &ext, flags);
+ return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+ ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ ip_to = ip;
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to)
+ swap(ip, ip_to);
+ if (unlikely(ip + UINT_MAX == ip_to))
+ return -IPSET_ERR_HASH_RANGE;
+ }
+
+ port_to = port = ntohs(e.port);
+ if (tb[IPSET_ATTR_PORT_TO]) {
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+ }
+
+ ip2_to = ip2_from;
+ if (tb[IPSET_ATTR_IP2_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
+ if (ret)
+ return ret;
+ if (ip2_from > ip2_to)
+ swap(ip2_from, ip2_to);
+ if (unlikely(ip2_from + UINT_MAX == ip2_to))
+ return -IPSET_ERR_HASH_RANGE;
+ }
+
+ if (retried)
+ ip = ntohl(h->next.ip[0]);
+
+ while (!after(ip, ip_to)) {
+ e.ip[0] = htonl(ip);
+ ip_last = ip_set_range_to_cidr(ip, ip_to, &cidr);
+ e.cidr[0] = cidr;
+ p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
+ : port;
+ for (; p <= port_to; p++) {
+ e.port = htons(p);
+ ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
+ p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
+ : ip2_from;
+ while (!after(ip2, ip2_to)) {
+ e.ip[1] = htonl(ip2);
+ ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
+ &cidr2);
+ e.cidr[1] = cidr2;
+ ret = adtfn(set, &e, &ext, &ext, flags);
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ ip2 = ip2_last + 1;
+ }
+ }
+ ip = ip_last + 1;
+ }
+ return ret;
+}
+
+/* IPv6 variant */
+
+struct hash_netportnet6_elem {
+ union nf_inet_addr ip[2];
+ __be16 port;
+ union {
+ u8 cidr[2];
+ u16 ccmp;
+ };
+ u8 nomatch:1;
+ u8 proto;
+};
+
+/* Common functions */
+
+static inline bool
+hash_netportnet6_data_equal(const struct hash_netportnet6_elem *ip1,
+ const struct hash_netportnet6_elem *ip2,
+ u32 *multi)
+{
+ return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
+ ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
+ ip1->ccmp == ip2->ccmp &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline int
+hash_netportnet6_do_data_match(const struct hash_netportnet6_elem *elem)
+{
+ return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netportnet6_data_set_flags(struct hash_netportnet6_elem *elem, u32 flags)
+{
+ elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_netportnet6_data_reset_flags(struct hash_netportnet6_elem *elem, u8 *flags)
+{
+ swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netportnet6_data_reset_elem(struct hash_netportnet6_elem *elem,
+ struct hash_netportnet6_elem *orig)
+{
+ elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netportnet6_data_netmask(struct hash_netportnet6_elem *elem,
+ u8 cidr, bool inner)
+{
+ if (inner) {
+ ip6_netmask(&elem->ip[1], cidr);
+ elem->cidr[1] = cidr;
+ } else {
+ ip6_netmask(&elem->ip[0], cidr);
+ elem->cidr[0] = cidr;
+ }
+}
+
+static bool
+hash_netportnet6_data_list(struct sk_buff *skb,
+ const struct hash_netportnet6_elem *data)
+{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) ||
+ nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static inline void
+hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
+ const struct hash_netportnet6_elem *d)
+{
+ next->port = d->port;
+}
+
+#undef MTYPE
+#undef PF
+#undef HOST_MASK
+
+#define MTYPE hash_netportnet6
+#define PF 6
+#define HOST_MASK 128
+#define IP_SET_EMIT_CREATE
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+ enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+ const struct hash_netportnet *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netportnet6_elem e = {
+ .cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
+ .cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK),
+ };
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+ if (adt == IPSET_TEST)
+ e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;
+
+ if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
+ &e.port, &e.proto))
+ return -EINVAL;
+
+ ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
+ ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1].in6);
+ ip6_netmask(&e.ip[0], e.cidr[0]);
+ ip6_netmask(&e.ip[1], e.cidr[1]);
+
+ return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+ const struct hash_netportnet *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netportnet6_elem e = { .cidr[0] = HOST_MASK,
+ .cidr[1] = HOST_MASK };
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 port, port_to;
+ bool with_ports = false;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ return -IPSET_ERR_PROTOCOL;
+ if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
+ return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
+ ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
+ ip_set_get_extensions(set, tb, &ext);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (tb[IPSET_ATTR_CIDR2])
+ e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+ if (unlikely(!e.cidr[0] || e.cidr[0] > HOST_MASK || !e.cidr[1] ||
+ e.cidr[1] > HOST_MASK))
+ return -IPSET_ERR_INVALID_CIDR;
+
+ ip6_netmask(&e.ip[0], e.cidr[0]);
+ ip6_netmask(&e.ip[1], e.cidr[1]);
+
+ if (tb[IPSET_ATTR_PORT])
+ e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+ with_ports = ip_set_proto_with_ports(e.proto);
+
+ if (e.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ if (!(with_ports || e.proto == IPPROTO_ICMPV6))
+ e.port = 0;
+
+ if (tb[IPSET_ATTR_CADT_FLAGS]) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (IPSET_FLAG_NOMATCH << 16);
+ }
+
+ if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &e, &ext, &ext, flags);
+ return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+ ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(e.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ if (retried)
+ port = ntohs(h->next.port);
+ for (; port <= port_to; port++) {
+ e.port = htons(port);
+ ret = adtfn(set, &e, &ext, &ext, flags);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static struct ip_set_type hash_netportnet_type __read_mostly = {
+ .name = "hash:net,port,net",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 |
+ IPSET_TYPE_NOMATCH,
+ .dimension = IPSET_DIM_THREE,
+ .family = NFPROTO_UNSPEC,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
+ .create = hash_netportnet_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP2] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP2_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ [IPSET_ATTR_BYTES] = { .type = NLA_U64 },
+ [IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_netportnet_init(void)
+{
+ return ip_set_type_register(&hash_netportnet_type);
+}
+
+static void __exit
+hash_netportnet_fini(void)
+{
+ ip_set_type_unregister(&hash_netportnet_type);
+}
+
+module_init(hash_netportnet_init);
+module_exit(hash_netportnet_fini);
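The add path above (hash_netportnet4_uadt) expands an IPv4 range into successive CIDR blocks via ip_set_range_to_cidr() before inserting each block, advancing with ip = ip_last + 1. The following standalone program is not the kernel helper; it is a hypothetical userspace sketch of the same greedy range-to-prefix decomposition, shown only to illustrate what the nested loops rely on (the full /0 range is not handled, matching the -IPSET_ERR_HASH_RANGE check above).

#include <stdint.h>
#include <stdio.h>

/* Emit the largest CIDR block that starts at 'from' and stays inside
 * [from, to]; return the last address that block covers.
 */
static uint32_t range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
{
	uint32_t size = 1;		/* block size in addresses */
	uint8_t prefix = 32;

	for (;;) {
		uint32_t bigger = size << 1;

		if (bigger == 0)		/* would be the whole 32-bit space */
			break;
		if (from & (bigger - 1))	/* 'from' not aligned to the bigger block */
			break;
		if (from + bigger - 1 > to)	/* bigger block would pass 'to' */
			break;
		size = bigger;
		prefix--;
	}
	*cidr = prefix;
	return from + size - 1;
}

int main(void)
{
	uint32_t from = 0x0A000003;	/* 10.0.0.3 */
	uint32_t to   = 0x0A000109;	/* 10.0.1.9 */
	uint8_t cidr;

	for (;;) {
		uint32_t last = range_to_cidr(from, to, &cidr);

		printf("%u.%u.%u.%u/%u\n",
		       from >> 24, (from >> 16) & 0xff,
		       (from >> 8) & 0xff, from & 0xff, cidr);
		if (last >= to)
			break;
		from = last + 1;	/* same stepping as the kadt/uadt loop */
	}
	return 0;
}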
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 979b8c90e422..ec6f6d15dded 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -15,12 +15,13 @@
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_list.h>
-#define REVISION_MIN 0
-#define REVISION_MAX 1 /* Counters support added */
+#define IPSET_TYPE_REV_MIN 0
+/* 1 Counters support added */
+#define IPSET_TYPE_REV_MAX 2 /* Comments support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("list:set", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("list:set", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_list:set");
/* Member elements */
@@ -28,28 +29,6 @@ struct set_elem {
ip_set_id_t id;
};
-struct sett_elem {
- struct {
- ip_set_id_t id;
- } __attribute__ ((aligned));
- unsigned long timeout;
-};
-
-struct setc_elem {
- struct {
- ip_set_id_t id;
- } __attribute__ ((aligned));
- struct ip_set_counter counter;
-};
-
-struct setct_elem {
- struct {
- ip_set_id_t id;
- } __attribute__ ((aligned));
- struct ip_set_counter counter;
- unsigned long timeout;
-};
-
struct set_adt_elem {
ip_set_id_t id;
ip_set_id_t refid;
@@ -58,24 +37,14 @@ struct set_adt_elem {
/* Type structure */
struct list_set {
- size_t dsize; /* element size */
- size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
u32 size; /* size of set list array */
- u32 timeout; /* timeout value */
struct timer_list gc; /* garbage collection */
+ struct net *net; /* namespace */
struct set_elem members[0]; /* the set members */
};
-static inline struct set_elem *
-list_set_elem(const struct list_set *map, u32 id)
-{
- return (struct set_elem *)((void *)map->members + id * map->dsize);
-}
-
-#define ext_timeout(e, m) \
-(unsigned long *)((void *)(e) + (m)->offset[IPSET_OFFSET_TIMEOUT])
-#define ext_counter(e, m) \
-(struct ip_set_counter *)((void *)(e) + (m)->offset[IPSET_OFFSET_COUNTER])
+#define list_set_elem(set, map, id) \
+ (struct set_elem *)((void *)(map)->members + (id) * (set)->dsize)
static int
list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
@@ -92,16 +61,16 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
for (i = 0; i < map->size; i++) {
- e = list_set_elem(map, i);
+ e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
return 0;
if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(e, map)))
+ ip_set_timeout_expired(ext_timeout(e, set)))
continue;
ret = ip_set_test(e->id, skb, par, opt);
if (ret > 0) {
if (SET_WITH_COUNTER(set))
- ip_set_update_counter(ext_counter(e, map),
+ ip_set_update_counter(ext_counter(e, set),
ext, &opt->ext,
cmdflags);
return ret;
@@ -121,11 +90,11 @@ list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
int ret;
for (i = 0; i < map->size; i++) {
- e = list_set_elem(map, i);
+ e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
return 0;
if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(e, map)))
+ ip_set_timeout_expired(ext_timeout(e, set)))
continue;
ret = ip_set_add(e->id, skb, par, opt);
if (ret == 0)
@@ -145,11 +114,11 @@ list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
int ret;
for (i = 0; i < map->size; i++) {
- e = list_set_elem(map, i);
+ e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
return 0;
if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(e, map)))
+ ip_set_timeout_expired(ext_timeout(e, set)))
continue;
ret = ip_set_del(e->id, skb, par, opt);
if (ret == 0)
@@ -163,8 +132,7 @@ list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
- struct list_set *map = set->data;
- struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
switch (adt) {
case IPSET_TEST:
@@ -188,10 +156,10 @@ id_eq(const struct ip_set *set, u32 i, ip_set_id_t id)
if (i >= map->size)
return 0;
- e = list_set_elem(map, i);
+ e = list_set_elem(set, map, i);
return !!(e->id == id &&
!(SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(e, map))));
+ ip_set_timeout_expired(ext_timeout(e, set))));
}
static int
@@ -199,28 +167,36 @@ list_set_add(struct ip_set *set, u32 i, struct set_adt_elem *d,
const struct ip_set_ext *ext)
{
struct list_set *map = set->data;
- struct set_elem *e = list_set_elem(map, i);
+ struct set_elem *e = list_set_elem(set, map, i);
if (e->id != IPSET_INVALID_ID) {
- if (i == map->size - 1)
+ if (i == map->size - 1) {
/* Last element replaced: e.g. add new,before,last */
- ip_set_put_byindex(e->id);
- else {
- struct set_elem *x = list_set_elem(map, map->size - 1);
+ ip_set_put_byindex(map->net, e->id);
+ ip_set_ext_destroy(set, e);
+ } else {
+ struct set_elem *x = list_set_elem(set, map,
+ map->size - 1);
/* Last element pushed off */
- if (x->id != IPSET_INVALID_ID)
- ip_set_put_byindex(x->id);
- memmove(list_set_elem(map, i + 1), e,
- map->dsize * (map->size - (i + 1)));
+ if (x->id != IPSET_INVALID_ID) {
+ ip_set_put_byindex(map->net, x->id);
+ ip_set_ext_destroy(set, x);
+ }
+ memmove(list_set_elem(set, map, i + 1), e,
+ set->dsize * (map->size - (i + 1)));
+ /* Extensions must be initialized to zero */
+ memset(e, 0, set->dsize);
}
}
e->id = d->id;
if (SET_WITH_TIMEOUT(set))
- ip_set_timeout_set(ext_timeout(e, map), ext->timeout);
+ ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
if (SET_WITH_COUNTER(set))
- ip_set_init_counter(ext_counter(e, map), ext);
+ ip_set_init_counter(ext_counter(e, set), ext);
+ if (SET_WITH_COMMENT(set))
+ ip_set_init_comment(ext_comment(e, set), ext);
return 0;
}
@@ -228,16 +204,17 @@ static int
list_set_del(struct ip_set *set, u32 i)
{
struct list_set *map = set->data;
- struct set_elem *e = list_set_elem(map, i);
+ struct set_elem *e = list_set_elem(set, map, i);
- ip_set_put_byindex(e->id);
+ ip_set_put_byindex(map->net, e->id);
+ ip_set_ext_destroy(set, e);
if (i < map->size - 1)
- memmove(e, list_set_elem(map, i + 1),
- map->dsize * (map->size - (i + 1)));
+ memmove(e, list_set_elem(set, map, i + 1),
+ set->dsize * (map->size - (i + 1)));
/* Last element */
- e = list_set_elem(map, map->size - 1);
+ e = list_set_elem(set, map, map->size - 1);
e->id = IPSET_INVALID_ID;
return 0;
}
@@ -247,13 +224,16 @@ set_cleanup_entries(struct ip_set *set)
{
struct list_set *map = set->data;
struct set_elem *e;
- u32 i;
+ u32 i = 0;
- for (i = 0; i < map->size; i++) {
- e = list_set_elem(map, i);
+ while (i < map->size) {
+ e = list_set_elem(set, map, i);
if (e->id != IPSET_INVALID_ID &&
- ip_set_timeout_expired(ext_timeout(e, map)))
+ ip_set_timeout_expired(ext_timeout(e, set)))
list_set_del(set, i);
+ /* Check element moved to position i in next loop */
+ else
+ i++;
}
}
@@ -268,11 +248,11 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
int ret;
for (i = 0; i < map->size; i++) {
- e = list_set_elem(map, i);
+ e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
return 0;
else if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(e, map)))
+ ip_set_timeout_expired(ext_timeout(e, set)))
continue;
else if (e->id != d->id)
continue;
@@ -299,14 +279,14 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
bool flag_exist = flags & IPSET_FLAG_EXIST;
u32 i, ret = 0;
+ if (SET_WITH_TIMEOUT(set))
+ set_cleanup_entries(set);
+
/* Check already added element */
for (i = 0; i < map->size; i++) {
- e = list_set_elem(map, i);
+ e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
goto insert;
- else if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(e, map)))
- continue;
else if (e->id != d->id)
continue;
@@ -319,18 +299,22 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
/* Can't re-add */
return -IPSET_ERR_EXIST;
/* Update extensions */
+ ip_set_ext_destroy(set, e);
+
if (SET_WITH_TIMEOUT(set))
- ip_set_timeout_set(ext_timeout(e, map), ext->timeout);
+ ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
if (SET_WITH_COUNTER(set))
- ip_set_init_counter(ext_counter(e, map), ext);
+ ip_set_init_counter(ext_counter(e, set), ext);
+ if (SET_WITH_COMMENT(set))
+ ip_set_init_comment(ext_comment(e, set), ext);
/* Set is already added to the list */
- ip_set_put_byindex(d->id);
+ ip_set_put_byindex(map->net, d->id);
return 0;
}
insert:
ret = -IPSET_ERR_LIST_FULL;
for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
- e = list_set_elem(map, i);
+ e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
ret = d->before != 0 ? -IPSET_ERR_REF_EXIST
: list_set_add(set, i, d, ext);
@@ -355,12 +339,12 @@ list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
u32 i;
for (i = 0; i < map->size; i++) {
- e = list_set_elem(map, i);
+ e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
return d->before != 0 ? -IPSET_ERR_REF_EXIST
: -IPSET_ERR_EXIST;
else if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(e, map)))
+ ip_set_timeout_expired(ext_timeout(e, set)))
continue;
else if (e->id != d->id)
continue;
@@ -386,7 +370,7 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
struct list_set *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct set_adt_elem e = { .refid = IPSET_INVALID_ID };
- struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
struct ip_set *s;
int ret = 0;
@@ -403,7 +387,7 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
- e.id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s);
+ e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s);
if (e.id == IPSET_INVALID_ID)
return -IPSET_ERR_NAME;
/* "Loop detection" */
@@ -423,7 +407,8 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
}
if (tb[IPSET_ATTR_NAMEREF]) {
- e.refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]),
+ e.refid = ip_set_get_byname(map->net,
+ nla_data(tb[IPSET_ATTR_NAMEREF]),
&s);
if (e.refid == IPSET_INVALID_ID) {
ret = -IPSET_ERR_NAMEREF;
@@ -439,9 +424,9 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
finish:
if (e.refid != IPSET_INVALID_ID)
- ip_set_put_byindex(e.refid);
+ ip_set_put_byindex(map->net, e.refid);
if (adt != IPSET_ADD || ret)
- ip_set_put_byindex(e.id);
+ ip_set_put_byindex(map->net, e.id);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
@@ -454,9 +439,10 @@ list_set_flush(struct ip_set *set)
u32 i;
for (i = 0; i < map->size; i++) {
- e = list_set_elem(map, i);
+ e = list_set_elem(set, map, i);
if (e->id != IPSET_INVALID_ID) {
- ip_set_put_byindex(e->id);
+ ip_set_put_byindex(map->net, e->id);
+ ip_set_ext_destroy(set, e);
e->id = IPSET_INVALID_ID;
}
}
@@ -485,14 +471,11 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
if (!nested)
goto nla_put_failure;
if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
- (SET_WITH_TIMEOUT(set) &&
- nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
- (SET_WITH_COUNTER(set) &&
- nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
- htonl(IPSET_FLAG_WITH_COUNTERS))) ||
nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
- htonl(sizeof(*map) + map->size * map->dsize)))
+ htonl(sizeof(*map) + map->size * set->dsize)))
+ goto nla_put_failure;
+ if (unlikely(ip_set_put_flags(skb, set)))
goto nla_put_failure;
ipset_nest_end(skb, nested);
@@ -515,11 +498,11 @@ list_set_list(const struct ip_set *set,
return -EMSGSIZE;
for (; cb->args[2] < map->size; cb->args[2]++) {
i = cb->args[2];
- e = list_set_elem(map, i);
+ e = list_set_elem(set, map, i);
if (e->id == IPSET_INVALID_ID)
goto finish;
if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(e, map)))
+ ip_set_timeout_expired(ext_timeout(e, set)))
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
@@ -530,15 +513,9 @@ list_set_list(const struct ip_set *set,
goto nla_put_failure;
}
if (nla_put_string(skb, IPSET_ATTR_NAME,
- ip_set_name_byindex(e->id)))
- goto nla_put_failure;
- if (SET_WITH_TIMEOUT(set) &&
- nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(
- ext_timeout(e, map)))))
+ ip_set_name_byindex(map->net, e->id)))
goto nla_put_failure;
- if (SET_WITH_COUNTER(set) &&
- ip_set_put_counter(skb, ext_counter(e, map)))
+ if (ip_set_put_extensions(skb, set, e, true))
goto nla_put_failure;
ipset_nest_end(skb, nested);
}
@@ -550,11 +527,11 @@ finish:
nla_put_failure:
nla_nest_cancel(skb, nested);
- ipset_nest_end(skb, atd);
if (unlikely(i == first)) {
cb->args[2] = 0;
return -EMSGSIZE;
}
+ ipset_nest_end(skb, atd);
return 0;
}
@@ -565,7 +542,7 @@ list_set_same_set(const struct ip_set *a, const struct ip_set *b)
const struct list_set *y = b->data;
return x->size == y->size &&
- x->timeout == y->timeout &&
+ a->timeout == b->timeout &&
a->extensions == b->extensions;
}
@@ -594,7 +571,7 @@ list_set_gc(unsigned long ul_set)
set_cleanup_entries(set);
write_unlock_bh(&set->lock);
- map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
add_timer(&map->gc);
}
@@ -606,43 +583,40 @@ list_set_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
init_timer(&map->gc);
map->gc.data = (unsigned long) set;
map->gc.function = gc;
- map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
add_timer(&map->gc);
}
/* Create list:set type of sets */
-static struct list_set *
-init_list_set(struct ip_set *set, u32 size, size_t dsize,
- unsigned long timeout)
+static bool
+init_list_set(struct net *net, struct ip_set *set, u32 size)
{
struct list_set *map;
struct set_elem *e;
u32 i;
- map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL);
+ map = kzalloc(sizeof(*map) + size * set->dsize, GFP_KERNEL);
if (!map)
- return NULL;
+ return false;
map->size = size;
- map->dsize = dsize;
- map->timeout = timeout;
+ map->net = net;
set->data = map;
for (i = 0; i < size; i++) {
- e = list_set_elem(map, i);
+ e = list_set_elem(set, map, i);
e->id = IPSET_INVALID_ID;
}
- return map;
+ return true;
}
static int
-list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+ u32 flags)
{
- struct list_set *map;
- u32 size = IP_SET_LIST_DEFAULT_SIZE, cadt_flags = 0;
- unsigned long timeout = 0;
+ u32 size = IP_SET_LIST_DEFAULT_SIZE;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
@@ -654,45 +628,13 @@ list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (size < IP_SET_LIST_MIN_SIZE)
size = IP_SET_LIST_MIN_SIZE;
- if (tb[IPSET_ATTR_CADT_FLAGS])
- cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
- if (tb[IPSET_ATTR_TIMEOUT])
- timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = &set_variant;
- if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
- set->extensions |= IPSET_EXT_COUNTER;
- if (tb[IPSET_ATTR_TIMEOUT]) {
- map = init_list_set(set, size,
- sizeof(struct setct_elem), timeout);
- if (!map)
- return -ENOMEM;
- set->extensions |= IPSET_EXT_TIMEOUT;
- map->offset[IPSET_OFFSET_TIMEOUT] =
- offsetof(struct setct_elem, timeout);
- map->offset[IPSET_OFFSET_COUNTER] =
- offsetof(struct setct_elem, counter);
- list_set_gc_init(set, list_set_gc);
- } else {
- map = init_list_set(set, size,
- sizeof(struct setc_elem), 0);
- if (!map)
- return -ENOMEM;
- map->offset[IPSET_OFFSET_COUNTER] =
- offsetof(struct setc_elem, counter);
- }
- } else if (tb[IPSET_ATTR_TIMEOUT]) {
- map = init_list_set(set, size,
- sizeof(struct sett_elem), timeout);
- if (!map)
- return -ENOMEM;
- set->extensions |= IPSET_EXT_TIMEOUT;
- map->offset[IPSET_OFFSET_TIMEOUT] =
- offsetof(struct sett_elem, timeout);
+ set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem));
+ if (!init_list_set(net, set, size))
+ return -ENOMEM;
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
list_set_gc_init(set, list_set_gc);
- } else {
- map = init_list_set(set, size, sizeof(struct set_elem), 0);
- if (!map)
- return -ENOMEM;
}
return 0;
}
@@ -703,8 +645,8 @@ static struct ip_set_type list_set_type __read_mostly = {
.features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
.dimension = IPSET_DIM_ONE,
.family = NFPROTO_UNSPEC,
- .revision_min = REVISION_MIN,
- .revision_max = REVISION_MAX,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
.create = list_set_create,
.create_policy = {
[IPSET_ATTR_SIZE] = { .type = NLA_U32 },
@@ -721,6 +663,7 @@ static struct ip_set_type list_set_type __read_mostly = {
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
},
.me = THIS_MODULE,
};
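The list:set changes above drop the per-map dsize/offset bookkeeping and the fixed sett/setc/setct element variants in favour of one set-wide element length (set->dsize, computed at create time) plus generic ext_timeout()/ext_counter()/ext_comment() accessors. The program below is a hypothetical, self-contained illustration of that layout idea only, not the kernel helpers: each element occupies one fixed-size slot, and enabled extensions live behind the base struct at offsets recorded once for the whole container.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct elem {			/* base part, always present */
	uint32_t id;
};

struct timeout_ext {		/* optional extension */
	unsigned long expires;
};

struct container {
	size_t dsize;		/* full slot size: base + enabled extensions */
	size_t timeout_off;	/* offset of the timeout extension, 0 if disabled */
	size_t nelem;
	unsigned char *slots;	/* nelem * dsize bytes */
};

static struct elem *slot(const struct container *c, size_t i)
{
	return (struct elem *)(c->slots + i * c->dsize);
}

static struct timeout_ext *ext_timeout(const struct container *c, struct elem *e)
{
	return (struct timeout_ext *)((unsigned char *)e + c->timeout_off);
}

int main(void)
{
	struct container c = { .dsize = sizeof(struct elem), .nelem = 4 };

	/* "Create" with the timeout extension enabled: align and grow dsize once. */
	c.timeout_off = (c.dsize + _Alignof(struct timeout_ext) - 1) &
			~(_Alignof(struct timeout_ext) - 1);
	c.dsize = c.timeout_off + sizeof(struct timeout_ext);

	c.slots = calloc(c.nelem, c.dsize);
	if (!c.slots)
		return 1;

	slot(&c, 2)->id = 42;
	ext_timeout(&c, slot(&c, 2))->expires = 1000;

	printf("elem 2: id=%u expires=%lu (dsize=%zu)\n",
	       slot(&c, 2)->id, ext_timeout(&c, slot(&c, 2))->expires, c.dsize);
	free(c.slots);
	return 0;
}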
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 74fd00c27210..34fda62f40f6 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1239,11 +1239,11 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
* Check if packet is reply for established ip_vs_conn.
*/
static unsigned int
-ip_vs_reply4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return ip_vs_out(hooknum, skb, AF_INET);
+ return ip_vs_out(ops->hooknum, skb, AF_INET);
}
/*
@@ -1251,11 +1251,11 @@ ip_vs_reply4(unsigned int hooknum, struct sk_buff *skb,
* Check if packet is reply for established ip_vs_conn.
*/
static unsigned int
-ip_vs_local_reply4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return ip_vs_out(hooknum, skb, AF_INET);
+ return ip_vs_out(ops->hooknum, skb, AF_INET);
}
#ifdef CONFIG_IP_VS_IPV6
@@ -1266,11 +1266,11 @@ ip_vs_local_reply4(unsigned int hooknum, struct sk_buff *skb,
* Check if packet is reply for established ip_vs_conn.
*/
static unsigned int
-ip_vs_reply6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return ip_vs_out(hooknum, skb, AF_INET6);
+ return ip_vs_out(ops->hooknum, skb, AF_INET6);
}
/*
@@ -1278,11 +1278,11 @@ ip_vs_reply6(unsigned int hooknum, struct sk_buff *skb,
* Check if packet is reply for established ip_vs_conn.
*/
static unsigned int
-ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return ip_vs_out(hooknum, skb, AF_INET6);
+ return ip_vs_out(ops->hooknum, skb, AF_INET6);
}
#endif
@@ -1733,12 +1733,12 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
* Schedule and forward packets from remote clients
*/
static unsigned int
-ip_vs_remote_request4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return ip_vs_in(hooknum, skb, AF_INET);
+ return ip_vs_in(ops->hooknum, skb, AF_INET);
}
/*
@@ -1746,11 +1746,11 @@ ip_vs_remote_request4(unsigned int hooknum, struct sk_buff *skb,
* Schedule and forward packets from local clients
*/
static unsigned int
-ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return ip_vs_in(hooknum, skb, AF_INET);
+ return ip_vs_in(ops->hooknum, skb, AF_INET);
}
#ifdef CONFIG_IP_VS_IPV6
@@ -1760,7 +1760,7 @@ ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
* Copy info from first fragment, to the rest of them.
*/
static unsigned int
-ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_preroute_frag6(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
@@ -1792,12 +1792,12 @@ ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
* Schedule and forward packets from remote clients
*/
static unsigned int
-ip_vs_remote_request6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return ip_vs_in(hooknum, skb, AF_INET6);
+ return ip_vs_in(ops->hooknum, skb, AF_INET6);
}
/*
@@ -1805,11 +1805,11 @@ ip_vs_remote_request6(unsigned int hooknum, struct sk_buff *skb,
* Schedule and forward packets from local clients
*/
static unsigned int
-ip_vs_local_request6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return ip_vs_in(hooknum, skb, AF_INET6);
+ return ip_vs_in(ops->hooknum, skb, AF_INET6);
}
#endif
@@ -1825,7 +1825,7 @@ ip_vs_local_request6(unsigned int hooknum, struct sk_buff *skb,
* and send them to ip_vs_in_icmp.
*/
static unsigned int
-ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
@@ -1842,12 +1842,12 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
return NF_ACCEPT;
- return ip_vs_in_icmp(skb, &r, hooknum);
+ return ip_vs_in_icmp(skb, &r, ops->hooknum);
}
#ifdef CONFIG_IP_VS_IPV6
static unsigned int
-ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
@@ -1866,7 +1866,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
return NF_ACCEPT;
- return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
+ return ip_vs_in_icmp_v6(skb, &r, ops->hooknum, &iphdr);
}
#endif
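The ip_vs wrappers above now receive the registered nf_hook_ops and read the hook number from ops->hooknum rather than taking it as a separate parameter. A minimal, hypothetical userspace sketch of that callback-signature pattern (stand-in types only, not the netfilter API): the per-hook identity travels with the registration record, so shared handlers stay unchanged.

#include <stdio.h>

struct sk_buff;				/* stand-in for the packet type */

struct hook_ops {
	unsigned int hooknum;		/* identity lives in the registration record */
	unsigned int (*hook)(const struct hook_ops *ops, struct sk_buff *skb);
};

static unsigned int shared_handler(unsigned int hooknum, struct sk_buff *skb)
{
	(void)skb;
	printf("handling packet at hook %u\n", hooknum);
	return 0;			/* e.g. accept */
}

static unsigned int local_in_hook(const struct hook_ops *ops, struct sk_buff *skb)
{
	return shared_handler(ops->hooknum, skb);	/* no hooknum parameter needed */
}

int main(void)
{
	struct hook_ops ops = { .hooknum = 1, .hook = local_in_hook };

	return (int)ops.hook(&ops, NULL);
}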
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index f4484719f3e6..f63c2388f38d 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1637,12 +1637,9 @@ static int sync_thread_master(void *data)
continue;
}
while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) {
- int ret = 0;
-
- __wait_event_interruptible(*sk_sleep(sk),
+ int ret = __wait_event_interruptible(*sk_sleep(sk),
sock_writeable(sk) ||
- kthread_should_stop(),
- ret);
+ kthread_should_stop());
if (unlikely(kthread_should_stop()))
goto done;
}
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index bdebd03bc8cd..70866d192efc 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -778,8 +778,8 @@ static int callforward_do_filter(const union nf_inet_addr *src,
flowi6_to_flowi(&fl1), false)) {
if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
flowi6_to_flowi(&fl2), false)) {
- if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
- sizeof(rt1->rt6i_gateway)) &&
+ if (ipv6_addr_equal(rt6_nexthop(rt1),
+ rt6_nexthop(rt2)) &&
rt1->dst.dev == rt2->dst.dev)
ret = 1;
dst_release(&rt2->dst);
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index e0c4373b4747..466410eaa482 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -52,66 +52,8 @@ module_param(sip_direct_media, int, 0600);
MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
"endpoints only (default 1)");
-unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int protoff,
- unsigned int dataoff, const char **dptr,
- unsigned int *datalen) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
-
-void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, unsigned int protoff,
- s16 off) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);
-
-unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- struct nf_conntrack_expect *exp,
- unsigned int matchoff,
- unsigned int matchlen) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);
-
-unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- unsigned int sdpoff,
- enum sdp_header_types type,
- enum sdp_header_types term,
- const union nf_inet_addr *addr)
- __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);
-
-unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- unsigned int matchoff,
- unsigned int matchlen,
- u_int16_t port) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);
-
-unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- unsigned int sdpoff,
- const union nf_inet_addr *addr)
- __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);
-
-unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- struct nf_conntrack_expect *rtp_exp,
- struct nf_conntrack_expect *rtcp_exp,
- unsigned int mediaoff,
- unsigned int medialen,
- union nf_inet_addr *rtp_addr)
- __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_media_hook);
+const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+EXPORT_SYMBOL_GPL(nf_nat_sip_hooks);
static int string_len(const struct nf_conn *ct, const char *dptr,
const char *limit, int *shift)
@@ -914,8 +856,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
int direct_rtp = 0, skip_expect = 0, ret = NF_DROP;
u_int16_t base_port;
__be16 rtp_port, rtcp_port;
- typeof(nf_nat_sdp_port_hook) nf_nat_sdp_port;
- typeof(nf_nat_sdp_media_hook) nf_nat_sdp_media;
+ const struct nf_nat_sip_hooks *hooks;
saddr = NULL;
if (sip_direct_media) {
@@ -966,22 +907,23 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
#endif
skip_expect = 1;
} while (!skip_expect);
- rcu_read_unlock();
base_port = ntohs(tuple.dst.u.udp.port) & ~1;
rtp_port = htons(base_port);
rtcp_port = htons(base_port + 1);
if (direct_rtp) {
- nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
- if (nf_nat_sdp_port &&
- !nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen,
+ hooks = rcu_dereference(nf_nat_sip_hooks);
+ if (hooks &&
+ !hooks->sdp_port(skb, protoff, dataoff, dptr, datalen,
mediaoff, medialen, ntohs(rtp_port)))
goto err1;
}
- if (skip_expect)
+ if (skip_expect) {
+ rcu_read_unlock();
return NF_ACCEPT;
+ }
rtp_exp = nf_ct_expect_alloc(ct);
if (rtp_exp == NULL)
@@ -995,10 +937,10 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
nf_ct_expect_init(rtcp_exp, class, nf_ct_l3num(ct), saddr, daddr,
IPPROTO_UDP, NULL, &rtcp_port);
- nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
- if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
- ret = nf_nat_sdp_media(skb, protoff, dataoff, dptr, datalen,
- rtp_exp, rtcp_exp,
+ hooks = rcu_dereference(nf_nat_sip_hooks);
+ if (hooks && ct->status & IPS_NAT_MASK && !direct_rtp)
+ ret = hooks->sdp_media(skb, protoff, dataoff, dptr,
+ datalen, rtp_exp, rtcp_exp,
mediaoff, medialen, daddr);
else {
if (nf_ct_expect_related(rtp_exp) == 0) {
@@ -1012,6 +954,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
err2:
nf_ct_expect_put(rtp_exp);
err1:
+ rcu_read_unlock();
return ret;
}
@@ -1051,13 +994,12 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff,
unsigned int caddr_len, maddr_len;
unsigned int i;
union nf_inet_addr caddr, maddr, rtp_addr;
+ const struct nf_nat_sip_hooks *hooks;
unsigned int port;
const struct sdp_media_type *t;
int ret = NF_ACCEPT;
- typeof(nf_nat_sdp_addr_hook) nf_nat_sdp_addr;
- typeof(nf_nat_sdp_session_hook) nf_nat_sdp_session;
- nf_nat_sdp_addr = rcu_dereference(nf_nat_sdp_addr_hook);
+ hooks = rcu_dereference(nf_nat_sip_hooks);
/* Find beginning of session description */
if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
@@ -1125,10 +1067,11 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff,
}
/* Update media connection address if present */
- if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
- ret = nf_nat_sdp_addr(skb, protoff, dataoff,
+ if (maddr_len && hooks && ct->status & IPS_NAT_MASK) {
+ ret = hooks->sdp_addr(skb, protoff, dataoff,
dptr, datalen, mediaoff,
- SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
+ SDP_HDR_CONNECTION,
+ SDP_HDR_MEDIA,
&rtp_addr);
if (ret != NF_ACCEPT) {
nf_ct_helper_log(skb, ct, "cannot mangle SDP");
@@ -1139,10 +1082,11 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff,
}
/* Update session connection and owner addresses */
- nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
- if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
- ret = nf_nat_sdp_session(skb, protoff, dataoff,
- dptr, datalen, sdpoff, &rtp_addr);
+ hooks = rcu_dereference(nf_nat_sip_hooks);
+ if (hooks && ct->status & IPS_NAT_MASK)
+ ret = hooks->sdp_session(skb, protoff, dataoff,
+ dptr, datalen, sdpoff,
+ &rtp_addr);
return ret;
}
@@ -1242,11 +1186,11 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
unsigned int matchoff, matchlen;
struct nf_conntrack_expect *exp;
union nf_inet_addr *saddr, daddr;
+ const struct nf_nat_sip_hooks *hooks;
__be16 port;
u8 proto;
unsigned int expires = 0;
int ret;
- typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect;
/* Expected connections can not register again. */
if (ct->status & IPS_EXPECTED)
@@ -1309,10 +1253,10 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
exp->helper = nfct_help(ct)->helper;
exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
- nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
- if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
- ret = nf_nat_sip_expect(skb, protoff, dataoff, dptr, datalen,
- exp, matchoff, matchlen);
+ hooks = rcu_dereference(nf_nat_sip_hooks);
+ if (hooks && ct->status & IPS_NAT_MASK)
+ ret = hooks->expect(skb, protoff, dataoff, dptr, datalen,
+ exp, matchoff, matchlen);
else {
if (nf_ct_expect_related(exp) != 0) {
nf_ct_helper_log(skb, ct, "cannot add expectation");
@@ -1515,7 +1459,7 @@ static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
unsigned int protoff, unsigned int dataoff,
const char **dptr, unsigned int *datalen)
{
- typeof(nf_nat_sip_hook) nf_nat_sip;
+ const struct nf_nat_sip_hooks *hooks;
int ret;
if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
@@ -1524,9 +1468,9 @@ static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
ret = process_sip_response(skb, protoff, dataoff, dptr, datalen);
if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
- nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
- if (nf_nat_sip && !nf_nat_sip(skb, protoff, dataoff,
- dptr, datalen)) {
+ hooks = rcu_dereference(nf_nat_sip_hooks);
+ if (hooks && !hooks->msg(skb, protoff, dataoff,
+ dptr, datalen)) {
nf_ct_helper_log(skb, ct, "cannot NAT SIP message");
ret = NF_DROP;
}
@@ -1546,7 +1490,6 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
s16 diff, tdiff = 0;
int ret = NF_ACCEPT;
bool term;
- typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED_REPLY)
@@ -1610,9 +1553,11 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
}
if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
- nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook);
- if (nf_nat_sip_seq_adjust)
- nf_nat_sip_seq_adjust(skb, protoff, tdiff);
+ const struct nf_nat_sip_hooks *hooks;
+
+ hooks = rcu_dereference(nf_nat_sip_hooks);
+ if (hooks)
+ hooks->seq_adjust(skb, protoff, tdiff);
}
return ret;
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 3deec997be89..61a3c927e63c 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -13,26 +13,20 @@
/* core.c */
-extern unsigned int nf_iterate(struct list_head *head,
- struct sk_buff *skb,
- unsigned int hook,
- const struct net_device *indev,
- const struct net_device *outdev,
- struct nf_hook_ops **elemp,
- int (*okfn)(struct sk_buff *),
- int hook_thresh);
+unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
+ unsigned int hook, const struct net_device *indev,
+ const struct net_device *outdev,
+ struct nf_hook_ops **elemp,
+ int (*okfn)(struct sk_buff *), int hook_thresh);
/* nf_queue.c */
-extern int nf_queue(struct sk_buff *skb,
- struct nf_hook_ops *elem,
- u_int8_t pf, unsigned int hook,
- struct net_device *indev,
- struct net_device *outdev,
- int (*okfn)(struct sk_buff *),
- unsigned int queuenum);
-extern int __init netfilter_queue_init(void);
+int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, u_int8_t pf,
+ unsigned int hook, struct net_device *indev,
+ struct net_device *outdev, int (*okfn)(struct sk_buff *),
+ unsigned int queuenum);
+int __init netfilter_queue_init(void);
/* nf_log.c */
-extern int __init netfilter_log_init(void);
+int __init netfilter_log_init(void);
#endif
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 6f0f4f7f68a5..63a815402211 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -432,6 +432,26 @@ nf_nat_setup_info(struct nf_conn *ct,
}
EXPORT_SYMBOL(nf_nat_setup_info);
+unsigned int
+nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+{
+ /* Force range to this IP; let proto decide mapping for
+ * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
+ * Use reply in case it's already been mangled (eg local packet).
+ */
+ union nf_inet_addr ip =
+ (HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
+ struct nf_nat_range range = {
+ .flags = NF_NAT_RANGE_MAP_IPS,
+ .min_addr = ip,
+ .max_addr = ip,
+ };
+ return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+}
+EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
+
/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index f9790405b7ff..b4d691db955e 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -625,33 +625,26 @@ static struct nf_ct_helper_expectfn sip_nat = {
static void __exit nf_nat_sip_fini(void)
{
- RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
- RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, NULL);
- RCU_INIT_POINTER(nf_nat_sip_expect_hook, NULL);
- RCU_INIT_POINTER(nf_nat_sdp_addr_hook, NULL);
- RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
- RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
- RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sip_hooks, NULL);
+
nf_ct_helper_expectfn_unregister(&sip_nat);
synchronize_rcu();
}
+static const struct nf_nat_sip_hooks sip_hooks = {
+ .msg = nf_nat_sip,
+ .seq_adjust = nf_nat_sip_seq_adjust,
+ .expect = nf_nat_sip_expect,
+ .sdp_addr = nf_nat_sdp_addr,
+ .sdp_port = nf_nat_sdp_port,
+ .sdp_session = nf_nat_sdp_session,
+ .sdp_media = nf_nat_sdp_media,
+};
+
static int __init nf_nat_sip_init(void)
{
- BUG_ON(nf_nat_sip_hook != NULL);
- BUG_ON(nf_nat_sip_seq_adjust_hook != NULL);
- BUG_ON(nf_nat_sip_expect_hook != NULL);
- BUG_ON(nf_nat_sdp_addr_hook != NULL);
- BUG_ON(nf_nat_sdp_port_hook != NULL);
- BUG_ON(nf_nat_sdp_session_hook != NULL);
- BUG_ON(nf_nat_sdp_media_hook != NULL);
- RCU_INIT_POINTER(nf_nat_sip_hook, nf_nat_sip);
- RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, nf_nat_sip_seq_adjust);
- RCU_INIT_POINTER(nf_nat_sip_expect_hook, nf_nat_sip_expect);
- RCU_INIT_POINTER(nf_nat_sdp_addr_hook, nf_nat_sdp_addr);
- RCU_INIT_POINTER(nf_nat_sdp_port_hook, nf_nat_sdp_port);
- RCU_INIT_POINTER(nf_nat_sdp_session_hook, nf_nat_sdp_session);
- RCU_INIT_POINTER(nf_nat_sdp_media_hook, nf_nat_sdp_media);
+ BUG_ON(nf_nat_sip_hooks != NULL);
+ RCU_INIT_POINTER(nf_nat_sip_hooks, &sip_hooks);
nf_ct_helper_expectfn_register(&sip_nat);
return 0;
}
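The SIP changes above fold seven separate NAT hook pointers into one RCU-published struct nf_nat_sip_hooks: the NAT module fills a static ops table and publishes a single pointer, and the conntrack helper takes one rcu_dereference() and calls through it. The sketch below is a hypothetical userspace analogue of that publish/consume pattern, using a C11 atomic pointer in place of RCU; it is not the kernel API.

#include <stdatomic.h>
#include <stdio.h>

/* Consolidated ops table, analogous in spirit to struct nf_nat_sip_hooks. */
struct sip_hooks {
	int (*msg)(const char *dptr);
	void (*seq_adjust)(int off);
};

/* Single published pointer; NULL means "no NAT helper loaded". */
static _Atomic(const struct sip_hooks *) sip_hooks_ptr;

static int nat_msg(const char *dptr)
{
	printf("mangling SIP message: %s\n", dptr);
	return 1;
}

static void nat_seq_adjust(int off)
{
	printf("adjusting sequence numbers by %d\n", off);
}

static const struct sip_hooks nat_hooks = {
	.msg = nat_msg,
	.seq_adjust = nat_seq_adjust,
};

int main(void)
{
	const struct sip_hooks *hooks;

	/* "Module init": publish the whole ops table with one store. */
	atomic_store_explicit(&sip_hooks_ptr, &nat_hooks, memory_order_release);

	/* "Helper": one load replaces seven separate hook dereferences. */
	hooks = atomic_load_explicit(&sip_hooks_ptr, memory_order_acquire);
	if (hooks && hooks->msg("INVITE sip:user@example.com"))
		hooks->seq_adjust(4);

	/* "Module exit": unpublish. */
	atomic_store_explicit(&sip_hooks_ptr, NULL, memory_order_release);
	return 0;
}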
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
new file mode 100644
index 000000000000..dcddc49c0e08
--- /dev/null
+++ b/net/netfilter/nf_tables_api.c
@@ -0,0 +1,3275 @@
+/*
+ * Copyright (c) 2007-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+static LIST_HEAD(nf_tables_expressions);
+
+/**
+ * nft_register_afinfo - register nf_tables address family info
+ *
+ * @net: network namespace to register the address family in
+ * @afi: address family info to register
+ *
+ * Register the address family for use with nf_tables. Returns zero on
+ * success or a negative errno code otherwise.
+ */
+int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
+{
+ INIT_LIST_HEAD(&afi->tables);
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ list_add_tail(&afi->list, &net->nft.af_info);
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nft_register_afinfo);
+
+/**
+ * nft_unregister_afinfo - unregister nf_tables address family info
+ *
+ * @afi: address family info to unregister
+ *
+ * Unregister the address family for use with nf_tables.
+ */
+void nft_unregister_afinfo(struct nft_af_info *afi)
+{
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ list_del(&afi->list);
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
+
+static struct nft_af_info *nft_afinfo_lookup(struct net *net, int family)
+{
+ struct nft_af_info *afi;
+
+ list_for_each_entry(afi, &net->nft.af_info, list) {
+ if (afi->family == family)
+ return afi;
+ }
+ return NULL;
+}
+
+static struct nft_af_info *
+nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
+{
+ struct nft_af_info *afi;
+
+ afi = nft_afinfo_lookup(net, family);
+ if (afi != NULL)
+ return afi;
+#ifdef CONFIG_MODULES
+ if (autoload) {
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ request_module("nft-afinfo-%u", family);
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ afi = nft_afinfo_lookup(net, family);
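+ /* Found after module load: return -EAGAIN so the request is
+ * retried with the freshly registered afinfo in place.
+ */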
+ if (afi != NULL)
+ return ERR_PTR(-EAGAIN);
+ }
+#endif
+ return ERR_PTR(-EAFNOSUPPORT);
+}
+
+/*
+ * Tables
+ */
+
+static struct nft_table *nft_table_lookup(const struct nft_af_info *afi,
+ const struct nlattr *nla)
+{
+ struct nft_table *table;
+
+ list_for_each_entry(table, &afi->tables, list) {
+ if (!nla_strcmp(nla, table->name))
+ return table;
+ }
+ return NULL;
+}
+
+static struct nft_table *nf_tables_table_lookup(const struct nft_af_info *afi,
+ const struct nlattr *nla)
+{
+ struct nft_table *table;
+
+ if (nla == NULL)
+ return ERR_PTR(-EINVAL);
+
+ table = nft_table_lookup(afi, nla);
+ if (table != NULL)
+ return table;
+
+ return ERR_PTR(-ENOENT);
+}
+
+static inline u64 nf_tables_alloc_handle(struct nft_table *table)
+{
+ return ++table->hgenerator;
+}
+
+static struct nf_chain_type *chain_type[AF_MAX][NFT_CHAIN_T_MAX];
+
+static int __nf_tables_chain_type_lookup(int family, const struct nlattr *nla)
+{
+ int i;
+
+ for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
+ if (chain_type[family][i] != NULL &&
+ !nla_strcmp(nla, chain_type[family][i]->name))
+ return i;
+ }
+ return -1;
+}
+
+static int nf_tables_chain_type_lookup(const struct nft_af_info *afi,
+ const struct nlattr *nla,
+ bool autoload)
+{
+ int type;
+
+ type = __nf_tables_chain_type_lookup(afi->family, nla);
+#ifdef CONFIG_MODULES
+ if (type < 0 && autoload) {
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ request_module("nft-chain-%u-%*.s", afi->family,
+ nla_len(nla)-1, (const char *)nla_data(nla));
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ type = __nf_tables_chain_type_lookup(afi->family, nla);
+ }
+#endif
+ return type;
+}
+
+static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
+ [NFTA_TABLE_NAME] = { .type = NLA_STRING },
+ [NFTA_TABLE_FLAGS] = { .type = NLA_U32 },
+};
+
+static int nf_tables_fill_table_info(struct sk_buff *skb, u32 portid, u32 seq,
+ int event, u32 flags, int family,
+ const struct nft_table *table)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+
+ event |= NFNL_SUBSYS_NFTABLES << 8;
+ nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
+ if (nlh == NULL)
+ goto nla_put_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = family;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
+ nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)))
+ goto nla_put_failure;
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_trim(skb, nlh);
+ return -1;
+}
+
+static int nf_tables_table_notify(const struct sk_buff *oskb,
+ const struct nlmsghdr *nlh,
+ const struct nft_table *table,
+ int event, int family)
+{
+ struct sk_buff *skb;
+ u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+ u32 seq = nlh ? nlh->nlmsg_seq : 0;
+ struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
+ bool report;
+ int err;
+
+ report = nlh ? nlmsg_report(nlh) : false;
+ if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+ return 0;
+
+ err = -ENOBUFS;
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (skb == NULL)
+ goto err;
+
+ err = nf_tables_fill_table_info(skb, portid, seq, event, 0,
+ family, table);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto err;
+ }
+
+ err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
+ GFP_KERNEL);
+err:
+ if (err < 0)
+ nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+ return err;
+}
+
+static int nf_tables_dump_tables(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+ const struct nft_af_info *afi;
+ const struct nft_table *table;
+ unsigned int idx = 0, s_idx = cb->args[0];
+ struct net *net = sock_net(skb->sk);
+ int family = nfmsg->nfgen_family;
+
+ list_for_each_entry(afi, &net->nft.af_info, list) {
+ if (family != NFPROTO_UNSPEC && family != afi->family)
+ continue;
+
+ list_for_each_entry(table, &afi->tables, list) {
+ if (idx < s_idx)
+ goto cont;
+ if (idx > s_idx)
+ memset(&cb->args[1], 0,
+ sizeof(cb->args) - sizeof(cb->args[0]));
+ if (nf_tables_fill_table_info(skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NFT_MSG_NEWTABLE,
+ NLM_F_MULTI,
+ afi->family, table) < 0)
+ goto done;
+cont:
+ idx++;
+ }
+ }
+done:
+ cb->args[0] = idx;
+ return skb->len;
+}
+
+static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ const struct nft_af_info *afi;
+ const struct nft_table *table;
+ struct sk_buff *skb2;
+ struct net *net = sock_net(skb->sk);
+ int family = nfmsg->nfgen_family;
+ int err;
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = nf_tables_dump_tables,
+ };
+ return netlink_dump_start(nlsk, skb, nlh, &c);
+ }
+
+ afi = nf_tables_afinfo_lookup(net, family, false);
+ if (IS_ERR(afi))
+ return PTR_ERR(afi);
+
+ table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb2)
+ return -ENOMEM;
+
+ err = nf_tables_fill_table_info(skb2, NETLINK_CB(skb).portid,
+ nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
+ family, table);
+ if (err < 0)
+ goto err;
+
+ return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+ kfree_skb(skb2);
+ return err;
+}
+
+static int nf_tables_table_enable(struct nft_table *table)
+{
+ struct nft_chain *chain;
+ int err, i = 0;
+
+ list_for_each_entry(chain, &table->chains, list) {
+ err = nf_register_hook(&nft_base_chain(chain)->ops);
+ if (err < 0)
+ goto err;
+
+ i++;
+ }
+ return 0;
+err:
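+ /* Unwind: unregister only the hooks that were registered before
+ * the failure.
+ */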
+ list_for_each_entry(chain, &table->chains, list) {
+ if (i-- <= 0)
+ break;
+
+ nf_unregister_hook(&nft_base_chain(chain)->ops);
+ }
+ return err;
+}
+
+static int nf_tables_table_disable(struct nft_table *table)
+{
+ struct nft_chain *chain;
+
+ list_for_each_entry(chain, &table->chains, list)
+ nf_unregister_hook(&nft_base_chain(chain)->ops);
+
+ return 0;
+}
+
+static int nf_tables_updtable(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[],
+ struct nft_af_info *afi, struct nft_table *table)
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ int family = nfmsg->nfgen_family, ret = 0;
+
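+ /* Toggling the dormant flag unregisters or re-registers the base
+ * chain hooks of this table.
+ */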
+ if (nla[NFTA_TABLE_FLAGS]) {
+ u32 flags;
+
+ flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
+ if (flags & ~NFT_TABLE_F_DORMANT)
+ return -EINVAL;
+
+ if ((flags & NFT_TABLE_F_DORMANT) &&
+ !(table->flags & NFT_TABLE_F_DORMANT)) {
+ ret = nf_tables_table_disable(table);
+ if (ret >= 0)
+ table->flags |= NFT_TABLE_F_DORMANT;
+ } else if (!(flags & NFT_TABLE_F_DORMANT) &&
+ table->flags & NFT_TABLE_F_DORMANT) {
+ ret = nf_tables_table_enable(table);
+ if (ret >= 0)
+ table->flags &= ~NFT_TABLE_F_DORMANT;
+ }
+ if (ret < 0)
+ goto err;
+ }
+
+ nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
+err:
+ return ret;
+}
+
+static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ const struct nlattr *name;
+ struct nft_af_info *afi;
+ struct nft_table *table;
+ struct net *net = sock_net(skb->sk);
+ int family = nfmsg->nfgen_family;
+
+ afi = nf_tables_afinfo_lookup(net, family, true);
+ if (IS_ERR(afi))
+ return PTR_ERR(afi);
+
+ name = nla[NFTA_TABLE_NAME];
+ table = nf_tables_table_lookup(afi, name);
+ if (IS_ERR(table)) {
+ if (PTR_ERR(table) != -ENOENT)
+ return PTR_ERR(table);
+ table = NULL;
+ }
+
+ if (table != NULL) {
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+ if (nlh->nlmsg_flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
+ return nf_tables_updtable(nlsk, skb, nlh, nla, afi, table);
+ }
+
+ table = kzalloc(sizeof(*table) + nla_len(name), GFP_KERNEL);
+ if (table == NULL)
+ return -ENOMEM;
+
+ nla_strlcpy(table->name, name, nla_len(name));
+ INIT_LIST_HEAD(&table->chains);
+ INIT_LIST_HEAD(&table->sets);
+
+ if (nla[NFTA_TABLE_FLAGS]) {
+ u32 flags;
+
+ flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
+ if (flags & ~NFT_TABLE_F_DORMANT) {
+ kfree(table);
+ return -EINVAL;
+ }
+
+ table->flags |= flags;
+ }
+
+ list_add_tail(&table->list, &afi->tables);
+ nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
+ return 0;
+}
+
+static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ struct nft_af_info *afi;
+ struct nft_table *table;
+ struct net *net = sock_net(skb->sk);
+ int family = nfmsg->nfgen_family;
+
+ afi = nf_tables_afinfo_lookup(net, family, false);
+ if (IS_ERR(afi))
+ return PTR_ERR(afi);
+
+ table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ if (table->use)
+ return -EBUSY;
+
+ list_del(&table->list);
+ nf_tables_table_notify(skb, nlh, table, NFT_MSG_DELTABLE, family);
+ kfree(table);
+ return 0;
+}
+
+int nft_register_chain_type(struct nf_chain_type *ctype)
+{
+ int err = 0;
+
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ if (chain_type[ctype->family][ctype->type] != NULL) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (!try_module_get(ctype->me))
+ goto out;
+
+ chain_type[ctype->family][ctype->type] = ctype;
+out:
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ return err;
+}
+EXPORT_SYMBOL_GPL(nft_register_chain_type);
+
+void nft_unregister_chain_type(struct nf_chain_type *ctype)
+{
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ chain_type[ctype->family][ctype->type] = NULL;
+ module_put(ctype->me);
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_chain_type);
+
+/*
+ * Chains
+ */
+
+static struct nft_chain *
+nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle)
+{
+ struct nft_chain *chain;
+
+ list_for_each_entry(chain, &table->chains, list) {
+ if (chain->handle == handle)
+ return chain;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
+ const struct nlattr *nla)
+{
+ struct nft_chain *chain;
+
+ if (nla == NULL)
+ return ERR_PTR(-EINVAL);
+
+ list_for_each_entry(chain, &table->chains, list) {
+ if (!nla_strcmp(nla, chain->name))
+ return chain;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
+ [NFTA_CHAIN_TABLE] = { .type = NLA_STRING },
+ [NFTA_CHAIN_HANDLE] = { .type = NLA_U64 },
+ [NFTA_CHAIN_NAME] = { .type = NLA_STRING,
+ .len = NFT_CHAIN_MAXNAMELEN - 1 },
+ [NFTA_CHAIN_HOOK] = { .type = NLA_NESTED },
+ [NFTA_CHAIN_POLICY] = { .type = NLA_U32 },
+ [NFTA_CHAIN_TYPE] = { .type = NLA_NUL_STRING },
+ [NFTA_CHAIN_COUNTERS] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy nft_hook_policy[NFTA_HOOK_MAX + 1] = {
+ [NFTA_HOOK_HOOKNUM] = { .type = NLA_U32 },
+ [NFTA_HOOK_PRIORITY] = { .type = NLA_U32 },
+};
+
+static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
+{
+ struct nft_stats *cpu_stats, total;
+ struct nlattr *nest;
+ int cpu;
+
+ memset(&total, 0, sizeof(total));
+ for_each_possible_cpu(cpu) {
+ cpu_stats = per_cpu_ptr(stats, cpu);
+ total.pkts += cpu_stats->pkts;
+ total.bytes += cpu_stats->bytes;
+ }
+ nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
+ if (nest == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts)) ||
+ nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static int nf_tables_fill_chain_info(struct sk_buff *skb, u32 portid, u32 seq,
+ int event, u32 flags, int family,
+ const struct nft_table *table,
+ const struct nft_chain *chain)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+
+ event |= NFNL_SUBSYS_NFTABLES << 8;
+ nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
+ if (nlh == NULL)
+ goto nla_put_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = family;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name))
+ goto nla_put_failure;
+ if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle)))
+ goto nla_put_failure;
+ if (nla_put_string(skb, NFTA_CHAIN_NAME, chain->name))
+ goto nla_put_failure;
+
+ if (chain->flags & NFT_BASE_CHAIN) {
+ const struct nft_base_chain *basechain = nft_base_chain(chain);
+ const struct nf_hook_ops *ops = &basechain->ops;
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
+ if (nest == NULL)
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_HOOK_HOOKNUM, htonl(ops->hooknum)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority)))
+ goto nla_put_failure;
+ nla_nest_end(skb, nest);
+
+ if (nla_put_be32(skb, NFTA_CHAIN_POLICY,
+ htonl(basechain->policy)))
+ goto nla_put_failure;
+
+ if (nla_put_string(skb, NFTA_CHAIN_TYPE,
+ chain_type[ops->pf][nft_base_chain(chain)->type]->name))
+ goto nla_put_failure;
+
+ if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
+ goto nla_put_failure;
+ }
+
+ if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
+ goto nla_put_failure;
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_trim(skb, nlh);
+ return -1;
+}
+
+static int nf_tables_chain_notify(const struct sk_buff *oskb,
+ const struct nlmsghdr *nlh,
+ const struct nft_table *table,
+ const struct nft_chain *chain,
+ int event, int family)
+{
+ struct sk_buff *skb;
+ u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+ struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
+ u32 seq = nlh ? nlh->nlmsg_seq : 0;
+ bool report;
+ int err;
+
+ report = nlh ? nlmsg_report(nlh) : false;
+ if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+ return 0;
+
+ err = -ENOBUFS;
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (skb == NULL)
+ goto err;
+
+ err = nf_tables_fill_chain_info(skb, portid, seq, event, 0, family,
+ table, chain);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto err;
+ }
+
+ err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
+ GFP_KERNEL);
+err:
+ if (err < 0)
+ nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+ return err;
+}
+
+static int nf_tables_dump_chains(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+ const struct nft_af_info *afi;
+ const struct nft_table *table;
+ const struct nft_chain *chain;
+ unsigned int idx = 0, s_idx = cb->args[0];
+ struct net *net = sock_net(skb->sk);
+ int family = nfmsg->nfgen_family;
+
+ list_for_each_entry(afi, &net->nft.af_info, list) {
+ if (family != NFPROTO_UNSPEC && family != afi->family)
+ continue;
+
+ list_for_each_entry(table, &afi->tables, list) {
+ list_for_each_entry(chain, &table->chains, list) {
+ if (idx < s_idx)
+ goto cont;
+ if (idx > s_idx)
+ memset(&cb->args[1], 0,
+ sizeof(cb->args) - sizeof(cb->args[0]));
+ if (nf_tables_fill_chain_info(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NFT_MSG_NEWCHAIN,
+ NLM_F_MULTI,
+ afi->family, table, chain) < 0)
+ goto done;
+cont:
+ idx++;
+ }
+ }
+ }
+done:
+ cb->args[0] = idx;
+ return skb->len;
+}
+
+static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ const struct nft_af_info *afi;
+ const struct nft_table *table;
+ const struct nft_chain *chain;
+ struct sk_buff *skb2;
+ struct net *net = sock_net(skb->sk);
+ int family = nfmsg->nfgen_family;
+ int err;
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = nf_tables_dump_chains,
+ };
+ return netlink_dump_start(nlsk, skb, nlh, &c);
+ }
+
+ afi = nf_tables_afinfo_lookup(net, family, false);
+ if (IS_ERR(afi))
+ return PTR_ERR(afi);
+
+ table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
+ if (IS_ERR(chain))
+ return PTR_ERR(chain);
+
+ skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb2)
+ return -ENOMEM;
+
+ err = nf_tables_fill_chain_info(skb2, NETLINK_CB(skb).portid,
+ nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0,
+ family, table, chain);
+ if (err < 0)
+ goto err;
+
+ return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+ kfree_skb(skb2);
+ return err;
+}
+
+static int
+nf_tables_chain_policy(struct nft_base_chain *chain, const struct nlattr *attr)
+{
+ switch (ntohl(nla_get_be32(attr))) {
+ case NF_DROP:
+ chain->policy = NF_DROP;
+ break;
+ case NF_ACCEPT:
+ chain->policy = NF_ACCEPT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
+ [NFTA_COUNTER_PACKETS] = { .type = NLA_U64 },
+ [NFTA_COUNTER_BYTES] = { .type = NLA_U64 },
+};
+
+static int
+nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
+{
+ struct nlattr *tb[NFTA_COUNTER_MAX+1];
+ struct nft_stats __percpu *newstats;
+ struct nft_stats *stats;
+ int err;
+
+ err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
+ return -EINVAL;
+
+ newstats = alloc_percpu(struct nft_stats);
+ if (newstats == NULL)
+ return -ENOMEM;
+
+ /* Restoring the old counters on this cpu only is fine: per-cpu
+ * statistics are never exposed to userspace, only their sum is.
+ */
+ stats = this_cpu_ptr(newstats);
+ stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+ stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+
+ if (chain->stats) {
+ /* nfnl_lock is held, add some nfnl function for this, later */
+ struct nft_stats __percpu *oldstats =
+ rcu_dereference_protected(chain->stats, 1);
+
+ rcu_assign_pointer(chain->stats, newstats);
+ synchronize_rcu();
+ free_percpu(oldstats);
+ } else
+ rcu_assign_pointer(chain->stats, newstats);
+
+ return 0;
+}
+
+static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ const struct nlattr * uninitialized_var(name);
+ const struct nft_af_info *afi;
+ struct nft_table *table;
+ struct nft_chain *chain;
+ struct nft_base_chain *basechain = NULL;
+ struct nlattr *ha[NFTA_HOOK_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ int family = nfmsg->nfgen_family;
+ u64 handle = 0;
+ int err;
+ bool create;
+
+ create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+
+ afi = nf_tables_afinfo_lookup(net, family, true);
+ if (IS_ERR(afi))
+ return PTR_ERR(afi);
+
+ table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ if (table->use == UINT_MAX)
+ return -EOVERFLOW;
+
+ chain = NULL;
+ name = nla[NFTA_CHAIN_NAME];
+
+ if (nla[NFTA_CHAIN_HANDLE]) {
+ handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE]));
+ chain = nf_tables_chain_lookup_byhandle(table, handle);
+ if (IS_ERR(chain))
+ return PTR_ERR(chain);
+ } else {
+ chain = nf_tables_chain_lookup(table, name);
+ if (IS_ERR(chain)) {
+ if (PTR_ERR(chain) != -ENOENT)
+ return PTR_ERR(chain);
+ chain = NULL;
+ }
+ }
+
+ if (chain != NULL) {
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+ if (nlh->nlmsg_flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
+
+ if (nla[NFTA_CHAIN_HANDLE] && name &&
+ !IS_ERR(nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME])))
+ return -EEXIST;
+
+ if (nla[NFTA_CHAIN_POLICY]) {
+ if (!(chain->flags & NFT_BASE_CHAIN))
+ return -EOPNOTSUPP;
+
+ err = nf_tables_chain_policy(nft_base_chain(chain),
+ nla[NFTA_CHAIN_POLICY]);
+ if (err < 0)
+ return err;
+ }
+
+ if (nla[NFTA_CHAIN_COUNTERS]) {
+ if (!(chain->flags & NFT_BASE_CHAIN))
+ return -EOPNOTSUPP;
+
+ err = nf_tables_counters(nft_base_chain(chain),
+ nla[NFTA_CHAIN_COUNTERS]);
+ if (err < 0)
+ return err;
+ }
+
+ if (nla[NFTA_CHAIN_HANDLE] && name)
+ nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
+
+ goto notify;
+ }
+
+ if (nla[NFTA_CHAIN_HOOK]) {
+ struct nf_hook_ops *ops;
+ nf_hookfn *hookfn;
+ u32 hooknum;
+ int type = NFT_CHAIN_T_DEFAULT;
+
+ if (nla[NFTA_CHAIN_TYPE]) {
+ type = nf_tables_chain_type_lookup(afi,
+ nla[NFTA_CHAIN_TYPE],
+ create);
+ if (type < 0)
+ return -ENOENT;
+ }
+
+ err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK],
+ nft_hook_policy);
+ if (err < 0)
+ return err;
+ if (ha[NFTA_HOOK_HOOKNUM] == NULL ||
+ ha[NFTA_HOOK_PRIORITY] == NULL)
+ return -EINVAL;
+
+ hooknum = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
+ if (hooknum >= afi->nhooks)
+ return -EINVAL;
+
+ hookfn = chain_type[family][type]->fn[hooknum];
+ if (hookfn == NULL)
+ return -EOPNOTSUPP;
+
+ basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
+ if (basechain == NULL)
+ return -ENOMEM;
+
+ basechain->type = type;
+ chain = &basechain->chain;
+
+ ops = &basechain->ops;
+ ops->pf = family;
+ ops->owner = afi->owner;
+ ops->hooknum = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
+ ops->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
+ ops->priv = chain;
+ ops->hook = hookfn;
+ if (afi->hooks[ops->hooknum])
+ ops->hook = afi->hooks[ops->hooknum];
+
+ chain->flags |= NFT_BASE_CHAIN;
+
+ if (nla[NFTA_CHAIN_POLICY]) {
+ err = nf_tables_chain_policy(basechain,
+ nla[NFTA_CHAIN_POLICY]);
+ if (err < 0) {
+ free_percpu(basechain->stats);
+ kfree(basechain);
+ return err;
+ }
+ } else
+ basechain->policy = NF_ACCEPT;
+
+ if (nla[NFTA_CHAIN_COUNTERS]) {
+ err = nf_tables_counters(basechain,
+ nla[NFTA_CHAIN_COUNTERS]);
+ if (err < 0) {
+ free_percpu(basechain->stats);
+ kfree(basechain);
+ return err;
+ }
+ } else {
+ struct nft_stats __percpu *newstats;
+
+ newstats = alloc_percpu(struct nft_stats);
+ if (newstats == NULL) {
+ kfree(basechain);
+ return -ENOMEM;
+ }
+
+ rcu_assign_pointer(nft_base_chain(chain)->stats,
+ newstats);
+ }
+ } else {
+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ if (chain == NULL)
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&chain->rules);
+ chain->handle = nf_tables_alloc_handle(table);
+ chain->net = net;
+ chain->table = table;
+ nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
+
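+ /* Base chains of an active table hook into netfilter right away;
+ * dormant tables register their hooks when they are re-enabled.
+ */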
+ if (!(table->flags & NFT_TABLE_F_DORMANT) &&
+ chain->flags & NFT_BASE_CHAIN) {
+ err = nf_register_hook(&nft_base_chain(chain)->ops);
+ if (err < 0) {
+ free_percpu(basechain->stats);
+ kfree(basechain);
+ return err;
+ }
+ }
+ list_add_tail(&chain->list, &table->chains);
+ table->use++;
+notify:
+ nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_NEWCHAIN,
+ family);
+ return 0;
+}
+
+static void nf_tables_rcu_chain_destroy(struct rcu_head *head)
+{
+ struct nft_chain *chain = container_of(head, struct nft_chain, rcu_head);
+
+ BUG_ON(chain->use > 0);
+
+ if (chain->flags & NFT_BASE_CHAIN) {
+ free_percpu(nft_base_chain(chain)->stats);
+ kfree(nft_base_chain(chain));
+ } else
+ kfree(chain);
+}
+
+static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ const struct nft_af_info *afi;
+ struct nft_table *table;
+ struct nft_chain *chain;
+ struct net *net = sock_net(skb->sk);
+ int family = nfmsg->nfgen_family;
+
+ afi = nf_tables_afinfo_lookup(net, family, false);
+ if (IS_ERR(afi))
+ return PTR_ERR(afi);
+
+ table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
+ if (IS_ERR(chain))
+ return PTR_ERR(chain);
+
+ if (!list_empty(&chain->rules))
+ return -EBUSY;
+
+ list_del(&chain->list);
+ table->use--;
+
+ if (!(table->flags & NFT_TABLE_F_DORMANT) &&
+ chain->flags & NFT_BASE_CHAIN)
+ nf_unregister_hook(&nft_base_chain(chain)->ops);
+
+ nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_DELCHAIN,
+ family);
+
+ /* Make sure all rule references are gone before this is released */
+ call_rcu(&chain->rcu_head, nf_tables_rcu_chain_destroy);
+ return 0;
+}
+
+static void nft_ctx_init(struct nft_ctx *ctx,
+ const struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nft_af_info *afi,
+ const struct nft_table *table,
+ const struct nft_chain *chain,
+ const struct nlattr * const *nla)
+{
+ ctx->net = sock_net(skb->sk);
+ ctx->skb = skb;
+ ctx->nlh = nlh;
+ ctx->afi = afi;
+ ctx->table = table;
+ ctx->chain = chain;
+ ctx->nla = nla;
+}
+
+/*
+ * Expressions
+ */
+
+/**
+ * nft_register_expr - register nf_tables expr type
+ * @type: expr type
+ *
+ * Registers the expr type for use with nf_tables. Returns zero on
+ * success or a negative errno code otherwise.
+ */
+int nft_register_expr(struct nft_expr_type *type)
+{
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ list_add_tail(&type->list, &nf_tables_expressions);
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nft_register_expr);
+
+/**
+ * nft_unregister_expr - unregister nf_tables expr type
+ * @type: expr type
+ *
+ * Unregisters the expr type for use with nf_tables.
+ */
+void nft_unregister_expr(struct nft_expr_type *type)
+{
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ list_del(&type->list);
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_expr);
+
+static const struct nft_expr_type *__nft_expr_type_get(struct nlattr *nla)
+{
+ const struct nft_expr_type *type;
+
+ list_for_each_entry(type, &nf_tables_expressions, list) {
+ if (!nla_strcmp(nla, type->name))
+ return type;
+ }
+ return NULL;
+}
+
+static const struct nft_expr_type *nft_expr_type_get(struct nlattr *nla)
+{
+ const struct nft_expr_type *type;
+
+ if (nla == NULL)
+ return ERR_PTR(-EINVAL);
+
+ type = __nft_expr_type_get(nla);
+ if (type != NULL && try_module_get(type->owner))
+ return type;
+
+#ifdef CONFIG_MODULES
+ if (type == NULL) {
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ request_module("nft-expr-%.*s",
+ nla_len(nla), (char *)nla_data(nla));
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ if (__nft_expr_type_get(nla))
+ return ERR_PTR(-EAGAIN);
+ }
+#endif
+ return ERR_PTR(-ENOENT);
+}
+
+static const struct nla_policy nft_expr_policy[NFTA_EXPR_MAX + 1] = {
+ [NFTA_EXPR_NAME] = { .type = NLA_STRING },
+ [NFTA_EXPR_DATA] = { .type = NLA_NESTED },
+};
+
+static int nf_tables_fill_expr_info(struct sk_buff *skb,
+ const struct nft_expr *expr)
+{
+ if (nla_put_string(skb, NFTA_EXPR_NAME, expr->ops->type->name))
+ goto nla_put_failure;
+
+ if (expr->ops->dump) {
+ struct nlattr *data = nla_nest_start(skb, NFTA_EXPR_DATA);
+ if (data == NULL)
+ goto nla_put_failure;
+ if (expr->ops->dump(skb, expr) < 0)
+ goto nla_put_failure;
+ nla_nest_end(skb, data);
+ }
+
+ return skb->len;
+
+nla_put_failure:
+ return -1;
+}
+
+struct nft_expr_info {
+ const struct nft_expr_ops *ops;
+ struct nlattr *tb[NFT_EXPR_MAXATTR + 1];
+};
+
+static int nf_tables_expr_parse(const struct nft_ctx *ctx,
+ const struct nlattr *nla,
+ struct nft_expr_info *info)
+{
+ const struct nft_expr_type *type;
+ const struct nft_expr_ops *ops;
+ struct nlattr *tb[NFTA_EXPR_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, NFTA_EXPR_MAX, nla, nft_expr_policy);
+ if (err < 0)
+ return err;
+
+ type = nft_expr_type_get(tb[NFTA_EXPR_NAME]);
+ if (IS_ERR(type))
+ return PTR_ERR(type);
+
+ if (tb[NFTA_EXPR_DATA]) {
+ err = nla_parse_nested(info->tb, type->maxattr,
+ tb[NFTA_EXPR_DATA], type->policy);
+ if (err < 0)
+ goto err1;
+ } else
+ memset(info->tb, 0, sizeof(info->tb[0]) * (type->maxattr + 1));
+
+ if (type->select_ops != NULL) {
+ ops = type->select_ops(ctx,
+ (const struct nlattr * const *)info->tb);
+ if (IS_ERR(ops)) {
+ err = PTR_ERR(ops);
+ goto err1;
+ }
+ } else
+ ops = type->ops;
+
+ info->ops = ops;
+ return 0;
+
+err1:
+ module_put(type->owner);
+ return err;
+}
+
+static int nf_tables_newexpr(const struct nft_ctx *ctx,
+ const struct nft_expr_info *info,
+ struct nft_expr *expr)
+{
+ const struct nft_expr_ops *ops = info->ops;
+ int err;
+
+ expr->ops = ops;
+ if (ops->init) {
+ err = ops->init(ctx, expr, (const struct nlattr **)info->tb);
+ if (err < 0)
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ expr->ops = NULL;
+ return err;
+}
+
+static void nf_tables_expr_destroy(struct nft_expr *expr)
+{
+ if (expr->ops->destroy)
+ expr->ops->destroy(expr);
+ module_put(expr->ops->type->owner);
+}
+
+/*
+ * Rules
+ */
+
+static struct nft_rule *__nf_tables_rule_lookup(const struct nft_chain *chain,
+ u64 handle)
+{
+ struct nft_rule *rule;
+
+ // FIXME: this sucks
+ list_for_each_entry(rule, &chain->rules, list) {
+ if (handle == rule->handle)
+ return rule;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
+ const struct nlattr *nla)
+{
+ if (nla == NULL)
+ return ERR_PTR(-EINVAL);
+
+ return __nf_tables_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla)));
+}
+
+static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
+ [NFTA_RULE_TABLE] = { .type = NLA_STRING },
+ [NFTA_RULE_CHAIN] = { .type = NLA_STRING,
+ .len = NFT_CHAIN_MAXNAMELEN - 1 },
+ [NFTA_RULE_HANDLE] = { .type = NLA_U64 },
+ [NFTA_RULE_EXPRESSIONS] = { .type = NLA_NESTED },
+ [NFTA_RULE_COMPAT] = { .type = NLA_NESTED },
+ [NFTA_RULE_POSITION] = { .type = NLA_U64 },
+};
+
+static int nf_tables_fill_rule_info(struct sk_buff *skb, u32 portid, u32 seq,
+ int event, u32 flags, int family,
+ const struct nft_table *table,
+ const struct nft_chain *chain,
+ const struct nft_rule *rule)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ const struct nft_expr *expr, *next;
+ struct nlattr *list;
+ const struct nft_rule *prule;
+ int type = event | NFNL_SUBSYS_NFTABLES << 8;
+
+ nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg),
+ flags);
+ if (nlh == NULL)
+ goto nla_put_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = family;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ if (nla_put_string(skb, NFTA_RULE_TABLE, table->name))
+ goto nla_put_failure;
+ if (nla_put_string(skb, NFTA_RULE_CHAIN, chain->name))
+ goto nla_put_failure;
+ if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle)))
+ goto nla_put_failure;
+
+ if ((event != NFT_MSG_DELRULE) && (rule->list.prev != &chain->rules)) {
+ prule = list_entry(rule->list.prev, struct nft_rule, list);
+ if (nla_put_be64(skb, NFTA_RULE_POSITION,
+ cpu_to_be64(prule->handle)))
+ goto nla_put_failure;
+ }
+
+ list = nla_nest_start(skb, NFTA_RULE_EXPRESSIONS);
+ if (list == NULL)
+ goto nla_put_failure;
+ nft_rule_for_each_expr(expr, next, rule) {
+ struct nlattr *elem = nla_nest_start(skb, NFTA_LIST_ELEM);
+ if (elem == NULL)
+ goto nla_put_failure;
+ if (nf_tables_fill_expr_info(skb, expr) < 0)
+ goto nla_put_failure;
+ nla_nest_end(skb, elem);
+ }
+ nla_nest_end(skb, list);
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_trim(skb, nlh);
+ return -1;
+}
+
+static int nf_tables_rule_notify(const struct sk_buff *oskb,
+ const struct nlmsghdr *nlh,
+ const struct nft_table *table,
+ const struct nft_chain *chain,
+ const struct nft_rule *rule,
+ int event, u32 flags, int family)
+{
+ struct sk_buff *skb;
+ u32 portid = NETLINK_CB(oskb).portid;
+ struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
+ u32 seq = nlh->nlmsg_seq;
+ bool report;
+ int err;
+
+ report = nlmsg_report(nlh);
+ if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+ return 0;
+
+ err = -ENOBUFS;
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (skb == NULL)
+ goto err;
+
+ err = nf_tables_fill_rule_info(skb, portid, seq, event, flags,
+ family, table, chain, rule);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto err;
+ }
+
+ err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
+ GFP_KERNEL);
+err:
+ if (err < 0)
+ nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+ return err;
+}
+
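+/*
+ * Rules carry a two-bit generation mask: a cleared bit means the rule is
+ * active in that generation. New rules are staged with the current
+ * generation's bit set (invisible until commit), deletions set the next
+ * generation's bit. Flipping the generation cursor at commit time then
+ * activates all staged changes atomically.
+ */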
+static inline bool
+nft_rule_is_active(struct net *net, const struct nft_rule *rule)
+{
+ return (rule->genmask & (1 << net->nft.gencursor)) == 0;
+}
+
+static inline int gencursor_next(struct net *net)
+{
+ return net->nft.gencursor+1 == 1 ? 1 : 0;
+}
+
+static inline int
+nft_rule_is_active_next(struct net *net, const struct nft_rule *rule)
+{
+ return (rule->genmask & (1 << gencursor_next(net))) == 0;
+}
+
+static inline void
+nft_rule_activate_next(struct net *net, struct nft_rule *rule)
+{
+ /* Now inactive, will be active in the future */
+ rule->genmask = (1 << net->nft.gencursor);
+}
+
+static inline void
+nft_rule_disactivate_next(struct net *net, struct nft_rule *rule)
+{
+ rule->genmask = (1 << gencursor_next(net));
+}
+
+static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
+{
+ rule->genmask = 0;
+}
+
+static int nf_tables_dump_rules(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+ const struct nft_af_info *afi;
+ const struct nft_table *table;
+ const struct nft_chain *chain;
+ const struct nft_rule *rule;
+ unsigned int idx = 0, s_idx = cb->args[0];
+ struct net *net = sock_net(skb->sk);
+ int family = nfmsg->nfgen_family;
+ u8 genctr = ACCESS_ONCE(net->nft.genctr);
+ u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
+
+ list_for_each_entry(afi, &net->nft.af_info, list) {
+ if (family != NFPROTO_UNSPEC && family != afi->family)
+ continue;
+
+ list_for_each_entry(table, &afi->tables, list) {
+ list_for_each_entry(chain, &table->chains, list) {
+ list_for_each_entry(rule, &chain->rules, list) {
+ if (!nft_rule_is_active(net, rule))
+ goto cont;
+ if (idx < s_idx)
+ goto cont;
+ if (idx > s_idx)
+ memset(&cb->args[1], 0,
+ sizeof(cb->args) - sizeof(cb->args[0]));
+ if (nf_tables_fill_rule_info(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NFT_MSG_NEWRULE,
+ NLM_F_MULTI | NLM_F_APPEND,
+ afi->family, table, chain, rule) < 0)
+ goto done;
+cont:
+ idx++;
+ }
+ }
+ }
+ }
+done:
+ /* Invalidate this dump, a transition to the new generation happened */
+ if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
+ return -EBUSY;
+
+ cb->args[0] = idx;
+ return skb->len;
+}
+
+static int nf_tables_getrule(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ const struct nft_af_info *afi;
+ const struct nft_table *table;
+ const struct nft_chain *chain;
+ const struct nft_rule *rule;
+ struct sk_buff *skb2;
+ struct net *net = sock_net(skb->sk);
+ int family = nfmsg->nfgen_family;
+ int err;
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = nf_tables_dump_rules,
+ };
+ return netlink_dump_start(nlsk, skb, nlh, &c);
+ }
+
+ afi = nf_tables_afinfo_lookup(net, family, false);
+ if (IS_ERR(afi))
+ return PTR_ERR(afi);
+
+ table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+ if (IS_ERR(chain))
+ return PTR_ERR(chain);
+
+ rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
+
+ skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb2)
+ return -ENOMEM;
+
+ err = nf_tables_fill_rule_info(skb2, NETLINK_CB(skb).portid,
+ nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
+ family, table, chain, rule);
+ if (err < 0)
+ goto err;
+
+ return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+ kfree_skb(skb2);
+ return err;
+}
+
+static void nf_tables_rcu_rule_destroy(struct rcu_head *head)
+{
+ struct nft_rule *rule = container_of(head, struct nft_rule, rcu_head);
+ struct nft_expr *expr;
+
+ /*
+ * Careful: some expressions might not be initialized in case this
+ * is called on error from nf_tables_newrule().
+ */
+ expr = nft_expr_first(rule);
+ while (expr->ops && expr != nft_expr_last(rule)) {
+ nf_tables_expr_destroy(expr);
+ expr = nft_expr_next(expr);
+ }
+ kfree(rule);
+}
+
+static void nf_tables_rule_destroy(struct nft_rule *rule)
+{
+ call_rcu(&rule->rcu_head, nf_tables_rcu_rule_destroy);
+}
+
+#define NFT_RULE_MAXEXPRS 128
+
+static struct nft_expr_info *info;
+
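+/* Queue a rule transaction on the per-netns commit list so that the change
+ * can later be applied by nf_tables_commit() or undone by nf_tables_abort().
+ */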
+static struct nft_rule_trans *
+nf_tables_trans_add(struct nft_rule *rule, const struct nft_ctx *ctx)
+{
+ struct nft_rule_trans *rupd;
+
+ rupd = kmalloc(sizeof(struct nft_rule_trans), GFP_KERNEL);
+ if (rupd == NULL)
+ return NULL;
+
+ rupd->chain = ctx->chain;
+ rupd->table = ctx->table;
+ rupd->rule = rule;
+ rupd->family = ctx->afi->family;
+ rupd->nlh = ctx->nlh;
+ list_add_tail(&rupd->list, &ctx->net->nft.commit_list);
+
+ return rupd;
+}
+
+static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ const struct nft_af_info *afi;
+ struct net *net = sock_net(skb->sk);
+ struct nft_table *table;
+ struct nft_chain *chain;
+ struct nft_rule *rule, *old_rule = NULL;
+ struct nft_rule_trans *repl = NULL;
+ struct nft_expr *expr;
+ struct nft_ctx ctx;
+ struct nlattr *tmp;
+ unsigned int size, i, n;
+ int err, rem;
+ bool create;
+ u64 handle, pos_handle;
+
+ create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+
+ afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
+ if (IS_ERR(afi))
+ return PTR_ERR(afi);
+
+ table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+ if (IS_ERR(chain))
+ return PTR_ERR(chain);
+
+ if (nla[NFTA_RULE_HANDLE]) {
+ handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
+ rule = __nf_tables_rule_lookup(chain, handle);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
+
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+ if (nlh->nlmsg_flags & NLM_F_REPLACE)
+ old_rule = rule;
+ else
+ return -EOPNOTSUPP;
+ } else {
+ if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
+ return -EINVAL;
+ handle = nf_tables_alloc_handle(table);
+ }
+
+ if (nla[NFTA_RULE_POSITION]) {
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+ return -EOPNOTSUPP;
+
+ pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
+ old_rule = __nf_tables_rule_lookup(chain, pos_handle);
+ if (IS_ERR(old_rule))
+ return PTR_ERR(old_rule);
+ }
+
+ nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+
+ n = 0;
+ size = 0;
+ if (nla[NFTA_RULE_EXPRESSIONS]) {
+ nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS], rem) {
+ err = -EINVAL;
+ if (nla_type(tmp) != NFTA_LIST_ELEM)
+ goto err1;
+ if (n == NFT_RULE_MAXEXPRS)
+ goto err1;
+ err = nf_tables_expr_parse(&ctx, tmp, &info[n]);
+ if (err < 0)
+ goto err1;
+ size += info[n].ops->size;
+ n++;
+ }
+ }
+
+ err = -ENOMEM;
+ rule = kzalloc(sizeof(*rule) + size, GFP_KERNEL);
+ if (rule == NULL)
+ goto err1;
+
+ nft_rule_activate_next(net, rule);
+
+ rule->handle = handle;
+ rule->dlen = size;
+
+ expr = nft_expr_first(rule);
+ for (i = 0; i < n; i++) {
+ err = nf_tables_newexpr(&ctx, &info[i], expr);
+ if (err < 0)
+ goto err2;
+ info[i].ops = NULL;
+ expr = nft_expr_next(expr);
+ }
+
+ if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+ if (nft_rule_is_active_next(net, old_rule)) {
+ repl = nf_tables_trans_add(old_rule, &ctx);
+ if (repl == NULL) {
+ err = -ENOMEM;
+ goto err2;
+ }
+ nft_rule_disactivate_next(net, old_rule);
+ list_add_tail(&rule->list, &old_rule->list);
+ } else {
+ err = -ENOENT;
+ goto err2;
+ }
+ } else if (nlh->nlmsg_flags & NLM_F_APPEND)
+ if (old_rule)
+ list_add_rcu(&rule->list, &old_rule->list);
+ else
+ list_add_tail_rcu(&rule->list, &chain->rules);
+ } else {
+ if (old_rule)
+ list_add_tail_rcu(&rule->list, &old_rule->list);
+ else
+ list_add_rcu(&rule->list, &chain->rules);
+ }
+
+ if (nf_tables_trans_add(rule, &ctx) == NULL) {
+ err = -ENOMEM;
+ goto err3;
+ }
+ return 0;
+
+err3:
+ list_del_rcu(&rule->list);
+ if (repl) {
+ list_del_rcu(&repl->rule->list);
+ list_del(&repl->list);
+ nft_rule_clear(net, repl->rule);
+ kfree(repl);
+ }
+err2:
+ nf_tables_rule_destroy(rule);
+err1:
+ for (i = 0; i < n; i++) {
+ if (info[i].ops != NULL)
+ module_put(info[i].ops->type->owner);
+ }
+ return err;
+}
+
+static int
+nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
+{
+ /* You cannot delete the same rule twice */
+ if (nft_rule_is_active_next(ctx->net, rule)) {
+ if (nf_tables_trans_add(rule, ctx) == NULL)
+ return -ENOMEM;
+ nft_rule_disactivate_next(ctx->net, rule);
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ const struct nft_af_info *afi;
+ struct net *net = sock_net(skb->sk);
+ const struct nft_table *table;
+ struct nft_chain *chain;
+ struct nft_rule *rule, *tmp;
+ int family = nfmsg->nfgen_family, err = 0;
+ struct nft_ctx ctx;
+
+ afi = nf_tables_afinfo_lookup(net, family, false);
+ if (IS_ERR(afi))
+ return PTR_ERR(afi);
+
+ table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+ if (IS_ERR(chain))
+ return PTR_ERR(chain);
+
+ nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+
+ if (nla[NFTA_RULE_HANDLE]) {
+ rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
+
+ err = nf_tables_delrule_one(&ctx, rule);
+ } else {
+ /* Remove all rules in this chain */
+ list_for_each_entry_safe(rule, tmp, &chain->rules, list) {
+ err = nf_tables_delrule_one(&ctx, rule);
+ if (err < 0)
+ break;
+ }
+ }
+
+ return err;
+}
+
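+/* Apply all rule changes queued on the commit list: bumping the generation
+ * counter and flipping the cursor makes every staged change visible at once,
+ * after which the rules left behind in the old generation are released.
+ */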
+static int nf_tables_commit(struct sk_buff *skb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct nft_rule_trans *rupd, *tmp;
+
+ /* Bump generation counter, invalidate any dump in progress */
+ net->nft.genctr++;
+
+ /* A new generation has just started */
+ net->nft.gencursor = gencursor_next(net);
+
+ /* Make sure all packets have left the previous generation before
+ * purging old rules.
+ */
+ synchronize_rcu();
+
+ list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
+ /* Delete this rule from the dirty list */
+ list_del(&rupd->list);
+
+ /* This rule was inactive in the past and just became active.
+ * Clear the next bit of the genmask since its meaning has
+ * changed, now it is the future.
+ */
+ if (nft_rule_is_active(net, rupd->rule)) {
+ nft_rule_clear(net, rupd->rule);
+ nf_tables_rule_notify(skb, rupd->nlh, rupd->table,
+ rupd->chain, rupd->rule,
+ NFT_MSG_NEWRULE, 0,
+ rupd->family);
+ kfree(rupd);
+ continue;
+ }
+
+ /* This rule is in the past, get rid of it */
+ list_del_rcu(&rupd->rule->list);
+ nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain,
+ rupd->rule, NFT_MSG_DELRULE, 0,
+ rupd->family);
+ nf_tables_rule_destroy(rupd->rule);
+ kfree(rupd);
+ }
+
+ return 0;
+}
+
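+/* Roll back all queued rule changes: rules staged for deletion become fully
+ * active again, while rules that only existed in the next generation are
+ * unlinked and destroyed.
+ */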
+static int nf_tables_abort(struct sk_buff *skb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct nft_rule_trans *rupd, *tmp;
+
+ list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
+ /* Delete all rules from the dirty list */
+ list_del(&rupd->list);
+
+ if (!nft_rule_is_active_next(net, rupd->rule)) {
+ nft_rule_clear(net, rupd->rule);
+ kfree(rupd);
+ continue;
+ }
+
+ /* This rule is inactive, get rid of it */
+ list_del_rcu(&rupd->rule->list);
+ nf_tables_rule_destroy(rupd->rule);
+ kfree(rupd);
+ }
+ return 0;
+}
+
+/*
+ * Sets
+ */
+
+static LIST_HEAD(nf_tables_set_ops);
+
+int nft_register_set(struct nft_set_ops *ops)
+{
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ list_add_tail(&ops->list, &nf_tables_set_ops);
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nft_register_set);
+
+void nft_unregister_set(struct nft_set_ops *ops)
+{
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ list_del(&ops->list);
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_set);
+
+static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const nla[])
+{
+ const struct nft_set_ops *ops;
+ u32 features;
+
+#ifdef CONFIG_MODULES
+ if (list_empty(&nf_tables_set_ops)) {
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ request_module("nft-set");
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ if (!list_empty(&nf_tables_set_ops))
+ return ERR_PTR(-EAGAIN);
+ }
+#endif
+ features = 0;
+ if (nla[NFTA_SET_FLAGS] != NULL) {
+ features = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
+ features &= NFT_SET_INTERVAL | NFT_SET_MAP;
+ }
+
+ // FIXME: implement selection properly
+ list_for_each_entry(ops, &nf_tables_set_ops, list) {
+ if ((ops->features & features) != features)
+ continue;
+ if (!try_module_get(ops->owner))
+ continue;
+ return ops;
+ }
+
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
+ [NFTA_SET_TABLE] = { .type = NLA_STRING },
+ [NFTA_SET_NAME] = { .type = NLA_STRING },
+ [NFTA_SET_FLAGS] = { .type = NLA_U32 },
+ [NFTA_SET_KEY_TYPE] = { .type = NLA_U32 },
+ [NFTA_SET_KEY_LEN] = { .type = NLA_U32 },
+ [NFTA_SET_DATA_TYPE] = { .type = NLA_U32 },
+ [NFTA_SET_DATA_LEN] = { .type = NLA_U32 },
+};
+
+static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
+ const struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ struct net *net = sock_net(skb->sk);
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ const struct nft_af_info *afi;
+ const struct nft_table *table = NULL;
+
+ afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
+ if (IS_ERR(afi))
+ return PTR_ERR(afi);
+
+ if (nla[NFTA_SET_TABLE] != NULL) {
+ table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+ }
+
+ nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
+ return 0;
+}
+
+struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
+ const struct nlattr *nla)
+{
+ struct nft_set *set;
+
+ if (nla == NULL)
+ return ERR_PTR(-EINVAL);
+
+ list_for_each_entry(set, &table->sets, list) {
+ if (!nla_strcmp(nla, set->name))
+ return set;
+ }
+ return ERR_PTR(-ENOENT);
+}
+
+static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
+ const char *name)
+{
+ const struct nft_set *i;
+ const char *p;
+ unsigned long *inuse;
+ unsigned int n = 0;
+
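+ /* A name template with a single "%d" gets the lowest unused index:
+ * indices already claimed by existing sets are collected in a
+ * page-sized bitmap and the first free one is picked.
+ */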
+ p = strnchr(name, IFNAMSIZ, '%');
+ if (p != NULL) {
+ if (p[1] != 'd' || strchr(p + 2, '%'))
+ return -EINVAL;
+
+ inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ if (inuse == NULL)
+ return -ENOMEM;
+
+ list_for_each_entry(i, &ctx->table->sets, list) {
+ if (!sscanf(i->name, name, &n))
+ continue;
+ if (n < 0 || n > BITS_PER_LONG * PAGE_SIZE)
+ continue;
+ set_bit(n, inuse);
+ }
+
+ n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
+ free_page((unsigned long)inuse);
+ }
+
+ snprintf(set->name, sizeof(set->name), name, n);
+ list_for_each_entry(i, &ctx->table->sets, list) {
+ if (!strcmp(set->name, i->name))
+ return -ENFILE;
+ }
+ return 0;
+}
+
+static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ const struct nft_set *set, u16 event, u16 flags)
+{
+ struct nfgenmsg *nfmsg;
+ struct nlmsghdr *nlh;
+ u32 portid = NETLINK_CB(ctx->skb).portid;
+ u32 seq = ctx->nlh->nlmsg_seq;
+
+ event |= NFNL_SUBSYS_NFTABLES << 8;
+ nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
+ flags);
+ if (nlh == NULL)
+ goto nla_put_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = ctx->afi->family;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
+ goto nla_put_failure;
+ if (nla_put_string(skb, NFTA_SET_NAME, set->name))
+ goto nla_put_failure;
+ if (set->flags != 0)
+ if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
+ goto nla_put_failure;
+
+ if (nla_put_be32(skb, NFTA_SET_KEY_TYPE, htonl(set->ktype)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_SET_KEY_LEN, htonl(set->klen)))
+ goto nla_put_failure;
+ if (set->flags & NFT_SET_MAP) {
+ if (nla_put_be32(skb, NFTA_SET_DATA_TYPE, htonl(set->dtype)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_SET_DATA_LEN, htonl(set->dlen)))
+ goto nla_put_failure;
+ }
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_trim(skb, nlh);
+ return -1;
+}
+
+static int nf_tables_set_notify(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ int event)
+{
+ struct sk_buff *skb;
+ u32 portid = NETLINK_CB(ctx->skb).portid;
+ bool report;
+ int err;
+
+ report = nlmsg_report(ctx->nlh);
+ if (!report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
+ return 0;
+
+ err = -ENOBUFS;
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (skb == NULL)
+ goto err;
+
+ err = nf_tables_fill_set(skb, ctx, set, event, 0);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto err;
+ }
+
+ err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, report,
+ GFP_KERNEL);
+err:
+ if (err < 0)
+ nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err);
+ return err;
+}
+
+static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct nft_set *set;
+ unsigned int idx = 0, s_idx = cb->args[0];
+
+ if (cb->args[1])
+ return skb->len;
+
+ list_for_each_entry(set, &ctx->table->sets, list) {
+ if (idx < s_idx)
+ goto cont;
+ if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
+ NLM_F_MULTI) < 0) {
+ cb->args[0] = idx;
+ goto done;
+ }
+cont:
+ idx++;
+ }
+ cb->args[1] = 1;
+done:
+ return skb->len;
+}
+
+static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct nft_set *set;
+ unsigned int idx = 0, s_idx = cb->args[0];
+ struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
+
+ if (cb->args[1])
+ return skb->len;
+
+ list_for_each_entry(table, &ctx->afi->tables, list) {
+ if (cur_table && cur_table != table)
+ continue;
+
+ ctx->table = table;
+ list_for_each_entry(set, &ctx->table->sets, list) {
+ if (idx < s_idx)
+ goto cont;
+ if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
+ NLM_F_MULTI) < 0) {
+ cb->args[0] = idx;
+ cb->args[2] = (unsigned long) table;
+ goto done;
+ }
+cont:
+ idx++;
+ }
+ }
+ cb->args[1] = 1;
+done:
+ return skb->len;
+}
+
+static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+ struct nlattr *nla[NFTA_SET_MAX + 1];
+ struct nft_ctx ctx;
+ int err, ret;
+
+ err = nlmsg_parse(cb->nlh, sizeof(*nfmsg), nla, NFTA_SET_MAX,
+ nft_set_policy);
+ if (err < 0)
+ return err;
+
+ err = nft_ctx_init_from_setattr(&ctx, cb->skb, cb->nlh, (void *)nla);
+ if (err < 0)
+ return err;
+
+ if (ctx.table == NULL)
+ ret = nf_tables_dump_sets_all(&ctx, skb, cb);
+ else
+ ret = nf_tables_dump_sets_table(&ctx, skb, cb);
+
+ return ret;
+}
+
+static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nft_set *set;
+ struct nft_ctx ctx;
+ struct sk_buff *skb2;
+ int err;
+
+ /* Verify existence before starting dump */
+ err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
+ if (err < 0)
+ return err;
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = nf_tables_dump_sets,
+ };
+ return netlink_dump_start(nlsk, skb, nlh, &c);
+ }
+
+ set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+
+ skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
+ if (err < 0)
+ goto err;
+
+ return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+ kfree_skb(skb2);
+ return err;
+}
+
+static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ const struct nft_set_ops *ops;
+ const struct nft_af_info *afi;
+ struct net *net = sock_net(skb->sk);
+ struct nft_table *table;
+ struct nft_set *set;
+ struct nft_ctx ctx;
+ char name[IFNAMSIZ];
+ unsigned int size;
+ bool create;
+ u32 ktype, klen, dlen, dtype, flags;
+ int err;
+
+ if (nla[NFTA_SET_TABLE] == NULL ||
+ nla[NFTA_SET_NAME] == NULL ||
+ nla[NFTA_SET_KEY_LEN] == NULL)
+ return -EINVAL;
+
+ ktype = NFT_DATA_VALUE;
+ if (nla[NFTA_SET_KEY_TYPE] != NULL) {
+ ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
+ if ((ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK)
+ return -EINVAL;
+ }
+
+ klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
+ if (klen == 0 || klen > FIELD_SIZEOF(struct nft_data, data))
+ return -EINVAL;
+
+ flags = 0;
+ if (nla[NFTA_SET_FLAGS] != NULL) {
+ flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
+ if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
+ NFT_SET_INTERVAL | NFT_SET_MAP))
+ return -EINVAL;
+ }
+
+ dtype = 0;
+ dlen = 0;
+ if (nla[NFTA_SET_DATA_TYPE] != NULL) {
+ if (!(flags & NFT_SET_MAP))
+ return -EINVAL;
+
+ dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
+ if ((dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
+ dtype != NFT_DATA_VERDICT)
+ return -EINVAL;
+
+ if (dtype != NFT_DATA_VERDICT) {
+ if (nla[NFTA_SET_DATA_LEN] == NULL)
+ return -EINVAL;
+ dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
+ if (dlen == 0 ||
+ dlen > FIELD_SIZEOF(struct nft_data, data))
+ return -EINVAL;
+ } else
+ dlen = sizeof(struct nft_data);
+ } else if (flags & NFT_SET_MAP)
+ return -EINVAL;
+
+ create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+
+ afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
+ if (IS_ERR(afi))
+ return PTR_ERR(afi);
+
+ table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+
+ set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]);
+ if (IS_ERR(set)) {
+ if (PTR_ERR(set) != -ENOENT)
+ return PTR_ERR(set);
+ set = NULL;
+ }
+
+ if (set != NULL) {
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+ if (nlh->nlmsg_flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
+ return 0;
+ }
+
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+ return -ENOENT;
+
+ ops = nft_select_set_ops(nla);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ size = 0;
+ if (ops->privsize != NULL)
+ size = ops->privsize(nla);
+
+ err = -ENOMEM;
+ set = kzalloc(sizeof(*set) + size, GFP_KERNEL);
+ if (set == NULL)
+ goto err1;
+
+ nla_strlcpy(name, nla[NFTA_SET_NAME], sizeof(set->name));
+ err = nf_tables_set_alloc_name(&ctx, set, name);
+ if (err < 0)
+ goto err2;
+
+ INIT_LIST_HEAD(&set->bindings);
+ set->ops = ops;
+ set->ktype = ktype;
+ set->klen = klen;
+ set->dtype = dtype;
+ set->dlen = dlen;
+ set->flags = flags;
+
+ err = ops->init(set, nla);
+ if (err < 0)
+ goto err2;
+
+ list_add_tail(&set->list, &table->sets);
+ nf_tables_set_notify(&ctx, set, NFT_MSG_NEWSET);
+ return 0;
+
+err2:
+ kfree(set);
+err1:
+ module_put(ops->owner);
+ return err;
+}
+
+static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
+{
+ list_del(&set->list);
+ if (!(set->flags & NFT_SET_ANONYMOUS))
+ nf_tables_set_notify(ctx, set, NFT_MSG_DELSET);
+
+ set->ops->destroy(set);
+ module_put(set->ops->owner);
+ kfree(set);
+}
+
+static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ struct nft_set *set;
+ struct nft_ctx ctx;
+ int err;
+
+ if (nla[NFTA_SET_TABLE] == NULL)
+ return -EINVAL;
+
+ err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
+ if (err < 0)
+ return err;
+
+ set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+ if (!list_empty(&set->bindings))
+ return -EBUSY;
+
+ nf_tables_set_destroy(&ctx, set);
+ return 0;
+}
+
+static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ const struct nft_set_iter *iter,
+ const struct nft_set_elem *elem)
+{
+ enum nft_registers dreg;
+
+ dreg = nft_type_to_reg(set->dtype);
+ return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype);
+}
+
+int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding)
+{
+ struct nft_set_binding *i;
+ struct nft_set_iter iter;
+
+ if (!list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
+ return -EBUSY;
+
+ if (set->flags & NFT_SET_MAP) {
+ /* If the set is already bound to the same chain, all
+ * jumps are already validated for that chain.
+ */
+ list_for_each_entry(i, &set->bindings, list) {
+ if (i->chain == binding->chain)
+ goto bind;
+ }
+
+ iter.skip = 0;
+ iter.count = 0;
+ iter.err = 0;
+ iter.fn = nf_tables_bind_check_setelem;
+
+ set->ops->walk(ctx, set, &iter);
+ if (iter.err < 0) {
+ /* Destroy anonymous sets if binding fails */
+ if (set->flags & NFT_SET_ANONYMOUS)
+ nf_tables_set_destroy(ctx, set);
+
+ return iter.err;
+ }
+ }
+bind:
+ binding->chain = ctx->chain;
+ list_add_tail(&binding->list, &set->bindings);
+ return 0;
+}
+
+void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding)
+{
+ list_del(&binding->list);
+
+ if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
+ nf_tables_set_destroy(ctx, set);
+}
+
+/*
+ * Set elements
+ */
+
+static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
+ [NFTA_SET_ELEM_KEY] = { .type = NLA_NESTED },
+ [NFTA_SET_ELEM_DATA] = { .type = NLA_NESTED },
+ [NFTA_SET_ELEM_FLAGS] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
+ [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING },
+ [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING },
+ [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED },
+};
+
+static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
+ const struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ const struct nft_af_info *afi;
+ const struct nft_table *table;
+ struct net *net = sock_net(skb->sk);
+
+ afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
+ if (IS_ERR(afi))
+ return PTR_ERR(afi);
+
+ table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
+ return 0;
+}
+
+static int nf_tables_fill_setelem(struct sk_buff *skb,
+ const struct nft_set *set,
+ const struct nft_set_elem *elem)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, NFTA_LIST_ELEM);
+ if (nest == NULL)
+ goto nla_put_failure;
+
+ if (nft_data_dump(skb, NFTA_SET_ELEM_KEY, &elem->key, NFT_DATA_VALUE,
+ set->klen) < 0)
+ goto nla_put_failure;
+
+ if (set->flags & NFT_SET_MAP &&
+ !(elem->flags & NFT_SET_ELEM_INTERVAL_END) &&
+ nft_data_dump(skb, NFTA_SET_ELEM_DATA, &elem->data,
+ set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
+ set->dlen) < 0)
+ goto nla_put_failure;
+
+ if (elem->flags != 0)
+ if (nla_put_be32(skb, NFTA_SET_ELEM_FLAGS, htonl(elem->flags)))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -EMSGSIZE;
+}
+
+struct nft_set_dump_args {
+ const struct netlink_callback *cb;
+ struct nft_set_iter iter;
+ struct sk_buff *skb;
+};
+
+static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ const struct nft_set_iter *iter,
+ const struct nft_set_elem *elem)
+{
+ struct nft_set_dump_args *args;
+
+ args = container_of(iter, struct nft_set_dump_args, iter);
+ return nf_tables_fill_setelem(args->skb, set, elem);
+}
+
+static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct nft_set *set;
+ struct nft_set_dump_args args;
+ struct nft_ctx ctx;
+ struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
+ struct nfgenmsg *nfmsg;
+ struct nlmsghdr *nlh;
+ struct nlattr *nest;
+ u32 portid, seq;
+ int event, err;
+
+ nfmsg = nlmsg_data(cb->nlh);
+ err = nlmsg_parse(cb->nlh, sizeof(*nfmsg), nla, NFTA_SET_ELEM_LIST_MAX,
+ nft_set_elem_list_policy);
+ if (err < 0)
+ return err;
+
+ err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla);
+ if (err < 0)
+ return err;
+
+ set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+
+ event = NFT_MSG_NEWSETELEM;
+ event |= NFNL_SUBSYS_NFTABLES << 8;
+ portid = NETLINK_CB(cb->skb).portid;
+ seq = cb->nlh->nlmsg_seq;
+
+ nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
+ NLM_F_MULTI);
+ if (nlh == NULL)
+ goto nla_put_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = NFPROTO_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, ctx.table->name))
+ goto nla_put_failure;
+ if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name))
+ goto nla_put_failure;
+
+ nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
+ if (nest == NULL)
+ goto nla_put_failure;
+
+ args.cb = cb;
+ args.skb = skb;
+ args.iter.skip = cb->args[0];
+ args.iter.count = 0;
+ args.iter.err = 0;
+ args.iter.fn = nf_tables_dump_setelem;
+ set->ops->walk(&ctx, set, &args.iter);
+
+ nla_nest_end(skb, nest);
+ nlmsg_end(skb, nlh);
+
+ if (args.iter.err && args.iter.err != -EMSGSIZE)
+ return args.iter.err;
+ if (args.iter.count == cb->args[0])
+ return 0;
+
+ cb->args[0] = args.iter.count;
+ return skb->len;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nft_set *set;
+ struct nft_ctx ctx;
+ int err;
+
+ err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+ if (err < 0)
+ return err;
+
+ set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = nf_tables_dump_set,
+ };
+ return netlink_dump_start(nlsk, skb, nlh, &c);
+ }
+ return -EOPNOTSUPP;
+}
+
+static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
+ const struct nlattr *attr)
+{
+ struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
+ struct nft_data_desc d1, d2;
+ struct nft_set_elem elem;
+ struct nft_set_binding *binding;
+ enum nft_registers dreg;
+ int err;
+
+ err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
+ nft_set_elem_policy);
+ if (err < 0)
+ return err;
+
+ if (nla[NFTA_SET_ELEM_KEY] == NULL)
+ return -EINVAL;
+
+ elem.flags = 0;
+ if (nla[NFTA_SET_ELEM_FLAGS] != NULL) {
+ elem.flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
+ if (elem.flags & ~NFT_SET_ELEM_INTERVAL_END)
+ return -EINVAL;
+ }
+
+ if (set->flags & NFT_SET_MAP) {
+ if (nla[NFTA_SET_ELEM_DATA] == NULL &&
+ !(elem.flags & NFT_SET_ELEM_INTERVAL_END))
+ return -EINVAL;
+ } else {
+ if (nla[NFTA_SET_ELEM_DATA] != NULL)
+ return -EINVAL;
+ }
+
+ err = nft_data_init(ctx, &elem.key, &d1, nla[NFTA_SET_ELEM_KEY]);
+ if (err < 0)
+ goto err1;
+ err = -EINVAL;
+ if (d1.type != NFT_DATA_VALUE || d1.len != set->klen)
+ goto err2;
+
+ err = -EEXIST;
+ if (set->ops->get(set, &elem) == 0)
+ goto err2;
+
+ if (nla[NFTA_SET_ELEM_DATA] != NULL) {
+ err = nft_data_init(ctx, &elem.data, &d2, nla[NFTA_SET_ELEM_DATA]);
+ if (err < 0)
+ goto err2;
+
+ err = -EINVAL;
+ if (set->dtype != NFT_DATA_VERDICT && d2.len != set->dlen)
+ goto err3;
+
+ dreg = nft_type_to_reg(set->dtype);
+ list_for_each_entry(binding, &set->bindings, list) {
+ struct nft_ctx bind_ctx = {
+ .afi = ctx->afi,
+ .table = ctx->table,
+ .chain = binding->chain,
+ };
+
+ err = nft_validate_data_load(&bind_ctx, dreg,
+ &elem.data, d2.type);
+ if (err < 0)
+ goto err3;
+ }
+ }
+
+ err = set->ops->insert(set, &elem);
+ if (err < 0)
+ goto err3;
+
+ return 0;
+
+err3:
+ if (nla[NFTA_SET_ELEM_DATA] != NULL)
+ nft_data_uninit(&elem.data, d2.type);
+err2:
+ nft_data_uninit(&elem.key, d1.type);
+err1:
+ return err;
+}
+
+static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nlattr *attr;
+ struct nft_set *set;
+ struct nft_ctx ctx;
+ int rem, err;
+
+ err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+ if (err < 0)
+ return err;
+
+ set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+ if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+ return -EBUSY;
+
+ nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
+ err = nft_add_set_elem(&ctx, set, attr);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+static int nft_del_setelem(const struct nft_ctx *ctx, struct nft_set *set,
+ const struct nlattr *attr)
+{
+ struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
+ struct nft_data_desc desc;
+ struct nft_set_elem elem;
+ int err;
+
+ err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
+ nft_set_elem_policy);
+ if (err < 0)
+ goto err1;
+
+ err = -EINVAL;
+ if (nla[NFTA_SET_ELEM_KEY] == NULL)
+ goto err1;
+
+ err = nft_data_init(ctx, &elem.key, &desc, nla[NFTA_SET_ELEM_KEY]);
+ if (err < 0)
+ goto err1;
+
+ err = -EINVAL;
+ if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
+ goto err2;
+
+ err = set->ops->get(set, &elem);
+ if (err < 0)
+ goto err2;
+
+ set->ops->remove(set, &elem);
+
+ nft_data_uninit(&elem.key, NFT_DATA_VALUE);
+ if (set->flags & NFT_SET_MAP)
+ nft_data_uninit(&elem.data, set->dtype);
+
+err2:
+ nft_data_uninit(&elem.key, desc.type);
+err1:
+ return err;
+}
+
+static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ const struct nlattr *attr;
+ struct nft_set *set;
+ struct nft_ctx ctx;
+ int rem, err;
+
+ err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+ if (err < 0)
+ return err;
+
+ set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+ if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+ return -EBUSY;
+
+ nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
+ err = nft_del_setelem(&ctx, set, attr);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
+ [NFT_MSG_NEWTABLE] = {
+ .call = nf_tables_newtable,
+ .attr_count = NFTA_TABLE_MAX,
+ .policy = nft_table_policy,
+ },
+ [NFT_MSG_GETTABLE] = {
+ .call = nf_tables_gettable,
+ .attr_count = NFTA_TABLE_MAX,
+ .policy = nft_table_policy,
+ },
+ [NFT_MSG_DELTABLE] = {
+ .call = nf_tables_deltable,
+ .attr_count = NFTA_TABLE_MAX,
+ .policy = nft_table_policy,
+ },
+ [NFT_MSG_NEWCHAIN] = {
+ .call = nf_tables_newchain,
+ .attr_count = NFTA_CHAIN_MAX,
+ .policy = nft_chain_policy,
+ },
+ [NFT_MSG_GETCHAIN] = {
+ .call = nf_tables_getchain,
+ .attr_count = NFTA_CHAIN_MAX,
+ .policy = nft_chain_policy,
+ },
+ [NFT_MSG_DELCHAIN] = {
+ .call = nf_tables_delchain,
+ .attr_count = NFTA_CHAIN_MAX,
+ .policy = nft_chain_policy,
+ },
+ [NFT_MSG_NEWRULE] = {
+ .call_batch = nf_tables_newrule,
+ .attr_count = NFTA_RULE_MAX,
+ .policy = nft_rule_policy,
+ },
+ [NFT_MSG_GETRULE] = {
+ .call = nf_tables_getrule,
+ .attr_count = NFTA_RULE_MAX,
+ .policy = nft_rule_policy,
+ },
+ [NFT_MSG_DELRULE] = {
+ .call_batch = nf_tables_delrule,
+ .attr_count = NFTA_RULE_MAX,
+ .policy = nft_rule_policy,
+ },
+ [NFT_MSG_NEWSET] = {
+ .call = nf_tables_newset,
+ .attr_count = NFTA_SET_MAX,
+ .policy = nft_set_policy,
+ },
+ [NFT_MSG_GETSET] = {
+ .call = nf_tables_getset,
+ .attr_count = NFTA_SET_MAX,
+ .policy = nft_set_policy,
+ },
+ [NFT_MSG_DELSET] = {
+ .call = nf_tables_delset,
+ .attr_count = NFTA_SET_MAX,
+ .policy = nft_set_policy,
+ },
+ [NFT_MSG_NEWSETELEM] = {
+ .call = nf_tables_newsetelem,
+ .attr_count = NFTA_SET_ELEM_LIST_MAX,
+ .policy = nft_set_elem_list_policy,
+ },
+ [NFT_MSG_GETSETELEM] = {
+ .call = nf_tables_getsetelem,
+ .attr_count = NFTA_SET_ELEM_LIST_MAX,
+ .policy = nft_set_elem_list_policy,
+ },
+ [NFT_MSG_DELSETELEM] = {
+ .call = nf_tables_delsetelem,
+ .attr_count = NFTA_SET_ELEM_LIST_MAX,
+ .policy = nft_set_elem_list_policy,
+ },
+};
+
+static const struct nfnetlink_subsystem nf_tables_subsys = {
+ .name = "nf_tables",
+ .subsys_id = NFNL_SUBSYS_NFTABLES,
+ .cb_count = NFT_MSG_MAX,
+ .cb = nf_tables_cb,
+ .commit = nf_tables_commit,
+ .abort = nf_tables_abort,
+};
+
+/*
+ * Loop detection - walk through the ruleset beginning at the destination chain
+ * of a new jump until either the source chain is reached (loop) or all
+ * reachable chains have been traversed.
+ *
+ * The loop check is performed whenever a new jump verdict is added to an
+ * expression or verdict map or a verdict map is bound to a new chain.
+ */
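+
+/*
+ * Illustrative sketch (chain names made up, not part of this patch): if chain
+ * "input" already jumps to chain "filter" and a new rule in "filter" adds a
+ * jump back to "input", the walk started at the new jump's destination
+ * ("input") recurses into "filter", i.e. the source chain, and the load is
+ * rejected with -ELOOP by nf_tables_check_loops().
+ */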
+
+static int nf_tables_check_loops(const struct nft_ctx *ctx,
+ const struct nft_chain *chain);
+
+static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ const struct nft_set_iter *iter,
+ const struct nft_set_elem *elem)
+{
+ switch (elem->data.verdict) {
+ case NFT_JUMP:
+ case NFT_GOTO:
+ return nf_tables_check_loops(ctx, elem->data.chain);
+ default:
+ return 0;
+ }
+}
+
+static int nf_tables_check_loops(const struct nft_ctx *ctx,
+ const struct nft_chain *chain)
+{
+ const struct nft_rule *rule;
+ const struct nft_expr *expr, *last;
+ const struct nft_set *set;
+ struct nft_set_binding *binding;
+ struct nft_set_iter iter;
+
+ if (ctx->chain == chain)
+ return -ELOOP;
+
+ list_for_each_entry(rule, &chain->rules, list) {
+ nft_rule_for_each_expr(expr, last, rule) {
+ const struct nft_data *data = NULL;
+ int err;
+
+ if (!expr->ops->validate)
+ continue;
+
+ err = expr->ops->validate(ctx, expr, &data);
+ if (err < 0)
+ return err;
+
+ if (data == NULL)
+ continue;
+
+ switch (data->verdict) {
+ case NFT_JUMP:
+ case NFT_GOTO:
+ err = nf_tables_check_loops(ctx, data->chain);
+ if (err < 0)
+ return err;
+ default:
+ break;
+ }
+ }
+ }
+
+ list_for_each_entry(set, &ctx->table->sets, list) {
+ if (!(set->flags & NFT_SET_MAP) ||
+ set->dtype != NFT_DATA_VERDICT)
+ continue;
+
+ list_for_each_entry(binding, &set->bindings, list) {
+ if (binding->chain != chain)
+ continue;
+
+ iter.skip = 0;
+ iter.count = 0;
+ iter.err = 0;
+ iter.fn = nf_tables_loop_check_setelem;
+
+ set->ops->walk(ctx, set, &iter);
+ if (iter.err < 0)
+ return iter.err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * nft_validate_input_register - validate an expression's input register
+ *
+ * @reg: the register number
+ *
+ * Validate that the input register is one of the general purpose
+ * registers.
+ */
+int nft_validate_input_register(enum nft_registers reg)
+{
+ if (reg <= NFT_REG_VERDICT)
+ return -EINVAL;
+ if (reg > NFT_REG_MAX)
+ return -ERANGE;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nft_validate_input_register);
+
+/**
+ * nft_validate_output_register - validate an expression's output register
+ *
+ * @reg: the register number
+ *
+ * Validate that the output register is one of the general purpose
+ * registers or the verdict register.
+ */
+int nft_validate_output_register(enum nft_registers reg)
+{
+ if (reg < NFT_REG_VERDICT)
+ return -EINVAL;
+ if (reg > NFT_REG_MAX)
+ return -ERANGE;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nft_validate_output_register);
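+
+/*
+ * Usage sketch (illustrative; mirrors the expression init paths further down
+ * in this patch, e.g. nft_bitwise_init() and nft_byteorder_init()): an
+ * expression validates its source and destination registers before use:
+ *
+ *	priv->sreg = ntohl(nla_get_be32(tb[..._SREG]));
+ *	err = nft_validate_input_register(priv->sreg);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	priv->dreg = ntohl(nla_get_be32(tb[..._DREG]));
+ *	err = nft_validate_output_register(priv->dreg);
+ *	if (err < 0)
+ *		return err;
+ */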
+
+/**
+ * nft_validate_data_load - validate an expression's data load
+ *
+ * @ctx: context of the expression performing the load
+ * @reg: the destination register number
+ * @data: the data to load
+ * @type: the data type
+ *
+ * Validate that a data load uses the appropriate data type for
+ * the destination register. A value of NULL for the data means
+ * that it is runtime-gathered data, which is always of type
+ * NFT_DATA_VALUE.
+ */
+int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
+ const struct nft_data *data,
+ enum nft_data_types type)
+{
+ int err;
+
+ switch (reg) {
+ case NFT_REG_VERDICT:
+ if (data == NULL || type != NFT_DATA_VERDICT)
+ return -EINVAL;
+
+ if (data->verdict == NFT_GOTO || data->verdict == NFT_JUMP) {
+ err = nf_tables_check_loops(ctx, data->chain);
+ if (err < 0)
+ return err;
+
+ if (ctx->chain->level + 1 > data->chain->level) {
+ if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
+ return -EMLINK;
+ data->chain->level = ctx->chain->level + 1;
+ }
+ }
+
+ return 0;
+ default:
+ if (data != NULL && type != NFT_DATA_VALUE)
+ return -EINVAL;
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(nft_validate_data_load);
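+
+/*
+ * Usage sketch (illustrative): an expression that fills a general purpose
+ * register at runtime passes NULL data, e.g.
+ *
+ *	err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+ *
+ * while a load of parsed data (see nf_tables_bind_check_setelem() above)
+ * passes the data itself so that jump targets can be loop-checked:
+ *
+ *	err = nft_validate_data_load(ctx, dreg, &elem->data, set->dtype);
+ */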
+
+static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = {
+ [NFTA_VERDICT_CODE] = { .type = NLA_U32 },
+ [NFTA_VERDICT_CHAIN] = { .type = NLA_STRING,
+ .len = NFT_CHAIN_MAXNAMELEN - 1 },
+};
+
+static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+ struct nft_data_desc *desc, const struct nlattr *nla)
+{
+ struct nlattr *tb[NFTA_VERDICT_MAX + 1];
+ struct nft_chain *chain;
+ int err;
+
+ err = nla_parse_nested(tb, NFTA_VERDICT_MAX, nla, nft_verdict_policy);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFTA_VERDICT_CODE])
+ return -EINVAL;
+ data->verdict = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
+
+ switch (data->verdict) {
+ case NF_ACCEPT:
+ case NF_DROP:
+ case NF_QUEUE:
+ case NFT_CONTINUE:
+ case NFT_BREAK:
+ case NFT_RETURN:
+ desc->len = sizeof(data->verdict);
+ break;
+ case NFT_JUMP:
+ case NFT_GOTO:
+ if (!tb[NFTA_VERDICT_CHAIN])
+ return -EINVAL;
+ chain = nf_tables_chain_lookup(ctx->table,
+ tb[NFTA_VERDICT_CHAIN]);
+ if (IS_ERR(chain))
+ return PTR_ERR(chain);
+ if (chain->flags & NFT_BASE_CHAIN)
+ return -EOPNOTSUPP;
+
+ chain->use++;
+ data->chain = chain;
+ desc->len = sizeof(data);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ desc->type = NFT_DATA_VERDICT;
+ return 0;
+}
+
+static void nft_verdict_uninit(const struct nft_data *data)
+{
+ switch (data->verdict) {
+ case NFT_JUMP:
+ case NFT_GOTO:
+ data->chain->use--;
+ break;
+ }
+}
+
+static int nft_verdict_dump(struct sk_buff *skb, const struct nft_data *data)
+{
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, NFTA_DATA_VERDICT);
+ if (!nest)
+ goto nla_put_failure;
+
+ if (nla_put_be32(skb, NFTA_VERDICT_CODE, htonl(data->verdict)))
+ goto nla_put_failure;
+
+ switch (data->verdict) {
+ case NFT_JUMP:
+ case NFT_GOTO:
+ if (nla_put_string(skb, NFTA_VERDICT_CHAIN, data->chain->name))
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static int nft_value_init(const struct nft_ctx *ctx, struct nft_data *data,
+ struct nft_data_desc *desc, const struct nlattr *nla)
+{
+ unsigned int len;
+
+ len = nla_len(nla);
+ if (len == 0)
+ return -EINVAL;
+ if (len > sizeof(data->data))
+ return -EOVERFLOW;
+
+ nla_memcpy(data->data, nla, sizeof(data->data));
+ desc->type = NFT_DATA_VALUE;
+ desc->len = len;
+ return 0;
+}
+
+static int nft_value_dump(struct sk_buff *skb, const struct nft_data *data,
+ unsigned int len)
+{
+ return nla_put(skb, NFTA_DATA_VALUE, len, data->data);
+}
+
+static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = {
+ [NFTA_DATA_VALUE] = { .type = NLA_BINARY,
+ .len = FIELD_SIZEOF(struct nft_data, data) },
+ [NFTA_DATA_VERDICT] = { .type = NLA_NESTED },
+};
+
+/**
+ * nft_data_init - parse nf_tables data netlink attributes
+ *
+ * @ctx: context of the expression using the data
+ * @data: destination struct nft_data
+ * @desc: data description
+ * @nla: netlink attribute containing data
+ *
+ * Parse the netlink data attributes and initialize a struct nft_data.
+ * The type and length of data are returned in the data description.
+ *
+ * The caller can indicate that it only wants to accept data of type
+ * NFT_DATA_VALUE by passing NULL for the ctx argument.
+ */
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+ struct nft_data_desc *desc, const struct nlattr *nla)
+{
+ struct nlattr *tb[NFTA_DATA_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, NFTA_DATA_MAX, nla, nft_data_policy);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_DATA_VALUE])
+ return nft_value_init(ctx, data, desc, tb[NFTA_DATA_VALUE]);
+ if (tb[NFTA_DATA_VERDICT] && ctx != NULL)
+ return nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(nft_data_init);
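+
+/*
+ * Usage sketch (illustrative; see nft_bitwise_init() later in this patch):
+ * passing a NULL ctx restricts the attribute to plain values:
+ *
+ *	struct nft_data_desc desc;
+ *
+ *	err = nft_data_init(NULL, &priv->mask, &desc, tb[NFTA_BITWISE_MASK]);
+ *	if (err < 0)
+ *		return err;
+ *	if (desc.len != priv->len)
+ *		return -EINVAL;
+ */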
+
+/**
+ * nft_data_uninit - release a nft_data item
+ *
+ * @data: struct nft_data to release
+ * @type: type of data
+ *
+ * Release an nft_data item. NFT_DATA_VALUE types can be silently discarded;
+ * all others need to be released by calling this function.
+ */
+void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
+{
+ switch (type) {
+ case NFT_DATA_VALUE:
+ return;
+ case NFT_DATA_VERDICT:
+ return nft_verdict_uninit(data);
+ default:
+ WARN_ON(1);
+ }
+}
+EXPORT_SYMBOL_GPL(nft_data_uninit);
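+
+/*
+ * Usage sketch (illustrative): error paths release parsed data with the type
+ * reported by nft_data_init(), as in nft_add_set_elem() above:
+ *
+ *	nft_data_uninit(&elem.data, d2.type);
+ *	nft_data_uninit(&elem.key, d1.type);
+ */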
+
+int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
+ enum nft_data_types type, unsigned int len)
+{
+ struct nlattr *nest;
+ int err;
+
+ nest = nla_nest_start(skb, attr);
+ if (nest == NULL)
+ return -1;
+
+ switch (type) {
+ case NFT_DATA_VALUE:
+ err = nft_value_dump(skb, data, len);
+ break;
+ case NFT_DATA_VERDICT:
+ err = nft_verdict_dump(skb, data);
+ break;
+ default:
+ err = -EINVAL;
+ WARN_ON(1);
+ }
+
+ nla_nest_end(skb, nest);
+ return err;
+}
+EXPORT_SYMBOL_GPL(nft_data_dump);
+
+static int nf_tables_init_net(struct net *net)
+{
+ INIT_LIST_HEAD(&net->nft.af_info);
+ INIT_LIST_HEAD(&net->nft.commit_list);
+ return 0;
+}
+
+static struct pernet_operations nf_tables_net_ops = {
+ .init = nf_tables_init_net,
+};
+
+static int __init nf_tables_module_init(void)
+{
+ int err;
+
+ info = kmalloc(sizeof(struct nft_expr_info) * NFT_RULE_MAXEXPRS,
+ GFP_KERNEL);
+ if (info == NULL) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ err = nf_tables_core_module_init();
+ if (err < 0)
+ goto err2;
+
+ err = nfnetlink_subsys_register(&nf_tables_subsys);
+ if (err < 0)
+ goto err3;
+
+ pr_info("nf_tables: (c) 2007-2009 Patrick McHardy <kaber@trash.net>\n");
+ return register_pernet_subsys(&nf_tables_net_ops);
+err3:
+ nf_tables_core_module_exit();
+err2:
+ kfree(info);
+err1:
+ return err;
+}
+
+static void __exit nf_tables_module_exit(void)
+{
+ unregister_pernet_subsys(&nf_tables_net_ops);
+ nfnetlink_subsys_unregister(&nf_tables_subsys);
+ nf_tables_core_module_exit();
+ kfree(info);
+}
+
+module_init(nf_tables_module_init);
+module_exit(nf_tables_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFTABLES);
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
new file mode 100644
index 000000000000..cb9e685caae1
--- /dev/null
+++ b/net/netfilter/nf_tables_core.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_log.h>
+
+static void nft_cmp_fast_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1])
+{
+ const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
+ u32 mask;
+
+ mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - priv->len);
+ if ((data[priv->sreg].data[0] & mask) == priv->data)
+ return;
+ data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static bool nft_payload_fast_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_payload *priv = nft_expr_priv(expr);
+ const struct sk_buff *skb = pkt->skb;
+ struct nft_data *dest = &data[priv->dreg];
+ unsigned char *ptr;
+
+ if (priv->base == NFT_PAYLOAD_NETWORK_HEADER)
+ ptr = skb_network_header(skb);
+ else
+ ptr = skb_network_header(skb) + pkt->xt.thoff;
+
+ ptr += priv->offset;
+
+ if (unlikely(ptr + priv->len >= skb_tail_pointer(skb)))
+ return false;
+
+ if (priv->len == 2)
+ *(u16 *)dest->data = *(u16 *)ptr;
+ else if (priv->len == 4)
+ *(u32 *)dest->data = *(u32 *)ptr;
+ else
+ *(u8 *)dest->data = *(u8 *)ptr;
+ return true;
+}
+
+struct nft_jumpstack {
+ const struct nft_chain *chain;
+ const struct nft_rule *rule;
+ int rulenum;
+};
+
+static inline void
+nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
+ struct nft_jumpstack *jumpstack, unsigned int stackptr)
+{
+ struct nft_stats __percpu *stats;
+ const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
+
+ rcu_read_lock_bh();
+ stats = rcu_dereference(nft_base_chain(chain)->stats);
+ __this_cpu_inc(stats->pkts);
+ __this_cpu_add(stats->bytes, pkt->skb->len);
+ rcu_read_unlock_bh();
+}
+
+enum nft_trace {
+ NFT_TRACE_RULE,
+ NFT_TRACE_RETURN,
+ NFT_TRACE_POLICY,
+};
+
+static const char *const comments[] = {
+ [NFT_TRACE_RULE] = "rule",
+ [NFT_TRACE_RETURN] = "return",
+ [NFT_TRACE_POLICY] = "policy",
+};
+
+static struct nf_loginfo trace_loginfo = {
+ .type = NF_LOG_TYPE_LOG,
+ .u = {
+ .log = {
+ .level = 4,
+ .logflags = NF_LOG_MASK,
+ },
+ },
+};
+
+static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
+ const struct nft_chain *chain,
+ int rulenum, enum nft_trace type)
+{
+ struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
+
+ nf_log_packet(net, pkt->xt.family, pkt->hooknum, pkt->skb, pkt->in,
+ pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
+ chain->table->name, chain->name, comments[type],
+ rulenum);
+}
+
+unsigned int
+nft_do_chain_pktinfo(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
+{
+ const struct nft_chain *chain = ops->priv;
+ const struct nft_rule *rule;
+ const struct nft_expr *expr, *last;
+ struct nft_data data[NFT_REG_MAX + 1];
+ unsigned int stackptr = 0;
+ struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
+ int rulenum = 0;
+ /*
+ * Cache the cursor to avoid problems in case it is updated
+ * while traversing the ruleset.
+ */
+ unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
+
+do_chain:
+ rule = list_entry(&chain->rules, struct nft_rule, list);
+next_rule:
+ data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+ list_for_each_entry_continue_rcu(rule, &chain->rules, list) {
+
+ /* This rule is not active, skip. */
+ if (unlikely(rule->genmask & (1 << gencursor)))
+ continue;
+
+ rulenum++;
+
+ nft_rule_for_each_expr(expr, last, rule) {
+ if (expr->ops == &nft_cmp_fast_ops)
+ nft_cmp_fast_eval(expr, data);
+ else if (expr->ops != &nft_payload_fast_ops ||
+ !nft_payload_fast_eval(expr, data, pkt))
+ expr->ops->eval(expr, data, pkt);
+
+ if (data[NFT_REG_VERDICT].verdict != NFT_CONTINUE)
+ break;
+ }
+
+ switch (data[NFT_REG_VERDICT].verdict) {
+ case NFT_BREAK:
+ data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+ /* fall through */
+ case NFT_CONTINUE:
+ continue;
+ }
+ break;
+ }
+
+ switch (data[NFT_REG_VERDICT].verdict) {
+ case NF_ACCEPT:
+ case NF_DROP:
+ case NF_QUEUE:
+ if (unlikely(pkt->skb->nf_trace))
+ nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
+ return data[NFT_REG_VERDICT].verdict;
+ case NFT_JUMP:
+ if (unlikely(pkt->skb->nf_trace))
+ nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
+ BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
+ jumpstack[stackptr].chain = chain;
+ jumpstack[stackptr].rule = rule;
+ jumpstack[stackptr].rulenum = rulenum;
+ stackptr++;
+ /* fall through */
+ case NFT_GOTO:
+ chain = data[NFT_REG_VERDICT].chain;
+ goto do_chain;
+ case NFT_RETURN:
+ if (unlikely(pkt->skb->nf_trace))
+ nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
+
+ /* fall through */
+ case NFT_CONTINUE:
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ if (stackptr > 0) {
+ if (unlikely(pkt->skb->nf_trace))
+ nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
+
+ stackptr--;
+ chain = jumpstack[stackptr].chain;
+ rule = jumpstack[stackptr].rule;
+ rulenum = jumpstack[stackptr].rulenum;
+ goto next_rule;
+ }
+ nft_chain_stats(chain, pkt, jumpstack, stackptr);
+
+ if (unlikely(pkt->skb->nf_trace))
+ nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY);
+
+ return nft_base_chain(chain)->policy;
+}
+EXPORT_SYMBOL_GPL(nft_do_chain_pktinfo);
+
+int __init nf_tables_core_module_init(void)
+{
+ int err;
+
+ err = nft_immediate_module_init();
+ if (err < 0)
+ goto err1;
+
+ err = nft_cmp_module_init();
+ if (err < 0)
+ goto err2;
+
+ err = nft_lookup_module_init();
+ if (err < 0)
+ goto err3;
+
+ err = nft_bitwise_module_init();
+ if (err < 0)
+ goto err4;
+
+ err = nft_byteorder_module_init();
+ if (err < 0)
+ goto err5;
+
+ err = nft_payload_module_init();
+ if (err < 0)
+ goto err6;
+
+ return 0;
+
+err6:
+ nft_byteorder_module_exit();
+err5:
+ nft_bitwise_module_exit();
+err4:
+ nft_lookup_module_exit();
+err3:
+ nft_cmp_module_exit();
+err2:
+ nft_immediate_module_exit();
+err1:
+ return err;
+}
+
+void nf_tables_core_module_exit(void)
+{
+ nft_payload_module_exit();
+ nft_byteorder_module_exit();
+ nft_bitwise_module_exit();
+ nft_lookup_module_exit();
+ nft_cmp_module_exit();
+ nft_immediate_module_exit();
+}
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 572d87dc116f..027f16af51a0 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -147,9 +147,6 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
const struct nfnetlink_subsystem *ss;
int type, err;
- if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
- return -EPERM;
-
/* All the messages must at least contain nfgenmsg */
if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
return 0;
@@ -217,9 +214,179 @@ replay:
}
}
+static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
+ u_int16_t subsys_id)
+{
+ struct sk_buff *nskb, *oskb = skb;
+ struct net *net = sock_net(skb->sk);
+ const struct nfnetlink_subsystem *ss;
+ const struct nfnl_callback *nc;
+ bool success = true, done = false;
+ int err;
+
+ if (subsys_id >= NFNL_SUBSYS_COUNT)
+ return netlink_ack(skb, nlh, -EINVAL);
+replay:
+ nskb = netlink_skb_clone(oskb, GFP_KERNEL);
+ if (!nskb)
+ return netlink_ack(oskb, nlh, -ENOMEM);
+
+ nskb->sk = oskb->sk;
+ skb = nskb;
+
+ nfnl_lock(subsys_id);
+ ss = rcu_dereference_protected(table[subsys_id].subsys,
+ lockdep_is_held(&table[subsys_id].mutex));
+ if (!ss) {
+#ifdef CONFIG_MODULES
+ nfnl_unlock(subsys_id);
+ request_module("nfnetlink-subsys-%d", subsys_id);
+ nfnl_lock(subsys_id);
+ ss = rcu_dereference_protected(table[subsys_id].subsys,
+ lockdep_is_held(&table[subsys_id].mutex));
+ if (!ss)
+#endif
+ {
+ nfnl_unlock(subsys_id);
+ kfree_skb(nskb);
+ return netlink_ack(skb, nlh, -EOPNOTSUPP);
+ }
+ }
+
+ if (!ss->commit || !ss->abort) {
+ nfnl_unlock(subsys_id);
+ kfree_skb(nskb);
+ return netlink_ack(skb, nlh, -EOPNOTSUPP);
+ }
+
+ while (skb->len >= nlmsg_total_size(0)) {
+ int msglen, type;
+
+ nlh = nlmsg_hdr(skb);
+ err = 0;
+
+ if (nlh->nlmsg_len < NLMSG_HDRLEN) {
+ err = -EINVAL;
+ goto ack;
+ }
+
+ /* Only requests are handled by the kernel */
+ if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
+ err = -EINVAL;
+ goto ack;
+ }
+
+ type = nlh->nlmsg_type;
+ if (type == NFNL_MSG_BATCH_BEGIN) {
+ /* Malformed: Batch begin twice */
+ success = false;
+ goto done;
+ } else if (type == NFNL_MSG_BATCH_END) {
+ done = true;
+ goto done;
+ } else if (type < NLMSG_MIN_TYPE) {
+ err = -EINVAL;
+ goto ack;
+ }
+
+ /* We only accept a batch with messages for the same
+ * subsystem.
+ */
+ if (NFNL_SUBSYS_ID(type) != subsys_id) {
+ err = -EINVAL;
+ goto ack;
+ }
+
+ nc = nfnetlink_find_client(type, ss);
+ if (!nc) {
+ err = -EINVAL;
+ goto ack;
+ }
+
+ {
+ int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
+ u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
+ struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
+ struct nlattr *attr = (void *)nlh + min_len;
+ int attrlen = nlh->nlmsg_len - min_len;
+
+ err = nla_parse(cda, ss->cb[cb_id].attr_count,
+ attr, attrlen, ss->cb[cb_id].policy);
+ if (err < 0)
+ goto ack;
+
+ if (nc->call_batch) {
+ err = nc->call_batch(net->nfnl, skb, nlh,
+ (const struct nlattr **)cda);
+ }
+
+ /* The lock was released to autoload some module, so we
+ * have to abort and start from scratch using the
+ * original skb.
+ */
+ if (err == -EAGAIN) {
+ ss->abort(skb);
+ nfnl_unlock(subsys_id);
+ kfree_skb(nskb);
+ goto replay;
+ }
+ }
+ack:
+ if (nlh->nlmsg_flags & NLM_F_ACK || err) {
+ /* We don't stop processing the batch on errors, so
+ * userspace gets all the errors that the batch
+ * triggers.
+ */
+ netlink_ack(skb, nlh, err);
+ if (err)
+ success = false;
+ }
+
+ msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+ if (msglen > skb->len)
+ msglen = skb->len;
+ skb_pull(skb, msglen);
+ }
+done:
+ if (success && done)
+ ss->commit(skb);
+ else
+ ss->abort(skb);
+
+ nfnl_unlock(subsys_id);
+ kfree_skb(nskb);
+}
+
static void nfnetlink_rcv(struct sk_buff *skb)
{
- netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
+ struct nlmsghdr *nlh = nlmsg_hdr(skb);
+ struct net *net = sock_net(skb->sk);
+ int msglen;
+
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ return netlink_ack(skb, nlh, -EPERM);
+
+ if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+ skb->len < nlh->nlmsg_len)
+ return;
+
+ if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN) {
+ struct nfgenmsg *nfgenmsg;
+
+ msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+ if (msglen > skb->len)
+ msglen = skb->len;
+
+ if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+ skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
+ return;
+
+ nfgenmsg = nlmsg_data(nlh);
+ skb_pull(skb, msglen);
+ nfnetlink_rcv_batch(skb, nlh, nfgenmsg->res_id);
+ } else {
+ netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
+ }
}
#ifdef CONFIG_MODULES
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 50580494148d..476accd17145 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -49,10 +49,8 @@ static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
};
static int
-ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
- struct nf_conntrack_l4proto *l4proto,
- struct net *net,
- const struct nlattr *attr)
+ctnl_timeout_parse_policy(void *timeouts, struct nf_conntrack_l4proto *l4proto,
+ struct net *net, const struct nlattr *attr)
{
int ret = 0;
@@ -64,8 +62,7 @@ ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
if (ret < 0)
return ret;
- ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net,
- &timeout->data);
+ ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeouts);
}
return ret;
}
@@ -123,7 +120,8 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
goto err_proto_put;
}
- ret = ctnl_timeout_parse_policy(matching, l4proto, net,
+ ret = ctnl_timeout_parse_policy(&matching->data,
+ l4proto, net,
cda[CTA_TIMEOUT_DATA]);
return ret;
}
@@ -138,7 +136,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
goto err_proto_put;
}
- ret = ctnl_timeout_parse_policy(timeout, l4proto, net,
+ ret = ctnl_timeout_parse_policy(&timeout->data, l4proto, net,
cda[CTA_TIMEOUT_DATA]);
if (ret < 0)
goto err;
@@ -342,6 +340,147 @@ cttimeout_del_timeout(struct sock *ctnl, struct sk_buff *skb,
return ret;
}
+static int
+cttimeout_default_set(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ __u16 l3num;
+ __u8 l4num;
+ struct nf_conntrack_l4proto *l4proto;
+ struct net *net = sock_net(skb->sk);
+ unsigned int *timeouts;
+ int ret;
+
+ if (!cda[CTA_TIMEOUT_L3PROTO] ||
+ !cda[CTA_TIMEOUT_L4PROTO] ||
+ !cda[CTA_TIMEOUT_DATA])
+ return -EINVAL;
+
+ l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
+ l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
+ l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+
+ /* This protocol is not supported, skip. */
+ if (l4proto->l4proto != l4num) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ timeouts = l4proto->get_timeouts(net);
+
+ ret = ctnl_timeout_parse_policy(timeouts, l4proto, net,
+ cda[CTA_TIMEOUT_DATA]);
+ if (ret < 0)
+ goto err;
+
+ nf_ct_l4proto_put(l4proto);
+ return 0;
+err:
+ nf_ct_l4proto_put(l4proto);
+ return ret;
+}
+
+static int
+cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
+ u32 seq, u32 type, int event,
+ struct nf_conntrack_l4proto *l4proto)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ unsigned int flags = portid ? NLM_F_MULTI : 0;
+
+ event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
+ nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ goto nlmsg_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l4proto->l3proto)) ||
+ nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto))
+ goto nla_put_failure;
+
+ if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
+ struct nlattr *nest_parms;
+ unsigned int *timeouts = l4proto->get_timeouts(net);
+ int ret;
+
+ nest_parms = nla_nest_start(skb,
+ CTA_TIMEOUT_DATA | NLA_F_NESTED);
+ if (!nest_parms)
+ goto nla_put_failure;
+
+ ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
+ if (ret < 0)
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest_parms);
+ }
+
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -1;
+}
+
+static int cttimeout_default_get(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ __u16 l3num;
+ __u8 l4num;
+ struct nf_conntrack_l4proto *l4proto;
+ struct net *net = sock_net(skb->sk);
+ struct sk_buff *skb2;
+ int ret, err;
+
+ if (!cda[CTA_TIMEOUT_L3PROTO] || !cda[CTA_TIMEOUT_L4PROTO])
+ return -EINVAL;
+
+ l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
+ l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
+ l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+
+ /* This protocol is not supported, skip. */
+ if (l4proto->l4proto != l4num) {
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ ret = cttimeout_default_fill_info(net, skb2, NETLINK_CB(skb).portid,
+ nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(nlh->nlmsg_type),
+ IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
+ l4proto);
+ if (ret <= 0) {
+ kfree_skb(skb2);
+ err = -ENOMEM;
+ goto err;
+ }
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
+ if (ret > 0)
+ ret = 0;
+
+ /* this avoids a loop in nfnetlink. */
+ return ret == -EAGAIN ? -ENOBUFS : ret;
+err:
+ nf_ct_l4proto_put(l4proto);
+ return err;
+}
+
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
static struct ctnl_timeout *ctnl_timeout_find_get(const char *name)
{
@@ -384,6 +523,12 @@ static const struct nfnl_callback cttimeout_cb[IPCTNL_MSG_TIMEOUT_MAX] = {
[IPCTNL_MSG_TIMEOUT_DELETE] = { .call = cttimeout_del_timeout,
.attr_count = CTA_TIMEOUT_MAX,
.policy = cttimeout_nla_policy },
+ [IPCTNL_MSG_TIMEOUT_DEFAULT_SET]= { .call = cttimeout_default_set,
+ .attr_count = CTA_TIMEOUT_MAX,
+ .policy = cttimeout_nla_policy },
+ [IPCTNL_MSG_TIMEOUT_DEFAULT_GET]= { .call = cttimeout_default_get,
+ .attr_count = CTA_TIMEOUT_MAX,
+ .policy = cttimeout_nla_policy },
};
static const struct nfnetlink_subsystem cttimeout_subsys = {
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index d92cc317bf8b..3c4b69e5fe17 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -319,7 +319,8 @@ nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
}
static struct sk_buff *
-nfulnl_alloc_skb(u32 peer_portid, unsigned int inst_size, unsigned int pkt_size)
+nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size,
+ unsigned int pkt_size)
{
struct sk_buff *skb;
unsigned int n;
@@ -328,13 +329,13 @@ nfulnl_alloc_skb(u32 peer_portid, unsigned int inst_size, unsigned int pkt_size)
* message. WARNING: has to be <= 128k due to slab restrictions */
n = max(inst_size, pkt_size);
- skb = nfnetlink_alloc_skb(&init_net, n, peer_portid, GFP_ATOMIC);
+ skb = nfnetlink_alloc_skb(net, n, peer_portid, GFP_ATOMIC);
if (!skb) {
if (n > pkt_size) {
/* try to allocate only as much as we need for current
* packet */
- skb = nfnetlink_alloc_skb(&init_net, pkt_size,
+ skb = nfnetlink_alloc_skb(net, pkt_size,
peer_portid, GFP_ATOMIC);
if (!skb)
pr_err("nfnetlink_log: can't even alloc %u bytes\n",
@@ -702,8 +703,8 @@ nfulnl_log_packet(struct net *net,
}
if (!inst->skb) {
- inst->skb = nfulnl_alloc_skb(inst->peer_portid, inst->nlbufsiz,
- size);
+ inst->skb = nfulnl_alloc_skb(net, inst->peer_portid,
+ inst->nlbufsiz, size);
if (!inst->skb)
goto alloc_failure;
}
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index ae2e5c11d01a..21258cf70091 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -298,7 +298,7 @@ nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
}
static struct sk_buff *
-nfqnl_build_packet_message(struct nfqnl_instance *queue,
+nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
struct nf_queue_entry *entry,
__be32 **packet_id_ptr)
{
@@ -372,7 +372,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
if (queue->flags & NFQA_CFG_F_CONNTRACK)
ct = nfqnl_ct_get(entskb, &size, &ctinfo);
- skb = nfnetlink_alloc_skb(&init_net, size, queue->peer_portid,
+ skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
GFP_ATOMIC);
if (!skb)
return NULL;
@@ -525,7 +525,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
__be32 *packet_id_ptr;
int failopen = 0;
- nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
+ nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
if (nskb == NULL) {
err = -ENOMEM;
goto err_out;
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
new file mode 100644
index 000000000000..4fb6ee2c1106
--- /dev/null
+++ b/net/netfilter/nft_bitwise.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_bitwise {
+ enum nft_registers sreg:8;
+ enum nft_registers dreg:8;
+ u8 len;
+ struct nft_data mask;
+ struct nft_data xor;
+};
+
+static void nft_bitwise_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_bitwise *priv = nft_expr_priv(expr);
+ const struct nft_data *src = &data[priv->sreg];
+ struct nft_data *dst = &data[priv->dreg];
+ unsigned int i;
+
+ for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++) {
+ dst->data[i] = (src->data[i] & priv->mask.data[i]) ^
+ priv->xor.data[i];
+ }
+}
+
+static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
+ [NFTA_BITWISE_SREG] = { .type = NLA_U32 },
+ [NFTA_BITWISE_DREG] = { .type = NLA_U32 },
+ [NFTA_BITWISE_LEN] = { .type = NLA_U32 },
+ [NFTA_BITWISE_MASK] = { .type = NLA_NESTED },
+ [NFTA_BITWISE_XOR] = { .type = NLA_NESTED },
+};
+
+static int nft_bitwise_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_bitwise *priv = nft_expr_priv(expr);
+ struct nft_data_desc d1, d2;
+ int err;
+
+ if (tb[NFTA_BITWISE_SREG] == NULL ||
+ tb[NFTA_BITWISE_DREG] == NULL ||
+ tb[NFTA_BITWISE_LEN] == NULL ||
+ tb[NFTA_BITWISE_MASK] == NULL ||
+ tb[NFTA_BITWISE_XOR] == NULL)
+ return -EINVAL;
+
+ priv->sreg = ntohl(nla_get_be32(tb[NFTA_BITWISE_SREG]));
+ err = nft_validate_input_register(priv->sreg);
+ if (err < 0)
+ return err;
+
+ priv->dreg = ntohl(nla_get_be32(tb[NFTA_BITWISE_DREG]));
+ err = nft_validate_output_register(priv->dreg);
+ if (err < 0)
+ return err;
+ err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+ if (err < 0)
+ return err;
+
+ priv->len = ntohl(nla_get_be32(tb[NFTA_BITWISE_LEN]));
+
+ err = nft_data_init(NULL, &priv->mask, &d1, tb[NFTA_BITWISE_MASK]);
+ if (err < 0)
+ return err;
+ if (d1.len != priv->len)
+ return -EINVAL;
+
+ err = nft_data_init(NULL, &priv->xor, &d2, tb[NFTA_BITWISE_XOR]);
+ if (err < 0)
+ return err;
+ if (d2.len != priv->len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_bitwise *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_BITWISE_SREG, htonl(priv->sreg)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_BITWISE_DREG, htonl(priv->dreg)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len)))
+ goto nla_put_failure;
+
+ if (nft_data_dump(skb, NFTA_BITWISE_MASK, &priv->mask,
+ NFT_DATA_VALUE, priv->len) < 0)
+ goto nla_put_failure;
+
+ if (nft_data_dump(skb, NFTA_BITWISE_XOR, &priv->xor,
+ NFT_DATA_VALUE, priv->len) < 0)
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_bitwise_type;
+static const struct nft_expr_ops nft_bitwise_ops = {
+ .type = &nft_bitwise_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_bitwise)),
+ .eval = nft_bitwise_eval,
+ .init = nft_bitwise_init,
+ .dump = nft_bitwise_dump,
+};
+
+static struct nft_expr_type nft_bitwise_type __read_mostly = {
+ .name = "bitwise",
+ .ops = &nft_bitwise_ops,
+ .policy = nft_bitwise_policy,
+ .maxattr = NFTA_BITWISE_MAX,
+ .owner = THIS_MODULE,
+};
+
+int __init nft_bitwise_module_init(void)
+{
+ return nft_register_expr(&nft_bitwise_type);
+}
+
+void nft_bitwise_module_exit(void)
+{
+ nft_unregister_expr(&nft_bitwise_type);
+}
diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
new file mode 100644
index 000000000000..c39ed8d29df1
--- /dev/null
+++ b/net/netfilter/nft_byteorder.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_byteorder {
+ enum nft_registers sreg:8;
+ enum nft_registers dreg:8;
+ enum nft_byteorder_ops op:8;
+ u8 len;
+ u8 size;
+};
+
+static void nft_byteorder_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_byteorder *priv = nft_expr_priv(expr);
+ struct nft_data *src = &data[priv->sreg], *dst = &data[priv->dreg];
+ union { u32 u32; u16 u16; } *s, *d;
+ unsigned int i;
+
+ s = (void *)src->data;
+ d = (void *)dst->data;
+
+ switch (priv->size) {
+ case 4:
+ switch (priv->op) {
+ case NFT_BYTEORDER_NTOH:
+ for (i = 0; i < priv->len / 4; i++)
+ d[i].u32 = ntohl((__force __be32)s[i].u32);
+ break;
+ case NFT_BYTEORDER_HTON:
+ for (i = 0; i < priv->len / 4; i++)
+ d[i].u32 = (__force __u32)htonl(s[i].u32);
+ break;
+ }
+ break;
+ case 2:
+ switch (priv->op) {
+ case NFT_BYTEORDER_NTOH:
+ for (i = 0; i < priv->len / 2; i++)
+ d[i].u16 = ntohs((__force __be16)s[i].u16);
+ break;
+ case NFT_BYTEORDER_HTON:
+ for (i = 0; i < priv->len / 2; i++)
+ d[i].u16 = (__force __u16)htons(s[i].u16);
+ break;
+ }
+ break;
+ }
+}
+
+static const struct nla_policy nft_byteorder_policy[NFTA_BYTEORDER_MAX + 1] = {
+ [NFTA_BYTEORDER_SREG] = { .type = NLA_U32 },
+ [NFTA_BYTEORDER_DREG] = { .type = NLA_U32 },
+ [NFTA_BYTEORDER_OP] = { .type = NLA_U32 },
+ [NFTA_BYTEORDER_LEN] = { .type = NLA_U32 },
+ [NFTA_BYTEORDER_SIZE] = { .type = NLA_U32 },
+};
+
+static int nft_byteorder_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_byteorder *priv = nft_expr_priv(expr);
+ int err;
+
+ if (tb[NFTA_BYTEORDER_SREG] == NULL ||
+ tb[NFTA_BYTEORDER_DREG] == NULL ||
+ tb[NFTA_BYTEORDER_LEN] == NULL ||
+ tb[NFTA_BYTEORDER_SIZE] == NULL ||
+ tb[NFTA_BYTEORDER_OP] == NULL)
+ return -EINVAL;
+
+ priv->sreg = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_SREG]));
+ err = nft_validate_input_register(priv->sreg);
+ if (err < 0)
+ return err;
+
+ priv->dreg = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_DREG]));
+ err = nft_validate_output_register(priv->dreg);
+ if (err < 0)
+ return err;
+ err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+ if (err < 0)
+ return err;
+
+ priv->op = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_OP]));
+ switch (priv->op) {
+ case NFT_BYTEORDER_NTOH:
+ case NFT_BYTEORDER_HTON:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ priv->len = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_LEN]));
+ if (priv->len == 0 || priv->len > FIELD_SIZEOF(struct nft_data, data))
+ return -EINVAL;
+
+ priv->size = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_SIZE]));
+ switch (priv->size) {
+ case 2:
+ case 4:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nft_byteorder_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_byteorder *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_BYTEORDER_SREG, htonl(priv->sreg)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_BYTEORDER_DREG, htonl(priv->dreg)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_BYTEORDER_OP, htonl(priv->op)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_BYTEORDER_LEN, htonl(priv->len)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_BYTEORDER_SIZE, htonl(priv->size)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_byteorder_type;
+static const struct nft_expr_ops nft_byteorder_ops = {
+ .type = &nft_byteorder_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_byteorder)),
+ .eval = nft_byteorder_eval,
+ .init = nft_byteorder_init,
+ .dump = nft_byteorder_dump,
+};
+
+static struct nft_expr_type nft_byteorder_type __read_mostly = {
+ .name = "byteorder",
+ .ops = &nft_byteorder_ops,
+ .policy = nft_byteorder_policy,
+ .maxattr = NFTA_BYTEORDER_MAX,
+ .owner = THIS_MODULE,
+};
+
+int __init nft_byteorder_module_init(void)
+{
+ return nft_register_expr(&nft_byteorder_type);
+}
+
+void nft_byteorder_module_exit(void)
+{
+ nft_unregister_expr(&nft_byteorder_type);
+}
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
new file mode 100644
index 000000000000..954925db414d
--- /dev/null
+++ b/net/netfilter/nft_cmp.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_cmp_expr {
+ struct nft_data data;
+ enum nft_registers sreg:8;
+ u8 len;
+ enum nft_cmp_ops op:8;
+};
+
+static void nft_cmp_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_cmp_expr *priv = nft_expr_priv(expr);
+ int d;
+
+ d = nft_data_cmp(&data[priv->sreg], &priv->data, priv->len);
+ switch (priv->op) {
+ case NFT_CMP_EQ:
+ if (d != 0)
+ goto mismatch;
+ break;
+ case NFT_CMP_NEQ:
+ if (d == 0)
+ goto mismatch;
+ break;
+ case NFT_CMP_LT:
+ if (d == 0)
+ goto mismatch;
+ case NFT_CMP_LTE:
+ if (d > 0)
+ goto mismatch;
+ break;
+ case NFT_CMP_GT:
+ if (d == 0)
+ goto mismatch;
+ case NFT_CMP_GTE:
+ if (d < 0)
+ goto mismatch;
+ break;
+ }
+ return;
+
+mismatch:
+ data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
+ [NFTA_CMP_SREG] = { .type = NLA_U32 },
+ [NFTA_CMP_OP] = { .type = NLA_U32 },
+ [NFTA_CMP_DATA] = { .type = NLA_NESTED },
+};
+
+static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_cmp_expr *priv = nft_expr_priv(expr);
+ struct nft_data_desc desc;
+ int err;
+
+ priv->sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
+ priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
+
+ err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
+ BUG_ON(err < 0);
+
+ priv->len = desc.len;
+ return 0;
+}
+
+static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_cmp_expr *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_CMP_SREG, htonl(priv->sreg)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
+ goto nla_put_failure;
+
+ if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
+ NFT_DATA_VALUE, priv->len) < 0)
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_cmp_type;
+static const struct nft_expr_ops nft_cmp_ops = {
+ .type = &nft_cmp_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
+ .eval = nft_cmp_eval,
+ .init = nft_cmp_init,
+ .dump = nft_cmp_dump,
+};
+
+static int nft_cmp_fast_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
+ struct nft_data_desc desc;
+ struct nft_data data;
+ u32 mask;
+ int err;
+
+ priv->sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
+
+ err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
+ BUG_ON(err < 0);
+ desc.len *= BITS_PER_BYTE;
+
+ mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - desc.len);
+ priv->data = data.data[0] & mask;
+ priv->len = desc.len;
+ return 0;
+}
+
+static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
+ struct nft_data data;
+
+ if (nla_put_be32(skb, NFTA_CMP_SREG, htonl(priv->sreg)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_CMP_OP, htonl(NFT_CMP_EQ)))
+ goto nla_put_failure;
+
+ data.data[0] = priv->data;
+ if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
+ NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+const struct nft_expr_ops nft_cmp_fast_ops = {
+ .type = &nft_cmp_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
+ .eval = NULL, /* inlined */
+ .init = nft_cmp_fast_init,
+ .dump = nft_cmp_fast_dump,
+};
+
+static const struct nft_expr_ops *
+nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
+{
+ struct nft_data_desc desc;
+ struct nft_data data;
+ enum nft_registers sreg;
+ enum nft_cmp_ops op;
+ int err;
+
+ if (tb[NFTA_CMP_SREG] == NULL ||
+ tb[NFTA_CMP_OP] == NULL ||
+ tb[NFTA_CMP_DATA] == NULL)
+ return ERR_PTR(-EINVAL);
+
+ sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
+ err = nft_validate_input_register(sreg);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
+ switch (op) {
+ case NFT_CMP_EQ:
+ case NFT_CMP_NEQ:
+ case NFT_CMP_LT:
+ case NFT_CMP_LTE:
+ case NFT_CMP_GT:
+ case NFT_CMP_GTE:
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ)
+ return &nft_cmp_fast_ops;
+ else
+ return &nft_cmp_ops;
+}
+
+static struct nft_expr_type nft_cmp_type __read_mostly = {
+ .name = "cmp",
+ .select_ops = nft_cmp_select_ops,
+ .policy = nft_cmp_policy,
+ .maxattr = NFTA_CMP_MAX,
+ .owner = THIS_MODULE,
+};
+
+int __init nft_cmp_module_init(void)
+{
+ return nft_register_expr(&nft_cmp_type);
+}
+
+void nft_cmp_module_exit(void)
+{
+ nft_unregister_expr(&nft_cmp_type);
+}
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
new file mode 100644
index 000000000000..4811f762e060
--- /dev/null
+++ b/net/netfilter/nft_compat.c
@@ -0,0 +1,768 @@
+/*
+ * (C) 2012-2013 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This software has been sponsored by Sophos Astaro <http://www.sophos.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <linux/netfilter/nf_tables_compat.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <asm/uaccess.h> /* for set_fs */
+#include <net/netfilter/nf_tables.h>
+
+union nft_entry {
+ struct ipt_entry e4;
+ struct ip6t_entry e6;
+};
+
+static inline void
+nft_compat_set_par(struct xt_action_param *par, void *xt, const void *xt_info)
+{
+ par->target = xt;
+ par->targinfo = xt_info;
+ par->hotdrop = false;
+}
+
+static void nft_target_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ void *info = nft_expr_priv(expr);
+ struct xt_target *target = expr->ops->data;
+ struct sk_buff *skb = pkt->skb;
+ int ret;
+
+ nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info);
+
+ ret = target->target(skb, &pkt->xt);
+
+ if (pkt->xt.hotdrop)
+ ret = NF_DROP;
+
+ switch (ret) {
+ case XT_CONTINUE:
+ data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+ break;
+ default:
+ data[NFT_REG_VERDICT].verdict = ret;
+ break;
+ }
+ return;
+}
+
+static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = {
+ [NFTA_TARGET_NAME] = { .type = NLA_NUL_STRING },
+ [NFTA_TARGET_REV] = { .type = NLA_U32 },
+ [NFTA_TARGET_INFO] = { .type = NLA_BINARY },
+};
+
+static void
+nft_target_set_tgchk_param(struct xt_tgchk_param *par,
+ const struct nft_ctx *ctx,
+ struct xt_target *target, void *info,
+ union nft_entry *entry, u8 proto, bool inv)
+{
+ par->net = &init_net;
+ par->table = ctx->table->name;
+ switch (ctx->afi->family) {
+ case AF_INET:
+ entry->e4.ip.proto = proto;
+ entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
+ break;
+ case AF_INET6:
+ entry->e6.ipv6.proto = proto;
+ entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
+ break;
+ }
+ par->entryinfo = entry;
+ par->target = target;
+ par->targinfo = info;
+ if (ctx->chain->flags & NFT_BASE_CHAIN) {
+ const struct nft_base_chain *basechain =
+ nft_base_chain(ctx->chain);
+ const struct nf_hook_ops *ops = &basechain->ops;
+
+ par->hook_mask = 1 << ops->hooknum;
+ }
+ par->family = ctx->afi->family;
+}
+
+static void target_compat_from_user(struct xt_target *t, void *in, void *out)
+{
+#ifdef CONFIG_COMPAT
+ if (t->compat_from_user) {
+ int pad;
+
+ t->compat_from_user(out, in);
+ pad = XT_ALIGN(t->targetsize) - t->targetsize;
+ if (pad > 0)
+ memset(out + t->targetsize, 0, pad);
+ } else
+#endif
+ memcpy(out, in, XT_ALIGN(t->targetsize));
+}
+
+static inline int nft_compat_target_offset(struct xt_target *target)
+{
+#ifdef CONFIG_COMPAT
+ return xt_compat_target_offset(target);
+#else
+ return 0;
+#endif
+}
+
+static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1] = {
+ [NFTA_RULE_COMPAT_PROTO] = { .type = NLA_U32 },
+ [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 },
+};
+
+static int nft_parse_compat(const struct nlattr *attr, u8 *proto, bool *inv)
+{
+ struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
+ u32 flags;
+ int err;
+
+ err = nla_parse_nested(tb, NFTA_RULE_COMPAT_MAX, attr,
+ nft_rule_compat_policy);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFTA_RULE_COMPAT_PROTO] || !tb[NFTA_RULE_COMPAT_FLAGS])
+ return -EINVAL;
+
+ flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS]));
+ if (flags & ~NFT_RULE_COMPAT_F_MASK)
+ return -EINVAL;
+ if (flags & NFT_RULE_COMPAT_F_INV)
+ *inv = true;
+
+ *proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
+ return 0;
+}
+
+static int
+nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ void *info = nft_expr_priv(expr);
+ struct xt_target *target = expr->ops->data;
+ struct xt_tgchk_param par;
+ size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
+ u8 proto = 0;
+ bool inv = false;
+ union nft_entry e = {};
+ int ret;
+
+ target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info);
+
+ if (ctx->nla[NFTA_RULE_COMPAT]) {
+ ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
+ if (ret < 0)
+ goto err;
+ }
+
+ nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
+
+ ret = xt_check_target(&par, size, proto, inv);
+ if (ret < 0)
+ goto err;
+
+ /* The standard target cannot be used */
+ if (target->target == NULL) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+err:
+ module_put(target->me);
+ return ret;
+}
+
+static void
+nft_target_destroy(const struct nft_expr *expr)
+{
+ struct xt_target *target = expr->ops->data;
+
+ module_put(target->me);
+}
+
+static int
+target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
+{
+ int ret;
+
+#ifdef CONFIG_COMPAT
+ if (t->compat_to_user) {
+ mm_segment_t old_fs;
+ void *out;
+
+ out = kmalloc(XT_ALIGN(t->targetsize), GFP_ATOMIC);
+ if (out == NULL)
+ return -ENOMEM;
+
+ /* We want to reuse existing compat_to_user */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ t->compat_to_user(out, in);
+ set_fs(old_fs);
+ ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
+ kfree(out);
+ } else
+#endif
+ ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), in);
+
+ return ret;
+}
+
+static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct xt_target *target = expr->ops->data;
+ void *info = nft_expr_priv(expr);
+
+ if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) ||
+ nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) ||
+ target_dump_info(skb, target, info))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static int nft_target_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ struct xt_target *target = expr->ops->data;
+ unsigned int hook_mask = 0;
+
+ if (ctx->chain->flags & NFT_BASE_CHAIN) {
+ const struct nft_base_chain *basechain =
+ nft_base_chain(ctx->chain);
+ const struct nf_hook_ops *ops = &basechain->ops;
+
+ hook_mask = 1 << ops->hooknum;
+ if (hook_mask & target->hooks)
+ return 0;
+
+ /* This target is being called from an invalid chain */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void nft_match_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ void *info = nft_expr_priv(expr);
+ struct xt_match *match = expr->ops->data;
+ struct sk_buff *skb = pkt->skb;
+ bool ret;
+
+ nft_compat_set_par((struct xt_action_param *)&pkt->xt, match, info);
+
+ ret = match->match(skb, (struct xt_action_param *)&pkt->xt);
+
+ if (pkt->xt.hotdrop) {
+ data[NFT_REG_VERDICT].verdict = NF_DROP;
+ return;
+ }
+
+ if (ret)
+ data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+ else
+ data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
+ [NFTA_MATCH_NAME] = { .type = NLA_NUL_STRING },
+ [NFTA_MATCH_REV] = { .type = NLA_U32 },
+ [NFTA_MATCH_INFO] = { .type = NLA_BINARY },
+};
+
+/* struct xt_mtchk_param and xt_tgchk_param look very similar */
+static void
+nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
+ struct xt_match *match, void *info,
+ union nft_entry *entry, u8 proto, bool inv)
+{
+ par->net = &init_net;
+ par->table = ctx->table->name;
+ switch (ctx->afi->family) {
+ case AF_INET:
+ entry->e4.ip.proto = proto;
+ entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
+ break;
+ case AF_INET6:
+ entry->e6.ipv6.proto = proto;
+ entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
+ break;
+ }
+ par->entryinfo = entry;
+ par->match = match;
+ par->matchinfo = info;
+ if (ctx->chain->flags & NFT_BASE_CHAIN) {
+ const struct nft_base_chain *basechain =
+ nft_base_chain(ctx->chain);
+ const struct nf_hook_ops *ops = &basechain->ops;
+
+ par->hook_mask = 1 << ops->hooknum;
+ }
+ par->family = ctx->afi->family;
+}
+
+static void match_compat_from_user(struct xt_match *m, void *in, void *out)
+{
+#ifdef CONFIG_COMPAT
+ if (m->compat_from_user) {
+ int pad;
+
+ m->compat_from_user(out, in);
+ pad = XT_ALIGN(m->matchsize) - m->matchsize;
+ if (pad > 0)
+ memset(out + m->matchsize, 0, pad);
+ } else
+#endif
+ memcpy(out, in, XT_ALIGN(m->matchsize));
+}
+
+static int
+nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ void *info = nft_expr_priv(expr);
+ struct xt_match *match = expr->ops->data;
+ struct xt_mtchk_param par;
+ size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
+ u8 proto = 0;
+ bool inv = false;
+ union nft_entry e = {};
+ int ret;
+
+ match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info);
+
+ if (ctx->nla[NFTA_RULE_COMPAT]) {
+ ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
+ if (ret < 0)
+ goto err;
+ }
+
+ nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
+
+ ret = xt_check_match(&par, size, proto, inv);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+err:
+ module_put(match->me);
+ return ret;
+}
+
+static void
+nft_match_destroy(const struct nft_expr *expr)
+{
+ struct xt_match *match = expr->ops->data;
+
+ module_put(match->me);
+}
+
+static int
+match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
+{
+ int ret;
+
+#ifdef CONFIG_COMPAT
+ if (m->compat_to_user) {
+ mm_segment_t old_fs;
+ void *out;
+
+ out = kmalloc(XT_ALIGN(m->matchsize), GFP_ATOMIC);
+ if (out == NULL)
+ return -ENOMEM;
+
+ /* We want to reuse existing compat_to_user */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ m->compat_to_user(out, in);
+ set_fs(old_fs);
+ ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
+ kfree(out);
+ } else
+#endif
+ ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), in);
+
+ return ret;
+}
+
+static inline int nft_compat_match_offset(struct xt_match *match)
+{
+#ifdef CONFIG_COMPAT
+ return xt_compat_match_offset(match);
+#else
+ return 0;
+#endif
+}
+
+static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ void *info = nft_expr_priv(expr);
+ struct xt_match *match = expr->ops->data;
+
+ if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
+ nla_put_be32(skb, NFTA_MATCH_REV, htonl(match->revision)) ||
+ match_dump_info(skb, match, info))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static int nft_match_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ struct xt_match *match = expr->ops->data;
+ unsigned int hook_mask = 0;
+
+ if (ctx->chain->flags & NFT_BASE_CHAIN) {
+ const struct nft_base_chain *basechain =
+ nft_base_chain(ctx->chain);
+ const struct nf_hook_ops *ops = &basechain->ops;
+
+ hook_mask = 1 << ops->hooknum;
+ if (hook_mask & match->hooks)
+ return 0;
+
+ /* This match is being called from an invalid chain */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+nfnl_compat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
+ int event, u16 family, const char *name,
+ int rev, int target)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ unsigned int flags = portid ? NLM_F_MULTI : 0;
+
+ event |= NFNL_SUBSYS_NFT_COMPAT << 8;
+ nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ goto nlmsg_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = family;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ if (nla_put_string(skb, NFTA_COMPAT_NAME, name) ||
+ nla_put_be32(skb, NFTA_COMPAT_REV, htonl(rev)) ||
+ nla_put_be32(skb, NFTA_COMPAT_TYPE, htonl(target)))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -1;
+}
+
+static int
+nfnl_compat_get(struct sock *nfnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+ int ret = 0, target;
+ struct nfgenmsg *nfmsg;
+ const char *fmt;
+ const char *name;
+ u32 rev;
+ struct sk_buff *skb2;
+
+ if (tb[NFTA_COMPAT_NAME] == NULL ||
+ tb[NFTA_COMPAT_REV] == NULL ||
+ tb[NFTA_COMPAT_TYPE] == NULL)
+ return -EINVAL;
+
+ name = nla_data(tb[NFTA_COMPAT_NAME]);
+ rev = ntohl(nla_get_be32(tb[NFTA_COMPAT_REV]));
+ target = ntohl(nla_get_be32(tb[NFTA_COMPAT_TYPE]));
+
+ nfmsg = nlmsg_data(nlh);
+
+ switch (nfmsg->nfgen_family) {
+ case AF_INET:
+ fmt = "ipt_%s";
+ break;
+ case AF_INET6:
+ fmt = "ip6t_%s";
+ break;
+ default:
+ pr_err("nft_compat: unsupported protocol %d\n",
+ nfmsg->nfgen_family);
+ return -EINVAL;
+ }
+
+ try_then_request_module(xt_find_revision(nfmsg->nfgen_family, name,
+ rev, target, &ret),
+ fmt, name);
+
+ if (ret < 0)
+ return ret;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ /* include the best revision for this extension in the message */
+ if (nfnl_compat_fill_info(skb2, NETLINK_CB(skb).portid,
+ nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(nlh->nlmsg_type),
+ NFNL_MSG_COMPAT_GET,
+ nfmsg->nfgen_family,
+ name, ret, target) <= 0) {
+ kfree_skb(skb2);
+ return -ENOSPC;
+ }
+
+ ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+ MSG_DONTWAIT);
+ if (ret > 0)
+ ret = 0;
+
+ return ret == -EAGAIN ? -ENOBUFS : ret;
+}
+
+static const struct nla_policy nfnl_compat_policy_get[NFTA_COMPAT_MAX+1] = {
+ [NFTA_COMPAT_NAME] = { .type = NLA_NUL_STRING,
+ .len = NFT_COMPAT_NAME_MAX-1 },
+ [NFTA_COMPAT_REV] = { .type = NLA_U32 },
+ [NFTA_COMPAT_TYPE] = { .type = NLA_U32 },
+};
+
+static const struct nfnl_callback nfnl_nft_compat_cb[NFNL_MSG_COMPAT_MAX] = {
+ [NFNL_MSG_COMPAT_GET] = { .call = nfnl_compat_get,
+ .attr_count = NFTA_COMPAT_MAX,
+ .policy = nfnl_compat_policy_get },
+};
+
+static const struct nfnetlink_subsystem nfnl_compat_subsys = {
+ .name = "nft-compat",
+ .subsys_id = NFNL_SUBSYS_NFT_COMPAT,
+ .cb_count = NFNL_MSG_COMPAT_MAX,
+ .cb = nfnl_nft_compat_cb,
+};
+
+static LIST_HEAD(nft_match_list);
+
+struct nft_xt {
+ struct list_head head;
+ struct nft_expr_ops ops;
+};
+
+static struct nft_expr_type nft_match_type;
+
+static const struct nft_expr_ops *
+nft_match_select_ops(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+{
+ struct nft_xt *nft_match;
+ struct xt_match *match;
+ char *mt_name;
+ __u32 rev, family;
+
+ if (tb[NFTA_MATCH_NAME] == NULL ||
+ tb[NFTA_MATCH_REV] == NULL ||
+ tb[NFTA_MATCH_INFO] == NULL)
+ return ERR_PTR(-EINVAL);
+
+ mt_name = nla_data(tb[NFTA_MATCH_NAME]);
+ rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
+ family = ctx->afi->family;
+
+ /* Re-use the existing match if it's already loaded. */
+ list_for_each_entry(nft_match, &nft_match_list, head) {
+ struct xt_match *match = nft_match->ops.data;
+
+ if (strcmp(match->name, mt_name) == 0 &&
+ match->revision == rev && match->family == family)
+ return &nft_match->ops;
+ }
+
+ match = xt_request_find_match(family, mt_name, rev);
+ if (IS_ERR(match))
+ return ERR_PTR(-ENOENT);
+
+ /* This is the first time we use this match, allocate operations */
+ nft_match = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
+ if (nft_match == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ nft_match->ops.type = &nft_match_type;
+ nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize) +
+ nft_compat_match_offset(match));
+ nft_match->ops.eval = nft_match_eval;
+ nft_match->ops.init = nft_match_init;
+ nft_match->ops.destroy = nft_match_destroy;
+ nft_match->ops.dump = nft_match_dump;
+ nft_match->ops.validate = nft_match_validate;
+ nft_match->ops.data = match;
+
+ list_add(&nft_match->head, &nft_match_list);
+
+ return &nft_match->ops;
+}
+
+static void nft_match_release(void)
+{
+ struct nft_xt *nft_match;
+
+ list_for_each_entry(nft_match, &nft_match_list, head)
+ kfree(nft_match);
+}
+
+static struct nft_expr_type nft_match_type __read_mostly = {
+ .name = "match",
+ .select_ops = nft_match_select_ops,
+ .policy = nft_match_policy,
+ .maxattr = NFTA_MATCH_MAX,
+ .owner = THIS_MODULE,
+};
+
+static LIST_HEAD(nft_target_list);
+
+static struct nft_expr_type nft_target_type;
+
+static const struct nft_expr_ops *
+nft_target_select_ops(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+{
+ struct nft_xt *nft_target;
+ struct xt_target *target;
+ char *tg_name;
+ __u32 rev, family;
+
+ if (tb[NFTA_TARGET_NAME] == NULL ||
+ tb[NFTA_TARGET_REV] == NULL ||
+ tb[NFTA_TARGET_INFO] == NULL)
+ return ERR_PTR(-EINVAL);
+
+ tg_name = nla_data(tb[NFTA_TARGET_NAME]);
+ rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
+ family = ctx->afi->family;
+
+ /* Re-use the existing target if it's already loaded. */
+ list_for_each_entry(nft_target, &nft_target_list, head) {
+ struct xt_target *target = nft_target->ops.data;
+
+ if (strcmp(target->name, tg_name) == 0 &&
+ target->revision == rev && target->family == family)
+ return &nft_target->ops;
+ }
+
+ target = xt_request_find_target(family, tg_name, rev);
+ if (IS_ERR(target))
+ return ERR_PTR(-ENOENT);
+
+ /* This is the first time we use this target, allocate operations */
+ nft_target = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
+ if (nft_target == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ nft_target->ops.type = &nft_target_type;
+ nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize) +
+ nft_compat_target_offset(target));
+ nft_target->ops.eval = nft_target_eval;
+ nft_target->ops.init = nft_target_init;
+ nft_target->ops.destroy = nft_target_destroy;
+ nft_target->ops.dump = nft_target_dump;
+ nft_target->ops.validate = nft_target_validate;
+ nft_target->ops.data = target;
+
+ list_add(&nft_target->head, &nft_target_list);
+
+ return &nft_target->ops;
+}
+
+static void nft_target_release(void)
+{
+ struct nft_xt *nft_target;
+
+ list_for_each_entry(nft_target, &nft_target_list, head)
+ kfree(nft_target);
+}
+
+static struct nft_expr_type nft_target_type __read_mostly = {
+ .name = "target",
+ .select_ops = nft_target_select_ops,
+ .policy = nft_target_policy,
+ .maxattr = NFTA_TARGET_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_compat_module_init(void)
+{
+ int ret;
+
+ ret = nft_register_expr(&nft_match_type);
+ if (ret < 0)
+ return ret;
+
+ ret = nft_register_expr(&nft_target_type);
+ if (ret < 0)
+ goto err_match;
+
+ ret = nfnetlink_subsys_register(&nfnl_compat_subsys);
+ if (ret < 0) {
+ pr_err("nft_compat: cannot register with nfnetlink.\n");
+ goto err_target;
+ }
+
+ pr_info("nf_tables_compat: (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>\n");
+
+ return ret;
+
+err_target:
+ nft_unregister_expr(&nft_target_type);
+err_match:
+ nft_unregister_expr(&nft_match_type);
+ return ret;
+}
+
+static void __exit nft_compat_module_exit(void)
+{
+ nfnetlink_subsys_unregister(&nfnl_compat_subsys);
+ nft_unregister_expr(&nft_target_type);
+ nft_unregister_expr(&nft_match_type);
+ nft_match_release();
+ nft_target_release();
+}
+
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
+
+module_init(nft_compat_module_init);
+module_exit(nft_compat_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_EXPR("match");
+MODULE_ALIAS_NFT_EXPR("target");
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
new file mode 100644
index 000000000000..c89ee486ce54
--- /dev/null
+++ b/net/netfilter/nft_counter.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/seqlock.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_counter {
+ seqlock_t lock;
+ u64 bytes;
+ u64 packets;
+};
+
+static void nft_counter_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_counter *priv = nft_expr_priv(expr);
+
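+ /* Take the per-counter seqlock so nft_counter_dump() can read the
+ * byte and packet counters as a consistent pair.
+ */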
+ write_seqlock_bh(&priv->lock);
+ priv->bytes += pkt->skb->len;
+ priv->packets++;
+ write_sequnlock_bh(&priv->lock);
+}
+
+static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ struct nft_counter *priv = nft_expr_priv(expr);
+ unsigned int seq;
+ u64 bytes;
+ u64 packets;
+
+ do {
+ seq = read_seqbegin(&priv->lock);
+ bytes = priv->bytes;
+ packets = priv->packets;
+ } while (read_seqretry(&priv->lock, seq));
+
+ if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(bytes)))
+ goto nla_put_failure;
+ if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(packets)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
+ [NFTA_COUNTER_PACKETS] = { .type = NLA_U64 },
+ [NFTA_COUNTER_BYTES] = { .type = NLA_U64 },
+};
+
+static int nft_counter_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_counter *priv = nft_expr_priv(expr);
+
+ if (tb[NFTA_COUNTER_PACKETS])
+ priv->packets = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+ if (tb[NFTA_COUNTER_BYTES])
+ priv->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+
+ seqlock_init(&priv->lock);
+ return 0;
+}
+
+static struct nft_expr_type nft_counter_type;
+static const struct nft_expr_ops nft_counter_ops = {
+ .type = &nft_counter_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_counter)),
+ .eval = nft_counter_eval,
+ .init = nft_counter_init,
+ .dump = nft_counter_dump,
+};
+
+static struct nft_expr_type nft_counter_type __read_mostly = {
+ .name = "counter",
+ .ops = &nft_counter_ops,
+ .policy = nft_counter_policy,
+ .maxattr = NFTA_COUNTER_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_counter_module_init(void)
+{
+ return nft_register_expr(&nft_counter_type);
+}
+
+static void __exit nft_counter_module_exit(void)
+{
+ nft_unregister_expr(&nft_counter_type);
+}
+
+module_init(nft_counter_module_init);
+module_exit(nft_counter_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("counter");
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
new file mode 100644
index 000000000000..955f4e6e7089
--- /dev/null
+++ b/net/netfilter/nft_ct.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+
+struct nft_ct {
+ enum nft_ct_keys key:8;
+ enum ip_conntrack_dir dir:8;
+ enum nft_registers dreg:8;
+ u8 family;
+};
+
+static void nft_ct_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_ct *priv = nft_expr_priv(expr);
+ struct nft_data *dest = &data[priv->dreg];
+ enum ip_conntrack_info ctinfo;
+ const struct nf_conn *ct;
+ const struct nf_conn_help *help;
+ const struct nf_conntrack_tuple *tuple;
+ const struct nf_conntrack_helper *helper;
+ long diff;
+ unsigned int state;
+
+ ct = nf_ct_get(pkt->skb, &ctinfo);
+
+ switch (priv->key) {
+ case NFT_CT_STATE:
+ if (ct == NULL)
+ state = NF_CT_STATE_INVALID_BIT;
+ else if (nf_ct_is_untracked(ct))
+ state = NF_CT_STATE_UNTRACKED_BIT;
+ else
+ state = NF_CT_STATE_BIT(ctinfo);
+ dest->data[0] = state;
+ return;
+ }
+
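+ /* Every other key needs a conntrack entry; only NFT_CT_STATE above
+ * can be evaluated for invalid or untracked packets.
+ */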
+ if (ct == NULL)
+ goto err;
+
+ switch (priv->key) {
+ case NFT_CT_DIRECTION:
+ dest->data[0] = CTINFO2DIR(ctinfo);
+ return;
+ case NFT_CT_STATUS:
+ dest->data[0] = ct->status;
+ return;
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ case NFT_CT_MARK:
+ dest->data[0] = ct->mark;
+ return;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+ case NFT_CT_SECMARK:
+ dest->data[0] = ct->secmark;
+ return;
+#endif
+ case NFT_CT_EXPIRATION:
+ diff = (long)jiffies - (long)ct->timeout.expires;
+ if (diff < 0)
+ diff = 0;
+ dest->data[0] = jiffies_to_msecs(diff);
+ return;
+ case NFT_CT_HELPER:
+ if (ct->master == NULL)
+ goto err;
+ help = nfct_help(ct->master);
+ if (help == NULL)
+ goto err;
+ helper = rcu_dereference(help->helper);
+ if (helper == NULL)
+ goto err;
+ if (strlen(helper->name) >= sizeof(dest->data))
+ goto err;
+ strncpy((char *)dest->data, helper->name, sizeof(dest->data));
+ return;
+ }
+
+ tuple = &ct->tuplehash[priv->dir].tuple;
+ switch (priv->key) {
+ case NFT_CT_L3PROTOCOL:
+ dest->data[0] = nf_ct_l3num(ct);
+ return;
+ case NFT_CT_SRC:
+ memcpy(dest->data, tuple->src.u3.all,
+ nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
+ return;
+ case NFT_CT_DST:
+ memcpy(dest->data, tuple->dst.u3.all,
+ nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
+ return;
+ case NFT_CT_PROTOCOL:
+ dest->data[0] = nf_ct_protonum(ct);
+ return;
+ case NFT_CT_PROTO_SRC:
+ dest->data[0] = (__force __u16)tuple->src.u.all;
+ return;
+ case NFT_CT_PROTO_DST:
+ dest->data[0] = (__force __u16)tuple->dst.u.all;
+ return;
+ }
+ return;
+err:
+ data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_ct_policy[NFTA_CT_MAX + 1] = {
+ [NFTA_CT_DREG] = { .type = NLA_U32 },
+ [NFTA_CT_KEY] = { .type = NLA_U32 },
+ [NFTA_CT_DIRECTION] = { .type = NLA_U8 },
+};
+
+static int nft_ct_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_ct *priv = nft_expr_priv(expr);
+ int err;
+
+ if (tb[NFTA_CT_DREG] == NULL ||
+ tb[NFTA_CT_KEY] == NULL)
+ return -EINVAL;
+
+ priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
+ if (tb[NFTA_CT_DIRECTION] != NULL) {
+ priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
+ switch (priv->dir) {
+ case IP_CT_DIR_ORIGINAL:
+ case IP_CT_DIR_REPLY:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ switch (priv->key) {
+ case NFT_CT_STATE:
+ case NFT_CT_DIRECTION:
+ case NFT_CT_STATUS:
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ case NFT_CT_MARK:
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+ case NFT_CT_SECMARK:
+#endif
+ case NFT_CT_EXPIRATION:
+ case NFT_CT_HELPER:
+ if (tb[NFTA_CT_DIRECTION] != NULL)
+ return -EINVAL;
+ break;
+ case NFT_CT_PROTOCOL:
+ case NFT_CT_SRC:
+ case NFT_CT_DST:
+ case NFT_CT_PROTO_SRC:
+ case NFT_CT_PROTO_DST:
+ if (tb[NFTA_CT_DIRECTION] == NULL)
+ return -EINVAL;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = nf_ct_l3proto_try_module_get(ctx->afi->family);
+ if (err < 0)
+ return err;
+ priv->family = ctx->afi->family;
+
+ priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
+ err = nft_validate_output_register(priv->dreg);
+ if (err < 0)
+ goto err1;
+
+ err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+ if (err < 0)
+ goto err1;
+ return 0;
+
+err1:
+ nf_ct_l3proto_module_put(ctx->afi->family);
+ return err;
+}
+
+static void nft_ct_destroy(const struct nft_expr *expr)
+{
+ struct nft_ct *priv = nft_expr_priv(expr);
+
+ nf_ct_l3proto_module_put(priv->family);
+}
+
+static int nft_ct_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_ct *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_CT_DREG, htonl(priv->dreg)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
+ goto nla_put_failure;
+ if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_ct_type;
+static const struct nft_expr_ops nft_ct_ops = {
+ .type = &nft_ct_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
+ .eval = nft_ct_eval,
+ .init = nft_ct_init,
+ .destroy = nft_ct_destroy,
+ .dump = nft_ct_dump,
+};
+
+static struct nft_expr_type nft_ct_type __read_mostly = {
+ .name = "ct",
+ .ops = &nft_ct_ops,
+ .policy = nft_ct_policy,
+ .maxattr = NFTA_CT_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_ct_module_init(void)
+{
+ return nft_register_expr(&nft_ct_type);
+}
+
+static void __exit nft_ct_module_exit(void)
+{
+ nft_unregister_expr(&nft_ct_type);
+}
+
+module_init(nft_ct_module_init);
+module_exit(nft_ct_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("ct");
diff --git a/net/netfilter/nft_expr_template.c b/net/netfilter/nft_expr_template.c
new file mode 100644
index 000000000000..b6eed4d5a096
--- /dev/null
+++ b/net/netfilter/nft_expr_template.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_template {
+
+};
+
+static void nft_template_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_template *priv = nft_expr_priv(expr);
+
+}
+
+static const struct nla_policy nft_template_policy[NFTA_TEMPLATE_MAX + 1] = {
+ [NFTA_TEMPLATE_ATTR] = { .type = NLA_U32 },
+};
+
+static int nft_template_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_template *priv = nft_expr_priv(expr);
+
+ return 0;
+}
+
+static void nft_template_destroy(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_template *priv = nft_expr_priv(expr);
+
+}
+
+static int nft_template_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_template *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_TEMPLATE_ATTR, priv->field))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_template_type;
+static const struct nft_expr_ops nft_template_ops = {
+ .type = &nft_template_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_template)),
+ .eval = nft_template_eval,
+ .init = nft_template_init,
+ .destroy = nft_template_destroy,
+ .dump = nft_template_dump,
+};
+
+static struct nft_expr_type nft_template_type __read_mostly = {
+ .name = "template",
+ .ops = &nft_template_ops,
+ .policy = nft_template_policy,
+ .maxattr = NFTA_TEMPLATE_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_template_module_init(void)
+{
+ return nft_register_expr(&nft_template_type);
+}
+
+static void __exit nft_template_module_exit(void)
+{
+ nft_unregister_expr(&nft_template_type);
+}
+
+module_init(nft_template_module_init);
+module_exit(nft_template_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("template");
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
new file mode 100644
index 000000000000..8e0bb75e7c51
--- /dev/null
+++ b/net/netfilter/nft_exthdr.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+// FIXME:
+#include <net/ipv6.h>
+
+struct nft_exthdr {
+ u8 type;
+ u8 offset;
+ u8 len;
+ enum nft_registers dreg:8;
+};
+
+static void nft_exthdr_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_exthdr *priv = nft_expr_priv(expr);
+ struct nft_data *dest = &data[priv->dreg];
+ unsigned int offset;
+ int err;
+
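+ /* Locate the requested IPv6 extension header, then copy len bytes
+ * starting at the configured offset into the destination register.
+ */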
+ err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
+ if (err < 0)
+ goto err;
+ offset += priv->offset;
+
+ if (skb_copy_bits(pkt->skb, offset, dest->data, priv->len) < 0)
+ goto err;
+ return;
+err:
+ data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
+ [NFTA_EXTHDR_DREG] = { .type = NLA_U32 },
+ [NFTA_EXTHDR_TYPE] = { .type = NLA_U8 },
+ [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 },
+ [NFTA_EXTHDR_LEN] = { .type = NLA_U32 },
+};
+
+static int nft_exthdr_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_exthdr *priv = nft_expr_priv(expr);
+ int err;
+
+ if (tb[NFTA_EXTHDR_DREG] == NULL ||
+ tb[NFTA_EXTHDR_TYPE] == NULL ||
+ tb[NFTA_EXTHDR_OFFSET] == NULL ||
+ tb[NFTA_EXTHDR_LEN] == NULL)
+ return -EINVAL;
+
+ priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
+ priv->len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
+ if (priv->len == 0 ||
+ priv->len > FIELD_SIZEOF(struct nft_data, data))
+ return -EINVAL;
+
+ priv->dreg = ntohl(nla_get_be32(tb[NFTA_EXTHDR_DREG]));
+ err = nft_validate_output_register(priv->dreg);
+ if (err < 0)
+ return err;
+ return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+}
+
+static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_exthdr *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_EXTHDR_DREG, htonl(priv->dreg)))
+ goto nla_put_failure;
+ if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_EXTHDR_OFFSET, htonl(priv->offset)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_exthdr_type;
+static const struct nft_expr_ops nft_exthdr_ops = {
+ .type = &nft_exthdr_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
+ .eval = nft_exthdr_eval,
+ .init = nft_exthdr_init,
+ .dump = nft_exthdr_dump,
+};
+
+static struct nft_expr_type nft_exthdr_type __read_mostly = {
+ .name = "exthdr",
+ .ops = &nft_exthdr_ops,
+ .policy = nft_exthdr_policy,
+ .maxattr = NFTA_EXTHDR_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_exthdr_module_init(void)
+{
+ return nft_register_expr(&nft_exthdr_type);
+}
+
+static void __exit nft_exthdr_module_exit(void)
+{
+ nft_unregister_expr(&nft_exthdr_type);
+}
+
+module_init(nft_exthdr_module_init);
+module_exit(nft_exthdr_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("exthdr");
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
new file mode 100644
index 000000000000..3d3f8fce10a5
--- /dev/null
+++ b/net/netfilter/nft_hash.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/jhash.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_hash {
+ struct hlist_head *hash;
+ unsigned int hsize;
+};
+
+struct nft_hash_elem {
+ struct hlist_node hnode;
+ struct nft_data key;
+ struct nft_data data[];
+};
+
+static u32 nft_hash_rnd __read_mostly;
+static bool nft_hash_rnd_initted __read_mostly;
+
+static unsigned int nft_hash_data(const struct nft_data *data,
+ unsigned int hsize, unsigned int len)
+{
+ unsigned int h;
+
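+ /* Map the 32-bit jhash value onto [0, hsize) using a multiply and
+ * shift rather than a modulo.
+ */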
+ h = jhash(data->data, len, nft_hash_rnd);
+ return ((u64)h * hsize) >> 32;
+}
+
+static bool nft_hash_lookup(const struct nft_set *set,
+ const struct nft_data *key,
+ struct nft_data *data)
+{
+ const struct nft_hash *priv = nft_set_priv(set);
+ const struct nft_hash_elem *he;
+ unsigned int h;
+
+ h = nft_hash_data(key, priv->hsize, set->klen);
+ hlist_for_each_entry(he, &priv->hash[h], hnode) {
+ if (nft_data_cmp(&he->key, key, set->klen))
+ continue;
+ if (set->flags & NFT_SET_MAP)
+ nft_data_copy(data, he->data);
+ return true;
+ }
+ return false;
+}
+
+static void nft_hash_elem_destroy(const struct nft_set *set,
+ struct nft_hash_elem *he)
+{
+ nft_data_uninit(&he->key, NFT_DATA_VALUE);
+ if (set->flags & NFT_SET_MAP)
+ nft_data_uninit(he->data, set->dtype);
+ kfree(he);
+}
+
+static int nft_hash_insert(const struct nft_set *set,
+ const struct nft_set_elem *elem)
+{
+ struct nft_hash *priv = nft_set_priv(set);
+ struct nft_hash_elem *he;
+ unsigned int size, h;
+
+ if (elem->flags != 0)
+ return -EINVAL;
+
+ size = sizeof(*he);
+ if (set->flags & NFT_SET_MAP)
+ size += sizeof(he->data[0]);
+
+ he = kzalloc(size, GFP_KERNEL);
+ if (he == NULL)
+ return -ENOMEM;
+
+ nft_data_copy(&he->key, &elem->key);
+ if (set->flags & NFT_SET_MAP)
+ nft_data_copy(he->data, &elem->data);
+
+ h = nft_hash_data(&he->key, priv->hsize, set->klen);
+ hlist_add_head_rcu(&he->hnode, &priv->hash[h]);
+ return 0;
+}
+
+static void nft_hash_remove(const struct nft_set *set,
+ const struct nft_set_elem *elem)
+{
+ struct nft_hash_elem *he = elem->cookie;
+
+ hlist_del_rcu(&he->hnode);
+ kfree(he);
+}
+
+static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
+{
+ const struct nft_hash *priv = nft_set_priv(set);
+ struct nft_hash_elem *he;
+ unsigned int h;
+
+ h = nft_hash_data(&elem->key, priv->hsize, set->klen);
+ hlist_for_each_entry(he, &priv->hash[h], hnode) {
+ if (nft_data_cmp(&he->key, &elem->key, set->klen))
+ continue;
+
+ elem->cookie = he;
+ elem->flags = 0;
+ if (set->flags & NFT_SET_MAP)
+ nft_data_copy(&elem->data, he->data);
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
+ struct nft_set_iter *iter)
+{
+ const struct nft_hash *priv = nft_set_priv(set);
+ const struct nft_hash_elem *he;
+ struct nft_set_elem elem;
+ unsigned int i;
+
+ for (i = 0; i < priv->hsize; i++) {
+ hlist_for_each_entry(he, &priv->hash[i], hnode) {
+ if (iter->count < iter->skip)
+ goto cont;
+
+ memcpy(&elem.key, &he->key, sizeof(elem.key));
+ if (set->flags & NFT_SET_MAP)
+ memcpy(&elem.data, he->data, sizeof(elem.data));
+ elem.flags = 0;
+
+ iter->err = iter->fn(ctx, set, iter, &elem);
+ if (iter->err < 0)
+ return;
+cont:
+ iter->count++;
+ }
+ }
+}
+
+static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
+{
+ return sizeof(struct nft_hash);
+}
+
+static int nft_hash_init(const struct nft_set *set,
+ const struct nlattr * const tb[])
+{
+ struct nft_hash *priv = nft_set_priv(set);
+ unsigned int cnt, i;
+
+ if (unlikely(!nft_hash_rnd_initted)) {
+ get_random_bytes(&nft_hash_rnd, 4);
+ nft_hash_rnd_initted = true;
+ }
+
+ /* Aim for a load factor of 0.75 */
+ // FIXME: temporarily broken until we have set descriptions
+ cnt = 100;
+ cnt = cnt * 4 / 3;
+
+ priv->hash = kcalloc(cnt, sizeof(struct hlist_head), GFP_KERNEL);
+ if (priv->hash == NULL)
+ return -ENOMEM;
+ priv->hsize = cnt;
+
+ for (i = 0; i < cnt; i++)
+ INIT_HLIST_HEAD(&priv->hash[i]);
+
+ return 0;
+}
+
+static void nft_hash_destroy(const struct nft_set *set)
+{
+ const struct nft_hash *priv = nft_set_priv(set);
+ const struct hlist_node *next;
+ struct nft_hash_elem *elem;
+ unsigned int i;
+
+ for (i = 0; i < priv->hsize; i++) {
+ hlist_for_each_entry_safe(elem, next, &priv->hash[i], hnode) {
+ hlist_del(&elem->hnode);
+ nft_hash_elem_destroy(set, elem);
+ }
+ }
+ kfree(priv->hash);
+}
+
+static struct nft_set_ops nft_hash_ops __read_mostly = {
+ .privsize = nft_hash_privsize,
+ .init = nft_hash_init,
+ .destroy = nft_hash_destroy,
+ .get = nft_hash_get,
+ .insert = nft_hash_insert,
+ .remove = nft_hash_remove,
+ .lookup = nft_hash_lookup,
+ .walk = nft_hash_walk,
+ .features = NFT_SET_MAP,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_hash_module_init(void)
+{
+ return nft_register_set(&nft_hash_ops);
+}
+
+static void __exit nft_hash_module_exit(void)
+{
+ nft_unregister_set(&nft_hash_ops);
+}
+
+module_init(nft_hash_module_init);
+module_exit(nft_hash_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_SET();
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
new file mode 100644
index 000000000000..f169501f1ad4
--- /dev/null
+++ b/net/netfilter/nft_immediate.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_immediate_expr {
+ struct nft_data data;
+ enum nft_registers dreg:8;
+ u8 dlen;
+};
+
+static void nft_immediate_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+ nft_data_copy(&data[priv->dreg], &priv->data);
+}
+
+static const struct nla_policy nft_immediate_policy[NFTA_IMMEDIATE_MAX + 1] = {
+ [NFTA_IMMEDIATE_DREG] = { .type = NLA_U32 },
+ [NFTA_IMMEDIATE_DATA] = { .type = NLA_NESTED },
+};
+
+static int nft_immediate_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_immediate_expr *priv = nft_expr_priv(expr);
+ struct nft_data_desc desc;
+ int err;
+
+ if (tb[NFTA_IMMEDIATE_DREG] == NULL ||
+ tb[NFTA_IMMEDIATE_DATA] == NULL)
+ return -EINVAL;
+
+ priv->dreg = ntohl(nla_get_be32(tb[NFTA_IMMEDIATE_DREG]));
+ err = nft_validate_output_register(priv->dreg);
+ if (err < 0)
+ return err;
+
+ err = nft_data_init(ctx, &priv->data, &desc, tb[NFTA_IMMEDIATE_DATA]);
+ if (err < 0)
+ return err;
+ priv->dlen = desc.len;
+
+ err = nft_validate_data_load(ctx, priv->dreg, &priv->data, desc.type);
+ if (err < 0)
+ goto err1;
+
+ return 0;
+
+err1:
+ nft_data_uninit(&priv->data, desc.type);
+ return err;
+}
+
+static void nft_immediate_destroy(const struct nft_expr *expr)
+{
+ const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+ return nft_data_uninit(&priv->data, nft_dreg_to_type(priv->dreg));
+}
+
+static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_IMMEDIATE_DREG, htonl(priv->dreg)))
+ goto nla_put_failure;
+
+ return nft_data_dump(skb, NFTA_IMMEDIATE_DATA, &priv->data,
+ nft_dreg_to_type(priv->dreg), priv->dlen);
+
+nla_put_failure:
+ return -1;
+}
+
+static int nft_immediate_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+ if (priv->dreg == NFT_REG_VERDICT)
+ *data = &priv->data;
+
+ return 0;
+}
+
+static struct nft_expr_type nft_imm_type;
+static const struct nft_expr_ops nft_imm_ops = {
+ .type = &nft_imm_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
+ .eval = nft_immediate_eval,
+ .init = nft_immediate_init,
+ .destroy = nft_immediate_destroy,
+ .dump = nft_immediate_dump,
+ .validate = nft_immediate_validate,
+};
+
+static struct nft_expr_type nft_imm_type __read_mostly = {
+ .name = "immediate",
+ .ops = &nft_imm_ops,
+ .policy = nft_immediate_policy,
+ .maxattr = NFTA_IMMEDIATE_MAX,
+ .owner = THIS_MODULE,
+};
+
+int __init nft_immediate_module_init(void)
+{
+ return nft_register_expr(&nft_imm_type);
+}
+
+void nft_immediate_module_exit(void)
+{
+ nft_unregister_expr(&nft_imm_type);
+}
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
new file mode 100644
index 000000000000..85da5bd02f64
--- /dev/null
+++ b/net/netfilter/nft_limit.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+static DEFINE_SPINLOCK(limit_lock);
+
+struct nft_limit {
+ u64 tokens;
+ u64 rate;
+ u64 unit;
+ unsigned long stamp;
+};
+
+static void nft_limit_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_limit *priv = nft_expr_priv(expr);
+
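+ /* Simple token bucket: refill to the configured rate once per unit
+ * interval; each packet consumes one token, and evaluation breaks
+ * once the bucket is empty.
+ */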
+ spin_lock_bh(&limit_lock);
+ if (time_after_eq(jiffies, priv->stamp)) {
+ priv->tokens = priv->rate;
+ priv->stamp = jiffies + priv->unit * HZ;
+ }
+
+ if (priv->tokens >= 1) {
+ priv->tokens--;
+ spin_unlock_bh(&limit_lock);
+ return;
+ }
+ spin_unlock_bh(&limit_lock);
+
+ data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
+ [NFTA_LIMIT_RATE] = { .type = NLA_U64 },
+ [NFTA_LIMIT_UNIT] = { .type = NLA_U64 },
+};
+
+static int nft_limit_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_limit *priv = nft_expr_priv(expr);
+
+ if (tb[NFTA_LIMIT_RATE] == NULL ||
+ tb[NFTA_LIMIT_UNIT] == NULL)
+ return -EINVAL;
+
+ priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+ priv->unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+ priv->stamp = jiffies + priv->unit * HZ;
+ priv->tokens = priv->rate;
+ return 0;
+}
+
+static int nft_limit_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_limit *priv = nft_expr_priv(expr);
+
+ if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(priv->rate)))
+ goto nla_put_failure;
+ if (nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(priv->unit)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_limit_type;
+static const struct nft_expr_ops nft_limit_ops = {
+ .type = &nft_limit_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_limit)),
+ .eval = nft_limit_eval,
+ .init = nft_limit_init,
+ .dump = nft_limit_dump,
+};
+
+static struct nft_expr_type nft_limit_type __read_mostly = {
+ .name = "limit",
+ .ops = &nft_limit_ops,
+ .policy = nft_limit_policy,
+ .maxattr = NFTA_LIMIT_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_limit_module_init(void)
+{
+ return nft_register_expr(&nft_limit_type);
+}
+
+static void __exit nft_limit_module_exit(void)
+{
+ nft_unregister_expr(&nft_limit_type);
+}
+
+module_init(nft_limit_module_init);
+module_exit(nft_limit_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("limit");
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
new file mode 100644
index 000000000000..57cad072a13e
--- /dev/null
+++ b/net/netfilter/nft_log.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_log.h>
+#include <linux/netdevice.h>
+
+static const char *nft_log_null_prefix = "";
+
+struct nft_log {
+ struct nf_loginfo loginfo;
+ char *prefix;
+ int family;
+};
+
+static void nft_log_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_log *priv = nft_expr_priv(expr);
+ struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
+
+ nf_log_packet(net, priv->family, pkt->hooknum, pkt->skb, pkt->in,
+ pkt->out, &priv->loginfo, "%s", priv->prefix);
+}
+
+static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
+ [NFTA_LOG_GROUP] = { .type = NLA_U16 },
+ [NFTA_LOG_PREFIX] = { .type = NLA_STRING },
+ [NFTA_LOG_SNAPLEN] = { .type = NLA_U32 },
+ [NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 },
+};
+
+static int nft_log_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_log *priv = nft_expr_priv(expr);
+ struct nf_loginfo *li = &priv->loginfo;
+ const struct nlattr *nla;
+
+ priv->family = ctx->afi->family;
+
+ nla = tb[NFTA_LOG_PREFIX];
+ if (nla != NULL) {
+ priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
+ if (priv->prefix == NULL)
+ return -ENOMEM;
+ nla_strlcpy(priv->prefix, nla, nla_len(nla) + 1);
+ } else
+ priv->prefix = (char *)nft_log_null_prefix;
+
+ li->type = NF_LOG_TYPE_ULOG;
+ if (tb[NFTA_LOG_GROUP] != NULL)
+ li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));
+
+ if (tb[NFTA_LOG_SNAPLEN] != NULL)
+ li->u.ulog.copy_len = ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
+ if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
+ li->u.ulog.qthreshold =
+ ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
+ }
+
+ return 0;
+}
+
+static void nft_log_destroy(const struct nft_expr *expr)
+{
+ struct nft_log *priv = nft_expr_priv(expr);
+
+ if (priv->prefix != nft_log_null_prefix)
+ kfree(priv->prefix);
+}
+
+static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_log *priv = nft_expr_priv(expr);
+ const struct nf_loginfo *li = &priv->loginfo;
+
+ if (priv->prefix != nft_log_null_prefix)
+ if (nla_put_string(skb, NFTA_LOG_PREFIX, priv->prefix))
+ goto nla_put_failure;
+ if (li->u.ulog.group)
+ if (nla_put_be16(skb, NFTA_LOG_GROUP, htons(li->u.ulog.group)))
+ goto nla_put_failure;
+ if (li->u.ulog.copy_len)
+ if (nla_put_be32(skb, NFTA_LOG_SNAPLEN,
+ htonl(li->u.ulog.copy_len)))
+ goto nla_put_failure;
+ if (li->u.ulog.qthreshold)
+ if (nla_put_be16(skb, NFTA_LOG_QTHRESHOLD,
+ htons(li->u.ulog.qthreshold)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_log_type;
+static const struct nft_expr_ops nft_log_ops = {
+ .type = &nft_log_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_log)),
+ .eval = nft_log_eval,
+ .init = nft_log_init,
+ .destroy = nft_log_destroy,
+ .dump = nft_log_dump,
+};
+
+static struct nft_expr_type nft_log_type __read_mostly = {
+ .name = "log",
+ .ops = &nft_log_ops,
+ .policy = nft_log_policy,
+ .maxattr = NFTA_LOG_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_log_module_init(void)
+{
+ return nft_register_expr(&nft_log_type);
+}
+
+static void __exit nft_log_module_exit(void)
+{
+ nft_unregister_expr(&nft_log_type);
+}
+
+module_init(nft_log_module_init);
+module_exit(nft_log_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("log");
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
new file mode 100644
index 000000000000..8a6116b75b5a
--- /dev/null
+++ b/net/netfilter/nft_lookup.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_lookup {
+ struct nft_set *set;
+ enum nft_registers sreg:8;
+ enum nft_registers dreg:8;
+ struct nft_set_binding binding;
+};
+
+static void nft_lookup_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_lookup *priv = nft_expr_priv(expr);
+ const struct nft_set *set = priv->set;
+
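+ /* On a hit the set backend copies any mapped data into the
+ * destination register; a miss breaks rule evaluation.
+ */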
+ if (set->ops->lookup(set, &data[priv->sreg], &data[priv->dreg]))
+ return;
+ data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
+ [NFTA_LOOKUP_SET] = { .type = NLA_STRING },
+ [NFTA_LOOKUP_SREG] = { .type = NLA_U32 },
+ [NFTA_LOOKUP_DREG] = { .type = NLA_U32 },
+};
+
+static int nft_lookup_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_lookup *priv = nft_expr_priv(expr);
+ struct nft_set *set;
+ int err;
+
+ if (tb[NFTA_LOOKUP_SET] == NULL ||
+ tb[NFTA_LOOKUP_SREG] == NULL)
+ return -EINVAL;
+
+ set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]);
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+
+ priv->sreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_SREG]));
+ err = nft_validate_input_register(priv->sreg);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_LOOKUP_DREG] != NULL) {
+ if (!(set->flags & NFT_SET_MAP))
+ return -EINVAL;
+
+ priv->dreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_DREG]));
+ err = nft_validate_output_register(priv->dreg);
+ if (err < 0)
+ return err;
+
+ if (priv->dreg == NFT_REG_VERDICT) {
+ if (set->dtype != NFT_DATA_VERDICT)
+ return -EINVAL;
+ } else if (set->dtype == NFT_DATA_VERDICT)
+ return -EINVAL;
+ } else if (set->flags & NFT_SET_MAP)
+ return -EINVAL;
+
+ err = nf_tables_bind_set(ctx, set, &priv->binding);
+ if (err < 0)
+ return err;
+
+ priv->set = set;
+ return 0;
+}
+
+static void nft_lookup_destroy(const struct nft_expr *expr)
+{
+ struct nft_lookup *priv = nft_expr_priv(expr);
+
+ nf_tables_unbind_set(NULL, priv->set, &priv->binding);
+}
+
+static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_lookup *priv = nft_expr_priv(expr);
+
+ if (nla_put_string(skb, NFTA_LOOKUP_SET, priv->set->name))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_LOOKUP_SREG, htonl(priv->sreg)))
+ goto nla_put_failure;
+ if (priv->set->flags & NFT_SET_MAP)
+ if (nla_put_be32(skb, NFTA_LOOKUP_DREG, htonl(priv->dreg)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_lookup_type;
+static const struct nft_expr_ops nft_lookup_ops = {
+ .type = &nft_lookup_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
+ .eval = nft_lookup_eval,
+ .init = nft_lookup_init,
+ .destroy = nft_lookup_destroy,
+ .dump = nft_lookup_dump,
+};
+
+static struct nft_expr_type nft_lookup_type __read_mostly = {
+ .name = "lookup",
+ .ops = &nft_lookup_ops,
+ .policy = nft_lookup_policy,
+ .maxattr = NFTA_LOOKUP_MAX,
+ .owner = THIS_MODULE,
+};
+
+int __init nft_lookup_module_init(void)
+{
+ return nft_register_expr(&nft_lookup_type);
+}
+
+void nft_lookup_module_exit(void)
+{
+ nft_unregister_expr(&nft_lookup_type);
+}
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
new file mode 100644
index 000000000000..8c28220a90b3
--- /dev/null
+++ b/net/netfilter/nft_meta.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <net/tcp_states.h> /* for TCP_TIME_WAIT */
+#include <net/netfilter/nf_tables.h>
+
+struct nft_meta {
+ enum nft_meta_keys key:8;
+ enum nft_registers dreg:8;
+};
+
+static void nft_meta_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_meta *priv = nft_expr_priv(expr);
+ const struct sk_buff *skb = pkt->skb;
+ const struct net_device *in = pkt->in, *out = pkt->out;
+ struct nft_data *dest = &data[priv->dreg];
+
+ switch (priv->key) {
+ case NFT_META_LEN:
+ dest->data[0] = skb->len;
+ break;
+ case NFT_META_PROTOCOL:
+ *(__be16 *)dest->data = skb->protocol;
+ break;
+ case NFT_META_PRIORITY:
+ dest->data[0] = skb->priority;
+ break;
+ case NFT_META_MARK:
+ dest->data[0] = skb->mark;
+ break;
+ case NFT_META_IIF:
+ if (in == NULL)
+ goto err;
+ dest->data[0] = in->ifindex;
+ break;
+ case NFT_META_OIF:
+ if (out == NULL)
+ goto err;
+ dest->data[0] = out->ifindex;
+ break;
+ case NFT_META_IIFNAME:
+ if (in == NULL)
+ goto err;
+ strncpy((char *)dest->data, in->name, sizeof(dest->data));
+ break;
+ case NFT_META_OIFNAME:
+ if (out == NULL)
+ goto err;
+ strncpy((char *)dest->data, out->name, sizeof(dest->data));
+ break;
+ case NFT_META_IIFTYPE:
+ if (in == NULL)
+ goto err;
+ *(u16 *)dest->data = in->type;
+ break;
+ case NFT_META_OIFTYPE:
+ if (out == NULL)
+ goto err;
+ *(u16 *)dest->data = out->type;
+ break;
+ case NFT_META_SKUID:
+ if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT)
+ goto err;
+
+ read_lock_bh(&skb->sk->sk_callback_lock);
+ if (skb->sk->sk_socket == NULL ||
+ skb->sk->sk_socket->file == NULL) {
+ read_unlock_bh(&skb->sk->sk_callback_lock);
+ goto err;
+ }
+
+ dest->data[0] =
+ from_kuid_munged(&init_user_ns,
+ skb->sk->sk_socket->file->f_cred->fsuid);
+ read_unlock_bh(&skb->sk->sk_callback_lock);
+ break;
+ case NFT_META_SKGID:
+ if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT)
+ goto err;
+
+ read_lock_bh(&skb->sk->sk_callback_lock);
+ if (skb->sk->sk_socket == NULL ||
+ skb->sk->sk_socket->file == NULL) {
+ read_unlock_bh(&skb->sk->sk_callback_lock);
+ goto err;
+ }
+ dest->data[0] =
+ from_kgid_munged(&init_user_ns,
+ skb->sk->sk_socket->file->f_cred->fsgid);
+ read_unlock_bh(&skb->sk->sk_callback_lock);
+ break;
+#ifdef CONFIG_NET_CLS_ROUTE
+ case NFT_META_RTCLASSID: {
+ const struct dst_entry *dst = skb_dst(skb);
+
+ if (dst == NULL)
+ goto err;
+ dest->data[0] = dst->tclassid;
+ break;
+ }
+#endif
+#ifdef CONFIG_NETWORK_SECMARK
+ case NFT_META_SECMARK:
+ dest->data[0] = skb->secmark;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ goto err;
+ }
+ return;
+
+err:
+ data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
+ [NFTA_META_DREG] = { .type = NLA_U32 },
+ [NFTA_META_KEY] = { .type = NLA_U32 },
+};
+
+static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_meta *priv = nft_expr_priv(expr);
+ int err;
+
+ if (tb[NFTA_META_DREG] == NULL ||
+ tb[NFTA_META_KEY] == NULL)
+ return -EINVAL;
+
+ priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+ switch (priv->key) {
+ case NFT_META_LEN:
+ case NFT_META_PROTOCOL:
+ case NFT_META_PRIORITY:
+ case NFT_META_MARK:
+ case NFT_META_IIF:
+ case NFT_META_OIF:
+ case NFT_META_IIFNAME:
+ case NFT_META_OIFNAME:
+ case NFT_META_IIFTYPE:
+ case NFT_META_OIFTYPE:
+ case NFT_META_SKUID:
+ case NFT_META_SKGID:
+#ifdef CONFIG_NET_CLS_ROUTE
+ case NFT_META_RTCLASSID:
+#endif
+#ifdef CONFIG_NETWORK_SECMARK
+ case NFT_META_SECMARK:
+#endif
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
+ err = nft_validate_output_register(priv->dreg);
+ if (err < 0)
+ return err;
+ return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+}
+
+static int nft_meta_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_meta *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_META_DREG, htonl(priv->dreg)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_meta_type;
+static const struct nft_expr_ops nft_meta_ops = {
+ .type = &nft_meta_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+ .eval = nft_meta_eval,
+ .init = nft_meta_init,
+ .dump = nft_meta_dump,
+};
+
+static struct nft_expr_type nft_meta_type __read_mostly = {
+ .name = "meta",
+ .ops = &nft_meta_ops,
+ .policy = nft_meta_policy,
+ .maxattr = NFTA_META_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_meta_module_init(void)
+{
+ return nft_register_expr(&nft_meta_type);
+}
+
+static void __exit nft_meta_module_exit(void)
+{
+ nft_unregister_expr(&nft_meta_type);
+}
+
+module_init(nft_meta_module_init);
+module_exit(nft_meta_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("meta");
diff --git a/net/netfilter/nft_meta_target.c b/net/netfilter/nft_meta_target.c
new file mode 100644
index 000000000000..71177df75ffb
--- /dev/null
+++ b/net/netfilter/nft_meta_target.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_meta {
+ enum nft_meta_keys key;
+};
+
+static void nft_meta_eval(const struct nft_expr *expr,
+ struct nft_data *nfres,
+ struct nft_data *data,
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_meta *meta = nft_expr_priv(expr);
+ struct sk_buff *skb = pkt->skb;
+ u32 val = data->data[0];
+
+ switch (meta->key) {
+ case NFT_META_MARK:
+ skb->mark = val;
+ break;
+ case NFT_META_PRIORITY:
+ skb->priority = val;
+ break;
+ case NFT_META_NFTRACE:
+ skb->nf_trace = val;
+ break;
+#ifdef CONFIG_NETWORK_SECMARK
+ case NFT_META_SECMARK:
+ skb->secmark = val;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ }
+}
+
+static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
+ [NFTA_META_KEY] = { .type = NLA_U32 },
+};
+
+static int nft_meta_init(const struct nft_expr *expr, struct nlattr *tb[])
+{
+ struct nft_meta *meta = nft_expr_priv(expr);
+
+ if (tb[NFTA_META_KEY] == NULL)
+ return -EINVAL;
+
+ meta->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+ switch (meta->key) {
+ case NFT_META_MARK:
+ case NFT_META_PRIORITY:
+ case NFT_META_NFTRACE:
+#ifdef CONFIG_NETWORK_SECMARK
+ case NFT_META_SECMARK:
+#endif
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nft_meta_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ struct nft_meta *meta = nft_expr_priv(expr);
+
+ NLA_PUT_BE32(skb, NFTA_META_KEY, htonl(meta->key));
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_ops meta_target __read_mostly = {
+ .name = "meta",
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+ .owner = THIS_MODULE,
+ .eval = nft_meta_eval,
+ .init = nft_meta_init,
+ .dump = nft_meta_dump,
+ .policy = nft_meta_policy,
+ .maxattr = NFTA_META_MAX,
+};
+
+static int __init nft_meta_target_init(void)
+{
+ return nft_register_expr(&meta_target);
+}
+
+static void __exit nft_meta_target_exit(void)
+{
+ nft_unregister_expr(&meta_target);
+}
+
+module_init(nft_meta_target_init);
+module_exit(nft_meta_target_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("meta");
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
new file mode 100644
index 000000000000..b0b87b2d2411
--- /dev/null
+++ b/net/netfilter/nft_nat.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ * Copyright (c) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/string.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/ip.h>
+
+struct nft_nat {
+ enum nft_registers sreg_addr_min:8;
+ enum nft_registers sreg_addr_max:8;
+ enum nft_registers sreg_proto_min:8;
+ enum nft_registers sreg_proto_max:8;
+ int family;
+ enum nf_nat_manip_type type;
+};
+
+static void nft_nat_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_nat *priv = nft_expr_priv(expr);
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(pkt->skb, &ctinfo);
+ struct nf_nat_range range;
+
+ memset(&range, 0, sizeof(range));
+ if (priv->sreg_addr_min) {
+ if (priv->family == AF_INET) {
+ range.min_addr.ip = data[priv->sreg_addr_min].data[0];
+ range.max_addr.ip = data[priv->sreg_addr_max].data[0];
+
+ } else {
+ memcpy(range.min_addr.ip6,
+ data[priv->sreg_addr_min].data,
+ sizeof(struct nft_data));
+ memcpy(range.max_addr.ip6,
+ data[priv->sreg_addr_max].data,
+ sizeof(struct nft_data));
+ }
+ range.flags |= NF_NAT_RANGE_MAP_IPS;
+ }
+
+ if (priv->sreg_proto_min) {
+ range.min_proto.all = data[priv->sreg_proto_min].data[0];
+ range.max_proto.all = data[priv->sreg_proto_max].data[0];
+ range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+ }
+
+ data[NFT_REG_VERDICT].verdict =
+ nf_nat_setup_info(ct, &range, priv->type);
+}
+
+static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
+ [NFTA_NAT_TYPE] = { .type = NLA_U32 },
+ [NFTA_NAT_FAMILY] = { .type = NLA_U32 },
+ [NFTA_NAT_REG_ADDR_MIN] = { .type = NLA_U32 },
+ [NFTA_NAT_REG_ADDR_MAX] = { .type = NLA_U32 },
+ [NFTA_NAT_REG_PROTO_MIN] = { .type = NLA_U32 },
+ [NFTA_NAT_REG_PROTO_MAX] = { .type = NLA_U32 },
+};
+
+static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_nat *priv = nft_expr_priv(expr);
+ int err;
+
+ if (tb[NFTA_NAT_TYPE] == NULL)
+ return -EINVAL;
+
+ switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) {
+ case NFT_NAT_SNAT:
+ priv->type = NF_NAT_MANIP_SRC;
+ break;
+ case NFT_NAT_DNAT:
+ priv->type = NF_NAT_MANIP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (tb[NFTA_NAT_FAMILY] == NULL)
+ return -EINVAL;
+
+ priv->family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY]));
+ if (priv->family != AF_INET && priv->family != AF_INET6)
+ return -EINVAL;
+
+ if (tb[NFTA_NAT_REG_ADDR_MIN]) {
+ priv->sreg_addr_min = ntohl(nla_get_be32(
+ tb[NFTA_NAT_REG_ADDR_MIN]));
+ err = nft_validate_input_register(priv->sreg_addr_min);
+ if (err < 0)
+ return err;
+ }
+
+ if (tb[NFTA_NAT_REG_ADDR_MAX]) {
+ priv->sreg_addr_max = ntohl(nla_get_be32(
+ tb[NFTA_NAT_REG_ADDR_MAX]));
+ err = nft_validate_input_register(priv->sreg_addr_max);
+ if (err < 0)
+ return err;
+ } else
+ priv->sreg_addr_max = priv->sreg_addr_min;
+
+ if (tb[NFTA_NAT_REG_PROTO_MIN]) {
+ priv->sreg_proto_min = ntohl(nla_get_be32(
+ tb[NFTA_NAT_REG_PROTO_MIN]));
+ err = nft_validate_input_register(priv->sreg_proto_min);
+ if (err < 0)
+ return err;
+ }
+
+ if (tb[NFTA_NAT_REG_PROTO_MAX]) {
+ priv->sreg_proto_max = ntohl(nla_get_be32(
+ tb[NFTA_NAT_REG_PROTO_MAX]));
+ err = nft_validate_input_register(priv->sreg_proto_max);
+ if (err < 0)
+ return err;
+ } else
+ priv->sreg_proto_max = priv->sreg_proto_min;
+
+ return 0;
+}
+
+static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_nat *priv = nft_expr_priv(expr);
+
+ switch (priv->type) {
+ case NF_NAT_MANIP_SRC:
+ if (nla_put_be32(skb, NFTA_NAT_TYPE, htonl(NFT_NAT_SNAT)))
+ goto nla_put_failure;
+ break;
+ case NF_NAT_MANIP_DST:
+ if (nla_put_be32(skb, NFTA_NAT_TYPE, htonl(NFT_NAT_DNAT)))
+ goto nla_put_failure;
+ break;
+ }
+
+ if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb,
+ NFTA_NAT_REG_ADDR_MIN, htonl(priv->sreg_addr_min)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb,
+ NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb,
+ NFTA_NAT_REG_PROTO_MIN, htonl(priv->sreg_proto_min)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb,
+ NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_nat_type;
+static const struct nft_expr_ops nft_nat_ops = {
+ .type = &nft_nat_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_nat)),
+ .eval = nft_nat_eval,
+ .init = nft_nat_init,
+ .dump = nft_nat_dump,
+};
+
+static struct nft_expr_type nft_nat_type __read_mostly = {
+ .name = "nat",
+ .ops = &nft_nat_ops,
+ .policy = nft_nat_policy,
+ .maxattr = NFTA_NAT_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_nat_module_init(void)
+{
+ int err;
+
+ err = nft_register_expr(&nft_nat_type);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void __exit nft_nat_module_exit(void)
+{
+ nft_unregister_expr(&nft_nat_type);
+}
+
+module_init(nft_nat_module_init);
+module_exit(nft_nat_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
+MODULE_ALIAS_NFT_EXPR("nat");
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
new file mode 100644
index 000000000000..a2aeb318678f
--- /dev/null
+++ b/net/netfilter/nft_payload.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+static void nft_payload_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_payload *priv = nft_expr_priv(expr);
+ const struct sk_buff *skb = pkt->skb;
+ struct nft_data *dest = &data[priv->dreg];
+ int offset;
+
+ switch (priv->base) {
+ case NFT_PAYLOAD_LL_HEADER:
+ if (!skb_mac_header_was_set(skb))
+ goto err;
+ offset = skb_mac_header(skb) - skb->data;
+ break;
+ case NFT_PAYLOAD_NETWORK_HEADER:
+ offset = skb_network_offset(skb);
+ break;
+ case NFT_PAYLOAD_TRANSPORT_HEADER:
+ offset = pkt->xt.thoff;
+ break;
+ default:
+ BUG();
+ }
+ offset += priv->offset;
+
+ if (skb_copy_bits(skb, offset, dest->data, priv->len) < 0)
+ goto err;
+ return;
+err:
+ data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
+ [NFTA_PAYLOAD_DREG] = { .type = NLA_U32 },
+ [NFTA_PAYLOAD_BASE] = { .type = NLA_U32 },
+ [NFTA_PAYLOAD_OFFSET] = { .type = NLA_U32 },
+ [NFTA_PAYLOAD_LEN] = { .type = NLA_U32 },
+};
+
+static int nft_payload_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_payload *priv = nft_expr_priv(expr);
+ int err;
+
+ priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+ priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+
+ priv->dreg = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_DREG]));
+ err = nft_validate_output_register(priv->dreg);
+ if (err < 0)
+ return err;
+ return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+}
+
+static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_payload *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_PAYLOAD_DREG, htonl(priv->dreg)) ||
+ nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
+ nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
+ nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_payload_type;
+static const struct nft_expr_ops nft_payload_ops = {
+ .type = &nft_payload_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
+ .eval = nft_payload_eval,
+ .init = nft_payload_init,
+ .dump = nft_payload_dump,
+};
+
+const struct nft_expr_ops nft_payload_fast_ops = {
+ .type = &nft_payload_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
+ .eval = nft_payload_eval,
+ .init = nft_payload_init,
+ .dump = nft_payload_dump,
+};
+
+static const struct nft_expr_ops *
+nft_payload_select_ops(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+{
+ enum nft_payload_bases base;
+ unsigned int offset, len;
+
+ if (tb[NFTA_PAYLOAD_DREG] == NULL ||
+ tb[NFTA_PAYLOAD_BASE] == NULL ||
+ tb[NFTA_PAYLOAD_OFFSET] == NULL ||
+ tb[NFTA_PAYLOAD_LEN] == NULL)
+ return ERR_PTR(-EINVAL);
+
+ base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+ switch (base) {
+ case NFT_PAYLOAD_LL_HEADER:
+ case NFT_PAYLOAD_NETWORK_HEADER:
+ case NFT_PAYLOAD_TRANSPORT_HEADER:
+ break;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+ len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+ if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data))
+ return ERR_PTR(-EINVAL);
+
+ if (len <= 4 && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER)
+ return &nft_payload_fast_ops;
+ else
+ return &nft_payload_ops;
+}
+
+static struct nft_expr_type nft_payload_type __read_mostly = {
+ .name = "payload",
+ .select_ops = nft_payload_select_ops,
+ .policy = nft_payload_policy,
+ .maxattr = NFTA_PAYLOAD_MAX,
+ .owner = THIS_MODULE,
+};
+
+int __init nft_payload_module_init(void)
+{
+ return nft_register_expr(&nft_payload_type);
+}
+
+void nft_payload_module_exit(void)
+{
+ nft_unregister_expr(&nft_payload_type);
+}
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
new file mode 100644
index 000000000000..ca0c1b231bfe
--- /dev/null
+++ b/net/netfilter/nft_rbtree.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_rbtree {
+ struct rb_root root;
+};
+
+struct nft_rbtree_elem {
+ struct rb_node node;
+ u16 flags;
+ struct nft_data key;
+ struct nft_data data[];
+};
+
+static bool nft_rbtree_lookup(const struct nft_set *set,
+ const struct nft_data *key,
+ struct nft_data *data)
+{
+ const struct nft_rbtree *priv = nft_set_priv(set);
+ const struct nft_rbtree_elem *rbe, *interval = NULL;
+ const struct rb_node *parent = priv->root.rb_node;
+ int d;
+
+ while (parent != NULL) {
+ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+
+ d = nft_data_cmp(&rbe->key, key, set->klen);
+ if (d < 0) {
+ parent = parent->rb_left;
+ interval = rbe;
+ } else if (d > 0)
+ parent = parent->rb_right;
+ else {
+found:
+ if (rbe->flags & NFT_SET_ELEM_INTERVAL_END)
+ goto out;
+ if (set->flags & NFT_SET_MAP)
+ nft_data_copy(data, rbe->data);
+ return true;
+ }
+ }
+
+ if (set->flags & NFT_SET_INTERVAL && interval != NULL) {
+ rbe = interval;
+ goto found;
+ }
+out:
+ return false;
+}
+
+static void nft_rbtree_elem_destroy(const struct nft_set *set,
+ struct nft_rbtree_elem *rbe)
+{
+ nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
+ if (set->flags & NFT_SET_MAP)
+ nft_data_uninit(rbe->data, set->dtype);
+ kfree(rbe);
+}
+
+static int __nft_rbtree_insert(const struct nft_set *set,
+ struct nft_rbtree_elem *new)
+{
+ struct nft_rbtree *priv = nft_set_priv(set);
+ struct nft_rbtree_elem *rbe;
+ struct rb_node *parent, **p;
+ int d;
+
+ parent = NULL;
+ p = &priv->root.rb_node;
+ while (*p != NULL) {
+ parent = *p;
+ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+ d = nft_data_cmp(&rbe->key, &new->key, set->klen);
+ if (d < 0)
+ p = &parent->rb_left;
+ else if (d > 0)
+ p = &parent->rb_right;
+ else
+ return -EEXIST;
+ }
+ rb_link_node(&new->node, parent, p);
+ rb_insert_color(&new->node, &priv->root);
+ return 0;
+}
+
+static int nft_rbtree_insert(const struct nft_set *set,
+ const struct nft_set_elem *elem)
+{
+ struct nft_rbtree_elem *rbe;
+ unsigned int size;
+ int err;
+
+ size = sizeof(*rbe);
+ if (set->flags & NFT_SET_MAP)
+ size += sizeof(rbe->data[0]);
+
+ rbe = kzalloc(size, GFP_KERNEL);
+ if (rbe == NULL)
+ return -ENOMEM;
+
+ rbe->flags = elem->flags;
+ nft_data_copy(&rbe->key, &elem->key);
+ if (set->flags & NFT_SET_MAP)
+ nft_data_copy(rbe->data, &elem->data);
+
+ err = __nft_rbtree_insert(set, rbe);
+ if (err < 0)
+ kfree(rbe);
+ return err;
+}
+
+static void nft_rbtree_remove(const struct nft_set *set,
+ const struct nft_set_elem *elem)
+{
+ struct nft_rbtree *priv = nft_set_priv(set);
+ struct nft_rbtree_elem *rbe = elem->cookie;
+
+ rb_erase(&rbe->node, &priv->root);
+ kfree(rbe);
+}
+
+static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
+{
+ const struct nft_rbtree *priv = nft_set_priv(set);
+ const struct rb_node *parent = priv->root.rb_node;
+ struct nft_rbtree_elem *rbe;
+ int d;
+
+ while (parent != NULL) {
+ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+
+ d = nft_data_cmp(&rbe->key, &elem->key, set->klen);
+ if (d < 0)
+ parent = parent->rb_left;
+ else if (d > 0)
+ parent = parent->rb_right;
+ else {
+ elem->cookie = rbe;
+ if (set->flags & NFT_SET_MAP)
+ nft_data_copy(&elem->data, rbe->data);
+ elem->flags = rbe->flags;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static void nft_rbtree_walk(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ struct nft_set_iter *iter)
+{
+ const struct nft_rbtree *priv = nft_set_priv(set);
+ const struct nft_rbtree_elem *rbe;
+ struct nft_set_elem elem;
+ struct rb_node *node;
+
+ for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+ if (iter->count < iter->skip)
+ goto cont;
+
+ rbe = rb_entry(node, struct nft_rbtree_elem, node);
+ nft_data_copy(&elem.key, &rbe->key);
+ if (set->flags & NFT_SET_MAP)
+ nft_data_copy(&elem.data, rbe->data);
+ elem.flags = rbe->flags;
+
+ iter->err = iter->fn(ctx, set, iter, &elem);
+ if (iter->err < 0)
+ return;
+cont:
+ iter->count++;
+ }
+}
+
+static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
+{
+ return sizeof(struct nft_rbtree);
+}
+
+static int nft_rbtree_init(const struct nft_set *set,
+ const struct nlattr * const nla[])
+{
+ struct nft_rbtree *priv = nft_set_priv(set);
+
+ priv->root = RB_ROOT;
+ return 0;
+}
+
+static void nft_rbtree_destroy(const struct nft_set *set)
+{
+ struct nft_rbtree *priv = nft_set_priv(set);
+ struct nft_rbtree_elem *rbe;
+ struct rb_node *node;
+
+ while ((node = priv->root.rb_node) != NULL) {
+ rb_erase(node, &priv->root);
+ rbe = rb_entry(node, struct nft_rbtree_elem, node);
+ nft_rbtree_elem_destroy(set, rbe);
+ }
+}
+
+static struct nft_set_ops nft_rbtree_ops __read_mostly = {
+ .privsize = nft_rbtree_privsize,
+ .init = nft_rbtree_init,
+ .destroy = nft_rbtree_destroy,
+ .insert = nft_rbtree_insert,
+ .remove = nft_rbtree_remove,
+ .get = nft_rbtree_get,
+ .lookup = nft_rbtree_lookup,
+ .walk = nft_rbtree_walk,
+ .features = NFT_SET_INTERVAL | NFT_SET_MAP,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_rbtree_module_init(void)
+{
+ return nft_register_set(&nft_rbtree_ops);
+}
+
+static void __exit nft_rbtree_module_exit(void)
+{
+ nft_unregister_set(&nft_rbtree_ops);
+}
+
+module_init(nft_rbtree_module_init);
+module_exit(nft_rbtree_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_SET();
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8b03028cca69..227aa11e8409 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -845,8 +845,13 @@ xt_replace_table(struct xt_table *table,
return NULL;
}
- table->private = newinfo;
newinfo->initial_entries = private->initial_entries;
+ /*
+ * Ensure contents of newinfo are visible before assigning to
+ * private.
+ */
+ smp_wmb();
+ table->private = newinfo;
/*
* Even though table entries have now been swapped, other CPU's
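The comment added in the hunk above describes the standard "initialise, then publish" ordering: every store that fills in newinfo must be visible to other CPUs before the table->private pointer is switched over, otherwise a reader could observe the new pointer but stale contents. The following minimal sketch is an editorial illustration only, not part of the patch; the struct, the demo_* names, the header choices and the smp_rmb()-based reader are assumptions made for clarity, and how the real x_tables read side is ordered lies outside this hunk.

/*
 * Editorial sketch (not from the patch): the generic
 * "fill in, write barrier, publish pointer" idiom that the
 * smp_wmb() above enforces. Names and headers are illustrative.
 */
#include <linux/kernel.h>
#include <linux/compiler.h>

struct demo_info {
	unsigned int initial_entries;
};

static struct demo_info *demo_published;	/* read by other CPUs */

static void demo_publish(struct demo_info *newinfo, unsigned int n)
{
	newinfo->initial_entries = n;		/* 1. initialise contents      */
	smp_wmb();				/* 2. order the stores above...*/
	ACCESS_ONCE(demo_published) = newinfo;	/* 3. ...before the pointer    */
}

static unsigned int demo_read(void)
{
	struct demo_info *p = ACCESS_ONCE(demo_published);

	if (p == NULL)
		return 0;
	smp_rmb();				/* pairs with smp_wmb() above  */
	return p->initial_entries;
}
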
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 1e2fae32f81b..ed00fef58996 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -147,6 +147,7 @@ nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_NFQ_info_v3 *info = par->targinfo;
u32 queue = info->queuenum;
+ int ret;
if (info->queues_total > 1) {
if (info->flags & NFQ_FLAG_CPU_FANOUT) {
@@ -157,7 +158,11 @@ nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
queue = nfqueue_hash(skb, par);
}
- return NF_QUEUE_NR(queue);
+ ret = NF_QUEUE_NR(queue);
+ if (info->flags & NFQ_FLAG_BYPASS)
+ ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
+
+ return ret;
}
static struct xt_target nfqueue_tg_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index cd24290f3b2f..e762de5ee89b 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -43,10 +43,42 @@ optlen(const u_int8_t *opt, unsigned int offset)
return opt[offset+1];
}
+static u_int32_t tcpmss_reverse_mtu(struct net *net,
+ const struct sk_buff *skb,
+ unsigned int family)
+{
+ struct flowi fl;
+ const struct nf_afinfo *ai;
+ struct rtable *rt = NULL;
+ u_int32_t mtu = ~0U;
+
+ if (family == PF_INET) {
+ struct flowi4 *fl4 = &fl.u.ip4;
+ memset(fl4, 0, sizeof(*fl4));
+ fl4->daddr = ip_hdr(skb)->saddr;
+ } else {
+ struct flowi6 *fl6 = &fl.u.ip6;
+
+ memset(fl6, 0, sizeof(*fl6));
+ fl6->daddr = ipv6_hdr(skb)->saddr;
+ }
+ rcu_read_lock();
+ ai = nf_get_afinfo(family);
+ if (ai != NULL)
+ ai->route(net, (struct dst_entry **)&rt, &fl, false);
+ rcu_read_unlock();
+
+ if (rt != NULL) {
+ mtu = dst_mtu(&rt->dst);
+ dst_release(&rt->dst);
+ }
+ return mtu;
+}
+
static int
tcpmss_mangle_packet(struct sk_buff *skb,
const struct xt_action_param *par,
- unsigned int in_mtu,
+ unsigned int family,
unsigned int tcphoff,
unsigned int minlen)
{
@@ -76,6 +108,9 @@ tcpmss_mangle_packet(struct sk_buff *skb,
return -1;
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
+ struct net *net = dev_net(par->in ? par->in : par->out);
+ unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
+
if (dst_mtu(skb_dst(skb)) <= minlen) {
net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
dst_mtu(skb_dst(skb)));
@@ -165,37 +200,6 @@ tcpmss_mangle_packet(struct sk_buff *skb,
return TCPOLEN_MSS;
}
-static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
- unsigned int family)
-{
- struct flowi fl;
- const struct nf_afinfo *ai;
- struct rtable *rt = NULL;
- u_int32_t mtu = ~0U;
-
- if (family == PF_INET) {
- struct flowi4 *fl4 = &fl.u.ip4;
- memset(fl4, 0, sizeof(*fl4));
- fl4->daddr = ip_hdr(skb)->saddr;
- } else {
- struct flowi6 *fl6 = &fl.u.ip6;
-
- memset(fl6, 0, sizeof(*fl6));
- fl6->daddr = ipv6_hdr(skb)->saddr;
- }
- rcu_read_lock();
- ai = nf_get_afinfo(family);
- if (ai != NULL)
- ai->route(&init_net, (struct dst_entry **)&rt, &fl, false);
- rcu_read_unlock();
-
- if (rt != NULL) {
- mtu = dst_mtu(&rt->dst);
- dst_release(&rt->dst);
- }
- return mtu;
-}
-
static unsigned int
tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
@@ -204,7 +208,7 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
int ret;
ret = tcpmss_mangle_packet(skb, par,
- tcpmss_reverse_mtu(skb, PF_INET),
+ PF_INET,
iph->ihl * 4,
sizeof(*iph) + sizeof(struct tcphdr));
if (ret < 0)
@@ -233,7 +237,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
if (tcphoff < 0)
return NF_DROP;
ret = tcpmss_mangle_packet(skb, par,
- tcpmss_reverse_mtu(skb, PF_INET6),
+ PF_INET6,
tcphoff,
sizeof(*ipv6h) + sizeof(struct tcphdr));
if (ret < 0)
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 5d8a3a3cd5a7..ef8a926752a9 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -200,7 +200,7 @@ nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
in->ifindex);
if (sk) {
int connected = (sk->sk_state == TCP_ESTABLISHED);
- int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
+ int wildcard = ipv6_addr_any(&sk->sk_v6_rcv_saddr);
/* NOTE: we return listeners even if bound to
* 0.0.0.0, those are filtered out in
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 31790e789e22..80c2e2d603e0 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -81,17 +81,17 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
struct xt_set_info_match_v0 *info = par->matchinfo;
ip_set_id_t index;
- index = ip_set_nfnl_get_byindex(info->match_set.index);
+ index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warning("Cannot find set indentified by id %u to match\n",
+ pr_warning("Cannot find set identified by id %u to match\n",
info->match_set.index);
return -ENOENT;
}
if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
pr_warning("Protocol error: set match dimension "
"is over the limit!\n");
- ip_set_nfnl_put(info->match_set.index);
+ ip_set_nfnl_put(par->net, info->match_set.index);
return -ERANGE;
}
@@ -106,9 +106,104 @@ set_match_v0_destroy(const struct xt_mtdtor_param *par)
{
struct xt_set_info_match_v0 *info = par->matchinfo;
- ip_set_nfnl_put(info->match_set.index);
+ ip_set_nfnl_put(par->net, info->match_set.index);
}
+/* Revision 1 match */
+
+static bool
+set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_set_info_match_v1 *info = par->matchinfo;
+ ADT_OPT(opt, par->family, info->match_set.dim,
+ info->match_set.flags, 0, UINT_MAX);
+
+ if (opt.flags & IPSET_RETURN_NOMATCH)
+ opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH;
+
+ return match_set(info->match_set.index, skb, par, &opt,
+ info->match_set.flags & IPSET_INV_MATCH);
+}
+
+static int
+set_match_v1_checkentry(const struct xt_mtchk_param *par)
+{
+ struct xt_set_info_match_v1 *info = par->matchinfo;
+ ip_set_id_t index;
+
+ index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
+
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find set identified by id %u to match\n",
+ info->match_set.index);
+ return -ENOENT;
+ }
+ if (info->match_set.dim > IPSET_DIM_MAX) {
+ pr_warning("Protocol error: set match dimension "
+ "is over the limit!\n");
+ ip_set_nfnl_put(par->net, info->match_set.index);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static void
+set_match_v1_destroy(const struct xt_mtdtor_param *par)
+{
+ struct xt_set_info_match_v1 *info = par->matchinfo;
+
+ ip_set_nfnl_put(par->net, info->match_set.index);
+}
+
+/* Revision 3 match */
+
+static bool
+match_counter(u64 counter, const struct ip_set_counter_match *info)
+{
+ switch (info->op) {
+ case IPSET_COUNTER_NONE:
+ return true;
+ case IPSET_COUNTER_EQ:
+ return counter == info->value;
+ case IPSET_COUNTER_NE:
+ return counter != info->value;
+ case IPSET_COUNTER_LT:
+ return counter < info->value;
+ case IPSET_COUNTER_GT:
+ return counter > info->value;
+ }
+ return false;
+}
+
+static bool
+set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_set_info_match_v3 *info = par->matchinfo;
+ ADT_OPT(opt, par->family, info->match_set.dim,
+ info->match_set.flags, info->flags, UINT_MAX);
+ int ret;
+
+ if (info->packets.op != IPSET_COUNTER_NONE ||
+ info->bytes.op != IPSET_COUNTER_NONE)
+ opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
+
+ ret = match_set(info->match_set.index, skb, par, &opt,
+ info->match_set.flags & IPSET_INV_MATCH);
+
+ if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
+ return ret;
+
+ if (!match_counter(opt.ext.packets, &info->packets))
+ return 0;
+ return match_counter(opt.ext.bytes, &info->bytes);
+}
+
+#define set_match_v3_checkentry set_match_v1_checkentry
+#define set_match_v3_destroy set_match_v1_destroy
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
static unsigned int
set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
{
@@ -133,7 +228,7 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
ip_set_id_t index;
if (info->add_set.index != IPSET_INVALID_ID) {
- index = ip_set_nfnl_get_byindex(info->add_set.index);
+ index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find add_set index %u as target\n",
info->add_set.index);
@@ -142,12 +237,12 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
}
if (info->del_set.index != IPSET_INVALID_ID) {
- index = ip_set_nfnl_get_byindex(info->del_set.index);
+ index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find del_set index %u as target\n",
info->del_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
- ip_set_nfnl_put(info->add_set.index);
+ ip_set_nfnl_put(par->net, info->add_set.index);
return -ENOENT;
}
}
@@ -156,9 +251,9 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
pr_warning("Protocol error: SET target dimension "
"is over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
- ip_set_nfnl_put(info->add_set.index);
+ ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
- ip_set_nfnl_put(info->del_set.index);
+ ip_set_nfnl_put(par->net, info->del_set.index);
return -ERANGE;
}
@@ -175,57 +270,12 @@ set_target_v0_destroy(const struct xt_tgdtor_param *par)
const struct xt_set_info_target_v0 *info = par->targinfo;
if (info->add_set.index != IPSET_INVALID_ID)
- ip_set_nfnl_put(info->add_set.index);
+ ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
- ip_set_nfnl_put(info->del_set.index);
-}
-
-/* Revision 1 match and target */
-
-static bool
-set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
-{
- const struct xt_set_info_match_v1 *info = par->matchinfo;
- ADT_OPT(opt, par->family, info->match_set.dim,
- info->match_set.flags, 0, UINT_MAX);
-
- if (opt.flags & IPSET_RETURN_NOMATCH)
- opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH;
-
- return match_set(info->match_set.index, skb, par, &opt,
- info->match_set.flags & IPSET_INV_MATCH);
-}
-
-static int
-set_match_v1_checkentry(const struct xt_mtchk_param *par)
-{
- struct xt_set_info_match_v1 *info = par->matchinfo;
- ip_set_id_t index;
-
- index = ip_set_nfnl_get_byindex(info->match_set.index);
-
- if (index == IPSET_INVALID_ID) {
- pr_warning("Cannot find set indentified by id %u to match\n",
- info->match_set.index);
- return -ENOENT;
- }
- if (info->match_set.dim > IPSET_DIM_MAX) {
- pr_warning("Protocol error: set match dimension "
- "is over the limit!\n");
- ip_set_nfnl_put(info->match_set.index);
- return -ERANGE;
- }
-
- return 0;
+ ip_set_nfnl_put(par->net, info->del_set.index);
}
-static void
-set_match_v1_destroy(const struct xt_mtdtor_param *par)
-{
- struct xt_set_info_match_v1 *info = par->matchinfo;
-
- ip_set_nfnl_put(info->match_set.index);
-}
+/* Revision 1 target */
static unsigned int
set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
@@ -251,7 +301,7 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
ip_set_id_t index;
if (info->add_set.index != IPSET_INVALID_ID) {
- index = ip_set_nfnl_get_byindex(info->add_set.index);
+ index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find add_set index %u as target\n",
info->add_set.index);
@@ -260,12 +310,12 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
}
if (info->del_set.index != IPSET_INVALID_ID) {
- index = ip_set_nfnl_get_byindex(info->del_set.index);
+ index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find del_set index %u as target\n",
info->del_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
- ip_set_nfnl_put(info->add_set.index);
+ ip_set_nfnl_put(par->net, info->add_set.index);
return -ENOENT;
}
}
@@ -274,9 +324,9 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
pr_warning("Protocol error: SET target dimension "
"is over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
- ip_set_nfnl_put(info->add_set.index);
+ ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
- ip_set_nfnl_put(info->del_set.index);
+ ip_set_nfnl_put(par->net, info->del_set.index);
return -ERANGE;
}
@@ -289,9 +339,9 @@ set_target_v1_destroy(const struct xt_tgdtor_param *par)
const struct xt_set_info_target_v1 *info = par->targinfo;
if (info->add_set.index != IPSET_INVALID_ID)
- ip_set_nfnl_put(info->add_set.index);
+ ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
- ip_set_nfnl_put(info->del_set.index);
+ ip_set_nfnl_put(par->net, info->del_set.index);
}
/* Revision 2 target */
@@ -320,52 +370,6 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
#define set_target_v2_checkentry set_target_v1_checkentry
#define set_target_v2_destroy set_target_v1_destroy
-/* Revision 3 match */
-
-static bool
-match_counter(u64 counter, const struct ip_set_counter_match *info)
-{
- switch (info->op) {
- case IPSET_COUNTER_NONE:
- return true;
- case IPSET_COUNTER_EQ:
- return counter == info->value;
- case IPSET_COUNTER_NE:
- return counter != info->value;
- case IPSET_COUNTER_LT:
- return counter < info->value;
- case IPSET_COUNTER_GT:
- return counter > info->value;
- }
- return false;
-}
-
-static bool
-set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
-{
- const struct xt_set_info_match_v3 *info = par->matchinfo;
- ADT_OPT(opt, par->family, info->match_set.dim,
- info->match_set.flags, info->flags, UINT_MAX);
- int ret;
-
- if (info->packets.op != IPSET_COUNTER_NONE ||
- info->bytes.op != IPSET_COUNTER_NONE)
- opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
-
- ret = match_set(info->match_set.index, skb, par, &opt,
- info->match_set.flags & IPSET_INV_MATCH);
-
- if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
- return ret;
-
- if (!match_counter(opt.ext.packets, &info->packets))
- return 0;
- return match_counter(opt.ext.bytes, &info->bytes);
-}
-
-#define set_match_v3_checkentry set_match_v1_checkentry
-#define set_match_v3_destroy set_match_v1_destroy
-
static struct xt_match set_matches[] __read_mostly = {
{
.name = "set",
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 06df2b9110f5..3dd0e374bc2b 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -370,7 +370,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
*/
wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) &&
sk->sk_state != TCP_TIME_WAIT &&
- ipv6_addr_any(&inet6_sk(sk)->rcv_saddr));
+ ipv6_addr_any(&sk->sk_v6_rcv_saddr));
/* Ignore non-transparent sockets,
if XT_SOCKET_TRANSPARENT is used */
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 96a458e12f60..dce1bebf7aec 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -817,7 +817,7 @@ int netlbl_req_setattr(struct request_sock *req,
switch (req->rsk_ops->family) {
case AF_INET:
entry = netlbl_domhsh_getentry_af4(secattr->domain,
- inet_rsk(req)->rmt_addr);
+ inet_rsk(req)->ir_rmt_addr);
if (entry == NULL) {
ret_val = -ENOENT;
goto req_setattr_return;
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 5948b2fc72f6..6e0fa0cce198 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -14,6 +14,20 @@ menuconfig NFC
To compile this support as a module, choose M here: the module will
be called nfc.
+config NFC_DIGITAL
+ depends on NFC
+ select CRC_CCITT
+ select CRC_ITU_T
+ tristate "NFC Digital Protocol stack support"
+ default n
+ help
+ Say Y if you want to build NFC digital protocol stack support.
+ This is needed by NFC chipsets whose firmware only implements
+ the NFC analog layer.
+
+ To compile this support as a module, choose M here: the module will
+ be called nfc_digital.
+
source "net/nfc/nci/Kconfig"
source "net/nfc/hci/Kconfig"
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
index a76f4533cb6c..2555ff8e7219 100644
--- a/net/nfc/Makefile
+++ b/net/nfc/Makefile
@@ -5,7 +5,9 @@
obj-$(CONFIG_NFC) += nfc.o
obj-$(CONFIG_NFC_NCI) += nci/
obj-$(CONFIG_NFC_HCI) += hci/
+obj-$(CONFIG_NFC_DIGITAL) += nfc_digital.o
nfc-objs := core.o netlink.o af_nfc.o rawsock.o llcp_core.o llcp_commands.o \
llcp_sock.o
+nfc_digital-objs := digital_core.o digital_technology.o digital_dep.o
diff --git a/net/nfc/core.c b/net/nfc/core.c
index e92923cf3e03..872529105abc 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -384,6 +384,19 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
{
dev->dep_link_up = true;
+ if (!dev->active_target) {
+ struct nfc_target *target;
+
+ target = nfc_find_target(dev, target_idx);
+ if (target == NULL)
+ return -ENOTCONN;
+
+ dev->active_target = target;
+ }
+
+ dev->polling = false;
+ dev->rf_mode = rf_mode;
+
nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode);
return nfc_genl_dep_link_up_event(dev, target_idx, comm_mode, rf_mode);
@@ -536,7 +549,7 @@ error:
return rc;
}
-static struct nfc_se *find_se(struct nfc_dev *dev, u32 se_idx)
+struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx)
{
struct nfc_se *se, *n;
@@ -546,6 +559,7 @@ static struct nfc_se *find_se(struct nfc_dev *dev, u32 se_idx)
return NULL;
}
+EXPORT_SYMBOL(nfc_find_se);
int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
{
@@ -577,7 +591,7 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
goto error;
}
- se = find_se(dev, se_idx);
+ se = nfc_find_se(dev, se_idx);
if (!se) {
rc = -EINVAL;
goto error;
@@ -622,7 +636,7 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx)
goto error;
}
- se = find_se(dev, se_idx);
+ se = nfc_find_se(dev, se_idx);
if (!se) {
rc = -EINVAL;
goto error;
@@ -881,7 +895,7 @@ int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type)
pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx);
- se = find_se(dev, se_idx);
+ se = nfc_find_se(dev, se_idx);
if (se)
return -EALREADY;
diff --git a/net/nfc/digital.h b/net/nfc/digital.h
new file mode 100644
index 000000000000..08b29b55ea63
--- /dev/null
+++ b/net/nfc/digital.h
@@ -0,0 +1,170 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __DIGITAL_H
+#define __DIGITAL_H
+
+#include <net/nfc/nfc.h>
+#include <net/nfc/digital.h>
+
+#include <linux/crc-ccitt.h>
+#include <linux/crc-itu-t.h>
+
+#define PROTOCOL_ERR(req) pr_err("%d: NFC Digital Protocol error: %s\n", \
+ __LINE__, req)
+
+#define DIGITAL_CMD_IN_SEND 0
+#define DIGITAL_CMD_TG_SEND 1
+#define DIGITAL_CMD_TG_LISTEN 2
+#define DIGITAL_CMD_TG_LISTEN_MDAA 3
+
+#define DIGITAL_MAX_HEADER_LEN 7
+#define DIGITAL_CRC_LEN 2
+
+#define DIGITAL_SENSF_NFCID2_NFC_DEP_B1 0x01
+#define DIGITAL_SENSF_NFCID2_NFC_DEP_B2 0xFE
+
+#define DIGITAL_SENS_RES_NFC_DEP 0x0100
+#define DIGITAL_SEL_RES_NFC_DEP 0x40
+#define DIGITAL_SENSF_FELICA_SC 0xFFFF
+
+#define DIGITAL_DRV_CAPS_IN_CRC(ddev) \
+ ((ddev)->driver_capabilities & NFC_DIGITAL_DRV_CAPS_IN_CRC)
+#define DIGITAL_DRV_CAPS_TG_CRC(ddev) \
+ ((ddev)->driver_capabilities & NFC_DIGITAL_DRV_CAPS_TG_CRC)
+
+struct digital_data_exch {
+ data_exchange_cb_t cb;
+ void *cb_context;
+};
+
+struct sk_buff *digital_skb_alloc(struct nfc_digital_dev *ddev,
+ unsigned int len);
+
+int digital_send_cmd(struct nfc_digital_dev *ddev, u8 cmd_type,
+ struct sk_buff *skb, struct digital_tg_mdaa_params *params,
+ u16 timeout, nfc_digital_cmd_complete_t cmd_cb,
+ void *cb_context);
+
+int digital_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param);
+static inline int digital_in_send_cmd(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb, u16 timeout,
+ nfc_digital_cmd_complete_t cmd_cb,
+ void *cb_context)
+{
+ return digital_send_cmd(ddev, DIGITAL_CMD_IN_SEND, skb, NULL, timeout,
+ cmd_cb, cb_context);
+}
+
+void digital_poll_next_tech(struct nfc_digital_dev *ddev);
+
+int digital_in_send_sens_req(struct nfc_digital_dev *ddev, u8 rf_tech);
+int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech);
+
+int digital_target_found(struct nfc_digital_dev *ddev,
+ struct nfc_target *target, u8 protocol);
+
+int digital_in_recv_mifare_res(struct sk_buff *resp);
+
+int digital_in_send_atr_req(struct nfc_digital_dev *ddev,
+ struct nfc_target *target, __u8 comm_mode, __u8 *gb,
+ size_t gb_len);
+int digital_in_send_dep_req(struct nfc_digital_dev *ddev,
+ struct nfc_target *target, struct sk_buff *skb,
+ struct digital_data_exch *data_exch);
+
+int digital_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param);
+static inline int digital_tg_send_cmd(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb, u16 timeout,
+ nfc_digital_cmd_complete_t cmd_cb, void *cb_context)
+{
+ return digital_send_cmd(ddev, DIGITAL_CMD_TG_SEND, skb, NULL, timeout,
+ cmd_cb, cb_context);
+}
+
+void digital_tg_recv_sens_req(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp);
+
+void digital_tg_recv_sensf_req(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp);
+
+static inline int digital_tg_listen(struct nfc_digital_dev *ddev, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN, NULL, NULL,
+ timeout, cb, arg);
+}
+
+void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp);
+
+int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb);
+
+int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech);
+int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech);
+
+typedef u16 (*crc_func_t)(u16, const u8 *, size_t);
+
+#define CRC_A_INIT 0x6363
+#define CRC_B_INIT 0xFFFF
+#define CRC_F_INIT 0x0000
+
+void digital_skb_add_crc(struct sk_buff *skb, crc_func_t crc_func, u16 init,
+ u8 bitwise_inv, u8 msb_first);
+
+static inline void digital_skb_add_crc_a(struct sk_buff *skb)
+{
+ digital_skb_add_crc(skb, crc_ccitt, CRC_A_INIT, 0, 0);
+}
+
+static inline void digital_skb_add_crc_b(struct sk_buff *skb)
+{
+ digital_skb_add_crc(skb, crc_ccitt, CRC_B_INIT, 1, 0);
+}
+
+static inline void digital_skb_add_crc_f(struct sk_buff *skb)
+{
+ digital_skb_add_crc(skb, crc_itu_t, CRC_F_INIT, 0, 1);
+}
+
+static inline void digital_skb_add_crc_none(struct sk_buff *skb)
+{
+ return;
+}
+
+int digital_skb_check_crc(struct sk_buff *skb, crc_func_t crc_func,
+ u16 crc_init, u8 bitwise_inv, u8 msb_first);
+
+static inline int digital_skb_check_crc_a(struct sk_buff *skb)
+{
+ return digital_skb_check_crc(skb, crc_ccitt, CRC_A_INIT, 0, 0);
+}
+
+static inline int digital_skb_check_crc_b(struct sk_buff *skb)
+{
+ return digital_skb_check_crc(skb, crc_ccitt, CRC_B_INIT, 1, 0);
+}
+
+static inline int digital_skb_check_crc_f(struct sk_buff *skb)
+{
+ return digital_skb_check_crc(skb, crc_itu_t, CRC_F_INIT, 0, 1);
+}
+
+static inline int digital_skb_check_crc_none(struct sk_buff *skb)
+{
+ return 0;
+}
+
+#endif /* __DIGITAL_H */
diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c
new file mode 100644
index 000000000000..09fc95439955
--- /dev/null
+++ b/net/nfc/digital_core.c
@@ -0,0 +1,737 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) "digital: %s: " fmt, __func__
+
+#include <linux/module.h>
+
+#include "digital.h"
+
+#define DIGITAL_PROTO_NFCA_RF_TECH \
+ (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_NFC_DEP_MASK)
+
+#define DIGITAL_PROTO_NFCF_RF_TECH \
+ (NFC_PROTO_FELICA_MASK | NFC_PROTO_NFC_DEP_MASK)
+
+struct digital_cmd {
+ struct list_head queue;
+
+ u8 type;
+ u8 pending;
+
+ u16 timeout;
+ struct sk_buff *req;
+ struct sk_buff *resp;
+ struct digital_tg_mdaa_params *mdaa_params;
+
+ nfc_digital_cmd_complete_t cmd_cb;
+ void *cb_context;
+};
+
+struct sk_buff *digital_skb_alloc(struct nfc_digital_dev *ddev,
+ unsigned int len)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len + ddev->tx_headroom + ddev->tx_tailroom,
+ GFP_KERNEL);
+ if (skb)
+ skb_reserve(skb, ddev->tx_headroom);
+
+ return skb;
+}
+
+void digital_skb_add_crc(struct sk_buff *skb, crc_func_t crc_func, u16 init,
+ u8 bitwise_inv, u8 msb_first)
+{
+ u16 crc;
+
+ crc = crc_func(init, skb->data, skb->len);
+
+ if (bitwise_inv)
+ crc = ~crc;
+
+ if (msb_first)
+ crc = __fswab16(crc);
+
+ *skb_put(skb, 1) = crc & 0xFF;
+ *skb_put(skb, 1) = (crc >> 8) & 0xFF;
+}
+
+int digital_skb_check_crc(struct sk_buff *skb, crc_func_t crc_func,
+ u16 crc_init, u8 bitwise_inv, u8 msb_first)
+{
+ int rc;
+ u16 crc;
+
+ if (skb->len <= 2)
+ return -EIO;
+
+ crc = crc_func(crc_init, skb->data, skb->len - 2);
+
+ if (bitwise_inv)
+ crc = ~crc;
+
+ if (msb_first)
+ crc = __swab16(crc);
+
+ rc = (skb->data[skb->len - 2] - (crc & 0xFF)) +
+ (skb->data[skb->len - 1] - ((crc >> 8) & 0xFF));
+
+ if (rc)
+ return -EIO;
+
+ skb_trim(skb, skb->len - 2);
+
+ return 0;
+}
+
+static inline void digital_switch_rf(struct nfc_digital_dev *ddev, bool on)
+{
+ ddev->ops->switch_rf(ddev, on);
+}
+
+static inline void digital_abort_cmd(struct nfc_digital_dev *ddev)
+{
+ ddev->ops->abort_cmd(ddev);
+}
+
+static void digital_wq_cmd_complete(struct work_struct *work)
+{
+ struct digital_cmd *cmd;
+ struct nfc_digital_dev *ddev = container_of(work,
+ struct nfc_digital_dev,
+ cmd_complete_work);
+
+ mutex_lock(&ddev->cmd_lock);
+
+ cmd = list_first_entry_or_null(&ddev->cmd_queue, struct digital_cmd,
+ queue);
+ if (!cmd) {
+ mutex_unlock(&ddev->cmd_lock);
+ return;
+ }
+
+ list_del(&cmd->queue);
+
+ mutex_unlock(&ddev->cmd_lock);
+
+ if (!IS_ERR(cmd->resp))
+ print_hex_dump_debug("DIGITAL RX: ", DUMP_PREFIX_NONE, 16, 1,
+ cmd->resp->data, cmd->resp->len, false);
+
+ cmd->cmd_cb(ddev, cmd->cb_context, cmd->resp);
+
+ kfree(cmd->mdaa_params);
+ kfree(cmd);
+
+ schedule_work(&ddev->cmd_work);
+}
+
+static void digital_send_cmd_complete(struct nfc_digital_dev *ddev,
+ void *arg, struct sk_buff *resp)
+{
+ struct digital_cmd *cmd = arg;
+
+ cmd->resp = resp;
+
+ schedule_work(&ddev->cmd_complete_work);
+}
+
+static void digital_wq_cmd(struct work_struct *work)
+{
+ int rc;
+ struct digital_cmd *cmd;
+ struct digital_tg_mdaa_params *params;
+ struct nfc_digital_dev *ddev = container_of(work,
+ struct nfc_digital_dev,
+ cmd_work);
+
+ mutex_lock(&ddev->cmd_lock);
+
+ cmd = list_first_entry_or_null(&ddev->cmd_queue, struct digital_cmd,
+ queue);
+ if (!cmd || cmd->pending) {
+ mutex_unlock(&ddev->cmd_lock);
+ return;
+ }
+
+ mutex_unlock(&ddev->cmd_lock);
+
+ if (cmd->req)
+ print_hex_dump_debug("DIGITAL TX: ", DUMP_PREFIX_NONE, 16, 1,
+ cmd->req->data, cmd->req->len, false);
+
+ switch (cmd->type) {
+ case DIGITAL_CMD_IN_SEND:
+ rc = ddev->ops->in_send_cmd(ddev, cmd->req, cmd->timeout,
+ digital_send_cmd_complete, cmd);
+ break;
+
+ case DIGITAL_CMD_TG_SEND:
+ rc = ddev->ops->tg_send_cmd(ddev, cmd->req, cmd->timeout,
+ digital_send_cmd_complete, cmd);
+ break;
+
+ case DIGITAL_CMD_TG_LISTEN:
+ rc = ddev->ops->tg_listen(ddev, cmd->timeout,
+ digital_send_cmd_complete, cmd);
+ break;
+
+ case DIGITAL_CMD_TG_LISTEN_MDAA:
+ params = cmd->mdaa_params;
+
+ rc = ddev->ops->tg_listen_mdaa(ddev, params, cmd->timeout,
+ digital_send_cmd_complete, cmd);
+ break;
+
+ default:
+ pr_err("Unknown cmd type %d\n", cmd->type);
+ return;
+ }
+
+ if (!rc)
+ return;
+
+ pr_err("in_send_command returned err %d\n", rc);
+
+ mutex_lock(&ddev->cmd_lock);
+ list_del(&cmd->queue);
+ mutex_unlock(&ddev->cmd_lock);
+
+ kfree_skb(cmd->req);
+ kfree(cmd->mdaa_params);
+ kfree(cmd);
+
+ schedule_work(&ddev->cmd_work);
+}
+
+int digital_send_cmd(struct nfc_digital_dev *ddev, u8 cmd_type,
+ struct sk_buff *skb, struct digital_tg_mdaa_params *params,
+ u16 timeout, nfc_digital_cmd_complete_t cmd_cb,
+ void *cb_context)
+{
+ struct digital_cmd *cmd;
+
+ cmd = kzalloc(sizeof(struct digital_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->type = cmd_type;
+ cmd->timeout = timeout;
+ cmd->req = skb;
+ cmd->mdaa_params = params;
+ cmd->cmd_cb = cmd_cb;
+ cmd->cb_context = cb_context;
+ INIT_LIST_HEAD(&cmd->queue);
+
+ mutex_lock(&ddev->cmd_lock);
+ list_add_tail(&cmd->queue, &ddev->cmd_queue);
+ mutex_unlock(&ddev->cmd_lock);
+
+ schedule_work(&ddev->cmd_work);
+
+ return 0;
+}
+
+int digital_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param)
+{
+ int rc;
+
+ rc = ddev->ops->in_configure_hw(ddev, type, param);
+ if (rc)
+ pr_err("in_configure_hw failed: %d\n", rc);
+
+ return rc;
+}
+
+int digital_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param)
+{
+ int rc;
+
+ rc = ddev->ops->tg_configure_hw(ddev, type, param);
+ if (rc)
+ pr_err("tg_configure_hw failed: %d\n", rc);
+
+ return rc;
+}
+
+static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+ struct digital_tg_mdaa_params *params;
+
+ params = kzalloc(sizeof(struct digital_tg_mdaa_params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ params->sens_res = DIGITAL_SENS_RES_NFC_DEP;
+ get_random_bytes(params->nfcid1, sizeof(params->nfcid1));
+ params->sel_res = DIGITAL_SEL_RES_NFC_DEP;
+
+ params->nfcid2[0] = DIGITAL_SENSF_NFCID2_NFC_DEP_B1;
+ params->nfcid2[1] = DIGITAL_SENSF_NFCID2_NFC_DEP_B2;
+ get_random_bytes(params->nfcid2 + 2, NFC_NFCID2_MAXSIZE - 2);
+ params->sc = DIGITAL_SENSF_FELICA_SC;
+
+ return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params,
+ 500, digital_tg_recv_atr_req, NULL);
+}
+
+int digital_target_found(struct nfc_digital_dev *ddev,
+ struct nfc_target *target, u8 protocol)
+{
+ int rc;
+ u8 framing;
+ u8 rf_tech;
+ int (*check_crc)(struct sk_buff *skb);
+ void (*add_crc)(struct sk_buff *skb);
+
+ rf_tech = ddev->poll_techs[ddev->poll_tech_index].rf_tech;
+
+ switch (protocol) {
+ case NFC_PROTO_JEWEL:
+ framing = NFC_DIGITAL_FRAMING_NFCA_T1T;
+ check_crc = digital_skb_check_crc_b;
+ add_crc = digital_skb_add_crc_b;
+ break;
+
+ case NFC_PROTO_MIFARE:
+ framing = NFC_DIGITAL_FRAMING_NFCA_T2T;
+ check_crc = digital_skb_check_crc_a;
+ add_crc = digital_skb_add_crc_a;
+ break;
+
+ case NFC_PROTO_FELICA:
+ framing = NFC_DIGITAL_FRAMING_NFCF_T3T;
+ check_crc = digital_skb_check_crc_f;
+ add_crc = digital_skb_add_crc_f;
+ break;
+
+ case NFC_PROTO_NFC_DEP:
+ if (rf_tech == NFC_DIGITAL_RF_TECH_106A) {
+ framing = NFC_DIGITAL_FRAMING_NFCA_NFC_DEP;
+ check_crc = digital_skb_check_crc_a;
+ add_crc = digital_skb_add_crc_a;
+ } else {
+ framing = NFC_DIGITAL_FRAMING_NFCF_NFC_DEP;
+ check_crc = digital_skb_check_crc_f;
+ add_crc = digital_skb_add_crc_f;
+ }
+ break;
+
+ default:
+ pr_err("Invalid protocol %d\n", protocol);
+ return -EINVAL;
+ }
+
+ pr_debug("rf_tech=%d, protocol=%d\n", rf_tech, protocol);
+
+ ddev->curr_rf_tech = rf_tech;
+ ddev->curr_protocol = protocol;
+
+ if (DIGITAL_DRV_CAPS_IN_CRC(ddev)) {
+ ddev->skb_add_crc = digital_skb_add_crc_none;
+ ddev->skb_check_crc = digital_skb_check_crc_none;
+ } else {
+ ddev->skb_add_crc = add_crc;
+ ddev->skb_check_crc = check_crc;
+ }
+
+ rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, framing);
+ if (rc)
+ return rc;
+
+ target->supported_protocols = (1 << protocol);
+ rc = nfc_targets_found(ddev->nfc_dev, target, 1);
+ if (rc)
+ return rc;
+
+ ddev->poll_tech_count = 0;
+
+ return 0;
+}
+
+void digital_poll_next_tech(struct nfc_digital_dev *ddev)
+{
+ digital_switch_rf(ddev, 0);
+
+ mutex_lock(&ddev->poll_lock);
+
+ if (!ddev->poll_tech_count) {
+ mutex_unlock(&ddev->poll_lock);
+ return;
+ }
+
+ ddev->poll_tech_index = (ddev->poll_tech_index + 1) %
+ ddev->poll_tech_count;
+
+ mutex_unlock(&ddev->poll_lock);
+
+ schedule_work(&ddev->poll_work);
+}
+
+static void digital_wq_poll(struct work_struct *work)
+{
+ int rc;
+ struct digital_poll_tech *poll_tech;
+ struct nfc_digital_dev *ddev = container_of(work,
+ struct nfc_digital_dev,
+ poll_work);
+ mutex_lock(&ddev->poll_lock);
+
+ if (!ddev->poll_tech_count) {
+ mutex_unlock(&ddev->poll_lock);
+ return;
+ }
+
+ poll_tech = &ddev->poll_techs[ddev->poll_tech_index];
+
+ mutex_unlock(&ddev->poll_lock);
+
+ rc = poll_tech->poll_func(ddev, poll_tech->rf_tech);
+ if (rc)
+ digital_poll_next_tech(ddev);
+}
+
+static void digital_add_poll_tech(struct nfc_digital_dev *ddev, u8 rf_tech,
+ digital_poll_t poll_func)
+{
+ struct digital_poll_tech *poll_tech;
+
+ if (ddev->poll_tech_count >= NFC_DIGITAL_POLL_MODE_COUNT_MAX)
+ return;
+
+ poll_tech = &ddev->poll_techs[ddev->poll_tech_count++];
+
+ poll_tech->rf_tech = rf_tech;
+ poll_tech->poll_func = poll_func;
+}
+
+/**
+ * start_poll operation
+ *
+ * For every supported protocol, the corresponding polling function is added
+ * to the table of polling technologies (ddev->poll_techs[]) using
+ * digital_add_poll_tech().
+ * When a polling function fails (on timeout or protocol error), the next one is
+ * scheduled by digital_poll_next_tech() on the poll workqueue (ddev->poll_work).
+ */
+static int digital_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols,
+ __u32 tm_protocols)
+{
+ struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+ u32 matching_im_protocols, matching_tm_protocols;
+
+ pr_debug("protocols: im 0x%x, tm 0x%x, supported 0x%x\n", im_protocols,
+ tm_protocols, ddev->protocols);
+
+ matching_im_protocols = ddev->protocols & im_protocols;
+ matching_tm_protocols = ddev->protocols & tm_protocols;
+
+ if (!matching_im_protocols && !matching_tm_protocols) {
+ pr_err("Unknown protocol\n");
+ return -EINVAL;
+ }
+
+ if (ddev->poll_tech_count) {
+ pr_err("Already polling\n");
+ return -EBUSY;
+ }
+
+ if (ddev->curr_protocol) {
+ pr_err("A target is already active\n");
+ return -EBUSY;
+ }
+
+ ddev->poll_tech_count = 0;
+ ddev->poll_tech_index = 0;
+
+ if (matching_im_protocols & DIGITAL_PROTO_NFCA_RF_TECH)
+ digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A,
+ digital_in_send_sens_req);
+
+ if (im_protocols & DIGITAL_PROTO_NFCF_RF_TECH) {
+ digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F,
+ digital_in_send_sensf_req);
+
+ digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_424F,
+ digital_in_send_sensf_req);
+ }
+
+ if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ if (ddev->ops->tg_listen_mdaa) {
+ digital_add_poll_tech(ddev, 0,
+ digital_tg_listen_mdaa);
+ } else {
+ digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A,
+ digital_tg_listen_nfca);
+
+ digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F,
+ digital_tg_listen_nfcf);
+
+ digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_424F,
+ digital_tg_listen_nfcf);
+ }
+ }
+
+ if (!ddev->poll_tech_count) {
+ pr_err("Unsupported protocols: im=0x%x, tm=0x%x\n",
+ matching_im_protocols, matching_tm_protocols);
+ return -EINVAL;
+ }
+
+ schedule_work(&ddev->poll_work);
+
+ return 0;
+}
+
+static void digital_stop_poll(struct nfc_dev *nfc_dev)
+{
+ struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+ mutex_lock(&ddev->poll_lock);
+
+ if (!ddev->poll_tech_count) {
+ pr_err("Polling operation was not running\n");
+ mutex_unlock(&ddev->poll_lock);
+ return;
+ }
+
+ ddev->poll_tech_count = 0;
+
+ mutex_unlock(&ddev->poll_lock);
+
+ cancel_work_sync(&ddev->poll_work);
+
+ digital_abort_cmd(ddev);
+}
+
+static int digital_dev_up(struct nfc_dev *nfc_dev)
+{
+ struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+ digital_switch_rf(ddev, 1);
+
+ return 0;
+}
+
+static int digital_dev_down(struct nfc_dev *nfc_dev)
+{
+ struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+ digital_switch_rf(ddev, 0);
+
+ return 0;
+}
+
+static int digital_dep_link_up(struct nfc_dev *nfc_dev,
+ struct nfc_target *target,
+ __u8 comm_mode, __u8 *gb, size_t gb_len)
+{
+ struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+ return digital_in_send_atr_req(ddev, target, comm_mode, gb, gb_len);
+}
+
+static int digital_dep_link_down(struct nfc_dev *nfc_dev)
+{
+ struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+ ddev->curr_protocol = 0;
+
+ return 0;
+}
+
+static int digital_activate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, __u32 protocol)
+{
+ return 0;
+}
+
+static void digital_deactivate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target)
+{
+ struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+ ddev->curr_protocol = 0;
+}
+
+static int digital_tg_send(struct nfc_dev *dev, struct sk_buff *skb)
+{
+ struct nfc_digital_dev *ddev = nfc_get_drvdata(dev);
+
+ return digital_tg_send_dep_res(ddev, skb);
+}
+
+static void digital_in_send_complete(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ struct digital_data_exch *data_exch = arg;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ goto done;
+ }
+
+ if (ddev->curr_protocol == NFC_PROTO_MIFARE)
+ rc = digital_in_recv_mifare_res(resp);
+ else
+ rc = ddev->skb_check_crc(resp);
+
+ if (rc) {
+ kfree_skb(resp);
+ resp = NULL;
+ }
+
+done:
+ data_exch->cb(data_exch->cb_context, resp, rc);
+
+ kfree(data_exch);
+}
+
+static int digital_in_send(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct sk_buff *skb, data_exchange_cb_t cb,
+ void *cb_context)
+{
+ struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+ struct digital_data_exch *data_exch;
+
+ data_exch = kzalloc(sizeof(struct digital_data_exch), GFP_KERNEL);
+ if (!data_exch) {
+ pr_err("Failed to allocate data_exch struct\n");
+ return -ENOMEM;
+ }
+
+ data_exch->cb = cb;
+ data_exch->cb_context = cb_context;
+
+ if (ddev->curr_protocol == NFC_PROTO_NFC_DEP)
+ return digital_in_send_dep_req(ddev, target, skb, data_exch);
+
+ ddev->skb_add_crc(skb);
+
+ return digital_in_send_cmd(ddev, skb, 500, digital_in_send_complete,
+ data_exch);
+}
+
+static struct nfc_ops digital_nfc_ops = {
+ .dev_up = digital_dev_up,
+ .dev_down = digital_dev_down,
+ .start_poll = digital_start_poll,
+ .stop_poll = digital_stop_poll,
+ .dep_link_up = digital_dep_link_up,
+ .dep_link_down = digital_dep_link_down,
+ .activate_target = digital_activate_target,
+ .deactivate_target = digital_deactivate_target,
+ .tm_send = digital_tg_send,
+ .im_transceive = digital_in_send,
+};
+
+struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
+ __u32 supported_protocols,
+ __u32 driver_capabilities,
+ int tx_headroom, int tx_tailroom)
+{
+ struct nfc_digital_dev *ddev;
+
+ if (!ops->in_configure_hw || !ops->in_send_cmd || !ops->tg_listen ||
+ !ops->tg_configure_hw || !ops->tg_send_cmd || !ops->abort_cmd ||
+ !ops->switch_rf)
+ return NULL;
+
+ ddev = kzalloc(sizeof(struct nfc_digital_dev), GFP_KERNEL);
+ if (!ddev)
+ return NULL;
+
+ ddev->driver_capabilities = driver_capabilities;
+ ddev->ops = ops;
+
+ mutex_init(&ddev->cmd_lock);
+ INIT_LIST_HEAD(&ddev->cmd_queue);
+
+ INIT_WORK(&ddev->cmd_work, digital_wq_cmd);
+ INIT_WORK(&ddev->cmd_complete_work, digital_wq_cmd_complete);
+
+ mutex_init(&ddev->poll_lock);
+ INIT_WORK(&ddev->poll_work, digital_wq_poll);
+
+ if (supported_protocols & NFC_PROTO_JEWEL_MASK)
+ ddev->protocols |= NFC_PROTO_JEWEL_MASK;
+ if (supported_protocols & NFC_PROTO_MIFARE_MASK)
+ ddev->protocols |= NFC_PROTO_MIFARE_MASK;
+ if (supported_protocols & NFC_PROTO_FELICA_MASK)
+ ddev->protocols |= NFC_PROTO_FELICA_MASK;
+ if (supported_protocols & NFC_PROTO_NFC_DEP_MASK)
+ ddev->protocols |= NFC_PROTO_NFC_DEP_MASK;
+
+ ddev->tx_headroom = tx_headroom + DIGITAL_MAX_HEADER_LEN;
+ ddev->tx_tailroom = tx_tailroom + DIGITAL_CRC_LEN;
+
+ ddev->nfc_dev = nfc_allocate_device(&digital_nfc_ops, ddev->protocols,
+ ddev->tx_headroom,
+ ddev->tx_tailroom);
+ if (!ddev->nfc_dev) {
+ pr_err("nfc_allocate_device failed\n");
+ goto free_dev;
+ }
+
+ nfc_set_drvdata(ddev->nfc_dev, ddev);
+
+ return ddev;
+
+free_dev:
+ kfree(ddev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(nfc_digital_allocate_device);
+
+void nfc_digital_free_device(struct nfc_digital_dev *ddev)
+{
+ nfc_free_device(ddev->nfc_dev);
+ kfree(ddev);
+}
+EXPORT_SYMBOL(nfc_digital_free_device);
+
+int nfc_digital_register_device(struct nfc_digital_dev *ddev)
+{
+ return nfc_register_device(ddev->nfc_dev);
+}
+EXPORT_SYMBOL(nfc_digital_register_device);
+
+void nfc_digital_unregister_device(struct nfc_digital_dev *ddev)
+{
+ struct digital_cmd *cmd, *n;
+
+ nfc_unregister_device(ddev->nfc_dev);
+
+ mutex_lock(&ddev->poll_lock);
+ ddev->poll_tech_count = 0;
+ mutex_unlock(&ddev->poll_lock);
+
+ cancel_work_sync(&ddev->poll_work);
+ cancel_work_sync(&ddev->cmd_work);
+ cancel_work_sync(&ddev->cmd_complete_work);
+
+ list_for_each_entry_safe(cmd, n, &ddev->cmd_queue, queue) {
+ list_del(&cmd->queue);
+ kfree(cmd->mdaa_params);
+ kfree(cmd);
+ }
+}
+EXPORT_SYMBOL(nfc_digital_unregister_device);
+
+MODULE_LICENSE("GPL");
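
A minimal sketch of how a transceiver driver might sit on top of this new Digital layer, for orientation only. It is not part of the patch: the struct nfc_digital_ops callback prototypes below are inferred from the call sites in digital_core.c above (the authoritative definitions live in the digital.h header added elsewhere in this series), and every mydrv_* name is invented.

/* Illustrative sketch -- not part of this patch. */
#include <linux/module.h>
#include <net/nfc/digital.h>

static int mydrv_configure_hw(struct nfc_digital_dev *ddev, int type,
                              int param)
{
        /* program the requested RF technology or framing into the chip */
        return 0;
}

static int mydrv_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb,
                          u16 timeout, nfc_digital_cmd_complete_t cb,
                          void *arg)
{
        /* push the frame to the hardware; once the exchange completes,
         * call cb(ddev, arg, resp) with the response skb or an ERR_PTR
         */
        return 0;
}

static int mydrv_tg_listen(struct nfc_digital_dev *ddev, u16 timeout,
                           nfc_digital_cmd_complete_t cb, void *arg)
{
        /* arm the chip to listen in target mode for at most @timeout ms */
        return 0;
}

static int mydrv_switch_rf(struct nfc_digital_dev *ddev, bool on)
{
        /* drive the RF field on or off */
        return 0;
}

static void mydrv_abort_cmd(struct nfc_digital_dev *ddev)
{
        /* cancel the command currently in flight, if any */
}

/* These are the seven mandatory callbacks nfc_digital_allocate_device()
 * checks for; a real driver would use separate initiator and target
 * implementations where the hardware requires it.
 */
static struct nfc_digital_ops mydrv_digital_ops = {
        .in_configure_hw = mydrv_configure_hw,
        .in_send_cmd     = mydrv_send_cmd,
        .tg_configure_hw = mydrv_configure_hw,
        .tg_send_cmd     = mydrv_send_cmd,
        .tg_listen       = mydrv_tg_listen,
        .switch_rf       = mydrv_switch_rf,
        .abort_cmd       = mydrv_abort_cmd,
};

static int mydrv_register(void)
{
        struct nfc_digital_dev *ddev;
        int rc;

        /* driver_capabilities = 0: no CRC offload, so the digital layer
         * adds and checks CRCs itself (see digital_target_found() above)
         */
        ddev = nfc_digital_allocate_device(&mydrv_digital_ops,
                                           NFC_PROTO_MIFARE_MASK |
                                           NFC_PROTO_NFC_DEP_MASK,
                                           0, 0, 0);
        if (!ddev)
                return -ENOMEM;

        rc = nfc_digital_register_device(ddev);
        if (rc)
                nfc_digital_free_device(ddev);

        return rc;
}

Note that nfc_digital_allocate_device() adds DIGITAL_MAX_HEADER_LEN of headroom and DIGITAL_CRC_LEN of tailroom on top of whatever the driver asks for, so the driver only declares its own hardware framing needs.
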
diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
new file mode 100644
index 000000000000..07bbc24fb4c7
--- /dev/null
+++ b/net/nfc/digital_dep.c
@@ -0,0 +1,729 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) "digital: %s: " fmt, __func__
+
+#include "digital.h"
+
+#define DIGITAL_NFC_DEP_FRAME_DIR_OUT 0xD4
+#define DIGITAL_NFC_DEP_FRAME_DIR_IN 0xD5
+
+#define DIGITAL_NFC_DEP_NFCA_SOD_SB 0xF0
+
+#define DIGITAL_CMD_ATR_REQ 0x00
+#define DIGITAL_CMD_ATR_RES 0x01
+#define DIGITAL_CMD_PSL_REQ 0x04
+#define DIGITAL_CMD_PSL_RES 0x05
+#define DIGITAL_CMD_DEP_REQ 0x06
+#define DIGITAL_CMD_DEP_RES 0x07
+
+#define DIGITAL_ATR_REQ_MIN_SIZE 16
+#define DIGITAL_ATR_REQ_MAX_SIZE 64
+
+#define DIGITAL_NFCID3_LEN ((u8)8)
+#define DIGITAL_LR_BITS_PAYLOAD_SIZE_254B 0x30
+#define DIGITAL_GB_BIT 0x02
+
+#define DIGITAL_NFC_DEP_PFB_TYPE(pfb) ((pfb) & 0xE0)
+
+#define DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT 0x10
+
+#define DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb) \
+ ((pfb) & DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT)
+#define DIGITAL_NFC_DEP_MI_BIT_SET(pfb) ((pfb) & 0x10)
+#define DIGITAL_NFC_DEP_NAD_BIT_SET(pfb) ((pfb) & 0x08)
+#define DIGITAL_NFC_DEP_DID_BIT_SET(pfb) ((pfb) & 0x04)
+#define DIGITAL_NFC_DEP_PFB_PNI(pfb) ((pfb) & 0x03)
+
+#define DIGITAL_NFC_DEP_PFB_I_PDU 0x00
+#define DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU 0x40
+#define DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU 0x80
+
+struct digital_atr_req {
+ u8 dir;
+ u8 cmd;
+ u8 nfcid3[10];
+ u8 did;
+ u8 bs;
+ u8 br;
+ u8 pp;
+ u8 gb[0];
+} __packed;
+
+struct digital_atr_res {
+ u8 dir;
+ u8 cmd;
+ u8 nfcid3[10];
+ u8 did;
+ u8 bs;
+ u8 br;
+ u8 to;
+ u8 pp;
+ u8 gb[0];
+} __packed;
+
+struct digital_psl_req {
+ u8 dir;
+ u8 cmd;
+ u8 did;
+ u8 brs;
+ u8 fsl;
+} __packed;
+
+struct digital_psl_res {
+ u8 dir;
+ u8 cmd;
+ u8 did;
+} __packed;
+
+struct digital_dep_req_res {
+ u8 dir;
+ u8 cmd;
+ u8 pfb;
+} __packed;
+
+static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp);
+
+static void digital_skb_push_dep_sod(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb)
+{
+ skb_push(skb, sizeof(u8));
+
+ skb->data[0] = skb->len;
+
+ if (ddev->curr_rf_tech == NFC_DIGITAL_RF_TECH_106A)
+ *skb_push(skb, sizeof(u8)) = DIGITAL_NFC_DEP_NFCA_SOD_SB;
+}
+
+static int digital_skb_pull_dep_sod(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb)
+{
+ u8 size;
+
+ if (skb->len < 2)
+ return -EIO;
+
+ if (ddev->curr_rf_tech == NFC_DIGITAL_RF_TECH_106A)
+ skb_pull(skb, sizeof(u8));
+
+ size = skb->data[0];
+ if (size != skb->len)
+ return -EIO;
+
+ skb_pull(skb, sizeof(u8));
+
+ return 0;
+}
+
+static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ struct nfc_target *target = arg;
+ struct digital_atr_res *atr_res;
+ u8 gb_len;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ rc = ddev->skb_check_crc(resp);
+ if (rc) {
+ PROTOCOL_ERR("14.4.1.6");
+ goto exit;
+ }
+
+ rc = digital_skb_pull_dep_sod(ddev, resp);
+ if (rc) {
+ PROTOCOL_ERR("14.4.1.2");
+ goto exit;
+ }
+
+ if (resp->len < sizeof(struct digital_atr_res)) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ gb_len = resp->len - sizeof(struct digital_atr_res);
+
+ atr_res = (struct digital_atr_res *)resp->data;
+
+ rc = nfc_set_remote_general_bytes(ddev->nfc_dev, atr_res->gb, gb_len);
+ if (rc)
+ goto exit;
+
+ rc = nfc_dep_link_is_up(ddev->nfc_dev, target->idx, NFC_COMM_ACTIVE,
+ NFC_RF_INITIATOR);
+
+ ddev->curr_nfc_dep_pni = 0;
+
+exit:
+ dev_kfree_skb(resp);
+
+ if (rc)
+ ddev->curr_protocol = 0;
+}
+
+int digital_in_send_atr_req(struct nfc_digital_dev *ddev,
+ struct nfc_target *target, __u8 comm_mode, __u8 *gb,
+ size_t gb_len)
+{
+ struct sk_buff *skb;
+ struct digital_atr_req *atr_req;
+ uint size;
+
+ size = DIGITAL_ATR_REQ_MIN_SIZE + gb_len;
+
+ if (size > DIGITAL_ATR_REQ_MAX_SIZE) {
+ PROTOCOL_ERR("14.6.1.1");
+ return -EINVAL;
+ }
+
+ skb = digital_skb_alloc(ddev, size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(struct digital_atr_req));
+
+ atr_req = (struct digital_atr_req *)skb->data;
+ memset(atr_req, 0, sizeof(struct digital_atr_req));
+
+ atr_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
+ atr_req->cmd = DIGITAL_CMD_ATR_REQ;
+ if (target->nfcid2_len)
+ memcpy(atr_req->nfcid3, target->nfcid2,
+ max(target->nfcid2_len, DIGITAL_NFCID3_LEN));
+ else
+ get_random_bytes(atr_req->nfcid3, DIGITAL_NFCID3_LEN);
+
+ atr_req->did = 0;
+ atr_req->bs = 0;
+ atr_req->br = 0;
+
+ atr_req->pp = DIGITAL_LR_BITS_PAYLOAD_SIZE_254B;
+
+ if (gb_len) {
+ atr_req->pp |= DIGITAL_GB_BIT;
+ memcpy(skb_put(skb, gb_len), gb, gb_len);
+ }
+
+ digital_skb_push_dep_sod(ddev, skb);
+
+ ddev->skb_add_crc(skb);
+
+ digital_in_send_cmd(ddev, skb, 500, digital_in_recv_atr_res, target);
+
+ return 0;
+}
+
+static int digital_in_send_rtox(struct nfc_digital_dev *ddev,
+ struct digital_data_exch *data_exch, u8 rtox)
+{
+ struct digital_dep_req_res *dep_req;
+ struct sk_buff *skb;
+ int rc;
+
+ skb = digital_skb_alloc(ddev, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ *skb_put(skb, 1) = rtox;
+
+ skb_push(skb, sizeof(struct digital_dep_req_res));
+
+ dep_req = (struct digital_dep_req_res *)skb->data;
+
+ dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
+ dep_req->cmd = DIGITAL_CMD_DEP_REQ;
+ dep_req->pfb = DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU |
+ DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT;
+
+ digital_skb_push_dep_sod(ddev, skb);
+
+ ddev->skb_add_crc(skb);
+
+ rc = digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res,
+ data_exch);
+
+ return rc;
+}
+
+static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ struct digital_data_exch *data_exch = arg;
+ struct digital_dep_req_res *dep_res;
+ u8 pfb;
+ uint size;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ rc = ddev->skb_check_crc(resp);
+ if (rc) {
+ PROTOCOL_ERR("14.4.1.6");
+ goto error;
+ }
+
+ rc = digital_skb_pull_dep_sod(ddev, resp);
+ if (rc) {
+ PROTOCOL_ERR("14.4.1.2");
+ goto exit;
+ }
+
+ dep_res = (struct digital_dep_req_res *)resp->data;
+
+ if (resp->len < sizeof(struct digital_dep_req_res) ||
+ dep_res->dir != DIGITAL_NFC_DEP_FRAME_DIR_IN ||
+ dep_res->cmd != DIGITAL_CMD_DEP_RES) {
+ rc = -EIO;
+ goto error;
+ }
+
+ pfb = dep_res->pfb;
+
+ switch (DIGITAL_NFC_DEP_PFB_TYPE(pfb)) {
+ case DIGITAL_NFC_DEP_PFB_I_PDU:
+ if (DIGITAL_NFC_DEP_PFB_PNI(pfb) != ddev->curr_nfc_dep_pni) {
+ PROTOCOL_ERR("14.12.3.3");
+ rc = -EIO;
+ goto error;
+ }
+
+ ddev->curr_nfc_dep_pni =
+ DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1);
+ rc = 0;
+ break;
+
+ case DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU:
+ pr_err("Received a ACK/NACK PDU\n");
+ rc = -EIO;
+ goto error;
+
+ case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU:
+ if (!DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb)) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ rc = digital_in_send_rtox(ddev, data_exch, resp->data[3]);
+ if (rc)
+ goto error;
+
+ kfree_skb(resp);
+ return;
+ }
+
+ if (DIGITAL_NFC_DEP_MI_BIT_SET(pfb)) {
+ pr_err("MI bit set. Chained PDU not supported\n");
+ rc = -EIO;
+ goto error;
+ }
+
+ size = sizeof(struct digital_dep_req_res);
+
+ if (DIGITAL_NFC_DEP_DID_BIT_SET(pfb))
+ size++;
+
+ if (size > resp->len) {
+ rc = -EIO;
+ goto error;
+ }
+
+ skb_pull(resp, size);
+
+exit:
+ data_exch->cb(data_exch->cb_context, resp, rc);
+
+error:
+ kfree(data_exch);
+
+ if (rc)
+ kfree_skb(resp);
+}
+
+int digital_in_send_dep_req(struct nfc_digital_dev *ddev,
+ struct nfc_target *target, struct sk_buff *skb,
+ struct digital_data_exch *data_exch)
+{
+ struct digital_dep_req_res *dep_req;
+
+ skb_push(skb, sizeof(struct digital_dep_req_res));
+
+ dep_req = (struct digital_dep_req_res *)skb->data;
+ dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
+ dep_req->cmd = DIGITAL_CMD_DEP_REQ;
+ dep_req->pfb = ddev->curr_nfc_dep_pni;
+
+ digital_skb_push_dep_sod(ddev, skb);
+
+ ddev->skb_add_crc(skb);
+
+ return digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res,
+ data_exch);
+}
+
+static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ int rc;
+ struct digital_dep_req_res *dep_req;
+ size_t size;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ rc = ddev->skb_check_crc(resp);
+ if (rc) {
+ PROTOCOL_ERR("14.4.1.6");
+ goto exit;
+ }
+
+ rc = digital_skb_pull_dep_sod(ddev, resp);
+ if (rc) {
+ PROTOCOL_ERR("14.4.1.2");
+ goto exit;
+ }
+
+ size = sizeof(struct digital_dep_req_res);
+ dep_req = (struct digital_dep_req_res *)resp->data;
+
+ if (resp->len < size || dep_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT ||
+ dep_req->cmd != DIGITAL_CMD_DEP_REQ) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ if (DIGITAL_NFC_DEP_DID_BIT_SET(dep_req->pfb))
+ size++;
+
+ if (resp->len < size) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ switch (DIGITAL_NFC_DEP_PFB_TYPE(dep_req->pfb)) {
+ case DIGITAL_NFC_DEP_PFB_I_PDU:
+ pr_debug("DIGITAL_NFC_DEP_PFB_I_PDU\n");
+ ddev->curr_nfc_dep_pni = DIGITAL_NFC_DEP_PFB_PNI(dep_req->pfb);
+ break;
+ case DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU:
+ pr_err("Received a ACK/NACK PDU\n");
+ rc = -EINVAL;
+ goto exit;
+ break;
+ case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU:
+ pr_err("Received a SUPERVISOR PDU\n");
+ rc = -EINVAL;
+ goto exit;
+ break;
+ }
+
+ skb_pull(resp, size);
+
+ rc = nfc_tm_data_received(ddev->nfc_dev, resp);
+
+exit:
+ if (rc)
+ kfree_skb(resp);
+}
+
+int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb)
+{
+ struct digital_dep_req_res *dep_res;
+
+ skb_push(skb, sizeof(struct digital_dep_req_res));
+ dep_res = (struct digital_dep_req_res *)skb->data;
+
+ dep_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN;
+ dep_res->cmd = DIGITAL_CMD_DEP_RES;
+ dep_res->pfb = ddev->curr_nfc_dep_pni;
+
+ digital_skb_push_dep_sod(ddev, skb);
+
+ ddev->skb_add_crc(skb);
+
+ return digital_tg_send_cmd(ddev, skb, 1500, digital_tg_recv_dep_req,
+ NULL);
+}
+
+static void digital_tg_send_psl_res_complete(struct nfc_digital_dev *ddev,
+ void *arg, struct sk_buff *resp)
+{
+ u8 rf_tech = PTR_ERR(arg);
+
+ if (IS_ERR(resp))
+ return;
+
+ digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
+
+ digital_tg_listen(ddev, 1500, digital_tg_recv_dep_req, NULL);
+
+ dev_kfree_skb(resp);
+}
+
+static int digital_tg_send_psl_res(struct nfc_digital_dev *ddev, u8 did,
+ u8 rf_tech)
+{
+ struct digital_psl_res *psl_res;
+ struct sk_buff *skb;
+ int rc;
+
+ skb = digital_skb_alloc(ddev, sizeof(struct digital_psl_res));
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(struct digital_psl_res));
+
+ psl_res = (struct digital_psl_res *)skb->data;
+
+ psl_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN;
+ psl_res->cmd = DIGITAL_CMD_PSL_RES;
+ psl_res->did = did;
+
+ digital_skb_push_dep_sod(ddev, skb);
+
+ ddev->skb_add_crc(skb);
+
+ rc = digital_tg_send_cmd(ddev, skb, 0, digital_tg_send_psl_res_complete,
+ ERR_PTR(rf_tech));
+
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
+}
+
+static void digital_tg_recv_psl_req(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ int rc;
+ struct digital_psl_req *psl_req;
+ u8 rf_tech;
+ u8 dsi;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ rc = ddev->skb_check_crc(resp);
+ if (rc) {
+ PROTOCOL_ERR("14.4.1.6");
+ goto exit;
+ }
+
+ rc = digital_skb_pull_dep_sod(ddev, resp);
+ if (rc) {
+ PROTOCOL_ERR("14.4.1.2");
+ goto exit;
+ }
+
+ psl_req = (struct digital_psl_req *)resp->data;
+
+ if (resp->len != sizeof(struct digital_psl_req) ||
+ psl_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT ||
+ psl_req->cmd != DIGITAL_CMD_PSL_REQ) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ dsi = (psl_req->brs >> 3) & 0x07;
+ switch (dsi) {
+ case 0:
+ rf_tech = NFC_DIGITAL_RF_TECH_106A;
+ break;
+ case 1:
+ rf_tech = NFC_DIGITAL_RF_TECH_212F;
+ break;
+ case 2:
+ rf_tech = NFC_DIGITAL_RF_TECH_424F;
+ break;
+ default:
+ pr_err("Unsuported dsi value %d\n", dsi);
+ goto exit;
+ }
+
+ rc = digital_tg_send_psl_res(ddev, psl_req->did, rf_tech);
+
+exit:
+ kfree_skb(resp);
+}
+
+static void digital_tg_send_atr_res_complete(struct nfc_digital_dev *ddev,
+ void *arg, struct sk_buff *resp)
+{
+ int offset;
+
+ if (IS_ERR(resp)) {
+ digital_poll_next_tech(ddev);
+ return;
+ }
+
+ offset = 2;
+ if (resp->data[0] == DIGITAL_NFC_DEP_NFCA_SOD_SB)
+ offset++;
+
+ if (resp->data[offset] == DIGITAL_CMD_PSL_REQ)
+ digital_tg_recv_psl_req(ddev, arg, resp);
+ else
+ digital_tg_recv_dep_req(ddev, arg, resp);
+}
+
+static int digital_tg_send_atr_res(struct nfc_digital_dev *ddev,
+ struct digital_atr_req *atr_req)
+{
+ struct digital_atr_res *atr_res;
+ struct sk_buff *skb;
+ u8 *gb;
+ size_t gb_len;
+ int rc;
+
+ gb = nfc_get_local_general_bytes(ddev->nfc_dev, &gb_len);
+ if (!gb)
+ gb_len = 0;
+
+ skb = digital_skb_alloc(ddev, sizeof(struct digital_atr_res) + gb_len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(struct digital_atr_res));
+ atr_res = (struct digital_atr_res *)skb->data;
+
+ memset(atr_res, 0, sizeof(struct digital_atr_res));
+
+ atr_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN;
+ atr_res->cmd = DIGITAL_CMD_ATR_RES;
+ memcpy(atr_res->nfcid3, atr_req->nfcid3, sizeof(atr_req->nfcid3));
+ atr_res->to = 8;
+ atr_res->pp = DIGITAL_LR_BITS_PAYLOAD_SIZE_254B;
+ if (gb_len) {
+ skb_put(skb, gb_len);
+
+ atr_res->pp |= DIGITAL_GB_BIT;
+ memcpy(atr_res->gb, gb, gb_len);
+ }
+
+ digital_skb_push_dep_sod(ddev, skb);
+
+ ddev->skb_add_crc(skb);
+
+ rc = digital_tg_send_cmd(ddev, skb, 999,
+ digital_tg_send_atr_res_complete, NULL);
+ if (rc) {
+ kfree_skb(skb);
+ return rc;
+ }
+
+ return rc;
+}
+
+void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ int rc;
+ struct digital_atr_req *atr_req;
+ size_t gb_len, min_size;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ if (!resp->len) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ if (resp->data[0] == DIGITAL_NFC_DEP_NFCA_SOD_SB) {
+ min_size = DIGITAL_ATR_REQ_MIN_SIZE + 2;
+
+ ddev->curr_rf_tech = NFC_DIGITAL_RF_TECH_106A;
+ ddev->skb_add_crc = digital_skb_add_crc_a;
+ ddev->skb_check_crc = digital_skb_check_crc_a;
+ } else {
+ min_size = DIGITAL_ATR_REQ_MIN_SIZE + 1;
+
+ ddev->curr_rf_tech = NFC_DIGITAL_RF_TECH_212F;
+ ddev->skb_add_crc = digital_skb_add_crc_f;
+ ddev->skb_check_crc = digital_skb_check_crc_f;
+ }
+
+ if (resp->len < min_size) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ if (DIGITAL_DRV_CAPS_TG_CRC(ddev)) {
+ ddev->skb_add_crc = digital_skb_add_crc_none;
+ ddev->skb_check_crc = digital_skb_check_crc_none;
+ }
+
+ rc = ddev->skb_check_crc(resp);
+ if (rc) {
+ PROTOCOL_ERR("14.4.1.6");
+ goto exit;
+ }
+
+ rc = digital_skb_pull_dep_sod(ddev, resp);
+ if (rc) {
+ PROTOCOL_ERR("14.4.1.2");
+ goto exit;
+ }
+
+ atr_req = (struct digital_atr_req *)resp->data;
+
+ if (atr_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT ||
+ atr_req->cmd != DIGITAL_CMD_ATR_REQ) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED);
+ if (rc)
+ goto exit;
+
+ rc = digital_tg_send_atr_res(ddev, atr_req);
+ if (rc)
+ goto exit;
+
+ gb_len = resp->len - sizeof(struct digital_atr_req);
+ rc = nfc_tm_activated(ddev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
+ NFC_COMM_PASSIVE, atr_req->gb, gb_len);
+ if (rc)
+ goto exit;
+
+ ddev->poll_tech_count = 0;
+
+ rc = 0;
+exit:
+ if (rc)
+ digital_poll_next_tech(ddev);
+
+ dev_kfree_skb(resp);
+}
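
For readers tracing the DEP state machine, here is a tiny worked example of the PFB bit macros defined at the top of digital_dep.c. The byte values and the helper function are made up for illustration and are not part of the patch.

static void pfb_decode_example(void)
{
        u8 pfb = 0x90;  /* supervisor PDU with the timeout (RTOX) bit set */

        /* 0x90 & 0xE0 == 0x80 -> supervisor PDU */
        WARN_ON(DIGITAL_NFC_DEP_PFB_TYPE(pfb) !=
                DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU);

        /* 0x90 & 0x10 != 0 -> the target asks for a timeout extension,
         * which the initiator answers with digital_in_send_rtox() above
         */
        WARN_ON(!DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb));

        pfb = 0x02;     /* I-PDU carrying packet number 2 */

        /* 0x02 & 0xE0 == 0x00 -> I-PDU, 0x02 & 0x03 == 2 -> PNI 2 */
        WARN_ON(DIGITAL_NFC_DEP_PFB_TYPE(pfb) != DIGITAL_NFC_DEP_PFB_I_PDU);
        WARN_ON(DIGITAL_NFC_DEP_PFB_PNI(pfb) != 2);
}

On the initiator side, digital_in_recv_dep_res() only accepts an I-PDU whose PNI matches ddev->curr_nfc_dep_pni, then advances the counter modulo 4 via DIGITAL_NFC_DEP_PFB_PNI(curr_nfc_dep_pni + 1).
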
diff --git a/net/nfc/digital_technology.c b/net/nfc/digital_technology.c
new file mode 100644
index 000000000000..251c8c753ebe
--- /dev/null
+++ b/net/nfc/digital_technology.c
@@ -0,0 +1,770 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) "digital: %s: " fmt, __func__
+
+#include "digital.h"
+
+#define DIGITAL_CMD_SENS_REQ 0x26
+#define DIGITAL_CMD_ALL_REQ 0x52
+#define DIGITAL_CMD_SEL_REQ_CL1 0x93
+#define DIGITAL_CMD_SEL_REQ_CL2 0x95
+#define DIGITAL_CMD_SEL_REQ_CL3 0x97
+
+#define DIGITAL_SDD_REQ_SEL_PAR 0x20
+
+#define DIGITAL_SDD_RES_CT 0x88
+#define DIGITAL_SDD_RES_LEN 5
+
+#define DIGITAL_SEL_RES_NFCID1_COMPLETE(sel_res) (!((sel_res) & 0x04))
+#define DIGITAL_SEL_RES_IS_T2T(sel_res) (!((sel_res) & 0x60))
+#define DIGITAL_SEL_RES_IS_NFC_DEP(sel_res) ((sel_res) & 0x40)
+
+#define DIGITAL_SENS_RES_IS_T1T(sens_res) (((sens_res) & 0x0C00) == 0x0C00)
+#define DIGITAL_SENS_RES_IS_VALID(sens_res) \
+ ((!((sens_res) & 0x001F) && (((sens_res) & 0x0C00) == 0x0C00)) || \
+ (((sens_res) & 0x001F) && ((sens_res) & 0x0C00) != 0x0C00))
+
+#define DIGITAL_MIFARE_READ_RES_LEN 16
+#define DIGITAL_MIFARE_ACK_RES 0x0A
+
+#define DIGITAL_CMD_SENSF_REQ 0x00
+#define DIGITAL_CMD_SENSF_RES 0x01
+
+#define DIGITAL_SENSF_RES_MIN_LENGTH 17
+#define DIGITAL_SENSF_RES_RD_AP_B1 0x00
+#define DIGITAL_SENSF_RES_RD_AP_B2 0x8F
+
+#define DIGITAL_SENSF_REQ_RC_NONE 0
+#define DIGITAL_SENSF_REQ_RC_SC 1
+#define DIGITAL_SENSF_REQ_RC_AP 2
+
+struct digital_sdd_res {
+ u8 nfcid1[4];
+ u8 bcc;
+} __packed;
+
+struct digital_sel_req {
+ u8 sel_cmd;
+ u8 b2;
+ u8 nfcid1[4];
+ u8 bcc;
+} __packed;
+
+struct digital_sensf_req {
+ u8 cmd;
+ u8 sc1;
+ u8 sc2;
+ u8 rc;
+ u8 tsn;
+} __packed;
+
+struct digital_sensf_res {
+ u8 cmd;
+ u8 nfcid2[8];
+ u8 pad0[2];
+ u8 pad1[3];
+ u8 mrti_check;
+ u8 mrti_update;
+ u8 pad2;
+ u8 rd[2];
+} __packed;
+
+static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev,
+ struct nfc_target *target);
+
+static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ struct nfc_target *target = arg;
+ int rc;
+ u8 sel_res;
+ u8 nfc_proto;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ if (!DIGITAL_DRV_CAPS_IN_CRC(ddev)) {
+ rc = digital_skb_check_crc_a(resp);
+ if (rc) {
+ PROTOCOL_ERR("4.4.1.3");
+ goto exit;
+ }
+ }
+
+ if (!resp->len) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ sel_res = resp->data[0];
+
+ if (!DIGITAL_SEL_RES_NFCID1_COMPLETE(sel_res)) {
+ rc = digital_in_send_sdd_req(ddev, target);
+ if (rc)
+ goto exit;
+
+ goto exit_free_skb;
+ }
+
+ if (DIGITAL_SEL_RES_IS_T2T(sel_res)) {
+ nfc_proto = NFC_PROTO_MIFARE;
+ } else if (DIGITAL_SEL_RES_IS_NFC_DEP(sel_res)) {
+ nfc_proto = NFC_PROTO_NFC_DEP;
+ } else {
+ rc = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ target->sel_res = sel_res;
+
+ rc = digital_target_found(ddev, target, nfc_proto);
+
+exit:
+ kfree(target);
+
+exit_free_skb:
+ dev_kfree_skb(resp);
+
+ if (rc)
+ digital_poll_next_tech(ddev);
+}
+
+static int digital_in_send_sel_req(struct nfc_digital_dev *ddev,
+ struct nfc_target *target,
+ struct digital_sdd_res *sdd_res)
+{
+ struct sk_buff *skb;
+ struct digital_sel_req *sel_req;
+ u8 sel_cmd;
+ int rc;
+
+ skb = digital_skb_alloc(ddev, sizeof(struct digital_sel_req));
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(struct digital_sel_req));
+ sel_req = (struct digital_sel_req *)skb->data;
+
+ if (target->nfcid1_len <= 4)
+ sel_cmd = DIGITAL_CMD_SEL_REQ_CL1;
+ else if (target->nfcid1_len < 10)
+ sel_cmd = DIGITAL_CMD_SEL_REQ_CL2;
+ else
+ sel_cmd = DIGITAL_CMD_SEL_REQ_CL3;
+
+ sel_req->sel_cmd = sel_cmd;
+ sel_req->b2 = 0x70;
+ memcpy(sel_req->nfcid1, sdd_res->nfcid1, 4);
+ sel_req->bcc = sdd_res->bcc;
+
+ if (DIGITAL_DRV_CAPS_IN_CRC(ddev)) {
+ rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A);
+ if (rc)
+ goto exit;
+ } else {
+ digital_skb_add_crc_a(skb);
+ }
+
+ rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sel_res,
+ target);
+exit:
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
+}
+
+static void digital_in_recv_sdd_res(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ struct nfc_target *target = arg;
+ struct digital_sdd_res *sdd_res;
+ int rc;
+ u8 offset, size;
+ u8 i, bcc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ if (resp->len < DIGITAL_SDD_RES_LEN) {
+ PROTOCOL_ERR("4.7.2.8");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ sdd_res = (struct digital_sdd_res *)resp->data;
+
+ for (i = 0, bcc = 0; i < 4; i++)
+ bcc ^= sdd_res->nfcid1[i];
+
+ if (bcc != sdd_res->bcc) {
+ PROTOCOL_ERR("4.7.2.6");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (sdd_res->nfcid1[0] == DIGITAL_SDD_RES_CT) {
+ offset = 1;
+ size = 3;
+ } else {
+ offset = 0;
+ size = 4;
+ }
+
+ memcpy(target->nfcid1 + target->nfcid1_len, sdd_res->nfcid1 + offset,
+ size);
+ target->nfcid1_len += size;
+
+ rc = digital_in_send_sel_req(ddev, target, sdd_res);
+
+exit:
+ dev_kfree_skb(resp);
+
+ if (rc) {
+ kfree(target);
+ digital_poll_next_tech(ddev);
+ }
+}
+
+static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev,
+ struct nfc_target *target)
+{
+ int rc;
+ struct sk_buff *skb;
+ u8 sel_cmd;
+
+ rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCA_STANDARD);
+ if (rc)
+ return rc;
+
+ skb = digital_skb_alloc(ddev, 2);
+ if (!skb)
+ return -ENOMEM;
+
+ if (target->nfcid1_len == 0)
+ sel_cmd = DIGITAL_CMD_SEL_REQ_CL1;
+ else if (target->nfcid1_len == 3)
+ sel_cmd = DIGITAL_CMD_SEL_REQ_CL2;
+ else
+ sel_cmd = DIGITAL_CMD_SEL_REQ_CL3;
+
+ *skb_put(skb, sizeof(u8)) = sel_cmd;
+ *skb_put(skb, sizeof(u8)) = DIGITAL_SDD_REQ_SEL_PAR;
+
+ return digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res,
+ target);
+}
+
+static void digital_in_recv_sens_res(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ struct nfc_target *target = NULL;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ if (resp->len < sizeof(u16)) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ target = kzalloc(sizeof(struct nfc_target), GFP_KERNEL);
+ if (!target) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ target->sens_res = __le16_to_cpu(*(__le16 *)resp->data);
+
+ if (!DIGITAL_SENS_RES_IS_VALID(target->sens_res)) {
+ PROTOCOL_ERR("4.6.3.3");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (DIGITAL_SENS_RES_IS_T1T(target->sens_res))
+ rc = digital_target_found(ddev, target, NFC_PROTO_JEWEL);
+ else
+ rc = digital_in_send_sdd_req(ddev, target);
+
+exit:
+ dev_kfree_skb(resp);
+
+ if (rc) {
+ kfree(target);
+ digital_poll_next_tech(ddev);
+ }
+}
+
+int digital_in_send_sens_req(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+ struct sk_buff *skb;
+ int rc;
+
+ rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
+ NFC_DIGITAL_RF_TECH_106A);
+ if (rc)
+ return rc;
+
+ rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCA_SHORT);
+ if (rc)
+ return rc;
+
+ skb = digital_skb_alloc(ddev, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ *skb_put(skb, sizeof(u8)) = DIGITAL_CMD_SENS_REQ;
+
+ rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sens_res, NULL);
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
+}
+
+int digital_in_recv_mifare_res(struct sk_buff *resp)
+{
+ /* A successful READ command response is 16 data bytes + 2 CRC bytes long.
+ * Since the driver can't differentiate an ACK/NACK response from a valid
+ * READ response, the CRC calculation must be handled at the digital level
+ * even if the driver supports it for this technology.
+ */
+ if (resp->len == DIGITAL_MIFARE_READ_RES_LEN + DIGITAL_CRC_LEN) {
+ if (digital_skb_check_crc_a(resp)) {
+ PROTOCOL_ERR("9.4.1.2");
+ return -EIO;
+ }
+
+ return 0;
+ }
+
+ /* ACK response (i.e. successful WRITE). */
+ if (resp->len == 1 && resp->data[0] == DIGITAL_MIFARE_ACK_RES) {
+ resp->data[0] = 0;
+ return 0;
+ }
+
+ /* NACK and any other responses are treated as error. */
+ return -EIO;
+}
+
+static void digital_in_recv_sensf_res(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ int rc;
+ u8 proto;
+ struct nfc_target target;
+ struct digital_sensf_res *sensf_res;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ if (resp->len < DIGITAL_SENSF_RES_MIN_LENGTH) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ if (!DIGITAL_DRV_CAPS_IN_CRC(ddev)) {
+ rc = digital_skb_check_crc_f(resp);
+ if (rc) {
+ PROTOCOL_ERR("6.4.1.8");
+ goto exit;
+ }
+ }
+
+ skb_pull(resp, 1);
+
+ memset(&target, 0, sizeof(struct nfc_target));
+
+ sensf_res = (struct digital_sensf_res *)resp->data;
+
+ memcpy(target.sensf_res, sensf_res, resp->len);
+ target.sensf_res_len = resp->len;
+
+ memcpy(target.nfcid2, sensf_res->nfcid2, NFC_NFCID2_MAXSIZE);
+ target.nfcid2_len = NFC_NFCID2_MAXSIZE;
+
+ if (target.nfcid2[0] == DIGITAL_SENSF_NFCID2_NFC_DEP_B1 &&
+ target.nfcid2[1] == DIGITAL_SENSF_NFCID2_NFC_DEP_B2)
+ proto = NFC_PROTO_NFC_DEP;
+ else
+ proto = NFC_PROTO_FELICA;
+
+ rc = digital_target_found(ddev, &target, proto);
+
+exit:
+ dev_kfree_skb(resp);
+
+ if (rc)
+ digital_poll_next_tech(ddev);
+}
+
+int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+ struct digital_sensf_req *sensf_req;
+ struct sk_buff *skb;
+ int rc;
+ u8 size;
+
+ rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
+ if (rc)
+ return rc;
+
+ rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCF);
+ if (rc)
+ return rc;
+
+ size = sizeof(struct digital_sensf_req);
+
+ skb = digital_skb_alloc(ddev, size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, size);
+
+ sensf_req = (struct digital_sensf_req *)skb->data;
+ sensf_req->cmd = DIGITAL_CMD_SENSF_REQ;
+ sensf_req->sc1 = 0xFF;
+ sensf_req->sc2 = 0xFF;
+ sensf_req->rc = 0;
+ sensf_req->tsn = 0;
+
+ *skb_push(skb, 1) = size + 1;
+
+ if (!DIGITAL_DRV_CAPS_IN_CRC(ddev))
+ digital_skb_add_crc_f(skb);
+
+ rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sensf_res,
+ NULL);
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
+}
+
+static int digital_tg_send_sel_res(struct nfc_digital_dev *ddev)
+{
+ struct sk_buff *skb;
+ int rc;
+
+ skb = digital_skb_alloc(ddev, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ *skb_put(skb, 1) = DIGITAL_SEL_RES_NFC_DEP;
+
+ if (!DIGITAL_DRV_CAPS_TG_CRC(ddev))
+ digital_skb_add_crc_a(skb);
+
+ rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_atr_req,
+ NULL);
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
+}
+
+static void digital_tg_recv_sel_req(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ if (!DIGITAL_DRV_CAPS_TG_CRC(ddev)) {
+ rc = digital_skb_check_crc_a(resp);
+ if (rc) {
+ PROTOCOL_ERR("4.4.1.3");
+ goto exit;
+ }
+ }
+
+ /* Silently ignore SEL_REQ content and send a SEL_RES for NFC-DEP */
+
+ rc = digital_tg_send_sel_res(ddev);
+
+exit:
+ if (rc)
+ digital_poll_next_tech(ddev);
+
+ dev_kfree_skb(resp);
+}
+
+static int digital_tg_send_sdd_res(struct nfc_digital_dev *ddev)
+{
+ struct sk_buff *skb;
+ struct digital_sdd_res *sdd_res;
+ int rc, i;
+
+ skb = digital_skb_alloc(ddev, sizeof(struct digital_sdd_res));
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(struct digital_sdd_res));
+ sdd_res = (struct digital_sdd_res *)skb->data;
+
+ sdd_res->nfcid1[0] = 0x08;
+ get_random_bytes(sdd_res->nfcid1 + 1, 3);
+
+ sdd_res->bcc = 0;
+ for (i = 0; i < 4; i++)
+ sdd_res->bcc ^= sdd_res->nfcid1[i];
+
+ rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sel_req,
+ NULL);
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
+}
+
+static void digital_tg_recv_sdd_req(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ u8 *sdd_req;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ sdd_req = resp->data;
+
+ if (resp->len < 2 || sdd_req[0] != DIGITAL_CMD_SEL_REQ_CL1 ||
+ sdd_req[1] != DIGITAL_SDD_REQ_SEL_PAR) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ rc = digital_tg_send_sdd_res(ddev);
+
+exit:
+ if (rc)
+ digital_poll_next_tech(ddev);
+
+ dev_kfree_skb(resp);
+}
+
+static int digital_tg_send_sens_res(struct nfc_digital_dev *ddev)
+{
+ struct sk_buff *skb;
+ u8 *sens_res;
+ int rc;
+
+ skb = digital_skb_alloc(ddev, 2);
+ if (!skb)
+ return -ENOMEM;
+
+ sens_res = skb_put(skb, 2);
+
+ sens_res[0] = (DIGITAL_SENS_RES_NFC_DEP >> 8) & 0xFF;
+ sens_res[1] = DIGITAL_SENS_RES_NFC_DEP & 0xFF;
+
+ rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sdd_req,
+ NULL);
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
+}
+
+void digital_tg_recv_sens_req(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ u8 sens_req;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ if (!resp->len) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ sens_req = resp->data[0];
+
+ if (sens_req != DIGITAL_CMD_SENS_REQ &&
+ sens_req != DIGITAL_CMD_ALL_REQ) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ rc = digital_tg_send_sens_res(ddev);
+
+exit:
+ if (rc)
+ digital_poll_next_tech(ddev);
+
+ dev_kfree_skb(resp);
+}
+
+static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev,
+ struct digital_sensf_req *sensf_req)
+{
+ struct sk_buff *skb;
+ u8 size;
+ int rc;
+ struct digital_sensf_res *sensf_res;
+
+ size = sizeof(struct digital_sensf_res);
+
+ if (sensf_req->rc != DIGITAL_SENSF_REQ_RC_NONE)
+ size -= sizeof(sensf_res->rd);
+
+ skb = digital_skb_alloc(ddev, size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, size);
+
+ sensf_res = (struct digital_sensf_res *)skb->data;
+
+ memset(sensf_res, 0, size);
+
+ sensf_res->cmd = DIGITAL_CMD_SENSF_RES;
+ sensf_res->nfcid2[0] = DIGITAL_SENSF_NFCID2_NFC_DEP_B1;
+ sensf_res->nfcid2[1] = DIGITAL_SENSF_NFCID2_NFC_DEP_B2;
+ get_random_bytes(&sensf_res->nfcid2[2], 6);
+
+ switch (sensf_req->rc) {
+ case DIGITAL_SENSF_REQ_RC_SC:
+ sensf_res->rd[0] = sensf_req->sc1;
+ sensf_res->rd[1] = sensf_req->sc2;
+ break;
+ case DIGITAL_SENSF_REQ_RC_AP:
+ sensf_res->rd[0] = DIGITAL_SENSF_RES_RD_AP_B1;
+ sensf_res->rd[1] = DIGITAL_SENSF_RES_RD_AP_B2;
+ break;
+ }
+
+ *skb_push(skb, sizeof(u8)) = size + 1;
+
+ if (!DIGITAL_DRV_CAPS_TG_CRC(ddev))
+ digital_skb_add_crc_f(skb);
+
+ rc = digital_tg_send_cmd(ddev, skb, 300,
+ digital_tg_recv_atr_req, NULL);
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
+}
+
+void digital_tg_recv_sensf_req(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ struct digital_sensf_req *sensf_req;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ if (!DIGITAL_DRV_CAPS_TG_CRC(ddev)) {
+ rc = digital_skb_check_crc_f(resp);
+ if (rc) {
+ PROTOCOL_ERR("6.4.1.8");
+ goto exit;
+ }
+ }
+
+ if (resp->len != sizeof(struct digital_sensf_req) + 1) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ skb_pull(resp, 1);
+ sensf_req = (struct digital_sensf_req *)resp->data;
+
+ if (sensf_req->cmd != DIGITAL_CMD_SENSF_REQ) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ rc = digital_tg_send_sensf_res(ddev, sensf_req);
+
+exit:
+ if (rc)
+ digital_poll_next_tech(ddev);
+
+ dev_kfree_skb(resp);
+}
+
+int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+ int rc;
+
+ rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
+ if (rc)
+ return rc;
+
+ rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCA_NFC_DEP);
+ if (rc)
+ return rc;
+
+ return digital_tg_listen(ddev, 300, digital_tg_recv_sens_req, NULL);
+}
+
+int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+ int rc;
+ u8 *nfcid2;
+
+ rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
+ if (rc)
+ return rc;
+
+ rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCF_NFC_DEP);
+ if (rc)
+ return rc;
+
+ nfcid2 = kzalloc(NFC_NFCID2_MAXSIZE, GFP_KERNEL);
+ if (!nfcid2)
+ return -ENOMEM;
+
+ nfcid2[0] = DIGITAL_SENSF_NFCID2_NFC_DEP_B1;
+ nfcid2[1] = DIGITAL_SENSF_NFCID2_NFC_DEP_B2;
+ get_random_bytes(nfcid2 + 2, NFC_NFCID2_MAXSIZE - 2);
+
+ return digital_tg_listen(ddev, 300, digital_tg_recv_sensf_req, nfcid2);
+}
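
The DIGITAL_SENS_RES_IS_VALID()/DIGITAL_SENS_RES_IS_T1T() tests near the top of digital_technology.c are easier to follow with concrete values; the SENS_RES words below are made-up examples, not data from the patch.

/* Illustrative SENS_RES values (invented for this example):
 *
 *  0x0C00: low five bits (0x001F) clear and both 0x0C00 bits set
 *          -> DIGITAL_SENS_RES_IS_VALID() and DIGITAL_SENS_RES_IS_T1T()
 *          are true: digital_in_recv_sens_res() reports a Type 1 (Jewel)
 *          target directly.
 *
 *  0x0444: low five bits non-zero and the 0x0C00 bits not both set
 *          -> valid but not T1T: NFC-A anticollision continues with an
 *          SDD_REQ via digital_in_send_sdd_req().
 *
 *  0x0C04: low five bits non-zero but both 0x0C00 bits set
 *          -> neither branch of DIGITAL_SENS_RES_IS_VALID() matches, so
 *          PROTOCOL_ERR("4.6.3.3") is logged and the poll moves on to the
 *          next technology.
 */
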
diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c
index c7cf37ba7298..f1d426f10cce 100644
--- a/net/nfc/nci/spi.c
+++ b/net/nfc/nci/spi.c
@@ -21,11 +21,8 @@
#include <linux/export.h>
#include <linux/spi/spi.h>
#include <linux/crc-ccitt.h>
-#include <linux/nfc.h>
#include <net/nfc/nci_core.h>
-#define NCI_SPI_HDR_LEN 4
-#define NCI_SPI_CRC_LEN 2
#define NCI_SPI_ACK_SHIFT 6
#define NCI_SPI_MSB_PAYLOAD_MASK 0x3F
@@ -41,54 +38,48 @@
#define CRC_INIT 0xFFFF
-static int nci_spi_open(struct nci_dev *nci_dev)
-{
- struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev);
-
- return ndev->ops->open(ndev);
-}
-
-static int nci_spi_close(struct nci_dev *nci_dev)
-{
- struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev);
-
- return ndev->ops->close(ndev);
-}
-
-static int __nci_spi_send(struct nci_spi_dev *ndev, struct sk_buff *skb)
+static int __nci_spi_send(struct nci_spi *nspi, struct sk_buff *skb,
+ int cs_change)
{
struct spi_message m;
struct spi_transfer t;
- t.tx_buf = skb->data;
- t.len = skb->len;
- t.cs_change = 0;
- t.delay_usecs = ndev->xfer_udelay;
+ memset(&t, 0, sizeof(struct spi_transfer));
+ /* a NULL skb means we just want the SPI chip select line to be raised */
+ if (skb) {
+ t.tx_buf = skb->data;
+ t.len = skb->len;
+ } else {
+ /* still set tx_buf non-NULL to make the driver happy */
+ t.tx_buf = &t;
+ t.len = 0;
+ }
+ t.cs_change = cs_change;
+ t.delay_usecs = nspi->xfer_udelay;
spi_message_init(&m);
spi_message_add_tail(&t, &m);
- return spi_sync(ndev->spi, &m);
+ return spi_sync(nspi->spi, &m);
}
-static int nci_spi_send(struct nci_dev *nci_dev, struct sk_buff *skb)
+int nci_spi_send(struct nci_spi *nspi,
+ struct completion *write_handshake_completion,
+ struct sk_buff *skb)
{
- struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev);
unsigned int payload_len = skb->len;
unsigned char *hdr;
int ret;
long completion_rc;
- ndev->ops->deassert_int(ndev);
-
/* add the NCI SPI header to the start of the buffer */
hdr = skb_push(skb, NCI_SPI_HDR_LEN);
hdr[0] = NCI_SPI_DIRECT_WRITE;
- hdr[1] = ndev->acknowledge_mode;
+ hdr[1] = nspi->acknowledge_mode;
hdr[2] = payload_len >> 8;
hdr[3] = payload_len & 0xFF;
- if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
+ if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
u16 crc;
crc = crc_ccitt(CRC_INIT, skb->data, skb->len);
@@ -96,123 +87,77 @@ static int nci_spi_send(struct nci_dev *nci_dev, struct sk_buff *skb)
*skb_put(skb, 1) = crc & 0xFF;
}
- ret = __nci_spi_send(ndev, skb);
+ if (write_handshake_completion) {
+ /* Trick SPI driver to raise chip select */
+ ret = __nci_spi_send(nspi, NULL, 1);
+ if (ret)
+ goto done;
- kfree_skb(skb);
- ndev->ops->assert_int(ndev);
+ /* wait for NFC chip hardware handshake to complete */
+ if (wait_for_completion_timeout(write_handshake_completion,
+ msecs_to_jiffies(1000)) == 0) {
+ ret = -ETIME;
+ goto done;
+ }
+ }
- if (ret != 0 || ndev->acknowledge_mode == NCI_SPI_CRC_DISABLED)
+ ret = __nci_spi_send(nspi, skb, 0);
+ if (ret != 0 || nspi->acknowledge_mode == NCI_SPI_CRC_DISABLED)
goto done;
- init_completion(&ndev->req_completion);
- completion_rc =
- wait_for_completion_interruptible_timeout(&ndev->req_completion,
- NCI_SPI_SEND_TIMEOUT);
+ init_completion(&nspi->req_completion);
+ completion_rc = wait_for_completion_interruptible_timeout(
+ &nspi->req_completion,
+ NCI_SPI_SEND_TIMEOUT);
- if (completion_rc <= 0 || ndev->req_result == ACKNOWLEDGE_NACK)
+ if (completion_rc <= 0 || nspi->req_result == ACKNOWLEDGE_NACK)
ret = -EIO;
done:
+ kfree_skb(skb);
+
return ret;
}
-
-static struct nci_ops nci_spi_ops = {
- .open = nci_spi_open,
- .close = nci_spi_close,
- .send = nci_spi_send,
-};
+EXPORT_SYMBOL_GPL(nci_spi_send);
/* ---- Interface to NCI SPI drivers ---- */
/**
- * nci_spi_allocate_device - allocate a new nci spi device
+ * nci_spi_allocate_spi - allocate a new nci spi
*
* @spi: SPI device
- * @ops: device operations
- * @supported_protocols: NFC protocols supported by the device
- * @supported_se: NFC Secure Elements supported by the device
- * @acknowledge_mode: Acknowledge mode used by the device
+ * @acknowledge_mode: Acknowledge mode used by the NFC device
* @delay: delay between transactions in us
+ * @ndev: nci dev to send incoming nci frames to
*/
-struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi,
- struct nci_spi_ops *ops,
- u32 supported_protocols,
- u32 supported_se,
- u8 acknowledge_mode,
- unsigned int delay)
+struct nci_spi *nci_spi_allocate_spi(struct spi_device *spi,
+ u8 acknowledge_mode, unsigned int delay,
+ struct nci_dev *ndev)
{
- struct nci_spi_dev *ndev;
- int tailroom = 0;
+ struct nci_spi *nspi;
- if (!ops->open || !ops->close || !ops->assert_int || !ops->deassert_int)
+ nspi = devm_kzalloc(&spi->dev, sizeof(struct nci_spi), GFP_KERNEL);
+ if (!nspi)
return NULL;
- if (!supported_protocols)
- return NULL;
-
- ndev = devm_kzalloc(&spi->dev, sizeof(struct nci_dev), GFP_KERNEL);
- if (!ndev)
- return NULL;
+ nspi->acknowledge_mode = acknowledge_mode;
+ nspi->xfer_udelay = delay;
- ndev->ops = ops;
- ndev->acknowledge_mode = acknowledge_mode;
- ndev->xfer_udelay = delay;
+ nspi->spi = spi;
+ nspi->ndev = ndev;
- if (acknowledge_mode == NCI_SPI_CRC_ENABLED)
- tailroom += NCI_SPI_CRC_LEN;
-
- ndev->nci_dev = nci_allocate_device(&nci_spi_ops, supported_protocols,
- NCI_SPI_HDR_LEN, tailroom);
- if (!ndev->nci_dev)
- return NULL;
-
- nci_set_drvdata(ndev->nci_dev, ndev);
-
- return ndev;
+ return nspi;
}
-EXPORT_SYMBOL_GPL(nci_spi_allocate_device);
+EXPORT_SYMBOL_GPL(nci_spi_allocate_spi);
-/**
- * nci_spi_free_device - deallocate nci spi device
- *
- * @ndev: The nci spi device to deallocate
- */
-void nci_spi_free_device(struct nci_spi_dev *ndev)
-{
- nci_free_device(ndev->nci_dev);
-}
-EXPORT_SYMBOL_GPL(nci_spi_free_device);
-
-/**
- * nci_spi_register_device - register a nci spi device in the nfc subsystem
- *
- * @pdev: The nci spi device to register
- */
-int nci_spi_register_device(struct nci_spi_dev *ndev)
-{
- return nci_register_device(ndev->nci_dev);
-}
-EXPORT_SYMBOL_GPL(nci_spi_register_device);
-
-/**
- * nci_spi_unregister_device - unregister a nci spi device in the nfc subsystem
- *
- * @dev: The nci spi device to unregister
- */
-void nci_spi_unregister_device(struct nci_spi_dev *ndev)
-{
- nci_unregister_device(ndev->nci_dev);
-}
-EXPORT_SYMBOL_GPL(nci_spi_unregister_device);
-
-static int send_acknowledge(struct nci_spi_dev *ndev, u8 acknowledge)
+static int send_acknowledge(struct nci_spi *nspi, u8 acknowledge)
{
struct sk_buff *skb;
unsigned char *hdr;
u16 crc;
int ret;
- skb = nci_skb_alloc(ndev->nci_dev, 0, GFP_KERNEL);
+ skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL);
/* add the NCI SPI header to the start of the buffer */
hdr = skb_push(skb, NCI_SPI_HDR_LEN);
@@ -225,14 +170,14 @@ static int send_acknowledge(struct nci_spi_dev *ndev, u8 acknowledge)
*skb_put(skb, 1) = crc >> 8;
*skb_put(skb, 1) = crc & 0xFF;
- ret = __nci_spi_send(ndev, skb);
+ ret = __nci_spi_send(nspi, skb, 0);
kfree_skb(skb);
return ret;
}
-static struct sk_buff *__nci_spi_recv_frame(struct nci_spi_dev *ndev)
+static struct sk_buff *__nci_spi_read(struct nci_spi *nspi)
{
struct sk_buff *skb;
struct spi_message m;
@@ -242,43 +187,49 @@ static struct sk_buff *__nci_spi_recv_frame(struct nci_spi_dev *ndev)
int ret;
spi_message_init(&m);
+
+ memset(&tx, 0, sizeof(struct spi_transfer));
req[0] = NCI_SPI_DIRECT_READ;
- req[1] = ndev->acknowledge_mode;
+ req[1] = nspi->acknowledge_mode;
tx.tx_buf = req;
tx.len = 2;
tx.cs_change = 0;
spi_message_add_tail(&tx, &m);
+
+ memset(&rx, 0, sizeof(struct spi_transfer));
rx.rx_buf = resp_hdr;
rx.len = 2;
rx.cs_change = 1;
spi_message_add_tail(&rx, &m);
- ret = spi_sync(ndev->spi, &m);
+ ret = spi_sync(nspi->spi, &m);
if (ret)
return NULL;
- if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED)
+ if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED)
rx_len = ((resp_hdr[0] & NCI_SPI_MSB_PAYLOAD_MASK) << 8) +
resp_hdr[1] + NCI_SPI_CRC_LEN;
else
rx_len = (resp_hdr[0] << 8) | resp_hdr[1];
- skb = nci_skb_alloc(ndev->nci_dev, rx_len, GFP_KERNEL);
+ skb = nci_skb_alloc(nspi->ndev, rx_len, GFP_KERNEL);
if (!skb)
return NULL;
spi_message_init(&m);
+
+ memset(&rx, 0, sizeof(struct spi_transfer));
rx.rx_buf = skb_put(skb, rx_len);
rx.len = rx_len;
rx.cs_change = 0;
- rx.delay_usecs = ndev->xfer_udelay;
+ rx.delay_usecs = nspi->xfer_udelay;
spi_message_add_tail(&rx, &m);
- ret = spi_sync(ndev->spi, &m);
+ ret = spi_sync(nspi->spi, &m);
if (ret)
goto receive_error;
- if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
+ if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
*skb_push(skb, 1) = resp_hdr[1];
*skb_push(skb, 1) = resp_hdr[0];
}
@@ -318,61 +269,53 @@ static u8 nci_spi_get_ack(struct sk_buff *skb)
}
/**
- * nci_spi_recv_frame - receive frame from NCI SPI drivers
+ * nci_spi_read - read frame from NCI SPI drivers
*
- * @ndev: The nci spi device
+ * @nspi: The nci spi
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
* is non-interruptible, and has no timeout.
*
- * It returns zero on success, else a negative error code.
+ * It returns an allocated skb containing the frame on success, or NULL.
*/
-int nci_spi_recv_frame(struct nci_spi_dev *ndev)
+struct sk_buff *nci_spi_read(struct nci_spi *nspi)
{
struct sk_buff *skb;
- int ret = 0;
-
- ndev->ops->deassert_int(ndev);
/* Retrieve frame from SPI */
- skb = __nci_spi_recv_frame(ndev);
- if (!skb) {
- ret = -EIO;
+ skb = __nci_spi_read(nspi);
+ if (!skb)
goto done;
- }
- if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
+ if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
if (!nci_spi_check_crc(skb)) {
- send_acknowledge(ndev, ACKNOWLEDGE_NACK);
+ send_acknowledge(nspi, ACKNOWLEDGE_NACK);
goto done;
}
/* In case of acknowledged mode: if ACK or NACK received,
* unblock completion of latest frame sent.
*/
- ndev->req_result = nci_spi_get_ack(skb);
- if (ndev->req_result)
- complete(&ndev->req_completion);
+ nspi->req_result = nci_spi_get_ack(skb);
+ if (nspi->req_result)
+ complete(&nspi->req_completion);
}
/* If there is no payload (ACK/NACK only frame),
* free the socket buffer
*/
- if (skb->len == 0) {
+ if (!skb->len) {
kfree_skb(skb);
+ skb = NULL;
goto done;
}
- if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED)
- send_acknowledge(ndev, ACKNOWLEDGE_ACK);
-
- /* Forward skb to NCI core layer */
- ret = nci_recv_frame(ndev->nci_dev, skb);
+ if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED)
+ send_acknowledge(nspi, ACKNOWLEDGE_ACK);
done:
- ndev->ops->assert_int(ndev);
- return ret;
+ return skb;
}
-EXPORT_SYMBOL_GPL(nci_spi_recv_frame);
+EXPORT_SYMBOL_GPL(nci_spi_read);
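
With struct nci_spi_dev and its ops gone, the physical driver now owns the nci_dev, the chip-select handshake and the interrupt line. A rough usage sketch, where only the nci_spi_*()/nci_recv_frame() calls come from the NFC core and every mydrv_* name (and the 10 us delay) is invented:

/* Illustrative only -- not part of this patch. */
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <net/nfc/nci_core.h>

struct mydrv {
        struct nci_dev *ndev;   /* allocated with nci_allocate_device() */
        struct nci_spi *nspi;
};

static irqreturn_t mydrv_irq_thread_fn(int irq, void *data)
{
        struct mydrv *drv = data;
        struct sk_buff *skb;

        /* nci_spi_read() handles the CRC/ACK handshake and returns NULL
         * for ACK/NACK-only frames or on error
         */
        skb = nci_spi_read(drv->nspi);
        if (skb)
                nci_recv_frame(drv->ndev, skb);

        return IRQ_HANDLED;
}

static int mydrv_setup(struct spi_device *spi, struct mydrv *drv)
{
        drv->nspi = nci_spi_allocate_spi(spi, NCI_SPI_CRC_ENABLED, 10,
                                         drv->ndev);
        return drv->nspi ? 0 : -ENOMEM;
}

The driver's own nci_ops.send hook is then expected to call nci_spi_send(), passing a completion only when the chip requires the chip-select handshake before the payload is clocked out.
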
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 68063b2025da..84b7e3ea7b7a 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -58,6 +58,7 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
[NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
[NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
.len = NFC_FIRMWARE_NAME_MAXSIZE },
+ [NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
};
static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
@@ -1278,6 +1279,91 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
return 0;
}
+struct se_io_ctx {
+ u32 dev_idx;
+ u32 se_idx;
+};
+
+static void se_io_cb(void *context, u8 *apdu, size_t apdu_len, int err)
+{
+ struct se_io_ctx *ctx = context;
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ kfree(ctx);
+ return;
+ }
+
+ hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+ NFC_CMD_SE_IO);
+ if (!hdr)
+ goto free_msg;
+
+ if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, ctx->dev_idx) ||
+ nla_put_u32(msg, NFC_ATTR_SE_INDEX, ctx->se_idx) ||
+ nla_put(msg, NFC_ATTR_SE_APDU, apdu_len, apdu))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
+
+ kfree(ctx);
+
+ return;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+free_msg:
+ nlmsg_free(msg);
+ kfree(ctx);
+
+ return;
+}
+
+static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nfc_dev *dev;
+ struct se_io_ctx *ctx;
+ u32 dev_idx, se_idx;
+ u8 *apdu;
+ size_t apdu_len;
+
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+ !info->attrs[NFC_ATTR_SE_INDEX] ||
+ !info->attrs[NFC_ATTR_SE_APDU])
+ return -EINVAL;
+
+ dev_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+ se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]);
+
+ dev = nfc_get_device(dev_idx);
+ if (!dev)
+ return -ENODEV;
+
+ if (!dev->ops || !dev->ops->se_io)
+ return -ENOTSUPP;
+
+ apdu_len = nla_len(info->attrs[NFC_ATTR_SE_APDU]);
+ if (apdu_len == 0)
+ return -EINVAL;
+
+ apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]);
+ if (!apdu)
+ return -EINVAL;
+
+ ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev_idx = dev_idx;
+ ctx->se_idx = se_idx;
+
+ return dev->ops->se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
+}
+
static struct genl_ops nfc_genl_ops[] = {
{
.cmd = NFC_CMD_GET_DEVICE,
@@ -1358,6 +1444,11 @@ static struct genl_ops nfc_genl_ops[] = {
.done = nfc_genl_dump_ses_done,
.policy = nfc_genl_policy,
},
+ {
+ .cmd = NFC_CMD_SE_IO,
+ .doit = nfc_genl_se_io,
+ .policy = nfc_genl_policy,
+ },
};
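
For completeness, the driver side of the new NFC_CMD_SE_IO path: the se_io prototype below is inferred from the dev->ops->se_io() call in nfc_genl_se_io() above (the exact callback typedef lives in the NFC headers), and mydrv_se_io() is an invented example, not part of the patch.

/* Illustrative only -- not part of this patch. */
static int mydrv_se_io(struct nfc_dev *dev, u32 se_idx,
                       u8 *apdu, size_t apdu_length,
                       se_io_cb_t cb, void *cb_context)
{
        /* Forward the C-APDU to the secure element. When the R-APDU comes
         * back (possibly from interrupt or URB-completion context), call
         * cb(cb_context, rapdu, rapdu_len, 0); that lands in se_io_cb()
         * above, which multicasts an NFC_CMD_SE_IO event carrying
         * NFC_ATTR_SE_APDU back to user space.
         */
        return 0;
}
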
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 313bf1bc848a..cd958b381f96 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -142,11 +142,11 @@ static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
err = rawsock_add_header(skb);
if (err)
- goto error;
+ goto error_skb;
err = sock_queue_rcv_skb(sk, skb);
if (err)
- goto error;
+ goto error_skb;
spin_lock_bh(&sk->sk_write_queue.lock);
if (!skb_queue_empty(&sk->sk_write_queue))
@@ -158,6 +158,9 @@ static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
sock_put(sk);
return;
+error_skb:
+ kfree_skb(skb);
+
error:
rawsock_report_error(sk, err);
sock_put(sk);
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index ea36e99089af..3591cb5dae91 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -9,6 +9,8 @@ openvswitch-y := \
datapath.o \
dp_notify.o \
flow.o \
+ flow_netlink.o \
+ flow_table.o \
vport.o \
vport-internal_dev.o \
vport-netdev.o
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2aa13bd7f2b2..1408adc2a2a7 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -55,14 +55,10 @@
#include "datapath.h"
#include "flow.h"
+#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
-
-#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
-static void rehash_flow_table(struct work_struct *work);
-static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
-
int ovs_net_id __read_mostly;
static void ovs_notify(struct sk_buff *skb, struct genl_info *info,
@@ -165,7 +161,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
- ovs_flow_tbl_destroy((__force struct flow_table *)dp->table, false);
+ ovs_flow_tbl_destroy(&dp->table);
free_percpu(dp->stats_percpu);
release_net(ovs_dp_get_net(dp));
kfree(dp->ports);
@@ -225,6 +221,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
struct dp_stats_percpu *stats;
struct sw_flow_key key;
u64 *stats_counter;
+ u32 n_mask_hit;
int error;
stats = this_cpu_ptr(dp->stats_percpu);
@@ -237,7 +234,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
}
/* Look up flow. */
- flow = ovs_flow_lookup(rcu_dereference(dp->table), &key);
+ flow = ovs_flow_tbl_lookup(&dp->table, &key, &n_mask_hit);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
@@ -262,6 +259,7 @@ out:
/* Update datapath statistics. */
u64_stats_update_begin(&stats->sync);
(*stats_counter)++;
+ stats->n_mask_hit += n_mask_hit;
u64_stats_update_end(&stats->sync);
}
@@ -435,7 +433,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
upcall->dp_ifindex = dp_ifindex;
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
- ovs_flow_to_nlattrs(upcall_info->key, upcall_info->key, user_skb);
+ ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
nla_nest_end(user_skb, nla);
if (upcall_info->userdata)
@@ -455,398 +453,6 @@ out:
return err;
}
-/* Called with ovs_mutex. */
-static int flush_flows(struct datapath *dp)
-{
- struct flow_table *old_table;
- struct flow_table *new_table;
-
- old_table = ovsl_dereference(dp->table);
- new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
- if (!new_table)
- return -ENOMEM;
-
- rcu_assign_pointer(dp->table, new_table);
-
- ovs_flow_tbl_destroy(old_table, true);
- return 0;
-}
-
-static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len)
-{
-
- struct sw_flow_actions *acts;
- int new_acts_size;
- int req_size = NLA_ALIGN(attr_len);
- int next_offset = offsetof(struct sw_flow_actions, actions) +
- (*sfa)->actions_len;
-
- if (req_size <= (ksize(*sfa) - next_offset))
- goto out;
-
- new_acts_size = ksize(*sfa) * 2;
-
- if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
- if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
- return ERR_PTR(-EMSGSIZE);
- new_acts_size = MAX_ACTIONS_BUFSIZE;
- }
-
- acts = ovs_flow_actions_alloc(new_acts_size);
- if (IS_ERR(acts))
- return (void *)acts;
-
- memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
- acts->actions_len = (*sfa)->actions_len;
- kfree(*sfa);
- *sfa = acts;
-
-out:
- (*sfa)->actions_len += req_size;
- return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
-}
-
-static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
-{
- struct nlattr *a;
-
- a = reserve_sfa_size(sfa, nla_attr_size(len));
- if (IS_ERR(a))
- return PTR_ERR(a);
-
- a->nla_type = attrtype;
- a->nla_len = nla_attr_size(len);
-
- if (data)
- memcpy(nla_data(a), data, len);
- memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
-
- return 0;
-}
-
-static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype)
-{
- int used = (*sfa)->actions_len;
- int err;
-
- err = add_action(sfa, attrtype, NULL, 0);
- if (err)
- return err;
-
- return used;
-}
-
-static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset)
-{
- struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset);
-
- a->nla_len = sfa->actions_len - st_offset;
-}
-
-static int validate_and_copy_actions(const struct nlattr *attr,
- const struct sw_flow_key *key, int depth,
- struct sw_flow_actions **sfa);
-
-static int validate_and_copy_sample(const struct nlattr *attr,
- const struct sw_flow_key *key, int depth,
- struct sw_flow_actions **sfa)
-{
- const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
- const struct nlattr *probability, *actions;
- const struct nlattr *a;
- int rem, start, err, st_acts;
-
- memset(attrs, 0, sizeof(attrs));
- nla_for_each_nested(a, attr, rem) {
- int type = nla_type(a);
- if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
- return -EINVAL;
- attrs[type] = a;
- }
- if (rem)
- return -EINVAL;
-
- probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
- if (!probability || nla_len(probability) != sizeof(u32))
- return -EINVAL;
-
- actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
- if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
- return -EINVAL;
-
- /* validation done, copy sample action. */
- start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
- if (start < 0)
- return start;
- err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32));
- if (err)
- return err;
- st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
- if (st_acts < 0)
- return st_acts;
-
- err = validate_and_copy_actions(actions, key, depth + 1, sfa);
- if (err)
- return err;
-
- add_nested_action_end(*sfa, st_acts);
- add_nested_action_end(*sfa, start);
-
- return 0;
-}
-
-static int validate_tp_port(const struct sw_flow_key *flow_key)
-{
- if (flow_key->eth.type == htons(ETH_P_IP)) {
- if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
- return 0;
- } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
- if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int validate_and_copy_set_tun(const struct nlattr *attr,
- struct sw_flow_actions **sfa)
-{
- struct sw_flow_match match;
- struct sw_flow_key key;
- int err, start;
-
- ovs_match_init(&match, &key, NULL);
- err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &match, false);
- if (err)
- return err;
-
- start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
- if (start < 0)
- return start;
-
- err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key,
- sizeof(match.key->tun_key));
- add_nested_action_end(*sfa, start);
-
- return err;
-}
-
-static int validate_set(const struct nlattr *a,
- const struct sw_flow_key *flow_key,
- struct sw_flow_actions **sfa,
- bool *set_tun)
-{
- const struct nlattr *ovs_key = nla_data(a);
- int key_type = nla_type(ovs_key);
-
- /* There can be only one key in a action */
- if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
- return -EINVAL;
-
- if (key_type > OVS_KEY_ATTR_MAX ||
- (ovs_key_lens[key_type] != nla_len(ovs_key) &&
- ovs_key_lens[key_type] != -1))
- return -EINVAL;
-
- switch (key_type) {
- const struct ovs_key_ipv4 *ipv4_key;
- const struct ovs_key_ipv6 *ipv6_key;
- int err;
-
- case OVS_KEY_ATTR_PRIORITY:
- case OVS_KEY_ATTR_SKB_MARK:
- case OVS_KEY_ATTR_ETHERNET:
- break;
-
- case OVS_KEY_ATTR_TUNNEL:
- *set_tun = true;
- err = validate_and_copy_set_tun(a, sfa);
- if (err)
- return err;
- break;
-
- case OVS_KEY_ATTR_IPV4:
- if (flow_key->eth.type != htons(ETH_P_IP))
- return -EINVAL;
-
- if (!flow_key->ip.proto)
- return -EINVAL;
-
- ipv4_key = nla_data(ovs_key);
- if (ipv4_key->ipv4_proto != flow_key->ip.proto)
- return -EINVAL;
-
- if (ipv4_key->ipv4_frag != flow_key->ip.frag)
- return -EINVAL;
-
- break;
-
- case OVS_KEY_ATTR_IPV6:
- if (flow_key->eth.type != htons(ETH_P_IPV6))
- return -EINVAL;
-
- if (!flow_key->ip.proto)
- return -EINVAL;
-
- ipv6_key = nla_data(ovs_key);
- if (ipv6_key->ipv6_proto != flow_key->ip.proto)
- return -EINVAL;
-
- if (ipv6_key->ipv6_frag != flow_key->ip.frag)
- return -EINVAL;
-
- if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
- return -EINVAL;
-
- break;
-
- case OVS_KEY_ATTR_TCP:
- if (flow_key->ip.proto != IPPROTO_TCP)
- return -EINVAL;
-
- return validate_tp_port(flow_key);
-
- case OVS_KEY_ATTR_UDP:
- if (flow_key->ip.proto != IPPROTO_UDP)
- return -EINVAL;
-
- return validate_tp_port(flow_key);
-
- case OVS_KEY_ATTR_SCTP:
- if (flow_key->ip.proto != IPPROTO_SCTP)
- return -EINVAL;
-
- return validate_tp_port(flow_key);
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int validate_userspace(const struct nlattr *attr)
-{
- static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
- [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
- [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
- };
- struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
- int error;
-
- error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
- attr, userspace_policy);
- if (error)
- return error;
-
- if (!a[OVS_USERSPACE_ATTR_PID] ||
- !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
- return -EINVAL;
-
- return 0;
-}
-
-static int copy_action(const struct nlattr *from,
- struct sw_flow_actions **sfa)
-{
- int totlen = NLA_ALIGN(from->nla_len);
- struct nlattr *to;
-
- to = reserve_sfa_size(sfa, from->nla_len);
- if (IS_ERR(to))
- return PTR_ERR(to);
-
- memcpy(to, from, totlen);
- return 0;
-}
-
-static int validate_and_copy_actions(const struct nlattr *attr,
- const struct sw_flow_key *key,
- int depth,
- struct sw_flow_actions **sfa)
-{
- const struct nlattr *a;
- int rem, err;
-
- if (depth >= SAMPLE_ACTION_DEPTH)
- return -EOVERFLOW;
-
- nla_for_each_nested(a, attr, rem) {
- /* Expected argument lengths, (u32)-1 for variable length. */
- static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
- [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
- [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
- [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
- [OVS_ACTION_ATTR_POP_VLAN] = 0,
- [OVS_ACTION_ATTR_SET] = (u32)-1,
- [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
- };
- const struct ovs_action_push_vlan *vlan;
- int type = nla_type(a);
- bool skip_copy;
-
- if (type > OVS_ACTION_ATTR_MAX ||
- (action_lens[type] != nla_len(a) &&
- action_lens[type] != (u32)-1))
- return -EINVAL;
-
- skip_copy = false;
- switch (type) {
- case OVS_ACTION_ATTR_UNSPEC:
- return -EINVAL;
-
- case OVS_ACTION_ATTR_USERSPACE:
- err = validate_userspace(a);
- if (err)
- return err;
- break;
-
- case OVS_ACTION_ATTR_OUTPUT:
- if (nla_get_u32(a) >= DP_MAX_PORTS)
- return -EINVAL;
- break;
-
-
- case OVS_ACTION_ATTR_POP_VLAN:
- break;
-
- case OVS_ACTION_ATTR_PUSH_VLAN:
- vlan = nla_data(a);
- if (vlan->vlan_tpid != htons(ETH_P_8021Q))
- return -EINVAL;
- if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
- return -EINVAL;
- break;
-
- case OVS_ACTION_ATTR_SET:
- err = validate_set(a, key, sfa, &skip_copy);
- if (err)
- return err;
- break;
-
- case OVS_ACTION_ATTR_SAMPLE:
- err = validate_and_copy_sample(a, key, depth, sfa);
- if (err)
- return err;
- skip_copy = true;
- break;
-
- default:
- return -EINVAL;
- }
- if (!skip_copy) {
- err = copy_action(a, sfa);
- if (err)
- return err;
- }
- }
-
- if (rem > 0)
- return -EINVAL;
-
- return 0;
-}
-
static void clear_stats(struct sw_flow *flow)
{
flow->used = 0;
@@ -902,15 +508,16 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
if (err)
goto err_flow_free;
- err = ovs_flow_metadata_from_nlattrs(flow, a[OVS_PACKET_ATTR_KEY]);
+ err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
if (err)
goto err_flow_free;
- acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
+ acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
err = PTR_ERR(acts);
if (IS_ERR(acts))
goto err_flow_free;
- err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts);
+ err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
+ &flow->key, 0, &acts);
rcu_assign_pointer(flow->sf_acts, acts);
if (err)
goto err_flow_free;
@@ -958,15 +565,18 @@ static struct genl_ops dp_packet_genl_ops[] = {
}
};
-static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
+static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
+ struct ovs_dp_megaflow_stats *mega_stats)
{
- struct flow_table *table;
int i;
- table = rcu_dereference_check(dp->table, lockdep_ovsl_is_held());
- stats->n_flows = ovs_flow_tbl_count(table);
+ memset(mega_stats, 0, sizeof(*mega_stats));
+
+ stats->n_flows = ovs_flow_tbl_count(&dp->table);
+ mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
stats->n_hit = stats->n_missed = stats->n_lost = 0;
+
for_each_possible_cpu(i) {
const struct dp_stats_percpu *percpu_stats;
struct dp_stats_percpu local_stats;
@@ -982,6 +592,7 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
stats->n_hit += local_stats.n_hit;
stats->n_missed += local_stats.n_missed;
stats->n_lost += local_stats.n_lost;
+ mega_stats->n_mask_hit += local_stats.n_mask_hit;
}
}
@@ -1005,100 +616,6 @@ static struct genl_multicast_group ovs_dp_flow_multicast_group = {
.name = OVS_FLOW_MCGROUP
};
-static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb);
-static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
-{
- const struct nlattr *a;
- struct nlattr *start;
- int err = 0, rem;
-
- start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
- if (!start)
- return -EMSGSIZE;
-
- nla_for_each_nested(a, attr, rem) {
- int type = nla_type(a);
- struct nlattr *st_sample;
-
- switch (type) {
- case OVS_SAMPLE_ATTR_PROBABILITY:
- if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a)))
- return -EMSGSIZE;
- break;
- case OVS_SAMPLE_ATTR_ACTIONS:
- st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
- if (!st_sample)
- return -EMSGSIZE;
- err = actions_to_attr(nla_data(a), nla_len(a), skb);
- if (err)
- return err;
- nla_nest_end(skb, st_sample);
- break;
- }
- }
-
- nla_nest_end(skb, start);
- return err;
-}
-
-static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
-{
- const struct nlattr *ovs_key = nla_data(a);
- int key_type = nla_type(ovs_key);
- struct nlattr *start;
- int err;
-
- switch (key_type) {
- case OVS_KEY_ATTR_IPV4_TUNNEL:
- start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
- if (!start)
- return -EMSGSIZE;
-
- err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key),
- nla_data(ovs_key));
- if (err)
- return err;
- nla_nest_end(skb, start);
- break;
- default:
- if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
- return -EMSGSIZE;
- break;
- }
-
- return 0;
-}
-
-static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb)
-{
- const struct nlattr *a;
- int rem, err;
-
- nla_for_each_attr(a, attr, len, rem) {
- int type = nla_type(a);
-
- switch (type) {
- case OVS_ACTION_ATTR_SET:
- err = set_action_to_attr(a, skb);
- if (err)
- return err;
- break;
-
- case OVS_ACTION_ATTR_SAMPLE:
- err = sample_action_to_attr(a, skb);
- if (err)
- return err;
- break;
- default:
- if (nla_put(skb, type, nla_len(a), nla_data(a)))
- return -EMSGSIZE;
- break;
- }
- }
-
- return 0;
-}
-
static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
return NLMSG_ALIGN(sizeof(struct ovs_header))
@@ -1135,8 +652,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
if (!nla)
goto nla_put_failure;
- err = ovs_flow_to_nlattrs(&flow->unmasked_key,
- &flow->unmasked_key, skb);
+ err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
if (err)
goto error;
nla_nest_end(skb, nla);
@@ -1145,7 +661,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
if (!nla)
goto nla_put_failure;
- err = ovs_flow_to_nlattrs(&flow->key, &flow->mask->key, skb);
+ err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
if (err)
goto error;
@@ -1155,7 +671,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
used = flow->used;
stats.n_packets = flow->packet_count;
stats.n_bytes = flow->byte_count;
- tcp_flags = flow->tcp_flags;
+ tcp_flags = (u8)ntohs(flow->tcp_flags);
spin_unlock_bh(&flow->lock);
if (used &&
@@ -1188,7 +704,8 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
sf_acts = rcu_dereference_check(flow->sf_acts,
lockdep_ovsl_is_held());
- err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
+ err = ovs_nla_put_actions(sf_acts->actions,
+ sf_acts->actions_len, skb);
if (!err)
nla_nest_end(skb, start);
else {
@@ -1234,6 +751,14 @@ static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
return skb;
}
+static struct sw_flow *__ovs_flow_tbl_lookup(struct flow_table *tbl,
+ const struct sw_flow_key *key)
+{
+ u32 __always_unused n_mask_hit;
+
+ return ovs_flow_tbl_lookup(tbl, key, &n_mask_hit);
+}
+
static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
@@ -1243,7 +768,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
struct sw_flow_mask mask;
struct sk_buff *reply;
struct datapath *dp;
- struct flow_table *table;
struct sw_flow_actions *acts = NULL;
struct sw_flow_match match;
int error;
@@ -1254,21 +778,21 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
goto error;
ovs_match_init(&match, &key, &mask);
- error = ovs_match_from_nlattrs(&match,
- a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
+ error = ovs_nla_get_match(&match,
+ a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
if (error)
goto error;
/* Validate actions. */
if (a[OVS_FLOW_ATTR_ACTIONS]) {
- acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
+ acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
error = PTR_ERR(acts);
if (IS_ERR(acts))
goto error;
- ovs_flow_key_mask(&masked_key, &key, &mask);
- error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
- &masked_key, 0, &acts);
+ ovs_flow_mask_key(&masked_key, &key, &mask);
+ error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
+ &masked_key, 0, &acts);
if (error) {
OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
goto err_kfree;
@@ -1284,29 +808,14 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
if (!dp)
goto err_unlock_ovs;
- table = ovsl_dereference(dp->table);
-
/* Check if this is a duplicate flow */
- flow = ovs_flow_lookup(table, &key);
+ flow = __ovs_flow_tbl_lookup(&dp->table, &key);
if (!flow) {
- struct sw_flow_mask *mask_p;
/* Bail out if we're not allowed to create a new flow. */
error = -ENOENT;
if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
goto err_unlock_ovs;
- /* Expand table, if necessary, to make room. */
- if (ovs_flow_tbl_need_to_expand(table)) {
- struct flow_table *new_table;
-
- new_table = ovs_flow_tbl_expand(table);
- if (!IS_ERR(new_table)) {
- rcu_assign_pointer(dp->table, new_table);
- ovs_flow_tbl_destroy(table, true);
- table = ovsl_dereference(dp->table);
- }
- }
-
/* Allocate flow. */
flow = ovs_flow_alloc();
if (IS_ERR(flow)) {
@@ -1317,25 +826,14 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
flow->key = masked_key;
flow->unmasked_key = key;
-
- /* Make sure mask is unique in the system */
- mask_p = ovs_sw_flow_mask_find(table, &mask);
- if (!mask_p) {
- /* Allocate a new mask if none exsits. */
- mask_p = ovs_sw_flow_mask_alloc();
- if (!mask_p)
- goto err_flow_free;
- mask_p->key = mask.key;
- mask_p->range = mask.range;
- ovs_sw_flow_mask_insert(table, mask_p);
- }
-
- ovs_sw_flow_mask_add_ref(mask_p);
- flow->mask = mask_p;
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- ovs_flow_insert(table, flow);
+ error = ovs_flow_tbl_insert(&dp->table, flow, &mask);
+ if (error) {
+ acts = NULL;
+ goto err_flow_free;
+ }
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
info->snd_seq, OVS_FLOW_CMD_NEW);
@@ -1356,7 +854,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
/* The unmasked key has to be the same for flow updates. */
error = -EINVAL;
- if (!ovs_flow_cmp_unmasked_key(flow, &key, match.range.end)) {
+ if (!ovs_flow_cmp_unmasked_key(flow, &match)) {
OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
goto err_unlock_ovs;
}
@@ -1364,7 +862,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
/* Update actions. */
old_acts = ovsl_dereference(flow->sf_acts);
rcu_assign_pointer(flow->sf_acts, acts);
- ovs_flow_deferred_free_acts(old_acts);
+ ovs_nla_free_flow_actions(old_acts);
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
info->snd_seq, OVS_FLOW_CMD_NEW);
@@ -1403,7 +901,6 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *reply;
struct sw_flow *flow;
struct datapath *dp;
- struct flow_table *table;
struct sw_flow_match match;
int err;
@@ -1413,7 +910,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
}
ovs_match_init(&match, &key, NULL);
- err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+ err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
if (err)
return err;
@@ -1424,9 +921,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
- table = ovsl_dereference(dp->table);
- flow = ovs_flow_lookup_unmasked_key(table, &match);
- if (!flow) {
+ flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+ if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
err = -ENOENT;
goto unlock;
}
@@ -1453,7 +949,6 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *reply;
struct sw_flow *flow;
struct datapath *dp;
- struct flow_table *table;
struct sw_flow_match match;
int err;
@@ -1465,18 +960,17 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
}
if (!a[OVS_FLOW_ATTR_KEY]) {
- err = flush_flows(dp);
+ err = ovs_flow_tbl_flush(&dp->table);
goto unlock;
}
ovs_match_init(&match, &key, NULL);
- err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+ err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
if (err)
goto unlock;
- table = ovsl_dereference(dp->table);
- flow = ovs_flow_lookup_unmasked_key(table, &match);
- if (!flow) {
+ flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+ if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
err = -ENOENT;
goto unlock;
}
@@ -1487,7 +981,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
- ovs_flow_remove(table, flow);
+ ovs_flow_tbl_remove(&dp->table, flow);
err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
info->snd_seq, 0, OVS_FLOW_CMD_DEL);
@@ -1506,8 +1000,8 @@ unlock:
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
+ struct table_instance *ti;
struct datapath *dp;
- struct flow_table *table;
rcu_read_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
@@ -1516,14 +1010,14 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
return -ENODEV;
}
- table = rcu_dereference(dp->table);
+ ti = rcu_dereference(dp->table.ti);
for (;;) {
struct sw_flow *flow;
u32 bucket, obj;
bucket = cb->args[0];
obj = cb->args[1];
- flow = ovs_flow_dump_next(table, &bucket, &obj);
+ flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
if (!flow)
break;
@@ -1589,6 +1083,7 @@ static size_t ovs_dp_cmd_msg_size(void)
msgsize += nla_total_size(IFNAMSIZ);
msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
+ msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
return msgsize;
}
@@ -1598,6 +1093,7 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
{
struct ovs_header *ovs_header;
struct ovs_dp_stats dp_stats;
+ struct ovs_dp_megaflow_stats dp_megaflow_stats;
int err;
ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
@@ -1613,8 +1109,14 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
if (err)
goto nla_put_failure;
- get_dp_stats(dp, &dp_stats);
- if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
+ get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
+ if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
+ &dp_stats))
+ goto nla_put_failure;
+
+ if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
+ sizeof(struct ovs_dp_megaflow_stats),
+ &dp_megaflow_stats))
goto nla_put_failure;
return genlmsg_end(skb, ovs_header);
@@ -1687,9 +1189,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
/* Allocate table. */
- err = -ENOMEM;
- rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
- if (!dp->table)
+ err = ovs_flow_tbl_init(&dp->table);
+ if (err)
goto err_free_dp;
dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
@@ -1699,7 +1200,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
}
dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!dp->ports) {
err = -ENOMEM;
goto err_destroy_percpu;
@@ -1746,7 +1247,7 @@ err_destroy_ports_array:
err_destroy_percpu:
free_percpu(dp->stats_percpu);
err_destroy_table:
- ovs_flow_tbl_destroy(ovsl_dereference(dp->table), false);
+ ovs_flow_tbl_destroy(&dp->table);
err_free_dp:
release_net(ovs_dp_get_net(dp));
kfree(dp);
@@ -2336,32 +1837,6 @@ error:
return err;
}
-static void rehash_flow_table(struct work_struct *work)
-{
- struct datapath *dp;
- struct net *net;
-
- ovs_lock();
- rtnl_lock();
- for_each_net(net) {
- struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
-
- list_for_each_entry(dp, &ovs_net->dps, list_node) {
- struct flow_table *old_table = ovsl_dereference(dp->table);
- struct flow_table *new_table;
-
- new_table = ovs_flow_tbl_rehash(old_table);
- if (!IS_ERR(new_table)) {
- rcu_assign_pointer(dp->table, new_table);
- ovs_flow_tbl_destroy(old_table, true);
- }
- }
- }
- rtnl_unlock();
- ovs_unlock();
- schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
-}
-
static int __net_init ovs_init_net(struct net *net)
{
struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
@@ -2419,8 +1894,6 @@ static int __init dp_init(void)
if (err < 0)
goto error_unreg_notifier;
- schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
-
return 0;
error_unreg_notifier:
@@ -2437,7 +1910,6 @@ error:
static void dp_cleanup(void)
{
- cancel_delayed_work_sync(&rehash_flow_wq);
dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
unregister_netdevice_notifier(&ovs_dp_device_notifier);
unregister_pernet_device(&ovs_net_ops);
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 4d109c176ef3..d3d14a58aa91 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -27,6 +27,7 @@
#include <linux/u64_stats_sync.h>
#include "flow.h"
+#include "flow_table.h"
#include "vport.h"
#define DP_MAX_PORTS USHRT_MAX
@@ -45,11 +46,15 @@
* @n_lost: Number of received packets that had no matching flow in the flow
* table that could not be sent to userspace (normally due to an overflow in
* one of the datapath's queues).
+ * @n_mask_hit: Number of masks looked up for flow match.
+ * @n_mask_hit / (@n_hit + @n_missed) will be the average masks looked
+ * up per packet.
*/
struct dp_stats_percpu {
u64 n_hit;
u64 n_missed;
u64 n_lost;
+ u64 n_mask_hit;
struct u64_stats_sync sync;
};
@@ -57,7 +62,7 @@ struct dp_stats_percpu {
* struct datapath - datapath for flow-based packet switching
* @rcu: RCU callback head for deferred destruction.
* @list_node: Element in global 'dps' list.
- * @table: Current flow table. Protected by ovs_mutex and RCU.
+ * @table: flow table.
* @ports: Hash table for ports. %OVSP_LOCAL port always exists. Protected by
* ovs_mutex and RCU.
* @stats_percpu: Per-CPU datapath statistics.
@@ -71,7 +76,7 @@ struct datapath {
struct list_head list_node;
/* Flow table. */
- struct flow_table __rcu *table;
+ struct flow_table table;
/* Switch ports. */
struct hlist_head *ports;
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index c3235675f359..5c2dab276109 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -65,8 +65,7 @@ void ovs_dp_notify_wq(struct work_struct *work)
continue;
netdev_vport = netdev_vport_priv(vport);
- if (netdev_vport->dev->reg_state == NETREG_UNREGISTERED ||
- netdev_vport->dev->reg_state == NETREG_UNREGISTERING)
+ if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH))
dp_detach_port_notify(vport);
}
}
@@ -88,6 +87,10 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
return NOTIFY_DONE;
if (event == NETDEV_UNREGISTER) {
+ /* upper_dev_unlink and decrement promisc immediately */
+ ovs_netdev_detach_dev(vport);
+
+ /* schedule vport destroy, dev_put and genl notification */
ovs_net = net_generic(dev_net(dev), ovs_net_id);
queue_work(system_wq, &ovs_net->dp_notify_work);
}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 410db90db73d..b409f5279601 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -45,202 +45,38 @@
#include <net/ipv6.h>
#include <net/ndisc.h>
-static struct kmem_cache *flow_cache;
-
-static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
- struct sw_flow_key_range *range, u8 val);
-
-static void update_range__(struct sw_flow_match *match,
- size_t offset, size_t size, bool is_mask)
+u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
- struct sw_flow_key_range *range = NULL;
- size_t start = rounddown(offset, sizeof(long));
- size_t end = roundup(offset + size, sizeof(long));
-
- if (!is_mask)
- range = &match->range;
- else if (match->mask)
- range = &match->mask->range;
-
- if (!range)
- return;
-
- if (range->start == range->end) {
- range->start = start;
- range->end = end;
- return;
- }
-
- if (range->start > start)
- range->start = start;
+ struct timespec cur_ts;
+ u64 cur_ms, idle_ms;
- if (range->end < end)
- range->end = end;
-}
+ ktime_get_ts(&cur_ts);
+ idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
+ cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
+ cur_ts.tv_nsec / NSEC_PER_MSEC;
-#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
- do { \
- update_range__(match, offsetof(struct sw_flow_key, field), \
- sizeof((match)->key->field), is_mask); \
- if (is_mask) { \
- if ((match)->mask) \
- (match)->mask->key.field = value; \
- } else { \
- (match)->key->field = value; \
- } \
- } while (0)
-
-#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
- do { \
- update_range__(match, offsetof(struct sw_flow_key, field), \
- len, is_mask); \
- if (is_mask) { \
- if ((match)->mask) \
- memcpy(&(match)->mask->key.field, value_p, len);\
- } else { \
- memcpy(&(match)->key->field, value_p, len); \
- } \
- } while (0)
-
-static u16 range_n_bytes(const struct sw_flow_key_range *range)
-{
- return range->end - range->start;
+ return cur_ms - idle_ms;
}
-void ovs_match_init(struct sw_flow_match *match,
- struct sw_flow_key *key,
- struct sw_flow_mask *mask)
-{
- memset(match, 0, sizeof(*match));
- match->key = key;
- match->mask = mask;
+#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
- memset(key, 0, sizeof(*key));
-
- if (mask) {
- memset(&mask->key, 0, sizeof(mask->key));
- mask->range.start = mask->range.end = 0;
- }
-}
-
-static bool ovs_match_validate(const struct sw_flow_match *match,
- u64 key_attrs, u64 mask_attrs)
+void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
- u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET;
- u64 mask_allowed = key_attrs; /* At most allow all key attributes */
-
- /* The following mask attributes allowed only if they
- * pass the validation tests. */
- mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
- | (1 << OVS_KEY_ATTR_IPV6)
- | (1 << OVS_KEY_ATTR_TCP)
- | (1 << OVS_KEY_ATTR_UDP)
- | (1 << OVS_KEY_ATTR_SCTP)
- | (1 << OVS_KEY_ATTR_ICMP)
- | (1 << OVS_KEY_ATTR_ICMPV6)
- | (1 << OVS_KEY_ATTR_ARP)
- | (1 << OVS_KEY_ATTR_ND));
-
- /* Always allowed mask fields. */
- mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
- | (1 << OVS_KEY_ATTR_IN_PORT)
- | (1 << OVS_KEY_ATTR_ETHERTYPE));
-
- /* Check key attributes. */
- if (match->key->eth.type == htons(ETH_P_ARP)
- || match->key->eth.type == htons(ETH_P_RARP)) {
- key_expected |= 1 << OVS_KEY_ATTR_ARP;
- if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
- mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
- }
+ __be16 tcp_flags = 0;
- if (match->key->eth.type == htons(ETH_P_IP)) {
- key_expected |= 1 << OVS_KEY_ATTR_IPV4;
- if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
- mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;
-
- if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
- if (match->key->ip.proto == IPPROTO_UDP) {
- key_expected |= 1 << OVS_KEY_ATTR_UDP;
- if (match->mask && (match->mask->key.ip.proto == 0xff))
- mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
- }
-
- if (match->key->ip.proto == IPPROTO_SCTP) {
- key_expected |= 1 << OVS_KEY_ATTR_SCTP;
- if (match->mask && (match->mask->key.ip.proto == 0xff))
- mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
- }
-
- if (match->key->ip.proto == IPPROTO_TCP) {
- key_expected |= 1 << OVS_KEY_ATTR_TCP;
- if (match->mask && (match->mask->key.ip.proto == 0xff))
- mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
- }
-
- if (match->key->ip.proto == IPPROTO_ICMP) {
- key_expected |= 1 << OVS_KEY_ATTR_ICMP;
- if (match->mask && (match->mask->key.ip.proto == 0xff))
- mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
- }
- }
- }
-
- if (match->key->eth.type == htons(ETH_P_IPV6)) {
- key_expected |= 1 << OVS_KEY_ATTR_IPV6;
- if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
- mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;
-
- if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
- if (match->key->ip.proto == IPPROTO_UDP) {
- key_expected |= 1 << OVS_KEY_ATTR_UDP;
- if (match->mask && (match->mask->key.ip.proto == 0xff))
- mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
- }
-
- if (match->key->ip.proto == IPPROTO_SCTP) {
- key_expected |= 1 << OVS_KEY_ATTR_SCTP;
- if (match->mask && (match->mask->key.ip.proto == 0xff))
- mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
- }
-
- if (match->key->ip.proto == IPPROTO_TCP) {
- key_expected |= 1 << OVS_KEY_ATTR_TCP;
- if (match->mask && (match->mask->key.ip.proto == 0xff))
- mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
- }
-
- if (match->key->ip.proto == IPPROTO_ICMPV6) {
- key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
- if (match->mask && (match->mask->key.ip.proto == 0xff))
- mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;
-
- if (match->key->ipv6.tp.src ==
- htons(NDISC_NEIGHBOUR_SOLICITATION) ||
- match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
- key_expected |= 1 << OVS_KEY_ATTR_ND;
- if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff)))
- mask_allowed |= 1 << OVS_KEY_ATTR_ND;
- }
- }
- }
- }
-
- if ((key_attrs & key_expected) != key_expected) {
- /* Key attributes check failed. */
- OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
- key_attrs, key_expected);
- return false;
- }
-
- if ((mask_attrs & mask_allowed) != mask_attrs) {
- /* Mask attributes check failed. */
- OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n",
- mask_attrs, mask_allowed);
- return false;
+ if ((flow->key.eth.type == htons(ETH_P_IP) ||
+ flow->key.eth.type == htons(ETH_P_IPV6)) &&
+ flow->key.ip.proto == IPPROTO_TCP &&
+ likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
+ tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
}
- return true;
+ spin_lock(&flow->lock);
+ flow->used = jiffies;
+ flow->packet_count++;
+ flow->byte_count += skb->len;
+ flow->tcp_flags |= tcp_flags;
+ spin_unlock(&flow->lock);
}
static int check_header(struct sk_buff *skb, int len)
@@ -311,19 +147,6 @@ static bool icmphdr_ok(struct sk_buff *skb)
sizeof(struct icmphdr));
}
-u64 ovs_flow_used_time(unsigned long flow_jiffies)
-{
- struct timespec cur_ts;
- u64 cur_ms, idle_ms;
-
- ktime_get_ts(&cur_ts);
- idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
- cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
- cur_ts.tv_nsec / NSEC_PER_MSEC;
-
- return cur_ms - idle_ms;
-}
-
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
unsigned int nh_ofs = skb_network_offset(skb);
@@ -372,311 +195,6 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
sizeof(struct icmp6hdr));
}
-void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
- const struct sw_flow_mask *mask)
-{
- const long *m = (long *)((u8 *)&mask->key + mask->range.start);
- const long *s = (long *)((u8 *)src + mask->range.start);
- long *d = (long *)((u8 *)dst + mask->range.start);
- int i;
-
- /* The memory outside of the 'mask->range' are not set since
- * further operations on 'dst' only uses contents within
- * 'mask->range'.
- */
- for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
- *d++ = *s++ & *m++;
-}
-
-#define TCP_FLAGS_OFFSET 13
-#define TCP_FLAG_MASK 0x3f
-
-void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
-{
- u8 tcp_flags = 0;
-
- if ((flow->key.eth.type == htons(ETH_P_IP) ||
- flow->key.eth.type == htons(ETH_P_IPV6)) &&
- flow->key.ip.proto == IPPROTO_TCP &&
- likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
- u8 *tcp = (u8 *)tcp_hdr(skb);
- tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
- }
-
- spin_lock(&flow->lock);
- flow->used = jiffies;
- flow->packet_count++;
- flow->byte_count += skb->len;
- flow->tcp_flags |= tcp_flags;
- spin_unlock(&flow->lock);
-}
-
-struct sw_flow_actions *ovs_flow_actions_alloc(int size)
-{
- struct sw_flow_actions *sfa;
-
- if (size > MAX_ACTIONS_BUFSIZE)
- return ERR_PTR(-EINVAL);
-
- sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
- if (!sfa)
- return ERR_PTR(-ENOMEM);
-
- sfa->actions_len = 0;
- return sfa;
-}
-
-struct sw_flow *ovs_flow_alloc(void)
-{
- struct sw_flow *flow;
-
- flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
- if (!flow)
- return ERR_PTR(-ENOMEM);
-
- spin_lock_init(&flow->lock);
- flow->sf_acts = NULL;
- flow->mask = NULL;
-
- return flow;
-}
-
-static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
-{
- hash = jhash_1word(hash, table->hash_seed);
- return flex_array_get(table->buckets,
- (hash & (table->n_buckets - 1)));
-}
-
-static struct flex_array *alloc_buckets(unsigned int n_buckets)
-{
- struct flex_array *buckets;
- int i, err;
-
- buckets = flex_array_alloc(sizeof(struct hlist_head),
- n_buckets, GFP_KERNEL);
- if (!buckets)
- return NULL;
-
- err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
- if (err) {
- flex_array_free(buckets);
- return NULL;
- }
-
- for (i = 0; i < n_buckets; i++)
- INIT_HLIST_HEAD((struct hlist_head *)
- flex_array_get(buckets, i));
-
- return buckets;
-}
-
-static void free_buckets(struct flex_array *buckets)
-{
- flex_array_free(buckets);
-}
-
-static struct flow_table *__flow_tbl_alloc(int new_size)
-{
- struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
-
- if (!table)
- return NULL;
-
- table->buckets = alloc_buckets(new_size);
-
- if (!table->buckets) {
- kfree(table);
- return NULL;
- }
- table->n_buckets = new_size;
- table->count = 0;
- table->node_ver = 0;
- table->keep_flows = false;
- get_random_bytes(&table->hash_seed, sizeof(u32));
- table->mask_list = NULL;
-
- return table;
-}
-
-static void __flow_tbl_destroy(struct flow_table *table)
-{
- int i;
-
- if (table->keep_flows)
- goto skip_flows;
-
- for (i = 0; i < table->n_buckets; i++) {
- struct sw_flow *flow;
- struct hlist_head *head = flex_array_get(table->buckets, i);
- struct hlist_node *n;
- int ver = table->node_ver;
-
- hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
- hlist_del(&flow->hash_node[ver]);
- ovs_flow_free(flow, false);
- }
- }
-
- BUG_ON(!list_empty(table->mask_list));
- kfree(table->mask_list);
-
-skip_flows:
- free_buckets(table->buckets);
- kfree(table);
-}
-
-struct flow_table *ovs_flow_tbl_alloc(int new_size)
-{
- struct flow_table *table = __flow_tbl_alloc(new_size);
-
- if (!table)
- return NULL;
-
- table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
- if (!table->mask_list) {
- table->keep_flows = true;
- __flow_tbl_destroy(table);
- return NULL;
- }
- INIT_LIST_HEAD(table->mask_list);
-
- return table;
-}
-
-static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
-{
- struct flow_table *table = container_of(rcu, struct flow_table, rcu);
-
- __flow_tbl_destroy(table);
-}
-
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
-{
- if (!table)
- return;
-
- if (deferred)
- call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
- else
- __flow_tbl_destroy(table);
-}
-
-struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *last)
-{
- struct sw_flow *flow;
- struct hlist_head *head;
- int ver;
- int i;
-
- ver = table->node_ver;
- while (*bucket < table->n_buckets) {
- i = 0;
- head = flex_array_get(table->buckets, *bucket);
- hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
- if (i < *last) {
- i++;
- continue;
- }
- *last = i + 1;
- return flow;
- }
- (*bucket)++;
- *last = 0;
- }
-
- return NULL;
-}
-
-static void __tbl_insert(struct flow_table *table, struct sw_flow *flow)
-{
- struct hlist_head *head;
-
- head = find_bucket(table, flow->hash);
- hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
-
- table->count++;
-}
-
-static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
-{
- int old_ver;
- int i;
-
- old_ver = old->node_ver;
- new->node_ver = !old_ver;
-
- /* Insert in new table. */
- for (i = 0; i < old->n_buckets; i++) {
- struct sw_flow *flow;
- struct hlist_head *head;
-
- head = flex_array_get(old->buckets, i);
-
- hlist_for_each_entry(flow, head, hash_node[old_ver])
- __tbl_insert(new, flow);
- }
-
- new->mask_list = old->mask_list;
- old->keep_flows = true;
-}
-
-static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
-{
- struct flow_table *new_table;
-
- new_table = __flow_tbl_alloc(n_buckets);
- if (!new_table)
- return ERR_PTR(-ENOMEM);
-
- flow_table_copy_flows(table, new_table);
-
- return new_table;
-}
-
-struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
-{
- return __flow_tbl_rehash(table, table->n_buckets);
-}
-
-struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
-{
- return __flow_tbl_rehash(table, table->n_buckets * 2);
-}
-
-static void __flow_free(struct sw_flow *flow)
-{
- kfree((struct sf_flow_acts __force *)flow->sf_acts);
- kmem_cache_free(flow_cache, flow);
-}
-
-static void rcu_free_flow_callback(struct rcu_head *rcu)
-{
- struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
-
- __flow_free(flow);
-}
-
-void ovs_flow_free(struct sw_flow *flow, bool deferred)
-{
- if (!flow)
- return;
-
- ovs_sw_flow_mask_del_ref(flow->mask, deferred);
-
- if (deferred)
- call_rcu(&flow->rcu, rcu_free_flow_callback);
- else
- __flow_free(flow);
-}
-
-/* Schedules 'sf_acts' to be freed after the next RCU grace period.
- * The caller must hold rcu_read_lock for this to be sensible. */
-void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
-{
- kfree_rcu(sf_acts, rcu);
-}
-
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
struct qtag_prefix {
@@ -910,6 +428,7 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
struct tcphdr *tcp = tcp_hdr(skb);
key->ipv4.tp.src = tcp->source;
key->ipv4.tp.dst = tcp->dest;
+ key->ipv4.tp.flags = TCP_FLAGS_BE16(tcp);
}
} else if (key->ip.proto == IPPROTO_UDP) {
if (udphdr_ok(skb)) {
@@ -978,6 +497,7 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
struct tcphdr *tcp = tcp_hdr(skb);
key->ipv6.tp.src = tcp->source;
key->ipv6.tp.dst = tcp->dest;
+ key->ipv6.tp.flags = TCP_FLAGS_BE16(tcp);
}
} else if (key->ip.proto == NEXTHDR_UDP) {
if (udphdr_ok(skb)) {
@@ -1002,1080 +522,3 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
return 0;
}
-
-static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start,
- int key_end)
-{
- u32 *hash_key = (u32 *)((u8 *)key + key_start);
- int hash_u32s = (key_end - key_start) >> 2;
-
- /* Make sure number of hash bytes are multiple of u32. */
- BUILD_BUG_ON(sizeof(long) % sizeof(u32));
-
- return jhash2(hash_key, hash_u32s, 0);
-}
-
-static int flow_key_start(const struct sw_flow_key *key)
-{
- if (key->tun_key.ipv4_dst)
- return 0;
- else
- return rounddown(offsetof(struct sw_flow_key, phy),
- sizeof(long));
-}
-
-static bool __cmp_key(const struct sw_flow_key *key1,
- const struct sw_flow_key *key2, int key_start, int key_end)
-{
- const long *cp1 = (long *)((u8 *)key1 + key_start);
- const long *cp2 = (long *)((u8 *)key2 + key_start);
- long diffs = 0;
- int i;
-
- for (i = key_start; i < key_end; i += sizeof(long))
- diffs |= *cp1++ ^ *cp2++;
-
- return diffs == 0;
-}
-
-static bool __flow_cmp_masked_key(const struct sw_flow *flow,
- const struct sw_flow_key *key, int key_start, int key_end)
-{
- return __cmp_key(&flow->key, key, key_start, key_end);
-}
-
-static bool __flow_cmp_unmasked_key(const struct sw_flow *flow,
- const struct sw_flow_key *key, int key_start, int key_end)
-{
- return __cmp_key(&flow->unmasked_key, key, key_start, key_end);
-}
-
-bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
- const struct sw_flow_key *key, int key_end)
-{
- int key_start;
- key_start = flow_key_start(key);
-
- return __flow_cmp_unmasked_key(flow, key, key_start, key_end);
-
-}
-
-struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
- struct sw_flow_match *match)
-{
- struct sw_flow_key *unmasked = match->key;
- int key_end = match->range.end;
- struct sw_flow *flow;
-
- flow = ovs_flow_lookup(table, unmasked);
- if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_end)))
- flow = NULL;
-
- return flow;
-}
-
-static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table,
- const struct sw_flow_key *unmasked,
- struct sw_flow_mask *mask)
-{
- struct sw_flow *flow;
- struct hlist_head *head;
- int key_start = mask->range.start;
- int key_end = mask->range.end;
- u32 hash;
- struct sw_flow_key masked_key;
-
- ovs_flow_key_mask(&masked_key, unmasked, mask);
- hash = ovs_flow_hash(&masked_key, key_start, key_end);
- head = find_bucket(table, hash);
- hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
- if (flow->mask == mask &&
- __flow_cmp_masked_key(flow, &masked_key,
- key_start, key_end))
- return flow;
- }
- return NULL;
-}
-
-struct sw_flow *ovs_flow_lookup(struct flow_table *tbl,
- const struct sw_flow_key *key)
-{
- struct sw_flow *flow = NULL;
- struct sw_flow_mask *mask;
-
- list_for_each_entry_rcu(mask, tbl->mask_list, list) {
- flow = ovs_masked_flow_lookup(tbl, key, mask);
- if (flow) /* Found */
- break;
- }
-
- return flow;
-}
-
-
-void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow)
-{
- flow->hash = ovs_flow_hash(&flow->key, flow->mask->range.start,
- flow->mask->range.end);
- __tbl_insert(table, flow);
-}
-
-void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow)
-{
- BUG_ON(table->count == 0);
- hlist_del_rcu(&flow->hash_node[table->node_ver]);
- table->count--;
-}
-
-/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
-const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
- [OVS_KEY_ATTR_ENCAP] = -1,
- [OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
- [OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
- [OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
- [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
- [OVS_KEY_ATTR_VLAN] = sizeof(__be16),
- [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
- [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
- [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
- [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
- [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
- [OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp),
- [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
- [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
- [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
- [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
- [OVS_KEY_ATTR_TUNNEL] = -1,
-};
-
-static bool is_all_zero(const u8 *fp, size_t size)
-{
- int i;
-
- if (!fp)
- return false;
-
- for (i = 0; i < size; i++)
- if (fp[i])
- return false;
-
- return true;
-}
-
-static int __parse_flow_nlattrs(const struct nlattr *attr,
- const struct nlattr *a[],
- u64 *attrsp, bool nz)
-{
- const struct nlattr *nla;
- u32 attrs;
- int rem;
-
- attrs = *attrsp;
- nla_for_each_nested(nla, attr, rem) {
- u16 type = nla_type(nla);
- int expected_len;
-
- if (type > OVS_KEY_ATTR_MAX) {
- OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
- type, OVS_KEY_ATTR_MAX);
- return -EINVAL;
- }
-
- if (attrs & (1 << type)) {
- OVS_NLERR("Duplicate key attribute (type %d).\n", type);
- return -EINVAL;
- }
-
- expected_len = ovs_key_lens[type];
- if (nla_len(nla) != expected_len && expected_len != -1) {
- OVS_NLERR("Key attribute has unexpected length (type=%d"
- ", length=%d, expected=%d).\n", type,
- nla_len(nla), expected_len);
- return -EINVAL;
- }
-
- if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
- attrs |= 1 << type;
- a[type] = nla;
- }
- }
- if (rem) {
- OVS_NLERR("Message has %d unknown bytes.\n", rem);
- return -EINVAL;
- }
-
- *attrsp = attrs;
- return 0;
-}
-
-static int parse_flow_mask_nlattrs(const struct nlattr *attr,
- const struct nlattr *a[], u64 *attrsp)
-{
- return __parse_flow_nlattrs(attr, a, attrsp, true);
-}
-
-static int parse_flow_nlattrs(const struct nlattr *attr,
- const struct nlattr *a[], u64 *attrsp)
-{
- return __parse_flow_nlattrs(attr, a, attrsp, false);
-}
-
-int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
- struct sw_flow_match *match, bool is_mask)
-{
- struct nlattr *a;
- int rem;
- bool ttl = false;
- __be16 tun_flags = 0;
-
- nla_for_each_nested(a, attr, rem) {
- int type = nla_type(a);
- static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
- [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
- [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
- [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
- [OVS_TUNNEL_KEY_ATTR_TOS] = 1,
- [OVS_TUNNEL_KEY_ATTR_TTL] = 1,
- [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
- [OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
- };
-
- if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
- OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n",
- type, OVS_TUNNEL_KEY_ATTR_MAX);
- return -EINVAL;
- }
-
- if (ovs_tunnel_key_lens[type] != nla_len(a)) {
- OVS_NLERR("IPv4 tunnel attribute type has unexpected "
- " length (type=%d, length=%d, expected=%d).\n",
- type, nla_len(a), ovs_tunnel_key_lens[type]);
- return -EINVAL;
- }
-
- switch (type) {
- case OVS_TUNNEL_KEY_ATTR_ID:
- SW_FLOW_KEY_PUT(match, tun_key.tun_id,
- nla_get_be64(a), is_mask);
- tun_flags |= TUNNEL_KEY;
- break;
- case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
- SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
- nla_get_be32(a), is_mask);
- break;
- case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
- SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
- nla_get_be32(a), is_mask);
- break;
- case OVS_TUNNEL_KEY_ATTR_TOS:
- SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
- nla_get_u8(a), is_mask);
- break;
- case OVS_TUNNEL_KEY_ATTR_TTL:
- SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
- nla_get_u8(a), is_mask);
- ttl = true;
- break;
- case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
- tun_flags |= TUNNEL_DONT_FRAGMENT;
- break;
- case OVS_TUNNEL_KEY_ATTR_CSUM:
- tun_flags |= TUNNEL_CSUM;
- break;
- default:
- return -EINVAL;
- }
- }
-
- SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
-
- if (rem > 0) {
- OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem);
- return -EINVAL;
- }
-
- if (!is_mask) {
- if (!match->key->tun_key.ipv4_dst) {
- OVS_NLERR("IPv4 tunnel destination address is zero.\n");
- return -EINVAL;
- }
-
- if (!ttl) {
- OVS_NLERR("IPv4 tunnel TTL not specified.\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
- const struct ovs_key_ipv4_tunnel *tun_key,
- const struct ovs_key_ipv4_tunnel *output)
-{
- struct nlattr *nla;
-
- nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
- if (!nla)
- return -EMSGSIZE;
-
- if (output->tun_flags & TUNNEL_KEY &&
- nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
- return -EMSGSIZE;
- if (output->ipv4_src &&
- nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
- return -EMSGSIZE;
- if (output->ipv4_dst &&
- nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
- return -EMSGSIZE;
- if (output->ipv4_tos &&
- nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
- return -EMSGSIZE;
- if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
- return -EMSGSIZE;
- if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
- nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
- return -EMSGSIZE;
- if ((output->tun_flags & TUNNEL_CSUM) &&
- nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
- return -EMSGSIZE;
-
- nla_nest_end(skb, nla);
- return 0;
-}
-
-static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
- const struct nlattr **a, bool is_mask)
-{
- if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
- SW_FLOW_KEY_PUT(match, phy.priority,
- nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
- *attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
- }
-
- if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
- u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
-
- if (is_mask)
- in_port = 0xffffffff; /* Always exact match in_port. */
- else if (in_port >= DP_MAX_PORTS)
- return -EINVAL;
-
- SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
- *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
- } else if (!is_mask) {
- SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
- }
-
- if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
- uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
-
- SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
- *attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
- }
- if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
- if (ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
- is_mask))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
- }
- return 0;
-}
-
-static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
- const struct nlattr **a, bool is_mask)
-{
- int err;
- u64 orig_attrs = attrs;
-
- err = metadata_from_nlattrs(match, &attrs, a, is_mask);
- if (err)
- return err;
-
- if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
- const struct ovs_key_ethernet *eth_key;
-
- eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
- SW_FLOW_KEY_MEMCPY(match, eth.src,
- eth_key->eth_src, ETH_ALEN, is_mask);
- SW_FLOW_KEY_MEMCPY(match, eth.dst,
- eth_key->eth_dst, ETH_ALEN, is_mask);
- attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
- }
-
- if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
- __be16 tci;
-
- tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
- if (!(tci & htons(VLAN_TAG_PRESENT))) {
- if (is_mask)
- OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
- else
- OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");
-
- return -EINVAL;
- }
-
- SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
- attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
- } else if (!is_mask)
- SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
-
- if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
- __be16 eth_type;
-
- eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
- if (is_mask) {
- /* Always exact match EtherType. */
- eth_type = htons(0xffff);
- } else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
- OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
- ntohs(eth_type), ETH_P_802_3_MIN);
- return -EINVAL;
- }
-
- SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
- attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
- } else if (!is_mask) {
- SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
- }
-
- if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
- const struct ovs_key_ipv4 *ipv4_key;
-
- ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
- if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
- OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
- ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
- return -EINVAL;
- }
- SW_FLOW_KEY_PUT(match, ip.proto,
- ipv4_key->ipv4_proto, is_mask);
- SW_FLOW_KEY_PUT(match, ip.tos,
- ipv4_key->ipv4_tos, is_mask);
- SW_FLOW_KEY_PUT(match, ip.ttl,
- ipv4_key->ipv4_ttl, is_mask);
- SW_FLOW_KEY_PUT(match, ip.frag,
- ipv4_key->ipv4_frag, is_mask);
- SW_FLOW_KEY_PUT(match, ipv4.addr.src,
- ipv4_key->ipv4_src, is_mask);
- SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
- ipv4_key->ipv4_dst, is_mask);
- attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
- }
-
- if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
- const struct ovs_key_ipv6 *ipv6_key;
-
- ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
- if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
- OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
- ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
- return -EINVAL;
- }
- SW_FLOW_KEY_PUT(match, ipv6.label,
- ipv6_key->ipv6_label, is_mask);
- SW_FLOW_KEY_PUT(match, ip.proto,
- ipv6_key->ipv6_proto, is_mask);
- SW_FLOW_KEY_PUT(match, ip.tos,
- ipv6_key->ipv6_tclass, is_mask);
- SW_FLOW_KEY_PUT(match, ip.ttl,
- ipv6_key->ipv6_hlimit, is_mask);
- SW_FLOW_KEY_PUT(match, ip.frag,
- ipv6_key->ipv6_frag, is_mask);
- SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
- ipv6_key->ipv6_src,
- sizeof(match->key->ipv6.addr.src),
- is_mask);
- SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
- ipv6_key->ipv6_dst,
- sizeof(match->key->ipv6.addr.dst),
- is_mask);
-
- attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
- }
-
- if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
- const struct ovs_key_arp *arp_key;
-
- arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
- if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
- OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
- arp_key->arp_op);
- return -EINVAL;
- }
-
- SW_FLOW_KEY_PUT(match, ipv4.addr.src,
- arp_key->arp_sip, is_mask);
- SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
- arp_key->arp_tip, is_mask);
- SW_FLOW_KEY_PUT(match, ip.proto,
- ntohs(arp_key->arp_op), is_mask);
- SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
- arp_key->arp_sha, ETH_ALEN, is_mask);
- SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
- arp_key->arp_tha, ETH_ALEN, is_mask);
-
- attrs &= ~(1 << OVS_KEY_ATTR_ARP);
- }
-
- if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
- const struct ovs_key_tcp *tcp_key;
-
- tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
- if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
- SW_FLOW_KEY_PUT(match, ipv4.tp.src,
- tcp_key->tcp_src, is_mask);
- SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
- tcp_key->tcp_dst, is_mask);
- } else {
- SW_FLOW_KEY_PUT(match, ipv6.tp.src,
- tcp_key->tcp_src, is_mask);
- SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
- tcp_key->tcp_dst, is_mask);
- }
- attrs &= ~(1 << OVS_KEY_ATTR_TCP);
- }
-
- if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
- const struct ovs_key_udp *udp_key;
-
- udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
- if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
- SW_FLOW_KEY_PUT(match, ipv4.tp.src,
- udp_key->udp_src, is_mask);
- SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
- udp_key->udp_dst, is_mask);
- } else {
- SW_FLOW_KEY_PUT(match, ipv6.tp.src,
- udp_key->udp_src, is_mask);
- SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
- udp_key->udp_dst, is_mask);
- }
- attrs &= ~(1 << OVS_KEY_ATTR_UDP);
- }
-
- if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
- const struct ovs_key_sctp *sctp_key;
-
- sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
- if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
- SW_FLOW_KEY_PUT(match, ipv4.tp.src,
- sctp_key->sctp_src, is_mask);
- SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
- sctp_key->sctp_dst, is_mask);
- } else {
- SW_FLOW_KEY_PUT(match, ipv6.tp.src,
- sctp_key->sctp_src, is_mask);
- SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
- sctp_key->sctp_dst, is_mask);
- }
- attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
- }
-
- if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
- const struct ovs_key_icmp *icmp_key;
-
- icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
- SW_FLOW_KEY_PUT(match, ipv4.tp.src,
- htons(icmp_key->icmp_type), is_mask);
- SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
- htons(icmp_key->icmp_code), is_mask);
- attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
- }
-
- if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
- const struct ovs_key_icmpv6 *icmpv6_key;
-
- icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
- SW_FLOW_KEY_PUT(match, ipv6.tp.src,
- htons(icmpv6_key->icmpv6_type), is_mask);
- SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
- htons(icmpv6_key->icmpv6_code), is_mask);
- attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
- }
-
- if (attrs & (1 << OVS_KEY_ATTR_ND)) {
- const struct ovs_key_nd *nd_key;
-
- nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
- SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
- nd_key->nd_target,
- sizeof(match->key->ipv6.nd.target),
- is_mask);
- SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
- nd_key->nd_sll, ETH_ALEN, is_mask);
- SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
- nd_key->nd_tll, ETH_ALEN, is_mask);
- attrs &= ~(1 << OVS_KEY_ATTR_ND);
- }
-
- if (attrs != 0)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * ovs_match_from_nlattrs - parses Netlink attributes into a flow key and
- * mask. In case the 'mask' is NULL, the flow is treated as exact match
- * flow. Otherwise, it is treated as a wildcarded flow, except the mask
- * does not include any don't care bit.
- * @match: receives the extracted flow match information.
- * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
- * sequence. The fields should of the packet that triggered the creation
- * of this flow.
- * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
- * attribute specifies the mask field of the wildcarded flow.
- */
-int ovs_match_from_nlattrs(struct sw_flow_match *match,
- const struct nlattr *key,
- const struct nlattr *mask)
-{
- const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
- const struct nlattr *encap;
- u64 key_attrs = 0;
- u64 mask_attrs = 0;
- bool encap_valid = false;
- int err;
-
- err = parse_flow_nlattrs(key, a, &key_attrs);
- if (err)
- return err;
-
- if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
- (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
- (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
- __be16 tci;
-
- if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
- (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
- OVS_NLERR("Invalid Vlan frame.\n");
- return -EINVAL;
- }
-
- key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
- tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
- encap = a[OVS_KEY_ATTR_ENCAP];
- key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
- encap_valid = true;
-
- if (tci & htons(VLAN_TAG_PRESENT)) {
- err = parse_flow_nlattrs(encap, a, &key_attrs);
- if (err)
- return err;
- } else if (!tci) {
- /* Corner case for truncated 802.1Q header. */
- if (nla_len(encap)) {
- OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n");
- return -EINVAL;
- }
- } else {
- OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n");
- return -EINVAL;
- }
- }
-
- err = ovs_key_from_nlattrs(match, key_attrs, a, false);
- if (err)
- return err;
-
- if (mask) {
- err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
- if (err)
- return err;
-
- if (mask_attrs & 1ULL << OVS_KEY_ATTR_ENCAP) {
- __be16 eth_type = 0;
- __be16 tci = 0;
-
- if (!encap_valid) {
- OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n");
- return -EINVAL;
- }
-
- mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
- if (a[OVS_KEY_ATTR_ETHERTYPE])
- eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
-
- if (eth_type == htons(0xffff)) {
- mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
- encap = a[OVS_KEY_ATTR_ENCAP];
- err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
- } else {
- OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n",
- ntohs(eth_type));
- return -EINVAL;
- }
-
- if (a[OVS_KEY_ATTR_VLAN])
- tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
-
- if (!(tci & htons(VLAN_TAG_PRESENT))) {
- OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci));
- return -EINVAL;
- }
- }
-
- err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
- if (err)
- return err;
- } else {
- /* Populate exact match flow's key mask. */
- if (match->mask)
- ovs_sw_flow_mask_set(match->mask, &match->range, 0xff);
- }
-
- if (!ovs_match_validate(match, key_attrs, mask_attrs))
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
- * @flow: Receives extracted in_port, priority, tun_key and skb_mark.
- * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
- * sequence.
- *
- * This parses a series of Netlink attributes that form a flow key, which must
- * take the same form accepted by flow_from_nlattrs(), but only enough of it to
- * get the metadata, that is, the parts of the flow key that cannot be
- * extracted from the packet itself.
- */
-
-int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow,
- const struct nlattr *attr)
-{
- struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
- const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
- u64 attrs = 0;
- int err;
- struct sw_flow_match match;
-
- flow->key.phy.in_port = DP_MAX_PORTS;
- flow->key.phy.priority = 0;
- flow->key.phy.skb_mark = 0;
- memset(tun_key, 0, sizeof(flow->key.tun_key));
-
- err = parse_flow_nlattrs(attr, a, &attrs);
- if (err)
- return -EINVAL;
-
- memset(&match, 0, sizeof(match));
- match.key = &flow->key;
-
- err = metadata_from_nlattrs(&match, &attrs, a, false);
- if (err)
- return err;
-
- return 0;
-}
-
-int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey,
- const struct sw_flow_key *output, struct sk_buff *skb)
-{
- struct ovs_key_ethernet *eth_key;
- struct nlattr *nla, *encap;
- bool is_mask = (swkey != output);
-
- if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
- goto nla_put_failure;
-
- if ((swkey->tun_key.ipv4_dst || is_mask) &&
- ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key))
- goto nla_put_failure;
-
- if (swkey->phy.in_port == DP_MAX_PORTS) {
- if (is_mask && (output->phy.in_port == 0xffff))
- if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
- goto nla_put_failure;
- } else {
- u16 upper_u16;
- upper_u16 = !is_mask ? 0 : 0xffff;
-
- if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
- (upper_u16 << 16) | output->phy.in_port))
- goto nla_put_failure;
- }
-
- if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
- goto nla_put_failure;
-
- nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
- if (!nla)
- goto nla_put_failure;
-
- eth_key = nla_data(nla);
- memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN);
- memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN);
-
- if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
- __be16 eth_type;
- eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
- if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
- nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
- goto nla_put_failure;
- encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
- if (!swkey->eth.tci)
- goto unencap;
- } else
- encap = NULL;
-
- if (swkey->eth.type == htons(ETH_P_802_2)) {
- /*
- * Ethertype 802.2 is represented in the netlink with omitted
- * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
- * 0xffff in the mask attribute. Ethertype can also
- * be wildcarded.
- */
- if (is_mask && output->eth.type)
- if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
- output->eth.type))
- goto nla_put_failure;
- goto unencap;
- }
-
- if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
- goto nla_put_failure;
-
- if (swkey->eth.type == htons(ETH_P_IP)) {
- struct ovs_key_ipv4 *ipv4_key;
-
- nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
- if (!nla)
- goto nla_put_failure;
- ipv4_key = nla_data(nla);
- ipv4_key->ipv4_src = output->ipv4.addr.src;
- ipv4_key->ipv4_dst = output->ipv4.addr.dst;
- ipv4_key->ipv4_proto = output->ip.proto;
- ipv4_key->ipv4_tos = output->ip.tos;
- ipv4_key->ipv4_ttl = output->ip.ttl;
- ipv4_key->ipv4_frag = output->ip.frag;
- } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
- struct ovs_key_ipv6 *ipv6_key;
-
- nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
- if (!nla)
- goto nla_put_failure;
- ipv6_key = nla_data(nla);
- memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
- sizeof(ipv6_key->ipv6_src));
- memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
- sizeof(ipv6_key->ipv6_dst));
- ipv6_key->ipv6_label = output->ipv6.label;
- ipv6_key->ipv6_proto = output->ip.proto;
- ipv6_key->ipv6_tclass = output->ip.tos;
- ipv6_key->ipv6_hlimit = output->ip.ttl;
- ipv6_key->ipv6_frag = output->ip.frag;
- } else if (swkey->eth.type == htons(ETH_P_ARP) ||
- swkey->eth.type == htons(ETH_P_RARP)) {
- struct ovs_key_arp *arp_key;
-
- nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
- if (!nla)
- goto nla_put_failure;
- arp_key = nla_data(nla);
- memset(arp_key, 0, sizeof(struct ovs_key_arp));
- arp_key->arp_sip = output->ipv4.addr.src;
- arp_key->arp_tip = output->ipv4.addr.dst;
- arp_key->arp_op = htons(output->ip.proto);
- memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN);
- memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN);
- }
-
- if ((swkey->eth.type == htons(ETH_P_IP) ||
- swkey->eth.type == htons(ETH_P_IPV6)) &&
- swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
-
- if (swkey->ip.proto == IPPROTO_TCP) {
- struct ovs_key_tcp *tcp_key;
-
- nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
- if (!nla)
- goto nla_put_failure;
- tcp_key = nla_data(nla);
- if (swkey->eth.type == htons(ETH_P_IP)) {
- tcp_key->tcp_src = output->ipv4.tp.src;
- tcp_key->tcp_dst = output->ipv4.tp.dst;
- } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
- tcp_key->tcp_src = output->ipv6.tp.src;
- tcp_key->tcp_dst = output->ipv6.tp.dst;
- }
- } else if (swkey->ip.proto == IPPROTO_UDP) {
- struct ovs_key_udp *udp_key;
-
- nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
- if (!nla)
- goto nla_put_failure;
- udp_key = nla_data(nla);
- if (swkey->eth.type == htons(ETH_P_IP)) {
- udp_key->udp_src = output->ipv4.tp.src;
- udp_key->udp_dst = output->ipv4.tp.dst;
- } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
- udp_key->udp_src = output->ipv6.tp.src;
- udp_key->udp_dst = output->ipv6.tp.dst;
- }
- } else if (swkey->ip.proto == IPPROTO_SCTP) {
- struct ovs_key_sctp *sctp_key;
-
- nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
- if (!nla)
- goto nla_put_failure;
- sctp_key = nla_data(nla);
- if (swkey->eth.type == htons(ETH_P_IP)) {
- sctp_key->sctp_src = swkey->ipv4.tp.src;
- sctp_key->sctp_dst = swkey->ipv4.tp.dst;
- } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
- sctp_key->sctp_src = swkey->ipv6.tp.src;
- sctp_key->sctp_dst = swkey->ipv6.tp.dst;
- }
- } else if (swkey->eth.type == htons(ETH_P_IP) &&
- swkey->ip.proto == IPPROTO_ICMP) {
- struct ovs_key_icmp *icmp_key;
-
- nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
- if (!nla)
- goto nla_put_failure;
- icmp_key = nla_data(nla);
- icmp_key->icmp_type = ntohs(output->ipv4.tp.src);
- icmp_key->icmp_code = ntohs(output->ipv4.tp.dst);
- } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
- swkey->ip.proto == IPPROTO_ICMPV6) {
- struct ovs_key_icmpv6 *icmpv6_key;
-
- nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
- sizeof(*icmpv6_key));
- if (!nla)
- goto nla_put_failure;
- icmpv6_key = nla_data(nla);
- icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src);
- icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst);
-
- if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
- icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
- struct ovs_key_nd *nd_key;
-
- nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
- if (!nla)
- goto nla_put_failure;
- nd_key = nla_data(nla);
- memcpy(nd_key->nd_target, &output->ipv6.nd.target,
- sizeof(nd_key->nd_target));
- memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN);
- memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN);
- }
- }
- }
-
-unencap:
- if (encap)
- nla_nest_end(skb, encap);
-
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
-}
-
-/* Initializes the flow module.
- * Returns zero if successful or a negative error code. */
-int ovs_flow_init(void)
-{
- BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
- BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
-
- flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
- 0, NULL);
- if (flow_cache == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-/* Uninitializes the flow module. */
-void ovs_flow_exit(void)
-{
- kmem_cache_destroy(flow_cache);
-}
-
-struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
-{
- struct sw_flow_mask *mask;
-
- mask = kmalloc(sizeof(*mask), GFP_KERNEL);
- if (mask)
- mask->ref_count = 0;
-
- return mask;
-}
-
-void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
-{
- mask->ref_count++;
-}
-
-void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
-{
- if (!mask)
- return;
-
- BUG_ON(!mask->ref_count);
- mask->ref_count--;
-
- if (!mask->ref_count) {
- list_del_rcu(&mask->list);
- if (deferred)
- kfree_rcu(mask, rcu);
- else
- kfree(mask);
- }
-}
-
-static bool ovs_sw_flow_mask_equal(const struct sw_flow_mask *a,
- const struct sw_flow_mask *b)
-{
- u8 *a_ = (u8 *)&a->key + a->range.start;
- u8 *b_ = (u8 *)&b->key + b->range.start;
-
- return (a->range.end == b->range.end)
- && (a->range.start == b->range.start)
- && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
-}
-
-struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
- const struct sw_flow_mask *mask)
-{
- struct list_head *ml;
-
- list_for_each(ml, tbl->mask_list) {
- struct sw_flow_mask *m;
- m = container_of(ml, struct sw_flow_mask, list);
- if (ovs_sw_flow_mask_equal(mask, m))
- return m;
- }
-
- return NULL;
-}
-
-/**
- * add a new mask into the mask list.
- * The caller needs to make sure that 'mask' is not the same
- * as any masks that are already on the list.
- */
-void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
-{
- list_add_rcu(&mask->list, tbl->mask_list);
-}
-
-/**
- * Set 'range' fields in the mask to the value of 'val'.
- */
-static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
- struct sw_flow_key_range *range, u8 val)
-{
- u8 *m = (u8 *)&mask->key + range->start;
-
- mask->range = *range;
- memset(m, val, range_n_bytes(range));
-}
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 212fbf7510c4..1510f51dbf74 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -33,14 +33,6 @@
#include <net/inet_ecn.h>
struct sk_buff;
-struct sw_flow_mask;
-struct flow_table;
-
-struct sw_flow_actions {
- struct rcu_head rcu;
- u32 actions_len;
- struct nlattr actions[];
-};
/* Used to memset ovs_key_ipv4_tunnel padding. */
#define OVS_TUNNEL_KEY_SIZE \
@@ -101,6 +93,7 @@ struct sw_flow_key {
struct {
__be16 src; /* TCP/UDP/SCTP source port. */
__be16 dst; /* TCP/UDP/SCTP destination port. */
+ __be16 flags; /* TCP flags. */
} tp;
struct {
u8 sha[ETH_ALEN]; /* ARP source hardware address. */
@@ -117,6 +110,7 @@ struct sw_flow_key {
struct {
__be16 src; /* TCP/UDP/SCTP source port. */
__be16 dst; /* TCP/UDP/SCTP destination port. */
+ __be16 flags; /* TCP flags. */
} tp;
struct {
struct in6_addr target; /* ND target address. */
@@ -127,6 +121,31 @@ struct sw_flow_key {
};
} __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
+struct sw_flow_key_range {
+ size_t start;
+ size_t end;
+};
+
+struct sw_flow_mask {
+ int ref_count;
+ struct rcu_head rcu;
+ struct list_head list;
+ struct sw_flow_key_range range;
+ struct sw_flow_key key;
+};
+
+struct sw_flow_match {
+ struct sw_flow_key *key;
+ struct sw_flow_key_range range;
+ struct sw_flow_mask *mask;
+};
+
+struct sw_flow_actions {
+ struct rcu_head rcu;
+ u32 actions_len;
+ struct nlattr actions[];
+};
+
struct sw_flow {
struct rcu_head rcu;
struct hlist_node hash_node[2];
@@ -141,23 +160,9 @@ struct sw_flow {
unsigned long used; /* Last used time (in jiffies). */
u64 packet_count; /* Number of packets matched. */
u64 byte_count; /* Number of bytes matched. */
- u8 tcp_flags; /* Union of seen TCP flags. */
-};
-
-struct sw_flow_key_range {
- size_t start;
- size_t end;
+ __be16 tcp_flags; /* Union of seen TCP flags. */
};
-struct sw_flow_match {
- struct sw_flow_key *key;
- struct sw_flow_key_range range;
- struct sw_flow_mask *mask;
-};
-
-void ovs_match_init(struct sw_flow_match *match,
- struct sw_flow_key *key, struct sw_flow_mask *mask);
-
struct arp_eth_header {
__be16 ar_hrd; /* format of hardware address */
__be16 ar_pro; /* format of protocol address */
@@ -172,88 +177,9 @@ struct arp_eth_header {
unsigned char ar_tip[4]; /* target IP address */
} __packed;
-int ovs_flow_init(void);
-void ovs_flow_exit(void);
-
-struct sw_flow *ovs_flow_alloc(void);
-void ovs_flow_deferred_free(struct sw_flow *);
-void ovs_flow_free(struct sw_flow *, bool deferred);
-
-struct sw_flow_actions *ovs_flow_actions_alloc(int actions_len);
-void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
-
-int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
void ovs_flow_used(struct sw_flow *, struct sk_buff *);
u64 ovs_flow_used_time(unsigned long flow_jiffies);
-int ovs_flow_to_nlattrs(const struct sw_flow_key *,
- const struct sw_flow_key *, struct sk_buff *);
-int ovs_match_from_nlattrs(struct sw_flow_match *match,
- const struct nlattr *,
- const struct nlattr *);
-int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow,
- const struct nlattr *attr);
-#define MAX_ACTIONS_BUFSIZE (32 * 1024)
-#define TBL_MIN_BUCKETS 1024
-
-struct flow_table {
- struct flex_array *buckets;
- unsigned int count, n_buckets;
- struct rcu_head rcu;
- struct list_head *mask_list;
- int node_ver;
- u32 hash_seed;
- bool keep_flows;
-};
-
-static inline int ovs_flow_tbl_count(struct flow_table *table)
-{
- return table->count;
-}
-
-static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table)
-{
- return (table->count > table->n_buckets);
-}
-
-struct sw_flow *ovs_flow_lookup(struct flow_table *,
- const struct sw_flow_key *);
-struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
- struct sw_flow_match *match);
-
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
-struct flow_table *ovs_flow_tbl_alloc(int new_size);
-struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
-struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
-
-void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow);
-void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow);
-
-struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *idx);
-extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1];
-int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
- struct sw_flow_match *match, bool is_mask);
-int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
- const struct ovs_key_ipv4_tunnel *tun_key,
- const struct ovs_key_ipv4_tunnel *output);
-
-bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
- const struct sw_flow_key *key, int key_end);
-
-struct sw_flow_mask {
- int ref_count;
- struct rcu_head rcu;
- struct list_head list;
- struct sw_flow_key_range range;
- struct sw_flow_key key;
-};
+int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
-struct sw_flow_mask *ovs_sw_flow_mask_alloc(void);
-void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *);
-void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *, bool deferred);
-void ovs_sw_flow_mask_insert(struct flow_table *, struct sw_flow_mask *);
-struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *,
- const struct sw_flow_mask *);
-void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
- const struct sw_flow_mask *mask);
#endif /* flow.h */
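
Editorial note on the flow.h hunk above: both the IPv4 and the IPv6 half of struct sw_flow_key gain a network-order tp.flags field, and the per-flow statistic sw_flow->tcp_flags widens from u8 to __be16 to match. A minimal sketch of how the union of seen TCP flags would then accumulate (hypothetical helper; the real update lives in the flow statistics path, which is not part of this hunk):

static void example_accumulate_tcp_flags(struct sw_flow *flow,
					 const struct sw_flow_key *key)
{
	/* Both sides are __be16, so the running union of observed TCP
	 * flags can be OR-ed together without byte-order conversion. */
	if (key->eth.type == htons(ETH_P_IP))
		flow->tcp_flags |= key->ipv4.tp.flags;
	else if (key->eth.type == htons(ETH_P_IPV6))
		flow->tcp_flags |= key->ipv6.tp.flags;
}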
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
new file mode 100644
index 000000000000..2bc1bc1aca3b
--- /dev/null
+++ b/net/openvswitch/flow_netlink.c
@@ -0,0 +1,1630 @@
+/*
+ * Copyright (c) 2007-2013 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "flow.h"
+#include "datapath.h"
+#include <linux/uaccess.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/llc_pdu.h>
+#include <linux/kernel.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/llc.h>
+#include <linux/module.h>
+#include <linux/in.h>
+#include <linux/rcupdate.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/sctp.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/rculist.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ndisc.h>
+
+#include "flow_netlink.h"
+
+static void update_range__(struct sw_flow_match *match,
+ size_t offset, size_t size, bool is_mask)
+{
+ struct sw_flow_key_range *range = NULL;
+ size_t start = rounddown(offset, sizeof(long));
+ size_t end = roundup(offset + size, sizeof(long));
+
+ if (!is_mask)
+ range = &match->range;
+ else if (match->mask)
+ range = &match->mask->range;
+
+ if (!range)
+ return;
+
+ if (range->start == range->end) {
+ range->start = start;
+ range->end = end;
+ return;
+ }
+
+ if (range->start > start)
+ range->start = start;
+
+ if (range->end < end)
+ range->end = end;
+}
+
+#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
+ do { \
+ update_range__(match, offsetof(struct sw_flow_key, field), \
+ sizeof((match)->key->field), is_mask); \
+ if (is_mask) { \
+ if ((match)->mask) \
+ (match)->mask->key.field = value; \
+ } else { \
+ (match)->key->field = value; \
+ } \
+ } while (0)
+
+#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
+ do { \
+ update_range__(match, offsetof(struct sw_flow_key, field), \
+ len, is_mask); \
+ if (is_mask) { \
+ if ((match)->mask) \
+ memcpy(&(match)->mask->key.field, value_p, len);\
+ } else { \
+ memcpy(&(match)->key->field, value_p, len); \
+ } \
+ } while (0)
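
Editorial illustration (not part of the patch): SW_FLOW_KEY_PUT() does two things at once, widening the match range to long-aligned boundaries so masked lookups can later compare whole longs, and storing the value into either the key or the mask. Expanded by hand for a non-mask store it behaves like the sketch below; suppose, purely for illustration, that phy.priority sat at byte offset 36 on a 64-bit build, so the range would grow to cover bytes [32, 40):

static void example_put_priority(struct sw_flow_match *match, u32 prio)
{
	/* Equivalent of SW_FLOW_KEY_PUT(match, phy.priority, prio, false). */
	update_range__(match, offsetof(struct sw_flow_key, phy.priority),
		       sizeof(match->key->phy.priority), false);
	match->key->phy.priority = prio;
}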
+
+static u16 range_n_bytes(const struct sw_flow_key_range *range)
+{
+ return range->end - range->start;
+}
+
+static bool match_validate(const struct sw_flow_match *match,
+ u64 key_attrs, u64 mask_attrs)
+{
+ u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET;
+ u64 mask_allowed = key_attrs; /* At most allow all key attributes */
+
+ /* The following mask attributes are allowed only if they
+ * pass the validation tests. */
+ mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
+ | (1 << OVS_KEY_ATTR_IPV6)
+ | (1 << OVS_KEY_ATTR_TCP)
+ | (1 << OVS_KEY_ATTR_TCP_FLAGS)
+ | (1 << OVS_KEY_ATTR_UDP)
+ | (1 << OVS_KEY_ATTR_SCTP)
+ | (1 << OVS_KEY_ATTR_ICMP)
+ | (1 << OVS_KEY_ATTR_ICMPV6)
+ | (1 << OVS_KEY_ATTR_ARP)
+ | (1 << OVS_KEY_ATTR_ND));
+
+ /* Always allowed mask fields. */
+ mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
+ | (1 << OVS_KEY_ATTR_IN_PORT)
+ | (1 << OVS_KEY_ATTR_ETHERTYPE));
+
+ /* Check key attributes. */
+ if (match->key->eth.type == htons(ETH_P_ARP)
+ || match->key->eth.type == htons(ETH_P_RARP)) {
+ key_expected |= 1 << OVS_KEY_ATTR_ARP;
+ if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+ mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
+ }
+
+ if (match->key->eth.type == htons(ETH_P_IP)) {
+ key_expected |= 1 << OVS_KEY_ATTR_IPV4;
+ if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+ mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;
+
+ if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
+ if (match->key->ip.proto == IPPROTO_UDP) {
+ key_expected |= 1 << OVS_KEY_ATTR_UDP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_SCTP) {
+ key_expected |= 1 << OVS_KEY_ATTR_SCTP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_TCP) {
+ key_expected |= 1 << OVS_KEY_ATTR_TCP;
+ key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
+ if (match->mask && (match->mask->key.ip.proto == 0xff)) {
+ mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
+ mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
+ }
+ }
+
+ if (match->key->ip.proto == IPPROTO_ICMP) {
+ key_expected |= 1 << OVS_KEY_ATTR_ICMP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
+ }
+ }
+ }
+
+ if (match->key->eth.type == htons(ETH_P_IPV6)) {
+ key_expected |= 1 << OVS_KEY_ATTR_IPV6;
+ if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+ mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;
+
+ if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
+ if (match->key->ip.proto == IPPROTO_UDP) {
+ key_expected |= 1 << OVS_KEY_ATTR_UDP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_SCTP) {
+ key_expected |= 1 << OVS_KEY_ATTR_SCTP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_TCP) {
+ key_expected |= 1 << OVS_KEY_ATTR_TCP;
+ key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
+ if (match->mask && (match->mask->key.ip.proto == 0xff)) {
+ mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
+ mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
+ }
+ }
+
+ if (match->key->ip.proto == IPPROTO_ICMPV6) {
+ key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;
+
+ if (match->key->ipv6.tp.src ==
+ htons(NDISC_NEIGHBOUR_SOLICITATION) ||
+ match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+ key_expected |= 1 << OVS_KEY_ATTR_ND;
+ if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff)))
+ mask_allowed |= 1 << OVS_KEY_ATTR_ND;
+ }
+ }
+ }
+ }
+
+ if ((key_attrs & key_expected) != key_expected) {
+ /* Key attributes check failed. */
+ OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
+ key_attrs, key_expected);
+ return false;
+ }
+
+ if ((mask_attrs & mask_allowed) != mask_attrs) {
+ /* Mask attributes check failed. */
+ OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n",
+ mask_attrs, mask_allowed);
+ return false;
+ }
+
+ return true;
+}
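
Editorial illustration (not part of the patch): for a TCP-over-IPv4 flow whose mask makes eth.type and ip.proto exact (0xffff and 0xff), the logic above reduces to requiring at least the following key attribute bits, and it permits the same bits, plus the always-allowed TUNNEL, IN_PORT and ETHERTYPE bits, in the mask:

/* Minimum key_attrs that match_validate() expects in that case. */
static const u64 example_tcp_v4_key_expected =
	(1 << OVS_KEY_ATTR_ETHERNET) |
	(1 << OVS_KEY_ATTR_IPV4) |
	(1 << OVS_KEY_ATTR_TCP) |
	(1 << OVS_KEY_ATTR_TCP_FLAGS);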
+
+/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
+static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
+ [OVS_KEY_ATTR_ENCAP] = -1,
+ [OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
+ [OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
+ [OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
+ [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
+ [OVS_KEY_ATTR_VLAN] = sizeof(__be16),
+ [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
+ [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
+ [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
+ [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
+ [OVS_KEY_ATTR_TCP_FLAGS] = sizeof(__be16),
+ [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
+ [OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp),
+ [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
+ [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
+ [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
+ [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
+ [OVS_KEY_ATTR_TUNNEL] = -1,
+};
+
+static bool is_all_zero(const u8 *fp, size_t size)
+{
+ int i;
+
+ if (!fp)
+ return false;
+
+ for (i = 0; i < size; i++)
+ if (fp[i])
+ return false;
+
+ return true;
+}
+
+static int __parse_flow_nlattrs(const struct nlattr *attr,
+ const struct nlattr *a[],
+ u64 *attrsp, bool nz)
+{
+ const struct nlattr *nla;
+ u64 attrs;
+ int rem;
+
+ attrs = *attrsp;
+ nla_for_each_nested(nla, attr, rem) {
+ u16 type = nla_type(nla);
+ int expected_len;
+
+ if (type > OVS_KEY_ATTR_MAX) {
+ OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
+ type, OVS_KEY_ATTR_MAX);
+ return -EINVAL;
+ }
+
+ if (attrs & (1 << type)) {
+ OVS_NLERR("Duplicate key attribute (type %d).\n", type);
+ return -EINVAL;
+ }
+
+ expected_len = ovs_key_lens[type];
+ if (nla_len(nla) != expected_len && expected_len != -1) {
+ OVS_NLERR("Key attribute has unexpected length (type=%d"
+ ", length=%d, expected=%d).\n", type,
+ nla_len(nla), expected_len);
+ return -EINVAL;
+ }
+
+ if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
+ attrs |= 1 << type;
+ a[type] = nla;
+ }
+ }
+ if (rem) {
+ OVS_NLERR("Message has %d unknown bytes.\n", rem);
+ return -EINVAL;
+ }
+
+ *attrsp = attrs;
+ return 0;
+}
+
+static int parse_flow_mask_nlattrs(const struct nlattr *attr,
+ const struct nlattr *a[], u64 *attrsp)
+{
+ return __parse_flow_nlattrs(attr, a, attrsp, true);
+}
+
+static int parse_flow_nlattrs(const struct nlattr *attr,
+ const struct nlattr *a[], u64 *attrsp)
+{
+ return __parse_flow_nlattrs(attr, a, attrsp, false);
+}
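
Editorial note: the only difference between the two wrappers is the nz flag. When a mask is parsed, __parse_flow_nlattrs() silently drops any attribute whose payload is all zeroes, so that field simply stays fully wildcarded instead of being counted against the per-protocol mask checks in match_validate(). A small sketch of that test in isolation (hypothetical helper):

static bool example_mask_attr_is_fully_wildcarded(const struct nlattr *nla)
{
	/* Mirrors the nz handling above: an all-zero mask attribute is
	 * ignored, leaving the corresponding field wildcarded. */
	return is_all_zero(nla_data(nla), nla_len(nla));
}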
+
+static int ipv4_tun_from_nlattr(const struct nlattr *attr,
+ struct sw_flow_match *match, bool is_mask)
+{
+ struct nlattr *a;
+ int rem;
+ bool ttl = false;
+ __be16 tun_flags = 0;
+
+ nla_for_each_nested(a, attr, rem) {
+ int type = nla_type(a);
+ static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
+ [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
+ [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
+ [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
+ [OVS_TUNNEL_KEY_ATTR_TOS] = 1,
+ [OVS_TUNNEL_KEY_ATTR_TTL] = 1,
+ [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
+ [OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
+ };
+
+ if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
+ OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n",
+ type, OVS_TUNNEL_KEY_ATTR_MAX);
+ return -EINVAL;
+ }
+
+ if (ovs_tunnel_key_lens[type] != nla_len(a)) {
+ OVS_NLERR("IPv4 tunnel attribute type has unexpected "
+ " length (type=%d, length=%d, expected=%d).\n",
+ type, nla_len(a), ovs_tunnel_key_lens[type]);
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case OVS_TUNNEL_KEY_ATTR_ID:
+ SW_FLOW_KEY_PUT(match, tun_key.tun_id,
+ nla_get_be64(a), is_mask);
+ tun_flags |= TUNNEL_KEY;
+ break;
+ case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
+ SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
+ nla_get_be32(a), is_mask);
+ break;
+ case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
+ SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
+ nla_get_be32(a), is_mask);
+ break;
+ case OVS_TUNNEL_KEY_ATTR_TOS:
+ SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
+ nla_get_u8(a), is_mask);
+ break;
+ case OVS_TUNNEL_KEY_ATTR_TTL:
+ SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
+ nla_get_u8(a), is_mask);
+ ttl = true;
+ break;
+ case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
+ tun_flags |= TUNNEL_DONT_FRAGMENT;
+ break;
+ case OVS_TUNNEL_KEY_ATTR_CSUM:
+ tun_flags |= TUNNEL_CSUM;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
+
+ if (rem > 0) {
+ OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem);
+ return -EINVAL;
+ }
+
+ if (!is_mask) {
+ if (!match->key->tun_key.ipv4_dst) {
+ OVS_NLERR("IPv4 tunnel destination address is zero.\n");
+ return -EINVAL;
+ }
+
+ if (!ttl) {
+ OVS_NLERR("IPv4 tunnel TTL not specified.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int ipv4_tun_to_nlattr(struct sk_buff *skb,
+ const struct ovs_key_ipv4_tunnel *tun_key,
+ const struct ovs_key_ipv4_tunnel *output)
+{
+ struct nlattr *nla;
+
+ nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
+ if (!nla)
+ return -EMSGSIZE;
+
+ if (output->tun_flags & TUNNEL_KEY &&
+ nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
+ return -EMSGSIZE;
+ if (output->ipv4_src &&
+ nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
+ return -EMSGSIZE;
+ if (output->ipv4_dst &&
+ nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
+ return -EMSGSIZE;
+ if (output->ipv4_tos &&
+ nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
+ return -EMSGSIZE;
+ if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
+ return -EMSGSIZE;
+ if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
+ nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
+ return -EMSGSIZE;
+ if ((output->tun_flags & TUNNEL_CSUM) &&
+ nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
+ return -EMSGSIZE;
+
+ nla_nest_end(skb, nla);
+ return 0;
+}
+
+static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
+ const struct nlattr **a, bool is_mask)
+{
+ if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
+ SW_FLOW_KEY_PUT(match, phy.priority,
+ nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
+ *attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
+ }
+
+ if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
+ u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
+
+ if (is_mask)
+ in_port = 0xffffffff; /* Always exact match in_port. */
+ else if (in_port >= DP_MAX_PORTS)
+ return -EINVAL;
+
+ SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
+ *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
+ } else if (!is_mask) {
+ SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
+ }
+
+ if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
+ uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
+
+ SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
+ *attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
+ }
+ if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
+ if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
+ is_mask))
+ return -EINVAL;
+ *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
+ }
+ return 0;
+}
+
+static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
+ const struct nlattr **a, bool is_mask)
+{
+ int err;
+ u64 orig_attrs = attrs;
+
+ err = metadata_from_nlattrs(match, &attrs, a, is_mask);
+ if (err)
+ return err;
+
+ if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
+ const struct ovs_key_ethernet *eth_key;
+
+ eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
+ SW_FLOW_KEY_MEMCPY(match, eth.src,
+ eth_key->eth_src, ETH_ALEN, is_mask);
+ SW_FLOW_KEY_MEMCPY(match, eth.dst,
+ eth_key->eth_dst, ETH_ALEN, is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
+ __be16 tci;
+
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+ if (!(tci & htons(VLAN_TAG_PRESENT))) {
+ if (is_mask)
+ OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
+ else
+ OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");
+
+ return -EINVAL;
+ }
+
+ SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
+ } else if (!is_mask)
+ SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
+
+ if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
+ __be16 eth_type;
+
+ eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+ if (is_mask) {
+ /* Always exact match EtherType. */
+ eth_type = htons(0xffff);
+ } else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
+ OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
+ ntohs(eth_type), ETH_P_802_3_MIN);
+ return -EINVAL;
+ }
+
+ SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+ } else if (!is_mask) {
+ SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+ const struct ovs_key_ipv4 *ipv4_key;
+
+ ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
+ if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
+ OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
+ ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
+ return -EINVAL;
+ }
+ SW_FLOW_KEY_PUT(match, ip.proto,
+ ipv4_key->ipv4_proto, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.tos,
+ ipv4_key->ipv4_tos, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.ttl,
+ ipv4_key->ipv4_ttl, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.frag,
+ ipv4_key->ipv4_frag, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.addr.src,
+ ipv4_key->ipv4_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
+ ipv4_key->ipv4_dst, is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
+ const struct ovs_key_ipv6 *ipv6_key;
+
+ ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
+ if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
+ OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
+ ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
+ return -EINVAL;
+ }
+ SW_FLOW_KEY_PUT(match, ipv6.label,
+ ipv6_key->ipv6_label, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.proto,
+ ipv6_key->ipv6_proto, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.tos,
+ ipv6_key->ipv6_tclass, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.ttl,
+ ipv6_key->ipv6_hlimit, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.frag,
+ ipv6_key->ipv6_frag, is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
+ ipv6_key->ipv6_src,
+ sizeof(match->key->ipv6.addr.src),
+ is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
+ ipv6_key->ipv6_dst,
+ sizeof(match->key->ipv6.addr.dst),
+ is_mask);
+
+ attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
+ const struct ovs_key_arp *arp_key;
+
+ arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
+ if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
+ OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
+ arp_key->arp_op);
+ return -EINVAL;
+ }
+
+ SW_FLOW_KEY_PUT(match, ipv4.addr.src,
+ arp_key->arp_sip, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
+ arp_key->arp_tip, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.proto,
+ ntohs(arp_key->arp_op), is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
+ arp_key->arp_sha, ETH_ALEN, is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
+ arp_key->arp_tha, ETH_ALEN, is_mask);
+
+ attrs &= ~(1 << OVS_KEY_ATTR_ARP);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
+ const struct ovs_key_tcp *tcp_key;
+
+ tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
+ if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+ SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+ tcp_key->tcp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+ tcp_key->tcp_dst, is_mask);
+ } else {
+ SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+ tcp_key->tcp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+ tcp_key->tcp_dst, is_mask);
+ }
+ attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
+ if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+ SW_FLOW_KEY_PUT(match, ipv4.tp.flags,
+ nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
+ is_mask);
+ } else {
+ SW_FLOW_KEY_PUT(match, ipv6.tp.flags,
+ nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
+ is_mask);
+ }
+ attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
+ const struct ovs_key_udp *udp_key;
+
+ udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+ if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+ SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+ udp_key->udp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+ udp_key->udp_dst, is_mask);
+ } else {
+ SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+ udp_key->udp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+ udp_key->udp_dst, is_mask);
+ }
+ attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
+ const struct ovs_key_sctp *sctp_key;
+
+ sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
+ if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+ SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+ sctp_key->sctp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+ sctp_key->sctp_dst, is_mask);
+ } else {
+ SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+ sctp_key->sctp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+ sctp_key->sctp_dst, is_mask);
+ }
+ attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
+ const struct ovs_key_icmp *icmp_key;
+
+ icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+ htons(icmp_key->icmp_type), is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+ htons(icmp_key->icmp_code), is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
+ const struct ovs_key_icmpv6 *icmpv6_key;
+
+ icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+ htons(icmpv6_key->icmpv6_type), is_mask);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+ htons(icmpv6_key->icmpv6_code), is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_ND)) {
+ const struct ovs_key_nd *nd_key;
+
+ nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
+ nd_key->nd_target,
+ sizeof(match->key->ipv6.nd.target),
+ is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
+ nd_key->nd_sll, ETH_ALEN, is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
+ nd_key->nd_tll, ETH_ALEN, is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_ND);
+ }
+
+ if (attrs != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void sw_flow_mask_set(struct sw_flow_mask *mask,
+ struct sw_flow_key_range *range, u8 val)
+{
+ u8 *m = (u8 *)&mask->key + range->start;
+
+ mask->range = *range;
+ memset(m, val, range_n_bytes(range));
+}
+
+/**
+ * ovs_nla_get_match - parses Netlink attributes into a flow key and
+ * mask. If 'mask' is NULL, the flow is treated as an exact-match
+ * flow. Otherwise, it is treated as a wildcarded flow, even if the mask
+ * does not include any don't-care bits.
+ * @match: receives the extracted flow match information.
+ * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence. The fields should be those of the packet that triggered the
+ * creation of this flow.
+ * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
+ * attribute specifies the mask field of the wildcarded flow.
+ */
+int ovs_nla_get_match(struct sw_flow_match *match,
+ const struct nlattr *key,
+ const struct nlattr *mask)
+{
+ const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+ const struct nlattr *encap;
+ u64 key_attrs = 0;
+ u64 mask_attrs = 0;
+ bool encap_valid = false;
+ int err;
+
+ err = parse_flow_nlattrs(key, a, &key_attrs);
+ if (err)
+ return err;
+
+ if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
+ (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
+ (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
+ __be16 tci;
+
+ if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
+ (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
+ OVS_NLERR("Invalid Vlan frame.\n");
+ return -EINVAL;
+ }
+
+ key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+ encap = a[OVS_KEY_ATTR_ENCAP];
+ key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
+ encap_valid = true;
+
+ if (tci & htons(VLAN_TAG_PRESENT)) {
+ err = parse_flow_nlattrs(encap, a, &key_attrs);
+ if (err)
+ return err;
+ } else if (!tci) {
+ /* Corner case for truncated 802.1Q header. */
+ if (nla_len(encap)) {
+ OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n");
+ return -EINVAL;
+ }
+ } else {
+ OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n");
+ return -EINVAL;
+ }
+ }
+
+ err = ovs_key_from_nlattrs(match, key_attrs, a, false);
+ if (err)
+ return err;
+
+ if (mask) {
+ err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
+ if (err)
+ return err;
+
+ if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) {
+ __be16 eth_type = 0;
+ __be16 tci = 0;
+
+ if (!encap_valid) {
+ OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n");
+ return -EINVAL;
+ }
+
+ mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
+ if (a[OVS_KEY_ATTR_ETHERTYPE])
+ eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+
+ if (eth_type == htons(0xffff)) {
+ mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+ encap = a[OVS_KEY_ATTR_ENCAP];
+ err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
+ } else {
+ OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n",
+ ntohs(eth_type));
+ return -EINVAL;
+ }
+
+ if (a[OVS_KEY_ATTR_VLAN])
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+
+ if (!(tci & htons(VLAN_TAG_PRESENT))) {
+ OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci));
+ return -EINVAL;
+ }
+ }
+
+ err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
+ if (err)
+ return err;
+ } else {
+ /* Populate exact match flow's key mask. */
+ if (match->mask)
+ sw_flow_mask_set(match->mask, &match->range, 0xff);
+ }
+
+ if (!match_validate(match, key_attrs, mask_attrs))
+ return -EINVAL;
+
+ return 0;
+}
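
A minimal usage sketch (editorial illustration, not part of the patch): a caller is expected to initialise the match with ovs_match_init() and hand in the OVS_FLOW_ATTR_KEY and, optionally, OVS_FLOW_ATTR_MASK attributes it received. The attribute names come from include/uapi/linux/openvswitch.h; the wrapper itself is hypothetical:

static int example_parse_match(struct sw_flow_key *key,
			       struct sw_flow_mask *mask,
			       struct nlattr **a)
{
	struct sw_flow_match match;

	ovs_match_init(&match, key, mask);
	return ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
				 a[OVS_FLOW_ATTR_MASK]);
}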
+
+/**
+ * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
+ * @flow: Receives extracted in_port, priority, tun_key and skb_mark.
+ * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence.
+ *
+ * This parses a series of Netlink attributes that form a flow key, which must
+ * take the same form accepted by ovs_nla_get_match(), but only enough of it to
+ * get the metadata, that is, the parts of the flow key that cannot be
+ * extracted from the packet itself.
+ */
+int ovs_nla_get_flow_metadata(struct sw_flow *flow,
+ const struct nlattr *attr)
+{
+ struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
+ const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+ u64 attrs = 0;
+ int err;
+ struct sw_flow_match match;
+
+ flow->key.phy.in_port = DP_MAX_PORTS;
+ flow->key.phy.priority = 0;
+ flow->key.phy.skb_mark = 0;
+ memset(tun_key, 0, sizeof(flow->key.tun_key));
+
+ err = parse_flow_nlattrs(attr, a, &attrs);
+ if (err)
+ return -EINVAL;
+
+ memset(&match, 0, sizeof(match));
+ match.key = &flow->key;
+
+ err = metadata_from_nlattrs(&match, &attrs, a, false);
+ if (err)
+ return err;
+
+ return 0;
+}
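
A calling sketch (editorial illustration; OVS_PACKET_ATTR_KEY is the uapi attribute a packet-execute request carries, while the wrapper itself is hypothetical):

static int example_get_metadata(struct sw_flow *flow, struct nlattr **a)
{
	/* Only in_port, priority, skb_mark and tun_key come from the
	 * attributes; the packet fields are extracted from the skb later. */
	return ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
}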
+
+int ovs_nla_put_flow(const struct sw_flow_key *swkey,
+ const struct sw_flow_key *output, struct sk_buff *skb)
+{
+ struct ovs_key_ethernet *eth_key;
+ struct nlattr *nla, *encap;
+ bool is_mask = (swkey != output);
+
+ if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
+ goto nla_put_failure;
+
+ if ((swkey->tun_key.ipv4_dst || is_mask) &&
+ ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key))
+ goto nla_put_failure;
+
+ if (swkey->phy.in_port == DP_MAX_PORTS) {
+ if (is_mask && (output->phy.in_port == 0xffff))
+ if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
+ goto nla_put_failure;
+ } else {
+ u16 upper_u16;
+ upper_u16 = !is_mask ? 0 : 0xffff;
+
+ if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
+ (upper_u16 << 16) | output->phy.in_port))
+ goto nla_put_failure;
+ }
+
+ if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
+ goto nla_put_failure;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
+ if (!nla)
+ goto nla_put_failure;
+
+ eth_key = nla_data(nla);
+ memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN);
+ memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN);
+
+ if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
+ __be16 eth_type;
+ eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
+ if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
+ nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
+ goto nla_put_failure;
+ encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
+ if (!swkey->eth.tci)
+ goto unencap;
+ } else
+ encap = NULL;
+
+ if (swkey->eth.type == htons(ETH_P_802_2)) {
+ /*
+ * Ethertype 802.2 is represented in the netlink with omitted
+ * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
+ * 0xffff in the mask attribute. Ethertype can also
+ * be wildcarded.
+ */
+ if (is_mask && output->eth.type)
+ if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
+ output->eth.type))
+ goto nla_put_failure;
+ goto unencap;
+ }
+
+ if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
+ goto nla_put_failure;
+
+ if (swkey->eth.type == htons(ETH_P_IP)) {
+ struct ovs_key_ipv4 *ipv4_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
+ if (!nla)
+ goto nla_put_failure;
+ ipv4_key = nla_data(nla);
+ ipv4_key->ipv4_src = output->ipv4.addr.src;
+ ipv4_key->ipv4_dst = output->ipv4.addr.dst;
+ ipv4_key->ipv4_proto = output->ip.proto;
+ ipv4_key->ipv4_tos = output->ip.tos;
+ ipv4_key->ipv4_ttl = output->ip.ttl;
+ ipv4_key->ipv4_frag = output->ip.frag;
+ } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+ struct ovs_key_ipv6 *ipv6_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
+ if (!nla)
+ goto nla_put_failure;
+ ipv6_key = nla_data(nla);
+ memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
+ sizeof(ipv6_key->ipv6_src));
+ memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
+ sizeof(ipv6_key->ipv6_dst));
+ ipv6_key->ipv6_label = output->ipv6.label;
+ ipv6_key->ipv6_proto = output->ip.proto;
+ ipv6_key->ipv6_tclass = output->ip.tos;
+ ipv6_key->ipv6_hlimit = output->ip.ttl;
+ ipv6_key->ipv6_frag = output->ip.frag;
+ } else if (swkey->eth.type == htons(ETH_P_ARP) ||
+ swkey->eth.type == htons(ETH_P_RARP)) {
+ struct ovs_key_arp *arp_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
+ if (!nla)
+ goto nla_put_failure;
+ arp_key = nla_data(nla);
+ memset(arp_key, 0, sizeof(struct ovs_key_arp));
+ arp_key->arp_sip = output->ipv4.addr.src;
+ arp_key->arp_tip = output->ipv4.addr.dst;
+ arp_key->arp_op = htons(output->ip.proto);
+ memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN);
+ memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN);
+ }
+
+ if ((swkey->eth.type == htons(ETH_P_IP) ||
+ swkey->eth.type == htons(ETH_P_IPV6)) &&
+ swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+
+ if (swkey->ip.proto == IPPROTO_TCP) {
+ struct ovs_key_tcp *tcp_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
+ if (!nla)
+ goto nla_put_failure;
+ tcp_key = nla_data(nla);
+ if (swkey->eth.type == htons(ETH_P_IP)) {
+ tcp_key->tcp_src = output->ipv4.tp.src;
+ tcp_key->tcp_dst = output->ipv4.tp.dst;
+ if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
+ output->ipv4.tp.flags))
+ goto nla_put_failure;
+ } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+ tcp_key->tcp_src = output->ipv6.tp.src;
+ tcp_key->tcp_dst = output->ipv6.tp.dst;
+ if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
+ output->ipv6.tp.flags))
+ goto nla_put_failure;
+ }
+ } else if (swkey->ip.proto == IPPROTO_UDP) {
+ struct ovs_key_udp *udp_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
+ if (!nla)
+ goto nla_put_failure;
+ udp_key = nla_data(nla);
+ if (swkey->eth.type == htons(ETH_P_IP)) {
+ udp_key->udp_src = output->ipv4.tp.src;
+ udp_key->udp_dst = output->ipv4.tp.dst;
+ } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+ udp_key->udp_src = output->ipv6.tp.src;
+ udp_key->udp_dst = output->ipv6.tp.dst;
+ }
+ } else if (swkey->ip.proto == IPPROTO_SCTP) {
+ struct ovs_key_sctp *sctp_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
+ if (!nla)
+ goto nla_put_failure;
+ sctp_key = nla_data(nla);
+ if (swkey->eth.type == htons(ETH_P_IP)) {
+ sctp_key->sctp_src = swkey->ipv4.tp.src;
+ sctp_key->sctp_dst = swkey->ipv4.tp.dst;
+ } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+ sctp_key->sctp_src = swkey->ipv6.tp.src;
+ sctp_key->sctp_dst = swkey->ipv6.tp.dst;
+ }
+ } else if (swkey->eth.type == htons(ETH_P_IP) &&
+ swkey->ip.proto == IPPROTO_ICMP) {
+ struct ovs_key_icmp *icmp_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
+ if (!nla)
+ goto nla_put_failure;
+ icmp_key = nla_data(nla);
+ icmp_key->icmp_type = ntohs(output->ipv4.tp.src);
+ icmp_key->icmp_code = ntohs(output->ipv4.tp.dst);
+ } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
+ swkey->ip.proto == IPPROTO_ICMPV6) {
+ struct ovs_key_icmpv6 *icmpv6_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
+ sizeof(*icmpv6_key));
+ if (!nla)
+ goto nla_put_failure;
+ icmpv6_key = nla_data(nla);
+ icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src);
+ icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst);
+
+ if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+ icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
+ struct ovs_key_nd *nd_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
+ if (!nla)
+ goto nla_put_failure;
+ nd_key = nla_data(nla);
+ memcpy(nd_key->nd_target, &output->ipv6.nd.target,
+ sizeof(nd_key->nd_target));
+ memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN);
+ memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN);
+ }
+ }
+ }
+
+unencap:
+ if (encap)
+ nla_nest_end(skb, encap);
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
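
Editorial note: the same routine serialises both the key and the mask. When swkey and output point at the same key, is_mask is false and the key itself is dumped; when output points at the mask's key, is_mask is true and the mask encodings (for example the 0xffffffff in_port) apply. A sketch (hypothetical helper; callers would typically nest each dump under OVS_FLOW_ATTR_KEY and OVS_FLOW_ATTR_MASK respectively):

static int example_dump_match(const struct sw_flow_key *key,
			      const struct sw_flow_mask *mask,
			      struct sk_buff *skb)
{
	int err;

	err = ovs_nla_put_flow(key, key, skb);		/* dump the key */
	if (err)
		return err;

	return ovs_nla_put_flow(key, &mask->key, skb);	/* dump the mask */
}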
+
+#define MAX_ACTIONS_BUFSIZE (32 * 1024)
+
+struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size)
+{
+ struct sw_flow_actions *sfa;
+
+ if (size > MAX_ACTIONS_BUFSIZE)
+ return ERR_PTR(-EINVAL);
+
+ sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
+ if (!sfa)
+ return ERR_PTR(-ENOMEM);
+
+ sfa->actions_len = 0;
+ return sfa;
+}
+
+/* RCU callback used by ovs_nla_free_flow_actions. */
+static void rcu_free_acts_callback(struct rcu_head *rcu)
+{
+ struct sw_flow_actions *sf_acts = container_of(rcu,
+ struct sw_flow_actions, rcu);
+ kfree(sf_acts);
+}
+
+/* Schedules 'sf_acts' to be freed after the next RCU grace period.
+ * The caller must hold rcu_read_lock for this to be sensible. */
+void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+{
+ call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
+}
+
+static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
+ int attr_len)
+{
+
+ struct sw_flow_actions *acts;
+ int new_acts_size;
+ int req_size = NLA_ALIGN(attr_len);
+ int next_offset = offsetof(struct sw_flow_actions, actions) +
+ (*sfa)->actions_len;
+
+ if (req_size <= (ksize(*sfa) - next_offset))
+ goto out;
+
+ new_acts_size = ksize(*sfa) * 2;
+
+ if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+ if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
+ return ERR_PTR(-EMSGSIZE);
+ new_acts_size = MAX_ACTIONS_BUFSIZE;
+ }
+
+ acts = ovs_nla_alloc_flow_actions(new_acts_size);
+ if (IS_ERR(acts))
+ return (void *)acts;
+
+ memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
+ acts->actions_len = (*sfa)->actions_len;
+ kfree(*sfa);
+ *sfa = acts;
+
+out:
+ (*sfa)->actions_len += req_size;
+ return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
+}
+
+static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
+{
+ struct nlattr *a;
+
+ a = reserve_sfa_size(sfa, nla_attr_size(len));
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ a->nla_type = attrtype;
+ a->nla_len = nla_attr_size(len);
+
+ if (data)
+ memcpy(nla_data(a), data, len);
+ memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
+
+ return 0;
+}
+
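+/* Begin a nested action attribute; the returned offset is later handed to
+ * add_nested_action_end() to fix up the attribute's final length. */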
+static inline int add_nested_action_start(struct sw_flow_actions **sfa,
+ int attrtype)
+{
+ int used = (*sfa)->actions_len;
+ int err;
+
+ err = add_action(sfa, attrtype, NULL, 0);
+ if (err)
+ return err;
+
+ return used;
+}
+
+static inline void add_nested_action_end(struct sw_flow_actions *sfa,
+ int st_offset)
+{
+ struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions +
+ st_offset);
+
+ a->nla_len = sfa->actions_len - st_offset;
+}
+
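+/* Validate a nested OVS_ACTION_ATTR_SAMPLE attribute (probability plus a
+ * nested action list) and copy it into '*sfa'. */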
+static int validate_and_copy_sample(const struct nlattr *attr,
+ const struct sw_flow_key *key, int depth,
+ struct sw_flow_actions **sfa)
+{
+ const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
+ const struct nlattr *probability, *actions;
+ const struct nlattr *a;
+ int rem, start, err, st_acts;
+
+ memset(attrs, 0, sizeof(attrs));
+ nla_for_each_nested(a, attr, rem) {
+ int type = nla_type(a);
+ if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
+ return -EINVAL;
+ attrs[type] = a;
+ }
+ if (rem)
+ return -EINVAL;
+
+ probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
+ if (!probability || nla_len(probability) != sizeof(u32))
+ return -EINVAL;
+
+ actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
+ if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
+ return -EINVAL;
+
+ /* Validation done; copy the sample action. */
+ start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
+ if (start < 0)
+ return start;
+ err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
+ nla_data(probability), sizeof(u32));
+ if (err)
+ return err;
+ st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
+ if (st_acts < 0)
+ return st_acts;
+
+ err = ovs_nla_copy_actions(actions, key, depth + 1, sfa);
+ if (err)
+ return err;
+
+ add_nested_action_end(*sfa, st_acts);
+ add_nested_action_end(*sfa, start);
+
+ return 0;
+}
+
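+/* A transport port may only be set if the flow actually carries one, i.e.
+ * a non-zero source or destination port. */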
+static int validate_tp_port(const struct sw_flow_key *flow_key)
+{
+ if (flow_key->eth.type == htons(ETH_P_IP)) {
+ if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
+ return 0;
+ } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
+ if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+void ovs_match_init(struct sw_flow_match *match,
+ struct sw_flow_key *key,
+ struct sw_flow_mask *mask)
+{
+ memset(match, 0, sizeof(*match));
+ match->key = key;
+ match->mask = mask;
+
+ memset(key, 0, sizeof(*key));
+
+ if (mask) {
+ memset(&mask->key, 0, sizeof(mask->key));
+ mask->range.start = mask->range.end = 0;
+ }
+}
+
+static int validate_and_copy_set_tun(const struct nlattr *attr,
+ struct sw_flow_actions **sfa)
+{
+ struct sw_flow_match match;
+ struct sw_flow_key key;
+ int err, start;
+
+ ovs_match_init(&match, &key, NULL);
+ err = ipv4_tun_from_nlattr(nla_data(attr), &match, false);
+ if (err)
+ return err;
+
+ start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
+ if (start < 0)
+ return start;
+
+ err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key,
+ sizeof(match.key->tun_key));
+ add_nested_action_end(*sfa, start);
+
+ return err;
+}
+
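+/* Validate a single OVS_ACTION_ATTR_SET action against the flow key. Tunnel
+ * sets are copied into '*sfa' and '*set_tun' is raised so that the caller
+ * skips the normal attribute copy. */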
+static int validate_set(const struct nlattr *a,
+ const struct sw_flow_key *flow_key,
+ struct sw_flow_actions **sfa,
+ bool *set_tun)
+{
+ const struct nlattr *ovs_key = nla_data(a);
+ int key_type = nla_type(ovs_key);
+
+ /* There can be only one key in an action */
+ if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
+ return -EINVAL;
+
+ if (key_type > OVS_KEY_ATTR_MAX ||
+ (ovs_key_lens[key_type] != nla_len(ovs_key) &&
+ ovs_key_lens[key_type] != -1))
+ return -EINVAL;
+
+ switch (key_type) {
+ const struct ovs_key_ipv4 *ipv4_key;
+ const struct ovs_key_ipv6 *ipv6_key;
+ int err;
+
+ case OVS_KEY_ATTR_PRIORITY:
+ case OVS_KEY_ATTR_SKB_MARK:
+ case OVS_KEY_ATTR_ETHERNET:
+ break;
+
+ case OVS_KEY_ATTR_TUNNEL:
+ *set_tun = true;
+ err = validate_and_copy_set_tun(a, sfa);
+ if (err)
+ return err;
+ break;
+
+ case OVS_KEY_ATTR_IPV4:
+ if (flow_key->eth.type != htons(ETH_P_IP))
+ return -EINVAL;
+
+ if (!flow_key->ip.proto)
+ return -EINVAL;
+
+ ipv4_key = nla_data(ovs_key);
+ if (ipv4_key->ipv4_proto != flow_key->ip.proto)
+ return -EINVAL;
+
+ if (ipv4_key->ipv4_frag != flow_key->ip.frag)
+ return -EINVAL;
+
+ break;
+
+ case OVS_KEY_ATTR_IPV6:
+ if (flow_key->eth.type != htons(ETH_P_IPV6))
+ return -EINVAL;
+
+ if (!flow_key->ip.proto)
+ return -EINVAL;
+
+ ipv6_key = nla_data(ovs_key);
+ if (ipv6_key->ipv6_proto != flow_key->ip.proto)
+ return -EINVAL;
+
+ if (ipv6_key->ipv6_frag != flow_key->ip.frag)
+ return -EINVAL;
+
+ if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
+ return -EINVAL;
+
+ break;
+
+ case OVS_KEY_ATTR_TCP:
+ if (flow_key->ip.proto != IPPROTO_TCP)
+ return -EINVAL;
+
+ return validate_tp_port(flow_key);
+
+ case OVS_KEY_ATTR_UDP:
+ if (flow_key->ip.proto != IPPROTO_UDP)
+ return -EINVAL;
+
+ return validate_tp_port(flow_key);
+
+ case OVS_KEY_ATTR_SCTP:
+ if (flow_key->ip.proto != IPPROTO_SCTP)
+ return -EINVAL;
+
+ return validate_tp_port(flow_key);
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_userspace(const struct nlattr *attr)
+{
+ static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
+ [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
+ [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
+ };
+ struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
+ int error;
+
+ error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
+ attr, userspace_policy);
+ if (error)
+ return error;
+
+ if (!a[OVS_USERSPACE_ATTR_PID] ||
+ !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int copy_action(const struct nlattr *from,
+ struct sw_flow_actions **sfa)
+{
+ int totlen = NLA_ALIGN(from->nla_len);
+ struct nlattr *to;
+
+ to = reserve_sfa_size(sfa, from->nla_len);
+ if (IS_ERR(to))
+ return PTR_ERR(to);
+
+ memcpy(to, from, totlen);
+ return 0;
+}
+
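+/* Validate the action list in 'attr' against 'key' and copy it into '*sfa',
+ * recursing into sample actions up to SAMPLE_ACTION_DEPTH levels deep. */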
+int ovs_nla_copy_actions(const struct nlattr *attr,
+ const struct sw_flow_key *key,
+ int depth,
+ struct sw_flow_actions **sfa)
+{
+ const struct nlattr *a;
+ int rem, err;
+
+ if (depth >= SAMPLE_ACTION_DEPTH)
+ return -EOVERFLOW;
+
+ nla_for_each_nested(a, attr, rem) {
+ /* Expected argument lengths, (u32)-1 for variable length. */
+ static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
+ [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
+ [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
+ [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
+ [OVS_ACTION_ATTR_POP_VLAN] = 0,
+ [OVS_ACTION_ATTR_SET] = (u32)-1,
+ [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
+ };
+ const struct ovs_action_push_vlan *vlan;
+ int type = nla_type(a);
+ bool skip_copy;
+
+ if (type > OVS_ACTION_ATTR_MAX ||
+ (action_lens[type] != nla_len(a) &&
+ action_lens[type] != (u32)-1))
+ return -EINVAL;
+
+ skip_copy = false;
+ switch (type) {
+ case OVS_ACTION_ATTR_UNSPEC:
+ return -EINVAL;
+
+ case OVS_ACTION_ATTR_USERSPACE:
+ err = validate_userspace(a);
+ if (err)
+ return err;
+ break;
+
+ case OVS_ACTION_ATTR_OUTPUT:
+ if (nla_get_u32(a) >= DP_MAX_PORTS)
+ return -EINVAL;
+ break;
+
+ case OVS_ACTION_ATTR_POP_VLAN:
+ break;
+
+ case OVS_ACTION_ATTR_PUSH_VLAN:
+ vlan = nla_data(a);
+ if (vlan->vlan_tpid != htons(ETH_P_8021Q))
+ return -EINVAL;
+ if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
+ return -EINVAL;
+ break;
+
+ case OVS_ACTION_ATTR_SET:
+ err = validate_set(a, key, sfa, &skip_copy);
+ if (err)
+ return err;
+ break;
+
+ case OVS_ACTION_ATTR_SAMPLE:
+ err = validate_and_copy_sample(a, key, depth, sfa);
+ if (err)
+ return err;
+ skip_copy = true;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ if (!skip_copy) {
+ err = copy_action(a, sfa);
+ if (err)
+ return err;
+ }
+ }
+
+ if (rem > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
+{
+ const struct nlattr *a;
+ struct nlattr *start;
+ int err = 0, rem;
+
+ start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
+ if (!start)
+ return -EMSGSIZE;
+
+ nla_for_each_nested(a, attr, rem) {
+ int type = nla_type(a);
+ struct nlattr *st_sample;
+
+ switch (type) {
+ case OVS_SAMPLE_ATTR_PROBABILITY:
+ if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY,
+ sizeof(u32), nla_data(a)))
+ return -EMSGSIZE;
+ break;
+ case OVS_SAMPLE_ATTR_ACTIONS:
+ st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
+ if (!st_sample)
+ return -EMSGSIZE;
+ err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
+ if (err)
+ return err;
+ nla_nest_end(skb, st_sample);
+ break;
+ }
+ }
+
+ nla_nest_end(skb, start);
+ return err;
+}
+
+static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
+{
+ const struct nlattr *ovs_key = nla_data(a);
+ int key_type = nla_type(ovs_key);
+ struct nlattr *start;
+ int err;
+
+ switch (key_type) {
+ case OVS_KEY_ATTR_IPV4_TUNNEL:
+ start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
+ if (!start)
+ return -EMSGSIZE;
+
+ err = ipv4_tun_to_nlattr(skb, nla_data(ovs_key),
+ nla_data(ovs_key));
+ if (err)
+ return err;
+ nla_nest_end(skb, start);
+ break;
+ default:
+ if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
+ return -EMSGSIZE;
+ break;
+ }
+
+ return 0;
+}
+
+int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
+{
+ const struct nlattr *a;
+ int rem, err;
+
+ nla_for_each_attr(a, attr, len, rem) {
+ int type = nla_type(a);
+
+ switch (type) {
+ case OVS_ACTION_ATTR_SET:
+ err = set_action_to_attr(a, skb);
+ if (err)
+ return err;
+ break;
+
+ case OVS_ACTION_ATTR_SAMPLE:
+ err = sample_action_to_attr(a, skb);
+ if (err)
+ return err;
+ break;
+ default:
+ if (nla_put(skb, type, nla_len(a), nla_data(a)))
+ return -EMSGSIZE;
+ break;
+ }
+ }
+
+ return 0;
+}
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
new file mode 100644
index 000000000000..440151045d39
--- /dev/null
+++ b/net/openvswitch/flow_netlink.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2007-2013 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef FLOW_NETLINK_H
+#define FLOW_NETLINK_H 1
+
+#include <linux/kernel.h>
+#include <linux/netlink.h>
+#include <linux/openvswitch.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/if_ether.h>
+#include <linux/in6.h>
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/flex_array.h>
+
+#include <net/inet_ecn.h>
+#include <net/ip_tunnels.h>
+
+#include "flow.h"
+
+void ovs_match_init(struct sw_flow_match *match,
+ struct sw_flow_key *key, struct sw_flow_mask *mask);
+
+int ovs_nla_put_flow(const struct sw_flow_key *,
+ const struct sw_flow_key *, struct sk_buff *);
+int ovs_nla_get_flow_metadata(struct sw_flow *flow,
+ const struct nlattr *attr);
+int ovs_nla_get_match(struct sw_flow_match *match,
+ const struct nlattr *,
+ const struct nlattr *);
+
+int ovs_nla_copy_actions(const struct nlattr *attr,
+ const struct sw_flow_key *key, int depth,
+ struct sw_flow_actions **sfa);
+int ovs_nla_put_actions(const struct nlattr *attr,
+ int len, struct sk_buff *skb);
+
+struct sw_flow_actions *ovs_nla_alloc_flow_actions(int actions_len);
+void ovs_nla_free_flow_actions(struct sw_flow_actions *);
+
+#endif /* flow_netlink.h */
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
new file mode 100644
index 000000000000..e42542706087
--- /dev/null
+++ b/net/openvswitch/flow_table.c
@@ -0,0 +1,592 @@
+/*
+ * Copyright (c) 2007-2013 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "flow.h"
+#include "datapath.h"
+#include <linux/uaccess.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/llc_pdu.h>
+#include <linux/kernel.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/llc.h>
+#include <linux/module.h>
+#include <linux/in.h>
+#include <linux/rcupdate.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/sctp.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/rculist.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ndisc.h>
+
+#define TBL_MIN_BUCKETS 1024
+#define REHASH_INTERVAL (10 * 60 * HZ)
+
+static struct kmem_cache *flow_cache;
+
+static u16 range_n_bytes(const struct sw_flow_key_range *range)
+{
+ return range->end - range->start;
+}
+
+void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
+ const struct sw_flow_mask *mask)
+{
+ const long *m = (long *)((u8 *)&mask->key + mask->range.start);
+ const long *s = (long *)((u8 *)src + mask->range.start);
+ long *d = (long *)((u8 *)dst + mask->range.start);
+ int i;
+
+ /* The memory outside of 'mask->range' is not set since
+ * further operations on 'dst' only use contents within
+ * 'mask->range'.
+ */
+ for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
+ *d++ = *s++ & *m++;
+}
+
+struct sw_flow *ovs_flow_alloc(void)
+{
+ struct sw_flow *flow;
+
+ flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&flow->lock);
+ flow->sf_acts = NULL;
+ flow->mask = NULL;
+
+ return flow;
+}
+
+int ovs_flow_tbl_count(struct flow_table *table)
+{
+ return table->count;
+}
+
+static struct flex_array *alloc_buckets(unsigned int n_buckets)
+{
+ struct flex_array *buckets;
+ int i, err;
+
+ buckets = flex_array_alloc(sizeof(struct hlist_head),
+ n_buckets, GFP_KERNEL);
+ if (!buckets)
+ return NULL;
+
+ err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
+ if (err) {
+ flex_array_free(buckets);
+ return NULL;
+ }
+
+ for (i = 0; i < n_buckets; i++)
+ INIT_HLIST_HEAD((struct hlist_head *)
+ flex_array_get(buckets, i));
+
+ return buckets;
+}
+
+static void flow_free(struct sw_flow *flow)
+{
+ kfree((struct sf_flow_acts __force *)flow->sf_acts);
+ kmem_cache_free(flow_cache, flow);
+}
+
+static void rcu_free_flow_callback(struct rcu_head *rcu)
+{
+ struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
+
+ flow_free(flow);
+}
+
+static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
+{
+ struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
+
+ kfree(mask);
+}
+
+static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
+{
+ if (!mask)
+ return;
+
+ BUG_ON(!mask->ref_count);
+ mask->ref_count--;
+
+ if (!mask->ref_count) {
+ list_del_rcu(&mask->list);
+ if (deferred)
+ call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
+ else
+ kfree(mask);
+ }
+}
+
+void ovs_flow_free(struct sw_flow *flow, bool deferred)
+{
+ if (!flow)
+ return;
+
+ flow_mask_del_ref(flow->mask, deferred);
+
+ if (deferred)
+ call_rcu(&flow->rcu, rcu_free_flow_callback);
+ else
+ flow_free(flow);
+}
+
+static void free_buckets(struct flex_array *buckets)
+{
+ flex_array_free(buckets);
+}
+
+static void __table_instance_destroy(struct table_instance *ti)
+{
+ int i;
+
+ if (ti->keep_flows)
+ goto skip_flows;
+
+ for (i = 0; i < ti->n_buckets; i++) {
+ struct sw_flow *flow;
+ struct hlist_head *head = flex_array_get(ti->buckets, i);
+ struct hlist_node *n;
+ int ver = ti->node_ver;
+
+ hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
+ hlist_del(&flow->hash_node[ver]);
+ ovs_flow_free(flow, false);
+ }
+ }
+
+skip_flows:
+ free_buckets(ti->buckets);
+ kfree(ti);
+}
+
+static struct table_instance *table_instance_alloc(int new_size)
+{
+ struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
+
+ if (!ti)
+ return NULL;
+
+ ti->buckets = alloc_buckets(new_size);
+
+ if (!ti->buckets) {
+ kfree(ti);
+ return NULL;
+ }
+ ti->n_buckets = new_size;
+ ti->node_ver = 0;
+ ti->keep_flows = false;
+ get_random_bytes(&ti->hash_seed, sizeof(u32));
+
+ return ti;
+}
+
+int ovs_flow_tbl_init(struct flow_table *table)
+{
+ struct table_instance *ti;
+
+ ti = table_instance_alloc(TBL_MIN_BUCKETS);
+
+ if (!ti)
+ return -ENOMEM;
+
+ rcu_assign_pointer(table->ti, ti);
+ INIT_LIST_HEAD(&table->mask_list);
+ table->last_rehash = jiffies;
+ table->count = 0;
+ return 0;
+}
+
+static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
+{
+ struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
+
+ __table_instance_destroy(ti);
+}
+
+static void table_instance_destroy(struct table_instance *ti, bool deferred)
+{
+ if (!ti)
+ return;
+
+ if (deferred)
+ call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
+ else
+ __table_instance_destroy(ti);
+}
+
+void ovs_flow_tbl_destroy(struct flow_table *table)
+{
+ struct table_instance *ti = ovsl_dereference(table->ti);
+
+ table_instance_destroy(ti, false);
+}
+
+struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
+ u32 *bucket, u32 *last)
+{
+ struct sw_flow *flow;
+ struct hlist_head *head;
+ int ver;
+ int i;
+
+ ver = ti->node_ver;
+ while (*bucket < ti->n_buckets) {
+ i = 0;
+ head = flex_array_get(ti->buckets, *bucket);
+ hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
+ if (i < *last) {
+ i++;
+ continue;
+ }
+ *last = i + 1;
+ return flow;
+ }
+ (*bucket)++;
+ *last = 0;
+ }
+
+ return NULL;
+}
+
+static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
+{
+ hash = jhash_1word(hash, ti->hash_seed);
+ return flex_array_get(ti->buckets,
+ (hash & (ti->n_buckets - 1)));
+}
+
+static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
+{
+ struct hlist_head *head;
+
+ head = find_bucket(ti, flow->hash);
+ hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
+}
+
+static void flow_table_copy_flows(struct table_instance *old,
+ struct table_instance *new)
+{
+ int old_ver;
+ int i;
+
+ old_ver = old->node_ver;
+ new->node_ver = !old_ver;
+
+ /* Insert in new table. */
+ for (i = 0; i < old->n_buckets; i++) {
+ struct sw_flow *flow;
+ struct hlist_head *head;
+
+ head = flex_array_get(old->buckets, i);
+
+ hlist_for_each_entry(flow, head, hash_node[old_ver])
+ table_instance_insert(new, flow);
+ }
+
+ old->keep_flows = true;
+}
+
+static struct table_instance *table_instance_rehash(struct table_instance *ti,
+ int n_buckets)
+{
+ struct table_instance *new_ti;
+
+ new_ti = table_instance_alloc(n_buckets);
+ if (!new_ti)
+ return NULL;
+
+ flow_table_copy_flows(ti, new_ti);
+
+ return new_ti;
+}
+
+int ovs_flow_tbl_flush(struct flow_table *flow_table)
+{
+ struct table_instance *old_ti;
+ struct table_instance *new_ti;
+
+ old_ti = ovsl_dereference(flow_table->ti);
+ new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
+ if (!new_ti)
+ return -ENOMEM;
+
+ rcu_assign_pointer(flow_table->ti, new_ti);
+ flow_table->last_rehash = jiffies;
+ flow_table->count = 0;
+
+ table_instance_destroy(old_ti, true);
+ return 0;
+}
+
+static u32 flow_hash(const struct sw_flow_key *key, int key_start,
+ int key_end)
+{
+ u32 *hash_key = (u32 *)((u8 *)key + key_start);
+ int hash_u32s = (key_end - key_start) >> 2;
+
+ /* Make sure the number of hash bytes is a multiple of u32. */
+ BUILD_BUG_ON(sizeof(long) % sizeof(u32));
+
+ return jhash2(hash_key, hash_u32s, 0);
+}
+
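+/* Flows with a tunnel key are hashed and compared from offset zero; all
+ * other flows skip the tunnel key and start at the metadata (phy) fields,
+ * rounded down to a long boundary. */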
+static int flow_key_start(const struct sw_flow_key *key)
+{
+ if (key->tun_key.ipv4_dst)
+ return 0;
+ else
+ return rounddown(offsetof(struct sw_flow_key, phy),
+ sizeof(long));
+}
+
+static bool cmp_key(const struct sw_flow_key *key1,
+ const struct sw_flow_key *key2,
+ int key_start, int key_end)
+{
+ const long *cp1 = (long *)((u8 *)key1 + key_start);
+ const long *cp2 = (long *)((u8 *)key2 + key_start);
+ long diffs = 0;
+ int i;
+
+ for (i = key_start; i < key_end; i += sizeof(long))
+ diffs |= *cp1++ ^ *cp2++;
+
+ return diffs == 0;
+}
+
+static bool flow_cmp_masked_key(const struct sw_flow *flow,
+ const struct sw_flow_key *key,
+ int key_start, int key_end)
+{
+ return cmp_key(&flow->key, key, key_start, key_end);
+}
+
+bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
+ struct sw_flow_match *match)
+{
+ struct sw_flow_key *key = match->key;
+ int key_start = flow_key_start(key);
+ int key_end = match->range.end;
+
+ return cmp_key(&flow->unmasked_key, key, key_start, key_end);
+}
+
+static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
+ const struct sw_flow_key *unmasked,
+ struct sw_flow_mask *mask)
+{
+ struct sw_flow *flow;
+ struct hlist_head *head;
+ int key_start = mask->range.start;
+ int key_end = mask->range.end;
+ u32 hash;
+ struct sw_flow_key masked_key;
+
+ ovs_flow_mask_key(&masked_key, unmasked, mask);
+ hash = flow_hash(&masked_key, key_start, key_end);
+ head = find_bucket(ti, hash);
+ hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
+ if (flow->mask == mask && flow->hash == hash &&
+ flow_cmp_masked_key(flow, &masked_key,
+ key_start, key_end))
+ return flow;
+ }
+ return NULL;
+}
+
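+/* Look up 'key' by trying each mask on the table's mask list in turn;
+ * '*n_mask_hit' returns how many masks were tried. */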
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
+ const struct sw_flow_key *key,
+ u32 *n_mask_hit)
+{
+ struct table_instance *ti = rcu_dereference(tbl->ti);
+ struct sw_flow_mask *mask;
+ struct sw_flow *flow;
+
+ *n_mask_hit = 0;
+ list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
+ (*n_mask_hit)++;
+ flow = masked_flow_lookup(ti, key, mask);
+ if (flow) /* Found */
+ return flow;
+ }
+ return NULL;
+}
+
+int ovs_flow_tbl_num_masks(const struct flow_table *table)
+{
+ struct sw_flow_mask *mask;
+ int num = 0;
+
+ list_for_each_entry(mask, &table->mask_list, list)
+ num++;
+
+ return num;
+}
+
+static struct table_instance *table_instance_expand(struct table_instance *ti)
+{
+ return table_instance_rehash(ti, ti->n_buckets * 2);
+}
+
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+{
+ struct table_instance *ti = ovsl_dereference(table->ti);
+
+ BUG_ON(table->count == 0);
+ hlist_del_rcu(&flow->hash_node[ti->node_ver]);
+ table->count--;
+}
+
+static struct sw_flow_mask *mask_alloc(void)
+{
+ struct sw_flow_mask *mask;
+
+ mask = kmalloc(sizeof(*mask), GFP_KERNEL);
+ if (mask)
+ mask->ref_count = 0;
+
+ return mask;
+}
+
+static void mask_add_ref(struct sw_flow_mask *mask)
+{
+ mask->ref_count++;
+}
+
+static bool mask_equal(const struct sw_flow_mask *a,
+ const struct sw_flow_mask *b)
+{
+ u8 *a_ = (u8 *)&a->key + a->range.start;
+ u8 *b_ = (u8 *)&b->key + b->range.start;
+
+ return (a->range.end == b->range.end)
+ && (a->range.start == b->range.start)
+ && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
+}
+
+static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
+ const struct sw_flow_mask *mask)
+{
+ struct list_head *ml;
+
+ list_for_each(ml, &tbl->mask_list) {
+ struct sw_flow_mask *m;
+ m = container_of(ml, struct sw_flow_mask, list);
+ if (mask_equal(mask, m))
+ return m;
+ }
+
+ return NULL;
+}
+
+/* Add the flow's mask to the mask list, reusing an existing entry if an
+ * equal mask is already present; otherwise allocate and insert a new one.
+ */
+static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
+ struct sw_flow_mask *new)
+{
+ struct sw_flow_mask *mask;
+ mask = flow_mask_find(tbl, new);
+ if (!mask) {
+ /* Allocate a new mask if none exists. */
+ mask = mask_alloc();
+ if (!mask)
+ return -ENOMEM;
+ mask->key = new->key;
+ mask->range = new->range;
+ list_add_rcu(&mask->list, &tbl->mask_list);
+ }
+
+ mask_add_ref(mask);
+ flow->mask = mask;
+ return 0;
+}
+
+int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+ struct sw_flow_mask *mask)
+{
+ struct table_instance *new_ti = NULL;
+ struct table_instance *ti;
+ int err;
+
+ err = flow_mask_insert(table, flow, mask);
+ if (err)
+ return err;
+
+ flow->hash = flow_hash(&flow->key, flow->mask->range.start,
+ flow->mask->range.end);
+ ti = ovsl_dereference(table->ti);
+ table_instance_insert(ti, flow);
+ table->count++;
+
+ /* Expand table, if necessary, to make room. */
+ if (table->count > ti->n_buckets)
+ new_ti = table_instance_expand(ti);
+ else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
+ new_ti = table_instance_rehash(ti, ti->n_buckets);
+
+ if (new_ti) {
+ rcu_assign_pointer(table->ti, new_ti);
+ table_instance_destroy(ti, true);
+ table->last_rehash = jiffies;
+ }
+ return 0;
+}
+
+/* Initializes the flow module.
+ * Returns zero if successful or a negative error code. */
+int ovs_flow_init(void)
+{
+ BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
+ BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
+
+ flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
+ 0, NULL);
+ if (flow_cache == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Uninitializes the flow module. */
+void ovs_flow_exit(void)
+{
+ kmem_cache_destroy(flow_cache);
+}
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
new file mode 100644
index 000000000000..fbe45d5ad07d
--- /dev/null
+++ b/net/openvswitch/flow_table.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2007-2013 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef FLOW_TABLE_H
+#define FLOW_TABLE_H 1
+
+#include <linux/kernel.h>
+#include <linux/netlink.h>
+#include <linux/openvswitch.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/if_ether.h>
+#include <linux/in6.h>
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/flex_array.h>
+
+#include <net/inet_ecn.h>
+#include <net/ip_tunnels.h>
+
+#include "flow.h"
+
+struct table_instance {
+ struct flex_array *buckets;
+ unsigned int n_buckets;
+ struct rcu_head rcu;
+ int node_ver;
+ u32 hash_seed;
+ bool keep_flows;
+};
+
+struct flow_table {
+ struct table_instance __rcu *ti;
+ struct list_head mask_list;
+ unsigned long last_rehash;
+ unsigned int count;
+};
+
+int ovs_flow_init(void);
+void ovs_flow_exit(void);
+
+struct sw_flow *ovs_flow_alloc(void);
+void ovs_flow_free(struct sw_flow *, bool deferred);
+
+int ovs_flow_tbl_init(struct flow_table *);
+int ovs_flow_tbl_count(struct flow_table *table);
+void ovs_flow_tbl_destroy(struct flow_table *table);
+int ovs_flow_tbl_flush(struct flow_table *flow_table);
+
+int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+ struct sw_flow_mask *mask);
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
+int ovs_flow_tbl_num_masks(const struct flow_table *table);
+struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
+ u32 *bucket, u32 *idx);
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
+ const struct sw_flow_key *,
+ u32 *n_mask_hit);
+
+bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
+ struct sw_flow_match *match);
+
+void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
+ const struct sw_flow_mask *mask);
+#endif /* flow_table.h */
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index c99dea543d64..a3d6951602db 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -24,8 +24,6 @@
#include <linux/if_tunnel.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
-#include <linux/if_vlan.h>
-#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 98d3edbbc235..729c68763fe7 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -134,7 +134,7 @@ static void do_setup(struct net_device *netdev)
netdev->tx_queue_len = 0;
netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
- NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO;
+ NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
netdev->vlan_features = netdev->features;
netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 09d93c13cfd6..d21f77d875ba 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -150,15 +150,25 @@ static void free_port_rcu(struct rcu_head *rcu)
ovs_vport_free(vport_from_priv(netdev_vport));
}
-static void netdev_destroy(struct vport *vport)
+void ovs_netdev_detach_dev(struct vport *vport)
{
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
- rtnl_lock();
+ ASSERT_RTNL();
netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
netdev_rx_handler_unregister(netdev_vport->dev);
- netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
+ netdev_upper_dev_unlink(netdev_vport->dev,
+ netdev_master_upper_dev_get(netdev_vport->dev));
dev_set_promiscuity(netdev_vport->dev, -1);
+}
+
+static void netdev_destroy(struct vport *vport)
+{
+ struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+
+ rtnl_lock();
+ if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)
+ ovs_netdev_detach_dev(vport);
rtnl_unlock();
call_rcu(&netdev_vport->rcu, free_port_rcu);
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index dd298b5c5cdb..8df01c1127e5 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -39,5 +39,6 @@ netdev_vport_priv(const struct vport *vport)
}
const char *ovs_netdev_get_name(const struct vport *);
+void ovs_netdev_detach_dev(struct vport *);
#endif /* vport_netdev.h */
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index a481c03e2861..e797a50ac2be 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -29,7 +29,6 @@
#include <net/ip.h>
#include <net/udp.h>
#include <net/ip_tunnels.h>
-#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
@@ -173,7 +172,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
skb->local_df = 1;
- inet_get_local_port_range(&port_min, &port_max);
+ inet_get_local_port_range(net, &port_min, &port_max);
src_port = vxlan_src_port(port_min, port_max, skb);
err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 642ad42c416b..378c3a6acf84 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -51,10 +51,16 @@ static struct kmem_cache *rds_conn_slab;
static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
+ static u32 rds_hash_secret __read_mostly;
+
+ unsigned long hash;
+
+ net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));
+
/* Pass NULL, don't need struct net for hash */
- unsigned long hash = inet_ehashfn(NULL,
- be32_to_cpu(laddr), 0,
- be32_to_cpu(faddr), 0);
+ hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
+ be32_to_cpu(faddr), 0,
+ rds_hash_secret);
return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}
diff --git a/net/rds/rds.h b/net/rds/rds.h
index ec1d731ecff0..48f8ffc60f8f 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -749,7 +749,7 @@ void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg);
-extern void __rds_put_mr_final(struct rds_mr *mr);
+void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
if (atomic_dec_and_test(&mr->r_refcount))
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index a693aca2ae2e..5f43675ee1df 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -426,17 +426,16 @@ extern struct workqueue_struct *rxrpc_workqueue;
/*
* ar-accept.c
*/
-extern void rxrpc_accept_incoming_calls(struct work_struct *);
-extern struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *,
- unsigned long);
-extern int rxrpc_reject_call(struct rxrpc_sock *);
+void rxrpc_accept_incoming_calls(struct work_struct *);
+struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long);
+int rxrpc_reject_call(struct rxrpc_sock *);
/*
* ar-ack.c
*/
-extern void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
-extern void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
-extern void rxrpc_process_call(struct work_struct *);
+void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
+void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
+void rxrpc_process_call(struct work_struct *);
/*
* ar-call.c
@@ -445,19 +444,18 @@ extern struct kmem_cache *rxrpc_call_jar;
extern struct list_head rxrpc_calls;
extern rwlock_t rxrpc_call_lock;
-extern struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
- struct rxrpc_transport *,
- struct rxrpc_conn_bundle *,
- unsigned long, int, gfp_t);
-extern struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
- struct rxrpc_connection *,
- struct rxrpc_header *, gfp_t);
-extern struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *,
- unsigned long);
-extern void rxrpc_release_call(struct rxrpc_call *);
-extern void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
-extern void __rxrpc_put_call(struct rxrpc_call *);
-extern void __exit rxrpc_destroy_all_calls(void);
+struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
+ struct rxrpc_transport *,
+ struct rxrpc_conn_bundle *,
+ unsigned long, int, gfp_t);
+struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
+ struct rxrpc_connection *,
+ struct rxrpc_header *, gfp_t);
+struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, unsigned long);
+void rxrpc_release_call(struct rxrpc_call *);
+void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
+void __rxrpc_put_call(struct rxrpc_call *);
+void __exit rxrpc_destroy_all_calls(void);
/*
* ar-connection.c
@@ -465,19 +463,16 @@ extern void __exit rxrpc_destroy_all_calls(void);
extern struct list_head rxrpc_connections;
extern rwlock_t rxrpc_connection_lock;
-extern struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
- struct rxrpc_transport *,
- struct key *,
- __be16, gfp_t);
-extern void rxrpc_put_bundle(struct rxrpc_transport *,
- struct rxrpc_conn_bundle *);
-extern int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
- struct rxrpc_conn_bundle *, struct rxrpc_call *,
- gfp_t);
-extern void rxrpc_put_connection(struct rxrpc_connection *);
-extern void __exit rxrpc_destroy_all_connections(void);
-extern struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
- struct rxrpc_header *);
+struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
+ struct rxrpc_transport *,
+ struct key *, __be16, gfp_t);
+void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *);
+int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
+ struct rxrpc_conn_bundle *, struct rxrpc_call *, gfp_t);
+void rxrpc_put_connection(struct rxrpc_connection *);
+void __exit rxrpc_destroy_all_connections(void);
+struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
+ struct rxrpc_header *);
extern struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *,
gfp_t);
@@ -485,15 +480,15 @@ rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *,
/*
* ar-connevent.c
*/
-extern void rxrpc_process_connection(struct work_struct *);
-extern void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
-extern void rxrpc_reject_packets(struct work_struct *);
+void rxrpc_process_connection(struct work_struct *);
+void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
+void rxrpc_reject_packets(struct work_struct *);
/*
* ar-error.c
*/
-extern void rxrpc_UDP_error_report(struct sock *);
-extern void rxrpc_UDP_error_handler(struct work_struct *);
+void rxrpc_UDP_error_report(struct sock *);
+void rxrpc_UDP_error_handler(struct work_struct *);
/*
* ar-input.c
@@ -501,18 +496,17 @@ extern void rxrpc_UDP_error_handler(struct work_struct *);
extern unsigned long rxrpc_ack_timeout;
extern const char *rxrpc_pkts[];
-extern void rxrpc_data_ready(struct sock *, int);
-extern int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool,
- bool);
-extern void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
+void rxrpc_data_ready(struct sock *, int);
+int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
+void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
/*
* ar-local.c
*/
extern rwlock_t rxrpc_local_lock;
-extern struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
-extern void rxrpc_put_local(struct rxrpc_local *);
-extern void __exit rxrpc_destroy_all_locals(void);
+struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
+void rxrpc_put_local(struct rxrpc_local *);
+void __exit rxrpc_destroy_all_locals(void);
/*
* ar-key.c
@@ -520,31 +514,29 @@ extern void __exit rxrpc_destroy_all_locals(void);
extern struct key_type key_type_rxrpc;
extern struct key_type key_type_rxrpc_s;
-extern int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
-extern int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
-extern int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *,
- time_t, u32);
+int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
+int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
+int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
+ u32);
/*
* ar-output.c
*/
extern int rxrpc_resend_timeout;
-extern int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
-extern int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
- struct rxrpc_transport *, struct msghdr *,
- size_t);
-extern int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *,
- struct msghdr *, size_t);
+int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
+int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
+ struct rxrpc_transport *, struct msghdr *, size_t);
+int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *, struct msghdr *,
+ size_t);
/*
* ar-peer.c
*/
-extern struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
-extern void rxrpc_put_peer(struct rxrpc_peer *);
-extern struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *,
- __be32, __be16);
-extern void __exit rxrpc_destroy_all_peers(void);
+struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
+void rxrpc_put_peer(struct rxrpc_peer *);
+struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *, __be32, __be16);
+void __exit rxrpc_destroy_all_peers(void);
/*
* ar-proc.c
@@ -556,38 +548,36 @@ extern const struct file_operations rxrpc_connection_seq_fops;
/*
* ar-recvmsg.c
*/
-extern void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
-extern int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *,
- size_t, int);
+void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
+int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
+ int);
/*
* ar-security.c
*/
-extern int rxrpc_register_security(struct rxrpc_security *);
-extern void rxrpc_unregister_security(struct rxrpc_security *);
-extern int rxrpc_init_client_conn_security(struct rxrpc_connection *);
-extern int rxrpc_init_server_conn_security(struct rxrpc_connection *);
-extern int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *,
- size_t, void *);
-extern int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *,
- u32 *);
-extern void rxrpc_clear_conn_security(struct rxrpc_connection *);
+int rxrpc_register_security(struct rxrpc_security *);
+void rxrpc_unregister_security(struct rxrpc_security *);
+int rxrpc_init_client_conn_security(struct rxrpc_connection *);
+int rxrpc_init_server_conn_security(struct rxrpc_connection *);
+int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *, size_t,
+ void *);
+int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *, u32 *);
+void rxrpc_clear_conn_security(struct rxrpc_connection *);
/*
* ar-skbuff.c
*/
-extern void rxrpc_packet_destructor(struct sk_buff *);
+void rxrpc_packet_destructor(struct sk_buff *);
/*
* ar-transport.c
*/
-extern struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
- struct rxrpc_peer *,
- gfp_t);
-extern void rxrpc_put_transport(struct rxrpc_transport *);
-extern void __exit rxrpc_destroy_all_transports(void);
-extern struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
- struct rxrpc_peer *);
+struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
+ struct rxrpc_peer *, gfp_t);
+void rxrpc_put_transport(struct rxrpc_transport *);
+void __exit rxrpc_destroy_all_transports(void);
+struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
+ struct rxrpc_peer *);
/*
* debug tracing
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index c03a32a0418e..ad1f1d819203 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -443,6 +443,16 @@ config NET_CLS_CGROUP
To compile this code as a module, choose M here: the
module will be called cls_cgroup.
+config NET_CLS_BPF
+ tristate "BPF-based classifier"
+ select NET_CLS
+ ---help---
+ If you say Y here, you will be able to classify packets based on
+ programmable BPF (JIT'ed) filters as an alternative to ematches.
+
+ To compile this code as a module, choose M here: the module will
+ be called cls_bpf.
+
config NET_EMATCH
bool "Extended Matches"
select NET_CLS
diff --git a/net/sched/Makefile b/net/sched/Makefile
index e5f9abe9a5db..35fa47a494ab 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_NET_CLS_RSVP6) += cls_rsvp6.o
obj-$(CONFIG_NET_CLS_BASIC) += cls_basic.o
obj-$(CONFIG_NET_CLS_FLOW) += cls_flow.o
obj-$(CONFIG_NET_CLS_CGROUP) += cls_cgroup.o
+obj-$(CONFIG_NET_CLS_BPF) += cls_bpf.o
obj-$(CONFIG_NET_EMATCH) += ematch.o
obj-$(CONFIG_NET_EMATCH_CMP) += em_cmp.o
obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 189e3c5b3d09..272d8e924cf6 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -231,14 +231,14 @@ override:
}
if (R_tab) {
police->rate_present = true;
- psched_ratecfg_precompute(&police->rate, &R_tab->rate);
+ psched_ratecfg_precompute(&police->rate, &R_tab->rate, 0);
qdisc_put_rtab(R_tab);
} else {
police->rate_present = false;
}
if (P_tab) {
police->peak_present = true;
- psched_ratecfg_precompute(&police->peak, &P_tab->rate);
+ psched_ratecfg_precompute(&police->peak, &P_tab->rate, 0);
qdisc_put_rtab(P_tab);
} else {
police->peak_present = false;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index d76a35d0dc85..636d9131d870 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -137,7 +137,7 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp,
struct nlattr **tb,
struct nlattr *est)
{
- int err = -EINVAL;
+ int err;
struct tcf_exts e;
struct tcf_ematch_tree t;
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
new file mode 100644
index 000000000000..1002a8226281
--- /dev/null
+++ b/net/sched/cls_bpf.c
@@ -0,0 +1,385 @@
+/*
+ * Berkeley Packet Filter based traffic classifier
+ *
+ * Might be used to classify traffic through flexible, user-defined and
+ * possibly JIT-ed BPF filters for traffic control as an alternative to
+ * ematches.
+ *
+ * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/filter.h>
+#include <net/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/sock.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
+MODULE_DESCRIPTION("TC BPF based classifier");
+
+struct cls_bpf_head {
+ struct list_head plist;
+ u32 hgen;
+};
+
+struct cls_bpf_prog {
+ struct sk_filter *filter;
+ struct sock_filter *bpf_ops;
+ struct tcf_exts exts;
+ struct tcf_result res;
+ struct list_head link;
+ u32 handle;
+ u16 bpf_len;
+};
+
+static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
+ [TCA_BPF_CLASSID] = { .type = NLA_U32 },
+ [TCA_BPF_OPS_LEN] = { .type = NLA_U16 },
+ [TCA_BPF_OPS] = { .type = NLA_BINARY,
+ .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
+};
+
+static const struct tcf_ext_map bpf_ext_map = {
+ .action = TCA_BPF_ACT,
+ .police = TCA_BPF_POLICE,
+};
+
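+/* Run every attached BPF program over the skb; the first program returning a
+ * non-zero result classifies the packet (a result other than -1 also
+ * overrides the configured class ID). */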
+static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res)
+{
+ struct cls_bpf_head *head = tp->root;
+ struct cls_bpf_prog *prog;
+ int ret;
+
+ list_for_each_entry(prog, &head->plist, link) {
+ int filter_res = SK_RUN_FILTER(prog->filter, skb);
+
+ if (filter_res == 0)
+ continue;
+
+ *res = prog->res;
+ if (filter_res != -1)
+ res->classid = filter_res;
+
+ ret = tcf_exts_exec(skb, &prog->exts, res);
+ if (ret < 0)
+ continue;
+
+ return ret;
+ }
+
+ return -1;
+}
+
+static int cls_bpf_init(struct tcf_proto *tp)
+{
+ struct cls_bpf_head *head;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (head == NULL)
+ return -ENOBUFS;
+
+ INIT_LIST_HEAD(&head->plist);
+ tp->root = head;
+
+ return 0;
+}
+
+static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
+{
+ tcf_unbind_filter(tp, &prog->res);
+ tcf_exts_destroy(tp, &prog->exts);
+
+ sk_unattached_filter_destroy(prog->filter);
+
+ kfree(prog->bpf_ops);
+ kfree(prog);
+}
+
+static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
+{
+ struct cls_bpf_head *head = tp->root;
+ struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg;
+
+ list_for_each_entry(prog, &head->plist, link) {
+ if (prog == todel) {
+ tcf_tree_lock(tp);
+ list_del(&prog->link);
+ tcf_tree_unlock(tp);
+
+ cls_bpf_delete_prog(tp, prog);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static void cls_bpf_destroy(struct tcf_proto *tp)
+{
+ struct cls_bpf_head *head = tp->root;
+ struct cls_bpf_prog *prog, *tmp;
+
+ list_for_each_entry_safe(prog, tmp, &head->plist, link) {
+ list_del(&prog->link);
+ cls_bpf_delete_prog(tp, prog);
+ }
+
+ kfree(head);
+}
+
+static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
+{
+ struct cls_bpf_head *head = tp->root;
+ struct cls_bpf_prog *prog;
+ unsigned long ret = 0UL;
+
+ if (head == NULL)
+ return 0UL;
+
+ list_for_each_entry(prog, &head->plist, link) {
+ if (prog->handle == handle) {
+ ret = (unsigned long) prog;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
+{
+}
+
+static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
+ struct cls_bpf_prog *prog,
+ unsigned long base, struct nlattr **tb,
+ struct nlattr *est)
+{
+ struct sock_filter *bpf_ops, *bpf_old;
+ struct tcf_exts exts;
+ struct sock_fprog tmp;
+ struct sk_filter *fp, *fp_old;
+ u16 bpf_size, bpf_len;
+ u32 classid;
+ int ret;
+
+ if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
+ return -EINVAL;
+
+ ret = tcf_exts_validate(net, tp, tb, est, &exts, &bpf_ext_map);
+ if (ret < 0)
+ return ret;
+
+ classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
+ bpf_len = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
+ if (bpf_len > BPF_MAXINSNS || bpf_len == 0) {
+ ret = -EINVAL;
+ goto errout;
+ }
+
+ bpf_size = bpf_len * sizeof(*bpf_ops);
+ bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
+ if (bpf_ops == NULL) {
+ ret = -ENOMEM;
+ goto errout;
+ }
+
+ memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
+
+ tmp.len = bpf_len;
+ tmp.filter = (struct sock_filter __user *) bpf_ops;
+
+ ret = sk_unattached_filter_create(&fp, &tmp);
+ if (ret)
+ goto errout_free;
+
+ tcf_tree_lock(tp);
+ fp_old = prog->filter;
+ bpf_old = prog->bpf_ops;
+
+ prog->bpf_len = bpf_len;
+ prog->bpf_ops = bpf_ops;
+ prog->filter = fp;
+ prog->res.classid = classid;
+ tcf_tree_unlock(tp);
+
+ tcf_bind_filter(tp, &prog->res, base);
+ tcf_exts_change(tp, &prog->exts, &exts);
+
+ if (fp_old)
+ sk_unattached_filter_destroy(fp_old);
+ if (bpf_old)
+ kfree(bpf_old);
+
+ return 0;
+
+errout_free:
+ kfree(bpf_ops);
+errout:
+ tcf_exts_destroy(tp, &exts);
+ return ret;
+}
+
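+/* Pick an unused handle for a new program by probing successive values of
+ * 'hgen'; returns 0 if no free handle could be found. */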
+static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
+ struct cls_bpf_head *head)
+{
+ unsigned int i = 0x80000000;
+
+ do {
+ if (++head->hgen == 0x7FFFFFFF)
+ head->hgen = 1;
+ } while (--i > 0 && cls_bpf_get(tp, head->hgen));
+ if (i == 0) {
+ pr_err("Insufficient number of handles\n");
+ return 0;
+ }
+
+ return head->hgen;
+}
+
+static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
+ struct tcf_proto *tp, unsigned long base,
+ u32 handle, struct nlattr **tca,
+ unsigned long *arg)
+{
+ struct cls_bpf_head *head = tp->root;
+ struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
+ struct nlattr *tb[TCA_BPF_MAX + 1];
+ int ret;
+
+ if (tca[TCA_OPTIONS] == NULL)
+ return -EINVAL;
+
+ ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
+ if (ret < 0)
+ return ret;
+
+ if (prog != NULL) {
+ if (handle && prog->handle != handle)
+ return -EINVAL;
+ return cls_bpf_modify_existing(net, tp, prog, base, tb,
+ tca[TCA_RATE]);
+ }
+
+ prog = kzalloc(sizeof(*prog), GFP_KERNEL);
+ if (prog == NULL)
+ return -ENOBUFS;
+
+ if (handle == 0)
+ prog->handle = cls_bpf_grab_new_handle(tp, head);
+ else
+ prog->handle = handle;
+ if (prog->handle == 0) {
+ ret = -EINVAL;
+ goto errout;
+ }
+
+ ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE]);
+ if (ret < 0)
+ goto errout;
+
+ tcf_tree_lock(tp);
+ list_add(&prog->link, &head->plist);
+ tcf_tree_unlock(tp);
+
+ *arg = (unsigned long) prog;
+
+ return 0;
+errout:
+ if (*arg == 0UL && prog)
+ kfree(prog);
+
+ return ret;
+}
+
+static int cls_bpf_dump(struct tcf_proto *tp, unsigned long fh,
+ struct sk_buff *skb, struct tcmsg *tm)
+{
+ struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
+ struct nlattr *nest, *nla;
+
+ if (prog == NULL)
+ return skb->len;
+
+ tm->tcm_handle = prog->handle;
+
+ nest = nla_nest_start(skb, TCA_OPTIONS);
+ if (nest == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_len))
+ goto nla_put_failure;
+
+ nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_len *
+ sizeof(struct sock_filter));
+ if (nla == NULL)
+ goto nla_put_failure;
+
+ memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
+
+ if (tcf_exts_dump(skb, &prog->exts, &bpf_ext_map) < 0)
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+
+ if (tcf_exts_dump_stats(skb, &prog->exts, &bpf_ext_map) < 0)
+ goto nla_put_failure;
+
+ return skb->len;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -1;
+}
+
+static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+{
+ struct cls_bpf_head *head = tp->root;
+ struct cls_bpf_prog *prog;
+
+ list_for_each_entry(prog, &head->plist, link) {
+ if (arg->count < arg->skip)
+ goto skip;
+ if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+skip:
+ arg->count++;
+ }
+}
+
+static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
+ .kind = "bpf",
+ .owner = THIS_MODULE,
+ .classify = cls_bpf_classify,
+ .init = cls_bpf_init,
+ .destroy = cls_bpf_destroy,
+ .get = cls_bpf_get,
+ .put = cls_bpf_put,
+ .change = cls_bpf_change,
+ .delete = cls_bpf_delete,
+ .walk = cls_bpf_walk,
+ .dump = cls_bpf_dump,
+};
+
+static int __init cls_bpf_init_mod(void)
+{
+ return register_tcf_proto_ops(&cls_bpf_ops);
+}
+
+static void __exit cls_bpf_exit_mod(void)
+{
+ unregister_tcf_proto_ops(&cls_bpf_ops);
+}
+
+module_init(cls_bpf_init_mod);
+module_exit(cls_bpf_exit_mod);
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 867b4a3e3980..16006c92c3fd 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -72,11 +72,11 @@ static void cgrp_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *p;
- void *v;
+ struct cgroup_cls_state *cs = css_cls_state(css);
+ void *v = (void *)(unsigned long)cs->classid;
cgroup_taskset_for_each(p, css, tset) {
task_lock(p);
- v = (void *)(unsigned long)task_cls_classid(p);
iterate_fd(p->files, 0, update_classid, v);
task_unlock(p);
}
diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c
index 938b7cbf5627..527aeb7a3ff0 100644
--- a/net/sched/em_ipset.c
+++ b/net/sched/em_ipset.c
@@ -24,11 +24,12 @@ static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len,
{
struct xt_set_info *set = data;
ip_set_id_t index;
+ struct net *net = dev_net(qdisc_dev(tp->q));
if (data_len != sizeof(*set))
return -EINVAL;
- index = ip_set_nfnl_get_byindex(set->index);
+ index = ip_set_nfnl_get_byindex(net, set->index);
if (index == IPSET_INVALID_ID)
return -ENOENT;
@@ -37,7 +38,7 @@ static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len,
if (em->data)
return 0;
- ip_set_nfnl_put(index);
+ ip_set_nfnl_put(net, index);
return -ENOMEM;
}
@@ -45,7 +46,7 @@ static void em_ipset_destroy(struct tcf_proto *p, struct tcf_ematch *em)
{
const struct xt_set_info *set = (const void *) em->data;
if (set) {
- ip_set_nfnl_put(set->index);
+ ip_set_nfnl_put(dev_net(qdisc_dev(p->q)), set->index);
kfree((void *) em->data);
}
}
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 7c3de6ffa516..e5cef9567225 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -793,8 +793,10 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len,
goto errout;
meta = kzalloc(sizeof(*meta), GFP_KERNEL);
- if (meta == NULL)
+ if (meta == NULL) {
+ err = -ENOMEM;
goto errout;
+ }
memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2adda7fa2d39..cd81505662b8 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -737,9 +737,11 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
const struct Qdisc_class_ops *cops;
unsigned long cl;
u32 parentid;
+ int drops;
if (n == 0)
return;
+ drops = max_t(int, n, 0);
while ((parentid = sch->parent)) {
if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
return;
@@ -756,6 +758,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
cops->put(sch, cl);
}
sch->q.qlen -= n;
+ sch->qstats.drops += drops;
}
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index a2fef8b10b96..fdc041c57853 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -255,6 +255,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
f->socket_hash != sk->sk_hash)) {
f->credit = q->initial_quantum;
f->socket_hash = sk->sk_hash;
+ f->time_next_packet = 0ULL;
}
return f;
}
@@ -472,20 +473,16 @@ begin:
if (f->credit > 0 || !q->rate_enable)
goto out;
- if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
- rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
+ rate = q->flow_max_rate;
+ if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
+ rate = min(skb->sk->sk_pacing_rate, rate);
- rate = min(rate, q->flow_max_rate);
- } else {
- rate = q->flow_max_rate;
- if (rate == ~0U)
- goto out;
- }
- if (rate) {
+ if (rate != ~0U) {
u32 plen = max(qdisc_pkt_len(skb), q->quantum);
u64 len = (u64)plen * NSEC_PER_SEC;
- do_div(len, rate);
+ if (likely(rate))
+ do_div(len, rate);
/* Since socket rate can change later,
* clamp the delay to 125 ms.
* TODO: maybe segment the too big skb, as in commit
@@ -656,7 +653,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
if (tb[TCA_FQ_INITIAL_QUANTUM])
- q->quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
+ q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);
@@ -735,12 +732,14 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
if (opts == NULL)
goto nla_put_failure;
+ /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore,
+ * do not bother giving its value
+ */
if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
- nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) ||
nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
goto nla_put_failure;
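
Aside on the pacing arithmetic in the sch_fq hunk above: the delay before releasing the next packet of a flow is plen * NSEC_PER_SEC / rate (rate in bytes per second), and the in-tree comment clamps the result to 125 ms. A minimal userspace sketch of that calculation; the packet size and rate are made-up sample values, not from the patch.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL
#define NSEC_PER_MSEC	1000000ULL

int main(void)
{
	uint64_t plen = 65536;		/* sample: one large GSO packet, in bytes */
	uint64_t rate = 125000;		/* sample: 1 Mbit/s expressed as bytes/sec */
	uint64_t len  = plen * NSEC_PER_SEC / rate;	/* 524288000 ns, about 524 ms */

	/* same clamp as the "clamp the delay to 125 ms" comment above */
	if (len > 125 * NSEC_PER_MSEC)
		len = 125 * NSEC_PER_MSEC;

	printf("pacing delay: %llu ns\n", (unsigned long long)len);
	return 0;
}
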
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index a74e278654aa..7fc899a943a8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -829,7 +829,7 @@ void dev_deactivate_many(struct list_head *head)
struct net_device *dev;
bool sync_needed = false;
- list_for_each_entry(dev, head, unreg_list) {
+ list_for_each_entry(dev, head, close_list) {
netdev_for_each_tx_queue(dev, dev_deactivate_queue,
&noop_qdisc);
if (dev_ingress_queue(dev))
@@ -848,7 +848,7 @@ void dev_deactivate_many(struct list_head *head)
synchronize_net();
/* Wait for outstanding qdisc_run calls. */
- list_for_each_entry(dev, head, unreg_list)
+ list_for_each_entry(dev, head, close_list)
while (some_qdisc_is_busy(dev))
yield();
}
@@ -857,7 +857,7 @@ void dev_deactivate(struct net_device *dev)
{
LIST_HEAD(single);
- list_add(&dev->unreg_list, &single);
+ list_add(&dev->close_list, &single);
dev_deactivate_many(&single);
list_del(&single);
}
@@ -910,11 +910,12 @@ void dev_shutdown(struct net_device *dev)
}
void psched_ratecfg_precompute(struct psched_ratecfg *r,
- const struct tc_ratespec *conf)
+ const struct tc_ratespec *conf,
+ u64 rate64)
{
memset(r, 0, sizeof(*r));
r->overhead = conf->overhead;
- r->rate_bytes_ps = conf->rate;
+ r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
r->mult = 1;
/*
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 863846cc5513..0e1e38b40025 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -997,6 +997,8 @@ static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
[TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
[TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
+ [TCA_HTB_RATE64] = { .type = NLA_U64 },
+ [TCA_HTB_CEIL64] = { .type = NLA_U64 },
};
static void htb_work_func(struct work_struct *work)
@@ -1114,6 +1116,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
opt.level = cl->level;
if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
+ if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
+ nla_put_u64(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps))
+ goto nla_put_failure;
+ if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
+ nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
spin_unlock_bh(root_lock);
@@ -1332,6 +1340,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
struct nlattr *tb[TCA_HTB_MAX + 1];
struct tc_htb_opt *hopt;
+ u64 rate64, ceil64;
/* extract all subattrs from opt attr */
if (!opt)
@@ -1491,8 +1500,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
cl->prio = TC_HTB_NUMPRIO - 1;
}
- psched_ratecfg_precompute(&cl->rate, &hopt->rate);
- psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
+ rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
+
+ ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
+
+ psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
+ psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
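
A note on the new TCA_HTB_RATE64/TCA_HTB_CEIL64 attributes above: the legacy tc_ratespec.rate field is a 32-bit count of bytes per second, so rates of 2^32 B/s and above cannot be expressed in it, which is also why htb_dump_class() only emits the 64-bit attributes when rate_bytes_ps >= (1ULL << 32). A quick check of where that threshold sits:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t limit = 1ULL << 32;			/* 4294967296 bytes/sec */
	double gbit = (double)limit * 8.0 / 1e9;	/* convert to Gbit/s */

	printf("32-bit rate field overflows at %.2f Gbit/s\n", gbit);	/* ~34.36 */
	return 0;
}
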
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a6d788d45216..75c94e59a3bd 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -235,7 +235,6 @@ static bool loss_4state(struct netem_sched_data *q)
clg->state = 2;
else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
clg->state = 1;
- return true;
} else if (clg->a2 + clg->a3 < rnd) {
clg->state = 3;
return true;
@@ -358,6 +357,21 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
return PSCHED_NS2TICKS(ticks);
}
+static void tfifo_reset(struct Qdisc *sch)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+ struct rb_node *p;
+
+ while ((p = rb_first(&q->t_root))) {
+ struct sk_buff *skb = netem_rb_to_skb(p);
+
+ rb_erase(p, &q->t_root);
+ skb->next = NULL;
+ skb->prev = NULL;
+ kfree_skb(skb);
+ }
+}
+
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
@@ -520,6 +534,7 @@ static unsigned int netem_drop(struct Qdisc *sch)
skb->next = NULL;
skb->prev = NULL;
len = qdisc_pkt_len(skb);
+ sch->qstats.backlog -= len;
kfree_skb(skb);
}
}
@@ -609,6 +624,7 @@ static void netem_reset(struct Qdisc *sch)
struct netem_sched_data *q = qdisc_priv(sch);
qdisc_reset_queue(sch);
+ tfifo_reset(sch);
if (q->qdisc)
qdisc_reset(q->qdisc);
qdisc_watchdog_cancel(&q->watchdog);
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 1aaf1b6e51a2..b0571224f3c9 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -341,9 +341,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
q->tokens = q->buffer;
q->ptokens = q->mtu;
- psched_ratecfg_precompute(&q->rate, &rtab->rate);
+ psched_ratecfg_precompute(&q->rate, &rtab->rate, 0);
if (ptab) {
- psched_ratecfg_precompute(&q->peak, &ptab->rate);
+ psched_ratecfg_precompute(&q->peak, &ptab->rate, 0);
q->peak_present = true;
} else {
q->peak_present = false;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index cef509985192..c9b91cb1cb0d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -602,7 +602,7 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
/* Start a T3 timer here in case it wasn't running so
* that these migrated packets have a chance to get
- * retrnasmitted.
+ * retransmitted.
*/
if (!timer_pending(&active->T3_rtx_timer))
if (!mod_timer(&active->T3_rtx_timer,
@@ -665,7 +665,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
/* Set the path max_retrans. */
peer->pathmaxrxt = asoc->pathmaxrxt;
- /* And the partial failure retrnas threshold */
+ /* And the partial failure retrans threshold */
peer->pf_retrans = asoc->pf_retrans;
/* Initialize the peer's SACK delay timeout based on the
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 8c4fa5dec824..46b5977978a1 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -539,18 +539,14 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
for (i = 0; i < n_elt; i++) {
id = ntohs(hmacs->hmac_ids[i]);
- /* Check the id is in the supported range */
- if (id > SCTP_AUTH_HMAC_ID_MAX) {
- id = 0;
- continue;
- }
-
- /* See is we support the id. Supported IDs have name and
- * length fields set, so that we can allocated and use
+ /* Check the id is in the supported range. And
+ * see if we support the id. Supported IDs have name and
+ * length fields set, so that we can allocate and use
* them. We can safely just check for name, for without the
* name, we can't allocate the TFM.
*/
- if (!sctp_hmac_list[id].hmac_name) {
+ if (id > SCTP_AUTH_HMAC_ID_MAX ||
+ !sctp_hmac_list[id].hmac_name) {
id = 0;
continue;
}
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 7bd5ed4a8657..f2044fcb9dd1 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -201,7 +201,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
max = asoc->frag_point;
/* If the the peer requested that we authenticate DATA chunks
- * we need to accound for bundling of the AUTH chunks along with
+ * we need to account for bundling of the AUTH chunks along with
* DATA.
*/
if (sctp_auth_send_cid(SCTP_CID_DATA, asoc)) {
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index e7b2d4fe2b6a..7567e6f1a920 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -279,7 +279,9 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port));
rcu_read_lock();
list_for_each_entry_rcu(laddr, &bp->address_list, list) {
- if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
+ if (!laddr->valid || laddr->state == SCTP_ADDR_DEL ||
+ (laddr->state != SCTP_ADDR_SRC &&
+ !asoc->src_out_of_asoc_ok))
continue;
/* Do not compare against v4 addrs */
@@ -426,20 +428,20 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
{
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_port = 0;
- addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
+ addr->v6.sin6_addr = sk->sk_v6_rcv_saddr;
}
/* Initialize sk->sk_rcv_saddr from sctp_addr. */
static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
if (addr->sa.sa_family == AF_INET && sctp_sk(sk)->v4mapped) {
- inet6_sk(sk)->rcv_saddr.s6_addr32[0] = 0;
- inet6_sk(sk)->rcv_saddr.s6_addr32[1] = 0;
- inet6_sk(sk)->rcv_saddr.s6_addr32[2] = htonl(0x0000ffff);
- inet6_sk(sk)->rcv_saddr.s6_addr32[3] =
+ sk->sk_v6_rcv_saddr.s6_addr32[0] = 0;
+ sk->sk_v6_rcv_saddr.s6_addr32[1] = 0;
+ sk->sk_v6_rcv_saddr.s6_addr32[2] = htonl(0x0000ffff);
+ sk->sk_v6_rcv_saddr.s6_addr32[3] =
addr->v4.sin_addr.s_addr;
} else {
- inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
+ sk->sk_v6_rcv_saddr = addr->v6.sin6_addr;
}
}
@@ -447,12 +449,12 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
{
if (addr->sa.sa_family == AF_INET && sctp_sk(sk)->v4mapped) {
- inet6_sk(sk)->daddr.s6_addr32[0] = 0;
- inet6_sk(sk)->daddr.s6_addr32[1] = 0;
- inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff);
- inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+ sk->sk_v6_daddr.s6_addr32[0] = 0;
+ sk->sk_v6_daddr.s6_addr32[1] = 0;
+ sk->sk_v6_daddr.s6_addr32[2] = htonl(0x0000ffff);
+ sk->sk_v6_daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
} else {
- inet6_sk(sk)->daddr = addr->v6.sin6_addr;
+ sk->sk_v6_daddr = addr->v6.sin6_addr;
}
}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 0ac3a65daccb..e650978daf27 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -390,7 +390,6 @@ int sctp_packet_transmit(struct sctp_packet *packet)
__u8 has_data = 0;
struct dst_entry *dst = tp->dst;
unsigned char *auth = NULL; /* pointer to auth in skb data */
- __u32 cksum_buf_len = sizeof(struct sctphdr);
pr_debug("%s: packet:%p\n", __func__, packet);
@@ -493,7 +492,6 @@ int sctp_packet_transmit(struct sctp_packet *packet)
if (chunk == packet->auth)
auth = skb_tail_pointer(nskb);
- cksum_buf_len += chunk->skb->len;
memcpy(skb_put(nskb, chunk->skb->len),
chunk->skb->data, chunk->skb->len);
@@ -536,13 +534,9 @@ int sctp_packet_transmit(struct sctp_packet *packet)
* by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
*/
if (!sctp_checksum_disable) {
- if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
- __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
-
- /* 3) Put the resultant value into the checksum field in the
- * common header, and leave the rest of the bits unchanged.
- */
- sh->checksum = sctp_end_cksum(crc32);
+ if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
+ (dst_xfrm(dst) != NULL) || packet->ipfragok) {
+ sh->checksum = sctp_compute_cksum(nskb, 0);
} else {
/* no need to seed pseudo checksum for SCTP */
nskb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index d244a23ab8d3..fe690320b1e4 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1297,6 +1297,13 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc)
/* Turn an skb into a chunk.
* FIXME: Eventually move the structure directly inside the skb->cb[].
+ *
+ * sctpimpguide-05.txt Section 2.8.2
+ * M1) Each time a new DATA chunk is transmitted
+ * set the 'TSN.Missing.Report' count for that TSN to 0. The
+ * 'TSN.Missing.Report' count will be used to determine missing chunks
+ * and when to fast retransmit.
+ *
*/
struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
const struct sctp_association *asoc,
@@ -1314,29 +1321,9 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
INIT_LIST_HEAD(&retval->list);
retval->skb = skb;
retval->asoc = (struct sctp_association *)asoc;
- retval->has_tsn = 0;
- retval->has_ssn = 0;
- retval->rtt_in_progress = 0;
- retval->sent_at = 0;
retval->singleton = 1;
- retval->end_of_packet = 0;
- retval->ecn_ce_done = 0;
- retval->pdiscard = 0;
-
- /* sctpimpguide-05.txt Section 2.8.2
- * M1) Each time a new DATA chunk is transmitted
- * set the 'TSN.Missing.Report' count for that TSN to 0. The
- * 'TSN.Missing.Report' count will be used to determine missing chunks
- * and when to fast retransmit.
- */
- retval->tsn_missing_report = 0;
- retval->tsn_gap_acked = 0;
- retval->fast_retransmit = SCTP_CAN_FRTX;
- /* If this is a fragmented message, track all fragments
- * of the message (for SEND_FAILED).
- */
- retval->msg = NULL;
+ retval->fast_retransmit = SCTP_CAN_FRTX;
/* Polish the bead hole. */
INIT_LIST_HEAD(&retval->transmitted_list);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 666c66842799..1a6eef39ab2f 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -860,7 +860,6 @@ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
(!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
return;
- BUG_ON(asoc->peer.primary_path == NULL);
sctp_unhash_established(asoc);
sctp_association_free(asoc);
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 911b71b26b0e..72046b9729a8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5890,7 +5890,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
int low, high, remaining, index;
unsigned int rover;
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(sock_net(sk), &low, &high);
remaining = (high - low) + 1;
rover = net_random() % remaining + low;
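
For context on the hunk above: the ephemeral port range is now read from the socket's own network namespace rather than a single global setting, and the surrounding arithmetic is unchanged. Assuming the then-common default range of 32768 to 61000, remaining = (61000 - 32768) + 1 = 28233, so rover starts at a pseudo-random port within [32768, 61000] and the caller probes onward from there.
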
diff --git a/net/socket.c b/net/socket.c
index ebed4b68f768..c226aceee65b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1964,6 +1964,16 @@ struct used_address {
unsigned int name_len;
};
+static int copy_msghdr_from_user(struct msghdr *kmsg,
+ struct msghdr __user *umsg)
+{
+ if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+ return -EFAULT;
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+ return -EINVAL;
+ return 0;
+}
+
static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
struct msghdr *msg_sys, unsigned int flags,
struct used_address *used_address)
@@ -1982,8 +1992,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
if (MSG_CMSG_COMPAT & flags) {
if (get_compat_msghdr(msg_sys, msg_compat))
return -EFAULT;
- } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
- return -EFAULT;
+ } else {
+ err = copy_msghdr_from_user(msg_sys, msg);
+ if (err)
+ return err;
+ }
if (msg_sys->msg_iovlen > UIO_FASTIOV) {
err = -EMSGSIZE;
@@ -2191,8 +2204,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
if (MSG_CMSG_COMPAT & flags) {
if (get_compat_msghdr(msg_sys, msg_compat))
return -EFAULT;
- } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
- return -EFAULT;
+ } else {
+ err = copy_msghdr_from_user(msg_sys, msg);
+ if (err)
+ return err;
+ }
if (msg_sys->msg_iovlen > UIO_FASTIOV) {
err = -EMSGSIZE;
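
On the copy_msghdr_from_user() helper added above: it centralizes the msghdr copy for both ___sys_sendmsg() and ___sys_recvmsg() and rejects any msg_namelen larger than sizeof(struct sockaddr_storage) with -EINVAL, since the kernel later copies the caller-supplied address into sockaddr_storage sized buffers and must not trust a larger length. For reference, a minimal userspace sketch of the contract a well-formed caller follows; the UDP socket, port and payload are illustrative only.

#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <arpa/inet.h>

static ssize_t send_hello(int fd)
{
	struct sockaddr_in dst;
	struct iovec iov;
	struct msghdr msg;
	char payload[] = "hello";

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(9999);
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

	iov.iov_base = payload;
	iov.iov_len = sizeof(payload) - 1;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = &dst;
	msg.msg_namelen = sizeof(dst);	/* real address size, well under sockaddr_storage */
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	return sendmsg(fd, &msg, 0);
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	send_hello(fd);
	return 0;
}
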
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 084656671d6e..97912b40c254 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -420,41 +420,53 @@ static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
memcpy(gss_msg->databuf, &uid, sizeof(uid));
gss_msg->msg.data = gss_msg->databuf;
gss_msg->msg.len = sizeof(uid);
- BUG_ON(sizeof(uid) > UPCALL_BUF_LEN);
+
+ BUILD_BUG_ON(sizeof(uid) > sizeof(gss_msg->databuf));
}
-static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
+static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
const char *service_name,
const char *target_name)
{
struct gss_api_mech *mech = gss_msg->auth->mech;
char *p = gss_msg->databuf;
- int len = 0;
-
- gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
- mech->gm_name,
- from_kuid(&init_user_ns, gss_msg->uid));
- p += gss_msg->msg.len;
+ size_t buflen = sizeof(gss_msg->databuf);
+ int len;
+
+ len = scnprintf(p, buflen, "mech=%s uid=%d ", mech->gm_name,
+ from_kuid(&init_user_ns, gss_msg->uid));
+ buflen -= len;
+ p += len;
+ gss_msg->msg.len = len;
if (target_name) {
- len = sprintf(p, "target=%s ", target_name);
+ len = scnprintf(p, buflen, "target=%s ", target_name);
+ buflen -= len;
p += len;
gss_msg->msg.len += len;
}
if (service_name != NULL) {
- len = sprintf(p, "service=%s ", service_name);
+ len = scnprintf(p, buflen, "service=%s ", service_name);
+ buflen -= len;
p += len;
gss_msg->msg.len += len;
}
if (mech->gm_upcall_enctypes) {
- len = sprintf(p, "enctypes=%s ", mech->gm_upcall_enctypes);
+ len = scnprintf(p, buflen, "enctypes=%s ",
+ mech->gm_upcall_enctypes);
+ buflen -= len;
p += len;
gss_msg->msg.len += len;
}
- len = sprintf(p, "\n");
+ len = scnprintf(p, buflen, "\n");
+ if (len == 0)
+ goto out_overflow;
gss_msg->msg.len += len;
gss_msg->msg.data = gss_msg->databuf;
- BUG_ON(gss_msg->msg.len > UPCALL_BUF_LEN);
+ return 0;
+out_overflow:
+ WARN_ON_ONCE(1);
+ return -ENOMEM;
}
static struct gss_upcall_msg *
@@ -463,15 +475,15 @@ gss_alloc_msg(struct gss_auth *gss_auth,
{
struct gss_upcall_msg *gss_msg;
int vers;
+ int err = -ENOMEM;
gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
if (gss_msg == NULL)
- return ERR_PTR(-ENOMEM);
+ goto err;
vers = get_pipe_version(gss_auth->net);
- if (vers < 0) {
- kfree(gss_msg);
- return ERR_PTR(vers);
- }
+ err = vers;
+ if (err < 0)
+ goto err_free_msg;
gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
INIT_LIST_HEAD(&gss_msg->list);
rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
@@ -482,10 +494,17 @@ gss_alloc_msg(struct gss_auth *gss_auth,
switch (vers) {
case 0:
gss_encode_v0_msg(gss_msg);
+ break;
default:
- gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
+ err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
+ if (err)
+ goto err_free_msg;
};
return gss_msg;
+err_free_msg:
+ kfree(gss_msg);
+err:
+ return ERR_PTR(err);
}
static struct gss_upcall_msg *
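
Regarding the sprintf() to scnprintf() conversion above: scnprintf() returns the number of bytes actually stored (never more than the space left), so the running p/buflen bookkeeping can no longer step past gss_msg->databuf, and a zero return from the final write is a reliable overflow signal. A kernel-style sketch of that bounded-append pattern; build_upcall_string() and its arguments are hypothetical, not part of the patch.

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>

static int build_upcall_string(char *buf, size_t buflen,
			       const char *mech, int uid)
{
	char *p = buf;
	int len;

	len = scnprintf(p, buflen, "mech=%s uid=%d ", mech, uid);
	buflen -= len;
	p += len;

	/* further fields would be appended the same way */

	len = scnprintf(p, buflen, "\n");
	if (len == 0)		/* not even room for the trailing newline */
		return -ENOMEM;

	return p - buf + len;	/* total length of the encoded message */
}
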
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index 6cd930f3678f..6c981ddc19f8 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -150,7 +150,6 @@ gss_verify_mic_v2(struct krb5_ctx *ctx,
struct xdr_netobj cksumobj = {.len = sizeof(cksumdata),
.data = cksumdata};
s32 now;
- u64 seqnum;
u8 *ptr = read_token->data;
u8 *cksumkey;
u8 flags;
@@ -197,9 +196,10 @@ gss_verify_mic_v2(struct krb5_ctx *ctx,
if (now > ctx->endtime)
return GSS_S_CONTEXT_EXPIRED;
- /* do sequencing checks */
-
- seqnum = be64_to_cpup((__be64 *)ptr + 8);
+ /*
+ * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
+ * doesn't want it checked; see page 6 of rfc 2203.
+ */
return GSS_S_COMPLETE;
}
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 1da52d1406fc..42560e55d978 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -489,7 +489,6 @@ static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
s32 now;
- u64 seqnum;
u8 *ptr;
u8 flags = 0x00;
u16 ec, rrc;
@@ -525,7 +524,10 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
ec = be16_to_cpup((__be16 *)(ptr + 4));
rrc = be16_to_cpup((__be16 *)(ptr + 6));
- seqnum = be64_to_cpup((__be64 *)(ptr + 8));
+ /*
+ * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
+ * doesn't want it checked; see page 6 of rfc 2203.
+ */
if (rrc != 0)
rotate_left(offset + 16, buf, rrc);
@@ -574,8 +576,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
- /* Trim off the checksum blob */
- xdr_buf_trim(buf, GSS_KRB5_TOK_HDR_LEN + tailskip);
+ /* Trim off the trailing "extra count" and checksum blob */
+ xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
return GSS_S_COMPLETE;
}
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index f1eb0d16666c..458f85e9b0ba 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -298,7 +298,8 @@ int gssp_accept_sec_context_upcall(struct net *net,
if (res.context_handle) {
data->out_handle = rctxh.exported_context_token;
data->mech_oid.len = rctxh.mech.len;
- memcpy(data->mech_oid.data, rctxh.mech.data,
+ if (rctxh.mech.data)
+ memcpy(data->mech_oid.data, rctxh.mech.data,
data->mech_oid.len);
client_name = rctxh.src_name.display_name;
}
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index f0f78c5f1c7d..1ec19f6f0c2b 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -559,6 +559,8 @@ static int gssx_enc_cred(struct xdr_stream *xdr,
/* cred->elements */
err = dummy_enc_credel_array(xdr, &cred->elements);
+ if (err)
+ return err;
/* cred->cred_handle_reference */
err = gssx_enc_buffer(xdr, &cred->cred_handle_reference);
@@ -740,22 +742,20 @@ void gssx_enc_accept_sec_context(struct rpc_rqst *req,
goto done;
/* arg->context_handle */
- if (arg->context_handle) {
+ if (arg->context_handle)
err = gssx_enc_ctx(xdr, arg->context_handle);
- if (err)
- goto done;
- } else {
+ else
err = gssx_enc_bool(xdr, 0);
- }
+ if (err)
+ goto done;
/* arg->cred_handle */
- if (arg->cred_handle) {
+ if (arg->cred_handle)
err = gssx_enc_cred(xdr, arg->cred_handle);
- if (err)
- goto done;
- } else {
+ else
err = gssx_enc_bool(xdr, 0);
- }
+ if (err)
+ goto done;
/* arg->input_token */
err = gssx_enc_in_token(xdr, &arg->input_token);
@@ -763,13 +763,12 @@ void gssx_enc_accept_sec_context(struct rpc_rqst *req,
goto done;
/* arg->input_cb */
- if (arg->input_cb) {
+ if (arg->input_cb)
err = gssx_enc_cb(xdr, arg->input_cb);
- if (err)
- goto done;
- } else {
+ else
err = gssx_enc_bool(xdr, 0);
- }
+ if (err)
+ goto done;
err = gssx_enc_bool(xdr, arg->ret_deleg_cred);
if (err)
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 09fb638bcaa4..008cdade5aae 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1167,8 +1167,8 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
if (!ud->found_creds) {
/* userspace seem buggy, we should always get at least a
* mapping to nobody */
- dprintk("RPC: No creds found, marking Negative!\n");
- set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+ dprintk("RPC: No creds found!\n");
+ goto out;
} else {
/* steal creds */
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 77479606a971..dab09dac8fc7 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -25,12 +25,12 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
+#include <linux/rcupdate.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
-#include <linux/rcupdate.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
@@ -264,6 +264,26 @@ void rpc_clients_notifier_unregister(void)
return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}
+static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
+ struct rpc_xprt *xprt,
+ const struct rpc_timeout *timeout)
+{
+ struct rpc_xprt *old;
+
+ spin_lock(&clnt->cl_lock);
+ old = rcu_dereference_protected(clnt->cl_xprt,
+ lockdep_is_held(&clnt->cl_lock));
+
+ if (!xprt_bound(xprt))
+ clnt->cl_autobind = 1;
+
+ clnt->cl_timeout = timeout;
+ rcu_assign_pointer(clnt->cl_xprt, xprt);
+ spin_unlock(&clnt->cl_lock);
+
+ return old;
+}
+
static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
{
clnt->cl_nodelen = strlen(nodename);
@@ -272,12 +292,13 @@ static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
}
-static int rpc_client_register(const struct rpc_create_args *args,
- struct rpc_clnt *clnt)
+static int rpc_client_register(struct rpc_clnt *clnt,
+ rpc_authflavor_t pseudoflavor,
+ const char *client_name)
{
struct rpc_auth_create_args auth_args = {
- .pseudoflavor = args->authflavor,
- .target_name = args->client_name,
+ .pseudoflavor = pseudoflavor,
+ .target_name = client_name,
};
struct rpc_auth *auth;
struct net *net = rpc_net_ns(clnt);
@@ -298,7 +319,7 @@ static int rpc_client_register(const struct rpc_create_args *args,
auth = rpcauth_create(&auth_args, clnt);
if (IS_ERR(auth)) {
dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
- args->authflavor);
+ pseudoflavor);
err = PTR_ERR(auth);
goto err_auth;
}
@@ -337,7 +358,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
{
const struct rpc_program *program = args->program;
const struct rpc_version *version;
- struct rpc_clnt *clnt = NULL;
+ struct rpc_clnt *clnt = NULL;
+ const struct rpc_timeout *timeout;
int err;
/* sanity check the name before trying to print it */
@@ -365,7 +387,6 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
if (err)
goto out_no_clid;
- rcu_assign_pointer(clnt->cl_xprt, xprt);
clnt->cl_procinfo = version->procs;
clnt->cl_maxproc = version->nrprocs;
clnt->cl_prog = args->prognumber ? : program->number;
@@ -380,16 +401,15 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
INIT_LIST_HEAD(&clnt->cl_tasks);
spin_lock_init(&clnt->cl_lock);
- if (!xprt_bound(xprt))
- clnt->cl_autobind = 1;
-
- clnt->cl_timeout = xprt->timeout;
+ timeout = xprt->timeout;
if (args->timeout != NULL) {
memcpy(&clnt->cl_timeout_default, args->timeout,
sizeof(clnt->cl_timeout_default));
- clnt->cl_timeout = &clnt->cl_timeout_default;
+ timeout = &clnt->cl_timeout_default;
}
+ rpc_clnt_set_transport(clnt, xprt, timeout);
+
clnt->cl_rtt = &clnt->cl_rtt_default;
rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
@@ -398,7 +418,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
/* save the nodename */
rpc_clnt_set_nodename(clnt, utsname()->nodename);
- err = rpc_client_register(args, clnt);
+ err = rpc_client_register(clnt, args->authflavor, args->client_name);
if (err)
goto out_no_path;
if (parent)
@@ -600,6 +620,80 @@ rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
}
EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);
+/**
+ * rpc_switch_client_transport: switch the RPC transport on the fly
+ * @clnt: pointer to a struct rpc_clnt
+ * @args: pointer to the new transport arguments
+ * @timeout: pointer to the new timeout parameters
+ *
+ * This function allows the caller to switch the RPC transport for the
+ * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS
+ * server, for instance. It assumes that the caller has ensured that
+ * there are no active RPC tasks by using some form of locking.
+ *
+ * Returns zero if "clnt" is now using the new xprt. Otherwise a
+ * negative errno is returned, and "clnt" continues to use the old
+ * xprt.
+ */
+int rpc_switch_client_transport(struct rpc_clnt *clnt,
+ struct xprt_create *args,
+ const struct rpc_timeout *timeout)
+{
+ const struct rpc_timeout *old_timeo;
+ rpc_authflavor_t pseudoflavor;
+ struct rpc_xprt *xprt, *old;
+ struct rpc_clnt *parent;
+ int err;
+
+ xprt = xprt_create_transport(args);
+ if (IS_ERR(xprt)) {
+ dprintk("RPC: failed to create new xprt for clnt %p\n",
+ clnt);
+ return PTR_ERR(xprt);
+ }
+
+ pseudoflavor = clnt->cl_auth->au_flavor;
+
+ old_timeo = clnt->cl_timeout;
+ old = rpc_clnt_set_transport(clnt, xprt, timeout);
+
+ rpc_unregister_client(clnt);
+ __rpc_clnt_remove_pipedir(clnt);
+
+ /*
+ * A new transport was created. "clnt" therefore
+ * becomes the root of a new cl_parent tree. clnt's
+ * children, if it has any, still point to the old xprt.
+ */
+ parent = clnt->cl_parent;
+ clnt->cl_parent = clnt;
+
+ /*
+ * The old rpc_auth cache cannot be re-used. GSS
+ * contexts in particular are between a single
+ * client and server.
+ */
+ err = rpc_client_register(clnt, pseudoflavor, NULL);
+ if (err)
+ goto out_revert;
+
+ synchronize_rcu();
+ if (parent != clnt)
+ rpc_release_client(parent);
+ xprt_put(old);
+ dprintk("RPC: replaced xprt for clnt %p\n", clnt);
+ return 0;
+
+out_revert:
+ rpc_clnt_set_transport(clnt, old, old_timeo);
+ clnt->cl_parent = parent;
+ rpc_client_register(clnt, pseudoflavor, NULL);
+ xprt_put(xprt);
+ dprintk("RPC: failed to switch xprt for clnt %p\n", clnt);
+ return err;
+}
+EXPORT_SYMBOL_GPL(rpc_switch_client_transport);
+
/*
* Kill all tasks for the given client.
* XXX: kill their descendants as well?
@@ -772,6 +866,8 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
atomic_inc(&clnt->cl_count);
if (clnt->cl_softrtry)
task->tk_flags |= RPC_TASK_SOFT;
+ if (clnt->cl_noretranstimeo)
+ task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
if (sk_memalloc_socks()) {
struct rpc_xprt *xprt;
@@ -1690,6 +1786,7 @@ call_connect_status(struct rpc_task *task)
dprint_status(task);
trace_rpc_connect_status(task, status);
+ task->tk_status = 0;
switch (status) {
/* if soft mounted, test if we've timed out */
case -ETIMEDOUT:
@@ -1698,12 +1795,14 @@ call_connect_status(struct rpc_task *task)
case -ECONNREFUSED:
case -ECONNRESET:
case -ENETUNREACH:
+ /* retry with existing socket, after a delay */
+ rpc_delay(task, 3*HZ);
if (RPC_IS_SOFTCONN(task))
break;
- /* retry with existing socket, after a delay */
- case 0:
case -EAGAIN:
- task->tk_status = 0;
+ task->tk_action = call_bind;
+ return;
+ case 0:
clnt->cl_stats->netreconn++;
task->tk_action = call_transmit;
return;
@@ -1717,13 +1816,14 @@ call_connect_status(struct rpc_task *task)
static void
call_transmit(struct rpc_task *task)
{
+ int is_retrans = RPC_WAS_SENT(task);
+
dprint_status(task);
task->tk_action = call_status;
if (task->tk_status < 0)
return;
- task->tk_status = xprt_prepare_transmit(task);
- if (task->tk_status != 0)
+ if (!xprt_prepare_transmit(task))
return;
task->tk_action = call_transmit_status;
/* Encode here so that rpcsec_gss can use correct sequence number. */
@@ -1742,6 +1842,8 @@ call_transmit(struct rpc_task *task)
xprt_transmit(task);
if (task->tk_status < 0)
return;
+ if (is_retrans)
+ task->tk_client->cl_stats->rpcretrans++;
/*
* On success, ensure that we call xprt_end_transmit() before sleeping
* in order to allow access to the socket to other RPC requests.
@@ -1811,8 +1913,7 @@ call_bc_transmit(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- task->tk_status = xprt_prepare_transmit(task);
- if (task->tk_status == -EAGAIN) {
+ if (!xprt_prepare_transmit(task)) {
/*
* Could not reserve the transport. Try again after the
* transport is released.
@@ -1900,7 +2001,8 @@ call_status(struct rpc_task *task)
rpc_delay(task, 3*HZ);
case -ETIMEDOUT:
task->tk_action = call_timeout;
- if (task->tk_client->cl_discrtry)
+ if (!(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
+ && task->tk_client->cl_discrtry)
xprt_conditional_disconnect(req->rq_xprt,
req->rq_connect_cookie);
break;
@@ -1982,7 +2084,6 @@ call_timeout(struct rpc_task *task)
rpcauth_invalcred(task);
retry:
- clnt->cl_stats->rpcretrans++;
task->tk_action = call_bind;
task->tk_status = 0;
}
@@ -2025,7 +2126,6 @@ call_decode(struct rpc_task *task)
if (req->rq_rcv_buf.len < 12) {
if (!RPC_IS_SOFT(task)) {
task->tk_action = call_bind;
- clnt->cl_stats->rpcretrans++;
goto out_retry;
}
dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
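
Aside on the new rpc_switch_client_transport() above: its kernel-doc spells out the contract, namely that the caller has already quiesced the client (no RPC tasks in flight), supplies fresh xprt_create arguments plus a timeout, and gets 0 on success or a negative errno with the old transport left in place. Below is a hedged sketch of one plausible caller, not taken from this patch; the xprt_create fields, the TCP transport identifier and the pr_warn() handling are illustrative assumptions.

#include <linux/printk.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>

/* Hypothetical helper: move an already-quiesced client to a new server
 * address, keeping the timeout it is currently using.
 */
static int demo_move_client(struct rpc_clnt *clnt, struct sockaddr *addr,
			    size_t addrlen)
{
	struct xprt_create args = {
		.ident   = XPRT_TRANSPORT_TCP,	/* assumed: plain TCP transport */
		.net     = rpc_net_ns(clnt),
		.dstaddr = addr,
		.addrlen = addrlen,
	};
	int err;

	err = rpc_switch_client_transport(clnt, &args, clnt->cl_timeout);
	if (err)
		pr_warn("demo: transport switch failed: %d\n", err);
	return err;
}
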
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index f94567b45bb3..d0d14a04dce1 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -519,8 +519,8 @@ static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
d_add(dentry, inode);
return 0;
out_err:
- printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
- __FILE__, __func__, dentry->d_name.name);
+ printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %pd\n",
+ __FILE__, __func__, dentry);
dput(dentry);
return -ENOMEM;
}
@@ -755,8 +755,8 @@ static int rpc_populate(struct dentry *parent,
out_bad:
__rpc_depopulate(parent, files, start, eof);
mutex_unlock(&dir->i_mutex);
- printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
- __FILE__, __func__, parent->d_name.name);
+ printk(KERN_WARNING "%s: %s failed to populate directory %pd\n",
+ __FILE__, __func__, parent);
return err;
}
@@ -852,8 +852,8 @@ out:
return dentry;
out_err:
dentry = ERR_PTR(err);
- printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
- __FILE__, __func__, parent->d_name.name, name,
+ printk(KERN_WARNING "%s: %s() failed to create pipe %pd/%s (errno = %d)\n",
+ __FILE__, __func__, parent, name,
err);
goto out;
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 9c9caaa5e0d3..b6e59f0a9475 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -291,12 +291,14 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
&inet_sk(sk)->inet_rcv_saddr,
inet_sk(sk)->inet_num);
break;
+#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:
len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n",
proto_name,
- &inet6_sk(sk)->rcv_saddr,
+ &sk->sk_v6_rcv_saddr,
inet_sk(sk)->inet_num);
break;
+#endif
default:
len = snprintf(buf, remaining, "*unknown-%d*\n",
sk->sk_family);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 095363eee764..04199bc8416f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -205,10 +205,8 @@ int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
goto out_sleep;
}
xprt->snd_task = task;
- if (req != NULL) {
- req->rq_bytes_sent = 0;
+ if (req != NULL)
req->rq_ntrans++;
- }
return 1;
@@ -263,7 +261,6 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
}
if (__xprt_get_cong(xprt, task)) {
xprt->snd_task = task;
- req->rq_bytes_sent = 0;
req->rq_ntrans++;
return 1;
}
@@ -300,10 +297,8 @@ static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
req = task->tk_rqstp;
xprt->snd_task = task;
- if (req) {
- req->rq_bytes_sent = 0;
+ if (req)
req->rq_ntrans++;
- }
return true;
}
@@ -329,7 +324,6 @@ static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
}
if (__xprt_get_cong(xprt, task)) {
xprt->snd_task = task;
- req->rq_bytes_sent = 0;
req->rq_ntrans++;
return true;
}
@@ -358,6 +352,11 @@ out_unlock:
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
if (xprt->snd_task == task) {
+ if (task != NULL) {
+ struct rpc_rqst *req = task->tk_rqstp;
+ if (req != NULL)
+ req->rq_bytes_sent = 0;
+ }
xprt_clear_locked(xprt);
__xprt_lock_write_next(xprt);
}
@@ -375,6 +374,11 @@ EXPORT_SYMBOL_GPL(xprt_release_xprt);
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
if (xprt->snd_task == task) {
+ if (task != NULL) {
+ struct rpc_rqst *req = task->tk_rqstp;
+ if (req != NULL)
+ req->rq_bytes_sent = 0;
+ }
xprt_clear_locked(xprt);
__xprt_lock_write_next_cong(xprt);
}
@@ -854,24 +858,36 @@ static inline int xprt_has_timer(struct rpc_xprt *xprt)
* @task: RPC task about to send a request
*
*/
-int xprt_prepare_transmit(struct rpc_task *task)
+bool xprt_prepare_transmit(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
- int err = 0;
+ bool ret = false;
dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
spin_lock_bh(&xprt->transport_lock);
- if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
- err = req->rq_reply_bytes_recvd;
+ if (!req->rq_bytes_sent) {
+ if (req->rq_reply_bytes_recvd) {
+ task->tk_status = req->rq_reply_bytes_recvd;
+ goto out_unlock;
+ }
+ if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
+ && xprt_connected(xprt)
+ && req->rq_connect_cookie == xprt->connect_cookie) {
+ xprt->ops->set_retrans_timeout(task);
+ rpc_sleep_on(&xprt->pending, task, xprt_timer);
+ goto out_unlock;
+ }
+ }
+ if (!xprt->ops->reserve_xprt(xprt, task)) {
+ task->tk_status = -EAGAIN;
goto out_unlock;
}
- if (!xprt->ops->reserve_xprt(xprt, task))
- err = -EAGAIN;
+ ret = true;
out_unlock:
spin_unlock_bh(&xprt->transport_lock);
- return err;
+ return ret;
}
void xprt_end_transmit(struct rpc_task *task)
@@ -912,7 +928,6 @@ void xprt_transmit(struct rpc_task *task)
} else if (!req->rq_bytes_sent)
return;
- req->rq_connect_cookie = xprt->connect_cookie;
req->rq_xtime = ktime_get();
status = xprt->ops->send_request(task);
if (status != 0) {
@@ -938,12 +953,14 @@ void xprt_transmit(struct rpc_task *task)
/* Don't race with disconnect */
if (!xprt_connected(xprt))
task->tk_status = -ENOTCONN;
- else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
+ else {
/*
* Sleep on the pending queue since
* we're expecting a reply.
*/
- rpc_sleep_on(&xprt->pending, task, xprt_timer);
+ if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
+ rpc_sleep_on(&xprt->pending, task, xprt_timer);
+ req->rq_connect_cookie = xprt->connect_cookie;
}
spin_unlock_bh(&xprt->transport_lock);
}
@@ -1087,11 +1104,9 @@ struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
for (i = 0; i < num_prealloc; i++) {
req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
if (!req)
- break;
+ goto out_free;
list_add(&req->rq_list, &xprt->free);
}
- if (i < num_prealloc)
- goto out_free;
if (max_alloc > num_prealloc)
xprt->max_reqs = max_alloc;
else
@@ -1186,6 +1201,12 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
req->rq_xprt = xprt;
req->rq_buffer = NULL;
req->rq_xid = xprt_alloc_xid(xprt);
+ req->rq_connect_cookie = xprt->connect_cookie - 1;
+ req->rq_bytes_sent = 0;
+ req->rq_snd_buf.len = 0;
+ req->rq_snd_buf.buflen = 0;
+ req->rq_rcv_buf.len = 0;
+ req->rq_rcv_buf.buflen = 0;
req->rq_release_snd_buf = NULL;
xprt_reset_majortimeo(req);
dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index ee03d35677d9..17c88928b7db 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -835,6 +835,8 @@ static void xs_close(struct rpc_xprt *xprt)
dprintk("RPC: xs_close xprt %p\n", xprt);
+ cancel_delayed_work_sync(&transport->connect_worker);
+
xs_reset_transport(transport);
xprt->reestablish_timeout = 0;
@@ -854,14 +856,6 @@ static void xs_tcp_close(struct rpc_xprt *xprt)
xs_tcp_shutdown(xprt);
}
-static void xs_local_destroy(struct rpc_xprt *xprt)
-{
- xs_close(xprt);
- xs_free_peer_addresses(xprt);
- xprt_free(xprt);
- module_put(THIS_MODULE);
-}
-
/**
* xs_destroy - prepare to shutdown a transport
* @xprt: doomed transport
@@ -869,13 +863,12 @@ static void xs_local_destroy(struct rpc_xprt *xprt)
*/
static void xs_destroy(struct rpc_xprt *xprt)
{
- struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
-
dprintk("RPC: xs_destroy xprt %p\n", xprt);
- cancel_delayed_work_sync(&transport->connect_worker);
-
- xs_local_destroy(xprt);
+ xs_close(xprt);
+ xs_free_peer_addresses(xprt);
+ xprt_free(xprt);
+ module_put(THIS_MODULE);
}
static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
@@ -1511,6 +1504,7 @@ static void xs_tcp_state_change(struct sock *sk)
transport->tcp_copied = 0;
transport->tcp_flags =
TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
+ xprt->connect_cookie++;
xprt_wake_pending_tasks(xprt, -EAGAIN);
}
@@ -1816,6 +1810,10 @@ static inline void xs_reclassify_socket(int family, struct socket *sock)
}
#endif
+static void xs_dummy_setup_socket(struct work_struct *work)
+{
+}
+
static struct socket *xs_create_sock(struct rpc_xprt *xprt,
struct sock_xprt *transport, int family, int type, int protocol)
{
@@ -2112,6 +2110,19 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
if (!transport->inet) {
struct sock *sk = sock->sk;
+ unsigned int keepidle = xprt->timeout->to_initval / HZ;
+ unsigned int keepcnt = xprt->timeout->to_retries + 1;
+ unsigned int opt_on = 1;
+
+ /* TCP Keepalive options */
+ kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
+ (char *)&opt_on, sizeof(opt_on));
+ kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
+ (char *)&keepidle, sizeof(keepidle));
+ kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
+ (char *)&keepidle, sizeof(keepidle));
+ kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
+ (char *)&keepcnt, sizeof(keepcnt));
write_lock_bh(&sk->sk_callback_lock);
@@ -2151,7 +2162,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
case 0:
case -EINPROGRESS:
/* SYN_SENT! */
- xprt->connect_cookie++;
if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
}
@@ -2498,7 +2508,7 @@ static struct rpc_xprt_ops xs_local_ops = {
.send_request = xs_local_send_request,
.set_retrans_timeout = xprt_set_retrans_timeout_def,
.close = xs_close,
- .destroy = xs_local_destroy,
+ .destroy = xs_destroy,
.print_stats = xs_local_print_stats,
};
@@ -2655,6 +2665,9 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
xprt->ops = &xs_local_ops;
xprt->timeout = &xs_local_default_timeout;
+ INIT_DELAYED_WORK(&transport->connect_worker,
+ xs_dummy_setup_socket);
+
switch (sun->sun_family) {
case AF_LOCAL:
if (sun->sun_path[0] != '/') {
@@ -2859,8 +2872,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
if (args->bc_xprt->xpt_bc_xprt) {
/*
* This server connection already has a backchannel
- * export; we can't create a new one, as we wouldn't be
- * able to match replies based on xid any more. So,
+ * transport; we can't create a new one, as we wouldn't
+ * be able to match replies based on xid any more. So,
* reuse the already-existing one:
*/
return args->bc_xprt->xpt_bc_xprt;
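
A note on the keepalive block added to xs_tcp_finish_connecting() above: the TCP probe parameters are derived from the RPC timeout itself. keepidle (and the probe interval, which is set from the same value) comes from to_initval converted to seconds, and keepcnt is to_retries + 1, so the socket notices a dead peer on roughly the same schedule the RPC layer already uses. As a worked example, assuming the usual TCP transport defaults of to_initval = 60 * HZ and to_retries = 2, this enables SO_KEEPALIVE with TCP_KEEPIDLE = TCP_KEEPINTVL = 60 seconds and TCP_KEEPCNT = 3, so an unresponsive connection is torn down after about 60 + 3 * 60 = 240 seconds of silence.
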
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 609c30c80816..3f9707a16d06 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -387,7 +387,7 @@ restart:
b_ptr = &tipc_bearers[bearer_id];
strcpy(b_ptr->name, name);
- res = m_ptr->enable_bearer(b_ptr);
+ res = m_ptr->enable_media(b_ptr);
if (res) {
pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
name, -res);
@@ -420,23 +420,15 @@ exit:
}
/**
- * tipc_block_bearer - Block the bearer with the given name, and reset all its links
+ * tipc_block_bearer - Block the bearer, and reset all its links
*/
-int tipc_block_bearer(const char *name)
+int tipc_block_bearer(struct tipc_bearer *b_ptr)
{
- struct tipc_bearer *b_ptr = NULL;
struct tipc_link *l_ptr;
struct tipc_link *temp_l_ptr;
read_lock_bh(&tipc_net_lock);
- b_ptr = tipc_bearer_find(name);
- if (!b_ptr) {
- pr_warn("Attempt to block unknown bearer <%s>\n", name);
- read_unlock_bh(&tipc_net_lock);
- return -EINVAL;
- }
-
- pr_info("Blocking bearer <%s>\n", name);
+ pr_info("Blocking bearer <%s>\n", b_ptr->name);
spin_lock_bh(&b_ptr->lock);
b_ptr->blocked = 1;
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
@@ -465,7 +457,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
pr_info("Disabling bearer <%s>\n", b_ptr->name);
spin_lock_bh(&b_ptr->lock);
b_ptr->blocked = 1;
- b_ptr->media->disable_bearer(b_ptr);
+ b_ptr->media->disable_media(b_ptr);
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
tipc_link_delete(l_ptr);
}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 09c869adcfcf..e5e04be6fffa 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -75,8 +75,8 @@ struct tipc_bearer;
/**
* struct tipc_media - TIPC media information available to internal users
* @send_msg: routine which handles buffer transmission
- * @enable_bearer: routine which enables a bearer
- * @disable_bearer: routine which disables a bearer
+ * @enable_media: routine which enables a media
+ * @disable_media: routine which disables a media
* @addr2str: routine which converts media address to string
* @addr2msg: routine which converts media address to protocol message area
* @msg2addr: routine which converts media address from protocol message area
@@ -91,8 +91,8 @@ struct tipc_media {
int (*send_msg)(struct sk_buff *buf,
struct tipc_bearer *b_ptr,
struct tipc_media_addr *dest);
- int (*enable_bearer)(struct tipc_bearer *b_ptr);
- void (*disable_bearer)(struct tipc_bearer *b_ptr);
+ int (*enable_media)(struct tipc_bearer *b_ptr);
+ void (*disable_media)(struct tipc_bearer *b_ptr);
int (*addr2str)(struct tipc_media_addr *a, char *str_buf, int str_size);
int (*addr2msg)(struct tipc_media_addr *a, char *msg_area);
int (*msg2addr)(const struct tipc_bearer *b_ptr,
@@ -163,7 +163,7 @@ int tipc_register_media(struct tipc_media *m_ptr);
void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
-int tipc_block_bearer(const char *name);
+int tipc_block_bearer(struct tipc_bearer *b_ptr);
void tipc_continue(struct tipc_bearer *tb_ptr);
int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index be72f8cebc53..94895d4e86ab 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -90,21 +90,21 @@ extern int tipc_random __read_mostly;
/*
* Routines available to privileged subsystems
*/
-extern int tipc_core_start_net(unsigned long);
-extern int tipc_handler_start(void);
-extern void tipc_handler_stop(void);
-extern int tipc_netlink_start(void);
-extern void tipc_netlink_stop(void);
-extern int tipc_socket_init(void);
-extern void tipc_socket_stop(void);
-extern int tipc_sock_create_local(int type, struct socket **res);
-extern void tipc_sock_release_local(struct socket *sock);
-extern int tipc_sock_accept_local(struct socket *sock,
- struct socket **newsock, int flags);
+int tipc_core_start_net(unsigned long);
+int tipc_handler_start(void);
+void tipc_handler_stop(void);
+int tipc_netlink_start(void);
+void tipc_netlink_stop(void);
+int tipc_socket_init(void);
+void tipc_socket_stop(void);
+int tipc_sock_create_local(int type, struct socket **res);
+void tipc_sock_release_local(struct socket *sock);
+int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
+ int flags);
#ifdef CONFIG_SYSCTL
-extern int tipc_register_sysctl(void);
-extern void tipc_unregister_sysctl(void);
+int tipc_register_sysctl(void);
+void tipc_unregister_sysctl(void);
#else
#define tipc_register_sysctl() 0
#define tipc_unregister_sysctl()
@@ -201,6 +201,6 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
return (struct tipc_msg *)skb->data;
}
-extern struct sk_buff *tipc_buf_acquire(u32 size);
+struct sk_buff *tipc_buf_acquire(u32 size);
#endif
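
The net/tipc/core.h hunk above is purely cosmetic: function declarations have external linkage by default in C, so dropping the explicit extern keyword changes nothing for callers. The two prototypes below are equivalent:

/* equivalent declarations: extern is implicit for function prototypes */
extern int tipc_netlink_start(void);
int tipc_netlink_start(void);
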
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 40ea40cf6204..f80d59f5a161 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -2,7 +2,7 @@
* net/tipc/eth_media.c: Ethernet bearer support for TIPC
*
* Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2005-2008, 2011, Wind River Systems
+ * Copyright (c) 2005-2008, 2011-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,19 +37,19 @@
#include "core.h"
#include "bearer.h"
-#define MAX_ETH_BEARERS MAX_BEARERS
+#define MAX_ETH_MEDIA MAX_BEARERS
#define ETH_ADDR_OFFSET 4 /* message header offset of MAC address */
/**
- * struct eth_bearer - Ethernet bearer data structure
+ * struct eth_media - Ethernet bearer data structure
* @bearer: ptr to associated "generic" bearer structure
* @dev: ptr to associated Ethernet network device
* @tipc_packet_type: used in binding TIPC to Ethernet driver
* @setup: work item used when enabling bearer
* @cleanup: work item used when disabling bearer
*/
-struct eth_bearer {
+struct eth_media {
struct tipc_bearer *bearer;
struct net_device *dev;
struct packet_type tipc_packet_type;
@@ -58,7 +58,7 @@ struct eth_bearer {
};
static struct tipc_media eth_media_info;
-static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
+static struct eth_media eth_media_array[MAX_ETH_MEDIA];
static int eth_started;
static int recv_notification(struct notifier_block *nb, unsigned long evt,
@@ -100,7 +100,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
if (!clone)
return 0;
- dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
+ dev = ((struct eth_media *)(tb_ptr->usr_handle))->dev;
delta = dev->hard_header_len - skb_headroom(buf);
if ((delta > 0) &&
@@ -128,43 +128,43 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
static int recv_msg(struct sk_buff *buf, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
- struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
+ struct eth_media *eb_ptr = (struct eth_media *)pt->af_packet_priv;
if (!net_eq(dev_net(dev), &init_net)) {
kfree_skb(buf);
- return 0;
+ return NET_RX_DROP;
}
if (likely(eb_ptr->bearer)) {
if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
buf->next = NULL;
tipc_recv_msg(buf, eb_ptr->bearer);
- return 0;
+ return NET_RX_SUCCESS;
}
}
kfree_skb(buf);
- return 0;
+ return NET_RX_DROP;
}
/**
- * setup_bearer - setup association between Ethernet bearer and interface
+ * setup_media - setup association between Ethernet bearer and interface
*/
-static void setup_bearer(struct work_struct *work)
+static void setup_media(struct work_struct *work)
{
- struct eth_bearer *eb_ptr =
- container_of(work, struct eth_bearer, setup);
+ struct eth_media *eb_ptr =
+ container_of(work, struct eth_media, setup);
dev_add_pack(&eb_ptr->tipc_packet_type);
}
/**
- * enable_bearer - attach TIPC bearer to an Ethernet interface
+ * enable_media - attach TIPC bearer to an Ethernet interface
*/
-static int enable_bearer(struct tipc_bearer *tb_ptr)
+static int enable_media(struct tipc_bearer *tb_ptr)
{
struct net_device *dev;
- struct eth_bearer *eb_ptr = &eth_bearers[0];
- struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+ struct eth_media *eb_ptr = &eth_media_array[0];
+ struct eth_media *stop = &eth_media_array[MAX_ETH_MEDIA];
char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
int pending_dev = 0;
@@ -188,7 +188,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
eb_ptr->tipc_packet_type.func = recv_msg;
eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
- INIT_WORK(&eb_ptr->setup, setup_bearer);
+ INIT_WORK(&eb_ptr->setup, setup_media);
schedule_work(&eb_ptr->setup);
/* Associate TIPC bearer with Ethernet bearer */
@@ -205,14 +205,14 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
}
/**
- * cleanup_bearer - break association between Ethernet bearer and interface
+ * cleanup_media - break association between Ethernet bearer and interface
*
* This routine must be invoked from a work queue because it can sleep.
*/
-static void cleanup_bearer(struct work_struct *work)
+static void cleanup_media(struct work_struct *work)
{
- struct eth_bearer *eb_ptr =
- container_of(work, struct eth_bearer, cleanup);
+ struct eth_media *eb_ptr =
+ container_of(work, struct eth_media, cleanup);
dev_remove_pack(&eb_ptr->tipc_packet_type);
dev_put(eb_ptr->dev);
@@ -220,18 +220,18 @@ static void cleanup_bearer(struct work_struct *work)
}
/**
- * disable_bearer - detach TIPC bearer from an Ethernet interface
+ * disable_media - detach TIPC bearer from an Ethernet interface
*
* Mark Ethernet bearer as inactive so that incoming buffers are thrown away,
* then get worker thread to complete bearer cleanup. (Can't do cleanup
* here because cleanup code needs to sleep and caller holds spinlocks.)
*/
-static void disable_bearer(struct tipc_bearer *tb_ptr)
+static void disable_media(struct tipc_bearer *tb_ptr)
{
- struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle;
+ struct eth_media *eb_ptr = (struct eth_media *)tb_ptr->usr_handle;
eb_ptr->bearer = NULL;
- INIT_WORK(&eb_ptr->cleanup, cleanup_bearer);
+ INIT_WORK(&eb_ptr->cleanup, cleanup_media);
schedule_work(&eb_ptr->cleanup);
}
@@ -245,8 +245,8 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct eth_bearer *eb_ptr = &eth_bearers[0];
- struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+ struct eth_media *eb_ptr = &eth_media_array[0];
+ struct eth_media *stop = &eth_media_array[MAX_ETH_MEDIA];
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
@@ -265,17 +265,17 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
if (netif_carrier_ok(dev))
tipc_continue(eb_ptr->bearer);
else
- tipc_block_bearer(eb_ptr->bearer->name);
+ tipc_block_bearer(eb_ptr->bearer);
break;
case NETDEV_UP:
tipc_continue(eb_ptr->bearer);
break;
case NETDEV_DOWN:
- tipc_block_bearer(eb_ptr->bearer->name);
+ tipc_block_bearer(eb_ptr->bearer);
break;
case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
- tipc_block_bearer(eb_ptr->bearer->name);
+ tipc_block_bearer(eb_ptr->bearer);
tipc_continue(eb_ptr->bearer);
break;
case NETDEV_UNREGISTER:
@@ -327,8 +327,8 @@ static int eth_msg2addr(const struct tipc_bearer *tb_ptr,
*/
static struct tipc_media eth_media_info = {
.send_msg = send_msg,
- .enable_bearer = enable_bearer,
- .disable_bearer = disable_bearer,
+ .enable_media = enable_media,
+ .disable_media = disable_media,
.addr2str = eth_addr2str,
.addr2msg = eth_addr2msg,
.msg2addr = eth_msg2addr,
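
The comments above cleanup_media()/disable_media() explain the constraint behind this structure: the detach path is entered with spinlocks held, but the cleanup itself (dev_remove_pack() and releasing the device) needs to sleep, so the bearer is only marked inactive inline and the sleeping part is pushed to a work item. A hedged sketch of that general pattern follows; all demo_* names are made up for illustration.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

struct demo_obj {
	spinlock_t lock;
	struct work_struct cleanup;
};

static void demo_cleanup_fn(struct work_struct *work)
{
	struct demo_obj *obj = container_of(work, struct demo_obj, cleanup);

	/* safe to sleep here: this runs from the shared workqueue,
	 * not under obj->lock */
	(void)obj;
}

static void demo_disable(struct demo_obj *obj)
{
	spin_lock_bh(&obj->lock);
	/* ... mark the object inactive while holding the lock ... */
	INIT_WORK(&obj->cleanup, demo_cleanup_fn);
	spin_unlock_bh(&obj->lock);

	schedule_work(&obj->cleanup);	/* sleeping cleanup runs later, in process context */
}
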
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c
index 9934a32bfa87..c13989297464 100644
--- a/net/tipc/ib_media.c
+++ b/net/tipc/ib_media.c
@@ -42,17 +42,17 @@
#include "core.h"
#include "bearer.h"
-#define MAX_IB_BEARERS MAX_BEARERS
+#define MAX_IB_MEDIA MAX_BEARERS
/**
- * struct ib_bearer - Infiniband bearer data structure
+ * struct ib_media - Infiniband media data structure
* @bearer: ptr to associated "generic" bearer structure
* @dev: ptr to associated Infiniband network device
* @tipc_packet_type: used in binding TIPC to Infiniband driver
* @cleanup: work item used when disabling bearer
*/
-struct ib_bearer {
+struct ib_media {
struct tipc_bearer *bearer;
struct net_device *dev;
struct packet_type tipc_packet_type;
@@ -61,7 +61,7 @@ struct ib_bearer {
};
static struct tipc_media ib_media_info;
-static struct ib_bearer ib_bearers[MAX_IB_BEARERS];
+static struct ib_media ib_media_array[MAX_IB_MEDIA];
static int ib_started;
/**
@@ -93,7 +93,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
if (!clone)
return 0;
- dev = ((struct ib_bearer *)(tb_ptr->usr_handle))->dev;
+ dev = ((struct ib_media *)(tb_ptr->usr_handle))->dev;
delta = dev->hard_header_len - skb_headroom(buf);
if ((delta > 0) &&
@@ -121,43 +121,43 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
static int recv_msg(struct sk_buff *buf, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
- struct ib_bearer *ib_ptr = (struct ib_bearer *)pt->af_packet_priv;
+ struct ib_media *ib_ptr = (struct ib_media *)pt->af_packet_priv;
if (!net_eq(dev_net(dev), &init_net)) {
kfree_skb(buf);
- return 0;
+ return NET_RX_DROP;
}
if (likely(ib_ptr->bearer)) {
if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
buf->next = NULL;
tipc_recv_msg(buf, ib_ptr->bearer);
- return 0;
+ return NET_RX_SUCCESS;
}
}
kfree_skb(buf);
- return 0;
+ return NET_RX_DROP;
}
/**
* setup_bearer - setup association between InfiniBand bearer and interface
*/
-static void setup_bearer(struct work_struct *work)
+static void setup_media(struct work_struct *work)
{
- struct ib_bearer *ib_ptr =
- container_of(work, struct ib_bearer, setup);
+ struct ib_media *ib_ptr =
+ container_of(work, struct ib_media, setup);
dev_add_pack(&ib_ptr->tipc_packet_type);
}
/**
- * enable_bearer - attach TIPC bearer to an InfiniBand interface
+ * enable_media - attach TIPC bearer to an InfiniBand interface
*/
-static int enable_bearer(struct tipc_bearer *tb_ptr)
+static int enable_media(struct tipc_bearer *tb_ptr)
{
struct net_device *dev;
- struct ib_bearer *ib_ptr = &ib_bearers[0];
- struct ib_bearer *stop = &ib_bearers[MAX_IB_BEARERS];
+ struct ib_media *ib_ptr = &ib_media_array[0];
+ struct ib_media *stop = &ib_media_array[MAX_IB_MEDIA];
char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
int pending_dev = 0;
@@ -181,7 +181,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
ib_ptr->tipc_packet_type.func = recv_msg;
ib_ptr->tipc_packet_type.af_packet_priv = ib_ptr;
INIT_LIST_HEAD(&(ib_ptr->tipc_packet_type.list));
- INIT_WORK(&ib_ptr->setup, setup_bearer);
+ INIT_WORK(&ib_ptr->setup, setup_media);
schedule_work(&ib_ptr->setup);
/* Associate TIPC bearer with InfiniBand bearer */
@@ -204,8 +204,8 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
*/
static void cleanup_bearer(struct work_struct *work)
{
- struct ib_bearer *ib_ptr =
- container_of(work, struct ib_bearer, cleanup);
+ struct ib_media *ib_ptr =
+ container_of(work, struct ib_media, cleanup);
dev_remove_pack(&ib_ptr->tipc_packet_type);
dev_put(ib_ptr->dev);
@@ -213,15 +213,15 @@ static void cleanup_bearer(struct work_struct *work)
}
/**
- * disable_bearer - detach TIPC bearer from an InfiniBand interface
+ * disable_media - detach TIPC bearer from an InfiniBand interface
*
* Mark InfiniBand bearer as inactive so that incoming buffers are thrown away,
* then get worker thread to complete bearer cleanup. (Can't do cleanup
* here because cleanup code needs to sleep and caller holds spinlocks.)
*/
-static void disable_bearer(struct tipc_bearer *tb_ptr)
+static void disable_media(struct tipc_bearer *tb_ptr)
{
- struct ib_bearer *ib_ptr = (struct ib_bearer *)tb_ptr->usr_handle;
+ struct ib_media *ib_ptr = (struct ib_media *)tb_ptr->usr_handle;
ib_ptr->bearer = NULL;
INIT_WORK(&ib_ptr->cleanup, cleanup_bearer);
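/*
 * Sketch of the deferred-teardown pattern disable_media() relies on: the
 * caller holds spinlocks, so the parts that can sleep (dev_remove_pack(),
 * dev_put()) are pushed to a work item while the RX path is told to start
 * dropping.  Structure and names here are illustrative only.
 */
#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct example_media {
	struct net_device *dev;
	struct packet_type pt;
	struct work_struct cleanup;
};

static void example_cleanup(struct work_struct *work)
{
	struct example_media *m = container_of(work, struct example_media,
					       cleanup);

	dev_remove_pack(&m->pt);		/* may sleep */
	dev_put(m->dev);
	m->dev = NULL;
}

static void example_disable(struct example_media *m)
{
	m->pt.af_packet_priv = NULL;		/* RX hook now drops buffers */
	INIT_WORK(&m->cleanup, example_cleanup);
	schedule_work(&m->cleanup);
}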
@@ -238,8 +238,8 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct ib_bearer *ib_ptr = &ib_bearers[0];
- struct ib_bearer *stop = &ib_bearers[MAX_IB_BEARERS];
+ struct ib_media *ib_ptr = &ib_media_array[0];
+ struct ib_media *stop = &ib_media_array[MAX_IB_MEDIA];
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
@@ -258,17 +258,17 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
if (netif_carrier_ok(dev))
tipc_continue(ib_ptr->bearer);
else
- tipc_block_bearer(ib_ptr->bearer->name);
+ tipc_block_bearer(ib_ptr->bearer);
break;
case NETDEV_UP:
tipc_continue(ib_ptr->bearer);
break;
case NETDEV_DOWN:
- tipc_block_bearer(ib_ptr->bearer->name);
+ tipc_block_bearer(ib_ptr->bearer);
break;
case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
- tipc_block_bearer(ib_ptr->bearer->name);
+ tipc_block_bearer(ib_ptr->bearer);
tipc_continue(ib_ptr->bearer);
break;
case NETDEV_UNREGISTER:
@@ -323,8 +323,8 @@ static int ib_msg2addr(const struct tipc_bearer *tb_ptr,
*/
static struct tipc_media ib_media_info = {
.send_msg = send_msg,
- .enable_bearer = enable_bearer,
- .disable_bearer = disable_bearer,
+ .enable_media = enable_media,
+ .disable_media = disable_media,
.addr2str = ib_addr2str,
.addr2msg = ib_addr2msg,
.msg2addr = ib_msg2addr,
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 0cc3d9015c5d..54163f91b8ae 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -75,20 +75,6 @@ static const char *link_unk_evt = "Unknown link event ";
*/
#define START_CHANGEOVER 100000u
-/**
- * struct tipc_link_name - deconstructed link name
- * @addr_local: network address of node at this end
- * @if_local: name of interface at this end
- * @addr_peer: network address of node at far end
- * @if_peer: name of interface at far end
- */
-struct tipc_link_name {
- u32 addr_local;
- char if_local[TIPC_MAX_IF_NAME];
- u32 addr_peer;
- char if_peer[TIPC_MAX_IF_NAME];
-};
-
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
struct sk_buff *buf);
static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
@@ -97,8 +83,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
static int link_send_sections_long(struct tipc_port *sender,
struct iovec const *msg_sect,
- u32 num_sect, unsigned int total_len,
- u32 destnode);
+ unsigned int len, u32 destnode);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
@@ -161,72 +146,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
}
/**
- * link_name_validate - validate & (optionally) deconstruct tipc_link name
- * @name: ptr to link name string
- * @name_parts: ptr to area for link name components (or NULL if not needed)
- *
- * Returns 1 if link name is valid, otherwise 0.
- */
-static int link_name_validate(const char *name,
- struct tipc_link_name *name_parts)
-{
- char name_copy[TIPC_MAX_LINK_NAME];
- char *addr_local;
- char *if_local;
- char *addr_peer;
- char *if_peer;
- char dummy;
- u32 z_local, c_local, n_local;
- u32 z_peer, c_peer, n_peer;
- u32 if_local_len;
- u32 if_peer_len;
-
- /* copy link name & ensure length is OK */
- name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
- /* need above in case non-Posix strncpy() doesn't pad with nulls */
- strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
- if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
- return 0;
-
- /* ensure all component parts of link name are present */
- addr_local = name_copy;
- if_local = strchr(addr_local, ':');
- if (if_local == NULL)
- return 0;
- *(if_local++) = 0;
- addr_peer = strchr(if_local, '-');
- if (addr_peer == NULL)
- return 0;
- *(addr_peer++) = 0;
- if_local_len = addr_peer - if_local;
- if_peer = strchr(addr_peer, ':');
- if (if_peer == NULL)
- return 0;
- *(if_peer++) = 0;
- if_peer_len = strlen(if_peer) + 1;
-
- /* validate component parts of link name */
- if ((sscanf(addr_local, "%u.%u.%u%c",
- &z_local, &c_local, &n_local, &dummy) != 3) ||
- (sscanf(addr_peer, "%u.%u.%u%c",
- &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
- (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
- (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) ||
- (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
- (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME))
- return 0;
-
- /* return link name components, if necessary */
- if (name_parts) {
- name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
- strcpy(name_parts->if_local, if_local);
- name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
- strcpy(name_parts->if_peer, if_peer);
- }
- return 1;
-}
-
-/**
* link_timeout - handle expiration of link timer
* @l_ptr: pointer to link
*
@@ -1065,8 +984,7 @@ static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
*/
int tipc_link_send_sections_fast(struct tipc_port *sender,
struct iovec const *msg_sect,
- const u32 num_sect, unsigned int total_len,
- u32 destaddr)
+ unsigned int len, u32 destaddr)
{
struct tipc_msg *hdr = &sender->phdr;
struct tipc_link *l_ptr;
@@ -1080,8 +998,7 @@ again:
* Try building message using port's max_pkt hint.
* (Must not hold any locks while building message.)
*/
- res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
- sender->max_pkt, &buf);
+ res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
/* Exit if build request was invalid */
if (unlikely(res < 0))
return res;
@@ -1121,8 +1038,7 @@ exit:
if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
goto again;
- return link_send_sections_long(sender, msg_sect,
- num_sect, total_len,
+ return link_send_sections_long(sender, msg_sect, len,
destaddr);
}
tipc_node_unlock(node);
@@ -1133,8 +1049,8 @@ exit:
if (buf)
return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
if (res >= 0)
- return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
- total_len, TIPC_ERR_NO_NODE);
+ return tipc_port_reject_sections(sender, hdr, msg_sect,
+ len, TIPC_ERR_NO_NODE);
return res;
}
@@ -1154,18 +1070,17 @@ exit:
*/
static int link_send_sections_long(struct tipc_port *sender,
struct iovec const *msg_sect,
- u32 num_sect, unsigned int total_len,
- u32 destaddr)
+ unsigned int len, u32 destaddr)
{
struct tipc_link *l_ptr;
struct tipc_node *node;
struct tipc_msg *hdr = &sender->phdr;
- u32 dsz = total_len;
+ u32 dsz = len;
u32 max_pkt, fragm_sz, rest;
struct tipc_msg fragm_hdr;
struct sk_buff *buf, *buf_chain, *prev;
u32 fragm_crs, fragm_rest, hsz, sect_rest;
- const unchar *sect_crs;
+ const unchar __user *sect_crs;
int curr_sect;
u32 fragm_no;
int res = 0;
@@ -1207,7 +1122,7 @@ again:
if (!sect_rest) {
sect_rest = msg_sect[++curr_sect].iov_len;
- sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
+ sect_crs = msg_sect[curr_sect].iov_base;
}
if (sect_rest < fragm_rest)
@@ -1283,8 +1198,8 @@ reject:
buf = buf_chain->next;
kfree_skb(buf_chain);
}
- return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
- total_len, TIPC_ERR_NO_NODE);
+ return tipc_port_reject_sections(sender, hdr, msg_sect,
+ len, TIPC_ERR_NO_NODE);
}
/* Append chain of fragments to send queue & send them */
@@ -1592,15 +1507,15 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
/* Ensure bearer is still enabled */
if (unlikely(!b_ptr->active))
- goto cont;
+ goto discard;
/* Ensure message is well-formed */
if (unlikely(!link_recv_buf_validate(buf)))
- goto cont;
+ goto discard;
/* Ensure message data is a single contiguous unit */
if (unlikely(skb_linearize(buf)))
- goto cont;
+ goto discard;
/* Handle arrival of a non-unicast link message */
msg = buf_msg(buf);
@@ -1616,20 +1531,18 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
/* Discard unicast link messages destined for another node */
if (unlikely(!msg_short(msg) &&
(msg_destnode(msg) != tipc_own_addr)))
- goto cont;
+ goto discard;
/* Locate neighboring node that sent message */
n_ptr = tipc_node_find(msg_prevnode(msg));
if (unlikely(!n_ptr))
- goto cont;
+ goto discard;
tipc_node_lock(n_ptr);
/* Locate unicast link endpoint that should handle message */
l_ptr = n_ptr->links[b_ptr->identity];
- if (unlikely(!l_ptr)) {
- tipc_node_unlock(n_ptr);
- goto cont;
- }
+ if (unlikely(!l_ptr))
+ goto unlock_discard;
/* Verify that communication with node is currently allowed */
if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
@@ -1639,10 +1552,8 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
!msg_redundant_link(msg))
n_ptr->block_setup &= ~WAIT_PEER_DOWN;
- if (n_ptr->block_setup) {
- tipc_node_unlock(n_ptr);
- goto cont;
- }
+ if (n_ptr->block_setup)
+ goto unlock_discard;
/* Validate message sequence number info */
seq_no = msg_seqno(msg);
@@ -1678,98 +1589,97 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
/* Now (finally!) process the incoming message */
protocol_check:
- if (likely(link_working_working(l_ptr))) {
- if (likely(seq_no == mod(l_ptr->next_in_no))) {
- l_ptr->next_in_no++;
- if (unlikely(l_ptr->oldest_deferred_in))
- head = link_insert_deferred_queue(l_ptr,
- head);
-deliver:
- if (likely(msg_isdata(msg))) {
- tipc_node_unlock(n_ptr);
- tipc_port_recv_msg(buf);
- continue;
- }
- switch (msg_user(msg)) {
- int ret;
- case MSG_BUNDLER:
- l_ptr->stats.recv_bundles++;
- l_ptr->stats.recv_bundled +=
- msg_msgcnt(msg);
- tipc_node_unlock(n_ptr);
- tipc_link_recv_bundle(buf);
- continue;
- case NAME_DISTRIBUTOR:
- n_ptr->bclink.recv_permitted = true;
- tipc_node_unlock(n_ptr);
- tipc_named_recv(buf);
- continue;
- case BCAST_PROTOCOL:
- tipc_link_recv_sync(n_ptr, buf);
- tipc_node_unlock(n_ptr);
- continue;
- case CONN_MANAGER:
- tipc_node_unlock(n_ptr);
- tipc_port_recv_proto_msg(buf);
- continue;
- case MSG_FRAGMENTER:
- l_ptr->stats.recv_fragments++;
- ret = tipc_link_recv_fragment(
- &l_ptr->defragm_buf,
- &buf, &msg);
- if (ret == 1) {
- l_ptr->stats.recv_fragmented++;
- goto deliver;
- }
- if (ret == -1)
- l_ptr->next_in_no--;
- break;
- case CHANGEOVER_PROTOCOL:
- type = msg_type(msg);
- if (link_recv_changeover_msg(&l_ptr,
- &buf)) {
- msg = buf_msg(buf);
- seq_no = msg_seqno(msg);
- if (type == ORIGINAL_MSG)
- goto deliver;
- goto protocol_check;
- }
- break;
- default:
- kfree_skb(buf);
- buf = NULL;
- break;
- }
+ if (unlikely(!link_working_working(l_ptr))) {
+ if (msg_user(msg) == LINK_PROTOCOL) {
+ link_recv_proto_msg(l_ptr, buf);
+ head = link_insert_deferred_queue(l_ptr, head);
tipc_node_unlock(n_ptr);
- tipc_net_route_msg(buf);
continue;
}
+
+ /* Traffic message. Conditionally activate link */
+ link_state_event(l_ptr, TRAFFIC_MSG_EVT);
+
+ if (link_working_working(l_ptr)) {
+ /* Re-insert buffer in front of queue */
+ buf->next = head;
+ head = buf;
+ tipc_node_unlock(n_ptr);
+ continue;
+ }
+ goto unlock_discard;
+ }
+
+ /* Link is now in state WORKING_WORKING */
+ if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
link_handle_out_of_seq_msg(l_ptr, buf);
head = link_insert_deferred_queue(l_ptr, head);
tipc_node_unlock(n_ptr);
continue;
}
-
- /* Link is not in state WORKING_WORKING */
- if (msg_user(msg) == LINK_PROTOCOL) {
- link_recv_proto_msg(l_ptr, buf);
+ l_ptr->next_in_no++;
+ if (unlikely(l_ptr->oldest_deferred_in))
head = link_insert_deferred_queue(l_ptr, head);
+deliver:
+ if (likely(msg_isdata(msg))) {
tipc_node_unlock(n_ptr);
+ tipc_port_recv_msg(buf);
continue;
}
-
- /* Traffic message. Conditionally activate link */
- link_state_event(l_ptr, TRAFFIC_MSG_EVT);
-
- if (link_working_working(l_ptr)) {
- /* Re-insert buffer in front of queue */
- buf->next = head;
- head = buf;
+ switch (msg_user(msg)) {
+ int ret;
+ case MSG_BUNDLER:
+ l_ptr->stats.recv_bundles++;
+ l_ptr->stats.recv_bundled += msg_msgcnt(msg);
+ tipc_node_unlock(n_ptr);
+ tipc_link_recv_bundle(buf);
+ continue;
+ case NAME_DISTRIBUTOR:
+ n_ptr->bclink.recv_permitted = true;
tipc_node_unlock(n_ptr);
+ tipc_named_recv(buf);
continue;
+ case BCAST_PROTOCOL:
+ tipc_link_recv_sync(n_ptr, buf);
+ tipc_node_unlock(n_ptr);
+ continue;
+ case CONN_MANAGER:
+ tipc_node_unlock(n_ptr);
+ tipc_port_recv_proto_msg(buf);
+ continue;
+ case MSG_FRAGMENTER:
+ l_ptr->stats.recv_fragments++;
+ ret = tipc_link_recv_fragment(&l_ptr->defragm_buf,
+ &buf, &msg);
+ if (ret == 1) {
+ l_ptr->stats.recv_fragmented++;
+ goto deliver;
+ }
+ if (ret == -1)
+ l_ptr->next_in_no--;
+ break;
+ case CHANGEOVER_PROTOCOL:
+ type = msg_type(msg);
+ if (link_recv_changeover_msg(&l_ptr, &buf)) {
+ msg = buf_msg(buf);
+ seq_no = msg_seqno(msg);
+ if (type == ORIGINAL_MSG)
+ goto deliver;
+ goto protocol_check;
+ }
+ break;
+ default:
+ kfree_skb(buf);
+ buf = NULL;
+ break;
}
tipc_node_unlock(n_ptr);
-cont:
+ tipc_net_route_msg(buf);
+ continue;
+unlock_discard:
+
+ tipc_node_unlock(n_ptr);
+discard:
kfree_skb(buf);
}
read_unlock_bh(&tipc_net_lock);
@@ -2585,25 +2495,21 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
static struct tipc_link *link_find_link(const char *name,
struct tipc_node **node)
{
- struct tipc_link_name link_name_parts;
- struct tipc_bearer *b_ptr;
struct tipc_link *l_ptr;
+ struct tipc_node *n_ptr;
+ int i;
- if (!link_name_validate(name, &link_name_parts))
- return NULL;
-
- b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
- if (!b_ptr)
- return NULL;
-
- *node = tipc_node_find(link_name_parts.addr_peer);
- if (!*node)
- return NULL;
-
- l_ptr = (*node)->links[b_ptr->identity];
- if (!l_ptr || strcmp(l_ptr->name, name))
- return NULL;
-
+ list_for_each_entry(n_ptr, &tipc_node_list, list) {
+ for (i = 0; i < MAX_BEARERS; i++) {
+ l_ptr = n_ptr->links[i];
+ if (l_ptr && !strcmp(l_ptr->name, name))
+ goto found;
+ }
+ }
+ l_ptr = NULL;
+ n_ptr = NULL;
+found:
+ *node = n_ptr;
return l_ptr;
}
@@ -2646,6 +2552,7 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
struct tipc_link *l_ptr;
struct tipc_bearer *b_ptr;
struct tipc_media *m_ptr;
+ int res = 0;
l_ptr = link_find_link(name, &node);
if (l_ptr) {
@@ -2668,9 +2575,12 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
case TIPC_CMD_SET_LINK_WINDOW:
tipc_link_set_queue_limits(l_ptr, new_value);
break;
+ default:
+ res = -EINVAL;
+ break;
}
tipc_node_unlock(node);
- return 0;
+ return res;
}
b_ptr = tipc_bearer_find(name);
@@ -2678,15 +2588,18 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
switch (cmd) {
case TIPC_CMD_SET_LINK_TOL:
b_ptr->tolerance = new_value;
- return 0;
+ break;
case TIPC_CMD_SET_LINK_PRI:
b_ptr->priority = new_value;
- return 0;
+ break;
case TIPC_CMD_SET_LINK_WINDOW:
b_ptr->window = new_value;
- return 0;
+ break;
+ default:
+ res = -EINVAL;
+ break;
}
- return -EINVAL;
+ return res;
}
m_ptr = tipc_media_find(name);
@@ -2695,15 +2608,18 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
switch (cmd) {
case TIPC_CMD_SET_LINK_TOL:
m_ptr->tolerance = new_value;
- return 0;
+ break;
case TIPC_CMD_SET_LINK_PRI:
m_ptr->priority = new_value;
- return 0;
+ break;
case TIPC_CMD_SET_LINK_WINDOW:
m_ptr->window = new_value;
- return 0;
+ break;
+ default:
+ res = -EINVAL;
+ break;
}
- return -EINVAL;
+ return res;
}
struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
diff --git a/net/tipc/link.h b/net/tipc/link.h
index c048ed1cbd76..55cf8554a08b 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -227,9 +227,7 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
int tipc_link_send_sections_fast(struct tipc_port *sender,
struct iovec const *msg_sect,
- const u32 num_sect,
- unsigned int total_len,
- u32 destnode);
+ unsigned int len, u32 destnode);
void tipc_link_recv_bundle(struct sk_buff *buf);
int tipc_link_recv_fragment(struct sk_buff **pending,
struct sk_buff **fb,
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index ced60e2fc4f7..e525f8ce1dee 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -73,13 +73,13 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
* Returns message data size or errno
*/
int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
- u32 num_sect, unsigned int total_len, int max_size,
- struct sk_buff **buf)
+ unsigned int len, int max_size, struct sk_buff **buf)
{
- int dsz, sz, hsz, pos, res, cnt;
+ int dsz, sz, hsz;
+ unsigned char *to;
- dsz = total_len;
- pos = hsz = msg_hdr_sz(hdr);
+ dsz = len;
+ hsz = msg_hdr_sz(hdr);
sz = hsz + dsz;
msg_set_size(hdr, sz);
if (unlikely(sz > max_size)) {
@@ -91,16 +91,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
if (!(*buf))
return -ENOMEM;
skb_copy_to_linear_data(*buf, hdr, hsz);
- for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
- skb_copy_to_linear_data_offset(*buf, pos,
- msg_sect[cnt].iov_base,
- msg_sect[cnt].iov_len);
- pos += msg_sect[cnt].iov_len;
+ to = (*buf)->data + hsz;
+ if (len && memcpy_fromiovecend(to, msg_sect, 0, dsz)) {
+ kfree_skb(*buf);
+ *buf = NULL;
+ return -EFAULT;
}
- if (likely(res))
- return dsz;
-
- kfree_skb(*buf);
- *buf = NULL;
- return -EFAULT;
+ return dsz;
}
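/*
 * Sketch of the build pattern tipc_msg_build() switches to: allocate one
 * linear buffer for header plus payload, copy the header, then gather the
 * whole user iovec with a single memcpy_fromiovecend() call instead of a
 * per-section loop.  Allocation flags and names are assumptions.
 */
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uio.h>

static struct sk_buff *example_build(const void *hdr, unsigned int hsz,
				     struct iovec const *iov, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(hsz + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_put(skb, hsz + len);
	skb_copy_to_linear_data(skb, hdr, hsz);
	if (len && memcpy_fromiovecend(skb->data + hsz, iov, 0, len)) {
		kfree_skb(skb);			/* -EFAULT from user space */
		return NULL;
	}
	return skb;
}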
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 5e4ccf5c27df..559b73a9bf35 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -722,6 +722,5 @@ u32 tipc_msg_tot_importance(struct tipc_msg *m);
void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
u32 destnode);
int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
- u32 num_sect, unsigned int total_len, int max_size,
- struct sk_buff **buf);
+ unsigned int len, int max_size, struct sk_buff **buf);
#endif
diff --git a/net/tipc/port.c b/net/tipc/port.c
index b3ed2fcab4fb..c081a7632302 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -90,8 +90,7 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
* tipc_multicast - send a multicast message to local and remote destinations
*/
int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
- u32 num_sect, struct iovec const *msg_sect,
- unsigned int total_len)
+ struct iovec const *msg_sect, unsigned int len)
{
struct tipc_msg *hdr;
struct sk_buff *buf;
@@ -114,8 +113,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
msg_set_namelower(hdr, seq->lower);
msg_set_nameupper(hdr, seq->upper);
msg_set_hdr_sz(hdr, MCAST_H_SIZE);
- res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
- &buf);
+ res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
if (unlikely(!buf))
return res;
@@ -436,14 +434,13 @@ exit:
}
int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
- struct iovec const *msg_sect, u32 num_sect,
- unsigned int total_len, int err)
+ struct iovec const *msg_sect, unsigned int len,
+ int err)
{
struct sk_buff *buf;
int res;
- res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
- &buf);
+ res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
if (!buf)
return res;
@@ -918,15 +915,14 @@ int tipc_port_recv_msg(struct sk_buff *buf)
* tipc_port_recv_sections(): Concatenate and deliver sectioned
* message for this node.
*/
-static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect,
+static int tipc_port_recv_sections(struct tipc_port *sender,
struct iovec const *msg_sect,
- unsigned int total_len)
+ unsigned int len)
{
struct sk_buff *buf;
int res;
- res = tipc_msg_build(&sender->phdr, msg_sect, num_sect, total_len,
- MAX_MSG_SIZE, &buf);
+ res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
if (likely(buf))
tipc_port_recv_msg(buf);
return res;
@@ -935,8 +931,7 @@ static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_se
/**
* tipc_send - send message sections on connection
*/
-int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
- unsigned int total_len)
+int tipc_send(u32 ref, struct iovec const *msg_sect, unsigned int len)
{
struct tipc_port *p_ptr;
u32 destnode;
@@ -950,11 +945,10 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
if (!tipc_port_congested(p_ptr)) {
destnode = port_peernode(p_ptr);
if (likely(!in_own_node(destnode)))
- res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
- total_len, destnode);
+ res = tipc_link_send_sections_fast(p_ptr, msg_sect,
+ len, destnode);
else
- res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
- total_len);
+ res = tipc_port_recv_sections(p_ptr, msg_sect, len);
if (likely(res != -ELINKCONG)) {
p_ptr->congested = 0;
@@ -965,7 +959,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
}
if (port_unreliable(p_ptr)) {
p_ptr->congested = 0;
- return total_len;
+ return len;
}
return -ELINKCONG;
}
@@ -974,8 +968,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
* tipc_send2name - send message sections to port name
*/
int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
- unsigned int num_sect, struct iovec const *msg_sect,
- unsigned int total_len)
+ struct iovec const *msg_sect, unsigned int len)
{
struct tipc_port *p_ptr;
struct tipc_msg *msg;
@@ -999,36 +992,32 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
if (likely(destport || destnode)) {
if (likely(in_own_node(destnode)))
- res = tipc_port_recv_sections(p_ptr, num_sect,
- msg_sect, total_len);
+ res = tipc_port_recv_sections(p_ptr, msg_sect, len);
else if (tipc_own_addr)
res = tipc_link_send_sections_fast(p_ptr, msg_sect,
- num_sect, total_len,
- destnode);
+ len, destnode);
else
res = tipc_port_reject_sections(p_ptr, msg, msg_sect,
- num_sect, total_len,
- TIPC_ERR_NO_NODE);
+ len, TIPC_ERR_NO_NODE);
if (likely(res != -ELINKCONG)) {
if (res > 0)
p_ptr->sent++;
return res;
}
if (port_unreliable(p_ptr)) {
- return total_len;
+ return len;
}
return -ELINKCONG;
}
- return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
- total_len, TIPC_ERR_NO_NAME);
+ return tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
+ TIPC_ERR_NO_NAME);
}
/**
* tipc_send2port - send message sections to port identity
*/
int tipc_send2port(u32 ref, struct tipc_portid const *dest,
- unsigned int num_sect, struct iovec const *msg_sect,
- unsigned int total_len)
+ struct iovec const *msg_sect, unsigned int len)
{
struct tipc_port *p_ptr;
struct tipc_msg *msg;
@@ -1046,21 +1035,20 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
msg_set_hdr_sz(msg, BASIC_H_SIZE);
if (in_own_node(dest->node))
- res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
- total_len);
+ res = tipc_port_recv_sections(p_ptr, msg_sect, len);
else if (tipc_own_addr)
- res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
- total_len, dest->node);
+ res = tipc_link_send_sections_fast(p_ptr, msg_sect, len,
+ dest->node);
else
- res = tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
- total_len, TIPC_ERR_NO_NODE);
+ res = tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
+ TIPC_ERR_NO_NODE);
if (likely(res != -ELINKCONG)) {
if (res > 0)
p_ptr->sent++;
return res;
}
if (port_unreliable(p_ptr)) {
- return total_len;
+ return len;
}
return -ELINKCONG;
}
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 5a7026b9c345..912253597343 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -151,24 +151,20 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
* TIPC messaging routines
*/
int tipc_port_recv_msg(struct sk_buff *buf);
-int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect,
- unsigned int total_len);
+int tipc_send(u32 portref, struct iovec const *msg_sect, unsigned int len);
int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
- unsigned int num_sect, struct iovec const *msg_sect,
- unsigned int total_len);
+ struct iovec const *msg_sect, unsigned int len);
int tipc_send2port(u32 portref, struct tipc_portid const *dest,
- unsigned int num_sect, struct iovec const *msg_sect,
- unsigned int total_len);
+ struct iovec const *msg_sect, unsigned int len);
int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
- unsigned int section_count, struct iovec const *msg,
- unsigned int total_len);
+ struct iovec const *msg, unsigned int len);
int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
- struct iovec const *msg_sect, u32 num_sect,
- unsigned int total_len, int err);
+ struct iovec const *msg_sect, unsigned int len,
+ int err);
struct sk_buff *tipc_port_get_ports(void);
void tipc_port_recv_proto_msg(struct sk_buff *buf);
void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 6cc7ddd2fb7c..3906527259d1 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -338,7 +338,7 @@ static int release(struct socket *sock)
buf = __skb_dequeue(&sk->sk_receive_queue);
if (buf == NULL)
break;
- if (TIPC_SKB_CB(buf)->handle != 0)
+ if (TIPC_SKB_CB(buf)->handle != NULL)
kfree_skb(buf);
else {
if ((sock->state == SS_CONNECTING) ||
@@ -622,13 +622,11 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
res = tipc_send2name(tport->ref,
&dest->addr.name.name,
dest->addr.name.domain,
- m->msg_iovlen,
m->msg_iov,
total_len);
} else if (dest->addrtype == TIPC_ADDR_ID) {
res = tipc_send2port(tport->ref,
&dest->addr.id,
- m->msg_iovlen,
m->msg_iov,
total_len);
} else if (dest->addrtype == TIPC_ADDR_MCAST) {
@@ -641,7 +639,6 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
break;
res = tipc_multicast(tport->ref,
&dest->addr.nameseq,
- m->msg_iovlen,
m->msg_iov,
total_len);
}
@@ -707,8 +704,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
break;
}
- res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov,
- total_len);
+ res = tipc_send(tport->ref, m->msg_iov, total_len);
if (likely(res != -ELINKCONG))
break;
if (timeout_val <= 0L) {
@@ -1368,7 +1364,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
return TIPC_ERR_OVERLOAD;
/* Enqueue message */
- TIPC_SKB_CB(buf)->handle = 0;
+ TIPC_SKB_CB(buf)->handle = NULL;
__skb_queue_tail(&sk->sk_receive_queue, buf);
skb_set_owner_r(buf, sk);
@@ -1691,7 +1687,7 @@ restart:
/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
buf = __skb_dequeue(&sk->sk_receive_queue);
if (buf) {
- if (TIPC_SKB_CB(buf)->handle != 0) {
+ if (TIPC_SKB_CB(buf)->handle != NULL) {
kfree_skb(buf);
goto restart;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 86de99ad2976..c1f403bed683 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1246,6 +1246,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
return 0;
}
+static void unix_sock_inherit_flags(const struct socket *old,
+ struct socket *new)
+{
+ if (test_bit(SOCK_PASSCRED, &old->flags))
+ set_bit(SOCK_PASSCRED, &new->flags);
+ if (test_bit(SOCK_PASSSEC, &old->flags))
+ set_bit(SOCK_PASSSEC, &new->flags);
+}
+
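/*
 * User-space view of what the inherited flags enable (illustrative sketch,
 * not part of the patch): with SO_PASSCRED set on the listener, sockets
 * returned by accept() are expected to deliver SCM_CREDENTIALS ancillary
 * data as well.
 */
#define _GNU_SOURCE		/* for SO_PASSCRED/struct ucred on glibc */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

static int example_listener_with_passcred(const char *path)
{
	int one = 1;
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
	struct sockaddr_un addr = { .sun_family = AF_UNIX };

	if (fd < 0)
		return -1;
	strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) || listen(fd, 1)) {
		close(fd);
		return -1;
	}
	/* sockets accept()ed from fd inherit the pass-credentials behaviour */
	return fd;
}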
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *sk = sock->sk;
@@ -1280,6 +1289,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
/* attach accepted sock to socket */
unix_state_lock(tsk);
newsock->state = SS_CONNECTED;
+ unix_sock_inherit_flags(sock, newsock);
sock_graft(tsk, newsock);
unix_state_unlock(tsk);
return 0;
diff --git a/net/unix/diag.c b/net/unix/diag.c
index d591091603bf..86fa0f3b2caf 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -124,6 +124,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
rep->udiag_family = AF_UNIX;
rep->udiag_type = sk->sk_type;
rep->udiag_state = sk->sk_state;
+ rep->pad = 0;
rep->udiag_ino = sk_ino;
sock_diag_save_cookie(sk, rep->udiag_cookie);
diff --git a/net/vmw_vsock/Kconfig b/net/vmw_vsock/Kconfig
index b5fa7e40cdcb..14810abedc2e 100644
--- a/net/vmw_vsock/Kconfig
+++ b/net/vmw_vsock/Kconfig
@@ -6,7 +6,7 @@ config VSOCKETS
tristate "Virtual Socket protocol"
help
Virtual Socket Protocol is a socket protocol similar to TCP/IP
- allowing comunication between Virtual Machines and hypervisor
+ allowing communication between Virtual Machines and hypervisor
or host.
You should also select one or more hypervisor-specific transports
diff --git a/net/wimax/wimax-internal.h b/net/wimax/wimax-internal.h
index 1e743d214856..5dcd9c067bf0 100644
--- a/net/wimax/wimax-internal.h
+++ b/net/wimax/wimax-internal.h
@@ -63,11 +63,11 @@ void __wimax_state_set(struct wimax_dev *wimax_dev, enum wimax_st state)
{
wimax_dev->state = state;
}
-extern void __wimax_state_change(struct wimax_dev *, enum wimax_st);
+void __wimax_state_change(struct wimax_dev *, enum wimax_st);
#ifdef CONFIG_DEBUG_FS
-extern int wimax_debugfs_add(struct wimax_dev *);
-extern void wimax_debugfs_rm(struct wimax_dev *);
+int wimax_debugfs_add(struct wimax_dev *);
+void wimax_debugfs_rm(struct wimax_dev *);
#else
static inline int wimax_debugfs_add(struct wimax_dev *wimax_dev)
{
@@ -76,13 +76,13 @@ static inline int wimax_debugfs_add(struct wimax_dev *wimax_dev)
static inline void wimax_debugfs_rm(struct wimax_dev *wimax_dev) {}
#endif
-extern void wimax_id_table_add(struct wimax_dev *);
-extern struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
-extern void wimax_id_table_rm(struct wimax_dev *);
-extern void wimax_id_table_release(void);
+void wimax_id_table_add(struct wimax_dev *);
+struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
+void wimax_id_table_rm(struct wimax_dev *);
+void wimax_id_table_release(void);
-extern int wimax_rfkill_add(struct wimax_dev *);
-extern void wimax_rfkill_rm(struct wimax_dev *);
+int wimax_rfkill_add(struct wimax_dev *);
+void wimax_rfkill_rm(struct wimax_dev *);
extern struct genl_family wimax_gnl_family;
extern struct genl_multicast_group wimax_gnl_mcg;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 50f6195c8b70..16f3c3a7b2c1 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -328,6 +328,7 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
return cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq2,
width);
}
+EXPORT_SYMBOL(cfg80211_chandef_dfs_required);
static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
u32 center_freq, u32 bandwidth,
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 67153964aad2..aff959e5a1b3 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -566,18 +566,13 @@ int wiphy_register(struct wiphy *wiphy)
/* check and set up bitrates */
ieee80211_set_bitrate_flags(wiphy);
-
+ rtnl_lock();
res = device_add(&rdev->wiphy.dev);
- if (res)
- return res;
-
- res = rfkill_register(rdev->rfkill);
if (res) {
- device_del(&rdev->wiphy.dev);
+ rtnl_unlock();
return res;
}
- rtnl_lock();
/* set up regulatory info */
wiphy_regulatory_register(wiphy);
@@ -606,6 +601,15 @@ int wiphy_register(struct wiphy *wiphy)
rdev->wiphy.registered = true;
rtnl_unlock();
+
+ res = rfkill_register(rdev->rfkill);
+ if (res) {
+ rfkill_destroy(rdev->rfkill);
+ rdev->rfkill = NULL;
+ wiphy_unregister(&rdev->wiphy);
+ return res;
+ }
+
return 0;
}
EXPORT_SYMBOL(wiphy_register);
@@ -640,7 +644,8 @@ void wiphy_unregister(struct wiphy *wiphy)
rtnl_unlock();
__count == 0; }));
- rfkill_unregister(rdev->rfkill);
+ if (rdev->rfkill)
+ rfkill_unregister(rdev->rfkill);
rtnl_lock();
rdev->wiphy.registered = false;
@@ -953,8 +958,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
case NETDEV_PRE_UP:
if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
return notifier_from_errno(-EOPNOTSUPP);
- if (rfkill_blocked(rdev->rfkill))
- return notifier_from_errno(-ERFKILL);
ret = cfg80211_can_add_interface(rdev, wdev->iftype);
if (ret)
return notifier_from_errno(ret);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 9ad43c619c54..af10e59af2d8 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -234,10 +234,10 @@ struct cfg80211_beacon_registration {
};
/* free object */
-extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
+void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
-extern int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
- char *newname);
+int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
+ char *newname);
void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
@@ -382,15 +382,6 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
enum cfg80211_chan_mode chanmode,
u8 radar_detect);
-/**
- * cfg80211_chandef_dfs_required - checks if radar detection is required
- * @wiphy: the wiphy to validate against
- * @chandef: the channel definition to check
- * Return: 1 if radar detection is required, 0 if it is not, < 0 on error
- */
-int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
- const struct cfg80211_chan_def *c);
-
void cfg80211_set_dfs_state(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef,
enum nl80211_dfs_state dfs_state);
@@ -411,6 +402,9 @@ static inline int
cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
enum nl80211_iftype iftype)
{
+ if (rfkill_blocked(rdev->rfkill))
+ return -ERFKILL;
+
return cfg80211_can_change_interface(rdev, NULL, iftype);
}
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index 90d050036624..454157717efa 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -47,17 +47,19 @@ static int ht_print_chan(struct ieee80211_channel *chan,
return 0;
if (chan->flags & IEEE80211_CHAN_DISABLED)
- return snprintf(buf + offset,
- buf_size - offset,
- "%d Disabled\n",
- chan->center_freq);
-
- return snprintf(buf + offset,
- buf_size - offset,
- "%d HT40 %c%c\n",
- chan->center_freq,
- (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ? ' ' : '-',
- (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ? ' ' : '+');
+ return scnprintf(buf + offset,
+ buf_size - offset,
+ "%d Disabled\n",
+ chan->center_freq);
+
+ return scnprintf(buf + offset,
+ buf_size - offset,
+ "%d HT40 %c%c\n",
+ chan->center_freq,
+ (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ?
+ ' ' : '-',
+ (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ?
+ ' ' : '+');
}
static ssize_t ht40allow_map_read(struct file *file,
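/*
 * Why ht_print_chan() moves to scnprintf(): when appending into a fixed
 * buffer, snprintf() returns the length that would have been written, which
 * can push the running offset past buf_size on truncation, while scnprintf()
 * returns only what was actually stored.  Minimal sketch:
 */
#include <linux/kernel.h>

static size_t example_append(char *buf, size_t buf_size, size_t offset,
			     int freq)
{
	/* the offset never advances beyond the space that was really used */
	return offset + scnprintf(buf + offset, buf_size - offset,
				  "%d MHz\n", freq);
}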
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
index 9392f8cbb901..42ed274e81f4 100644
--- a/net/wireless/genregdb.awk
+++ b/net/wireless/genregdb.awk
@@ -46,6 +46,12 @@ BEGIN {
sub(/:/, "", country)
printf "static const struct ieee80211_regdomain regdom_%s = {\n", country
printf "\t.alpha2 = \"%s\",\n", country
+ if ($NF ~ /DFS-ETSI/)
+ printf "\t.dfs_region = NL80211_DFS_ETSI,\n"
+ else if ($NF ~ /DFS-FCC/)
+ printf "\t.dfs_region = NL80211_DFS_FCC,\n"
+ else if ($NF ~ /DFS-JP/)
+ printf "\t.dfs_region = NL80211_DFS_JP,\n"
printf "\t.reg_rules = {\n"
active = 1
regdb = regdb "\t&regdom_" country ",\n"
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 39bff7d36768..403fe29c024d 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -263,6 +263,8 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
if (chan->flags & IEEE80211_CHAN_DISABLED)
continue;
wdev->wext.ibss.chandef.chan = chan;
+ wdev->wext.ibss.chandef.center_freq1 =
+ chan->center_freq;
break;
}
@@ -347,6 +349,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
if (chan) {
wdev->wext.ibss.chandef.chan = chan;
wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
+ wdev->wext.ibss.chandef.center_freq1 = freq;
wdev->wext.ibss.channel_fixed = true;
} else {
/* cfg80211_ibss_wext_join will pick one if needed */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index af8d84a4a5b2..cbbef88a8ebd 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2421,7 +2421,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
change = true;
}
- if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) &&
+ if (flags && (*flags & MONITOR_FLAG_ACTIVE) &&
!(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
return -EOPNOTSUPP;
@@ -2483,7 +2483,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
&flags);
- if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) &&
+ if (!err && (flags & MONITOR_FLAG_ACTIVE) &&
!(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
return -EOPNOTSUPP;
@@ -5591,6 +5591,9 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
if (err)
return err;
+ if (netif_carrier_ok(dev))
+ return -EBUSY;
+
if (wdev->cac_started)
return -EBUSY;
@@ -5634,15 +5637,26 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
static struct nlattr *csa_attrs[NL80211_ATTR_MAX+1];
u8 radar_detect_width = 0;
int err;
+ bool need_new_beacon = false;
if (!rdev->ops->channel_switch ||
!(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
return -EOPNOTSUPP;
- /* may add IBSS support later */
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ switch (dev->ieee80211_ptr->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ need_new_beacon = true;
+
+ /* useless if AP is not running */
+ if (!wdev->beacon_interval)
+ return -EINVAL;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ break;
+ default:
return -EOPNOTSUPP;
+ }
memset(&params, 0, sizeof(params));
@@ -5651,15 +5665,16 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
/* only important for AP, IBSS and mesh create IEs internally */
- if (!info->attrs[NL80211_ATTR_CSA_IES])
- return -EINVAL;
-
- /* useless if AP is not running */
- if (!wdev->beacon_interval)
+ if (need_new_beacon &&
+ (!info->attrs[NL80211_ATTR_CSA_IES] ||
+ !info->attrs[NL80211_ATTR_CSA_C_OFF_BEACON]))
return -EINVAL;
params.count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]);
+ if (!need_new_beacon)
+ goto skip_beacons;
+
err = nl80211_parse_beacon(info->attrs, &params.beacon_after);
if (err)
return err;
@@ -5699,6 +5714,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
+skip_beacons:
err = nl80211_parse_chandef(rdev, info, &params.chandef);
if (err)
return err;
@@ -5706,12 +5722,17 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
return -EINVAL;
- err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
- if (err < 0) {
- return err;
- } else if (err) {
- radar_detect_width = BIT(params.chandef.width);
- params.radar_required = true;
+ /* DFS channels are only supported for AP/P2P GO ... for now. */
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP ||
+ dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+ err = cfg80211_chandef_dfs_required(wdev->wiphy,
+ &params.chandef);
+ if (err < 0) {
+ return err;
+ } else if (err) {
+ radar_detect_width = BIT(params.chandef.width);
+ params.radar_required = true;
+ }
}
err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
@@ -10740,7 +10761,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
wdev_lock(wdev);
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
- wdev->iftype != NL80211_IFTYPE_P2P_GO))
+ wdev->iftype != NL80211_IFTYPE_P2P_GO &&
+ wdev->iftype != NL80211_IFTYPE_ADHOC))
goto out;
wdev->channel = chandef->chan;
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index 7d604c06c3dc..a271c27fac77 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -97,6 +97,10 @@ int ieee80211_radiotap_iterator_init(
struct ieee80211_radiotap_header *radiotap_header,
int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
{
+ /* check the radiotap header can actually be present */
+ if (max_length < sizeof(struct ieee80211_radiotap_header))
+ return -EINVAL;
+
/* Linux only supports version 0 radiotap format */
if (radiotap_header->it_version)
return -EINVAL;
@@ -131,7 +135,8 @@ int ieee80211_radiotap_iterator_init(
*/
if ((unsigned long)iterator->_arg -
- (unsigned long)iterator->_rtheader >
+ (unsigned long)iterator->_rtheader +
+ sizeof(uint32_t) >
(unsigned long)iterator->_max_length)
return -EINVAL;
}
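/*
 * The rule the radiotap hunks enforce, as a sketch: check that the fixed
 * header fits in the buffer before reading any of its fields, and that the
 * length it declares also fits.  The structure here is illustrative, not
 * the real radiotap header.
 */
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

struct example_hdr {
	u8 version;
	u8 pad;
	__le16 len;
	__le32 present;
} __packed;

static int example_check(const struct example_hdr *hdr, int max_length)
{
	if (max_length < (int)sizeof(*hdr))
		return -EINVAL;		/* fixed header itself must fit */
	if (le16_to_cpu(hdr->len) > max_length)
		return -EINVAL;		/* declared length must fit too */
	return 0;
}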
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index de06d5d1287f..a0ec143ba3dc 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -172,11 +172,21 @@ static const struct ieee80211_regdomain world_regdom = {
NL80211_RRF_NO_IBSS |
NL80211_RRF_NO_OFDM),
/* IEEE 802.11a, channel 36..48 */
- REG_RULE(5180-10, 5240+10, 80, 6, 20,
+ REG_RULE(5180-10, 5240+10, 160, 6, 20,
NL80211_RRF_PASSIVE_SCAN |
NL80211_RRF_NO_IBSS),
- /* NB: 5260 MHz - 5700 MHz requires DFS */
+ /* IEEE 802.11a, channel 52..64 - DFS required */
+ REG_RULE(5260-10, 5320+10, 160, 6, 20,
+ NL80211_RRF_PASSIVE_SCAN |
+ NL80211_RRF_NO_IBSS |
+ NL80211_RRF_DFS),
+
+ /* IEEE 802.11a, channel 100..144 - DFS required */
+ REG_RULE(5500-10, 5720+10, 160, 6, 20,
+ NL80211_RRF_PASSIVE_SCAN |
+ NL80211_RRF_NO_IBSS |
+ NL80211_RRF_DFS),
/* IEEE 802.11a, channel 149..165 */
REG_RULE(5745-10, 5825+10, 80, 6, 20,
@@ -758,23 +768,25 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
}
EXPORT_SYMBOL(freq_reg_info);
-#ifdef CONFIG_CFG80211_REG_DEBUG
-static const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
+const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
{
switch (initiator) {
case NL80211_REGDOM_SET_BY_CORE:
- return "Set by core";
+ return "core";
case NL80211_REGDOM_SET_BY_USER:
- return "Set by user";
+ return "user";
case NL80211_REGDOM_SET_BY_DRIVER:
- return "Set by driver";
+ return "driver";
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
- return "Set by country IE";
+ return "country IE";
default:
WARN_ON(1);
- return "Set by bug";
+ return "bug";
}
}
+EXPORT_SYMBOL(reg_initiator_name);
+
+#ifdef CONFIG_CFG80211_REG_DEBUG
static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
const struct ieee80211_reg_rule *reg_rule)
@@ -969,14 +981,17 @@ static bool ignore_reg_update(struct wiphy *wiphy,
struct regulatory_request *lr = get_last_request();
if (!lr) {
- REG_DBG_PRINT("Ignoring regulatory request %s since last_request is not set\n",
+ REG_DBG_PRINT("Ignoring regulatory request set by %s "
+ "since last_request is not set\n",
reg_initiator_name(initiator));
return true;
}
if (initiator == NL80211_REGDOM_SET_BY_CORE &&
wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
- REG_DBG_PRINT("Ignoring regulatory request %s since the driver uses its own custom regulatory domain\n",
+ REG_DBG_PRINT("Ignoring regulatory request set by %s "
+ "since the driver uses its own custom "
+ "regulatory domain\n",
reg_initiator_name(initiator));
return true;
}
@@ -988,7 +1003,9 @@ static bool ignore_reg_update(struct wiphy *wiphy,
if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
!is_world_regdom(lr->alpha2)) {
- REG_DBG_PRINT("Ignoring regulatory request %s since the driver requires its own regulatory domain to be set first\n",
+ REG_DBG_PRINT("Ignoring regulatory request set by %s "
+ "since the driver requires its own regulatory "
+ "domain to be set first\n",
reg_initiator_name(initiator));
return true;
}
diff --git a/net/wireless/sysfs.h b/net/wireless/sysfs.h
index 65acbebd3711..b533ed71daff 100644
--- a/net/wireless/sysfs.h
+++ b/net/wireless/sysfs.h
@@ -1,8 +1,8 @@
#ifndef __WIRELESS_SYSFS_H
#define __WIRELESS_SYSFS_H
-extern int wiphy_sysfs_init(void);
-extern void wiphy_sysfs_exit(void);
+int wiphy_sysfs_init(void);
+void wiphy_sysfs_exit(void);
extern struct class ieee80211_class;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index ce090c1c5e4f..3c8be6104ba4 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -10,6 +10,7 @@
#include <net/cfg80211.h>
#include <net/ip.h>
#include <net/dsfield.h>
+#include <linux/if_vlan.h>
#include "core.h"
#include "rdev-ops.h"
@@ -691,6 +692,7 @@ EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
unsigned int cfg80211_classify8021d(struct sk_buff *skb)
{
unsigned int dscp;
+ unsigned char vlan_priority;
/* skb->priority values from 256->263 are magic values to
* directly indicate a specific 802.1d priority. This is used
@@ -700,6 +702,13 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb)
if (skb->priority >= 256 && skb->priority <= 263)
return skb->priority - 256;
+ if (vlan_tx_tag_present(skb)) {
+ vlan_priority = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK)
+ >> VLAN_PRIO_SHIFT;
+ if (vlan_priority > 0)
+ return vlan_priority;
+ }
+
switch (skb->protocol) {
case htons(ETH_P_IP):
dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc;
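/*
 * The priority taken above is the 3-bit PCP field in bits 15..13 of the
 * 802.1Q TCI, which is what VLAN_PRIO_MASK/VLAN_PRIO_SHIFT select.  Tiny
 * worked example (illustrative helper name):
 */
#include <linux/if_vlan.h>
#include <linux/types.h>

static unsigned int example_pcp_from_tci(u16 tci)
{
	/* e.g. tci == 0x6005 -> PCP 3, VLAN ID 5 */
	return (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}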
diff --git a/net/x25/Kconfig b/net/x25/Kconfig
index c959312c45e3..e2fa133f9fba 100644
--- a/net/x25/Kconfig
+++ b/net/x25/Kconfig
@@ -16,8 +16,8 @@ config X25
if you want that) and the lower level data link layer protocol LAPB
(say Y to "LAPB Data Link Driver" below if you want that).
- You can read more about X.25 at <http://www.sangoma.com/x25.htm> and
- <http://www.cisco.com/univercd/cc/td/doc/product/software/ios11/cbook/cx25.htm>.
+ You can read more about X.25 at <http://www.sangoma.com/tutorials/x25/> and
+ <http://docwiki.cisco.com/wiki/X.25>.
Information about X.25 for Linux is contained in the files
<file:Documentation/networking/x25.txt> and
<file:Documentation/networking/x25-iface.txt>.
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index 716502ada53b..0622d319e1f2 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -130,7 +130,7 @@ static inline unsigned int __addr_hash(const xfrm_address_t *daddr,
return h & hmask;
}
-extern struct hlist_head *xfrm_hash_alloc(unsigned int sz);
-extern void xfrm_hash_free(struct hlist_head *n, unsigned int sz);
+struct hlist_head *xfrm_hash_alloc(unsigned int sz);
+void xfrm_hash_free(struct hlist_head *n, unsigned int sz);
#endif /* _XFRM_HASH_H */
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 2906d520eea7..ccfdc7115a83 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -141,14 +141,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
const int plen = skb->len;
int dlen = IPCOMP_SCRATCH_SIZE;
u8 *start = skb->data;
- const int cpu = get_cpu();
- u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
- struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
+ struct crypto_comp *tfm;
+ u8 *scratch;
int err;
local_bh_disable();
+ scratch = *this_cpu_ptr(ipcomp_scratches);
+ tfm = *this_cpu_ptr(ipcd->tfms);
err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
- local_bh_enable();
if (err)
goto out;
@@ -158,13 +158,13 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
}
memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
- put_cpu();
+ local_bh_enable();
pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
return 0;
out:
- put_cpu();
+ local_bh_enable();
return err;
}
@@ -220,8 +220,8 @@ static void ipcomp_free_scratches(void)
static void * __percpu *ipcomp_alloc_scratches(void)
{
- int i;
void * __percpu *scratches;
+ int i;
if (ipcomp_scratch_users++)
return ipcomp_scratches;
@@ -233,7 +233,9 @@ static void * __percpu *ipcomp_alloc_scratches(void)
ipcomp_scratches = scratches;
for_each_possible_cpu(i) {
- void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
+ void *scratch;
+
+ scratch = vmalloc_node(IPCOMP_SCRATCH_SIZE, cpu_to_node(i));
if (!scratch)
return NULL;
*per_cpu_ptr(scratches, i) = scratch;
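/*
 * Sketch of the per-CPU access pattern ipcomp_compress() moves to: with
 * bottom halves disabled the task cannot migrate or be preempted by the
 * softirq users of the scratch area on this CPU, so this_cpu_ptr() replaces
 * the get_cpu()/put_cpu() pair.  Buffer size and names are assumptions.
 */
#include <linux/percpu.h>
#include <linux/bottom_half.h>
#include <linux/string.h>
#include <linux/kernel.h>

struct example_buf {
	char data[256];
};

static DEFINE_PER_CPU(struct example_buf, example_scratch);

static void example_transform(const char *src, size_t len)
{
	struct example_buf *b;

	local_bh_disable();
	b = this_cpu_ptr(&example_scratch);
	memcpy(b->data, src, min(len, sizeof(b->data)));
	/* ... work on b->data; safe from softirqs on this CPU ... */
	local_bh_enable();
}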
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ed38d5d81f9e..9a91f7431c41 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -334,7 +334,8 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
atomic_inc(&policy->genid);
- del_timer(&policy->polq.hold_timer);
+ if (del_timer(&policy->polq.hold_timer))
+ xfrm_pol_put(policy);
xfrm_queue_purge(&policy->polq.hold_queue);
if (del_timer(&policy->timer))
@@ -589,7 +590,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
spin_lock_bh(&pq->hold_queue.lock);
skb_queue_splice_init(&pq->hold_queue, &list);
- del_timer(&pq->hold_timer);
+ if (del_timer(&pq->hold_timer))
+ xfrm_pol_put(old);
spin_unlock_bh(&pq->hold_queue.lock);
if (skb_queue_empty(&list))
@@ -600,7 +602,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
spin_lock_bh(&pq->hold_queue.lock);
skb_queue_splice(&list, &pq->hold_queue);
pq->timeout = XFRM_QUEUE_TMO_MIN;
- mod_timer(&pq->hold_timer, jiffies);
+ if (!mod_timer(&pq->hold_timer, jiffies))
+ xfrm_pol_hold(new);
spin_unlock_bh(&pq->hold_queue.lock);
}
@@ -1769,6 +1772,10 @@ static void xfrm_policy_queue_process(unsigned long arg)
spin_lock(&pq->hold_queue.lock);
skb = skb_peek(&pq->hold_queue);
+ if (!skb) {
+ spin_unlock(&pq->hold_queue.lock);
+ goto out;
+ }
dst = skb_dst(skb);
sk = skb->sk;
xfrm_decode_session(skb, &fl, dst->ops->family);
@@ -1787,8 +1794,9 @@ static void xfrm_policy_queue_process(unsigned long arg)
goto purge_queue;
pq->timeout = pq->timeout << 1;
- mod_timer(&pq->hold_timer, jiffies + pq->timeout);
- return;
+ if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
+ xfrm_pol_hold(pol);
+ goto out;
}
dst_release(dst);
@@ -1819,11 +1827,14 @@ static void xfrm_policy_queue_process(unsigned long arg)
err = dst_output(skb);
}
+out:
+ xfrm_pol_put(pol);
return;
purge_queue:
pq->timeout = 0;
xfrm_queue_purge(&pq->hold_queue);
+ xfrm_pol_put(pol);
}
static int xdst_queue_output(struct sk_buff *skb)
@@ -1831,7 +1842,15 @@ static int xdst_queue_output(struct sk_buff *skb)
unsigned long sched_next;
struct dst_entry *dst = skb_dst(skb);
struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
- struct xfrm_policy_queue *pq = &xdst->pols[0]->polq;
+ struct xfrm_policy *pol = xdst->pols[0];
+ struct xfrm_policy_queue *pq = &pol->polq;
+ const struct sk_buff *fclone = skb + 1;
+
+ if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
+ fclone->fclone == SKB_FCLONE_CLONE)) {
+ kfree_skb(skb);
+ return 0;
+ }
if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
kfree_skb(skb);
@@ -1850,10 +1869,12 @@ static int xdst_queue_output(struct sk_buff *skb)
if (del_timer(&pq->hold_timer)) {
if (time_before(pq->hold_timer.expires, sched_next))
sched_next = pq->hold_timer.expires;
+ xfrm_pol_put(pol);
}
__skb_queue_tail(&pq->hold_queue, skb);
- mod_timer(&pq->hold_timer, sched_next);
+ if (!mod_timer(&pq->hold_timer, sched_next))
+ xfrm_pol_hold(pol);
spin_unlock_bh(&pq->hold_queue.lock);
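/*
 * The rule the xfrm_policy hunks apply, sketched with illustrative names:
 * a pending timer holds one reference on the object.  mod_timer() returning
 * 0 means the timer was not already pending, so a reference is taken;
 * del_timer() returning nonzero means a pending timer was cancelled, so the
 * reference it held is dropped.  Timer setup is omitted here.
 */
#include <linux/timer.h>
#include <linux/atomic.h>
#include <linux/slab.h>

struct example_obj {
	atomic_t refcnt;
	struct timer_list timer;
};

static void example_hold(struct example_obj *o)
{
	atomic_inc(&o->refcnt);
}

static void example_put(struct example_obj *o)
{
	if (atomic_dec_and_test(&o->refcnt))
		kfree(o);
}

static void example_arm(struct example_obj *o, unsigned long expires)
{
	if (!mod_timer(&o->timer, expires))
		example_hold(o);	/* timer was idle: it now holds a ref */
}

static void example_disarm(struct example_obj *o)
{
	if (del_timer(&o->timer))
		example_put(o);		/* drop the ref the timer held */
}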
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 8dafe6d3c6e4..dab57daae408 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -61,9 +61,9 @@ static void xfrm_replay_notify(struct xfrm_state *x, int event)
switch (event) {
case XFRM_REPLAY_UPDATE:
- if (x->replay_maxdiff &&
- (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
- (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
+ if (!x->replay_maxdiff ||
+ ((x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
+ (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff))) {
if (x->xflags & XFRM_TIME_DEFER)
event = XFRM_REPLAY_TIMEOUT;
else
@@ -129,8 +129,7 @@ static int xfrm_replay_check(struct xfrm_state *x,
return 0;
diff = x->replay.seq - seq;
- if (diff >= min_t(unsigned int, x->props.replay_window,
- sizeof(x->replay.bitmap) * 8)) {
+ if (diff >= x->props.replay_window) {
x->stats.replay_window++;
goto err;
}
@@ -302,9 +301,10 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
switch (event) {
case XFRM_REPLAY_UPDATE:
- if (x->replay_maxdiff &&
- (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
- (replay_esn->oseq - preplay_esn->oseq < x->replay_maxdiff)) {
+ if (!x->replay_maxdiff ||
+ ((replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
+ (replay_esn->oseq - preplay_esn->oseq
+ < x->replay_maxdiff))) {
if (x->xflags & XFRM_TIME_DEFER)
event = XFRM_REPLAY_TIMEOUT;
else
@@ -353,28 +353,30 @@ static void xfrm_replay_notify_esn(struct xfrm_state *x, int event)
switch (event) {
case XFRM_REPLAY_UPDATE:
- if (!x->replay_maxdiff)
- break;
-
- if (replay_esn->seq_hi == preplay_esn->seq_hi)
- seq_diff = replay_esn->seq - preplay_esn->seq;
- else
- seq_diff = ~preplay_esn->seq + replay_esn->seq + 1;
-
- if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
- oseq_diff = replay_esn->oseq - preplay_esn->oseq;
- else
- oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1;
-
- if (seq_diff < x->replay_maxdiff &&
- oseq_diff < x->replay_maxdiff) {
+ if (x->replay_maxdiff) {
+ if (replay_esn->seq_hi == preplay_esn->seq_hi)
+ seq_diff = replay_esn->seq - preplay_esn->seq;
+ else
+ seq_diff = ~preplay_esn->seq + replay_esn->seq
+ + 1;
- if (x->xflags & XFRM_TIME_DEFER)
- event = XFRM_REPLAY_TIMEOUT;
+ if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
+ oseq_diff = replay_esn->oseq
+ - preplay_esn->oseq;
else
- return;
+ oseq_diff = ~preplay_esn->oseq
+ + replay_esn->oseq + 1;
+
+ if (seq_diff >= x->replay_maxdiff ||
+ oseq_diff >= x->replay_maxdiff)
+ break;
}
+ if (x->xflags & XFRM_TIME_DEFER)
+ event = XFRM_REPLAY_TIMEOUT;
+ else
+ return;
+
break;
case XFRM_REPLAY_TIMEOUT:
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index b9c3f9e943a9..68c2f357a183 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -468,7 +468,7 @@ expired:
}
err = __xfrm_state_delete(x);
- if (!err && x->id.spi)
+ if (!err)
km_state_expired(x, 1, 0);
xfrm_audit_state_delete(x, err ? 0 : 1,
@@ -815,7 +815,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
xfrm_state_look_at(pol, x, fl, encap_family,
&best, &acquire_in_progress, &error);
}
- if (best)
+ if (best || acquire_in_progress)
goto found;
h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
@@ -824,7 +824,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
- xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
+ xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 3f565e495ac6..f964d4c00ffb 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -446,7 +446,8 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
memcpy(&x->sel, &p->sel, sizeof(x->sel));
memcpy(&x->lft, &p->lft, sizeof(x->lft));
x->props.mode = p->mode;
- x->props.replay_window = p->replay_window;
+ x->props.replay_window = min_t(unsigned int, p->replay_window,
+ sizeof(x->replay.bitmap) * 8);
x->props.reqid = p->reqid;
x->props.family = p->family;
memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
@@ -1856,7 +1857,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
if (x->km.state != XFRM_STATE_VALID)
goto out;
- err = xfrm_replay_verify_len(x->replay_esn, rp);
+ err = xfrm_replay_verify_len(x->replay_esn, re);
if (err)
goto out;
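
The copy_from_user_state() hunk above clamps the requested replay window once, at configuration time, to the bit capacity of x->replay.bitmap, which is what lets the earlier xfrm_replay_check() hunk drop its per-packet min_t clamp. A standalone sketch of that clamp, assuming a single 32-bit bitmap word (the struct layout here is illustrative, not the kernel's):

    #include <stdint.h>

    struct replay_state {
            uint32_t bitmap;          /* anti-replay window bits (assumed u32) */
            uint32_t replay_window;
    };

    /* Clamp a userspace-supplied window to what the bitmap can represent, so
     * later checks such as "diff >= replay_window" can trust the stored value. */
    static void set_replay_window(struct replay_state *rs, uint32_t requested)
    {
            uint32_t max_bits = sizeof(rs->bitmap) * 8;

            rs->replay_window = requested < max_bits ? requested : max_bits;
    }
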
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 8dcdca27d836..69f0a1417e9a 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -79,9 +79,11 @@ modpost = scripts/mod/modpost \
$(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \
$(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w)
+MODPOST_OPT=$(subst -i,-n,$(filter -i,$(MAKEFLAGS)))
+
# We can go over command line length here, so be careful.
quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules
- cmd_modpost = $(MODLISTCMD) | sed 's/\.ko$$/.o/' | $(modpost) -s -T -
+ cmd_modpost = $(MODLISTCMD) | sed 's/\.ko$$/.o/' | $(modpost) $(MODPOST_OPT) -s -T -
PHONY += __modpost
__modpost: $(modules:.ko=.o) FORCE
diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
index db0e5cd34c70..91c4117637ae 100644
--- a/scripts/asn1_compiler.c
+++ b/scripts/asn1_compiler.c
@@ -1353,6 +1353,8 @@ static void render_out_of_line_list(FILE *out)
render_opcode(out, "ASN1_OP_END_SET_OF%s,\n", act);
render_opcode(out, "_jump_target(%u),\n", entry);
break;
+ default:
+ break;
}
if (e->action)
render_opcode(out, "_action(ACT_%s),\n",
diff --git a/scripts/coccinelle/api/devm_request_and_ioremap.cocci b/scripts/coccinelle/api/devm_request_and_ioremap.cocci
deleted file mode 100644
index 562ec88b6352..000000000000
--- a/scripts/coccinelle/api/devm_request_and_ioremap.cocci
+++ /dev/null
@@ -1,105 +0,0 @@
-/// Reimplement a call to devm_request_mem_region followed by a call to ioremap
-/// or ioremap_nocache by a call to devm_request_and_ioremap.
-/// Devm_request_and_ioremap was introduced in
-/// 72f8c0bfa0de64c68ee59f40eb9b2683bffffbb0. It makes the code much more
-/// concise.
-///
-///
-// Confidence: High
-// Copyright: (C) 2011 Julia Lawall, INRIA/LIP6. GPLv2.
-// Copyright: (C) 2011 Gilles Muller, INRIA/LiP6. GPLv2.
-// URL: http://coccinelle.lip6.fr/
-// Comments:
-// Options: --no-includes --include-headers
-
-virtual patch
-virtual org
-virtual report
-virtual context
-
-@nm@
-expression myname;
-identifier i;
-@@
-
-struct platform_driver i = { .driver = { .name = myname } };
-
-@depends on patch@
-expression dev,res,size;
-@@
-
--if (!devm_request_mem_region(dev, res->start, size,
-- \(res->name\|dev_name(dev)\))) {
-- ...
-- return ...;
--}
-... when != res->start
-(
--devm_ioremap(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-|
--devm_ioremap_nocache(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-)
-... when any
- when != res->start
-
-// this rule is separate from the previous one, because a single file can
-// have multiple values of myname
-@depends on patch@
-expression dev,res,size;
-expression nm.myname;
-@@
-
--if (!devm_request_mem_region(dev, res->start, size,myname)) {
-- ...
-- return ...;
--}
-... when != res->start
-(
--devm_ioremap(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-|
--devm_ioremap_nocache(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-)
-... when any
- when != res->start
-
-
-@pb depends on org || report || context@
-expression dev,res,size;
-expression nm.myname;
-position p1,p2;
-@@
-
-*if
- (!devm_request_mem_region@p1(dev, res->start, size,
- \(res->name\|dev_name(dev)\|myname\))) {
- ...
- return ...;
-}
-... when != res->start
-(
-*devm_ioremap@p2(dev,res->start,size)
-|
-*devm_ioremap_nocache@p2(dev,res->start,size)
-)
-... when any
- when != res->start
-
-@script:python depends on org@
-p1 << pb.p1;
-p2 << pb.p2;
-@@
-
-cocci.print_main("INFO: replace by devm_request_and_ioremap",p1)
-cocci.print_secs("",p2)
-
-@script:python depends on report@
-p1 << pb.p1;
-p2 << pb.p2;
-@@
-
-msg = "INFO: devm_request_mem_region followed by ioremap on line %s can be replaced by devm_request_and_ioremap" % (p2[0].line)
-coccilib.report.print_report(p1[0],msg)
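
The deleted semantic patch rewrote the request-then-map idiom into a single devm_request_and_ioremap() call, as its header comment describes. For reference, the two shapes it matched look roughly like the probe-path fragment below (the map_regs_old/map_regs_new names are illustrative; only the devm_* calls are the real API):

    #include <linux/device.h>
    #include <linux/io.h>
    #include <linux/ioport.h>

    /* Before: request the MMIO region, then map it, each step checked. */
    static void __iomem *map_regs_old(struct device *dev, struct resource *res)
    {
            if (!devm_request_mem_region(dev, res->start, resource_size(res),
                                         dev_name(dev)))
                    return NULL;
            return devm_ioremap(dev, res->start, resource_size(res));
    }

    /* After: one devm call requests and maps the resource (NULL on failure). */
    static void __iomem *map_regs_new(struct device *dev, struct resource *res)
    {
            return devm_request_and_ioremap(dev, res);
    }
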
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 487ac6f37ca2..9a11f9f799f4 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -55,6 +55,7 @@ static struct sym_entry *table;
static unsigned int table_size, table_cnt;
static int all_symbols = 0;
static char symbol_prefix_char = '\0';
+static unsigned long long kernel_start_addr = 0;
int token_profit[0x10000];
@@ -65,7 +66,10 @@ unsigned char best_table_len[256];
static void usage(void)
{
- fprintf(stderr, "Usage: kallsyms [--all-symbols] [--symbol-prefix=<prefix char>] < in.map > out.S\n");
+ fprintf(stderr, "Usage: kallsyms [--all-symbols] "
+ "[--symbol-prefix=<prefix char>] "
+ "[--page-offset=<CONFIG_PAGE_OFFSET>] "
+ "< in.map > out.S\n");
exit(1);
}
@@ -194,6 +198,9 @@ static int symbol_valid(struct sym_entry *s)
int i;
int offset = 1;
+ if (s->addr < kernel_start_addr)
+ return 0;
+
/* skip prefix char */
if (symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char)
offset++;
@@ -646,6 +653,9 @@ int main(int argc, char **argv)
if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\''))
p++;
symbol_prefix_char = *p;
+ } else if (strncmp(argv[i], "--page-offset=", 14) == 0) {
+ const char *p = &argv[i][14];
+ kernel_start_addr = strtoull(p, NULL, 16);
} else
usage();
}
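
With the new --page-offset option, kallsyms can discard symbols that fall below the start of kernel space; the link-vmlinux.sh hunk further down feeds it CONFIG_PAGE_OFFSET. A reduced sketch of that filter over an in-memory symbol table (struct and names simplified from scripts/kallsyms.c):

    #include <stddef.h>

    struct sym { unsigned long long addr; char type; char *name; };

    /* Keep only symbols at or above the configured kernel start address,
     * the same per-symbol test symbol_valid() now applies; returns the
     * number of surviving entries. */
    static size_t filter_below_kernel_start(struct sym *tab, size_t n,
                                            unsigned long long kernel_start)
    {
            size_t i, kept = 0;

            for (i = 0; i < n; i++)
                    if (tab[i].addr >= kernel_start)
                            tab[kept++] = tab[i];
            return kept;
    }
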
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index df198a5f4822..ba663e1dc7e3 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -93,7 +93,7 @@ struct symbol {
#define SYMBOL_CHOICEVAL 0x0020 /* used as a value in a choice block */
#define SYMBOL_VALID 0x0080 /* set when symbol.curr is calculated */
#define SYMBOL_OPTIONAL 0x0100 /* choice is optional - values can be 'n' */
-#define SYMBOL_WRITE 0x0200 /* ? */
+#define SYMBOL_WRITE 0x0200 /* write symbol to file (KCONFIG_CONFIG) */
#define SYMBOL_CHANGED 0x0400 /* ? */
#define SYMBOL_AUTO 0x1000 /* value from environment variable */
#define SYMBOL_CHECKED 0x2000 /* used during dependency checking */
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 2c3963165a0d..59184bb41ef8 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -25,7 +25,7 @@
static const char mconf_readme[] = N_(
"Overview\n"
"--------\n"
-"This interface let you select features and parameters for the build.\n"
+"This interface lets you select features and parameters for the build.\n"
"Features can either be built-in, modularized, or ignored. Parameters\n"
"must be entered in as decimal or hexadecimal numbers or text.\n"
"\n"
@@ -39,15 +39,15 @@ static const char mconf_readme[] = N_(
"\n"
"To change any of these features, highlight it with the cursor\n"
"keys and press <Y> to build it in, <M> to make it a module or\n"
-"<N> to removed it. You may also press the <Space Bar> to cycle\n"
-"through the available options (ie. Y->N->M->Y).\n"
+"<N> to remove it. You may also press the <Space Bar> to cycle\n"
+"through the available options (i.e. Y->N->M->Y).\n"
"\n"
"Some additional keyboard hints:\n"
"\n"
"Menus\n"
"----------\n"
-"o Use the Up/Down arrow keys (cursor keys) to highlight the item\n"
-" you wish to change or submenu wish to select and press <Enter>.\n"
+"o Use the Up/Down arrow keys (cursor keys) to highlight the item you\n"
+" wish to change or the submenu you wish to select and press <Enter>.\n"
" Submenus are designated by \"--->\", empty ones by \"----\".\n"
"\n"
" Shortcut: Press the option's highlighted letter (hotkey).\n"
@@ -65,7 +65,7 @@ static const char mconf_readme[] = N_(
" there is a delayed response which you may find annoying.\n"
"\n"
" Also, the <TAB> and cursor keys will cycle between <Select>,\n"
-" <Exit> and <Help>.\n"
+" <Exit>, <Help>, <Save>, and <Load>.\n"
"\n"
"o To get help with an item, use the cursor keys to highlight <Help>\n"
" and press <ENTER>.\n"
@@ -105,7 +105,7 @@ static const char mconf_readme[] = N_(
"Text Box (Help Window)\n"
"--------\n"
"o Use the cursor keys to scroll up/down/left/right. The VI editor\n"
-" keys h,j,k,l function here as do <u>, <d>, <SPACE BAR> and <B> for \n"
+" keys h,j,k,l function here as do <u>, <d>, <SPACE BAR> and <B> for\n"
" those who are familiar with less and lynx.\n"
"\n"
"o Press <E>, <X>, <q>, <Enter> or <Esc><Esc> to exit.\n"
@@ -117,23 +117,21 @@ static const char mconf_readme[] = N_(
"those who, for various reasons, find it necessary to switch\n"
"between different configurations.\n"
"\n"
-"At the end of the main menu you will find two options. One is\n"
-"for saving the current configuration to a file of your choosing.\n"
-"The other option is for loading a previously saved alternate\n"
-"configuration.\n"
+"The <Save> button will let you save the current configuration to\n"
+"a file of your choosing. Use the <Load> button to load a previously\n"
+"saved alternate configuration.\n"
"\n"
-"Even if you don't use alternate configuration files, but you\n"
-"find during a Menuconfig session that you have completely messed\n"
-"up your settings, you may use the \"Load Alternate...\" option to\n"
-"restore your previously saved settings from \".config\" without\n"
-"restarting Menuconfig.\n"
+"Even if you don't use alternate configuration files, but you find\n"
+"during a Menuconfig session that you have completely messed up your\n"
+"settings, you may use the <Load> button to restore your previously\n"
+"saved settings from \".config\" without restarting Menuconfig.\n"
"\n"
"Other information\n"
"-----------------\n"
-"If you use Menuconfig in an XTERM window make sure you have your\n"
-"$TERM variable set to point to a xterm definition which supports color.\n"
-"Otherwise, Menuconfig will look rather bad. Menuconfig will not\n"
-"display correctly in a RXVT window because rxvt displays only one\n"
+"If you use Menuconfig in an XTERM window, make sure you have your\n"
+"$TERM variable set to point to an xterm definition which supports\n"
+"color. Otherwise, Menuconfig will look rather bad. Menuconfig will\n"
+"not display correctly in an RXVT window because rxvt displays only one\n"
"intensity of color, bright.\n"
"\n"
"Menuconfig will display larger menus on screens or xterms which are\n"
@@ -148,8 +146,8 @@ static const char mconf_readme[] = N_(
"\n"
"Optional personality available\n"
"------------------------------\n"
-"If you prefer to have all of the options listed in a single menu, rather\n"
-"than the default multimenu hierarchy, run the menuconfig with\n"
+"If you prefer to have all of the options listed in a single menu,\n"
+"rather than the default multimenu hierarchy, run the menuconfig with\n"
"MENUCONFIG_MODE environment variable set to single_menu. Example:\n"
"\n"
"make MENUCONFIG_MODE=single_menu menuconfig\n"
@@ -172,7 +170,7 @@ static const char mconf_readme[] = N_(
" mono => selects colors suitable for monochrome displays\n"
" blackbg => selects a color scheme with black background\n"
" classic => theme with blue background. The classic look\n"
-" bluetitle => a LCD friendly version of classic. (default)\n"
+" bluetitle => an LCD friendly version of classic. (default)\n"
"\n"),
menu_instructions[] = N_(
"Arrow keys navigate the menu. "
@@ -238,24 +236,24 @@ search_help[] = N_(
"Symbol: FOO [=m]\n"
"Type : tristate\n"
"Prompt: Foo bus is used to drive the bar HW\n"
- " Defined at drivers/pci/Kconfig:47\n"
- " Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n"
" Location:\n"
" -> Bus options (PCI, PCMCIA, EISA, ISA)\n"
" -> PCI support (PCI [=y])\n"
"(1) -> PCI access mode (<choice> [=y])\n"
+ " Defined at drivers/pci/Kconfig:47\n"
+ " Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n"
" Selects: LIBCRC32\n"
- " Selected by: BAR\n"
+ " Selected by: BAR [=n]\n"
"-----------------------------------------------------------------\n"
"o The line 'Type:' shows the type of the configuration option for\n"
" this symbol (boolean, tristate, string, ...)\n"
"o The line 'Prompt:' shows the text used in the menu structure for\n"
" this symbol\n"
- "o The 'Defined at' line tell at what file / line number the symbol\n"
+ "o The 'Defined at' line tells at what file / line number the symbol\n"
" is defined\n"
- "o The 'Depends on:' line tell what symbols needs to be defined for\n"
+ "o The 'Depends on:' line tells what symbols need to be defined for\n"
" this symbol to be visible in the menu (selectable)\n"
- "o The 'Location:' lines tell where in the menu structure this symbol\n"
+ "o The 'Location:' lines tells where in the menu structure this symbol\n"
" is located\n"
" A location followed by a [=y] indicates that this is a\n"
" selectable menu item - and the current value is displayed inside\n"
@@ -263,9 +261,9 @@ search_help[] = N_(
" Press the key in the (#) prefix to jump directly to that\n"
" location. You will be returned to the current search results\n"
" after exiting this new menu.\n"
- "o The 'Selects:' line tell what symbol will be automatically\n"
+ "o The 'Selects:' line tells what symbols will be automatically\n"
" selected if this symbol is selected (y or m)\n"
- "o The 'Selected by' line tell what symbol has selected this symbol\n"
+ "o The 'Selected by' line tells what symbol has selected this symbol\n"
"\n"
"Only relevant lines are shown.\n"
"\n\n"
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index c1d53200c306..db1512ae30cc 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -119,9 +119,10 @@ void menu_set_type(int type)
sym->type = type;
return;
}
- menu_warn(current_entry, "type of '%s' redefined from '%s' to '%s'",
- sym->name ? sym->name : "<choice>",
- sym_type_name(sym->type), sym_type_name(type));
+ menu_warn(current_entry,
+ "ignoring type redefinition of '%s' from '%s' to '%s'",
+ sym->name ? sym->name : "<choice>",
+ sym_type_name(sym->type), sym_type_name(type));
}
struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *expr, struct expr *dep)
@@ -583,7 +584,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
for (j = 4; --i >= 0; j += 2) {
menu = submenu[i];
if (head && location && menu == location)
- jump->offset = r->len - 1;
+ jump->offset = strlen(r->s);
str_printf(r, "%*c-> %s", j, ' ',
_(menu_get_prompt(menu)));
if (menu->sym) {
@@ -597,7 +598,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
}
/*
- * get peoperty of type P_SYMBOL
+ * get property of type P_SYMBOL
*/
static struct property *get_symbol_prop(struct symbol *sym)
{
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 1500c38f0cca..9d3b04b0769c 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -69,6 +69,11 @@ static inline QString qgettext(const QString& str)
return QString::fromLocal8Bit(gettext(str.latin1()));
}
+ConfigSettings::ConfigSettings()
+ : QSettings("kernel.org", "qconf")
+{
+}
+
/**
* Reads a list of integer values from the application settings.
*/
diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h
index 3715b3e7212c..bde0c6b6f9e8 100644
--- a/scripts/kconfig/qconf.h
+++ b/scripts/kconfig/qconf.h
@@ -32,6 +32,7 @@ class ConfigMainWindow;
class ConfigSettings : public QSettings {
public:
+ ConfigSettings();
Q3ValueList<int> readSizes(const QString& key, bool *ok);
bool writeSizes(const QString& key, const Q3ValueList<int>& value);
};
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index c9a6775565bf..7caabdb51c64 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -1047,7 +1047,7 @@ sym_re_search_free:
* When we check for recursive dependencies we use a stack to save
* current state so we can print out relevant info to user.
* The entries are located on the call stack so no need to free memory.
- * Note inser() remove() must always match to properly clear the stack.
+ * Note insert() remove() must always match to properly clear the stack.
*/
static struct dep_stack {
struct dep_stack *prev, *next;
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
index 6555a475453b..1a9f53e535ca 100644
--- a/scripts/kconfig/zconf.l
+++ b/scripts/kconfig/zconf.l
@@ -68,7 +68,6 @@ static void alloc_string(const char *str, int size)
}
%}
-ws [ \n\t]
n [A-Za-z0-9_]
%%
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 014994936b1c..32b10f53d0b4 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -82,6 +82,8 @@ kallsyms()
kallsymopt="${kallsymopt} --all-symbols"
fi
+ kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+
local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 8247979e8f64..393706b37774 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -17,6 +17,7 @@
#include <string.h>
#include <limits.h>
#include <stdbool.h>
+#include <errno.h>
#include "modpost.h"
#include "../../include/generated/autoconf.h"
#include "../../include/linux/license.h"
@@ -37,6 +38,8 @@ static int warn_unresolved = 0;
/* How a symbol is exported */
static int sec_mismatch_count = 0;
static int sec_mismatch_verbose = 1;
+/* ignore missing files */
+static int ignore_missing_files;
enum export {
export_plain, export_unused, export_gpl,
@@ -407,6 +410,11 @@ static int parse_elf(struct elf_info *info, const char *filename)
hdr = grab_file(filename, &info->size);
if (!hdr) {
+ if (ignore_missing_files) {
+ fprintf(stderr, "%s: %s (ignored)\n", filename,
+ strerror(errno));
+ return 0;
+ }
perror(filename);
exit(1);
}
@@ -2119,7 +2127,7 @@ int main(int argc, char **argv)
struct ext_sym_list *extsym_iter;
struct ext_sym_list *extsym_start = NULL;
- while ((opt = getopt(argc, argv, "i:I:e:msST:o:awM:K:")) != -1) {
+ while ((opt = getopt(argc, argv, "i:I:e:mnsST:o:awM:K:")) != -1) {
switch (opt) {
case 'i':
kernel_read = optarg;
@@ -2139,6 +2147,9 @@ int main(int argc, char **argv)
case 'm':
modversions = 1;
break;
+ case 'n':
+ ignore_missing_files = 1;
+ break;
case 'o':
dump_write = optarg;
break;
diff --git a/scripts/show_delta b/scripts/show_delta
index 17df3051747a..e25732b5d701 100755
--- a/scripts/show_delta
+++ b/scripts/show_delta
@@ -13,7 +13,7 @@ import sys
import string
def usage():
- print """usage: show_delta [<options>] <filename>
+ print ("""usage: show_delta [<options>] <filename>
This program parses the output from a set of printk message lines which
have time data prefixed because the CONFIG_PRINTK_TIME option is set, or
@@ -35,7 +35,7 @@ ex: $ dmesg >timefile
will show times relative to the line in the kernel output
starting with "NET4".
-"""
+""")
sys.exit(1)
# returns a tuple containing the seconds and text for each message line
@@ -94,11 +94,11 @@ def main():
try:
lines = open(filein,"r").readlines()
except:
- print "Problem opening file: %s" % filein
+ print ("Problem opening file: %s" % filein)
sys.exit(1)
if base_str:
- print 'base= "%s"' % base_str
+ print ('base= "%s"' % base_str)
# assume a numeric base. If that fails, try searching
# for a matching line.
try:
@@ -117,13 +117,13 @@ def main():
# stop at first match
break
if not found:
- print 'Couldn\'t find line matching base pattern "%s"' % base_str
+ print ('Couldn\'t find line matching base pattern "%s"' % base_str)
sys.exit(1)
else:
base_time = 0.0
for line in lines:
- print convert_line(line, base_time),
+ print (convert_line(line, base_time),)
main()
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 74f02e4dddd2..f1bcfc11cc72 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -151,13 +151,14 @@ exuberant()
all_target_sources | xargs $1 -a \
-I __initdata,__exitdata,__initconst,__devinitdata \
-I __devinitconst,__cpuinitdata,__initdata_memblock \
- -I __refdata,__attribute \
+ -I __refdata,__attribute,__maybe_unused,__always_unused \
-I __acquires,__releases,__deprecated \
-I __read_mostly,__aligned,____cacheline_aligned \
-I ____cacheline_aligned_in_smp \
+ -I __cacheline_aligned,__cacheline_aligned_in_smp \
-I ____cacheline_internodealigned_in_smp \
-I __used,__packed,__packed2__,__must_check,__must_hold \
- -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL \
+ -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL,ACPI_EXPORT_SYMBOL \
-I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \
-I static,const \
--extra=+f --c-kinds=+px \
diff --git a/security/Makefile b/security/Makefile
index c26c81e92571..a5918e01a4f7 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_MMU) += min_addr.o
# Object file lists
obj-$(CONFIG_SECURITY) += security.o capability.o
obj-$(CONFIG_SECURITYFS) += inode.o
-# Must precede capability.o in order to stack properly.
obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o
obj-$(CONFIG_AUDIT) += lsm_audit.o
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index 031d2d9dd695..89c78658031f 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -111,7 +111,6 @@ static const char *const aa_audit_type[] = {
static void audit_pre(struct audit_buffer *ab, void *ca)
{
struct common_audit_data *sa = ca;
- struct task_struct *tsk = sa->aad->tsk ? sa->aad->tsk : current;
if (aa_g_audit_header) {
audit_log_format(ab, "apparmor=");
@@ -132,11 +131,6 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
if (sa->aad->profile) {
struct aa_profile *profile = sa->aad->profile;
- pid_t pid;
- rcu_read_lock();
- pid = rcu_dereference(tsk->real_parent)->pid;
- rcu_read_unlock();
- audit_log_format(ab, " parent=%d", pid);
if (profile->ns != root_ns) {
audit_log_format(ab, " namespace=");
audit_log_untrustedstring(ab, profile->ns->base.hname);
@@ -149,12 +143,6 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
audit_log_format(ab, " name=");
audit_log_untrustedstring(ab, sa->aad->name);
}
-
- if (sa->aad->tsk) {
- audit_log_format(ab, " pid=%d comm=", tsk->pid);
- audit_log_untrustedstring(ab, tsk->comm);
- }
-
}
/**
@@ -212,7 +200,7 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
if (sa->aad->type == AUDIT_APPARMOR_KILL)
(void)send_sig_info(SIGKILL, NULL,
- sa->aad->tsk ? sa->aad->tsk : current);
+ sa->u.tsk ? sa->u.tsk : current);
if (sa->aad->type == AUDIT_APPARMOR_ALLOWED)
return complain_error(sa->aad->error);
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
index 84d1f5f53877..1101c6f64bb7 100644
--- a/security/apparmor/capability.c
+++ b/security/apparmor/capability.c
@@ -53,8 +53,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
/**
* audit_caps - audit a capability
- * @profile: profile confining task (NOT NULL)
- * @task: task capability test was performed against (NOT NULL)
+ * @profile: profile being tested for confinement (NOT NULL)
* @cap: capability tested
* @error: error code returned by test
*
@@ -63,8 +62,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
*
* Returns: 0 or sa->error on success, error code on failure
*/
-static int audit_caps(struct aa_profile *profile, struct task_struct *task,
- int cap, int error)
+static int audit_caps(struct aa_profile *profile, int cap, int error)
{
struct audit_cache *ent;
int type = AUDIT_APPARMOR_AUTO;
@@ -73,7 +71,6 @@ static int audit_caps(struct aa_profile *profile, struct task_struct *task,
sa.type = LSM_AUDIT_DATA_CAP;
sa.aad = &aad;
sa.u.cap = cap;
- sa.aad->tsk = task;
sa.aad->op = OP_CAPABLE;
sa.aad->error = error;
@@ -124,8 +121,7 @@ static int profile_capable(struct aa_profile *profile, int cap)
/**
* aa_capable - test permission to use capability
- * @task: task doing capability test against (NOT NULL)
- * @profile: profile confining @task (NOT NULL)
+ * @profile: profile being tested against (NOT NULL)
* @cap: capability to be tested
* @audit: whether an audit record should be generated
*
@@ -133,8 +129,7 @@ static int profile_capable(struct aa_profile *profile, int cap)
*
* Returns: 0 on success, or else an error code.
*/
-int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap,
- int audit)
+int aa_capable(struct aa_profile *profile, int cap, int audit)
{
int error = profile_capable(profile, cap);
@@ -144,5 +139,5 @@ int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap,
return error;
}
- return audit_caps(profile, task, cap, error);
+ return audit_caps(profile, cap, error);
}
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 26c607c971f5..452567d3a08e 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -50,23 +50,21 @@ void aa_free_domain_entries(struct aa_domain *domain)
/**
* may_change_ptraced_domain - check if can change profile on ptraced task
- * @task: task we want to change profile of (NOT NULL)
* @to_profile: profile to change to (NOT NULL)
*
- * Check if the task is ptraced and if so if the tracing task is allowed
+ * Check if current is ptraced and if so if the tracing task is allowed
* to trace the new domain
*
* Returns: %0 or error if change not allowed
*/
-static int may_change_ptraced_domain(struct task_struct *task,
- struct aa_profile *to_profile)
+static int may_change_ptraced_domain(struct aa_profile *to_profile)
{
struct task_struct *tracer;
struct aa_profile *tracerp = NULL;
int error = 0;
rcu_read_lock();
- tracer = ptrace_parent(task);
+ tracer = ptrace_parent(current);
if (tracer)
/* released below */
tracerp = aa_get_task_profile(tracer);
@@ -75,7 +73,7 @@ static int may_change_ptraced_domain(struct task_struct *task,
if (!tracer || unconfined(tracerp))
goto out;
- error = aa_may_ptrace(tracer, tracerp, to_profile, PTRACE_MODE_ATTACH);
+ error = aa_may_ptrace(tracerp, to_profile, PTRACE_MODE_ATTACH);
out:
rcu_read_unlock();
@@ -477,7 +475,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
}
if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
- error = may_change_ptraced_domain(current, new_profile);
+ error = may_change_ptraced_domain(new_profile);
if (error) {
aa_put_profile(new_profile);
goto audit;
@@ -690,7 +688,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
}
}
- error = may_change_ptraced_domain(current, hat);
+ error = may_change_ptraced_domain(hat);
if (error) {
info = "ptraced";
error = -EPERM;
@@ -829,7 +827,7 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec,
}
/* check if tracing task is allowed to trace target domain */
- error = may_change_ptraced_domain(current, target);
+ error = may_change_ptraced_domain(target);
if (error) {
info = "ptrace prevents transition";
goto audit;
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index 30e8d7687259..ba3dfd17f23f 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -109,7 +109,6 @@ struct apparmor_audit_data {
void *profile;
const char *name;
const char *info;
- struct task_struct *tsk;
union {
void *target;
struct {
diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
index 2e7c9d6a2f3b..fc3fa381d850 100644
--- a/security/apparmor/include/capability.h
+++ b/security/apparmor/include/capability.h
@@ -4,7 +4,7 @@
* This file contains AppArmor capability mediation definitions.
*
* Copyright (C) 1998-2008 Novell/SUSE
- * Copyright 2009-2010 Canonical Ltd.
+ * Copyright 2009-2013 Canonical Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -38,8 +38,7 @@ struct aa_caps {
extern struct aa_fs_entry aa_fs_entry_caps[];
-int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap,
- int audit);
+int aa_capable(struct aa_profile *profile, int cap, int audit);
static inline void aa_free_cap_rules(struct aa_caps *caps)
{
diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
index aeda0fbc8b2f..288ca76e2fb1 100644
--- a/security/apparmor/include/ipc.h
+++ b/security/apparmor/include/ipc.h
@@ -19,8 +19,8 @@
struct aa_profile;
-int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer,
- struct aa_profile *tracee, unsigned int mode);
+int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
+ unsigned int mode);
int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
unsigned int mode);
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index c51d2266587e..777ac1c47253 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -54,15 +54,14 @@ static int aa_audit_ptrace(struct aa_profile *profile,
/**
* aa_may_ptrace - test if tracer task can trace the tracee
- * @tracer_task: task who will do the tracing (NOT NULL)
* @tracer: profile of the task doing the tracing (NOT NULL)
* @tracee: task to be traced
* @mode: whether PTRACE_MODE_READ || PTRACE_MODE_ATTACH
*
* Returns: %0 else error code if permission denied or error
*/
-int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer,
- struct aa_profile *tracee, unsigned int mode)
+int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
+ unsigned int mode)
{
/* TODO: currently only based on capability, not extended ptrace
* rules,
@@ -72,7 +71,7 @@ int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer,
if (unconfined(tracer) || tracer == tracee)
return 0;
/* log this capability request */
- return aa_capable(tracer_task, tracer, CAP_SYS_PTRACE, 1);
+ return aa_capable(tracer, CAP_SYS_PTRACE, 1);
}
/**
@@ -101,7 +100,7 @@ int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
if (!unconfined(tracer_p)) {
struct aa_profile *tracee_p = aa_get_task_profile(tracee);
- error = aa_may_ptrace(tracer, tracer_p, tracee_p, mode);
+ error = aa_may_ptrace(tracer_p, tracee_p, mode);
error = aa_audit_ptrace(tracer_p, tracee_p, error);
aa_put_profile(tracee_p);
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index fb99e18123b4..4257b7e2796b 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -145,7 +145,7 @@ static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
if (!error) {
profile = aa_cred_profile(cred);
if (!unconfined(profile))
- error = aa_capable(current, profile, cap, audit);
+ error = aa_capable(profile, cap, audit);
}
return error;
}
diff --git a/security/capability.c b/security/capability.c
index dbeb9bc27b24..8b4f24ae4338 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -777,9 +777,15 @@ static int cap_xfrm_policy_delete_security(struct xfrm_sec_ctx *ctx)
return 0;
}
-static int cap_xfrm_state_alloc_security(struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx,
- u32 secid)
+static int cap_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
+{
+ return 0;
+}
+
+static int cap_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec,
+ u32 secid)
{
return 0;
}
@@ -1101,7 +1107,8 @@ void __init security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, xfrm_policy_clone_security);
set_to_cap_if_null(ops, xfrm_policy_free_security);
set_to_cap_if_null(ops, xfrm_policy_delete_security);
- set_to_cap_if_null(ops, xfrm_state_alloc_security);
+ set_to_cap_if_null(ops, xfrm_state_alloc);
+ set_to_cap_if_null(ops, xfrm_state_alloc_acquire);
set_to_cap_if_null(ops, xfrm_state_free_security);
set_to_cap_if_null(ops, xfrm_state_delete_security);
set_to_cap_if_null(ops, xfrm_policy_lookup);
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index c123628d3f84..7c2a0a71049e 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -63,16 +63,6 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
struct cgroup_subsys devices_subsys;
-static int devcgroup_can_attach(struct cgroup_subsys_state *new_css,
- struct cgroup_taskset *set)
-{
- struct task_struct *task = cgroup_taskset_first(set);
-
- if (current != task && !capable(CAP_SYS_ADMIN))
- return -EPERM;
- return 0;
-}
-
/*
* called under devcgroup_mutex
*/
@@ -697,7 +687,6 @@ static struct cftype dev_cgroup_files[] = {
struct cgroup_subsys devices_subsys = {
.name = "devices",
- .can_attach = devcgroup_can_attach,
.css_alloc = devcgroup_css_alloc,
.css_free = devcgroup_css_free,
.css_online = devcgroup_online,
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 0b759e17a131..77ca965ab684 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -13,7 +13,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/err.h>
+#include <linux/sched.h>
#include <linux/rbtree.h>
+#include <linux/cred.h>
#include <linux/key-type.h>
#include <linux/digsig.h>
@@ -21,21 +23,29 @@
static struct key *keyring[INTEGRITY_KEYRING_MAX];
+#ifdef CONFIG_IMA_TRUSTED_KEYRING
+static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
+ ".evm",
+ ".module",
+ ".ima",
+};
+#else
static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
"_evm",
"_module",
"_ima",
};
+#endif
int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
- const char *digest, int digestlen)
+ const char *digest, int digestlen)
{
if (id >= INTEGRITY_KEYRING_MAX)
return -EINVAL;
if (!keyring[id]) {
keyring[id] =
- request_key(&key_type_keyring, keyring_name[id], NULL);
+ request_key(&key_type_keyring, keyring_name[id], NULL);
if (IS_ERR(keyring[id])) {
int err = PTR_ERR(keyring[id]);
pr_err("no %s keyring: %d\n", keyring_name[id], err);
@@ -44,9 +54,10 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
}
}
- switch (sig[0]) {
+ switch (sig[1]) {
case 1:
- return digsig_verify(keyring[id], sig, siglen,
+ /* the v1 API expects a signature without the xattr type byte */
+ return digsig_verify(keyring[id], sig + 1, siglen - 1,
digest, digestlen);
case 2:
return asymmetric_verify(keyring[id], sig, siglen,
@@ -55,3 +66,21 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
return -EOPNOTSUPP;
}
+
+int integrity_init_keyring(const unsigned int id)
+{
+ const struct cred *cred = current_cred();
+ const struct user_struct *user = cred->user;
+
+ keyring[id] = keyring_alloc(keyring_name[id], KUIDT_INIT(0),
+ KGIDT_INIT(0), cred,
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA, user->uid_keyring);
+ if (!IS_ERR(keyring[id]))
+ set_bit(KEY_FLAG_TRUSTED_ONLY, &keyring[id]->flags);
+ else
+ pr_info("Can't allocate %s keyring (%ld)\n",
+ keyring_name[id], PTR_ERR(keyring[id]));
+ return 0;
+}
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index b4754667659d..9eae4809006b 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -20,17 +20,6 @@
#include "integrity.h"
/*
- * signature format v2 - for using with asymmetric keys
- */
-struct signature_v2_hdr {
- uint8_t version; /* signature format version */
- uint8_t hash_algo; /* Digest algorithm [enum pkey_hash_algo] */
- uint32_t keyid; /* IMA key identifier - not X509/PGP specific*/
- uint16_t sig_size; /* signature size */
- uint8_t sig[0]; /* signature payload */
-} __packed;
-
-/*
* Request an asymmetric key.
*/
static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index af9b6852f4e1..336b3ddfe63f 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -123,7 +123,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
goto out;
}
- xattr_len = rc - 1;
+ xattr_len = rc;
/* check value type */
switch (xattr_data->type) {
@@ -143,7 +143,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
if (rc)
break;
rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM,
- xattr_data->digest, xattr_len,
+ (const char *)xattr_data, xattr_len,
calc.digest, sizeof(calc.digest));
if (!rc) {
/* we probably want to replace rsa with hmac here */
diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c
index b1753e98bf9a..46408b9e62e8 100644
--- a/security/integrity/evm/evm_posix_acl.c
+++ b/security/integrity/evm/evm_posix_acl.c
@@ -11,8 +11,9 @@
#include <linux/module.h>
#include <linux/xattr.h>
+#include <linux/evm.h>
-int posix_xattr_acl(char *xattr)
+int posix_xattr_acl(const char *xattr)
{
int xattr_len = strlen(xattr);
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index 74522dbd10a6..c49d3f14cbec 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -70,6 +70,8 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
static void iint_free(struct integrity_iint_cache *iint)
{
+ kfree(iint->ima_hash);
+ iint->ima_hash = NULL;
iint->version = 0;
iint->flags = 0UL;
iint->ima_file_status = INTEGRITY_UNKNOWN;
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 39196abaff0d..dad8d4ca2437 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -9,6 +9,7 @@ config IMA
select CRYPTO_HMAC
select CRYPTO_MD5
select CRYPTO_SHA1
+ select CRYPTO_HASH_INFO
select TCG_TPM if HAS_IOMEM && !UML
select TCG_TIS if TCG_TPM && X86
select TCG_IBMVTPM if TCG_TPM && PPC64
@@ -45,6 +46,69 @@ config IMA_LSM_RULES
help
Disabling this option will disregard LSM based policy rules.
+choice
+ prompt "Default template"
+ default IMA_NG_TEMPLATE
+ depends on IMA
+ help
+ Select the default IMA measurement template.
+
+ The original 'ima' measurement list template contains a
+ hash, defined as 20 bytes, and a null terminated pathname,
+ limited to 255 characters. The 'ima-ng' measurement list
+ template permits both larger hash digests and longer
+ pathnames.
+
+ config IMA_TEMPLATE
+ bool "ima"
+ config IMA_NG_TEMPLATE
+ bool "ima-ng (default)"
+ config IMA_SIG_TEMPLATE
+ bool "ima-sig"
+endchoice
+
+config IMA_DEFAULT_TEMPLATE
+ string
+ depends on IMA
+ default "ima" if IMA_TEMPLATE
+ default "ima-ng" if IMA_NG_TEMPLATE
+ default "ima-sig" if IMA_SIG_TEMPLATE
+
+choice
+ prompt "Default integrity hash algorithm"
+ default IMA_DEFAULT_HASH_SHA1
+ depends on IMA
+ help
+ Select the default hash algorithm used for the measurement
+ list, integrity appraisal and audit log. The compiled default
+ hash algorithm can be overridden using the kernel command
+ line 'ima_hash=' option.
+
+ config IMA_DEFAULT_HASH_SHA1
+ bool "SHA1 (default)"
+ depends on CRYPTO_SHA1
+
+ config IMA_DEFAULT_HASH_SHA256
+ bool "SHA256"
+ depends on CRYPTO_SHA256 && !IMA_TEMPLATE
+
+ config IMA_DEFAULT_HASH_SHA512
+ bool "SHA512"
+ depends on CRYPTO_SHA512 && !IMA_TEMPLATE
+
+ config IMA_DEFAULT_HASH_WP512
+ bool "WP512"
+ depends on CRYPTO_WP512 && !IMA_TEMPLATE
+endchoice
+
+config IMA_DEFAULT_HASH
+ string
+ depends on IMA
+ default "sha1" if IMA_DEFAULT_HASH_SHA1
+ default "sha256" if IMA_DEFAULT_HASH_SHA256
+ default "sha512" if IMA_DEFAULT_HASH_SHA512
+ default "wp512" if IMA_DEFAULT_HASH_WP512
+
config IMA_APPRAISE
bool "Appraise integrity measurements"
depends on IMA
@@ -59,3 +123,11 @@ config IMA_APPRAISE
For more information on integrity appraisal refer to:
<http://linux-ima.sourceforge.net>
If unsure, say N.
+
+config IMA_TRUSTED_KEYRING
+ bool "Require all keys on the _ima keyring be signed"
+ depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING
+ default y
+ help
+ This option requires that all keys added to the _ima
+ keyring be signed by a key on the system trusted keyring.
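
The new Kconfig choices bake a default template name and hash name into the kernel, and the help text notes that the compiled-in hash can be overridden from the command line via 'ima_hash='. A self-contained sketch of resolving such a name against a table of algorithm names (the table and function are illustrative stand-ins, not the kernel's hash_algo_name[] or its setup code):

    #include <stddef.h>
    #include <string.h>

    static const char *const demo_hash_names[] = {
            "md5", "sha1", "sha256", "sha512", "wp512",
    };

    /* Map a hash name (from Kconfig or a boot parameter) to a table index,
     * falling back to the compiled-in default when the name is unknown. */
    static int resolve_hash_algo(const char *name, int compiled_default)
    {
            size_t i;

            for (i = 0; i < sizeof(demo_hash_names) / sizeof(demo_hash_names[0]); i++)
                    if (strcmp(name, demo_hash_names[i]) == 0)
                            return (int)i;
            return compiled_default;
    }
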
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index 56dfee7cbf61..d79263d2fdbf 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -6,5 +6,5 @@
obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
- ima_policy.o
+ ima_policy.o ima_template.o ima_template_lib.o
ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index b3dd616560f7..bf03c6a16cc8 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -36,23 +36,48 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
#define IMA_HASH_BITS 9
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
+#define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16
+#define IMA_TEMPLATE_NUM_FIELDS_MAX 15
+
+#define IMA_TEMPLATE_IMA_NAME "ima"
+#define IMA_TEMPLATE_IMA_FMT "d|n"
+
/* set during initialization */
extern int ima_initialized;
extern int ima_used_chip;
-extern char *ima_hash;
+extern int ima_hash_algo;
extern int ima_appraise;
-/* IMA inode template definition */
-struct ima_template_data {
- u8 digest[IMA_DIGEST_SIZE]; /* sha1/md5 measurement hash */
- char file_name[IMA_EVENT_NAME_LEN_MAX + 1]; /* name + \0 */
+/* IMA template field data definition */
+struct ima_field_data {
+ u8 *data;
+ u32 len;
+};
+
+/* IMA template field definition */
+struct ima_template_field {
+ const char field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN];
+ int (*field_init) (struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_field_data *field_data);
+ void (*field_show) (struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+};
+
+/* IMA template descriptor definition */
+struct ima_template_desc {
+ char *name;
+ char *fmt;
+ int num_fields;
+ struct ima_template_field **fields;
};
struct ima_template_entry {
- u8 digest[IMA_DIGEST_SIZE]; /* sha1 or md5 measurement hash */
- const char *template_name;
- int template_len;
- struct ima_template_data template;
+ u8 digest[TPM_DIGEST_SIZE]; /* sha1 or md5 measurement hash */
+ struct ima_template_desc *template_desc; /* template descriptor */
+ u32 template_data_len;
+ struct ima_field_data template_data[0]; /* template related data */
};
struct ima_queue_entry {
@@ -69,13 +94,21 @@ int ima_fs_init(void);
void ima_fs_cleanup(void);
int ima_inode_alloc(struct inode *inode);
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
- const char *op, struct inode *inode);
-int ima_calc_file_hash(struct file *file, char *digest);
-int ima_calc_buffer_hash(const void *data, int len, char *digest);
-int ima_calc_boot_aggregate(char *digest);
-void ima_add_violation(struct inode *inode, const unsigned char *filename,
+ const char *op, struct inode *inode,
+ const unsigned char *filename);
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash);
+int ima_calc_field_array_hash(struct ima_field_data *field_data, int num_fields,
+ struct ima_digest_data *hash);
+int __init ima_calc_boot_aggregate(struct ima_digest_data *hash);
+void ima_add_violation(struct file *file, const unsigned char *filename,
const char *op, const char *cause);
int ima_init_crypto(void);
+void ima_putc(struct seq_file *m, void *data, int datalen);
+void ima_print_digest(struct seq_file *m, u8 *digest, int size);
+struct ima_template_desc *ima_template_desc_current(void);
+int ima_init_template(void);
/*
* used to protect h_table and sha_table
@@ -98,14 +131,21 @@ static inline unsigned long ima_hash_key(u8 *digest)
int ima_get_action(struct inode *inode, int mask, int function);
int ima_must_measure(struct inode *inode, int mask, int function);
int ima_collect_measurement(struct integrity_iint_cache *iint,
- struct file *file);
+ struct file *file,
+ struct evm_ima_xattr_data **xattr_value,
+ int *xattr_len);
void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
- const unsigned char *filename);
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len);
void ima_audit_measurement(struct integrity_iint_cache *iint,
const unsigned char *filename);
+int ima_alloc_init_template(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_template_entry **entry);
int ima_store_template(struct ima_template_entry *entry, int violation,
- struct inode *inode);
-void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show);
+ struct inode *inode, const unsigned char *filename);
const char *ima_d_path(struct path *path, char **pathbuf);
/* rbtree tree calls to lookup, insert, delete
@@ -131,17 +171,25 @@ void ima_delete_rules(void);
#ifdef CONFIG_IMA_APPRAISE
int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
- struct file *file, const unsigned char *filename);
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len);
int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
int func);
+void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_digest_data *hash);
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value);
#else
static inline int ima_appraise_measurement(int func,
struct integrity_iint_cache *iint,
struct file *file,
- const unsigned char *filename)
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
{
return INTEGRITY_UNKNOWN;
}
@@ -162,6 +210,19 @@ static inline enum integrity_status ima_get_cache_status(struct integrity_iint_c
{
return INTEGRITY_UNKNOWN;
}
+
+static inline void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
+ int xattr_len,
+ struct ima_digest_data *hash)
+{
+}
+
+static inline int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value)
+{
+ return 0;
+}
+
#endif
/* LSM based policy rules require audit */
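
The ima.h changes above replace the fixed digest-plus-filename record with a table-driven template: a descriptor names an ordered list of fields ("d|n" for the original 'ima' format) and each field supplies callbacks that fill or print one ima_field_data entry. A stripped-down sketch of how such a descriptor hangs together (the demo_ types and callbacks are simplified placeholders, not the definitions from ima_template_lib.c):

    #include <stddef.h>

    struct field_data { unsigned char *data; unsigned int len; };

    struct template_field {
            const char *field_id;                        /* "d", "n", ... */
            int (*field_init)(struct field_data *out);   /* fill one field */
    };

    struct template_desc {
            const char *name;                            /* e.g. "ima" */
            const char *fmt;                             /* e.g. "d|n" */
            int num_fields;
            const struct template_field **fields;
    };

    /* Placeholder initializers; the real ones hash the file and copy the name. */
    static int demo_init_digest(struct field_data *out)
    { out->data = NULL; out->len = 0; return 0; }
    static int demo_init_name(struct field_data *out)
    { out->data = NULL; out->len = 0; return 0; }

    static const struct template_field demo_field_d = { "d", demo_init_digest };
    static const struct template_field demo_field_n = { "n", demo_init_name };
    static const struct template_field *demo_ima_fields[] = {
            &demo_field_d, &demo_field_n,
    };

    static const struct template_desc demo_ima_desc = { "ima", "d|n", 2, demo_ima_fields };
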
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 1c03e8f1e0e1..0e7540863fc2 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -18,9 +18,46 @@
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/evm.h>
+#include <crypto/hash_info.h>
#include "ima.h"
-static const char *IMA_TEMPLATE_NAME = "ima";
+/*
+ * ima_alloc_init_template - create and initialize a new template entry
+ */
+int ima_alloc_init_template(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_template_entry **entry)
+{
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ int i, result = 0;
+
+ *entry = kzalloc(sizeof(**entry) + template_desc->num_fields *
+ sizeof(struct ima_field_data), GFP_NOFS);
+ if (!*entry)
+ return -ENOMEM;
+
+ for (i = 0; i < template_desc->num_fields; i++) {
+ struct ima_template_field *field = template_desc->fields[i];
+ u32 len;
+
+ result = field->field_init(iint, file, filename,
+ xattr_value, xattr_len,
+ &((*entry)->template_data[i]));
+ if (result != 0)
+ goto out;
+
+ len = (*entry)->template_data[i].len;
+ (*entry)->template_data_len += sizeof(len);
+ (*entry)->template_data_len += len;
+ }
+ (*entry)->template_desc = template_desc;
+ return 0;
+out:
+ kfree(*entry);
+ *entry = NULL;
+ return result;
+}
/*
* ima_store_template - store ima template measurements
@@ -39,28 +76,34 @@ static const char *IMA_TEMPLATE_NAME = "ima";
* Returns 0 on success, error code otherwise
*/
int ima_store_template(struct ima_template_entry *entry,
- int violation, struct inode *inode)
+ int violation, struct inode *inode,
+ const unsigned char *filename)
{
const char *op = "add_template_measure";
const char *audit_cause = "hashing_error";
+ char *template_name = entry->template_desc->name;
int result;
-
- memset(entry->digest, 0, sizeof(entry->digest));
- entry->template_name = IMA_TEMPLATE_NAME;
- entry->template_len = sizeof(entry->template);
+ struct {
+ struct ima_digest_data hdr;
+ char digest[TPM_DIGEST_SIZE];
+ } hash;
if (!violation) {
- result = ima_calc_buffer_hash(&entry->template,
- entry->template_len,
- entry->digest);
+ int num_fields = entry->template_desc->num_fields;
+
+ /* this function uses default algo */
+ hash.hdr.algo = HASH_ALGO_SHA1;
+ result = ima_calc_field_array_hash(&entry->template_data[0],
+ num_fields, &hash.hdr);
if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
- entry->template_name, op,
+ template_name, op,
audit_cause, result, 0);
return result;
}
+ memcpy(entry->digest, hash.hdr.digest, hash.hdr.length);
}
- result = ima_add_template_entry(entry, violation, op, inode);
+ result = ima_add_template_entry(entry, violation, op, inode, filename);
return result;
}
@@ -71,24 +114,24 @@ int ima_store_template(struct ima_template_entry *entry,
* By extending the PCR with 0xFF's instead of with zeroes, the PCR
* value is invalidated.
*/
-void ima_add_violation(struct inode *inode, const unsigned char *filename,
+void ima_add_violation(struct file *file, const unsigned char *filename,
const char *op, const char *cause)
{
struct ima_template_entry *entry;
+ struct inode *inode = file->f_dentry->d_inode;
int violation = 1;
int result;
/* can overflow, only indicator */
atomic_long_inc(&ima_htable.violations);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
+ result = ima_alloc_init_template(NULL, file, filename,
+ NULL, 0, &entry);
+ if (result < 0) {
result = -ENOMEM;
goto err_out;
}
- memset(&entry->template, 0, sizeof(entry->template));
- strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
- result = ima_store_template(entry, violation, inode);
+ result = ima_store_template(entry, violation, inode, filename);
if (result < 0)
kfree(entry);
err_out:
@@ -138,20 +181,42 @@ int ima_must_measure(struct inode *inode, int mask, int function)
* Return 0 on success, error code otherwise
*/
int ima_collect_measurement(struct integrity_iint_cache *iint,
- struct file *file)
+ struct file *file,
+ struct evm_ima_xattr_data **xattr_value,
+ int *xattr_len)
{
struct inode *inode = file_inode(file);
const char *filename = file->f_dentry->d_name.name;
int result = 0;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+ } hash;
+
+ if (xattr_value)
+ *xattr_len = ima_read_xattr(file->f_dentry, xattr_value);
if (!(iint->flags & IMA_COLLECTED)) {
u64 i_version = file_inode(file)->i_version;
- iint->ima_xattr.type = IMA_XATTR_DIGEST;
- result = ima_calc_file_hash(file, iint->ima_xattr.digest);
+ /* use default hash algorithm */
+ hash.hdr.algo = ima_hash_algo;
+
+ if (xattr_value)
+ ima_get_hash_algo(*xattr_value, *xattr_len, &hash.hdr);
+
+ result = ima_calc_file_hash(file, &hash.hdr);
if (!result) {
- iint->version = i_version;
- iint->flags |= IMA_COLLECTED;
+ int length = sizeof(hash.hdr) + hash.hdr.length;
+ void *tmpbuf = krealloc(iint->ima_hash, length,
+ GFP_NOFS);
+ if (tmpbuf) {
+ iint->ima_hash = tmpbuf;
+ memcpy(iint->ima_hash, &hash, length);
+ iint->version = i_version;
+ iint->flags |= IMA_COLLECTED;
+ } else
+ result = -ENOMEM;
}
}
if (result)
@@ -177,7 +242,9 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
* Must be called with iint->mutex held.
*/
void ima_store_measurement(struct integrity_iint_cache *iint,
- struct file *file, const unsigned char *filename)
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
{
const char *op = "add_template_measure";
const char *audit_cause = "ENOMEM";
@@ -189,19 +256,15 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
if (iint->flags & IMA_MEASURED)
return;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
+ result = ima_alloc_init_template(iint, file, filename,
+ xattr_value, xattr_len, &entry);
+ if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, audit_cause, result, 0);
return;
}
- memset(&entry->template, 0, sizeof(entry->template));
- memcpy(entry->template.digest, iint->ima_xattr.digest, IMA_DIGEST_SIZE);
- strcpy(entry->template.file_name,
- (strlen(filename) > IMA_EVENT_NAME_LEN_MAX) ?
- file->f_dentry->d_name.name : filename);
- result = ima_store_template(entry, violation, inode);
+ result = ima_store_template(entry, violation, inode, filename);
if (!result || result == -EEXIST)
iint->flags |= IMA_MEASURED;
if (result < 0)
@@ -212,14 +275,16 @@ void ima_audit_measurement(struct integrity_iint_cache *iint,
const unsigned char *filename)
{
struct audit_buffer *ab;
- char hash[(IMA_DIGEST_SIZE * 2) + 1];
+ char hash[(iint->ima_hash->length * 2) + 1];
+ const char *algo_name = hash_algo_name[iint->ima_hash->algo];
+ char algo_hash[sizeof(hash) + strlen(algo_name) + 2];
int i;
if (iint->flags & IMA_AUDITED)
return;
- for (i = 0; i < IMA_DIGEST_SIZE; i++)
- hex_byte_pack(hash + (i * 2), iint->ima_xattr.digest[i]);
+ for (i = 0; i < iint->ima_hash->length; i++)
+ hex_byte_pack(hash + (i * 2), iint->ima_hash->digest[i]);
hash[i * 2] = '\0';
ab = audit_log_start(current->audit_context, GFP_KERNEL,
@@ -230,7 +295,8 @@ void ima_audit_measurement(struct integrity_iint_cache *iint,
audit_log_format(ab, "file=");
audit_log_untrustedstring(ab, filename);
audit_log_format(ab, " hash=");
- audit_log_untrustedstring(ab, hash);
+ snprintf(algo_hash, sizeof(algo_hash), "%s:%s", algo_name, hash);
+ audit_log_untrustedstring(ab, algo_hash);
audit_log_task_info(ab, current);
audit_log_end(ab);
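
ima_audit_measurement() now logs the digest with an algorithm prefix ("sha1:<hex>", "sha256:<hex>", ...) instead of a bare hex string. A small standalone sketch of that formatting step; the function name is illustrative and the buffer sizing is only comparable to the algo_hash arithmetic above:

    #include <stdio.h>

    /* Render "algo:hexdigest"; buf should hold at least
     * strlen(algo) + 2 * len + 2 bytes. */
    static void format_algo_hash(char *buf, size_t bufsize, const char *algo,
                                 const unsigned char *digest, size_t len)
    {
            size_t i, off = (size_t)snprintf(buf, bufsize, "%s:", algo);

            for (i = 0; i < len && off + 2 < bufsize; i++, off += 2)
                    snprintf(buf + off, bufsize - off, "%02x", digest[i]);
    }
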
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 2d4becab8918..46353ee517f6 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -15,6 +15,7 @@
#include <linux/magic.h>
#include <linux/ima.h>
#include <linux/evm.h>
+#include <crypto/hash_info.h>
#include "ima.h"
@@ -43,19 +44,31 @@ int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
}
static int ima_fix_xattr(struct dentry *dentry,
- struct integrity_iint_cache *iint)
+ struct integrity_iint_cache *iint)
{
- iint->ima_xattr.type = IMA_XATTR_DIGEST;
- return __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA,
- (u8 *)&iint->ima_xattr,
- sizeof(iint->ima_xattr), 0);
+ int rc, offset;
+ u8 algo = iint->ima_hash->algo;
+
+ if (algo <= HASH_ALGO_SHA1) {
+ offset = 1;
+ iint->ima_hash->xattr.sha1.type = IMA_XATTR_DIGEST;
+ } else {
+ offset = 0;
+ iint->ima_hash->xattr.ng.type = IMA_XATTR_DIGEST_NG;
+ iint->ima_hash->xattr.ng.algo = algo;
+ }
+ rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA,
+ &iint->ima_hash->xattr.data[offset],
+ (sizeof(iint->ima_hash->xattr) - offset) +
+ iint->ima_hash->length, 0);
+ return rc;
}
/* Return specific func appraised cached result */
enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
int func)
{
- switch(func) {
+ switch (func) {
case MMAP_CHECK:
return iint->ima_mmap_status;
case BPRM_CHECK:
@@ -71,7 +84,7 @@ enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
static void ima_set_cache_status(struct integrity_iint_cache *iint,
int func, enum integrity_status status)
{
- switch(func) {
+ switch (func) {
case MMAP_CHECK:
iint->ima_mmap_status = status;
break;
@@ -90,7 +103,7 @@ static void ima_set_cache_status(struct integrity_iint_cache *iint,
static void ima_cache_flags(struct integrity_iint_cache *iint, int func)
{
- switch(func) {
+ switch (func) {
case MMAP_CHECK:
iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED);
break;
@@ -107,6 +120,50 @@ static void ima_cache_flags(struct integrity_iint_cache *iint, int func)
}
}
+void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_digest_data *hash)
+{
+ struct signature_v2_hdr *sig;
+
+ if (!xattr_value || xattr_len < 2)
+ return;
+
+ switch (xattr_value->type) {
+ case EVM_IMA_XATTR_DIGSIG:
+ sig = (typeof(sig))xattr_value;
+ if (sig->version != 2 || xattr_len <= sizeof(*sig))
+ return;
+ hash->algo = sig->hash_algo;
+ break;
+ case IMA_XATTR_DIGEST_NG:
+ hash->algo = xattr_value->digest[0];
+ break;
+ case IMA_XATTR_DIGEST:
+ /* this is for backward compatibility */
+ if (xattr_len == 21) {
+ unsigned int zero = 0;
+ if (!memcmp(&xattr_value->digest[16], &zero, 4))
+ hash->algo = HASH_ALGO_MD5;
+ else
+ hash->algo = HASH_ALGO_SHA1;
+ } else if (xattr_len == 17)
+ hash->algo = HASH_ALGO_MD5;
+ break;
+ }
+}
+
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (!inode->i_op->getxattr)
+ return 0;
+
+ return vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)xattr_value,
+ 0, GFP_NOFS);
+}
+
/*
* ima_appraise_measurement - appraise file measurement
*
@@ -116,23 +173,22 @@ static void ima_cache_flags(struct integrity_iint_cache *iint, int func)
* Return 0 on success, error code otherwise
*/
int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
- struct file *file, const unsigned char *filename)
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
{
struct dentry *dentry = file->f_dentry;
struct inode *inode = dentry->d_inode;
- struct evm_ima_xattr_data *xattr_value = NULL;
enum integrity_status status = INTEGRITY_UNKNOWN;
const char *op = "appraise_data";
char *cause = "unknown";
- int rc;
+ int rc = xattr_len, hash_start = 0;
if (!ima_appraise)
return 0;
if (!inode->i_op->getxattr)
return INTEGRITY_UNKNOWN;
- rc = vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)&xattr_value,
- 0, GFP_NOFS);
if (rc <= 0) {
if (rc && rc != -ENODATA)
goto out;
@@ -153,14 +209,25 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
goto out;
}
switch (xattr_value->type) {
+ case IMA_XATTR_DIGEST_NG:
+ /* first byte contains algorithm id */
+ hash_start = 1;
case IMA_XATTR_DIGEST:
if (iint->flags & IMA_DIGSIG_REQUIRED) {
cause = "IMA signature required";
status = INTEGRITY_FAIL;
break;
}
- rc = memcmp(xattr_value->digest, iint->ima_xattr.digest,
- IMA_DIGEST_SIZE);
+ if (xattr_len - sizeof(xattr_value->type) - hash_start >=
+ iint->ima_hash->length)
+ /* the xattr may be longer: an md5 hash in the previous
+ format occupied 20 bytes in the xattr, instead of 16
+ */
+ rc = memcmp(&xattr_value->digest[hash_start],
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ else
+ rc = -EINVAL;
if (rc) {
cause = "invalid-hash";
status = INTEGRITY_FAIL;
@@ -171,9 +238,9 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
case EVM_IMA_XATTR_DIGSIG:
iint->flags |= IMA_DIGSIG;
rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
- xattr_value->digest, rc - 1,
- iint->ima_xattr.digest,
- IMA_DIGEST_SIZE);
+ (const char *)xattr_value, rc,
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
if (rc == -EOPNOTSUPP) {
status = INTEGRITY_UNKNOWN;
} else if (rc) {
@@ -203,7 +270,6 @@ out:
ima_cache_flags(iint, func);
}
ima_set_cache_status(iint, func, status);
- kfree(xattr_value);
return status;
}
@@ -219,7 +285,7 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
if (iint->flags & IMA_DIGSIG)
return;
- rc = ima_collect_measurement(iint, file);
+ rc = ima_collect_measurement(iint, file, NULL, NULL);
if (rc < 0)
return;
@@ -315,3 +381,14 @@ int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name)
}
return result;
}
+
+#ifdef CONFIG_IMA_TRUSTED_KEYRING
+static int __init init_ima_keyring(void)
+{
+ int ret;
+
+ ret = integrity_init_keyring(INTEGRITY_KEYRING_IMA);
+ return 0;
+}
+late_initcall(init_ima_keyring);
+#endif
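
Not part of the patch: a small user-space sketch of the two xattr layouts that ima_fix_xattr() and ima_get_hash_algo() above distinguish. The numeric type values are copied by hand from enum evm_ima_xattr_type later in this series, and the assumed -ng layout (a type byte, a hash_algo id byte, then the raw digest) follows ima_get_hash_algo(); treat both as assumptions of the sketch, not ABI documentation.

    /* Print which security.ima layout a file carries.
     * Type values mirror enum evm_ima_xattr_type (assumption of this sketch). */
    #include <stdio.h>
    #include <sys/xattr.h>

    enum { IMA_XATTR_DIGEST = 1, EVM_IMA_XATTR_DIGSIG = 3, IMA_XATTR_DIGEST_NG = 4 };

    int main(int argc, char **argv)
    {
        unsigned char buf[256];
        ssize_t len;

        if (argc != 2)
            return 1;
        len = getxattr(argv[1], "security.ima", buf, sizeof(buf));
        if (len < 2) {
            fprintf(stderr, "security.ima missing or too short\n");
            return 1;
        }
        switch (buf[0]) {
        case IMA_XATTR_DIGEST:
            printf("legacy digest, %zd hash byte(s)\n", len - 1);
            break;
        case IMA_XATTR_DIGEST_NG:
            /* byte 1 carries the hash_algo id, the digest follows */
            printf("ima-ng digest, algo id %u, %zd hash byte(s)\n", buf[1], len - 2);
            break;
        case EVM_IMA_XATTR_DIGSIG:
            printf("signature (signature_v2_hdr)\n");
            break;
        default:
            printf("unknown type %u\n", buf[0]);
        }
        return 0;
    }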
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index a02e0791cf15..676e0292dfec 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>
+#include <crypto/hash_info.h>
#include "ima.h"
static struct crypto_shash *ima_shash_tfm;
@@ -28,31 +29,58 @@ int ima_init_crypto(void)
{
long rc;
- ima_shash_tfm = crypto_alloc_shash(ima_hash, 0, 0);
+ ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
if (IS_ERR(ima_shash_tfm)) {
rc = PTR_ERR(ima_shash_tfm);
- pr_err("Can not allocate %s (reason: %ld)\n", ima_hash, rc);
+ pr_err("Can not allocate %s (reason: %ld)\n",
+ hash_algo_name[ima_hash_algo], rc);
return rc;
}
return 0;
}
+static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
+{
+ struct crypto_shash *tfm = ima_shash_tfm;
+ int rc;
+
+ if (algo != ima_hash_algo && algo < HASH_ALGO__LAST) {
+ tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
+ if (IS_ERR(tfm)) {
+ rc = PTR_ERR(tfm);
+ pr_err("Can not allocate %s (reason: %d)\n",
+ hash_algo_name[algo], rc);
+ }
+ }
+ return tfm;
+}
+
+static void ima_free_tfm(struct crypto_shash *tfm)
+{
+ if (tfm != ima_shash_tfm)
+ crypto_free_shash(tfm);
+}
+
/*
* Calculate the MD5/SHA1 file digest
*/
-int ima_calc_file_hash(struct file *file, char *digest)
+static int ima_calc_file_hash_tfm(struct file *file,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
{
loff_t i_size, offset = 0;
char *rbuf;
int rc, read = 0;
struct {
struct shash_desc shash;
- char ctx[crypto_shash_descsize(ima_shash_tfm)];
+ char ctx[crypto_shash_descsize(tfm)];
} desc;
- desc.shash.tfm = ima_shash_tfm;
+ desc.shash.tfm = tfm;
desc.shash.flags = 0;
+ hash->length = crypto_shash_digestsize(tfm);
+
rc = crypto_shash_init(&desc.shash);
if (rc != 0)
return rc;
@@ -85,27 +113,83 @@ int ima_calc_file_hash(struct file *file, char *digest)
}
kfree(rbuf);
if (!rc)
- rc = crypto_shash_final(&desc.shash, digest);
+ rc = crypto_shash_final(&desc.shash, hash->digest);
if (read)
file->f_mode &= ~FMODE_READ;
out:
return rc;
}
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_file_hash_tfm(file, hash, tfm);
+
+ ima_free_tfm(tfm);
+
+ return rc;
+}
+
/*
- * Calculate the hash of a given buffer
+ * Calculate the hash of template data
*/
-int ima_calc_buffer_hash(const void *data, int len, char *digest)
+static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
+ int num_fields,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
{
struct {
struct shash_desc shash;
- char ctx[crypto_shash_descsize(ima_shash_tfm)];
+ char ctx[crypto_shash_descsize(tfm)];
} desc;
+ int rc, i;
- desc.shash.tfm = ima_shash_tfm;
+ desc.shash.tfm = tfm;
desc.shash.flags = 0;
- return crypto_shash_digest(&desc.shash, data, len, digest);
+ hash->length = crypto_shash_digestsize(tfm);
+
+ rc = crypto_shash_init(&desc.shash);
+ if (rc != 0)
+ return rc;
+
+ for (i = 0; i < num_fields; i++) {
+ rc = crypto_shash_update(&desc.shash,
+ (const u8 *) &field_data[i].len,
+ sizeof(field_data[i].len));
+ rc = crypto_shash_update(&desc.shash, field_data[i].data,
+ field_data[i].len);
+ if (rc)
+ break;
+ }
+
+ if (!rc)
+ rc = crypto_shash_final(&desc.shash, hash->digest);
+
+ return rc;
+}
+
+int ima_calc_field_array_hash(struct ima_field_data *field_data, int num_fields,
+ struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_field_array_hash_tfm(field_data, num_fields, hash, tfm);
+
+ ima_free_tfm(tfm);
+
+ return rc;
}
static void __init ima_pcrread(int idx, u8 *pcr)
@@ -120,16 +204,17 @@ static void __init ima_pcrread(int idx, u8 *pcr)
/*
* Calculate the boot aggregate hash
*/
-int __init ima_calc_boot_aggregate(char *digest)
+static int __init ima_calc_boot_aggregate_tfm(char *digest,
+ struct crypto_shash *tfm)
{
- u8 pcr_i[IMA_DIGEST_SIZE];
+ u8 pcr_i[TPM_DIGEST_SIZE];
int rc, i;
struct {
struct shash_desc shash;
- char ctx[crypto_shash_descsize(ima_shash_tfm)];
+ char ctx[crypto_shash_descsize(tfm)];
} desc;
- desc.shash.tfm = ima_shash_tfm;
+ desc.shash.tfm = tfm;
desc.shash.flags = 0;
rc = crypto_shash_init(&desc.shash);
@@ -140,9 +225,26 @@ int __init ima_calc_boot_aggregate(char *digest)
for (i = TPM_PCR0; i < TPM_PCR8; i++) {
ima_pcrread(i, pcr_i);
/* now accumulate with current aggregate */
- rc = crypto_shash_update(&desc.shash, pcr_i, IMA_DIGEST_SIZE);
+ rc = crypto_shash_update(&desc.shash, pcr_i, TPM_DIGEST_SIZE);
}
if (!rc)
crypto_shash_final(&desc.shash, digest);
return rc;
}
+
+int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ hash->length = crypto_shash_digestsize(tfm);
+ rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);
+
+ ima_free_tfm(tfm);
+
+ return rc;
+}
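
For reference, a minimal in-kernel sketch (not part of the patch) of the calling convention the reworked ima_calc_file_hash() expects, mirroring the "header plus digest buffer" pattern used elsewhere in this series; the function name and the choice of HASH_ALGO_SHA256 are illustrative only.

    #include <crypto/hash_info.h>
    #include "ima.h"

    static int example_measure(struct file *file)
    {
        struct {
            struct ima_digest_data hdr;
            char digest[IMA_MAX_DIGEST_SIZE];
        } hash;
        int rc;

        hash.hdr.algo = HASH_ALGO_SHA256;   /* any enum hash_algo value */
        rc = ima_calc_file_hash(file, &hash.hdr);
        if (rc)
            return rc;

        /* hash.hdr.length was filled in from crypto_shash_digestsize() */
        pr_info("got a %d byte digest\n", hash.hdr.length);
        return 0;
    }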
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 38477c9c3415..d47a7c86a21d 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -88,8 +88,7 @@ static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
* against concurrent list-extension
*/
rcu_read_lock();
- qe = list_entry_rcu(qe->later.next,
- struct ima_queue_entry, later);
+ qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later);
rcu_read_unlock();
(*pos)++;
@@ -100,7 +99,7 @@ static void ima_measurements_stop(struct seq_file *m, void *v)
{
}
-static void ima_putc(struct seq_file *m, void *data, int datalen)
+void ima_putc(struct seq_file *m, void *data, int datalen)
{
while (datalen--)
seq_putc(m, *(char *)data++);
@@ -111,6 +110,7 @@ static void ima_putc(struct seq_file *m, void *data, int datalen)
* char[20]=template digest
* 32bit-le=template name size
* char[n]=template name
+ * [eventdata length]
* eventdata[n]=template specific data
*/
static int ima_measurements_show(struct seq_file *m, void *v)
@@ -120,6 +120,7 @@ static int ima_measurements_show(struct seq_file *m, void *v)
struct ima_template_entry *e;
int namelen;
u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+ int i;
/* get entry */
e = qe->entry;
@@ -134,18 +135,25 @@ static int ima_measurements_show(struct seq_file *m, void *v)
ima_putc(m, &pcr, sizeof pcr);
/* 2nd: template digest */
- ima_putc(m, e->digest, IMA_DIGEST_SIZE);
+ ima_putc(m, e->digest, TPM_DIGEST_SIZE);
/* 3rd: template name size */
- namelen = strlen(e->template_name);
+ namelen = strlen(e->template_desc->name);
ima_putc(m, &namelen, sizeof namelen);
/* 4th: template name */
- ima_putc(m, (void *)e->template_name, namelen);
+ ima_putc(m, e->template_desc->name, namelen);
+
+ /* 5th: template length (except for 'ima' template) */
+ if (strcmp(e->template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0)
+ ima_putc(m, &e->template_data_len,
+ sizeof(e->template_data_len));
- /* 5th: template specific data */
- ima_template_show(m, (struct ima_template_data *)&e->template,
- IMA_SHOW_BINARY);
+ /* 6th: template specific data */
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ e->template_desc->fields[i]->field_show(m, IMA_SHOW_BINARY,
+ &e->template_data[i]);
+ }
return 0;
}
@@ -168,41 +176,21 @@ static const struct file_operations ima_measurements_ops = {
.release = seq_release,
};
-static void ima_print_digest(struct seq_file *m, u8 *digest)
+void ima_print_digest(struct seq_file *m, u8 *digest, int size)
{
int i;
- for (i = 0; i < IMA_DIGEST_SIZE; i++)
+ for (i = 0; i < size; i++)
seq_printf(m, "%02x", *(digest + i));
}
-void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show)
-{
- struct ima_template_data *entry = e;
- int namelen;
-
- switch (show) {
- case IMA_SHOW_ASCII:
- ima_print_digest(m, entry->digest);
- seq_printf(m, " %s\n", entry->file_name);
- break;
- case IMA_SHOW_BINARY:
- ima_putc(m, entry->digest, IMA_DIGEST_SIZE);
-
- namelen = strlen(entry->file_name);
- ima_putc(m, &namelen, sizeof namelen);
- ima_putc(m, entry->file_name, namelen);
- default:
- break;
- }
-}
-
/* print in ascii */
static int ima_ascii_measurements_show(struct seq_file *m, void *v)
{
/* the list never shrinks, so we don't need a lock here */
struct ima_queue_entry *qe = v;
struct ima_template_entry *e;
+ int i;
/* get entry */
e = qe->entry;
@@ -213,14 +201,21 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v)
seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX);
/* 2nd: SHA1 template hash */
- ima_print_digest(m, e->digest);
+ ima_print_digest(m, e->digest, TPM_DIGEST_SIZE);
/* 3rd: template name */
- seq_printf(m, " %s ", e->template_name);
+ seq_printf(m, " %s", e->template_desc->name);
/* 4th: template specific data */
- ima_template_show(m, (struct ima_template_data *)&e->template,
- IMA_SHOW_ASCII);
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ seq_puts(m, " ");
+ if (e->template_data[i].len == 0)
+ continue;
+
+ e->template_desc->fields[i]->field_show(m, IMA_SHOW_ASCII,
+ &e->template_data[i]);
+ }
+ seq_puts(m, "\n");
return 0;
}
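
A hedged user-space sketch of walking the record header documented above (PCR, template digest, name size, name and, for templates other than 'ima', a total template-data length). It assumes securityfs is mounted at /sys/kernel/security, a little-endian host, and a non-'ima' template; the per-field data is simply skipped.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        FILE *f = fopen("/sys/kernel/security/ima/binary_runtime_measurements", "rb");
        unsigned char digest[20];   /* TPM_DIGEST_SIZE */
        uint32_t pcr, name_len, data_len;
        char name[64];

        if (!f)
            return 1;
        while (fread(&pcr, 4, 1, f) == 1) {
            if (fread(digest, sizeof(digest), 1, f) != 1 ||
                fread(&name_len, 4, 1, f) != 1 || name_len >= sizeof(name) ||
                fread(name, name_len, 1, f) != 1 ||
                fread(&data_len, 4, 1, f) != 1)
                break;
            name[name_len] = '\0';
            printf("pcr %u, template %s, %u byte(s) of field data\n", pcr, name, data_len);
            fseek(f, data_len, SEEK_CUR);   /* skip the per-field data */
        }
        fclose(f);
        return 0;
    }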
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 162ea723db3d..15f34bd40abe 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -18,6 +18,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <crypto/hash_info.h>
#include "ima.h"
/* name for boot aggregate entry */
@@ -42,28 +43,38 @@ int ima_used_chip;
static void __init ima_add_boot_aggregate(void)
{
struct ima_template_entry *entry;
+ struct integrity_iint_cache tmp_iint, *iint = &tmp_iint;
const char *op = "add_boot_aggregate";
const char *audit_cause = "ENOMEM";
int result = -ENOMEM;
- int violation = 1;
+ int violation = 0;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[TPM_DIGEST_SIZE];
+ } hash;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- goto err_out;
+ memset(iint, 0, sizeof(*iint));
+ memset(&hash, 0, sizeof(hash));
+ iint->ima_hash = &hash.hdr;
+ iint->ima_hash->algo = HASH_ALGO_SHA1;
+ iint->ima_hash->length = SHA1_DIGEST_SIZE;
- memset(&entry->template, 0, sizeof(entry->template));
- strncpy(entry->template.file_name, boot_aggregate_name,
- IMA_EVENT_NAME_LEN_MAX);
if (ima_used_chip) {
- violation = 0;
- result = ima_calc_boot_aggregate(entry->template.digest);
+ result = ima_calc_boot_aggregate(&hash.hdr);
if (result < 0) {
audit_cause = "hashing_error";
kfree(entry);
goto err_out;
}
}
- result = ima_store_template(entry, violation, NULL);
+
+ result = ima_alloc_init_template(iint, NULL, boot_aggregate_name,
+ NULL, 0, &entry);
+ if (result < 0)
+ return;
+
+ result = ima_store_template(entry, violation, NULL,
+ boot_aggregate_name);
if (result < 0)
kfree(entry);
return;
@@ -74,7 +85,7 @@ err_out:
int __init ima_init(void)
{
- u8 pcr_i[IMA_DIGEST_SIZE];
+ u8 pcr_i[TPM_DIGEST_SIZE];
int rc;
ima_used_chip = 0;
@@ -88,6 +99,10 @@ int __init ima_init(void)
rc = ima_init_crypto();
if (rc)
return rc;
+ rc = ima_init_template();
+ if (rc != 0)
+ return rc;
+
ima_add_boot_aggregate(); /* boot aggregate must be first entry */
ima_init_policy();
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index e9508d5bbfcf..149ee1119f87 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/ima.h>
+#include <crypto/hash_info.h>
#include "ima.h"
@@ -35,11 +36,33 @@ int ima_appraise = IMA_APPRAISE_ENFORCE;
int ima_appraise;
#endif
-char *ima_hash = "sha1";
+int ima_hash_algo = HASH_ALGO_SHA1;
+static int hash_setup_done;
+
static int __init hash_setup(char *str)
{
- if (strncmp(str, "md5", 3) == 0)
- ima_hash = "md5";
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ int i;
+
+ if (hash_setup_done)
+ return 1;
+
+ if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
+ if (strncmp(str, "sha1", 4) == 0)
+ ima_hash_algo = HASH_ALGO_SHA1;
+ else if (strncmp(str, "md5", 3) == 0)
+ ima_hash_algo = HASH_ALGO_MD5;
+ goto out;
+ }
+
+ for (i = 0; i < HASH_ALGO__LAST; i++) {
+ if (strcmp(str, hash_algo_name[i]) == 0) {
+ ima_hash_algo = i;
+ break;
+ }
+ }
+out:
+ hash_setup_done = 1;
return 1;
}
__setup("ima_hash=", hash_setup);
@@ -92,10 +115,9 @@ out:
pathname = dentry->d_name.name;
if (send_tomtou)
- ima_add_violation(inode, pathname,
- "invalid_pcr", "ToMToU");
+ ima_add_violation(file, pathname, "invalid_pcr", "ToMToU");
if (send_writers)
- ima_add_violation(inode, pathname,
+ ima_add_violation(file, pathname,
"invalid_pcr", "open_writers");
kfree(pathbuf);
}
@@ -144,9 +166,12 @@ static int process_measurement(struct file *file, const char *filename,
{
struct inode *inode = file_inode(file);
struct integrity_iint_cache *iint;
+ struct ima_template_desc *template_desc = ima_template_desc_current();
char *pathbuf = NULL;
const char *pathname = NULL;
int rc = -ENOMEM, action, must_appraise, _func;
+ struct evm_ima_xattr_data *xattr_value = NULL, **xattr_ptr = NULL;
+ int xattr_len = 0;
if (!ima_initialized || !S_ISREG(inode->i_mode))
return 0;
@@ -185,7 +210,13 @@ static int process_measurement(struct file *file, const char *filename,
goto out_digsig;
}
- rc = ima_collect_measurement(iint, file);
+ if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
+ if (action & IMA_APPRAISE_SUBMASK)
+ xattr_ptr = &xattr_value;
+ } else
+ xattr_ptr = &xattr_value;
+
+ rc = ima_collect_measurement(iint, file, xattr_ptr, &xattr_len);
if (rc != 0)
goto out_digsig;
@@ -194,9 +225,11 @@ static int process_measurement(struct file *file, const char *filename,
pathname = (const char *)file->f_dentry->d_name.name;
if (action & IMA_MEASURE)
- ima_store_measurement(iint, file, pathname);
+ ima_store_measurement(iint, file, pathname,
+ xattr_value, xattr_len);
if (action & IMA_APPRAISE_SUBMASK)
- rc = ima_appraise_measurement(_func, iint, file, pathname);
+ rc = ima_appraise_measurement(_func, iint, file, pathname,
+ xattr_value, xattr_len);
if (action & IMA_AUDIT)
ima_audit_measurement(iint, pathname);
kfree(pathbuf);
@@ -205,6 +238,7 @@ out_digsig:
rc = -EACCES;
out:
mutex_unlock(&inode->i_mutex);
+ kfree(xattr_value);
if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE))
return -EACCES;
return 0;
@@ -244,9 +278,9 @@ int ima_file_mmap(struct file *file, unsigned long prot)
int ima_bprm_check(struct linux_binprm *bprm)
{
return process_measurement(bprm->file,
- (strcmp(bprm->filename, bprm->interp) == 0) ?
- bprm->filename : bprm->interp,
- MAY_EXEC, BPRM_CHECK);
+ (strcmp(bprm->filename, bprm->interp) == 0) ?
+ bprm->filename : bprm->interp,
+ MAY_EXEC, BPRM_CHECK);
}
/**
@@ -263,8 +297,8 @@ int ima_file_check(struct file *file, int mask)
{
ima_rdwr_violation_check(file);
return process_measurement(file, NULL,
- mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
- FILE_CHECK);
+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
+ FILE_CHECK);
}
EXPORT_SYMBOL_GPL(ima_file_check);
@@ -294,6 +328,7 @@ static int __init init_ima(void)
{
int error;
+ hash_setup(CONFIG_IMA_DEFAULT_HASH);
error = ima_init();
if (!error)
ima_initialized = 1;
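
With the __setup() handlers above, the measurement hash and template can be chosen on the kernel command line, for example (illustrative values):

    ima_hash=sha256 ima_template=ima-ng

The original 'ima' template still accepts only sha1 and md5, and an unrecognized template name falls back to CONFIG_IMA_DEFAULT_TEMPLATE; CONFIG_IMA_DEFAULT_HASH now provides the built-in default passed to hash_setup() at init time.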
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 399433ad614e..a9c3d3cd1990 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -73,7 +73,6 @@ static struct ima_rule_entry default_rules[] = {
{.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = RAMFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index ff63fe00c195..d85e99761f4f 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -50,7 +50,7 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
key = ima_hash_key(digest_value);
rcu_read_lock();
hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
- rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE);
+ rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE);
if (rc == 0) {
ret = qe;
break;
@@ -104,9 +104,10 @@ static int ima_pcr_extend(const u8 *hash)
* and extend the pcr.
*/
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
- const char *op, struct inode *inode)
+ const char *op, struct inode *inode,
+ const unsigned char *filename)
{
- u8 digest[IMA_DIGEST_SIZE];
+ u8 digest[TPM_DIGEST_SIZE];
const char *audit_cause = "hash_added";
char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
int audit_info = 1;
@@ -141,8 +142,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
out:
mutex_unlock(&ima_extend_list_mutex);
- integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
- entry->template.file_name,
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, audit_cause, result, audit_info);
return result;
}
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
new file mode 100644
index 000000000000..4e5da990630b
--- /dev/null
+++ b/security/integrity/ima/ima_template.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template.c
+ * Helpers to manage template descriptors.
+ */
+#include <crypto/hash_info.h>
+
+#include "ima.h"
+#include "ima_template_lib.h"
+
+static struct ima_template_desc defined_templates[] = {
+ {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
+ {.name = "ima-ng",.fmt = "d-ng|n-ng"},
+ {.name = "ima-sig",.fmt = "d-ng|n-ng|sig"},
+};
+
+static struct ima_template_field supported_fields[] = {
+ {.field_id = "d",.field_init = ima_eventdigest_init,
+ .field_show = ima_show_template_digest},
+ {.field_id = "n",.field_init = ima_eventname_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "d-ng",.field_init = ima_eventdigest_ng_init,
+ .field_show = ima_show_template_digest_ng},
+ {.field_id = "n-ng",.field_init = ima_eventname_ng_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "sig",.field_init = ima_eventsig_init,
+ .field_show = ima_show_template_sig},
+};
+
+static struct ima_template_desc *ima_template;
+static struct ima_template_desc *lookup_template_desc(const char *name);
+
+static int __init ima_template_setup(char *str)
+{
+ struct ima_template_desc *template_desc;
+ int template_len = strlen(str);
+
+ /*
+ * Verify that a template with the supplied name exists.
+ * If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
+ */
+ template_desc = lookup_template_desc(str);
+ if (!template_desc)
+ return 1;
+
+ /*
+ * Verify whether the current hash algorithm is supported
+ * by the 'ima' template.
+ */
+ if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 &&
+ ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) {
+ pr_err("IMA: template does not support hash alg\n");
+ return 1;
+ }
+
+ ima_template = template_desc;
+ return 1;
+}
+__setup("ima_template=", ima_template_setup);
+
+static struct ima_template_desc *lookup_template_desc(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
+ if (strcmp(defined_templates[i].name, name) == 0)
+ return defined_templates + i;
+ }
+
+ return NULL;
+}
+
+static struct ima_template_field *lookup_template_field(const char *field_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(supported_fields); i++)
+ if (strncmp(supported_fields[i].field_id, field_id,
+ IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0)
+ return &supported_fields[i];
+ return NULL;
+}
+
+static int template_fmt_size(char *template_fmt)
+{
+ char c;
+ int template_fmt_len = strlen(template_fmt);
+ int i = 0, j = 0;
+
+ while (i < template_fmt_len) {
+ c = template_fmt[i];
+ if (c == '|')
+ j++;
+ i++;
+ }
+
+ return j + 1;
+}
+
+static int template_desc_init_fields(char *template_fmt,
+ struct ima_template_field ***fields,
+ int *num_fields)
+{
+ char *c, *template_fmt_ptr = template_fmt;
+ int template_num_fields = template_fmt_size(template_fmt);
+ int i, result = 0;
+
+ if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX)
+ return -EINVAL;
+
+ *fields = kzalloc(template_num_fields * sizeof(*fields), GFP_KERNEL);
+ if (*fields == NULL) {
+ result = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; (c = strsep(&template_fmt_ptr, "|")) != NULL &&
+ i < template_num_fields; i++) {
+ struct ima_template_field *f = lookup_template_field(c);
+
+ if (!f) {
+ result = -ENOENT;
+ goto out;
+ }
+ (*fields)[i] = f;
+ }
+ *num_fields = i;
+ return 0;
+out:
+ kfree(*fields);
+ *fields = NULL;
+ return result;
+}
+
+static int init_defined_templates(void)
+{
+ int i = 0;
+ int result = 0;
+
+ /* Init defined templates. */
+ for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
+ struct ima_template_desc *template = &defined_templates[i];
+
+ result = template_desc_init_fields(template->fmt,
+ &(template->fields),
+ &(template->num_fields));
+ if (result < 0)
+ return result;
+ }
+ return result;
+}
+
+struct ima_template_desc *ima_template_desc_current(void)
+{
+ if (!ima_template)
+ ima_template =
+ lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
+ return ima_template;
+}
+
+int ima_init_template(void)
+{
+ int result;
+
+ result = init_defined_templates();
+ if (result < 0)
+ return result;
+
+ return 0;
+}
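
The built-in descriptors above map directly onto the ASCII measurement list. As an illustration (digests shortened, not taken from a real system), an 'ima-ng' entry, format 'd-ng|n-ng', renders as:

    10 91f34b5c... ima-ng sha256:4d8a17... /usr/bin/bash

that is, PCR index, SHA1 template digest, template name, the d-ng field (algorithm prefix plus file digest) and the n-ng field (full pathname); 'ima-sig' appends the hex-encoded signature xattr as one more field when it is present.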
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
new file mode 100644
index 000000000000..6d66ad6ed265
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template_lib.c
+ * Library of supported template fields.
+ */
+#include <crypto/hash_info.h>
+
+#include "ima_template_lib.h"
+
+static bool ima_template_hash_algo_allowed(u8 algo)
+{
+ if (algo == HASH_ALGO_SHA1 || algo == HASH_ALGO_MD5)
+ return true;
+
+ return false;
+}
+
+enum data_formats {
+ DATA_FMT_DIGEST = 0,
+ DATA_FMT_DIGEST_WITH_ALGO,
+ DATA_FMT_EVENT_NAME,
+ DATA_FMT_STRING,
+ DATA_FMT_HEX
+};
+
+static int ima_write_template_field_data(const void *data, const u32 datalen,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf, *buf_ptr;
+ u32 buflen;
+
+ switch (datafmt) {
+ case DATA_FMT_EVENT_NAME:
+ buflen = IMA_EVENT_NAME_LEN_MAX + 1;
+ break;
+ case DATA_FMT_STRING:
+ buflen = datalen + 1;
+ break;
+ default:
+ buflen = datalen;
+ }
+
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data, datalen);
+
+ /*
+ * Replace all space characters with underscore for event names and
+ * strings. This avoids filenames that contain spaces, or that end with
+ * the suffix ' (deleted)', being split into multiple template fields
+ * when a measurement list is parsed (the space is the delimiter
+ * character for measurement lists in ASCII format).
+ */
+ if (datafmt == DATA_FMT_EVENT_NAME || datafmt == DATA_FMT_STRING) {
+ for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++)
+ if (*buf_ptr == ' ')
+ *buf_ptr = '_';
+ }
+
+ field_data->data = buf;
+ field_data->len = buflen;
+ return 0;
+}
+
+static void ima_show_template_data_ascii(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf_ptr = field_data->data, buflen = field_data->len;
+
+ switch (datafmt) {
+ case DATA_FMT_DIGEST_WITH_ALGO:
+ buf_ptr = strnchr(field_data->data, buflen, ':');
+ if (buf_ptr != field_data->data)
+ seq_printf(m, "%s", field_data->data);
+
+ /* skip ':' and '\0' */
+ buf_ptr += 2;
+ buflen -= buf_ptr - field_data->data;
+ case DATA_FMT_DIGEST:
+ case DATA_FMT_HEX:
+ if (!buflen)
+ break;
+ ima_print_digest(m, buf_ptr, buflen);
+ break;
+ case DATA_FMT_STRING:
+ seq_printf(m, "%s", buf_ptr);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ima_show_template_data_binary(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ ima_putc(m, &field_data->len, sizeof(u32));
+ if (!field_data->len)
+ return;
+ ima_putc(m, field_data->data, field_data->len);
+}
+
+static void ima_show_template_field_data(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ switch (show) {
+ case IMA_SHOW_ASCII:
+ ima_show_template_data_ascii(m, show, datafmt, field_data);
+ break;
+ case IMA_SHOW_BINARY:
+ ima_show_template_data_binary(m, show, datafmt, field_data);
+ break;
+ default:
+ break;
+ }
+}
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST, field_data);
+}
+
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST_WITH_ALGO,
+ field_data);
+}
+
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_STRING, field_data);
+}
+
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
+}
+
+static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo,
+ struct ima_field_data *field_data,
+ bool size_limit)
+{
+ /*
+ * digest formats:
+ * - DATA_FMT_DIGEST: digest
+ * - DATA_FMT_DIGEST_WITH_ALGO: [<hash algo>] + ':' + '\0' + digest,
+ * where <hash algo> is provided if the hash algorithm is not
+ * SHA1 or MD5
+ */
+ u8 buffer[CRYPTO_MAX_ALG_NAME + 2 + IMA_MAX_DIGEST_SIZE] = { 0 };
+ enum data_formats fmt = DATA_FMT_DIGEST;
+ u32 offset = 0;
+
+ if (!size_limit) {
+ fmt = DATA_FMT_DIGEST_WITH_ALGO;
+ if (hash_algo < HASH_ALGO__LAST)
+ offset += snprintf(buffer, CRYPTO_MAX_ALG_NAME + 1,
+ "%s", hash_algo_name[hash_algo]);
+ buffer[offset] = ':';
+ offset += 2;
+ }
+
+ if (digest)
+ memcpy(buffer + offset, digest, digestsize);
+ else
+ /*
+ * If digest is NULL, the event being recorded is a violation.
+ * Make room for the digest by increasing the offset by
+ * IMA_DIGEST_SIZE.
+ */
+ offset += IMA_DIGEST_SIZE;
+
+ return ima_write_template_field_data(buffer, offset + digestsize,
+ fmt, field_data);
+}
+
+/*
+ * This function writes the digest of an event (with size limit).
+ */
+int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ struct {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+ } hash;
+ u8 *cur_digest = NULL;
+ u32 cur_digestsize = 0;
+ struct inode *inode;
+ int result;
+
+ memset(&hash, 0, sizeof(hash));
+
+ if (!iint) /* recording a violation. */
+ goto out;
+
+ if (ima_template_hash_algo_allowed(iint->ima_hash->algo)) {
+ cur_digest = iint->ima_hash->digest;
+ cur_digestsize = iint->ima_hash->length;
+ goto out;
+ }
+
+ if (!file) /* missing info to re-calculate the digest */
+ return -EINVAL;
+
+ inode = file_inode(file);
+ hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ?
+ ima_hash_algo : HASH_ALGO_SHA1;
+ result = ima_calc_file_hash(file, &hash.hdr);
+ if (result) {
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+ filename, "collect_data",
+ "failed", result, 0);
+ return result;
+ }
+ cur_digest = hash.hdr.digest;
+ cur_digestsize = hash.hdr.length;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize, -1,
+ field_data, true);
+}
+
+/*
+ * This function writes the digest of an event (without size limit).
+ */
+int ima_eventdigest_ng_init(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_field_data *field_data)
+{
+ u8 *cur_digest = NULL, hash_algo = HASH_ALGO__LAST;
+ u32 cur_digestsize = 0;
+
+ /* If iint is NULL, we are recording a violation. */
+ if (!iint)
+ goto out;
+
+ cur_digest = iint->ima_hash->digest;
+ cur_digestsize = iint->ima_hash->length;
+
+ hash_algo = iint->ima_hash->algo;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ hash_algo, field_data, false);
+}
+
+static int ima_eventname_init_common(struct integrity_iint_cache *iint,
+ struct file *file,
+ const unsigned char *filename,
+ struct ima_field_data *field_data,
+ bool size_limit)
+{
+ const char *cur_filename = NULL;
+ u32 cur_filename_len = 0;
+ enum data_formats fmt = size_limit ?
+ DATA_FMT_EVENT_NAME : DATA_FMT_STRING;
+
+ BUG_ON(filename == NULL && file == NULL);
+
+ if (filename) {
+ cur_filename = filename;
+ cur_filename_len = strlen(filename);
+
+ if (!size_limit || cur_filename_len <= IMA_EVENT_NAME_LEN_MAX)
+ goto out;
+ }
+
+ if (file) {
+ cur_filename = file->f_dentry->d_name.name;
+ cur_filename_len = strlen(cur_filename);
+ } else
+ /*
+ * Truncate the filename if it is too long and no
+ * file descriptor is available.
+ */
+ cur_filename_len = IMA_EVENT_NAME_LEN_MAX;
+out:
+ return ima_write_template_field_data(cur_filename, cur_filename_len,
+ fmt, field_data);
+}
+
+/*
+ * This function writes the name of an event (with size limit).
+ */
+int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(iint, file, filename,
+ field_data, true);
+}
+
+/*
+ * This function writes the name of an event (without size limit).
+ */
+int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(iint, file, filename,
+ field_data, false);
+}
+
+/*
+ * ima_eventsig_init - include the file signature as part of the template data
+ */
+int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ enum data_formats fmt = DATA_FMT_HEX;
+ int rc = 0;
+
+ if ((!xattr_value) || (xattr_value->type != EVM_IMA_XATTR_DIGSIG))
+ goto out;
+
+ rc = ima_write_template_field_data(xattr_value, xattr_len, fmt,
+ field_data);
+out:
+ return rc;
+}
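
As a worked size example for ima_eventdigest_init_common() above: for a SHA-256 file digest the d-ng field holds the string "sha256" (6 bytes), the ':' separator plus its terminating NUL (offset += 2), then the 32-byte binary digest, 40 bytes in total, and that length is what ima_show_template_data_binary() prefixes to the field in the binary list.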
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
new file mode 100644
index 000000000000..63f6b52cb1c2
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template_lib.h
+ * Header for the library of supported template fields.
+ */
+#ifndef __LINUX_IMA_TEMPLATE_LIB_H
+#define __LINUX_IMA_TEMPLATE_LIB_H
+
+#include <linux/seq_file.h>
+#include "ima.h"
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+int ima_eventdigest_ng_init(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_field_data *field_data);
+int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+#endif /* __LINUX_IMA_TEMPLATE_LIB_H */
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index c42fb7a70dee..b9e7c133734a 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -54,25 +54,57 @@ enum evm_ima_xattr_type {
IMA_XATTR_DIGEST = 0x01,
EVM_XATTR_HMAC,
EVM_IMA_XATTR_DIGSIG,
+ IMA_XATTR_DIGEST_NG,
};
struct evm_ima_xattr_data {
u8 type;
u8 digest[SHA1_DIGEST_SIZE];
-} __attribute__((packed));
+} __packed;
+
+#define IMA_MAX_DIGEST_SIZE 64
+
+struct ima_digest_data {
+ u8 algo;
+ u8 length;
+ union {
+ struct {
+ u8 unused;
+ u8 type;
+ } sha1;
+ struct {
+ u8 type;
+ u8 algo;
+ } ng;
+ u8 data[2];
+ } xattr;
+ u8 digest[0];
+} __packed;
+
+/*
+ * signature format v2 - for using with asymmetric keys
+ */
+struct signature_v2_hdr {
+ uint8_t type; /* xattr type */
+ uint8_t version; /* signature format version */
+ uint8_t hash_algo; /* Digest algorithm [enum pkey_hash_algo] */
+ uint32_t keyid; /* IMA key identifier - not X509/PGP specific */
+ uint16_t sig_size; /* signature size */
+ uint8_t sig[0]; /* signature payload */
+} __packed;
/* integrity data associated with an inode */
struct integrity_iint_cache {
- struct rb_node rb_node; /* rooted in integrity_iint_tree */
+ struct rb_node rb_node; /* rooted in integrity_iint_tree */
struct inode *inode; /* back pointer to inode in question */
u64 version; /* track inode changes */
unsigned long flags;
- struct evm_ima_xattr_data ima_xattr;
enum integrity_status ima_file_status:4;
enum integrity_status ima_mmap_status:4;
enum integrity_status ima_bprm_status:4;
enum integrity_status ima_module_status:4;
enum integrity_status evm_status:4;
+ struct ima_digest_data *ima_hash;
};
/* rbtree tree calls to lookup, insert, delete
@@ -89,7 +121,7 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
#ifdef CONFIG_INTEGRITY_SIGNATURE
int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
- const char *digest, int digestlen);
+ const char *digest, int digestlen);
#else
@@ -105,12 +137,19 @@ static inline int integrity_digsig_verify(const unsigned int id,
#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
int asymmetric_verify(struct key *keyring, const char *sig,
int siglen, const char *data, int datalen);
+
+int integrity_init_keyring(const unsigned int id);
#else
static inline int asymmetric_verify(struct key *keyring, const char *sig,
int siglen, const char *data, int datalen)
{
return -EOPNOTSUPP;
}
+
+static int integrity_init_keyring(const unsigned int id)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_INTEGRITY_AUDIT
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index a90d6d300dbd..a4f3f8c48d6e 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -4,6 +4,7 @@
config KEYS
bool "Enable access key retention support"
+ select ASSOCIATIVE_ARRAY
help
This option provides support for retaining authentication tokens and
access keys in the kernel.
@@ -19,6 +20,34 @@ config KEYS
If you are unsure as to whether this is required, answer N.
+config PERSISTENT_KEYRINGS
+ bool "Enable register of persistent per-UID keyrings"
+ depends on KEYS
+ help
+ This option provides a register of persistent per-UID keyrings,
+ primarily aimed at Kerberos key storage. The keyrings are persistent
+ in the sense that they stay around after all processes of that UID
+ have exited, not that they survive the machine being rebooted.
+
+ A particular keyring may be accessed by either the user whose keyring
+ it is or by a process with administrative privileges. The active
+ LSMs get to rule on which admin-level processes get to access the
+ cache.
+
+ Keyrings are created and added into the register upon demand and get
+ removed if they expire (a default timeout is set upon creation).
+
+config BIG_KEYS
+ bool "Large payload keys"
+ depends on KEYS
+ depends on TMPFS
+ help
+ This option provides support for holding large keys within the kernel
+ (for example Kerberos ticket caches). The data may be stored out to
+ swapspace by tmpfs.
+
+ If you are unsure as to whether this is required, answer N.
+
config TRUSTED_KEYS
tristate "TRUSTED KEYS"
depends on KEYS && TCG_TPM
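
Not part of the patch: a user-space sketch exercising the two features described in the help texts above, using libkeyutils (link with -lkeyutils). KEYCTL_GET_PERSISTENT is defined by hand in case the installed keyutils.h predates this series; the constant, the use of -1 for "the calling user's own keyring" and the overall flow are assumptions of the sketch rather than ABI documentation.

    #include <stdio.h>
    #include <string.h>
    #include <keyutils.h>

    #ifndef KEYCTL_GET_PERSISTENT
    #define KEYCTL_GET_PERSISTENT 22
    #endif

    int main(void)
    {
        char blob[4096];
        key_serial_t big, persistent;

        memset(blob, 'A', sizeof(blob));

        /* a big_key payload above the threshold lands in a shmem file */
        big = add_key("big_key", "example-blob", blob, sizeof(blob),
                      KEY_SPEC_SESSION_KEYRING);
        if (big < 0)
            perror("add_key(big_key)");

        /* create or look up this user's persistent keyring and link it
         * into the session keyring */
        persistent = keyctl(KEYCTL_GET_PERSISTENT, -1, KEY_SPEC_SESSION_KEYRING);
        if (persistent < 0)
            perror("keyctl(KEYCTL_GET_PERSISTENT)");
        else
            printf("persistent keyring: %d\n", persistent);

        return 0;
    }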
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 504aaa008388..dfb3a7bededf 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -18,9 +18,11 @@ obj-y := \
obj-$(CONFIG_KEYS_COMPAT) += compat.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSCTL) += sysctl.o
+obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o
#
# Key types
#
+obj-$(CONFIG_BIG_KEYS) += big_key.o
obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
new file mode 100644
index 000000000000..2cf5e62d67af
--- /dev/null
+++ b/security/keys/big_key.c
@@ -0,0 +1,206 @@
+/* Large capacity key type
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/file.h>
+#include <linux/shmem_fs.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/big_key-type.h>
+
+MODULE_LICENSE("GPL");
+
+/*
+ * If the data is under this limit, there's no point creating a shm file to
+ * hold it as the permanently resident metadata for the shmem fs will be at
+ * least as large as the data.
+ */
+#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))
+
+/*
+ * big_key defined keys take an arbitrary string as the description and an
+ * arbitrary blob of data as the payload
+ */
+struct key_type key_type_big_key = {
+ .name = "big_key",
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .instantiate = big_key_instantiate,
+ .match = user_match,
+ .revoke = big_key_revoke,
+ .destroy = big_key_destroy,
+ .describe = big_key_describe,
+ .read = big_key_read,
+};
+
+/*
+ * Instantiate a big key
+ */
+int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
+{
+ struct path *path = (struct path *)&key->payload.data2;
+ struct file *file;
+ ssize_t written;
+ size_t datalen = prep->datalen;
+ int ret;
+
+ ret = -EINVAL;
+ if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
+ goto error;
+
+ /* Set an arbitrary quota */
+ ret = key_payload_reserve(key, 16);
+ if (ret < 0)
+ goto error;
+
+ key->type_data.x[1] = datalen;
+
+ if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ /* Create a shmem file to store the data in. This will permit the data
+ * to be swapped out if needed.
+ *
+ * TODO: Encrypt the stored data with a temporary key.
+ */
+ file = shmem_file_setup("", datalen, 0);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err_quota;
+ }
+
+ written = kernel_write(file, prep->data, prep->datalen, 0);
+ if (written != datalen) {
+ if (written >= 0)
+ ret = -ENOMEM;
+ goto err_fput;
+ }
+
+ /* Pin the mount and dentry to the key so that we can open it again
+ * later
+ */
+ *path = file->f_path;
+ path_get(path);
+ fput(file);
+ } else {
+ /* Just store the data in a buffer */
+ void *data = kmalloc(datalen, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto err_quota;
+ }
+
+ key->payload.data = memcpy(data, prep->data, prep->datalen);
+ }
+ return 0;
+
+err_fput:
+ fput(file);
+err_quota:
+ key_payload_reserve(key, 0);
+error:
+ return ret;
+}
+
+/*
+ * dispose of the quota and backing store of a revoked big_key
+ * - called with the key sem write-locked
+ */
+void big_key_revoke(struct key *key)
+{
+ struct path *path = (struct path *)&key->payload.data2;
+
+ /* clear the quota */
+ key_payload_reserve(key, 0);
+ if (key_is_instantiated(key) && key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD)
+ vfs_truncate(path, 0);
+}
+
+/*
+ * dispose of the data dangling from the corpse of a big_key key
+ */
+void big_key_destroy(struct key *key)
+{
+ if (key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD) {
+ struct path *path = (struct path *)&key->payload.data2;
+ path_put(path);
+ path->mnt = NULL;
+ path->dentry = NULL;
+ } else {
+ kfree(key->payload.data);
+ key->payload.data = NULL;
+ }
+}
+
+/*
+ * describe the big_key key
+ */
+void big_key_describe(const struct key *key, struct seq_file *m)
+{
+ unsigned long datalen = key->type_data.x[1];
+
+ seq_puts(m, key->description);
+
+ if (key_is_instantiated(key))
+ seq_printf(m, ": %lu [%s]",
+ datalen,
+ datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
+}
+
+/*
+ * read the key data
+ * - the key's semaphore is read-locked
+ */
+long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+{
+ unsigned long datalen = key->type_data.x[1];
+ long ret;
+
+ if (!buffer || buflen < datalen)
+ return datalen;
+
+ if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ struct path *path = (struct path *)&key->payload.data2;
+ struct file *file;
+ loff_t pos;
+
+ file = dentry_open(path, O_RDONLY, current_cred());
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ pos = 0;
+ ret = vfs_read(file, buffer, datalen, &pos);
+ fput(file);
+ if (ret >= 0 && ret != datalen)
+ ret = -EIO;
+ } else {
+ ret = datalen;
+ if (copy_to_user(buffer, key->payload.data, datalen) != 0)
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+/*
+ * Module stuff
+ */
+static int __init big_key_init(void)
+{
+ return register_key_type(&key_type_big_key);
+}
+
+static void __exit big_key_cleanup(void)
+{
+ unregister_key_type(&key_type_big_key);
+}
+
+module_init(big_key_init);
+module_exit(big_key_cleanup);
diff --git a/security/keys/compat.c b/security/keys/compat.c
index d65fa7fa29ba..bbd32c729dbb 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -138,6 +138,9 @@ asmlinkage long compat_sys_keyctl(u32 option,
case KEYCTL_INVALIDATE:
return keyctl_invalidate_key(arg2);
+ case KEYCTL_GET_PERSISTENT:
+ return keyctl_get_persistent(arg2, arg3);
+
default:
return -EOPNOTSUPP;
}
diff --git a/security/keys/gc.c b/security/keys/gc.c
index d67c97bb1025..cce621c33dce 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -130,6 +130,13 @@ void key_gc_keytype(struct key_type *ktype)
kleave("");
}
+static int key_gc_keyring_func(const void *object, void *iterator_data)
+{
+ const struct key *key = object;
+ time_t *limit = iterator_data;
+ return key_is_dead(key, *limit);
+}
+
/*
* Garbage collect pointers from a keyring.
*
@@ -138,10 +145,9 @@ void key_gc_keytype(struct key_type *ktype)
*/
static void key_gc_keyring(struct key *keyring, time_t limit)
{
- struct keyring_list *klist;
- int loop;
+ int result;
- kenter("%x", key_serial(keyring));
+ kenter("%x{%s}", keyring->serial, keyring->description ?: "");
if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED)))
@@ -149,27 +155,17 @@ static void key_gc_keyring(struct key *keyring, time_t limit)
/* scan the keyring looking for dead keys */
rcu_read_lock();
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (!klist)
- goto unlock_dont_gc;
-
- loop = klist->nkeys;
- smp_rmb();
- for (loop--; loop >= 0; loop--) {
- struct key *key = rcu_dereference(klist->keys[loop]);
- if (key_is_dead(key, limit))
- goto do_gc;
- }
-
-unlock_dont_gc:
+ result = assoc_array_iterate(&keyring->keys,
+ key_gc_keyring_func, &limit);
rcu_read_unlock();
+ if (result == true)
+ goto do_gc;
+
dont_gc:
kleave(" [no gc]");
return;
do_gc:
- rcu_read_unlock();
-
keyring_gc(keyring, limit);
kleave(" [gc]");
}
@@ -392,7 +388,6 @@ found_unreferenced_key:
*/
found_keyring:
spin_unlock(&key_serial_lock);
- kdebug("scan keyring %d", key->serial);
key_gc_keyring(key, limit);
goto maybe_resched;
diff --git a/security/keys/internal.h b/security/keys/internal.h
index d4f1468b9b50..80b2aac4f50c 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -89,42 +89,53 @@ extern struct key_type *key_type_lookup(const char *type);
extern void key_type_put(struct key_type *ktype);
extern int __key_link_begin(struct key *keyring,
- const struct key_type *type,
- const char *description,
- unsigned long *_prealloc);
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit **_edit);
extern int __key_link_check_live_key(struct key *keyring, struct key *key);
-extern void __key_link(struct key *keyring, struct key *key,
- unsigned long *_prealloc);
+extern void __key_link(struct key *key, struct assoc_array_edit **_edit);
extern void __key_link_end(struct key *keyring,
- struct key_type *type,
- unsigned long prealloc);
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit *edit);
-extern key_ref_t __keyring_search_one(key_ref_t keyring_ref,
- const struct key_type *type,
- const char *description,
- key_perm_t perm);
+extern key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ const struct keyring_index_key *index_key);
extern struct key *keyring_search_instkey(struct key *keyring,
key_serial_t target_id);
+extern int iterate_over_keyring(const struct key *keyring,
+ int (*func)(const struct key *key, void *data),
+ void *data);
+
typedef int (*key_match_func_t)(const struct key *, const void *);
+struct keyring_search_context {
+ struct keyring_index_key index_key;
+ const struct cred *cred;
+ key_match_func_t match;
+ const void *match_data;
+ unsigned flags;
+#define KEYRING_SEARCH_LOOKUP_TYPE 0x0001 /* [as type->def_lookup_type] */
+#define KEYRING_SEARCH_NO_STATE_CHECK 0x0002 /* Skip state checks */
+#define KEYRING_SEARCH_DO_STATE_CHECK 0x0004 /* Override NO_STATE_CHECK */
+#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0008 /* Don't update times */
+#define KEYRING_SEARCH_NO_CHECK_PERM 0x0010 /* Don't check permissions */
+#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0020 /* Give an error on excessive depth */
+
+ int (*iterator)(const void *object, void *iterator_data);
+
+ /* Internal stuff */
+ int skipped_ret;
+ bool possessed;
+ key_ref_t result;
+ struct timespec now;
+};
+
extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
- const struct cred *cred,
- struct key_type *type,
- const void *description,
- key_match_func_t match,
- bool no_state_check);
-
-extern key_ref_t search_my_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- bool no_state_check,
- const struct cred *cred);
-extern key_ref_t search_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- const struct cred *cred);
+ struct keyring_search_context *ctx);
+
+extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
+extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
@@ -202,7 +213,7 @@ extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
/*
* Determine whether a key is dead.
*/
-static inline bool key_is_dead(struct key *key, time_t limit)
+static inline bool key_is_dead(const struct key *key, time_t limit)
{
return
key->flags & ((1 << KEY_FLAG_DEAD) |
@@ -244,6 +255,15 @@ extern long keyctl_invalidate_key(key_serial_t);
extern long keyctl_instantiate_key_common(key_serial_t,
const struct iovec *,
unsigned, size_t, key_serial_t);
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+extern long keyctl_get_persistent(uid_t, key_serial_t);
+extern unsigned persistent_keyring_expiry;
+#else
+static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring)
+{
+ return -EOPNOTSUPP;
+}
+#endif
/*
* Debugging key validation
diff --git a/security/keys/key.c b/security/keys/key.c
index 8fb7c7bd4657..55d110f0aced 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -242,8 +242,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
}
}
- desclen = strlen(desc) + 1;
- quotalen = desclen + type->def_datalen;
+ desclen = strlen(desc);
+ quotalen = desclen + 1 + type->def_datalen;
/* get hold of the key tracking for this user */
user = key_user_lookup(uid);
@@ -277,7 +277,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
goto no_memory_2;
if (desc) {
- key->description = kmemdup(desc, desclen, GFP_KERNEL);
+ key->index_key.desc_len = desclen;
+ key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
if (!key->description)
goto no_memory_3;
}
@@ -285,7 +286,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
atomic_set(&key->usage, 1);
init_rwsem(&key->sem);
lockdep_set_class(&key->sem, &type->lock_class);
- key->type = type;
+ key->index_key.type = type;
key->user = user;
key->quotalen = quotalen;
key->datalen = type->def_datalen;
@@ -299,6 +300,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
+ if (flags & KEY_ALLOC_TRUSTED)
+ key->flags |= 1 << KEY_FLAG_TRUSTED;
memset(&key->type_data, 0, sizeof(key->type_data));
@@ -408,7 +411,7 @@ static int __key_instantiate_and_link(struct key *key,
struct key_preparsed_payload *prep,
struct key *keyring,
struct key *authkey,
- unsigned long *_prealloc)
+ struct assoc_array_edit **_edit)
{
int ret, awaken;
@@ -435,7 +438,7 @@ static int __key_instantiate_and_link(struct key *key,
/* and link it into the destination keyring */
if (keyring)
- __key_link(keyring, key, _prealloc);
+ __key_link(key, _edit);
/* disable the authorisation key */
if (authkey)
@@ -475,7 +478,7 @@ int key_instantiate_and_link(struct key *key,
struct key *authkey)
{
struct key_preparsed_payload prep;
- unsigned long prealloc;
+ struct assoc_array_edit *edit;
int ret;
memset(&prep, 0, sizeof(prep));
@@ -489,17 +492,15 @@ int key_instantiate_and_link(struct key *key,
}
if (keyring) {
- ret = __key_link_begin(keyring, key->type, key->description,
- &prealloc);
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
if (ret < 0)
goto error_free_preparse;
}
- ret = __key_instantiate_and_link(key, &prep, keyring, authkey,
- &prealloc);
+ ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);
if (keyring)
- __key_link_end(keyring, key->type, prealloc);
+ __key_link_end(keyring, &key->index_key, edit);
error_free_preparse:
if (key->type->preparse)
@@ -537,7 +538,7 @@ int key_reject_and_link(struct key *key,
struct key *keyring,
struct key *authkey)
{
- unsigned long prealloc;
+ struct assoc_array_edit *edit;
struct timespec now;
int ret, awaken, link_ret = 0;
@@ -548,8 +549,7 @@ int key_reject_and_link(struct key *key,
ret = -EBUSY;
if (keyring)
- link_ret = __key_link_begin(keyring, key->type,
- key->description, &prealloc);
+ link_ret = __key_link_begin(keyring, &key->index_key, &edit);
mutex_lock(&key_construction_mutex);
@@ -557,9 +557,10 @@ int key_reject_and_link(struct key *key,
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
+ key->type_data.reject_error = -error;
+ smp_wmb();
set_bit(KEY_FLAG_NEGATIVE, &key->flags);
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
- key->type_data.reject_error = -error;
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
@@ -571,7 +572,7 @@ int key_reject_and_link(struct key *key,
/* and link it into the destination keyring */
if (keyring && link_ret == 0)
- __key_link(keyring, key, &prealloc);
+ __key_link(key, &edit);
/* disable the authorisation key */
if (authkey)
@@ -581,7 +582,7 @@ int key_reject_and_link(struct key *key,
mutex_unlock(&key_construction_mutex);
if (keyring)
- __key_link_end(keyring, key->type, prealloc);
+ __key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
@@ -645,7 +646,7 @@ found:
/* this races with key_put(), but that doesn't matter since key_put()
* doesn't actually change the key
*/
- atomic_inc(&key->usage);
+ __key_get(key);
error:
spin_unlock(&key_serial_lock);
@@ -780,25 +781,27 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
key_perm_t perm,
unsigned long flags)
{
- unsigned long prealloc;
+ struct keyring_index_key index_key = {
+ .description = description,
+ };
struct key_preparsed_payload prep;
+ struct assoc_array_edit *edit;
const struct cred *cred = current_cred();
- struct key_type *ktype;
struct key *keyring, *key = NULL;
key_ref_t key_ref;
int ret;
/* look up the key type to see if it's one of the registered kernel
* types */
- ktype = key_type_lookup(type);
- if (IS_ERR(ktype)) {
+ index_key.type = key_type_lookup(type);
+ if (IS_ERR(index_key.type)) {
key_ref = ERR_PTR(-ENODEV);
goto error;
}
key_ref = ERR_PTR(-EINVAL);
- if (!ktype->match || !ktype->instantiate ||
- (!description && !ktype->preparse))
+ if (!index_key.type->match || !index_key.type->instantiate ||
+ (!index_key.description && !index_key.type->preparse))
goto error_put_type;
keyring = key_ref_to_ptr(keyring_ref);
@@ -812,21 +815,28 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
memset(&prep, 0, sizeof(prep));
prep.data = payload;
prep.datalen = plen;
- prep.quotalen = ktype->def_datalen;
- if (ktype->preparse) {
- ret = ktype->preparse(&prep);
+ prep.quotalen = index_key.type->def_datalen;
+ prep.trusted = flags & KEY_ALLOC_TRUSTED;
+ if (index_key.type->preparse) {
+ ret = index_key.type->preparse(&prep);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_put_type;
}
- if (!description)
- description = prep.description;
+ if (!index_key.description)
+ index_key.description = prep.description;
key_ref = ERR_PTR(-EINVAL);
- if (!description)
+ if (!index_key.description)
goto error_free_prep;
}
+ index_key.desc_len = strlen(index_key.description);
+
+ key_ref = ERR_PTR(-EPERM);
+ if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags))
+ goto error_free_prep;
+ flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0;
- ret = __key_link_begin(keyring, ktype, description, &prealloc);
+ ret = __key_link_begin(keyring, &index_key, &edit);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_free_prep;
@@ -844,10 +854,9 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
* key of the same type and description in the destination keyring and
* update that instead if possible
*/
- if (ktype->update) {
- key_ref = __keyring_search_one(keyring_ref, ktype, description,
- 0);
- if (!IS_ERR(key_ref))
+ if (index_key.type->update) {
+ key_ref = find_key_to_update(keyring_ref, &index_key);
+ if (key_ref)
goto found_matching_key;
}
@@ -856,23 +865,24 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
perm |= KEY_USR_VIEW;
- if (ktype->read)
+ if (index_key.type->read)
perm |= KEY_POS_READ;
- if (ktype == &key_type_keyring || ktype->update)
+ if (index_key.type == &key_type_keyring ||
+ index_key.type->update)
perm |= KEY_POS_WRITE;
}
/* allocate a new key */
- key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
- perm, flags);
+ key = key_alloc(index_key.type, index_key.description,
+ cred->fsuid, cred->fsgid, cred, perm, flags);
if (IS_ERR(key)) {
key_ref = ERR_CAST(key);
goto error_link_end;
}
/* instantiate it and link it into the target keyring */
- ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &prealloc);
+ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
if (ret < 0) {
key_put(key);
key_ref = ERR_PTR(ret);
@@ -882,12 +892,12 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
error_link_end:
- __key_link_end(keyring, ktype, prealloc);
+ __key_link_end(keyring, &index_key, edit);
error_free_prep:
- if (ktype->preparse)
- ktype->free_preparse(&prep);
+ if (index_key.type->preparse)
+ index_key.type->free_preparse(&prep);
error_put_type:
- key_type_put(ktype);
+ key_type_put(index_key.type);
error:
return key_ref;
@@ -895,7 +905,7 @@ error:
/* we found a matching key, so we're going to try to update it
* - we can drop the locks first as we have the key pinned
*/
- __key_link_end(keyring, ktype, prealloc);
+ __key_link_end(keyring, &index_key, edit);
key_ref = __key_update(key_ref, &prep);
goto error_free_prep;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 33cfd27b4de2..cee72ce64222 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1667,6 +1667,9 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
case KEYCTL_INVALIDATE:
return keyctl_invalidate_key((key_serial_t) arg2);
+ case KEYCTL_GET_PERSISTENT:
+ return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3);
+
default:
return -EOPNOTSUPP;
}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 6ece7f2e5707..d80311e571c3 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1,6 +1,6 @@
/* Keyring handling
*
- * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -17,25 +17,11 @@
#include <linux/seq_file.h>
#include <linux/err.h>
#include <keys/keyring-type.h>
+#include <keys/user-type.h>
+#include <linux/assoc_array_priv.h>
#include <linux/uaccess.h>
#include "internal.h"
-#define rcu_dereference_locked_keyring(keyring) \
- (rcu_dereference_protected( \
- (keyring)->payload.subscriptions, \
- rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))
-
-#define rcu_deref_link_locked(klist, index, keyring) \
- (rcu_dereference_protected( \
- (klist)->keys[index], \
- rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))
-
-#define MAX_KEYRING_LINKS \
- min_t(size_t, USHRT_MAX - 1, \
- ((PAGE_SIZE - sizeof(struct keyring_list)) / sizeof(struct key *)))
-
-#define KEY_LINK_FIXQUOTA 1UL
-
/*
* When plumbing the depths of the key tree, this sets a hard limit
 * on how deep we're willing to go.
@@ -47,6 +33,28 @@
*/
#define KEYRING_NAME_HASH_SIZE (1 << 5)
+/*
+ * We mark pointers we pass to the associative array with bit 1 set if
+ * they're keyrings and clear otherwise.
+ */
+#define KEYRING_PTR_SUBTYPE 0x2UL
+
+static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & KEYRING_PTR_SUBTYPE;
+}
+static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x)
+{
+ void *object = assoc_array_ptr_to_leaf(x);
+ return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE);
+}
+static inline void *keyring_key_to_ptr(struct key *key)
+{
+ if (key->type == &key_type_keyring)
+ return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE);
+ return key;
+}
+
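
The three helpers above depend on struct key allocations being at least 4-byte aligned, which leaves bit 1 of the pointer free to carry the "this leaf is a keyring" tag. A minimal userspace sketch of the same low-bit tagging idea, not part of the patch and using illustrative names only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_KEYRING 0x2UL

static void *tag_ptr(void *p, int is_keyring)
{
	uintptr_t v = (uintptr_t)p;

	assert((v & TAG_KEYRING) == 0);	/* needs >= 4-byte alignment */
	return (void *)(is_keyring ? (v | TAG_KEYRING) : v);
}

static void *untag_ptr(const void *p)
{
	return (void *)((uintptr_t)p & ~TAG_KEYRING);
}

static int ptr_is_keyring(const void *p)
{
	return ((uintptr_t)p & TAG_KEYRING) != 0;
}

int main(void)
{
	int obj;	/* stands in for a struct key */
	void *tagged = tag_ptr(&obj, 1);

	printf("keyring bit=%d untagged=%p obj=%p\n",
	       ptr_is_keyring(tagged), untag_ptr(tagged), (void *)&obj);
	return 0;
}
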
static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE];
static DEFINE_RWLOCK(keyring_name_lock);
@@ -67,7 +75,6 @@ static inline unsigned keyring_hash(const char *desc)
*/
static int keyring_instantiate(struct key *keyring,
struct key_preparsed_payload *prep);
-static int keyring_match(const struct key *keyring, const void *criterion);
static void keyring_revoke(struct key *keyring);
static void keyring_destroy(struct key *keyring);
static void keyring_describe(const struct key *keyring, struct seq_file *m);
@@ -76,9 +83,9 @@ static long keyring_read(const struct key *keyring,
struct key_type key_type_keyring = {
.name = "keyring",
- .def_datalen = sizeof(struct keyring_list),
+ .def_datalen = 0,
.instantiate = keyring_instantiate,
- .match = keyring_match,
+ .match = user_match,
.revoke = keyring_revoke,
.destroy = keyring_destroy,
.describe = keyring_describe,
@@ -127,6 +134,7 @@ static int keyring_instantiate(struct key *keyring,
ret = -EINVAL;
if (prep->datalen == 0) {
+ assoc_array_init(&keyring->keys);
/* make the keyring available by name if it has one */
keyring_publish_name(keyring);
ret = 0;
@@ -136,15 +144,226 @@ static int keyring_instantiate(struct key *keyring,
}
/*
- * Match keyrings on their name
+ * Multiply 64 bits by 32 bits to get 96 bits, then fold back to 64 bits. Ideally we'd
+ * fold the carry back too, but that requires inline asm.
+ */
+static u64 mult_64x32_and_fold(u64 x, u32 y)
+{
+ u64 hi = (u64)(u32)(x >> 32) * y;
+ u64 lo = (u64)(u32)(x) * y;
+ return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
+}
+
+/*
+ * Hash a key type and description.
+ */
+static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key)
+{
+ const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
+ const unsigned long level_mask = ASSOC_ARRAY_LEVEL_STEP_MASK;
+ const char *description = index_key->description;
+ unsigned long hash, type;
+ u32 piece;
+ u64 acc;
+ int n, desc_len = index_key->desc_len;
+
+ type = (unsigned long)index_key->type;
+
+ acc = mult_64x32_and_fold(type, desc_len + 13);
+ acc = mult_64x32_and_fold(acc, 9207);
+ for (;;) {
+ n = desc_len;
+ if (n <= 0)
+ break;
+ if (n > 4)
+ n = 4;
+ piece = 0;
+ memcpy(&piece, description, n);
+ description += n;
+ desc_len -= n;
+ acc = mult_64x32_and_fold(acc, piece);
+ acc = mult_64x32_and_fold(acc, 9207);
+ }
+
+ /* Fold the hash down to 32 bits if need be. */
+ hash = acc;
+ if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32)
+ hash ^= acc >> 32;
+
+ /* Squidge all the keyrings into a separate part of the tree from
+ * ordinary keys by making sure the lowest level segment in the hash is
+ * zero for keyrings and non-zero otherwise.
+ */
+ if (index_key->type != &key_type_keyring && (hash & level_mask) == 0)
+ return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
+ if (index_key->type == &key_type_keyring && (hash & level_mask) != 0)
+ return (hash + (hash << level_shift)) & ~level_mask;
+ return hash;
+}
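
As a hedged, standalone illustration, the multiply-and-fold step above can be exercised in userspace with the same arithmetic; the input values below are invented for the demo:

#include <stdint.h>
#include <stdio.h>

/* Multiply a 64-bit value by a 32-bit value, giving a 96-bit product,
 * then fold the top 32 bits back into the low 64 bits.
 */
static uint64_t mult_64x32_and_fold(uint64_t x, uint32_t y)
{
	uint64_t hi = (uint64_t)(uint32_t)(x >> 32) * y;
	uint64_t lo = (uint64_t)(uint32_t)x * y;

	return lo + ((uint64_t)(uint32_t)hi << 32) + (uint32_t)(hi >> 32);
}

int main(void)
{
	/* Illustrative inputs, not values the kernel would actually use */
	uint64_t acc = 0x123456789abcdef0ULL;

	acc = mult_64x32_and_fold(acc, 9207);
	printf("folded: %#llx\n", (unsigned long long)acc);
	return 0;
}
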
+
+/*
+ * Build the next index key chunk.
+ *
+ * On 32-bit systems the index key is laid out as:
+ *
+ * 0 4 5 9...
+ * hash desclen typeptr desc[]
+ *
+ * On 64-bit systems:
+ *
+ * 0 8 9 17...
+ * hash desclen typeptr desc[]
+ *
+ * We return it one word-sized chunk at a time.
+ */
+static unsigned long keyring_get_key_chunk(const void *data, int level)
+{
+ const struct keyring_index_key *index_key = data;
+ unsigned long chunk = 0;
+ long offset = 0;
+ int desc_len = index_key->desc_len, n = sizeof(chunk);
+
+ level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
+ switch (level) {
+ case 0:
+ return hash_key_type_and_desc(index_key);
+ case 1:
+ return ((unsigned long)index_key->type << 8) | desc_len;
+ case 2:
+ if (desc_len == 0)
+ return (u8)((unsigned long)index_key->type >>
+ (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
+ n--;
+ offset = 1;
+ default:
+ offset += sizeof(chunk) - 1;
+ offset += (level - 3) * sizeof(chunk);
+ if (offset >= desc_len)
+ return 0;
+ desc_len -= offset;
+ if (desc_len > n)
+ desc_len = n;
+ offset += desc_len;
+ do {
+ chunk <<= 8;
+ chunk |= ((u8*)index_key->description)[--offset];
+ } while (--desc_len > 0);
+
+ if (level == 2) {
+ chunk <<= 8;
+ chunk |= (u8)((unsigned long)index_key->type >>
+ (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
+ }
+ return chunk;
+ }
+}
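
The default case above packs description bytes into machine words, least significant byte first. A simplified userspace sketch of just that byte-chunking (it omits the hash, length and type-pointer levels, so treat it as illustrative only):

#include <stdio.h>
#include <string.h>

/* Pack bytes [start, start + sizeof(long)) of the description into one
 * machine word, least significant byte first, zero-padding past the end.
 * This mirrors the "chunk <<= 8; chunk |= byte" loop above.
 */
static unsigned long desc_chunk(const char *desc, size_t desc_len, size_t start)
{
	unsigned long chunk = 0;
	size_t n = sizeof(chunk), i;

	if (start >= desc_len)
		return 0;
	if (desc_len - start < n)
		n = desc_len - start;

	for (i = n; i > 0; i--) {
		chunk <<= 8;
		chunk |= (unsigned char)desc[start + i - 1];
	}
	return chunk;
}

int main(void)
{
	const char *desc = "example:description";	/* hypothetical key description */
	size_t len = strlen(desc), off;

	for (off = 0; off < len; off += sizeof(unsigned long))
		printf("chunk @%zu = %#lx\n", off, desc_chunk(desc, len, off));
	return 0;
}
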
+
+static unsigned long keyring_get_object_key_chunk(const void *object, int level)
+{
+ const struct key *key = keyring_ptr_to_key(object);
+ return keyring_get_key_chunk(&key->index_key, level);
+}
+
+static bool keyring_compare_object(const void *object, const void *data)
+{
+ const struct keyring_index_key *index_key = data;
+ const struct key *key = keyring_ptr_to_key(object);
+
+ return key->index_key.type == index_key->type &&
+ key->index_key.desc_len == index_key->desc_len &&
+ memcmp(key->index_key.description, index_key->description,
+ index_key->desc_len) == 0;
+}
+
+/*
+ * Compare the index keys of a pair of objects and determine the bit position
+ * at which they differ - if they differ.
*/
-static int keyring_match(const struct key *keyring, const void *description)
+static int keyring_diff_objects(const void *_a, const void *_b)
{
- return keyring->description &&
- strcmp(keyring->description, description) == 0;
+ const struct key *key_a = keyring_ptr_to_key(_a);
+ const struct key *key_b = keyring_ptr_to_key(_b);
+ const struct keyring_index_key *a = &key_a->index_key;
+ const struct keyring_index_key *b = &key_b->index_key;
+ unsigned long seg_a, seg_b;
+ int level, i;
+
+ level = 0;
+ seg_a = hash_key_type_and_desc(a);
+ seg_b = hash_key_type_and_desc(b);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+
+ /* The number of bits contributed by the hash is controlled by a
+ * constant in the assoc_array headers. Everything after that can be
+ * dealt with as being machine word-size dependent.
+ */
+ level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
+ seg_a = a->desc_len;
+ seg_b = b->desc_len;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+
+ /* The next bit may not work on big endian */
+ level++;
+ seg_a = (unsigned long)a->type;
+ seg_b = (unsigned long)b->type;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+
+ level += sizeof(unsigned long);
+ if (a->desc_len == 0)
+ goto same;
+
+ i = 0;
+ if (((unsigned long)a->description | (unsigned long)b->description) &
+ (sizeof(unsigned long) - 1)) {
+ do {
+ seg_a = *(unsigned long *)(a->description + i);
+ seg_b = *(unsigned long *)(b->description + i);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ_plus_i;
+ i += sizeof(unsigned long);
+ } while (i < (a->desc_len & (sizeof(unsigned long) - 1)));
+ }
+
+ for (; i < a->desc_len; i++) {
+ seg_a = *(unsigned char *)(a->description + i);
+ seg_b = *(unsigned char *)(b->description + i);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ_plus_i;
+ }
+
+same:
+ return -1;
+
+differ_plus_i:
+ level += i;
+differ:
+ i = level * 8 + __ffs(seg_a ^ seg_b);
+ return i;
}
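
keyring_diff_objects() must report the bit position at which two index keys first differ so the associative array knows where to split. A hedged byte-at-a-time sketch of that calculation (illustrative helper, not the kernel function; the GCC/Clang __builtin_ctz is assumed to be available):

#include <stdio.h>

/* Return the bit index at which two equal-length buffers first differ,
 * or -1 if they are identical.  Mirrors the level*8 + __ffs(seg_a ^ seg_b)
 * calculation above, but works byte by byte for simplicity.
 */
static int first_differing_bit(const unsigned char *a, const unsigned char *b,
			       size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		unsigned int x = a[i] ^ b[i];

		if (x)
			return (int)(i * 8) + __builtin_ctz(x);
	}
	return -1;
}

int main(void)
{
	unsigned char p[] = "user;example-one";
	unsigned char q[] = "user;example-two";

	printf("differ at bit %d\n",
	       first_differing_bit(p, q, sizeof(p) - 1));
	return 0;
}
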
/*
+ * Free an object after stripping the keyring flag off the pointer.
+ */
+static void keyring_free_object(void *object)
+{
+ key_put(keyring_ptr_to_key(object));
+}
+
+/*
+ * Operations for keyring management by the index-tree routines.
+ */
+static const struct assoc_array_ops keyring_assoc_array_ops = {
+ .get_key_chunk = keyring_get_key_chunk,
+ .get_object_key_chunk = keyring_get_object_key_chunk,
+ .compare_object = keyring_compare_object,
+ .diff_objects = keyring_diff_objects,
+ .free_object = keyring_free_object,
+};
+
+/*
* Clean up a keyring when it is destroyed. Unpublish its name if it had one
* and dispose of its data.
*
@@ -155,9 +374,6 @@ static int keyring_match(const struct key *keyring, const void *description)
*/
static void keyring_destroy(struct key *keyring)
{
- struct keyring_list *klist;
- int loop;
-
if (keyring->description) {
write_lock(&keyring_name_lock);
@@ -168,12 +384,7 @@ static void keyring_destroy(struct key *keyring)
write_unlock(&keyring_name_lock);
}
- klist = rcu_access_pointer(keyring->payload.subscriptions);
- if (klist) {
- for (loop = klist->nkeys - 1; loop >= 0; loop--)
- key_put(rcu_access_pointer(klist->keys[loop]));
- kfree(klist);
- }
+ assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops);
}
/*
@@ -181,76 +392,88 @@ static void keyring_destroy(struct key *keyring)
*/
static void keyring_describe(const struct key *keyring, struct seq_file *m)
{
- struct keyring_list *klist;
-
if (keyring->description)
seq_puts(m, keyring->description);
else
seq_puts(m, "[anon]");
if (key_is_instantiated(keyring)) {
- rcu_read_lock();
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (klist)
- seq_printf(m, ": %u/%u", klist->nkeys, klist->maxkeys);
+ if (keyring->keys.nr_leaves_on_tree != 0)
+ seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
else
seq_puts(m, ": empty");
- rcu_read_unlock();
}
}
+struct keyring_read_iterator_context {
+ size_t qty;
+ size_t count;
+ key_serial_t __user *buffer;
+};
+
+static int keyring_read_iterator(const void *object, void *data)
+{
+ struct keyring_read_iterator_context *ctx = data;
+ const struct key *key = keyring_ptr_to_key(object);
+ int ret;
+
+ kenter("{%s,%d},,{%zu/%zu}",
+ key->type->name, key->serial, ctx->count, ctx->qty);
+
+ if (ctx->count >= ctx->qty)
+ return 1;
+
+ ret = put_user(key->serial, ctx->buffer);
+ if (ret < 0)
+ return ret;
+ ctx->buffer++;
+ ctx->count += sizeof(key->serial);
+ return 0;
+}
+
/*
* Read a list of key IDs from the keyring's contents in binary form
*
- * The keyring's semaphore is read-locked by the caller.
+ * The keyring's semaphore is read-locked by the caller. This prevents someone
+ * from modifying it under us - which could cause us to read key IDs multiple
+ * times.
*/
static long keyring_read(const struct key *keyring,
char __user *buffer, size_t buflen)
{
- struct keyring_list *klist;
- struct key *key;
- size_t qty, tmp;
- int loop, ret;
+ struct keyring_read_iterator_context ctx;
+ unsigned long nr_keys;
+ int ret;
- ret = 0;
- klist = rcu_dereference_locked_keyring(keyring);
- if (klist) {
- /* calculate how much data we could return */
- qty = klist->nkeys * sizeof(key_serial_t);
-
- if (buffer && buflen > 0) {
- if (buflen > qty)
- buflen = qty;
-
- /* copy the IDs of the subscribed keys into the
- * buffer */
- ret = -EFAULT;
-
- for (loop = 0; loop < klist->nkeys; loop++) {
- key = rcu_deref_link_locked(klist, loop,
- keyring);
-
- tmp = sizeof(key_serial_t);
- if (tmp > buflen)
- tmp = buflen;
-
- if (copy_to_user(buffer,
- &key->serial,
- tmp) != 0)
- goto error;
-
- buflen -= tmp;
- if (buflen == 0)
- break;
- buffer += tmp;
- }
- }
+ kenter("{%d},,%zu", key_serial(keyring), buflen);
+
+ if (buflen & (sizeof(key_serial_t) - 1))
+ return -EINVAL;
+
+ nr_keys = keyring->keys.nr_leaves_on_tree;
+ if (nr_keys == 0)
+ return 0;
+
+ /* Calculate how much data we could return */
+ ctx.qty = nr_keys * sizeof(key_serial_t);
+
+ if (!buffer || !buflen)
+ return ctx.qty;
+
+ if (buflen > ctx.qty)
+ ctx.qty = buflen;
- ret = qty;
+ /* Copy the IDs of the subscribed keys into the buffer */
+ ctx.buffer = (key_serial_t __user *)buffer;
+ ctx.count = 0;
+ ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
+ if (ret < 0) {
+ kleave(" = %d [iterate]", ret);
+ return ret;
}
-error:
- return ret;
+ kleave(" = %zu [ok]", ctx.count);
+ return ctx.count;
}
/*
@@ -277,227 +500,361 @@ struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
}
EXPORT_SYMBOL(keyring_alloc);
-/**
- * keyring_search_aux - Search a keyring tree for a key matching some criteria
- * @keyring_ref: A pointer to the keyring with possession indicator.
- * @cred: The credentials to use for permissions checks.
- * @type: The type of key to search for.
- * @description: Parameter for @match.
- * @match: Function to rule on whether or not a key is the one required.
- * @no_state_check: Don't check if a matching key is bad
- *
- * Search the supplied keyring tree for a key that matches the criteria given.
- * The root keyring and any linked keyrings must grant Search permission to the
- * caller to be searchable and keys can only be found if they too grant Search
- * to the caller. The possession flag on the root keyring pointer controls use
- * of the possessor bits in permissions checking of the entire tree. In
- * addition, the LSM gets to forbid keyring searches and key matches.
- *
- * The search is performed as a breadth-then-depth search up to the prescribed
- * limit (KEYRING_SEARCH_MAX_DEPTH).
- *
- * Keys are matched to the type provided and are then filtered by the match
- * function, which is given the description to use in any way it sees fit. The
- * match function may use any attributes of a key that it wishes to to
- * determine the match. Normally the match function from the key type would be
- * used.
- *
- * RCU is used to prevent the keyring key lists from disappearing without the
- * need to take lots of locks.
- *
- * Returns a pointer to the found key and increments the key usage count if
- * successful; -EAGAIN if no matching keys were found, or if expired or revoked
- * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
- * specified keyring wasn't a keyring.
- *
- * In the case of a successful return, the possession attribute from
- * @keyring_ref is propagated to the returned key reference.
+/*
+ * Iteration function to consider each key found.
*/
-key_ref_t keyring_search_aux(key_ref_t keyring_ref,
- const struct cred *cred,
- struct key_type *type,
- const void *description,
- key_match_func_t match,
- bool no_state_check)
+static int keyring_search_iterator(const void *object, void *iterator_data)
{
- struct {
- /* Need a separate keylist pointer for RCU purposes */
- struct key *keyring;
- struct keyring_list *keylist;
- int kix;
- } stack[KEYRING_SEARCH_MAX_DEPTH];
-
- struct keyring_list *keylist;
- struct timespec now;
- unsigned long possessed, kflags;
- struct key *keyring, *key;
- key_ref_t key_ref;
- long err;
- int sp, nkeys, kix;
+ struct keyring_search_context *ctx = iterator_data;
+ const struct key *key = keyring_ptr_to_key(object);
+ unsigned long kflags = key->flags;
- keyring = key_ref_to_ptr(keyring_ref);
- possessed = is_key_possessed(keyring_ref);
- key_check(keyring);
+ kenter("{%d}", key->serial);
- /* top keyring must have search permission to begin the search */
- err = key_task_permission(keyring_ref, cred, KEY_SEARCH);
- if (err < 0) {
- key_ref = ERR_PTR(err);
- goto error;
+ /* ignore keys not of this type */
+ if (key->type != ctx->index_key.type) {
+ kleave(" = 0 [!type]");
+ return 0;
}
- key_ref = ERR_PTR(-ENOTDIR);
- if (keyring->type != &key_type_keyring)
- goto error;
+ /* skip invalidated, revoked and expired keys */
+ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))) {
+ ctx->result = ERR_PTR(-EKEYREVOKED);
+ kleave(" = %d [invrev]", ctx->skipped_ret);
+ goto skipped;
+ }
- rcu_read_lock();
+ if (key->expiry && ctx->now.tv_sec >= key->expiry) {
+ ctx->result = ERR_PTR(-EKEYEXPIRED);
+ kleave(" = %d [expire]", ctx->skipped_ret);
+ goto skipped;
+ }
+ }
- now = current_kernel_time();
- err = -EAGAIN;
- sp = 0;
-
- /* firstly we should check to see if this top-level keyring is what we
- * are looking for */
- key_ref = ERR_PTR(-EAGAIN);
- kflags = keyring->flags;
- if (keyring->type == type && match(keyring, description)) {
- key = keyring;
- if (no_state_check)
- goto found;
+ /* keys that don't match */
+ if (!ctx->match(key, ctx->match_data)) {
+ kleave(" = 0 [!match]");
+ return 0;
+ }
- /* check it isn't negative and hasn't expired or been
- * revoked */
- if (kflags & (1 << KEY_FLAG_REVOKED))
- goto error_2;
- if (key->expiry && now.tv_sec >= key->expiry)
- goto error_2;
- key_ref = ERR_PTR(key->type_data.reject_error);
- if (kflags & (1 << KEY_FLAG_NEGATIVE))
- goto error_2;
- goto found;
+ /* key must have search permissions */
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+ key_task_permission(make_key_ref(key, ctx->possessed),
+ ctx->cred, KEY_SEARCH) < 0) {
+ ctx->result = ERR_PTR(-EACCES);
+ kleave(" = %d [!perm]", ctx->skipped_ret);
+ goto skipped;
}
- /* otherwise, the top keyring must not be revoked, expired, or
- * negatively instantiated if we are to search it */
- key_ref = ERR_PTR(-EAGAIN);
- if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED) |
- (1 << KEY_FLAG_NEGATIVE)) ||
- (keyring->expiry && now.tv_sec >= keyring->expiry))
- goto error_2;
-
- /* start processing a new keyring */
-descend:
- kflags = keyring->flags;
- if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED)))
- goto not_this_keyring;
+ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ /* we set a different error code if we pass a negative key */
+ if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
+ smp_rmb();
+ ctx->result = ERR_PTR(key->type_data.reject_error);
+ kleave(" = %d [neg]", ctx->skipped_ret);
+ goto skipped;
+ }
+ }
- keylist = rcu_dereference(keyring->payload.subscriptions);
- if (!keylist)
- goto not_this_keyring;
+ /* Found */
+ ctx->result = make_key_ref(key, ctx->possessed);
+ kleave(" = 1 [found]");
+ return 1;
- /* iterate through the keys in this keyring first */
- nkeys = keylist->nkeys;
- smp_rmb();
- for (kix = 0; kix < nkeys; kix++) {
- key = rcu_dereference(keylist->keys[kix]);
- kflags = key->flags;
+skipped:
+ return ctx->skipped_ret;
+}
- /* ignore keys not of this type */
- if (key->type != type)
- continue;
+/*
+ * Search inside a keyring for a key. We can search by walking to it
+ * directly based on its index-key or we can iterate over the entire
+ * tree looking for it, based on the match function.
+ */
+static int search_keyring(struct key *keyring, struct keyring_search_context *ctx)
+{
+ if ((ctx->flags & KEYRING_SEARCH_LOOKUP_TYPE) ==
+ KEYRING_SEARCH_LOOKUP_DIRECT) {
+ const void *object;
+
+ object = assoc_array_find(&keyring->keys,
+ &keyring_assoc_array_ops,
+ &ctx->index_key);
+ return object ? ctx->iterator(object, ctx) : 0;
+ }
+ return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx);
+}
- /* skip invalidated, revoked and expired keys */
- if (!no_state_check) {
- if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED)))
- continue;
+/*
+ * Search a tree of keyrings that point to other keyrings up to the maximum
+ * depth.
+ */
+static bool search_nested_keyrings(struct key *keyring,
+ struct keyring_search_context *ctx)
+{
+ struct {
+ struct key *keyring;
+ struct assoc_array_node *node;
+ int slot;
+ } stack[KEYRING_SEARCH_MAX_DEPTH];
- if (key->expiry && now.tv_sec >= key->expiry)
- continue;
- }
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *ptr;
+ struct key *key;
+ int sp = 0, slot;
- /* keys that don't match */
- if (!match(key, description))
- continue;
+ kenter("{%d},{%s,%s}",
+ keyring->serial,
+ ctx->index_key.type->name,
+ ctx->index_key.description);
- /* key must have search permissions */
- if (key_task_permission(make_key_ref(key, possessed),
- cred, KEY_SEARCH) < 0)
- continue;
+ if (ctx->index_key.description)
+ ctx->index_key.desc_len = strlen(ctx->index_key.description);
- if (no_state_check)
+ /* Check to see if this top-level keyring is what we are looking for
+ * and whether it is valid or not.
+ */
+ if (ctx->flags & KEYRING_SEARCH_LOOKUP_ITERATE ||
+ keyring_compare_object(keyring, &ctx->index_key)) {
+ ctx->skipped_ret = 2;
+ ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK;
+ switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
+ case 1:
goto found;
-
- /* we set a different error code if we pass a negative key */
- if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
- err = key->type_data.reject_error;
- continue;
+ case 2:
+ return false;
+ default:
+ break;
}
+ }
+
+ ctx->skipped_ret = 0;
+ if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK)
+ ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK;
+
+ /* Start processing a new keyring */
+descend_to_keyring:
+ kdebug("descend to %d", keyring->serial);
+ if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED)))
+ goto not_this_keyring;
+ /* Search through the keys in this keyring before searching its
+ * subtrees.
+ */
+ if (search_keyring(keyring, ctx))
goto found;
- }
- /* search through the keyrings nested in this one */
- kix = 0;
-ascend:
- nkeys = keylist->nkeys;
- smp_rmb();
- for (; kix < nkeys; kix++) {
- key = rcu_dereference(keylist->keys[kix]);
- if (key->type != &key_type_keyring)
- continue;
+ /* Then manually iterate through the keyrings nested in this one.
+ *
+ * Start from the root node of the index tree. Because of the way the
+ * hash function has been set up, keyrings cluster on the leftmost
+ * branch of the root node (root slot 0) or in the root node itself.
+ * Non-keyrings avoid the leftmost branch of the root entirely (root
+ * slots 1-15).
+ */
+ ptr = ACCESS_ONCE(keyring->keys.root);
+ if (!ptr)
+ goto not_this_keyring;
- /* recursively search nested keyrings
- * - only search keyrings for which we have search permission
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ /* If the root is a shortcut, either the keyring only contains
+ * keyring pointers (everything clusters behind root slot 0) or it
+ * doesn't contain any keyring pointers.
*/
- if (sp >= KEYRING_SEARCH_MAX_DEPTH)
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ smp_read_barrier_depends();
+ if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
+ goto not_this_keyring;
+
+ ptr = ACCESS_ONCE(shortcut->next_node);
+ node = assoc_array_ptr_to_node(ptr);
+ goto begin_node;
+ }
+
+ node = assoc_array_ptr_to_node(ptr);
+ smp_read_barrier_depends();
+
+ ptr = node->slots[0];
+ if (!assoc_array_ptr_is_meta(ptr))
+ goto begin_node;
+
+descend_to_node:
+ /* Descend to a more distal node in this keyring's content tree and go
+ * through that.
+ */
+ kdebug("descend");
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ smp_read_barrier_depends();
+ ptr = ACCESS_ONCE(shortcut->next_node);
+ BUG_ON(!assoc_array_ptr_is_node(ptr));
+ node = assoc_array_ptr_to_node(ptr);
+ }
+
+begin_node:
+ kdebug("begin_node");
+ smp_read_barrier_depends();
+ slot = 0;
+ascend_to_node:
+ /* Go through the slots in a node */
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = ACCESS_ONCE(node->slots[slot]);
+
+ if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
+ goto descend_to_node;
+
+ if (!keyring_ptr_is_keyring(ptr))
continue;
- if (key_task_permission(make_key_ref(key, possessed),
- cred, KEY_SEARCH) < 0)
+ key = keyring_ptr_to_key(ptr);
+
+ if (sp >= KEYRING_SEARCH_MAX_DEPTH) {
+ if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) {
+ ctx->result = ERR_PTR(-ELOOP);
+ return false;
+ }
+ goto not_this_keyring;
+ }
+
+ /* Search a nested keyring */
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+ key_task_permission(make_key_ref(key, ctx->possessed),
+ ctx->cred, KEY_SEARCH) < 0)
continue;
/* stack the current position */
stack[sp].keyring = keyring;
- stack[sp].keylist = keylist;
- stack[sp].kix = kix;
+ stack[sp].node = node;
+ stack[sp].slot = slot;
sp++;
/* begin again with the new keyring */
keyring = key;
- goto descend;
+ goto descend_to_keyring;
+ }
+
+ /* We've dealt with all the slots in the current node, so now we need
+ * to ascend to the parent and continue processing there.
+ */
+ ptr = ACCESS_ONCE(node->back_pointer);
+ slot = node->parent_slot;
+
+ if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ smp_read_barrier_depends();
+ ptr = ACCESS_ONCE(shortcut->back_pointer);
+ slot = shortcut->parent_slot;
+ }
+ if (!ptr)
+ goto not_this_keyring;
+ node = assoc_array_ptr_to_node(ptr);
+ smp_read_barrier_depends();
+ slot++;
+
+ /* If we've ascended to the root (zero backpointer), we must have just
+ * finished processing the leftmost branch rather than the root slots -
+ * so there can't be any more keyrings for us to find.
+ */
+ if (node->back_pointer) {
+ kdebug("ascend %d", slot);
+ goto ascend_to_node;
}
- /* the keyring we're looking at was disqualified or didn't contain a
- * matching key */
+ /* The keyring we're looking at was disqualified or didn't contain a
+ * matching key.
+ */
not_this_keyring:
- if (sp > 0) {
- /* resume the processing of a keyring higher up in the tree */
- sp--;
- keyring = stack[sp].keyring;
- keylist = stack[sp].keylist;
- kix = stack[sp].kix + 1;
- goto ascend;
+ kdebug("not_this_keyring %d", sp);
+ if (sp <= 0) {
+ kleave(" = false");
+ return false;
}
- key_ref = ERR_PTR(err);
- goto error_2;
+ /* Resume the processing of a keyring higher up in the tree */
+ sp--;
+ keyring = stack[sp].keyring;
+ node = stack[sp].node;
+ slot = stack[sp].slot + 1;
+ kdebug("ascend to %d [%d]", keyring->serial, slot);
+ goto ascend_to_node;
- /* we found a viable match */
+ /* We found a viable match */
found:
- atomic_inc(&key->usage);
- key->last_used_at = now.tv_sec;
- keyring->last_used_at = now.tv_sec;
- while (sp > 0)
- stack[--sp].keyring->last_used_at = now.tv_sec;
+ key = key_ref_to_ptr(ctx->result);
key_check(key);
- key_ref = make_key_ref(key, possessed);
-error_2:
+ if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
+ key->last_used_at = ctx->now.tv_sec;
+ keyring->last_used_at = ctx->now.tv_sec;
+ while (sp > 0)
+ stack[--sp].keyring->last_used_at = ctx->now.tv_sec;
+ }
+ kleave(" = true");
+ return true;
+}
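
search_nested_keyrings() replaces recursion with an explicit, depth-limited stack of (keyring, node, slot) positions. A compact userspace sketch of the same bounded, stack-based walk over a generic tree, with illustrative types standing in for the assoc_array nodes, shortcuts and back pointers:

#include <stdio.h>

#define FAN_OUT		4
#define MAX_DEPTH	6

struct node {
	struct node *slots[FAN_OUT];	/* child subtrees, may be NULL */
	int value;			/* leaf payload for the demo */
};

/* Visit every node reachable from 'root' without recursion, refusing to
 * descend more than MAX_DEPTH levels - the same shape as the
 * stack[KEYRING_SEARCH_MAX_DEPTH] walk above.
 */
static int walk(struct node *root)
{
	struct { struct node *node; int slot; } stack[MAX_DEPTH];
	struct node *node = root;
	int sp = 0, slot = 0;

	printf("visit %d\n", node->value);
	for (;;) {
		while (slot < FAN_OUT) {
			struct node *child = node->slots[slot];

			if (!child) {
				slot++;
				continue;
			}
			if (sp >= MAX_DEPTH)
				return -1;	/* too deep, like -ELOOP */
			/* stack the current position and descend */
			stack[sp].node = node;
			stack[sp].slot = slot;
			sp++;
			node = child;
			slot = 0;
			printf("visit %d\n", node->value);
		}
		/* ascend to the parent and resume at the next slot */
		if (sp == 0)
			return 0;
		sp--;
		node = stack[sp].node;
		slot = stack[sp].slot + 1;
	}
}

int main(void)
{
	struct node leaf1 = { .value = 1 }, leaf2 = { .value = 2 };
	struct node root = { .slots = { &leaf1, &leaf2 }, .value = 0 };

	return walk(&root) ? 1 : 0;
}
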
+
+/**
+ * keyring_search_aux - Search a keyring tree for a key matching some criteria
+ * @keyring_ref: A pointer to the keyring with possession indicator.
+ * @ctx: The keyring search context.
+ *
+ * Search the supplied keyring tree for a key that matches the criteria given.
+ * The root keyring and any linked keyrings must grant Search permission to the
+ * caller to be searchable and keys can only be found if they too grant Search
+ * to the caller. The possession flag on the root keyring pointer controls use
+ * of the possessor bits in permissions checking of the entire tree. In
+ * addition, the LSM gets to forbid keyring searches and key matches.
+ *
+ * The search is performed as a breadth-then-depth search up to the prescribed
+ * limit (KEYRING_SEARCH_MAX_DEPTH).
+ *
+ * Keys are matched to the type provided and are then filtered by the match
+ * function, which is given the description to use in any way it sees fit. The
+ * match function may use any attributes of a key that it wishes in order to
+ * determine the match. Normally the match function from the key type would be
+ * used.
+ *
+ * RCU can be used to prevent the keyring key lists from disappearing without
+ * the need to take lots of locks.
+ *
+ * Returns a pointer to the found key and increments the key usage count if
+ * successful; -EAGAIN if no matching keys were found, or if expired or revoked
+ * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
+ * specified keyring wasn't a keyring.
+ *
+ * In the case of a successful return, the possession attribute from
+ * @keyring_ref is propagated to the returned key reference.
+ */
+key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+ struct keyring_search_context *ctx)
+{
+ struct key *keyring;
+ long err;
+
+ ctx->iterator = keyring_search_iterator;
+ ctx->possessed = is_key_possessed(keyring_ref);
+ ctx->result = ERR_PTR(-EAGAIN);
+
+ keyring = key_ref_to_ptr(keyring_ref);
+ key_check(keyring);
+
+ if (keyring->type != &key_type_keyring)
+ return ERR_PTR(-ENOTDIR);
+
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) {
+ err = key_task_permission(keyring_ref, ctx->cred, KEY_SEARCH);
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
+ rcu_read_lock();
+ ctx->now = current_kernel_time();
+ if (search_nested_keyrings(keyring, ctx))
+ __key_get(key_ref_to_ptr(ctx->result));
rcu_read_unlock();
-error:
- return key_ref;
+ return ctx->result;
}
/**
@@ -507,77 +864,73 @@ error:
* @description: The name of the keyring we want to find.
*
* As keyring_search_aux() above, but using the current task's credentials and
- * type's default matching function.
+ * type's default matching function and preferred search method.
*/
key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
const char *description)
{
- if (!type->match)
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match = type->match,
+ .match_data = description,
+ .flags = (type->def_lookup_type |
+ KEYRING_SEARCH_DO_STATE_CHECK),
+ };
+
+ if (!ctx.match)
return ERR_PTR(-ENOKEY);
- return keyring_search_aux(keyring, current->cred,
- type, description, type->match, false);
+ return keyring_search_aux(keyring, &ctx);
}
EXPORT_SYMBOL(keyring_search);
/*
- * Search the given keyring only (no recursion).
+ * Search the given keyring for a key that might be updated.
*
* The caller must guarantee that the keyring is a keyring and that the
- * permission is granted to search the keyring as no check is made here.
- *
- * RCU is used to make it unnecessary to lock the keyring key list here.
+ * permission is granted to modify the keyring as no check is made here. The
+ * caller must also hold a lock on the keyring semaphore.
*
* Returns a pointer to the found key with usage count incremented if
- * successful and returns -ENOKEY if not found. Revoked keys and keys not
- * providing the requested permission are skipped over.
+ * successful and returns NULL if not found. Revoked and invalidated keys are
+ * skipped over.
*
* If successful, the possession indicator is propagated from the keyring ref
* to the returned key reference.
*/
-key_ref_t __keyring_search_one(key_ref_t keyring_ref,
- const struct key_type *ktype,
- const char *description,
- key_perm_t perm)
+key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ const struct keyring_index_key *index_key)
{
- struct keyring_list *klist;
- unsigned long possessed;
struct key *keyring, *key;
- int nkeys, loop;
+ const void *object;
keyring = key_ref_to_ptr(keyring_ref);
- possessed = is_key_possessed(keyring_ref);
- rcu_read_lock();
+ kenter("{%d},{%s,%s}",
+ keyring->serial, index_key->type->name, index_key->description);
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (klist) {
- nkeys = klist->nkeys;
- smp_rmb();
- for (loop = 0; loop < nkeys ; loop++) {
- key = rcu_dereference(klist->keys[loop]);
- if (key->type == ktype &&
- (!key->type->match ||
- key->type->match(key, description)) &&
- key_permission(make_key_ref(key, possessed),
- perm) == 0 &&
- !(key->flags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED)))
- )
- goto found;
- }
- }
+ object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops,
+ index_key);
- rcu_read_unlock();
- return ERR_PTR(-ENOKEY);
+ if (object)
+ goto found;
+
+ kleave(" = NULL");
+ return NULL;
found:
- atomic_inc(&key->usage);
- keyring->last_used_at = key->last_used_at =
- current_kernel_time().tv_sec;
- rcu_read_unlock();
- return make_key_ref(key, possessed);
+ key = keyring_ptr_to_key(object);
+ if (key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))) {
+ kleave(" = NULL [x]");
+ return NULL;
+ }
+ __key_get(key);
+ kleave(" = {%d}", key->serial);
+ return make_key_ref(key, is_key_possessed(keyring_ref));
}
/*
@@ -640,6 +993,19 @@ out:
return keyring;
}
+static int keyring_detect_cycle_iterator(const void *object,
+ void *iterator_data)
+{
+ struct keyring_search_context *ctx = iterator_data;
+ const struct key *key = keyring_ptr_to_key(object);
+
+ kenter("{%d}", key->serial);
+
+ BUG_ON(key != ctx->match_data);
+ ctx->result = ERR_PTR(-EDEADLK);
+ return 1;
+}
+
/*
 * See if a cycle will be created by inserting acyclic tree B in acyclic
* tree A at the topmost level (ie: as a direct child of A).
@@ -649,116 +1015,39 @@ out:
*/
static int keyring_detect_cycle(struct key *A, struct key *B)
{
- struct {
- struct keyring_list *keylist;
- int kix;
- } stack[KEYRING_SEARCH_MAX_DEPTH];
-
- struct keyring_list *keylist;
- struct key *subtree, *key;
- int sp, nkeys, kix, ret;
+ struct keyring_search_context ctx = {
+ .index_key = A->index_key,
+ .match_data = A,
+ .iterator = keyring_detect_cycle_iterator,
+ .flags = (KEYRING_SEARCH_LOOKUP_DIRECT |
+ KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_NO_UPDATE_TIME |
+ KEYRING_SEARCH_NO_CHECK_PERM |
+ KEYRING_SEARCH_DETECT_TOO_DEEP),
+ };
rcu_read_lock();
-
- ret = -EDEADLK;
- if (A == B)
- goto cycle_detected;
-
- subtree = B;
- sp = 0;
-
- /* start processing a new keyring */
-descend:
- if (test_bit(KEY_FLAG_REVOKED, &subtree->flags))
- goto not_this_keyring;
-
- keylist = rcu_dereference(subtree->payload.subscriptions);
- if (!keylist)
- goto not_this_keyring;
- kix = 0;
-
-ascend:
- /* iterate through the remaining keys in this keyring */
- nkeys = keylist->nkeys;
- smp_rmb();
- for (; kix < nkeys; kix++) {
- key = rcu_dereference(keylist->keys[kix]);
-
- if (key == A)
- goto cycle_detected;
-
- /* recursively check nested keyrings */
- if (key->type == &key_type_keyring) {
- if (sp >= KEYRING_SEARCH_MAX_DEPTH)
- goto too_deep;
-
- /* stack the current position */
- stack[sp].keylist = keylist;
- stack[sp].kix = kix;
- sp++;
-
- /* begin again with the new keyring */
- subtree = key;
- goto descend;
- }
- }
-
- /* the keyring we're looking at was disqualified or didn't contain a
- * matching key */
-not_this_keyring:
- if (sp > 0) {
- /* resume the checking of a keyring higher up in the tree */
- sp--;
- keylist = stack[sp].keylist;
- kix = stack[sp].kix + 1;
- goto ascend;
- }
-
- ret = 0; /* no cycles detected */
-
-error:
+ search_nested_keyrings(B, &ctx);
rcu_read_unlock();
- return ret;
-
-too_deep:
- ret = -ELOOP;
- goto error;
-
-cycle_detected:
- ret = -EDEADLK;
- goto error;
-}
-
-/*
- * Dispose of a keyring list after the RCU grace period, freeing the unlinked
- * key
- */
-static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
-{
- struct keyring_list *klist =
- container_of(rcu, struct keyring_list, rcu);
-
- if (klist->delkey != USHRT_MAX)
- key_put(rcu_access_pointer(klist->keys[klist->delkey]));
- kfree(klist);
+ return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result);
}
/*
 * Preallocate memory so that a key can be linked into a keyring.
*/
-int __key_link_begin(struct key *keyring, const struct key_type *type,
- const char *description, unsigned long *_prealloc)
+int __key_link_begin(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit **_edit)
__acquires(&keyring->sem)
__acquires(&keyring_serialise_link_sem)
{
- struct keyring_list *klist, *nklist;
- unsigned long prealloc;
- unsigned max;
- time_t lowest_lru;
- size_t size;
- int loop, lru, ret;
+ struct assoc_array_edit *edit;
+ int ret;
- kenter("%d,%s,%s,", key_serial(keyring), type->name, description);
+ kenter("%d,%s,%s,",
+ keyring->serial, index_key->type->name, index_key->description);
+
+ BUG_ON(index_key->desc_len == 0);
if (keyring->type != &key_type_keyring)
return -ENOTDIR;
@@ -771,100 +1060,39 @@ int __key_link_begin(struct key *keyring, const struct key_type *type,
/* serialise link/link calls to prevent parallel calls causing a cycle
 * when linking two keyrings in opposite orders */
- if (type == &key_type_keyring)
+ if (index_key->type == &key_type_keyring)
down_write(&keyring_serialise_link_sem);
- klist = rcu_dereference_locked_keyring(keyring);
-
- /* see if there's a matching key we can displace */
- lru = -1;
- if (klist && klist->nkeys > 0) {
- lowest_lru = TIME_T_MAX;
- for (loop = klist->nkeys - 1; loop >= 0; loop--) {
- struct key *key = rcu_deref_link_locked(klist, loop,
- keyring);
- if (key->type == type &&
- strcmp(key->description, description) == 0) {
- /* Found a match - we'll replace the link with
- * one to the new key. We record the slot
- * position.
- */
- klist->delkey = loop;
- prealloc = 0;
- goto done;
- }
- if (key->last_used_at < lowest_lru) {
- lowest_lru = key->last_used_at;
- lru = loop;
- }
- }
- }
-
- /* If the keyring is full then do an LRU discard */
- if (klist &&
- klist->nkeys == klist->maxkeys &&
- klist->maxkeys >= MAX_KEYRING_LINKS) {
- kdebug("LRU discard %d\n", lru);
- klist->delkey = lru;
- prealloc = 0;
- goto done;
- }
-
- /* check that we aren't going to overrun the user's quota */
- ret = key_payload_reserve(keyring,
- keyring->datalen + KEYQUOTA_LINK_BYTES);
- if (ret < 0)
+ /* Create an edit script that will insert/replace the key in the
+ * keyring tree.
+ */
+ edit = assoc_array_insert(&keyring->keys,
+ &keyring_assoc_array_ops,
+ index_key,
+ NULL);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
goto error_sem;
+ }
- if (klist && klist->nkeys < klist->maxkeys) {
- /* there's sufficient slack space to append directly */
- klist->delkey = klist->nkeys;
- prealloc = KEY_LINK_FIXQUOTA;
- } else {
- /* grow the key list */
- max = 4;
- if (klist) {
- max += klist->maxkeys;
- if (max > MAX_KEYRING_LINKS)
- max = MAX_KEYRING_LINKS;
- BUG_ON(max <= klist->maxkeys);
- }
-
- size = sizeof(*klist) + sizeof(struct key *) * max;
-
- ret = -ENOMEM;
- nklist = kmalloc(size, GFP_KERNEL);
- if (!nklist)
- goto error_quota;
-
- nklist->maxkeys = max;
- if (klist) {
- memcpy(nklist->keys, klist->keys,
- sizeof(struct key *) * klist->nkeys);
- nklist->delkey = klist->nkeys;
- nklist->nkeys = klist->nkeys + 1;
- klist->delkey = USHRT_MAX;
- } else {
- nklist->nkeys = 1;
- nklist->delkey = 0;
- }
-
- /* add the key into the new space */
- RCU_INIT_POINTER(nklist->keys[nklist->delkey], NULL);
- prealloc = (unsigned long)nklist | KEY_LINK_FIXQUOTA;
+ /* If we're not replacing a link in-place then we're going to need some
+ * extra quota.
+ */
+ if (!edit->dead_leaf) {
+ ret = key_payload_reserve(keyring,
+ keyring->datalen + KEYQUOTA_LINK_BYTES);
+ if (ret < 0)
+ goto error_cancel;
}
-done:
- *_prealloc = prealloc;
+ *_edit = edit;
kleave(" = 0");
return 0;
-error_quota:
- /* undo the quota changes */
- key_payload_reserve(keyring,
- keyring->datalen - KEYQUOTA_LINK_BYTES);
+error_cancel:
+ assoc_array_cancel_edit(edit);
error_sem:
- if (type == &key_type_keyring)
+ if (index_key->type == &key_type_keyring)
up_write(&keyring_serialise_link_sem);
error_krsem:
up_write(&keyring->sem);
@@ -895,60 +1123,12 @@ int __key_link_check_live_key(struct key *keyring, struct key *key)
* holds at most one link to any given key of a particular type+description
* combination.
*/
-void __key_link(struct key *keyring, struct key *key,
- unsigned long *_prealloc)
+void __key_link(struct key *key, struct assoc_array_edit **_edit)
{
- struct keyring_list *klist, *nklist;
- struct key *discard;
-
- nklist = (struct keyring_list *)(*_prealloc & ~KEY_LINK_FIXQUOTA);
- *_prealloc = 0;
-
- kenter("%d,%d,%p", keyring->serial, key->serial, nklist);
-
- klist = rcu_dereference_locked_keyring(keyring);
-
- atomic_inc(&key->usage);
- keyring->last_used_at = key->last_used_at =
- current_kernel_time().tv_sec;
-
- /* there's a matching key we can displace or an empty slot in a newly
- * allocated list we can fill */
- if (nklist) {
- kdebug("reissue %hu/%hu/%hu",
- nklist->delkey, nklist->nkeys, nklist->maxkeys);
-
- RCU_INIT_POINTER(nklist->keys[nklist->delkey], key);
-
- rcu_assign_pointer(keyring->payload.subscriptions, nklist);
-
- /* dispose of the old keyring list and, if there was one, the
- * displaced key */
- if (klist) {
- kdebug("dispose %hu/%hu/%hu",
- klist->delkey, klist->nkeys, klist->maxkeys);
- call_rcu(&klist->rcu, keyring_unlink_rcu_disposal);
- }
- } else if (klist->delkey < klist->nkeys) {
- kdebug("replace %hu/%hu/%hu",
- klist->delkey, klist->nkeys, klist->maxkeys);
-
- discard = rcu_dereference_protected(
- klist->keys[klist->delkey],
- rwsem_is_locked(&keyring->sem));
- rcu_assign_pointer(klist->keys[klist->delkey], key);
- /* The garbage collector will take care of RCU
- * synchronisation */
- key_put(discard);
- } else {
- /* there's sufficient slack space to append directly */
- kdebug("append %hu/%hu/%hu",
- klist->delkey, klist->nkeys, klist->maxkeys);
-
- RCU_INIT_POINTER(klist->keys[klist->delkey], key);
- smp_wmb();
- klist->nkeys++;
- }
+ __key_get(key);
+ assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key));
+ assoc_array_apply_edit(*_edit);
+ *_edit = NULL;
}
/*
@@ -956,24 +1136,22 @@ void __key_link(struct key *keyring, struct key *key,
*
* Must be called with __key_link_begin() having being called.
*/
-void __key_link_end(struct key *keyring, struct key_type *type,
- unsigned long prealloc)
+void __key_link_end(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit *edit)
__releases(&keyring->sem)
__releases(&keyring_serialise_link_sem)
{
- BUG_ON(type == NULL);
- BUG_ON(type->name == NULL);
- kenter("%d,%s,%lx", keyring->serial, type->name, prealloc);
+ BUG_ON(index_key->type == NULL);
+ kenter("%d,%s,", keyring->serial, index_key->type->name);
- if (type == &key_type_keyring)
+ if (index_key->type == &key_type_keyring)
up_write(&keyring_serialise_link_sem);
- if (prealloc) {
- if (prealloc & KEY_LINK_FIXQUOTA)
- key_payload_reserve(keyring,
- keyring->datalen -
- KEYQUOTA_LINK_BYTES);
- kfree((struct keyring_list *)(prealloc & ~KEY_LINK_FIXQUOTA));
+ if (edit && !edit->dead_leaf) {
+ key_payload_reserve(keyring,
+ keyring->datalen - KEYQUOTA_LINK_BYTES);
+ assoc_array_cancel_edit(edit);
}
up_write(&keyring->sem);
}
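
__key_link_begin(), __key_link() and __key_link_end() form a prepare/commit/cancel sequence: everything that can fail is done up front, so the final apply step cannot fail. A toy userspace analogue of that edit-script pattern (not the assoc_array API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy "edit script": everything needed to replace an array element is
 * allocated up front, so the final apply step cannot fail - the same
 * property __key_link_begin()/__key_link() rely on.
 */
struct edit {
	char **slot;		/* where the new value will be stored */
	char *new_value;	/* preallocated replacement */
	char *old_value;	/* displaced value, freed on apply */
};

static struct edit *edit_begin(char **slot, const char *value)
{
	struct edit *e = malloc(sizeof(*e));

	if (!e)
		return NULL;
	e->new_value = strdup(value);
	if (!e->new_value) {
		free(e);
		return NULL;
	}
	e->slot = slot;
	e->old_value = *slot;
	return e;
}

static void edit_apply(struct edit *e)		/* cannot fail */
{
	*e->slot = e->new_value;
	free(e->old_value);
	free(e);
}

static void edit_cancel(struct edit *e)
{
	free(e->new_value);
	free(e);
}

int main(void)
{
	char *slot = strdup("old");
	struct edit *e = edit_begin(&slot, "new");

	if (!e)
		return 1;
	edit_apply(e);		/* or edit_cancel(e) on error */
	printf("%s\n", slot);
	free(slot);
	return 0;
}
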
@@ -1000,20 +1178,28 @@ void __key_link_end(struct key *keyring, struct key_type *type,
*/
int key_link(struct key *keyring, struct key *key)
{
- unsigned long prealloc;
+ struct assoc_array_edit *edit;
int ret;
+ kenter("{%d,%d}", keyring->serial, atomic_read(&keyring->usage));
+
key_check(keyring);
key_check(key);
- ret = __key_link_begin(keyring, key->type, key->description, &prealloc);
+ if (test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags) &&
+ !test_bit(KEY_FLAG_TRUSTED, &key->flags))
+ return -EPERM;
+
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
if (ret == 0) {
+ kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage));
ret = __key_link_check_live_key(keyring, key);
if (ret == 0)
- __key_link(keyring, key, &prealloc);
- __key_link_end(keyring, key->type, prealloc);
+ __key_link(key, &edit);
+ __key_link_end(keyring, &key->index_key, edit);
}
+ kleave(" = %d {%d,%d}", ret, keyring->serial, atomic_read(&keyring->usage));
return ret;
}
EXPORT_SYMBOL(key_link);
@@ -1037,90 +1223,37 @@ EXPORT_SYMBOL(key_link);
*/
int key_unlink(struct key *keyring, struct key *key)
{
- struct keyring_list *klist, *nklist;
- int loop, ret;
+ struct assoc_array_edit *edit;
+ int ret;
key_check(keyring);
key_check(key);
- ret = -ENOTDIR;
if (keyring->type != &key_type_keyring)
- goto error;
+ return -ENOTDIR;
down_write(&keyring->sem);
- klist = rcu_dereference_locked_keyring(keyring);
- if (klist) {
- /* search the keyring for the key */
- for (loop = 0; loop < klist->nkeys; loop++)
- if (rcu_access_pointer(klist->keys[loop]) == key)
- goto key_is_present;
+ edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
+ &key->index_key);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
+ goto error;
}
-
- up_write(&keyring->sem);
ret = -ENOENT;
- goto error;
-
-key_is_present:
- /* we need to copy the key list for RCU purposes */
- nklist = kmalloc(sizeof(*klist) +
- sizeof(struct key *) * klist->maxkeys,
- GFP_KERNEL);
- if (!nklist)
- goto nomem;
- nklist->maxkeys = klist->maxkeys;
- nklist->nkeys = klist->nkeys - 1;
-
- if (loop > 0)
- memcpy(&nklist->keys[0],
- &klist->keys[0],
- loop * sizeof(struct key *));
-
- if (loop < nklist->nkeys)
- memcpy(&nklist->keys[loop],
- &klist->keys[loop + 1],
- (nklist->nkeys - loop) * sizeof(struct key *));
-
- /* adjust the user's quota */
- key_payload_reserve(keyring,
- keyring->datalen - KEYQUOTA_LINK_BYTES);
-
- rcu_assign_pointer(keyring->payload.subscriptions, nklist);
-
- up_write(&keyring->sem);
-
- /* schedule for later cleanup */
- klist->delkey = loop;
- call_rcu(&klist->rcu, keyring_unlink_rcu_disposal);
+ if (edit == NULL)
+ goto error;
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
ret = 0;
error:
- return ret;
-nomem:
- ret = -ENOMEM;
up_write(&keyring->sem);
- goto error;
+ return ret;
}
EXPORT_SYMBOL(key_unlink);
-/*
- * Dispose of a keyring list after the RCU grace period, releasing the keys it
- * links to.
- */
-static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
-{
- struct keyring_list *klist;
- int loop;
-
- klist = container_of(rcu, struct keyring_list, rcu);
-
- for (loop = klist->nkeys - 1; loop >= 0; loop--)
- key_put(rcu_access_pointer(klist->keys[loop]));
-
- kfree(klist);
-}
-
/**
* keyring_clear - Clear a keyring
* @keyring: The keyring to clear.
@@ -1131,33 +1264,25 @@ static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
*/
int keyring_clear(struct key *keyring)
{
- struct keyring_list *klist;
+ struct assoc_array_edit *edit;
int ret;
- ret = -ENOTDIR;
- if (keyring->type == &key_type_keyring) {
- /* detach the pointer block with the locks held */
- down_write(&keyring->sem);
-
- klist = rcu_dereference_locked_keyring(keyring);
- if (klist) {
- /* adjust the quota */
- key_payload_reserve(keyring,
- sizeof(struct keyring_list));
-
- rcu_assign_pointer(keyring->payload.subscriptions,
- NULL);
- }
-
- up_write(&keyring->sem);
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
- /* free the keys after the locks have been dropped */
- if (klist)
- call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
+ down_write(&keyring->sem);
+ edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
+ } else {
+ if (edit)
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, 0);
ret = 0;
}
+ up_write(&keyring->sem);
return ret;
}
EXPORT_SYMBOL(keyring_clear);
@@ -1169,17 +1294,25 @@ EXPORT_SYMBOL(keyring_clear);
*/
static void keyring_revoke(struct key *keyring)
{
- struct keyring_list *klist;
+ struct assoc_array_edit *edit;
- klist = rcu_dereference_locked_keyring(keyring);
+ edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+ if (!IS_ERR(edit)) {
+ if (edit)
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, 0);
+ }
+}
- /* adjust the quota */
- key_payload_reserve(keyring, 0);
+static bool gc_iterator(void *object, void *iterator_data)
+{
+ struct key *key = keyring_ptr_to_key(object);
+ time_t *limit = iterator_data;
- if (klist) {
- rcu_assign_pointer(keyring->payload.subscriptions, NULL);
- call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
- }
+ if (key_is_dead(key, *limit))
+ return false;
+ key_get(key);
+ return true;
}
/*
@@ -1192,88 +1325,12 @@ static void keyring_revoke(struct key *keyring)
*/
void keyring_gc(struct key *keyring, time_t limit)
{
- struct keyring_list *klist, *new;
- struct key *key;
- int loop, keep, max;
-
kenter("{%x,%s}", key_serial(keyring), keyring->description);
down_write(&keyring->sem);
-
- klist = rcu_dereference_locked_keyring(keyring);
- if (!klist)
- goto no_klist;
-
- /* work out how many subscriptions we're keeping */
- keep = 0;
- for (loop = klist->nkeys - 1; loop >= 0; loop--)
- if (!key_is_dead(rcu_deref_link_locked(klist, loop, keyring),
- limit))
- keep++;
-
- if (keep == klist->nkeys)
- goto just_return;
-
- /* allocate a new keyring payload */
- max = roundup(keep, 4);
- new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *),
- GFP_KERNEL);
- if (!new)
- goto nomem;
- new->maxkeys = max;
- new->nkeys = 0;
- new->delkey = 0;
-
- /* install the live keys
- * - must take care as expired keys may be updated back to life
- */
- keep = 0;
- for (loop = klist->nkeys - 1; loop >= 0; loop--) {
- key = rcu_deref_link_locked(klist, loop, keyring);
- if (!key_is_dead(key, limit)) {
- if (keep >= max)
- goto discard_new;
- RCU_INIT_POINTER(new->keys[keep++], key_get(key));
- }
- }
- new->nkeys = keep;
-
- /* adjust the quota */
- key_payload_reserve(keyring,
- sizeof(struct keyring_list) +
- KEYQUOTA_LINK_BYTES * keep);
-
- if (keep == 0) {
- rcu_assign_pointer(keyring->payload.subscriptions, NULL);
- kfree(new);
- } else {
- rcu_assign_pointer(keyring->payload.subscriptions, new);
- }
-
- up_write(&keyring->sem);
-
- call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
- kleave(" [yes]");
- return;
-
-discard_new:
- new->nkeys = keep;
- keyring_clear_rcu_disposal(&new->rcu);
+ assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops,
+ gc_iterator, &limit);
up_write(&keyring->sem);
- kleave(" [discard]");
- return;
-just_return:
- up_write(&keyring->sem);
- kleave(" [no dead]");
- return;
-
-no_klist:
- up_write(&keyring->sem);
- kleave(" [no_klist]");
- return;
-
-nomem:
- up_write(&keyring->sem);
- kleave(" [oom]");
+ kleave("");
}
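
keyring_gc() now simply hands a keep-or-drop predicate to assoc_array_gc(). A minimal userspace analogue of that compact-by-predicate step, assuming a plain pointer array rather than the index tree and omitting the reference counting:

#include <stdio.h>
#include <time.h>

struct toy_key {
	int serial;
	time_t expiry;		/* 0 means no expiry */
};

static int key_is_dead(const struct toy_key *key, time_t limit)
{
	return key->expiry && key->expiry <= limit;
}

/* Keep only live keys, returning the new count - the same shape as the
 * gc_iterator() callback above.
 */
static size_t gc_array(struct toy_key **keys, size_t n, time_t limit)
{
	size_t i, keep = 0;

	for (i = 0; i < n; i++)
		if (!key_is_dead(keys[i], limit))
			keys[keep++] = keys[i];
	return keep;
}

int main(void)
{
	struct toy_key a = { 1, 0 }, b = { 2, 100 }, c = { 3, 0 };
	struct toy_key *ring[] = { &a, &b, &c };
	size_t n = gc_array(ring, 3, 200);
	size_t i;

	for (i = 0; i < n; i++)
		printf("kept %d\n", ring[i]->serial);
	return 0;
}
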
diff --git a/security/keys/persistent.c b/security/keys/persistent.c
new file mode 100644
index 000000000000..82f4957a7acf
--- /dev/null
+++ b/security/keys/persistent.c
@@ -0,0 +1,169 @@
+/* General persistent per-UID keyrings register
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/user_namespace.h>
+#include "internal.h"
+
+unsigned persistent_keyring_expiry = 3 * 24 * 3600; /* Expire after 3 days of non-use */
+
+/*
+ * Create the persistent keyring register for the current user namespace.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static int key_create_persistent_register(struct user_namespace *ns)
+{
+ struct key *reg = keyring_alloc(".persistent_register",
+ KUIDT_INIT(0), KGIDT_INIT(0),
+ current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ ns->persistent_keyring_register = reg;
+ return 0;
+}
+
+/*
+ * Create the persistent keyring for the specified user.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static key_ref_t key_create_persistent(struct user_namespace *ns, kuid_t uid,
+ struct keyring_index_key *index_key)
+{
+ struct key *persistent;
+ key_ref_t reg_ref, persistent_ref;
+
+ if (!ns->persistent_keyring_register) {
+ long err = key_create_persistent_register(ns);
+ if (err < 0)
+ return ERR_PTR(err);
+ } else {
+ reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+ persistent_ref = find_key_to_update(reg_ref, index_key);
+ if (persistent_ref)
+ return persistent_ref;
+ }
+
+ persistent = keyring_alloc(index_key->description,
+ uid, INVALID_GID, current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA,
+ ns->persistent_keyring_register);
+ if (IS_ERR(persistent))
+ return ERR_CAST(persistent);
+
+ return make_key_ref(persistent, true);
+}
+
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+static long key_get_persistent(struct user_namespace *ns, kuid_t uid,
+ key_ref_t dest_ref)
+{
+ struct keyring_index_key index_key;
+ struct key *persistent;
+ key_ref_t reg_ref, persistent_ref;
+ char buf[32];
+ long ret;
+
+ /* Look in the register if it exists */
+ index_key.type = &key_type_keyring;
+ index_key.description = buf;
+ index_key.desc_len = sprintf(buf, "_persistent.%u", from_kuid(ns, uid));
+
+ if (ns->persistent_keyring_register) {
+ reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+ down_read(&ns->persistent_keyring_register_sem);
+ persistent_ref = find_key_to_update(reg_ref, &index_key);
+ up_read(&ns->persistent_keyring_register_sem);
+
+ if (persistent_ref)
+ goto found;
+ }
+
+ /* It wasn't in the register, so we'll need to create it. We might
+ * also need to create the register.
+ */
+ down_write(&ns->persistent_keyring_register_sem);
+ persistent_ref = key_create_persistent(ns, uid, &index_key);
+ up_write(&ns->persistent_keyring_register_sem);
+ if (!IS_ERR(persistent_ref))
+ goto found;
+
+ return PTR_ERR(persistent_ref);
+
+found:
+ ret = key_task_permission(persistent_ref, current_cred(), KEY_LINK);
+ if (ret == 0) {
+ persistent = key_ref_to_ptr(persistent_ref);
+ ret = key_link(key_ref_to_ptr(dest_ref), persistent);
+ if (ret == 0) {
+ key_set_timeout(persistent, persistent_keyring_expiry);
+ ret = persistent->serial;
+ }
+ }
+
+ key_ref_put(persistent_ref);
+ return ret;
+}
+
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+long keyctl_get_persistent(uid_t _uid, key_serial_t destid)
+{
+ struct user_namespace *ns = current_user_ns();
+ key_ref_t dest_ref;
+ kuid_t uid;
+ long ret;
+
+ /* -1 indicates the current user */
+ if (_uid == (uid_t)-1) {
+ uid = current_uid();
+ } else {
+ uid = make_kuid(ns, _uid);
+ if (!uid_valid(uid))
+ return -EINVAL;
+
+ /* You can only see your own persistent cache if you're not
+ * sufficiently privileged.
+ */
+ if (!uid_eq(uid, current_uid()) &&
+ !uid_eq(uid, current_suid()) &&
+ !uid_eq(uid, current_euid()) &&
+ !uid_eq(uid, current_fsuid()) &&
+ !ns_capable(ns, CAP_SETUID))
+ return -EPERM;
+ }
+
+ /* There must be a destination keyring */
+ dest_ref = lookup_user_key(destid, KEY_LOOKUP_CREATE, KEY_WRITE);
+ if (IS_ERR(dest_ref))
+ return PTR_ERR(dest_ref);
+ if (key_ref_to_ptr(dest_ref)->type != &key_type_keyring) {
+ ret = -ENOTDIR;
+ goto out_put_dest;
+ }
+
+ ret = key_get_persistent(ns, uid, dest_ref);
+
+out_put_dest:
+ key_ref_put(dest_ref);
+ return ret;
+}
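
A quick userspace check of the new call, assuming a libkeyutils recent enough to wrap the new KEYCTL_GET_PERSISTENT command (older libraries can issue the raw keyctl() with the same arguments); build with -lkeyutils:

        #include <stdio.h>
        #include <keyutils.h>

        int main(void)
        {
                /* (uid_t)-1 means "the calling user"; link the persistent
                 * keyring into the session keyring so it stays reachable. */
                long id = keyctl_get_persistent((uid_t)-1,
                                                KEY_SPEC_SESSION_KEYRING);

                if (id < 0) {
                        perror("keyctl_get_persistent");
                        return 1;
                }
                printf("persistent keyring serial: %ld\n", id);
                return 0;
        }
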
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 217b6855e815..88e9a466940f 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -182,7 +182,6 @@ static void proc_keys_stop(struct seq_file *p, void *v)
static int proc_keys_show(struct seq_file *m, void *v)
{
- const struct cred *cred = current_cred();
struct rb_node *_p = v;
struct key *key = rb_entry(_p, struct key, serial_node);
struct timespec now;
@@ -191,15 +190,23 @@ static int proc_keys_show(struct seq_file *m, void *v)
char xbuf[12];
int rc;
+ struct keyring_search_context ctx = {
+ .index_key.type = key->type,
+ .index_key.description = key->description,
+ .cred = current_cred(),
+ .match = lookup_user_key_possessed,
+ .match_data = key,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_LOOKUP_DIRECT),
+ };
+
key_ref = make_key_ref(key, 0);
/* determine if the key is possessed by this process (a test we can
* skip if the key does not indicate the possessor can view it
*/
if (key->perm & KEY_POS_VIEW) {
- skey_ref = search_my_process_keyrings(key->type, key,
- lookup_user_key_possessed,
- true, cred);
+ skey_ref = search_my_process_keyrings(&ctx);
if (!IS_ERR(skey_ref)) {
key_ref_put(skey_ref);
key_ref = make_key_ref(key, 1);
@@ -211,7 +218,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
* - the caller holds a spinlock, and thus the RCU read lock, making our
* access to __current_cred() safe
*/
- rc = key_task_permission(key_ref, cred, KEY_VIEW);
+ rc = key_task_permission(key_ref, ctx.cred, KEY_VIEW);
if (rc < 0)
return 0;
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 42defae1e161..0cf8a130a267 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -235,7 +235,7 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
if (IS_ERR(keyring))
return PTR_ERR(keyring);
} else {
- atomic_inc(&keyring->usage);
+ __key_get(keyring);
}
/* install the keyring */
@@ -319,11 +319,7 @@ void key_fsgid_changed(struct task_struct *tsk)
* In the case of a successful return, the possession attribute is set on the
* returned key reference.
*/
-key_ref_t search_my_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- bool no_state_check,
- const struct cred *cred)
+key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
{
key_ref_t key_ref, ret, err;
@@ -339,10 +335,9 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
err = ERR_PTR(-EAGAIN);
/* search the thread keyring first */
- if (cred->thread_keyring) {
+ if (ctx->cred->thread_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(cred->thread_keyring, 1),
- cred, type, description, match, no_state_check);
+ make_key_ref(ctx->cred->thread_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -358,10 +353,9 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
}
/* search the process keyring second */
- if (cred->process_keyring) {
+ if (ctx->cred->process_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(cred->process_keyring, 1),
- cred, type, description, match, no_state_check);
+ make_key_ref(ctx->cred->process_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -379,11 +373,11 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
}
/* search the session keyring */
- if (cred->session_keyring) {
+ if (ctx->cred->session_keyring) {
rcu_read_lock();
key_ref = keyring_search_aux(
- make_key_ref(rcu_dereference(cred->session_keyring), 1),
- cred, type, description, match, no_state_check);
+ make_key_ref(rcu_dereference(ctx->cred->session_keyring), 1),
+ ctx);
rcu_read_unlock();
if (!IS_ERR(key_ref))
@@ -402,10 +396,10 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
}
}
/* or search the user-session keyring */
- else if (cred->user->session_keyring) {
+ else if (ctx->cred->user->session_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(cred->user->session_keyring, 1),
- cred, type, description, match, no_state_check);
+ make_key_ref(ctx->cred->user->session_keyring, 1),
+ ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -437,18 +431,14 @@ found:
*
* Return same as search_my_process_keyrings().
*/
-key_ref_t search_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- const struct cred *cred)
+key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
{
struct request_key_auth *rka;
key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
might_sleep();
- key_ref = search_my_process_keyrings(type, description, match,
- false, cred);
+ key_ref = search_my_process_keyrings(ctx);
if (!IS_ERR(key_ref))
goto found;
err = key_ref;
@@ -457,18 +447,21 @@ key_ref_t search_process_keyrings(struct key_type *type,
* search the keyrings of the process mentioned there
* - we don't permit access to request_key auth keys via this method
*/
- if (cred->request_key_auth &&
- cred == current_cred() &&
- type != &key_type_request_key_auth
+ if (ctx->cred->request_key_auth &&
+ ctx->cred == current_cred() &&
+ ctx->index_key.type != &key_type_request_key_auth
) {
+ const struct cred *cred = ctx->cred;
+
/* defend against the auth key being revoked */
down_read(&cred->request_key_auth->sem);
- if (key_validate(cred->request_key_auth) == 0) {
- rka = cred->request_key_auth->payload.data;
+ if (key_validate(ctx->cred->request_key_auth) == 0) {
+ rka = ctx->cred->request_key_auth->payload.data;
- key_ref = search_process_keyrings(type, description,
- match, rka->cred);
+ ctx->cred = rka->cred;
+ key_ref = search_process_keyrings(ctx);
+ ctx->cred = cred;
up_read(&cred->request_key_auth->sem);
@@ -522,19 +515,23 @@ int lookup_user_key_possessed(const struct key *key, const void *target)
key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
key_perm_t perm)
{
+ struct keyring_search_context ctx = {
+ .match = lookup_user_key_possessed,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_LOOKUP_DIRECT),
+ };
struct request_key_auth *rka;
- const struct cred *cred;
struct key *key;
key_ref_t key_ref, skey_ref;
int ret;
try_again:
- cred = get_current_cred();
+ ctx.cred = get_current_cred();
key_ref = ERR_PTR(-ENOKEY);
switch (id) {
case KEY_SPEC_THREAD_KEYRING:
- if (!cred->thread_keyring) {
+ if (!ctx.cred->thread_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
@@ -546,13 +543,13 @@ try_again:
goto reget_creds;
}
- key = cred->thread_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->thread_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_PROCESS_KEYRING:
- if (!cred->process_keyring) {
+ if (!ctx.cred->process_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
@@ -564,13 +561,13 @@ try_again:
goto reget_creds;
}
- key = cred->process_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->process_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_SESSION_KEYRING:
- if (!cred->session_keyring) {
+ if (!ctx.cred->session_keyring) {
/* always install a session keyring upon access if one
* doesn't exist yet */
ret = install_user_keyrings();
@@ -580,13 +577,13 @@ try_again:
ret = join_session_keyring(NULL);
else
ret = install_session_keyring(
- cred->user->session_keyring);
+ ctx.cred->user->session_keyring);
if (ret < 0)
goto error;
goto reget_creds;
- } else if (cred->session_keyring ==
- cred->user->session_keyring &&
+ } else if (ctx.cred->session_keyring ==
+ ctx.cred->user->session_keyring &&
lflags & KEY_LOOKUP_CREATE) {
ret = join_session_keyring(NULL);
if (ret < 0)
@@ -595,33 +592,33 @@ try_again:
}
rcu_read_lock();
- key = rcu_dereference(cred->session_keyring);
- atomic_inc(&key->usage);
+ key = rcu_dereference(ctx.cred->session_keyring);
+ __key_get(key);
rcu_read_unlock();
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_KEYRING:
- if (!cred->user->uid_keyring) {
+ if (!ctx.cred->user->uid_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
- key = cred->user->uid_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->user->uid_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_SESSION_KEYRING:
- if (!cred->user->session_keyring) {
+ if (!ctx.cred->user->session_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
- key = cred->user->session_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->user->session_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
@@ -631,29 +628,29 @@ try_again:
goto error;
case KEY_SPEC_REQKEY_AUTH_KEY:
- key = cred->request_key_auth;
+ key = ctx.cred->request_key_auth;
if (!key)
goto error;
- atomic_inc(&key->usage);
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_REQUESTOR_KEYRING:
- if (!cred->request_key_auth)
+ if (!ctx.cred->request_key_auth)
goto error;
- down_read(&cred->request_key_auth->sem);
+ down_read(&ctx.cred->request_key_auth->sem);
if (test_bit(KEY_FLAG_REVOKED,
- &cred->request_key_auth->flags)) {
+ &ctx.cred->request_key_auth->flags)) {
key_ref = ERR_PTR(-EKEYREVOKED);
key = NULL;
} else {
- rka = cred->request_key_auth->payload.data;
+ rka = ctx.cred->request_key_auth->payload.data;
key = rka->dest_keyring;
- atomic_inc(&key->usage);
+ __key_get(key);
}
- up_read(&cred->request_key_auth->sem);
+ up_read(&ctx.cred->request_key_auth->sem);
if (!key)
goto error;
key_ref = make_key_ref(key, 1);
@@ -673,9 +670,13 @@ try_again:
key_ref = make_key_ref(key, 0);
/* check to see if we possess the key */
- skey_ref = search_process_keyrings(key->type, key,
- lookup_user_key_possessed,
- cred);
+ ctx.index_key.type = key->type;
+ ctx.index_key.description = key->description;
+ ctx.index_key.desc_len = strlen(key->description);
+ ctx.match_data = key;
+ kdebug("check possessed");
+ skey_ref = search_process_keyrings(&ctx);
+ kdebug("possessed=%p", skey_ref);
if (!IS_ERR(skey_ref)) {
key_put(key);
@@ -715,14 +716,14 @@ try_again:
goto invalid_key;
/* check the permissions */
- ret = key_task_permission(key_ref, cred, perm);
+ ret = key_task_permission(key_ref, ctx.cred, perm);
if (ret < 0)
goto invalid_key;
key->last_used_at = current_kernel_time().tv_sec;
error:
- put_cred(cred);
+ put_cred(ctx.cred);
return key_ref;
invalid_key:
@@ -733,7 +734,7 @@ invalid_key:
/* if we attempted to install a keyring, then it may have caused new
* creds to be installed */
reget_creds:
- put_cred(cred);
+ put_cred(ctx.cred);
goto try_again;
}
@@ -856,3 +857,13 @@ void key_change_session_keyring(struct callback_head *twork)
commit_creds(new);
}
+
+/*
+ * Make sure that root's user and user-session keyrings exist.
+ */
+static int __init init_root_keyring(void)
+{
+ return install_user_keyrings();
+}
+
+late_initcall(init_root_keyring);
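
The repeated atomic_inc(&key->usage) calls above become __key_get(); the helper itself is not part of this hunk, but a plausible definition (an assumption about the keys header, shown only so the conversions read clearly) is:

        /* Take an extra reference on a key that is known to be pinned. */
        static inline struct key *__key_get(struct key *key)
        {
                atomic_inc(&key->usage);
                return key;
        }
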
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index c411f9bb156b..381411941cc1 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -345,33 +345,34 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
* May return a key that's already under construction instead if there was a
* race between two threads calling request_key().
*/
-static int construct_alloc_key(struct key_type *type,
- const char *description,
+static int construct_alloc_key(struct keyring_search_context *ctx,
struct key *dest_keyring,
unsigned long flags,
struct key_user *user,
struct key **_key)
{
- const struct cred *cred = current_cred();
- unsigned long prealloc;
+ struct assoc_array_edit *edit;
struct key *key;
key_perm_t perm;
key_ref_t key_ref;
int ret;
- kenter("%s,%s,,,", type->name, description);
+ kenter("%s,%s,,,",
+ ctx->index_key.type->name, ctx->index_key.description);
*_key = NULL;
mutex_lock(&user->cons_lock);
perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
perm |= KEY_USR_VIEW;
- if (type->read)
+ if (ctx->index_key.type->read)
perm |= KEY_POS_READ;
- if (type == &key_type_keyring || type->update)
+ if (ctx->index_key.type == &key_type_keyring ||
+ ctx->index_key.type->update)
perm |= KEY_POS_WRITE;
- key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred,
+ key = key_alloc(ctx->index_key.type, ctx->index_key.description,
+ ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred,
perm, flags);
if (IS_ERR(key))
goto alloc_failed;
@@ -379,8 +380,7 @@ static int construct_alloc_key(struct key_type *type,
set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
if (dest_keyring) {
- ret = __key_link_begin(dest_keyring, type, description,
- &prealloc);
+ ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit);
if (ret < 0)
goto link_prealloc_failed;
}
@@ -390,16 +390,16 @@ static int construct_alloc_key(struct key_type *type,
* waited for locks */
mutex_lock(&key_construction_mutex);
- key_ref = search_process_keyrings(type, description, type->match, cred);
+ key_ref = search_process_keyrings(ctx);
if (!IS_ERR(key_ref))
goto key_already_present;
if (dest_keyring)
- __key_link(dest_keyring, key, &prealloc);
+ __key_link(key, &edit);
mutex_unlock(&key_construction_mutex);
if (dest_keyring)
- __key_link_end(dest_keyring, type, prealloc);
+ __key_link_end(dest_keyring, &ctx->index_key, edit);
mutex_unlock(&user->cons_lock);
*_key = key;
kleave(" = 0 [%d]", key_serial(key));
@@ -414,8 +414,8 @@ key_already_present:
if (dest_keyring) {
ret = __key_link_check_live_key(dest_keyring, key);
if (ret == 0)
- __key_link(dest_keyring, key, &prealloc);
- __key_link_end(dest_keyring, type, prealloc);
+ __key_link(key, &edit);
+ __key_link_end(dest_keyring, &ctx->index_key, edit);
if (ret < 0)
goto link_check_failed;
}
@@ -444,8 +444,7 @@ alloc_failed:
/*
* Commence key construction.
*/
-static struct key *construct_key_and_link(struct key_type *type,
- const char *description,
+static struct key *construct_key_and_link(struct keyring_search_context *ctx,
const char *callout_info,
size_t callout_len,
void *aux,
@@ -464,8 +463,7 @@ static struct key *construct_key_and_link(struct key_type *type,
construct_get_dest_keyring(&dest_keyring);
- ret = construct_alloc_key(type, description, dest_keyring, flags, user,
- &key);
+ ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
key_user_put(user);
if (ret == 0) {
@@ -529,17 +527,24 @@ struct key *request_key_and_link(struct key_type *type,
struct key *dest_keyring,
unsigned long flags)
{
- const struct cred *cred = current_cred();
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match = type->match,
+ .match_data = description,
+ .flags = KEYRING_SEARCH_LOOKUP_DIRECT,
+ };
struct key *key;
key_ref_t key_ref;
int ret;
kenter("%s,%s,%p,%zu,%p,%p,%lx",
- type->name, description, callout_info, callout_len, aux,
- dest_keyring, flags);
+ ctx.index_key.type->name, ctx.index_key.description,
+ callout_info, callout_len, aux, dest_keyring, flags);
/* search all the process keyrings for a key */
- key_ref = search_process_keyrings(type, description, type->match, cred);
+ key_ref = search_process_keyrings(&ctx);
if (!IS_ERR(key_ref)) {
key = key_ref_to_ptr(key_ref);
@@ -562,9 +567,8 @@ struct key *request_key_and_link(struct key_type *type,
if (!callout_info)
goto error;
- key = construct_key_and_link(type, description, callout_info,
- callout_len, aux, dest_keyring,
- flags);
+ key = construct_key_and_link(&ctx, callout_info, callout_len,
+ aux, dest_keyring, flags);
}
error:
@@ -592,8 +596,10 @@ int wait_for_key_construction(struct key *key, bool intr)
intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (ret < 0)
return ret;
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+ if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
+ smp_rmb();
return key->type_data.reject_error;
+ }
return key_validate(key);
}
EXPORT_SYMBOL(wait_for_key_construction);
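
The smp_rmb() inserted above only pays off if the instantiation side publishes the rejection error before setting KEY_FLAG_NEGATIVE with a matching write barrier. The ordering below is a conceptual sketch of that writer; the real store lives in the negative-instantiation path elsewhere in security/keys/, so treat the exact placement as an assumption:

        key->type_data.reject_error = -error;           /* 1: store the error   */
        smp_wmb();                                      /* 2: order before flag */
        set_bit(KEY_FLAG_NEGATIVE, &key->flags);        /* 3: readers that see
                                                         * the flag now see a
                                                         * valid reject_error */
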
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 85730d5a5a59..7495a93b4b90 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "internal.h"
+#include <keys/user-type.h>
static int request_key_auth_instantiate(struct key *,
struct key_preparsed_payload *);
@@ -222,32 +223,26 @@ error_alloc:
}
/*
- * See if an authorisation key is associated with a particular key.
- */
-static int key_get_instantiation_authkey_match(const struct key *key,
- const void *_id)
-{
- struct request_key_auth *rka = key->payload.data;
- key_serial_t id = (key_serial_t)(unsigned long) _id;
-
- return rka->target_key->serial == id;
-}
-
-/*
* Search the current process's keyrings for the authorisation key for
* instantiation of a key.
*/
struct key *key_get_instantiation_authkey(key_serial_t target_id)
{
- const struct cred *cred = current_cred();
+ char description[16];
+ struct keyring_search_context ctx = {
+ .index_key.type = &key_type_request_key_auth,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match = user_match,
+ .match_data = description,
+ .flags = KEYRING_SEARCH_LOOKUP_DIRECT,
+ };
struct key *authkey;
key_ref_t authkey_ref;
- authkey_ref = search_process_keyrings(
- &key_type_request_key_auth,
- (void *) (unsigned long) target_id,
- key_get_instantiation_authkey_match,
- cred);
+ sprintf(description, "%x", target_id);
+
+ authkey_ref = search_process_keyrings(&ctx);
if (IS_ERR(authkey_ref)) {
authkey = ERR_CAST(authkey_ref);
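
Encoding the target serial into the description ("%x") is what lets the generic user_match() replace the dropped key_get_instantiation_authkey_match() callback: the auth key's own description is the serial in the same format, so a plain string compare suffices. Roughly, and only as a sketch of the existing matcher rather than anything added by this patch:

        int user_match(const struct key *key, const void *description)
        {
                return strcmp(key->description, description) == 0;
        }
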
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
index ee32d181764a..8c0af08760c8 100644
--- a/security/keys/sysctl.c
+++ b/security/keys/sysctl.c
@@ -61,5 +61,16 @@ ctl_table key_sysctls[] = {
.extra1 = (void *) &zero,
.extra2 = (void *) &max,
},
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ {
+ .procname = "persistent_keyring_expiry",
+ .data = &persistent_keyring_expiry,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) &zero,
+ .extra2 = (void *) &max,
+ },
+#endif
{ }
};
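
With the entry registered, the knob should appear alongside the other key sysctls; a small userspace read of the expiry (the /proc path assumes the existing kernel.keys directory these entries already live under) looks like:

        #include <stdio.h>

        int main(void)
        {
                unsigned int expiry;
                FILE *f = fopen("/proc/sys/kernel/keys/persistent_keyring_expiry",
                                "r");

                if (!f || fscanf(f, "%u", &expiry) != 1) {
                        perror("persistent_keyring_expiry");
                        return 1;
                }
                fclose(f);
                printf("persistent keyrings expire after %u s of non-use\n",
                       expiry);
                return 0;
        }
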
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 55dc88939185..faa2caeb593f 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -25,14 +25,15 @@ static int logon_vet_description(const char *desc);
* arbitrary blob of data as the payload
*/
struct key_type key_type_user = {
- .name = "user",
- .instantiate = user_instantiate,
- .update = user_update,
- .match = user_match,
- .revoke = user_revoke,
- .destroy = user_destroy,
- .describe = user_describe,
- .read = user_read,
+ .name = "user",
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .instantiate = user_instantiate,
+ .update = user_update,
+ .match = user_match,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = user_describe,
+ .read = user_read,
};
EXPORT_SYMBOL_GPL(key_type_user);
@@ -45,6 +46,7 @@ EXPORT_SYMBOL_GPL(key_type_user);
*/
struct key_type key_type_logon = {
.name = "logon",
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.instantiate = user_instantiate,
.update = user_update,
.match = user_match,
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 8d8d97dbb389..234bc2ab450c 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -302,18 +302,19 @@ static void dump_common_audit_data(struct audit_buffer *ab,
"faddr", "fport");
break;
}
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
struct inet_sock *inet = inet_sk(sk);
- struct ipv6_pinfo *inet6 = inet6_sk(sk);
- print_ipv6_addr(ab, &inet6->rcv_saddr,
+ print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr,
inet->inet_sport,
"laddr", "lport");
- print_ipv6_addr(ab, &inet6->daddr,
+ print_ipv6_addr(ab, &sk->sk_v6_daddr,
inet->inet_dport,
"faddr", "fport");
break;
}
+#endif
case AF_UNIX:
u = unix_sk(sk);
if (u->path.dentry) {
diff --git a/security/security.c b/security/security.c
index 4dc31f4f2700..15b6928592ef 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1340,22 +1340,17 @@ int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
return security_ops->xfrm_policy_delete_security(ctx);
}
-int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx)
+int security_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
{
- return security_ops->xfrm_state_alloc_security(x, sec_ctx, 0);
+ return security_ops->xfrm_state_alloc(x, sec_ctx);
}
EXPORT_SYMBOL(security_xfrm_state_alloc);
int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid)
{
- if (!polsec)
- return 0;
- /*
- * We want the context to be taken from secid which is usually
- * from the sock.
- */
- return security_ops->xfrm_state_alloc_security(x, NULL, secid);
+ return security_ops->xfrm_state_alloc_acquire(x, polsec, secid);
}
int security_xfrm_state_delete(struct xfrm_state *x)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 5b5231068516..794c3ca49eac 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -95,7 +95,9 @@
#include "audit.h"
#include "avc_ss.h"
-#define NUM_SEL_MNT_OPTS 5
+#define SB_TYPE_FMT "%s%s%s"
+#define SB_SUBTYPE(sb) (sb->s_subtype && sb->s_subtype[0])
+#define SB_TYPE_ARGS(sb) sb->s_type->name, SB_SUBTYPE(sb) ? "." : "", SB_SUBTYPE(sb) ? sb->s_subtype : ""
extern struct security_operations *security_ops;
@@ -139,12 +141,28 @@ static struct kmem_cache *sel_inode_cache;
* This function checks the SECMARK reference counter to see if any SECMARK
* targets are currently configured, if the reference counter is greater than
* zero SECMARK is considered to be enabled. Returns true (1) if SECMARK is
- * enabled, false (0) if SECMARK is disabled.
+ * enabled, false (0) if SECMARK is disabled. If the always_check_network
+ * policy capability is enabled, SECMARK is always considered enabled.
*
*/
static int selinux_secmark_enabled(void)
{
- return (atomic_read(&selinux_secmark_refcount) > 0);
+ return (selinux_policycap_alwaysnetwork || atomic_read(&selinux_secmark_refcount));
+}
+
+/**
+ * selinux_peerlbl_enabled - Check to see if peer labeling is currently enabled
+ *
+ * Description:
+ * This function checks if NetLabel or labeled IPSEC is enabled. Returns true
+ * (1) if any are enabled or false (0) if neither are enabled. If the
+ * always_check_network policy capability is enabled, peer labeling
+ * is always considered enabled.
+ *
+ */
+static int selinux_peerlbl_enabled(void)
+{
+ return (selinux_policycap_alwaysnetwork || netlbl_enabled() || selinux_xfrm_enabled());
}
/*
@@ -309,8 +327,11 @@ enum {
Opt_defcontext = 3,
Opt_rootcontext = 4,
Opt_labelsupport = 5,
+ Opt_nextmntopt = 6,
};
+#define NUM_SEL_MNT_OPTS (Opt_nextmntopt - 1)
+
static const match_table_t tokens = {
{Opt_context, CONTEXT_STR "%s"},
{Opt_fscontext, FSCONTEXT_STR "%s"},
@@ -355,6 +376,29 @@ static int may_context_mount_inode_relabel(u32 sid,
return rc;
}
+static int selinux_is_sblabel_mnt(struct super_block *sb)
+{
+ struct superblock_security_struct *sbsec = sb->s_security;
+
+ if (sbsec->behavior == SECURITY_FS_USE_XATTR ||
+ sbsec->behavior == SECURITY_FS_USE_TRANS ||
+ sbsec->behavior == SECURITY_FS_USE_TASK)
+ return 1;
+
+ /* Special handling for sysfs. Is genfs but also has setxattr handler*/
+ if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
+ return 1;
+
+ /*
+ * Special handling for rootfs. Is genfs but supports
+ * setting SELinux context on in-core inodes.
+ */
+ if (strncmp(sb->s_type->name, "rootfs", sizeof("rootfs")) == 0)
+ return 1;
+
+ return 0;
+}
+
static int sb_finish_set_opts(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
@@ -369,8 +413,8 @@ static int sb_finish_set_opts(struct super_block *sb)
the first boot of the SELinux kernel before we have
assigned xattr values to the filesystem. */
if (!root_inode->i_op->getxattr) {
- printk(KERN_WARNING "SELinux: (dev %s, type %s) has no "
- "xattr support\n", sb->s_id, sb->s_type->name);
+ printk(KERN_WARNING "SELinux: (dev %s, type "SB_TYPE_FMT") has no "
+ "xattr support\n", sb->s_id, SB_TYPE_ARGS(sb));
rc = -EOPNOTSUPP;
goto out;
}
@@ -378,35 +422,27 @@ static int sb_finish_set_opts(struct super_block *sb)
if (rc < 0 && rc != -ENODATA) {
if (rc == -EOPNOTSUPP)
printk(KERN_WARNING "SELinux: (dev %s, type "
- "%s) has no security xattr handler\n",
- sb->s_id, sb->s_type->name);
+ SB_TYPE_FMT") has no security xattr handler\n",
+ sb->s_id, SB_TYPE_ARGS(sb));
else
printk(KERN_WARNING "SELinux: (dev %s, type "
- "%s) getxattr errno %d\n", sb->s_id,
- sb->s_type->name, -rc);
+ SB_TYPE_FMT") getxattr errno %d\n", sb->s_id,
+ SB_TYPE_ARGS(sb), -rc);
goto out;
}
}
- sbsec->flags |= (SE_SBINITIALIZED | SE_SBLABELSUPP);
-
if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
- printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
- sb->s_id, sb->s_type->name);
+ printk(KERN_ERR "SELinux: initialized (dev %s, type "SB_TYPE_FMT"), unknown behavior\n",
+ sb->s_id, SB_TYPE_ARGS(sb));
else
- printk(KERN_DEBUG "SELinux: initialized (dev %s, type %s), %s\n",
- sb->s_id, sb->s_type->name,
+ printk(KERN_DEBUG "SELinux: initialized (dev %s, type "SB_TYPE_FMT"), %s\n",
+ sb->s_id, SB_TYPE_ARGS(sb),
labeling_behaviors[sbsec->behavior-1]);
- if (sbsec->behavior == SECURITY_FS_USE_GENFS ||
- sbsec->behavior == SECURITY_FS_USE_MNTPOINT ||
- sbsec->behavior == SECURITY_FS_USE_NONE ||
- sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
- sbsec->flags &= ~SE_SBLABELSUPP;
-
- /* Special handling for sysfs. Is genfs but also has setxattr handler*/
- if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
- sbsec->flags |= SE_SBLABELSUPP;
+ sbsec->flags |= SE_SBINITIALIZED;
+ if (selinux_is_sblabel_mnt(sb))
+ sbsec->flags |= SBLABEL_MNT;
/* Initialize the root inode. */
rc = inode_doinit_with_dentry(root_inode, root);
@@ -460,15 +496,18 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
if (!ss_initialized)
return -EINVAL;
+ /* make sure we always check enough bits to cover the mask */
+ BUILD_BUG_ON(SE_MNTMASK >= (1 << NUM_SEL_MNT_OPTS));
+
tmp = sbsec->flags & SE_MNTMASK;
/* count the number of mount options for this sb */
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < NUM_SEL_MNT_OPTS; i++) {
if (tmp & 0x01)
opts->num_mnt_opts++;
tmp >>= 1;
}
/* Check if the Label support flag is set */
- if (sbsec->flags & SE_SBLABELSUPP)
+ if (sbsec->flags & SBLABEL_MNT)
opts->num_mnt_opts++;
opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC);
@@ -515,9 +554,9 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
opts->mnt_opts[i] = context;
opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT;
}
- if (sbsec->flags & SE_SBLABELSUPP) {
+ if (sbsec->flags & SBLABEL_MNT) {
opts->mnt_opts[i] = NULL;
- opts->mnt_opts_flags[i++] = SE_SBLABELSUPP;
+ opts->mnt_opts_flags[i++] = SBLABEL_MNT;
}
BUG_ON(i != opts->num_mnt_opts);
@@ -561,7 +600,6 @@ static int selinux_set_mnt_opts(struct super_block *sb,
const struct cred *cred = current_cred();
int rc = 0, i;
struct superblock_security_struct *sbsec = sb->s_security;
- const char *name = sb->s_type->name;
struct inode *inode = sbsec->sb->s_root->d_inode;
struct inode_security_struct *root_isec = inode->i_security;
u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0;
@@ -614,14 +652,14 @@ static int selinux_set_mnt_opts(struct super_block *sb,
for (i = 0; i < num_opts; i++) {
u32 sid;
- if (flags[i] == SE_SBLABELSUPP)
+ if (flags[i] == SBLABEL_MNT)
continue;
rc = security_context_to_sid(mount_options[i],
strlen(mount_options[i]), &sid);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_to_sid"
- "(%s) failed for (dev %s, type %s) errno=%d\n",
- mount_options[i], sb->s_id, name, rc);
+ "(%s) failed for (dev %s, type "SB_TYPE_FMT") errno=%d\n",
+ mount_options[i], sb->s_id, SB_TYPE_ARGS(sb), rc);
goto out;
}
switch (flags[i]) {
@@ -685,9 +723,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
* Determine the labeling behavior to use for this
* filesystem type.
*/
- rc = security_fs_use((sbsec->flags & SE_SBPROC) ?
- "proc" : sb->s_type->name,
- &sbsec->behavior, &sbsec->sid);
+ rc = security_fs_use(sb);
if (rc) {
printk(KERN_WARNING
"%s: security_fs_use(%s) returned %d\n",
@@ -770,7 +806,8 @@ out:
out_double_mount:
rc = -EINVAL;
printk(KERN_WARNING "SELinux: mount invalid. Same superblock, different "
- "security settings for (dev %s, type %s)\n", sb->s_id, name);
+ "security settings for (dev %s, type "SB_TYPE_FMT")\n", sb->s_id,
+ SB_TYPE_ARGS(sb));
goto out;
}
@@ -1037,7 +1074,7 @@ static void selinux_write_opts(struct seq_file *m,
case DEFCONTEXT_MNT:
prefix = DEFCONTEXT_STR;
break;
- case SE_SBLABELSUPP:
+ case SBLABEL_MNT:
seq_putc(m, ',');
seq_puts(m, LABELSUPP_STR);
continue;
@@ -1649,7 +1686,7 @@ static int may_create(struct inode *dir,
if (rc)
return rc;
- if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
+ if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
rc = security_transition_sid(sid, dsec->sid, tclass,
&dentry->d_name, &newsid);
if (rc)
@@ -2437,14 +2474,14 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
u32 sid;
size_t len;
- if (flags[i] == SE_SBLABELSUPP)
+ if (flags[i] == SBLABEL_MNT)
continue;
len = strlen(mount_options[i]);
rc = security_context_to_sid(mount_options[i], len, &sid);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_to_sid"
- "(%s) failed for (dev %s, type %s) errno=%d\n",
- mount_options[i], sb->s_id, sb->s_type->name, rc);
+ "(%s) failed for (dev %s, type "SB_TYPE_FMT") errno=%d\n",
+ mount_options[i], sb->s_id, SB_TYPE_ARGS(sb), rc);
goto out_free_opts;
}
rc = -EINVAL;
@@ -2482,8 +2519,8 @@ out_free_secdata:
return rc;
out_bad_option:
printk(KERN_WARNING "SELinux: unable to change security options "
- "during remount (dev %s, type=%s)\n", sb->s_id,
- sb->s_type->name);
+ "during remount (dev %s, type "SB_TYPE_FMT")\n", sb->s_id,
+ SB_TYPE_ARGS(sb));
goto out_free_opts;
}
@@ -2606,7 +2643,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
if ((sbsec->flags & SE_SBINITIALIZED) &&
(sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
newsid = sbsec->mntpoint_sid;
- else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
+ else if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
rc = security_transition_sid(sid, dsec->sid,
inode_mode_to_security_class(inode->i_mode),
qstr, &newsid);
@@ -2628,7 +2665,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
isec->initialized = 1;
}
- if (!ss_initialized || !(sbsec->flags & SE_SBLABELSUPP))
+ if (!ss_initialized || !(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
if (name)
@@ -2830,7 +2867,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
return selinux_inode_setotherxattr(dentry, name);
sbsec = inode->i_sb->s_security;
- if (!(sbsec->flags & SE_SBLABELSUPP))
+ if (!(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
if (!inode_owner_or_capable(inode))
@@ -3791,8 +3828,12 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
u32 nlbl_sid;
u32 nlbl_type;
- selinux_skb_xfrm_sid(skb, &xfrm_sid);
- selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
+ err = selinux_skb_xfrm_sid(skb, &xfrm_sid);
+ if (unlikely(err))
+ return -EACCES;
+ err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
+ if (unlikely(err))
+ return -EACCES;
err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid);
if (unlikely(err)) {
@@ -3928,7 +3969,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
if (snum) {
int low, high;
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(sock_net(sk), &low, &high);
if (snum < max(PROT_SOCK, low) || snum > high) {
err = sel_netport_sid(sk->sk_protocol,
@@ -4246,7 +4287,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
return selinux_sock_rcv_skb_compat(sk, skb, family);
secmark_active = selinux_secmark_enabled();
- peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
if (!secmark_active && !peerlbl_active)
return 0;
@@ -4628,7 +4669,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
secmark_active = selinux_secmark_enabled();
netlbl_active = netlbl_enabled();
- peerlbl_active = netlbl_active || selinux_xfrm_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
if (!secmark_active && !peerlbl_active)
return NF_ACCEPT;
@@ -4667,7 +4708,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
return NF_ACCEPT;
}
-static unsigned int selinux_ipv4_forward(unsigned int hooknum,
+static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4677,7 +4718,7 @@ static unsigned int selinux_ipv4_forward(unsigned int hooknum,
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_forward(unsigned int hooknum,
+static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4709,7 +4750,7 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
return NF_ACCEPT;
}
-static unsigned int selinux_ipv4_output(unsigned int hooknum,
+static unsigned int selinux_ipv4_output(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4780,7 +4821,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
return NF_ACCEPT;
#endif
secmark_active = selinux_secmark_enabled();
- peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
if (!secmark_active && !peerlbl_active)
return NF_ACCEPT;
@@ -4836,7 +4877,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
return NF_ACCEPT;
}
-static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
+static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4846,7 +4887,7 @@ static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_postroute(unsigned int hooknum,
+static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -5784,7 +5825,8 @@ static struct security_operations selinux_ops = {
.xfrm_policy_clone_security = selinux_xfrm_policy_clone,
.xfrm_policy_free_security = selinux_xfrm_policy_free,
.xfrm_policy_delete_security = selinux_xfrm_policy_delete,
- .xfrm_state_alloc_security = selinux_xfrm_state_alloc,
+ .xfrm_state_alloc = selinux_xfrm_state_alloc,
+ .xfrm_state_alloc_acquire = selinux_xfrm_state_alloc_acquire,
.xfrm_state_free_security = selinux_xfrm_state_free,
.xfrm_state_delete_security = selinux_xfrm_state_delete,
.xfrm_policy_lookup = selinux_xfrm_policy_lookup,
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index aa47bcabb5f6..b1dfe1049450 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -58,8 +58,8 @@ struct superblock_security_struct {
u32 sid; /* SID of file system superblock */
u32 def_sid; /* default SID for labeling */
u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */
- unsigned int behavior; /* labeling behavior */
- unsigned char flags; /* which mount options were specified */
+ unsigned short behavior; /* labeling behavior */
+ unsigned short flags; /* which mount options were specified */
struct mutex lock;
struct list_head isec_head;
spinlock_t isec_lock;
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 8fd8e18ea340..fe341ae37004 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -45,14 +45,15 @@
/* Mask for just the mount related flags */
#define SE_MNTMASK 0x0f
/* Super block security struct flags for mount options */
+/* BE CAREFUL, these need to be the low order bits for selinux_get_mnt_opts */
#define CONTEXT_MNT 0x01
#define FSCONTEXT_MNT 0x02
#define ROOTCONTEXT_MNT 0x04
#define DEFCONTEXT_MNT 0x08
+#define SBLABEL_MNT 0x10
/* Non-mount related flags */
-#define SE_SBINITIALIZED 0x10
-#define SE_SBPROC 0x20
-#define SE_SBLABELSUPP 0x40
+#define SE_SBINITIALIZED 0x0100
+#define SE_SBPROC 0x0200
#define CONTEXT_STR "context="
#define FSCONTEXT_STR "fscontext="
@@ -68,12 +69,15 @@ extern int selinux_enabled;
enum {
POLICYDB_CAPABILITY_NETPEER,
POLICYDB_CAPABILITY_OPENPERM,
+ POLICYDB_CAPABILITY_REDHAT1,
+ POLICYDB_CAPABILITY_ALWAYSNETWORK,
__POLICYDB_CAPABILITY_MAX
};
#define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
extern int selinux_policycap_netpeer;
extern int selinux_policycap_openperm;
+extern int selinux_policycap_alwaysnetwork;
/*
* type_datum properties
@@ -172,8 +176,7 @@ int security_get_allow_unknown(void);
#define SECURITY_FS_USE_NATIVE 7 /* use native label support */
#define SECURITY_FS_USE_MAX 7 /* Highest SECURITY_FS_USE_XXX */
-int security_fs_use(const char *fstype, unsigned int *behavior,
- u32 *sid);
+int security_fs_use(struct super_block *sb);
int security_genfs_sid(const char *fstype, char *name, u16 sclass,
u32 *sid);
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 6713f04e30ba..0dec76c64cf5 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -10,29 +10,21 @@
#include <net/flow.h>
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx);
+ struct xfrm_user_sec_ctx *uctx);
int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp);
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
int selinux_xfrm_state_alloc(struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx, u32 secid);
+ struct xfrm_user_sec_ctx *uctx);
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid);
void selinux_xfrm_state_free(struct xfrm_state *x);
int selinux_xfrm_state_delete(struct xfrm_state *x);
int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
- struct xfrm_policy *xp, const struct flowi *fl);
-
-/*
- * Extract the security blob from the sock (it's actually on the socket)
- */
-static inline struct inode_security_struct *get_sock_isec(struct sock *sk)
-{
- if (!sk->sk_socket)
- return NULL;
-
- return SOCK_INODE(sk->sk_socket)->i_security;
-}
+ struct xfrm_policy *xp,
+ const struct flowi *fl);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
extern atomic_t selinux_xfrm_refcount;
@@ -42,10 +34,10 @@ static inline int selinux_xfrm_enabled(void)
return (atomic_read(&selinux_xfrm_refcount) > 0);
}
-int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb,
- struct common_audit_data *ad);
-int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad, u8 proto);
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad);
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad, u8 proto);
int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
static inline void selinux_xfrm_notify_policyload(void)
@@ -64,19 +56,21 @@ static inline int selinux_xfrm_enabled(void)
return 0;
}
-static inline int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad)
+static inline int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad)
{
return 0;
}
-static inline int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad, u8 proto)
+static inline int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad,
+ u8 proto)
{
return 0;
}
-static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid,
+ int ckall)
{
*sid = SECSID_NULL;
return 0;
@@ -87,10 +81,9 @@ static inline void selinux_xfrm_notify_policyload(void)
}
#endif
-static inline void selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid)
+static inline int selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid)
{
- int err = selinux_xfrm_decode_session(skb, sid, 0);
- BUG_ON(err);
+ return selinux_xfrm_decode_session(skb, sid, 0);
}
#endif /* _SELINUX_XFRM_H_ */
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index da4b8b233280..6235d052338b 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -442,8 +442,7 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
sksec->nlbl_state != NLBL_CONNLABELED)
return 0;
- local_bh_disable();
- bh_lock_sock_nested(sk);
+ lock_sock(sk);
/* connected sockets are allowed to disconnect when the address family
* is set to AF_UNSPEC, if that is what is happening we want to reset
@@ -464,7 +463,6 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
sksec->nlbl_state = NLBL_CONNLABELED;
socket_connect_return:
- bh_unlock_sock(sk);
- local_bh_enable();
+ release_sock(sk);
return rc;
}
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index c5454c0477c3..03a72c32afd7 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -166,6 +166,7 @@ static void sel_netnode_insert(struct sel_netnode *node)
break;
default:
BUG();
+ return;
}
/* we need to impose a limit on the growth of the hash table so check
@@ -225,6 +226,7 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
break;
default:
BUG();
+ ret = -EINVAL;
}
if (ret != 0)
goto out;
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index ff427733c290..5122affe06a8 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -44,7 +44,9 @@
/* Policy capability filenames */
static char *policycap_names[] = {
"network_peer_controls",
- "open_perms"
+ "open_perms",
+ "redhat1",
+ "always_check_network"
};
unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 30f119b1d1ec..820313a04d49 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -213,7 +213,12 @@ netlbl_import_failure:
}
#endif /* CONFIG_NETLABEL */
-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
+/*
+ * Check to see if all the bits set in e2 are also set in e1. Optionally,
+ * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
+ * last_e2bit.
+ */
+int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
{
struct ebitmap_node *n1, *n2;
int i;
@@ -223,14 +228,25 @@ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
n1 = e1->node;
n2 = e2->node;
+
while (n1 && n2 && (n1->startbit <= n2->startbit)) {
if (n1->startbit < n2->startbit) {
n1 = n1->next;
continue;
}
- for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
+ for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
+ i--; /* Skip trailing NULL map entries */
+ if (last_e2bit && (i >= 0)) {
+ u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
+ __fls(n2->maps[i]);
+ if (lastsetbit > last_e2bit)
+ return 0;
+ }
+
+ while (i >= 0) {
if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
return 0;
+ i--;
}
n1 = n1->next;
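
The new early-exit works by computing the index of the highest set bit in the node and comparing it against last_e2bit before doing any word-by-word AND. A worked example with 64-bit map words (EBITMAP_UNIT_SIZE == 64):

        /* n2->startbit = 128, highest non-NULL map word is i == 2,
         * __fls(n2->maps[2]) == 5
         *   => lastsetbit = 128 + 2 * 64 + 5 = 261
         * If the caller passed last_e2bit == 255 (e.g. p_cats.nprim),
         * 261 > 255, so ebitmap_contains() returns 0 without touching the
         * remaining map words.  last_e2bit == 0 disables the bound,
         * preserving the old behaviour for callers like mls_level_dom().
         */
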
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index 922f8afa89dd..712c8a7b8e8b 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -16,7 +16,13 @@
#include <net/netlabel.h>
-#define EBITMAP_UNIT_NUMS ((32 - sizeof(void *) - sizeof(u32)) \
+#ifdef CONFIG_64BIT
+#define EBITMAP_NODE_SIZE 64
+#else
+#define EBITMAP_NODE_SIZE 32
+#endif
+
+#define EBITMAP_UNIT_NUMS ((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\
/ sizeof(unsigned long))
#define EBITMAP_UNIT_SIZE BITS_PER_LONG
#define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
@@ -117,7 +123,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2);
+int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
void ebitmap_destroy(struct ebitmap *e);
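
Plugging typical type sizes into the new macro shows what the node-size bump buys (assuming LP64 on 64-bit and ILP32 on 32-bit, and ignoring any structure padding):

        /* 64-bit: EBITMAP_UNIT_NUMS = (64 - 8 - 4) / 8 = 6 longs
         *         => 6 * 64 = 384 bits per node (previously (32-8-4)/8 = 2
         *            longs, i.e. only 128 bits per node)
         * 32-bit: EBITMAP_UNIT_NUMS = (32 - 4 - 4) / 4 = 6 longs
         *         => 6 * 32 = 192 bits per node, unchanged from before
         */
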
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 40de8d3f208e..c85bc1ec040c 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -160,8 +160,6 @@ void mls_sid_to_context(struct context *context,
int mls_level_isvalid(struct policydb *p, struct mls_level *l)
{
struct level_datum *levdatum;
- struct ebitmap_node *node;
- int i;
if (!l->sens || l->sens > p->p_levels.nprim)
return 0;
@@ -170,19 +168,13 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l)
if (!levdatum)
return 0;
- ebitmap_for_each_positive_bit(&l->cat, node, i) {
- if (i > p->p_cats.nprim)
- return 0;
- if (!ebitmap_get_bit(&levdatum->level->cat, i)) {
- /*
- * Category may not be associated with
- * sensitivity.
- */
- return 0;
- }
- }
-
- return 1;
+ /*
+ * Return 1 iff all the bits set in l->cat are also set in
+ * levdatum->level->cat and no bit in l->cat is larger than
+ * p->p_cats.nprim.
+ */
+ return ebitmap_contains(&levdatum->level->cat, &l->cat,
+ p->p_cats.nprim);
}
int mls_range_isvalid(struct policydb *p, struct mls_range *r)
diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
index 03bed52a8052..e93648774137 100644
--- a/security/selinux/ss/mls_types.h
+++ b/security/selinux/ss/mls_types.h
@@ -35,7 +35,7 @@ static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
{
return ((l1->sens >= l2->sens) &&
- ebitmap_contains(&l1->cat, &l2->cat));
+ ebitmap_contains(&l1->cat, &l2->cat, 0));
}
#define mls_level_incomp(l1, l2) \
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index c8adde3aff8f..f6195ebde3c9 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -3203,9 +3203,8 @@ static int range_write_helper(void *key, void *data, void *ptr)
static int range_write(struct policydb *p, void *fp)
{
- size_t nel;
__le32 buf[1];
- int rc;
+ int rc, nel;
struct policy_data pd;
pd.p = p;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index b4feecc3fe01..ee470a0b5c27 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -72,6 +72,7 @@
int selinux_policycap_netpeer;
int selinux_policycap_openperm;
+int selinux_policycap_alwaysnetwork;
static DEFINE_RWLOCK(policy_rwlock);
@@ -1812,6 +1813,8 @@ static void security_load_policycaps(void)
POLICYDB_CAPABILITY_NETPEER);
selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps,
POLICYDB_CAPABILITY_OPENPERM);
+ selinux_policycap_alwaysnetwork = ebitmap_get_bit(&policydb.policycaps,
+ POLICYDB_CAPABILITY_ALWAYSNETWORK);
}
static int security_preserve_bools(struct policydb *p);
@@ -2323,43 +2326,74 @@ out:
/**
* security_fs_use - Determine how to handle labeling for a filesystem.
- * @fstype: filesystem type
- * @behavior: labeling behavior
- * @sid: SID for filesystem (superblock)
+ * @sb: superblock in question
*/
-int security_fs_use(
- const char *fstype,
- unsigned int *behavior,
- u32 *sid)
+int security_fs_use(struct super_block *sb)
{
int rc = 0;
struct ocontext *c;
+ struct superblock_security_struct *sbsec = sb->s_security;
+ const char *fstype = sb->s_type->name;
+ const char *subtype = (sb->s_subtype && sb->s_subtype[0]) ? sb->s_subtype : NULL;
+ struct ocontext *base = NULL;
read_lock(&policy_rwlock);
- c = policydb.ocontexts[OCON_FSUSE];
- while (c) {
- if (strcmp(fstype, c->u.name) == 0)
+ for (c = policydb.ocontexts[OCON_FSUSE]; c; c = c->next) {
+ char *sub;
+ int baselen;
+
+ baselen = strlen(fstype);
+
+ /* if base does not match, this is not the one */
+ if (strncmp(fstype, c->u.name, baselen))
+ continue;
+
+ /* if there is no subtype, this is the one! */
+ if (!subtype)
+ break;
+
+ /* skip past the base in this entry */
+ sub = c->u.name + baselen;
+
+ /* entry is only a base. save it. keep looking for subtype */
+ if (sub[0] == '\0') {
+ base = c;
+ continue;
+ }
+
+ /* entry is not followed by a subtype, so it is not a match */
+ if (sub[0] != '.')
+ continue;
+
+ /* whew, we found a subtype of this fstype */
+ sub++; /* move past '.' */
+
+ /* exact match of fstype AND subtype */
+ if (!strcmp(subtype, sub))
break;
- c = c->next;
}
+ /* in case we had found an fstype match but no subtype match */
+ if (!c)
+ c = base;
+
if (c) {
- *behavior = c->v.behavior;
+ sbsec->behavior = c->v.behavior;
if (!c->sid[0]) {
rc = sidtab_context_to_sid(&sidtab, &c->context[0],
&c->sid[0]);
if (rc)
goto out;
}
- *sid = c->sid[0];
+ sbsec->sid = c->sid[0];
} else {
- rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, sid);
+ rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, &sbsec->sid);
if (rc) {
- *behavior = SECURITY_FS_USE_NONE;
+ sbsec->behavior = SECURITY_FS_USE_NONE;
rc = 0;
} else {
- *behavior = SECURITY_FS_USE_GENFS;
+ sbsec->behavior = SECURITY_FS_USE_GENFS;
}
}
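
The base/subtype walk above is easiest to see with a concrete (purely illustrative) filesystem:

        /* sb->s_type->name = "fuse", sb->s_subtype = "glusterfs"
         * policy entries:
         *   "fuse"            -> base matches, no '.', saved in 'base'
         *   "fuseblk"         -> base matches but next char is not '.', skipped
         *   "fuse.glusterfs"  -> exact base+subtype match, loop breaks here
         * If no "fuse.glusterfs" entry exists, the loop finishes with
         * c == NULL and the saved "fuse" entry is used as the fallback.
         */
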
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index d03081886214..a91d205ec0c6 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -56,7 +56,7 @@
atomic_t selinux_xfrm_refcount = ATOMIC_INIT(0);
/*
- * Returns true if an LSM/SELinux context
+ * Returns true if the context is an LSM/SELinux context.
*/
static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
{
@@ -66,7 +66,7 @@ static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
}
/*
- * Returns true if the xfrm contains a security blob for SELinux
+ * Returns true if the xfrm contains a security blob for SELinux.
*/
static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
{
@@ -74,48 +74,111 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
}
/*
- * LSM hook implementation that authorizes that a flow can use
- * a xfrm policy rule.
+ * Allocates an xfrm_sec_ctx and populates it using the supplied security
+ * xfrm_user_sec_ctx context.
*/
-int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *uctx)
{
int rc;
- u32 sel_sid;
+ const struct task_security_struct *tsec = current_security();
+ struct xfrm_sec_ctx *ctx = NULL;
+ u32 str_len;
- /* Context sid is either set to label or ANY_ASSOC */
- if (ctx) {
- if (!selinux_authorizable_ctx(ctx))
- return -EINVAL;
-
- sel_sid = ctx->ctx_sid;
- } else
- /*
- * All flows should be treated as polmatch'ing an
- * otherwise applicable "non-labeled" policy. This
- * would prevent inadvertent "leaks".
- */
- return 0;
+ if (ctxp == NULL || uctx == NULL ||
+ uctx->ctx_doi != XFRM_SC_DOI_LSM ||
+ uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
+ return -EINVAL;
- rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION,
- ASSOCIATION__POLMATCH,
- NULL);
+ str_len = uctx->ctx_len;
+ if (str_len >= PAGE_SIZE)
+ return -ENOMEM;
- if (rc == -EACCES)
- return -ESRCH;
+ ctx = kmalloc(sizeof(*ctx) + str_len + 1, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->ctx_doi = XFRM_SC_DOI_LSM;
+ ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+ ctx->ctx_len = str_len;
+ memcpy(ctx->ctx_str, &uctx[1], str_len);
+ ctx->ctx_str[str_len] = '\0';
+ rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid);
+ if (rc)
+ goto err;
+
+ rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, NULL);
+ if (rc)
+ goto err;
+
+ *ctxp = ctx;
+ atomic_inc(&selinux_xfrm_refcount);
+ return 0;
+
+err:
+ kfree(ctx);
return rc;
}
/*
+ * Free the xfrm_sec_ctx structure.
+ */
+static void selinux_xfrm_free(struct xfrm_sec_ctx *ctx)
+{
+ if (!ctx)
+ return;
+
+ atomic_dec(&selinux_xfrm_refcount);
+ kfree(ctx);
+}
+
+/*
+ * Authorize the deletion of a labeled SA or policy rule.
+ */
+static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx)
+{
+ const struct task_security_struct *tsec = current_security();
+
+ if (!ctx)
+ return 0;
+
+ return avc_has_perm(tsec->sid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
+ NULL);
+}
+
+/*
+ * LSM hook implementation that authorizes that a flow can use an xfrm policy
+ * rule.
+ */
+int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+{
+ int rc;
+
+ /* All flows should be treated as polmatch'ing an otherwise applicable
+ * "non-labeled" policy. This would prevent inadvertent "leaks". */
+ if (!ctx)
+ return 0;
+
+ /* Context sid is either set to label or ANY_ASSOC */
+ if (!selinux_authorizable_ctx(ctx))
+ return -EINVAL;
+
+ rc = avc_has_perm(fl_secid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, NULL);
+ return (rc == -EACCES ? -ESRCH : rc);
+}
+
+/*
* LSM hook implementation that authorizes that a state matches
* the given policy, flow combo.
*/
-
-int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp,
- const struct flowi *fl)
+int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
+ struct xfrm_policy *xp,
+ const struct flowi *fl)
{
u32 state_sid;
- int rc;
if (!xp->security)
if (x->security)
@@ -138,187 +201,80 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *
if (fl->flowi_secid != state_sid)
return 0;
- rc = avc_has_perm(fl->flowi_secid, state_sid, SECCLASS_ASSOCIATION,
- ASSOCIATION__SENDTO,
- NULL)? 0:1;
-
- /*
- * We don't need a separate SA Vs. policy polmatch check
- * since the SA is now of the same label as the flow and
- * a flow Vs. policy polmatch check had already happened
- * in selinux_xfrm_policy_lookup() above.
- */
-
- return rc;
+ /* We don't need a separate SA Vs. policy polmatch check since the SA
+ * is now of the same label as the flow and a flow Vs. policy polmatch
+ * check had already happened in selinux_xfrm_policy_lookup() above. */
+ return (avc_has_perm(fl->flowi_secid, state_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO,
+ NULL) ? 0 : 1);
}
/*
* LSM hook implementation that checks and/or returns the xfrm sid for the
* incoming packet.
*/
-
int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
{
+ u32 sid_session = SECSID_NULL;
struct sec_path *sp;
- *sid = SECSID_NULL;
-
if (skb == NULL)
- return 0;
+ goto out;
sp = skb->sp;
if (sp) {
- int i, sid_set = 0;
+ int i;
- for (i = sp->len-1; i >= 0; i--) {
+ for (i = sp->len - 1; i >= 0; i--) {
struct xfrm_state *x = sp->xvec[i];
if (selinux_authorizable_xfrm(x)) {
struct xfrm_sec_ctx *ctx = x->security;
- if (!sid_set) {
- *sid = ctx->ctx_sid;
- sid_set = 1;
-
+ if (sid_session == SECSID_NULL) {
+ sid_session = ctx->ctx_sid;
if (!ckall)
- break;
- } else if (*sid != ctx->ctx_sid)
+ goto out;
+ } else if (sid_session != ctx->ctx_sid) {
+ *sid = SECSID_NULL;
return -EINVAL;
+ }
}
}
}
- return 0;
-}
-
-/*
- * Security blob allocation for xfrm_policy and xfrm_state
- * CTX does not have a meaningful value on input
- */
-static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx, u32 sid)
-{
- int rc = 0;
- const struct task_security_struct *tsec = current_security();
- struct xfrm_sec_ctx *ctx = NULL;
- char *ctx_str = NULL;
- u32 str_len;
-
- BUG_ON(uctx && sid);
-
- if (!uctx)
- goto not_from_user;
-
- if (uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
- return -EINVAL;
-
- str_len = uctx->ctx_len;
- if (str_len >= PAGE_SIZE)
- return -ENOMEM;
-
- *ctxp = ctx = kmalloc(sizeof(*ctx) +
- str_len + 1,
- GFP_KERNEL);
-
- if (!ctx)
- return -ENOMEM;
-
- ctx->ctx_doi = uctx->ctx_doi;
- ctx->ctx_len = str_len;
- ctx->ctx_alg = uctx->ctx_alg;
-
- memcpy(ctx->ctx_str,
- uctx+1,
- str_len);
- ctx->ctx_str[str_len] = 0;
- rc = security_context_to_sid(ctx->ctx_str,
- str_len,
- &ctx->ctx_sid);
-
- if (rc)
- goto out;
-
- /*
- * Does the subject have permission to set security context?
- */
- rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION,
- ASSOCIATION__SETCONTEXT, NULL);
- if (rc)
- goto out;
-
- return rc;
-
-not_from_user:
- rc = security_sid_to_context(sid, &ctx_str, &str_len);
- if (rc)
- goto out;
-
- *ctxp = ctx = kmalloc(sizeof(*ctx) +
- str_len,
- GFP_ATOMIC);
-
- if (!ctx) {
- rc = -ENOMEM;
- goto out;
- }
-
- ctx->ctx_doi = XFRM_SC_DOI_LSM;
- ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
- ctx->ctx_sid = sid;
- ctx->ctx_len = str_len;
- memcpy(ctx->ctx_str,
- ctx_str,
- str_len);
-
- goto out2;
-
out:
- *ctxp = NULL;
- kfree(ctx);
-out2:
- kfree(ctx_str);
- return rc;
+ *sid = sid_session;
+ return 0;
}
/*
- * LSM hook implementation that allocs and transfers uctx spec to
- * xfrm_policy.
+ * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
*/
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
struct xfrm_user_sec_ctx *uctx)
{
- int err;
-
- BUG_ON(!uctx);
-
- err = selinux_xfrm_sec_ctx_alloc(ctxp, uctx, 0);
- if (err == 0)
- atomic_inc(&selinux_xfrm_refcount);
-
- return err;
+ return selinux_xfrm_alloc_user(ctxp, uctx);
}
-
/*
- * LSM hook implementation that copies security data structure from old to
- * new for policy cloning.
+ * LSM hook implementation that copies security data structure from old to new
+ * for policy cloning.
*/
int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp)
{
struct xfrm_sec_ctx *new_ctx;
- if (old_ctx) {
- new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len,
- GFP_ATOMIC);
- if (!new_ctx)
- return -ENOMEM;
+ if (!old_ctx)
+ return 0;
+
+ new_ctx = kmemdup(old_ctx, sizeof(*old_ctx) + old_ctx->ctx_len,
+ GFP_ATOMIC);
+ if (!new_ctx)
+ return -ENOMEM;
+ atomic_inc(&selinux_xfrm_refcount);
+ *new_ctxp = new_ctx;
- memcpy(new_ctx, old_ctx, sizeof(*new_ctx));
- memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len);
- atomic_inc(&selinux_xfrm_refcount);
- *new_ctxp = new_ctx;
- }
return 0;
}
@@ -327,8 +283,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
*/
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
- atomic_dec(&selinux_xfrm_refcount);
- kfree(ctx);
+ selinux_xfrm_free(ctx);
}
/*
@@ -336,31 +291,55 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
*/
int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
- const struct task_security_struct *tsec = current_security();
-
- if (!ctx)
- return 0;
+ return selinux_xfrm_delete(ctx);
+}
- return avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
- NULL);
+/*
+ * LSM hook implementation that allocates a xfrm_sec_state, populates it using
+ * the supplied security context, and assigns it to the xfrm_state.
+ */
+int selinux_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *uctx)
+{
+ return selinux_xfrm_alloc_user(&x->security, uctx);
}
/*
- * LSM hook implementation that allocs and transfers sec_ctx spec to
- * xfrm_state.
+ * LSM hook implementation that allocates a xfrm_sec_state and populates based
+ * on a secid.
*/
-int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uctx,
- u32 secid)
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid)
{
- int err;
+ int rc;
+ struct xfrm_sec_ctx *ctx;
+ char *ctx_str = NULL;
+ int str_len;
+
+ if (!polsec)
+ return 0;
- BUG_ON(!x);
+ if (secid == 0)
+ return -EINVAL;
- err = selinux_xfrm_sec_ctx_alloc(&x->security, uctx, secid);
- if (err == 0)
- atomic_inc(&selinux_xfrm_refcount);
- return err;
+ rc = security_sid_to_context(secid, &ctx_str, &str_len);
+ if (rc)
+ return rc;
+
+ ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->ctx_doi = XFRM_SC_DOI_LSM;
+ ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+ ctx->ctx_sid = secid;
+ ctx->ctx_len = str_len;
+ memcpy(ctx->ctx_str, ctx_str, str_len);
+ kfree(ctx_str);
+
+ x->security = ctx;
+ atomic_inc(&selinux_xfrm_refcount);
+ return 0;
}
/*
@@ -368,24 +347,15 @@ int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uct
*/
void selinux_xfrm_state_free(struct xfrm_state *x)
{
- atomic_dec(&selinux_xfrm_refcount);
- kfree(x->security);
+ selinux_xfrm_free(x->security);
}
- /*
- * LSM hook implementation that authorizes deletion of labeled SAs.
- */
+/*
+ * LSM hook implementation that authorizes deletion of labeled SAs.
+ */
int selinux_xfrm_state_delete(struct xfrm_state *x)
{
- const struct task_security_struct *tsec = current_security();
- struct xfrm_sec_ctx *ctx = x->security;
-
- if (!ctx)
- return 0;
-
- return avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
- NULL);
+ return selinux_xfrm_delete(x->security);
}
/*
@@ -395,14 +365,12 @@ int selinux_xfrm_state_delete(struct xfrm_state *x)
* we need to check for unlabelled access since this may not have
* gone thru the IPSec process.
*/
-int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad)
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad)
{
- int i, rc = 0;
- struct sec_path *sp;
- u32 sel_sid = SECINITSID_UNLABELED;
-
- sp = skb->sp;
+ int i;
+ struct sec_path *sp = skb->sp;
+ u32 peer_sid = SECINITSID_UNLABELED;
if (sp) {
for (i = 0; i < sp->len; i++) {
@@ -410,23 +378,17 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
if (x && selinux_authorizable_xfrm(x)) {
struct xfrm_sec_ctx *ctx = x->security;
- sel_sid = ctx->ctx_sid;
+ peer_sid = ctx->ctx_sid;
break;
}
}
}
- /*
- * This check even when there's no association involved is
- * intended, according to Trent Jaeger, to make sure a
- * process can't engage in non-ipsec communication unless
- * explicitly allowed by policy.
- */
-
- rc = avc_has_perm(isec_sid, sel_sid, SECCLASS_ASSOCIATION,
- ASSOCIATION__RECVFROM, ad);
-
- return rc;
+ /* This check even when there's no association involved is intended,
+ * according to Trent Jaeger, to make sure a process can't engage in
+ * non-IPsec communication unless explicitly allowed by policy. */
+ return avc_has_perm(sk_sid, peer_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, ad);
}
/*
@@ -436,49 +398,38 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
* If we do have a authorizable security association, then it has already been
* checked in the selinux_xfrm_state_pol_flow_match hook above.
*/
-int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad, u8 proto)
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad, u8 proto)
{
struct dst_entry *dst;
- int rc = 0;
-
- dst = skb_dst(skb);
-
- if (dst) {
- struct dst_entry *dst_test;
-
- for (dst_test = dst; dst_test != NULL;
- dst_test = dst_test->child) {
- struct xfrm_state *x = dst_test->xfrm;
-
- if (x && selinux_authorizable_xfrm(x))
- goto out;
- }
- }
switch (proto) {
case IPPROTO_AH:
case IPPROTO_ESP:
case IPPROTO_COMP:
- /*
- * We should have already seen this packet once before
- * it underwent xfrm(s). No need to subject it to the
- * unlabeled check.
- */
- goto out;
+ /* We should have already seen this packet once before it
+ * underwent xfrm(s). No need to subject it to the unlabeled
+ * check. */
+ return 0;
default:
break;
}
- /*
- * This check even when there's no association involved is
- * intended, according to Trent Jaeger, to make sure a
- * process can't engage in non-ipsec communication unless
- * explicitly allowed by policy.
- */
+ dst = skb_dst(skb);
+ if (dst) {
+ struct dst_entry *iter;
- rc = avc_has_perm(isec_sid, SECINITSID_UNLABELED, SECCLASS_ASSOCIATION,
- ASSOCIATION__SENDTO, ad);
-out:
- return rc;
+ for (iter = dst; iter != NULL; iter = iter->child) {
+ struct xfrm_state *x = iter->xfrm;
+
+ if (x && selinux_authorizable_xfrm(x))
+ return 0;
+ }
+ }
+
+ /* This check even when there's no association involved is intended,
+ * according to Trent Jaeger, to make sure a process can't engage in
+ * non-IPsec communication unless explicitly allowed by policy. */
+ return avc_has_perm(sk_sid, SECINITSID_UNLABELED,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, ad);
}
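
A minimal sketch (not part of the patch) of how the consolidated SELinux xfrm hooks above fit together; the hook names are the ones introduced in the hunks, but the caller and its name are purely illustrative and error handling is abbreviated.

/* Illustrative only: rough life cycle of a labeled xfrm policy context. */
static int example_label_policy(struct xfrm_user_sec_ctx *uctx)
{
	struct xfrm_sec_ctx *ctx = NULL;
	int rc;

	/* selinux_xfrm_alloc_user() (via the policy hook) validates the
	 * DOI/algorithm, copies the context string that follows the
	 * xfrm_user_sec_ctx header, maps it to a SID and checks
	 * association:setcontext. */
	rc = selinux_xfrm_policy_alloc(&ctx, uctx);
	if (rc)
		return rc;

	/* ... the policy is installed and used; flow lookups go through
	 * selinux_xfrm_policy_lookup() ... */

	/* Deleting the rule re-checks association:setcontext ... */
	rc = selinux_xfrm_policy_delete(ctx);

	/* ... and freeing it drops selinux_xfrm_refcount. */
	selinux_xfrm_policy_free(ctx);
	return rc;
}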
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 076b8e8a51ab..364cc64fce71 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -177,9 +177,13 @@ struct smk_port_label {
#define SMACK_CIPSO_MAXCATNUM 184 /* 23 * 8 */
/*
- * Flag for transmute access
+ * Flags for untraditional access modes.
+ * It shouldn't be necessary to avoid conflicts with definitions
+ * in fs.h, but do so anyway.
*/
-#define MAY_TRANSMUTE 64
+#define MAY_TRANSMUTE 0x00001000 /* Controls directory labeling */
+#define MAY_LOCK 0x00002000 /* Locks should be writes, but ... */
+
/*
* Just to make the common cases easier to deal with
*/
@@ -188,9 +192,9 @@ struct smk_port_label {
#define MAY_NOT 0
/*
- * Number of access types used by Smack (rwxat)
+ * Number of access types used by Smack (rwxatl)
*/
-#define SMK_NUM_ACCESS_TYPE 5
+#define SMK_NUM_ACCESS_TYPE 6
/* SMACK data */
struct smack_audit_data {
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index b3b59b1e93d6..14293cd9b1e5 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -84,6 +84,8 @@ int log_policy = SMACK_AUDIT_DENIED;
*
* Do the object check first because that is more
* likely to differ.
+ *
+ * Allowing write access implies allowing locking.
*/
int smk_access_entry(char *subject_label, char *object_label,
struct list_head *rule_list)
@@ -99,6 +101,11 @@ int smk_access_entry(char *subject_label, char *object_label,
}
}
+ /*
+ * MAY_WRITE implies MAY_LOCK.
+ */
+ if ((may & MAY_WRITE) == MAY_WRITE)
+ may |= MAY_LOCK;
return may;
}
@@ -245,6 +252,7 @@ out_audit:
static inline void smack_str_from_perm(char *string, int access)
{
int i = 0;
+
if (access & MAY_READ)
string[i++] = 'r';
if (access & MAY_WRITE)
@@ -255,6 +263,8 @@ static inline void smack_str_from_perm(char *string, int access)
string[i++] = 'a';
if (access & MAY_TRANSMUTE)
string[i++] = 't';
+ if (access & MAY_LOCK)
+ string[i++] = 'l';
string[i] = '\0';
}
/**
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 8825375cc031..b0be893ad44d 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -185,7 +185,7 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, ctp);
- rc = smk_curacc(skp->smk_known, MAY_READWRITE, &ad);
+ rc = smk_curacc(skp->smk_known, mode, &ad);
return rc;
}
@@ -1146,7 +1146,7 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd,
* @file: the object
* @cmd: unused
*
- * Returns 0 if current has write access, error code otherwise
+ * Returns 0 if current has lock access, error code otherwise
*/
static int smack_file_lock(struct file *file, unsigned int cmd)
{
@@ -1154,7 +1154,7 @@ static int smack_file_lock(struct file *file, unsigned int cmd)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
- return smk_curacc(file->f_security, MAY_WRITE, &ad);
+ return smk_curacc(file->f_security, MAY_LOCK, &ad);
}
/**
@@ -1178,8 +1178,13 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
switch (cmd) {
case F_GETLK:
+ break;
case F_SETLK:
case F_SETLKW:
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_curacc(file->f_security, MAY_LOCK, &ad);
+ break;
case F_SETOWN:
case F_SETSIG:
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 80f4b4a45725..160aa08e3cd5 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -139,7 +139,7 @@ const char *smack_cipso_option = SMACK_CIPSO_OPTION;
* SMK_LOADLEN: Smack rule length
*/
#define SMK_OACCESS "rwxa"
-#define SMK_ACCESS "rwxat"
+#define SMK_ACCESS "rwxatl"
#define SMK_OACCESSLEN (sizeof(SMK_OACCESS) - 1)
#define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1)
#define SMK_OLOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_OACCESSLEN)
@@ -282,6 +282,10 @@ static int smk_perm_from_str(const char *string)
case 'T':
perm |= MAY_TRANSMUTE;
break;
+ case 'l':
+ case 'L':
+ perm |= MAY_LOCK;
+ break;
default:
return perm;
}
@@ -452,7 +456,7 @@ static ssize_t smk_write_rules_list(struct file *file, const char __user *buf,
/*
* Minor hack for backward compatibility
*/
- if (count != SMK_OLOADLEN && count != SMK_LOADLEN)
+ if (count < SMK_OLOADLEN || count > SMK_LOADLEN)
return -EINVAL;
} else {
if (count >= PAGE_SIZE) {
@@ -592,6 +596,8 @@ static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
seq_putc(s, 'a');
if (srp->smk_access & MAY_TRANSMUTE)
seq_putc(s, 't');
+ if (srp->smk_access & MAY_LOCK)
+ seq_putc(s, 'l');
seq_putc(s, '\n');
}
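
To illustrate the new "l" access mode wired up above, here is a hedged sketch; it treats smk_perm_from_str() as if it were callable from outside smackfs.c purely for demonstration, and the function name is made up.

/* Illustrative only: how "l" composes with the write-implies-lock
 * behaviour added to smk_access_entry() above. */
static void example_lock_modes(void)
{
	int perm;

	/* "l" (or "L") maps to MAY_LOCK in smk_perm_from_str(). */
	perm = smk_perm_from_str("rl");    /* MAY_READ | MAY_LOCK: lock without write */

	/* A plain "rw" rule still permits F_SETLK/F_SETLKW, because
	 * smk_access_entry() ORs MAY_LOCK into any result that already
	 * contains MAY_WRITE. */
	perm = smk_perm_from_str("rw");    /* effectively rw plus implied lock */
	(void)perm;
}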
diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c
index faa317490545..f34153962d07 100644
--- a/sound/aoa/core/gpio-feature.c
+++ b/sound/aoa/core/gpio-feature.c
@@ -10,8 +10,9 @@
* registers.
*/
-#include <asm/pmac_feature.h>
+#include <linux/of_irq.h>
#include <linux/interrupt.h>
+#include <asm/pmac_feature.h>
#include "../aoa.h"
/* TODO: these are lots of global variables
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index 15e76131b501..467836057ee5 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -11,6 +11,8 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <sound/core.h>
diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c
index e6f4633b8dd5..99a466822a7d 100644
--- a/sound/arm/pxa2xx-ac97-lib.c
+++ b/sound/arm/pxa2xx-ac97-lib.c
@@ -117,8 +117,7 @@ static inline void pxa_ac97_warm_pxa25x(void)
{
gsr_bits = 0;
- GCR |= GCR_WARM_RST | GCR_PRIRDY_IEN | GCR_SECRDY_IEN;
- wait_event_timeout(gsr_wq, gsr_bits & (GSR_PCR | GSR_SCR), 1);
+ GCR |= GCR_WARM_RST;
}
static inline void pxa_ac97_cold_pxa25x(void)
@@ -129,8 +128,6 @@ static inline void pxa_ac97_cold_pxa25x(void)
gsr_bits = 0;
GCR = GCR_COLD_RST;
- GCR |= GCR_CDONE_IE|GCR_SDONE_IE;
- wait_event_timeout(gsr_wq, gsr_bits & (GSR_PCR | GSR_SCR), 1);
}
#endif
@@ -149,8 +146,6 @@ static inline void pxa_ac97_warm_pxa27x(void)
static inline void pxa_ac97_cold_pxa27x(void)
{
- unsigned int timeout;
-
GCR &= GCR_COLD_RST; /* clear everything but nCRST */
GCR &= ~GCR_COLD_RST; /* then assert nCRST */
@@ -161,29 +156,20 @@ static inline void pxa_ac97_cold_pxa27x(void)
udelay(5);
clk_disable(ac97conf_clk);
GCR = GCR_COLD_RST | GCR_WARM_RST;
- timeout = 100; /* wait for the codec-ready bit to be set */
- while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
- mdelay(1);
}
#endif
#ifdef CONFIG_PXA3xx
static inline void pxa_ac97_warm_pxa3xx(void)
{
- int timeout = 100;
-
gsr_bits = 0;
/* Can't use interrupts */
GCR |= GCR_WARM_RST;
- while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
- mdelay(1);
}
static inline void pxa_ac97_cold_pxa3xx(void)
{
- int timeout = 1000;
-
/* Hold CLKBPB for 100us */
GCR = 0;
GCR = GCR_CLKBPB;
@@ -199,14 +185,13 @@ static inline void pxa_ac97_cold_pxa3xx(void)
GCR &= ~(GCR_PRIRDY_IEN|GCR_SECRDY_IEN);
GCR = GCR_WARM_RST | GCR_COLD_RST;
- while (!(GSR & (GSR_PCR | GSR_SCR)) && timeout--)
- mdelay(10);
}
#endif
bool pxa2xx_ac97_try_warm_reset(struct snd_ac97 *ac97)
{
unsigned long gsr;
+ unsigned int timeout = 100;
#ifdef CONFIG_PXA25x
if (cpu_is_pxa25x())
@@ -224,6 +209,10 @@ bool pxa2xx_ac97_try_warm_reset(struct snd_ac97 *ac97)
else
#endif
BUG();
+
+ while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
+ mdelay(1);
+
gsr = GSR | gsr_bits;
if (!(gsr & (GSR_PCR | GSR_SCR))) {
printk(KERN_INFO "%s: warm reset timeout (GSR=%#lx)\n",
@@ -239,6 +228,7 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_try_warm_reset);
bool pxa2xx_ac97_try_cold_reset(struct snd_ac97 *ac97)
{
unsigned long gsr;
+ unsigned int timeout = 1000;
#ifdef CONFIG_PXA25x
if (cpu_is_pxa25x())
@@ -257,6 +247,9 @@ bool pxa2xx_ac97_try_cold_reset(struct snd_ac97 *ac97)
#endif
BUG();
+ while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
+ mdelay(1);
+
gsr = GSR | gsr_bits;
if (!(gsr & (GSR_PCR | GSR_SCR))) {
printk(KERN_INFO "%s: cold reset timeout (GSR=%#lx)\n",
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 5066a3768b28..9a2ac1e0f77a 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -185,7 +185,7 @@ static int pxa2xx_ac97_probe(struct platform_device *dev)
goto err;
card->dev = &dev->dev;
- strncpy(card->driver, dev->dev.driver->name, sizeof(card->driver));
+ strlcpy(card->driver, dev->dev.driver->name, sizeof(card->driver));
ret = pxa2xx_pcm_new(card, &pxa2xx_ac97_pcm_client, &pxa2xx_ac97_pcm);
if (ret)
diff --git a/sound/arm/pxa2xx-pcm.c b/sound/arm/pxa2xx-pcm.c
index 69a2455b4472..e6c727b317fb 100644
--- a/sound/arm/pxa2xx-pcm.c
+++ b/sound/arm/pxa2xx-pcm.c
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <sound/core.h>
@@ -83,8 +84,6 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
.mmap = pxa2xx_pcm_mmap,
};
-static u64 pxa2xx_pcm_dmamask = 0xffffffff;
-
int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
struct snd_pcm **rpcm)
{
@@ -100,10 +99,9 @@ int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
pcm->private_data = client;
pcm->private_free = pxa2xx_pcm_free_dma_buffers;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &pxa2xx_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = 0xffffffff;
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto out;
if (play) {
int stream = SNDRV_PCM_STREAM_PLAYBACK;
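
The dma_coerce_mask_and_coherent() call above replaces the open-coded static dmamask; as far as this note can tell it behaves roughly like the sketch below, which is illustrative rather than the authoritative definition of the helper.

/* Approximate expansion, for illustration only. */
static int example_coerce_mask(struct device *dev)
{
	/* Point dma_mask at coherent_dma_mask when the bus code left it
	 * unset, then set both masks to 32 bits. */
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}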
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index bea523a5d852..3eb47d0006a7 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -123,6 +123,7 @@ static int snd_compr_open(struct inode *inode, struct file *f)
}
runtime->state = SNDRV_PCM_STATE_OPEN;
init_waitqueue_head(&runtime->sleep);
+ init_waitqueue_head(&runtime->wait);
data->stream.runtime = runtime;
f->private_data = (void *)data;
mutex_lock(&compr->lock);
@@ -682,12 +683,34 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
if (!retval) {
stream->runtime->state = SNDRV_PCM_STATE_SETUP;
wake_up(&stream->runtime->sleep);
+ snd_compr_drain_notify(stream);
stream->runtime->total_bytes_available = 0;
stream->runtime->total_bytes_transferred = 0;
}
return retval;
}
+static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
+{
+ /*
+ * We are called with lock held. So drop the lock while we wait for
+ * drain complete notification from the driver.
+ *
+ * It is expected that the driver will notify the drain completion and then
+ * the stream will be moved to SETUP state, even if draining resulted in an
+ * error. We can trigger next track after this.
+ */
+ stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
+ mutex_unlock(&stream->device->lock);
+
+ wait_event(stream->runtime->wait, stream->runtime->drain_wake);
+
+ wake_up(&stream->runtime->sleep);
+ mutex_lock(&stream->device->lock);
+
+ return 0;
+}
+
static int snd_compr_drain(struct snd_compr_stream *stream)
{
int retval;
@@ -695,11 +718,17 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
stream->runtime->state == SNDRV_PCM_STATE_SETUP)
return -EPERM;
+
+ stream->runtime->drain_wake = 0;
retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
- if (!retval) {
- stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
+ if (retval) {
+ pr_err("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
wake_up(&stream->runtime->sleep);
+ return retval;
}
+
+ retval = snd_compress_wait_for_drain(stream);
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
return retval;
}
@@ -735,10 +764,16 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
if (stream->next_track == false)
return -EPERM;
+ stream->runtime->drain_wake = 0;
retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
+ if (retval) {
+ pr_err("Partial drain returned failure\n");
+ wake_up(&stream->runtime->sleep);
+ return retval;
+ }
stream->next_track = false;
- return retval;
+ return snd_compress_wait_for_drain(stream);
}
static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
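
With the change above, drain and partial drain now block until the driver signals completion, so a compress driver is expected to call snd_compr_drain_notify() from its drain-done path. A hypothetical driver-side sketch follows; the callback name is made up.

/* Hypothetical driver callback invoked when the DSP reports that all
 * queued data has been rendered. */
static void example_dsp_drain_done(struct snd_compr_stream *cstream)
{
	/* Expected to set runtime->drain_wake and wake runtime->wait,
	 * releasing snd_compress_wait_for_drain() in the core above. */
	snd_compr_drain_notify(cstream);
}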
diff --git a/sound/core/init.c b/sound/core/init.c
index 6b9087115da2..1351f22f651c 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -66,7 +66,7 @@ static int module_slot_match(struct module *module, int idx)
#ifdef MODULE
const char *s1, *s2;
- if (!module || !module->name || !slots[idx])
+ if (!module || !*module->name || !slots[idx])
return 0;
s1 = module->name;
@@ -597,7 +597,7 @@ static void snd_card_set_id_no_lock(struct snd_card *card, const char *src,
/* last resort... */
snd_printk(KERN_ERR "unable to set card id (%s)\n", id);
if (card->proc_root->name)
- strcpy(card->id, card->proc_root->name);
+ strlcpy(card->id, card->proc_root->name, sizeof(card->id));
}
/**
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index bdf826f4fe0c..9d93f02c6285 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -30,6 +30,7 @@
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
+#include <linux/genalloc.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/memalloc.h>
@@ -157,6 +158,51 @@ static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
dec_snd_pages(pg);
dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+/**
+ * snd_malloc_dev_iram - allocate memory from on-chip internal ram
+ * @dmab: buffer allocation record to store the allocated data
+ * @size: number of bytes to allocate from the iram
+ *
+ * This function requires iram phandle provided via of_node
+ */
+static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
+{
+ struct device *dev = dmab->dev.dev;
+ struct gen_pool *pool = NULL;
+
+ dmab->area = NULL;
+ dmab->addr = 0;
+
+ if (dev->of_node)
+ pool = of_get_named_gen_pool(dev->of_node, "iram", 0);
+
+ if (!pool)
+ return;
+
+ /* Assign the pool into private_data field */
+ dmab->private_data = pool;
+
+ dmab->area = (void *)gen_pool_alloc(pool, size);
+ if (!dmab->area)
+ return;
+
+ dmab->addr = gen_pool_virt_to_phys(pool, (unsigned long)dmab->area);
+}
+
+/**
+ * snd_free_dev_iram - free allocated specific memory from on-chip internal ram
+ * @dmab: buffer allocation record to store the allocated data
+ */
+static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
+{
+ struct gen_pool *pool = dmab->private_data;
+
+ if (pool && dmab->area)
+ gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
+}
+#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
/*
@@ -197,6 +243,16 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
dmab->addr = 0;
break;
#ifdef CONFIG_HAS_DMA
+#ifdef CONFIG_GENERIC_ALLOCATOR
+ case SNDRV_DMA_TYPE_DEV_IRAM:
+ snd_malloc_dev_iram(dmab, size);
+ if (dmab->area)
+ break;
+ /* Internal memory might be limited in size and not have enough space,
+ * so if the allocation fails, fall back to ordinary device memory.
+ */
+ dmab->dev.type = SNDRV_DMA_TYPE_DEV;
+#endif /* CONFIG_GENERIC_ALLOCATOR */
case SNDRV_DMA_TYPE_DEV:
dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
break;
@@ -269,6 +325,11 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
snd_free_pages(dmab->area, dmab->bytes);
break;
#ifdef CONFIG_HAS_DMA
+#ifdef CONFIG_GENERIC_ALLOCATOR
+ case SNDRV_DMA_TYPE_DEV_IRAM:
+ snd_free_dev_iram(dmab);
+ break;
+#endif /* CONFIG_GENERIC_ALLOCATOR */
case SNDRV_DMA_TYPE_DEV:
snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
break;
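
A short usage sketch for the new IRAM buffer type; the wrapper function, device pointer and size are placeholders, while snd_dma_alloc_pages() is the function modified above.

/* Illustrative only: request an on-chip IRAM buffer and fall back
 * transparently to ordinary coherent DMA memory when no "iram" phandle
 * is present or the pool is exhausted. */
static int example_alloc_iram(struct device *dev, size_t size,
			      struct snd_dma_buffer *buf)
{
	/* snd_dma_alloc_pages() rewrites dmab->dev.type to
	 * SNDRV_DMA_TYPE_DEV itself when the IRAM path fails, as shown in
	 * the hunk above. */
	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_IRAM, dev, size, buf);
}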
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 17f45e8aa89c..e1e9e0c999fe 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -49,6 +49,8 @@ static struct snd_pcm *snd_pcm_get(struct snd_card *card, int device)
struct snd_pcm *pcm;
list_for_each_entry(pcm, &snd_pcm_devices, list) {
+ if (pcm->internal)
+ continue;
if (pcm->card == card && pcm->device == device)
return pcm;
}
@@ -60,6 +62,8 @@ static int snd_pcm_next(struct snd_card *card, int device)
struct snd_pcm *pcm;
list_for_each_entry(pcm, &snd_pcm_devices, list) {
+ if (pcm->internal)
+ continue;
if (pcm->card == card && pcm->device > device)
return pcm->device;
else if (pcm->card->number > card->number)
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index aa924d9b7986..94d08733cb38 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -63,23 +63,19 @@ int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
struct dma_slave_config *slave_config)
{
enum dma_slave_buswidth buswidth;
+ int bits;
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S8:
+ bits = snd_pcm_format_physical_width(params_format(params));
+ if (bits < 8 || bits > 64)
+ return -EINVAL;
+ else if (bits == 8)
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
- break;
- case SNDRV_PCM_FORMAT_S16_LE:
+ else if (bits == 16)
buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
- break;
- case SNDRV_PCM_FORMAT_S18_3LE:
- case SNDRV_PCM_FORMAT_S20_3LE:
- case SNDRV_PCM_FORMAT_S24_LE:
- case SNDRV_PCM_FORMAT_S32_LE:
+ else if (bits <= 32)
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
- break;
- default:
- return -EINVAL;
- }
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
slave_config->direction = DMA_MEM_TO_DEV;
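
A few worked cases of the width-based mapping above, derived from the hunk and the standard ALSA physical sample widths:

/* Worked examples of the width-to-buswidth mapping above:
 *   SNDRV_PCM_FORMAT_S8      ->  8 bits            -> DMA_SLAVE_BUSWIDTH_1_BYTE
 *   SNDRV_PCM_FORMAT_S16_LE  -> 16 bits            -> DMA_SLAVE_BUSWIDTH_2_BYTES
 *   SNDRV_PCM_FORMAT_S24_LE  -> 32 bits (physical) -> DMA_SLAVE_BUSWIDTH_4_BYTES
 *   SNDRV_PCM_FORMAT_S32_LE  -> 32 bits            -> DMA_SLAVE_BUSWIDTH_4_BYTES
 * Formats wider than 32 bits now map to DMA_SLAVE_BUSWIDTH_8_BYTES instead of
 * being rejected with -EINVAL. */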
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index a68d4c6d702c..01a5e05ede95 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2428,6 +2428,7 @@ static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
case SNDRV_PCM_STATE_DRAINING:
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
goto __badfd;
+ /* Fall through */
case SNDRV_PCM_STATE_RUNNING:
if ((err = snd_pcm_update_hw_ptr(substream)) < 0)
break;
@@ -2460,6 +2461,7 @@ static int snd_pcm_delay(struct snd_pcm_substream *substream,
case SNDRV_PCM_STATE_DRAINING:
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
goto __badfd;
+ /* Fall through */
case SNDRV_PCM_STATE_RUNNING:
if ((err = snd_pcm_update_hw_ptr(substream)) < 0)
break;
@@ -3199,6 +3201,14 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
{
area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+#ifdef CONFIG_GENERIC_ALLOCATOR
+ if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
+ area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+ return remap_pfn_range(area, area->vm_start,
+ substream->dma_buffer.addr >> PAGE_SHIFT,
+ area->vm_end - area->vm_start, area->vm_page_prot);
+ }
+#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifdef ARCH_HAS_DMA_MMAP_COHERENT
if (!substream->ops->page &&
substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c
index 0c796bcbc0a3..6c6d09a51f42 100644
--- a/sound/drivers/opl3/opl3_midi.c
+++ b/sound/drivers/opl3/opl3_midi.c
@@ -390,6 +390,11 @@ void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan)
voice = snd_opl3_oss_map[chan->number];
}
+ if (voice < 0) {
+ spin_unlock_irqrestore(&opl3->voice_lock, flags);
+ return;
+ }
+
if (voice < MAX_OPL2_VOICES) {
/* Left register block for voices 0 .. 8 */
reg_side = OPL3_LEFT;
diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
index 1c19cd7ad26e..f664bae3b9b0 100644
--- a/sound/drivers/pcsp/pcsp.c
+++ b/sound/drivers/pcsp/pcsp.c
@@ -46,8 +46,9 @@ static int snd_pcsp_create(struct snd_card *card)
int err;
int div, min_div, order;
+ hrtimer_get_res(CLOCK_MONOTONIC, &tp);
+
if (!nopcm) {
- hrtimer_get_res(CLOCK_MONOTONIC, &tp);
if (tp.tv_sec || tp.tv_nsec > PCSP_MAX_PERIOD_NS) {
printk(KERN_ERR "PCSP: Timer resolution is not sufficient "
"(%linS)\n", tp.tv_nsec);
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index ea063e1f8722..b3e274fe4a77 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -11,6 +11,21 @@ config SND_FIREWIRE_LIB
tristate
depends on SND_PCM
+config SND_DICE
+ tristate "DICE-based DACs (EXPERIMENTAL)"
+ select SND_HWDEP
+ select SND_PCM
+ select SND_FIREWIRE_LIB
+ help
+ Say Y here to include support for many DACs based on the DICE
+ chip family (DICE-II/Jr/Mini) from TC Applied Technologies.
+
+ At the moment, this driver supports playback only. If you
+ want to use devices that support capturing, use FFADO instead.
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-dice.
+
config SND_FIREWIRE_SPEAKERS
tristate "FireWire speakers"
select SND_PCM
diff --git a/sound/firewire/Makefile b/sound/firewire/Makefile
index 460179df5bb5..509955061d30 100644
--- a/sound/firewire/Makefile
+++ b/sound/firewire/Makefile
@@ -1,10 +1,12 @@
snd-firewire-lib-objs := lib.o iso-resources.o packets-buffer.o \
fcp.o cmp.o amdtp.o
+snd-dice-objs := dice.o
snd-firewire-speakers-objs := speakers.o
snd-isight-objs := isight.o
snd-scs1x-objs := scs1x.o
obj-$(CONFIG_SND_FIREWIRE_LIB) += snd-firewire-lib.o
+obj-$(CONFIG_SND_DICE) += snd-dice.o
obj-$(CONFIG_SND_FIREWIRE_SPEAKERS) += snd-firewire-speakers.o
obj-$(CONFIG_SND_ISIGHT) += snd-isight.o
obj-$(CONFIG_SND_SCS1X) += snd-scs1x.o
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index ea995af6d049..d3226892ad6b 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -42,9 +42,6 @@ static void pcm_period_tasklet(unsigned long data);
int amdtp_out_stream_init(struct amdtp_out_stream *s, struct fw_unit *unit,
enum cip_out_flags flags)
{
- if (flags != CIP_NONBLOCKING)
- return -EINVAL;
-
s->unit = fw_unit_get(unit);
s->flags = flags;
s->context = ERR_PTR(-1);
@@ -62,73 +59,91 @@ EXPORT_SYMBOL(amdtp_out_stream_init);
*/
void amdtp_out_stream_destroy(struct amdtp_out_stream *s)
{
- WARN_ON(!IS_ERR(s->context));
+ WARN_ON(amdtp_out_stream_running(s));
mutex_destroy(&s->mutex);
fw_unit_put(s->unit);
}
EXPORT_SYMBOL(amdtp_out_stream_destroy);
+const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
+ [CIP_SFC_32000] = 8,
+ [CIP_SFC_44100] = 8,
+ [CIP_SFC_48000] = 8,
+ [CIP_SFC_88200] = 16,
+ [CIP_SFC_96000] = 16,
+ [CIP_SFC_176400] = 32,
+ [CIP_SFC_192000] = 32,
+};
+EXPORT_SYMBOL(amdtp_syt_intervals);
+
/**
- * amdtp_out_stream_set_rate - set the sample rate
+ * amdtp_out_stream_set_parameters - set stream parameters
* @s: the AMDTP output stream to configure
* @rate: the sample rate
+ * @pcm_channels: the number of PCM samples in each data block, to be encoded
+ * as AM824 multi-bit linear audio
+ * @midi_ports: the number of MIDI ports (i.e., MPX-MIDI Data Channels)
*
- * The sample rate must be set before the stream is started, and must not be
+ * The parameters must be set before the stream is started, and must not be
* changed while the stream is running.
*/
-void amdtp_out_stream_set_rate(struct amdtp_out_stream *s, unsigned int rate)
+void amdtp_out_stream_set_parameters(struct amdtp_out_stream *s,
+ unsigned int rate,
+ unsigned int pcm_channels,
+ unsigned int midi_ports)
{
- static const struct {
- unsigned int rate;
- unsigned int syt_interval;
- } rate_info[] = {
- [CIP_SFC_32000] = { 32000, 8, },
- [CIP_SFC_44100] = { 44100, 8, },
- [CIP_SFC_48000] = { 48000, 8, },
- [CIP_SFC_88200] = { 88200, 16, },
- [CIP_SFC_96000] = { 96000, 16, },
- [CIP_SFC_176400] = { 176400, 32, },
- [CIP_SFC_192000] = { 192000, 32, },
+ static const unsigned int rates[] = {
+ [CIP_SFC_32000] = 32000,
+ [CIP_SFC_44100] = 44100,
+ [CIP_SFC_48000] = 48000,
+ [CIP_SFC_88200] = 88200,
+ [CIP_SFC_96000] = 96000,
+ [CIP_SFC_176400] = 176400,
+ [CIP_SFC_192000] = 192000,
};
unsigned int sfc;
- if (WARN_ON(!IS_ERR(s->context)))
+ if (WARN_ON(amdtp_out_stream_running(s)))
return;
- for (sfc = 0; sfc < ARRAY_SIZE(rate_info); ++sfc)
- if (rate_info[sfc].rate == rate) {
- s->sfc = sfc;
- s->syt_interval = rate_info[sfc].syt_interval;
- return;
- }
+ for (sfc = 0; sfc < CIP_SFC_COUNT; ++sfc)
+ if (rates[sfc] == rate)
+ goto sfc_found;
WARN_ON(1);
+ return;
+
+sfc_found:
+ s->dual_wire = (s->flags & CIP_HI_DUALWIRE) && sfc > CIP_SFC_96000;
+ if (s->dual_wire) {
+ sfc -= 2;
+ rate /= 2;
+ pcm_channels *= 2;
+ }
+ s->sfc = sfc;
+ s->data_block_quadlets = pcm_channels + DIV_ROUND_UP(midi_ports, 8);
+ s->pcm_channels = pcm_channels;
+ s->midi_ports = midi_ports;
+
+ s->syt_interval = amdtp_syt_intervals[sfc];
+
+ /* default buffering in the device */
+ s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
+ if (s->flags & CIP_BLOCKING)
+ /* additional buffering needed to adjust for no-data packets */
+ s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
}
-EXPORT_SYMBOL(amdtp_out_stream_set_rate);
+EXPORT_SYMBOL(amdtp_out_stream_set_parameters);
/**
* amdtp_out_stream_get_max_payload - get the stream's packet size
* @s: the AMDTP output stream
*
* This function must not be called before the stream has been configured
- * with amdtp_out_stream_set_hw_params(), amdtp_out_stream_set_pcm(), and
- * amdtp_out_stream_set_midi().
+ * with amdtp_out_stream_set_parameters().
*/
unsigned int amdtp_out_stream_get_max_payload(struct amdtp_out_stream *s)
{
- static const unsigned int max_data_blocks[] = {
- [CIP_SFC_32000] = 4,
- [CIP_SFC_44100] = 6,
- [CIP_SFC_48000] = 6,
- [CIP_SFC_88200] = 12,
- [CIP_SFC_96000] = 12,
- [CIP_SFC_176400] = 23,
- [CIP_SFC_192000] = 24,
- };
-
- s->data_block_quadlets = s->pcm_channels;
- s->data_block_quadlets += DIV_ROUND_UP(s->midi_ports, 8);
-
- return 8 + max_data_blocks[s->sfc] * 4 * s->data_block_quadlets;
+ return 8 + s->syt_interval * s->data_block_quadlets * 4;
}
EXPORT_SYMBOL(amdtp_out_stream_get_max_payload);
@@ -138,19 +153,26 @@ static void amdtp_write_s16(struct amdtp_out_stream *s,
static void amdtp_write_s32(struct amdtp_out_stream *s,
struct snd_pcm_substream *pcm,
__be32 *buffer, unsigned int frames);
+static void amdtp_write_s16_dualwire(struct amdtp_out_stream *s,
+ struct snd_pcm_substream *pcm,
+ __be32 *buffer, unsigned int frames);
+static void amdtp_write_s32_dualwire(struct amdtp_out_stream *s,
+ struct snd_pcm_substream *pcm,
+ __be32 *buffer, unsigned int frames);
/**
* amdtp_out_stream_set_pcm_format - set the PCM format
* @s: the AMDTP output stream to configure
* @format: the format of the ALSA PCM device
*
- * The sample format must be set before the stream is started, and must not be
- * changed while the stream is running.
+ * The sample format must be set after the other parameters (rate/PCM channels/
+ * MIDI) and before the stream is started, and must not be changed while the
+ * stream is running.
*/
void amdtp_out_stream_set_pcm_format(struct amdtp_out_stream *s,
snd_pcm_format_t format)
{
- if (WARN_ON(!IS_ERR(s->context)))
+ if (WARN_ON(amdtp_out_stream_running(s)))
return;
switch (format) {
@@ -158,10 +180,16 @@ void amdtp_out_stream_set_pcm_format(struct amdtp_out_stream *s,
WARN_ON(1);
/* fall through */
case SNDRV_PCM_FORMAT_S16:
- s->transfer_samples = amdtp_write_s16;
+ if (s->dual_wire)
+ s->transfer_samples = amdtp_write_s16_dualwire;
+ else
+ s->transfer_samples = amdtp_write_s16;
break;
case SNDRV_PCM_FORMAT_S32:
- s->transfer_samples = amdtp_write_s32;
+ if (s->dual_wire)
+ s->transfer_samples = amdtp_write_s32_dualwire;
+ else
+ s->transfer_samples = amdtp_write_s32;
break;
}
}
@@ -248,7 +276,7 @@ static unsigned int calculate_syt(struct amdtp_out_stream *s,
s->last_syt_offset = syt_offset;
if (syt_offset < TICKS_PER_CYCLE) {
- syt_offset += TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
+ syt_offset += s->transfer_delay;
syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
syt += syt_offset % TICKS_PER_CYCLE;
@@ -268,7 +296,7 @@ static void amdtp_write_s32(struct amdtp_out_stream *s,
channels = s->pcm_channels;
src = (void *)runtime->dma_area +
- s->pcm_buffer_pointer * (runtime->frame_bits / 8);
+ frames_to_bytes(runtime, s->pcm_buffer_pointer);
remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer;
frame_step = s->data_block_quadlets - channels;
@@ -294,7 +322,7 @@ static void amdtp_write_s16(struct amdtp_out_stream *s,
channels = s->pcm_channels;
src = (void *)runtime->dma_area +
- s->pcm_buffer_pointer * (runtime->frame_bits / 8);
+ frames_to_bytes(runtime, s->pcm_buffer_pointer);
remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer;
frame_step = s->data_block_quadlets - channels;
@@ -310,6 +338,68 @@ static void amdtp_write_s16(struct amdtp_out_stream *s,
}
}
+static void amdtp_write_s32_dualwire(struct amdtp_out_stream *s,
+ struct snd_pcm_substream *pcm,
+ __be32 *buffer, unsigned int frames)
+{
+ struct snd_pcm_runtime *runtime = pcm->runtime;
+ unsigned int channels, frame_adjust_1, frame_adjust_2, i, c;
+ const u32 *src;
+
+ channels = s->pcm_channels;
+ src = (void *)runtime->dma_area +
+ s->pcm_buffer_pointer * (runtime->frame_bits / 8);
+ frame_adjust_1 = channels - 1;
+ frame_adjust_2 = 1 - (s->data_block_quadlets - channels);
+
+ channels /= 2;
+ for (i = 0; i < frames; ++i) {
+ for (c = 0; c < channels; ++c) {
+ *buffer = cpu_to_be32((*src >> 8) | 0x40000000);
+ src++;
+ buffer += 2;
+ }
+ buffer -= frame_adjust_1;
+ for (c = 0; c < channels; ++c) {
+ *buffer = cpu_to_be32((*src >> 8) | 0x40000000);
+ src++;
+ buffer += 2;
+ }
+ buffer -= frame_adjust_2;
+ }
+}
+
+static void amdtp_write_s16_dualwire(struct amdtp_out_stream *s,
+ struct snd_pcm_substream *pcm,
+ __be32 *buffer, unsigned int frames)
+{
+ struct snd_pcm_runtime *runtime = pcm->runtime;
+ unsigned int channels, frame_adjust_1, frame_adjust_2, i, c;
+ const u16 *src;
+
+ channels = s->pcm_channels;
+ src = (void *)runtime->dma_area +
+ s->pcm_buffer_pointer * (runtime->frame_bits / 8);
+ frame_adjust_1 = channels - 1;
+ frame_adjust_2 = 1 - (s->data_block_quadlets - channels);
+
+ channels /= 2;
+ for (i = 0; i < frames; ++i) {
+ for (c = 0; c < channels; ++c) {
+ *buffer = cpu_to_be32((*src << 8) | 0x40000000);
+ src++;
+ buffer += 2;
+ }
+ buffer -= frame_adjust_1;
+ for (c = 0; c < channels; ++c) {
+ *buffer = cpu_to_be32((*src << 8) | 0x40000000);
+ src++;
+ buffer += 2;
+ }
+ buffer -= frame_adjust_2;
+ }
+}
+
static void amdtp_fill_pcm_silence(struct amdtp_out_stream *s,
__be32 *buffer, unsigned int frames)
{
@@ -344,8 +434,17 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
return;
index = s->packet_index;
- data_blocks = calculate_data_blocks(s);
syt = calculate_syt(s, cycle);
+ if (!(s->flags & CIP_BLOCKING)) {
+ data_blocks = calculate_data_blocks(s);
+ } else {
+ if (syt != 0xffff) {
+ data_blocks = s->syt_interval;
+ } else {
+ data_blocks = 0;
+ syt = 0xffffff;
+ }
+ }
buffer = s->buffer.packets[index].buffer;
buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) |
@@ -386,6 +485,9 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
s->packet_index = index;
if (pcm) {
+ if (s->dual_wire)
+ data_blocks *= 2;
+
ptr = s->pcm_buffer_pointer + data_blocks;
if (ptr >= pcm->runtime->buffer_size)
ptr -= pcm->runtime->buffer_size;
@@ -455,9 +557,8 @@ static int queue_initial_skip_packets(struct amdtp_out_stream *s)
* @speed: firewire speed code
*
* The stream cannot be started until it has been configured with
- * amdtp_out_stream_set_hw_params(), amdtp_out_stream_set_pcm(), and
- * amdtp_out_stream_set_midi(); and it must be started before any
- * PCM or MIDI device can be started.
+ * amdtp_out_stream_set_parameters() and amdtp_out_stream_set_pcm_format(),
+ * and it must be started before any PCM or MIDI device can be started.
*/
int amdtp_out_stream_start(struct amdtp_out_stream *s, int channel, int speed)
{
@@ -477,7 +578,7 @@ int amdtp_out_stream_start(struct amdtp_out_stream *s, int channel, int speed)
mutex_lock(&s->mutex);
- if (WARN_ON(!IS_ERR(s->context) ||
+ if (WARN_ON(amdtp_out_stream_running(s) ||
(!s->pcm_channels && !s->midi_ports))) {
err = -EBADFD;
goto err_unlock;
@@ -573,7 +674,7 @@ void amdtp_out_stream_stop(struct amdtp_out_stream *s)
{
mutex_lock(&s->mutex);
- if (IS_ERR(s->context)) {
+ if (!amdtp_out_stream_running(s)) {
mutex_unlock(&s->mutex);
return;
}
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index f6103d68c4b1..839ebf812d79 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -1,6 +1,7 @@
#ifndef SOUND_FIREWIRE_AMDTP_H_INCLUDED
#define SOUND_FIREWIRE_AMDTP_H_INCLUDED
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include "packets-buffer.h"
@@ -11,9 +12,18 @@
* sample_rate/8000 samples, with rounding up or down to adjust
* for clock skew and left-over fractional samples. This should
* be used if supported by the device.
+ * @CIP_BLOCKING: In blocking mode, each packet contains either zero or
+ * SYT_INTERVAL samples, with these two types alternating so that
+ * the overall sample rate comes out right.
+ * @CIP_HI_DUALWIRE: At rates above 96 kHz, pretend that the stream runs
+ * at half the actual sample rate with twice the number of channels;
+ * two samples of a channel are stored consecutively in the packet.
+ * Requires blocking mode and SYT_INTERVAL-aligned PCM buffer size.
*/
enum cip_out_flags {
- CIP_NONBLOCKING = 0,
+ CIP_NONBLOCKING = 0x00,
+ CIP_BLOCKING = 0x01,
+ CIP_HI_DUALWIRE = 0x02,
};
/**
@@ -27,6 +37,7 @@ enum cip_sfc {
CIP_SFC_96000 = 4,
CIP_SFC_176400 = 5,
CIP_SFC_192000 = 6,
+ CIP_SFC_COUNT
};
#define AMDTP_OUT_PCM_FORMAT_BITS (SNDRV_PCM_FMTBIT_S16 | \
@@ -43,6 +54,7 @@ struct amdtp_out_stream {
struct mutex mutex;
enum cip_sfc sfc;
+ bool dual_wire;
unsigned int data_block_quadlets;
unsigned int pcm_channels;
unsigned int midi_ports;
@@ -51,6 +63,7 @@ struct amdtp_out_stream {
__be32 *buffer, unsigned int frames);
unsigned int syt_interval;
+ unsigned int transfer_delay;
unsigned int source_node_id_field;
struct iso_packets_buffer buffer;
@@ -74,7 +87,10 @@ int amdtp_out_stream_init(struct amdtp_out_stream *s, struct fw_unit *unit,
enum cip_out_flags flags);
void amdtp_out_stream_destroy(struct amdtp_out_stream *s);
-void amdtp_out_stream_set_rate(struct amdtp_out_stream *s, unsigned int rate);
+void amdtp_out_stream_set_parameters(struct amdtp_out_stream *s,
+ unsigned int rate,
+ unsigned int pcm_channels,
+ unsigned int midi_ports);
unsigned int amdtp_out_stream_get_max_payload(struct amdtp_out_stream *s);
int amdtp_out_stream_start(struct amdtp_out_stream *s, int channel, int speed);
@@ -87,31 +103,11 @@ void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s);
unsigned long amdtp_out_stream_pcm_pointer(struct amdtp_out_stream *s);
void amdtp_out_stream_pcm_abort(struct amdtp_out_stream *s);
-/**
- * amdtp_out_stream_set_pcm - configure format of PCM samples
- * @s: the AMDTP output stream to be configured
- * @pcm_channels: the number of PCM samples in each data block, to be encoded
- * as AM824 multi-bit linear audio
- *
- * This function must not be called while the stream is running.
- */
-static inline void amdtp_out_stream_set_pcm(struct amdtp_out_stream *s,
- unsigned int pcm_channels)
-{
- s->pcm_channels = pcm_channels;
-}
+extern const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT];
-/**
- * amdtp_out_stream_set_midi - configure format of MIDI data
- * @s: the AMDTP output stream to be configured
- * @midi_ports: the number of MIDI ports (i.e., MPX-MIDI Data Channels)
- *
- * This function must not be called while the stream is running.
- */
-static inline void amdtp_out_stream_set_midi(struct amdtp_out_stream *s,
- unsigned int midi_ports)
+static inline bool amdtp_out_stream_running(struct amdtp_out_stream *s)
{
- s->midi_ports = midi_ports;
+ return !IS_ERR(s->context);
}
/**
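
Putting the reworked AMDTP configuration API together, a hypothetical caller (not part of the patch; rate, channel and port values are placeholders) would now do roughly:

/* Illustrative configuration sequence using the new API. */
static int example_start_amdtp(struct amdtp_out_stream *s, struct fw_unit *unit,
			       int iso_channel, int speed)
{
	int err;

	err = amdtp_out_stream_init(s, unit, CIP_BLOCKING);
	if (err < 0)
		return err;

	/* One call now covers what set_rate/set_pcm/set_midi used to do. */
	amdtp_out_stream_set_parameters(s, 96000, /* pcm_channels */ 8,
					/* midi_ports */ 0);
	/* Must follow set_parameters(), since dual-wire mode affects which
	 * sample transfer routine is chosen here. */
	amdtp_out_stream_set_pcm_format(s, SNDRV_PCM_FORMAT_S32);

	return amdtp_out_stream_start(s, iso_channel, speed);
}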
diff --git a/sound/firewire/cmp.c b/sound/firewire/cmp.c
index 645cb0ba4293..efdbf585e404 100644
--- a/sound/firewire/cmp.c
+++ b/sound/firewire/cmp.c
@@ -48,9 +48,6 @@ static int pcr_modify(struct cmp_connection *c,
int (*check)(struct cmp_connection *c, __be32 pcr),
enum bus_reset_handling bus_reset_handling)
{
- struct fw_device *device = fw_parent_device(c->resources.unit);
- int generation = c->resources.generation;
- int rcode, errors = 0;
__be32 old_arg, buffer[2];
int err;
@@ -59,36 +56,31 @@ static int pcr_modify(struct cmp_connection *c,
old_arg = buffer[0];
buffer[1] = modify(c, buffer[0]);
- rcode = fw_run_transaction(
- device->card, TCODE_LOCK_COMPARE_SWAP,
- device->node_id, generation, device->max_speed,
+ err = snd_fw_transaction(
+ c->resources.unit, TCODE_LOCK_COMPARE_SWAP,
CSR_REGISTER_BASE + CSR_IPCR(c->pcr_index),
- buffer, 8);
-
- if (rcode == RCODE_COMPLETE) {
- if (buffer[0] == old_arg) /* success? */
- break;
-
- if (check) {
- err = check(c, buffer[0]);
- if (err < 0)
- return err;
- }
- } else if (rcode == RCODE_GENERATION)
- goto bus_reset;
- else if (rcode_is_permanent_error(rcode) || ++errors >= 3)
- goto io_error;
+ buffer, 8,
+ FW_FIXED_GENERATION | c->resources.generation);
+
+ if (err < 0) {
+ if (err == -EAGAIN &&
+ bus_reset_handling == SUCCEED_ON_BUS_RESET)
+ err = 0;
+ return err;
+ }
+
+ if (buffer[0] == old_arg) /* success? */
+ break;
+
+ if (check) {
+ err = check(c, buffer[0]);
+ if (err < 0)
+ return err;
+ }
}
c->last_pcr_value = buffer[1];
return 0;
-
-io_error:
- cmp_error(c, "transaction failed: %s\n", fw_rcode_string(rcode));
- return -EIO;
-
-bus_reset:
- return bus_reset_handling == ABORT_ON_BUS_RESET ? -EAGAIN : 0;
}
@@ -108,7 +100,7 @@ int cmp_connection_init(struct cmp_connection *c,
err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
CSR_REGISTER_BASE + CSR_IMPR,
- &impr_be, 4);
+ &impr_be, 4, 0);
if (err < 0)
return err;
impr = be32_to_cpu(impr_be);
diff --git a/sound/firewire/dice-interface.h b/sound/firewire/dice-interface.h
new file mode 100644
index 000000000000..27b044f84c81
--- /dev/null
+++ b/sound/firewire/dice-interface.h
@@ -0,0 +1,371 @@
+#ifndef SOUND_FIREWIRE_DICE_INTERFACE_H_INCLUDED
+#define SOUND_FIREWIRE_DICE_INTERFACE_H_INCLUDED
+
+/*
+ * DICE device interface definitions
+ */
+
+/*
+ * Generally, all registers can be read like memory, i.e., with quadlet read or
+ * block read transactions with at least quadlet-aligned offset and length.
+ * Writes are not allowed except where noted; quadlet-sized registers must be
+ * written with a quadlet write transaction.
+ *
+ * All values are in big endian. The DICE firmware runs on a little-endian CPU
+ * and just byte-swaps _all_ quadlets on the bus, so values without endianness
+ * (e.g. strings) get scrambled and must be byte-swapped again by the driver.
+ */
+
+/*
+ * Streaming is handled by the "DICE driver" interface. Its registers are
+ * located in this private address space.
+ */
+#define DICE_PRIVATE_SPACE 0xffffe0000000uLL
+
+/*
+ * The registers are organized in several sections, which are organized
+ * separately to allow them to be extended individually. Whether a register is
+ * supported can be detected by checking its offset against its section's size.
+ *
+ * The section offset values are relative to DICE_PRIVATE_SPACE; the offset/
+ * size values are measured in quadlets. Read-only.
+ */
+#define DICE_GLOBAL_OFFSET 0x00
+#define DICE_GLOBAL_SIZE 0x04
+#define DICE_TX_OFFSET 0x08
+#define DICE_TX_SIZE 0x0c
+#define DICE_RX_OFFSET 0x10
+#define DICE_RX_SIZE 0x14
+#define DICE_EXT_SYNC_OFFSET 0x18
+#define DICE_EXT_SYNC_SIZE 0x1c
+#define DICE_UNUSED2_OFFSET 0x20
+#define DICE_UNUSED2_SIZE 0x24
+
+/*
+ * Global settings.
+ */
+
+/*
+ * Stores the full 64-bit address (node ID and offset in the node's address
+ * space) where the device will send notifications. Must be changed with
+ * a compare/swap transaction by the owner. This register is automatically
+ * cleared on a bus reset.
+ */
+#define GLOBAL_OWNER 0x000
+#define OWNER_NO_OWNER 0xffff000000000000uLL
+#define OWNER_NODE_SHIFT 48
+
+/*
+ * A bitmask with asynchronous events; read-only. When any event(s) happen,
+ * the bits of previous events are cleared, and the value of this register is
+ * also written to the address stored in the owner register.
+ */
+#define GLOBAL_NOTIFICATION 0x008
+/* Some registers in the Rx/Tx sections may have changed. */
+#define NOTIFY_RX_CFG_CHG 0x00000001
+#define NOTIFY_TX_CFG_CHG 0x00000002
+/* Lock status of the current clock source may have changed. */
+#define NOTIFY_LOCK_CHG 0x00000010
+/* Write to the clock select register has been finished. */
+#define NOTIFY_CLOCK_ACCEPTED 0x00000020
+/* Lock status of some clock source has changed. */
+#define NOTIFY_EXT_STATUS 0x00000040
+/* Other bits may be used for device-specific events. */
+
+/*
+ * A name that can be customized for each device; read/write. Padded with zero
+ * bytes. Quadlets are byte-swapped. The encoding is whatever the host driver
+ * happens to be using.
+ */
+#define GLOBAL_NICK_NAME 0x00c
+#define NICK_NAME_SIZE 64
+
+/*
+ * The current sample rate and clock source; read/write. Whether a clock
+ * source or sample rate is supported is device-specific; the internal clock
+ * source is always available. Low/mid/high = up to 48/96/192 kHz. This
+ * register can be changed even while streams are running.
+ */
+#define GLOBAL_CLOCK_SELECT 0x04c
+#define CLOCK_SOURCE_MASK 0x000000ff
+#define CLOCK_SOURCE_AES1 0x00000000
+#define CLOCK_SOURCE_AES2 0x00000001
+#define CLOCK_SOURCE_AES3 0x00000002
+#define CLOCK_SOURCE_AES4 0x00000003
+#define CLOCK_SOURCE_AES_ANY 0x00000004
+#define CLOCK_SOURCE_ADAT 0x00000005
+#define CLOCK_SOURCE_TDIF 0x00000006
+#define CLOCK_SOURCE_WC 0x00000007
+#define CLOCK_SOURCE_ARX1 0x00000008
+#define CLOCK_SOURCE_ARX2 0x00000009
+#define CLOCK_SOURCE_ARX3 0x0000000a
+#define CLOCK_SOURCE_ARX4 0x0000000b
+#define CLOCK_SOURCE_INTERNAL 0x0000000c
+#define CLOCK_RATE_MASK 0x0000ff00
+#define CLOCK_RATE_32000 0x00000000
+#define CLOCK_RATE_44100 0x00000100
+#define CLOCK_RATE_48000 0x00000200
+#define CLOCK_RATE_88200 0x00000300
+#define CLOCK_RATE_96000 0x00000400
+#define CLOCK_RATE_176400 0x00000500
+#define CLOCK_RATE_192000 0x00000600
+#define CLOCK_RATE_ANY_LOW 0x00000700
+#define CLOCK_RATE_ANY_MID 0x00000800
+#define CLOCK_RATE_ANY_HIGH 0x00000900
+#define CLOCK_RATE_NONE 0x00000a00
+#define CLOCK_RATE_SHIFT 8
+
+/*
+ * Enable streaming; read/write. Writing a non-zero value (re)starts all
+ * streams that have a valid iso channel set; zero stops all streams. The
+ * streams' parameters must be configured before starting. This register is
+ * automatically cleared on a bus reset.
+ */
+#define GLOBAL_ENABLE 0x050
+
+/*
+ * Status of the sample clock; read-only.
+ */
+#define GLOBAL_STATUS 0x054
+/* The current clock source is locked. */
+#define STATUS_SOURCE_LOCKED 0x00000001
+/* The actual sample rate; CLOCK_RATE_32000-_192000 or _NONE. */
+#define STATUS_NOMINAL_RATE_MASK 0x0000ff00
+
+/*
+ * Status of all clock sources; read-only.
+ */
+#define GLOBAL_EXTENDED_STATUS 0x058
+/*
+ * The _LOCKED bits always show the current status; any change generates
+ * a notification.
+ */
+#define EXT_STATUS_AES1_LOCKED 0x00000001
+#define EXT_STATUS_AES2_LOCKED 0x00000002
+#define EXT_STATUS_AES3_LOCKED 0x00000004
+#define EXT_STATUS_AES4_LOCKED 0x00000008
+#define EXT_STATUS_ADAT_LOCKED 0x00000010
+#define EXT_STATUS_TDIF_LOCKED 0x00000020
+#define EXT_STATUS_ARX1_LOCKED 0x00000040
+#define EXT_STATUS_ARX2_LOCKED 0x00000080
+#define EXT_STATUS_ARX3_LOCKED 0x00000100
+#define EXT_STATUS_ARX4_LOCKED 0x00000200
+#define EXT_STATUS_WC_LOCKED 0x00000400
+/*
+ * The _SLIP bits do not generate notifications; a set bit indicates that an
+ * error occurred since the last time this register was read with a
+ * quadlet read transaction.
+ */
+#define EXT_STATUS_AES1_SLIP 0x00010000
+#define EXT_STATUS_AES2_SLIP 0x00020000
+#define EXT_STATUS_AES3_SLIP 0x00040000
+#define EXT_STATUS_AES4_SLIP 0x00080000
+#define EXT_STATUS_ADAT_SLIP 0x00100000
+#define EXT_STATUS_TDIF_SLIP 0x00200000
+#define EXT_STATUS_ARX1_SLIP 0x00400000
+#define EXT_STATUS_ARX2_SLIP 0x00800000
+#define EXT_STATUS_ARX3_SLIP 0x01000000
+#define EXT_STATUS_ARX4_SLIP 0x02000000
+#define EXT_STATUS_WC_SLIP 0x04000000
+
+/*
+ * The measured rate of the current clock source, in Hz; read-only.
+ */
+#define GLOBAL_SAMPLE_RATE 0x05c
+
+/*
+ * The version of the DICE driver specification that this device conforms to;
+ * read-only.
+ */
+#define GLOBAL_VERSION 0x060
+
+/* Some old firmware versions do not have the following global registers: */
+
+/*
+ * Supported sample rates and clock sources; read-only.
+ */
+#define GLOBAL_CLOCK_CAPABILITIES 0x064
+#define CLOCK_CAP_RATE_32000 0x00000001
+#define CLOCK_CAP_RATE_44100 0x00000002
+#define CLOCK_CAP_RATE_48000 0x00000004
+#define CLOCK_CAP_RATE_88200 0x00000008
+#define CLOCK_CAP_RATE_96000 0x00000010
+#define CLOCK_CAP_RATE_176400 0x00000020
+#define CLOCK_CAP_RATE_192000 0x00000040
+#define CLOCK_CAP_SOURCE_AES1 0x00010000
+#define CLOCK_CAP_SOURCE_AES2 0x00020000
+#define CLOCK_CAP_SOURCE_AES3 0x00040000
+#define CLOCK_CAP_SOURCE_AES4 0x00080000
+#define CLOCK_CAP_SOURCE_AES_ANY 0x00100000
+#define CLOCK_CAP_SOURCE_ADAT 0x00200000
+#define CLOCK_CAP_SOURCE_TDIF 0x00400000
+#define CLOCK_CAP_SOURCE_WC 0x00800000
+#define CLOCK_CAP_SOURCE_ARX1 0x01000000
+#define CLOCK_CAP_SOURCE_ARX2 0x02000000
+#define CLOCK_CAP_SOURCE_ARX3 0x04000000
+#define CLOCK_CAP_SOURCE_ARX4 0x08000000
+#define CLOCK_CAP_SOURCE_INTERNAL 0x10000000
+
+/*
+ * Names of all clock sources; read-only. Quadlets are byte-swapped. Names
+ * are separated with one backslash, the list is terminated with two
+ * backslashes. Unused clock sources are included.
+ */
+#define GLOBAL_CLOCK_SOURCE_NAMES 0x068
+#define CLOCK_SOURCE_NAMES_SIZE 256
+
+/*
+ * Capture stream settings. This section includes the number/size registers
+ * and the registers of all streams.
+ */
+
+/*
+ * The number of supported capture streams; read-only.
+ */
+#define TX_NUMBER 0x000
+
+/*
+ * The size of one stream's register block, in quadlets; read-only. The
+ * registers of the first stream follow immediately afterwards; the registers
+ * of the following streams are offset by this register's value.
+ */
+#define TX_SIZE 0x004
+
+/*
+ * The isochronous channel number on which packets are sent, or -1 if the
+ * stream is not to be used; read/write.
+ */
+#define TX_ISOCHRONOUS 0x008
+
+/*
+ * The number of audio channels; read-only. There will be one quadlet per
+ * channel; the first channel is the first quadlet in a data block.
+ */
+#define TX_NUMBER_AUDIO 0x00c
+
+/*
+ * The number of MIDI ports, 0-8; read-only. If > 0, there will be one
+ * additional quadlet in each data block, following the audio quadlets.
+ */
+#define TX_NUMBER_MIDI 0x010
+
+/*
+ * The speed at which the packets are sent, SCODE_100-_400; read/write.
+ */
+#define TX_SPEED 0x014
+
+/*
+ * Names of all audio channels; read-only. Quadlets are byte-swapped. Names
+ * are separated by a backslash; the list is terminated by two backslashes.
+ */
+#define TX_NAMES 0x018
+#define TX_NAMES_SIZE 256
+
+/*
+ * Audio IEC60958 capabilities; read-only. Bitmask with one bit per audio
+ * channel.
+ */
+#define TX_AC3_CAPABILITIES 0x118
+
+/*
+ * Send audio data with IEC60958 label; read/write. Bitmask with one bit per
+ * audio channel. This register can be changed even while the stream is
+ * running.
+ */
+#define TX_AC3_ENABLE 0x11c
+
+/*
+ * Playback stream settings. This section includes the number/size registers
+ * and the registers of all streams.
+ */
+
+/*
+ * The number of supported playback streams; read-only.
+ */
+#define RX_NUMBER 0x000
+
+/*
+ * The size of one stream's register block, in quadlets; read-only. The
+ * registers of the first stream follow immediately afterwards; the registers
+ * of the following streams are offset by this register's value.
+ */
+#define RX_SIZE 0x004
+
+/*
+ * The isochronous channel number on which packets are received, or -1 if the
+ * stream is not to be used; read/write.
+ */
+#define RX_ISOCHRONOUS 0x008
+
+/*
+ * Index of first quadlet to be interpreted; read/write. If > 0, that many
+ * quadlets at the beginning of each data block will be ignored, and all the
+ * audio and MIDI quadlets will follow.
+ */
+#define RX_SEQ_START 0x00c
+
+/*
+ * The number of audio channels; read-only. There will be one quadlet per
+ * channel.
+ */
+#define RX_NUMBER_AUDIO 0x010
+
+/*
+ * The number of MIDI ports, 0-8; read-only. If > 0, there will be one
+ * additional quadlet in each data block, following the audio quadlets.
+ */
+#define RX_NUMBER_MIDI 0x014
+
+/*
+ * Names of all audio channels; read-only. Quadlets are byte-swapped. Names
+ * are separated by a backslash; the list is terminated by two backslashes.
+ */
+#define RX_NAMES 0x018
+#define RX_NAMES_SIZE 256
+
+/*
+ * Audio IEC60958 capabilities; read-only. Bitmask with one bit per audio
+ * channel.
+ */
+#define RX_AC3_CAPABILITIES 0x118
+
+/*
+ * Receive audio data with IEC60958 label; read/write. Bitmask with one bit
+ * per audio channel. This register can be changed even while the stream is
+ * running.
+ */
+#define RX_AC3_ENABLE 0x11c
+
+/*
+ * Extended synchronization information.
+ * This section can be read completely with a block read request.
+ */
+
+/*
+ * Current clock source; read-only.
+ */
+#define EXT_SYNC_CLOCK_SOURCE 0x000
+
+/*
+ * Clock source is locked (boolean); read-only.
+ */
+#define EXT_SYNC_LOCKED 0x004
+
+/*
+ * Current sample rate (CLOCK_RATE_* >> CLOCK_RATE_SHIFT), _32000-_192000 or
+ * _NONE; read-only.
+ */
+#define EXT_SYNC_RATE 0x008
+
+/*
+ * ADAT user data bits; read-only.
+ */
+#define EXT_SYNC_ADAT_USER_DATA 0x00c
+/* The data bits, if available. */
+#define ADAT_USER_DATA_MASK 0x0f
+/* The data bits are not available. */
+#define ADAT_USER_DATA_NO_DATA 0x10
+
+#endif
diff --git a/sound/firewire/dice.c b/sound/firewire/dice.c
new file mode 100644
index 000000000000..6feee6614193
--- /dev/null
+++ b/sound/firewire/dice.c
@@ -0,0 +1,1494 @@
+/*
+ * TC Applied Technologies Digital Interface Communications Engine driver
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include <linux/compat.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <sound/control.h>
+#include <sound/core.h>
+#include <sound/firewire.h>
+#include <sound/hwdep.h>
+#include <sound/info.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include "amdtp.h"
+#include "iso-resources.h"
+#include "lib.h"
+#include "dice-interface.h"
+
+
+struct dice {
+ struct snd_card *card;
+ struct fw_unit *unit;
+ spinlock_t lock;
+ struct mutex mutex;
+ unsigned int global_offset;
+ unsigned int rx_offset;
+ unsigned int clock_caps;
+ unsigned int rx_channels[3];
+ unsigned int rx_midi_ports[3];
+ struct fw_address_handler notification_handler;
+ int owner_generation;
+ int dev_lock_count; /* > 0 driver, < 0 userspace */
+ bool dev_lock_changed;
+ bool global_enabled;
+ struct completion clock_accepted;
+ wait_queue_head_t hwdep_wait;
+ u32 notification_bits;
+ struct fw_iso_resources resources;
+ struct amdtp_out_stream stream;
+};
+
+MODULE_DESCRIPTION("DICE driver");
+MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
+MODULE_LICENSE("GPL v2");
+
+static const unsigned int dice_rates[] = {
+ /* mode 0 */
+ [0] = 32000,
+ [1] = 44100,
+ [2] = 48000,
+ /* mode 1 */
+ [3] = 88200,
+ [4] = 96000,
+ /* mode 2 */
+ [5] = 176400,
+ [6] = 192000,
+};
+
+static unsigned int rate_to_index(unsigned int rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dice_rates); ++i)
+ if (dice_rates[i] == rate)
+ return i;
+
+ return 0;
+}
+
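+/*
+ * Rate indices 0-2 belong to mode 0, indices 3-4 to mode 1, and indices 5-6
+ * to mode 2; the truncating integer division below implements this mapping.
+ */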
+static unsigned int rate_index_to_mode(unsigned int rate_index)
+{
+ return ((int)rate_index - 1) / 2;
+}
+
+static void dice_lock_changed(struct dice *dice)
+{
+ dice->dev_lock_changed = true;
+ wake_up(&dice->hwdep_wait);
+}
+
+static int dice_try_lock(struct dice *dice)
+{
+ int err;
+
+ spin_lock_irq(&dice->lock);
+
+ if (dice->dev_lock_count < 0) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (dice->dev_lock_count++ == 0)
+ dice_lock_changed(dice);
+ err = 0;
+
+out:
+ spin_unlock_irq(&dice->lock);
+
+ return err;
+}
+
+static void dice_unlock(struct dice *dice)
+{
+ spin_lock_irq(&dice->lock);
+
+ if (WARN_ON(dice->dev_lock_count <= 0))
+ goto out;
+
+ if (--dice->dev_lock_count == 0)
+ dice_lock_changed(dice);
+
+out:
+ spin_unlock_irq(&dice->lock);
+}
+
+static inline u64 global_address(struct dice *dice, unsigned int offset)
+{
+ return DICE_PRIVATE_SPACE + dice->global_offset + offset;
+}
+
+// TODO: rx index
+static inline u64 rx_address(struct dice *dice, unsigned int offset)
+{
+ return DICE_PRIVATE_SPACE + dice->rx_offset + offset;
+}
+
+static int dice_owner_set(struct dice *dice)
+{
+ struct fw_device *device = fw_parent_device(dice->unit);
+ __be64 *buffer;
+ int err, errors = 0;
+
+ buffer = kmalloc(2 * 8, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
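+ /*
+ * Take ownership with a compare-and-swap: the lock succeeds only while
+ * the owner register still reads OWNER_NO_OWNER, and the new value
+ * directs the device's notifications to our address handler.
+ */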
+ for (;;) {
+ buffer[0] = cpu_to_be64(OWNER_NO_OWNER);
+ buffer[1] = cpu_to_be64(
+ ((u64)device->card->node_id << OWNER_NODE_SHIFT) |
+ dice->notification_handler.offset);
+
+ dice->owner_generation = device->generation;
+ smp_rmb(); /* node_id vs. generation */
+ err = snd_fw_transaction(dice->unit,
+ TCODE_LOCK_COMPARE_SWAP,
+ global_address(dice, GLOBAL_OWNER),
+ buffer, 2 * 8,
+ FW_FIXED_GENERATION |
+ dice->owner_generation);
+
+ if (err == 0) {
+ if (buffer[0] != cpu_to_be64(OWNER_NO_OWNER)) {
+ dev_err(&dice->unit->device,
+ "device is already in use\n");
+ err = -EBUSY;
+ }
+ break;
+ }
+ if (err != -EAGAIN || ++errors >= 3)
+ break;
+
+ msleep(20);
+ }
+
+ kfree(buffer);
+
+ return err;
+}
+
+static int dice_owner_update(struct dice *dice)
+{
+ struct fw_device *device = fw_parent_device(dice->unit);
+ __be64 *buffer;
+ int err;
+
+ if (dice->owner_generation == -1)
+ return 0;
+
+ buffer = kmalloc(2 * 8, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ buffer[0] = cpu_to_be64(OWNER_NO_OWNER);
+ buffer[1] = cpu_to_be64(
+ ((u64)device->card->node_id << OWNER_NODE_SHIFT) |
+ dice->notification_handler.offset);
+
+ dice->owner_generation = device->generation;
+ smp_rmb(); /* node_id vs. generation */
+ err = snd_fw_transaction(dice->unit, TCODE_LOCK_COMPARE_SWAP,
+ global_address(dice, GLOBAL_OWNER),
+ buffer, 2 * 8,
+ FW_FIXED_GENERATION | dice->owner_generation);
+
+ if (err == 0) {
+ if (buffer[0] != cpu_to_be64(OWNER_NO_OWNER)) {
+ dev_err(&dice->unit->device,
+ "device is already in use\n");
+ err = -EBUSY;
+ }
+ } else if (err == -EAGAIN) {
+ err = 0; /* try again later */
+ }
+
+ kfree(buffer);
+
+ if (err < 0)
+ dice->owner_generation = -1;
+
+ return err;
+}
+
+static void dice_owner_clear(struct dice *dice)
+{
+ struct fw_device *device = fw_parent_device(dice->unit);
+ __be64 *buffer;
+
+ buffer = kmalloc(2 * 8, GFP_KERNEL);
+ if (!buffer)
+ return;
+
+ buffer[0] = cpu_to_be64(
+ ((u64)device->card->node_id << OWNER_NODE_SHIFT) |
+ dice->notification_handler.offset);
+ buffer[1] = cpu_to_be64(OWNER_NO_OWNER);
+ snd_fw_transaction(dice->unit, TCODE_LOCK_COMPARE_SWAP,
+ global_address(dice, GLOBAL_OWNER),
+ buffer, 2 * 8, FW_QUIET |
+ FW_FIXED_GENERATION | dice->owner_generation);
+
+ kfree(buffer);
+
+ dice->owner_generation = -1;
+}
+
+static int dice_enable_set(struct dice *dice)
+{
+ __be32 value;
+ int err;
+
+ value = cpu_to_be32(1);
+ err = snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
+ global_address(dice, GLOBAL_ENABLE),
+ &value, 4,
+ FW_FIXED_GENERATION | dice->owner_generation);
+ if (err < 0)
+ return err;
+
+ dice->global_enabled = true;
+
+ return 0;
+}
+
+static void dice_enable_clear(struct dice *dice)
+{
+ __be32 value;
+
+ if (!dice->global_enabled)
+ return;
+
+ value = 0;
+ snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
+ global_address(dice, GLOBAL_ENABLE),
+ &value, 4, FW_QUIET |
+ FW_FIXED_GENERATION | dice->owner_generation);
+
+ dice->global_enabled = false;
+}
+
+static void dice_notification(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source,
+ int generation, unsigned long long offset,
+ void *data, size_t length, void *callback_data)
+{
+ struct dice *dice = callback_data;
+ u32 bits;
+ unsigned long flags;
+
+ if (tcode != TCODE_WRITE_QUADLET_REQUEST) {
+ fw_send_response(card, request, RCODE_TYPE_ERROR);
+ return;
+ }
+ if ((offset & 3) != 0) {
+ fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+ return;
+ }
+
+ bits = be32_to_cpup(data);
+
+ spin_lock_irqsave(&dice->lock, flags);
+ dice->notification_bits |= bits;
+ spin_unlock_irqrestore(&dice->lock, flags);
+
+ fw_send_response(card, request, RCODE_COMPLETE);
+
+ if (bits & NOTIFY_CLOCK_ACCEPTED)
+ complete(&dice->clock_accepted);
+ wake_up(&dice->hwdep_wait);
+}
+
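+/*
+ * The allowed sample rates and channel counts are coupled through the mode;
+ * each of the two hw_params rules below therefore restricts one interval
+ * based on the currently allowed values of the other.
+ */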
+static int dice_rate_constraint(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct dice *dice = rule->private;
+ const struct snd_interval *channels =
+ hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_interval *rate =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval allowed_rates = {
+ .min = UINT_MAX, .max = 0, .integer = 1
+ };
+ unsigned int i, mode;
+
+ for (i = 0; i < ARRAY_SIZE(dice_rates); ++i) {
+ mode = rate_index_to_mode(i);
+ if ((dice->clock_caps & (1 << i)) &&
+ snd_interval_test(channels, dice->rx_channels[mode])) {
+ allowed_rates.min = min(allowed_rates.min,
+ dice_rates[i]);
+ allowed_rates.max = max(allowed_rates.max,
+ dice_rates[i]);
+ }
+ }
+
+ return snd_interval_refine(rate, &allowed_rates);
+}
+
+static int dice_channels_constraint(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct dice *dice = rule->private;
+ const struct snd_interval *rate =
+ hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_interval allowed_channels = {
+ .min = UINT_MAX, .max = 0, .integer = 1
+ };
+ unsigned int i, mode;
+
+ for (i = 0; i < ARRAY_SIZE(dice_rates); ++i)
+ if ((dice->clock_caps & (1 << i)) &&
+ snd_interval_test(rate, dice_rates[i])) {
+ mode = rate_index_to_mode(i);
+ allowed_channels.min = min(allowed_channels.min,
+ dice->rx_channels[mode]);
+ allowed_channels.max = max(allowed_channels.max,
+ dice->rx_channels[mode]);
+ }
+
+ return snd_interval_refine(channels, &allowed_channels);
+}
+
+static int dice_open(struct snd_pcm_substream *substream)
+{
+ static const struct snd_pcm_hardware hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER,
+ .formats = AMDTP_OUT_PCM_FORMAT_BITS,
+ .channels_min = UINT_MAX,
+ .channels_max = 0,
+ .buffer_bytes_max = 16 * 1024 * 1024,
+ .period_bytes_min = 1,
+ .period_bytes_max = UINT_MAX,
+ .periods_min = 1,
+ .periods_max = UINT_MAX,
+ };
+ struct dice *dice = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ unsigned int i;
+ int err;
+
+ err = dice_try_lock(dice);
+ if (err < 0)
+ goto error;
+
+ runtime->hw = hardware;
+
+ for (i = 0; i < ARRAY_SIZE(dice_rates); ++i)
+ if (dice->clock_caps & (1 << i))
+ runtime->hw.rates |=
+ snd_pcm_rate_to_rate_bit(dice_rates[i]);
+ snd_pcm_limit_hw_rates(runtime);
+
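+ /*
+ * Widen the channel range from its UINT_MAX/0 start values to cover the
+ * channel counts of all modes that are actually available.
+ */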
+ for (i = 0; i < 3; ++i)
+ if (dice->rx_channels[i]) {
+ runtime->hw.channels_min = min(runtime->hw.channels_min,
+ dice->rx_channels[i]);
+ runtime->hw.channels_max = max(runtime->hw.channels_max,
+ dice->rx_channels[i]);
+ }
+
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ dice_rate_constraint, dice,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ if (err < 0)
+ goto err_lock;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ dice_channels_constraint, dice,
+ SNDRV_PCM_HW_PARAM_RATE, -1);
+ if (err < 0)
+ goto err_lock;
+
+ err = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32);
+ if (err < 0)
+ goto err_lock;
+ err = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32);
+ if (err < 0)
+ goto err_lock;
+
+ err = snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_TIME,
+ 5000, UINT_MAX);
+ if (err < 0)
+ goto err_lock;
+
+ err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
+ if (err < 0)
+ goto err_lock;
+
+ return 0;
+
+err_lock:
+ dice_unlock(dice);
+error:
+ return err;
+}
+
+static int dice_close(struct snd_pcm_substream *substream)
+{
+ struct dice *dice = substream->private_data;
+
+ dice_unlock(dice);
+
+ return 0;
+}
+
+static int dice_stream_start_packets(struct dice *dice)
+{
+ int err;
+
+ if (amdtp_out_stream_running(&dice->stream))
+ return 0;
+
+ err = amdtp_out_stream_start(&dice->stream, dice->resources.channel,
+ fw_parent_device(dice->unit)->max_speed);
+ if (err < 0)
+ return err;
+
+ err = dice_enable_set(dice);
+ if (err < 0) {
+ amdtp_out_stream_stop(&dice->stream);
+ return err;
+ }
+
+ return 0;
+}
+
+static int dice_stream_start(struct dice *dice)
+{
+ __be32 channel;
+ int err;
+
+ if (!dice->resources.allocated) {
+ err = fw_iso_resources_allocate(&dice->resources,
+ amdtp_out_stream_get_max_payload(&dice->stream),
+ fw_parent_device(dice->unit)->max_speed);
+ if (err < 0)
+ goto error;
+
+ channel = cpu_to_be32(dice->resources.channel);
+ err = snd_fw_transaction(dice->unit,
+ TCODE_WRITE_QUADLET_REQUEST,
+ rx_address(dice, RX_ISOCHRONOUS),
+ &channel, 4, 0);
+ if (err < 0)
+ goto err_resources;
+ }
+
+ err = dice_stream_start_packets(dice);
+ if (err < 0)
+ goto err_rx_channel;
+
+ return 0;
+
+err_rx_channel:
+ channel = cpu_to_be32((u32)-1);
+ snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
+ rx_address(dice, RX_ISOCHRONOUS), &channel, 4, 0);
+err_resources:
+ fw_iso_resources_free(&dice->resources);
+error:
+ return err;
+}
+
+static void dice_stream_stop_packets(struct dice *dice)
+{
+ if (amdtp_out_stream_running(&dice->stream)) {
+ dice_enable_clear(dice);
+ amdtp_out_stream_stop(&dice->stream);
+ }
+}
+
+static void dice_stream_stop(struct dice *dice)
+{
+ __be32 channel;
+
+ dice_stream_stop_packets(dice);
+
+ if (!dice->resources.allocated)
+ return;
+
+ channel = cpu_to_be32((u32)-1);
+ snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
+ rx_address(dice, RX_ISOCHRONOUS), &channel, 4, 0);
+
+ fw_iso_resources_free(&dice->resources);
+}
+
+static int dice_change_rate(struct dice *dice, unsigned int clock_rate)
+{
+ __be32 value;
+ int err;
+
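+ /*
+ * The device acknowledges a clock change with a NOTIFY_CLOCK_ACCEPTED
+ * notification; the completion is waited for below so that the new rate
+ * is in effect before streaming continues.
+ */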
+ INIT_COMPLETION(dice->clock_accepted);
+
+ value = cpu_to_be32(clock_rate | CLOCK_SOURCE_ARX1);
+ err = snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
+ global_address(dice, GLOBAL_CLOCK_SELECT),
+ &value, 4, 0);
+ if (err < 0)
+ return err;
+
+ if (!wait_for_completion_timeout(&dice->clock_accepted,
+ msecs_to_jiffies(100)))
+ dev_warn(&dice->unit->device, "clock change timed out\n");
+
+ return 0;
+}
+
+static int dice_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct dice *dice = substream->private_data;
+ unsigned int rate_index, mode;
+ int err;
+
+ mutex_lock(&dice->mutex);
+ dice_stream_stop(dice);
+ mutex_unlock(&dice->mutex);
+
+ err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
+ if (err < 0)
+ return err;
+
+ rate_index = rate_to_index(params_rate(hw_params));
+ err = dice_change_rate(dice, rate_index << CLOCK_RATE_SHIFT);
+ if (err < 0)
+ return err;
+
+ mode = rate_index_to_mode(rate_index);
+ amdtp_out_stream_set_parameters(&dice->stream,
+ params_rate(hw_params),
+ params_channels(hw_params),
+ dice->rx_midi_ports[mode]);
+ amdtp_out_stream_set_pcm_format(&dice->stream,
+ params_format(hw_params));
+
+ return 0;
+}
+
+static int dice_hw_free(struct snd_pcm_substream *substream)
+{
+ struct dice *dice = substream->private_data;
+
+ mutex_lock(&dice->mutex);
+ dice_stream_stop(dice);
+ mutex_unlock(&dice->mutex);
+
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int dice_prepare(struct snd_pcm_substream *substream)
+{
+ struct dice *dice = substream->private_data;
+ int err;
+
+ mutex_lock(&dice->mutex);
+
+ if (amdtp_out_streaming_error(&dice->stream))
+ dice_stream_stop_packets(dice);
+
+ err = dice_stream_start(dice);
+ if (err < 0) {
+ mutex_unlock(&dice->mutex);
+ return err;
+ }
+
+ mutex_unlock(&dice->mutex);
+
+ amdtp_out_stream_pcm_prepare(&dice->stream);
+
+ return 0;
+}
+
+static int dice_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct dice *dice = substream->private_data;
+ struct snd_pcm_substream *pcm;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ pcm = substream;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ pcm = NULL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ amdtp_out_stream_pcm_trigger(&dice->stream, pcm);
+
+ return 0;
+}
+
+static snd_pcm_uframes_t dice_pointer(struct snd_pcm_substream *substream)
+{
+ struct dice *dice = substream->private_data;
+
+ return amdtp_out_stream_pcm_pointer(&dice->stream);
+}
+
+static int dice_create_pcm(struct dice *dice)
+{
+ static struct snd_pcm_ops ops = {
+ .open = dice_open,
+ .close = dice_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = dice_hw_params,
+ .hw_free = dice_hw_free,
+ .prepare = dice_prepare,
+ .trigger = dice_trigger,
+ .pointer = dice_pointer,
+ .page = snd_pcm_lib_get_vmalloc_page,
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+ };
+ struct snd_pcm *pcm;
+ int err;
+
+ err = snd_pcm_new(dice->card, "DICE", 0, 1, 0, &pcm);
+ if (err < 0)
+ return err;
+ pcm->private_data = dice;
+ strcpy(pcm->name, dice->card->shortname);
+ pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->ops = &ops;
+
+ return 0;
+}
+
+static long dice_hwdep_read(struct snd_hwdep *hwdep, char __user *buf,
+ long count, loff_t *offset)
+{
+ struct dice *dice = hwdep->private_data;
+ DEFINE_WAIT(wait);
+ union snd_firewire_event event;
+
+ spin_lock_irq(&dice->lock);
+
+ while (!dice->dev_lock_changed && dice->notification_bits == 0) {
+ prepare_to_wait(&dice->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_unlock_irq(&dice->lock);
+ schedule();
+ finish_wait(&dice->hwdep_wait, &wait);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ spin_lock_irq(&dice->lock);
+ }
+
+ memset(&event, 0, sizeof(event));
+ if (dice->dev_lock_changed) {
+ event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
+ event.lock_status.status = dice->dev_lock_count > 0;
+ dice->dev_lock_changed = false;
+
+ count = min(count, (long)sizeof(event.lock_status));
+ } else {
+ event.dice_notification.type = SNDRV_FIREWIRE_EVENT_DICE_NOTIFICATION;
+ event.dice_notification.notification = dice->notification_bits;
+ dice->notification_bits = 0;
+
+ count = min(count, (long)sizeof(event.dice_notification));
+ }
+
+ spin_unlock_irq(&dice->lock);
+
+ if (copy_to_user(buf, &event, count))
+ return -EFAULT;
+
+ return count;
+}
+
+static unsigned int dice_hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+ poll_table *wait)
+{
+ struct dice *dice = hwdep->private_data;
+ unsigned int events;
+
+ poll_wait(file, &dice->hwdep_wait, wait);
+
+ spin_lock_irq(&dice->lock);
+ if (dice->dev_lock_changed || dice->notification_bits != 0)
+ events = POLLIN | POLLRDNORM;
+ else
+ events = 0;
+ spin_unlock_irq(&dice->lock);
+
+ return events;
+}
+
+static int dice_hwdep_get_info(struct dice *dice, void __user *arg)
+{
+ struct fw_device *dev = fw_parent_device(dice->unit);
+ struct snd_firewire_get_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.type = SNDRV_FIREWIRE_TYPE_DICE;
+ info.card = dev->card->index;
+ *(__be32 *)&info.guid[0] = cpu_to_be32(dev->config_rom[3]);
+ *(__be32 *)&info.guid[4] = cpu_to_be32(dev->config_rom[4]);
+ strlcpy(info.device_name, dev_name(&dev->device),
+ sizeof(info.device_name));
+
+ if (copy_to_user(arg, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int dice_hwdep_lock(struct dice *dice)
+{
+ int err;
+
+ spin_lock_irq(&dice->lock);
+
+ if (dice->dev_lock_count == 0) {
+ dice->dev_lock_count = -1;
+ err = 0;
+ } else {
+ err = -EBUSY;
+ }
+
+ spin_unlock_irq(&dice->lock);
+
+ return err;
+}
+
+static int dice_hwdep_unlock(struct dice *dice)
+{
+ int err;
+
+ spin_lock_irq(&dice->lock);
+
+ if (dice->dev_lock_count == -1) {
+ dice->dev_lock_count = 0;
+ err = 0;
+ } else {
+ err = -EBADFD;
+ }
+
+ spin_unlock_irq(&dice->lock);
+
+ return err;
+}
+
+static int dice_hwdep_release(struct snd_hwdep *hwdep, struct file *file)
+{
+ struct dice *dice = hwdep->private_data;
+
+ spin_lock_irq(&dice->lock);
+ if (dice->dev_lock_count == -1)
+ dice->dev_lock_count = 0;
+ spin_unlock_irq(&dice->lock);
+
+ return 0;
+}
+
+static int dice_hwdep_ioctl(struct snd_hwdep *hwdep, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct dice *dice = hwdep->private_data;
+
+ switch (cmd) {
+ case SNDRV_FIREWIRE_IOCTL_GET_INFO:
+ return dice_hwdep_get_info(dice, (void __user *)arg);
+ case SNDRV_FIREWIRE_IOCTL_LOCK:
+ return dice_hwdep_lock(dice);
+ case SNDRV_FIREWIRE_IOCTL_UNLOCK:
+ return dice_hwdep_unlock(dice);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static int dice_hwdep_compat_ioctl(struct snd_hwdep *hwdep, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return dice_hwdep_ioctl(hwdep, file, cmd,
+ (unsigned long)compat_ptr(arg));
+}
+#else
+#define dice_hwdep_compat_ioctl NULL
+#endif
+
+static int dice_create_hwdep(struct dice *dice)
+{
+ static const struct snd_hwdep_ops ops = {
+ .read = dice_hwdep_read,
+ .release = dice_hwdep_release,
+ .poll = dice_hwdep_poll,
+ .ioctl = dice_hwdep_ioctl,
+ .ioctl_compat = dice_hwdep_compat_ioctl,
+ };
+ struct snd_hwdep *hwdep;
+ int err;
+
+ err = snd_hwdep_new(dice->card, "DICE", 0, &hwdep);
+ if (err < 0)
+ return err;
+ strcpy(hwdep->name, "DICE");
+ hwdep->iface = SNDRV_HWDEP_IFACE_FW_DICE;
+ hwdep->ops = ops;
+ hwdep->private_data = dice;
+ hwdep->exclusive = true;
+
+ return 0;
+}
+
+static int dice_proc_read_mem(struct dice *dice, void *buffer,
+ unsigned int offset_q, unsigned int quadlets)
+{
+ unsigned int i;
+ int err;
+
+ err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
+ DICE_PRIVATE_SPACE + 4 * offset_q,
+ buffer, 4 * quadlets, 0);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < quadlets; ++i)
+ be32_to_cpus(&((u32 *)buffer)[i]);
+
+ return 0;
+}
+
+static const char *str_from_array(const char *const strs[], unsigned int count,
+ unsigned int i)
+{
+ if (i < count)
+ return strs[i];
+ else
+ return "(unknown)";
+}
+
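+/*
+ * Fix the byte order of a string that was read quadlet-wise, and terminate
+ * it at the "\\" end marker (or at the end of the buffer).
+ */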
+static void dice_proc_fixup_string(char *s, unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i += 4)
+ cpu_to_le32s((u32 *)(s + i));
+
+ for (i = 0; i < size - 2; ++i) {
+ if (s[i] == '\0')
+ return;
+ if (s[i] == '\\' && s[i + 1] == '\\') {
+ s[i + 2] = '\0';
+ return;
+ }
+ }
+ s[size - 1] = '\0';
+}
+
+static void dice_proc_read(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ static const char *const section_names[5] = {
+ "global", "tx", "rx", "ext_sync", "unused2"
+ };
+ static const char *const clock_sources[] = {
+ "aes1", "aes2", "aes3", "aes4", "aes", "adat", "tdif",
+ "wc", "arx1", "arx2", "arx3", "arx4", "internal"
+ };
+ static const char *const rates[] = {
+ "32000", "44100", "48000", "88200", "96000", "176400", "192000",
+ "any low", "any mid", "any high", "none"
+ };
+ struct dice *dice = entry->private_data;
+ u32 sections[ARRAY_SIZE(section_names) * 2];
+ struct {
+ u32 number;
+ u32 size;
+ } tx_rx_header;
+ union {
+ struct {
+ u32 owner_hi, owner_lo;
+ u32 notification;
+ char nick_name[NICK_NAME_SIZE];
+ u32 clock_select;
+ u32 enable;
+ u32 status;
+ u32 extended_status;
+ u32 sample_rate;
+ u32 version;
+ u32 clock_caps;
+ char clock_source_names[CLOCK_SOURCE_NAMES_SIZE];
+ } global;
+ struct {
+ u32 iso;
+ u32 number_audio;
+ u32 number_midi;
+ u32 speed;
+ char names[TX_NAMES_SIZE];
+ u32 ac3_caps;
+ u32 ac3_enable;
+ } tx;
+ struct {
+ u32 iso;
+ u32 seq_start;
+ u32 number_audio;
+ u32 number_midi;
+ char names[RX_NAMES_SIZE];
+ u32 ac3_caps;
+ u32 ac3_enable;
+ } rx;
+ struct {
+ u32 clock_source;
+ u32 locked;
+ u32 rate;
+ u32 adat_user_data;
+ } ext_sync;
+ } buf;
+ unsigned int quadlets, stream, i;
+
+ if (dice_proc_read_mem(dice, sections, 0, ARRAY_SIZE(sections)) < 0)
+ return;
+ snd_iprintf(buffer, "sections:\n");
+ for (i = 0; i < ARRAY_SIZE(section_names); ++i)
+ snd_iprintf(buffer, " %s: offset %u, size %u\n",
+ section_names[i],
+ sections[i * 2], sections[i * 2 + 1]);
+
+ quadlets = min_t(u32, sections[1], sizeof(buf.global) / 4);
+ if (dice_proc_read_mem(dice, &buf.global, sections[0], quadlets) < 0)
+ return;
+ snd_iprintf(buffer, "global:\n");
+ snd_iprintf(buffer, " owner: %04x:%04x%08x\n",
+ buf.global.owner_hi >> 16,
+ buf.global.owner_hi & 0xffff, buf.global.owner_lo);
+ snd_iprintf(buffer, " notification: %08x\n", buf.global.notification);
+ dice_proc_fixup_string(buf.global.nick_name, NICK_NAME_SIZE);
+ snd_iprintf(buffer, " nick name: %s\n", buf.global.nick_name);
+ snd_iprintf(buffer, " clock select: %s %s\n",
+ str_from_array(clock_sources, ARRAY_SIZE(clock_sources),
+ buf.global.clock_select & CLOCK_SOURCE_MASK),
+ str_from_array(rates, ARRAY_SIZE(rates),
+ (buf.global.clock_select & CLOCK_RATE_MASK)
+ >> CLOCK_RATE_SHIFT));
+ snd_iprintf(buffer, " enable: %u\n", buf.global.enable);
+ snd_iprintf(buffer, " status: %slocked %s\n",
+ buf.global.status & STATUS_SOURCE_LOCKED ? "" : "un",
+ str_from_array(rates, ARRAY_SIZE(rates),
+ (buf.global.status &
+ STATUS_NOMINAL_RATE_MASK)
+ >> CLOCK_RATE_SHIFT));
+ snd_iprintf(buffer, " ext status: %08x\n", buf.global.extended_status);
+ snd_iprintf(buffer, " sample rate: %u\n", buf.global.sample_rate);
+ snd_iprintf(buffer, " version: %u.%u.%u.%u\n",
+ (buf.global.version >> 24) & 0xff,
+ (buf.global.version >> 16) & 0xff,
+ (buf.global.version >> 8) & 0xff,
+ (buf.global.version >> 0) & 0xff);
+ if (quadlets >= 90) {
+ snd_iprintf(buffer, " clock caps:");
+ for (i = 0; i <= 6; ++i)
+ if (buf.global.clock_caps & (1 << i))
+ snd_iprintf(buffer, " %s", rates[i]);
+ for (i = 0; i <= 12; ++i)
+ if (buf.global.clock_caps & (1 << (16 + i)))
+ snd_iprintf(buffer, " %s", clock_sources[i]);
+ snd_iprintf(buffer, "\n");
+ dice_proc_fixup_string(buf.global.clock_source_names,
+ CLOCK_SOURCE_NAMES_SIZE);
+ snd_iprintf(buffer, " clock source names: %s\n",
+ buf.global.clock_source_names);
+ }
+
+ if (dice_proc_read_mem(dice, &tx_rx_header, sections[2], 2) < 0)
+ return;
+ quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.tx) / 4);
+ for (stream = 0; stream < tx_rx_header.number; ++stream) {
+ if (dice_proc_read_mem(dice, &buf.tx, sections[2] + 2 +
+ stream * tx_rx_header.size,
+ quadlets) < 0)
+ break;
+ snd_iprintf(buffer, "tx %u:\n", stream);
+ snd_iprintf(buffer, " iso channel: %d\n", (int)buf.tx.iso);
+ snd_iprintf(buffer, " audio channels: %u\n",
+ buf.tx.number_audio);
+ snd_iprintf(buffer, " midi ports: %u\n", buf.tx.number_midi);
+ snd_iprintf(buffer, " speed: S%u\n", 100u << buf.tx.speed);
+ if (quadlets >= 68) {
+ dice_proc_fixup_string(buf.tx.names, TX_NAMES_SIZE);
+ snd_iprintf(buffer, " names: %s\n", buf.tx.names);
+ }
+ if (quadlets >= 70) {
+ snd_iprintf(buffer, " ac3 caps: %08x\n",
+ buf.tx.ac3_caps);
+ snd_iprintf(buffer, " ac3 enable: %08x\n",
+ buf.tx.ac3_enable);
+ }
+ }
+
+ if (dice_proc_read_mem(dice, &tx_rx_header, sections[4], 2) < 0)
+ return;
+ quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.rx) / 4);
+ for (stream = 0; stream < tx_rx_header.number; ++stream) {
+ if (dice_proc_read_mem(dice, &buf.rx, sections[4] + 2 +
+ stream * tx_rx_header.size,
+ quadlets) < 0)
+ break;
+ snd_iprintf(buffer, "rx %u:\n", stream);
+ snd_iprintf(buffer, " iso channel: %d\n", (int)buf.rx.iso);
+ snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start);
+ snd_iprintf(buffer, " audio channels: %u\n",
+ buf.rx.number_audio);
+ snd_iprintf(buffer, " midi ports: %u\n", buf.rx.number_midi);
+ if (quadlets >= 68) {
+ dice_proc_fixup_string(buf.rx.names, RX_NAMES_SIZE);
+ snd_iprintf(buffer, " names: %s\n", buf.rx.names);
+ }
+ if (quadlets >= 70) {
+ snd_iprintf(buffer, " ac3 caps: %08x\n",
+ buf.rx.ac3_caps);
+ snd_iprintf(buffer, " ac3 enable: %08x\n",
+ buf.rx.ac3_enable);
+ }
+ }
+
+ quadlets = min_t(u32, sections[7], sizeof(buf.ext_sync) / 4);
+ if (quadlets >= 4) {
+ if (dice_proc_read_mem(dice, &buf.ext_sync,
+ sections[6], 4) < 0)
+ return;
+ snd_iprintf(buffer, "ext status:\n");
+ snd_iprintf(buffer, " clock source: %s\n",
+ str_from_array(clock_sources,
+ ARRAY_SIZE(clock_sources),
+ buf.ext_sync.clock_source));
+ snd_iprintf(buffer, " locked: %u\n", buf.ext_sync.locked);
+ snd_iprintf(buffer, " rate: %s\n",
+ str_from_array(rates, ARRAY_SIZE(rates),
+ buf.ext_sync.rate));
+ snd_iprintf(buffer, " adat user data: ");
+ if (buf.ext_sync.adat_user_data & ADAT_USER_DATA_NO_DATA)
+ snd_iprintf(buffer, "-\n");
+ else
+ snd_iprintf(buffer, "%x\n",
+ buf.ext_sync.adat_user_data);
+ }
+}
+
+static void dice_create_proc(struct dice *dice)
+{
+ struct snd_info_entry *entry;
+
+ if (!snd_card_proc_new(dice->card, "dice", &entry))
+ snd_info_set_text_ops(entry, dice, dice_proc_read);
+}
+
+static void dice_card_free(struct snd_card *card)
+{
+ struct dice *dice = card->private_data;
+
+ amdtp_out_stream_destroy(&dice->stream);
+ fw_core_remove_address_handler(&dice->notification_handler);
+ mutex_destroy(&dice->mutex);
+}
+
+#define OUI_WEISS 0x001c6a
+
+#define DICE_CATEGORY_ID 0x04
+#define WEISS_CATEGORY_ID 0x00
+
+static int dice_interface_check(struct fw_unit *unit)
+{
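+ /* minimum offset and size, in quadlets, for each register section */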
+ static const int min_values[10] = {
+ 10, 0x64 / 4,
+ 10, 0x18 / 4,
+ 10, 0x18 / 4,
+ 0, 0,
+ 0, 0,
+ };
+ struct fw_device *device = fw_parent_device(unit);
+ struct fw_csr_iterator it;
+ int key, value, vendor = -1, model = -1, err;
+ unsigned int category, i;
+ __be32 pointers[ARRAY_SIZE(min_values)];
+ __be32 tx_data[4];
+ __be32 version;
+
+ /*
+ * Check that GUID and unit directory are constructed according to DICE
+ * rules, i.e., that the specifier ID is the GUID's OUI, and that the
+ * GUID chip ID consists of the 8-bit category ID, the 10-bit product
+ * ID, and a 22-bit serial number.
+ */
+ fw_csr_iterator_init(&it, unit->directory);
+ while (fw_csr_iterator_next(&it, &key, &value)) {
+ switch (key) {
+ case CSR_SPECIFIER_ID:
+ vendor = value;
+ break;
+ case CSR_MODEL:
+ model = value;
+ break;
+ }
+ }
+ if (vendor == OUI_WEISS)
+ category = WEISS_CATEGORY_ID;
+ else
+ category = DICE_CATEGORY_ID;
+ if (device->config_rom[3] != ((vendor << 8) | category) ||
+ device->config_rom[4] >> 22 != model)
+ return -ENODEV;
+
+ /*
+ * Check that the sub address spaces exist and are located inside the
+ * private address space. The minimum values are chosen so that all
+ * minimally required registers are included.
+ */
+ err = snd_fw_transaction(unit, TCODE_READ_BLOCK_REQUEST,
+ DICE_PRIVATE_SPACE,
+ pointers, sizeof(pointers), 0);
+ if (err < 0)
+ return -ENODEV;
+ for (i = 0; i < ARRAY_SIZE(pointers); ++i) {
+ value = be32_to_cpu(pointers[i]);
+ if (value < min_values[i] || value >= 0x40000)
+ return -ENODEV;
+ }
+
+ /* We support playback only. Let capture devices be handled by FFADO. */
+ err = snd_fw_transaction(unit, TCODE_READ_BLOCK_REQUEST,
+ DICE_PRIVATE_SPACE +
+ be32_to_cpu(pointers[2]) * 4,
+ tx_data, sizeof(tx_data), 0);
+ if (err < 0 || (tx_data[0] && tx_data[3]))
+ return -ENODEV;
+
+ /*
+ * Check that the implemented DICE driver specification major version
+ * number matches.
+ */
+ err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
+ DICE_PRIVATE_SPACE +
+ be32_to_cpu(pointers[0]) * 4 + GLOBAL_VERSION,
+ &version, 4, 0);
+ if (err < 0)
+ return -ENODEV;
+ if ((version & cpu_to_be32(0xff000000)) != cpu_to_be32(0x01000000)) {
+ dev_err(&unit->device,
+ "unknown DICE version: 0x%08x\n", be32_to_cpu(version));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int highest_supported_mode_rate(struct dice *dice, unsigned int mode)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(dice_rates) - 1; i >= 0; --i)
+ if ((dice->clock_caps & (1 << i)) &&
+ rate_index_to_mode(i) == mode)
+ return i;
+
+ return -1;
+}
+
+static int dice_read_mode_params(struct dice *dice, unsigned int mode)
+{
+ __be32 values[2];
+ int rate_index, err;
+
+ rate_index = highest_supported_mode_rate(dice, mode);
+ if (rate_index < 0) {
+ dice->rx_channels[mode] = 0;
+ dice->rx_midi_ports[mode] = 0;
+ return 0;
+ }
+
+ err = dice_change_rate(dice, rate_index << CLOCK_RATE_SHIFT);
+ if (err < 0)
+ return err;
+
+ err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
+ rx_address(dice, RX_NUMBER_AUDIO),
+ values, 2 * 4, 0);
+ if (err < 0)
+ return err;
+
+ dice->rx_channels[mode] = be32_to_cpu(values[0]);
+ dice->rx_midi_ports[mode] = be32_to_cpu(values[1]);
+
+ return 0;
+}
+
+static int dice_read_params(struct dice *dice)
+{
+ __be32 pointers[6];
+ __be32 value;
+ int mode, err;
+
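+ /*
+ * The start of the private space holds offset/size pairs, in quadlets,
+ * for the register sections; we need the global and rx sections here.
+ */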
+ err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
+ DICE_PRIVATE_SPACE,
+ pointers, sizeof(pointers), 0);
+ if (err < 0)
+ return err;
+
+ dice->global_offset = be32_to_cpu(pointers[0]) * 4;
+ dice->rx_offset = be32_to_cpu(pointers[4]) * 4;
+
+ /* some very old firmwares don't tell about their clock support */
+ if (be32_to_cpu(pointers[1]) * 4 >= GLOBAL_CLOCK_CAPABILITIES + 4) {
+ err = snd_fw_transaction(
+ dice->unit, TCODE_READ_QUADLET_REQUEST,
+ global_address(dice, GLOBAL_CLOCK_CAPABILITIES),
+ &value, 4, 0);
+ if (err < 0)
+ return err;
+ dice->clock_caps = be32_to_cpu(value);
+ } else {
+ /* this should be supported by any device */
+ dice->clock_caps = CLOCK_CAP_RATE_44100 |
+ CLOCK_CAP_RATE_48000 |
+ CLOCK_CAP_SOURCE_ARX1 |
+ CLOCK_CAP_SOURCE_INTERNAL;
+ }
+
+ for (mode = 2; mode >= 0; --mode) {
+ err = dice_read_mode_params(dice, mode);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static void dice_card_strings(struct dice *dice)
+{
+ struct snd_card *card = dice->card;
+ struct fw_device *dev = fw_parent_device(dice->unit);
+ char vendor[32], model[32];
+ unsigned int i;
+ int err;
+
+ strcpy(card->driver, "DICE");
+
+ strcpy(card->shortname, "DICE");
+ BUILD_BUG_ON(NICK_NAME_SIZE < sizeof(card->shortname));
+ err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
+ global_address(dice, GLOBAL_NICK_NAME),
+ card->shortname, sizeof(card->shortname), 0);
+ if (err >= 0) {
+ /* DICE strings are returned in "always-wrong" endianness */
+ BUILD_BUG_ON(sizeof(card->shortname) % 4 != 0);
+ for (i = 0; i < sizeof(card->shortname); i += 4)
+ swab32s((u32 *)&card->shortname[i]);
+ card->shortname[sizeof(card->shortname) - 1] = '\0';
+ }
+
+ strcpy(vendor, "?");
+ fw_csr_string(dev->config_rom + 5, CSR_VENDOR, vendor, sizeof(vendor));
+ strcpy(model, "?");
+ fw_csr_string(dice->unit->directory, CSR_MODEL, model, sizeof(model));
+ snprintf(card->longname, sizeof(card->longname),
+ "%s %s (serial %u) at %s, S%d",
+ vendor, model, dev->config_rom[4] & 0x3fffff,
+ dev_name(&dice->unit->device), 100 << dev->max_speed);
+
+ strcpy(card->mixername, "DICE");
+}
+
+static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
+{
+ struct snd_card *card;
+ struct dice *dice;
+ __be32 clock_sel;
+ int err;
+
+ err = dice_interface_check(unit);
+ if (err < 0)
+ return err;
+
+ err = snd_card_create(-1, NULL, THIS_MODULE, sizeof(*dice), &card);
+ if (err < 0)
+ return err;
+ snd_card_set_dev(card, &unit->device);
+
+ dice = card->private_data;
+ dice->card = card;
+ spin_lock_init(&dice->lock);
+ mutex_init(&dice->mutex);
+ dice->unit = unit;
+ init_completion(&dice->clock_accepted);
+ init_waitqueue_head(&dice->hwdep_wait);
+
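+ /* the device writes notification quadlets to this address range */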
+ dice->notification_handler.length = 4;
+ dice->notification_handler.address_callback = dice_notification;
+ dice->notification_handler.callback_data = dice;
+ err = fw_core_add_address_handler(&dice->notification_handler,
+ &fw_high_memory_region);
+ if (err < 0)
+ goto err_mutex;
+
+ err = dice_owner_set(dice);
+ if (err < 0)
+ goto err_notification_handler;
+
+ err = dice_read_params(dice);
+ if (err < 0)
+ goto err_owner;
+
+ err = fw_iso_resources_init(&dice->resources, unit);
+ if (err < 0)
+ goto err_owner;
+ dice->resources.channels_mask = 0x00000000ffffffffuLL;
+
+ err = amdtp_out_stream_init(&dice->stream, unit,
+ CIP_BLOCKING | CIP_HI_DUALWIRE);
+ if (err < 0)
+ goto err_resources;
+
+ card->private_free = dice_card_free;
+
+ dice_card_strings(dice);
+
+ err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
+ global_address(dice, GLOBAL_CLOCK_SELECT),
+ &clock_sel, 4, 0);
+ if (err < 0)
+ goto error;
+ clock_sel &= cpu_to_be32(~CLOCK_SOURCE_MASK);
+ clock_sel |= cpu_to_be32(CLOCK_SOURCE_ARX1);
+ err = snd_fw_transaction(unit, TCODE_WRITE_QUADLET_REQUEST,
+ global_address(dice, GLOBAL_CLOCK_SELECT),
+ &clock_sel, 4, 0);
+ if (err < 0)
+ goto error;
+
+ err = dice_create_pcm(dice);
+ if (err < 0)
+ goto error;
+
+ err = dice_create_hwdep(dice);
+ if (err < 0)
+ goto error;
+
+ dice_create_proc(dice);
+
+ err = snd_card_register(card);
+ if (err < 0)
+ goto error;
+
+ dev_set_drvdata(&unit->device, dice);
+
+ return 0;
+
+err_resources:
+ fw_iso_resources_destroy(&dice->resources);
+err_owner:
+ dice_owner_clear(dice);
+err_notification_handler:
+ fw_core_remove_address_handler(&dice->notification_handler);
+err_mutex:
+ mutex_destroy(&dice->mutex);
+error:
+ snd_card_free(card);
+ return err;
+}
+
+static void dice_remove(struct fw_unit *unit)
+{
+ struct dice *dice = dev_get_drvdata(&unit->device);
+
+ amdtp_out_stream_pcm_abort(&dice->stream);
+
+ snd_card_disconnect(dice->card);
+
+ mutex_lock(&dice->mutex);
+
+ dice_stream_stop(dice);
+ dice_owner_clear(dice);
+
+ mutex_unlock(&dice->mutex);
+
+ snd_card_free_when_closed(dice->card);
+}
+
+static void dice_bus_reset(struct fw_unit *unit)
+{
+ struct dice *dice = dev_get_drvdata(&unit->device);
+
+ /*
+ * On a bus reset, the DICE firmware disables streaming and then goes
+ * off contemplating its own navel for hundreds of milliseconds before
+ * it can react to any of our attempts to reenable streaming. This
+ * means that we lose synchronization anyway, so we force our streams
+ * to stop so that the application can restart them in an orderly
+ * manner.
+ */
+ amdtp_out_stream_pcm_abort(&dice->stream);
+
+ mutex_lock(&dice->mutex);
+
+ dice->global_enabled = false;
+ dice_stream_stop_packets(dice);
+
+ dice_owner_update(dice);
+
+ fw_iso_resources_update(&dice->resources);
+
+ mutex_unlock(&dice->mutex);
+}
+
+#define DICE_INTERFACE 0x000001
+
+static const struct ieee1394_device_id dice_id_table[] = {
+ {
+ .match_flags = IEEE1394_MATCH_VERSION,
+ .version = DICE_INTERFACE,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(ieee1394, dice_id_table);
+
+static struct fw_driver dice_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .bus = &fw_bus_type,
+ },
+ .probe = dice_probe,
+ .update = dice_bus_reset,
+ .remove = dice_remove,
+ .id_table = dice_id_table,
+};
+
+static int __init alsa_dice_init(void)
+{
+ return driver_register(&dice_driver.driver);
+}
+
+static void __exit alsa_dice_exit(void)
+{
+ driver_unregister(&dice_driver.driver);
+}
+
+module_init(alsa_dice_init);
+module_exit(alsa_dice_exit);
diff --git a/sound/firewire/fcp.c b/sound/firewire/fcp.c
index ec578b5ad8da..860c08073c59 100644
--- a/sound/firewire/fcp.c
+++ b/sound/firewire/fcp.c
@@ -90,7 +90,7 @@ int fcp_avc_transaction(struct fw_unit *unit,
: TCODE_WRITE_BLOCK_REQUEST;
ret = snd_fw_transaction(t.unit, tcode,
CSR_REGISTER_BASE + CSR_FCP_COMMAND,
- (void *)command, command_size);
+ (void *)command, command_size, 0);
if (ret < 0)
break;
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index 58a5afefdc69..fd42e6b315e6 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -217,7 +217,7 @@ static void isight_packet(struct fw_iso_context *context, u32 cycle,
static int isight_connect(struct isight *isight)
{
- int ch, err, rcode, errors = 0;
+ int ch, err;
__be32 value;
retry_after_bus_reset:
@@ -230,27 +230,19 @@ retry_after_bus_reset:
}
value = cpu_to_be32(ch | (isight->device->max_speed << SPEED_SHIFT));
- for (;;) {
- rcode = fw_run_transaction(
- isight->device->card,
- TCODE_WRITE_QUADLET_REQUEST,
- isight->device->node_id,
- isight->resources.generation,
- isight->device->max_speed,
- isight->audio_base + REG_ISO_TX_CONFIG,
- &value, 4);
- if (rcode == RCODE_COMPLETE) {
- return 0;
- } else if (rcode == RCODE_GENERATION) {
- fw_iso_resources_free(&isight->resources);
- goto retry_after_bus_reset;
- } else if (rcode_is_permanent_error(rcode) || ++errors >= 3) {
- err = -EIO;
- goto err_resources;
- }
- msleep(5);
+ err = snd_fw_transaction(isight->unit, TCODE_WRITE_QUADLET_REQUEST,
+ isight->audio_base + REG_ISO_TX_CONFIG,
+ &value, 4, FW_FIXED_GENERATION |
+ isight->resources.generation);
+ if (err == -EAGAIN) {
+ fw_iso_resources_free(&isight->resources);
+ goto retry_after_bus_reset;
+ } else if (err < 0) {
+ goto err_resources;
}
+ return 0;
+
err_resources:
fw_iso_resources_free(&isight->resources);
error:
@@ -315,17 +307,19 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
static int reg_read(struct isight *isight, int offset, __be32 *value)
{
return snd_fw_transaction(isight->unit, TCODE_READ_QUADLET_REQUEST,
- isight->audio_base + offset, value, 4);
+ isight->audio_base + offset, value, 4, 0);
}
static int reg_write(struct isight *isight, int offset, __be32 value)
{
return snd_fw_transaction(isight->unit, TCODE_WRITE_QUADLET_REQUEST,
- isight->audio_base + offset, &value, 4);
+ isight->audio_base + offset, &value, 4, 0);
}
static void isight_stop_streaming(struct isight *isight)
{
+ __be32 value;
+
if (!isight->context)
return;
@@ -333,7 +327,10 @@ static void isight_stop_streaming(struct isight *isight)
fw_iso_context_destroy(isight->context);
isight->context = NULL;
fw_iso_resources_free(&isight->resources);
- reg_write(isight, REG_AUDIO_ENABLE, 0);
+ value = 0;
+ snd_fw_transaction(isight->unit, TCODE_WRITE_QUADLET_REQUEST,
+ isight->audio_base + REG_AUDIO_ENABLE,
+ &value, 4, FW_QUIET);
}
static int isight_hw_free(struct snd_pcm_substream *substream)
diff --git a/sound/firewire/lib.c b/sound/firewire/lib.c
index 14eb41498372..7409edba9f06 100644
--- a/sound/firewire/lib.c
+++ b/sound/firewire/lib.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include "lib.h"
-#define ERROR_RETRY_DELAY_MS 5
+#define ERROR_RETRY_DELAY_MS 20
/**
* snd_fw_transaction - send a request and wait for its completion
@@ -20,6 +20,9 @@
* @offset: the address in the target's address space
* @buffer: input/output data
* @length: length of @buffer
+ * @flags: use %FW_FIXED_GENERATION and add the generation value to attempt the
+ * request only in that generation; use %FW_QUIET to suppress error
+ * messages
*
* Submits an asynchronous request to the target device, and waits for the
* response. The node ID and the current generation are derived from @unit.
@@ -27,14 +30,18 @@
* Returns zero on success, or a negative error code.
*/
int snd_fw_transaction(struct fw_unit *unit, int tcode,
- u64 offset, void *buffer, size_t length)
+ u64 offset, void *buffer, size_t length,
+ unsigned int flags)
{
struct fw_device *device = fw_parent_device(unit);
int generation, rcode, tries = 0;
+ generation = flags & FW_GENERATION_MASK;
for (;;) {
- generation = device->generation;
- smp_rmb(); /* node_id vs. generation */
+ if (!(flags & FW_FIXED_GENERATION)) {
+ generation = device->generation;
+ smp_rmb(); /* node_id vs. generation */
+ }
rcode = fw_run_transaction(device->card, tcode,
device->node_id, generation,
device->max_speed, offset,
@@ -43,9 +50,14 @@ int snd_fw_transaction(struct fw_unit *unit, int tcode,
if (rcode == RCODE_COMPLETE)
return 0;
+ if (rcode == RCODE_GENERATION && (flags & FW_FIXED_GENERATION))
+ return -EAGAIN;
+
if (rcode_is_permanent_error(rcode) || ++tries >= 3) {
- dev_err(&unit->device, "transaction failed: %s\n",
- fw_rcode_string(rcode));
+ if (!(flags & FW_QUIET))
+ dev_err(&unit->device,
+ "transaction failed: %s\n",
+ fw_rcode_string(rcode));
return -EIO;
}
diff --git a/sound/firewire/lib.h b/sound/firewire/lib.h
index aef301476ea9..02cfabc9c3c4 100644
--- a/sound/firewire/lib.h
+++ b/sound/firewire/lib.h
@@ -6,8 +6,13 @@
struct fw_unit;
+#define FW_GENERATION_MASK 0x00ff
+#define FW_FIXED_GENERATION 0x0100
+#define FW_QUIET 0x0200
+
int snd_fw_transaction(struct fw_unit *unit, int tcode,
- u64 offset, void *buffer, size_t length);
+ u64 offset, void *buffer, size_t length,
+ unsigned int flags);
/* returns true if retrying the transaction would not make sense */
static inline bool rcode_is_permanent_error(int rcode)
diff --git a/sound/firewire/scs1x.c b/sound/firewire/scs1x.c
index 505fc8123199..858023cf4298 100644
--- a/sound/firewire/scs1x.c
+++ b/sound/firewire/scs1x.c
@@ -369,7 +369,7 @@ static int scs_init_hss_address(struct scs *scs)
data = cpu_to_be64(((u64)HSS1394_TAG_CHANGE_ADDRESS << 56) |
scs->hss_handler.offset);
err = snd_fw_transaction(scs->unit, TCODE_WRITE_BLOCK_REQUEST,
- HSS1394_ADDRESS, &data, 8);
+ HSS1394_ADDRESS, &data, 8, 0);
if (err < 0)
dev_err(&scs->unit->device, "HSS1394 communication failed\n");
@@ -455,12 +455,16 @@ err_card:
static void scs_update(struct fw_unit *unit)
{
struct scs *scs = dev_get_drvdata(&unit->device);
+ int generation;
__be64 data;
data = cpu_to_be64(((u64)HSS1394_TAG_CHANGE_ADDRESS << 56) |
scs->hss_handler.offset);
+ generation = fw_parent_device(unit)->generation;
+ smp_rmb(); /* node_id vs. generation */
snd_fw_transaction(scs->unit, TCODE_WRITE_BLOCK_REQUEST,
- HSS1394_ADDRESS, &data, 8);
+ HSS1394_ADDRESS, &data, 8,
+ FW_FIXED_GENERATION | generation);
}
static void scs_remove(struct fw_unit *unit)
diff --git a/sound/firewire/speakers.c b/sound/firewire/speakers.c
index fe9e6e2f2c5b..cc8bc3a51bc1 100644
--- a/sound/firewire/speakers.c
+++ b/sound/firewire/speakers.c
@@ -52,7 +52,6 @@ struct fwspk {
struct mutex mutex;
struct cmp_connection connection;
struct amdtp_out_stream stream;
- bool stream_running;
bool mute;
s16 volume[6];
s16 volume_min;
@@ -188,10 +187,9 @@ static int fwspk_close(struct snd_pcm_substream *substream)
static void fwspk_stop_stream(struct fwspk *fwspk)
{
- if (fwspk->stream_running) {
+ if (amdtp_out_stream_running(&fwspk->stream)) {
amdtp_out_stream_stop(&fwspk->stream);
cmp_connection_break(&fwspk->connection);
- fwspk->stream_running = false;
}
}
@@ -246,8 +244,10 @@ static int fwspk_hw_params(struct snd_pcm_substream *substream,
if (err < 0)
goto error;
- amdtp_out_stream_set_rate(&fwspk->stream, params_rate(hw_params));
- amdtp_out_stream_set_pcm(&fwspk->stream, params_channels(hw_params));
+ amdtp_out_stream_set_parameters(&fwspk->stream,
+ params_rate(hw_params),
+ params_channels(hw_params),
+ 0);
amdtp_out_stream_set_pcm_format(&fwspk->stream,
params_format(hw_params));
@@ -285,7 +285,7 @@ static int fwspk_prepare(struct snd_pcm_substream *substream)
if (amdtp_out_streaming_error(&fwspk->stream))
fwspk_stop_stream(fwspk);
- if (!fwspk->stream_running) {
+ if (!amdtp_out_stream_running(&fwspk->stream)) {
err = cmp_connection_establish(&fwspk->connection,
amdtp_out_stream_get_max_payload(&fwspk->stream));
if (err < 0)
@@ -296,8 +296,6 @@ static int fwspk_prepare(struct snd_pcm_substream *substream)
fwspk->connection.speed);
if (err < 0)
goto err_connection;
-
- fwspk->stream_running = true;
}
mutex_unlock(&fwspk->mutex);
@@ -647,7 +645,7 @@ static u32 fwspk_read_firmware_version(struct fw_unit *unit)
int err;
err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
- OXFORD_FIRMWARE_ID_ADDRESS, &data, 4);
+ OXFORD_FIRMWARE_ID_ADDRESS, &data, 4, 0);
return err >= 0 ? be32_to_cpu(data) : 0;
}
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index 5bf4fca19e48..15ae0250eace 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -60,7 +60,7 @@ static void reg_dump(struct ak4114 *ak4114)
printk(KERN_DEBUG "AK4114 REG DUMP:\n");
for (i = 0; i < 0x20; i++)
- printk(KERN_DEBUG "reg[%02x] = %02x (%02x)\n", i, reg_read(ak4114, i), i < sizeof(ak4114->regmap) ? ak4114->regmap[i] : 0);
+ printk(KERN_DEBUG "reg[%02x] = %02x (%02x)\n", i, reg_read(ak4114, i), i < ARRAY_SIZE(ak4114->regmap) ? ak4114->regmap[i] : 0);
}
#endif
@@ -81,7 +81,7 @@ static int snd_ak4114_dev_free(struct snd_device *device)
int snd_ak4114_create(struct snd_card *card,
ak4114_read_t *read, ak4114_write_t *write,
- const unsigned char pgm[7], const unsigned char txcsb[5],
+ const unsigned char pgm[6], const unsigned char txcsb[5],
void *private_data, struct ak4114 **r_ak4114)
{
struct ak4114 *chip;
@@ -101,7 +101,7 @@ int snd_ak4114_create(struct snd_card *card,
chip->private_data = private_data;
INIT_DELAYED_WORK(&chip->work, ak4114_stats);
- for (reg = 0; reg < 7; reg++)
+ for (reg = 0; reg < 6; reg++)
chip->regmap[reg] = pgm[reg];
for (reg = 0; reg < 5; reg++)
chip->txcsb[reg] = txcsb[reg];
@@ -142,7 +142,7 @@ static void ak4114_init_regs(struct ak4114 *chip)
/* release reset, but leave powerdown */
reg_write(chip, AK4114_REG_PWRDN, (old | AK4114_RST) & ~AK4114_PWN);
udelay(200);
- for (reg = 1; reg < 7; reg++)
+ for (reg = 1; reg < 6; reg++)
reg_write(chip, reg, chip->regmap[reg]);
for (reg = 0; reg < 5; reg++)
reg_write(chip, reg + AK4114_REG_TXCSB0, chip->txcsb[reg]);
diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
index ed726d1569e8..f3735e64791c 100644
--- a/sound/i2c/other/ak4xxx-adda.c
+++ b/sound/i2c/other/ak4xxx-adda.c
@@ -583,7 +583,7 @@ static int ak4xxx_capture_source_info(struct snd_kcontrol *kcontrol,
if (idx >= num_names)
return -EINVAL;
input_names = ak->adc_info[mixer_ch].input_names;
- strncpy(uinfo->value.enumerated.name, input_names[idx],
+ strlcpy(uinfo->value.enumerated.name, input_names[idx],
sizeof(uinfo->value.enumerated.name));
return 0;
}
diff --git a/sound/oss/sb_ess.c b/sound/oss/sb_ess.c
index c0be085e4a20..0e7254bde4c2 100644
--- a/sound/oss/sb_ess.c
+++ b/sound/oss/sb_ess.c
@@ -1544,7 +1544,7 @@ static int ess_has_rec_mixer (int submodel)
return 1;
default:
return 0;
- };
+ }
};
#ifdef FKS_LOGGING
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index d2b9d617aee5..b680d03e2419 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -739,7 +739,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
reg = ad1889_readw(chip, AD_DS_WADA);
snd_iprintf(buffer, "Right: %s, -%d dB\n",
(reg & AD_DS_WADA_RWAM) ? "mute" : "unmute",
- ((reg & AD_DS_WADA_RWAA) >> 8) * 3);
+ (reg & AD_DS_WADA_RWAA) * 3);
reg = ad1889_readw(chip, AD_DS_WAS);
snd_iprintf(buffer, "Wave samplerate: %u Hz\n", reg);
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 3dfa12b670eb..c6835a3d64fb 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -855,7 +855,6 @@ static void snd_ali_disable_spdif_out(struct snd_ali *codec)
static void snd_ali_update_ptr(struct snd_ali *codec, int channel)
{
struct snd_ali_voice *pvoice;
- struct snd_pcm_runtime *runtime;
struct snd_ali_channel_control *pchregs;
unsigned int old, mask;
#ifdef ALI_DEBUG
@@ -872,7 +871,6 @@ static void snd_ali_update_ptr(struct snd_ali *codec, int channel)
return;
pvoice = &codec->synth.voices[channel];
- runtime = pvoice->substream->runtime;
udelay(100);
spin_lock(&codec->reg_lock);
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index dc632cdc3870..5f2acd35dcb9 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -1913,6 +1913,7 @@ static int snd_asihpi_tuner_band_put(struct snd_kcontrol *kcontrol,
struct snd_card_asihpi *asihpi = snd_kcontrol_chip(kcontrol);
*/
u32 h_control = kcontrol->private_value;
+ unsigned int idx;
u16 band;
u16 tuner_bands[HPI_TUNER_BAND_LAST];
u32 num_bands = 0;
@@ -1920,7 +1921,10 @@ static int snd_asihpi_tuner_band_put(struct snd_kcontrol *kcontrol,
num_bands = asihpi_tuner_band_query(kcontrol, tuner_bands,
HPI_TUNER_BAND_LAST);
- band = tuner_bands[ucontrol->value.enumerated.item[0]];
+ idx = ucontrol->value.enumerated.item[0];
+ if (idx >= ARRAY_SIZE(tuner_bands))
+ idx = ARRAY_SIZE(tuner_bands) - 1;
+ band = tuner_bands[idx];
hpi_handle_error(hpi_tuner_set_band(h_control, band));
return 1;
@@ -2383,7 +2387,8 @@ static int snd_asihpi_clksrc_put(struct snd_kcontrol *kcontrol,
struct snd_card_asihpi *asihpi =
(struct snd_card_asihpi *)(kcontrol->private_data);
struct clk_cache *clkcache = &asihpi->cc;
- int change, item;
+ unsigned int item;
+ int change;
u32 h_control = kcontrol->private_value;
change = 1;
diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
index b46dc9b24dbd..9fb03b4ea925 100644
--- a/sound/pci/au88x0/au88x0_pcm.c
+++ b/sound/pci/au88x0/au88x0_pcm.c
@@ -671,7 +671,7 @@ static int snd_vortex_new_pcm(vortex_t *chip, int idx, int nr)
return err;
break;
#endif
- };
+ }
if (VORTEX_PCM_TYPE(pcm) == VORTEX_PCM_SPDIF) {
for (i = 0; i < ARRAY_SIZE(snd_vortex_mixer_spdif); i++) {
diff --git a/sound/pci/au88x0/au88x0_synth.c b/sound/pci/au88x0/au88x0_synth.c
index 8bef47311e45..922a84bba2ef 100644
--- a/sound/pci/au88x0/au88x0_synth.c
+++ b/sound/pci/au88x0/au88x0_synth.c
@@ -219,7 +219,6 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
*/
hwwrite(vortex->mmio, WT_RUN(wt), val);
return 0xc;
- break;
case 1: /* param 0 */
/*
printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
@@ -227,7 +226,6 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
*/
hwwrite(vortex->mmio, WT_PARM(wt, 0), val);
return 0xc;
- break;
case 2: /* param 1 */
/*
printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
@@ -235,7 +233,6 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
*/
hwwrite(vortex->mmio, WT_PARM(wt, 1), val);
return 0xc;
- break;
case 3: /* param 2 */
/*
printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
@@ -243,7 +240,6 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
*/
hwwrite(vortex->mmio, WT_PARM(wt, 2), val);
return 0xc;
- break;
case 4: /* param 3 */
/*
printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
@@ -251,7 +247,6 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
*/
hwwrite(vortex->mmio, WT_PARM(wt, 3), val);
return 0xc;
- break;
case 6: /* mute */
/*
printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
@@ -259,20 +254,17 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
*/
hwwrite(vortex->mmio, WT_MUTE(wt), val);
return 0xc;
- break;
case 0xb:
- { /* delay */
- /*
- printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
- WT_DELAY(wt,0), (int)val);
- */
- hwwrite(vortex->mmio, WT_DELAY(wt, 3), val);
- hwwrite(vortex->mmio, WT_DELAY(wt, 2), val);
- hwwrite(vortex->mmio, WT_DELAY(wt, 1), val);
- hwwrite(vortex->mmio, WT_DELAY(wt, 0), val);
- return 0xc;
- }
- break;
+ /* delay */
+ /*
+ printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
+ WT_DELAY(wt,0), (int)val);
+ */
+ hwwrite(vortex->mmio, WT_DELAY(wt, 3), val);
+ hwwrite(vortex->mmio, WT_DELAY(wt, 2), val);
+ hwwrite(vortex->mmio, WT_DELAY(wt, 1), val);
+ hwwrite(vortex->mmio, WT_DELAY(wt, 0), val);
+ return 0xc;
/* Global WT block parameters */
case 5: /* sramp */
ecx = WT_SRAMP(wt);
@@ -291,7 +283,6 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
break;
default:
return 0;
- break;
}
/*
printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n", ecx, (int)val);
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index c8e121611593..1aef7128f7ca 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -715,14 +715,14 @@ snd_azf3328_mixer_ac97_read(struct snd_ac97 *ac97, unsigned short reg_ac97)
const struct snd_azf3328 *chip = ac97->private_data;
unsigned short reg_azf = snd_azf3328_mixer_ac97_map_reg_idx(reg_ac97);
unsigned short reg_val = 0;
- bool unsupported = 0;
+ bool unsupported = false;
snd_azf3328_dbgmixer(
"snd_azf3328_mixer_ac97_read reg_ac97 %u\n",
reg_ac97
);
if (reg_azf & AZF_AC97_REG_UNSUPPORTED)
- unsupported = 1;
+ unsupported = true;
else {
if (reg_azf & AZF_AC97_REG_REAL_IO_READ)
reg_val = snd_azf3328_mixer_inw(chip,
@@ -759,7 +759,7 @@ snd_azf3328_mixer_ac97_read(struct snd_ac97 *ac97, unsigned short reg_ac97)
reg_val = azf_emulated_ac97_vendor_id & 0xffff;
break;
default:
- unsupported = 1;
+ unsupported = true;
break;
}
}
@@ -776,14 +776,14 @@ snd_azf3328_mixer_ac97_write(struct snd_ac97 *ac97,
{
const struct snd_azf3328 *chip = ac97->private_data;
unsigned short reg_azf = snd_azf3328_mixer_ac97_map_reg_idx(reg_ac97);
- bool unsupported = 0;
+ bool unsupported = false;
snd_azf3328_dbgmixer(
"snd_azf3328_mixer_ac97_write reg_ac97 %u val %u\n",
reg_ac97, val
);
if (reg_azf & AZF_AC97_REG_UNSUPPORTED)
- unsupported = 1;
+ unsupported = true;
else {
if (reg_azf & AZF_AC97_REG_REAL_IO_WRITE)
snd_azf3328_mixer_outw(
@@ -808,7 +808,7 @@ snd_azf3328_mixer_ac97_write(struct snd_ac97 *ac97,
*/
break;
default:
- unsupported = 1;
+ unsupported = true;
break;
}
}
@@ -1559,7 +1559,7 @@ snd_azf3328_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
struct snd_azf3328_codec_data *codec = runtime->private_data;
int result = 0;
u16 flags1;
- bool previously_muted = 0;
+ bool previously_muted = false;
bool is_main_mixer_playback_codec = (AZF_CODEC_PLAYBACK == codec->type);
snd_azf3328_dbgcalls("snd_azf3328_pcm_trigger cmd %d\n", cmd);
diff --git a/sound/pci/cs5535audio/cs5535audio_olpc.c b/sound/pci/cs5535audio/cs5535audio_olpc.c
index da1cb9c4c76c..e6a44507d557 100644
--- a/sound/pci/cs5535audio/cs5535audio_olpc.c
+++ b/sound/pci/cs5535audio/cs5535audio_olpc.c
@@ -161,13 +161,13 @@ int olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97)
/* drop the original AD1888 HPF control */
memset(&elem, 0, sizeof(elem));
elem.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
- strncpy(elem.name, "High Pass Filter Enable", sizeof(elem.name));
+ strlcpy(elem.name, "High Pass Filter Enable", sizeof(elem.name));
snd_ctl_remove_id(card, &elem);
/* drop the original V_REFOUT control */
memset(&elem, 0, sizeof(elem));
elem.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
- strncpy(elem.name, "V_REFOUT Enable", sizeof(elem.name));
+ strlcpy(elem.name, "V_REFOUT Enable", sizeof(elem.name));
snd_ctl_remove_id(card, &elem);
/* add the OLPC-specific controls */
diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
index 0c00eb4088ef..84f86bf63b8f 100644
--- a/sound/pci/ctxfi/ctdaio.c
+++ b/sound/pci/ctxfi/ctdaio.c
@@ -33,7 +33,7 @@ struct daio_rsc_idx {
unsigned short right;
};
-struct daio_rsc_idx idx_20k1[NUM_DAIOTYP] = {
+static struct daio_rsc_idx idx_20k1[NUM_DAIOTYP] = {
[LINEO1] = {.left = 0x00, .right = 0x01},
[LINEO2] = {.left = 0x18, .right = 0x19},
[LINEO3] = {.left = 0x08, .right = 0x09},
@@ -44,7 +44,7 @@ struct daio_rsc_idx idx_20k1[NUM_DAIOTYP] = {
[SPDIFI1] = {.left = 0x95, .right = 0x9d},
};
-struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
+static struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
[LINEO1] = {.left = 0x40, .right = 0x41},
[LINEO2] = {.left = 0x60, .right = 0x61},
[LINEO3] = {.left = 0x50, .right = 0x51},
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 0275209ca82e..1f9c7c4bbcd8 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -1182,15 +1182,20 @@ static int _snd_emu10k1_audigy_init_efx(struct snd_emu10k1 *emu)
u32 *gpr_map;
mm_segment_t seg;
- if ((icode = kzalloc(sizeof(*icode), GFP_KERNEL)) == NULL ||
- (icode->gpr_map = (u_int32_t __user *)
- kcalloc(512 + 256 + 256 + 2 * 1024, sizeof(u_int32_t),
- GFP_KERNEL)) == NULL ||
- (controls = kcalloc(SND_EMU10K1_GPR_CONTROLS,
- sizeof(*controls), GFP_KERNEL)) == NULL) {
- err = -ENOMEM;
- goto __err;
- }
+ err = -ENOMEM;
+ icode = kzalloc(sizeof(*icode), GFP_KERNEL);
+ if (!icode)
+ return err;
+
+ icode->gpr_map = (u_int32_t __user *) kcalloc(512 + 256 + 256 + 2 * 1024,
+ sizeof(u_int32_t), GFP_KERNEL);
+ if (!icode->gpr_map)
+ goto __err_gpr;
+ controls = kcalloc(SND_EMU10K1_GPR_CONTROLS,
+ sizeof(*controls), GFP_KERNEL);
+ if (!controls)
+ goto __err_ctrls;
+
gpr_map = (u32 __force *)icode->gpr_map;
icode->tram_data_map = icode->gpr_map + 512;
@@ -1741,12 +1746,12 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
emu->support_tlv = 0; /* clear again */
snd_leave_user(seg);
- __err:
+__err:
kfree(controls);
- if (icode != NULL) {
- kfree((void __force *)icode->gpr_map);
- kfree(icode);
- }
+__err_ctrls:
+ kfree((void __force *)icode->gpr_map);
+__err_gpr:
+ kfree(icode);
return err;
}
@@ -1813,18 +1818,26 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
u32 *gpr_map;
mm_segment_t seg;
- if ((icode = kzalloc(sizeof(*icode), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- if ((icode->gpr_map = (u_int32_t __user *)
- kcalloc(256 + 160 + 160 + 2 * 512, sizeof(u_int32_t),
- GFP_KERNEL)) == NULL ||
- (controls = kcalloc(SND_EMU10K1_GPR_CONTROLS,
- sizeof(struct snd_emu10k1_fx8010_control_gpr),
- GFP_KERNEL)) == NULL ||
- (ipcm = kzalloc(sizeof(*ipcm), GFP_KERNEL)) == NULL) {
- err = -ENOMEM;
- goto __err;
- }
+ err = -ENOMEM;
+ icode = kzalloc(sizeof(*icode), GFP_KERNEL);
+ if (!icode)
+ return err;
+
+ icode->gpr_map = (u_int32_t __user *) kcalloc(256 + 160 + 160 + 2 * 512,
+ sizeof(u_int32_t), GFP_KERNEL);
+ if (!icode->gpr_map)
+ goto __err_gpr;
+
+ controls = kcalloc(SND_EMU10K1_GPR_CONTROLS,
+ sizeof(struct snd_emu10k1_fx8010_control_gpr),
+ GFP_KERNEL);
+ if (!controls)
+ goto __err_ctrls;
+
+ ipcm = kzalloc(sizeof(*ipcm), GFP_KERNEL);
+ if (!ipcm)
+ goto __err_ipcm;
+
gpr_map = (u32 __force *)icode->gpr_map;
icode->tram_data_map = icode->gpr_map + 256;
@@ -2363,13 +2376,14 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
snd_leave_user(seg);
if (err >= 0)
err = snd_emu10k1_ipcm_poke(emu, ipcm);
- __err:
+__err:
kfree(ipcm);
+__err_ipcm:
kfree(controls);
- if (icode != NULL) {
- kfree((void __force *)icode->gpr_map);
- kfree(icode);
- }
+__err_ctrls:
+ kfree((void __force *)icode->gpr_map);
+__err_gpr:
+ kfree(icode);
return err;
}
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 48a9d004d6d9..853c6a69e29e 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -638,7 +638,7 @@ static int fill_audio_out_name(struct hda_codec *codec, hda_nid_t nid,
/* don't add channel suffix for Headphone controls */
int idx = get_hp_label_index(codec, nid, cfg->hp_pins,
cfg->hp_outs);
- if (idx >= 0)
+ if (idx >= 0 && indexp)
*indexp = idx;
sfx = "";
}
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index 63c99090a4ec..98bce9830be0 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -110,6 +110,7 @@ static int snd_hda_beep_event(struct input_dev *dev, unsigned int type,
case SND_BELL:
if (hz)
hz = 1000;
+ /* fallthru */
case SND_TONE:
if (beep->linear_tone)
beep->tone = beep_linear_tone(beep, hz);
@@ -151,10 +152,8 @@ static int snd_hda_do_attach(struct hda_beep *beep)
int err;
input_dev = input_allocate_device();
- if (!input_dev) {
- printk(KERN_INFO "hda_beep: unable to allocate input device\n");
+ if (!input_dev)
return -ENOMEM;
- }
/* setup digital beep device */
input_dev->name = "HDA Digital PCBeep";
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 5b6c4e3c92ca..de1a7670ba0d 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -565,7 +565,7 @@ int snd_hda_get_raw_connections(struct hda_codec *codec, hda_nid_t nid,
range_val = !!(parm & (1 << (shift-1))); /* ranges */
val = parm & mask;
if (val == 0 && null_count++) { /* no second chance */
- snd_printk(KERN_WARNING "hda_codec: "
+ snd_printdd("hda_codec: "
"invalid CONNECT_LIST verb %x[%i]:%x\n",
nid, i, parm);
return 0;
@@ -2634,8 +2634,7 @@ static int map_slaves(struct hda_codec *codec, const char * const *slaves,
items = codec->mixers.list;
for (i = 0; i < codec->mixers.used; i++) {
struct snd_kcontrol *sctl = items[i].kctl;
- if (!sctl || !sctl->id.name ||
- sctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER)
+ if (!sctl || sctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER)
continue;
for (s = slaves; *s; s++) {
char tmpname[sizeof(sctl->id.name)];
@@ -4864,8 +4863,8 @@ static void hda_power_work(struct work_struct *work)
spin_unlock(&codec->power_lock);
state = hda_call_codec_suspend(codec, true);
- codec->pm_down_notified = 0;
- if (!bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) {
+ if (!codec->pm_down_notified &&
+ !bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) {
codec->pm_down_notified = 1;
hda_call_pm_notify(bus, false);
}
@@ -5395,11 +5394,6 @@ int snd_hda_multi_out_analog_prepare(struct hda_codec *codec,
snd_hda_codec_setup_stream(codec,
mout->hp_out_nid[i],
stream_tag, 0, format);
- for (i = 0; i < ARRAY_SIZE(mout->extra_out_nid); i++)
- if (!mout->no_share_stream && mout->extra_out_nid[i])
- snd_hda_codec_setup_stream(codec,
- mout->extra_out_nid[i],
- stream_tag, 0, format);
/* surrounds */
for (i = 1; i < mout->num_dacs; i++) {
@@ -5410,6 +5404,20 @@ int snd_hda_multi_out_analog_prepare(struct hda_codec *codec,
snd_hda_codec_setup_stream(codec, nids[i], stream_tag,
0, format);
}
+
+ /* extra surrounds */
+ for (i = 0; i < ARRAY_SIZE(mout->extra_out_nid); i++) {
+ int ch = 0;
+ if (!mout->extra_out_nid[i])
+ break;
+ if (chs >= (i + 1) * 2)
+ ch = i * 2;
+ else if (!mout->no_share_stream)
+ break;
+ snd_hda_codec_setup_stream(codec, mout->extra_out_nid[i],
+ stream_tag, ch, format);
+ }
+
return 0;
}
EXPORT_SYMBOL_HDA(snd_hda_multi_out_analog_prepare);
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 7aa9870040c1..77db69480c19 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -698,6 +698,7 @@ struct hda_bus {
unsigned int in_reset:1; /* during reset operation */
unsigned int power_keep_link_on:1; /* don't power off HDA link */
unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
+ unsigned int avoid_link_reset:1; /* don't reset link at runtime PM */
int primary_dig_out_type; /* primary digital out PCM type */
};
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index d0d7ac1e99d2..32d3e3855a6e 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -2,6 +2,7 @@
* Generic routines and proc interface for ELD(EDID Like Data) information
*
* Copyright(c) 2008 Intel Corporation.
+ * Copyright (c) 2013 Anssi Hannula <anssi.hannula@iki.fi>
*
* Authors:
* Wu Fengguang <wfg@linux.intel.com>
@@ -478,10 +479,9 @@ static void hdmi_print_sad_info(int i, struct cea_sad *a,
snd_iprintf(buffer, "sad%d_profile\t\t%d\n", i, a->profile);
}
-static void hdmi_print_eld_info(struct snd_info_entry *entry,
- struct snd_info_buffer *buffer)
+void snd_hdmi_print_eld_info(struct hdmi_eld *eld,
+ struct snd_info_buffer *buffer)
{
- struct hdmi_eld *eld = entry->private_data;
struct parsed_hdmi_eld *e = &eld->info;
char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE];
int i;
@@ -500,13 +500,10 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
[4 ... 7] = "reserved"
};
- mutex_lock(&eld->lock);
snd_iprintf(buffer, "monitor_present\t\t%d\n", eld->monitor_present);
snd_iprintf(buffer, "eld_valid\t\t%d\n", eld->eld_valid);
- if (!eld->eld_valid) {
- mutex_unlock(&eld->lock);
+ if (!eld->eld_valid)
return;
- }
snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name);
snd_iprintf(buffer, "connection_type\t\t%s\n",
eld_connection_type_names[e->conn_type]);
@@ -528,13 +525,11 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
for (i = 0; i < e->sad_count; i++)
hdmi_print_sad_info(i, e->sad + i, buffer);
- mutex_unlock(&eld->lock);
}
-static void hdmi_write_eld_info(struct snd_info_entry *entry,
- struct snd_info_buffer *buffer)
+void snd_hdmi_write_eld_info(struct hdmi_eld *eld,
+ struct snd_info_buffer *buffer)
{
- struct hdmi_eld *eld = entry->private_data;
struct parsed_hdmi_eld *e = &eld->info;
char line[64];
char name[64];
@@ -542,7 +537,6 @@ static void hdmi_write_eld_info(struct snd_info_entry *entry,
long long val;
unsigned int n;
- mutex_lock(&eld->lock);
while (!snd_info_get_line(buffer, line, sizeof(line))) {
if (sscanf(line, "%s %llx", name, &val) != 2)
continue;
@@ -594,38 +588,7 @@ static void hdmi_write_eld_info(struct snd_info_entry *entry,
e->sad_count = n + 1;
}
}
- mutex_unlock(&eld->lock);
-}
-
-
-int snd_hda_eld_proc_new(struct hda_codec *codec, struct hdmi_eld *eld,
- int index)
-{
- char name[32];
- struct snd_info_entry *entry;
- int err;
-
- snprintf(name, sizeof(name), "eld#%d.%d", codec->addr, index);
- err = snd_card_proc_new(codec->bus->card, name, &entry);
- if (err < 0)
- return err;
-
- snd_info_set_text_ops(entry, eld, hdmi_print_eld_info);
- entry->c.text.write = hdmi_write_eld_info;
- entry->mode |= S_IWUSR;
- eld->proc_entry = entry;
-
- return 0;
-}
-
-void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld)
-{
- if (!codec->bus->shutdown && eld->proc_entry) {
- snd_device_free(codec->bus->card, eld->proc_entry);
- eld->proc_entry = NULL;
- }
}
-
#endif /* CONFIG_PROC_FS */
/* update PCM info based on ELD */
@@ -671,3 +634,153 @@ void snd_hdmi_eld_update_pcm_info(struct parsed_hdmi_eld *e,
hinfo->maxbps = min(hinfo->maxbps, maxbps);
hinfo->channels_max = min(hinfo->channels_max, channels_max);
}
+
+
+/* ATI/AMD specific stuff (ELD emulation) */
+
+#define ATI_VERB_SET_AUDIO_DESCRIPTOR 0x776
+#define ATI_VERB_SET_SINK_INFO_INDEX 0x780
+#define ATI_VERB_GET_SPEAKER_ALLOCATION 0xf70
+#define ATI_VERB_GET_AUDIO_DESCRIPTOR 0xf76
+#define ATI_VERB_GET_AUDIO_VIDEO_DELAY 0xf7b
+#define ATI_VERB_GET_SINK_INFO_INDEX 0xf80
+#define ATI_VERB_GET_SINK_INFO_DATA 0xf81
+
+#define ATI_SPKALLOC_SPKALLOC 0x007f
+#define ATI_SPKALLOC_TYPE_HDMI 0x0100
+#define ATI_SPKALLOC_TYPE_DISPLAYPORT 0x0200
+
+/* first three bytes are just standard SAD */
+#define ATI_AUDIODESC_CHANNELS 0x00000007
+#define ATI_AUDIODESC_RATES 0x0000ff00
+#define ATI_AUDIODESC_LPCM_STEREO_RATES 0xff000000
+
+/* in standard HDMI VSDB format */
+#define ATI_DELAY_VIDEO_LATENCY 0x000000ff
+#define ATI_DELAY_AUDIO_LATENCY 0x0000ff00
+
+enum ati_sink_info_idx {
+ ATI_INFO_IDX_MANUFACTURER_ID = 0,
+ ATI_INFO_IDX_PRODUCT_ID = 1,
+ ATI_INFO_IDX_SINK_DESC_LEN = 2,
+ ATI_INFO_IDX_PORT_ID_LOW = 3,
+ ATI_INFO_IDX_PORT_ID_HIGH = 4,
+ ATI_INFO_IDX_SINK_DESC_FIRST = 5,
+ ATI_INFO_IDX_SINK_DESC_LAST = 22, /* max len 18 bytes */
+};
+
+int snd_hdmi_get_eld_ati(struct hda_codec *codec, hda_nid_t nid,
+ unsigned char *buf, int *eld_size, bool rev3_or_later)
+{
+ int spkalloc, ati_sad, aud_synch;
+ int sink_desc_len = 0;
+ int pos, i;
+
+ /* ATI/AMD does not have ELD, emulate it */
+
+ spkalloc = snd_hda_codec_read(codec, nid, 0, ATI_VERB_GET_SPEAKER_ALLOCATION, 0);
+
+ if (!spkalloc) {
+ snd_printd(KERN_INFO "HDMI ATI/AMD: no speaker allocation for ELD\n");
+ return -EINVAL;
+ }
+
+ memset(buf, 0, ELD_FIXED_BYTES + ELD_MAX_MNL + ELD_MAX_SAD * 3);
+
+ /* version */
+ buf[0] = ELD_VER_CEA_861D << 3;
+
+ /* speaker allocation from EDID */
+ buf[7] = spkalloc & ATI_SPKALLOC_SPKALLOC;
+
+ /* is DisplayPort? */
+ if (spkalloc & ATI_SPKALLOC_TYPE_DISPLAYPORT)
+ buf[5] |= 0x04;
+
+ pos = ELD_FIXED_BYTES;
+
+ if (rev3_or_later) {
+ int sink_info;
+
+ snd_hda_codec_write(codec, nid, 0, ATI_VERB_SET_SINK_INFO_INDEX, ATI_INFO_IDX_PORT_ID_LOW);
+ sink_info = snd_hda_codec_read(codec, nid, 0, ATI_VERB_GET_SINK_INFO_DATA, 0);
+ put_unaligned_le32(sink_info, buf + 8);
+
+ snd_hda_codec_write(codec, nid, 0, ATI_VERB_SET_SINK_INFO_INDEX, ATI_INFO_IDX_PORT_ID_HIGH);
+ sink_info = snd_hda_codec_read(codec, nid, 0, ATI_VERB_GET_SINK_INFO_DATA, 0);
+ put_unaligned_le32(sink_info, buf + 12);
+
+ snd_hda_codec_write(codec, nid, 0, ATI_VERB_SET_SINK_INFO_INDEX, ATI_INFO_IDX_MANUFACTURER_ID);
+ sink_info = snd_hda_codec_read(codec, nid, 0, ATI_VERB_GET_SINK_INFO_DATA, 0);
+ put_unaligned_le16(sink_info, buf + 16);
+
+ snd_hda_codec_write(codec, nid, 0, ATI_VERB_SET_SINK_INFO_INDEX, ATI_INFO_IDX_PRODUCT_ID);
+ sink_info = snd_hda_codec_read(codec, nid, 0, ATI_VERB_GET_SINK_INFO_DATA, 0);
+ put_unaligned_le16(sink_info, buf + 18);
+
+ snd_hda_codec_write(codec, nid, 0, ATI_VERB_SET_SINK_INFO_INDEX, ATI_INFO_IDX_SINK_DESC_LEN);
+ sink_desc_len = snd_hda_codec_read(codec, nid, 0, ATI_VERB_GET_SINK_INFO_DATA, 0);
+
+ if (sink_desc_len > ELD_MAX_MNL) {
+ snd_printd(KERN_INFO "HDMI ATI/AMD: Truncating HDMI sink description with length %d\n",
+ sink_desc_len);
+ sink_desc_len = ELD_MAX_MNL;
+ }
+
+ buf[4] |= sink_desc_len;
+
+ for (i = 0; i < sink_desc_len; i++) {
+ snd_hda_codec_write(codec, nid, 0, ATI_VERB_SET_SINK_INFO_INDEX, ATI_INFO_IDX_SINK_DESC_FIRST + i);
+ buf[pos++] = snd_hda_codec_read(codec, nid, 0, ATI_VERB_GET_SINK_INFO_DATA, 0);
+ }
+ }
+
+ for (i = AUDIO_CODING_TYPE_LPCM; i <= AUDIO_CODING_TYPE_WMAPRO; i++) {
+ if (i == AUDIO_CODING_TYPE_SACD || i == AUDIO_CODING_TYPE_DST)
+ continue; /* not handled by ATI/AMD */
+
+ snd_hda_codec_write(codec, nid, 0, ATI_VERB_SET_AUDIO_DESCRIPTOR, i << 3);
+ ati_sad = snd_hda_codec_read(codec, nid, 0, ATI_VERB_GET_AUDIO_DESCRIPTOR, 0);
+
+ if (ati_sad & ATI_AUDIODESC_RATES) {
+ /* format is supported, copy SAD as-is */
+ buf[pos++] = (ati_sad & 0x0000ff) >> 0;
+ buf[pos++] = (ati_sad & 0x00ff00) >> 8;
+ buf[pos++] = (ati_sad & 0xff0000) >> 16;
+ }
+
+ if (i == AUDIO_CODING_TYPE_LPCM
+ && (ati_sad & ATI_AUDIODESC_LPCM_STEREO_RATES)
+ && (ati_sad & ATI_AUDIODESC_LPCM_STEREO_RATES) >> 16 != (ati_sad & ATI_AUDIODESC_RATES)) {
+ /* for PCM there is a separate stereo rate mask */
+ buf[pos++] = ((ati_sad & 0x000000ff) & ~ATI_AUDIODESC_CHANNELS) | 0x1;
+ /* rates from the extra byte */
+ buf[pos++] = (ati_sad & 0xff000000) >> 24;
+ buf[pos++] = (ati_sad & 0x00ff0000) >> 16;
+ }
+ }
+
+ if (pos == ELD_FIXED_BYTES + sink_desc_len) {
+ snd_printd(KERN_INFO "HDMI ATI/AMD: no audio descriptors for ELD\n");
+ return -EINVAL;
+ }
+
+ aud_synch = snd_hda_codec_read(codec, nid, 0, ATI_VERB_GET_AUDIO_VIDEO_DELAY, 0);
+ if ((aud_synch & ATI_DELAY_VIDEO_LATENCY) && (aud_synch & ATI_DELAY_AUDIO_LATENCY)) {
+ int video_latency = (aud_synch & ATI_DELAY_VIDEO_LATENCY) - 1;
+ int audio_latency = ((aud_synch & ATI_DELAY_AUDIO_LATENCY) >> 8) - 1;
+
+ if (video_latency > audio_latency)
+ buf[6] = min(video_latency - audio_latency, 0xfa);
+ }
+
+ /* Baseline length */
+ buf[2] = pos - 4;
+
+ /* SAD count */
+ buf[5] |= ((pos - ELD_FIXED_BYTES - sink_desc_len) / 3) << 4;
+
+ *eld_size = pos;
+
+ return 0;
+}
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 26ad4f0aade3..b7c89dff7066 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -4475,9 +4475,11 @@ int snd_hda_gen_build_controls(struct hda_codec *codec)
true, &spec->vmaster_mute.sw_kctl);
if (err < 0)
return err;
- if (spec->vmaster_mute.hook)
+ if (spec->vmaster_mute.hook) {
snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute,
spec->vmaster_mute_enum);
+ snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
+ }
}
free_kctls(spec); /* no longer needed */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 6e61a019aa5e..66f7e1e3ec4b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -169,6 +169,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
"{Intel, PPT},"
"{Intel, LPT},"
"{Intel, LPT_LP},"
+ "{Intel, WPT_LP},"
"{Intel, HPT},"
"{Intel, PBG},"
"{Intel, SCH},"
@@ -2986,7 +2987,8 @@ static int azx_runtime_suspend(struct device *dev)
STATESTS_INT_MASK);
azx_stop_chip(chip);
- azx_enter_link_reset(chip);
+ if (!chip->bus->avoid_link_reset)
+ azx_enter_link_reset(chip);
azx_clear_irq_pending(chip);
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
hda_display_power(false);
@@ -3985,6 +3987,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
/* Lynx Point-LP */
{ PCI_DEVICE(0x8086, 0x9c21),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ /* Wildcat Point-LP */
+ { PCI_DEVICE(0x8086, 0x9ca0),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Haswell */
{ PCI_DEVICE(0x8086, 0x0a0c),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH |
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 2e7493ef8ee0..d398b648bb5d 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -428,6 +428,7 @@ enum {
HDA_FIXUP_ACT_PROBE,
HDA_FIXUP_ACT_INIT,
HDA_FIXUP_ACT_BUILD,
+ HDA_FIXUP_ACT_FREE,
};
int snd_hda_add_verbs(struct hda_codec *codec, const struct hda_verb *list);
@@ -751,10 +752,6 @@ struct hdmi_eld {
int eld_size;
char eld_buffer[ELD_MAX_SIZE];
struct parsed_hdmi_eld info;
- struct mutex lock;
-#ifdef CONFIG_PROC_FS
- struct snd_info_entry *proc_entry;
-#endif
};
int snd_hdmi_get_eld_size(struct hda_codec *codec, hda_nid_t nid);
@@ -766,21 +763,15 @@ void snd_hdmi_show_eld(struct parsed_hdmi_eld *e);
void snd_hdmi_eld_update_pcm_info(struct parsed_hdmi_eld *e,
struct hda_pcm_stream *hinfo);
+int snd_hdmi_get_eld_ati(struct hda_codec *codec, hda_nid_t nid,
+ unsigned char *buf, int *eld_size,
+ bool rev3_or_later);
+
#ifdef CONFIG_PROC_FS
-int snd_hda_eld_proc_new(struct hda_codec *codec, struct hdmi_eld *eld,
- int index);
-void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld);
-#else
-static inline int snd_hda_eld_proc_new(struct hda_codec *codec,
- struct hdmi_eld *eld,
- int index)
-{
- return 0;
-}
-static inline void snd_hda_eld_proc_free(struct hda_codec *codec,
- struct hdmi_eld *eld)
-{
-}
+void snd_hdmi_print_eld_info(struct hdmi_eld *eld,
+ struct snd_info_buffer *buffer);
+void snd_hdmi_write_eld_info(struct hdmi_eld *eld,
+ struct snd_info_buffer *buffer);
#endif
#define SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE 80
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 0cbdd87dde6d..87d2e0335ae4 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -968,6 +968,18 @@ static void ad1884_fixup_hp_eapd(struct hda_codec *codec,
}
}
+static void ad1884_fixup_thinkpad(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct ad198x_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->gen.keep_eapd_on = 1;
+ spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook;
+ spec->eapd_nid = 0x12;
+ }
+}
+
/* set magic COEFs for dmic */
static const struct hda_verb ad1884_dmic_init_verbs[] = {
{0x01, AC_VERB_SET_COEF_INDEX, 0x13f7},
@@ -979,6 +991,7 @@ enum {
AD1884_FIXUP_AMP_OVERRIDE,
AD1884_FIXUP_HP_EAPD,
AD1884_FIXUP_DMIC_COEF,
+ AD1884_FIXUP_THINKPAD,
AD1884_FIXUP_HP_TOUCHSMART,
};
@@ -997,6 +1010,12 @@ static const struct hda_fixup ad1884_fixups[] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = ad1884_dmic_init_verbs,
},
+ [AD1884_FIXUP_THINKPAD] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = ad1884_fixup_thinkpad,
+ .chained = true,
+ .chain_id = AD1884_FIXUP_DMIC_COEF,
+ },
[AD1884_FIXUP_HP_TOUCHSMART] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = ad1884_dmic_init_verbs,
@@ -1008,7 +1027,7 @@ static const struct hda_fixup ad1884_fixups[] = {
static const struct snd_pci_quirk ad1884_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x2a82, "HP Touchsmart", AD1884_FIXUP_HP_TOUCHSMART),
SND_PCI_QUIRK_VENDOR(0x103c, "HP", AD1884_FIXUP_HP_EAPD),
- SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_DMIC_COEF),
+ SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_THINKPAD),
{}
};
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 6e9876f27d95..54d14793725a 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -759,7 +759,7 @@ struct ca0132_spec {
/*
* CA0132 codec access
*/
-unsigned int codec_send_command(struct hda_codec *codec, hda_nid_t nid,
+static unsigned int codec_send_command(struct hda_codec *codec, hda_nid_t nid,
unsigned int verb, unsigned int parm, unsigned int *res)
{
unsigned int response;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index ec68eaea0336..993b25c17711 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3208,11 +3208,17 @@ static int cx_auto_init(struct hda_codec *codec)
return 0;
}
+static void cx_auto_free(struct hda_codec *codec)
+{
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_FREE);
+ snd_hda_gen_free(codec);
+}
+
static const struct hda_codec_ops cx_auto_patch_ops = {
.build_controls = cx_auto_build_controls,
.build_pcms = snd_hda_gen_build_pcms,
.init = cx_auto_init,
- .free = snd_hda_gen_free,
+ .free = cx_auto_free,
.unsol_event = snd_hda_jack_unsol_event,
#ifdef CONFIG_PM
.check_power_status = snd_hda_gen_check_power_status,
@@ -3232,8 +3238,84 @@ enum {
CXT_FIXUP_HEADPHONE_MIC_PIN,
CXT_FIXUP_HEADPHONE_MIC,
CXT_FIXUP_GPIO1,
+ CXT_FIXUP_THINKPAD_ACPI,
};
+#if IS_ENABLED(CONFIG_THINKPAD_ACPI)
+
+#include <linux/thinkpad_acpi.h>
+
+static int (*led_set_func)(int, bool);
+
+static void update_tpacpi_mute_led(void *private_data, int enabled)
+{
+ struct hda_codec *codec = private_data;
+ struct conexant_spec *spec = codec->spec;
+
+ if (spec->dynamic_eapd)
+ cx_auto_vmaster_hook(private_data, enabled);
+
+ if (led_set_func)
+ led_set_func(TPACPI_LED_MUTE, !enabled);
+}
+
+static void update_tpacpi_micmute_led(struct hda_codec *codec,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ if (!ucontrol || !led_set_func)
+ return;
+ if (strcmp("Capture Switch", ucontrol->id.name) == 0 && ucontrol->id.index == 0) {
+ /* TODO: How do I verify if it's a mono or stereo here? */
+ bool val = ucontrol->value.integer.value[0] || ucontrol->value.integer.value[1];
+ led_set_func(TPACPI_LED_MICMUTE, !val);
+ }
+}
+
+static void cxt_fixup_thinkpad_acpi(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct conexant_spec *spec = codec->spec;
+
+ bool removefunc = false;
+
+ if (action == HDA_FIXUP_ACT_PROBE) {
+ if (!led_set_func)
+ led_set_func = symbol_request(tpacpi_led_set);
+ if (!led_set_func) {
+ snd_printk(KERN_WARNING "Failed to find thinkpad-acpi symbol tpacpi_led_set\n");
+ return;
+ }
+
+ removefunc = true;
+ if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
+ spec->gen.vmaster_mute.hook = update_tpacpi_mute_led;
+ removefunc = false;
+ }
+ if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
+ if (spec->gen.num_adc_nids > 1)
+ snd_printdd("Skipping micmute LED control due to several ADCs");
+ else {
+ spec->gen.cap_sync_hook = update_tpacpi_micmute_led;
+ removefunc = false;
+ }
+ }
+ }
+
+ if (led_set_func && (action == HDA_FIXUP_ACT_FREE || removefunc)) {
+ symbol_put(tpacpi_led_set);
+ led_set_func = NULL;
+ }
+}
+
+#else
+
+static void cxt_fixup_thinkpad_acpi(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+}
+
+#endif
+
static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -3344,6 +3426,8 @@ static const struct hda_fixup cxt_fixups[] = {
[CXT_PINCFG_LENOVO_TP410] = {
.type = HDA_FIXUP_PINS,
.v.pins = cxt_pincfg_lenovo_tp410,
+ .chained = true,
+ .chain_id = CXT_FIXUP_THINKPAD_ACPI,
},
[CXT_PINCFG_LEMOTE_A1004] = {
.type = HDA_FIXUP_PINS,
@@ -3385,6 +3469,10 @@ static const struct hda_fixup cxt_fixups[] = {
{ }
},
},
+ [CXT_FIXUP_THINKPAD_ACPI] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cxt_fixup_thinkpad_acpi,
+ },
};
static const struct snd_pci_quirk cxt5051_fixups[] = {
@@ -3507,7 +3595,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
return 0;
error:
- snd_hda_gen_free(codec);
+ cx_auto_free(codec);
return err;
}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 50173d412ac5..e22323f50424 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -6,6 +6,7 @@
* Copyright (c) 2006 ATI Technologies Inc.
* Copyright (c) 2008 NVIDIA Corp. All rights reserved.
* Copyright (c) 2008 Wei Ni <wni@nvidia.com>
+ * Copyright (c) 2013 Anssi Hannula <anssi.hannula@iki.fi>
*
* Authors:
* Wu Fengguang <wfg@linux.intel.com>
@@ -45,6 +46,7 @@ module_param(static_hdmi_pcm, bool, 0644);
MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
#define is_haswell(codec) ((codec)->vendor_id == 0x80862807)
+#define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
struct hdmi_spec_per_cvt {
hda_nid_t cvt_nid;
@@ -63,9 +65,11 @@ struct hdmi_spec_per_pin {
hda_nid_t pin_nid;
int num_mux_nids;
hda_nid_t mux_nids[HDA_MAX_CONNECTIONS];
+ hda_nid_t cvt_nid;
struct hda_codec *codec;
struct hdmi_eld sink_eld;
+ struct mutex lock;
struct delayed_work work;
struct snd_kcontrol *eld_ctl;
int repoll_count;
@@ -75,6 +79,42 @@ struct hdmi_spec_per_pin {
bool chmap_set; /* channel-map override by ALSA API? */
unsigned char chmap[8]; /* ALSA API channel-map */
char pcm_name[8]; /* filled in build_pcm callbacks */
+#ifdef CONFIG_PROC_FS
+ struct snd_info_entry *proc_entry;
+#endif
+};
+
+struct cea_channel_speaker_allocation;
+
+/* operations used by generic code that can be overridden by patches */
+struct hdmi_ops {
+ int (*pin_get_eld)(struct hda_codec *codec, hda_nid_t pin_nid,
+ unsigned char *buf, int *eld_size);
+
+ /* get and set channel assigned to each HDMI ASP (audio sample packet) slot */
+ int (*pin_get_slot_channel)(struct hda_codec *codec, hda_nid_t pin_nid,
+ int asp_slot);
+ int (*pin_set_slot_channel)(struct hda_codec *codec, hda_nid_t pin_nid,
+ int asp_slot, int channel);
+
+ void (*pin_setup_infoframe)(struct hda_codec *codec, hda_nid_t pin_nid,
+ int ca, int active_channels, int conn_type);
+
+ /* enable/disable HBR (HD passthrough) */
+ int (*pin_hbr_setup)(struct hda_codec *codec, hda_nid_t pin_nid, bool hbr);
+
+ int (*setup_stream)(struct hda_codec *codec, hda_nid_t cvt_nid,
+ hda_nid_t pin_nid, u32 stream_tag, int format);
+
+ /* Helpers for producing the channel map TLVs. These can be overridden
+ * for devices that have non-standard mapping requirements. */
+ int (*chmap_cea_alloc_validate_get_type)(struct cea_channel_speaker_allocation *cap,
+ int channels);
+ void (*cea_alloc_to_tlv_chmap)(struct cea_channel_speaker_allocation *cap,
+ unsigned int *chmap, int channels);
+
+ /* check that the user-given chmap is supported */
+ int (*chmap_validate)(int ca, int channels, unsigned char *chmap);
};
struct hdmi_spec {
@@ -88,8 +128,9 @@ struct hdmi_spec {
unsigned int channels_max; /* max over all cvts */
struct hdmi_eld temp_eld;
+ struct hdmi_ops ops;
/*
- * Non-generic ATI/NVIDIA specific
+ * Non-generic VIA/NVIDIA specific
*/
struct hda_multi_out multiout;
struct hda_pcm_stream pcm_playback;
@@ -348,17 +389,19 @@ static int hdmi_eld_ctl_info(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct hdmi_spec *spec = codec->spec;
+ struct hdmi_spec_per_pin *per_pin;
struct hdmi_eld *eld;
int pin_idx;
uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
pin_idx = kcontrol->private_value;
- eld = &get_pin(spec, pin_idx)->sink_eld;
+ per_pin = get_pin(spec, pin_idx);
+ eld = &per_pin->sink_eld;
- mutex_lock(&eld->lock);
+ mutex_lock(&per_pin->lock);
uinfo->count = eld->eld_valid ? eld->eld_size : 0;
- mutex_unlock(&eld->lock);
+ mutex_unlock(&per_pin->lock);
return 0;
}
@@ -368,15 +411,17 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct hdmi_spec *spec = codec->spec;
+ struct hdmi_spec_per_pin *per_pin;
struct hdmi_eld *eld;
int pin_idx;
pin_idx = kcontrol->private_value;
- eld = &get_pin(spec, pin_idx)->sink_eld;
+ per_pin = get_pin(spec, pin_idx);
+ eld = &per_pin->sink_eld;
- mutex_lock(&eld->lock);
+ mutex_lock(&per_pin->lock);
if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
- mutex_unlock(&eld->lock);
+ mutex_unlock(&per_pin->lock);
snd_BUG();
return -EINVAL;
}
@@ -386,7 +431,7 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
if (eld->eld_valid)
memcpy(ucontrol->value.bytes.data, eld->eld_buffer,
eld->eld_size);
- mutex_unlock(&eld->lock);
+ mutex_unlock(&per_pin->lock);
return 0;
}
@@ -477,6 +522,68 @@ static void hdmi_set_channel_count(struct hda_codec *codec,
AC_VERB_SET_CVT_CHAN_COUNT, chs - 1);
}
+/*
+ * ELD proc files
+ */
+
+#ifdef CONFIG_PROC_FS
+static void print_eld_info(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct hdmi_spec_per_pin *per_pin = entry->private_data;
+
+ mutex_lock(&per_pin->lock);
+ snd_hdmi_print_eld_info(&per_pin->sink_eld, buffer);
+ mutex_unlock(&per_pin->lock);
+}
+
+static void write_eld_info(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct hdmi_spec_per_pin *per_pin = entry->private_data;
+
+ mutex_lock(&per_pin->lock);
+ snd_hdmi_write_eld_info(&per_pin->sink_eld, buffer);
+ mutex_unlock(&per_pin->lock);
+}
+
+static int eld_proc_new(struct hdmi_spec_per_pin *per_pin, int index)
+{
+ char name[32];
+ struct hda_codec *codec = per_pin->codec;
+ struct snd_info_entry *entry;
+ int err;
+
+ snprintf(name, sizeof(name), "eld#%d.%d", codec->addr, index);
+ err = snd_card_proc_new(codec->bus->card, name, &entry);
+ if (err < 0)
+ return err;
+
+ snd_info_set_text_ops(entry, per_pin, print_eld_info);
+ entry->c.text.write = write_eld_info;
+ entry->mode |= S_IWUSR;
+ per_pin->proc_entry = entry;
+
+ return 0;
+}
+
+static void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
+{
+ if (!per_pin->codec->bus->shutdown && per_pin->proc_entry) {
+ snd_device_free(per_pin->codec->bus->card, per_pin->proc_entry);
+ per_pin->proc_entry = NULL;
+ }
+}
+#else
+static inline int eld_proc_new(struct hdmi_spec_per_pin *per_pin,
+ int index)
+{
+ return 0;
+}
+static inline void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
+{
+}
+#endif
/*
* Channel mapping routines
@@ -577,74 +684,91 @@ static void hdmi_debug_channel_mapping(struct hda_codec *codec,
hda_nid_t pin_nid)
{
#ifdef CONFIG_SND_DEBUG_VERBOSE
+ struct hdmi_spec *spec = codec->spec;
int i;
- int slot;
+ int channel;
for (i = 0; i < 8; i++) {
- slot = snd_hda_codec_read(codec, pin_nid, 0,
- AC_VERB_GET_HDMI_CHAN_SLOT, i);
+ channel = spec->ops.pin_get_slot_channel(codec, pin_nid, i);
printk(KERN_DEBUG "HDMI: ASP channel %d => slot %d\n",
- slot >> 4, slot & 0xf);
+ channel, i);
}
#endif
}
-
static void hdmi_std_setup_channel_mapping(struct hda_codec *codec,
hda_nid_t pin_nid,
bool non_pcm,
int ca)
{
+ struct hdmi_spec *spec = codec->spec;
+ struct cea_channel_speaker_allocation *ch_alloc;
int i;
int err;
int order;
int non_pcm_mapping[8];
order = get_channel_allocation_order(ca);
+ ch_alloc = &channel_allocations[order];
if (hdmi_channel_mapping[ca][1] == 0) {
- for (i = 0; i < channel_allocations[order].channels; i++)
- hdmi_channel_mapping[ca][i] = i | (i << 4);
- for (; i < 8; i++)
- hdmi_channel_mapping[ca][i] = 0xf | (i << 4);
+ int hdmi_slot = 0;
+ /* fill actual channel mappings in ALSA channel (i) order */
+ for (i = 0; i < ch_alloc->channels; i++) {
+ while (!ch_alloc->speakers[7 - hdmi_slot] && !WARN_ON(hdmi_slot >= 8))
+ hdmi_slot++; /* skip zero slots */
+
+ hdmi_channel_mapping[ca][i] = (i << 4) | hdmi_slot++;
+ }
+ /* fill the rest of the slots with ALSA channel 0xf */
+ for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++)
+ if (!ch_alloc->speakers[7 - hdmi_slot])
+ hdmi_channel_mapping[ca][i++] = (0xf << 4) | hdmi_slot;
}
if (non_pcm) {
- for (i = 0; i < channel_allocations[order].channels; i++)
- non_pcm_mapping[i] = i | (i << 4);
+ for (i = 0; i < ch_alloc->channels; i++)
+ non_pcm_mapping[i] = (i << 4) | i;
for (; i < 8; i++)
- non_pcm_mapping[i] = 0xf | (i << 4);
+ non_pcm_mapping[i] = (0xf << 4) | i;
}
for (i = 0; i < 8; i++) {
- err = snd_hda_codec_write(codec, pin_nid, 0,
- AC_VERB_SET_HDMI_CHAN_SLOT,
- non_pcm ? non_pcm_mapping[i] : hdmi_channel_mapping[ca][i]);
+ int slotsetup = non_pcm ? non_pcm_mapping[i] : hdmi_channel_mapping[ca][i];
+ int hdmi_slot = slotsetup & 0x0f;
+ int channel = (slotsetup & 0xf0) >> 4;
+ err = spec->ops.pin_set_slot_channel(codec, pin_nid, hdmi_slot, channel);
if (err) {
snd_printdd(KERN_NOTICE
"HDMI: channel mapping failed\n");
break;
}
}
-
- hdmi_debug_channel_mapping(codec, pin_nid);
}
struct channel_map_table {
unsigned char map; /* ALSA API channel map position */
- unsigned char cea_slot; /* CEA slot value */
int spk_mask; /* speaker position bit mask */
};
static struct channel_map_table map_tables[] = {
- { SNDRV_CHMAP_FL, 0x00, FL },
- { SNDRV_CHMAP_FR, 0x01, FR },
- { SNDRV_CHMAP_RL, 0x04, RL },
- { SNDRV_CHMAP_RR, 0x05, RR },
- { SNDRV_CHMAP_LFE, 0x02, LFE },
- { SNDRV_CHMAP_FC, 0x03, FC },
- { SNDRV_CHMAP_RLC, 0x06, RLC },
- { SNDRV_CHMAP_RRC, 0x07, RRC },
+ { SNDRV_CHMAP_FL, FL },
+ { SNDRV_CHMAP_FR, FR },
+ { SNDRV_CHMAP_RL, RL },
+ { SNDRV_CHMAP_RR, RR },
+ { SNDRV_CHMAP_LFE, LFE },
+ { SNDRV_CHMAP_FC, FC },
+ { SNDRV_CHMAP_RLC, RLC },
+ { SNDRV_CHMAP_RRC, RRC },
+ { SNDRV_CHMAP_RC, RC },
+ { SNDRV_CHMAP_FLC, FLC },
+ { SNDRV_CHMAP_FRC, FRC },
+ { SNDRV_CHMAP_FLH, FLH },
+ { SNDRV_CHMAP_FRH, FRH },
+ { SNDRV_CHMAP_FLW, FLW },
+ { SNDRV_CHMAP_FRW, FRW },
+ { SNDRV_CHMAP_TC, TC },
+ { SNDRV_CHMAP_FCH, FCH },
{} /* terminator */
};
@@ -660,25 +784,19 @@ static int to_spk_mask(unsigned char c)
}
/* from ALSA API channel position to CEA slot */
-static int to_cea_slot(unsigned char c)
+static int to_cea_slot(int ordered_ca, unsigned char pos)
{
- struct channel_map_table *t = map_tables;
- for (; t->map; t++) {
- if (t->map == c)
- return t->cea_slot;
- }
- return 0x0f;
-}
+ int mask = to_spk_mask(pos);
+ int i;
-/* from CEA slot to ALSA API channel position */
-static int from_cea_slot(unsigned char c)
-{
- struct channel_map_table *t = map_tables;
- for (; t->map; t++) {
- if (t->cea_slot == c)
- return t->map;
+ if (mask) {
+ for (i = 0; i < 8; i++) {
+ if (channel_allocations[ordered_ca].speakers[7 - i] == mask)
+ return i;
+ }
}
- return 0;
+
+ return -1;
}
/* from speaker bit mask to ALSA API channel position */
@@ -692,6 +810,14 @@ static int spk_to_chmap(int spk)
return 0;
}
+/* from CEA slot to ALSA API channel position */
+static int from_cea_slot(int ordered_ca, unsigned char slot)
+{
+ int mask = channel_allocations[ordered_ca].speakers[7 - slot];
+
+ return spk_to_chmap(mask);
+}
+
/* get the CA index corresponding to the given ALSA API channel map */
static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
{
@@ -718,18 +844,29 @@ static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
/* set up the channel slots for the given ALSA API channel map */
static int hdmi_manual_setup_channel_mapping(struct hda_codec *codec,
hda_nid_t pin_nid,
- int chs, unsigned char *map)
+ int chs, unsigned char *map,
+ int ca)
{
- int i;
- for (i = 0; i < 8; i++) {
- int val, err;
- if (i < chs)
- val = to_cea_slot(map[i]);
- else
- val = 0xf;
- val |= (i << 4);
- err = snd_hda_codec_write(codec, pin_nid, 0,
- AC_VERB_SET_HDMI_CHAN_SLOT, val);
+ struct hdmi_spec *spec = codec->spec;
+ int ordered_ca = get_channel_allocation_order(ca);
+ int alsa_pos, hdmi_slot;
+ int assignments[8] = {[0 ... 7] = 0xf};
+
+ for (alsa_pos = 0; alsa_pos < chs; alsa_pos++) {
+
+ hdmi_slot = to_cea_slot(ordered_ca, map[alsa_pos]);
+
+ if (hdmi_slot < 0)
+ continue; /* unassigned channel */
+
+ assignments[hdmi_slot] = alsa_pos;
+ }
+
+ for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++) {
+ int err;
+
+ err = spec->ops.pin_set_slot_channel(codec, pin_nid, hdmi_slot,
+ assignments[hdmi_slot]);
if (err)
return -EINVAL;
}
@@ -740,9 +877,10 @@ static int hdmi_manual_setup_channel_mapping(struct hda_codec *codec,
static void hdmi_setup_fake_chmap(unsigned char *map, int ca)
{
int i;
+ int ordered_ca = get_channel_allocation_order(ca);
for (i = 0; i < 8; i++) {
- if (i < channel_allocations[ca].channels)
- map[i] = from_cea_slot((hdmi_channel_mapping[ca][i] >> 4) & 0x0f);
+ if (i < channel_allocations[ordered_ca].channels)
+ map[i] = from_cea_slot(ordered_ca, hdmi_channel_mapping[ca][i] & 0x0f);
else
map[i] = 0;
}
@@ -755,11 +893,29 @@ static void hdmi_setup_channel_mapping(struct hda_codec *codec,
{
if (!non_pcm && chmap_set) {
hdmi_manual_setup_channel_mapping(codec, pin_nid,
- channels, map);
+ channels, map, ca);
} else {
hdmi_std_setup_channel_mapping(codec, pin_nid, non_pcm, ca);
hdmi_setup_fake_chmap(map, ca);
}
+
+ hdmi_debug_channel_mapping(codec, pin_nid);
+}
+
+static int hdmi_pin_set_slot_channel(struct hda_codec *codec, hda_nid_t pin_nid,
+ int asp_slot, int channel)
+{
+ return snd_hda_codec_write(codec, pin_nid, 0,
+ AC_VERB_SET_HDMI_CHAN_SLOT,
+ (channel << 4) | asp_slot);
+}
+
+static int hdmi_pin_get_slot_channel(struct hda_codec *codec, hda_nid_t pin_nid,
+ int asp_slot)
+{
+ return (snd_hda_codec_read(codec, pin_nid, 0,
+ AC_VERB_GET_HDMI_CHAN_SLOT,
+ asp_slot) & 0xf0) >> 4;
}
/*
@@ -883,15 +1039,64 @@ static bool hdmi_infoframe_uptodate(struct hda_codec *codec, hda_nid_t pin_nid,
return true;
}
+static void hdmi_pin_setup_infoframe(struct hda_codec *codec,
+ hda_nid_t pin_nid,
+ int ca, int active_channels,
+ int conn_type)
+{
+ union audio_infoframe ai;
+
+ if (conn_type == 0) { /* HDMI */
+ struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
+
+ hdmi_ai->type = 0x84;
+ hdmi_ai->ver = 0x01;
+ hdmi_ai->len = 0x0a;
+ hdmi_ai->CC02_CT47 = active_channels - 1;
+ hdmi_ai->CA = ca;
+ hdmi_checksum_audio_infoframe(hdmi_ai);
+ } else if (conn_type == 1) { /* DisplayPort */
+ struct dp_audio_infoframe *dp_ai = &ai.dp;
+
+ dp_ai->type = 0x84;
+ dp_ai->len = 0x1b;
+ dp_ai->ver = 0x11 << 2;
+ dp_ai->CC02_CT47 = active_channels - 1;
+ dp_ai->CA = ca;
+ } else {
+ snd_printd("HDMI: unknown connection type at pin %d\n",
+ pin_nid);
+ return;
+ }
+
+ /*
+ * sizeof(ai) is used instead of sizeof(*hdmi_ai) or
+ * sizeof(*dp_ai) to avoid partial match/update problems when
+ * the user switches between HDMI/DP monitors.
+ */
+ if (!hdmi_infoframe_uptodate(codec, pin_nid, ai.bytes,
+ sizeof(ai))) {
+ snd_printdd("hdmi_pin_setup_infoframe: "
+ "pin=%d channels=%d ca=0x%02x\n",
+ pin_nid,
+ active_channels, ca);
+ hdmi_stop_infoframe_trans(codec, pin_nid);
+ hdmi_fill_audio_infoframe(codec, pin_nid,
+ ai.bytes, sizeof(ai));
+ hdmi_start_infoframe_trans(codec, pin_nid);
+ }
+}
+
static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
struct hdmi_spec_per_pin *per_pin,
bool non_pcm)
{
+ struct hdmi_spec *spec = codec->spec;
hda_nid_t pin_nid = per_pin->pin_nid;
int channels = per_pin->channels;
+ int active_channels;
struct hdmi_eld *eld;
- int ca;
- union audio_infoframe ai;
+ int ca, ordered_ca;
if (!channels)
return;
@@ -912,29 +1117,10 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
if (ca < 0)
ca = 0;
- memset(&ai, 0, sizeof(ai));
- if (eld->info.conn_type == 0) { /* HDMI */
- struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
-
- hdmi_ai->type = 0x84;
- hdmi_ai->ver = 0x01;
- hdmi_ai->len = 0x0a;
- hdmi_ai->CC02_CT47 = channels - 1;
- hdmi_ai->CA = ca;
- hdmi_checksum_audio_infoframe(hdmi_ai);
- } else if (eld->info.conn_type == 1) { /* DisplayPort */
- struct dp_audio_infoframe *dp_ai = &ai.dp;
+ ordered_ca = get_channel_allocation_order(ca);
+ active_channels = channel_allocations[ordered_ca].channels;
- dp_ai->type = 0x84;
- dp_ai->len = 0x1b;
- dp_ai->ver = 0x11 << 2;
- dp_ai->CC02_CT47 = channels - 1;
- dp_ai->CA = ca;
- } else {
- snd_printd("HDMI: unknown connection type at pin %d\n",
- pin_nid);
- return;
- }
+ hdmi_set_channel_count(codec, per_pin->cvt_nid, active_channels);
/*
* always configure channel mapping, it may have been changed by the
@@ -944,27 +1130,12 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
channels, per_pin->chmap,
per_pin->chmap_set);
- /*
- * sizeof(ai) is used instead of sizeof(*hdmi_ai) or
- * sizeof(*dp_ai) to avoid partial match/update problems when
- * the user switches between HDMI/DP monitors.
- */
- if (!hdmi_infoframe_uptodate(codec, pin_nid, ai.bytes,
- sizeof(ai))) {
- snd_printdd("hdmi_setup_audio_infoframe: "
- "pin=%d channels=%d\n",
- pin_nid,
- channels);
- hdmi_stop_infoframe_trans(codec, pin_nid);
- hdmi_fill_audio_infoframe(codec, pin_nid,
- ai.bytes, sizeof(ai));
- hdmi_start_infoframe_trans(codec, pin_nid);
- }
+ spec->ops.pin_setup_infoframe(codec, pin_nid, ca, active_channels,
+ eld->info.conn_type);
per_pin->non_pcm = non_pcm;
}
-
/*
* Unsolicited events
*/
@@ -1067,26 +1238,22 @@ static void haswell_verify_D0(struct hda_codec *codec,
#define is_hbr_format(format) \
((format & AC_FMT_TYPE_NON_PCM) && (format & AC_FMT_CHAN_MASK) == 7)
-static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
- hda_nid_t pin_nid, u32 stream_tag, int format)
+static int hdmi_pin_hbr_setup(struct hda_codec *codec, hda_nid_t pin_nid,
+ bool hbr)
{
- int pinctl;
- int new_pinctl = 0;
-
- if (is_haswell(codec))
- haswell_verify_D0(codec, cvt_nid, pin_nid);
+ int pinctl, new_pinctl;
if (snd_hda_query_pin_caps(codec, pin_nid) & AC_PINCAP_HBR) {
pinctl = snd_hda_codec_read(codec, pin_nid, 0,
AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
new_pinctl = pinctl & ~AC_PINCTL_EPT;
- if (is_hbr_format(format))
+ if (hbr)
new_pinctl |= AC_PINCTL_EPT_HBR;
else
new_pinctl |= AC_PINCTL_EPT_NATIVE;
- snd_printdd("hdmi_setup_stream: "
+ snd_printdd("hdmi_pin_hbr_setup: "
"NID=0x%x, %spinctl=0x%x\n",
pin_nid,
pinctl == new_pinctl ? "" : "new-",
@@ -1096,11 +1263,26 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
snd_hda_codec_write(codec, pin_nid, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL,
new_pinctl);
+ } else if (hbr)
+ return -EINVAL;
- }
- if (is_hbr_format(format) && !new_pinctl) {
+ return 0;
+}
+
+static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
+ hda_nid_t pin_nid, u32 stream_tag, int format)
+{
+ struct hdmi_spec *spec = codec->spec;
+ int err;
+
+ if (is_haswell(codec))
+ haswell_verify_D0(codec, cvt_nid, pin_nid);
+
+ err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format));
+
+ if (err) {
snd_printdd("hdmi_setup_stream: HBR is not supported\n");
- return -EINVAL;
+ return err;
}
snd_hda_codec_setup_stream(codec, cvt_nid, stream_tag, 0, format);
@@ -1146,7 +1328,16 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
return 0;
}
-static void haswell_config_cvts(struct hda_codec *codec,
+/* Intel HDMI workaround to fix audio routing issue:
+ * For some Intel display codecs, pins share the same connection list.
+ * So a converter can be selected by multiple pins, and playback on any of these
+ * pins will generate sound on the external display, because audio flows from
+ * the same converter to the display pipeline. Also muting one pin may make
+ * other pins have no sound output.
+ * So this function ensures that an assigned converter for a pin is not selected
+ * by any other pins.
+ */
+static void intel_not_share_assigned_cvt(struct hda_codec *codec,
hda_nid_t pin_nid, int mux_idx)
{
struct hdmi_spec *spec = codec->spec;
@@ -1217,6 +1408,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
per_cvt = get_cvt(spec, cvt_idx);
/* Claim converter */
per_cvt->assigned = 1;
+ per_pin->cvt_nid = per_cvt->cvt_nid;
hinfo->nid = per_cvt->cvt_nid;
snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
@@ -1224,8 +1416,8 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
mux_idx);
/* configure unused pins to choose other converters */
- if (is_haswell(codec))
- haswell_config_cvts(codec, per_pin->pin_nid, mux_idx);
+ if (is_haswell(codec) || is_valleyview(codec))
+ intel_not_share_assigned_cvt(codec, per_pin->pin_nid, mux_idx);
snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
@@ -1302,6 +1494,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
bool update_eld = false;
bool eld_changed = false;
+ mutex_lock(&per_pin->lock);
pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
if (pin_eld->monitor_present)
eld->eld_valid = !!(present & AC_PINSENSE_ELDV);
@@ -1313,7 +1506,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
codec->addr, pin_nid, pin_eld->monitor_present, eld->eld_valid);
if (eld->eld_valid) {
- if (snd_hdmi_get_eld(codec, pin_nid, eld->eld_buffer,
+ if (spec->ops.pin_get_eld(codec, pin_nid, eld->eld_buffer,
&eld->eld_size) < 0)
eld->eld_valid = false;
else {
@@ -1331,11 +1524,10 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
queue_delayed_work(codec->bus->workq,
&per_pin->work,
msecs_to_jiffies(300));
- return;
+ goto unlock;
}
}
- mutex_lock(&pin_eld->lock);
if (pin_eld->eld_valid && !eld->eld_valid) {
update_eld = true;
eld_changed = true;
@@ -1352,20 +1544,22 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
pin_eld->eld_size = eld->eld_size;
pin_eld->info = eld->info;
- /* Haswell-specific workaround: re-setup when the transcoder is
- * changed during the stream playback
+ /*
+ * Re-setup pin and infoframe. This is needed e.g. when
+ * - sink is first plugged-in (infoframe is not set up if !monitor_present)
+ * - transcoder can change during stream playback on Haswell
*/
- if (is_haswell(codec) &&
- eld->eld_valid && !old_eld_valid && per_pin->setup)
+ if (eld->eld_valid && !old_eld_valid && per_pin->setup)
hdmi_setup_audio_infoframe(codec, per_pin,
per_pin->non_pcm);
}
- mutex_unlock(&pin_eld->lock);
if (eld_changed)
snd_ctl_notify(codec->bus->card,
SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
&per_pin->eld_ctl->id);
+ unlock:
+ mutex_unlock(&per_pin->lock);
}
static void hdmi_repoll_eld(struct work_struct *work)
@@ -1536,14 +1730,14 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
bool non_pcm;
non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
+ mutex_lock(&per_pin->lock);
per_pin->channels = substream->runtime->channels;
per_pin->setup = true;
- hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
-
hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
+ mutex_unlock(&per_pin->lock);
- return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+ return spec->ops.setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
}
static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
@@ -1579,11 +1773,14 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
per_pin = get_pin(spec, pin_idx);
snd_hda_spdif_ctls_unassign(codec, pin_idx);
+
+ mutex_lock(&per_pin->lock);
per_pin->chmap_set = false;
memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
per_pin->setup = false;
per_pin->channels = 0;
+ mutex_unlock(&per_pin->lock);
}
return 0;
@@ -1612,14 +1809,40 @@ static int hdmi_chmap_ctl_info(struct snd_kcontrol *kcontrol,
return 0;
}
+static int hdmi_chmap_cea_alloc_validate_get_type(struct cea_channel_speaker_allocation *cap,
+ int channels)
+{
+ /* If the speaker allocation matches the channel count, it is OK. */
+ if (cap->channels != channels)
+ return -1;
+
+ /* all channels are remappable freely */
+ return SNDRV_CTL_TLVT_CHMAP_VAR;
+}
+
+static void hdmi_cea_alloc_to_tlv_chmap(struct cea_channel_speaker_allocation *cap,
+ unsigned int *chmap, int channels)
+{
+ int count = 0;
+ int c;
+
+ for (c = 7; c >= 0; c--) {
+ int spk = cap->speakers[c];
+ if (!spk)
+ continue;
+
+ chmap[count++] = spk_to_chmap(spk);
+ }
+
+ WARN_ON(count != channels);
+}
+
static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
unsigned int size, unsigned int __user *tlv)
{
struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
struct hda_codec *codec = info->private_data;
struct hdmi_spec *spec = codec->spec;
- const unsigned int valid_mask =
- FL | FR | RL | RR | LFE | FC | RLC | RRC;
unsigned int __user *dst;
int chs, count = 0;
@@ -1630,18 +1853,19 @@ static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
size -= 8;
dst = tlv + 2;
for (chs = 2; chs <= spec->channels_max; chs++) {
- int i, c;
+ int i;
struct cea_channel_speaker_allocation *cap;
cap = channel_allocations;
for (i = 0; i < ARRAY_SIZE(channel_allocations); i++, cap++) {
int chs_bytes = chs * 4;
- if (cap->channels != chs)
- continue;
- if (cap->spk_mask & ~valid_mask)
+ int type = spec->ops.chmap_cea_alloc_validate_get_type(cap, chs);
+ unsigned int tlv_chmap[8];
+
+ if (type < 0)
continue;
if (size < 8)
return -ENOMEM;
- if (put_user(SNDRV_CTL_TLVT_CHMAP_VAR, dst) ||
+ if (put_user(type, dst) ||
put_user(chs_bytes, dst + 1))
return -EFAULT;
dst += 2;
@@ -1651,14 +1875,10 @@ static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
return -ENOMEM;
size -= chs_bytes;
count += chs_bytes;
- for (c = 7; c >= 0; c--) {
- int spk = cap->speakers[c];
- if (!spk)
- continue;
- if (put_user(spk_to_chmap(spk), dst))
- return -EFAULT;
- dst++;
- }
+ spec->ops.cea_alloc_to_tlv_chmap(cap, tlv_chmap, chs);
+ if (copy_to_user(dst, tlv_chmap, chs_bytes))
+ return -EFAULT;
+ dst += chs;
}
}
if (put_user(count, tlv + 1))
@@ -1692,7 +1912,7 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
unsigned int ctl_idx;
struct snd_pcm_substream *substream;
unsigned char chmap[8];
- int i, ca, prepared = 0;
+ int i, err, ca, prepared = 0;
ctl_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
substream = snd_pcm_chmap_substream(info, ctl_idx);
@@ -1716,10 +1936,17 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
ca = hdmi_manual_channel_allocation(ARRAY_SIZE(chmap), chmap);
if (ca < 0)
return -EINVAL;
+ if (spec->ops.chmap_validate) {
+ err = spec->ops.chmap_validate(ca, ARRAY_SIZE(chmap), chmap);
+ if (err)
+ return err;
+ }
+ mutex_lock(&per_pin->lock);
per_pin->chmap_set = true;
memcpy(per_pin->chmap, chmap, sizeof(chmap));
if (prepared)
hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
+ mutex_unlock(&per_pin->lock);
return 0;
}
@@ -1836,12 +2063,11 @@ static int generic_hdmi_init_per_pins(struct hda_codec *codec)
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
- struct hdmi_eld *eld = &per_pin->sink_eld;
per_pin->codec = codec;
- mutex_init(&eld->lock);
+ mutex_init(&per_pin->lock);
INIT_DELAYED_WORK(&per_pin->work, hdmi_repoll_eld);
- snd_hda_eld_proc_new(codec, eld, pin_idx);
+ eld_proc_new(per_pin, pin_idx);
}
return 0;
}
@@ -1882,10 +2108,9 @@ static void generic_hdmi_free(struct hda_codec *codec)
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
- struct hdmi_eld *eld = &per_pin->sink_eld;
cancel_delayed_work(&per_pin->work);
- snd_hda_eld_proc_free(codec, eld);
+ eld_proc_free(per_pin);
}
flush_workqueue(codec->bus->workq);
@@ -1922,6 +2147,17 @@ static const struct hda_codec_ops generic_hdmi_patch_ops = {
#endif
};
+static const struct hdmi_ops generic_standard_hdmi_ops = {
+ .pin_get_eld = snd_hdmi_get_eld,
+ .pin_get_slot_channel = hdmi_pin_get_slot_channel,
+ .pin_set_slot_channel = hdmi_pin_set_slot_channel,
+ .pin_setup_infoframe = hdmi_pin_setup_infoframe,
+ .pin_hbr_setup = hdmi_pin_hbr_setup,
+ .setup_stream = hdmi_setup_stream,
+ .chmap_cea_alloc_validate_get_type = hdmi_chmap_cea_alloc_validate_get_type,
+ .cea_alloc_to_tlv_chmap = hdmi_cea_alloc_to_tlv_chmap,
+};
+
static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
hda_nid_t nid)
@@ -2004,6 +2240,7 @@ static int patch_generic_hdmi(struct hda_codec *codec)
if (spec == NULL)
return -ENOMEM;
+ spec->ops = generic_standard_hdmi_ops;
codec->spec = spec;
hdmi_array_init(spec, 4);
@@ -2559,49 +2796,398 @@ static int patch_nvhdmi_8ch_7x(struct hda_codec *codec)
}
/*
- * ATI-specific implementations
- *
- * FIXME: we may omit the whole this and use the generic code once after
- * it's confirmed to work.
+ * NVIDIA codecs ignore ASP mapping for 2ch - confirmed on:
+ * - 0x10de0015
+ * - 0x10de0040
+ */
+static int nvhdmi_chmap_cea_alloc_validate_get_type(struct cea_channel_speaker_allocation *cap,
+ int channels)
+{
+ if (cap->ca_index == 0x00 && channels == 2)
+ return SNDRV_CTL_TLVT_CHMAP_FIXED;
+
+ return hdmi_chmap_cea_alloc_validate_get_type(cap, channels);
+}
+
+static int nvhdmi_chmap_validate(int ca, int chs, unsigned char *map)
+{
+ if (ca == 0x00 && (map[0] != SNDRV_CHMAP_FL || map[1] != SNDRV_CHMAP_FR))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int patch_nvhdmi(struct hda_codec *codec)
+{
+ struct hdmi_spec *spec;
+ int err;
+
+ err = patch_generic_hdmi(codec);
+ if (err)
+ return err;
+
+ spec = codec->spec;
+
+ spec->ops.chmap_cea_alloc_validate_get_type =
+ nvhdmi_chmap_cea_alloc_validate_get_type;
+ spec->ops.chmap_validate = nvhdmi_chmap_validate;
+
+ return 0;
+}
+
+/*
+ * ATI/AMD-specific implementations
*/
-#define ATIHDMI_CVT_NID 0x02 /* audio converter */
-#define ATIHDMI_PIN_NID 0x03 /* HDMI output pin */
+#define is_amdhdmi_rev3_or_later(codec) \
+ ((codec)->vendor_id == 0x1002aa01 && ((codec)->revision_id & 0xff00) >= 0x0300)
+#define has_amd_full_remap_support(codec) is_amdhdmi_rev3_or_later(codec)
+
+/* ATI/AMD specific HDA pin verbs, see the AMD HDA Verbs specification */
+#define ATI_VERB_SET_CHANNEL_ALLOCATION 0x771
+#define ATI_VERB_SET_DOWNMIX_INFO 0x772
+#define ATI_VERB_SET_MULTICHANNEL_01 0x777
+#define ATI_VERB_SET_MULTICHANNEL_23 0x778
+#define ATI_VERB_SET_MULTICHANNEL_45 0x779
+#define ATI_VERB_SET_MULTICHANNEL_67 0x77a
+#define ATI_VERB_SET_HBR_CONTROL 0x77c
+#define ATI_VERB_SET_MULTICHANNEL_1 0x785
+#define ATI_VERB_SET_MULTICHANNEL_3 0x786
+#define ATI_VERB_SET_MULTICHANNEL_5 0x787
+#define ATI_VERB_SET_MULTICHANNEL_7 0x788
+#define ATI_VERB_SET_MULTICHANNEL_MODE 0x789
+#define ATI_VERB_GET_CHANNEL_ALLOCATION 0xf71
+#define ATI_VERB_GET_DOWNMIX_INFO 0xf72
+#define ATI_VERB_GET_MULTICHANNEL_01 0xf77
+#define ATI_VERB_GET_MULTICHANNEL_23 0xf78
+#define ATI_VERB_GET_MULTICHANNEL_45 0xf79
+#define ATI_VERB_GET_MULTICHANNEL_67 0xf7a
+#define ATI_VERB_GET_HBR_CONTROL 0xf7c
+#define ATI_VERB_GET_MULTICHANNEL_1 0xf85
+#define ATI_VERB_GET_MULTICHANNEL_3 0xf86
+#define ATI_VERB_GET_MULTICHANNEL_5 0xf87
+#define ATI_VERB_GET_MULTICHANNEL_7 0xf88
+#define ATI_VERB_GET_MULTICHANNEL_MODE 0xf89
+
+/* AMD specific HDA cvt verbs */
+#define ATI_VERB_SET_RAMP_RATE 0x770
+#define ATI_VERB_GET_RAMP_RATE 0xf70
+
+#define ATI_OUT_ENABLE 0x1
+
+#define ATI_MULTICHANNEL_MODE_PAIRED 0
+#define ATI_MULTICHANNEL_MODE_SINGLE 1
+
+#define ATI_HBR_CAPABLE 0x01
+#define ATI_HBR_ENABLE 0x10
+
+static int atihdmi_pin_get_eld(struct hda_codec *codec, hda_nid_t nid,
+ unsigned char *buf, int *eld_size)
+{
+ /* call hda_eld.c ATI/AMD-specific function */
+ return snd_hdmi_get_eld_ati(codec, nid, buf, eld_size,
+ is_amdhdmi_rev3_or_later(codec));
+}
+
+static void atihdmi_pin_setup_infoframe(struct hda_codec *codec, hda_nid_t pin_nid, int ca,
+ int active_channels, int conn_type)
+{
+ snd_hda_codec_write(codec, pin_nid, 0, ATI_VERB_SET_CHANNEL_ALLOCATION, ca);
+}
+
+static int atihdmi_paired_swap_fc_lfe(int pos)
+{
+ /*
+ * ATI/AMD have automatic FC/LFE swap built-in
+ * when in pairwise mapping mode.
+ */
+
+ switch (pos) {
+ /* see channel_allocations[].speakers[] */
+ case 2: return 3;
+ case 3: return 2;
+ default: break;
+ }
+
+ return pos;
+}
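/*
 * Editor's note -- illustrative sketch, not part of this patch.  The helper
 * above only exchanges positions 2 and 3 (the FC/LFE pair in the driver's
 * channel_allocations[].speakers[] ordering) and passes every other position
 * through; printing the table makes the compensation for the hardware's
 * built-in pairwise FC/LFE swap visible.
 */
#include <stdio.h>

static int example_swap_fc_lfe(int pos)
{
	switch (pos) {
	case 2: return 3;
	case 3: return 2;
	default: return pos;
	}
}

int main(void)
{
	int pos;

	for (pos = 0; pos < 8; pos++)
		printf("position %d -> %d\n", pos, example_swap_fc_lfe(pos));
	/* prints 0->0, 1->1, 2->3, 3->2, 4->4, 5->5, 6->6, 7->7 */
	return 0;
}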
+
+static int atihdmi_paired_chmap_validate(int ca, int chs, unsigned char *map)
+{
+ struct cea_channel_speaker_allocation *cap;
+ int i, j;
+
+ /* check that only channel pairs need to be remapped on old pre-rev3 ATI/AMD */
+
+ cap = &channel_allocations[get_channel_allocation_order(ca)];
+ for (i = 0; i < chs; ++i) {
+ int mask = to_spk_mask(map[i]);
+ bool ok = false;
+ bool companion_ok = false;
+
+ if (!mask)
+ continue;
+
+ for (j = 0 + i % 2; j < 8; j += 2) {
+ int chan_idx = 7 - atihdmi_paired_swap_fc_lfe(j);
+ if (cap->speakers[chan_idx] == mask) {
+ /* channel is in a supported position */
+ ok = true;
+
+ if (i % 2 == 0 && i + 1 < chs) {
+ /* even channel, check the odd companion */
+ int comp_chan_idx = 7 - atihdmi_paired_swap_fc_lfe(j + 1);
+ int comp_mask_req = to_spk_mask(map[i+1]);
+ int comp_mask_act = cap->speakers[comp_chan_idx];
+
+ if (comp_mask_req == comp_mask_act)
+ companion_ok = true;
+ else
+ return -EINVAL;
+ }
+ break;
+ }
+ }
+
+ if (!ok)
+ return -EINVAL;
+
+ if (companion_ok)
+ i++; /* companion channel already checked */
+ }
+
+ return 0;
+}
+
+static int atihdmi_pin_set_slot_channel(struct hda_codec *codec, hda_nid_t pin_nid,
+ int hdmi_slot, int stream_channel)
+{
+ int verb;
+ int ati_channel_setup = 0;
+
+ if (hdmi_slot > 7)
+ return -EINVAL;
+
+ if (!has_amd_full_remap_support(codec)) {
+ hdmi_slot = atihdmi_paired_swap_fc_lfe(hdmi_slot);
+
+ /* In case this is an odd slot but without stream channel, do not
+ * disable the slot since the corresponding even slot could have a
+ * channel. In case neither has a channel, the slot pair will be
+ * disabled when this function is called for the even slot. */
+ if (hdmi_slot % 2 != 0 && stream_channel == 0xf)
+ return 0;
+
+ hdmi_slot -= hdmi_slot % 2;
+
+ if (stream_channel != 0xf)
+ stream_channel -= stream_channel % 2;
+ }
+
+ verb = ATI_VERB_SET_MULTICHANNEL_01 + hdmi_slot/2 + (hdmi_slot % 2) * 0x00e;
+
+ /* ati_channel_setup format: [7..4] = stream_channel_id, [1] = mute, [0] = enable */
+
+ if (stream_channel != 0xf)
+ ati_channel_setup = (stream_channel << 4) | ATI_OUT_ENABLE;
+
+ return snd_hda_codec_write(codec, pin_nid, 0, verb, ati_channel_setup);
+}
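/*
 * Editor's note -- illustrative sketch, not part of this patch.  It only
 * evaluates the verb arithmetic used above: even slots land on the paired
 * ATI_VERB_SET_MULTICHANNEL_01/23/45/67 verbs (0x777..0x77a) and odd slots on
 * the single-slot _1/_3/_5/_7 verbs (0x785..0x788, i.e. 0xe higher), while the
 * payload packs the stream channel into bits [7:4] with bit 0 as the enable
 * flag.
 */
#include <stdio.h>

#define EX_VERB_SET_MULTICHANNEL_01 0x777
#define EX_OUT_ENABLE 0x1

int main(void)
{
	int hdmi_slot, stream_channel = 5;	/* arbitrary example channel */

	for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++) {
		int verb = EX_VERB_SET_MULTICHANNEL_01 + hdmi_slot / 2 +
			   (hdmi_slot % 2) * 0x00e;
		int payload = (stream_channel << 4) | EX_OUT_ENABLE;

		printf("slot %d -> verb 0x%03x, payload 0x%02x\n",
		       hdmi_slot, verb, payload);
	}
	/* slots 0..7 -> 0x777, 0x785, 0x778, 0x786, 0x779, 0x787, 0x77a, 0x788 */
	return 0;
}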
+
+static int atihdmi_pin_get_slot_channel(struct hda_codec *codec, hda_nid_t pin_nid,
+ int asp_slot)
+{
+ bool was_odd = false;
+ int ati_asp_slot = asp_slot;
+ int verb;
+ int ati_channel_setup;
+
+ if (asp_slot > 7)
+ return -EINVAL;
+
+ if (!has_amd_full_remap_support(codec)) {
+ ati_asp_slot = atihdmi_paired_swap_fc_lfe(asp_slot);
+ if (ati_asp_slot % 2 != 0) {
+ ati_asp_slot -= 1;
+ was_odd = true;
+ }
+ }
+
+ verb = ATI_VERB_GET_MULTICHANNEL_01 + ati_asp_slot/2 + (ati_asp_slot % 2) * 0x00e;
+
+ ati_channel_setup = snd_hda_codec_read(codec, pin_nid, 0, verb, 0);
+
+ if (!(ati_channel_setup & ATI_OUT_ENABLE))
+ return 0xf;
+
+ return ((ati_channel_setup & 0xf0) >> 4) + !!was_odd;
+}
+
+static int atihdmi_paired_chmap_cea_alloc_validate_get_type(struct cea_channel_speaker_allocation *cap,
+ int channels)
+{
+ int c;
+
+ /*
+ * Pre-rev3 ATI/AMD codecs operate in a paired channel mode, so
+ * we need to take that into account (a single channel may take 2
+ * channel slots if we need to carry a silent channel next to it).
+ * On Rev3+ AMD codecs this function is not used.
+ */
+ int chanpairs = 0;
+
+ /* We only produce even-numbered channel count TLVs */
+ if ((channels % 2) != 0)
+ return -1;
+
+ for (c = 0; c < 7; c += 2) {
+ if (cap->speakers[c] || cap->speakers[c+1])
+ chanpairs++;
+ }
+
+ if (chanpairs * 2 != channels)
+ return -1;
+
+ return SNDRV_CTL_TLVT_CHMAP_PAIRED;
+}
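/*
 * Editor's note -- illustrative sketch, not part of this patch.  The pair
 * counting above means a TLV entry is produced only when the requested
 * channel count equals twice the number of occupied slot pairs.  With a
 * hypothetical allocation occupying three pairs (e.g. FL/FR, FC/LFE, RL/RR),
 * only channels == 6 validates; 0 below stands in for the PAIRED TLV type the
 * driver returns on success, -1 for rejection.
 */
#include <stdio.h>

/* nonzero = speaker present in that slot; three occupied pairs */
static const int example_speakers[8] = { 1, 1, 1, 1, 1, 1, 0, 0 };

static int example_validate(const int *speakers, int channels)
{
	int c, chanpairs = 0;

	if (channels % 2)
		return -1;	/* only even channel counts are produced */

	for (c = 0; c < 7; c += 2)
		if (speakers[c] || speakers[c + 1])
			chanpairs++;

	return chanpairs * 2 == channels ? 0 : -1;
}

int main(void)
{
	int chs;

	for (chs = 2; chs <= 8; chs += 2)
		printf("channels %d -> %d\n", chs,
		       example_validate(example_speakers, chs));
	/* 2 -> -1, 4 -> -1, 6 -> 0, 8 -> -1 */
	return 0;
}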
+
+static void atihdmi_paired_cea_alloc_to_tlv_chmap(struct cea_channel_speaker_allocation *cap,
+ unsigned int *chmap, int channels)
+{
+ /* produce paired maps for pre-rev3 ATI/AMD codecs */
+ int count = 0;
+ int c;
+
+ for (c = 7; c >= 0; c--) {
+ int chan = 7 - atihdmi_paired_swap_fc_lfe(7 - c);
+ int spk = cap->speakers[chan];
+ if (!spk) {
+ /* add N/A channel if the companion channel is occupied */
+ if (cap->speakers[chan + (chan % 2 ? -1 : 1)])
+ chmap[count++] = SNDRV_CHMAP_NA;
+
+ continue;
+ }
+
+ chmap[count++] = spk_to_chmap(spk);
+ }
+
+ WARN_ON(count != channels);
+}
+
+static int atihdmi_pin_hbr_setup(struct hda_codec *codec, hda_nid_t pin_nid,
+ bool hbr)
+{
+ int hbr_ctl, hbr_ctl_new;
+
+ hbr_ctl = snd_hda_codec_read(codec, pin_nid, 0, ATI_VERB_GET_HBR_CONTROL, 0);
+ if (hbr_ctl & ATI_HBR_CAPABLE) {
+ if (hbr)
+ hbr_ctl_new = hbr_ctl | ATI_HBR_ENABLE;
+ else
+ hbr_ctl_new = hbr_ctl & ~ATI_HBR_ENABLE;
+
+ snd_printdd("atihdmi_pin_hbr_setup: "
+ "NID=0x%x, %shbr-ctl=0x%x\n",
+ pin_nid,
+ hbr_ctl == hbr_ctl_new ? "" : "new-",
+ hbr_ctl_new);
+
+ if (hbr_ctl != hbr_ctl_new)
+ snd_hda_codec_write(codec, pin_nid, 0,
+ ATI_VERB_SET_HBR_CONTROL,
+ hbr_ctl_new);
+
+ } else if (hbr)
+ return -EINVAL;
+
+ return 0;
+}
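/*
 * Editor's note -- illustrative sketch, not part of this patch.  The HBR path
 * above is a plain read-modify-write on one control register: bit 0
 * (ATI_HBR_CAPABLE) is the capability flag, bit 4 (ATI_HBR_ENABLE) is toggled,
 * and the register is rewritten only when the value actually changes; asking
 * for HBR on a non-capable pin fails.
 */
#include <stdio.h>
#include <stdbool.h>

#define EX_HBR_CAPABLE 0x01
#define EX_HBR_ENABLE  0x10

static int example_hbr_setup(unsigned int hbr_ctl, bool hbr,
			     unsigned int *hbr_ctl_new)
{
	if (!(hbr_ctl & EX_HBR_CAPABLE))
		return hbr ? -1 : 0;	/* HBR requested but unsupported */

	*hbr_ctl_new = hbr ? (hbr_ctl | EX_HBR_ENABLE)
			   : (hbr_ctl & ~EX_HBR_ENABLE);
	return 0;
}

int main(void)
{
	unsigned int new_val = 0;

	example_hbr_setup(0x01, true, &new_val);
	printf("capable, enable HBR  -> 0x%02x\n", new_val);	/* 0x11 */
	example_hbr_setup(0x11, false, &new_val);
	printf("capable, disable HBR -> 0x%02x\n", new_val);	/* 0x01 */
	printf("not capable, HBR req -> %d\n",
	       example_hbr_setup(0x00, true, &new_val));	/* -1 */
	return 0;
}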
+
+static int atihdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
+ hda_nid_t pin_nid, u32 stream_tag, int format)
+{
+
+ if (is_amdhdmi_rev3_or_later(codec)) {
+ int ramp_rate = 180; /* default as per AMD spec */
+ /* disable ramp-up/down for non-pcm as per AMD spec */
+ if (format & AC_FMT_TYPE_NON_PCM)
+ ramp_rate = 0;
+
+ snd_hda_codec_write(codec, cvt_nid, 0, ATI_VERB_SET_RAMP_RATE, ramp_rate);
+ }
+
+ return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+}
-static int atihdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- unsigned int stream_tag,
- unsigned int format,
- struct snd_pcm_substream *substream)
+
+static int atihdmi_init(struct hda_codec *codec)
{
struct hdmi_spec *spec = codec->spec;
- struct hdmi_spec_per_cvt *per_cvt = get_cvt(spec, 0);
- int chans = substream->runtime->channels;
- int i, err;
+ int pin_idx, err;
- err = simple_playback_pcm_prepare(hinfo, codec, stream_tag, format,
- substream);
- if (err < 0)
+ err = generic_hdmi_init(codec);
+
+ if (err)
return err;
- snd_hda_codec_write(codec, per_cvt->cvt_nid, 0,
- AC_VERB_SET_CVT_CHAN_COUNT, chans - 1);
- /* FIXME: XXX */
- for (i = 0; i < chans; i++) {
- snd_hda_codec_write(codec, per_cvt->cvt_nid, 0,
- AC_VERB_SET_HDMI_CHAN_SLOT,
- (i << 4) | i);
+
+ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+
+ /* make sure downmix information in infoframe is zero */
+ snd_hda_codec_write(codec, per_pin->pin_nid, 0, ATI_VERB_SET_DOWNMIX_INFO, 0);
+
+ /* enable channel-wise remap mode if supported */
+ if (has_amd_full_remap_support(codec))
+ snd_hda_codec_write(codec, per_pin->pin_nid, 0,
+ ATI_VERB_SET_MULTICHANNEL_MODE,
+ ATI_MULTICHANNEL_MODE_SINGLE);
}
+
return 0;
}
static int patch_atihdmi(struct hda_codec *codec)
{
struct hdmi_spec *spec;
- int err = patch_simple_hdmi(codec, ATIHDMI_CVT_NID, ATIHDMI_PIN_NID);
- if (err < 0)
+ struct hdmi_spec_per_cvt *per_cvt;
+ int err, cvt_idx;
+
+ err = patch_generic_hdmi(codec);
+
+ if (err)
return err;
+
+ codec->patch_ops.init = atihdmi_init;
+
spec = codec->spec;
- spec->pcm_playback.ops.prepare = atihdmi_playback_pcm_prepare;
+
+ spec->ops.pin_get_eld = atihdmi_pin_get_eld;
+ spec->ops.pin_get_slot_channel = atihdmi_pin_get_slot_channel;
+ spec->ops.pin_set_slot_channel = atihdmi_pin_set_slot_channel;
+ spec->ops.pin_setup_infoframe = atihdmi_pin_setup_infoframe;
+ spec->ops.pin_hbr_setup = atihdmi_pin_hbr_setup;
+ spec->ops.setup_stream = atihdmi_setup_stream;
+
+ if (!has_amd_full_remap_support(codec)) {
+ /* override to ATI/AMD-specific versions with pairwise mapping */
+ spec->ops.chmap_cea_alloc_validate_get_type =
+ atihdmi_paired_chmap_cea_alloc_validate_get_type;
+ spec->ops.cea_alloc_to_tlv_chmap = atihdmi_paired_cea_alloc_to_tlv_chmap;
+ spec->ops.chmap_validate = atihdmi_paired_chmap_validate;
+ }
+
+ /* ATI/AMD converters do not advertise all of their capabilities */
+ for (cvt_idx = 0; cvt_idx < spec->num_cvts; cvt_idx++) {
+ per_cvt = get_cvt(spec, cvt_idx);
+ per_cvt->channels_max = max(per_cvt->channels_max, 8u);
+ per_cvt->rates |= SUPPORTED_RATES;
+ per_cvt->formats |= SUPPORTED_FORMATS;
+ per_cvt->maxbps = max(per_cvt->maxbps, 24u);
+ }
+
+ spec->channels_max = max(spec->channels_max, 8u);
+
return 0;
}
@@ -2621,7 +3207,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x1002793c, .name = "RS600 HDMI", .patch = patch_atihdmi },
{ .id = 0x10027919, .name = "RS600 HDMI", .patch = patch_atihdmi },
{ .id = 0x1002791a, .name = "RS690/780 HDMI", .patch = patch_atihdmi },
-{ .id = 0x1002aa01, .name = "R6xx HDMI", .patch = patch_generic_hdmi },
+{ .id = 0x1002aa01, .name = "R6xx HDMI", .patch = patch_atihdmi },
{ .id = 0x10951390, .name = "SiI1390 HDMI", .patch = patch_generic_hdmi },
{ .id = 0x10951392, .name = "SiI1392 HDMI", .patch = patch_generic_hdmi },
{ .id = 0x17e80047, .name = "Chrontel HDMI", .patch = patch_generic_hdmi },
@@ -2630,30 +3216,30 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x10de0005, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
{ .id = 0x10de0006, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
{ .id = 0x10de0007, .name = "MCP79/7A HDMI", .patch = patch_nvhdmi_8ch_7x },
-{ .id = 0x10de000a, .name = "GPU 0a HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de000b, .name = "GPU 0b HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de000c, .name = "MCP89 HDMI", .patch = patch_generic_hdmi },
-{ .id = 0x10de000d, .name = "GPU 0d HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0010, .name = "GPU 10 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0011, .name = "GPU 11 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de000a, .name = "GPU 0a HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de000b, .name = "GPU 0b HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de000c, .name = "MCP89 HDMI", .patch = patch_nvhdmi },
+{ .id = 0x10de000d, .name = "GPU 0d HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0010, .name = "GPU 10 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0011, .name = "GPU 11 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_nvhdmi },
/* 17 is known to be absent */
-{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_generic_hdmi },
-{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -2669,6 +3255,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
+{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
{ .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi },
{} /* terminator */
};
@@ -2723,6 +3310,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862805");
MODULE_ALIAS("snd-hda-codec-id:80862806");
MODULE_ALIAS("snd-hda-codec-id:80862807");
MODULE_ALIAS("snd-hda-codec-id:80862880");
+MODULE_ALIAS("snd-hda-codec-id:80862882");
MODULE_ALIAS("snd-hda-codec-id:808629fb");
MODULE_LICENSE("GPL");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bf313bea7085..01bf812455a2 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -554,8 +554,6 @@ do_sku:
nid = portd;
else if (tmp == 3)
nid = porti;
- else
- return 1;
if (found_in_nid_list(nid, spec->gen.autocfg.line_out_pins,
spec->gen.autocfg.line_outs))
return 1;
@@ -2388,6 +2386,7 @@ static const struct hda_verb alc268_beep_init_verbs[] = {
enum {
ALC268_FIXUP_INV_DMIC,
ALC268_FIXUP_HP_EAPD,
+ ALC268_FIXUP_SPDIF,
};
static const struct hda_fixup alc268_fixups[] = {
@@ -2402,6 +2401,13 @@ static const struct hda_fixup alc268_fixups[] = {
{}
}
},
+ [ALC268_FIXUP_SPDIF] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1e, 0x014b1180 }, /* enable SPDIF out */
+ {}
+ }
+ },
};
static const struct hda_model_fixup alc268_fixup_models[] = {
@@ -2411,6 +2417,7 @@ static const struct hda_model_fixup alc268_fixup_models[] = {
};
static const struct snd_pci_quirk alc268_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x0139, "Acer TravelMate 6293", ALC268_FIXUP_SPDIF),
SND_PCI_QUIRK(0x1025, 0x015b, "Acer AOA 150 (ZG5)", ALC268_FIXUP_INV_DMIC),
/* below is codec SSID since multiple Toshiba laptops have the
* same PCI SSID 1179:ff00
@@ -2539,7 +2546,9 @@ enum {
ALC269_TYPE_ALC282,
ALC269_TYPE_ALC283,
ALC269_TYPE_ALC284,
+ ALC269_TYPE_ALC285,
ALC269_TYPE_ALC286,
+ ALC269_TYPE_ALC255,
};
/*
@@ -2558,6 +2567,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
case ALC269_TYPE_ALC269VC:
case ALC269_TYPE_ALC280:
case ALC269_TYPE_ALC284:
+ case ALC269_TYPE_ALC285:
ssids = alc269va_ssids;
break;
case ALC269_TYPE_ALC269VB:
@@ -2565,6 +2575,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
case ALC269_TYPE_ALC282:
case ALC269_TYPE_ALC283:
case ALC269_TYPE_ALC286:
+ case ALC269_TYPE_ALC255:
ssids = alc269_ssids;
break;
default:
@@ -2652,7 +2663,7 @@ static void alc283_shutup(struct hda_codec *codec)
AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
if (hp_pin_sense)
- msleep(85);
+ msleep(100);
snd_hda_codec_write(codec, hp_pin, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
@@ -2661,7 +2672,7 @@ static void alc283_shutup(struct hda_codec *codec)
alc_write_coef_idx(codec, 0x46, val | (3 << 12));
if (hp_pin_sense)
- msleep(85);
+ msleep(100);
snd_hda_shutup_pins(codec);
alc_write_coef_idx(codec, 0x43, 0x9614);
}
@@ -3443,6 +3454,8 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
switch (action) {
case HDA_FIXUP_ACT_PRE_PROBE:
alc283_chromebook_caps(codec);
+ /* Disable AA-loopback as it causes white noise */
+ spec->gen.mixer_nid = 0;
spec->gen.hp_automute_hook = alc283_hp_automute_hook;
/* MIC2-VREF control */
/* Set to manual mode */
@@ -4125,9 +4138,16 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0292:
spec->codec_variant = ALC269_TYPE_ALC284;
break;
+ case 0x10ec0285:
+ case 0x10ec0293:
+ spec->codec_variant = ALC269_TYPE_ALC285;
+ break;
case 0x10ec0286:
spec->codec_variant = ALC269_TYPE_ALC286;
break;
+ case 0x10ec0255:
+ spec->codec_variant = ALC269_TYPE_ALC255;
+ break;
}
if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -4415,6 +4435,25 @@ static void alc272_fixup_mario(struct hda_codec *codec,
"hda_codec: failed to override amp caps for NID 0x2\n");
}
+static const struct snd_pcm_chmap_elem asus_pcm_2_1_chmaps[] = {
+ { .channels = 2,
+ .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
+ { .channels = 4,
+ .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
+ SNDRV_CHMAP_NA, SNDRV_CHMAP_LFE } }, /* LFE only on right */
+ { }
+};
+
+/* override the 2.1 chmap */
+static void alc662_fixup_bass_chmap(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_BUILD) {
+ struct alc_spec *spec = codec->spec;
+ spec->gen.pcm_rec[0].stream[0].chmap = asus_pcm_2_1_chmaps;
+ }
+}
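/*
 * Editor's note -- illustrative sketch, not part of this patch.  The table
 * above is matched by channel count: a 2-channel stream is plain FL/FR, while
 * a 4-channel stream leaves the third slot unused (NA) and routes the fourth
 * to the LFE, so the bass speaker is fed only from the right channel of the
 * second pair.  The struct below is a simplified stand-in for
 * snd_pcm_chmap_elem, using strings instead of SNDRV_CHMAP_* ids.
 */
#include <stdio.h>

struct example_chmap_elem {
	int channels;
	const char *map[4];
};

static const struct example_chmap_elem example_2_1_chmaps[] = {
	{ .channels = 2, .map = { "FL", "FR" } },
	{ .channels = 4, .map = { "FL", "FR", "NA", "LFE" } },
	{ }
};

int main(void)
{
	const struct example_chmap_elem *e;
	int i;

	for (e = example_2_1_chmaps; e->channels; e++) {
		printf("%d channels:", e->channels);
		for (i = 0; i < e->channels; i++)
			printf(" %s", e->map[i]);
		printf("\n");
	}
	return 0;
}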
+
enum {
ALC662_FIXUP_ASPIRE,
ALC662_FIXUP_IDEAPAD,
@@ -4435,6 +4474,7 @@ enum {
ALC662_FIXUP_INV_DMIC,
ALC668_FIXUP_DELL_MIC_NO_PRESENCE,
ALC668_FIXUP_HEADSET_MODE,
+ ALC662_FIXUP_BASS_CHMAP,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -4609,6 +4649,12 @@ static const struct hda_fixup alc662_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_headset_mode_alc668,
},
+ [ALC662_FIXUP_BASS_CHMAP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc662_fixup_bass_chmap,
+ .chained = true,
+ .chain_id = ALC662_FIXUP_ASUS_MODE4
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -4622,7 +4668,8 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
- SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
+ SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
+ SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_CHMAP),
SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
@@ -4841,6 +4888,7 @@ static int patch_alc680(struct hda_codec *codec)
static const struct hda_codec_preset snd_hda_preset_realtek[] = {
{ .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 },
{ .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 },
+ { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 },
{ .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
{ .id = 0x10ec0262, .name = "ALC262", .patch = patch_alc262 },
{ .id = 0x10ec0267, .name = "ALC267", .patch = patch_alc268 },
@@ -4854,9 +4902,11 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
{ .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
{ .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 },
{ .id = 0x10ec0284, .name = "ALC284", .patch = patch_alc269 },
+ { .id = 0x10ec0285, .name = "ALC285", .patch = patch_alc269 },
{ .id = 0x10ec0286, .name = "ALC286", .patch = patch_alc269 },
{ .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
{ .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
+ { .id = 0x10ec0293, .name = "ALC293", .patch = patch_alc269 },
{ .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
.patch = patch_alc861 },
{ .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index fba0cef1c47f..69a549a82345 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2091,8 +2091,10 @@ static void stac92hd83xxx_fixup_hp_mic_led(struct hda_codec *codec,
{
struct sigmatel_spec *spec = codec->spec;
- if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
spec->mic_mute_led_gpio = 0x08; /* GPIO3 */
+ codec->bus->avoid_link_reset = 1;
+ }
}
static void stac92hd83xxx_fixup_headset_jack(struct hda_codec *codec,
diff --git a/sound/pci/ice1712/psc724.c b/sound/pci/ice1712/psc724.c
index 302ac6ddd545..4019cf27d117 100644
--- a/sound/pci/ice1712/psc724.c
+++ b/sound/pci/ice1712/psc724.c
@@ -203,12 +203,12 @@ static void psc724_set_jack_state(struct snd_ice1712 *ice, bool hp_connected)
/* notify about master speaker mute change */
memset(&elem_id, 0, sizeof(elem_id));
elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
- strncpy(elem_id.name, "Master Speakers Playback Switch",
+ strlcpy(elem_id.name, "Master Speakers Playback Switch",
sizeof(elem_id.name));
kctl = snd_ctl_find_id(ice->card, &elem_id);
snd_ctl_notify(ice->card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id);
/* and headphone mute change */
- strncpy(elem_id.name, spec->wm8776.ctl[WM8776_CTL_HP_SW].name,
+ strlcpy(elem_id.name, spec->wm8776.ctl[WM8776_CTL_HP_SW].name,
sizeof(elem_id.name));
kctl = snd_ctl_find_id(ice->card, &elem_id);
snd_ctl_notify(ice->card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id);
diff --git a/sound/pci/ice1712/wm8766.c b/sound/pci/ice1712/wm8766.c
index e473f8a88f9c..21b373b2e260 100644
--- a/sound/pci/ice1712/wm8766.c
+++ b/sound/pci/ice1712/wm8766.c
@@ -253,7 +253,8 @@ static int snd_wm8766_ctl_get(struct snd_kcontrol *kcontrol,
}
if (wm->ctl[n].flags & WM8766_FLAG_INVERT) {
val1 = wm->ctl[n].max - (val1 - wm->ctl[n].min);
- val2 = wm->ctl[n].max - (val2 - wm->ctl[n].min);
+ if (wm->ctl[n].flags & WM8766_FLAG_STEREO)
+ val2 = wm->ctl[n].max - (val2 - wm->ctl[n].min);
}
ucontrol->value.integer.value[0] = val1;
if (wm->ctl[n].flags & WM8766_FLAG_STEREO)
diff --git a/sound/pci/ice1712/wm8776.c b/sound/pci/ice1712/wm8776.c
index a3c05fe5daf9..e66c0da62014 100644
--- a/sound/pci/ice1712/wm8776.c
+++ b/sound/pci/ice1712/wm8776.c
@@ -52,7 +52,7 @@ static void snd_wm8776_activate_ctl(struct snd_wm8776 *wm,
unsigned int index_offset;
memset(&elem_id, 0, sizeof(elem_id));
- strncpy(elem_id.name, ctl_name, sizeof(elem_id.name));
+ strlcpy(elem_id.name, ctl_name, sizeof(elem_id.name));
elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
kctl = snd_ctl_find_id(card, &elem_id);
if (!kctl)
@@ -526,7 +526,8 @@ static int snd_wm8776_ctl_get(struct snd_kcontrol *kcontrol,
}
if (wm->ctl[n].flags & WM8776_FLAG_INVERT) {
val1 = wm->ctl[n].max - (val1 - wm->ctl[n].min);
- val2 = wm->ctl[n].max - (val2 - wm->ctl[n].min);
+ if (wm->ctl[n].flags & WM8776_FLAG_STEREO)
+ val2 = wm->ctl[n].max - (val2 - wm->ctl[n].min);
}
ucontrol->value.integer.value[0] = val1;
if (wm->ctl[n].flags & WM8776_FLAG_STEREO)
diff --git a/sound/pci/lola/lola.c b/sound/pci/lola/lola.c
index 7307d97186cb..0568540dc8d3 100644
--- a/sound/pci/lola/lola.c
+++ b/sound/pci/lola/lola.c
@@ -463,7 +463,7 @@ static int lola_parse_tree(struct lola *chip)
err = lola_read_param(chip, 1, LOLA_PAR_FUNCTION_TYPE, &val);
if (err < 0) {
- printk(KERN_ERR SFX "Can't read FUNCTION_TYPE for 0x%x\n", nid);
+ printk(KERN_ERR SFX "Can't read FUNCTION_TYPE\n");
return err;
}
if (val != 1) {
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index bb9ebc5543d7..0236363c301f 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -350,9 +350,8 @@ snd_rme96_playback_copy(struct snd_pcm_substream *substream,
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
count <<= rme96->playback_frlog;
pos <<= rme96->playback_frlog;
- copy_from_user_toio(rme96->iobase + RME96_IO_PLAY_BUFFER + pos, src,
- count);
- return 0;
+ return copy_from_user_toio(rme96->iobase + RME96_IO_PLAY_BUFFER + pos, src,
+ count);
}
static int
@@ -365,9 +364,8 @@ snd_rme96_capture_copy(struct snd_pcm_substream *substream,
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
count <<= rme96->capture_frlog;
pos <<= rme96->capture_frlog;
- copy_to_user_fromio(dst, rme96->iobase + RME96_IO_REC_BUFFER + pos,
- count);
- return 0;
+ return copy_to_user_fromio(dst, rme96->iobase + RME96_IO_REC_BUFFER + pos,
+ count);
}
/*
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 3cde55b753e2..e98dc008de0b 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -3996,7 +3996,6 @@ static int hdspm_tco_sync_check(struct hdspm *hdspm)
return 1;
}
return 0;
- break;
case AES32:
status = hdspm_read(hdspm, HDSPM_statusRegister);
if (status & HDSPM_tcoLockAes) {
@@ -4006,9 +4005,6 @@ static int hdspm_tco_sync_check(struct hdspm *hdspm)
return 1;
}
return 0;
-
- break;
-
case RayDAT:
case AIO:
status = hdspm_read(hdspm, HDSPM_RD_STATUS_1);
@@ -4018,7 +4014,6 @@ static int hdspm_tco_sync_check(struct hdspm *hdspm)
if (status & 0x4000000)
return 1; /* Lock */
return 0; /* No signal */
- break;
default:
break;
@@ -6405,7 +6400,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
memset(&hdspm_version, 0, sizeof(hdspm_version));
hdspm_version.card_type = hdspm->io_type;
- strncpy(hdspm_version.cardname, hdspm->card_name,
+ strlcpy(hdspm_version.cardname, hdspm->card_name,
sizeof(hdspm_version.cardname));
hdspm_version.serial = hdspm->serial;
hdspm_version.firmware_rev = hdspm->firmware_rev;
diff --git a/sound/ppc/keywest.c b/sound/ppc/keywest.c
index 01aecc2b5073..0d1c27e911b8 100644
--- a/sound/ppc/keywest.c
+++ b/sound/ppc/keywest.c
@@ -65,7 +65,7 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter)
* already bound. If not it means binding failed, and then there
* is no point in keeping the device instantiated.
*/
- if (!keywest_ctx->client->driver) {
+ if (!keywest_ctx->client->dev.driver) {
i2c_unregister_device(keywest_ctx->client);
keywest_ctx->client = NULL;
return -ENODEV;
@@ -76,7 +76,7 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter)
* This is safe because i2c-core holds the core_lock mutex for us.
*/
list_add_tail(&keywest_ctx->client->detected,
- &keywest_ctx->client->driver->clients);
+ &to_i2c_driver(keywest_ctx->client->dev.driver)->clients);
return 0;
}
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index c93fbbb201fe..7a43c0c38316 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -28,6 +28,8 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <sound/core.h>
#include "pmac.h"
#include <sound/pcm_params.h>
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index b23354a4ceca..b9ffc17a4799 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
+#include <linux/of_irq.h>
#include <sound/core.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 61a64d281905..8b9e70105dd2 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,5 +1,5 @@
snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
-snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o
+snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o
ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
snd-soc-core-objs += soc-generic-dmaengine-pcm.o
diff --git a/sound/soc/atmel/atmel-pcm.c b/sound/soc/atmel/atmel-pcm.c
index 3109db7b9017..8ae3fa5ac60a 100644
--- a/sound/soc/atmel/atmel-pcm.c
+++ b/sound/soc/atmel/atmel-pcm.c
@@ -50,7 +50,7 @@ static int atmel_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
buf->area = dma_alloc_coherent(pcm->card->dev, size,
&buf->addr, GFP_KERNEL);
pr_debug("atmel-pcm: alloc dma buffer: area=%p, addr=%p, size=%zu\n",
- (void *)buf->area, (void *)buf->addr, size);
+ (void *)buf->area, (void *)(long)buf->addr, size);
if (!buf->area)
return -ENOMEM;
@@ -68,18 +68,15 @@ int atmel_pcm_mmap(struct snd_pcm_substream *substream,
}
EXPORT_SYMBOL_GPL(atmel_pcm_mmap);
-static u64 atmel_pcm_dmamask = DMA_BIT_MASK(32);
-
int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &atmel_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
pr_debug("atmel-pcm: allocating PCM playback DMA buffer\n");
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index bb53dea85b17..8697cedccd21 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -777,7 +777,7 @@ static int asoc_ssc_init(struct device *dev)
if (ret) {
dev_err(dev, "Could not register PCM: %d\n", ret);
goto err_unregister_dai;
- };
+ }
return 0;
diff --git a/sound/soc/atmel/atmel_wm8904.c b/sound/soc/atmel/atmel_wm8904.c
index 7222380131ea..b4e36901a40b 100644
--- a/sound/soc/atmel/atmel_wm8904.c
+++ b/sound/soc/atmel/atmel_wm8904.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/pinctrl/consumer.h>
#include <sound/soc.h>
@@ -155,15 +154,8 @@ static int atmel_asoc_wm8904_probe(struct platform_device *pdev)
struct snd_soc_card *card = &atmel_asoc_wm8904_card;
struct snd_soc_dai_link *dailink = &atmel_asoc_wm8904_dailink;
struct clk *clk_src;
- struct pinctrl *pinctrl;
int id, ret;
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- dev_err(&pdev->dev, "failed to request pinctrl\n");
- return PTR_ERR(pinctrl);
- }
-
card->dev = &pdev->dev;
ret = atmel_asoc_wm8904_dt_init(pdev);
if (ret) {
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index 802717eccbd0..f15bff1548f8 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -37,6 +37,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/of.h>
#include <linux/atmel-ssc.h>
diff --git a/sound/soc/blackfin/bf5xx-ac97-pcm.c b/sound/soc/blackfin/bf5xx-ac97-pcm.c
index 53f84085bf1f..1d4c676eb6cc 100644
--- a/sound/soc/blackfin/bf5xx-ac97-pcm.c
+++ b/sound/soc/blackfin/bf5xx-ac97-pcm.c
@@ -415,19 +415,16 @@ static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
}
}
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
static int bf5xx_pcm_ac97_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
pr_debug("%s enter\n", __func__);
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &bf5xx_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
index 9cb4a80df98e..2a5b43417fd5 100644
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
@@ -323,18 +323,16 @@ static struct snd_pcm_ops bf5xx_pcm_i2s_ops = {
.silence = bf5xx_pcm_silence,
};
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
static int bf5xx_pcm_i2s_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
+ int ret;
pr_debug("%s enter\n", __func__);
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &bf5xx_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
SNDRV_DMA_TYPE_DEV, card->dev, size, size);
diff --git a/sound/soc/cirrus/Kconfig b/sound/soc/cirrus/Kconfig
index 2c20f01e1f7e..06f938deda15 100644
--- a/sound/soc/cirrus/Kconfig
+++ b/sound/soc/cirrus/Kconfig
@@ -1,6 +1,6 @@
config SND_EP93XX_SOC
tristate "SoC Audio support for the Cirrus Logic EP93xx series"
- depends on ARCH_EP93XX && SND_SOC
+ depends on (ARCH_EP93XX || COMPILE_TEST) && SND_SOC
select SND_SOC_GENERIC_DMAENGINE_PCM
help
Say Y or M if you want to add support for codecs attached to
diff --git a/sound/soc/cirrus/ep93xx-pcm.c b/sound/soc/cirrus/ep93xx-pcm.c
index 0e9f56e0d4b2..cfe517e68009 100644
--- a/sound/soc/cirrus/ep93xx-pcm.c
+++ b/sound/soc/cirrus/ep93xx-pcm.c
@@ -57,9 +57,22 @@ static bool ep93xx_pcm_dma_filter(struct dma_chan *chan, void *filter_param)
return false;
}
+static struct dma_chan *ep93xx_compat_request_channel(
+ struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_dmaengine_dai_dma_data *dma_data;
+
+ dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+ return snd_dmaengine_pcm_request_channel(ep93xx_pcm_dma_filter,
+ dma_data);
+}
+
static const struct snd_dmaengine_pcm_config ep93xx_dmaengine_pcm_config = {
.pcm_hardware = &ep93xx_pcm_hardware,
.compat_filter_fn = ep93xx_pcm_dma_filter,
+ .compat_request_channel = ep93xx_compat_request_channel,
.prealloc_buffer_size = 131072,
};
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 259d1ac4492f..75d0ad5d2dcb 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -16,6 +16,7 @@
#include <linux/mfd/88pm860x.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/regmap.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -140,6 +141,7 @@ struct pm860x_priv {
unsigned int filter;
struct snd_soc_codec *codec;
struct i2c_client *i2c;
+ struct regmap *regmap;
struct pm860x_chip *chip;
struct pm860x_det det;
@@ -269,48 +271,6 @@ static struct st_gain st_table[] = {
{ -86, 29, 0}, { -56, 30, 0}, { -28, 31, 0}, { 0, 0, 0},
};
-static int pm860x_volatile(unsigned int reg)
-{
- BUG_ON(reg >= REG_CACHE_SIZE);
-
- switch (reg) {
- case PM860X_AUDIO_SUPPLIES_2:
- return 1;
- }
-
- return 0;
-}
-
-static unsigned int pm860x_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- unsigned char *cache = codec->reg_cache;
-
- BUG_ON(reg >= REG_CACHE_SIZE);
-
- if (pm860x_volatile(reg))
- return cache[reg];
-
- reg += REG_CACHE_BASE;
-
- return pm860x_reg_read(codec->control_data, reg);
-}
-
-static int pm860x_write_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value)
-{
- unsigned char *cache = codec->reg_cache;
-
- BUG_ON(reg >= REG_CACHE_SIZE);
-
- if (!pm860x_volatile(reg))
- cache[reg] = (unsigned char)value;
-
- reg += REG_CACHE_BASE;
-
- return pm860x_reg_write(codec->control_data, reg, value);
-}
-
static int snd_soc_get_volsw_2r_st(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1169,6 +1129,7 @@ static int pm860x_i2s_set_dai_fmt(struct snd_soc_dai *codec_dai,
static int pm860x_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ struct pm860x_priv *pm860x = snd_soc_codec_get_drvdata(codec);
int data;
switch (level) {
@@ -1182,17 +1143,17 @@ static int pm860x_set_bias_level(struct snd_soc_codec *codec,
if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
/* Enable Audio PLL & Audio section */
data = AUDIO_PLL | AUDIO_SECTION_ON;
- pm860x_reg_write(codec->control_data, REG_MISC2, data);
+ pm860x_reg_write(pm860x->i2c, REG_MISC2, data);
udelay(300);
data = AUDIO_PLL | AUDIO_SECTION_RESET
| AUDIO_SECTION_ON;
- pm860x_reg_write(codec->control_data, REG_MISC2, data);
+ pm860x_reg_write(pm860x->i2c, REG_MISC2, data);
}
break;
case SND_SOC_BIAS_OFF:
data = AUDIO_PLL | AUDIO_SECTION_RESET | AUDIO_SECTION_ON;
- pm860x_set_bits(codec->control_data, REG_MISC2, data, 0);
+ pm860x_set_bits(pm860x->i2c, REG_MISC2, data, 0);
break;
}
codec->dapm.bias_level = level;
@@ -1322,17 +1283,17 @@ int pm860x_hs_jack_detect(struct snd_soc_codec *codec,
pm860x->det.lo_shrt = lo_shrt;
if (det & SND_JACK_HEADPHONE)
- pm860x_set_bits(codec->control_data, REG_HS_DET,
+ pm860x_set_bits(pm860x->i2c, REG_HS_DET,
EN_HS_DET, EN_HS_DET);
/* headset short detect */
if (hs_shrt) {
data = CLR_SHORT_HS2 | CLR_SHORT_HS1;
- pm860x_set_bits(codec->control_data, REG_SHORTS, data, data);
+ pm860x_set_bits(pm860x->i2c, REG_SHORTS, data, data);
}
/* Lineout short detect */
if (lo_shrt) {
data = CLR_SHORT_LO2 | CLR_SHORT_LO1;
- pm860x_set_bits(codec->control_data, REG_SHORTS, data, data);
+ pm860x_set_bits(pm860x->i2c, REG_SHORTS, data, data);
}
/* sync status */
@@ -1350,7 +1311,7 @@ int pm860x_mic_jack_detect(struct snd_soc_codec *codec,
pm860x->det.mic_det = det;
if (det & SND_JACK_MICROPHONE)
- pm860x_set_bits(codec->control_data, REG_MIC_DET,
+ pm860x_set_bits(pm860x->i2c, REG_MIC_DET,
MICDET_MASK, MICDET_MASK);
/* sync status */
@@ -1366,7 +1327,7 @@ static int pm860x_probe(struct snd_soc_codec *codec)
pm860x->codec = codec;
- codec->control_data = pm860x->i2c;
+ codec->control_data = pm860x->regmap;
for (i = 0; i < 4; i++) {
ret = request_threaded_irq(pm860x->irq[i], NULL,
@@ -1380,14 +1341,6 @@ static int pm860x_probe(struct snd_soc_codec *codec)
pm860x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- ret = pm860x_bulk_read(codec->control_data, REG_CACHE_BASE,
- REG_CACHE_SIZE, codec->reg_cache);
- if (ret < 0) {
- dev_err(codec->dev, "Failed to fill register cache: %d\n",
- ret);
- goto out;
- }
-
return 0;
out:
@@ -1410,10 +1363,6 @@ static int pm860x_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_pm860x = {
.probe = pm860x_probe,
.remove = pm860x_remove,
- .read = pm860x_read_reg_cache,
- .write = pm860x_write_reg_cache,
- .reg_cache_size = REG_CACHE_SIZE,
- .reg_word_size = sizeof(u8),
.set_bias_level = pm860x_set_bias_level,
.controls = pm860x_snd_controls,
@@ -1439,6 +1388,8 @@ static int pm860x_codec_probe(struct platform_device *pdev)
pm860x->chip = chip;
pm860x->i2c = (chip->id == CHIP_PM8607) ? chip->client
: chip->companion;
+ pm860x->regmap = (chip->id == CHIP_PM8607) ? chip->regmap
+ : chip->regmap_companion;
platform_set_drvdata(pdev, pm860x);
for (i = 0; i < 4; i++) {
diff --git a/sound/soc/codecs/88pm860x-codec.h b/sound/soc/codecs/88pm860x-codec.h
index 3364ba4a3607..f7282f4f4a79 100644
--- a/sound/soc/codecs/88pm860x-codec.h
+++ b/sound/soc/codecs/88pm860x-codec.h
@@ -12,67 +12,66 @@
#ifndef __88PM860X_H
#define __88PM860X_H
-/* The offset of these registers are 0xb0 */
-#define PM860X_PCM_IFACE_1 0x00
-#define PM860X_PCM_IFACE_2 0x01
-#define PM860X_PCM_IFACE_3 0x02
-#define PM860X_PCM_RATE 0x03
-#define PM860X_EC_PATH 0x04
-#define PM860X_SIDETONE_L_GAIN 0x05
-#define PM860X_SIDETONE_R_GAIN 0x06
-#define PM860X_SIDETONE_SHIFT 0x07
-#define PM860X_ADC_OFFSET_1 0x08
-#define PM860X_ADC_OFFSET_2 0x09
-#define PM860X_DMIC_DELAY 0x0a
+#define PM860X_PCM_IFACE_1 0xb0
+#define PM860X_PCM_IFACE_2 0xb1
+#define PM860X_PCM_IFACE_3 0xb2
+#define PM860X_PCM_RATE 0xb3
+#define PM860X_EC_PATH 0xb4
+#define PM860X_SIDETONE_L_GAIN 0xb5
+#define PM860X_SIDETONE_R_GAIN 0xb6
+#define PM860X_SIDETONE_SHIFT 0xb7
+#define PM860X_ADC_OFFSET_1 0xb8
+#define PM860X_ADC_OFFSET_2 0xb9
+#define PM860X_DMIC_DELAY 0xba
-#define PM860X_I2S_IFACE_1 0x0b
-#define PM860X_I2S_IFACE_2 0x0c
-#define PM860X_I2S_IFACE_3 0x0d
-#define PM860X_I2S_IFACE_4 0x0e
-#define PM860X_EQUALIZER_N0_1 0x0f
-#define PM860X_EQUALIZER_N0_2 0x10
-#define PM860X_EQUALIZER_N1_1 0x11
-#define PM860X_EQUALIZER_N1_2 0x12
-#define PM860X_EQUALIZER_D1_1 0x13
-#define PM860X_EQUALIZER_D1_2 0x14
-#define PM860X_LOFI_GAIN_LEFT 0x15
-#define PM860X_LOFI_GAIN_RIGHT 0x16
-#define PM860X_HIFIL_GAIN_LEFT 0x17
-#define PM860X_HIFIL_GAIN_RIGHT 0x18
-#define PM860X_HIFIR_GAIN_LEFT 0x19
-#define PM860X_HIFIR_GAIN_RIGHT 0x1a
-#define PM860X_DAC_OFFSET 0x1b
-#define PM860X_OFFSET_LEFT_1 0x1c
-#define PM860X_OFFSET_LEFT_2 0x1d
-#define PM860X_OFFSET_RIGHT_1 0x1e
-#define PM860X_OFFSET_RIGHT_2 0x1f
-#define PM860X_ADC_ANA_1 0x20
-#define PM860X_ADC_ANA_2 0x21
-#define PM860X_ADC_ANA_3 0x22
-#define PM860X_ADC_ANA_4 0x23
-#define PM860X_ANA_TO_ANA 0x24
-#define PM860X_HS1_CTRL 0x25
-#define PM860X_HS2_CTRL 0x26
-#define PM860X_LO1_CTRL 0x27
-#define PM860X_LO2_CTRL 0x28
-#define PM860X_EAR_CTRL_1 0x29
-#define PM860X_EAR_CTRL_2 0x2a
-#define PM860X_AUDIO_SUPPLIES_1 0x2b
-#define PM860X_AUDIO_SUPPLIES_2 0x2c
-#define PM860X_ADC_EN_1 0x2d
-#define PM860X_ADC_EN_2 0x2e
-#define PM860X_DAC_EN_1 0x2f
-#define PM860X_DAC_EN_2 0x31
-#define PM860X_AUDIO_CAL_1 0x32
-#define PM860X_AUDIO_CAL_2 0x33
-#define PM860X_AUDIO_CAL_3 0x34
-#define PM860X_AUDIO_CAL_4 0x35
-#define PM860X_AUDIO_CAL_5 0x36
-#define PM860X_ANA_INPUT_SEL_1 0x37
-#define PM860X_ANA_INPUT_SEL_2 0x38
+#define PM860X_I2S_IFACE_1 0xbb
+#define PM860X_I2S_IFACE_2 0xbc
+#define PM860X_I2S_IFACE_3 0xbd
+#define PM860X_I2S_IFACE_4 0xbe
+#define PM860X_EQUALIZER_N0_1 0xbf
+#define PM860X_EQUALIZER_N0_2 0xc0
+#define PM860X_EQUALIZER_N1_1 0xc1
+#define PM860X_EQUALIZER_N1_2 0xc2
+#define PM860X_EQUALIZER_D1_1 0xc3
+#define PM860X_EQUALIZER_D1_2 0xc4
+#define PM860X_LOFI_GAIN_LEFT 0xc5
+#define PM860X_LOFI_GAIN_RIGHT 0xc6
+#define PM860X_HIFIL_GAIN_LEFT 0xc7
+#define PM860X_HIFIL_GAIN_RIGHT 0xc8
+#define PM860X_HIFIR_GAIN_LEFT 0xc9
+#define PM860X_HIFIR_GAIN_RIGHT 0xca
+#define PM860X_DAC_OFFSET 0xcb
+#define PM860X_OFFSET_LEFT_1 0xcc
+#define PM860X_OFFSET_LEFT_2 0xcd
+#define PM860X_OFFSET_RIGHT_1 0xce
+#define PM860X_OFFSET_RIGHT_2 0xcf
+#define PM860X_ADC_ANA_1 0xd0
+#define PM860X_ADC_ANA_2 0xd1
+#define PM860X_ADC_ANA_3 0xd2
+#define PM860X_ADC_ANA_4 0xd3
+#define PM860X_ANA_TO_ANA 0xd4
+#define PM860X_HS1_CTRL 0xd5
+#define PM860X_HS2_CTRL 0xd6
+#define PM860X_LO1_CTRL 0xd7
+#define PM860X_LO2_CTRL 0xd8
+#define PM860X_EAR_CTRL_1 0xd9
+#define PM860X_EAR_CTRL_2 0xda
+#define PM860X_AUDIO_SUPPLIES_1 0xdb
+#define PM860X_AUDIO_SUPPLIES_2 0xdc
+#define PM860X_ADC_EN_1 0xdd
+#define PM860X_ADC_EN_2 0xde
+#define PM860X_DAC_EN_1 0xdf
+#define PM860X_DAC_EN_2 0xe1
+#define PM860X_AUDIO_CAL_1 0xe2
+#define PM860X_AUDIO_CAL_2 0xe3
+#define PM860X_AUDIO_CAL_3 0xe4
+#define PM860X_AUDIO_CAL_4 0xe5
+#define PM860X_AUDIO_CAL_5 0xe6
+#define PM860X_ANA_INPUT_SEL_1 0xe7
+#define PM860X_ANA_INPUT_SEL_2 0xe8
-#define PM860X_PCM_IFACE_4 0x39
-#define PM860X_I2S_IFACE_5 0x3a
+#define PM860X_PCM_IFACE_4 0xe9
+#define PM860X_I2S_IFACE_5 0xea
#define PM860X_SHORTS 0x3b
#define PM860X_PLL_ADJ_1 0x3c
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 80555d7551e6..21ae8d4fdbfb 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -126,6 +126,8 @@ struct ab8500_codec_drvdata_dbg {
/* Private data for AB8500 device-driver */
struct ab8500_codec_drvdata {
+ struct regmap *regmap;
+
/* Sidetone */
long *sid_fir_values;
enum sid_state sid_status;
@@ -166,49 +168,35 @@ static inline const char *amic_type_str(enum amic_type type)
*/
/* Read a register from the audio-bank of AB8500 */
-static unsigned int ab8500_codec_read_reg(struct snd_soc_codec *codec,
- unsigned int reg)
+static int ab8500_codec_read_reg(void *context, unsigned int reg,
+ unsigned int *value)
{
+ struct device *dev = context;
int status;
- unsigned int value = 0;
u8 value8;
- status = abx500_get_register_interruptible(codec->dev, AB8500_AUDIO,
- reg, &value8);
- if (status < 0) {
- dev_err(codec->dev,
- "%s: ERROR: Register (0x%02x:0x%02x) read failed (%d).\n",
- __func__, (u8)AB8500_AUDIO, (u8)reg, status);
- } else {
- dev_dbg(codec->dev,
- "%s: Read 0x%02x from register 0x%02x:0x%02x\n",
- __func__, value8, (u8)AB8500_AUDIO, (u8)reg);
- value = (unsigned int)value8;
- }
+ status = abx500_get_register_interruptible(dev, AB8500_AUDIO,
+ reg, &value8);
+ *value = (unsigned int)value8;
- return value;
+ return status;
}
/* Write to a register in the audio-bank of AB8500 */
-static int ab8500_codec_write_reg(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value)
+static int ab8500_codec_write_reg(void *context, unsigned int reg,
+ unsigned int value)
{
- int status;
-
- status = abx500_set_register_interruptible(codec->dev, AB8500_AUDIO,
- reg, value);
- if (status < 0)
- dev_err(codec->dev,
- "%s: ERROR: Register (%02x:%02x) write failed (%d).\n",
- __func__, (u8)AB8500_AUDIO, (u8)reg, status);
- else
- dev_dbg(codec->dev,
- "%s: Wrote 0x%02x into register %02x:%02x\n",
- __func__, (u8)value, (u8)AB8500_AUDIO, (u8)reg);
+ struct device *dev = context;
- return status;
+ return abx500_set_register_interruptible(dev, AB8500_AUDIO,
+ reg, value);
}
+static const struct regmap_config ab8500_codec_regmap = {
+ .reg_read = ab8500_codec_read_reg,
+ .reg_write = ab8500_codec_write_reg,
+};
+
/*
* Controls - DAPM
*/
@@ -2312,17 +2300,17 @@ static int ab8500_codec_set_dai_tdm_slot(struct snd_soc_dai *dai,
case 0:
break;
case 1:
- slot = find_first_bit((unsigned long *)&tx_mask, 32);
+ slot = ffs(tx_mask);
snd_soc_update_bits(codec, AB8500_DASLOTCONF1, mask, slot);
snd_soc_update_bits(codec, AB8500_DASLOTCONF3, mask, slot);
snd_soc_update_bits(codec, AB8500_DASLOTCONF2, mask, slot);
snd_soc_update_bits(codec, AB8500_DASLOTCONF4, mask, slot);
break;
case 2:
- slot = find_first_bit((unsigned long *)&tx_mask, 32);
+ slot = ffs(tx_mask);
snd_soc_update_bits(codec, AB8500_DASLOTCONF1, mask, slot);
snd_soc_update_bits(codec, AB8500_DASLOTCONF3, mask, slot);
- slot = find_next_bit((unsigned long *)&tx_mask, 32, slot + 1);
+ slot = fls(tx_mask);
snd_soc_update_bits(codec, AB8500_DASLOTCONF2, mask, slot);
snd_soc_update_bits(codec, AB8500_DASLOTCONF4, mask, slot);
break;
@@ -2353,18 +2341,18 @@ static int ab8500_codec_set_dai_tdm_slot(struct snd_soc_dai *dai,
case 0:
break;
case 1:
- slot = find_first_bit((unsigned long *)&rx_mask, 32);
+ slot = ffs(rx_mask);
snd_soc_update_bits(codec, AB8500_ADSLOTSEL(slot),
AB8500_MASK_SLOT(slot),
AB8500_ADSLOTSELX_AD_OUT_TO_SLOT(AB8500_AD_OUT3, slot));
break;
case 2:
- slot = find_first_bit((unsigned long *)&rx_mask, 32);
+ slot = ffs(rx_mask);
snd_soc_update_bits(codec,
AB8500_ADSLOTSEL(slot),
AB8500_MASK_SLOT(slot),
AB8500_ADSLOTSELX_AD_OUT_TO_SLOT(AB8500_AD_OUT3, slot));
- slot = find_next_bit((unsigned long *)&rx_mask, 32, slot + 1);
+ slot = fls(rx_mask);
snd_soc_update_bits(codec,
AB8500_ADSLOTSEL(slot),
AB8500_MASK_SLOT(slot),
@@ -2485,9 +2473,13 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
dev_dbg(dev, "%s: Enter.\n", __func__);
+ snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
+
/* Setup AB8500 according to board-settings */
pdata = dev_get_platdata(dev->parent);
+ codec->control_data = drvdata->regmap;
+
if (np) {
if (!pdata)
pdata = devm_kzalloc(dev,
@@ -2532,12 +2524,10 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
}
/* Override HW-defaults */
- ab8500_codec_write_reg(codec,
- AB8500_ANACONF5,
- BIT(AB8500_ANACONF5_HSAUTOEN));
- ab8500_codec_write_reg(codec,
- AB8500_SHORTCIRCONF,
- BIT(AB8500_SHORTCIRCONF_HSZCDDIS));
+ snd_soc_write(codec, AB8500_ANACONF5,
+ BIT(AB8500_ANACONF5_HSAUTOEN));
+ snd_soc_write(codec, AB8500_SHORTCIRCONF,
+ BIT(AB8500_SHORTCIRCONF_HSZCDDIS));
/* Add filter controls */
status = snd_soc_add_codec_controls(codec, ab8500_filter_controls,
@@ -2567,9 +2557,6 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver ab8500_codec_driver = {
.probe = ab8500_codec_probe,
- .read = ab8500_codec_read_reg,
- .write = ab8500_codec_write_reg,
- .reg_word_size = sizeof(u8),
.controls = ab8500_ctrls,
.num_controls = ARRAY_SIZE(ab8500_ctrls),
.dapm_widgets = ab8500_dapm_widgets,
@@ -2588,10 +2575,21 @@ static int ab8500_codec_driver_probe(struct platform_device *pdev)
/* Create driver private-data struct */
drvdata = devm_kzalloc(&pdev->dev, sizeof(struct ab8500_codec_drvdata),
GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
drvdata->sid_status = SID_UNCONFIGURED;
drvdata->anc_status = ANC_UNCONFIGURED;
dev_set_drvdata(&pdev->dev, drvdata);
+ drvdata->regmap = devm_regmap_init(&pdev->dev, NULL, &pdev->dev,
+ &ab8500_codec_regmap);
+ if (IS_ERR(drvdata->regmap)) {
+ status = PTR_ERR(drvdata->regmap);
+ dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
+ __func__, status);
+ return status;
+ }
+
dev_dbg(&pdev->dev, "%s: Register codec.\n", __func__);
status = snd_soc_register_codec(&pdev->dev, &ab8500_codec_driver,
ab8500_codec_dai,
@@ -2606,7 +2604,7 @@ static int ab8500_codec_driver_probe(struct platform_device *pdev)
static int ab8500_codec_driver_remove(struct platform_device *pdev)
{
- dev_info(&pdev->dev, "%s Enter.\n", __func__);
+ dev_dbg(&pdev->dev, "%s Enter.\n", __func__);
snd_soc_unregister_codec(&pdev->dev);
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index 1aa10ddf3a61..59654b1e7f3f 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -32,6 +32,7 @@ struct adau1373_dai {
};
struct adau1373 {
+ struct regmap *regmap;
struct adau1373_dai dais[3];
};
@@ -73,7 +74,6 @@ struct adau1373 {
#define ADAU1373_PLL_CTRL4(x) (0x2c + (x) * 7)
#define ADAU1373_PLL_CTRL5(x) (0x2d + (x) * 7)
#define ADAU1373_PLL_CTRL6(x) (0x2e + (x) * 7)
-#define ADAU1373_PLL_CTRL7(x) (0x2f + (x) * 7)
#define ADAU1373_HEADDECT 0x36
#define ADAU1373_ADC_DAC_STATUS 0x37
#define ADAU1373_ADC_CTRL 0x3c
@@ -152,37 +152,172 @@ struct adau1373 {
#define ADAU1373_EP_CTRL_MICBIAS1_OFFSET 4
#define ADAU1373_EP_CTRL_MICBIAS2_OFFSET 2
-static const uint8_t adau1373_default_regs[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* 0x30 */
- 0x00, 0x00, 0x00, 0x80, 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x0a, 0x0a, 0x0a, 0x00, /* 0x40 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, /* 0x50 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x78, 0x18, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, /* 0x80 */
- 0x00, 0xc0, 0x88, 0x7a, 0xdf, 0x20, 0x00, 0x00,
- 0x78, 0x18, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, /* 0x90 */
- 0x00, 0xc0, 0x88, 0x7a, 0xdf, 0x20, 0x00, 0x00,
- 0x78, 0x18, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, /* 0xa0 */
- 0x00, 0xc0, 0x88, 0x7a, 0xdf, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
- 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, /* 0xe0 */
- 0x00, 0x1f, 0x0f, 0x00, 0x00,
+static const struct reg_default adau1373_reg_defaults[] = {
+ { ADAU1373_INPUT_MODE, 0x00 },
+ { ADAU1373_AINL_CTRL(0), 0x00 },
+ { ADAU1373_AINR_CTRL(0), 0x00 },
+ { ADAU1373_AINL_CTRL(1), 0x00 },
+ { ADAU1373_AINR_CTRL(1), 0x00 },
+ { ADAU1373_AINL_CTRL(2), 0x00 },
+ { ADAU1373_AINR_CTRL(2), 0x00 },
+ { ADAU1373_AINL_CTRL(3), 0x00 },
+ { ADAU1373_AINR_CTRL(3), 0x00 },
+ { ADAU1373_LLINE_OUT(0), 0x00 },
+ { ADAU1373_RLINE_OUT(0), 0x00 },
+ { ADAU1373_LLINE_OUT(1), 0x00 },
+ { ADAU1373_RLINE_OUT(1), 0x00 },
+ { ADAU1373_LSPK_OUT, 0x00 },
+ { ADAU1373_RSPK_OUT, 0x00 },
+ { ADAU1373_LHP_OUT, 0x00 },
+ { ADAU1373_RHP_OUT, 0x00 },
+ { ADAU1373_ADC_GAIN, 0x00 },
+ { ADAU1373_LADC_MIXER, 0x00 },
+ { ADAU1373_RADC_MIXER, 0x00 },
+ { ADAU1373_LLINE1_MIX, 0x00 },
+ { ADAU1373_RLINE1_MIX, 0x00 },
+ { ADAU1373_LLINE2_MIX, 0x00 },
+ { ADAU1373_RLINE2_MIX, 0x00 },
+ { ADAU1373_LSPK_MIX, 0x00 },
+ { ADAU1373_RSPK_MIX, 0x00 },
+ { ADAU1373_LHP_MIX, 0x00 },
+ { ADAU1373_RHP_MIX, 0x00 },
+ { ADAU1373_EP_MIX, 0x00 },
+ { ADAU1373_HP_CTRL, 0x00 },
+ { ADAU1373_HP_CTRL2, 0x00 },
+ { ADAU1373_LS_CTRL, 0x00 },
+ { ADAU1373_EP_CTRL, 0x00 },
+ { ADAU1373_MICBIAS_CTRL1, 0x00 },
+ { ADAU1373_MICBIAS_CTRL2, 0x00 },
+ { ADAU1373_OUTPUT_CTRL, 0x00 },
+ { ADAU1373_PWDN_CTRL1, 0x00 },
+ { ADAU1373_PWDN_CTRL2, 0x00 },
+ { ADAU1373_PWDN_CTRL3, 0x00 },
+ { ADAU1373_DPLL_CTRL(0), 0x00 },
+ { ADAU1373_PLL_CTRL1(0), 0x00 },
+ { ADAU1373_PLL_CTRL2(0), 0x00 },
+ { ADAU1373_PLL_CTRL3(0), 0x00 },
+ { ADAU1373_PLL_CTRL4(0), 0x00 },
+ { ADAU1373_PLL_CTRL5(0), 0x00 },
+ { ADAU1373_PLL_CTRL6(0), 0x02 },
+ { ADAU1373_DPLL_CTRL(1), 0x00 },
+ { ADAU1373_PLL_CTRL1(1), 0x00 },
+ { ADAU1373_PLL_CTRL2(1), 0x00 },
+ { ADAU1373_PLL_CTRL3(1), 0x00 },
+ { ADAU1373_PLL_CTRL4(1), 0x00 },
+ { ADAU1373_PLL_CTRL5(1), 0x00 },
+ { ADAU1373_PLL_CTRL6(1), 0x02 },
+ { ADAU1373_HEADDECT, 0x00 },
+ { ADAU1373_ADC_CTRL, 0x00 },
+ { ADAU1373_CLK_SRC_DIV(0), 0x00 },
+ { ADAU1373_CLK_SRC_DIV(1), 0x00 },
+ { ADAU1373_DAI(0), 0x0a },
+ { ADAU1373_DAI(1), 0x0a },
+ { ADAU1373_DAI(2), 0x0a },
+ { ADAU1373_BCLKDIV(0), 0x00 },
+ { ADAU1373_BCLKDIV(1), 0x00 },
+ { ADAU1373_BCLKDIV(2), 0x00 },
+ { ADAU1373_SRC_RATIOA(0), 0x00 },
+ { ADAU1373_SRC_RATIOB(0), 0x00 },
+ { ADAU1373_SRC_RATIOA(1), 0x00 },
+ { ADAU1373_SRC_RATIOB(1), 0x00 },
+ { ADAU1373_SRC_RATIOA(2), 0x00 },
+ { ADAU1373_SRC_RATIOB(2), 0x00 },
+ { ADAU1373_DEEMP_CTRL, 0x00 },
+ { ADAU1373_SRC_DAI_CTRL(0), 0x08 },
+ { ADAU1373_SRC_DAI_CTRL(1), 0x08 },
+ { ADAU1373_SRC_DAI_CTRL(2), 0x08 },
+ { ADAU1373_DIN_MIX_CTRL(0), 0x00 },
+ { ADAU1373_DIN_MIX_CTRL(1), 0x00 },
+ { ADAU1373_DIN_MIX_CTRL(2), 0x00 },
+ { ADAU1373_DIN_MIX_CTRL(3), 0x00 },
+ { ADAU1373_DIN_MIX_CTRL(4), 0x00 },
+ { ADAU1373_DOUT_MIX_CTRL(0), 0x00 },
+ { ADAU1373_DOUT_MIX_CTRL(1), 0x00 },
+ { ADAU1373_DOUT_MIX_CTRL(2), 0x00 },
+ { ADAU1373_DOUT_MIX_CTRL(3), 0x00 },
+ { ADAU1373_DOUT_MIX_CTRL(4), 0x00 },
+ { ADAU1373_DAI_PBL_VOL(0), 0x00 },
+ { ADAU1373_DAI_PBR_VOL(0), 0x00 },
+ { ADAU1373_DAI_PBL_VOL(1), 0x00 },
+ { ADAU1373_DAI_PBR_VOL(1), 0x00 },
+ { ADAU1373_DAI_PBL_VOL(2), 0x00 },
+ { ADAU1373_DAI_PBR_VOL(2), 0x00 },
+ { ADAU1373_DAI_RECL_VOL(0), 0x00 },
+ { ADAU1373_DAI_RECR_VOL(0), 0x00 },
+ { ADAU1373_DAI_RECL_VOL(1), 0x00 },
+ { ADAU1373_DAI_RECR_VOL(1), 0x00 },
+ { ADAU1373_DAI_RECL_VOL(2), 0x00 },
+ { ADAU1373_DAI_RECR_VOL(2), 0x00 },
+ { ADAU1373_DAC1_PBL_VOL, 0x00 },
+ { ADAU1373_DAC1_PBR_VOL, 0x00 },
+ { ADAU1373_DAC2_PBL_VOL, 0x00 },
+ { ADAU1373_DAC2_PBR_VOL, 0x00 },
+ { ADAU1373_ADC_RECL_VOL, 0x00 },
+ { ADAU1373_ADC_RECR_VOL, 0x00 },
+ { ADAU1373_DMIC_RECL_VOL, 0x00 },
+ { ADAU1373_DMIC_RECR_VOL, 0x00 },
+ { ADAU1373_VOL_GAIN1, 0x00 },
+ { ADAU1373_VOL_GAIN2, 0x00 },
+ { ADAU1373_VOL_GAIN3, 0x00 },
+ { ADAU1373_HPF_CTRL, 0x00 },
+ { ADAU1373_BASS1, 0x00 },
+ { ADAU1373_BASS2, 0x00 },
+ { ADAU1373_DRC(0) + 0x0, 0x78 },
+ { ADAU1373_DRC(0) + 0x1, 0x18 },
+ { ADAU1373_DRC(0) + 0x2, 0x00 },
+ { ADAU1373_DRC(0) + 0x3, 0x00 },
+ { ADAU1373_DRC(0) + 0x4, 0x00 },
+ { ADAU1373_DRC(0) + 0x5, 0xc0 },
+ { ADAU1373_DRC(0) + 0x6, 0x00 },
+ { ADAU1373_DRC(0) + 0x7, 0x00 },
+ { ADAU1373_DRC(0) + 0x8, 0x00 },
+ { ADAU1373_DRC(0) + 0x9, 0xc0 },
+ { ADAU1373_DRC(0) + 0xa, 0x88 },
+ { ADAU1373_DRC(0) + 0xb, 0x7a },
+ { ADAU1373_DRC(0) + 0xc, 0xdf },
+ { ADAU1373_DRC(0) + 0xd, 0x20 },
+ { ADAU1373_DRC(0) + 0xe, 0x00 },
+ { ADAU1373_DRC(0) + 0xf, 0x00 },
+ { ADAU1373_DRC(1) + 0x0, 0x78 },
+ { ADAU1373_DRC(1) + 0x1, 0x18 },
+ { ADAU1373_DRC(1) + 0x2, 0x00 },
+ { ADAU1373_DRC(1) + 0x3, 0x00 },
+ { ADAU1373_DRC(1) + 0x4, 0x00 },
+ { ADAU1373_DRC(1) + 0x5, 0xc0 },
+ { ADAU1373_DRC(1) + 0x6, 0x00 },
+ { ADAU1373_DRC(1) + 0x7, 0x00 },
+ { ADAU1373_DRC(1) + 0x8, 0x00 },
+ { ADAU1373_DRC(1) + 0x9, 0xc0 },
+ { ADAU1373_DRC(1) + 0xa, 0x88 },
+ { ADAU1373_DRC(1) + 0xb, 0x7a },
+ { ADAU1373_DRC(1) + 0xc, 0xdf },
+ { ADAU1373_DRC(1) + 0xd, 0x20 },
+ { ADAU1373_DRC(1) + 0xe, 0x00 },
+ { ADAU1373_DRC(1) + 0xf, 0x00 },
+ { ADAU1373_DRC(2) + 0x0, 0x78 },
+ { ADAU1373_DRC(2) + 0x1, 0x18 },
+ { ADAU1373_DRC(2) + 0x2, 0x00 },
+ { ADAU1373_DRC(2) + 0x3, 0x00 },
+ { ADAU1373_DRC(2) + 0x4, 0x00 },
+ { ADAU1373_DRC(2) + 0x5, 0xc0 },
+ { ADAU1373_DRC(2) + 0x6, 0x00 },
+ { ADAU1373_DRC(2) + 0x7, 0x00 },
+ { ADAU1373_DRC(2) + 0x8, 0x00 },
+ { ADAU1373_DRC(2) + 0x9, 0xc0 },
+ { ADAU1373_DRC(2) + 0xa, 0x88 },
+ { ADAU1373_DRC(2) + 0xb, 0x7a },
+ { ADAU1373_DRC(2) + 0xc, 0xdf },
+ { ADAU1373_DRC(2) + 0xd, 0x20 },
+ { ADAU1373_DRC(2) + 0xe, 0x00 },
+ { ADAU1373_DRC(2) + 0xf, 0x00 },
+ { ADAU1373_3D_CTRL1, 0x00 },
+ { ADAU1373_3D_CTRL2, 0x00 },
+ { ADAU1373_FDSP_SEL1, 0x00 },
+ { ADAU1373_FDSP_SEL2, 0x00 },
+ { ADAU1373_FDSP_SEL3, 0x00 },
+ { ADAU1373_FDSP_SEL4, 0x00 },
+ { ADAU1373_DIGMICCTRL, 0x00 },
+ { ADAU1373_DIGEN, 0x00 },
};
static const unsigned int adau1373_out_tlv[] = {
@@ -418,6 +553,7 @@ static int adau1373_pll_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
+ struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
unsigned int pll_id = w->name[3] - '1';
unsigned int val;
@@ -426,7 +562,7 @@ static int adau1373_pll_event(struct snd_soc_dapm_widget *w,
else
val = 0;
- snd_soc_update_bits(codec, ADAU1373_PLL_CTRL6(pll_id),
+ regmap_update_bits(adau1373->regmap, ADAU1373_PLL_CTRL6(pll_id),
ADAU1373_PLL_CTRL6_PLL_EN, val);
if (SND_SOC_DAPM_EVENT_ON(event))
@@ -938,7 +1074,7 @@ static int adau1373_hw_params(struct snd_pcm_substream *substream,
adau1373_dai->enable_src = (div != 0);
- snd_soc_update_bits(codec, ADAU1373_BCLKDIV(dai->id),
+ regmap_update_bits(adau1373->regmap, ADAU1373_BCLKDIV(dai->id),
ADAU1373_BCLKDIV_SR_MASK | ADAU1373_BCLKDIV_BCLK_MASK,
(div << 2) | ADAU1373_BCLKDIV_64);
@@ -959,7 +1095,7 @@ static int adau1373_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- return snd_soc_update_bits(codec, ADAU1373_DAI(dai->id),
+ return regmap_update_bits(adau1373->regmap, ADAU1373_DAI(dai->id),
ADAU1373_DAI_WLEN_MASK, ctrl);
}
@@ -1016,7 +1152,7 @@ static int adau1373_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}
- snd_soc_update_bits(codec, ADAU1373_DAI(dai->id),
+ regmap_update_bits(adau1373->regmap, ADAU1373_DAI(dai->id),
~ADAU1373_DAI_WLEN_MASK, ctrl);
return 0;
@@ -1039,7 +1175,7 @@ static int adau1373_set_dai_sysclk(struct snd_soc_dai *dai,
adau1373_dai->sysclk = freq;
adau1373_dai->clk_src = clk_id;
- snd_soc_update_bits(dai->codec, ADAU1373_BCLKDIV(dai->id),
+ regmap_update_bits(adau1373->regmap, ADAU1373_BCLKDIV(dai->id),
ADAU1373_BCLKDIV_SOURCE, clk_id << 5);
return 0;
@@ -1120,6 +1256,7 @@ static struct snd_soc_dai_driver adau1373_dai_driver[] = {
static int adau1373_set_pll(struct snd_soc_codec *codec, int pll_id,
int source, unsigned int freq_in, unsigned int freq_out)
{
+ struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
unsigned int dpll_div = 0;
unsigned int x, r, n, m, i, j, mode;
@@ -1187,36 +1324,36 @@ static int adau1373_set_pll(struct snd_soc_codec *codec, int pll_id,
if (dpll_div) {
dpll_div = 11 - dpll_div;
- snd_soc_update_bits(codec, ADAU1373_PLL_CTRL6(pll_id),
+ regmap_update_bits(adau1373->regmap, ADAU1373_PLL_CTRL6(pll_id),
ADAU1373_PLL_CTRL6_DPLL_BYPASS, 0);
} else {
- snd_soc_update_bits(codec, ADAU1373_PLL_CTRL6(pll_id),
+ regmap_update_bits(adau1373->regmap, ADAU1373_PLL_CTRL6(pll_id),
ADAU1373_PLL_CTRL6_DPLL_BYPASS,
ADAU1373_PLL_CTRL6_DPLL_BYPASS);
}
- snd_soc_write(codec, ADAU1373_DPLL_CTRL(pll_id),
+ regmap_write(adau1373->regmap, ADAU1373_DPLL_CTRL(pll_id),
(source << 4) | dpll_div);
- snd_soc_write(codec, ADAU1373_PLL_CTRL1(pll_id), (m >> 8) & 0xff);
- snd_soc_write(codec, ADAU1373_PLL_CTRL2(pll_id), m & 0xff);
- snd_soc_write(codec, ADAU1373_PLL_CTRL3(pll_id), (n >> 8) & 0xff);
- snd_soc_write(codec, ADAU1373_PLL_CTRL4(pll_id), n & 0xff);
- snd_soc_write(codec, ADAU1373_PLL_CTRL5(pll_id),
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL1(pll_id), (m >> 8) & 0xff);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL2(pll_id), m & 0xff);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL3(pll_id), (n >> 8) & 0xff);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL4(pll_id), n & 0xff);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL5(pll_id),
(r << 3) | (x << 1) | mode);
/* Set sysclk to pll_rate / 4 */
- snd_soc_update_bits(codec, ADAU1373_CLK_SRC_DIV(pll_id), 0x3f, 0x09);
+ regmap_update_bits(adau1373->regmap, ADAU1373_CLK_SRC_DIV(pll_id), 0x3f, 0x09);
return 0;
}
-static void adau1373_load_drc_settings(struct snd_soc_codec *codec,
+static void adau1373_load_drc_settings(struct adau1373 *adau1373,
unsigned int nr, uint8_t *drc)
{
unsigned int i;
for (i = 0; i < ADAU1373_DRC_SIZE; ++i)
- snd_soc_write(codec, ADAU1373_DRC(nr) + i, drc[i]);
+ regmap_write(adau1373->regmap, ADAU1373_DRC(nr) + i, drc[i]);
}
static bool adau1373_valid_micbias(enum adau1373_micbias_voltage micbias)
@@ -1235,13 +1372,14 @@ static bool adau1373_valid_micbias(enum adau1373_micbias_voltage micbias)
static int adau1373_probe(struct snd_soc_codec *codec)
{
+ struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
struct adau1373_platform_data *pdata = codec->dev->platform_data;
bool lineout_differential = false;
unsigned int val;
int ret;
int i;
- ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
+ ret = snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
if (ret) {
dev_err(codec->dev, "failed to set cache I/O: %d\n", ret);
return ret;
@@ -1256,7 +1394,7 @@ static int adau1373_probe(struct snd_soc_codec *codec)
return -EINVAL;
for (i = 0; i < pdata->num_drc; ++i) {
- adau1373_load_drc_settings(codec, i,
+ adau1373_load_drc_settings(adau1373, i,
pdata->drc_setting[i]);
}
@@ -1268,18 +1406,18 @@ static int adau1373_probe(struct snd_soc_codec *codec)
if (pdata->input_differential[i])
val |= BIT(i);
}
- snd_soc_write(codec, ADAU1373_INPUT_MODE, val);
+ regmap_write(adau1373->regmap, ADAU1373_INPUT_MODE, val);
val = 0;
if (pdata->lineout_differential)
val |= ADAU1373_OUTPUT_CTRL_LDIFF;
if (pdata->lineout_ground_sense)
val |= ADAU1373_OUTPUT_CTRL_LNFBEN;
- snd_soc_write(codec, ADAU1373_OUTPUT_CTRL, val);
+ regmap_write(adau1373->regmap, ADAU1373_OUTPUT_CTRL, val);
lineout_differential = pdata->lineout_differential;
- snd_soc_write(codec, ADAU1373_EP_CTRL,
+ regmap_write(adau1373->regmap, ADAU1373_EP_CTRL,
(pdata->micbias1 << ADAU1373_EP_CTRL_MICBIAS1_OFFSET) |
(pdata->micbias2 << ADAU1373_EP_CTRL_MICBIAS2_OFFSET));
}
@@ -1289,7 +1427,7 @@ static int adau1373_probe(struct snd_soc_codec *codec)
ARRAY_SIZE(adau1373_lineout2_controls));
}
- snd_soc_write(codec, ADAU1373_ADC_CTRL,
+ regmap_write(adau1373->regmap, ADAU1373_ADC_CTRL,
ADAU1373_ADC_CTRL_RESET_FORCE | ADAU1373_ADC_CTRL_PEAK_DETECT);
return 0;
@@ -1298,17 +1436,19 @@ static int adau1373_probe(struct snd_soc_codec *codec)
static int adau1373_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
+
switch (level) {
case SND_SOC_BIAS_ON:
break;
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
- snd_soc_update_bits(codec, ADAU1373_PWDN_CTRL3,
+ regmap_update_bits(adau1373->regmap, ADAU1373_PWDN_CTRL3,
ADAU1373_PWDN_CTRL3_PWR_EN, ADAU1373_PWDN_CTRL3_PWR_EN);
break;
case SND_SOC_BIAS_OFF:
- snd_soc_update_bits(codec, ADAU1373_PWDN_CTRL3,
+ regmap_update_bits(adau1373->regmap, ADAU1373_PWDN_CTRL3,
ADAU1373_PWDN_CTRL3_PWR_EN, 0);
break;
}
@@ -1324,17 +1464,49 @@ static int adau1373_remove(struct snd_soc_codec *codec)
static int adau1373_suspend(struct snd_soc_codec *codec)
{
- return adau1373_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ ret = adau1373_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ regcache_cache_only(adau1373->regmap, true);
+
+ return ret;
}
static int adau1373_resume(struct snd_soc_codec *codec)
{
+ struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
+
+ regcache_cache_only(adau1373->regmap, false);
adau1373_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- snd_soc_cache_sync(codec);
+ regcache_sync(adau1373->regmap);
return 0;
}
+static bool adau1373_register_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ADAU1373_SOFT_RESET:
+ case ADAU1373_ADC_DAC_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config adau1373_regmap_config = {
+ .val_bits = 8,
+ .reg_bits = 8,
+
+ .volatile_reg = adau1373_register_volatile,
+ .max_register = ADAU1373_SOFT_RESET,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = adau1373_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adau1373_reg_defaults),
+};
+
static struct snd_soc_codec_driver adau1373_codec_driver = {
.probe = adau1373_probe,
.remove = adau1373_remove,
@@ -1342,9 +1514,6 @@ static struct snd_soc_codec_driver adau1373_codec_driver = {
.resume = adau1373_resume,
.set_bias_level = adau1373_set_bias_level,
.idle_bias_off = true,
- .reg_cache_size = ARRAY_SIZE(adau1373_default_regs),
- .reg_cache_default = adau1373_default_regs,
- .reg_word_size = sizeof(uint8_t),
.set_pll = adau1373_set_pll,
@@ -1366,6 +1535,13 @@ static int adau1373_i2c_probe(struct i2c_client *client,
if (!adau1373)
return -ENOMEM;
+ adau1373->regmap = devm_regmap_init_i2c(client,
+ &adau1373_regmap_config);
+ if (IS_ERR(adau1373->regmap))
+ return PTR_ERR(adau1373->regmap);
+
+ regmap_write(adau1373->regmap, ADAU1373_SOFT_RESET, 0x00);
+
dev_set_drvdata(&client->dev, adau1373);
ret = snd_soc_register_codec(&client->dev, &adau1373_codec_driver,
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index 15b012d0f226..14a7c169d004 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -115,22 +115,34 @@
#define ADAV80X_PLL_OUTE_SYSCLKPD(x) BIT(2 - (x))
-static u8 adav80x_default_regs[] = {
- 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x02, 0x01, 0x80, 0x26, 0x00, 0x00,
- 0x02, 0x40, 0x20, 0x00, 0x09, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x04, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0x92, 0xb1, 0x37,
- 0x48, 0xd2, 0xfb, 0xca, 0xd2, 0x15, 0xe8, 0x29, 0xb9, 0x6a, 0xda, 0x2b,
- 0xb7, 0xc0, 0x11, 0x65, 0x5c, 0xf6, 0xff, 0x8d, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa5, 0x00, 0x00,
- 0x00, 0xe8, 0x46, 0xe1, 0x5b, 0xd3, 0x43, 0x77, 0x93, 0xa7, 0x44, 0xee,
- 0x32, 0x12, 0xc0, 0x11, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x3f,
- 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x1d, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x52, 0x00,
+static struct reg_default adav80x_reg_defaults[] = {
+ { ADAV80X_PLAYBACK_CTRL, 0x01 },
+ { ADAV80X_AUX_IN_CTRL, 0x01 },
+ { ADAV80X_REC_CTRL, 0x02 },
+ { ADAV80X_AUX_OUT_CTRL, 0x01 },
+ { ADAV80X_DPATH_CTRL1, 0xc0 },
+ { ADAV80X_DPATH_CTRL2, 0x11 },
+ { ADAV80X_DAC_CTRL1, 0x00 },
+ { ADAV80X_DAC_CTRL2, 0x00 },
+ { ADAV80X_DAC_CTRL3, 0x00 },
+ { ADAV80X_DAC_L_VOL, 0xff },
+ { ADAV80X_DAC_R_VOL, 0xff },
+ { ADAV80X_PGA_L_VOL, 0x00 },
+ { ADAV80X_PGA_R_VOL, 0x00 },
+ { ADAV80X_ADC_CTRL1, 0x00 },
+ { ADAV80X_ADC_CTRL2, 0x00 },
+ { ADAV80X_ADC_L_VOL, 0xff },
+ { ADAV80X_ADC_R_VOL, 0xff },
+ { ADAV80X_PLL_CTRL1, 0x00 },
+ { ADAV80X_PLL_CTRL2, 0x00 },
+ { ADAV80X_ICLK_CTRL1, 0x00 },
+ { ADAV80X_ICLK_CTRL2, 0x00 },
+ { ADAV80X_PLL_CLK_SRC, 0x00 },
+ { ADAV80X_PLL_OUTE, 0x00 },
};
struct adav80x {
- enum snd_soc_control_type control_type;
+ struct regmap *regmap;
enum adav80x_clk_src clk_src;
unsigned int sysclk;
@@ -298,7 +310,7 @@ static int adav80x_set_deemph(struct snd_soc_codec *codec)
val = ADAV80X_DAC_CTRL2_DEEMPH_NONE;
}
- return snd_soc_update_bits(codec, ADAV80X_DAC_CTRL2,
+ return regmap_update_bits(adav80x->regmap, ADAV80X_DAC_CTRL2,
ADAV80X_DAC_CTRL2_DEEMPH_MASK, val);
}
@@ -394,10 +406,11 @@ static int adav80x_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}
- snd_soc_update_bits(codec, adav80x_port_ctrl_regs[dai->id][0],
+ regmap_update_bits(adav80x->regmap, adav80x_port_ctrl_regs[dai->id][0],
ADAV80X_CAPTURE_MODE_MASK | ADAV80X_CAPTURE_MODE_MASTER,
capture);
- snd_soc_write(codec, adav80x_port_ctrl_regs[dai->id][1], playback);
+ regmap_write(adav80x->regmap, adav80x_port_ctrl_regs[dai->id][1],
+ playback);
adav80x->dai_fmt[dai->id] = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
@@ -407,6 +420,7 @@ static int adav80x_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
static int adav80x_set_adc_clock(struct snd_soc_codec *codec,
unsigned int sample_rate)
{
+ struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
unsigned int val;
if (sample_rate <= 48000)
@@ -414,7 +428,7 @@ static int adav80x_set_adc_clock(struct snd_soc_codec *codec,
else
val = ADAV80X_ADC_CTRL1_MODULATOR_64FS;
- snd_soc_update_bits(codec, ADAV80X_ADC_CTRL1,
+ regmap_update_bits(adav80x->regmap, ADAV80X_ADC_CTRL1,
ADAV80X_ADC_CTRL1_MODULATOR_MASK, val);
return 0;
@@ -423,6 +437,7 @@ static int adav80x_set_adc_clock(struct snd_soc_codec *codec,
static int adav80x_set_dac_clock(struct snd_soc_codec *codec,
unsigned int sample_rate)
{
+ struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
unsigned int val;
if (sample_rate <= 48000)
@@ -430,7 +445,7 @@ static int adav80x_set_dac_clock(struct snd_soc_codec *codec,
else
val = ADAV80X_DAC_CTRL2_DIV2 | ADAV80X_DAC_CTRL2_INTERPOL_128FS;
- snd_soc_update_bits(codec, ADAV80X_DAC_CTRL2,
+ regmap_update_bits(adav80x->regmap, ADAV80X_DAC_CTRL2,
ADAV80X_DAC_CTRL2_DIV_MASK | ADAV80X_DAC_CTRL2_INTERPOL_MASK,
val);
@@ -440,6 +455,7 @@ static int adav80x_set_dac_clock(struct snd_soc_codec *codec,
static int adav80x_set_capture_pcm_format(struct snd_soc_codec *codec,
struct snd_soc_dai *dai, snd_pcm_format_t format)
{
+ struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
unsigned int val;
switch (format) {
@@ -459,7 +475,7 @@ static int adav80x_set_capture_pcm_format(struct snd_soc_codec *codec,
return -EINVAL;
}
- snd_soc_update_bits(codec, adav80x_port_ctrl_regs[dai->id][0],
+ regmap_update_bits(adav80x->regmap, adav80x_port_ctrl_regs[dai->id][0],
ADAV80X_CAPTURE_WORD_LEN_MASK, val);
return 0;
@@ -491,7 +507,7 @@ static int adav80x_set_playback_pcm_format(struct snd_soc_codec *codec,
return -EINVAL;
}
- snd_soc_update_bits(codec, adav80x_port_ctrl_regs[dai->id][1],
+ regmap_update_bits(adav80x->regmap, adav80x_port_ctrl_regs[dai->id][1],
ADAV80X_PLAYBACK_MODE_MASK, val);
return 0;
@@ -554,8 +570,10 @@ static int adav80x_set_sysclk(struct snd_soc_codec *codec,
ADAV80X_ICLK_CTRL1_ICLK2_SRC(clk_id);
iclk_ctrl2 = ADAV80X_ICLK_CTRL2_ICLK1_SRC(clk_id);
- snd_soc_write(codec, ADAV80X_ICLK_CTRL1, iclk_ctrl1);
- snd_soc_write(codec, ADAV80X_ICLK_CTRL2, iclk_ctrl2);
+ regmap_write(adav80x->regmap, ADAV80X_ICLK_CTRL1,
+ iclk_ctrl1);
+ regmap_write(adav80x->regmap, ADAV80X_ICLK_CTRL2,
+ iclk_ctrl2);
snd_soc_dapm_sync(&codec->dapm);
}
@@ -575,10 +593,12 @@ static int adav80x_set_sysclk(struct snd_soc_codec *codec,
mask = ADAV80X_PLL_OUTE_SYSCLKPD(clk_id);
if (freq == 0) {
- snd_soc_update_bits(codec, ADAV80X_PLL_OUTE, mask, mask);
+ regmap_update_bits(adav80x->regmap, ADAV80X_PLL_OUTE,
+ mask, mask);
adav80x->sysclk_pd[clk_id] = true;
} else {
- snd_soc_update_bits(codec, ADAV80X_PLL_OUTE, mask, 0);
+ regmap_update_bits(adav80x->regmap, ADAV80X_PLL_OUTE,
+ mask, 0);
adav80x->sysclk_pd[clk_id] = false;
}
@@ -650,9 +670,9 @@ static int adav80x_set_pll(struct snd_soc_codec *codec, int pll_id,
return -EINVAL;
}
- snd_soc_update_bits(codec, ADAV80X_PLL_CTRL1, ADAV80X_PLL_CTRL1_PLLDIV,
- pll_ctrl1);
- snd_soc_update_bits(codec, ADAV80X_PLL_CTRL2,
+ regmap_update_bits(adav80x->regmap, ADAV80X_PLL_CTRL1,
+ ADAV80X_PLL_CTRL1_PLLDIV, pll_ctrl1);
+ regmap_update_bits(adav80x->regmap, ADAV80X_PLL_CTRL2,
ADAV80X_PLL_CTRL2_PLL_MASK(pll_id), pll_ctrl2);
if (source != adav80x->pll_src) {
@@ -661,7 +681,7 @@ static int adav80x_set_pll(struct snd_soc_codec *codec, int pll_id,
else
pll_src = ADAV80X_PLL_CLK_SRC_PLL_XIN(pll_id);
- snd_soc_update_bits(codec, ADAV80X_PLL_CLK_SRC,
+ regmap_update_bits(adav80x->regmap, ADAV80X_PLL_CLK_SRC,
ADAV80X_PLL_CLK_SRC_PLL_MASK(pll_id), pll_src);
adav80x->pll_src = source;
@@ -675,6 +695,7 @@ static int adav80x_set_pll(struct snd_soc_codec *codec, int pll_id,
static int adav80x_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
unsigned int mask = ADAV80X_DAC_CTRL1_PD;
switch (level) {
@@ -683,10 +704,12 @@ static int adav80x_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
- snd_soc_update_bits(codec, ADAV80X_DAC_CTRL1, mask, 0x00);
+ regmap_update_bits(adav80x->regmap, ADAV80X_DAC_CTRL1, mask,
+ 0x00);
break;
case SND_SOC_BIAS_OFF:
- snd_soc_update_bits(codec, ADAV80X_DAC_CTRL1, mask, mask);
+ regmap_update_bits(adav80x->regmap, ADAV80X_DAC_CTRL1, mask,
+ mask);
break;
}
@@ -780,7 +803,7 @@ static int adav80x_probe(struct snd_soc_codec *codec)
int ret;
struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
- ret = snd_soc_codec_set_cache_io(codec, 7, 9, adav80x->control_type);
+ ret = snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
if (ret) {
dev_err(codec->dev, "failed to set cache I/O: %d\n", ret);
return ret;
@@ -791,23 +814,31 @@ static int adav80x_probe(struct snd_soc_codec *codec)
snd_soc_dapm_force_enable_pin(&codec->dapm, "PLL2");
/* Power down S/PDIF receiver, since it is currently not supported */
- snd_soc_write(codec, ADAV80X_PLL_OUTE, 0x20);
+ regmap_write(adav80x->regmap, ADAV80X_PLL_OUTE, 0x20);
/* Disable DAC zero flag */
- snd_soc_write(codec, ADAV80X_DAC_CTRL3, 0x6);
+ regmap_write(adav80x->regmap, ADAV80X_DAC_CTRL3, 0x6);
return adav80x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
}
static int adav80x_suspend(struct snd_soc_codec *codec)
{
- return adav80x_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ ret = adav80x_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ regcache_cache_only(adav80x->regmap, true);
+
+ return ret;
}
static int adav80x_resume(struct snd_soc_codec *codec)
{
+ struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
+
+ regcache_cache_only(adav80x->regmap, false);
adav80x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- codec->cache_sync = 1;
- snd_soc_cache_sync(codec);
+ regcache_sync(adav80x->regmap);
return 0;
}
@@ -827,10 +858,6 @@ static struct snd_soc_codec_driver adav80x_codec_driver = {
.set_pll = adav80x_set_pll,
.set_sysclk = adav80x_set_sysclk,
- .reg_word_size = sizeof(u8),
- .reg_cache_size = ARRAY_SIZE(adav80x_default_regs),
- .reg_cache_default = adav80x_default_regs,
-
.controls = adav80x_controls,
.num_controls = ARRAY_SIZE(adav80x_controls),
.dapm_widgets = adav80x_dapm_widgets,
@@ -839,18 +866,21 @@ static struct snd_soc_codec_driver adav80x_codec_driver = {
.num_dapm_routes = ARRAY_SIZE(adav80x_dapm_routes),
};
-static int adav80x_bus_probe(struct device *dev,
- enum snd_soc_control_type control_type)
+static int adav80x_bus_probe(struct device *dev, struct regmap *regmap)
{
struct adav80x *adav80x;
int ret;
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
adav80x = kzalloc(sizeof(*adav80x), GFP_KERNEL);
if (!adav80x)
return -ENOMEM;
+
dev_set_drvdata(dev, adav80x);
- adav80x->control_type = control_type;
+ adav80x->regmap = regmap;
ret = snd_soc_register_codec(dev, &adav80x_codec_driver,
adav80x_dais, ARRAY_SIZE(adav80x_dais));
@@ -868,6 +898,19 @@ static int adav80x_bus_remove(struct device *dev)
}
#if defined(CONFIG_SPI_MASTER)
+static const struct regmap_config adav80x_spi_regmap_config = {
+ .val_bits = 8,
+ .pad_bits = 1,
+ .reg_bits = 7,
+ .read_flag_mask = 0x01,
+
+ .max_register = ADAV80X_PLL_OUTE,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = adav80x_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adav80x_reg_defaults),
+};
+
static const struct spi_device_id adav80x_spi_id[] = {
{ "adav801", 0 },
{ }
@@ -876,7 +919,8 @@ MODULE_DEVICE_TABLE(spi, adav80x_spi_id);
static int adav80x_spi_probe(struct spi_device *spi)
{
- return adav80x_bus_probe(&spi->dev, SND_SOC_SPI);
+ return adav80x_bus_probe(&spi->dev,
+ devm_regmap_init_spi(spi, &adav80x_spi_regmap_config));
}
static int adav80x_spi_remove(struct spi_device *spi)
@@ -896,6 +940,18 @@ static struct spi_driver adav80x_spi_driver = {
#endif
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static const struct regmap_config adav80x_i2c_regmap_config = {
+ .val_bits = 8,
+ .pad_bits = 1,
+ .reg_bits = 7,
+
+ .max_register = ADAV80X_PLL_OUTE,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = adav80x_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adav80x_reg_defaults),
+};
+
static const struct i2c_device_id adav80x_i2c_id[] = {
{ "adav803", 0 },
{ }
@@ -905,7 +961,8 @@ MODULE_DEVICE_TABLE(i2c, adav80x_i2c_id);
static int adav80x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- return adav80x_bus_probe(&client->dev, SND_SOC_I2C);
+ return adav80x_bus_probe(&client->dev,
+ devm_regmap_init_i2c(client, &adav80x_i2c_regmap_config));
}
static int adav80x_i2c_remove(struct i2c_client *client)
diff --git a/sound/soc/codecs/ak4104.c b/sound/soc/codecs/ak4104.c
index 71059c07ae7b..b4819dcd4f4d 100644
--- a/sound/soc/codecs/ak4104.c
+++ b/sound/soc/codecs/ak4104.c
@@ -45,8 +45,6 @@
#define AK4104_TX_TXE (1 << 0)
#define AK4104_TX_V (1 << 1)
-#define DRV_NAME "ak4104-codec"
-
struct ak4104_private {
struct regmap *regmap;
};
@@ -291,12 +289,19 @@ static const struct of_device_id ak4104_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ak4104_of_match);
+static const struct spi_device_id ak4104_id_table[] = {
+ { "ak4104", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ak4104_id_table);
+
static struct spi_driver ak4104_spi_driver = {
.driver = {
- .name = DRV_NAME,
+ .name = "ak4104",
.owner = THIS_MODULE,
.of_match_table = ak4104_of_match,
},
+ .id_table = ak4104_id_table,
.probe = ak4104_spi_probe,
.remove = ak4104_spi_remove,
};
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index 5f9af1fb76e8..49cc5f6d6dba 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -328,7 +328,7 @@ static int ak4641_i2s_hw_params(struct snd_pcm_substream *substream,
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
ak4641->playback_fs = rate;
ak4641_set_deemph(codec);
- };
+ }
return 0;
}
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 2d0378709702..090d499bb7eb 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -257,7 +257,7 @@ static int ak4642_dai_startup(struct snd_pcm_substream *substream,
* This operation came from example code of
* "ASAHI KASEI AK4642" (japanese) manual p94.
*/
- snd_soc_write(codec, SG_SL1, PMMP | MGAIN0);
+ snd_soc_update_bits(codec, SG_SL1, PMMP | MGAIN0, PMMP | MGAIN0);
snd_soc_write(codec, TIMER, ZTM(0x3) | WTM(0x3));
snd_soc_write(codec, ALC_CTL1, ALC | LMTH0);
snd_soc_update_bits(codec, PW_MGMT1, PMADL, PMADL);
@@ -352,7 +352,6 @@ static int ak4642_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
*/
default:
return -EINVAL;
- break;
}
snd_soc_update_bits(codec, MD_CTL1, DIF_MASK, data);
@@ -405,7 +404,6 @@ static int ak4642_dai_hw_params(struct snd_pcm_substream *substream,
break;
default:
return -EINVAL;
- break;
}
snd_soc_update_bits(codec, MD_CTL2, FS_MASK, rate);
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index f2e62e45f912..19e9f222d09c 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -614,7 +614,7 @@ struct _pll_div {
};
/* Note : pll code from original alc5632 driver. Not sure of how good it is */
-/* usefull only for master mode */
+/* useful only for master mode */
static const struct _pll_div codec_master_pll_div[] = {
{ 2048000, 8192000, 0x0ea0},
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 657808ba1418..6f05b17d1965 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1477,21 +1477,25 @@ static void arizona_enable_fll(struct arizona_fll *fll,
{
struct arizona *arizona = fll->arizona;
int ret;
+ bool use_sync = false;
/*
* If we have both REFCLK and SYNCCLK then enable both,
* otherwise apply the SYNCCLK settings to REFCLK.
*/
- if (fll->ref_src >= 0 && fll->ref_src != fll->sync_src) {
+ if (fll->ref_src >= 0 && fll->ref_freq &&
+ fll->ref_src != fll->sync_src) {
regmap_update_bits(arizona->regmap, fll->base + 5,
ARIZONA_FLL1_OUTDIV_MASK,
ref->outdiv << ARIZONA_FLL1_OUTDIV_SHIFT);
arizona_apply_fll(arizona, fll->base, ref, fll->ref_src,
false);
- if (fll->sync_src >= 0)
+ if (fll->sync_src >= 0) {
arizona_apply_fll(arizona, fll->base + 0x10, sync,
fll->sync_src, true);
+ use_sync = true;
+ }
} else if (fll->sync_src >= 0) {
regmap_update_bits(arizona->regmap, fll->base + 5,
ARIZONA_FLL1_OUTDIV_MASK,
@@ -1511,7 +1515,7 @@ static void arizona_enable_fll(struct arizona_fll *fll,
* Increase the bandwidth if we're not using a low frequency
* sync source.
*/
- if (fll->sync_src >= 0 && fll->sync_freq > 100000)
+ if (use_sync && fll->sync_freq > 100000)
regmap_update_bits(arizona->regmap, fll->base + 0x17,
ARIZONA_FLL1_SYNC_BW, 0);
else
@@ -1526,8 +1530,7 @@ static void arizona_enable_fll(struct arizona_fll *fll,
regmap_update_bits(arizona->regmap, fll->base + 1,
ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
- if (fll->ref_src >= 0 && fll->sync_src >= 0 &&
- fll->ref_src != fll->sync_src)
+ if (use_sync)
regmap_update_bits(arizona->regmap, fll->base + 0x11,
ARIZONA_FLL1_SYNC_ENA,
ARIZONA_FLL1_SYNC_ENA);
@@ -1561,10 +1564,12 @@ int arizona_set_fll_refclk(struct arizona_fll *fll, int source,
if (fll->ref_src == source && fll->ref_freq == Fref)
return 0;
- if (fll->fout && Fref > 0) {
- ret = arizona_calc_fll(fll, &ref, Fref, fll->fout);
- if (ret != 0)
- return ret;
+ if (fll->fout) {
+ if (Fref > 0) {
+ ret = arizona_calc_fll(fll, &ref, Fref, fll->fout);
+ if (ret != 0)
+ return ret;
+ }
if (fll->sync_src >= 0) {
ret = arizona_calc_fll(fll, &sync, fll->sync_freq,
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 23316c887b19..43737a27d79c 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -38,24 +38,6 @@
#include <sound/soc.h>
#include <sound/initval.h>
-static inline unsigned int cq93vc_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- struct davinci_vc *davinci_vc = codec->control_data;
-
- return readl(davinci_vc->base + reg);
-}
-
-static inline int cq93vc_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- struct davinci_vc *davinci_vc = codec->control_data;
-
- writel(value, davinci_vc->base + reg);
-
- return 0;
-}
-
static const struct snd_kcontrol_new cq93vc_snd_controls[] = {
SOC_SINGLE("PGA Capture Volume", DAVINCI_VC_REG05, 0, 0x03, 0),
SOC_SINGLE("Mono DAC Playback Volume", DAVINCI_VC_REG09, 0, 0x3f, 0),
@@ -64,13 +46,15 @@ static const struct snd_kcontrol_new cq93vc_snd_controls[] = {
static int cq93vc_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- u8 reg = cq93vc_read(codec, DAVINCI_VC_REG09) & ~DAVINCI_VC_REG09_MUTE;
+ u8 reg;
if (mute)
- cq93vc_write(codec, DAVINCI_VC_REG09,
- reg | DAVINCI_VC_REG09_MUTE);
+ reg = DAVINCI_VC_REG09_MUTE;
else
- cq93vc_write(codec, DAVINCI_VC_REG09, reg);
+ reg = 0;
+
+ snd_soc_update_bits(codec, DAVINCI_VC_REG09, DAVINCI_VC_REG09_MUTE,
+ reg);
return 0;
}
@@ -79,7 +63,7 @@ static int cq93vc_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct davinci_vc *davinci_vc = codec->control_data;
+ struct davinci_vc *davinci_vc = codec->dev->platform_data;
switch (freq) {
case 22579200:
@@ -97,18 +81,18 @@ static int cq93vc_set_bias_level(struct snd_soc_codec *codec,
{
switch (level) {
case SND_SOC_BIAS_ON:
- cq93vc_write(codec, DAVINCI_VC_REG12,
+ snd_soc_write(codec, DAVINCI_VC_REG12,
DAVINCI_VC_REG12_POWER_ALL_ON);
break;
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
- cq93vc_write(codec, DAVINCI_VC_REG12,
+ snd_soc_write(codec, DAVINCI_VC_REG12,
DAVINCI_VC_REG12_POWER_ALL_OFF);
break;
case SND_SOC_BIAS_OFF:
/* force all power off */
- cq93vc_write(codec, DAVINCI_VC_REG12,
+ snd_soc_write(codec, DAVINCI_VC_REG12,
DAVINCI_VC_REG12_POWER_ALL_OFF);
break;
}
@@ -154,11 +138,9 @@ static int cq93vc_probe(struct snd_soc_codec *codec)
struct davinci_vc *davinci_vc = codec->dev->platform_data;
davinci_vc->cq93vc.codec = codec;
- codec->control_data = davinci_vc;
+ codec->control_data = davinci_vc->regmap;
- /* Set controls */
- snd_soc_add_codec_controls(codec, cq93vc_snd_controls,
- ARRAY_SIZE(cq93vc_snd_controls));
+ snd_soc_codec_set_cache_io(codec, 32, 32, SND_SOC_REGMAP);
/* Off, with power on */
cq93vc_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -174,12 +156,12 @@ static int cq93vc_remove(struct snd_soc_codec *codec)
}
static struct snd_soc_codec_driver soc_codec_dev_cq93vc = {
- .read = cq93vc_read,
- .write = cq93vc_write,
.set_bias_level = cq93vc_set_bias_level,
.probe = cq93vc_probe,
.remove = cq93vc_remove,
.resume = cq93vc_resume,
+ .controls = cq93vc_snd_controls,
+ .num_controls = ARRAY_SIZE(cq93vc_snd_controls),
};
static int cq93vc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index a20f1bb8f071..f6e953454bc0 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -25,6 +25,7 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <sound/pcm.h>
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index be2ba1b6fe4a..8b427c977083 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/input.h>
@@ -1116,40 +1117,6 @@ static int cs42l52_probe(struct snd_soc_codec *codec)
cs42l52->sysclk = CS42L52_DEFAULT_CLK;
cs42l52->config.format = CS42L52_DEFAULT_FORMAT;
- /* Set Platform MICx CFG */
- snd_soc_update_bits(codec, CS42L52_MICA_CTL,
- CS42L52_MIC_CTL_TYPE_MASK,
- cs42l52->pdata.mica_cfg <<
- CS42L52_MIC_CTL_TYPE_SHIFT);
-
- snd_soc_update_bits(codec, CS42L52_MICB_CTL,
- CS42L52_MIC_CTL_TYPE_MASK,
- cs42l52->pdata.micb_cfg <<
- CS42L52_MIC_CTL_TYPE_SHIFT);
-
- /* if Single Ended, Get Mic_Select */
- if (cs42l52->pdata.mica_cfg)
- snd_soc_update_bits(codec, CS42L52_MICA_CTL,
- CS42L52_MIC_CTL_MIC_SEL_MASK,
- cs42l52->pdata.mica_sel <<
- CS42L52_MIC_CTL_MIC_SEL_SHIFT);
- if (cs42l52->pdata.micb_cfg)
- snd_soc_update_bits(codec, CS42L52_MICB_CTL,
- CS42L52_MIC_CTL_MIC_SEL_MASK,
- cs42l52->pdata.micb_sel <<
- CS42L52_MIC_CTL_MIC_SEL_SHIFT);
-
- /* Set Platform Charge Pump Freq */
- snd_soc_update_bits(codec, CS42L52_CHARGE_PUMP,
- CS42L52_CHARGE_PUMP_MASK,
- cs42l52->pdata.chgfreq <<
- CS42L52_CHARGE_PUMP_SHIFT);
-
- /* Set Platform Bias Level */
- snd_soc_update_bits(codec, CS42L52_IFACE_CTL2,
- CS42L52_IFACE_CTL2_BIAS_LVL,
- cs42l52->pdata.micbias_lvl);
-
return ret;
}
@@ -1205,6 +1172,7 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
const struct i2c_device_id *id)
{
struct cs42l52_private *cs42l52;
+ struct cs42l52_platform_data *pdata = dev_get_platdata(&i2c_client->dev);
int ret;
unsigned int devid = 0;
unsigned int reg;
@@ -1222,11 +1190,22 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
return ret;
}
- i2c_set_clientdata(i2c_client, cs42l52);
+ if (pdata)
+ cs42l52->pdata = *pdata;
+
+ if (cs42l52->pdata.reset_gpio) {
+ ret = gpio_request_one(cs42l52->pdata.reset_gpio,
+ GPIOF_OUT_INIT_HIGH, "CS42L52 /RST");
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n",
+ cs42l52->pdata.reset_gpio, ret);
+ return ret;
+ }
+ gpio_set_value_cansleep(cs42l52->pdata.reset_gpio, 0);
+ gpio_set_value_cansleep(cs42l52->pdata.reset_gpio, 1);
+ }
- if (dev_get_platdata(&i2c_client->dev))
- memcpy(&cs42l52->pdata, dev_get_platdata(&i2c_client->dev),
- sizeof(cs42l52->pdata));
+ i2c_set_clientdata(i2c_client, cs42l52);
ret = regmap_register_patch(cs42l52->regmap, cs42l52_threshold_patch,
ARRAY_SIZE(cs42l52_threshold_patch));
@@ -1244,7 +1223,43 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
return ret;
}
- regcache_cache_only(cs42l52->regmap, true);
+ dev_info(&i2c_client->dev, "Cirrus Logic CS42L52, Revision: %02X\n",
+ reg & 0xFF);
+
+ /* Set Platform Data */
+ if (cs42l52->pdata.mica_cfg)
+ regmap_update_bits(cs42l52->regmap, CS42L52_MICA_CTL,
+ CS42L52_MIC_CTL_TYPE_MASK,
+ cs42l52->pdata.mica_cfg <<
+ CS42L52_MIC_CTL_TYPE_SHIFT);
+
+ if (cs42l52->pdata.micb_cfg)
+ regmap_update_bits(cs42l52->regmap, CS42L52_MICB_CTL,
+ CS42L52_MIC_CTL_TYPE_MASK,
+ cs42l52->pdata.micb_cfg <<
+ CS42L52_MIC_CTL_TYPE_SHIFT);
+
+ if (cs42l52->pdata.mica_sel)
+ regmap_update_bits(cs42l52->regmap, CS42L52_MICA_CTL,
+ CS42L52_MIC_CTL_MIC_SEL_MASK,
+ cs42l52->pdata.mica_sel <<
+ CS42L52_MIC_CTL_MIC_SEL_SHIFT);
+ if (cs42l52->pdata.micb_sel)
+ regmap_update_bits(cs42l52->regmap, CS42L52_MICB_CTL,
+ CS42L52_MIC_CTL_MIC_SEL_MASK,
+ cs42l52->pdata.micb_sel <<
+ CS42L52_MIC_CTL_MIC_SEL_SHIFT);
+
+ if (cs42l52->pdata.chgfreq)
+ regmap_update_bits(cs42l52->regmap, CS42L52_CHARGE_PUMP,
+ CS42L52_CHARGE_PUMP_MASK,
+ cs42l52->pdata.chgfreq <<
+ CS42L52_CHARGE_PUMP_SHIFT);
+
+ if (cs42l52->pdata.micbias_lvl)
+ regmap_update_bits(cs42l52->regmap, CS42L52_IFACE_CTL2,
+ CS42L52_IFACE_CTL2_BIAS_LVL,
+ cs42l52->pdata.micbias_lvl);
ret = snd_soc_register_codec(&i2c_client->dev,
&soc_codec_dev_cs42l52, &cs42l52_dai, 1);
diff --git a/sound/soc/codecs/cs42l52.h b/sound/soc/codecs/cs42l52.h
index 4277012c4719..1a9412d86d17 100644
--- a/sound/soc/codecs/cs42l52.h
+++ b/sound/soc/codecs/cs42l52.h
@@ -269,6 +269,6 @@
#define CS42L52_FIX_BITS1 0x3E
#define CS42L52_FIX_BITS2 0x47
-#define CS42L52_MAX_REGISTER 0x34
+#define CS42L52_MAX_REGISTER 0x47
#endif
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 3b20c86cdb01..549d5d6a3fef 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
@@ -28,6 +29,7 @@
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>
+#include <sound/cs42l73.h>
#include "cs42l73.h"
struct sp_config {
@@ -35,6 +37,7 @@ struct sp_config {
u32 srate;
};
struct cs42l73_private {
+ struct cs42l73_platform_data pdata;
struct sp_config config[3];
struct regmap *regmap;
u32 sysclk;
@@ -310,15 +313,6 @@ static const struct soc_enum ng_delay_enum =
SOC_ENUM_SINGLE(CS42L73_NGCAB, 0,
ARRAY_SIZE(cs42l73_ng_delay_text), cs42l73_ng_delay_text);
-static const char * const charge_pump_freq_text[] = {
- "0", "1", "2", "3", "4",
- "5", "6", "7", "8", "9",
- "10", "11", "12", "13", "14", "15" };
-
-static const struct soc_enum charge_pump_enum =
- SOC_ENUM_SINGLE(CS42L73_CPFCHC, 4,
- ARRAY_SIZE(charge_pump_freq_text), charge_pump_freq_text);
-
static const char * const cs42l73_mono_mix_texts[] = {
"Left", "Right", "Mono Mix"};
@@ -511,8 +505,6 @@ static const struct snd_kcontrol_new cs42l73_snd_controls[] = {
SOC_SINGLE("NG Threshold", CS42L73_NGCAB, 2, 7, 0),
SOC_ENUM("NG Delay", ng_delay_enum),
- SOC_ENUM("Charge Pump Frequency", charge_pump_enum),
-
SOC_DOUBLE_R_TLV("XSP-IP Volume",
CS42L73_XSPAIPAA, CS42L73_XSPBIPBA, 0, 0x3F, 1,
attn_tlv),
@@ -1055,11 +1047,11 @@ static int cs42l73_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
- mmcc |= MS_MASTER;
+ mmcc |= CS42L73_MS_MASTER;
break;
case SND_SOC_DAIFMT_CBS_CFS:
- mmcc &= ~MS_MASTER;
+ mmcc &= ~CS42L73_MS_MASTER;
break;
default:
@@ -1071,11 +1063,11 @@ static int cs42l73_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
switch (format) {
case SND_SOC_DAIFMT_I2S:
- spc &= ~SPDIF_PCM;
+ spc &= ~CS42L73_SPDIF_PCM;
break;
case SND_SOC_DAIFMT_DSP_A:
case SND_SOC_DAIFMT_DSP_B:
- if (mmcc & MS_MASTER) {
+ if (mmcc & CS42L73_MS_MASTER) {
dev_err(codec->dev,
"PCM format in slave mode only\n");
return -EINVAL;
@@ -1085,25 +1077,25 @@ static int cs42l73_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
"PCM format is not supported on ASP port\n");
return -EINVAL;
}
- spc |= SPDIF_PCM;
+ spc |= CS42L73_SPDIF_PCM;
break;
default:
return -EINVAL;
}
- if (spc & SPDIF_PCM) {
+ if (spc & CS42L73_SPDIF_PCM) {
/* Clear PCM mode, clear PCM_BIT_ORDER bit for MSB->LSB */
- spc &= ~(PCM_MODE_MASK | PCM_BIT_ORDER);
+ spc &= ~(CS42L73_PCM_MODE_MASK | CS42L73_PCM_BIT_ORDER);
switch (format) {
case SND_SOC_DAIFMT_DSP_B:
if (inv == SND_SOC_DAIFMT_IB_IF)
- spc |= PCM_MODE0;
+ spc |= CS42L73_PCM_MODE0;
if (inv == SND_SOC_DAIFMT_IB_NF)
- spc |= PCM_MODE1;
+ spc |= CS42L73_PCM_MODE1;
break;
case SND_SOC_DAIFMT_DSP_A:
if (inv == SND_SOC_DAIFMT_IB_IF)
- spc |= PCM_MODE1;
+ spc |= CS42L73_PCM_MODE1;
break;
default:
return -EINVAL;
@@ -1163,7 +1155,7 @@ static int cs42l73_pcm_hw_params(struct snd_pcm_substream *substream,
int mclk_coeff;
int srate = params_rate(params);
- if (priv->config[id].mmcc & MS_MASTER) {
+ if (priv->config[id].mmcc & CS42L73_MS_MASTER) {
/* CS42L73 Master */
/* MCLK -> srate */
mclk_coeff =
@@ -1182,13 +1174,13 @@ static int cs42l73_pcm_hw_params(struct snd_pcm_substream *substream,
priv->config[id].spc &= 0xFC;
/* Use SCLK=64*Fs if internal MCLK >= 6.4MHz */
if (priv->mclk >= 6400000)
- priv->config[id].spc |= MCK_SCLK_64FS;
+ priv->config[id].spc |= CS42L73_MCK_SCLK_64FS;
else
- priv->config[id].spc |= MCK_SCLK_MCLK;
+ priv->config[id].spc |= CS42L73_MCK_SCLK_MCLK;
} else {
/* CS42L73 Slave */
priv->config[id].spc &= 0xFC;
- priv->config[id].spc |= MCK_SCLK_64FS;
+ priv->config[id].spc |= CS42L73_MCK_SCLK_64FS;
}
/* Update ASRCs */
priv->config[id].srate = srate;
@@ -1208,8 +1200,8 @@ static int cs42l73_set_bias_level(struct snd_soc_codec *codec,
switch (level) {
case SND_SOC_BIAS_ON:
- snd_soc_update_bits(codec, CS42L73_DMMCC, MCLKDIS, 0);
- snd_soc_update_bits(codec, CS42L73_PWRCTL1, PDN, 0);
+ snd_soc_update_bits(codec, CS42L73_DMMCC, CS42L73_MCLKDIS, 0);
+ snd_soc_update_bits(codec, CS42L73_PWRCTL1, CS42L73_PDN, 0);
break;
case SND_SOC_BIAS_PREPARE:
@@ -1220,11 +1212,11 @@ static int cs42l73_set_bias_level(struct snd_soc_codec *codec,
regcache_cache_only(cs42l73->regmap, false);
regcache_sync(cs42l73->regmap);
}
- snd_soc_update_bits(codec, CS42L73_PWRCTL1, PDN, 1);
+ snd_soc_update_bits(codec, CS42L73_PWRCTL1, CS42L73_PDN, 1);
break;
case SND_SOC_BIAS_OFF:
- snd_soc_update_bits(codec, CS42L73_PWRCTL1, PDN, 1);
+ snd_soc_update_bits(codec, CS42L73_PWRCTL1, CS42L73_PDN, 1);
if (cs42l73->shutdwn_delay > 0) {
mdelay(cs42l73->shutdwn_delay);
cs42l73->shutdwn_delay = 0;
@@ -1233,7 +1225,7 @@ static int cs42l73_set_bias_level(struct snd_soc_codec *codec,
* down.
*/
}
- snd_soc_update_bits(codec, CS42L73_DMMCC, MCLKDIS, 1);
+ snd_soc_update_bits(codec, CS42L73_DMMCC, CS42L73_MCLKDIS, 1);
break;
}
codec->dapm.bias_level = level;
@@ -1367,11 +1359,16 @@ static int cs42l73_probe(struct snd_soc_codec *codec)
return ret;
}
- regcache_cache_only(cs42l73->regmap, true);
-
cs42l73_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- cs42l73->mclksel = CS42L73_CLKID_MCLK1; /* MCLK1 as master clk */
+ /* Set Charge Pump Frequency */
+ if (cs42l73->pdata.chgfreq)
+ snd_soc_update_bits(codec, CS42L73_CPFCHC,
+ CS42L73_CHARGEPUMP_MASK,
+ cs42l73->pdata.chgfreq << 4);
+
+ /* MCLK1 as master clk */
+ cs42l73->mclksel = CS42L73_CLKID_MCLK1;
cs42l73->mclk = 0;
return ret;
@@ -1415,9 +1412,11 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
const struct i2c_device_id *id)
{
struct cs42l73_private *cs42l73;
+ struct cs42l73_platform_data *pdata = dev_get_platdata(&i2c_client->dev);
int ret;
unsigned int devid = 0;
unsigned int reg;
+ u32 val32;
cs42l73 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42l73_private),
GFP_KERNEL);
@@ -1426,14 +1425,49 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
return -ENOMEM;
}
- i2c_set_clientdata(i2c_client, cs42l73);
-
cs42l73->regmap = devm_regmap_init_i2c(i2c_client, &cs42l73_regmap);
if (IS_ERR(cs42l73->regmap)) {
ret = PTR_ERR(cs42l73->regmap);
dev_err(&i2c_client->dev, "regmap_init() failed: %d\n", ret);
return ret;
}
+
+ if (pdata) {
+ cs42l73->pdata = *pdata;
+ } else {
+ pdata = devm_kzalloc(&i2c_client->dev,
+ sizeof(struct cs42l73_platform_data),
+ GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&i2c_client->dev, "could not allocate pdata\n");
+ return -ENOMEM;
+ }
+ if (i2c_client->dev.of_node) {
+ if (of_property_read_u32(i2c_client->dev.of_node,
+ "chgfreq", &val32) >= 0)
+ pdata->chgfreq = val32;
+ }
+ pdata->reset_gpio = of_get_named_gpio(i2c_client->dev.of_node,
+ "reset-gpio", 0);
+ cs42l73->pdata = *pdata;
+ }
+
+ i2c_set_clientdata(i2c_client, cs42l73);
+
+ if (cs42l73->pdata.reset_gpio) {
+ ret = gpio_request_one(cs42l73->pdata.reset_gpio,
+ GPIOF_OUT_INIT_HIGH, "CS42L73 /RST");
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n",
+ cs42l73->pdata.reset_gpio, ret);
+ return ret;
+ }
+ gpio_set_value_cansleep(cs42l73->pdata.reset_gpio, 0);
+ gpio_set_value_cansleep(cs42l73->pdata.reset_gpio, 1);
+ }
+
+ regcache_cache_bypass(cs42l73->regmap, true);
+
/* initialize codec */
ret = regmap_read(cs42l73->regmap, CS42L73_DEVID_AB, &reg);
devid = (reg & 0xFF) << 12;
@@ -1444,7 +1478,6 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
ret = regmap_read(cs42l73->regmap, CS42L73_DEVID_E, &reg);
devid |= (reg & 0xF0) >> 4;
-
if (devid != CS42L73_DEVID) {
ret = -ENODEV;
dev_err(&i2c_client->dev,
@@ -1462,7 +1495,7 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
dev_info(&i2c_client->dev,
"Cirrus Logic CS42L73, Revision: %02X\n", reg & 0xFF);
- regcache_cache_only(cs42l73->regmap, true);
+ regcache_cache_bypass(cs42l73->regmap, false);
ret = snd_soc_register_codec(&i2c_client->dev,
&soc_codec_dev_cs42l73, cs42l73_dai,
@@ -1478,6 +1511,12 @@ static int cs42l73_i2c_remove(struct i2c_client *client)
return 0;
}
+static const struct of_device_id cs42l73_of_match[] = {
+ { .compatible = "cirrus,cs42l73", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cs42l73_of_match);
+
static const struct i2c_device_id cs42l73_id[] = {
{"cs42l73", 0},
{}
@@ -1489,6 +1528,7 @@ static struct i2c_driver cs42l73_i2c_driver = {
.driver = {
.name = "cs42l73",
.owner = THIS_MODULE,
+ .of_match_table = cs42l73_of_match,
},
.id_table = cs42l73_id,
.probe = cs42l73_i2c_probe,
diff --git a/sound/soc/codecs/cs42l73.h b/sound/soc/codecs/cs42l73.h
index f30a4c4d62e6..45746186a678 100644
--- a/sound/soc/codecs/cs42l73.h
+++ b/sound/soc/codecs/cs42l73.h
@@ -128,59 +128,60 @@
/* Bitfield Definitions */
/* CS42L73_PWRCTL1 */
-#define PDN_ADCB (1 << 7)
-#define PDN_DMICB (1 << 6)
-#define PDN_ADCA (1 << 5)
-#define PDN_DMICA (1 << 4)
-#define PDN_LDO (1 << 2)
-#define DISCHG_FILT (1 << 1)
-#define PDN (1 << 0)
+#define CS42L73_PDN_ADCB (1 << 7)
+#define CS42L73_PDN_DMICB (1 << 6)
+#define CS42L73_PDN_ADCA (1 << 5)
+#define CS42L73_PDN_DMICA (1 << 4)
+#define CS42L73_PDN_LDO (1 << 2)
+#define CS42L73_DISCHG_FILT (1 << 1)
+#define CS42L73_PDN (1 << 0)
/* CS42L73_PWRCTL2 */
-#define PDN_MIC2_BIAS (1 << 7)
-#define PDN_MIC1_BIAS (1 << 6)
-#define PDN_VSP (1 << 4)
-#define PDN_ASP_SDOUT (1 << 3)
-#define PDN_ASP_SDIN (1 << 2)
-#define PDN_XSP_SDOUT (1 << 1)
-#define PDN_XSP_SDIN (1 << 0)
+#define CS42L73_PDN_MIC2_BIAS (1 << 7)
+#define CS42L73_PDN_MIC1_BIAS (1 << 6)
+#define CS42L73_PDN_VSP (1 << 4)
+#define CS42L73_PDN_ASP_SDOUT (1 << 3)
+#define CS42L73_PDN_ASP_SDIN (1 << 2)
+#define CS42L73_PDN_XSP_SDOUT (1 << 1)
+#define CS42L73_PDN_XSP_SDIN (1 << 0)
/* CS42L73_PWRCTL3 */
-#define PDN_THMS (1 << 5)
-#define PDN_SPKLO (1 << 4)
-#define PDN_EAR (1 << 3)
-#define PDN_SPK (1 << 2)
-#define PDN_LO (1 << 1)
-#define PDN_HP (1 << 0)
+#define CS42L73_PDN_THMS (1 << 5)
+#define CS42L73_PDN_SPKLO (1 << 4)
+#define CS42L73_PDN_EAR (1 << 3)
+#define CS42L73_PDN_SPK (1 << 2)
+#define CS42L73_PDN_LO (1 << 1)
+#define CS42L73_PDN_HP (1 << 0)
/* Thermal Overload Detect. Requires interrupt ... */
-#define THMOVLD_150C 0
-#define THMOVLD_132C 1
-#define THMOVLD_115C 2
-#define THMOVLD_098C 3
+#define CS42L73_THMOVLD_150C 0
+#define CS42L73_THMOVLD_132C 1
+#define CS42L73_THMOVLD_115C 2
+#define CS42L73_THMOVLD_098C 3
+#define CS42L73_CHARGEPUMP_MASK (0xF0)
/* CS42L73_ASPC, CS42L73_XSPC, CS42L73_VSPC */
-#define SP_3ST (1 << 7)
-#define SPDIF_I2S (0 << 6)
-#define SPDIF_PCM (1 << 6)
-#define PCM_MODE0 (0 << 4)
-#define PCM_MODE1 (1 << 4)
-#define PCM_MODE2 (2 << 4)
-#define PCM_MODE_MASK (3 << 4)
-#define PCM_BIT_ORDER (1 << 3)
-#define MCK_SCLK_64FS (0 << 0)
-#define MCK_SCLK_MCLK (2 << 0)
-#define MCK_SCLK_PREMCLK (3 << 0)
+#define CS42L73_SP_3ST (1 << 7)
+#define CS42L73_SPDIF_I2S (0 << 6)
+#define CS42L73_SPDIF_PCM (1 << 6)
+#define CS42L73_PCM_MODE0 (0 << 4)
+#define CS42L73_PCM_MODE1 (1 << 4)
+#define CS42L73_PCM_MODE2 (2 << 4)
+#define CS42L73_PCM_MODE_MASK (3 << 4)
+#define CS42L73_PCM_BIT_ORDER (1 << 3)
+#define CS42L73_MCK_SCLK_64FS (0 << 0)
+#define CS42L73_MCK_SCLK_MCLK (2 << 0)
+#define CS42L73_MCK_SCLK_PREMCLK (3 << 0)
/* CS42L73_xSPMMCC */
-#define MS_MASTER (1 << 7)
+#define CS42L73_MS_MASTER (1 << 7)
/* CS42L73_DMMCC */
-#define MCLKDIS (1 << 0)
-#define MCLKSEL_MCLK2 (1 << 4)
-#define MCLKSEL_MCLK1 (0 << 4)
+#define CS42L73_MCLKDIS (1 << 0)
+#define CS42L73_MCLKSEL_MCLK2 (1 << 4)
+#define CS42L73_MCLKSEL_MCLK1 (0 << 4)
/* CS42L73 MCLK derived from MCLK1 or MCLK2 */
#define CS42L73_CLKID_MCLK1 0
@@ -194,28 +195,26 @@
#define CS42L73_VSP 2
/* IS1, IM1 */
-#define MIC2_SDET (1 << 6)
-#define THMOVLD (1 << 4)
-#define DIGMIXOVFL (1 << 3)
-#define IPBOVFL (1 << 1)
-#define IPAOVFL (1 << 0)
+#define CS42L73_MIC2_SDET (1 << 6)
+#define CS42L73_THMOVLD (1 << 4)
+#define CS42L73_DIGMIXOVFL (1 << 3)
+#define CS42L73_IPBOVFL (1 << 1)
+#define CS42L73_IPAOVFL (1 << 0)
/* Analog Softramp */
-#define ANLGOSFT (1 << 0)
+#define CS42L73_ANLGOSFT (1 << 0)
/* HP A/B Analog Mute */
-#define HPA_MUTE (1 << 7)
+#define CS42L73_HPA_MUTE (1 << 7)
/* LO A/B Analog Mute */
-#define LOA_MUTE (1 << 7)
+#define CS42L73_LOA_MUTE (1 << 7)
/* Digital Mute */
-#define HLAD_MUTE (1 << 0)
-#define HLBD_MUTE (1 << 1)
-#define SPKD_MUTE (1 << 2)
-#define ESLD_MUTE (1 << 3)
+#define CS42L73_HLAD_MUTE (1 << 0)
+#define CS42L73_HLBD_MUTE (1 << 1)
+#define CS42L73_SPKD_MUTE (1 << 2)
+#define CS42L73_ESLD_MUTE (1 << 3)
/* Misc defines for codec */
-#define CS42L73_RESET_GPIO 143
-
#define CS42L73_DEVID 0x00042A73
#define CS42L73_MCLKX_MIN 5644800
#define CS42L73_MCLKX_MAX 38400000
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 566a367c94fa..66ceee22fdad 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
+#include <linux/regmap.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -38,294 +39,223 @@ struct max98088_cdata {
};
struct max98088_priv {
- enum max98088_type devtype;
- struct max98088_pdata *pdata;
- unsigned int sysclk;
- struct max98088_cdata dai[2];
- int eq_textcnt;
- const char **eq_texts;
- struct soc_enum eq_enum;
- u8 ina_state;
- u8 inb_state;
- unsigned int ex_mode;
- unsigned int digmic;
- unsigned int mic1pre;
- unsigned int mic2pre;
- unsigned int extmic_mode;
+ struct regmap *regmap;
+ enum max98088_type devtype;
+ struct max98088_pdata *pdata;
+ unsigned int sysclk;
+ struct max98088_cdata dai[2];
+ int eq_textcnt;
+ const char **eq_texts;
+ struct soc_enum eq_enum;
+ u8 ina_state;
+ u8 inb_state;
+ unsigned int ex_mode;
+ unsigned int digmic;
+ unsigned int mic1pre;
+ unsigned int mic2pre;
+ unsigned int extmic_mode;
};
-static const u8 max98088_reg[M98088_REG_CNT] = {
- 0x00, /* 00 IRQ status */
- 0x00, /* 01 MIC status */
- 0x00, /* 02 jack status */
- 0x00, /* 03 battery voltage */
- 0x00, /* 04 */
- 0x00, /* 05 */
- 0x00, /* 06 */
- 0x00, /* 07 */
- 0x00, /* 08 */
- 0x00, /* 09 */
- 0x00, /* 0A */
- 0x00, /* 0B */
- 0x00, /* 0C */
- 0x00, /* 0D */
- 0x00, /* 0E */
- 0x00, /* 0F interrupt enable */
-
- 0x00, /* 10 master clock */
- 0x00, /* 11 DAI1 clock mode */
- 0x00, /* 12 DAI1 clock control */
- 0x00, /* 13 DAI1 clock control */
- 0x00, /* 14 DAI1 format */
- 0x00, /* 15 DAI1 clock */
- 0x00, /* 16 DAI1 config */
- 0x00, /* 17 DAI1 TDM */
- 0x00, /* 18 DAI1 filters */
- 0x00, /* 19 DAI2 clock mode */
- 0x00, /* 1A DAI2 clock control */
- 0x00, /* 1B DAI2 clock control */
- 0x00, /* 1C DAI2 format */
- 0x00, /* 1D DAI2 clock */
- 0x00, /* 1E DAI2 config */
- 0x00, /* 1F DAI2 TDM */
-
- 0x00, /* 20 DAI2 filters */
- 0x00, /* 21 data config */
- 0x00, /* 22 DAC mixer */
- 0x00, /* 23 left ADC mixer */
- 0x00, /* 24 right ADC mixer */
- 0x00, /* 25 left HP mixer */
- 0x00, /* 26 right HP mixer */
- 0x00, /* 27 HP control */
- 0x00, /* 28 left REC mixer */
- 0x00, /* 29 right REC mixer */
- 0x00, /* 2A REC control */
- 0x00, /* 2B left SPK mixer */
- 0x00, /* 2C right SPK mixer */
- 0x00, /* 2D SPK control */
- 0x00, /* 2E sidetone */
- 0x00, /* 2F DAI1 playback level */
-
- 0x00, /* 30 DAI1 playback level */
- 0x00, /* 31 DAI2 playback level */
- 0x00, /* 32 DAI2 playbakc level */
- 0x00, /* 33 left ADC level */
- 0x00, /* 34 right ADC level */
- 0x00, /* 35 MIC1 level */
- 0x00, /* 36 MIC2 level */
- 0x00, /* 37 INA level */
- 0x00, /* 38 INB level */
- 0x00, /* 39 left HP volume */
- 0x00, /* 3A right HP volume */
- 0x00, /* 3B left REC volume */
- 0x00, /* 3C right REC volume */
- 0x00, /* 3D left SPK volume */
- 0x00, /* 3E right SPK volume */
- 0x00, /* 3F MIC config */
-
- 0x00, /* 40 MIC threshold */
- 0x00, /* 41 excursion limiter filter */
- 0x00, /* 42 excursion limiter threshold */
- 0x00, /* 43 ALC */
- 0x00, /* 44 power limiter threshold */
- 0x00, /* 45 power limiter config */
- 0x00, /* 46 distortion limiter config */
- 0x00, /* 47 audio input */
- 0x00, /* 48 microphone */
- 0x00, /* 49 level control */
- 0x00, /* 4A bypass switches */
- 0x00, /* 4B jack detect */
- 0x00, /* 4C input enable */
- 0x00, /* 4D output enable */
- 0xF0, /* 4E bias control */
- 0x00, /* 4F DAC power */
-
- 0x0F, /* 50 DAC power */
- 0x00, /* 51 system */
- 0x00, /* 52 DAI1 EQ1 */
- 0x00, /* 53 DAI1 EQ1 */
- 0x00, /* 54 DAI1 EQ1 */
- 0x00, /* 55 DAI1 EQ1 */
- 0x00, /* 56 DAI1 EQ1 */
- 0x00, /* 57 DAI1 EQ1 */
- 0x00, /* 58 DAI1 EQ1 */
- 0x00, /* 59 DAI1 EQ1 */
- 0x00, /* 5A DAI1 EQ1 */
- 0x00, /* 5B DAI1 EQ1 */
- 0x00, /* 5C DAI1 EQ2 */
- 0x00, /* 5D DAI1 EQ2 */
- 0x00, /* 5E DAI1 EQ2 */
- 0x00, /* 5F DAI1 EQ2 */
-
- 0x00, /* 60 DAI1 EQ2 */
- 0x00, /* 61 DAI1 EQ2 */
- 0x00, /* 62 DAI1 EQ2 */
- 0x00, /* 63 DAI1 EQ2 */
- 0x00, /* 64 DAI1 EQ2 */
- 0x00, /* 65 DAI1 EQ2 */
- 0x00, /* 66 DAI1 EQ3 */
- 0x00, /* 67 DAI1 EQ3 */
- 0x00, /* 68 DAI1 EQ3 */
- 0x00, /* 69 DAI1 EQ3 */
- 0x00, /* 6A DAI1 EQ3 */
- 0x00, /* 6B DAI1 EQ3 */
- 0x00, /* 6C DAI1 EQ3 */
- 0x00, /* 6D DAI1 EQ3 */
- 0x00, /* 6E DAI1 EQ3 */
- 0x00, /* 6F DAI1 EQ3 */
-
- 0x00, /* 70 DAI1 EQ4 */
- 0x00, /* 71 DAI1 EQ4 */
- 0x00, /* 72 DAI1 EQ4 */
- 0x00, /* 73 DAI1 EQ4 */
- 0x00, /* 74 DAI1 EQ4 */
- 0x00, /* 75 DAI1 EQ4 */
- 0x00, /* 76 DAI1 EQ4 */
- 0x00, /* 77 DAI1 EQ4 */
- 0x00, /* 78 DAI1 EQ4 */
- 0x00, /* 79 DAI1 EQ4 */
- 0x00, /* 7A DAI1 EQ5 */
- 0x00, /* 7B DAI1 EQ5 */
- 0x00, /* 7C DAI1 EQ5 */
- 0x00, /* 7D DAI1 EQ5 */
- 0x00, /* 7E DAI1 EQ5 */
- 0x00, /* 7F DAI1 EQ5 */
-
- 0x00, /* 80 DAI1 EQ5 */
- 0x00, /* 81 DAI1 EQ5 */
- 0x00, /* 82 DAI1 EQ5 */
- 0x00, /* 83 DAI1 EQ5 */
- 0x00, /* 84 DAI2 EQ1 */
- 0x00, /* 85 DAI2 EQ1 */
- 0x00, /* 86 DAI2 EQ1 */
- 0x00, /* 87 DAI2 EQ1 */
- 0x00, /* 88 DAI2 EQ1 */
- 0x00, /* 89 DAI2 EQ1 */
- 0x00, /* 8A DAI2 EQ1 */
- 0x00, /* 8B DAI2 EQ1 */
- 0x00, /* 8C DAI2 EQ1 */
- 0x00, /* 8D DAI2 EQ1 */
- 0x00, /* 8E DAI2 EQ2 */
- 0x00, /* 8F DAI2 EQ2 */
-
- 0x00, /* 90 DAI2 EQ2 */
- 0x00, /* 91 DAI2 EQ2 */
- 0x00, /* 92 DAI2 EQ2 */
- 0x00, /* 93 DAI2 EQ2 */
- 0x00, /* 94 DAI2 EQ2 */
- 0x00, /* 95 DAI2 EQ2 */
- 0x00, /* 96 DAI2 EQ2 */
- 0x00, /* 97 DAI2 EQ2 */
- 0x00, /* 98 DAI2 EQ3 */
- 0x00, /* 99 DAI2 EQ3 */
- 0x00, /* 9A DAI2 EQ3 */
- 0x00, /* 9B DAI2 EQ3 */
- 0x00, /* 9C DAI2 EQ3 */
- 0x00, /* 9D DAI2 EQ3 */
- 0x00, /* 9E DAI2 EQ3 */
- 0x00, /* 9F DAI2 EQ3 */
-
- 0x00, /* A0 DAI2 EQ3 */
- 0x00, /* A1 DAI2 EQ3 */
- 0x00, /* A2 DAI2 EQ4 */
- 0x00, /* A3 DAI2 EQ4 */
- 0x00, /* A4 DAI2 EQ4 */
- 0x00, /* A5 DAI2 EQ4 */
- 0x00, /* A6 DAI2 EQ4 */
- 0x00, /* A7 DAI2 EQ4 */
- 0x00, /* A8 DAI2 EQ4 */
- 0x00, /* A9 DAI2 EQ4 */
- 0x00, /* AA DAI2 EQ4 */
- 0x00, /* AB DAI2 EQ4 */
- 0x00, /* AC DAI2 EQ5 */
- 0x00, /* AD DAI2 EQ5 */
- 0x00, /* AE DAI2 EQ5 */
- 0x00, /* AF DAI2 EQ5 */
-
- 0x00, /* B0 DAI2 EQ5 */
- 0x00, /* B1 DAI2 EQ5 */
- 0x00, /* B2 DAI2 EQ5 */
- 0x00, /* B3 DAI2 EQ5 */
- 0x00, /* B4 DAI2 EQ5 */
- 0x00, /* B5 DAI2 EQ5 */
- 0x00, /* B6 DAI1 biquad */
- 0x00, /* B7 DAI1 biquad */
- 0x00, /* B8 DAI1 biquad */
- 0x00, /* B9 DAI1 biquad */
- 0x00, /* BA DAI1 biquad */
- 0x00, /* BB DAI1 biquad */
- 0x00, /* BC DAI1 biquad */
- 0x00, /* BD DAI1 biquad */
- 0x00, /* BE DAI1 biquad */
- 0x00, /* BF DAI1 biquad */
-
- 0x00, /* C0 DAI2 biquad */
- 0x00, /* C1 DAI2 biquad */
- 0x00, /* C2 DAI2 biquad */
- 0x00, /* C3 DAI2 biquad */
- 0x00, /* C4 DAI2 biquad */
- 0x00, /* C5 DAI2 biquad */
- 0x00, /* C6 DAI2 biquad */
- 0x00, /* C7 DAI2 biquad */
- 0x00, /* C8 DAI2 biquad */
- 0x00, /* C9 DAI2 biquad */
- 0x00, /* CA */
- 0x00, /* CB */
- 0x00, /* CC */
- 0x00, /* CD */
- 0x00, /* CE */
- 0x00, /* CF */
-
- 0x00, /* D0 */
- 0x00, /* D1 */
- 0x00, /* D2 */
- 0x00, /* D3 */
- 0x00, /* D4 */
- 0x00, /* D5 */
- 0x00, /* D6 */
- 0x00, /* D7 */
- 0x00, /* D8 */
- 0x00, /* D9 */
- 0x00, /* DA */
- 0x70, /* DB */
- 0x00, /* DC */
- 0x00, /* DD */
- 0x00, /* DE */
- 0x00, /* DF */
-
- 0x00, /* E0 */
- 0x00, /* E1 */
- 0x00, /* E2 */
- 0x00, /* E3 */
- 0x00, /* E4 */
- 0x00, /* E5 */
- 0x00, /* E6 */
- 0x00, /* E7 */
- 0x00, /* E8 */
- 0x00, /* E9 */
- 0x00, /* EA */
- 0x00, /* EB */
- 0x00, /* EC */
- 0x00, /* ED */
- 0x00, /* EE */
- 0x00, /* EF */
-
- 0x00, /* F0 */
- 0x00, /* F1 */
- 0x00, /* F2 */
- 0x00, /* F3 */
- 0x00, /* F4 */
- 0x00, /* F5 */
- 0x00, /* F6 */
- 0x00, /* F7 */
- 0x00, /* F8 */
- 0x00, /* F9 */
- 0x00, /* FA */
- 0x00, /* FB */
- 0x00, /* FC */
- 0x00, /* FD */
- 0x00, /* FE */
- 0x00, /* FF */
+static const struct reg_default max98088_reg[] = {
+ { 0xf, 0x00 }, /* 0F interrupt enable */
+
+ { 0x10, 0x00 }, /* 10 master clock */
+ { 0x11, 0x00 }, /* 11 DAI1 clock mode */
+ { 0x12, 0x00 }, /* 12 DAI1 clock control */
+ { 0x13, 0x00 }, /* 13 DAI1 clock control */
+ { 0x14, 0x00 }, /* 14 DAI1 format */
+ { 0x15, 0x00 }, /* 15 DAI1 clock */
+ { 0x16, 0x00 }, /* 16 DAI1 config */
+ { 0x17, 0x00 }, /* 17 DAI1 TDM */
+ { 0x18, 0x00 }, /* 18 DAI1 filters */
+ { 0x19, 0x00 }, /* 19 DAI2 clock mode */
+ { 0x1a, 0x00 }, /* 1A DAI2 clock control */
+ { 0x1b, 0x00 }, /* 1B DAI2 clock control */
+ { 0x1c, 0x00 }, /* 1C DAI2 format */
+ { 0x1d, 0x00 }, /* 1D DAI2 clock */
+ { 0x1e, 0x00 }, /* 1E DAI2 config */
+ { 0x1f, 0x00 }, /* 1F DAI2 TDM */
+
+ { 0x20, 0x00 }, /* 20 DAI2 filters */
+ { 0x21, 0x00 }, /* 21 data config */
+ { 0x22, 0x00 }, /* 22 DAC mixer */
+ { 0x23, 0x00 }, /* 23 left ADC mixer */
+ { 0x24, 0x00 }, /* 24 right ADC mixer */
+ { 0x25, 0x00 }, /* 25 left HP mixer */
+ { 0x26, 0x00 }, /* 26 right HP mixer */
+ { 0x27, 0x00 }, /* 27 HP control */
+ { 0x28, 0x00 }, /* 28 left REC mixer */
+ { 0x29, 0x00 }, /* 29 right REC mixer */
+ { 0x2a, 0x00 }, /* 2A REC control */
+ { 0x2b, 0x00 }, /* 2B left SPK mixer */
+ { 0x2c, 0x00 }, /* 2C right SPK mixer */
+ { 0x2d, 0x00 }, /* 2D SPK control */
+ { 0x2e, 0x00 }, /* 2E sidetone */
+ { 0x2f, 0x00 }, /* 2F DAI1 playback level */
+
+ { 0x30, 0x00 }, /* 30 DAI1 playback level */
+ { 0x31, 0x00 }, /* 31 DAI2 playback level */
+ { 0x32, 0x00 }, /* 32 DAI2 playback level */
+ { 0x33, 0x00 }, /* 33 left ADC level */
+ { 0x34, 0x00 }, /* 34 right ADC level */
+ { 0x35, 0x00 }, /* 35 MIC1 level */
+ { 0x36, 0x00 }, /* 36 MIC2 level */
+ { 0x37, 0x00 }, /* 37 INA level */
+ { 0x38, 0x00 }, /* 38 INB level */
+ { 0x39, 0x00 }, /* 39 left HP volume */
+ { 0x3a, 0x00 }, /* 3A right HP volume */
+ { 0x3b, 0x00 }, /* 3B left REC volume */
+ { 0x3c, 0x00 }, /* 3C right REC volume */
+ { 0x3d, 0x00 }, /* 3D left SPK volume */
+ { 0x3e, 0x00 }, /* 3E right SPK volume */
+ { 0x3f, 0x00 }, /* 3F MIC config */
+
+ { 0x40, 0x00 }, /* 40 MIC threshold */
+ { 0x41, 0x00 }, /* 41 excursion limiter filter */
+ { 0x42, 0x00 }, /* 42 excursion limiter threshold */
+ { 0x43, 0x00 }, /* 43 ALC */
+ { 0x44, 0x00 }, /* 44 power limiter threshold */
+ { 0x45, 0x00 }, /* 45 power limiter config */
+ { 0x46, 0x00 }, /* 46 distortion limiter config */
+ { 0x47, 0x00 }, /* 47 audio input */
+ { 0x48, 0x00 }, /* 48 microphone */
+ { 0x49, 0x00 }, /* 49 level control */
+ { 0x4a, 0x00 }, /* 4A bypass switches */
+ { 0x4b, 0x00 }, /* 4B jack detect */
+ { 0x4c, 0x00 }, /* 4C input enable */
+ { 0x4d, 0x00 }, /* 4D output enable */
+ { 0x4e, 0xF0 }, /* 4E bias control */
+ { 0x4f, 0x00 }, /* 4F DAC power */
+
+ { 0x50, 0x0F }, /* 50 DAC power */
+ { 0x51, 0x00 }, /* 51 system */
+ { 0x52, 0x00 }, /* 52 DAI1 EQ1 */
+ { 0x53, 0x00 }, /* 53 DAI1 EQ1 */
+ { 0x54, 0x00 }, /* 54 DAI1 EQ1 */
+ { 0x55, 0x00 }, /* 55 DAI1 EQ1 */
+ { 0x56, 0x00 }, /* 56 DAI1 EQ1 */
+ { 0x57, 0x00 }, /* 57 DAI1 EQ1 */
+ { 0x58, 0x00 }, /* 58 DAI1 EQ1 */
+ { 0x59, 0x00 }, /* 59 DAI1 EQ1 */
+ { 0x5a, 0x00 }, /* 5A DAI1 EQ1 */
+ { 0x5b, 0x00 }, /* 5B DAI1 EQ1 */
+ { 0x5c, 0x00 }, /* 5C DAI1 EQ2 */
+ { 0x5d, 0x00 }, /* 5D DAI1 EQ2 */
+ { 0x5e, 0x00 }, /* 5E DAI1 EQ2 */
+ { 0x5f, 0x00 }, /* 5F DAI1 EQ2 */
+
+ { 0x60, 0x00 }, /* 60 DAI1 EQ2 */
+ { 0x61, 0x00 }, /* 61 DAI1 EQ2 */
+ { 0x62, 0x00 }, /* 62 DAI1 EQ2 */
+ { 0x63, 0x00 }, /* 63 DAI1 EQ2 */
+ { 0x64, 0x00 }, /* 64 DAI1 EQ2 */
+ { 0x65, 0x00 }, /* 65 DAI1 EQ2 */
+ { 0x66, 0x00 }, /* 66 DAI1 EQ3 */
+ { 0x67, 0x00 }, /* 67 DAI1 EQ3 */
+ { 0x68, 0x00 }, /* 68 DAI1 EQ3 */
+ { 0x69, 0x00 }, /* 69 DAI1 EQ3 */
+ { 0x6a, 0x00 }, /* 6A DAI1 EQ3 */
+ { 0x6b, 0x00 }, /* 6B DAI1 EQ3 */
+ { 0x6c, 0x00 }, /* 6C DAI1 EQ3 */
+ { 0x6d, 0x00 }, /* 6D DAI1 EQ3 */
+ { 0x6e, 0x00 }, /* 6E DAI1 EQ3 */
+ { 0x6f, 0x00 }, /* 6F DAI1 EQ3 */
+
+ { 0x70, 0x00 }, /* 70 DAI1 EQ4 */
+ { 0x71, 0x00 }, /* 71 DAI1 EQ4 */
+ { 0x72, 0x00 }, /* 72 DAI1 EQ4 */
+ { 0x73, 0x00 }, /* 73 DAI1 EQ4 */
+ { 0x74, 0x00 }, /* 74 DAI1 EQ4 */
+ { 0x75, 0x00 }, /* 75 DAI1 EQ4 */
+ { 0x76, 0x00 }, /* 76 DAI1 EQ4 */
+ { 0x77, 0x00 }, /* 77 DAI1 EQ4 */
+ { 0x78, 0x00 }, /* 78 DAI1 EQ4 */
+ { 0x79, 0x00 }, /* 79 DAI1 EQ4 */
+ { 0x7a, 0x00 }, /* 7A DAI1 EQ5 */
+ { 0x7b, 0x00 }, /* 7B DAI1 EQ5 */
+ { 0x7c, 0x00 }, /* 7C DAI1 EQ5 */
+ { 0x7d, 0x00 }, /* 7D DAI1 EQ5 */
+ { 0x7e, 0x00 }, /* 7E DAI1 EQ5 */
+ { 0x7f, 0x00 }, /* 7F DAI1 EQ5 */
+
+ { 0x80, 0x00 }, /* 80 DAI1 EQ5 */
+ { 0x81, 0x00 }, /* 81 DAI1 EQ5 */
+ { 0x82, 0x00 }, /* 82 DAI1 EQ5 */
+ { 0x83, 0x00 }, /* 83 DAI1 EQ5 */
+ { 0x84, 0x00 }, /* 84 DAI2 EQ1 */
+ { 0x85, 0x00 }, /* 85 DAI2 EQ1 */
+ { 0x86, 0x00 }, /* 86 DAI2 EQ1 */
+ { 0x87, 0x00 }, /* 87 DAI2 EQ1 */
+ { 0x88, 0x00 }, /* 88 DAI2 EQ1 */
+ { 0x89, 0x00 }, /* 89 DAI2 EQ1 */
+ { 0x8a, 0x00 }, /* 8A DAI2 EQ1 */
+ { 0x8b, 0x00 }, /* 8B DAI2 EQ1 */
+ { 0x8c, 0x00 }, /* 8C DAI2 EQ1 */
+ { 0x8d, 0x00 }, /* 8D DAI2 EQ1 */
+ { 0x8e, 0x00 }, /* 8E DAI2 EQ2 */
+ { 0x8f, 0x00 }, /* 8F DAI2 EQ2 */
+
+ { 0x90, 0x00 }, /* 90 DAI2 EQ2 */
+ { 0x91, 0x00 }, /* 91 DAI2 EQ2 */
+ { 0x92, 0x00 }, /* 92 DAI2 EQ2 */
+ { 0x93, 0x00 }, /* 93 DAI2 EQ2 */
+ { 0x94, 0x00 }, /* 94 DAI2 EQ2 */
+ { 0x95, 0x00 }, /* 95 DAI2 EQ2 */
+ { 0x96, 0x00 }, /* 96 DAI2 EQ2 */
+ { 0x97, 0x00 }, /* 97 DAI2 EQ2 */
+ { 0x98, 0x00 }, /* 98 DAI2 EQ3 */
+ { 0x99, 0x00 }, /* 99 DAI2 EQ3 */
+ { 0x9a, 0x00 }, /* 9A DAI2 EQ3 */
+ { 0x9b, 0x00 }, /* 9B DAI2 EQ3 */
+ { 0x9c, 0x00 }, /* 9C DAI2 EQ3 */
+ { 0x9d, 0x00 }, /* 9D DAI2 EQ3 */
+ { 0x9e, 0x00 }, /* 9E DAI2 EQ3 */
+ { 0x9f, 0x00 }, /* 9F DAI2 EQ3 */
+
+ { 0xa0, 0x00 }, /* A0 DAI2 EQ3 */
+ { 0xa1, 0x00 }, /* A1 DAI2 EQ3 */
+ { 0xa2, 0x00 }, /* A2 DAI2 EQ4 */
+ { 0xa3, 0x00 }, /* A3 DAI2 EQ4 */
+ { 0xa4, 0x00 }, /* A4 DAI2 EQ4 */
+ { 0xa5, 0x00 }, /* A5 DAI2 EQ4 */
+ { 0xa6, 0x00 }, /* A6 DAI2 EQ4 */
+ { 0xa7, 0x00 }, /* A7 DAI2 EQ4 */
+ { 0xa8, 0x00 }, /* A8 DAI2 EQ4 */
+ { 0xa9, 0x00 }, /* A9 DAI2 EQ4 */
+ { 0xaa, 0x00 }, /* AA DAI2 EQ4 */
+ { 0xab, 0x00 }, /* AB DAI2 EQ4 */
+ { 0xac, 0x00 }, /* AC DAI2 EQ5 */
+ { 0xad, 0x00 }, /* AD DAI2 EQ5 */
+ { 0xae, 0x00 }, /* AE DAI2 EQ5 */
+ { 0xaf, 0x00 }, /* AF DAI2 EQ5 */
+
+ { 0xb0, 0x00 }, /* B0 DAI2 EQ5 */
+ { 0xb1, 0x00 }, /* B1 DAI2 EQ5 */
+ { 0xb2, 0x00 }, /* B2 DAI2 EQ5 */
+ { 0xb3, 0x00 }, /* B3 DAI2 EQ5 */
+ { 0xb4, 0x00 }, /* B4 DAI2 EQ5 */
+ { 0xb5, 0x00 }, /* B5 DAI2 EQ5 */
+ { 0xb6, 0x00 }, /* B6 DAI1 biquad */
+ { 0xb7, 0x00 }, /* B7 DAI1 biquad */
+ { 0xb8, 0x00 }, /* B8 DAI1 biquad */
+ { 0xb9, 0x00 }, /* B9 DAI1 biquad */
+ { 0xba, 0x00 }, /* BA DAI1 biquad */
+ { 0xbb, 0x00 }, /* BB DAI1 biquad */
+ { 0xbc, 0x00 }, /* BC DAI1 biquad */
+ { 0xbd, 0x00 }, /* BD DAI1 biquad */
+ { 0xbe, 0x00 }, /* BE DAI1 biquad */
+ { 0xbf, 0x00 }, /* BF DAI1 biquad */
+
+ { 0xc0, 0x00 }, /* C0 DAI2 biquad */
+ { 0xc1, 0x00 }, /* C1 DAI2 biquad */
+ { 0xc2, 0x00 }, /* C2 DAI2 biquad */
+ { 0xc3, 0x00 }, /* C3 DAI2 biquad */
+ { 0xc4, 0x00 }, /* C4 DAI2 biquad */
+ { 0xc5, 0x00 }, /* C5 DAI2 biquad */
+ { 0xc6, 0x00 }, /* C6 DAI2 biquad */
+ { 0xc7, 0x00 }, /* C7 DAI2 biquad */
+ { 0xc8, 0x00 }, /* C8 DAI2 biquad */
+ { 0xc9, 0x00 }, /* C9 DAI2 biquad */
};
static struct {
@@ -606,11 +536,28 @@ static struct {
{ 0xFF, 0x00, 1 }, /* FF */
};
-static int max98088_volatile_register(struct snd_soc_codec *codec, unsigned int reg)
+static bool max98088_readable_register(struct device *dev, unsigned int reg)
+{
+ return max98088_access[reg].readable;
+}
+
+static bool max98088_volatile_register(struct device *dev, unsigned int reg)
{
return max98088_access[reg].vol;
}
+static const struct regmap_config max98088_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .readable_reg = max98088_readable_register,
+ .volatile_reg = max98088_volatile_register,
+ .max_register = 0xff,
+
+ .reg_defaults = max98088_reg,
+ .num_reg_defaults = ARRAY_SIZE(max98088_reg),
+ .cache_type = REGCACHE_RBTREE,
+};
/*
* Load equalizer DSP coefficient configurations registers
@@ -1610,58 +1557,34 @@ static int max98088_dai2_digital_mute(struct snd_soc_dai *codec_dai, int mute)
return 0;
}
-static void max98088_sync_cache(struct snd_soc_codec *codec)
-{
- u8 *reg_cache = codec->reg_cache;
- int i;
-
- if (!codec->cache_sync)
- return;
-
- codec->cache_only = 0;
-
- /* write back cached values if they're writeable and
- * different from the hardware default.
- */
- for (i = 1; i < codec->driver->reg_cache_size; i++) {
- if (!max98088_access[i].writable)
- continue;
-
- if (reg_cache[i] == max98088_reg[i])
- continue;
-
- snd_soc_write(codec, i, reg_cache[i]);
- }
-
- codec->cache_sync = 0;
-}
-
static int max98088_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- switch (level) {
- case SND_SOC_BIAS_ON:
- break;
-
- case SND_SOC_BIAS_PREPARE:
- break;
-
- case SND_SOC_BIAS_STANDBY:
- if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
- max98088_sync_cache(codec);
-
- snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN,
- M98088_MBEN, M98088_MBEN);
- break;
-
- case SND_SOC_BIAS_OFF:
- snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN,
- M98088_MBEN, 0);
- codec->cache_sync = 1;
- break;
- }
- codec->dapm.bias_level = level;
- return 0;
+ struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
+ regcache_sync(max98088->regmap);
+
+ snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN,
+ M98088_MBEN, M98088_MBEN);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN,
+ M98088_MBEN, 0);
+ regcache_mark_dirty(max98088->regmap);
+ break;
+ }
+ codec->dapm.bias_level = level;
+ return 0;
}
#define MAX98088_RATES SNDRV_PCM_RATE_8000_96000
@@ -1988,9 +1911,9 @@ static int max98088_probe(struct snd_soc_codec *codec)
struct max98088_cdata *cdata;
int ret = 0;
- codec->cache_sync = 1;
+ regcache_mark_dirty(max98088->regmap);
- ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
+ ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
if (ret != 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
return ret;
@@ -2048,9 +1971,6 @@ static int max98088_probe(struct snd_soc_codec *codec)
max98088_handle_pdata(codec);
- snd_soc_add_codec_controls(codec, max98088_snd_controls,
- ARRAY_SIZE(max98088_snd_controls));
-
err_access:
return ret;
}
@@ -2066,15 +1986,13 @@ static int max98088_remove(struct snd_soc_codec *codec)
}
static struct snd_soc_codec_driver soc_codec_dev_max98088 = {
- .probe = max98088_probe,
- .remove = max98088_remove,
- .suspend = max98088_suspend,
- .resume = max98088_resume,
- .set_bias_level = max98088_set_bias_level,
- .reg_cache_size = ARRAY_SIZE(max98088_reg),
- .reg_word_size = sizeof(u8),
- .reg_cache_default = max98088_reg,
- .volatile_register = max98088_volatile_register,
+ .probe = max98088_probe,
+ .remove = max98088_remove,
+ .suspend = max98088_suspend,
+ .resume = max98088_resume,
+ .set_bias_level = max98088_set_bias_level,
+ .controls = max98088_snd_controls,
+ .num_controls = ARRAY_SIZE(max98088_snd_controls),
.dapm_widgets = max98088_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(max98088_dapm_widgets),
.dapm_routes = max98088_audio_map,
@@ -2082,7 +2000,7 @@ static struct snd_soc_codec_driver soc_codec_dev_max98088 = {
};
static int max98088_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct max98088_priv *max98088;
int ret;
@@ -2092,6 +2010,10 @@ static int max98088_i2c_probe(struct i2c_client *i2c,
if (max98088 == NULL)
return -ENOMEM;
+ max98088->regmap = devm_regmap_init_i2c(i2c, &max98088_regmap);
+ if (IS_ERR(max98088->regmap))
+ return PTR_ERR(max98088->regmap);
+
max98088->devtype = id->driver_data;
i2c_set_clientdata(i2c, max98088);
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 8dbcacd44e6a..8fb072455802 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -39,6 +39,7 @@ struct max98095_cdata {
};
struct max98095_priv {
+ struct regmap *regmap;
enum max98095_type devtype;
struct max98095_pdata *pdata;
unsigned int sysclk;
@@ -56,263 +57,145 @@ struct max98095_priv {
struct snd_soc_jack *mic_jack;
};
-static const u8 max98095_reg_def[M98095_REG_CNT] = {
- 0x00, /* 00 */
- 0x00, /* 01 */
- 0x00, /* 02 */
- 0x00, /* 03 */
- 0x00, /* 04 */
- 0x00, /* 05 */
- 0x00, /* 06 */
- 0x00, /* 07 */
- 0x00, /* 08 */
- 0x00, /* 09 */
- 0x00, /* 0A */
- 0x00, /* 0B */
- 0x00, /* 0C */
- 0x00, /* 0D */
- 0x00, /* 0E */
- 0x00, /* 0F */
- 0x00, /* 10 */
- 0x00, /* 11 */
- 0x00, /* 12 */
- 0x00, /* 13 */
- 0x00, /* 14 */
- 0x00, /* 15 */
- 0x00, /* 16 */
- 0x00, /* 17 */
- 0x00, /* 18 */
- 0x00, /* 19 */
- 0x00, /* 1A */
- 0x00, /* 1B */
- 0x00, /* 1C */
- 0x00, /* 1D */
- 0x00, /* 1E */
- 0x00, /* 1F */
- 0x00, /* 20 */
- 0x00, /* 21 */
- 0x00, /* 22 */
- 0x00, /* 23 */
- 0x00, /* 24 */
- 0x00, /* 25 */
- 0x00, /* 26 */
- 0x00, /* 27 */
- 0x00, /* 28 */
- 0x00, /* 29 */
- 0x00, /* 2A */
- 0x00, /* 2B */
- 0x00, /* 2C */
- 0x00, /* 2D */
- 0x00, /* 2E */
- 0x00, /* 2F */
- 0x00, /* 30 */
- 0x00, /* 31 */
- 0x00, /* 32 */
- 0x00, /* 33 */
- 0x00, /* 34 */
- 0x00, /* 35 */
- 0x00, /* 36 */
- 0x00, /* 37 */
- 0x00, /* 38 */
- 0x00, /* 39 */
- 0x00, /* 3A */
- 0x00, /* 3B */
- 0x00, /* 3C */
- 0x00, /* 3D */
- 0x00, /* 3E */
- 0x00, /* 3F */
- 0x00, /* 40 */
- 0x00, /* 41 */
- 0x00, /* 42 */
- 0x00, /* 43 */
- 0x00, /* 44 */
- 0x00, /* 45 */
- 0x00, /* 46 */
- 0x00, /* 47 */
- 0x00, /* 48 */
- 0x00, /* 49 */
- 0x00, /* 4A */
- 0x00, /* 4B */
- 0x00, /* 4C */
- 0x00, /* 4D */
- 0x00, /* 4E */
- 0x00, /* 4F */
- 0x00, /* 50 */
- 0x00, /* 51 */
- 0x00, /* 52 */
- 0x00, /* 53 */
- 0x00, /* 54 */
- 0x00, /* 55 */
- 0x00, /* 56 */
- 0x00, /* 57 */
- 0x00, /* 58 */
- 0x00, /* 59 */
- 0x00, /* 5A */
- 0x00, /* 5B */
- 0x00, /* 5C */
- 0x00, /* 5D */
- 0x00, /* 5E */
- 0x00, /* 5F */
- 0x00, /* 60 */
- 0x00, /* 61 */
- 0x00, /* 62 */
- 0x00, /* 63 */
- 0x00, /* 64 */
- 0x00, /* 65 */
- 0x00, /* 66 */
- 0x00, /* 67 */
- 0x00, /* 68 */
- 0x00, /* 69 */
- 0x00, /* 6A */
- 0x00, /* 6B */
- 0x00, /* 6C */
- 0x00, /* 6D */
- 0x00, /* 6E */
- 0x00, /* 6F */
- 0x00, /* 70 */
- 0x00, /* 71 */
- 0x00, /* 72 */
- 0x00, /* 73 */
- 0x00, /* 74 */
- 0x00, /* 75 */
- 0x00, /* 76 */
- 0x00, /* 77 */
- 0x00, /* 78 */
- 0x00, /* 79 */
- 0x00, /* 7A */
- 0x00, /* 7B */
- 0x00, /* 7C */
- 0x00, /* 7D */
- 0x00, /* 7E */
- 0x00, /* 7F */
- 0x00, /* 80 */
- 0x00, /* 81 */
- 0x00, /* 82 */
- 0x00, /* 83 */
- 0x00, /* 84 */
- 0x00, /* 85 */
- 0x00, /* 86 */
- 0x00, /* 87 */
- 0x00, /* 88 */
- 0x00, /* 89 */
- 0x00, /* 8A */
- 0x00, /* 8B */
- 0x00, /* 8C */
- 0x00, /* 8D */
- 0x00, /* 8E */
- 0x00, /* 8F */
- 0x00, /* 90 */
- 0x00, /* 91 */
- 0x30, /* 92 */
- 0xF0, /* 93 */
- 0x00, /* 94 */
- 0x00, /* 95 */
- 0x3F, /* 96 */
- 0x00, /* 97 */
- 0x00, /* 98 */
- 0x00, /* 99 */
- 0x00, /* 9A */
- 0x00, /* 9B */
- 0x00, /* 9C */
- 0x00, /* 9D */
- 0x00, /* 9E */
- 0x00, /* 9F */
- 0x00, /* A0 */
- 0x00, /* A1 */
- 0x00, /* A2 */
- 0x00, /* A3 */
- 0x00, /* A4 */
- 0x00, /* A5 */
- 0x00, /* A6 */
- 0x00, /* A7 */
- 0x00, /* A8 */
- 0x00, /* A9 */
- 0x00, /* AA */
- 0x00, /* AB */
- 0x00, /* AC */
- 0x00, /* AD */
- 0x00, /* AE */
- 0x00, /* AF */
- 0x00, /* B0 */
- 0x00, /* B1 */
- 0x00, /* B2 */
- 0x00, /* B3 */
- 0x00, /* B4 */
- 0x00, /* B5 */
- 0x00, /* B6 */
- 0x00, /* B7 */
- 0x00, /* B8 */
- 0x00, /* B9 */
- 0x00, /* BA */
- 0x00, /* BB */
- 0x00, /* BC */
- 0x00, /* BD */
- 0x00, /* BE */
- 0x00, /* BF */
- 0x00, /* C0 */
- 0x00, /* C1 */
- 0x00, /* C2 */
- 0x00, /* C3 */
- 0x00, /* C4 */
- 0x00, /* C5 */
- 0x00, /* C6 */
- 0x00, /* C7 */
- 0x00, /* C8 */
- 0x00, /* C9 */
- 0x00, /* CA */
- 0x00, /* CB */
- 0x00, /* CC */
- 0x00, /* CD */
- 0x00, /* CE */
- 0x00, /* CF */
- 0x00, /* D0 */
- 0x00, /* D1 */
- 0x00, /* D2 */
- 0x00, /* D3 */
- 0x00, /* D4 */
- 0x00, /* D5 */
- 0x00, /* D6 */
- 0x00, /* D7 */
- 0x00, /* D8 */
- 0x00, /* D9 */
- 0x00, /* DA */
- 0x00, /* DB */
- 0x00, /* DC */
- 0x00, /* DD */
- 0x00, /* DE */
- 0x00, /* DF */
- 0x00, /* E0 */
- 0x00, /* E1 */
- 0x00, /* E2 */
- 0x00, /* E3 */
- 0x00, /* E4 */
- 0x00, /* E5 */
- 0x00, /* E6 */
- 0x00, /* E7 */
- 0x00, /* E8 */
- 0x00, /* E9 */
- 0x00, /* EA */
- 0x00, /* EB */
- 0x00, /* EC */
- 0x00, /* ED */
- 0x00, /* EE */
- 0x00, /* EF */
- 0x00, /* F0 */
- 0x00, /* F1 */
- 0x00, /* F2 */
- 0x00, /* F3 */
- 0x00, /* F4 */
- 0x00, /* F5 */
- 0x00, /* F6 */
- 0x00, /* F7 */
- 0x00, /* F8 */
- 0x00, /* F9 */
- 0x00, /* FA */
- 0x00, /* FB */
- 0x00, /* FC */
- 0x00, /* FD */
- 0x00, /* FE */
- 0x00, /* FF */
+static const struct reg_default max98095_reg_def[] = {
+ { 0xf, 0x00 }, /* 0F */
+ { 0x10, 0x00 }, /* 10 */
+ { 0x11, 0x00 }, /* 11 */
+ { 0x12, 0x00 }, /* 12 */
+ { 0x13, 0x00 }, /* 13 */
+ { 0x14, 0x00 }, /* 14 */
+ { 0x15, 0x00 }, /* 15 */
+ { 0x16, 0x00 }, /* 16 */
+ { 0x17, 0x00 }, /* 17 */
+ { 0x18, 0x00 }, /* 18 */
+ { 0x19, 0x00 }, /* 19 */
+ { 0x1a, 0x00 }, /* 1A */
+ { 0x1b, 0x00 }, /* 1B */
+ { 0x1c, 0x00 }, /* 1C */
+ { 0x1d, 0x00 }, /* 1D */
+ { 0x1e, 0x00 }, /* 1E */
+ { 0x1f, 0x00 }, /* 1F */
+ { 0x20, 0x00 }, /* 20 */
+ { 0x21, 0x00 }, /* 21 */
+ { 0x22, 0x00 }, /* 22 */
+ { 0x23, 0x00 }, /* 23 */
+ { 0x24, 0x00 }, /* 24 */
+ { 0x25, 0x00 }, /* 25 */
+ { 0x26, 0x00 }, /* 26 */
+ { 0x27, 0x00 }, /* 27 */
+ { 0x28, 0x00 }, /* 28 */
+ { 0x29, 0x00 }, /* 29 */
+ { 0x2a, 0x00 }, /* 2A */
+ { 0x2b, 0x00 }, /* 2B */
+ { 0x2c, 0x00 }, /* 2C */
+ { 0x2d, 0x00 }, /* 2D */
+ { 0x2e, 0x00 }, /* 2E */
+ { 0x2f, 0x00 }, /* 2F */
+ { 0x30, 0x00 }, /* 30 */
+ { 0x31, 0x00 }, /* 31 */
+ { 0x32, 0x00 }, /* 32 */
+ { 0x33, 0x00 }, /* 33 */
+ { 0x34, 0x00 }, /* 34 */
+ { 0x35, 0x00 }, /* 35 */
+ { 0x36, 0x00 }, /* 36 */
+ { 0x37, 0x00 }, /* 37 */
+ { 0x38, 0x00 }, /* 38 */
+ { 0x39, 0x00 }, /* 39 */
+ { 0x3a, 0x00 }, /* 3A */
+ { 0x3b, 0x00 }, /* 3B */
+ { 0x3c, 0x00 }, /* 3C */
+ { 0x3d, 0x00 }, /* 3D */
+ { 0x3e, 0x00 }, /* 3E */
+ { 0x3f, 0x00 }, /* 3F */
+ { 0x40, 0x00 }, /* 40 */
+ { 0x41, 0x00 }, /* 41 */
+ { 0x42, 0x00 }, /* 42 */
+ { 0x43, 0x00 }, /* 43 */
+ { 0x44, 0x00 }, /* 44 */
+ { 0x45, 0x00 }, /* 45 */
+ { 0x46, 0x00 }, /* 46 */
+ { 0x47, 0x00 }, /* 47 */
+ { 0x48, 0x00 }, /* 48 */
+ { 0x49, 0x00 }, /* 49 */
+ { 0x4a, 0x00 }, /* 4A */
+ { 0x4b, 0x00 }, /* 4B */
+ { 0x4c, 0x00 }, /* 4C */
+ { 0x4d, 0x00 }, /* 4D */
+ { 0x4e, 0x00 }, /* 4E */
+ { 0x4f, 0x00 }, /* 4F */
+ { 0x50, 0x00 }, /* 50 */
+ { 0x51, 0x00 }, /* 51 */
+ { 0x52, 0x00 }, /* 52 */
+ { 0x53, 0x00 }, /* 53 */
+ { 0x54, 0x00 }, /* 54 */
+ { 0x55, 0x00 }, /* 55 */
+ { 0x56, 0x00 }, /* 56 */
+ { 0x57, 0x00 }, /* 57 */
+ { 0x58, 0x00 }, /* 58 */
+ { 0x59, 0x00 }, /* 59 */
+ { 0x5a, 0x00 }, /* 5A */
+ { 0x5b, 0x00 }, /* 5B */
+ { 0x5c, 0x00 }, /* 5C */
+ { 0x5d, 0x00 }, /* 5D */
+ { 0x5e, 0x00 }, /* 5E */
+ { 0x5f, 0x00 }, /* 5F */
+ { 0x60, 0x00 }, /* 60 */
+ { 0x61, 0x00 }, /* 61 */
+ { 0x62, 0x00 }, /* 62 */
+ { 0x63, 0x00 }, /* 63 */
+ { 0x64, 0x00 }, /* 64 */
+ { 0x65, 0x00 }, /* 65 */
+ { 0x66, 0x00 }, /* 66 */
+ { 0x67, 0x00 }, /* 67 */
+ { 0x68, 0x00 }, /* 68 */
+ { 0x69, 0x00 }, /* 69 */
+ { 0x6a, 0x00 }, /* 6A */
+ { 0x6b, 0x00 }, /* 6B */
+ { 0x6c, 0x00 }, /* 6C */
+ { 0x6d, 0x00 }, /* 6D */
+ { 0x6e, 0x00 }, /* 6E */
+ { 0x6f, 0x00 }, /* 6F */
+ { 0x70, 0x00 }, /* 70 */
+ { 0x71, 0x00 }, /* 71 */
+ { 0x72, 0x00 }, /* 72 */
+ { 0x73, 0x00 }, /* 73 */
+ { 0x74, 0x00 }, /* 74 */
+ { 0x75, 0x00 }, /* 75 */
+ { 0x76, 0x00 }, /* 76 */
+ { 0x77, 0x00 }, /* 77 */
+ { 0x78, 0x00 }, /* 78 */
+ { 0x79, 0x00 }, /* 79 */
+ { 0x7a, 0x00 }, /* 7A */
+ { 0x7b, 0x00 }, /* 7B */
+ { 0x7c, 0x00 }, /* 7C */
+ { 0x7d, 0x00 }, /* 7D */
+ { 0x7e, 0x00 }, /* 7E */
+ { 0x7f, 0x00 }, /* 7F */
+ { 0x80, 0x00 }, /* 80 */
+ { 0x81, 0x00 }, /* 81 */
+ { 0x82, 0x00 }, /* 82 */
+ { 0x83, 0x00 }, /* 83 */
+ { 0x84, 0x00 }, /* 84 */
+ { 0x85, 0x00 }, /* 85 */
+ { 0x86, 0x00 }, /* 86 */
+ { 0x87, 0x00 }, /* 87 */
+ { 0x88, 0x00 }, /* 88 */
+ { 0x89, 0x00 }, /* 89 */
+ { 0x8a, 0x00 }, /* 8A */
+ { 0x8b, 0x00 }, /* 8B */
+ { 0x8c, 0x00 }, /* 8C */
+ { 0x8d, 0x00 }, /* 8D */
+ { 0x8e, 0x00 }, /* 8E */
+ { 0x8f, 0x00 }, /* 8F */
+ { 0x90, 0x00 }, /* 90 */
+ { 0x91, 0x00 }, /* 91 */
+ { 0x92, 0x30 }, /* 92 */
+ { 0x93, 0xF0 }, /* 93 */
+ { 0x94, 0x00 }, /* 94 */
+ { 0x95, 0x00 }, /* 95 */
+ { 0x96, 0x3F }, /* 96 */
+ { 0x97, 0x00 }, /* 97 */
+ { 0xff, 0x00 }, /* FF */
};
static struct {
@@ -577,14 +460,14 @@ static struct {
{ 0xFF, 0x00 }, /* FF */
};
-static int max98095_readable(struct snd_soc_codec *codec, unsigned int reg)
+static bool max98095_readable(struct device *dev, unsigned int reg)
{
if (reg >= M98095_REG_CNT)
return 0;
return max98095_access[reg].readable != 0;
}
-static int max98095_volatile(struct snd_soc_codec *codec, unsigned int reg)
+static bool max98095_volatile(struct device *dev, unsigned int reg)
{
if (reg > M98095_REG_MAX_CACHED)
return 1;
@@ -611,22 +494,18 @@ static int max98095_volatile(struct snd_soc_codec *codec, unsigned int reg)
return 0;
}
-/*
- * Filter coefficients are in a separate register segment
- * and they share the address space of the normal registers.
- * The coefficient registers do not need or share the cache.
- */
-static int max98095_hw_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- int ret;
+static const struct regmap_config max98095_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
- codec->cache_bypass = 1;
- ret = snd_soc_write(codec, reg, value);
- codec->cache_bypass = 0;
+ .reg_defaults = max98095_reg_def,
+ .num_reg_defaults = ARRAY_SIZE(max98095_reg_def),
+ .max_register = M98095_0FF_REV_ID,
+ .cache_type = REGCACHE_RBTREE,
- return ret ? -EIO : 0;
-}
+ .readable_reg = max98095_readable,
+ .volatile_reg = max98095_volatile,
+};
/*
* Load equalizer DSP coefficient configurations registers
@@ -648,8 +527,8 @@ static void m98095_eq_band(struct snd_soc_codec *codec, unsigned int dai,
/* Step through the registers and coefs */
for (i = 0; i < M98095_COEFS_PER_BAND; i++) {
- max98095_hw_write(codec, eq_reg++, M98095_BYTE1(coefs[i]));
- max98095_hw_write(codec, eq_reg++, M98095_BYTE0(coefs[i]));
+ snd_soc_write(codec, eq_reg++, M98095_BYTE1(coefs[i]));
+ snd_soc_write(codec, eq_reg++, M98095_BYTE0(coefs[i]));
}
}
@@ -673,8 +552,8 @@ static void m98095_biquad_band(struct snd_soc_codec *codec, unsigned int dai,
/* Step through the registers and coefs */
for (i = 0; i < M98095_COEFS_PER_BAND; i++) {
- max98095_hw_write(codec, bq_reg++, M98095_BYTE1(coefs[i]));
- max98095_hw_write(codec, bq_reg++, M98095_BYTE0(coefs[i]));
+ snd_soc_write(codec, bq_reg++, M98095_BYTE1(coefs[i]));
+ snd_soc_write(codec, bq_reg++, M98095_BYTE0(coefs[i]));
}
}
@@ -1285,14 +1164,6 @@ static const struct snd_soc_dapm_route max98095_audio_map[] = {
{"MIC2 Input", NULL, "MIC2"},
};
-static int max98095_add_widgets(struct snd_soc_codec *codec)
-{
- snd_soc_add_codec_controls(codec, max98095_snd_controls,
- ARRAY_SIZE(max98095_snd_controls));
-
- return 0;
-}
-
/* codec mclk clock divider coefficients */
static const struct {
u32 rate;
@@ -1748,6 +1619,7 @@ static int max98095_dai3_set_fmt(struct snd_soc_dai *codec_dai,
static int max98095_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
int ret;
switch (level) {
@@ -1759,7 +1631,7 @@ static int max98095_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_STANDBY:
if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
- ret = snd_soc_cache_sync(codec);
+ ret = regcache_sync(max98095->regmap);
if (ret != 0) {
dev_err(codec->dev, "Failed to sync cache: %d\n", ret);
@@ -1774,7 +1646,7 @@ static int max98095_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_OFF:
snd_soc_update_bits(codec, M98095_090_PWR_EN_IN,
M98095_MBEN, 0);
- codec->cache_sync = 1;
+ regcache_mark_dirty(max98095->regmap);
break;
}
codec->dapm.bias_level = level;
@@ -2341,7 +2213,7 @@ static int max98095_reset(struct snd_soc_codec *codec)
/* Reset to hardware default for registers, as there is not
* a soft reset hardware control register */
for (i = M98095_010_HOST_INT_CFG; i < M98095_REG_MAX_CACHED; i++) {
- ret = snd_soc_write(codec, i, max98095_reg_def[i]);
+ ret = snd_soc_write(codec, i, snd_soc_read(codec, i));
if (ret < 0) {
dev_err(codec->dev, "Failed to reset: %d\n", ret);
return ret;
@@ -2358,7 +2230,7 @@ static int max98095_probe(struct snd_soc_codec *codec)
struct i2c_client *client;
int ret = 0;
- ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
+ ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
if (ret != 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
return ret;
@@ -2447,8 +2319,6 @@ static int max98095_probe(struct snd_soc_codec *codec)
snd_soc_update_bits(codec, M98095_097_PWR_SYS, M98095_SHDNRUN,
M98095_SHDNRUN);
- max98095_add_widgets(codec);
-
return 0;
err_irq:
@@ -2480,11 +2350,8 @@ static struct snd_soc_codec_driver soc_codec_dev_max98095 = {
.suspend = max98095_suspend,
.resume = max98095_resume,
.set_bias_level = max98095_set_bias_level,
- .reg_cache_size = ARRAY_SIZE(max98095_reg_def),
- .reg_word_size = sizeof(u8),
- .reg_cache_default = max98095_reg_def,
- .readable_register = max98095_readable,
- .volatile_register = max98095_volatile,
+ .controls = max98095_snd_controls,
+ .num_controls = ARRAY_SIZE(max98095_snd_controls),
.dapm_widgets = max98095_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(max98095_dapm_widgets),
.dapm_routes = max98095_audio_map,
@@ -2502,6 +2369,13 @@ static int max98095_i2c_probe(struct i2c_client *i2c,
if (max98095 == NULL)
return -ENOMEM;
+ max98095->regmap = devm_regmap_init_i2c(i2c, &max98095_regmap);
+ if (IS_ERR(max98095->regmap)) {
+ ret = PTR_ERR(max98095->regmap);
+ dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+
max98095->devtype = id->driver_data;
i2c_set_clientdata(i2c, max98095);
max98095->pdata = i2c->dev.platform_data;
diff --git a/sound/soc/codecs/max9850.c b/sound/soc/codecs/max9850.c
index 58c38a5b481c..c5dd61785f8d 100644
--- a/sound/soc/codecs/max9850.c
+++ b/sound/soc/codecs/max9850.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -27,18 +28,26 @@
#include "max9850.h"
struct max9850_priv {
+ struct regmap *regmap;
unsigned int sysclk;
};
/* max9850 register cache */
-static const u8 max9850_reg[MAX9850_CACHEREGNUM] = {
- 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+static const struct reg_default max9850_reg[] = {
+ { 2, 0x0c },
+ { 3, 0x00 },
+ { 4, 0x00 },
+ { 5, 0x00 },
+ { 6, 0x00 },
+ { 7, 0x00 },
+ { 8, 0x00 },
+ { 9, 0x00 },
+ { 10, 0x00 },
};
/* these registers are not used at the moment but provided for the sake of
* completeness */
-static int max9850_volatile_register(struct snd_soc_codec *codec,
- unsigned int reg)
+static bool max9850_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case MAX9850_STATUSA:
@@ -49,6 +58,15 @@ static int max9850_volatile_register(struct snd_soc_codec *codec,
}
}
+static const struct regmap_config max9850_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = MAX9850_DIGITAL_AUDIO,
+ .volatile_reg = max9850_volatile_register,
+ .cache_type = REGCACHE_RBTREE,
+};
+
static const unsigned int max9850_tlv[] = {
TLV_DB_RANGE_HEAD(4),
0x18, 0x1f, TLV_DB_SCALE_ITEM(-7450, 400, 0),
@@ -225,6 +243,7 @@ static int max9850_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
static int max9850_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ struct max9850_priv *max9850 = snd_soc_codec_get_drvdata(codec);
int ret;
switch (level) {
@@ -234,7 +253,7 @@ static int max9850_set_bias_level(struct snd_soc_codec *codec,
break;
case SND_SOC_BIAS_STANDBY:
if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
- ret = snd_soc_cache_sync(codec);
+ ret = regcache_sync(max9850->regmap);
if (ret) {
dev_err(codec->dev,
"Failed to sync cache: %d\n", ret);
@@ -295,7 +314,7 @@ static int max9850_probe(struct snd_soc_codec *codec)
{
int ret;
- ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
+ ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
if (ret < 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
return ret;
@@ -316,10 +335,6 @@ static struct snd_soc_codec_driver soc_codec_dev_max9850 = {
.suspend = max9850_suspend,
.resume = max9850_resume,
.set_bias_level = max9850_set_bias_level,
- .reg_cache_size = ARRAY_SIZE(max9850_reg),
- .reg_word_size = sizeof(u8),
- .reg_cache_default = max9850_reg,
- .volatile_register = max9850_volatile_register,
.controls = max9850_controls,
.num_controls = ARRAY_SIZE(max9850_controls),
@@ -340,6 +355,10 @@ static int max9850_i2c_probe(struct i2c_client *i2c,
if (max9850 == NULL)
return -ENOMEM;
+ max9850->regmap = devm_regmap_init_i2c(i2c, &max9850_regmap);
+ if (IS_ERR(max9850->regmap))
+ return PTR_ERR(max9850->regmap);
+
i2c_set_clientdata(i2c, max9850);
ret = snd_soc_register_codec(&i2c->dev,
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index ea141e1d6f28..bae60164c7b7 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -30,16 +30,10 @@
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/soc-dapm.h>
+#include <linux/regmap.h>
#include "mc13783.h"
-#define MC13783_AUDIO_RX0 36
-#define MC13783_AUDIO_RX1 37
-#define MC13783_AUDIO_TX 38
-#define MC13783_SSI_NETWORK 39
-#define MC13783_AUDIO_CODEC 40
-#define MC13783_AUDIO_DAC 41
-
#define AUDIO_RX0_ALSPEN (1 << 5)
#define AUDIO_RX0_ALSPSEL (1 << 7)
#define AUDIO_RX0_ADDCDC (1 << 21)
@@ -95,45 +89,12 @@
struct mc13783_priv {
struct mc13xxx *mc13xxx;
+ struct regmap *regmap;
enum mc13783_ssi_port adc_ssi_port;
enum mc13783_ssi_port dac_ssi_port;
};
-static unsigned int mc13783_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
- unsigned int value = 0;
-
- mc13xxx_lock(priv->mc13xxx);
-
- mc13xxx_reg_read(priv->mc13xxx, reg, &value);
-
- mc13xxx_unlock(priv->mc13xxx);
-
- return value;
-}
-
-static int mc13783_write(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value)
-{
- struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
- int ret;
-
- mc13xxx_lock(priv->mc13xxx);
-
- ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
-
- /* include errata fix for spi audio problems */
- if (reg == MC13783_AUDIO_CODEC || reg == MC13783_AUDIO_DAC)
- ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
-
- mc13xxx_unlock(priv->mc13xxx);
-
- return ret;
-}
-
/* Mapping between sample rates and register value */
static unsigned int mc13783_rates[] = {
8000, 11025, 12000, 16000,
@@ -382,7 +343,7 @@ static int mc13783_set_tdm_slot_dac(struct snd_soc_dai *dai,
break;
default:
return -EINVAL;
- };
+ }
snd_soc_update_bits(codec, MC13783_SSI_NETWORK, mask, val);
@@ -466,6 +427,29 @@ static const struct snd_kcontrol_new right_input_mux =
static const struct snd_kcontrol_new samp_ctl =
SOC_DAPM_SINGLE("Switch", MC13783_AUDIO_RX0, 3, 1, 0);
+static const char * const speaker_amp_source_text[] = {
+ "CODEC", "Right"
+};
+static const SOC_ENUM_SINGLE_DECL(speaker_amp_source, MC13783_AUDIO_RX0, 4,
+ speaker_amp_source_text);
+static const struct snd_kcontrol_new speaker_amp_source_mux =
+ SOC_DAPM_ENUM("Speaker Amp Source MUX", speaker_amp_source);
+
+static const char * const headset_amp_source_text[] = {
+ "CODEC", "Mixer"
+};
+
+static const SOC_ENUM_SINGLE_DECL(headset_amp_source, MC13783_AUDIO_RX0, 11,
+ headset_amp_source_text);
+static const struct snd_kcontrol_new headset_amp_source_mux =
+ SOC_DAPM_ENUM("Headset Amp Source MUX", headset_amp_source);
+
+static const struct snd_kcontrol_new cdcout_ctl =
+ SOC_DAPM_SINGLE("Switch", MC13783_AUDIO_RX0, 18, 1, 0);
+
+static const struct snd_kcontrol_new adc_bypass_ctl =
+ SOC_DAPM_SINGLE("Switch", MC13783_AUDIO_CODEC, 16, 1, 0);
+
static const struct snd_kcontrol_new lamp_ctl =
SOC_DAPM_SINGLE("Switch", MC13783_AUDIO_RX0, 5, 1, 0);
@@ -503,12 +487,22 @@ static const struct snd_soc_dapm_widget mc13783_dapm_widgets[] = {
SND_SOC_DAPM_VIRT_MUX("PGA Right Input Mux", SND_SOC_NOPM, 0, 0,
&right_input_mux),
+ SND_SOC_DAPM_MUX("Speaker Amp Source MUX", SND_SOC_NOPM, 0, 0,
+ &speaker_amp_source_mux),
+
+ SND_SOC_DAPM_MUX("Headset Amp Source MUX", SND_SOC_NOPM, 0, 0,
+ &headset_amp_source_mux),
+
SND_SOC_DAPM_PGA("PGA Left Input", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("PGA Right Input", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_ADC("ADC", "Capture", MC13783_AUDIO_CODEC, 11, 0),
SND_SOC_DAPM_SUPPLY("ADC_Reset", MC13783_AUDIO_CODEC, 15, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Voice CODEC PGA", MC13783_AUDIO_RX1, 0, 0, NULL, 0),
+ SND_SOC_DAPM_SWITCH("Voice CODEC Bypass", MC13783_AUDIO_CODEC, 16, 0,
+ &adc_bypass_ctl),
+
/* Output */
SND_SOC_DAPM_SUPPLY("DAC_E", MC13783_AUDIO_DAC, 11, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("DAC_Reset", MC13783_AUDIO_DAC, 15, 0, NULL, 0),
@@ -516,10 +510,15 @@ static const struct snd_soc_dapm_widget mc13783_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("RXOUTR"),
SND_SOC_DAPM_OUTPUT("HSL"),
SND_SOC_DAPM_OUTPUT("HSR"),
+ SND_SOC_DAPM_OUTPUT("LSPL"),
SND_SOC_DAPM_OUTPUT("LSP"),
SND_SOC_DAPM_OUTPUT("SP"),
+ SND_SOC_DAPM_OUTPUT("CDCOUT"),
- SND_SOC_DAPM_SWITCH("Speaker Amp", MC13783_AUDIO_RX0, 3, 0, &samp_ctl),
+ SND_SOC_DAPM_SWITCH("CDCOUT Switch", MC13783_AUDIO_RX0, 18, 0,
+ &cdcout_ctl),
+ SND_SOC_DAPM_SWITCH("Speaker Amp Switch", MC13783_AUDIO_RX0, 3, 0,
+ &samp_ctl),
SND_SOC_DAPM_SWITCH("Loudspeaker Amp", SND_SOC_NOPM, 0, 0, &lamp_ctl),
SND_SOC_DAPM_SWITCH("Headset Amp Left", MC13783_AUDIO_RX0, 10, 0,
&hlamp_ctl),
@@ -554,20 +553,28 @@ static struct snd_soc_dapm_route mc13783_routes[] = {
{ "ADC", NULL, "PGA Right Input"},
{ "ADC", NULL, "ADC_Reset"},
+ { "Voice CODEC PGA", "Voice CODEC Bypass", "ADC" },
+
+ { "Speaker Amp Source MUX", "CODEC", "Voice CODEC PGA"},
+ { "Speaker Amp Source MUX", "Right", "DAC PGA"},
+
+ { "Headset Amp Source MUX", "CODEC", "Voice CODEC PGA"},
+ { "Headset Amp Source MUX", "Mixer", "DAC PGA"},
+
/* Output */
{ "HSL", NULL, "Headset Amp Left" },
{ "HSR", NULL, "Headset Amp Right"},
{ "RXOUTL", NULL, "Line out Amp Left"},
{ "RXOUTR", NULL, "Line out Amp Right"},
- { "SP", NULL, "Speaker Amp"},
- { "Speaker Amp", NULL, "DAC PGA"},
- { "LSP", NULL, "DAC PGA"},
- { "Headset Amp Left", NULL, "DAC PGA"},
- { "Headset Amp Right", NULL, "DAC PGA"},
+ { "SP", "Speaker Amp Switch", "Speaker Amp Source MUX"},
+ { "LSP", "Loudspeaker Amp", "Speaker Amp Source MUX"},
+ { "HSL", "Headset Amp Left", "Headset Amp Source MUX"},
+ { "HSR", "Headset Amp Right", "Headset Amp Source MUX"},
{ "Line out Amp Left", NULL, "DAC PGA"},
{ "Line out Amp Right", NULL, "DAC PGA"},
{ "DAC PGA", NULL, "DAC"},
{ "DAC", NULL, "DAC_E"},
+ { "CDCOUT", "CDCOUT Switch", "Voice CODEC PGA"},
};
static const char * const mc13783_3d_mixer[] = {"Stereo", "Phase Mix",
@@ -580,15 +587,39 @@ static const struct soc_enum mc13783_enum_3d_mixer =
static struct snd_kcontrol_new mc13783_control_list[] = {
SOC_SINGLE("Loudspeaker enable", MC13783_AUDIO_RX0, 5, 1, 0),
SOC_SINGLE("PCM Playback Volume", MC13783_AUDIO_RX1, 6, 15, 0),
+ SOC_SINGLE("PCM Playback Switch", MC13783_AUDIO_RX1, 5, 1, 0),
SOC_DOUBLE("PCM Capture Volume", MC13783_AUDIO_TX, 19, 14, 31, 0),
SOC_ENUM("3D Control", mc13783_enum_3d_mixer),
+
+ SOC_SINGLE("CDCOUT Switch", MC13783_AUDIO_RX0, 18, 1, 0),
+ SOC_SINGLE("Earpiece Amp Switch", MC13783_AUDIO_RX0, 3, 1, 0),
+ SOC_DOUBLE("Headset Amp Switch", MC13783_AUDIO_RX0, 10, 9, 1, 0),
+ SOC_DOUBLE("Line out Amp Switch", MC13783_AUDIO_RX0, 16, 15, 1, 0),
+
+ SOC_SINGLE("PCM Capture Mixin Switch", MC13783_AUDIO_RX0, 22, 1, 0),
+ SOC_SINGLE("Line in Capture Mixin Switch", MC13783_AUDIO_RX0, 23, 1, 0),
+
+ SOC_SINGLE("CODEC Capture Volume", MC13783_AUDIO_RX1, 1, 15, 0),
+ SOC_SINGLE("CODEC Capture Mixin Switch", MC13783_AUDIO_RX0, 21, 1, 0),
+
+ SOC_SINGLE("Line in Capture Volume", MC13783_AUDIO_RX1, 12, 15, 0),
+ SOC_SINGLE("Line in Capture Switch", MC13783_AUDIO_RX1, 10, 1, 0),
+
+ SOC_SINGLE("MC1 Capture Bias Switch", MC13783_AUDIO_TX, 0, 1, 0),
+ SOC_SINGLE("MC2 Capture Bias Switch", MC13783_AUDIO_TX, 1, 1, 0),
};
static int mc13783_probe(struct snd_soc_codec *codec)
{
struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
+ int ret;
- mc13xxx_lock(priv->mc13xxx);
+ codec->control_data = dev_get_regmap(codec->dev->parent, NULL);
+ ret = snd_soc_codec_set_cache_io(codec, 8, 24, SND_SOC_REGMAP);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ return ret;
+ }
/* these are the reset values */
mc13xxx_reg_write(priv->mc13xxx, MC13783_AUDIO_RX0, 0x25893);
@@ -612,8 +643,6 @@ static int mc13783_probe(struct snd_soc_codec *codec)
mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
0, AUDIO_SSI_SEL);
- mc13xxx_unlock(priv->mc13xxx);
-
return 0;
}
@@ -621,13 +650,9 @@ static int mc13783_remove(struct snd_soc_codec *codec)
{
struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
- mc13xxx_lock(priv->mc13xxx);
-
/* Make sure VAUDIOON is off */
mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_RX0, 0x3, 0);
- mc13xxx_unlock(priv->mc13xxx);
-
return 0;
}
@@ -717,8 +742,6 @@ static struct snd_soc_dai_driver mc13783_dai_sync[] = {
static struct snd_soc_codec_driver soc_codec_dev_mc13783 = {
.probe = mc13783_probe,
.remove = mc13783_remove,
- .read = mc13783_read,
- .write = mc13783_write,
.controls = mc13783_control_list,
.num_controls = ARRAY_SIZE(mc13783_control_list),
.dapm_widgets = mc13783_dapm_widgets,
diff --git a/sound/soc/codecs/ml26124.c b/sound/soc/codecs/ml26124.c
index 26118828782b..185fa3bc3052 100644
--- a/sound/soc/codecs/ml26124.c
+++ b/sound/soc/codecs/ml26124.c
@@ -342,6 +342,8 @@ static int ml26124_hw_params(struct snd_pcm_substream *substream,
struct ml26124_priv *priv = snd_soc_codec_get_drvdata(codec);
int i = get_coeff(priv->mclk, params_rate(hw_params));
+ if (i < 0)
+ return i;
priv->substream = substream;
priv->rate = params_rate(hw_params);
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index 651ce0923675..73f9c3630e2c 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -21,6 +21,7 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <sound/pcm.h>
@@ -270,7 +271,7 @@ MODULE_DEVICE_TABLE(of, pcm1681_dt_ids);
static const struct regmap_config pcm1681_regmap = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = ARRAY_SIZE(pcm1681_reg_defaults) + 1,
+ .max_register = 0x13,
.reg_defaults = pcm1681_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(pcm1681_reg_defaults),
.writeable_reg = pcm1681_writeable_reg,
diff --git a/sound/soc/codecs/pcm1792a.c b/sound/soc/codecs/pcm1792a.c
index 2a8eccf64c76..7146653a8e16 100644
--- a/sound/soc/codecs/pcm1792a.c
+++ b/sound/soc/codecs/pcm1792a.c
@@ -28,6 +28,7 @@
#include <sound/initval.h>
#include <sound/soc.h>
#include <sound/tlv.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include "pcm1792a.h"
@@ -188,7 +189,7 @@ MODULE_DEVICE_TABLE(of, pcm1792a_of_match);
static const struct regmap_config pcm1792a_regmap = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 24,
+ .max_register = 23,
.reg_defaults = pcm1792a_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(pcm1792a_reg_defaults),
.writeable_reg = pcm1792a_writeable_reg,
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index c26a8f814b18..a3fb41179636 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -21,6 +21,7 @@
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
+#include <linux/acpi.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -926,7 +927,7 @@ static int rt5640_set_dmic2_event(struct snd_soc_dapm_widget *w,
return 0;
}
-void hp_amp_power_on(struct snd_soc_codec *codec)
+static void hp_amp_power_on(struct snd_soc_codec *codec)
{
struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
@@ -1603,13 +1604,14 @@ static int rt5640_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_codec *codec = rtd->codec;
struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
- unsigned int val_len = 0, val_clk, mask_clk, dai_sel;
- int pre_div, bclk_ms, frame_size;
+ unsigned int val_len = 0, val_clk, mask_clk;
+ int dai_sel, pre_div, bclk_ms, frame_size;
rt5640->lrck[dai->id] = params_rate(params);
pre_div = get_clk_info(rt5640->sysclk, rt5640->lrck[dai->id]);
if (pre_div < 0) {
- dev_err(codec->dev, "Unsupported clock setting\n");
+ dev_err(codec->dev, "Unsupported clock setting %d for DAI %d\n",
+ rt5640->lrck[dai->id], dai->id);
return -EINVAL;
}
frame_size = snd_soc_params_to_frame_size(params);
@@ -1673,7 +1675,8 @@ static int rt5640_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct snd_soc_codec *codec = dai->codec;
struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
- unsigned int reg_val = 0, dai_sel;
+ unsigned int reg_val = 0;
+ int dai_sel;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
@@ -1977,13 +1980,20 @@ static int rt5640_suspend(struct snd_soc_codec *codec)
rt5640_reset(codec);
regcache_cache_only(rt5640->regmap, true);
regcache_mark_dirty(rt5640->regmap);
+ if (gpio_is_valid(rt5640->pdata.ldo1_en))
+ gpio_set_value_cansleep(rt5640->pdata.ldo1_en, 0);
return 0;
}
static int rt5640_resume(struct snd_soc_codec *codec)
{
- rt5640_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+
+ if (gpio_is_valid(rt5640->pdata.ldo1_en)) {
+ gpio_set_value_cansleep(rt5640->pdata.ldo1_en, 1);
+ msleep(400);
+ }
return 0;
}
@@ -2080,6 +2090,14 @@ static const struct i2c_device_id rt5640_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, rt5640_i2c_id);
+#ifdef CONFIG_ACPI
+static struct acpi_device_id rt5640_acpi_match[] = {
+ { "INT33CA", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match);
+#endif
+
static int rt5640_parse_dt(struct rt5640_priv *rt5640, struct device_node *np)
{
rt5640->pdata.in1_diff = of_property_read_bool(np,
@@ -2199,6 +2217,7 @@ static struct i2c_driver rt5640_i2c_driver = {
.driver = {
.name = "rt5640",
.owner = THIS_MODULE,
+ .acpi_match_table = ACPI_PTR(rt5640_acpi_match),
},
.probe = rt5640_i2c_probe,
.remove = rt5640_i2c_remove,
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 38f3b105c17d..52e7cb08434b 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -60,48 +60,6 @@ enum si476x_pcm_format {
SI476X_PCM_FORMAT_S24_LE = 6,
};
-static unsigned int si476x_codec_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- int err;
- unsigned int val;
- struct si476x_core *core = codec->control_data;
-
- si476x_core_lock(core);
- if (!si476x_core_is_powered_up(core))
- regcache_cache_only(core->regmap, true);
-
- err = regmap_read(core->regmap, reg, &val);
-
- if (!si476x_core_is_powered_up(core))
- regcache_cache_only(core->regmap, false);
- si476x_core_unlock(core);
-
- if (err < 0)
- return err;
-
- return val;
-}
-
-static int si476x_codec_write(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int val)
-{
- int err;
- struct si476x_core *core = codec->control_data;
-
- si476x_core_lock(core);
- if (!si476x_core_is_powered_up(core))
- regcache_cache_only(core->regmap, true);
-
- err = regmap_write(core->regmap, reg, val);
-
- if (!si476x_core_is_powered_up(core))
- regcache_cache_only(core->regmap, false);
- si476x_core_unlock(core);
-
- return err;
-}
-
static const struct snd_soc_dapm_widget si476x_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("LOUT"),
SND_SOC_DAPM_OUTPUT("ROUT"),
@@ -115,6 +73,7 @@ static const struct snd_soc_dapm_route si476x_dapm_routes[] = {
static int si476x_codec_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
+ struct si476x_core *core = i2c_mfd_cell_to_core(codec_dai->dev);
int err;
u16 format = 0;
@@ -178,9 +137,14 @@ static int si476x_codec_set_dai_fmt(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
+ si476x_core_lock(core);
+
err = snd_soc_update_bits(codec_dai->codec, SI476X_DIGITAL_IO_OUTPUT_FORMAT,
SI476X_DIGITAL_IO_OUTPUT_FORMAT_MASK,
format);
+
+ si476x_core_unlock(core);
+
if (err < 0) {
dev_err(codec_dai->codec->dev, "Failed to set output format\n");
return err;
@@ -193,6 +157,7 @@ static int si476x_codec_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
+ struct si476x_core *core = i2c_mfd_cell_to_core(dai->dev);
int rate, width, err;
rate = params_rate(params);
@@ -218,11 +183,13 @@ static int si476x_codec_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ si476x_core_lock(core);
+
err = snd_soc_write(dai->codec, SI476X_DIGITAL_IO_OUTPUT_SAMPLE_RATE,
rate);
if (err < 0) {
dev_err(dai->codec->dev, "Failed to set sample rate\n");
- return err;
+ goto out;
}
err = snd_soc_update_bits(dai->codec, SI476X_DIGITAL_IO_OUTPUT_FORMAT,
@@ -231,15 +198,18 @@ static int si476x_codec_hw_params(struct snd_pcm_substream *substream,
(width << SI476X_DIGITAL_IO_SAMPLE_SIZE_SHIFT));
if (err < 0) {
dev_err(dai->codec->dev, "Failed to set output width\n");
- return err;
+ goto out;
}
- return 0;
+out:
+ si476x_core_unlock(core);
+
+ return err;
}
static int si476x_codec_probe(struct snd_soc_codec *codec)
{
- codec->control_data = i2c_mfd_cell_to_core(codec->dev);
+ codec->control_data = dev_get_regmap(codec->dev->parent, NULL);
return 0;
}
@@ -268,8 +238,6 @@ static struct snd_soc_dai_driver si476x_dai = {
static struct snd_soc_codec_driver soc_codec_dev_si476x = {
.probe = si476x_codec_probe,
- .read = si476x_codec_read,
- .write = si476x_codec_write,
.dapm_widgets = si476x_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(si476x_dapm_widgets),
.dapm_routes = si476x_dapm_routes,
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
index dba26e63844e..13045f2af4d3 100644
--- a/sound/soc/codecs/sn95031.c
+++ b/sound/soc/codecs/sn95031.c
@@ -164,30 +164,28 @@ static unsigned int sn95031_get_mic_bias(struct snd_soc_codec *codec)
}
/*end - adc helper functions */
-static inline unsigned int sn95031_read(struct snd_soc_codec *codec,
- unsigned int reg)
+static int sn95031_read(void *ctx, unsigned int reg, unsigned int *val)
{
u8 value = 0;
int ret;
ret = intel_scu_ipc_ioread8(reg, &value);
- if (ret)
- pr_err("read of %x failed, err %d\n", reg, ret);
- return value;
+ if (ret == 0)
+ *val = value;
+ return ret;
}
-static inline int sn95031_write(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value)
+static int sn95031_write(void *ctx, unsigned int reg, unsigned int value)
{
- int ret;
-
- ret = intel_scu_ipc_iowrite8(reg, value);
- if (ret)
- pr_err("write of %x failed, err %d\n", reg, ret);
- return ret;
+ return intel_scu_ipc_iowrite8(reg, value);
}
+static const struct regmap_config sn95031_regmap = {
+ .reg_read = sn95031_read,
+ .reg_write = sn95031_write,
+};
+
static int sn95031_set_vaud_bias(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
@@ -827,6 +825,8 @@ static int sn95031_codec_probe(struct snd_soc_codec *codec)
{
pr_debug("codec_probe called\n");
+ snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
+
/* PCM interface config
* This sets the pcm rx slot configuration to max 6 slots
* for max 4 dais (2 stereo and 2 mono)
@@ -886,8 +886,6 @@ static int sn95031_codec_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver sn95031_codec = {
.probe = sn95031_codec_probe,
.remove = sn95031_codec_remove,
- .read = sn95031_read,
- .write = sn95031_write,
.set_bias_level = sn95031_set_vaud_bias,
.idle_bias_off = true,
.dapm_widgets = sn95031_dapm_widgets,
@@ -898,7 +896,14 @@ static struct snd_soc_codec_driver sn95031_codec = {
static int sn95031_device_probe(struct platform_device *pdev)
{
+ struct regmap *regmap;
+
pr_debug("codec device probe called for %s\n", dev_name(&pdev->dev));
+
+ regmap = devm_regmap_init(&pdev->dev, NULL, NULL, &sn95031_regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
return snd_soc_register_codec(&pdev->dev, &sn95031_codec,
sn95031_dais, ARRAY_SIZE(sn95031_dais));
}
diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c
index 6d31d88f7204..a895a5e4bdf2 100644
--- a/sound/soc/codecs/tas5086.c
+++ b/sound/soc/codecs/tas5086.c
@@ -37,6 +37,7 @@
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <sound/pcm.h>
@@ -244,6 +245,8 @@ struct tas5086_private {
unsigned int mclk, sclk;
unsigned int format;
bool deemph;
+ unsigned int charge_period;
+ unsigned int pwm_start_mid_z;
/* Current sample rate for de-emphasis control */
int rate;
/* GPIO driving Reset pin, if any */
@@ -429,7 +432,7 @@ static int tas5086_hw_params(struct snd_pcm_substream *substream,
default:
dev_err(codec->dev, "Invalid bit width\n");
return -EINVAL;
- };
+ }
ret = regmap_write(priv->regmap, TAS5086_SERIAL_DATA_IF, val);
if (ret < 0)
@@ -456,6 +459,75 @@ static int tas5086_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
return regmap_write(priv->regmap, TAS5086_SOFT_MUTE, val);
}
+static void tas5086_reset(struct tas5086_private *priv)
+{
+ if (gpio_is_valid(priv->gpio_nreset)) {
+ /* Reset codec - minimum assertion time is 400ns */
+ gpio_direction_output(priv->gpio_nreset, 0);
+ udelay(1);
+ gpio_set_value(priv->gpio_nreset, 1);
+
+ /* Codec needs ~15ms to wake up */
+ msleep(15);
+ }
+}
+
+/* charge period values in microseconds */
+static const int tas5086_charge_period[] = {
+ 13000, 16900, 23400, 31200, 41600, 54600, 72800, 96200,
+ 130000, 156000, 234000, 312000, 416000, 546000, 728000, 962000,
+ 1300000, 169000, 2340000, 3120000, 4160000, 5460000, 7280000, 9620000,
+};
+
+static int tas5086_init(struct device *dev, struct tas5086_private *priv)
+{
+ int ret, i;
+
+ /*
+ * If any of the channels is configured to start in Mid-Z mode,
+ * configure 'part 1' of the PWM starts to use Mid-Z, and tell
+ * all configured mid-z channels to start under 'part 1'.
+ */
+ if (priv->pwm_start_mid_z)
+ regmap_write(priv->regmap, TAS5086_PWM_START,
+ TAS5086_PWM_START_MIDZ_FOR_START_1 |
+ priv->pwm_start_mid_z);
+
+ /* lookup and set split-capacitor charge period */
+ if (priv->charge_period == 0) {
+ regmap_write(priv->regmap, TAS5086_SPLIT_CAP_CHARGE, 0);
+ } else {
+ i = index_in_array(tas5086_charge_period,
+ ARRAY_SIZE(tas5086_charge_period),
+ priv->charge_period);
+ if (i >= 0)
+ regmap_write(priv->regmap, TAS5086_SPLIT_CAP_CHARGE,
+ i + 0x08);
+ else
+ dev_warn(dev,
+ "Invalid split-cap charge period of %d ns.\n",
+ priv->charge_period);
+ }
+
+ /* enable factory trim */
+ ret = regmap_write(priv->regmap, TAS5086_OSC_TRIM, 0x00);
+ if (ret < 0)
+ return ret;
+
+ /* start all channels */
+ ret = regmap_write(priv->regmap, TAS5086_SYS_CONTROL_2, 0x20);
+ if (ret < 0)
+ return ret;
+
+ /* mute all channels for now */
+ ret = regmap_write(priv->regmap, TAS5086_SOFT_MUTE,
+ TAS5086_SOFT_MUTE_ALL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
/* TAS5086 controls */
static const DECLARE_TLV_DB_SCALE(tas5086_dac_tlv, -10350, 50, 1);
@@ -691,14 +763,39 @@ static struct snd_soc_dai_driver tas5086_dai = {
};
#ifdef CONFIG_PM
+static int tas5086_soc_suspend(struct snd_soc_codec *codec)
+{
+ struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ /* Shut down all channels */
+ ret = regmap_write(priv->regmap, TAS5086_SYS_CONTROL_2, 0x60);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int tas5086_soc_resume(struct snd_soc_codec *codec)
{
struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ tas5086_reset(priv);
+ regcache_mark_dirty(priv->regmap);
+
+ ret = tas5086_init(codec->dev, priv);
+ if (ret < 0)
+ return ret;
+
+ ret = regcache_sync(priv->regmap);
+ if (ret < 0)
+ return ret;
- /* Restore codec state */
- return regcache_sync(priv->regmap);
+ return 0;
}
#else
+#define tas5086_soc_suspend NULL
#define tas5086_soc_resume NULL
#endif /* CONFIG_PM */
@@ -710,23 +807,19 @@ static const struct of_device_id tas5086_dt_ids[] = {
MODULE_DEVICE_TABLE(of, tas5086_dt_ids);
#endif
-/* charge period values in microseconds */
-static const int tas5086_charge_period[] = {
- 13000, 16900, 23400, 31200, 41600, 54600, 72800, 96200,
- 130000, 156000, 234000, 312000, 416000, 546000, 728000, 962000,
- 1300000, 169000, 2340000, 3120000, 4160000, 5460000, 7280000, 9620000,
-};
-
static int tas5086_probe(struct snd_soc_codec *codec)
{
struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
- int charge_period = 1300000; /* hardware default is 1300 ms */
- u8 pwm_start_mid_z = 0;
int i, ret;
+ priv->pwm_start_mid_z = 0;
+ priv->charge_period = 1300000; /* hardware default is 1300 ms */
+
if (of_match_device(of_match_ptr(tas5086_dt_ids), codec->dev)) {
struct device_node *of_node = codec->dev->of_node;
- of_property_read_u32(of_node, "ti,charge-period", &charge_period);
+
+ of_property_read_u32(of_node, "ti,charge-period",
+ &priv->charge_period);
for (i = 0; i < 6; i++) {
char name[25];
@@ -735,43 +828,11 @@ static int tas5086_probe(struct snd_soc_codec *codec)
"ti,mid-z-channel-%d", i + 1);
if (of_get_property(of_node, name, NULL) != NULL)
- pwm_start_mid_z |= 1 << i;
+ priv->pwm_start_mid_z |= 1 << i;
}
}
- /*
- * If any of the channels is configured to start in Mid-Z mode,
- * configure 'part 1' of the PWM starts to use Mid-Z, and tell
- * all configured mid-z channels to start start under 'part 1'.
- */
- if (pwm_start_mid_z)
- regmap_write(priv->regmap, TAS5086_PWM_START,
- TAS5086_PWM_START_MIDZ_FOR_START_1 |
- pwm_start_mid_z);
-
- /* lookup and set split-capacitor charge period */
- if (charge_period == 0) {
- regmap_write(priv->regmap, TAS5086_SPLIT_CAP_CHARGE, 0);
- } else {
- i = index_in_array(tas5086_charge_period,
- ARRAY_SIZE(tas5086_charge_period),
- charge_period);
- if (i >= 0)
- regmap_write(priv->regmap, TAS5086_SPLIT_CAP_CHARGE,
- i + 0x08);
- else
- dev_warn(codec->dev,
- "Invalid split-cap charge period of %d ns.\n",
- charge_period);
- }
-
- /* enable factory trim */
- ret = regmap_write(priv->regmap, TAS5086_OSC_TRIM, 0x00);
- if (ret < 0)
- return ret;
-
- /* start all channels */
- ret = regmap_write(priv->regmap, TAS5086_SYS_CONTROL_2, 0x20);
+ ret = tas5086_init(codec->dev, priv);
if (ret < 0)
return ret;
@@ -780,12 +841,6 @@ static int tas5086_probe(struct snd_soc_codec *codec)
if (ret < 0)
return ret;
- /* mute all channels for now */
- ret = regmap_write(priv->regmap, TAS5086_SOFT_MUTE,
- TAS5086_SOFT_MUTE_ALL);
- if (ret < 0)
- return ret;
-
return 0;
}
@@ -803,6 +858,7 @@ static int tas5086_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_tas5086 = {
.probe = tas5086_probe,
.remove = tas5086_remove,
+ .suspend = tas5086_soc_suspend,
.resume = tas5086_soc_resume,
.controls = tas5086_controls,
.num_controls = ARRAY_SIZE(tas5086_controls),
@@ -862,17 +918,8 @@ static int tas5086_i2c_probe(struct i2c_client *i2c,
if (devm_gpio_request(dev, gpio_nreset, "TAS5086 Reset"))
gpio_nreset = -EINVAL;
- if (gpio_is_valid(gpio_nreset)) {
- /* Reset codec - minimum assertion time is 400ns */
- gpio_direction_output(gpio_nreset, 0);
- udelay(1);
- gpio_set_value(gpio_nreset, 1);
-
- /* Codec needs ~15ms to wake up */
- msleep(15);
- }
-
priv->gpio_nreset = gpio_nreset;
+ tas5086_reset(priv);
/* The TAS5086 always returns 0x03 in its TAS5086_DEV_ID register */
ret = regmap_read(priv->regmap, TAS5086_DEV_ID, &i);
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index 31762ebdd774..5d430cc56f51 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -37,11 +38,27 @@
/*
* AIC23 register cache
*/
-static const u16 tlv320aic23_reg[] = {
- 0x0097, 0x0097, 0x00F9, 0x00F9, /* 0 */
- 0x001A, 0x0004, 0x0007, 0x0001, /* 4 */
- 0x0020, 0x0000, 0x0000, 0x0000, /* 8 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 12 */
+static const struct reg_default tlv320aic23_reg[] = {
+ { 0, 0x0097 },
+ { 1, 0x0097 },
+ { 2, 0x00F9 },
+ { 3, 0x00F9 },
+ { 4, 0x001A },
+ { 5, 0x0004 },
+ { 6, 0x0007 },
+ { 7, 0x0001 },
+ { 8, 0x0020 },
+ { 9, 0x0000 },
+};
+
+static const struct regmap_config tlv320aic23_regmap = {
+ .reg_bits = 7,
+ .val_bits = 9,
+
+ .max_register = TLV320AIC23_RESET,
+ .reg_defaults = tlv320aic23_reg,
+ .num_reg_defaults = ARRAY_SIZE(tlv320aic23_reg),
+ .cache_type = REGCACHE_RBTREE,
};
static const char *rec_src_text[] = { "Line", "Mic" };
@@ -171,7 +188,7 @@ static const struct snd_soc_dapm_route tlv320aic23_intercon[] = {
/* AIC23 driver data */
struct aic23 {
- enum snd_soc_control_type control_type;
+ struct regmap *regmap;
int mclk;
int requested_adc;
int requested_dac;
@@ -532,7 +549,9 @@ static int tlv320aic23_suspend(struct snd_soc_codec *codec)
static int tlv320aic23_resume(struct snd_soc_codec *codec)
{
- snd_soc_cache_sync(codec);
+ struct aic23 *aic23 = snd_soc_codec_get_drvdata(codec);
+ regcache_mark_dirty(aic23->regmap);
+ regcache_sync(aic23->regmap);
tlv320aic23_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
@@ -540,10 +559,9 @@ static int tlv320aic23_resume(struct snd_soc_codec *codec)
static int tlv320aic23_probe(struct snd_soc_codec *codec)
{
- struct aic23 *aic23 = snd_soc_codec_get_drvdata(codec);
int ret;
- ret = snd_soc_codec_set_cache_io(codec, 7, 9, aic23->control_type);
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP);
if (ret < 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
return ret;
@@ -552,16 +570,6 @@ static int tlv320aic23_probe(struct snd_soc_codec *codec)
/* Reset codec */
snd_soc_write(codec, TLV320AIC23_RESET, 0);
- /* Write the register default value to cache for reserved registers,
- * so the write to the these registers are suppressed by the cache
- * restore code when it skips writes of default registers.
- */
- snd_soc_cache_write(codec, 0x0A, 0);
- snd_soc_cache_write(codec, 0x0B, 0);
- snd_soc_cache_write(codec, 0x0C, 0);
- snd_soc_cache_write(codec, 0x0D, 0);
- snd_soc_cache_write(codec, 0x0E, 0);
-
/* power on device */
tlv320aic23_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -586,9 +594,6 @@ static int tlv320aic23_probe(struct snd_soc_codec *codec)
snd_soc_write(codec, TLV320AIC23_ACTIVE, 0x1);
- snd_soc_add_codec_controls(codec, tlv320aic23_snd_controls,
- ARRAY_SIZE(tlv320aic23_snd_controls));
-
return 0;
}
@@ -599,21 +604,19 @@ static int tlv320aic23_remove(struct snd_soc_codec *codec)
}
static struct snd_soc_codec_driver soc_codec_dev_tlv320aic23 = {
- .reg_cache_size = ARRAY_SIZE(tlv320aic23_reg),
- .reg_word_size = sizeof(u16),
- .reg_cache_default = tlv320aic23_reg,
.probe = tlv320aic23_probe,
.remove = tlv320aic23_remove,
.suspend = tlv320aic23_suspend,
.resume = tlv320aic23_resume,
.set_bias_level = tlv320aic23_set_bias_level,
+ .controls = tlv320aic23_snd_controls,
+ .num_controls = ARRAY_SIZE(tlv320aic23_snd_controls),
.dapm_widgets = tlv320aic23_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(tlv320aic23_dapm_widgets),
.dapm_routes = tlv320aic23_intercon,
.num_dapm_routes = ARRAY_SIZE(tlv320aic23_intercon),
};
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/*
* If the i2c layer weren't so broken, we could pass this kind of data
* around
@@ -631,8 +634,11 @@ static int tlv320aic23_codec_probe(struct i2c_client *i2c,
if (aic23 == NULL)
return -ENOMEM;
+ aic23->regmap = devm_regmap_init_i2c(i2c, &tlv320aic23_regmap);
+ if (IS_ERR(aic23->regmap))
+ return PTR_ERR(aic23->regmap);
+
i2c_set_clientdata(i2c, aic23);
- aic23->control_type = SND_SOC_I2C;
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_tlv320aic23, &tlv320aic23_dai, 1);
@@ -660,29 +666,7 @@ static struct i2c_driver tlv320aic23_i2c_driver = {
.id_table = tlv320aic23_id,
};
-#endif
-
-static int __init tlv320aic23_modinit(void)
-{
- int ret;
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
- ret = i2c_add_driver(&tlv320aic23_i2c_driver);
- if (ret != 0) {
- printk(KERN_ERR "Failed to register TLV320AIC23 I2C driver: %d\n",
- ret);
- }
-#endif
- return ret;
-}
-module_init(tlv320aic23_modinit);
-
-static void __exit tlv320aic23_exit(void)
-{
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
- i2c_del_driver(&tlv320aic23_i2c_driver);
-#endif
-}
-module_exit(tlv320aic23_exit);
+module_i2c_driver(tlv320aic23_i2c_driver);
MODULE_DESCRIPTION("ASoC TLV320AIC23 codec driver");
MODULE_AUTHOR("Arun KS <arunks@mistralsolutions.com>");
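
Editor's note: the tlv320aic23 conversion above is the template for most of this series: the flat u16 cache array becomes a reg_default table, the bus is wrapped with devm_regmap_init_i2c(), and resume just marks the cache dirty and re-syncs it. A hedged sketch of that pattern for a hypothetical 'foo' codec (register addresses and default values are invented):

    #include <linux/err.h>
    #include <linux/i2c.h>
    #include <linux/kernel.h>
    #include <linux/regmap.h>

    #define FOO_REG_POWER   0x02    /* hypothetical register addresses */
    #define FOO_REG_MAX     0x0f

    static const struct reg_default foo_reg_defaults[] = {
            { FOO_REG_POWER, 0x0001 },      /* invented default value */
    };

    static const struct regmap_config foo_regmap_cfg = {
            .reg_bits = 7,
            .val_bits = 9,
            .max_register = FOO_REG_MAX,
            .reg_defaults = foo_reg_defaults,
            .num_reg_defaults = ARRAY_SIZE(foo_reg_defaults),
            .cache_type = REGCACHE_RBTREE,
    };

    static int foo_i2c_probe(struct i2c_client *i2c,
                             const struct i2c_device_id *id)
    {
            struct regmap *map = devm_regmap_init_i2c(i2c, &foo_regmap_cfg);

            if (IS_ERR(map))
                    return PTR_ERR(map);
            i2c_set_clientdata(i2c, map);
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            struct regmap *map = i2c_get_clientdata(to_i2c_client(dev));

            /* Assume register contents were lost; replay non-default values. */
            regcache_mark_dirty(map);
            return regcache_sync(map);
    }
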
diff --git a/sound/soc/codecs/tlv320aic26.c b/sound/soc/codecs/tlv320aic26.c
index 7b8f3d965f43..94a658fa6d97 100644
--- a/sound/soc/codecs/tlv320aic26.c
+++ b/sound/soc/codecs/tlv320aic26.c
@@ -29,6 +29,7 @@ MODULE_LICENSE("GPL");
/* AIC26 driver private data */
struct aic26 {
struct spi_device *spi;
+ struct regmap *regmap;
struct snd_soc_codec *codec;
int master;
int datfm;
@@ -40,85 +41,6 @@ struct aic26 {
int keyclick_len;
};
-/* ---------------------------------------------------------------------
- * Register access routines
- */
-static unsigned int aic26_reg_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
- u16 *cache = codec->reg_cache;
- u16 cmd, value;
- u8 buffer[2];
- int rc;
-
- if (reg >= AIC26_NUM_REGS) {
- WARN_ON_ONCE(1);
- return 0;
- }
-
- /* Do SPI transfer; first 16bits are command; remaining is
- * register contents */
- cmd = AIC26_READ_COMMAND_WORD(reg);
- buffer[0] = (cmd >> 8) & 0xff;
- buffer[1] = cmd & 0xff;
- rc = spi_write_then_read(aic26->spi, buffer, 2, buffer, 2);
- if (rc) {
- dev_err(&aic26->spi->dev, "AIC26 reg read error\n");
- return -EIO;
- }
- value = (buffer[0] << 8) | buffer[1];
-
- /* Update the cache before returning with the value */
- cache[reg] = value;
- return value;
-}
-
-static unsigned int aic26_reg_read_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
-
- if (reg >= AIC26_NUM_REGS) {
- WARN_ON_ONCE(1);
- return 0;
- }
-
- return cache[reg];
-}
-
-static int aic26_reg_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
- u16 *cache = codec->reg_cache;
- u16 cmd;
- u8 buffer[4];
- int rc;
-
- if (reg >= AIC26_NUM_REGS) {
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
- /* Do SPI transfer; first 16bits are command; remaining is data
- * to write into register */
- cmd = AIC26_WRITE_COMMAND_WORD(reg);
- buffer[0] = (cmd >> 8) & 0xff;
- buffer[1] = cmd & 0xff;
- buffer[2] = value >> 8;
- buffer[3] = value;
- rc = spi_write(aic26->spi, buffer, 4);
- if (rc) {
- dev_err(&aic26->spi->dev, "AIC26 reg read error\n");
- return -EIO;
- }
-
- /* update cache before returning */
- cache[reg] = value;
- return 0;
-}
-
static const struct snd_soc_dapm_widget tlv320aic26_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("MICIN"),
SND_SOC_DAPM_INPUT("AUX"),
@@ -195,19 +117,15 @@ static int aic26_hw_params(struct snd_pcm_substream *substream,
snd_soc_write(codec, AIC26_REG_PLL_PROG2, reg);
/* Audio Control 3 (master mode, fsref rate) */
- reg = aic26_reg_read_cache(codec, AIC26_REG_AUDIO_CTRL3);
- reg &= ~0xf800;
if (aic26->master)
- reg |= 0x0800;
+ reg = 0x0800;
if (fsref == 48000)
- reg |= 0x2000;
- snd_soc_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
+ reg = 0x2000;
+ snd_soc_update_bits(codec, AIC26_REG_AUDIO_CTRL3, 0xf800, reg);
/* Audio Control 1 (FSref divisor) */
- reg = aic26_reg_read_cache(codec, AIC26_REG_AUDIO_CTRL1);
- reg &= ~0x0fff;
- reg |= wlen | aic26->datfm | (divisor << 3) | divisor;
- snd_soc_write(codec, AIC26_REG_AUDIO_CTRL1, reg);
+ reg = wlen | aic26->datfm | (divisor << 3) | divisor;
+ snd_soc_update_bits(codec, AIC26_REG_AUDIO_CTRL1, 0xfff, reg);
return 0;
}
@@ -219,16 +137,16 @@ static int aic26_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
- u16 reg = aic26_reg_read_cache(codec, AIC26_REG_DAC_GAIN);
+ u16 reg;
dev_dbg(&aic26->spi->dev, "aic26_mute(dai=%p, mute=%i)\n",
dai, mute);
if (mute)
- reg |= 0x8080;
+ reg = 0x8080;
else
- reg &= ~0x8080;
- snd_soc_write(codec, AIC26_REG_DAC_GAIN, reg);
+ reg = 0;
+ snd_soc_update_bits(codec, AIC26_REG_DAC_GAIN, 0x8080, reg);
return 0;
}
@@ -346,7 +264,7 @@ static ssize_t aic26_keyclick_show(struct device *dev,
struct aic26 *aic26 = dev_get_drvdata(dev);
int val, amp, freq, len;
- val = aic26_reg_read_cache(aic26->codec, AIC26_REG_AUDIO_CTRL2);
+ val = snd_soc_read(aic26->codec, AIC26_REG_AUDIO_CTRL2);
amp = (val >> 12) & 0x7;
freq = (125 << ((val >> 8) & 0x7)) >> 1;
len = 2 * (1 + ((val >> 4) & 0xf));
@@ -360,11 +278,9 @@ static ssize_t aic26_keyclick_set(struct device *dev,
const char *buf, size_t count)
{
struct aic26 *aic26 = dev_get_drvdata(dev);
- int val;
- val = aic26_reg_read_cache(aic26->codec, AIC26_REG_AUDIO_CTRL2);
- val |= 0x8000;
- snd_soc_write(aic26->codec, AIC26_REG_AUDIO_CTRL2, val);
+ snd_soc_update_bits(aic26->codec, AIC26_REG_AUDIO_CTRL2,
+ 0x8000, 0x8000);
return count;
}
@@ -377,7 +293,9 @@ static DEVICE_ATTR(keyclick, 0644, aic26_keyclick_show, aic26_keyclick_set);
static int aic26_probe(struct snd_soc_codec *codec)
{
struct aic26 *aic26 = dev_get_drvdata(codec->dev);
- int ret, err, i, reg;
+ int ret, reg;
+
+ snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_REGMAP);
aic26->codec = codec;
@@ -393,37 +311,30 @@ static int aic26_probe(struct snd_soc_codec *codec)
reg |= 0x0800; /* set master mode */
snd_soc_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
- /* Fill register cache */
- for (i = 0; i < codec->driver->reg_cache_size; i++)
- snd_soc_read(codec, i);
-
/* Register the sysfs files for debugging */
/* Create SysFS files */
ret = device_create_file(codec->dev, &dev_attr_keyclick);
if (ret)
dev_info(codec->dev, "error creating sysfs files\n");
- /* register controls */
- dev_dbg(codec->dev, "Registering controls\n");
- err = snd_soc_add_codec_controls(codec, aic26_snd_controls,
- ARRAY_SIZE(aic26_snd_controls));
- WARN_ON(err < 0);
-
return 0;
}
static struct snd_soc_codec_driver aic26_soc_codec_dev = {
.probe = aic26_probe,
- .read = aic26_reg_read,
- .write = aic26_reg_write,
- .reg_cache_size = AIC26_NUM_REGS,
- .reg_word_size = sizeof(u16),
+ .controls = aic26_snd_controls,
+ .num_controls = ARRAY_SIZE(aic26_snd_controls),
.dapm_widgets = tlv320aic26_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(tlv320aic26_dapm_widgets),
.dapm_routes = tlv320aic26_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(tlv320aic26_dapm_routes),
};
+static const struct regmap_config aic26_regmap = {
+ .reg_bits = 16,
+ .val_bits = 16,
+};
+
/* ---------------------------------------------------------------------
* SPI device portion of driver: probe and release routines and SPI
* driver registration.
@@ -440,6 +351,10 @@ static int aic26_spi_probe(struct spi_device *spi)
if (!aic26)
return -ENOMEM;
+ aic26->regmap = devm_regmap_init_spi(spi, &aic26_regmap);
+ if (IS_ERR(aic26->regmap))
+ return PTR_ERR(aic26->regmap);
+
/* Initialize the driver data */
aic26->spi = spi;
dev_set_drvdata(&spi->dev, aic26);
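
Editor's note: the aic26 hw_params and mute changes above replace explicit cache reads followed by full-register writes with snd_soc_update_bits(), which does the read-modify-write against the regmap cache in one call. An illustration of the before/after shape, with invented register and field names:

    #include <sound/soc.h>

    #define FOO_REG_CTRL    0x04    /* hypothetical register and fields */
    #define FOO_RATE_MASK   0x0f00
    #define FOO_RATE_48K    0x0200

    /* Open-coded read-modify-write against the cache, as the old code did. */
    static void foo_set_rate_open_coded(struct snd_soc_codec *codec)
    {
            unsigned int val = snd_soc_read(codec, FOO_REG_CTRL);

            val &= ~FOO_RATE_MASK;
            val |= FOO_RATE_48K;
            snd_soc_write(codec, FOO_REG_CTRL, val);
    }

    /* Equivalent masked update; only bits under the mask can change. */
    static void foo_set_rate(struct snd_soc_codec *codec)
    {
            snd_soc_update_bits(codec, FOO_REG_CTRL, FOO_RATE_MASK,
                                FOO_RATE_48K);
    }
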
diff --git a/sound/soc/codecs/tlv320aic26.h b/sound/soc/codecs/tlv320aic26.h
index 67f19c3bebe6..629b85e75409 100644
--- a/sound/soc/codecs/tlv320aic26.h
+++ b/sound/soc/codecs/tlv320aic26.h
@@ -9,10 +9,7 @@
#define _TLV320AIC16_H_
/* AIC26 Registers */
-#define AIC26_READ_COMMAND_WORD(addr) ((1 << 15) | (addr << 5))
-#define AIC26_WRITE_COMMAND_WORD(addr) ((0 << 15) | (addr << 5))
-#define AIC26_PAGE_ADDR(page, offset) ((page << 6) | offset)
-#define AIC26_NUM_REGS AIC26_PAGE_ADDR(3, 0)
+#define AIC26_PAGE_ADDR(addr) ((page << 11) | (offset << 5))
/* Page 0: Auxiliary data registers */
#define AIC26_REG_BAT1 AIC26_PAGE_ADDR(0, 0x05)
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index 2ed57d4aa445..18cdcca9014c 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -60,9 +60,8 @@ struct aic32x4_rate_divs {
};
struct aic32x4_priv {
+ struct regmap *regmap;
u32 sysclk;
- u8 page_no;
- void *control_data;
u32 power_cfg;
u32 micpga_routing;
bool swapdacs;
@@ -262,67 +261,25 @@ static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {
{"Right ADC", NULL, "Right Input Mixer"},
};
-static inline int aic32x4_change_page(struct snd_soc_codec *codec,
- unsigned int new_page)
-{
- struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
- u8 data[2];
- int ret;
-
- data[0] = 0x00;
- data[1] = new_page & 0xff;
-
- ret = codec->hw_write(codec->control_data, data, 2);
- if (ret == 2) {
- aic32x4->page_no = new_page;
- return 0;
- } else {
- return ret;
- }
-}
-
-static int aic32x4_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int val)
-{
- struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
- unsigned int page = reg / 128;
- unsigned int fixed_reg = reg % 128;
- u8 data[2];
- int ret;
-
- /* A write to AIC32X4_PSEL is really a non-explicit page change */
- if (reg == AIC32X4_PSEL)
- return aic32x4_change_page(codec, val);
-
- if (aic32x4->page_no != page) {
- ret = aic32x4_change_page(codec, page);
- if (ret != 0)
- return ret;
- }
-
- data[0] = fixed_reg & 0xff;
- data[1] = val & 0xff;
-
- if (codec->hw_write(codec->control_data, data, 2) == 2)
- return 0;
- else
- return -EIO;
-}
+static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
+ {
+ .selector_reg = 0,
+ .selector_mask = 0xff,
+ .window_start = 0,
+ .window_len = 128,
+ .range_min = AIC32X4_PAGE1,
+ .range_max = AIC32X4_PAGE1 + 127,
+ },
+};
-static unsigned int aic32x4_read(struct snd_soc_codec *codec, unsigned int reg)
-{
- struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
- unsigned int page = reg / 128;
- unsigned int fixed_reg = reg % 128;
- int ret;
+static const struct regmap_config aic32x4_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
- if (aic32x4->page_no != page) {
- ret = aic32x4_change_page(codec, page);
- if (ret != 0)
- return ret;
- }
- return i2c_smbus_read_byte_data(codec->control_data, fixed_reg & 0xff);
-}
+ .max_register = AIC32X4_RMICPGAVOL,
+ .ranges = aic32x4_regmap_pages,
+ .num_ranges = ARRAY_SIZE(aic32x4_regmap_pages),
+};
static inline int aic32x4_get_divs(int mclk, int rate)
{
@@ -617,16 +574,10 @@ static int aic32x4_probe(struct snd_soc_codec *codec)
{
struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
u32 tmp_reg;
- int ret;
- codec->hw_write = (hw_write_t) i2c_master_send;
- codec->control_data = aic32x4->control_data;
+ snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
if (aic32x4->rstn_gpio >= 0) {
- ret = devm_gpio_request_one(codec->dev, aic32x4->rstn_gpio,
- GPIOF_OUT_INIT_LOW, "tlv320aic32x4 rstn");
- if (ret != 0)
- return ret;
ndelay(10);
gpio_set_value(aic32x4->rstn_gpio, 1);
}
@@ -692,8 +643,6 @@ static int aic32x4_remove(struct snd_soc_codec *codec)
}
static struct snd_soc_codec_driver soc_codec_dev_aic32x4 = {
- .read = aic32x4_read,
- .write = aic32x4_write,
.probe = aic32x4_probe,
.remove = aic32x4_remove,
.suspend = aic32x4_suspend,
@@ -720,7 +669,10 @@ static int aic32x4_i2c_probe(struct i2c_client *i2c,
if (aic32x4 == NULL)
return -ENOMEM;
- aic32x4->control_data = i2c;
+ aic32x4->regmap = devm_regmap_init_i2c(i2c, &aic32x4_regmap);
+ if (IS_ERR(aic32x4->regmap))
+ return PTR_ERR(aic32x4->regmap);
+
i2c_set_clientdata(i2c, aic32x4);
if (pdata) {
@@ -735,6 +687,13 @@ static int aic32x4_i2c_probe(struct i2c_client *i2c,
aic32x4->rstn_gpio = -1;
}
+ if (aic32x4->rstn_gpio >= 0) {
+ ret = devm_gpio_request_one(&i2c->dev, aic32x4->rstn_gpio,
+ GPIOF_OUT_INIT_LOW, "tlv320aic32x4 rstn");
+ if (ret != 0)
+ return ret;
+ }
+
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_aic32x4, &aic32x4_dai, 1);
return ret;
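
Editor's note: the aic32x4 conversion drops the hand-rolled page-select logic in favour of a regmap range, so accesses above the first page go through the selector register automatically. A small sketch of how such a paged map is declared (window size and register names are assumptions, not the aic32x4 values):

    #include <linux/kernel.h>
    #include <linux/regmap.h>

    #define FOO_PAGE_SELECT 0x00    /* hypothetical page-select register */
    #define FOO_PAGE1_BASE  128     /* virtual address of page 1, register 0 */

    static const struct regmap_range_cfg foo_ranges[] = {
            {
                    .name           = "page1",
                    .range_min      = FOO_PAGE1_BASE,
                    .range_max      = FOO_PAGE1_BASE + 127,
                    .selector_reg   = FOO_PAGE_SELECT,
                    .selector_mask  = 0xff,
                    .selector_shift = 0,
                    .window_start   = 0,
                    .window_len     = 128,
            },
    };

    static const struct regmap_config foo_paged_regmap = {
            .reg_bits       = 8,
            .val_bits       = 8,
            .max_register   = FOO_PAGE1_BASE + 127,
            .ranges         = foo_ranges,
            .num_ranges     = ARRAY_SIZE(foo_ranges),
    };

    /*
     * With this in place, regmap_write(map, FOO_PAGE1_BASE + 0x10, val)
     * writes the selector first and then register 0x10 in the window.
     */
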
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 6e3f269243e0..546d16b7d38f 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -40,6 +40,7 @@
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <sound/core.h>
@@ -72,9 +73,9 @@ struct aic3x_disable_nb {
/* codec private data */
struct aic3x_priv {
struct snd_soc_codec *codec;
+ struct regmap *regmap;
struct regulator_bulk_data supplies[AIC3X_NUM_SUPPLIES];
struct aic3x_disable_nb disable_nb[AIC3X_NUM_SUPPLIES];
- enum snd_soc_control_type control_type;
struct aic3x_setup_data *setup;
unsigned int sysclk;
struct list_head list;
@@ -90,41 +91,45 @@ struct aic3x_priv {
enum aic3x_micbias_voltage micbias_vg;
};
-/*
- * AIC3X register cache
- * We can't read the AIC3X register space when we are
- * using 2 wire for device control, so we cache them instead.
- * There is no point in caching the reset register
- */
-static const u8 aic3x_reg[AIC3X_CACHEREGNUM] = {
- 0x00, 0x00, 0x00, 0x10, /* 0 */
- 0x04, 0x00, 0x00, 0x00, /* 4 */
- 0x00, 0x00, 0x00, 0x01, /* 8 */
- 0x00, 0x00, 0x00, 0x80, /* 12 */
- 0x80, 0xff, 0xff, 0x78, /* 16 */
- 0x78, 0x78, 0x78, 0x78, /* 20 */
- 0x78, 0x00, 0x00, 0xfe, /* 24 */
- 0x00, 0x00, 0xfe, 0x00, /* 28 */
- 0x18, 0x18, 0x00, 0x00, /* 32 */
- 0x00, 0x00, 0x00, 0x00, /* 36 */
- 0x00, 0x00, 0x00, 0x80, /* 40 */
- 0x80, 0x00, 0x00, 0x00, /* 44 */
- 0x00, 0x00, 0x00, 0x04, /* 48 */
- 0x00, 0x00, 0x00, 0x00, /* 52 */
- 0x00, 0x00, 0x04, 0x00, /* 56 */
- 0x00, 0x00, 0x00, 0x00, /* 60 */
- 0x00, 0x04, 0x00, 0x00, /* 64 */
- 0x00, 0x00, 0x00, 0x00, /* 68 */
- 0x04, 0x00, 0x00, 0x00, /* 72 */
- 0x00, 0x00, 0x00, 0x00, /* 76 */
- 0x00, 0x00, 0x00, 0x00, /* 80 */
- 0x00, 0x00, 0x00, 0x00, /* 84 */
- 0x00, 0x00, 0x00, 0x00, /* 88 */
- 0x00, 0x00, 0x00, 0x00, /* 92 */
- 0x00, 0x00, 0x00, 0x00, /* 96 */
- 0x00, 0x00, 0x02, 0x00, /* 100 */
- 0x00, 0x00, 0x00, 0x00, /* 104 */
- 0x00, 0x00, /* 108 */
+static const struct reg_default aic3x_reg[] = {
+ { 0, 0x00 }, { 1, 0x00 }, { 2, 0x00 }, { 3, 0x10 },
+ { 4, 0x04 }, { 5, 0x00 }, { 6, 0x00 }, { 7, 0x00 },
+ { 8, 0x00 }, { 9, 0x00 }, { 10, 0x00 }, { 11, 0x01 },
+ { 12, 0x00 }, { 13, 0x00 }, { 14, 0x00 }, { 15, 0x80 },
+ { 16, 0x80 }, { 17, 0xff }, { 18, 0xff }, { 19, 0x78 },
+ { 20, 0x78 }, { 21, 0x78 }, { 22, 0x78 }, { 23, 0x78 },
+ { 24, 0x78 }, { 25, 0x00 }, { 26, 0x00 }, { 27, 0xfe },
+ { 28, 0x00 }, { 29, 0x00 }, { 30, 0xfe }, { 31, 0x00 },
+ { 32, 0x18 }, { 33, 0x18 }, { 34, 0x00 }, { 35, 0x00 },
+ { 36, 0x00 }, { 37, 0x00 }, { 38, 0x00 }, { 39, 0x00 },
+ { 40, 0x00 }, { 41, 0x00 }, { 42, 0x00 }, { 43, 0x80 },
+ { 44, 0x80 }, { 45, 0x00 }, { 46, 0x00 }, { 47, 0x00 },
+ { 48, 0x00 }, { 49, 0x00 }, { 50, 0x00 }, { 51, 0x04 },
+ { 52, 0x00 }, { 53, 0x00 }, { 54, 0x00 }, { 55, 0x00 },
+ { 56, 0x00 }, { 57, 0x00 }, { 58, 0x04 }, { 59, 0x00 },
+ { 60, 0x00 }, { 61, 0x00 }, { 62, 0x00 }, { 63, 0x00 },
+ { 64, 0x00 }, { 65, 0x04 }, { 66, 0x00 }, { 67, 0x00 },
+ { 68, 0x00 }, { 69, 0x00 }, { 70, 0x00 }, { 71, 0x00 },
+ { 72, 0x04 }, { 73, 0x00 }, { 74, 0x00 }, { 75, 0x00 },
+ { 76, 0x00 }, { 77, 0x00 }, { 78, 0x00 }, { 79, 0x00 },
+ { 80, 0x00 }, { 81, 0x00 }, { 82, 0x00 }, { 83, 0x00 },
+ { 84, 0x00 }, { 85, 0x00 }, { 86, 0x00 }, { 87, 0x00 },
+ { 88, 0x00 }, { 89, 0x00 }, { 90, 0x00 }, { 91, 0x00 },
+ { 92, 0x00 }, { 93, 0x00 }, { 94, 0x00 }, { 95, 0x00 },
+ { 96, 0x00 }, { 97, 0x00 }, { 98, 0x00 }, { 99, 0x00 },
+ { 100, 0x00 }, { 101, 0x00 }, { 102, 0x02 }, { 103, 0x00 },
+ { 104, 0x00 }, { 105, 0x00 }, { 106, 0x00 }, { 107, 0x00 },
+ { 108, 0x00 }, { 109, 0x00 },
+};
+
+static const struct regmap_config aic3x_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = DAC_ICC_ADJ,
+ .reg_defaults = aic3x_reg,
+ .num_reg_defaults = ARRAY_SIZE(aic3x_reg),
+ .cache_type = REGCACHE_RBTREE,
};
#define SOC_DAPM_SINGLE_AIC3X(xname, reg, shift, mask, invert) \
@@ -674,6 +679,8 @@ static const struct snd_soc_dapm_route intercon[] = {
/* Left Input */
{"Left Line1L Mux", "single-ended", "LINE1L"},
{"Left Line1L Mux", "differential", "LINE1L"},
+ {"Left Line1R Mux", "single-ended", "LINE1R"},
+ {"Left Line1R Mux", "differential", "LINE1R"},
{"Left Line2L Mux", "single-ended", "LINE2L"},
{"Left Line2L Mux", "differential", "LINE2L"},
@@ -690,6 +697,8 @@ static const struct snd_soc_dapm_route intercon[] = {
/* Right Input */
{"Right Line1R Mux", "single-ended", "LINE1R"},
{"Right Line1R Mux", "differential", "LINE1R"},
+ {"Right Line1L Mux", "single-ended", "LINE1L"},
+ {"Right Line1L Mux", "differential", "LINE1L"},
{"Right Line2R Mux", "single-ended", "LINE2R"},
{"Right Line2R Mux", "differential", "LINE2R"},
@@ -824,12 +833,6 @@ static int aic3x_add_widgets(struct snd_soc_codec *codec)
struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
struct snd_soc_dapm_context *dapm = &codec->dapm;
- snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
- ARRAY_SIZE(aic3x_dapm_widgets));
-
- /* set up audio path interconnects */
- snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
-
if (aic3x->model == AIC3X_MODEL_3007) {
snd_soc_dapm_new_controls(dapm, aic3007_dapm_widgets,
ARRAY_SIZE(aic3007_dapm_widgets));
@@ -1078,29 +1081,6 @@ static int aic3x_set_dai_fmt(struct snd_soc_dai *codec_dai,
return 0;
}
-static int aic3x_init_3007(struct snd_soc_codec *codec)
-{
- u8 tmp1, tmp2, *cache = codec->reg_cache;
-
- /*
- * There is no need to cache writes to undocumented page 0xD but
- * respective page 0 register cache entries must be preserved
- */
- tmp1 = cache[0xD];
- tmp2 = cache[0x8];
- /* Class-D speaker driver init; datasheet p. 46 */
- snd_soc_write(codec, AIC3X_PAGE_SELECT, 0x0D);
- snd_soc_write(codec, 0xD, 0x0D);
- snd_soc_write(codec, 0x8, 0x5C);
- snd_soc_write(codec, 0x8, 0x5D);
- snd_soc_write(codec, 0x8, 0x5C);
- snd_soc_write(codec, AIC3X_PAGE_SELECT, 0x00);
- cache[0xD] = tmp1;
- cache[0x8] = tmp2;
-
- return 0;
-}
-
static int aic3x_regulator_event(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -1115,7 +1095,7 @@ static int aic3x_regulator_event(struct notifier_block *nb,
*/
if (gpio_is_valid(aic3x->gpio_reset))
gpio_set_value(aic3x->gpio_reset, 0);
- aic3x->codec->cache_sync = 1;
+ regcache_mark_dirty(aic3x->regmap);
}
return 0;
@@ -1124,8 +1104,7 @@ static int aic3x_regulator_event(struct notifier_block *nb,
static int aic3x_set_power(struct snd_soc_codec *codec, int power)
{
struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
- int i, ret;
- u8 *cache = codec->reg_cache;
+ int ret;
if (power) {
ret = regulator_bulk_enable(ARRAY_SIZE(aic3x->supplies),
@@ -1133,12 +1112,6 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
if (ret)
goto out;
aic3x->power = 1;
- /*
- * Reset release and cache sync is necessary only if some
- * supply was off or if there were cached writes
- */
- if (!codec->cache_sync)
- goto out;
if (gpio_is_valid(aic3x->gpio_reset)) {
udelay(1);
@@ -1146,12 +1119,8 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
}
/* Sync reg_cache with the hardware */
- codec->cache_only = 0;
- for (i = AIC3X_SAMPLE_RATE_SEL_REG; i < ARRAY_SIZE(aic3x_reg); i++)
- snd_soc_write(codec, i, cache[i]);
- if (aic3x->model == AIC3X_MODEL_3007)
- aic3x_init_3007(codec);
- codec->cache_sync = 0;
+ regcache_cache_only(aic3x->regmap, false);
+ regcache_sync(aic3x->regmap);
} else {
/*
* Do soft reset to this codec instance in order to clear
@@ -1159,10 +1128,10 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
* remain on
*/
snd_soc_write(codec, AIC3X_RESET, SOFT_RESET);
- codec->cache_sync = 1;
+ regcache_mark_dirty(aic3x->regmap);
aic3x->power = 0;
/* HW writes are needless when bias is off */
- codec->cache_only = 1;
+ regcache_cache_only(aic3x->regmap, true);
ret = regulator_bulk_disable(ARRAY_SIZE(aic3x->supplies),
aic3x->supplies);
}
@@ -1317,7 +1286,6 @@ static int aic3x_init(struct snd_soc_codec *codec)
snd_soc_write(codec, LINE2R_2_MONOLOPM_VOL, DEFAULT_VOL);
if (aic3x->model == AIC3X_MODEL_3007) {
- aic3x_init_3007(codec);
snd_soc_write(codec, CLASSD_CTRL, 0);
}
@@ -1345,29 +1313,12 @@ static int aic3x_probe(struct snd_soc_codec *codec)
INIT_LIST_HEAD(&aic3x->list);
aic3x->codec = codec;
- ret = snd_soc_codec_set_cache_io(codec, 8, 8, aic3x->control_type);
+ ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
if (ret != 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
return ret;
}
- if (gpio_is_valid(aic3x->gpio_reset) &&
- !aic3x_is_shared_reset(aic3x)) {
- ret = gpio_request(aic3x->gpio_reset, "tlv320aic3x reset");
- if (ret != 0)
- goto err_gpio;
- gpio_direction_output(aic3x->gpio_reset, 0);
- }
-
- for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++)
- aic3x->supplies[i].supply = aic3x_supply_names[i];
-
- ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(aic3x->supplies),
- aic3x->supplies);
- if (ret != 0) {
- dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
- goto err_get;
- }
for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++) {
aic3x->disable_nb[i].nb.notifier_call = aic3x_regulator_event;
aic3x->disable_nb[i].aic3x = aic3x;
@@ -1381,7 +1332,7 @@ static int aic3x_probe(struct snd_soc_codec *codec)
}
}
- codec->cache_only = 1;
+ regcache_mark_dirty(aic3x->regmap);
aic3x_init(codec);
if (aic3x->setup) {
@@ -1392,8 +1343,6 @@ static int aic3x_probe(struct snd_soc_codec *codec)
(aic3x->setup->gpio_func[1] & 0xf) << 4);
}
- snd_soc_add_codec_controls(codec, aic3x_snd_controls,
- ARRAY_SIZE(aic3x_snd_controls));
if (aic3x->model == AIC3X_MODEL_3007)
snd_soc_add_codec_controls(codec, &aic3x_classd_amp_gain_ctrl, 1);
@@ -1424,12 +1373,6 @@ err_notif:
while (i--)
regulator_unregister_notifier(aic3x->supplies[i].consumer,
&aic3x->disable_nb[i].nb);
- regulator_bulk_free(ARRAY_SIZE(aic3x->supplies), aic3x->supplies);
-err_get:
- if (gpio_is_valid(aic3x->gpio_reset) &&
- !aic3x_is_shared_reset(aic3x))
- gpio_free(aic3x->gpio_reset);
-err_gpio:
return ret;
}
@@ -1440,15 +1383,9 @@ static int aic3x_remove(struct snd_soc_codec *codec)
aic3x_set_bias_level(codec, SND_SOC_BIAS_OFF);
list_del(&aic3x->list);
- if (gpio_is_valid(aic3x->gpio_reset) &&
- !aic3x_is_shared_reset(aic3x)) {
- gpio_set_value(aic3x->gpio_reset, 0);
- gpio_free(aic3x->gpio_reset);
- }
for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++)
regulator_unregister_notifier(aic3x->supplies[i].consumer,
&aic3x->disable_nb[i].nb);
- regulator_bulk_free(ARRAY_SIZE(aic3x->supplies), aic3x->supplies);
return 0;
}
@@ -1456,13 +1393,16 @@ static int aic3x_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_aic3x = {
.set_bias_level = aic3x_set_bias_level,
.idle_bias_off = true,
- .reg_cache_size = ARRAY_SIZE(aic3x_reg),
- .reg_word_size = sizeof(u8),
- .reg_cache_default = aic3x_reg,
.probe = aic3x_probe,
.remove = aic3x_remove,
.suspend = aic3x_suspend,
.resume = aic3x_resume,
+ .controls = aic3x_snd_controls,
+ .num_controls = ARRAY_SIZE(aic3x_snd_controls),
+ .dapm_widgets = aic3x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(aic3x_dapm_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
};
/*
@@ -1479,6 +1419,16 @@ static const struct i2c_device_id aic3x_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, aic3x_i2c_id);
+static const struct reg_default aic3007_class_d[] = {
+ /* Class-D speaker driver init; datasheet p. 46 */
+ { AIC3X_PAGE_SELECT, 0x0D },
+ { 0xD, 0x0D },
+ { 0x8, 0x5C },
+ { 0x8, 0x5D },
+ { 0x8, 0x5C },
+ { AIC3X_PAGE_SELECT, 0x00 },
+};
+
/*
* If the i2c layer weren't so broken, we could pass this kind of data
* around
@@ -1490,7 +1440,7 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
struct aic3x_priv *aic3x;
struct aic3x_setup_data *ai3x_setup;
struct device_node *np = i2c->dev.of_node;
- int ret;
+ int ret, i;
u32 value;
aic3x = devm_kzalloc(&i2c->dev, sizeof(struct aic3x_priv), GFP_KERNEL);
@@ -1499,7 +1449,13 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
return -ENOMEM;
}
- aic3x->control_type = SND_SOC_I2C;
+ aic3x->regmap = devm_regmap_init_i2c(i2c, &aic3x_regmap);
+ if (IS_ERR(aic3x->regmap)) {
+ ret = PTR_ERR(aic3x->regmap);
+ return ret;
+ }
+
+ regcache_cache_only(aic3x->regmap, true);
i2c_set_clientdata(i2c, aic3x);
if (pdata) {
@@ -1551,14 +1507,54 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
aic3x->model = id->driver_data;
+ if (gpio_is_valid(aic3x->gpio_reset) &&
+ !aic3x_is_shared_reset(aic3x)) {
+ ret = gpio_request(aic3x->gpio_reset, "tlv320aic3x reset");
+ if (ret != 0)
+ goto err;
+ gpio_direction_output(aic3x->gpio_reset, 0);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++)
+ aic3x->supplies[i].supply = aic3x_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(aic3x->supplies),
+ aic3x->supplies);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
+ goto err_gpio;
+ }
+
+ if (aic3x->model == AIC3X_MODEL_3007) {
+ ret = regmap_register_patch(aic3x->regmap, aic3007_class_d,
+ ARRAY_SIZE(aic3007_class_d));
+ if (ret != 0)
+ dev_err(&i2c->dev, "Failed to init class D: %d\n",
+ ret);
+ }
+
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_aic3x, &aic3x_dai, 1);
return ret;
+
+err_gpio:
+ if (gpio_is_valid(aic3x->gpio_reset) &&
+ !aic3x_is_shared_reset(aic3x))
+ gpio_free(aic3x->gpio_reset);
+err:
+ return ret;
}
static int aic3x_i2c_remove(struct i2c_client *client)
{
+ struct aic3x_priv *aic3x = i2c_get_clientdata(client);
+
snd_soc_unregister_codec(&client->dev);
+ if (gpio_is_valid(aic3x->gpio_reset) &&
+ !aic3x_is_shared_reset(aic3x)) {
+ gpio_set_value(aic3x->gpio_reset, 0);
+ gpio_free(aic3x->gpio_reset);
+ }
return 0;
}
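
Editor's note: two idioms in the aic3x conversion are worth calling out: regcache_cache_only() defers hardware writes until the supplies come up, and regmap_register_patch() replays a fixed write sequence (here the 3007 class-D init) after every cache sync. A hedged sketch with placeholder registers and values:

    #include <linux/kernel.h>
    #include <linux/regmap.h>

    /* One-off init sequence, replayed after every regcache_sync(). */
    static const struct reg_default foo_init_seq[] = {
            { 0x0d, 0x0d },         /* placeholder register/value pairs */
            { 0x08, 0x5c },
    };

    static int foo_setup_cache(struct regmap *map)
    {
            /* Queue writes in the cache while the device is unpowered. */
            regcache_cache_only(map, true);

            return regmap_register_patch(map, foo_init_seq,
                                         ARRAY_SIZE(foo_init_seq));
    }

    static int foo_power_on(struct regmap *map)
    {
            /* Supplies are back: push the cached state out to the hardware. */
            regcache_cache_only(map, false);
            return regcache_sync(map);
    }
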
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index c58bee8346ce..998555f2a8aa 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -30,6 +30,7 @@
#include <sound/tpa6130a2-plat.h>
#include <sound/soc.h>
#include <sound/tlv.h>
+#include <linux/of_gpio.h>
#include "tpa6130a2.h"
@@ -364,30 +365,33 @@ static int tpa6130a2_probe(struct i2c_client *client,
{
struct device *dev;
struct tpa6130a2_data *data;
- struct tpa6130a2_platform_data *pdata;
+ struct tpa6130a2_platform_data *pdata = client->dev.platform_data;
+ struct device_node *np = client->dev.of_node;
const char *regulator;
int ret;
dev = &client->dev;
- if (client->dev.platform_data == NULL) {
- dev_err(dev, "Platform data not set\n");
- dump_stack();
- return -ENODEV;
- }
-
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL) {
dev_err(dev, "Can not allocate memory\n");
return -ENOMEM;
}
+ if (pdata) {
+ data->power_gpio = pdata->power_gpio;
+ } else if (np) {
+ data->power_gpio = of_get_named_gpio(np, "power-gpio", 0);
+ } else {
+ dev_err(dev, "Platform data not set\n");
+ dump_stack();
+ return -ENODEV;
+ }
+
tpa6130a2_client = client;
i2c_set_clientdata(tpa6130a2_client, data);
- pdata = client->dev.platform_data;
- data->power_gpio = pdata->power_gpio;
data->id = id->driver_data;
mutex_init(&data->mutex);
@@ -466,10 +470,20 @@ static const struct i2c_device_id tpa6130a2_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tpa6130a2_id);
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id tpa6130a2_of_match[] = {
+ { .compatible = "ti,tpa6130a2", },
+ { .compatible = "ti,tpa6140a2" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tpa6130a2_of_match);
+#endif
+
static struct i2c_driver tpa6130a2_i2c_driver = {
.driver = {
.name = "tpa6130a2",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(tpa6130a2_of_match),
},
.probe = tpa6130a2_probe,
.remove = tpa6130a2_remove,
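
Editor's note: the tpa6130a2 change lets the amplifier probe from either platform data or a device-tree node, pulling the power GPIO with of_get_named_gpio() and advertising compatibles through of_match_ptr(). A minimal sketch of the pdata-or-DT fallback, using the "power-gpio" property from the hunk and otherwise invented names:

    #include <linux/errno.h>
    #include <linux/i2c.h>
    #include <linux/of.h>
    #include <linux/of_gpio.h>

    struct foo_platform_data {
            int power_gpio;
    };

    static int foo_get_power_gpio(struct i2c_client *client)
    {
            struct foo_platform_data *pdata = client->dev.platform_data;
            struct device_node *np = client->dev.of_node;

            if (pdata)
                    return pdata->power_gpio;       /* legacy board file */
            if (np)
                    return of_get_named_gpio(np, "power-gpio", 0);
            return -ENODEV;                         /* neither source present */
    }
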
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 1e3884d6b3fb..dfc51bb425da 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -46,13 +46,7 @@
/* TWL4030 PMBR1 Register GPIO6 mux bits */
#define TWL4030_GPIO6_PWM0_MUTE(value) ((value & 0x03) << 2)
-/* Shadow register used by the audio driver */
-#define TWL4030_REG_SW_SHADOW 0x4A
-#define TWL4030_CACHEREGNUM (TWL4030_REG_SW_SHADOW + 1)
-
-/* TWL4030_REG_SW_SHADOW (0x4A) Fields */
-#define TWL4030_HFL_EN 0x01
-#define TWL4030_HFR_EN 0x02
+#define TWL4030_CACHEREGNUM (TWL4030_REG_MISC_SET_2 + 1)
/*
* twl4030 register cache & default register settings
@@ -132,7 +126,6 @@ static const u8 twl4030_reg[TWL4030_CACHEREGNUM] = {
0x00, /* REG_VIBRA_PWM_SET (0x47) */
0x00, /* REG_ANAMIC_GAIN (0x48) */
0x00, /* REG_MISC_SET_2 (0x49) */
- 0x00, /* REG_SW_SHADOW (0x4A) - Shadow, non HW register */
};
/* codec private data */
@@ -198,42 +191,41 @@ static int twl4030_write(struct snd_soc_codec *codec,
int write_to_reg = 0;
twl4030_write_reg_cache(codec, reg, value);
- if (likely(reg < TWL4030_REG_SW_SHADOW)) {
- /* Decide if the given register can be written */
- switch (reg) {
- case TWL4030_REG_EAR_CTL:
- if (twl4030->earpiece_enabled)
- write_to_reg = 1;
- break;
- case TWL4030_REG_PREDL_CTL:
- if (twl4030->predrivel_enabled)
- write_to_reg = 1;
- break;
- case TWL4030_REG_PREDR_CTL:
- if (twl4030->predriver_enabled)
- write_to_reg = 1;
- break;
- case TWL4030_REG_PRECKL_CTL:
- if (twl4030->carkitl_enabled)
- write_to_reg = 1;
- break;
- case TWL4030_REG_PRECKR_CTL:
- if (twl4030->carkitr_enabled)
- write_to_reg = 1;
- break;
- case TWL4030_REG_HS_GAIN_SET:
- if (twl4030->hsl_enabled || twl4030->hsr_enabled)
- write_to_reg = 1;
- break;
- default:
- /* All other register can be written */
+ /* Decide if the given register can be written */
+ switch (reg) {
+ case TWL4030_REG_EAR_CTL:
+ if (twl4030->earpiece_enabled)
write_to_reg = 1;
- break;
- }
- if (write_to_reg)
- return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
- value, reg);
+ break;
+ case TWL4030_REG_PREDL_CTL:
+ if (twl4030->predrivel_enabled)
+ write_to_reg = 1;
+ break;
+ case TWL4030_REG_PREDR_CTL:
+ if (twl4030->predriver_enabled)
+ write_to_reg = 1;
+ break;
+ case TWL4030_REG_PRECKL_CTL:
+ if (twl4030->carkitl_enabled)
+ write_to_reg = 1;
+ break;
+ case TWL4030_REG_PRECKR_CTL:
+ if (twl4030->carkitr_enabled)
+ write_to_reg = 1;
+ break;
+ case TWL4030_REG_HS_GAIN_SET:
+ if (twl4030->hsl_enabled || twl4030->hsr_enabled)
+ write_to_reg = 1;
+ break;
+ default:
+ /* All other registers can be written */
+ write_to_reg = 1;
+ break;
}
+ if (write_to_reg)
+ return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+ value, reg);
+
return 0;
}
@@ -532,7 +524,7 @@ SOC_DAPM_ENUM("Route", twl4030_handsfreel_enum);
/* Handsfree Left virtual mute */
static const struct snd_kcontrol_new twl4030_dapm_handsfreelmute_control =
- SOC_DAPM_SINGLE("Switch", TWL4030_REG_SW_SHADOW, 0, 1, 0);
+ SOC_DAPM_SINGLE_VIRT("Switch", 1);
/* Handsfree Right */
static const char *twl4030_handsfreer_texts[] =
@@ -548,7 +540,7 @@ SOC_DAPM_ENUM("Route", twl4030_handsfreer_enum);
/* Handsfree Right virtual mute */
static const struct snd_kcontrol_new twl4030_dapm_handsfreermute_control =
- SOC_DAPM_SINGLE("Switch", TWL4030_REG_SW_SHADOW, 1, 1, 0);
+ SOC_DAPM_SINGLE_VIRT("Switch", 1);
/* Vibra */
/* Vibra audio path selection */
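
Editor's note: the twl4030 (and twl6040, below) hunks delete the driver-private shadow register and switch the virtual mute controls to SOC_DAPM_SINGLE_VIRT, which is backed by SND_SOC_NOPM rather than a hardware register. A minimal sketch of such a register-less switch, with invented widget names:

    #include <sound/soc.h>
    #include <sound/soc-dapm.h>

    /* A switch that only gates a DAPM route; no codec register behind it. */
    static const struct snd_kcontrol_new foo_hf_mute_control =
            SOC_DAPM_SINGLE_VIRT("Switch", 1);

    static const struct snd_soc_dapm_widget foo_widgets[] = {
            SND_SOC_DAPM_SWITCH("Handsfree Mute", SND_SOC_NOPM, 0, 0,
                                &foo_hf_mute_control),
    };
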
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 3c79dbb6c323..f2f4bcb2ff71 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -54,12 +54,7 @@ enum twl6040_dai_id {
#define TWL6040_OUTHF_0dB 0x03
#define TWL6040_OUTHF_M52dB 0x1D
-/* Shadow register used by the driver */
-#define TWL6040_REG_SW_SHADOW 0x2F
-#define TWL6040_CACHEREGNUM (TWL6040_REG_SW_SHADOW + 1)
-
-/* TWL6040_REG_SW_SHADOW (0x2F) fields */
-#define TWL6040_EAR_PATH_ENABLE 0x01
+#define TWL6040_CACHEREGNUM (TWL6040_REG_STATUS + 1)
struct twl6040_jack_data {
struct snd_soc_jack *jack;
@@ -135,8 +130,6 @@ static const u8 twl6040_reg[TWL6040_CACHEREGNUM] = {
0x00, /* REG_HFOTRIM 0x2C */
0x09, /* REG_ACCCTL 0x2D */
0x00, /* REG_STATUS 0x2E (ro) */
-
- 0x00, /* REG_SW_SHADOW 0x2F - Shadow, non HW register */
};
/* List of registers to be restored after power up */
@@ -220,12 +213,8 @@ static int twl6040_read_reg_volatile(struct snd_soc_codec *codec,
if (reg >= TWL6040_CACHEREGNUM)
return -EIO;
- if (likely(reg < TWL6040_REG_SW_SHADOW)) {
- value = twl6040_reg_read(twl6040, reg);
- twl6040_write_reg_cache(codec, reg, value);
- } else {
- value = twl6040_read_reg_cache(codec, reg);
- }
+ value = twl6040_reg_read(twl6040, reg);
+ twl6040_write_reg_cache(codec, reg, value);
return value;
}
@@ -246,7 +235,7 @@ static bool twl6040_is_path_unmuted(struct snd_soc_codec *codec,
return priv->dl2_unmuted;
default:
return 1;
- };
+ }
}
/*
@@ -261,8 +250,7 @@ static int twl6040_write(struct snd_soc_codec *codec,
return -EIO;
twl6040_write_reg_cache(codec, reg, value);
- if (likely(reg < TWL6040_REG_SW_SHADOW) &&
- twl6040_is_path_unmuted(codec, reg))
+ if (twl6040_is_path_unmuted(codec, reg))
return twl6040_reg_write(twl6040, reg, value);
else
return 0;
@@ -555,7 +543,7 @@ static const struct snd_kcontrol_new hfr_mux_controls =
SOC_DAPM_ENUM("Route", twl6040_hf_enum[1]);
static const struct snd_kcontrol_new ep_path_enable_control =
- SOC_DAPM_SINGLE("Switch", TWL6040_REG_SW_SHADOW, 0, 1, 0);
+ SOC_DAPM_SINGLE_VIRT("Switch", 1);
static const struct snd_kcontrol_new auxl_switch_control =
SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFLCTL, 6, 1, 0);
@@ -1100,7 +1088,7 @@ static void twl6040_mute_path(struct snd_soc_codec *codec, enum twl6040_dai_id i
break;
default:
break;
- };
+ }
}
static int twl6040_digital_mute(struct snd_soc_dai *dai, int mute)
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index d5ebcb00019b..bf7804a12863 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -793,11 +793,11 @@ static int wm0010_set_sysclk(struct snd_soc_codec *codec, int source,
wm0010->max_spi_freq = 0;
} else {
for (i = 0; i < ARRAY_SIZE(pll_clock_map); i++)
- if (freq >= pll_clock_map[i].max_sysclk)
+ if (freq >= pll_clock_map[i].max_sysclk) {
+ wm0010->max_spi_freq = pll_clock_map[i].max_pll_spi_speed;
+ wm0010->pll_clkctrl1 = pll_clock_map[i].pll_clkctrl1;
break;
-
- wm0010->max_spi_freq = pll_clock_map[i].max_pll_spi_speed;
- wm0010->pll_clkctrl1 = pll_clock_map[i].pll_clkctrl1;
+ }
}
return 0;
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index bbd64384ca1c..8c91be5d67e3 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -983,24 +983,36 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
ARIZONA_MUX_ROUTES("ASRC2L", "ASRC2L"),
ARIZONA_MUX_ROUTES("ASRC2R", "ASRC2R"),
+ { "AEC Loopback", "HPOUT1L", "OUT1L" },
+ { "AEC Loopback", "HPOUT1R", "OUT1R" },
{ "HPOUT1L", NULL, "OUT1L" },
{ "HPOUT1R", NULL, "OUT1R" },
+ { "AEC Loopback", "HPOUT2L", "OUT2L" },
+ { "AEC Loopback", "HPOUT2R", "OUT2R" },
{ "HPOUT2L", NULL, "OUT2L" },
{ "HPOUT2R", NULL, "OUT2R" },
+ { "AEC Loopback", "HPOUT3L", "OUT3L" },
+ { "AEC Loopback", "HPOUT3R", "OUT3R" },
{ "HPOUT3L", NULL, "OUT3L" },
{ "HPOUT3R", NULL, "OUT3L" },
+ { "AEC Loopback", "SPKOUTL", "OUT4L" },
{ "SPKOUTLN", NULL, "OUT4L" },
{ "SPKOUTLP", NULL, "OUT4L" },
+ { "AEC Loopback", "SPKOUTR", "OUT4R" },
{ "SPKOUTRN", NULL, "OUT4R" },
{ "SPKOUTRP", NULL, "OUT4R" },
+ { "AEC Loopback", "SPKDAT1L", "OUT5L" },
+ { "AEC Loopback", "SPKDAT1R", "OUT5R" },
{ "SPKDAT1L", NULL, "OUT5L" },
{ "SPKDAT1R", NULL, "OUT5R" },
+ { "AEC Loopback", "SPKDAT2L", "OUT6L" },
+ { "AEC Loopback", "SPKDAT2R", "OUT6R" },
{ "SPKDAT2L", NULL, "OUT6L" },
{ "SPKDAT2R", NULL, "OUT6R" },
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index d2a092850283..48dc7d2fee36 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -32,13 +32,6 @@
#include "wm8400.h"
-/* Fake register for internal state */
-#define WM8400_INTDRIVBITS (WM8400_REGISTER_COUNT + 1)
-#define WM8400_INMIXL_PWR 0
-#define WM8400_AINLMUX_PWR 1
-#define WM8400_INMIXR_PWR 2
-#define WM8400_AINRMUX_PWR 3
-
static struct regulator_bulk_data power[] = {
{
.supply = "I2S1VDD",
@@ -74,32 +67,6 @@ struct wm8400_priv {
int fll_in, fll_out;
};
-static inline unsigned int wm8400_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec);
-
- if (reg == WM8400_INTDRIVBITS)
- return wm8400->fake_register;
- else
- return wm8400_reg_read(wm8400->wm8400, reg);
-}
-
-/*
- * write to the wm8400 register space
- */
-static int wm8400_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec);
-
- if (reg == WM8400_INTDRIVBITS) {
- wm8400->fake_register = value;
- return 0;
- } else
- return wm8400_set_bits(wm8400->wm8400, reg, 0xffff, value);
-}
-
static void wm8400_codec_reset(struct snd_soc_codec *codec)
{
struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec);
@@ -352,32 +319,6 @@ SOC_SINGLE("RIN34 Mute Switch", WM8400_RIGHT_LINE_INPUT_3_4_VOLUME,
* _DAPM_ Controls
*/
-static int inmixer_event (struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- u16 reg, fakepower;
-
- reg = snd_soc_read(w->codec, WM8400_POWER_MANAGEMENT_2);
- fakepower = snd_soc_read(w->codec, WM8400_INTDRIVBITS);
-
- if (fakepower & ((1 << WM8400_INMIXL_PWR) |
- (1 << WM8400_AINLMUX_PWR))) {
- reg |= WM8400_AINL_ENA;
- } else {
- reg &= ~WM8400_AINL_ENA;
- }
-
- if (fakepower & ((1 << WM8400_INMIXR_PWR) |
- (1 << WM8400_AINRMUX_PWR))) {
- reg |= WM8400_AINR_ENA;
- } else {
- reg &= ~WM8400_AINR_ENA;
- }
- snd_soc_write(w->codec, WM8400_POWER_MANAGEMENT_2, reg);
-
- return 0;
-}
-
static int outmixer_event (struct snd_soc_dapm_widget *w,
struct snd_kcontrol * kcontrol, int event)
{
@@ -658,27 +599,26 @@ SND_SOC_DAPM_MIXER("RIN34 PGA", WM8400_POWER_MANAGEMENT_2,
0, &wm8400_dapm_rin34_pga_controls[0],
ARRAY_SIZE(wm8400_dapm_rin34_pga_controls)),
+SND_SOC_DAPM_SUPPLY("INL", WM8400_POWER_MANAGEMENT_2, WM8400_AINL_ENA_SHIFT,
+ 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("INR", WM8400_POWER_MANAGEMENT_2, WM8400_AINR_ENA_SHIFT,
+ 0, NULL, 0),
+
/* INMIXL */
-SND_SOC_DAPM_MIXER_E("INMIXL", WM8400_INTDRIVBITS, WM8400_INMIXL_PWR, 0,
+SND_SOC_DAPM_MIXER("INMIXL", SND_SOC_NOPM, 0, 0,
&wm8400_dapm_inmixl_controls[0],
- ARRAY_SIZE(wm8400_dapm_inmixl_controls),
- inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ ARRAY_SIZE(wm8400_dapm_inmixl_controls)),
/* AINLMUX */
-SND_SOC_DAPM_MUX_E("AILNMUX", WM8400_INTDRIVBITS, WM8400_AINLMUX_PWR, 0,
- &wm8400_dapm_ainlmux_controls, inmixer_event,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_MUX("AILNMUX", SND_SOC_NOPM, 0, 0, &wm8400_dapm_ainlmux_controls),
/* INMIXR */
-SND_SOC_DAPM_MIXER_E("INMIXR", WM8400_INTDRIVBITS, WM8400_INMIXR_PWR, 0,
+SND_SOC_DAPM_MIXER("INMIXR", SND_SOC_NOPM, 0, 0,
&wm8400_dapm_inmixr_controls[0],
- ARRAY_SIZE(wm8400_dapm_inmixr_controls),
- inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ ARRAY_SIZE(wm8400_dapm_inmixr_controls)),
/* AINRMUX */
-SND_SOC_DAPM_MUX_E("AIRNMUX", WM8400_INTDRIVBITS, WM8400_AINRMUX_PWR, 0,
- &wm8400_dapm_ainrmux_controls, inmixer_event,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_MUX("AIRNMUX", SND_SOC_NOPM, 0, 0, &wm8400_dapm_ainrmux_controls),
/* Output Side */
/* DACs */
@@ -789,11 +729,13 @@ static const struct snd_soc_dapm_route wm8400_dapm_routes[] = {
{"LIN34 PGA", "LIN3 Switch", "LIN3"},
{"LIN34 PGA", "LIN4 Switch", "LIN4/RXN"},
/* INMIXL */
+ {"INMIXL", NULL, "INL"},
{"INMIXL", "Record Left Volume", "LOMIX"},
{"INMIXL", "LIN2 Volume", "LIN2"},
{"INMIXL", "LINPGA12 Switch", "LIN12 PGA"},
{"INMIXL", "LINPGA34 Switch", "LIN34 PGA"},
/* AILNMUX */
+ {"AILNMUX", NULL, "INL"},
{"AILNMUX", "INMIXL Mix", "INMIXL"},
{"AILNMUX", "DIFFINL Mix", "LIN12 PGA"},
{"AILNMUX", "DIFFINL Mix", "LIN34 PGA"},
@@ -808,12 +750,14 @@ static const struct snd_soc_dapm_route wm8400_dapm_routes[] = {
/* RIN34 PGA */
{"RIN34 PGA", "RIN3 Switch", "RIN3"},
{"RIN34 PGA", "RIN4 Switch", "RIN4/RXP"},
- /* INMIXL */
+ /* INMIXR */
+ {"INMIXR", NULL, "INR"},
{"INMIXR", "Record Right Volume", "ROMIX"},
{"INMIXR", "RIN2 Volume", "RIN2"},
{"INMIXR", "RINPGA12 Switch", "RIN12 PGA"},
{"INMIXR", "RINPGA34 Switch", "RIN34 PGA"},
/* AIRNMUX */
+ {"AIRNMUX", NULL, "INR"},
{"AIRNMUX", "INMIXR Mix", "INMIXR"},
{"AIRNMUX", "DIFFINR Mix", "RIN12 PGA"},
{"AIRNMUX", "DIFFINR Mix", "RIN34 PGA"},
@@ -1365,9 +1309,12 @@ static int wm8400_codec_probe(struct snd_soc_codec *codec)
return -ENOMEM;
snd_soc_codec_set_drvdata(codec, priv);
- codec->control_data = priv->wm8400 = wm8400;
+ priv->wm8400 = wm8400;
+ codec->control_data = wm8400->regmap;
priv->codec = codec;
+ snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_REGMAP);
+
ret = devm_regulator_bulk_get(wm8400->dev,
ARRAY_SIZE(power), &power[0]);
if (ret != 0) {
@@ -1414,8 +1361,6 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8400 = {
.remove = wm8400_codec_remove,
.suspend = wm8400_suspend,
.resume = wm8400_resume,
- .read = snd_soc_read,
- .write = wm8400_write,
.set_bias_level = wm8400_set_bias_level,
.controls = wm8400_snd_controls,
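
Editor's note: the wm8400 change removes the fake INTDRIVBITS register and its event handler; the shared input enables become SND_SOC_DAPM_SUPPLY widgets and the mixers move to SND_SOC_NOPM, so DAPM powers AINL/AINR through the routing graph instead. A sketch of that pattern (registers, shifts, and widget names are illustrative only):

    #include <linux/kernel.h>
    #include <sound/soc.h>
    #include <sound/soc-dapm.h>

    #define FOO_REG_POWER           0x02    /* hypothetical registers/fields */
    #define FOO_AINL_ENA_SHIFT      2
    #define FOO_REG_INMIX           0x0a

    static const struct snd_kcontrol_new foo_inmixl_controls[] = {
            SOC_DAPM_SINGLE("LIN2 Switch", FOO_REG_INMIX, 0, 1, 0),
    };

    static const struct snd_soc_dapm_widget foo_widgets[] = {
            /* Real register bit that powers the shared input stage. */
            SND_SOC_DAPM_SUPPLY("INL", FOO_REG_POWER, FOO_AINL_ENA_SHIFT, 0,
                                NULL, 0),
            /* The mixer itself no longer has a power bit of its own. */
            SND_SOC_DAPM_MIXER("INMIXL", SND_SOC_NOPM, 0, 0,
                               foo_inmixl_controls,
                               ARRAY_SIZE(foo_inmixl_controls)),
    };

    /* DAPM enables INL whenever a path through INMIXL is active. */
    static const struct snd_soc_dapm_route foo_routes[] = {
            { "INMIXL", NULL, "INL" },
    };
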
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 11d80f3b6137..3a2f96c5442c 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1758,6 +1758,9 @@ SOC_DOUBLE_R_TLV("EQ4 Volume", WM8962_EQ3, WM8962_EQ23,
WM8962_EQL_B4_GAIN_SHIFT, 31, 0, eq_tlv),
SOC_DOUBLE_R_TLV("EQ5 Volume", WM8962_EQ3, WM8962_EQ23,
WM8962_EQL_B5_GAIN_SHIFT, 31, 0, eq_tlv),
+SND_SOC_BYTES("EQL Coefficients", WM8962_EQ4, 18),
+SND_SOC_BYTES("EQR Coefficients", WM8962_EQ24, 18),
+
SOC_SINGLE("3D Switch", WM8962_THREED1, 0, 1, 0),
SND_SOC_BYTES_MASK("3D Coefficients", WM8962_THREED1, 4, WM8962_THREED_ENA),
@@ -1775,6 +1778,11 @@ WM8962_DSP2_ENABLE("HPF2 Switch", WM8962_HPF2_ENA_SHIFT),
SND_SOC_BYTES("HPF Coefficients", WM8962_LHPF2, 1),
WM8962_DSP2_ENABLE("HD Bass Switch", WM8962_HDBASS_ENA_SHIFT),
SND_SOC_BYTES("HD Bass Coefficients", WM8962_HDBASS_AI_1, 30),
+
+SOC_DOUBLE("ALC Switch", WM8962_ALC1, WM8962_ALCL_ENA_SHIFT,
+ WM8962_ALCR_ENA_SHIFT, 1, 0),
+SND_SOC_BYTES_MASK("ALC Coefficients", WM8962_ALC1, 4,
+ WM8962_ALCL_ENA_MASK | WM8962_ALCR_ENA_MASK),
};
static const struct snd_kcontrol_new wm8962_spk_mono_controls[] = {
@@ -3242,7 +3250,7 @@ static void wm8962_free_beep(struct snd_soc_codec *codec)
}
#endif
-static void wm8962_set_gpio_mode(struct snd_soc_codec *codec, int gpio)
+static void wm8962_set_gpio_mode(struct wm8962_priv *wm8962, int gpio)
{
int mask = 0;
int val = 0;
@@ -3263,8 +3271,8 @@ static void wm8962_set_gpio_mode(struct snd_soc_codec *codec, int gpio)
}
if (mask)
- snd_soc_update_bits(codec, WM8962_ANALOGUE_CLOCKING1,
- mask, val);
+ regmap_update_bits(wm8962->regmap, WM8962_ANALOGUE_CLOCKING1,
+ mask, val);
}
#ifdef CONFIG_GPIOLIB
@@ -3276,7 +3284,6 @@ static inline struct wm8962_priv *gpio_to_wm8962(struct gpio_chip *chip)
static int wm8962_gpio_request(struct gpio_chip *chip, unsigned offset)
{
struct wm8962_priv *wm8962 = gpio_to_wm8962(chip);
- struct snd_soc_codec *codec = wm8962->codec;
/* The WM8962 GPIOs aren't linearly numbered. For simplicity
* we export linear numbers and error out if the unsupported
@@ -3292,7 +3299,7 @@ static int wm8962_gpio_request(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
- wm8962_set_gpio_mode(codec, offset + 1);
+ wm8962_set_gpio_mode(wm8962, offset + 1);
return 0;
}
@@ -3376,8 +3383,7 @@ static int wm8962_probe(struct snd_soc_codec *codec)
{
int ret;
struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
- struct wm8962_pdata *pdata = &wm8962->pdata;
- int i, trigger, irq_pol;
+ int i;
bool dmicclk, dmicdat;
wm8962->codec = codec;
@@ -3409,75 +3415,6 @@ static int wm8962_probe(struct snd_soc_codec *codec)
}
}
- /* SYSCLK defaults to on; make sure it is off so we can safely
- * write to registers if the device is declocked.
- */
- snd_soc_update_bits(codec, WM8962_CLOCKING2, WM8962_SYSCLK_ENA, 0);
-
- /* Ensure we have soft control over all registers */
- snd_soc_update_bits(codec, WM8962_CLOCKING2,
- WM8962_CLKREG_OVD, WM8962_CLKREG_OVD);
-
- /* Ensure that the oscillator and PLLs are disabled */
- snd_soc_update_bits(codec, WM8962_PLL2,
- WM8962_OSC_ENA | WM8962_PLL2_ENA | WM8962_PLL3_ENA,
- 0);
-
- /* Apply static configuration for GPIOs */
- for (i = 0; i < ARRAY_SIZE(pdata->gpio_init); i++)
- if (pdata->gpio_init[i]) {
- wm8962_set_gpio_mode(codec, i + 1);
- snd_soc_write(codec, 0x200 + i,
- pdata->gpio_init[i] & 0xffff);
- }
-
-
- /* Put the speakers into mono mode? */
- if (pdata->spk_mono)
- snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_2,
- WM8962_SPK_MONO_MASK, WM8962_SPK_MONO);
-
- /* Micbias setup, detection enable and detection
- * threasholds. */
- if (pdata->mic_cfg)
- snd_soc_update_bits(codec, WM8962_ADDITIONAL_CONTROL_4,
- WM8962_MICDET_ENA |
- WM8962_MICDET_THR_MASK |
- WM8962_MICSHORT_THR_MASK |
- WM8962_MICBIAS_LVL,
- pdata->mic_cfg);
-
- /* Latch volume update bits */
- snd_soc_update_bits(codec, WM8962_LEFT_INPUT_VOLUME,
- WM8962_IN_VU, WM8962_IN_VU);
- snd_soc_update_bits(codec, WM8962_RIGHT_INPUT_VOLUME,
- WM8962_IN_VU, WM8962_IN_VU);
- snd_soc_update_bits(codec, WM8962_LEFT_ADC_VOLUME,
- WM8962_ADC_VU, WM8962_ADC_VU);
- snd_soc_update_bits(codec, WM8962_RIGHT_ADC_VOLUME,
- WM8962_ADC_VU, WM8962_ADC_VU);
- snd_soc_update_bits(codec, WM8962_LEFT_DAC_VOLUME,
- WM8962_DAC_VU, WM8962_DAC_VU);
- snd_soc_update_bits(codec, WM8962_RIGHT_DAC_VOLUME,
- WM8962_DAC_VU, WM8962_DAC_VU);
- snd_soc_update_bits(codec, WM8962_SPKOUTL_VOLUME,
- WM8962_SPKOUT_VU, WM8962_SPKOUT_VU);
- snd_soc_update_bits(codec, WM8962_SPKOUTR_VOLUME,
- WM8962_SPKOUT_VU, WM8962_SPKOUT_VU);
- snd_soc_update_bits(codec, WM8962_HPOUTL_VOLUME,
- WM8962_HPOUT_VU, WM8962_HPOUT_VU);
- snd_soc_update_bits(codec, WM8962_HPOUTR_VOLUME,
- WM8962_HPOUT_VU, WM8962_HPOUT_VU);
-
- /* Stereo control for EQ */
- snd_soc_update_bits(codec, WM8962_EQ1, WM8962_EQ_SHARED_COEFF, 0);
-
- /* Don't debouce interrupts so we don't need SYSCLK */
- snd_soc_update_bits(codec, WM8962_IRQ_DEBOUNCE,
- WM8962_FLL_LOCK_DB | WM8962_PLL3_LOCK_DB |
- WM8962_PLL2_LOCK_DB | WM8962_TEMP_SHUT_DB,
- 0);
-
wm8962_add_widgets(codec);
/* Save boards having to disable DMIC when not in use */
@@ -3506,36 +3443,6 @@ static int wm8962_probe(struct snd_soc_codec *codec)
wm8962_init_beep(codec);
wm8962_init_gpio(codec);
- if (wm8962->irq) {
- if (pdata->irq_active_low) {
- trigger = IRQF_TRIGGER_LOW;
- irq_pol = WM8962_IRQ_POL;
- } else {
- trigger = IRQF_TRIGGER_HIGH;
- irq_pol = 0;
- }
-
- snd_soc_update_bits(codec, WM8962_INTERRUPT_CONTROL,
- WM8962_IRQ_POL, irq_pol);
-
- ret = request_threaded_irq(wm8962->irq, NULL, wm8962_irq,
- trigger | IRQF_ONESHOT,
- "wm8962", codec->dev);
- if (ret != 0) {
- dev_err(codec->dev, "Failed to request IRQ %d: %d\n",
- wm8962->irq, ret);
- wm8962->irq = 0;
- /* Non-fatal */
- } else {
- /* Enable some IRQs by default */
- snd_soc_update_bits(codec,
- WM8962_INTERRUPT_STATUS_2_MASK,
- WM8962_FLL_LOCK_EINT |
- WM8962_TEMP_SHUT_EINT |
- WM8962_FIFOS_ERR_EINT, 0);
- }
- }
-
return 0;
}
@@ -3544,9 +3451,6 @@ static int wm8962_remove(struct snd_soc_codec *codec)
struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
int i;
- if (wm8962->irq)
- free_irq(wm8962->irq, codec);
-
cancel_delayed_work_sync(&wm8962->mic_work);
wm8962_free_gpio(codec);
@@ -3619,7 +3523,7 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
struct wm8962_pdata *pdata = dev_get_platdata(&i2c->dev);
struct wm8962_priv *wm8962;
unsigned int reg;
- int ret, i;
+ int ret, i, irq_pol, trigger;
wm8962 = devm_kzalloc(&i2c->dev, sizeof(struct wm8962_priv),
GFP_KERNEL);
@@ -3704,6 +3608,77 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
goto err_enable;
}
+ /* SYSCLK defaults to on; make sure it is off so we can safely
+ * write to registers if the device is declocked.
+ */
+ regmap_update_bits(wm8962->regmap, WM8962_CLOCKING2,
+ WM8962_SYSCLK_ENA, 0);
+
+ /* Ensure we have soft control over all registers */
+ regmap_update_bits(wm8962->regmap, WM8962_CLOCKING2,
+ WM8962_CLKREG_OVD, WM8962_CLKREG_OVD);
+
+ /* Ensure that the oscillator and PLLs are disabled */
+ regmap_update_bits(wm8962->regmap, WM8962_PLL2,
+ WM8962_OSC_ENA | WM8962_PLL2_ENA | WM8962_PLL3_ENA,
+ 0);
+
+ /* Apply static configuration for GPIOs */
+ for (i = 0; i < ARRAY_SIZE(wm8962->pdata.gpio_init); i++)
+ if (wm8962->pdata.gpio_init[i]) {
+ wm8962_set_gpio_mode(wm8962, i + 1);
+ regmap_write(wm8962->regmap, 0x200 + i,
+ wm8962->pdata.gpio_init[i] & 0xffff);
+ }
+
+
+ /* Put the speakers into mono mode? */
+ if (wm8962->pdata.spk_mono)
+ regmap_update_bits(wm8962->regmap, WM8962_CLASS_D_CONTROL_2,
+ WM8962_SPK_MONO_MASK, WM8962_SPK_MONO);
+
+ /* Micbias setup, detection enable and detection
+ * thresholds. */
+ if (wm8962->pdata.mic_cfg)
+ regmap_update_bits(wm8962->regmap, WM8962_ADDITIONAL_CONTROL_4,
+ WM8962_MICDET_ENA |
+ WM8962_MICDET_THR_MASK |
+ WM8962_MICSHORT_THR_MASK |
+ WM8962_MICBIAS_LVL,
+ wm8962->pdata.mic_cfg);
+
+ /* Latch volume update bits */
+ regmap_update_bits(wm8962->regmap, WM8962_LEFT_INPUT_VOLUME,
+ WM8962_IN_VU, WM8962_IN_VU);
+ regmap_update_bits(wm8962->regmap, WM8962_RIGHT_INPUT_VOLUME,
+ WM8962_IN_VU, WM8962_IN_VU);
+ regmap_update_bits(wm8962->regmap, WM8962_LEFT_ADC_VOLUME,
+ WM8962_ADC_VU, WM8962_ADC_VU);
+ regmap_update_bits(wm8962->regmap, WM8962_RIGHT_ADC_VOLUME,
+ WM8962_ADC_VU, WM8962_ADC_VU);
+ regmap_update_bits(wm8962->regmap, WM8962_LEFT_DAC_VOLUME,
+ WM8962_DAC_VU, WM8962_DAC_VU);
+ regmap_update_bits(wm8962->regmap, WM8962_RIGHT_DAC_VOLUME,
+ WM8962_DAC_VU, WM8962_DAC_VU);
+ regmap_update_bits(wm8962->regmap, WM8962_SPKOUTL_VOLUME,
+ WM8962_SPKOUT_VU, WM8962_SPKOUT_VU);
+ regmap_update_bits(wm8962->regmap, WM8962_SPKOUTR_VOLUME,
+ WM8962_SPKOUT_VU, WM8962_SPKOUT_VU);
+ regmap_update_bits(wm8962->regmap, WM8962_HPOUTL_VOLUME,
+ WM8962_HPOUT_VU, WM8962_HPOUT_VU);
+ regmap_update_bits(wm8962->regmap, WM8962_HPOUTR_VOLUME,
+ WM8962_HPOUT_VU, WM8962_HPOUT_VU);
+
+ /* Stereo control for EQ */
+ regmap_update_bits(wm8962->regmap, WM8962_EQ1,
+ WM8962_EQ_SHARED_COEFF, 0);
+
+ /* Don't debounce interrupts so we don't need SYSCLK */
+ regmap_update_bits(wm8962->regmap, WM8962_IRQ_DEBOUNCE,
+ WM8962_FLL_LOCK_DB | WM8962_PLL3_LOCK_DB |
+ WM8962_PLL2_LOCK_DB | WM8962_TEMP_SHUT_DB,
+ 0);
+
if (wm8962->pdata.in4_dc_measure) {
ret = regmap_register_patch(wm8962->regmap,
wm8962_dc_measure,
@@ -3714,6 +3689,37 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
ret);
}
+ if (wm8962->irq) {
+ if (wm8962->pdata.irq_active_low) {
+ trigger = IRQF_TRIGGER_LOW;
+ irq_pol = WM8962_IRQ_POL;
+ } else {
+ trigger = IRQF_TRIGGER_HIGH;
+ irq_pol = 0;
+ }
+
+ regmap_update_bits(wm8962->regmap, WM8962_INTERRUPT_CONTROL,
+ WM8962_IRQ_POL, irq_pol);
+
+ ret = devm_request_threaded_irq(&i2c->dev, wm8962->irq, NULL,
+ wm8962_irq,
+ trigger | IRQF_ONESHOT,
+ "wm8962", &i2c->dev);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n",
+ wm8962->irq, ret);
+ wm8962->irq = 0;
+ /* Non-fatal */
+ } else {
+ /* Enable some IRQs by default */
+ regmap_update_bits(wm8962->regmap,
+ WM8962_INTERRUPT_STATUS_2_MASK,
+ WM8962_FLL_LOCK_EINT |
+ WM8962_TEMP_SHUT_EINT |
+ WM8962_FIFOS_ERR_EINT, 0);
+ }
+ }
+
pm_runtime_enable(&i2c->dev);
pm_request_idle(&i2c->dev);
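
Editor's note: the wm8962 hunks move the one-time hardware setup and the IRQ request out of the codec-level probe and into the I2C probe, operating directly on the regmap and using devm_request_threaded_irq() so the handler is released automatically. A stripped-down sketch of that shape (the handler, registers, and bit names are placeholders):

    #include <linux/err.h>
    #include <linux/i2c.h>
    #include <linux/interrupt.h>
    #include <linux/regmap.h>

    #define FOO_REG_CLOCKING        0x08    /* hypothetical */
    #define FOO_SYSCLK_ENA          0x0020

    static const struct regmap_config foo_regmap_cfg = {
            .reg_bits = 16,
            .val_bits = 16,
    };

    static irqreturn_t foo_irq(int irq, void *data)
    {
            /* Placeholder threaded handler. */
            return IRQ_HANDLED;
    }

    static int foo_i2c_probe(struct i2c_client *i2c,
                             const struct i2c_device_id *id)
    {
            struct regmap *map = devm_regmap_init_i2c(i2c, &foo_regmap_cfg);
            int ret;

            if (IS_ERR(map))
                    return PTR_ERR(map);

            /* Static setup happens here, before the CODEC is registered. */
            regmap_update_bits(map, FOO_REG_CLOCKING, FOO_SYSCLK_ENA, 0);

            if (i2c->irq) {
                    ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
                                                    foo_irq,
                                                    IRQF_TRIGGER_HIGH |
                                                    IRQF_ONESHOT,
                                                    "foo", &i2c->dev);
                    if (ret)
                            dev_warn(&i2c->dev, "Failed to request IRQ: %d\n",
                                     ret);
            }

            return 0;
    }
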
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 46fe83d2b224..b70379ebd142 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -438,6 +438,8 @@ static int wm8996_get_retune_mobile_enum(struct snd_kcontrol *kcontrol,
struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
int block = wm8996_get_retune_mobile_block(kcontrol->id.name);
+ if (block < 0)
+ return block;
ucontrol->value.enumerated.item[0] = wm8996->retune_mobile_cfg[block];
return 0;
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index b38f3506418f..53b6033658a6 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -396,11 +396,12 @@ static int wm_coeff_write_control(struct snd_kcontrol *kcontrol,
ret = regmap_raw_write(adsp->regmap, reg, scratch,
ctl->len);
if (ret) {
- adsp_err(adsp, "Failed to write %zu bytes to %x\n",
- ctl->len, reg);
+ adsp_err(adsp, "Failed to write %zu bytes to %x: %d\n",
+ ctl->len, reg, ret);
kfree(scratch);
return ret;
}
+ adsp_dbg(adsp, "Wrote %zu bytes to %x\n", ctl->len, reg);
kfree(scratch);
@@ -450,11 +451,12 @@ static int wm_coeff_read_control(struct snd_kcontrol *kcontrol,
ret = regmap_raw_read(adsp->regmap, reg, scratch, ctl->len);
if (ret) {
- adsp_err(adsp, "Failed to read %zu bytes from %x\n",
- ctl->len, reg);
+ adsp_err(adsp, "Failed to read %zu bytes from %x: %d\n",
+ ctl->len, reg, ret);
kfree(scratch);
return ret;
}
+ adsp_dbg(adsp, "Read %zu bytes from %x\n", ctl->len, reg);
memcpy(buf, scratch, ctl->len);
kfree(scratch);
@@ -568,6 +570,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
file, header->ver);
goto out_fw;
}
+ adsp_info(dsp, "Firmware version: %d\n", header->ver);
if (header->core != dsp->type) {
adsp_err(dsp, "%s: invalid core %d != %d\n",
@@ -689,7 +692,8 @@ static int wm_adsp_load(struct wm_adsp *dsp)
&buf_list);
if (!buf) {
adsp_err(dsp, "Out of memory\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_fw;
}
ret = regmap_raw_write_async(regmap, reg, buf->buf,
@@ -1313,8 +1317,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
le32_to_cpu(blk->len));
if (ret != 0) {
adsp_err(dsp,
- "%s.%d: Failed to write to %x in %s\n",
- file, blocks, reg, region_name);
+ "%s.%d: Failed to write to %x in %s: %d\n",
+ file, blocks, reg, region_name, ret);
}
}
@@ -1358,6 +1362,7 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
struct snd_soc_codec *codec = w->codec;
struct wm_adsp *dsps = snd_soc_codec_get_drvdata(codec);
struct wm_adsp *dsp = &dsps[w->shift];
+ struct wm_adsp_alg_region *alg_region;
struct wm_coeff_ctl *ctl;
int ret;
int val;
@@ -1435,6 +1440,14 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
list_for_each_entry(ctl, &dsp->ctl_list, list)
ctl->enabled = 0;
+
+ while (!list_empty(&dsp->alg_regions)) {
+ alg_region = list_first_entry(&dsp->alg_regions,
+ struct wm_adsp_alg_region,
+ list);
+ list_del(&alg_region->list);
+ kfree(alg_region);
+ }
break;
default:
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 8b50e5958de5..01daf655e20b 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -530,6 +530,7 @@ static int hp_supply_event(struct snd_soc_dapm_widget *w,
hubs->hp_startup_mode);
break;
}
+ break;
case SND_SOC_DAPM_PRE_PMD:
snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig
index c82f89c9475b..95970f5db3ec 100644
--- a/sound/soc/davinci/Kconfig
+++ b/sound/soc/davinci/Kconfig
@@ -1,9 +1,10 @@
config SND_DAVINCI_SOC
- tristate "SoC Audio for the TI DAVINCI chip"
- depends on ARCH_DAVINCI
+ tristate "SoC Audio for the TI DAVINCI or AM33XX chip"
+ depends on ARCH_DAVINCI || SOC_AM33XX
help
+ Platform driver for DaVinci or AM33xx.
Say Y or M if you want to add support for codecs attached to
- the DAVINCI AC97 or I2S interface. You will also need
+ the DAVINCI AC97, I2S, or McASP interface. You will also need
to select the audio interfaces to support below.
config SND_DAVINCI_SOC_I2S
@@ -15,6 +16,17 @@ config SND_DAVINCI_SOC_MCASP
config SND_DAVINCI_SOC_VCIF
tristate
+config SND_AM33XX_SOC_EVM
+ tristate "SoC Audio for the AM33XX chip based boards"
+ depends on SND_DAVINCI_SOC && SOC_AM33XX
+ select SND_SOC_TLV320AIC3X
+ select SND_DAVINCI_SOC_MCASP
+ help
+ Say Y or M if you want to add support for SoC audio on AM33XX
+ boards using McASP and the TLV320AIC3X codec. For example, the
+ AM335X-EVM, AM335X-EVMSK, and BeagleBone with AudioCape boards
+ have this setup.
+
config SND_DAVINCI_SOC_EVM
tristate "SoC Audio support for DaVinci DM6446, DM355 or DM365 EVM"
depends on SND_DAVINCI_SOC
diff --git a/sound/soc/davinci/Makefile b/sound/soc/davinci/Makefile
index a396ab6d6d5e..bc81e79fc301 100644
--- a/sound/soc/davinci/Makefile
+++ b/sound/soc/davinci/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SND_DAVINCI_SOC_VCIF) += snd-soc-davinci-vcif.o
snd-soc-evm-objs := davinci-evm.o
obj-$(CONFIG_SND_DAVINCI_SOC_EVM) += snd-soc-evm.o
+obj-$(CONFIG_SND_AM33XX_SOC_EVM) += snd-soc-evm.o
obj-$(CONFIG_SND_DM6467_SOC_EVM) += snd-soc-evm.o
obj-$(CONFIG_SND_DA830_SOC_EVM) += snd-soc-evm.o
obj-$(CONFIG_SND_DA850_SOC_EVM) += snd-soc-evm.o
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index fd7c45b9ed5a..623eb5e7c089 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/platform_data/edma.h>
#include <linux/i2c.h>
+#include <linux/of_platform.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
@@ -23,10 +24,16 @@
#include <asm/dma.h>
#include <asm/mach-types.h>
+#include <linux/edma.h>
+
#include "davinci-pcm.h"
#include "davinci-i2s.h"
#include "davinci-mcasp.h"
+struct snd_soc_card_drvdata_davinci {
+ unsigned sysclk;
+};
+
#define AUDIO_FORMAT (SND_SOC_DAIFMT_DSP_B | \
SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_IB_NF)
static int evm_hw_params(struct snd_pcm_substream *substream,
@@ -35,27 +42,11 @@ static int evm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_card *soc_card = codec->card;
int ret = 0;
- unsigned sysclk;
-
- /* ASP1 on DM355 EVM is clocked by an external oscillator */
- if (machine_is_davinci_dm355_evm() || machine_is_davinci_dm6467_evm() ||
- machine_is_davinci_dm365_evm())
- sysclk = 27000000;
-
- /* ASP0 in DM6446 EVM is clocked by U55, as configured by
- * board-dm644x-evm.c using GPIOs from U18. There are six
- * options; here we "know" we use a 48 KHz sample rate.
- */
- else if (machine_is_davinci_evm())
- sysclk = 12288000;
-
- else if (machine_is_davinci_da830_evm() ||
- machine_is_davinci_da850_evm())
- sysclk = 24576000;
-
- else
- return -EINVAL;
+ unsigned sysclk = ((struct snd_soc_card_drvdata_davinci *)
+ snd_soc_card_get_drvdata(soc_card))->sysclk;
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai, AUDIO_FORMAT);
@@ -133,13 +124,22 @@ static int evm_aic3x_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
+ struct device_node *np = codec->card->dev->of_node;
+ int ret;
/* Add davinci-evm specific widgets */
snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
ARRAY_SIZE(aic3x_dapm_widgets));
- /* Set up davinci-evm specific audio path audio_map */
- snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+ if (np) {
+ ret = snd_soc_of_parse_audio_routing(codec->card,
+ "ti,audio-routing");
+ if (ret)
+ return ret;
+ } else {
+ /* Set up davinci-evm specific audio path audio_map */
+ snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+ }
/* not connected */
snd_soc_dapm_disable_pin(dapm, "MONO_LOUT");
@@ -243,35 +243,65 @@ static struct snd_soc_dai_link da850_evm_dai = {
};
/* davinci dm6446 evm audio machine driver */
+/*
+ * ASP0 in DM6446 EVM is clocked by U55, as configured by
+ * board-dm644x-evm.c using GPIOs from U18. There are six
+ * options; here we "know" we use a 48 KHz sample rate.
+ */
+static struct snd_soc_card_drvdata_davinci dm6446_snd_soc_card_drvdata = {
+ .sysclk = 12288000,
+};
+
static struct snd_soc_card dm6446_snd_soc_card_evm = {
.name = "DaVinci DM6446 EVM",
.owner = THIS_MODULE,
.dai_link = &dm6446_evm_dai,
.num_links = 1,
+ .drvdata = &dm6446_snd_soc_card_drvdata,
};
/* davinci dm355 evm audio machine driver */
+/* ASP1 on DM355 EVM is clocked by an external oscillator */
+static struct snd_soc_card_drvdata_davinci dm355_snd_soc_card_drvdata = {
+ .sysclk = 27000000,
+};
+
static struct snd_soc_card dm355_snd_soc_card_evm = {
.name = "DaVinci DM355 EVM",
.owner = THIS_MODULE,
.dai_link = &dm355_evm_dai,
.num_links = 1,
+ .drvdata = &dm355_snd_soc_card_drvdata,
};
/* davinci dm365 evm audio machine driver */
+static struct snd_soc_card_drvdata_davinci dm365_snd_soc_card_drvdata = {
+ .sysclk = 27000000,
+};
+
static struct snd_soc_card dm365_snd_soc_card_evm = {
.name = "DaVinci DM365 EVM",
.owner = THIS_MODULE,
.dai_link = &dm365_evm_dai,
.num_links = 1,
+ .drvdata = &dm365_snd_soc_card_drvdata,
};
/* davinci dm6467 evm audio machine driver */
+static struct snd_soc_card_drvdata_davinci dm6467_snd_soc_card_drvdata = {
+ .sysclk = 27000000,
+};
+
static struct snd_soc_card dm6467_snd_soc_card_evm = {
.name = "DaVinci DM6467 EVM",
.owner = THIS_MODULE,
.dai_link = dm6467_evm_dai,
.num_links = ARRAY_SIZE(dm6467_evm_dai),
+ .drvdata = &dm6467_snd_soc_card_drvdata,
+};
+
+static struct snd_soc_card_drvdata_davinci da830_snd_soc_card_drvdata = {
+ .sysclk = 24576000,
};
static struct snd_soc_card da830_snd_soc_card = {
@@ -279,6 +309,11 @@ static struct snd_soc_card da830_snd_soc_card = {
.owner = THIS_MODULE,
.dai_link = &da830_evm_dai,
.num_links = 1,
+ .drvdata = &da830_snd_soc_card_drvdata,
+};
+
+static struct snd_soc_card_drvdata_davinci da850_snd_soc_card_drvdata = {
+ .sysclk = 24576000,
};
static struct snd_soc_card da850_snd_soc_card = {
@@ -286,8 +321,101 @@ static struct snd_soc_card da850_snd_soc_card = {
.owner = THIS_MODULE,
.dai_link = &da850_evm_dai,
.num_links = 1,
+ .drvdata = &da850_snd_soc_card_drvdata,
+};
+
+#if defined(CONFIG_OF)
+
+/*
+ * The struct is used as a placeholder. It will be completely
+ * filled with data from the DT node.
+ */
+static struct snd_soc_dai_link evm_dai_tlv320aic3x = {
+ .name = "TLV320AIC3X",
+ .stream_name = "AIC3X",
+ .codec_dai_name = "tlv320aic3x-hifi",
+ .ops = &evm_ops,
+ .init = evm_aic3x_init,
+};
+
+static const struct of_device_id davinci_evm_dt_ids[] = {
+ {
+ .compatible = "ti,da830-evm-audio",
+ .data = (void *) &evm_dai_tlv320aic3x,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, davinci_evm_dt_ids);
+
+/* davinci evm audio machine driver */
+static struct snd_soc_card evm_soc_card = {
+ .owner = THIS_MODULE,
+ .num_links = 1,
};
+static int davinci_evm_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match =
+ of_match_device(of_match_ptr(davinci_evm_dt_ids), &pdev->dev);
+ struct snd_soc_dai_link *dai = (struct snd_soc_dai_link *) match->data;
+ struct snd_soc_card_drvdata_davinci *drvdata = NULL;
+ int ret = 0;
+
+ evm_soc_card.dai_link = dai;
+
+ dai->codec_of_node = of_parse_phandle(np, "ti,audio-codec", 0);
+ if (!dai->codec_of_node)
+ return -EINVAL;
+
+ dai->cpu_of_node = of_parse_phandle(np, "ti,mcasp-controller", 0);
+ if (!dai->cpu_of_node)
+ return -EINVAL;
+
+ dai->platform_of_node = dai->cpu_of_node;
+
+ evm_soc_card.dev = &pdev->dev;
+ ret = snd_soc_of_parse_card_name(&evm_soc_card, "ti,model");
+ if (ret)
+ return ret;
+
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "ti,codec-clock-rate", &drvdata->sysclk);
+ if (ret < 0)
+ return -EINVAL;
+
+ snd_soc_card_set_drvdata(&evm_soc_card, drvdata);
+ ret = devm_snd_soc_register_card(&pdev->dev, &evm_soc_card);
+
+ if (ret)
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int davinci_evm_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ snd_soc_unregister_card(card);
+
+ return 0;
+}
+
+static struct platform_driver davinci_evm_driver = {
+ .probe = davinci_evm_probe,
+ .remove = davinci_evm_remove,
+ .driver = {
+ .name = "davinci_evm",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(davinci_evm_dt_ids),
+ },
+};
+#endif
+
static struct platform_device *evm_snd_device;
static int __init evm_init(void)
@@ -296,6 +424,15 @@ static int __init evm_init(void)
int index;
int ret;
+ /*
+ * If a DTB is present, the devices will be created dynamically.
+ * Only register the platform driver structure.
+ */
+#if defined(CONFIG_OF)
+ if (of_have_populated_dt())
+ return platform_driver_register(&davinci_evm_driver);
+#endif
+
if (machine_is_davinci_evm()) {
evm_snd_dev_data = &dm6446_snd_soc_card_evm;
index = 0;
@@ -331,6 +468,13 @@ static int __init evm_init(void)
static void __exit evm_exit(void)
{
+#if defined(CONFIG_OF)
+ if (of_have_populated_dt()) {
+ platform_driver_unregister(&davinci_evm_driver);
+ return;
+ }
+#endif
+
platform_device_unregister(evm_snd_device);
}
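
For reference, a board device tree node that the new OF probe path above would bind against could look roughly like the sketch below. The compatible string and the property names (ti,model, ti,audio-codec, ti,mcasp-controller, ti,codec-clock-rate, ti,audio-routing) are the ones parsed by davinci_evm_probe(); the node name, phandle labels, clock rate and routing entries are illustrative placeholders, not taken from this patch.

	sound {
		compatible = "ti,da830-evm-audio";
		ti,model = "AM335x-EVM";		/* card name, read via snd_soc_of_parse_card_name() */
		ti,audio-codec = <&tlv320aic3x>;	/* phandle to the codec node (placeholder label) */
		ti,mcasp-controller = <&mcasp1>;	/* phandle to the McASP node (placeholder label) */
		ti,codec-clock-rate = <12000000>;	/* stored in snd_soc_card_drvdata_davinci.sysclk */
		ti,audio-routing =			/* parsed by snd_soc_of_parse_audio_routing() */
			"Headphone Jack",	"HPLOUT",
			"Headphone Jack",	"HPROUT";
	};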
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 32ddb7fe5034..71e14bb3a8cd 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1001,18 +1001,40 @@ static const struct snd_soc_component_driver davinci_mcasp_component = {
.name = "davinci-mcasp",
};
+/* Some HW specific values and defaults. The rest is filled in from DT. */
+static struct snd_platform_data dm646x_mcasp_pdata = {
+ .tx_dma_offset = 0x400,
+ .rx_dma_offset = 0x400,
+ .asp_chan_q = EVENTQ_0,
+ .version = MCASP_VERSION_1,
+};
+
+static struct snd_platform_data da830_mcasp_pdata = {
+ .tx_dma_offset = 0x2000,
+ .rx_dma_offset = 0x2000,
+ .asp_chan_q = EVENTQ_0,
+ .version = MCASP_VERSION_2,
+};
+
+static struct snd_platform_data omap2_mcasp_pdata = {
+ .tx_dma_offset = 0,
+ .rx_dma_offset = 0,
+ .asp_chan_q = EVENTQ_0,
+ .version = MCASP_VERSION_3,
+};
+
static const struct of_device_id mcasp_dt_ids[] = {
{
.compatible = "ti,dm646x-mcasp-audio",
- .data = (void *)MCASP_VERSION_1,
+ .data = &dm646x_mcasp_pdata,
},
{
.compatible = "ti,da830-mcasp-audio",
- .data = (void *)MCASP_VERSION_2,
+ .data = &da830_mcasp_pdata,
},
{
- .compatible = "ti,omap2-mcasp-audio",
- .data = (void *)MCASP_VERSION_3,
+ .compatible = "ti,am33xx-mcasp-audio",
+ .data = &omap2_mcasp_pdata,
},
{ /* sentinel */ }
};
@@ -1025,9 +1047,9 @@ static struct snd_platform_data *davinci_mcasp_set_pdata_from_of(
struct snd_platform_data *pdata = NULL;
const struct of_device_id *match =
of_match_device(mcasp_dt_ids, &pdev->dev);
+ struct of_phandle_args dma_spec;
const u32 *of_serial_dir32;
- u8 *of_serial_dir;
u32 val;
int i, ret = 0;
@@ -1035,20 +1057,13 @@ static struct snd_platform_data *davinci_mcasp_set_pdata_from_of(
pdata = pdev->dev.platform_data;
return pdata;
} else if (match) {
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- ret = -ENOMEM;
- goto nodata;
- }
+ pdata = (struct snd_platform_data *) match->data;
} else {
/* control shouldn't reach here. something is wrong */
ret = -EINVAL;
goto nodata;
}
- if (match->data)
- pdata->version = (u8)((int)match->data);
-
ret = of_property_read_u32(np, "op-mode", &val);
if (ret >= 0)
pdata->op_mode = val;
@@ -1065,35 +1080,46 @@ static struct snd_platform_data *davinci_mcasp_set_pdata_from_of(
pdata->tdm_slots = val;
}
- ret = of_property_read_u32(np, "num-serializer", &val);
- if (ret >= 0)
- pdata->num_serializer = val;
-
of_serial_dir32 = of_get_property(np, "serial-dir", &val);
val /= sizeof(u32);
- if (val != pdata->num_serializer) {
- dev_err(&pdev->dev,
- "num-serializer(%d) != serial-dir size(%d)\n",
- pdata->num_serializer, val);
- ret = -EINVAL;
- goto nodata;
- }
-
if (of_serial_dir32) {
- of_serial_dir = devm_kzalloc(&pdev->dev,
- (sizeof(*of_serial_dir) * val),
- GFP_KERNEL);
+ u8 *of_serial_dir = devm_kzalloc(&pdev->dev,
+ (sizeof(*of_serial_dir) * val),
+ GFP_KERNEL);
if (!of_serial_dir) {
ret = -ENOMEM;
goto nodata;
}
- for (i = 0; i < pdata->num_serializer; i++)
+ for (i = 0; i < val; i++)
of_serial_dir[i] = be32_to_cpup(&of_serial_dir32[i]);
+ pdata->num_serializer = val;
pdata->serial_dir = of_serial_dir;
}
+ ret = of_property_match_string(np, "dma-names", "tx");
+ if (ret < 0)
+ goto nodata;
+
+ ret = of_parse_phandle_with_args(np, "dmas", "#dma-cells", ret,
+ &dma_spec);
+ if (ret < 0)
+ goto nodata;
+
+ pdata->tx_dma_channel = dma_spec.args[0];
+
+ ret = of_property_match_string(np, "dma-names", "rx");
+ if (ret < 0)
+ goto nodata;
+
+ ret = of_parse_phandle_with_args(np, "dmas", "#dma-cells", ret,
+ &dma_spec);
+ if (ret < 0)
+ goto nodata;
+
+ pdata->rx_dma_channel = dma_spec.args[0];
+
ret = of_property_read_u32(np, "tx-num-evt", &val);
if (ret >= 0)
pdata->txnumevt = val;
@@ -1124,7 +1150,7 @@ nodata:
static int davinci_mcasp_probe(struct platform_device *pdev)
{
struct davinci_pcm_dma_params *dma_data;
- struct resource *mem, *ioarea, *res;
+ struct resource *mem, *ioarea, *res, *dat;
struct snd_platform_data *pdata;
struct davinci_audio_dev *dev;
int ret;
@@ -1145,10 +1171,15 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
return -EINVAL;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
if (!mem) {
- dev_err(&pdev->dev, "no mem resource?\n");
- return -ENODEV;
+ dev_warn(dev->dev,
+ "\"mpu\" mem resource not found, using index 0\n");
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -ENODEV;
+ }
}
ioarea = devm_request_mem_region(&pdev->dev, mem->start,
@@ -1182,40 +1213,36 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
dev->rxnumevt = pdata->rxnumevt;
dev->dev = &pdev->dev;
+ dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
+ if (!dat)
+ dat = mem;
+
dma_data = &dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK];
dma_data->asp_chan_q = pdata->asp_chan_q;
dma_data->ram_chan_q = pdata->ram_chan_q;
dma_data->sram_pool = pdata->sram_pool;
dma_data->sram_size = pdata->sram_size_playback;
- dma_data->dma_addr = (dma_addr_t) (pdata->tx_dma_offset +
- mem->start);
+ dma_data->dma_addr = dat->start + pdata->tx_dma_offset;
- /* first TX, then RX */
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!res) {
- dev_err(&pdev->dev, "no DMA resource\n");
- ret = -ENODEV;
- goto err_release_clk;
- }
-
- dma_data->channel = res->start;
+ if (res)
+ dma_data->channel = res->start;
+ else
+ dma_data->channel = pdata->tx_dma_channel;
dma_data = &dev->dma_params[SNDRV_PCM_STREAM_CAPTURE];
dma_data->asp_chan_q = pdata->asp_chan_q;
dma_data->ram_chan_q = pdata->ram_chan_q;
dma_data->sram_pool = pdata->sram_pool;
dma_data->sram_size = pdata->sram_size_capture;
- dma_data->dma_addr = (dma_addr_t)(pdata->rx_dma_offset +
- mem->start);
+ dma_data->dma_addr = dat->start + pdata->rx_dma_offset;
res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!res) {
- dev_err(&pdev->dev, "no DMA resource\n");
- ret = -ENODEV;
- goto err_release_clk;
- }
+ if (res)
+ dma_data->channel = res->start;
+ else
+ dma_data->channel = pdata->rx_dma_channel;
- dma_data->channel = res->start;
dev_set_drvdata(&pdev->dev, dev);
ret = snd_soc_register_component(&pdev->dev, &davinci_mcasp_component,
&davinci_mcasp_dai[pdata->op_mode], 1);
@@ -1251,12 +1278,51 @@ static int davinci_mcasp_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int davinci_mcasp_suspend(struct device *dev)
+{
+ struct davinci_audio_dev *a = dev_get_drvdata(dev);
+ void __iomem *base = a->base;
+
+ a->context.txfmtctl = mcasp_get_reg(base + DAVINCI_MCASP_TXFMCTL_REG);
+ a->context.rxfmtctl = mcasp_get_reg(base + DAVINCI_MCASP_RXFMCTL_REG);
+ a->context.txfmt = mcasp_get_reg(base + DAVINCI_MCASP_TXFMT_REG);
+ a->context.rxfmt = mcasp_get_reg(base + DAVINCI_MCASP_RXFMT_REG);
+ a->context.aclkxctl = mcasp_get_reg(base + DAVINCI_MCASP_ACLKXCTL_REG);
+ a->context.aclkrctl = mcasp_get_reg(base + DAVINCI_MCASP_ACLKRCTL_REG);
+ a->context.pdir = mcasp_get_reg(base + DAVINCI_MCASP_PDIR_REG);
+
+ return 0;
+}
+
+static int davinci_mcasp_resume(struct device *dev)
+{
+ struct davinci_audio_dev *a = dev_get_drvdata(dev);
+ void __iomem *base = a->base;
+
+ mcasp_set_reg(base + DAVINCI_MCASP_TXFMCTL_REG, a->context.txfmtctl);
+ mcasp_set_reg(base + DAVINCI_MCASP_RXFMCTL_REG, a->context.rxfmtctl);
+ mcasp_set_reg(base + DAVINCI_MCASP_TXFMT_REG, a->context.txfmt);
+ mcasp_set_reg(base + DAVINCI_MCASP_RXFMT_REG, a->context.rxfmt);
+ mcasp_set_reg(base + DAVINCI_MCASP_ACLKXCTL_REG, a->context.aclkxctl);
+ mcasp_set_reg(base + DAVINCI_MCASP_ACLKRCTL_REG, a->context.aclkrctl);
+ mcasp_set_reg(base + DAVINCI_MCASP_PDIR_REG, a->context.pdir);
+
+ return 0;
+}
+#endif
+
+SIMPLE_DEV_PM_OPS(davinci_mcasp_pm_ops,
+ davinci_mcasp_suspend,
+ davinci_mcasp_resume);
+
static struct platform_driver davinci_mcasp_driver = {
.probe = davinci_mcasp_probe,
.remove = davinci_mcasp_remove,
.driver = {
.name = "davinci-mcasp",
.owner = THIS_MODULE,
+ .pm = &davinci_mcasp_pm_ops,
.of_match_table = mcasp_dt_ids,
},
};
@@ -1266,4 +1332,3 @@ module_platform_driver(davinci_mcasp_driver);
MODULE_AUTHOR("Steve Chen");
MODULE_DESCRIPTION("TI DAVINCI McASP SoC Interface");
MODULE_LICENSE("GPL");
-
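
Similarly, a McASP node consumed by the reworked davinci_mcasp_set_pdata_from_of() could look roughly like this sketch. The compatible string, the "mpu"/"dat" reg-names, and the op-mode, tdm-slots, serial-dir, dmas/dma-names and tx-num-evt properties are the ones handled above; the unit address, register ranges, labels, EDMA specifiers and all values are placeholders.

	mcasp1: mcasp@48038000 {
		compatible = "ti,am33xx-mcasp-audio";
		reg = <0x48038000 0x2000>,
		      <0x46000000 0x400000>;
		reg-names = "mpu", "dat";	/* "dat" supplies the DMA address, "mpu" the control registers */
		op-mode = <0>;			/* placeholder value */
		tdm-slots = <2>;
		serial-dir = <0 0 1 2>;		/* per-serializer direction flags, copied into pdata->serial_dir */
		dmas = <&edma 8>, <&edma 9>;	/* placeholder EDMA channels */
		dma-names = "tx", "rx";		/* matched via of_property_match_string() */
		tx-num-evt = <1>;
	};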
diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h
index a9ac0c11da71..a2e27e1c32f3 100644
--- a/sound/soc/davinci/davinci-mcasp.h
+++ b/sound/soc/davinci/davinci-mcasp.h
@@ -43,6 +43,18 @@ struct davinci_audio_dev {
/* McASP FIFO related */
u8 txnumevt;
u8 rxnumevt;
+
+#ifdef CONFIG_PM_SLEEP
+ struct {
+ u32 txfmtctl;
+ u32 rxfmtctl;
+ u32 txfmt;
+ u32 rxfmt;
+ u32 aclkxctl;
+ u32 aclkrctl;
+ u32 pdir;
+ } context;
+#endif
};
#endif /* DAVINCI_MCASP_H */
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index 8460edce1c3b..84a63c660ab9 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -844,18 +844,15 @@ static void davinci_pcm_free(struct snd_pcm *pcm)
}
}
-static u64 davinci_pcm_dmamask = DMA_BIT_MASK(32);
-
static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &davinci_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = davinci_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index 9a4a0ca2c1de..5983740be123 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -42,7 +42,8 @@ static int eukrea_tlv320_hw_params(struct snd_pcm_substream *substream,
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM);
if (ret) {
- pr_err("%s: failed set cpu dai format\n", __func__);
+ dev_err(cpu_dai->dev,
+ "Failed to set the cpu dai format.\n");
return ret;
}
@@ -50,14 +51,16 @@ static int eukrea_tlv320_hw_params(struct snd_pcm_substream *substream,
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM);
if (ret) {
- pr_err("%s: failed set codec dai format\n", __func__);
+ dev_err(cpu_dai->dev,
+ "Failed to set the codec format.\n");
return ret;
}
ret = snd_soc_dai_set_sysclk(codec_dai, 0,
CODEC_CLOCK, SND_SOC_CLOCK_OUT);
if (ret) {
- pr_err("%s: failed setting codec sysclk\n", __func__);
+ dev_err(cpu_dai->dev,
+ "Failed to set the codec sysclk.\n");
return ret;
}
snd_soc_dai_set_tdm_slot(cpu_dai, 0xffffffc, 0xffffffc, 2, 0);
@@ -65,7 +68,8 @@ static int eukrea_tlv320_hw_params(struct snd_pcm_substream *substream,
ret = snd_soc_dai_set_sysclk(cpu_dai, IMX_SSP_SYS_CLK, 0,
SND_SOC_CLOCK_IN);
if (ret) {
- pr_err("can't set CPU system clock IMX_SSP_SYS_CLK\n");
+ dev_err(cpu_dai->dev,
+ "Can't set the IMX_SSP_SYS_CLK CPU system clock.\n");
return ret;
}
@@ -155,7 +159,8 @@ static struct platform_driver eukrea_tlv320_driver = {
.owner = THIS_MODULE,
},
.probe = eukrea_tlv320_probe,
- .remove = eukrea_tlv320_remove,};
+ .remove = eukrea_tlv320_remove,
+};
module_platform_driver(eukrea_tlv320_driver);
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index 9cc5c1f82f09..fb9bb9eb5ca3 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -21,6 +21,8 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/gfp.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/list.h>
#include <linux/slab.h>
@@ -298,14 +300,11 @@ static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- static u64 fsl_dma_dmamask = DMA_BIT_MASK(36);
int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &fsl_dma_dmamask;
-
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = fsl_dma_dmamask;
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(36));
+ if (ret)
+ return ret;
/* Some codecs have separate DAIs for playback and capture, so we
* should allocate a DMA buffer only for the streams that are valid.
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 3920c3e849ce..76c742a09ef9 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -963,7 +963,7 @@ static bool fsl_spdif_readable_reg(struct device *dev, unsigned int reg)
return true;
default:
return false;
- };
+ }
}
static bool fsl_spdif_writeable_reg(struct device *dev, unsigned int reg)
@@ -982,7 +982,7 @@ static bool fsl_spdif_writeable_reg(struct device *dev, unsigned int reg)
return true;
default:
return false;
- };
+ }
}
static const struct regmap_config fsl_spdif_regmap_config = {
@@ -1107,9 +1107,9 @@ static int fsl_spdif_probe(struct platform_device *pdev)
/* Get the addresses and IRQ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (IS_ERR(res)) {
+ if (!res) {
dev_err(&pdev->dev, "could not determine device resources\n");
- return PTR_ERR(res);
+ return -ENXIO;
}
regs = devm_ioremap_resource(&pdev->dev, res);
@@ -1172,23 +1172,16 @@ static int fsl_spdif_probe(struct platform_device *pdev)
/* Register with ASoC */
dev_set_drvdata(&pdev->dev, spdif_priv);
- ret = snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
- &spdif_priv->cpu_dai_drv, 1);
+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
+ &spdif_priv->cpu_dai_drv, 1);
if (ret) {
dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
return ret;
}
ret = imx_pcm_dma_init(pdev);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
- goto error_component;
- }
-
- return ret;
-
-error_component:
- snd_soc_unregister_component(&pdev->dev);
return ret;
}
@@ -1196,7 +1189,6 @@ error_component:
static int fsl_spdif_remove(struct platform_device *pdev)
{
imx_pcm_dma_exit(pdev);
- snd_soc_unregister_component(&pdev->dev);
return 0;
}
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index c6b743978d5e..35e277379b86 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -469,19 +469,12 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream,
* parameters, then the second stream may be
* constrained to the wrong sample rate or size.
*/
- if (!first_runtime->sample_bits) {
- dev_err(substream->pcm->card->dev,
- "set sample size in %s stream first\n",
- substream->stream ==
- SNDRV_PCM_STREAM_PLAYBACK
- ? "capture" : "playback");
- return -EAGAIN;
- }
-
- snd_pcm_hw_constraint_minmax(substream->runtime,
- SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
+ if (first_runtime->sample_bits) {
+ snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
first_runtime->sample_bits,
first_runtime->sample_bits);
+ }
}
ssi_private->second_stream = substream;
@@ -748,7 +741,7 @@ static void fsl_ssi_ac97_init(void)
fsl_ssi_setup(fsl_ac97_data);
}
-void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
+static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
unsigned short val)
{
struct ccsr_ssi *ssi = fsl_ac97_data->ssi;
@@ -770,7 +763,7 @@ void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
udelay(100);
}
-unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
+static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
unsigned short reg)
{
struct ccsr_ssi *ssi = fsl_ac97_data->ssi;
@@ -936,7 +929,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
ssi_private->ssi_phys = res.start;
ssi_private->irq = irq_of_parse_and_map(np, 0);
- if (ssi_private->irq == NO_IRQ) {
+ if (!ssi_private->irq) {
dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
return -ENXIO;
}
@@ -1135,7 +1128,6 @@ static int fsl_ssi_remove(struct platform_device *pdev)
if (ssi_private->ssi_on_imx)
imx_pcm_dma_exit(pdev);
snd_soc_unregister_component(&pdev->dev);
- dev_set_drvdata(&pdev->dev, NULL);
device_remove_file(&pdev->dev, &ssi_private->dev_attr);
if (ssi_private->ssi_on_imx)
clk_disable_unprepare(ssi_private->clk);
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index d3bf71a0ec56..ac869931d7f1 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -66,13 +66,10 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
ssize_t ret;
- char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char *buf;
int port = (int)file->private_data;
u32 pdcr, ptcr;
- if (!buf)
- return -ENOMEM;
-
if (audmux_clk) {
ret = clk_prepare_enable(audmux_clk);
if (ret)
@@ -85,6 +82,10 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
if (audmux_clk)
clk_disable_unprepare(audmux_clk);
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
pdcr, ptcr);
diff --git a/sound/soc/fsl/imx-mc13783.c b/sound/soc/fsl/imx-mc13783.c
index a3d60d4bea4c..79cee782dbbf 100644
--- a/sound/soc/fsl/imx-mc13783.c
+++ b/sound/soc/fsl/imx-mc13783.c
@@ -112,7 +112,7 @@ static int imx_mc13783_probe(struct platform_device *pdev)
return ret;
}
- if (machine_is_mx31_3ds()) {
+ if (machine_is_mx31_3ds() || machine_is_mx31moboard()) {
imx_audmux_v2_configure_port(MX31_AUDMUX_PORT4_SSI_PINS_4,
IMX_AUDMUX_V2_PTCR_SYN,
IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0) |
@@ -160,6 +160,7 @@ static struct platform_driver imx_mc13783_audio_driver = {
.driver = {
.name = "imx_mc13783",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
.probe = imx_mc13783_probe,
.remove = imx_mc13783_remove
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index 4dc1296688e9..aee23077080a 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -25,12 +25,10 @@
static bool filter(struct dma_chan *chan, void *param)
{
- struct snd_dmaengine_dai_dma_data *dma_data = param;
-
if (!imx_dma_is_general_purpose(chan))
return false;
- chan->private = dma_data->filter_data;
+ chan->private = param;
return true;
}
diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c
index 34043c55f2a6..fd5f2fb955f1 100644
--- a/sound/soc/fsl/imx-pcm-fiq.c
+++ b/sound/soc/fsl/imx-pcm-fiq.c
@@ -272,18 +272,16 @@ static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
return 0;
}
-static u64 imx_pcm_dmamask = DMA_BIT_MASK(32);
-
static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
+
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &imx_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = imx_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_PLAYBACK);
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index ca1be1d9dcf0..f2beae78969f 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -159,7 +159,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
data->card.dapm_widgets = imx_sgtl5000_dapm_widgets;
data->card.num_dapm_widgets = ARRAY_SIZE(imx_sgtl5000_dapm_widgets);
- ret = snd_soc_register_card(&data->card);
+ ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
goto fail;
@@ -186,7 +186,6 @@ static int imx_sgtl5000_remove(struct platform_device *pdev)
{
struct imx_sgtl5000_data *data = platform_get_drvdata(pdev);
- snd_soc_unregister_card(&data->card);
clk_put(data->codec_clk);
return 0;
@@ -202,6 +201,7 @@ static struct platform_driver imx_sgtl5000_driver = {
.driver = {
.name = "imx-sgtl5000",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
.of_match_table = imx_sgtl5000_dt_ids,
},
.probe = imx_sgtl5000_probe,
diff --git a/sound/soc/fsl/imx-spdif.c b/sound/soc/fsl/imx-spdif.c
index 816013b0ebba..8499d5292f08 100644
--- a/sound/soc/fsl/imx-spdif.c
+++ b/sound/soc/fsl/imx-spdif.c
@@ -87,7 +87,7 @@ static int imx_spdif_audio_probe(struct platform_device *pdev)
if (ret)
goto error_dir;
- ret = snd_soc_register_card(&data->card);
+ ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed: %d\n", ret);
goto error_dir;
@@ -119,8 +119,6 @@ static int imx_spdif_audio_remove(struct platform_device *pdev)
if (data->txdev)
platform_device_unregister(data->txdev);
- snd_soc_unregister_card(&data->card);
-
return 0;
}
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index f58bcd85c07f..f5f248c91c16 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -600,22 +600,19 @@ static int imx_ssi_probe(struct platform_device *pdev)
ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx;
ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
- ret = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
- if (ret)
- goto failed_pcm_fiq;
+ ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
+ ssi->dma_init = imx_pcm_dma_init(pdev);
- ret = imx_pcm_dma_init(pdev);
- if (ret)
- goto failed_pcm_dma;
+ if (ssi->fiq_init && ssi->dma_init) {
+ ret = ssi->fiq_init;
+ goto failed_pcm;
+ }
return 0;
-failed_pcm_dma:
- imx_pcm_fiq_exit(pdev);
-failed_pcm_fiq:
+failed_pcm:
snd_soc_unregister_component(&pdev->dev);
failed_register:
- release_mem_region(res->start, resource_size(res));
clk_disable_unprepare(ssi->clk);
failed_clk:
snd_soc_set_ac97_ops(NULL);
@@ -625,18 +622,19 @@ failed_clk:
static int imx_ssi_remove(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct imx_ssi *ssi = platform_get_drvdata(pdev);
- imx_pcm_dma_exit(pdev);
- imx_pcm_fiq_exit(pdev);
+ if (!ssi->dma_init)
+ imx_pcm_dma_exit(pdev);
+
+ if (!ssi->fiq_init)
+ imx_pcm_fiq_exit(pdev);
snd_soc_unregister_component(&pdev->dev);
if (ssi->flags & IMX_SSI_USE_AC97)
ac97_ssi = NULL;
- release_mem_region(res->start, resource_size(res));
clk_disable_unprepare(ssi->clk);
snd_soc_set_ac97_ops(NULL);
diff --git a/sound/soc/fsl/imx-ssi.h b/sound/soc/fsl/imx-ssi.h
index fb1616ba8c59..560c40fc9ebb 100644
--- a/sound/soc/fsl/imx-ssi.h
+++ b/sound/soc/fsl/imx-ssi.h
@@ -211,6 +211,8 @@ struct imx_ssi {
struct imx_dma_data filter_data_rx;
struct imx_pcm_fiq_params fiq_params;
+ int fiq_init;
+ int dma_init;
int enabled;
};
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 722afe69169e..a1deac6e864d 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -215,7 +215,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
goto fail;
}
codec_dev = of_find_i2c_device_by_node(codec_np);
- if (!codec_dev || !codec_dev->driver) {
+ if (!codec_dev || !codec_dev->dev.driver) {
dev_err(&pdev->dev, "failed to find codec platform device\n");
ret = -EINVAL;
goto fail;
@@ -266,7 +266,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
data->card.late_probe = imx_wm8962_late_probe;
data->card.set_bias_level = imx_wm8962_set_bias_level;
- ret = snd_soc_register_card(&data->card);
+ ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
goto clk_fail;
@@ -296,7 +296,6 @@ static int imx_wm8962_remove(struct platform_device *pdev)
if (!IS_ERR(data->codec_clk))
clk_disable_unprepare(data->codec_clk);
- snd_soc_unregister_card(&data->card);
return 0;
}
@@ -311,6 +310,7 @@ static struct platform_driver imx_wm8962_driver = {
.driver = {
.name = "imx-wm8962",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
.of_match_table = imx_wm8962_dt_ids,
},
.probe = imx_wm8962_probe,
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index 2a847ca494b5..71bf2f248cd4 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -10,6 +10,8 @@
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sound/soc.h>
@@ -299,7 +301,6 @@ static struct snd_pcm_ops psc_dma_ops = {
.hw_params = psc_dma_hw_params,
};
-static u64 psc_dma_dmamask = DMA_BIT_MASK(32);
static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
@@ -307,15 +308,14 @@ static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
struct snd_pcm *pcm = rtd->pcm;
struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
size_t size = psc_dma_hardware.buffer_bytes_max;
- int rc = 0;
+ int rc;
dev_dbg(rtd->platform->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
card, dai, pcm);
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &psc_dma_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ rc = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
diff --git a/sound/soc/fsl/mpc5200_psc_ac97.c b/sound/soc/fsl/mpc5200_psc_ac97.c
index 3ef7a0c92efa..24eafa2cfbf4 100644
--- a/sound/soc/fsl/mpc5200_psc_ac97.c
+++ b/sound/soc/fsl/mpc5200_psc_ac97.c
@@ -291,7 +291,7 @@ static int psc_ac97_of_probe(struct platform_device *op)
rc = snd_soc_set_ac97_ops(&psc_ac97_ops);
if (rc != 0) {
- dev_err(&op->dev, "Failed to set AC'97 ops: %d\n", ret);
+ dev_err(&op->dev, "Failed to set AC'97 ops: %d\n", rc);
return rc;
}
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c
index 228c52e71440..fa756d05b2f7 100644
--- a/sound/soc/fsl/mpc8610_hpcd.c
+++ b/sound/soc/fsl/mpc8610_hpcd.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <sound/soc.h>
diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c
index ba59c23a137b..f75c3cf0e6de 100644
--- a/sound/soc/fsl/p1022_ds.c
+++ b/sound/soc/fsl/p1022_ds.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <sound/soc.h>
diff --git a/sound/soc/fsl/p1022_rdk.c b/sound/soc/fsl/p1022_rdk.c
index f21551911533..9d89bb028621 100644
--- a/sound/soc/fsl/p1022_rdk.c
+++ b/sound/soc/fsl/p1022_rdk.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <sound/soc.h>
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 8c49147db84c..b2fbb7075a6c 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -27,6 +27,11 @@ static int __asoc_simple_card_dai_init(struct snd_soc_dai *dai,
if (!ret && daifmt)
ret = snd_soc_dai_set_fmt(dai, daifmt);
+ if (ret == -ENOTSUPP) {
+ dev_dbg(dai->dev, "ASoC: set_fmt is not supported\n");
+ ret = 0;
+ }
+
if (!ret && set->sysclk)
ret = snd_soc_dai_set_sysclk(dai, 0, set->sysclk, 0);
diff --git a/sound/soc/jz4740/jz4740-pcm.c b/sound/soc/jz4740/jz4740-pcm.c
index 710059292318..1d7ef28585e1 100644
--- a/sound/soc/jz4740/jz4740-pcm.c
+++ b/sound/soc/jz4740/jz4740-pcm.c
@@ -297,19 +297,15 @@ static void jz4740_pcm_free(struct snd_pcm *pcm)
}
}
-static u64 jz4740_pcm_dmamask = DMA_BIT_MASK(32);
-
static int jz4740_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
-
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &jz4740_pcm_dmamask;
+ int ret;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = jz4740_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index b238434f92b0..4af1936cf0f4 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -29,9 +29,7 @@
#define KIRKWOOD_FORMATS \
(SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S24_LE | \
- SNDRV_PCM_FMTBIT_S32_LE | \
- SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE | \
- SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE)
+ SNDRV_PCM_FMTBIT_S32_LE)
static struct kirkwood_dma_data *kirkwood_priv(struct snd_pcm_substream *subs)
{
@@ -59,8 +57,6 @@ static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
.fifo_size = 0,
};
-static u64 kirkwood_dma_dmamask = DMA_BIT_MASK(32);
-
static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
{
struct kirkwood_dma_data *priv = dev_id;
@@ -161,7 +157,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
* Enable Error interrupts. We're only ack'ing them but
* it's useful for diagnostics
*/
- writel((unsigned long)-1, priv->io + KIRKWOOD_ERR_MASK);
+ writel((unsigned int)-1, priv->io + KIRKWOOD_ERR_MASK);
}
dram = mv_mbus_dram_info();
@@ -292,10 +288,9 @@ static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
struct snd_pcm *pcm = rtd->pcm;
int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &kirkwood_dma_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = kirkwood_dma_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 0f3d73d4ef48..d34d91743e3f 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -103,7 +103,7 @@ static void kirkwood_set_rate(struct snd_soc_dai *dai,
{
uint32_t clks_ctrl;
- if (rate == 44100 || rate == 48000 || rate == 96000) {
+ if (IS_ERR(priv->extclk)) {
/* use internal dco for the supported rates
* defined in kirkwood_i2s_dai */
dev_dbg(dai->dev, "%s: dco set rate = %lu\n",
@@ -160,9 +160,11 @@ static int kirkwood_i2s_hw_params(struct snd_pcm_substream *substream,
case SNDRV_PCM_FORMAT_S16_LE:
i2s_value |= KIRKWOOD_I2S_CTL_SIZE_16;
ctl_play = KIRKWOOD_PLAYCTL_SIZE_16_C |
- KIRKWOOD_PLAYCTL_I2S_EN;
+ KIRKWOOD_PLAYCTL_I2S_EN |
+ KIRKWOOD_PLAYCTL_SPDIF_EN;
ctl_rec = KIRKWOOD_RECCTL_SIZE_16_C |
- KIRKWOOD_RECCTL_I2S_EN;
+ KIRKWOOD_RECCTL_I2S_EN |
+ KIRKWOOD_RECCTL_SPDIF_EN;
break;
/*
* doesn't work... S20_3LE != kirkwood 20bit format ?
@@ -178,9 +180,11 @@ static int kirkwood_i2s_hw_params(struct snd_pcm_substream *substream,
case SNDRV_PCM_FORMAT_S24_LE:
i2s_value |= KIRKWOOD_I2S_CTL_SIZE_24;
ctl_play = KIRKWOOD_PLAYCTL_SIZE_24 |
- KIRKWOOD_PLAYCTL_I2S_EN;
+ KIRKWOOD_PLAYCTL_I2S_EN |
+ KIRKWOOD_PLAYCTL_SPDIF_EN;
ctl_rec = KIRKWOOD_RECCTL_SIZE_24 |
- KIRKWOOD_RECCTL_I2S_EN;
+ KIRKWOOD_RECCTL_I2S_EN |
+ KIRKWOOD_RECCTL_SPDIF_EN;
break;
case SNDRV_PCM_FORMAT_S32_LE:
i2s_value |= KIRKWOOD_I2S_CTL_SIZE_32;
@@ -240,6 +244,11 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
ctl);
}
+ if (dai->id == 0)
+ ctl &= ~KIRKWOOD_PLAYCTL_SPDIF_EN; /* i2s */
+ else
+ ctl &= ~KIRKWOOD_PLAYCTL_I2S_EN; /* spdif */
+
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
/* configure */
@@ -258,7 +267,8 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_STOP:
/* stop audio, disable interrupts */
- ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
+ ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE |
+ KIRKWOOD_PLAYCTL_SPDIF_MUTE;
writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
value = readl(priv->io + KIRKWOOD_INT_MASK);
@@ -272,13 +282,15 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
- ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
+ ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE |
+ KIRKWOOD_PLAYCTL_SPDIF_MUTE;
writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
break;
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE);
+ ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE |
+ KIRKWOOD_PLAYCTL_SPDIF_MUTE);
writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
break;
@@ -301,7 +313,13 @@ static int kirkwood_i2s_rec_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_START:
/* configure */
ctl = priv->ctl_rec;
- value = ctl & ~KIRKWOOD_RECCTL_I2S_EN;
+ if (dai->id == 0)
+ ctl &= ~KIRKWOOD_RECCTL_SPDIF_EN; /* i2s */
+ else
+ ctl &= ~KIRKWOOD_RECCTL_I2S_EN; /* spdif */
+
+ value = ctl & ~(KIRKWOOD_RECCTL_I2S_EN |
+ KIRKWOOD_RECCTL_SPDIF_EN);
writel(value, priv->io + KIRKWOOD_RECCTL);
/* enable interrupts */
@@ -361,9 +379,8 @@ static int kirkwood_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
return 0;
}
-static int kirkwood_i2s_probe(struct snd_soc_dai *dai)
+static int kirkwood_i2s_init(struct kirkwood_dma_data *priv)
{
- struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai);
unsigned long value;
unsigned int reg_data;
@@ -404,9 +421,10 @@ static const struct snd_soc_dai_ops kirkwood_i2s_dai_ops = {
.set_fmt = kirkwood_i2s_set_fmt,
};
-
-static struct snd_soc_dai_driver kirkwood_i2s_dai = {
- .probe = kirkwood_i2s_probe,
+static struct snd_soc_dai_driver kirkwood_i2s_dai[2] = {
+ {
+ .name = "i2s",
+ .id = 0,
.playback = {
.channels_min = 1,
.channels_max = 2,
@@ -422,10 +440,53 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai = {
.formats = KIRKWOOD_I2S_FORMATS,
},
.ops = &kirkwood_i2s_dai_ops,
+ },
+ {
+ .name = "spdif",
+ .id = 1,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_96000,
+ .formats = KIRKWOOD_I2S_FORMATS,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_96000,
+ .formats = KIRKWOOD_I2S_FORMATS,
+ },
+ .ops = &kirkwood_i2s_dai_ops,
+ },
};
-static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk = {
- .probe = kirkwood_i2s_probe,
+static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
+ {
+ .name = "i2s",
+ .id = 0,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000 |
+ SNDRV_PCM_RATE_CONTINUOUS |
+ SNDRV_PCM_RATE_KNOT,
+ .formats = KIRKWOOD_I2S_FORMATS,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000 |
+ SNDRV_PCM_RATE_CONTINUOUS |
+ SNDRV_PCM_RATE_KNOT,
+ .formats = KIRKWOOD_I2S_FORMATS,
+ },
+ .ops = &kirkwood_i2s_dai_ops,
+ },
+ {
+ .name = "spdif",
+ .id = 1,
.playback = {
.channels_min = 1,
.channels_max = 2,
@@ -443,6 +504,7 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk = {
.formats = KIRKWOOD_I2S_FORMATS,
},
.ops = &kirkwood_i2s_dai_ops,
+ },
};
static const struct snd_soc_component_driver kirkwood_i2s_component = {
@@ -452,7 +514,7 @@ static const struct snd_soc_component_driver kirkwood_i2s_component = {
static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
{
struct kirkwood_asoc_platform_data *data = pdev->dev.platform_data;
- struct snd_soc_dai_driver *soc_dai = &kirkwood_i2s_dai;
+ struct snd_soc_dai_driver *soc_dai = kirkwood_i2s_dai;
struct kirkwood_dma_data *priv;
struct resource *mem;
struct device_node *np = pdev->dev.of_node;
@@ -496,14 +558,17 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
return err;
priv->extclk = devm_clk_get(&pdev->dev, "extclk");
- if (!IS_ERR(priv->extclk)) {
+ if (IS_ERR(priv->extclk)) {
+ if (PTR_ERR(priv->extclk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ } else {
if (priv->extclk == priv->clk) {
devm_clk_put(&pdev->dev, priv->extclk);
priv->extclk = ERR_PTR(-EINVAL);
} else {
dev_info(&pdev->dev, "found external clock\n");
clk_prepare_enable(priv->extclk);
- soc_dai = &kirkwood_i2s_dai_extclk;
+ soc_dai = kirkwood_i2s_dai_extclk;
}
}
@@ -521,7 +586,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
}
err = snd_soc_register_component(&pdev->dev, &kirkwood_i2s_component,
- soc_dai, 1);
+ soc_dai, 2);
if (err) {
dev_err(&pdev->dev, "snd_soc_register_component failed\n");
goto err_component;
@@ -532,6 +597,9 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "snd_soc_register_platform failed\n");
goto err_platform;
}
+
+ kirkwood_i2s_init(priv);
+
return 0;
err_platform:
snd_soc_unregister_component(&pdev->dev);
diff --git a/sound/soc/kirkwood/kirkwood-openrd.c b/sound/soc/kirkwood/kirkwood-openrd.c
index 025be0e97164..65f2a5b9ec3b 100644
--- a/sound/soc/kirkwood/kirkwood-openrd.c
+++ b/sound/soc/kirkwood/kirkwood-openrd.c
@@ -52,7 +52,7 @@ static struct snd_soc_dai_link openrd_client_dai[] = {
{
.name = "CS42L51",
.stream_name = "CS42L51 HiFi",
- .cpu_dai_name = "mvebu-audio",
+ .cpu_dai_name = "i2s",
.platform_name = "mvebu-audio",
.codec_dai_name = "cs42l51-hifi",
.codec_name = "cs42l51-codec.0-004a",
diff --git a/sound/soc/kirkwood/kirkwood-t5325.c b/sound/soc/kirkwood/kirkwood-t5325.c
index 27545b0c4856..d213832b0c72 100644
--- a/sound/soc/kirkwood/kirkwood-t5325.c
+++ b/sound/soc/kirkwood/kirkwood-t5325.c
@@ -68,7 +68,7 @@ static struct snd_soc_dai_link t5325_dai[] = {
{
.name = "ALC5621",
.stream_name = "ALC5621 HiFi",
- .cpu_dai_name = "mvebu-audio",
+ .cpu_dai_name = "i2s",
.platform_name = "mvebu-audio",
.codec_dai_name = "alc5621-hifi",
.codec_name = "alc562x-codec.0-001a",
diff --git a/sound/soc/kirkwood/kirkwood.h b/sound/soc/kirkwood/kirkwood.h
index f8e1ccc1c58c..bf23afbba1d7 100644
--- a/sound/soc/kirkwood/kirkwood.h
+++ b/sound/soc/kirkwood/kirkwood.h
@@ -123,8 +123,8 @@
/* need to find where they come from */
#define KIRKWOOD_SND_MIN_PERIODS 8
#define KIRKWOOD_SND_MAX_PERIODS 16
-#define KIRKWOOD_SND_MIN_PERIOD_BYTES 0x4000
-#define KIRKWOOD_SND_MAX_PERIOD_BYTES 0x4000
+#define KIRKWOOD_SND_MIN_PERIOD_BYTES 0x800
+#define KIRKWOOD_SND_MAX_PERIOD_BYTES 0x8000
#define KIRKWOOD_SND_MAX_BUFFER_BYTES (KIRKWOOD_SND_MAX_PERIOD_BYTES \
* KIRKWOOD_SND_MAX_PERIODS)
diff --git a/sound/soc/mid-x86/mfld_machine.c b/sound/soc/mid-x86/mfld_machine.c
index ee363845759e..d3d4c32434f7 100644
--- a/sound/soc/mid-x86/mfld_machine.c
+++ b/sound/soc/mid-x86/mfld_machine.c
@@ -400,7 +400,7 @@ static int snd_mfld_mc_probe(struct platform_device *pdev)
}
/* register the soc card */
snd_soc_card_mfld.dev = &pdev->dev;
- ret_val = snd_soc_register_card(&snd_soc_card_mfld);
+ ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_mfld);
if (ret_val) {
pr_debug("snd_soc_register_card failed %d\n", ret_val);
return ret_val;
@@ -410,20 +410,12 @@ static int snd_mfld_mc_probe(struct platform_device *pdev)
return 0;
}
-static int snd_mfld_mc_remove(struct platform_device *pdev)
-{
- pr_debug("snd_mfld_mc_remove called\n");
- snd_soc_unregister_card(&snd_soc_card_mfld);
- return 0;
-}
-
static struct platform_driver snd_mfld_mc_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "msic_audio",
},
.probe = snd_mfld_mc_probe,
- .remove = snd_mfld_mc_remove,
};
module_platform_driver(snd_mfld_mc_driver);
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index b56b8a0e8deb..54e622acac33 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -494,6 +494,7 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
struct mxs_saif *master_saif;
u32 delay;
+ int ret;
master_saif = mxs_saif_get_master(saif);
if (!master_saif)
@@ -503,23 +504,37 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (saif->state == MXS_SAIF_STATE_RUNNING)
+ return 0;
+
dev_dbg(cpu_dai->dev, "start\n");
- clk_enable(master_saif->clk);
- if (!master_saif->mclk_in_use)
- __raw_writel(BM_SAIF_CTRL_RUN,
- master_saif->base + SAIF_CTRL + MXS_SET_ADDR);
+ ret = clk_enable(master_saif->clk);
+ if (ret) {
+ dev_err(saif->dev, "Failed to enable master clock\n");
+ return ret;
+ }
/*
* If the saif is not its own master, we also need to enable
* its own clk for its internal basic logic to work.
*/
if (saif != master_saif) {
- clk_enable(saif->clk);
+ ret = clk_enable(saif->clk);
+ if (ret) {
+ dev_err(saif->dev, "Failed to enable master clock\n");
+ clk_disable(master_saif->clk);
+ return ret;
+ }
+
__raw_writel(BM_SAIF_CTRL_RUN,
saif->base + SAIF_CTRL + MXS_SET_ADDR);
}
+ if (!master_saif->mclk_in_use)
+ __raw_writel(BM_SAIF_CTRL_RUN,
+ master_saif->base + SAIF_CTRL + MXS_SET_ADDR);
+
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
/*
* write data to saif data register to trigger
@@ -543,6 +558,7 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
}
master_saif->ongoing = 1;
+ saif->state = MXS_SAIF_STATE_RUNNING;
dev_dbg(saif->dev, "CTRL 0x%x STAT 0x%x\n",
__raw_readl(saif->base + SAIF_CTRL),
@@ -555,6 +571,9 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (saif->state == MXS_SAIF_STATE_STOPPED)
+ return 0;
+
dev_dbg(cpu_dai->dev, "stop\n");
/* wait a while for the current sample to complete */
@@ -575,6 +594,7 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
}
master_saif->ongoing = 0;
+ saif->state = MXS_SAIF_STATE_STOPPED;
break;
default:
@@ -768,8 +788,8 @@ static int mxs_saif_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "failed to init clocks\n");
}
- ret = snd_soc_register_component(&pdev->dev, &mxs_saif_component,
- &mxs_saif_dai, 1);
+ ret = devm_snd_soc_register_component(&pdev->dev, &mxs_saif_component,
+ &mxs_saif_dai, 1);
if (ret) {
dev_err(&pdev->dev, "register DAI failed\n");
return ret;
@@ -778,21 +798,15 @@ static int mxs_saif_probe(struct platform_device *pdev)
ret = mxs_pcm_platform_register(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "register PCM failed: %d\n", ret);
- goto failed_pdev_alloc;
+ return ret;
}
return 0;
-
-failed_pdev_alloc:
- snd_soc_unregister_component(&pdev->dev);
-
- return ret;
}
static int mxs_saif_remove(struct platform_device *pdev)
{
mxs_pcm_platform_unregister(&pdev->dev);
- snd_soc_unregister_component(&pdev->dev);
return 0;
}
diff --git a/sound/soc/mxs/mxs-saif.h b/sound/soc/mxs/mxs-saif.h
index 53eaa4bf0e27..fbaf7badfdfb 100644
--- a/sound/soc/mxs/mxs-saif.h
+++ b/sound/soc/mxs/mxs-saif.h
@@ -124,6 +124,11 @@ struct mxs_saif {
u32 fifo_underrun;
u32 fifo_overrun;
+
+ enum {
+ MXS_SAIF_STATE_STOPPED,
+ MXS_SAIF_STATE_RUNNING,
+ } state;
};
extern int mxs_saif_put_mclk(unsigned int saif_id);
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 4bb273786ff3..61822cc53bd3 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -122,14 +122,12 @@ static struct snd_soc_card mxs_sgtl5000 = {
.num_links = ARRAY_SIZE(mxs_sgtl5000_dai),
};
-static int mxs_sgtl5000_probe_dt(struct platform_device *pdev)
+static int mxs_sgtl5000_probe(struct platform_device *pdev)
{
+ struct snd_soc_card *card = &mxs_sgtl5000;
+ int ret, i;
struct device_node *np = pdev->dev.of_node;
struct device_node *saif_np[2], *codec_np;
- int i;
-
- if (!np)
- return 1; /* no device tree */
saif_np[0] = of_parse_phandle(np, "saif-controllers", 0);
saif_np[1] = of_parse_phandle(np, "saif-controllers", 1);
@@ -152,18 +150,6 @@ static int mxs_sgtl5000_probe_dt(struct platform_device *pdev)
of_node_put(saif_np[0]);
of_node_put(saif_np[1]);
- return 0;
-}
-
-static int mxs_sgtl5000_probe(struct platform_device *pdev)
-{
- struct snd_soc_card *card = &mxs_sgtl5000;
- int ret;
-
- ret = mxs_sgtl5000_probe_dt(pdev);
- if (ret < 0)
- return ret;
-
/*
* Set an init clock(11.28Mhz) for sgtl5000 initialization(i2c r/w).
* The Sgtl5000 sysclk is derived from saif0 mclk and it's range
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index c894ff0f2580..f588ee45b4fd 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -314,16 +314,15 @@ static void nuc900_dma_free_dma_buffers(struct snd_pcm *pcm)
snd_pcm_lib_preallocate_free_for_all(pcm);
}
-static u64 nuc900_pcm_dmamask = DMA_BIT_MASK(32);
static int nuc900_dma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
+ int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &nuc900_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
card->dev, 4 * 1024, (4 * 1024) - 1);
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index daa78a0095fa..4a07f7179690 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -1,6 +1,6 @@
config SND_OMAP_SOC
tristate "SoC Audio for the Texas Instruments OMAP chips"
- depends on (ARCH_OMAP && DMA_OMAP) || (ARCH_ARM && COMPILE_TEST)
+ depends on (ARCH_OMAP && DMA_OMAP) || (ARM && COMPILE_TEST)
select SND_DMAENGINE_PCM
config SND_OMAP_SOC_DMIC
@@ -26,7 +26,7 @@ config SND_OMAP_SOC_N810
config SND_OMAP_SOC_RX51
tristate "SoC Audio support for Nokia RX-51"
- depends on SND_OMAP_SOC && ARCH_ARM && (MACH_NOKIA_RX51 || COMPILE_TEST)
+ depends on SND_OMAP_SOC && ARM && (MACH_NOKIA_RX51 || COMPILE_TEST)
select SND_OMAP_SOC_MCBSP
select SND_SOC_TLV320AIC3X
select SND_SOC_TPA6130A2
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 90d2a7cd2563..cd9ee167959d 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -490,14 +490,9 @@ static int asoc_mcpdm_probe(struct platform_device *pdev)
mcpdm->dev = &pdev->dev;
- return snd_soc_register_component(&pdev->dev, &omap_mcpdm_component,
- &omap_mcpdm_dai, 1);
-}
-
-static int asoc_mcpdm_remove(struct platform_device *pdev)
-{
- snd_soc_unregister_component(&pdev->dev);
- return 0;
+ return devm_snd_soc_register_component(&pdev->dev,
+ &omap_mcpdm_component,
+ &omap_mcpdm_dai, 1);
}
static const struct of_device_id omap_mcpdm_of_match[] = {
@@ -514,7 +509,6 @@ static struct platform_driver asoc_mcpdm_driver = {
},
.probe = asoc_mcpdm_probe,
- .remove = asoc_mcpdm_remove,
};
module_platform_driver(asoc_mcpdm_driver);
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index a11405de86e8..b8fa9862e54c 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -156,8 +156,6 @@ static struct snd_pcm_ops omap_pcm_ops = {
.mmap = omap_pcm_mmap,
};
-static u64 omap_pcm_dmamask = DMA_BIT_MASK(64);
-
static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
int stream)
{
@@ -202,12 +200,11 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &omap_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = omap_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/omap/omap-twl4030.c b/sound/soc/omap/omap-twl4030.c
index 2a9324f794d8..6a8d6b5f160d 100644
--- a/sound/soc/omap/omap-twl4030.c
+++ b/sound/soc/omap/omap-twl4030.c
@@ -338,9 +338,9 @@ static int omap_twl4030_probe(struct platform_device *pdev)
}
snd_soc_card_set_drvdata(card, priv);
- ret = snd_soc_register_card(card);
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
+ dev_err(&pdev->dev, "devm_snd_soc_register_card() failed: %d\n",
ret);
return ret;
}
@@ -357,7 +357,6 @@ static int omap_twl4030_remove(struct platform_device *pdev)
snd_soc_jack_free_gpios(&priv->hs_jack,
ARRAY_SIZE(hs_jack_gpios),
hs_jack_gpios);
- snd_soc_unregister_card(card);
return 0;
}
diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c
index 5b7d969f89a9..08acdc236bf8 100644
--- a/sound/soc/pxa/brownstone.c
+++ b/sound/soc/pxa/brownstone.c
@@ -163,6 +163,7 @@ static struct platform_driver mmp_driver = {
.driver = {
.name = "brownstone-audio",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
.probe = brownstone_probe,
.remove = brownstone_remove,
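The one-line .pm additions to brownstone and the pxa machine drivers that follow all point driver.pm at the generic snd_soc_pm_ops, so the registered card gets the standard ASoC system suspend/resume handling without driver-specific callbacks. A sketch of a minimal machine driver wired that way (foo_* identifiers are placeholders):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/soc.h>

static struct snd_soc_card foo_card = {
	.name = "foo-audio",
	/* dai_links omitted for brevity */
};

static int foo_audio_probe(struct platform_device *pdev)
{
	foo_card.dev = &pdev->dev;
	return devm_snd_soc_register_card(&pdev->dev, &foo_card);
}

static struct platform_driver foo_audio_driver = {
	.driver = {
		.name	= "foo-audio",
		.owner	= THIS_MODULE,
		/* generic ASoC suspend/resume for the whole card */
		.pm	= &snd_soc_pm_ops,
	},
	.probe	= foo_audio_probe,
};
module_platform_driver(foo_audio_driver);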
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index f4cce1e80112..1853d41034bf 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -329,6 +329,7 @@ static struct platform_driver corgi_driver = {
.driver = {
.name = "corgi-audio",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
.probe = corgi_probe,
.remove = corgi_remove,
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index 70d799b13f0d..44b5c09d296b 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -178,6 +178,7 @@ static struct platform_driver e740_driver = {
.driver = {
.name = "e740-audio",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
.probe = e740_probe,
.remove = e740_remove,
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index f94d2ab51351..c34e447eb991 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -160,6 +160,7 @@ static struct platform_driver e750_driver = {
.driver = {
.name = "e750-audio",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
.probe = e750_probe,
.remove = e750_remove,
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index 8768a640dd71..3137f800b43f 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -150,6 +150,7 @@ static struct platform_driver e800_driver = {
.driver = {
.name = "e800-audio",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
.probe = e800_probe,
.remove = e800_remove,
diff --git a/sound/soc/pxa/imote2.c b/sound/soc/pxa/imote2.c
index eef1f7b7b38e..fd2f4eda1fd3 100644
--- a/sound/soc/pxa/imote2.c
+++ b/sound/soc/pxa/imote2.c
@@ -91,6 +91,7 @@ static struct platform_driver imote2_driver = {
.driver = {
.name = "imote2-audio",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
.probe = imote2_probe,
.remove = imote2_remove,
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index bbea7780eac6..160c5245448f 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -215,6 +215,7 @@ static struct platform_driver mioa701_wm9713_driver = {
.driver = {
.name = "mioa701-wm9713",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
};
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index 41752a5fe3b0..5bf5f1f7cac5 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -455,8 +455,8 @@ static int asoc_mmp_sspa_probe(struct platform_device *pdev)
priv->dai_fmt = (unsigned int) -1;
platform_set_drvdata(pdev, priv);
- return snd_soc_register_component(&pdev->dev, &mmp_sspa_component,
- &mmp_sspa_dai, 1);
+ return devm_snd_soc_register_component(&pdev->dev, &mmp_sspa_component,
+ &mmp_sspa_dai, 1);
}
static int asoc_mmp_sspa_remove(struct platform_device *pdev)
@@ -466,7 +466,6 @@ static int asoc_mmp_sspa_remove(struct platform_device *pdev)
clk_disable(priv->audio_clk);
clk_put(priv->audio_clk);
clk_put(priv->sysclk);
- snd_soc_unregister_component(&pdev->dev);
return 0;
}
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index e1ffcdd9a649..3284c4b901cb 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -181,6 +181,7 @@ static struct platform_driver palm27x_wm9712_driver = {
.driver = {
.name = "palm27x-asoc",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
};
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index fafe46355c31..c93e138d8dc3 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -303,6 +303,7 @@ static struct platform_driver poodle_driver = {
.driver = {
.name = "poodle-audio",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
.probe = poodle_probe,
.remove = poodle_remove,
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index f1059d999de6..ae956e3f4b9d 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -89,33 +89,6 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_mic_mono_in = {
.filter_data = &pxa2xx_ac97_pcm_aux_mic_mono_req,
};
-#ifdef CONFIG_PM
-static int pxa2xx_ac97_suspend(struct snd_soc_dai *dai)
-{
- return pxa2xx_ac97_hw_suspend();
-}
-
-static int pxa2xx_ac97_resume(struct snd_soc_dai *dai)
-{
- return pxa2xx_ac97_hw_resume();
-}
-
-#else
-#define pxa2xx_ac97_suspend NULL
-#define pxa2xx_ac97_resume NULL
-#endif
-
-static int pxa2xx_ac97_probe(struct snd_soc_dai *dai)
-{
- return pxa2xx_ac97_hw_probe(to_platform_device(dai->dev));
-}
-
-static int pxa2xx_ac97_remove(struct snd_soc_dai *dai)
-{
- pxa2xx_ac97_hw_remove(to_platform_device(dai->dev));
- return 0;
-}
-
static int pxa2xx_ac97_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
@@ -185,10 +158,6 @@ static struct snd_soc_dai_driver pxa_ac97_dai_driver[] = {
{
.name = "pxa2xx-ac97",
.ac97_control = 1,
- .probe = pxa2xx_ac97_probe,
- .remove = pxa2xx_ac97_remove,
- .suspend = pxa2xx_ac97_suspend,
- .resume = pxa2xx_ac97_resume,
.playback = {
.stream_name = "AC97 Playback",
.channels_min = 2,
@@ -246,6 +215,12 @@ static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
return -ENXIO;
}
+ ret = pxa2xx_ac97_hw_probe(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "PXA2xx AC97 hw probe error (%d)\n", ret);
+ return ret;
+ }
+
ret = snd_soc_set_ac97_ops(&pxa2xx_ac97_ops);
if (ret != 0)
return ret;
@@ -262,15 +237,34 @@ static int pxa2xx_ac97_dev_remove(struct platform_device *pdev)
{
snd_soc_unregister_component(&pdev->dev);
snd_soc_set_ac97_ops(NULL);
+ pxa2xx_ac97_hw_remove(pdev);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int pxa2xx_ac97_dev_suspend(struct device *dev)
+{
+ return pxa2xx_ac97_hw_suspend();
+}
+
+static int pxa2xx_ac97_dev_resume(struct device *dev)
+{
+ return pxa2xx_ac97_hw_resume();
+}
+
+static SIMPLE_DEV_PM_OPS(pxa2xx_ac97_pm_ops,
+ pxa2xx_ac97_dev_suspend, pxa2xx_ac97_dev_resume);
+#endif
+
static struct platform_driver pxa2xx_ac97_driver = {
.probe = pxa2xx_ac97_dev_probe,
.remove = pxa2xx_ac97_dev_remove,
.driver = {
.name = "pxa2xx-ac97",
.owner = THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &pxa2xx_ac97_pm_ops,
+#endif
},
};
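pxa2xx-ac97 above drops the DAI-level probe/remove/suspend/resume callbacks and hooks the same hardware helpers into the platform device instead, through dev_pm_ops. The shape of that conversion, reduced to a hedged sketch with hypothetical foo_* names:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* quiesce the controller, as the hw_suspend helper would */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore controller state on system resume */
	return 0;
}
#endif

/* expands to a dev_pm_ops whose system-sleep hooks are foo_suspend/foo_resume */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,
	},
};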
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 806da27b8b67..d58b09f4f7a4 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -87,18 +87,15 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
.mmap = pxa2xx_pcm_mmap,
};
-static u64 pxa2xx_pcm_dmamask = DMA_BIT_MASK(32);
-
static int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &pxa2xx_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index a3fe19123f07..1d9c2ed223bc 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -275,6 +275,7 @@ static struct platform_driver tosa_driver = {
.driver = {
.name = "tosa-audio",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
.probe = tosa_probe,
.remove = tosa_remove,
diff --git a/sound/soc/pxa/ttc-dkb.c b/sound/soc/pxa/ttc-dkb.c
index 13c9ee0cb83b..0b535b570622 100644
--- a/sound/soc/pxa/ttc-dkb.c
+++ b/sound/soc/pxa/ttc-dkb.c
@@ -160,6 +160,7 @@ static struct platform_driver ttc_dkb_driver = {
.driver = {
.name = "ttc-dkb-audio",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
.probe = ttc_dkb_probe,
.remove = ttc_dkb_remove,
diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
index d0740a762963..283620a97fe7 100644
--- a/sound/soc/s6000/s6000-pcm.c
+++ b/sound/soc/s6000/s6000-pcm.c
@@ -444,8 +444,6 @@ static void s6000_pcm_free(struct snd_pcm *pcm)
snd_pcm_lib_preallocate_free_for_all(pcm);
}
-static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
-
static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
{
struct snd_card *card = runtime->card->snd_card;
@@ -456,10 +454,9 @@ static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &s6000_pcm_dmamask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ res = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (res)
+ return res;
if (params->dma_in) {
s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 2eea1840315d..37459dfd168d 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -2,7 +2,7 @@ config SND_SOC_SAMSUNG
tristate "ASoC support for Samsung"
depends on PLAT_SAMSUNG
select S3C64XX_DMA if ARCH_S3C64XX
- select S3C2410_DMA if ARCH_S3C24XX
+ select S3C24XX_DMA if ARCH_S3C24XX
help
Say Y or M if you want to add support for codecs attached to
the Samsung SoCs' Audio interfaces. You will also need to
diff --git a/sound/soc/samsung/bells.c b/sound/soc/samsung/bells.c
index 29e246803626..84f5d8b76679 100644
--- a/sound/soc/samsung/bells.c
+++ b/sound/soc/samsung/bells.c
@@ -356,6 +356,7 @@ static struct snd_soc_dapm_widget bells_widgets[] = {
static struct snd_soc_dapm_route bells_routes[] = {
{ "Sub CLK_SYS", NULL, "OPCLK" },
+ { "CLKIN", NULL, "OPCLK" },
{ "DMIC", NULL, "MICBIAS2" },
{ "IN2L", NULL, "DMIC" },
diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
index 9338d11e9216..fe2748b494d4 100644
--- a/sound/soc/samsung/dma.c
+++ b/sound/soc/samsung/dma.c
@@ -406,20 +406,17 @@ static void dma_free_dma_buffers(struct snd_pcm *pcm)
}
}
-static u64 dma_mask = DMA_BIT_MASK(32);
-
static int dma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
pr_debug("Entered %s\n", __func__);
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &dma_mask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = preallocate_dma_buffer(pcm,
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index b302f3b7a587..a5cbdb4f1655 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -702,13 +702,6 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
}
writel(mod, i2s->addr + I2SMOD);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- snd_soc_dai_set_dma_data(dai, substream,
- (void *)&i2s->dma_playback);
- else
- snd_soc_dai_set_dma_data(dai, substream,
- (void *)&i2s->dma_capture);
-
i2s->frmclk = params_rate(params);
return 0;
@@ -970,6 +963,8 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
}
clk_prepare_enable(i2s->clk);
+ snd_soc_dai_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
+
if (other) {
other->addr = i2s->addr;
other->clk = i2s->clk;
@@ -1060,7 +1055,7 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
i2s->i2s_dai_drv.ops = &samsung_i2s_dai_ops;
i2s->i2s_dai_drv.suspend = i2s_suspend;
i2s->i2s_dai_drv.resume = i2s_resume;
- i2s->i2s_dai_drv.playback.channels_min = 2;
+ i2s->i2s_dai_drv.playback.channels_min = 1;
i2s->i2s_dai_drv.playback.channels_max = 2;
i2s->i2s_dai_drv.playback.rates = SAMSUNG_I2S_RATES;
i2s->i2s_dai_drv.playback.formats = SAMSUNG_I2S_FMTS;
@@ -1073,7 +1068,7 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
dev_set_drvdata(&i2s->pdev->dev, i2s);
} else { /* Create a new platform_device for Secondary */
i2s->pdev = platform_device_alloc("samsung-i2s-sec", -1);
- if (IS_ERR(i2s->pdev))
+ if (!i2s->pdev)
return NULL;
i2s->pdev->dev.parent = &pdev->dev;
@@ -1143,9 +1138,9 @@ static int samsung_i2s_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Unable to get drvdata\n");
return -EFAULT;
}
- snd_soc_register_component(&sec_dai->pdev->dev,
- &samsung_i2s_component,
- &sec_dai->i2s_dai_drv, 1);
+ devm_snd_soc_register_component(&sec_dai->pdev->dev,
+ &samsung_i2s_component,
+ &sec_dai->i2s_dai_drv, 1);
samsung_asoc_dma_platform_register(&pdev->dev);
return 0;
}
@@ -1258,8 +1253,9 @@ static int samsung_i2s_probe(struct platform_device *pdev)
goto err;
}
- snd_soc_register_component(&pri_dai->pdev->dev, &samsung_i2s_component,
- &pri_dai->i2s_dai_drv, 1);
+ devm_snd_soc_register_component(&pri_dai->pdev->dev,
+ &samsung_i2s_component,
+ &pri_dai->i2s_dai_drv, 1);
pm_runtime_enable(&pdev->dev);
@@ -1294,7 +1290,6 @@ static int samsung_i2s_remove(struct platform_device *pdev)
i2s->sec_dai = NULL;
samsung_asoc_dma_platform_unregister(&pdev->dev);
- snd_soc_unregister_component(&pdev->dev);
return 0;
}
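The samsung i2s change above moves the DMA data assignment out of hw_params and into the DAI probe using snd_soc_dai_init_dma_data(), so it happens once per DAI rather than on every stream start. A hypothetical DAI probe doing the same (foo_* names are placeholders, not from this diff):

#include <sound/dmaengine_pcm.h>
#include <sound/soc.h>

static struct snd_dmaengine_dai_dma_data foo_dma_playback;
static struct snd_dmaengine_dai_dma_data foo_dma_capture;

static int foo_dai_probe(struct snd_soc_dai *dai)
{
	/* attach per-direction DMA parameters once, at DAI probe time */
	snd_soc_dai_init_dma_data(dai, &foo_dma_playback, &foo_dma_capture);
	return 0;
}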
diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c
index ce1e1e16f250..e4f318fc2f82 100644
--- a/sound/soc/samsung/idma.c
+++ b/sound/soc/samsung/idma.c
@@ -383,18 +383,15 @@ static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
return 0;
}
-static u64 idma_mask = DMA_BIT_MASK(32);
-
static int idma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
- int ret = 0;
+ int ret;
- if (!card->dev->dma_mask)
- card->dev->dma_mask = &idma_mask;
- if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = preallocate_idma_buffer(pcm,
diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
index e5e81b111001..fefc56100349 100644
--- a/sound/soc/samsung/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -31,11 +31,7 @@
#undef S3C_IIS_V2_SUPPORTED
#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413) \
- || defined(CONFIG_CPU_S5PV210)
-#define S3C_IIS_V2_SUPPORTED
-#endif
-
-#ifdef CONFIG_PLAT_S3C64XX
+ || defined(CONFIG_ARCH_S3C64XX) || defined(CONFIG_CPU_S5PV210)
#define S3C_IIS_V2_SUPPORTED
#endif
diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c
index 5fd7a05a9b9e..b072bd107b31 100644
--- a/sound/soc/samsung/smdk_wm8994.c
+++ b/sound/soc/samsung/smdk_wm8994.c
@@ -9,6 +9,7 @@
#include "../codecs/wm8994.h"
#include <sound/pcm_params.h>
+#include <sound/soc.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -193,7 +194,7 @@ static int smdk_audio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, board);
- ret = snd_soc_register_card(card);
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret)
dev_err(&pdev->dev, "snd_soc_register_card() failed:%d\n", ret);
@@ -201,23 +202,14 @@ static int smdk_audio_probe(struct platform_device *pdev)
return ret;
}
-static int smdk_audio_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(card);
-
- return 0;
-}
-
static struct platform_driver smdk_audio_driver = {
.driver = {
.name = "smdk-audio-wm8894",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(samsung_wm8994_of_match),
+ .pm = &snd_soc_pm_ops,
},
.probe = smdk_audio_probe,
- .remove = smdk_audio_remove,
};
module_platform_driver(smdk_audio_driver);
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index d80deb7ccf13..9430097979a5 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -8,7 +8,6 @@
* for more details.
*/
#include <linux/sh_clk.h>
-#include <mach/clock.h>
#include "rsnd.h"
#define CLKA 0
@@ -22,6 +21,7 @@ struct rsnd_adg {
int rate_of_441khz_div_6;
int rate_of_48khz_div_6;
+ u32 ckr;
};
#define for_each_rsnd_clk(pos, adg, i) \
@@ -116,6 +116,11 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *mod, unsigned int rate)
found_clock:
+ /* see rsnd_adg_ssi_clk_init() */
+ rsnd_mod_bset(mod, SSICKR, 0x00FF0000, adg->ckr);
+ rsnd_mod_write(mod, BRRA, 0x00000002); /* 1/6 */
+ rsnd_mod_write(mod, BRRB, 0x00000002); /* 1/6 */
+
/*
* This "mod" = "ssi" here.
* we can get "ssi id" from mod
@@ -182,9 +187,7 @@ static void rsnd_adg_ssi_clk_init(struct rsnd_priv *priv, struct rsnd_adg *adg)
}
}
- rsnd_priv_bset(priv, SSICKR, 0x00FF0000, ckr);
- rsnd_priv_write(priv, BRRA, 0x00000002); /* 1/6 */
- rsnd_priv_write(priv, BRRB, 0x00000002); /* 1/6 */
+ adg->ckr = ckr;
}
int rsnd_adg_probe(struct platform_device *pdev,
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index a35706028514..78c35b44fc04 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -94,6 +94,7 @@
*
*/
#include <linux/pm_runtime.h>
+#include <linux/shdma-base.h>
#include "rsnd.h"
#define RSND_RATES SNDRV_PCM_RATE_8000_96000
@@ -103,54 +104,9 @@
* rsnd_platform functions
*/
#define rsnd_platform_call(priv, dai, func, param...) \
- (!(priv->info->func) ? -ENODEV : \
+ (!(priv->info->func) ? 0 : \
priv->info->func(param))
-
-/*
- * basic function
- */
-u32 rsnd_read(struct rsnd_priv *priv,
- struct rsnd_mod *mod, enum rsnd_reg reg)
-{
- void __iomem *base = rsnd_gen_reg_get(priv, mod, reg);
-
- BUG_ON(!base);
-
- return ioread32(base);
-}
-
-void rsnd_write(struct rsnd_priv *priv,
- struct rsnd_mod *mod,
- enum rsnd_reg reg, u32 data)
-{
- void __iomem *base = rsnd_gen_reg_get(priv, mod, reg);
- struct device *dev = rsnd_priv_to_dev(priv);
-
- BUG_ON(!base);
-
- dev_dbg(dev, "w %p : %08x\n", base, data);
-
- iowrite32(data, base);
-}
-
-void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
- enum rsnd_reg reg, u32 mask, u32 data)
-{
- void __iomem *base = rsnd_gen_reg_get(priv, mod, reg);
- struct device *dev = rsnd_priv_to_dev(priv);
- u32 val;
-
- BUG_ON(!base);
-
- val = ioread32(base);
- val &= ~mask;
- val |= data & mask;
- iowrite32(val, base);
-
- dev_dbg(dev, "s %p : %08x\n", base, val);
-}
-
/*
* rsnd_mod functions
*/
@@ -254,13 +210,6 @@ int rsnd_dma_available(struct rsnd_dma *dma)
return !!dma->chan;
}
-static bool rsnd_dma_filter(struct dma_chan *chan, void *param)
-{
- chan->private = param;
-
- return true;
-}
-
int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
int is_play, int id,
int (*inquiry)(struct rsnd_dma *dma,
@@ -268,7 +217,9 @@ int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
int (*complete)(struct rsnd_dma *dma))
{
struct device *dev = rsnd_priv_to_dev(priv);
+ struct dma_slave_config cfg;
dma_cap_mask_t mask;
+ int ret;
if (dma->chan) {
dev_err(dev, "it already has dma channel\n");
@@ -278,15 +229,23 @@ int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- dma->slave.shdma_slave.slave_id = id;
-
- dma->chan = dma_request_channel(mask, rsnd_dma_filter,
- &dma->slave.shdma_slave);
+ dma->chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ (void *)id, dev,
+ is_play ? "tx" : "rx");
if (!dma->chan) {
dev_err(dev, "can't get dma channel\n");
return -EIO;
}
+ cfg.slave_id = id;
+ cfg.dst_addr = 0; /* use default addr when playback */
+ cfg.src_addr = 0; /* use default addr when capture */
+ cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+
+ ret = dmaengine_slave_config(dma->chan, &cfg);
+ if (ret < 0)
+ goto rsnd_dma_init_err;
+
dma->dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
dma->priv = priv;
dma->inquiry = inquiry;
@@ -294,6 +253,11 @@ int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
INIT_WORK(&dma->work, rsnd_dma_do_work);
return 0;
+
+rsnd_dma_init_err:
+ rsnd_dma_quit(priv, dma);
+
+ return ret;
}
void rsnd_dma_quit(struct rsnd_priv *priv,
@@ -363,6 +327,9 @@ int rsnd_dai_id(struct rsnd_priv *priv, struct rsnd_dai *rdai)
struct rsnd_dai *rsnd_dai_get(struct rsnd_priv *priv, int id)
{
+ if ((id < 0) || (id >= rsnd_dai_nr(priv)))
+ return NULL;
+
return priv->rdai + id;
}
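In rcar/core.c the private-pointer filter is replaced by dma_request_slave_channel_compat() plus an explicit dmaengine_slave_config() call, which covers both DT-described channels ("tx"/"rx") and the legacy shdma filter path. A compressed sketch of that request pattern, with foo_request_dma() and its arguments as hypothetical stand-ins:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/string.h>

static struct dma_chan *foo_request_dma(struct device *dev, bool is_play)
{
	struct dma_slave_config cfg;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* prefer a channel named "tx"/"rx" in DT; fall back to any slave channel */
	chan = dma_request_slave_channel_compat(mask, NULL, NULL, dev,
						is_play ? "tx" : "rx");
	if (!chan)
		return NULL;

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;

	if (dmaengine_slave_config(chan, &cfg) < 0) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}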
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index babb203b43b7..61212ee97c28 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -11,6 +11,11 @@
#include "rsnd.h"
struct rsnd_gen_ops {
+ int (*probe)(struct platform_device *pdev,
+ struct rcar_snd_info *info,
+ struct rsnd_priv *priv);
+ void (*remove)(struct platform_device *pdev,
+ struct rsnd_priv *priv);
int (*path_init)(struct rsnd_priv *priv,
struct rsnd_dai *rdai,
struct rsnd_dai_stream *io);
@@ -19,21 +24,97 @@ struct rsnd_gen_ops {
struct rsnd_dai_stream *io);
};
-struct rsnd_gen_reg_map {
- int index; /* -1 : not supported */
- u32 offset_id; /* offset of ssi0, ssi1, ssi2... */
- u32 offset_adr; /* offset of SSICR, SSISR, ... */
-};
-
struct rsnd_gen {
void __iomem *base[RSND_BASE_MAX];
- struct rsnd_gen_reg_map reg_map[RSND_REG_MAX];
struct rsnd_gen_ops *ops;
+
+ struct regmap *regmap;
+ struct regmap_field *regs[RSND_REG_MAX];
};
#define rsnd_priv_to_gen(p) ((struct rsnd_gen *)(p)->gen)
+#define RSND_REG_SET(gen, id, reg_id, offset, _id_offset, _id_size) \
+ [id] = { \
+ .reg = (unsigned int)gen->base[reg_id] + offset, \
+ .lsb = 0, \
+ .msb = 31, \
+ .id_size = _id_size, \
+ .id_offset = _id_offset, \
+ }
+
+/*
+ * basic function
+ */
+static int rsnd_regmap_write32(void *context, const void *_data, size_t count)
+{
+ struct rsnd_priv *priv = context;
+ struct device *dev = rsnd_priv_to_dev(priv);
+ u32 *data = (u32 *)_data;
+ u32 val = data[1];
+ void __iomem *reg = (void *)data[0];
+
+ iowrite32(val, reg);
+
+ dev_dbg(dev, "w %p : %08x\n", reg, val);
+
+ return 0;
+}
+
+static int rsnd_regmap_read32(void *context,
+ const void *_data, size_t reg_size,
+ void *_val, size_t val_size)
+{
+ struct rsnd_priv *priv = context;
+ struct device *dev = rsnd_priv_to_dev(priv);
+ u32 *data = (u32 *)_data;
+ u32 *val = (u32 *)_val;
+ void __iomem *reg = (void *)data[0];
+
+ *val = ioread32(reg);
+
+ dev_dbg(dev, "r %p : %08x\n", reg, *val);
+
+ return 0;
+}
+
+static struct regmap_bus rsnd_regmap_bus = {
+ .write = rsnd_regmap_write32,
+ .read = rsnd_regmap_read32,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+u32 rsnd_read(struct rsnd_priv *priv,
+ struct rsnd_mod *mod, enum rsnd_reg reg)
+{
+ struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+ u32 val;
+
+ regmap_fields_read(gen->regs[reg], rsnd_mod_id(mod), &val);
+
+ return val;
+}
+
+void rsnd_write(struct rsnd_priv *priv,
+ struct rsnd_mod *mod,
+ enum rsnd_reg reg, u32 data)
+{
+ struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+ regmap_fields_write(gen->regs[reg], rsnd_mod_id(mod), data);
+}
+
+void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
+ enum rsnd_reg reg, u32 mask, u32 data)
+{
+ struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+ regmap_fields_update_bits(gen->regs[reg], rsnd_mod_id(mod),
+ mask, data);
+}
+
/*
* Gen2
* will be filled in the future
@@ -98,44 +179,64 @@ static int rsnd_gen1_path_exit(struct rsnd_priv *priv,
return ret;
}
-static struct rsnd_gen_ops rsnd_gen1_ops = {
- .path_init = rsnd_gen1_path_init,
- .path_exit = rsnd_gen1_path_exit,
-};
+/* single address mapping */
+#define RSND_GEN1_S_REG(gen, reg, id, offset) \
+ RSND_REG_SET(gen, RSND_REG_##id, RSND_GEN1_##reg, offset, 0, 9)
-#define RSND_GEN1_REG_MAP(g, s, i, oi, oa) \
- do { \
- (g)->reg_map[RSND_REG_##i].index = RSND_GEN1_##s; \
- (g)->reg_map[RSND_REG_##i].offset_id = oi; \
- (g)->reg_map[RSND_REG_##i].offset_adr = oa; \
- } while (0)
+/* multi address mapping */
+#define RSND_GEN1_M_REG(gen, reg, id, offset, _id_offset) \
+ RSND_REG_SET(gen, RSND_REG_##id, RSND_GEN1_##reg, offset, _id_offset, 9)
-static void rsnd_gen1_reg_map_init(struct rsnd_gen *gen)
+static int rsnd_gen1_regmap_init(struct rsnd_priv *priv, struct rsnd_gen *gen)
{
- RSND_GEN1_REG_MAP(gen, SRU, SRC_ROUTE_SEL, 0x0, 0x00);
- RSND_GEN1_REG_MAP(gen, SRU, SRC_TMG_SEL0, 0x0, 0x08);
- RSND_GEN1_REG_MAP(gen, SRU, SRC_TMG_SEL1, 0x0, 0x0c);
- RSND_GEN1_REG_MAP(gen, SRU, SRC_TMG_SEL2, 0x0, 0x10);
- RSND_GEN1_REG_MAP(gen, SRU, SRC_CTRL, 0x0, 0xc0);
- RSND_GEN1_REG_MAP(gen, SRU, SSI_MODE0, 0x0, 0xD0);
- RSND_GEN1_REG_MAP(gen, SRU, SSI_MODE1, 0x0, 0xD4);
- RSND_GEN1_REG_MAP(gen, SRU, BUSIF_MODE, 0x4, 0x20);
- RSND_GEN1_REG_MAP(gen, SRU, BUSIF_ADINR, 0x40, 0x214);
-
- RSND_GEN1_REG_MAP(gen, ADG, BRRA, 0x0, 0x00);
- RSND_GEN1_REG_MAP(gen, ADG, BRRB, 0x0, 0x04);
- RSND_GEN1_REG_MAP(gen, ADG, SSICKR, 0x0, 0x08);
- RSND_GEN1_REG_MAP(gen, ADG, AUDIO_CLK_SEL0, 0x0, 0x0c);
- RSND_GEN1_REG_MAP(gen, ADG, AUDIO_CLK_SEL1, 0x0, 0x10);
- RSND_GEN1_REG_MAP(gen, ADG, AUDIO_CLK_SEL3, 0x0, 0x18);
- RSND_GEN1_REG_MAP(gen, ADG, AUDIO_CLK_SEL4, 0x0, 0x1c);
- RSND_GEN1_REG_MAP(gen, ADG, AUDIO_CLK_SEL5, 0x0, 0x20);
-
- RSND_GEN1_REG_MAP(gen, SSI, SSICR, 0x40, 0x00);
- RSND_GEN1_REG_MAP(gen, SSI, SSISR, 0x40, 0x04);
- RSND_GEN1_REG_MAP(gen, SSI, SSITDR, 0x40, 0x08);
- RSND_GEN1_REG_MAP(gen, SSI, SSIRDR, 0x40, 0x0c);
- RSND_GEN1_REG_MAP(gen, SSI, SSIWSR, 0x40, 0x20);
+ int i;
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct regmap_config regc;
+ struct reg_field regf[RSND_REG_MAX] = {
+ RSND_GEN1_S_REG(gen, SRU, SRC_ROUTE_SEL, 0x00),
+ RSND_GEN1_S_REG(gen, SRU, SRC_TMG_SEL0, 0x08),
+ RSND_GEN1_S_REG(gen, SRU, SRC_TMG_SEL1, 0x0c),
+ RSND_GEN1_S_REG(gen, SRU, SRC_TMG_SEL2, 0x10),
+ RSND_GEN1_S_REG(gen, SRU, SRC_CTRL, 0xc0),
+ RSND_GEN1_S_REG(gen, SRU, SSI_MODE0, 0xD0),
+ RSND_GEN1_S_REG(gen, SRU, SSI_MODE1, 0xD4),
+ RSND_GEN1_M_REG(gen, SRU, BUSIF_MODE, 0x20, 0x4),
+ RSND_GEN1_M_REG(gen, SRU, BUSIF_ADINR, 0x214, 0x40),
+
+ RSND_GEN1_S_REG(gen, ADG, BRRA, 0x00),
+ RSND_GEN1_S_REG(gen, ADG, BRRB, 0x04),
+ RSND_GEN1_S_REG(gen, ADG, SSICKR, 0x08),
+ RSND_GEN1_S_REG(gen, ADG, AUDIO_CLK_SEL0, 0x0c),
+ RSND_GEN1_S_REG(gen, ADG, AUDIO_CLK_SEL1, 0x10),
+ RSND_GEN1_S_REG(gen, ADG, AUDIO_CLK_SEL3, 0x18),
+ RSND_GEN1_S_REG(gen, ADG, AUDIO_CLK_SEL4, 0x1c),
+ RSND_GEN1_S_REG(gen, ADG, AUDIO_CLK_SEL5, 0x20),
+
+ RSND_GEN1_M_REG(gen, SSI, SSICR, 0x00, 0x40),
+ RSND_GEN1_M_REG(gen, SSI, SSISR, 0x04, 0x40),
+ RSND_GEN1_M_REG(gen, SSI, SSITDR, 0x08, 0x40),
+ RSND_GEN1_M_REG(gen, SSI, SSIRDR, 0x0c, 0x40),
+ RSND_GEN1_M_REG(gen, SSI, SSIWSR, 0x20, 0x40),
+ };
+
+ memset(&regc, 0, sizeof(regc));
+ regc.reg_bits = 32;
+ regc.val_bits = 32;
+
+ gen->regmap = devm_regmap_init(dev, &rsnd_regmap_bus, priv, &regc);
+ if (IS_ERR(gen->regmap)) {
+ dev_err(dev, "regmap error %ld\n", PTR_ERR(gen->regmap));
+ return PTR_ERR(gen->regmap);
+ }
+
+ for (i = 0; i < RSND_REG_MAX; i++) {
+ gen->regs[i] = devm_regmap_field_alloc(dev, gen->regmap, regf[i]);
+ if (IS_ERR(gen->regs[i]))
+ return PTR_ERR(gen->regs[i]);
+
+ }
+
+ return 0;
}
static int rsnd_gen1_probe(struct platform_device *pdev,
@@ -147,6 +248,7 @@ static int rsnd_gen1_probe(struct platform_device *pdev,
struct resource *sru_res;
struct resource *adg_res;
struct resource *ssi_res;
+ int ret;
/*
* map address
@@ -163,8 +265,9 @@ static int rsnd_gen1_probe(struct platform_device *pdev,
IS_ERR(gen->base[RSND_GEN1_SSI]))
return -ENODEV;
- gen->ops = &rsnd_gen1_ops;
- rsnd_gen1_reg_map_init(gen);
+ ret = rsnd_gen1_regmap_init(priv, gen);
+ if (ret < 0)
+ return ret;
dev_dbg(dev, "Gen1 device probed\n");
dev_dbg(dev, "SRU : %08x => %p\n", sru_res->start,
@@ -183,6 +286,13 @@ static void rsnd_gen1_remove(struct platform_device *pdev,
{
}
+static struct rsnd_gen_ops rsnd_gen1_ops = {
+ .probe = rsnd_gen1_probe,
+ .remove = rsnd_gen1_remove,
+ .path_init = rsnd_gen1_path_init,
+ .path_exit = rsnd_gen1_path_exit,
+};
+
/*
* Gen
*/
@@ -204,46 +314,12 @@ int rsnd_gen_path_exit(struct rsnd_priv *priv,
return gen->ops->path_exit(priv, rdai, io);
}
-void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
- struct rsnd_mod *mod,
- enum rsnd_reg reg)
-{
- struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
- struct device *dev = rsnd_priv_to_dev(priv);
- int index;
- u32 offset_id, offset_adr;
-
- if (reg >= RSND_REG_MAX) {
- dev_err(dev, "rsnd_reg reg error\n");
- return NULL;
- }
-
- index = gen->reg_map[reg].index;
- offset_id = gen->reg_map[reg].offset_id;
- offset_adr = gen->reg_map[reg].offset_adr;
-
- if (index < 0) {
- dev_err(dev, "unsupported reg access %d\n", reg);
- return NULL;
- }
-
- if (offset_id && mod)
- offset_id *= rsnd_mod_id(mod);
-
- /*
- * index/offset were set on gen1/gen2
- */
-
- return gen->base[index] + offset_id + offset_adr;
-}
-
int rsnd_gen_probe(struct platform_device *pdev,
struct rcar_snd_info *info,
struct rsnd_priv *priv)
{
struct device *dev = rsnd_priv_to_dev(priv);
struct rsnd_gen *gen;
- int i;
gen = devm_kzalloc(dev, sizeof(*gen), GFP_KERNEL);
if (!gen) {
@@ -251,30 +327,23 @@ int rsnd_gen_probe(struct platform_device *pdev,
return -ENOMEM;
}
- priv->gen = gen;
-
- /*
- * see
- * rsnd_reg_get()
- * rsnd_gen_probe()
- */
- for (i = 0; i < RSND_REG_MAX; i++)
- gen->reg_map[i].index = -1;
-
- /*
- * init each module
- */
if (rsnd_is_gen1(priv))
- return rsnd_gen1_probe(pdev, info, priv);
+ gen->ops = &rsnd_gen1_ops;
- dev_err(dev, "unknown generation R-Car sound device\n");
+ if (!gen->ops) {
+ dev_err(dev, "unknown generation R-Car sound device\n");
+ return -ENODEV;
+ }
- return -ENODEV;
+ priv->gen = gen;
+
+ return gen->ops->probe(pdev, info, priv);
}
void rsnd_gen_remove(struct platform_device *pdev,
struct rsnd_priv *priv)
{
- if (rsnd_is_gen1(priv))
- rsnd_gen1_remove(pdev, priv);
+ struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+ gen->ops->remove(pdev, priv);
}
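gen.c above replaces the hand-rolled register map (base index plus per-module offset) with a regmap built on a custom read/write bus and one regmap_field per register, indexed by module id through the regmap_fields_*() calls. A much smaller sketch of the regmap_field idea on an ordinary regmap, with a hypothetical FOO_CTRL register:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

#define FOO_CTRL	0x00

static int foo_set_ctrl_mode(struct device *dev, struct regmap *map,
			     unsigned int mode)
{
	/* describe bits [3:0] of FOO_CTRL as a named field */
	struct reg_field desc = REG_FIELD(FOO_CTRL, 0, 3);
	struct regmap_field *field;

	field = devm_regmap_field_alloc(dev, map, desc);
	if (IS_ERR(field))
		return PTR_ERR(field);

	/* read-modify-write confined to the field's bit range */
	return regmap_field_write(field, mode);
}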
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 9cc6986a8cfb..9e463e50e7e6 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -78,10 +78,6 @@ struct rsnd_dai_stream;
#define rsnd_mod_bset(m, r, s, d) \
rsnd_bset(rsnd_mod_to_priv(m), m, RSND_REG_##r, s, d)
-#define rsnd_priv_read(p, r) rsnd_read(p, NULL, RSND_REG_##r)
-#define rsnd_priv_write(p, r, d) rsnd_write(p, NULL, RSND_REG_##r, d)
-#define rsnd_priv_bset(p, r, s, d) rsnd_bset(p, NULL, RSND_REG_##r, s, d)
-
u32 rsnd_read(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg);
void rsnd_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
enum rsnd_reg reg, u32 data);
@@ -220,8 +216,8 @@ int rsnd_gen_path_exit(struct rsnd_priv *priv,
void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
struct rsnd_mod *mod,
enum rsnd_reg reg);
-#define rsnd_is_gen1(s) ((s)->info->flags & RSND_GEN1)
-#define rsnd_is_gen2(s) ((s)->info->flags & RSND_GEN2)
+#define rsnd_is_gen1(s) (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN1)
+#define rsnd_is_gen2(s) (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN2)
/*
* R-Car ADG
@@ -285,6 +281,7 @@ int rsnd_scu_probe(struct platform_device *pdev,
void rsnd_scu_remove(struct platform_device *pdev,
struct rsnd_priv *priv);
struct rsnd_mod *rsnd_scu_mod_get(struct rsnd_priv *priv, int id);
+bool rsnd_scu_hpbif_is_enable(struct rsnd_mod *mod);
#define rsnd_scu_nr(priv) ((priv)->scu_nr)
/*
diff --git a/sound/soc/sh/rcar/scu.c b/sound/soc/sh/rcar/scu.c
index 2df2e9150b89..1ab1bce6be7f 100644
--- a/sound/soc/sh/rcar/scu.c
+++ b/sound/soc/sh/rcar/scu.c
@@ -146,20 +146,26 @@ static int rsnd_scu_set_hpbif(struct rsnd_priv *priv,
return 0;
}
+bool rsnd_scu_hpbif_is_enable(struct rsnd_mod *mod)
+{
+ struct rsnd_scu *scu = rsnd_mod_to_scu(mod);
+ u32 flags = rsnd_scu_mode_flags(scu);
+
+ return !!(flags & RSND_SCU_USE_HPBIF);
+}
+
static int rsnd_scu_start(struct rsnd_mod *mod,
struct rsnd_dai *rdai,
struct rsnd_dai_stream *io)
{
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
- struct rsnd_scu *scu = rsnd_mod_to_scu(mod);
struct device *dev = rsnd_priv_to_dev(priv);
- u32 flags = rsnd_scu_mode_flags(scu);
int ret;
/*
* SCU will be used if it has RSND_SCU_USE_HPBIF flags
*/
- if (!(flags & RSND_SCU_USE_HPBIF)) {
+ if (!rsnd_scu_hpbif_is_enable(mod)) {
/* it use PIO transter */
dev_dbg(dev, "%s%d is not used\n",
rsnd_mod_name(mod), rsnd_mod_id(mod));
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index fae26d3f79d2..b71cf9d7dd3f 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -101,29 +101,30 @@ struct rsnd_ssiu {
#define rsnd_ssi_to_ssiu(ssi)\
(((struct rsnd_ssiu *)((ssi) - rsnd_mod_id(&(ssi)->mod))) - 1)
-static void rsnd_ssi_mode_init(struct rsnd_priv *priv,
- struct rsnd_ssiu *ssiu)
+static void rsnd_ssi_mode_set(struct rsnd_priv *priv,
+ struct rsnd_dai *rdai,
+ struct rsnd_ssi *ssi)
{
struct device *dev = rsnd_priv_to_dev(priv);
- struct rsnd_ssi *ssi;
+ struct rsnd_mod *scu;
+ struct rsnd_ssiu *ssiu = rsnd_ssi_to_ssiu(ssi);
+ int id = rsnd_mod_id(&ssi->mod);
u32 flags;
u32 val;
- int i;
+
+ scu = rsnd_scu_mod_get(priv, rsnd_mod_id(&ssi->mod));
/*
* SSI_MODE0
*/
- ssiu->ssi_mode0 = 0;
- for_each_rsnd_ssi(ssi, priv, i) {
- flags = rsnd_ssi_mode_flags(ssi);
-
- /* see also BUSIF_MODE */
- if (!(flags & RSND_SSI_DEPENDENT)) {
- ssiu->ssi_mode0 |= (1 << i);
- dev_dbg(dev, "SSI%d uses INDEPENDENT mode\n", i);
- } else {
- dev_dbg(dev, "SSI%d uses DEPENDENT mode\n", i);
- }
+
+ /* see also BUSIF_MODE */
+ if (rsnd_scu_hpbif_is_enable(scu)) {
+ ssiu->ssi_mode0 &= ~(1 << id);
+ dev_dbg(dev, "SSI%d uses DEPENDENT mode\n", id);
+ } else {
+ ssiu->ssi_mode0 |= (1 << id);
+ dev_dbg(dev, "SSI%d uses INDEPENDENT mode\n", id);
}
/*
@@ -132,7 +133,7 @@ static void rsnd_ssi_mode_init(struct rsnd_priv *priv,
#define ssi_parent_set(p, sync, adg, ext) \
do { \
ssi->parent = ssiu->ssi + p; \
- if (flags & RSND_SSI_CLK_FROM_ADG) \
+ if (rsnd_rdai_is_clk_master(rdai)) \
val = adg; \
else \
val = ext; \
@@ -140,15 +141,11 @@ static void rsnd_ssi_mode_init(struct rsnd_priv *priv,
val |= sync; \
} while (0)
- ssiu->ssi_mode1 = 0;
- for_each_rsnd_ssi(ssi, priv, i) {
- flags = rsnd_ssi_mode_flags(ssi);
-
- if (!(flags & RSND_SSI_CLK_PIN_SHARE))
- continue;
+ flags = rsnd_ssi_mode_flags(ssi);
+ if (flags & RSND_SSI_CLK_PIN_SHARE) {
val = 0;
- switch (i) {
+ switch (id) {
case 1:
ssi_parent_set(0, (1 << 4), (0x2 << 0), (0x1 << 0));
break;
@@ -165,11 +162,6 @@ static void rsnd_ssi_mode_init(struct rsnd_priv *priv,
ssiu->ssi_mode1 |= val;
}
-}
-
-static void rsnd_ssi_mode_set(struct rsnd_ssi *ssi)
-{
- struct rsnd_ssiu *ssiu = rsnd_ssi_to_ssiu(ssi);
rsnd_mod_write(&ssi->mod, SSI_MODE0, ssiu->ssi_mode0);
rsnd_mod_write(&ssi->mod, SSI_MODE1, ssiu->ssi_mode1);
@@ -379,7 +371,7 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
ssi->cr_own = cr;
ssi->err = -1; /* ignore 1st error */
- rsnd_ssi_mode_set(ssi);
+ rsnd_ssi_mode_set(priv, rdai, ssi);
dev_dbg(dev, "%s.%d init\n", rsnd_mod_name(mod), rsnd_mod_id(mod));
@@ -706,8 +698,6 @@ int rsnd_ssi_probe(struct platform_device *pdev,
rsnd_mod_init(priv, &ssi->mod, ops, i);
}
- rsnd_ssi_mode_init(priv, ssiu);
-
dev_dbg(dev, "ssi probed\n");
return 0;
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index e72f55428f0b..1b6663f45b34 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -11,12 +11,9 @@
* option) any later version.
*/
-#include <linux/i2c.h>
-#include <linux/spi/spi.h>
#include <sound/soc.h>
-#include <linux/bitmap.h>
-#include <linux/rbtree.h>
#include <linux/export.h>
+#include <linux/slab.h>
#include <trace/events/asoc.h>
@@ -66,126 +63,42 @@ static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
return -1;
}
-static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
+int snd_soc_cache_init(struct snd_soc_codec *codec)
{
- int i;
- int ret;
- const struct snd_soc_codec_driver *codec_drv;
- unsigned int val;
+ const struct snd_soc_codec_driver *codec_drv = codec->driver;
+ size_t reg_size;
- codec_drv = codec->driver;
- for (i = 0; i < codec_drv->reg_cache_size; ++i) {
- ret = snd_soc_cache_read(codec, i, &val);
- if (ret)
- return ret;
- if (codec->reg_def_copy)
- if (snd_soc_get_cache_val(codec->reg_def_copy,
- i, codec_drv->reg_word_size) == val)
- continue;
+ reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
- WARN_ON(!snd_soc_codec_writable_register(codec, i));
-
- ret = snd_soc_write(codec, i, val);
- if (ret)
- return ret;
- dev_dbg(codec->dev, "ASoC: Synced register %#x, value = %#x\n",
- i, val);
- }
- return 0;
-}
-
-static int snd_soc_flat_cache_write(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value)
-{
- snd_soc_set_cache_val(codec->reg_cache, reg, value,
- codec->driver->reg_word_size);
- return 0;
-}
-
-static int snd_soc_flat_cache_read(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int *value)
-{
- *value = snd_soc_get_cache_val(codec->reg_cache, reg,
- codec->driver->reg_word_size);
- return 0;
-}
+ mutex_init(&codec->cache_rw_mutex);
-static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec)
-{
- if (!codec->reg_cache)
- return 0;
- kfree(codec->reg_cache);
- codec->reg_cache = NULL;
- return 0;
-}
+ dev_dbg(codec->dev, "ASoC: Initializing cache for %s codec\n",
+ codec->name);
-static int snd_soc_flat_cache_init(struct snd_soc_codec *codec)
-{
- if (codec->reg_def_copy)
- codec->reg_cache = kmemdup(codec->reg_def_copy,
- codec->reg_size, GFP_KERNEL);
+ if (codec_drv->reg_cache_default)
+ codec->reg_cache = kmemdup(codec_drv->reg_cache_default,
+ reg_size, GFP_KERNEL);
else
- codec->reg_cache = kzalloc(codec->reg_size, GFP_KERNEL);
+ codec->reg_cache = kzalloc(reg_size, GFP_KERNEL);
if (!codec->reg_cache)
return -ENOMEM;
return 0;
}
-/* an array of all supported compression types */
-static const struct snd_soc_cache_ops cache_types[] = {
- /* Flat *must* be the first entry for fallback */
- {
- .id = SND_SOC_FLAT_COMPRESSION,
- .name = "flat",
- .init = snd_soc_flat_cache_init,
- .exit = snd_soc_flat_cache_exit,
- .read = snd_soc_flat_cache_read,
- .write = snd_soc_flat_cache_write,
- .sync = snd_soc_flat_cache_sync
- },
-};
-
-int snd_soc_cache_init(struct snd_soc_codec *codec)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(cache_types); ++i)
- if (cache_types[i].id == codec->compress_type)
- break;
-
- /* Fall back to flat compression */
- if (i == ARRAY_SIZE(cache_types)) {
- dev_warn(codec->dev, "ASoC: Could not match compress type: %d\n",
- codec->compress_type);
- i = 0;
- }
-
- mutex_init(&codec->cache_rw_mutex);
- codec->cache_ops = &cache_types[i];
-
- if (codec->cache_ops->init) {
- if (codec->cache_ops->name)
- dev_dbg(codec->dev, "ASoC: Initializing %s cache for %s codec\n",
- codec->cache_ops->name, codec->name);
- return codec->cache_ops->init(codec);
- }
- return -ENOSYS;
-}
-
/*
* NOTE: keep in mind that this function might be called
* multiple times.
*/
int snd_soc_cache_exit(struct snd_soc_codec *codec)
{
- if (codec->cache_ops && codec->cache_ops->exit) {
- if (codec->cache_ops->name)
- dev_dbg(codec->dev, "ASoC: Destroying %s cache for %s codec\n",
- codec->cache_ops->name, codec->name);
- return codec->cache_ops->exit(codec);
- }
- return -ENOSYS;
+ dev_dbg(codec->dev, "ASoC: Destroying cache for %s codec\n",
+ codec->name);
+ if (!codec->reg_cache)
+ return 0;
+ kfree(codec->reg_cache);
+ codec->reg_cache = NULL;
+ return 0;
}
/**
@@ -198,18 +111,15 @@ int snd_soc_cache_exit(struct snd_soc_codec *codec)
int snd_soc_cache_read(struct snd_soc_codec *codec,
unsigned int reg, unsigned int *value)
{
- int ret;
+ if (!value)
+ return -EINVAL;
mutex_lock(&codec->cache_rw_mutex);
-
- if (value && codec->cache_ops && codec->cache_ops->read) {
- ret = codec->cache_ops->read(codec, reg, value);
- mutex_unlock(&codec->cache_rw_mutex);
- return ret;
- }
-
+ *value = snd_soc_get_cache_val(codec->reg_cache, reg,
+ codec->driver->reg_word_size);
mutex_unlock(&codec->cache_rw_mutex);
- return -ENOSYS;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_read);
@@ -223,20 +133,42 @@ EXPORT_SYMBOL_GPL(snd_soc_cache_read);
int snd_soc_cache_write(struct snd_soc_codec *codec,
unsigned int reg, unsigned int value)
{
+ mutex_lock(&codec->cache_rw_mutex);
+ snd_soc_set_cache_val(codec->reg_cache, reg, value,
+ codec->driver->reg_word_size);
+ mutex_unlock(&codec->cache_rw_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_cache_write);
+
+static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
+{
+ int i;
int ret;
+ const struct snd_soc_codec_driver *codec_drv;
+ unsigned int val;
- mutex_lock(&codec->cache_rw_mutex);
+ codec_drv = codec->driver;
+ for (i = 0; i < codec_drv->reg_cache_size; ++i) {
+ ret = snd_soc_cache_read(codec, i, &val);
+ if (ret)
+ return ret;
+ if (codec_drv->reg_cache_default)
+ if (snd_soc_get_cache_val(codec_drv->reg_cache_default,
+ i, codec_drv->reg_word_size) == val)
+ continue;
- if (codec->cache_ops && codec->cache_ops->write) {
- ret = codec->cache_ops->write(codec, reg, value);
- mutex_unlock(&codec->cache_rw_mutex);
- return ret;
- }
+ WARN_ON(!snd_soc_codec_writable_register(codec, i));
- mutex_unlock(&codec->cache_rw_mutex);
- return -ENOSYS;
+ ret = snd_soc_write(codec, i, val);
+ if (ret)
+ return ret;
+ dev_dbg(codec->dev, "ASoC: Synced register %#x, value = %#x\n",
+ i, val);
+ }
+ return 0;
}
-EXPORT_SYMBOL_GPL(snd_soc_cache_write);
/**
* snd_soc_cache_sync: Sync the register cache with the hardware.
@@ -249,92 +181,19 @@ EXPORT_SYMBOL_GPL(snd_soc_cache_write);
*/
int snd_soc_cache_sync(struct snd_soc_codec *codec)
{
+ const char *name = "flat";
int ret;
- const char *name;
- if (!codec->cache_sync) {
+ if (!codec->cache_sync)
return 0;
- }
-
- if (!codec->cache_ops || !codec->cache_ops->sync)
- return -ENOSYS;
- if (codec->cache_ops->name)
- name = codec->cache_ops->name;
- else
- name = "unknown";
-
- if (codec->cache_ops->name)
- dev_dbg(codec->dev, "ASoC: Syncing %s cache for %s codec\n",
- codec->cache_ops->name, codec->name);
+ dev_dbg(codec->dev, "ASoC: Syncing cache for %s codec\n",
+ codec->name);
trace_snd_soc_cache_sync(codec, name, "start");
- ret = codec->cache_ops->sync(codec);
+ ret = snd_soc_flat_cache_sync(codec);
if (!ret)
codec->cache_sync = 0;
trace_snd_soc_cache_sync(codec, name, "end");
return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
-
-static int snd_soc_get_reg_access_index(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- const struct snd_soc_codec_driver *codec_drv;
- unsigned int min, max, index;
-
- codec_drv = codec->driver;
- min = 0;
- max = codec_drv->reg_access_size - 1;
- do {
- index = (min + max) / 2;
- if (codec_drv->reg_access_default[index].reg == reg)
- return index;
- if (codec_drv->reg_access_default[index].reg < reg)
- min = index + 1;
- else
- max = index;
- } while (min <= max);
- return -1;
-}
-
-int snd_soc_default_volatile_register(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- int index;
-
- if (reg >= codec->driver->reg_cache_size)
- return 1;
- index = snd_soc_get_reg_access_index(codec, reg);
- if (index < 0)
- return 0;
- return codec->driver->reg_access_default[index].vol;
-}
-EXPORT_SYMBOL_GPL(snd_soc_default_volatile_register);
-
-int snd_soc_default_readable_register(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- int index;
-
- if (reg >= codec->driver->reg_cache_size)
- return 1;
- index = snd_soc_get_reg_access_index(codec, reg);
- if (index < 0)
- return 0;
- return codec->driver->reg_access_default[index].read;
-}
-EXPORT_SYMBOL_GPL(snd_soc_default_readable_register);
-
-int snd_soc_default_writable_register(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- int index;
-
- if (reg >= codec->driver->reg_cache_size)
- return 1;
- index = snd_soc_get_reg_access_index(codec, reg);
- if (index < 0)
- return 0;
- return codec->driver->reg_access_default[index].write;
-}
-EXPORT_SYMBOL_GPL(snd_soc_default_writable_register);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 1a38be0d0ca8..bdc1d74eb7b0 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1589,17 +1589,13 @@ static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
soc_remove_codec(codec);
}
-static int snd_soc_init_codec_cache(struct snd_soc_codec *codec,
- enum snd_soc_compress_type compress_type)
+static int snd_soc_init_codec_cache(struct snd_soc_codec *codec)
{
int ret;
if (codec->cache_init)
return 0;
- /* override the compress_type if necessary */
- if (compress_type && codec->compress_type != compress_type)
- codec->compress_type = compress_type;
ret = snd_soc_cache_init(codec);
if (ret < 0) {
dev_err(codec->dev,
@@ -1614,8 +1610,6 @@ static int snd_soc_init_codec_cache(struct snd_soc_codec *codec,
static int snd_soc_instantiate_card(struct snd_soc_card *card)
{
struct snd_soc_codec *codec;
- struct snd_soc_codec_conf *codec_conf;
- enum snd_soc_compress_type compress_type;
struct snd_soc_dai_link *dai_link;
int ret, i, order, dai_fmt;
@@ -1639,19 +1633,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
list_for_each_entry(codec, &codec_list, list) {
if (codec->cache_init)
continue;
- /* by default we don't override the compress_type */
- compress_type = 0;
- /* check to see if we need to override the compress_type */
- for (i = 0; i < card->num_configs; ++i) {
- codec_conf = &card->codec_conf[i];
- if (!strcmp(codec->name, codec_conf->dev_name)) {
- compress_type = codec_conf->compress_type;
- if (compress_type && compress_type
- != codec->compress_type)
- break;
- }
- }
- ret = snd_soc_init_codec_cache(codec, compress_type);
+ ret = snd_soc_init_codec_cache(codec);
if (ret < 0)
goto base_error;
}
@@ -2297,13 +2279,6 @@ unsigned int snd_soc_write(struct snd_soc_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_soc_write);
-unsigned int snd_soc_bulk_write_raw(struct snd_soc_codec *codec,
- unsigned int reg, const void *data, size_t len)
-{
- return codec->bulk_write_raw(codec, reg, data, len);
-}
-EXPORT_SYMBOL_GPL(snd_soc_bulk_write_raw);
-
/**
* snd_soc_update_bits - update codec register bits
* @codec: audio codec
@@ -2576,8 +2551,9 @@ int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol,
if (uinfo->value.enumerated.item > e->max - 1)
uinfo->value.enumerated.item = e->max - 1;
- strcpy(uinfo->value.enumerated.name,
- e->texts[uinfo->value.enumerated.item]);
+ strlcpy(uinfo->value.enumerated.name,
+ e->texts[uinfo->value.enumerated.item],
+ sizeof(uinfo->value.enumerated.name));
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_enum_double);
@@ -3576,6 +3552,22 @@ int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
EXPORT_SYMBOL_GPL(snd_soc_codec_set_pll);
/**
+ * snd_soc_dai_set_bclk_ratio - configure BCLK to sample rate ratio.
+ * @dai: DAI
+ * @ratio: Ratio of BCLK to sample rate.
+ *
+ * Configures the DAI for a preset BCLK to sample rate ratio.
+ */
+int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+ if (dai->driver && dai->driver->ops->set_bclk_ratio)
+ return dai->driver->ops->set_bclk_ratio(dai, ratio);
+ else
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_set_bclk_ratio);
+
+/**
* snd_soc_dai_set_fmt - configure DAI hardware audio format.
* @dai: DAI
* @fmt: SND_SOC_DAIFMT_ format value.
@@ -4020,6 +4012,113 @@ static void snd_soc_unregister_dais(struct device *dev, size_t count)
}
/**
+ * snd_soc_register_component - Register a component with the ASoC core
+ *
+ */
+static int
+__snd_soc_register_component(struct device *dev,
+ struct snd_soc_component *cmpnt,
+ const struct snd_soc_component_driver *cmpnt_drv,
+ struct snd_soc_dai_driver *dai_drv,
+ int num_dai, bool allow_single_dai)
+{
+ int ret;
+
+ dev_dbg(dev, "component register %s\n", dev_name(dev));
+
+ if (!cmpnt) {
+ dev_err(dev, "ASoC: Failed to connecting component\n");
+ return -ENOMEM;
+ }
+
+ cmpnt->name = fmt_single_name(dev, &cmpnt->id);
+ if (!cmpnt->name) {
+ dev_err(dev, "ASoC: Failed to simplifying name\n");
+ return -ENOMEM;
+ }
+
+ cmpnt->dev = dev;
+ cmpnt->driver = cmpnt_drv;
+ cmpnt->dai_drv = dai_drv;
+ cmpnt->num_dai = num_dai;
+
+ /*
+ * snd_soc_register_dai() uses fmt_single_name(), and
+ * snd_soc_register_dais() uses fmt_multiple_name()
+ * for dai->name which is used for name based matching
+ *
+ * this function is used from cpu/codec.
+ * allow_single_dai flag can ignore "codec" driver reworking
+ * since it had been used snd_soc_register_dais(),
+ */
+ if ((1 == num_dai) && allow_single_dai)
+ ret = snd_soc_register_dai(dev, dai_drv);
+ else
+ ret = snd_soc_register_dais(dev, dai_drv, num_dai);
+ if (ret < 0) {
+ dev_err(dev, "ASoC: Failed to regster DAIs: %d\n", ret);
+ goto error_component_name;
+ }
+
+ mutex_lock(&client_mutex);
+ list_add(&cmpnt->list, &component_list);
+ mutex_unlock(&client_mutex);
+
+ dev_dbg(cmpnt->dev, "ASoC: Registered component '%s'\n", cmpnt->name);
+
+ return ret;
+
+error_component_name:
+ kfree(cmpnt->name);
+
+ return ret;
+}
+
+int snd_soc_register_component(struct device *dev,
+ const struct snd_soc_component_driver *cmpnt_drv,
+ struct snd_soc_dai_driver *dai_drv,
+ int num_dai)
+{
+ struct snd_soc_component *cmpnt;
+
+ cmpnt = devm_kzalloc(dev, sizeof(*cmpnt), GFP_KERNEL);
+ if (!cmpnt) {
+ dev_err(dev, "ASoC: Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ return __snd_soc_register_component(dev, cmpnt, cmpnt_drv,
+ dai_drv, num_dai, true);
+}
+EXPORT_SYMBOL_GPL(snd_soc_register_component);
+
+/**
+ * snd_soc_unregister_component - Unregister a component from the ASoC core
+ *
+ */
+void snd_soc_unregister_component(struct device *dev)
+{
+ struct snd_soc_component *cmpnt;
+
+ list_for_each_entry(cmpnt, &component_list, list) {
+ if (dev == cmpnt->dev)
+ goto found;
+ }
+ return;
+
+found:
+ snd_soc_unregister_dais(dev, cmpnt->num_dai);
+
+ mutex_lock(&client_mutex);
+ list_del(&cmpnt->list);
+ mutex_unlock(&client_mutex);
+
+ dev_dbg(dev, "ASoC: Unregistered component '%s'\n", cmpnt->name);
+ kfree(cmpnt->name);
+}
+EXPORT_SYMBOL_GPL(snd_soc_unregister_component);
+
+/**
* snd_soc_add_platform - Add a platform to the ASoC core
* @dev: The parent device for the platform
* @platform: The platform to add
@@ -4165,7 +4264,6 @@ int snd_soc_register_codec(struct device *dev,
struct snd_soc_dai_driver *dai_drv,
int num_dai)
{
- size_t reg_size;
struct snd_soc_codec *codec;
int ret, i;
@@ -4182,11 +4280,6 @@ int snd_soc_register_codec(struct device *dev,
goto fail_codec;
}
- if (codec_drv->compress_type)
- codec->compress_type = codec_drv->compress_type;
- else
- codec->compress_type = SND_SOC_FLAT_COMPRESSION;
-
codec->write = codec_drv->write;
codec->read = codec_drv->read;
codec->volatile_register = codec_drv->volatile_register;
@@ -4203,35 +4296,6 @@ int snd_soc_register_codec(struct device *dev,
codec->num_dai = num_dai;
mutex_init(&codec->mutex);
- /* allocate CODEC register cache */
- if (codec_drv->reg_cache_size && codec_drv->reg_word_size) {
- reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
- codec->reg_size = reg_size;
- /* it is necessary to make a copy of the default register cache
- * because in the case of using a compression type that requires
- * the default register cache to be marked as the
- * kernel might have freed the array by the time we initialize
- * the cache.
- */
- if (codec_drv->reg_cache_default) {
- codec->reg_def_copy = kmemdup(codec_drv->reg_cache_default,
- reg_size, GFP_KERNEL);
- if (!codec->reg_def_copy) {
- ret = -ENOMEM;
- goto fail_codec_name;
- }
- }
- }
-
- if (codec_drv->reg_access_size && codec_drv->reg_access_default) {
- if (!codec->volatile_register)
- codec->volatile_register = snd_soc_default_volatile_register;
- if (!codec->readable_register)
- codec->readable_register = snd_soc_default_readable_register;
- if (!codec->writable_register)
- codec->writable_register = snd_soc_default_writable_register;
- }
-
for (i = 0; i < num_dai; i++) {
fixup_codec_formats(&dai_drv[i].playback);
fixup_codec_formats(&dai_drv[i].capture);
@@ -4241,10 +4305,12 @@ int snd_soc_register_codec(struct device *dev,
list_add(&codec->list, &codec_list);
mutex_unlock(&client_mutex);
- /* register any DAIs */
- ret = snd_soc_register_dais(dev, dai_drv, num_dai);
+ /* register component */
+ ret = __snd_soc_register_component(dev, &codec->component,
+ &codec_drv->component_driver,
+ dai_drv, num_dai, false);
if (ret < 0) {
- dev_err(codec->dev, "ASoC: Failed to regster DAIs: %d\n", ret);
+ dev_err(codec->dev, "ASoC: Failed to regster component: %d\n", ret);
goto fail_codec_name;
}
@@ -4279,7 +4345,7 @@ void snd_soc_unregister_codec(struct device *dev)
return;
found:
- snd_soc_unregister_dais(dev, codec->num_dai);
+ snd_soc_unregister_component(dev);
mutex_lock(&client_mutex);
list_del(&codec->list);
@@ -4288,98 +4354,11 @@ found:
dev_dbg(codec->dev, "ASoC: Unregistered codec '%s'\n", codec->name);
snd_soc_cache_exit(codec);
- kfree(codec->reg_def_copy);
kfree(codec->name);
kfree(codec);
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_codec);
-
-/**
- * snd_soc_register_component - Register a component with the ASoC core
- *
- */
-int snd_soc_register_component(struct device *dev,
- const struct snd_soc_component_driver *cmpnt_drv,
- struct snd_soc_dai_driver *dai_drv,
- int num_dai)
-{
- struct snd_soc_component *cmpnt;
- int ret;
-
- dev_dbg(dev, "component register %s\n", dev_name(dev));
-
- cmpnt = devm_kzalloc(dev, sizeof(*cmpnt), GFP_KERNEL);
- if (!cmpnt) {
- dev_err(dev, "ASoC: Failed to allocate memory\n");
- return -ENOMEM;
- }
-
- cmpnt->name = fmt_single_name(dev, &cmpnt->id);
- if (!cmpnt->name) {
- dev_err(dev, "ASoC: Failed to simplifying name\n");
- return -ENOMEM;
- }
-
- cmpnt->dev = dev;
- cmpnt->driver = cmpnt_drv;
- cmpnt->num_dai = num_dai;
-
- /*
- * snd_soc_register_dai() uses fmt_single_name(), and
- * snd_soc_register_dais() uses fmt_multiple_name()
- * for dai->name which is used for name based matching
- */
- if (1 == num_dai)
- ret = snd_soc_register_dai(dev, dai_drv);
- else
- ret = snd_soc_register_dais(dev, dai_drv, num_dai);
- if (ret < 0) {
- dev_err(dev, "ASoC: Failed to regster DAIs: %d\n", ret);
- goto error_component_name;
- }
-
- mutex_lock(&client_mutex);
- list_add(&cmpnt->list, &component_list);
- mutex_unlock(&client_mutex);
-
- dev_dbg(cmpnt->dev, "ASoC: Registered component '%s'\n", cmpnt->name);
-
- return ret;
-
-error_component_name:
- kfree(cmpnt->name);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(snd_soc_register_component);
-
-/**
- * snd_soc_unregister_component - Unregister a component from the ASoC core
- *
- */
-void snd_soc_unregister_component(struct device *dev)
-{
- struct snd_soc_component *cmpnt;
-
- list_for_each_entry(cmpnt, &component_list, list) {
- if (dev == cmpnt->dev)
- goto found;
- }
- return;
-
-found:
- snd_soc_unregister_dais(dev, cmpnt->num_dai);
-
- mutex_lock(&client_mutex);
- list_del(&cmpnt->list);
- mutex_unlock(&client_mutex);
-
- dev_dbg(dev, "ASoC: Unregistered component '%s'\n", cmpnt->name);
- kfree(cmpnt->name);
-}
-EXPORT_SYMBOL_GPL(snd_soc_unregister_component);
-
/* Retrieve a card's name from device tree */
int snd_soc_of_parse_card_name(struct snd_soc_card *card,
const char *propname)
@@ -4567,6 +4546,60 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
}
EXPORT_SYMBOL_GPL(snd_soc_of_parse_daifmt);
+int snd_soc_of_get_dai_name(struct device_node *of_node,
+ const char **dai_name)
+{
+ struct snd_soc_component *pos;
+ struct of_phandle_args args;
+ int ret;
+
+ ret = of_parse_phandle_with_args(of_node, "sound-dai",
+ "#sound-dai-cells", 0, &args);
+ if (ret)
+ return ret;
+
+ ret = -EPROBE_DEFER;
+
+ mutex_lock(&client_mutex);
+ list_for_each_entry(pos, &component_list, list) {
+ if (pos->dev->of_node != args.np)
+ continue;
+
+ if (pos->driver->of_xlate_dai_name) {
+ ret = pos->driver->of_xlate_dai_name(pos, &args, dai_name);
+ } else {
+ int id = -1;
+
+ switch (args.args_count) {
+ case 0:
+ id = 0; /* same as dai_drv[0] */
+ break;
+ case 1:
+ id = args.args[0];
+ break;
+ default:
+ /* not supported */
+ break;
+ }
+
+ if (id < 0 || id >= pos->num_dai) {
+ ret = -EINVAL;
+ } else {
+ *dai_name = pos->dai_drv[id].name;
+ ret = 0;
+ }
+ }
+
+ break;
+ }
+ mutex_unlock(&client_mutex);
+
+ of_node_put(args.np);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_name);
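For reference only (not part of the patch): a minimal sketch of how a DT-aware machine driver could consume the helper added above. The node passed in is assumed to carry a "sound-dai" phandle, with "#sound-dai-cells" on the referenced component node; the function and link names are placeholders.

#include <linux/of.h>
#include <sound/soc.h>

/* Illustrative helper: fill a DAI link's codec_dai_name from DT. */
static int example_resolve_codec_dai(struct device_node *np,
				     struct snd_soc_dai_link *link)
{
	/*
	 * Returns -EPROBE_DEFER until the component referenced by the
	 * "sound-dai" phandle has registered, so a caller can simply
	 * propagate the error from its probe().
	 */
	return snd_soc_of_get_dai_name(np, &link->codec_dai_name);
}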
+
static int __init snd_soc_init(void)
{
#ifdef CONFIG_DEBUG_FS
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c17c14c394df..cc36caaf6443 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -59,31 +59,31 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
/* dapm power sequences - make this per codec in the future */
static int dapm_up_seq[] = {
[snd_soc_dapm_pre] = 0,
- [snd_soc_dapm_supply] = 1,
[snd_soc_dapm_regulator_supply] = 1,
[snd_soc_dapm_clock_supply] = 1,
- [snd_soc_dapm_micbias] = 2,
+ [snd_soc_dapm_supply] = 2,
+ [snd_soc_dapm_micbias] = 3,
[snd_soc_dapm_dai_link] = 2,
- [snd_soc_dapm_dai_in] = 3,
- [snd_soc_dapm_dai_out] = 3,
- [snd_soc_dapm_aif_in] = 3,
- [snd_soc_dapm_aif_out] = 3,
- [snd_soc_dapm_mic] = 4,
- [snd_soc_dapm_mux] = 5,
- [snd_soc_dapm_virt_mux] = 5,
- [snd_soc_dapm_value_mux] = 5,
- [snd_soc_dapm_dac] = 6,
- [snd_soc_dapm_switch] = 7,
- [snd_soc_dapm_mixer] = 7,
- [snd_soc_dapm_mixer_named_ctl] = 7,
- [snd_soc_dapm_pga] = 8,
- [snd_soc_dapm_adc] = 9,
- [snd_soc_dapm_out_drv] = 10,
- [snd_soc_dapm_hp] = 10,
- [snd_soc_dapm_spk] = 10,
- [snd_soc_dapm_line] = 10,
- [snd_soc_dapm_kcontrol] = 11,
- [snd_soc_dapm_post] = 12,
+ [snd_soc_dapm_dai_in] = 4,
+ [snd_soc_dapm_dai_out] = 4,
+ [snd_soc_dapm_aif_in] = 4,
+ [snd_soc_dapm_aif_out] = 4,
+ [snd_soc_dapm_mic] = 5,
+ [snd_soc_dapm_mux] = 6,
+ [snd_soc_dapm_virt_mux] = 6,
+ [snd_soc_dapm_value_mux] = 6,
+ [snd_soc_dapm_dac] = 7,
+ [snd_soc_dapm_switch] = 8,
+ [snd_soc_dapm_mixer] = 8,
+ [snd_soc_dapm_mixer_named_ctl] = 8,
+ [snd_soc_dapm_pga] = 9,
+ [snd_soc_dapm_adc] = 10,
+ [snd_soc_dapm_out_drv] = 11,
+ [snd_soc_dapm_hp] = 11,
+ [snd_soc_dapm_spk] = 11,
+ [snd_soc_dapm_line] = 11,
+ [snd_soc_dapm_kcontrol] = 12,
+ [snd_soc_dapm_post] = 13,
};
static int dapm_down_seq[] = {
@@ -109,10 +109,10 @@ static int dapm_down_seq[] = {
[snd_soc_dapm_dai_in] = 10,
[snd_soc_dapm_dai_out] = 10,
[snd_soc_dapm_dai_link] = 11,
- [snd_soc_dapm_clock_supply] = 12,
- [snd_soc_dapm_regulator_supply] = 12,
[snd_soc_dapm_supply] = 12,
- [snd_soc_dapm_post] = 13,
+ [snd_soc_dapm_clock_supply] = 13,
+ [snd_soc_dapm_regulator_supply] = 13,
+ [snd_soc_dapm_post] = 14,
};
static void pop_wait(u32 pop_time)
@@ -409,6 +409,12 @@ static inline void soc_widget_unlock(struct snd_soc_dapm_widget *w)
mutex_unlock(&w->platform->mutex);
}
+static void soc_dapm_async_complete(struct snd_soc_dapm_context *dapm)
+{
+ if (dapm->codec && dapm->codec->using_regmap)
+ regmap_async_complete(dapm->codec->control_data);
+}
+
static int soc_widget_update_bits_locked(struct snd_soc_dapm_widget *w,
unsigned short reg, unsigned int mask, unsigned int value)
{
@@ -417,8 +423,9 @@ static int soc_widget_update_bits_locked(struct snd_soc_dapm_widget *w,
int ret;
if (w->codec && w->codec->using_regmap) {
- ret = regmap_update_bits_check(w->codec->control_data,
- reg, mask, value, &change);
+ ret = regmap_update_bits_check_async(w->codec->control_data,
+ reg, mask, value,
+ &change);
if (ret != 0)
return ret;
} else {
@@ -499,18 +506,22 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
int val;
struct soc_mixer_control *mc = (struct soc_mixer_control *)
w->kcontrol_news[i].private_value;
- unsigned int reg = mc->reg;
+ int reg = mc->reg;
unsigned int shift = mc->shift;
int max = mc->max;
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
- val = soc_widget_read(w, reg);
- val = (val >> shift) & mask;
- if (invert)
- val = max - val;
+ if (reg != SND_SOC_NOPM) {
+ val = soc_widget_read(w, reg);
+ val = (val >> shift) & mask;
+ if (invert)
+ val = max - val;
+ p->connect = !!val;
+ } else {
+ p->connect = 0;
+ }
- p->connect = !!val;
}
break;
case snd_soc_dapm_mux: {
@@ -1197,6 +1208,8 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
{
int ret;
+ soc_dapm_async_complete(w->dapm);
+
if (SND_SOC_DAPM_EVENT_ON(event)) {
if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
ret = regulator_allow_bypass(w->regulator, false);
@@ -1230,6 +1243,8 @@ int dapm_clock_event(struct snd_soc_dapm_widget *w,
if (!w->clk)
return -EIO;
+ soc_dapm_async_complete(w->dapm);
+
#ifdef CONFIG_HAVE_CLK
if (SND_SOC_DAPM_EVENT_ON(event)) {
return clk_prepare_enable(w->clk);
@@ -1422,6 +1437,7 @@ static void dapm_seq_check_event(struct snd_soc_card *card,
if (w->event && (w->event_flags & event)) {
pop_dbg(w->dapm->dev, card->pop_time, "pop test : %s %s\n",
w->name, ev_name);
+ soc_dapm_async_complete(w->dapm);
trace_snd_soc_dapm_widget_event_start(w, event);
ret = w->event(w, NULL, event);
trace_snd_soc_dapm_widget_event_done(w, event);
@@ -1494,6 +1510,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
struct list_head *list, int event, bool power_up)
{
struct snd_soc_dapm_widget *w, *n;
+ struct snd_soc_dapm_context *d;
LIST_HEAD(pending);
int cur_sort = -1;
int cur_subseq = -1;
@@ -1524,6 +1541,9 @@ static void dapm_seq_run(struct snd_soc_card *card,
cur_subseq);
}
+ if (cur_dapm && w->dapm != cur_dapm)
+ soc_dapm_async_complete(cur_dapm);
+
INIT_LIST_HEAD(&pending);
cur_sort = -1;
cur_subseq = INT_MIN;
@@ -1582,6 +1602,10 @@ static void dapm_seq_run(struct snd_soc_card *card,
cur_dapm->seq_notifier(cur_dapm,
i, cur_subseq);
}
+
+ list_for_each_entry(d, &card->dapm_list, list) {
+ soc_dapm_async_complete(d);
+ }
}
static void dapm_widget_update(struct snd_soc_card *card)
@@ -1840,6 +1864,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
*/
switch (w->id) {
case snd_soc_dapm_siggen:
+ case snd_soc_dapm_vmid:
break;
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
@@ -1949,7 +1974,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
w->active ? "active" : "inactive");
list_for_each_entry(p, &w->sources, list_sink) {
- if (p->connected && !p->connected(w, p->sink))
+ if (p->connected && !p->connected(w, p->source))
continue;
if (p->connect)
@@ -2791,7 +2816,7 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
struct snd_soc_card *card = codec->card;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- unsigned int reg = mc->reg;
+ int reg = mc->reg;
unsigned int shift = mc->shift;
int max = mc->max;
unsigned int mask = (1 << fls(max)) - 1;
@@ -2804,7 +2829,7 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
kcontrol->id.name);
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- if (dapm_kcontrol_is_powered(kcontrol))
+ if (dapm_kcontrol_is_powered(kcontrol) && reg != SND_SOC_NOPM)
val = (snd_soc_read(codec, reg) >> shift) & mask;
else
val = dapm_kcontrol_get_value(kcontrol);
@@ -2835,7 +2860,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
struct snd_soc_card *card = codec->card;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- unsigned int reg = mc->reg;
+ int reg = mc->reg;
unsigned int shift = mc->shift;
int max = mc->max;
unsigned int mask = (1 << fls(max)) - 1;
@@ -2857,19 +2882,24 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- dapm_kcontrol_set_value(kcontrol, val);
+ change = dapm_kcontrol_set_value(kcontrol, val);
- mask = mask << shift;
- val = val << shift;
+ if (reg != SND_SOC_NOPM) {
+ mask = mask << shift;
+ val = val << shift;
+
+ change = snd_soc_test_bits(codec, reg, mask, val);
+ }
- change = snd_soc_test_bits(codec, reg, mask, val);
if (change) {
- update.kcontrol = kcontrol;
- update.reg = reg;
- update.mask = mask;
- update.val = val;
+ if (reg != SND_SOC_NOPM) {
+ update.kcontrol = kcontrol;
+ update.reg = reg;
+ update.mask = mask;
+ update.val = val;
- card->update = &update;
+ card->update = &update;
+ }
soc_dapm_mixer_update_power(card, kcontrol, connect);
@@ -3495,6 +3525,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
if (!w) {
dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
dai->driver->playback.stream_name);
+ return -ENOMEM;
}
w->priv = dai;
@@ -3513,6 +3544,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
if (!w) {
dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
dai->driver->capture.stream_name);
+ return -ENOMEM;
}
w->priv = dai;
diff --git a/sound/soc/soc-devres.c b/sound/soc/soc-devres.c
new file mode 100644
index 000000000000..b1d732255c02
--- /dev/null
+++ b/sound/soc/soc-devres.c
@@ -0,0 +1,86 @@
+/*
+ * soc-devres.c -- ALSA SoC Audio Layer devres functions
+ *
+ * Copyright (C) 2013 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <sound/soc.h>
+
+static void devm_component_release(struct device *dev, void *res)
+{
+ snd_soc_unregister_component(*(struct device **)res);
+}
+
+/**
+ * devm_snd_soc_register_component - resource managed component registration
+ * @dev: Device used to manage component
+ * @cmpnt_drv: Component driver
+ * @dai_drv: DAI driver
+ * @num_dai: Number of DAIs to register
+ *
+ * Register a component with automatic unregistration when the device is
+ * unregistered.
+ */
+int devm_snd_soc_register_component(struct device *dev,
+ const struct snd_soc_component_driver *cmpnt_drv,
+ struct snd_soc_dai_driver *dai_drv, int num_dai)
+{
+ struct device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_component_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = snd_soc_register_component(dev, cmpnt_drv, dai_drv, num_dai);
+ if (ret == 0) {
+ *ptr = dev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_snd_soc_register_component);
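As an aside, a hedged sketch of the call pattern this helper enables; the concrete in-tree users are the spear/spdif conversions later in this diff, and the component, DAI driver, and probe names below are placeholders only.

#include <linux/platform_device.h>
#include <sound/soc.h>

static const struct snd_soc_component_driver example_component;	/* placeholder */
static struct snd_soc_dai_driver example_dai = {			/* placeholder */
	.name = "example-dai",
};

static int example_dai_probe(struct platform_device *pdev)
{
	/* Unregistration is handled by devres; no .remove callback needed. */
	return devm_snd_soc_register_component(&pdev->dev, &example_component,
					       &example_dai, 1);
}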
+
+static void devm_card_release(struct device *dev, void *res)
+{
+ snd_soc_unregister_card(*(struct snd_soc_card **)res);
+}
+
+/**
+ * devm_snd_soc_register_card - resource managed card registration
+ * @dev: Device used to manage card
+ * @card: Card to register
+ *
+ * Register a card with automatic unregistration when the device is
+ * unregistered.
+ */
+int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card)
+{
+ struct device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_card_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = snd_soc_register_card(card);
+ if (ret == 0) {
+ *ptr = dev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_snd_soc_register_card);
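Likewise, a minimal sketch of a machine driver using the card helper above; example_card and the probe function are illustrative assumptions, not code from this patch.

#include <linux/platform_device.h>
#include <sound/soc.h>

static struct snd_soc_card example_card = {
	.name = "example-card",		/* placeholder card */
};

static int example_machine_probe(struct platform_device *pdev)
{
	example_card.dev = &pdev->dev;

	/* The card is torn down automatically when pdev->dev goes away. */
	return devm_snd_soc_register_card(&pdev->dev, &example_card);
}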
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index e29ec3cd84b1..ee0790337ec9 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -25,7 +25,7 @@
#include <sound/dmaengine_pcm.h>
struct dmaengine_pcm {
- struct dma_chan *chan[SNDRV_PCM_STREAM_CAPTURE + 1];
+ struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
const struct snd_dmaengine_pcm_config *config;
struct snd_soc_platform platform;
unsigned int flags;
@@ -36,6 +36,15 @@ static struct dmaengine_pcm *soc_platform_to_pcm(struct snd_soc_platform *p)
return container_of(p, struct dmaengine_pcm, platform);
}
+static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
+ struct snd_pcm_substream *substream)
+{
+ if (!pcm->chan[substream->stream])
+ return NULL;
+
+ return pcm->chan[substream->stream]->device->dev;
+}
+
/**
* snd_dmaengine_pcm_prepare_slave_config() - Generic prepare_slave_config callback
* @substream: PCM substream
@@ -75,12 +84,19 @@ static int dmaengine_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
+ int (*prepare_slave_config)(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
struct dma_slave_config slave_config;
int ret;
- if (pcm->config->prepare_slave_config) {
- ret = pcm->config->prepare_slave_config(substream, params,
- &slave_config);
+ if (!pcm->config)
+ prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
+ else
+ prepare_slave_config = pcm->config->prepare_slave_config;
+
+ if (prepare_slave_config) {
+ ret = prepare_slave_config(substream, params, &slave_config);
if (ret)
return ret;
@@ -92,28 +108,54 @@ static int dmaengine_pcm_hw_params(struct snd_pcm_substream *substream,
return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
}
-static int dmaengine_pcm_open(struct snd_pcm_substream *substream)
+static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
+ struct device *dma_dev = dmaengine_dma_dev(pcm, substream);
struct dma_chan *chan = pcm->chan[substream->stream];
+ struct snd_dmaengine_dai_dma_data *dma_data;
+ struct dma_slave_caps dma_caps;
+ struct snd_pcm_hardware hw;
int ret;
- ret = snd_soc_set_runtime_hwparams(substream,
+ if (pcm->config && pcm->config->pcm_hardware)
+ return snd_soc_set_runtime_hwparams(substream,
pcm->config->pcm_hardware);
- if (ret)
- return ret;
- return snd_dmaengine_pcm_open(substream, chan);
+ dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+ memset(&hw, 0, sizeof(hw));
+ hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED;
+ hw.periods_min = 2;
+ hw.periods_max = UINT_MAX;
+ hw.period_bytes_min = 256;
+ hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
+ hw.buffer_bytes_max = SIZE_MAX;
+ hw.fifo_size = dma_data->fifo_size;
+
+ ret = dma_get_slave_caps(chan, &dma_caps);
+ if (ret == 0) {
+ if (dma_caps.cmd_pause)
+ hw.info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
+ }
+
+ return snd_soc_set_runtime_hwparams(substream, &hw);
}
-static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
- struct snd_pcm_substream *substream)
+static int dmaengine_pcm_open(struct snd_pcm_substream *substream)
{
- if (!pcm->chan[substream->stream])
- return NULL;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
+ struct dma_chan *chan = pcm->chan[substream->stream];
+ int ret;
- return pcm->chan[substream->stream]->device->dev;
+ ret = dmaengine_pcm_set_runtime_hwparams(substream);
+ if (ret)
+ return ret;
+
+ return snd_dmaengine_pcm_open(substream, chan);
}
static void dmaengine_pcm_free(struct snd_pcm *pcm)
@@ -126,6 +168,9 @@ static struct dma_chan *dmaengine_pcm_compat_request_channel(
struct snd_pcm_substream *substream)
{
struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
+ struct snd_dmaengine_dai_dma_data *dma_data;
+
+ dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) && pcm->chan[0])
return pcm->chan[0];
@@ -134,22 +179,42 @@ static struct dma_chan *dmaengine_pcm_compat_request_channel(
return pcm->config->compat_request_channel(rtd, substream);
return snd_dmaengine_pcm_request_channel(pcm->config->compat_filter_fn,
- snd_soc_dai_get_dma_data(rtd->cpu_dai, substream));
+ dma_data->filter_data);
}
static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
const struct snd_dmaengine_pcm_config *config = pcm->config;
+ struct device *dev = rtd->platform->dev;
+ struct snd_dmaengine_dai_dma_data *dma_data;
struct snd_pcm_substream *substream;
+ size_t prealloc_buffer_size;
+ size_t max_buffer_size;
unsigned int i;
int ret;
+ if (config && config->prealloc_buffer_size) {
+ prealloc_buffer_size = config->prealloc_buffer_size;
+ max_buffer_size = config->pcm_hardware->buffer_bytes_max;
+ } else {
+ prealloc_buffer_size = 512 * 1024;
+ max_buffer_size = SIZE_MAX;
+ }
+
for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
substream = rtd->pcm->streams[i].substream;
if (!substream)
continue;
+ dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+ if (!pcm->chan[i] &&
+ (pcm->flags & SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME))
+ pcm->chan[i] = dma_request_slave_channel(dev,
+ dma_data->chan_name);
+
if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
pcm->chan[i] = dmaengine_pcm_compat_request_channel(rtd,
substream);
@@ -165,8 +230,8 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
ret = snd_pcm_lib_preallocate_pages(substream,
SNDRV_DMA_TYPE_DEV,
dmaengine_dma_dev(pcm, substream),
- config->prealloc_buffer_size,
- config->pcm_hardware->buffer_bytes_max);
+ prealloc_buffer_size,
+ max_buffer_size);
if (ret)
goto err_free;
}
@@ -222,7 +287,9 @@ static void dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
{
unsigned int i;
- if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_DT) || !dev->of_node)
+ if ((pcm->flags & (SND_DMAENGINE_PCM_FLAG_NO_DT |
+ SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME)) ||
+ !dev->of_node)
return;
if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) {
diff --git a/sound/soc/soc-io.c b/sound/soc/soc-io.c
index 122c0c18b9dd..4f11d23f2062 100644
--- a/sound/soc/soc-io.c
+++ b/sound/soc/soc-io.c
@@ -65,31 +65,6 @@ static unsigned int hw_read(struct snd_soc_codec *codec, unsigned int reg)
return val;
}
-/* Primitive bulk write support for soc-cache. The data pointed to by
- * `data' needs to already be in the form the hardware expects. Any
- * data written through this function will not go through the cache as
- * it only handles writing to volatile or out of bounds registers.
- *
- * This is currently only supported for devices using the regmap API
- * wrappers.
- */
-static int snd_soc_hw_bulk_write_raw(struct snd_soc_codec *codec,
- unsigned int reg,
- const void *data, size_t len)
-{
- /* To ensure that we don't get out of sync with the cache, check
- * whether the base register is volatile or if we've directly asked
- * to bypass the cache. Out of bounds registers are considered
- * volatile.
- */
- if (!codec->cache_bypass
- && !snd_soc_codec_volatile_register(codec, reg)
- && reg < codec->driver->reg_cache_size)
- return -EINVAL;
-
- return regmap_raw_write(codec->control_data, reg, data, len);
-}
-
/**
* snd_soc_codec_set_cache_io: Set up standard I/O functions.
*
@@ -119,7 +94,6 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
memset(&config, 0, sizeof(config));
codec->write = hw_write;
codec->read = hw_read;
- codec->bulk_write_raw = snd_soc_hw_bulk_write_raw;
config.reg_bits = addr_bits;
config.val_bits = data_bits;
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 71358e3b54d9..23d43dac91da 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -65,6 +65,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
struct snd_soc_codec *codec;
struct snd_soc_dapm_context *dapm;
struct snd_soc_jack_pin *pin;
+ unsigned int sync = 0;
int enable;
trace_snd_soc_jack_report(jack, mask, status);
@@ -92,12 +93,16 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
snd_soc_dapm_enable_pin(dapm, pin->pin);
else
snd_soc_dapm_disable_pin(dapm, pin->pin);
+
+ /* we need to sync for this case only */
+ sync = 1;
}
/* Report before the DAPM sync to help users updating micbias status */
blocking_notifier_call_chain(&jack->notifier, jack->status, jack);
- snd_soc_dapm_sync(dapm);
+ if (sync)
+ snd_soc_dapm_sync(dapm);
snd_jack_report(jack->jack, jack->status);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 330c9a6b5cb5..591f0f3074c5 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -190,7 +190,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
/* startup the audio subsystem */
- if (cpu_dai->driver->ops->startup) {
+ if (cpu_dai->driver->ops && cpu_dai->driver->ops->startup) {
ret = cpu_dai->driver->ops->startup(substream, cpu_dai);
if (ret < 0) {
dev_err(cpu_dai->dev, "ASoC: can't open interface"
@@ -208,7 +208,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
}
}
- if (codec_dai->driver->ops->startup) {
+ if (codec_dai->driver->ops && codec_dai->driver->ops->startup) {
ret = codec_dai->driver->ops->startup(substream, codec_dai);
if (ret < 0) {
dev_err(codec_dai->dev, "ASoC: can't open codec"
@@ -463,7 +463,7 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
}
}
- if (codec_dai->driver->ops->prepare) {
+ if (codec_dai->driver->ops && codec_dai->driver->ops->prepare) {
ret = codec_dai->driver->ops->prepare(substream, codec_dai);
if (ret < 0) {
dev_err(codec_dai->dev, "ASoC: DAI prepare error: %d\n",
@@ -472,7 +472,7 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
}
}
- if (cpu_dai->driver->ops->prepare) {
+ if (cpu_dai->driver->ops && cpu_dai->driver->ops->prepare) {
ret = cpu_dai->driver->ops->prepare(substream, cpu_dai);
if (ret < 0) {
dev_err(cpu_dai->dev, "ASoC: DAI prepare error: %d\n",
@@ -523,7 +523,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
}
}
- if (codec_dai->driver->ops->hw_params) {
+ if (codec_dai->driver->ops && codec_dai->driver->ops->hw_params) {
ret = codec_dai->driver->ops->hw_params(substream, params, codec_dai);
if (ret < 0) {
dev_err(codec_dai->dev, "ASoC: can't set %s hw params:"
@@ -532,7 +532,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
}
}
- if (cpu_dai->driver->ops->hw_params) {
+ if (cpu_dai->driver->ops && cpu_dai->driver->ops->hw_params) {
ret = cpu_dai->driver->ops->hw_params(substream, params, cpu_dai);
if (ret < 0) {
dev_err(cpu_dai->dev, "ASoC: %s hw params failed: %d\n",
@@ -559,11 +559,11 @@ out:
return ret;
platform_err:
- if (cpu_dai->driver->ops->hw_free)
+ if (cpu_dai->driver->ops && cpu_dai->driver->ops->hw_free)
cpu_dai->driver->ops->hw_free(substream, cpu_dai);
interface_err:
- if (codec_dai->driver->ops->hw_free)
+ if (codec_dai->driver->ops && codec_dai->driver->ops->hw_free)
codec_dai->driver->ops->hw_free(substream, codec_dai);
codec_err:
@@ -600,10 +600,10 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
platform->driver->ops->hw_free(substream);
/* now free hw params for the DAIs */
- if (codec_dai->driver->ops->hw_free)
+ if (codec_dai->driver->ops && codec_dai->driver->ops->hw_free)
codec_dai->driver->ops->hw_free(substream, codec_dai);
- if (cpu_dai->driver->ops->hw_free)
+ if (cpu_dai->driver->ops && cpu_dai->driver->ops->hw_free)
cpu_dai->driver->ops->hw_free(substream, cpu_dai);
mutex_unlock(&rtd->pcm_mutex);
@@ -618,7 +618,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
struct snd_soc_dai *codec_dai = rtd->codec_dai;
int ret;
- if (codec_dai->driver->ops->trigger) {
+ if (codec_dai->driver->ops && codec_dai->driver->ops->trigger) {
ret = codec_dai->driver->ops->trigger(substream, cmd, codec_dai);
if (ret < 0)
return ret;
@@ -630,7 +630,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
- if (cpu_dai->driver->ops->trigger) {
+ if (cpu_dai->driver->ops && cpu_dai->driver->ops->trigger) {
ret = cpu_dai->driver->ops->trigger(substream, cmd, cpu_dai);
if (ret < 0)
return ret;
@@ -647,19 +647,20 @@ static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai = rtd->codec_dai;
int ret;
- if (codec_dai->driver->ops->bespoke_trigger) {
+ if (codec_dai->driver->ops &&
+ codec_dai->driver->ops->bespoke_trigger) {
ret = codec_dai->driver->ops->bespoke_trigger(substream, cmd, codec_dai);
if (ret < 0)
return ret;
}
- if (platform->driver->bespoke_trigger) {
+ if (platform->driver->ops && platform->driver->bespoke_trigger) {
ret = platform->driver->bespoke_trigger(substream, cmd);
if (ret < 0)
return ret;
}
- if (cpu_dai->driver->ops->bespoke_trigger) {
+ if (cpu_dai->driver->ops && cpu_dai->driver->ops->bespoke_trigger) {
ret = cpu_dai->driver->ops->bespoke_trigger(substream, cmd, cpu_dai);
if (ret < 0)
return ret;
@@ -684,10 +685,10 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
if (platform->driver->ops && platform->driver->ops->pointer)
offset = platform->driver->ops->pointer(substream);
- if (cpu_dai->driver->ops->delay)
+ if (cpu_dai->driver->ops && cpu_dai->driver->ops->delay)
delay += cpu_dai->driver->ops->delay(substream, cpu_dai);
- if (codec_dai->driver->ops->delay)
+ if (codec_dai->driver->ops && codec_dai->driver->ops->delay)
delay += codec_dai->driver->ops->delay(substream, codec_dai);
if (platform->driver->delay)
@@ -721,7 +722,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
list_add(&dpcm->list_be, &fe->dpcm[stream].be_clients);
list_add(&dpcm->list_fe, &be->dpcm[stream].fe_clients);
- dev_dbg(fe->dev, " connected new DPCM %s path %s %s %s\n",
+ dev_dbg(fe->dev, "connected new DPCM %s path %s %s %s\n",
stream ? "capture" : "playback", fe->dai_link->name,
stream ? "<-" : "->", be->dai_link->name);
@@ -749,7 +750,7 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
if (dpcm->fe == fe)
continue;
- dev_dbg(fe->dev, " reparent %s path %s %s %s\n",
+ dev_dbg(fe->dev, "reparent %s path %s %s %s\n",
stream ? "capture" : "playback",
dpcm->fe->dai_link->name,
stream ? "<-" : "->", dpcm->be->dai_link->name);
@@ -773,7 +774,7 @@ static void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
if (dpcm->state != SND_SOC_DPCM_LINK_STATE_FREE)
continue;
- dev_dbg(fe->dev, " freed DSP %s path %s %s %s\n",
+ dev_dbg(fe->dev, "freed DSP %s path %s %s %s\n",
stream ? "capture" : "playback", fe->dai_link->name,
stream ? "<-" : "->", dpcm->be->dai_link->name);
@@ -1037,6 +1038,12 @@ static int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
struct snd_pcm_substream *be_substream =
snd_soc_dpcm_get_substream(be, stream);
+ if (!be_substream) {
+ dev_err(be->dev, "ASoC: no backend %s stream\n",
+ stream ? "capture" : "playback");
+ continue;
+ }
+
/* is this op for this BE ? */
if (!snd_soc_dpcm_be_can_update(fe, be, stream))
continue;
@@ -1054,7 +1061,8 @@ static int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE))
continue;
- dev_dbg(be->dev, "ASoC: open BE %s\n", be->dai_link->name);
+ dev_dbg(be->dev, "ASoC: open %s BE %s\n",
+ stream ? "capture" : "playback", be->dai_link->name);
be_substream->runtime = be->dpcm[stream].runtime;
err = soc_pcm_open(be_substream);
@@ -1673,7 +1681,7 @@ static int soc_pcm_ioctl(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_platform *platform = rtd->platform;
- if (platform->driver->ops->ioctl)
+ if (platform->driver->ops && platform->driver->ops->ioctl)
return platform->driver->ops->ioctl(substream, cmd, arg);
return snd_pcm_lib_ioctl(substream, cmd, arg);
}
@@ -1934,8 +1942,8 @@ int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute)
dev_dbg(be->dev, "ASoC: BE digital mute %s\n", be->dai_link->name);
- if (drv->ops->digital_mute && dai->playback_active)
- drv->ops->digital_mute(dai, mute);
+ if (drv->ops && drv->ops->digital_mute && dai->playback_active)
+ drv->ops->digital_mute(dai, mute);
}
return 0;
@@ -2116,7 +2124,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
pcm->private_free = platform->driver->pcm_free;
out:
- dev_info(rtd->card->dev, " %s <-> %s mapping ok\n", codec_dai->name,
+ dev_info(rtd->card->dev, "%s <-> %s mapping ok\n", codec_dai->name,
cpu_dai->name);
return ret;
}
@@ -2224,7 +2232,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params);
int snd_soc_platform_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_platform *platform)
{
- if (platform->driver->ops->trigger)
+ if (platform->driver->ops && platform->driver->ops->trigger)
return platform->driver->ops->trigger(substream, cmd);
return 0;
}
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 29b211e9c060..5e633659c1b3 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -75,7 +75,11 @@ static const struct snd_pcm_hardware dummy_dma_hardware = {
static int dummy_dma_open(struct snd_pcm_substream *substream)
{
- snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ /* BEs don't need dummy params */
+ if (!rtd->dai_link->no_pcm)
+ snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
return 0;
}
diff --git a/sound/soc/spear/spdif_in.c b/sound/soc/spear/spdif_in.c
index 63acfeb4b69d..21a8c954af1c 100644
--- a/sound/soc/spear/spdif_in.c
+++ b/sound/soc/spear/spdif_in.c
@@ -257,20 +257,12 @@ static int spdif_in_probe(struct platform_device *pdev)
return ret;
}
- return snd_soc_register_component(&pdev->dev, &spdif_in_component,
- &spdif_in_dai, 1);
-}
-
-static int spdif_in_remove(struct platform_device *pdev)
-{
- snd_soc_unregister_component(&pdev->dev);
-
- return 0;
+ return devm_snd_soc_register_component(&pdev->dev, &spdif_in_component,
+ &spdif_in_dai, 1);
}
static struct platform_driver spdif_in_driver = {
.probe = spdif_in_probe,
- .remove = spdif_in_remove,
.driver = {
.name = "spdif-in",
.owner = THIS_MODULE,
diff --git a/sound/soc/spear/spdif_out.c b/sound/soc/spear/spdif_out.c
index 2fdf68c98d22..b6ef6f78dc78 100644
--- a/sound/soc/spear/spdif_out.c
+++ b/sound/soc/spear/spdif_out.c
@@ -280,7 +280,6 @@ static int spdif_out_probe(struct platform_device *pdev)
struct spdif_out_dev *host;
struct spear_spdif_platform_data *pdata;
struct resource *res;
- int ret;
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
if (!host) {
@@ -307,16 +306,8 @@ static int spdif_out_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, host);
- ret = snd_soc_register_component(&pdev->dev, &spdif_out_component,
- &spdif_out_dai, 1);
- return ret;
-}
-
-static int spdif_out_remove(struct platform_device *pdev)
-{
- snd_soc_unregister_component(&pdev->dev);
-
- return 0;
+ return devm_snd_soc_register_component(&pdev->dev, &spdif_out_component,
+ &spdif_out_dai, 1);
}
#ifdef CONFIG_PM
@@ -357,7 +348,6 @@ static SIMPLE_DEV_PM_OPS(spdif_out_dev_pm_ops, spdif_out_suspend, \
static struct platform_driver spdif_out_driver = {
.probe = spdif_out_probe,
- .remove = spdif_out_remove,
.driver = {
.name = "spdif-out",
.owner = THIS_MODULE,
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 52af7f6fb37f..364bf6a907e1 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -297,7 +297,7 @@ static bool tegra20_i2s_wr_rd_reg(struct device *dev, unsigned int reg)
return true;
default:
return false;
- };
+ }
}
static bool tegra20_i2s_volatile_reg(struct device *dev, unsigned int reg)
@@ -310,7 +310,7 @@ static bool tegra20_i2s_volatile_reg(struct device *dev, unsigned int reg)
return true;
default:
return false;
- };
+ }
}
static bool tegra20_i2s_precious_reg(struct device *dev, unsigned int reg)
@@ -321,7 +321,7 @@ static bool tegra20_i2s_precious_reg(struct device *dev, unsigned int reg)
return true;
default:
return false;
- };
+ }
}
static const struct regmap_config tegra20_i2s_regmap_config = {
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 551b3c93ce93..08bc6931c7c7 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -213,7 +213,7 @@ static bool tegra20_spdif_wr_rd_reg(struct device *dev, unsigned int reg)
return true;
default:
return false;
- };
+ }
}
static bool tegra20_spdif_volatile_reg(struct device *dev, unsigned int reg)
@@ -234,7 +234,7 @@ static bool tegra20_spdif_volatile_reg(struct device *dev, unsigned int reg)
return true;
default:
return false;
- };
+ }
}
static bool tegra20_spdif_precious_reg(struct device *dev, unsigned int reg)
@@ -247,7 +247,7 @@ static bool tegra20_spdif_precious_reg(struct device *dev, unsigned int reg)
return true;
default:
return false;
- };
+ }
}
static const struct regmap_config tegra20_spdif_regmap_config = {
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
index d554d46d08b5..31154338c1eb 100644
--- a/sound/soc/tegra/tegra30_ahub.c
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -100,6 +100,7 @@ int tegra30_ahub_allocate_rx_fifo(enum tegra30_ahub_rxcif *rxcif,
{
int channel;
u32 reg, val;
+ struct tegra30_ahub_cif_conf cif_conf;
channel = find_first_zero_bit(ahub->rx_usage,
TEGRA30_AHUB_CHANNEL_CTRL_COUNT);
@@ -123,15 +124,21 @@ int tegra30_ahub_allocate_rx_fifo(enum tegra30_ahub_rxcif *rxcif,
TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_16;
tegra30_apbif_write(reg, val);
+ cif_conf.threshold = 0;
+ cif_conf.audio_channels = 2;
+ cif_conf.client_channels = 2;
+ cif_conf.audio_bits = TEGRA30_AUDIOCIF_BITS_16;
+ cif_conf.client_bits = TEGRA30_AUDIOCIF_BITS_16;
+ cif_conf.expand = 0;
+ cif_conf.stereo_conv = 0;
+ cif_conf.replicate = 0;
+ cif_conf.direction = TEGRA30_AUDIOCIF_DIRECTION_RX;
+ cif_conf.truncate = 0;
+ cif_conf.mono_conv = 0;
+
reg = TEGRA30_AHUB_CIF_RX_CTRL +
(channel * TEGRA30_AHUB_CIF_RX_CTRL_STRIDE);
- val = (0 << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
- (1 << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
- (1 << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
- TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_16 |
- TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_16 |
- TEGRA30_AUDIOCIF_CTRL_DIRECTION_RX;
- tegra30_apbif_write(reg, val);
+ ahub->soc_data->set_audio_cif(ahub->regmap_apbif, reg, &cif_conf);
return 0;
}
@@ -183,6 +190,7 @@ int tegra30_ahub_allocate_tx_fifo(enum tegra30_ahub_txcif *txcif,
{
int channel;
u32 reg, val;
+ struct tegra30_ahub_cif_conf cif_conf;
channel = find_first_zero_bit(ahub->tx_usage,
TEGRA30_AHUB_CHANNEL_CTRL_COUNT);
@@ -206,15 +214,21 @@ int tegra30_ahub_allocate_tx_fifo(enum tegra30_ahub_txcif *txcif,
TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_16;
tegra30_apbif_write(reg, val);
+ cif_conf.threshold = 0;
+ cif_conf.audio_channels = 2;
+ cif_conf.client_channels = 2;
+ cif_conf.audio_bits = TEGRA30_AUDIOCIF_BITS_16;
+ cif_conf.client_bits = TEGRA30_AUDIOCIF_BITS_16;
+ cif_conf.expand = 0;
+ cif_conf.stereo_conv = 0;
+ cif_conf.replicate = 0;
+ cif_conf.direction = TEGRA30_AUDIOCIF_DIRECTION_TX;
+ cif_conf.truncate = 0;
+ cif_conf.mono_conv = 0;
+
reg = TEGRA30_AHUB_CIF_TX_CTRL +
(channel * TEGRA30_AHUB_CIF_TX_CTRL_STRIDE);
- val = (0 << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
- (1 << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
- (1 << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
- TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_16 |
- TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_16 |
- TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
- tegra30_apbif_write(reg, val);
+ ahub->soc_data->set_audio_cif(ahub->regmap_apbif, reg, &cif_conf);
return 0;
}
@@ -346,7 +360,7 @@ static bool tegra30_ahub_apbif_wr_rd_reg(struct device *dev, unsigned int reg)
return true;
default:
break;
- };
+ }
if (REG_IN_ARRAY(reg, CHANNEL_CTRL) ||
REG_IN_ARRAY(reg, CHANNEL_CLEAR) ||
@@ -381,7 +395,7 @@ static bool tegra30_ahub_apbif_volatile_reg(struct device *dev,
return true;
default:
break;
- };
+ }
if (REG_IN_ARRAY(reg, CHANNEL_CLEAR) ||
REG_IN_ARRAY(reg, CHANNEL_STATUS) ||
@@ -437,13 +451,21 @@ static const struct regmap_config tegra30_ahub_ahub_regmap_config = {
static struct tegra30_ahub_soc_data soc_data_tegra30 = {
.clk_list_mask = CLK_LIST_MASK_TEGRA30,
+ .set_audio_cif = tegra30_ahub_set_cif,
};
static struct tegra30_ahub_soc_data soc_data_tegra114 = {
.clk_list_mask = CLK_LIST_MASK_TEGRA114,
+ .set_audio_cif = tegra30_ahub_set_cif,
+};
+
+static struct tegra30_ahub_soc_data soc_data_tegra124 = {
+ .clk_list_mask = CLK_LIST_MASK_TEGRA114,
+ .set_audio_cif = tegra124_ahub_set_cif,
};
static const struct of_device_id tegra30_ahub_of_match[] = {
+ { .compatible = "nvidia,tegra124-ahub", .data = &soc_data_tegra124 },
{ .compatible = "nvidia,tegra114-ahub", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra30-ahub", .data = &soc_data_tegra30 },
{},
@@ -497,6 +519,7 @@ static int tegra30_ahub_probe(struct platform_device *pdev)
}
dev_set_drvdata(&pdev->dev, ahub);
+ ahub->soc_data = soc_data;
ahub->dev = &pdev->dev;
ahub->clk_d_audio = clk_get(&pdev->dev, "d_audio");
@@ -669,6 +692,70 @@ static struct platform_driver tegra30_ahub_driver = {
};
module_platform_driver(tegra30_ahub_driver);
+void tegra30_ahub_set_cif(struct regmap *regmap, unsigned int reg,
+ struct tegra30_ahub_cif_conf *conf)
+{
+ unsigned int value;
+
+ value = (conf->threshold <<
+ TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
+ ((conf->audio_channels - 1) <<
+ TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
+ ((conf->client_channels - 1) <<
+ TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
+ (conf->audio_bits <<
+ TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT) |
+ (conf->client_bits <<
+ TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT) |
+ (conf->expand <<
+ TEGRA30_AUDIOCIF_CTRL_EXPAND_SHIFT) |
+ (conf->stereo_conv <<
+ TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_SHIFT) |
+ (conf->replicate <<
+ TEGRA30_AUDIOCIF_CTRL_REPLICATE_SHIFT) |
+ (conf->direction <<
+ TEGRA30_AUDIOCIF_CTRL_DIRECTION_SHIFT) |
+ (conf->truncate <<
+ TEGRA30_AUDIOCIF_CTRL_TRUNCATE_SHIFT) |
+ (conf->mono_conv <<
+ TEGRA30_AUDIOCIF_CTRL_MONO_CONV_SHIFT);
+
+ regmap_write(regmap, reg, value);
+}
+EXPORT_SYMBOL_GPL(tegra30_ahub_set_cif);
+
+void tegra124_ahub_set_cif(struct regmap *regmap, unsigned int reg,
+ struct tegra30_ahub_cif_conf *conf)
+{
+ unsigned int value;
+
+ value = (conf->threshold <<
+ TEGRA124_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
+ ((conf->audio_channels - 1) <<
+ TEGRA124_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
+ ((conf->client_channels - 1) <<
+ TEGRA124_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
+ (conf->audio_bits <<
+ TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT) |
+ (conf->client_bits <<
+ TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT) |
+ (conf->expand <<
+ TEGRA30_AUDIOCIF_CTRL_EXPAND_SHIFT) |
+ (conf->stereo_conv <<
+ TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_SHIFT) |
+ (conf->replicate <<
+ TEGRA30_AUDIOCIF_CTRL_REPLICATE_SHIFT) |
+ (conf->direction <<
+ TEGRA30_AUDIOCIF_CTRL_DIRECTION_SHIFT) |
+ (conf->truncate <<
+ TEGRA30_AUDIOCIF_CTRL_TRUNCATE_SHIFT) |
+ (conf->mono_conv <<
+ TEGRA30_AUDIOCIF_CTRL_MONO_CONV_SHIFT);
+
+ regmap_write(regmap, reg, value);
+}
+EXPORT_SYMBOL_GPL(tegra124_ahub_set_cif);
+
MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
MODULE_DESCRIPTION("Tegra30 AHUB driver");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/tegra/tegra30_ahub.h b/sound/soc/tegra/tegra30_ahub.h
index 09766cdc45ca..d67321d90faa 100644
--- a/sound/soc/tegra/tegra30_ahub.h
+++ b/sound/soc/tegra/tegra30_ahub.h
@@ -25,16 +25,30 @@
#define TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK_US 0xf
#define TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK (TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK_US << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT)
+#define TEGRA124_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT 24
+#define TEGRA124_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK_US 0x3f
+#define TEGRA124_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK (TEGRA124_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK_US << TEGRA124_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT)
+
/* Channel count minus 1 */
#define TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT 24
#define TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK_US 7
#define TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK (TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK_US << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT)
/* Channel count minus 1 */
+#define TEGRA124_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT 20
+#define TEGRA124_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK_US 0xf
+#define TEGRA124_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK (TEGRA124_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK_US << TEGRA124_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT)
+
+/* Channel count minus 1 */
#define TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT 16
#define TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK_US 7
#define TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK (TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK_US << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT)
+/* Channel count minus 1 */
+#define TEGRA124_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT 16
+#define TEGRA124_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK_US 0xf
+#define TEGRA124_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK (TEGRA124_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK_US << TEGRA124_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT)
+
#define TEGRA30_AUDIOCIF_BITS_4 0
#define TEGRA30_AUDIOCIF_BITS_8 1
#define TEGRA30_AUDIOCIF_BITS_12 2
@@ -86,7 +100,7 @@
#define TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_CH1 (TEGRA30_AUDIOCIF_STEREO_CONV_CH1 << TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_SHIFT)
#define TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_AVG (TEGRA30_AUDIOCIF_STEREO_CONV_AVG << TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_SHIFT)
-#define TEGRA30_AUDIOCIF_CTRL_REPLICATE 3
+#define TEGRA30_AUDIOCIF_CTRL_REPLICATE_SHIFT 3
#define TEGRA30_AUDIOCIF_DIRECTION_TX 0
#define TEGRA30_AUDIOCIF_DIRECTION_RX 1
@@ -468,8 +482,30 @@ extern int tegra30_ahub_set_rx_cif_source(enum tegra30_ahub_rxcif rxcif,
enum tegra30_ahub_txcif txcif);
extern int tegra30_ahub_unset_rx_cif_source(enum tegra30_ahub_rxcif rxcif);
+struct tegra30_ahub_cif_conf {
+ unsigned int threshold;
+ unsigned int audio_channels;
+ unsigned int client_channels;
+ unsigned int audio_bits;
+ unsigned int client_bits;
+ unsigned int expand;
+ unsigned int stereo_conv;
+ unsigned int replicate;
+ unsigned int direction;
+ unsigned int truncate;
+ unsigned int mono_conv;
+};
+
+void tegra30_ahub_set_cif(struct regmap *regmap, unsigned int reg,
+ struct tegra30_ahub_cif_conf *conf);
+void tegra124_ahub_set_cif(struct regmap *regmap, unsigned int reg,
+ struct tegra30_ahub_cif_conf *conf);
+
struct tegra30_ahub_soc_data {
u32 clk_list_mask;
+ void (*set_audio_cif)(struct regmap *regmap,
+ unsigned int reg,
+ struct tegra30_ahub_cif_conf *conf);
/*
* FIXME: There are many more differences in HW, such as:
* - More APBIF channels.
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index 47565fd04505..231a785b3921 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -30,6 +30,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -179,6 +180,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
unsigned int mask, val, reg;
int ret, sample_size, srate, i2sclock, bitcnt;
+ struct tegra30_ahub_cif_conf cif_conf;
if (params_channels(params) != 2)
return -EINVAL;
@@ -217,21 +219,26 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
regmap_write(i2s->regmap, TEGRA30_I2S_TIMING, val);
- val = (0 << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
- (1 << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
- (1 << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
- TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_16 |
- TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_16;
+ cif_conf.threshold = 0;
+ cif_conf.audio_channels = 2;
+ cif_conf.client_channels = 2;
+ cif_conf.audio_bits = TEGRA30_AUDIOCIF_BITS_16;
+ cif_conf.client_bits = TEGRA30_AUDIOCIF_BITS_16;
+ cif_conf.expand = 0;
+ cif_conf.stereo_conv = 0;
+ cif_conf.replicate = 0;
+ cif_conf.truncate = 0;
+ cif_conf.mono_conv = 0;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_RX;
+ cif_conf.direction = TEGRA30_AUDIOCIF_DIRECTION_RX;
reg = TEGRA30_I2S_CIF_RX_CTRL;
} else {
- val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
+ cif_conf.direction = TEGRA30_AUDIOCIF_DIRECTION_TX;
reg = TEGRA30_I2S_CIF_TX_CTRL;
}
- regmap_write(i2s->regmap, reg, val);
+ i2s->soc_data->set_audio_cif(i2s->regmap, reg, &cif_conf);
val = (1 << TEGRA30_I2S_OFFSET_RX_DATA_OFFSET_SHIFT) |
(1 << TEGRA30_I2S_OFFSET_TX_DATA_OFFSET_SHIFT);
@@ -369,7 +376,7 @@ static bool tegra30_i2s_wr_rd_reg(struct device *dev, unsigned int reg)
return true;
default:
return false;
- };
+ }
}
static bool tegra30_i2s_volatile_reg(struct device *dev, unsigned int reg)
@@ -382,7 +389,7 @@ static bool tegra30_i2s_volatile_reg(struct device *dev, unsigned int reg)
return true;
default:
return false;
- };
+ }
}
static const struct regmap_config tegra30_i2s_regmap_config = {
@@ -396,9 +403,24 @@ static const struct regmap_config tegra30_i2s_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
+static const struct tegra30_i2s_soc_data tegra30_i2s_config = {
+ .set_audio_cif = tegra30_ahub_set_cif,
+};
+
+static const struct tegra30_i2s_soc_data tegra124_i2s_config = {
+ .set_audio_cif = tegra124_ahub_set_cif,
+};
+
+static const struct of_device_id tegra30_i2s_of_match[] = {
+ { .compatible = "nvidia,tegra124-i2s", .data = &tegra124_i2s_config },
+ { .compatible = "nvidia,tegra30-i2s", .data = &tegra30_i2s_config },
+ {},
+};
+
static int tegra30_i2s_platform_probe(struct platform_device *pdev)
{
struct tegra30_i2s *i2s;
+ const struct of_device_id *match;
u32 cif_ids[2];
struct resource *mem, *memregion;
void __iomem *regs;
@@ -412,6 +434,14 @@ static int tegra30_i2s_platform_probe(struct platform_device *pdev)
}
dev_set_drvdata(&pdev->dev, i2s);
+ match = of_match_device(tegra30_i2s_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ i2s->soc_data = (struct tegra30_i2s_soc_data *)match->data;
+
i2s->dai = tegra30_i2s_dai_template;
i2s->dai.name = dev_name(&pdev->dev);
@@ -539,11 +569,6 @@ static int tegra30_i2s_resume(struct device *dev)
}
#endif
-static const struct of_device_id tegra30_i2s_of_match[] = {
- { .compatible = "nvidia,tegra30-i2s", },
- {},
-};
-
static const struct dev_pm_ops tegra30_i2s_pm_ops = {
SET_RUNTIME_PM_OPS(tegra30_i2s_runtime_suspend,
tegra30_i2s_runtime_resume, NULL)
diff --git a/sound/soc/tegra/tegra30_i2s.h b/sound/soc/tegra/tegra30_i2s.h
index bea23afe3b9f..4d0b0a30dbfb 100644
--- a/sound/soc/tegra/tegra30_i2s.h
+++ b/sound/soc/tegra/tegra30_i2s.h
@@ -225,7 +225,14 @@
#define TEGRA30_I2S_LCOEF_COEF_MASK_US 0xffff
#define TEGRA30_I2S_LCOEF_COEF_MASK (TEGRA30_I2S_LCOEF_COEF_MASK_US << TEGRA30_I2S_LCOEF_COEF_SHIFT)
+struct tegra30_i2s_soc_data {
+ void (*set_audio_cif)(struct regmap *regmap,
+ unsigned int reg,
+ struct tegra30_ahub_cif_conf *conf);
+};
+
struct tegra30_i2s {
+ const struct tegra30_i2s_soc_data *soc_data;
struct snd_soc_dai_driver dai;
int cif_id;
struct clk *clk_i2s;
diff --git a/sound/soc/tegra/tegra_asoc_utils.c b/sound/soc/tegra/tegra_asoc_utils.c
index d173880f290d..1be311c51a18 100644
--- a/sound/soc/tegra/tegra_asoc_utils.c
+++ b/sound/soc/tegra/tegra_asoc_utils.c
@@ -182,6 +182,8 @@ int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
data->soc = TEGRA_ASOC_UTILS_SOC_TEGRA30;
else if (of_machine_is_compatible("nvidia,tegra114"))
data->soc = TEGRA_ASOC_UTILS_SOC_TEGRA114;
+ else if (of_machine_is_compatible("nvidia,tegra124"))
+ data->soc = TEGRA_ASOC_UTILS_SOC_TEGRA124;
else {
dev_err(data->dev, "SoC unknown to Tegra ASoC utils\n");
return -EINVAL;
diff --git a/sound/soc/tegra/tegra_asoc_utils.h b/sound/soc/tegra/tegra_asoc_utils.h
index 19fdcafed32f..9577121ce971 100644
--- a/sound/soc/tegra/tegra_asoc_utils.h
+++ b/sound/soc/tegra/tegra_asoc_utils.h
@@ -30,6 +30,7 @@ enum tegra_asoc_utils_soc {
TEGRA_ASOC_UTILS_SOC_TEGRA20,
TEGRA_ASOC_UTILS_SOC_TEGRA30,
TEGRA_ASOC_UTILS_SOC_TEGRA114,
+ TEGRA_ASOC_UTILS_SOC_TEGRA124,
};
struct tegra_asoc_utils_data {
diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c
index f056f632557c..7b2d23ba69b3 100644
--- a/sound/soc/tegra/tegra_pcm.c
+++ b/sound/soc/tegra/tegra_pcm.c
@@ -56,7 +56,6 @@ static const struct snd_pcm_hardware tegra_pcm_hardware = {
static const struct snd_dmaengine_pcm_config tegra_dmaengine_pcm_config = {
.pcm_hardware = &tegra_pcm_hardware,
.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
- .compat_filter_fn = NULL,
.prealloc_buffer_size = PAGE_SIZE * 8,
};
diff --git a/sound/usb/6fire/chip.c b/sound/usb/6fire/chip.c
index c39c77978468..66edc4a7917f 100644
--- a/sound/usb/6fire/chip.c
+++ b/sound/usb/6fire/chip.c
@@ -101,7 +101,7 @@ static int usb6fire_chip_probe(struct usb_interface *intf,
usb_set_intfdata(intf, chips[i]);
mutex_unlock(&register_mutex);
return 0;
- } else if (regidx < 0)
+ } else if (!devices[i] && regidx < 0)
regidx = i;
}
if (regidx < 0) {
diff --git a/sound/usb/caiaq/control.c b/sound/usb/caiaq/control.c
index ae6b50f9ed56..f65fc0987cfb 100644
--- a/sound/usb/caiaq/control.c
+++ b/sound/usb/caiaq/control.c
@@ -28,6 +28,7 @@
#include "control.h"
#define CNT_INTVAL 0x10000
+#define MASCHINE_BANK_SIZE 32
static int control_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
@@ -105,6 +106,10 @@ static int control_put(struct snd_kcontrol *kcontrol,
USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1))
cmd = EP1_CMD_DIMM_LEDS;
+ if (cdev->chip.usb_id ==
+ USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER))
+ cmd = EP1_CMD_DIMM_LEDS;
+
if (pos & CNT_INTVAL) {
int i = pos & ~CNT_INTVAL;
@@ -121,6 +126,20 @@ static int control_put(struct snd_kcontrol *kcontrol,
usb_sndbulkpipe(cdev->chip.dev, 8),
cdev->ep8_out_buf, sizeof(cdev->ep8_out_buf),
&actual_len, 200);
+ } else if (cdev->chip.usb_id ==
+ USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER)) {
+
+ int bank = 0;
+ int offset = 0;
+
+ if (i >= MASCHINE_BANK_SIZE) {
+ bank = 0x1e;
+ offset = MASCHINE_BANK_SIZE;
+ }
+
+ snd_usb_caiaq_send_command_bank(cdev, cmd, bank,
+ cdev->control_state + offset,
+ MASCHINE_BANK_SIZE);
} else {
snd_usb_caiaq_send_command(cdev, cmd,
cdev->control_state, sizeof(cdev->control_state));
@@ -490,6 +509,74 @@ static struct caiaq_controller kontrols4_controller[] = {
{ "LED: FX2: Mode", 133 | CNT_INTVAL },
};
+static struct caiaq_controller maschine_controller[] = {
+ { "LED: Pad 1", 3 | CNT_INTVAL },
+ { "LED: Pad 2", 2 | CNT_INTVAL },
+ { "LED: Pad 3", 1 | CNT_INTVAL },
+ { "LED: Pad 4", 0 | CNT_INTVAL },
+ { "LED: Pad 5", 7 | CNT_INTVAL },
+ { "LED: Pad 6", 6 | CNT_INTVAL },
+ { "LED: Pad 7", 5 | CNT_INTVAL },
+ { "LED: Pad 8", 4 | CNT_INTVAL },
+ { "LED: Pad 9", 11 | CNT_INTVAL },
+ { "LED: Pad 10", 10 | CNT_INTVAL },
+ { "LED: Pad 11", 9 | CNT_INTVAL },
+ { "LED: Pad 12", 8 | CNT_INTVAL },
+ { "LED: Pad 13", 15 | CNT_INTVAL },
+ { "LED: Pad 14", 14 | CNT_INTVAL },
+ { "LED: Pad 15", 13 | CNT_INTVAL },
+ { "LED: Pad 16", 12 | CNT_INTVAL },
+
+ { "LED: Mute", 16 | CNT_INTVAL },
+ { "LED: Solo", 17 | CNT_INTVAL },
+ { "LED: Select", 18 | CNT_INTVAL },
+ { "LED: Duplicate", 19 | CNT_INTVAL },
+ { "LED: Navigate", 20 | CNT_INTVAL },
+ { "LED: Pad Mode", 21 | CNT_INTVAL },
+ { "LED: Pattern", 22 | CNT_INTVAL },
+ { "LED: Scene", 23 | CNT_INTVAL },
+
+ { "LED: Shift", 24 | CNT_INTVAL },
+ { "LED: Erase", 25 | CNT_INTVAL },
+ { "LED: Grid", 26 | CNT_INTVAL },
+ { "LED: Right Bottom", 27 | CNT_INTVAL },
+ { "LED: Rec", 28 | CNT_INTVAL },
+ { "LED: Play", 29 | CNT_INTVAL },
+ { "LED: Left Bottom", 32 | CNT_INTVAL },
+ { "LED: Restart", 33 | CNT_INTVAL },
+
+ { "LED: Group A", 41 | CNT_INTVAL },
+ { "LED: Group B", 40 | CNT_INTVAL },
+ { "LED: Group C", 37 | CNT_INTVAL },
+ { "LED: Group D", 36 | CNT_INTVAL },
+ { "LED: Group E", 39 | CNT_INTVAL },
+ { "LED: Group F", 38 | CNT_INTVAL },
+ { "LED: Group G", 35 | CNT_INTVAL },
+ { "LED: Group H", 34 | CNT_INTVAL },
+
+ { "LED: Auto Write", 42 | CNT_INTVAL },
+ { "LED: Snap", 43 | CNT_INTVAL },
+ { "LED: Right Top", 44 | CNT_INTVAL },
+ { "LED: Left Top", 45 | CNT_INTVAL },
+ { "LED: Sampling", 46 | CNT_INTVAL },
+ { "LED: Browse", 47 | CNT_INTVAL },
+ { "LED: Step", 48 | CNT_INTVAL },
+ { "LED: Control", 49 | CNT_INTVAL },
+
+ { "LED: Top Button 1", 57 | CNT_INTVAL },
+ { "LED: Top Button 2", 56 | CNT_INTVAL },
+ { "LED: Top Button 3", 55 | CNT_INTVAL },
+ { "LED: Top Button 4", 54 | CNT_INTVAL },
+ { "LED: Top Button 5", 53 | CNT_INTVAL },
+ { "LED: Top Button 6", 52 | CNT_INTVAL },
+ { "LED: Top Button 7", 51 | CNT_INTVAL },
+ { "LED: Top Button 8", 50 | CNT_INTVAL },
+
+ { "LED: Note Repeat", 58 | CNT_INTVAL },
+
+ { "Backlight Display", 59 | CNT_INTVAL }
+};
+
static int add_controls(struct caiaq_controller *c, int num,
struct snd_usb_caiaqdev *cdev)
{
@@ -553,6 +640,11 @@ int snd_usb_caiaq_control_init(struct snd_usb_caiaqdev *cdev)
ret = add_controls(kontrols4_controller,
ARRAY_SIZE(kontrols4_controller), cdev);
break;
+
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER):
+ ret = add_controls(maschine_controller,
+ ARRAY_SIZE(maschine_controller), cdev);
+ break;
}
return ret;
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 1a61dd12fe38..bc55f708a696 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -235,6 +235,31 @@ int snd_usb_caiaq_send_command(struct snd_usb_caiaqdev *cdev,
cdev->ep1_out_buf, len+1, &actual_len, 200);
}
+int snd_usb_caiaq_send_command_bank(struct snd_usb_caiaqdev *cdev,
+ unsigned char command,
+ unsigned char bank,
+ const unsigned char *buffer,
+ int len)
+{
+ int actual_len;
+ struct usb_device *usb_dev = cdev->chip.dev;
+
+ if (!usb_dev)
+ return -EIO;
+
+ if (len > EP1_BUFSIZE - 2)
+ len = EP1_BUFSIZE - 2;
+
+ if (buffer && len > 0)
+ memcpy(cdev->ep1_out_buf+2, buffer, len);
+
+ cdev->ep1_out_buf[0] = command;
+ cdev->ep1_out_buf[1] = bank;
+
+ return usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, 1),
+ cdev->ep1_out_buf, len+2, &actual_len, 200);
+}
+
int snd_usb_caiaq_set_audio_params (struct snd_usb_caiaqdev *cdev,
int rate, int depth, int bpp)
{
diff --git a/sound/usb/caiaq/device.h b/sound/usb/caiaq/device.h
index ad102fac6942..ab0f7520a99b 100644
--- a/sound/usb/caiaq/device.h
+++ b/sound/usb/caiaq/device.h
@@ -128,5 +128,10 @@ int snd_usb_caiaq_send_command(struct snd_usb_caiaqdev *cdev,
unsigned char command,
const unsigned char *buffer,
int len);
+int snd_usb_caiaq_send_command_bank(struct snd_usb_caiaqdev *cdev,
+ unsigned char command,
+ unsigned char bank,
+ const unsigned char *buffer,
+ int len);
#endif /* CAIAQ_DEVICE_H */
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 64952e2d3ed1..d979050e6a6a 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -79,7 +79,6 @@ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;/* Enable this card *
/* Vendor/product IDs for this card */
static int vid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
static int pid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
-static int nrpacks = 8; /* max. number of packets per urb */
static int device_setup[SNDRV_CARDS]; /* device parameter for this card */
static bool ignore_ctl_error;
static bool autoclock = true;
@@ -94,8 +93,6 @@ module_param_array(vid, int, NULL, 0444);
MODULE_PARM_DESC(vid, "Vendor ID for the USB audio device.");
module_param_array(pid, int, NULL, 0444);
MODULE_PARM_DESC(pid, "Product ID for the USB audio device.");
-module_param(nrpacks, int, 0644);
-MODULE_PARM_DESC(nrpacks, "Max. number of packets per URB.");
module_param_array(device_setup, int, NULL, 0444);
MODULE_PARM_DESC(device_setup, "Specific device setup (if needed).");
module_param(ignore_ctl_error, bool, 0444);
@@ -349,6 +346,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
case USB_SPEED_LOW:
case USB_SPEED_FULL:
case USB_SPEED_HIGH:
+ case USB_SPEED_WIRELESS:
case USB_SPEED_SUPER:
break;
default:
@@ -374,7 +372,6 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
chip->dev = dev;
chip->card = card;
chip->setup = device_setup[idx];
- chip->nrpacks = nrpacks;
chip->autoclock = autoclock;
chip->probing = 1;
@@ -754,19 +751,4 @@ static struct usb_driver usb_audio_driver = {
.supports_autosuspend = 1,
};
-static int __init snd_usb_audio_init(void)
-{
- if (nrpacks < 1 || nrpacks > MAX_PACKS) {
- printk(KERN_WARNING "invalid nrpacks value.\n");
- return -EINVAL;
- }
- return usb_register(&usb_audio_driver);
-}
-
-static void __exit snd_usb_audio_cleanup(void)
-{
- usb_deregister(&usb_audio_driver);
-}
-
-module_init(snd_usb_audio_init);
-module_exit(snd_usb_audio_cleanup);
+module_usb_driver(usb_audio_driver);
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 5ecacaa90b53..9867ab866857 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -2,11 +2,11 @@
#define __USBAUDIO_CARD_H
#define MAX_NR_RATES 1024
-#define MAX_PACKS 20
+#define MAX_PACKS 6 /* per URB */
#define MAX_PACKS_HS (MAX_PACKS * 8) /* in high speed mode */
-#define MAX_URBS 8
+#define MAX_URBS 12
#define SYNC_URBS 4 /* always four urbs for sync */
-#define MAX_QUEUE 24 /* try not to exceed this queue length, in ms */
+#define MAX_QUEUE 18 /* try not to exceed this queue length, in ms */
struct audioformat {
struct list_head list;
@@ -87,6 +87,7 @@ struct snd_usb_endpoint {
unsigned int phase; /* phase accumulator */
unsigned int maxpacksize; /* max packet size in bytes */
unsigned int maxframesize; /* max packet size in frames */
+ unsigned int max_urb_frames; /* max URB size in frames */
unsigned int curpacksize; /* current packet size in bytes (for capture) */
unsigned int curframesize; /* current packet size in frames (for capture) */
unsigned int syncmaxsize; /* sync endpoint packet size */
@@ -95,7 +96,7 @@ struct snd_usb_endpoint {
unsigned int syncinterval; /* P for adaptive mode, 0 otherwise */
unsigned char silence_value;
unsigned int stride;
- int iface, alt_idx;
+ int iface, altsetting;
int skip_packets; /* quirks for devices to ignore the first n packets
in a stream */
@@ -116,6 +117,8 @@ struct snd_usb_substream {
unsigned int channels_max; /* max channels in the all audiofmts */
unsigned int cur_rate; /* current rate (for hw_params callback) */
unsigned int period_bytes; /* current period bytes (for hw_params callback) */
+ unsigned int period_frames; /* current frames per period */
+ unsigned int buffer_periods; /* current periods per buffer */
unsigned int altset_idx; /* USB data format: index of alternate setting */
unsigned int txfr_quirk:1; /* allow sub-frame alignment */
unsigned int fmt_type; /* USB audio format type (1-3) */
@@ -125,6 +128,7 @@ struct snd_usb_substream {
unsigned int hwptr_done; /* processed byte position in the buffer */
unsigned int transfer_done; /* processed frames since last period update */
+ unsigned int frame_limit; /* limits number of packets in URB */
/* data and sync endpoints for this stream */
unsigned int ep_num; /* the endpoint number */
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 93e970f2b3c0..b9ba0fcc45df 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -33,7 +33,6 @@
#include "pcm.h"
#include "quirks.h"
-#define EP_FLAG_ACTIVATED 0
#define EP_FLAG_RUNNING 1
#define EP_FLAG_STOPPING 2
@@ -426,9 +425,9 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
list_for_each_entry(ep, &chip->ep_list, list) {
if (ep->ep_num == ep_num &&
ep->iface == alts->desc.bInterfaceNumber &&
- ep->alt_idx == alts->desc.bAlternateSetting) {
+ ep->altsetting == alts->desc.bAlternateSetting) {
snd_printdd(KERN_DEBUG "Re-using EP %x in iface %d,%d @%p\n",
- ep_num, ep->iface, ep->alt_idx, ep);
+ ep_num, ep->iface, ep->altsetting, ep);
goto __exit_unlock;
}
}
@@ -447,7 +446,7 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
ep->type = type;
ep->ep_num = ep_num;
ep->iface = alts->desc.bInterfaceNumber;
- ep->alt_idx = alts->desc.bAlternateSetting;
+ ep->altsetting = alts->desc.bAlternateSetting;
INIT_LIST_HEAD(&ep->ready_playback_urbs);
ep_num &= USB_ENDPOINT_NUMBER_MASK;
@@ -574,11 +573,14 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
snd_pcm_format_t pcm_format,
unsigned int channels,
unsigned int period_bytes,
+ unsigned int frames_per_period,
+ unsigned int periods_per_buffer,
struct audioformat *fmt,
struct snd_usb_endpoint *sync_ep)
{
- unsigned int maxsize, i, urb_packs, total_packs, packs_per_ms;
- int is_playback = usb_pipeout(ep->pipe);
+ unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb;
+ unsigned int max_packs_per_period, urbs_per_period, urb_packs;
+ unsigned int max_urbs, i;
int frame_bits = snd_pcm_format_physical_width(pcm_format) * channels;
if (pcm_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) {
@@ -611,58 +613,67 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
else
ep->curpacksize = maxsize;
- if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL)
+ if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL) {
packs_per_ms = 8 >> ep->datainterval;
- else
- packs_per_ms = 1;
-
- if (is_playback && !snd_usb_endpoint_implicit_feedback_sink(ep)) {
- urb_packs = max(ep->chip->nrpacks, 1);
- urb_packs = min(urb_packs, (unsigned int) MAX_PACKS);
+ max_packs_per_urb = MAX_PACKS_HS;
} else {
- urb_packs = 1;
+ packs_per_ms = 1;
+ max_packs_per_urb = MAX_PACKS;
}
+ if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep))
+ max_packs_per_urb = min(max_packs_per_urb,
+ 1U << sync_ep->syncinterval);
+ max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval);
- urb_packs *= packs_per_ms;
+ /*
+ * Capture endpoints need to use small URBs because there's no way
+ * to tell in advance where the next period will end, and we don't
+ * want the next URB to complete much after the period ends.
+ *
+	 * Playback endpoints with implicit sync must use the same parameters
+ * as their corresponding capture endpoint.
+ */
+ if (usb_pipein(ep->pipe) ||
+ snd_usb_endpoint_implicit_feedback_sink(ep)) {
- if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep))
- urb_packs = min(urb_packs, 1U << sync_ep->syncinterval);
+ /* make capture URBs <= 1 ms and smaller than a period */
+ urb_packs = min(max_packs_per_urb, packs_per_ms);
+ while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
+ urb_packs >>= 1;
+ ep->nurbs = MAX_URBS;
- /* decide how many packets to be used */
- if (is_playback && !snd_usb_endpoint_implicit_feedback_sink(ep)) {
- unsigned int minsize, maxpacks;
+ /*
+ * Playback endpoints without implicit sync are adjusted so that
+ * a period fits as evenly as possible in the smallest number of
+ * URBs. The total number of URBs is adjusted to the size of the
+ * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits.
+ */
+ } else {
/* determine how small a packet can be */
- minsize = (ep->freqn >> (16 - ep->datainterval))
- * (frame_bits >> 3);
+ minsize = (ep->freqn >> (16 - ep->datainterval)) *
+ (frame_bits >> 3);
/* with sync from device, assume it can be 12% lower */
if (sync_ep)
minsize -= minsize >> 3;
minsize = max(minsize, 1u);
- total_packs = (period_bytes + minsize - 1) / minsize;
- /* we need at least two URBs for queueing */
- if (total_packs < 2) {
- total_packs = 2;
- } else {
- /* and we don't want too long a queue either */
- maxpacks = max(MAX_QUEUE * packs_per_ms, urb_packs * 2);
- total_packs = min(total_packs, maxpacks);
- }
- } else {
- while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
- urb_packs >>= 1;
- total_packs = MAX_URBS * urb_packs;
- }
- ep->nurbs = (total_packs + urb_packs - 1) / urb_packs;
- if (ep->nurbs > MAX_URBS) {
- /* too much... */
- ep->nurbs = MAX_URBS;
- total_packs = MAX_URBS * urb_packs;
- } else if (ep->nurbs < 2) {
- /* too little - we need at least two packets
- * to ensure contiguous playback/capture
- */
- ep->nurbs = 2;
+ /* how many packets will contain an entire ALSA period? */
+ max_packs_per_period = DIV_ROUND_UP(period_bytes, minsize);
+
+ /* how many URBs will contain a period? */
+ urbs_per_period = DIV_ROUND_UP(max_packs_per_period,
+ max_packs_per_urb);
+ /* how many packets are needed in each URB? */
+ urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period);
+
+ /* limit the number of frames in a single URB */
+ ep->max_urb_frames = DIV_ROUND_UP(frames_per_period,
+ urbs_per_period);
+
+ /* try to use enough URBs to contain an entire ALSA buffer */
+ max_urbs = min((unsigned) MAX_URBS,
+ MAX_QUEUE * packs_per_ms / urb_packs);
+ ep->nurbs = min(max_urbs, urbs_per_period * periods_per_buffer);
}
/* allocate and initialize data urbs */
@@ -670,8 +681,7 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
struct snd_urb_ctx *u = &ep->urb[i];
u->index = i;
u->ep = ep;
- u->packets = (i + 1) * total_packs / ep->nurbs
- - i * total_packs / ep->nurbs;
+ u->packets = urb_packs;
u->buffer_size = maxsize * u->packets;
if (fmt->fmt_type == UAC_FORMAT_TYPE_II)
@@ -703,8 +713,7 @@ out_of_memory:
/*
* configure a sync endpoint
*/
-static int sync_ep_set_params(struct snd_usb_endpoint *ep,
- struct audioformat *fmt)
+static int sync_ep_set_params(struct snd_usb_endpoint *ep)
{
int i;
@@ -748,6 +757,8 @@ out_of_memory:
* @pcm_format: the audio fomat.
* @channels: the number of audio channels.
* @period_bytes: the number of bytes in one alsa period.
+ * @period_frames: the number of frames in one alsa period.
+ * @buffer_periods: the number of periods in one alsa buffer.
* @rate: the frame rate.
* @fmt: the USB audio format information
* @sync_ep: the sync endpoint to use, if any
@@ -760,6 +771,8 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
snd_pcm_format_t pcm_format,
unsigned int channels,
unsigned int period_bytes,
+ unsigned int period_frames,
+ unsigned int buffer_periods,
unsigned int rate,
struct audioformat *fmt,
struct snd_usb_endpoint *sync_ep)
@@ -793,10 +806,11 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
switch (ep->type) {
case SND_USB_ENDPOINT_TYPE_DATA:
err = data_ep_set_params(ep, pcm_format, channels,
- period_bytes, fmt, sync_ep);
+ period_bytes, period_frames,
+ buffer_periods, fmt, sync_ep);
break;
case SND_USB_ENDPOINT_TYPE_SYNC:
- err = sync_ep_set_params(ep, fmt);
+ err = sync_ep_set_params(ep);
break;
default:
err = -EINVAL;
@@ -931,28 +945,21 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
*
* @ep: the endpoint to deactivate
*
- * If the endpoint is not currently in use, this functions will select the
- * alternate interface setting 0 for the interface of this endpoint.
+ * If the endpoint is not currently in use, this function will
+ * deactivate its associated URBs.
*
* In case of any active users, this functions does nothing.
- *
- * Returns an error if usb_set_interface() failed, 0 in all other
- * cases.
*/
-int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
+void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
{
if (!ep)
- return -EINVAL;
-
- deactivate_urbs(ep, true);
- wait_clear_urbs(ep);
+ return;
if (ep->use_count != 0)
- return 0;
-
- clear_bit(EP_FLAG_ACTIVATED, &ep->flags);
+ return;
- return 0;
+ deactivate_urbs(ep, true);
+ wait_clear_urbs(ep);
}
/**
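For playback endpoints without implicit feedback, data_ep_set_params() now derives the URB geometry from the ALSA period and buffer sizes rather than from the removed nrpacks module parameter. A standalone sketch of that arithmetic follows; the helper and its parameter list are illustrative, while the DIV_ROUND_UP steps and the MAX_URBS/MAX_QUEUE bounds mirror the patch:

/*
 * Standalone sketch of the new playback URB sizing in data_ep_set_params();
 * the helper is illustrative, the arithmetic and constants mirror the patch.
 */
#define MAX_URBS	12
#define MAX_QUEUE	18	/* milliseconds */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MIN(a, b)		((a) < (b) ? (a) : (b))

static void size_playback_urbs(unsigned int period_bytes,
			       unsigned int frames_per_period,
			       unsigned int periods_per_buffer,
			       unsigned int minsize,		/* smallest packet, bytes */
			       unsigned int max_packs_per_urb,
			       unsigned int packs_per_ms,
			       unsigned int *urb_packs,
			       unsigned int *max_urb_frames,
			       unsigned int *nurbs)
{
	unsigned int max_packs_per_period, urbs_per_period, max_urbs;

	/* how many packets will contain an entire ALSA period? */
	max_packs_per_period = DIV_ROUND_UP(period_bytes, minsize);

	/* spread the period over the smallest number of URBs ... */
	urbs_per_period = DIV_ROUND_UP(max_packs_per_period, max_packs_per_urb);
	/* ... and pack each URB as evenly as possible */
	*urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period);

	/* cap the number of frames queued into any single URB */
	*max_urb_frames = DIV_ROUND_UP(frames_per_period, urbs_per_period);

	/* enough URBs for a whole ALSA buffer, bounded by MAX_URBS/MAX_QUEUE */
	max_urbs = MIN(MAX_URBS, MAX_QUEUE * packs_per_ms / *urb_packs);
	*nurbs = MIN(max_urbs, urbs_per_period * periods_per_buffer);
}

At high speed (packs_per_ms = 8) this keeps the queued audio at or below roughly MAX_QUEUE milliseconds while still trying to cover a full ALSA buffer.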
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index 2287adf5ca59..1c7e8ee48abc 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -12,6 +12,8 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
snd_pcm_format_t pcm_format,
unsigned int channels,
unsigned int period_bytes,
+ unsigned int period_frames,
+ unsigned int buffer_periods,
unsigned int rate,
struct audioformat *fmt,
struct snd_usb_endpoint *sync_ep);
@@ -20,7 +22,7 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep);
void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
-int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
+void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
void snd_usb_endpoint_free(struct list_head *head);
int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
diff --git a/sound/usb/helper.c b/sound/usb/helper.c
index 620902463c6e..51ed1ac825fd 100644
--- a/sound/usb/helper.c
+++ b/sound/usb/helper.c
@@ -118,6 +118,7 @@ unsigned char snd_usb_parse_datainterval(struct snd_usb_audio *chip,
{
switch (snd_usb_get_speed(chip->dev)) {
case USB_SPEED_HIGH:
+ case USB_SPEED_WIRELESS:
case USB_SPEED_SUPER:
if (get_endpoint(alts, 0)->bInterval >= 1 &&
get_endpoint(alts, 0)->bInterval <= 4)
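snd_usb_parse_datainterval() now treats USB_SPEED_WIRELESS like the other microframe-based speeds. The returned datainterval feeds the packs_per_ms = 8 >> datainterval computation in endpoint.c; a small standalone sketch of that mapping (the helper name is illustrative):

/*
 * Sketch of how the datainterval returned above maps to packets per
 * millisecond on microframe-based buses (high speed, wireless, super speed).
 */
static unsigned int packets_per_ms(unsigned char bInterval)
{
	/* bInterval 1..4 selects an interval of 2^(bInterval-1) microframes */
	unsigned char datainterval = (bInterval >= 1 && bInterval <= 4) ?
				     bInterval - 1 : 0;

	/* 8 microframes per millisecond */
	return 8 >> datainterval;
}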
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 95558ef4a7a0..44b0ba4feab3 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1151,14 +1151,14 @@ static void check_no_speaker_on_headset(struct snd_kcontrol *kctl,
const char *names_to_check[] = {
"Headset", "headset", "Headphone", "headphone", NULL};
const char **s;
- bool found = 0;
+ bool found = false;
if (strcmp("Speaker", kctl->id.name))
return;
for (s = names_to_check; *s; s++)
if (strstr(card->shortname, *s)) {
- found = 1;
+ found = true;
break;
}
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index b375d58871e7..ca3256d6fde3 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -241,16 +241,17 @@ static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep)
struct snd_usb_endpoint *ep = subs->sync_endpoint;
if (subs->data_endpoint->iface != subs->sync_endpoint->iface ||
- subs->data_endpoint->alt_idx != subs->sync_endpoint->alt_idx) {
+ subs->data_endpoint->altsetting != subs->sync_endpoint->altsetting) {
err = usb_set_interface(subs->dev,
subs->sync_endpoint->iface,
- subs->sync_endpoint->alt_idx);
+ subs->sync_endpoint->altsetting);
if (err < 0) {
+ clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags);
snd_printk(KERN_ERR
"%d:%d:%d: cannot set interface (%d)\n",
subs->dev->devnum,
subs->sync_endpoint->iface,
- subs->sync_endpoint->alt_idx, err);
+ subs->sync_endpoint->altsetting, err);
return -EIO;
}
}
@@ -282,22 +283,6 @@ static void stop_endpoints(struct snd_usb_substream *subs, bool wait)
}
}
-static int deactivate_endpoints(struct snd_usb_substream *subs)
-{
- int reta, retb;
-
- reta = snd_usb_endpoint_deactivate(subs->sync_endpoint);
- retb = snd_usb_endpoint_deactivate(subs->data_endpoint);
-
- if (reta < 0)
- return reta;
-
- if (retb < 0)
- return retb;
-
- return 0;
-}
-
static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
unsigned int altsetting,
struct usb_host_interface **alts,
@@ -595,6 +580,7 @@ static int configure_sync_endpoint(struct snd_usb_substream *subs)
subs->pcm_format,
subs->channels,
subs->period_bytes,
+ 0, 0,
subs->cur_rate,
subs->cur_audiofmt,
NULL);
@@ -631,6 +617,7 @@ static int configure_sync_endpoint(struct snd_usb_substream *subs)
subs->pcm_format,
sync_fp->channels,
sync_period_bytes,
+ 0, 0,
subs->cur_rate,
sync_fp,
NULL);
@@ -653,6 +640,8 @@ static int configure_endpoint(struct snd_usb_substream *subs)
subs->pcm_format,
subs->channels,
subs->period_bytes,
+ subs->period_frames,
+ subs->buffer_periods,
subs->cur_rate,
subs->cur_audiofmt,
subs->sync_endpoint);
@@ -689,6 +678,8 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
subs->pcm_format = params_format(hw_params);
subs->period_bytes = params_period_bytes(hw_params);
+ subs->period_frames = params_period_size(hw_params);
+ subs->buffer_periods = params_periods(hw_params);
subs->channels = params_channels(hw_params);
subs->cur_rate = params_rate(hw_params);
@@ -730,7 +721,8 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
down_read(&subs->stream->chip->shutdown_rwsem);
if (!subs->stream->chip->shutdown) {
stop_endpoints(subs, true);
- deactivate_endpoints(subs);
+ snd_usb_endpoint_deactivate(subs->sync_endpoint);
+ snd_usb_endpoint_deactivate(subs->data_endpoint);
}
up_read(&subs->stream->chip->shutdown_rwsem);
return snd_pcm_lib_free_vmalloc_buffer(substream);
@@ -1363,6 +1355,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
frames = 0;
urb->number_of_packets = 0;
spin_lock_irqsave(&subs->lock, flags);
+ subs->frame_limit += ep->max_urb_frames;
for (i = 0; i < ctx->packets; i++) {
if (ctx->packet_size[i])
counts = ctx->packet_size[i];
@@ -1377,6 +1370,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
subs->transfer_done += counts;
if (subs->transfer_done >= runtime->period_size) {
subs->transfer_done -= runtime->period_size;
+ subs->frame_limit = 0;
period_elapsed = 1;
if (subs->fmt_type == UAC_FORMAT_TYPE_II) {
if (subs->transfer_done > 0) {
@@ -1399,8 +1393,10 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
break;
}
}
- if (period_elapsed &&
- !snd_usb_endpoint_implicit_feedback_sink(subs->data_endpoint)) /* finish at the period boundary */
+ /* finish at the period boundary or after enough frames */
+ if ((period_elapsed ||
+ subs->transfer_done >= subs->frame_limit) &&
+ !snd_usb_endpoint_implicit_feedback_sink(ep))
break;
}
bytes = frames * ep->stride;
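prepare_playback_urb() now maintains a per-URB frame budget: frame_limit grows by max_urb_frames for every URB being filled, resets at each period boundary, and packet accumulation stops once transfer_done reaches it. A standalone sketch of the accounting (struct and helper names are illustrative):

/*
 * Standalone sketch of the frame_limit accounting added to
 * prepare_playback_urb(); the arithmetic mirrors the patch.
 */
struct limit_state {
	unsigned int transfer_done;	/* frames since the last period update */
	unsigned int frame_limit;	/* current per-URB frame budget */
};

/* called once per URB before the packet loop */
static void start_urb(struct limit_state *s, unsigned int max_urb_frames)
{
	s->frame_limit += max_urb_frames;
}

/* returns nonzero when no more packets should be added to this URB */
static int add_packet(struct limit_state *s, unsigned int counts,
		      unsigned int period_size)
{
	int period_elapsed = 0;

	s->transfer_done += counts;
	if (s->transfer_done >= period_size) {
		s->transfer_done -= period_size;
		s->frame_limit = 0;	/* budget restarts at the period boundary */
		period_elapsed = 1;
	}

	/* finish at the period boundary or after enough frames */
	return period_elapsed || s->transfer_done >= s->frame_limit;
}

In the driver the early break is additionally suppressed for implicit-feedback sinks, which must mirror their capture endpoint exactly.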
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index caabe9b3af49..5d2fe0530745 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -55,7 +55,6 @@ struct snd_usb_audio {
struct list_head mixer_list; /* list of mixer interfaces */
int setup; /* from the 'device_setup' module param */
- int nrpacks; /* from the 'nrpacks' module param */
bool autoclock; /* from the 'autoclock' module param */
struct usb_host_interface *ctrl_intf; /* the audio control interface */
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index ca6cb779876a..fc1502098595 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -134,14 +134,14 @@ ifeq ($(VERBOSE),1)
print_install =
else
Q = @
- print_compile = echo ' CC '$(OBJ);
- print_app_build = echo ' BUILD '$(OBJ);
- print_fpic_compile = echo ' CC FPIC '$(OBJ);
- print_shared_lib_compile = echo ' BUILD SHARED LIB '$(OBJ);
- print_plugin_obj_compile = echo ' CC PLUGIN OBJ '$(OBJ);
- print_plugin_build = echo ' CC PLUGI '$(OBJ);
- print_static_lib_build = echo ' BUILD STATIC LIB '$(OBJ);
- print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
+ print_compile = echo ' CC '$(OBJ);
+ print_app_build = echo ' BUILD '$(OBJ);
+ print_fpic_compile = echo ' CC FPIC '$(OBJ);
+ print_shared_lib_compile = echo ' BUILD SHARED LIB '$(OBJ);
+ print_plugin_obj_compile = echo ' BUILD PLUGIN OBJ '$(OBJ);
+ print_plugin_build = echo ' BUILD PLUGIN '$(OBJ);
+ print_static_lib_build = echo ' BUILD STATIC LIB '$(OBJ);
+ print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
endif
do_fpic_compile = \
@@ -268,7 +268,7 @@ TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):$(ARCH):$(CROSS_COMPILE)
TRACEEVENT-CFLAGS: force
@FLAGS='$(TRACK_CFLAGS)'; \
if test x"$$FLAGS" != x"`cat TRACEEVENT-CFLAGS 2>/dev/null`" ; then \
- echo 1>&2 " * new build flags or cross compiler"; \
+ echo 1>&2 " FLAGS: * new build flags or cross compiler"; \
echo "$$FLAGS" >TRACEEVENT-CFLAGS; \
fi
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index 8f8fbc227a46..782d86e961b9 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -13,6 +13,7 @@ perf*.html
common-cmds.h
perf.data
perf.data.old
+output.svg
perf-archive
tags
TAGS
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index 5a37a7c84e69..3ba1c0b09908 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -145,16 +145,17 @@ endif
ifneq ($(findstring $(MAKEFLAGS),s),s)
ifneq ($(V),1)
- QUIET_ASCIIDOC = @echo ' ' ASCIIDOC $@;
- QUIET_XMLTO = @echo ' ' XMLTO $@;
- QUIET_DB2TEXI = @echo ' ' DB2TEXI $@;
- QUIET_MAKEINFO = @echo ' ' MAKEINFO $@;
- QUIET_DBLATEX = @echo ' ' DBLATEX $@;
- QUIET_XSLTPROC = @echo ' ' XSLTPROC $@;
- QUIET_GEN = @echo ' ' GEN $@;
+ QUIET_ASCIIDOC = @echo ' ASCIIDOC '$@;
+ QUIET_XMLTO = @echo ' XMLTO '$@;
+ QUIET_DB2TEXI = @echo ' DB2TEXI '$@;
+ QUIET_MAKEINFO = @echo ' MAKEINFO '$@;
+ QUIET_DBLATEX = @echo ' DBLATEX '$@;
+ QUIET_XSLTPROC = @echo ' XSLTPROC '$@;
+ QUIET_GEN = @echo ' GEN '$@;
QUIET_STDERR = 2> /dev/null
QUIET_SUBDIR0 = +@subdir=
- QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \
+ QUIET_SUBDIR1 = ;$(NO_SUBDIR) \
+ echo ' SUBDIR ' $$subdir; \
$(MAKE) $(PRINT_DIR) -C $$subdir
export V
endif
@@ -183,47 +184,43 @@ ifdef missing_tools
endif
do-install-man: man
- $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir)
-# $(INSTALL) -d -m 755 $(DESTDIR)$(man5dir)
-# $(INSTALL) -d -m 755 $(DESTDIR)$(man7dir)
- $(INSTALL) -m 644 $(DOC_MAN1) $(DESTDIR)$(man1dir)
-# $(INSTALL) -m 644 $(DOC_MAN5) $(DESTDIR)$(man5dir)
-# $(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir)
+ $(call QUIET_INSTALL, Documentation-man) \
+ $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir); \
+# $(INSTALL) -d -m 755 $(DESTDIR)$(man5dir); \
+# $(INSTALL) -d -m 755 $(DESTDIR)$(man7dir); \
+ $(INSTALL) -m 644 $(DOC_MAN1) $(DESTDIR)$(man1dir); \
+# $(INSTALL) -m 644 $(DOC_MAN5) $(DESTDIR)$(man5dir); \
+# $(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir)
install-man: check-man-tools man
-try-install-man:
ifdef missing_tools
- $(warning Please install $(missing_tools) to have the man pages installed)
+ DO_INSTALL_MAN = $(warning Please install $(missing_tools) to have the man pages installed)
else
- $(MAKE) do-install-man
+ DO_INSTALL_MAN = do-install-man
endif
+try-install-man: $(DO_INSTALL_MAN)
+
install-info: info
- $(INSTALL) -d -m 755 $(DESTDIR)$(infodir)
- $(INSTALL) -m 644 $(OUTPUT)perf.info $(OUTPUT)perfman.info $(DESTDIR)$(infodir)
+ $(call QUIET_INSTALL, Documentation-info) \
+ $(INSTALL) -d -m 755 $(DESTDIR)$(infodir); \
+ $(INSTALL) -m 644 $(OUTPUT)perf.info $(OUTPUT)perfman.info $(DESTDIR)$(infodir); \
if test -r $(DESTDIR)$(infodir)/dir; then \
- $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perf.info ;\
- $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perfman.info ;\
+ $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perf.info ;\
+ $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perfman.info ;\
else \
echo "No directory found in $(DESTDIR)$(infodir)" >&2 ; \
fi
install-pdf: pdf
- $(INSTALL) -d -m 755 $(DESTDIR)$(pdfdir)
- $(INSTALL) -m 644 $(OUTPUT)user-manual.pdf $(DESTDIR)$(pdfdir)
+ $(call QUIET_INSTALL, Documentation-pdf) \
+ $(INSTALL) -d -m 755 $(DESTDIR)$(pdfdir); \
+ $(INSTALL) -m 644 $(OUTPUT)user-manual.pdf $(DESTDIR)$(pdfdir)
#install-html: html
# '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir)
-ifneq ($(MAKECMDGOALS),clean)
-ifneq ($(MAKECMDGOALS),tags)
-$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
- $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) $(OUTPUT)PERF-VERSION-FILE
-
--include $(OUTPUT)PERF-VERSION-FILE
-endif
-endif
#
# Determine "include::" file references in asciidoc files.
@@ -253,15 +250,17 @@ $(OUTPUT)cmd-list.made: cmd-list.perl ../command-list.txt $(MAN1_TXT)
$(PERL_PATH) ./cmd-list.perl ../command-list.txt $(QUIET_STDERR) && \
date >$@
+CLEAN_FILES = \
+ $(MAN_XML) $(addsuffix +,$(MAN_XML)) \
+ $(MAN_HTML) $(addsuffix +,$(MAN_HTML)) \
+ $(DOC_HTML) $(DOC_MAN1) $(DOC_MAN5) $(DOC_MAN7) \
+ $(OUTPUT)*.texi $(OUTPUT)*.texi+ $(OUTPUT)*.texi++ \
+ $(OUTPUT)perf.info $(OUTPUT)perfman.info \
+ $(OUTPUT)howto-index.txt $(OUTPUT)howto/*.html $(OUTPUT)doc.dep \
+ $(OUTPUT)technical/api-*.html $(OUTPUT)technical/api-index.txt \
+ $(cmds_txt) $(OUTPUT)*.made
clean:
- $(RM) $(MAN_XML) $(addsuffix +,$(MAN_XML))
- $(RM) $(MAN_HTML) $(addsuffix +,$(MAN_HTML))
- $(RM) $(DOC_HTML) $(DOC_MAN1) $(DOC_MAN5) $(DOC_MAN7)
- $(RM) $(OUTPUT)*.texi $(OUTPUT)*.texi+ $(OUTPUT)*.texi++
- $(RM) $(OUTPUT)perf.info $(OUTPUT)perfman.info
- $(RM) $(OUTPUT)howto-index.txt $(OUTPUT)howto/*.html $(OUTPUT)doc.dep
- $(RM) $(OUTPUT)technical/api-*.html $(OUTPUT)technical/api-index.txt
- $(RM) $(cmds_txt) $(OUTPUT)*.made
+ $(call QUIET_CLEAN, Documentation) $(RM) $(CLEAN_FILES)
$(MAN_HTML): $(OUTPUT)%.html : %.txt
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
@@ -342,5 +341,3 @@ $(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt
#quick-install-html:
# '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(HTML_REF) $(DESTDIR)$(htmldir)
-
-.PHONY: .FORCE-PERF-VERSION-FILE
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
index e9a8349a7172..fd77d81ea748 100644
--- a/tools/perf/Documentation/perf-buildid-cache.txt
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -21,6 +21,19 @@ OPTIONS
-a::
--add=::
Add specified file to the cache.
+-k::
+--kcore::
+	Add specified kcore file to the cache. For the current host, that is
+	/proc/kcore, which requires root permissions to read. Be aware that
+	running 'perf buildid-cache' as root may update root's build-id cache,
+	not the user's. Use the -v option to see where the file is created.
+	Note that the copied file contains only code sections, not the whole core
+	image. Note also that the files "kallsyms" and "modules" must be in the
+	same directory and are copied as well. All 3 files are created with read
+	permissions for root only. kcore will not be added if there is already a
+	kcore in the cache (with the same build-id) that has the same modules at
+	the same addresses. Use the -v option to see if a copy of kcore is
+	actually made.
-r::
--remove=::
Remove specified file from the cache.
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
index ac84db2d2334..6a06cefe9642 100644
--- a/tools/perf/Documentation/perf-kvm.txt
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -109,7 +109,9 @@ STAT LIVE OPTIONS
-m::
--mmap-pages=::
- Number of mmap data pages. Must be a power of two.
+	Number of mmap data pages (must be a power of two) or a size
+	specification with an appended unit character - B/K/M/G. The
+	size is rounded up to the nearest power-of-two number of pages.
-a::
--all-cpus::
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index c7f5f55634ac..ab25be28c9dc 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -48,7 +48,7 @@ REPORT OPTIONS
-k::
--key=<value>::
Sorting key. Possible values: acquired (default), contended,
- wait_total, wait_max, wait_min.
+ avg_wait, wait_total, wait_max, wait_min.
INFO OPTIONS
------------
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index e297b74471b8..052f7c4dc00c 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -87,11 +87,25 @@ OPTIONS
-m::
--mmap-pages=::
- Number of mmap data pages. Must be a power of two.
+	Number of mmap data pages (must be a power of two) or a size
+	specification with an appended unit character - B/K/M/G. The
+	size is rounded up to the nearest power-of-two number of pages.
-g::
+ Enables call-graph (stack chain/backtrace) recording.
+
--call-graph::
- Do call-graph (stack chain/backtrace) recording.
+	Set up and enable call-graph (stack chain/backtrace) recording;
+ implies -g.
+
+ Allows specifying "fp" (frame pointer) or "dwarf"
+ (DWARF's CFI - Call Frame Information) as the method to collect
+ the information used to show the call graphs.
+
+	On systems where binaries are built with gcc
+	-fomit-frame-pointer, the "fp" method will produce bogus
+	call graphs; in that case "dwarf", if available (perf tools
+	linked to the libunwind library), should be used instead.
-q::
--quiet::
@@ -166,6 +180,9 @@ following filters are defined:
- u: only when the branch target is at the user level
- k: only when the branch target is in the kernel
- hv: only when the target is at the hypervisor level
+ - in_tx: only when the target is in a hardware transaction
+ - no_tx: only when the target is not in a hardware transaction
+ - abort_tx: only when the target is a hardware transaction abort
+
The option requires at least one branch type among any, any_call, any_ret, ind_call.
@@ -176,12 +193,14 @@ is enabled for all the sampling events. The sampled branch type is the same for
The various filters must be specified as a comma separated list: --branch-filter any_ret,u,k
Note that this feature may not be available on all processors.
--W::
--weight::
Enable weightened sampling. An additional weight is recorded per sample and can be
displayed with the weight and local_weight sort keys. This currently works for TSX
abort events and some memory events in precise mode on modern Intel CPUs.
+--transaction::
+Record transaction flags for transaction related events.
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 2b8097ee39d8..10a279871251 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -71,7 +71,11 @@ OPTIONS
entries are displayed as "[other]".
- cpu: cpu number the task ran at the time of sample
- srcline: filename and line number executed at the time of sample. The
- DWARF debuggin info must be provided.
+ DWARF debugging info must be provided.
+ - weight: Event specific weight, e.g. memory latency or transaction
+ abort cost. This is the global weight.
+ - local_weight: Local weight version of the weight above.
+ - transaction: Transaction abort flags.
By default, comm, dso and symbol keys are used.
(i.e. --sort comm,dso,symbol)
@@ -85,6 +89,8 @@ OPTIONS
- symbol_from: name of function branched from
- symbol_to: name of function branched to
- mispredict: "N" for predicted branch, "Y" for mispredicted branch
+ - in_tx: branch in TSX transaction
+ - abort: TSX transaction abort.
And default sort keys are changed to comm, dso_from, symbol_from, dso_to
and symbol_to, see '--branch-stack'.
@@ -135,6 +141,14 @@ OPTIONS
Default: fractal,0.5,callee,function.
+--max-stack::
+	Set the stack depth limit when parsing the callchain; anything
+	beyond the specified depth will be ignored. This is a trade-off
+	between information loss and faster processing, especially for
+ workloads that can have a very long callchain stack.
+
+ Default: 127
+
-G::
--inverted::
alias for inverted caller based call graph.
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 73c9759005a3..80c7da6732f2 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -137,6 +137,11 @@ core number and the number of online logical processors on that physical process
After starting the program, wait msecs before measuring. This is useful to
filter out the startup phase of the program, which is often very different.
+-T::
+--transaction::
+
+Print statistics of transactional execution if supported.
+
EXAMPLES
--------
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index 1632b0efc757..3ff8bd4f0b4d 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -8,7 +8,8 @@ perf-timechart - Tool to visualize total system behavior during a workload
SYNOPSIS
--------
[verse]
-'perf timechart' {record}
+'perf timechart' record <command>
+'perf timechart' [<options>]
DESCRIPTION
-----------
@@ -41,6 +42,18 @@ OPTIONS
--symfs=<directory>::
Look for files with symbols relative to this directory.
+EXAMPLES
+--------
+
+$ perf timechart record git pull
+
+ [ perf record: Woken up 13 times to write data ]
+ [ perf record: Captured and wrote 4.253 MB perf.data (~185801 samples) ]
+
+$ perf timechart
+
+ Written 10.2 seconds of trace to output.svg.
+
SEE ALSO
--------
linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 58d6598a9686..7de01dd79688 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -68,7 +68,9 @@ Default is to monitor all CPUS.
-m <pages>::
--mmap-pages=<pages>::
- Number of mmapped data pages.
+	Number of mmap data pages (must be a power of two) or a size
+	specification with an appended unit character - B/K/M/G. The
+	size is rounded up to the nearest power-of-two number of pages.
-p <pid>::
--pid=<pid>::
@@ -112,7 +114,8 @@ Default is to monitor all CPUS.
-s::
--sort::
- Sort by key(s): pid, comm, dso, symbol, parent, srcline, weight, local_weight.
+ Sort by key(s): pid, comm, dso, symbol, parent, srcline, weight,
+	local_weight, abort, in_tx, transaction.
-n::
--show-nr-samples::
@@ -140,20 +143,20 @@ Default is to monitor all CPUS.
--asm-raw::
Show raw instruction encoding of assembly instructions.
--G [type,min,order]::
+-G::
+ Enables call-graph (stack chain/backtrace) recording.
+
--call-graph::
- Display call chains using type, min percent threshold and order.
- type can be either:
- - flat: single column, linear exposure of call chains.
- - graph: use a graph tree, displaying absolute overhead rates.
- - fractal: like graph, but displays relative rates. Each branch of
- the tree is considered as a new profiled object.
-
- order can be either:
- - callee: callee based call graph.
- - caller: inverted caller based call graph.
-
- Default: fractal,0.5,callee.
+	Set up and enable call-graph (stack chain/backtrace) recording;
+ implies -G.
+
+--max-stack::
+	Set the stack depth limit when parsing the callchain; anything
+	beyond the specified depth will be ignored. This is a trade-off
+	between information loss and faster processing, especially for
+ workloads that can have a very long callchain stack.
+
+ Default: 127
--ignore-callees=<regex>::
Ignore callees of the function(s) matching the given regex.
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index daccd2c0a48f..7b0497f95a75 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -9,6 +9,7 @@ SYNOPSIS
--------
[verse]
'perf trace'
+'perf trace record'
DESCRIPTION
-----------
@@ -16,9 +17,14 @@ This command will show the events associated with the target, initially
syscalls, but other system events like pagefaults, task lifetime events,
scheduling events, etc.
-Initially this is a live mode only tool, but eventually will work with
-perf.data files like the other tools, allowing a detached 'record' from
-analysis phases.
+This is a live mode tool in addition to working with perf.data files like
+the other perf tools. Files can be generated using the 'perf record' command,
+but the session needs to include the raw_syscalls events (-e 'raw_syscalls:*').
+Alternatively, 'perf trace record' can be used as a shortcut to
+automatically include the raw_syscalls events when writing events to a file.
+
+The following options apply to perf trace; options to perf trace record are
+found in the perf record man page.
OPTIONS
-------
@@ -59,7 +65,9 @@ OPTIONS
-m::
--mmap-pages=::
- Number of mmap data pages. Must be a power of two.
+	Number of mmap data pages (must be a power of two) or a size
+	specification with an appended unit character - B/K/M/G. The
+	size is rounded up to the nearest power-of-two number of pages.
-C::
--cpu::
@@ -78,6 +86,21 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
--input
Process events from a given perf data file.
+-T::
+--time::
+	Print the full timestamp rather than the time relative to the first sample.
+
+--comm::
+	Show process COMM right beside its ID; on by default, disable with --no-comm.
+
+--summary::
+ Show a summary of syscalls by thread with min, max, and average times (in
+ msec) and relative stddev.
+
+--tool_stats::
+	Show tool stats such as the number of times fd->pathname was discovered through
+ hooking the open syscall return + vfs_getname or via reading /proc/pid/fd, etc.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script[1]
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 64c043b7a438..4835618a5608 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -1,819 +1,79 @@
-include ../scripts/Makefile.include
-
-# The default target of this Makefile is...
-all:
-
-include config/utilities.mak
-
-# Define V to have a more verbose compile.
-#
-# Define O to save output files in a separate directory.
-#
-# Define ARCH as name of target architecture if you want cross-builds.
-#
-# Define CROSS_COMPILE as prefix name of compiler if you want cross-builds.
-#
-# Define NO_LIBPERL to disable perl script extension.
-#
-# Define NO_LIBPYTHON to disable python script extension.
-#
-# Define PYTHON to point to the python binary if the default
-# `python' is not correct; for example: PYTHON=python2
-#
-# Define PYTHON_CONFIG to point to the python-config binary if
-# the default `$(PYTHON)-config' is not correct.
#
-# Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8
+# This is a simple wrapper Makefile that calls the main Makefile.perf
+# with a -j option to do parallel builds
#
-# Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72.
+# If you want to invoke the perf build in some non-standard way, then
+# you can use the 'make -f Makefile.perf' method to invoke it.
#
-# Define LDFLAGS=-static to build a static binary.
-#
-# Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
-#
-# Define NO_DWARF if you do not want debug-info analysis feature at all.
-#
-# Define WERROR=0 to disable treating any warnings as errors.
-#
-# Define NO_NEWT if you do not want TUI support. (deprecated)
-#
-# Define NO_SLANG if you do not want TUI support.
-#
-# Define NO_GTK2 if you do not want GTK+ GUI support.
+
#
-# Define NO_DEMANGLE if you do not want C++ symbol demangling.
+# Clear out the built-in rules GNU make defines by default (such as .o targets),
+# so that we pass through all targets to Makefile.perf:
#
-# Define NO_LIBELF if you do not want libelf dependency (e.g. cross-builds)
+.SUFFIXES:
+
#
-# Define NO_LIBUNWIND if you do not want libunwind dependency for dwarf
-# backtrace post unwind.
+# We don't want to pass along options like -j:
#
-# Define NO_BACKTRACE if you do not want stack backtrace debug feature
+unexport MAKEFLAGS
+
#
-# Define NO_LIBNUMA if you do not want numa perf benchmark
+# Do a parallel build with multiple jobs, based on the number of CPUs online
+# in this system: 'make -j8' on an 8-CPU system, etc.
#
-# Define NO_LIBAUDIT if you do not want libaudit support
+# (To override it, run 'make JOBS=1' and similar.)
#
-# Define NO_LIBBIONIC if you do not want bionic support
-
-ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-#$(info Determined 'srctree' to be $(srctree))
-endif
-
-ifneq ($(objtree),)
-#$(info Determined 'objtree' to be $(objtree))
-endif
-
-ifneq ($(OUTPUT),)
-#$(info Determined 'OUTPUT' to be $(OUTPUT))
-endif
-
-$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
- @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
-
-CC = $(CROSS_COMPILE)gcc
-AR = $(CROSS_COMPILE)ar
-
-RM = rm -f
-MKDIR = mkdir
-FIND = find
-INSTALL = install
-FLEX = flex
-BISON = bison
-STRIP = strip
-
-LK_DIR = $(srctree)/tools/lib/lk/
-TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
-
-# include config/Makefile by default and rule out
-# non-config cases
-config := 1
-
-NON_CONFIG_TARGETS := clean TAGS tags cscope help
-
-ifdef MAKECMDGOALS
-ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
- config := 0
-endif
+ifeq ($(JOBS),)
+ JOBS := $(shell grep -c ^processor /proc/cpuinfo 2>/dev/null)
+ ifeq ($(JOBS),)
+ JOBS := 1
+ endif
endif
-ifeq ($(config),1)
-include config/Makefile
+#
+# Only pass canonical directory names as the output directory:
+#
+ifneq ($(O),)
+ FULL_O := $(shell readlink -f $(O) || echo $(O))
endif
-export prefix bindir sharedir sysconfdir
-
-# sparse is architecture-neutral, which means that we need to tell it
-# explicitly what architecture to check for. Fix this up for yours..
-SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
-
-# Guard against environment variables
-BUILTIN_OBJS =
-LIB_H =
-LIB_OBJS =
-PYRF_OBJS =
-SCRIPT_SH =
-
-SCRIPT_SH += perf-archive.sh
-
-grep-libs = $(filter -l%,$(1))
-strip-libs = $(filter-out -l%,$(1))
-
-ifneq ($(OUTPUT),)
- TE_PATH=$(OUTPUT)
-ifneq ($(subdir),)
- LK_PATH=$(OUTPUT)/../lib/lk/
-else
- LK_PATH=$(OUTPUT)
-endif
+#
+# Only accept the 'DEBUG' variable from the command line:
+#
+ifeq ("$(origin DEBUG)", "command line")
+ ifeq ($(DEBUG),)
+ override DEBUG = 0
+ else
+ SET_DEBUG = "DEBUG=$(DEBUG)"
+ endif
else
- TE_PATH=$(TRACE_EVENT_DIR)
- LK_PATH=$(LK_DIR)
+ override DEBUG = 0
endif
-LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
-export LIBTRACEEVENT
-
-LIBLK = $(LK_PATH)liblk.a
-export LIBLK
-
-# python extension build directories
-PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
-PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
-PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/
-export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
+define print_msg
+ @printf ' BUILD: Doing '\''make \033[33m-j'$(JOBS)'\033[m'\'' parallel build\n'
+endef
-python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so
+define make
+ @$(MAKE) -f Makefile.perf --no-print-directory -j$(JOBS) O=$(FULL_O) $(SET_DEBUG) $@
+endef
-PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
-PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBLK)
-
-$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS)
- $(QUIET_GEN)CFLAGS='$(CFLAGS)' $(PYTHON_WORD) util/setup.py \
- --quiet build_ext; \
- mkdir -p $(OUTPUT)python && \
- cp $(PYTHON_EXTBUILD_LIB)perf.so $(OUTPUT)python/
#
-# No Perl scripts right now:
+# Needed if no target specified:
#
-
-SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH))
+all:
+ $(print_msg)
+ $(make)
#
-# Single 'perf' binary right now:
+# The clean target is not really parallel, don't print the jobs info:
#
-PROGRAMS += $(OUTPUT)perf
-
-# what 'all' will build and 'install' will install, in perfexecdir
-ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
-
-# what 'all' will build but not install in perfexecdir
-OTHER_PROGRAMS = $(OUTPUT)perf
-
-# Set paths to tools early so that they can be used for version tests.
-ifndef SHELL_PATH
- SHELL_PATH = /bin/sh
-endif
-ifndef PERL_PATH
- PERL_PATH = /usr/bin/perl
-endif
-
-export PERL_PATH
-
-$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
- $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
-
-$(OUTPUT)util/parse-events-bison.c: util/parse-events.y
- $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c -p parse_events_
-
-$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c
- $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
-
-$(OUTPUT)util/pmu-bison.c: util/pmu.y
- $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_
-
-$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
-$(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
-
-LIB_FILE=$(OUTPUT)libperf.a
-
-LIB_H += ../../include/uapi/linux/perf_event.h
-LIB_H += ../../include/linux/rbtree.h
-LIB_H += ../../include/linux/list.h
-LIB_H += ../../include/uapi/linux/const.h
-LIB_H += ../../include/linux/hash.h
-LIB_H += ../../include/linux/stringify.h
-LIB_H += util/include/linux/bitmap.h
-LIB_H += util/include/linux/bitops.h
-LIB_H += util/include/linux/compiler.h
-LIB_H += util/include/linux/const.h
-LIB_H += util/include/linux/ctype.h
-LIB_H += util/include/linux/kernel.h
-LIB_H += util/include/linux/list.h
-LIB_H += util/include/linux/export.h
-LIB_H += util/include/linux/magic.h
-LIB_H += util/include/linux/poison.h
-LIB_H += util/include/linux/prefetch.h
-LIB_H += util/include/linux/rbtree.h
-LIB_H += util/include/linux/rbtree_augmented.h
-LIB_H += util/include/linux/string.h
-LIB_H += util/include/linux/types.h
-LIB_H += util/include/linux/linkage.h
-LIB_H += util/include/asm/asm-offsets.h
-LIB_H += util/include/asm/bug.h
-LIB_H += util/include/asm/byteorder.h
-LIB_H += util/include/asm/hweight.h
-LIB_H += util/include/asm/swab.h
-LIB_H += util/include/asm/system.h
-LIB_H += util/include/asm/uaccess.h
-LIB_H += util/include/dwarf-regs.h
-LIB_H += util/include/asm/dwarf2.h
-LIB_H += util/include/asm/cpufeature.h
-LIB_H += util/include/asm/unistd_32.h
-LIB_H += util/include/asm/unistd_64.h
-LIB_H += perf.h
-LIB_H += util/annotate.h
-LIB_H += util/cache.h
-LIB_H += util/callchain.h
-LIB_H += util/build-id.h
-LIB_H += util/debug.h
-LIB_H += util/sysfs.h
-LIB_H += util/pmu.h
-LIB_H += util/event.h
-LIB_H += util/evsel.h
-LIB_H += util/evlist.h
-LIB_H += util/exec_cmd.h
-LIB_H += util/types.h
-LIB_H += util/levenshtein.h
-LIB_H += util/machine.h
-LIB_H += util/map.h
-LIB_H += util/parse-options.h
-LIB_H += util/parse-events.h
-LIB_H += util/quote.h
-LIB_H += util/util.h
-LIB_H += util/xyarray.h
-LIB_H += util/header.h
-LIB_H += util/help.h
-LIB_H += util/session.h
-LIB_H += util/strbuf.h
-LIB_H += util/strlist.h
-LIB_H += util/strfilter.h
-LIB_H += util/svghelper.h
-LIB_H += util/tool.h
-LIB_H += util/run-command.h
-LIB_H += util/sigchain.h
-LIB_H += util/dso.h
-LIB_H += util/symbol.h
-LIB_H += util/color.h
-LIB_H += util/values.h
-LIB_H += util/sort.h
-LIB_H += util/hist.h
-LIB_H += util/thread.h
-LIB_H += util/thread_map.h
-LIB_H += util/trace-event.h
-LIB_H += util/probe-finder.h
-LIB_H += util/dwarf-aux.h
-LIB_H += util/probe-event.h
-LIB_H += util/pstack.h
-LIB_H += util/cpumap.h
-LIB_H += util/top.h
-LIB_H += $(ARCH_INCLUDE)
-LIB_H += util/cgroup.h
-LIB_H += $(LIB_INCLUDE)traceevent/event-parse.h
-LIB_H += util/target.h
-LIB_H += util/rblist.h
-LIB_H += util/intlist.h
-LIB_H += util/perf_regs.h
-LIB_H += util/unwind.h
-LIB_H += util/vdso.h
-LIB_H += ui/helpline.h
-LIB_H += ui/progress.h
-LIB_H += ui/util.h
-LIB_H += ui/ui.h
-
-LIB_OBJS += $(OUTPUT)util/abspath.o
-LIB_OBJS += $(OUTPUT)util/alias.o
-LIB_OBJS += $(OUTPUT)util/annotate.o
-LIB_OBJS += $(OUTPUT)util/build-id.o
-LIB_OBJS += $(OUTPUT)util/config.o
-LIB_OBJS += $(OUTPUT)util/ctype.o
-LIB_OBJS += $(OUTPUT)util/sysfs.o
-LIB_OBJS += $(OUTPUT)util/pmu.o
-LIB_OBJS += $(OUTPUT)util/environment.o
-LIB_OBJS += $(OUTPUT)util/event.o
-LIB_OBJS += $(OUTPUT)util/evlist.o
-LIB_OBJS += $(OUTPUT)util/evsel.o
-LIB_OBJS += $(OUTPUT)util/exec_cmd.o
-LIB_OBJS += $(OUTPUT)util/help.o
-LIB_OBJS += $(OUTPUT)util/levenshtein.o
-LIB_OBJS += $(OUTPUT)util/parse-options.o
-LIB_OBJS += $(OUTPUT)util/parse-events.o
-LIB_OBJS += $(OUTPUT)util/path.o
-LIB_OBJS += $(OUTPUT)util/rbtree.o
-LIB_OBJS += $(OUTPUT)util/bitmap.o
-LIB_OBJS += $(OUTPUT)util/hweight.o
-LIB_OBJS += $(OUTPUT)util/run-command.o
-LIB_OBJS += $(OUTPUT)util/quote.o
-LIB_OBJS += $(OUTPUT)util/strbuf.o
-LIB_OBJS += $(OUTPUT)util/string.o
-LIB_OBJS += $(OUTPUT)util/strlist.o
-LIB_OBJS += $(OUTPUT)util/strfilter.o
-LIB_OBJS += $(OUTPUT)util/top.o
-LIB_OBJS += $(OUTPUT)util/usage.o
-LIB_OBJS += $(OUTPUT)util/wrapper.o
-LIB_OBJS += $(OUTPUT)util/sigchain.o
-LIB_OBJS += $(OUTPUT)util/dso.o
-LIB_OBJS += $(OUTPUT)util/symbol.o
-LIB_OBJS += $(OUTPUT)util/symbol-elf.o
-LIB_OBJS += $(OUTPUT)util/color.o
-LIB_OBJS += $(OUTPUT)util/pager.o
-LIB_OBJS += $(OUTPUT)util/header.o
-LIB_OBJS += $(OUTPUT)util/callchain.o
-LIB_OBJS += $(OUTPUT)util/values.o
-LIB_OBJS += $(OUTPUT)util/debug.o
-LIB_OBJS += $(OUTPUT)util/machine.o
-LIB_OBJS += $(OUTPUT)util/map.o
-LIB_OBJS += $(OUTPUT)util/pstack.o
-LIB_OBJS += $(OUTPUT)util/session.o
-LIB_OBJS += $(OUTPUT)util/thread.o
-LIB_OBJS += $(OUTPUT)util/thread_map.o
-LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
-LIB_OBJS += $(OUTPUT)util/parse-events-flex.o
-LIB_OBJS += $(OUTPUT)util/parse-events-bison.o
-LIB_OBJS += $(OUTPUT)util/pmu-flex.o
-LIB_OBJS += $(OUTPUT)util/pmu-bison.o
-LIB_OBJS += $(OUTPUT)util/trace-event-read.o
-LIB_OBJS += $(OUTPUT)util/trace-event-info.o
-LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o
-LIB_OBJS += $(OUTPUT)util/svghelper.o
-LIB_OBJS += $(OUTPUT)util/sort.o
-LIB_OBJS += $(OUTPUT)util/hist.o
-LIB_OBJS += $(OUTPUT)util/probe-event.o
-LIB_OBJS += $(OUTPUT)util/util.o
-LIB_OBJS += $(OUTPUT)util/xyarray.o
-LIB_OBJS += $(OUTPUT)util/cpumap.o
-LIB_OBJS += $(OUTPUT)util/cgroup.o
-LIB_OBJS += $(OUTPUT)util/target.o
-LIB_OBJS += $(OUTPUT)util/rblist.o
-LIB_OBJS += $(OUTPUT)util/intlist.o
-LIB_OBJS += $(OUTPUT)util/vdso.o
-LIB_OBJS += $(OUTPUT)util/stat.o
-LIB_OBJS += $(OUTPUT)util/record.o
-
-LIB_OBJS += $(OUTPUT)ui/setup.o
-LIB_OBJS += $(OUTPUT)ui/helpline.o
-LIB_OBJS += $(OUTPUT)ui/progress.o
-LIB_OBJS += $(OUTPUT)ui/util.o
-LIB_OBJS += $(OUTPUT)ui/hist.o
-LIB_OBJS += $(OUTPUT)ui/stdio/hist.o
-
-LIB_OBJS += $(OUTPUT)arch/common.o
-
-LIB_OBJS += $(OUTPUT)tests/parse-events.o
-LIB_OBJS += $(OUTPUT)tests/dso-data.o
-LIB_OBJS += $(OUTPUT)tests/attr.o
-LIB_OBJS += $(OUTPUT)tests/vmlinux-kallsyms.o
-LIB_OBJS += $(OUTPUT)tests/open-syscall.o
-LIB_OBJS += $(OUTPUT)tests/open-syscall-all-cpus.o
-LIB_OBJS += $(OUTPUT)tests/open-syscall-tp-fields.o
-LIB_OBJS += $(OUTPUT)tests/mmap-basic.o
-LIB_OBJS += $(OUTPUT)tests/perf-record.o
-LIB_OBJS += $(OUTPUT)tests/rdpmc.o
-LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o
-LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o
-LIB_OBJS += $(OUTPUT)tests/pmu.o
-LIB_OBJS += $(OUTPUT)tests/hists_link.o
-LIB_OBJS += $(OUTPUT)tests/python-use.o
-LIB_OBJS += $(OUTPUT)tests/bp_signal.o
-LIB_OBJS += $(OUTPUT)tests/bp_signal_overflow.o
-LIB_OBJS += $(OUTPUT)tests/task-exit.o
-LIB_OBJS += $(OUTPUT)tests/sw-clock.o
-ifeq ($(ARCH),x86)
-LIB_OBJS += $(OUTPUT)tests/perf-time-to-tsc.o
-endif
-LIB_OBJS += $(OUTPUT)tests/code-reading.o
-LIB_OBJS += $(OUTPUT)tests/sample-parsing.o
-LIB_OBJS += $(OUTPUT)tests/parse-no-sample-id-all.o
-
-BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
-BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
-# Benchmark modules
-BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
-BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
-ifeq ($(RAW_ARCH),x86_64)
-BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o
-BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o
-endif
-BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
-BUILTIN_OBJS += $(OUTPUT)bench/mem-memset.o
-
-BUILTIN_OBJS += $(OUTPUT)builtin-diff.o
-BUILTIN_OBJS += $(OUTPUT)builtin-evlist.o
-BUILTIN_OBJS += $(OUTPUT)builtin-help.o
-BUILTIN_OBJS += $(OUTPUT)builtin-sched.o
-BUILTIN_OBJS += $(OUTPUT)builtin-buildid-list.o
-BUILTIN_OBJS += $(OUTPUT)builtin-buildid-cache.o
-BUILTIN_OBJS += $(OUTPUT)builtin-list.o
-BUILTIN_OBJS += $(OUTPUT)builtin-record.o
-BUILTIN_OBJS += $(OUTPUT)builtin-report.o
-BUILTIN_OBJS += $(OUTPUT)builtin-stat.o
-BUILTIN_OBJS += $(OUTPUT)builtin-timechart.o
-BUILTIN_OBJS += $(OUTPUT)builtin-top.o
-BUILTIN_OBJS += $(OUTPUT)builtin-script.o
-BUILTIN_OBJS += $(OUTPUT)builtin-probe.o
-BUILTIN_OBJS += $(OUTPUT)builtin-kmem.o
-BUILTIN_OBJS += $(OUTPUT)builtin-lock.o
-BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o
-BUILTIN_OBJS += $(OUTPUT)builtin-inject.o
-BUILTIN_OBJS += $(OUTPUT)tests/builtin-test.o
-BUILTIN_OBJS += $(OUTPUT)builtin-mem.o
-
-PERFLIBS = $(LIB_FILE) $(LIBLK) $(LIBTRACEEVENT)
-
-# We choose to avoid "if .. else if .. else .. endif endif"
-# because maintaining the nesting to match is a pain. If
-# we had "elif" things would have been much nicer...
-
--include arch/$(ARCH)/Makefile
-
-ifneq ($(OUTPUT),)
- CFLAGS += -I$(OUTPUT)
-endif
-
-ifdef NO_LIBELF
-EXTLIBS := $(filter-out -lelf,$(EXTLIBS))
-
-# Remove ELF/DWARF dependent codes
-LIB_OBJS := $(filter-out $(OUTPUT)util/symbol-elf.o,$(LIB_OBJS))
-LIB_OBJS := $(filter-out $(OUTPUT)util/dwarf-aux.o,$(LIB_OBJS))
-LIB_OBJS := $(filter-out $(OUTPUT)util/probe-event.o,$(LIB_OBJS))
-LIB_OBJS := $(filter-out $(OUTPUT)util/probe-finder.o,$(LIB_OBJS))
-
-BUILTIN_OBJS := $(filter-out $(OUTPUT)builtin-probe.o,$(BUILTIN_OBJS))
-
-# Use minimal symbol handling
-LIB_OBJS += $(OUTPUT)util/symbol-minimal.o
-
-else # NO_LIBELF
-ifndef NO_DWARF
- LIB_OBJS += $(OUTPUT)util/probe-finder.o
- LIB_OBJS += $(OUTPUT)util/dwarf-aux.o
-endif # NO_DWARF
-endif # NO_LIBELF
-
-ifndef NO_LIBUNWIND
- LIB_OBJS += $(OUTPUT)util/unwind.o
-endif
-LIB_OBJS += $(OUTPUT)tests/keep-tracking.o
-
-ifndef NO_LIBAUDIT
- BUILTIN_OBJS += $(OUTPUT)builtin-trace.o
-endif
-
-ifndef NO_SLANG
- LIB_OBJS += $(OUTPUT)ui/browser.o
- LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o
- LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
- LIB_OBJS += $(OUTPUT)ui/browsers/map.o
- LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o
- LIB_OBJS += $(OUTPUT)ui/tui/setup.o
- LIB_OBJS += $(OUTPUT)ui/tui/util.o
- LIB_OBJS += $(OUTPUT)ui/tui/helpline.o
- LIB_OBJS += $(OUTPUT)ui/tui/progress.o
- LIB_H += ui/browser.h
- LIB_H += ui/browsers/map.h
- LIB_H += ui/keysyms.h
- LIB_H += ui/libslang.h
-endif
-
-ifndef NO_GTK2
- LIB_OBJS += $(OUTPUT)ui/gtk/browser.o
- LIB_OBJS += $(OUTPUT)ui/gtk/hists.o
- LIB_OBJS += $(OUTPUT)ui/gtk/setup.o
- LIB_OBJS += $(OUTPUT)ui/gtk/util.o
- LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o
- LIB_OBJS += $(OUTPUT)ui/gtk/progress.o
- LIB_OBJS += $(OUTPUT)ui/gtk/annotate.o
-endif
-
-ifndef NO_LIBPERL
- LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o
- LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o
-endif
-
-ifndef NO_LIBPYTHON
- LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
- LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
-endif
-
-ifeq ($(NO_PERF_REGS),0)
- ifeq ($(ARCH),x86)
- LIB_H += arch/x86/include/perf_regs.h
- endif
-endif
-
-ifndef NO_LIBNUMA
- BUILTIN_OBJS += $(OUTPUT)bench/numa.o
-endif
-
-ifdef ASCIIDOC8
- export ASCIIDOC8
-endif
-
-LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
-
-export INSTALL SHELL_PATH
-
-### Build rules
-
-SHELL = $(SHELL_PATH)
-
-all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
-
-please_set_SHELL_PATH_to_a_more_modern_shell:
- @$$(:)
-
-shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell
-
-strip: $(PROGRAMS) $(OUTPUT)perf
- $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf
-
-$(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -include $(OUTPUT)PERF-VERSION-FILE \
- '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
- $(CFLAGS) -c $(filter %.c,$^) -o $@
-
-$(OUTPUT)perf: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
- $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OUTPUT)perf.o \
- $(BUILTIN_OBJS) $(LIBS) -o $@
-
-$(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
- '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
- '-DPERF_MAN_PATH="$(mandir_SQ)"' \
- '-DPERF_INFO_PATH="$(infodir_SQ)"' $<
-
-$(OUTPUT)builtin-timechart.o: builtin-timechart.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
- '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
- '-DPERF_MAN_PATH="$(mandir_SQ)"' \
- '-DPERF_INFO_PATH="$(infodir_SQ)"' $<
-
-$(OUTPUT)common-cmds.h: util/generate-cmdlist.sh command-list.txt
-
-$(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
- $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
-
-$(SCRIPTS) : % : %.sh
- $(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@'
-
-# These can record PERF_VERSION
-$(OUTPUT)perf.o perf.spec \
- $(SCRIPTS) \
- : $(OUTPUT)PERF-VERSION-FILE
-
-.SUFFIXES:
-.SUFFIXES: .o .c .S .s
-
-# These two need to be here so that when O= is not used they take precedence
-# over the general rule for .o
-
-$(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(CFLAGS) -w $<
-
-$(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w $<
-
-$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $<
-$(OUTPUT)%.i: %.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -E $(CFLAGS) $<
-$(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -S $(CFLAGS) $<
-$(OUTPUT)%.o: %.S
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $<
-$(OUTPUT)%.s: %.S
- $(QUIET_CC)$(CC) -o $@ -E $(CFLAGS) $<
-
-$(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
- '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \
- '-DPREFIX="$(prefix_SQ)"' \
- $<
-
-$(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
- '-DBINDIR="$(bindir_SQ)"' -DPYTHON='"$(PYTHON_WORD)"' \
- $<
-
-$(OUTPUT)tests/python-use.o: tests/python-use.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
- -DPYTHONPATH='"$(OUTPUT)python"' \
- -DPYTHON='"$(PYTHON_WORD)"' \
- $<
-
-$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
-
-$(OUTPUT)ui/browser.o: ui/browser.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
-
-$(OUTPUT)ui/browsers/annotate.o: ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
+clean:
+ $(make)
-$(OUTPUT)ui/browsers/hists.o: ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
-
-$(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
-
-$(OUTPUT)ui/browsers/scripts.o: ui/browsers/scripts.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
-
-$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
-
-$(OUTPUT)util/parse-events.o: util/parse-events.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-redundant-decls $<
-
-$(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default $<
-
-$(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default $<
-
-$(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
-
-$(OUTPUT)scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
-
-$(OUTPUT)perf-%: %.o $(PERFLIBS)
- $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS)
-
-$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
-$(patsubst perf-%,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
-
-# We compile into subdirectories. If the target directory is not the source directory, they might not exist, so
-# we make the various files depend on their directories.
-DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
-$(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS)))
-# In the second step, we make a rule to actually create these directories
-$(sort $(dir $(DIRECTORY_DEPS))):
- $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
-
-$(LIB_FILE): $(LIB_OBJS)
- $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS)
-
-# libtraceevent.a
-$(LIBTRACEEVENT):
- $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) libtraceevent.a
-
-$(LIBTRACEEVENT)-clean:
- $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) clean
-
-# if subdir is set, we've been called from above so target has been built
-# already
-$(LIBLK):
-ifeq ($(subdir),)
- $(QUIET_SUBDIR0)$(LK_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) liblk.a
-endif
-
-$(LIBLK)-clean:
-ifeq ($(subdir),)
- $(QUIET_SUBDIR0)$(LK_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) clean
-endif
-
-help:
- @echo 'Perf make targets:'
- @echo ' doc - make *all* documentation (see below)'
- @echo ' man - make manpage documentation (access with man <foo>)'
- @echo ' html - make html documentation'
- @echo ' info - make GNU info documentation (access with info <foo>)'
- @echo ' pdf - make pdf documentation'
- @echo ' TAGS - use etags to make tag information for source browsing'
- @echo ' tags - use ctags to make tag information for source browsing'
- @echo ' cscope - use cscope to make interactive browsing database'
- @echo ''
- @echo 'Perf install targets:'
- @echo ' NOTE: documentation build requires asciidoc, xmlto packages to be installed'
- @echo ' HINT: use "make prefix=<path> <install target>" to install to a particular'
- @echo ' path like make prefix=/usr/local install install-doc'
- @echo ' install - install compiled binaries'
- @echo ' install-doc - install *all* documentation'
- @echo ' install-man - install manpage documentation'
- @echo ' install-html - install html documentation'
- @echo ' install-info - install GNU info documentation'
- @echo ' install-pdf - install pdf documentation'
- @echo ''
- @echo ' quick-install-doc - alias for quick-install-man'
- @echo ' quick-install-man - install the documentation quickly'
- @echo ' quick-install-html - install the html documentation quickly'
- @echo ''
- @echo 'Perf maintainer targets:'
- @echo ' clean - clean all binary objects and build output'
-
-
-DOC_TARGETS := doc man html info pdf
-
-INSTALL_DOC_TARGETS := $(patsubst %,install-%,$(DOC_TARGETS)) try-install-man
-INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html
-
-# 'make doc' should call 'make -C Documentation all'
-$(DOC_TARGETS):
- $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all)
-
-TAGS:
- $(RM) TAGS
- $(FIND) . -name '*.[hcS]' -print | xargs etags -a
-
-tags:
- $(RM) tags
- $(FIND) . -name '*.[hcS]' -print | xargs ctags -a
-
-cscope:
- $(RM) cscope*
- $(FIND) . -name '*.[hcS]' -print | xargs cscope -b
-
-### Detect prefix changes
-TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):\
- $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ)
-
-$(OUTPUT)PERF-CFLAGS: .FORCE-PERF-CFLAGS
- @FLAGS='$(TRACK_CFLAGS)'; \
- if test x"$$FLAGS" != x"`cat $(OUTPUT)PERF-CFLAGS 2>/dev/null`" ; then \
- echo 1>&2 " * new build flags or prefix"; \
- echo "$$FLAGS" >$(OUTPUT)PERF-CFLAGS; \
- fi
-
-### Testing rules
-
-# GNU make supports exporting all variables by "export" without parameters.
-# However, the environment gets quite big, and some programs have problems
-# with that.
-
-check: $(OUTPUT)common-cmds.h
- if sparse; \
- then \
- for i in *.c */*.c; \
- do \
- sparse $(CFLAGS) $(SPARSE_FLAGS) $$i || exit; \
- done; \
- else \
- exit 1; \
- fi
-
-### Installation rules
-
-install-bin: all
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
- $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
- $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
-ifndef NO_LIBPERL
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
- $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
- $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'
- $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
-endif
-ifndef NO_LIBPYTHON
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
- $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
- $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'
- $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
-endif
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'
- $(INSTALL) bash_completion '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'
- $(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
- $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
-
-install: install-bin try-install-man
-
-install-python_ext:
- $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
-
-# 'make install-doc' should call 'make -C Documentation install'
-$(INSTALL_DOC_TARGETS):
- $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:-doc=)
-
-### Cleaning rules
-
-clean: $(LIBTRACEEVENT)-clean $(LIBLK)-clean
- $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS)
- $(RM) $(ALL_PROGRAMS) perf
- $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
- $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
- $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
- $(RM) $(OUTPUT)util/*-bison*
- $(RM) $(OUTPUT)util/*-flex*
- $(python-clean)
-
-.PHONY: all install clean strip $(LIBTRACEEVENT) $(LIBLK)
-.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
-.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS
+#
+# All other targets get passed through:
+#
+%:
+ $(print_msg)
+ $(make)
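
The wrapper above keeps only a few explicit targets and forwards everything else to the new Makefile.perf through GNU make's match-anything pattern rule. A minimal sketch of that forwarding idiom, assuming a simple "make" helper variable (the real wrapper's print_msg and make definitions live outside this hunk):

    # hypothetical forwarding wrapper, reduced to its core
    make = $(MAKE) -f Makefile.perf

    %:
    	$(make) $@

    clean:
    	$(make) clean

Any goal that no explicit rule claims, such as 'make perf' or 'make install', matches '%' and is re-run against Makefile.perf.
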
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
new file mode 100644
index 000000000000..8a9ca3836043
--- /dev/null
+++ b/tools/perf/Makefile.perf
@@ -0,0 +1,889 @@
+include ../scripts/Makefile.include
+
+# The default target of this Makefile is...
+all:
+
+include config/utilities.mak
+
+# Define V to have a more verbose compile.
+#
+# Define O to save output files in a separate directory.
+#
+# Define ARCH as name of target architecture if you want cross-builds.
+#
+# Define CROSS_COMPILE as prefix name of compiler if you want cross-builds.
+#
+# Define NO_LIBPERL to disable perl script extension.
+#
+# Define NO_LIBPYTHON to disable python script extension.
+#
+# Define PYTHON to point to the python binary if the default
+# `python' is not correct; for example: PYTHON=python2
+#
+# Define PYTHON_CONFIG to point to the python-config binary if
+# the default `$(PYTHON)-config' is not correct.
+#
+# Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8
+#
+# Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72.
+#
+# Define LDFLAGS=-static to build a static binary.
+#
+# Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
+#
+# Define NO_DWARF if you do not want debug-info analysis feature at all.
+#
+# Define WERROR=0 to disable treating any warnings as errors.
+#
+# Define NO_NEWT if you do not want TUI support. (deprecated)
+#
+# Define NO_SLANG if you do not want TUI support.
+#
+# Define NO_GTK2 if you do not want GTK+ GUI support.
+#
+# Define NO_DEMANGLE if you do not want C++ symbol demangling.
+#
+# Define NO_LIBELF if you do not want libelf dependency (e.g. cross-builds)
+#
+# Define NO_LIBUNWIND if you do not want libunwind dependency for dwarf
+# backtrace post unwind.
+#
+# Define NO_BACKTRACE if you do not want stack backtrace debug feature
+#
+# Define NO_LIBNUMA if you do not want numa perf benchmark
+#
+# Define NO_LIBAUDIT if you do not want libaudit support
+#
+# Define NO_LIBBIONIC if you do not want bionic support
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
+endif
+
+ifneq ($(objtree),)
+#$(info Determined 'objtree' to be $(objtree))
+endif
+
+ifneq ($(OUTPUT),)
+#$(info Determined 'OUTPUT' to be $(OUTPUT))
+endif
+
+$(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD
+ @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
+ @touch $(OUTPUT)PERF-VERSION-FILE
+
+CC = $(CROSS_COMPILE)gcc
+AR = $(CROSS_COMPILE)ar
+
+RM = rm -f
+LN = ln -f
+MKDIR = mkdir
+FIND = find
+INSTALL = install
+FLEX = flex
+BISON = bison
+STRIP = strip
+
+LK_DIR = $(srctree)/tools/lib/lk/
+TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
+
+# include config/Makefile by default and rule out
+# non-config cases
+config := 1
+
+NON_CONFIG_TARGETS := clean TAGS tags cscope help
+
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
+ config := 0
+endif
+endif
+
+ifeq ($(config),1)
+include config/Makefile
+endif
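
The guard above lets goals that need no feature detection (clean, help, tags and friends) skip the relatively expensive config/Makefile include; it is pulled in as soon as any other goal appears on the command line. A small standalone illustration of the same filter-out test (variable names hypothetical):

    LIGHT_TARGETS := clean help tags
    need_config := 1
    ifdef MAKECMDGOALS
    ifeq ($(filter-out $(LIGHT_TARGETS),$(MAKECMDGOALS)),)
      need_config := 0
    endif
    endif
    # 'make clean'      -> nothing survives filter-out -> need_config = 0
    # 'make clean all'  -> 'all' survives              -> need_config = 1
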
+
+export prefix bindir sharedir sysconfdir
+
+# sparse is architecture-neutral, which means that we need to tell it
+# explicitly what architecture to check for. Fix this up for yours.
+SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
+
+# Guard against environment variables
+BUILTIN_OBJS =
+LIB_H =
+LIB_OBJS =
+GTK_OBJS =
+PYRF_OBJS =
+SCRIPT_SH =
+
+SCRIPT_SH += perf-archive.sh
+
+grep-libs = $(filter -l%,$(1))
+strip-libs = $(filter-out -l%,$(1))
+
+ifneq ($(OUTPUT),)
+ TE_PATH=$(OUTPUT)
+ifneq ($(subdir),)
+ LK_PATH=$(OUTPUT)/../lib/lk/
+else
+ LK_PATH=$(OUTPUT)
+endif
+else
+ TE_PATH=$(TRACE_EVENT_DIR)
+ LK_PATH=$(LK_DIR)
+endif
+
+LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
+export LIBTRACEEVENT
+
+LIBLK = $(LK_PATH)liblk.a
+export LIBLK
+
+# python extension build directories
+PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
+PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
+PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/
+export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
+
+python-clean := $(call QUIET_CLEAN, python) $(RM) -r $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so
+
+PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
+PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBLK)
+
+$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS)
+ $(QUIET_GEN)CFLAGS='$(CFLAGS)' $(PYTHON_WORD) util/setup.py \
+ --quiet build_ext; \
+ mkdir -p $(OUTPUT)python && \
+ cp $(PYTHON_EXTBUILD_LIB)perf.so $(OUTPUT)python/
+#
+# No Perl scripts right now:
+#
+
+SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH))
+
+#
+# Single 'perf' binary right now:
+#
+PROGRAMS += $(OUTPUT)perf
+
+# what 'all' will build and 'install' will install, in perfexecdir
+ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
+
+# what 'all' will build but not install in perfexecdir
+OTHER_PROGRAMS = $(OUTPUT)perf
+
+# Set paths to tools early so that they can be used for version tests.
+ifndef SHELL_PATH
+ SHELL_PATH = /bin/sh
+endif
+ifndef PERL_PATH
+ PERL_PATH = /usr/bin/perl
+endif
+
+export PERL_PATH
+
+$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
+ $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
+
+$(OUTPUT)util/parse-events-bison.c: util/parse-events.y
+ $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c -p parse_events_
+
+$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c
+ $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
+
+$(OUTPUT)util/pmu-bison.c: util/pmu.y
+ $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_
+
+$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
+$(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
+
+LIB_FILE=$(OUTPUT)libperf.a
+
+LIB_H += ../../include/uapi/linux/perf_event.h
+LIB_H += ../../include/linux/rbtree.h
+LIB_H += ../../include/linux/list.h
+LIB_H += ../../include/uapi/linux/const.h
+LIB_H += ../../include/linux/hash.h
+LIB_H += ../../include/linux/stringify.h
+LIB_H += util/include/linux/bitmap.h
+LIB_H += util/include/linux/bitops.h
+LIB_H += util/include/linux/compiler.h
+LIB_H += util/include/linux/const.h
+LIB_H += util/include/linux/ctype.h
+LIB_H += util/include/linux/kernel.h
+LIB_H += util/include/linux/list.h
+LIB_H += util/include/linux/export.h
+LIB_H += util/include/linux/magic.h
+LIB_H += util/include/linux/poison.h
+LIB_H += util/include/linux/prefetch.h
+LIB_H += util/include/linux/rbtree.h
+LIB_H += util/include/linux/rbtree_augmented.h
+LIB_H += util/include/linux/string.h
+LIB_H += util/include/linux/types.h
+LIB_H += util/include/linux/linkage.h
+LIB_H += util/include/asm/asm-offsets.h
+LIB_H += util/include/asm/bug.h
+LIB_H += util/include/asm/byteorder.h
+LIB_H += util/include/asm/hweight.h
+LIB_H += util/include/asm/swab.h
+LIB_H += util/include/asm/system.h
+LIB_H += util/include/asm/uaccess.h
+LIB_H += util/include/dwarf-regs.h
+LIB_H += util/include/asm/dwarf2.h
+LIB_H += util/include/asm/cpufeature.h
+LIB_H += util/include/asm/unistd_32.h
+LIB_H += util/include/asm/unistd_64.h
+LIB_H += perf.h
+LIB_H += util/annotate.h
+LIB_H += util/cache.h
+LIB_H += util/callchain.h
+LIB_H += util/build-id.h
+LIB_H += util/debug.h
+LIB_H += util/sysfs.h
+LIB_H += util/pmu.h
+LIB_H += util/event.h
+LIB_H += util/evsel.h
+LIB_H += util/evlist.h
+LIB_H += util/exec_cmd.h
+LIB_H += util/types.h
+LIB_H += util/levenshtein.h
+LIB_H += util/machine.h
+LIB_H += util/map.h
+LIB_H += util/parse-options.h
+LIB_H += util/parse-events.h
+LIB_H += util/quote.h
+LIB_H += util/util.h
+LIB_H += util/xyarray.h
+LIB_H += util/header.h
+LIB_H += util/help.h
+LIB_H += util/session.h
+LIB_H += util/strbuf.h
+LIB_H += util/strlist.h
+LIB_H += util/strfilter.h
+LIB_H += util/svghelper.h
+LIB_H += util/tool.h
+LIB_H += util/run-command.h
+LIB_H += util/sigchain.h
+LIB_H += util/dso.h
+LIB_H += util/symbol.h
+LIB_H += util/color.h
+LIB_H += util/values.h
+LIB_H += util/sort.h
+LIB_H += util/hist.h
+LIB_H += util/thread.h
+LIB_H += util/thread_map.h
+LIB_H += util/trace-event.h
+LIB_H += util/probe-finder.h
+LIB_H += util/dwarf-aux.h
+LIB_H += util/probe-event.h
+LIB_H += util/pstack.h
+LIB_H += util/cpumap.h
+LIB_H += util/top.h
+LIB_H += $(ARCH_INCLUDE)
+LIB_H += util/cgroup.h
+LIB_H += $(LIB_INCLUDE)traceevent/event-parse.h
+LIB_H += util/target.h
+LIB_H += util/rblist.h
+LIB_H += util/intlist.h
+LIB_H += util/perf_regs.h
+LIB_H += util/unwind.h
+LIB_H += util/vdso.h
+LIB_H += ui/helpline.h
+LIB_H += ui/progress.h
+LIB_H += ui/util.h
+LIB_H += ui/ui.h
+
+LIB_OBJS += $(OUTPUT)util/abspath.o
+LIB_OBJS += $(OUTPUT)util/alias.o
+LIB_OBJS += $(OUTPUT)util/annotate.o
+LIB_OBJS += $(OUTPUT)util/build-id.o
+LIB_OBJS += $(OUTPUT)util/config.o
+LIB_OBJS += $(OUTPUT)util/ctype.o
+LIB_OBJS += $(OUTPUT)util/sysfs.o
+LIB_OBJS += $(OUTPUT)util/pmu.o
+LIB_OBJS += $(OUTPUT)util/environment.o
+LIB_OBJS += $(OUTPUT)util/event.o
+LIB_OBJS += $(OUTPUT)util/evlist.o
+LIB_OBJS += $(OUTPUT)util/evsel.o
+LIB_OBJS += $(OUTPUT)util/exec_cmd.o
+LIB_OBJS += $(OUTPUT)util/help.o
+LIB_OBJS += $(OUTPUT)util/levenshtein.o
+LIB_OBJS += $(OUTPUT)util/parse-options.o
+LIB_OBJS += $(OUTPUT)util/parse-events.o
+LIB_OBJS += $(OUTPUT)util/path.o
+LIB_OBJS += $(OUTPUT)util/rbtree.o
+LIB_OBJS += $(OUTPUT)util/bitmap.o
+LIB_OBJS += $(OUTPUT)util/hweight.o
+LIB_OBJS += $(OUTPUT)util/run-command.o
+LIB_OBJS += $(OUTPUT)util/quote.o
+LIB_OBJS += $(OUTPUT)util/strbuf.o
+LIB_OBJS += $(OUTPUT)util/string.o
+LIB_OBJS += $(OUTPUT)util/strlist.o
+LIB_OBJS += $(OUTPUT)util/strfilter.o
+LIB_OBJS += $(OUTPUT)util/top.o
+LIB_OBJS += $(OUTPUT)util/usage.o
+LIB_OBJS += $(OUTPUT)util/wrapper.o
+LIB_OBJS += $(OUTPUT)util/sigchain.o
+LIB_OBJS += $(OUTPUT)util/dso.o
+LIB_OBJS += $(OUTPUT)util/symbol.o
+LIB_OBJS += $(OUTPUT)util/symbol-elf.o
+LIB_OBJS += $(OUTPUT)util/color.o
+LIB_OBJS += $(OUTPUT)util/pager.o
+LIB_OBJS += $(OUTPUT)util/header.o
+LIB_OBJS += $(OUTPUT)util/callchain.o
+LIB_OBJS += $(OUTPUT)util/values.o
+LIB_OBJS += $(OUTPUT)util/debug.o
+LIB_OBJS += $(OUTPUT)util/machine.o
+LIB_OBJS += $(OUTPUT)util/map.o
+LIB_OBJS += $(OUTPUT)util/pstack.o
+LIB_OBJS += $(OUTPUT)util/session.o
+LIB_OBJS += $(OUTPUT)util/thread.o
+LIB_OBJS += $(OUTPUT)util/thread_map.o
+LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
+LIB_OBJS += $(OUTPUT)util/parse-events-flex.o
+LIB_OBJS += $(OUTPUT)util/parse-events-bison.o
+LIB_OBJS += $(OUTPUT)util/pmu-flex.o
+LIB_OBJS += $(OUTPUT)util/pmu-bison.o
+LIB_OBJS += $(OUTPUT)util/trace-event-read.o
+LIB_OBJS += $(OUTPUT)util/trace-event-info.o
+LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o
+LIB_OBJS += $(OUTPUT)util/svghelper.o
+LIB_OBJS += $(OUTPUT)util/sort.o
+LIB_OBJS += $(OUTPUT)util/hist.o
+LIB_OBJS += $(OUTPUT)util/probe-event.o
+LIB_OBJS += $(OUTPUT)util/util.o
+LIB_OBJS += $(OUTPUT)util/xyarray.o
+LIB_OBJS += $(OUTPUT)util/cpumap.o
+LIB_OBJS += $(OUTPUT)util/cgroup.o
+LIB_OBJS += $(OUTPUT)util/target.o
+LIB_OBJS += $(OUTPUT)util/rblist.o
+LIB_OBJS += $(OUTPUT)util/intlist.o
+LIB_OBJS += $(OUTPUT)util/vdso.o
+LIB_OBJS += $(OUTPUT)util/stat.o
+LIB_OBJS += $(OUTPUT)util/record.o
+LIB_OBJS += $(OUTPUT)util/srcline.o
+LIB_OBJS += $(OUTPUT)util/data.o
+
+LIB_OBJS += $(OUTPUT)ui/setup.o
+LIB_OBJS += $(OUTPUT)ui/helpline.o
+LIB_OBJS += $(OUTPUT)ui/progress.o
+LIB_OBJS += $(OUTPUT)ui/util.o
+LIB_OBJS += $(OUTPUT)ui/hist.o
+LIB_OBJS += $(OUTPUT)ui/stdio/hist.o
+
+LIB_OBJS += $(OUTPUT)arch/common.o
+
+LIB_OBJS += $(OUTPUT)tests/parse-events.o
+LIB_OBJS += $(OUTPUT)tests/dso-data.o
+LIB_OBJS += $(OUTPUT)tests/attr.o
+LIB_OBJS += $(OUTPUT)tests/vmlinux-kallsyms.o
+LIB_OBJS += $(OUTPUT)tests/open-syscall.o
+LIB_OBJS += $(OUTPUT)tests/open-syscall-all-cpus.o
+LIB_OBJS += $(OUTPUT)tests/open-syscall-tp-fields.o
+LIB_OBJS += $(OUTPUT)tests/mmap-basic.o
+LIB_OBJS += $(OUTPUT)tests/perf-record.o
+LIB_OBJS += $(OUTPUT)tests/rdpmc.o
+LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o
+LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o
+LIB_OBJS += $(OUTPUT)tests/pmu.o
+LIB_OBJS += $(OUTPUT)tests/hists_link.o
+LIB_OBJS += $(OUTPUT)tests/python-use.o
+LIB_OBJS += $(OUTPUT)tests/bp_signal.o
+LIB_OBJS += $(OUTPUT)tests/bp_signal_overflow.o
+LIB_OBJS += $(OUTPUT)tests/task-exit.o
+LIB_OBJS += $(OUTPUT)tests/sw-clock.o
+ifeq ($(ARCH),x86)
+LIB_OBJS += $(OUTPUT)tests/perf-time-to-tsc.o
+endif
+LIB_OBJS += $(OUTPUT)tests/code-reading.o
+LIB_OBJS += $(OUTPUT)tests/sample-parsing.o
+LIB_OBJS += $(OUTPUT)tests/parse-no-sample-id-all.o
+
+BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
+BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
+# Benchmark modules
+BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
+BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
+ifeq ($(RAW_ARCH),x86_64)
+BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o
+BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o
+endif
+BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
+BUILTIN_OBJS += $(OUTPUT)bench/mem-memset.o
+
+BUILTIN_OBJS += $(OUTPUT)builtin-diff.o
+BUILTIN_OBJS += $(OUTPUT)builtin-evlist.o
+BUILTIN_OBJS += $(OUTPUT)builtin-help.o
+BUILTIN_OBJS += $(OUTPUT)builtin-sched.o
+BUILTIN_OBJS += $(OUTPUT)builtin-buildid-list.o
+BUILTIN_OBJS += $(OUTPUT)builtin-buildid-cache.o
+BUILTIN_OBJS += $(OUTPUT)builtin-list.o
+BUILTIN_OBJS += $(OUTPUT)builtin-record.o
+BUILTIN_OBJS += $(OUTPUT)builtin-report.o
+BUILTIN_OBJS += $(OUTPUT)builtin-stat.o
+BUILTIN_OBJS += $(OUTPUT)builtin-timechart.o
+BUILTIN_OBJS += $(OUTPUT)builtin-top.o
+BUILTIN_OBJS += $(OUTPUT)builtin-script.o
+BUILTIN_OBJS += $(OUTPUT)builtin-probe.o
+BUILTIN_OBJS += $(OUTPUT)builtin-kmem.o
+BUILTIN_OBJS += $(OUTPUT)builtin-lock.o
+BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o
+BUILTIN_OBJS += $(OUTPUT)builtin-inject.o
+BUILTIN_OBJS += $(OUTPUT)tests/builtin-test.o
+BUILTIN_OBJS += $(OUTPUT)builtin-mem.o
+
+PERFLIBS = $(LIB_FILE) $(LIBLK) $(LIBTRACEEVENT)
+
+# We choose to avoid "if .. else if .. else .. endif endif"
+# because maintaining the nesting to match is a pain. If
+# we had "elif" things would have been much nicer...
+
+-include arch/$(ARCH)/Makefile
+
+ifneq ($(OUTPUT),)
+ CFLAGS += -I$(OUTPUT)
+endif
+
+ifdef NO_LIBELF
+EXTLIBS := $(filter-out -lelf,$(EXTLIBS))
+
+# Remove ELF/DWARF dependent codes
+LIB_OBJS := $(filter-out $(OUTPUT)util/symbol-elf.o,$(LIB_OBJS))
+LIB_OBJS := $(filter-out $(OUTPUT)util/dwarf-aux.o,$(LIB_OBJS))
+LIB_OBJS := $(filter-out $(OUTPUT)util/probe-event.o,$(LIB_OBJS))
+LIB_OBJS := $(filter-out $(OUTPUT)util/probe-finder.o,$(LIB_OBJS))
+
+BUILTIN_OBJS := $(filter-out $(OUTPUT)builtin-probe.o,$(BUILTIN_OBJS))
+
+# Use minimal symbol handling
+LIB_OBJS += $(OUTPUT)util/symbol-minimal.o
+
+else # NO_LIBELF
+ifndef NO_DWARF
+ LIB_OBJS += $(OUTPUT)util/probe-finder.o
+ LIB_OBJS += $(OUTPUT)util/dwarf-aux.o
+endif # NO_DWARF
+endif # NO_LIBELF
+
+ifndef NO_LIBUNWIND
+ LIB_OBJS += $(OUTPUT)util/unwind.o
+endif
+LIB_OBJS += $(OUTPUT)tests/keep-tracking.o
+
+ifndef NO_LIBAUDIT
+ BUILTIN_OBJS += $(OUTPUT)builtin-trace.o
+endif
+
+ifndef NO_SLANG
+ LIB_OBJS += $(OUTPUT)ui/browser.o
+ LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o
+ LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
+ LIB_OBJS += $(OUTPUT)ui/browsers/map.o
+ LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o
+ LIB_OBJS += $(OUTPUT)ui/tui/setup.o
+ LIB_OBJS += $(OUTPUT)ui/tui/util.o
+ LIB_OBJS += $(OUTPUT)ui/tui/helpline.o
+ LIB_OBJS += $(OUTPUT)ui/tui/progress.o
+ LIB_H += ui/tui/tui.h
+ LIB_H += ui/browser.h
+ LIB_H += ui/browsers/map.h
+ LIB_H += ui/keysyms.h
+ LIB_H += ui/libslang.h
+endif
+
+ifndef NO_GTK2
+ ALL_PROGRAMS += $(OUTPUT)libperf-gtk.so
+
+ GTK_OBJS += $(OUTPUT)ui/gtk/browser.o
+ GTK_OBJS += $(OUTPUT)ui/gtk/hists.o
+ GTK_OBJS += $(OUTPUT)ui/gtk/setup.o
+ GTK_OBJS += $(OUTPUT)ui/gtk/util.o
+ GTK_OBJS += $(OUTPUT)ui/gtk/helpline.o
+ GTK_OBJS += $(OUTPUT)ui/gtk/progress.o
+ GTK_OBJS += $(OUTPUT)ui/gtk/annotate.o
+
+install-gtk: $(OUTPUT)libperf-gtk.so
+ $(call QUIET_INSTALL, 'GTK UI') \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(libdir_SQ)'; \
+ $(INSTALL) $(OUTPUT)libperf-gtk.so '$(DESTDIR_SQ)$(libdir_SQ)'
+endif
+
+ifndef NO_LIBPERL
+ LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o
+ LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o
+endif
+
+ifndef NO_LIBPYTHON
+ LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
+ LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
+endif
+
+ifeq ($(NO_PERF_REGS),0)
+ ifeq ($(ARCH),x86)
+ LIB_H += arch/x86/include/perf_regs.h
+ endif
+endif
+
+ifndef NO_LIBNUMA
+ BUILTIN_OBJS += $(OUTPUT)bench/numa.o
+endif
+
+ifdef ASCIIDOC8
+ export ASCIIDOC8
+endif
+
+LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
+
+export INSTALL SHELL_PATH
+
+### Build rules
+
+SHELL = $(SHELL_PATH)
+
+all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
+
+please_set_SHELL_PATH_to_a_more_modern_shell:
+ @$$(:)
+
+shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell
+
+strip: $(PROGRAMS) $(OUTPUT)perf
+ $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf
+
+$(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -include $(OUTPUT)PERF-VERSION-FILE \
+ '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
+ $(CFLAGS) -c $(filter %.c,$^) -o $@
+
+$(OUTPUT)perf: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OUTPUT)perf.o \
+ $(BUILTIN_OBJS) $(LIBS) -o $@
+
+$(GTK_OBJS): $(OUTPUT)%.o: %.c $(LIB_H)
+ $(QUIET_CC)$(CC) -o $@ -c -fPIC $(CFLAGS) $(GTK_CFLAGS) $<
+
+$(OUTPUT)libperf-gtk.so: $(GTK_OBJS) $(PERFLIBS)
+ $(QUIET_LINK)$(CC) -o $@ -shared $(ALL_LDFLAGS) $(filter %.o,$^) $(GTK_LIBS)
+
+$(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
+ '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
+ '-DPERF_MAN_PATH="$(mandir_SQ)"' \
+ '-DPERF_INFO_PATH="$(infodir_SQ)"' $<
+
+$(OUTPUT)builtin-timechart.o: builtin-timechart.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
+ '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
+ '-DPERF_MAN_PATH="$(mandir_SQ)"' \
+ '-DPERF_INFO_PATH="$(infodir_SQ)"' $<
+
+$(OUTPUT)common-cmds.h: util/generate-cmdlist.sh command-list.txt
+
+$(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
+ $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
+
+$(SCRIPTS) : % : %.sh
+ $(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@'
+
+# These can record PERF_VERSION
+$(OUTPUT)perf.o perf.spec \
+ $(SCRIPTS) \
+ : $(OUTPUT)PERF-VERSION-FILE
+
+.SUFFIXES:
+
+#
+# If a target does not match any of the later rules, then prefix it with $(OUTPUT).
+# This makes targets like 'make O=/tmp/perf perf.o' work in a natural way.
+#
+ifneq ($(OUTPUT),)
+%.o: $(OUTPUT)%.o
+ @echo " # Redirected target $@ => $(OUTPUT)$@"
+util/%.o: $(OUTPUT)util/%.o
+ @echo " # Redirected target $@ => $(OUTPUT)$@"
+bench/%.o: $(OUTPUT)bench/%.o
+ @echo " # Redirected target $@ => $(OUTPUT)$@"
+tests/%.o: $(OUTPUT)tests/%.o
+ @echo " # Redirected target $@ => $(OUTPUT)$@"
+endif
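
These stub rules exist only when O= redirects the build, and they simply map a source-relative object name onto its copy under $(OUTPUT). A usage sketch (output path hypothetical):

    # with O= set, a bare object goal is redirected into the output tree:
    #   make -C tools/perf O=/tmp/perf-build/ util/evsel.o
    # matches 'util/%.o: $(OUTPUT)util/%.o' above and actually builds
    # /tmp/perf-build/util/evsel.o via the generic $(OUTPUT)%.o rules below.
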
+
+# These two need to be here so that when O= is not used they take precedence
+# over the general rule for .o
+
+$(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(CFLAGS) -w $<
+
+$(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w $<
+
+$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $<
+$(OUTPUT)%.i: %.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -E $(CFLAGS) $<
+$(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -S $(CFLAGS) $<
+$(OUTPUT)%.o: %.S
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $<
+$(OUTPUT)%.s: %.S
+ $(QUIET_CC)$(CC) -o $@ -E $(CFLAGS) $<
+
+$(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
+ '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \
+ '-DPREFIX="$(prefix_SQ)"' \
+ $<
+
+$(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
+ '-DBINDIR="$(bindir_SQ)"' -DPYTHON='"$(PYTHON_WORD)"' \
+ $<
+
+$(OUTPUT)tests/python-use.o: tests/python-use.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
+ -DPYTHONPATH='"$(OUTPUT)python"' \
+ -DPYTHON='"$(PYTHON_WORD)"' \
+ $<
+
+$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
+$(OUTPUT)ui/setup.o: ui/setup.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DLIBDIR='"$(libdir_SQ)"' $<
+
+$(OUTPUT)ui/browser.o: ui/browser.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
+
+$(OUTPUT)ui/browsers/annotate.o: ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
+
+$(OUTPUT)ui/browsers/hists.o: ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
+
+$(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
+
+$(OUTPUT)ui/browsers/scripts.o: ui/browsers/scripts.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
+
+$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
+$(OUTPUT)util/parse-events.o: util/parse-events.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-redundant-decls $<
+
+$(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default $<
+
+$(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default $<
+
+$(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
+
+$(OUTPUT)scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
+
+$(OUTPUT)perf-%: %.o $(PERFLIBS)
+ $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS)
+
+$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
+$(patsubst perf-%,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
+
+# We compile into subdirectories. If the target directory is not the source directory, they might not exist, so
+# we make the various files depend on their directories.
+DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(GTK_OBJS)
+DIRECTORY_DEPS += $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
+$(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS)))
+# In the second step, we make a rule to actually create these directories
+$(sort $(dir $(DIRECTORY_DEPS))):
+ $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
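
The '|' turns the directory list into order-only prerequisites: the directories must exist before any object in them is built, but their ever-changing timestamps never force a rebuild. The idiom in isolation (file names hypothetical):

    objs := out/a.o out/sub/b.o

    $(objs): | $(sort $(dir $(objs)))

    $(sort $(dir $(objs))):
    	mkdir -p $@

    out/%.o: %.c
    	$(CC) -c -o $@ $<
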
+
+$(LIB_FILE): $(LIB_OBJS)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS)
+
+# libtraceevent.a
+TE_SOURCES = $(wildcard $(TRACE_EVENT_DIR)*.[ch])
+
+$(LIBTRACEEVENT): $(TE_SOURCES)
+ $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) libtraceevent.a
+
+$(LIBTRACEEVENT)-clean:
+ $(call QUIET_CLEAN, libtraceevent)
+ @$(MAKE) -C $(TRACE_EVENT_DIR) O=$(OUTPUT) clean >/dev/null
+
+LIBLK_SOURCES = $(wildcard $(LK_PATH)*.[ch])
+
+# if subdir is set, we've been called from above so target has been built
+# already
+$(LIBLK): $(LIBLK_SOURCES)
+ifeq ($(subdir),)
+ $(QUIET_SUBDIR0)$(LK_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) liblk.a
+endif
+
+$(LIBLK)-clean:
+ifeq ($(subdir),)
+ $(call QUIET_CLEAN, liblk)
+ @$(MAKE) -C $(LK_DIR) O=$(OUTPUT) clean >/dev/null
+endif
+
+help:
+ @echo 'Perf make targets:'
+ @echo ' doc - make *all* documentation (see below)'
+ @echo ' man - make manpage documentation (access with man <foo>)'
+ @echo ' html - make html documentation'
+ @echo ' info - make GNU info documentation (access with info <foo>)'
+ @echo ' pdf - make pdf documentation'
+ @echo ' TAGS - use etags to make tag information for source browsing'
+ @echo ' tags - use ctags to make tag information for source browsing'
+ @echo ' cscope - use cscope to make interactive browsing database'
+ @echo ''
+ @echo 'Perf install targets:'
+ @echo ' NOTE: documentation build requires asciidoc, xmlto packages to be installed'
+ @echo ' HINT: use "make prefix=<path> <install target>" to install to a particular'
+ @echo ' path like make prefix=/usr/local install install-doc'
+ @echo ' install - install compiled binaries'
+ @echo ' install-doc - install *all* documentation'
+ @echo ' install-man - install manpage documentation'
+ @echo ' install-html - install html documentation'
+ @echo ' install-info - install GNU info documentation'
+ @echo ' install-pdf - install pdf documentation'
+ @echo ''
+ @echo ' quick-install-doc - alias for quick-install-man'
+ @echo ' quick-install-man - install the documentation quickly'
+ @echo ' quick-install-html - install the html documentation quickly'
+ @echo ''
+ @echo 'Perf maintainer targets:'
+ @echo ' clean - clean all binary objects and build output'
+
+
+DOC_TARGETS := doc man html info pdf
+
+INSTALL_DOC_TARGETS := $(patsubst %,install-%,$(DOC_TARGETS)) try-install-man
+INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html
+
+# 'make doc' should call 'make -C Documentation all'
+$(DOC_TARGETS):
+ $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all)
+
+TAGS:
+ $(RM) TAGS
+ $(FIND) . -name '*.[hcS]' -print | xargs etags -a
+
+tags:
+ $(RM) tags
+ $(FIND) . -name '*.[hcS]' -print | xargs ctags -a
+
+cscope:
+ $(RM) cscope*
+ $(FIND) . -name '*.[hcS]' -print | xargs cscope -b
+
+### Detect prefix changes
+TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):\
+ $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ)
+
+$(OUTPUT)PERF-CFLAGS: .FORCE-PERF-CFLAGS
+ @FLAGS='$(TRACK_CFLAGS)'; \
+ if test x"$$FLAGS" != x"`cat $(OUTPUT)PERF-CFLAGS 2>/dev/null`" ; then \
+ echo 1>&2 " FLAGS: * new build flags or prefix"; \
+ echo "$$FLAGS" >$(OUTPUT)PERF-CFLAGS; \
+ fi
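
PERF-CFLAGS acts as a tracking file: the phony .FORCE-PERF-CFLAGS prerequisite makes the recipe run on every invocation, but the file is rewritten only when the tracked string changes, so objects that list it as a prerequisite are recompiled exactly when the flags or install prefix change. A stripped-down sketch of the same idiom (names hypothetical):

    TRACKED := $(CFLAGS):$(prefix)

    FLAGS-FILE: .FORCE
    	@echo '$(TRACKED)' | cmp -s - $@ || echo '$(TRACKED)' > $@

    .PHONY: .FORCE

    %.o: %.c FLAGS-FILE
    	$(CC) $(CFLAGS) -c -o $@ $<
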
+
+### Testing rules
+
+# GNU make supports exporting all variables by "export" without parameters.
+# However, the environment gets quite big, and some programs have problems
+# with that.
+
+check: $(OUTPUT)common-cmds.h
+ if sparse; \
+ then \
+ for i in *.c */*.c; \
+ do \
+ sparse $(CFLAGS) $(SPARSE_FLAGS) $$i || exit; \
+ done; \
+ else \
+ exit 1; \
+ fi
+
+### Installation rules
+
+install-gtk:
+
+install-bin: all install-gtk
+ $(call QUIET_INSTALL, binaries) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'; \
+ $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'; \
+ $(LN) '$(DESTDIR_SQ)$(bindir_SQ)/perf' '$(DESTDIR_SQ)$(bindir_SQ)/trace'
+ $(call QUIET_INSTALL, libexec) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+ $(call QUIET_INSTALL, perf-archive) \
+ $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+ifndef NO_LIBPERL
+ $(call QUIET_INSTALL, perl-scripts) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
+ $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
+ $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'; \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'; \
+ $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
+endif
+ifndef NO_LIBPYTHON
+ $(call QUIET_INSTALL, python-scripts) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'; \
+ $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
+ $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \
+ $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
+endif
+ $(call QUIET_INSTALL, bash_completion-script) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'; \
+ $(INSTALL) bash_completion '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
+ $(call QUIET_INSTALL, tests) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
+ $(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
+ $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
+
+install: install-bin try-install-man
+
+install-python_ext:
+ $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
+
+# 'make install-doc' should call 'make -C Documentation install'
+$(INSTALL_DOC_TARGETS):
+ $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:-doc=)
+
+### Cleaning rules
+
+#
+# This is here, not in config/Makefile, because config/Makefile does
+# not get included for the clean target:
+#
+config-clean:
+ $(call QUIET_CLEAN, config)
+ @$(MAKE) -C config/feature-checks clean >/dev/null
+
+clean: $(LIBTRACEEVENT)-clean $(LIBLK)-clean config-clean
+ $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS) $(GTK_OBJS)
+ $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf
+ $(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex*
+ $(call QUIET_CLEAN, Documentation)
+ @$(MAKE) -C Documentation O=$(OUTPUT) clean >/dev/null
+ $(python-clean)
+
+#
+# Trick: if ../../.git does not exist (for example, we are building out of tree),
+# then force version regeneration:
+#
+ifeq ($(wildcard ../../.git/HEAD),)
+ GIT-HEAD-PHONY = ../../.git/HEAD
+else
+ GIT-HEAD-PHONY =
+endif
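
Declaring the missing ../../.git/HEAD path phony keeps the PERF-VERSION-FILE rule near the top of this file working for tarball or out-of-tree builds: instead of failing on an absent prerequisite, make treats it as always out of date and regenerates the version file. The same trick in isolation (generator script hypothetical):

    ifeq ($(wildcard ../../.git/HEAD),)
      GIT-HEAD-PHONY := ../../.git/HEAD
    else
      GIT-HEAD-PHONY :=
    endif

    VERSION-FILE: ../../.git/HEAD
    	./gen-version.sh > $@

    .PHONY: $(GIT-HEAD-PHONY)
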
+
+.PHONY: all install clean config-clean strip install-gtk
+.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
+.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope .FORCE-PERF-CFLAGS
+
diff --git a/tools/perf/arch/arm/Makefile b/tools/perf/arch/arm/Makefile
index 15130b50dfe3..fe9b61e322a5 100644
--- a/tools/perf/arch/arm/Makefile
+++ b/tools/perf/arch/arm/Makefile
@@ -2,3 +2,6 @@ ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
endif
+ifndef NO_LIBUNWIND
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o
+endif
diff --git a/tools/perf/arch/arm/include/perf_regs.h b/tools/perf/arch/arm/include/perf_regs.h
new file mode 100644
index 000000000000..2a1cfde66b69
--- /dev/null
+++ b/tools/perf/arch/arm/include/perf_regs.h
@@ -0,0 +1,54 @@
+#ifndef ARCH_PERF_REGS_H
+#define ARCH_PERF_REGS_H
+
+#include <stdlib.h>
+#include "../../util/types.h"
+#include <asm/perf_regs.h>
+
+#define PERF_REGS_MASK ((1ULL << PERF_REG_ARM_MAX) - 1)
+#define PERF_REG_IP PERF_REG_ARM_PC
+#define PERF_REG_SP PERF_REG_ARM_SP
+
+static inline const char *perf_reg_name(int id)
+{
+ switch (id) {
+ case PERF_REG_ARM_R0:
+ return "r0";
+ case PERF_REG_ARM_R1:
+ return "r1";
+ case PERF_REG_ARM_R2:
+ return "r2";
+ case PERF_REG_ARM_R3:
+ return "r3";
+ case PERF_REG_ARM_R4:
+ return "r4";
+ case PERF_REG_ARM_R5:
+ return "r5";
+ case PERF_REG_ARM_R6:
+ return "r6";
+ case PERF_REG_ARM_R7:
+ return "r7";
+ case PERF_REG_ARM_R8:
+ return "r8";
+ case PERF_REG_ARM_R9:
+ return "r9";
+ case PERF_REG_ARM_R10:
+ return "r10";
+ case PERF_REG_ARM_FP:
+ return "fp";
+ case PERF_REG_ARM_IP:
+ return "ip";
+ case PERF_REG_ARM_SP:
+ return "sp";
+ case PERF_REG_ARM_LR:
+ return "lr";
+ case PERF_REG_ARM_PC:
+ return "pc";
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/arm/util/unwind.c b/tools/perf/arch/arm/util/unwind.c
new file mode 100644
index 000000000000..da3dc950550c
--- /dev/null
+++ b/tools/perf/arch/arm/util/unwind.c
@@ -0,0 +1,48 @@
+
+#include <errno.h>
+#include <libunwind.h>
+#include "perf_regs.h"
+#include "../../util/unwind.h"
+
+int unwind__arch_reg_id(int regnum)
+{
+ switch (regnum) {
+ case UNW_ARM_R0:
+ return PERF_REG_ARM_R0;
+ case UNW_ARM_R1:
+ return PERF_REG_ARM_R1;
+ case UNW_ARM_R2:
+ return PERF_REG_ARM_R2;
+ case UNW_ARM_R3:
+ return PERF_REG_ARM_R3;
+ case UNW_ARM_R4:
+ return PERF_REG_ARM_R4;
+ case UNW_ARM_R5:
+ return PERF_REG_ARM_R5;
+ case UNW_ARM_R6:
+ return PERF_REG_ARM_R6;
+ case UNW_ARM_R7:
+ return PERF_REG_ARM_R7;
+ case UNW_ARM_R8:
+ return PERF_REG_ARM_R8;
+ case UNW_ARM_R9:
+ return PERF_REG_ARM_R9;
+ case UNW_ARM_R10:
+ return PERF_REG_ARM_R10;
+ case UNW_ARM_R11:
+ return PERF_REG_ARM_FP;
+ case UNW_ARM_R12:
+ return PERF_REG_ARM_IP;
+ case UNW_ARM_R13:
+ return PERF_REG_ARM_SP;
+ case UNW_ARM_R14:
+ return PERF_REG_ARM_LR;
+ case UNW_ARM_R15:
+ return PERF_REG_ARM_PC;
+ default:
+ pr_err("unwind: invalid reg id %d\n", regnum);
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h
index 7fcdcdbee917..e84ca76aae77 100644
--- a/tools/perf/arch/x86/include/perf_regs.h
+++ b/tools/perf/arch/x86/include/perf_regs.h
@@ -5,7 +5,7 @@
#include "../../util/types.h"
#include <asm/perf_regs.h>
-#ifndef ARCH_X86_64
+#ifndef HAVE_ARCH_X86_64_SUPPORT
#define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1)
#else
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
@@ -52,7 +52,7 @@ static inline const char *perf_reg_name(int id)
return "FS";
case PERF_REG_X86_GS:
return "GS";
-#ifdef ARCH_X86_64
+#ifdef HAVE_ARCH_X86_64_SUPPORT
case PERF_REG_X86_R8:
return "R8";
case PERF_REG_X86_R9:
@@ -69,7 +69,7 @@ static inline const char *perf_reg_name(int id)
return "R14";
case PERF_REG_X86_R15:
return "R15";
-#endif /* ARCH_X86_64 */
+#endif /* HAVE_ARCH_X86_64_SUPPORT */
default:
return NULL;
}
diff --git a/tools/perf/arch/x86/util/unwind.c b/tools/perf/arch/x86/util/unwind.c
index 78d956eff96f..456a88cf5b37 100644
--- a/tools/perf/arch/x86/util/unwind.c
+++ b/tools/perf/arch/x86/util/unwind.c
@@ -4,7 +4,7 @@
#include "perf_regs.h"
#include "../../util/unwind.h"
-#ifdef ARCH_X86_64
+#ifdef HAVE_ARCH_X86_64_SUPPORT
int unwind__arch_reg_id(int regnum)
{
int id;
@@ -108,4 +108,4 @@ int unwind__arch_reg_id(int regnum)
return id;
}
-#endif /* ARCH_X86_64 */
+#endif /* HAVE_ARCH_X86_64_SUPPORT */
diff --git a/tools/perf/bash_completion b/tools/perf/bash_completion
index 56e6a12aab59..62e157db2e2b 100644
--- a/tools/perf/bash_completion
+++ b/tools/perf/bash_completion
@@ -1,17 +1,87 @@
# perf completion
-function_exists()
+# Taken from git.git's completion script.
+__my_reassemble_comp_words_by_ref()
{
- declare -F $1 > /dev/null
- return $?
+ local exclude i j first
+ # Which word separators to exclude?
+ exclude="${1//[^$COMP_WORDBREAKS]}"
+ cword_=$COMP_CWORD
+ if [ -z "$exclude" ]; then
+ words_=("${COMP_WORDS[@]}")
+ return
+ fi
+ # List of word completion separators has shrunk;
+ # re-assemble words to complete.
+ for ((i=0, j=0; i < ${#COMP_WORDS[@]}; i++, j++)); do
+ # Append each nonempty word consisting of just
+ # word separator characters to the current word.
+ first=t
+ while
+ [ $i -gt 0 ] &&
+ [ -n "${COMP_WORDS[$i]}" ] &&
+ # word consists of excluded word separators
+ [ "${COMP_WORDS[$i]//[^$exclude]}" = "${COMP_WORDS[$i]}" ]
+ do
+ # Attach to the previous token,
+ # unless the previous token is the command name.
+ if [ $j -ge 2 ] && [ -n "$first" ]; then
+ ((j--))
+ fi
+ first=
+ words_[$j]=${words_[j]}${COMP_WORDS[i]}
+ if [ $i = $COMP_CWORD ]; then
+ cword_=$j
+ fi
+ if (($i < ${#COMP_WORDS[@]} - 1)); then
+ ((i++))
+ else
+ # Done.
+ return
+ fi
+ done
+ words_[$j]=${words_[j]}${COMP_WORDS[i]}
+ if [ $i = $COMP_CWORD ]; then
+ cword_=$j
+ fi
+ done
}
-function_exists __ltrim_colon_completions ||
+type _get_comp_words_by_ref &>/dev/null ||
+_get_comp_words_by_ref()
+{
+ local exclude cur_ words_ cword_
+ if [ "$1" = "-n" ]; then
+ exclude=$2
+ shift 2
+ fi
+ __my_reassemble_comp_words_by_ref "$exclude"
+ cur_=${words_[cword_]}
+ while [ $# -gt 0 ]; do
+ case "$1" in
+ cur)
+ cur=$cur_
+ ;;
+ prev)
+ prev=${words_[$cword_-1]}
+ ;;
+ words)
+ words=("${words_[@]}")
+ ;;
+ cword)
+ cword=$cword_
+ ;;
+ esac
+ shift
+ done
+}
+
+type __ltrim_colon_completions &>/dev/null ||
__ltrim_colon_completions()
{
if [[ "$1" == *:* && "$COMP_WORDBREAKS" == *:* ]]; then
# Remove colon-word prefix from COMPREPLY items
- local colon_word=${1%${1##*:}}
+ local colon_word=${1%"${1##*:}"}
local i=${#COMPREPLY[*]}
while [[ $((--i)) -ge 0 ]]; do
COMPREPLY[$i]=${COMPREPLY[$i]#"$colon_word"}
@@ -19,23 +89,18 @@ __ltrim_colon_completions()
fi
}
-have perf &&
+type perf &>/dev/null &&
_perf()
{
- local cur prev cmd
+ local cur words cword prev cmd
COMPREPLY=()
- if function_exists _get_comp_words_by_ref; then
- _get_comp_words_by_ref -n : cur prev
- else
- cur=$(_get_cword :)
- prev=${COMP_WORDS[COMP_CWORD-1]}
- fi
+ _get_comp_words_by_ref -n =: cur words cword prev
- cmd=${COMP_WORDS[0]}
+ cmd=${words[0]}
# List perf subcommands or long options
- if [ $COMP_CWORD -eq 1 ]; then
+ if [ $cword -eq 1 ]; then
if [[ $cur == --* ]]; then
COMPREPLY=( $( compgen -W '--help --version \
--exec-path --html-path --paginate --no-pager \
@@ -45,18 +110,17 @@ _perf()
COMPREPLY=( $( compgen -W '$cmds' -- "$cur" ) )
fi
# List possible events for -e option
- elif [[ $prev == "-e" && "${COMP_WORDS[1]}" == @(record|stat|top) ]]; then
+ elif [[ $prev == "-e" && "${words[1]}" == @(record|stat|top) ]]; then
evts=$($cmd list --raw-dump)
COMPREPLY=( $( compgen -W '$evts' -- "$cur" ) )
__ltrim_colon_completions $cur
# List long option names
elif [[ $cur == --* ]]; then
- subcmd=${COMP_WORDS[1]}
+ subcmd=${words[1]}
opts=$($cmd $subcmd --list-opts)
COMPREPLY=( $( compgen -W '$opts' -- "$cur" ) )
- # Fall down to list regular files
- else
- _filedir
fi
} &&
-complete -F _perf perf
+
+complete -o bashdefault -o default -o nospace -F _perf perf 2>/dev/null \
+ || complete -o default -o nospace -F _perf perf
diff --git a/tools/perf/bench/mem-memcpy-arch.h b/tools/perf/bench/mem-memcpy-arch.h
index a72e36cb5394..57b4ed871459 100644
--- a/tools/perf/bench/mem-memcpy-arch.h
+++ b/tools/perf/bench/mem-memcpy-arch.h
@@ -1,5 +1,5 @@
-#ifdef ARCH_X86_64
+#ifdef HAVE_ARCH_X86_64_SUPPORT
#define MEMCPY_FN(fn, name, desc) \
extern void *fn(void *, const void *, size_t);
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
index 8cdca43016b2..5ce71d3b72cf 100644
--- a/tools/perf/bench/mem-memcpy.c
+++ b/tools/perf/bench/mem-memcpy.c
@@ -58,7 +58,7 @@ struct routine routines[] = {
{ "default",
"Default memcpy() provided by glibc",
memcpy },
-#ifdef ARCH_X86_64
+#ifdef HAVE_ARCH_X86_64_SUPPORT
#define MEMCPY_FN(fn, name, desc) { name, desc, fn },
#include "mem-memcpy-x86-64-asm-def.h"
diff --git a/tools/perf/bench/mem-memset-arch.h b/tools/perf/bench/mem-memset-arch.h
index a040fa77665b..633800cb0dcb 100644
--- a/tools/perf/bench/mem-memset-arch.h
+++ b/tools/perf/bench/mem-memset-arch.h
@@ -1,5 +1,5 @@
-#ifdef ARCH_X86_64
+#ifdef HAVE_ARCH_X86_64_SUPPORT
#define MEMSET_FN(fn, name, desc) \
extern void *fn(void *, int, size_t);
diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c
index 4a2f12081964..9af79d2b18e5 100644
--- a/tools/perf/bench/mem-memset.c
+++ b/tools/perf/bench/mem-memset.c
@@ -58,7 +58,7 @@ static const struct routine routines[] = {
{ "default",
"Default memset() provided by glibc",
memset },
-#ifdef ARCH_X86_64
+#ifdef HAVE_ARCH_X86_64_SUPPORT
#define MEMSET_FN(fn, name, desc) { name, desc, fn },
#include "mem-memset-x86-64-asm-def.h"
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 30d1c3225b46..d4c83c60b9b2 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -429,14 +429,14 @@ static int parse_cpu_list(const char *arg)
return 0;
}
-static void parse_setup_cpu_list(void)
+static int parse_setup_cpu_list(void)
{
struct thread_data *td;
char *str0, *str;
int t;
if (!g->p.cpu_list_str)
- return;
+ return 0;
dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
@@ -500,8 +500,12 @@ static void parse_setup_cpu_list(void)
dprintf("CPUs: %d_%d-%d#%dx%d\n", bind_cpu_0, bind_len, bind_cpu_1, step, mul);
- BUG_ON(bind_cpu_0 < 0 || bind_cpu_0 >= g->p.nr_cpus);
- BUG_ON(bind_cpu_1 < 0 || bind_cpu_1 >= g->p.nr_cpus);
+ if (bind_cpu_0 >= g->p.nr_cpus || bind_cpu_1 >= g->p.nr_cpus) {
+ printf("\nTest not applicable, system has only %d CPUs.\n", g->p.nr_cpus);
+ return -1;
+ }
+
+ BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
BUG_ON(bind_cpu_0 > bind_cpu_1);
for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
@@ -541,6 +545,7 @@ out:
printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
free(str0);
+ return 0;
}
static int parse_cpus_opt(const struct option *opt __maybe_unused,
@@ -561,14 +566,14 @@ static int parse_node_list(const char *arg)
return 0;
}
-static void parse_setup_node_list(void)
+static int parse_setup_node_list(void)
{
struct thread_data *td;
char *str0, *str;
int t;
if (!g->p.node_list_str)
- return;
+ return 0;
dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
@@ -619,8 +624,12 @@ static void parse_setup_node_list(void)
dprintf("NODEs: %d-%d #%d\n", bind_node_0, bind_node_1, step);
- BUG_ON(bind_node_0 < 0 || bind_node_0 >= g->p.nr_nodes);
- BUG_ON(bind_node_1 < 0 || bind_node_1 >= g->p.nr_nodes);
+ if (bind_node_0 >= g->p.nr_nodes || bind_node_1 >= g->p.nr_nodes) {
+ printf("\nTest not applicable, system has only %d nodes.\n", g->p.nr_nodes);
+ return -1;
+ }
+
+ BUG_ON(bind_node_0 < 0 || bind_node_1 < 0);
BUG_ON(bind_node_0 > bind_node_1);
for (bind_node = bind_node_0; bind_node <= bind_node_1; bind_node += step) {
@@ -651,6 +660,7 @@ out:
printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
free(str0);
+ return 0;
}
static int parse_nodes_opt(const struct option *opt __maybe_unused,
@@ -1110,7 +1120,7 @@ static void *worker_thread(void *__tdata)
/* Check whether our max runtime timed out: */
if (g->p.nr_secs) {
timersub(&stop, &start0, &diff);
- if (diff.tv_sec >= g->p.nr_secs) {
+ if ((u32)diff.tv_sec >= g->p.nr_secs) {
g->stop_work = true;
break;
}
@@ -1157,7 +1167,7 @@ static void *worker_thread(void *__tdata)
runtime_ns_max += diff.tv_usec * 1000;
if (details >= 0) {
- printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016lx]\n",
+ printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016"PRIx64"]\n",
process_nr, thread_nr, runtime_ns_max / bytes_done, val);
}
fflush(stdout);
@@ -1356,8 +1366,8 @@ static int init(void)
init_thread_data();
tprintf("#\n");
- parse_setup_cpu_list();
- parse_setup_node_list();
+ if (parse_setup_cpu_list() || parse_setup_node_list())
+ return -1;
tprintf("#\n");
print_summary();
@@ -1600,7 +1610,6 @@ static int run_bench_numa(const char *name, const char **argv)
return 0;
err:
- usage_with_options(numa_usage, options);
return -1;
}
@@ -1701,8 +1710,7 @@ static int bench_all(void)
BUG_ON(ret < 0);
for (i = 0; i < nr; i++) {
- if (run_bench_numa(tests[i][0], tests[i] + 1))
- return -1;
+ run_bench_numa(tests[i][0], tests[i] + 1);
}
printf("\n");
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 69cfba8d4c6c..07a8d7646a15 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -7,9 +7,7 @@
* Based on pipe-test-1m.c by Ingo Molnar <mingo@redhat.com>
* http://people.redhat.com/mingo/cfs-scheduler/tools/pipe-test-1m.c
* Ported to perf by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
- *
*/
-
#include "../perf.h"
#include "../util/util.h"
#include "../util/parse-options.h"
@@ -28,12 +26,24 @@
#include <sys/time.h>
#include <sys/types.h>
+#include <pthread.h>
+
+struct thread_data {
+ int nr;
+ int pipe_read;
+ int pipe_write;
+ pthread_t pthread;
+};
+
#define LOOPS_DEFAULT 1000000
-static int loops = LOOPS_DEFAULT;
+static int loops = LOOPS_DEFAULT;
+
+/* Use processes by default: */
+static bool threaded;
static const struct option options[] = {
- OPT_INTEGER('l', "loop", &loops,
- "Specify number of loops"),
+ OPT_INTEGER('l', "loop", &loops, "Specify number of loops"),
+ OPT_BOOLEAN('T', "threaded", &threaded, "Specify threads/process based task setup"),
OPT_END()
};
@@ -42,13 +52,37 @@ static const char * const bench_sched_pipe_usage[] = {
NULL
};
-int bench_sched_pipe(int argc, const char **argv,
- const char *prefix __maybe_unused)
+static void *worker_thread(void *__tdata)
{
- int pipe_1[2], pipe_2[2];
+ struct thread_data *td = __tdata;
int m = 0, i;
+ int ret;
+
+ for (i = 0; i < loops; i++) {
+ if (!td->nr) {
+ ret = read(td->pipe_read, &m, sizeof(int));
+ BUG_ON(ret != sizeof(int));
+ ret = write(td->pipe_write, &m, sizeof(int));
+ BUG_ON(ret != sizeof(int));
+ } else {
+ ret = write(td->pipe_write, &m, sizeof(int));
+ BUG_ON(ret != sizeof(int));
+ ret = read(td->pipe_read, &m, sizeof(int));
+ BUG_ON(ret != sizeof(int));
+ }
+ }
+
+ return NULL;
+}
+
+int bench_sched_pipe(int argc, const char **argv, const char *prefix __maybe_unused)
+{
+ struct thread_data threads[2], *td;
+ int pipe_1[2], pipe_2[2];
struct timeval start, stop, diff;
unsigned long long result_usec = 0;
+ int nr_threads = 2;
+ int t;
/*
* why does "ret" exist?
@@ -58,43 +92,66 @@ int bench_sched_pipe(int argc, const char **argv,
int __maybe_unused ret, wait_stat;
pid_t pid, retpid __maybe_unused;
- argc = parse_options(argc, argv, options,
- bench_sched_pipe_usage, 0);
+ argc = parse_options(argc, argv, options, bench_sched_pipe_usage, 0);
BUG_ON(pipe(pipe_1));
BUG_ON(pipe(pipe_2));
- pid = fork();
- assert(pid >= 0);
-
gettimeofday(&start, NULL);
- if (!pid) {
- for (i = 0; i < loops; i++) {
- ret = read(pipe_1[0], &m, sizeof(int));
- ret = write(pipe_2[1], &m, sizeof(int));
- }
- } else {
- for (i = 0; i < loops; i++) {
- ret = write(pipe_1[1], &m, sizeof(int));
- ret = read(pipe_2[0], &m, sizeof(int));
+ for (t = 0; t < nr_threads; t++) {
+ td = threads + t;
+
+ td->nr = t;
+
+ if (t == 0) {
+ td->pipe_read = pipe_1[0];
+ td->pipe_write = pipe_2[1];
+ } else {
+ td->pipe_write = pipe_1[1];
+ td->pipe_read = pipe_2[0];
}
}
- gettimeofday(&stop, NULL);
- timersub(&stop, &start, &diff);
- if (pid) {
+ if (threaded) {
+
+ for (t = 0; t < nr_threads; t++) {
+ td = threads + t;
+
+ ret = pthread_create(&td->pthread, NULL, worker_thread, td);
+ BUG_ON(ret);
+ }
+
+ for (t = 0; t < nr_threads; t++) {
+ td = threads + t;
+
+ ret = pthread_join(td->pthread, NULL);
+ BUG_ON(ret);
+ }
+
+ } else {
+ pid = fork();
+ assert(pid >= 0);
+
+ if (!pid) {
+ worker_thread(threads + 0);
+ exit(0);
+ } else {
+ worker_thread(threads + 1);
+ }
+
retpid = waitpid(pid, &wait_stat, 0);
assert((retpid == pid) && WIFEXITED(wait_stat));
- } else {
- exit(0);
}
+ gettimeofday(&stop, NULL);
+ timersub(&stop, &start, &diff);
+
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
- printf("# Executed %d pipe operations between two tasks\n\n",
- loops);
+ printf("# Executed %d pipe operations between two %s\n\n",
+ loops, threaded ? "threads" : "processes");
result_usec = diff.tv_sec * 1000000;
result_usec += diff.tv_usec;
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 5ebd0c3b71b6..6c5ae57831f6 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -28,8 +28,10 @@
#include "util/hist.h"
#include "util/session.h"
#include "util/tool.h"
+#include "util/data.h"
#include "arch/common.h"
+#include <dlfcn.h>
#include <linux/bitmap.h>
struct perf_annotate {
@@ -63,7 +65,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
return 0;
}
- he = __hists__add_entry(&evsel->hists, al, NULL, 1, 1);
+ he = __hists__add_entry(&evsel->hists, al, NULL, 1, 1, 0);
if (he == NULL)
return -ENOMEM;
@@ -116,11 +118,11 @@ static int hist_entry__tty_annotate(struct hist_entry *he,
ann->print_line, ann->full_paths, 0, 0);
}
-static void hists__find_annotations(struct hists *self,
+static void hists__find_annotations(struct hists *hists,
struct perf_evsel *evsel,
struct perf_annotate *ann)
{
- struct rb_node *nd = rb_first(&self->entries), *next;
+ struct rb_node *nd = rb_first(&hists->entries), *next;
int key = K_RIGHT;
while (nd) {
@@ -142,8 +144,18 @@ find_next:
if (use_browser == 2) {
int ret;
+ int (*annotate)(struct hist_entry *he,
+ struct perf_evsel *evsel,
+ struct hist_browser_timer *hbt);
+
+ annotate = dlsym(perf_gtk_handle,
+ "hist_entry__gtk_annotate");
+ if (annotate == NULL) {
+ ui__error("GTK browser not found!\n");
+ return;
+ }
- ret = hist_entry__gtk_annotate(he, evsel, NULL);
+ ret = annotate(he, evsel, NULL);
if (!ret || !ann->skip_missing)
return;
@@ -188,9 +200,13 @@ static int __cmd_annotate(struct perf_annotate *ann)
struct perf_session *session;
struct perf_evsel *pos;
u64 total_nr_samples;
+ struct perf_data_file file = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = ann->force,
+ };
- session = perf_session__new(input_name, O_RDONLY,
- ann->force, false, &ann->tool);
+ session = perf_session__new(&file, false, &ann->tool);
if (session == NULL)
return -ENOMEM;
@@ -231,7 +247,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
if (nr_samples > 0) {
total_nr_samples += nr_samples;
- hists__collapse_resort(hists);
+ hists__collapse_resort(hists, NULL);
hists__output_resort(hists);
if (symbol_conf.event_group &&
@@ -243,12 +259,21 @@ static int __cmd_annotate(struct perf_annotate *ann)
}
if (total_nr_samples == 0) {
- ui__error("The %s file has no samples!\n", session->filename);
+ ui__error("The %s file has no samples!\n", file.path);
goto out_delete;
}
- if (use_browser == 2)
- perf_gtk__show_annotations();
+ if (use_browser == 2) {
+ void (*show_annotations)(void);
+
+ show_annotations = dlsym(perf_gtk_handle,
+ "perf_gtk__show_annotations");
+ if (show_annotations == NULL) {
+ ui__error("GTK browser not found!\n");
+ goto out_delete;
+ }
+ show_annotations();
+ }
out_delete:
/*
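
The annotate changes stop linking the GTK browser in directly and resolve its entry points at runtime instead. A minimal sketch of that dlopen()/dlsym() fallback pattern follows; perf_gtk_handle in the diff is opened elsewhere in perf, so the sketch opens its own handle, and "libperf-gtk.so" is an illustrative name. Build with -ldl.

	#include <dlfcn.h>
	#include <stdio.h>

	int main(void)
	{
		void *handle = dlopen("libperf-gtk.so", RTLD_LAZY);
		void (*show_annotations)(void);

		if (!handle) {
			fprintf(stderr, "GTK browser not found: %s\n", dlerror());
			return 1;
		}

		/* look the symbol up by name instead of linking against it */
		show_annotations = (void (*)(void))dlsym(handle,
						"perf_gtk__show_annotations");
		if (!show_annotations) {
			fprintf(stderr, "symbol missing: %s\n", dlerror());
			dlclose(handle);
			return 1;
		}

		show_annotations();
		dlclose(handle);
		return 0;
	}

If the library or the symbol is absent, the tool degrades to its text UI instead of failing at link time, which is exactly the behaviour the ui__error("GTK browser not found!") paths above implement.
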
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index 77298bf892b8..e47f90cc7b98 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -1,21 +1,18 @@
/*
- *
* builtin-bench.c
*
- * General benchmarking subsystem provided by perf
+ * General benchmarking collections provided by perf
*
* Copyright (C) 2009, Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
- *
*/
/*
+ * Available benchmark collection list:
*
- * Available subsystem list:
- * sched ... scheduler and IPC mechanism
+ * sched ... scheduler and IPC performance
* mem ... memory access performance
- *
+ * numa ... NUMA scheduling and MM performance
*/
-
#include "perf.h"
#include "util/util.h"
#include "util/parse-options.h"
@@ -25,112 +22,92 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/prctl.h>
-struct bench_suite {
- const char *name;
- const char *summary;
- int (*fn)(int, const char **, const char *);
+typedef int (*bench_fn_t)(int argc, const char **argv, const char *prefix);
+
+struct bench {
+ const char *name;
+ const char *summary;
+ bench_fn_t fn;
};
- \
-/* sentinel: easy for help */
-#define suite_all { "all", "Test all benchmark suites", NULL }
-
-#ifdef LIBNUMA_SUPPORT
-static struct bench_suite numa_suites[] = {
- { "mem",
- "Benchmark for NUMA workloads",
- bench_numa },
- suite_all,
- { NULL,
- NULL,
- NULL }
+
+#ifdef HAVE_LIBNUMA_SUPPORT
+static struct bench numa_benchmarks[] = {
+ { "mem", "Benchmark for NUMA workloads", bench_numa },
+ { "all", "Test all NUMA benchmarks", NULL },
+ { NULL, NULL, NULL }
};
#endif
-static struct bench_suite sched_suites[] = {
- { "messaging",
- "Benchmark for scheduler and IPC mechanisms",
- bench_sched_messaging },
- { "pipe",
- "Flood of communication over pipe() between two processes",
- bench_sched_pipe },
- suite_all,
- { NULL,
- NULL,
- NULL }
+static struct bench sched_benchmarks[] = {
+ { "messaging", "Benchmark for scheduling and IPC", bench_sched_messaging },
+ { "pipe", "Benchmark for pipe() between two processes", bench_sched_pipe },
+ { "all", "Test all scheduler benchmarks", NULL },
+ { NULL, NULL, NULL }
};
-static struct bench_suite mem_suites[] = {
- { "memcpy",
- "Simple memory copy in various ways",
- bench_mem_memcpy },
- { "memset",
- "Simple memory set in various ways",
- bench_mem_memset },
- suite_all,
- { NULL,
- NULL,
- NULL }
+static struct bench mem_benchmarks[] = {
+ { "memcpy", "Benchmark for memcpy()", bench_mem_memcpy },
+ { "memset", "Benchmark for memset() tests", bench_mem_memset },
+ { "all", "Test all memory benchmarks", NULL },
+ { NULL, NULL, NULL }
};
-struct bench_subsys {
- const char *name;
- const char *summary;
- struct bench_suite *suites;
+struct collection {
+ const char *name;
+ const char *summary;
+ struct bench *benchmarks;
};
-static struct bench_subsys subsystems[] = {
-#ifdef LIBNUMA_SUPPORT
- { "numa",
- "NUMA scheduling and MM behavior",
- numa_suites },
+static struct collection collections[] = {
+ { "sched", "Scheduler and IPC benchmarks", sched_benchmarks },
+ { "mem", "Memory access benchmarks", mem_benchmarks },
+#ifdef HAVE_LIBNUMA_SUPPORT
+ { "numa", "NUMA scheduling and MM benchmarks", numa_benchmarks },
#endif
- { "sched",
- "scheduler and IPC mechanism",
- sched_suites },
- { "mem",
- "memory access performance",
- mem_suites },
- { "all", /* sentinel: easy for help */
- "all benchmark subsystem",
- NULL },
- { NULL,
- NULL,
- NULL }
+ { "all", "All benchmarks", NULL },
+ { NULL, NULL, NULL }
};
-static void dump_suites(int subsys_index)
+/* Iterate over all benchmark collections: */
+#define for_each_collection(coll) \
+ for (coll = collections; coll->name; coll++)
+
+/* Iterate over all benchmarks within a collection: */
+#define for_each_bench(coll, bench) \
+ for (bench = coll->benchmarks; bench->name; bench++)
+
+static void dump_benchmarks(struct collection *coll)
{
- int i;
+ struct bench *bench;
- printf("# List of available suites for %s...\n\n",
- subsystems[subsys_index].name);
+ printf("\n # List of available benchmarks for collection '%s':\n\n", coll->name);
- for (i = 0; subsystems[subsys_index].suites[i].name; i++)
- printf("%14s: %s\n",
- subsystems[subsys_index].suites[i].name,
- subsystems[subsys_index].suites[i].summary);
+ for_each_bench(coll, bench)
+ printf("%14s: %s\n", bench->name, bench->summary);
printf("\n");
- return;
}
static const char *bench_format_str;
+
+/* Output/formatting style, exported to benchmark modules: */
int bench_format = BENCH_FORMAT_DEFAULT;
static const struct option bench_options[] = {
- OPT_STRING('f', "format", &bench_format_str, "default",
- "Specify format style"),
+ OPT_STRING('f', "format", &bench_format_str, "default", "Specify format style"),
OPT_END()
};
static const char * const bench_usage[] = {
- "perf bench [<common options>] <subsystem> <suite> [<options>]",
+ "perf bench [<common options>] <collection> <benchmark> [<options>]",
NULL
};
static void print_usage(void)
{
+ struct collection *coll;
int i;
printf("Usage: \n");
@@ -138,11 +115,10 @@ static void print_usage(void)
printf("\t%s\n", bench_usage[i]);
printf("\n");
- printf("# List of available subsystems...\n\n");
+ printf(" # List of all available benchmark collections:\n\n");
- for (i = 0; subsystems[i].name; i++)
- printf("%14s: %s\n",
- subsystems[i].name, subsystems[i].summary);
+ for_each_collection(coll)
+ printf("%14s: %s\n", coll->name, coll->summary);
printf("\n");
}
@@ -159,44 +135,74 @@ static int bench_str2int(const char *str)
return BENCH_FORMAT_UNKNOWN;
}
-static void all_suite(struct bench_subsys *subsys) /* FROM HERE */
+/*
+ * Run a specific benchmark but first rename the running task's ->comm[]
+ * to something meaningful:
+ */
+static int run_bench(const char *coll_name, const char *bench_name, bench_fn_t fn,
+ int argc, const char **argv, const char *prefix)
{
- int i;
+ int size;
+ char *name;
+ int ret;
+
+ size = strlen(coll_name) + 1 + strlen(bench_name) + 1;
+
+ name = zalloc(size);
+ BUG_ON(!name);
+
+ scnprintf(name, size, "%s-%s", coll_name, bench_name);
+
+ prctl(PR_SET_NAME, name);
+ argv[0] = name;
+
+ ret = fn(argc, argv, prefix);
+
+ free(name);
+
+ return ret;
+}
+
+static void run_collection(struct collection *coll)
+{
+ struct bench *bench;
const char *argv[2];
- struct bench_suite *suites = subsys->suites;
argv[1] = NULL;
/*
* TODO:
- * preparing preset parameters for
+ *
+ * Preparing preset parameters for
* embedded, ordinary PC, HPC, etc...
- * will be helpful
+ * would be helpful.
*/
- for (i = 0; suites[i].fn; i++) {
- printf("# Running %s/%s benchmark...\n",
- subsys->name,
- suites[i].name);
+ for_each_bench(coll, bench) {
+ if (!bench->fn)
+ break;
+ printf("# Running %s/%s benchmark...\n", coll->name, bench->name);
fflush(stdout);
- argv[1] = suites[i].name;
- suites[i].fn(1, argv, NULL);
+ argv[1] = bench->name;
+ run_bench(coll->name, bench->name, bench->fn, 1, argv, NULL);
printf("\n");
}
}
-static void all_subsystem(void)
+static void run_all_collections(void)
{
- int i;
- for (i = 0; subsystems[i].suites; i++)
- all_suite(&subsystems[i]);
+ struct collection *coll;
+
+ for_each_collection(coll)
+ run_collection(coll);
}
int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused)
{
- int i, j, status = 0;
+ struct collection *coll;
+ int ret = 0;
if (argc < 2) {
- /* No subsystem specified. */
+ /* No collection specified. */
print_usage();
goto end;
}
@@ -206,7 +212,7 @@ int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused)
bench_format = bench_str2int(bench_format_str);
if (bench_format == BENCH_FORMAT_UNKNOWN) {
- printf("Unknown format descriptor:%s\n", bench_format_str);
+ printf("Unknown format descriptor: '%s'\n", bench_format_str);
goto end;
}
@@ -216,52 +222,51 @@ int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused)
}
if (!strcmp(argv[0], "all")) {
- all_subsystem();
+ run_all_collections();
goto end;
}
- for (i = 0; subsystems[i].name; i++) {
- if (strcmp(subsystems[i].name, argv[0]))
+ for_each_collection(coll) {
+ struct bench *bench;
+
+ if (strcmp(coll->name, argv[0]))
continue;
if (argc < 2) {
- /* No suite specified. */
- dump_suites(i);
+ /* No bench specified. */
+ dump_benchmarks(coll);
goto end;
}
if (!strcmp(argv[1], "all")) {
- all_suite(&subsystems[i]);
+ run_collection(coll);
goto end;
}
- for (j = 0; subsystems[i].suites[j].name; j++) {
- if (strcmp(subsystems[i].suites[j].name, argv[1]))
+ for_each_bench(coll, bench) {
+ if (strcmp(bench->name, argv[1]))
continue;
if (bench_format == BENCH_FORMAT_DEFAULT)
- printf("# Running %s/%s benchmark...\n",
- subsystems[i].name,
- subsystems[i].suites[j].name);
+ printf("# Running '%s/%s' benchmark:\n", coll->name, bench->name);
fflush(stdout);
- status = subsystems[i].suites[j].fn(argc - 1,
- argv + 1, prefix);
+ ret = run_bench(coll->name, bench->name, bench->fn, argc-1, argv+1, prefix);
goto end;
}
if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
- dump_suites(i);
+ dump_benchmarks(coll);
goto end;
}
- printf("Unknown suite:%s for %s\n", argv[1], argv[0]);
- status = 1;
+ printf("Unknown benchmark: '%s' for collection '%s'\n", argv[1], argv[0]);
+ ret = 1;
goto end;
}
- printf("Unknown subsystem:%s\n", argv[0]);
- status = 1;
+ printf("Unknown collection: '%s'\n", argv[0]);
+ ret = 1;
end:
- return status;
+ return ret;
}
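
run_bench() above renames the running task before invoking a benchmark so the run is identifiable in top or in a recorded profile. A standalone sketch of that idea, assuming a dummy benchmark function; only the "collection-benchmark" naming and the prctl(PR_SET_NAME) call mirror the diff.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/prctl.h>

	typedef int (*bench_fn_t)(int argc, const char **argv, const char *prefix);

	static int dummy_bench(int argc, const char **argv, const char *prefix)
	{
		(void)argc; (void)prefix;
		printf("running as '%s'\n", argv[0]);
		return 0;
	}

	static int run_bench(const char *coll, const char *bench, bench_fn_t fn,
			     int argc, const char **argv, const char *prefix)
	{
		size_t size = strlen(coll) + 1 + strlen(bench) + 1;
		char *name = calloc(1, size);
		int ret;

		if (!name)
			return -1;

		snprintf(name, size, "%s-%s", coll, bench);
		prctl(PR_SET_NAME, name);	/* comm is truncated to 15 chars */
		argv[0] = name;

		ret = fn(argc, argv, prefix);
		free(name);
		return ret;
	}

	int main(void)
	{
		const char *argv[] = { "placeholder", NULL };

		return run_bench("sched", "pipe", dummy_bench, 1, argv, NULL);
	}
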
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index c96c8fa38243..cfede86161d8 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -6,6 +6,11 @@
* Copyright (C) 2010, Red Hat Inc.
* Copyright (C) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
*/
+#include <sys/types.h>
+#include <sys/time.h>
+#include <time.h>
+#include <dirent.h>
+#include <unistd.h>
#include "builtin.h"
#include "perf.h"
#include "util/cache.h"
@@ -17,6 +22,140 @@
#include "util/session.h"
#include "util/symbol.h"
+static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid)
+{
+ char root_dir[PATH_MAX];
+ char notes[PATH_MAX];
+ u8 build_id[BUILD_ID_SIZE];
+ char *p;
+
+ strlcpy(root_dir, proc_dir, sizeof(root_dir));
+
+ p = strrchr(root_dir, '/');
+ if (!p)
+ return -1;
+ *p = '\0';
+
+ scnprintf(notes, sizeof(notes), "%s/sys/kernel/notes", root_dir);
+
+ if (sysfs__read_build_id(notes, build_id, sizeof(build_id)))
+ return -1;
+
+ build_id__sprintf(build_id, sizeof(build_id), sbuildid);
+
+ return 0;
+}
+
+static int build_id_cache__kcore_dir(char *dir, size_t sz)
+{
+ struct timeval tv;
+ struct tm tm;
+ char dt[32];
+
+ if (gettimeofday(&tv, NULL) || !localtime_r(&tv.tv_sec, &tm))
+ return -1;
+
+ if (!strftime(dt, sizeof(dt), "%Y%m%d%H%M%S", &tm))
+ return -1;
+
+ scnprintf(dir, sz, "%s%02u", dt, (unsigned)tv.tv_usec / 10000);
+
+ return 0;
+}
+
+static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
+ size_t to_dir_sz)
+{
+ char from[PATH_MAX];
+ char to[PATH_MAX];
+ struct dirent *dent;
+ int ret = -1;
+ DIR *d;
+
+ d = opendir(to_dir);
+ if (!d)
+ return -1;
+
+ scnprintf(from, sizeof(from), "%s/modules", from_dir);
+
+ while (1) {
+ dent = readdir(d);
+ if (!dent)
+ break;
+ if (dent->d_type != DT_DIR)
+ continue;
+ scnprintf(to, sizeof(to), "%s/%s/modules", to_dir,
+ dent->d_name);
+ if (!compare_proc_modules(from, to)) {
+ scnprintf(to, sizeof(to), "%s/%s", to_dir,
+ dent->d_name);
+ strlcpy(to_dir, to, to_dir_sz);
+ ret = 0;
+ break;
+ }
+ }
+
+ closedir(d);
+
+ return ret;
+}
+
+static int build_id_cache__add_kcore(const char *filename, const char *debugdir)
+{
+ char dir[32], sbuildid[BUILD_ID_SIZE * 2 + 1];
+ char from_dir[PATH_MAX], to_dir[PATH_MAX];
+ char *p;
+
+ strlcpy(from_dir, filename, sizeof(from_dir));
+
+ p = strrchr(from_dir, '/');
+ if (!p || strcmp(p + 1, "kcore"))
+ return -1;
+ *p = '\0';
+
+ if (build_id_cache__kcore_buildid(from_dir, sbuildid))
+ return -1;
+
+ scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s",
+ debugdir, sbuildid);
+
+ if (!build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) {
+ pr_debug("same kcore found in %s\n", to_dir);
+ return 0;
+ }
+
+ if (build_id_cache__kcore_dir(dir, sizeof(dir)))
+ return -1;
+
+ scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s/%s",
+ debugdir, sbuildid, dir);
+
+ if (mkdir_p(to_dir, 0755))
+ return -1;
+
+ if (kcore_copy(from_dir, to_dir)) {
+ /* Remove YYYYmmddHHMMSShh directory */
+ if (!rmdir(to_dir)) {
+ p = strrchr(to_dir, '/');
+ if (p)
+ *p = '\0';
+ /* Try to remove buildid directory */
+ if (!rmdir(to_dir)) {
+ p = strrchr(to_dir, '/');
+ if (p)
+ *p = '\0';
+ /* Try to remove [kernel.kcore] directory */
+ rmdir(to_dir);
+ }
+ }
+ return -1;
+ }
+
+ pr_debug("kcore added to build-id cache directory %s\n", to_dir);
+
+ return 0;
+}
+
static int build_id_cache__add_file(const char *filename, const char *debugdir)
{
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
@@ -82,8 +221,12 @@ static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
static int build_id_cache__fprintf_missing(const char *filename, bool force, FILE *fp)
{
- struct perf_session *session = perf_session__new(filename, O_RDONLY,
- force, false, NULL);
+ struct perf_data_file file = {
+ .path = filename,
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
+ };
+ struct perf_session *session = perf_session__new(&file, false, NULL);
if (session == NULL)
return -1;
@@ -130,11 +273,14 @@ int cmd_buildid_cache(int argc, const char **argv,
char const *add_name_list_str = NULL,
*remove_name_list_str = NULL,
*missing_filename = NULL,
- *update_name_list_str = NULL;
+ *update_name_list_str = NULL,
+ *kcore_filename;
const struct option buildid_cache_options[] = {
OPT_STRING('a', "add", &add_name_list_str,
"file list", "file(s) to add"),
+ OPT_STRING('k', "kcore", &kcore_filename,
+ "file", "kcore file to add"),
OPT_STRING('r', "remove", &remove_name_list_str, "file list",
"file(s) to remove"),
OPT_STRING('M', "missing", &missing_filename, "file",
@@ -217,5 +363,9 @@ int cmd_buildid_cache(int argc, const char **argv,
}
}
+ if (kcore_filename &&
+ build_id_cache__add_kcore(kcore_filename, debugdir))
+ pr_warning("Couldn't add %s\n", kcore_filename);
+
return ret;
}
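
build_id_cache__kcore_dir() above derives a unique cache directory name from the current time. A standalone sketch of the same formatting, using snprintf() in place of perf's scnprintf():

	#include <stdio.h>
	#include <sys/time.h>
	#include <time.h>

	static int kcore_dir_name(char *dir, size_t sz)
	{
		struct timeval tv;
		struct tm tm;
		char dt[32];

		if (gettimeofday(&tv, NULL) || !localtime_r(&tv.tv_sec, &tm))
			return -1;

		if (!strftime(dt, sizeof(dt), "%Y%m%d%H%M%S", &tm))
			return -1;

		/* e.g. "2013110210153042": timestamp plus hundredths of a second */
		snprintf(dir, sz, "%s%02u", dt, (unsigned)tv.tv_usec / 10000);
		return 0;
	}

	int main(void)
	{
		char dir[32];

		if (!kcore_dir_name(dir, sizeof(dir)))
			printf("%s\n", dir);
		return 0;
	}
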
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index e74366a13218..ed3873b3e238 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -15,6 +15,7 @@
#include "util/parse-options.h"
#include "util/session.h"
#include "util/symbol.h"
+#include "util/data.h"
static int sysfs__fprintf_build_id(FILE *fp)
{
@@ -52,6 +53,11 @@ static bool dso__skip_buildid(struct dso *dso, int with_hits)
static int perf_session__list_build_ids(bool force, bool with_hits)
{
struct perf_session *session;
+ struct perf_data_file file = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
+ };
symbol__elf_init();
/*
@@ -60,15 +66,14 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
if (filename__fprintf_build_id(input_name, stdout))
goto out;
- session = perf_session__new(input_name, O_RDONLY, force, false,
- &build_id__mark_dso_hit_ops);
+ session = perf_session__new(&file, false, &build_id__mark_dso_hit_ops);
if (session == NULL)
return -1;
/*
* in pipe-mode, the only way to get the buildids is to parse
* the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID
*/
- if (with_hits || session->fd_pipe)
+ if (with_hits || perf_data_file__is_pipe(&file))
perf_session__process_events(session, &build_id__mark_dso_hit_ops);
perf_session__fprintf_dsos_buildid(session, stdout, dso__skip_buildid, with_hits);
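
From here on, most hunks repeat the same mechanical conversion: perf_session__new() no longer takes a path, open flags and a force flag, but a struct perf_data_file describing the input. A small sketch of that descriptor-struct pattern with stand-in types; data_file and open_session() are illustrative, not the perf API.

	#include <stdbool.h>
	#include <stdio.h>

	enum data_mode { DATA_MODE_READ, DATA_MODE_WRITE };

	struct data_file {
		const char *path;
		enum data_mode mode;
		bool force;
	};

	static int open_session(const struct data_file *file)
	{
		printf("opening %s (%s, force=%d)\n", file->path,
		       file->mode == DATA_MODE_READ ? "read" : "write",
		       file->force);
		return 0;
	}

	int main(void)
	{
		/* designated initializers keep unmentioned fields zeroed */
		struct data_file file = {
			.path  = "perf.data",
			.mode  = DATA_MODE_READ,
			.force = false,
		};

		return open_session(&file);
	}

Grouping the parameters this way lets later fields (pipe detection, write mode, file descriptor) be added without touching every caller again.
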
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index f28799e94f2a..b605009e803f 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -16,6 +16,7 @@
#include "util/sort.h"
#include "util/symbol.h"
#include "util/util.h"
+#include "util/data.h"
#include <stdlib.h>
#include <math.h>
@@ -42,7 +43,7 @@ struct diff_hpp_fmt {
struct data__file {
struct perf_session *session;
- const char *file;
+ struct perf_data_file file;
int idx;
struct hists *hists;
struct diff_hpp_fmt fmt[PERF_HPP_DIFF__MAX_INDEX];
@@ -302,11 +303,11 @@ static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair,
return -1;
}
-static int hists__add_entry(struct hists *self,
+static int hists__add_entry(struct hists *hists,
struct addr_location *al, u64 period,
- u64 weight)
+ u64 weight, u64 transaction)
{
- if (__hists__add_entry(self, al, NULL, period, weight) != NULL)
+ if (__hists__add_entry(hists, al, NULL, period, weight, transaction) != NULL)
return 0;
return -ENOMEM;
}
@@ -328,7 +329,8 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
if (al.filtered)
return 0;
- if (hists__add_entry(&evsel->hists, &al, sample->period, sample->weight)) {
+ if (hists__add_entry(&evsel->hists, &al, sample->period,
+ sample->weight, sample->transaction)) {
pr_warning("problem incrementing symbol period, skipping event\n");
return -1;
}
@@ -367,7 +369,7 @@ static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
list_for_each_entry(evsel, &evlist->entries, node) {
struct hists *hists = &evsel->hists;
- hists__collapse_resort(hists);
+ hists__collapse_resort(hists, NULL);
}
}
@@ -599,7 +601,7 @@ static void data__fprintf(void)
data__for_each_file(i, d)
fprintf(stdout, "# [%d] %s %s\n",
- d->idx, d->file,
+ d->idx, d->file.path,
!d->idx ? "(Baseline)" : "");
fprintf(stdout, "#\n");
@@ -661,17 +663,16 @@ static int __cmd_diff(void)
int ret = -EINVAL, i;
data__for_each_file(i, d) {
- d->session = perf_session__new(d->file, O_RDONLY, force,
- false, &tool);
+ d->session = perf_session__new(&d->file, false, &tool);
if (!d->session) {
- pr_err("Failed to open %s\n", d->file);
+ pr_err("Failed to open %s\n", d->file.path);
ret = -ENOMEM;
goto out_delete;
}
ret = perf_session__process_events(d->session, &tool);
if (ret) {
- pr_err("Failed to process %s\n", d->file);
+ pr_err("Failed to process %s\n", d->file.path);
goto out_delete;
}
@@ -1014,7 +1015,12 @@ static int data_init(int argc, const char **argv)
return -ENOMEM;
data__for_each_file(i, d) {
- d->file = use_default ? defaults[i] : argv[i];
+ struct perf_data_file *file = &d->file;
+
+ file->path = use_default ? defaults[i] : argv[i];
+ file->mode = PERF_DATA_MODE_READ,
+ file->force = force,
+
d->idx = i;
}
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 05bd9dfe875c..20b0f12763b0 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -14,13 +14,18 @@
#include "util/parse-events.h"
#include "util/parse-options.h"
#include "util/session.h"
+#include "util/data.h"
static int __cmd_evlist(const char *file_name, struct perf_attr_details *details)
{
struct perf_session *session;
struct perf_evsel *pos;
+ struct perf_data_file file = {
+ .path = file_name,
+ .mode = PERF_DATA_MODE_READ,
+ };
- session = perf_session__new(file_name, O_RDONLY, 0, false, NULL);
+ session = perf_session__new(&file, 0, NULL);
if (session == NULL)
return -ENOMEM;
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index afe377b2884f..409ceaf3b9b9 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -15,6 +15,7 @@
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
+#include "util/data.h"
#include "util/parse-options.h"
@@ -71,12 +72,17 @@ static int perf_event__repipe_attr(struct perf_tool *tool,
union perf_event *event,
struct perf_evlist **pevlist)
{
+ struct perf_inject *inject = container_of(tool, struct perf_inject,
+ tool);
int ret;
ret = perf_event__process_attr(tool, event, pevlist);
if (ret)
return ret;
+ if (!inject->pipe_output)
+ return 0;
+
return perf_event__repipe_synth(tool, event);
}
@@ -161,38 +167,38 @@ static int perf_event__repipe_tracing_data(struct perf_tool *tool,
return err;
}
-static int dso__read_build_id(struct dso *self)
+static int dso__read_build_id(struct dso *dso)
{
- if (self->has_build_id)
+ if (dso->has_build_id)
return 0;
- if (filename__read_build_id(self->long_name, self->build_id,
- sizeof(self->build_id)) > 0) {
- self->has_build_id = true;
+ if (filename__read_build_id(dso->long_name, dso->build_id,
+ sizeof(dso->build_id)) > 0) {
+ dso->has_build_id = true;
return 0;
}
return -1;
}
-static int dso__inject_build_id(struct dso *self, struct perf_tool *tool,
+static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
struct machine *machine)
{
u16 misc = PERF_RECORD_MISC_USER;
int err;
- if (dso__read_build_id(self) < 0) {
- pr_debug("no build_id found for %s\n", self->long_name);
+ if (dso__read_build_id(dso) < 0) {
+ pr_debug("no build_id found for %s\n", dso->long_name);
return -1;
}
- if (self->kernel)
+ if (dso->kernel)
misc = PERF_RECORD_MISC_KERNEL;
- err = perf_event__synthesize_build_id(tool, self, misc, perf_event__repipe,
+ err = perf_event__synthesize_build_id(tool, dso, misc, perf_event__repipe,
machine);
if (err) {
- pr_err("Can't synthesize build_id event for %s\n", self->long_name);
+ pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
return -1;
}
@@ -231,7 +237,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
* account this as unresolved.
*/
} else {
-#ifdef LIBELF_SUPPORT
+#ifdef HAVE_LIBELF_SUPPORT
pr_warning("no symbols found in %s, maybe "
"install a debug package?\n",
al.map->dso->long_name);
@@ -345,6 +351,10 @@ static int __cmd_inject(struct perf_inject *inject)
{
struct perf_session *session;
int ret = -EINVAL;
+ struct perf_data_file file = {
+ .path = inject->input_name,
+ .mode = PERF_DATA_MODE_READ,
+ };
signal(SIGINT, sig_handler);
@@ -355,7 +365,7 @@ static int __cmd_inject(struct perf_inject *inject)
inject->tool.tracing_data = perf_event__repipe_tracing_data;
}
- session = perf_session__new(inject->input_name, O_RDONLY, false, true, &inject->tool);
+ session = perf_session__new(&file, true, &inject->tool);
if (session == NULL)
return -ENOMEM;
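
perf_event__repipe_attr() above recovers its struct perf_inject from the embedded tool member with container_of(). A minimal standalone version of that idiom:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct tool {
		int (*process)(struct tool *tool);
	};

	struct inject {
		struct tool tool;	/* embedded, passed around by callbacks */
		int pipe_output;
	};

	static int process(struct tool *tool)
	{
		/* step back from the member pointer to the enclosing struct */
		struct inject *inject = container_of(tool, struct inject, tool);

		return inject->pipe_output;
	}

	int main(void)
	{
		struct inject inject = {
			.tool = { .process = process },
			.pipe_output = 1,
		};

		printf("pipe_output=%d\n", inject.tool.process(&inject.tool));
		return 0;
	}
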
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 9b5f077fee5b..1126382659a9 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -13,6 +13,7 @@
#include "util/parse-options.h"
#include "util/trace-event.h"
+#include "util/data.h"
#include "util/debug.h"
@@ -486,8 +487,12 @@ static int __cmd_kmem(void)
{ "kmem:kfree", perf_evsel__process_free_event, },
{ "kmem:kmem_cache_free", perf_evsel__process_free_event, },
};
+ struct perf_data_file file = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ };
- session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_kmem);
+ session = perf_session__new(&file, false, &perf_kmem);
if (session == NULL)
return -ENOMEM;
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 935d52216c89..cb05f39d8a77 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -17,6 +17,7 @@
#include "util/tool.h"
#include "util/stat.h"
#include "util/top.h"
+#include "util/data.h"
#include <sys/prctl.h>
#include <sys/timerfd.h>
@@ -888,11 +889,18 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) {
err = perf_evlist__parse_sample(kvm->evlist, event, &sample);
if (err) {
+ perf_evlist__mmap_consume(kvm->evlist, idx);
pr_err("Failed to parse sample\n");
return -1;
}
err = perf_session_queue_event(kvm->session, event, &sample, 0);
+ /*
+ * FIXME: Here we can't consume the event, as perf_session_queue_event will
+ * point to it, and it'll get possibly overwritten by the kernel.
+ */
+ perf_evlist__mmap_consume(kvm->evlist, idx);
+
if (err) {
pr_err("Failed to enqueue sample: %d\n", err);
return -1;
@@ -1215,10 +1223,13 @@ static int read_events(struct perf_kvm_stat *kvm)
.comm = perf_event__process_comm,
.ordered_samples = true,
};
+ struct perf_data_file file = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ };
kvm->tool = eops;
- kvm->session = perf_session__new(kvm->file_name, O_RDONLY, 0, false,
- &kvm->tool);
+ kvm->session = perf_session__new(&file, false, &kvm->tool);
if (!kvm->session) {
pr_err("Initializing perf session failed\n");
return -EINVAL;
@@ -1426,8 +1437,9 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
const struct option live_options[] = {
OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
"record events on existing process id"),
- OPT_UINTEGER('m', "mmap-pages", &kvm->opts.mmap_pages,
- "number of mmap data pages"),
+ OPT_CALLBACK('m', "mmap-pages", &kvm->opts.mmap_pages, "pages",
+ "number of mmap data pages",
+ perf_evlist__parse_mmap_pages),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide,
@@ -1449,6 +1461,9 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
"perf kvm stat live [<options>]",
NULL
};
+ struct perf_data_file file = {
+ .mode = PERF_DATA_MODE_WRITE,
+ };
/* event handling */
@@ -1513,7 +1528,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
/*
* perf session
*/
- kvm->session = perf_session__new(NULL, O_WRONLY, false, false, &kvm->tool);
+ kvm->session = perf_session__new(&file, false, &kvm->tool);
if (kvm->session == NULL) {
err = -ENOMEM;
goto out;
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index ee33ba2f05dd..33c7253295b9 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -15,6 +15,7 @@
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
+#include "util/data.h"
#include <sys/types.h>
#include <sys/prctl.h>
@@ -56,7 +57,9 @@ struct lock_stat {
unsigned int nr_readlock;
unsigned int nr_trylock;
+
/* these times are in nano sec. */
+ u64 avg_wait_time;
u64 wait_time_total;
u64 wait_time_min;
u64 wait_time_max;
@@ -208,6 +211,7 @@ static struct thread_stat *thread_stat_findnew_first(u32 tid)
SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
+SINGLE_KEY(avg_wait_time)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_max)
@@ -244,6 +248,7 @@ static struct rb_root result; /* place to store sorted data */
struct lock_key keys[] = {
DEF_KEY_LOCK(acquired, nr_acquired),
DEF_KEY_LOCK(contended, nr_contended),
+ DEF_KEY_LOCK(avg_wait, avg_wait_time),
DEF_KEY_LOCK(wait_total, wait_time_total),
DEF_KEY_LOCK(wait_min, wait_time_min),
DEF_KEY_LOCK(wait_max, wait_time_max),
@@ -321,10 +326,12 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
new->addr = addr;
new->name = zalloc(sizeof(char) * strlen(name) + 1);
- if (!new->name)
+ if (!new->name) {
+ free(new);
goto alloc_failed;
- strcpy(new->name, name);
+ }
+ strcpy(new->name, name);
new->wait_time_min = ULLONG_MAX;
list_add(&new->hash_entry, entry);
@@ -400,17 +407,17 @@ static int report_lock_acquire_event(struct perf_evsel *evsel,
ls = lock_stat_findnew(addr, name);
if (!ls)
- return -1;
+ return -ENOMEM;
if (ls->discard)
return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
- return -1;
+ return -ENOMEM;
seq = get_seq(ts, addr);
if (!seq)
- return -1;
+ return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
@@ -446,7 +453,6 @@ broken:
list_del(&seq->list);
free(seq);
goto end;
- break;
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
@@ -473,17 +479,17 @@ static int report_lock_acquired_event(struct perf_evsel *evsel,
ls = lock_stat_findnew(addr, name);
if (!ls)
- return -1;
+ return -ENOMEM;
if (ls->discard)
return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
- return -1;
+ return -ENOMEM;
seq = get_seq(ts, addr);
if (!seq)
- return -1;
+ return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
@@ -508,8 +514,6 @@ static int report_lock_acquired_event(struct perf_evsel *evsel,
list_del(&seq->list);
free(seq);
goto end;
- break;
-
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
@@ -517,6 +521,7 @@ static int report_lock_acquired_event(struct perf_evsel *evsel,
seq->state = SEQ_STATE_ACQUIRED;
ls->nr_acquired++;
+ ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0;
seq->prev_event_time = sample->time;
end:
return 0;
@@ -536,17 +541,17 @@ static int report_lock_contended_event(struct perf_evsel *evsel,
ls = lock_stat_findnew(addr, name);
if (!ls)
- return -1;
+ return -ENOMEM;
if (ls->discard)
return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
- return -1;
+ return -ENOMEM;
seq = get_seq(ts, addr);
if (!seq)
- return -1;
+ return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
@@ -564,7 +569,6 @@ static int report_lock_contended_event(struct perf_evsel *evsel,
list_del(&seq->list);
free(seq);
goto end;
- break;
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
@@ -572,6 +576,7 @@ static int report_lock_contended_event(struct perf_evsel *evsel,
seq->state = SEQ_STATE_CONTENDED;
ls->nr_contended++;
+ ls->avg_wait_time = ls->wait_time_total/ls->nr_contended;
seq->prev_event_time = sample->time;
end:
return 0;
@@ -591,22 +596,21 @@ static int report_lock_release_event(struct perf_evsel *evsel,
ls = lock_stat_findnew(addr, name);
if (!ls)
- return -1;
+ return -ENOMEM;
if (ls->discard)
return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
- return -1;
+ return -ENOMEM;
seq = get_seq(ts, addr);
if (!seq)
- return -1;
+ return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
goto end;
- break;
case SEQ_STATE_ACQUIRED:
break;
case SEQ_STATE_READ_ACQUIRED:
@@ -624,7 +628,6 @@ static int report_lock_release_event(struct perf_evsel *evsel,
ls->discard = 1;
bad_hist[BROKEN_RELEASE]++;
goto free_seq;
- break;
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
@@ -690,7 +693,7 @@ static void print_bad_events(int bad, int total)
pr_info("\n=== output for debug===\n\n");
pr_info("bad: %d, total: %d\n", bad, total);
- pr_info("bad rate: %f %%\n", (double)bad / (double)total * 100);
+ pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
pr_info("histogram of events caused bad sequence\n");
for (i = 0; i < BROKEN_MAX; i++)
pr_info(" %10s: %d\n", name[i], bad_hist[i]);
@@ -707,6 +710,7 @@ static void print_result(void)
pr_info("%10s ", "acquired");
pr_info("%10s ", "contended");
+ pr_info("%15s ", "avg wait (ns)");
pr_info("%15s ", "total wait (ns)");
pr_info("%15s ", "max wait (ns)");
pr_info("%15s ", "min wait (ns)");
@@ -738,6 +742,7 @@ static void print_result(void)
pr_info("%10u ", st->nr_acquired);
pr_info("%10u ", st->nr_contended);
+ pr_info("%15" PRIu64 " ", st->avg_wait_time);
pr_info("%15" PRIu64 " ", st->wait_time_total);
pr_info("%15" PRIu64 " ", st->wait_time_max);
pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
@@ -822,6 +827,18 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
return 0;
}
+static void sort_result(void)
+{
+ unsigned int i;
+ struct lock_stat *st;
+
+ for (i = 0; i < LOCKHASH_SIZE; i++) {
+ list_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ insert_to_result(st, compare);
+ }
+ }
+}
+
static const struct perf_evsel_str_handler lock_tracepoints[] = {
{ "lock:lock_acquire", perf_evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
{ "lock:lock_acquired", perf_evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
@@ -829,51 +846,51 @@ static const struct perf_evsel_str_handler lock_tracepoints[] = {
{ "lock:lock_release", perf_evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
};
-static int read_events(void)
+static int __cmd_report(bool display_info)
{
+ int err = -EINVAL;
struct perf_tool eops = {
.sample = process_sample_event,
.comm = perf_event__process_comm,
.ordered_samples = true,
};
- session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
+ struct perf_data_file file = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ };
+
+ session = perf_session__new(&file, false, &eops);
if (!session) {
pr_err("Initializing perf session failed\n");
- return -1;
+ return -ENOMEM;
}
+ if (!perf_session__has_traces(session, "lock record"))
+ goto out_delete;
+
if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
pr_err("Initializing perf session tracepoint handlers failed\n");
- return -1;
+ goto out_delete;
}
- return perf_session__process_events(session, &eops);
-}
-
-static void sort_result(void)
-{
- unsigned int i;
- struct lock_stat *st;
+ if (select_key())
+ goto out_delete;
- for (i = 0; i < LOCKHASH_SIZE; i++) {
- list_for_each_entry(st, &lockhash_table[i], hash_entry) {
- insert_to_result(st, compare);
- }
- }
-}
+ err = perf_session__process_events(session, &eops);
+ if (err)
+ goto out_delete;
-static int __cmd_report(void)
-{
setup_pager();
+ if (display_info) /* used for info subcommand */
+ err = dump_info();
+ else {
+ sort_result();
+ print_result();
+ }
- if ((select_key() != 0) ||
- (read_events() != 0))
- return -1;
-
- sort_result();
- print_result();
-
- return 0;
+out_delete:
+ perf_session__delete(session);
+ return err;
}
static int __cmd_record(int argc, const char **argv)
@@ -881,7 +898,7 @@ static int __cmd_record(int argc, const char **argv)
const char *record_args[] = {
"record", "-R", "-m", "1024", "-c", "1",
};
- unsigned int rec_argc, i, j;
+ unsigned int rec_argc, i, j, ret;
const char **rec_argv;
for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
@@ -898,7 +915,7 @@ static int __cmd_record(int argc, const char **argv)
rec_argc += 2 * ARRAY_SIZE(lock_tracepoints);
rec_argv = calloc(rec_argc + 1, sizeof(char *));
- if (rec_argv == NULL)
+ if (!rec_argv)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(record_args); i++)
@@ -914,7 +931,9 @@ static int __cmd_record(int argc, const char **argv)
BUG_ON(i != rec_argc);
- return cmd_record(i, rec_argv, NULL);
+ ret = cmd_record(i, rec_argv, NULL);
+ free(rec_argv);
+ return ret;
}
int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
@@ -934,7 +953,7 @@ int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
};
const struct option report_options[] = {
OPT_STRING('k', "key", &sort_key, "acquired",
- "key for sorting (acquired / contended / wait_total / wait_max / wait_min)"),
+ "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
/* TODO: type */
OPT_END()
};
@@ -972,7 +991,7 @@ int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
if (argc)
usage_with_options(report_usage, report_options);
}
- __cmd_report();
+ rc = __cmd_report(false);
} else if (!strcmp(argv[0], "script")) {
/* Aliased to 'perf script' */
return cmd_script(argc, argv, prefix);
@@ -985,11 +1004,7 @@ int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
}
/* recycling report_lock_ops */
trace_handler = &report_lock_ops;
- setup_pager();
- if (read_events() != 0)
- rc = -1;
- else
- rc = dump_info();
+ rc = __cmd_report(true);
} else {
usage_with_options(lock_usage, lock_options);
}
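
The new avg_wait column in 'perf lock report' is kept as a running statistic: it is recomputed from wait_time_total and nr_contended on every contended/acquired event, with a guard for locks that were never contended. A small sketch, with illustrative numbers:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	struct lock_stat {
		unsigned int nr_contended;
		uint64_t wait_time_total;	/* nanoseconds */
		uint64_t avg_wait_time;
	};

	static void account_contended(struct lock_stat *ls, uint64_t wait_ns)
	{
		ls->nr_contended++;
		ls->wait_time_total += wait_ns;
		ls->avg_wait_time = ls->wait_time_total / ls->nr_contended;
	}

	static void account_acquired(struct lock_stat *ls)
	{
		/* never-contended locks report an average wait of zero */
		ls->avg_wait_time = ls->nr_contended ?
				    ls->wait_time_total / ls->nr_contended : 0;
	}

	int main(void)
	{
		struct lock_stat ls = { 0 };

		account_contended(&ls, 1500);
		account_contended(&ls, 500);
		account_acquired(&ls);
		printf("avg wait: %" PRIu64 " ns\n", ls.avg_wait_time);
		return 0;
	}
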
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 253133a6251d..31c00f186da1 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -5,6 +5,7 @@
#include "util/trace-event.h"
#include "util/tool.h"
#include "util/session.h"
+#include "util/data.h"
#define MEM_OPERATION_LOAD "load"
#define MEM_OPERATION_STORE "store"
@@ -119,10 +120,14 @@ static int process_sample_event(struct perf_tool *tool,
static int report_raw_events(struct perf_mem *mem)
{
+ struct perf_data_file file = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ };
int err = -EINVAL;
int ret;
- struct perf_session *session = perf_session__new(input_name, O_RDONLY,
- 0, false, &mem->tool);
+ struct perf_session *session = perf_session__new(&file, false,
+ &mem->tool);
if (session == NULL)
return -ENOMEM;
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index e8a66f9a6715..89acc17cf2a0 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -173,7 +173,7 @@ static int opt_set_target(const struct option *opt, const char *str,
if (str && !params.target) {
if (!strcmp(opt->long_name, "exec"))
params.uprobes = true;
-#ifdef DWARF_SUPPORT
+#ifdef HAVE_DWARF_SUPPORT
else if (!strcmp(opt->long_name, "module"))
params.uprobes = false;
#endif
@@ -187,7 +187,7 @@ static int opt_set_target(const struct option *opt, const char *str,
return ret;
}
-#ifdef DWARF_SUPPORT
+#ifdef HAVE_DWARF_SUPPORT
static int opt_show_lines(const struct option *opt __maybe_unused,
const char *str, int unset __maybe_unused)
{
@@ -257,7 +257,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
"perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
"perf probe [<options>] --del '[GROUP:]EVENT' ...",
"perf probe --list",
-#ifdef DWARF_SUPPORT
+#ifdef HAVE_DWARF_SUPPORT
"perf probe [<options>] --line 'LINEDESC'",
"perf probe [<options>] --vars 'PROBEPOINT'",
#endif
@@ -271,7 +271,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.",
opt_del_probe_event),
OPT_CALLBACK('a', "add", NULL,
-#ifdef DWARF_SUPPORT
+#ifdef HAVE_DWARF_SUPPORT
"[EVENT=]FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT"
" [[NAME=]ARG ...]",
#else
@@ -283,7 +283,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
"\t\tFUNC:\tFunction name\n"
"\t\tOFF:\tOffset from function entry (in byte)\n"
"\t\t%return:\tPut the probe at function return\n"
-#ifdef DWARF_SUPPORT
+#ifdef HAVE_DWARF_SUPPORT
"\t\tSRC:\tSource code path\n"
"\t\tRL:\tRelative line number from function entry.\n"
"\t\tAL:\tAbsolute line number in file.\n"
@@ -296,7 +296,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
opt_add_probe_event),
OPT_BOOLEAN('f', "force", &params.force_add, "forcibly add events"
" with existing name"),
-#ifdef DWARF_SUPPORT
+#ifdef HAVE_DWARF_SUPPORT
OPT_CALLBACK('L', "line", NULL,
"FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]",
"Show source code lines.", opt_show_lines),
@@ -408,7 +408,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
return ret;
}
-#ifdef DWARF_SUPPORT
+#ifdef HAVE_DWARF_SUPPORT
if (params.show_lines && !params.uprobes) {
if (params.mod_events) {
pr_err(" Error: Don't use --line with"
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index a41ac41546c9..8b45fcead5f6 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -24,12 +24,13 @@
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
+#include "util/data.h"
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
-#ifndef HAVE_ON_EXIT
+#ifndef HAVE_ON_EXIT_SUPPORT
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
@@ -65,12 +66,10 @@ struct perf_record {
struct perf_tool tool;
struct perf_record_opts opts;
u64 bytes_written;
- const char *output_name;
+ struct perf_data_file file;
struct perf_evlist *evlist;
struct perf_session *session;
const char *progname;
- int output;
- unsigned int page_size;
int realtime_prio;
bool no_buildid;
bool no_buildid_cache;
@@ -85,11 +84,13 @@ static void advance_output(struct perf_record *rec, size_t size)
static int write_output(struct perf_record *rec, void *buf, size_t size)
{
+ struct perf_data_file *file = &rec->file;
+
while (size) {
- int ret = write(rec->output, buf, size);
+ int ret = write(file->fd, buf, size);
if (ret < 0) {
- pr_err("failed to write\n");
+ pr_err("failed to write perf data, error: %m\n");
return -1;
}
@@ -119,7 +120,7 @@ static int perf_record__mmap_read(struct perf_record *rec,
{
unsigned int head = perf_mmap__read_head(md);
unsigned int old = md->prev;
- unsigned char *data = md->base + rec->page_size;
+ unsigned char *data = md->base + page_size;
unsigned long size;
void *buf;
int rc = 0;
@@ -234,10 +235,6 @@ try_again:
"or try again with a smaller value of -m/--mmap_pages.\n"
"(current value: %d)\n", opts->mmap_pages);
rc = -errno;
- } else if (!is_power_of_2(opts->mmap_pages) &&
- (opts->mmap_pages != UINT_MAX)) {
- pr_err("--mmap_pages/-m value must be a power of two.");
- rc = -EINVAL;
} else {
pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
rc = -errno;
@@ -253,13 +250,14 @@ out:
static int process_buildids(struct perf_record *rec)
{
- u64 size = lseek(rec->output, 0, SEEK_CUR);
+ struct perf_data_file *file = &rec->file;
+ struct perf_session *session = rec->session;
+ u64 size = lseek(file->fd, 0, SEEK_CUR);
if (size == 0)
return 0;
- rec->session->fd = rec->output;
- return __perf_session__process_events(rec->session, rec->post_processing_offset,
+ return __perf_session__process_events(session, rec->post_processing_offset,
size - rec->post_processing_offset,
size, &build_id__mark_dso_hit_ops);
}
@@ -267,17 +265,18 @@ static int process_buildids(struct perf_record *rec)
static void perf_record__exit(int status, void *arg)
{
struct perf_record *rec = arg;
+ struct perf_data_file *file = &rec->file;
if (status != 0)
return;
- if (!rec->opts.pipe_output) {
+ if (!file->is_pipe) {
rec->session->header.data_size += rec->bytes_written;
if (!rec->no_buildid)
process_buildids(rec);
perf_session__write_header(rec->session, rec->evlist,
- rec->output, true);
+ file->fd, true);
perf_session__delete(rec->session);
perf_evlist__delete(rec->evlist);
symbol__exit();
@@ -345,62 +344,26 @@ out:
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
- struct stat st;
- int flags;
- int err, output, feat;
+ int err, feat;
unsigned long waking = 0;
const bool forks = argc > 0;
struct machine *machine;
struct perf_tool *tool = &rec->tool;
struct perf_record_opts *opts = &rec->opts;
struct perf_evlist *evsel_list = rec->evlist;
- const char *output_name = rec->output_name;
+ struct perf_data_file *file = &rec->file;
struct perf_session *session;
bool disabled = false;
rec->progname = argv[0];
- rec->page_size = sysconf(_SC_PAGE_SIZE);
-
on_exit(perf_record__sig_exit, rec);
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
signal(SIGUSR1, sig_handler);
signal(SIGTERM, sig_handler);
- if (!output_name) {
- if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
- opts->pipe_output = true;
- else
- rec->output_name = output_name = "perf.data";
- }
- if (output_name) {
- if (!strcmp(output_name, "-"))
- opts->pipe_output = true;
- else if (!stat(output_name, &st) && st.st_size) {
- char oldname[PATH_MAX];
- snprintf(oldname, sizeof(oldname), "%s.old",
- output_name);
- unlink(oldname);
- rename(output_name, oldname);
- }
- }
-
- flags = O_CREAT|O_RDWR|O_TRUNC;
-
- if (opts->pipe_output)
- output = STDOUT_FILENO;
- else
- output = open(output_name, flags, S_IRUSR | S_IWUSR);
- if (output < 0) {
- perror("failed to create output file");
- return -1;
- }
-
- rec->output = output;
-
- session = perf_session__new(output_name, O_WRONLY,
- true, false, NULL);
+ session = perf_session__new(file, false, NULL);
if (session == NULL) {
pr_err("Not enough memory for reading perf file header\n");
return -1;
@@ -422,7 +385,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
if (forks) {
err = perf_evlist__prepare_workload(evsel_list, &opts->target,
- argv, opts->pipe_output,
+ argv, file->is_pipe,
true);
if (err < 0) {
pr_err("Couldn't run the workload!\n");
@@ -443,13 +406,13 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
*/
on_exit(perf_record__exit, rec);
- if (opts->pipe_output) {
- err = perf_header__write_pipe(output);
+ if (file->is_pipe) {
+ err = perf_header__write_pipe(file->fd);
if (err < 0)
goto out_delete_session;
} else {
err = perf_session__write_header(session, evsel_list,
- output, false);
+ file->fd, false);
if (err < 0)
goto out_delete_session;
}
@@ -462,11 +425,11 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
goto out_delete_session;
}
- rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
+ rec->post_processing_offset = lseek(file->fd, 0, SEEK_CUR);
machine = &session->machines.host;
- if (opts->pipe_output) {
+ if (file->is_pipe) {
err = perf_event__synthesize_attrs(tool, session,
process_synthesized_event);
if (err < 0) {
@@ -483,7 +446,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
* return this more properly and also
* propagate errors that now are calling die()
*/
- err = perf_event__synthesize_tracing_data(tool, output, evsel_list,
+ err = perf_event__synthesize_tracing_data(tool, file->fd, evsel_list,
process_synthesized_event);
if (err <= 0) {
pr_err("Couldn't record tracing data.\n");
@@ -590,7 +553,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
fprintf(stderr,
"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
(double)rec->bytes_written / 1024.0 / 1024.0,
- output_name,
+ file->path,
rec->bytes_written / 24);
return 0;
@@ -618,6 +581,9 @@ static const struct branch_mode branch_modes[] = {
BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
+ BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
+ BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
+ BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
BRANCH_END
};
@@ -684,7 +650,7 @@ error:
return ret;
}
-#ifdef LIBUNWIND_SUPPORT
+#ifdef HAVE_LIBUNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
char *endptr;
@@ -710,23 +676,14 @@ static int get_stack_size(char *str, unsigned long *_size)
max_size, str);
return -1;
}
-#endif /* LIBUNWIND_SUPPORT */
+#endif /* HAVE_LIBUNWIND_SUPPORT */
-int record_parse_callchain_opt(const struct option *opt,
- const char *arg, int unset)
+int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
{
- struct perf_record_opts *opts = opt->value;
char *tok, *name, *saveptr = NULL;
char *buf;
int ret = -1;
- /* --no-call-graph */
- if (unset)
- return 0;
-
- /* We specified default option if none is provided. */
- BUG_ON(!arg);
-
/* We need buffer that we know we can write to. */
buf = malloc(strlen(arg) + 1);
if (!buf)
@@ -748,7 +705,7 @@ int record_parse_callchain_opt(const struct option *opt,
"needed for -g fp\n");
break;
-#ifdef LIBUNWIND_SUPPORT
+#ifdef HAVE_LIBUNWIND_SUPPORT
/* Dwarf style */
} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
const unsigned long default_stack_dump_size = 8192;
@@ -764,13 +721,9 @@ int record_parse_callchain_opt(const struct option *opt,
ret = get_stack_size(tok, &size);
opts->stack_dump_size = size;
}
-
- if (!ret)
- pr_debug("callchain: stack dump size %d\n",
- opts->stack_dump_size);
-#endif /* LIBUNWIND_SUPPORT */
+#endif /* HAVE_LIBUNWIND_SUPPORT */
} else {
- pr_err("callchain: Unknown -g option "
+ pr_err("callchain: Unknown --call-graph option "
"value: %s\n", arg);
break;
}
@@ -778,13 +731,52 @@ int record_parse_callchain_opt(const struct option *opt,
} while (0);
free(buf);
+ return ret;
+}
+
+static void callchain_debug(struct perf_record_opts *opts)
+{
+ pr_debug("callchain: type %d\n", opts->call_graph);
+
+ if (opts->call_graph == CALLCHAIN_DWARF)
+ pr_debug("callchain: stack dump size %d\n",
+ opts->stack_dump_size);
+}
+
+int record_parse_callchain_opt(const struct option *opt,
+ const char *arg,
+ int unset)
+{
+ struct perf_record_opts *opts = opt->value;
+ int ret;
+
+ /* --no-call-graph */
+ if (unset) {
+ opts->call_graph = CALLCHAIN_NONE;
+ pr_debug("callchain: disabled\n");
+ return 0;
+ }
+ ret = record_parse_callchain(arg, opts);
if (!ret)
- pr_debug("callchain: type %d\n", opts->call_graph);
+ callchain_debug(opts);
return ret;
}
+int record_callchain_opt(const struct option *opt,
+ const char *arg __maybe_unused,
+ int unset __maybe_unused)
+{
+ struct perf_record_opts *opts = opt->value;
+
+ if (opts->call_graph == CALLCHAIN_NONE)
+ opts->call_graph = CALLCHAIN_FP;
+
+ callchain_debug(opts);
+ return 0;
+}
+
static const char * const record_usage[] = {
"perf record [<options>] [<command>]",
"perf record [<options>] -- <command> [<options>]",
@@ -813,12 +805,12 @@ static struct perf_record record = {
},
};
-#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "
+#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
-#ifdef LIBUNWIND_SUPPORT
-const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
+#ifdef HAVE_LIBUNWIND_SUPPORT
+const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
-const char record_callchain_help[] = CALLCHAIN_HELP "[fp]";
+const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif
/*
@@ -849,18 +841,22 @@ const struct option record_options[] = {
OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
"list of cpus to monitor"),
OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
- OPT_STRING('o', "output", &record.output_name, "file",
+ OPT_STRING('o', "output", &record.file.path, "file",
"output file name"),
OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
"child tasks do not inherit counters"),
OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
- OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
- "number of mmap data pages"),
+ OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
+ "number of mmap data pages",
+ perf_evlist__parse_mmap_pages),
OPT_BOOLEAN(0, "group", &record.opts.group,
"put the counters into a counter group"),
- OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts,
- "mode[,dump_size]", record_callchain_help,
- &record_parse_callchain_opt, "fp"),
+ OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
+ NULL, "enables call-graph recording" ,
+ &record_callchain_opt),
+ OPT_CALLBACK(0, "call-graph", &record.opts,
+ "mode[,dump_size]", record_callchain_help,
+ &record_parse_callchain_opt),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
@@ -891,6 +887,8 @@ const struct option record_options[] = {
parse_branch_stack),
OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
"sample by weight (on special events only)"),
+ OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
+ "sample transaction flags (special events only)"),
OPT_END()
};
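
The record changes split -g (just enable call-graph recording) from --call-graph mode[,dump_size]. Below is a standalone sketch of parsing that argument format, loosely modelled on record_parse_callchain(); the "fp"/"dwarf" keywords and the 8192 default come from the diff, the rest is illustrative.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	enum { CALLCHAIN_NONE, CALLCHAIN_FP, CALLCHAIN_DWARF };

	struct record_opts {
		int call_graph;
		unsigned long stack_dump_size;
	};

	static int parse_callchain(const char *arg, struct record_opts *opts)
	{
		char *buf = strdup(arg), *tok, *saveptr = NULL;
		int ret = -1;

		if (!buf)
			return -1;

		tok = strtok_r(buf, ",", &saveptr);
		if (!tok)
			goto out;

		if (!strcmp(tok, "fp")) {
			opts->call_graph = CALLCHAIN_FP;
			ret = 0;
		} else if (!strcmp(tok, "dwarf")) {
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = 8192;	/* default dump size */

			/* optional second token overrides the dump size */
			tok = strtok_r(NULL, ",", &saveptr);
			if (tok)
				opts->stack_dump_size = strtoul(tok, NULL, 0);
			ret = 0;
		} else {
			fprintf(stderr, "callchain: unknown option value: %s\n", tok);
		}
	out:
		free(buf);
		return ret;
	}

	int main(int argc, char **argv)
	{
		struct record_opts opts = { .call_graph = CALLCHAIN_NONE };

		if (argc > 1 && !parse_callchain(argv[1], &opts))
			printf("type %d, dump size %lu\n", opts.call_graph,
			       opts.stack_dump_size);
		return 0;
	}
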
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 72eae7498c09..98d3891392e2 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -33,8 +33,10 @@
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
+#include "util/data.h"
#include "arch/common.h"
+#include <dlfcn.h>
#include <linux/bitmap.h>
struct perf_report {
@@ -47,6 +49,7 @@ struct perf_report {
bool show_threads;
bool inverted_callchain;
bool mem_mode;
+ int max_stack;
struct perf_read_values show_threads_values;
const char *pretty_printing_style;
const char *cpu_list;
@@ -88,7 +91,8 @@ static int perf_report__add_mem_hist_entry(struct perf_tool *tool,
if ((sort__has_parent || symbol_conf.use_callchain) &&
sample->callchain) {
err = machine__resolve_callchain(machine, evsel, al->thread,
- sample, &parent, al);
+ sample, &parent, al,
+ rep->max_stack);
if (err)
return err;
}
@@ -179,7 +183,8 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
if ((sort__has_parent || symbol_conf.use_callchain)
&& sample->callchain) {
err = machine__resolve_callchain(machine, evsel, al->thread,
- sample, &parent, al);
+ sample, &parent, al,
+ rep->max_stack);
if (err)
return err;
}
@@ -242,24 +247,27 @@ out:
return err;
}
-static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
+static int perf_evsel__add_hist_entry(struct perf_tool *tool,
+ struct perf_evsel *evsel,
struct addr_location *al,
struct perf_sample *sample,
struct machine *machine)
{
+ struct perf_report *rep = container_of(tool, struct perf_report, tool);
struct symbol *parent = NULL;
int err = 0;
struct hist_entry *he;
if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
err = machine__resolve_callchain(machine, evsel, al->thread,
- sample, &parent, al);
+ sample, &parent, al,
+ rep->max_stack);
if (err)
return err;
}
he = __hists__add_entry(&evsel->hists, al, parent, sample->period,
- sample->weight);
+ sample->weight, sample->transaction);
if (he == NULL)
return -ENOMEM;
@@ -330,7 +338,8 @@ static int process_sample_event(struct perf_tool *tool,
if (al.map != NULL)
al.map->dso->hit = 1;
- ret = perf_evsel__add_hist_entry(evsel, &al, sample, machine);
+ ret = perf_evsel__add_hist_entry(tool, evsel, &al, sample,
+ machine);
if (ret < 0)
pr_debug("problem incrementing symbol period, skipping event\n");
}
@@ -364,10 +373,11 @@ static int process_read_event(struct perf_tool *tool,
/* For pipe mode, sample_type is not currently set */
static int perf_report__setup_sample_type(struct perf_report *rep)
{
- struct perf_session *self = rep->session;
- u64 sample_type = perf_evlist__combined_sample_type(self->evlist);
+ struct perf_session *session = rep->session;
+ u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
+ bool is_pipe = perf_data_file__is_pipe(session->file);
- if (!self->fd_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
ui__error("Selected --sort parent, but no "
"callchain data. Did you call "
@@ -390,7 +400,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
}
if (sort__mode == SORT_MODE__BRANCH) {
- if (!self->fd_pipe &&
+ if (!is_pipe &&
!(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
ui__error("Selected -b but no branch data. "
"Did you call perf record without -b?\n");
@@ -407,14 +417,14 @@ static void sig_handler(int sig __maybe_unused)
}
static size_t hists__fprintf_nr_sample_events(struct perf_report *rep,
- struct hists *self,
+ struct hists *hists,
const char *evname, FILE *fp)
{
size_t ret;
char unit;
- unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
- u64 nr_events = self->stats.total_period;
- struct perf_evsel *evsel = hists_to_evsel(self);
+ unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ u64 nr_events = hists->stats.total_period;
+ struct perf_evsel *evsel = hists_to_evsel(hists);
char buf[512];
size_t size = sizeof(buf);
@@ -486,6 +496,8 @@ static int __cmd_report(struct perf_report *rep)
struct map *kernel_map;
struct kmap *kernel_kmap;
const char *help = "For a higher level overview, try: perf report --sort comm,dso";
+ struct ui_progress prog;
+ struct perf_data_file *file = session->file;
signal(SIGINT, sig_handler);
@@ -547,13 +559,19 @@ static int __cmd_report(struct perf_report *rep)
}
nr_samples = 0;
+ list_for_each_entry(pos, &session->evlist->entries, node)
+ nr_samples += pos->hists.nr_entries;
+
+ ui_progress__init(&prog, nr_samples, "Merging related events...");
+
+ nr_samples = 0;
list_for_each_entry(pos, &session->evlist->entries, node) {
struct hists *hists = &pos->hists;
if (pos->idx == 0)
hists->symbol_filter_str = rep->symbol_filter_str;
- hists__collapse_resort(hists);
+ hists__collapse_resort(hists, &prog);
nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE];
/* Non-group events are considered as leader */
@@ -565,12 +583,13 @@ static int __cmd_report(struct perf_report *rep)
hists__link(leader_hists, hists);
}
}
+ ui_progress__finish();
if (session_done())
return 0;
if (nr_samples == 0) {
- ui__error("The %s file has no samples!\n", session->filename);
+ ui__error("The %s file has no samples!\n", file->path);
return 0;
}
@@ -591,8 +610,19 @@ static int __cmd_report(struct perf_report *rep)
ret = 0;
} else if (use_browser == 2) {
- perf_evlist__gtk_browse_hists(session->evlist, help,
- NULL, rep->min_percent);
+ int (*hist_browser)(struct perf_evlist *,
+ const char *,
+ struct hist_browser_timer *,
+ float min_pcnt);
+
+ hist_browser = dlsym(perf_gtk_handle,
+ "perf_evlist__gtk_browse_hists");
+ if (hist_browser == NULL) {
+ ui__error("GTK browser not found!\n");
+ return ret;
+ }
+ hist_browser(session->evlist, help, NULL,
+ rep->min_percent);
}
} else
perf_evlist__tty_browse_hists(session->evlist, rep, help);
@@ -757,6 +787,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
.ordered_samples = true,
.ordering_requires_timestamps = true,
},
+ .max_stack = PERF_MAX_STACK_DEPTH,
.pretty_printing_style = "normal",
};
const struct option options[] = {
@@ -787,7 +818,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline,"
" dso_to, dso_from, symbol_to, symbol_from, mispredict,"
" weight, local_weight, mem, symbol_daddr, dso_daddr, tlb, "
- "snoop, locked"),
+ "snoop, locked, abort, in_tx, transaction"),
OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
"Show sample percentage for different cpu modes"),
OPT_STRING('p', "parent", &parent_pattern, "regex",
@@ -797,6 +828,10 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order",
"Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit, callchain order, key (function or address). "
"Default: fractal,0.5,callee,function", &parse_callchain_opt, callchain_default_opt),
+ OPT_INTEGER(0, "max-stack", &report.max_stack,
+ "Set the maximum stack depth when parsing the callchain, "
+ "anything beyond the specified depth will be ignored. "
+ "Default: " __stringify(PERF_MAX_STACK_DEPTH)),
OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
"alias for inverted call graph"),
OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
@@ -845,6 +880,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"Don't show entries under that percent", parse_percent_limit),
OPT_END()
};
+ struct perf_data_file file = {
+ .mode = PERF_DATA_MODE_READ,
+ };
perf_config(perf_report_config, &report);
@@ -874,9 +912,11 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
perf_hpp__init();
}
+ file.path = input_name;
+ file.force = report.force;
+
repeat:
- session = perf_session__new(input_name, O_RDONLY,
- report.force, false, &report.tool);
+ session = perf_session__new(&file, false, &report.tool);
if (session == NULL)
return -ENOMEM;
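The report hunks above replace globals with state threaded through the tool: struct perf_tool is embedded in struct perf_report, so process_sample_event() can recover the outer struct with container_of() and read rep->max_stack. Below is a minimal, self-contained sketch of that embedding/recovery pattern; the report/handle_sample names and the max_stack value are illustrative, not taken from the patch.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the perf structs used in the hunks above. */
struct tool {
	int (*sample)(struct tool *tool);
};

struct report {
	struct tool tool;	/* embedded, not a pointer */
	int max_stack;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* The callback only sees the embedded tool and recovers the outer struct. */
static int handle_sample(struct tool *tool)
{
	struct report *rep = container_of(tool, struct report, tool);

	printf("max_stack = %d\n", rep->max_stack);
	return 0;
}

int main(void)
{
	struct report rep = {
		.tool		= { .sample = handle_sample },
		.max_stack	= 127,	/* placeholder default */
	};

	return rep.tool.sample(&rep.tool);
}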
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index d8c51b2f263f..ddb5dc15be17 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1446,8 +1446,12 @@ static int perf_sched__read_events(struct perf_sched *sched,
{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
};
struct perf_session *session;
+ struct perf_data_file file = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ };
- session = perf_session__new(input_name, O_RDONLY, 0, false, &sched->tool);
+ session = perf_session__new(&file, false, &sched->tool);
if (session == NULL) {
pr_debug("No Memory for session\n");
return -1;
@@ -1651,29 +1655,27 @@ static int __cmd_record(int argc, const char **argv)
return cmd_record(i, rec_argv, NULL);
}
-static const char default_sort_order[] = "avg, max, switch, runtime";
-static struct perf_sched sched = {
- .tool = {
- .sample = perf_sched__process_tracepoint_sample,
- .comm = perf_event__process_comm,
- .lost = perf_event__process_lost,
- .fork = perf_sched__process_fork_event,
- .ordered_samples = true,
- },
- .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
- .sort_list = LIST_HEAD_INIT(sched.sort_list),
- .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
- .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
- .curr_pid = { [0 ... MAX_CPUS - 1] = -1 },
- .sort_order = default_sort_order,
- .replay_repeat = 10,
- .profile_cpu = -1,
- .next_shortname1 = 'A',
- .next_shortname2 = '0',
-};
-
int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
+ const char default_sort_order[] = "avg, max, switch, runtime";
+ struct perf_sched sched = {
+ .tool = {
+ .sample = perf_sched__process_tracepoint_sample,
+ .comm = perf_event__process_comm,
+ .lost = perf_event__process_lost,
+ .fork = perf_sched__process_fork_event,
+ .ordered_samples = true,
+ },
+ .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
+ .sort_list = LIST_HEAD_INIT(sched.sort_list),
+ .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
+ .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
+ .sort_order = default_sort_order,
+ .replay_repeat = 10,
+ .profile_cpu = -1,
+ .next_shortname1 = 'A',
+ .next_shortname2 = '0',
+ };
const struct option latency_options[] = {
OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
"sort by key(s): runtime, switch, avg, max"),
@@ -1729,6 +1731,10 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
.switch_event = replay_switch_event,
.fork_event = replay_fork_event,
};
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
+ sched.curr_pid[i] = -1;
argc = parse_options(argc, argv, sched_options, sched_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
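The sched change moves struct perf_sched off file scope into cmd_sched(), so the old GNU range designator .curr_pid = { [0 ... MAX_CPUS - 1] = -1 } is traded for a run-time loop over the array. A small sketch contrasting the two initialization styles; the array size and names are made up, and the range designator is a GNU C extension.

#include <stdio.h>

#define MAX_CPUS 8	/* illustrative; perf uses a much larger value */

/* File scope: the GNU range designator fills the array at compile time. */
static int file_scope_curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS - 1] = -1 };

int main(void)
{
	int curr_pid[MAX_CPUS];
	unsigned int i;

	/* Automatic storage: the patch initializes the same field with a loop. */
	for (i = 0; i < sizeof(curr_pid) / sizeof(curr_pid[0]); i++)
		curr_pid[i] = -1;

	printf("%d %d\n", file_scope_curr_pid[MAX_CPUS - 1], curr_pid[0]);
	return 0;
}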
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 9c333ff3dfeb..0ae88c2538a1 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -15,6 +15,7 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/sort.h"
+#include "util/data.h"
#include <linux/bitmap.h>
static char const *script_name;
@@ -409,7 +410,9 @@ static void print_sample_bts(union perf_event *event,
printf(" => ");
/* print branch_to information */
- if (PRINT_FIELD(ADDR))
+ if (PRINT_FIELD(ADDR) ||
+ ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
+ !output[attr->type].user_set))
print_sample_addr(event, sample, machine, thread, attr);
printf("\n");
@@ -539,18 +542,9 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
return 0;
}
-static struct perf_tool perf_script = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_exit,
- .fork = perf_event__process_fork,
- .attr = perf_event__process_attr,
- .tracing_data = perf_event__process_tracing_data,
- .build_id = perf_event__process_build_id,
- .ordered_samples = true,
- .ordering_requires_timestamps = true,
+struct perf_script {
+ struct perf_tool tool;
+ struct perf_session *session;
};
static void sig_handler(int sig __maybe_unused)
@@ -558,13 +552,13 @@ static void sig_handler(int sig __maybe_unused)
session_done = 1;
}
-static int __cmd_script(struct perf_session *session)
+static int __cmd_script(struct perf_script *script)
{
int ret;
signal(SIGINT, sig_handler);
- ret = perf_session__process_events(session, &perf_script);
+ ret = perf_session__process_events(script->session, &script->tool);
if (debug_mode)
pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
@@ -1113,10 +1107,14 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
struct perf_session *session;
+ struct perf_data_file file = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ };
char *temp;
int i = 0;
- session = perf_session__new(input_name, O_RDONLY, 0, false, NULL);
+ session = perf_session__new(&file, false, NULL);
if (!session)
return -1;
@@ -1266,6 +1264,21 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
char *script_path = NULL;
const char **__argv;
int i, j, err;
+ struct perf_script script = {
+ .tool = {
+ .sample = process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_exit,
+ .fork = perf_event__process_fork,
+ .attr = perf_event__process_attr,
+ .tracing_data = perf_event__process_tracing_data,
+ .build_id = perf_event__process_build_id,
+ .ordered_samples = true,
+ .ordering_requires_timestamps = true,
+ },
+ };
const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
@@ -1317,12 +1330,17 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
"perf script [<options>] <top-script> [script-args]",
NULL
};
+ struct perf_data_file file = {
+ .mode = PERF_DATA_MODE_READ,
+ };
setup_scripting();
argc = parse_options(argc, argv, options, script_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
+ file.path = input_name;
+
if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
if (!rec_script_path)
@@ -1486,11 +1504,12 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
if (!script_name)
setup_pager();
- session = perf_session__new(input_name, O_RDONLY, 0, false,
- &perf_script);
+ session = perf_session__new(&file, false, &script.tool);
if (session == NULL)
return -ENOMEM;
+ script.session = session;
+
if (cpu_list) {
if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap))
return -1;
@@ -1514,7 +1533,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
return -1;
}
- input = open(session->filename, O_RDONLY); /* input_name */
+ input = open(file.path, O_RDONLY); /* input_name */
if (input < 0) {
perror("failed to open file");
return -1;
@@ -1554,7 +1573,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
if (err < 0)
goto out;
- err = __cmd_script(session);
+ err = __cmd_script(&script);
perf_session__delete(session);
cleanup_scripting();
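script, like report, sched and timechart in this series, now describes its input as a struct perf_data_file (.path, .mode, optionally .force) and hands a pointer to perf_session__new() instead of a filename plus open flags. The sketch below mocks that calling convention with stand-in types (my_data_file and my_data_file__open are not perf APIs) just to show the shape of the interface.

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

enum data_mode { DATA_MODE_READ, DATA_MODE_WRITE };

/* Stand-in for perf_data_file: one descriptor object instead of loose args. */
struct my_data_file {
	const char	*path;
	enum data_mode	 mode;
	bool		 force;
	int		 fd;
};

static int my_data_file__open(struct my_data_file *file)
{
	file->fd = open(file->path,
			file->mode == DATA_MODE_READ ? O_RDONLY : O_WRONLY);
	return file->fd < 0 ? -1 : 0;
}

int main(int argc, char **argv)
{
	struct my_data_file file = {
		.path = argc > 1 ? argv[1] : "perf.data",
		.mode = DATA_MODE_READ,
	};

	if (my_data_file__open(&file) < 0) {
		perror(file.path);
		return EXIT_FAILURE;
	}
	printf("opened %s read-only (fd %d)\n", file.path, file.fd);
	close(file.fd);
	return EXIT_SUCCESS;
}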
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 5098f144b92d..1a9c95d270aa 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -46,6 +46,7 @@
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
+#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
@@ -70,6 +71,41 @@ static void print_counter_aggr(struct perf_evsel *counter, char *prefix);
static void print_counter(struct perf_evsel *counter, char *prefix);
static void print_aggr(char *prefix);
+/* Default events used for perf stat -T */
+static const char * const transaction_attrs[] = {
+ "task-clock",
+ "{"
+ "instructions,"
+ "cycles,"
+ "cpu/cycles-t/,"
+ "cpu/tx-start/,"
+ "cpu/el-start/,"
+ "cpu/cycles-ct/"
+ "}"
+};
+
+/* More limited version when the CPU does not have all events. */
+static const char * const transaction_limited_attrs[] = {
+ "task-clock",
+ "{"
+ "instructions,"
+ "cycles,"
+ "cpu/cycles-t/,"
+ "cpu/tx-start/"
+ "}"
+};
+
+/* Must match transaction_attrs and the beginning of transaction_limited_attrs. */
+enum {
+ T_TASK_CLOCK,
+ T_INSTRUCTIONS,
+ T_CYCLES,
+ T_CYCLES_IN_TX,
+ T_TRANSACTION_START,
+ T_ELISION_START,
+ T_CYCLES_IN_TX_CP,
+};
+
static struct perf_evlist *evsel_list;
static struct perf_target target = {
@@ -90,6 +126,7 @@ static enum aggr_mode aggr_mode = AGGR_GLOBAL;
static volatile pid_t child_pid = -1;
static bool null_run = false;
static int detailed_run = 0;
+static bool transaction_run;
static bool big_num = true;
static int big_num_opt = -1;
static const char *csv_sep = NULL;
@@ -214,7 +251,10 @@ static struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
static struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
+static struct stats runtime_cycles_in_tx_stats[MAX_NR_CPUS];
static struct stats walltime_nsecs_stats;
+static struct stats runtime_transaction_stats[MAX_NR_CPUS];
+static struct stats runtime_elision_stats[MAX_NR_CPUS];
static void perf_stat__reset_stats(struct perf_evlist *evlist)
{
@@ -236,6 +276,11 @@ static void perf_stat__reset_stats(struct perf_evlist *evlist)
memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
+ memset(runtime_cycles_in_tx_stats, 0,
+ sizeof(runtime_cycles_in_tx_stats));
+ memset(runtime_transaction_stats, 0,
+ sizeof(runtime_transaction_stats));
+ memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}
@@ -274,6 +319,29 @@ static inline int nsec_counter(struct perf_evsel *evsel)
return 0;
}
+static struct perf_evsel *nth_evsel(int n)
+{
+ static struct perf_evsel **array;
+ static int array_len;
+ struct perf_evsel *ev;
+ int j;
+
+	/* Assumes this is only called once evsel_list no longer changes. */
+ if (!array) {
+ list_for_each_entry(ev, &evsel_list->entries, node)
+ array_len++;
+ array = malloc(array_len * sizeof(void *));
+ if (!array)
+ exit(ENOMEM);
+ j = 0;
+ list_for_each_entry(ev, &evsel_list->entries, node)
+ array[j++] = ev;
+ }
+ if (n < array_len)
+ return array[n];
+ return NULL;
+}
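nth_evsel() lazily flattens evsel_list into an array so the positional T_* enum above can index it; this relies on setup_events() having added the transaction events in exactly the order of transaction_attrs, and on the list not changing afterwards. A standalone sketch of that lazy flatten-and-index idea; the list and names are illustrative, and the exit(ENOMEM) mirrors the patch.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct node { const char *name; struct node *next; };

/* Lazily cache the list as an array so callers can index it by position,
 * like nth_evsel(); the cache assumes the list never changes afterwards. */
static const char *nth_name(struct node *head, int n)
{
	static struct node **array;
	static int array_len;
	struct node *pos;
	int j = 0;

	if (!array) {
		for (pos = head; pos; pos = pos->next)
			array_len++;
		array = calloc(array_len, sizeof(*array));
		if (!array)
			exit(ENOMEM);	/* mirrors the patch */
		for (pos = head; pos; pos = pos->next)
			array[j++] = pos;
	}
	return n < array_len ? array[n]->name : NULL;
}

/* Positional, like the T_* enum: must match the order the events were added. */
enum { T_TASK_CLOCK, T_INSTRUCTIONS, T_CYCLES };

int main(void)
{
	struct node c = { "cycles", NULL };
	struct node i = { "instructions", &c };
	struct node t = { "task-clock", &i };

	printf("%s\n", nth_name(&t, T_CYCLES));	/* prints "cycles" */
	return 0;
}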
+
/*
* Update various tracking values we maintain to print
* more semantic information such as miss/hit ratios,
@@ -285,6 +353,15 @@ static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
update_stats(&runtime_nsecs_stats[0], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
update_stats(&runtime_cycles_stats[0], count[0]);
+ else if (transaction_run &&
+ perf_evsel__cmp(counter, nth_evsel(T_CYCLES_IN_TX)))
+ update_stats(&runtime_cycles_in_tx_stats[0], count[0]);
+ else if (transaction_run &&
+ perf_evsel__cmp(counter, nth_evsel(T_TRANSACTION_START)))
+ update_stats(&runtime_transaction_stats[0], count[0]);
+ else if (transaction_run &&
+ perf_evsel__cmp(counter, nth_evsel(T_ELISION_START)))
+ update_stats(&runtime_elision_stats[0], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
update_stats(&runtime_stalled_cycles_front_stats[0], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
@@ -629,10 +706,13 @@ static void nsec_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
{
double msecs = avg / 1e6;
const char *fmt = csv_output ? "%.6f%s%s" : "%18.6f%s%-25s";
+ char name[25];
aggr_printout(evsel, cpu, nr);
- fprintf(output, fmt, msecs, csv_sep, perf_evsel__name(evsel));
+ scnprintf(name, sizeof(name), "%s%s",
+ perf_evsel__name(evsel), csv_output ? "" : " (msec)");
+ fprintf(output, fmt, msecs, csv_sep, name);
if (evsel->cgrp)
fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
@@ -828,7 +908,7 @@ static void print_ll_cache_misses(int cpu,
static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
{
- double total, ratio = 0.0;
+ double total, ratio = 0.0, total2;
const char *fmt;
if (csv_output)
@@ -853,11 +933,10 @@ static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
total = avg_stats(&runtime_cycles_stats[cpu]);
- if (total)
+ if (total) {
ratio = avg / total;
-
- fprintf(output, " # %5.2f insns per cycle ", ratio);
-
+ fprintf(output, " # %5.2f insns per cycle ", ratio);
+ }
total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]);
total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu]));
@@ -920,10 +999,47 @@ static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
total = avg_stats(&runtime_nsecs_stats[cpu]);
+ if (total) {
+ ratio = avg / total;
+ fprintf(output, " # %8.3f GHz ", ratio);
+ }
+ } else if (transaction_run &&
+ perf_evsel__cmp(evsel, nth_evsel(T_CYCLES_IN_TX))) {
+ total = avg_stats(&runtime_cycles_stats[cpu]);
+ if (total)
+ fprintf(output,
+ " # %5.2f%% transactional cycles ",
+ 100.0 * (avg / total));
+ } else if (transaction_run &&
+ perf_evsel__cmp(evsel, nth_evsel(T_CYCLES_IN_TX_CP))) {
+ total = avg_stats(&runtime_cycles_stats[cpu]);
+ total2 = avg_stats(&runtime_cycles_in_tx_stats[cpu]);
+ if (total2 < avg)
+ total2 = avg;
+ if (total)
+ fprintf(output,
+ " # %5.2f%% aborted cycles ",
+ 100.0 * ((total2-avg) / total));
+ } else if (transaction_run &&
+ perf_evsel__cmp(evsel, nth_evsel(T_TRANSACTION_START)) &&
+ avg > 0 &&
+ runtime_cycles_in_tx_stats[cpu].n != 0) {
+ total = avg_stats(&runtime_cycles_in_tx_stats[cpu]);
+
+ if (total)
+ ratio = total / avg;
+
+ fprintf(output, " # %8.0f cycles / transaction ", ratio);
+ } else if (transaction_run &&
+ perf_evsel__cmp(evsel, nth_evsel(T_ELISION_START)) &&
+ avg > 0 &&
+ runtime_cycles_in_tx_stats[cpu].n != 0) {
+ total = avg_stats(&runtime_cycles_in_tx_stats[cpu]);
+
if (total)
- ratio = 1.0 * avg / total;
+ ratio = total / avg;
- fprintf(output, " # %8.3f GHz ", ratio);
+ fprintf(output, " # %8.0f cycles / elision ", ratio);
} else if (runtime_nsecs_stats[cpu].n != 0) {
char unit = 'M';
@@ -1116,7 +1232,11 @@ static void print_stat(int argc, const char **argv)
if (!csv_output) {
fprintf(output, "\n");
fprintf(output, " Performance counter stats for ");
- if (!perf_target__has_task(&target)) {
+ if (target.system_wide)
+ fprintf(output, "\'system wide");
+ else if (target.cpu_list)
+ fprintf(output, "\'CPU(s) %s", target.cpu_list);
+ else if (!perf_target__has_task(&target)) {
fprintf(output, "\'%s", argv[0]);
for (i = 1; i < argc; i++)
fprintf(output, " %s", argv[i]);
@@ -1237,6 +1357,16 @@ static int perf_stat_init_aggr_mode(void)
return 0;
}
+static int setup_events(const char * const *attrs, unsigned len)
+{
+ unsigned i;
+
+ for (i = 0; i < len; i++) {
+ if (parse_events(evsel_list, attrs[i]))
+ return -1;
+ }
+ return 0;
+}
/*
* Add default attributes, if there were no attributes specified or
@@ -1355,6 +1485,22 @@ static int add_default_attributes(void)
if (null_run)
return 0;
+ if (transaction_run) {
+ int err;
+ if (pmu_have_event("cpu", "cycles-ct") &&
+ pmu_have_event("cpu", "el-start"))
+ err = setup_events(transaction_attrs,
+ ARRAY_SIZE(transaction_attrs));
+ else
+ err = setup_events(transaction_limited_attrs,
+ ARRAY_SIZE(transaction_limited_attrs));
+ if (err < 0) {
+ fprintf(stderr, "Cannot set up transaction events\n");
+ return -1;
+ }
+ return 0;
+ }
+
if (!evsel_list->nr_entries) {
if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
return -1;
@@ -1389,6 +1535,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
int output_fd = 0;
const char *output_name = NULL;
const struct option options[] = {
+ OPT_BOOLEAN('T', "transaction", &transaction_run,
+ "hardware transaction statistics"),
OPT_CALLBACK('e', "event", &evsel_list, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
@@ -1514,8 +1662,9 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
} else if (big_num_opt == 0) /* User passed --no-big-num */
big_num = false;
- if (!argc && !perf_target__has_task(&target))
+ if (!argc && perf_target__none(&target))
usage_with_options(stat_usage, options);
+
if (run_count < 0) {
usage_with_options(stat_usage, options);
} else if (run_count == 0) {
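The new -T output columns are simple ratios over the shadow stats gathered above: transactional cycles are cpu/cycles-t/ over cycles, aborted cycles are (cycles-t minus cycles-ct) over cycles, and the per-transaction/per-elision figures divide cycles-t by the tx-start/el-start counts. A worked example with made-up counter values; the numbers are purely illustrative.

#include <stdio.h>

int main(void)
{
	/* Made-up counter totals, standing in for the shadow stats. */
	double cycles       = 1000000.0;
	double cycles_in_tx =  250000.0;	/* cpu/cycles-t/  */
	double cycles_ct    =  200000.0;	/* cpu/cycles-ct/ */
	double tx_start     =     500.0;	/* cpu/tx-start/  */
	double el_start     =     100.0;	/* cpu/el-start/  */

	printf("%5.2f%% transactional cycles\n",
	       100.0 * (cycles_in_tx / cycles));
	printf("%5.2f%% aborted cycles\n",
	       100.0 * ((cycles_in_tx - cycles_ct) / cycles));
	printf("%8.0f cycles / transaction\n", cycles_in_tx / tx_start);
	printf("%8.0f cycles / elision\n", cycles_in_tx / el_start);
	return 0;
}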
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index c2e02319347a..e11c61d9bda4 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -36,6 +36,7 @@
#include "util/session.h"
#include "util/svghelper.h"
#include "util/tool.h"
+#include "util/data.h"
#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1
@@ -990,8 +991,13 @@ static int __cmd_timechart(const char *output_name)
{ "power:power_frequency", process_sample_power_frequency },
#endif
};
- struct perf_session *session = perf_session__new(input_name, O_RDONLY,
- 0, false, &perf_timechart);
+ struct perf_data_file file = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ };
+
+ struct perf_session *session = perf_session__new(&file, false,
+ &perf_timechart);
int ret = -EINVAL;
if (session == NULL)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 212214162bb2..a6ea956a533e 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -247,9 +247,8 @@ static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
pthread_mutex_lock(&evsel->hists.lock);
he = __hists__add_entry(&evsel->hists, al, NULL, sample->period,
- sample->weight);
+ sample->weight, sample->transaction);
pthread_mutex_unlock(&evsel->hists.lock);
-
if (he == NULL)
return NULL;
@@ -287,7 +286,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
return;
}
- hists__collapse_resort(&top->sym_evsel->hists);
+ hists__collapse_resort(&top->sym_evsel->hists, NULL);
hists__output_resort(&top->sym_evsel->hists);
hists__decay_entries(&top->sym_evsel->hists,
top->hide_user_symbols,
@@ -553,7 +552,7 @@ static void perf_top__sort_new_samples(void *arg)
if (t->evlist->selected != NULL)
t->sym_evsel = t->evlist->selected;
- hists__collapse_resort(&t->sym_evsel->hists);
+ hists__collapse_resort(&t->sym_evsel->hists, NULL);
hists__output_resort(&t->sym_evsel->hists);
hists__decay_entries(&t->sym_evsel->hists,
t->hide_user_symbols,
@@ -771,7 +770,8 @@ static void perf_event__process_sample(struct perf_tool *tool,
sample->callchain) {
err = machine__resolve_callchain(machine, evsel,
al.thread, sample,
- &parent, &al);
+ &parent, &al,
+ top->max_stack);
if (err)
return;
}
@@ -810,7 +810,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
ret = perf_evlist__parse_sample(top->evlist, event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
- continue;
+ goto next_event;
}
evsel = perf_evlist__id2evsel(session->evlist, sample.id);
@@ -825,13 +825,13 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
case PERF_RECORD_MISC_USER:
++top->us_samples;
if (top->hide_user_symbols)
- continue;
+ goto next_event;
machine = &session->machines.host;
break;
case PERF_RECORD_MISC_KERNEL:
++top->kernel_samples;
if (top->hide_kernel_symbols)
- continue;
+ goto next_event;
machine = &session->machines.host;
break;
case PERF_RECORD_MISC_GUEST_KERNEL:
@@ -847,7 +847,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
*/
/* Fall thru */
default:
- continue;
+ goto next_event;
}
@@ -859,6 +859,8 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
machine__process_event(machine, event);
} else
++session->stats.nr_unknown_events;
+next_event:
+ perf_evlist__mmap_consume(top->evlist, idx);
}
}
@@ -930,11 +932,8 @@ static int __cmd_top(struct perf_top *top)
struct perf_record_opts *opts = &top->record_opts;
pthread_t thread;
int ret;
- /*
- * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
- * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
- */
- top->session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
+
+ top->session = perf_session__new(NULL, false, NULL);
if (top->session == NULL)
return -ENOMEM;
@@ -1016,16 +1015,16 @@ out_delete:
}
static int
-parse_callchain_opt(const struct option *opt, const char *arg, int unset)
+callchain_opt(const struct option *opt, const char *arg, int unset)
{
- /*
- * --no-call-graph
- */
- if (unset)
- return 0;
-
symbol_conf.use_callchain = true;
+ return record_callchain_opt(opt, arg, unset);
+}
+static int
+parse_callchain_opt(const struct option *opt, const char *arg, int unset)
+{
+ symbol_conf.use_callchain = true;
return record_parse_callchain_opt(opt, arg, unset);
}
@@ -1051,10 +1050,11 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
.freq = 4000, /* 4 KHz */
- .target = {
+ .target = {
.uses_mmap = true,
},
},
+ .max_stack = PERF_MAX_STACK_DEPTH,
.sym_pcnt_filter = 5,
};
struct perf_record_opts *opts = &top.record_opts;
@@ -1074,10 +1074,13 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
"list of cpus to monitor"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
+ OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
+ "don't load vmlinux even if found"),
OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
"hide kernel symbols"),
- OPT_UINTEGER('m', "mmap-pages", &opts->mmap_pages,
- "number of mmap data pages"),
+ OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
+ "number of mmap data pages",
+ perf_evlist__parse_mmap_pages),
OPT_INTEGER('r', "realtime", &top.realtime_prio,
"collect data with this RT SCHED_FIFO priority"),
OPT_INTEGER('d', "delay", &top.delay_secs,
@@ -1103,12 +1106,19 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
- "sort by key(s): pid, comm, dso, symbol, parent, weight, local_weight"),
+ "sort by key(s): pid, comm, dso, symbol, parent, weight, local_weight,"
+ " abort, in_tx, transaction"),
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
"Show a column with the number of samples"),
- OPT_CALLBACK_DEFAULT('G', "call-graph", &top.record_opts,
- "mode[,dump_size]", record_callchain_help,
- &parse_callchain_opt, "fp"),
+ OPT_CALLBACK_NOOPT('G', NULL, &top.record_opts,
+ NULL, "enables call-graph recording",
+ &callchain_opt),
+ OPT_CALLBACK(0, "call-graph", &top.record_opts,
+ "mode[,dump_size]", record_callchain_help,
+ &parse_callchain_opt),
+ OPT_INTEGER(0, "max-stack", &top.max_stack,
+ "Set the maximum stack depth when parsing the callchain. "
+ "Default: " __stringify(PERF_MAX_STACK_DEPTH)),
OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
"ignore callees of these functions in call graphs",
report_parse_ignore_callees_opt),
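Switching continue to goto next_event in perf_top__mmap_read_idx() guarantees that perf_evlist__mmap_consume() runs for every event, including the ones that are skipped or fail to parse; otherwise skipped entries would never be released back to the ring buffer. A minimal control-flow sketch of that pattern, with the event source and consume function mocked.

#include <stdbool.h>
#include <stdio.h>

static int  mock_read_event(int i) { return i; }	/* stand-in for the mmap read */
static bool skip_event(int ev)     { return ev % 2; }
static void mock_consume(int ev)   { printf("consumed %d\n", ev); }

int main(void)
{
	for (int i = 0; i < 4; i++) {
		int ev = mock_read_event(i);

		if (skip_event(ev))
			goto next_event;	/* a plain 'continue' would leak the slot */

		printf("processed %d\n", ev);
next_event:
		mock_consume(ev);		/* must run for every event */
	}
	return 0;
}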
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 71aa3e35406b..dc3da654ff12 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -10,9 +10,11 @@
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
+#include "util/stat.h"
#include <libaudit.h>
#include <stdlib.h>
+#include <sys/eventfd.h>
#include <sys/mman.h>
#include <linux/futex.h>
@@ -33,49 +35,96 @@
# define MADV_UNMERGEABLE 13
#endif
-static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
- unsigned long arg,
- u8 arg_idx __maybe_unused,
- u8 *arg_mask __maybe_unused)
+struct syscall_arg {
+ unsigned long val;
+ struct thread *thread;
+ struct trace *trace;
+ void *parm;
+ u8 idx;
+ u8 mask;
+};
+
+struct strarray {
+ int offset;
+ int nr_entries;
+ const char **entries;
+};
+
+#define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \
+ .nr_entries = ARRAY_SIZE(array), \
+ .entries = array, \
+}
+
+#define DEFINE_STRARRAY_OFFSET(array, off) struct strarray strarray__##array = { \
+ .offset = off, \
+ .nr_entries = ARRAY_SIZE(array), \
+ .entries = array, \
+}
+
+static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
+ const char *intfmt,
+ struct syscall_arg *arg)
{
- return scnprintf(bf, size, "%#lx", arg);
+ struct strarray *sa = arg->parm;
+ int idx = arg->val - sa->offset;
+
+ if (idx < 0 || idx >= sa->nr_entries)
+ return scnprintf(bf, size, intfmt, arg->val);
+
+ return scnprintf(bf, size, "%s", sa->entries[idx]);
}
-#define SCA_HEX syscall_arg__scnprintf_hex
+static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
+}
-static size_t syscall_arg__scnprintf_whence(char *bf, size_t size,
- unsigned long arg,
- u8 arg_idx __maybe_unused,
- u8 *arg_mask __maybe_unused)
+#define SCA_STRARRAY syscall_arg__scnprintf_strarray
+
+static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
+ struct syscall_arg *arg)
{
- int whence = arg;
+ return __syscall_arg__scnprintf_strarray(bf, size, "%#x", arg);
+}
- switch (whence) {
-#define P_WHENCE(n) case SEEK_##n: return scnprintf(bf, size, #n)
- P_WHENCE(SET);
- P_WHENCE(CUR);
- P_WHENCE(END);
-#ifdef SEEK_DATA
- P_WHENCE(DATA);
-#endif
-#ifdef SEEK_HOLE
- P_WHENCE(HOLE);
-#endif
-#undef P_WHENCE
- default: break;
- }
+#define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
+
+static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
+ struct syscall_arg *arg);
+
+#define SCA_FD syscall_arg__scnprintf_fd
+
+static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int fd = arg->val;
+
+ if (fd == AT_FDCWD)
+ return scnprintf(bf, size, "CWD");
+
+ return syscall_arg__scnprintf_fd(bf, size, arg);
+}
- return scnprintf(bf, size, "%#x", whence);
+#define SCA_FDAT syscall_arg__scnprintf_fd_at
+
+static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
+ struct syscall_arg *arg);
+
+#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
+
+static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ return scnprintf(bf, size, "%#lx", arg->val);
}
-#define SCA_WHENCE syscall_arg__scnprintf_whence
+#define SCA_HEX syscall_arg__scnprintf_hex
static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
- unsigned long arg,
- u8 arg_idx __maybe_unused,
- u8 *arg_mask __maybe_unused)
+ struct syscall_arg *arg)
{
- int printed = 0, prot = arg;
+ int printed = 0, prot = arg->val;
if (prot == PROT_NONE)
return scnprintf(bf, size, "NONE");
@@ -104,10 +153,9 @@ static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
- unsigned long arg, u8 arg_idx __maybe_unused,
- u8 *arg_mask __maybe_unused)
+ struct syscall_arg *arg)
{
- int printed = 0, flags = arg;
+ int printed = 0, flags = arg->val;
#define P_MMAP_FLAG(n) \
if (flags & MAP_##n) { \
@@ -148,10 +196,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
- unsigned long arg, u8 arg_idx __maybe_unused,
- u8 *arg_mask __maybe_unused)
+ struct syscall_arg *arg)
{
- int behavior = arg;
+ int behavior = arg->val;
switch (behavior) {
#define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
@@ -190,8 +237,38 @@ static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
-static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, unsigned long arg,
- u8 arg_idx __maybe_unused, u8 *arg_mask)
+static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, op = arg->val;
+
+ if (op == 0)
+ return scnprintf(bf, size, "NONE");
+#define P_CMD(cmd) \
+ if ((op & LOCK_##cmd) == LOCK_##cmd) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #cmd); \
+ op &= ~LOCK_##cmd; \
+ }
+
+ P_CMD(SH);
+ P_CMD(EX);
+ P_CMD(NB);
+ P_CMD(UN);
+ P_CMD(MAND);
+ P_CMD(RW);
+ P_CMD(READ);
+ P_CMD(WRITE);
+#undef P_CMD
+
+ if (op)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", op);
+
+ return printed;
+}
+
+#define SCA_FLOCK syscall_arg__scnprintf_flock
+
+static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg)
{
enum syscall_futex_args {
SCF_UADDR = (1 << 0),
@@ -201,24 +278,24 @@ static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, unsigned lo
SCF_UADDR2 = (1 << 4),
SCF_VAL3 = (1 << 5),
};
- int op = arg;
+ int op = arg->val;
int cmd = op & FUTEX_CMD_MASK;
size_t printed = 0;
switch (cmd) {
#define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
- P_FUTEX_OP(WAIT); *arg_mask |= SCF_VAL3|SCF_UADDR2; break;
- P_FUTEX_OP(WAKE); *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
- P_FUTEX_OP(FD); *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
- P_FUTEX_OP(REQUEUE); *arg_mask |= SCF_VAL3|SCF_TIMEOUT; break;
- P_FUTEX_OP(CMP_REQUEUE); *arg_mask |= SCF_TIMEOUT; break;
- P_FUTEX_OP(CMP_REQUEUE_PI); *arg_mask |= SCF_TIMEOUT; break;
+ P_FUTEX_OP(WAIT); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
+ P_FUTEX_OP(WAKE); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+ P_FUTEX_OP(FD); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+ P_FUTEX_OP(REQUEUE); arg->mask |= SCF_VAL3|SCF_TIMEOUT; break;
+ P_FUTEX_OP(CMP_REQUEUE); arg->mask |= SCF_TIMEOUT; break;
+ P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT; break;
P_FUTEX_OP(WAKE_OP); break;
- P_FUTEX_OP(LOCK_PI); *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
- P_FUTEX_OP(UNLOCK_PI); *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
- P_FUTEX_OP(TRYLOCK_PI); *arg_mask |= SCF_VAL3|SCF_UADDR2; break;
- P_FUTEX_OP(WAIT_BITSET); *arg_mask |= SCF_UADDR2; break;
- P_FUTEX_OP(WAKE_BITSET); *arg_mask |= SCF_UADDR2; break;
+ P_FUTEX_OP(LOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+ P_FUTEX_OP(UNLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+ P_FUTEX_OP(TRYLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
+ P_FUTEX_OP(WAIT_BITSET); arg->mask |= SCF_UADDR2; break;
+ P_FUTEX_OP(WAKE_BITSET); arg->mask |= SCF_UADDR2; break;
P_FUTEX_OP(WAIT_REQUEUE_PI); break;
default: printed = scnprintf(bf, size, "%#x", cmd); break;
}
@@ -234,14 +311,194 @@ static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, unsigned lo
#define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op
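Bundling the value, index, per-call mask, thread and trace pointers into struct syscall_arg lets one formatter both print its own argument and mark later arguments as irrelevant via arg->mask, which syscall__scnprintf_args() then checks before printing each field; the futex printer above uses this to hide timeout/uaddr2/val3 for ops like FUTEX_WAKE. A condensed standalone sketch of that mask handshake; the names and the masking rule here are illustrative.

#include <stdio.h>

struct arg { unsigned long val; unsigned char idx, mask; };

/* Formatter for arg 1: also masks out args 2 and 3 when its value is even. */
static int print_op(char *bf, int size, struct arg *arg)
{
	if ((arg->val & 1) == 0)
		arg->mask |= (1 << 2) | (1 << 3);	/* hide later args */
	return snprintf(bf, size, "op=%lu", arg->val);
}

int main(void)
{
	unsigned long args[4] = { 3, 2, 100, 200 };
	char bf[128];
	int printed = 0;
	struct arg arg = { .idx = 0, .mask = 0 };

	for (; arg.idx < 4; arg.idx++) {
		if (arg.mask & (1 << arg.idx))
			continue;			/* suppressed by a formatter */
		arg.val = args[arg.idx];
		printed += snprintf(bf + printed, sizeof(bf) - printed,
				    "%s", printed ? ", " : "");
		if (arg.idx == 1)
			printed += print_op(bf + printed, sizeof(bf) - printed, &arg);
		else
			printed += snprintf(bf + printed, sizeof(bf) - printed,
					    "%lu", arg.val);
	}
	puts(bf);	/* "3, op=2" -- args 2 and 3 were masked */
	return 0;
}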
+static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
+static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);
+
+static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
+static DEFINE_STRARRAY(itimers);
+
+static const char *whences[] = { "SET", "CUR", "END",
+#ifdef SEEK_DATA
+"DATA",
+#endif
+#ifdef SEEK_HOLE
+"HOLE",
+#endif
+};
+static DEFINE_STRARRAY(whences);
+
+static const char *fcntl_cmds[] = {
+ "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
+ "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "F_GETLK64",
+ "F_SETLK64", "F_SETLKW64", "F_SETOWN_EX", "F_GETOWN_EX",
+ "F_GETOWNER_UIDS",
+};
+static DEFINE_STRARRAY(fcntl_cmds);
+
+static const char *rlimit_resources[] = {
+ "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
+ "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
+ "RTTIME",
+};
+static DEFINE_STRARRAY(rlimit_resources);
+
+static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
+static DEFINE_STRARRAY(sighow);
+
+static const char *clockid[] = {
+ "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
+ "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE",
+};
+static DEFINE_STRARRAY(clockid);
+
+static const char *socket_families[] = {
+ "UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
+ "BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
+ "SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
+ "RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
+ "BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
+ "ALG", "NFC", "VSOCK",
+};
+static DEFINE_STRARRAY(socket_families);
+
+#ifndef SOCK_TYPE_MASK
+#define SOCK_TYPE_MASK 0xf
+#endif
+
+static size_t syscall_arg__scnprintf_socket_type(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ size_t printed;
+ int type = arg->val,
+ flags = type & ~SOCK_TYPE_MASK;
+
+ type &= SOCK_TYPE_MASK;
+ /*
+	 * Can't use a strarray: MIPS may override it for ABI reasons.
+ */
+ switch (type) {
+#define P_SK_TYPE(n) case SOCK_##n: printed = scnprintf(bf, size, #n); break;
+ P_SK_TYPE(STREAM);
+ P_SK_TYPE(DGRAM);
+ P_SK_TYPE(RAW);
+ P_SK_TYPE(RDM);
+ P_SK_TYPE(SEQPACKET);
+ P_SK_TYPE(DCCP);
+ P_SK_TYPE(PACKET);
+#undef P_SK_TYPE
+ default:
+ printed = scnprintf(bf, size, "%#x", type);
+ }
+
+#define P_SK_FLAG(n) \
+ if (flags & SOCK_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "|%s", #n); \
+ flags &= ~SOCK_##n; \
+ }
+
+ P_SK_FLAG(CLOEXEC);
+ P_SK_FLAG(NONBLOCK);
+#undef P_SK_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "|%#x", flags);
+
+ return printed;
+}
+
+#define SCA_SK_TYPE syscall_arg__scnprintf_socket_type
+
+#ifndef MSG_PROBE
+#define MSG_PROBE 0x10
+#endif
+#ifndef MSG_WAITFORONE
+#define MSG_WAITFORONE 0x10000
+#endif
+#ifndef MSG_SENDPAGE_NOTLAST
+#define MSG_SENDPAGE_NOTLAST 0x20000
+#endif
+#ifndef MSG_FASTOPEN
+#define MSG_FASTOPEN 0x20000000
+#endif
+
+static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+ if (flags == 0)
+ return scnprintf(bf, size, "NONE");
+#define P_MSG_FLAG(n) \
+ if (flags & MSG_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~MSG_##n; \
+ }
+
+ P_MSG_FLAG(OOB);
+ P_MSG_FLAG(PEEK);
+ P_MSG_FLAG(DONTROUTE);
+ P_MSG_FLAG(TRYHARD);
+ P_MSG_FLAG(CTRUNC);
+ P_MSG_FLAG(PROBE);
+ P_MSG_FLAG(TRUNC);
+ P_MSG_FLAG(DONTWAIT);
+ P_MSG_FLAG(EOR);
+ P_MSG_FLAG(WAITALL);
+ P_MSG_FLAG(FIN);
+ P_MSG_FLAG(SYN);
+ P_MSG_FLAG(CONFIRM);
+ P_MSG_FLAG(RST);
+ P_MSG_FLAG(ERRQUEUE);
+ P_MSG_FLAG(NOSIGNAL);
+ P_MSG_FLAG(MORE);
+ P_MSG_FLAG(WAITFORONE);
+ P_MSG_FLAG(SENDPAGE_NOTLAST);
+ P_MSG_FLAG(FASTOPEN);
+ P_MSG_FLAG(CMSG_CLOEXEC);
+#undef P_MSG_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_MSG_FLAGS syscall_arg__scnprintf_msg_flags
+
+static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ size_t printed = 0;
+ int mode = arg->val;
+
+ if (mode == F_OK) /* 0 */
+ return scnprintf(bf, size, "F");
+#define P_MODE(n) \
+ if (mode & n##_OK) { \
+ printed += scnprintf(bf + printed, size - printed, "%s", #n); \
+ mode &= ~n##_OK; \
+ }
+
+ P_MODE(R);
+ P_MODE(W);
+ P_MODE(X);
+#undef P_MODE
+
+ if (mode)
+ printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
+
+ return printed;
+}
+
+#define SCA_ACCMODE syscall_arg__scnprintf_access_mode
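The access-mode printer follows the same shape as the other flag beautifiers in this file: peel off each known bit, append its name, and print whatever is left as hex. A compact standalone rendition using the R_OK/W_OK/X_OK/F_OK constants from <unistd.h>; the helper name is illustrative.

#include <stdio.h>
#include <unistd.h>

static int print_access_mode(char *bf, size_t size, int mode)
{
	int printed = 0;

	if (mode == F_OK)			/* 0 */
		return snprintf(bf, size, "F");
#define P_MODE(n)							\
	if (mode & n##_OK) {						\
		printed += snprintf(bf + printed, size - printed, #n);	\
		mode &= ~n##_OK;					\
	}
	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE
	if (mode)
		printed += snprintf(bf + printed, size - printed, "|%#x", mode);
	return printed;
}

int main(void)
{
	char bf[64];

	print_access_mode(bf, sizeof(bf), R_OK | W_OK);
	puts(bf);				/* RW */
	return 0;
}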
+
static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
- unsigned long arg,
- u8 arg_idx, u8 *arg_mask)
+ struct syscall_arg *arg)
{
- int printed = 0, flags = arg;
+ int printed = 0, flags = arg->val;
if (!(flags & O_CREAT))
- *arg_mask |= 1 << (arg_idx + 1); /* Mask the mode parm */
+ arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */
if (flags == 0)
return scnprintf(bf, size, "RDONLY");
@@ -291,32 +548,225 @@ static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
+static size_t syscall_arg__scnprintf_eventfd_flags(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+ if (flags == 0)
+ return scnprintf(bf, size, "NONE");
+#define P_FLAG(n) \
+ if (flags & EFD_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~EFD_##n; \
+ }
+
+ P_FLAG(SEMAPHORE);
+ P_FLAG(CLOEXEC);
+ P_FLAG(NONBLOCK);
+#undef P_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_EFD_FLAGS syscall_arg__scnprintf_eventfd_flags
+
+static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+#define P_FLAG(n) \
+ if (flags & O_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~O_##n; \
+ }
+
+ P_FLAG(CLOEXEC);
+ P_FLAG(NONBLOCK);
+#undef P_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
+
+static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg)
+{
+ int sig = arg->val;
+
+ switch (sig) {
+#define P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, #n)
+ P_SIGNUM(HUP);
+ P_SIGNUM(INT);
+ P_SIGNUM(QUIT);
+ P_SIGNUM(ILL);
+ P_SIGNUM(TRAP);
+ P_SIGNUM(ABRT);
+ P_SIGNUM(BUS);
+ P_SIGNUM(FPE);
+ P_SIGNUM(KILL);
+ P_SIGNUM(USR1);
+ P_SIGNUM(SEGV);
+ P_SIGNUM(USR2);
+ P_SIGNUM(PIPE);
+ P_SIGNUM(ALRM);
+ P_SIGNUM(TERM);
+ P_SIGNUM(STKFLT);
+ P_SIGNUM(CHLD);
+ P_SIGNUM(CONT);
+ P_SIGNUM(STOP);
+ P_SIGNUM(TSTP);
+ P_SIGNUM(TTIN);
+ P_SIGNUM(TTOU);
+ P_SIGNUM(URG);
+ P_SIGNUM(XCPU);
+ P_SIGNUM(XFSZ);
+ P_SIGNUM(VTALRM);
+ P_SIGNUM(PROF);
+ P_SIGNUM(WINCH);
+ P_SIGNUM(IO);
+ P_SIGNUM(PWR);
+ P_SIGNUM(SYS);
+ default: break;
+ }
+
+ return scnprintf(bf, size, "%#x", sig);
+}
+
+#define SCA_SIGNUM syscall_arg__scnprintf_signum
+
+#define TCGETS 0x5401
+
+static const char *tioctls[] = {
+ "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
+ "TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL",
+ "TIOCSCTTY", "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI",
+ "TIOCGWINSZ", "TIOCSWINSZ", "TIOCMGET", "TIOCMBIS", "TIOCMBIC",
+ "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR", "FIONREAD", "TIOCLINUX",
+ "TIOCCONS", "TIOCGSERIAL", "TIOCSSERIAL", "TIOCPKT", "FIONBIO",
+ "TIOCNOTTY", "TIOCSETD", "TIOCGETD", "TCSBRKP", [0x27] = "TIOCSBRK",
+ "TIOCCBRK", "TIOCGSID", "TCGETS2", "TCSETS2", "TCSETSW2", "TCSETSF2",
+ "TIOCGRS485", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
+ "TIOCGDEV||TCGETX", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG",
+ "TIOCVHANGUP", "TIOCGPKT", "TIOCGPTLCK", "TIOCGEXCL",
+ [0x50] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
+ "TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
+ "TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
+ "TIOCMIWAIT", "TIOCGICOUNT", [0x60] = "FIOQSIZE",
+};
+
+static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
+
+#define STRARRAY(arg, name, array) \
+ .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
+ .arg_parm = { [arg] = &strarray__##array, }
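DEFINE_STRARRAY() stamps out a struct strarray named strarray__<array>, and STRARRAY(arg, name, array) wires both the SCA_STRARRAY printer and its table into a syscall_fmts entry, so for example STRARRAY(0, which, itimers) maps arg 0 of getitimer/setitimer onto the "REAL"/"VIRTUAL"/"PROF" strings. A stripped-down standalone version of that table lookup, with no offset handling and illustrative helper names.

#include <stdio.h>

struct strarray { int nr_entries; const char **entries; };

#define DEFINE_STRARRAY(array) struct strarray strarray__##array = {	\
	.nr_entries = sizeof(array) / sizeof(array[0]),			\
	.entries = array,						\
}

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers);

/* Print the symbolic name when the value is in range, the raw number otherwise. */
static void print_strarray(const struct strarray *sa, int val)
{
	if (val >= 0 && val < sa->nr_entries)
		printf("%s\n", sa->entries[val]);
	else
		printf("%d\n", val);
}

int main(void)
{
	print_strarray(&strarray__itimers, 1);	/* VIRTUAL */
	print_strarray(&strarray__itimers, 7);	/* 7 */
	return 0;
}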
+
static struct syscall_fmt {
const char *name;
const char *alias;
- size_t (*arg_scnprintf[6])(char *bf, size_t size, unsigned long arg, u8 arg_idx, u8 *arg_mask);
+ size_t (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg);
+ void *arg_parm[6];
bool errmsg;
bool timeout;
bool hexret;
} syscall_fmts[] = {
- { .name = "access", .errmsg = true, },
+ { .name = "access", .errmsg = true,
+ .arg_scnprintf = { [1] = SCA_ACCMODE, /* mode */ }, },
{ .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
{ .name = "brk", .hexret = true,
.arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
- { .name = "mmap", .hexret = true, },
+ { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
+ { .name = "close", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
{ .name = "connect", .errmsg = true, },
- { .name = "fstat", .errmsg = true, .alias = "newfstat", },
- { .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
+ { .name = "dup", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "dup2", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "dup3", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
+ { .name = "eventfd2", .errmsg = true,
+ .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
+ { .name = "faccessat", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ { .name = "fadvise64", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "fallocate", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "fchdir", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "fchmod", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "fchmodat", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ { .name = "fchown", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "fchownat", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ { .name = "fcntl", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */
+ [1] = SCA_STRARRAY, /* cmd */ },
+ .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
+ { .name = "fdatasync", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "flock", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */
+ [1] = SCA_FLOCK, /* cmd */ }, },
+ { .name = "fsetxattr", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "fstat", .errmsg = true, .alias = "newfstat",
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "fstatat", .errmsg = true, .alias = "newfstatat",
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ { .name = "fstatfs", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "fsync", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "ftruncate", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "futex", .errmsg = true,
.arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
+ { .name = "futimesat", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ { .name = "getdents", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "getdents64", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
+ { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
{ .name = "ioctl", .errmsg = true,
- .arg_scnprintf = { [2] = SCA_HEX, /* arg */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */
+ [1] = SCA_STRHEXARRAY, /* cmd */
+ [2] = SCA_HEX, /* arg */ },
+ .arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, },
+ { .name = "kill", .errmsg = true,
+ .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
+ { .name = "linkat", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "lseek", .errmsg = true,
- .arg_scnprintf = { [2] = SCA_WHENCE, /* whence */ }, },
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */
+ [2] = SCA_STRARRAY, /* whence */ },
+ .arg_parm = { [2] = &strarray__whences, /* whence */ }, },
{ .name = "lstat", .errmsg = true, .alias = "newlstat", },
{ .name = "madvise", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* start */
[2] = SCA_MADV_BHV, /* behavior */ }, },
+ { .name = "mkdirat", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ { .name = "mknodat", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ { .name = "mlock", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
+ { .name = "mlockall", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
{ .name = "mmap", .hexret = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */
[2] = SCA_MMAP_PROT, /* prot */
@@ -327,24 +777,91 @@ static struct syscall_fmt {
{ .name = "mremap", .hexret = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */
[4] = SCA_HEX, /* new_addr */ }, },
+ { .name = "munlock", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
{ .name = "munmap", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
+ { .name = "name_to_handle_at", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ { .name = "newfstatat", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "open", .errmsg = true,
.arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
{ .name = "open_by_handle_at", .errmsg = true,
- .arg_scnprintf = { [2] = SCA_OPEN_FLAGS, /* flags */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
+ [2] = SCA_OPEN_FLAGS, /* flags */ }, },
{ .name = "openat", .errmsg = true,
- .arg_scnprintf = { [2] = SCA_OPEN_FLAGS, /* flags */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
+ [2] = SCA_OPEN_FLAGS, /* flags */ }, },
+ { .name = "pipe2", .errmsg = true,
+ .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
{ .name = "poll", .errmsg = true, .timeout = true, },
{ .name = "ppoll", .errmsg = true, .timeout = true, },
- { .name = "pread", .errmsg = true, .alias = "pread64", },
- { .name = "pwrite", .errmsg = true, .alias = "pwrite64", },
- { .name = "read", .errmsg = true, },
- { .name = "recvfrom", .errmsg = true, },
+ { .name = "pread", .errmsg = true, .alias = "pread64",
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "preadv", .errmsg = true, .alias = "pread",
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
+ { .name = "pwrite", .errmsg = true, .alias = "pwrite64",
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "pwritev", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "read", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "readlinkat", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ { .name = "readv", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "recvfrom", .errmsg = true,
+ .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
+ { .name = "recvmmsg", .errmsg = true,
+ .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
+ { .name = "recvmsg", .errmsg = true,
+ .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
+ { .name = "renameat", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ { .name = "rt_sigaction", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
+ { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
+ { .name = "rt_sigqueueinfo", .errmsg = true,
+ .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
+ { .name = "rt_tgsigqueueinfo", .errmsg = true,
+ .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
{ .name = "select", .errmsg = true, .timeout = true, },
- { .name = "socket", .errmsg = true, },
+ { .name = "sendmmsg", .errmsg = true,
+ .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
+ { .name = "sendmsg", .errmsg = true,
+ .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
+ { .name = "sendto", .errmsg = true,
+ .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
+ { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
+ { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
+ { .name = "shutdown", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "socket", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
+ [1] = SCA_SK_TYPE, /* type */ },
+ .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
+ { .name = "socketpair", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
+ [1] = SCA_SK_TYPE, /* type */ },
+ .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
{ .name = "stat", .errmsg = true, .alias = "newstat", },
+ { .name = "symlinkat", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ { .name = "tgkill", .errmsg = true,
+ .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
+ { .name = "tkill", .errmsg = true,
+ .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
{ .name = "uname", .errmsg = true, .alias = "newuname", },
+ { .name = "unlinkat", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ { .name = "utimensat", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
+ { .name = "write", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "writev", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
};
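syscall_fmts[] is kept sorted by name and syscall_fmt__cmp() below is its comparator; the actual lookup call is outside these hunks, but it is presumably a bsearch() over the table when a syscall id is first resolved. A self-contained sketch of that sorted-table lookup under that assumption, with simplified stand-in struct and helper names.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fmt { const char *name; int errmsg; };

/* Must stay sorted by name for bsearch(). */
static struct fmt fmts[] = {
	{ "access", 1 }, { "brk", 0 }, { "open", 1 }, { "read", 1 },
};

static int fmt__cmp(const void *name, const void *fmtp)
{
	const struct fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct fmt *fmt__find(const char *name)
{
	const size_t nmemb = sizeof(fmts) / sizeof(fmts[0]);
	return bsearch(name, fmts, nmemb, sizeof(fmts[0]), fmt__cmp);
}

int main(void)
{
	struct fmt *fmt = fmt__find("open");

	printf("%s errmsg=%d\n", fmt ? fmt->name : "?", fmt ? fmt->errmsg : -1);
	return 0;
}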
static int syscall_fmt__cmp(const void *name, const void *fmtp)
@@ -364,8 +881,8 @@ struct syscall {
const char *name;
bool filtered;
struct syscall_fmt *fmt;
- size_t (**arg_scnprintf)(char *bf, size_t size,
- unsigned long arg, u8 arg_idx, u8 *args_mask);
+ size_t (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
+ void **arg_parm;
};
static size_t fprintf_duration(unsigned long t, FILE *fp)
@@ -389,11 +906,24 @@ struct thread_trace {
unsigned long nr_events;
char *entry_str;
double runtime_ms;
+ struct {
+ int max;
+ char **table;
+ } paths;
+
+ struct intlist *syscall_stats;
};
static struct thread_trace *thread_trace__new(void)
{
- return zalloc(sizeof(struct thread_trace));
+ struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
+
+	if (ttrace) {
+		ttrace->paths.max = -1;
+		ttrace->syscall_stats = intlist__new(NULL);
+	}
+
+ return ttrace;
}
static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
@@ -421,26 +951,140 @@ fail:
struct trace {
struct perf_tool tool;
- int audit_machine;
+ struct {
+ int machine;
+ int open_id;
+ } audit;
struct {
int max;
struct syscall *table;
} syscalls;
struct perf_record_opts opts;
- struct machine host;
+ struct machine *host;
u64 base_time;
+ bool full_time;
FILE *output;
unsigned long nr_events;
struct strlist *ev_qualifier;
bool not_ev_qualifier;
+ bool live;
+ const char *last_vfs_getname;
struct intlist *tid_list;
struct intlist *pid_list;
bool sched;
bool multiple_threads;
+ bool summary;
+ bool show_comm;
+ bool show_tool_stats;
double duration_filter;
double runtime_ms;
+ struct {
+ u64 vfs_getname, proc_getname;
+ } stats;
};
+static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
+{
+ struct thread_trace *ttrace = thread->priv;
+
+ if (fd > ttrace->paths.max) {
+ char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));
+
+ if (npath == NULL)
+ return -1;
+
+ if (ttrace->paths.max != -1) {
+ memset(npath + ttrace->paths.max + 1, 0,
+ (fd - ttrace->paths.max) * sizeof(char *));
+ } else {
+ memset(npath, 0, (fd + 1) * sizeof(char *));
+ }
+
+ ttrace->paths.table = npath;
+ ttrace->paths.max = fd;
+ }
+
+ ttrace->paths.table[fd] = strdup(pathname);
+
+ return ttrace->paths.table[fd] != NULL ? 0 : -1;
+}
+
+static int thread__read_fd_path(struct thread *thread, int fd)
+{
+ char linkname[PATH_MAX], pathname[PATH_MAX];
+ struct stat st;
+ int ret;
+
+ if (thread->pid_ == thread->tid) {
+ scnprintf(linkname, sizeof(linkname),
+ "/proc/%d/fd/%d", thread->pid_, fd);
+ } else {
+ scnprintf(linkname, sizeof(linkname),
+ "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
+ }
+
+ if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
+ return -1;
+
+ ret = readlink(linkname, pathname, sizeof(pathname));
+
+ if (ret < 0 || ret > st.st_size)
+ return -1;
+
+ pathname[ret] = '\0';
+ return trace__set_fd_pathname(thread, fd, pathname);
+}
+
+static const char *thread__fd_path(struct thread *thread, int fd,
+ struct trace *trace)
+{
+ struct thread_trace *ttrace = thread->priv;
+
+ if (ttrace == NULL)
+ return NULL;
+
+ if (fd < 0)
+ return NULL;
+
+	if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
+		if (!trace->live)
+			return NULL;
+		++trace->stats.proc_getname;
+		if (thread__read_fd_path(thread, fd))
+			return NULL;
+	}
+
+ return ttrace->paths.table[fd];
+}
+
+static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int fd = arg->val;
+ size_t printed = scnprintf(bf, size, "%d", fd);
+ const char *path = thread__fd_path(arg->thread, fd, arg->trace);
+
+ if (path)
+ printed += scnprintf(bf + printed, size - printed, "<%s>", path);
+
+ return printed;
+}
+
+static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int fd = arg->val;
+ size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
+ struct thread_trace *ttrace = arg->thread->priv;
+
+ if (ttrace && fd >= 0 && fd <= ttrace->paths.max) {
+ free(ttrace->paths.table[fd]);
+ ttrace->paths.table[fd] = NULL;
+ }
+
+ return printed;
+}
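thread__read_fd_path() resolves a file descriptor to a pathname by readlink()ing /proc/<pid>/fd/<fd> (or the /proc/<pid>/task/<tid>/fd/<fd> variant), and trace__set_fd_pathname() caches the result in a per-thread table that the close beautifier above invalidates. The sketch below performs the same /proc readlink for the current process, independent of the perf data structures.

#include <limits.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Resolve one of our own fds to a pathname via /proc, like thread__read_fd_path(). */
static int fd_path(pid_t pid, int fd, char *buf, size_t sz)
{
	char link[PATH_MAX];
	ssize_t ret;

	snprintf(link, sizeof(link), "/proc/%d/fd/%d", (int)pid, fd);
	ret = readlink(link, buf, sz - 1);
	if (ret < 0)
		return -1;
	buf[ret] = '\0';
	return 0;
}

int main(void)
{
	char path[PATH_MAX];

	if (fd_path(getpid(), STDIN_FILENO, path, sizeof(path)) == 0)
		printf("fd 0 -> %s\n", path);	/* e.g. /dev/pts/0 or pipe:[...] */
	return 0;
}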
+
static bool trace__filter_duration(struct trace *trace, double t)
{
return t < (trace->duration_filter * NSEC_PER_MSEC);
@@ -454,10 +1098,12 @@ static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
}
static bool done = false;
+static bool interrupted = false;
-static void sig_handler(int sig __maybe_unused)
+static void sig_handler(int sig)
{
done = true;
+ interrupted = sig == SIGINT;
}
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
@@ -466,8 +1112,11 @@ static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thre
size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
printed += fprintf_duration(duration, fp);
- if (trace->multiple_threads)
+ if (trace->multiple_threads) {
+ if (trace->show_comm)
+ printed += fprintf(fp, "%.14s/", thread->comm);
printed += fprintf(fp, "%d ", thread->tid);
+ }
return printed;
}
@@ -506,16 +1155,17 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
if (err)
return err;
- machine__init(&trace->host, "", HOST_KERNEL_ID);
- machine__create_kernel_maps(&trace->host);
+ trace->host = machine__new_host();
+ if (trace->host == NULL)
+ return -ENOMEM;
if (perf_target__has_task(&trace->opts.target)) {
err = perf_event__synthesize_thread_map(&trace->tool, evlist->threads,
trace__tool_process,
- &trace->host);
+ trace->host);
} else {
err = perf_event__synthesize_threads(&trace->tool, trace__tool_process,
- &trace->host);
+ trace->host);
}
if (err)
@@ -533,6 +1183,9 @@ static int syscall__set_arg_fmts(struct syscall *sc)
if (sc->arg_scnprintf == NULL)
return -1;
+ if (sc->fmt)
+ sc->arg_parm = sc->fmt->arg_parm;
+
for (field = sc->tp_format->format.fields->next; field; field = field->next) {
if (sc->fmt && sc->fmt->arg_scnprintf[idx])
sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
@@ -548,7 +1201,7 @@ static int trace__read_syscall_info(struct trace *trace, int id)
{
char tp_name[128];
struct syscall *sc;
- const char *name = audit_syscall_to_name(id, trace->audit_machine);
+ const char *name = audit_syscall_to_name(id, trace->audit.machine);
if (name == NULL)
return -1;
@@ -603,32 +1256,52 @@ static int trace__read_syscall_info(struct trace *trace, int id)
}
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
- unsigned long *args)
+ unsigned long *args, struct trace *trace,
+ struct thread *thread)
{
- int i = 0;
size_t printed = 0;
if (sc->tp_format != NULL) {
struct format_field *field;
- u8 mask = 0, bit = 1;
+ u8 bit = 1;
+ struct syscall_arg arg = {
+ .idx = 0,
+ .mask = 0,
+ .trace = trace,
+ .thread = thread,
+ };
for (field = sc->tp_format->format.fields->next; field;
- field = field->next, ++i, bit <<= 1) {
- if (mask & bit)
+ field = field->next, ++arg.idx, bit <<= 1) {
+ if (arg.mask & bit)
+ continue;
+ /*
+ * Suppress this argument if its value is zero and
+ * we don't have a string associated in a
+ * strarray for it.
+ */
+ if (args[arg.idx] == 0 &&
+ !(sc->arg_scnprintf &&
+ sc->arg_scnprintf[arg.idx] == SCA_STRARRAY &&
+ sc->arg_parm[arg.idx]))
continue;
printed += scnprintf(bf + printed, size - printed,
"%s%s: ", printed ? ", " : "", field->name);
-
- if (sc->arg_scnprintf && sc->arg_scnprintf[i]) {
- printed += sc->arg_scnprintf[i](bf + printed, size - printed,
- args[i], i, &mask);
+ if (sc->arg_scnprintf && sc->arg_scnprintf[arg.idx]) {
+ arg.val = args[arg.idx];
+ if (sc->arg_parm)
+ arg.parm = sc->arg_parm[arg.idx];
+ printed += sc->arg_scnprintf[arg.idx](bf + printed,
+ size - printed, &arg);
} else {
printed += scnprintf(bf + printed, size - printed,
- "%ld", args[i]);
+ "%ld", args[arg.idx]);
}
}
} else {
+ int i = 0;
+
while (i < 6) {
printed += scnprintf(bf + printed, size - printed,
"%sarg%d: %ld",
@@ -644,10 +1317,8 @@ typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
struct perf_sample *sample);
static struct syscall *trace__syscall_info(struct trace *trace,
- struct perf_evsel *evsel,
- struct perf_sample *sample)
+ struct perf_evsel *evsel, int id)
{
- int id = perf_evsel__intval(evsel, sample, "id");
if (id < 0) {
@@ -688,6 +1359,32 @@ out_cant_read:
return NULL;
}
+static void thread__update_stats(struct thread_trace *ttrace,
+ int id, struct perf_sample *sample)
+{
+ struct int_node *inode;
+ struct stats *stats;
+ u64 duration = 0;
+
+ inode = intlist__findnew(ttrace->syscall_stats, id);
+ if (inode == NULL)
+ return;
+
+ stats = inode->priv;
+ if (stats == NULL) {
+ stats = malloc(sizeof(struct stats));
+ if (stats == NULL)
+ return;
+ init_stats(stats);
+ inode->priv = stats;
+ }
+
+ if (ttrace->entry_time && sample->time > ttrace->entry_time)
+ duration = sample->time - ttrace->entry_time;
+
+ update_stats(stats, duration);
+}
+
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
struct perf_sample *sample)
{
@@ -695,7 +1392,8 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
void *args;
size_t printed = 0;
struct thread *thread;
- struct syscall *sc = trace__syscall_info(trace, evsel, sample);
+ int id = perf_evsel__intval(evsel, sample, "id");
+ struct syscall *sc = trace__syscall_info(trace, evsel, id);
struct thread_trace *ttrace;
if (sc == NULL)
@@ -704,8 +1402,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
if (sc->filtered)
return 0;
- thread = machine__findnew_thread(&trace->host, sample->pid,
- sample->tid);
+ thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
return -1;
@@ -728,7 +1425,8 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
msg = ttrace->entry_str;
printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);
- printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);
+ printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed,
+ args, trace, thread);
if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
if (!trace->duration_filter) {
@@ -747,7 +1445,8 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
int ret;
u64 duration = 0;
struct thread *thread;
- struct syscall *sc = trace__syscall_info(trace, evsel, sample);
+ int id = perf_evsel__intval(evsel, sample, "id");
+ struct syscall *sc = trace__syscall_info(trace, evsel, id);
struct thread_trace *ttrace;
if (sc == NULL)
@@ -756,14 +1455,22 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
if (sc->filtered)
return 0;
- thread = machine__findnew_thread(&trace->host, sample->pid,
- sample->tid);
+ thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
return -1;
+ if (trace->summary)
+ thread__update_stats(ttrace, id, sample);
+
ret = perf_evsel__intval(evsel, sample, "ret");
+ if (id == trace->audit.open_id && ret >= 0 && trace->last_vfs_getname) {
+ trace__set_fd_pathname(thread, ret, trace->last_vfs_getname);
+ trace->last_vfs_getname = NULL;
+ ++trace->stats.vfs_getname;
+ }
+
ttrace = thread->priv;
ttrace->exit_time = sample->time;
@@ -808,12 +1515,19 @@ out:
return 0;
}
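+/* Remember the last probe:vfs_getname pathname so sys_exit(open) can cache it. */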
+static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
+ struct perf_sample *sample)
+{
+ trace->last_vfs_getname = perf_evsel__rawptr(evsel, sample, "pathname");
+ return 0;
+}
+
static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
struct perf_sample *sample)
{
u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
double runtime_ms = (double)runtime / NSEC_PER_MSEC;
- struct thread *thread = machine__findnew_thread(&trace->host,
+ struct thread *thread = machine__findnew_thread(trace->host,
sample->pid,
sample->tid);
struct thread_trace *ttrace = thread__trace(thread, trace->output);
@@ -861,7 +1575,7 @@ static int trace__process_sample(struct perf_tool *tool,
if (skip_sample(trace, sample))
return 0;
- if (trace->base_time == 0)
+ if (!trace->full_time && trace->base_time == 0)
trace->base_time = sample->time;
if (handler)
@@ -901,6 +1615,51 @@ static int parse_target_str(struct trace *trace)
return 0;
}
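+/* 'perf trace record': forward to 'perf record' with the raw_syscalls events preset. */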
+static int trace__record(int argc, const char **argv)
+{
+ unsigned int rec_argc, i, j;
+ const char **rec_argv;
+ const char * const record_args[] = {
+ "record",
+ "-R",
+ "-m", "1024",
+ "-c", "1",
+ "-e", "raw_syscalls:sys_enter,raw_syscalls:sys_exit",
+ };
+
+ rec_argc = ARRAY_SIZE(record_args) + argc;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+
+ if (rec_argv == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(record_args); i++)
+ rec_argv[i] = record_args[i];
+
+ for (j = 0; j < (unsigned int)argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ return cmd_record(i, rec_argv, NULL);
+}
+
+static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
+
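+/* Add the optional probe:vfs_getname tracepoint, if it is set up on this system. */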
+static void perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname",
+ evlist->nr_entries);
+ if (evsel == NULL)
+ return;
+
+ if (perf_evsel__field(evsel, "pathname") == NULL) {
+ perf_evsel__delete(evsel);
+ return;
+ }
+
+ evsel->handler.func = trace__vfs_getname;
+ perf_evlist__add(evlist, evsel);
+}
+
static int trace__run(struct trace *trace, int argc, const char **argv)
{
struct perf_evlist *evlist = perf_evlist__new();
@@ -909,23 +1668,23 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
unsigned long before;
const bool forks = argc > 0;
+ trace->live = true;
+
if (evlist == NULL) {
fprintf(trace->output, "Not enough memory to run!\n");
goto out;
}
if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
- perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
- fprintf(trace->output, "Couldn't read the raw_syscalls tracepoints information!\n");
- goto out_delete_evlist;
- }
+ perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit))
+ goto out_error_tp;
+
+ perf_evlist__add_vfs_getname(evlist);
if (trace->sched &&
- perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
- trace__sched_stat_runtime)) {
- fprintf(trace->output, "Couldn't read the sched_stat_runtime tracepoint information!\n");
- goto out_delete_evlist;
- }
+ perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
+ trace__sched_stat_runtime))
+ goto out_error_tp;
err = perf_evlist__create_maps(evlist, &trace->opts.target);
if (err < 0) {
@@ -954,10 +1713,8 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
}
err = perf_evlist__open(evlist);
- if (err < 0) {
- fprintf(trace->output, "Couldn't create the events: %s\n", strerror(errno));
- goto out_delete_maps;
- }
+ if (err < 0)
+ goto out_error_open;
err = perf_evlist__mmap(evlist, UINT_MAX, false);
if (err < 0) {
@@ -987,51 +1744,65 @@ again:
err = perf_evlist__parse_sample(evlist, event, &sample);
if (err) {
fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
- continue;
+ goto next_event;
}
- if (trace->base_time == 0)
+ if (!trace->full_time && trace->base_time == 0)
trace->base_time = sample.time;
if (type != PERF_RECORD_SAMPLE) {
- trace__process_event(trace, &trace->host, event);
+ trace__process_event(trace, trace->host, event);
continue;
}
evsel = perf_evlist__id2evsel(evlist, sample.id);
if (evsel == NULL) {
fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
- continue;
+ goto next_event;
}
if (sample.raw_data == NULL) {
fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
perf_evsel__name(evsel), sample.tid,
sample.cpu, sample.raw_size);
- continue;
+ goto next_event;
}
handler = evsel->handler.func;
handler(trace, evsel, &sample);
+next_event:
+ perf_evlist__mmap_consume(evlist, i);
- if (done)
- goto out_unmap_evlist;
+ if (interrupted)
+ goto out_disable;
}
}
if (trace->nr_events == before) {
- if (done)
- goto out_unmap_evlist;
+ int timeout = done ? 100 : -1;
- poll(evlist->pollfd, evlist->nr_fds, -1);
+ if (poll(evlist->pollfd, evlist->nr_fds, timeout) > 0)
+ goto again;
+ } else {
+ goto again;
}
- if (done)
- perf_evlist__disable(evlist);
+out_disable:
+ perf_evlist__disable(evlist);
+
+ if (!err) {
+ if (trace->summary)
+ trace__fprintf_thread_summary(trace, trace->output);
- goto again;
+ if (trace->show_tool_stats) {
+ fprintf(trace->output, "Stats:\n "
+ " vfs_getname : %" PRIu64 "\n"
+ " proc_getname: %" PRIu64 "\n",
+ trace->stats.vfs_getname,
+ trace->stats.proc_getname);
+ }
+ }
-out_unmap_evlist:
perf_evlist__munmap(evlist);
out_close_evlist:
perf_evlist__close(evlist);
@@ -1040,7 +1811,22 @@ out_delete_maps:
out_delete_evlist:
perf_evlist__delete(evlist);
out:
+ trace->live = false;
return err;
+{
+ char errbuf[BUFSIZ];
+
+out_error_tp:
+ perf_evlist__strerror_tp(evlist, errno, errbuf, sizeof(errbuf));
+ goto out_error;
+
+out_error_open:
+ perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
+
+out_error:
+ fprintf(trace->output, "%s\n", errbuf);
+ goto out_delete_evlist;
+}
}
static int trace__replay(struct trace *trace)
@@ -1048,8 +1834,12 @@ static int trace__replay(struct trace *trace)
const struct perf_evsel_str_handler handlers[] = {
{ "raw_syscalls:sys_enter", trace__sys_enter, },
{ "raw_syscalls:sys_exit", trace__sys_exit, },
+ { "probe:vfs_getname", trace__vfs_getname, },
+ };
+ struct perf_data_file file = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
};
-
struct perf_session *session;
int err = -1;
@@ -1072,11 +1862,12 @@ static int trace__replay(struct trace *trace)
if (symbol__init() < 0)
return -1;
- session = perf_session__new(input_name, O_RDONLY, 0, false,
- &trace->tool);
+ session = perf_session__new(&file, false, &trace->tool);
if (session == NULL)
return -ENOMEM;
+ trace->host = &session->machines.host;
+
err = perf_session__set_tracepoints_handlers(session, handlers);
if (err)
goto out;
@@ -1101,6 +1892,9 @@ static int trace__replay(struct trace *trace)
if (err)
pr_err("Failed to process events, error %d", err);
+ else if (trace->summary)
+ trace__fprintf_thread_summary(trace, trace->output);
+
out:
perf_session__delete(session);
@@ -1111,47 +1905,111 @@ static size_t trace__fprintf_threads_header(FILE *fp)
{
size_t printed;
- printed = fprintf(fp, "\n _____________________________________________________________________\n");
- printed += fprintf(fp," __) Summary of events (__\n\n");
- printed += fprintf(fp," [ task - pid ] [ events ] [ ratio ] [ runtime ]\n");
- printed += fprintf(fp," _____________________________________________________________________\n\n");
+ printed = fprintf(fp, "\n _____________________________________________________________________________\n");
+ printed += fprintf(fp, " __) Summary of events (__\n\n");
+ printed += fprintf(fp, " [ task - pid ] [ events ] [ ratio ] [ runtime ]\n");
+ printed += fprintf(fp, " syscall count min max avg stddev\n");
+ printed += fprintf(fp, " msec msec msec %%\n");
+ printed += fprintf(fp, " _____________________________________________________________________________\n\n");
return printed;
}
-static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
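+/* Print count, min, max, avg (msec) and stddev (% of avg) for each syscall this thread issued. */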
+static size_t thread__dump_stats(struct thread_trace *ttrace,
+ struct trace *trace, FILE *fp)
{
- size_t printed = trace__fprintf_threads_header(fp);
- struct rb_node *nd;
-
- for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
- struct thread *thread = rb_entry(nd, struct thread, rb_node);
- struct thread_trace *ttrace = thread->priv;
- const char *color;
- double ratio;
-
- if (ttrace == NULL)
- continue;
-
- ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
-
- color = PERF_COLOR_NORMAL;
- if (ratio > 50.0)
- color = PERF_COLOR_RED;
- else if (ratio > 25.0)
- color = PERF_COLOR_GREEN;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
-
- printed += color_fprintf(fp, color, "%20s", thread->comm);
- printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events);
- printed += color_fprintf(fp, color, "%5.1f%%", ratio);
- printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
+ struct stats *stats;
+ size_t printed = 0;
+ struct syscall *sc;
+ struct int_node *inode = intlist__first(ttrace->syscall_stats);
+
+ if (inode == NULL)
+ return 0;
+
+ printed += fprintf(fp, "\n");
+
+ /* each int_node is a syscall */
+ while (inode) {
+ stats = inode->priv;
+ if (stats) {
+ double min = (double)(stats->min) / NSEC_PER_MSEC;
+ double max = (double)(stats->max) / NSEC_PER_MSEC;
+ double avg = avg_stats(stats);
+ double pct;
+ u64 n = (u64) stats->n;
+
+ pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
+ avg /= NSEC_PER_MSEC;
+
+ sc = &trace->syscalls.table[inode->i];
+ printed += fprintf(fp, "%24s %14s : ", "", sc->name);
+ printed += fprintf(fp, "%5" PRIu64 " %8.3f %8.3f",
+ n, min, max);
+ printed += fprintf(fp, " %8.3f %6.2f\n", avg, pct);
+ }
+
+ inode = intlist__next(inode);
}
+ printed += fprintf(fp, "\n\n");
+
return printed;
}
+/* struct used to pass data to per-thread function */
+struct summary_data {
+ FILE *fp;
+ struct trace *trace;
+ size_t printed;
+};
+
+static int trace__fprintf_one_thread(struct thread *thread, void *priv)
+{
+ struct summary_data *data = priv;
+ FILE *fp = data->fp;
+ size_t printed = data->printed;
+ struct trace *trace = data->trace;
+ struct thread_trace *ttrace = thread->priv;
+ const char *color;
+ double ratio;
+
+ if (ttrace == NULL)
+ return 0;
+
+ ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
+
+ color = PERF_COLOR_NORMAL;
+ if (ratio > 50.0)
+ color = PERF_COLOR_RED;
+ else if (ratio > 25.0)
+ color = PERF_COLOR_GREEN;
+ else if (ratio > 5.0)
+ color = PERF_COLOR_YELLOW;
+
+ printed += color_fprintf(fp, color, "%20s", thread->comm);
+ printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events);
+ printed += color_fprintf(fp, color, "%5.1f%%", ratio);
+ printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
+ printed += thread__dump_stats(ttrace, trace, fp);
+
+ data->printed += printed;
+
+ return 0;
+}
+
+static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
+{
+ struct summary_data data = {
+ .fp = fp,
+ .trace = trace
+ };
+ data.printed = trace__fprintf_threads_header(fp);
+
+ machine__for_each_thread(trace->host, trace__fprintf_one_thread, &data);
+
+ return data.printed;
+}
+
static int trace__set_duration(const struct option *opt, const char *str,
int unset __maybe_unused)
{
@@ -1183,10 +2041,15 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
const char * const trace_usage[] = {
"perf trace [<options>] [<command>]",
"perf trace [<options>] -- <command> [<options>]",
+ "perf trace record [<options>] [<command>]",
+ "perf trace record [<options>] -- <command> [<options>]",
NULL
};
struct trace trace = {
- .audit_machine = audit_detect_machine(),
+ .audit = {
+ .machine = audit_detect_machine(),
+ .open_id = audit_name_to_syscall("open", trace.audit.machine),
+ },
.syscalls = {
.max = -1,
},
@@ -1201,10 +2064,14 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
.mmap_pages = 1024,
},
.output = stdout,
+ .show_comm = true,
};
const char *output_name = NULL;
const char *ev_qualifier_str = NULL;
const struct option trace_options[] = {
+ OPT_BOOLEAN(0, "comm", &trace.show_comm,
+ "show the thread COMM next to its id"),
+ OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
"list of events to trace"),
OPT_STRING('o', "output", &output_name, "file", "output file name"),
@@ -1219,8 +2086,9 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
"list of cpus to monitor"),
OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
"child tasks do not inherit counters"),
- OPT_UINTEGER('m', "mmap-pages", &trace.opts.mmap_pages,
- "number of mmap data pages"),
+ OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
+ "number of mmap data pages",
+ perf_evlist__parse_mmap_pages),
OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
"user to profile"),
OPT_CALLBACK(0, "duration", &trace, "float",
@@ -1228,11 +2096,18 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
trace__set_duration),
OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
+ OPT_BOOLEAN('T', "time", &trace.full_time,
+ "Show full timestamp, not time relative to first start"),
+ OPT_BOOLEAN(0, "summary", &trace.summary,
+ "Show syscall summary with statistics"),
OPT_END()
};
int err;
char bf[BUFSIZ];
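+ /* "perf trace record ..." is forwarded to 'perf record' before option parsing. */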
+ if ((argc > 1) && (strcmp(argv[1], "record") == 0))
+ return trace__record(argc-2, &argv[2]);
+
argc = parse_options(argc, argv, trace_options, trace_usage, 0);
if (output_name != NULL) {
@@ -1280,9 +2155,6 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
else
err = trace__run(&trace, argc, argv);
- if (trace.sched && !err)
- trace__fprintf_thread_summary(&trace, trace.output);
-
out_close:
if (output_name != NULL)
fclose(trace.output);
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 5f6f9b3271bb..2e6a364530fe 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -23,15 +23,19 @@ ifeq ($(ARCH),x86_64)
endif
ifeq (${IS_X86_64}, 1)
RAW_ARCH := x86_64
- CFLAGS += -DARCH_X86_64
+ CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
endif
NO_PERF_REGS := 0
LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
endif
+ifeq ($(ARCH),arm)
+ NO_PERF_REGS := 0
+ LIBUNWIND_LIBS = -lunwind -lunwind-arm
+endif
ifeq ($(NO_PERF_REGS),0)
- CFLAGS += -DHAVE_PERF_REGS
+ CFLAGS += -DHAVE_PERF_REGS_SUPPORT
endif
ifeq ($(src-perf),)
@@ -51,7 +55,6 @@ LIB_INCLUDE := $(srctree)/tools/lib/
# include ARCH specific config
-include $(src-perf)/arch/$(ARCH)/Makefile
-include $(src-perf)/config/feature-tests.mak
include $(src-perf)/config/utilities.mak
ifeq ($(call get-executable,$(FLEX)),)
@@ -67,10 +70,11 @@ ifneq ($(WERROR),0)
CFLAGS += -Werror
endif
-ifeq ("$(origin DEBUG)", "command line")
- PERF_DEBUG = $(DEBUG)
+ifndef DEBUG
+ DEBUG := 0
endif
-ifndef PERF_DEBUG
+
+ifeq ($(DEBUG),0)
CFLAGS += -O6
endif
@@ -89,20 +93,126 @@ CFLAGS += -std=gnu99
EXTLIBS = -lelf -lpthread -lrt -lm -ldl
-ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
- CFLAGS += -fstack-protector-all
+ifneq ($(OUTPUT),)
+ OUTPUT_FEATURES = $(OUTPUT)config/feature-checks/
+ $(shell mkdir -p $(OUTPUT_FEATURES))
endif
-ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -Wstack-protector,-Wstack-protector),y)
- CFLAGS += -Wstack-protector
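+#
+# feature_check builds config/feature-checks/test-<feature> and records the
+# result in feature-<feature> (1 on success, 0 on failure); feature_set simply
+# forces feature-<feature> to 1 without building anything.
+#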
+feature_check = $(eval $(feature_check_code))
+define feature_check_code
+ feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) LDFLAGS=$(LDFLAGS) -C config/feature-checks test-$1 >/dev/null 2>/dev/null && echo 1 || echo 0)
+endef
+
+feature_set = $(eval $(feature_set_code))
+define feature_set_code
+ feature-$(1) := 1
+endef
+
+#
+# Build the feature check binaries in parallel, ignore errors, ignore return value and suppress output:
+#
+
+#
+# Note that this is not a complete list of all feature tests, just
+# those that are typically built on a fully configured system.
+#
+# [ Feature tests not mentioned here have to be built explicitly in
+# the rule that uses them - an example for that is the 'bionic'
+# feature check. ]
+#
+CORE_FEATURE_TESTS = \
+ backtrace \
+ dwarf \
+ fortify-source \
+ glibc \
+ gtk2 \
+ gtk2-infobar \
+ libaudit \
+ libbfd \
+ libelf \
+ libelf-getphdrnum \
+ libelf-mmap \
+ libnuma \
+ libperl \
+ libpython \
+ libpython-version \
+ libslang \
+ libunwind \
+ libunwind-debug-frame \
+ on-exit \
+ stackprotector \
+ stackprotector-all
+
+#
+# So here we detect whether test-all was rebuilt, to be able
+# to skip the print-out of the long features list if the file
+# existed before and after it was built:
+#
+ifeq ($(wildcard $(OUTPUT)config/feature-checks/test-all),)
+ test-all-failed := 1
+else
+ test-all-failed := 0
+endif
+
+#
+# Special fast-path for the 'all features are available' case:
+#
+$(call feature_check,all,$(MSG))
+
+#
+# Just in case the build freshly failed, make sure we print the
+# feature matrix:
+#
+ifeq ($(feature-all), 0)
+ test-all-failed := 1
+endif
+
+ifeq ($(test-all-failed),1)
+ $(info )
+ $(info Auto-detecting system features:)
endif
-ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -Wvolatile-register-var,-Wvolatile-register-var),y)
- CFLAGS += -Wvolatile-register-var
+ifeq ($(feature-all), 1)
+ #
+ # test-all.c passed - just set all the core feature flags to 1:
+ #
+ $(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_set,$(feat)))
+else
+ $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) LDFLAGS=$(LDFLAGS) -i -j -C config/feature-checks $(CORE_FEATURE_TESTS) >/dev/null 2>&1)
+ $(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_check,$(feat)))
endif
-ifndef PERF_DEBUG
- ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -D_FORTIFY_SOURCE=2,-D_FORTIFY_SOURCE=2),y)
+#
+# Print the result of the feature test:
+#
+feature_print = $(eval $(feature_print_code)) $(info $(MSG))
+
+define feature_print_code
+ ifeq ($(feature-$(1)), 1)
+ MSG = $(shell printf '...%30s: [ \033[32mon\033[m ]' $(1))
+ else
+ MSG = $(shell printf '...%30s: [ \033[31mOFF\033[m ]' $(1))
+ endif
+endef
+
+#
+# Only print out our features if we rebuilt the testcases or if a test failed:
+#
+ifeq ($(test-all-failed), 1)
+ $(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_print,$(feat)))
+ $(info )
+endif
+
+ifeq ($(feature-stackprotector-all), 1)
+ CFLAGS += -fstack-protector-all
+endif
+
+ifeq ($(feature-stackprotector), 1)
+ CFLAGS += -Wstack-protector
+endif
+
+ifeq ($(DEBUG),0)
+ ifeq ($(feature-fortify-source), 1)
CFLAGS += -D_FORTIFY_SOURCE=2
endif
endif
@@ -128,120 +238,115 @@ CFLAGS += -I$(LIB_INCLUDE)
CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
ifndef NO_BIONIC
-ifeq ($(call try-cc,$(SOURCE_BIONIC),$(CFLAGS),bionic),y)
- BIONIC := 1
- EXTLIBS := $(filter-out -lrt,$(EXTLIBS))
- EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
+ $(feature_check,bionic)
+ ifeq ($(feature-bionic), 1)
+ BIONIC := 1
+ EXTLIBS := $(filter-out -lrt,$(EXTLIBS))
+ EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
+ endif
endif
-endif # NO_BIONIC
ifdef NO_LIBELF
NO_DWARF := 1
NO_DEMANGLE := 1
NO_LIBUNWIND := 1
else
-FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
-ifneq ($(call try-cc,$(SOURCE_LIBELF),$(FLAGS_LIBELF),libelf),y)
- FLAGS_GLIBC=$(CFLAGS) $(LDFLAGS)
- ifeq ($(call try-cc,$(SOURCE_GLIBC),$(FLAGS_GLIBC),glibc),y)
- LIBC_SUPPORT := 1
- endif
- ifeq ($(BIONIC),1)
- LIBC_SUPPORT := 1
- endif
- ifeq ($(LIBC_SUPPORT),1)
- msg := $(warning No libelf found, disables 'probe' tool, please install elfutils-libelf-devel/libelf-dev);
+ ifeq ($(feature-libelf), 0)
+ ifeq ($(feature-glibc), 1)
+ LIBC_SUPPORT := 1
+ endif
+ ifeq ($(BIONIC),1)
+ LIBC_SUPPORT := 1
+ endif
+ ifeq ($(LIBC_SUPPORT),1)
+ msg := $(warning No libelf found, disables 'probe' tool, please install elfutils-libelf-devel/libelf-dev);
- NO_LIBELF := 1
- NO_DWARF := 1
- NO_DEMANGLE := 1
+ NO_LIBELF := 1
+ NO_DWARF := 1
+ NO_DEMANGLE := 1
+ else
+ msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
+ endif
else
- msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
- endif
-else
- # for linking with debug library, run like:
- # make DEBUG=1 LIBDW_DIR=/opt/libdw/
- ifdef LIBDW_DIR
- LIBDW_CFLAGS := -I$(LIBDW_DIR)/include
- LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib
- endif
+ # for linking with debug library, run like:
+ # make DEBUG=1 LIBDW_DIR=/opt/libdw/
+ ifdef LIBDW_DIR
+ LIBDW_CFLAGS := -I$(LIBDW_DIR)/include
+ LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib
+ endif
- FLAGS_DWARF=$(CFLAGS) $(LIBDW_CFLAGS) -ldw -lz -lelf $(LIBDW_LDFLAGS) $(LDFLAGS) $(EXTLIBS)
- ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF),libdw),y)
- msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
- NO_DWARF := 1
- endif # Dwarf support
-endif # SOURCE_LIBELF
+ ifneq ($(feature-dwarf), 1)
+ msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
+ NO_DWARF := 1
+ endif # Dwarf support
+ endif # libelf support
endif # NO_LIBELF
ifndef NO_LIBELF
-CFLAGS += -DLIBELF_SUPPORT
-FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
-ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
- CFLAGS += -DLIBELF_MMAP
-endif
-ifeq ($(call try-cc,$(SOURCE_ELF_GETPHDRNUM),$(FLAGS_LIBELF),-DHAVE_ELF_GETPHDRNUM),y)
- CFLAGS += -DHAVE_ELF_GETPHDRNUM
-endif
+ CFLAGS += -DHAVE_LIBELF_SUPPORT
-# include ARCH specific config
--include $(src-perf)/arch/$(ARCH)/Makefile
+ ifeq ($(feature-libelf-mmap), 1)
+ CFLAGS += -DHAVE_LIBELF_MMAP_SUPPORT
+ endif
-ifndef NO_DWARF
-ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
- msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
- NO_DWARF := 1
-else
- CFLAGS += -DDWARF_SUPPORT $(LIBDW_CFLAGS)
- LDFLAGS += $(LIBDW_LDFLAGS)
- EXTLIBS += -lelf -ldw
-endif # PERF_HAVE_DWARF_REGS
-endif # NO_DWARF
+ ifeq ($(feature-libelf-getphdrnum), 1)
+ CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT
+ endif
-endif # NO_LIBELF
+ # include ARCH specific config
+ -include $(src-perf)/arch/$(ARCH)/Makefile
-ifndef NO_LIBELF
-CFLAGS += -DLIBELF_SUPPORT
-FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
-ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
- CFLAGS += -DLIBELF_MMAP
-endif # try-cc
+ ifndef NO_DWARF
+ ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
+ msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
+ NO_DWARF := 1
+ else
+ CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS)
+ LDFLAGS += $(LIBDW_LDFLAGS)
+ EXTLIBS += -lelf -ldw
+ endif # PERF_HAVE_DWARF_REGS
+ endif # NO_DWARF
endif # NO_LIBELF
-# There's only x86 (both 32 and 64) support for CFI unwind so far
-ifneq ($(ARCH),x86)
+ifeq ($(LIBUNWIND_LIBS),)
NO_LIBUNWIND := 1
endif
ifndef NO_LIBUNWIND
-# for linking with debug library, run like:
-# make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
-ifdef LIBUNWIND_DIR
- LIBUNWIND_CFLAGS := -I$(LIBUNWIND_DIR)/include
- LIBUNWIND_LDFLAGS := -L$(LIBUNWIND_DIR)/lib
-endif
+ #
+ # For linking with debug library, run like:
+ #
+ # make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
+ #
+ ifdef LIBUNWIND_DIR
+ LIBUNWIND_CFLAGS := -I$(LIBUNWIND_DIR)/include
+ LIBUNWIND_LDFLAGS := -L$(LIBUNWIND_DIR)/lib
+ endif
-FLAGS_UNWIND=$(LIBUNWIND_CFLAGS) $(CFLAGS) $(LIBUNWIND_LDFLAGS) $(LDFLAGS) $(EXTLIBS) $(LIBUNWIND_LIBS)
-ifneq ($(call try-cc,$(SOURCE_LIBUNWIND),$(FLAGS_UNWIND),libunwind),y)
- msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 0.99);
- NO_LIBUNWIND := 1
-endif # Libunwind support
-endif # NO_LIBUNWIND
+ ifneq ($(feature-libunwind), 1)
+ msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 1.1);
+ NO_LIBUNWIND := 1
+ else
+ ifneq ($(feature-libunwind-debug-frame), 1)
+ msg := $(warning No debug_frame support found in libunwind);
+ CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
+ endif
+ endif
+endif
ifndef NO_LIBUNWIND
- CFLAGS += -DLIBUNWIND_SUPPORT
+ CFLAGS += -DHAVE_LIBUNWIND_SUPPORT
EXTLIBS += $(LIBUNWIND_LIBS)
CFLAGS += $(LIBUNWIND_CFLAGS)
LDFLAGS += $(LIBUNWIND_LDFLAGS)
-endif # NO_LIBUNWIND
+endif
ifndef NO_LIBAUDIT
- FLAGS_LIBAUDIT = $(CFLAGS) $(LDFLAGS) -laudit
- ifneq ($(call try-cc,$(SOURCE_LIBAUDIT),$(FLAGS_LIBAUDIT),libaudit),y)
+ ifneq ($(feature-libaudit), 1)
msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
NO_LIBAUDIT := 1
else
- CFLAGS += -DLIBAUDIT_SUPPORT
+ CFLAGS += -DHAVE_LIBAUDIT_SUPPORT
EXTLIBS += -laudit
endif
endif
@@ -251,30 +356,30 @@ ifdef NO_NEWT
endif
ifndef NO_SLANG
- FLAGS_SLANG=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) -I/usr/include/slang -lslang
- ifneq ($(call try-cc,$(SOURCE_SLANG),$(FLAGS_SLANG),libslang),y)
+ ifneq ($(feature-libslang), 1)
msg := $(warning slang not found, disables TUI support. Please install slang-devel or libslang-dev);
NO_SLANG := 1
else
# Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
CFLAGS += -I/usr/include/slang
- CFLAGS += -DSLANG_SUPPORT
+ CFLAGS += -DHAVE_SLANG_SUPPORT
EXTLIBS += -lslang
endif
endif
ifndef NO_GTK2
FLAGS_GTK2=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null)
- ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2),gtk2),y)
+ ifneq ($(feature-gtk2), 1)
msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
NO_GTK2 := 1
else
- ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2),-DHAVE_GTK_INFO_BAR),y)
- CFLAGS += -DHAVE_GTK_INFO_BAR
+ ifeq ($(feature-gtk2-infobar), 1)
+ GTK_CFLAGS := -DHAVE_GTK_INFO_BAR_SUPPORT
endif
- CFLAGS += -DGTK2_SUPPORT
- CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null)
- EXTLIBS += $(shell pkg-config --libs gtk+-2.0 2>/dev/null)
+ CFLAGS += -DHAVE_GTK2_SUPPORT
+ GTK_CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null)
+ GTK_LIBS := $(shell pkg-config --libs gtk+-2.0 2>/dev/null)
+ EXTLIBS += -ldl
endif
endif
@@ -290,7 +395,7 @@ else
PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
- ifneq ($(call try-cc,$(SOURCE_PERL_EMBED),$(FLAGS_PERL_EMBED),perl),y)
+ ifneq ($(feature-libperl), 1)
CFLAGS += -DNO_LIBPERL
NO_LIBPERL := 1
else
@@ -335,11 +440,11 @@ else
PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
- ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED),python),y)
+ ifneq ($(feature-libpython), 1)
$(call disable-python,Python.h (for Python 2.x))
else
- ifneq ($(call try-cc,$(SOURCE_PYTHON_VERSION),$(FLAGS_PYTHON_EMBED),python version),y)
+ ifneq ($(feature-libpython-version), 1)
$(warning Python 3 is not yet supported; please set)
$(warning PYTHON and/or PYTHON_CONFIG appropriately.)
$(warning If you also have Python 2 installed, then)
@@ -362,33 +467,30 @@ else
endif
endif
+ifeq ($(feature-libbfd), 1)
+ EXTLIBS += -lbfd
+endif
+
ifdef NO_DEMANGLE
CFLAGS += -DNO_DEMANGLE
else
- ifdef HAVE_CPLUS_DEMANGLE
+ ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
EXTLIBS += -liberty
- CFLAGS += -DHAVE_CPLUS_DEMANGLE
+ CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
else
- FLAGS_BFD=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) -DPACKAGE='perf' -lbfd
- has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD),libbfd)
- ifeq ($(has_bfd),y)
- EXTLIBS += -lbfd
- else
- FLAGS_BFD_IBERTY=$(FLAGS_BFD) -liberty
- has_bfd_iberty := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY),liberty)
- ifeq ($(has_bfd_iberty),y)
+ ifneq ($(feature-libbfd), 1)
+ $(feature_check,liberty)
+ ifeq ($(feature-liberty), 1)
EXTLIBS += -lbfd -liberty
else
- FLAGS_BFD_IBERTY_Z=$(FLAGS_BFD_IBERTY) -lz
- has_bfd_iberty_z := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY_Z),libz)
- ifeq ($(has_bfd_iberty_z),y)
+ $(feature_check,liberty-z)
+ ifeq ($(feature-liberty-z), 1)
EXTLIBS += -lbfd -liberty -lz
else
- FLAGS_CPLUS_DEMANGLE=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) -liberty
- has_cplus_demangle := $(call try-cc,$(SOURCE_CPLUS_DEMANGLE),$(FLAGS_CPLUS_DEMANGLE),demangle)
- ifeq ($(has_cplus_demangle),y)
+ $(feature_check,cplus-demangle)
+ ifeq ($(feature-cplus-demangle), 1)
EXTLIBS += -liberty
- CFLAGS += -DHAVE_CPLUS_DEMANGLE
+ CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
else
msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling)
CFLAGS += -DNO_DEMANGLE
@@ -399,31 +501,28 @@ else
endif
endif
-ifndef NO_STRLCPY
- ifeq ($(call try-cc,$(SOURCE_STRLCPY),,-DHAVE_STRLCPY),y)
- CFLAGS += -DHAVE_STRLCPY
- endif
+ifneq ($(filter -lbfd,$(EXTLIBS)),)
+ CFLAGS += -DHAVE_LIBBFD_SUPPORT
endif
ifndef NO_ON_EXIT
- ifeq ($(call try-cc,$(SOURCE_ON_EXIT),,-DHAVE_ON_EXIT),y)
- CFLAGS += -DHAVE_ON_EXIT
+ ifeq ($(feature-on-exit), 1)
+ CFLAGS += -DHAVE_ON_EXIT_SUPPORT
endif
endif
ifndef NO_BACKTRACE
- ifeq ($(call try-cc,$(SOURCE_BACKTRACE),,-DBACKTRACE_SUPPORT),y)
- CFLAGS += -DBACKTRACE_SUPPORT
+ ifeq ($(feature-backtrace), 1)
+ CFLAGS += -DHAVE_BACKTRACE_SUPPORT
endif
endif
ifndef NO_LIBNUMA
- FLAGS_LIBNUMA = $(CFLAGS) $(LDFLAGS) -lnuma
- ifneq ($(call try-cc,$(SOURCE_LIBNUMA),$(FLAGS_LIBNUMA),libnuma),y)
+ ifeq ($(feature-libnuma), 0)
msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numa-libs-devel or libnuma-dev);
NO_LIBNUMA := 1
else
- CFLAGS += -DLIBNUMA_SUPPORT
+ CFLAGS += -DHAVE_LIBNUMA_SUPPORT
EXTLIBS += -lnuma
endif
endif
@@ -459,7 +558,12 @@ else
sysconfdir = $(prefix)/etc
ETC_PERFCONFIG = etc/perfconfig
endif
+ifeq ($(IS_X86_64),1)
+lib = lib64
+else
lib = lib
+endif
+libdir = $(prefix)/$(lib)
# Shell quote (do not use $(call) to accommodate ancient setups);
ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG))
@@ -472,6 +576,7 @@ template_dir_SQ = $(subst ','\'',$(template_dir))
htmldir_SQ = $(subst ','\'',$(htmldir))
prefix_SQ = $(subst ','\'',$(prefix))
sysconfdir_SQ = $(subst ','\'',$(sysconfdir))
+libdir_SQ = $(subst ','\'',$(libdir))
ifneq ($(filter /%,$(firstword $(perfexecdir))),)
perfexec_instdir = $(perfexecdir)
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
new file mode 100644
index 000000000000..abaf8f4ea93a
--- /dev/null
+++ b/tools/perf/config/feature-checks/Makefile
@@ -0,0 +1,148 @@
+
+FILES= \
+ test-all \
+ test-backtrace \
+ test-bionic \
+ test-dwarf \
+ test-fortify-source \
+ test-glibc \
+ test-gtk2 \
+ test-gtk2-infobar \
+ test-hello \
+ test-libaudit \
+ test-libbfd \
+ test-liberty \
+ test-liberty-z \
+ test-cplus-demangle \
+ test-libelf \
+ test-libelf-getphdrnum \
+ test-libelf-mmap \
+ test-libnuma \
+ test-libperl \
+ test-libpython \
+ test-libpython-version \
+ test-libslang \
+ test-libunwind \
+ test-libunwind-debug-frame \
+ test-on-exit \
+ test-stackprotector-all \
+ test-stackprotector
+
+CC := $(CC) -MD
+
+all: $(FILES)
+
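+# Each test-* rule compiles and links the matching test-*.c file; callers only
+# check the exit status and discard all output.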
+BUILD = $(CC) $(LDFLAGS) -o $(OUTPUT)$@ $@.c
+
+###############################
+
+test-all:
+ $(BUILD) -Werror -fstack-protector -fstack-protector-all -O2 -Werror -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lunwind -lunwind-x86_64 -lelf -laudit -I/usr/include/slang -lslang $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl
+
+test-hello:
+ $(BUILD)
+
+test-stackprotector-all:
+ $(BUILD) -Werror -fstack-protector-all
+
+test-stackprotector:
+ $(BUILD) -Werror -fstack-protector -Wstack-protector
+
+test-fortify-source:
+ $(BUILD) -O2 -Werror -D_FORTIFY_SOURCE=2
+
+test-bionic:
+ $(BUILD)
+
+test-libelf:
+ $(BUILD) -lelf
+
+test-glibc:
+ $(BUILD)
+
+test-dwarf:
+ $(BUILD) -ldw
+
+test-libelf-mmap:
+ $(BUILD) -lelf
+
+test-libelf-getphdrnum:
+ $(BUILD) -lelf
+
+test-libnuma:
+ $(BUILD) -lnuma
+
+test-libunwind:
+ $(BUILD) -lunwind -lunwind-x86_64 -lelf
+
+test-libunwind-debug-frame:
+ $(BUILD) -lunwind -lunwind-x86_64 -lelf
+
+test-libaudit:
+ $(BUILD) -laudit
+
+test-libslang:
+ $(BUILD) -I/usr/include/slang -lslang
+
+test-gtk2:
+ $(BUILD) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null)
+
+test-gtk2-infobar:
+ $(BUILD) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null)
+
+grep-libs = $(filter -l%,$(1))
+strip-libs = $(filter-out -l%,$(1))
+
+PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
+PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
+PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
+PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
+
+test-libperl:
+ $(BUILD) $(FLAGS_PERL_EMBED)
+
+override PYTHON := python
+override PYTHON_CONFIG := python-config
+
+escape-for-shell-sq = $(subst ','\'',$(1))
+shell-sq = '$(escape-for-shell-sq)'
+
+PYTHON_CONFIG_SQ = $(call shell-sq,$(PYTHON_CONFIG))
+
+PYTHON_EMBED_LDOPTS = $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
+PYTHON_EMBED_LDFLAGS = $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
+PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS))
+PYTHON_EMBED_CCOPTS = $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
+FLAGS_PYTHON_EMBED = $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
+
+test-libpython:
+ $(BUILD) $(FLAGS_PYTHON_EMBED)
+
+test-libpython-version:
+ $(BUILD) $(FLAGS_PYTHON_EMBED)
+
+test-libbfd:
+ $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+
+test-liberty:
+ $(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty
+
+test-liberty-z:
+ $(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz
+
+test-cplus-demangle:
+ $(BUILD) -liberty
+
+test-on-exit:
+ $(BUILD)
+
+test-backtrace:
+ $(BUILD)
+
+-include *.d
+
+###############################
+
+clean:
+ rm -f $(FILES) *.d
diff --git a/tools/perf/config/feature-checks/test-all.c b/tools/perf/config/feature-checks/test-all.c
new file mode 100644
index 000000000000..726be480f600
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-all.c
@@ -0,0 +1,110 @@
+/*
+ * test-all.c: Try to build all the main testcases at once.
+ *
+ * A well-configured system will have all the prereqs installed, so we can speed
+ * up auto-detection on such systems.
+ */
+
+/*
+ * Quirk: Python and Perl headers cannot be in arbitrary places, so keep
+ * these 3 testcases at the top:
+ */
+#define main main_test_libpython
+# include "test-libpython.c"
+#undef main
+
+#define main main_test_libpython_version
+# include "test-libpython-version.c"
+#undef main
+
+#define main main_test_libperl
+# include "test-libperl.c"
+#undef main
+
+#define main main_test_hello
+# include "test-hello.c"
+#undef main
+
+#define main main_test_libelf
+# include "test-libelf.c"
+#undef main
+
+#define main main_test_libelf_mmap
+# include "test-libelf-mmap.c"
+#undef main
+
+#define main main_test_glibc
+# include "test-glibc.c"
+#undef main
+
+#define main main_test_dwarf
+# include "test-dwarf.c"
+#undef main
+
+#define main main_test_libelf_getphdrnum
+# include "test-libelf-getphdrnum.c"
+#undef main
+
+#define main main_test_libunwind
+# include "test-libunwind.c"
+#undef main
+
+#define main main_test_libunwind_debug_frame
+# include "test-libunwind-debug-frame.c"
+#undef main
+
+#define main main_test_libaudit
+# include "test-libaudit.c"
+#undef main
+
+#define main main_test_libslang
+# include "test-libslang.c"
+#undef main
+
+#define main main_test_gtk2
+# include "test-gtk2.c"
+#undef main
+
+#define main main_test_gtk2_infobar
+# include "test-gtk2-infobar.c"
+#undef main
+
+#define main main_test_libbfd
+# include "test-libbfd.c"
+#undef main
+
+#define main main_test_on_exit
+# include "test-on-exit.c"
+#undef main
+
+#define main main_test_backtrace
+# include "test-backtrace.c"
+#undef main
+
+#define main main_test_libnuma
+# include "test-libnuma.c"
+#undef main
+
+int main(int argc, char *argv[])
+{
+ main_test_libpython();
+ main_test_libpython_version();
+ main_test_libperl();
+ main_test_hello();
+ main_test_libelf();
+ main_test_libelf_mmap();
+ main_test_glibc();
+ main_test_dwarf();
+ main_test_libelf_getphdrnum();
+ main_test_libunwind();
+ main_test_libaudit();
+ main_test_libslang();
+ main_test_gtk2(argc, argv);
+ main_test_gtk2_infobar(argc, argv);
+ main_test_libbfd();
+ main_test_on_exit();
+ main_test_backtrace();
+ main_test_libnuma();
+
+ return 0;
+}
diff --git a/tools/perf/config/feature-checks/test-backtrace.c b/tools/perf/config/feature-checks/test-backtrace.c
new file mode 100644
index 000000000000..7124aa1dc8fb
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-backtrace.c
@@ -0,0 +1,13 @@
+#include <execinfo.h>
+#include <stdio.h>
+
+int main(void)
+{
+ void *backtrace_fns[10];
+ size_t entries;
+
+ entries = backtrace(backtrace_fns, 10);
+ backtrace_symbols_fd(backtrace_fns, entries, 1);
+
+ return 0;
+}
diff --git a/tools/perf/config/feature-checks/test-bionic.c b/tools/perf/config/feature-checks/test-bionic.c
new file mode 100644
index 000000000000..eac24e9513eb
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-bionic.c
@@ -0,0 +1,6 @@
+#include <android/api-level.h>
+
+int main(void)
+{
+ return __ANDROID_API__;
+}
diff --git a/tools/perf/config/feature-checks/test-cplus-demangle.c b/tools/perf/config/feature-checks/test-cplus-demangle.c
new file mode 100644
index 000000000000..610c686e0009
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-cplus-demangle.c
@@ -0,0 +1,14 @@
+extern int printf(const char *format, ...);
+extern char *cplus_demangle(const char *, int);
+
+int main(void)
+{
+ char symbol[4096] = "FieldName__9ClassNameFd";
+ char *tmp;
+
+ tmp = cplus_demangle(symbol, 0);
+
+ printf("demangled symbol: {%s}\n", tmp);
+
+ return 0;
+}
diff --git a/tools/perf/config/feature-checks/test-dwarf.c b/tools/perf/config/feature-checks/test-dwarf.c
new file mode 100644
index 000000000000..3fc1801ce4a9
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-dwarf.c
@@ -0,0 +1,10 @@
+#include <dwarf.h>
+#include <elfutils/libdw.h>
+#include <elfutils/version.h>
+
+int main(void)
+{
+ Dwarf *dbg = dwarf_begin(0, DWARF_C_READ);
+
+ return (long)dbg;
+}
diff --git a/tools/perf/config/feature-checks/test-fortify-source.c b/tools/perf/config/feature-checks/test-fortify-source.c
new file mode 100644
index 000000000000..c9f398d87868
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-fortify-source.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+int main(void)
+{
+ return puts("hi");
+}
diff --git a/tools/perf/config/feature-checks/test-glibc.c b/tools/perf/config/feature-checks/test-glibc.c
new file mode 100644
index 000000000000..b0820345cd98
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-glibc.c
@@ -0,0 +1,8 @@
+#include <gnu/libc-version.h>
+
+int main(void)
+{
+ const char *version = gnu_get_libc_version();
+
+ return (long)version;
+}
diff --git a/tools/perf/config/feature-checks/test-gtk2-infobar.c b/tools/perf/config/feature-checks/test-gtk2-infobar.c
new file mode 100644
index 000000000000..397b4646d066
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-gtk2-infobar.c
@@ -0,0 +1,11 @@
+#pragma GCC diagnostic ignored "-Wstrict-prototypes"
+#include <gtk/gtk.h>
+#pragma GCC diagnostic error "-Wstrict-prototypes"
+
+int main(int argc, char *argv[])
+{
+ gtk_init(&argc, &argv);
+ gtk_info_bar_new();
+
+ return 0;
+}
diff --git a/tools/perf/config/feature-checks/test-gtk2.c b/tools/perf/config/feature-checks/test-gtk2.c
new file mode 100644
index 000000000000..6bd80e509439
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-gtk2.c
@@ -0,0 +1,10 @@
+#pragma GCC diagnostic ignored "-Wstrict-prototypes"
+#include <gtk/gtk.h>
+#pragma GCC diagnostic error "-Wstrict-prototypes"
+
+int main(int argc, char *argv[])
+{
+ gtk_init(&argc, &argv);
+
+ return 0;
+}
diff --git a/tools/perf/config/feature-checks/test-hello.c b/tools/perf/config/feature-checks/test-hello.c
new file mode 100644
index 000000000000..c9f398d87868
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-hello.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+int main(void)
+{
+ return puts("hi");
+}
diff --git a/tools/perf/config/feature-checks/test-libaudit.c b/tools/perf/config/feature-checks/test-libaudit.c
new file mode 100644
index 000000000000..afc019f08641
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libaudit.c
@@ -0,0 +1,10 @@
+#include <libaudit.h>
+
+extern int printf(const char *format, ...);
+
+int main(void)
+{
+ printf("error message: %s\n", audit_errno_to_name(0));
+
+ return audit_open();
+}
diff --git a/tools/perf/config/feature-checks/test-libbfd.c b/tools/perf/config/feature-checks/test-libbfd.c
new file mode 100644
index 000000000000..24059907e990
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libbfd.c
@@ -0,0 +1,15 @@
+#include <bfd.h>
+
+extern int printf(const char *format, ...);
+
+int main(void)
+{
+ char symbol[4096] = "FieldName__9ClassNameFd";
+ char *tmp;
+
+ tmp = bfd_demangle(0, symbol, 0);
+
+ printf("demangled symbol: {%s}\n", tmp);
+
+ return 0;
+}
diff --git a/tools/perf/config/feature-checks/test-libelf-getphdrnum.c b/tools/perf/config/feature-checks/test-libelf-getphdrnum.c
new file mode 100644
index 000000000000..d710459306c3
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libelf-getphdrnum.c
@@ -0,0 +1,8 @@
+#include <libelf.h>
+
+int main(void)
+{
+ size_t dst;
+
+ return elf_getphdrnum(0, &dst);
+}
diff --git a/tools/perf/config/feature-checks/test-libelf-mmap.c b/tools/perf/config/feature-checks/test-libelf-mmap.c
new file mode 100644
index 000000000000..564427d7ef18
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libelf-mmap.c
@@ -0,0 +1,8 @@
+#include <libelf.h>
+
+int main(void)
+{
+ Elf *elf = elf_begin(0, ELF_C_READ_MMAP, 0);
+
+ return (long)elf;
+}
diff --git a/tools/perf/config/feature-checks/test-libelf.c b/tools/perf/config/feature-checks/test-libelf.c
new file mode 100644
index 000000000000..08db322d8957
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libelf.c
@@ -0,0 +1,8 @@
+#include <libelf.h>
+
+int main(void)
+{
+ Elf *elf = elf_begin(0, ELF_C_READ, 0);
+
+ return (long)elf;
+}
diff --git a/tools/perf/config/feature-checks/test-libnuma.c b/tools/perf/config/feature-checks/test-libnuma.c
new file mode 100644
index 000000000000..4763d9cd587d
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libnuma.c
@@ -0,0 +1,9 @@
+#include <numa.h>
+#include <numaif.h>
+
+int main(void)
+{
+ numa_available();
+
+ return 0;
+}
diff --git a/tools/perf/config/feature-checks/test-libperl.c b/tools/perf/config/feature-checks/test-libperl.c
new file mode 100644
index 000000000000..8871f6a0fdb4
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libperl.c
@@ -0,0 +1,9 @@
+#include <EXTERN.h>
+#include <perl.h>
+
+int main(void)
+{
+ perl_alloc();
+
+ return 0;
+}
diff --git a/tools/perf/config/feature-checks/test-libpython-version.c b/tools/perf/config/feature-checks/test-libpython-version.c
new file mode 100644
index 000000000000..facea122d812
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libpython-version.c
@@ -0,0 +1,10 @@
+#include <Python.h>
+
+#if PY_VERSION_HEX >= 0x03000000
+ #error
+#endif
+
+int main(void)
+{
+ return 0;
+}
diff --git a/tools/perf/config/feature-checks/test-libpython.c b/tools/perf/config/feature-checks/test-libpython.c
new file mode 100644
index 000000000000..b24b28ad6324
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libpython.c
@@ -0,0 +1,8 @@
+#include <Python.h>
+
+int main(void)
+{
+ Py_Initialize();
+
+ return 0;
+}
diff --git a/tools/perf/config/feature-checks/test-libslang.c b/tools/perf/config/feature-checks/test-libslang.c
new file mode 100644
index 000000000000..22ff22ed94d1
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libslang.c
@@ -0,0 +1,6 @@
+#include <slang.h>
+
+int main(void)
+{
+ return SLsmg_init_smg();
+}
diff --git a/tools/perf/config/feature-checks/test-libunwind-debug-frame.c b/tools/perf/config/feature-checks/test-libunwind-debug-frame.c
new file mode 100644
index 000000000000..0ef8087a104a
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libunwind-debug-frame.c
@@ -0,0 +1,16 @@
+#include <libunwind.h>
+#include <stdlib.h>
+
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+ unw_word_t ip, unw_word_t segbase,
+ const char *obj_name, unw_word_t start,
+ unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
+int main(void)
+{
+ dwarf_find_debug_frame(0, NULL, 0, 0, NULL, 0, 0);
+ return 0;
+}
diff --git a/tools/perf/config/feature-checks/test-libunwind.c b/tools/perf/config/feature-checks/test-libunwind.c
new file mode 100644
index 000000000000..43b9369bcab7
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-libunwind.c
@@ -0,0 +1,27 @@
+#include <libunwind.h>
+#include <stdlib.h>
+
+extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+ unw_word_t ip,
+ unw_dyn_info_t *di,
+ unw_proc_info_t *pi,
+ int need_unwind_info, void *arg);
+
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+static unw_accessors_t accessors;
+
+int main(void)
+{
+ unw_addr_space_t addr_space;
+
+ addr_space = unw_create_addr_space(&accessors, 0);
+ if (addr_space)
+ return 0;
+
+ unw_init_remote(NULL, addr_space, NULL);
+ dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
+
+ return 0;
+}
diff --git a/tools/perf/config/feature-checks/test-on-exit.c b/tools/perf/config/feature-checks/test-on-exit.c
new file mode 100644
index 000000000000..8e88b16e6ded
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-on-exit.c
@@ -0,0 +1,16 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+static void exit_fn(int status, void *__data)
+{
+ printf("exit status: %d, data: %d\n", status, *(int *)__data);
+}
+
+static int data = 123;
+
+int main(void)
+{
+ on_exit(exit_fn, &data);
+
+ return 321;
+}
diff --git a/tools/perf/config/feature-checks/test-stackprotector-all.c b/tools/perf/config/feature-checks/test-stackprotector-all.c
new file mode 100644
index 000000000000..c9f398d87868
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-stackprotector-all.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+int main(void)
+{
+ return puts("hi");
+}
diff --git a/tools/perf/config/feature-checks/test-stackprotector.c b/tools/perf/config/feature-checks/test-stackprotector.c
new file mode 100644
index 000000000000..c9f398d87868
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-stackprotector.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+int main(void)
+{
+ return puts("hi");
+}
diff --git a/tools/perf/config/feature-checks/test-volatile-register-var.c b/tools/perf/config/feature-checks/test-volatile-register-var.c
new file mode 100644
index 000000000000..c9f398d87868
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-volatile-register-var.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+int main(void)
+{
+ return puts("hi");
+}
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
deleted file mode 100644
index f79305739ecc..000000000000
--- a/tools/perf/config/feature-tests.mak
+++ /dev/null
@@ -1,246 +0,0 @@
-define SOURCE_HELLO
-#include <stdio.h>
-int main(void)
-{
- return puts(\"hi\");
-}
-endef
-
-ifndef NO_DWARF
-define SOURCE_DWARF
-#include <dwarf.h>
-#include <elfutils/libdw.h>
-#include <elfutils/version.h>
-#ifndef _ELFUTILS_PREREQ
-#error
-#endif
-
-int main(void)
-{
- Dwarf *dbg = dwarf_begin(0, DWARF_C_READ);
- return (long)dbg;
-}
-endef
-endif
-
-define SOURCE_LIBELF
-#include <libelf.h>
-
-int main(void)
-{
- Elf *elf = elf_begin(0, ELF_C_READ, 0);
- return (long)elf;
-}
-endef
-
-define SOURCE_GLIBC
-#include <gnu/libc-version.h>
-
-int main(void)
-{
- const char *version = gnu_get_libc_version();
- return (long)version;
-}
-endef
-
-define SOURCE_BIONIC
-#include <android/api-level.h>
-
-int main(void)
-{
- return __ANDROID_API__;
-}
-endef
-
-define SOURCE_ELF_MMAP
-#include <libelf.h>
-int main(void)
-{
- Elf *elf = elf_begin(0, ELF_C_READ_MMAP, 0);
- return (long)elf;
-}
-endef
-
-define SOURCE_ELF_GETPHDRNUM
-#include <libelf.h>
-int main(void)
-{
- size_t dst;
- return elf_getphdrnum(0, &dst);
-}
-endef
-
-ifndef NO_SLANG
-define SOURCE_SLANG
-#include <slang.h>
-
-int main(void)
-{
- return SLsmg_init_smg();
-}
-endef
-endif
-
-ifndef NO_GTK2
-define SOURCE_GTK2
-#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
-#include <gtk/gtk.h>
-#pragma GCC diagnostic error \"-Wstrict-prototypes\"
-
-int main(int argc, char *argv[])
-{
- gtk_init(&argc, &argv);
-
- return 0;
-}
-endef
-
-define SOURCE_GTK2_INFOBAR
-#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
-#include <gtk/gtk.h>
-#pragma GCC diagnostic error \"-Wstrict-prototypes\"
-
-int main(void)
-{
- gtk_info_bar_new();
-
- return 0;
-}
-endef
-endif
-
-ifndef NO_LIBPERL
-define SOURCE_PERL_EMBED
-#include <EXTERN.h>
-#include <perl.h>
-
-int main(void)
-{
-perl_alloc();
-return 0;
-}
-endef
-endif
-
-ifndef NO_LIBPYTHON
-define SOURCE_PYTHON_VERSION
-#include <Python.h>
-#if PY_VERSION_HEX >= 0x03000000
- #error
-#endif
-int main(void)
-{
- return 0;
-}
-endef
-define SOURCE_PYTHON_EMBED
-#include <Python.h>
-int main(void)
-{
- Py_Initialize();
- return 0;
-}
-endef
-endif
-
-define SOURCE_BFD
-#include <bfd.h>
-
-int main(void)
-{
- bfd_demangle(0, 0, 0);
- return 0;
-}
-endef
-
-define SOURCE_CPLUS_DEMANGLE
-extern char *cplus_demangle(const char *, int);
-
-int main(void)
-{
- cplus_demangle(0, 0);
- return 0;
-}
-endef
-
-define SOURCE_STRLCPY
-#include <stdlib.h>
-extern size_t strlcpy(char *dest, const char *src, size_t size);
-
-int main(void)
-{
- strlcpy(NULL, NULL, 0);
- return 0;
-}
-endef
-
-ifndef NO_LIBUNWIND
-define SOURCE_LIBUNWIND
-#include <libunwind.h>
-#include <stdlib.h>
-
-extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
- unw_word_t ip,
- unw_dyn_info_t *di,
- unw_proc_info_t *pi,
- int need_unwind_info, void *arg);
-
-
-#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
-
-int main(void)
-{
- unw_addr_space_t addr_space;
- addr_space = unw_create_addr_space(NULL, 0);
- unw_init_remote(NULL, addr_space, NULL);
- dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
- return 0;
-}
-endef
-endif
-
-ifndef NO_BACKTRACE
-define SOURCE_BACKTRACE
-#include <execinfo.h>
-#include <stdio.h>
-
-int main(void)
-{
- backtrace(NULL, 0);
- backtrace_symbols(NULL, 0);
- return 0;
-}
-endef
-endif
-
-ifndef NO_LIBAUDIT
-define SOURCE_LIBAUDIT
-#include <libaudit.h>
-
-int main(void)
-{
- printf(\"error message: %s\", audit_errno_to_name(0));
- return audit_open();
-}
-endef
-endif
-
-define SOURCE_ON_EXIT
-#include <stdio.h>
-
-int main(void)
-{
- return on_exit(NULL, NULL);
-}
-endef
-
-define SOURCE_LIBNUMA
-#include <numa.h>
-#include <numaif.h>
-
-int main(void)
-{
- numa_available();
- return 0;
-}
-endef
diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak
index 94d2d4f9c35d..f168debc5be2 100644
--- a/tools/perf/config/utilities.mak
+++ b/tools/perf/config/utilities.mak
@@ -179,16 +179,9 @@ _ge_attempt = $(if $(get-executable),$(get-executable),$(_gea_warn)$(call _gea_e
_gea_warn = $(warning The path '$(1)' is not executable.)
_gea_err = $(if $(1),$(error Please set '$(1)' appropriately))
-# try-cc
-# Usage: option = $(call try-cc, source-to-build, cc-options, msg)
-ifneq ($(V),1)
-TRY_CC_OUTPUT= > /dev/null 2>&1
+ifneq ($(findstring $(MAKEFLAGS),s),s)
+ ifneq ($(V),1)
+ QUIET_CLEAN = @printf ' CLEAN %s\n' $1;
+ QUIET_INSTALL = @printf ' INSTALL %s\n' $1;
+ endif
endif
-TRY_CC_MSG=echo " CHK $(3)" 1>&2;
-
-try-cc = $(shell sh -c \
- 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \
- $(TRY_CC_MSG) \
- echo "$(1)" | \
- $(CC) -x c - $(2) -o "$$TMP" $(TRY_CC_OUTPUT) && echo y; \
- rm -f "$$TMP"')
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 85e1aed95204..8b38b4e80ec2 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -49,14 +49,14 @@ static struct cmd_struct commands[] = {
{ "version", cmd_version, 0 },
{ "script", cmd_script, 0 },
{ "sched", cmd_sched, 0 },
-#ifdef LIBELF_SUPPORT
+#ifdef HAVE_LIBELF_SUPPORT
{ "probe", cmd_probe, 0 },
#endif
{ "kmem", cmd_kmem, 0 },
{ "lock", cmd_lock, 0 },
{ "kvm", cmd_kvm, 0 },
{ "test", cmd_test, 0 },
-#ifdef LIBAUDIT_SUPPORT
+#ifdef HAVE_LIBAUDIT_SUPPORT
{ "trace", cmd_trace, 0 },
#endif
{ "inject", cmd_inject, 0 },
@@ -456,6 +456,7 @@ int main(int argc, const char **argv)
{
const char *cmd;
+ /* page_size is defined in the util object. */
page_size = sysconf(_SC_PAGE_SIZE);
cmd = perf_extract_argv0_path(argv[0]);
@@ -480,7 +481,14 @@ int main(int argc, const char **argv)
fprintf(stderr, "cannot handle %s internally", cmd);
goto out;
}
-
+#ifdef HAVE_LIBAUDIT_SUPPORT
+ if (!prefixcmp(cmd, "trace")) {
+ set_buildid_dir();
+ setup_path();
+ argv[0] = "trace";
+ return cmd_trace(argc, argv, NULL);
+ }
+#endif
/* Look for flags.. */
argv++;
argc--;
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index cf20187eee0a..f61c230beec4 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -182,7 +182,9 @@ struct ip_callchain {
struct branch_flags {
u64 mispred:1;
u64 predicted:1;
- u64 reserved:62;
+ u64 in_tx:1;
+ u64 abort:1;
+ u64 reserved:60;
};
struct branch_entry {
@@ -218,7 +220,6 @@ struct perf_record_opts {
bool no_delay;
bool no_inherit;
bool no_samples;
- bool pipe_output;
bool raw_samples;
bool sample_address;
bool sample_weight;
@@ -231,6 +232,7 @@ struct perf_record_opts {
u64 default_interval;
u64 user_interval;
u16 stack_dump_size;
+ bool sample_transaction;
};
#endif
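The widened branch_flags bitfield above makes room for the transaction bits without changing the struct size. A standalone illustration (the field layout is taken from the hunk; the sample values are hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t u64;

    struct branch_flags {
            u64 mispred:1;
            u64 predicted:1;
            u64 in_tx:1;    /* branch executed inside a HW transaction */
            u64 abort:1;    /* branch aborted a transaction */
            u64 reserved:60;
    };

    int main(void)
    {
            struct branch_flags f = { .predicted = 1, .in_tx = 1 };

            printf("predicted=%u in_tx=%u abort=%u\n",
                   (unsigned int)f.predicted,
                   (unsigned int)f.in_tx,
                   (unsigned int)f.abort);
            return 0;
    }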
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 6fb781d5586c..e3fedfa2906e 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -290,6 +290,7 @@ static int process_events(struct machine *machine, struct perf_evlist *evlist,
for (i = 0; i < evlist->nr_mmaps; i++) {
while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
ret = process_event(machine, evlist, event, state);
+ perf_evlist__mmap_consume(evlist, i);
if (ret < 0)
return ret;
}
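Several of the tests below repeat the pairing introduced here: every event returned by perf_evlist__mmap_read() is now explicitly released with perf_evlist__mmap_consume(). A minimal sketch of the loop shape, assuming the perf tool headers (handle() is a hypothetical consumer):

    union perf_event *event;
    int i;

    for (i = 0; i < evlist->nr_mmaps; i++) {
            while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
                    handle(event);                          /* hypothetical */
                    perf_evlist__mmap_consume(evlist, i);   /* release the slot */
            }
    }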
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
index dffe0551acaa..9cc81a3eb9b4 100644
--- a/tools/perf/tests/dso-data.c
+++ b/tools/perf/tests/dso-data.c
@@ -35,6 +35,7 @@ static char *test_file(int size)
if (size != write(fd, buf, size))
templ = NULL;
+ free(buf);
close(fd);
return templ;
}
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 4228ffc0d968..b51abcb2c243 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -222,7 +222,8 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
&sample) < 0)
goto out;
- he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1);
+ he = __hists__add_entry(&evsel->hists, &al, NULL,
+ 1, 1, 0);
if (he == NULL)
goto out;
@@ -244,7 +245,8 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
&sample) < 0)
goto out;
- he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1);
+ he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1,
+ 0);
if (he == NULL)
goto out;
@@ -465,7 +467,7 @@ int test__hists_link(void)
goto out;
list_for_each_entry(evsel, &evlist->entries, node) {
- hists__collapse_resort(&evsel->hists);
+ hists__collapse_resort(&evsel->hists, NULL);
if (verbose > 2)
print_hists(&evsel->hists);
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index d444ea2c47d9..376c35608534 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -36,6 +36,7 @@ static int find_comm(struct perf_evlist *evlist, const char *comm)
(pid_t)event->comm.tid == getpid() &&
strcmp(event->comm.comm, comm) == 0)
found += 1;
+ perf_evlist__mmap_consume(evlist, i);
}
}
return found;
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index c4185b9aeb80..a7232c204eb9 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -122,6 +122,7 @@ int test__basic_mmap(void)
goto out_munmap;
}
nr_events[evsel->idx]++;
+ perf_evlist__mmap_consume(evlist, 0);
}
err = 0;
diff --git a/tools/perf/tests/open-syscall-tp-fields.c b/tools/perf/tests/open-syscall-tp-fields.c
index fc5b9fca8b47..524b221b829b 100644
--- a/tools/perf/tests/open-syscall-tp-fields.c
+++ b/tools/perf/tests/open-syscall-tp-fields.c
@@ -77,8 +77,10 @@ int test__syscall_open_tp_fields(void)
++nr_events;
- if (type != PERF_RECORD_SAMPLE)
+ if (type != PERF_RECORD_SAMPLE) {
+ perf_evlist__mmap_consume(evlist, i);
continue;
+ }
err = perf_evsel__parse_sample(evsel, event, &sample);
if (err) {
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index b8a7056519ac..93a62b06c3af 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -45,7 +45,7 @@ int test__PERF_RECORD(void)
};
cpu_set_t cpu_mask;
size_t cpu_mask_size = sizeof(cpu_mask);
- struct perf_evlist *evlist = perf_evlist__new();
+ struct perf_evlist *evlist = perf_evlist__new_default();
struct perf_evsel *evsel;
struct perf_sample sample;
const char *cmd = "sleep";
@@ -66,16 +66,6 @@ int test__PERF_RECORD(void)
}
/*
- * We need at least one evsel in the evlist, use the default
- * one: "cycles".
- */
- err = perf_evlist__add_default(evlist);
- if (err < 0) {
- pr_debug("Not enough memory to create evsel\n");
- goto out_delete_evlist;
- }
-
- /*
* Create maps of threads and cpus to monitor. In this case
* we start with all threads and cpus (-1, -1) but then in
* perf_evlist__prepare_workload we'll fill in the only thread
@@ -263,6 +253,8 @@ int test__PERF_RECORD(void)
type);
++errs;
}
+
+ perf_evlist__mmap_consume(evlist, i);
}
}
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index 0ab61b1f408e..4ca1b938f6a6 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -122,7 +122,7 @@ int test__perf_time_to_tsc(void)
if (event->header.type != PERF_RECORD_COMM ||
(pid_t)event->comm.pid != getpid() ||
(pid_t)event->comm.tid != getpid())
- continue;
+ goto next_event;
if (strcmp(event->comm.comm, comm1) == 0) {
CHECK__(perf_evsel__parse_sample(evsel, event,
@@ -134,6 +134,8 @@ int test__perf_time_to_tsc(void)
&sample));
comm2_time = sample.time;
}
+next_event:
+ perf_evlist__mmap_consume(evlist, i);
}
}
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 77f598dbd97a..61c9da2eb3a9 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -275,8 +275,8 @@ int test__sample_parsing(void)
* Fail the test if it has not been updated when new sample format bits
* were added.
*/
- if (PERF_SAMPLE_MAX > PERF_SAMPLE_IDENTIFIER << 1) {
- pr_debug("sample format has changed - test needs updating\n");
+ if (PERF_SAMPLE_MAX > PERF_SAMPLE_TRANSACTION << 1) {
+ pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
return -1;
}
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 2e41e2d32ccc..6e2b44ec0749 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -78,7 +78,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
struct perf_sample sample;
if (event->header.type != PERF_RECORD_SAMPLE)
- continue;
+ goto next_event;
err = perf_evlist__parse_sample(evlist, event, &sample);
if (err < 0) {
@@ -88,6 +88,8 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
total_periods += sample.period;
nr_samples++;
+next_event:
+ perf_evlist__mmap_consume(evlist, 0);
}
if ((u64) nr_samples == total_periods) {
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 28fe5894b061..c33d95f9559a 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -37,20 +37,11 @@ int test__task_exit(void)
signal(SIGCHLD, sig_handler);
signal(SIGUSR1, sig_handler);
- evlist = perf_evlist__new();
+ evlist = perf_evlist__new_default();
if (evlist == NULL) {
- pr_debug("perf_evlist__new\n");
+ pr_debug("perf_evlist__new_default\n");
return -1;
}
- /*
- * We need at least one evsel in the evlist, use the default
- * one: "cycles".
- */
- err = perf_evlist__add_default(evlist);
- if (err < 0) {
- pr_debug("Not enough memory to create evsel\n");
- goto out_free_evlist;
- }
/*
* Create maps of threads and cpus to monitor. In this case
@@ -96,10 +87,10 @@ int test__task_exit(void)
retry:
while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
- if (event->header.type != PERF_RECORD_EXIT)
- continue;
+ if (event->header.type == PERF_RECORD_EXIT)
+ nr_exit++;
- nr_exit++;
+ perf_evlist__mmap_consume(evlist, 0);
}
if (!exited || !nr_exit) {
@@ -117,7 +108,6 @@ out_close_evlist:
perf_evlist__close(evlist);
out_delete_maps:
perf_evlist__delete_maps(evlist);
-out_free_evlist:
perf_evlist__delete(evlist);
return err;
}
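Both converted tests rely on perf_evlist__new_default(), which folds the old new() + add_default() sequence into one call. A sketch of the intended usage, assuming the perf tool headers:

    struct perf_evlist *evlist = perf_evlist__new_default();

    if (evlist == NULL)
            return -1;      /* allocation or default "cycles" event failed */

    /* ... run the test against evlist ... */

    perf_evlist__delete(evlist);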
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 08545ae46992..f0697a3aede0 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -442,35 +442,37 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
{
struct map_symbol *ms = browser->b.priv;
struct disasm_line *dl = browser->selection;
- struct symbol *sym = ms->sym;
struct annotation *notes;
- struct symbol *target;
- u64 ip;
+ struct addr_map_symbol target = {
+ .map = ms->map,
+ .addr = map__objdump_2mem(ms->map, dl->ops.target.addr),
+ };
char title[SYM_TITLE_MAX_SIZE];
if (!ins__is_call(dl->ins))
return false;
- ip = ms->map->map_ip(ms->map, dl->ops.target.addr);
- target = map__find_symbol(ms->map, ip, NULL);
- if (target == NULL) {
+ if (map_groups__find_ams(&target, NULL) ||
+ map__rip_2objdump(target.map, target.map->map_ip(target.map,
+ target.addr)) !=
+ dl->ops.target.addr) {
ui_helpline__puts("The called function was not found.");
return true;
}
- notes = symbol__annotation(target);
+ notes = symbol__annotation(target.sym);
pthread_mutex_lock(&notes->lock);
- if (notes->src == NULL && symbol__alloc_hist(target) < 0) {
+ if (notes->src == NULL && symbol__alloc_hist(target.sym) < 0) {
pthread_mutex_unlock(&notes->lock);
ui__warning("Not enough memory for annotating '%s' symbol!\n",
- target->name);
+ target.sym->name);
return true;
}
pthread_mutex_unlock(&notes->lock);
- symbol__tui_annotate(target, ms->map, evsel, hbt);
- sym_title(sym, ms->map, title, sizeof(title));
+ symbol__tui_annotate(target.sym, target.map, evsel, hbt);
+ sym_title(ms->sym, ms->map, title, sizeof(title));
ui_browser__show_title(&browser->b, title);
return true;
}
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index f538794615db..9c7ff8d31b27 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -154,9 +154,9 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
return 0;
}
-int symbol__gtk_annotate(struct symbol *sym, struct map *map,
- struct perf_evsel *evsel,
- struct hist_browser_timer *hbt)
+static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel,
+ struct hist_browser_timer *hbt)
{
GtkWidget *window;
GtkWidget *notebook;
@@ -226,6 +226,13 @@ int symbol__gtk_annotate(struct symbol *sym, struct map *map,
return 0;
}
+int hist_entry__gtk_annotate(struct hist_entry *he,
+ struct perf_evsel *evsel,
+ struct hist_browser_timer *hbt)
+{
+ return symbol__gtk_annotate(he->ms.sym, he->ms.map, evsel, hbt);
+}
+
void perf_gtk__show_annotations(void)
{
GtkWidget *window;
diff --git a/tools/perf/ui/gtk/browser.c b/tools/perf/ui/gtk/browser.c
index c95012cdb438..c24d91221290 100644
--- a/tools/perf/ui/gtk/browser.c
+++ b/tools/perf/ui/gtk/browser.c
@@ -43,7 +43,7 @@ const char *perf_gtk__get_percent_color(double percent)
return NULL;
}
-#ifdef HAVE_GTK_INFO_BAR
+#ifdef HAVE_GTK_INFO_BAR_SUPPORT
GtkWidget *perf_gtk__setup_info_bar(void)
{
GtkWidget *info_bar;
diff --git a/tools/perf/ui/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h
index 3d96785ef155..0a9173ff9a61 100644
--- a/tools/perf/ui/gtk/gtk.h
+++ b/tools/perf/ui/gtk/gtk.h
@@ -12,7 +12,7 @@ struct perf_gtk_context {
GtkWidget *main_window;
GtkWidget *notebook;
-#ifdef HAVE_GTK_INFO_BAR
+#ifdef HAVE_GTK_INFO_BAR_SUPPORT
GtkWidget *info_bar;
GtkWidget *message_label;
#endif
@@ -20,6 +20,9 @@ struct perf_gtk_context {
guint statbar_ctx_id;
};
+int perf_gtk__init(void);
+void perf_gtk__exit(bool wait_for_ok);
+
extern struct perf_gtk_context *pgctx;
static inline bool perf_gtk__is_active_context(struct perf_gtk_context *ctx)
@@ -31,7 +34,7 @@ struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window);
int perf_gtk__deactivate_context(struct perf_gtk_context **ctx);
void perf_gtk__init_helpline(void);
-void perf_gtk__init_progress(void);
+void gtk_ui_progress__init(void);
void perf_gtk__init_hpp(void);
void perf_gtk__signal(int sig);
@@ -39,7 +42,7 @@ void perf_gtk__resize_window(GtkWidget *window);
const char *perf_gtk__get_percent_color(double percent);
GtkWidget *perf_gtk__setup_statusbar(void);
-#ifdef HAVE_GTK_INFO_BAR
+#ifdef HAVE_GTK_INFO_BAR_SUPPORT
GtkWidget *perf_gtk__setup_info_bar(void);
#else
static inline GtkWidget *perf_gtk__setup_info_bar(void)
@@ -48,4 +51,17 @@ static inline GtkWidget *perf_gtk__setup_info_bar(void)
}
#endif
+struct perf_evsel;
+struct perf_evlist;
+struct hist_entry;
+struct hist_browser_timer;
+
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help,
+ struct hist_browser_timer *hbt,
+ float min_pcnt);
+int hist_entry__gtk_annotate(struct hist_entry *he,
+ struct perf_evsel *evsel,
+ struct hist_browser_timer *hbt);
+void perf_gtk__show_annotations(void);
+
#endif /* _PERF_GTK_H_ */
diff --git a/tools/perf/ui/gtk/progress.c b/tools/perf/ui/gtk/progress.c
index 482bcf3df9b7..b656655fbc39 100644
--- a/tools/perf/ui/gtk/progress.c
+++ b/tools/perf/ui/gtk/progress.c
@@ -7,14 +7,14 @@
static GtkWidget *dialog;
static GtkWidget *progress;
-static void gtk_progress_update(u64 curr, u64 total, const char *title)
+static void gtk_ui_progress__update(struct ui_progress *p)
{
- double fraction = total ? 1.0 * curr / total : 0.0;
+ double fraction = p->total ? 1.0 * p->curr / p->total : 0.0;
char buf[1024];
if (dialog == NULL) {
GtkWidget *vbox = gtk_vbox_new(TRUE, 5);
- GtkWidget *label = gtk_label_new(title);
+ GtkWidget *label = gtk_label_new(p->title);
dialog = gtk_window_new(GTK_WINDOW_TOPLEVEL);
progress = gtk_progress_bar_new();
@@ -32,7 +32,7 @@ static void gtk_progress_update(u64 curr, u64 total, const char *title)
}
gtk_progress_bar_set_fraction(GTK_PROGRESS_BAR(progress), fraction);
- snprintf(buf, sizeof(buf), "%"PRIu64" / %"PRIu64, curr, total);
+ snprintf(buf, sizeof(buf), "%"PRIu64" / %"PRIu64, p->curr, p->total);
gtk_progress_bar_set_text(GTK_PROGRESS_BAR(progress), buf);
/* we didn't call gtk_main yet, so do it manually */
@@ -40,7 +40,7 @@ static void gtk_progress_update(u64 curr, u64 total, const char *title)
gtk_main_iteration();
}
-static void gtk_progress_finish(void)
+static void gtk_ui_progress__finish(void)
{
/* this will also destroy all of its children */
gtk_widget_destroy(dialog);
@@ -48,12 +48,12 @@ static void gtk_progress_finish(void)
dialog = NULL;
}
-static struct ui_progress gtk_progress_fns = {
- .update = gtk_progress_update,
- .finish = gtk_progress_finish,
+static struct ui_progress_ops gtk_ui_progress__ops = {
+ .update = gtk_ui_progress__update,
+ .finish = gtk_ui_progress__finish,
};
-void perf_gtk__init_progress(void)
+void gtk_ui_progress__init(void)
{
- progress_fns = &gtk_progress_fns;
+ ui_progress__ops = &gtk_ui_progress__ops;
}
diff --git a/tools/perf/ui/gtk/setup.c b/tools/perf/ui/gtk/setup.c
index 6c2dd2e423f3..1d57676f8212 100644
--- a/tools/perf/ui/gtk/setup.c
+++ b/tools/perf/ui/gtk/setup.c
@@ -8,7 +8,7 @@ int perf_gtk__init(void)
{
perf_error__register(&perf_gtk_eops);
perf_gtk__init_helpline();
- perf_gtk__init_progress();
+ gtk_ui_progress__init();
perf_gtk__init_hpp();
return gtk_init_check(NULL, NULL) ? 0 : -1;
diff --git a/tools/perf/ui/gtk/util.c b/tools/perf/ui/gtk/util.c
index c06942a41c78..696c1fbe4248 100644
--- a/tools/perf/ui/gtk/util.c
+++ b/tools/perf/ui/gtk/util.c
@@ -53,7 +53,7 @@ static int perf_gtk__error(const char *format, va_list args)
return 0;
}
-#ifdef HAVE_GTK_INFO_BAR
+#ifdef HAVE_GTK_INFO_BAR_SUPPORT
static int perf_gtk__warning_info_bar(const char *format, va_list args)
{
char *msg;
@@ -105,7 +105,7 @@ static int perf_gtk__warning_statusbar(const char *format, va_list args)
struct perf_error_ops perf_gtk_eops = {
.error = perf_gtk__error,
-#ifdef HAVE_GTK_INFO_BAR
+#ifdef HAVE_GTK_INFO_BAR_SUPPORT
.warning = perf_gtk__warning_info_bar,
#else
.warning = perf_gtk__warning_statusbar,
diff --git a/tools/perf/ui/progress.c b/tools/perf/ui/progress.c
index 3ec695607a4d..a0f24c7115c5 100644
--- a/tools/perf/ui/progress.c
+++ b/tools/perf/ui/progress.c
@@ -1,26 +1,38 @@
#include "../cache.h"
#include "progress.h"
-static void nop_progress_update(u64 curr __maybe_unused,
- u64 total __maybe_unused,
- const char *title __maybe_unused)
+static void null_progress__update(struct ui_progress *p __maybe_unused)
{
}
-static struct ui_progress default_progress_fns =
+static struct ui_progress_ops null_progress__ops =
{
- .update = nop_progress_update,
+ .update = null_progress__update,
};
-struct ui_progress *progress_fns = &default_progress_fns;
+struct ui_progress_ops *ui_progress__ops = &null_progress__ops;
-void ui_progress__update(u64 curr, u64 total, const char *title)
+void ui_progress__update(struct ui_progress *p, u64 adv)
{
- return progress_fns->update(curr, total, title);
+ p->curr += adv;
+
+ if (p->curr >= p->next) {
+ p->next += p->step;
+ ui_progress__ops->update(p);
+ }
+}
+
+void ui_progress__init(struct ui_progress *p, u64 total, const char *title)
+{
+ p->curr = 0;
+ p->next = p->step = total / 16;
+ p->total = total;
+ p->title = title;
+
}
void ui_progress__finish(void)
{
- if (progress_fns->finish)
- progress_fns->finish();
+ if (ui_progress__ops->finish)
+ ui_progress__ops->finish();
}
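With this change the caller owns a struct ui_progress and advances it by deltas; the UI backend is only invoked once per total/16 step. A sketch of a typical caller, assuming ui/progress.h (the loop and helpers are hypothetical):

    struct ui_progress prog;

    ui_progress__init(&prog, nr_bytes, "Processing events...");

    while (more_work()) {                   /* hypothetical */
            u64 done = do_chunk();          /* hypothetical, returns units completed */
            ui_progress__update(&prog, done);
    }

    ui_progress__finish();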
diff --git a/tools/perf/ui/progress.h b/tools/perf/ui/progress.h
index 257cc224f9cf..29ec8efffefb 100644
--- a/tools/perf/ui/progress.h
+++ b/tools/perf/ui/progress.h
@@ -3,16 +3,21 @@
#include <../types.h>
+void ui_progress__finish(void);
+
struct ui_progress {
- void (*update)(u64, u64, const char *);
- void (*finish)(void);
+ const char *title;
+ u64 curr, next, step, total;
};
+
+void ui_progress__init(struct ui_progress *p, u64 total, const char *title);
+void ui_progress__update(struct ui_progress *p, u64 adv);
-extern struct ui_progress *progress_fns;
-
-void ui_progress__init(void);
+struct ui_progress_ops {
+ void (*update)(struct ui_progress *p);
+ void (*finish)(void);
+};
-void ui_progress__update(u64 curr, u64 total, const char *title);
-void ui_progress__finish(void);
+extern struct ui_progress_ops *ui_progress__ops;
#endif
diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c
index 47d9a571f261..5df5140a9f29 100644
--- a/tools/perf/ui/setup.c
+++ b/tools/perf/ui/setup.c
@@ -1,10 +1,64 @@
#include <pthread.h>
+#include <dlfcn.h>
#include "../util/cache.h"
#include "../util/debug.h"
#include "../util/hist.h"
pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
+void *perf_gtk_handle;
+
+#ifdef HAVE_GTK2_SUPPORT
+static int setup_gtk_browser(void)
+{
+ int (*perf_ui_init)(void);
+
+ if (perf_gtk_handle)
+ return 0;
+
+ perf_gtk_handle = dlopen(PERF_GTK_DSO, RTLD_LAZY);
+ if (perf_gtk_handle == NULL) {
+ char buf[PATH_MAX];
+ scnprintf(buf, sizeof(buf), "%s/%s", LIBDIR, PERF_GTK_DSO);
+ perf_gtk_handle = dlopen(buf, RTLD_LAZY);
+ }
+ if (perf_gtk_handle == NULL)
+ return -1;
+
+ perf_ui_init = dlsym(perf_gtk_handle, "perf_gtk__init");
+ if (perf_ui_init == NULL)
+ goto out_close;
+
+ if (perf_ui_init() == 0)
+ return 0;
+
+out_close:
+ dlclose(perf_gtk_handle);
+ return -1;
+}
+
+static void exit_gtk_browser(bool wait_for_ok)
+{
+ void (*perf_ui_exit)(bool);
+
+ if (perf_gtk_handle == NULL)
+ return;
+
+ perf_ui_exit = dlsym(perf_gtk_handle, "perf_gtk__exit");
+ if (perf_ui_exit == NULL)
+ goto out_close;
+
+ perf_ui_exit(wait_for_ok);
+
+out_close:
+ dlclose(perf_gtk_handle);
+
+ perf_gtk_handle = NULL;
+}
+#else
+static inline int setup_gtk_browser(void) { return -1; }
+static inline void exit_gtk_browser(bool wait_for_ok __maybe_unused) {}
+#endif
void setup_browser(bool fallback_to_pager)
{
@@ -17,8 +71,11 @@ void setup_browser(bool fallback_to_pager)
switch (use_browser) {
case 2:
- if (perf_gtk__init() == 0)
+ if (setup_gtk_browser() == 0)
break;
+ printf("GTK browser requested but could not find %s\n",
+ PERF_GTK_DSO);
+ sleep(1);
/* fall through */
case 1:
use_browser = 1;
@@ -39,7 +96,7 @@ void exit_browser(bool wait_for_ok)
{
switch (use_browser) {
case 2:
- perf_gtk__exit(wait_for_ok);
+ exit_gtk_browser(wait_for_ok);
break;
case 1:
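The GTK front end is now loaded on demand via dlopen() instead of being linked in. A standalone sketch of the same lazy-loading pattern (the DSO and symbol names here are hypothetical, not the ones perf uses):

    #include <dlfcn.h>
    #include <stdio.h>

    int main(void)
    {
            void *handle = dlopen("libmyui.so", RTLD_LAZY); /* hypothetical DSO */
            int (*init)(void);

            if (handle == NULL) {
                    fprintf(stderr, "dlopen: %s\n", dlerror());
                    return 1;                       /* fall back to another UI */
            }

            init = (int (*)(void))dlsym(handle, "ui_init"); /* hypothetical symbol */
            if (init == NULL || init() != 0) {
                    dlclose(handle);
                    return 1;
            }

            return 0;
    }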
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 194e2f42ff5d..6c152686e837 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -315,8 +315,7 @@ static inline void advance_hpp(struct perf_hpp *hpp, int inc)
}
static int hist_entry__period_snprintf(struct perf_hpp *hpp,
- struct hist_entry *he,
- bool color)
+ struct hist_entry *he)
{
const char *sep = symbol_conf.field_sep;
struct perf_hpp_fmt *fmt;
@@ -338,7 +337,7 @@ static int hist_entry__period_snprintf(struct perf_hpp *hpp,
} else
first = false;
- if (color && fmt->color)
+ if (perf_hpp__use_color() && fmt->color)
ret = fmt->color(fmt, hpp, he);
else
ret = fmt->entry(fmt, hpp, he);
@@ -358,12 +357,11 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
.buf = bf,
.size = size,
};
- bool color = !symbol_conf.field_sep;
if (size == 0 || size > bfsz)
size = hpp.size = bfsz;
- ret = hist_entry__period_snprintf(&hpp, he, color);
+ ret = hist_entry__period_snprintf(&hpp, he);
hist_entry__sort_snprintf(he, bf + ret, size - ret, hists);
ret = fprintf(fp, "%s\n", bf);
@@ -482,6 +480,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
print_entries:
linesz = hists__sort_list_width(hists) + 3 + 1;
+ linesz += perf_hpp__color_overhead();
line = malloc(linesz);
if (line == NULL) {
ret = -1;
diff --git a/tools/perf/ui/tui/progress.c b/tools/perf/ui/tui/progress.c
index 6c2184d53cbf..3e2d936d7443 100644
--- a/tools/perf/ui/tui/progress.c
+++ b/tools/perf/ui/tui/progress.c
@@ -2,9 +2,10 @@
#include "../progress.h"
#include "../libslang.h"
#include "../ui.h"
+#include "tui.h"
#include "../browser.h"
-static void tui_progress__update(u64 curr, u64 total, const char *title)
+static void tui_progress__update(struct ui_progress *p)
{
int bar, y;
/*
@@ -14,7 +15,7 @@ static void tui_progress__update(u64 curr, u64 total, const char *title)
if (use_browser <= 0)
return;
- if (total == 0)
+ if (p->total == 0)
return;
ui__refresh_dimensions(true);
@@ -23,20 +24,20 @@ static void tui_progress__update(u64 curr, u64 total, const char *title)
SLsmg_set_color(0);
SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols);
SLsmg_gotorc(y++, 1);
- SLsmg_write_string((char *)title);
+ SLsmg_write_string((char *)p->title);
SLsmg_set_color(HE_COLORSET_SELECTED);
- bar = ((SLtt_Screen_Cols - 2) * curr) / total;
+ bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total;
SLsmg_fill_region(y, 1, 1, bar, ' ');
SLsmg_refresh();
pthread_mutex_unlock(&ui__lock);
}
-static struct ui_progress tui_progress_fns =
+static struct ui_progress_ops tui_progress__ops =
{
.update = tui_progress__update,
};
-void ui_progress__init(void)
+void tui_progress__init(void)
{
- progress_fns = &tui_progress_fns;
+ ui_progress__ops = &tui_progress__ops;
}
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index b9401482d110..2f612562978c 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -9,6 +9,7 @@
#include "../util.h"
#include "../libslang.h"
#include "../keysyms.h"
+#include "tui.h"
static volatile int ui__need_resize;
@@ -119,7 +120,7 @@ int ui__init(void)
ui_helpline__init();
ui_browser__init();
- ui_progress__init();
+ tui_progress__init();
signal(SIGSEGV, ui__signal);
signal(SIGFPE, ui__signal);
diff --git a/tools/perf/ui/tui/tui.h b/tools/perf/ui/tui/tui.h
new file mode 100644
index 000000000000..18961c7b6ec5
--- /dev/null
+++ b/tools/perf/ui/tui/tui.h
@@ -0,0 +1,6 @@
+#ifndef _PERF_TUI_H_
+#define _PERF_TUI_H_ 1
+
+void tui_progress__init(void);
+
+#endif /* _PERF_TUI_H_ */
diff --git a/tools/perf/ui/ui.h b/tools/perf/ui/ui.h
index 70cb0d4eb8aa..ab88383f8be8 100644
--- a/tools/perf/ui/ui.h
+++ b/tools/perf/ui/ui.h
@@ -6,13 +6,14 @@
#include <linux/compiler.h>
extern pthread_mutex_t ui__lock;
+extern void *perf_gtk_handle;
extern int use_browser;
void setup_browser(bool fallback_to_pager);
void exit_browser(bool wait_for_ok);
-#ifdef SLANG_SUPPORT
+#ifdef HAVE_SLANG_SUPPORT
int ui__init(void);
void ui__exit(bool wait_for_ok);
#else
@@ -23,17 +24,6 @@ static inline int ui__init(void)
static inline void ui__exit(bool wait_for_ok __maybe_unused) {}
#endif
-#ifdef GTK2_SUPPORT
-int perf_gtk__init(void);
-void perf_gtk__exit(bool wait_for_ok);
-#else
-static inline int perf_gtk__init(void)
-{
- return -1;
-}
-static inline void perf_gtk__exit(bool wait_for_ok __maybe_unused) {}
-#endif
-
void ui__refresh_dimensions(bool force);
#endif /* _PERF_UI_H_ */
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index 15a77b7c0e36..ce7a804b2951 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -40,7 +40,7 @@ else
VC=unset
fi
test "$VN" = "$VC" || {
- echo >&2 "PERF_VERSION = $VN"
+ echo >&2 " PERF_VERSION = $VN"
echo "#define PERF_VERSION \"$VN\"" >$GVF
}
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 7eae5488ecea..cf6242c92ee2 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -825,20 +825,16 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
dl->ops.target.offset = dl->ops.target.addr -
map__rip_2objdump(map, sym->start);
- /*
- * kcore has no symbols, so add the call target name if it is on the
- * same map.
- */
+ /* kcore has no symbols, so add the call target name */
if (dl->ins && ins__is_call(dl->ins) && !dl->ops.target.name) {
- struct symbol *s;
- u64 ip = dl->ops.target.addr;
-
- if (ip >= map->start && ip <= map->end) {
- ip = map->map_ip(map, ip);
- s = map__find_symbol(map, ip, NULL);
- if (s && s->start == ip)
- dl->ops.target.name = strdup(s->name);
- }
+ struct addr_map_symbol target = {
+ .map = map,
+ .addr = dl->ops.target.addr,
+ };
+
+ if (!map_groups__find_ams(&target, NULL) &&
+ target.sym->start == target.al_addr)
+ dl->ops.target.name = strdup(target.sym->name);
}
disasm__add(&notes->src->source, dl);
@@ -879,6 +875,8 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
FILE *file;
int err = 0;
char symfs_filename[PATH_MAX];
+ struct kcore_extract kce;
+ bool delete_extract = false;
if (filename) {
snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
@@ -940,6 +938,23 @@ fallback:
pr_debug("annotating [%p] %30s : [%p] %30s\n",
dso, dso->long_name, sym, sym->name);
+ if (dso__is_kcore(dso)) {
+ kce.kcore_filename = symfs_filename;
+ kce.addr = map__rip_2objdump(map, sym->start);
+ kce.offs = sym->start;
+ kce.len = sym->end + 1 - sym->start;
+ if (!kcore_extract__create(&kce)) {
+ delete_extract = true;
+ strlcpy(symfs_filename, kce.extract_filename,
+ sizeof(symfs_filename));
+ if (free_filename) {
+ free(filename);
+ free_filename = false;
+ }
+ filename = symfs_filename;
+ }
+ }
+
snprintf(command, sizeof(command),
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
@@ -972,6 +987,8 @@ fallback:
pclose(file);
out_free_filename:
+ if (delete_extract)
+ kcore_extract__delete(&kce);
if (free_filename)
free(filename);
return err;
@@ -1070,7 +1087,7 @@ static void symbol__free_source_line(struct symbol *sym, int len)
(sizeof(src_line->p) * (src_line->nr_pcnt - 1));
for (i = 0; i < len; i++) {
- free(src_line->path);
+ free_srcline(src_line->path);
src_line = (void *)src_line + sizeof_src_line;
}
@@ -1081,13 +1098,11 @@ static void symbol__free_source_line(struct symbol *sym, int len)
/* Get the filename:line for the colored entries */
static int symbol__get_source_line(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
- struct rb_root *root, int len,
- const char *filename)
+ struct rb_root *root, int len)
{
u64 start;
int i, k;
int evidx = evsel->idx;
- char cmd[PATH_MAX * 2];
struct source_line *src_line;
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
@@ -1115,10 +1130,7 @@ static int symbol__get_source_line(struct symbol *sym, struct map *map,
start = map__rip_2objdump(map, sym->start);
for (i = 0; i < len; i++) {
- char *path = NULL;
- size_t line_len;
u64 offset;
- FILE *fp;
double percent_max = 0.0;
src_line->nr_pcnt = nr_pcnt;
@@ -1135,23 +1147,9 @@ static int symbol__get_source_line(struct symbol *sym, struct map *map,
goto next;
offset = start + i;
- sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset);
- fp = popen(cmd, "r");
- if (!fp)
- goto next;
-
- if (getline(&path, &line_len, fp) < 0 || !line_len)
- goto next_close;
-
- src_line->path = malloc(sizeof(char) * line_len + 1);
- if (!src_line->path)
- goto next_close;
-
- strcpy(src_line->path, path);
+ src_line->path = get_srcline(map->dso, offset);
insert_source_line(&tmp_root, src_line);
- next_close:
- pclose(fp);
next:
src_line = (void *)src_line + sizeof_src_line;
}
@@ -1192,7 +1190,7 @@ static void print_summary(struct rb_root *root, const char *filename)
path = src_line->path;
color = get_percent_color(percent_max);
- color_fprintf(stdout, color, " %s", path);
+ color_fprintf(stdout, color, " %s\n", path);
node = rb_next(node);
}
@@ -1356,7 +1354,6 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map,
bool full_paths, int min_pcnt, int max_lines)
{
struct dso *dso = map->dso;
- const char *filename = dso->long_name;
struct rb_root source_line = RB_ROOT;
u64 len;
@@ -1366,9 +1363,8 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map,
len = symbol__size(sym);
if (print_lines) {
- symbol__get_source_line(sym, map, evsel, &source_line,
- len, filename);
- print_summary(&source_line, filename);
+ symbol__get_source_line(sym, map, evsel, &source_line, len);
+ print_summary(&source_line, dso->long_name);
}
symbol__annotate_printf(sym, map, evsel, full_paths,
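The per-line popen("addr2line ...") invocation is replaced by the srcline helpers. A fragment showing the new pairing, assuming util/srcline.h; note the result is released with free_srcline(), not free():

    char *srcline = get_srcline(map->dso, offset);

    /* ... use srcline, e.g. "file.c:123" ... */

    free_srcline(srcline);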
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index af755156d278..834b7b57b788 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -150,7 +150,7 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool print_lines,
bool full_paths, int min_pcnt, int max_lines);
-#ifdef SLANG_SUPPORT
+#ifdef HAVE_SLANG_SUPPORT
int symbol__tui_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
struct hist_browser_timer *hbt);
@@ -165,30 +165,6 @@ static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused,
}
#endif
-#ifdef GTK2_SUPPORT
-int symbol__gtk_annotate(struct symbol *sym, struct map *map,
- struct perf_evsel *evsel,
- struct hist_browser_timer *hbt);
-
-static inline int hist_entry__gtk_annotate(struct hist_entry *he,
- struct perf_evsel *evsel,
- struct hist_browser_timer *hbt)
-{
- return symbol__gtk_annotate(he->ms.sym, he->ms.map, evsel, hbt);
-}
-
-void perf_gtk__show_annotations(void);
-#else
-static inline int hist_entry__gtk_annotate(struct hist_entry *he __maybe_unused,
- struct perf_evsel *evsel __maybe_unused,
- struct hist_browser_timer *hbt __maybe_unused)
-{
- return 0;
-}
-
-static inline void perf_gtk__show_annotations(void) {}
-#endif
-
extern const char *disassembler_style;
#endif /* __PERF_ANNOTATE_H */
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 7ded71d19d75..a92770c98cc7 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -89,14 +89,14 @@ int build_id__sprintf(const u8 *build_id, int len, char *bf)
return raw - build_id;
}
-char *dso__build_id_filename(struct dso *self, char *bf, size_t size)
+char *dso__build_id_filename(struct dso *dso, char *bf, size_t size)
{
char build_id_hex[BUILD_ID_SIZE * 2 + 1];
- if (!self->has_build_id)
+ if (!dso->has_build_id)
return NULL;
- build_id__sprintf(self->build_id, sizeof(self->build_id), build_id_hex);
+ build_id__sprintf(dso->build_id, sizeof(dso->build_id), build_id_hex);
if (bf == NULL) {
if (asprintf(&bf, "%s/.build-id/%.2s/%s", buildid_dir,
build_id_hex, build_id_hex + 2) < 0)
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 26e367239873..7b176dd02e1a 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -70,8 +70,7 @@ extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2
extern char *perf_pathdup(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
-#ifndef HAVE_STRLCPY
+/* Matches the libc/libbsd function attribute so we declare this unconditionally: */
extern size_t strlcpy(char *dest, const char *src, size_t size);
-#endif
#endif /* __PERF_CACHE_H */
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 482f68081cd8..e3970e3eaacf 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -21,12 +21,6 @@
__thread struct callchain_cursor callchain_cursor;
-#define chain_for_each_child(child, parent) \
- list_for_each_entry(child, &parent->children, siblings)
-
-#define chain_for_each_child_safe(child, next, parent) \
- list_for_each_entry_safe(child, next, &parent->children, siblings)
-
static void
rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
enum chain_mode mode)
@@ -71,10 +65,16 @@ static void
__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
u64 min_hit)
{
+ struct rb_node *n;
struct callchain_node *child;
- chain_for_each_child(child, node)
+ n = rb_first(&node->rb_root_in);
+ while (n) {
+ child = rb_entry(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+
__sort_chain_flat(rb_root, child, min_hit);
+ }
if (node->hit && node->hit >= min_hit)
rb_insert_callchain(rb_root, node, CHAIN_FLAT);
@@ -94,11 +94,16 @@ sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
static void __sort_chain_graph_abs(struct callchain_node *node,
u64 min_hit)
{
+ struct rb_node *n;
struct callchain_node *child;
node->rb_root = RB_ROOT;
+ n = rb_first(&node->rb_root_in);
+
+ while (n) {
+ child = rb_entry(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
- chain_for_each_child(child, node) {
__sort_chain_graph_abs(child, min_hit);
if (callchain_cumul_hits(child) >= min_hit)
rb_insert_callchain(&node->rb_root, child,
@@ -117,13 +122,18 @@ sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
static void __sort_chain_graph_rel(struct callchain_node *node,
double min_percent)
{
+ struct rb_node *n;
struct callchain_node *child;
u64 min_hit;
node->rb_root = RB_ROOT;
min_hit = ceil(node->children_hit * min_percent);
- chain_for_each_child(child, node) {
+ n = rb_first(&node->rb_root_in);
+ while (n) {
+ child = rb_entry(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+
__sort_chain_graph_rel(child, min_percent);
if (callchain_cumul_hits(child) >= min_hit)
rb_insert_callchain(&node->rb_root, child,
@@ -173,19 +183,26 @@ create_child(struct callchain_node *parent, bool inherit_children)
return NULL;
}
new->parent = parent;
- INIT_LIST_HEAD(&new->children);
INIT_LIST_HEAD(&new->val);
if (inherit_children) {
- struct callchain_node *next;
+ struct rb_node *n;
+ struct callchain_node *child;
+
+ new->rb_root_in = parent->rb_root_in;
+ parent->rb_root_in = RB_ROOT;
- list_splice(&parent->children, &new->children);
- INIT_LIST_HEAD(&parent->children);
+ n = rb_first(&new->rb_root_in);
+ while (n) {
+ child = rb_entry(n, struct callchain_node, rb_node_in);
+ child->parent = new;
+ n = rb_next(n);
+ }
- chain_for_each_child(next, new)
- next->parent = new;
+ /* make it the first child */
+ rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
+ rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
}
- list_add_tail(&new->siblings, &parent->children);
return new;
}
@@ -223,7 +240,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
}
}
-static void
+static struct callchain_node *
add_child(struct callchain_node *parent,
struct callchain_cursor *cursor,
u64 period)
@@ -235,6 +252,19 @@ add_child(struct callchain_node *parent,
new->children_hit = 0;
new->hit = period;
+ return new;
+}
+
+static s64 match_chain(struct callchain_cursor_node *node,
+ struct callchain_list *cnode)
+{
+ struct symbol *sym = node->sym;
+
+ if (cnode->ms.sym && sym &&
+ callchain_param.key == CCKEY_FUNCTION)
+ return cnode->ms.sym->start - sym->start;
+ else
+ return cnode->ip - node->ip;
}
/*
@@ -272,9 +302,33 @@ split_add_child(struct callchain_node *parent,
/* create a new child for the new branch if any */
if (idx_total < cursor->nr) {
+ struct callchain_node *first;
+ struct callchain_list *cnode;
+ struct callchain_cursor_node *node;
+ struct rb_node *p, **pp;
+
parent->hit = 0;
- add_child(parent, cursor, period);
parent->children_hit += period;
+
+ node = callchain_cursor_current(cursor);
+ new = add_child(parent, cursor, period);
+
+ /*
+ * This is second child since we moved parent's children
+ * to new (first) child above.
+ */
+ p = parent->rb_root_in.rb_node;
+ first = rb_entry(p, struct callchain_node, rb_node_in);
+ cnode = list_first_entry(&first->val, struct callchain_list,
+ list);
+
+ if (match_chain(node, cnode) < 0)
+ pp = &p->rb_left;
+ else
+ pp = &p->rb_right;
+
+ rb_link_node(&new->rb_node_in, p, pp);
+ rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
} else {
parent->hit = period;
}
@@ -291,16 +345,40 @@ append_chain_children(struct callchain_node *root,
u64 period)
{
struct callchain_node *rnode;
+ struct callchain_cursor_node *node;
+ struct rb_node **p = &root->rb_root_in.rb_node;
+ struct rb_node *parent = NULL;
+
+ node = callchain_cursor_current(cursor);
+ if (!node)
+ return;
/* lookup in childrens */
- chain_for_each_child(rnode, root) {
- unsigned int ret = append_chain(rnode, cursor, period);
+ while (*p) {
+ s64 ret;
+ struct callchain_list *cnode;
- if (!ret)
+ parent = *p;
+ rnode = rb_entry(parent, struct callchain_node, rb_node_in);
+ cnode = list_first_entry(&rnode->val, struct callchain_list,
+ list);
+
+ /* just check first entry */
+ ret = match_chain(node, cnode);
+ if (ret == 0) {
+ append_chain(rnode, cursor, period);
goto inc_children_hit;
+ }
+
+ if (ret < 0)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
}
/* nothing in children, add to the current node */
- add_child(root, cursor, period);
+ rnode = add_child(root, cursor, period);
+ rb_link_node(&rnode->rb_node_in, parent, p);
+ rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);
inc_children_hit:
root->children_hit += period;
@@ -325,28 +403,20 @@ append_chain(struct callchain_node *root,
*/
list_for_each_entry(cnode, &root->val, list) {
struct callchain_cursor_node *node;
- struct symbol *sym;
node = callchain_cursor_current(cursor);
if (!node)
break;
- sym = node->sym;
-
- if (cnode->ms.sym && sym &&
- callchain_param.key == CCKEY_FUNCTION) {
- if (cnode->ms.sym->start != sym->start)
- break;
- } else if (cnode->ip != node->ip)
+ if (match_chain(node, cnode) != 0)
break;
- if (!found)
- found = true;
+ found = true;
callchain_cursor_advance(cursor);
}
- /* matches not, relay on the parent */
+ /* no match, rely on the parent */
if (!found) {
cursor->curr = curr_snap;
cursor->pos = start;
@@ -395,8 +465,9 @@ merge_chain_branch(struct callchain_cursor *cursor,
struct callchain_node *dst, struct callchain_node *src)
{
struct callchain_cursor_node **old_last = cursor->last;
- struct callchain_node *child, *next_child;
+ struct callchain_node *child;
struct callchain_list *list, *next_list;
+ struct rb_node *n;
int old_pos = cursor->nr;
int err = 0;
@@ -412,12 +483,16 @@ merge_chain_branch(struct callchain_cursor *cursor,
append_chain_children(dst, cursor, src->hit);
}
- chain_for_each_child_safe(child, next_child, src) {
+ n = rb_first(&src->rb_root_in);
+ while (n) {
+ child = container_of(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+ rb_erase(&child->rb_node_in, &src->rb_root_in);
+
err = merge_chain_branch(cursor, dst, child);
if (err)
break;
- list_del(&child->siblings);
free(child);
}
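Children of a callchain node are now kept in an rbtree (rb_root_in) keyed by each child's first callchain_list entry via match_chain(). A condensed sketch of the descent performed in append_chain_children() (the ret == 0 case, an existing matching child, is handled separately in the hunk above):

    struct rb_node **p = &root->rb_root_in.rb_node;
    struct rb_node *parent = NULL;

    while (*p) {
            struct callchain_node *rnode;
            struct callchain_list *cnode;

            parent = *p;
            rnode = rb_entry(parent, struct callchain_node, rb_node_in);
            cnode = list_first_entry(&rnode->val, struct callchain_list, list);

            if (match_chain(node, cnode) < 0)
                    p = &parent->rb_left;
            else
                    p = &parent->rb_right;
    }

    /* no matching child: add_child(), then rb_link_node()/rb_insert_color() */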
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 2b585bc308cf..4f7f989876ec 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -21,11 +21,11 @@ enum chain_order {
struct callchain_node {
struct callchain_node *parent;
- struct list_head siblings;
- struct list_head children;
struct list_head val;
- struct rb_node rb_node; /* to sort nodes in an rbtree */
- struct rb_root rb_root; /* sorted tree of children */
+ struct rb_node rb_node_in; /* to insert nodes in an rbtree */
+ struct rb_node rb_node; /* to sort nodes in an output tree */
+ struct rb_root rb_root_in; /* input tree of children */
+ struct rb_root rb_root; /* sorted output tree of children */
unsigned int val_nr;
u64 hit;
u64 children_hit;
@@ -86,13 +86,12 @@ extern __thread struct callchain_cursor callchain_cursor;
static inline void callchain_init(struct callchain_root *root)
{
- INIT_LIST_HEAD(&root->node.siblings);
- INIT_LIST_HEAD(&root->node.children);
INIT_LIST_HEAD(&root->node.val);
root->node.parent = NULL;
root->node.hit = 0;
root->node.children_hit = 0;
+ root->node.rb_root_in = RB_ROOT;
root->max_depth = 0;
}
@@ -147,6 +146,9 @@ static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
struct option;
+int record_parse_callchain(const char *arg, struct perf_record_opts *opts);
int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
+int record_callchain_opt(const struct option *opt, const char *arg, int unset);
+
extern const char record_callchain_help[];
#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
new file mode 100644
index 000000000000..7d09faf85cf1
--- /dev/null
+++ b/tools/perf/util/data.c
@@ -0,0 +1,120 @@
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "data.h"
+#include "util.h"
+
+static bool check_pipe(struct perf_data_file *file)
+{
+ struct stat st;
+ bool is_pipe = false;
+ int fd = perf_data_file__is_read(file) ?
+ STDIN_FILENO : STDOUT_FILENO;
+
+ if (!file->path) {
+ if (!fstat(fd, &st) && S_ISFIFO(st.st_mode))
+ is_pipe = true;
+ } else {
+ if (!strcmp(file->path, "-"))
+ is_pipe = true;
+ }
+
+ if (is_pipe)
+ file->fd = fd;
+
+ return file->is_pipe = is_pipe;
+}
+
+static int check_backup(struct perf_data_file *file)
+{
+ struct stat st;
+
+ if (!stat(file->path, &st) && st.st_size) {
+ /* TODO check errors properly */
+ char oldname[PATH_MAX];
+ snprintf(oldname, sizeof(oldname), "%s.old",
+ file->path);
+ unlink(oldname);
+ rename(file->path, oldname);
+ }
+
+ return 0;
+}
+
+static int open_file_read(struct perf_data_file *file)
+{
+ struct stat st;
+ int fd;
+
+ fd = open(file->path, O_RDONLY);
+ if (fd < 0) {
+ int err = errno;
+
+ pr_err("failed to open %s: %s", file->path, strerror(err));
+ if (err == ENOENT && !strcmp(file->path, "perf.data"))
+ pr_err(" (try 'perf record' first)");
+ pr_err("\n");
+ return -err;
+ }
+
+ if (fstat(fd, &st) < 0)
+ goto out_close;
+
+ if (!file->force && st.st_uid && (st.st_uid != geteuid())) {
+ pr_err("file %s not owned by current user or root\n",
+ file->path);
+ goto out_close;
+ }
+
+ if (!st.st_size) {
+ pr_info("zero-sized file (%s), nothing to do!\n",
+ file->path);
+ goto out_close;
+ }
+
+ file->size = st.st_size;
+ return fd;
+
+ out_close:
+ close(fd);
+ return -1;
+}
+
+static int open_file_write(struct perf_data_file *file)
+{
+ if (check_backup(file))
+ return -1;
+
+ return open(file->path, O_CREAT|O_RDWR|O_TRUNC, S_IRUSR|S_IWUSR);
+}
+
+static int open_file(struct perf_data_file *file)
+{
+ int fd;
+
+ fd = perf_data_file__is_read(file) ?
+ open_file_read(file) : open_file_write(file);
+
+ file->fd = fd;
+ return fd < 0 ? -1 : 0;
+}
+
+int perf_data_file__open(struct perf_data_file *file)
+{
+ if (check_pipe(file))
+ return 0;
+
+ if (!file->path)
+ file->path = "perf.data";
+
+ return open_file(file);
+}
+
+void perf_data_file__close(struct perf_data_file *file)
+{
+ close(file->fd);
+}
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
new file mode 100644
index 000000000000..8c2df80152a5
--- /dev/null
+++ b/tools/perf/util/data.h
@@ -0,0 +1,48 @@
+#ifndef __PERF_DATA_H
+#define __PERF_DATA_H
+
+#include <stdbool.h>
+
+enum perf_data_mode {
+ PERF_DATA_MODE_WRITE,
+ PERF_DATA_MODE_READ,
+};
+
+struct perf_data_file {
+ const char *path;
+ int fd;
+ bool is_pipe;
+ bool force;
+ unsigned long size;
+ enum perf_data_mode mode;
+};
+
+static inline bool perf_data_file__is_read(struct perf_data_file *file)
+{
+ return file->mode == PERF_DATA_MODE_READ;
+}
+
+static inline bool perf_data_file__is_write(struct perf_data_file *file)
+{
+ return file->mode == PERF_DATA_MODE_WRITE;
+}
+
+static inline int perf_data_file__is_pipe(struct perf_data_file *file)
+{
+ return file->is_pipe;
+}
+
+static inline int perf_data_file__fd(struct perf_data_file *file)
+{
+ return file->fd;
+}
+
+static inline unsigned long perf_data_file__size(struct perf_data_file *file)
+{
+ return file->size;
+}
+
+int perf_data_file__open(struct perf_data_file *file);
+void perf_data_file__close(struct perf_data_file *file);
+
+#endif /* __PERF_DATA_H */
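The new perf_data_file abstraction describes the data file once and hides the pipe special cases ("-" or stdin/stdout). A sketch of a reader, assuming util/data.h:

    struct perf_data_file file = {
            .path = "perf.data",
            .mode = PERF_DATA_MODE_READ,
    };

    if (perf_data_file__open(&file) < 0)
            return -1;

    /* ... read from perf_data_file__fd(&file),
     *     up to perf_data_file__size(&file) bytes ... */

    perf_data_file__close(&file);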
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index e3c1ff8512c8..af4c687cc49b 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -7,19 +7,20 @@
char dso__symtab_origin(const struct dso *dso)
{
static const char origin[] = {
- [DSO_BINARY_TYPE__KALLSYMS] = 'k',
- [DSO_BINARY_TYPE__VMLINUX] = 'v',
- [DSO_BINARY_TYPE__JAVA_JIT] = 'j',
- [DSO_BINARY_TYPE__DEBUGLINK] = 'l',
- [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
- [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
- [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
- [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
- [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
- [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
- [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
- [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
- [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
+ [DSO_BINARY_TYPE__KALLSYMS] = 'k',
+ [DSO_BINARY_TYPE__VMLINUX] = 'v',
+ [DSO_BINARY_TYPE__JAVA_JIT] = 'j',
+ [DSO_BINARY_TYPE__DEBUGLINK] = 'l',
+ [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
+ [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
+ [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
+ [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
+ [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
+ [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
+ [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
+ [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
+ [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
+ [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
};
if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
@@ -64,6 +65,28 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
symbol_conf.symfs, dso->long_name);
break;
+ case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
+ {
+ char *last_slash;
+ size_t len;
+ size_t dir_size;
+
+ last_slash = dso->long_name + dso->long_name_len;
+ while (last_slash != dso->long_name && *last_slash != '/')
+ last_slash--;
+
+ len = scnprintf(file, size, "%s", symbol_conf.symfs);
+ dir_size = last_slash - dso->long_name + 2;
+ if (dir_size > (size - len)) {
+ ret = -1;
+ break;
+ }
+ len += scnprintf(file + len, dir_size, "%s", dso->long_name);
+ len += scnprintf(file + len , size - len, ".debug%s",
+ last_slash);
+ break;
+ }
+
case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
if (!dso->has_build_id) {
ret = -1;
@@ -427,6 +450,7 @@ struct dso *dso__new(const char *name)
dso->rel = 0;
dso->sorted_by_name = 0;
dso->has_build_id = 0;
+ dso->has_srcline = 1;
dso->kernel = DSO_TYPE_USER;
dso->needs_swap = DSO_SWAP__UNSET;
INIT_LIST_HEAD(&dso->node);
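The new DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO case rewrites the DSO path so the debug file is looked up in a .debug/ subdirectory next to the binary. A standalone illustration of the transformation (hypothetical path, simplified string handling):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *long_name = "/usr/lib/libfoo.so";   /* hypothetical DSO */
            const char *last_slash = strrchr(long_name, '/');
            char file[256];

            snprintf(file, sizeof(file), "%.*s/.debug%s",
                     (int)(last_slash - long_name), long_name, last_slash);

            printf("%s\n", file);   /* prints /usr/lib/.debug/libfoo.so */
            return 0;
    }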
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index b793053335d6..9ac666abbe7e 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -6,6 +6,7 @@
#include <stdbool.h>
#include "types.h"
#include "map.h"
+#include "build-id.h"
enum dso_binary_type {
DSO_BINARY_TYPE__KALLSYMS = 0,
@@ -23,6 +24,7 @@ enum dso_binary_type {
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
DSO_BINARY_TYPE__KCORE,
DSO_BINARY_TYPE__GUEST_KCORE,
+ DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
@@ -81,6 +83,7 @@ struct dso {
enum dso_binary_type data_type;
u8 adjust_symbols:1;
u8 has_build_id:1;
+ u8 has_srcline:1;
u8 hit:1;
u8 annotate_warned:1;
u8 sname_alloc:1;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 9b393e7dca6f..49096ea58a15 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -187,7 +187,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
return -1;
}
- event->header.type = PERF_RECORD_MMAP2;
+ event->header.type = PERF_RECORD_MMAP;
/*
* Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
*/
@@ -198,7 +198,6 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
char prot[5];
char execname[PATH_MAX];
char anonstr[] = "//anon";
- unsigned int ino;
size_t size;
ssize_t n;
@@ -209,15 +208,12 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
strcpy(execname, "");
/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
- n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
- &event->mmap2.start, &event->mmap2.len, prot,
- &event->mmap2.pgoff, &event->mmap2.maj,
- &event->mmap2.min,
- &ino, execname);
-
- event->mmap2.ino = (u64)ino;
+ n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n",
+ &event->mmap.start, &event->mmap.len, prot,
+ &event->mmap.pgoff,
+ execname);
- if (n != 8)
+ if (n != 5)
continue;
if (prot[2] != 'x')
@@ -227,15 +223,15 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
strcpy(execname, anonstr);
size = strlen(execname) + 1;
- memcpy(event->mmap2.filename, execname, size);
+ memcpy(event->mmap.filename, execname, size);
size = PERF_ALIGN(size, sizeof(u64));
- event->mmap2.len -= event->mmap.start;
- event->mmap2.header.size = (sizeof(event->mmap2) -
- (sizeof(event->mmap2.filename) - size));
- memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
- event->mmap2.header.size += machine->id_hdr_size;
- event->mmap2.pid = tgid;
- event->mmap2.tid = pid;
+ event->mmap.len -= event->mmap.start;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size));
+ memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+ event->mmap.header.size += machine->id_hdr_size;
+ event->mmap.pid = tgid;
+ event->mmap.tid = pid;
if (process(tool, event, &synth_sample, machine) != 0) {
rc = -1;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index c67ecc457d29..752709ccfb00 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -61,6 +61,12 @@ struct read_event {
u64 id;
};
+struct throttle_event {
+ struct perf_event_header header;
+ u64 time;
+ u64 id;
+ u64 stream_id;
+};
#define PERF_SAMPLE_MASK \
(PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
@@ -69,6 +75,9 @@ struct read_event {
PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \
PERF_SAMPLE_IDENTIFIER)
+/* perf sample has 16 bits size limit */
+#define PERF_SAMPLE_MAX_SIZE (1 << 16)
+
struct sample_event {
struct perf_event_header header;
u64 array[];
@@ -111,6 +120,7 @@ struct perf_sample {
u64 stream_id;
u64 period;
u64 weight;
+ u64 transaction;
u32 cpu;
u32 raw_size;
u64 data_src;
@@ -177,6 +187,7 @@ union perf_event {
struct fork_event fork;
struct lost_event lost;
struct read_event read;
+ struct throttle_event throttle;
struct sample_event sample;
struct attr_event attr;
struct event_type_event event_type;
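The new PERF_SAMPLE_MAX_SIZE constant encodes the fact that perf_event_header.size is a 16-bit field, so no single record can exceed 64 KiB. A trivial standalone check of that relationship:

    #include <assert.h>
    #include <stdint.h>

    #define PERF_SAMPLE_MAX_SIZE (1 << 16)  /* perf sample has a 16-bit size limit */

    int main(void)
    {
            assert(PERF_SAMPLE_MAX_SIZE - 1 == UINT16_MAX);
            return 0;
    }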
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index f9f77bee0b1b..0582f67fbefc 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -18,6 +18,7 @@
#include <unistd.h>
#include "parse-events.h"
+#include "parse-options.h"
#include <sys/mman.h>
@@ -49,6 +50,18 @@ struct perf_evlist *perf_evlist__new(void)
return evlist;
}
+struct perf_evlist *perf_evlist__new_default(void)
+{
+ struct perf_evlist *evlist = perf_evlist__new();
+
+ if (evlist && perf_evlist__add_default(evlist)) {
+ perf_evlist__delete(evlist);
+ evlist = NULL;
+ }
+
+ return evlist;
+}
+
/**
* perf_evlist__set_id_pos - set the positions of event ids.
* @evlist: selected event list
@@ -527,7 +540,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
if ((old & md->mask) + size != ((old + size) & md->mask)) {
unsigned int offset = old;
unsigned int len = min(sizeof(*event), size), cpy;
- void *dst = &md->event_copy;
+ void *dst = md->event_copy;
do {
cpy = min(md->mask + 1 - (offset & md->mask), len);
@@ -537,7 +550,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
len -= cpy;
} while (len);
- event = &md->event_copy;
+ event = (union perf_event *) md->event_copy;
}
old += size;
@@ -545,12 +558,19 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
md->prev = old;
- if (!evlist->overwrite)
- perf_mmap__write_tail(md, old);
-
return event;
}
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+{
+ if (!evlist->overwrite) {
+ struct perf_mmap *md = &evlist->mmap[idx];
+ unsigned int old = md->prev;
+
+ perf_mmap__write_tail(md, old);
+ }
+}
+
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
if (evlist->mmap[idx].base != NULL) {
@@ -595,9 +615,36 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist,
return 0;
}
-static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
+static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
+ int prot, int mask, int cpu, int thread,
+ int *output)
{
struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ int fd = FD(evsel, cpu, thread);
+
+ if (*output == -1) {
+ *output = fd;
+ if (__perf_evlist__mmap(evlist, idx, prot, mask,
+ *output) < 0)
+ return -1;
+ } else {
+ if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
+ return -1;
+ }
+
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
+ int mask)
+{
int cpu, thread;
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads);
@@ -607,23 +654,9 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
int output = -1;
for (thread = 0; thread < nr_threads; thread++) {
- list_for_each_entry(evsel, &evlist->entries, node) {
- int fd = FD(evsel, cpu, thread);
-
- if (output == -1) {
- output = fd;
- if (__perf_evlist__mmap(evlist, cpu,
- prot, mask, output) < 0)
- goto out_unmap;
- } else {
- if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
- goto out_unmap;
- }
-
- if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
- perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
- goto out_unmap;
- }
+ if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
+ cpu, thread, &output))
+ goto out_unmap;
}
}
@@ -635,9 +668,9 @@ out_unmap:
return -1;
}
-static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
+ int mask)
{
- struct perf_evsel *evsel;
int thread;
int nr_threads = thread_map__nr(evlist->threads);
@@ -645,23 +678,9 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
for (thread = 0; thread < nr_threads; thread++) {
int output = -1;
- list_for_each_entry(evsel, &evlist->entries, node) {
- int fd = FD(evsel, 0, thread);
-
- if (output == -1) {
- output = fd;
- if (__perf_evlist__mmap(evlist, thread,
- prot, mask, output) < 0)
- goto out_unmap;
- } else {
- if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
- goto out_unmap;
- }
-
- if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
- perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
- goto out_unmap;
- }
+ if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
+ thread, &output))
+ goto out_unmap;
}
return 0;
@@ -672,20 +691,76 @@ out_unmap:
return -1;
}
-/** perf_evlist__mmap - Create per cpu maps to receive events
- *
- * @evlist - list of events
- * @pages - map length in pages
- * @overwrite - overwrite older events?
- *
- * If overwrite is false the user needs to signal event consuption using:
- *
- * struct perf_mmap *m = &evlist->mmap[cpu];
- * unsigned int head = perf_mmap__read_head(m);
+static size_t perf_evlist__mmap_size(unsigned long pages)
+{
+ /* 512 kiB: default amount of unprivileged mlocked memory */
+ if (pages == UINT_MAX)
+ pages = (512 * 1024) / page_size;
+ else if (!is_power_of_2(pages))
+ return 0;
+
+ return (pages + 1) * page_size;
+}
+
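The "+ 1" page in perf_evlist__mmap_size() accounts for the perf_event_mmap_page control page that precedes the power-of-two data area; the ring-buffer mask computed further down excludes it again. Roughly, under that layout assumption:

	size_t len  = perf_evlist__mmap_size(pages); /* (pages + 1) * page_size */
	int    mask = len - page_size - 1;           /* == pages * page_size - 1 */

	/* layout: [ perf_event_mmap_page ][ 2^n data pages ... ]
	 *           1 control page          masked ring buffer   */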
+int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ unsigned int *mmap_pages = opt->value;
+ unsigned long pages, val;
+ size_t size;
+ static struct parse_tag tags[] = {
+ { .tag = 'B', .mult = 1 },
+ { .tag = 'K', .mult = 1 << 10 },
+ { .tag = 'M', .mult = 1 << 20 },
+ { .tag = 'G', .mult = 1 << 30 },
+ { .tag = 0 },
+ };
+
+ val = parse_tag_value(str, tags);
+ if (val != (unsigned long) -1) {
+ /* we got file size value */
+ pages = PERF_ALIGN(val, page_size) / page_size;
+ if (pages < (1UL << 31) && !is_power_of_2(pages)) {
+ pages = next_pow2(pages);
+ pr_info("rounding mmap pages size to %lu (%lu pages)\n",
+ pages * page_size, pages);
+ }
+ } else {
+ /* we got pages count value */
+ char *eptr;
+ pages = strtoul(str, &eptr, 10);
+ if (*eptr != '\0') {
+ pr_err("failed to parse --mmap_pages/-m value\n");
+ return -1;
+ }
+ }
+
+ if (pages > UINT_MAX || pages > SIZE_MAX / page_size) {
+ pr_err("--mmap_pages/-m value too big\n");
+ return -1;
+ }
+
+ size = perf_evlist__mmap_size(pages);
+ if (!size) {
+ pr_err("--mmap_pages/-m value must be a power of two.");
+ return -1;
+ }
+
+ *mmap_pages = pages;
+ return 0;
+}
+
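Wired up as an option callback, this parser lets -m/--mmap-pages take either a page count or a size with a B/K/M/G suffix, rounded up to a power-of-two number of pages. A sketch of typical use; the OPT_CALLBACK hookup and the 4 KiB page size are assumptions for the example, not part of this hunk:

	/* in a builtin's option table */
	OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
		     "number of mmap data pages", perf_evlist__parse_mmap_pages),

	/* assuming 4 KiB pages:
	 *   -m 16   -> 16 pages (already a power of two)
	 *   -m 129K -> 33 pages, rounded up to 64 pages
	 *   -m 1M   -> 256 pages
	 */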
+/**
+ * perf_evlist__mmap - Create mmaps to receive events.
+ * @evlist: list of events
+ * @pages: map length in pages
+ * @overwrite: overwrite older events?
*
- * perf_mmap__write_tail(m, head)
+ * If @overwrite is %false the user needs to signal event consumption using
+ * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
+ * automatically.
*
- * Using perf_evlist__read_on_cpu does this automatically.
+ * Return: %0 on success, negative error code otherwise.
*/
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
bool overwrite)
@@ -695,14 +770,6 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
const struct thread_map *threads = evlist->threads;
int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
- /* 512 kiB: default amount of unprivileged mlocked memory */
- if (pages == UINT_MAX)
- pages = (512 * 1024) / page_size;
- else if (!is_power_of_2(pages))
- return -EINVAL;
-
- mask = pages * page_size - 1;
-
if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
return -ENOMEM;
@@ -710,7 +777,9 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
return -ENOMEM;
evlist->overwrite = overwrite;
- evlist->mmap_len = (pages + 1) * page_size;
+ evlist->mmap_len = perf_evlist__mmap_size(pages);
+ pr_debug("mmap size %zuB\n", evlist->mmap_len);
+ mask = evlist->mmap_len - page_size - 1;
list_for_each_entry(evsel, &evlist->entries, node) {
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
@@ -1066,3 +1135,66 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
return printed + fprintf(fp, "\n");;
}
+
+int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
+ int err, char *buf, size_t size)
+{
+ char sbuf[128];
+
+ switch (err) {
+ case ENOENT:
+ scnprintf(buf, size, "%s",
+ "Error:\tUnable to find debugfs\n"
+ "Hint:\tWas your kernel was compiled with debugfs support?\n"
+ "Hint:\tIs the debugfs filesystem mounted?\n"
+ "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
+ break;
+ case EACCES:
+ scnprintf(buf, size,
+ "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
+ "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
+ debugfs_mountpoint, debugfs_mountpoint);
+ break;
+ default:
+ scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
+ break;
+ }
+
+ return 0;
+}
+
+int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
+ int err, char *buf, size_t size)
+{
+ int printed, value;
+ char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+
+ switch (err) {
+ case EACCES:
+ case EPERM:
+ printed = scnprintf(buf, size,
+ "Error:\t%s.\n"
+ "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
+
+ if (filename__read_int("/proc/sys/kernel/perf_event_paranoid", &value))
+ break;
+
+ printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
+
+ if (value >= 2) {
+ printed += scnprintf(buf + printed, size - printed,
+ "For your workloads it needs to be <= 1\nHint:\t");
+ }
+ printed += scnprintf(buf + printed, size - printed,
+ "For system wide tracing it needs to be set to -1");
+
+ printed += scnprintf(buf + printed, size - printed,
+ ".\nHint:\tThe current value is %d.", value);
+ break;
+ default:
+ scnprintf(buf, size, "%s", emsg);
+ break;
+ }
+
+ return 0;
+}
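Splitting the tail update out into perf_evlist__mmap_consume() lets callers keep the kernel from overwriting an event until they have finished with it. A minimal polling loop under that model (handle_event() is a hypothetical handler, error handling omitted):

	union perf_event *event;
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			/* process while the ring-buffer data is still valid */
			handle_event(event);
			perf_evlist__mmap_consume(evlist, i); /* now advance the tail */
		}
	}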
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 880d7139d2fb..6e8acc9abe38 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -21,7 +21,7 @@ struct perf_mmap {
void *base;
int mask;
unsigned int prev;
- union perf_event event_copy;
+ char event_copy[PERF_SAMPLE_MAX_SIZE];
};
struct perf_evlist {
@@ -31,7 +31,7 @@ struct perf_evlist {
int nr_groups;
int nr_fds;
int nr_mmaps;
- int mmap_len;
+ size_t mmap_len;
int id_pos;
int is_pos;
u64 combined_sample_type;
@@ -53,6 +53,7 @@ struct perf_evsel_str_handler {
};
struct perf_evlist *perf_evlist__new(void);
+struct perf_evlist *perf_evlist__new_default(void);
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
struct thread_map *threads);
void perf_evlist__exit(struct perf_evlist *evlist);
@@ -89,6 +90,8 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
+
int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__close(struct perf_evlist *evlist);
@@ -103,6 +106,10 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
bool want_signal);
int perf_evlist__start_workload(struct perf_evlist *evlist);
+int perf_evlist__parse_mmap_pages(const struct option *opt,
+ const char *str,
+ int unset);
+
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
bool overwrite);
void perf_evlist__munmap(struct perf_evlist *evlist);
@@ -163,6 +170,9 @@ static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
+int perf_evlist__strerror_tp(struct perf_evlist *evlist, int err, char *buf, size_t size);
+int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
+
static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->base;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 0ce9febf1ba0..3a334f001997 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -678,9 +678,11 @@ void perf_evsel__config(struct perf_evsel *evsel,
attr->sample_type |= PERF_SAMPLE_WEIGHT;
attr->mmap = track;
- attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
+ if (opts->sample_transaction)
+ attr->sample_type |= PERF_SAMPLE_TRANSACTION;
+
/*
* XXX see the function comment above
*
@@ -983,6 +985,7 @@ static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
ret += PRINT_ATTR2(exclude_host, exclude_guest);
ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
"excl.callchain_user", exclude_callchain_user);
+ ret += PRINT_ATTR_U32(mmap2);
ret += PRINT_ATTR_U32(wakeup_events);
ret += PRINT_ATTR_U32(wakeup_watermark);
@@ -1214,6 +1217,7 @@ static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
sample->pid = u.val32[0];
sample->tid = u.val32[1];
+ array--;
}
return 0;
@@ -1453,6 +1457,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array = (void *)array + sz;
OVERFLOW_CHECK_u64(array);
data->user_stack.size = *array++;
+ if (WARN_ONCE(data->user_stack.size > sz,
+ "user stack dump failure\n"))
+ return -EFAULT;
}
}
@@ -1470,6 +1477,12 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array++;
}
+ data->transaction = 0;
+ if (type & PERF_SAMPLE_TRANSACTION) {
+ data->transaction = *array;
+ array++;
+ }
+
return 0;
}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 4a7bdc713bab..5aa68cddc7d9 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -197,6 +197,12 @@ static inline bool perf_evsel__match2(struct perf_evsel *e1,
(e1->attr.config == e2->attr.config);
}
+#define perf_evsel__cmp(a, b) \
+ ((a) && \
+ (b) && \
+ (a)->attr.type == (b)->attr.type && \
+ (a)->attr.config == (b)->attr.config)
+
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
int cpu, int thread, bool scale);
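Unlike the strcmp()-style comparators used for sorting, perf_evsel__cmp() is a boolean: true when both evsels are non-NULL and count the same type/config pair. An illustrative lookup loop (the surrounding function and the evsel being searched for are assumed):

	struct perf_evsel *pos;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (perf_evsel__cmp(pos, evsel))
			return pos;	/* same attr.type and attr.config */
	}
	return NULL;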
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh
index 3ac38031d534..36a885d2cd22 100755
--- a/tools/perf/util/generate-cmdlist.sh
+++ b/tools/perf/util/generate-cmdlist.sh
@@ -22,7 +22,7 @@ do
}' "Documentation/perf-$cmd.txt"
done
-echo "#ifdef LIBELF_SUPPORT"
+echo "#ifdef HAVE_LIBELF_SUPPORT"
sed -n -e 's/^perf-\([^ ]*\)[ ].* full.*/\1/p' command-list.txt |
sort |
while read cmd
@@ -35,5 +35,5 @@ do
p
}' "Documentation/perf-$cmd.txt"
done
-echo "#endif /* LIBELF_SUPPORT */"
+echo "#endif /* HAVE_LIBELF_SUPPORT */"
echo "};"
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index c3e5a3b817ab..26d9520a0c1b 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -22,6 +22,7 @@
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
+#include "data.h"
static bool no_buildid_cache = false;
@@ -2189,7 +2190,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
struct header_print_data hd;
struct perf_header *header = &session->header;
- int fd = session->fd;
+ int fd = perf_data_file__fd(session->file);
hd.fp = fp;
hd.full = full;
@@ -2650,7 +2651,8 @@ static int perf_header__read_pipe(struct perf_session *session)
struct perf_header *header = &session->header;
struct perf_pipe_file_header f_header;
- if (perf_file_header__read_pipe(&f_header, header, session->fd,
+ if (perf_file_header__read_pipe(&f_header, header,
+ perf_data_file__fd(session->file),
session->repipe) < 0) {
pr_debug("incompatible file format\n");
return -EINVAL;
@@ -2751,18 +2753,19 @@ static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
int perf_session__read_header(struct perf_session *session)
{
+ struct perf_data_file *file = session->file;
struct perf_header *header = &session->header;
struct perf_file_header f_header;
struct perf_file_attr f_attr;
u64 f_id;
int nr_attrs, nr_ids, i, j;
- int fd = session->fd;
+ int fd = perf_data_file__fd(file);
session->evlist = perf_evlist__new();
if (session->evlist == NULL)
return -ENOMEM;
- if (session->fd_pipe)
+ if (perf_data_file__is_pipe(file))
return perf_header__read_pipe(session);
if (perf_file_header__read(&f_header, header, fd) < 0)
@@ -2777,7 +2780,7 @@ int perf_session__read_header(struct perf_session *session)
if (f_header.data.size == 0) {
pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
"Was the 'perf record' command properly terminated?\n",
- session->filename);
+ file->path);
}
nr_attrs = f_header.attrs.size / f_header.attr_size;
@@ -2990,18 +2993,19 @@ int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
struct perf_session *session)
{
ssize_t size_read, padding, size = event->tracing_data.size;
- off_t offset = lseek(session->fd, 0, SEEK_CUR);
+ int fd = perf_data_file__fd(session->file);
+ off_t offset = lseek(fd, 0, SEEK_CUR);
char buf[BUFSIZ];
/* setup for reading amidst mmap */
- lseek(session->fd, offset + sizeof(struct tracing_data_event),
+ lseek(fd, offset + sizeof(struct tracing_data_event),
SEEK_SET);
- size_read = trace_report(session->fd, &session->pevent,
+ size_read = trace_report(fd, &session->pevent,
session->repipe);
padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
- if (readn(session->fd, buf, padding) < 0) {
+ if (readn(fd, buf, padding) < 0) {
pr_err("%s: reading input file", __func__);
return -1;
}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 9ff6cf3e9a99..7e80253074b0 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -160,6 +160,10 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
+
+ if (h->transaction)
+ hists__new_col_len(hists, HISTC_TRANSACTION,
+ hist_entry__transaction_len());
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
@@ -346,7 +350,7 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
- int cmp;
+ int64_t cmp;
p = &hists->entries_in->rb_node;
@@ -395,6 +399,7 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
if (!he)
return NULL;
+ hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
@@ -402,7 +407,7 @@ out:
return he;
}
-struct hist_entry *__hists__add_mem_entry(struct hists *self,
+struct hist_entry *__hists__add_mem_entry(struct hists *hists,
struct addr_location *al,
struct symbol *sym_parent,
struct mem_info *mi,
@@ -425,14 +430,14 @@ struct hist_entry *__hists__add_mem_entry(struct hists *self,
.level = al->level,
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent),
- .hists = self,
+ .hists = hists,
.mem_info = mi,
.branch_info = NULL,
};
- return add_hist_entry(self, &entry, al, period, weight);
+ return add_hist_entry(hists, &entry, al, period, weight);
}
-struct hist_entry *__hists__add_branch_entry(struct hists *self,
+struct hist_entry *__hists__add_branch_entry(struct hists *hists,
struct addr_location *al,
struct symbol *sym_parent,
struct branch_info *bi,
@@ -456,17 +461,17 @@ struct hist_entry *__hists__add_branch_entry(struct hists *self,
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent),
.branch_info = bi,
- .hists = self,
+ .hists = hists,
.mem_info = NULL,
};
- return add_hist_entry(self, &entry, al, period, weight);
+ return add_hist_entry(hists, &entry, al, period, weight);
}
-struct hist_entry *__hists__add_entry(struct hists *self,
+struct hist_entry *__hists__add_entry(struct hists *hists,
struct addr_location *al,
struct symbol *sym_parent, u64 period,
- u64 weight)
+ u64 weight, u64 transaction)
{
struct hist_entry entry = {
.thread = al->thread,
@@ -484,12 +489,13 @@ struct hist_entry *__hists__add_entry(struct hists *self,
},
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent),
- .hists = self,
+ .hists = hists,
.branch_info = NULL,
.mem_info = NULL,
+ .transaction = transaction,
};
- return add_hist_entry(self, &entry, al, period, weight);
+ return add_hist_entry(hists, &entry, al, period, weight);
}
int64_t
@@ -530,6 +536,7 @@ void hist_entry__free(struct hist_entry *he)
{
free(he->branch_info);
free(he->mem_info);
+ free_srcline(he->srcline);
free(he);
}
@@ -598,7 +605,7 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
hists__filter_entry_by_symbol(hists, he);
}
-void hists__collapse_resort(struct hists *hists)
+void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
struct rb_root *root;
struct rb_node *next;
@@ -625,6 +632,8 @@ void hists__collapse_resort(struct hists *hists)
*/
hists__apply_filters(hists, n);
}
+ if (prog)
+ ui_progress__update(prog, 1);
}
}
@@ -884,7 +893,7 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
- int cmp;
+ int64_t cmp;
if (sort__need_collapse)
root = &hists->entries_collapsed;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 1329b6b6ffe6..9d2d022cdb79 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -5,6 +5,8 @@
#include <pthread.h>
#include "callchain.h"
#include "header.h"
+#include "color.h"
+#include "ui/progress.h"
extern struct callchain_param callchain_param;
@@ -45,6 +47,8 @@ enum hist_column {
HISTC_CPU,
HISTC_SRCLINE,
HISTC_MISPREDICT,
+ HISTC_IN_TX,
+ HISTC_ABORT,
HISTC_SYMBOL_FROM,
HISTC_SYMBOL_TO,
HISTC_DSO_FROM,
@@ -57,6 +61,7 @@ enum hist_column {
HISTC_MEM_TLB,
HISTC_MEM_LVL,
HISTC_MEM_SNOOP,
+ HISTC_TRANSACTION,
HISTC_NR_COLS, /* Last entry */
};
@@ -82,9 +87,10 @@ struct hists {
struct hist_entry *__hists__add_entry(struct hists *self,
struct addr_location *al,
struct symbol *parent, u64 period,
- u64 weight);
+ u64 weight, u64 transaction);
int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
+int hist_entry__transaction_len(void);
int hist_entry__sort_snprintf(struct hist_entry *self, char *bf, size_t size,
struct hists *hists);
void hist_entry__free(struct hist_entry *);
@@ -104,7 +110,7 @@ struct hist_entry *__hists__add_mem_entry(struct hists *self,
u64 weight);
void hists__output_resort(struct hists *self);
-void hists__collapse_resort(struct hists *self);
+void hists__collapse_resort(struct hists *self, struct ui_progress *prog);
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
void hists__output_recalc_col_len(struct hists *hists, int max_rows);
@@ -175,6 +181,18 @@ void perf_hpp__init(void);
void perf_hpp__column_register(struct perf_hpp_fmt *format);
void perf_hpp__column_enable(unsigned col);
+static inline size_t perf_hpp__use_color(void)
+{
+ return !symbol_conf.field_sep;
+}
+
+static inline size_t perf_hpp__color_overhead(void)
+{
+ return perf_hpp__use_color() ?
+ (COLOR_MAXLEN + sizeof(PERF_COLOR_RESET)) * PERF_HPP__MAX_INDEX
+ : 0;
+}
+
struct perf_evlist;
struct hist_browser_timer {
@@ -183,7 +201,7 @@ struct hist_browser_timer {
int refresh;
};
-#ifdef SLANG_SUPPORT
+#ifdef HAVE_SLANG_SUPPORT
#include "../ui/keysyms.h"
int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
struct hist_browser_timer *hbt);
@@ -224,20 +242,5 @@ static inline int script_browse(const char *script_opt __maybe_unused)
#define K_SWITCH_INPUT_DATA -3000
#endif
-#ifdef GTK2_SUPPORT
-int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help,
- struct hist_browser_timer *hbt __maybe_unused,
- float min_pcnt);
-#else
-static inline
-int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused,
- const char *help __maybe_unused,
- struct hist_browser_timer *hbt __maybe_unused,
- float min_pcnt __maybe_unused)
-{
- return 0;
-}
-#endif
-
unsigned int hists__sort_list_width(struct hists *self);
#endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/include/dwarf-regs.h b/tools/perf/util/include/dwarf-regs.h
index cf6727e99c44..8f149655f497 100644
--- a/tools/perf/util/include/dwarf-regs.h
+++ b/tools/perf/util/include/dwarf-regs.h
@@ -1,7 +1,7 @@
#ifndef _PERF_DWARF_REGS_H_
#define _PERF_DWARF_REGS_H_
-#ifdef DWARF_SUPPORT
+#ifdef HAVE_DWARF_SUPPORT
const char *get_arch_regstr(unsigned int n);
#endif
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
index 96b919dae11c..b003ad7200b2 100644
--- a/tools/perf/util/include/linux/compiler.h
+++ b/tools/perf/util/include/linux/compiler.h
@@ -2,20 +2,29 @@
#define _PERF_LINUX_COMPILER_H_
#ifndef __always_inline
-#define __always_inline inline
+# define __always_inline inline __attribute__((always_inline))
#endif
+
#define __user
+
#ifndef __attribute_const__
-#define __attribute_const__
+# define __attribute_const__
#endif
#ifndef __maybe_unused
-#define __maybe_unused __attribute__((unused))
+# define __maybe_unused __attribute__((unused))
+#endif
+
+#ifndef __packed
+# define __packed __attribute__((__packed__))
#endif
-#define __packed __attribute__((__packed__))
#ifndef __force
-#define __force
+# define __force
+#endif
+
+#ifndef __weak
+# define __weak __attribute__((weak))
#endif
#endif
diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c
index 11a8d86f7fea..89715b64a315 100644
--- a/tools/perf/util/intlist.c
+++ b/tools/perf/util/intlist.c
@@ -20,6 +20,7 @@ static struct rb_node *intlist__node_new(struct rblist *rblist __maybe_unused,
if (node != NULL) {
node->i = i;
+ node->priv = NULL;
rc = &node->rb_node;
}
@@ -57,22 +58,36 @@ void intlist__remove(struct intlist *ilist, struct int_node *node)
rblist__remove_node(&ilist->rblist, &node->rb_node);
}
-struct int_node *intlist__find(struct intlist *ilist, int i)
+static struct int_node *__intlist__findnew(struct intlist *ilist,
+ int i, bool create)
{
- struct int_node *node;
+ struct int_node *node = NULL;
struct rb_node *rb_node;
if (ilist == NULL)
return NULL;
- node = NULL;
- rb_node = rblist__find(&ilist->rblist, (void *)((long)i));
+ if (create)
+ rb_node = rblist__findnew(&ilist->rblist, (void *)((long)i));
+ else
+ rb_node = rblist__find(&ilist->rblist, (void *)((long)i));
+
if (rb_node)
node = container_of(rb_node, struct int_node, rb_node);
return node;
}
+struct int_node *intlist__find(struct intlist *ilist, int i)
+{
+ return __intlist__findnew(ilist, i, false);
+}
+
+struct int_node *intlist__findnew(struct intlist *ilist, int i)
+{
+ return __intlist__findnew(ilist, i, true);
+}
+
static int intlist__parse_list(struct intlist *ilist, const char *s)
{
char *sep;
diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h
index 62351dad848f..aa6877d36858 100644
--- a/tools/perf/util/intlist.h
+++ b/tools/perf/util/intlist.h
@@ -9,6 +9,7 @@
struct int_node {
struct rb_node rb_node;
int i;
+ void *priv;
};
struct intlist {
@@ -23,6 +24,7 @@ int intlist__add(struct intlist *ilist, int i);
struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx);
struct int_node *intlist__find(struct intlist *ilist, int i);
+struct int_node *intlist__findnew(struct intlist *ilist, int i);
static inline bool intlist__has_entry(struct intlist *ilist, int i)
{
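intlist__findnew() is the find-or-create counterpart of intlist__find(): it inserts a node for i when none exists yet, and the new priv pointer gives callers somewhere to hang per-value state. A hedged usage sketch (state_new() is made up for the example):

	struct int_node *node = intlist__findnew(ilist, pid);

	if (node == NULL)
		return -ENOMEM;			/* node allocation failed */

	if (node->priv == NULL)
		node->priv = state_new(pid);	/* hypothetical per-pid state */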
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 6188d2876a71..ea93425cce95 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -46,6 +46,23 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
return 0;
}
+struct machine *machine__new_host(void)
+{
+ struct machine *machine = malloc(sizeof(*machine));
+
+ if (machine != NULL) {
+ machine__init(machine, "", HOST_KERNEL_ID);
+
+ if (machine__create_kernel_maps(machine) < 0)
+ goto out_delete;
+ }
+
+ return machine;
+out_delete:
+ free(machine);
+ return NULL;
+}
+
static void dsos__delete(struct list_head *dsos)
{
struct dso *pos, *n;
@@ -776,75 +793,44 @@ static int machine__set_modules_path(struct machine *machine)
return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
}
-static int machine__create_modules(struct machine *machine)
+static int machine__create_module(void *arg, const char *name, u64 start)
{
- char *line = NULL;
- size_t n;
- FILE *file;
+ struct machine *machine = arg;
struct map *map;
+
+ map = machine__new_module(machine, start, name);
+ if (map == NULL)
+ return -1;
+
+ dso__kernel_module_get_build_id(map->dso, machine->root_dir);
+
+ return 0;
+}
+
+static int machine__create_modules(struct machine *machine)
+{
const char *modules;
char path[PATH_MAX];
- if (machine__is_default_guest(machine))
+ if (machine__is_default_guest(machine)) {
modules = symbol_conf.default_guest_modules;
- else {
- sprintf(path, "%s/proc/modules", machine->root_dir);
+ } else {
+ snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
modules = path;
}
if (symbol__restricted_filename(modules, "/proc/modules"))
return -1;
- file = fopen(modules, "r");
- if (file == NULL)
+ if (modules__parse(modules, machine, machine__create_module))
return -1;
- while (!feof(file)) {
- char name[PATH_MAX];
- u64 start;
- char *sep;
- int line_len;
-
- line_len = getline(&line, &n, file);
- if (line_len < 0)
- break;
-
- if (!line)
- goto out_failure;
-
- line[--line_len] = '\0'; /* \n */
-
- sep = strrchr(line, 'x');
- if (sep == NULL)
- continue;
-
- hex2u64(sep + 1, &start);
-
- sep = strchr(line, ' ');
- if (sep == NULL)
- continue;
-
- *sep = '\0';
-
- snprintf(name, sizeof(name), "[%s]", line);
- map = machine__new_module(machine, start, name);
- if (map == NULL)
- goto out_delete_line;
- dso__kernel_module_get_build_id(map->dso, machine->root_dir);
- }
+ if (!machine__set_modules_path(machine))
+ return 0;
- free(line);
- fclose(file);
+ pr_debug("Problems setting modules path maps, continuing anyway...\n");
- if (machine__set_modules_path(machine) < 0) {
- pr_debug("Problems setting modules path maps, continuing anyway...\n");
- }
return 0;
-
-out_delete_line:
- free(line);
-out_failure:
- return -1;
}
int machine__create_kernel_maps(struct machine *machine)
@@ -1267,10 +1253,12 @@ static int machine__resolve_callchain_sample(struct machine *machine,
struct thread *thread,
struct ip_callchain *chain,
struct symbol **parent,
- struct addr_location *root_al)
+ struct addr_location *root_al,
+ int max_stack)
{
u8 cpumode = PERF_RECORD_MISC_USER;
- unsigned int i;
+ int chain_nr = min(max_stack, (int)chain->nr);
+ int i;
int err;
callchain_cursor_reset(&callchain_cursor);
@@ -1280,7 +1268,7 @@ static int machine__resolve_callchain_sample(struct machine *machine,
return 0;
}
- for (i = 0; i < chain->nr; i++) {
+ for (i = 0; i < chain_nr; i++) {
u64 ip;
struct addr_location al;
@@ -1352,12 +1340,14 @@ int machine__resolve_callchain(struct machine *machine,
struct thread *thread,
struct perf_sample *sample,
struct symbol **parent,
- struct addr_location *root_al)
+ struct addr_location *root_al,
+ int max_stack)
{
int ret;
ret = machine__resolve_callchain_sample(machine, thread,
- sample->callchain, parent, root_al);
+ sample->callchain, parent,
+ root_al, max_stack);
if (ret)
return ret;
@@ -1376,3 +1366,26 @@ int machine__resolve_callchain(struct machine *machine,
sample);
}
+
+int machine__for_each_thread(struct machine *machine,
+ int (*fn)(struct thread *thread, void *p),
+ void *priv)
+{
+ struct rb_node *nd;
+ struct thread *thread;
+ int rc = 0;
+
+ for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
+ thread = rb_entry(nd, struct thread, rb_node);
+ rc = fn(thread, priv);
+ if (rc != 0)
+ return rc;
+ }
+
+ list_for_each_entry(thread, &machine->dead_threads, node) {
+ rc = fn(thread, priv);
+ if (rc != 0)
+ return rc;
+ }
+ return rc;
+}
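machine__for_each_thread() visits live threads first and then the dead_threads list, stopping at the first callback that returns non-zero. A minimal counting callback as an example (the counter struct is invented here):

	struct thread_count { int n; };

	static int count_one(struct thread *thread __maybe_unused, void *priv)
	{
		((struct thread_count *)priv)->n++;
		return 0;	/* non-zero would stop the walk early */
	}

	/* ... */
	struct thread_count tc = { .n = 0 };
	machine__for_each_thread(machine, count_one, &tc);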
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 58a6be1fc739..4c1f5d567f54 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -74,6 +74,7 @@ char *machine__mmap_name(struct machine *machine, char *bf, size_t size);
void machines__set_symbol_filter(struct machines *machines,
symbol_filter_t symbol_filter);
+struct machine *machine__new_host(void);
int machine__init(struct machine *machine, const char *root_dir, pid_t pid);
void machine__exit(struct machine *machine);
void machine__delete_dead_threads(struct machine *machine);
@@ -91,7 +92,8 @@ int machine__resolve_callchain(struct machine *machine,
struct thread *thread,
struct perf_sample *sample,
struct symbol **parent,
- struct addr_location *root_al);
+ struct addr_location *root_al,
+ int max_stack);
/*
* Default guest kernel is defined by parameter --guestkallsyms
@@ -165,4 +167,8 @@ void machines__destroy_kernel_maps(struct machines *machines);
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp);
+int machine__for_each_thread(struct machine *machine,
+ int (*fn)(struct thread *thread, void *p),
+ void *priv);
+
#endif /* __PERF_MACHINE_H */
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 4f6680d2043b..ef5bc913ca7a 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -172,7 +172,7 @@ int map__load(struct map *map, symbol_filter_t filter)
pr_warning(", continuing without symbols\n");
return -1;
} else if (nr == 0) {
-#ifdef LIBELF_SUPPORT
+#ifdef HAVE_LIBELF_SUPPORT
const size_t len = strlen(name);
const size_t real_len = len - sizeof(DSO__DELETED);
@@ -252,10 +252,16 @@ size_t map__fprintf_dsoname(struct map *map, FILE *fp)
return fprintf(fp, "%s", dsoname);
}
-/*
+/**
+ * map__rip_2objdump - convert symbol start address to objdump address.
+ * @map: memory map
+ * @rip: symbol start address
+ *
* objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
* map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
* relative to section start.
+ *
+ * Return: Address suitable for passing to "objdump --start-address="
*/
u64 map__rip_2objdump(struct map *map, u64 rip)
{
@@ -268,6 +274,29 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
return map->unmap_ip(map, rip);
}
+/**
+ * map__objdump_2mem - convert objdump address to a memory address.
+ * @map: memory map
+ * @ip: objdump address
+ *
+ * Closely related to map__rip_2objdump(), this function takes an address from
+ * objdump and converts it to a memory address. Note this assumes that @map
+ * contains the address. To be sure the result is valid, check it forwards
+ * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
+ *
+ * Return: Memory address.
+ */
+u64 map__objdump_2mem(struct map *map, u64 ip)
+{
+ if (!map->dso->adjust_symbols)
+ return map->unmap_ip(map, ip);
+
+ if (map->dso->rel)
+ return map->unmap_ip(map, ip + map->pgoff);
+
+ return ip;
+}
+
void map_groups__init(struct map_groups *mg)
{
int i;
@@ -371,6 +400,23 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
return NULL;
}
+int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
+{
+ if (ams->addr < ams->map->start || ams->addr > ams->map->end) {
+ if (ams->map->groups == NULL)
+ return -1;
+ ams->map = map_groups__find(ams->map->groups, ams->map->type,
+ ams->addr);
+ if (ams->map == NULL)
+ return -1;
+ }
+
+ ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
+ ams->sym = map__find_symbol(ams->map, ams->al_addr, filter);
+
+ return ams->sym ? 0 : -1;
+}
+
size_t __map_groups__fprintf_maps(struct map_groups *mg,
enum map_type type, int verbose, FILE *fp)
{
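The kernel-doc above suggests validating a converted address by running it forwards again; spelled out, that check might look like this (the helper name is illustrative):

	static bool objdump_addr_ok(struct map *map, u64 objdump_ip)
	{
		u64 mem = map__objdump_2mem(map, objdump_ip);

		/* the forward conversion must land back on the starting address */
		return map__rip_2objdump(map, map->map_ip(map, mem)) == objdump_ip;
	}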
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 4886ca280536..e4e259c3ba16 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -84,6 +84,9 @@ static inline u64 identity__map_ip(struct map *map __maybe_unused, u64 ip)
/* rip/ip <-> addr suitable for passing to `objdump --start-address=` */
u64 map__rip_2objdump(struct map *map, u64 rip);
+/* objdump address -> memory address */
+u64 map__objdump_2mem(struct map *map, u64 ip);
+
struct symbol;
typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
@@ -167,6 +170,10 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
struct map **mapp,
symbol_filter_t filter);
+struct addr_map_symbol;
+
+int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter);
+
static inline
struct symbol *map_groups__find_function_by_name(struct map_groups *mg,
const char *name, struct map **mapp,
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 98125319b158..c90e55cf7e82 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -998,8 +998,10 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
- if (debugfs_valid_mountpoint(tracing_events_path))
+ if (debugfs_valid_mountpoint(tracing_events_path)) {
+ printf(" [ Tracepoints not available: %s ]\n", strerror(errno));
return;
+ }
sys_dir = opendir(tracing_events_path);
if (!sys_dir)
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 91346b753960..343299575b30 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -126,6 +126,37 @@ modifier_bp [rwx]{1,3}
}
+<config>{
+config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
+config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); }
+config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); }
+name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); }
+period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
+branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
+, { return ','; }
+"/" { BEGIN(INITIAL); return '/'; }
+{name_minus} { return str(yyscanner, PE_NAME); }
+}
+
+<mem>{
+{modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); }
+: { return ':'; }
+{num_dec} { return value(yyscanner, 10); }
+{num_hex} { return value(yyscanner, 16); }
+ /*
+ * We need to separate 'mem:' scanner part, in order to get specific
+ * modifier bits parsed out. Otherwise we would need to handle PE_NAME
+ * and we'd need to parse it manually. During the escape from <mem>
+ * state we need to put the escaping char back, so we don't miss it.
+ */
+. { unput(*yytext); BEGIN(INITIAL); }
+ /*
+ * We destroy the scanner after reaching EOF,
+ * but just to be sure, get back to the INITIAL state anyway.
+ */
+<<EOF>> { BEGIN(INITIAL); }
+}
+
cpu-cycles|cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
stalled-cycles-frontend|idle-cycles-frontend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
stalled-cycles-backend|idle-cycles-backend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
@@ -162,18 +193,6 @@ speculative-read|speculative-load |
refs|Reference|ops|access |
misses|miss { return str(yyscanner, PE_NAME_CACHE_OP_RESULT); }
-<config>{
-config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
-config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); }
-config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); }
-name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); }
-period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
-branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
-, { return ','; }
-"/" { BEGIN(INITIAL); return '/'; }
-{name_minus} { return str(yyscanner, PE_NAME); }
-}
-
mem: { BEGIN(mem); return PE_PREFIX_MEM; }
r{num_raw_hex} { return raw(yyscanner); }
{num_dec} { return value(yyscanner, 10); }
@@ -189,25 +208,7 @@ r{num_raw_hex} { return raw(yyscanner); }
"}" { return '}'; }
= { return '='; }
\n { }
-
-<mem>{
-{modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); }
-: { return ':'; }
-{num_dec} { return value(yyscanner, 10); }
-{num_hex} { return value(yyscanner, 16); }
- /*
- * We need to separate 'mem:' scanner part, in order to get specific
- * modifier bits parsed out. Otherwise we would need to handle PE_NAME
- * and we'd need to parse it manually. During the escape from <mem>
- * state we need to put the escaping char back, so we dont miss it.
- */
-. { unput(*yytext); BEGIN(INITIAL); }
- /*
- * We destroy the scanner after reaching EOF,
- * but anyway just to be sure get back to INIT state.
- */
-<<EOF>> { BEGIN(INITIAL); }
-}
+. { }
%%
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
index a8c49548ca48..5d13cb45b317 100644
--- a/tools/perf/util/path.c
+++ b/tools/perf/util/path.c
@@ -22,19 +22,23 @@ static const char *get_perf_dir(void)
return ".";
}
-#ifndef HAVE_STRLCPY
-size_t strlcpy(char *dest, const char *src, size_t size)
+/*
+ * If libc has strlcpy() then that version will override this
+ * implementation:
+ */
+size_t __weak strlcpy(char *dest, const char *src, size_t size)
{
size_t ret = strlen(src);
if (size) {
size_t len = (ret >= size) ? size - 1 : ret;
+
memcpy(dest, src, len);
dest[len] = '\0';
}
+
return ret;
}
-#endif
static char *get_pathname(void)
{
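The __weak annotation (now provided by the tools linux/compiler.h shim earlier in this series) makes the fallback a weak symbol, so a strong strlcpy() from libc wins at link time and the old HAVE_STRLCPY probe is no longer needed. A generic illustration of the weak-symbol rule the patch relies on, with names invented for the example:

	/* default definition; a strong symbol with the same name wins at link time */
	int __weak have_feature(void)
	{
		return 0;	/* overridden wholesale if a strong have_feature() exists */
	}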
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index 5a4f2b6f3738..a3d42cd74919 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -1,7 +1,7 @@
#ifndef __PERF_REGS_H
#define __PERF_REGS_H
-#ifdef HAVE_PERF_REGS
+#ifdef HAVE_PERF_REGS_SUPPORT
#include <perf_regs.h>
#else
#define PERF_REGS_MASK 0
@@ -10,5 +10,5 @@ static inline const char *perf_reg_name(int id __maybe_unused)
{
return NULL;
}
-#endif /* HAVE_PERF_REGS */
+#endif /* HAVE_PERF_REGS_SUPPORT */
#endif /* __PERF_REGS_H */
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index bc9d8069d376..64362fe45b71 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -637,3 +637,19 @@ void print_pmu_events(const char *event_glob, bool name_only)
printf("\n");
free(aliases);
}
+
+bool pmu_have_event(const char *pname, const char *name)
+{
+ struct perf_pmu *pmu;
+ struct perf_pmu_alias *alias;
+
+ pmu = NULL;
+ while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+ if (strcmp(pname, pmu->name))
+ continue;
+ list_for_each_entry(alias, &pmu->aliases, list)
+ if (!strcmp(alias->name, name))
+ return true;
+ }
+ return false;
+}
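pmu_have_event() scans every registered PMU for a matching alias, which makes it a convenient capability probe before enabling optional features. For example (the PMU and event names, and the helper being called, are illustrative rather than taken from the patch):

	if (pmu_have_event("cpu", "cycles-ct"))
		enable_transaction_columns();	/* hypothetical feature toggle */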
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 6b2cbe2d4cc3..1179b26f244a 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -42,6 +42,7 @@ int perf_pmu__format_parse(char *dir, struct list_head *head);
struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
void print_pmu_events(const char *event_glob, bool name_only);
+bool pmu_have_event(const char *pname, const char *name);
int perf_pmu__test(void);
#endif /* __PMU_H */
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index aa04bf9c9ad7..9c6989ca2bea 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -47,7 +47,6 @@
#include "session.h"
#define MAX_CMDLEN 256
-#define MAX_PROBE_ARGS 128
#define PERFPROBE_GROUP "probe"
bool probe_event_dry_run; /* Dry run flag */
@@ -201,7 +200,7 @@ static int convert_to_perf_probe_point(struct probe_trace_point *tp,
return 0;
}
-#ifdef DWARF_SUPPORT
+#ifdef HAVE_DWARF_SUPPORT
/* Open new debuginfo of given module */
static struct debuginfo *open_debuginfo(const char *module)
{
@@ -630,7 +629,7 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
return ret;
}
-#else /* !DWARF_SUPPORT */
+#else /* !HAVE_DWARF_SUPPORT */
static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
struct perf_probe_point *pp)
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index c09e0a9fdf4c..e41b0941e18f 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -273,12 +273,15 @@ static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs)
/*
* Convert a location into trace_arg.
* If tvar == NULL, this just checks variable can be converted.
+ * If fentry == true and vr_die is a parameter, do a heuristic search
+ * for the location fuzzed by function entry mcount.
*/
static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
- Dwarf_Op *fb_ops,
+ Dwarf_Op *fb_ops, Dwarf_Die *sp_die,
struct probe_trace_arg *tvar)
{
Dwarf_Attribute attr;
+ Dwarf_Addr tmp = 0;
Dwarf_Op *op;
size_t nops;
unsigned int regn;
@@ -291,12 +294,29 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
goto static_var;
/* TODO: handle more than 1 exprs */
- if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL ||
- dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0 ||
- nops == 0) {
- /* TODO: Support const_value */
+ if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL)
+ return -EINVAL; /* Broken DIE ? */
+ if (dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0) {
+ ret = dwarf_entrypc(sp_die, &tmp);
+ if (ret || addr != tmp ||
+ dwarf_tag(vr_die) != DW_TAG_formal_parameter ||
+ dwarf_highpc(sp_die, &tmp))
+ return -ENOENT;
+ /*
+ * This is fuzzed by fentry mcount. We try to find the
+ * parameter location at the earliest address.
+ */
+ for (addr += 1; addr <= tmp; addr++) {
+ if (dwarf_getlocation_addr(&attr, addr, &op,
+ &nops, 1) > 0)
+ goto found;
+ }
return -ENOENT;
}
+found:
+ if (nops == 0)
+ /* TODO: Support const_value */
+ return -ENOENT;
if (op->atom == DW_OP_addr) {
static_var:
@@ -600,7 +620,7 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
dwarf_diename(vr_die));
ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops,
- pf->tvar);
+ &pf->sp_die, pf->tvar);
if (ret == -ENOENT)
pr_err("Failed to find the location of %s at this address.\n"
" Perhaps, it has been optimized out.\n", pf->pvar->var);
@@ -1136,12 +1156,80 @@ found:
return ret;
}
+struct local_vars_finder {
+ struct probe_finder *pf;
+ struct perf_probe_arg *args;
+ int max_args;
+ int nargs;
+ int ret;
+};
+
+/* Collect available variables in this scope */
+static int copy_variables_cb(Dwarf_Die *die_mem, void *data)
+{
+ struct local_vars_finder *vf = data;
+ struct probe_finder *pf = vf->pf;
+ int tag;
+
+ tag = dwarf_tag(die_mem);
+ if (tag == DW_TAG_formal_parameter ||
+ tag == DW_TAG_variable) {
+ if (convert_variable_location(die_mem, vf->pf->addr,
+ vf->pf->fb_ops, &pf->sp_die,
+ NULL) == 0) {
+ vf->args[vf->nargs].var = (char *)dwarf_diename(die_mem);
+ if (vf->args[vf->nargs].var == NULL) {
+ vf->ret = -ENOMEM;
+ return DIE_FIND_CB_END;
+ }
+ pr_debug(" %s", vf->args[vf->nargs].var);
+ vf->nargs++;
+ }
+ }
+
+ if (dwarf_haspc(die_mem, vf->pf->addr))
+ return DIE_FIND_CB_CONTINUE;
+ else
+ return DIE_FIND_CB_SIBLING;
+}
+
+static int expand_probe_args(Dwarf_Die *sc_die, struct probe_finder *pf,
+ struct perf_probe_arg *args)
+{
+ Dwarf_Die die_mem;
+ int i;
+ int n = 0;
+ struct local_vars_finder vf = {.pf = pf, .args = args,
+ .max_args = MAX_PROBE_ARGS, .ret = 0};
+
+ for (i = 0; i < pf->pev->nargs; i++) {
+ /* var is never NULL */
+ if (strcmp(pf->pev->args[i].var, "$vars") == 0) {
+ pr_debug("Expanding $vars into:");
+ vf.nargs = n;
+ /* Special local variables */
+ die_find_child(sc_die, copy_variables_cb, (void *)&vf,
+ &die_mem);
+ pr_debug(" (%d)\n", vf.nargs - n);
+ if (vf.ret < 0)
+ return vf.ret;
+ n = vf.nargs;
+ } else {
+ /* Copy normal argument */
+ args[n] = pf->pev->args[i];
+ n++;
+ }
+ }
+ return n;
+}
+
/* Add a found probe point into trace event list */
static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
{
struct trace_event_finder *tf =
container_of(pf, struct trace_event_finder, pf);
struct probe_trace_event *tev;
+ struct perf_probe_arg *args;
int ret, i;
/* Check number of tevs */
@@ -1161,21 +1249,35 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
tev->point.offset);
- /* Find each argument */
- tev->nargs = pf->pev->nargs;
- tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
- if (tev->args == NULL)
+ /* Expand special probe arguments if they exist */
+ args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS);
+ if (args == NULL)
return -ENOMEM;
- for (i = 0; i < pf->pev->nargs; i++) {
- pf->pvar = &pf->pev->args[i];
+
+ ret = expand_probe_args(sc_die, pf, args);
+ if (ret < 0)
+ goto end;
+
+ tev->nargs = ret;
+ tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+ if (tev->args == NULL) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ /* Find each argument */
+ for (i = 0; i < tev->nargs; i++) {
+ pf->pvar = &args[i];
pf->tvar = &tev->args[i];
/* Variable should be found from scope DIE */
ret = find_variable(sc_die, pf);
if (ret != 0)
- return ret;
+ break;
}
- return 0;
+end:
+ free(args);
+ return ret;
}
/* Find probe_trace_events specified by perf_probe_event from debuginfo */
@@ -1222,7 +1324,8 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
if (tag == DW_TAG_formal_parameter ||
tag == DW_TAG_variable) {
ret = convert_variable_location(die_mem, af->pf.addr,
- af->pf.fb_ops, NULL);
+ af->pf.fb_ops, &af->pf.sp_die,
+ NULL);
if (ret == 0) {
ret = die_get_varname(die_mem, buf, MAX_VAR_LEN);
pr_debug2("Add new var: %s\n", buf);
@@ -1357,10 +1460,10 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
goto post;
}
+ fname = dwarf_decl_file(&spdie);
if (addr == (unsigned long)baseaddr) {
/* Function entry - Relative line number is 0 */
lineno = baseline;
- fname = dwarf_decl_file(&spdie);
goto post;
}
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 3b7d63018960..d6dab0e0a937 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -7,6 +7,7 @@
#define MAX_PROBE_BUFFER 1024
#define MAX_PROBES 128
+#define MAX_PROBE_ARGS 128
static inline int is_c_varname(const char *name)
{
@@ -14,7 +15,7 @@ static inline int is_c_varname(const char *name)
return isalpha(name[0]) || name[0] == '_';
}
-#ifdef DWARF_SUPPORT
+#ifdef HAVE_DWARF_SUPPORT
#include "dwarf-aux.h"
@@ -105,6 +106,6 @@ struct line_finder {
int found;
};
-#endif /* DWARF_SUPPORT */
+#endif /* HAVE_DWARF_SUPPORT */
#endif /*_PROBE_FINDER_H */
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 71b5412bbbb9..4bf8ace7f511 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -33,13 +33,6 @@ int eprintf(int level, const char *fmt, ...)
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif
-struct throttle_event {
- struct perf_event_header header;
- u64 time;
- u64 id;
- u64 stream_id;
-};
-
PyMODINIT_FUNC initperf(void);
#define member_def(type, member, ptype, help) \
@@ -822,6 +815,8 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
PyObject *pyevent = pyrf_event__new(event);
struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
+ perf_evlist__mmap_consume(evlist, cpu);
+
if (pyevent == NULL)
return PyErr_NoMemory();
@@ -1036,6 +1031,7 @@ PyMODINIT_FUNC initperf(void)
pyrf_cpu_map__setup_types() < 0)
return;
+ /* The page_size global lives in the util object. */
page_size = sysconf(_SC_PAGE_SIZE);
Py_INCREF(&pyrf_evlist__type);
diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c
index a16cdd2625ad..0dfe27d99458 100644
--- a/tools/perf/util/rblist.c
+++ b/tools/perf/util/rblist.c
@@ -48,10 +48,12 @@ void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
rblist->node_delete(rblist, rb_node);
}
-struct rb_node *rblist__find(struct rblist *rblist, const void *entry)
+static struct rb_node *__rblist__findnew(struct rblist *rblist,
+ const void *entry,
+ bool create)
{
struct rb_node **p = &rblist->entries.rb_node;
- struct rb_node *parent = NULL;
+ struct rb_node *parent = NULL, *new_node = NULL;
while (*p != NULL) {
int rc;
@@ -67,7 +69,26 @@ struct rb_node *rblist__find(struct rblist *rblist, const void *entry)
return parent;
}
- return NULL;
+ if (create) {
+ new_node = rblist->node_new(rblist, entry);
+ if (new_node) {
+ rb_link_node(new_node, parent, p);
+ rb_insert_color(new_node, &rblist->entries);
+ ++rblist->nr_entries;
+ }
+ }
+
+ return new_node;
+}
+
+struct rb_node *rblist__find(struct rblist *rblist, const void *entry)
+{
+ return __rblist__findnew(rblist, entry, false);
+}
+
+struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry)
+{
+ return __rblist__findnew(rblist, entry, true);
}
void rblist__init(struct rblist *rblist)
diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h
index 6d0cae5ae83d..ff9913b994c2 100644
--- a/tools/perf/util/rblist.h
+++ b/tools/perf/util/rblist.h
@@ -32,6 +32,7 @@ void rblist__delete(struct rblist *rblist);
int rblist__add_node(struct rblist *rblist, const void *new_entry);
void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node);
struct rb_node *rblist__find(struct rblist *rblist, const void *entry);
+struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry);
struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx);
static inline bool rblist__empty(const struct rblist *rblist)
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index a85e4ae5f3ac..c0c9795c4f02 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -282,7 +282,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
event = find_cache_event(evsel);
if (!event)
- die("ug! no event found for type %" PRIu64, evsel->attr.config);
+ die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config);
pid = raw_field_value(event, "common_pid", data);
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index cc75a3cef388..95d91a0b23af 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -56,6 +56,17 @@ static void handler_call_die(const char *handler_name)
Py_FatalError("problem in Python trace event handler");
}
+/*
+ * Insert val into the dictionary and decrement the reference counter.
+ * This is necessary for dictionaries since PyDict_SetItemString() does not
+ * steal a reference, as opposed to PyTuple_SetItem().
+ */
+static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val)
+{
+ PyDict_SetItemString(dict, key, val);
+ Py_DECREF(val);
+}
+
static void define_value(enum print_arg_type field_type,
const char *ev_name,
const char *field_name,
@@ -279,11 +290,11 @@ static void python_process_tracepoint(union perf_event *perf_event
PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
PyTuple_SetItem(t, n++, PyString_FromString(comm));
} else {
- PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu));
- PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s));
- PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns));
- PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid));
- PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm));
+ pydict_set_item_string_decref(dict, "common_cpu", PyInt_FromLong(cpu));
+ pydict_set_item_string_decref(dict, "common_s", PyInt_FromLong(s));
+ pydict_set_item_string_decref(dict, "common_ns", PyInt_FromLong(ns));
+ pydict_set_item_string_decref(dict, "common_pid", PyInt_FromLong(pid));
+ pydict_set_item_string_decref(dict, "common_comm", PyString_FromString(comm));
}
for (field = event->format.fields; field; field = field->next) {
if (field->flags & FIELD_IS_STRING) {
@@ -313,7 +324,7 @@ static void python_process_tracepoint(union perf_event *perf_event
if (handler)
PyTuple_SetItem(t, n++, obj);
else
- PyDict_SetItemString(dict, field->name, obj);
+ pydict_set_item_string_decref(dict, field->name, obj);
}
if (!handler)
@@ -370,21 +381,21 @@ static void python_process_general_event(union perf_event *perf_event
if (!handler || !PyCallable_Check(handler))
goto exit;
- PyDict_SetItemString(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
- PyDict_SetItemString(dict, "attr", PyString_FromStringAndSize(
+ pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
+ pydict_set_item_string_decref(dict, "attr", PyString_FromStringAndSize(
(const char *)&evsel->attr, sizeof(evsel->attr)));
- PyDict_SetItemString(dict, "sample", PyString_FromStringAndSize(
+ pydict_set_item_string_decref(dict, "sample", PyString_FromStringAndSize(
(const char *)sample, sizeof(*sample)));
- PyDict_SetItemString(dict, "raw_buf", PyString_FromStringAndSize(
+ pydict_set_item_string_decref(dict, "raw_buf", PyString_FromStringAndSize(
(const char *)sample->raw_data, sample->raw_size));
- PyDict_SetItemString(dict, "comm",
+ pydict_set_item_string_decref(dict, "comm",
PyString_FromString(thread->comm));
if (al->map) {
- PyDict_SetItemString(dict, "dso",
+ pydict_set_item_string_decref(dict, "dso",
PyString_FromString(al->map->dso->name));
}
if (al->sym) {
- PyDict_SetItemString(dict, "symbol",
+ pydict_set_item_string_decref(dict, "symbol",
PyString_FromString(al->sym->name));
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 568b750c01f6..4ba7b548e055 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -16,73 +16,34 @@
#include "perf_regs.h"
#include "vdso.h"
-static int perf_session__open(struct perf_session *self, bool force)
+static int perf_session__open(struct perf_session *self)
{
- struct stat input_stat;
-
- if (!strcmp(self->filename, "-")) {
- self->fd_pipe = true;
- self->fd = STDIN_FILENO;
-
- if (perf_session__read_header(self) < 0)
- pr_err("incompatible file format (rerun with -v to learn more)");
-
- return 0;
- }
-
- self->fd = open(self->filename, O_RDONLY);
- if (self->fd < 0) {
- int err = errno;
-
- pr_err("failed to open %s: %s", self->filename, strerror(err));
- if (err == ENOENT && !strcmp(self->filename, "perf.data"))
- pr_err(" (try 'perf record' first)");
- pr_err("\n");
- return -errno;
- }
-
- if (fstat(self->fd, &input_stat) < 0)
- goto out_close;
-
- if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
- pr_err("file %s not owned by current user or root\n",
- self->filename);
- goto out_close;
- }
-
- if (!input_stat.st_size) {
- pr_info("zero-sized file (%s), nothing to do!\n",
- self->filename);
- goto out_close;
- }
+ struct perf_data_file *file = self->file;
if (perf_session__read_header(self) < 0) {
pr_err("incompatible file format (rerun with -v to learn more)");
- goto out_close;
+ return -1;
}
+ if (perf_data_file__is_pipe(file))
+ return 0;
+
if (!perf_evlist__valid_sample_type(self->evlist)) {
pr_err("non matching sample_type");
- goto out_close;
+ return -1;
}
if (!perf_evlist__valid_sample_id_all(self->evlist)) {
pr_err("non matching sample_id_all");
- goto out_close;
+ return -1;
}
if (!perf_evlist__valid_read_format(self->evlist)) {
pr_err("non matching read_format");
- goto out_close;
+ return -1;
}
- self->size = input_stat.st_size;
return 0;
-
-out_close:
- close(self->fd);
- self->fd = -1;
- return -1;
}
void perf_session__set_id_hdr_size(struct perf_session *session)
@@ -106,39 +67,36 @@ static void perf_session__destroy_kernel_maps(struct perf_session *self)
machines__destroy_kernel_maps(&self->machines);
}
-struct perf_session *perf_session__new(const char *filename, int mode,
- bool force, bool repipe,
- struct perf_tool *tool)
+struct perf_session *perf_session__new(struct perf_data_file *file,
+ bool repipe, struct perf_tool *tool)
{
struct perf_session *self;
- struct stat st;
- size_t len;
-
- if (!filename || !strlen(filename)) {
- if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
- filename = "-";
- else
- filename = "perf.data";
- }
- len = strlen(filename);
- self = zalloc(sizeof(*self) + len);
-
- if (self == NULL)
+ self = zalloc(sizeof(*self));
+ if (!self)
goto out;
- memcpy(self->filename, filename, len);
self->repipe = repipe;
INIT_LIST_HEAD(&self->ordered_samples.samples);
INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
INIT_LIST_HEAD(&self->ordered_samples.to_free);
machines__init(&self->machines);
- if (mode == O_RDONLY) {
- if (perf_session__open(self, force) < 0)
+ if (file) {
+ if (perf_data_file__open(file))
goto out_delete;
- perf_session__set_id_hdr_size(self);
- } else if (mode == O_WRONLY) {
+
+ self->file = file;
+
+ if (perf_data_file__is_read(file)) {
+ if (perf_session__open(self) < 0)
+ goto out_close;
+
+ perf_session__set_id_hdr_size(self);
+ }
+ }
+
+ if (!file || perf_data_file__is_write(file)) {
/*
* In O_RDONLY mode this will be performed when reading the
* kernel MMAP event, in perf_event__process_mmap().
@@ -153,10 +111,13 @@ struct perf_session *perf_session__new(const char *filename, int mode,
tool->ordered_samples = false;
}
-out:
return self;
-out_delete:
+
+ out_close:
+ perf_data_file__close(file);
+ out_delete:
perf_session__delete(self);
+ out:
return NULL;
}
@@ -193,7 +154,8 @@ void perf_session__delete(struct perf_session *self)
perf_session__delete_threads(self);
perf_session_env__delete(&self->header.env);
machines__exit(&self->machines);
- close(self->fd);
+ if (self->file)
+ perf_data_file__close(self->file);
free(self);
vdso__exit();
}
@@ -397,6 +359,17 @@ static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
swap_sample_id_all(event, &event->read + 1);
}
+static void perf_event__throttle_swap(union perf_event *event,
+ bool sample_id_all)
+{
+ event->throttle.time = bswap_64(event->throttle.time);
+ event->throttle.id = bswap_64(event->throttle.id);
+ event->throttle.stream_id = bswap_64(event->throttle.stream_id);
+
+ if (sample_id_all)
+ swap_sample_id_all(event, &event->throttle + 1);
+}
+
static u8 revbyte(u8 b)
{
int rev = (b >> 4) | ((b & 0xf) << 4);
@@ -442,6 +415,9 @@ void perf_event__attr_swap(struct perf_event_attr *attr)
attr->bp_type = bswap_32(attr->bp_type);
attr->bp_addr = bswap_64(attr->bp_addr);
attr->bp_len = bswap_64(attr->bp_len);
+ attr->branch_sample_type = bswap_64(attr->branch_sample_type);
+ attr->sample_regs_user = bswap_64(attr->sample_regs_user);
+ attr->sample_stack_user = bswap_32(attr->sample_stack_user);
swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}
@@ -482,6 +458,8 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_EXIT] = perf_event__task_swap,
[PERF_RECORD_LOST] = perf_event__all64_swap,
[PERF_RECORD_READ] = perf_event__read_swap,
+ [PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
+ [PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
@@ -525,13 +503,16 @@ static int flush_sample_queue(struct perf_session *s,
struct perf_sample sample;
u64 limit = os->next_flush;
u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
- unsigned idx = 0, progress_next = os->nr_samples / 16;
bool show_progress = limit == ULLONG_MAX;
+ struct ui_progress prog;
int ret;
if (!tool->ordered_samples || !limit)
return 0;
+ if (show_progress)
+ ui_progress__init(&prog, os->nr_samples, "Processing time ordered events...");
+
list_for_each_entry_safe(iter, tmp, head, list) {
if (session_done())
return 0;
@@ -552,11 +533,9 @@ static int flush_sample_queue(struct perf_session *s,
os->last_flush = iter->timestamp;
list_del(&iter->list);
list_add(&iter->list, &os->sample_cache);
- if (show_progress && (++idx >= progress_next)) {
- progress_next += os->nr_samples / 16;
- ui_progress__update(idx, os->nr_samples,
- "Processing time ordered events...");
- }
+
+ if (show_progress)
+ ui_progress__update(&prog, 1);
}
if (list_empty(head)) {
@@ -860,6 +839,9 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
if (sample_type & PERF_SAMPLE_DATA_SRC)
printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
+ if (sample_type & PERF_SAMPLE_TRANSACTION)
+ printf("... transaction: %" PRIx64 "\n", sample->transaction);
+
if (sample_type & PERF_SAMPLE_READ)
sample_read__printf(sample, evsel->attr.read_format);
}
@@ -1031,6 +1013,7 @@ static int perf_session_deliver_event(struct perf_session *session,
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
struct perf_tool *tool, u64 file_offset)
{
+ int fd = perf_data_file__fd(session->file);
int err;
dump_event(session, event, file_offset, NULL);
@@ -1044,7 +1027,7 @@ static int perf_session__process_user_event(struct perf_session *session, union
return err;
case PERF_RECORD_HEADER_TRACING_DATA:
/* setup for reading amidst mmap */
- lseek(session->fd, file_offset, SEEK_SET);
+ lseek(fd, file_offset, SEEK_SET);
return tool->tracing_data(tool, event, session);
case PERF_RECORD_HEADER_BUILD_ID:
return tool->build_id(tool, event, session);
@@ -1170,6 +1153,7 @@ volatile int session_done;
static int __perf_session__process_pipe_events(struct perf_session *self,
struct perf_tool *tool)
{
+ int fd = perf_data_file__fd(self->file);
union perf_event *event;
uint32_t size, cur_size = 0;
void *buf = NULL;
@@ -1188,7 +1172,7 @@ static int __perf_session__process_pipe_events(struct perf_session *self,
return -errno;
more:
event = buf;
- err = readn(self->fd, event, sizeof(struct perf_event_header));
+ err = readn(fd, event, sizeof(struct perf_event_header));
if (err <= 0) {
if (err == 0)
goto done;
@@ -1220,7 +1204,7 @@ more:
p += sizeof(struct perf_event_header);
if (size - sizeof(struct perf_event_header)) {
- err = readn(self->fd, p, size - sizeof(struct perf_event_header));
+ err = readn(fd, p, size - sizeof(struct perf_event_header));
if (err <= 0) {
if (err == 0) {
pr_err("unexpected end of event stream\n");
@@ -1247,7 +1231,9 @@ more:
if (!session_done())
goto more;
done:
- err = 0;
+ /* do the final flush for ordered samples */
+ self->ordered_samples.next_flush = ULLONG_MAX;
+ err = flush_sample_queue(self, tool);
out_err:
free(buf);
perf_session__warn_about_errors(self, tool);
@@ -1299,12 +1285,14 @@ int __perf_session__process_events(struct perf_session *session,
u64 data_offset, u64 data_size,
u64 file_size, struct perf_tool *tool)
{
- u64 head, page_offset, file_offset, file_pos, progress_next;
+ int fd = perf_data_file__fd(session->file);
+ u64 head, page_offset, file_offset, file_pos;
int err, mmap_prot, mmap_flags, map_idx = 0;
size_t mmap_size;
char *buf, *mmaps[NUM_MMAPS];
union perf_event *event;
uint32_t size;
+ struct ui_progress prog;
perf_tool__fill_defaults(tool);
@@ -1315,7 +1303,7 @@ int __perf_session__process_events(struct perf_session *session,
if (data_size && (data_offset + data_size < file_size))
file_size = data_offset + data_size;
- progress_next = file_size / 16;
+ ui_progress__init(&prog, file_size, "Processing events...");
mmap_size = MMAP_SIZE;
if (mmap_size > file_size)
@@ -1331,7 +1319,7 @@ int __perf_session__process_events(struct perf_session *session,
mmap_flags = MAP_PRIVATE;
}
remap:
- buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
+ buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
file_offset);
if (buf == MAP_FAILED) {
pr_err("failed to mmap file\n");
@@ -1370,19 +1358,15 @@ more:
head += size;
file_pos += size;
- if (file_pos >= progress_next) {
- progress_next += file_size / 16;
- ui_progress__update(file_pos, file_size,
- "Processing events...");
- }
+ ui_progress__update(&prog, size);
- err = 0;
if (session_done())
- goto out_err;
+ goto out;
if (file_pos < file_size)
goto more;
+out:
/* do the final flush for ordered samples */
session->ordered_samples.next_flush = ULLONG_MAX;
err = flush_sample_queue(session, tool);
@@ -1396,16 +1380,17 @@ out_err:
int perf_session__process_events(struct perf_session *self,
struct perf_tool *tool)
{
+ u64 size = perf_data_file__size(self->file);
int err;
if (perf_session__register_idle_thread(self) == NULL)
return -ENOMEM;
- if (!self->fd_pipe)
+ if (!perf_data_file__is_pipe(self->file))
err = __perf_session__process_events(self,
self->header.data_offset,
self->header.data_size,
- self->size, tool);
+ size, tool);
else
err = __perf_session__process_pipe_events(self, tool);
@@ -1525,7 +1510,8 @@ void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
if (symbol_conf.use_callchain && sample->callchain) {
if (machine__resolve_callchain(machine, evsel, al.thread,
- sample, NULL, NULL) != 0) {
+ sample, NULL, NULL,
+ PERF_MAX_STACK_DEPTH) != 0) {
if (verbose)
error("Failed to resolve callchain. Skipping\n");
return;
@@ -1629,13 +1615,14 @@ int perf_session__cpu_bitmap(struct perf_session *session,
void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
bool full)
{
+ int fd = perf_data_file__fd(session->file);
struct stat st;
int ret;
if (session == NULL || fp == NULL)
return;
- ret = fstat(session->fd, &st);
+ ret = fstat(fd, &st);
if (ret == -1)
return;
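The session.c hunks above replace the filename/mode/force arguments of perf_session__new() with a struct perf_data_file. A minimal caller-side sketch of the new flow, under stated assumptions: the .path and .mode field names and the PERF_DATA_MODE_READ enumerator come from util/data.h, which is not part of the hunks shown here.

	/* Sketch only; field names from data.h are assumed, `tool` is the caller's perf_tool. */
	static int read_perf_data(struct perf_tool *tool)
	{
		struct perf_data_file file = {
			.path = "perf.data",
			.mode = PERF_DATA_MODE_READ,	/* assumed enum name */
		};
		struct perf_session *session;
		int err;

		session = perf_session__new(&file, false, tool);
		if (session == NULL)
			return -1;

		err = perf_session__process_events(session, tool);
		perf_session__delete(session);	/* also closes the data file, per the hunk above */
		return err;
	}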
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 04bf7373a7e5..27c74d38b868 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -7,6 +7,7 @@
#include "machine.h"
#include "symbol.h"
#include "thread.h"
+#include "data.h"
#include <linux/rbtree.h>
#include <linux/perf_event.h>
@@ -29,16 +30,13 @@ struct ordered_samples {
struct perf_session {
struct perf_header header;
- unsigned long size;
struct machines machines;
struct perf_evlist *evlist;
struct pevent *pevent;
struct events_stats stats;
- int fd;
- bool fd_pipe;
bool repipe;
struct ordered_samples ordered_samples;
- char filename[1];
+ struct perf_data_file *file;
};
#define PRINT_IP_OPT_IP (1<<0)
@@ -49,9 +47,8 @@ struct perf_session {
struct perf_tool;
-struct perf_session *perf_session__new(const char *filename, int mode,
- bool force, bool repipe,
- struct perf_tool *tool);
+struct perf_session *perf_session__new(struct perf_data_file *file,
+ bool repipe, struct perf_tool *tool);
void perf_session__delete(struct perf_session *session);
void perf_event_header__bswap(struct perf_event_header *self);
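The session code now leans on a set of perf_data_file accessors defined in the new util/data.h, which is not included in the hunks shown here. A rough sketch of the interface those call sites imply (return types are inferred from how the results are used above and should be treated as assumptions):

	/* Assumed shape of the util/data.h interface used by session.c. */
	struct perf_data_file;

	int  perf_data_file__open(struct perf_data_file *file);
	void perf_data_file__close(struct perf_data_file *file);
	int  perf_data_file__fd(struct perf_data_file *file);
	u64  perf_data_file__size(struct perf_data_file *file);
	bool perf_data_file__is_read(struct perf_data_file *file);
	bool perf_data_file__is_write(struct perf_data_file *file);
	bool perf_data_file__is_pipe(struct perf_data_file *file);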
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 5f118a089519..19b4aa279d1e 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -60,11 +60,11 @@ sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
return right->thread->tid - left->thread->tid;
}
-static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%*s:%5d", width - 6,
- self->thread->comm ?: "", self->thread->tid);
+ he->thread->comm ?: "", he->thread->tid);
}
struct sort_entry sort_thread = {
@@ -94,10 +94,10 @@ sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
return strcmp(comm_l, comm_r);
}
-static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%*s", width, self->thread->comm);
+ return repsep_snprintf(bf, size, "%*s", width, he->thread->comm);
}
struct sort_entry sort_comm = {
@@ -148,10 +148,10 @@ static int _hist_entry__dso_snprintf(struct map *map, char *bf,
return repsep_snprintf(bf, size, "%-*s", width, "[unknown]");
}
-static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return _hist_entry__dso_snprintf(self->ms.map, bf, size, width);
+ return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}
struct sort_entry sort_dso = {
@@ -182,9 +182,19 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
+ int64_t ret;
+
if (!left->ms.sym && !right->ms.sym)
return right->level - left->level;
+ /*
+ * comparing symbol address alone is not enough since it's a
+ * relative address within a dso.
+ */
+ ret = sort__dso_cmp(left, right);
+ if (ret != 0)
+ return ret;
+
return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}
@@ -224,11 +234,11 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
return ret;
}
-static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip,
- self->level, bf, size, width);
+ return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
+ he->level, bf, size, width);
}
struct sort_entry sort_sym = {
@@ -243,50 +253,32 @@ struct sort_entry sort_sym = {
static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return (int64_t)(right->ip - left->ip);
+ if (!left->srcline) {
+ if (!left->ms.map)
+ left->srcline = SRCLINE_UNKNOWN;
+ else {
+ struct map *map = left->ms.map;
+ left->srcline = get_srcline(map->dso,
+ map__rip_2objdump(map, left->ip));
+ }
+ }
+ if (!right->srcline) {
+ if (!right->ms.map)
+ right->srcline = SRCLINE_UNKNOWN;
+ else {
+ struct map *map = right->ms.map;
+ right->srcline = get_srcline(map->dso,
+ map__rip_2objdump(map, right->ip));
+ }
+ }
+ return strcmp(left->srcline, right->srcline);
}
-static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
size_t size,
unsigned int width __maybe_unused)
{
- FILE *fp = NULL;
- char cmd[PATH_MAX + 2], *path = self->srcline, *nl;
- size_t line_len;
-
- if (path != NULL)
- goto out_path;
-
- if (!self->ms.map)
- goto out_ip;
-
- if (!strncmp(self->ms.map->dso->long_name, "/tmp/perf-", 10))
- goto out_ip;
-
- snprintf(cmd, sizeof(cmd), "addr2line -e %s %016" PRIx64,
- self->ms.map->dso->long_name, self->ip);
- fp = popen(cmd, "r");
- if (!fp)
- goto out_ip;
-
- if (getline(&path, &line_len, fp) < 0 || !line_len)
- goto out_ip;
- self->srcline = strdup(path);
- if (self->srcline == NULL)
- goto out_ip;
-
- nl = strchr(self->srcline, '\n');
- if (nl != NULL)
- *nl = '\0';
- path = self->srcline;
-out_path:
- if (fp)
- pclose(fp);
- return repsep_snprintf(bf, size, "%s", path);
-out_ip:
- if (fp)
- pclose(fp);
- return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip);
+ return repsep_snprintf(bf, size, "%s", he->srcline);
}
struct sort_entry sort_srcline = {
@@ -310,11 +302,11 @@ sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
return strcmp(sym_l->name, sym_r->name);
}
-static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-*s", width,
- self->parent ? self->parent->name : "[other]");
+ he->parent ? he->parent->name : "[other]");
}
struct sort_entry sort_parent = {
@@ -332,10 +324,10 @@ sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
return right->cpu - left->cpu;
}
-static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width)
+static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%*d", width, self->cpu);
+ return repsep_snprintf(bf, size, "%*d", width, he->cpu);
}
struct sort_entry sort_cpu = {
@@ -354,10 +346,10 @@ sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
right->branch_info->from.map);
}
-static int hist_entry__dso_from_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return _hist_entry__dso_snprintf(self->branch_info->from.map,
+ return _hist_entry__dso_snprintf(he->branch_info->from.map,
bf, size, width);
}
@@ -368,10 +360,10 @@ sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
right->branch_info->to.map);
}
-static int hist_entry__dso_to_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return _hist_entry__dso_snprintf(self->branch_info->to.map,
+ return _hist_entry__dso_snprintf(he->branch_info->to.map,
bf, size, width);
}
@@ -399,21 +391,21 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
return _sort__sym_cmp(to_l->sym, to_r->sym);
}
-static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- struct addr_map_symbol *from = &self->branch_info->from;
+ struct addr_map_symbol *from = &he->branch_info->from;
return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
- self->level, bf, size, width);
+ he->level, bf, size, width);
}
-static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- struct addr_map_symbol *to = &self->branch_info->to;
+ struct addr_map_symbol *to = &he->branch_info->to;
return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
- self->level, bf, size, width);
+ he->level, bf, size, width);
}
@@ -456,13 +448,13 @@ sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
return mp || p;
}
-static int hist_entry__mispredict_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width){
static const char *out = "N/A";
- if (self->branch_info->flags.predicted)
+ if (he->branch_info->flags.predicted)
out = "N";
- else if (self->branch_info->flags.mispred)
+ else if (he->branch_info->flags.mispred)
out = "Y";
return repsep_snprintf(bf, size, "%-*s", width, out);
@@ -482,19 +474,19 @@ sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
return (int64_t)(r - l);
}
-static int hist_entry__daddr_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
uint64_t addr = 0;
struct map *map = NULL;
struct symbol *sym = NULL;
- if (self->mem_info) {
- addr = self->mem_info->daddr.addr;
- map = self->mem_info->daddr.map;
- sym = self->mem_info->daddr.sym;
+ if (he->mem_info) {
+ addr = he->mem_info->daddr.addr;
+ map = he->mem_info->daddr.map;
+ sym = he->mem_info->daddr.sym;
}
- return _hist_entry__sym_snprintf(map, sym, addr, self->level, bf, size,
+ return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
width);
}
@@ -512,13 +504,13 @@ sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
return _sort__dso_cmp(map_l, map_r);
}
-static int hist_entry__dso_daddr_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
struct map *map = NULL;
- if (self->mem_info)
- map = self->mem_info->daddr.map;
+ if (he->mem_info)
+ map = he->mem_info->daddr.map;
return _hist_entry__dso_snprintf(map, bf, size, width);
}
@@ -542,14 +534,14 @@ sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}
-static int hist_entry__locked_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
const char *out;
u64 mask = PERF_MEM_LOCK_NA;
- if (self->mem_info)
- mask = self->mem_info->data_src.mem_lock;
+ if (he->mem_info)
+ mask = he->mem_info->data_src.mem_lock;
if (mask & PERF_MEM_LOCK_NA)
out = "N/A";
@@ -591,7 +583,7 @@ static const char * const tlb_access[] = {
};
#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
-static int hist_entry__tlb_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char out[64];
@@ -602,8 +594,8 @@ static int hist_entry__tlb_snprintf(struct hist_entry *self, char *bf,
out[0] = '\0';
- if (self->mem_info)
- m = self->mem_info->data_src.mem_dtlb;
+ if (he->mem_info)
+ m = he->mem_info->data_src.mem_dtlb;
hit = m & PERF_MEM_TLB_HIT;
miss = m & PERF_MEM_TLB_MISS;
@@ -668,7 +660,7 @@ static const char * const mem_lvl[] = {
};
#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
-static int hist_entry__lvl_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char out[64];
@@ -677,8 +669,8 @@ static int hist_entry__lvl_snprintf(struct hist_entry *self, char *bf,
u64 m = PERF_MEM_LVL_NA;
u64 hit, miss;
- if (self->mem_info)
- m = self->mem_info->data_src.mem_lvl;
+ if (he->mem_info)
+ m = he->mem_info->data_src.mem_lvl;
out[0] = '\0';
@@ -736,7 +728,7 @@ static const char * const snoop_access[] = {
};
#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
-static int hist_entry__snoop_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char out[64];
@@ -746,8 +738,8 @@ static int hist_entry__snoop_snprintf(struct hist_entry *self, char *bf,
out[0] = '\0';
- if (self->mem_info)
- m = self->mem_info->data_src.mem_snoop;
+ if (he->mem_info)
+ m = he->mem_info->data_src.mem_snoop;
for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
if (!(m & 0x1))
@@ -784,10 +776,10 @@ sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
return he_weight(left) - he_weight(right);
}
-static int hist_entry__local_weight_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*llu", width, he_weight(self));
+ return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}
struct sort_entry sort_local_weight = {
@@ -803,10 +795,10 @@ sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
return left->stat.weight - right->stat.weight;
}
-static int hist_entry__global_weight_snprintf(struct hist_entry *self, char *bf,
+static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*llu", width, self->stat.weight);
+ return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}
struct sort_entry sort_global_weight = {
@@ -858,6 +850,127 @@ struct sort_entry sort_mem_snoop = {
.se_width_idx = HISTC_MEM_SNOOP,
};
+static int64_t
+sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return left->branch_info->flags.abort !=
+ right->branch_info->flags.abort;
+}
+
+static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ static const char *out = ".";
+
+ if (he->branch_info->flags.abort)
+ out = "A";
+ return repsep_snprintf(bf, size, "%-*s", width, out);
+}
+
+struct sort_entry sort_abort = {
+ .se_header = "Transaction abort",
+ .se_cmp = sort__abort_cmp,
+ .se_snprintf = hist_entry__abort_snprintf,
+ .se_width_idx = HISTC_ABORT,
+};
+
+static int64_t
+sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return left->branch_info->flags.in_tx !=
+ right->branch_info->flags.in_tx;
+}
+
+static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ static const char *out = ".";
+
+ if (he->branch_info->flags.in_tx)
+ out = "T";
+
+ return repsep_snprintf(bf, size, "%-*s", width, out);
+}
+
+struct sort_entry sort_in_tx = {
+ .se_header = "Branch in transaction",
+ .se_cmp = sort__in_tx_cmp,
+ .se_snprintf = hist_entry__in_tx_snprintf,
+ .se_width_idx = HISTC_IN_TX,
+};
+
+static int64_t
+sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return left->transaction - right->transaction;
+}
+
+static inline char *add_str(char *p, const char *str)
+{
+ strcpy(p, str);
+ return p + strlen(str);
+}
+
+static struct txbit {
+ unsigned flag;
+ const char *name;
+ int skip_for_len;
+} txbits[] = {
+ { PERF_TXN_ELISION, "EL ", 0 },
+ { PERF_TXN_TRANSACTION, "TX ", 1 },
+ { PERF_TXN_SYNC, "SYNC ", 1 },
+ { PERF_TXN_ASYNC, "ASYNC ", 0 },
+ { PERF_TXN_RETRY, "RETRY ", 0 },
+ { PERF_TXN_CONFLICT, "CON ", 0 },
+ { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
+ { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
+ { 0, NULL, 0 }
+};
+
+int hist_entry__transaction_len(void)
+{
+ int i;
+ int len = 0;
+
+ for (i = 0; txbits[i].name; i++) {
+ if (!txbits[i].skip_for_len)
+ len += strlen(txbits[i].name);
+ }
+ len += 4; /* :XX<space> */
+ return len;
+}
+
+static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ u64 t = he->transaction;
+ char buf[128];
+ char *p = buf;
+ int i;
+
+ buf[0] = 0;
+ for (i = 0; txbits[i].name; i++)
+ if (txbits[i].flag & t)
+ p = add_str(p, txbits[i].name);
+ if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
+ p = add_str(p, "NEITHER ");
+ if (t & PERF_TXN_ABORT_MASK) {
+ sprintf(p, ":%" PRIx64,
+ (t & PERF_TXN_ABORT_MASK) >>
+ PERF_TXN_ABORT_SHIFT);
+ p += strlen(p);
+ }
+
+ return repsep_snprintf(bf, size, "%-*s", width, buf);
+}
+
+struct sort_entry sort_transaction = {
+ .se_header = "Transaction ",
+ .se_cmp = sort__transaction_cmp,
+ .se_snprintf = hist_entry__transaction_snprintf,
+ .se_width_idx = HISTC_TRANSACTION,
+};
+
struct sort_dimension {
const char *name;
struct sort_entry *entry;
@@ -876,6 +989,7 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_SRCLINE, "srcline", sort_srcline),
DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
+ DIM(SORT_TRANSACTION, "transaction", sort_transaction),
};
#undef DIM
@@ -888,6 +1002,8 @@ static struct sort_dimension bstack_sort_dimensions[] = {
DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
+ DIM(SORT_IN_TX, "in_tx", sort_in_tx),
+ DIM(SORT_ABORT, "abort", sort_abort),
};
#undef DIM
@@ -1009,7 +1125,7 @@ int setup_sorting(void)
return ret;
}
-static void sort_entry__setup_elide(struct sort_entry *self,
+static void sort_entry__setup_elide(struct sort_entry *se,
struct strlist *list,
const char *list_name, FILE *fp)
{
@@ -1017,7 +1133,7 @@ static void sort_entry__setup_elide(struct sort_entry *self,
if (fp != NULL)
fprintf(fp, "# %s: %s\n", list_name,
strlist__entry(list, 0)->s);
- self->elide = true;
+ se->elide = true;
}
}
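A short worked example of the "transaction" column formatting introduced above; the PERF_TXN_* flag values themselves come from perf_event.h and are not part of this diff.

	/*
	 * t = PERF_TXN_TRANSACTION | PERF_TXN_SYNC with abort code 5:
	 *     txbits[] matching emits "TX SYNC ", then ":5" is appended,
	 *     so the column shows "TX SYNC :5".
	 * t = PERF_TXN_ELISION:
	 *     neither SYNC nor ASYNC is set, so "NEITHER " is appended
	 *     and the column shows "EL NEITHER ".
	 */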
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 4e80dbd271e7..bf4333694d3a 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -85,6 +85,7 @@ struct hist_entry {
struct map_symbol ms;
struct thread *thread;
u64 ip;
+ u64 transaction;
s32 cpu;
struct hist_entry_diff diff;
@@ -145,6 +146,7 @@ enum sort_type {
SORT_SRCLINE,
SORT_LOCAL_WEIGHT,
SORT_GLOBAL_WEIGHT,
+ SORT_TRANSACTION,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
@@ -153,6 +155,8 @@ enum sort_type {
SORT_SYM_FROM,
SORT_SYM_TO,
SORT_MISPREDICT,
+ SORT_ABORT,
+ SORT_IN_TX,
/* memory mode specific sort keys */
__SORT_MEMORY_MODE,
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
new file mode 100644
index 000000000000..d11aefbc4b8d
--- /dev/null
+++ b/tools/perf/util/srcline.c
@@ -0,0 +1,265 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <linux/kernel.h>
+
+#include "util/dso.h"
+#include "util/util.h"
+#include "util/debug.h"
+
+#ifdef HAVE_LIBBFD_SUPPORT
+
+/*
+ * Implement addr2line using libbfd.
+ */
+#define PACKAGE "perf"
+#include <bfd.h>
+
+struct a2l_data {
+ const char *input;
+ unsigned long addr;
+
+ bool found;
+ const char *filename;
+ const char *funcname;
+ unsigned line;
+
+ bfd *abfd;
+ asymbol **syms;
+};
+
+static int bfd_error(const char *string)
+{
+ const char *errmsg;
+
+ errmsg = bfd_errmsg(bfd_get_error());
+ fflush(stdout);
+
+ if (string)
+ pr_debug("%s: %s\n", string, errmsg);
+ else
+ pr_debug("%s\n", errmsg);
+
+ return -1;
+}
+
+static int slurp_symtab(bfd *abfd, struct a2l_data *a2l)
+{
+ long storage;
+ long symcount;
+ asymbol **syms;
+ bfd_boolean dynamic = FALSE;
+
+ if ((bfd_get_file_flags(abfd) & HAS_SYMS) == 0)
+ return bfd_error(bfd_get_filename(abfd));
+
+ storage = bfd_get_symtab_upper_bound(abfd);
+ if (storage == 0L) {
+ storage = bfd_get_dynamic_symtab_upper_bound(abfd);
+ dynamic = TRUE;
+ }
+ if (storage < 0L)
+ return bfd_error(bfd_get_filename(abfd));
+
+ syms = malloc(storage);
+ if (dynamic)
+ symcount = bfd_canonicalize_dynamic_symtab(abfd, syms);
+ else
+ symcount = bfd_canonicalize_symtab(abfd, syms);
+
+ if (symcount < 0) {
+ free(syms);
+ return bfd_error(bfd_get_filename(abfd));
+ }
+
+ a2l->syms = syms;
+ return 0;
+}
+
+static void find_address_in_section(bfd *abfd, asection *section, void *data)
+{
+ bfd_vma pc, vma;
+ bfd_size_type size;
+ struct a2l_data *a2l = data;
+
+ if (a2l->found)
+ return;
+
+ if ((bfd_get_section_flags(abfd, section) & SEC_ALLOC) == 0)
+ return;
+
+ pc = a2l->addr;
+ vma = bfd_get_section_vma(abfd, section);
+ size = bfd_get_section_size(section);
+
+ if (pc < vma || pc >= vma + size)
+ return;
+
+ a2l->found = bfd_find_nearest_line(abfd, section, a2l->syms, pc - vma,
+ &a2l->filename, &a2l->funcname,
+ &a2l->line);
+}
+
+static struct a2l_data *addr2line_init(const char *path)
+{
+ bfd *abfd;
+ struct a2l_data *a2l = NULL;
+
+ abfd = bfd_openr(path, NULL);
+ if (abfd == NULL)
+ return NULL;
+
+ if (!bfd_check_format(abfd, bfd_object))
+ goto out;
+
+ a2l = zalloc(sizeof(*a2l));
+ if (a2l == NULL)
+ goto out;
+
+ a2l->abfd = abfd;
+ a2l->input = strdup(path);
+ if (a2l->input == NULL)
+ goto out;
+
+ if (slurp_symtab(abfd, a2l))
+ goto out;
+
+ return a2l;
+
+out:
+ if (a2l) {
+ free((void *)a2l->input);
+ free(a2l);
+ }
+ bfd_close(abfd);
+ return NULL;
+}
+
+static void addr2line_cleanup(struct a2l_data *a2l)
+{
+ if (a2l->abfd)
+ bfd_close(a2l->abfd);
+ free((void *)a2l->input);
+ free(a2l->syms);
+ free(a2l);
+}
+
+static int addr2line(const char *dso_name, unsigned long addr,
+ char **file, unsigned int *line)
+{
+ int ret = 0;
+ struct a2l_data *a2l;
+
+ a2l = addr2line_init(dso_name);
+ if (a2l == NULL) {
+ pr_warning("addr2line_init failed for %s\n", dso_name);
+ return 0;
+ }
+
+ a2l->addr = addr;
+ bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l);
+
+ if (a2l->found && a2l->filename) {
+ *file = strdup(a2l->filename);
+ *line = a2l->line;
+
+ if (*file)
+ ret = 1;
+ }
+
+ addr2line_cleanup(a2l);
+ return ret;
+}
+
+#else /* HAVE_LIBBFD_SUPPORT */
+
+static int addr2line(const char *dso_name, unsigned long addr,
+ char **file, unsigned int *line_nr)
+{
+ FILE *fp;
+ char cmd[PATH_MAX];
+ char *filename = NULL;
+ size_t len;
+ char *sep;
+ int ret = 0;
+
+ scnprintf(cmd, sizeof(cmd), "addr2line -e %s %016"PRIx64,
+ dso_name, addr);
+
+ fp = popen(cmd, "r");
+ if (fp == NULL) {
+ pr_warning("popen failed for %s\n", dso_name);
+ return 0;
+ }
+
+ if (getline(&filename, &len, fp) < 0 || !len) {
+ pr_warning("addr2line has no output for %s\n", dso_name);
+ goto out;
+ }
+
+ sep = strchr(filename, '\n');
+ if (sep)
+ *sep = '\0';
+
+ if (!strcmp(filename, "??:0")) {
+ pr_debug("no debugging info in %s\n", dso_name);
+ free(filename);
+ goto out;
+ }
+
+ sep = strchr(filename, ':');
+ if (sep) {
+ *sep++ = '\0';
+ *file = filename;
+ *line_nr = strtoul(sep, NULL, 0);
+ ret = 1;
+ }
+out:
+ pclose(fp);
+ return ret;
+}
+#endif /* HAVE_LIBBFD_SUPPORT */
+
+char *get_srcline(struct dso *dso, unsigned long addr)
+{
+ char *file = NULL;
+ unsigned line = 0;
+ char *srcline;
+ char *dso_name = dso->long_name;
+ size_t size;
+
+ if (!dso->has_srcline)
+ return SRCLINE_UNKNOWN;
+
+ if (dso_name[0] == '[')
+ goto out;
+
+ if (!strncmp(dso_name, "/tmp/perf-", 10))
+ goto out;
+
+ if (!addr2line(dso_name, addr, &file, &line))
+ goto out;
+
+ /* just calculate actual length */
+ size = snprintf(NULL, 0, "%s:%u", file, line) + 1;
+
+ srcline = malloc(size);
+ if (srcline)
+ snprintf(srcline, size, "%s:%u", file, line);
+ else
+ srcline = SRCLINE_UNKNOWN;
+
+ free(file);
+ return srcline;
+
+out:
+ dso->has_srcline = 0;
+ return SRCLINE_UNKNOWN;
+}
+
+void free_srcline(char *srcline)
+{
+ if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0)
+ free(srcline);
+}
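A minimal usage sketch of the new srcline helpers, mirroring the sort__srcline_cmp() hunk above; here map and ip stand for a resolved struct map and sample address, and SRCLINE_UNKNOWN is the sentinel defined elsewhere.

	/* get_srcline() never returns NULL; free_srcline() skips the sentinel. */
	char *srcline = get_srcline(map->dso, map__rip_2objdump(map, ip));

	printf("%s\n", srcline);	/* e.g. "file.c:123", or SRCLINE_UNKNOWN */
	free_srcline(srcline);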
diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c
index 834c8ebfe38e..67e4a0082822 100644
--- a/tools/perf/util/strfilter.c
+++ b/tools/perf/util/strfilter.c
@@ -10,22 +10,22 @@ static const char *OP_not = "!"; /* Logical NOT */
#define is_operator(c) ((c) == '|' || (c) == '&' || (c) == '!')
#define is_separator(c) (is_operator(c) || (c) == '(' || (c) == ')')
-static void strfilter_node__delete(struct strfilter_node *self)
+static void strfilter_node__delete(struct strfilter_node *node)
{
- if (self) {
- if (self->p && !is_operator(*self->p))
- free((char *)self->p);
- strfilter_node__delete(self->l);
- strfilter_node__delete(self->r);
- free(self);
+ if (node) {
+ if (node->p && !is_operator(*node->p))
+ free((char *)node->p);
+ strfilter_node__delete(node->l);
+ strfilter_node__delete(node->r);
+ free(node);
}
}
-void strfilter__delete(struct strfilter *self)
+void strfilter__delete(struct strfilter *filter)
{
- if (self) {
- strfilter_node__delete(self->root);
- free(self);
+ if (filter) {
+ strfilter_node__delete(filter->root);
+ free(filter);
}
}
@@ -170,30 +170,30 @@ struct strfilter *strfilter__new(const char *rules, const char **err)
return ret;
}
-static bool strfilter_node__compare(struct strfilter_node *self,
+static bool strfilter_node__compare(struct strfilter_node *node,
const char *str)
{
- if (!self || !self->p)
+ if (!node || !node->p)
return false;
- switch (*self->p) {
+ switch (*node->p) {
case '|': /* OR */
- return strfilter_node__compare(self->l, str) ||
- strfilter_node__compare(self->r, str);
+ return strfilter_node__compare(node->l, str) ||
+ strfilter_node__compare(node->r, str);
case '&': /* AND */
- return strfilter_node__compare(self->l, str) &&
- strfilter_node__compare(self->r, str);
+ return strfilter_node__compare(node->l, str) &&
+ strfilter_node__compare(node->r, str);
case '!': /* NOT */
- return !strfilter_node__compare(self->r, str);
+ return !strfilter_node__compare(node->r, str);
default:
- return strglobmatch(str, self->p);
+ return strglobmatch(str, node->p);
}
}
/* Return true if STR matches the filter rules */
-bool strfilter__compare(struct strfilter *self, const char *str)
+bool strfilter__compare(struct strfilter *node, const char *str)
{
- if (!self)
+ if (!node)
return false;
- return strfilter_node__compare(self->root, str);
+ return strfilter_node__compare(node->root, str);
}
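For context, a small sketch of how the strfilter API touched above is used: rules combine glob patterns with & (AND), | (OR), ! (NOT) and parentheses, and strfilter__new() returns NULL on a parse error (error reporting via err is omitted here).

	static bool matches(const char *str)
	{
		const char *err = NULL;
		struct strfilter *filter = strfilter__new("sys_*&!*fork", &err);
		bool ret = false;

		if (filter) {
			/* true for "sys_open", false for "sys_fork" */
			ret = strfilter__compare(filter, str);
			strfilter__delete(filter);
		}
		return ret;
	}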
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index a9c829be5216..eed0b96302af 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -8,7 +8,7 @@
#include "symbol.h"
#include "debug.h"
-#ifndef HAVE_ELF_GETPHDRNUM
+#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
GElf_Ehdr gehdr;
@@ -487,27 +487,27 @@ int filename__read_debuglink(const char *filename, char *debuglink,
ek = elf_kind(elf);
if (ek != ELF_K_ELF)
- goto out_close;
+ goto out_elf_end;
if (gelf_getehdr(elf, &ehdr) == NULL) {
pr_err("%s: cannot get elf header.\n", __func__);
- goto out_close;
+ goto out_elf_end;
}
sec = elf_section_by_name(elf, &ehdr, &shdr,
".gnu_debuglink", NULL);
if (sec == NULL)
- goto out_close;
+ goto out_elf_end;
data = elf_getdata(sec, NULL);
if (data == NULL)
- goto out_close;
+ goto out_elf_end;
/* the start of this section is a zero-terminated string */
strncpy(debuglink, data->d_buf, size);
+out_elf_end:
elf_end(elf);
-
out_close:
close(fd);
out:
@@ -1018,6 +1018,601 @@ int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
return err;
}
+static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
+{
+ ssize_t r;
+ size_t n;
+ int err = -1;
+ char *buf = malloc(page_size);
+
+ if (buf == NULL)
+ return -1;
+
+ if (lseek(to, to_offs, SEEK_SET) != to_offs)
+ goto out;
+
+ if (lseek(from, from_offs, SEEK_SET) != from_offs)
+ goto out;
+
+ while (len) {
+ n = page_size;
+ if (len < n)
+ n = len;
+ /* Use read because mmap won't work on proc files */
+ r = read(from, buf, n);
+ if (r < 0)
+ goto out;
+ if (!r)
+ break;
+ n = r;
+ r = write(to, buf, n);
+ if (r < 0)
+ goto out;
+ if ((size_t)r != n)
+ goto out;
+ len -= n;
+ }
+
+ err = 0;
+out:
+ free(buf);
+ return err;
+}
+
+struct kcore {
+ int fd;
+ int elfclass;
+ Elf *elf;
+ GElf_Ehdr ehdr;
+};
+
+static int kcore__open(struct kcore *kcore, const char *filename)
+{
+ GElf_Ehdr *ehdr;
+
+ kcore->fd = open(filename, O_RDONLY);
+ if (kcore->fd == -1)
+ return -1;
+
+ kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
+ if (!kcore->elf)
+ goto out_close;
+
+ kcore->elfclass = gelf_getclass(kcore->elf);
+ if (kcore->elfclass == ELFCLASSNONE)
+ goto out_end;
+
+ ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
+ if (!ehdr)
+ goto out_end;
+
+ return 0;
+
+out_end:
+ elf_end(kcore->elf);
+out_close:
+ close(kcore->fd);
+ return -1;
+}
+
+static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
+ bool temp)
+{
+ GElf_Ehdr *ehdr;
+
+ kcore->elfclass = elfclass;
+
+ if (temp)
+ kcore->fd = mkstemp(filename);
+ else
+ kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
+ if (kcore->fd == -1)
+ return -1;
+
+ kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
+ if (!kcore->elf)
+ goto out_close;
+
+ if (!gelf_newehdr(kcore->elf, elfclass))
+ goto out_end;
+
+ ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
+ if (!ehdr)
+ goto out_end;
+
+ return 0;
+
+out_end:
+ elf_end(kcore->elf);
+out_close:
+ close(kcore->fd);
+ unlink(filename);
+ return -1;
+}
+
+static void kcore__close(struct kcore *kcore)
+{
+ elf_end(kcore->elf);
+ close(kcore->fd);
+}
+
+static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
+{
+ GElf_Ehdr *ehdr = &to->ehdr;
+ GElf_Ehdr *kehdr = &from->ehdr;
+
+ memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
+ ehdr->e_type = kehdr->e_type;
+ ehdr->e_machine = kehdr->e_machine;
+ ehdr->e_version = kehdr->e_version;
+ ehdr->e_entry = 0;
+ ehdr->e_shoff = 0;
+ ehdr->e_flags = kehdr->e_flags;
+ ehdr->e_phnum = count;
+ ehdr->e_shentsize = 0;
+ ehdr->e_shnum = 0;
+ ehdr->e_shstrndx = 0;
+
+ if (from->elfclass == ELFCLASS32) {
+ ehdr->e_phoff = sizeof(Elf32_Ehdr);
+ ehdr->e_ehsize = sizeof(Elf32_Ehdr);
+ ehdr->e_phentsize = sizeof(Elf32_Phdr);
+ } else {
+ ehdr->e_phoff = sizeof(Elf64_Ehdr);
+ ehdr->e_ehsize = sizeof(Elf64_Ehdr);
+ ehdr->e_phentsize = sizeof(Elf64_Phdr);
+ }
+
+ if (!gelf_update_ehdr(to->elf, ehdr))
+ return -1;
+
+ if (!gelf_newphdr(to->elf, count))
+ return -1;
+
+ return 0;
+}
+
+static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
+ u64 addr, u64 len)
+{
+ GElf_Phdr gphdr;
+ GElf_Phdr *phdr;
+
+ phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
+ if (!phdr)
+ return -1;
+
+ phdr->p_type = PT_LOAD;
+ phdr->p_flags = PF_R | PF_W | PF_X;
+ phdr->p_offset = offset;
+ phdr->p_vaddr = addr;
+ phdr->p_paddr = 0;
+ phdr->p_filesz = len;
+ phdr->p_memsz = len;
+ phdr->p_align = page_size;
+
+ if (!gelf_update_phdr(kcore->elf, idx, phdr))
+ return -1;
+
+ return 0;
+}
+
+static off_t kcore__write(struct kcore *kcore)
+{
+ return elf_update(kcore->elf, ELF_C_WRITE);
+}
+
+struct phdr_data {
+ off_t offset;
+ u64 addr;
+ u64 len;
+};
+
+struct kcore_copy_info {
+ u64 stext;
+ u64 etext;
+ u64 first_symbol;
+ u64 last_symbol;
+ u64 first_module;
+ u64 last_module_symbol;
+ struct phdr_data kernel_map;
+ struct phdr_data modules_map;
+};
+
+static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
+ u64 start)
+{
+ struct kcore_copy_info *kci = arg;
+
+ if (!symbol_type__is_a(type, MAP__FUNCTION))
+ return 0;
+
+ if (strchr(name, '[')) {
+ if (start > kci->last_module_symbol)
+ kci->last_module_symbol = start;
+ return 0;
+ }
+
+ if (!kci->first_symbol || start < kci->first_symbol)
+ kci->first_symbol = start;
+
+ if (!kci->last_symbol || start > kci->last_symbol)
+ kci->last_symbol = start;
+
+ if (!strcmp(name, "_stext")) {
+ kci->stext = start;
+ return 0;
+ }
+
+ if (!strcmp(name, "_etext")) {
+ kci->etext = start;
+ return 0;
+ }
+
+ return 0;
+}
+
+static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
+ const char *dir)
+{
+ char kallsyms_filename[PATH_MAX];
+
+ scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
+
+ if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
+ return -1;
+
+ if (kallsyms__parse(kallsyms_filename, kci,
+ kcore_copy__process_kallsyms) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int kcore_copy__process_modules(void *arg,
+ const char *name __maybe_unused,
+ u64 start)
+{
+ struct kcore_copy_info *kci = arg;
+
+ if (!kci->first_module || start < kci->first_module)
+ kci->first_module = start;
+
+ return 0;
+}
+
+static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
+ const char *dir)
+{
+ char modules_filename[PATH_MAX];
+
+ scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
+
+ if (symbol__restricted_filename(modules_filename, "/proc/modules"))
+ return -1;
+
+ if (modules__parse(modules_filename, kci,
+ kcore_copy__process_modules) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
+ u64 s, u64 e)
+{
+ if (p->addr || s < start || s >= end)
+ return;
+
+ p->addr = s;
+ p->offset = (s - start) + pgoff;
+ p->len = e < end ? e - s : end - s;
+}
+
+static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
+{
+ struct kcore_copy_info *kci = data;
+ u64 end = start + len;
+
+ kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
+ kci->etext);
+
+ kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
+ kci->last_module_symbol);
+
+ return 0;
+}
+
+static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
+{
+ if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
+ Elf *elf)
+{
+ if (kcore_copy__parse_kallsyms(kci, dir))
+ return -1;
+
+ if (kcore_copy__parse_modules(kci, dir))
+ return -1;
+
+ if (kci->stext)
+ kci->stext = round_down(kci->stext, page_size);
+ else
+ kci->stext = round_down(kci->first_symbol, page_size);
+
+ if (kci->etext) {
+ kci->etext = round_up(kci->etext, page_size);
+ } else if (kci->last_symbol) {
+ kci->etext = round_up(kci->last_symbol, page_size);
+ kci->etext += page_size;
+ }
+
+ kci->first_module = round_down(kci->first_module, page_size);
+
+ if (kci->last_module_symbol) {
+ kci->last_module_symbol = round_up(kci->last_module_symbol,
+ page_size);
+ kci->last_module_symbol += page_size;
+ }
+
+ if (!kci->stext || !kci->etext)
+ return -1;
+
+ if (kci->first_module && !kci->last_module_symbol)
+ return -1;
+
+ return kcore_copy__read_maps(kci, elf);
+}
+
+static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
+ const char *name)
+{
+ char from_filename[PATH_MAX];
+ char to_filename[PATH_MAX];
+
+ scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
+ scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
+
+ return copyfile_mode(from_filename, to_filename, 0400);
+}
+
+static int kcore_copy__unlink(const char *dir, const char *name)
+{
+ char filename[PATH_MAX];
+
+ scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
+
+ return unlink(filename);
+}
+
+static int kcore_copy__compare_fds(int from, int to)
+{
+ char *buf_from;
+ char *buf_to;
+ ssize_t ret;
+ size_t len;
+ int err = -1;
+
+ buf_from = malloc(page_size);
+ buf_to = malloc(page_size);
+ if (!buf_from || !buf_to)
+ goto out;
+
+ while (1) {
+ /* Use read because mmap won't work on proc files */
+ ret = read(from, buf_from, page_size);
+ if (ret < 0)
+ goto out;
+
+ if (!ret)
+ break;
+
+ len = ret;
+
+ if (readn(to, buf_to, len) != (int)len)
+ goto out;
+
+ if (memcmp(buf_from, buf_to, len))
+ goto out;
+ }
+
+ err = 0;
+out:
+ free(buf_to);
+ free(buf_from);
+ return err;
+}
+
+static int kcore_copy__compare_files(const char *from_filename,
+ const char *to_filename)
+{
+ int from, to, err = -1;
+
+ from = open(from_filename, O_RDONLY);
+ if (from < 0)
+ return -1;
+
+ to = open(to_filename, O_RDONLY);
+ if (to < 0)
+ goto out_close_from;
+
+ err = kcore_copy__compare_fds(from, to);
+
+ close(to);
+out_close_from:
+ close(from);
+ return err;
+}
+
+static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
+ const char *name)
+{
+ char from_filename[PATH_MAX];
+ char to_filename[PATH_MAX];
+
+ scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
+ scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
+
+ return kcore_copy__compare_files(from_filename, to_filename);
+}
+
+/**
+ * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
+ * @from_dir: from directory
+ * @to_dir: to directory
+ *
+ * This function copies kallsyms, modules and kcore files from one directory to
+ * another. kallsyms and modules are copied entirely. Only code segments are
+ * copied from kcore. It is assumed that two segments suffice: one for the
+ * kernel proper and one for all the modules. The code segments are determined
+ * from kallsyms and modules files. The kernel map starts at _stext or the
+ * lowest function symbol, and ends at _etext or the highest function symbol.
+ * The module map starts at the lowest module address and ends at the highest
+ * module symbol. Start addresses are rounded down to the nearest page. End
+ * addresses are rounded up to the nearest page. An extra page is added to the
+ * highest kernel symbol and highest module symbol to, hopefully, encompass that
+ * symbol too. Because it contains only code sections, the resulting kcore is
+ * unusual. One significant peculiarity is that the mapping (start -> pgoff)
+ * is not the same for the kernel map and the modules map. That happens because
+ * the data is copied adjacently whereas the original kcore has gaps. Finally,
+ * kallsyms and modules files are compared with their copies to check that
+ * modules have not been loaded or unloaded while the copies were taking place.
+ *
+ * Return: %0 on success, %-1 on failure.
+ */
+int kcore_copy(const char *from_dir, const char *to_dir)
+{
+ struct kcore kcore;
+ struct kcore extract;
+ size_t count = 2;
+ int idx = 0, err = -1;
+ off_t offset = page_size, sz, modules_offset = 0;
+ struct kcore_copy_info kci = { .stext = 0, };
+ char kcore_filename[PATH_MAX];
+ char extract_filename[PATH_MAX];
+
+ if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
+ return -1;
+
+ if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
+ goto out_unlink_kallsyms;
+
+ scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
+ scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
+
+ if (kcore__open(&kcore, kcore_filename))
+ goto out_unlink_modules;
+
+ if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
+ goto out_kcore_close;
+
+ if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
+ goto out_kcore_close;
+
+ if (!kci.modules_map.addr)
+ count -= 1;
+
+ if (kcore__copy_hdr(&kcore, &extract, count))
+ goto out_extract_close;
+
+ if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
+ kci.kernel_map.len))
+ goto out_extract_close;
+
+ if (kci.modules_map.addr) {
+ modules_offset = offset + kci.kernel_map.len;
+ if (kcore__add_phdr(&extract, idx, modules_offset,
+ kci.modules_map.addr, kci.modules_map.len))
+ goto out_extract_close;
+ }
+
+ sz = kcore__write(&extract);
+ if (sz < 0 || sz > offset)
+ goto out_extract_close;
+
+ if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
+ kci.kernel_map.len))
+ goto out_extract_close;
+
+ if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
+ extract.fd, modules_offset,
+ kci.modules_map.len))
+ goto out_extract_close;
+
+ if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
+ goto out_extract_close;
+
+ if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
+ goto out_extract_close;
+
+ err = 0;
+
+out_extract_close:
+ kcore__close(&extract);
+ if (err)
+ unlink(extract_filename);
+out_kcore_close:
+ kcore__close(&kcore);
+out_unlink_modules:
+ if (err)
+ kcore_copy__unlink(to_dir, "modules");
+out_unlink_kallsyms:
+ if (err)
+ kcore_copy__unlink(to_dir, "kallsyms");
+
+ return err;
+}
+
+int kcore_extract__create(struct kcore_extract *kce)
+{
+ struct kcore kcore;
+ struct kcore extract;
+ size_t count = 1;
+ int idx = 0, err = -1;
+ off_t offset = page_size, sz;
+
+ if (kcore__open(&kcore, kce->kcore_filename))
+ return -1;
+
+ strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
+ if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
+ goto out_kcore_close;
+
+ if (kcore__copy_hdr(&kcore, &extract, count))
+ goto out_extract_close;
+
+ if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
+ goto out_extract_close;
+
+ sz = kcore__write(&extract);
+ if (sz < 0 || sz > offset)
+ goto out_extract_close;
+
+ if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
+ goto out_extract_close;
+
+ err = 0;
+
+out_extract_close:
+ kcore__close(&extract);
+ if (err)
+ unlink(kce->extract_filename);
+out_kcore_close:
+ kcore__close(&kcore);
+
+ return err;
+}
+
+void kcore_extract__delete(struct kcore_extract *kce)
+{
+ unlink(kce->extract_filename);
+}
+
void symbol__elf_init(void)
{
elf_version(EV_CURRENT);
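A sketch of driving the new kcore extraction helpers added above; struct kcore_extract and PERF_KCORE_EXTRACT live in symbol.h (not shown in this diff), and sym_start, kcore_offs and sym_len below are placeholders for values the caller already knows.

	/* Sketch only: extract one code range from kcore into a temporary ELF. */
	struct kcore_extract kce = {
		.kcore_filename	= "/proc/kcore",
		.addr		= sym_start,	/* virtual address of the code */
		.offs		= kcore_offs,	/* its file offset within kcore */
		.len		= sym_len,
	};

	if (!kcore_extract__create(&kce)) {
		/* kce.extract_filename now names a temporary single-segment ELF */
		...;				/* consume the extracted file */
		kcore_extract__delete(&kce);	/* unlink the temporary file */
	}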
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index 3a802c300fc5..2d2dd0532b5a 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -308,6 +308,21 @@ int file__read_maps(int fd __maybe_unused, bool exe __maybe_unused,
return -1;
}
+int kcore_extract__create(struct kcore_extract *kce __maybe_unused)
+{
+ return -1;
+}
+
+void kcore_extract__delete(struct kcore_extract *kce __maybe_unused)
+{
+}
+
+int kcore_copy(const char *from_dir __maybe_unused,
+ const char *to_dir __maybe_unused)
+{
+ return -1;
+}
+
void symbol__elf_init(void)
{
}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 7eb0362f4ffd..c0c36965fff0 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -51,6 +51,7 @@ static enum dso_binary_type binary_type_symtab[] = {
DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
DSO_BINARY_TYPE__GUEST_KMODULE,
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
+ DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
@@ -159,10 +160,12 @@ again:
if (choose_best_symbol(curr, next) == SYMBOL_A) {
rb_erase(&next->rb_node, symbols);
+ symbol__delete(next);
goto again;
} else {
nd = rb_next(&curr->rb_node);
rb_erase(&curr->rb_node, symbols);
+ symbol__delete(curr);
}
}
}
@@ -499,6 +502,64 @@ out_failure:
return -1;
}
+int modules__parse(const char *filename, void *arg,
+ int (*process_module)(void *arg, const char *name,
+ u64 start))
+{
+ char *line = NULL;
+ size_t n;
+ FILE *file;
+ int err = 0;
+
+ file = fopen(filename, "r");
+ if (file == NULL)
+ return -1;
+
+ while (1) {
+ char name[PATH_MAX];
+ u64 start;
+ char *sep;
+ ssize_t line_len;
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0) {
+ if (feof(file))
+ break;
+ err = -1;
+ goto out;
+ }
+
+ if (!line) {
+ err = -1;
+ goto out;
+ }
+
+ line[--line_len] = '\0'; /* \n */
+
+ sep = strrchr(line, 'x');
+ if (sep == NULL)
+ continue;
+
+ hex2u64(sep + 1, &start);
+
+ sep = strchr(line, ' ');
+ if (sep == NULL)
+ continue;
+
+ *sep = '\0';
+
+ scnprintf(name, sizeof(name), "[%s]", line);
+
+ err = process_module(arg, name, start);
+ if (err)
+ break;
+ }
+out:
+ free(line);
+ fclose(file);
+ return err;
+}
+
struct process_kallsyms_args {
struct map *map;
struct dso *dso;
@@ -739,51 +800,242 @@ bool symbol__restricted_filename(const char *filename,
return restricted;
}
-struct kcore_mapfn_data {
- struct dso *dso;
- enum map_type type;
- struct list_head maps;
+struct module_info {
+ struct rb_node rb_node;
+ char *name;
+ u64 start;
};
-static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
+static void add_module(struct module_info *mi, struct rb_root *modules)
{
- struct kcore_mapfn_data *md = data;
- struct map *map;
+ struct rb_node **p = &modules->rb_node;
+ struct rb_node *parent = NULL;
+ struct module_info *m;
- map = map__new2(start, md->dso, md->type);
- if (map == NULL)
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct module_info, rb_node);
+ if (strcmp(mi->name, m->name) < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&mi->rb_node, parent, p);
+ rb_insert_color(&mi->rb_node, modules);
+}
+
+static void delete_modules(struct rb_root *modules)
+{
+ struct module_info *mi;
+ struct rb_node *next = rb_first(modules);
+
+ while (next) {
+ mi = rb_entry(next, struct module_info, rb_node);
+ next = rb_next(&mi->rb_node);
+ rb_erase(&mi->rb_node, modules);
+ free(mi->name);
+ free(mi);
+ }
+}
+
+static struct module_info *find_module(const char *name,
+ struct rb_root *modules)
+{
+ struct rb_node *n = modules->rb_node;
+
+ while (n) {
+ struct module_info *m;
+ int cmp;
+
+ m = rb_entry(n, struct module_info, rb_node);
+ cmp = strcmp(name, m->name);
+ if (cmp < 0)
+ n = n->rb_left;
+ else if (cmp > 0)
+ n = n->rb_right;
+ else
+ return m;
+ }
+
+ return NULL;
+}
+
+static int __read_proc_modules(void *arg, const char *name, u64 start)
+{
+ struct rb_root *modules = arg;
+ struct module_info *mi;
+
+ mi = zalloc(sizeof(struct module_info));
+ if (!mi)
return -ENOMEM;
- map->end = map->start + len;
- map->pgoff = pgoff;
+ mi->name = strdup(name);
+ mi->start = start;
- list_add(&map->node, &md->maps);
+ if (!mi->name) {
+ free(mi);
+ return -ENOMEM;
+ }
+
+ add_module(mi, modules);
+
+ return 0;
+}
+
+static int read_proc_modules(const char *filename, struct rb_root *modules)
+{
+ if (symbol__restricted_filename(filename, "/proc/modules"))
+ return -1;
+
+ if (modules__parse(filename, modules, __read_proc_modules)) {
+ delete_modules(modules);
+ return -1;
+ }
return 0;
}
+int compare_proc_modules(const char *from, const char *to)
+{
+ struct rb_root from_modules = RB_ROOT;
+ struct rb_root to_modules = RB_ROOT;
+ struct rb_node *from_node, *to_node;
+ struct module_info *from_m, *to_m;
+ int ret = -1;
+
+ if (read_proc_modules(from, &from_modules))
+ return -1;
+
+ if (read_proc_modules(to, &to_modules))
+ goto out_delete_from;
+
+ from_node = rb_first(&from_modules);
+ to_node = rb_first(&to_modules);
+ while (from_node) {
+ if (!to_node)
+ break;
+
+ from_m = rb_entry(from_node, struct module_info, rb_node);
+ to_m = rb_entry(to_node, struct module_info, rb_node);
+
+ if (from_m->start != to_m->start ||
+ strcmp(from_m->name, to_m->name))
+ break;
+
+ from_node = rb_next(from_node);
+ to_node = rb_next(to_node);
+ }
+
+ if (!from_node && !to_node)
+ ret = 0;
+
+ delete_modules(&to_modules);
+out_delete_from:
+ delete_modules(&from_modules);
+
+ return ret;
+}
+
+static int do_validate_kcore_modules(const char *filename, struct map *map,
+ struct map_groups *kmaps)
+{
+ struct rb_root modules = RB_ROOT;
+ struct map *old_map;
+ int err;
+
+ err = read_proc_modules(filename, &modules);
+ if (err)
+ return err;
+
+ old_map = map_groups__first(kmaps, map->type);
+ while (old_map) {
+ struct map *next = map_groups__next(old_map);
+ struct module_info *mi;
+
+ if (old_map == map || old_map->start == map->start) {
+ /* The kernel map */
+ old_map = next;
+ continue;
+ }
+
+ /* Module must be in memory at the same address */
+ mi = find_module(old_map->dso->short_name, &modules);
+ if (!mi || mi->start != old_map->start) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ old_map = next;
+ }
+out:
+ delete_modules(&modules);
+ return err;
+}
+
/*
- * If kallsyms is referenced by name then we look for kcore in the same
+ * If kallsyms is referenced by name then we look for filename in the same
* directory.
*/
-static bool kcore_filename_from_kallsyms_filename(char *kcore_filename,
- const char *kallsyms_filename)
+static bool filename_from_kallsyms_filename(char *filename,
+ const char *base_name,
+ const char *kallsyms_filename)
{
char *name;
- strcpy(kcore_filename, kallsyms_filename);
- name = strrchr(kcore_filename, '/');
+ strcpy(filename, kallsyms_filename);
+ name = strrchr(filename, '/');
if (!name)
return false;
- if (!strcmp(name, "/kallsyms")) {
- strcpy(name, "/kcore");
+ name += 1;
+
+ if (!strcmp(name, "kallsyms")) {
+ strcpy(name, base_name);
return true;
}
return false;
}
+static int validate_kcore_modules(const char *kallsyms_filename,
+ struct map *map)
+{
+ struct map_groups *kmaps = map__kmap(map)->kmaps;
+ char modules_filename[PATH_MAX];
+
+ if (!filename_from_kallsyms_filename(modules_filename, "modules",
+ kallsyms_filename))
+ return -EINVAL;
+
+ if (do_validate_kcore_modules(modules_filename, map, kmaps))
+ return -EINVAL;
+
+ return 0;
+}
+
+struct kcore_mapfn_data {
+ struct dso *dso;
+ enum map_type type;
+ struct list_head maps;
+};
+
+static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
+{
+ struct kcore_mapfn_data *md = data;
+ struct map *map;
+
+ map = map__new2(start, md->dso, md->type);
+ if (map == NULL)
+ return -ENOMEM;
+
+ map->end = map->start + len;
+ map->pgoff = pgoff;
+
+ list_add(&map->node, &md->maps);
+
+ return 0;
+}
+
static int dso__load_kcore(struct dso *dso, struct map *map,
const char *kallsyms_filename)
{
@@ -800,8 +1052,12 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
if (map != machine->vmlinux_maps[map->type])
return -EINVAL;
- if (!kcore_filename_from_kallsyms_filename(kcore_filename,
- kallsyms_filename))
+ if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
+ kallsyms_filename))
+ return -EINVAL;
+
+ /* All modules must be present at their original addresses */
+ if (validate_kcore_modules(kallsyms_filename, map))
return -EINVAL;
md.dso = dso;
@@ -1188,6 +1444,105 @@ out:
return err;
}
+static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
+{
+ char kallsyms_filename[PATH_MAX];
+ struct dirent *dent;
+ int ret = -1;
+ DIR *d;
+
+ d = opendir(dir);
+ if (!d)
+ return -1;
+
+ while (1) {
+ dent = readdir(d);
+ if (!dent)
+ break;
+ if (dent->d_type != DT_DIR)
+ continue;
+ scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
+ "%s/%s/kallsyms", dir, dent->d_name);
+ if (!validate_kcore_modules(kallsyms_filename, map)) {
+ strlcpy(dir, kallsyms_filename, dir_sz);
+ ret = 0;
+ break;
+ }
+ }
+
+ closedir(d);
+
+ return ret;
+}
+
+static char *dso__find_kallsyms(struct dso *dso, struct map *map)
+{
+ u8 host_build_id[BUILD_ID_SIZE];
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+ bool is_host = false;
+ char path[PATH_MAX];
+
+ if (!dso->has_build_id) {
+ /*
+ * Last resort, if we don't have a build-id and couldn't find
+ * any vmlinux file, try the running kernel kallsyms table.
+ */
+ goto proc_kallsyms;
+ }
+
+ if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
+ sizeof(host_build_id)) == 0)
+ is_host = dso__build_id_equal(dso, host_build_id);
+
+ build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
+
+ /* Use /proc/kallsyms if possible */
+ if (is_host) {
+ DIR *d;
+ int fd;
+
+ /* If no cached kcore go with /proc/kallsyms */
+ scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s",
+ buildid_dir, sbuild_id);
+ d = opendir(path);
+ if (!d)
+ goto proc_kallsyms;
+ closedir(d);
+
+ /*
+ * Do not check the build-id cache, until we know we cannot use
+ * /proc/kcore.
+ */
+ fd = open("/proc/kcore", O_RDONLY);
+ if (fd != -1) {
+ close(fd);
+ /* If module maps match go with /proc/kallsyms */
+ if (!validate_kcore_modules("/proc/kallsyms", map))
+ goto proc_kallsyms;
+ }
+
+ /* Find kallsyms in build-id cache with kcore */
+ if (!find_matching_kcore(map, path, sizeof(path)))
+ return strdup(path);
+
+ goto proc_kallsyms;
+ }
+
+ scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
+ buildid_dir, sbuild_id);
+
+ if (access(path, F_OK)) {
+ pr_err("No kallsyms or vmlinux with build-id %s was found\n",
+ sbuild_id);
+ return NULL;
+ }
+
+ return strdup(path);
+
+proc_kallsyms:
+ return strdup("/proc/kallsyms");
+}
+
static int dso__load_kernel_sym(struct dso *dso, struct map *map,
symbol_filter_t filter)
{
@@ -1214,7 +1569,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
goto do_kallsyms;
}
- if (symbol_conf.vmlinux_name != NULL) {
+ if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
err = dso__load_vmlinux(dso, map,
symbol_conf.vmlinux_name, filter);
if (err > 0) {
@@ -1226,7 +1581,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
return err;
}
- if (vmlinux_path != NULL) {
+ if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
err = dso__load_vmlinux_path(dso, map, filter);
if (err > 0)
return err;
@@ -1236,51 +1591,11 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
if (symbol_conf.symfs[0] != 0)
return -1;
- /*
- * Say the kernel DSO was created when processing the build-id header table,
- * we have a build-id, so check if it is the same as the running kernel,
- * using it if it is.
- */
- if (dso->has_build_id) {
- u8 kallsyms_build_id[BUILD_ID_SIZE];
- char sbuild_id[BUILD_ID_SIZE * 2 + 1];
-
- if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id,
- sizeof(kallsyms_build_id)) == 0) {
- if (dso__build_id_equal(dso, kallsyms_build_id)) {
- kallsyms_filename = "/proc/kallsyms";
- goto do_kallsyms;
- }
- }
- /*
- * Now look if we have it on the build-id cache in
- * $HOME/.debug/[kernel.kallsyms].
- */
- build_id__sprintf(dso->build_id, sizeof(dso->build_id),
- sbuild_id);
-
- if (asprintf(&kallsyms_allocated_filename,
- "%s/.debug/[kernel.kallsyms]/%s",
- getenv("HOME"), sbuild_id) == -1) {
- pr_err("Not enough memory for kallsyms file lookup\n");
- return -1;
- }
-
- kallsyms_filename = kallsyms_allocated_filename;
+ kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
+ if (!kallsyms_allocated_filename)
+ return -1;
- if (access(kallsyms_filename, F_OK)) {
- pr_err("No kallsyms or vmlinux with build-id %s "
- "was found\n", sbuild_id);
- free(kallsyms_allocated_filename);
- return -1;
- }
- } else {
- /*
- * Last resort, if we don't have a build-id and couldn't find
- * any vmlinux file, try the running kernel kallsyms table.
- */
- kallsyms_filename = "/proc/kallsyms";
- }
+ kallsyms_filename = kallsyms_allocated_filename;
do_kallsyms:
err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index fd5b70ea2981..07de8fea2f48 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -13,7 +13,7 @@
#include <libgen.h>
#include "build-id.h"
-#ifdef LIBELF_SUPPORT
+#ifdef HAVE_LIBELF_SUPPORT
#include <libelf.h>
#include <gelf.h>
#endif
@@ -21,7 +21,7 @@
#include "dso.h"
-#ifdef HAVE_CPLUS_DEMANGLE
+#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
extern char *cplus_demangle(const char *, int);
static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
@@ -46,7 +46,7 @@ static inline char *bfd_demangle(void __maybe_unused *v,
* libelf 0.8.x and earlier do not support ELF_C_READ_MMAP;
* for newer versions we can use mmap to reduce memory usage:
*/
-#ifdef LIBELF_MMAP
+#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define PERF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define PERF_ELF_C_READ_MMAP ELF_C_READ
@@ -85,6 +85,7 @@ struct symbol_conf {
unsigned short priv_size;
unsigned short nr_events;
bool try_vmlinux_path,
+ ignore_vmlinux,
show_kernel_path,
use_modules,
sort_by_name,
@@ -178,7 +179,7 @@ struct symsrc {
int fd;
enum dso_binary_type type;
-#ifdef LIBELF_SUPPORT
+#ifdef HAVE_LIBELF_SUPPORT
Elf *elf;
GElf_Ehdr ehdr;
@@ -222,6 +223,9 @@ int sysfs__read_build_id(const char *filename, void *bf, size_t size);
int kallsyms__parse(const char *filename, void *arg,
int (*process_symbol)(void *arg, const char *name,
char type, u64 start));
+int modules__parse(const char *filename, void *arg,
+ int (*process_module)(void *arg, const char *name,
+ u64 start));
int filename__read_debuglink(const char *filename, char *debuglink,
size_t size);
@@ -252,4 +256,21 @@ typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
bool *is_64_bit);
+#define PERF_KCORE_EXTRACT "/tmp/perf-kcore-XXXXXX"
+
+struct kcore_extract {
+ char *kcore_filename;
+ u64 addr;
+ u64 offs;
+ u64 len;
+ char extract_filename[sizeof(PERF_KCORE_EXTRACT)];
+ int fd;
+};
+
+int kcore_extract__create(struct kcore_extract *kce);
+void kcore_extract__delete(struct kcore_extract *kce);
+
+int kcore_copy(const char *from_dir, const char *to_dir);
+int compare_proc_modules(const char *from, const char *to);
+
#endif /* __PERF_SYMBOL */
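
The kcore_extract interface declared here is meant for pulling a small address
range out of a large kcore image into a temporary file that other tools can read
directly. A hedged usage sketch (the field usage is a best guess from the
declarations above; sym_start, sym_end and disassemble() are invented):

        struct kcore_extract kce = {
                .kcore_filename = "/proc/kcore",
                .addr           = sym_start,            /* runtime address */
                .offs           = sym_start,
                .len            = sym_end - sym_start,
        };

        if (!kcore_extract__create(&kce)) {             /* 0 on success */
                disassemble(kce.extract_filename);      /* hypothetical helper */
                kcore_extract__delete(&kce);            /* removes the temp file */
        }
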
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index e3d4a550a703..80d19a086072 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -9,51 +9,51 @@
struct thread *thread__new(pid_t pid, pid_t tid)
{
- struct thread *self = zalloc(sizeof(*self));
+ struct thread *thread = zalloc(sizeof(*thread));
- if (self != NULL) {
- map_groups__init(&self->mg);
- self->pid_ = pid;
- self->tid = tid;
- self->ppid = -1;
- self->comm = malloc(32);
- if (self->comm)
- snprintf(self->comm, 32, ":%d", self->tid);
+ if (thread != NULL) {
+ map_groups__init(&thread->mg);
+ thread->pid_ = pid;
+ thread->tid = tid;
+ thread->ppid = -1;
+ thread->comm = malloc(32);
+ if (thread->comm)
+ snprintf(thread->comm, 32, ":%d", thread->tid);
}
- return self;
+ return thread;
}
-void thread__delete(struct thread *self)
+void thread__delete(struct thread *thread)
{
- map_groups__exit(&self->mg);
- free(self->comm);
- free(self);
+ map_groups__exit(&thread->mg);
+ free(thread->comm);
+ free(thread);
}
-int thread__set_comm(struct thread *self, const char *comm)
+int thread__set_comm(struct thread *thread, const char *comm)
{
int err;
- if (self->comm)
- free(self->comm);
- self->comm = strdup(comm);
- err = self->comm == NULL ? -ENOMEM : 0;
+ if (thread->comm)
+ free(thread->comm);
+ thread->comm = strdup(comm);
+ err = thread->comm == NULL ? -ENOMEM : 0;
if (!err) {
- self->comm_set = true;
+ thread->comm_set = true;
}
return err;
}
-int thread__comm_len(struct thread *self)
+int thread__comm_len(struct thread *thread)
{
- if (!self->comm_len) {
- if (!self->comm)
+ if (!thread->comm_len) {
+ if (!thread->comm)
return 0;
- self->comm_len = strlen(self->comm);
+ thread->comm_len = strlen(thread->comm);
}
- return self->comm_len;
+ return thread->comm_len;
}
size_t thread__fprintf(struct thread *thread, FILE *fp)
@@ -62,30 +62,30 @@ size_t thread__fprintf(struct thread *thread, FILE *fp)
map_groups__fprintf(&thread->mg, verbose, fp);
}
-void thread__insert_map(struct thread *self, struct map *map)
+void thread__insert_map(struct thread *thread, struct map *map)
{
- map_groups__fixup_overlappings(&self->mg, map, verbose, stderr);
- map_groups__insert(&self->mg, map);
+ map_groups__fixup_overlappings(&thread->mg, map, verbose, stderr);
+ map_groups__insert(&thread->mg, map);
}
-int thread__fork(struct thread *self, struct thread *parent)
+int thread__fork(struct thread *thread, struct thread *parent)
{
int i;
if (parent->comm_set) {
- if (self->comm)
- free(self->comm);
- self->comm = strdup(parent->comm);
- if (!self->comm)
+ if (thread->comm)
+ free(thread->comm);
+ thread->comm = strdup(parent->comm);
+ if (!thread->comm)
return -ENOMEM;
- self->comm_set = true;
+ thread->comm_set = true;
}
for (i = 0; i < MAP__NR_TYPES; ++i)
- if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
+ if (map_groups__clone(&thread->mg, &parent->mg, i) < 0)
return -ENOMEM;
- self->ppid = parent->tid;
+ thread->ppid = parent->tid;
return 0;
}
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index b554ffc462b6..88cfeaff600b 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -24,6 +24,7 @@ struct perf_top {
u64 exact_samples;
u64 guest_us_samples, guest_kernel_samples;
int print_entries, count_filter, delay_secs;
+ int max_stack;
bool hide_kernel_symbols, hide_user_symbols, zero;
bool use_tui, use_stdio;
bool kptr_restrict_warned;
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index e9e1c03f927d..6681f71f2f95 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -120,42 +120,6 @@ raw_field_value(struct event_format *event, const char *name, void *data)
return val;
}
-void *raw_field_ptr(struct event_format *event, const char *name, void *data)
-{
- struct format_field *field;
-
- field = pevent_find_any_field(event, name);
- if (!field)
- return NULL;
-
- if (field->flags & FIELD_IS_DYNAMIC) {
- int offset;
-
- offset = *(int *)(data + field->offset);
- offset &= 0xffff;
-
- return data + offset;
- }
-
- return data + field->offset;
-}
-
-int trace_parse_common_type(struct pevent *pevent, void *data)
-{
- struct pevent_record record;
-
- record.data = data;
- return pevent_data_type(pevent, &record);
-}
-
-int trace_parse_common_pid(struct pevent *pevent, void *data)
-{
- struct pevent_record record;
-
- record.data = data;
- return pevent_data_pid(pevent, &record);
-}
-
unsigned long long read_size(struct event_format *event, void *ptr, int size)
{
return pevent_read_number(event->pevent, ptr, size);
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index fafe1a40444a..04df63114109 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -11,8 +11,6 @@ union perf_event;
struct perf_tool;
struct thread;
-extern struct pevent *perf_pevent;
-
int bigendian(void);
struct pevent *read_trace_init(int file_bigendian, int host_bigendian);
@@ -23,26 +21,19 @@ int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size);
int parse_event_file(struct pevent *pevent,
char *buf, unsigned long size, char *sys);
-struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu);
-
unsigned long long
raw_field_value(struct event_format *event, const char *name, void *data);
-void *raw_field_ptr(struct event_format *event, const char *name, void *data);
void parse_proc_kallsyms(struct pevent *pevent, char *file, unsigned int size);
void parse_ftrace_printk(struct pevent *pevent, char *file, unsigned int size);
ssize_t trace_report(int fd, struct pevent **pevent, bool repipe);
-int trace_parse_common_type(struct pevent *pevent, void *data);
-int trace_parse_common_pid(struct pevent *pevent, void *data);
-
struct event_format *trace_find_next_event(struct pevent *pevent,
struct event_format *event);
unsigned long long read_size(struct event_format *event, void *ptr, int size);
unsigned long long eval_flag(const char *flag);
-struct pevent_record *trace_read_data(struct pevent *pevent, int cpu);
int read_tracing_data(int fd, struct list_head *pattrs);
struct tracing_data {
diff --git a/tools/perf/util/unwind.c b/tools/perf/util/unwind.c
index 2f891f7e70bf..5390d0b8862a 100644
--- a/tools/perf/util/unwind.c
+++ b/tools/perf/util/unwind.c
@@ -39,6 +39,15 @@ UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+ unw_word_t ip,
+ unw_word_t segbase,
+ const char *obj_name, unw_word_t start,
+ unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
#define DW_EH_PE_FORMAT_MASK 0x0f /* format of the encoded value */
#define DW_EH_PE_APPL_MASK 0x70 /* how the value is to be applied */
@@ -245,8 +254,9 @@ static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
return 0;
}
-static int read_unwind_spec(struct dso *dso, struct machine *machine,
- u64 *table_data, u64 *segbase, u64 *fde_count)
+static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
+ u64 *table_data, u64 *segbase,
+ u64 *fde_count)
{
int ret = -EINVAL, fd;
u64 offset;
@@ -255,6 +265,7 @@ static int read_unwind_spec(struct dso *dso, struct machine *machine,
if (fd < 0)
return -EINVAL;
+ /* Check the .eh_frame section for unwinding info */
offset = elf_section_offset(fd, ".eh_frame_hdr");
close(fd);
@@ -263,10 +274,29 @@ static int read_unwind_spec(struct dso *dso, struct machine *machine,
table_data, segbase,
fde_count);
- /* TODO .debug_frame check if eh_frame_hdr fails */
return ret;
}
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static int read_unwind_spec_debug_frame(struct dso *dso,
+ struct machine *machine, u64 *offset)
+{
+ int fd = dso__data_fd(dso, machine);
+
+ if (fd < 0)
+ return -EINVAL;
+
+ /* Check the .debug_frame section for unwinding info */
+ *offset = elf_section_offset(fd, ".debug_frame");
+ close(fd);
+
+ if (*offset)
+ return 0;
+
+ return -EINVAL;
+}
+#endif
+
static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
{
struct addr_location al;
@@ -291,20 +321,33 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
pr_debug("unwind: find_proc_info dso %s\n", map->dso->name);
- if (read_unwind_spec(map->dso, ui->machine,
- &table_data, &segbase, &fde_count))
- return -EINVAL;
+ /* Check the .eh_frame section for unwinding info */
+ if (!read_unwind_spec_eh_frame(map->dso, ui->machine,
+ &table_data, &segbase, &fde_count)) {
+ memset(&di, 0, sizeof(di));
+ di.format = UNW_INFO_FORMAT_REMOTE_TABLE;
+ di.start_ip = map->start;
+ di.end_ip = map->end;
+ di.u.rti.segbase = map->start + segbase;
+ di.u.rti.table_data = map->start + table_data;
+ di.u.rti.table_len = fde_count * sizeof(struct table_entry)
+ / sizeof(unw_word_t);
+ return dwarf_search_unwind_table(as, ip, &di, pi,
+ need_unwind_info, arg);
+ }
+
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+ /* Check the .debug_frame section for unwinding info */
+ if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
+ memset(&di, 0, sizeof(di));
+ dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name,
+ map->start, map->end);
+ return dwarf_search_unwind_table(as, ip, &di, pi,
+ need_unwind_info, arg);
+ }
+#endif
- memset(&di, 0, sizeof(di));
- di.format = UNW_INFO_FORMAT_REMOTE_TABLE;
- di.start_ip = map->start;
- di.end_ip = map->end;
- di.u.rti.segbase = map->start + segbase;
- di.u.rti.table_data = map->start + table_data;
- di.u.rti.table_len = fde_count * sizeof(struct table_entry)
- / sizeof(unw_word_t);
- return dwarf_search_unwind_table(as, ip, &di, pi,
- need_unwind_info, arg);
+ return -EINVAL;
}
static int access_fpreg(unw_addr_space_t __maybe_unused as,
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
index cb6bc503a792..ec0c71a2ca2e 100644
--- a/tools/perf/util/unwind.h
+++ b/tools/perf/util/unwind.h
@@ -13,7 +13,7 @@ struct unwind_entry {
typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
-#ifdef LIBUNWIND_SUPPORT
+#ifdef HAVE_LIBUNWIND_SUPPORT
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct machine *machine,
struct thread *thread,
@@ -31,5 +31,5 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
{
return 0;
}
-#endif /* LIBUNWIND_SUPPORT */
+#endif /* HAVE_LIBUNWIND_SUPPORT */
#endif /* __UNWIND_H */
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 6d17b18e915d..28a0a89c1f73 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -1,7 +1,7 @@
#include "../perf.h"
#include "util.h"
#include <sys/mman.h>
-#ifdef BACKTRACE_SUPPORT
+#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#endif
#include <stdio.h>
@@ -55,17 +55,20 @@ int mkdir_p(char *path, mode_t mode)
return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
}
-static int slow_copyfile(const char *from, const char *to)
+static int slow_copyfile(const char *from, const char *to, mode_t mode)
{
- int err = 0;
+ int err = -1;
char *line = NULL;
size_t n;
FILE *from_fp = fopen(from, "r"), *to_fp;
+ mode_t old_umask;
if (from_fp == NULL)
goto out;
+ old_umask = umask(mode ^ 0777);
to_fp = fopen(to, "w");
+ umask(old_umask);
if (to_fp == NULL)
goto out_fclose_from;
@@ -82,7 +85,7 @@ out:
return err;
}
-int copyfile(const char *from, const char *to)
+int copyfile_mode(const char *from, const char *to, mode_t mode)
{
int fromfd, tofd;
struct stat st;
@@ -93,13 +96,13 @@ int copyfile(const char *from, const char *to)
goto out;
if (st.st_size == 0) /* /proc? do it slowly... */
- return slow_copyfile(from, to);
+ return slow_copyfile(from, to, mode);
fromfd = open(from, O_RDONLY);
if (fromfd < 0)
goto out;
- tofd = creat(to, 0755);
+ tofd = creat(to, mode);
if (tofd < 0)
goto out_close_from;
@@ -121,6 +124,11 @@ out:
return err;
}
+int copyfile(const char *from, const char *to)
+{
+ return copyfile_mode(from, to, 0755);
+}
+
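copyfile_mode() keeps the fast path for regular files and threads the requested
mode through both branches; for zero-sized /proc files the temporary umask twist
in slow_copyfile() makes the destination pick up the requested permission bits.
A hedged usage sketch (destination path invented):

        /* /proc/kallsyms reports st_size == 0, so this takes the
         * slow_copyfile() path; the copy still ends up owner-read-only. */
        if (copyfile_mode("/proc/kallsyms", "/tmp/kallsyms.copy", 0400))
                fprintf(stderr, "failed to copy kallsyms\n");
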
unsigned long convert_unit(unsigned long value, char *unit)
{
*unit = ' ';
@@ -204,7 +212,7 @@ int hex2u64(const char *ptr, u64 *long_val)
}
/* Obtain a backtrace and print it to stdout. */
-#ifdef BACKTRACE_SUPPORT
+#ifdef HAVE_BACKTRACE_SUPPORT
void dump_stack(void)
{
void *array[16];
@@ -361,3 +369,47 @@ int parse_nsec_time(const char *str, u64 *ptime)
*ptime = time_sec * NSEC_PER_SEC + time_nsec;
return 0;
}
+
+unsigned long parse_tag_value(const char *str, struct parse_tag *tags)
+{
+ struct parse_tag *i = tags;
+
+ while (i->tag) {
+ char *s;
+
+ s = strchr(str, i->tag);
+ if (s) {
+ unsigned long int value;
+ char *endptr;
+
+ value = strtoul(str, &endptr, 10);
+ if (s != endptr)
+ break;
+
+ if (value > ULONG_MAX / i->mult)
+ break;
+ value *= i->mult;
+ return value;
+ }
+ i++;
+ }
+
+ return (unsigned long) -1;
+}
+
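parse_tag_value() scans the string for the first tag character it knows about,
requires the number to end exactly at that character, and guards the
multiplication against overflow; anything else yields (unsigned long)-1. A
hedged usage sketch (the tag table and wrapper are invented, but mirror how
size-style options are typically parsed):

        static struct parse_tag size_tags[] = {
                { .tag = 'B', .mult = 1 },
                { .tag = 'K', .mult = 1 << 10 },
                { .tag = 'M', .mult = 1 << 20 },
                { .tag = 'G', .mult = 1 << 30 },
                { .tag = 0, .mult = 0 },
        };

        static unsigned long parse_size(const char *str)
        {
                /* "4M" -> 4194304, "512K" -> 524288,
                 * "17" (no unit) -> (unsigned long)-1 */
                return parse_tag_value(str, size_tags);
        }
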
+int filename__read_int(const char *filename, int *value)
+{
+ char line[64];
+ int fd = open(filename, O_RDONLY), err = -1;
+
+ if (fd < 0)
+ return -1;
+
+ if (read(fd, line, sizeof(line)) > 0) {
+ *value = atoi(line);
+ err = 0;
+ }
+
+ close(fd);
+ return err;
+}
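filename__read_int() is a small convenience helper for proc/sysfs files that
contain a single integer. A hedged usage sketch (the sysctl path is just an
example of the kind of file it suits):

        int kptr_restrict = 0;

        if (filename__read_int("/proc/sys/kernel/kptr_restrict",
                               &kptr_restrict) == 0)
                fprintf(stderr, "kptr_restrict is %d\n", kptr_restrict);
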
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index a53535949043..c8f362daba87 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -128,6 +128,8 @@ void put_tracing_file(char *file);
#endif
#endif
+#define PERF_GTK_DSO "libperf-gtk.so"
+
/* General helper functions */
extern void usage(const char *err) NORETURN;
extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2)));
@@ -241,6 +243,7 @@ static inline int sane_case(int x, int high)
int mkdir_p(char *path, mode_t mode);
int copyfile(const char *from, const char *to);
+int copyfile_mode(const char *from, const char *to, mode_t mode);
s64 perf_atoll(const char *str);
char **argv_split(const char *str, int *argcp);
@@ -270,6 +273,13 @@ bool is_power_of_2(unsigned long n)
return (n != 0 && ((n & (n - 1)) == 0));
}
+static inline unsigned next_pow2(unsigned x)
+{
+ if (x <= 1) /* also avoids __builtin_clz(0), which is undefined */
+ return 1;
+ return 1ULL << (32 - __builtin_clz(x - 1));
+}
+
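next_pow2() rounds its argument up to the nearest power of two using the
count-leading-zeros builtin, so callers can size hash tables or ring buffers
without a loop. A few sample values as a sketch (the variable names are
invented):

        /* next_pow2(0) == 1, next_pow2(3) == 4,
         * next_pow2(1000) == 1024, next_pow2(1024) == 1024 */
        unsigned nr_slots = next_pow2(nr_requested_events);
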
size_t hex_width(u64 v);
int hex2u64(const char *ptr, u64 *val);
@@ -281,4 +291,20 @@ void dump_stack(void);
extern unsigned int page_size;
void get_term_dimensions(struct winsize *ws);
+
+struct parse_tag {
+ char tag;
+ int mult;
+};
+
+unsigned long parse_tag_value(const char *str, struct parse_tag *tags);
+
+#define SRCLINE_UNKNOWN ((char *) "??:0")
+
+struct dso;
+
+char *get_srcline(struct dso *dso, unsigned long addr);
+void free_srcline(char *srcline);
+
+int filename__read_int(const char *filename, int *value);
#endif /* GIT_COMPAT_UTIL_H */
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index fe702076ca46..2bb8bf506681 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2,7 +2,7 @@
* turbostat -- show CPU frequency and C-state residency
* on modern Intel turbo-capable processors.
*
- * Copyright (c) 2012 Intel Corporation.
+ * Copyright (c) 2013 Intel Corporation.
* Len Brown <len.brown@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -47,6 +47,8 @@ unsigned int skip_c1;
unsigned int do_nhm_cstates;
unsigned int do_snb_cstates;
unsigned int do_c8_c9_c10;
+unsigned int do_slm_cstates;
+unsigned int use_c1_residency_msr;
unsigned int has_aperf;
unsigned int has_epb;
unsigned int units = 1000000000; /* Ghz etc */
@@ -81,6 +83,8 @@ double rapl_joule_counter_range;
#define RAPL_DRAM (1 << 3)
#define RAPL_PKG_PERF_STATUS (1 << 4)
#define RAPL_DRAM_PERF_STATUS (1 << 5)
+#define RAPL_PKG_POWER_INFO (1 << 6)
+#define RAPL_CORE_POLICY (1 << 7)
#define TJMAX_DEFAULT 100
#define MAX(a, b) ((a) > (b) ? (a) : (b))
@@ -96,7 +100,7 @@ struct thread_data {
unsigned long long tsc;
unsigned long long aperf;
unsigned long long mperf;
- unsigned long long c1; /* derived */
+ unsigned long long c1;
unsigned long long extra_msr64;
unsigned long long extra_delta64;
unsigned long long extra_msr32;
@@ -266,7 +270,7 @@ void print_header(void)
outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64);
if (do_nhm_cstates)
outp += sprintf(outp, " %%c1");
- if (do_nhm_cstates)
+ if (do_nhm_cstates && !do_slm_cstates)
outp += sprintf(outp, " %%c3");
if (do_nhm_cstates)
outp += sprintf(outp, " %%c6");
@@ -280,9 +284,9 @@ void print_header(void)
if (do_snb_cstates)
outp += sprintf(outp, " %%pc2");
- if (do_nhm_cstates)
+ if (do_nhm_cstates && !do_slm_cstates)
outp += sprintf(outp, " %%pc3");
- if (do_nhm_cstates)
+ if (do_nhm_cstates && !do_slm_cstates)
outp += sprintf(outp, " %%pc6");
if (do_snb_cstates)
outp += sprintf(outp, " %%pc7");
@@ -480,7 +484,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
goto done;
- if (do_nhm_cstates)
+ if (do_nhm_cstates && !do_slm_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc);
if (do_nhm_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc);
@@ -499,9 +503,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (do_snb_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc);
- if (do_nhm_cstates)
+ if (do_nhm_cstates && !do_slm_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc);
- if (do_nhm_cstates)
+ if (do_nhm_cstates && !do_slm_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
if (do_snb_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
@@ -648,17 +652,24 @@ delta_thread(struct thread_data *new, struct thread_data *old,
}
- /*
- * As counter collection is not atomic,
- * it is possible for mperf's non-halted cycles + idle states
- * to exceed TSC's all cycles: show c1 = 0% in that case.
- */
- if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
- old->c1 = 0;
- else {
- /* normal case, derive c1 */
- old->c1 = old->tsc - old->mperf - core_delta->c3
+ if (use_c1_residency_msr) {
+ /*
+ * Some models have a dedicated C1 residency MSR,
+ * which should be more accurate than the derivation below.
+ */
+ } else {
+ /*
+ * As counter collection is not atomic,
+ * it is possible for mperf's non-halted cycles + idle states
+ * to exceed TSC's all cycles: show c1 = 0% in that case.
+ */
+ if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
+ old->c1 = 0;
+ else {
+ /* normal case, derive c1 */
+ old->c1 = old->tsc - old->mperf - core_delta->c3
- core_delta->c6 - core_delta->c7;
+ }
}
if (old->mperf == 0) {
@@ -872,13 +883,21 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64))
return -5;
+ if (use_c1_residency_msr) {
+ if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
+ return -6;
+ }
+
/* collect core counters only for 1st thread in core */
if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
return 0;
- if (do_nhm_cstates) {
+ if (do_nhm_cstates && !do_slm_cstates) {
if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
return -6;
+ }
+
+ if (do_nhm_cstates) {
if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
return -7;
}
@@ -898,7 +917,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
return 0;
- if (do_nhm_cstates) {
+ if (do_nhm_cstates && !do_slm_cstates) {
if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
return -9;
if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
@@ -1046,25 +1065,28 @@ print_nhm_turbo_ratio_limits:
switch(msr & 0x7) {
case 0:
- fprintf(stderr, "pc0");
+ fprintf(stderr, do_slm_cstates ? "no pkg states" : "pc0");
break;
case 1:
- fprintf(stderr, do_snb_cstates ? "pc2" : "pc0");
+ fprintf(stderr, do_slm_cstates ? "no pkg states" : do_snb_cstates ? "pc2" : "pc0");
break;
case 2:
- fprintf(stderr, do_snb_cstates ? "pc6-noret" : "pc3");
+ fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc6-noret" : "pc3");
break;
case 3:
- fprintf(stderr, "pc6");
+ fprintf(stderr, do_slm_cstates ? "invalid" : "pc6");
break;
case 4:
- fprintf(stderr, "pc7");
+ fprintf(stderr, do_slm_cstates ? "pc4" : "pc7");
break;
case 5:
- fprintf(stderr, do_snb_cstates ? "pc7s" : "invalid");
+ fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc7s" : "invalid");
+ break;
+ case 6:
+ fprintf(stderr, do_slm_cstates ? "pc6" : "invalid");
break;
case 7:
- fprintf(stderr, "unlimited");
+ fprintf(stderr, do_slm_cstates ? "pc7" : "unlimited");
break;
default:
fprintf(stderr, "invalid");
@@ -1460,6 +1482,7 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
case 0x3F: /* HSW */
case 0x45: /* HSW */
case 0x46: /* HSW */
+ case 0x4D: /* AVN */
return 1;
case 0x2E: /* Nehalem-EX Xeon - Beckton */
case 0x2F: /* Westmere-EX Xeon - Eagleton */
@@ -1555,11 +1578,14 @@ void rapl_probe(unsigned int family, unsigned int model)
case 0x3F: /* HSW */
case 0x45: /* HSW */
case 0x46: /* HSW */
- do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;
+ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
break;
case 0x2D:
case 0x3E:
- do_rapl = RAPL_PKG | RAPL_CORES | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS;
+ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
+ break;
+ case 0x4D: /* AVN */
+ do_rapl = RAPL_PKG | RAPL_CORES;
break;
default:
return;
@@ -1573,17 +1599,18 @@ void rapl_probe(unsigned int family, unsigned int model)
rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
rapl_time_units = 1.0 / (1 << (msr >> 16 & 0xF));
- /* get TDP to determine energy counter range */
- if (get_msr(0, MSR_PKG_POWER_INFO, &msr))
- return;
+ if (do_rapl & RAPL_PKG_POWER_INFO) {
+ /* get TDP to determine energy counter range */
+ if (get_msr(0, MSR_PKG_POWER_INFO, &msr))
+ return;
- tdp = ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
+ tdp = ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
- rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
-
- if (verbose)
- fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range\n", rapl_joule_counter_range);
+ rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
+ if (verbose)
+ fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range\n", rapl_joule_counter_range);
+ }
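The "Joule Counter Range" figure is simply how long the 32-bit energy counter
can run at TDP before wrapping. A hedged worked example with typical values
(the energy unit and TDP are assumptions, not read from any particular part):

        double energy_units = 1.0 / 65536;      /* 2^-16 J per count */
        double tdp = 130.0;                     /* watts */
        double range = 0xFFFFFFFF * energy_units / tdp;  /* ~504 seconds */
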
return;
}
@@ -1702,7 +1729,8 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
"(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
local_rapl_power_units, local_rapl_energy_units, local_rapl_time_units);
}
- if (do_rapl & RAPL_PKG) {
+ if (do_rapl & RAPL_PKG_POWER_INFO) {
+
if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
return -5;
@@ -1714,6 +1742,9 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
+ }
+ if (do_rapl & RAPL_PKG) {
+
if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
return -9;
@@ -1749,12 +1780,16 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
print_power_limit_msr(cpu, msr, "DRAM Limit");
}
- if (do_rapl & RAPL_CORES) {
+ if (do_rapl & RAPL_CORE_POLICY) {
if (verbose) {
if (get_msr(cpu, MSR_PP0_POLICY, &msr))
return -7;
fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
+ }
+ }
+ if (do_rapl & RAPL_CORES) {
+ if (verbose) {
if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
return -9;
@@ -1813,10 +1848,47 @@ int has_c8_c9_c10(unsigned int family, unsigned int model)
}
+int is_slm(unsigned int family, unsigned int model)
+{
+ if (!genuine_intel)
+ return 0;
+ switch (model) {
+ case 0x4D: /* AVN */
+ return 1;
+ }
+ return 0;
+}
+
+#define SLM_BCLK_FREQS 5
+double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
+
+double slm_bclk(void)
+{
+ unsigned long long msr = 3;
+ unsigned int i;
+ double freq;
+
+ if (get_msr(0, MSR_FSB_FREQ, &msr))
+ fprintf(stderr, "SLM BCLK: unknown\n");
+
+ i = msr & 0xf;
+ if (i >= SLM_BCLK_FREQS) {
+ fprintf(stderr, "SLM BCLK[%d] invalid\n", i);
+ i = 3;
+ }
+ freq = slm_freq_table[i];
+
+ fprintf(stderr, "SLM BCLK: %.1f MHz\n", freq);
+
+ return freq;
+}
+
double discover_bclk(unsigned int family, unsigned int model)
{
if (is_snb(family, model))
return 100.00;
+ else if (is_slm(family, model))
+ return slm_bclk();
else
return 133.33;
}
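discover_bclk() now returns the SLM table value instead of assuming 100 MHz or
133 MHz, and turbostat's frequency printouts multiply the various MSR ratios by
that bus clock. A hedged sketch of the conversion (the ratio value is invented):

        /* With an SLM BCLK of 83.3 MHz, a max non-turbo ratio of 20
         * corresponds to 20 * 83.3 = 1666 MHz. */
        double mhz = ratio * bclk;
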
@@ -1873,7 +1945,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
cpu, msr, target_c_local);
- if (target_c_local < 85 || target_c_local > 120)
+ if (target_c_local < 85 || target_c_local > 127)
goto guess;
tcc_activation_temp = target_c_local;
@@ -1970,6 +2042,7 @@ void check_cpuid()
do_smi = do_nhm_cstates;
do_snb_cstates = is_snb(family, model);
do_c8_c9_c10 = has_c8_c9_c10(family, model);
+ do_slm_cstates = is_slm(family, model);
bclk = discover_bclk(family, model);
do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
@@ -2331,7 +2404,7 @@ int main(int argc, char **argv)
cmdline(argc, argv);
if (verbose)
- fprintf(stderr, "turbostat v3.4 April 17, 2013"
+ fprintf(stderr, "turbostat v3.5 April 26, 2013"
" - Len Brown <lenb@kernel.org>\n");
turbostat_init();
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 0d0506d55c71..ee76544deecb 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -59,21 +59,22 @@ QUIET_SUBDIR0 = +$(MAKE) $(COMMAND_O) -C # space to separate -C and subdir
QUIET_SUBDIR1 =
ifneq ($(findstring $(MAKEFLAGS),s),s)
-ifneq ($(V),1)
- QUIET_CC = @echo ' ' CC $@;
- QUIET_AR = @echo ' ' AR $@;
- QUIET_LINK = @echo ' ' LINK $@;
- QUIET_MKDIR = @echo ' ' MKDIR $@;
- QUIET_GEN = @echo ' ' GEN $@;
+ ifneq ($(V),1)
+ QUIET_CC = @echo ' CC '$@;
+ QUIET_AR = @echo ' AR '$@;
+ QUIET_LINK = @echo ' LINK '$@;
+ QUIET_MKDIR = @echo ' MKDIR '$@;
+ QUIET_GEN = @echo ' GEN '$@;
QUIET_SUBDIR0 = +@subdir=
- QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \
+ QUIET_SUBDIR1 = ;$(NO_SUBDIR) \
+ echo ' SUBDIR '$$subdir; \
$(MAKE) $(PRINT_DIR) -C $$subdir
- QUIET_FLEX = @echo ' ' FLEX $@;
- QUIET_BISON = @echo ' ' BISON $@;
+ QUIET_FLEX = @echo ' FLEX '$@;
+ QUIET_BISON = @echo ' BISON '$@;
descend = \
- +@echo ' ' DESCEND $(1); \
+ +@echo ' DESCEND '$(1); \
mkdir -p $(OUTPUT)$(1) && \
$(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2)
-endif
+ endif
endif
diff --git a/tools/testing/ktest/examples/crosstests.conf b/tools/testing/ktest/examples/crosstests.conf
index 46736604c26c..a1203148dfa1 100644
--- a/tools/testing/ktest/examples/crosstests.conf
+++ b/tools/testing/ktest/examples/crosstests.conf
@@ -133,12 +133,6 @@ CROSS = frv-linux
ARCH = frv
GCC_VER = 4.5.1
-# h8300 - failed make defconfig??
-TEST_START IF ${RUN} == h8300 || ${DO_FAILED}
-CROSS = h8300-elf
-ARCH = h8300
-GCC_VER = 4.5.1
-
# m68k fails with error?
TEST_START IF ${RUN} == m68k || ${DO_DEFAULT}
CROSS = m68k-linux
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index da7a19558281..bdb71a26ae35 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -41,13 +41,14 @@ struct vdev_info {
struct vhost_memory *mem;
};
-void vq_notify(struct virtqueue *vq)
+bool vq_notify(struct virtqueue *vq)
{
struct vq_info *info = vq->priv;
unsigned long long v = 1;
int r;
r = write(info->kick, &v, sizeof v);
assert(r == sizeof v);
+ return true;
}
void vq_callback(struct virtqueue *vq)
@@ -171,7 +172,8 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
GFP_ATOMIC);
if (likely(r == 0)) {
++started;
- virtqueue_kick(vq->vq);
+ if (unlikely(!virtqueue_kick(vq->vq)))
+ r = -1;
}
} else
r = -1;
diff --git a/tools/virtio/vringh_test.c b/tools/virtio/vringh_test.c
index d053ea40c001..14a4f4cab5b9 100644
--- a/tools/virtio/vringh_test.c
+++ b/tools/virtio/vringh_test.c
@@ -22,7 +22,7 @@ static u64 user_addr_offset;
#define RINGSIZE 256
#define ALIGN 4096
-static void never_notify_host(struct virtqueue *vq)
+static bool never_notify_host(struct virtqueue *vq)
{
abort();
}
@@ -65,17 +65,22 @@ struct guest_virtio_device {
unsigned long notifies;
};
-static void parallel_notify_host(struct virtqueue *vq)
+static bool parallel_notify_host(struct virtqueue *vq)
{
+ int rc;
struct guest_virtio_device *gvdev;
gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev);
- write(gvdev->to_host_fd, "", 1);
+ rc = write(gvdev->to_host_fd, "", 1);
+ if (rc < 0)
+ return false;
gvdev->notifies++;
+ return true;
}
-static void no_notify_host(struct virtqueue *vq)
+static bool no_notify_host(struct virtqueue *vq)
{
+ return true;
}
#define NUM_XFERS (10000000)
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 779262f59e25..fbe1a48bd629 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -27,3 +27,6 @@ config HAVE_KVM_MSI
config HAVE_KVM_CPU_RELAX_INTERCEPT
bool
+
+config KVM_VFIO
+ bool
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 8a39dda7a325..8631d9c14320 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -56,7 +56,6 @@ void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
static void async_pf_execute(struct work_struct *work)
{
- struct page *page = NULL;
struct kvm_async_pf *apf =
container_of(work, struct kvm_async_pf, work);
struct mm_struct *mm = apf->mm;
@@ -68,14 +67,12 @@ static void async_pf_execute(struct work_struct *work)
use_mm(mm);
down_read(&mm->mmap_sem);
- get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
+ get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
up_read(&mm->mmap_sem);
unuse_mm(mm);
spin_lock(&vcpu->async_pf.lock);
list_add_tail(&apf->link, &vcpu->async_pf.done);
- apf->page = page;
- apf->done = true;
spin_unlock(&vcpu->async_pf.lock);
/*
@@ -83,7 +80,7 @@ static void async_pf_execute(struct work_struct *work)
* this point
*/
- trace_kvm_async_pf_completed(addr, page, gva);
+ trace_kvm_async_pf_completed(addr, gva);
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
@@ -99,9 +96,8 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
struct kvm_async_pf *work =
list_entry(vcpu->async_pf.queue.next,
typeof(*work), queue);
- cancel_work_sync(&work->work);
list_del(&work->queue);
- if (!work->done) { /* work was canceled */
+ if (cancel_work_sync(&work->work)) {
mmdrop(work->mm);
kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
kmem_cache_free(async_pf_cache, work);
@@ -114,8 +110,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
list_entry(vcpu->async_pf.done.next,
typeof(*work), link);
list_del(&work->link);
- if (!is_error_page(work->page))
- kvm_release_page_clean(work->page);
kmem_cache_free(async_pf_cache, work);
}
spin_unlock(&vcpu->async_pf.lock);
@@ -135,14 +129,11 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
list_del(&work->link);
spin_unlock(&vcpu->async_pf.lock);
- if (work->page)
- kvm_arch_async_page_ready(vcpu, work);
+ kvm_arch_async_page_ready(vcpu, work);
kvm_arch_async_page_present(vcpu, work);
list_del(&work->queue);
vcpu->async_pf.queued--;
- if (!is_error_page(work->page))
- kvm_release_page_clean(work->page);
kmem_cache_free(async_pf_cache, work);
}
}
@@ -165,8 +156,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
if (!work)
return 0;
- work->page = NULL;
- work->done = false;
+ work->wakeup_all = false;
work->vcpu = vcpu;
work->gva = gva;
work->addr = gfn_to_hva(vcpu->kvm, gfn);
@@ -206,7 +196,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
if (!work)
return -ENOMEM;
- work->page = KVM_ERR_PTR_BAD_PAGE;
+ work->wakeup_all = true;
INIT_LIST_HEAD(&work->queue); /* for list_del to work */
spin_lock(&vcpu->async_pf.lock);
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 72a130bc448a..c7d9ce122529 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -79,7 +79,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
flags = IOMMU_READ;
if (!(slot->flags & KVM_MEM_READONLY))
flags |= IOMMU_WRITE;
- if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
+ if (!kvm->arch.iommu_noncoherent)
flags |= IOMMU_CACHE;
@@ -140,6 +140,9 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
+ if (kvm->arch.iommu_noncoherent)
+ kvm_arch_register_noncoherent_dma(kvm);
+
idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
@@ -158,7 +161,8 @@ int kvm_assign_device(struct kvm *kvm,
{
struct pci_dev *pdev = NULL;
struct iommu_domain *domain = kvm->arch.iommu_domain;
- int r, last_flags;
+ int r;
+ bool noncoherent;
/* check if iommu exists and in use */
if (!domain)
@@ -174,15 +178,13 @@ int kvm_assign_device(struct kvm *kvm,
return r;
}
- last_flags = kvm->arch.iommu_flags;
- if (iommu_domain_has_cap(kvm->arch.iommu_domain,
- IOMMU_CAP_CACHE_COHERENCY))
- kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;
+ noncoherent = !iommu_domain_has_cap(kvm->arch.iommu_domain,
+ IOMMU_CAP_CACHE_COHERENCY);
/* Check if need to update IOMMU page table for guest memory */
- if ((last_flags ^ kvm->arch.iommu_flags) ==
- KVM_IOMMU_CACHE_COHERENCY) {
+ if (noncoherent != kvm->arch.iommu_noncoherent) {
kvm_iommu_unmap_memslots(kvm);
+ kvm->arch.iommu_noncoherent = noncoherent;
r = kvm_iommu_map_memslots(kvm);
if (r)
goto out_unmap;
@@ -190,11 +192,7 @@ int kvm_assign_device(struct kvm *kvm,
pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
- printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
- assigned_dev->host_segnr,
- assigned_dev->host_busnr,
- PCI_SLOT(assigned_dev->host_devfn),
- PCI_FUNC(assigned_dev->host_devfn));
+ dev_info(&pdev->dev, "kvm assign device\n");
return 0;
out_unmap:
@@ -220,11 +218,7 @@ int kvm_deassign_device(struct kvm *kvm,
pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
- printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
- assigned_dev->host_segnr,
- assigned_dev->host_busnr,
- PCI_SLOT(assigned_dev->host_devfn),
- PCI_FUNC(assigned_dev->host_devfn));
+ dev_info(&pdev->dev, "kvm deassign device\n");
return 0;
}
@@ -336,6 +330,9 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
srcu_read_unlock(&kvm->srcu, idx);
+ if (kvm->arch.iommu_noncoherent)
+ kvm_arch_unregister_noncoherent_dma(kvm);
+
return 0;
}
@@ -350,6 +347,7 @@ int kvm_iommu_unmap_guest(struct kvm *kvm)
mutex_lock(&kvm->slots_lock);
kvm_iommu_unmap_memslots(kvm);
kvm->arch.iommu_domain = NULL;
+ kvm->arch.iommu_noncoherent = false;
mutex_unlock(&kvm->slots_lock);
iommu_domain_free(domain);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a9dd682cf5e3..fb9804437362 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -70,7 +70,8 @@ MODULE_LICENSE("GPL");
* kvm->lock --> kvm->slots_lock --> kvm->irq_lock
*/
-DEFINE_RAW_SPINLOCK(kvm_lock);
+DEFINE_SPINLOCK(kvm_lock);
+static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);
static cpumask_var_t cpus_hardware_enabled;
@@ -186,6 +187,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
++kvm->stat.remote_tlb_flush;
cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
+EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
void kvm_reload_remote_mmus(struct kvm *kvm)
{
@@ -490,9 +492,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
if (r)
goto out_err;
- raw_spin_lock(&kvm_lock);
+ spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
- raw_spin_unlock(&kvm_lock);
+ spin_unlock(&kvm_lock);
return kvm;
@@ -540,13 +542,13 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
/*
* Free any memory in @free but not in @dont.
*/
-static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
+static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
kvm_destroy_dirty_bitmap(free);
- kvm_arch_free_memslot(free, dont);
+ kvm_arch_free_memslot(kvm, free, dont);
free->npages = 0;
}
@@ -557,7 +559,7 @@ void kvm_free_physmem(struct kvm *kvm)
struct kvm_memory_slot *memslot;
kvm_for_each_memslot(memslot, slots)
- kvm_free_physmem_slot(memslot, NULL);
+ kvm_free_physmem_slot(kvm, memslot, NULL);
kfree(kvm->memslots);
}
@@ -581,9 +583,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
struct mm_struct *mm = kvm->mm;
kvm_arch_sync_events(kvm);
- raw_spin_lock(&kvm_lock);
+ spin_lock(&kvm_lock);
list_del(&kvm->vm_list);
- raw_spin_unlock(&kvm_lock);
+ spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++)
kvm_io_bus_destroy(kvm->buses[i]);
@@ -821,7 +823,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (change == KVM_MR_CREATE) {
new.userspace_addr = mem->userspace_addr;
- if (kvm_arch_create_memslot(&new, npages))
+ if (kvm_arch_create_memslot(kvm, &new, npages))
goto out_free;
}
@@ -872,6 +874,19 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out_free;
}
+ /* actual memory is freed via old in kvm_free_physmem_slot below */
+ if (change == KVM_MR_DELETE) {
+ new.dirty_bitmap = NULL;
+ memset(&new.arch, 0, sizeof(new.arch));
+ }
+
+ old_memslots = install_new_memslots(kvm, slots, &new);
+
+ kvm_arch_commit_memory_region(kvm, mem, &old, change);
+
+ kvm_free_physmem_slot(kvm, &old, &new);
+ kfree(old_memslots);
+
/*
* IOMMU mapping: New slots need to be mapped. Old slots need to be
* un-mapped and re-mapped if their base changes. Since base change
@@ -883,29 +898,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
*/
if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
r = kvm_iommu_map_pages(kvm, &new);
- if (r)
- goto out_slots;
- }
-
- /* actual memory is freed via old in kvm_free_physmem_slot below */
- if (change == KVM_MR_DELETE) {
- new.dirty_bitmap = NULL;
- memset(&new.arch, 0, sizeof(new.arch));
+ return r;
}
- old_memslots = install_new_memslots(kvm, slots, &new);
-
- kvm_arch_commit_memory_region(kvm, mem, &old, change);
-
- kvm_free_physmem_slot(&old, &new);
- kfree(old_memslots);
-
return 0;
out_slots:
kfree(slots);
out_free:
- kvm_free_physmem_slot(&new, &old);
+ kvm_free_physmem_slot(kvm, &new, &old);
out:
return r;
}
@@ -964,6 +965,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
out:
return r;
}
+EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
bool kvm_largepages_enabled(void)
{
@@ -1654,6 +1656,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
memslot = gfn_to_memslot(kvm, gfn);
mark_page_dirty_in_slot(kvm, memslot, gfn);
}
+EXPORT_SYMBOL_GPL(mark_page_dirty);
/*
* The vCPU has executed a HLT instruction with in-kernel mode enabled.
@@ -1679,6 +1682,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
finish_wait(&vcpu->wq, &wait);
}
+EXPORT_SYMBOL_GPL(kvm_vcpu_block);
#ifndef CONFIG_S390
/*
@@ -2271,6 +2275,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
ops = &kvm_xics_ops;
break;
#endif
+#ifdef CONFIG_KVM_VFIO
+ case KVM_DEV_TYPE_VFIO:
+ ops = &kvm_vfio_ops;
+ break;
+#endif
default:
return -ENODEV;
}
@@ -2683,11 +2692,12 @@ static void hardware_enable_nolock(void *junk)
}
}
-static void hardware_enable(void *junk)
+static void hardware_enable(void)
{
- raw_spin_lock(&kvm_lock);
- hardware_enable_nolock(junk);
- raw_spin_unlock(&kvm_lock);
+ raw_spin_lock(&kvm_count_lock);
+ if (kvm_usage_count)
+ hardware_enable_nolock(NULL);
+ raw_spin_unlock(&kvm_count_lock);
}
static void hardware_disable_nolock(void *junk)
@@ -2700,11 +2710,12 @@ static void hardware_disable_nolock(void *junk)
kvm_arch_hardware_disable(NULL);
}
-static void hardware_disable(void *junk)
+static void hardware_disable(void)
{
- raw_spin_lock(&kvm_lock);
- hardware_disable_nolock(junk);
- raw_spin_unlock(&kvm_lock);
+ raw_spin_lock(&kvm_count_lock);
+ if (kvm_usage_count)
+ hardware_disable_nolock(NULL);
+ raw_spin_unlock(&kvm_count_lock);
}
static void hardware_disable_all_nolock(void)
@@ -2718,16 +2729,16 @@ static void hardware_disable_all_nolock(void)
static void hardware_disable_all(void)
{
- raw_spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_count_lock);
hardware_disable_all_nolock();
- raw_spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_count_lock);
}
static int hardware_enable_all(void)
{
int r = 0;
- raw_spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_count_lock);
kvm_usage_count++;
if (kvm_usage_count == 1) {
@@ -2740,7 +2751,7 @@ static int hardware_enable_all(void)
}
}
- raw_spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_count_lock);
return r;
}
@@ -2750,20 +2761,17 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
{
int cpu = (long)v;
- if (!kvm_usage_count)
- return NOTIFY_OK;
-
val &= ~CPU_TASKS_FROZEN;
switch (val) {
case CPU_DYING:
printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
cpu);
- hardware_disable(NULL);
+ hardware_disable();
break;
case CPU_STARTING:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
cpu);
- hardware_enable(NULL);
+ hardware_enable();
break;
}
return NOTIFY_OK;
@@ -3056,10 +3064,10 @@ static int vm_stat_get(void *_offset, u64 *val)
struct kvm *kvm;
*val = 0;
- raw_spin_lock(&kvm_lock);
+ spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
*val += *(u32 *)((void *)kvm + offset);
- raw_spin_unlock(&kvm_lock);
+ spin_unlock(&kvm_lock);
return 0;
}
@@ -3073,12 +3081,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
int i;
*val = 0;
- raw_spin_lock(&kvm_lock);
+ spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
kvm_for_each_vcpu(i, vcpu, kvm)
*val += *(u32 *)((void *)vcpu + offset);
- raw_spin_unlock(&kvm_lock);
+ spin_unlock(&kvm_lock);
return 0;
}
@@ -3091,7 +3099,7 @@ static const struct file_operations *stat_fops[] = {
static int kvm_init_debug(void)
{
- int r = -EFAULT;
+ int r = -EEXIST;
struct kvm_stats_debugfs_item *p;
kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
@@ -3133,7 +3141,7 @@ static int kvm_suspend(void)
static void kvm_resume(void)
{
if (kvm_usage_count) {
- WARN_ON(raw_spin_is_locked(&kvm_lock));
+ WARN_ON(raw_spin_is_locked(&kvm_count_lock));
hardware_enable_nolock(NULL);
}
}
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
new file mode 100644
index 000000000000..ca4260e35037
--- /dev/null
+++ b/virt/kvm/vfio.c
@@ -0,0 +1,264 @@
+/*
+ * VFIO-KVM bridge pseudo device
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/kvm_host.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+
+struct kvm_vfio_group {
+ struct list_head node;
+ struct vfio_group *vfio_group;
+};
+
+struct kvm_vfio {
+ struct list_head group_list;
+ struct mutex lock;
+ bool noncoherent;
+};
+
+static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
+{
+ struct vfio_group *vfio_group;
+ struct vfio_group *(*fn)(struct file *);
+
+ fn = symbol_get(vfio_group_get_external_user);
+ if (!fn)
+ return ERR_PTR(-EINVAL);
+
+ vfio_group = fn(filep);
+
+ symbol_put(vfio_group_get_external_user);
+
+ return vfio_group;
+}
+
+static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
+{
+ void (*fn)(struct vfio_group *);
+
+ fn = symbol_get(vfio_group_put_external_user);
+ if (!fn)
+ return;
+
+ fn(vfio_group);
+
+ symbol_put(vfio_group_put_external_user);
+}
+
+/*
+ * Groups can use the same or different IOMMU domains. If the same then
+ * adding a new group may change the coherency of groups we've previously
+ * been told about. We don't want to care about any of that so we retest
+ * each group and bail as soon as we find one that's noncoherent. This
+ * means we only ever [un]register_noncoherent_dma once for the whole device.
+ */
+static void kvm_vfio_update_coherency(struct kvm_device *dev)
+{
+ struct kvm_vfio *kv = dev->private;
+ bool noncoherent = false;
+ struct kvm_vfio_group *kvg;
+
+ mutex_lock(&kv->lock);
+
+ list_for_each_entry(kvg, &kv->group_list, node) {
+ /*
+ * TODO: We need an interface to check the coherency of
+ * the IOMMU domain this group is using. For now, assume
+ * it's always noncoherent.
+ */
+ noncoherent = true;
+ break;
+ }
+
+ if (noncoherent != kv->noncoherent) {
+ kv->noncoherent = noncoherent;
+
+ if (kv->noncoherent)
+ kvm_arch_register_noncoherent_dma(dev->kvm);
+ else
+ kvm_arch_unregister_noncoherent_dma(dev->kvm);
+ }
+
+ mutex_unlock(&kv->lock);
+}
+
+static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
+{
+ struct kvm_vfio *kv = dev->private;
+ struct vfio_group *vfio_group;
+ struct kvm_vfio_group *kvg;
+ void __user *argp = (void __user *)arg;
+ struct fd f;
+ int32_t fd;
+ int ret;
+
+ switch (attr) {
+ case KVM_DEV_VFIO_GROUP_ADD:
+ if (get_user(fd, (int32_t __user *)argp))
+ return -EFAULT;
+
+ f = fdget(fd);
+ if (!f.file)
+ return -EBADF;
+
+ vfio_group = kvm_vfio_group_get_external_user(f.file);
+ fdput(f);
+
+ if (IS_ERR(vfio_group))
+ return PTR_ERR(vfio_group);
+
+ mutex_lock(&kv->lock);
+
+ list_for_each_entry(kvg, &kv->group_list, node) {
+ if (kvg->vfio_group == vfio_group) {
+ mutex_unlock(&kv->lock);
+ kvm_vfio_group_put_external_user(vfio_group);
+ return -EEXIST;
+ }
+ }
+
+ kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
+ if (!kvg) {
+ mutex_unlock(&kv->lock);
+ kvm_vfio_group_put_external_user(vfio_group);
+ return -ENOMEM;
+ }
+
+ list_add_tail(&kvg->node, &kv->group_list);
+ kvg->vfio_group = vfio_group;
+
+ mutex_unlock(&kv->lock);
+
+ kvm_vfio_update_coherency(dev);
+
+ return 0;
+
+ case KVM_DEV_VFIO_GROUP_DEL:
+ if (get_user(fd, (int32_t __user *)argp))
+ return -EFAULT;
+
+ f = fdget(fd);
+ if (!f.file)
+ return -EBADF;
+
+ vfio_group = kvm_vfio_group_get_external_user(f.file);
+ fdput(f);
+
+ if (IS_ERR(vfio_group))
+ return PTR_ERR(vfio_group);
+
+ ret = -ENOENT;
+
+ mutex_lock(&kv->lock);
+
+ list_for_each_entry(kvg, &kv->group_list, node) {
+ if (kvg->vfio_group != vfio_group)
+ continue;
+
+ list_del(&kvg->node);
+ kvm_vfio_group_put_external_user(kvg->vfio_group);
+ kfree(kvg);
+ ret = 0;
+ break;
+ }
+
+ mutex_unlock(&kv->lock);
+
+ kvm_vfio_group_put_external_user(vfio_group);
+
+ kvm_vfio_update_coherency(dev);
+
+ return ret;
+ }
+
+ return -ENXIO;
+}
+
+static int kvm_vfio_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_VFIO_GROUP:
+ return kvm_vfio_set_group(dev, attr->attr, attr->addr);
+ }
+
+ return -ENXIO;
+}
+
+static int kvm_vfio_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_VFIO_GROUP:
+ switch (attr->attr) {
+ case KVM_DEV_VFIO_GROUP_ADD:
+ case KVM_DEV_VFIO_GROUP_DEL:
+ return 0;
+ }
+
+ break;
+ }
+
+ return -ENXIO;
+}
+
+static void kvm_vfio_destroy(struct kvm_device *dev)
+{
+ struct kvm_vfio *kv = dev->private;
+ struct kvm_vfio_group *kvg, *tmp;
+
+ list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
+ kvm_vfio_group_put_external_user(kvg->vfio_group);
+ list_del(&kvg->node);
+ kfree(kvg);
+ }
+
+ kvm_vfio_update_coherency(dev);
+
+ kfree(kv);
+ kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
+}
+
+static int kvm_vfio_create(struct kvm_device *dev, u32 type)
+{
+ struct kvm_device *tmp;
+ struct kvm_vfio *kv;
+
+ /* Only one VFIO "device" per VM */
+ list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
+ if (tmp->ops == &kvm_vfio_ops)
+ return -EBUSY;
+
+ kv = kzalloc(sizeof(*kv), GFP_KERNEL);
+ if (!kv)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&kv->group_list);
+ mutex_init(&kv->lock);
+
+ dev->private = kv;
+
+ return 0;
+}
+
+struct kvm_device_ops kvm_vfio_ops = {
+ .name = "kvm-vfio",
+ .create = kvm_vfio_create,
+ .destroy = kvm_vfio_destroy,
+ .set_attr = kvm_vfio_set_attr,
+ .has_attr = kvm_vfio_has_attr,
+};
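
From userspace, the new pseudo device is instantiated with KVM_CREATE_DEVICE on
the VM fd and then fed VFIO group file descriptors through KVM_SET_DEVICE_ATTR.
A hedged sketch (error handling trimmed; the uapi constants are assumed to come
from <linux/kvm.h> as added elsewhere in this series):

        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        static int kvm_vfio_add_group(int vm_fd, int vfio_group_fd)
        {
                struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
                struct kvm_device_attr attr = {
                        .group  = KVM_DEV_VFIO_GROUP,
                        .attr   = KVM_DEV_VFIO_GROUP_ADD,
                        .addr   = (__u64)(unsigned long)&vfio_group_fd,
                };

                if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
                        return -1;

                /* the kernel reads the group fd as an int32_t at attr.addr */
                return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
        }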